[Pkg-clamav-commits] [SCM] Debian repository for ClamAV branch, debian/unstable, updated. debian/0.95+dfsg-1-6617-g1f6e4d4
aCaB
acab at clamav.net
Tue Nov 30 16:46:05 UTC 2010
The following commit has been merged in the debian/unstable branch:
commit 6e6cfd000c8f86393c74d93385c6011517638ba0
Author: aCaB <acab at clamav.net>
Date: Tue Nov 30 16:24:36 2010 +0100
git merge --squash clamav-0.96.5
diff --git a/ChangeLog b/ChangeLog
index e79a3db..9f127d0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,7 +1,99 @@
+Tue Nov 30 14:05:34 CET 2010 (tk)
+---------------------------------
+ * V 0.96.5
+
+Tue Nov 30 13:07:15 EET 2010 (edwin)
+------------------------------------
+ * libclamav/pdf.c: fix crashes (bb #2358, bb #2380, bb #2396).
+ Thanks to Arkadiusz Miskiewicz <arekm*maven.pl> for bb #2380.
+
+Tue Nov 30 12:09:56 CET 2010 (acab)
+-----------------------------------
+ * libclamav/pe_icons.c: off by one while (bb#2344)
+
+Mon Nov 29 17:40:39 CET 2010 (tk)
+---------------------------------
+ * libclamav: fix detection of embedded executables
+
+Wed Nov 24 23:52:28 CET 2010 (tk)
+---------------------------------
+ * libclamav/matcher-ac.c: fix offset handling for sigs with {x-y}
+ wildcards (bb#2393)
+
+Tue Nov 23 12:44:25 CET 2010 (tk)
+---------------------------------
+ * freshclam/manager.c: fix error path infinite loop (bb#2389)
+
+Tue Nov 23 13:13:40 EET 2010 (edwin)
+------------------------------------
+ * clamd/clamd.c: fix RLIMIT_DATA setting on BSD (bb #1941), thanks to
+ Andreas Longwitz <longwitz* incore.de>.
+
+Mon Nov 15 16:04:31 CET 2010 (tk)
+---------------------------------
+ * freshclam: improve mirror management
+
+Fri Nov 12 15:47:09 CET 2010 (tk)
+---------------------------------
+ * libclamav: fix possible use of uninitialized values (bb#2291)
+
+Wed Nov 10 16:31:49 CET 2010 (acab)
+-----------------------------------
+ * libclamav: Set the unreliability flag on (un)packed files (bb#2307)
+
+Sat Nov 6 15:47:01 EET 2010 (edwin)
+------------------------------------
+ * libclamav/c++: Update embedded copy of LLVM to version 2.8 (bb #2327)
+
+Fri Nov 5 16:40:31 CET 2010 (tk)
+---------------------------------
+ * freshclam: make query format backward compatible
+
+Fri Nov 5 15:32:22 CET 2010 (tk)
+---------------------------------
+ * freshclam: get detection stats directly from clamd (bb#2312)
+
+Thu Nov 4 21:12:53 EET 2010 (edwin)
+------------------------------------
+ * libclamav/cache.c,c++/bytecode2llvm.cpp}: make cl_load thread safe (bb #2333).
+
+Thu Nov 4 19:47:17 EET 2010 (edwin)
+------------------------------------
+ * freshclam: load database in subprocess (bb #2147).
+
+Wed Nov 3 13:38:47 CET 2010 (tk)
+---------------------------------
+ * clamd: add new commands DETSTATS and DETSTATSCLEAR (part of bb#2312)
+
+Tue Nov 2 13:01:14 EET 2010 (edwin)
+------------------------------------
+ * libclamav/7z.c: fix file descriptor leak (bb #2347)
+
+Mon Oct 18 11:35:45 EEST 2010 (edwin)
+-------------------------------------
+ * clamd, libclamav: add ability to log messages from libclamav (bb #1965)
+
Sun Oct 31 09:13:33 EET 2010 (edwin)
------------------------------------
* libclamav/builtin_bytecodes.h: Don't disable JIT on pentium4 (bb #2345)
+Fri Oct 29 22:26:55 CEST 2010 (acab)
+------------------------------------
+ * clamav-for-windows: displace clamav-for-windows to a separate solution and directory
+
+Fri Oct 29 19:03:31 CEST 2010 (tk)
+----------------------------------
+ * clamd: add new option OLE2BlockMacros (requested by Mike)
+
+Fri Oct 29 17:55:10 CEST 2010 (tk)
+----------------------------------
+ * freshclam: DatabaseCustomURL: add support for If-Modified-Since
+ and signature counter
+
+Thu Oct 28 16:23:47 CEST 2010 (tk)
+----------------------------------
+ * freshclam: add initial support for DatabaseCustomURL
+
Mon Oct 25 18:02:56 CEST 2010 (tk)
----------------------------------
* V 0.96.4
diff --git a/Makefile.am b/Makefile.am
index 32ba6be..bf0dc9c 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -18,7 +18,7 @@
ACLOCAL_AMFLAGS=-I m4
-SUBDIRS = libltdl libclamav clamscan clamd clamdscan freshclam sigtool clamconf database docs etc clamav-milter test unit_tests clamdtop clambc
+SUBDIRS = libltdl libclamav clamscan clamd clamdscan freshclam sigtool clamconf database docs etc clamav-milter test clamdtop clambc unit_tests
EXTRA_DIST = FAQ examples BUGS shared libclamav.pc.in libclamunrar_iface/Makefile.am libclamunrar_iface/Makefile.in UPGRADE COPYING.bzip2 COPYING.lzma COPYING.unrar COPYING.LGPL COPYING.llvm COPYING.file COPYING.zlib COPYING.getopt COPYING.regex COPYING.sha256 platform.h.in clamdscan/clamdscan.map
bin_SCRIPTS=clamav-config
@@ -35,3 +35,5 @@ lcov:
quick-check:
($(MAKE); cd unit_tests; $(MAKE) quick-check)
+dist-hook:
+ rm -rf $(distdir)/win32/clamav-for-windows $(distdir)/win32/build
diff --git a/Makefile.in b/Makefile.in
index 08ed280..ecd0705 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -341,7 +341,7 @@ top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
ACLOCAL_AMFLAGS = -I m4
-SUBDIRS = libltdl libclamav clamscan clamd clamdscan freshclam sigtool clamconf database docs etc clamav-milter test unit_tests clamdtop clambc
+SUBDIRS = libltdl libclamav clamscan clamd clamdscan freshclam sigtool clamconf database docs etc clamav-milter test clamdtop clambc unit_tests
EXTRA_DIST = FAQ examples BUGS shared libclamav.pc.in libclamunrar_iface/Makefile.am libclamunrar_iface/Makefile.in UPGRADE COPYING.bzip2 COPYING.lzma COPYING.unrar COPYING.LGPL COPYING.llvm COPYING.file COPYING.zlib COPYING.getopt COPYING.regex COPYING.sha256 platform.h.in clamdscan/clamdscan.map
bin_SCRIPTS = clamav-config
pkgconfigdir = $(libdir)/pkgconfig
@@ -708,6 +708,9 @@ distdir: $(DISTFILES)
|| exit 1; \
fi; \
done
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$(top_distdir)" distdir="$(distdir)" \
+ dist-hook
-test -n "$(am__skip_mode_fix)" \
|| find "$(distdir)" -type d ! -perm -755 \
-exec chmod u+rwx,go+rx {} \; -o \
@@ -933,8 +936,8 @@ uninstall-am: uninstall-binSCRIPTS uninstall-pkgconfigDATA
.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \
all all-am am--refresh check check-am clean clean-generic \
clean-libtool ctags ctags-recursive dist dist-all dist-bzip2 \
- dist-gzip dist-lzma dist-shar dist-tarZ dist-xz dist-zip \
- distcheck distclean distclean-generic distclean-hdr \
+ dist-gzip dist-hook dist-lzma dist-shar dist-tarZ dist-xz \
+ dist-zip distcheck distclean distclean-generic distclean-hdr \
distclean-libtool distclean-tags distcleancheck distdir \
distuninstallcheck dvi dvi-am html html-am info info-am \
install install-am install-binSCRIPTS install-data \
@@ -954,6 +957,9 @@ lcov:
quick-check:
($(MAKE); cd unit_tests; $(MAKE) quick-check)
+dist-hook:
+ rm -rf $(distdir)/win32/clamav-for-windows $(distdir)/win32/build
+
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
diff --git a/NEWS b/NEWS
index 952d8f0..477822d 100644
--- a/NEWS
+++ b/NEWS
@@ -1,7 +1,10 @@
-0.96.4
+0.96.5
------
-ClamAV 0.96.4 is a bugfix release recommended for all users.
+ClamAV 0.96.5 includes bugfixes and minor feature enhancements, such as
+improved handling of detection statistics, better file logging,
+and support for custom database URLs in freshclam. Please refer to the
+ChangeLog for details.
--
The ClamAV team (http://www.clamav.net/team)
diff --git a/README b/README
index 8ac117c..92acf44 100644
--- a/README
+++ b/README
@@ -2,6 +2,19 @@ Note: This README/NEWS file refers to the source tarball. Some things described
here may not be available in binary packages.
--
+
+0.96.5
+------
+
+ClamAV 0.96.5 includes bugfixes and minor feature enhancements, such as
+improved handling of detection statistics, better file logging,
+and support for custom database URLs in freshclam. Please refer to the
+ChangeLog for details.
+
+--
+The ClamAV team (http://www.clamav.net/team)
+
+
0.96.4
------
@@ -10,7 +23,6 @@ ClamAV 0.96.4 is a bugfix release recommended for all users.
--
The ClamAV team (http://www.clamav.net/team)
-
0.96.3
------
@@ -1562,7 +1574,7 @@ Noteworthy changes in this version:
-) documentation:
+ new Spanish documentation on ClamAV and Sendmail integration by
Erick Ivaan Lopez Carreon
- + included clamdoc.pdf Turkish translation by yavuz kaya and Ýbrahim erken
+ + included clamdoc.pdf Turkish translation by yavuz kaya and İbrahim erken
+ included clamav-mirror-howto.pdf by Luca Gibelli
+ included clamd+daemontools HOWTO by Jesse D. Guardiani
+ included signatures.pdf
diff --git a/clamav-config.h.in b/clamav-config.h.in
index 2946ae2..004f0cf 100644
--- a/clamav-config.h.in
+++ b/clamav-config.h.in
@@ -96,7 +96,7 @@
/* file i/o buffer size */
#undef FILEBUFF
-/* FPU byte ordering is little endian */
+/* FPU byte ordering matches CPU */
#undef FPU_WORDS_BIGENDIAN
/* enable workaround for broken DNS servers */
diff --git a/clamd/clamd.c b/clamd/clamd.c
index ddb70ed..5950181 100644
--- a/clamd/clamd.c
+++ b/clamd/clamd.c
@@ -69,6 +69,7 @@
#include "localserver.h"
#include "others.h"
#include "shared.h"
+#include "scanner.h"
short debug_mode = 0, logok = 0;
short foreground = 0;
@@ -243,6 +244,8 @@ int main(int argc, char **argv)
} else
logg_file = NULL;
+ if (optget(opts,"DevLiblog")->enabled)
+ cl_set_clcb_msg(msg_callback);
if((ret = cl_init(CL_INIT_DEFAULT))) {
logg("!Can't initialize libclamav: %s\n", cl_strerror(ret));
ret = 1;
@@ -304,14 +307,13 @@ int main(int argc, char **argv)
* too soon (after ~120 MB).
* Set limit lower than 2G if on 32-bit */
uint64_t lim = rlim.rlim_cur;
- lim = (int32_t) lim;
if (sizeof(void*) == 4 &&
- lim != rlim.rlim_cur) {
- rlim.rlim_cur = 2048*1024-1;
+ lim > (1ULL << 31)) {
+ rlim.rlim_cur = 1ULL << 31;
if (setrlimit(RLIMIT_DATA, &rlim) < 0)
logg("!setrlimit(RLIMIT_DATA) failed: %s\n", strerror(errno));
else
- logg("^Running on 32-bit system, and RLIMIT_DATA > 2GB, lowering to 2GB!\n");
+ logg("Running on 32-bit system, and RLIMIT_DATA > 2GB, lowering to 2GB!\n");
}
}
#endif
@@ -423,6 +425,9 @@ int main(int argc, char **argv)
}
}
+ cl_engine_set_clcb_hash(engine, hash_callback);
+ detstats_clear();
+
if(optget(opts, "LeaveTemporaryFiles")->enabled)
cl_engine_set_num(engine, CL_ENGINE_KEEPTMP, 1);
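
The RLIMIT_DATA hunk above replaces the old int32_t-truncation test with an explicit comparison against 2 GB, so the limit is only lowered on 32-bit builds where rlim_cur really exceeds 2^31. A minimal standalone sketch of the same check (illustrative only, not taken verbatim from the tree):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/resource.h>

    /* Mirror the clamd.c hunk above: on a 32-bit build, cap RLIMIT_DATA at 2 GB. */
    static void cap_rlimit_data(void)
    {
        struct rlimit rlim;
        uint64_t lim;

        if (getrlimit(RLIMIT_DATA, &rlim) < 0)
            return;
        lim = rlim.rlim_cur;
        if (sizeof(void *) == 4 && lim > (1ULL << 31)) {
            rlim.rlim_cur = 1ULL << 31;
            if (setrlimit(RLIMIT_DATA, &rlim) < 0)
                fprintf(stderr, "setrlimit(RLIMIT_DATA) failed: %s\n", strerror(errno));
            else
                fprintf(stderr, "RLIMIT_DATA > 2GB on a 32-bit system, lowering to 2GB\n");
        }
    }
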
diff --git a/clamd/clamuko.c b/clamd/clamuko.c
index b560856..8ea2ca1 100644
--- a/clamd/clamuko.c
+++ b/clamd/clamuko.c
@@ -43,6 +43,7 @@
#include "dazukoio.h"
#include "clamukofs.h"
#include "clamuko.h"
+#include "scanner.h"
struct dazuko_access *acc;
short int clamuko_scanning;
@@ -76,8 +77,7 @@ static void *clamukolegacyth(void *arg)
short int scan;
int sizelimit = 0, extinfo;
struct stat sb;
- char virhash[33];
- unsigned int virsize;
+ struct cb_context context;
clamuko_scanning = 0;
@@ -185,9 +185,13 @@ static void *clamukolegacyth(void *arg)
}
}
- if(scan && cli_scanfile_stats(acc->filename, &virname, virhash, &virsize, NULL, tharg->engine, tharg->options) == CL_VIRUS) {
- if(extinfo && virsize)
- logg("Clamuko: %s: %s(%s:%u) FOUND\n", acc->filename, virname, virhash, virsize);
+ context.filename = acc->filename;
+ context.virsize = 0;
+ if(scan && cl_scanfile_callback(acc->filename, &virname, NULL, tharg->engine, tharg->options, &context) == CL_VIRUS) {
+ if(context.virsize)
+ detstats_add(virname, acc->filename, context.virsize, context.virhash);
+ if(extinfo && context.virsize)
+ logg("Clamuko: %s: %s(%s:%llu) FOUND\n", acc->filename, virname, context.virhash, context.virsize);
else
logg("Clamuko: %s: %s FOUND\n", acc->filename, virname);
virusaction(acc->filename, virname, tharg->opts);
diff --git a/clamd/clamukofs.c b/clamd/clamukofs.c
index e4a1586..deb9f27 100644
--- a/clamd/clamukofs.c
+++ b/clamd/clamukofs.c
@@ -41,6 +41,7 @@
#include "others.h"
#include "dazukofs.h"
#include "clamuko.h"
+#include "scanner.h"
static pthread_mutex_t running_mutex = PTHREAD_MUTEX_INITIALIZER;
static dazukofs_handle_t shutdown_hndl;
@@ -83,14 +84,15 @@ static void *clamuko_scanth(void *arg)
{
struct thrarg *tharg = (struct thrarg *) arg;
sigset_t sigset;
- unsigned int sizelimit = 0, virsize;
+ unsigned int sizelimit = 0;
struct stat sb;
dazukofs_handle_t scan_hndl;
struct dazukofs_access acc;
const char *groupname = "ClamAV";
int skip_scan = 0, extinfo;
const char *virname;
- char filename[4096], virhash[33];
+ char filename[4096];
+ struct cb_context context;
/* ignore all signals */
sigfillset(&sigset);
@@ -151,15 +153,19 @@ static void *clamuko_scanth(void *arg)
}
}
+ context.filename = NULL;
+ context.virsize = 0;
if(skip_scan) {
acc.deny = 0;
/* reset skip flag */
skip_scan = 0;
- } else if(cli_scandesc_stats(acc.fd, &virname, virhash, &virsize, NULL, tharg->engine,
- tharg->options) == CL_VIRUS) {
+ } else if(cl_scandesc_callback(acc.fd, &virname, NULL, tharg->engine,
+ tharg->options, &context) == CL_VIRUS) {
dazukofs_get_filename(&acc, filename, sizeof(filename));
- if(extinfo && virsize)
- logg("Clamuko: %s: %s(%s:%u) FOUND\n", filename, virname, virhash, virsize);
+ if(context.virsize)
+ detstats_add(virname, filename, context.virsize, context.virhash);
+ if(extinfo && context.virsize)
+ logg("Clamuko: %s: %s(%s:%llu) FOUND\n", filename, virname, context.virhash, context.virsize);
else
logg("Clamuko: %s: %s FOUND\n", filename, virname);
/* we can not perform any special action because it will
diff --git a/clamd/others.c b/clamd/others.c
index 90e20f1..66a0d94 100644
--- a/clamd/others.c
+++ b/clamd/others.c
@@ -74,6 +74,9 @@
#include "session.h"
#include "others.h"
+static pthread_mutex_t virusaction_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t detstats_lock = PTHREAD_MUTEX_INITIALIZER;
+
#ifdef _WIN32
void virusaction(const char *filename, const char *virname, const struct optstruct *opts)
{
@@ -86,8 +89,6 @@ void virusaction(const char *filename, const char *virname, const struct optstru
#define VE_FILENAME "CLAM_VIRUSEVENT_FILENAME"
#define VE_VIRUSNAME "CLAM_VIRUSEVENT_VIRUSNAME"
-static pthread_mutex_t virusaction_lock = PTHREAD_MUTEX_INITIALIZER;
-
void virusaction(const char *filename, const char *virname, const struct optstruct *opts)
{
pid_t pid;
@@ -670,3 +671,54 @@ void fds_free(struct fd_data *data)
data->nfds = 0;
fds_unlock(data);
}
+
+struct detstats_s {
+ char virname[128];
+ char fname[128];
+ char md5[33];
+ unsigned int fsize;
+ unsigned int time;
+};
+#define DETSTATS_MAX 50
+static struct detstats_s detstats_data[DETSTATS_MAX];
+static unsigned int detstats_idx = 0, detstats_total = 0;
+
+void detstats_clear(void)
+{
+ pthread_mutex_lock(&detstats_lock);
+ detstats_idx = detstats_total = 0;
+ pthread_mutex_unlock(&detstats_lock);
+}
+
+void detstats_add(const char *virname, const char *fname, unsigned int fsize, const char *md5)
+{
+ pthread_mutex_lock(&detstats_lock);
+
+ strncpy(detstats_data[detstats_idx].virname, virname, sizeof(detstats_data[detstats_idx].virname));
+ detstats_data[detstats_idx].virname[sizeof(detstats_data[detstats_idx].virname) - 1] = 0;
+
+ if((fname = strrchr(fname, *PATHSEP)))
+ fname++;
+ strncpy(detstats_data[detstats_idx].fname, (!fname || !strlen(fname)) ? "NOFNAME" : fname, sizeof(detstats_data[detstats_idx].fname));
+ detstats_data[detstats_idx].fname[sizeof(detstats_data[detstats_idx].fname) - 1] = 0;
+
+ strncpy(detstats_data[detstats_idx].md5, md5, sizeof(detstats_data[detstats_idx].md5));
+ detstats_data[detstats_idx].md5[sizeof(detstats_data[detstats_idx].md5) - 1] = 0;
+
+ detstats_data[detstats_idx].fsize = fsize;
+ detstats_data[detstats_idx++].time = time(NULL);
+ if(detstats_idx == DETSTATS_MAX)
+ detstats_idx = 0;
+ detstats_total++;
+ pthread_mutex_unlock(&detstats_lock);
+}
+
+void detstats_print(int desc, char term)
+{
+ unsigned int i;
+
+ pthread_mutex_lock(&detstats_lock);
+ for(i = 0; i < DETSTATS_MAX && i < detstats_total; i++)
+ mdprintf(desc, "%u:%s:%u:%s:%s%c", detstats_data[i].time, detstats_data[i].md5, detstats_data[i].fsize, detstats_data[i].virname, detstats_data[i].fname, term);
+ pthread_mutex_unlock(&detstats_lock);
+}
diff --git a/clamd/others.h b/clamd/others.h
index af1c4a9..44f98c8 100644
--- a/clamd/others.h
+++ b/clamd/others.h
@@ -82,4 +82,8 @@ void fds_cleanup(struct fd_data *data);
int fds_poll_recv(struct fd_data *data, int timeout, int check_signals, void *event);
void fds_free(struct fd_data *data);
+void detstats_clear(void);
+void detstats_add(const char *virname, const char *fname, unsigned int fsize, const char *md5);
+void detstats_print(int desc, char term);
+
#endif
diff --git a/clamd/scanner.c b/clamd/scanner.c
index a15e320..1d1c5a5 100644
--- a/clamd/scanner.c
+++ b/clamd/scanner.c
@@ -71,6 +71,37 @@ extern int progexit;
extern time_t reloaded_time;
extern pthread_mutex_t reload_mutex;
+void msg_callback(enum cl_msg severity, const char *fullmsg, const char *msg, void *ctx)
+{
+ struct cb_context *c = ctx;
+ const char *filename = (c && c->filename) ? c->filename : "";
+
+ switch (severity) {
+ case CL_MSG_ERROR:
+ logg("^[LibClamAV] %s: %s", filename, msg);
+ break;
+ case CL_MSG_WARN:
+ logg("~[LibClamAV] %s: %s", filename, msg);
+ break;
+ case CL_MSG_INFO_VERBOSE:
+ logg("*[LibClamAV] %s: %s", filename, msg);
+ break;
+ default:
+ logg("$[LibClamAV] %s: %s", filename, msg);
+ break;
+ }
+}
+
+void hash_callback(int fd, unsigned long long size, const unsigned char *md5, const char *virname, void *ctx)
+{
+ struct cb_context *c = ctx;
+ if (!c)
+ return;
+ c->virsize = size;
+ strncpy(c->virhash, md5, 32);
+ c->virhash[32] = '\0';
+}
+
#define BUFFSIZE 1024
int scan_callback(struct stat *sb, char *filename, const char *msg, enum cli_ftw_reason reason, struct cli_ftw_cbdata *data)
{
@@ -78,8 +109,7 @@ int scan_callback(struct stat *sb, char *filename, const char *msg, enum cli_ftw
const char *virname;
int ret;
int type = scandata->type;
- char virhash[33];
- unsigned int virsize;
+ struct cb_context context;
/* detect disconnected socket,
* this should NOT detect half-shutdown sockets (SHUT_WR) */
@@ -196,7 +226,9 @@ int scan_callback(struct stat *sb, char *filename, const char *msg, enum cli_ftw
thrmgr_setactivetask(filename,
type == TYPE_MULTISCAN ? "MULTISCANFILE" : NULL);
- ret = cli_scanfile_stats(filename, &virname, virhash, &virsize, &scandata->scanned, scandata->engine, scandata->options);
+ context.filename = filename;
+ context.virsize = 0;
+ ret = cl_scanfile_callback(filename, &virname, &scandata->scanned, scandata->engine, scandata->options, &context);
thrmgr_setactivetask(NULL, NULL);
if (thrmgr_group_need_terminate(scandata->conn->group)) {
@@ -207,14 +239,14 @@ int scan_callback(struct stat *sb, char *filename, const char *msg, enum cli_ftw
if (ret == CL_VIRUS) {
scandata->infected++;
- if(!optget(scandata->opts, "ExtendedDetectionInfo")->enabled)
- virsize = 0;
- if (conn_reply_virus(scandata->conn, filename, virname, virhash, virsize) == -1) {
+ if (conn_reply_virus(scandata->conn, filename, virname, context.virhash, context.virsize) == -1) {
free(filename);
return CL_ETIMEOUT;
}
- if(virsize)
- logg("~%s: %s(%s:%u) FOUND\n", filename, virname, virhash, virsize);
+ if(context.virsize)
+ detstats_add(virname, filename, context.virsize, context.virhash);
+ if(context.virsize && optget(scandata->opts, "ExtendedDetectionInfo")->enabled)
+ logg("~%s: %s(%s:%llu) FOUND\n", filename, virname, context.virhash, context.virsize);
else
logg("~%s: %s FOUND\n", filename, virname);
virusaction(filename, virname, scandata->opts);
@@ -279,8 +311,8 @@ int scanfd(const int fd, const client_conn_t *conn, unsigned long int *scanned,
int ret;
const char *virname;
struct stat statbuf;
- char fdstr[32], virhash[33];
- unsigned int virsize;
+ struct cb_context context;
+ char fdstr[32];
if (stream)
strncpy(fdstr, "stream", sizeof(fdstr));
@@ -294,7 +326,9 @@ int scanfd(const int fd, const client_conn_t *conn, unsigned long int *scanned,
}
thrmgr_setactivetask(fdstr, NULL);
- ret = cli_scandesc_stats(fd, &virname, virhash, &virsize, scanned, engine, options);
+ context.filename = fdstr;
+ context.virsize = 0;
+ ret = cl_scandesc_callback(fd, &virname, scanned, engine, options, &context);
thrmgr_setactivetask(NULL, NULL);
if (thrmgr_group_need_terminate(conn->group)) {
@@ -303,12 +337,12 @@ int scanfd(const int fd, const client_conn_t *conn, unsigned long int *scanned,
}
if(ret == CL_VIRUS) {
- if(!optget(opts, "ExtendedDetectionInfo")->enabled)
- virsize = 0;
- if (conn_reply_virus(conn, fdstr, virname, virhash, virsize) == -1)
+ if (conn_reply_virus(conn, fdstr, virname, context.virhash, context.virsize) == -1)
ret = CL_ETIMEOUT;
- if(virsize)
- logg("%s: %s(%s:%u) FOUND\n", fdstr, virname, virhash, virsize);
+ if(context.virsize)
+ detstats_add(virname, "NOFNAME", context.virsize, context.virhash);
+ if(context.virsize && optget(opts, "ExtendedDetectionInfo")->enabled)
+ logg("%s: %s(%s:%llu) FOUND\n", fdstr, virname, context.virhash, context.virsize);
else
logg("%s: %s FOUND\n", fdstr, virname);
virusaction(fdstr, virname, opts);
@@ -329,12 +363,13 @@ int scanstream(int odesc, unsigned long int *scanned, const struct cl_engine *en
{
int ret, sockfd, acceptd;
int tmpd, bread, retval, firsttimeout, timeout, btread;
- unsigned int port = 0, portscan, min_port, max_port, virsize;
+ unsigned int port = 0, portscan, min_port, max_port;
unsigned long int quota = 0, maxsize = 0;
short bound = 0;
const char *virname;
char buff[FILEBUFF];
- char peer_addr[32], virhash[33];
+ char peer_addr[32];
+ struct cb_context context;
struct sockaddr_in server;
struct sockaddr_in peer;
socklen_t addrlen;
@@ -462,7 +497,9 @@ int scanstream(int odesc, unsigned long int *scanned, const struct cl_engine *en
if(retval == 1) {
lseek(tmpd, 0, SEEK_SET);
thrmgr_setactivetask(peer_addr, NULL);
- ret = cli_scandesc_stats(tmpd, &virname, virhash, &virsize, scanned, engine, options);
+ context.filename = peer_addr;
+ context.virsize = 0;
+ ret = cl_scandesc_callback(tmpd, &virname, scanned, engine, options, &context);
thrmgr_setactivetask(NULL, NULL);
} else {
ret = -1;
@@ -476,9 +513,11 @@ int scanstream(int odesc, unsigned long int *scanned, const struct cl_engine *en
closesocket(sockfd);
if(ret == CL_VIRUS) {
- if(optget(opts, "ExtendedDetectionInfo")->enabled && virsize) {
- mdprintf(odesc, "stream: %s(%s:%u) FOUND%c", virname, virhash, virsize, term);
- logg("stream(%s@%u): %s(%s:%u) FOUND\n", peer_addr, port, virname, virhash, virsize);
+ if(context.virsize)
+ detstats_add(virname, "NOFNAME", context.virsize, context.virhash);
+ if(context.virsize && optget(opts, "ExtendedDetectionInfo")->enabled) {
+ mdprintf(odesc, "stream: %s(%s:%llu) FOUND%c", virname, context.virhash, context.virsize, term);
+ logg("stream(%s@%u): %s(%s:%llu) FOUND\n", peer_addr, port, virname, context.virhash, context.virsize);
} else {
mdprintf(odesc, "stream: %s FOUND%c", virname, term);
logg("stream(%s@%u): %s FOUND\n", peer_addr, port, virname);
diff --git a/clamd/scanner.h b/clamd/scanner.h
index a56fc5d..f435d14 100644
--- a/clamd/scanner.h
+++ b/clamd/scanner.h
@@ -51,9 +51,17 @@ struct scan_cb_data {
dev_t dev;
};
+struct cb_context {
+ const char *filename;
+ unsigned long long virsize;
+ char virhash[33];
+};
+
int scanfd(const int fd, const client_conn_t *conn, unsigned long int *scanned, const struct cl_engine *engine, unsigned int options, const struct optstruct *opts, int odesc, int stream);
int scanstream(int odesc, unsigned long int *scanned, const struct cl_engine *engine, unsigned int options, const struct optstruct *opts, char term);
int scan_callback(struct stat *sb, char *filename, const char *msg, enum cli_ftw_reason reason, struct cli_ftw_cbdata *data);
int scan_pathchk(const char *path, struct cli_ftw_cbdata *data);
+void hash_callback(int fd, unsigned long long size, const unsigned char *md5, const char *virname, void *ctx);
+void msg_callback(enum cl_msg severity, const char *fullmsg, const char *msg, void *ctx);
#endif
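
The scanner.c and scanner.h hunks above switch clamd from the internal cli_scanfile_stats()/cli_scandesc_stats() helpers to the public cl_scanfile_callback()/cl_scandesc_callback() API, passing a per-scan struct cb_context that the hash callback fills with the size and MD5 of the detected file, while the message callback routes libclamav output into logg(). A hedged sketch of how a standalone libclamav client could use the same callbacks (signatures copied from the hunks above, error handling mostly omitted):

    #include <stdio.h>
    #include <string.h>
    #include <clamav.h>

    struct cb_context {            /* same shape as the clamd struct above */
        const char *filename;
        unsigned long long virsize;
        char virhash[33];
    };

    static void my_msg_cb(enum cl_msg severity, const char *fullmsg,
                          const char *msg, void *ctx)
    {
        struct cb_context *c = ctx;
        (void)severity; (void)fullmsg;   /* severity handling omitted in this sketch */
        fprintf(stderr, "[LibClamAV] %s: %s",
                (c && c->filename) ? c->filename : "", msg);
    }

    static void my_hash_cb(int fd, unsigned long long size,
                           const unsigned char *md5, const char *virname, void *ctx)
    {
        struct cb_context *c = ctx;
        (void)fd; (void)virname;
        if (!c)
            return;
        c->virsize = size;
        memcpy(c->virhash, md5, 32);     /* 32-character hex digest, as in clamd */
        c->virhash[32] = '\0';
    }

    int main(int argc, char **argv)
    {
        const char *virname;
        unsigned int sigs = 0;
        struct cb_context ctx = { argv[1], 0, "" };
        struct cl_engine *engine;

        if (argc < 2 || cl_init(CL_INIT_DEFAULT) != CL_SUCCESS)
            return 1;
        cl_set_clcb_msg(my_msg_cb);                  /* global message callback */

        engine = cl_engine_new();
        cl_load(cl_retdbdir(), engine, &sigs, CL_DB_STDOPT);
        cl_engine_compile(engine);
        cl_engine_set_clcb_hash(engine, my_hash_cb); /* per-engine hash callback */

        if (cl_scanfile_callback(argv[1], &virname, NULL, engine,
                                 CL_SCAN_STDOPT, &ctx) == CL_VIRUS)
            printf("%s: %s(%s:%llu) FOUND\n", argv[1], virname, ctx.virhash, ctx.virsize);

        cl_engine_free(engine);
        return 0;
    }
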
diff --git a/clamd/server-th.c b/clamd/server-th.c
index ab81572..eee1496 100644
--- a/clamd/server-th.c
+++ b/clamd/server-th.c
@@ -846,6 +846,10 @@ int recvloop_th(int *socketds, unsigned nsockets, struct cl_engine *engine, unsi
if(optget(opts, "ScanOLE2")->enabled) {
logg("OLE2 support enabled.\n");
options |= CL_SCAN_OLE2;
+ if(optget(opts, "OLE2BlockMacros")->enabled) {
+ logg("OLE2: Blocking all VBA macros.\n");
+ options |= CL_SCAN_BLOCKMACROS;
+ }
} else {
logg("OLE2 support disabled.\n");
}
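
The server-th.c hunk above wires the new OLE2BlockMacros clamd option to the existing CL_SCAN_BLOCKMACROS libclamav flag: it only takes effect when ScanOLE2 is also enabled, and it reports macro-bearing OLE2 files that match no signature as Heuristics.OLE2.ContainsMacros (see the clamd.conf.5 hunk further down). A small hedged helper showing the same flag composition for a direct libclamav user (names are illustrative):

    #include <clamav.h>

    /* Sketch only: mirror the ScanOLE2/OLE2BlockMacros logic from the
     * server-th.c hunk above; clamd builds its options mask flag by flag. */
    static unsigned int build_scan_options(int scan_ole2, int block_macros)
    {
        unsigned int options = 0;

        if (scan_ole2) {
            options |= CL_SCAN_OLE2;
            if (block_macros)
                options |= CL_SCAN_BLOCKMACROS; /* undetected VBA macros become
                                                 * Heuristics.OLE2.ContainsMacros */
        }
        return options;
    }
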
diff --git a/clamd/session.c b/clamd/session.c
index 6509314..f12eb22 100644
--- a/clamd/session.c
+++ b/clamd/session.c
@@ -92,10 +92,11 @@ static struct {
{CMD14, sizeof(CMD14)-1, COMMAND_FILDES, 0, 1, FEATURE_FDPASSING},
{CMD15, sizeof(CMD15)-1, COMMAND_STATS, 0, 0, 1},
{CMD16, sizeof(CMD16)-1, COMMAND_IDSESSION, 0, 0, 1},
- {CMD17, sizeof(CMD17)-1, COMMAND_INSTREAM, 0, 0, 1}
+ {CMD17, sizeof(CMD17)-1, COMMAND_INSTREAM, 0, 0, 1},
+ {CMD19, sizeof(CMD19)-1, COMMAND_DETSTATSCLEAR, 0, 1, 1},
+ {CMD20, sizeof(CMD20)-1, COMMAND_DETSTATS, 0, 1, 1}
};
-
enum commands parse_command(const char *cmd, const char **argument, int oldstyle)
{
size_t i;
@@ -571,6 +572,16 @@ int execute_or_dispatch_command(client_conn_t *conn, enum commands cmd, const ch
print_commands(desc, conn->term, engine);
return conn->group ? 0 : 1;
}
+ case COMMAND_DETSTATSCLEAR:
+ {
+ detstats_clear();
+ return 1;
+ }
+ case COMMAND_DETSTATS:
+ {
+ detstats_print(desc, conn->term);
+ return 1;
+ }
case COMMAND_INSTREAM:
{
int rc = cli_gentempfd(optget(conn->opts, "TemporaryDirectory")->strarg, &conn->filename, &conn->scanfd);
diff --git a/clamd/session.h b/clamd/session.h
index b34f516..23fe976 100644
--- a/clamd/session.h
+++ b/clamd/session.h
@@ -40,6 +40,8 @@
#define CMD16 "IDSESSION"
#define CMD17 "INSTREAM"
#define CMD18 "VERSIONCOMMANDS"
+#define CMD19 "DETSTATSCLEAR"
+#define CMD20 "DETSTATS"
#include "libclamav/clamav.h"
#include "shared/optparser.h"
@@ -64,6 +66,8 @@ enum commands {
COMMAND_IDSESSION,
COMMAND_INSTREAM,
COMMAND_COMMANDS,
+ COMMAND_DETSTATSCLEAR,
+ COMMAND_DETSTATS,
/* internal commands */
COMMAND_MULTISCANFILE,
COMMAND_INSTREAMSCAN
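
The session.c/session.h hunks above add the two new commands, DETSTATS and DETSTATSCLEAR, on top of the detstats ring buffer introduced in clamd/others.c: each DETSTATS record is emitted by detstats_print() as "time:md5:fsize:virname:fname" followed by the connection's terminator character. A hedged sketch of a tiny client querying them over TCP (host, port and buffer size are assumptions, not part of the diff):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>

    int main(void)
    {
        char buf[4096];
        ssize_t n;
        struct sockaddr_in srv;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
            return 1;
        memset(&srv, 0, sizeof(srv));
        srv.sin_family = AF_INET;
        srv.sin_port = htons(3310);                  /* assumed clamd TCP port */
        srv.sin_addr.s_addr = inet_addr("127.0.0.1");
        if (connect(fd, (struct sockaddr *)&srv, sizeof(srv)) < 0)
            return 1;

        /* 'n' prefix = newline-terminated command and reply */
        if (send(fd, "nDETSTATS\n", 10, 0) != 10)
            return 1;
        while ((n = recv(fd, buf, sizeof(buf) - 1, 0)) > 0) {
            buf[n] = '\0';
            fputs(buf, stdout);                      /* time:md5:fsize:virname:fname */
        }
        close(fd);
        return 0;
    }
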
diff --git a/clamdscan/Makefile.am b/clamdscan/Makefile.am
index e1602c0..5c514ab 100644
--- a/clamdscan/Makefile.am
+++ b/clamdscan/Makefile.am
@@ -31,6 +31,8 @@ clamdscan_SOURCES = \
$(top_srcdir)/shared/getopt.h \
$(top_srcdir)/shared/actions.c \
$(top_srcdir)/shared/actions.h \
+ $(top_srcdir)/shared/clamdcom.c \
+ $(top_srcdir)/shared/clamdcom.h \
clamdscan.c \
proto.c \
proto.h \
diff --git a/clamdscan/Makefile.in b/clamdscan/Makefile.in
index b5b9f63..a95acd4 100644
--- a/clamdscan/Makefile.in
+++ b/clamdscan/Makefile.in
@@ -81,13 +81,14 @@ am__clamdscan_SOURCES_DIST = $(top_srcdir)/shared/output.c \
$(top_srcdir)/shared/optparser.h $(top_srcdir)/shared/misc.c \
$(top_srcdir)/shared/misc.h $(top_srcdir)/shared/getopt.c \
$(top_srcdir)/shared/getopt.h $(top_srcdir)/shared/actions.c \
- $(top_srcdir)/shared/actions.h clamdscan.c proto.c proto.h \
+ $(top_srcdir)/shared/actions.h $(top_srcdir)/shared/clamdcom.c \
+ $(top_srcdir)/shared/clamdcom.h clamdscan.c proto.c proto.h \
client.c client.h
@BUILD_CLAMD_TRUE@am_clamdscan_OBJECTS = output.$(OBJEXT) \
@BUILD_CLAMD_TRUE@ optparser.$(OBJEXT) misc.$(OBJEXT) \
@BUILD_CLAMD_TRUE@ getopt.$(OBJEXT) actions.$(OBJEXT) \
-@BUILD_CLAMD_TRUE@ clamdscan.$(OBJEXT) proto.$(OBJEXT) \
-@BUILD_CLAMD_TRUE@ client.$(OBJEXT)
+@BUILD_CLAMD_TRUE@ clamdcom.$(OBJEXT) clamdscan.$(OBJEXT) \
+@BUILD_CLAMD_TRUE@ proto.$(OBJEXT) client.$(OBJEXT)
clamdscan_OBJECTS = $(am_clamdscan_OBJECTS)
clamdscan_LDADD = $(LDADD)
AM_V_lt = $(am__v_lt_$(V))
@@ -295,6 +296,8 @@ top_srcdir = @top_srcdir@
@BUILD_CLAMD_TRUE@ $(top_srcdir)/shared/getopt.h \
@BUILD_CLAMD_TRUE@ $(top_srcdir)/shared/actions.c \
@BUILD_CLAMD_TRUE@ $(top_srcdir)/shared/actions.h \
+@BUILD_CLAMD_TRUE@ $(top_srcdir)/shared/clamdcom.c \
+@BUILD_CLAMD_TRUE@ $(top_srcdir)/shared/clamdcom.h \
@BUILD_CLAMD_TRUE@ clamdscan.c \
@BUILD_CLAMD_TRUE@ proto.c \
@BUILD_CLAMD_TRUE@ proto.h \
@@ -408,6 +411,7 @@ distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/actions.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/clamdcom.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/clamdscan.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/client.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/getopt.Po@am__quote@
@@ -520,6 +524,22 @@ actions.obj: $(top_srcdir)/shared/actions.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o actions.obj `if test -f '$(top_srcdir)/shared/actions.c'; then $(CYGPATH_W) '$(top_srcdir)/shared/actions.c'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/shared/actions.c'; fi`
+clamdcom.o: $(top_srcdir)/shared/clamdcom.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT clamdcom.o -MD -MP -MF $(DEPDIR)/clamdcom.Tpo -c -o clamdcom.o `test -f '$(top_srcdir)/shared/clamdcom.c' || echo '$(srcdir)/'`$(top_srcdir)/shared/clamdcom.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/clamdcom.Tpo $(DEPDIR)/clamdcom.Po
+@am__fastdepCC_FALSE@ $(AM_V_CC) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(top_srcdir)/shared/clamdcom.c' object='clamdcom.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o clamdcom.o `test -f '$(top_srcdir)/shared/clamdcom.c' || echo '$(srcdir)/'`$(top_srcdir)/shared/clamdcom.c
+
+clamdcom.obj: $(top_srcdir)/shared/clamdcom.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT clamdcom.obj -MD -MP -MF $(DEPDIR)/clamdcom.Tpo -c -o clamdcom.obj `if test -f '$(top_srcdir)/shared/clamdcom.c'; then $(CYGPATH_W) '$(top_srcdir)/shared/clamdcom.c'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/shared/clamdcom.c'; fi`
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/clamdcom.Tpo $(DEPDIR)/clamdcom.Po
+@am__fastdepCC_FALSE@ $(AM_V_CC) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(top_srcdir)/shared/clamdcom.c' object='clamdcom.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o clamdcom.obj `if test -f '$(top_srcdir)/shared/clamdcom.c'; then $(CYGPATH_W) '$(top_srcdir)/shared/clamdcom.c'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/shared/clamdcom.c'; fi`
+
mostlyclean-libtool:
-rm -f *.lo
diff --git a/clamdscan/client.c b/clamdscan/client.c
index b548af3..924a0d6 100644
--- a/clamdscan/client.c
+++ b/clamdscan/client.c
@@ -56,6 +56,8 @@
#include "shared/output.h"
#include "shared/misc.h"
#include "shared/actions.h"
+#include "shared/clamdcom.h"
+
#include "libclamav/str.h"
#include "libclamav/others.h"
diff --git a/clamdscan/proto.c b/clamdscan/proto.c
index 85d979a..e57a33d 100644
--- a/clamdscan/proto.c
+++ b/clamdscan/proto.c
@@ -47,6 +47,7 @@
#include "shared/actions.h"
#include "shared/output.h"
#include "shared/misc.h"
+#include "shared/clamdcom.h"
#include "proto.h"
#include "client.h"
@@ -77,89 +78,6 @@ int dconnect() {
return sockd;
}
-/* Sends bytes over a socket
- * Returns 0 on success */
-int sendln(int sockd, const char *line, unsigned int len) {
- while(len) {
- int sent = send(sockd, line, len, 0);
- if(sent <= 0) {
- if(sent && errno == EINTR) continue;
- logg("!Can't send to clamd: %s\n", strerror(errno));
- return 1;
- }
- line += sent;
- len -= sent;
- }
- return 0;
-}
-
-/* Inits a RECVLN struct before it can be used in recvln() - see below */
-void recvlninit(struct RCVLN *s, int sockd) {
- s->sockd = sockd;
- s->bol = s->cur = s->buf;
- s->r = 0;
-}
-
-/* Receives a full (terminated with \0) line from a socket
- * Sets rbol to the begin of the received line, and optionally
- * reol to the ond of line.
- * Should be called repeatedly untill all input is conumed
- * Returns
- * - the lenght of the line (a positive number) on success
- * - 0 if the connection is closed
- * - -1 on error
- */
-int recvln(struct RCVLN *s, char **rbol, char **reol) {
- char *eol;
-
- while(1) {
- if(!s->r) {
- s->r = recv(s->sockd, s->cur, sizeof(s->buf) - (s->cur - s->buf), 0);
- if(s->r<=0) {
- if(s->r && errno == EINTR) {
- s->r = 0;
- continue;
- }
- if(s->r || s->cur!=s->buf) {
- *s->cur = '\0';
- if(strcmp(s->buf, "UNKNOWN COMMAND\n"))
- logg("!Communication error\n");
- else
- logg("!Command rejected by clamd (wrong clamd version?)\n");
- return -1;
- }
- return 0;
- }
- }
- if((eol = memchr(s->cur, 0, s->r))) {
- int ret = 0;
- eol++;
- s->r -= eol - s->cur;
- *rbol = s->bol;
- if(reol) *reol = eol;
- ret = eol - s->bol;
- if(s->r)
- s->bol = s->cur = eol;
- else
- s->bol = s->cur = s->buf;
- return ret;
- }
- s->r += s->cur - s->bol;
- if(!eol && s->r==sizeof(s->buf)) {
- logg("!Overlong reply from clamd\n");
- return -1;
- }
- if(!eol) {
- if(s->buf != s->bol) { /* old memmove sux */
- memmove(s->buf, s->bol, s->r);
- s->bol = s->buf;
- }
- s->cur = &s->bol[s->r];
- s->r = 0;
- }
- }
-}
-
/* Issues an INSTREAM command to clamd and streams the given file
* Returns >0 on success, 0 soft fail, -1 hard fail */
static int send_stream(int sockd, const char *filename) {
diff --git a/clamdscan/proto.h b/clamdscan/proto.h
index e80f926..94a0454 100644
--- a/clamdscan/proto.h
+++ b/clamdscan/proto.h
@@ -22,18 +22,7 @@
#define PROTO_H
#include "shared/misc.h"
-struct RCVLN {
- char buf[PATH_MAX+1024]; /* FIXME must match that in clamd - bb1349 */
- int sockd;
- int r;
- char *cur;
- char *bol;
-};
-
int dconnect(void);
-int sendln(int sockd, const char *line, unsigned int len);
-void recvlninit(struct RCVLN *s, int sockd);
-int recvln(struct RCVLN *s, char **rbol, char **reol);
int serial_client_scan(char *file, int scantype, int *infected, int *err, int maxlevel, int flags);
int parallel_client_scan(char *file, int scantype, int *infected, int *err, int maxlevel, int flags);
int dsresult(int sockd, int scantype, const char *filename, int *printok, int *errors);
diff --git a/config/config.guess b/config/config.guess
index e3a2116..c2246a4 100755
--- a/config/config.guess
+++ b/config/config.guess
@@ -1,10 +1,10 @@
#! /bin/sh
# Attempt to guess a canonical system name.
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
# Free Software Foundation, Inc.
-timestamp='2009-06-10'
+timestamp='2009-12-30'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
@@ -27,16 +27,16 @@ timestamp='2009-06-10'
# the same distribution terms that you use for the rest of that program.
-# Originally written by Per Bothner <per at bothner.com>.
-# Please send patches to <config-patches at gnu.org>. Submit a context
-# diff and a properly formatted ChangeLog entry.
+# Originally written by Per Bothner. Please send patches (context
+# diff format) to <config-patches at gnu.org> and include a ChangeLog
+# entry.
#
# This script attempts to guess a canonical system name similar to
# config.sub. If it succeeds, it prints the system name on stdout, and
# exits with 0. Otherwise, it exits with 1.
#
-# The plan is that this can be called by configure scripts if you
-# don't specify an explicit build system type.
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
me=`echo "$0" | sed -e 's,.*/,,'`
@@ -56,8 +56,9 @@ version="\
GNU config.guess ($timestamp)
Originally written by Per Bothner.
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
-2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free
+Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -333,6 +334,9 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
exit ;;
+ i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
+ echo i386-pc-auroraux${UNAME_RELEASE}
+ exit ;;
i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
eval $set_cc_for_build
SUN_ARCH="i386"
@@ -807,12 +811,12 @@ EOF
i*:PW*:*)
echo ${UNAME_MACHINE}-pc-pw32
exit ;;
- *:Interix*:[3456]*)
+ *:Interix*:*)
case ${UNAME_MACHINE} in
x86)
echo i586-pc-interix${UNAME_RELEASE}
exit ;;
- EM64T | authenticamd | genuineintel)
+ authenticamd | genuineintel | EM64T)
echo x86_64-unknown-interix${UNAME_RELEASE}
exit ;;
IA64)
@@ -854,6 +858,20 @@ EOF
i*86:Minix:*:*)
echo ${UNAME_MACHINE}-pc-minix
exit ;;
+ alpha:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+ EV5) UNAME_MACHINE=alphaev5 ;;
+ EV56) UNAME_MACHINE=alphaev56 ;;
+ PCA56) UNAME_MACHINE=alphapca56 ;;
+ PCA57) UNAME_MACHINE=alphapca56 ;;
+ EV6) UNAME_MACHINE=alphaev6 ;;
+ EV67) UNAME_MACHINE=alphaev67 ;;
+ EV68*) UNAME_MACHINE=alphaev68 ;;
+ esac
+ objdump --private-headers /bin/sh | grep -q ld.so.1
+ if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
+ echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
+ exit ;;
arm*:Linux:*:*)
eval $set_cc_for_build
if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
@@ -876,6 +894,17 @@ EOF
frv:Linux:*:*)
echo frv-unknown-linux-gnu
exit ;;
+ i*86:Linux:*:*)
+ LIBC=gnu
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #ifdef __dietlibc__
+ LIBC=dietlibc
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'`
+ echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
+ exit ;;
ia64:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
@@ -901,39 +930,18 @@ EOF
#endif
#endif
EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^CPU/{
- s: ::g
- p
- }'`"
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
;;
or32:Linux:*:*)
echo or32-unknown-linux-gnu
exit ;;
- ppc:Linux:*:*)
- echo powerpc-unknown-linux-gnu
- exit ;;
- ppc64:Linux:*:*)
- echo powerpc64-unknown-linux-gnu
- exit ;;
- alpha:Linux:*:*)
- case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
- EV5) UNAME_MACHINE=alphaev5 ;;
- EV56) UNAME_MACHINE=alphaev56 ;;
- PCA56) UNAME_MACHINE=alphapca56 ;;
- PCA57) UNAME_MACHINE=alphapca56 ;;
- EV6) UNAME_MACHINE=alphaev6 ;;
- EV67) UNAME_MACHINE=alphaev67 ;;
- EV68*) UNAME_MACHINE=alphaev68 ;;
- esac
- objdump --private-headers /bin/sh | grep -q ld.so.1
- if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
- echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
- exit ;;
padre:Linux:*:*)
echo sparc-unknown-linux-gnu
exit ;;
+ parisc64:Linux:*:* | hppa64:Linux:*:*)
+ echo hppa64-unknown-linux-gnu
+ exit ;;
parisc:Linux:*:* | hppa:Linux:*:*)
# Look for CPU level
case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
@@ -942,8 +950,11 @@ EOF
*) echo hppa-unknown-linux-gnu ;;
esac
exit ;;
- parisc64:Linux:*:* | hppa64:Linux:*:*)
- echo hppa64-unknown-linux-gnu
+ ppc64:Linux:*:*)
+ echo powerpc64-unknown-linux-gnu
+ exit ;;
+ ppc:Linux:*:*)
+ echo powerpc-unknown-linux-gnu
exit ;;
s390:Linux:*:* | s390x:Linux:*:*)
echo ${UNAME_MACHINE}-ibm-linux
@@ -966,58 +977,6 @@ EOF
xtensa*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
- i*86:Linux:*:*)
- # The BFD linker knows what the default object file format is, so
- # first see if it will tell us. cd to the root directory to prevent
- # problems with other programs or directories called `ld' in the path.
- # Set LC_ALL=C to ensure ld outputs messages in English.
- ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \
- | sed -ne '/supported targets:/!d
- s/[ ][ ]*/ /g
- s/.*supported targets: *//
- s/ .*//
- p'`
- case "$ld_supported_targets" in
- elf32-i386)
- TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu"
- ;;
- esac
- # Determine whether the default compiler is a.out or elf
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <features.h>
- #ifdef __ELF__
- # ifdef __GLIBC__
- # if __GLIBC__ >= 2
- LIBC=gnu
- # else
- LIBC=gnulibc1
- # endif
- # else
- LIBC=gnulibc1
- # endif
- #else
- #if defined(__INTEL_COMPILER) || defined(__PGI) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
- LIBC=gnu
- #else
- LIBC=gnuaout
- #endif
- #endif
- #ifdef __dietlibc__
- LIBC=dietlibc
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^LIBC/{
- s: ::g
- p
- }'`"
- test x"${LIBC}" != x && {
- echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
- exit
- }
- test x"${TENTATIVE}" != x && { echo "${TENTATIVE}"; exit; }
- ;;
i*86:DYNIX/ptx:4*:*)
# ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
# earlier versions are messed up and put the nodename in both
@@ -1247,6 +1206,16 @@ EOF
*:Darwin:*:*)
UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
case $UNAME_PROCESSOR in
+ i386)
+ eval $set_cc_for_build
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ UNAME_PROCESSOR="x86_64"
+ fi
+ fi ;;
unknown) UNAME_PROCESSOR=powerpc ;;
esac
echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
diff --git a/config/config.sub b/config/config.sub
index eb0389a..c2d1257 100755
--- a/config/config.sub
+++ b/config/config.sub
@@ -1,10 +1,10 @@
#! /bin/sh
# Configuration validation subroutine script.
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
# Free Software Foundation, Inc.
-timestamp='2009-06-11'
+timestamp='2010-01-22'
# This file is (in principle) common to ALL GNU software.
# The presence of a machine in this file suggests that SOME GNU software
@@ -32,13 +32,16 @@ timestamp='2009-06-11'
# Please send patches to <config-patches at gnu.org>. Submit a context
-# diff and a properly formatted ChangeLog entry.
+# diff and a properly formatted GNU ChangeLog entry.
#
# Configuration subroutine to validate and canonicalize a configuration type.
# Supply the specified configuration type as an argument.
# If it is invalid, we print an error message on stderr and exit with code 1.
# Otherwise, we print the canonical config type on stdout and succeed.
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
# This file is supposed to be the same for all GNU packages
# and recognize all the CPU types, system types and aliases
# that are meaningful with *any* GNU software.
@@ -72,8 +75,9 @@ Report bugs and patches to <config-patches at gnu.org>."
version="\
GNU config.sub ($timestamp)
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
-2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free
+Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -149,7 +153,7 @@ case $os in
-convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
-c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
-harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
- -apple | -axis | -knuth | -cray)
+ -apple | -axis | -knuth | -cray | -microblaze)
os=
basic_machine=$1
;;
@@ -284,6 +288,7 @@ case $basic_machine in
| pdp10 | pdp11 | pj | pjl \
| powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
| pyramid \
+ | rx \
| score \
| sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
| sh64 | sh64le \
@@ -291,13 +296,14 @@ case $basic_machine in
| sparcv8 | sparcv9 | sparcv9b | sparcv9v \
| spu | strongarm \
| tahoe | thumb | tic4x | tic80 | tron \
+ | ubicom32 \
| v850 | v850e \
| we32k \
| x86 | xc16x | xscale | xscalee[bl] | xstormy16 | xtensa \
| z8k | z80)
basic_machine=$basic_machine-unknown
;;
- m6811 | m68hc11 | m6812 | m68hc12)
+ m6811 | m68hc11 | m6812 | m68hc12 | picochip)
# Motorola 68HC11/12.
basic_machine=$basic_machine-unknown
os=-none
@@ -340,7 +346,7 @@ case $basic_machine in
| lm32-* \
| m32c-* | m32r-* | m32rle-* \
| m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
- | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
+ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \
| mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
| mips16-* \
| mips64-* | mips64el-* \
@@ -368,15 +374,17 @@ case $basic_machine in
| pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
| powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
| pyramid-* \
- | romp-* | rs6000-* \
+ | romp-* | rs6000-* | rx-* \
| sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
| shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
| sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
| sparclite-* \
| sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | strongarm-* | sv1-* | sx?-* \
| tahoe-* | thumb-* \
- | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* | tile-* \
+ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
+ | tile-* | tilegx-* \
| tron-* \
+ | ubicom32-* \
| v850-* | v850e-* | vax-* \
| we32k-* \
| x86-* | x86_64-* | xc16x-* | xps100-* | xscale-* | xscalee[bl]-* \
@@ -726,6 +734,9 @@ case $basic_machine in
basic_machine=ns32k-utek
os=-sysv
;;
+ microblaze)
+ basic_machine=microblaze-xilinx
+ ;;
mingw32)
basic_machine=i386-pc
os=-mingw32
@@ -1076,6 +1087,11 @@ case $basic_machine in
basic_machine=tic6x-unknown
os=-coff
;;
+ # This must be matched before tile*.
+ tilegx*)
+ basic_machine=tilegx-unknown
+ os=-linux-gnu
+ ;;
tile*)
basic_machine=tile-unknown
os=-linux-gnu
@@ -1247,6 +1263,9 @@ case $os in
# First match some system type aliases
# that might get confused with valid system types.
# -solaris* is a basic system type, with this one exception.
+ -auroraux)
+ os=-auroraux
+ ;;
-solaris1 | -solaris1.*)
os=`echo $os | sed -e 's|solaris1|sunos4|'`
;;
@@ -1268,8 +1287,8 @@ case $os in
# -sysv* is not here because it comes later, after sysvr4.
-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
| -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
- | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
- | -kopensolaris* \
+ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
+ | -sym* | -kopensolaris* \
| -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
| -aos* | -aros* \
| -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
@@ -1290,7 +1309,7 @@ case $os in
| -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
| -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
| -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
- | -skyos* | -haiku* | -rdos* | -toppers* | -drops*)
+ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*)
# Remember, each alternative MUST END IN *, to match a version number.
;;
-qnx*)
@@ -1423,6 +1442,8 @@ case $os in
-dicos*)
os=-dicos
;;
+ -nacl*)
+ ;;
-none)
;;
*)
diff --git a/configure b/configure
index f6f27aa..78454ac 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.67 for ClamAV 0.96.4.
+# Generated by GNU Autoconf 2.67 for ClamAV 0.96.5.
#
# Report bugs to <http://bugs.clamav.net/>.
#
@@ -703,8 +703,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='ClamAV'
PACKAGE_TARNAME='clamav'
-PACKAGE_VERSION='0.96.4'
-PACKAGE_STRING='ClamAV 0.96.4'
+PACKAGE_VERSION='0.96.5'
+PACKAGE_STRING='ClamAV 0.96.5'
PACKAGE_BUGREPORT='http://bugs.clamav.net/'
PACKAGE_URL='http://www.clamav.net/'
@@ -1541,7 +1541,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures ClamAV 0.96.4 to adapt to many kinds of systems.
+\`configure' configures ClamAV 0.96.5 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1612,7 +1612,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of ClamAV 0.96.4:";;
+ short | recursive ) echo "Configuration of ClamAV 0.96.5:";;
esac
cat <<\_ACEOF
@@ -1769,7 +1769,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
-ClamAV configure 0.96.4
+ClamAV configure 0.96.5
generated by GNU Autoconf 2.67
Copyright (C) 2010 Free Software Foundation, Inc.
@@ -2238,7 +2238,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by ClamAV $as_me 0.96.4, which was
+It was created by ClamAV $as_me 0.96.5, which was
generated by GNU Autoconf 2.67. Invocation command line was
$ $0 $@
@@ -3359,7 +3359,7 @@ fi
# Define the identity of the package.
PACKAGE='clamav'
- VERSION='0.96.4'
+ VERSION='0.96.5'
# Some tools Automake needs.
@@ -3488,7 +3488,7 @@ AM_BACKSLASH='\'
$as_echo "#define PACKAGE PACKAGE_NAME" >>confdefs.h
-VERSION="0.96.4"
+VERSION="0.96.5"
cat >>confdefs.h <<_ACEOF
#define VERSION "$VERSION"
@@ -3496,7 +3496,7 @@ _ACEOF
LC_CURRENT=7
-LC_REVISION=6
+LC_REVISION=7
LC_AGE=1
LIBCLAMAV_VERSION="$LC_CURRENT":"$LC_REVISION":"$LC_AGE"
@@ -11191,7 +11191,6 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
LIBADD_DLOPEN=
-lt_save_LIBS="$LIBS"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing dlopen" >&5
$as_echo_n "checking for library containing dlopen... " >&6; }
if test "${ac_cv_search_dlopen+set}" = set; then :
@@ -11326,6 +11325,7 @@ fi
if test x"$libltdl_cv_func_dlopen" = xyes || test x"$libltdl_cv_lib_dl_dlopen" = xyes
then
+ lt_save_LIBS="$LIBS"
LIBS="$LIBS $LIBADD_DLOPEN"
for ac_func in dlerror
do :
@@ -11338,8 +11338,8 @@ _ACEOF
fi
done
+ LIBS="$lt_save_LIBS"
fi
-LIBS="$lt_save_LIBS"
LIBADD_SHL_LOAD=
@@ -20637,7 +20637,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by ClamAV $as_me 0.96.4, which was
+This file was extended by ClamAV $as_me 0.96.5, which was
generated by GNU Autoconf 2.67. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -20704,7 +20704,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
-ClamAV config.status 0.96.4
+ClamAV config.status 0.96.5
configured by $0, generated by GNU Autoconf 2.67,
with options \\"\$ac_cs_config\\"
@@ -23240,7 +23240,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by ClamAV $as_me 0.96.4, which was
+This file was extended by ClamAV $as_me 0.96.5, which was
generated by GNU Autoconf 2.67. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -23307,7 +23307,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
-ClamAV config.status 0.96.4
+ClamAV config.status 0.96.5
configured by $0, generated by GNU Autoconf 2.67,
with options \\"\$ac_cs_config\\"
diff --git a/configure.in b/configure.in
index adcbd81..e5a934b 100644
--- a/configure.in
+++ b/configure.in
@@ -20,7 +20,7 @@ dnl MA 02110-1301, USA.
AC_PREREQ([2.59])
dnl For a release change [devel] to the real version [0.xy]
dnl also change VERSION below
-AC_INIT([ClamAV], [0.96.4], [http://bugs.clamav.net/], [clamav], [http://www.clamav.net/])
+AC_INIT([ClamAV], [0.96.5], [http://bugs.clamav.net/], [clamav], [http://www.clamav.net/])
AH_BOTTOM([#include "platform.h"])
dnl put configure auxiliary into config
@@ -42,11 +42,11 @@ dnl the date in the version
AC_DEFINE([PACKAGE], PACKAGE_NAME, [Name of package])
dnl change this on a release
-VERSION="0.96.4"
+VERSION="0.96.5"
AC_DEFINE_UNQUOTED([VERSION],"$VERSION",[Version number of package])
LC_CURRENT=7
-LC_REVISION=6
+LC_REVISION=7
LC_AGE=1
LIBCLAMAV_VERSION="$LC_CURRENT":"$LC_REVISION":"$LC_AGE"
AC_SUBST([LIBCLAMAV_VERSION])
diff --git a/docs/clamdoc.pdf b/docs/clamdoc.pdf
index d3b789b..9817aba 100644
Binary files a/docs/clamdoc.pdf and b/docs/clamdoc.pdf differ
diff --git a/docs/clamdoc.tex b/docs/clamdoc.tex
index 4e98a5c..b2b6133 100644
--- a/docs/clamdoc.tex
+++ b/docs/clamdoc.tex
@@ -71,7 +71,7 @@
\vspace{3cm}
\begin{flushright}
\rule[-1ex]{8cm}{3pt}\\
- \huge Clam AntiVirus 0.96.4\\
+ \huge Clam AntiVirus 0.96.5\\
\huge \emph{User Manual}\\
\end{flushright}
@@ -127,7 +127,7 @@
\item{POSIX compliant, portable}
\item{Fast scanning}
\item{Supports on-access scanning (Linux and FreeBSD only)}
- \item{Detects over 830.000 viruses, worms and trojans, including
+ \item{Detects over 850.000 viruses, worms and trojans, including
Microsoft Office macro viruses, mobile malware, and other threats}
\item{Built-in bytecode interpreter allows the ClamAV signature writers
to create and distribute very complex detection routines and
diff --git a/docs/html/clamdoc.html b/docs/html/clamdoc.html
index b70f4f8..e525949 100644
--- a/docs/html/clamdoc.html
+++ b/docs/html/clamdoc.html
@@ -56,7 +56,7 @@ original version by: Nikos Drakos, CBLU, University of Leeds
<BR>
<BR>
<DIV ALIGN="RIGHT">
-<BR> <BIG CLASS="HUGE">Clam AntiVirus 0.96.4
+<BR> <BIG CLASS="HUGE">Clam AntiVirus 0.96.5
<BR> <BIG CLASS="HUGE"><SPAN CLASS="textit">User Manual</SPAN>
<BR>
</BIG></BIG></DIV>
diff --git a/docs/html/index.html b/docs/html/index.html
index b70f4f8..e525949 100644
--- a/docs/html/index.html
+++ b/docs/html/index.html
@@ -56,7 +56,7 @@ original version by: Nikos Drakos, CBLU, University of Leeds
<BR>
<BR>
<DIV ALIGN="RIGHT">
-<BR> <BIG CLASS="HUGE">Clam AntiVirus 0.96.4
+<BR> <BIG CLASS="HUGE">Clam AntiVirus 0.96.5
<BR> <BIG CLASS="HUGE"><SPAN CLASS="textit">User Manual</SPAN>
<BR>
</BIG></BIG></DIV>
diff --git a/docs/html/node3.html b/docs/html/node3.html
index 15a0cec..311f9f3 100644
--- a/docs/html/node3.html
+++ b/docs/html/node3.html
@@ -67,7 +67,7 @@ Features</A>
</LI>
<LI>Supports on-access scanning (Linux and FreeBSD only)
</LI>
-<LI>Detects over 830.000 viruses, worms and trojans, including
+<LI>Detects over 850.000 viruses, worms and trojans, including
Microsoft Office macro viruses, mobile malware, and other threats
</LI>
<LI>Built-in bytecode interpreter allows the ClamAV signature writers
diff --git a/docs/man/clamd.conf.5.in b/docs/man/clamd.conf.5.in
index b3e454d..ac8c4bb 100644
--- a/docs/man/clamd.conf.5.in
+++ b/docs/man/clamd.conf.5.in
@@ -67,7 +67,7 @@ Enable verbose logging.
Default: no
.TP
\fBExtendedDetectionInfo BOOL\fR
-Provide additional information about the infected file, such as its size and hash, together with the virus name. It's recommended to enable this option along with SubmitDetectionStats in freshclam.conf.
+Log additional information about the infected file, such as its size and hash, together with the virus name.
.br
Default: no
.TP
@@ -299,6 +299,11 @@ This option enables scanning of OLE2 files, such as Microsoft Office documents a
.br
Default: yes
.TP
+\fBOLE2BlockMacros BOOL\fR
+With this option enabled OLE2 files with VBA macros, which were not detected by signatures will be marked as "Heuristics.OLE2.ContainsMacros".
+.br
+Default: no
+.TP
\fBScanPDF BOOL\fR
This option enables scanning within PDF files.
.br
diff --git a/docs/man/freshclam.conf.5.in b/docs/man/freshclam.conf.5.in
index 19becf4..b3f91f8 100644
--- a/docs/man/freshclam.conf.5.in
+++ b/docs/man/freshclam.conf.5.in
@@ -106,6 +106,11 @@ By default freshclam will keep the local databases (.cld) uncompressed to make t
.br
Default: no
.TP
+\fBDatabaseCustomURL STR\fR
+With this option you can provide custom sources (http:// or file://) for database files. This option can be used multiple times.
+.br
+Default: no custom URLs
+.TP
\fBHTTPProxyServer STR\fR, \fBHTTPProxyPort NUMBER\fR
Use given proxy server and TCP port for database downloads.
.TP
@@ -155,7 +160,7 @@ Timeout in seconds when reading from database server.
Default: 30
.TP
\fBSubmitDetectionStats STRING\fR
-When enabled freshclam will submit statistics to the ClamAV Project about the latest virus detections in your environment. The ClamAV maintainers will then use this data to determine what types of malware are the most detected in the field and in what geographic area they are. This feature requires LogTime and LogFile to be enabled in clamd.conf, it's also recommended to turn on ExtendedDetectionInfo. The path for clamd.conf file must be provided.
+When enabled, freshclam will submit statistics to the ClamAV Project about the latest virus detections in your environment. The ClamAV maintainers will then use this data to determine what types of malware are the most detected in the field and in what geographic area they are. Freshclam will connect to clamd in order to get the recent statistics. The path to the clamd.conf file must be provided.
.br
Default: disabled
.TP
diff --git a/etc/clamd.conf b/etc/clamd.conf
index 101b25f..6cbb05e 100644
--- a/etc/clamd.conf
+++ b/etc/clamd.conf
@@ -11,7 +11,7 @@ Example
# LogFile must be writable for the user running daemon.
# A full path is required.
# Default: disabled
-#LogFile /var/log/clamav/clamd.log
+#LogFile /tmp/clamd.log
# By default the log file is locked for writing - the lock protects against
# running clamd multiple times (if want to run another clamd, please
@@ -51,15 +51,14 @@ Example
# Default: no
#LogVerbose yes
-# Provide additional information about the infected file, such as its
-# size and hash, together with the virus name. It's recommended to enable
-# this option along with SubmitDetectionStats in freshclam.conf.
+# Log additional information about the infected file, such as its
+# size and hash, together with the virus name.
#ExtendedDetectionInfo yes
# This option allows you to save a process identifier of the listening
# daemon (main thread).
# Default: disabled
-#PidFile /var/run/clamav/clamd.pid
+#PidFile /var/run/clamd.pid
# Optional path to the global temporary directory.
# Default: system specific (usually /tmp or /var/tmp).
@@ -78,7 +77,7 @@ Example
# Path to a local socket file the daemon will listen on.
# Default: disabled (must be specified by a user)
-#LocalSocket /var/run/clamav/clamd
+#LocalSocket /tmp/clamd.socket
# Sets the group ownership on the unix socket.
# Default: disabled (the primary group of the user running clamd)
@@ -266,6 +265,12 @@ Example
# Default: yes
#ScanOLE2 yes
+
+# With this option enabled, OLE2 files containing VBA macros that were not
+# detected by signatures will be marked as "Heuristics.OLE2.ContainsMacros".
+# Default: no
+#OLE2BlockMacros no
+
# This option enables scanning within PDF files.
# Default: yes
#ScanPDF yes
diff --git a/etc/freshclam.conf b/etc/freshclam.conf
index 18be1a3..bea3b1d 100644
--- a/etc/freshclam.conf
+++ b/etc/freshclam.conf
@@ -14,7 +14,7 @@ Example
# Path to the log file (make sure it has proper permissions)
# Default: disabled
-#UpdateLogFile /var/log/clamav/freshclam.log
+#UpdateLogFile /var/log/freshclam.log
# Maximum size of the log file.
# Value of 0 disables the limit.
@@ -43,7 +43,7 @@ Example
# This option allows you to save the process identifier of the daemon
# Default: disabled
-#PidFile /var/run/clamav/freshclam.pid
+#PidFile /var/run/freshclam.pid
# By default when started freshclam drops privileges and switches to the
# "clamav" user. This directive allows you to change the database owner.
@@ -64,6 +64,7 @@ Example
# Uncomment the following line and replace XY with your country
# code. See http://www.iana.org/cctld/cctld-whois.htm for the full list.
+# You can use db.XY.ipv6.clamav.net for IPv6 connections.
#DatabaseMirror db.XY.clamav.net
# database.clamav.net is a round-robin record which points to our most
@@ -87,6 +88,12 @@ DatabaseMirror database.clamav.net
# Default: no
#CompressLocalDatabase no
+# With this option you can provide custom sources (http:// or file://) for
+# database files. This option can be used multiple times.
+# Default: no custom URLs
+#DatabaseCustomURL http://myserver.com/mysigs.ndb
+#DatabaseCustomURL file:///mnt/nfs/local.hdb
+
# Number of database checks per day.
# Default: 12 (every two hours)
#Checks 24
@@ -152,8 +159,7 @@ DatabaseMirror database.clamav.net
# the latest virus detections in your environment. The ClamAV maintainers
# will then use this data to determine what types of malware are the most
# detected in the field and in what geographic area they are.
-# This feature requires LogTime and LogFile to be enabled in clamd.conf,
-# it's also recommended to turn on ExtendedDetectionInfo.
+# Freshclam will connect to clamd in order to get recent statistics.
# Default: no
#SubmitDetectionStats /path/to/clamd.conf
diff --git a/freshclam/Makefile.am b/freshclam/Makefile.am
index 7b691f4..64fd260 100644
--- a/freshclam/Makefile.am
+++ b/freshclam/Makefile.am
@@ -32,6 +32,8 @@ freshclam_SOURCES = \
$(top_srcdir)/shared/cdiff.h \
$(top_srcdir)/shared/tar.c \
$(top_srcdir)/shared/tar.h \
+ $(top_srcdir)/shared/clamdcom.c \
+ $(top_srcdir)/shared/clamdcom.h \
freshclam.c \
manager.c \
manager.h \
diff --git a/freshclam/Makefile.in b/freshclam/Makefile.in
index 51588ec..f129d83 100644
--- a/freshclam/Makefile.in
+++ b/freshclam/Makefile.in
@@ -79,9 +79,9 @@ am__installdirs = "$(DESTDIR)$(bindir)"
PROGRAMS = $(bin_PROGRAMS)
am_freshclam_OBJECTS = output.$(OBJEXT) optparser.$(OBJEXT) \
getopt.$(OBJEXT) misc.$(OBJEXT) cdiff.$(OBJEXT) tar.$(OBJEXT) \
- freshclam.$(OBJEXT) manager.$(OBJEXT) notify.$(OBJEXT) \
- dns.$(OBJEXT) execute.$(OBJEXT) nonblock.$(OBJEXT) \
- mirman.$(OBJEXT)
+ clamdcom.$(OBJEXT) freshclam.$(OBJEXT) manager.$(OBJEXT) \
+ notify.$(OBJEXT) dns.$(OBJEXT) execute.$(OBJEXT) \
+ nonblock.$(OBJEXT) mirman.$(OBJEXT)
freshclam_OBJECTS = $(am_freshclam_OBJECTS)
freshclam_LDADD = $(LDADD)
AM_V_lt = $(am__v_lt_$(V))
@@ -291,6 +291,8 @@ freshclam_SOURCES = \
$(top_srcdir)/shared/cdiff.h \
$(top_srcdir)/shared/tar.c \
$(top_srcdir)/shared/tar.h \
+ $(top_srcdir)/shared/clamdcom.c \
+ $(top_srcdir)/shared/clamdcom.h \
freshclam.c \
manager.c \
manager.h \
@@ -412,6 +414,7 @@ distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cdiff.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/clamdcom.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dns.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/execute.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/freshclam.Po@am__quote@
@@ -545,6 +548,22 @@ tar.obj: $(top_srcdir)/shared/tar.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tar.obj `if test -f '$(top_srcdir)/shared/tar.c'; then $(CYGPATH_W) '$(top_srcdir)/shared/tar.c'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/shared/tar.c'; fi`
+clamdcom.o: $(top_srcdir)/shared/clamdcom.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT clamdcom.o -MD -MP -MF $(DEPDIR)/clamdcom.Tpo -c -o clamdcom.o `test -f '$(top_srcdir)/shared/clamdcom.c' || echo '$(srcdir)/'`$(top_srcdir)/shared/clamdcom.c
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/clamdcom.Tpo $(DEPDIR)/clamdcom.Po
+@am__fastdepCC_FALSE@ $(AM_V_CC) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(top_srcdir)/shared/clamdcom.c' object='clamdcom.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o clamdcom.o `test -f '$(top_srcdir)/shared/clamdcom.c' || echo '$(srcdir)/'`$(top_srcdir)/shared/clamdcom.c
+
+clamdcom.obj: $(top_srcdir)/shared/clamdcom.c
+@am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT clamdcom.obj -MD -MP -MF $(DEPDIR)/clamdcom.Tpo -c -o clamdcom.obj `if test -f '$(top_srcdir)/shared/clamdcom.c'; then $(CYGPATH_W) '$(top_srcdir)/shared/clamdcom.c'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/shared/clamdcom.c'; fi`
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/clamdcom.Tpo $(DEPDIR)/clamdcom.Po
+@am__fastdepCC_FALSE@ $(AM_V_CC) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(top_srcdir)/shared/clamdcom.c' object='clamdcom.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o clamdcom.obj `if test -f '$(top_srcdir)/shared/clamdcom.c'; then $(CYGPATH_W) '$(top_srcdir)/shared/clamdcom.c'; else $(CYGPATH_W) '$(srcdir)/$(top_srcdir)/shared/clamdcom.c'; fi`
+
mostlyclean-libtool:
-rm -f *.lo
diff --git a/freshclam/freshclam.c b/freshclam/freshclam.c
index 7136d0a..4aae243 100644
--- a/freshclam/freshclam.c
+++ b/freshclam/freshclam.c
@@ -64,17 +64,25 @@ extern int active_children;
static short foreground = 1;
char updtmpdir[512];
+int sigchld_wait = 1;
static void sighandler(int sig) {
switch(sig) {
#ifdef SIGCHLD
case SIGCHLD:
- waitpid(-1, NULL, WNOHANG);
+ if (sigchld_wait)
+ waitpid(-1, NULL, WNOHANG);
active_children--;
break;
#endif
+#ifdef SIGPIPE
+ case SIGPIPE:
+ /* no action, app will get EPIPE */
+ break;
+#endif
+
#ifdef SIGALRM
case SIGALRM:
terminate = -1;
@@ -198,6 +206,20 @@ static int download(const struct optstruct *opts, const char *datadir, const cha
return ret;
}
+void msg_callback(enum cl_msg severity, const char *fullmsg, const char *msg, void *ctx)
+{
+ switch (severity) {
+ case CL_MSG_ERROR:
+ logg("^[LibClamAV] %s", msg);
+ break;
+ case CL_MSG_WARN:
+ logg("~[LibClamAV] %s", msg);
+ default:
+ logg("*[LibClamAV] %s", msg);
+ break;
+ }
+}
+
int main(int argc, char **argv)
{
int ret = 52, retcl;
@@ -361,6 +383,7 @@ int main(int argc, char **argv)
}
#endif
+ cl_set_clcb_msg(msg_callback);
/* change the current working directory */
if(chdir(optget(opts, "DatabaseDirectory")->strarg)) {
logg("!Can't change dir to %s\n", optget(opts, "DatabaseDirectory")->strarg);
@@ -396,6 +419,7 @@ int main(int argc, char **argv)
memset(&sigact, 0, sizeof(struct sigaction));
sigact.sa_handler = sighandler;
sigaction(SIGINT, &sigact, NULL);
+ sigaction(SIGPIPE, &sigact, NULL);
#endif
if(optget(opts, "daemon")->enabled) {
int bigsleep, checks;
diff --git a/freshclam/manager.c b/freshclam/manager.c
index eb17e9d..f9a2f12 100644
--- a/freshclam/manager.c
+++ b/freshclam/manager.c
@@ -53,6 +53,9 @@
#include <sys/types.h>
#include <time.h>
#include <fcntl.h>
+#ifndef _WIN32
+#include <sys/wait.h>
+#endif
#include <sys/stat.h>
#include <dirent.h>
#include <errno.h>
@@ -72,6 +75,7 @@
#include "shared/misc.h"
#include "shared/cdiff.h"
#include "shared/tar.h"
+#include "shared/clamdcom.h"
#include "libclamav/clamav.h"
#include "libclamav/others.h"
@@ -298,7 +302,7 @@ static int wwwconnect(const char *server, const char *proxy, int pport, char *ip
loadbal_rp = rp;
strncpy(loadbal_ipaddr, ipaddr, sizeof(loadbal_ipaddr));
} else {
- if(md->succ < minsucc && md->fail <= minfail) {
+ if(md->succ <= minsucc && md->fail <= minfail) {
minsucc = md->succ;
minfail = md->fail;
loadbal_rp = rp;
@@ -320,7 +324,12 @@ static int wwwconnect(const char *server, const char *proxy, int pport, char *ip
}
rp = loadbal_rp;
strncpy(ipaddr, loadbal_ipaddr, sizeof(ipaddr));
-
+#ifdef SUPPORT_IPv6
+ if(rp->ai_family == AF_INET6)
+ addr = &((struct sockaddr_in6 *) rp->ai_addr)->sin6_addr;
+ else
+#endif
+ addr = &((struct sockaddr_in *) rp->ai_addr)->sin_addr;
} else if(loadbal_rp == rp) {
i++;
continue;
@@ -348,7 +357,7 @@ static int wwwconnect(const char *server, const char *proxy, int pport, char *ip
if(loadbal) {
loadbal = 0;
i = 0;
- }
+ } else i++;
continue;
} else {
if(mdat) {
@@ -426,6 +435,7 @@ static int wwwconnect(const char *server, const char *proxy, int pport, char *ip
return -2;
}
+/*
static const char *readblineraw(int fd, char *buf, int bufsize, int filesize, int *bread)
{
char *pt;
@@ -486,6 +496,7 @@ static const char *readbline(int fd, char *buf, int bufsize, int filesize, int *
return line;
}
+*/
static unsigned int fmt_base64(char *dest, const char *src, unsigned int len)
{
@@ -556,25 +567,18 @@ static char *proxyauth(const char *user, const char *pass)
return auth;
}
-/*
- * TODO:
- * - strptime() is most likely not portable enough
- */
int submitstats(const char *clamdcfg, const struct optstruct *opts)
{
- int fd, sd, bread, lread = 0, cnt, ret;
+ int sd, clamsockd, bread, cnt, ret;
char post[SUBMIT_MIN_ENTRIES * 256 + 512];
char query[SUBMIT_MIN_ENTRIES * 256];
- char buff[512], statsdat[512], newstatsdat[512], uastr[128];
- char logfile[256], fbuff[FILEBUFF];
- char *pt, *pt2, *auth = NULL;
- const char *line, *country = NULL, *user, *proxy = NULL, *hostid = NULL;
- struct optstruct *clamdopt;
+ char uastr[128], *line;
+ char *pt, *auth = NULL;
+ const char *country = NULL, *user, *proxy = NULL, *hostid = NULL;
const struct optstruct *opt;
- struct stat sb;
- struct tm tms;
- time_t epoch;
unsigned int qcnt, entries, submitted = 0, permfail = 0, port = 0;
+ struct RCVLN rcv;
+ const char *tokens[5];
if((opt = optget(opts, "DetectionStatsCountry"))->enabled) {
@@ -593,67 +597,6 @@ int submitstats(const char *clamdcfg, const struct optstruct *opts)
hostid = opt->strarg;
}
- if(!(clamdopt = optparse(clamdcfg, 0, NULL, 1, OPT_CLAMD, 0, NULL))) {
- logg("!SubmitDetectionStats: Can't open or parse configuration file %s\n", clamdcfg);
- return 56;
- }
-
- if(!(opt = optget(clamdopt, "LogFile"))->enabled) {
- logg("!SubmitDetectionStats: LogFile needs to be enabled in %s\n", clamdcfg);
- logg("SubmitDetectionStats: Please consider enabling ExtendedDetectionInfo\n");
- optfree(clamdopt);
- return 56;
- }
- strncpy(logfile, opt->strarg, sizeof(logfile));
- logfile[sizeof(logfile) - 1] = 0;
-
- if(!optget(clamdopt, "LogTime")->enabled) {
- logg("!SubmitDetectionStats: LogTime needs to be enabled in %s\n", clamdcfg);
- optfree(clamdopt);
- return 56;
- }
- optfree(clamdopt);
-
- if((fd = open("stats.dat", O_RDONLY|O_BINARY)) != -1) {
- if((bread = read(fd, statsdat, sizeof(statsdat) - 1)) == -1) {
- logg("^SubmitDetectionStats: Can't read stats.dat\n");
- bread = 0;
- }
- statsdat[bread] = 0;
- close(fd);
- } else {
- *statsdat = 0;
- }
-
- if((fd = open(logfile, O_RDONLY|O_BINARY)) == -1) {
- logg("!SubmitDetectionStats: Can't open %s for reading\n", logfile);
- return 56;
- }
-
- if(fstat(fd, &sb) == -1) {
- logg("!SubmitDetectionStats: fstat() failed\n");
- close(fd);
- return 56;
- }
-
- while((line = readbline(fd, fbuff, FILEBUFF, sb.st_size, &lread)))
- if(strlen(line) >= 32 && !strcmp(&line[strlen(line) - 6], " FOUND"))
- break;
-
- if(!line) {
- logg("SubmitDetectionStats: No detection records found\n");
- close(fd);
- return 1;
- }
-
- if(*statsdat && !strcmp(line, statsdat)) {
- logg("SubmitDetectionStats: No new detection records found\n");
- close(fd);
- return 1;
- } else {
- strncpy(newstatsdat, line, sizeof(newstatsdat));
- }
-
if((opt = optget(opts, "HTTPUserAgent"))->enabled)
strncpy(uastr, opt->strarg, sizeof(uastr));
else
@@ -669,14 +612,11 @@ int submitstats(const char *clamdcfg, const struct optstruct *opts)
user = opt->strarg;
if(!(opt = optget(opts, "HTTPProxyPassword"))->enabled) {
logg("!SubmitDetectionStats: HTTPProxyUsername requires HTTPProxyPassword\n");
- close(fd);
return 56;
}
auth = proxyauth(user, opt->strarg);
- if(!auth) {
- close(fd);
+ if(!auth)
return 56;
- }
}
if((opt = optget(opts, "HTTPProxyPort"))->enabled)
@@ -685,50 +625,27 @@ int submitstats(const char *clamdcfg, const struct optstruct *opts)
logg("*Connecting via %s\n", proxy);
}
+ if((clamsockd = clamd_connect(clamdcfg, "SubmitDetectionStats")) < 0)
+ return 52;
+
+ recvlninit(&rcv, clamsockd);
+ if(sendln(clamsockd, "zDETSTATS", 10)) {
+ closesocket(clamsockd);
+ return 52;
+ }
+
ret = 0;
memset(query, 0, sizeof(query));
qcnt = 0;
entries = 0;
- do {
- if(strlen(line) < 32 || strcmp(&line[strlen(line) - 6], " FOUND"))
- continue;
-
- if(*statsdat && !strcmp(line, statsdat))
- break;
-
- strncpy(buff, line, sizeof(buff));
- buff[sizeof(buff) - 1] = 0;
- if(!(pt = strstr(buff, " -> "))) {
- logg("*SubmitDetectionStats: Skipping detection entry logged without time\b");
- continue;
- }
- *pt = 0;
- pt += 4;
-
- tms.tm_isdst = -1;
- if(!strptime(buff, "%a %b %d %H:%M:%S %Y", &tms) || (epoch = mktime(&tms)) == -1) {
- logg("!SubmitDetectionStats: Failed to convert date string\n");
- ret = 1;
- break;
- }
-
- pt2 = &pt[strlen(pt) - 6];
- *pt2 = 0;
-
- if(!(pt2 = strrchr(pt, ' ')) || pt2[-1] != ':') {
- logg("!SubmitDetectionStats: Incorrect format of the log file (1)\n");
- ret = 1;
+ while(recvln(&rcv, &line, NULL) > 0) {
+ if(cli_strtokenize(line, ':', 5, tokens) != 5) {
+ logg("!SubmitDetectionStats: Invalid data format\n");
+ ret = 52;
break;
}
- pt2[-1] = 0;
- pt2++;
- if((pt = strrchr(pt, *PATHSEP)))
- *pt++ = 0;
- if(!pt)
- pt = (char*) "NOFNAME";
-
- qcnt += snprintf(&query[qcnt], sizeof(query) - qcnt, "ts[]=%u&fname[]=%s&virus[]=%s&", (unsigned int) epoch, pt, pt2);
+ qcnt += snprintf(&query[qcnt], sizeof(query) - qcnt, "ts[]=%s&fname[]=%s&virus[]=%s(%s:%s)&", tokens[0], tokens[4], tokens[3], tokens[1], tokens[2]);
entries++;
if(entries == SUBMIT_MIN_ENTRIES) {
@@ -738,7 +655,6 @@ int submitstats(const char *clamdcfg, const struct optstruct *opts)
ret = 52;
break;
}
-
query[sizeof(query) - 1] = 0;
if(mdprintf(sd,
"POST http://stats.clamav.net/submit.php HTTP/1.0\r\n"
@@ -809,30 +725,23 @@ int submitstats(const char *clamdcfg, const struct optstruct *opts)
break;
}
-
- } while((line = readbline(fd, fbuff, FILEBUFF, sb.st_size, &lread)));
-
- close(fd);
+ }
+ closesocket(clamsockd);
if(auth)
free(auth);
- if(submitted || permfail) {
- if((fd = open("stats.dat", O_WRONLY | O_BINARY | O_CREAT | O_TRUNC, 0600)) == -1) {
- logg("^SubmitDetectionStats: Can't open stats.dat for writing\n");
- } else {
- if((bread = write(fd, newstatsdat, sizeof(newstatsdat))) != sizeof(newstatsdat))
- logg("^SubmitDetectionStats: Can't write to stats.dat\n");
- close(fd);
- }
- }
-
if(ret == 0) {
- if(!submitted)
+ if(!submitted) {
logg("SubmitDetectionStats: Not enough recent data for submission\n");
- else
+ } else {
logg("SubmitDetectionStats: Submitted %u records\n", submitted);
+ if((clamsockd = clamd_connect(clamdcfg, "SubmitDetectionStats")) != -1) {
+ sendln(clamsockd, "DETSTATSCLEAR", 14);
+ recv(clamsockd, query, sizeof(query), 0);
+ closesocket(clamsockd);
+ }
+ }
}
-
return ret;
}
@@ -1016,14 +925,14 @@ static struct cl_cvd *remote_cvdhead(const char *cvdfile, const char *localfile,
return cvd;
}
-static int getfile(const char *srcfile, const char *destfile, const char *hostname, char *ip, const char *localip, const char *proxy, int port, const char *user, const char *pass, const char *uas, int ctimeout, int rtimeout, struct mirdat *mdat, int logerr, unsigned int can_whitelist)
+static int getfile_mirman(const char *srcfile, const char *destfile, const char *hostname, char *ip, const char *localip, const char *proxy, int port, const char *user, const char *pass, const char *uas, int ctimeout, int rtimeout, struct mirdat *mdat, int logerr, unsigned int can_whitelist, const char *ims, const char *ipaddr, int sd)
{
char cmd[512], uastr[128], buffer[FILEBUFF], *ch;
int bread, fd, totalsize = 0, rot = 0, totaldownloaded = 0,
- percentage = 0, sd;
+ percentage = 0;
unsigned int i;
- char *remotename = NULL, *authorization = NULL, *headerline, ipaddr[46];
- const char *rotation = "|/-\\";
+ char *remotename = NULL, *authorization = NULL, *headerline;
+ const char *rotation = "|/-\\", *fname;
if(proxy) {
@@ -1043,6 +952,9 @@ static int getfile(const char *srcfile, const char *destfile, const char *hostna
}
}
+ if(ims)
+ logg("*If-Modified-Since: %s\n", ims);
+
if(uas)
strncpy(uastr, uas, sizeof(uastr));
else
@@ -1057,7 +969,8 @@ static int getfile(const char *srcfile, const char *destfile, const char *hostna
"Cache-Control: no-cache\r\n"
#endif
"Connection: close\r\n"
- "\r\n", (remotename != NULL) ? remotename : "", srcfile, hostname, (authorization != NULL) ? authorization : "", uastr);
+ "%s%s%s"
+ "\r\n", (remotename != NULL) ? remotename : "", srcfile, hostname, (authorization != NULL) ? authorization : "", uastr, ims ? "If-Modified-Since: " : "", ims ? ims : "", ims ? "\r\n": "");
if(remotename)
free(remotename);
@@ -1065,24 +978,13 @@ static int getfile(const char *srcfile, const char *destfile, const char *hostna
if(authorization)
free(authorization);
- memset(ipaddr, 0, sizeof(ipaddr));
- if(ip[0]) /* use ip to connect */
- sd = wwwconnect(ip, proxy, port, ipaddr, localip, ctimeout, mdat, logerr, can_whitelist);
- else
- sd = wwwconnect(hostname, proxy, port, ipaddr, localip, ctimeout, mdat, logerr, can_whitelist);
-
- if(sd < 0) {
- return 52;
- } else {
- logg("*Trying to download http://%s/%s (IP: %s)\n", hostname, srcfile, ipaddr);
- }
+ logg("*Trying to download http://%s/%s (IP: %s)\n", hostname, srcfile, ipaddr);
- if(!ip[0])
+ if(ip && !ip[0])
strcpy(ip, ipaddr);
if(send(sd, cmd, strlen(cmd), 0) < 0) {
logg("%cgetfile: Can't write to socket\n", logerr ? '!' : '^');
- closesocket(sd);
return 52;
}
@@ -1097,8 +999,8 @@ static int getfile(const char *srcfile, const char *destfile, const char *hostna
if((i >= sizeof(buffer) - 1) || recv(sd, buffer + i, 1, 0) == -1) {
#endif
logg("%cgetfile: Error while reading database from %s (IP: %s): %s\n", logerr ? '!' : '^', hostname, ipaddr, strerror(errno));
- mirman_update(mdat->currip, mdat->af, mdat, 1);
- closesocket(sd);
+ if(mdat)
+ mirman_update(mdat->currip, mdat->af, mdat, 1);
return 52;
}
@@ -1115,16 +1017,23 @@ static int getfile(const char *srcfile, const char *destfile, const char *hostna
/* check whether the resource actually existed or not */
if((strstr(buffer, "HTTP/1.1 404")) != NULL || (strstr(buffer, "HTTP/1.0 404")) != NULL) {
logg("^getfile: %s not found on remote server (IP: %s)\n", srcfile, ipaddr);
- mirman_update(mdat->currip, mdat->af, mdat, 2);
- closesocket(sd);
+ if(mdat)
+ mirman_update(mdat->currip, mdat->af, mdat, 2);
return 58;
}
+ /* If-Modified-Since */
+ if(strstr(buffer, "HTTP/1.1 304") || strstr(buffer, "HTTP/1.0 304")) {
+ if(mdat)
+ mirman_update(mdat->currip, mdat->af, mdat, 0);
+ return 1;
+ }
+
if(!strstr(buffer, "HTTP/1.1 200") && !strstr(buffer, "HTTP/1.0 200") &&
!strstr(buffer, "HTTP/1.1 206") && !strstr(buffer, "HTTP/1.0 206")) {
logg("%cgetfile: Unknown response from remote server (IP: %s)\n", logerr ? '!' : '^', ipaddr);
- mirman_update(mdat->currip, mdat->af, mdat, 1);
- closesocket(sd);
+ if(mdat)
+ mirman_update(mdat->currip, mdat->af, mdat, 1);
return 58;
}
@@ -1150,10 +1059,14 @@ static int getfile(const char *srcfile, const char *destfile, const char *hostna
logg("!getfile: Can't create new file %s in the current directory\n", destfile);
logg("Hint: The database directory must be writable for UID %d or GID %d\n", getuid(), getgid());
- closesocket(sd);
return 57;
}
+ if((fname = strrchr(srcfile, '/')))
+ fname++;
+ else
+ fname = srcfile;
+
#ifdef SO_ERROR
while((bread = wait_recv(sd, buffer, FILEBUFF, 0, rtimeout)) > 0) {
#else
@@ -1161,9 +1074,8 @@ static int getfile(const char *srcfile, const char *destfile, const char *hostna
#endif
if(write(fd, buffer, bread) != bread) {
logg("getfile: Can't write %d bytes to %s\n", bread, destfile);
- unlink(destfile);
close(fd);
- closesocket(sd);
+ unlink(destfile);
return 57; /* FIXME */
}
@@ -1173,21 +1085,21 @@ static int getfile(const char *srcfile, const char *destfile, const char *hostna
if(!mprintf_quiet) {
if(totalsize > 0) {
- mprintf("Downloading %s [%3i%%]\r", srcfile, percentage);
+ mprintf("Downloading %s [%3i%%]\r", fname, percentage);
} else {
- mprintf("Downloading %s [%c]\r", srcfile, rotation[rot]);
+ mprintf("Downloading %s [%c]\r", fname, rotation[rot]);
rot++;
rot %= 4;
}
fflush(stdout);
}
}
- closesocket(sd);
close(fd);
if(bread == -1) {
logg("%cgetfile: Download interrupted: %s (IP: %s)\n", logerr ? '!' : '^', strerror(errno), ipaddr);
- mirman_update(mdat->currip, mdat->af, mdat, 2);
+ if(mdat)
+ mirman_update(mdat->currip, mdat->af, mdat, 2);
return 52;
}
@@ -1195,22 +1107,54 @@ static int getfile(const char *srcfile, const char *destfile, const char *hostna
return 53;
if(totalsize > 0)
- logg("Downloading %s [%i%%]\n", srcfile, percentage);
+ logg("Downloading %s [100%%]\n", fname);
else
- logg("Downloading %s [*]\n", srcfile);
+ logg("Downloading %s [*]\n", fname);
- mirman_update(mdat->currip, mdat->af, mdat, 0);
+ if(mdat)
+ mirman_update(mdat->currip, mdat->af, mdat, 0);
return 0;
}
-static int getcvd(const char *cvdfile, const char *newfile, const char *hostname, char *ip, const char *localip, const char *proxy, int port, const char *user, const char *pass, const char *uas, unsigned int newver, int ctimeout, int rtimeout, struct mirdat *mdat, int logerr, unsigned int can_whitelist)
+static int getfile(const char *srcfile, const char *destfile, const char *hostname, char *ip, const char *localip, const char *proxy, int port, const char *user, const char *pass, const char *uas, int ctimeout, int rtimeout, struct mirdat *mdat, int logerr, unsigned int can_whitelist, const char *ims, const struct optstruct *opts)
+{
+ int ret, sd;
+ char ipaddr[46];
+
+ memset(ipaddr, 0, sizeof(ipaddr));
+ if(ip && ip[0]) /* use ip to connect */
+ sd = wwwconnect(ip, proxy, port, ipaddr, localip, ctimeout, mdat, logerr, can_whitelist);
+ else
+ sd = wwwconnect(hostname, proxy, port, ipaddr, localip, ctimeout, mdat, logerr, can_whitelist);
+
+ if(sd < 0)
+ return 52;
+
+ if(mdat) {
+ mirman_update_sf(mdat->currip, mdat->af, mdat, 0, 1);
+ mirman_write("mirrors.dat", optget(opts, "DatabaseDirectory")->strarg, mdat);
+ }
+
+ ret = getfile_mirman(srcfile, destfile, hostname, ip, localip, proxy, port, user, pass, uas, ctimeout, rtimeout, mdat, logerr, can_whitelist, ims, ipaddr, sd);
+ closesocket(sd);
+
+ if(mdat) {
+ mirman_update_sf(mdat->currip, mdat->af, mdat, 0, -1);
+ mirman_write("mirrors.dat", optget(opts, "DatabaseDirectory")->strarg, mdat);
+ }
+
+ return ret;
+}
+
+static int getcvd(const char *cvdfile, const char *newfile, const char *hostname, char *ip, const char *localip, const char *proxy, int port, const char *user, const char *pass, const char *uas, unsigned int newver, int ctimeout, int rtimeout, struct mirdat *mdat, int logerr, unsigned int can_whitelist, const struct optstruct *opts)
{
struct cl_cvd *cvd;
int ret;
logg("*Retrieving http://%s/%s\n", hostname, cvdfile);
- if((ret = getfile(cvdfile, newfile, hostname, ip, localip, proxy, port, user, pass, uas, ctimeout, rtimeout, mdat, logerr, can_whitelist))) {
+
+ if((ret = getfile(cvdfile, newfile, hostname, ip, localip, proxy, port, user, pass, uas, ctimeout, rtimeout, mdat, logerr, can_whitelist, NULL, opts))) {
logg("%cCan't download %s from %s\n", logerr ? '!' : '^', cvdfile, hostname);
unlink(newfile);
return ret;
@@ -1275,7 +1219,7 @@ static int chdir_tmp(const char *dbname, const char *tmpdir)
return 0;
}
-static int getpatch(const char *dbname, const char *tmpdir, int version, const char *hostname, char *ip, const char *localip, const char *proxy, int port, const char *user, const char *pass, const char *uas, int ctimeout, int rtimeout, struct mirdat *mdat, int logerr, unsigned int can_whitelist)
+static int getpatch(const char *dbname, const char *tmpdir, int version, const char *hostname, char *ip, const char *localip, const char *proxy, int port, const char *user, const char *pass, const char *uas, int ctimeout, int rtimeout, struct mirdat *mdat, int logerr, unsigned int can_whitelist, const struct optstruct *opts)
{
char *tempname, patch[32], olddir[512];
int ret, fd;
@@ -1293,7 +1237,7 @@ static int getpatch(const char *dbname, const char *tmpdir, int version, const c
snprintf(patch, sizeof(patch), "%s-%d.cdiff", dbname, version);
logg("*Retrieving http://%s/%s\n", hostname, patch);
- if((ret = getfile(patch, tempname, hostname, ip, localip, proxy, port, user, pass, uas, ctimeout, rtimeout, mdat, logerr, can_whitelist))) {
+ if((ret = getfile(patch, tempname, hostname, ip, localip, proxy, port, user, pass, uas, ctimeout, rtimeout, mdat, logerr, can_whitelist, NULL, opts))) {
if(ret == 53)
logg("Empty script %s, need to download entire database\n", patch);
else
@@ -1402,16 +1346,16 @@ static int buildcld(const char *tmpdir, const char *dbname, const char *newfile,
if(write(fd, buff, 512) != 512) {
logg("!buildcld: Can't write to %s\n", newfile);
CHDIR_ERR(cwd);
- unlink(newfile);
close(fd);
+ unlink(newfile);
return -1;
}
if((dir = opendir(".")) == NULL) {
logg("!buildcld: Can't open directory %s\n", tmpdir);
CHDIR_ERR(cwd);
- unlink(newfile);
close(fd);
+ unlink(newfile);
return -1;
}
@@ -1420,8 +1364,8 @@ static int buildcld(const char *tmpdir, const char *dbname, const char *newfile,
if(!(gzs = gzopen(newfile, "ab9f"))) {
logg("!buildcld: gzopen() failed for %s\n", newfile);
CHDIR_ERR(cwd);
- unlink(newfile);
closedir(dir);
+ unlink(newfile);
return -1;
}
}
@@ -1449,8 +1393,8 @@ static int buildcld(const char *tmpdir, const char *dbname, const char *newfile,
gzclose(gzs);
else
close(fd);
- unlink(newfile);
closedir(dir);
+ unlink(newfile);
return -1;
}
@@ -1467,8 +1411,8 @@ static int buildcld(const char *tmpdir, const char *dbname, const char *newfile,
gzclose(gzs);
else
close(fd);
- unlink(newfile);
closedir(dir);
+ unlink(newfile);
return -1;
}
}
@@ -1497,11 +1441,128 @@ static int buildcld(const char *tmpdir, const char *dbname, const char *newfile,
return 0;
}
+static int test_database(const char *newfile, const char *newdb, int bytecode)
+{
+ struct cl_engine *engine;
+ unsigned newsigs = 0;
+ int ret;
+
+ logg("*Loading signatures from %s\n", newdb);
+ if(!(engine = cl_engine_new())) {
+ return 55;
+ }
+
+ if((ret = cl_load(newfile, engine, &newsigs, CL_DB_PHISHING | CL_DB_PHISHING_URLS | CL_DB_BYTECODE | CL_DB_PUA)) != CL_SUCCESS) {
+ logg("!Failed to load new database: %s\n", cl_strerror(ret));
+ cl_engine_free(engine);
+ return 55;
+ }
+ if(bytecode && (ret = cli_bytecode_prepare2(engine, &engine->bcs, engine->dconf->bytecode/*FIXME: dconf has no sense here*/))) {
+ logg("!Failed to compile/load bytecode: %s\n", cl_strerror(ret));
+ cl_engine_free(engine);
+ return 55;
+ }
+ logg("*Properly loaded %u signatures from new %s\n", newsigs, newdb);
+ if(engine->domainlist_matcher && engine->domainlist_matcher->sha256_pfx_set.keys)
+ cli_hashset_destroy(&engine->domainlist_matcher->sha256_pfx_set);
+ cl_engine_free(engine);
+ return 0;
+}
+
+#ifndef WIN32
+static int test_database_wrap(const char *file, const char *newdb, int bytecode)
+{
+ char firstline[256];
+ char lastline[256];
+ int pipefd[2];
+ pid_t pid;
+ int status = 0;
+ FILE *f;
+
+ if (pipe(pipefd) == -1) {
+ logg("^pipe() failed: %s\n", strerror(errno));
+ return test_database(file, newdb, bytecode);
+ }
+
+ switch ( pid = fork() ) {
+ case 0:
+ close(pipefd[0]);
+ dup2(pipefd[1], 2);
+ exit(test_database(file, newdb, bytecode));
+ case -1:
+ close(pipefd[0]);
+ close(pipefd[1]);
+ logg("^fork() failed: %s\n", strerror(errno));
+ return test_database(file, newdb, bytecode);
+ default:
+ /* read first / last line printed by child*/
+ close(pipefd[1]);
+ f = fdopen(pipefd[0], "r");
+ firstline[0] = 0;
+ lastline[0] = 0;
+ do {
+ if (!fgets(firstline, sizeof(firstline), f))
+ break;
+ /* ignore warning messages, otherwise the outdated warning will
+ * make us miss the important part of the error message */
+ } while (!strncmp(firstline, "LibClamAV Warning:", 18));
+ /* must read entire output, child doesn't like EPIPE */
+ while (fgets(lastline, sizeof(firstline), f)) {
+ /* print the full output only when LogVerbose or -v is given */
+ logg("*%s", lastline);
+ }
+ fclose(f);
+
+ if (waitpid(pid, &status, 0) == -1 && errno != ECHILD)
+ logg("^waitpid() failed: %s\n", strerror(errno));
+ cli_chomp(firstline);
+ cli_chomp(lastline);
+ if (firstline[0]) {
+ logg("!During database load : %s%s%s\n",
+ firstline, lastline[0] ? " [...] " : "",
+ lastline);
+ }
+ if (WIFEXITED(status)) {
+ int ret = WEXITSTATUS(status);
+ if (ret) {
+ logg("^Database load exited with status %d\n", ret);
+ return ret;
+ }
+ if (firstline[0])
+ logg("^Database successfully loaded, but there is stderr output\n");
+ return 0;
+ }
+ if (WIFSIGNALED(status)) {
+ logg("!Database load killed by signal %d\n", WTERMSIG(status));
+ return 55;
+ }
+ logg("^Unknown status from wait: %d\n", status);
+ return 55;
+ }
+}
+#else
+static int test_database_wrap(const char *file, const char *newdb, int bytecode)
+{
+ int ret = 55;
+ __try
+ {
+ ret = test_database(file, newdb, bytecode);
+ }
+ __except (logg("!Exception during database testing, code %08x\n",
+ GetExceptionCode()),
+ EXCEPTION_CONTINUE_SEARCH)
+ { }
+ return ret;
+}
+#endif
+
+extern int sigchld_wait;
+
static int updatedb(const char *dbname, const char *hostname, char *ip, int *signo, const struct optstruct *opts, const char *dnsreply, char *localip, int outdated, struct mirdat *mdat, int logerr, int extra)
{
struct cl_cvd *current, *remote;
const struct optstruct *opt;
- unsigned int nodb = 0, currver = 0, newver = 0, port = 0, i, j, newsigs = 0;
+ unsigned int nodb = 0, currver = 0, newver = 0, port = 0, i, j;
int ret, ims = -1;
char *pt, cvdfile[32], localname[32], *tmpdir = NULL, *newfile, *newfile2, newdb[32];
char extradbinfo[64], *extradnsreply = NULL;
@@ -1509,7 +1570,6 @@ static int updatedb(const char *dbname, const char *hostname, char *ip, int *sig
unsigned int flevel = cl_retflevel(), remote_flevel = 0, maxattempts;
unsigned int can_whitelist = 0;
int ctimeout, rtimeout;
- struct cl_engine *engine;
snprintf(cvdfile, sizeof(cvdfile), "%s.cvd", dbname);
@@ -1689,7 +1749,7 @@ static int updatedb(const char *dbname, const char *hostname, char *ip, int *sig
newfile = cli_gentemp(updtmpdir);
if(nodb) {
- ret = getcvd(cvdfile, newfile, hostname, ip, localip, proxy, port, user, pass, uas, newver, ctimeout, rtimeout, mdat, logerr, can_whitelist);
+ ret = getcvd(cvdfile, newfile, hostname, ip, localip, proxy, port, user, pass, uas, newver, ctimeout, rtimeout, mdat, logerr, can_whitelist, opts);
if(ret) {
memset(ip, 0, 16);
free(newfile);
@@ -1707,7 +1767,7 @@ static int updatedb(const char *dbname, const char *hostname, char *ip, int *sig
int llogerr = logerr;
if(logerr)
llogerr = (j == maxattempts - 1);
- ret = getpatch(dbname, tmpdir, i, hostname, ip, localip, proxy, port, user, pass, uas, ctimeout, rtimeout, mdat, llogerr, can_whitelist);
+ ret = getpatch(dbname, tmpdir, i, hostname, ip, localip, proxy, port, user, pass, uas, ctimeout, rtimeout, mdat, llogerr, can_whitelist, opts);
if(ret == 52 || ret == 58) {
memset(ip, 0, 16);
continue;
@@ -1725,7 +1785,7 @@ static int updatedb(const char *dbname, const char *hostname, char *ip, int *sig
if(ret != 53)
logg("^Incremental update failed, trying to download %s\n", cvdfile);
mirman_whitelist(mdat, 2);
- ret = getcvd(cvdfile, newfile, hostname, ip, localip, proxy, port, user, pass, uas, newver, ctimeout, rtimeout, mdat, logerr, can_whitelist);
+ ret = getcvd(cvdfile, newfile, hostname, ip, localip, proxy, port, user, pass, uas, newver, ctimeout, rtimeout, mdat, logerr, can_whitelist, opts);
if(ret) {
free(newfile);
return ret;
@@ -1753,16 +1813,11 @@ static int updatedb(const char *dbname, const char *hostname, char *ip, int *sig
}
if(optget(opts, "TestDatabases")->enabled && strlen(newfile) > 4) {
- if(!(engine = cl_engine_new())) {
- unlink(newfile);
- free(newfile);
- return 55;
- }
newfile2 = strdup(newfile);
if(!newfile2) {
+ logg("!Can't allocate memory for filename!\n");
unlink(newfile);
free(newfile);
- cl_engine_free(engine);
return 55;
}
newfile2[strlen(newfile2) - 4] = '.';
@@ -1774,29 +1829,18 @@ static int updatedb(const char *dbname, const char *hostname, char *ip, int *sig
unlink(newfile);
free(newfile);
free(newfile2);
- cl_engine_free(engine);
return 57;
}
free(newfile);
newfile = newfile2;
- if((ret = cl_load(newfile, engine, &newsigs, CL_DB_PHISHING | CL_DB_PHISHING_URLS | CL_DB_BYTECODE | CL_DB_PUA)) != CL_SUCCESS) {
+ sigchld_wait = 0;/* we need to wait() for the child ourselves */
+ if (test_database_wrap(newfile, newdb, optget(opts, "Bytecode")->enabled)) {
logg("!Failed to load new database: %s\n", cl_strerror(ret));
unlink(newfile);
free(newfile);
- cl_engine_free(engine);
return 55;
}
- if(optget(opts, "Bytecode")->enabled && (ret = cli_bytecode_prepare2(engine, &engine->bcs, engine->dconf->bytecode/*FIXME: dconf has no sense here*/))) {
- logg("!Failed to compile/load bytecode: %s\n", cl_strerror(ret));
- unlink(newfile);
- free(newfile);
- cl_engine_free(engine);
- return 55;
- }
- logg("*Properly loaded %u signatures from new %s\n", newsigs, newdb);
- if(engine->domainlist_matcher && engine->domainlist_matcher->sha256_pfx_set.keys)
- cli_hashset_destroy(&engine->domainlist_matcher->sha256_pfx_set);
- cl_engine_free(engine);
+ sigchld_wait = 1;
}
#ifdef _WIN32
@@ -1842,6 +1886,166 @@ static int updatedb(const char *dbname, const char *hostname, char *ip, int *sig
return 0;
}
+static int updatecustomdb(const char *url, int *signo, const struct optstruct *opts, char *localip, int logerr)
+{
+ const struct optstruct *opt;
+ unsigned int port = 0, sigs = 0;
+ int ret;
+ char *pt, *host, urlcpy[256], *newfile = NULL, mtime[36], *newfile2;
+ const char *proxy = NULL, *user = NULL, *pass = NULL, *uas = NULL, *rpath, *dbname;
+ int ctimeout, rtimeout;
+ struct stat sb;
+ struct cl_cvd *cvd;
+
+ if(!strncasecmp(url, "http://", 7)) {
+ strncpy(urlcpy, url, sizeof(urlcpy));
+ host = &urlcpy[7];
+ if(!(pt = strchr(host, '/'))) {
+ logg("!DatabaseCustomURL: Incorrect URL\n");
+ return 70;
+ }
+ *pt = 0;
+ rpath = &url[pt - urlcpy + 1];
+ dbname = strrchr(url, '/') + 1;
+ if(!dbname || strlen(dbname) < 4) {
+ logg("DatabaseCustomURL: Incorrect URL\n");
+ return 70;
+ }
+
+ /* Initialize proxy settings */
+ if((opt = optget(opts, "HTTPProxyServer"))->enabled) {
+ proxy = opt->strarg;
+ if(strncasecmp(proxy, "http://", 7) == 0)
+ proxy += 7;
+
+ if((opt = optget(opts, "HTTPProxyUsername"))->enabled) {
+ user = opt->strarg;
+ if((opt = optget(opts, "HTTPProxyPassword"))->enabled) {
+ pass = opt->strarg;
+ } else {
+ logg("HTTPProxyUsername requires HTTPProxyPassword\n");
+ return 56;
+ }
+ }
+ if((opt = optget(opts, "HTTPProxyPort"))->enabled)
+ port = opt->numarg;
+ logg("Connecting via %s\n", proxy);
+ }
+
+ if((opt = optget(opts, "HTTPUserAgent"))->enabled)
+ uas = opt->strarg;
+
+ ctimeout = optget(opts, "ConnectTimeout")->numarg;
+ rtimeout = optget(opts, "ReceiveTimeout")->numarg;
+
+ *mtime = 0;
+ if(stat(dbname, &sb) != -1)
+ Rfc2822DateTime(mtime, sb.st_mtime);
+
+ newfile = cli_gentemp(updtmpdir);
+ ret = getfile(rpath, newfile, host, NULL, localip, proxy, port, user, pass, uas, ctimeout, rtimeout, NULL, logerr, 0, *mtime ? mtime : NULL, opts);
+ if(ret == 1) {
+ logg("%s is up to date (version: custom database)\n", dbname);
+ unlink(newfile);
+ free(newfile);
+ return 1;
+ } else if(ret > 1) {
+ logg("%cCan't download %s from %s\n", logerr ? '!' : '^', dbname, host);
+ unlink(newfile);
+ free(newfile);
+ return ret;
+ }
+
+ } else if(!strncasecmp(url, "file://", 7)) {
+ rpath = &url[7];
+#ifdef _WIN32
+ dbname = strrchr(rpath, '\\');
+#else
+ dbname = strrchr(rpath, '/');
+#endif
+ if(!dbname || strlen(dbname++) < 5) {
+ logg("DatabaseCustomURL: Incorrect URL\n");
+ return 70;
+ }
+
+ newfile = cli_gentemp(updtmpdir);
+ if(!newfile)
+ return 70;
+
+ /* FIXME: preserve file permissions, calculate % */
+ logg("Downloading %s [ 0%%]\r", dbname);
+ if(cli_filecopy(rpath, newfile) == -1) {
+ logg("DatabaseCustomURL: Can't copy file %s into database directory\n", rpath);
+ free(newfile);
+ return 70;
+ }
+ logg("Downloading %s [100%%]\n", dbname);
+ } else {
+ logg("!DatabaseCustomURL: Not supported protocol\n");
+ return 70;
+ }
+
+ if(optget(opts, "TestDatabases")->enabled && strlen(newfile) > 4) {
+ newfile2 = malloc(strlen(newfile) + strlen(dbname) + 1);
+ if(!newfile2) {
+ unlink(newfile);
+ free(newfile);
+ return 55;
+ }
+ sprintf(newfile2, "%s%s", newfile, dbname);
+ newfile2[strlen(newfile) + strlen(dbname)] = 0;
+ if(rename(newfile, newfile2) == -1) {
+ logg("!Can't rename %s to %s: %s\n", newfile, newfile2, strerror(errno));
+ unlink(newfile);
+ free(newfile);
+ free(newfile2);
+ return 57;
+ }
+ free(newfile);
+ newfile = newfile2;
+ sigchld_wait = 0;/* we need to wait() for the child ourselves */
+ if (test_database_wrap(newfile, dbname, optget(opts, "Bytecode")->enabled)) {
+ logg("!Failed to load new database: %s\n", cl_strerror(ret));
+ unlink(newfile);
+ free(newfile);
+ return 55;
+ }
+ sigchld_wait = 1;
+ }
+
+#ifdef _WIN32
+ if(!access(dbname, R_OK) && unlink(dbname)) {
+ logg("!Can't unlink %s. Please fix the problem manually and try again.\n", dbname);
+ unlink(newfile);
+ free(newfile);
+ return 53;
+ }
+#endif
+
+ if(rename(newfile, dbname) == -1) {
+ logg("!Can't rename %s to %s: %s\n", newfile, dbname, strerror(errno));
+ unlink(newfile);
+ free(newfile);
+ return 57;
+ }
+ free(newfile);
+
+ if(cli_strbcasestr(dbname, ".cld") || cli_strbcasestr(dbname, ".cvd")) {
+ if((cvd = cl_cvdhead(dbname))) {
+ sigs = cvd->sigs;
+ cl_cvdfree(cvd);
+ }
+ } else if(cli_strbcasestr(dbname, ".cbc")) {
+ sigs = 1;
+ } else {
+ sigs = countlines(dbname);
+ }
+
+ logg("%s updated (version: custom database, sigs: %u)\n", dbname, sigs);
+ *signo += sigs;
+ return 0;
+}
+
int downloadmanager(const struct optstruct *opts, const char *hostname, const char *dbdir, int logerr)
{
time_t currtime;
@@ -1952,7 +2156,8 @@ int downloadmanager(const struct optstruct *opts, const char *hostname, const ch
if(newver)
free(newver);
- mirman_write("mirrors.dat", &mdat);
+ mirman_write("mirrors.dat", dbdir, &mdat);
+ mirman_free(&mdat);
cli_rmdirs(updtmpdir);
return ret;
@@ -1967,7 +2172,8 @@ int downloadmanager(const struct optstruct *opts, const char *hostname, const ch
if(newver)
free(newver);
- mirman_write("mirrors.dat", &mdat);
+ mirman_write("mirrors.dat", dbdir, &mdat);
+ mirman_free(&mdat);
cli_rmdirs(updtmpdir);
return ret;
@@ -1996,7 +2202,8 @@ int downloadmanager(const struct optstruct *opts, const char *hostname, const ch
if(newver)
free(newver);
- mirman_write("mirrors.dat", &mdat);
+ mirman_write("mirrors.dat", dbdir, &mdat);
+ mirman_free(&mdat);
cli_rmdirs(updtmpdir);
return ret;
} else if(ret == 0)
@@ -2023,7 +2230,8 @@ int downloadmanager(const struct optstruct *opts, const char *hostname, const ch
if(newver)
free(newver);
- mirman_write("mirrors.dat", &mdat);
+ mirman_write("mirrors.dat", dbdir, &mdat);
+ mirman_free(&mdat);
cli_rmdirs(updtmpdir);
return ret;
} else if(ret == 0)
@@ -2037,7 +2245,8 @@ int downloadmanager(const struct optstruct *opts, const char *hostname, const ch
if((ret = updatedb(opt->strarg, hostname, ipaddr, &signo, opts, NULL, localip, outdated, &mdat, logerr, 1)) > 50) {
if(newver)
free(newver);
- mirman_write("mirrors.dat", &mdat);
+ mirman_write("mirrors.dat", dbdir, &mdat);
+ mirman_free(&mdat);
cli_rmdirs(updtmpdir);
return ret;
} else if(ret == 0)
@@ -2046,11 +2255,22 @@ int downloadmanager(const struct optstruct *opts, const char *hostname, const ch
}
}
- mirman_write("mirrors.dat", &mdat);
+ mirman_write("mirrors.dat", dbdir, &mdat);
+ mirman_free(&mdat);
+
+ /* custom dbs */
+ if((opt = optget(opts, "DatabaseCustomURL"))->enabled) {
+ while(opt) {
+ if(updatecustomdb(opt->strarg, &signo, opts, localip, logerr) == 0)
+ updated = 1;
+ opt = opt->nextarg;
+ }
+ }
+
cli_rmdirs(updtmpdir);
if(updated) {
- if(optget(opts, "HTTPProxyServer")->enabled) {
+ if(optget(opts, "HTTPProxyServer")->enabled || !ipaddr[0]) {
logg("Database updated (%d signatures) from %s\n", signo, hostname);
} else {
logg("Database updated (%d signatures) from %s (IP: %s)\n", signo, hostname, ipaddr);
diff --git a/freshclam/mirman.c b/freshclam/mirman.c
index 7326468..3885fea 100644
--- a/freshclam/mirman.c
+++ b/freshclam/mirman.c
@@ -152,7 +152,7 @@ int mirman_check(uint32_t *ip, int af, struct mirdat *mdat, struct mirdat_ip **m
return 0;
}
-int mirman_update(uint32_t *ip, int af, struct mirdat *mdat, uint8_t broken)
+static int mirman_update_int(uint32_t *ip, int af, struct mirdat *mdat, uint8_t broken, int succ, int fail)
{
unsigned int i, found = 0;
@@ -169,24 +169,32 @@ int mirman_update(uint32_t *ip, int af, struct mirdat *mdat, uint8_t broken)
if(found) {
mdat->mirtab[i].atime = 0; /* will be updated in mirman_write() */
- if(broken)
- mdat->mirtab[i].fail++;
- else
- mdat->mirtab[i].succ++;
-
- if(broken == 2) {
- mdat->mirtab[i].ignore = 2;
+ if(succ || fail) {
+ mdat->mirtab[i].fail += fail;
+ if(mdat->mirtab[i].fail < 0)
+ mdat->mirtab[i].fail = 0;
+ mdat->mirtab[i].succ += succ;
+ if(mdat->mirtab[i].succ < 0)
+ mdat->mirtab[i].succ = 0;
} else {
- /*
- * If the total number of failures is less than 3 then never
- * mark a permanent failure, in other case use the real status.
- */
- if(mdat->mirtab[i].fail < 3)
- mdat->mirtab[i].ignore = 0;
+ if(broken)
+ mdat->mirtab[i].fail++;
else
- mdat->mirtab[i].ignore = broken;
+ mdat->mirtab[i].succ++;
+
+ if(broken == 2) {
+ mdat->mirtab[i].ignore = 2;
+ } else {
+ /*
+ * If the total number of failures is less than 3 then never
+ * mark a permanent failure, in other case use the real status.
+ */
+ if(mdat->mirtab[i].fail < 3)
+ mdat->mirtab[i].ignore = 0;
+ else
+ mdat->mirtab[i].ignore = broken;
+ }
}
-
} else {
mdat->mirtab = (struct mirdat_ip *) realloc(mdat->mirtab, (mdat->num + 1) * sizeof(struct mirdat_ip));
if(!mdat->mirtab) {
@@ -200,20 +208,32 @@ int mirman_update(uint32_t *ip, int af, struct mirdat *mdat, uint8_t broken)
memcpy(mdat->mirtab[mdat->num].ip6, ip, 4 * sizeof(uint32_t));
}
mdat->mirtab[mdat->num].atime = 0;
- mdat->mirtab[mdat->num].succ = 0;
- mdat->mirtab[mdat->num].fail = 0;
+ mdat->mirtab[mdat->num].succ = (succ > 0) ? succ : 0;
+ mdat->mirtab[mdat->num].fail = (fail > 0) ? fail : 0;
mdat->mirtab[mdat->num].ignore = (broken == 2) ? 2 : 0;
memset(&mdat->mirtab[mdat->num].res, 0xff, sizeof(mdat->mirtab[mdat->num].res));
- if(broken)
- mdat->mirtab[mdat->num].fail++;
- else
- mdat->mirtab[mdat->num].succ++;
+ if(!succ && !fail) {
+ if(broken)
+ mdat->mirtab[mdat->num].fail++;
+ else
+ mdat->mirtab[mdat->num].succ++;
+ }
mdat->num++;
}
return 0;
}
+int mirman_update(uint32_t *ip, int af, struct mirdat *mdat, uint8_t broken)
+{
+ return mirman_update_int(ip, af, mdat, broken, 0, 0);
+}
+
+int mirman_update_sf(uint32_t *ip, int af, struct mirdat *mdat, int succ, int fail)
+{
+ return mirman_update_int(ip, af, mdat, 0, succ, fail);
+}
+
void mirman_list(const struct mirdat *mdat)
{
unsigned int i;
@@ -252,18 +272,20 @@ void mirman_whitelist(struct mirdat *mdat, unsigned int mode)
mdat->mirtab[i].ignore = 0;
}
-int mirman_write(const char *file, struct mirdat *mdat)
+int mirman_write(const char *file, const char *dir, struct mirdat *mdat)
{
int fd;
unsigned int i;
+ char path[512];
+ snprintf(path, sizeof(path), "%s/%s", dir, file);
+ path[sizeof(path) - 1] = 0;
if(!mdat->num)
return 0;
- if((fd = open(file, O_WRONLY|O_CREAT|O_TRUNC|O_BINARY, 0600)) == -1) {
- logg("!Can't open %s for writing\n", file);
- mirman_free(mdat);
+ if((fd = open(path, O_WRONLY|O_CREAT|O_TRUNC|O_BINARY, 0600)) == -1) {
+ logg("!Can't open %s for writing\n", path);
return -1;
}
@@ -272,13 +294,11 @@ int mirman_write(const char *file, struct mirdat *mdat)
mdat->mirtab[i].atime = (uint32_t) time(NULL);
if(write(fd, mdat->mirtab, mdat->num * sizeof(struct mirdat_ip)) == -1) {
- logg("!Can't write to %s\n", file);
- mirman_free(mdat);
+ logg("!Can't write to %s\n", path);
close(fd);
return -1;
}
- mirman_free(mdat);
close(fd);
return 0;
}
diff --git a/freshclam/mirman.h b/freshclam/mirman.h
index e65b97c..512bfef 100644
--- a/freshclam/mirman.h
+++ b/freshclam/mirman.h
@@ -43,9 +43,10 @@ struct mirdat {
int mirman_read(const char *file, struct mirdat *mdat, uint8_t active);
int mirman_check(uint32_t *ip, int af, struct mirdat *mdat, struct mirdat_ip **md);
int mirman_update(uint32_t *ip, int af, struct mirdat *mdat, uint8_t broken);
+int mirman_update_sf(uint32_t *ip, int af, struct mirdat *mdat, int succ, int fail);
void mirman_list(const struct mirdat *mdat);
void mirman_whitelist(struct mirdat *mdat, unsigned int mode);
-int mirman_write(const char *file, struct mirdat *mdat);
+int mirman_write(const char *file, const char *dir, struct mirdat *mdat);
void mirman_free(struct mirdat *mdat);
#endif
diff --git a/freshclam/notify.c b/freshclam/notify.c
index b6f1a35..e5921af 100644
--- a/freshclam/notify.c
+++ b/freshclam/notify.c
@@ -39,11 +39,12 @@
#include "shared/optparser.h"
#include "shared/output.h"
+#include "shared/clamdcom.h"
+
#include "notify.h"
-int notify(const char *cfgfile)
+int clamd_connect(const char *cfgfile, const char *option)
{
- char buff[20];
#ifndef _WIN32
struct sockaddr_un server;
#endif
@@ -63,8 +64,8 @@ int notify(const char *cfgfile)
if((opts = optparse(cfgfile, 0, NULL, 1, OPT_CLAMD, 0, NULL)) == NULL) {
- logg("^Clamd was NOT notified: Can't find or parse configuration file %s\n", cfgfile);
- return 1;
+ logg("!%s: Can't find or parse configuration file %s\n", option, cfgfile);
+ return -11;
}
#ifndef _WIN32
@@ -78,7 +79,7 @@ int notify(const char *cfgfile)
logg("^Clamd was NOT notified: Can't create socket endpoint for %s\n", opt->strarg);
perror("socket()");
optfree(opts);
- return 1;
+ return -1;
}
if(connect(sockd, (struct sockaddr *) &server, sizeof(struct sockaddr_un)) < 0) {
@@ -86,7 +87,7 @@ int notify(const char *cfgfile)
logg("^Clamd was NOT notified: Can't connect to clamd through %s\n", opt->strarg);
perror("connect()");
optfree(opts);
- return 1;
+ return -11;
}
} else
@@ -96,11 +97,12 @@ int notify(const char *cfgfile)
#ifdef HAVE_GETADDRINFO
memset(&hints, 0, sizeof(hints));
+/*
#ifdef SUPPORT_IPv6
hints.ai_family = AF_UNSPEC;
#else
+*/
hints.ai_family = AF_INET;
-#endif
hints.ai_socktype = SOCK_STREAM;
snprintf(port, sizeof(port), "%u", (unsigned int) opt->numarg);
port[5] = 0;
@@ -113,36 +115,36 @@ int notify(const char *cfgfile)
ret = getaddrinfo(addr, port, &hints, &res);
if(ret) {
- logg("^Clamd was NOT notified: Can't resolve hostname %s (%s)\n", addr ? addr : "", (ret == EAI_SYSTEM) ? strerror(errno) : gai_strerror(ret));
+ logg("!%s: Can't resolve hostname %s (%s)\n", option, addr ? addr : "", (ret == EAI_SYSTEM) ? strerror(errno) : gai_strerror(ret));
optfree(opts);
- return 1;
+ return -1;
}
if((sockd = socket(res->ai_family, SOCK_STREAM, 0)) < 0) {
perror("socket()");
- logg("^Clamd was NOT notified: Can't create TCP socket\n");
+ logg("!%s: Can't create TCP socket\n", option);
optfree(opts);
freeaddrinfo(res);
- return 1;
+ return -1;
}
if(connect(sockd, res->ai_addr, res->ai_addrlen) == -1) {
perror("connect()");
closesocket(sockd);
- logg("^Clamd was NOT notified: Can't connect to clamd on %s:%s\n", addr ? addr : "localhost", port);
+ logg("!%s: Can't connect to clamd on %s:%s\n", option, addr ? addr : "localhost", port);
optfree(opts);
freeaddrinfo(res);
- return 1;
+ return -1;
}
freeaddrinfo(res);
#else /* IPv4 */
if((sockd = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
- logg("^Clamd was NOT notified: Can't create TCP socket\n");
+ logg("!%s: Can't create TCP socket\n", option);
perror("socket()");
optfree(opts);
- return 1;
+ return -1;
}
server2.sin_family = AF_INET;
@@ -154,7 +156,7 @@ int notify(const char *cfgfile)
logg("^Clamd was NOT notified: Can't resolve hostname '%s'\n", opt->strarg);
optfree(opts);
closesocket(sockd);
- return 1;
+ return -1;
}
server2.sin_addr = *(struct in_addr *) he->h_addr_list[0];
} else
@@ -167,39 +169,48 @@ int notify(const char *cfgfile)
inet_ntoa(server2.sin_addr), ntohs(server2.sin_port));
perror("connect()");
optfree(opts);
- return 1;
+ return -1;
}
#endif
} else {
- logg("^Clamd was NOT notified: No socket specified in %s\n", cfgfile);
+ logg("!%s: No communication socket specified in %s\n", option, cfgfile);
optfree(opts);
return 1;
}
- if(send(sockd, "RELOAD", 6, 0) < 0) {
- logg("^Clamd was NOT notified: Could not write to %s socket\n", socktype);
- perror("write()");
+ optfree(opts);
+ return sockd;
+}
+
+int notify(const char *cfgfile)
+{
+ char buff[20];
+ int sockd, bread;
+ const char *socktype;
+
+ if((sockd = clamd_connect(cfgfile, "NotifyClamd")) < 0)
+ return 1;
+
+ if(sendln(sockd, "RELOAD", 7) < 0) {
+ logg("!NotifyClamd: Could not write to clamd socket\n");
+ perror("send()");
closesocket(sockd);
- optfree(opts);
return 1;
}
- /* TODO: Handle timeout */
memset(buff, 0, sizeof(buff));
- if((bread = recv(sockd, buff, sizeof(buff), 0)) > 0)
+ if((bread = recv(sockd, buff, sizeof(buff), 0)) > 0) {
if(!strstr(buff, "RELOADING")) {
- logg("^Clamd was NOT notified: Unknown answer from clamd: '%s'\n", buff);
+ logg("!NotifyClamd: Unknown answer from clamd: '%s'\n", buff);
closesocket(sockd);
- optfree(opts);
return 1;
}
+ }
closesocket(sockd);
logg("Clamd successfully notified about the update.\n");
- optfree(opts);
return 0;
}
-
#endif
diff --git a/freshclam/notify.h b/freshclam/notify.h
index 10c5926..21314b6 100644
--- a/freshclam/notify.h
+++ b/freshclam/notify.h
@@ -20,5 +20,6 @@
#define __NOTIFY_H
int notify(const char *cfgfile);
+int clamd_connect(const char *cfgfile, const char *option);
#endif
diff --git a/libclamav/7z.c b/libclamav/7z.c
index 7136fdb..3bc5096 100644
--- a/libclamav/7z.c
+++ b/libclamav/7z.c
@@ -69,6 +69,7 @@ int cli_7unz (int fd, cli_ctx *ctx) {
if(SzArEx_Open(&db, &lookStream.s, &allocImp, &allocTempImp) != SZ_OK) {
SzArEx_Free(&db, &allocImp);
cli_dbgmsg("cli_7unz: possibly damaged archive\n");
+ fclose(archiveStream.file.file);
return CL_CLEAN;
}
for (i = 0; i < db.db.NumFiles; i++) {
diff --git a/libclamav/builtin_bytecodes.h b/libclamav/builtin_bytecodes.h
index 3b21bbb..c7bb6f5 100644
--- a/libclamav/builtin_bytecodes.h
+++ b/libclamav/builtin_bytecodes.h
@@ -29,64 +29,65 @@
* fallback.
* Usually bytecode.cvd will contain this bytecode */
-static const char* builtin_bc_startup = "ClamBCafhiifamlld|afefdfggifnf```aa```|biacflfafmfbfcfmb`cnbacacmbacgchcmbgfbcbfgcfffcffec``bgcaap`clamcoincidencejb:4096\n"
+static const char* builtin_bc_startup = "ClamBCafhhbfkjmld|afefdfggifnf```aa```|biacflfafmfbfcfmb`cnbacacmbachcccmbgfbfcc`ccchcbfdf``bgcaap`clamcoincidencejb:4096\n"
"\n"
-"Teddaaahdabahdacahdadahdaeahdafahdagahebfgebidebegebdgebgdebkdebcgebbgebageb`gebofebnfebmfebedeblfebkfebjfebadcbgab`bb`bb`bb`bb`bb`bb`bbifbifbifbifbifbifbifahahahahahahahahahebneebifaaaaaaaab`baabb`bb`baacb`bbadb`baacb`bboeb`baacb`bb`bb`baadb`bbadb`bb`baadb`bbadbadb`bdbadahdbkaahdbbcahdbibahdb`eahdbddahdbodahdbdaahdaiahdakahdamahdahahdbgcahdbnbah\n"
+"Teddaaahdabahdacahdadahdaeahdafahdagahebfgebidebegebdgebgdebkdebcgebbgebageb`gebofebnfebmfebedeblfebkfebjfebadcbgab`bb`bb`bb`bb`bb`bb`bbifbifbifbifbifbifbifahahahahahahahahahebneebifaaaaaaaab`baabb`bb`baacb`bbadb`baacb`bboeb`baacb`bb`bb`baadb`bbadb`bb`baadb`bbadbadb`bdbadahdbkaahdbbcahdbibahdb`eahdbddahdbodahdbdaahdaiahdakahdamahdahahdbncahdbnbah\n"
"Ebjdaibcdbbf|bcaefnfgfifnfefoedfcfofnfffoelfeffgeflf``bbdbbf|bkaefnfgfifnfefoeffegnfcfdgifofnfaflfifdgigoelfeffgeflf``agbcf|baadfefbfeggfoe`gbgifnfdgoeegifnfdg``bcabcf|afdgefcgdgbc``afbdf|b`adfefbfeggfoe`gbgifnfdgoecgdgbg``bhdbef|b`agfefdgoeefnffgifbgofnfmfefnfdg``aabff|afdgefcgdgac``bidbgf|bdadfifcgafbflfefoebfigdgefcfofdfefoeifff``bjdbgf|aodfifcgafbflfefoejfifdgoeifff``\n"
-"G`b`c`@`b`aAa`bfgBifBkeBccBdcBmeBhcBfcB`bBdfBefBdgBefBcfBdgBefBdfBlbB`bBjdBidBdeB`bBnfBefBefBdfBcgB`bB`gBefBnfBdgBifBegBmfB`bBofBbgB`bBbfBefBdgBdgBefBbg@`bidBifBccBhcBfc@`bidBifBdcBhcBfc@`begBcdB`eBeeB`bBdfBofBefBcgBnfBgbBdgB`bBcgBegB`gB`gBofBbgBdgB`bBcdBmdBodBfeBlbB`bBggBofBegBlfBdfB`bBnfBefBefBdfB`bBldBldBfeBmdB`bBbcBnbBhcB`bBdgBofB`bBggBofBbgBkfBab@`bidBifBecBhcBfc@`bdgB`gBefBnfBdgBifBegBmf@`bidBifBfcBhcBfc@`bgdBkfBfc@`bidBkfBfcBmbBbc@`bidBkfBfcBmbBcc@`bkdBafBdgBhfBlfBofBnf@`bcgBafBdgBhfBlfBofBnfBmbBdgBbfBifBbgBdf@`bbgBggBifBnfBcfBhfBifB`gBmbBcfBfc@`bagBggBifBnfBcfBhfBifB`gBbc@`bgdBcfBcc@`b`gBbeBgeBheB`bBmfBafB`gB`gBifBnfBgfB`bBdfBefBnfBifBefBdfBnb@`bofBneBceBedBldBifBnfBegBhgB`bBifBcgB`bB`gBbgBefBfgBefBnfBdgBifBnfBgfB`bBgbBefBhgBefBcfBmfBefBmfBgbB`bBafBcfBcfBefBcgBcgBnbAjBbeBegBnfB`bB`bBgbBcgBefBdgBcgBefBbfBofBofBlfB`bBmbB`eB`bBcfBlfBafBmfBdfBoeBegBcgBefBoeBjfBifBdgB`bBofBnfBgbBnb@`bnfBneB`eBafBheB`bBifBcgB`bB`gBbgBefBfgBefBnfBdgBifBnfBgfB`bBgbBmfB`gBbgBofBdgBefBcfBdgBgbB`bBafBcfBcfBefBcgBcgBnbAjBbeBegBnfB`bBgbB`gBafBhgBcfBdgBlfB`bBmbBcfBmfB`bBlcBefBhgBefBcfBegBdgBafBbfBlfBefBncBgb@`bmfBneBbeBgeBheB`bBmfBafB`gB`gBifBnfBgfB`bBdfBefBnfBifBefBdfB`bBffBofBbgB`bBegBnfBkfBnfBofBggBnfB`bBbgBefBafBcgBofBnfBnbB`eBlfBefBafBcgBefB`bBbgBefB`gBofBbgBdgB`bBdgBofB`bBhfBdgBdgB`gBjcBobBobBbfBegBgfBcgBnbBcfBlfBafBmfBafBfgBnbBnfBefBdgAj@`bed@`blfBcgBdgBafBbgBdgBegB`gBjcB`bBbfBigBdgBefBcfBofBdfBefB`bBefBhgBefBcfBegBdgBifBofBnfB`bBifBnfB`bBafBegBdgBofB`bBmfBofBdfBef@`bkfBcgBdgBafBbgBdgBegB`gBjcB`bBbfBigBdgBefBcfBofBdfBefB`bBefBhgBefBcfBegBdgBifBofBnfB`bBggBifBdgBhfB`bBifBnfBdgBefBbgB`gBbgBefBdgBefBbgB`bBofBnfBlfBig@`bjfBcgBdgBafBbgBdgBegB`gBjcB`bBbfBigBdgBefBcfBofBdfBefB`bBdfBifBcgBafBbfBlfBefBdf@`bad at Ab`bad at Ac`bad at Ad`bad at Ae`bad at Af`bad at Ag`bad at Ah`bad at Ai`bad at Aj`bad at Ak`bad at Al`bad at Am`bad at An`bad at Ao`bad at B`a`bad at Baa`bad at Bba`bad at Bca`bad at Bda`bad at Bea`bad at Bfa`bad at Bga`bad at Bha`\n"
-"A`b`bLbjib`bab`bab`babneab`b`bad`ah`aa`bad`ah`aa`b`f`bad`b`b`aa`b`b`aa`b`b`b`b`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`b`b`bad`ah`b`b`b`b`aa`b`b`bad`ah`aa`ah`b`b`b`b`aa`b`b`b`b`aa`b`b`b`b`bad`ah`aa`bad`ah`aa`b`b`aa`b`b`b`b`aa`aa`aa`aa`aa`b`b`b`b`b`b`ah`aa`bcd`b`b`aa`bcd`b`b`bcd`b`b`aa`b`b`aa`b`b`b`b`aa`bad`ah`b`b`aa`b`b`aa`bad`ah`b`b`b`b`bad`ah`b`b`b`b`bad`ah`b`b`b`b`b`b`b`b`b`b`b`b`b`b`b`b`b`b`b`b`bad`ah`b`b`b`b`bcd`b`b`b`b`b`b`bad`ah`b`b`b`b`bcd`b`b`b`b`bcd`b`b`aa`b`b`bcd`b`b`aa`b`b`bcd`b`b`aa`b`b`b`b`aa`b`b`b`b`aa`b`b`b`b`Fbombdc\n"
-"Bb`badabbbhdacClnadbadaedbboeacBdadahafgbaeaaageaahaf at aTaaagbkaaa\n"
-"BbadahdbboeacB`adahaigbahaaajeaahaiAaaTaaajabb`a\n"
+"G`b`c`@`b`aAa`bfgBifBkeBccBdcBmeBhcBfcB`bBdfBefBdgBefBcfBdgBefBdfBlbB`bBjdBidBdeB`bBnfBefBefBdfBcgB`bB`gBefBnfBdgBifBegBmfB`bBofBbgB`bBbfBefBdgBdgBefBbg@`bidBifBccBhcBfc@`bidBifBdcBhcBfc@`begBcdB`eBeeB`bBdfBofBefBcgBnfBgbBdgB`bBcgBegB`gB`gBofBbgBdgB`bBcdBmdBodBfeBlbB`bBggBofBegBlfBdfB`bBnfBefBefBdfB`bB`cBnbBicBgcB`bBhbBldBldBfeBmdB`bBbcBnbBhcBibB`bBdgBofB`bBggBofBbgBkfBab@`bidBifBecBhcBfc@`bdgB`gBefBnfBdgBifBegBmf@`bidBifBfcBhcBfc@`bgdBkfBfc@`bidBkfBfcBmbBbc@`bidBkfBfcBmbBcc@`bkdBafBdgBhfBlfBofBnf@`bcgBafBdgBhfBlfBofBnfBmbBdgBbfBifBbgBdf@`bbgBggBifBnfBcfBhfBifB`gBmbBcfBfc@`bagBggBifBnfBcfBhfBifB`gBbc@`bgdBcfBcc@`b`gBbeBgeBheB`bBmfBafB`gB`gBifBnfBgfB`bBdfBefBnfBifBefBdfBnb@`bofBneBceBedBldBifBnfBegBhgB`bBifBcgB`bB`gBbgBefBfgBefBnfBdgBifBnfBgfB`bBgbBefBhgBefBcfBmfBefBmfBgbB`bBafBcfBcfBefBcgBcgBnbAjBbeBegBnfB`bB`bBgbBcgBefBdgBcgBefBbfBofBofBlfB`bBmbB`eB`bBcfBlfBafBmfBdfBoeBegBcgBefBoeBjfBifBdgB`bBofBnfBgbBnb@`bnfBneB`eBafBheB`bBifBcgB`bB`gBbgBefBfgBefBnfBdgBifBnfBgfB`bBgbBmfB`gBbgBofBdgBefBcfBdgBgbB`bBafBcfBcfBefBcgBcgBnbAjBbeBegBnfB`bBgbB`gBafBhgBcfBdgBlfB`bBmbBcfBmfB`bBlcBefBhgBefBcfBegBdgBafBbfBlfBefBncBgb@`bmfBneBbeBgeBheB`bBmfBafB`gB`gBifBnfBgfB`bBdfBefBnfBifBefBdfB`bBffBofBbgB`bBegBnfBkfBnfBofBggBnfB`bBbgBefBafBcgBofBnfBnbB`eBlfBefBafBcgBefB`bBbgBefB`gBofBbgBdgB`bBdgBofB`bBhfBdgBdgB`gBjcBobBobBbfBegBgfBcgBnbBcfBlfBafBmfBafBfgBnbBnfBefBdgAj@`bed@`blfBcgBdgBafBbgBdgBegB`gBjcB`bBbfBigBdgBefBcfBofBdfBefB`bBefBhgBefBcfBegBdgBifBofBnfB`bBifBnfB`bBafBegBdgBofB`bBmfBofBdfBef@`bkfBcgBdgBafBbgBdgBegB`gBjcB`bBbfBigBdgBefBcfBofBdfBefB`bBefBhgBefBcfBegBdgBifBofBnfB`bBggBifBdgBhfB`bBifBnfBdgBefBbgB`gBbgBefBdgBefBbgB`bBofBnfBlfBig@`bjfBcgBdgBafBbgBdgBegB`gBjcB`bBbfBigBdgBefBcfBofBdfBefB`bBdfBifBcgBafBbfBlfBefBdf@`bad at Ab`bad at Ac`bad at Ad`bad at Ae`bad at Af`bad at Ag`bad at Ah`bad at Ai`bad at Aj`bad at Ak`bad at Al`bad at Am`bad at An`bad at Ao`bad at B`a`bad at Baa`bad at Bba`bad at Bca`bad at Bda`bad at Bea`bad at Bfa`bad at Bga`bad at Bha`\n"
+"A`b`bLblib`bab`b`b`b`bneab`b`bad`ah`aa`bad`ah`aa`b`f`bad`b`b`aa`b`b`aa`b`b`b`b`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`aa`b`b`b`b`bad`ah`b`b`b`b`aa`b`b`bad`ah`aa`ah`b`b`b`b`aa`b`b`b`b`aa`b`b`b`b`bad`ah`aa`bad`ah`aa`b`b`aa`b`b`b`b`aa`aa`aa`aa`aa`b`b`b`b`b`b`ah`aa`bcd`b`b`aa`bcd`b`b`bcd`b`b`aa`b`b`aa`b`b`b`b`aa`bad`ah`b`b`aa`b`b`aa`bad`ah`b`b`b`b`bad`ah`b`b`b`b`bad`ah`b`b`b`b`b`b`b`b`b`b`b`b`b`b`b`b`b`b`b`b`bad`ah`b`b`b`b`bcd`b`b`b`b`b`b`bad`ah`b`b`b`b`bcd`b`b`b`b`bcd`b`b`aa`b`b`bcd`b`b`aa`b`b`bcd`b`b`aa`b`b`b`b`aa`b`b`b`b`aa`b`b`b`b`Fbbnbec\n"
+"Bb`badabbbhdacClnadbadaedbboeacBdadahafgbaeaaageaahaf at aTaaagblaaa\n"
+"BbadahdbboeacB`adahaigbahaaajeaahaiAaaTaaajabbaa\n"
"Bb`fakdbboeacAidbadaldbb`fak at db`bamkbalBja`Aedaaaneab`bam at db`b`bbAadabTaaanadac\n"
"Bb`baokbalBka`Aedaab`aeab`bao at db`bab`ab`ab`b`bbababTbaad\n"
-"Bb`bbaabbabbaab`bbbaabcbjdBia`@dbaab`bbcakbalBma`Aedaabdaeab`bbca at db`b`bbAadaaTaabdaaoae\n"
-"Bb`bbeakbalBna`Ahdaabfaeab`bbea at db`b`bbAadaaTaabfaaoaf\n"
-"Bb`bbgakbalBoa`Aedaabhaeab`bbga at db`b`bbAadaaTaabhaaoag\n"
-"Bb`bbiakbalB`b`Acdaabjaeab`bbia at db`b`bbAadaaTaabjaaoah\n"
-"Bb`bbkakbalBab`Aedaablaeab`bbka at db`b`bbAadaaTaablaaoai\n"
-"Bb`bbmakbalBbb`Aedaabnaeab`bbma at db`b`bbAadaaTaabnaaoaj\n"
-"Bb`bboakbalBcb`Agdaab`beab`bboa at db`b`bbAadaaTaab`baoak\n"
-"Bb`bbabkbalBdb`Amdaabbbeab`bbab at db`b`bbAadaaTaabbbaoal\n"
-"Bb`bbcbkbalBeb`Akdaabdbeab`bbcb at db`b`bbAadaaTaabdbaoam\n"
-"Bb`bbebkbalBfb`Aidaabfbeab`bbeb at db`b`bbAadaaTaabfbaoan\n"
-"Bb`bbgbkbalBgb`Acdaabhbeab`bbgb at db`baa`abhbb`b`bbaaaaTbaao\n"
-"Bb`bbibbbaabibb`bbjbabcbjdBla`@dbibTbab`a\n"
-"BbadbkbdbboeacBeadahblbgbbkbb`bbmb`ablbb`bbnbk`bmbAadaabobeab`bbnb at dTaabobbaabga\n"
-"Bb`bb`cabcbjdBhb`@dAadbadbacdbboeacBaadahbbcgbbacaabcceaahbbcAjaTaabccbbabka\n"
-"Bahbdcgbbkbb`bbec`abdcb`bbfck`becAbdaabgceab`bbfc at dTaabgcbdabca\n"
-"Bb`bbhcabcbjdBib`@dAadTbabka\n"
-"Bb`bbick`becAhdaabjceab`bbic at dTaabjcbfabea\n"
-"Bb`bbkcabcbjdBjb`@dAadTbabka\n"
-"Bb`bblcabcbjdBkb`@dAadTbabka\n"
-"BbadbmcdbboeacBaadahbncgbbmcaaboceaahbncAjaTaabocbiabha\n"
-"Bbadb`ddbboeacBbadahbadgbb`daabbdeaahbadAfaTaabbdbiabka\n"
-"Bb`bbcdk`bmbB`adaabddeab`bbcd at dTaabddbkabja\n"
-"Bb`bbedabcbjdBjb`@dAadTbabka\n"
-"Bb`bbfdabcbidBlb`@d at daabgdnab`bbfdAadTaabgdboabla\n"
-"Baabhdnab`bbfdAbdTaabhdbnabma\n"
-"Baabideab`bbfdAbdTaabidbbbbcb\n"
-"Baabjdeab`bbfdAadTaabjdbabbcb\n"
-"Baabkdeab`bbfd at dTaabkdb`bbcb\n"
-"Bb`bbldabbafBmb`@dTbabcb\n"
-"Bb`bbmdabbafBnb`@dTbabcb\n"
-"Bb`bbndabbafBob`@dTbabcb\n"
-"Bahbodgbaeaab`eeaahbod at aTaab`ebebbdb\n"
-"BbcdbaedbboeacAddb`bbbegbbaeaabceeab`bbbe at db`b`bbEamjnmd`Taabcebccbeb\n"
-"BbcdbdedbboeacAfdb`bbeegbbdebcdbfedbboeacAedb`bbgegbbfeaabheiab`bbeebgeb`b`bbEbmjnmd`Taabhebccbfb\n"
-"Bb`bbieab`bbdaabjeeab`bbgebieb`b`bbEcmjnmd`Taabjebgbbcc\n"
-"Bb`bbkegbbdeb`bbleab`bcdaabmeeab`bbkebleb`b`bbEdmjnmd`Taabmebhbbcc\n"
-"BbadbnedbboeacAndahboegbbneb`bb`f`aboeaabaflbb`bbbf`abafaabcfeab`bb`fbbfb`b`bbEemjnmd`Taabcfbibbcc\n"
-"BbadbdfdbboeacBaadahbefgbbdfb`bbff`abefb`bbgfh`bffBhadbadbhfdbboeacB`adahbifgbbhfb`bbjf`abifb`bbkfh`bjfBdadbadblfdbboeacBcadahbmfgbblfb`bbnf`abmfb`bbofh`bnfB`adb`bb`ggbbfeb`bbagh`b`gAhdb`bbbggbbdeb`bbcgl`bkfbgfb`bbdgl`bcgbagb`bbegl`bdgbbgb`bbfgl`begbofb`bbggh`b`fBladbadbhgdbboeacAodahbiggbbhgb`bbjg`abigb`bbkgh`bjgBhadbcdblgdbboeacAddb`bbmggbblgb`bbngl`bmgbggb`bbogl`bngbkgbadb`hdbboeacBeadahbahgbb`hb`bbbh`abahb`bbchh`bbhBhadbcdbdhdbboeacAcdb`bbehgbbdhb`bbfhl`bchbehbcdbghdbboeac at db`bbhhgbbghaabiheab`bbfgbhhTaabihbkbbjb\n"
-"Bb`bbjhabaagbfgTcab`bEfmjnmd\n"
-"BbcdbkhdbboeacAadb`bblhgbbkhaabmheab`bbogblhTaabmhbmbblb\n"
-"Bb`bbnhabaagbogTcab`bEgmjnmd\n"
-"BbcdbohdbboeacAbdb`bb`igbbohaabaieab`bbfhb`iTaabaibobbnb\n"
-"Bb`bbbiabaagbfhTcab`bEhmjnmd\n"
-"Bb`bbciabbaaHonnkm``odHm``oonnkdaabdieab`bbciHhgfedcbadTaabdibacb`c\n"
-"Bb`bbeiabaagbciTcab`bEimjnmd\n"
-"Bb`bbfiababcaDm``odaabgieab`bbfiDo``mdb`b`bbHnejkjgjmd`Taabgibccbbc\n"
-"Bb`bbhiabaagbfiTcab`bF`amjnmd\n"
-"Bb`bbiibb`biiTcab`bbiiE\n"
+"Bb`bbaabbabbaab`bbbaabcbjdBia`@dbaab`bbcaab`bbdaabdaiab`bbcaBicdTaabdaaebaa\n"
+"Bb`bbeakbalBma`Aedaabfaeab`bbea at db`b`bbAadaaTaabfab`aaf\n"
+"Bb`bbgakbalBna`Ahdaabhaeab`bbga at db`b`bbAadaaTaabhab`aag\n"
+"Bb`bbiakbalBoa`Aedaabjaeab`bbia at db`b`bbAadaaTaabjab`aah\n"
+"Bb`bbkakbalB`b`Acdaablaeab`bbka at db`b`bbAadaaTaablab`aai\n"
+"Bb`bbmakbalBab`Aedaabnaeab`bbma at db`b`bbAadaaTaabnab`aaj\n"
+"Bb`bboakbalBbb`Aedaab`beab`bboa at db`b`bbAadaaTaab`bb`aak\n"
+"Bb`bbabkbalBcb`Agdaabbbeab`bbab at db`b`bbAadaaTaabbbb`aal\n"
+"Bb`bbcbkbalBdb`Amdaabdbeab`bbcb at db`b`bbAadaaTaabdbb`aam\n"
+"Bb`bbebkbalBeb`Akdaabfbeab`bbeb at db`b`bbAadaaTaabfbb`aan\n"
+"Bb`bbgbkbalBfb`Aidaabhbeab`bbgb at db`b`bbAadaaTaabhbb`aao\n"
+"Bb`bbibkbalBgb`Acdaabjbeab`bbib at db`baa`abjbb`b`bbaaaaTbab`a\n"
+"Bb`bbkbbbaabkbb`bblbabcbjdBla`@dbkbTbabaa\n"
+"BbadbmbdbboeacBeadahbnbgbbmbb`bbob`abnbb`bb`ck`bobAadaabaceab`bb`c at dTaabacbbabha\n"
+"Bb`bbbcabcbjdBhb`@dAadbadbccdbboeacBaadahbdcgbbccaabeceaahbdcAjaTaabecbcabla\n"
+"Bahbfcgbbmbb`bbgc`abfcb`bbhck`bgcAbdaabiceab`bbhc at dTaabicbeabda\n"
+"Bb`bbjcabcbjdBib`@dAadTbabla\n"
+"Bb`bbkck`bgcAhdaablceab`bbkc at dTaablcbgabfa\n"
+"Bb`bbmcabcbjdBjb`@dAadTbabla\n"
+"Bb`bbncabcbjdBkb`@dAadTbabla\n"
+"BbadbocdbboeacBaadahb`dgbbocaabadeaahb`dAjaTaabadbjabia\n"
+"BbadbbddbboeacBbadahbcdgbbbdaabddeaahbcdAfaTaabddbjabla\n"
+"Bb`bbedk`bobB`adaabfdeab`bbed at dTaabfdblabka\n"
+"Bb`bbgdabcbjdBjb`@dAadTbabla\n"
+"Bb`bbhdabcbidBlb`@d at daabidnab`bbhdAadTaabidb`bbma\n"
+"Baabjdnab`bbhdAbdTaabjdboabna\n"
+"Baabkdeab`bbhdAbdTaabkdbcbbdb\n"
+"Baabldeab`bbhdAadTaabldbbbbdb\n"
+"Baabmdeab`bbhd at dTaabmdbabbdb\n"
+"Bb`bbndabbafBmb`@dTbabdb\n"
+"Bb`bbodabbafBnb`@dTbabdb\n"
+"Bb`bb`eabbafBob`@dTbabdb\n"
+"Bahbaegbaeaabbeeaahbae at aTaabbebfbbeb\n"
+"BbcdbcedbboeacAddb`bbdegbbceaabeeeab`bbde at db`b`bbEamjnmd`Taabeebdcbfb\n"
+"BbcdbfedbboeacAfdb`bbgegbbfebcdbhedbboeacAedb`bbiegbbheaabjeiab`bbgebieb`b`bbEbmjnmd`Taabjebdcbgb\n"
+"Bb`bbkeab`bbdaableeab`bbiebkeb`b`bbEcmjnmd`Taablebhbbdc\n"
+"Bb`bbmegbbfeb`bbneab`bcdaaboeeab`bbmebneb`b`bbEdmjnmd`Taaboebibbdc\n"
+"Bbadb`fdbboeacAndahbafgbb`fb`bbbf`abafaabcflbb`bbdf`abcfaabefeab`bbbfbdfb`b`bbEemjnmd`Taabefbjbbdc\n"
+"BbadbffdbboeacBaadahbgfgbbffb`bbhf`abgfb`bbifh`bhfBhadbadbjfdbboeacB`adahbkfgbbjfb`bblf`abkfb`bbmfh`blfBdadbadbnfdbboeacBcadahbofgbbnfb`bb`g`abofb`bbagh`b`gB`adb`bbbggbbheb`bbcgh`bbgAhdb`bbdggbbfeb`bbegl`bmfbifb`bbfgl`begbcgb`bbggl`bfgbdgb`bbhgl`bggbagb`bbigh`bbfBladbadbjgdbboeacAodahbkggbbjgb`bblg`abkgb`bbmgh`blgBhadbcdbngdbboeacAddb`bboggbbngb`bb`hl`bogbigb`bbahl`b`hbmgbadbbhdbboeacBeadahbchgbbbhb`bbdh`abchb`bbehh`bdhBhadbcdbfhdbboeacAcdb`bbghgbbfhb`bbhhl`behbghbcdbihdbboeac at db`bbjhgbbihaabkheab`bbhgbjhTaabkhblbbkb\n"
+"Bb`bblhabaagbhgTcab`bEfmjnmd\n"
+"BbcdbmhdbboeacAadb`bbnhgbbmhaaboheab`bbahbnhTaabohbnbbmb\n"
+"Bb`bb`iabaagbahTcab`bEgmjnmd\n"
+"BbcdbaidbboeacAbdb`bbbigbbaiaabcieab`bbhhbbiTaabcib`cbob\n"
+"Bb`bbdiabaagbhhTcab`bEhmjnmd\n"
+"Bb`bbeiabbaaHonnkm``odHm``oonnkdaabfieab`bbeiHhgfedcbadTaabfibbcbac\n"
+"Bb`bbgiabaagbeiTcab`bEimjnmd\n"
+"Bb`bbhiababcaDm``odaabiieab`bbhiDo``mdb`b`bbHnejkjgjmd`Taabiibdcbcc\n"
+"Bb`bbjiabaagbhiTcab`bF`amjnmd\n"
+"Bb`bbkibb`bkiTcab`bbkiE\n"
;
/* source-code for builtin_bc_startup: */
#if 0
@@ -108,21 +109,23 @@ int entrypoint()
disable_jit_if("i[34]86 detected, JIT needs pentium or better",0,
!memcmp(env.cpu,"i386",5) ||
!memcmp(env.cpu,"i486",5));
- /* FIXME: update embedded LLVM to 2.8 which correctly skips CMOV if CPU
- * doesn't support it.
- * For now disable JIT on CPUs without cmov */
- disable_jit_if("CPU doesn't support CMOV, would need LLVM 2.8 to work!",0,
- !memcmp(env.cpu,"i586",5) ||
- !memcmp(env.cpu,"pentium",8) ||
- !memcmp(env.cpu,"i686",5) ||
- !memcmp(env.cpu,"k6",3) ||
- !memcmp(env.cpu,"k6-2",5) ||
- !memcmp(env.cpu,"k6-3",5) ||
- !memcmp(env.cpu,"athlon",7) ||
- !memcmp(env.cpu,"athlon-tbird",13) ||
- !memcmp(env.cpu,"winchip-c6",11) ||
- !memcmp(env.cpu,"winchip2",9) ||
- !memcmp(env.cpu,"c3",3));
+ if (engine_functionality_level() < FUNC_LEVEL_097) {
+ /* LLVM 2.7 bug, fixed in 2.8; only 0.97 ships LLVM 2.8.
+ * The bug: CMOV instructions are emitted even when the CPU doesn't
+ * support them; 2.8 handles this correctly, 2.7 doesn't. */
+ disable_jit_if("CPU doesn't support CMOV, would need 0.97 (LLVM 2.8) to work!",0,
+ !memcmp(env.cpu,"i586",5) ||
+ !memcmp(env.cpu,"pentium",8) ||
+ !memcmp(env.cpu,"i686",5) ||
+ !memcmp(env.cpu,"k6",3) ||
+ !memcmp(env.cpu,"k6-2",5) ||
+ !memcmp(env.cpu,"k6-3",5) ||
+ !memcmp(env.cpu,"athlon",7) ||
+ !memcmp(env.cpu,"athlon-tbird",13) ||
+ !memcmp(env.cpu,"winchip-c6",11) ||
+ !memcmp(env.cpu,"winchip2",9) ||
+ !memcmp(env.cpu,"c3",3));
+ }
break;
default:
break;
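
[Editor's note] The hunk above only disables the JIT on pre-0.97 engines, where the embedded LLVM 2.7 can emit CMOV on CPUs that lack it. Below is a minimal standalone C sketch of that gating pattern; engine_functionality_level(), disable_jit_if() and the FUNC_LEVEL_097 value are simplified stand-ins for illustration, not the real libclamav bytecode API.

/* Sketch only: stand-ins for the real bytecode API. */
#include <stdio.h>
#include <string.h>

#define FUNC_LEVEL_097 97        /* placeholder value, for illustration only */

static int engine_functionality_level(void) { return 96; } /* pretend 0.96.x */

static void disable_jit_if(const char *msg, int flag, int cond)
{
    (void)flag;                  /* second argument mirrored from the hunk above; unused here */
    if (cond)
        printf("JIT disabled: %s\n", msg);
}

int main(void)
{
    const char *cpu = "i586";    /* pretend this came from CPU detection */

    /* Pre-0.97 engines embed LLVM 2.7, which may emit CMOV even when the
     * CPU lacks it; 0.97 (LLVM 2.8) handles this, so only older engines
     * need the JIT turned off on such CPUs. */
    if (engine_functionality_level() < FUNC_LEVEL_097)
        disable_jit_if("CPU doesn't support CMOV, would need 0.97 (LLVM 2.8) to work!", 0,
                       !memcmp(cpu, "i586", 5) || !memcmp(cpu, "pentium", 8));
    return 0;
}
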
diff --git a/libclamav/bytecode.c b/libclamav/bytecode.c
index aec614a..44e94d0 100644
--- a/libclamav/bytecode.c
+++ b/libclamav/bytecode.c
@@ -2411,6 +2411,11 @@ int cli_bytecode_prepare2(struct cl_engine *engine, struct cli_all_bc *bcs, unsi
int rc;
struct cli_bc_ctx *ctx;
+ if (!bcs->count) {
+ cli_dbgmsg("No bytecodes loaded, not running builtin test\n");
+ return CL_SUCCESS;
+ }
+
cli_detect_environment(&bcs->env);
switch (bcs->env.arch) {
case arch_i386:
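
[Editor's note] The bytecode.c hunk above adds an early return when no bytecodes are loaded, so environment detection and the builtin startup/test bytecode are skipped entirely. A rough sketch of that guard follows; struct cli_all_bc and the debug message are reduced to illustrative stand-ins, not the real libclamav definitions.

#include <stdio.h>

#define CL_SUCCESS 0

struct cli_all_bc {
    unsigned count;              /* number of loaded bytecodes */
    /* ...remaining fields elided... */
};

static int prepare_bytecodes(struct cli_all_bc *bcs)
{
    if (!bcs->count) {
        /* Nothing was loaded: skip environment detection and the
         * builtin self-test instead of running it against nothing. */
        printf("No bytecodes loaded, not running builtin test\n");
        return CL_SUCCESS;
    }
    /* ...detect environment, run the startup bytecode, JIT-prepare... */
    return CL_SUCCESS;
}

int main(void)
{
    struct cli_all_bc empty = { 0 };
    return prepare_bytecodes(&empty);
}
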
diff --git a/libclamav/c++/ARMGenAsmWriter.inc b/libclamav/c++/ARMGenAsmWriter.inc
deleted file mode 100644
index 9eef864..0000000
--- a/libclamav/c++/ARMGenAsmWriter.inc
+++ /dev/null
@@ -1,7135 +0,0 @@
-//===- TableGen'erated file -------------------------------------*- C++ -*-===//
-//
-// Assembly Writer Source Fragment
-//
-// Automatically generated file, do not edit!
-//
-//===----------------------------------------------------------------------===//
-
-/// printInstruction - This method is automatically generated by tablegen
-/// from the instruction set description.
-void ARMAsmPrinter::printInstruction(const MachineInstr *MI) {
- static const unsigned OpInfo[] = {
- 0U, // PHI
- 0U, // INLINEASM
- 0U, // DBG_LABEL
- 0U, // EH_LABEL
- 0U, // GC_LABEL
- 0U, // KILL
- 0U, // EXTRACT_SUBREG
- 0U, // INSERT_SUBREG
- 0U, // IMPLICIT_DEF
- 0U, // SUBREG_TO_REG
- 0U, // COPY_TO_REGCLASS
- 1U, // DBG_VALUE
- 67108875U, // ADCSSri
- 67108875U, // ADCSSrr
- 67108875U, // ADCSSrs
- 134750225U, // ADCri
- 134758417U, // ADCrr
- 202375185U, // ADCrs
- 135815189U, // ADDSri
- 135815189U, // ADDSrr
- 202924053U, // ADDSrs
- 134750234U, // ADDri
- 134758426U, // ADDrr
- 202375194U, // ADDrs
- 69206046U, // ADJCALLSTACKDOWN
- 69206066U, // ADJCALLSTACKUP
- 134750276U, // ANDri
- 134758468U, // ANDrr
- 202375236U, // ANDrs
- 271056968U, // ATOMIC_CMP_SWAP_I16
- 271581256U, // ATOMIC_CMP_SWAP_I32
- 272105544U, // ATOMIC_CMP_SWAP_I8
- 272629832U, // ATOMIC_LOAD_ADD_I16
- 273154120U, // ATOMIC_LOAD_ADD_I32
- 273678408U, // ATOMIC_LOAD_ADD_I8
- 274202696U, // ATOMIC_LOAD_AND_I16
- 274726984U, // ATOMIC_LOAD_AND_I32
- 275251272U, // ATOMIC_LOAD_AND_I8
- 275775560U, // ATOMIC_LOAD_NAND_I16
- 276299848U, // ATOMIC_LOAD_NAND_I32
- 276824136U, // ATOMIC_LOAD_NAND_I8
- 277348424U, // ATOMIC_LOAD_OR_I16
- 277872712U, // ATOMIC_LOAD_OR_I32
- 278397000U, // ATOMIC_LOAD_OR_I8
- 278921288U, // ATOMIC_LOAD_SUB_I16
- 279445576U, // ATOMIC_LOAD_SUB_I32
- 279969864U, // ATOMIC_LOAD_SUB_I8
- 280494152U, // ATOMIC_LOAD_XOR_I16
- 281018440U, // ATOMIC_LOAD_XOR_I32
- 281542728U, // ATOMIC_LOAD_XOR_I8
- 282067016U, // ATOMIC_SWAP_I16
- 282591304U, // ATOMIC_SWAP_I32
- 283115592U, // ATOMIC_SWAP_I8
- 69206089U, // B
- 135815244U, // BFC
- 135815248U, // BFI
- 134750292U, // BICri
- 134758484U, // BICrr
- 202375252U, // BICrs
- 337141848U, // BKPT
- 402653277U, // BL
- 69206113U, // BLX
- 69206113U, // BLXr9
- 337150054U, // BL_pred
- 402653277U, // BLr9
- 337150054U, // BLr9_pred
- 69206121U, // BMOVPCRX
- 69206121U, // BMOVPCRXr9
- 69206142U, // BRIND
- 67108994U, // BR_JTadd
- 485007499U, // BR_JTm
- 82362516U, // BR_JTr
- 69206173U, // BX
- 337141933U, // BXJ
- 552599729U, // BX_RET
- 69206173U, // BXr9
- 337141940U, // Bcc
- 620290230U, // CDP
- 687866042U, // CDP2
- 193U, // CLREX
- 739795143U, // CLZ
- 739795147U, // CMNzri
- 739795147U, // CMNzrr
- 806904011U, // CMNzrs
- 739795151U, // CMPri
- 739795151U, // CMPrr
- 806904015U, // CMPrs
- 739795151U, // CMPzri
- 739795151U, // CMPzrr
- 806904015U, // CMPzrs
- 872415304U, // CONSTPOOL_ENTRY
- 939524307U, // CPS
- 337141975U, // DBG
- 219U, // DMBish
- 227U, // DMBishst
- 237U, // DMBnsh
- 245U, // DMBnshst
- 255U, // DMBosh
- 263U, // DMBoshst
- 273U, // DMBst
- 280U, // DSBish
- 288U, // DSBishst
- 298U, // DSBnsh
- 306U, // DSBnshst
- 316U, // DSBosh
- 324U, // DSBoshst
- 334U, // DSBst
- 134750549U, // EORri
- 134758741U, // EORrr
- 202375509U, // EORrs
- 755556697U, // FCONSTD
- 756080985U, // FCONSTS
- 555221342U, // FMSTAT
- 355U, // ISBsy
- 85983591U, // Int_MemBarrierV6
- 372U, // Int_MemBarrierV7
- 86507879U, // Int_SyncBarrierV6
- 376U, // Int_SyncBarrierV7
- 87032188U, // Int_eh_sjlj_setjmp
- 221831558U, // LDC2L_OFFSET
- 825819526U, // LDC2L_OPTION
- 221839750U, // LDC2L_POST
- 221831558U, // LDC2L_PRE
- 217653638U, // LDC2_OFFSET
- 821633414U, // LDC2_OPTION
- 217653638U, // LDC2_POST
- 217653638U, // LDC2_PRE
- 221831563U, // LDCL_OFFSET
- 825819531U, // LDCL_OPTION
- 221839755U, // LDCL_POST
- 221831563U, // LDCL_PRE
- 217653643U, // LDC_OFFSET
- 821633419U, // LDC_OPTION
- 217653643U, // LDC_POST
- 217653643U, // LDC_PRE
- 1027686799U, // LDM
- 1027686799U, // LDM_RET
- 806904211U, // LDR
- 806904215U, // LDRB
- 202924444U, // LDRBT
- 202924439U, // LDRB_POST
- 202924439U, // LDRB_PRE
- 202924450U, // LDRD
- 605577634U, // LDRD_POST
- 605577634U, // LDRD_PRE
- 739795367U, // LDREX
- 739795373U, // LDREXB
- 135815604U, // LDREXD
- 739795387U, // LDREXH
- 806904258U, // LDRH
- 202924487U, // LDRHT
- 202924482U, // LDRH_POST
- 202924482U, // LDRH_PRE
- 806904269U, // LDRSB
- 202924499U, // LDRSBT
- 202924493U, // LDRSB_POST
- 202924493U, // LDRSB_PRE
- 806904282U, // LDRSH
- 202924512U, // LDRSHT
- 202924506U, // LDRSH_POST
- 202924506U, // LDRSH_PRE
- 202924519U, // LDRT
- 202924435U, // LDR_POST
- 202924435U, // LDR_PRE
- 806904211U, // LDRcp
- 1095238124U, // LEApcrel
- 1095762412U, // LEApcrelJT
- 620290546U, // MCR
- 671105526U, // MCR2
- 217637373U, // MCRR
- 671105538U, // MCRR2
- 827851274U, // MLA
- 806904334U, // MLS
- 135815698U, // MOVCCi
- 135815698U, // MOVCCr
- 202924562U, // MOVCCs
- 559940114U, // MOVPCLR
- 69206164U, // MOVPCRX
- 135815702U, // MOVTi16
- 761881106U, // MOVi
- 739795483U, // MOVi16
- 739795474U, // MOVi2pieces
- 739795483U, // MOVi32imm
- 761790994U, // MOVr
- 761790994U, // MOVrx
- 827949586U, // MOVs
- 739795488U, // MOVsra_flag
- 739795488U, // MOVsrl_flag
- 620290597U, // MRC
- 671105577U, // MRC2
- 217637424U, // MRRC
- 671105589U, // MRRC2
- 337142333U, // MRS
- 337142333U, // MRSsys
- 359686721U, // MSR
- 359768641U, // MSRi
- 360211009U, // MSRsys
- 360292929U, // MSRsysi
- 134758981U, // MUL
- 761881161U, // MVNi
- 761791049U, // MVNr
- 827949641U, // MVNs
- 538968653U, // NOP
- 134750801U, // ORRri
- 134758993U, // ORRrr
- 202375761U, // ORRrs
- 1166017109U, // PICADD
- 1233650261U, // PICLDR
- 1234174549U, // PICLDRB
- 1234698837U, // PICLDRH
- 1235223125U, // PICLDRSB
- 1235747413U, // PICLDRSH
- 1236271701U, // PICSTR
- 1236795989U, // PICSTRB
- 1237320277U, // PICSTRH
- 806904407U, // PKHBT
- 806904413U, // PKHTB
- 67109475U, // PLDWi
- 471859818U, // PLDWr
- 67109488U, // PLDi
- 471859830U, // PLDr
- 67109499U, // PLIi
- 471859841U, // PLIr
- 135815814U, // QADD
- 135815819U, // QADD16
- 135815826U, // QADD8
- 135815832U, // QASX
- 135815837U, // QDADD
- 135815843U, // QDSUB
- 135815849U, // QSAX
- 135815854U, // QSUB
- 135815859U, // QSUB16
- 135815866U, // QSUB8
- 739795648U, // RBIT
- 739795653U, // REV
- 739795657U, // REV16
- 739795663U, // REVSH
- 1008222933U, // RFE
- 1008222933U, // RFEW
- 135815897U, // RSBSri
- 202924761U, // RSBSrs
- 134750942U, // RSBri
- 202375902U, // RSBrs
- 67109602U, // RSCSri
- 67109602U, // RSCSrs
- 134750952U, // RSCri
- 202375912U, // RSCrs
- 135815916U, // SADD16
- 135815923U, // SADD8
- 135815929U, // SASX
- 67109630U, // SBCSSri
- 67109630U, // SBCSSrr
- 67109630U, // SBCSSrs
- 134750980U, // SBCri
- 134759172U, // SBCrr
- 202375940U, // SBCrs
- 806904584U, // SBFX
- 135815949U, // SEL
- 785U, // SETENDBE
- 795U, // SETENDLE
- 538968869U, // SEV
- 135815977U, // SHADD16
- 135815985U, // SHADD8
- 135815992U, // SHASX
- 135815998U, // SHSAX
- 135816004U, // SHSUB16
- 135816012U, // SHSUB8
- 337142611U, // SMC
- 806904663U, // SMLABB
- 806904670U, // SMLABT
- 806904677U, // SMLAD
- 806904683U, // SMLADX
- 827851634U, // SMLAL
- 806904696U, // SMLALBB
- 806904704U, // SMLALBT
- 806904712U, // SMLALD
- 806904719U, // SMLALDX
- 806904727U, // SMLALTB
- 806904735U, // SMLALTT
- 806904743U, // SMLATB
- 806904750U, // SMLATT
- 806904757U, // SMLAWB
- 806904764U, // SMLAWT
- 806904771U, // SMLSD
- 806904777U, // SMLSDX
- 806904784U, // SMLSLD
- 806904791U, // SMLSLDX
- 806904799U, // SMMLA
- 806904805U, // SMMLAR
- 806904812U, // SMMLS
- 806904818U, // SMMLSR
- 135816185U, // SMMUL
- 135816191U, // SMMULR
- 135816198U, // SMUAD
- 135816204U, // SMUADX
- 135816211U, // SMULBB
- 135816218U, // SMULBT
- 827851809U, // SMULL
- 135816231U, // SMULTB
- 135816238U, // SMULTT
- 135816245U, // SMULWB
- 135816252U, // SMULWT
- 135816259U, // SMUSD
- 135816265U, // SMUSDX
- 1036534864U, // SRS
- 1037059152U, // SRSW
- 135816276U, // SSAT16
- 806904923U, // SSATasr
- 806904923U, // SSATlsl
- 135816288U, // SSAX
- 135816293U, // SSUB16
- 135816300U, // SSUB8
- 221832306U, // STC2L_OFFSET
- 825820274U, // STC2L_OPTION
- 221840498U, // STC2L_POST
- 221832306U, // STC2L_PRE
- 217654386U, // STC2_OFFSET
- 821634162U, // STC2_OPTION
- 217654386U, // STC2_POST
- 217654386U, // STC2_PRE
- 221832311U, // STCL_OFFSET
- 825820279U, // STCL_OPTION
- 221840503U, // STCL_POST
- 221832311U, // STCL_PRE
- 217654391U, // STC_OFFSET
- 821634167U, // STC_OPTION
- 217654391U, // STC_POST
- 217654391U, // STC_PRE
- 1027687547U, // STM
- 806904959U, // STR
- 806904963U, // STRB
- 202900616U, // STRBT
- 202900611U, // STRB_POST
- 202900611U, // STRB_PRE
- 202925198U, // STRD
- 605553806U, // STRD_POST
- 605553806U, // STRD_PRE
- 135816339U, // STREX
- 135816345U, // STREXB
- 806904992U, // STREXD
- 135816359U, // STREXH
- 806905006U, // STRH
- 202900659U, // STRHT
- 202900654U, // STRH_POST
- 202900654U, // STRH_PRE
- 202900665U, // STRT
- 202900607U, // STR_POST
- 202900607U, // STR_PRE
- 135816382U, // SUBSri
- 135816382U, // SUBSrr
- 202925246U, // SUBSrs
- 134751427U, // SUBri
- 134759619U, // SUBrr
- 202376387U, // SUBrs
- 337142983U, // SVC
- 135816395U, // SWP
- 135816399U, // SWPB
- 135816404U, // SXTAB16rr
- 806905044U, // SXTAB16rr_rot
- 135816412U, // SXTABrr
- 806905052U, // SXTABrr_rot
- 135816418U, // SXTAHrr
- 806905058U, // SXTAHrr_rot
- 739796200U, // SXTB16r
- 135816424U, // SXTB16r_rot
- 739796207U, // SXTBr
- 135816431U, // SXTBr_rot
- 739796212U, // SXTHr
- 135816436U, // SXTHr_rot
- 739796217U, // TEQri
- 739796217U, // TEQrr
- 806905081U, // TEQrs
- 1277U, // TPsoft
- 538969360U, // TRAP
- 739796245U, // TSTri
- 739796245U, // TSTrr
- 806905109U, // TSTrs
- 135816473U, // UADD16
- 135816480U, // UADD8
- 135816486U, // UASX
- 806905131U, // UBFX
- 135816496U, // UHADD16
- 135816504U, // UHADD8
- 135816511U, // UHASX
- 135816517U, // UHSAX
- 135816523U, // UHSUB16
- 135816531U, // UHSUB8
- 806905178U, // UMAAL
- 827852128U, // UMLAL
- 827852134U, // UMULL
- 135816556U, // UQADD16
- 135816564U, // UQADD8
- 135816571U, // UQASX
- 135816577U, // UQSAX
- 135816583U, // UQSUB16
- 135816591U, // UQSUB8
- 135816598U, // USAD8
- 806905244U, // USADA8
- 135816611U, // USAT16
- 806905258U, // USATasr
- 806905258U, // USATlsl
- 135816623U, // USAX
- 135816628U, // USUB16
- 135816635U, // USUB8
- 135816641U, // UXTAB16rr
- 806905281U, // UXTAB16rr_rot
- 135816649U, // UXTABrr
- 806905289U, // UXTABrr_rot
- 135816655U, // UXTAHrr
- 806905295U, // UXTAHrr_rot
- 739796437U, // UXTB16r
- 135816661U, // UXTB16r_rot
- 739796444U, // UXTBr
- 135816668U, // UXTBr_rot
- 739796449U, // UXTHr
- 135816673U, // UXTHr_rot
- 836257254U, // VABALsv2i64
- 836781542U, // VABALsv4i32
- 837305830U, // VABALsv8i16
- 837830118U, // VABALuv2i64
- 838354406U, // VABALuv4i32
- 838878694U, // VABALuv8i16
- 837305836U, // VABAsv16i8
- 836257260U, // VABAsv2i32
- 836781548U, // VABAsv4i16
- 836257260U, // VABAsv4i32
- 836781548U, // VABAsv8i16
- 837305836U, // VABAsv8i8
- 838878700U, // VABAuv16i8
- 837830124U, // VABAuv2i32
- 838354412U, // VABAuv4i16
- 837830124U, // VABAuv4i32
- 838354412U, // VABAuv8i16
- 838878700U, // VABAuv8i8
- 165152241U, // VABDLsv2i64
- 165676529U, // VABDLsv4i32
- 166200817U, // VABDLsv8i16
- 166725105U, // VABDLuv2i64
- 167249393U, // VABDLuv4i32
- 167773681U, // VABDLuv8i16
- 152102391U, // VABDfd
- 152102391U, // VABDfq
- 166200823U, // VABDsv16i8
- 165152247U, // VABDsv2i32
- 165676535U, // VABDsv4i16
- 165152247U, // VABDsv4i32
- 165676535U, // VABDsv8i16
- 166200823U, // VABDsv8i8
- 167773687U, // VABDuv16i8
- 166725111U, // VABDuv2i32
- 167249399U, // VABDuv4i16
- 166725111U, // VABDuv4i32
- 167249399U, // VABDuv8i16
- 167773687U, // VABDuv8i8
- 755557884U, // VABSD
- 756082172U, // VABSS
- 756082172U, // VABSfd
- 756082172U, // VABSfd_sfp
- 756082172U, // VABSfq
- 770180604U, // VABSv16i8
- 769132028U, // VABSv2i32
- 769656316U, // VABSv4i16
- 769132028U, // VABSv4i32
- 769656316U, // VABSv8i16
- 770180604U, // VABSv8i8
- 152102401U, // VACGEd
- 152102401U, // VACGEq
- 152102407U, // VACGTd
- 152102407U, // VACGTq
- 151578125U, // VADDD
- 168298002U, // VADDHNv2i32
- 168822290U, // VADDHNv4i16
- 169346578U, // VADDHNv8i8
- 165152281U, // VADDLsv2i64
- 165676569U, // VADDLsv4i32
- 166200857U, // VADDLsv8i16
- 166725145U, // VADDLuv2i64
- 167249433U, // VADDLuv4i32
- 167773721U, // VADDLuv8i16
- 152102413U, // VADDS
- 165152287U, // VADDWsv2i64
- 165676575U, // VADDWsv4i32
- 166200863U, // VADDWsv8i16
- 166725151U, // VADDWuv2i64
- 167249439U, // VADDWuv4i32
- 167773727U, // VADDWuv8i16
- 152102413U, // VADDfd
- 152102413U, // VADDfd_sfp
- 152102413U, // VADDfq
- 169870861U, // VADDv16i8
- 168297997U, // VADDv1i64
- 168822285U, // VADDv2i32
- 168297997U, // VADDv2i64
- 169346573U, // VADDv4i16
- 168822285U, // VADDv4i32
- 169346573U, // VADDv8i16
- 169870861U, // VADDv8i8
- 135816741U, // VANDd
- 135816741U, // VANDq
- 135816746U, // VBICd
- 135816746U, // VBICq
- 806905391U, // VBIFd
- 806905391U, // VBIFq
- 806905396U, // VBITd
- 806905396U, // VBITq
- 806905401U, // VBSLd
- 806905401U, // VBSLq
- 152102462U, // VCEQfd
- 152102462U, // VCEQfq
- 169870910U, // VCEQv16i8
- 168822334U, // VCEQv2i32
- 169346622U, // VCEQv4i16
- 168822334U, // VCEQv4i32
- 169346622U, // VCEQv8i16
- 169870910U, // VCEQv8i8
- 773850686U, // VCEQzv16i8
- 756082238U, // VCEQzv2f32
- 772802110U, // VCEQzv2i32
- 756082238U, // VCEQzv4f32
- 773326398U, // VCEQzv4i16
- 772802110U, // VCEQzv4i32
- 773326398U, // VCEQzv8i16
- 773850686U, // VCEQzv8i8
- 152102467U, // VCGEfd
- 152102467U, // VCGEfq
- 166200899U, // VCGEsv16i8
- 165152323U, // VCGEsv2i32
- 165676611U, // VCGEsv4i16
- 165152323U, // VCGEsv4i32
- 165676611U, // VCGEsv8i16
- 166200899U, // VCGEsv8i8
- 167773763U, // VCGEuv16i8
- 166725187U, // VCGEuv2i32
- 167249475U, // VCGEuv4i16
- 166725187U, // VCGEuv4i32
- 167249475U, // VCGEuv8i16
- 167773763U, // VCGEuv8i8
- 770180675U, // VCGEzv16i8
- 756082243U, // VCGEzv2f32
- 769132099U, // VCGEzv2i32
- 756082243U, // VCGEzv4f32
- 769656387U, // VCGEzv4i16
- 769132099U, // VCGEzv4i32
- 769656387U, // VCGEzv8i16
- 770180675U, // VCGEzv8i8
- 152102472U, // VCGTfd
- 152102472U, // VCGTfq
- 166200904U, // VCGTsv16i8
- 165152328U, // VCGTsv2i32
- 165676616U, // VCGTsv4i16
- 165152328U, // VCGTsv4i32
- 165676616U, // VCGTsv8i16
- 166200904U, // VCGTsv8i8
- 167773768U, // VCGTuv16i8
- 166725192U, // VCGTuv2i32
- 167249480U, // VCGTuv4i16
- 166725192U, // VCGTuv4i32
- 167249480U, // VCGTuv8i16
- 167773768U, // VCGTuv8i8
- 770180680U, // VCGTzv16i8
- 756082248U, // VCGTzv2f32
- 769132104U, // VCGTzv2i32
- 756082248U, // VCGTzv4f32
- 769656392U, // VCGTzv4i16
- 769132104U, // VCGTzv4i32
- 769656392U, // VCGTzv8i16
- 770180680U, // VCGTzv8i8
- 770180685U, // VCLEzv16i8
- 756082253U, // VCLEzv2f32
- 769132109U, // VCLEzv2i32
- 756082253U, // VCLEzv4f32
- 769656397U, // VCLEzv4i16
- 769132109U, // VCLEzv4i32
- 769656397U, // VCLEzv8i16
- 770180685U, // VCLEzv8i8
- 770180690U, // VCLSv16i8
- 769132114U, // VCLSv2i32
- 769656402U, // VCLSv4i16
- 769132114U, // VCLSv4i32
- 769656402U, // VCLSv8i16
- 770180690U, // VCLSv8i8
- 770180695U, // VCLTzv16i8
- 756082263U, // VCLTzv2f32
- 769132119U, // VCLTzv2i32
- 756082263U, // VCLTzv4f32
- 769656407U, // VCLTzv4i16
- 769132119U, // VCLTzv4i32
- 769656407U, // VCLTzv8i16
- 770180695U, // VCLTzv8i8
- 773850716U, // VCLZv16i8
- 772802140U, // VCLZv2i32
- 773326428U, // VCLZv4i16
- 772802140U, // VCLZv4i32
- 773326428U, // VCLZv8i16
- 773850716U, // VCLZv8i8
- 755557985U, // VCMPD
- 755557990U, // VCMPED
- 756082278U, // VCMPES
- 352962150U, // VCMPEZD
- 353486438U, // VCMPEZS
- 756082273U, // VCMPS
- 352962145U, // VCMPZD
- 353486433U, // VCMPZS
- 774399596U, // VCNTd
- 774399596U, // VCNTq
- 774899313U, // VCVTBHS
- 775423601U, // VCVTBSH
- 775947895U, // VCVTDS
- 776472183U, // VCVTSD
- 774899324U, // VCVTTHS
- 775423612U, // VCVTTSH
- 777119351U, // VCVTf2sd
- 777119351U, // VCVTf2sd_sfp
- 777119351U, // VCVTf2sq
- 777643639U, // VCVTf2ud
- 777643639U, // VCVTf2ud_sfp
- 777643639U, // VCVTf2uq
- 173074039U, // VCVTf2xsd
- 173074039U, // VCVTf2xsq
- 173598327U, // VCVTf2xud
- 173598327U, // VCVTf2xuq
- 778167927U, // VCVTs2fd
- 778167927U, // VCVTs2fd_sfp
- 778167927U, // VCVTs2fq
- 778692215U, // VCVTu2fd
- 778692215U, // VCVTu2fd_sfp
- 778692215U, // VCVTu2fq
- 174122615U, // VCVTxs2fd
- 174122615U, // VCVTxs2fq
- 174646903U, // VCVTxu2fd
- 174646903U, // VCVTxu2fq
- 151578242U, // VDIVD
- 152102530U, // VDIVS
- 779118215U, // VDUP16d
- 779118215U, // VDUP16q
- 779642503U, // VDUP32d
- 779642503U, // VDUP32q
- 774399623U, // VDUP8d
- 774399623U, // VDUP8q
- 175138439U, // VDUPLN16d
- 175138439U, // VDUPLN16q
- 175662727U, // VDUPLN32d
- 175662727U, // VDUPLN32q
- 170419847U, // VDUPLN8d
- 170419847U, // VDUPLN8q
- 175662727U, // VDUPLNfd
- 175662727U, // VDUPLNfq
- 779642503U, // VDUPfd
- 779642503U, // VDUPfdf
- 779642503U, // VDUPfq
- 779642503U, // VDUPfqf
- 135816844U, // VEORd
- 135816844U, // VEORq
- 846227089U, // VEXTd16
- 846751377U, // VEXTd32
- 841508497U, // VEXTd8
- 846751377U, // VEXTdf
- 846227089U, // VEXTq16
- 846751377U, // VEXTq32
- 841508497U, // VEXTq8
- 846751377U, // VEXTqf
- 175661401U, // VGETLNi32
- 165675353U, // VGETLNs16
- 166199641U, // VGETLNs8
- 167248217U, // VGETLNu16
- 167772505U, // VGETLNu8
- 166200982U, // VHADDsv16i8
- 165152406U, // VHADDsv2i32
- 165676694U, // VHADDsv4i16
- 165152406U, // VHADDsv4i32
- 165676694U, // VHADDsv8i16
- 166200982U, // VHADDsv8i8
- 167773846U, // VHADDuv16i8
- 166725270U, // VHADDuv2i32
- 167249558U, // VHADDuv4i16
- 166725270U, // VHADDuv4i32
- 167249558U, // VHADDuv8i16
- 167773846U, // VHADDuv8i8
- 166200988U, // VHSUBsv16i8
- 165152412U, // VHSUBsv2i32
- 165676700U, // VHSUBsv4i16
- 165152412U, // VHSUBsv4i32
- 165676700U, // VHSUBsv8i16
- 166200988U, // VHSUBsv8i8
- 167773852U, // VHSUBuv16i8
- 166725276U, // VHSUBuv2i32
- 167249564U, // VHSUBuv4i16
- 166725276U, // VHSUBuv4i32
- 167249564U, // VHSUBuv8i16
- 167773852U, // VHSUBuv8i8
- 243295906U, // VLD1d16
- 1317037730U, // VLD1d16Q
- 1384146594U, // VLD1d16T
- 243820194U, // VLD1d32
- 1317562018U, // VLD1d32Q
- 1384670882U, // VLD1d32T
- 244344482U, // VLD1d64
- 244868770U, // VLD1d8
- 1318610594U, // VLD1d8Q
- 1385719458U, // VLD1d8T
- 243820194U, // VLD1df
- 242353826U, // VLD1q16
- 242878114U, // VLD1q32
- 245499554U, // VLD1q64
- 237635234U, // VLD1q8
- 242878114U, // VLD1qf
- 1451255463U, // VLD2LNd16
- 1451779751U, // VLD2LNd32
- 1452828327U, // VLD2LNd8
- 1451255463U, // VLD2LNq16a
- 1451255463U, // VLD2LNq16b
- 1451779751U, // VLD2LNq32a
- 1451779751U, // VLD2LNq32b
- 645949095U, // VLD2d16
- 645949095U, // VLD2d16D
- 646473383U, // VLD2d32
- 646473383U, // VLD2d32D
- 646997666U, // VLD2d64
- 647521959U, // VLD2d8
- 647521959U, // VLD2d8D
- 1317037735U, // VLD2q16
- 1317562023U, // VLD2q32
- 1318610599U, // VLD2q8
- 1518364332U, // VLD3LNd16
- 1518888620U, // VLD3LNd32
- 1519937196U, // VLD3LNd8
- 1518364332U, // VLD3LNq16a
- 1518364332U, // VLD3LNq16b
- 1518888620U, // VLD3LNq32a
- 1518888620U, // VLD3LNq32b
- 1384146604U, // VLD3d16
- 1384670892U, // VLD3d32
- 1385195170U, // VLD3d64
- 1385719468U, // VLD3d8
- 1317037740U, // VLD3q16a
- 1317037740U, // VLD3q16b
- 1317562028U, // VLD3q32a
- 1317562028U, // VLD3q32b
- 1318610604U, // VLD3q8a
- 1318610604U, // VLD3q8b
- 1585473201U, // VLD4LNd16
- 1585997489U, // VLD4LNd32
- 1587046065U, // VLD4LNd8
- 1585473201U, // VLD4LNq16a
- 1585473201U, // VLD4LNq16b
- 1585997489U, // VLD4LNq32a
- 1585997489U, // VLD4LNq32b
- 1317037745U, // VLD4d16
- 1317562033U, // VLD4d32
- 1318086306U, // VLD4d64
- 1318610609U, // VLD4d8
- 1451255473U, // VLD4q16a
- 1451255473U, // VLD4q16b
- 1451779761U, // VLD4q32a
- 1451779761U, // VLD4q32b
- 1452828337U, // VLD4q8a
- 1452828337U, // VLD4q8b
- 1610614454U, // VLDMD
- 1610614454U, // VLDMS
- 178284219U, // VLDRD
- 135931584U, // VLDRQ
- 175662779U, // VLDRS
- 152102599U, // VMAXfd
- 152102599U, // VMAXfd_sfp
- 152102599U, // VMAXfq
- 166201031U, // VMAXsv16i8
- 165152455U, // VMAXsv2i32
- 165676743U, // VMAXsv4i16
- 165152455U, // VMAXsv4i32
- 165676743U, // VMAXsv8i16
- 166201031U, // VMAXsv8i8
- 167773895U, // VMAXuv16i8
- 166725319U, // VMAXuv2i32
- 167249607U, // VMAXuv4i16
- 166725319U, // VMAXuv4i32
- 167249607U, // VMAXuv8i16
- 167773895U, // VMAXuv8i8
- 152102604U, // VMINfd
- 152102604U, // VMINfd_sfp
- 152102604U, // VMINfq
- 166201036U, // VMINsv16i8
- 165152460U, // VMINsv2i32
- 165676748U, // VMINsv4i16
- 165152460U, // VMINsv4i32
- 165676748U, // VMINsv8i16
- 166201036U, // VMINsv8i8
- 167773900U, // VMINuv16i8
- 166725324U, // VMINuv2i32
- 167249612U, // VMINuv4i16
- 166725324U, // VMINuv4i32
- 167249612U, // VMINuv8i16
- 167773900U, // VMINuv8i8
- 822666961U, // VMLAD
- 232277718U, // VMLALslsv2i32
- 232802006U, // VMLALslsv4i16
- 233850582U, // VMLALsluv2i32
- 234374870U, // VMLALsluv4i16
- 836257494U, // VMLALsv2i64
- 836781782U, // VMLALsv4i32
- 837306070U, // VMLALsv8i16
- 837830358U, // VMLALuv2i64
- 838354646U, // VMLALuv4i32
- 838878934U, // VMLALuv8i16
- 823191249U, // VMLAS
- 823191249U, // VMLAfd
- 823191249U, // VMLAfq
- 219211473U, // VMLAslfd
- 219211473U, // VMLAslfq
- 235947729U, // VMLAslv2i32
- 236472017U, // VMLAslv4i16
- 235947729U, // VMLAslv4i32
- 236472017U, // VMLAslv8i16
- 840976081U, // VMLAv16i8
- 839927505U, // VMLAv2i32
- 840451793U, // VMLAv4i16
- 839927505U, // VMLAv4i32
- 840451793U, // VMLAv8i16
- 840976081U, // VMLAv8i8
- 822666972U, // VMLSD
- 232277729U, // VMLSLslsv2i32
- 232802017U, // VMLSLslsv4i16
- 233850593U, // VMLSLsluv2i32
- 234374881U, // VMLSLsluv4i16
- 836257505U, // VMLSLsv2i64
- 836781793U, // VMLSLsv4i32
- 837306081U, // VMLSLsv8i16
- 837830369U, // VMLSLuv2i64
- 838354657U, // VMLSLuv4i32
- 838878945U, // VMLSLuv8i16
- 823191260U, // VMLSS
- 823191260U, // VMLSfd
- 823191260U, // VMLSfq
- 219211484U, // VMLSslfd
- 219211484U, // VMLSslfq
- 235947740U, // VMLSslv2i32
- 236472028U, // VMLSslv4i16
- 235947740U, // VMLSslv4i32
- 236472028U, // VMLSslv8i16
- 840976092U, // VMLSv16i8
- 839927516U, // VMLSv2i32
- 840451804U, // VMLSv4i16
- 839927516U, // VMLSv4i32
- 840451804U, // VMLSv8i16
- 840976092U, // VMLSv8i8
- 755556697U, // VMOVD
- 135815513U, // VMOVDRR
- 151576921U, // VMOVDcc
- 739795289U, // VMOVDneon
- 769132263U, // VMOVLsv2i64
- 769656551U, // VMOVLsv4i32
- 770180839U, // VMOVLsv8i16
- 770705127U, // VMOVLuv2i64
- 771229415U, // VMOVLuv4i32
- 771753703U, // VMOVLuv8i16
- 772277997U, // VMOVNv2i32
- 772802285U, // VMOVNv4i16
- 773326573U, // VMOVNv8i8
- 739795289U, // VMOVQ
- 135815513U, // VMOVRRD
- 806904153U, // VMOVRRS
- 739795289U, // VMOVRS
- 756080985U, // VMOVS
- 739795289U, // VMOVSR
- 806904153U, // VMOVSRR
- 152101209U, // VMOVScc
- 773996889U, // VMOVv16i8
- 772432217U, // VMOVv1i64
- 772964697U, // VMOVv2i32
- 772432217U, // VMOVv2i64
- 773497177U, // VMOVv4i16
- 772964697U, // VMOVv4i32
- 773497177U, // VMOVv8i16
- 773996889U, // VMOVv8i8
- 337142110U, // VMRS
- 380110579U, // VMSR
- 151578360U, // VMULD
- 179308285U, // VMULLp
- 836241149U, // VMULLslsv2i32
- 836765437U, // VMULLslsv4i16
- 837814013U, // VMULLsluv2i32
- 838338301U, // VMULLsluv4i16
- 165152509U, // VMULLsv2i64
- 165676797U, // VMULLsv4i32
- 166201085U, // VMULLsv8i16
- 166725373U, // VMULLuv2i64
- 167249661U, // VMULLuv4i32
- 167773949U, // VMULLuv8i16
- 152102648U, // VMULS
- 152102648U, // VMULfd
- 152102648U, // VMULfd_sfp
- 152102648U, // VMULfq
- 179308280U, // VMULpd
- 179308280U, // VMULpq
- 823191288U, // VMULslfd
- 823191288U, // VMULslfq
- 839911160U, // VMULslv2i32
- 840435448U, // VMULslv4i16
- 839911160U, // VMULslv4i32
- 840435448U, // VMULslv8i16
- 169871096U, // VMULv16i8
- 168822520U, // VMULv2i32
- 169346808U, // VMULv4i16
- 168822520U, // VMULv4i32
- 169346808U, // VMULv8i16
- 169871096U, // VMULv8i8
- 739796739U, // VMVNd
- 739796739U, // VMVNq
- 755558152U, // VNEGD
- 151578376U, // VNEGDcc
- 756082440U, // VNEGS
- 152102664U, // VNEGScc
- 756082440U, // VNEGf32q
- 756082440U, // VNEGfd
- 756082440U, // VNEGfd_sfp
- 769656584U, // VNEGs16d
- 769656584U, // VNEGs16q
- 769132296U, // VNEGs32d
- 769132296U, // VNEGs32q
- 770180872U, // VNEGs8d
- 770180872U, // VNEGs8q
- 822667021U, // VNMLAD
- 823191309U, // VNMLAS
- 822667027U, // VNMLSD
- 823191315U, // VNMLSS
- 151578393U, // VNMULD
- 152102681U, // VNMULS
- 135816991U, // VORNd
- 135816991U, // VORNq
- 135816996U, // VORRd
- 135816996U, // VORRq
- 166217513U, // VPADALsv16i8
- 165168937U, // VPADALsv2i32
- 165693225U, // VPADALsv4i16
- 165168937U, // VPADALsv4i32
- 165693225U, // VPADALsv8i16
- 166217513U, // VPADALsv8i8
- 167790377U, // VPADALuv16i8
- 166741801U, // VPADALuv2i32
- 167266089U, // VPADALuv4i16
- 166741801U, // VPADALuv4i32
- 167266089U, // VPADALuv8i16
- 167790377U, // VPADALuv8i8
- 770180912U, // VPADDLsv16i8
- 769132336U, // VPADDLsv2i32
- 769656624U, // VPADDLsv4i16
- 769132336U, // VPADDLsv4i32
- 769656624U, // VPADDLsv8i16
- 770180912U, // VPADDLsv8i8
- 771753776U, // VPADDLuv16i8
- 770705200U, // VPADDLuv2i32
- 771229488U, // VPADDLuv4i16
- 770705200U, // VPADDLuv4i32
- 771229488U, // VPADDLuv8i16
- 771753776U, // VPADDLuv8i8
- 152102711U, // VPADDf
- 169346871U, // VPADDi16
- 168822583U, // VPADDi32
- 169871159U, // VPADDi8
- 152102717U, // VPMAXf
- 165676861U, // VPMAXs16
- 165152573U, // VPMAXs32
- 166201149U, // VPMAXs8
- 167249725U, // VPMAXu16
- 166725437U, // VPMAXu32
- 167774013U, // VPMAXu8
- 152102723U, // VPMINf
- 165676867U, // VPMINs16
- 165152579U, // VPMINs32
- 166201155U, // VPMINs8
- 167249731U, // VPMINu16
- 166725443U, // VPMINu32
- 167774019U, // VPMINu8
- 770180937U, // VQABSv16i8
- 769132361U, // VQABSv2i32
- 769656649U, // VQABSv4i16
- 769132361U, // VQABSv4i32
- 769656649U, // VQABSv8i16
- 770180937U, // VQABSv8i8
- 166201167U, // VQADDsv16i8
- 179832655U, // VQADDsv1i64
- 165152591U, // VQADDsv2i32
- 179832655U, // VQADDsv2i64
- 165676879U, // VQADDsv4i16
- 165152591U, // VQADDsv4i32
- 165676879U, // VQADDsv8i16
- 166201167U, // VQADDsv8i8
- 167774031U, // VQADDuv16i8
- 180356943U, // VQADDuv1i64
- 166725455U, // VQADDuv2i32
- 180356943U, // VQADDuv2i64
- 167249743U, // VQADDuv4i16
- 166725455U, // VQADDuv4i32
- 167249743U, // VQADDuv8i16
- 167774031U, // VQADDuv8i8
- 232277845U, // VQDMLALslv2i32
- 232802133U, // VQDMLALslv4i16
- 836257621U, // VQDMLALv2i64
- 836781909U, // VQDMLALv4i32
- 232277853U, // VQDMLSLslv2i32
- 232802141U, // VQDMLSLslv4i16
- 836257629U, // VQDMLSLv2i64
- 836781917U, // VQDMLSLv4i32
- 836241253U, // VQDMULHslv2i32
- 836765541U, // VQDMULHslv4i16
- 836241253U, // VQDMULHslv4i32
- 836765541U, // VQDMULHslv8i16
- 165152613U, // VQDMULHv2i32
- 165676901U, // VQDMULHv4i16
- 165152613U, // VQDMULHv4i32
- 165676901U, // VQDMULHv8i16
- 836241261U, // VQDMULLslv2i32
- 836765549U, // VQDMULLslv4i16
- 165152621U, // VQDMULLv2i64
- 165676909U, // VQDMULLv4i32
- 783812469U, // VQMOVNsuv2i32
- 769132405U, // VQMOVNsuv4i16
- 769656693U, // VQMOVNsuv8i8
- 783812477U, // VQMOVNsv2i32
- 769132413U, // VQMOVNsv4i16
- 769656701U, // VQMOVNsv8i8
- 784336765U, // VQMOVNuv2i32
- 770705277U, // VQMOVNuv4i16
- 771229565U, // VQMOVNuv8i8
- 770180996U, // VQNEGv16i8
- 769132420U, // VQNEGv2i32
- 769656708U, // VQNEGv4i16
- 769132420U, // VQNEGv4i32
- 769656708U, // VQNEGv8i16
- 770180996U, // VQNEGv8i8
- 836241290U, // VQRDMULHslv2i32
- 836765578U, // VQRDMULHslv4i16
- 836241290U, // VQRDMULHslv4i32
- 836765578U, // VQRDMULHslv8i16
- 165152650U, // VQRDMULHv2i32
- 165676938U, // VQRDMULHv4i16
- 165152650U, // VQRDMULHv4i32
- 165676938U, // VQRDMULHv8i16
- 166201235U, // VQRSHLsv16i8
- 179832723U, // VQRSHLsv1i64
- 165152659U, // VQRSHLsv2i32
- 179832723U, // VQRSHLsv2i64
- 165676947U, // VQRSHLsv4i16
- 165152659U, // VQRSHLsv4i32
- 165676947U, // VQRSHLsv8i16
- 166201235U, // VQRSHLsv8i8
- 167774099U, // VQRSHLuv16i8
- 180357011U, // VQRSHLuv1i64
- 166725523U, // VQRSHLuv2i32
- 180357011U, // VQRSHLuv2i64
- 167249811U, // VQRSHLuv4i16
- 166725523U, // VQRSHLuv4i32
- 167249811U, // VQRSHLuv8i16
- 167774099U, // VQRSHLuv8i8
- 179832730U, // VQRSHRNsv2i32
- 165152666U, // VQRSHRNsv4i16
- 165676954U, // VQRSHRNsv8i8
- 180357018U, // VQRSHRNuv2i32
- 166725530U, // VQRSHRNuv4i16
- 167249818U, // VQRSHRNuv8i8
- 179832738U, // VQRSHRUNv2i32
- 165152674U, // VQRSHRUNv4i16
- 165676962U, // VQRSHRUNv8i8
- 166201259U, // VQSHLsiv16i8
- 179832747U, // VQSHLsiv1i64
- 165152683U, // VQSHLsiv2i32
- 179832747U, // VQSHLsiv2i64
- 165676971U, // VQSHLsiv4i16
- 165152683U, // VQSHLsiv4i32
- 165676971U, // VQSHLsiv8i16
- 166201259U, // VQSHLsiv8i8
- 166201265U, // VQSHLsuv16i8
- 179832753U, // VQSHLsuv1i64
- 165152689U, // VQSHLsuv2i32
- 179832753U, // VQSHLsuv2i64
- 165676977U, // VQSHLsuv4i16
- 165152689U, // VQSHLsuv4i32
- 165676977U, // VQSHLsuv8i16
- 166201265U, // VQSHLsuv8i8
- 166201259U, // VQSHLsv16i8
- 179832747U, // VQSHLsv1i64
- 165152683U, // VQSHLsv2i32
- 179832747U, // VQSHLsv2i64
- 165676971U, // VQSHLsv4i16
- 165152683U, // VQSHLsv4i32
- 165676971U, // VQSHLsv8i16
- 166201259U, // VQSHLsv8i8
- 167774123U, // VQSHLuiv16i8
- 180357035U, // VQSHLuiv1i64
- 166725547U, // VQSHLuiv2i32
- 180357035U, // VQSHLuiv2i64
- 167249835U, // VQSHLuiv4i16
- 166725547U, // VQSHLuiv4i32
- 167249835U, // VQSHLuiv8i16
- 167774123U, // VQSHLuiv8i8
- 167774123U, // VQSHLuv16i8
- 180357035U, // VQSHLuv1i64
- 166725547U, // VQSHLuv2i32
- 180357035U, // VQSHLuv2i64
- 167249835U, // VQSHLuv4i16
- 166725547U, // VQSHLuv4i32
- 167249835U, // VQSHLuv8i16
- 167774123U, // VQSHLuv8i8
- 179832760U, // VQSHRNsv2i32
- 165152696U, // VQSHRNsv4i16
- 165676984U, // VQSHRNsv8i8
- 180357048U, // VQSHRNuv2i32
- 166725560U, // VQSHRNuv4i16
- 167249848U, // VQSHRNuv8i8
- 179832767U, // VQSHRUNv2i32
- 165152703U, // VQSHRUNv4i16
- 165676991U, // VQSHRUNv8i8
- 166201287U, // VQSUBsv16i8
- 179832775U, // VQSUBsv1i64
- 165152711U, // VQSUBsv2i32
- 179832775U, // VQSUBsv2i64
- 165676999U, // VQSUBsv4i16
- 165152711U, // VQSUBsv4i32
- 165676999U, // VQSUBsv8i16
- 166201287U, // VQSUBsv8i8
- 167774151U, // VQSUBuv16i8
- 180357063U, // VQSUBuv1i64
- 166725575U, // VQSUBuv2i32
- 180357063U, // VQSUBuv2i64
- 167249863U, // VQSUBuv4i16
- 166725575U, // VQSUBuv4i32
- 167249863U, // VQSUBuv8i16
- 167774151U, // VQSUBuv8i8
- 168298445U, // VRADDHNv2i32
- 168822733U, // VRADDHNv4i16
- 169347021U, // VRADDHNv8i8
- 770705365U, // VRECPEd
- 756082645U, // VRECPEfd
- 756082645U, // VRECPEfq
- 770705365U, // VRECPEq
- 152102876U, // VRECPSfd
- 152102876U, // VRECPSfq
- 774399971U, // VREV16d8
- 774399971U, // VREV16q8
- 779118570U, // VREV32d16
- 774399978U, // VREV32d8
- 779118570U, // VREV32q16
- 774399978U, // VREV32q8
- 779118577U, // VREV64d16
- 779642865U, // VREV64d32
- 774399985U, // VREV64d8
- 779642865U, // VREV64df
- 779118577U, // VREV64q16
- 779642865U, // VREV64q32
- 774399985U, // VREV64q8
- 779642865U, // VREV64qf
- 166201336U, // VRHADDsv16i8
- 165152760U, // VRHADDsv2i32
- 165677048U, // VRHADDsv4i16
- 165152760U, // VRHADDsv4i32
- 165677048U, // VRHADDsv8i16
- 166201336U, // VRHADDsv8i8
- 167774200U, // VRHADDuv16i8
- 166725624U, // VRHADDuv2i32
- 167249912U, // VRHADDuv4i16
- 166725624U, // VRHADDuv4i32
- 167249912U, // VRHADDuv8i16
- 167774200U, // VRHADDuv8i8
- 166201343U, // VRSHLsv16i8
- 179832831U, // VRSHLsv1i64
- 165152767U, // VRSHLsv2i32
- 179832831U, // VRSHLsv2i64
- 165677055U, // VRSHLsv4i16
- 165152767U, // VRSHLsv4i32
- 165677055U, // VRSHLsv8i16
- 166201343U, // VRSHLsv8i8
- 167774207U, // VRSHLuv16i8
- 180357119U, // VRSHLuv1i64
- 166725631U, // VRSHLuv2i32
- 180357119U, // VRSHLuv2i64
- 167249919U, // VRSHLuv4i16
- 166725631U, // VRSHLuv4i32
- 167249919U, // VRSHLuv8i16
- 167774207U, // VRSHLuv8i8
- 168298501U, // VRSHRNv2i32
- 168822789U, // VRSHRNv4i16
- 169347077U, // VRSHRNv8i8
- 166201356U, // VRSHRsv16i8
- 179832844U, // VRSHRsv1i64
- 165152780U, // VRSHRsv2i32
- 179832844U, // VRSHRsv2i64
- 165677068U, // VRSHRsv4i16
- 165152780U, // VRSHRsv4i32
- 165677068U, // VRSHRsv8i16
- 166201356U, // VRSHRsv8i8
- 167774220U, // VRSHRuv16i8
- 180357132U, // VRSHRuv1i64
- 166725644U, // VRSHRuv2i32
- 180357132U, // VRSHRuv2i64
- 167249932U, // VRSHRuv4i16
- 166725644U, // VRSHRuv4i32
- 167249932U, // VRSHRuv8i16
- 167774220U, // VRSHRuv8i8
- 770705426U, // VRSQRTEd
- 756082706U, // VRSQRTEfd
- 756082706U, // VRSQRTEfq
- 770705426U, // VRSQRTEq
- 152102938U, // VRSQRTSfd
- 152102938U, // VRSQRTSfq
- 837306402U, // VRSRAsv16i8
- 850937890U, // VRSRAsv1i64
- 836257826U, // VRSRAsv2i32
- 850937890U, // VRSRAsv2i64
- 836782114U, // VRSRAsv4i16
- 836257826U, // VRSRAsv4i32
- 836782114U, // VRSRAsv8i16
- 837306402U, // VRSRAsv8i8
- 838879266U, // VRSRAuv16i8
- 851462178U, // VRSRAuv1i64
- 837830690U, // VRSRAuv2i32
- 851462178U, // VRSRAuv2i64
- 838354978U, // VRSRAuv4i16
- 837830690U, // VRSRAuv4i32
- 838354978U, // VRSRAuv8i16
- 838879266U, // VRSRAuv8i8
- 168298536U, // VRSUBHNv2i32
- 168822824U, // VRSUBHNv4i16
- 169347112U, // VRSUBHNv8i8
- 846225753U, // VSETLNi16
- 846750041U, // VSETLNi32
- 841507161U, // VSETLNi8
- 169347120U, // VSHLLi16
- 168822832U, // VSHLLi32
- 169871408U, // VSHLLi8
- 165152816U, // VSHLLsv2i64
- 165677104U, // VSHLLsv4i32
- 166201392U, // VSHLLsv8i16
- 166725680U, // VSHLLuv2i64
- 167249968U, // VSHLLuv4i32
- 167774256U, // VSHLLuv8i16
- 169871414U, // VSHLiv16i8
- 168298550U, // VSHLiv1i64
- 168822838U, // VSHLiv2i32
- 168298550U, // VSHLiv2i64
- 169347126U, // VSHLiv4i16
- 168822838U, // VSHLiv4i32
- 169347126U, // VSHLiv8i16
- 169871414U, // VSHLiv8i8
- 166201398U, // VSHLsv16i8
- 179832886U, // VSHLsv1i64
- 165152822U, // VSHLsv2i32
- 179832886U, // VSHLsv2i64
- 165677110U, // VSHLsv4i16
- 165152822U, // VSHLsv4i32
- 165677110U, // VSHLsv8i16
- 166201398U, // VSHLsv8i8
- 167774262U, // VSHLuv16i8
- 180357174U, // VSHLuv1i64
- 166725686U, // VSHLuv2i32
- 180357174U, // VSHLuv2i64
- 167249974U, // VSHLuv4i16
- 166725686U, // VSHLuv4i32
- 167249974U, // VSHLuv8i16
- 167774262U, // VSHLuv8i8
- 168298555U, // VSHRNv2i32
- 168822843U, // VSHRNv4i16
- 169347131U, // VSHRNv8i8
- 166201409U, // VSHRsv16i8
- 179832897U, // VSHRsv1i64
- 165152833U, // VSHRsv2i32
- 179832897U, // VSHRsv2i64
- 165677121U, // VSHRsv4i16
- 165152833U, // VSHRsv4i32
- 165677121U, // VSHRsv8i16
- 166201409U, // VSHRsv8i8
- 167774273U, // VSHRuv16i8
- 180357185U, // VSHRuv1i64
- 166725697U, // VSHRuv2i32
- 180357185U, // VSHRuv2i64
- 167249985U, // VSHRuv4i16
- 166725697U, // VSHRuv4i32
- 167249985U, // VSHRuv8i16
- 167774273U, // VSHRuv8i8
- 180881015U, // VSHTOD
- 181405303U, // VSHTOS
- 786032247U, // VSITOD
- 778167927U, // VSITOS
- 841508934U, // VSLIv16i8
- 849373254U, // VSLIv1i64
- 846751814U, // VSLIv2i32
- 849373254U, // VSLIv2i64
- 846227526U, // VSLIv4i16
- 846751814U, // VSLIv4i32
- 846227526U, // VSLIv8i16
- 841508934U, // VSLIv8i8
- 181986935U, // VSLTOD
- 174122615U, // VSLTOS
- 755558475U, // VSQRTD
- 756082763U, // VSQRTS
- 837306449U, // VSRAsv16i8
- 850937937U, // VSRAsv1i64
- 836257873U, // VSRAsv2i32
- 850937937U, // VSRAsv2i64
- 836782161U, // VSRAsv4i16
- 836257873U, // VSRAsv4i32
- 836782161U, // VSRAsv8i16
- 837306449U, // VSRAsv8i8
- 838879313U, // VSRAuv16i8
- 851462225U, // VSRAuv1i64
- 837830737U, // VSRAuv2i32
- 851462225U, // VSRAuv2i64
- 838355025U, // VSRAuv4i16
- 837830737U, // VSRAuv4i32
- 838355025U, // VSRAuv8i16
- 838879313U, // VSRAuv8i8
- 841508950U, // VSRIv16i8
- 849373270U, // VSRIv1i64
- 846751830U, // VSRIv2i32
- 849373270U, // VSRIv2i64
- 846227542U, // VSRIv4i16
- 846751830U, // VSRIv4i32
- 846227542U, // VSRIv8i16
- 841508950U, // VSRIv8i8
- 243451995U, // VST1d16
- 1317193819U, // VST1d16Q
- 1384302683U, // VST1d16T
- 243976283U, // VST1d32
- 1317718107U, // VST1d32Q
- 1384826971U, // VST1d32T
- 244500571U, // VST1d64
- 245024859U, // VST1d8
- 1318766683U, // VST1d8Q
- 1385875547U, // VST1d8T
- 243976283U, // VST1df
- 242411611U, // VST1q16
- 242935899U, // VST1q32
- 245557339U, // VST1q64
- 237693019U, // VST1q8
- 242935899U, // VST1qf
- 1384302688U, // VST2LNd16
- 1384826976U, // VST2LNd32
- 1385875552U, // VST2LNd8
- 1384302688U, // VST2LNq16a
- 1384302688U, // VST2LNq16b
- 1384826976U, // VST2LNq32a
- 1384826976U, // VST2LNq32b
- 646105184U, // VST2d16
- 646105184U, // VST2d16D
- 646629472U, // VST2d32
- 646629472U, // VST2d32D
- 647153755U, // VST2d64
- 647678048U, // VST2d8
- 647678048U, // VST2d8D
- 1317193824U, // VST2q16
- 1317718112U, // VST2q32
- 1318766688U, // VST2q8
- 1317193829U, // VST3LNd16
- 1317718117U, // VST3LNd32
- 1318766693U, // VST3LNd8
- 1317193829U, // VST3LNq16a
- 1317193829U, // VST3LNq16b
- 1317718117U, // VST3LNq32a
- 1317718117U, // VST3LNq32b
- 1384302693U, // VST3d16
- 1384826981U, // VST3d32
- 1385351259U, // VST3d64
- 1385875557U, // VST3d8
- 1317210213U, // VST3q16a
- 1317210213U, // VST3q16b
- 1317734501U, // VST3q32a
- 1317734501U, // VST3q32b
- 1318783077U, // VST3q8a
- 1318783077U, // VST3q8b
- 1451411562U, // VST4LNd16
- 1451935850U, // VST4LNd32
- 1452984426U, // VST4LNd8
- 1451411562U, // VST4LNq16a
- 1451411562U, // VST4LNq16b
- 1451935850U, // VST4LNq32a
- 1451935850U, // VST4LNq32b
- 1317193834U, // VST4d16
- 1317718122U, // VST4d32
- 1318242395U, // VST4d64
- 1318766698U, // VST4d8
- 1451427946U, // VST4q16a
- 1451427946U, // VST4q16b
- 1451952234U, // VST4q32a
- 1451952234U, // VST4q32b
- 1453000810U, // VST4q8a
- 1453000810U, // VST4q8b
- 1610614895U, // VSTMD
- 1610614895U, // VSTMS
- 178284660U, // VSTRD
- 135932025U, // VSTRQ
- 175663220U, // VSTRS
- 151578752U, // VSUBD
- 168298629U, // VSUBHNv2i32
- 168822917U, // VSUBHNv4i16
- 169347205U, // VSUBHNv8i8
- 165152908U, // VSUBLsv2i64
- 165677196U, // VSUBLsv4i32
- 166201484U, // VSUBLsv8i16
- 166725772U, // VSUBLuv2i64
- 167250060U, // VSUBLuv4i32
- 167774348U, // VSUBLuv8i16
- 152103040U, // VSUBS
- 165152914U, // VSUBWsv2i64
- 165677202U, // VSUBWsv4i32
- 166201490U, // VSUBWsv8i16
- 166725778U, // VSUBWuv2i64
- 167250066U, // VSUBWuv4i32
- 167774354U, // VSUBWuv8i16
- 152103040U, // VSUBfd
- 152103040U, // VSUBfd_sfp
- 152103040U, // VSUBfq
- 169871488U, // VSUBv16i8
- 168298624U, // VSUBv1i64
- 168822912U, // VSUBv2i32
- 168298624U, // VSUBv2i64
- 169347200U, // VSUBv4i16
- 168822912U, // VSUBv4i32
- 169347200U, // VSUBv8i16
- 169871488U, // VSUBv8i8
- 739797144U, // VSWPd
- 739797144U, // VSWPq
- 170420381U, // VTBL1
- 841509021U, // VTBL2
- 237529245U, // VTBL3
- 640182429U, // VTBL4
- 841509026U, // VTBX1
- 237529250U, // VTBX2
- 640182434U, // VTBX3
- 1378379938U, // VTBX4
- 182453879U, // VTOSHD
- 182978167U, // VTOSHS
- 787605671U, // VTOSIRD
- 777119911U, // VTOSIRS
- 787605111U, // VTOSIZD
- 777119351U, // VTOSIZS
- 183559799U, // VTOSLD
- 173074039U, // VTOSLS
- 184026743U, // VTOUHD
- 184551031U, // VTOUHS
- 789178535U, // VTOUIRD
- 777644199U, // VTOUIRS
- 789177975U, // VTOUIZD
- 777643639U, // VTOUIZS
- 185132663U, // VTOULD
- 173598327U, // VTOULS
- 846227629U, // VTRNd16
- 846751917U, // VTRNd32
- 841509037U, // VTRNd8
- 846227629U, // VTRNq16
- 846751917U, // VTRNq32
- 841509037U, // VTRNq8
- 170420402U, // VTSTv16i8
- 175663282U, // VTSTv2i32
- 175138994U, // VTSTv4i16
- 175663282U, // VTSTv4i32
- 175138994U, // VTSTv8i16
- 170420402U, // VTSTv8i8
- 185599607U, // VUHTOD
- 186123895U, // VUHTOS
- 790750839U, // VUITOD
- 778692215U, // VUITOS
- 186705527U, // VULTOD
- 174646903U, // VULTOS
- 846227639U, // VUZPd16
- 846751927U, // VUZPd32
- 841509047U, // VUZPd8
- 846227639U, // VUZPq16
- 846751927U, // VUZPq32
- 841509047U, // VUZPq8
- 846227644U, // VZIPd16
- 846751932U, // VZIPd32
- 841509052U, // VZIPd8
- 846227644U, // VZIPq16
- 846751932U, // VZIPq32
- 841509052U, // VZIPq8
- 538970305U, // WFE
- 538970309U, // WFI
- 538970313U, // YIELD
- 1679319057U, // t2ADCSri
- 1730732049U, // t2ADCSrr
- 1797840913U, // t2ADCSrs
- 1679319057U, // t2ADCri
- 1730732049U, // t2ADCrr
- 1797840913U, // t2ADCrs
- 187228181U, // t2ADDSri
- 187228181U, // t2ADDSrr
- 858316821U, // t2ADDSrs
- 1730732058U, // t2ADDrSPi
- 135817423U, // t2ADDrSPi12
- 1797840922U, // t2ADDrSPs
- 1730732058U, // t2ADDri
- 1679321295U, // t2ADDri12
- 1730732058U, // t2ADDrr
- 1797840922U, // t2ADDrs
- 1679319108U, // t2ANDri
- 1730732100U, // t2ANDrr
- 1797840964U, // t2ANDrs
- 1730734292U, // t2ASRri
- 1730734292U, // t2ASRrr
- 69208280U, // t2B
- 135815244U, // t2BFC
- 806903888U, // t2BFI
- 1679319124U, // t2BICri
- 1730732116U, // t2BICrr
- 1797840980U, // t2BICrs
- 120586388U, // t2BR_JT
- 337141933U, // t2BXJ
- 388620468U, // t2Bcc
- 538968257U, // t2CLREX
- 739795143U, // t2CLZ
- 791208139U, // t2CMNzri
- 791208139U, // t2CMNzrr
- 187228363U, // t2CMNzrs
- 791208143U, // t2CMPri
- 791208143U, // t2CMPrr
- 187228367U, // t2CMPrs
- 791208143U, // t2CMPzri
- 791208143U, // t2CMPzrr
- 187228367U, // t2CMPzrs
- 939524307U, // t2CPS
- 337141975U, // t2DBG
- 590872948U, // t2DMBish
- 591397236U, // t2DMBishst
- 591921524U, // t2DMBnsh
- 592445812U, // t2DMBnshst
- 592970100U, // t2DMBosh
- 593494388U, // t2DMBoshst
- 594018676U, // t2DMBst
- 590872952U, // t2DSBish
- 591397240U, // t2DSBishst
- 591921528U, // t2DSBnsh
- 592445816U, // t2DSBnshst
- 592970104U, // t2DSBosh
- 593494392U, // t2DSBoshst
- 594018680U, // t2DSBst
- 1679319381U, // t2EORri
- 1730732373U, // t2EORrr
- 1797841237U, // t2EORrs
- 538968419U, // t2ISBsy
- 1811941597U, // t2IT
- 372U, // t2Int_MemBarrierV7
- 376U, // t2Int_SyncBarrierV7
- 1879050464U, // t2Int_eh_sjlj_setjmp
- 1027809679U, // t2LDM
- 1027809679U, // t2LDM_RET
- 135815580U, // t2LDRBT
- 806904215U, // t2LDRB_POST
- 806904215U, // t2LDRB_PRE
- 187228567U, // t2LDRBi12
- 135815575U, // t2LDRBi8
- 791208343U, // t2LDRBpci
- 858317207U, // t2LDRBs
- 806904226U, // t2LDRDi8
- 135815586U, // t2LDRDpci
- 739795367U, // t2LDREX
- 739795373U, // t2LDREXB
- 135815604U, // t2LDREXD
- 739795387U, // t2LDREXH
- 135815623U, // t2LDRHT
- 806904258U, // t2LDRH_POST
- 806904258U, // t2LDRH_PRE
- 187228610U, // t2LDRHi12
- 135815618U, // t2LDRHi8
- 791208386U, // t2LDRHpci
- 858317250U, // t2LDRHs
- 135815635U, // t2LDRSBT
- 806904269U, // t2LDRSB_POST
- 806904269U, // t2LDRSB_PRE
- 187228621U, // t2LDRSBi12
- 135815629U, // t2LDRSBi8
- 791208397U, // t2LDRSBpci
- 858317261U, // t2LDRSBs
- 135815648U, // t2LDRSHT
- 806904282U, // t2LDRSH_POST
- 806904282U, // t2LDRSH_PRE
- 187228634U, // t2LDRSHi12
- 135815642U, // t2LDRSHi8
- 791208410U, // t2LDRSHpci
- 858317274U, // t2LDRSHs
- 135815655U, // t2LDRT
- 806904211U, // t2LDR_POST
- 806904211U, // t2LDR_PRE
- 187228563U, // t2LDRi12
- 135815571U, // t2LDRi8
- 791208339U, // t2LDRpci
- 67111141U, // t2LDRpci_pic
- 858317203U, // t2LDRs
- 791365870U, // t2LEApcrel
- 187386094U, // t2LEApcrelJT
- 1730734322U, // t2LSLri
- 1730734322U, // t2LSLrr
- 1730734326U, // t2LSRri
- 1730734326U, // t2LSRrr
- 806904330U, // t2MLA
- 806904334U, // t2MLS
- 858319060U, // t2MOVCCasr
- 187228690U, // t2MOVCCi
- 858319090U, // t2MOVCClsl
- 858319094U, // t2MOVCClsr
- 187228690U, // t2MOVCCr
- 858319098U, // t2MOVCCror
- 135815702U, // t2MOVTi16
- 1967350290U, // t2MOVi
- 739795483U, // t2MOVi16
- 739795483U, // t2MOVi32imm
- 1967350290U, // t2MOVr
- 1967212798U, // t2MOVrx
- 67111170U, // t2MOVsra_flag
- 67111178U, // t2MOVsrl_flag
- 337142333U, // t2MRS
- 337142333U, // t2MRSsys
- 359686721U, // t2MSR
- 360211009U, // t2MSRsys
- 135815749U, // t2MUL
- 1967211081U, // t2MVNi
- 791208521U, // t2MVNr
- 187228745U, // t2MVNs
- 594543181U, // t2NOP
- 1679321362U, // t2ORNri
- 1679321362U, // t2ORNrr
- 1746430226U, // t2ORNrs
- 1679319633U, // t2ORRri
- 1730732625U, // t2ORRrr
- 1797841489U, // t2ORRrs
- 806904407U, // t2PKHBT
- 806904413U, // t2PKHTB
- 740002070U, // t2PLDWi12
- 740010262U, // t2PLDWi8
- 796395798U, // t2PLDWpci
- 797165846U, // t2PLDWr
- 193194262U, // t2PLDWs
- 740002075U, // t2PLDi12
- 740010267U, // t2PLDi8
- 796395803U, // t2PLDpci
- 797165851U, // t2PLDr
- 193194267U, // t2PLDs
- 740002079U, // t2PLIi12
- 740010271U, // t2PLIi8
- 796395807U, // t2PLIpci
- 797165855U, // t2PLIr
- 193194271U, // t2PLIs
- 135815814U, // t2QADD
- 135815819U, // t2QADD16
- 135815826U, // t2QADD8
- 135815832U, // t2QASX
- 135815837U, // t2QDADD
- 135815843U, // t2QDSUB
- 135815849U, // t2QSAX
- 135815854U, // t2QSUB
- 135815859U, // t2QSUB16
- 135815866U, // t2QSUB8
- 739795648U, // t2RBIT
- 791208645U, // t2REV
- 791208649U, // t2REV16
- 791208655U, // t2REVSH
- 337144099U, // t2RFEDB
- 337144105U, // t2RFEDBW
- 337144111U, // t2RFEIA
- 337144111U, // t2RFEIAW
- 1730734330U, // t2RORri
- 1730734330U, // t2RORrr
- 2013266654U, // t2RSBSri
- 1947755230U, // t2RSBSrs
- 187228894U, // t2RSBri
- 806904542U, // t2RSBrs
- 135815916U, // t2SADD16
- 135815923U, // t2SADD8
- 135815929U, // t2SASX
- 1679319812U, // t2SBCSri
- 1730732804U, // t2SBCSrr
- 1797841668U, // t2SBCSrs
- 1679319812U, // t2SBCri
- 1730732804U, // t2SBCrr
- 1797841668U, // t2SBCrs
- 806904584U, // t2SBFX
- 135817525U, // t2SDIV
- 135815949U, // t2SEL
- 594543397U, // t2SEV
- 135815977U, // t2SHADD16
- 135815985U, // t2SHADD8
- 135815992U, // t2SHASX
- 135815998U, // t2SHSAX
- 135816004U, // t2SHSUB16
- 135816012U, // t2SHSUB8
- 337142611U, // t2SMC
- 806904663U, // t2SMLABB
- 806904670U, // t2SMLABT
- 806904677U, // t2SMLAD
- 806904683U, // t2SMLADX
- 806904690U, // t2SMLAL
- 806904696U, // t2SMLALBB
- 806904704U, // t2SMLALBT
- 806904712U, // t2SMLALD
- 806904719U, // t2SMLALDX
- 806904727U, // t2SMLALTB
- 806904735U, // t2SMLALTT
- 806904743U, // t2SMLATB
- 806904750U, // t2SMLATT
- 806904757U, // t2SMLAWB
- 806904764U, // t2SMLAWT
- 806904771U, // t2SMLSD
- 806904777U, // t2SMLSDX
- 806904784U, // t2SMLSLD
- 806904791U, // t2SMLSLDX
- 806904799U, // t2SMMLA
- 806904805U, // t2SMMLAR
- 806904812U, // t2SMMLS
- 806904818U, // t2SMMLSR
- 135816185U, // t2SMMUL
- 135816191U, // t2SMMULR
- 135816198U, // t2SMUAD
- 135816204U, // t2SMUADX
- 135816211U, // t2SMULBB
- 135816218U, // t2SMULBT
- 806904865U, // t2SMULL
- 135816231U, // t2SMULTB
- 135816238U, // t2SMULTT
- 135816245U, // t2SMULWB
- 135816252U, // t2SMULWT
- 135816259U, // t2SMUSD
- 135816265U, // t2SMUSDX
- 365455674U, // t2SRSDB
- 365979962U, // t2SRSDBW
- 365455680U, // t2SRSIA
- 365979968U, // t2SRSIAW
- 135816276U, // t2SSAT16
- 806904923U, // t2SSATasr
- 806904923U, // t2SSATlsl
- 135816288U, // t2SSAX
- 135816293U, // t2SSUB16
- 135816300U, // t2SSUB8
- 1027810427U, // t2STM
- 135816328U, // t2STRBT
- 806880387U, // t2STRB_POST
- 806880387U, // t2STRB_PRE
- 187229315U, // t2STRBi12
- 135816323U, // t2STRBi8
- 858317955U, // t2STRBs
- 806904974U, // t2STRDi8
- 135816339U, // t2STREX
- 135816345U, // t2STREXB
- 806904992U, // t2STREXD
- 135816359U, // t2STREXH
- 135816371U, // t2STRHT
- 806880430U, // t2STRH_POST
- 806880430U, // t2STRH_PRE
- 187229358U, // t2STRHi12
- 135816366U, // t2STRHi8
- 858317998U, // t2STRHs
- 135816377U, // t2STRT
- 806880383U, // t2STR_POST
- 806880383U, // t2STR_PRE
- 187229311U, // t2STRi12
- 135816319U, // t2STRi8
- 858317951U, // t2STRs
- 187229374U, // t2SUBSri
- 187229374U, // t2SUBSrr
- 858318014U, // t2SUBSrs
- 1730733251U, // t2SUBrSPi
- 135817542U, // t2SUBrSPi12
- 67111243U, // t2SUBrSPi12_
- 67111251U, // t2SUBrSPi_
- 1746429123U, // t2SUBrSPs
- 67111260U, // t2SUBrSPs_
- 1730733251U, // t2SUBri
- 1679321414U, // t2SUBri12
- 1730733251U, // t2SUBrr
- 1797842115U, // t2SUBrs
- 135816404U, // t2SXTAB16rr
- 806905044U, // t2SXTAB16rr_rot
- 135816412U, // t2SXTABrr
- 806905052U, // t2SXTABrr_rot
- 135816418U, // t2SXTAHrr
- 806905058U, // t2SXTAHrr_rot
- 739796200U, // t2SXTB16r
- 135816424U, // t2SXTB16r_rot
- 791209199U, // t2SXTBr
- 187229423U, // t2SXTBr_rot
- 791209204U, // t2SXTHr
- 187229428U, // t2SXTHr_rot
- 2080377187U, // t2TBB
- 797165928U, // t2TBBgen
- 2080377196U, // t2TBH
- 797182321U, // t2TBHgen
- 791209209U, // t2TEQri
- 791209209U, // t2TEQrr
- 187229433U, // t2TEQrs
- 1277U, // t2TPsoft
- 791209237U, // t2TSTri
- 791209237U, // t2TSTrr
- 187229461U, // t2TSTrs
- 135816473U, // t2UADD16
- 135816480U, // t2UADD8
- 135816486U, // t2UASX
- 806905131U, // t2UBFX
- 135817589U, // t2UDIV
- 135816496U, // t2UHADD16
- 135816504U, // t2UHADD8
- 135816511U, // t2UHASX
- 135816517U, // t2UHSAX
- 135816523U, // t2UHSUB16
- 135816531U, // t2UHSUB8
- 806905178U, // t2UMAAL
- 806905184U, // t2UMLAL
- 806905190U, // t2UMULL
- 135816556U, // t2UQADD16
- 135816564U, // t2UQADD8
- 135816571U, // t2UQASX
- 135816577U, // t2UQSAX
- 135816583U, // t2UQSUB16
- 135816591U, // t2UQSUB8
- 135816598U, // t2USAD8
- 806905244U, // t2USADA8
- 135816611U, // t2USAT16
- 806905258U, // t2USATasr
- 806905258U, // t2USATlsl
- 135816623U, // t2USAX
- 135816628U, // t2USUB16
- 135816635U, // t2USUB8
- 135816641U, // t2UXTAB16rr
- 806905281U, // t2UXTAB16rr_rot
- 135816649U, // t2UXTABrr
- 806905289U, // t2UXTABrr_rot
- 135816655U, // t2UXTAHrr
- 806905295U, // t2UXTAHrr_rot
- 739796437U, // t2UXTB16r
- 135816661U, // t2UXTB16r_rot
- 791209436U, // t2UXTBr
- 187229660U, // t2UXTBr_rot
- 791209441U, // t2UXTHr
- 187229665U, // t2UXTHr_rot
- 594544833U, // t2WFE
- 594544837U, // t2WFI
- 594544841U, // t2YIELD
- 2206998545U, // tADC
- 135815194U, // tADDhirr
- 2206744602U, // tADDi3
- 2206998554U, // tADDi8
- 126880122U, // tADDrPCi
- 67127674U, // tADDrSP
- 67111290U, // tADDrSPi
- 2206744602U, // tADDrr
- 67389818U, // tADDspi
- 67127674U, // tADDspr
- 67127679U, // tADDspr_
- 69208454U, // tADJCALLSTACKDOWN
- 69208475U, // tADJCALLSTACKUP
- 2206998596U, // tAND
- 67127726U, // tANDsp
- 2206746836U, // tASRri
- 2207000788U, // tASRrr
- 69206089U, // tB
- 2206998612U, // tBIC
- 69208501U, // tBKPT
- 402653277U, // tBL
- 402653281U, // tBLXi
- 402653281U, // tBLXi_r9
- 69206113U, // tBLXr
- 69206113U, // tBLXr_r9
- 402653277U, // tBLr9
- 69206164U, // tBRIND
- 127402132U, // tBR_JTr
- 69206173U, // tBX
- 2491U, // tBX_RET
- 69206142U, // tBX_RET_vararg
- 69206173U, // tBXr9
- 337141940U, // tBcc
- 127926365U, // tBfar
- 67111361U, // tCBNZ
- 67111367U, // tCBZ
- 739795147U, // tCMNz
- 739795151U, // tCMPhir
- 739795151U, // tCMPi8
- 739795151U, // tCMPr
- 739795151U, // tCMPzhir
- 739795151U, // tCMPzi8
- 739795151U, // tCMPzr
- 939524307U, // tCPS
- 2206998869U, // tEOR
- 1879050464U, // tInt_eh_sjlj_setjmp
- 1027686799U, // tLDM
- 806904211U, // tLDR
- 806904215U, // tLDRB
- 806904215U, // tLDRBi
- 806904258U, // tLDRH
- 806904258U, // tLDRHi
- 135815629U, // tLDRSB
- 135815642U, // tLDRSH
- 739795347U, // tLDRcp
- 806904211U, // tLDRi
- 799539603U, // tLDRpci
- 67111372U, // tLDRpci_pic
- 135815571U, // tLDRspi
- 739797230U, // tLEApcrel
- 135817454U, // tLEApcrelJT
- 2206746866U, // tLSLri
- 2207000818U, // tLSLrr
- 2206746870U, // tLSRri
- 2207000822U, // tLSRrr
- 135815698U, // tMOVCCi
- 135815698U, // tMOVCCr
- 136317397U, // tMOVCCr_pseudo
- 67111392U, // tMOVSr
- 67111398U, // tMOVgpr2gpr
- 67111398U, // tMOVgpr2tgpr
- 2209473042U, // tMOVi8
- 67111398U, // tMOVr
- 67111398U, // tMOVtgpr2gpr
- 2206999109U, // tMUL
- 2209473097U, // tMVN
- 538968653U, // tNOP
- 2206999121U, // tORR
- 1203241557U, // tPICADD
- 538733035U, // tPOP
- 538733035U, // tPOP_RET
- 538733039U, // tPUSH
- 739795653U, // tREV
- 739795657U, // tREV16
- 739795663U, // tREVSH
- 2207000826U, // tROR
- 2209465054U, // tRSB
- 135815571U, // tRestore
- 2206999300U, // tSBC
- 785U, // tSETENDBE
- 795U, // tSETENDLE
- 538968869U, // tSEV
- 1027687547U, // tSTM
- 806904959U, // tSTR
- 806904963U, // tSTRB
- 806904963U, // tSTRBi
- 806905006U, // tSTRH
- 806905006U, // tSTRHi
- 806904959U, // tSTRi
- 135816319U, // tSTRspi
- 2206745795U, // tSUBi3
- 2206999747U, // tSUBi8
- 2206745795U, // tSUBrr
- 67389940U, // tSUBspi
- 67389788U, // tSUBspi_
- 337142983U, // tSVC
- 739796207U, // tSXTB
- 739796212U, // tSXTH
- 135816319U, // tSpill
- 1277U, // tTPsoft
- 1296U, // tTRAP
- 739796245U, // tTST
- 739796444U, // tUXTB
- 739796449U, // tUXTH
- 538970305U, // tWFE
- 538970309U, // tWFI
- 538970313U, // tYIELD
- 0U
- };
-
- const char *AsmStrs =
- "DBG_VALUE\000adcs\t\000adc\000adds\000add\000@ ADJCALLSTACKDOWN \000@ A"
- "DJCALLSTACKUP \000and\000\000b\t\000bfc\000bfi\000bic\000bkpt\000bl\t\000"
- "blx\t\000bl\000mov\tlr, pc\n\tmov\tpc, \000bx\t\000add\tpc, \000ldr\tpc"
- ", \000mov\tpc, \000mov\tlr, pc\n\tbx\t\000bxj\000bx\000b\000cdp\000cdp2"
- "\tp\000clrex\000clz\000cmn\000cmp\000cps\000dbg\000dmb\tish\000dmb\tish"
- "st\000dmb\tnsh\000dmb\tnshst\000dmb\tosh\000dmb\toshst\000dmb\tst\000ds"
- "b\tish\000dsb\tishst\000dsb\tnsh\000dsb\tnshst\000dsb\tosh\000dsb\toshs"
- "t\000dsb\tst\000eor\000vmov\000vmrs\000isb\000mcr\tp15, 0, \000dmb\000d"
- "sb\000str\tsp, [\000ldc2\000ldc\000ldm\000ldr\000ldrb\000ldrbt\000ldrd\000"
- "ldrex\000ldrexb\000ldrexd\000ldrexh\000ldrh\000ldrht\000ldrsb\000ldrsbt"
- "\000ldrsh\000ldrsht\000ldrt\000.set \000mcr\000mcr2\tp\000mcrr\000mcrr2"
- "\tp\000mla\000mls\000mov\000movt\000movw\000movs\000mrc\000mrc2\tp\000m"
- "rrc\000mrrc2\tp\000mrs\000msr\000mul\000mvn\000nop\000orr\000\n\000pkhb"
- "t\000pkhtb\000pldw\t[\000pldw\t\000pld\t[\000pld\t\000pli\t[\000pli\t\000"
- "qadd\000qadd16\000qadd8\000qasx\000qdadd\000qdsub\000qsax\000qsub\000qs"
- "ub16\000qsub8\000rbit\000rev\000rev16\000revsh\000rfe\000rsbs\000rsb\000"
- "rscs\t\000rsc\000sadd16\000sadd8\000sasx\000sbcs\t\000sbc\000sbfx\000se"
- "l\000setend\tbe\000setend\tle\000sev\000shadd16\000shadd8\000shasx\000s"
- "hsax\000shsub16\000shsub8\000smc\000smlabb\000smlabt\000smlad\000smladx"
- "\000smlal\000smlalbb\000smlalbt\000smlald\000smlaldx\000smlaltb\000smla"
- "ltt\000smlatb\000smlatt\000smlawb\000smlawt\000smlsd\000smlsdx\000smlsl"
- "d\000smlsldx\000smmla\000smmlar\000smmls\000smmlsr\000smmul\000smmulr\000"
- "smuad\000smuadx\000smulbb\000smulbt\000smull\000smultb\000smultt\000smu"
- "lwb\000smulwt\000smusd\000smusdx\000srs\000ssat16\000ssat\000ssax\000ss"
- "ub16\000ssub8\000stc2\000stc\000stm\000str\000strb\000strbt\000strd\000"
- "strex\000strexb\000strexd\000strexh\000strh\000strht\000strt\000subs\000"
- "sub\000svc\000swp\000swpb\000sxtab16\000sxtab\000sxtah\000sxtb16\000sxt"
- "b\000sxth\000teq\000bl\t__aeabi_read_tp\000trap\000tst\000uadd16\000uad"
- "d8\000uasx\000ubfx\000uhadd16\000uhadd8\000uhasx\000uhsax\000uhsub16\000"
- "uhsub8\000umaal\000umlal\000umull\000uqadd16\000uqadd8\000uqasx\000uqsa"
- "x\000uqsub16\000uqsub8\000usad8\000usada8\000usat16\000usat\000usax\000"
- "usub16\000usub8\000uxtab16\000uxtab\000uxtah\000uxtb16\000uxtb\000uxth\000"
- "vabal\000vaba\000vabdl\000vabd\000vabs\000vacge\000vacgt\000vadd\000vad"
- "dhn\000vaddl\000vaddw\000vand\000vbic\000vbif\000vbit\000vbsl\000vceq\000"
- "vcge\000vcgt\000vcle\000vcls\000vclt\000vclz\000vcmp\000vcmpe\000vcnt\000"
- "vcvtb\000vcvt\000vcvtt\000vdiv\000vdup\000veor\000vext\000vhadd\000vhsu"
- "b\000vld1\000vld2\000vld3\000vld4\000vldm\000vldr\000vldmia\000vmax\000"
- "vmin\000vmla\000vmlal\000vmls\000vmlsl\000vmovl\000vmovn\000vmsr\000vmu"
- "l\000vmull\000vmvn\000vneg\000vnmla\000vnmls\000vnmul\000vorn\000vorr\000"
- "vpadal\000vpaddl\000vpadd\000vpmax\000vpmin\000vqabs\000vqadd\000vqdmla"
- "l\000vqdmlsl\000vqdmulh\000vqdmull\000vqmovun\000vqmovn\000vqneg\000vqr"
- "dmulh\000vqrshl\000vqrshrn\000vqrshrun\000vqshl\000vqshlu\000vqshrn\000"
- "vqshrun\000vqsub\000vraddhn\000vrecpe\000vrecps\000vrev16\000vrev32\000"
- "vrev64\000vrhadd\000vrshl\000vrshrn\000vrshr\000vrsqrte\000vrsqrts\000v"
- "rsra\000vrsubhn\000vshll\000vshl\000vshrn\000vshr\000vsli\000vsqrt\000v"
- "sra\000vsri\000vst1\000vst2\000vst3\000vst4\000vstm\000vstr\000vstmia\000"
- "vsub\000vsubhn\000vsubl\000vsubw\000vswp\000vtbl\000vtbx\000vcvtr\000vt"
- "rn\000vtst\000vuzp\000vzip\000wfe\000wfi\000yield\000addw\000asr\000b.w"
- "\t\000it\000str\t\000@ ldr.w\t\000adr\000lsl\000lsr\000ror\000rrx\000as"
- "rs.w\t\000lsrs.w\t\000orn\000pldw\000pld\000pli\000rfeab\000rfedb\000rf"
- "eia\000sdiv\000srsdb\000srsia\000subw\000@ subw\t\000@ sub.w\t\000@ sub"
- "\t\000tbb\t\000tbb\000tbh\t\000tbh\000udiv\000add\t\000@ add\t\000@ tAD"
- "JCALLSTACKDOWN \000@ tADJCALLSTACKUP \000@ and\t\000bkpt\t\000bx\tlr\000"
- "cbnz\t\000cbz\t\000@ ldr.n\t\000@ tMOVCCr \000movs\t\000mov\t\000pop\000"
- "push\000sub\t\000";
-
- O << "\t";
-
- // Emit the opcode for the instruction.
- unsigned Bits = OpInfo[MI->getOpcode()];
- assert(Bits != 0 && "Cannot print this instruction.");
- O << AsmStrs+(Bits & 4095)-1;
-
-
- // Fragment 0 encoded into 6 bits for 33 unique commands.
- switch ((Bits >> 26) & 63) {
- default: // unreachable.
- case 0:
- // DBG_VALUE, CLREX, DMBish, DMBishst, DMBnsh, DMBnshst, DMBosh, DMBoshst...
- return;
- break;
- case 1:
- // ADCSSri, ADCSSrr, ADCSSrs, ADJCALLSTACKDOWN, ADJCALLSTACKUP, B, BLX, B...
- printOperand(MI, 0);
- break;
- case 2:
- // ADCri, ADCrr, ADDSri, ADDSrr, ADDri, ADDrr, ANDri, ANDrr, BFC, BFI, BI...
- printPredicateOperand(MI, 3);
- break;
- case 3:
- // ADCrs, ADDSrs, ADDrs, ANDrs, BICrs, EORrs, LDC2L_OFFSET, LDC2L_POST, L...
- printPredicateOperand(MI, 5);
- break;
- case 4:
- // ATOMIC_CMP_SWAP_I16, ATOMIC_CMP_SWAP_I32, ATOMIC_CMP_SWAP_I8, ATOMIC_L...
- PrintSpecial(MI, "comment");
- break;
- case 5:
- // BKPT, BL_pred, BLr9_pred, BXJ, Bcc, DBG, MRS, MRSsys, MSR, MSRi, MSRsy...
- printPredicateOperand(MI, 1);
- break;
- case 6:
- // BL, BLr9, tBL, tBLXi, tBLXi_r9, tBLr9
- printOperand(MI, 0, "call");
- return;
- break;
- case 7:
- // BR_JTm, PLDWr, PLDr, PLIr
- printAddrMode2Operand(MI, 0);
- break;
- case 8:
- // BX_RET, FMSTAT, MOVPCLR, NOP, SEV, TRAP, WFE, WFI, YIELD, t2CLREX, t2D...
- printPredicateOperand(MI, 0);
- break;
- case 9:
- // CDP, LDRD_POST, LDRD_PRE, MCR, MRC, STRD_POST, STRD_PRE, VLD2d16, VLD2...
- printPredicateOperand(MI, 6);
- break;
- case 10:
- // CDP2, MCR2, MCRR2, MRC2, MRRC2
- printNoHashImmediate(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- break;
- case 11:
- // CLZ, CMNzri, CMNzrr, CMPri, CMPrr, CMPzri, CMPzrr, FCONSTD, FCONSTS, L...
- printPredicateOperand(MI, 2);
- break;
- case 12:
- // CMNzrs, CMPrs, CMPzrs, LDC2L_OPTION, LDC2_OPTION, LDCL_OPTION, LDC_OPT...
- printPredicateOperand(MI, 4);
- break;
- case 13:
- // CONSTPOOL_ENTRY
- printCPInstOperand(MI, 0, "label");
- O << ' ';
- printCPInstOperand(MI, 1, "cpentry");
- return;
- break;
- case 14:
- // CPS, t2CPS, tCPS
- printOperand(MI, 0, "cps");
- return;
- break;
- case 15:
- // LDM, LDM_RET, RFE, RFEW, SRS, SRSW, STM, t2LDM, t2LDM_RET, t2STM, tLDM...
- printAddrMode4Operand(MI, 0, "submode");
- break;
- case 16:
- // LEApcrel, LEApcrelJT
- PrintSpecial(MI, "private");
- O << "PCRELV";
- PrintSpecial(MI, "uid");
- O << ", (";
- printOperand(MI, 1);
- break;
- case 17:
- // PICADD, tPICADD
- printPCLabel(MI, 2);
- break;
- case 18:
- // PICLDR, PICLDRB, PICLDRH, PICLDRSB, PICLDRSH, PICSTR, PICSTRB, PICSTRH
- printAddrModePCOperand(MI, 1, "label");
- break;
- case 19:
- // VLD1d16Q, VLD1d32Q, VLD1d8Q, VLD2q16, VLD2q32, VLD2q8, VLD3q16a, VLD3q...
- printPredicateOperand(MI, 8);
- break;
- case 20:
- // VLD1d16T, VLD1d32T, VLD1d8T, VLD3d16, VLD3d32, VLD3d64, VLD3d8, VST1d1...
- printPredicateOperand(MI, 7);
- break;
- case 21:
- // VLD2LNd16, VLD2LNd32, VLD2LNd8, VLD2LNq16a, VLD2LNq16b, VLD2LNq32a, VL...
- printPredicateOperand(MI, 9);
- break;
- case 22:
- // VLD3LNd16, VLD3LNd32, VLD3LNd8, VLD3LNq16a, VLD3LNq16b, VLD3LNq32a, VL...
- printPredicateOperand(MI, 11);
- break;
- case 23:
- // VLD4LNd16, VLD4LNd32, VLD4LNd8, VLD4LNq16a, VLD4LNq16b, VLD4LNq32a, VL...
- printPredicateOperand(MI, 13);
- break;
- case 24:
- // VLDMD, VLDMS, VSTMD, VSTMS
- printAddrMode5Operand(MI, 0, "submode");
- printPredicateOperand(MI, 2);
- O << "\t";
- printAddrMode5Operand(MI, 0, "base");
- O << ", ";
- printRegisterList(MI, 4);
- return;
- break;
- case 25:
- // t2ADCSri, t2ADCSrr, t2ADCri, t2ADCrr, t2ADDrSPi, t2ADDri, t2ADDri12, t...
- printSBitModifierOperand(MI, 5);
- printPredicateOperand(MI, 3);
- break;
- case 26:
- // t2ADCSrs, t2ADCrs, t2ADDrSPs, t2ADDrs, t2ANDrs, t2BICrs, t2EORrs, t2OR...
- printSBitModifierOperand(MI, 6);
- printPredicateOperand(MI, 4);
- break;
- case 27:
- // t2IT
- printThumbITMask(MI, 1);
- O << "\t";
- printMandatoryPredicateOperand(MI, 0);
- return;
- break;
- case 28:
- // t2Int_eh_sjlj_setjmp, tInt_eh_sjlj_setjmp
- printOperand(MI, 1);
- O << ", [";
- printOperand(MI, 0);
- O << ", #8]\t@ begin eh.setjmp\n\tmov\t";
- printOperand(MI, 1);
- O << ", pc\n\tadds\t";
- printOperand(MI, 1);
- O << ", #9\n\tstr\t";
- printOperand(MI, 1);
- O << ", [";
- printOperand(MI, 0);
- O << ", #4]\n\tmovs\tr0, #0\n\tb\t1f\n\tmovs\tr0, #1\t@ end eh.setjmp\n1:";
- return;
- break;
- case 29:
- // t2MOVi, t2MOVr, t2MOVrx, t2MVNi, t2RSBSrs
- printSBitModifierOperand(MI, 4);
- break;
- case 30:
- // t2RSBSri
- printSBitModifierOperand(MI, 3);
- O << ".w\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- return;
- break;
- case 31:
- // t2TBB, t2TBH
- printTBAddrMode(MI, 0);
- O << "\n";
- printJT2BlockOperand(MI, 1);
- return;
- break;
- case 32:
- // tADC, tADDi3, tADDi8, tADDrr, tAND, tASRri, tASRrr, tBIC, tEOR, tLSLri...
- printSBitModifierOperand(MI, 1);
- break;
- }
-
-
- // Fragment 1 encoded into 7 bits for 120 unique commands.
- switch ((Bits >> 19) & 127) {
- default: // unreachable.
- case 0:
- // ADCSSri, ADCSSrr, ADCSSrs, BR_JTadd, MCR2, MCRR2, MRC2, MRRC2, PLDWi, ...
- O << ", ";
- break;
- case 1:
- // ADCri, ADCrr, ADDri, ADDrr, ANDri, ANDrr, BICri, BICrr, EORri, EORrr, ...
- printSBitModifierOperand(MI, 5);
- O << "\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- break;
- case 2:
- // ADCrs, ADDrs, ANDrs, BICrs, EORrs, ORRrs, RSBrs, RSCrs, SBCrs, SUBrs
- printSBitModifierOperand(MI, 7);
- O << "\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- printSORegOperand(MI, 2);
- return;
- break;
- case 3:
- // ADDSri, ADDSrr, ADDSrs, BFC, BFI, BKPT, BL_pred, BLr9_pred, BXJ, Bcc, ...
- O << "\t";
- break;
- case 4:
- // ADJCALLSTACKDOWN, ADJCALLSTACKUP, B, BLX, BLXr9, BMOVPCRX, BMOVPCRXr9,...
- return;
- break;
- case 5:
- // ATOMIC_CMP_SWAP_I16
- O << " ATOMIC_CMP_SWAP_I16 PSEUDO!";
- return;
- break;
- case 6:
- // ATOMIC_CMP_SWAP_I32
- O << " ATOMIC_CMP_SWAP_I32 PSEUDO!";
- return;
- break;
- case 7:
- // ATOMIC_CMP_SWAP_I8
- O << " ATOMIC_CMP_SWAP_I8 PSEUDO!";
- return;
- break;
- case 8:
- // ATOMIC_LOAD_ADD_I16
- O << " ATOMIC_LOAD_ADD_I16 PSEUDO!";
- return;
- break;
- case 9:
- // ATOMIC_LOAD_ADD_I32
- O << " ATOMIC_LOAD_ADD_I32 PSEUDO!";
- return;
- break;
- case 10:
- // ATOMIC_LOAD_ADD_I8
- O << " ATOMIC_LOAD_ADD_I8 PSEUDO!";
- return;
- break;
- case 11:
- // ATOMIC_LOAD_AND_I16
- O << " ATOMIC_LOAD_AND_I16 PSEUDO!";
- return;
- break;
- case 12:
- // ATOMIC_LOAD_AND_I32
- O << " ATOMIC_LOAD_AND_I32 PSEUDO!";
- return;
- break;
- case 13:
- // ATOMIC_LOAD_AND_I8
- O << " ATOMIC_LOAD_AND_I8 PSEUDO!";
- return;
- break;
- case 14:
- // ATOMIC_LOAD_NAND_I16
- O << " ATOMIC_LOAD_NAND_I16 PSEUDO!";
- return;
- break;
- case 15:
- // ATOMIC_LOAD_NAND_I32
- O << " ATOMIC_LOAD_NAND_I32 PSEUDO!";
- return;
- break;
- case 16:
- // ATOMIC_LOAD_NAND_I8
- O << " ATOMIC_LOAD_NAND_I8 PSEUDO!";
- return;
- break;
- case 17:
- // ATOMIC_LOAD_OR_I16
- O << " ATOMIC_LOAD_OR_I16 PSEUDO!";
- return;
- break;
- case 18:
- // ATOMIC_LOAD_OR_I32
- O << " ATOMIC_LOAD_OR_I32 PSEUDO!";
- return;
- break;
- case 19:
- // ATOMIC_LOAD_OR_I8
- O << " ATOMIC_LOAD_OR_I8 PSEUDO!";
- return;
- break;
- case 20:
- // ATOMIC_LOAD_SUB_I16
- O << " ATOMIC_LOAD_SUB_I16 PSEUDO!";
- return;
- break;
- case 21:
- // ATOMIC_LOAD_SUB_I32
- O << " ATOMIC_LOAD_SUB_I32 PSEUDO!";
- return;
- break;
- case 22:
- // ATOMIC_LOAD_SUB_I8
- O << " ATOMIC_LOAD_SUB_I8 PSEUDO!";
- return;
- break;
- case 23:
- // ATOMIC_LOAD_XOR_I16
- O << " ATOMIC_LOAD_XOR_I16 PSEUDO!";
- return;
- break;
- case 24:
- // ATOMIC_LOAD_XOR_I32
- O << " ATOMIC_LOAD_XOR_I32 PSEUDO!";
- return;
- break;
- case 25:
- // ATOMIC_LOAD_XOR_I8
- O << " ATOMIC_LOAD_XOR_I8 PSEUDO!";
- return;
- break;
- case 26:
- // ATOMIC_SWAP_I16
- O << " ATOMIC_SWAP_I16 PSEUDO!";
- return;
- break;
- case 27:
- // ATOMIC_SWAP_I32
- O << " ATOMIC_SWAP_I32 PSEUDO!";
- return;
- break;
- case 28:
- // ATOMIC_SWAP_I8
- O << " ATOMIC_SWAP_I8 PSEUDO!";
- return;
- break;
- case 29:
- // BR_JTm, BR_JTr
- O << " \n";
- break;
- case 30:
- // BX_RET
- O << "\tlr";
- return;
- break;
- case 31:
- // CDP, LDC2_OFFSET, LDC2_OPTION, LDC2_POST, LDC2_PRE, LDC_OFFSET, LDC_OP...
- O << "\tp";
- printNoHashImmediate(MI, 0);
- break;
- case 32:
- // CDP2
- O << ", cr";
- printNoHashImmediate(MI, 2);
- O << ", cr";
- printNoHashImmediate(MI, 3);
- O << ", cr";
- printNoHashImmediate(MI, 4);
- O << ", ";
- printOperand(MI, 5);
- return;
- break;
- case 33:
- // FCONSTD, VABSD, VADDD, VCMPD, VCMPED, VCMPEZD, VCMPZD, VDIVD, VMLAD, V...
- O << ".f64\t";
- printOperand(MI, 0);
- break;
- case 34:
- // FCONSTS, VABDfd, VABDfq, VABSS, VABSfd, VABSfd_sfp, VABSfq, VACGEd, VA...
- O << ".f32\t";
- printOperand(MI, 0);
- break;
- case 35:
- // FMSTAT
- O << "\tapsr_nzcv, fpscr";
- return;
- break;
- case 36:
- // Int_MemBarrierV6
- O << ", c7, c10, 5";
- return;
- break;
- case 37:
- // Int_SyncBarrierV6
- O << ", c7, c10, 4";
- return;
- break;
- case 38:
- // Int_eh_sjlj_setjmp
- O << ", #+8] @ eh_setjmp begin\n\tadd\t";
- printOperand(MI, 1);
- O << ", pc, #8\n\tstr\t";
- printOperand(MI, 1);
- O << ", [";
- printOperand(MI, 0);
- O << ", #+4]\n\tmov\tr0, #0\n\tadd\tpc, pc, #0\n\tmov\tr0, #1 @ eh_setjmp end";
- return;
- break;
- case 39:
- // LDC2L_OFFSET, LDC2L_OPTION, LDC2L_POST, LDC2L_PRE, LDCL_OFFSET, LDCL_O...
- O << "l\tp";
- printNoHashImmediate(MI, 0);
- O << ", cr";
- printNoHashImmediate(MI, 1);
- break;
- case 40:
- // LDM, LDM_RET, STM, t2LDM, t2LDM_RET, t2MOVi, t2MOVr, t2MOVrx, t2MVNi, ...
- printPredicateOperand(MI, 2);
- break;
- case 41:
- // LEApcrel
- O << "-(";
- PrintSpecial(MI, "private");
- O << "PCRELL";
- PrintSpecial(MI, "uid");
- O << "+8))\n";
- PrintSpecial(MI, "private");
- O << "PCRELL";
- PrintSpecial(MI, "uid");
- O << ":\n\tadd";
- printPredicateOperand(MI, 2);
- O << "\t";
- printOperand(MI, 0);
- O << ", pc, #";
- PrintSpecial(MI, "private");
- O << "PCRELV";
- PrintSpecial(MI, "uid");
- return;
- break;
- case 42:
- // LEApcrelJT
- O << '_';
- printNoHashImmediate(MI, 2);
- O << "-(";
- PrintSpecial(MI, "private");
- O << "PCRELL";
- PrintSpecial(MI, "uid");
- O << "+8))\n";
- PrintSpecial(MI, "private");
- O << "PCRELL";
- PrintSpecial(MI, "uid");
- O << ":\n\tadd";
- printPredicateOperand(MI, 3);
- O << "\t";
- printOperand(MI, 0);
- O << ", pc, #";
- PrintSpecial(MI, "private");
- O << "PCRELV";
- PrintSpecial(MI, "uid");
- return;
- break;
- case 43:
- // MLA, MOVs, MVNs, SMLAL, SMULL, UMLAL, UMULL
- printSBitModifierOperand(MI, 6);
- O << "\t";
- printOperand(MI, 0);
- O << ", ";
- break;
- case 44:
- // MOVPCLR
- O << "\tpc, lr";
- return;
- break;
- case 45:
- // MOVi, MOVr, MOVrx, MVNi, MVNr
- printSBitModifierOperand(MI, 4);
- O << "\t";
- printOperand(MI, 0);
- O << ", ";
- break;
- case 46:
- // MSR, MSRi, t2MSR
- O << "\tcpsr, ";
- break;
- case 47:
- // MSRsys, MSRsysi, t2MSRsys
- O << "\tspsr, ";
- break;
- case 48:
- // PICADD
- O << ":\n\tadd";
- printPredicateOperand(MI, 3);
- O << "\t";
- printOperand(MI, 0);
- O << ", pc, ";
- printOperand(MI, 1);
- return;
- break;
- case 49:
- // PICLDR
- O << ":\n\tldr";
- printPredicateOperand(MI, 3);
- O << "\t";
- printOperand(MI, 0);
- O << ", ";
- printAddrModePCOperand(MI, 1);
- return;
- break;
- case 50:
- // PICLDRB
- O << ":\n\tldrb";
- printPredicateOperand(MI, 3);
- O << "\t";
- printOperand(MI, 0);
- O << ", ";
- printAddrModePCOperand(MI, 1);
- return;
- break;
- case 51:
- // PICLDRH
- O << ":\n\tldrh";
- printPredicateOperand(MI, 3);
- O << "\t";
- printOperand(MI, 0);
- O << ", ";
- printAddrModePCOperand(MI, 1);
- return;
- break;
- case 52:
- // PICLDRSB
- O << ":\n\tldrsb";
- printPredicateOperand(MI, 3);
- O << "\t";
- printOperand(MI, 0);
- O << ", ";
- printAddrModePCOperand(MI, 1);
- return;
- break;
- case 53:
- // PICLDRSH
- O << ":\n\tldrsh";
- printPredicateOperand(MI, 3);
- O << "\t";
- printOperand(MI, 0);
- O << ", ";
- printAddrModePCOperand(MI, 1);
- return;
- break;
- case 54:
- // PICSTR
- O << ":\n\tstr";
- printPredicateOperand(MI, 3);
- O << "\t";
- printOperand(MI, 0);
- O << ", ";
- printAddrModePCOperand(MI, 1);
- return;
- break;
- case 55:
- // PICSTRB
- O << ":\n\tstrb";
- printPredicateOperand(MI, 3);
- O << "\t";
- printOperand(MI, 0);
- O << ", ";
- printAddrModePCOperand(MI, 1);
- return;
- break;
- case 56:
- // PICSTRH
- O << ":\n\tstrh";
- printPredicateOperand(MI, 3);
- O << "\t";
- printOperand(MI, 0);
- O << ", ";
- printAddrModePCOperand(MI, 1);
- return;
- break;
- case 57:
- // SRS, t2SRSDB, t2SRSIA
- O << "\tsp, ";
- break;
- case 58:
- // SRSW, t2SRSDBW, t2SRSIAW
- O << "\tsp!, ";
- break;
- case 59:
- // VABALsv2i64, VABAsv2i32, VABAsv4i32, VABDLsv2i64, VABDsv2i32, VABDsv4i...
- O << ".s32\t";
- printOperand(MI, 0);
- O << ", ";
- break;
- case 60:
- // VABALsv4i32, VABAsv4i16, VABAsv8i16, VABDLsv4i32, VABDsv4i16, VABDsv8i...
- O << ".s16\t";
- printOperand(MI, 0);
- O << ", ";
- break;
- case 61:
- // VABALsv8i16, VABAsv16i8, VABAsv8i8, VABDLsv8i16, VABDsv16i8, VABDsv8i8...
- O << ".s8\t";
- printOperand(MI, 0);
- O << ", ";
- break;
- case 62:
- // VABALuv2i64, VABAuv2i32, VABAuv4i32, VABDLuv2i64, VABDuv2i32, VABDuv4i...
- O << ".u32\t";
- printOperand(MI, 0);
- O << ", ";
- break;
- case 63:
- // VABALuv4i32, VABAuv4i16, VABAuv8i16, VABDLuv4i32, VABDuv4i16, VABDuv8i...
- O << ".u16\t";
- printOperand(MI, 0);
- O << ", ";
- break;
- case 64:
- // VABALuv8i16, VABAuv16i8, VABAuv8i8, VABDLuv8i16, VABDuv16i8, VABDuv8i8...
- O << ".u8\t";
- printOperand(MI, 0);
- O << ", ";
- break;
- case 65:
- // VADDHNv2i32, VADDv1i64, VADDv2i64, VMOVNv2i32, VMOVv1i64, VMOVv2i64, V...
- O << ".i64\t";
- printOperand(MI, 0);
- O << ", ";
- break;
- case 66:
- // VADDHNv4i16, VADDv2i32, VADDv4i32, VCEQv2i32, VCEQv4i32, VCEQzv2i32, V...
- O << ".i32\t";
- printOperand(MI, 0);
- O << ", ";
- break;
- case 67:
- // VADDHNv8i8, VADDv4i16, VADDv8i16, VCEQv4i16, VCEQv8i16, VCEQzv4i16, VC...
- O << ".i16\t";
- printOperand(MI, 0);
- O << ", ";
- break;
- case 68:
- // VADDv16i8, VADDv8i8, VCEQv16i8, VCEQv8i8, VCEQzv16i8, VCEQzv8i8, VCLZv...
- O << ".i8\t";
- printOperand(MI, 0);
- O << ", ";
- break;
- case 69:
- // VCNTd, VCNTq, VDUP8d, VDUP8q, VDUPLN8d, VDUPLN8q, VEXTd8, VEXTq8, VLD1...
- O << ".8\t";
- break;
- case 70:
- // VCVTBHS, VCVTTHS
- O << ".f16.f32\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- return;
- break;
- case 71:
- // VCVTBSH, VCVTTSH
- O << ".f32.f16\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- return;
- break;
- case 72:
- // VCVTDS
- O << ".f64.f32\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- return;
- break;
- case 73:
- // VCVTSD
- O << ".f32.f64\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- return;
- break;
- case 74:
- // VCVTf2sd, VCVTf2sd_sfp, VCVTf2sq, VCVTf2xsd, VCVTf2xsq, VTOSIRS, VTOSI...
- O << ".s32.f32\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- break;
- case 75:
- // VCVTf2ud, VCVTf2ud_sfp, VCVTf2uq, VCVTf2xud, VCVTf2xuq, VTOUIRS, VTOUI...
- O << ".u32.f32\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- break;
- case 76:
- // VCVTs2fd, VCVTs2fd_sfp, VCVTs2fq, VCVTxs2fd, VCVTxs2fq, VSITOS, VSLTOS
- O << ".f32.s32\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- break;
- case 77:
- // VCVTu2fd, VCVTu2fd_sfp, VCVTu2fq, VCVTxu2fd, VCVTxu2fq, VUITOS, VULTOS
- O << ".f32.u32\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- break;
- case 78:
- // VDUP16d, VDUP16q, VDUPLN16d, VDUPLN16q, VEXTd16, VEXTq16, VLD1q16, VRE...
- O << ".16\t";
- break;
- case 79:
- // VDUP32d, VDUP32q, VDUPLN32d, VDUPLN32q, VDUPLNfd, VDUPLNfq, VDUPfd, VD...
- O << ".32\t";
- break;
- case 80:
- // VLD1d16, VLD1d16Q, VLD1d16T, VLD2LNd16, VLD2LNq16a, VLD2LNq16b, VLD2d1...
- O << ".16\t{";
- break;
- case 81:
- // VLD1d32, VLD1d32Q, VLD1d32T, VLD1df, VLD2LNd32, VLD2LNq32a, VLD2LNq32b...
- O << ".32\t{";
- break;
- case 82:
- // VLD1d64, VLD2d64, VLD3d64, VLD4d64, VST1d64, VST2d64, VST3d64, VST4d64
- O << ".64\t{";
- break;
- case 83:
- // VLD1d8, VLD1d8Q, VLD1d8T, VLD2LNd8, VLD2d8, VLD2d8D, VLD2q8, VLD3LNd8,...
- O << ".8\t{";
- break;
- case 84:
- // VLD1q64, VLDRD, VSLIv1i64, VSLIv2i64, VSRIv1i64, VSRIv2i64, VST1q64, V...
- O << ".64\t";
- break;
- case 85:
- // VMSR
- O << "\tfpscr, ";
- printOperand(MI, 0);
- return;
- break;
- case 86:
- // VMULLp, VMULpd, VMULpq
- O << ".p8\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- return;
- break;
- case 87:
- // VQADDsv1i64, VQADDsv2i64, VQMOVNsuv2i32, VQMOVNsv2i32, VQRSHLsv1i64, V...
- O << ".s64\t";
- printOperand(MI, 0);
- O << ", ";
- break;
- case 88:
- // VQADDuv1i64, VQADDuv2i64, VQMOVNuv2i32, VQRSHLuv1i64, VQRSHLuv2i64, VQ...
- O << ".u64\t";
- printOperand(MI, 0);
- O << ", ";
- break;
- case 89:
- // VSHTOD
- O << ".f64.s16\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- return;
- break;
- case 90:
- // VSHTOS
- O << ".f32.s16\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- return;
- break;
- case 91:
- // VSITOD, VSLTOD
- O << ".f64.s32\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- break;
- case 92:
- // VTOSHD
- O << ".s16.f64\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- return;
- break;
- case 93:
- // VTOSHS
- O << ".s16.f32\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- return;
- break;
- case 94:
- // VTOSIRD, VTOSIZD, VTOSLD
- O << ".s32.f64\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- break;
- case 95:
- // VTOUHD
- O << ".u16.f64\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- return;
- break;
- case 96:
- // VTOUHS
- O << ".u16.f32\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- return;
- break;
- case 97:
- // VTOUIRD, VTOUIZD, VTOULD
- O << ".u32.f64\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- break;
- case 98:
- // VUHTOD
- O << ".f64.u16\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- return;
- break;
- case 99:
- // VUHTOS
- O << ".f32.u16\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- return;
- break;
- case 100:
- // VUITOD, VULTOD
- O << ".f64.u32\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- break;
- case 101:
- // t2ADCSrr, t2ADCSrs, t2ADCrr, t2ADCrs, t2ADDSri, t2ADDSrr, t2ADDSrs, t2...
- O << ".w\t";
- printOperand(MI, 0);
- break;
- case 102:
- // t2BR_JT
- O << "\n";
- printJT2BlockOperand(MI, 2);
- return;
- break;
- case 103:
- // t2DMBish, t2DSBish
- O << "\tish";
- return;
- break;
- case 104:
- // t2DMBishst, t2DSBishst
- O << "\tishst";
- return;
- break;
- case 105:
- // t2DMBnsh, t2DSBnsh
- O << "\tnsh";
- return;
- break;
- case 106:
- // t2DMBnshst, t2DSBnshst
- O << "\tnshst";
- return;
- break;
- case 107:
- // t2DMBosh, t2DSBosh
- O << "\tosh";
- return;
- break;
- case 108:
- // t2DMBoshst, t2DSBoshst
- O << "\toshst";
- return;
- break;
- case 109:
- // t2DMBst, t2DSBst
- O << "\tst";
- return;
- break;
- case 110:
- // t2NOP, t2SEV, t2WFE, t2WFI, t2YIELD
- O << ".w";
- return;
- break;
- case 111:
- // t2PLDWpci, t2PLDpci, t2PLIpci
- O << "\t[pc, ";
- printOperand(MI, 1, "negzero");
- O << ']';
- return;
- break;
- case 112:
- // t2PLDWr, t2PLDWs, t2PLDr, t2PLDs, t2PLIr, t2PLIs, t2TBBgen, t2TBHgen
- O << "\t[";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- break;
- case 113:
- // tADC, tADDi3, tADDi8, tADDrr, tAND, tASRri, tASRrr, tBIC, tEOR, tLSLri...
- printPredicateOperand(MI, 4);
- O << "\t";
- printOperand(MI, 0);
- O << ", ";
- break;
- case 114:
- // tADDrPCi
- O << ", pc, ";
- printThumbS4ImmOperand(MI, 1);
- return;
- break;
- case 115:
- // tBR_JTr
- O << "\n\t.align\t2\n";
- printJTBlockOperand(MI, 1);
- return;
- break;
- case 116:
- // tBfar
- O << "\t@ far jump";
- return;
- break;
- case 117:
- // tLDRpci
- O << ".n\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- return;
- break;
- case 118:
- // tMOVi8, tMVN, tRSB
- printPredicateOperand(MI, 3);
- O << "\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 2);
- break;
- case 119:
- // tPICADD
- O << ":\n\tadd\t";
- printOperand(MI, 0);
- O << ", pc";
- return;
- break;
- }
-
-
- // Fragment 2 encoded into 6 bits for 36 unique commands.
- switch ((Bits >> 13) & 63) {
- default: // unreachable.
- case 0:
- // ADCSSri, ADCSSrr, ADCSSrs, BR_JTadd, MLA, MOVr, MOVrx, MVNr, PLDWi, PL...
- printOperand(MI, 1);
- break;
- case 1:
- // ADCri, ADDri, ANDri, BICri, EORri, ORRri, RSBri, RSCri, SBCri, SUBri
- printSOImmOperand(MI, 2);
- return;
- break;
- case 2:
- // ADCrr, ADDrr, ANDrr, BICrr, EORrr, MCR2, MCRR2, MRC2, MRRC2, MUL, ORRr...
- printOperand(MI, 2);
- break;
- case 3:
- // ADDSri, ADDSrr, ADDSrs, BFC, BFI, BKPT, BXJ, Bcc, CLZ, CMNzri, CMNzrr,...
- printOperand(MI, 0);
- break;
- case 4:
- // BL_pred, BLr9_pred
- printOperand(MI, 0, "call");
- return;
- break;
- case 5:
- // BR_JTm
- printJTBlockOperand(MI, 3);
- return;
- break;
- case 6:
- // BR_JTr
- printJTBlockOperand(MI, 1);
- return;
- break;
- case 7:
- // CDP, FCONSTD, FCONSTS, LDC2L_OFFSET, LDC2L_PRE, LDCL_OFFSET, LDCL_PRE,...
- O << ", ";
- break;
- case 8:
- // LDC2L_OPTION, LDC2L_POST, LDCL_OPTION, LDCL_POST, STC2L_OPTION, STC2L_...
- O << ", [";
- printOperand(MI, 2);
- O << "], ";
- break;
- case 9:
- // LDC2_OFFSET, LDC2_OPTION, LDC2_POST, LDC2_PRE, LDC_OFFSET, LDC_OPTION,...
- O << ", cr";
- printNoHashImmediate(MI, 1);
- break;
- case 10:
- // LDM, LDM_RET, STM, t2MOVrx, t2MVNi, tLDM, tSTM
- O << "\t";
- break;
- case 11:
- // MOVi, MVNi
- printSOImmOperand(MI, 1);
- return;
- break;
- case 12:
- // MOVs, MVNs
- printSORegOperand(MI, 1);
- return;
- break;
- case 13:
- // MSRi, MSRsysi
- printSOImmOperand(MI, 0);
- return;
- break;
- case 14:
- // VCMPEZD, VCMPEZS, VCMPZD, VCMPZS, tRSB
- O << ", #0";
- return;
- break;
- case 15:
- // VCVTf2sd, VCVTf2sd_sfp, VCVTf2sq, VCVTf2ud, VCVTf2ud_sfp, VCVTf2uq, VC...
- return;
- break;
- case 16:
- // VLD1q16, VLD1q32, VLD1q64, VLD1q8, VLD1qf
- printOperand(MI, 0, "dregpair");
- O << ", ";
- printAddrMode6Operand(MI, 1);
- return;
- break;
- case 17:
- // VLDRQ, VSTRQ
- printAddrMode4Operand(MI, 1);
- O << ", ";
- printOperand(MI, 0, "dregpair");
- return;
- break;
- case 18:
- // VMOVv16i8, VMOVv8i8
- printHex8ImmOperand(MI, 1);
- return;
- break;
- case 19:
- // VMOVv1i64, VMOVv2i64
- printHex64ImmOperand(MI, 1);
- return;
- break;
- case 20:
- // VMOVv2i32, VMOVv4i32
- printHex32ImmOperand(MI, 1);
- return;
- break;
- case 21:
- // VMOVv4i16, VMOVv8i16
- printHex16ImmOperand(MI, 1);
- return;
- break;
- case 22:
- // VST1d16, VST1d16Q, VST1d16T, VST1d32, VST1d32Q, VST1d32T, VST1d64, VST...
- printOperand(MI, 4);
- break;
- case 23:
- // VST1q16, VST1q32, VST1q64, VST1q8, VST1qf
- printOperand(MI, 4, "dregpair");
- O << ", ";
- printAddrMode6Operand(MI, 0);
- return;
- break;
- case 24:
- // VST3q16a, VST3q16b, VST3q32a, VST3q32b, VST3q8a, VST3q8b, VST4q16a, VS...
- printOperand(MI, 5);
- O << ", ";
- printOperand(MI, 6);
- O << ", ";
- printOperand(MI, 7);
- break;
- case 25:
- // t2LDM, t2LDM_RET, t2STM
- printAddrMode4Operand(MI, 0, "wide");
- O << "\t";
- printAddrMode4Operand(MI, 0);
- O << ", ";
- printRegisterList(MI, 4);
- return;
- break;
- case 26:
- // t2LEApcrel, t2LEApcrelJT
- O << ", #";
- printOperand(MI, 1);
- break;
- case 27:
- // t2MOVi, t2MOVr
- O << ".w\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- return;
- break;
- case 28:
- // t2PLDWi12, t2PLDi12, t2PLIi12
- printT2AddrModeImm12Operand(MI, 0);
- return;
- break;
- case 29:
- // t2PLDWi8, t2PLDi8, t2PLIi8
- printT2AddrModeImm8Operand(MI, 0);
- return;
- break;
- case 30:
- // t2PLDWr, t2PLDr, t2PLIr, t2TBBgen
- O << ']';
- return;
- break;
- case 31:
- // t2PLDWs, t2PLDs, t2PLIs
- O << ", lsl ";
- printOperand(MI, 2);
- O << ']';
- return;
- break;
- case 32:
- // t2TBHgen
- O << ", lsl #1]";
- return;
- break;
- case 33:
- // tADC, tADDi8, tAND, tASRrr, tBIC, tEOR, tLSLrr, tLSRrr, tMUL, tORR, tR...
- printOperand(MI, 3);
- break;
- case 34:
- // tADDspi, tSUBspi, tSUBspi_
- printThumbS4ImmOperand(MI, 2);
- return;
- break;
- case 35:
- // tPOP, tPOP_RET, tPUSH
- printRegisterList(MI, 2);
- return;
- break;
- }
-
- switch (MI->getOpcode()) {
- case ARM::ADCSSri:
- case ARM::ADCSSrr:
- case ARM::ADCSSrs:
- case ARM::BFC:
- case ARM::CLZ:
- case ARM::CMNzri:
- case ARM::CMNzrr:
- case ARM::CMNzrs:
- case ARM::CMPri:
- case ARM::CMPrr:
- case ARM::CMPrs:
- case ARM::CMPzri:
- case ARM::CMPzrr:
- case ARM::CMPzrs:
- case ARM::LDC2_OFFSET:
- case ARM::LDC_OFFSET:
- case ARM::LDR:
- case ARM::LDRB:
- case ARM::LDRD:
- case ARM::LDRH:
- case ARM::LDRSB:
- case ARM::LDRSH:
- case ARM::LDRcp:
- case ARM::MOVCCi:
- case ARM::MOVCCr:
- case ARM::MOVCCs:
- case ARM::MOVTi16:
- case ARM::MOVi16:
- case ARM::MOVi2pieces:
- case ARM::RBIT:
- case ARM::REV:
- case ARM::REV16:
- case ARM::REVSH:
- case ARM::RSCSri:
- case ARM::RSCSrs:
- case ARM::SBCSSri:
- case ARM::SBCSSrr:
- case ARM::SBCSSrs:
- case ARM::STC2_OFFSET:
- case ARM::STC_OFFSET:
- case ARM::STR:
- case ARM::STRB:
- case ARM::STRD:
- case ARM::STRH:
- case ARM::SXTB16r:
- case ARM::SXTBr:
- case ARM::SXTHr:
- case ARM::TEQri:
- case ARM::TEQrr:
- case ARM::TEQrs:
- case ARM::TSTri:
- case ARM::TSTrr:
- case ARM::TSTrs:
- case ARM::UXTB16r:
- case ARM::UXTBr:
- case ARM::UXTHr:
- case ARM::VABALsv2i64:
- case ARM::VABALsv4i32:
- case ARM::VABALsv8i16:
- case ARM::VABALuv2i64:
- case ARM::VABALuv4i32:
- case ARM::VABALuv8i16:
- case ARM::VABAsv16i8:
- case ARM::VABAsv2i32:
- case ARM::VABAsv4i16:
- case ARM::VABAsv4i32:
- case ARM::VABAsv8i16:
- case ARM::VABAsv8i8:
- case ARM::VABAuv16i8:
- case ARM::VABAuv2i32:
- case ARM::VABAuv4i16:
- case ARM::VABAuv4i32:
- case ARM::VABAuv8i16:
- case ARM::VABAuv8i8:
- case ARM::VABDLsv2i64:
- case ARM::VABDLsv4i32:
- case ARM::VABDLsv8i16:
- case ARM::VABDLuv2i64:
- case ARM::VABDLuv4i32:
- case ARM::VABDLuv8i16:
- case ARM::VABDsv16i8:
- case ARM::VABDsv2i32:
- case ARM::VABDsv4i16:
- case ARM::VABDsv4i32:
- case ARM::VABDsv8i16:
- case ARM::VABDsv8i8:
- case ARM::VABDuv16i8:
- case ARM::VABDuv2i32:
- case ARM::VABDuv4i16:
- case ARM::VABDuv4i32:
- case ARM::VABDuv8i16:
- case ARM::VABDuv8i8:
- case ARM::VADDHNv2i32:
- case ARM::VADDHNv4i16:
- case ARM::VADDHNv8i8:
- case ARM::VADDLsv2i64:
- case ARM::VADDLsv4i32:
- case ARM::VADDLsv8i16:
- case ARM::VADDLuv2i64:
- case ARM::VADDLuv4i32:
- case ARM::VADDLuv8i16:
- case ARM::VADDWsv2i64:
- case ARM::VADDWsv4i32:
- case ARM::VADDWsv8i16:
- case ARM::VADDWuv2i64:
- case ARM::VADDWuv4i32:
- case ARM::VADDWuv8i16:
- case ARM::VADDv16i8:
- case ARM::VADDv1i64:
- case ARM::VADDv2i32:
- case ARM::VADDv2i64:
- case ARM::VADDv4i16:
- case ARM::VADDv4i32:
- case ARM::VADDv8i16:
- case ARM::VADDv8i8:
- case ARM::VCEQv16i8:
- case ARM::VCEQv2i32:
- case ARM::VCEQv4i16:
- case ARM::VCEQv4i32:
- case ARM::VCEQv8i16:
- case ARM::VCEQv8i8:
- case ARM::VCGEsv16i8:
- case ARM::VCGEsv2i32:
- case ARM::VCGEsv4i16:
- case ARM::VCGEsv4i32:
- case ARM::VCGEsv8i16:
- case ARM::VCGEsv8i8:
- case ARM::VCGEuv16i8:
- case ARM::VCGEuv2i32:
- case ARM::VCGEuv4i16:
- case ARM::VCGEuv4i32:
- case ARM::VCGEuv8i16:
- case ARM::VCGEuv8i8:
- case ARM::VCGTsv16i8:
- case ARM::VCGTsv2i32:
- case ARM::VCGTsv4i16:
- case ARM::VCGTsv4i32:
- case ARM::VCGTsv8i16:
- case ARM::VCGTsv8i8:
- case ARM::VCGTuv16i8:
- case ARM::VCGTuv2i32:
- case ARM::VCGTuv4i16:
- case ARM::VCGTuv4i32:
- case ARM::VCGTuv8i16:
- case ARM::VCGTuv8i8:
- case ARM::VCNTd:
- case ARM::VCNTq:
- case ARM::VDUP16d:
- case ARM::VDUP16q:
- case ARM::VDUP32d:
- case ARM::VDUP32q:
- case ARM::VDUP8d:
- case ARM::VDUP8q:
- case ARM::VDUPfd:
- case ARM::VDUPfdf:
- case ARM::VDUPfq:
- case ARM::VDUPfqf:
- case ARM::VHADDsv16i8:
- case ARM::VHADDsv2i32:
- case ARM::VHADDsv4i16:
- case ARM::VHADDsv4i32:
- case ARM::VHADDsv8i16:
- case ARM::VHADDsv8i8:
- case ARM::VHADDuv16i8:
- case ARM::VHADDuv2i32:
- case ARM::VHADDuv4i16:
- case ARM::VHADDuv4i32:
- case ARM::VHADDuv8i16:
- case ARM::VHADDuv8i8:
- case ARM::VHSUBsv16i8:
- case ARM::VHSUBsv2i32:
- case ARM::VHSUBsv4i16:
- case ARM::VHSUBsv4i32:
- case ARM::VHSUBsv8i16:
- case ARM::VHSUBsv8i8:
- case ARM::VHSUBuv16i8:
- case ARM::VHSUBuv2i32:
- case ARM::VHSUBuv4i16:
- case ARM::VHSUBuv4i32:
- case ARM::VHSUBuv8i16:
- case ARM::VHSUBuv8i8:
- case ARM::VLDRD:
- case ARM::VLDRS:
- case ARM::VMAXsv16i8:
- case ARM::VMAXsv2i32:
- case ARM::VMAXsv4i16:
- case ARM::VMAXsv4i32:
- case ARM::VMAXsv8i16:
- case ARM::VMAXsv8i8:
- case ARM::VMAXuv16i8:
- case ARM::VMAXuv2i32:
- case ARM::VMAXuv4i16:
- case ARM::VMAXuv4i32:
- case ARM::VMAXuv8i16:
- case ARM::VMAXuv8i8:
- case ARM::VMINsv16i8:
- case ARM::VMINsv2i32:
- case ARM::VMINsv4i16:
- case ARM::VMINsv4i32:
- case ARM::VMINsv8i16:
- case ARM::VMINsv8i8:
- case ARM::VMINuv16i8:
- case ARM::VMINuv2i32:
- case ARM::VMINuv4i16:
- case ARM::VMINuv4i32:
- case ARM::VMINuv8i16:
- case ARM::VMINuv8i8:
- case ARM::VMLALsv2i64:
- case ARM::VMLALsv4i32:
- case ARM::VMLALsv8i16:
- case ARM::VMLALuv2i64:
- case ARM::VMLALuv4i32:
- case ARM::VMLALuv8i16:
- case ARM::VMLAv16i8:
- case ARM::VMLAv2i32:
- case ARM::VMLAv4i16:
- case ARM::VMLAv4i32:
- case ARM::VMLAv8i16:
- case ARM::VMLAv8i8:
- case ARM::VMLSLsv2i64:
- case ARM::VMLSLsv4i32:
- case ARM::VMLSLsv8i16:
- case ARM::VMLSLuv2i64:
- case ARM::VMLSLuv4i32:
- case ARM::VMLSLuv8i16:
- case ARM::VMLSv16i8:
- case ARM::VMLSv2i32:
- case ARM::VMLSv4i16:
- case ARM::VMLSv4i32:
- case ARM::VMLSv8i16:
- case ARM::VMLSv8i8:
- case ARM::VMOVDneon:
- case ARM::VMOVQ:
- case ARM::VMOVRS:
- case ARM::VMOVSR:
- case ARM::VMULLsv2i64:
- case ARM::VMULLsv4i32:
- case ARM::VMULLsv8i16:
- case ARM::VMULLuv2i64:
- case ARM::VMULLuv4i32:
- case ARM::VMULLuv8i16:
- case ARM::VMULv16i8:
- case ARM::VMULv2i32:
- case ARM::VMULv4i16:
- case ARM::VMULv4i32:
- case ARM::VMULv8i16:
- case ARM::VMULv8i8:
- case ARM::VMVNd:
- case ARM::VMVNq:
- case ARM::VPADDi16:
- case ARM::VPADDi32:
- case ARM::VPADDi8:
- case ARM::VPMAXs16:
- case ARM::VPMAXs32:
- case ARM::VPMAXs8:
- case ARM::VPMAXu16:
- case ARM::VPMAXu32:
- case ARM::VPMAXu8:
- case ARM::VPMINs16:
- case ARM::VPMINs32:
- case ARM::VPMINs8:
- case ARM::VPMINu16:
- case ARM::VPMINu32:
- case ARM::VPMINu8:
- case ARM::VQADDsv16i8:
- case ARM::VQADDsv1i64:
- case ARM::VQADDsv2i32:
- case ARM::VQADDsv2i64:
- case ARM::VQADDsv4i16:
- case ARM::VQADDsv4i32:
- case ARM::VQADDsv8i16:
- case ARM::VQADDsv8i8:
- case ARM::VQADDuv16i8:
- case ARM::VQADDuv1i64:
- case ARM::VQADDuv2i32:
- case ARM::VQADDuv2i64:
- case ARM::VQADDuv4i16:
- case ARM::VQADDuv4i32:
- case ARM::VQADDuv8i16:
- case ARM::VQADDuv8i8:
- case ARM::VQDMLALv2i64:
- case ARM::VQDMLALv4i32:
- case ARM::VQDMLSLv2i64:
- case ARM::VQDMLSLv4i32:
- case ARM::VQDMULHv2i32:
- case ARM::VQDMULHv4i16:
- case ARM::VQDMULHv4i32:
- case ARM::VQDMULHv8i16:
- case ARM::VQDMULLv2i64:
- case ARM::VQDMULLv4i32:
- case ARM::VQRDMULHv2i32:
- case ARM::VQRDMULHv4i16:
- case ARM::VQRDMULHv4i32:
- case ARM::VQRDMULHv8i16:
- case ARM::VQRSHLsv16i8:
- case ARM::VQRSHLsv1i64:
- case ARM::VQRSHLsv2i32:
- case ARM::VQRSHLsv2i64:
- case ARM::VQRSHLsv4i16:
- case ARM::VQRSHLsv4i32:
- case ARM::VQRSHLsv8i16:
- case ARM::VQRSHLsv8i8:
- case ARM::VQRSHLuv16i8:
- case ARM::VQRSHLuv1i64:
- case ARM::VQRSHLuv2i32:
- case ARM::VQRSHLuv2i64:
- case ARM::VQRSHLuv4i16:
- case ARM::VQRSHLuv4i32:
- case ARM::VQRSHLuv8i16:
- case ARM::VQRSHLuv8i8:
- case ARM::VQRSHRNsv2i32:
- case ARM::VQRSHRNsv4i16:
- case ARM::VQRSHRNsv8i8:
- case ARM::VQRSHRNuv2i32:
- case ARM::VQRSHRNuv4i16:
- case ARM::VQRSHRNuv8i8:
- case ARM::VQRSHRUNv2i32:
- case ARM::VQRSHRUNv4i16:
- case ARM::VQRSHRUNv8i8:
- case ARM::VQSHLsiv16i8:
- case ARM::VQSHLsiv1i64:
- case ARM::VQSHLsiv2i32:
- case ARM::VQSHLsiv2i64:
- case ARM::VQSHLsiv4i16:
- case ARM::VQSHLsiv4i32:
- case ARM::VQSHLsiv8i16:
- case ARM::VQSHLsiv8i8:
- case ARM::VQSHLsuv16i8:
- case ARM::VQSHLsuv1i64:
- case ARM::VQSHLsuv2i32:
- case ARM::VQSHLsuv2i64:
- case ARM::VQSHLsuv4i16:
- case ARM::VQSHLsuv4i32:
- case ARM::VQSHLsuv8i16:
- case ARM::VQSHLsuv8i8:
- case ARM::VQSHLsv16i8:
- case ARM::VQSHLsv1i64:
- case ARM::VQSHLsv2i32:
- case ARM::VQSHLsv2i64:
- case ARM::VQSHLsv4i16:
- case ARM::VQSHLsv4i32:
- case ARM::VQSHLsv8i16:
- case ARM::VQSHLsv8i8:
- case ARM::VQSHLuiv16i8:
- case ARM::VQSHLuiv1i64:
- case ARM::VQSHLuiv2i32:
- case ARM::VQSHLuiv2i64:
- case ARM::VQSHLuiv4i16:
- case ARM::VQSHLuiv4i32:
- case ARM::VQSHLuiv8i16:
- case ARM::VQSHLuiv8i8:
- case ARM::VQSHLuv16i8:
- case ARM::VQSHLuv1i64:
- case ARM::VQSHLuv2i32:
- case ARM::VQSHLuv2i64:
- case ARM::VQSHLuv4i16:
- case ARM::VQSHLuv4i32:
- case ARM::VQSHLuv8i16:
- case ARM::VQSHLuv8i8:
- case ARM::VQSHRNsv2i32:
- case ARM::VQSHRNsv4i16:
- case ARM::VQSHRNsv8i8:
- case ARM::VQSHRNuv2i32:
- case ARM::VQSHRNuv4i16:
- case ARM::VQSHRNuv8i8:
- case ARM::VQSHRUNv2i32:
- case ARM::VQSHRUNv4i16:
- case ARM::VQSHRUNv8i8:
- case ARM::VQSUBsv16i8:
- case ARM::VQSUBsv1i64:
- case ARM::VQSUBsv2i32:
- case ARM::VQSUBsv2i64:
- case ARM::VQSUBsv4i16:
- case ARM::VQSUBsv4i32:
- case ARM::VQSUBsv8i16:
- case ARM::VQSUBsv8i8:
- case ARM::VQSUBuv16i8:
- case ARM::VQSUBuv1i64:
- case ARM::VQSUBuv2i32:
- case ARM::VQSUBuv2i64:
- case ARM::VQSUBuv4i16:
- case ARM::VQSUBuv4i32:
- case ARM::VQSUBuv8i16:
- case ARM::VQSUBuv8i8:
- case ARM::VRADDHNv2i32:
- case ARM::VRADDHNv4i16:
- case ARM::VRADDHNv8i8:
- case ARM::VREV16d8:
- case ARM::VREV16q8:
- case ARM::VREV32d16:
- case ARM::VREV32d8:
- case ARM::VREV32q16:
- case ARM::VREV32q8:
- case ARM::VREV64d16:
- case ARM::VREV64d32:
- case ARM::VREV64d8:
- case ARM::VREV64df:
- case ARM::VREV64q16:
- case ARM::VREV64q32:
- case ARM::VREV64q8:
- case ARM::VREV64qf:
- case ARM::VRHADDsv16i8:
- case ARM::VRHADDsv2i32:
- case ARM::VRHADDsv4i16:
- case ARM::VRHADDsv4i32:
- case ARM::VRHADDsv8i16:
- case ARM::VRHADDsv8i8:
- case ARM::VRHADDuv16i8:
- case ARM::VRHADDuv2i32:
- case ARM::VRHADDuv4i16:
- case ARM::VRHADDuv4i32:
- case ARM::VRHADDuv8i16:
- case ARM::VRHADDuv8i8:
- case ARM::VRSHLsv16i8:
- case ARM::VRSHLsv1i64:
- case ARM::VRSHLsv2i32:
- case ARM::VRSHLsv2i64:
- case ARM::VRSHLsv4i16:
- case ARM::VRSHLsv4i32:
- case ARM::VRSHLsv8i16:
- case ARM::VRSHLsv8i8:
- case ARM::VRSHLuv16i8:
- case ARM::VRSHLuv1i64:
- case ARM::VRSHLuv2i32:
- case ARM::VRSHLuv2i64:
- case ARM::VRSHLuv4i16:
- case ARM::VRSHLuv4i32:
- case ARM::VRSHLuv8i16:
- case ARM::VRSHLuv8i8:
- case ARM::VRSHRNv2i32:
- case ARM::VRSHRNv4i16:
- case ARM::VRSHRNv8i8:
- case ARM::VRSHRsv16i8:
- case ARM::VRSHRsv1i64:
- case ARM::VRSHRsv2i32:
- case ARM::VRSHRsv2i64:
- case ARM::VRSHRsv4i16:
- case ARM::VRSHRsv4i32:
- case ARM::VRSHRsv8i16:
- case ARM::VRSHRsv8i8:
- case ARM::VRSHRuv16i8:
- case ARM::VRSHRuv1i64:
- case ARM::VRSHRuv2i32:
- case ARM::VRSHRuv2i64:
- case ARM::VRSHRuv4i16:
- case ARM::VRSHRuv4i32:
- case ARM::VRSHRuv8i16:
- case ARM::VRSHRuv8i8:
- case ARM::VRSRAsv16i8:
- case ARM::VRSRAsv1i64:
- case ARM::VRSRAsv2i32:
- case ARM::VRSRAsv2i64:
- case ARM::VRSRAsv4i16:
- case ARM::VRSRAsv4i32:
- case ARM::VRSRAsv8i16:
- case ARM::VRSRAsv8i8:
- case ARM::VRSRAuv16i8:
- case ARM::VRSRAuv1i64:
- case ARM::VRSRAuv2i32:
- case ARM::VRSRAuv2i64:
- case ARM::VRSRAuv4i16:
- case ARM::VRSRAuv4i32:
- case ARM::VRSRAuv8i16:
- case ARM::VRSRAuv8i8:
- case ARM::VRSUBHNv2i32:
- case ARM::VRSUBHNv4i16:
- case ARM::VRSUBHNv8i8:
- case ARM::VSHLLi16:
- case ARM::VSHLLi32:
- case ARM::VSHLLi8:
- case ARM::VSHLLsv2i64:
- case ARM::VSHLLsv4i32:
- case ARM::VSHLLsv8i16:
- case ARM::VSHLLuv2i64:
- case ARM::VSHLLuv4i32:
- case ARM::VSHLLuv8i16:
- case ARM::VSHLiv16i8:
- case ARM::VSHLiv1i64:
- case ARM::VSHLiv2i32:
- case ARM::VSHLiv2i64:
- case ARM::VSHLiv4i16:
- case ARM::VSHLiv4i32:
- case ARM::VSHLiv8i16:
- case ARM::VSHLiv8i8:
- case ARM::VSHLsv16i8:
- case ARM::VSHLsv1i64:
- case ARM::VSHLsv2i32:
- case ARM::VSHLsv2i64:
- case ARM::VSHLsv4i16:
- case ARM::VSHLsv4i32:
- case ARM::VSHLsv8i16:
- case ARM::VSHLsv8i8:
- case ARM::VSHLuv16i8:
- case ARM::VSHLuv1i64:
- case ARM::VSHLuv2i32:
- case ARM::VSHLuv2i64:
- case ARM::VSHLuv4i16:
- case ARM::VSHLuv4i32:
- case ARM::VSHLuv8i16:
- case ARM::VSHLuv8i8:
- case ARM::VSHRNv2i32:
- case ARM::VSHRNv4i16:
- case ARM::VSHRNv8i8:
- case ARM::VSHRsv16i8:
- case ARM::VSHRsv1i64:
- case ARM::VSHRsv2i32:
- case ARM::VSHRsv2i64:
- case ARM::VSHRsv4i16:
- case ARM::VSHRsv4i32:
- case ARM::VSHRsv8i16:
- case ARM::VSHRsv8i8:
- case ARM::VSHRuv16i8:
- case ARM::VSHRuv1i64:
- case ARM::VSHRuv2i32:
- case ARM::VSHRuv2i64:
- case ARM::VSHRuv4i16:
- case ARM::VSHRuv4i32:
- case ARM::VSHRuv8i16:
- case ARM::VSHRuv8i8:
- case ARM::VSRAsv16i8:
- case ARM::VSRAsv1i64:
- case ARM::VSRAsv2i32:
- case ARM::VSRAsv2i64:
- case ARM::VSRAsv4i16:
- case ARM::VSRAsv4i32:
- case ARM::VSRAsv8i16:
- case ARM::VSRAsv8i8:
- case ARM::VSRAuv16i8:
- case ARM::VSRAuv1i64:
- case ARM::VSRAuv2i32:
- case ARM::VSRAuv2i64:
- case ARM::VSRAuv4i16:
- case ARM::VSRAuv4i32:
- case ARM::VSRAuv8i16:
- case ARM::VSRAuv8i8:
- case ARM::VSTRD:
- case ARM::VSTRS:
- case ARM::VSUBHNv2i32:
- case ARM::VSUBHNv4i16:
- case ARM::VSUBHNv8i8:
- case ARM::VSUBLsv2i64:
- case ARM::VSUBLsv4i32:
- case ARM::VSUBLsv8i16:
- case ARM::VSUBLuv2i64:
- case ARM::VSUBLuv4i32:
- case ARM::VSUBLuv8i16:
- case ARM::VSUBWsv2i64:
- case ARM::VSUBWsv4i32:
- case ARM::VSUBWsv8i16:
- case ARM::VSUBWuv2i64:
- case ARM::VSUBWuv4i32:
- case ARM::VSUBWuv8i16:
- case ARM::VSUBv16i8:
- case ARM::VSUBv1i64:
- case ARM::VSUBv2i32:
- case ARM::VSUBv2i64:
- case ARM::VSUBv4i16:
- case ARM::VSUBv4i32:
- case ARM::VSUBv8i16:
- case ARM::VSUBv8i8:
- case ARM::VSWPd:
- case ARM::VSWPq:
- case ARM::VTRNd16:
- case ARM::VTRNd32:
- case ARM::VTRNd8:
- case ARM::VTRNq16:
- case ARM::VTRNq32:
- case ARM::VTRNq8:
- case ARM::VUZPd16:
- case ARM::VUZPd32:
- case ARM::VUZPd8:
- case ARM::VUZPq16:
- case ARM::VUZPq32:
- case ARM::VUZPq8:
- case ARM::VZIPd16:
- case ARM::VZIPd32:
- case ARM::VZIPd8:
- case ARM::VZIPq16:
- case ARM::VZIPq32:
- case ARM::VZIPq8:
- case ARM::t2BFC:
- case ARM::t2CLZ:
- case ARM::t2LDRBT:
- case ARM::t2LDRBi8:
- case ARM::t2LDRDi8:
- case ARM::t2LDRDpci:
- case ARM::t2LDRHT:
- case ARM::t2LDRHi8:
- case ARM::t2LDRSBT:
- case ARM::t2LDRSBi8:
- case ARM::t2LDRSHT:
- case ARM::t2LDRSHi8:
- case ARM::t2LDRT:
- case ARM::t2LDRi8:
- case ARM::t2MOVTi16:
- case ARM::t2MOVi16:
- case ARM::t2RBIT:
- case ARM::t2STRBT:
- case ARM::t2STRBi8:
- case ARM::t2STRDi8:
- case ARM::t2STRHT:
- case ARM::t2STRHi8:
- case ARM::t2STRT:
- case ARM::t2STRi8:
- case ARM::t2SUBrSPi12_:
- case ARM::t2SUBrSPi_:
- case ARM::t2SUBrSPs_:
- case ARM::t2SXTB16r:
- case ARM::t2UXTB16r:
- case ARM::tADDhirr:
- case ARM::tADDi3:
- case ARM::tADDrSPi:
- case ARM::tADDrr:
- case ARM::tASRri:
- case ARM::tCMNz:
- case ARM::tCMPhir:
- case ARM::tCMPi8:
- case ARM::tCMPr:
- case ARM::tCMPzhir:
- case ARM::tCMPzi8:
- case ARM::tCMPzr:
- case ARM::tLDR:
- case ARM::tLDRB:
- case ARM::tLDRBi:
- case ARM::tLDRH:
- case ARM::tLDRHi:
- case ARM::tLDRSB:
- case ARM::tLDRSH:
- case ARM::tLDRcp:
- case ARM::tLDRi:
- case ARM::tLDRspi:
- case ARM::tLSLri:
- case ARM::tLSRri:
- case ARM::tMOVCCi:
- case ARM::tMOVCCr:
- case ARM::tMUL:
- case ARM::tREV:
- case ARM::tREV16:
- case ARM::tREVSH:
- case ARM::tRestore:
- case ARM::tSTR:
- case ARM::tSTRB:
- case ARM::tSTRBi:
- case ARM::tSTRH:
- case ARM::tSTRHi:
- case ARM::tSTRi:
- case ARM::tSTRspi:
- case ARM::tSUBi3:
- case ARM::tSUBrr:
- case ARM::tSXTB:
- case ARM::tSXTH:
- case ARM::tSpill:
- case ARM::tTST:
- case ARM::tUXTB:
- case ARM::tUXTH:
- O << ", ";
- switch (MI->getOpcode()) {
- case ARM::ADCSSri:
- case ARM::MOVCCi:
- case ARM::RSCSri:
- case ARM::SBCSSri: printSOImmOperand(MI, 2); break;
- case ARM::ADCSSrr:
- case ARM::MOVCCr:
- case ARM::MOVTi16:
- case ARM::SBCSSrr:
- case ARM::VABDLsv2i64:
- case ARM::VABDLsv4i32:
- case ARM::VABDLsv8i16:
- case ARM::VABDLuv2i64:
- case ARM::VABDLuv4i32:
- case ARM::VABDLuv8i16:
- case ARM::VABDsv16i8:
- case ARM::VABDsv2i32:
- case ARM::VABDsv4i16:
- case ARM::VABDsv4i32:
- case ARM::VABDsv8i16:
- case ARM::VABDsv8i8:
- case ARM::VABDuv16i8:
- case ARM::VABDuv2i32:
- case ARM::VABDuv4i16:
- case ARM::VABDuv4i32:
- case ARM::VABDuv8i16:
- case ARM::VABDuv8i8:
- case ARM::VADDHNv2i32:
- case ARM::VADDHNv4i16:
- case ARM::VADDHNv8i8:
- case ARM::VADDLsv2i64:
- case ARM::VADDLsv4i32:
- case ARM::VADDLsv8i16:
- case ARM::VADDLuv2i64:
- case ARM::VADDLuv4i32:
- case ARM::VADDLuv8i16:
- case ARM::VADDWsv2i64:
- case ARM::VADDWsv4i32:
- case ARM::VADDWsv8i16:
- case ARM::VADDWuv2i64:
- case ARM::VADDWuv4i32:
- case ARM::VADDWuv8i16:
- case ARM::VADDv16i8:
- case ARM::VADDv1i64:
- case ARM::VADDv2i32:
- case ARM::VADDv2i64:
- case ARM::VADDv4i16:
- case ARM::VADDv4i32:
- case ARM::VADDv8i16:
- case ARM::VADDv8i8:
- case ARM::VCEQv16i8:
- case ARM::VCEQv2i32:
- case ARM::VCEQv4i16:
- case ARM::VCEQv4i32:
- case ARM::VCEQv8i16:
- case ARM::VCEQv8i8:
- case ARM::VCGEsv16i8:
- case ARM::VCGEsv2i32:
- case ARM::VCGEsv4i16:
- case ARM::VCGEsv4i32:
- case ARM::VCGEsv8i16:
- case ARM::VCGEsv8i8:
- case ARM::VCGEuv16i8:
- case ARM::VCGEuv2i32:
- case ARM::VCGEuv4i16:
- case ARM::VCGEuv4i32:
- case ARM::VCGEuv8i16:
- case ARM::VCGEuv8i8:
- case ARM::VCGTsv16i8:
- case ARM::VCGTsv2i32:
- case ARM::VCGTsv4i16:
- case ARM::VCGTsv4i32:
- case ARM::VCGTsv8i16:
- case ARM::VCGTsv8i8:
- case ARM::VCGTuv16i8:
- case ARM::VCGTuv2i32:
- case ARM::VCGTuv4i16:
- case ARM::VCGTuv4i32:
- case ARM::VCGTuv8i16:
- case ARM::VCGTuv8i8:
- case ARM::VHADDsv16i8:
- case ARM::VHADDsv2i32:
- case ARM::VHADDsv4i16:
- case ARM::VHADDsv4i32:
- case ARM::VHADDsv8i16:
- case ARM::VHADDsv8i8:
- case ARM::VHADDuv16i8:
- case ARM::VHADDuv2i32:
- case ARM::VHADDuv4i16:
- case ARM::VHADDuv4i32:
- case ARM::VHADDuv8i16:
- case ARM::VHADDuv8i8:
- case ARM::VHSUBsv16i8:
- case ARM::VHSUBsv2i32:
- case ARM::VHSUBsv4i16:
- case ARM::VHSUBsv4i32:
- case ARM::VHSUBsv8i16:
- case ARM::VHSUBsv8i8:
- case ARM::VHSUBuv16i8:
- case ARM::VHSUBuv2i32:
- case ARM::VHSUBuv4i16:
- case ARM::VHSUBuv4i32:
- case ARM::VHSUBuv8i16:
- case ARM::VHSUBuv8i8:
- case ARM::VMAXsv16i8:
- case ARM::VMAXsv2i32:
- case ARM::VMAXsv4i16:
- case ARM::VMAXsv4i32:
- case ARM::VMAXsv8i16:
- case ARM::VMAXsv8i8:
- case ARM::VMAXuv16i8:
- case ARM::VMAXuv2i32:
- case ARM::VMAXuv4i16:
- case ARM::VMAXuv4i32:
- case ARM::VMAXuv8i16:
- case ARM::VMAXuv8i8:
- case ARM::VMINsv16i8:
- case ARM::VMINsv2i32:
- case ARM::VMINsv4i16:
- case ARM::VMINsv4i32:
- case ARM::VMINsv8i16:
- case ARM::VMINsv8i8:
- case ARM::VMINuv16i8:
- case ARM::VMINuv2i32:
- case ARM::VMINuv4i16:
- case ARM::VMINuv4i32:
- case ARM::VMINuv8i16:
- case ARM::VMINuv8i8:
- case ARM::VMULLsv2i64:
- case ARM::VMULLsv4i32:
- case ARM::VMULLsv8i16:
- case ARM::VMULLuv2i64:
- case ARM::VMULLuv4i32:
- case ARM::VMULLuv8i16:
- case ARM::VMULv16i8:
- case ARM::VMULv2i32:
- case ARM::VMULv4i16:
- case ARM::VMULv4i32:
- case ARM::VMULv8i16:
- case ARM::VMULv8i8:
- case ARM::VPADDi16:
- case ARM::VPADDi32:
- case ARM::VPADDi8:
- case ARM::VPMAXs16:
- case ARM::VPMAXs32:
- case ARM::VPMAXs8:
- case ARM::VPMAXu16:
- case ARM::VPMAXu32:
- case ARM::VPMAXu8:
- case ARM::VPMINs16:
- case ARM::VPMINs32:
- case ARM::VPMINs8:
- case ARM::VPMINu16:
- case ARM::VPMINu32:
- case ARM::VPMINu8:
- case ARM::VQADDsv16i8:
- case ARM::VQADDsv1i64:
- case ARM::VQADDsv2i32:
- case ARM::VQADDsv2i64:
- case ARM::VQADDsv4i16:
- case ARM::VQADDsv4i32:
- case ARM::VQADDsv8i16:
- case ARM::VQADDsv8i8:
- case ARM::VQADDuv16i8:
- case ARM::VQADDuv1i64:
- case ARM::VQADDuv2i32:
- case ARM::VQADDuv2i64:
- case ARM::VQADDuv4i16:
- case ARM::VQADDuv4i32:
- case ARM::VQADDuv8i16:
- case ARM::VQADDuv8i8:
- case ARM::VQDMULHv2i32:
- case ARM::VQDMULHv4i16:
- case ARM::VQDMULHv4i32:
- case ARM::VQDMULHv8i16:
- case ARM::VQDMULLv2i64:
- case ARM::VQDMULLv4i32:
- case ARM::VQRDMULHv2i32:
- case ARM::VQRDMULHv4i16:
- case ARM::VQRDMULHv4i32:
- case ARM::VQRDMULHv8i16:
- case ARM::VQRSHLsv16i8:
- case ARM::VQRSHLsv1i64:
- case ARM::VQRSHLsv2i32:
- case ARM::VQRSHLsv2i64:
- case ARM::VQRSHLsv4i16:
- case ARM::VQRSHLsv4i32:
- case ARM::VQRSHLsv8i16:
- case ARM::VQRSHLsv8i8:
- case ARM::VQRSHLuv16i8:
- case ARM::VQRSHLuv1i64:
- case ARM::VQRSHLuv2i32:
- case ARM::VQRSHLuv2i64:
- case ARM::VQRSHLuv4i16:
- case ARM::VQRSHLuv4i32:
- case ARM::VQRSHLuv8i16:
- case ARM::VQRSHLuv8i8:
- case ARM::VQRSHRNsv2i32:
- case ARM::VQRSHRNsv4i16:
- case ARM::VQRSHRNsv8i8:
- case ARM::VQRSHRNuv2i32:
- case ARM::VQRSHRNuv4i16:
- case ARM::VQRSHRNuv8i8:
- case ARM::VQRSHRUNv2i32:
- case ARM::VQRSHRUNv4i16:
- case ARM::VQRSHRUNv8i8:
- case ARM::VQSHLsiv16i8:
- case ARM::VQSHLsiv1i64:
- case ARM::VQSHLsiv2i32:
- case ARM::VQSHLsiv2i64:
- case ARM::VQSHLsiv4i16:
- case ARM::VQSHLsiv4i32:
- case ARM::VQSHLsiv8i16:
- case ARM::VQSHLsiv8i8:
- case ARM::VQSHLsuv16i8:
- case ARM::VQSHLsuv1i64:
- case ARM::VQSHLsuv2i32:
- case ARM::VQSHLsuv2i64:
- case ARM::VQSHLsuv4i16:
- case ARM::VQSHLsuv4i32:
- case ARM::VQSHLsuv8i16:
- case ARM::VQSHLsuv8i8:
- case ARM::VQSHLsv16i8:
- case ARM::VQSHLsv1i64:
- case ARM::VQSHLsv2i32:
- case ARM::VQSHLsv2i64:
- case ARM::VQSHLsv4i16:
- case ARM::VQSHLsv4i32:
- case ARM::VQSHLsv8i16:
- case ARM::VQSHLsv8i8:
- case ARM::VQSHLuiv16i8:
- case ARM::VQSHLuiv1i64:
- case ARM::VQSHLuiv2i32:
- case ARM::VQSHLuiv2i64:
- case ARM::VQSHLuiv4i16:
- case ARM::VQSHLuiv4i32:
- case ARM::VQSHLuiv8i16:
- case ARM::VQSHLuiv8i8:
- case ARM::VQSHLuv16i8:
- case ARM::VQSHLuv1i64:
- case ARM::VQSHLuv2i32:
- case ARM::VQSHLuv2i64:
- case ARM::VQSHLuv4i16:
- case ARM::VQSHLuv4i32:
- case ARM::VQSHLuv8i16:
- case ARM::VQSHLuv8i8:
- case ARM::VQSHRNsv2i32:
- case ARM::VQSHRNsv4i16:
- case ARM::VQSHRNsv8i8:
- case ARM::VQSHRNuv2i32:
- case ARM::VQSHRNuv4i16:
- case ARM::VQSHRNuv8i8:
- case ARM::VQSHRUNv2i32:
- case ARM::VQSHRUNv4i16:
- case ARM::VQSHRUNv8i8:
- case ARM::VQSUBsv16i8:
- case ARM::VQSUBsv1i64:
- case ARM::VQSUBsv2i32:
- case ARM::VQSUBsv2i64:
- case ARM::VQSUBsv4i16:
- case ARM::VQSUBsv4i32:
- case ARM::VQSUBsv8i16:
- case ARM::VQSUBsv8i8:
- case ARM::VQSUBuv16i8:
- case ARM::VQSUBuv1i64:
- case ARM::VQSUBuv2i32:
- case ARM::VQSUBuv2i64:
- case ARM::VQSUBuv4i16:
- case ARM::VQSUBuv4i32:
- case ARM::VQSUBuv8i16:
- case ARM::VQSUBuv8i8:
- case ARM::VRADDHNv2i32:
- case ARM::VRADDHNv4i16:
- case ARM::VRADDHNv8i8:
- case ARM::VRHADDsv16i8:
- case ARM::VRHADDsv2i32:
- case ARM::VRHADDsv4i16:
- case ARM::VRHADDsv4i32:
- case ARM::VRHADDsv8i16:
- case ARM::VRHADDsv8i8:
- case ARM::VRHADDuv16i8:
- case ARM::VRHADDuv2i32:
- case ARM::VRHADDuv4i16:
- case ARM::VRHADDuv4i32:
- case ARM::VRHADDuv8i16:
- case ARM::VRHADDuv8i8:
- case ARM::VRSHLsv16i8:
- case ARM::VRSHLsv1i64:
- case ARM::VRSHLsv2i32:
- case ARM::VRSHLsv2i64:
- case ARM::VRSHLsv4i16:
- case ARM::VRSHLsv4i32:
- case ARM::VRSHLsv8i16:
- case ARM::VRSHLsv8i8:
- case ARM::VRSHLuv16i8:
- case ARM::VRSHLuv1i64:
- case ARM::VRSHLuv2i32:
- case ARM::VRSHLuv2i64:
- case ARM::VRSHLuv4i16:
- case ARM::VRSHLuv4i32:
- case ARM::VRSHLuv8i16:
- case ARM::VRSHLuv8i8:
- case ARM::VRSHRNv2i32:
- case ARM::VRSHRNv4i16:
- case ARM::VRSHRNv8i8:
- case ARM::VRSHRsv16i8:
- case ARM::VRSHRsv1i64:
- case ARM::VRSHRsv2i32:
- case ARM::VRSHRsv2i64:
- case ARM::VRSHRsv4i16:
- case ARM::VRSHRsv4i32:
- case ARM::VRSHRsv8i16:
- case ARM::VRSHRsv8i8:
- case ARM::VRSHRuv16i8:
- case ARM::VRSHRuv1i64:
- case ARM::VRSHRuv2i32:
- case ARM::VRSHRuv2i64:
- case ARM::VRSHRuv4i16:
- case ARM::VRSHRuv4i32:
- case ARM::VRSHRuv8i16:
- case ARM::VRSHRuv8i8:
- case ARM::VRSUBHNv2i32:
- case ARM::VRSUBHNv4i16:
- case ARM::VRSUBHNv8i8:
- case ARM::VSHLLi16:
- case ARM::VSHLLi32:
- case ARM::VSHLLi8:
- case ARM::VSHLLsv2i64:
- case ARM::VSHLLsv4i32:
- case ARM::VSHLLsv8i16:
- case ARM::VSHLLuv2i64:
- case ARM::VSHLLuv4i32:
- case ARM::VSHLLuv8i16:
- case ARM::VSHLiv16i8:
- case ARM::VSHLiv1i64:
- case ARM::VSHLiv2i32:
- case ARM::VSHLiv2i64:
- case ARM::VSHLiv4i16:
- case ARM::VSHLiv4i32:
- case ARM::VSHLiv8i16:
- case ARM::VSHLiv8i8:
- case ARM::VSHLsv16i8:
- case ARM::VSHLsv1i64:
- case ARM::VSHLsv2i32:
- case ARM::VSHLsv2i64:
- case ARM::VSHLsv4i16:
- case ARM::VSHLsv4i32:
- case ARM::VSHLsv8i16:
- case ARM::VSHLsv8i8:
- case ARM::VSHLuv16i8:
- case ARM::VSHLuv1i64:
- case ARM::VSHLuv2i32:
- case ARM::VSHLuv2i64:
- case ARM::VSHLuv4i16:
- case ARM::VSHLuv4i32:
- case ARM::VSHLuv8i16:
- case ARM::VSHLuv8i8:
- case ARM::VSHRNv2i32:
- case ARM::VSHRNv4i16:
- case ARM::VSHRNv8i8:
- case ARM::VSHRsv16i8:
- case ARM::VSHRsv1i64:
- case ARM::VSHRsv2i32:
- case ARM::VSHRsv2i64:
- case ARM::VSHRsv4i16:
- case ARM::VSHRsv4i32:
- case ARM::VSHRsv8i16:
- case ARM::VSHRsv8i8:
- case ARM::VSHRuv16i8:
- case ARM::VSHRuv1i64:
- case ARM::VSHRuv2i32:
- case ARM::VSHRuv2i64:
- case ARM::VSHRuv4i16:
- case ARM::VSHRuv4i32:
- case ARM::VSHRuv8i16:
- case ARM::VSHRuv8i8:
- case ARM::VSUBHNv2i32:
- case ARM::VSUBHNv4i16:
- case ARM::VSUBHNv8i8:
- case ARM::VSUBLsv2i64:
- case ARM::VSUBLsv4i32:
- case ARM::VSUBLsv8i16:
- case ARM::VSUBLuv2i64:
- case ARM::VSUBLuv4i32:
- case ARM::VSUBLuv8i16:
- case ARM::VSUBWsv2i64:
- case ARM::VSUBWsv4i32:
- case ARM::VSUBWsv8i16:
- case ARM::VSUBWuv2i64:
- case ARM::VSUBWuv4i32:
- case ARM::VSUBWuv8i16:
- case ARM::VSUBv16i8:
- case ARM::VSUBv1i64:
- case ARM::VSUBv2i32:
- case ARM::VSUBv2i64:
- case ARM::VSUBv4i16:
- case ARM::VSUBv4i32:
- case ARM::VSUBv8i16:
- case ARM::VSUBv8i8:
- case ARM::t2LDRDpci:
- case ARM::t2MOVTi16:
- case ARM::t2SUBrSPi12_:
- case ARM::t2SUBrSPi_:
- case ARM::tADDhirr:
- case ARM::tMOVCCi:
- case ARM::tMOVCCr: printOperand(MI, 2); break;
- case ARM::ADCSSrs:
- case ARM::MOVCCs:
- case ARM::RSCSrs:
- case ARM::SBCSSrs: printSORegOperand(MI, 2); break;
- case ARM::BFC:
- case ARM::t2BFC: printBitfieldInvMaskImmOperand(MI, 2); break;
- case ARM::CLZ:
- case ARM::CMNzrr:
- case ARM::CMPrr:
- case ARM::CMPzrr:
- case ARM::MOVi16:
- case ARM::RBIT:
- case ARM::REV:
- case ARM::REV16:
- case ARM::REVSH:
- case ARM::SXTB16r:
- case ARM::SXTBr:
- case ARM::SXTHr:
- case ARM::TEQrr:
- case ARM::TSTrr:
- case ARM::UXTB16r:
- case ARM::UXTBr:
- case ARM::UXTHr:
- case ARM::VCNTd:
- case ARM::VCNTq:
- case ARM::VDUP16d:
- case ARM::VDUP16q:
- case ARM::VDUP32d:
- case ARM::VDUP32q:
- case ARM::VDUP8d:
- case ARM::VDUP8q:
- case ARM::VDUPfd:
- case ARM::VDUPfq:
- case ARM::VMOVDneon:
- case ARM::VMOVQ:
- case ARM::VMOVRS:
- case ARM::VMOVSR:
- case ARM::VMVNd:
- case ARM::VMVNq:
- case ARM::VREV16d8:
- case ARM::VREV16q8:
- case ARM::VREV32d16:
- case ARM::VREV32d8:
- case ARM::VREV32q16:
- case ARM::VREV32q8:
- case ARM::VREV64d16:
- case ARM::VREV64d32:
- case ARM::VREV64d8:
- case ARM::VREV64df:
- case ARM::VREV64q16:
- case ARM::VREV64q32:
- case ARM::VREV64q8:
- case ARM::VREV64qf:
- case ARM::VSWPd:
- case ARM::VSWPq:
- case ARM::VTRNd16:
- case ARM::VTRNd32:
- case ARM::VTRNd8:
- case ARM::VTRNq16:
- case ARM::VTRNq32:
- case ARM::VTRNq8:
- case ARM::VUZPd16:
- case ARM::VUZPd32:
- case ARM::VUZPd8:
- case ARM::VUZPq16:
- case ARM::VUZPq32:
- case ARM::VUZPq8:
- case ARM::VZIPd16:
- case ARM::VZIPd32:
- case ARM::VZIPd8:
- case ARM::VZIPq16:
- case ARM::VZIPq32:
- case ARM::VZIPq8:
- case ARM::t2CLZ:
- case ARM::t2MOVi16:
- case ARM::t2RBIT:
- case ARM::t2SXTB16r:
- case ARM::t2UXTB16r:
- case ARM::tCMNz:
- case ARM::tCMPhir:
- case ARM::tCMPi8:
- case ARM::tCMPr:
- case ARM::tCMPzhir:
- case ARM::tCMPzi8:
- case ARM::tCMPzr:
- case ARM::tLDRcp:
- case ARM::tREV:
- case ARM::tREV16:
- case ARM::tREVSH:
- case ARM::tSXTB:
- case ARM::tSXTH:
- case ARM::tTST:
- case ARM::tUXTB:
- case ARM::tUXTH: printOperand(MI, 1); break;
- case ARM::CMNzri:
- case ARM::CMPri:
- case ARM::CMPzri:
- case ARM::TEQri:
- case ARM::TSTri: printSOImmOperand(MI, 1); break;
- case ARM::CMNzrs:
- case ARM::CMPrs:
- case ARM::CMPzrs:
- case ARM::TEQrs:
- case ARM::TSTrs: printSORegOperand(MI, 1); break;
- case ARM::LDC2_OFFSET:
- case ARM::LDC_OFFSET:
- case ARM::STC2_OFFSET:
- case ARM::STC_OFFSET: printAddrMode2Operand(MI, 2); break;
- case ARM::LDR:
- case ARM::LDRB:
- case ARM::LDRcp:
- case ARM::STR:
- case ARM::STRB: printAddrMode2Operand(MI, 1); break;
- case ARM::LDRD:
- case ARM::STRD: printAddrMode3Operand(MI, 2); break;
- case ARM::LDRH:
- case ARM::LDRSB:
- case ARM::LDRSH:
- case ARM::STRH: printAddrMode3Operand(MI, 1); break;
- case ARM::MOVi2pieces: printSOImm2PartOperand(MI, 1); break;
- case ARM::VABALsv2i64:
- case ARM::VABALsv4i32:
- case ARM::VABALsv8i16:
- case ARM::VABALuv2i64:
- case ARM::VABALuv4i32:
- case ARM::VABALuv8i16:
- case ARM::VABAsv16i8:
- case ARM::VABAsv2i32:
- case ARM::VABAsv4i16:
- case ARM::VABAsv4i32:
- case ARM::VABAsv8i16:
- case ARM::VABAsv8i8:
- case ARM::VABAuv16i8:
- case ARM::VABAuv2i32:
- case ARM::VABAuv4i16:
- case ARM::VABAuv4i32:
- case ARM::VABAuv8i16:
- case ARM::VABAuv8i8:
- case ARM::VMLALsv2i64:
- case ARM::VMLALsv4i32:
- case ARM::VMLALsv8i16:
- case ARM::VMLALuv2i64:
- case ARM::VMLALuv4i32:
- case ARM::VMLALuv8i16:
- case ARM::VMLAv16i8:
- case ARM::VMLAv2i32:
- case ARM::VMLAv4i16:
- case ARM::VMLAv4i32:
- case ARM::VMLAv8i16:
- case ARM::VMLAv8i8:
- case ARM::VMLSLsv2i64:
- case ARM::VMLSLsv4i32:
- case ARM::VMLSLsv8i16:
- case ARM::VMLSLuv2i64:
- case ARM::VMLSLuv4i32:
- case ARM::VMLSLuv8i16:
- case ARM::VMLSv16i8:
- case ARM::VMLSv2i32:
- case ARM::VMLSv4i16:
- case ARM::VMLSv4i32:
- case ARM::VMLSv8i16:
- case ARM::VMLSv8i8:
- case ARM::VQDMLALv2i64:
- case ARM::VQDMLALv4i32:
- case ARM::VQDMLSLv2i64:
- case ARM::VQDMLSLv4i32:
- case ARM::VRSRAsv16i8:
- case ARM::VRSRAsv1i64:
- case ARM::VRSRAsv2i32:
- case ARM::VRSRAsv2i64:
- case ARM::VRSRAsv4i16:
- case ARM::VRSRAsv4i32:
- case ARM::VRSRAsv8i16:
- case ARM::VRSRAsv8i8:
- case ARM::VRSRAuv16i8:
- case ARM::VRSRAuv1i64:
- case ARM::VRSRAuv2i32:
- case ARM::VRSRAuv2i64:
- case ARM::VRSRAuv4i16:
- case ARM::VRSRAuv4i32:
- case ARM::VRSRAuv8i16:
- case ARM::VRSRAuv8i8:
- case ARM::VSRAsv16i8:
- case ARM::VSRAsv1i64:
- case ARM::VSRAsv2i32:
- case ARM::VSRAsv2i64:
- case ARM::VSRAsv4i16:
- case ARM::VSRAsv4i32:
- case ARM::VSRAsv8i16:
- case ARM::VSRAsv8i8:
- case ARM::VSRAuv16i8:
- case ARM::VSRAuv1i64:
- case ARM::VSRAuv2i32:
- case ARM::VSRAuv2i64:
- case ARM::VSRAuv4i16:
- case ARM::VSRAuv4i32:
- case ARM::VSRAuv8i16:
- case ARM::VSRAuv8i8:
- case ARM::tADDi3:
- case ARM::tADDrr:
- case ARM::tASRri:
- case ARM::tLSLri:
- case ARM::tLSRri:
- case ARM::tSUBi3:
- case ARM::tSUBrr: printOperand(MI, 3); break;
- case ARM::VDUPfdf:
- case ARM::VDUPfqf: printOperand(MI, 1, "lane"); break;
- case ARM::VLDRD:
- case ARM::VLDRS:
- case ARM::VSTRD:
- case ARM::VSTRS: printAddrMode5Operand(MI, 1); break;
- case ARM::t2LDRBT:
- case ARM::t2LDRBi8:
- case ARM::t2LDRHT:
- case ARM::t2LDRHi8:
- case ARM::t2LDRSBT:
- case ARM::t2LDRSBi8:
- case ARM::t2LDRSHT:
- case ARM::t2LDRSHi8:
- case ARM::t2LDRT:
- case ARM::t2LDRi8:
- case ARM::t2STRBT:
- case ARM::t2STRBi8:
- case ARM::t2STRHT:
- case ARM::t2STRHi8:
- case ARM::t2STRT:
- case ARM::t2STRi8: printT2AddrModeImm8Operand(MI, 1); break;
- case ARM::t2LDRDi8:
- case ARM::t2STRDi8: printT2AddrModeImm8s4Operand(MI, 2); break;
- case ARM::t2SUBrSPs_: printT2SOOperand(MI, 2); break;
- case ARM::tADDrSPi: printThumbS4ImmOperand(MI, 2); break;
- case ARM::tLDR:
- case ARM::tLDRi:
- case ARM::tSTR:
- case ARM::tSTRi: printThumbAddrModeS4Operand(MI, 1); break;
- case ARM::tLDRB:
- case ARM::tLDRBi:
- case ARM::tSTRB:
- case ARM::tSTRBi: printThumbAddrModeS1Operand(MI, 1); break;
- case ARM::tLDRH:
- case ARM::tLDRHi:
- case ARM::tSTRH:
- case ARM::tSTRHi: printThumbAddrModeS2Operand(MI, 1); break;
- case ARM::tLDRSB:
- case ARM::tLDRSH: printThumbAddrModeRROperand(MI, 1); break;
- case ARM::tLDRspi:
- case ARM::tRestore:
- case ARM::tSTRspi:
- case ARM::tSpill: printThumbAddrModeSPOperand(MI, 1); break;
- case ARM::tMUL: printOperand(MI, 0); break;
- }
- return;
- break;
- case ARM::ADCrr:
- case ARM::ADDrr:
- case ARM::ANDrr:
- case ARM::BICrr:
- case ARM::BKPT:
- case ARM::BXJ:
- case ARM::Bcc:
- case ARM::DBG:
- case ARM::EORrr:
- case ARM::MOVr:
- case ARM::MSR:
- case ARM::MSRsys:
- case ARM::MUL:
- case ARM::MVNr:
- case ARM::ORRrr:
- case ARM::RFE:
- case ARM::SBCrr:
- case ARM::SMC:
- case ARM::SRS:
- case ARM::SRSW:
- case ARM::SUBrr:
- case ARM::SVC:
- case ARM::VABSv16i8:
- case ARM::VABSv2i32:
- case ARM::VABSv4i16:
- case ARM::VABSv4i32:
- case ARM::VABSv8i16:
- case ARM::VABSv8i8:
- case ARM::VCLSv16i8:
- case ARM::VCLSv2i32:
- case ARM::VCLSv4i16:
- case ARM::VCLSv4i32:
- case ARM::VCLSv8i16:
- case ARM::VCLSv8i8:
- case ARM::VCLZv16i8:
- case ARM::VCLZv2i32:
- case ARM::VCLZv4i16:
- case ARM::VCLZv4i32:
- case ARM::VCLZv8i16:
- case ARM::VCLZv8i8:
- case ARM::VMOVLsv2i64:
- case ARM::VMOVLsv4i32:
- case ARM::VMOVLsv8i16:
- case ARM::VMOVLuv2i64:
- case ARM::VMOVLuv4i32:
- case ARM::VMOVLuv8i16:
- case ARM::VMOVNv2i32:
- case ARM::VMOVNv4i16:
- case ARM::VMOVNv8i8:
- case ARM::VNEGs16d:
- case ARM::VNEGs16q:
- case ARM::VNEGs32d:
- case ARM::VNEGs32q:
- case ARM::VNEGs8d:
- case ARM::VNEGs8q:
- case ARM::VPADALsv16i8:
- case ARM::VPADALsv2i32:
- case ARM::VPADALsv4i16:
- case ARM::VPADALsv4i32:
- case ARM::VPADALsv8i16:
- case ARM::VPADALsv8i8:
- case ARM::VPADALuv16i8:
- case ARM::VPADALuv2i32:
- case ARM::VPADALuv4i16:
- case ARM::VPADALuv4i32:
- case ARM::VPADALuv8i16:
- case ARM::VPADALuv8i8:
- case ARM::VPADDLsv16i8:
- case ARM::VPADDLsv2i32:
- case ARM::VPADDLsv4i16:
- case ARM::VPADDLsv4i32:
- case ARM::VPADDLsv8i16:
- case ARM::VPADDLsv8i8:
- case ARM::VPADDLuv16i8:
- case ARM::VPADDLuv2i32:
- case ARM::VPADDLuv4i16:
- case ARM::VPADDLuv4i32:
- case ARM::VPADDLuv8i16:
- case ARM::VPADDLuv8i8:
- case ARM::VQABSv16i8:
- case ARM::VQABSv2i32:
- case ARM::VQABSv4i16:
- case ARM::VQABSv4i32:
- case ARM::VQABSv8i16:
- case ARM::VQABSv8i8:
- case ARM::VQMOVNsuv2i32:
- case ARM::VQMOVNsuv4i16:
- case ARM::VQMOVNsuv8i8:
- case ARM::VQMOVNsv2i32:
- case ARM::VQMOVNsv4i16:
- case ARM::VQMOVNsv8i8:
- case ARM::VQMOVNuv2i32:
- case ARM::VQMOVNuv4i16:
- case ARM::VQMOVNuv8i8:
- case ARM::VQNEGv16i8:
- case ARM::VQNEGv2i32:
- case ARM::VQNEGv4i16:
- case ARM::VQNEGv4i32:
- case ARM::VQNEGv8i16:
- case ARM::VQNEGv8i8:
- case ARM::VRECPEd:
- case ARM::VRECPEq:
- case ARM::VRSQRTEd:
- case ARM::VRSQRTEq:
- case ARM::t2BXJ:
- case ARM::t2DBG:
- case ARM::t2LEApcrel:
- case ARM::t2MSR:
- case ARM::t2MSRsys:
- case ARM::t2RFEDB:
- case ARM::t2RFEIA:
- case ARM::t2SMC:
- case ARM::t2SRSDB:
- case ARM::t2SRSDBW:
- case ARM::t2SRSIA:
- case ARM::t2SRSIAW:
- case ARM::tADC:
- case ARM::tADDi8:
- case ARM::tADDrSP:
- case ARM::tADDspr:
- case ARM::tADDspr_:
- case ARM::tAND:
- case ARM::tANDsp:
- case ARM::tASRrr:
- case ARM::tBIC:
- case ARM::tBcc:
- case ARM::tCBNZ:
- case ARM::tCBZ:
- case ARM::tEOR:
- case ARM::tLSLrr:
- case ARM::tLSRrr:
- case ARM::tMOVSr:
- case ARM::tMOVgpr2gpr:
- case ARM::tMOVgpr2tgpr:
- case ARM::tMOVr:
- case ARM::tMOVtgpr2gpr:
- case ARM::tORR:
- case ARM::tROR:
- case ARM::tSBC:
- case ARM::tSUBi8:
- case ARM::tSVC:
- return;
- break;
- case ARM::ADDSri:
- case ARM::ADDSrr:
- case ARM::ADDSrs:
- case ARM::BFI:
- case ARM::QADD:
- case ARM::QADD16:
- case ARM::QADD8:
- case ARM::QASX:
- case ARM::QDADD:
- case ARM::QDSUB:
- case ARM::QSAX:
- case ARM::QSUB:
- case ARM::QSUB16:
- case ARM::QSUB8:
- case ARM::RSBSri:
- case ARM::RSBSrs:
- case ARM::SADD16:
- case ARM::SADD8:
- case ARM::SASX:
- case ARM::SEL:
- case ARM::SHADD16:
- case ARM::SHADD8:
- case ARM::SHASX:
- case ARM::SHSAX:
- case ARM::SHSUB16:
- case ARM::SHSUB8:
- case ARM::SMMUL:
- case ARM::SMMULR:
- case ARM::SMUAD:
- case ARM::SMUADX:
- case ARM::SMULBB:
- case ARM::SMULBT:
- case ARM::SMULTB:
- case ARM::SMULTT:
- case ARM::SMULWB:
- case ARM::SMULWT:
- case ARM::SMUSD:
- case ARM::SMUSDX:
- case ARM::SSAT16:
- case ARM::SSAX:
- case ARM::SSUB16:
- case ARM::SSUB8:
- case ARM::SUBSri:
- case ARM::SUBSrr:
- case ARM::SUBSrs:
- case ARM::SXTAB16rr:
- case ARM::SXTABrr:
- case ARM::SXTAHrr:
- case ARM::UADD16:
- case ARM::UADD8:
- case ARM::UASX:
- case ARM::UHADD16:
- case ARM::UHADD8:
- case ARM::UHASX:
- case ARM::UHSAX:
- case ARM::UHSUB16:
- case ARM::UHSUB8:
- case ARM::UQADD16:
- case ARM::UQADD8:
- case ARM::UQASX:
- case ARM::UQSAX:
- case ARM::UQSUB16:
- case ARM::UQSUB8:
- case ARM::USAD8:
- case ARM::USAT16:
- case ARM::USAX:
- case ARM::USUB16:
- case ARM::USUB8:
- case ARM::UXTAB16rr:
- case ARM::UXTABrr:
- case ARM::UXTAHrr:
- case ARM::VANDd:
- case ARM::VANDq:
- case ARM::VBICd:
- case ARM::VBICq:
- case ARM::VEORd:
- case ARM::VEORq:
- case ARM::VMOVDRR:
- case ARM::VMOVRRD:
- case ARM::VORNd:
- case ARM::VORNq:
- case ARM::VORRd:
- case ARM::VORRq:
- case ARM::VTSTv16i8:
- case ARM::VTSTv2i32:
- case ARM::VTSTv4i16:
- case ARM::VTSTv4i32:
- case ARM::VTSTv8i16:
- case ARM::VTSTv8i8:
- case ARM::t2ADCSri:
- case ARM::t2ADCri:
- case ARM::t2ADDrSPi12:
- case ARM::t2ADDri12:
- case ARM::t2ANDri:
- case ARM::t2BICri:
- case ARM::t2EORri:
- case ARM::t2MUL:
- case ARM::t2ORNri:
- case ARM::t2ORNrr:
- case ARM::t2ORNrs:
- case ARM::t2ORRri:
- case ARM::t2QADD:
- case ARM::t2QADD16:
- case ARM::t2QADD8:
- case ARM::t2QASX:
- case ARM::t2QDADD:
- case ARM::t2QDSUB:
- case ARM::t2QSAX:
- case ARM::t2QSUB:
- case ARM::t2QSUB16:
- case ARM::t2QSUB8:
- case ARM::t2RSBSrs:
- case ARM::t2RSBrs:
- case ARM::t2SADD16:
- case ARM::t2SADD8:
- case ARM::t2SASX:
- case ARM::t2SBCSri:
- case ARM::t2SBCri:
- case ARM::t2SDIV:
- case ARM::t2SEL:
- case ARM::t2SHADD16:
- case ARM::t2SHADD8:
- case ARM::t2SHASX:
- case ARM::t2SHSAX:
- case ARM::t2SHSUB16:
- case ARM::t2SHSUB8:
- case ARM::t2SMMUL:
- case ARM::t2SMMULR:
- case ARM::t2SMUAD:
- case ARM::t2SMUADX:
- case ARM::t2SMULBB:
- case ARM::t2SMULBT:
- case ARM::t2SMULTB:
- case ARM::t2SMULTT:
- case ARM::t2SMULWB:
- case ARM::t2SMULWT:
- case ARM::t2SMUSD:
- case ARM::t2SMUSDX:
- case ARM::t2SSAT16:
- case ARM::t2SSAX:
- case ARM::t2SSUB16:
- case ARM::t2SSUB8:
- case ARM::t2SUBrSPi12:
- case ARM::t2SUBrSPs:
- case ARM::t2SUBri12:
- case ARM::t2SXTAB16rr:
- case ARM::t2SXTABrr:
- case ARM::t2SXTAHrr:
- case ARM::t2UADD16:
- case ARM::t2UADD8:
- case ARM::t2UASX:
- case ARM::t2UDIV:
- case ARM::t2UHADD16:
- case ARM::t2UHADD8:
- case ARM::t2UHASX:
- case ARM::t2UHSAX:
- case ARM::t2UHSUB16:
- case ARM::t2UHSUB8:
- case ARM::t2UQADD16:
- case ARM::t2UQADD8:
- case ARM::t2UQASX:
- case ARM::t2UQSAX:
- case ARM::t2UQSUB16:
- case ARM::t2UQSUB8:
- case ARM::t2USAD8:
- case ARM::t2USAT16:
- case ARM::t2USAX:
- case ARM::t2USUB16:
- case ARM::t2USUB8:
- case ARM::t2UXTAB16rr:
- case ARM::t2UXTABrr:
- case ARM::t2UXTAHrr:
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- switch (MI->getOpcode()) {
- case ARM::ADDSri:
- case ARM::RSBSri:
- case ARM::SUBSri: printSOImmOperand(MI, 2); break;
- case ARM::ADDSrr:
- case ARM::QADD:
- case ARM::QADD16:
- case ARM::QADD8:
- case ARM::QASX:
- case ARM::QDADD:
- case ARM::QDSUB:
- case ARM::QSAX:
- case ARM::QSUB:
- case ARM::QSUB16:
- case ARM::QSUB8:
- case ARM::SADD16:
- case ARM::SADD8:
- case ARM::SASX:
- case ARM::SEL:
- case ARM::SHADD16:
- case ARM::SHADD8:
- case ARM::SHASX:
- case ARM::SHSAX:
- case ARM::SHSUB16:
- case ARM::SHSUB8:
- case ARM::SMMUL:
- case ARM::SMMULR:
- case ARM::SMUAD:
- case ARM::SMUADX:
- case ARM::SMULBB:
- case ARM::SMULBT:
- case ARM::SMULTB:
- case ARM::SMULTT:
- case ARM::SMULWB:
- case ARM::SMULWT:
- case ARM::SMUSD:
- case ARM::SMUSDX:
- case ARM::SSAT16:
- case ARM::SSAX:
- case ARM::SSUB16:
- case ARM::SSUB8:
- case ARM::SUBSrr:
- case ARM::SXTAB16rr:
- case ARM::SXTABrr:
- case ARM::SXTAHrr:
- case ARM::UADD16:
- case ARM::UADD8:
- case ARM::UASX:
- case ARM::UHADD16:
- case ARM::UHADD8:
- case ARM::UHASX:
- case ARM::UHSAX:
- case ARM::UHSUB16:
- case ARM::UHSUB8:
- case ARM::UQADD16:
- case ARM::UQADD8:
- case ARM::UQASX:
- case ARM::UQSAX:
- case ARM::UQSUB16:
- case ARM::UQSUB8:
- case ARM::USAD8:
- case ARM::USAT16:
- case ARM::USAX:
- case ARM::USUB16:
- case ARM::USUB8:
- case ARM::UXTAB16rr:
- case ARM::UXTABrr:
- case ARM::UXTAHrr:
- case ARM::VANDd:
- case ARM::VANDq:
- case ARM::VBICd:
- case ARM::VBICq:
- case ARM::VEORd:
- case ARM::VEORq:
- case ARM::VMOVDRR:
- case ARM::VMOVRRD:
- case ARM::VORNd:
- case ARM::VORNq:
- case ARM::VORRd:
- case ARM::VORRq:
- case ARM::VTSTv16i8:
- case ARM::VTSTv2i32:
- case ARM::VTSTv4i16:
- case ARM::VTSTv4i32:
- case ARM::VTSTv8i16:
- case ARM::VTSTv8i8:
- case ARM::t2ADCSri:
- case ARM::t2ADCri:
- case ARM::t2ADDrSPi12:
- case ARM::t2ADDri12:
- case ARM::t2ANDri:
- case ARM::t2BICri:
- case ARM::t2EORri:
- case ARM::t2MUL:
- case ARM::t2ORNri:
- case ARM::t2ORNrr:
- case ARM::t2ORRri:
- case ARM::t2QADD:
- case ARM::t2QADD16:
- case ARM::t2QADD8:
- case ARM::t2QASX:
- case ARM::t2QDADD:
- case ARM::t2QDSUB:
- case ARM::t2QSAX:
- case ARM::t2QSUB:
- case ARM::t2QSUB16:
- case ARM::t2QSUB8:
- case ARM::t2SADD16:
- case ARM::t2SADD8:
- case ARM::t2SASX:
- case ARM::t2SBCSri:
- case ARM::t2SBCri:
- case ARM::t2SDIV:
- case ARM::t2SEL:
- case ARM::t2SHADD16:
- case ARM::t2SHADD8:
- case ARM::t2SHASX:
- case ARM::t2SHSAX:
- case ARM::t2SHSUB16:
- case ARM::t2SHSUB8:
- case ARM::t2SMMUL:
- case ARM::t2SMMULR:
- case ARM::t2SMUAD:
- case ARM::t2SMUADX:
- case ARM::t2SMULBB:
- case ARM::t2SMULBT:
- case ARM::t2SMULTB:
- case ARM::t2SMULTT:
- case ARM::t2SMULWB:
- case ARM::t2SMULWT:
- case ARM::t2SMUSD:
- case ARM::t2SMUSDX:
- case ARM::t2SSAT16:
- case ARM::t2SSAX:
- case ARM::t2SSUB16:
- case ARM::t2SSUB8:
- case ARM::t2SUBrSPi12:
- case ARM::t2SUBri12:
- case ARM::t2SXTAB16rr:
- case ARM::t2SXTABrr:
- case ARM::t2SXTAHrr:
- case ARM::t2UADD16:
- case ARM::t2UADD8:
- case ARM::t2UASX:
- case ARM::t2UDIV:
- case ARM::t2UHADD16:
- case ARM::t2UHADD8:
- case ARM::t2UHASX:
- case ARM::t2UHSAX:
- case ARM::t2UHSUB16:
- case ARM::t2UHSUB8:
- case ARM::t2UQADD16:
- case ARM::t2UQADD8:
- case ARM::t2UQASX:
- case ARM::t2UQSAX:
- case ARM::t2UQSUB16:
- case ARM::t2UQSUB8:
- case ARM::t2USAD8:
- case ARM::t2USAT16:
- case ARM::t2USAX:
- case ARM::t2USUB16:
- case ARM::t2USUB8:
- case ARM::t2UXTAB16rr:
- case ARM::t2UXTABrr:
- case ARM::t2UXTAHrr: printOperand(MI, 2); break;
- case ARM::ADDSrs:
- case ARM::RSBSrs:
- case ARM::SUBSrs: printSORegOperand(MI, 2); break;
- case ARM::BFI: printBitfieldInvMaskImmOperand(MI, 2); break;
- case ARM::t2ORNrs:
- case ARM::t2RSBSrs:
- case ARM::t2RSBrs:
- case ARM::t2SUBrSPs: printT2SOOperand(MI, 2); break;
- }
- return;
- break;
- case ARM::BR_JTadd:
- O << " \n";
- printJTBlockOperand(MI, 2);
- return;
- break;
- case ARM::CDP:
- printOperand(MI, 1);
- O << ", cr";
- printNoHashImmediate(MI, 2);
- O << ", cr";
- printNoHashImmediate(MI, 3);
- O << ", cr";
- printNoHashImmediate(MI, 4);
- O << ", ";
- printOperand(MI, 5);
- return;
- break;
- case ARM::FCONSTD:
- case ARM::FCONSTS:
- case ARM::LDC2L_OFFSET:
- case ARM::LDC2L_OPTION:
- case ARM::LDC2L_POST:
- case ARM::LDCL_OFFSET:
- case ARM::LDCL_OPTION:
- case ARM::LDCL_POST:
- case ARM::MOVrx:
- case ARM::MRS:
- case ARM::MRSsys:
- case ARM::PLDWi:
- case ARM::PLDi:
- case ARM::PLIi:
- case ARM::RFEW:
- case ARM::STC2L_OFFSET:
- case ARM::STC2L_OPTION:
- case ARM::STC2L_POST:
- case ARM::STCL_OFFSET:
- case ARM::STCL_OPTION:
- case ARM::STCL_POST:
- case ARM::VABSD:
- case ARM::VABSS:
- case ARM::VABSfd:
- case ARM::VABSfd_sfp:
- case ARM::VABSfq:
- case ARM::VCEQzv16i8:
- case ARM::VCEQzv2i32:
- case ARM::VCEQzv4i16:
- case ARM::VCEQzv4i32:
- case ARM::VCEQzv8i16:
- case ARM::VCEQzv8i8:
- case ARM::VCGEzv16i8:
- case ARM::VCGEzv2i32:
- case ARM::VCGEzv4i16:
- case ARM::VCGEzv4i32:
- case ARM::VCGEzv8i16:
- case ARM::VCGEzv8i8:
- case ARM::VCGTzv16i8:
- case ARM::VCGTzv2i32:
- case ARM::VCGTzv4i16:
- case ARM::VCGTzv4i32:
- case ARM::VCGTzv8i16:
- case ARM::VCGTzv8i8:
- case ARM::VCLEzv16i8:
- case ARM::VCLEzv2i32:
- case ARM::VCLEzv4i16:
- case ARM::VCLEzv4i32:
- case ARM::VCLEzv8i16:
- case ARM::VCLEzv8i8:
- case ARM::VCLTzv16i8:
- case ARM::VCLTzv2i32:
- case ARM::VCLTzv4i16:
- case ARM::VCLTzv4i32:
- case ARM::VCLTzv8i16:
- case ARM::VCLTzv8i8:
- case ARM::VCMPD:
- case ARM::VCMPED:
- case ARM::VCMPES:
- case ARM::VCMPS:
- case ARM::VCVTf2xsd:
- case ARM::VCVTf2xsq:
- case ARM::VCVTf2xud:
- case ARM::VCVTf2xuq:
- case ARM::VCVTxs2fd:
- case ARM::VCVTxs2fq:
- case ARM::VCVTxu2fd:
- case ARM::VCVTxu2fq:
- case ARM::VMOVD:
- case ARM::VMOVDcc:
- case ARM::VMOVS:
- case ARM::VMOVScc:
- case ARM::VMRS:
- case ARM::VNEGD:
- case ARM::VNEGDcc:
- case ARM::VNEGS:
- case ARM::VNEGScc:
- case ARM::VNEGf32q:
- case ARM::VNEGfd:
- case ARM::VNEGfd_sfp:
- case ARM::VRECPEfd:
- case ARM::VRECPEfq:
- case ARM::VRSQRTEfd:
- case ARM::VRSQRTEfq:
- case ARM::VSLTOD:
- case ARM::VSLTOS:
- case ARM::VSQRTD:
- case ARM::VSQRTS:
- case ARM::VTOSLD:
- case ARM::VTOSLS:
- case ARM::VTOULD:
- case ARM::VTOULS:
- case ARM::VULTOD:
- case ARM::VULTOS:
- case ARM::t2CMNzri:
- case ARM::t2CMNzrr:
- case ARM::t2CMNzrs:
- case ARM::t2CMPri:
- case ARM::t2CMPrr:
- case ARM::t2CMPrs:
- case ARM::t2CMPzri:
- case ARM::t2CMPzrr:
- case ARM::t2CMPzrs:
- case ARM::t2LDRBi12:
- case ARM::t2LDRBpci:
- case ARM::t2LDRBs:
- case ARM::t2LDRHi12:
- case ARM::t2LDRHpci:
- case ARM::t2LDRHs:
- case ARM::t2LDRSBi12:
- case ARM::t2LDRSBpci:
- case ARM::t2LDRSBs:
- case ARM::t2LDRSHi12:
- case ARM::t2LDRSHpci:
- case ARM::t2LDRSHs:
- case ARM::t2LDRi12:
- case ARM::t2LDRpci:
- case ARM::t2LDRs:
- case ARM::t2MOVCCi:
- case ARM::t2MOVCCr:
- case ARM::t2MOVsra_flag:
- case ARM::t2MOVsrl_flag:
- case ARM::t2MRS:
- case ARM::t2MRSsys:
- case ARM::t2MVNr:
- case ARM::t2MVNs:
- case ARM::t2REV:
- case ARM::t2REV16:
- case ARM::t2REVSH:
- case ARM::t2RFEDBW:
- case ARM::t2RFEIAW:
- case ARM::t2STRBi12:
- case ARM::t2STRBs:
- case ARM::t2STRHi12:
- case ARM::t2STRHs:
- case ARM::t2STRi12:
- case ARM::t2STRs:
- case ARM::t2SXTBr:
- case ARM::t2SXTHr:
- case ARM::t2TEQri:
- case ARM::t2TEQrr:
- case ARM::t2TEQrs:
- case ARM::t2TSTri:
- case ARM::t2TSTrr:
- case ARM::t2TSTrs:
- case ARM::t2UXTBr:
- case ARM::t2UXTHr:
- switch (MI->getOpcode()) {
- case ARM::FCONSTD: printVFPf64ImmOperand(MI, 1); break;
- case ARM::FCONSTS: printVFPf32ImmOperand(MI, 1); break;
- case ARM::LDC2L_OFFSET:
- case ARM::LDCL_OFFSET:
- case ARM::STC2L_OFFSET:
- case ARM::STCL_OFFSET: printAddrMode2Operand(MI, 2); break;
- case ARM::LDC2L_OPTION:
- case ARM::LDCL_OPTION:
- case ARM::STC2L_OPTION:
- case ARM::STCL_OPTION: printNoHashImmediate(MI, 3); break;
- case ARM::LDC2L_POST:
- case ARM::LDCL_POST:
- case ARM::STC2L_POST:
- case ARM::STCL_POST: printAddrMode2OffsetOperand(MI, 3); break;
- case ARM::MOVrx: O << ", rrx"; break;
- case ARM::MRS:
- case ARM::t2MRS: O << ", cpsr"; break;
- case ARM::MRSsys:
- case ARM::t2MRSsys: O << ", spsr"; break;
- case ARM::PLDWi:
- case ARM::PLDi:
- case ARM::PLIi: O << ']'; break;
- case ARM::RFEW:
- case ARM::t2RFEDBW:
- case ARM::t2RFEIAW: O << '!'; break;
- case ARM::VABSD:
- case ARM::VABSS:
- case ARM::VABSfd:
- case ARM::VABSfd_sfp:
- case ARM::VABSfq:
- case ARM::VCMPD:
- case ARM::VCMPED:
- case ARM::VCMPES:
- case ARM::VCMPS:
- case ARM::VMOVD:
- case ARM::VMOVS:
- case ARM::VNEGD:
- case ARM::VNEGS:
- case ARM::VNEGf32q:
- case ARM::VNEGfd:
- case ARM::VNEGfd_sfp:
- case ARM::VRECPEfd:
- case ARM::VRECPEfq:
- case ARM::VRSQRTEfd:
- case ARM::VRSQRTEfq:
- case ARM::VSQRTD:
- case ARM::VSQRTS:
- case ARM::t2CMNzri:
- case ARM::t2CMNzrr:
- case ARM::t2CMPri:
- case ARM::t2CMPrr:
- case ARM::t2CMPzri:
- case ARM::t2CMPzrr:
- case ARM::t2LDRBpci:
- case ARM::t2LDRHpci:
- case ARM::t2LDRSBpci:
- case ARM::t2LDRSHpci:
- case ARM::t2LDRpci:
- case ARM::t2MVNr:
- case ARM::t2REV:
- case ARM::t2REV16:
- case ARM::t2REVSH:
- case ARM::t2SXTBr:
- case ARM::t2SXTHr:
- case ARM::t2TEQri:
- case ARM::t2TEQrr:
- case ARM::t2TSTri:
- case ARM::t2TSTrr:
- case ARM::t2UXTBr:
- case ARM::t2UXTHr: printOperand(MI, 1); break;
- case ARM::VCEQzv16i8:
- case ARM::VCEQzv2i32:
- case ARM::VCEQzv4i16:
- case ARM::VCEQzv4i32:
- case ARM::VCEQzv8i16:
- case ARM::VCEQzv8i8:
- case ARM::VCGEzv16i8:
- case ARM::VCGEzv2i32:
- case ARM::VCGEzv4i16:
- case ARM::VCGEzv4i32:
- case ARM::VCGEzv8i16:
- case ARM::VCGEzv8i8:
- case ARM::VCGTzv16i8:
- case ARM::VCGTzv2i32:
- case ARM::VCGTzv4i16:
- case ARM::VCGTzv4i32:
- case ARM::VCGTzv8i16:
- case ARM::VCGTzv8i8:
- case ARM::VCLEzv16i8:
- case ARM::VCLEzv2i32:
- case ARM::VCLEzv4i16:
- case ARM::VCLEzv4i32:
- case ARM::VCLEzv8i16:
- case ARM::VCLEzv8i8:
- case ARM::VCLTzv16i8:
- case ARM::VCLTzv2i32:
- case ARM::VCLTzv4i16:
- case ARM::VCLTzv4i32:
- case ARM::VCLTzv8i16:
- case ARM::VCLTzv8i8: O << ", #0"; break;
- case ARM::VCVTf2xsd:
- case ARM::VCVTf2xsq:
- case ARM::VCVTf2xud:
- case ARM::VCVTf2xuq:
- case ARM::VCVTxs2fd:
- case ARM::VCVTxs2fq:
- case ARM::VCVTxu2fd:
- case ARM::VCVTxu2fq:
- case ARM::VMOVDcc:
- case ARM::VMOVScc:
- case ARM::VNEGDcc:
- case ARM::VNEGScc:
- case ARM::VSLTOD:
- case ARM::VSLTOS:
- case ARM::VTOSLD:
- case ARM::VTOSLS:
- case ARM::VTOULD:
- case ARM::VTOULS:
- case ARM::VULTOD:
- case ARM::VULTOS:
- case ARM::t2MOVCCi:
- case ARM::t2MOVCCr: printOperand(MI, 2); break;
- case ARM::VMRS: O << ", fpscr"; break;
- case ARM::t2CMNzrs:
- case ARM::t2CMPrs:
- case ARM::t2CMPzrs:
- case ARM::t2MVNs:
- case ARM::t2TEQrs:
- case ARM::t2TSTrs: printT2SOOperand(MI, 1); break;
- case ARM::t2LDRBi12:
- case ARM::t2LDRHi12:
- case ARM::t2LDRSBi12:
- case ARM::t2LDRSHi12:
- case ARM::t2LDRi12:
- case ARM::t2STRBi12:
- case ARM::t2STRHi12:
- case ARM::t2STRi12: printT2AddrModeImm12Operand(MI, 1); break;
- case ARM::t2LDRBs:
- case ARM::t2LDRHs:
- case ARM::t2LDRSBs:
- case ARM::t2LDRSHs:
- case ARM::t2LDRs:
- case ARM::t2STRBs:
- case ARM::t2STRHs:
- case ARM::t2STRs: printT2AddrModeSoRegOperand(MI, 1); break;
- case ARM::t2MOVsra_flag:
- case ARM::t2MOVsrl_flag: O << ", #1"; break;
- }
- return;
- break;
- case ARM::LDC2L_PRE:
- case ARM::LDCL_PRE:
- case ARM::STC2L_PRE:
- case ARM::STCL_PRE:
- printAddrMode2Operand(MI, 2);
- O << '!';
- return;
- break;
- case ARM::LDC2_OPTION:
- case ARM::LDC2_POST:
- case ARM::LDC_OPTION:
- case ARM::LDC_POST:
- case ARM::LDRBT:
- case ARM::LDRB_POST:
- case ARM::LDRHT:
- case ARM::LDRH_POST:
- case ARM::LDRSBT:
- case ARM::LDRSB_POST:
- case ARM::LDRSHT:
- case ARM::LDRSH_POST:
- case ARM::LDRT:
- case ARM::LDR_POST:
- case ARM::STC2_OPTION:
- case ARM::STC2_POST:
- case ARM::STC_OPTION:
- case ARM::STC_POST:
- case ARM::STRBT:
- case ARM::STRB_POST:
- case ARM::STRHT:
- case ARM::STRH_POST:
- case ARM::STRT:
- case ARM::STR_POST:
- case ARM::t2LDRB_POST:
- case ARM::t2LDRH_POST:
- case ARM::t2LDRSB_POST:
- case ARM::t2LDRSH_POST:
- case ARM::t2LDR_POST:
- case ARM::t2STRB_POST:
- case ARM::t2STRH_POST:
- case ARM::t2STR_POST:
- O << ", [";
- printOperand(MI, 2);
- O << "], ";
- switch (MI->getOpcode()) {
- case ARM::LDC2_OPTION:
- case ARM::LDC_OPTION:
- case ARM::STC2_OPTION:
- case ARM::STC_OPTION: printOperand(MI, 3); break;
- case ARM::LDC2_POST:
- case ARM::LDC_POST:
- case ARM::LDRBT:
- case ARM::LDRB_POST:
- case ARM::LDRSBT:
- case ARM::LDRT:
- case ARM::LDR_POST:
- case ARM::STC2_POST:
- case ARM::STC_POST:
- case ARM::STRBT:
- case ARM::STRB_POST:
- case ARM::STRT:
- case ARM::STR_POST: printAddrMode2OffsetOperand(MI, 3); break;
- case ARM::LDRHT:
- case ARM::LDRH_POST:
- case ARM::LDRSB_POST:
- case ARM::LDRSHT:
- case ARM::LDRSH_POST:
- case ARM::STRHT:
- case ARM::STRH_POST: printAddrMode3OffsetOperand(MI, 3); break;
- case ARM::t2LDRB_POST:
- case ARM::t2LDRH_POST:
- case ARM::t2LDRSB_POST:
- case ARM::t2LDRSH_POST:
- case ARM::t2LDR_POST:
- case ARM::t2STRB_POST:
- case ARM::t2STRH_POST:
- case ARM::t2STR_POST: printT2AddrModeImm8OffsetOperand(MI, 3); break;
- }
- return;
- break;
- case ARM::LDC2_PRE:
- case ARM::LDC_PRE:
- case ARM::LDRB_PRE:
- case ARM::LDRH_PRE:
- case ARM::LDRSB_PRE:
- case ARM::LDRSH_PRE:
- case ARM::LDR_PRE:
- case ARM::STC2_PRE:
- case ARM::STC_PRE:
- case ARM::t2LDRB_PRE:
- case ARM::t2LDRH_PRE:
- case ARM::t2LDRSB_PRE:
- case ARM::t2LDRSH_PRE:
- case ARM::t2LDR_PRE:
- O << ", ";
- switch (MI->getOpcode()) {
- case ARM::LDC2_PRE:
- case ARM::LDC_PRE:
- case ARM::LDRB_PRE:
- case ARM::LDR_PRE:
- case ARM::STC2_PRE:
- case ARM::STC_PRE: printAddrMode2Operand(MI, 2); break;
- case ARM::LDRH_PRE:
- case ARM::LDRSB_PRE:
- case ARM::LDRSH_PRE: printAddrMode3Operand(MI, 2); break;
- case ARM::t2LDRB_PRE:
- case ARM::t2LDRH_PRE:
- case ARM::t2LDRSB_PRE:
- case ARM::t2LDRSH_PRE:
- case ARM::t2LDR_PRE: printT2AddrModeImm8Operand(MI, 2); break;
- }
- O << '!';
- return;
- break;
- case ARM::LDM:
- case ARM::LDM_RET:
- case ARM::STM:
- case ARM::tLDM:
- case ARM::tSTM:
- printAddrMode4Operand(MI, 0);
- O << ", ";
- printRegisterList(MI, 4);
- return;
- break;
- case ARM::LDRD_POST:
- case ARM::STRD_POST:
- O << ", ";
- switch (MI->getOpcode()) {
- case ARM::LDRD_POST: printOperand(MI, 1); break;
- case ARM::STRD_POST: printOperand(MI, 2); break;
- }
- O << ", [";
- printOperand(MI, 3);
- O << "], ";
- printAddrMode3OffsetOperand(MI, 4);
- return;
- break;
- case ARM::LDRD_PRE:
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- printAddrMode3Operand(MI, 3);
- O << '!';
- return;
- break;
- case ARM::LDREX:
- case ARM::LDREXB:
- case ARM::LDREXH:
- case ARM::t2LDREX:
- case ARM::t2LDREXB:
- case ARM::t2LDREXH:
- O << ", [";
- printOperand(MI, 1);
- O << ']';
- return;
- break;
- case ARM::LDREXD:
- case ARM::STREX:
- case ARM::STREXB:
- case ARM::STREXH:
- case ARM::SWP:
- case ARM::SWPB:
- case ARM::t2LDREXD:
- case ARM::t2STREX:
- case ARM::t2STREXB:
- case ARM::t2STREXH:
- O << ", ";
- printOperand(MI, 1);
- O << ", [";
- printOperand(MI, 2);
- O << ']';
- return;
- break;
- case ARM::MCR:
- case ARM::MRC:
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- O << ", cr";
- printNoHashImmediate(MI, 3);
- O << ", cr";
- printNoHashImmediate(MI, 4);
- O << ", ";
- printOperand(MI, 5);
- return;
- break;
- case ARM::MCR2:
- case ARM::MRC2:
- O << ", cr";
- printNoHashImmediate(MI, 3);
- O << ", cr";
- printNoHashImmediate(MI, 4);
- O << ", ";
- printOperand(MI, 5);
- return;
- break;
- case ARM::MCRR:
- case ARM::MRRC:
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- O << ", ";
- printOperand(MI, 3);
- O << ", cr";
- printNoHashImmediate(MI, 4);
- return;
- break;
- case ARM::MCRR2:
- case ARM::MRRC2:
- O << ", ";
- printOperand(MI, 3);
- O << ", cr";
- printNoHashImmediate(MI, 4);
- return;
- break;
- case ARM::MLA:
- case ARM::SMLAL:
- case ARM::SMULL:
- case ARM::UMLAL:
- case ARM::UMULL:
- case ARM::VBIFd:
- case ARM::VBIFq:
- case ARM::VBITd:
- case ARM::VBITq:
- case ARM::VBSLd:
- case ARM::VBSLq:
- case ARM::VSLIv16i8:
- case ARM::VSLIv1i64:
- case ARM::VSLIv2i32:
- case ARM::VSLIv2i64:
- case ARM::VSLIv4i16:
- case ARM::VSLIv4i32:
- case ARM::VSLIv8i16:
- case ARM::VSLIv8i8:
- case ARM::VSRIv16i8:
- case ARM::VSRIv1i64:
- case ARM::VSRIv2i32:
- case ARM::VSRIv2i64:
- case ARM::VSRIv4i16:
- case ARM::VSRIv4i32:
- case ARM::VSRIv8i16:
- case ARM::VSRIv8i8:
- O << ", ";
- printOperand(MI, 2);
- O << ", ";
- printOperand(MI, 3);
- return;
- break;
- case ARM::MLS:
- case ARM::PKHBT:
- case ARM::PKHTB:
- case ARM::SBFX:
- case ARM::SMLABB:
- case ARM::SMLABT:
- case ARM::SMLAD:
- case ARM::SMLADX:
- case ARM::SMLALBB:
- case ARM::SMLALBT:
- case ARM::SMLALD:
- case ARM::SMLALDX:
- case ARM::SMLALTB:
- case ARM::SMLALTT:
- case ARM::SMLATB:
- case ARM::SMLATT:
- case ARM::SMLAWB:
- case ARM::SMLAWT:
- case ARM::SMLSD:
- case ARM::SMLSDX:
- case ARM::SMLSLD:
- case ARM::SMLSLDX:
- case ARM::SMMLA:
- case ARM::SMMLAR:
- case ARM::SMMLS:
- case ARM::SMMLSR:
- case ARM::SSATasr:
- case ARM::SSATlsl:
- case ARM::SXTAB16rr_rot:
- case ARM::SXTABrr_rot:
- case ARM::SXTAHrr_rot:
- case ARM::UBFX:
- case ARM::UMAAL:
- case ARM::USADA8:
- case ARM::USATasr:
- case ARM::USATlsl:
- case ARM::UXTAB16rr_rot:
- case ARM::UXTABrr_rot:
- case ARM::UXTAHrr_rot:
- case ARM::VEXTd16:
- case ARM::VEXTd32:
- case ARM::VEXTd8:
- case ARM::VEXTdf:
- case ARM::VEXTq16:
- case ARM::VEXTq32:
- case ARM::VEXTq8:
- case ARM::VEXTqf:
- case ARM::VMOVRRS:
- case ARM::VMOVSRR:
- case ARM::t2BFI:
- case ARM::t2MLA:
- case ARM::t2MLS:
- case ARM::t2PKHBT:
- case ARM::t2PKHTB:
- case ARM::t2SBFX:
- case ARM::t2SMLABB:
- case ARM::t2SMLABT:
- case ARM::t2SMLAD:
- case ARM::t2SMLADX:
- case ARM::t2SMLAL:
- case ARM::t2SMLALBB:
- case ARM::t2SMLALBT:
- case ARM::t2SMLALD:
- case ARM::t2SMLALDX:
- case ARM::t2SMLALTB:
- case ARM::t2SMLALTT:
- case ARM::t2SMLATB:
- case ARM::t2SMLATT:
- case ARM::t2SMLAWB:
- case ARM::t2SMLAWT:
- case ARM::t2SMLSD:
- case ARM::t2SMLSDX:
- case ARM::t2SMLSLD:
- case ARM::t2SMLSLDX:
- case ARM::t2SMMLA:
- case ARM::t2SMMLAR:
- case ARM::t2SMMLS:
- case ARM::t2SMMLSR:
- case ARM::t2SMULL:
- case ARM::t2SSATasr:
- case ARM::t2SSATlsl:
- case ARM::t2SXTAB16rr_rot:
- case ARM::t2SXTABrr_rot:
- case ARM::t2SXTAHrr_rot:
- case ARM::t2UBFX:
- case ARM::t2UMAAL:
- case ARM::t2UMLAL:
- case ARM::t2UMULL:
- case ARM::t2USADA8:
- case ARM::t2USATasr:
- case ARM::t2USATlsl:
- case ARM::t2UXTAB16rr_rot:
- case ARM::t2UXTABrr_rot:
- case ARM::t2UXTAHrr_rot:
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- switch (MI->getOpcode()) {
- case ARM::MLS:
- case ARM::SBFX:
- case ARM::SMLABB:
- case ARM::SMLABT:
- case ARM::SMLAD:
- case ARM::SMLADX:
- case ARM::SMLALBB:
- case ARM::SMLALBT:
- case ARM::SMLALD:
- case ARM::SMLALDX:
- case ARM::SMLALTB:
- case ARM::SMLALTT:
- case ARM::SMLATB:
- case ARM::SMLATT:
- case ARM::SMLAWB:
- case ARM::SMLAWT:
- case ARM::SMLSD:
- case ARM::SMLSDX:
- case ARM::SMLSLD:
- case ARM::SMLSLDX:
- case ARM::SMMLA:
- case ARM::SMMLAR:
- case ARM::SMMLS:
- case ARM::SMMLSR:
- case ARM::UBFX:
- case ARM::UMAAL:
- case ARM::USADA8:
- case ARM::VEXTd16:
- case ARM::VEXTd32:
- case ARM::VEXTd8:
- case ARM::VEXTdf:
- case ARM::VEXTq16:
- case ARM::VEXTq32:
- case ARM::VEXTq8:
- case ARM::VEXTqf:
- case ARM::VMOVRRS:
- case ARM::VMOVSRR:
- case ARM::t2BFI:
- case ARM::t2MLA:
- case ARM::t2MLS:
- case ARM::t2SBFX:
- case ARM::t2SMLABB:
- case ARM::t2SMLABT:
- case ARM::t2SMLAD:
- case ARM::t2SMLADX:
- case ARM::t2SMLAL:
- case ARM::t2SMLALBB:
- case ARM::t2SMLALBT:
- case ARM::t2SMLALD:
- case ARM::t2SMLALDX:
- case ARM::t2SMLALTB:
- case ARM::t2SMLALTT:
- case ARM::t2SMLATB:
- case ARM::t2SMLATT:
- case ARM::t2SMLAWB:
- case ARM::t2SMLAWT:
- case ARM::t2SMLSD:
- case ARM::t2SMLSDX:
- case ARM::t2SMLSLD:
- case ARM::t2SMLSLDX:
- case ARM::t2SMMLA:
- case ARM::t2SMMLAR:
- case ARM::t2SMMLS:
- case ARM::t2SMMLSR:
- case ARM::t2SMULL:
- case ARM::t2UBFX:
- case ARM::t2UMAAL:
- case ARM::t2UMLAL:
- case ARM::t2UMULL:
- case ARM::t2USADA8: O << ", "; break;
- case ARM::PKHBT:
- case ARM::SSATlsl:
- case ARM::USATlsl:
- case ARM::t2PKHBT:
- case ARM::t2SSATlsl:
- case ARM::t2USATlsl: O << ", lsl "; break;
- case ARM::PKHTB:
- case ARM::SSATasr:
- case ARM::USATasr:
- case ARM::t2PKHTB:
- case ARM::t2SSATasr:
- case ARM::t2USATasr: O << ", asr "; break;
- case ARM::SXTAB16rr_rot:
- case ARM::SXTABrr_rot:
- case ARM::SXTAHrr_rot:
- case ARM::UXTAB16rr_rot:
- case ARM::UXTABrr_rot:
- case ARM::UXTAHrr_rot:
- case ARM::t2SXTAB16rr_rot:
- case ARM::t2SXTABrr_rot:
- case ARM::t2SXTAHrr_rot:
- case ARM::t2UXTAB16rr_rot:
- case ARM::t2UXTABrr_rot:
- case ARM::t2UXTAHrr_rot: O << ", ror "; break;
- }
- printOperand(MI, 3);
- return;
- break;
- case ARM::MOVi32imm:
- case ARM::t2MOVi32imm:
- O << ", ";
- printOperand(MI, 1, "lo16");
- O << "\n\tmovt";
- printPredicateOperand(MI, 2);
- O << "\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1, "hi16");
- return;
- break;
- case ARM::MOVsra_flag:
- case ARM::MOVsrl_flag:
- O << ", ";
- printOperand(MI, 1);
- switch (MI->getOpcode()) {
- case ARM::MOVsra_flag: O << ", asr #1"; break;
- case ARM::MOVsrl_flag: O << ", lsr #1"; break;
- }
- return;
- break;
- case ARM::STRB_PRE:
- case ARM::STRH_PRE:
- case ARM::STR_PRE:
- case ARM::t2STRB_PRE:
- case ARM::t2STRH_PRE:
- case ARM::t2STR_PRE:
- O << ", [";
- printOperand(MI, 2);
- O << ", ";
- switch (MI->getOpcode()) {
- case ARM::STRB_PRE:
- case ARM::STR_PRE: printAddrMode2OffsetOperand(MI, 3); break;
- case ARM::STRH_PRE: printAddrMode3OffsetOperand(MI, 3); break;
- case ARM::t2STRB_PRE:
- case ARM::t2STRH_PRE:
- case ARM::t2STR_PRE: printT2AddrModeImm8OffsetOperand(MI, 3); break;
- }
- O << "]!";
- return;
- break;
- case ARM::STRD_PRE:
- O << ", ";
- printOperand(MI, 2);
- O << ", [";
- printOperand(MI, 3);
- O << ", ";
- printAddrMode3OffsetOperand(MI, 4);
- O << "]!";
- return;
- break;
- case ARM::STREXD:
- case ARM::t2STREXD:
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- O << ", [";
- printOperand(MI, 3);
- O << ']';
- return;
- break;
- case ARM::SXTB16r_rot:
- case ARM::SXTBr_rot:
- case ARM::SXTHr_rot:
- case ARM::UXTB16r_rot:
- case ARM::UXTBr_rot:
- case ARM::UXTHr_rot:
- case ARM::t2SXTB16r_rot:
- case ARM::t2UXTB16r_rot:
- O << ", ";
- printOperand(MI, 1);
- O << ", ror ";
- printOperand(MI, 2);
- return;
- break;
- case ARM::VABDfd:
- case ARM::VABDfq:
- case ARM::VACGEd:
- case ARM::VACGEq:
- case ARM::VACGTd:
- case ARM::VACGTq:
- case ARM::VADDD:
- case ARM::VADDS:
- case ARM::VADDfd:
- case ARM::VADDfd_sfp:
- case ARM::VADDfq:
- case ARM::VCEQfd:
- case ARM::VCEQfq:
- case ARM::VCGEfd:
- case ARM::VCGEfq:
- case ARM::VCGTfd:
- case ARM::VCGTfq:
- case ARM::VDIVD:
- case ARM::VDIVS:
- case ARM::VMAXfd:
- case ARM::VMAXfd_sfp:
- case ARM::VMAXfq:
- case ARM::VMINfd:
- case ARM::VMINfd_sfp:
- case ARM::VMINfq:
- case ARM::VMULD:
- case ARM::VMULS:
- case ARM::VMULfd:
- case ARM::VMULfd_sfp:
- case ARM::VMULfq:
- case ARM::VNMULD:
- case ARM::VNMULS:
- case ARM::VPADDf:
- case ARM::VPMAXf:
- case ARM::VPMINf:
- case ARM::VRECPSfd:
- case ARM::VRECPSfq:
- case ARM::VRSQRTSfd:
- case ARM::VRSQRTSfq:
- case ARM::VSUBD:
- case ARM::VSUBS:
- case ARM::VSUBfd:
- case ARM::VSUBfd_sfp:
- case ARM::VSUBfq:
- case ARM::t2ADCSrr:
- case ARM::t2ADCSrs:
- case ARM::t2ADCrr:
- case ARM::t2ADCrs:
- case ARM::t2ADDSri:
- case ARM::t2ADDSrr:
- case ARM::t2ADDSrs:
- case ARM::t2ADDrSPi:
- case ARM::t2ADDrSPs:
- case ARM::t2ADDri:
- case ARM::t2ADDrr:
- case ARM::t2ADDrs:
- case ARM::t2ANDrr:
- case ARM::t2ANDrs:
- case ARM::t2ASRri:
- case ARM::t2ASRrr:
- case ARM::t2BICrr:
- case ARM::t2BICrs:
- case ARM::t2EORrr:
- case ARM::t2EORrs:
- case ARM::t2LSLri:
- case ARM::t2LSLrr:
- case ARM::t2LSRri:
- case ARM::t2LSRrr:
- case ARM::t2ORRrr:
- case ARM::t2ORRrs:
- case ARM::t2RORri:
- case ARM::t2RORrr:
- case ARM::t2RSBri:
- case ARM::t2SBCSrr:
- case ARM::t2SBCSrs:
- case ARM::t2SBCrr:
- case ARM::t2SBCrs:
- case ARM::t2SUBSri:
- case ARM::t2SUBSrr:
- case ARM::t2SUBSrs:
- case ARM::t2SUBrSPi:
- case ARM::t2SUBri:
- case ARM::t2SUBrr:
- case ARM::t2SUBrs:
- printOperand(MI, 1);
- O << ", ";
- switch (MI->getOpcode()) {
- case ARM::VABDfd:
- case ARM::VABDfq:
- case ARM::VACGEd:
- case ARM::VACGEq:
- case ARM::VACGTd:
- case ARM::VACGTq:
- case ARM::VADDD:
- case ARM::VADDS:
- case ARM::VADDfd:
- case ARM::VADDfd_sfp:
- case ARM::VADDfq:
- case ARM::VCEQfd:
- case ARM::VCEQfq:
- case ARM::VCGEfd:
- case ARM::VCGEfq:
- case ARM::VCGTfd:
- case ARM::VCGTfq:
- case ARM::VDIVD:
- case ARM::VDIVS:
- case ARM::VMAXfd:
- case ARM::VMAXfd_sfp:
- case ARM::VMAXfq:
- case ARM::VMINfd:
- case ARM::VMINfd_sfp:
- case ARM::VMINfq:
- case ARM::VMULD:
- case ARM::VMULS:
- case ARM::VMULfd:
- case ARM::VMULfd_sfp:
- case ARM::VMULfq:
- case ARM::VNMULD:
- case ARM::VNMULS:
- case ARM::VPADDf:
- case ARM::VPMAXf:
- case ARM::VPMINf:
- case ARM::VRECPSfd:
- case ARM::VRECPSfq:
- case ARM::VRSQRTSfd:
- case ARM::VRSQRTSfq:
- case ARM::VSUBD:
- case ARM::VSUBS:
- case ARM::VSUBfd:
- case ARM::VSUBfd_sfp:
- case ARM::VSUBfq:
- case ARM::t2ADCSrr:
- case ARM::t2ADCrr:
- case ARM::t2ADDSri:
- case ARM::t2ADDSrr:
- case ARM::t2ADDrSPi:
- case ARM::t2ADDri:
- case ARM::t2ADDrr:
- case ARM::t2ANDrr:
- case ARM::t2ASRri:
- case ARM::t2ASRrr:
- case ARM::t2BICrr:
- case ARM::t2EORrr:
- case ARM::t2LSLri:
- case ARM::t2LSLrr:
- case ARM::t2LSRri:
- case ARM::t2LSRrr:
- case ARM::t2ORRrr:
- case ARM::t2RORri:
- case ARM::t2RORrr:
- case ARM::t2RSBri:
- case ARM::t2SBCSrr:
- case ARM::t2SBCrr:
- case ARM::t2SUBSri:
- case ARM::t2SUBSrr:
- case ARM::t2SUBrSPi:
- case ARM::t2SUBri:
- case ARM::t2SUBrr: printOperand(MI, 2); break;
- case ARM::t2ADCSrs:
- case ARM::t2ADCrs:
- case ARM::t2ADDSrs:
- case ARM::t2ADDrSPs:
- case ARM::t2ADDrs:
- case ARM::t2ANDrs:
- case ARM::t2BICrs:
- case ARM::t2EORrs:
- case ARM::t2ORRrs:
- case ARM::t2SBCSrs:
- case ARM::t2SBCrs:
- case ARM::t2SUBSrs:
- case ARM::t2SUBrs: printT2SOOperand(MI, 2); break;
- }
- return;
- break;
- case ARM::VCEQzv2f32:
- case ARM::VCEQzv4f32:
- case ARM::VCGEzv2f32:
- case ARM::VCGEzv4f32:
- case ARM::VCGTzv2f32:
- case ARM::VCGTzv4f32:
- case ARM::VCLEzv2f32:
- case ARM::VCLEzv4f32:
- case ARM::VCLTzv2f32:
- case ARM::VCLTzv4f32:
- printOperand(MI, 1);
- O << ", #0";
- return;
- break;
- case ARM::VDUPLN16d:
- case ARM::VDUPLN16q:
- case ARM::VDUPLN32d:
- case ARM::VDUPLN32q:
- case ARM::VDUPLN8d:
- case ARM::VDUPLN8q:
- case ARM::VDUPLNfd:
- case ARM::VDUPLNfq:
- case ARM::VGETLNi32:
- O << ", ";
- printOperand(MI, 1);
- O << '[';
- printNoHashImmediate(MI, 2);
- O << ']';
- return;
- break;
- case ARM::VGETLNs16:
- case ARM::VGETLNs8:
- case ARM::VGETLNu16:
- case ARM::VGETLNu8:
- O << '[';
- printNoHashImmediate(MI, 2);
- O << ']';
- return;
- break;
- case ARM::VLD1d16:
- case ARM::VLD1d32:
- case ARM::VLD1d64:
- case ARM::VLD1d8:
- case ARM::VLD1df:
- case ARM::VST1d16:
- case ARM::VST1d32:
- case ARM::VST1d64:
- case ARM::VST1d8:
- case ARM::VST1df:
- case ARM::VST3q16a:
- case ARM::VST3q16b:
- case ARM::VST3q32a:
- case ARM::VST3q32b:
- case ARM::VST3q8a:
- case ARM::VST3q8b:
- O << "}, ";
- switch (MI->getOpcode()) {
- case ARM::VLD1d16:
- case ARM::VLD1d32:
- case ARM::VLD1d64:
- case ARM::VLD1d8:
- case ARM::VLD1df:
- case ARM::VST3q16a:
- case ARM::VST3q16b:
- case ARM::VST3q32a:
- case ARM::VST3q32b:
- case ARM::VST3q8a:
- case ARM::VST3q8b: printAddrMode6Operand(MI, 1); break;
- case ARM::VST1d16:
- case ARM::VST1d32:
- case ARM::VST1d64:
- case ARM::VST1d8:
- case ARM::VST1df: printAddrMode6Operand(MI, 0); break;
- }
- return;
- break;
- case ARM::VLD1d16Q:
- case ARM::VLD1d32Q:
- case ARM::VLD1d8Q:
- case ARM::VLD2q16:
- case ARM::VLD2q32:
- case ARM::VLD2q8:
- case ARM::VLD4d16:
- case ARM::VLD4d32:
- case ARM::VLD4d64:
- case ARM::VLD4d8:
- case ARM::VLD4q16a:
- case ARM::VLD4q16b:
- case ARM::VLD4q32a:
- case ARM::VLD4q32b:
- case ARM::VLD4q8a:
- case ARM::VLD4q8b:
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- O << ", ";
- printOperand(MI, 3);
- O << "}, ";
- switch (MI->getOpcode()) {
- case ARM::VLD1d16Q:
- case ARM::VLD1d32Q:
- case ARM::VLD1d8Q:
- case ARM::VLD2q16:
- case ARM::VLD2q32:
- case ARM::VLD2q8:
- case ARM::VLD4d16:
- case ARM::VLD4d32:
- case ARM::VLD4d64:
- case ARM::VLD4d8: printAddrMode6Operand(MI, 4); break;
- case ARM::VLD4q16a:
- case ARM::VLD4q16b:
- case ARM::VLD4q32a:
- case ARM::VLD4q32b:
- case ARM::VLD4q8a:
- case ARM::VLD4q8b: printAddrMode6Operand(MI, 5); break;
- }
- return;
- break;
- case ARM::VLD1d16T:
- case ARM::VLD1d32T:
- case ARM::VLD1d8T:
- case ARM::VLD3d16:
- case ARM::VLD3d32:
- case ARM::VLD3d64:
- case ARM::VLD3d8:
- case ARM::VLD3q16a:
- case ARM::VLD3q16b:
- case ARM::VLD3q32a:
- case ARM::VLD3q32b:
- case ARM::VLD3q8a:
- case ARM::VLD3q8b:
- O << ", ";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- O << "}, ";
- switch (MI->getOpcode()) {
- case ARM::VLD1d16T:
- case ARM::VLD1d32T:
- case ARM::VLD1d8T:
- case ARM::VLD3d16:
- case ARM::VLD3d32:
- case ARM::VLD3d64:
- case ARM::VLD3d8: printAddrMode6Operand(MI, 3); break;
- case ARM::VLD3q16a:
- case ARM::VLD3q16b:
- case ARM::VLD3q32a:
- case ARM::VLD3q32b:
- case ARM::VLD3q8a:
- case ARM::VLD3q8b: printAddrMode6Operand(MI, 4); break;
- }
- return;
- break;
- case ARM::VLD2LNd16:
- case ARM::VLD2LNd32:
- case ARM::VLD2LNd8:
- case ARM::VLD2LNq16a:
- case ARM::VLD2LNq16b:
- case ARM::VLD2LNq32a:
- case ARM::VLD2LNq32b:
- O << '[';
- printNoHashImmediate(MI, 8);
- O << "], ";
- printOperand(MI, 1);
- O << '[';
- printNoHashImmediate(MI, 8);
- O << "]}, ";
- printAddrMode6Operand(MI, 2);
- return;
- break;
- case ARM::VLD2d16:
- case ARM::VLD2d16D:
- case ARM::VLD2d32:
- case ARM::VLD2d32D:
- case ARM::VLD2d64:
- case ARM::VLD2d8:
- case ARM::VLD2d8D:
- O << ", ";
- printOperand(MI, 1);
- O << "}, ";
- printAddrMode6Operand(MI, 2);
- return;
- break;
- case ARM::VLD3LNd16:
- case ARM::VLD3LNd32:
- case ARM::VLD3LNd8:
- case ARM::VLD3LNq16a:
- case ARM::VLD3LNq16b:
- case ARM::VLD3LNq32a:
- case ARM::VLD3LNq32b:
- O << '[';
- printNoHashImmediate(MI, 10);
- O << "], ";
- printOperand(MI, 1);
- O << '[';
- printNoHashImmediate(MI, 10);
- O << "], ";
- printOperand(MI, 2);
- O << '[';
- printNoHashImmediate(MI, 10);
- O << "]}, ";
- printAddrMode6Operand(MI, 3);
- return;
- break;
- case ARM::VLD4LNd16:
- case ARM::VLD4LNd32:
- case ARM::VLD4LNd8:
- case ARM::VLD4LNq16a:
- case ARM::VLD4LNq16b:
- case ARM::VLD4LNq32a:
- case ARM::VLD4LNq32b:
- O << '[';
- printNoHashImmediate(MI, 12);
- O << "], ";
- printOperand(MI, 1);
- O << '[';
- printNoHashImmediate(MI, 12);
- O << "], ";
- printOperand(MI, 2);
- O << '[';
- printNoHashImmediate(MI, 12);
- O << "], ";
- printOperand(MI, 3);
- O << '[';
- printNoHashImmediate(MI, 12);
- O << "]}, ";
- printAddrMode6Operand(MI, 4);
- return;
- break;
- case ARM::VMLAD:
- case ARM::VMLAS:
- case ARM::VMLAfd:
- case ARM::VMLAfq:
- case ARM::VMLSD:
- case ARM::VMLSS:
- case ARM::VMLSfd:
- case ARM::VMLSfq:
- case ARM::VNMLAD:
- case ARM::VNMLAS:
- case ARM::VNMLSD:
- case ARM::VNMLSS:
- case ARM::t2MOVCCasr:
- case ARM::t2MOVCClsl:
- case ARM::t2MOVCClsr:
- case ARM::t2MOVCCror:
- printOperand(MI, 2);
- O << ", ";
- printOperand(MI, 3);
- return;
- break;
- case ARM::VMLALslsv2i32:
- case ARM::VMLALslsv4i16:
- case ARM::VMLALsluv2i32:
- case ARM::VMLALsluv4i16:
- case ARM::VMLAslv2i32:
- case ARM::VMLAslv4i16:
- case ARM::VMLAslv4i32:
- case ARM::VMLAslv8i16:
- case ARM::VMLSLslsv2i32:
- case ARM::VMLSLslsv4i16:
- case ARM::VMLSLsluv2i32:
- case ARM::VMLSLsluv4i16:
- case ARM::VMLSslv2i32:
- case ARM::VMLSslv4i16:
- case ARM::VMLSslv4i32:
- case ARM::VMLSslv8i16:
- case ARM::VQDMLALslv2i32:
- case ARM::VQDMLALslv4i16:
- case ARM::VQDMLSLslv2i32:
- case ARM::VQDMLSLslv4i16:
- O << ", ";
- printOperand(MI, 3);
- O << '[';
- printNoHashImmediate(MI, 4);
- O << ']';
- return;
- break;
- case ARM::VMLAslfd:
- case ARM::VMLAslfq:
- case ARM::VMLSslfd:
- case ARM::VMLSslfq:
- printOperand(MI, 2);
- O << ", ";
- printOperand(MI, 3);
- O << '[';
- printNoHashImmediate(MI, 4);
- O << ']';
- return;
- break;
- case ARM::VMULLslsv2i32:
- case ARM::VMULLslsv4i16:
- case ARM::VMULLsluv2i32:
- case ARM::VMULLsluv4i16:
- case ARM::VMULslv2i32:
- case ARM::VMULslv4i16:
- case ARM::VMULslv4i32:
- case ARM::VMULslv8i16:
- case ARM::VQDMULHslv2i32:
- case ARM::VQDMULHslv4i16:
- case ARM::VQDMULHslv4i32:
- case ARM::VQDMULHslv8i16:
- case ARM::VQDMULLslv2i32:
- case ARM::VQDMULLslv4i16:
- case ARM::VQRDMULHslv2i32:
- case ARM::VQRDMULHslv4i16:
- case ARM::VQRDMULHslv4i32:
- case ARM::VQRDMULHslv8i16:
- O << ", ";
- printOperand(MI, 2);
- O << '[';
- printNoHashImmediate(MI, 3);
- O << ']';
- return;
- break;
- case ARM::VMULslfd:
- case ARM::VMULslfq:
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- O << '[';
- printNoHashImmediate(MI, 3);
- O << ']';
- return;
- break;
- case ARM::VSETLNi16:
- case ARM::VSETLNi32:
- case ARM::VSETLNi8:
- O << '[';
- printNoHashImmediate(MI, 3);
- O << "], ";
- printOperand(MI, 2);
- return;
- break;
- case ARM::VST1d16Q:
- case ARM::VST1d32Q:
- case ARM::VST1d8Q:
- case ARM::VST2q16:
- case ARM::VST2q32:
- case ARM::VST2q8:
- case ARM::VST4d16:
- case ARM::VST4d32:
- case ARM::VST4d64:
- case ARM::VST4d8:
- O << ", ";
- printOperand(MI, 5);
- O << ", ";
- printOperand(MI, 6);
- O << ", ";
- printOperand(MI, 7);
- O << "}, ";
- printAddrMode6Operand(MI, 0);
- return;
- break;
- case ARM::VST1d16T:
- case ARM::VST1d32T:
- case ARM::VST1d8T:
- case ARM::VST3d16:
- case ARM::VST3d32:
- case ARM::VST3d64:
- case ARM::VST3d8:
- O << ", ";
- printOperand(MI, 5);
- O << ", ";
- printOperand(MI, 6);
- O << "}, ";
- printAddrMode6Operand(MI, 0);
- return;
- break;
- case ARM::VST2LNd16:
- case ARM::VST2LNd32:
- case ARM::VST2LNd8:
- case ARM::VST2LNq16a:
- case ARM::VST2LNq16b:
- case ARM::VST2LNq32a:
- case ARM::VST2LNq32b:
- O << '[';
- printNoHashImmediate(MI, 6);
- O << "], ";
- printOperand(MI, 5);
- O << '[';
- printNoHashImmediate(MI, 6);
- O << "]}, ";
- printAddrMode6Operand(MI, 0);
- return;
- break;
- case ARM::VST2d16:
- case ARM::VST2d16D:
- case ARM::VST2d32:
- case ARM::VST2d32D:
- case ARM::VST2d64:
- case ARM::VST2d8:
- case ARM::VST2d8D:
- O << ", ";
- printOperand(MI, 5);
- O << "}, ";
- printAddrMode6Operand(MI, 0);
- return;
- break;
- case ARM::VST3LNd16:
- case ARM::VST3LNd32:
- case ARM::VST3LNd8:
- case ARM::VST3LNq16a:
- case ARM::VST3LNq16b:
- case ARM::VST3LNq32a:
- case ARM::VST3LNq32b:
- O << '[';
- printNoHashImmediate(MI, 7);
- O << "], ";
- printOperand(MI, 5);
- O << '[';
- printNoHashImmediate(MI, 7);
- O << "], ";
- printOperand(MI, 6);
- O << '[';
- printNoHashImmediate(MI, 7);
- O << "]}, ";
- printAddrMode6Operand(MI, 0);
- return;
- break;
- case ARM::VST4LNd16:
- case ARM::VST4LNd32:
- case ARM::VST4LNd8:
- case ARM::VST4LNq16a:
- case ARM::VST4LNq16b:
- case ARM::VST4LNq32a:
- case ARM::VST4LNq32b:
- O << '[';
- printNoHashImmediate(MI, 8);
- O << "], ";
- printOperand(MI, 5);
- O << '[';
- printNoHashImmediate(MI, 8);
- O << "], ";
- printOperand(MI, 6);
- O << '[';
- printNoHashImmediate(MI, 8);
- O << "], ";
- printOperand(MI, 7);
- O << '[';
- printNoHashImmediate(MI, 8);
- O << "]}, ";
- printAddrMode6Operand(MI, 0);
- return;
- break;
- case ARM::VST4q16a:
- case ARM::VST4q16b:
- case ARM::VST4q32a:
- case ARM::VST4q32b:
- case ARM::VST4q8a:
- case ARM::VST4q8b:
- O << ", ";
- printOperand(MI, 8);
- O << "}, ";
- printAddrMode6Operand(MI, 1);
- return;
- break;
- case ARM::VTBL1:
- O << ", {";
- printOperand(MI, 1);
- O << "}, ";
- printOperand(MI, 2);
- return;
- break;
- case ARM::VTBL2:
- O << ", {";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- O << "}, ";
- printOperand(MI, 3);
- return;
- break;
- case ARM::VTBL3:
- O << ", {";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- O << ", ";
- printOperand(MI, 3);
- O << "}, ";
- printOperand(MI, 4);
- return;
- break;
- case ARM::VTBL4:
- O << ", {";
- printOperand(MI, 1);
- O << ", ";
- printOperand(MI, 2);
- O << ", ";
- printOperand(MI, 3);
- O << ", ";
- printOperand(MI, 4);
- O << "}, ";
- printOperand(MI, 5);
- return;
- break;
- case ARM::VTBX1:
- O << ", {";
- printOperand(MI, 2);
- O << "}, ";
- printOperand(MI, 3);
- return;
- break;
- case ARM::VTBX2:
- O << ", {";
- printOperand(MI, 2);
- O << ", ";
- printOperand(MI, 3);
- O << "}, ";
- printOperand(MI, 4);
- return;
- break;
- case ARM::VTBX3:
- O << ", {";
- printOperand(MI, 2);
- O << ", ";
- printOperand(MI, 3);
- O << ", ";
- printOperand(MI, 4);
- O << "}, ";
- printOperand(MI, 5);
- return;
- break;
- case ARM::VTBX4:
- O << ", {";
- printOperand(MI, 2);
- O << ", ";
- printOperand(MI, 3);
- O << ", ";
- printOperand(MI, 4);
- O << ", ";
- printOperand(MI, 5);
- O << "}, ";
- printOperand(MI, 6);
- return;
- break;
- case ARM::t2LDRpci_pic:
- case ARM::tLDRpci_pic:
- O << "\n";
- printPCLabel(MI, 2);
- O << ":\n\tadd\t";
- printOperand(MI, 0);
- O << ", pc";
- return;
- break;
- case ARM::t2LEApcrelJT:
- O << '_';
- printNoHashImmediate(MI, 2);
- return;
- break;
- case ARM::t2MOVrx:
- case ARM::t2MVNi:
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- return;
- break;
- case ARM::t2SXTBr_rot:
- case ARM::t2SXTHr_rot:
- case ARM::t2UXTBr_rot:
- case ARM::t2UXTHr_rot:
- printOperand(MI, 1);
- O << ", ror ";
- printOperand(MI, 2);
- return;
- break;
- case ARM::tLEApcrel:
- O << ", #";
- printOperand(MI, 1);
- return;
- break;
- case ARM::tLEApcrelJT:
- O << ", #";
- printOperand(MI, 1);
- O << '_';
- printNoHashImmediate(MI, 2);
- return;
- break;
- }
- return;
-}
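
The removed function above follows the usual tblgen pattern: an outer switch groups opcodes that share a common textual form, and nested inner switches pick the operand-printing routine that differs between group members. A minimal standalone sketch of that dispatch pattern is below; all opcode and helper names are illustrative stand-ins, not the real LLVM AsmPrinter interface.

#include <cstdio>

enum Opcode { ADDrr, ADDri, LDR };

static void printReg(unsigned r) { std::printf("r%u", r); }
static void printImm(int imm)    { std::printf("#%d", imm); }

// Outer switch groups opcodes with a shared prefix; the inner switch
// selects how the final operand is printed, mirroring the generated code.
static void printInst(Opcode Op, unsigned Rd, unsigned Rn, int Op2) {
  switch (Op) {
  case ADDrr:
  case ADDri:
    std::printf("add\t"); printReg(Rd); std::printf(", ");
    printReg(Rn); std::printf(", ");
    switch (Op) {
    case ADDrr: printReg((unsigned)Op2); break;
    case ADDri: printImm(Op2); break;
    default: break;
    }
    break;
  case LDR:
    std::printf("ldr\t"); printReg(Rd); std::printf(", [");
    printReg(Rn); std::printf(", "); printImm(Op2); std::printf("]");
    break;
  }
  std::printf("\n");
}

int main() {
  printInst(ADDri, 0, 1, 4);  // add  r0, r1, #4
  printInst(LDR, 2, 13, 8);   // ldr  r2, [r13, #8]
  return 0;
}
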
-
-
-/// getRegisterName - This method is automatically generated by tblgen
-/// from the register set description. This returns the assembler name
-/// for the specified register.
-const char *ARMAsmPrinter::getRegisterName(unsigned RegNo) {
- assert(RegNo && RegNo < 100 && "Invalid register number!");
-
- static const unsigned RegAsmOffset[] = {
- 0, 5, 8, 11, 15, 19, 23, 27, 31, 35, 39, 43, 47, 51,
- 54, 58, 62, 66, 70, 74, 78, 82, 86, 90, 94, 97, 101, 105,
- 108, 111, 114, 117, 120, 123, 129, 132, 135, 138, 141, 145, 149, 153,
- 157, 161, 165, 168, 171, 174, 177, 180, 183, 186, 189, 192, 195, 199,
- 203, 207, 210, 213, 216, 219, 222, 225, 228, 231, 234, 237, 241, 245,
- 249, 253, 257, 261, 265, 269, 273, 277, 280, 284, 288, 292, 296, 300,
- 304, 308, 312, 316, 320, 323, 327, 331, 334, 337, 340, 343, 346, 349,
- 358, 0
- };
-
- const char *AsmStrs =
- "cpsr\000d0\000d1\000d10\000d11\000d12\000d13\000d14\000d15\000d16\000d1"
- "7\000d18\000d19\000d2\000d20\000d21\000d22\000d23\000d24\000d25\000d26\000"
- "d27\000d28\000d29\000d3\000d30\000d31\000d4\000d5\000d6\000d7\000d8\000"
- "d9\000fpscr\000lr\000pc\000q0\000q1\000q10\000q11\000q12\000q13\000q14\000"
- "q15\000q2\000q3\000q4\000q5\000q6\000q7\000q8\000q9\000r0\000r1\000r10\000"
- "r11\000r12\000r2\000r3\000r4\000r5\000r6\000r7\000r8\000r9\000s0\000s1\000"
- "s10\000s11\000s12\000s13\000s14\000s15\000s16\000s17\000s18\000s19\000s"
- "2\000s20\000s21\000s22\000s23\000s24\000s25\000s26\000s27\000s28\000s29"
- "\000s3\000s30\000s31\000s4\000s5\000s6\000s7\000s8\000s9\000sINVALID\000"
- "sp\000";
- return AsmStrs+RegAsmOffset[RegNo-1];
-}
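
getRegisterName() above resolves a register number through an offset array into one NUL-separated packed string buffer (note it indexes with RegNo-1, since register 0 is reserved as invalid). A minimal sketch of the same packed string-table lookup follows; the names and offsets here are illustrative placeholders, not the real ARM tables.

#include <cassert>
#include <cstdio>

static const char *lookupName(unsigned Idx) {
  // Names concatenated with NUL terminators; Offsets[i] is the byte
  // offset of entry i inside the packed buffer.
  static const char Packed[] = "r0\0r1\0sp\0lr\0pc\0";
  static const unsigned Offsets[] = {0, 3, 6, 9, 12};
  assert(Idx < sizeof(Offsets) / sizeof(Offsets[0]) && "Invalid index!");
  return Packed + Offsets[Idx];
}

int main() {
  std::printf("%s\n", lookupName(2)); // prints "sp"
  return 0;
}
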
-
-
-#ifdef GET_INSTRUCTION_NAME
-#undef GET_INSTRUCTION_NAME
-
-/// getInstructionName: This method is automatically generated by tblgen
-/// from the instruction set description. This returns the enum name of the
-/// specified instruction.
-const char *ARMAsmPrinter::getInstructionName(unsigned Opcode) {
- assert(Opcode < 1949 && "Invalid instruction number!");
-
- static const unsigned InstAsmOffset[] = {
- 0, 4, 14, 24, 33, 42, 47, 62, 76, 89, 103, 120, 130, 138,
- 146, 154, 160, 166, 172, 179, 186, 193, 199, 205, 211, 228, 243, 249,
- 255, 261, 281, 301, 320, 340, 360, 379, 399, 419, 438, 459, 480, 500,
- 519, 538, 556, 576, 596, 615, 635, 655, 674, 690, 706, 721, 723, 727,
- 731, 737, 743, 749, 754, 757, 761, 767, 775, 780, 790, 799, 810, 816,
- 825, 832, 839, 842, 846, 853, 858, 862, 866, 871, 877, 881, 888, 895,
- 902, 908, 914, 920, 927, 934, 941, 957, 961, 965, 972, 981, 988, 997,
- 1004, 1013, 1019, 1026, 1035, 1042, 1051, 1058, 1067, 1073, 1079, 1085, 1091, 1099,
- 1107, 1114, 1120, 1137, 1154, 1172, 1190, 1209, 1222, 1235, 1246, 1256, 1268, 1280,
- 1290, 1299, 1311, 1323, 1333, 1342, 1353, 1364, 1373, 1381, 1385, 1393, 1397, 1402,
- 1408, 1418, 1427, 1432, 1442, 1451, 1457, 1464, 1471, 1478, 1483, 1489, 1499, 1508,
- 1514, 1521, 1532, 1542, 1548, 1555, 1566, 1576, 1581, 1590, 1598, 1604, 1613, 1624,
- 1628, 1633, 1638, 1644, 1648, 1652, 1659, 1666, 1673, 1681, 1689, 1697, 1702, 1709,
- 1721, 1731, 1736, 1742, 1747, 1759, 1771, 1775, 1780, 1785, 1791, 1795, 1802, 1806,
- 1811, 1818, 1826, 1830, 1835, 1840, 1845, 1849, 1855, 1861, 1867, 1874, 1881, 1889,
- 1897, 1906, 1915, 1922, 1930, 1938, 1944, 1950, 1956, 1962, 1967, 1972, 1977, 1982,
- 1987, 1994, 2000, 2005, 2011, 2017, 2022, 2027, 2034, 2040, 2045, 2049, 2055, 2061,
- 2065, 2070, 2077, 2084, 2090, 2096, 2103, 2110, 2116, 2122, 2129, 2135, 2140, 2148,
- 2156, 2164, 2170, 2176, 2182, 2187, 2191, 2200, 2209, 2213, 2221, 2228, 2234, 2240,
- 2248, 2255, 2259, 2266, 2273, 2279, 2286, 2292, 2300, 2308, 2315, 2323, 2331, 2339,
- 2346, 2353, 2360, 2367, 2373, 2380, 2387, 2395, 2401, 2408, 2414, 2421, 2427, 2434,
- 2440, 2447, 2454, 2461, 2467, 2474, 2481, 2488, 2495, 2501, 2508, 2512, 2517, 2524,
- 2532, 2540, 2545, 2552, 2558, 2571, 2584, 2595, 2605, 2617, 2629, 2639, 2648, 2660,
- 2672, 2682, 2691, 2702, 2713, 2722, 2730, 2734, 2738, 2743, 2749, 2759, 2768, 2773,
- 2783, 2792, 2798, 2805, 2812, 2819, 2824, 2830, 2840, 2849, 2854, 2863, 2871, 2878,
- 2885, 2892, 2898, 2904, 2910, 2914, 2918, 2923, 2933, 2947, 2955, 2967, 2975, 2987,
- 2995, 3007, 3013, 3023, 3029, 3039, 3045, 3051, 3057, 3064, 3069, 3075, 3081, 3087,
- 3094, 3100, 3105, 3110, 3118, 3125, 3131, 3137, 3145, 3152, 3158, 3164, 3170, 3178,
- 3185, 3191, 3197, 3205, 3212, 3218, 3225, 3232, 3240, 3248, 3253, 3260, 3266, 3276,
- 3290, 3298, 3310, 3318, 3330, 3338, 3350, 3356, 3366, 3372, 3382, 3394, 3406, 3418,
- 3430, 3442, 3454, 3465, 3476, 3487, 3498, 3509, 3519, 3530, 3541, 3552, 3563, 3574,
- 3584, 3596, 3608, 3620, 3632, 3644, 3656, 3663, 3670, 3681, 3692, 3703, 3714, 3725,
- 3735, 3746, 3757, 3768, 3779, 3790, 3800, 3806, 3812, 3819, 3830, 3837, 3847, 3857,
- 3867, 3877, 3887, 3896, 3903, 3910, 3917, 3924, 3930, 3942, 3954, 3965, 3977, 3989,
- 4001, 4013, 4025, 4037, 4043, 4055, 4067, 4079, 4091, 4103, 4115, 4122, 4133, 4140,
- 4150, 4160, 4170, 4180, 4190, 4200, 4210, 4219, 4225, 4231, 4237, 4243, 4249, 4255,
- 4261, 4267, 4273, 4279, 4286, 4293, 4303, 4313, 4323, 4333, 4343, 4352, 4363, 4374,
- 4385, 4396, 4407, 4418, 4429, 4439, 4446, 4453, 4464, 4475, 4486, 4497, 4508, 4518,
- 4529, 4540, 4551, 4562, 4573, 4583, 4594, 4605, 4616, 4627, 4638, 4649, 4660, 4670,
- 4677, 4684, 4695, 4706, 4717, 4728, 4739, 4749, 4760, 4771, 4782, 4793, 4804, 4814,
- 4825, 4836, 4847, 4858, 4869, 4880, 4891, 4901, 4912, 4923, 4934, 4945, 4956, 4967,
- 4978, 4988, 4998, 5008, 5018, 5028, 5038, 5047, 5058, 5069, 5080, 5091, 5102, 5113,
- 5124, 5134, 5144, 5154, 5164, 5174, 5184, 5193, 5199, 5206, 5213, 5221, 5229, 5235,
- 5242, 5249, 5255, 5261, 5269, 5277, 5284, 5291, 5299, 5307, 5316, 5329, 5338, 5347,
- 5360, 5369, 5379, 5389, 5399, 5409, 5418, 5431, 5440, 5449, 5462, 5471, 5481, 5491,
- 5501, 5511, 5517, 5523, 5531, 5539, 5547, 5555, 5562, 5569, 5579, 5589, 5599, 5609,
- 5618, 5627, 5636, 5645, 5652, 5660, 5667, 5675, 5681, 5687, 5695, 5703, 5710, 5717,
- 5725, 5733, 5740, 5747, 5757, 5767, 5776, 5786, 5795, 5807, 5819, 5831, 5843, 5855,
- 5866, 5878, 5890, 5902, 5914, 5926, 5937, 5949, 5961, 5973, 5985, 5997, 6008, 6020,
- 6032, 6044, 6056, 6068, 6079, 6087, 6096, 6105, 6113, 6122, 6131, 6139, 6146, 6154,
- 6162, 6169, 6177, 6185, 6193, 6200, 6207, 6217, 6227, 6236, 6247, 6258, 6269, 6280,
- 6288, 6297, 6305, 6314, 6322, 6329, 6337, 6345, 6353, 6360, 6370, 6380, 6389, 6400,
- 6411, 6422, 6433, 6441, 6449, 6457, 6464, 6473, 6482, 6491, 6500, 6508, 6516, 6526,
- 6536, 6545, 6556, 6567, 6578, 6589, 6597, 6605, 6613, 6620, 6629, 6638, 6647, 6656,
- 6664, 6672, 6678, 6684, 6690, 6696, 6702, 6709, 6720, 6727, 6738, 6749, 6760, 6771,
- 6782, 6792, 6803, 6814, 6825, 6836, 6847, 6857, 6864, 6875, 6882, 6893, 6904, 6915,
- 6926, 6937, 6947, 6958, 6969, 6980, 6991, 7002, 7012, 7018, 7032, 7046, 7060, 7074,
- 7086, 7098, 7110, 7122, 7134, 7146, 7152, 7159, 7166, 7175, 7184, 7196, 7208, 7220,
- 7232, 7242, 7252, 7262, 7272, 7282, 7291, 7297, 7311, 7325, 7339, 7353, 7365, 7377,
- 7389, 7401, 7413, 7425, 7431, 7438, 7445, 7454, 7463, 7475, 7487, 7499, 7511, 7521,
- 7531, 7541, 7551, 7561, 7570, 7576, 7584, 7592, 7602, 7614, 7626, 7638, 7650, 7662,
- 7674, 7685, 7696, 7706, 7712, 7720, 7728, 7735, 7741, 7748, 7756, 7764, 7774, 7784,
- 7794, 7804, 7814, 7824, 7834, 7843, 7848, 7853, 7859, 7866, 7880, 7894, 7908, 7922,
- 7934, 7946, 7958, 7970, 7982, 7994, 8000, 8007, 8018, 8025, 8032, 8039, 8048, 8057,
- 8069, 8081, 8093, 8105, 8115, 8125, 8135, 8145, 8155, 8164, 8170, 8176, 8182, 8190,
- 8196, 8204, 8213, 8220, 8231, 8240, 8249, 8258, 8267, 8275, 8283, 8290, 8297, 8304,
- 8311, 8318, 8325, 8331, 8337, 8343, 8349, 8362, 8375, 8388, 8401, 8414, 8426, 8439,
- 8452, 8465, 8478, 8491, 8503, 8516, 8529, 8542, 8555, 8568, 8580, 8593, 8606, 8619,
- 8632, 8645, 8657, 8664, 8673, 8682, 8690, 8697, 8706, 8715, 8723, 8732, 8741, 8749,
- 8756, 8765, 8774, 8782, 8791, 8800, 8808, 8819, 8830, 8841, 8852, 8863, 8873, 8885,
- 8897, 8909, 8921, 8933, 8945, 8957, 8968, 8980, 8992, 9004, 9016, 9028, 9040, 9052,
- 9063, 9078, 9093, 9106, 9119, 9134, 9149, 9162, 9175, 9190, 9205, 9220, 9235, 9248,
- 9261, 9274, 9287, 9302, 9317, 9330, 9343, 9357, 9371, 9384, 9397, 9410, 9422, 9435,
- 9448, 9460, 9471, 9482, 9493, 9504, 9515, 9525, 9541, 9557, 9573, 9589, 9603, 9617,
- 9631, 9645, 9658, 9671, 9684, 9697, 9710, 9723, 9736, 9748, 9761, 9774, 9787, 9800,
- 9813, 9826, 9839, 9851, 9865, 9879, 9892, 9906, 9920, 9933, 9947, 9961, 9974, 9987,
- 10000, 10013, 10026, 10039, 10052, 10065, 10077, 10090, 10103, 10116, 10129, 10142, 10155, 10168,
- 10180, 10192, 10204, 10216, 10228, 10240, 10252, 10264, 10275, 10288, 10301, 10314, 10327, 10340,
- 10353, 10366, 10378, 10390, 10402, 10414, 10426, 10438, 10450, 10462, 10473, 10486, 10499, 10511,
- 10524, 10537, 10549, 10562, 10575, 10587, 10599, 10611, 10623, 10635, 10647, 10659, 10671, 10682,
- 10694, 10706, 10718, 10730, 10742, 10754, 10766, 10777, 10790, 10803, 10815, 10823, 10832, 10841,
- 10849, 10858, 10867, 10876, 10885, 10895, 10904, 10914, 10923, 10933, 10943, 10952, 10961, 10971,
- 10981, 10990, 10999, 11012, 11025, 11038, 11051, 11064, 11076, 11089, 11102, 11115, 11128, 11141,
- 11153, 11165, 11177, 11189, 11201, 11213, 11225, 11237, 11248, 11260, 11272, 11284, 11296, 11308,
- 11320, 11332, 11343, 11355, 11367, 11378, 11390, 11402, 11414, 11426, 11438, 11450, 11462, 11473,
- 11485, 11497, 11509, 11521, 11533, 11545, 11557, 11568, 11577, 11587, 11597, 11606, 11616, 11626,
- 11638, 11650, 11662, 11674, 11686, 11698, 11710, 11721, 11733, 11745, 11757, 11769, 11781, 11793,
- 11805, 11816, 11829, 11842, 11854, 11864, 11874, 11883, 11892, 11901, 11909, 11921, 11933, 11945,
- 11957, 11969, 11981, 11992, 12003, 12014, 12025, 12036, 12047, 12058, 12068, 12079, 12090, 12101,
- 12112, 12123, 12134, 12145, 12155, 12166, 12177, 12188, 12199, 12210, 12221, 12232, 12242, 12253,
- 12264, 12274, 12285, 12296, 12307, 12318, 12329, 12340, 12351, 12361, 12372, 12383, 12394, 12405,
- 12416, 12427, 12438, 12448, 12455, 12462, 12469, 12476, 12486, 12496, 12506, 12516, 12526, 12536,
- 12546, 12555, 12562, 12569, 12576, 12583, 12594, 12605, 12616, 12627, 12638, 12649, 12660, 12670,
- 12681, 12692, 12703, 12714, 12725, 12736, 12747, 12757, 12767, 12777, 12787, 12797, 12807, 12817,
- 12827, 12836, 12844, 12853, 12862, 12870, 12879, 12888, 12896, 12903, 12911, 12919, 12926, 12934,
- 12942, 12950, 12957, 12964, 12974, 12984, 12993, 13004, 13015, 13026, 13037, 13045, 13054, 13062,
- 13071, 13079, 13086, 13094, 13102, 13110, 13117, 13127, 13137, 13146, 13157, 13168, 13179, 13190,
- 13198, 13206, 13214, 13221, 13230, 13239, 13248, 13257, 13265, 13273, 13283, 13293, 13302, 13313,
- 13324, 13335, 13346, 13354, 13362, 13370, 13377, 13386, 13395, 13404, 13413, 13421, 13429, 13435,
- 13441, 13447, 13453, 13459, 13465, 13477, 13489, 13500, 13512, 13524, 13536, 13548, 13560, 13572,
- 13578, 13590, 13602, 13614, 13626, 13638, 13650, 13657, 13668, 13675, 13685, 13695, 13705, 13715,
- 13725, 13735, 13745, 13754, 13760, 13766, 13772, 13778, 13784, 13790, 13796, 13802, 13808, 13814,
- 13821, 13828, 13836, 13844, 13852, 13860, 13867, 13874, 13881, 13888, 13896, 13904, 13912, 13920,
- 13927, 13934, 13942, 13950, 13957, 13965, 13973, 13980, 13990, 14000, 14010, 14020, 14030, 14039,
- 14046, 14053, 14060, 14067, 14074, 14081, 14089, 14097, 14104, 14112, 14120, 14127, 14135, 14143,
- 14150, 14158, 14166, 14173, 14177, 14181, 14187, 14196, 14205, 14214, 14222, 14230, 14238, 14247,
- 14256, 14265, 14275, 14287, 14297, 14305, 14315, 14323, 14331, 14339, 14347, 14355, 14363, 14371,
- 14375, 14381, 14387, 14395, 14403, 14411, 14419, 14425, 14431, 14439, 14445, 14454, 14463, 14472,
- 14480, 14488, 14496, 14505, 14514, 14523, 14529, 14535, 14544, 14555, 14564, 14575, 14584, 14595,
- 14603, 14612, 14623, 14632, 14643, 14652, 14663, 14671, 14679, 14687, 14695, 14703, 14708, 14727,
- 14747, 14768, 14774, 14784, 14792, 14804, 14815, 14825, 14834, 14844, 14852, 14861, 14871, 14879,
- 14888, 14897, 14906, 14914, 14926, 14937, 14947, 14956, 14966, 14974, 14983, 14996, 15008, 15019,
- 15029, 15040, 15049, 15058, 15071, 15083, 15094, 15104, 15115, 15124, 15131, 15142, 15152, 15161,
- 15169, 15178, 15191, 15198, 15209, 15222, 15230, 15238, 15246, 15254, 15260, 15266, 15277, 15286,
- 15297, 15308, 15317, 15328, 15338, 15345, 15354, 15366, 15373, 15381, 15395, 15409, 15415, 15424,
- 15430, 15439, 15445, 15452, 15459, 15466, 15472, 15480, 15488, 15496, 15504, 15512, 15520, 15528,
- 15536, 15546, 15555, 15565, 15573, 15581, 15590, 15598, 15607, 15614, 15621, 15630, 15638, 15647,
- 15654, 15661, 15668, 15677, 15685, 15692, 15700, 15708, 15715, 15722, 15731, 15739, 15746, 15752,
- 15760, 15768, 15776, 15785, 15793, 15802, 15810, 15818, 15827, 15836, 15844, 15852, 15861, 15869,
- 15876, 15885, 15894, 15903, 15911, 15919, 15927, 15934, 15941, 15947, 15953, 15963, 15972, 15980,
- 15988, 15998, 16007, 16013, 16022, 16031, 16039, 16048, 16056, 16066, 16076, 16085, 16095, 16105,
- 16115, 16124, 16133, 16142, 16151, 16159, 16168, 16177, 16187, 16195, 16204, 16212, 16221, 16229,
- 16238, 16246, 16255, 16264, 16273, 16281, 16290, 16299, 16308, 16317, 16325, 16334, 16342, 16351,
- 16359, 16368, 16377, 16387, 16397, 16404, 16413, 16421, 16427, 16435, 16447, 16458, 16468, 16477,
- 16485, 16494, 16502, 16511, 16520, 16529, 16537, 16549, 16560, 16570, 16579, 16587, 16594, 16605,
- 16615, 16624, 16632, 16639, 16648, 16657, 16666, 16676, 16688, 16701, 16712, 16722, 16733, 16741,
- 16751, 16759, 16767, 16779, 16795, 16805, 16819, 16829, 16843, 16853, 16867, 16875, 16887, 16895,
- 16907, 16913, 16922, 16928, 16937, 16945, 16953, 16961, 16970, 16978, 16986, 16994, 17003, 17011,
- 17018, 17025, 17032, 17042, 17051, 17059, 17067, 17077, 17086, 17094, 17102, 17110, 17120, 17129,
- 17137, 17145, 17155, 17164, 17172, 17181, 17190, 17200, 17210, 17217, 17226, 17234, 17246, 17262,
- 17272, 17286, 17296, 17310, 17320, 17334, 17342, 17354, 17362, 17374, 17380, 17386, 17394, 17399,
- 17408, 17415, 17422, 17431, 17439, 17448, 17455, 17463, 17471, 17480, 17498, 17514, 17519, 17526,
- 17533, 17540, 17543, 17548, 17554, 17558, 17564, 17573, 17579, 17588, 17594, 17601, 17609, 17613,
- 17621, 17636, 17642, 17647, 17653, 17659, 17664, 17670, 17678, 17685, 17691, 17700, 17708, 17715,
- 17720, 17725, 17745, 17750, 17755, 17761, 17768, 17774, 17781, 17788, 17795, 17802, 17808, 17816,
- 17828, 17836, 17846, 17858, 17865, 17872, 17879, 17886, 17894, 17902, 17917, 17924, 17936, 17949,
- 17956, 17962, 17975, 17980, 17985, 17990, 17995, 18003, 18008, 18017, 18023, 18028, 18035, 18042,
- 18047, 18052, 18061, 18066, 18076, 18086, 18091, 18096, 18101, 18107, 18114, 18120, 18127, 18133,
- 18141, 18148, 18155, 18162, 18170, 18179, 18184, 18190, 18196, 18203, 18211, 18217, 18222, 18228,
- 18234, 18239, 18244, 0
- };
-
- const char *Strs =
- "PHI\000INLINEASM\000DBG_LABEL\000EH_LABEL\000GC_LABEL\000KILL\000EXTRAC"
- "T_SUBREG\000INSERT_SUBREG\000IMPLICIT_DEF\000SUBREG_TO_REG\000COPY_TO_R"
- "EGCLASS\000DBG_VALUE\000ADCSSri\000ADCSSrr\000ADCSSrs\000ADCri\000ADCrr"
- "\000ADCrs\000ADDSri\000ADDSrr\000ADDSrs\000ADDri\000ADDrr\000ADDrs\000A"
- "DJCALLSTACKDOWN\000ADJCALLSTACKUP\000ANDri\000ANDrr\000ANDrs\000ATOMIC_"
- "CMP_SWAP_I16\000ATOMIC_CMP_SWAP_I32\000ATOMIC_CMP_SWAP_I8\000ATOMIC_LOA"
- "D_ADD_I16\000ATOMIC_LOAD_ADD_I32\000ATOMIC_LOAD_ADD_I8\000ATOMIC_LOAD_A"
- "ND_I16\000ATOMIC_LOAD_AND_I32\000ATOMIC_LOAD_AND_I8\000ATOMIC_LOAD_NAND"
- "_I16\000ATOMIC_LOAD_NAND_I32\000ATOMIC_LOAD_NAND_I8\000ATOMIC_LOAD_OR_I"
- "16\000ATOMIC_LOAD_OR_I32\000ATOMIC_LOAD_OR_I8\000ATOMIC_LOAD_SUB_I16\000"
- "ATOMIC_LOAD_SUB_I32\000ATOMIC_LOAD_SUB_I8\000ATOMIC_LOAD_XOR_I16\000ATO"
- "MIC_LOAD_XOR_I32\000ATOMIC_LOAD_XOR_I8\000ATOMIC_SWAP_I16\000ATOMIC_SWA"
- "P_I32\000ATOMIC_SWAP_I8\000B\000BFC\000BFI\000BICri\000BICrr\000BICrs\000"
- "BKPT\000BL\000BLX\000BLXr9\000BL_pred\000BLr9\000BLr9_pred\000BMOVPCRX\000"
- "BMOVPCRXr9\000BRIND\000BR_JTadd\000BR_JTm\000BR_JTr\000BX\000BXJ\000BX_"
- "RET\000BXr9\000Bcc\000CDP\000CDP2\000CLREX\000CLZ\000CMNzri\000CMNzrr\000"
- "CMNzrs\000CMPri\000CMPrr\000CMPrs\000CMPzri\000CMPzrr\000CMPzrs\000CONS"
- "TPOOL_ENTRY\000CPS\000DBG\000DMBish\000DMBishst\000DMBnsh\000DMBnshst\000"
- "DMBosh\000DMBoshst\000DMBst\000DSBish\000DSBishst\000DSBnsh\000DSBnshst"
- "\000DSBosh\000DSBoshst\000DSBst\000EORri\000EORrr\000EORrs\000FCONSTD\000"
- "FCONSTS\000FMSTAT\000ISBsy\000Int_MemBarrierV6\000Int_MemBarrierV7\000I"
- "nt_SyncBarrierV6\000Int_SyncBarrierV7\000Int_eh_sjlj_setjmp\000LDC2L_OF"
- "FSET\000LDC2L_OPTION\000LDC2L_POST\000LDC2L_PRE\000LDC2_OFFSET\000LDC2_"
- "OPTION\000LDC2_POST\000LDC2_PRE\000LDCL_OFFSET\000LDCL_OPTION\000LDCL_P"
- "OST\000LDCL_PRE\000LDC_OFFSET\000LDC_OPTION\000LDC_POST\000LDC_PRE\000L"
- "DM\000LDM_RET\000LDR\000LDRB\000LDRBT\000LDRB_POST\000LDRB_PRE\000LDRD\000"
- "LDRD_POST\000LDRD_PRE\000LDREX\000LDREXB\000LDREXD\000LDREXH\000LDRH\000"
- "LDRHT\000LDRH_POST\000LDRH_PRE\000LDRSB\000LDRSBT\000LDRSB_POST\000LDRS"
- "B_PRE\000LDRSH\000LDRSHT\000LDRSH_POST\000LDRSH_PRE\000LDRT\000LDR_POST"
- "\000LDR_PRE\000LDRcp\000LEApcrel\000LEApcrelJT\000MCR\000MCR2\000MCRR\000"
- "MCRR2\000MLA\000MLS\000MOVCCi\000MOVCCr\000MOVCCs\000MOVPCLR\000MOVPCRX"
- "\000MOVTi16\000MOVi\000MOVi16\000MOVi2pieces\000MOVi32imm\000MOVr\000MO"
- "Vrx\000MOVs\000MOVsra_flag\000MOVsrl_flag\000MRC\000MRC2\000MRRC\000MRR"
- "C2\000MRS\000MRSsys\000MSR\000MSRi\000MSRsys\000MSRsysi\000MUL\000MVNi\000"
- "MVNr\000MVNs\000NOP\000ORRri\000ORRrr\000ORRrs\000PICADD\000PICLDR\000P"
- "ICLDRB\000PICLDRH\000PICLDRSB\000PICLDRSH\000PICSTR\000PICSTRB\000PICST"
- "RH\000PKHBT\000PKHTB\000PLDWi\000PLDWr\000PLDi\000PLDr\000PLIi\000PLIr\000"
- "QADD\000QADD16\000QADD8\000QASX\000QDADD\000QDSUB\000QSAX\000QSUB\000QS"
- "UB16\000QSUB8\000RBIT\000REV\000REV16\000REVSH\000RFE\000RFEW\000RSBSri"
- "\000RSBSrs\000RSBri\000RSBrs\000RSCSri\000RSCSrs\000RSCri\000RSCrs\000S"
- "ADD16\000SADD8\000SASX\000SBCSSri\000SBCSSrr\000SBCSSrs\000SBCri\000SBC"
- "rr\000SBCrs\000SBFX\000SEL\000SETENDBE\000SETENDLE\000SEV\000SHADD16\000"
- "SHADD8\000SHASX\000SHSAX\000SHSUB16\000SHSUB8\000SMC\000SMLABB\000SMLAB"
- "T\000SMLAD\000SMLADX\000SMLAL\000SMLALBB\000SMLALBT\000SMLALD\000SMLALD"
- "X\000SMLALTB\000SMLALTT\000SMLATB\000SMLATT\000SMLAWB\000SMLAWT\000SMLS"
- "D\000SMLSDX\000SMLSLD\000SMLSLDX\000SMMLA\000SMMLAR\000SMMLS\000SMMLSR\000"
- "SMMUL\000SMMULR\000SMUAD\000SMUADX\000SMULBB\000SMULBT\000SMULL\000SMUL"
- "TB\000SMULTT\000SMULWB\000SMULWT\000SMUSD\000SMUSDX\000SRS\000SRSW\000S"
- "SAT16\000SSATasr\000SSATlsl\000SSAX\000SSUB16\000SSUB8\000STC2L_OFFSET\000"
- "STC2L_OPTION\000STC2L_POST\000STC2L_PRE\000STC2_OFFSET\000STC2_OPTION\000"
- "STC2_POST\000STC2_PRE\000STCL_OFFSET\000STCL_OPTION\000STCL_POST\000STC"
- "L_PRE\000STC_OFFSET\000STC_OPTION\000STC_POST\000STC_PRE\000STM\000STR\000"
- "STRB\000STRBT\000STRB_POST\000STRB_PRE\000STRD\000STRD_POST\000STRD_PRE"
- "\000STREX\000STREXB\000STREXD\000STREXH\000STRH\000STRHT\000STRH_POST\000"
- "STRH_PRE\000STRT\000STR_POST\000STR_PRE\000SUBSri\000SUBSrr\000SUBSrs\000"
- "SUBri\000SUBrr\000SUBrs\000SVC\000SWP\000SWPB\000SXTAB16rr\000SXTAB16rr"
- "_rot\000SXTABrr\000SXTABrr_rot\000SXTAHrr\000SXTAHrr_rot\000SXTB16r\000"
- "SXTB16r_rot\000SXTBr\000SXTBr_rot\000SXTHr\000SXTHr_rot\000TEQri\000TEQ"
- "rr\000TEQrs\000TPsoft\000TRAP\000TSTri\000TSTrr\000TSTrs\000UADD16\000U"
- "ADD8\000UASX\000UBFX\000UHADD16\000UHADD8\000UHASX\000UHSAX\000UHSUB16\000"
- "UHSUB8\000UMAAL\000UMLAL\000UMULL\000UQADD16\000UQADD8\000UQASX\000UQSA"
- "X\000UQSUB16\000UQSUB8\000USAD8\000USADA8\000USAT16\000USATasr\000USATl"
- "sl\000USAX\000USUB16\000USUB8\000UXTAB16rr\000UXTAB16rr_rot\000UXTABrr\000"
- "UXTABrr_rot\000UXTAHrr\000UXTAHrr_rot\000UXTB16r\000UXTB16r_rot\000UXTB"
- "r\000UXTBr_rot\000UXTHr\000UXTHr_rot\000VABALsv2i64\000VABALsv4i32\000V"
- "ABALsv8i16\000VABALuv2i64\000VABALuv4i32\000VABALuv8i16\000VABAsv16i8\000"
- "VABAsv2i32\000VABAsv4i16\000VABAsv4i32\000VABAsv8i16\000VABAsv8i8\000VA"
- "BAuv16i8\000VABAuv2i32\000VABAuv4i16\000VABAuv4i32\000VABAuv8i16\000VAB"
- "Auv8i8\000VABDLsv2i64\000VABDLsv4i32\000VABDLsv8i16\000VABDLuv2i64\000V"
- "ABDLuv4i32\000VABDLuv8i16\000VABDfd\000VABDfq\000VABDsv16i8\000VABDsv2i"
- "32\000VABDsv4i16\000VABDsv4i32\000VABDsv8i16\000VABDsv8i8\000VABDuv16i8"
- "\000VABDuv2i32\000VABDuv4i16\000VABDuv4i32\000VABDuv8i16\000VABDuv8i8\000"
- "VABSD\000VABSS\000VABSfd\000VABSfd_sfp\000VABSfq\000VABSv16i8\000VABSv2"
- "i32\000VABSv4i16\000VABSv4i32\000VABSv8i16\000VABSv8i8\000VACGEd\000VAC"
- "GEq\000VACGTd\000VACGTq\000VADDD\000VADDHNv2i32\000VADDHNv4i16\000VADDH"
- "Nv8i8\000VADDLsv2i64\000VADDLsv4i32\000VADDLsv8i16\000VADDLuv2i64\000VA"
- "DDLuv4i32\000VADDLuv8i16\000VADDS\000VADDWsv2i64\000VADDWsv4i32\000VADD"
- "Wsv8i16\000VADDWuv2i64\000VADDWuv4i32\000VADDWuv8i16\000VADDfd\000VADDf"
- "d_sfp\000VADDfq\000VADDv16i8\000VADDv1i64\000VADDv2i32\000VADDv2i64\000"
- "VADDv4i16\000VADDv4i32\000VADDv8i16\000VADDv8i8\000VANDd\000VANDq\000VB"
- "ICd\000VBICq\000VBIFd\000VBIFq\000VBITd\000VBITq\000VBSLd\000VBSLq\000V"
- "CEQfd\000VCEQfq\000VCEQv16i8\000VCEQv2i32\000VCEQv4i16\000VCEQv4i32\000"
- "VCEQv8i16\000VCEQv8i8\000VCEQzv16i8\000VCEQzv2f32\000VCEQzv2i32\000VCEQ"
- "zv4f32\000VCEQzv4i16\000VCEQzv4i32\000VCEQzv8i16\000VCEQzv8i8\000VCGEfd"
- "\000VCGEfq\000VCGEsv16i8\000VCGEsv2i32\000VCGEsv4i16\000VCGEsv4i32\000V"
- "CGEsv8i16\000VCGEsv8i8\000VCGEuv16i8\000VCGEuv2i32\000VCGEuv4i16\000VCG"
- "Euv4i32\000VCGEuv8i16\000VCGEuv8i8\000VCGEzv16i8\000VCGEzv2f32\000VCGEz"
- "v2i32\000VCGEzv4f32\000VCGEzv4i16\000VCGEzv4i32\000VCGEzv8i16\000VCGEzv"
- "8i8\000VCGTfd\000VCGTfq\000VCGTsv16i8\000VCGTsv2i32\000VCGTsv4i16\000VC"
- "GTsv4i32\000VCGTsv8i16\000VCGTsv8i8\000VCGTuv16i8\000VCGTuv2i32\000VCGT"
- "uv4i16\000VCGTuv4i32\000VCGTuv8i16\000VCGTuv8i8\000VCGTzv16i8\000VCGTzv"
- "2f32\000VCGTzv2i32\000VCGTzv4f32\000VCGTzv4i16\000VCGTzv4i32\000VCGTzv8"
- "i16\000VCGTzv8i8\000VCLEzv16i8\000VCLEzv2f32\000VCLEzv2i32\000VCLEzv4f3"
- "2\000VCLEzv4i16\000VCLEzv4i32\000VCLEzv8i16\000VCLEzv8i8\000VCLSv16i8\000"
- "VCLSv2i32\000VCLSv4i16\000VCLSv4i32\000VCLSv8i16\000VCLSv8i8\000VCLTzv1"
- "6i8\000VCLTzv2f32\000VCLTzv2i32\000VCLTzv4f32\000VCLTzv4i16\000VCLTzv4i"
- "32\000VCLTzv8i16\000VCLTzv8i8\000VCLZv16i8\000VCLZv2i32\000VCLZv4i16\000"
- "VCLZv4i32\000VCLZv8i16\000VCLZv8i8\000VCMPD\000VCMPED\000VCMPES\000VCMP"
- "EZD\000VCMPEZS\000VCMPS\000VCMPZD\000VCMPZS\000VCNTd\000VCNTq\000VCVTBH"
- "S\000VCVTBSH\000VCVTDS\000VCVTSD\000VCVTTHS\000VCVTTSH\000VCVTf2sd\000V"
- "CVTf2sd_sfp\000VCVTf2sq\000VCVTf2ud\000VCVTf2ud_sfp\000VCVTf2uq\000VCVT"
- "f2xsd\000VCVTf2xsq\000VCVTf2xud\000VCVTf2xuq\000VCVTs2fd\000VCVTs2fd_sf"
- "p\000VCVTs2fq\000VCVTu2fd\000VCVTu2fd_sfp\000VCVTu2fq\000VCVTxs2fd\000V"
- "CVTxs2fq\000VCVTxu2fd\000VCVTxu2fq\000VDIVD\000VDIVS\000VDUP16d\000VDUP"
- "16q\000VDUP32d\000VDUP32q\000VDUP8d\000VDUP8q\000VDUPLN16d\000VDUPLN16q"
- "\000VDUPLN32d\000VDUPLN32q\000VDUPLN8d\000VDUPLN8q\000VDUPLNfd\000VDUPL"
- "Nfq\000VDUPfd\000VDUPfdf\000VDUPfq\000VDUPfqf\000VEORd\000VEORq\000VEXT"
- "d16\000VEXTd32\000VEXTd8\000VEXTdf\000VEXTq16\000VEXTq32\000VEXTq8\000V"
- "EXTqf\000VGETLNi32\000VGETLNs16\000VGETLNs8\000VGETLNu16\000VGETLNu8\000"
- "VHADDsv16i8\000VHADDsv2i32\000VHADDsv4i16\000VHADDsv4i32\000VHADDsv8i16"
- "\000VHADDsv8i8\000VHADDuv16i8\000VHADDuv2i32\000VHADDuv4i16\000VHADDuv4"
- "i32\000VHADDuv8i16\000VHADDuv8i8\000VHSUBsv16i8\000VHSUBsv2i32\000VHSUB"
- "sv4i16\000VHSUBsv4i32\000VHSUBsv8i16\000VHSUBsv8i8\000VHSUBuv16i8\000VH"
- "SUBuv2i32\000VHSUBuv4i16\000VHSUBuv4i32\000VHSUBuv8i16\000VHSUBuv8i8\000"
- "VLD1d16\000VLD1d16Q\000VLD1d16T\000VLD1d32\000VLD1d32Q\000VLD1d32T\000V"
- "LD1d64\000VLD1d8\000VLD1d8Q\000VLD1d8T\000VLD1df\000VLD1q16\000VLD1q32\000"
- "VLD1q64\000VLD1q8\000VLD1qf\000VLD2LNd16\000VLD2LNd32\000VLD2LNd8\000VL"
- "D2LNq16a\000VLD2LNq16b\000VLD2LNq32a\000VLD2LNq32b\000VLD2d16\000VLD2d1"
- "6D\000VLD2d32\000VLD2d32D\000VLD2d64\000VLD2d8\000VLD2d8D\000VLD2q16\000"
- "VLD2q32\000VLD2q8\000VLD3LNd16\000VLD3LNd32\000VLD3LNd8\000VLD3LNq16a\000"
- "VLD3LNq16b\000VLD3LNq32a\000VLD3LNq32b\000VLD3d16\000VLD3d32\000VLD3d64"
- "\000VLD3d8\000VLD3q16a\000VLD3q16b\000VLD3q32a\000VLD3q32b\000VLD3q8a\000"
- "VLD3q8b\000VLD4LNd16\000VLD4LNd32\000VLD4LNd8\000VLD4LNq16a\000VLD4LNq1"
- "6b\000VLD4LNq32a\000VLD4LNq32b\000VLD4d16\000VLD4d32\000VLD4d64\000VLD4"
- "d8\000VLD4q16a\000VLD4q16b\000VLD4q32a\000VLD4q32b\000VLD4q8a\000VLD4q8"
- "b\000VLDMD\000VLDMS\000VLDRD\000VLDRQ\000VLDRS\000VMAXfd\000VMAXfd_sfp\000"
- "VMAXfq\000VMAXsv16i8\000VMAXsv2i32\000VMAXsv4i16\000VMAXsv4i32\000VMAXs"
- "v8i16\000VMAXsv8i8\000VMAXuv16i8\000VMAXuv2i32\000VMAXuv4i16\000VMAXuv4"
- "i32\000VMAXuv8i16\000VMAXuv8i8\000VMINfd\000VMINfd_sfp\000VMINfq\000VMI"
- "Nsv16i8\000VMINsv2i32\000VMINsv4i16\000VMINsv4i32\000VMINsv8i16\000VMIN"
- "sv8i8\000VMINuv16i8\000VMINuv2i32\000VMINuv4i16\000VMINuv4i32\000VMINuv"
- "8i16\000VMINuv8i8\000VMLAD\000VMLALslsv2i32\000VMLALslsv4i16\000VMLALsl"
- "uv2i32\000VMLALsluv4i16\000VMLALsv2i64\000VMLALsv4i32\000VMLALsv8i16\000"
- "VMLALuv2i64\000VMLALuv4i32\000VMLALuv8i16\000VMLAS\000VMLAfd\000VMLAfq\000"
- "VMLAslfd\000VMLAslfq\000VMLAslv2i32\000VMLAslv4i16\000VMLAslv4i32\000VM"
- "LAslv8i16\000VMLAv16i8\000VMLAv2i32\000VMLAv4i16\000VMLAv4i32\000VMLAv8"
- "i16\000VMLAv8i8\000VMLSD\000VMLSLslsv2i32\000VMLSLslsv4i16\000VMLSLsluv"
- "2i32\000VMLSLsluv4i16\000VMLSLsv2i64\000VMLSLsv4i32\000VMLSLsv8i16\000V"
- "MLSLuv2i64\000VMLSLuv4i32\000VMLSLuv8i16\000VMLSS\000VMLSfd\000VMLSfq\000"
- "VMLSslfd\000VMLSslfq\000VMLSslv2i32\000VMLSslv4i16\000VMLSslv4i32\000VM"
- "LSslv8i16\000VMLSv16i8\000VMLSv2i32\000VMLSv4i16\000VMLSv4i32\000VMLSv8"
- "i16\000VMLSv8i8\000VMOVD\000VMOVDRR\000VMOVDcc\000VMOVDneon\000VMOVLsv2"
- "i64\000VMOVLsv4i32\000VMOVLsv8i16\000VMOVLuv2i64\000VMOVLuv4i32\000VMOV"
- "Luv8i16\000VMOVNv2i32\000VMOVNv4i16\000VMOVNv8i8\000VMOVQ\000VMOVRRD\000"
- "VMOVRRS\000VMOVRS\000VMOVS\000VMOVSR\000VMOVSRR\000VMOVScc\000VMOVv16i8"
- "\000VMOVv1i64\000VMOVv2i32\000VMOVv2i64\000VMOVv4i16\000VMOVv4i32\000VM"
- "OVv8i16\000VMOVv8i8\000VMRS\000VMSR\000VMULD\000VMULLp\000VMULLslsv2i32"
- "\000VMULLslsv4i16\000VMULLsluv2i32\000VMULLsluv4i16\000VMULLsv2i64\000V"
- "MULLsv4i32\000VMULLsv8i16\000VMULLuv2i64\000VMULLuv4i32\000VMULLuv8i16\000"
- "VMULS\000VMULfd\000VMULfd_sfp\000VMULfq\000VMULpd\000VMULpq\000VMULslfd"
- "\000VMULslfq\000VMULslv2i32\000VMULslv4i16\000VMULslv4i32\000VMULslv8i1"
- "6\000VMULv16i8\000VMULv2i32\000VMULv4i16\000VMULv4i32\000VMULv8i16\000V"
- "MULv8i8\000VMVNd\000VMVNq\000VNEGD\000VNEGDcc\000VNEGS\000VNEGScc\000VN"
- "EGf32q\000VNEGfd\000VNEGfd_sfp\000VNEGs16d\000VNEGs16q\000VNEGs32d\000V"
- "NEGs32q\000VNEGs8d\000VNEGs8q\000VNMLAD\000VNMLAS\000VNMLSD\000VNMLSS\000"
- "VNMULD\000VNMULS\000VORNd\000VORNq\000VORRd\000VORRq\000VPADALsv16i8\000"
- "VPADALsv2i32\000VPADALsv4i16\000VPADALsv4i32\000VPADALsv8i16\000VPADALs"
- "v8i8\000VPADALuv16i8\000VPADALuv2i32\000VPADALuv4i16\000VPADALuv4i32\000"
- "VPADALuv8i16\000VPADALuv8i8\000VPADDLsv16i8\000VPADDLsv2i32\000VPADDLsv"
- "4i16\000VPADDLsv4i32\000VPADDLsv8i16\000VPADDLsv8i8\000VPADDLuv16i8\000"
- "VPADDLuv2i32\000VPADDLuv4i16\000VPADDLuv4i32\000VPADDLuv8i16\000VPADDLu"
- "v8i8\000VPADDf\000VPADDi16\000VPADDi32\000VPADDi8\000VPMAXf\000VPMAXs16"
- "\000VPMAXs32\000VPMAXs8\000VPMAXu16\000VPMAXu32\000VPMAXu8\000VPMINf\000"
- "VPMINs16\000VPMINs32\000VPMINs8\000VPMINu16\000VPMINu32\000VPMINu8\000V"
- "QABSv16i8\000VQABSv2i32\000VQABSv4i16\000VQABSv4i32\000VQABSv8i16\000VQ"
- "ABSv8i8\000VQADDsv16i8\000VQADDsv1i64\000VQADDsv2i32\000VQADDsv2i64\000"
- "VQADDsv4i16\000VQADDsv4i32\000VQADDsv8i16\000VQADDsv8i8\000VQADDuv16i8\000"
- "VQADDuv1i64\000VQADDuv2i32\000VQADDuv2i64\000VQADDuv4i16\000VQADDuv4i32"
- "\000VQADDuv8i16\000VQADDuv8i8\000VQDMLALslv2i32\000VQDMLALslv4i16\000VQ"
- "DMLALv2i64\000VQDMLALv4i32\000VQDMLSLslv2i32\000VQDMLSLslv4i16\000VQDML"
- "SLv2i64\000VQDMLSLv4i32\000VQDMULHslv2i32\000VQDMULHslv4i16\000VQDMULHs"
- "lv4i32\000VQDMULHslv8i16\000VQDMULHv2i32\000VQDMULHv4i16\000VQDMULHv4i3"
- "2\000VQDMULHv8i16\000VQDMULLslv2i32\000VQDMULLslv4i16\000VQDMULLv2i64\000"
- "VQDMULLv4i32\000VQMOVNsuv2i32\000VQMOVNsuv4i16\000VQMOVNsuv8i8\000VQMOV"
- "Nsv2i32\000VQMOVNsv4i16\000VQMOVNsv8i8\000VQMOVNuv2i32\000VQMOVNuv4i16\000"
- "VQMOVNuv8i8\000VQNEGv16i8\000VQNEGv2i32\000VQNEGv4i16\000VQNEGv4i32\000"
- "VQNEGv8i16\000VQNEGv8i8\000VQRDMULHslv2i32\000VQRDMULHslv4i16\000VQRDMU"
- "LHslv4i32\000VQRDMULHslv8i16\000VQRDMULHv2i32\000VQRDMULHv4i16\000VQRDM"
- "ULHv4i32\000VQRDMULHv8i16\000VQRSHLsv16i8\000VQRSHLsv1i64\000VQRSHLsv2i"
- "32\000VQRSHLsv2i64\000VQRSHLsv4i16\000VQRSHLsv4i32\000VQRSHLsv8i16\000V"
- "QRSHLsv8i8\000VQRSHLuv16i8\000VQRSHLuv1i64\000VQRSHLuv2i32\000VQRSHLuv2"
- "i64\000VQRSHLuv4i16\000VQRSHLuv4i32\000VQRSHLuv8i16\000VQRSHLuv8i8\000V"
- "QRSHRNsv2i32\000VQRSHRNsv4i16\000VQRSHRNsv8i8\000VQRSHRNuv2i32\000VQRSH"
- "RNuv4i16\000VQRSHRNuv8i8\000VQRSHRUNv2i32\000VQRSHRUNv4i16\000VQRSHRUNv"
- "8i8\000VQSHLsiv16i8\000VQSHLsiv1i64\000VQSHLsiv2i32\000VQSHLsiv2i64\000"
- "VQSHLsiv4i16\000VQSHLsiv4i32\000VQSHLsiv8i16\000VQSHLsiv8i8\000VQSHLsuv"
- "16i8\000VQSHLsuv1i64\000VQSHLsuv2i32\000VQSHLsuv2i64\000VQSHLsuv4i16\000"
- "VQSHLsuv4i32\000VQSHLsuv8i16\000VQSHLsuv8i8\000VQSHLsv16i8\000VQSHLsv1i"
- "64\000VQSHLsv2i32\000VQSHLsv2i64\000VQSHLsv4i16\000VQSHLsv4i32\000VQSHL"
- "sv8i16\000VQSHLsv8i8\000VQSHLuiv16i8\000VQSHLuiv1i64\000VQSHLuiv2i32\000"
- "VQSHLuiv2i64\000VQSHLuiv4i16\000VQSHLuiv4i32\000VQSHLuiv8i16\000VQSHLui"
- "v8i8\000VQSHLuv16i8\000VQSHLuv1i64\000VQSHLuv2i32\000VQSHLuv2i64\000VQS"
- "HLuv4i16\000VQSHLuv4i32\000VQSHLuv8i16\000VQSHLuv8i8\000VQSHRNsv2i32\000"
- "VQSHRNsv4i16\000VQSHRNsv8i8\000VQSHRNuv2i32\000VQSHRNuv4i16\000VQSHRNuv"
- "8i8\000VQSHRUNv2i32\000VQSHRUNv4i16\000VQSHRUNv8i8\000VQSUBsv16i8\000VQ"
- "SUBsv1i64\000VQSUBsv2i32\000VQSUBsv2i64\000VQSUBsv4i16\000VQSUBsv4i32\000"
- "VQSUBsv8i16\000VQSUBsv8i8\000VQSUBuv16i8\000VQSUBuv1i64\000VQSUBuv2i32\000"
- "VQSUBuv2i64\000VQSUBuv4i16\000VQSUBuv4i32\000VQSUBuv8i16\000VQSUBuv8i8\000"
- "VRADDHNv2i32\000VRADDHNv4i16\000VRADDHNv8i8\000VRECPEd\000VRECPEfd\000V"
- "RECPEfq\000VRECPEq\000VRECPSfd\000VRECPSfq\000VREV16d8\000VREV16q8\000V"
- "REV32d16\000VREV32d8\000VREV32q16\000VREV32q8\000VREV64d16\000VREV64d32"
- "\000VREV64d8\000VREV64df\000VREV64q16\000VREV64q32\000VREV64q8\000VREV6"
- "4qf\000VRHADDsv16i8\000VRHADDsv2i32\000VRHADDsv4i16\000VRHADDsv4i32\000"
- "VRHADDsv8i16\000VRHADDsv8i8\000VRHADDuv16i8\000VRHADDuv2i32\000VRHADDuv"
- "4i16\000VRHADDuv4i32\000VRHADDuv8i16\000VRHADDuv8i8\000VRSHLsv16i8\000V"
- "RSHLsv1i64\000VRSHLsv2i32\000VRSHLsv2i64\000VRSHLsv4i16\000VRSHLsv4i32\000"
- "VRSHLsv8i16\000VRSHLsv8i8\000VRSHLuv16i8\000VRSHLuv1i64\000VRSHLuv2i32\000"
- "VRSHLuv2i64\000VRSHLuv4i16\000VRSHLuv4i32\000VRSHLuv8i16\000VRSHLuv8i8\000"
- "VRSHRNv2i32\000VRSHRNv4i16\000VRSHRNv8i8\000VRSHRsv16i8\000VRSHRsv1i64\000"
- "VRSHRsv2i32\000VRSHRsv2i64\000VRSHRsv4i16\000VRSHRsv4i32\000VRSHRsv8i16"
- "\000VRSHRsv8i8\000VRSHRuv16i8\000VRSHRuv1i64\000VRSHRuv2i32\000VRSHRuv2"
- "i64\000VRSHRuv4i16\000VRSHRuv4i32\000VRSHRuv8i16\000VRSHRuv8i8\000VRSQR"
- "TEd\000VRSQRTEfd\000VRSQRTEfq\000VRSQRTEq\000VRSQRTSfd\000VRSQRTSfq\000"
- "VRSRAsv16i8\000VRSRAsv1i64\000VRSRAsv2i32\000VRSRAsv2i64\000VRSRAsv4i16"
- "\000VRSRAsv4i32\000VRSRAsv8i16\000VRSRAsv8i8\000VRSRAuv16i8\000VRSRAuv1"
- "i64\000VRSRAuv2i32\000VRSRAuv2i64\000VRSRAuv4i16\000VRSRAuv4i32\000VRSR"
- "Auv8i16\000VRSRAuv8i8\000VRSUBHNv2i32\000VRSUBHNv4i16\000VRSUBHNv8i8\000"
- "VSETLNi16\000VSETLNi32\000VSETLNi8\000VSHLLi16\000VSHLLi32\000VSHLLi8\000"
- "VSHLLsv2i64\000VSHLLsv4i32\000VSHLLsv8i16\000VSHLLuv2i64\000VSHLLuv4i32"
- "\000VSHLLuv8i16\000VSHLiv16i8\000VSHLiv1i64\000VSHLiv2i32\000VSHLiv2i64"
- "\000VSHLiv4i16\000VSHLiv4i32\000VSHLiv8i16\000VSHLiv8i8\000VSHLsv16i8\000"
- "VSHLsv1i64\000VSHLsv2i32\000VSHLsv2i64\000VSHLsv4i16\000VSHLsv4i32\000V"
- "SHLsv8i16\000VSHLsv8i8\000VSHLuv16i8\000VSHLuv1i64\000VSHLuv2i32\000VSH"
- "Luv2i64\000VSHLuv4i16\000VSHLuv4i32\000VSHLuv8i16\000VSHLuv8i8\000VSHRN"
- "v2i32\000VSHRNv4i16\000VSHRNv8i8\000VSHRsv16i8\000VSHRsv1i64\000VSHRsv2"
- "i32\000VSHRsv2i64\000VSHRsv4i16\000VSHRsv4i32\000VSHRsv8i16\000VSHRsv8i"
- "8\000VSHRuv16i8\000VSHRuv1i64\000VSHRuv2i32\000VSHRuv2i64\000VSHRuv4i16"
- "\000VSHRuv4i32\000VSHRuv8i16\000VSHRuv8i8\000VSHTOD\000VSHTOS\000VSITOD"
- "\000VSITOS\000VSLIv16i8\000VSLIv1i64\000VSLIv2i32\000VSLIv2i64\000VSLIv"
- "4i16\000VSLIv4i32\000VSLIv8i16\000VSLIv8i8\000VSLTOD\000VSLTOS\000VSQRT"
- "D\000VSQRTS\000VSRAsv16i8\000VSRAsv1i64\000VSRAsv2i32\000VSRAsv2i64\000"
- "VSRAsv4i16\000VSRAsv4i32\000VSRAsv8i16\000VSRAsv8i8\000VSRAuv16i8\000VS"
- "RAuv1i64\000VSRAuv2i32\000VSRAuv2i64\000VSRAuv4i16\000VSRAuv4i32\000VSR"
- "Auv8i16\000VSRAuv8i8\000VSRIv16i8\000VSRIv1i64\000VSRIv2i32\000VSRIv2i6"
- "4\000VSRIv4i16\000VSRIv4i32\000VSRIv8i16\000VSRIv8i8\000VST1d16\000VST1"
- "d16Q\000VST1d16T\000VST1d32\000VST1d32Q\000VST1d32T\000VST1d64\000VST1d"
- "8\000VST1d8Q\000VST1d8T\000VST1df\000VST1q16\000VST1q32\000VST1q64\000V"
- "ST1q8\000VST1qf\000VST2LNd16\000VST2LNd32\000VST2LNd8\000VST2LNq16a\000"
- "VST2LNq16b\000VST2LNq32a\000VST2LNq32b\000VST2d16\000VST2d16D\000VST2d3"
- "2\000VST2d32D\000VST2d64\000VST2d8\000VST2d8D\000VST2q16\000VST2q32\000"
- "VST2q8\000VST3LNd16\000VST3LNd32\000VST3LNd8\000VST3LNq16a\000VST3LNq16"
- "b\000VST3LNq32a\000VST3LNq32b\000VST3d16\000VST3d32\000VST3d64\000VST3d"
- "8\000VST3q16a\000VST3q16b\000VST3q32a\000VST3q32b\000VST3q8a\000VST3q8b"
- "\000VST4LNd16\000VST4LNd32\000VST4LNd8\000VST4LNq16a\000VST4LNq16b\000V"
- "ST4LNq32a\000VST4LNq32b\000VST4d16\000VST4d32\000VST4d64\000VST4d8\000V"
- "ST4q16a\000VST4q16b\000VST4q32a\000VST4q32b\000VST4q8a\000VST4q8b\000VS"
- "TMD\000VSTMS\000VSTRD\000VSTRQ\000VSTRS\000VSUBD\000VSUBHNv2i32\000VSUB"
- "HNv4i16\000VSUBHNv8i8\000VSUBLsv2i64\000VSUBLsv4i32\000VSUBLsv8i16\000V"
- "SUBLuv2i64\000VSUBLuv4i32\000VSUBLuv8i16\000VSUBS\000VSUBWsv2i64\000VSU"
- "BWsv4i32\000VSUBWsv8i16\000VSUBWuv2i64\000VSUBWuv4i32\000VSUBWuv8i16\000"
- "VSUBfd\000VSUBfd_sfp\000VSUBfq\000VSUBv16i8\000VSUBv1i64\000VSUBv2i32\000"
- "VSUBv2i64\000VSUBv4i16\000VSUBv4i32\000VSUBv8i16\000VSUBv8i8\000VSWPd\000"
- "VSWPq\000VTBL1\000VTBL2\000VTBL3\000VTBL4\000VTBX1\000VTBX2\000VTBX3\000"
- "VTBX4\000VTOSHD\000VTOSHS\000VTOSIRD\000VTOSIRS\000VTOSIZD\000VTOSIZS\000"
- "VTOSLD\000VTOSLS\000VTOUHD\000VTOUHS\000VTOUIRD\000VTOUIRS\000VTOUIZD\000"
- "VTOUIZS\000VTOULD\000VTOULS\000VTRNd16\000VTRNd32\000VTRNd8\000VTRNq16\000"
- "VTRNq32\000VTRNq8\000VTSTv16i8\000VTSTv2i32\000VTSTv4i16\000VTSTv4i32\000"
- "VTSTv8i16\000VTSTv8i8\000VUHTOD\000VUHTOS\000VUITOD\000VUITOS\000VULTOD"
- "\000VULTOS\000VUZPd16\000VUZPd32\000VUZPd8\000VUZPq16\000VUZPq32\000VUZ"
- "Pq8\000VZIPd16\000VZIPd32\000VZIPd8\000VZIPq16\000VZIPq32\000VZIPq8\000"
- "WFE\000WFI\000YIELD\000t2ADCSri\000t2ADCSrr\000t2ADCSrs\000t2ADCri\000t"
- "2ADCrr\000t2ADCrs\000t2ADDSri\000t2ADDSrr\000t2ADDSrs\000t2ADDrSPi\000t"
- "2ADDrSPi12\000t2ADDrSPs\000t2ADDri\000t2ADDri12\000t2ADDrr\000t2ADDrs\000"
- "t2ANDri\000t2ANDrr\000t2ANDrs\000t2ASRri\000t2ASRrr\000t2B\000t2BFC\000"
- "t2BFI\000t2BICri\000t2BICrr\000t2BICrs\000t2BR_JT\000t2BXJ\000t2Bcc\000"
- "t2CLREX\000t2CLZ\000t2CMNzri\000t2CMNzrr\000t2CMNzrs\000t2CMPri\000t2CM"
- "Prr\000t2CMPrs\000t2CMPzri\000t2CMPzrr\000t2CMPzrs\000t2CPS\000t2DBG\000"
- "t2DMBish\000t2DMBishst\000t2DMBnsh\000t2DMBnshst\000t2DMBosh\000t2DMBos"
- "hst\000t2DMBst\000t2DSBish\000t2DSBishst\000t2DSBnsh\000t2DSBnshst\000t"
- "2DSBosh\000t2DSBoshst\000t2DSBst\000t2EORri\000t2EORrr\000t2EORrs\000t2"
- "ISBsy\000t2IT\000t2Int_MemBarrierV7\000t2Int_SyncBarrierV7\000t2Int_eh_"
- "sjlj_setjmp\000t2LDM\000t2LDM_RET\000t2LDRBT\000t2LDRB_POST\000t2LDRB_P"
- "RE\000t2LDRBi12\000t2LDRBi8\000t2LDRBpci\000t2LDRBs\000t2LDRDi8\000t2LD"
- "RDpci\000t2LDREX\000t2LDREXB\000t2LDREXD\000t2LDREXH\000t2LDRHT\000t2LD"
- "RH_POST\000t2LDRH_PRE\000t2LDRHi12\000t2LDRHi8\000t2LDRHpci\000t2LDRHs\000"
- "t2LDRSBT\000t2LDRSB_POST\000t2LDRSB_PRE\000t2LDRSBi12\000t2LDRSBi8\000t"
- "2LDRSBpci\000t2LDRSBs\000t2LDRSHT\000t2LDRSH_POST\000t2LDRSH_PRE\000t2L"
- "DRSHi12\000t2LDRSHi8\000t2LDRSHpci\000t2LDRSHs\000t2LDRT\000t2LDR_POST\000"
- "t2LDR_PRE\000t2LDRi12\000t2LDRi8\000t2LDRpci\000t2LDRpci_pic\000t2LDRs\000"
- "t2LEApcrel\000t2LEApcrelJT\000t2LSLri\000t2LSLrr\000t2LSRri\000t2LSRrr\000"
- "t2MLA\000t2MLS\000t2MOVCCasr\000t2MOVCCi\000t2MOVCClsl\000t2MOVCClsr\000"
- "t2MOVCCr\000t2MOVCCror\000t2MOVTi16\000t2MOVi\000t2MOVi16\000t2MOVi32im"
- "m\000t2MOVr\000t2MOVrx\000t2MOVsra_flag\000t2MOVsrl_flag\000t2MRS\000t2"
- "MRSsys\000t2MSR\000t2MSRsys\000t2MUL\000t2MVNi\000t2MVNr\000t2MVNs\000t"
- "2NOP\000t2ORNri\000t2ORNrr\000t2ORNrs\000t2ORRri\000t2ORRrr\000t2ORRrs\000"
- "t2PKHBT\000t2PKHTB\000t2PLDWi12\000t2PLDWi8\000t2PLDWpci\000t2PLDWr\000"
- "t2PLDWs\000t2PLDi12\000t2PLDi8\000t2PLDpci\000t2PLDr\000t2PLDs\000t2PLI"
- "i12\000t2PLIi8\000t2PLIpci\000t2PLIr\000t2PLIs\000t2QADD\000t2QADD16\000"
- "t2QADD8\000t2QASX\000t2QDADD\000t2QDSUB\000t2QSAX\000t2QSUB\000t2QSUB16"
- "\000t2QSUB8\000t2RBIT\000t2REV\000t2REV16\000t2REVSH\000t2RFEDB\000t2RF"
- "EDBW\000t2RFEIA\000t2RFEIAW\000t2RORri\000t2RORrr\000t2RSBSri\000t2RSBS"
- "rs\000t2RSBri\000t2RSBrs\000t2SADD16\000t2SADD8\000t2SASX\000t2SBCSri\000"
- "t2SBCSrr\000t2SBCSrs\000t2SBCri\000t2SBCrr\000t2SBCrs\000t2SBFX\000t2SD"
- "IV\000t2SEL\000t2SEV\000t2SHADD16\000t2SHADD8\000t2SHASX\000t2SHSAX\000"
- "t2SHSUB16\000t2SHSUB8\000t2SMC\000t2SMLABB\000t2SMLABT\000t2SMLAD\000t2"
- "SMLADX\000t2SMLAL\000t2SMLALBB\000t2SMLALBT\000t2SMLALD\000t2SMLALDX\000"
- "t2SMLALTB\000t2SMLALTT\000t2SMLATB\000t2SMLATT\000t2SMLAWB\000t2SMLAWT\000"
- "t2SMLSD\000t2SMLSDX\000t2SMLSLD\000t2SMLSLDX\000t2SMMLA\000t2SMMLAR\000"
- "t2SMMLS\000t2SMMLSR\000t2SMMUL\000t2SMMULR\000t2SMUAD\000t2SMUADX\000t2"
- "SMULBB\000t2SMULBT\000t2SMULL\000t2SMULTB\000t2SMULTT\000t2SMULWB\000t2"
- "SMULWT\000t2SMUSD\000t2SMUSDX\000t2SRSDB\000t2SRSDBW\000t2SRSIA\000t2SR"
- "SIAW\000t2SSAT16\000t2SSATasr\000t2SSATlsl\000t2SSAX\000t2SSUB16\000t2S"
- "SUB8\000t2STM\000t2STRBT\000t2STRB_POST\000t2STRB_PRE\000t2STRBi12\000t"
- "2STRBi8\000t2STRBs\000t2STRDi8\000t2STREX\000t2STREXB\000t2STREXD\000t2"
- "STREXH\000t2STRHT\000t2STRH_POST\000t2STRH_PRE\000t2STRHi12\000t2STRHi8"
- "\000t2STRHs\000t2STRT\000t2STR_POST\000t2STR_PRE\000t2STRi12\000t2STRi8"
- "\000t2STRs\000t2SUBSri\000t2SUBSrr\000t2SUBSrs\000t2SUBrSPi\000t2SUBrSP"
- "i12\000t2SUBrSPi12_\000t2SUBrSPi_\000t2SUBrSPs\000t2SUBrSPs_\000t2SUBri"
- "\000t2SUBri12\000t2SUBrr\000t2SUBrs\000t2SXTAB16rr\000t2SXTAB16rr_rot\000"
- "t2SXTABrr\000t2SXTABrr_rot\000t2SXTAHrr\000t2SXTAHrr_rot\000t2SXTB16r\000"
- "t2SXTB16r_rot\000t2SXTBr\000t2SXTBr_rot\000t2SXTHr\000t2SXTHr_rot\000t2"
- "TBB\000t2TBBgen\000t2TBH\000t2TBHgen\000t2TEQri\000t2TEQrr\000t2TEQrs\000"
- "t2TPsoft\000t2TSTri\000t2TSTrr\000t2TSTrs\000t2UADD16\000t2UADD8\000t2U"
- "ASX\000t2UBFX\000t2UDIV\000t2UHADD16\000t2UHADD8\000t2UHASX\000t2UHSAX\000"
- "t2UHSUB16\000t2UHSUB8\000t2UMAAL\000t2UMLAL\000t2UMULL\000t2UQADD16\000"
- "t2UQADD8\000t2UQASX\000t2UQSAX\000t2UQSUB16\000t2UQSUB8\000t2USAD8\000t"
- "2USADA8\000t2USAT16\000t2USATasr\000t2USATlsl\000t2USAX\000t2USUB16\000"
- "t2USUB8\000t2UXTAB16rr\000t2UXTAB16rr_rot\000t2UXTABrr\000t2UXTABrr_rot"
- "\000t2UXTAHrr\000t2UXTAHrr_rot\000t2UXTB16r\000t2UXTB16r_rot\000t2UXTBr"
- "\000t2UXTBr_rot\000t2UXTHr\000t2UXTHr_rot\000t2WFE\000t2WFI\000t2YIELD\000"
- "tADC\000tADDhirr\000tADDi3\000tADDi8\000tADDrPCi\000tADDrSP\000tADDrSPi"
- "\000tADDrr\000tADDspi\000tADDspr\000tADDspr_\000tADJCALLSTACKDOWN\000tA"
- "DJCALLSTACKUP\000tAND\000tANDsp\000tASRri\000tASRrr\000tB\000tBIC\000tB"
- "KPT\000tBL\000tBLXi\000tBLXi_r9\000tBLXr\000tBLXr_r9\000tBLr9\000tBRIND"
- "\000tBR_JTr\000tBX\000tBX_RET\000tBX_RET_vararg\000tBXr9\000tBcc\000tBf"
- "ar\000tCBNZ\000tCBZ\000tCMNz\000tCMPhir\000tCMPi8\000tCMPr\000tCMPzhir\000"
- "tCMPzi8\000tCMPzr\000tCPS\000tEOR\000tInt_eh_sjlj_setjmp\000tLDM\000tLD"
- "R\000tLDRB\000tLDRBi\000tLDRH\000tLDRHi\000tLDRSB\000tLDRSH\000tLDRcp\000"
- "tLDRi\000tLDRpci\000tLDRpci_pic\000tLDRspi\000tLEApcrel\000tLEApcrelJT\000"
- "tLSLri\000tLSLrr\000tLSRri\000tLSRrr\000tMOVCCi\000tMOVCCr\000tMOVCCr_p"
- "seudo\000tMOVSr\000tMOVgpr2gpr\000tMOVgpr2tgpr\000tMOVi8\000tMOVr\000tM"
- "OVtgpr2gpr\000tMUL\000tMVN\000tNOP\000tORR\000tPICADD\000tPOP\000tPOP_R"
- "ET\000tPUSH\000tREV\000tREV16\000tREVSH\000tROR\000tRSB\000tRestore\000"
- "tSBC\000tSETENDBE\000tSETENDLE\000tSEV\000tSTM\000tSTR\000tSTRB\000tSTR"
- "Bi\000tSTRH\000tSTRHi\000tSTRi\000tSTRspi\000tSUBi3\000tSUBi8\000tSUBrr"
- "\000tSUBspi\000tSUBspi_\000tSVC\000tSXTB\000tSXTH\000tSpill\000tTPsoft\000"
- "tTRAP\000tTST\000tUXTB\000tUXTH\000tWFE\000tWFI\000tYIELD\000";
- return Strs+InstAsmOffset[Opcode];
-}
-
-#endif
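The two accessors deleted above (getRegisterName and getInstructionName) share one tblgen idiom: every name is packed into a single NUL-separated character array and an offset table maps the register or opcode number to the start of its name, so a lookup is one pointer addition instead of an array of string pointers. The following is a minimal standalone sketch of that idiom, using a hypothetical three-entry table rather than the ARM data above; none of these names come from the generated files.

#include <cassert>
#include <cstdio>

// Hypothetical three-entry table; the real ones are emitted by tblgen.
static const unsigned NameOffset[] = { 0, 3, 6 };   // start of each packed name
static const char Names[] = "r0\0" "r1\0" "pc";     // one NUL-separated string

static const char *getName(unsigned Idx) {
  assert(Idx < 3 && "Invalid index!");
  return Names + NameOffset[Idx];                   // one addition, no pointer table
}

int main() {
  std::printf("%s %s %s\n", getName(0), getName(1), getName(2));  // prints: r0 r1 pc
  return 0;
}

One reason generators favor this layout is that a flat character array needs no per-entry pointer relocations, which keeps the large generated .inc files cheap to load.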
diff --git a/libclamav/c++/ARMGenCallingConv.inc b/libclamav/c++/ARMGenCallingConv.inc
deleted file mode 100644
index 0845027..0000000
--- a/libclamav/c++/ARMGenCallingConv.inc
+++ /dev/null
@@ -1,470 +0,0 @@
-//===- TableGen'erated file -------------------------------------*- C++ -*-===//
-//
-// Calling Convention Implementation Fragment
-//
-// Automatically generated file, do not edit!
-//
-//===----------------------------------------------------------------------===//
-
-static bool CC_ARM_AAPCS(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
-static bool CC_ARM_AAPCS_Common(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
-static bool CC_ARM_AAPCS_VFP(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
-static bool CC_ARM_APCS(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
-static bool RetCC_ARM_AAPCS(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
-static bool RetCC_ARM_AAPCS_Common(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
-static bool RetCC_ARM_AAPCS_VFP(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
-static bool RetCC_ARM_APCS(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State);
-
-
-static bool CC_ARM_AAPCS(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
-
- if (LocVT == MVT::v1i64 ||
- LocVT == MVT::v2i32 ||
- LocVT == MVT::v4i16 ||
- LocVT == MVT::v8i8 ||
- LocVT == MVT::v2f32) {
- LocVT = MVT::f64;
- LocInfo = CCValAssign::BCvt;
- }
-
- if (LocVT == MVT::v2i64 ||
- LocVT == MVT::v4i32 ||
- LocVT == MVT::v8i16 ||
- LocVT == MVT::v16i8 ||
- LocVT == MVT::v4f32) {
- LocVT = MVT::v2f64;
- LocInfo = CCValAssign::BCvt;
- }
-
- if (LocVT == MVT::f64 ||
- LocVT == MVT::v2f64) {
- if (CC_ARM_AAPCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
- return false;
- }
-
- if (LocVT == MVT::f32) {
- LocVT = MVT::i32;
- LocInfo = CCValAssign::BCvt;
- }
-
- if (!CC_ARM_AAPCS_Common(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
- return false;
-
- return true; // CC didn't match.
-}
-
-
-static bool CC_ARM_AAPCS_Common(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
-
- if (LocVT == MVT::i8 ||
- LocVT == MVT::i16) {
- LocVT = MVT::i32;
- if (ArgFlags.isSExt())
- LocInfo = CCValAssign::SExt;
- else if (ArgFlags.isZExt())
- LocInfo = CCValAssign::ZExt;
- else
- LocInfo = CCValAssign::AExt;
- }
-
- if (LocVT == MVT::i32) {
- if (ArgFlags.getOrigAlign() == 8) {
- static const unsigned RegList1[] = {
- ARM::R0, ARM::R2
- };
- static const unsigned RegList2[] = {
- ARM::R0, ARM::R1
- };
- if (unsigned Reg = State.AllocateReg(RegList1, RegList2, 2)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- return false;
- }
- }
- }
-
- if (LocVT == MVT::i32) {
- if (State.getNextStackOffset() == 0 &&ArgFlags.getOrigAlign() != 8) {
- static const unsigned RegList3[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3
- };
- if (unsigned Reg = State.AllocateReg(RegList3, 4)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- return false;
- }
- }
- }
-
- if (LocVT == MVT::i32) {
- if (ArgFlags.getOrigAlign() == 8) {
- unsigned Offset4 = State.AllocateStack(4, 8);
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
- return false;
- }
- }
-
- if (LocVT == MVT::i32 ||
- LocVT == MVT::f32) {
- unsigned Offset5 = State.AllocateStack(4, 4);
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
- return false;
- }
-
- if (LocVT == MVT::f64) {
- unsigned Offset6 = State.AllocateStack(8, 8);
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset6, LocVT, LocInfo));
- return false;
- }
-
- if (LocVT == MVT::v2f64) {
- unsigned Offset7 = State.AllocateStack(16, 8);
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset7, LocVT, LocInfo));
- return false;
- }
-
- return true; // CC didn't match.
-}
-
-
-static bool CC_ARM_AAPCS_VFP(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
-
- if (LocVT == MVT::v1i64 ||
- LocVT == MVT::v2i32 ||
- LocVT == MVT::v4i16 ||
- LocVT == MVT::v8i8 ||
- LocVT == MVT::v2f32) {
- LocVT = MVT::f64;
- LocInfo = CCValAssign::BCvt;
- }
-
- if (LocVT == MVT::v2i64 ||
- LocVT == MVT::v4i32 ||
- LocVT == MVT::v8i16 ||
- LocVT == MVT::v16i8 ||
- LocVT == MVT::v4f32) {
- LocVT = MVT::v2f64;
- LocInfo = CCValAssign::BCvt;
- }
-
- if (LocVT == MVT::v2f64) {
- static const unsigned RegList1[] = {
- ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3
- };
- if (unsigned Reg = State.AllocateReg(RegList1, 4)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- return false;
- }
- }
-
- if (LocVT == MVT::f64) {
- static const unsigned RegList2[] = {
- ARM::D0, ARM::D1, ARM::D2, ARM::D3, ARM::D4, ARM::D5, ARM::D6, ARM::D7
- };
- if (unsigned Reg = State.AllocateReg(RegList2, 8)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- return false;
- }
- }
-
- if (LocVT == MVT::f32) {
- static const unsigned RegList3[] = {
- ARM::S0, ARM::S1, ARM::S2, ARM::S3, ARM::S4, ARM::S5, ARM::S6, ARM::S7, ARM::S8, ARM::S9, ARM::S10, ARM::S11, ARM::S12, ARM::S13, ARM::S14, ARM::S15
- };
- if (unsigned Reg = State.AllocateReg(RegList3, 16)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- return false;
- }
- }
-
- if (!CC_ARM_AAPCS_Common(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
- return false;
-
- return true; // CC didn't match.
-}
-
-
-static bool CC_ARM_APCS(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
-
- if (LocVT == MVT::i8 ||
- LocVT == MVT::i16) {
- LocVT = MVT::i32;
- if (ArgFlags.isSExt())
- LocInfo = CCValAssign::SExt;
- else if (ArgFlags.isZExt())
- LocInfo = CCValAssign::ZExt;
- else
- LocInfo = CCValAssign::AExt;
- }
-
- if (LocVT == MVT::v1i64 ||
- LocVT == MVT::v2i32 ||
- LocVT == MVT::v4i16 ||
- LocVT == MVT::v8i8 ||
- LocVT == MVT::v2f32) {
- LocVT = MVT::f64;
- LocInfo = CCValAssign::BCvt;
- }
-
- if (LocVT == MVT::v2i64 ||
- LocVT == MVT::v4i32 ||
- LocVT == MVT::v8i16 ||
- LocVT == MVT::v16i8 ||
- LocVT == MVT::v4f32) {
- LocVT = MVT::v2f64;
- LocInfo = CCValAssign::BCvt;
- }
-
- if (LocVT == MVT::f64 ||
- LocVT == MVT::v2f64) {
- if (CC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
- return false;
- }
-
- if (LocVT == MVT::f32) {
- LocVT = MVT::i32;
- LocInfo = CCValAssign::BCvt;
- }
-
- if (LocVT == MVT::i32) {
- static const unsigned RegList1[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3
- };
- if (unsigned Reg = State.AllocateReg(RegList1, 4)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- return false;
- }
- }
-
- if (LocVT == MVT::i32) {
- unsigned Offset2 = State.AllocateStack(4, 4);
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo));
- return false;
- }
-
- if (LocVT == MVT::f64) {
- unsigned Offset3 = State.AllocateStack(8, 4);
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset3, LocVT, LocInfo));
- return false;
- }
-
- if (LocVT == MVT::v2f64) {
- unsigned Offset4 = State.AllocateStack(16, 4);
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
- return false;
- }
-
- return true; // CC didn't match.
-}
-
-
-static bool RetCC_ARM_AAPCS(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
-
- if (LocVT == MVT::v1i64 ||
- LocVT == MVT::v2i32 ||
- LocVT == MVT::v4i16 ||
- LocVT == MVT::v8i8 ||
- LocVT == MVT::v2f32) {
- LocVT = MVT::f64;
- LocInfo = CCValAssign::BCvt;
- }
-
- if (LocVT == MVT::v2i64 ||
- LocVT == MVT::v4i32 ||
- LocVT == MVT::v8i16 ||
- LocVT == MVT::v16i8 ||
- LocVT == MVT::v4f32) {
- LocVT = MVT::v2f64;
- LocInfo = CCValAssign::BCvt;
- }
-
- if (LocVT == MVT::f64 ||
- LocVT == MVT::v2f64) {
- if (RetCC_ARM_AAPCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
- return false;
- }
-
- if (LocVT == MVT::f32) {
- LocVT = MVT::i32;
- LocInfo = CCValAssign::BCvt;
- }
-
- if (!RetCC_ARM_AAPCS_Common(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
- return false;
-
- return true; // CC didn't match.
-}
-
-
-static bool RetCC_ARM_AAPCS_Common(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
-
- if (LocVT == MVT::i32) {
- static const unsigned RegList1[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3
- };
- if (unsigned Reg = State.AllocateReg(RegList1, 4)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- return false;
- }
- }
-
- if (LocVT == MVT::i64) {
- static const unsigned RegList2[] = {
- ARM::R0, ARM::R2
- };
- static const unsigned RegList3[] = {
- ARM::R1, ARM::R3
- };
- if (unsigned Reg = State.AllocateReg(RegList2, RegList3, 2)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- return false;
- }
- }
-
- return true; // CC didn't match.
-}
-
-
-static bool RetCC_ARM_AAPCS_VFP(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
-
- if (LocVT == MVT::v1i64 ||
- LocVT == MVT::v2i32 ||
- LocVT == MVT::v4i16 ||
- LocVT == MVT::v8i8 ||
- LocVT == MVT::v2f32) {
- LocVT = MVT::f64;
- LocInfo = CCValAssign::BCvt;
- }
-
- if (LocVT == MVT::v2i64 ||
- LocVT == MVT::v4i32 ||
- LocVT == MVT::v8i16 ||
- LocVT == MVT::v16i8 ||
- LocVT == MVT::v4f32) {
- LocVT = MVT::v2f64;
- LocInfo = CCValAssign::BCvt;
- }
-
- if (LocVT == MVT::v2f64) {
- static const unsigned RegList1[] = {
- ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3
- };
- if (unsigned Reg = State.AllocateReg(RegList1, 4)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- return false;
- }
- }
-
- if (LocVT == MVT::f64) {
- static const unsigned RegList2[] = {
- ARM::D0, ARM::D1, ARM::D2, ARM::D3, ARM::D4, ARM::D5, ARM::D6, ARM::D7
- };
- if (unsigned Reg = State.AllocateReg(RegList2, 8)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- return false;
- }
- }
-
- if (LocVT == MVT::f32) {
- static const unsigned RegList3[] = {
- ARM::S0, ARM::S1, ARM::S2, ARM::S3, ARM::S4, ARM::S5, ARM::S6, ARM::S7, ARM::S8, ARM::S9, ARM::S10, ARM::S11, ARM::S12, ARM::S13, ARM::S14, ARM::S15
- };
- if (unsigned Reg = State.AllocateReg(RegList3, 16)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- return false;
- }
- }
-
- if (!RetCC_ARM_AAPCS_Common(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
- return false;
-
- return true; // CC didn't match.
-}
-
-
-static bool RetCC_ARM_APCS(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- ISD::ArgFlagsTy ArgFlags, CCState &State) {
-
- if (LocVT == MVT::f32) {
- LocVT = MVT::i32;
- LocInfo = CCValAssign::BCvt;
- }
-
- if (LocVT == MVT::v1i64 ||
- LocVT == MVT::v2i32 ||
- LocVT == MVT::v4i16 ||
- LocVT == MVT::v8i8 ||
- LocVT == MVT::v2f32) {
- LocVT = MVT::f64;
- LocInfo = CCValAssign::BCvt;
- }
-
- if (LocVT == MVT::v2i64 ||
- LocVT == MVT::v4i32 ||
- LocVT == MVT::v8i16 ||
- LocVT == MVT::v16i8 ||
- LocVT == MVT::v4f32) {
- LocVT = MVT::v2f64;
- LocInfo = CCValAssign::BCvt;
- }
-
- if (LocVT == MVT::f64 ||
- LocVT == MVT::v2f64) {
- if (RetCC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
- return false;
- }
-
- if (LocVT == MVT::i32) {
- static const unsigned RegList1[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3
- };
- if (unsigned Reg = State.AllocateReg(RegList1, 4)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- return false;
- }
- }
-
- if (LocVT == MVT::i64) {
- static const unsigned RegList2[] = {
- ARM::R0, ARM::R2
- };
- static const unsigned RegList3[] = {
- ARM::R1, ARM::R3
- };
- if (unsigned Reg = State.AllocateReg(RegList2, RegList3, 2)) {
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- return false;
- }
- }
-
- return true; // CC didn't match.
-}
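Each of the CC_* / RetCC_* fragments deleted above follows the same generated shape: promote or bit-convert the type, try to hand the value to the next free register, otherwise carve out an aligned stack slot, and return false as soon as the value is placed (returning true means the convention did not match, so the caller can try another one). Below is a standalone sketch of that shape with a simplified stand-in for LLVM's CCState; all names in it are hypothetical, not the generated API.

#include <cstdio>

struct SimpleCCState {                              // stand-in for CCState
  bool RegUsed[4] = {false, false, false, false};   // models r0-r3
  unsigned StackOffset = 0;

  int allocateReg() {                               // -1 once r0-r3 are used
    for (int i = 0; i < 4; ++i)
      if (!RegUsed[i]) { RegUsed[i] = true; return i; }
    return -1;
  }
  unsigned allocateStack(unsigned Size, unsigned Align) {
    StackOffset = (StackOffset + Align - 1) & ~(Align - 1);
    unsigned Off = StackOffset;
    StackOffset += Size;
    return Off;
  }
};

// Mirrors the generated pattern: false == value placed, true == CC didn't match.
static bool CC_Sketch_i32(SimpleCCState &State, int &Reg, unsigned &Off) {
  if ((Reg = State.allocateReg()) >= 0)
    return false;                                   // assigned to r0-r3
  Off = State.allocateStack(4, 4);                  // otherwise a 4-byte stack slot
  return false;
}

int main() {
  SimpleCCState State;
  for (int Arg = 0; Arg < 6; ++Arg) {
    int Reg = -1;
    unsigned Off = 0;
    CC_Sketch_i32(State, Reg, Off);
    if (Reg >= 0)
      std::printf("arg %d -> r%d\n", Arg, Reg);
    else
      std::printf("arg %d -> [sp+%u]\n", Arg, Off);
  }
  return 0;
}

With four integer argument registers the first four 32-bit arguments land in r0-r3 and the rest fall through to 4-byte aligned stack slots, which is the same RegList/AllocateStack fallthrough visible in the deleted fragment.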
diff --git a/libclamav/c++/ARMGenCodeEmitter.inc b/libclamav/c++/ARMGenCodeEmitter.inc
deleted file mode 100644
index 8ce1986..0000000
--- a/libclamav/c++/ARMGenCodeEmitter.inc
+++ /dev/null
@@ -1,3914 +0,0 @@
-//===- TableGen'erated file -------------------------------------*- C++ -*-===//
-//
-// Machine Code Emitter
-//
-// Automatically generated file, do not edit!
-//
-//===----------------------------------------------------------------------===//
-
-unsigned ARMCodeEmitter::getBinaryCodeForInstr(const MachineInstr &MI) {
- static const unsigned InstBits[] = {
- 0U,
- 0U,
- 0U,
- 0U,
- 0U,
- 0U,
- 0U,
- 0U,
- 0U,
- 0U,
- 0U,
- 0U,
- 45088768U, // ADCSSri
- 11534336U, // ADCSSrr
- 11534336U, // ADCSSrs
- 44040192U, // ADCri
- 10485760U, // ADCrr
- 10485760U, // ADCrs
- 42991616U, // ADDSri
- 9437184U, // ADDSrr
- 9437184U, // ADDSrs
- 41943040U, // ADDri
- 8388608U, // ADDrr
- 8388608U, // ADDrs
- 0U, // ADJCALLSTACKDOWN
- 0U, // ADJCALLSTACKUP
- 33554432U, // ANDri
- 0U, // ANDrr
- 0U, // ANDrs
- 0U, // ATOMIC_CMP_SWAP_I16
- 0U, // ATOMIC_CMP_SWAP_I32
- 0U, // ATOMIC_CMP_SWAP_I8
- 0U, // ATOMIC_LOAD_ADD_I16
- 0U, // ATOMIC_LOAD_ADD_I32
- 0U, // ATOMIC_LOAD_ADD_I8
- 0U, // ATOMIC_LOAD_AND_I16
- 0U, // ATOMIC_LOAD_AND_I32
- 0U, // ATOMIC_LOAD_AND_I8
- 0U, // ATOMIC_LOAD_NAND_I16
- 0U, // ATOMIC_LOAD_NAND_I32
- 0U, // ATOMIC_LOAD_NAND_I8
- 0U, // ATOMIC_LOAD_OR_I16
- 0U, // ATOMIC_LOAD_OR_I32
- 0U, // ATOMIC_LOAD_OR_I8
- 0U, // ATOMIC_LOAD_SUB_I16
- 0U, // ATOMIC_LOAD_SUB_I32
- 0U, // ATOMIC_LOAD_SUB_I8
- 0U, // ATOMIC_LOAD_XOR_I16
- 0U, // ATOMIC_LOAD_XOR_I32
- 0U, // ATOMIC_LOAD_XOR_I8
- 0U, // ATOMIC_SWAP_I16
- 0U, // ATOMIC_SWAP_I32
- 0U, // ATOMIC_SWAP_I8
- 167772160U, // B
- 130023455U, // BFC
- 130023440U, // BFI
- 62914560U, // BICri
- 29360128U, // BICrr
- 29360128U, // BICrs
- 18874480U, // BKPT
- 3942645760U, // BL
- 19922736U, // BLX
- 19922736U, // BLXr9
- 184549376U, // BL_pred
- 3942645760U, // BLr9
- 184549376U, // BLr9_pred
- 27324416U, // BMOVPCRX
- 27324416U, // BMOVPCRXr9
- 3778019088U, // BRIND
- 8450048U, // BR_JTadd
- 118550528U, // BR_JTm
- 27324416U, // BR_JTr
- 19922704U, // BX
- 18874400U, // BXJ
- 19922718U, // BX_RET
- 19922704U, // BXr9
- 167772160U, // Bcc
- 234881024U, // CDP
- 4261412864U, // CDP2
- 4117757968U, // CLREX
- 24055568U, // CLZ
- 57671680U, // CMNzri
- 24117248U, // CMNzrr
- 24117248U, // CMNzrs
- 55574528U, // CMPri
- 22020096U, // CMPrr
- 22020096U, // CMPrs
- 55574528U, // CMPzri
- 22020096U, // CMPzrr
- 22020096U, // CMPzrs
- 0U, // CONSTPOOL_ENTRY
- 4043309056U, // CPS
- 52429040U, // DBG
- 4117758043U, // DMBish
- 4117758042U, // DMBishst
- 4117758039U, // DMBnsh
- 4117758038U, // DMBnshst
- 4117758035U, // DMBosh
- 4117758034U, // DMBoshst
- 4117758046U, // DMBst
- 4117758027U, // DSBish
- 4117758026U, // DSBishst
- 4117758023U, // DSBnsh
- 4117758022U, // DSBnshst
- 4117758019U, // DSBosh
- 4117758018U, // DSBoshst
- 4117758030U, // DSBst
- 35651584U, // EORri
- 2097152U, // EORrr
- 2097152U, // EORrs
- 246418176U, // FCONSTD
- 246417920U, // FCONSTS
- 250739216U, // FMSTAT
- 4117758063U, // ISBsy
- 0U, // Int_MemBarrierV6
- 4118802527U, // Int_MemBarrierV7
- 0U, // Int_SyncBarrierV6
- 4118802511U, // Int_SyncBarrierV7
- 0U, // Int_eh_sjlj_setjmp
- 4249878528U, // LDC2L_OFFSET
- 4241489920U, // LDC2L_OPTION
- 4235198464U, // LDC2L_POST
- 4251975680U, // LDC2L_PRE
- 4245684224U, // LDC2_OFFSET
- 4237295616U, // LDC2_OPTION
- 4231004160U, // LDC2_POST
- 4247781376U, // LDC2_PRE
- 223346688U, // LDCL_OFFSET
- 214958080U, // LDCL_OPTION
- 208666624U, // LDCL_POST
- 225443840U, // LDCL_PRE
- 219152384U, // LDC_OFFSET
- 210763776U, // LDC_OPTION
- 204472320U, // LDC_POST
- 221249536U, // LDC_PRE
- 135266304U, // LDM
- 135266304U, // LDM_RET
- 84934656U, // LDR
- 89128960U, // LDRB
- 74448896U, // LDRBT
- 72351744U, // LDRB_POST
- 91226112U, // LDRB_PRE
- 16777424U, // LDRD
- 208U, // LDRD_POST
- 18874576U, // LDRD_PRE
- 26218399U, // LDREX
- 30412703U, // LDREXB
- 28315551U, // LDREXD
- 32509855U, // LDREXH
- 17825968U, // LDRH
- 3145904U, // LDRHT
- 1048752U, // LDRH_POST
- 19923120U, // LDRH_PRE
- 17826000U, // LDRSB
- 3145936U, // LDRSBT
- 1048784U, // LDRSB_POST
- 19923152U, // LDRSB_PRE
- 17826032U, // LDRSH
- 3145968U, // LDRSHT
- 1048816U, // LDRSH_POST
- 19923184U, // LDRSH_PRE
- 70254592U, // LDRT
- 68157440U, // LDR_POST
- 87031808U, // LDR_PRE
- 84934656U, // LDRcp
- 0U, // LEApcrel
- 33554432U, // LEApcrelJT
- 234881040U, // MCR
- 4261412880U, // MCR2
- 205520896U, // MCRR
- 4232052736U, // MCRR2
- 2097296U, // MLA
- 6291600U, // MLS
- 60817408U, // MOVCCi
- 27262976U, // MOVCCr
- 27262976U, // MOVCCs
- 27324430U, // MOVPCLR
- 3785420800U, // MOVPCRX
- 54525952U, // MOVTi16
- 60817408U, // MOVi
- 50331648U, // MOVi16
- 0U, // MOVi2pieces
- 0U, // MOVi32imm
- 27262976U, // MOVr
- 27262976U, // MOVrx
- 27262976U, // MOVs
- 27262976U, // MOVsra_flag
- 27262976U, // MOVsrl_flag
- 235929616U, // MRC
- 4262461456U, // MRC2
- 206569472U, // MRRC
- 4233101312U, // MRRC2
- 16777216U, // MRS
- 20971520U, // MRSsys
- 18874368U, // MSR
- 52428800U, // MSRi
- 23068672U, // MSRsys
- 56623104U, // MSRsysi
- 144U, // MUL
- 65011712U, // MVNi
- 31457280U, // MVNr
- 31457280U, // MVNs
- 52428800U, // NOP
- 58720256U, // ORRri
- 25165824U, // ORRrr
- 25165824U, // ORRrs
- 8388608U, // PICADD
- 84934656U, // PICLDR
- 89128960U, // PICLDRB
- 17825968U, // PICLDRH
- 17826000U, // PICLDRSB
- 17826032U, // PICLDRSH
- 83886080U, // PICSTR
- 88080384U, // PICSTRB
- 16777392U, // PICSTRH
- 109051920U, // PKHBT
- 109051984U, // PKHTB
- 4111466496U, // PLDWi
- 4145020928U, // PLDWr
- 4115660800U, // PLDi
- 4149215232U, // PLDr
- 4098883584U, // PLIi
- 4132438016U, // PLIr
- 16777296U, // QADD
- 102760464U, // QADD16
- 102760592U, // QADD8
- 102760496U, // QASX
- 20971600U, // QDADD
- 23068752U, // QDSUB
- 102760528U, // QSAX
- 18874448U, // QSUB
- 102760560U, // QSUB16
- 102760688U, // QSUB8
- 117378864U, // RBIT
- 113184560U, // REV
- 113184688U, // REV16
- 117378992U, // REVSH
- 4161798144U, // RFE
- 4163895296U, // RFEW
- 40894464U, // RSBSri
- 7340032U, // RSBSrs
- 39845888U, // RSBri
- 6291456U, // RSBrs
- 49283072U, // RSCSri
- 15728640U, // RSCSrs
- 48234496U, // RSCri
- 14680064U, // RSCrs
- 101711888U, // SADD16
- 101712016U, // SADD8
- 101711920U, // SASX
- 47185920U, // SBCSSri
- 13631488U, // SBCSSrr
- 13631488U, // SBCSSrs
- 46137344U, // SBCri
- 12582912U, // SBCrr
- 12582912U, // SBCrs
- 127926352U, // SBFX
- 109052080U, // SEL
- 4043375104U, // SETENDBE
- 4043374592U, // SETENDLE
- 52428804U, // SEV
- 103809040U, // SHADD16
- 103809168U, // SHADD8
- 103809072U, // SHASX
- 103809104U, // SHSAX
- 103809136U, // SHSUB16
- 103809264U, // SHSUB8
- 23068784U, // SMC
- 16777344U, // SMLABB
- 16777408U, // SMLABT
- 117440528U, // SMLAD
- 117440560U, // SMLADX
- 14680208U, // SMLAL
- 20971648U, // SMLALBB
- 20971712U, // SMLALBT
- 121634832U, // SMLALD
- 121634864U, // SMLALDX
- 20971680U, // SMLALTB
- 20971744U, // SMLALTT
- 16777376U, // SMLATB
- 16777440U, // SMLATT
- 18874496U, // SMLAWB
- 18874560U, // SMLAWT
- 117440592U, // SMLSD
- 117440624U, // SMLSDX
- 121634896U, // SMLSLD
- 121634928U, // SMLSLDX
- 122683408U, // SMMLA
- 122683440U, // SMMLAR
- 122683600U, // SMMLS
- 122683632U, // SMMLSR
- 122744848U, // SMMUL
- 122744880U, // SMMULR
- 117501968U, // SMUAD
- 117502000U, // SMUADX
- 23068800U, // SMULBB
- 23068864U, // SMULBT
- 12583056U, // SMULL
- 23068832U, // SMULTB
- 23068896U, // SMULTT
- 18874528U, // SMULWB
- 18874592U, // SMULWT
- 117502032U, // SMUSD
- 117502064U, // SMUSDX
- 4164943872U, // SRS
- 4167041024U, // SRSW
- 111149104U, // SSAT16
- 111149136U, // SSATasr
- 111149072U, // SSATlsl
- 101711952U, // SSAX
- 101711984U, // SSUB16
- 101712112U, // SSUB8
- 4248829952U, // STC2L_OFFSET
- 4240441344U, // STC2L_OPTION
- 4234149888U, // STC2L_POST
- 4250927104U, // STC2L_PRE
- 4244635648U, // STC2_OFFSET
- 4236247040U, // STC2_OPTION
- 4229955584U, // STC2_POST
- 4246732800U, // STC2_PRE
- 222298112U, // STCL_OFFSET
- 213909504U, // STCL_OPTION
- 207618048U, // STCL_POST
- 224395264U, // STCL_PRE
- 218103808U, // STC_OFFSET
- 209715200U, // STC_OPTION
- 203423744U, // STC_POST
- 220200960U, // STC_PRE
- 134217728U, // STM
- 83886080U, // STR
- 88080384U, // STRB
- 73400320U, // STRBT
- 71303168U, // STRB_POST
- 90177536U, // STRB_PRE
- 16777456U, // STRD
- 240U, // STRD_POST
- 18874608U, // STRD_PRE
- 25169808U, // STREX
- 29364112U, // STREXB
- 27266960U, // STREXD
- 31461264U, // STREXH
- 16777392U, // STRH
- 2097328U, // STRHT
- 176U, // STRH_POST
- 18874544U, // STRH_PRE
- 69206016U, // STRT
- 67108864U, // STR_POST
- 85983232U, // STR_PRE
- 38797312U, // SUBSri
- 5242880U, // SUBSrr
- 5242880U, // SUBSrs
- 37748736U, // SUBri
- 4194304U, // SUBrr
- 4194304U, // SUBrs
- 251658240U, // SVC
- 16777360U, // SWP
- 20971664U, // SWPB
- 109052016U, // SXTAB16rr
- 109052016U, // SXTAB16rr_rot
- 111149168U, // SXTABrr
- 111149168U, // SXTABrr_rot
- 112197744U, // SXTAHrr
- 112197744U, // SXTAHrr_rot
- 110035056U, // SXTB16r
- 110035056U, // SXTB16r_rot
- 112132208U, // SXTBr
- 112132208U, // SXTBr_rot
- 113180784U, // SXTHr
- 113180784U, // SXTHr_rot
- 53477376U, // TEQri
- 19922944U, // TEQrr
- 19922944U, // TEQrs
- 184549376U, // TPsoft
- 133169392U, // TRAP
- 51380224U, // TSTri
- 17825792U, // TSTrr
- 17825792U, // TSTrs
- 105906192U, // UADD16
- 105906320U, // UADD8
- 105906224U, // UASX
- 132120656U, // UBFX
- 108003344U, // UHADD16
- 108003472U, // UHADD8
- 108003376U, // UHASX
- 108003408U, // UHSAX
- 108003440U, // UHSUB16
- 108003568U, // UHSUB8
- 4194448U, // UMAAL
- 10485904U, // UMLAL
- 8388752U, // UMULL
- 106954768U, // UQADD16
- 106954896U, // UQADD8
- 106954800U, // UQASX
- 106954832U, // UQSAX
- 106954864U, // UQSUB16
- 106954992U, // UQSUB8
- 125890576U, // USAD8
- 125829136U, // USADA8
- 115343408U, // USAT16
- 115343440U, // USATasr
- 115343376U, // USATlsl
- 105906256U, // USAX
- 105906288U, // USUB16
- 105906416U, // USUB8
- 113246320U, // UXTAB16rr
- 113246320U, // UXTAB16rr_rot
- 115343472U, // UXTABrr
- 115343472U, // UXTABrr_rot
- 116392048U, // UXTAHrr
- 116392048U, // UXTAHrr_rot
- 114229360U, // UXTB16r
- 114229360U, // UXTB16r_rot
- 116326512U, // UXTBr
- 116326512U, // UXTBr_rot
- 117375088U, // UXTHr
- 117375088U, // UXTHr_rot
- 4070573312U, // VABALsv2i64
- 4069524736U, // VABALsv4i32
- 4068476160U, // VABALsv8i16
- 4087350528U, // VABALuv2i64
- 4086301952U, // VABALuv4i32
- 4085253376U, // VABALuv8i16
- 4060088144U, // VABAsv16i8
- 4062185232U, // VABAsv2i32
- 4061136656U, // VABAsv4i16
- 4062185296U, // VABAsv4i32
- 4061136720U, // VABAsv8i16
- 4060088080U, // VABAsv8i8
- 4076865360U, // VABAuv16i8
- 4078962448U, // VABAuv2i32
- 4077913872U, // VABAuv4i16
- 4078962512U, // VABAuv4i32
- 4077913936U, // VABAuv8i16
- 4076865296U, // VABAuv8i8
- 4070573824U, // VABDLsv2i64
- 4069525248U, // VABDLsv4i32
- 4068476672U, // VABDLsv8i16
- 4087351040U, // VABDLuv2i64
- 4086302464U, // VABDLuv4i32
- 4085253888U, // VABDLuv8i16
- 4078963968U, // VABDfd
- 4078964032U, // VABDfq
- 4060088128U, // VABDsv16i8
- 4062185216U, // VABDsv2i32
- 4061136640U, // VABDsv4i16
- 4062185280U, // VABDsv4i32
- 4061136704U, // VABDsv8i16
- 4060088064U, // VABDsv8i8
- 4076865344U, // VABDuv16i8
- 4078962432U, // VABDuv2i32
- 4077913856U, // VABDuv4i16
- 4078962496U, // VABDuv4i32
- 4077913920U, // VABDuv8i16
- 4076865280U, // VABDuv8i8
- 246418368U, // VABSD
- 246418112U, // VABSS
- 4088989440U, // VABSfd
- 4088989440U, // VABSfd_sfp
- 4088989504U, // VABSfq
- 4088464192U, // VABSv16i8
- 4088988416U, // VABSv2i32
- 4088726272U, // VABSv4i16
- 4088988480U, // VABSv4i32
- 4088726336U, // VABSv8i16
- 4088464128U, // VABSv8i8
- 4076867088U, // VACGEd
- 4076867152U, // VACGEq
- 4078964240U, // VACGTd
- 4078964304U, // VACGTq
- 238029568U, // VADDD
- 4070573056U, // VADDHNv2i32
- 4069524480U, // VADDHNv4i16
- 4068475904U, // VADDHNv8i8
- 4070572032U, // VADDLsv2i64
- 4069523456U, // VADDLsv4i32
- 4068474880U, // VADDLsv8i16
- 4087349248U, // VADDLuv2i64
- 4086300672U, // VADDLuv4i32
- 4085252096U, // VADDLuv8i16
- 238029312U, // VADDS
- 4070572288U, // VADDWsv2i64
- 4069523712U, // VADDWsv4i32
- 4068475136U, // VADDWsv8i16
- 4087349504U, // VADDWuv2i64
- 4086300928U, // VADDWuv4i32
- 4085252352U, // VADDWuv8i16
- 4060089600U, // VADDfd
- 4060089600U, // VADDfd_sfp
- 4060089664U, // VADDfq
- 4060088384U, // VADDv16i8
- 4063234048U, // VADDv1i64
- 4062185472U, // VADDv2i32
- 4063234112U, // VADDv2i64
- 4061136896U, // VADDv4i16
- 4062185536U, // VADDv4i32
- 4061136960U, // VADDv8i16
- 4060088320U, // VADDv8i8
- 4060086544U, // VANDd
- 4060086608U, // VANDq
- 4061135120U, // VBICd
- 4061135184U, // VBICq
- 4080009488U, // VBIFd
- 4080009552U, // VBIFq
- 4078960912U, // VBITd
- 4078960976U, // VBITq
- 4077912336U, // VBSLd
- 4077912400U, // VBSLq
- 4060089856U, // VCEQfd
- 4060089920U, // VCEQfq
- 4076865616U, // VCEQv16i8
- 4078962704U, // VCEQv2i32
- 4077914128U, // VCEQv4i16
- 4078962768U, // VCEQv4i32
- 4077914192U, // VCEQv8i16
- 4076865552U, // VCEQv8i8
- 4088463680U, // VCEQzv16i8
- 4088988928U, // VCEQzv2f32
- 4088987904U, // VCEQzv2i32
- 4088988992U, // VCEQzv4f32
- 4088725760U, // VCEQzv4i16
- 4088987968U, // VCEQzv4i32
- 4088725824U, // VCEQzv8i16
- 4088463616U, // VCEQzv8i8
- 4076867072U, // VCGEfd
- 4076867136U, // VCGEfq
- 4060087120U, // VCGEsv16i8
- 4062184208U, // VCGEsv2i32
- 4061135632U, // VCGEsv4i16
- 4062184272U, // VCGEsv4i32
- 4061135696U, // VCGEsv8i16
- 4060087056U, // VCGEsv8i8
- 4076864336U, // VCGEuv16i8
- 4078961424U, // VCGEuv2i32
- 4077912848U, // VCGEuv4i16
- 4078961488U, // VCGEuv4i32
- 4077912912U, // VCGEuv8i16
- 4076864272U, // VCGEuv8i8
- 4088463552U, // VCGEzv16i8
- 4088988800U, // VCGEzv2f32
- 4088987776U, // VCGEzv2i32
- 4088988864U, // VCGEzv4f32
- 4088725632U, // VCGEzv4i16
- 4088987840U, // VCGEzv4i32
- 4088725696U, // VCGEzv8i16
- 4088463488U, // VCGEzv8i8
- 4078964224U, // VCGTfd
- 4078964288U, // VCGTfq
- 4060087104U, // VCGTsv16i8
- 4062184192U, // VCGTsv2i32
- 4061135616U, // VCGTsv4i16
- 4062184256U, // VCGTsv4i32
- 4061135680U, // VCGTsv8i16
- 4060087040U, // VCGTsv8i8
- 4076864320U, // VCGTuv16i8
- 4078961408U, // VCGTuv2i32
- 4077912832U, // VCGTuv4i16
- 4078961472U, // VCGTuv4i32
- 4077912896U, // VCGTuv8i16
- 4076864256U, // VCGTuv8i8
- 4088463424U, // VCGTzv16i8
- 4088988672U, // VCGTzv2f32
- 4088987648U, // VCGTzv2i32
- 4088988736U, // VCGTzv4f32
- 4088725504U, // VCGTzv4i16
- 4088987712U, // VCGTzv4i32
- 4088725568U, // VCGTzv8i16
- 4088463360U, // VCGTzv8i8
- 4088463808U, // VCLEzv16i8
- 4088989056U, // VCLEzv2f32
- 4088988032U, // VCLEzv2i32
- 4088989120U, // VCLEzv4f32
- 4088725888U, // VCLEzv4i16
- 4088988096U, // VCLEzv4i32
- 4088725952U, // VCLEzv8i16
- 4088463744U, // VCLEzv8i8
- 4088398912U, // VCLSv16i8
- 4088923136U, // VCLSv2i32
- 4088660992U, // VCLSv4i16
- 4088923200U, // VCLSv4i32
- 4088661056U, // VCLSv8i16
- 4088398848U, // VCLSv8i8
- 4088463936U, // VCLTzv16i8
- 4088989184U, // VCLTzv2f32
- 4088988160U, // VCLTzv2i32
- 4088989248U, // VCLTzv4f32
- 4088726016U, // VCLTzv4i16
- 4088988224U, // VCLTzv4i32
- 4088726080U, // VCLTzv8i16
- 4088463872U, // VCLTzv8i8
- 4088399040U, // VCLZv16i8
- 4088923264U, // VCLZv2i32
- 4088661120U, // VCLZv4i16
- 4088923328U, // VCLZv4i32
- 4088661184U, // VCLZv8i16
- 4088398976U, // VCLZv8i8
- 246680384U, // VCMPD
- 246680512U, // VCMPED
- 246680256U, // VCMPES
- 246746048U, // VCMPEZD
- 246745792U, // VCMPEZS
- 246680128U, // VCMPS
- 246745920U, // VCMPZD
- 246745664U, // VCMPZS
- 4088399104U, // VCNTd
- 4088399168U, // VCNTq
- 246614592U, // VCVTBHS
- 246549056U, // VCVTBSH
- 246876864U, // VCVTDS
- 246877120U, // VCVTSD
- 246614720U, // VCVTTHS
- 246549184U, // VCVTTSH
- 4089120512U, // VCVTf2sd
- 4089120512U, // VCVTf2sd_sfp
- 4089120576U, // VCVTf2sq
- 4089120640U, // VCVTf2ud
- 4089120640U, // VCVTf2ud_sfp
- 4089120704U, // VCVTf2uq
- 4068478736U, // VCVTf2xsd
- 4068478800U, // VCVTf2xsq
- 4085255952U, // VCVTf2xud
- 4085256016U, // VCVTf2xuq
- 4089120256U, // VCVTs2fd
- 4089120256U, // VCVTs2fd_sfp
- 4089120320U, // VCVTs2fq
- 4089120384U, // VCVTu2fd
- 4089120384U, // VCVTu2fd_sfp
- 4089120448U, // VCVTu2fq
- 4068478480U, // VCVTxs2fd
- 4068478544U, // VCVTxs2fq
- 4085255696U, // VCVTxu2fd
- 4085255760U, // VCVTxu2fq
- 243272448U, // VDIVD
- 243272192U, // VDIVS
- 243272496U, // VDUP16d
- 245369648U, // VDUP16q
- 243272464U, // VDUP32d
- 245369616U, // VDUP32q
- 247466768U, // VDUP8d
- 249563920U, // VDUP8q
- 4088531968U, // VDUPLN16d
- 4088532032U, // VDUPLN16q
- 4088663040U, // VDUPLN32d
- 4088663104U, // VDUPLN32q
- 4088466432U, // VDUPLN8d
- 4088466496U, // VDUPLN8q
- 4088663040U, // VDUPLNfd
- 4088663104U, // VDUPLNfq
- 243272464U, // VDUPfd
- 4088663040U, // VDUPfdf
- 245369616U, // VDUPfq
- 4088663104U, // VDUPfqf
- 4076863760U, // VEORd
- 4076863824U, // VEORq
- 4071620608U, // VEXTd16
- 4071620608U, // VEXTd32
- 4071620608U, // VEXTd8
- 4071620608U, // VEXTdf
- 4071620672U, // VEXTq16
- 4071620672U, // VEXTq32
- 4071620672U, // VEXTq8
- 4071620672U, // VEXTqf
- 235932432U, // VGETLNi32
- 235932464U, // VGETLNs16
- 240126736U, // VGETLNs8
- 244321072U, // VGETLNu16
- 248515344U, // VGETLNu8
- 4060086336U, // VHADDsv16i8
- 4062183424U, // VHADDsv2i32
- 4061134848U, // VHADDsv4i16
- 4062183488U, // VHADDsv4i32
- 4061134912U, // VHADDsv8i16
- 4060086272U, // VHADDsv8i8
- 4076863552U, // VHADDuv16i8
- 4078960640U, // VHADDuv2i32
- 4077912064U, // VHADDuv4i16
- 4078960704U, // VHADDuv4i32
- 4077912128U, // VHADDuv8i16
- 4076863488U, // VHADDuv8i8
- 4060086848U, // VHSUBsv16i8
- 4062183936U, // VHSUBsv2i32
- 4061135360U, // VHSUBsv4i16
- 4062184000U, // VHSUBsv4i32
- 4061135424U, // VHSUBsv8i16
- 4060086784U, // VHSUBsv8i8
- 4076864064U, // VHSUBuv16i8
- 4078961152U, // VHSUBuv2i32
- 4077912576U, // VHSUBuv4i16
- 4078961216U, // VHSUBuv4i32
- 4077912640U, // VHSUBuv8i16
- 4076864000U, // VHSUBuv8i8
- 4095739712U, // VLD1d16
- 4095738432U, // VLD1d16Q
- 4095739456U, // VLD1d16T
- 4095739776U, // VLD1d32
- 4095738496U, // VLD1d32Q
- 4095739520U, // VLD1d32T
- 4095739840U, // VLD1d64
- 4095739648U, // VLD1d8
- 4095738368U, // VLD1d8Q
- 4095739392U, // VLD1d8T
- 4095739776U, // VLD1df
- 4095740480U, // VLD1q16
- 4095740544U, // VLD1q32
- 4095740608U, // VLD1q64
- 4095740416U, // VLD1q8
- 4095740544U, // VLD1qf
- 4104127744U, // VLD2LNd16
- 4104128768U, // VLD2LNd32
- 4104126720U, // VLD2LNd8
- 4104127776U, // VLD2LNq16a
- 4104127776U, // VLD2LNq16b
- 4104128832U, // VLD2LNq32a
- 4104128832U, // VLD2LNq32b
- 4095739968U, // VLD2d16
- 4095740224U, // VLD2d16D
- 4095740032U, // VLD2d32
- 4095740288U, // VLD2d32D
- 4095740608U, // VLD2d64
- 4095739904U, // VLD2d8
- 4095740160U, // VLD2d8D
- 4095738688U, // VLD2q16
- 4095738752U, // VLD2q32
- 4095738624U, // VLD2q8
- 4104128000U, // VLD3LNd16
- 4104129024U, // VLD3LNd32
- 4104126976U, // VLD3LNd8
- 4104128032U, // VLD3LNq16a
- 4104128032U, // VLD3LNq16b
- 4104129088U, // VLD3LNq32a
- 4104129088U, // VLD3LNq32b
- 4095738944U, // VLD3d16
- 4095739008U, // VLD3d32
- 4095739584U, // VLD3d64
- 4095738880U, // VLD3d8
- 4095739200U, // VLD3q16a
- 4095739200U, // VLD3q16b
- 4095739264U, // VLD3q32a
- 4095739264U, // VLD3q32b
- 4095739136U, // VLD3q8a
- 4095739136U, // VLD3q8b
- 4104128256U, // VLD4LNd16
- 4104129280U, // VLD4LNd32
- 4104127232U, // VLD4LNd8
- 4104128288U, // VLD4LNq16a
- 4104128288U, // VLD4LNq16b
- 4104129344U, // VLD4LNq32a
- 4104129344U, // VLD4LNq32b
- 4095737920U, // VLD4d16
- 4095737984U, // VLD4d32
- 4095738560U, // VLD4d64
- 4095737856U, // VLD4d8
- 4095738176U, // VLD4q16a
- 4095738176U, // VLD4q16b
- 4095738240U, // VLD4q32a
- 4095738240U, // VLD4q32b
- 4095738112U, // VLD4q8a
- 4095738112U, // VLD4q8b
- 202377984U, // VLDMD
- 202377728U, // VLDMS
- 219155200U, // VLDRD
- 210766592U, // VLDRQ
- 219154944U, // VLDRS
- 4060090112U, // VMAXfd
- 4060090112U, // VMAXfd_sfp
- 4060090176U, // VMAXfq
- 4060087872U, // VMAXsv16i8
- 4062184960U, // VMAXsv2i32
- 4061136384U, // VMAXsv4i16
- 4062185024U, // VMAXsv4i32
- 4061136448U, // VMAXsv8i16
- 4060087808U, // VMAXsv8i8
- 4076865088U, // VMAXuv16i8
- 4078962176U, // VMAXuv2i32
- 4077913600U, // VMAXuv4i16
- 4078962240U, // VMAXuv4i32
- 4077913664U, // VMAXuv8i16
- 4076865024U, // VMAXuv8i8
- 4062187264U, // VMINfd
- 4060090112U, // VMINfd_sfp
- 4062187328U, // VMINfq
- 4060087888U, // VMINsv16i8
- 4062184976U, // VMINsv2i32
- 4061136400U, // VMINsv4i16
- 4062185040U, // VMINsv4i32
- 4061136464U, // VMINsv8i16
- 4060087824U, // VMINsv8i8
- 4076865104U, // VMINuv16i8
- 4078962192U, // VMINuv2i32
- 4077913616U, // VMINuv4i16
- 4078962256U, // VMINuv4i32
- 4077913680U, // VMINuv8i16
- 4076865040U, // VMINuv8i8
- 234883840U, // VMLAD
- 4070572608U, // VMLALslsv2i32
- 4069524032U, // VMLALslsv4i16
- 4087349824U, // VMLALsluv2i32
- 4086301248U, // VMLALsluv4i16
- 4070574080U, // VMLALsv2i64
- 4069525504U, // VMLALsv4i32
- 4068476928U, // VMLALsv8i16
- 4087351296U, // VMLALuv2i64
- 4086302720U, // VMLALuv4i32
- 4085254144U, // VMLALuv8i16
- 234883584U, // VMLAS
- 4060089616U, // VMLAfd
- 4060089680U, // VMLAfq
- 4070572352U, // VMLAslfd
- 4087349568U, // VMLAslfq
- 4070572096U, // VMLAslv2i32
- 4069523520U, // VMLAslv4i16
- 4087349312U, // VMLAslv4i32
- 4086300736U, // VMLAslv8i16
- 4060088640U, // VMLAv16i8
- 4062185728U, // VMLAv2i32
- 4061137152U, // VMLAv4i16
- 4062185792U, // VMLAv4i32
- 4061137216U, // VMLAv8i16
- 4060088576U, // VMLAv8i8
- 234883904U, // VMLSD
- 4070573632U, // VMLSLslsv2i32
- 4069525056U, // VMLSLslsv4i16
- 4087350848U, // VMLSLsluv2i32
- 4086302272U, // VMLSLsluv4i16
- 4070574592U, // VMLSLsv2i64
- 4069526016U, // VMLSLsv4i32
- 4068477440U, // VMLSLsv8i16
- 4087351808U, // VMLSLuv2i64
- 4086303232U, // VMLSLuv4i32
- 4085254656U, // VMLSLuv8i16
- 234883648U, // VMLSS
- 4062186768U, // VMLSfd
- 4062186832U, // VMLSfq
- 4070573376U, // VMLSslfd
- 4087350592U, // VMLSslfq
- 4070573120U, // VMLSslv2i32
- 4069524544U, // VMLSslv4i16
- 4087350336U, // VMLSslv4i32
- 4086301760U, // VMLSslv8i16
- 4076865856U, // VMLSv16i8
- 4078962944U, // VMLSv2i32
- 4077914368U, // VMLSv4i16
- 4078963008U, // VMLSv4i32
- 4077914432U, // VMLSv8i16
- 4076865792U, // VMLSv8i8
- 246418240U, // VMOVD
- 205523728U, // VMOVDRR
- 246418240U, // VMOVDcc
- 4062183696U, // VMOVDneon
- 4070574608U, // VMOVLsv2i64
- 4069526032U, // VMOVLsv4i32
- 4069001744U, // VMOVLsv8i16
- 4087351824U, // VMOVLuv2i64
- 4086303248U, // VMOVLuv4i32
- 4085778960U, // VMOVLuv8i16
- 4089053696U, // VMOVNv2i32
- 4088791552U, // VMOVNv4i16
- 4088529408U, // VMOVNv8i8
- 4062183760U, // VMOVQ
- 206572304U, // VMOVRRD
- 206572048U, // VMOVRRS
- 235932176U, // VMOVRS
- 246417984U, // VMOVS
- 234883600U, // VMOVSR
- 205523472U, // VMOVSRR
- 246417984U, // VMOVScc
- 4068478544U, // VMOVv16i8
- 4068478512U, // VMOVv1i64
- 4068474896U, // VMOVv2i32
- 4068478576U, // VMOVv2i64
- 4068476944U, // VMOVv4i16
- 4068474960U, // VMOVv4i32
- 4068477008U, // VMOVv8i16
- 4068478480U, // VMOVv8i8
- 250677776U, // VMRS
- 249629200U, // VMSR
- 236980992U, // VMULD
- 4068478464U, // VMULLp
- 4070574656U, // VMULLslsv2i32
- 4069526080U, // VMULLslsv4i16
- 4087351872U, // VMULLsluv2i32
- 4086303296U, // VMULLsluv4i16
- 4070575104U, // VMULLsv2i64
- 4069526528U, // VMULLsv4i32
- 4068477952U, // VMULLsv8i16
- 4087352320U, // VMULLuv2i64
- 4086303744U, // VMULLuv4i32
- 4085255168U, // VMULLuv8i16
- 236980736U, // VMULS
- 4076866832U, // VMULfd
- 4076866832U, // VMULfd_sfp
- 4076866896U, // VMULfq
- 4076865808U, // VMULpd
- 4076865872U, // VMULpq
- 4070574400U, // VMULslfd
- 4087351616U, // VMULslfq
- 4070574144U, // VMULslv2i32
- 4069525568U, // VMULslv4i16
- 4087351360U, // VMULslv4i32
- 4086302784U, // VMULslv8i16
- 4060088656U, // VMULv16i8
- 4062185744U, // VMULv2i32
- 4061137168U, // VMULv4i16
- 4062185808U, // VMULv4i32
- 4061137232U, // VMULv8i16
- 4060088592U, // VMULv8i8
- 4088399232U, // VMVNd
- 4088399296U, // VMVNq
- 246483776U, // VNEGD
- 246483776U, // VNEGDcc
- 246483520U, // VNEGS
- 246483520U, // VNEGScc
- 4088989632U, // VNEGf32q
- 4088989568U, // VNEGfd
- 4088989568U, // VNEGfd_sfp
- 4088726400U, // VNEGs16d
- 4088726464U, // VNEGs16q
- 4088988544U, // VNEGs32d
- 4088988608U, // VNEGs32q
- 4088464256U, // VNEGs8d
- 4088464320U, // VNEGs8q
- 235932480U, // VNMLAD
- 235932224U, // VNMLAS
- 235932416U, // VNMLSD
- 235932160U, // VNMLSS
- 236981056U, // VNMULD
- 236980800U, // VNMULS
- 4063232272U, // VORNd
- 4063232336U, // VORNq
- 4062183696U, // VORRd
- 4062183760U, // VORRq
- 4088399424U, // VPADALsv16i8
- 4088923648U, // VPADALsv2i32
- 4088661504U, // VPADALsv4i16
- 4088923712U, // VPADALsv4i32
- 4088661568U, // VPADALsv8i16
- 4088399360U, // VPADALsv8i8
- 4088399552U, // VPADALuv16i8
- 4088923776U, // VPADALuv2i32
- 4088661632U, // VPADALuv4i16
- 4088923840U, // VPADALuv4i32
- 4088661696U, // VPADALuv8i16
- 4088399488U, // VPADALuv8i8
- 4088398400U, // VPADDLsv16i8
- 4088922624U, // VPADDLsv2i32
- 4088660480U, // VPADDLsv4i16
- 4088922688U, // VPADDLsv4i32
- 4088660544U, // VPADDLsv8i16
- 4088398336U, // VPADDLsv8i8
- 4088398528U, // VPADDLuv16i8
- 4088922752U, // VPADDLuv2i32
- 4088660608U, // VPADDLuv4i16
- 4088922816U, // VPADDLuv4i32
- 4088660672U, // VPADDLuv8i16
- 4088398464U, // VPADDLuv8i8
- 4076866816U, // VPADDf
- 4061137680U, // VPADDi16
- 4062186256U, // VPADDi32
- 4060089104U, // VPADDi8
- 4076867328U, // VPMAXf
- 4061137408U, // VPMAXs16
- 4062185984U, // VPMAXs32
- 4060088832U, // VPMAXs8
- 4077914624U, // VPMAXu16
- 4078963200U, // VPMAXu32
- 4076866048U, // VPMAXu8
- 4078964480U, // VPMINf
- 4061137424U, // VPMINs16
- 4062186000U, // VPMINs32
- 4060088848U, // VPMINs8
- 4077914640U, // VPMINu16
- 4078963216U, // VPMINu32
- 4076866064U, // VPMINu8
- 4088399680U, // VQABSv16i8
- 4088923904U, // VQABSv2i32
- 4088661760U, // VQABSv4i16
- 4088923968U, // VQABSv4i32
- 4088661824U, // VQABSv8i16
- 4088399616U, // VQABSv8i8
- 4060086352U, // VQADDsv16i8
- 4063232016U, // VQADDsv1i64
- 4062183440U, // VQADDsv2i32
- 4063232080U, // VQADDsv2i64
- 4061134864U, // VQADDsv4i16
- 4062183504U, // VQADDsv4i32
- 4061134928U, // VQADDsv8i16
- 4060086288U, // VQADDsv8i8
- 4076863568U, // VQADDuv16i8
- 4080009232U, // VQADDuv1i64
- 4078960656U, // VQADDuv2i32
- 4080009296U, // VQADDuv2i64
- 4077912080U, // VQADDuv4i16
- 4078960720U, // VQADDuv4i32
- 4077912144U, // VQADDuv8i16
- 4076863504U, // VQADDuv8i8
- 4070572864U, // VQDMLALslv2i32
- 4069524288U, // VQDMLALslv4i16
- 4070574336U, // VQDMLALv2i64
- 4069525760U, // VQDMLALv4i32
- 4070573888U, // VQDMLSLslv2i32
- 4069525312U, // VQDMLSLslv4i16
- 4070574848U, // VQDMLSLv2i64
- 4069526272U, // VQDMLSLv4i32
- 4070575168U, // VQDMULHslv2i32
- 4069526592U, // VQDMULHslv4i16
- 4087352384U, // VQDMULHslv4i32
- 4086303808U, // VQDMULHslv8i16
- 4062186240U, // VQDMULHv2i32
- 4061137664U, // VQDMULHv4i16
- 4062186304U, // VQDMULHv4i32
- 4061137728U, // VQDMULHv8i16
- 4070574912U, // VQDMULLslv2i32
- 4069526336U, // VQDMULLslv4i16
- 4070575360U, // VQDMULLv2i64
- 4069526784U, // VQDMULLv4i32
- 4089053760U, // VQMOVNsuv2i32
- 4088791616U, // VQMOVNsuv4i16
- 4088529472U, // VQMOVNsuv8i8
- 4089053824U, // VQMOVNsv2i32
- 4088791680U, // VQMOVNsv4i16
- 4088529536U, // VQMOVNsv8i8
- 4089053888U, // VQMOVNuv2i32
- 4088791744U, // VQMOVNuv4i16
- 4088529600U, // VQMOVNuv8i8
- 4088399808U, // VQNEGv16i8
- 4088924032U, // VQNEGv2i32
- 4088661888U, // VQNEGv4i16
- 4088924096U, // VQNEGv4i32
- 4088661952U, // VQNEGv8i16
- 4088399744U, // VQNEGv8i8
- 4070575424U, // VQRDMULHslv2i32
- 4069526848U, // VQRDMULHslv4i16
- 4087352640U, // VQRDMULHslv4i32
- 4086304064U, // VQRDMULHslv8i16
- 4078963456U, // VQRDMULHv2i32
- 4077914880U, // VQRDMULHv4i16
- 4078963520U, // VQRDMULHv4i32
- 4077914944U, // VQRDMULHv8i16
- 4060087632U, // VQRSHLsv16i8
- 4063233296U, // VQRSHLsv1i64
- 4062184720U, // VQRSHLsv2i32
- 4063233360U, // VQRSHLsv2i64
- 4061136144U, // VQRSHLsv4i16
- 4062184784U, // VQRSHLsv4i32
- 4061136208U, // VQRSHLsv8i16
- 4060087568U, // VQRSHLsv8i8
- 4076864848U, // VQRSHLuv16i8
- 4080010512U, // VQRSHLuv1i64
- 4078961936U, // VQRSHLuv2i32
- 4080010576U, // VQRSHLuv2i64
- 4077913360U, // VQRSHLuv4i16
- 4078962000U, // VQRSHLuv4i32
- 4077913424U, // VQRSHLuv8i16
- 4076864784U, // VQRSHLuv8i8
- 4070574416U, // VQRSHRNsv2i32
- 4069525840U, // VQRSHRNsv4i16
- 4069001552U, // VQRSHRNsv8i8
- 4087351632U, // VQRSHRNuv2i32
- 4086303056U, // VQRSHRNuv4i16
- 4085778768U, // VQRSHRNuv8i8
- 4087351376U, // VQRSHRUNv2i32
- 4086302800U, // VQRSHRUNv4i16
- 4085778512U, // VQRSHRUNv8i8
- 4069001040U, // VQSHLsiv16i8
- 4068476816U, // VQSHLsiv1i64
- 4070573840U, // VQSHLsiv2i32
- 4068476880U, // VQSHLsiv2i64
- 4069525264U, // VQSHLsiv4i16
- 4070573904U, // VQSHLsiv4i32
- 4069525328U, // VQSHLsiv8i16
- 4069000976U, // VQSHLsiv8i8
- 4085778000U, // VQSHLsuv16i8
- 4085253776U, // VQSHLsuv1i64
- 4087350800U, // VQSHLsuv2i32
- 4085253840U, // VQSHLsuv2i64
- 4086302224U, // VQSHLsuv4i16
- 4087350864U, // VQSHLsuv4i32
- 4086302288U, // VQSHLsuv8i16
- 4085777936U, // VQSHLsuv8i8
- 4060087376U, // VQSHLsv16i8
- 4063233040U, // VQSHLsv1i64
- 4062184464U, // VQSHLsv2i32
- 4063233104U, // VQSHLsv2i64
- 4061135888U, // VQSHLsv4i16
- 4062184528U, // VQSHLsv4i32
- 4061135952U, // VQSHLsv8i16
- 4060087312U, // VQSHLsv8i8
- 4085778256U, // VQSHLuiv16i8
- 4085254032U, // VQSHLuiv1i64
- 4087351056U, // VQSHLuiv2i32
- 4085254096U, // VQSHLuiv2i64
- 4086302480U, // VQSHLuiv4i16
- 4087351120U, // VQSHLuiv4i32
- 4086302544U, // VQSHLuiv8i16
- 4085778192U, // VQSHLuiv8i8
- 4076864592U, // VQSHLuv16i8
- 4080010256U, // VQSHLuv1i64
- 4078961680U, // VQSHLuv2i32
- 4080010320U, // VQSHLuv2i64
- 4077913104U, // VQSHLuv4i16
- 4078961744U, // VQSHLuv4i32
- 4077913168U, // VQSHLuv8i16
- 4076864528U, // VQSHLuv8i8
- 4070574352U, // VQSHRNsv2i32
- 4069525776U, // VQSHRNsv4i16
- 4069001488U, // VQSHRNsv8i8
- 4087351568U, // VQSHRNuv2i32
- 4086302992U, // VQSHRNuv4i16
- 4085778704U, // VQSHRNuv8i8
- 4087351312U, // VQSHRUNv2i32
- 4086302736U, // VQSHRUNv4i16
- 4085778448U, // VQSHRUNv8i8
- 4060086864U, // VQSUBsv16i8
- 4063232528U, // VQSUBsv1i64
- 4062183952U, // VQSUBsv2i32
- 4063232592U, // VQSUBsv2i64
- 4061135376U, // VQSUBsv4i16
- 4062184016U, // VQSUBsv4i32
- 4061135440U, // VQSUBsv8i16
- 4060086800U, // VQSUBsv8i8
- 4076864080U, // VQSUBuv16i8
- 4080009744U, // VQSUBuv1i64
- 4078961168U, // VQSUBuv2i32
- 4080009808U, // VQSUBuv2i64
- 4077912592U, // VQSUBuv4i16
- 4078961232U, // VQSUBuv4i32
- 4077912656U, // VQSUBuv8i16
- 4076864016U, // VQSUBuv8i8
- 4087350272U, // VRADDHNv2i32
- 4086301696U, // VRADDHNv4i16
- 4085253120U, // VRADDHNv8i8
- 4089119744U, // VRECPEd
- 4089120000U, // VRECPEfd
- 4089120064U, // VRECPEfq
- 4089119808U, // VRECPEq
- 4060090128U, // VRECPSfd
- 4060090192U, // VRECPSfq
- 4088398080U, // VREV16d8
- 4088398144U, // VREV16q8
- 4088660096U, // VREV32d16
- 4088397952U, // VREV32d8
- 4088660160U, // VREV32q16
- 4088398016U, // VREV32q8
- 4088659968U, // VREV64d16
- 4088922112U, // VREV64d32
- 4088397824U, // VREV64d8
- 4088922112U, // VREV64df
- 4088660032U, // VREV64q16
- 4088922176U, // VREV64q32
- 4088397888U, // VREV64q8
- 4088922176U, // VREV64qf
- 4060086592U, // VRHADDsv16i8
- 4062183680U, // VRHADDsv2i32
- 4061135104U, // VRHADDsv4i16
- 4062183744U, // VRHADDsv4i32
- 4061135168U, // VRHADDsv8i16
- 4060086528U, // VRHADDsv8i8
- 4076863808U, // VRHADDuv16i8
- 4078960896U, // VRHADDuv2i32
- 4077912320U, // VRHADDuv4i16
- 4078960960U, // VRHADDuv4i32
- 4077912384U, // VRHADDuv8i16
- 4076863744U, // VRHADDuv8i8
- 4060087616U, // VRSHLsv16i8
- 4063233280U, // VRSHLsv1i64
- 4062184704U, // VRSHLsv2i32
- 4063233344U, // VRSHLsv2i64
- 4061136128U, // VRSHLsv4i16
- 4062184768U, // VRSHLsv4i32
- 4061136192U, // VRSHLsv8i16
- 4060087552U, // VRSHLsv8i8
- 4076864832U, // VRSHLuv16i8
- 4080010496U, // VRSHLuv1i64
- 4078961920U, // VRSHLuv2i32
- 4080010560U, // VRSHLuv2i64
- 4077913344U, // VRSHLuv4i16
- 4078961984U, // VRSHLuv4i32
- 4077913408U, // VRSHLuv8i16
- 4076864768U, // VRSHLuv8i8
- 4070574160U, // VRSHRNv2i32
- 4069525584U, // VRSHRNv4i16
- 4069001296U, // VRSHRNv8i8
- 4068999760U, // VRSHRsv16i8
- 4068475536U, // VRSHRsv1i64
- 4070572560U, // VRSHRsv2i32
- 4068475600U, // VRSHRsv2i64
- 4069523984U, // VRSHRsv4i16
- 4070572624U, // VRSHRsv4i32
- 4069524048U, // VRSHRsv8i16
- 4068999696U, // VRSHRsv8i8
- 4085776976U, // VRSHRuv16i8
- 4085252752U, // VRSHRuv1i64
- 4087349776U, // VRSHRuv2i32
- 4085252816U, // VRSHRuv2i64
- 4086301200U, // VRSHRuv4i16
- 4087349840U, // VRSHRuv4i32
- 4086301264U, // VRSHRuv8i16
- 4085776912U, // VRSHRuv8i8
- 4089119872U, // VRSQRTEd
- 4089120128U, // VRSQRTEfd
- 4089120192U, // VRSQRTEfq
- 4089119936U, // VRSQRTEq
- 4062187280U, // VRSQRTSfd
- 4062187344U, // VRSQRTSfq
- 4069000016U, // VRSRAsv16i8
- 4068475792U, // VRSRAsv1i64
- 4070572816U, // VRSRAsv2i32
- 4068475856U, // VRSRAsv2i64
- 4069524240U, // VRSRAsv4i16
- 4070572880U, // VRSRAsv4i32
- 4069524304U, // VRSRAsv8i16
- 4068999952U, // VRSRAsv8i8
- 4085777232U, // VRSRAuv16i8
- 4085253008U, // VRSRAuv1i64
- 4087350032U, // VRSRAuv2i32
- 4085253072U, // VRSRAuv2i64
- 4086301456U, // VRSRAuv4i16
- 4087350096U, // VRSRAuv4i32
- 4086301520U, // VRSRAuv8i16
- 4085777168U, // VRSRAuv8i8
- 4087350784U, // VRSUBHNv2i32
- 4086302208U, // VRSUBHNv4i16
- 4085253632U, // VRSUBHNv8i8
- 234883888U, // VSETLNi16
- 234883856U, // VSETLNi32
- 239078160U, // VSETLNi8
- 4088791808U, // VSHLLi16
- 4089053952U, // VSHLLi32
- 4088529664U, // VSHLLi8
- 4070574608U, // VSHLLsv2i64
- 4069526032U, // VSHLLsv4i32
- 4069001744U, // VSHLLsv8i16
- 4087351824U, // VSHLLuv2i64
- 4086303248U, // VSHLLuv4i32
- 4085778960U, // VSHLLuv8i16
- 4069000528U, // VSHLiv16i8
- 4068476304U, // VSHLiv1i64
- 4070573328U, // VSHLiv2i32
- 4068476368U, // VSHLiv2i64
- 4069524752U, // VSHLiv4i16
- 4070573392U, // VSHLiv4i32
- 4069524816U, // VSHLiv8i16
- 4069000464U, // VSHLiv8i8
- 4060087360U, // VSHLsv16i8
- 4063233024U, // VSHLsv1i64
- 4062184448U, // VSHLsv2i32
- 4063233088U, // VSHLsv2i64
- 4061135872U, // VSHLsv4i16
- 4062184512U, // VSHLsv4i32
- 4061135936U, // VSHLsv8i16
- 4060087296U, // VSHLsv8i8
- 4076864576U, // VSHLuv16i8
- 4080010240U, // VSHLuv1i64
- 4078961664U, // VSHLuv2i32
- 4080010304U, // VSHLuv2i64
- 4077913088U, // VSHLuv4i16
- 4078961728U, // VSHLuv4i32
- 4077913152U, // VSHLuv8i16
- 4076864512U, // VSHLuv8i8
- 4070574096U, // VSHRNv2i32
- 4069525520U, // VSHRNv4i16
- 4069001232U, // VSHRNv8i8
- 4068999248U, // VSHRsv16i8
- 4068475024U, // VSHRsv1i64
- 4070572048U, // VSHRsv2i32
- 4068475088U, // VSHRsv2i64
- 4069523472U, // VSHRsv4i16
- 4070572112U, // VSHRsv4i32
- 4069523536U, // VSHRsv8i16
- 4068999184U, // VSHRsv8i8
- 4085776464U, // VSHRuv16i8
- 4085252240U, // VSHRuv1i64
- 4087349264U, // VSHRuv2i32
- 4085252304U, // VSHRuv2i64
- 4086300688U, // VSHRuv4i16
- 4087349328U, // VSHRuv4i32
- 4086300752U, // VSHRuv8i16
- 4085776400U, // VSHRuv8i8
- 247073600U, // VSHTOD
- 247073344U, // VSHTOS
- 246942656U, // VSITOD
- 246942400U, // VSITOS
- 4085777744U, // VSLIv16i8
- 4085253520U, // VSLIv1i64
- 4087350544U, // VSLIv2i32
- 4085253584U, // VSLIv2i64
- 4086301968U, // VSLIv4i16
- 4087350608U, // VSLIv4i32
- 4086302032U, // VSLIv8i16
- 4085777680U, // VSLIv8i8
- 247073728U, // VSLTOD
- 247073472U, // VSLTOS
- 246483904U, // VSQRTD
- 246483648U, // VSQRTS
- 4068999504U, // VSRAsv16i8
- 4068475280U, // VSRAsv1i64
- 4070572304U, // VSRAsv2i32
- 4068475344U, // VSRAsv2i64
- 4069523728U, // VSRAsv4i16
- 4070572368U, // VSRAsv4i32
- 4069523792U, // VSRAsv8i16
- 4068999440U, // VSRAsv8i8
- 4085776720U, // VSRAuv16i8
- 4085252496U, // VSRAuv1i64
- 4087349520U, // VSRAuv2i32
- 4085252560U, // VSRAuv2i64
- 4086300944U, // VSRAuv4i16
- 4087349584U, // VSRAuv4i32
- 4086301008U, // VSRAuv8i16
- 4085776656U, // VSRAuv8i8
- 4085777488U, // VSRIv16i8
- 4085253264U, // VSRIv1i64
- 4087350288U, // VSRIv2i32
- 4085253328U, // VSRIv2i64
- 4086301712U, // VSRIv4i16
- 4087350352U, // VSRIv4i32
- 4086301776U, // VSRIv8i16
- 4085777424U, // VSRIv8i8
- 4093642560U, // VST1d16
- 4093641280U, // VST1d16Q
- 4093642304U, // VST1d16T
- 4093642624U, // VST1d32
- 4093641344U, // VST1d32Q
- 4093642368U, // VST1d32T
- 4093642688U, // VST1d64
- 4093642496U, // VST1d8
- 4093641216U, // VST1d8Q
- 4093642240U, // VST1d8T
- 4093642624U, // VST1df
- 4093643328U, // VST1q16
- 4093643392U, // VST1q32
- 4093643456U, // VST1q64
- 4093643264U, // VST1q8
- 4093643392U, // VST1qf
- 4102030592U, // VST2LNd16
- 4102031616U, // VST2LNd32
- 4102029568U, // VST2LNd8
- 4102030624U, // VST2LNq16a
- 4102030624U, // VST2LNq16b
- 4102031680U, // VST2LNq32a
- 4102031680U, // VST2LNq32b
- 4093642816U, // VST2d16
- 4093643072U, // VST2d16D
- 4093642880U, // VST2d32
- 4093643136U, // VST2d32D
- 4093643456U, // VST2d64
- 4093642752U, // VST2d8
- 4093643008U, // VST2d8D
- 4093641536U, // VST2q16
- 4093641600U, // VST2q32
- 4093641472U, // VST2q8
- 4102030848U, // VST3LNd16
- 4102031872U, // VST3LNd32
- 4102029824U, // VST3LNd8
- 4102030880U, // VST3LNq16a
- 4102030880U, // VST3LNq16b
- 4102031936U, // VST3LNq32a
- 4102031936U, // VST3LNq32b
- 4093641792U, // VST3d16
- 4093641856U, // VST3d32
- 4093642432U, // VST3d64
- 4093641728U, // VST3d8
- 4093642048U, // VST3q16a
- 4093642048U, // VST3q16b
- 4093642112U, // VST3q32a
- 4093642112U, // VST3q32b
- 4093641984U, // VST3q8a
- 4093641984U, // VST3q8b
- 4102031104U, // VST4LNd16
- 4102032128U, // VST4LNd32
- 4102030080U, // VST4LNd8
- 4102031136U, // VST4LNq16a
- 4102031136U, // VST4LNq16b
- 4102032192U, // VST4LNq32a
- 4102032192U, // VST4LNq32b
- 4093640768U, // VST4d16
- 4093640832U, // VST4d32
- 4093641408U, // VST4d64
- 4093640704U, // VST4d8
- 4093641024U, // VST4q16a
- 4093641024U, // VST4q16b
- 4093641088U, // VST4q32a
- 4093641088U, // VST4q32b
- 4093640960U, // VST4q8a
- 4093640960U, // VST4q8b
- 201329408U, // VSTMD
- 201329152U, // VSTMS
- 218106624U, // VSTRD
- 209718016U, // VSTRQ
- 218106368U, // VSTRS
- 238029632U, // VSUBD
- 4070573568U, // VSUBHNv2i32
- 4069524992U, // VSUBHNv4i16
- 4068476416U, // VSUBHNv8i8
- 4070572544U, // VSUBLsv2i64
- 4069523968U, // VSUBLsv4i32
- 4068475392U, // VSUBLsv8i16
- 4087349760U, // VSUBLuv2i64
- 4086301184U, // VSUBLuv4i32
- 4085252608U, // VSUBLuv8i16
- 238029376U, // VSUBS
- 4070572800U, // VSUBWsv2i64
- 4069524224U, // VSUBWsv4i32
- 4068475648U, // VSUBWsv8i16
- 4087350016U, // VSUBWuv2i64
- 4086301440U, // VSUBWuv4i32
- 4085252864U, // VSUBWuv8i16
- 4062186752U, // VSUBfd
- 4062186752U, // VSUBfd_sfp
- 4062186816U, // VSUBfq
- 4076865600U, // VSUBv16i8
- 4080011264U, // VSUBv1i64
- 4078962688U, // VSUBv2i32
- 4080011328U, // VSUBv2i64
- 4077914112U, // VSUBv4i16
- 4078962752U, // VSUBv4i32
- 4077914176U, // VSUBv8i16
- 4076865536U, // VSUBv8i8
- 4088528896U, // VSWPd
- 4088528960U, // VSWPq
- 4088399872U, // VTBL1
- 4088400128U, // VTBL2
- 4088400384U, // VTBL3
- 4088400640U, // VTBL4
- 4088399936U, // VTBX1
- 4088400192U, // VTBX2
- 4088400448U, // VTBX3
- 4088400704U, // VTBX4
- 247335744U, // VTOSHD
- 247335488U, // VTOSHS
- 247270208U, // VTOSIRD
- 247269952U, // VTOSIRS
- 247270336U, // VTOSIZD
- 247270080U, // VTOSIZS
- 247335872U, // VTOSLD
- 247335616U, // VTOSLS
- 247401280U, // VTOUHD
- 247401024U, // VTOUHS
- 247204672U, // VTOUIRD
- 247204416U, // VTOUIRS
- 247204800U, // VTOUIZD
- 247204544U, // VTOUIZS
- 247401408U, // VTOULD
- 247401152U, // VTOULS
- 4088791168U, // VTRNd16
- 4089053312U, // VTRNd32
- 4088529024U, // VTRNd8
- 4088791232U, // VTRNq16
- 4089053376U, // VTRNq32
- 4088529088U, // VTRNq8
- 4060088400U, // VTSTv16i8
- 4062185488U, // VTSTv2i32
- 4061136912U, // VTSTv4i16
- 4062185552U, // VTSTv4i32
- 4061136976U, // VTSTv8i16
- 4060088336U, // VTSTv8i8
- 247139136U, // VUHTOD
- 247138880U, // VUHTOS
- 246942528U, // VUITOD
- 246942272U, // VUITOS
- 247139264U, // VULTOD
- 247139008U, // VULTOS
- 4088791296U, // VUZPd16
- 4089053440U, // VUZPd32
- 4088529152U, // VUZPd8
- 4088791360U, // VUZPq16
- 4089053504U, // VUZPq32
- 4088529216U, // VUZPq8
- 4088791424U, // VZIPd16
- 4089053568U, // VZIPd32
- 4088529280U, // VZIPd8
- 4088791488U, // VZIPq16
- 4089053632U, // VZIPq32
- 4088529344U, // VZIPq8
- 52428802U, // WFE
- 52428803U, // WFI
- 52428801U, // YIELD
- 4048551936U, // t2ADCSri
- 3947888640U, // t2ADCSrr
- 3947888640U, // t2ADCSrs
- 4047503360U, // t2ADCri
- 3946840064U, // t2ADCrr
- 3946840064U, // t2ADCrs
- 4044357632U, // t2ADDSri
- 3943694336U, // t2ADDSrr
- 3943694336U, // t2ADDSrs
- 4044161024U, // t2ADDrSPi
- 4060938240U, // t2ADDrSPi12
- 3943497728U, // t2ADDrSPs
- 4043309056U, // t2ADDri
- 4060086272U, // t2ADDri12
- 3942645760U, // t2ADDrr
- 3942645760U, // t2ADDrs
- 4026531840U, // t2ANDri
- 3925868544U, // t2ANDrr
- 3925868544U, // t2ANDrs
- 3931045920U, // t2ASRri
- 4198559744U, // t2ASRrr
- 4026568704U, // t2B
- 4084137984U, // t2BFC
- 4083154944U, // t2BFI
- 4028628992U, // t2BICri
- 3927965696U, // t2BICrr
- 3927965696U, // t2BICrs
- 3931049728U, // t2BR_JT
- 4089479168U, // t2BXJ
- 4026564608U, // t2Bcc
- 4088430624U, // t2CLREX
- 4205899904U, // t2CLZ
- 4044361472U, // t2CMNzri
- 3943698176U, // t2CMNzrr
- 3943698176U, // t2CMNzrs
- 4054847232U, // t2CMPri
- 3954183936U, // t2CMPrr
- 3954183936U, // t2CMPrs
- 4054847232U, // t2CMPzri
- 3954183936U, // t2CMPzrr
- 3954183936U, // t2CMPzrs
- 4087382016U, // t2CPS
- 4087382256U, // t2DBG
- 4088430683U, // t2DMBish
- 4088430682U, // t2DMBishst
- 4088430679U, // t2DMBnsh
- 4088430678U, // t2DMBnshst
- 4088430675U, // t2DMBosh
- 4088430674U, // t2DMBoshst
- 4088430686U, // t2DMBst
- 4088430667U, // t2DSBish
- 4088430666U, // t2DSBishst
- 4088430663U, // t2DSBnsh
- 4088430662U, // t2DSBnshst
- 4088430659U, // t2DSBosh
- 4088430658U, // t2DSBoshst
- 4088430670U, // t2DSBst
- 4034920448U, // t2EORri
- 3934257152U, // t2EORrr
- 3934257152U, // t2EORrs
- 4088430703U, // t2ISBsy
- 48896U, // t2IT
- 4089417567U, // t2Int_MemBarrierV7
- 4089417551U, // t2Int_SyncBarrierV7
- 0U, // t2Int_eh_sjlj_setjmp
- 3893362688U, // t2LDM
- 3893362688U, // t2LDM_RET
- 4161801728U, // t2LDRBT
- 4161800448U, // t2LDRB_POST
- 4161801472U, // t2LDRB_PRE
- 4170186752U, // t2LDRBi12
- 4161801216U, // t2LDRBi8
- 4162781184U, // t2LDRBpci
- 4161798144U, // t2LDRBs
- 3914334208U, // t2LDRDi8
- 3898540032U, // t2LDRDpci
- 3897560832U, // t2LDREX
- 3905949519U, // t2LDREXB
- 3905945727U, // t2LDREXD
- 3905949535U, // t2LDREXH
- 4163898880U, // t2LDRHT
- 4163897600U, // t2LDRH_POST
- 4163898624U, // t2LDRH_PRE
- 4172283904U, // t2LDRHi12
- 4163898368U, // t2LDRHi8
- 4164878336U, // t2LDRHpci
- 4163895296U, // t2LDRHs
- 4178578944U, // t2LDRSBT
- 4178577664U, // t2LDRSB_POST
- 4178578688U, // t2LDRSB_PRE
- 4186963968U, // t2LDRSBi12
- 4178578432U, // t2LDRSBi8
- 4179558400U, // t2LDRSBpci
- 4178575360U, // t2LDRSBs
- 4180676096U, // t2LDRSHT
- 4180674816U, // t2LDRSH_POST
- 4180675840U, // t2LDRSH_PRE
- 4189061120U, // t2LDRSHi12
- 4180675584U, // t2LDRSHi8
- 4181655552U, // t2LDRSHpci
- 4180672512U, // t2LDRSHs
- 4165996032U, // t2LDRT
- 4165994752U, // t2LDR_POST
- 4165995776U, // t2LDR_PRE
- 4174381056U, // t2LDRi12
- 4165995520U, // t2LDRi8
- 4166975488U, // t2LDRpci
- 0U, // t2LDRpci_pic
- 4165992448U, // t2LDRs
- 4061069312U, // t2LEApcrel
- 4061069312U, // t2LEApcrelJT
- 3931045888U, // t2LSLri
- 4194365440U, // t2LSLrr
- 3931045904U, // t2LSRri
- 4196462592U, // t2LSRrr
- 4211081216U, // t2MLA
- 4211081232U, // t2MLS
- 3931045920U, // t2MOVCCasr
- 4031709184U, // t2MOVCCi
- 3931045888U, // t2MOVCClsl
- 3931045904U, // t2MOVCClsr
- 3931045888U, // t2MOVCCr
- 3931045936U, // t2MOVCCror
- 4072669184U, // t2MOVTi16
- 4031709184U, // t2MOVi
- 4064280576U, // t2MOVi16
- 0U, // t2MOVi32imm
- 3931045888U, // t2MOVr
- 3931045936U, // t2MOVrx
- 3932094560U, // t2MOVsra_flag
- 3932094544U, // t2MOVsrl_flag
- 4091576320U, // t2MRS
- 4092624896U, // t2MRSsys
- 4085284864U, // t2MSR
- 4086333440U, // t2MSRsys
- 4211142656U, // t2MUL
- 4033806336U, // t2MVNi
- 3933143040U, // t2MVNr
- 3933143040U, // t2MVNs
- 4087382016U, // t2NOP
- 4032823296U, // t2ORNri
- 3932160000U, // t2ORNrr
- 3932160000U, // t2ORNrs
- 4030726144U, // t2ORRri
- 3930062848U, // t2ORRrr
- 3930062848U, // t2ORRrs
- 3938451456U, // t2PKHBT
- 3938451488U, // t2PKHTB
- 4172345344U, // t2PLDWi12
- 4163959808U, // t2PLDWi8
- 4164939776U, // t2PLDWpci
- 4163956736U, // t2PLDWr
- 4163956736U, // t2PLDWs
- 4170248192U, // t2PLDi12
- 4161862656U, // t2PLDi8
- 4162842624U, // t2PLDpci
- 4161859584U, // t2PLDr
- 4161859584U, // t2PLDs
- 4187025408U, // t2PLIi12
- 4178639872U, // t2PLIi8
- 4179619840U, // t2PLIpci
- 4178636800U, // t2PLIr
- 4178636800U, // t2PLIs
- 4202754176U, // t2QADD
- 4203802640U, // t2QADD16
- 4202754064U, // t2QADD8
- 4204851216U, // t2QASX
- 4202754192U, // t2QDADD
- 4202754224U, // t2QDSUB
- 4209045520U, // t2QSAX
- 4202754208U, // t2QSUB
- 4207996944U, // t2QSUB16
- 4206948368U, // t2QSUB8
- 4203802784U, // t2RBIT
- 4203802752U, // t2REV
- 4203802768U, // t2REV16
- 4203802800U, // t2REVSH
- 3893362688U, // t2RFEDB
- 3895459840U, // t2RFEDBW
- 3918528512U, // t2RFEIA
- 3920625664U, // t2RFEIAW
- 3931045936U, // t2RORri
- 4200656896U, // t2RORrr
- 4056940544U, // t2RSBSri
- 3956277248U, // t2RSBSrs
- 4055891968U, // t2RSBri
- 3955228672U, // t2RSBrs
- 4203802624U, // t2SADD16
- 4202754048U, // t2SADD8
- 4204851200U, // t2SASX
- 4050649088U, // t2SBCSri
- 3949985792U, // t2SBCSrr
- 3949985792U, // t2SBCSrs
- 4049600512U, // t2SBCri
- 3948937216U, // t2SBCrr
- 3948937216U, // t2SBCrs
- 4081057792U, // t2SBFX
- 4220580080U, // t2SDIV
- 4204851328U, // t2SEL
- 4087382020U, // t2SEV
- 4203802656U, // t2SHADD16
- 4202754080U, // t2SHADD8
- 4204851232U, // t2SHASX
- 4209045536U, // t2SHSAX
- 4207996960U, // t2SHSUB16
- 4206948384U, // t2SHSUB8
- 4159733760U, // t2SMC
- 4212129792U, // t2SMLABB
- 4212129808U, // t2SMLABT
- 4213178368U, // t2SMLAD
- 4213178384U, // t2SMLADX
- 4223664128U, // t2SMLAL
- 4223664256U, // t2SMLALBB
- 4223664272U, // t2SMLALBT
- 4223664320U, // t2SMLALD
- 4223664336U, // t2SMLALDX
- 4223664288U, // t2SMLALTB
- 4223664304U, // t2SMLALTT
- 4212129824U, // t2SMLATB
- 4212129840U, // t2SMLATT
- 4214226944U, // t2SMLAWB
- 4214226960U, // t2SMLAWT
- 4215275520U, // t2SMLSD
- 4215275536U, // t2SMLSDX
- 4224712896U, // t2SMLSLD
- 4224712912U, // t2SMLSLDX
- 4216324096U, // t2SMMLA
- 4216324112U, // t2SMMLAR
- 4217372672U, // t2SMMLS
- 4217372688U, // t2SMMLSR
- 4216385536U, // t2SMMUL
- 4216385552U, // t2SMMULR
- 4213239808U, // t2SMUAD
- 4213239824U, // t2SMUADX
- 4212191232U, // t2SMULBB
- 4212191248U, // t2SMULBT
- 4219469824U, // t2SMULL
- 4212191264U, // t2SMULTB
- 4212191280U, // t2SMULTT
- 4214288384U, // t2SMULWB
- 4214288400U, // t2SMULWT
- 4215336960U, // t2SMUSD
- 4215336976U, // t2SMUSDX
- 3892314112U, // t2SRSDB
- 3894411264U, // t2SRSDBW
- 3917479936U, // t2SRSIA
- 3919577088U, // t2SRSIAW
- 4078960640U, // t2SSAT16
- 4078960640U, // t2SSATasr
- 4076863488U, // t2SSATlsl
- 4209045504U, // t2SSAX
- 4207996928U, // t2SSUB16
- 4206948352U, // t2SSUB8
- 3892314112U, // t2STM
- 4160753152U, // t2STRBT
- 4160751872U, // t2STRB_POST
- 4160752896U, // t2STRB_PRE
- 4169138176U, // t2STRBi12
- 4160752640U, // t2STRBi8
- 4160749568U, // t2STRBs
- 3913285632U, // t2STRDi8
- 3896508416U, // t2STREX
- 3904900928U, // t2STREXB
- 3904897136U, // t2STREXD
- 3904900944U, // t2STREXH
- 4162850304U, // t2STRHT
- 4162849024U, // t2STRH_POST
- 4162850048U, // t2STRH_PRE
- 4171235328U, // t2STRHi12
- 4162849792U, // t2STRHi8
- 4162846720U, // t2STRHs
- 4164947456U, // t2STRT
- 4164946176U, // t2STR_POST
- 4164947200U, // t2STR_PRE
- 4173332480U, // t2STRi12
- 4164946944U, // t2STRi8
- 4164943872U, // t2STRs
- 4054843392U, // t2SUBSri
- 3954180096U, // t2SUBSrr
- 3954180096U, // t2SUBSrs
- 4054646784U, // t2SUBrSPi
- 4071424000U, // t2SUBrSPi12
- 0U, // t2SUBrSPi12_
- 0U, // t2SUBrSPi_
- 3953983488U, // t2SUBrSPs
- 0U, // t2SUBrSPs_
- 4053794816U, // t2SUBri
- 4070572032U, // t2SUBri12
- 3953131520U, // t2SUBrr
- 3953131520U, // t2SUBrs
- 4196462720U, // t2SXTAB16rr
- 4196462720U, // t2SXTAB16rr_rot
- 4198559872U, // t2SXTABrr
- 4198559872U, // t2SXTABrr_rot
- 4194365568U, // t2SXTAHrr
- 4194365568U, // t2SXTAHrr_rot
- 4197445760U, // t2SXTB16r
- 4197445760U, // t2SXTB16r_rot
- 4199542912U, // t2SXTBr
- 4199542912U, // t2SXTBr_rot
- 4195348608U, // t2SXTHr
- 4195348608U, // t2SXTHr_rot
- 3906990080U, // t2TBB
- 3906007040U, // t2TBBgen
- 3906990096U, // t2TBH
- 3906007056U, // t2TBHgen
- 4035972864U, // t2TEQri
- 3935309568U, // t2TEQrr
- 3935309568U, // t2TEQrs
- 4026585088U, // t2TPsoft
- 4027584256U, // t2TSTri
- 3926920960U, // t2TSTrr
- 3926920960U, // t2TSTrs
- 4203802688U, // t2UADD16
- 4202754112U, // t2UADD8
- 4204851264U, // t2UASX
- 4089446400U, // t2UBFX
- 4222677232U, // t2UDIV
- 4203802720U, // t2UHADD16
- 4202754144U, // t2UHADD8
- 4204851296U, // t2UHASX
- 4209045600U, // t2UHSAX
- 4207997024U, // t2UHSUB16
- 4206948448U, // t2UHSUB8
- 4225761376U, // t2UMAAL
- 4225761280U, // t2UMLAL
- 4221566976U, // t2UMULL
- 4203802704U, // t2UQADD16
- 4202754128U, // t2UQADD8
- 4204851280U, // t2UQASX
- 4209045584U, // t2UQSAX
- 4207997008U, // t2UQSUB16
- 4206948432U, // t2UQSUB8
- 4218482688U, // t2USAD8
- 4218421248U, // t2USADA8
- 4087349248U, // t2USAT16
- 4087349248U, // t2USATasr
- 4085252096U, // t2USATlsl
- 4209045568U, // t2USAX
- 4207996992U, // t2USUB16
- 4206948416U, // t2USUB8
- 4197511296U, // t2UXTAB16rr
- 4197511296U, // t2UXTAB16rr_rot
- 4199608448U, // t2UXTABrr
- 4199608448U, // t2UXTABrr_rot
- 4195414144U, // t2UXTAHrr
- 4195414144U, // t2UXTAHrr_rot
- 4198494336U, // t2UXTB16r
- 4198494336U, // t2UXTB16r_rot
- 4200591488U, // t2UXTBr
- 4200591488U, // t2UXTBr_rot
- 4196397184U, // t2UXTHr
- 4196397184U, // t2UXTHr_rot
- 4087382018U, // t2WFE
- 4087382019U, // t2WFI
- 4087382017U, // t2YIELD
- 16704U, // tADC
- 17408U, // tADDhirr
- 7168U, // tADDi3
- 12288U, // tADDi8
- 40960U, // tADDrPCi
- 17512U, // tADDrSP
- 43008U, // tADDrSPi
- 6144U, // tADDrr
- 45056U, // tADDspi
- 17541U, // tADDspr
- 0U, // tADDspr_
- 0U, // tADJCALLSTACKDOWN
- 0U, // tADJCALLSTACKUP
- 16384U, // tAND
- 0U, // tANDsp
- 4096U, // tASRri
- 16640U, // tASRrr
- 57344U, // tB
- 17280U, // tBIC
- 48640U, // tBKPT
- 4026585088U, // tBL
- 4026580992U, // tBLXi
- 4026580992U, // tBLXi_r9
- 18304U, // tBLXr
- 18304U, // tBLXr_r9
- 4026585088U, // tBLr9
- 18055U, // tBRIND
- 18055U, // tBR_JTr
- 0U, // tBX
- 18288U, // tBX_RET
- 18176U, // tBX_RET_vararg
- 0U, // tBXr9
- 53248U, // tBcc
- 4026585088U, // tBfar
- 47360U, // tCBNZ
- 45312U, // tCBZ
- 17088U, // tCMNz
- 17664U, // tCMPhir
- 10240U, // tCMPi8
- 17024U, // tCMPr
- 17664U, // tCMPzhir
- 10240U, // tCMPzi8
- 17024U, // tCMPzr
- 46688U, // tCPS
- 16448U, // tEOR
- 0U, // tInt_eh_sjlj_setjmp
- 51200U, // tLDM
- 22528U, // tLDR
- 23552U, // tLDRB
- 30720U, // tLDRBi
- 23040U, // tLDRH
- 34816U, // tLDRHi
- 22016U, // tLDRSB
- 24064U, // tLDRSH
- 38912U, // tLDRcp
- 26624U, // tLDRi
- 18432U, // tLDRpci
- 0U, // tLDRpci_pic
- 38912U, // tLDRspi
- 40960U, // tLEApcrel
- 40960U, // tLEApcrelJT
- 0U, // tLSLri
- 16512U, // tLSLrr
- 2048U, // tLSRri
- 16576U, // tLSRrr
- 8192U, // tMOVCCi
- 17920U, // tMOVCCr
- 0U, // tMOVCCr_pseudo
- 0U, // tMOVSr
- 17920U, // tMOVgpr2gpr
- 17920U, // tMOVgpr2tgpr
- 8192U, // tMOVi8
- 17920U, // tMOVr
- 17920U, // tMOVtgpr2gpr
- 17216U, // tMUL
- 17344U, // tMVN
- 48896U, // tNOP
- 17152U, // tORR
- 17528U, // tPICADD
- 48128U, // tPOP
- 48128U, // tPOP_RET
- 46080U, // tPUSH
- 47616U, // tREV
- 47680U, // tREV16
- 47808U, // tREVSH
- 16832U, // tROR
- 16960U, // tRSB
- 38912U, // tRestore
- 16768U, // tSBC
- 46664U, // tSETENDBE
- 46656U, // tSETENDLE
- 48960U, // tSEV
- 49152U, // tSTM
- 20480U, // tSTR
- 21504U, // tSTRB
- 28672U, // tSTRBi
- 20992U, // tSTRH
- 32768U, // tSTRHi
- 24576U, // tSTRi
- 36864U, // tSTRspi
- 7680U, // tSUBi3
- 14336U, // tSUBi8
- 6656U, // tSUBrr
- 45184U, // tSUBspi
- 0U, // tSUBspi_
- 57088U, // tSVC
- 45632U, // tSXTB
- 45568U, // tSXTH
- 36864U, // tSpill
- 4026585088U, // tTPsoft
- 56832U, // tTRAP
- 16896U, // tTST
- 45760U, // tUXTB
- 45696U, // tUXTH
- 48928U, // tWFE
- 48944U, // tWFI
- 48912U, // tYIELD
- 0U
- };
- const unsigned opcode = MI.getOpcode();
- unsigned Value = InstBits[opcode];
- unsigned op = 0;
- op = op; // suppress warning
- switch (opcode) {
- case ARM::ADCSSri:
- case ARM::ADCSSrr:
- case ARM::ADCSSrs:
- case ARM::ADCri:
- case ARM::ADCrr:
- case ARM::ADCrs:
- case ARM::ADDSri:
- case ARM::ADDSrr:
- case ARM::ADDSrs:
- case ARM::ADDri:
- case ARM::ADDrr:
- case ARM::ADDrs:
- case ARM::ADJCALLSTACKDOWN:
- case ARM::ADJCALLSTACKUP:
- case ARM::ANDri:
- case ARM::ANDrr:
- case ARM::ANDrs:
- case ARM::ATOMIC_CMP_SWAP_I16:
- case ARM::ATOMIC_CMP_SWAP_I32:
- case ARM::ATOMIC_CMP_SWAP_I8:
- case ARM::ATOMIC_LOAD_ADD_I16:
- case ARM::ATOMIC_LOAD_ADD_I32:
- case ARM::ATOMIC_LOAD_ADD_I8:
- case ARM::ATOMIC_LOAD_AND_I16:
- case ARM::ATOMIC_LOAD_AND_I32:
- case ARM::ATOMIC_LOAD_AND_I8:
- case ARM::ATOMIC_LOAD_NAND_I16:
- case ARM::ATOMIC_LOAD_NAND_I32:
- case ARM::ATOMIC_LOAD_NAND_I8:
- case ARM::ATOMIC_LOAD_OR_I16:
- case ARM::ATOMIC_LOAD_OR_I32:
- case ARM::ATOMIC_LOAD_OR_I8:
- case ARM::ATOMIC_LOAD_SUB_I16:
- case ARM::ATOMIC_LOAD_SUB_I32:
- case ARM::ATOMIC_LOAD_SUB_I8:
- case ARM::ATOMIC_LOAD_XOR_I16:
- case ARM::ATOMIC_LOAD_XOR_I32:
- case ARM::ATOMIC_LOAD_XOR_I8:
- case ARM::ATOMIC_SWAP_I16:
- case ARM::ATOMIC_SWAP_I32:
- case ARM::ATOMIC_SWAP_I8:
- case ARM::B:
- case ARM::BFC:
- case ARM::BFI:
- case ARM::BICri:
- case ARM::BICrr:
- case ARM::BICrs:
- case ARM::BKPT:
- case ARM::BL:
- case ARM::BLX:
- case ARM::BLXr9:
- case ARM::BL_pred:
- case ARM::BLr9:
- case ARM::BLr9_pred:
- case ARM::BMOVPCRX:
- case ARM::BMOVPCRXr9:
- case ARM::BRIND:
- case ARM::BR_JTadd:
- case ARM::BR_JTm:
- case ARM::BR_JTr:
- case ARM::BX:
- case ARM::BXJ:
- case ARM::BX_RET:
- case ARM::BXr9:
- case ARM::Bcc:
- case ARM::CDP:
- case ARM::CDP2:
- case ARM::CLREX:
- case ARM::CLZ:
- case ARM::CMNzri:
- case ARM::CMNzrr:
- case ARM::CMNzrs:
- case ARM::CMPri:
- case ARM::CMPrr:
- case ARM::CMPrs:
- case ARM::CMPzri:
- case ARM::CMPzrr:
- case ARM::CMPzrs:
- case ARM::CONSTPOOL_ENTRY:
- case ARM::CPS:
- case ARM::DBG:
- case ARM::DMBish:
- case ARM::DMBishst:
- case ARM::DMBnsh:
- case ARM::DMBnshst:
- case ARM::DMBosh:
- case ARM::DMBoshst:
- case ARM::DMBst:
- case ARM::DSBish:
- case ARM::DSBishst:
- case ARM::DSBnsh:
- case ARM::DSBnshst:
- case ARM::DSBosh:
- case ARM::DSBoshst:
- case ARM::DSBst:
- case ARM::EORri:
- case ARM::EORrr:
- case ARM::EORrs:
- case ARM::FCONSTD:
- case ARM::FCONSTS:
- case ARM::FMSTAT:
- case ARM::ISBsy:
- case ARM::Int_MemBarrierV6:
- case ARM::Int_MemBarrierV7:
- case ARM::Int_SyncBarrierV6:
- case ARM::Int_SyncBarrierV7:
- case ARM::Int_eh_sjlj_setjmp:
- case ARM::LDC2L_OFFSET:
- case ARM::LDC2L_OPTION:
- case ARM::LDC2L_POST:
- case ARM::LDC2L_PRE:
- case ARM::LDC2_OFFSET:
- case ARM::LDC2_OPTION:
- case ARM::LDC2_POST:
- case ARM::LDC2_PRE:
- case ARM::LDCL_OFFSET:
- case ARM::LDCL_OPTION:
- case ARM::LDCL_POST:
- case ARM::LDCL_PRE:
- case ARM::LDC_OFFSET:
- case ARM::LDC_OPTION:
- case ARM::LDC_POST:
- case ARM::LDC_PRE:
- case ARM::LDM:
- case ARM::LDM_RET:
- case ARM::LDR:
- case ARM::LDRB:
- case ARM::LDRBT:
- case ARM::LDRB_POST:
- case ARM::LDRB_PRE:
- case ARM::LDRD:
- case ARM::LDRD_POST:
- case ARM::LDRD_PRE:
- case ARM::LDREX:
- case ARM::LDREXB:
- case ARM::LDREXD:
- case ARM::LDREXH:
- case ARM::LDRH:
- case ARM::LDRHT:
- case ARM::LDRH_POST:
- case ARM::LDRH_PRE:
- case ARM::LDRSB:
- case ARM::LDRSBT:
- case ARM::LDRSB_POST:
- case ARM::LDRSB_PRE:
- case ARM::LDRSH:
- case ARM::LDRSHT:
- case ARM::LDRSH_POST:
- case ARM::LDRSH_PRE:
- case ARM::LDRT:
- case ARM::LDR_POST:
- case ARM::LDR_PRE:
- case ARM::LDRcp:
- case ARM::LEApcrel:
- case ARM::LEApcrelJT:
- case ARM::MCR:
- case ARM::MCR2:
- case ARM::MCRR:
- case ARM::MCRR2:
- case ARM::MLA:
- case ARM::MLS:
- case ARM::MOVCCi:
- case ARM::MOVCCr:
- case ARM::MOVCCs:
- case ARM::MOVPCLR:
- case ARM::MOVPCRX:
- case ARM::MOVTi16:
- case ARM::MOVi:
- case ARM::MOVi16:
- case ARM::MOVi2pieces:
- case ARM::MOVi32imm:
- case ARM::MOVr:
- case ARM::MOVrx:
- case ARM::MOVs:
- case ARM::MOVsra_flag:
- case ARM::MOVsrl_flag:
- case ARM::MRC:
- case ARM::MRC2:
- case ARM::MRRC:
- case ARM::MRRC2:
- case ARM::MRS:
- case ARM::MRSsys:
- case ARM::MSR:
- case ARM::MSRi:
- case ARM::MSRsys:
- case ARM::MSRsysi:
- case ARM::MUL:
- case ARM::MVNi:
- case ARM::MVNr:
- case ARM::MVNs:
- case ARM::NOP:
- case ARM::ORRri:
- case ARM::ORRrr:
- case ARM::ORRrs:
- case ARM::PICADD:
- case ARM::PICLDR:
- case ARM::PICLDRB:
- case ARM::PICLDRH:
- case ARM::PICLDRSB:
- case ARM::PICLDRSH:
- case ARM::PICSTR:
- case ARM::PICSTRB:
- case ARM::PICSTRH:
- case ARM::PKHBT:
- case ARM::PKHTB:
- case ARM::PLDWi:
- case ARM::PLDWr:
- case ARM::PLDi:
- case ARM::PLDr:
- case ARM::PLIi:
- case ARM::PLIr:
- case ARM::QADD:
- case ARM::QADD16:
- case ARM::QADD8:
- case ARM::QASX:
- case ARM::QDADD:
- case ARM::QDSUB:
- case ARM::QSAX:
- case ARM::QSUB:
- case ARM::QSUB16:
- case ARM::QSUB8:
- case ARM::RBIT:
- case ARM::REV:
- case ARM::REV16:
- case ARM::REVSH:
- case ARM::RFE:
- case ARM::RFEW:
- case ARM::RSBSri:
- case ARM::RSBSrs:
- case ARM::RSBri:
- case ARM::RSBrs:
- case ARM::RSCSri:
- case ARM::RSCSrs:
- case ARM::RSCri:
- case ARM::RSCrs:
- case ARM::SADD16:
- case ARM::SADD8:
- case ARM::SASX:
- case ARM::SBCSSri:
- case ARM::SBCSSrr:
- case ARM::SBCSSrs:
- case ARM::SBCri:
- case ARM::SBCrr:
- case ARM::SBCrs:
- case ARM::SBFX:
- case ARM::SEL:
- case ARM::SETENDBE:
- case ARM::SETENDLE:
- case ARM::SEV:
- case ARM::SHADD16:
- case ARM::SHADD8:
- case ARM::SHASX:
- case ARM::SHSAX:
- case ARM::SHSUB16:
- case ARM::SHSUB8:
- case ARM::SMC:
- case ARM::SMLABB:
- case ARM::SMLABT:
- case ARM::SMLAD:
- case ARM::SMLADX:
- case ARM::SMLAL:
- case ARM::SMLALBB:
- case ARM::SMLALBT:
- case ARM::SMLALD:
- case ARM::SMLALDX:
- case ARM::SMLALTB:
- case ARM::SMLALTT:
- case ARM::SMLATB:
- case ARM::SMLATT:
- case ARM::SMLAWB:
- case ARM::SMLAWT:
- case ARM::SMLSD:
- case ARM::SMLSDX:
- case ARM::SMLSLD:
- case ARM::SMLSLDX:
- case ARM::SMMLA:
- case ARM::SMMLAR:
- case ARM::SMMLS:
- case ARM::SMMLSR:
- case ARM::SMMUL:
- case ARM::SMMULR:
- case ARM::SMUAD:
- case ARM::SMUADX:
- case ARM::SMULBB:
- case ARM::SMULBT:
- case ARM::SMULL:
- case ARM::SMULTB:
- case ARM::SMULTT:
- case ARM::SMULWB:
- case ARM::SMULWT:
- case ARM::SMUSD:
- case ARM::SMUSDX:
- case ARM::SRS:
- case ARM::SRSW:
- case ARM::SSAT16:
- case ARM::SSATasr:
- case ARM::SSATlsl:
- case ARM::SSAX:
- case ARM::SSUB16:
- case ARM::SSUB8:
- case ARM::STC2L_OFFSET:
- case ARM::STC2L_OPTION:
- case ARM::STC2L_POST:
- case ARM::STC2L_PRE:
- case ARM::STC2_OFFSET:
- case ARM::STC2_OPTION:
- case ARM::STC2_POST:
- case ARM::STC2_PRE:
- case ARM::STCL_OFFSET:
- case ARM::STCL_OPTION:
- case ARM::STCL_POST:
- case ARM::STCL_PRE:
- case ARM::STC_OFFSET:
- case ARM::STC_OPTION:
- case ARM::STC_POST:
- case ARM::STC_PRE:
- case ARM::STM:
- case ARM::STR:
- case ARM::STRB:
- case ARM::STRBT:
- case ARM::STRB_POST:
- case ARM::STRB_PRE:
- case ARM::STRD:
- case ARM::STRD_POST:
- case ARM::STRD_PRE:
- case ARM::STREX:
- case ARM::STREXB:
- case ARM::STREXD:
- case ARM::STREXH:
- case ARM::STRH:
- case ARM::STRHT:
- case ARM::STRH_POST:
- case ARM::STRH_PRE:
- case ARM::STRT:
- case ARM::STR_POST:
- case ARM::STR_PRE:
- case ARM::SUBSri:
- case ARM::SUBSrr:
- case ARM::SUBSrs:
- case ARM::SUBri:
- case ARM::SUBrr:
- case ARM::SUBrs:
- case ARM::SVC:
- case ARM::SWP:
- case ARM::SWPB:
- case ARM::SXTAB16rr:
- case ARM::SXTAB16rr_rot:
- case ARM::SXTABrr:
- case ARM::SXTABrr_rot:
- case ARM::SXTAHrr:
- case ARM::SXTAHrr_rot:
- case ARM::SXTB16r:
- case ARM::SXTB16r_rot:
- case ARM::SXTBr:
- case ARM::SXTBr_rot:
- case ARM::SXTHr:
- case ARM::SXTHr_rot:
- case ARM::TEQri:
- case ARM::TEQrr:
- case ARM::TEQrs:
- case ARM::TPsoft:
- case ARM::TRAP:
- case ARM::TSTri:
- case ARM::TSTrr:
- case ARM::TSTrs:
- case ARM::UADD16:
- case ARM::UADD8:
- case ARM::UASX:
- case ARM::UBFX:
- case ARM::UHADD16:
- case ARM::UHADD8:
- case ARM::UHASX:
- case ARM::UHSAX:
- case ARM::UHSUB16:
- case ARM::UHSUB8:
- case ARM::UMAAL:
- case ARM::UMLAL:
- case ARM::UMULL:
- case ARM::UQADD16:
- case ARM::UQADD8:
- case ARM::UQASX:
- case ARM::UQSAX:
- case ARM::UQSUB16:
- case ARM::UQSUB8:
- case ARM::USAD8:
- case ARM::USADA8:
- case ARM::USAT16:
- case ARM::USATasr:
- case ARM::USATlsl:
- case ARM::USAX:
- case ARM::USUB16:
- case ARM::USUB8:
- case ARM::UXTAB16rr:
- case ARM::UXTAB16rr_rot:
- case ARM::UXTABrr:
- case ARM::UXTABrr_rot:
- case ARM::UXTAHrr:
- case ARM::UXTAHrr_rot:
- case ARM::UXTB16r:
- case ARM::UXTB16r_rot:
- case ARM::UXTBr:
- case ARM::UXTBr_rot:
- case ARM::UXTHr:
- case ARM::UXTHr_rot:
- case ARM::VABALsv2i64:
- case ARM::VABALsv4i32:
- case ARM::VABALsv8i16:
- case ARM::VABALuv2i64:
- case ARM::VABALuv4i32:
- case ARM::VABALuv8i16:
- case ARM::VABAsv16i8:
- case ARM::VABAsv2i32:
- case ARM::VABAsv4i16:
- case ARM::VABAsv4i32:
- case ARM::VABAsv8i16:
- case ARM::VABAsv8i8:
- case ARM::VABAuv16i8:
- case ARM::VABAuv2i32:
- case ARM::VABAuv4i16:
- case ARM::VABAuv4i32:
- case ARM::VABAuv8i16:
- case ARM::VABAuv8i8:
- case ARM::VABDLsv2i64:
- case ARM::VABDLsv4i32:
- case ARM::VABDLsv8i16:
- case ARM::VABDLuv2i64:
- case ARM::VABDLuv4i32:
- case ARM::VABDLuv8i16:
- case ARM::VABDfd:
- case ARM::VABDfq:
- case ARM::VABDsv16i8:
- case ARM::VABDsv2i32:
- case ARM::VABDsv4i16:
- case ARM::VABDsv4i32:
- case ARM::VABDsv8i16:
- case ARM::VABDsv8i8:
- case ARM::VABDuv16i8:
- case ARM::VABDuv2i32:
- case ARM::VABDuv4i16:
- case ARM::VABDuv4i32:
- case ARM::VABDuv8i16:
- case ARM::VABDuv8i8:
- case ARM::VABSD:
- case ARM::VABSS:
- case ARM::VABSfd:
- case ARM::VABSfd_sfp:
- case ARM::VABSfq:
- case ARM::VABSv16i8:
- case ARM::VABSv2i32:
- case ARM::VABSv4i16:
- case ARM::VABSv4i32:
- case ARM::VABSv8i16:
- case ARM::VABSv8i8:
- case ARM::VACGEd:
- case ARM::VACGEq:
- case ARM::VACGTd:
- case ARM::VACGTq:
- case ARM::VADDD:
- case ARM::VADDHNv2i32:
- case ARM::VADDHNv4i16:
- case ARM::VADDHNv8i8:
- case ARM::VADDLsv2i64:
- case ARM::VADDLsv4i32:
- case ARM::VADDLsv8i16:
- case ARM::VADDLuv2i64:
- case ARM::VADDLuv4i32:
- case ARM::VADDLuv8i16:
- case ARM::VADDS:
- case ARM::VADDWsv2i64:
- case ARM::VADDWsv4i32:
- case ARM::VADDWsv8i16:
- case ARM::VADDWuv2i64:
- case ARM::VADDWuv4i32:
- case ARM::VADDWuv8i16:
- case ARM::VADDfd:
- case ARM::VADDfd_sfp:
- case ARM::VADDfq:
- case ARM::VADDv16i8:
- case ARM::VADDv1i64:
- case ARM::VADDv2i32:
- case ARM::VADDv2i64:
- case ARM::VADDv4i16:
- case ARM::VADDv4i32:
- case ARM::VADDv8i16:
- case ARM::VADDv8i8:
- case ARM::VANDd:
- case ARM::VANDq:
- case ARM::VBICd:
- case ARM::VBICq:
- case ARM::VBIFd:
- case ARM::VBIFq:
- case ARM::VBITd:
- case ARM::VBITq:
- case ARM::VBSLd:
- case ARM::VBSLq:
- case ARM::VCEQfd:
- case ARM::VCEQfq:
- case ARM::VCEQv16i8:
- case ARM::VCEQv2i32:
- case ARM::VCEQv4i16:
- case ARM::VCEQv4i32:
- case ARM::VCEQv8i16:
- case ARM::VCEQv8i8:
- case ARM::VCEQzv16i8:
- case ARM::VCEQzv2f32:
- case ARM::VCEQzv2i32:
- case ARM::VCEQzv4f32:
- case ARM::VCEQzv4i16:
- case ARM::VCEQzv4i32:
- case ARM::VCEQzv8i16:
- case ARM::VCEQzv8i8:
- case ARM::VCGEfd:
- case ARM::VCGEfq:
- case ARM::VCGEsv16i8:
- case ARM::VCGEsv2i32:
- case ARM::VCGEsv4i16:
- case ARM::VCGEsv4i32:
- case ARM::VCGEsv8i16:
- case ARM::VCGEsv8i8:
- case ARM::VCGEuv16i8:
- case ARM::VCGEuv2i32:
- case ARM::VCGEuv4i16:
- case ARM::VCGEuv4i32:
- case ARM::VCGEuv8i16:
- case ARM::VCGEuv8i8:
- case ARM::VCGEzv16i8:
- case ARM::VCGEzv2f32:
- case ARM::VCGEzv2i32:
- case ARM::VCGEzv4f32:
- case ARM::VCGEzv4i16:
- case ARM::VCGEzv4i32:
- case ARM::VCGEzv8i16:
- case ARM::VCGEzv8i8:
- case ARM::VCGTfd:
- case ARM::VCGTfq:
- case ARM::VCGTsv16i8:
- case ARM::VCGTsv2i32:
- case ARM::VCGTsv4i16:
- case ARM::VCGTsv4i32:
- case ARM::VCGTsv8i16:
- case ARM::VCGTsv8i8:
- case ARM::VCGTuv16i8:
- case ARM::VCGTuv2i32:
- case ARM::VCGTuv4i16:
- case ARM::VCGTuv4i32:
- case ARM::VCGTuv8i16:
- case ARM::VCGTuv8i8:
- case ARM::VCGTzv16i8:
- case ARM::VCGTzv2f32:
- case ARM::VCGTzv2i32:
- case ARM::VCGTzv4f32:
- case ARM::VCGTzv4i16:
- case ARM::VCGTzv4i32:
- case ARM::VCGTzv8i16:
- case ARM::VCGTzv8i8:
- case ARM::VCLEzv16i8:
- case ARM::VCLEzv2f32:
- case ARM::VCLEzv2i32:
- case ARM::VCLEzv4f32:
- case ARM::VCLEzv4i16:
- case ARM::VCLEzv4i32:
- case ARM::VCLEzv8i16:
- case ARM::VCLEzv8i8:
- case ARM::VCLSv16i8:
- case ARM::VCLSv2i32:
- case ARM::VCLSv4i16:
- case ARM::VCLSv4i32:
- case ARM::VCLSv8i16:
- case ARM::VCLSv8i8:
- case ARM::VCLTzv16i8:
- case ARM::VCLTzv2f32:
- case ARM::VCLTzv2i32:
- case ARM::VCLTzv4f32:
- case ARM::VCLTzv4i16:
- case ARM::VCLTzv4i32:
- case ARM::VCLTzv8i16:
- case ARM::VCLTzv8i8:
- case ARM::VCLZv16i8:
- case ARM::VCLZv2i32:
- case ARM::VCLZv4i16:
- case ARM::VCLZv4i32:
- case ARM::VCLZv8i16:
- case ARM::VCLZv8i8:
- case ARM::VCMPD:
- case ARM::VCMPED:
- case ARM::VCMPES:
- case ARM::VCMPEZD:
- case ARM::VCMPEZS:
- case ARM::VCMPS:
- case ARM::VCMPZD:
- case ARM::VCMPZS:
- case ARM::VCNTd:
- case ARM::VCNTq:
- case ARM::VCVTBHS:
- case ARM::VCVTBSH:
- case ARM::VCVTDS:
- case ARM::VCVTSD:
- case ARM::VCVTTHS:
- case ARM::VCVTTSH:
- case ARM::VCVTf2sd:
- case ARM::VCVTf2sd_sfp:
- case ARM::VCVTf2sq:
- case ARM::VCVTf2ud:
- case ARM::VCVTf2ud_sfp:
- case ARM::VCVTf2uq:
- case ARM::VCVTf2xsd:
- case ARM::VCVTf2xsq:
- case ARM::VCVTf2xud:
- case ARM::VCVTf2xuq:
- case ARM::VCVTs2fd:
- case ARM::VCVTs2fd_sfp:
- case ARM::VCVTs2fq:
- case ARM::VCVTu2fd:
- case ARM::VCVTu2fd_sfp:
- case ARM::VCVTu2fq:
- case ARM::VCVTxs2fd:
- case ARM::VCVTxs2fq:
- case ARM::VCVTxu2fd:
- case ARM::VCVTxu2fq:
- case ARM::VDIVD:
- case ARM::VDIVS:
- case ARM::VDUP16d:
- case ARM::VDUP16q:
- case ARM::VDUP32d:
- case ARM::VDUP32q:
- case ARM::VDUP8d:
- case ARM::VDUP8q:
- case ARM::VDUPLN16d:
- case ARM::VDUPLN16q:
- case ARM::VDUPLN32d:
- case ARM::VDUPLN32q:
- case ARM::VDUPLN8d:
- case ARM::VDUPLN8q:
- case ARM::VDUPLNfd:
- case ARM::VDUPLNfq:
- case ARM::VDUPfd:
- case ARM::VDUPfdf:
- case ARM::VDUPfq:
- case ARM::VDUPfqf:
- case ARM::VEORd:
- case ARM::VEORq:
- case ARM::VEXTd16:
- case ARM::VEXTd32:
- case ARM::VEXTd8:
- case ARM::VEXTdf:
- case ARM::VEXTq16:
- case ARM::VEXTq32:
- case ARM::VEXTq8:
- case ARM::VEXTqf:
- case ARM::VGETLNi32:
- case ARM::VGETLNs16:
- case ARM::VGETLNs8:
- case ARM::VGETLNu16:
- case ARM::VGETLNu8:
- case ARM::VHADDsv16i8:
- case ARM::VHADDsv2i32:
- case ARM::VHADDsv4i16:
- case ARM::VHADDsv4i32:
- case ARM::VHADDsv8i16:
- case ARM::VHADDsv8i8:
- case ARM::VHADDuv16i8:
- case ARM::VHADDuv2i32:
- case ARM::VHADDuv4i16:
- case ARM::VHADDuv4i32:
- case ARM::VHADDuv8i16:
- case ARM::VHADDuv8i8:
- case ARM::VHSUBsv16i8:
- case ARM::VHSUBsv2i32:
- case ARM::VHSUBsv4i16:
- case ARM::VHSUBsv4i32:
- case ARM::VHSUBsv8i16:
- case ARM::VHSUBsv8i8:
- case ARM::VHSUBuv16i8:
- case ARM::VHSUBuv2i32:
- case ARM::VHSUBuv4i16:
- case ARM::VHSUBuv4i32:
- case ARM::VHSUBuv8i16:
- case ARM::VHSUBuv8i8:
- case ARM::VLD1d16:
- case ARM::VLD1d16Q:
- case ARM::VLD1d16T:
- case ARM::VLD1d32:
- case ARM::VLD1d32Q:
- case ARM::VLD1d32T:
- case ARM::VLD1d64:
- case ARM::VLD1d8:
- case ARM::VLD1d8Q:
- case ARM::VLD1d8T:
- case ARM::VLD1df:
- case ARM::VLD1q16:
- case ARM::VLD1q32:
- case ARM::VLD1q64:
- case ARM::VLD1q8:
- case ARM::VLD1qf:
- case ARM::VLD2LNd16:
- case ARM::VLD2LNd32:
- case ARM::VLD2LNd8:
- case ARM::VLD2LNq16a:
- case ARM::VLD2LNq16b:
- case ARM::VLD2LNq32a:
- case ARM::VLD2LNq32b:
- case ARM::VLD2d16:
- case ARM::VLD2d16D:
- case ARM::VLD2d32:
- case ARM::VLD2d32D:
- case ARM::VLD2d64:
- case ARM::VLD2d8:
- case ARM::VLD2d8D:
- case ARM::VLD2q16:
- case ARM::VLD2q32:
- case ARM::VLD2q8:
- case ARM::VLD3LNd16:
- case ARM::VLD3LNd32:
- case ARM::VLD3LNd8:
- case ARM::VLD3LNq16a:
- case ARM::VLD3LNq16b:
- case ARM::VLD3LNq32a:
- case ARM::VLD3LNq32b:
- case ARM::VLD3d16:
- case ARM::VLD3d32:
- case ARM::VLD3d64:
- case ARM::VLD3d8:
- case ARM::VLD3q16a:
- case ARM::VLD3q16b:
- case ARM::VLD3q32a:
- case ARM::VLD3q32b:
- case ARM::VLD3q8a:
- case ARM::VLD3q8b:
- case ARM::VLD4LNd16:
- case ARM::VLD4LNd32:
- case ARM::VLD4LNd8:
- case ARM::VLD4LNq16a:
- case ARM::VLD4LNq16b:
- case ARM::VLD4LNq32a:
- case ARM::VLD4LNq32b:
- case ARM::VLD4d16:
- case ARM::VLD4d32:
- case ARM::VLD4d64:
- case ARM::VLD4d8:
- case ARM::VLD4q16a:
- case ARM::VLD4q16b:
- case ARM::VLD4q32a:
- case ARM::VLD4q32b:
- case ARM::VLD4q8a:
- case ARM::VLD4q8b:
- case ARM::VLDMD:
- case ARM::VLDMS:
- case ARM::VLDRD:
- case ARM::VLDRQ:
- case ARM::VLDRS:
- case ARM::VMAXfd:
- case ARM::VMAXfd_sfp:
- case ARM::VMAXfq:
- case ARM::VMAXsv16i8:
- case ARM::VMAXsv2i32:
- case ARM::VMAXsv4i16:
- case ARM::VMAXsv4i32:
- case ARM::VMAXsv8i16:
- case ARM::VMAXsv8i8:
- case ARM::VMAXuv16i8:
- case ARM::VMAXuv2i32:
- case ARM::VMAXuv4i16:
- case ARM::VMAXuv4i32:
- case ARM::VMAXuv8i16:
- case ARM::VMAXuv8i8:
- case ARM::VMINfd:
- case ARM::VMINfd_sfp:
- case ARM::VMINfq:
- case ARM::VMINsv16i8:
- case ARM::VMINsv2i32:
- case ARM::VMINsv4i16:
- case ARM::VMINsv4i32:
- case ARM::VMINsv8i16:
- case ARM::VMINsv8i8:
- case ARM::VMINuv16i8:
- case ARM::VMINuv2i32:
- case ARM::VMINuv4i16:
- case ARM::VMINuv4i32:
- case ARM::VMINuv8i16:
- case ARM::VMINuv8i8:
- case ARM::VMLAD:
- case ARM::VMLALslsv2i32:
- case ARM::VMLALslsv4i16:
- case ARM::VMLALsluv2i32:
- case ARM::VMLALsluv4i16:
- case ARM::VMLALsv2i64:
- case ARM::VMLALsv4i32:
- case ARM::VMLALsv8i16:
- case ARM::VMLALuv2i64:
- case ARM::VMLALuv4i32:
- case ARM::VMLALuv8i16:
- case ARM::VMLAS:
- case ARM::VMLAfd:
- case ARM::VMLAfq:
- case ARM::VMLAslfd:
- case ARM::VMLAslfq:
- case ARM::VMLAslv2i32:
- case ARM::VMLAslv4i16:
- case ARM::VMLAslv4i32:
- case ARM::VMLAslv8i16:
- case ARM::VMLAv16i8:
- case ARM::VMLAv2i32:
- case ARM::VMLAv4i16:
- case ARM::VMLAv4i32:
- case ARM::VMLAv8i16:
- case ARM::VMLAv8i8:
- case ARM::VMLSD:
- case ARM::VMLSLslsv2i32:
- case ARM::VMLSLslsv4i16:
- case ARM::VMLSLsluv2i32:
- case ARM::VMLSLsluv4i16:
- case ARM::VMLSLsv2i64:
- case ARM::VMLSLsv4i32:
- case ARM::VMLSLsv8i16:
- case ARM::VMLSLuv2i64:
- case ARM::VMLSLuv4i32:
- case ARM::VMLSLuv8i16:
- case ARM::VMLSS:
- case ARM::VMLSfd:
- case ARM::VMLSfq:
- case ARM::VMLSslfd:
- case ARM::VMLSslfq:
- case ARM::VMLSslv2i32:
- case ARM::VMLSslv4i16:
- case ARM::VMLSslv4i32:
- case ARM::VMLSslv8i16:
- case ARM::VMLSv16i8:
- case ARM::VMLSv2i32:
- case ARM::VMLSv4i16:
- case ARM::VMLSv4i32:
- case ARM::VMLSv8i16:
- case ARM::VMLSv8i8:
- case ARM::VMOVD:
- case ARM::VMOVDRR:
- case ARM::VMOVDcc:
- case ARM::VMOVDneon:
- case ARM::VMOVLsv2i64:
- case ARM::VMOVLsv4i32:
- case ARM::VMOVLsv8i16:
- case ARM::VMOVLuv2i64:
- case ARM::VMOVLuv4i32:
- case ARM::VMOVLuv8i16:
- case ARM::VMOVNv2i32:
- case ARM::VMOVNv4i16:
- case ARM::VMOVNv8i8:
- case ARM::VMOVQ:
- case ARM::VMOVRRD:
- case ARM::VMOVRRS:
- case ARM::VMOVRS:
- case ARM::VMOVS:
- case ARM::VMOVSR:
- case ARM::VMOVSRR:
- case ARM::VMOVScc:
- case ARM::VMOVv16i8:
- case ARM::VMOVv1i64:
- case ARM::VMOVv2i32:
- case ARM::VMOVv2i64:
- case ARM::VMOVv4i16:
- case ARM::VMOVv4i32:
- case ARM::VMOVv8i16:
- case ARM::VMOVv8i8:
- case ARM::VMRS:
- case ARM::VMSR:
- case ARM::VMULD:
- case ARM::VMULLp:
- case ARM::VMULLslsv2i32:
- case ARM::VMULLslsv4i16:
- case ARM::VMULLsluv2i32:
- case ARM::VMULLsluv4i16:
- case ARM::VMULLsv2i64:
- case ARM::VMULLsv4i32:
- case ARM::VMULLsv8i16:
- case ARM::VMULLuv2i64:
- case ARM::VMULLuv4i32:
- case ARM::VMULLuv8i16:
- case ARM::VMULS:
- case ARM::VMULfd:
- case ARM::VMULfd_sfp:
- case ARM::VMULfq:
- case ARM::VMULpd:
- case ARM::VMULpq:
- case ARM::VMULslfd:
- case ARM::VMULslfq:
- case ARM::VMULslv2i32:
- case ARM::VMULslv4i16:
- case ARM::VMULslv4i32:
- case ARM::VMULslv8i16:
- case ARM::VMULv16i8:
- case ARM::VMULv2i32:
- case ARM::VMULv4i16:
- case ARM::VMULv4i32:
- case ARM::VMULv8i16:
- case ARM::VMULv8i8:
- case ARM::VMVNd:
- case ARM::VMVNq:
- case ARM::VNEGD:
- case ARM::VNEGDcc:
- case ARM::VNEGS:
- case ARM::VNEGScc:
- case ARM::VNEGf32q:
- case ARM::VNEGfd:
- case ARM::VNEGfd_sfp:
- case ARM::VNEGs16d:
- case ARM::VNEGs16q:
- case ARM::VNEGs32d:
- case ARM::VNEGs32q:
- case ARM::VNEGs8d:
- case ARM::VNEGs8q:
- case ARM::VNMLAD:
- case ARM::VNMLAS:
- case ARM::VNMLSD:
- case ARM::VNMLSS:
- case ARM::VNMULD:
- case ARM::VNMULS:
- case ARM::VORNd:
- case ARM::VORNq:
- case ARM::VORRd:
- case ARM::VORRq:
- case ARM::VPADALsv16i8:
- case ARM::VPADALsv2i32:
- case ARM::VPADALsv4i16:
- case ARM::VPADALsv4i32:
- case ARM::VPADALsv8i16:
- case ARM::VPADALsv8i8:
- case ARM::VPADALuv16i8:
- case ARM::VPADALuv2i32:
- case ARM::VPADALuv4i16:
- case ARM::VPADALuv4i32:
- case ARM::VPADALuv8i16:
- case ARM::VPADALuv8i8:
- case ARM::VPADDLsv16i8:
- case ARM::VPADDLsv2i32:
- case ARM::VPADDLsv4i16:
- case ARM::VPADDLsv4i32:
- case ARM::VPADDLsv8i16:
- case ARM::VPADDLsv8i8:
- case ARM::VPADDLuv16i8:
- case ARM::VPADDLuv2i32:
- case ARM::VPADDLuv4i16:
- case ARM::VPADDLuv4i32:
- case ARM::VPADDLuv8i16:
- case ARM::VPADDLuv8i8:
- case ARM::VPADDf:
- case ARM::VPADDi16:
- case ARM::VPADDi32:
- case ARM::VPADDi8:
- case ARM::VPMAXf:
- case ARM::VPMAXs16:
- case ARM::VPMAXs32:
- case ARM::VPMAXs8:
- case ARM::VPMAXu16:
- case ARM::VPMAXu32:
- case ARM::VPMAXu8:
- case ARM::VPMINf:
- case ARM::VPMINs16:
- case ARM::VPMINs32:
- case ARM::VPMINs8:
- case ARM::VPMINu16:
- case ARM::VPMINu32:
- case ARM::VPMINu8:
- case ARM::VQABSv16i8:
- case ARM::VQABSv2i32:
- case ARM::VQABSv4i16:
- case ARM::VQABSv4i32:
- case ARM::VQABSv8i16:
- case ARM::VQABSv8i8:
- case ARM::VQADDsv16i8:
- case ARM::VQADDsv1i64:
- case ARM::VQADDsv2i32:
- case ARM::VQADDsv2i64:
- case ARM::VQADDsv4i16:
- case ARM::VQADDsv4i32:
- case ARM::VQADDsv8i16:
- case ARM::VQADDsv8i8:
- case ARM::VQADDuv16i8:
- case ARM::VQADDuv1i64:
- case ARM::VQADDuv2i32:
- case ARM::VQADDuv2i64:
- case ARM::VQADDuv4i16:
- case ARM::VQADDuv4i32:
- case ARM::VQADDuv8i16:
- case ARM::VQADDuv8i8:
- case ARM::VQDMLALslv2i32:
- case ARM::VQDMLALslv4i16:
- case ARM::VQDMLALv2i64:
- case ARM::VQDMLALv4i32:
- case ARM::VQDMLSLslv2i32:
- case ARM::VQDMLSLslv4i16:
- case ARM::VQDMLSLv2i64:
- case ARM::VQDMLSLv4i32:
- case ARM::VQDMULHslv2i32:
- case ARM::VQDMULHslv4i16:
- case ARM::VQDMULHslv4i32:
- case ARM::VQDMULHslv8i16:
- case ARM::VQDMULHv2i32:
- case ARM::VQDMULHv4i16:
- case ARM::VQDMULHv4i32:
- case ARM::VQDMULHv8i16:
- case ARM::VQDMULLslv2i32:
- case ARM::VQDMULLslv4i16:
- case ARM::VQDMULLv2i64:
- case ARM::VQDMULLv4i32:
- case ARM::VQMOVNsuv2i32:
- case ARM::VQMOVNsuv4i16:
- case ARM::VQMOVNsuv8i8:
- case ARM::VQMOVNsv2i32:
- case ARM::VQMOVNsv4i16:
- case ARM::VQMOVNsv8i8:
- case ARM::VQMOVNuv2i32:
- case ARM::VQMOVNuv4i16:
- case ARM::VQMOVNuv8i8:
- case ARM::VQNEGv16i8:
- case ARM::VQNEGv2i32:
- case ARM::VQNEGv4i16:
- case ARM::VQNEGv4i32:
- case ARM::VQNEGv8i16:
- case ARM::VQNEGv8i8:
- case ARM::VQRDMULHslv2i32:
- case ARM::VQRDMULHslv4i16:
- case ARM::VQRDMULHslv4i32:
- case ARM::VQRDMULHslv8i16:
- case ARM::VQRDMULHv2i32:
- case ARM::VQRDMULHv4i16:
- case ARM::VQRDMULHv4i32:
- case ARM::VQRDMULHv8i16:
- case ARM::VQRSHLsv16i8:
- case ARM::VQRSHLsv1i64:
- case ARM::VQRSHLsv2i32:
- case ARM::VQRSHLsv2i64:
- case ARM::VQRSHLsv4i16:
- case ARM::VQRSHLsv4i32:
- case ARM::VQRSHLsv8i16:
- case ARM::VQRSHLsv8i8:
- case ARM::VQRSHLuv16i8:
- case ARM::VQRSHLuv1i64:
- case ARM::VQRSHLuv2i32:
- case ARM::VQRSHLuv2i64:
- case ARM::VQRSHLuv4i16:
- case ARM::VQRSHLuv4i32:
- case ARM::VQRSHLuv8i16:
- case ARM::VQRSHLuv8i8:
- case ARM::VQRSHRNsv2i32:
- case ARM::VQRSHRNsv4i16:
- case ARM::VQRSHRNsv8i8:
- case ARM::VQRSHRNuv2i32:
- case ARM::VQRSHRNuv4i16:
- case ARM::VQRSHRNuv8i8:
- case ARM::VQRSHRUNv2i32:
- case ARM::VQRSHRUNv4i16:
- case ARM::VQRSHRUNv8i8:
- case ARM::VQSHLsiv16i8:
- case ARM::VQSHLsiv1i64:
- case ARM::VQSHLsiv2i32:
- case ARM::VQSHLsiv2i64:
- case ARM::VQSHLsiv4i16:
- case ARM::VQSHLsiv4i32:
- case ARM::VQSHLsiv8i16:
- case ARM::VQSHLsiv8i8:
- case ARM::VQSHLsuv16i8:
- case ARM::VQSHLsuv1i64:
- case ARM::VQSHLsuv2i32:
- case ARM::VQSHLsuv2i64:
- case ARM::VQSHLsuv4i16:
- case ARM::VQSHLsuv4i32:
- case ARM::VQSHLsuv8i16:
- case ARM::VQSHLsuv8i8:
- case ARM::VQSHLsv16i8:
- case ARM::VQSHLsv1i64:
- case ARM::VQSHLsv2i32:
- case ARM::VQSHLsv2i64:
- case ARM::VQSHLsv4i16:
- case ARM::VQSHLsv4i32:
- case ARM::VQSHLsv8i16:
- case ARM::VQSHLsv8i8:
- case ARM::VQSHLuiv16i8:
- case ARM::VQSHLuiv1i64:
- case ARM::VQSHLuiv2i32:
- case ARM::VQSHLuiv2i64:
- case ARM::VQSHLuiv4i16:
- case ARM::VQSHLuiv4i32:
- case ARM::VQSHLuiv8i16:
- case ARM::VQSHLuiv8i8:
- case ARM::VQSHLuv16i8:
- case ARM::VQSHLuv1i64:
- case ARM::VQSHLuv2i32:
- case ARM::VQSHLuv2i64:
- case ARM::VQSHLuv4i16:
- case ARM::VQSHLuv4i32:
- case ARM::VQSHLuv8i16:
- case ARM::VQSHLuv8i8:
- case ARM::VQSHRNsv2i32:
- case ARM::VQSHRNsv4i16:
- case ARM::VQSHRNsv8i8:
- case ARM::VQSHRNuv2i32:
- case ARM::VQSHRNuv4i16:
- case ARM::VQSHRNuv8i8:
- case ARM::VQSHRUNv2i32:
- case ARM::VQSHRUNv4i16:
- case ARM::VQSHRUNv8i8:
- case ARM::VQSUBsv16i8:
- case ARM::VQSUBsv1i64:
- case ARM::VQSUBsv2i32:
- case ARM::VQSUBsv2i64:
- case ARM::VQSUBsv4i16:
- case ARM::VQSUBsv4i32:
- case ARM::VQSUBsv8i16:
- case ARM::VQSUBsv8i8:
- case ARM::VQSUBuv16i8:
- case ARM::VQSUBuv1i64:
- case ARM::VQSUBuv2i32:
- case ARM::VQSUBuv2i64:
- case ARM::VQSUBuv4i16:
- case ARM::VQSUBuv4i32:
- case ARM::VQSUBuv8i16:
- case ARM::VQSUBuv8i8:
- case ARM::VRADDHNv2i32:
- case ARM::VRADDHNv4i16:
- case ARM::VRADDHNv8i8:
- case ARM::VRECPEd:
- case ARM::VRECPEfd:
- case ARM::VRECPEfq:
- case ARM::VRECPEq:
- case ARM::VRECPSfd:
- case ARM::VRECPSfq:
- case ARM::VREV16d8:
- case ARM::VREV16q8:
- case ARM::VREV32d16:
- case ARM::VREV32d8:
- case ARM::VREV32q16:
- case ARM::VREV32q8:
- case ARM::VREV64d16:
- case ARM::VREV64d32:
- case ARM::VREV64d8:
- case ARM::VREV64df:
- case ARM::VREV64q16:
- case ARM::VREV64q32:
- case ARM::VREV64q8:
- case ARM::VREV64qf:
- case ARM::VRHADDsv16i8:
- case ARM::VRHADDsv2i32:
- case ARM::VRHADDsv4i16:
- case ARM::VRHADDsv4i32:
- case ARM::VRHADDsv8i16:
- case ARM::VRHADDsv8i8:
- case ARM::VRHADDuv16i8:
- case ARM::VRHADDuv2i32:
- case ARM::VRHADDuv4i16:
- case ARM::VRHADDuv4i32:
- case ARM::VRHADDuv8i16:
- case ARM::VRHADDuv8i8:
- case ARM::VRSHLsv16i8:
- case ARM::VRSHLsv1i64:
- case ARM::VRSHLsv2i32:
- case ARM::VRSHLsv2i64:
- case ARM::VRSHLsv4i16:
- case ARM::VRSHLsv4i32:
- case ARM::VRSHLsv8i16:
- case ARM::VRSHLsv8i8:
- case ARM::VRSHLuv16i8:
- case ARM::VRSHLuv1i64:
- case ARM::VRSHLuv2i32:
- case ARM::VRSHLuv2i64:
- case ARM::VRSHLuv4i16:
- case ARM::VRSHLuv4i32:
- case ARM::VRSHLuv8i16:
- case ARM::VRSHLuv8i8:
- case ARM::VRSHRNv2i32:
- case ARM::VRSHRNv4i16:
- case ARM::VRSHRNv8i8:
- case ARM::VRSHRsv16i8:
- case ARM::VRSHRsv1i64:
- case ARM::VRSHRsv2i32:
- case ARM::VRSHRsv2i64:
- case ARM::VRSHRsv4i16:
- case ARM::VRSHRsv4i32:
- case ARM::VRSHRsv8i16:
- case ARM::VRSHRsv8i8:
- case ARM::VRSHRuv16i8:
- case ARM::VRSHRuv1i64:
- case ARM::VRSHRuv2i32:
- case ARM::VRSHRuv2i64:
- case ARM::VRSHRuv4i16:
- case ARM::VRSHRuv4i32:
- case ARM::VRSHRuv8i16:
- case ARM::VRSHRuv8i8:
- case ARM::VRSQRTEd:
- case ARM::VRSQRTEfd:
- case ARM::VRSQRTEfq:
- case ARM::VRSQRTEq:
- case ARM::VRSQRTSfd:
- case ARM::VRSQRTSfq:
- case ARM::VRSRAsv16i8:
- case ARM::VRSRAsv1i64:
- case ARM::VRSRAsv2i32:
- case ARM::VRSRAsv2i64:
- case ARM::VRSRAsv4i16:
- case ARM::VRSRAsv4i32:
- case ARM::VRSRAsv8i16:
- case ARM::VRSRAsv8i8:
- case ARM::VRSRAuv16i8:
- case ARM::VRSRAuv1i64:
- case ARM::VRSRAuv2i32:
- case ARM::VRSRAuv2i64:
- case ARM::VRSRAuv4i16:
- case ARM::VRSRAuv4i32:
- case ARM::VRSRAuv8i16:
- case ARM::VRSRAuv8i8:
- case ARM::VRSUBHNv2i32:
- case ARM::VRSUBHNv4i16:
- case ARM::VRSUBHNv8i8:
- case ARM::VSETLNi16:
- case ARM::VSETLNi32:
- case ARM::VSETLNi8:
- case ARM::VSHLLi16:
- case ARM::VSHLLi32:
- case ARM::VSHLLi8:
- case ARM::VSHLLsv2i64:
- case ARM::VSHLLsv4i32:
- case ARM::VSHLLsv8i16:
- case ARM::VSHLLuv2i64:
- case ARM::VSHLLuv4i32:
- case ARM::VSHLLuv8i16:
- case ARM::VSHLiv16i8:
- case ARM::VSHLiv1i64:
- case ARM::VSHLiv2i32:
- case ARM::VSHLiv2i64:
- case ARM::VSHLiv4i16:
- case ARM::VSHLiv4i32:
- case ARM::VSHLiv8i16:
- case ARM::VSHLiv8i8:
- case ARM::VSHLsv16i8:
- case ARM::VSHLsv1i64:
- case ARM::VSHLsv2i32:
- case ARM::VSHLsv2i64:
- case ARM::VSHLsv4i16:
- case ARM::VSHLsv4i32:
- case ARM::VSHLsv8i16:
- case ARM::VSHLsv8i8:
- case ARM::VSHLuv16i8:
- case ARM::VSHLuv1i64:
- case ARM::VSHLuv2i32:
- case ARM::VSHLuv2i64:
- case ARM::VSHLuv4i16:
- case ARM::VSHLuv4i32:
- case ARM::VSHLuv8i16:
- case ARM::VSHLuv8i8:
- case ARM::VSHRNv2i32:
- case ARM::VSHRNv4i16:
- case ARM::VSHRNv8i8:
- case ARM::VSHRsv16i8:
- case ARM::VSHRsv1i64:
- case ARM::VSHRsv2i32:
- case ARM::VSHRsv2i64:
- case ARM::VSHRsv4i16:
- case ARM::VSHRsv4i32:
- case ARM::VSHRsv8i16:
- case ARM::VSHRsv8i8:
- case ARM::VSHRuv16i8:
- case ARM::VSHRuv1i64:
- case ARM::VSHRuv2i32:
- case ARM::VSHRuv2i64:
- case ARM::VSHRuv4i16:
- case ARM::VSHRuv4i32:
- case ARM::VSHRuv8i16:
- case ARM::VSHRuv8i8:
- case ARM::VSHTOD:
- case ARM::VSHTOS:
- case ARM::VSITOD:
- case ARM::VSITOS:
- case ARM::VSLIv16i8:
- case ARM::VSLIv1i64:
- case ARM::VSLIv2i32:
- case ARM::VSLIv2i64:
- case ARM::VSLIv4i16:
- case ARM::VSLIv4i32:
- case ARM::VSLIv8i16:
- case ARM::VSLIv8i8:
- case ARM::VSLTOD:
- case ARM::VSLTOS:
- case ARM::VSQRTD:
- case ARM::VSQRTS:
- case ARM::VSRAsv16i8:
- case ARM::VSRAsv1i64:
- case ARM::VSRAsv2i32:
- case ARM::VSRAsv2i64:
- case ARM::VSRAsv4i16:
- case ARM::VSRAsv4i32:
- case ARM::VSRAsv8i16:
- case ARM::VSRAsv8i8:
- case ARM::VSRAuv16i8:
- case ARM::VSRAuv1i64:
- case ARM::VSRAuv2i32:
- case ARM::VSRAuv2i64:
- case ARM::VSRAuv4i16:
- case ARM::VSRAuv4i32:
- case ARM::VSRAuv8i16:
- case ARM::VSRAuv8i8:
- case ARM::VSRIv16i8:
- case ARM::VSRIv1i64:
- case ARM::VSRIv2i32:
- case ARM::VSRIv2i64:
- case ARM::VSRIv4i16:
- case ARM::VSRIv4i32:
- case ARM::VSRIv8i16:
- case ARM::VSRIv8i8:
- case ARM::VST1d16:
- case ARM::VST1d16Q:
- case ARM::VST1d16T:
- case ARM::VST1d32:
- case ARM::VST1d32Q:
- case ARM::VST1d32T:
- case ARM::VST1d64:
- case ARM::VST1d8:
- case ARM::VST1d8Q:
- case ARM::VST1d8T:
- case ARM::VST1df:
- case ARM::VST1q16:
- case ARM::VST1q32:
- case ARM::VST1q64:
- case ARM::VST1q8:
- case ARM::VST1qf:
- case ARM::VST2LNd16:
- case ARM::VST2LNd32:
- case ARM::VST2LNd8:
- case ARM::VST2LNq16a:
- case ARM::VST2LNq16b:
- case ARM::VST2LNq32a:
- case ARM::VST2LNq32b:
- case ARM::VST2d16:
- case ARM::VST2d16D:
- case ARM::VST2d32:
- case ARM::VST2d32D:
- case ARM::VST2d64:
- case ARM::VST2d8:
- case ARM::VST2d8D:
- case ARM::VST2q16:
- case ARM::VST2q32:
- case ARM::VST2q8:
- case ARM::VST3LNd16:
- case ARM::VST3LNd32:
- case ARM::VST3LNd8:
- case ARM::VST3LNq16a:
- case ARM::VST3LNq16b:
- case ARM::VST3LNq32a:
- case ARM::VST3LNq32b:
- case ARM::VST3d16:
- case ARM::VST3d32:
- case ARM::VST3d64:
- case ARM::VST3d8:
- case ARM::VST3q16a:
- case ARM::VST3q16b:
- case ARM::VST3q32a:
- case ARM::VST3q32b:
- case ARM::VST3q8a:
- case ARM::VST3q8b:
- case ARM::VST4LNd16:
- case ARM::VST4LNd32:
- case ARM::VST4LNd8:
- case ARM::VST4LNq16a:
- case ARM::VST4LNq16b:
- case ARM::VST4LNq32a:
- case ARM::VST4LNq32b:
- case ARM::VST4d16:
- case ARM::VST4d32:
- case ARM::VST4d64:
- case ARM::VST4d8:
- case ARM::VST4q16a:
- case ARM::VST4q16b:
- case ARM::VST4q32a:
- case ARM::VST4q32b:
- case ARM::VST4q8a:
- case ARM::VST4q8b:
- case ARM::VSTMD:
- case ARM::VSTMS:
- case ARM::VSTRD:
- case ARM::VSTRQ:
- case ARM::VSTRS:
- case ARM::VSUBD:
- case ARM::VSUBHNv2i32:
- case ARM::VSUBHNv4i16:
- case ARM::VSUBHNv8i8:
- case ARM::VSUBLsv2i64:
- case ARM::VSUBLsv4i32:
- case ARM::VSUBLsv8i16:
- case ARM::VSUBLuv2i64:
- case ARM::VSUBLuv4i32:
- case ARM::VSUBLuv8i16:
- case ARM::VSUBS:
- case ARM::VSUBWsv2i64:
- case ARM::VSUBWsv4i32:
- case ARM::VSUBWsv8i16:
- case ARM::VSUBWuv2i64:
- case ARM::VSUBWuv4i32:
- case ARM::VSUBWuv8i16:
- case ARM::VSUBfd:
- case ARM::VSUBfd_sfp:
- case ARM::VSUBfq:
- case ARM::VSUBv16i8:
- case ARM::VSUBv1i64:
- case ARM::VSUBv2i32:
- case ARM::VSUBv2i64:
- case ARM::VSUBv4i16:
- case ARM::VSUBv4i32:
- case ARM::VSUBv8i16:
- case ARM::VSUBv8i8:
- case ARM::VSWPd:
- case ARM::VSWPq:
- case ARM::VTBL1:
- case ARM::VTBL2:
- case ARM::VTBL3:
- case ARM::VTBL4:
- case ARM::VTBX1:
- case ARM::VTBX2:
- case ARM::VTBX3:
- case ARM::VTBX4:
- case ARM::VTOSHD:
- case ARM::VTOSHS:
- case ARM::VTOSIRD:
- case ARM::VTOSIRS:
- case ARM::VTOSIZD:
- case ARM::VTOSIZS:
- case ARM::VTOSLD:
- case ARM::VTOSLS:
- case ARM::VTOUHD:
- case ARM::VTOUHS:
- case ARM::VTOUIRD:
- case ARM::VTOUIRS:
- case ARM::VTOUIZD:
- case ARM::VTOUIZS:
- case ARM::VTOULD:
- case ARM::VTOULS:
- case ARM::VTRNd16:
- case ARM::VTRNd32:
- case ARM::VTRNd8:
- case ARM::VTRNq16:
- case ARM::VTRNq32:
- case ARM::VTRNq8:
- case ARM::VTSTv16i8:
- case ARM::VTSTv2i32:
- case ARM::VTSTv4i16:
- case ARM::VTSTv4i32:
- case ARM::VTSTv8i16:
- case ARM::VTSTv8i8:
- case ARM::VUHTOD:
- case ARM::VUHTOS:
- case ARM::VUITOD:
- case ARM::VUITOS:
- case ARM::VULTOD:
- case ARM::VULTOS:
- case ARM::VUZPd16:
- case ARM::VUZPd32:
- case ARM::VUZPd8:
- case ARM::VUZPq16:
- case ARM::VUZPq32:
- case ARM::VUZPq8:
- case ARM::VZIPd16:
- case ARM::VZIPd32:
- case ARM::VZIPd8:
- case ARM::VZIPq16:
- case ARM::VZIPq32:
- case ARM::VZIPq8:
- case ARM::WFE:
- case ARM::WFI:
- case ARM::YIELD:
- case ARM::t2ADCSri:
- case ARM::t2ADCSrr:
- case ARM::t2ADCSrs:
- case ARM::t2ADCri:
- case ARM::t2ADCrr:
- case ARM::t2ADCrs:
- case ARM::t2ADDSri:
- case ARM::t2ADDSrr:
- case ARM::t2ADDSrs:
- case ARM::t2ADDrSPi:
- case ARM::t2ADDrSPi12:
- case ARM::t2ADDrSPs:
- case ARM::t2ADDri:
- case ARM::t2ADDri12:
- case ARM::t2ADDrr:
- case ARM::t2ADDrs:
- case ARM::t2ANDri:
- case ARM::t2ANDrr:
- case ARM::t2ANDrs:
- case ARM::t2ASRri:
- case ARM::t2ASRrr:
- case ARM::t2B:
- case ARM::t2BFC:
- case ARM::t2BFI:
- case ARM::t2BICri:
- case ARM::t2BICrr:
- case ARM::t2BICrs:
- case ARM::t2BR_JT:
- case ARM::t2BXJ:
- case ARM::t2Bcc:
- case ARM::t2CLREX:
- case ARM::t2CLZ:
- case ARM::t2CMNzri:
- case ARM::t2CMNzrr:
- case ARM::t2CMNzrs:
- case ARM::t2CMPri:
- case ARM::t2CMPrr:
- case ARM::t2CMPrs:
- case ARM::t2CMPzri:
- case ARM::t2CMPzrr:
- case ARM::t2CMPzrs:
- case ARM::t2CPS:
- case ARM::t2DBG:
- case ARM::t2DMBish:
- case ARM::t2DMBishst:
- case ARM::t2DMBnsh:
- case ARM::t2DMBnshst:
- case ARM::t2DMBosh:
- case ARM::t2DMBoshst:
- case ARM::t2DMBst:
- case ARM::t2DSBish:
- case ARM::t2DSBishst:
- case ARM::t2DSBnsh:
- case ARM::t2DSBnshst:
- case ARM::t2DSBosh:
- case ARM::t2DSBoshst:
- case ARM::t2DSBst:
- case ARM::t2EORri:
- case ARM::t2EORrr:
- case ARM::t2EORrs:
- case ARM::t2ISBsy:
- case ARM::t2IT:
- case ARM::t2Int_MemBarrierV7:
- case ARM::t2Int_SyncBarrierV7:
- case ARM::t2Int_eh_sjlj_setjmp:
- case ARM::t2LDM:
- case ARM::t2LDM_RET:
- case ARM::t2LDRBT:
- case ARM::t2LDRB_POST:
- case ARM::t2LDRB_PRE:
- case ARM::t2LDRBi12:
- case ARM::t2LDRBi8:
- case ARM::t2LDRBpci:
- case ARM::t2LDRBs:
- case ARM::t2LDRDi8:
- case ARM::t2LDRDpci:
- case ARM::t2LDREX:
- case ARM::t2LDREXB:
- case ARM::t2LDREXD:
- case ARM::t2LDREXH:
- case ARM::t2LDRHT:
- case ARM::t2LDRH_POST:
- case ARM::t2LDRH_PRE:
- case ARM::t2LDRHi12:
- case ARM::t2LDRHi8:
- case ARM::t2LDRHpci:
- case ARM::t2LDRHs:
- case ARM::t2LDRSBT:
- case ARM::t2LDRSB_POST:
- case ARM::t2LDRSB_PRE:
- case ARM::t2LDRSBi12:
- case ARM::t2LDRSBi8:
- case ARM::t2LDRSBpci:
- case ARM::t2LDRSBs:
- case ARM::t2LDRSHT:
- case ARM::t2LDRSH_POST:
- case ARM::t2LDRSH_PRE:
- case ARM::t2LDRSHi12:
- case ARM::t2LDRSHi8:
- case ARM::t2LDRSHpci:
- case ARM::t2LDRSHs:
- case ARM::t2LDRT:
- case ARM::t2LDR_POST:
- case ARM::t2LDR_PRE:
- case ARM::t2LDRi12:
- case ARM::t2LDRi8:
- case ARM::t2LDRpci:
- case ARM::t2LDRpci_pic:
- case ARM::t2LDRs:
- case ARM::t2LEApcrel:
- case ARM::t2LEApcrelJT:
- case ARM::t2LSLri:
- case ARM::t2LSLrr:
- case ARM::t2LSRri:
- case ARM::t2LSRrr:
- case ARM::t2MLA:
- case ARM::t2MLS:
- case ARM::t2MOVCCasr:
- case ARM::t2MOVCCi:
- case ARM::t2MOVCClsl:
- case ARM::t2MOVCClsr:
- case ARM::t2MOVCCr:
- case ARM::t2MOVCCror:
- case ARM::t2MOVTi16:
- case ARM::t2MOVi:
- case ARM::t2MOVi16:
- case ARM::t2MOVi32imm:
- case ARM::t2MOVr:
- case ARM::t2MOVrx:
- case ARM::t2MOVsra_flag:
- case ARM::t2MOVsrl_flag:
- case ARM::t2MRS:
- case ARM::t2MRSsys:
- case ARM::t2MSR:
- case ARM::t2MSRsys:
- case ARM::t2MUL:
- case ARM::t2MVNi:
- case ARM::t2MVNr:
- case ARM::t2MVNs:
- case ARM::t2NOP:
- case ARM::t2ORNri:
- case ARM::t2ORNrr:
- case ARM::t2ORNrs:
- case ARM::t2ORRri:
- case ARM::t2ORRrr:
- case ARM::t2ORRrs:
- case ARM::t2PKHBT:
- case ARM::t2PKHTB:
- case ARM::t2PLDWi12:
- case ARM::t2PLDWi8:
- case ARM::t2PLDWpci:
- case ARM::t2PLDWr:
- case ARM::t2PLDWs:
- case ARM::t2PLDi12:
- case ARM::t2PLDi8:
- case ARM::t2PLDpci:
- case ARM::t2PLDr:
- case ARM::t2PLDs:
- case ARM::t2PLIi12:
- case ARM::t2PLIi8:
- case ARM::t2PLIpci:
- case ARM::t2PLIr:
- case ARM::t2PLIs:
- case ARM::t2QADD:
- case ARM::t2QADD16:
- case ARM::t2QADD8:
- case ARM::t2QASX:
- case ARM::t2QDADD:
- case ARM::t2QDSUB:
- case ARM::t2QSAX:
- case ARM::t2QSUB:
- case ARM::t2QSUB16:
- case ARM::t2QSUB8:
- case ARM::t2RBIT:
- case ARM::t2REV:
- case ARM::t2REV16:
- case ARM::t2REVSH:
- case ARM::t2RFEDB:
- case ARM::t2RFEDBW:
- case ARM::t2RFEIA:
- case ARM::t2RFEIAW:
- case ARM::t2RORri:
- case ARM::t2RORrr:
- case ARM::t2RSBSri:
- case ARM::t2RSBSrs:
- case ARM::t2RSBri:
- case ARM::t2RSBrs:
- case ARM::t2SADD16:
- case ARM::t2SADD8:
- case ARM::t2SASX:
- case ARM::t2SBCSri:
- case ARM::t2SBCSrr:
- case ARM::t2SBCSrs:
- case ARM::t2SBCri:
- case ARM::t2SBCrr:
- case ARM::t2SBCrs:
- case ARM::t2SBFX:
- case ARM::t2SDIV:
- case ARM::t2SEL:
- case ARM::t2SEV:
- case ARM::t2SHADD16:
- case ARM::t2SHADD8:
- case ARM::t2SHASX:
- case ARM::t2SHSAX:
- case ARM::t2SHSUB16:
- case ARM::t2SHSUB8:
- case ARM::t2SMC:
- case ARM::t2SMLABB:
- case ARM::t2SMLABT:
- case ARM::t2SMLAD:
- case ARM::t2SMLADX:
- case ARM::t2SMLAL:
- case ARM::t2SMLALBB:
- case ARM::t2SMLALBT:
- case ARM::t2SMLALD:
- case ARM::t2SMLALDX:
- case ARM::t2SMLALTB:
- case ARM::t2SMLALTT:
- case ARM::t2SMLATB:
- case ARM::t2SMLATT:
- case ARM::t2SMLAWB:
- case ARM::t2SMLAWT:
- case ARM::t2SMLSD:
- case ARM::t2SMLSDX:
- case ARM::t2SMLSLD:
- case ARM::t2SMLSLDX:
- case ARM::t2SMMLA:
- case ARM::t2SMMLAR:
- case ARM::t2SMMLS:
- case ARM::t2SMMLSR:
- case ARM::t2SMMUL:
- case ARM::t2SMMULR:
- case ARM::t2SMUAD:
- case ARM::t2SMUADX:
- case ARM::t2SMULBB:
- case ARM::t2SMULBT:
- case ARM::t2SMULL:
- case ARM::t2SMULTB:
- case ARM::t2SMULTT:
- case ARM::t2SMULWB:
- case ARM::t2SMULWT:
- case ARM::t2SMUSD:
- case ARM::t2SMUSDX:
- case ARM::t2SRSDB:
- case ARM::t2SRSDBW:
- case ARM::t2SRSIA:
- case ARM::t2SRSIAW:
- case ARM::t2SSAT16:
- case ARM::t2SSATasr:
- case ARM::t2SSATlsl:
- case ARM::t2SSAX:
- case ARM::t2SSUB16:
- case ARM::t2SSUB8:
- case ARM::t2STM:
- case ARM::t2STRBT:
- case ARM::t2STRB_POST:
- case ARM::t2STRB_PRE:
- case ARM::t2STRBi12:
- case ARM::t2STRBi8:
- case ARM::t2STRBs:
- case ARM::t2STRDi8:
- case ARM::t2STREX:
- case ARM::t2STREXB:
- case ARM::t2STREXD:
- case ARM::t2STREXH:
- case ARM::t2STRHT:
- case ARM::t2STRH_POST:
- case ARM::t2STRH_PRE:
- case ARM::t2STRHi12:
- case ARM::t2STRHi8:
- case ARM::t2STRHs:
- case ARM::t2STRT:
- case ARM::t2STR_POST:
- case ARM::t2STR_PRE:
- case ARM::t2STRi12:
- case ARM::t2STRi8:
- case ARM::t2STRs:
- case ARM::t2SUBSri:
- case ARM::t2SUBSrr:
- case ARM::t2SUBSrs:
- case ARM::t2SUBrSPi:
- case ARM::t2SUBrSPi12:
- case ARM::t2SUBrSPi12_:
- case ARM::t2SUBrSPi_:
- case ARM::t2SUBrSPs:
- case ARM::t2SUBrSPs_:
- case ARM::t2SUBri:
- case ARM::t2SUBri12:
- case ARM::t2SUBrr:
- case ARM::t2SUBrs:
- case ARM::t2SXTAB16rr:
- case ARM::t2SXTAB16rr_rot:
- case ARM::t2SXTABrr:
- case ARM::t2SXTABrr_rot:
- case ARM::t2SXTAHrr:
- case ARM::t2SXTAHrr_rot:
- case ARM::t2SXTB16r:
- case ARM::t2SXTB16r_rot:
- case ARM::t2SXTBr:
- case ARM::t2SXTBr_rot:
- case ARM::t2SXTHr:
- case ARM::t2SXTHr_rot:
- case ARM::t2TBB:
- case ARM::t2TBBgen:
- case ARM::t2TBH:
- case ARM::t2TBHgen:
- case ARM::t2TEQri:
- case ARM::t2TEQrr:
- case ARM::t2TEQrs:
- case ARM::t2TPsoft:
- case ARM::t2TSTri:
- case ARM::t2TSTrr:
- case ARM::t2TSTrs:
- case ARM::t2UADD16:
- case ARM::t2UADD8:
- case ARM::t2UASX:
- case ARM::t2UBFX:
- case ARM::t2UDIV:
- case ARM::t2UHADD16:
- case ARM::t2UHADD8:
- case ARM::t2UHASX:
- case ARM::t2UHSAX:
- case ARM::t2UHSUB16:
- case ARM::t2UHSUB8:
- case ARM::t2UMAAL:
- case ARM::t2UMLAL:
- case ARM::t2UMULL:
- case ARM::t2UQADD16:
- case ARM::t2UQADD8:
- case ARM::t2UQASX:
- case ARM::t2UQSAX:
- case ARM::t2UQSUB16:
- case ARM::t2UQSUB8:
- case ARM::t2USAD8:
- case ARM::t2USADA8:
- case ARM::t2USAT16:
- case ARM::t2USATasr:
- case ARM::t2USATlsl:
- case ARM::t2USAX:
- case ARM::t2USUB16:
- case ARM::t2USUB8:
- case ARM::t2UXTAB16rr:
- case ARM::t2UXTAB16rr_rot:
- case ARM::t2UXTABrr:
- case ARM::t2UXTABrr_rot:
- case ARM::t2UXTAHrr:
- case ARM::t2UXTAHrr_rot:
- case ARM::t2UXTB16r:
- case ARM::t2UXTB16r_rot:
- case ARM::t2UXTBr:
- case ARM::t2UXTBr_rot:
- case ARM::t2UXTHr:
- case ARM::t2UXTHr_rot:
- case ARM::t2WFE:
- case ARM::t2WFI:
- case ARM::t2YIELD:
- case ARM::tADC:
- case ARM::tADDhirr:
- case ARM::tADDi3:
- case ARM::tADDi8:
- case ARM::tADDrPCi:
- case ARM::tADDrSP:
- case ARM::tADDrSPi:
- case ARM::tADDrr:
- case ARM::tADDspi:
- case ARM::tADDspr:
- case ARM::tADDspr_:
- case ARM::tADJCALLSTACKDOWN:
- case ARM::tADJCALLSTACKUP:
- case ARM::tAND:
- case ARM::tANDsp:
- case ARM::tASRri:
- case ARM::tASRrr:
- case ARM::tB:
- case ARM::tBIC:
- case ARM::tBKPT:
- case ARM::tBL:
- case ARM::tBLXi:
- case ARM::tBLXi_r9:
- case ARM::tBLXr:
- case ARM::tBLXr_r9:
- case ARM::tBLr9:
- case ARM::tBRIND:
- case ARM::tBR_JTr:
- case ARM::tBX:
- case ARM::tBX_RET:
- case ARM::tBX_RET_vararg:
- case ARM::tBXr9:
- case ARM::tBcc:
- case ARM::tBfar:
- case ARM::tCBNZ:
- case ARM::tCBZ:
- case ARM::tCMNz:
- case ARM::tCMPhir:
- case ARM::tCMPi8:
- case ARM::tCMPr:
- case ARM::tCMPzhir:
- case ARM::tCMPzi8:
- case ARM::tCMPzr:
- case ARM::tCPS:
- case ARM::tEOR:
- case ARM::tInt_eh_sjlj_setjmp:
- case ARM::tLDM:
- case ARM::tLDR:
- case ARM::tLDRB:
- case ARM::tLDRBi:
- case ARM::tLDRH:
- case ARM::tLDRHi:
- case ARM::tLDRSB:
- case ARM::tLDRSH:
- case ARM::tLDRcp:
- case ARM::tLDRi:
- case ARM::tLDRpci:
- case ARM::tLDRpci_pic:
- case ARM::tLDRspi:
- case ARM::tLEApcrel:
- case ARM::tLEApcrelJT:
- case ARM::tLSLri:
- case ARM::tLSLrr:
- case ARM::tLSRri:
- case ARM::tLSRrr:
- case ARM::tMOVCCi:
- case ARM::tMOVCCr:
- case ARM::tMOVCCr_pseudo:
- case ARM::tMOVSr:
- case ARM::tMOVgpr2gpr:
- case ARM::tMOVgpr2tgpr:
- case ARM::tMOVi8:
- case ARM::tMOVr:
- case ARM::tMOVtgpr2gpr:
- case ARM::tMUL:
- case ARM::tMVN:
- case ARM::tNOP:
- case ARM::tORR:
- case ARM::tPICADD:
- case ARM::tPOP:
- case ARM::tPOP_RET:
- case ARM::tPUSH:
- case ARM::tREV:
- case ARM::tREV16:
- case ARM::tREVSH:
- case ARM::tROR:
- case ARM::tRSB:
- case ARM::tRestore:
- case ARM::tSBC:
- case ARM::tSETENDBE:
- case ARM::tSETENDLE:
- case ARM::tSEV:
- case ARM::tSTM:
- case ARM::tSTR:
- case ARM::tSTRB:
- case ARM::tSTRBi:
- case ARM::tSTRH:
- case ARM::tSTRHi:
- case ARM::tSTRi:
- case ARM::tSTRspi:
- case ARM::tSUBi3:
- case ARM::tSUBi8:
- case ARM::tSUBrr:
- case ARM::tSUBspi:
- case ARM::tSUBspi_:
- case ARM::tSVC:
- case ARM::tSXTB:
- case ARM::tSXTH:
- case ARM::tSpill:
- case ARM::tTPsoft:
- case ARM::tTRAP:
- case ARM::tTST:
- case ARM::tUXTB:
- case ARM::tUXTH:
- case ARM::tWFE:
- case ARM::tWFI:
- case ARM::tYIELD: {
- break;
- }
- default:
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "Not supported instr: " << MI;
- llvm_report_error(Msg.str());
- }
- return Value;
-}
-
diff --git a/libclamav/c++/ARMGenDAGISel.inc b/libclamav/c++/ARMGenDAGISel.inc
deleted file mode 100644
index 275c63e..0000000
--- a/libclamav/c++/ARMGenDAGISel.inc
+++ /dev/null
@@ -1,26244 +0,0 @@
-//===- TableGen'erated file -------------------------------------*- C++ -*-===//
-//
-// DAG Instruction Selector for the ARM target
-//
-// Automatically generated file, do not edit!
-//
-//===----------------------------------------------------------------------===//
-
-// *** NOTE: This file is #included into the middle of the target
-// *** instruction selector class. These functions are really methods.
-
-
-// Predicate functions.
-inline bool Predicate_adde_dead_carry(SDNode *N) const {
-return !N->hasAnyUseOfValue(1);
-}
-inline bool Predicate_adde_live_carry(SDNode *N) const {
-return N->hasAnyUseOfValue(1);
-}
-inline bool Predicate_atomic_cmp_swap_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_cmp_swap_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_cmp_swap_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_cmp_swap_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_add_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_add_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_add_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_add_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_and_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_and_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_and_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_and_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_max_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_max_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_max_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_max_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_min_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_min_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_min_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_min_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_nand_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_nand_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_nand_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_nand_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_or_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_or_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_or_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_or_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_sub_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_sub_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_sub_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_sub_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_umax_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_umax_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_umax_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_umax_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_umin_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_umin_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_umin_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_umin_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_xor_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_xor_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_xor_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_xor_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_swap_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_swap_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_swap_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_swap_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_bf_inv_mask_imm(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- uint32_t v = (uint32_t)N->getZExtValue();
- if (v == 0xffffffff)
- return 0;
- // there can be 1's on either or both "outsides", all the "inside"
- // bits must be 0's
- unsigned int lsb = 0, msb = 31;
- while (v & (1 << msb)) --msb;
- while (v & (1 << lsb)) ++lsb;
- for (unsigned int i = lsb; i <= msb; ++i) {
- if (v & (1 << i))
- return 0;
- }
- return 1;
-
-}
-inline bool Predicate_cvtff(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FF;
-
-}
-inline bool Predicate_cvtfs(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FS;
-
-}
-inline bool Predicate_cvtfu(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FU;
-
-}
-inline bool Predicate_cvtsf(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SF;
-
-}
-inline bool Predicate_cvtss(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SS;
-
-}
-inline bool Predicate_cvtsu(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SU;
-
-}
-inline bool Predicate_cvtuf(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_UF;
-
-}
-inline bool Predicate_cvtus(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_US;
-
-}
-inline bool Predicate_cvtuu(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_UU;
-
-}
-inline bool Predicate_extload(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
-
-}
-inline bool Predicate_extloadf32(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f32;
-
-}
-inline bool Predicate_extloadf64(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f64;
-
-}
-inline bool Predicate_extloadi1(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
-
-}
-inline bool Predicate_extloadi16(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_extloadi32(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_extloadi8(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_imm0_255(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return (uint32_t)N->getZExtValue() < 256;
-
-}
-inline bool Predicate_imm0_255_comp(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return ~((uint32_t)N->getZExtValue()) < 256;
-
-}
-inline bool Predicate_imm0_255_neg(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return (uint32_t)(-N->getZExtValue()) < 255;
-
-}
-inline bool Predicate_imm0_31(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return (int32_t)N->getZExtValue() < 32;
-
-}
-inline bool Predicate_imm0_4095(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return (uint32_t)N->getZExtValue() < 4096;
-
-}
-inline bool Predicate_imm0_4095_neg(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return (uint32_t)(-N->getZExtValue()) < 4096;
-
-}
-inline bool Predicate_imm0_65535(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return (uint32_t)N->getZExtValue() < 65536;
-
-}
-inline bool Predicate_imm0_7(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return (uint32_t)N->getZExtValue() < 8;
-
-}
-inline bool Predicate_imm0_7_neg(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return (uint32_t)-N->getZExtValue() < 8;
-
-}
-inline bool Predicate_imm16_31(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return (int32_t)N->getZExtValue() >= 16 && (int32_t)N->getZExtValue() < 32;
-
-}
-inline bool Predicate_imm1_15(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return (int32_t)N->getZExtValue() >= 1 && (int32_t)N->getZExtValue() < 16;
-
-}
-inline bool Predicate_imm1_31(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return (int32_t)N->getZExtValue() >= 1 && (int32_t)N->getZExtValue() < 32;
-
-}
-inline bool Predicate_imm8_255(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return (uint32_t)N->getZExtValue() >= 8 && (uint32_t)N->getZExtValue() < 256;
-
-}
-inline bool Predicate_imm8_255_neg(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- unsigned Val = -N->getZExtValue();
- return Val >= 8 && Val < 256;
-
-}
-inline bool Predicate_immAllOnesV(SDNode *N) const {
-
- return ISD::isBuildVectorAllOnes(N);
-
-}
-inline bool Predicate_immAllOnesV_bc(SDNode *N) const {
-
- return ISD::isBuildVectorAllOnes(N);
-
-}
-inline bool Predicate_immAllZerosV(SDNode *N) const {
-
- return ISD::isBuildVectorAllZeros(N);
-
-}
-inline bool Predicate_immAllZerosV_bc(SDNode *N) const {
-
- return ISD::isBuildVectorAllZeros(N);
-
-}
-inline bool Predicate_istore(SDNode *N) const {
-
- return !cast<StoreSDNode>(N)->isTruncatingStore();
-
-}
-inline bool Predicate_itruncstore(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->isTruncatingStore();
-
-}
-inline bool Predicate_lo16AllZero(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- // Returns true if all low 16-bits are 0.
- return (((uint32_t)N->getZExtValue()) & 0xFFFFUL) == 0;
-
-}
-inline bool Predicate_load(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
-
-}
-inline bool Predicate_post_store(SDNode *N) const {
-
- ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
- return AM == ISD::POST_INC || AM == ISD::POST_DEC;
-
-}
-inline bool Predicate_post_truncst(SDNode *N) const {
-
- ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
- return AM == ISD::POST_INC || AM == ISD::POST_DEC;
-
-}
-inline bool Predicate_post_truncstf32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
-
-}
-inline bool Predicate_post_truncsti1(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
-
-}
-inline bool Predicate_post_truncsti16(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_post_truncsti32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_post_truncsti8(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_pre_store(SDNode *N) const {
-
- ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
- return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
-
-}
-inline bool Predicate_pre_truncst(SDNode *N) const {
-
- ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
- return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
-
-}
-inline bool Predicate_pre_truncstf32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
-
-}
-inline bool Predicate_pre_truncsti1(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
-
-}
-inline bool Predicate_pre_truncsti16(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_pre_truncsti32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_pre_truncsti8(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_rot_imm(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- int32_t v = (int32_t)N->getZExtValue();
- return v == 8 || v == 16 || v == 24;
-
-}
-inline bool Predicate_sext_16_node(SDNode *N) const {
-
- return CurDAG->ComputeNumSignBits(SDValue(N,0)) >= 17;
-
-}
-inline bool Predicate_sextload(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
-
-}
-inline bool Predicate_sextloadi1(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
-
-}
-inline bool Predicate_sextloadi16(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_sextloadi32(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_sextloadi8(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_so_imm(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return ARM_AM::getSOImmVal(N->getZExtValue()) != -1;
-
-}
-inline bool Predicate_so_imm2part(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return ARM_AM::isSOImmTwoPartVal((unsigned)N->getZExtValue());
-
-}
-inline bool Predicate_so_imm_neg(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return ARM_AM::getSOImmVal(-(int)N->getZExtValue()) != -1;
-
-}
-inline bool Predicate_so_imm_not(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return ARM_AM::getSOImmVal(~(int)N->getZExtValue()) != -1;
-
-}
-inline bool Predicate_so_neg_imm2part(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return ARM_AM::isSOImmTwoPartVal(-(int)N->getZExtValue());
-
-}
-inline bool Predicate_store(SDNode *N) const {
-
- return !cast<StoreSDNode>(N)->isTruncatingStore();
-
-}
-inline bool Predicate_sube_dead_carry(SDNode *N) const {
-return !N->hasAnyUseOfValue(1);
-}
-inline bool Predicate_sube_live_carry(SDNode *N) const {
-return N->hasAnyUseOfValue(1);
-}
-inline bool Predicate_t2_so_imm(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return ARM_AM::getT2SOImmVal((uint32_t)N->getZExtValue()) != -1;
-
-}
-inline bool Predicate_t2_so_imm2part(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return ARM_AM::isT2SOImmTwoPartVal((unsigned)N->getZExtValue());
-
-}
-inline bool Predicate_t2_so_imm_neg(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return ARM_AM::getT2SOImmVal(-((int)N->getZExtValue())) != -1;
-
-}
-inline bool Predicate_t2_so_imm_not(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return ARM_AM::getT2SOImmVal(~((uint32_t)N->getZExtValue())) != -1;
-
-}
-inline bool Predicate_t2_so_neg_imm2part(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return ARM_AM::isT2SOImmTwoPartVal(-(int)N->getZExtValue());
-
-}
-inline bool Predicate_thumb_immshifted(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return ARM_AM::isThumbImmShiftedVal((unsigned)N->getZExtValue());
-
-}
-inline bool Predicate_truncstore(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->isTruncatingStore();
-
-}
-inline bool Predicate_truncstoref32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
-
-}
-inline bool Predicate_truncstoref64(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f64;
-
-}
-inline bool Predicate_truncstorei16(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_truncstorei32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_truncstorei8(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_unindexedload(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
-
-}
-inline bool Predicate_unindexedstore(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
-
-}
-inline bool Predicate_vfp_f32imm(SDNode *inN) const {
- ConstantFPSDNode *N = cast<ConstantFPSDNode>(inN);
-
- return ARM::getVFPf32Imm(N->getValueAPF()) != -1;
-
-}
-inline bool Predicate_vfp_f64imm(SDNode *inN) const {
- ConstantFPSDNode *N = cast<ConstantFPSDNode>(inN);
-
- return ARM::getVFPf64Imm(N->getValueAPF()) != -1;
-
-}
-inline bool Predicate_vmovImm16(SDNode *N) const {
-
- return ARM::getVMOVImm(N, 2, *CurDAG).getNode() != 0;
-
-}
-inline bool Predicate_vmovImm32(SDNode *N) const {
-
- return ARM::getVMOVImm(N, 4, *CurDAG).getNode() != 0;
-
-}
-inline bool Predicate_vmovImm64(SDNode *N) const {
-
- return ARM::getVMOVImm(N, 8, *CurDAG).getNode() != 0;
-
-}
-inline bool Predicate_vmovImm8(SDNode *N) const {
-
- return ARM::getVMOVImm(N, 1, *CurDAG).getNode() != 0;
-
-}
-inline bool Predicate_vtFP(SDNode *inN) const {
- VTSDNode *N = cast<VTSDNode>(inN);
- return N->getVT().isFloatingPoint();
-}
-inline bool Predicate_vtInt(SDNode *inN) const {
- VTSDNode *N = cast<VTSDNode>(inN);
- return N->getVT().isInteger();
-}
-inline bool Predicate_zextload(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
-
-}
-inline bool Predicate_zextloadi1(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
-
-}
-inline bool Predicate_zextloadi16(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_zextloadi32(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_zextloadi8(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-
-
-// The main instruction selector code.
-SDNode *SelectCode(SDNode *N) {
- // Opcodes are emitted as 2 bytes, TARGET_OPCODE handles this.
- #define TARGET_OPCODE(X) X & 255, unsigned(X) >> 8
- static const unsigned char MatcherTable[] = {
- OPC_SwitchOpcode , 94|128,83, ISD::OR,
- OPC_Scope, 27|128,59,
- OPC_MoveChild, 0,
- OPC_Scope, 80|128,8,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 12|128,1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 102|128,3,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 95,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 95,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 95,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 95,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 95,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 44|128,1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 69,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 69,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 45|128,1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 70,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 70,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 99,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 75|128,10,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 56|128,5,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 114,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 114,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 114,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 114,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 114,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 114,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 86|128,1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 92,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 92,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 87|128,1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 93,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 93,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 88|128,1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 94,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 94,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 0,
- 66|128,7,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 102|128,3,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 95,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 95,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 95,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 95,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 95,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 44|128,1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 69,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 69,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 45|128,1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 70,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 70,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 99,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 75|128,10,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 56|128,5,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 114,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 114,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 114,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 114,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 114,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 114,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 86|128,1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 92,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 92,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 87|128,1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 93,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 93,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 88|128,1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 94,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 94,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 0,
- 66|128,7,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 102|128,3,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 95,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 95,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 95,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 95,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 95,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 44|128,1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 69,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 69,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 45|128,1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 70,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 70,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 99,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 75|128,10,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 56|128,5,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 114,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 114,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 114,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 114,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 114,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 114,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 86|128,1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 92,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 92,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 87|128,1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 93,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 93,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 88|128,1,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
- OPC_MoveChild, 0,
- OPC_Scope, 94,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 94,
- OPC_CheckAndImm, 0|128,0|128,0|128,120|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV16), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 0,
- 20|128,1,
- OPC_CheckAndImm, 0|128,0|128,124|128,127|128,15,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 63, ISD::SRA,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 0,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PKHTB), 0,
- 1, MVT::i32, 5, 0, 1, 3, 4, 5,
- 22,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2PKHTB), 0,
- 1, MVT::i32, 5, 0, 1, 3, 4, 5,
- 0,
- 63, ISD::SRL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PKHTB), 0,
- 1, MVT::i32, 5, 0, 1, 3, 4, 5,
- 22,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2PKHTB), 0,
- 1, MVT::i32, 5, 0, 1, 3, 4, 5,
- 0,
- 0,
- 110|128,1,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_Scope, 26|128,1,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 73, ISD::SRA,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 0,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,127|128,15,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PKHTB), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 22,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2PKHTB), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 0,
- 73, ISD::SRL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,127|128,15,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2PKHTB), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 22,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PKHTB), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 0,
- 0,
- 75,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,127|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PKHBT), 0,
- 1, MVT::i32, 5, 0, 1, 3, 4, 5,
- 22,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2PKHBT), 0,
- 1, MVT::i32, 5, 0, 1, 3, 4, 5,
- 0,
- 0,
- 79,
- OPC_CheckAndImm, 0|128,0|128,124|128,127|128,15,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PKHBT), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 22,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2PKHBT), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 0,
- 0,
- 50,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ORNrs), 0,
- 1, MVT::i32, 6, 0, 2, 3, 4, 5, 6,
- 115|128,14,
- OPC_MoveChild, 0,
- OPC_Scope, 48,
- OPC_CheckOpcode, ISD::XOR,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ORNrs), 0,
- 1, MVT::i32, 6, 1, 2, 3, 4, 5, 6,
- 41,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,127|128,15,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PKHBT), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 46,
- OPC_CheckAndImm, 0|128,0|128,124|128,127|128,15,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 16,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PKHTB), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 41,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,127|128,15,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2PKHBT), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 81,
- OPC_CheckAndImm, 0|128,0|128,124|128,127|128,15,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_Scope, 36,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 16,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2PKHTB), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 31,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2PKHBT), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 46,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,127|128,15,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 16,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2PKHTB), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 41,
- OPC_CheckAndImm, 0|128,0|128,124|128,127|128,15,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PKHBT), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 46,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 0|128,0|128,124|128,127|128,15,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 16,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PKHTB), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 72,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 0,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PKHBT), 0,
- 1, MVT::i32, 5, 0, 1, 3, 4, 5,
- 22,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2PKHBT), 0,
- 1, MVT::i32, 5, 0, 1, 3, 4, 5,
- 0,
- 72,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 0,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2PKHBT), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 22,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PKHBT), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 0,
- 36|128,10,
- OPC_CheckOpcode, ISD::AND,
- OPC_Scope, 9|128,4,
- OPC_RecordChild0,
- OPC_Scope, 98|128,2,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::AND,
- OPC_Scope, 103,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_Scope, 58,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 20, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLd), 0,
- 1, MVT::v2i32, 5, 1, 0, 2, 3, 4,
- 20, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLq), 0,
- 1, MVT::v4i32, 5, 1, 0, 2, 3, 4,
- 0,
- 34,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLd), 0,
- 1, MVT::v2i32, 5, 1, 0, 2, 3, 4,
- 0,
- 80,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_Scope, 35,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLd), 0,
- 1, MVT::v2i32, 5, 1, 0, 2, 3, 4,
- 35,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLd), 0,
- 1, MVT::v2i32, 5, 1, 0, 2, 3, 4,
- 0,
- 79,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_Scope, 34,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLd), 0,
- 1, MVT::v2i32, 5, 0, 1, 2, 3, 4,
- 34,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLd), 0,
- 1, MVT::v2i32, 5, 0, 1, 2, 3, 4,
- 0,
- 80,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_Scope, 35,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLd), 0,
- 1, MVT::v2i32, 5, 0, 1, 2, 3, 4,
- 35,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLd), 0,
- 1, MVT::v2i32, 5, 0, 1, 2, 3, 4,
- 0,
- 0,
- 32|128,1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 76,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::AND,
- OPC_Scope, 29,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLd), 0,
- 1, MVT::v2i32, 5, 1, 2, 0, 3, 4,
- 29,
- OPC_MoveChild, 0,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLd), 0,
- 1, MVT::v2i32, 5, 1, 2, 0, 3, 4,
- 0,
- 76,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::AND,
- OPC_Scope, 29,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLd), 0,
- 1, MVT::v2i32, 5, 1, 2, 0, 3, 4,
- 29,
- OPC_MoveChild, 0,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLd), 0,
- 1, MVT::v2i32, 5, 1, 2, 0, 3, 4,
- 0,
- 0,
- 0,
- 34|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 77,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::AND,
- OPC_Scope, 29,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLd), 0,
- 1, MVT::v2i32, 5, 0, 2, 1, 3, 4,
- 29,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLd), 0,
- 1, MVT::v2i32, 5, 0, 2, 1, 3, 4,
- 0,
- 77,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::AND,
- OPC_Scope, 29,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLd), 0,
- 1, MVT::v2i32, 5, 0, 2, 1, 3, 4,
- 29,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLd), 0,
- 1, MVT::v2i32, 5, 0, 2, 1, 3, 4,
- 0,
- 0,
- 75|128,3,
- OPC_RecordChild0,
- OPC_Scope, 36|128,2,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::AND,
- OPC_Scope, 41,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLq), 0,
- 1, MVT::v4i32, 5, 1, 0, 2, 3, 4,
- 80,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_Scope, 35,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLq), 0,
- 1, MVT::v4i32, 5, 1, 0, 2, 3, 4,
- 35,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLq), 0,
- 1, MVT::v4i32, 5, 1, 0, 2, 3, 4,
- 0,
- 79,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_Scope, 34,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLq), 0,
- 1, MVT::v4i32, 5, 0, 1, 2, 3, 4,
- 34,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLq), 0,
- 1, MVT::v4i32, 5, 0, 1, 2, 3, 4,
- 0,
- 80,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_Scope, 35,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLq), 0,
- 1, MVT::v4i32, 5, 0, 1, 2, 3, 4,
- 35,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLq), 0,
- 1, MVT::v4i32, 5, 0, 1, 2, 3, 4,
- 0,
- 0,
- 32|128,1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 76,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::AND,
- OPC_Scope, 29,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLq), 0,
- 1, MVT::v4i32, 5, 1, 2, 0, 3, 4,
- 29,
- OPC_MoveChild, 0,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLq), 0,
- 1, MVT::v4i32, 5, 1, 2, 0, 3, 4,
- 0,
- 76,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::AND,
- OPC_Scope, 29,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLq), 0,
- 1, MVT::v4i32, 5, 1, 2, 0, 3, 4,
- 29,
- OPC_MoveChild, 0,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLq), 0,
- 1, MVT::v4i32, 5, 1, 2, 0, 3, 4,
- 0,
- 0,
- 0,
- 34|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 77,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::AND,
- OPC_Scope, 29,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLq), 0,
- 1, MVT::v4i32, 5, 0, 2, 1, 3, 4,
- 29,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLq), 0,
- 1, MVT::v4i32, 5, 0, 2, 1, 3, 4,
- 0,
- 77,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::AND,
- OPC_Scope, 29,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLq), 0,
- 1, MVT::v4i32, 5, 0, 2, 1, 3, 4,
- 29,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBSLq), 0,
- 1, MVT::v4i32, 5, 0, 2, 1, 3, 4,
- 0,
- 0,
- 0,
- 40,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 0, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MOVTi16), 0,
- 1, MVT::i32, 4, 0, 3, 4, 5,
- 0,
- 32,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ORRrs), 0,
- 1, MVT::i32, 7, 0, 2, 3, 4, 5, 6, 7,
- 42,
- OPC_MoveChild, 0,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 0, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MOVTi16), 0,
- 1, MVT::i32, 4, 0, 3, 4, 5,
- 17|128,1,
- OPC_RecordChild0,
- OPC_Scope, 54,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ORNri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 31,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ORRrs), 0,
- 1, MVT::i32, 7, 1, 2, 3, 4, 5, 6, 7,
- 54,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ORNri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 0,
- 110,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 51,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ORNri), 0,
- 1, MVT::i32, 5, 1, 2, 3, 4, 5,
- 51,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ORNri), 0,
- 1, MVT::i32, 5, 1, 2, 3, 4, 5,
- 0,
- 109,
- OPC_RecordChild0,
- OPC_Scope, 59,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 25,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ORRrs), 0,
- 1, MVT::i32, 6, 0, 2, 3, 4, 5, 6,
- 25,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ORRrs), 0,
- 1, MVT::i32, 6, 1, 2, 3, 4, 5, 6,
- 0,
- 45,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ORNrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 0,
- 46,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ORNrr), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 92,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 53,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 19, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VORNd), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 19, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VORNq), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 0,
- 30,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VORNd), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 0,
- 70,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 31,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VORNd), 0,
- 1, MVT::v2i32, 4, 1, 0, 2, 3,
- 31,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VORNd), 0,
- 1, MVT::v2i32, 4, 1, 0, 2, 3,
- 0,
- 35,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VORNq), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 70,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 31,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VORNq), 0,
- 1, MVT::v4i32, 4, 1, 0, 2, 3,
- 31,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VORNq), 0,
- 1, MVT::v4i32, 4, 1, 0, 2, 3,
- 0,
- 61,
- OPC_CheckOrImm, 0|128,0|128,124|128,127|128,15,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 24,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitInteger, MVT::i32, 127|128,127|128,3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MOVTi16), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 24,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 127|128,127|128,3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MOVTi16), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 0,
- 93|128,2,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 94|128,1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 30,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ORRri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 30,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ORRri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 33,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 1, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ORNri), 0,
- 1, MVT::i32, 5, 0, 3, 4, 5, 6,
- 59,
- OPC_CheckPredicate, 7,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 2, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::ORRri), 0,
- 1, MVT::i32, 5, 0, 3, 4, 5, 6,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 3, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ORRri), 0,
- 1, MVT::i32, 5, 7, 9, 10, 11, 12,
- 59,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 4, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::t2ORRri), 0,
- 1, MVT::i32, 5, 0, 3, 4, 5, 6,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 5, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ORRri), 0,
- 1, MVT::i32, 5, 7, 9, 10, 11, 12,
- 0,
- 76,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 23,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ORRrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tORR), 0,
- 1, MVT::i32, 5, 2, 0, 1, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ORRrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 0,
- 21,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VORRd), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 21,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VORRq), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 30|128,71, ISD::ADD,
- OPC_Scope, 94,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_Scope, 12,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABB), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 12,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABB), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 0,
- 94,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_Scope, 12,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABB), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 12,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABB), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 59|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_Scope, 44,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::UXTABrr_rot), 0,
- 1, MVT::i32, 5, 0, 1, 3, 4, 5,
- 45,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::UXTAHrr_rot), 0,
- 1, MVT::i32, 5, 0, 1, 3, 4, 5,
- 44,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2UXTABrr_rot), 0,
- 1, MVT::i32, 5, 0, 1, 3, 4, 5,
- 45,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2UXTAHrr_rot), 0,
- 1, MVT::i32, 5, 0, 1, 3, 4, 5,
- 0,
- 62|128,1,
- OPC_MoveChild, 0,
- OPC_Scope, 45,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::UXTABrr_rot), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 46,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::UXTAHrr_rot), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 45,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2UXTABrr_rot), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 46,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2UXTAHrr_rot), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 0,
- 81|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 10|128,1, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_Scope, 57,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABT), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 73,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_Scope, 12,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLATB), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 12,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABT), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 0,
- 0,
- 61, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLAWB), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 0,
- 0|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_Scope, 58,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABT), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 58,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABT), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 66,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLATB), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 0|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_Scope, 58,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLATB), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 58,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLATB), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 66,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLAWB), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 0|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 58,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLAWB), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 58,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLAWB), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 115,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_Scope, 26,
- OPC_CheckAndImm, 127|128,1,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::UXTABrr), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 27,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::UXTAHrr), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 26,
- OPC_CheckAndImm, 127|128,1,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2UXTABrr), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 27,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2UXTAHrr), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 0,
- 118,
- OPC_MoveChild, 0,
- OPC_Scope, 27,
- OPC_CheckAndImm, 127|128,1,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::UXTABrr), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 28,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::UXTAHrr), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 27,
- OPC_CheckAndImm, 127|128,1,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2UXTABrr), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 28,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2UXTAHrr), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 0,
- 48|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 94, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLATT), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 20,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLATT), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 20,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLATT), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 0,
- 73, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLAWT), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 20,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLAWT), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 0,
- 0,
- 70,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_Scope, 12,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLATT), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 12,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLATT), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 54,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLAWT), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 104,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 46,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLAWT), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 46,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLAWT), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 54,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLATT), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 70,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_Scope, 12,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLATT), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 12,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLATT), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 54,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLAWT), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 104,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 46,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLAWT), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 46,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLAWT), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 116|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 35|128,1, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 67, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABT), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 20,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLABT), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 0,
- 88, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLATB), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 20,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLATB), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 20,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABT), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 0,
- 0,
- 71, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLAWB), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 20,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLAWB), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 0,
- 0,
- 100,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 44, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABT), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 44, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABT), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 52,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLATB), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 100,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 44, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLATB), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 44, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLATB), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 52,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLAWB), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 100,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 44,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLAWB), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 44,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLAWB), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 52,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLABT), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 100,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 44, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLABT), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 44, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLABT), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 52,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLATB), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 100,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 44, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLATB), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 44, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLATB), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 52,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLAWB), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 100,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 44,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLAWB), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 44,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLAWB), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 126|128,1,
- OPC_RecordChild0,
- OPC_Scope, 31,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADDrs), 0,
- 1, MVT::i32, 7, 0, 2, 3, 4, 5, 6, 7,
- 10|128,1,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 88, ISD::MUL,
- OPC_Scope, 42,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABT), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 42,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLATB), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 0,
- 42, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLAWB), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 0,
- 31,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADDrs), 0,
- 1, MVT::i32, 7, 1, 2, 3, 4, 5, 6, 7,
- 46,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABT), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 0,
- 94,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 43,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABT), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 43,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABT), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 47,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLATB), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 94,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 43,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLATB), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 43,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLATB), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 47,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLAWB), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 89,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_RecordChild0,
- OPC_Scope, 38,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLAWB), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 38,
- OPC_MoveChild, 0,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLAWB), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 7|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_Scope, 54,
- OPC_CheckValueType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SXTABrr_rot), 0,
- 1, MVT::i32, 5, 0, 1, 3, 4, 5,
- 22,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SXTABrr_rot), 0,
- 1, MVT::i32, 5, 0, 1, 3, 4, 5,
- 0,
- 54,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SXTAHrr_rot), 0,
- 1, MVT::i32, 5, 0, 1, 3, 4, 5,
- 22,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SXTAHrr_rot), 0,
- 1, MVT::i32, 5, 0, 1, 3, 4, 5,
- 0,
- 0,
- 8|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_Scope, 55,
- OPC_CheckValueType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SXTABrr_rot), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 22,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SXTABrr_rot), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 0,
- 55,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SXTAHrr_rot), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 22,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SXTAHrr_rot), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 0,
- 0,
- 18|128,2,
- OPC_RecordChild0,
- OPC_Scope, 49,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABB), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 30,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADDrs), 0,
- 1, MVT::i32, 6, 0, 2, 3, 4, 5, 6,
- 60|128,1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 45,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLABB), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 6|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 62,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 23, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv4i16), 0,
- 1, MVT::v4i16, 6, 0, 1, 2, 4, 5, 6,
- 23, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv8i16), 0,
- 1, MVT::v8i16, 6, 0, 1, 2, 4, 5, 6,
- 0,
- 62,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 23, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv2i32), 0,
- 1, MVT::v2i32, 6, 0, 1, 2, 4, 5, 6,
- 23, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv4i32), 0,
- 1, MVT::v4i32, 6, 0, 1, 2, 4, 5, 6,
- 0,
- 0,
- 0,
- 0,
- 50,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABB), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 31,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADDrs), 0,
- 1, MVT::i32, 6, 1, 2, 3, 4, 5, 6,
- 50,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMLABB), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 46,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv4i16), 0,
- 1, MVT::v4i16, 6, 0, 3, 1, 4, 5, 6,
- 92,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 42,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv4i16), 0,
- 1, MVT::v4i16, 6, 3, 0, 1, 4, 5, 6,
- 42,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv4i16), 0,
- 1, MVT::v4i16, 6, 3, 2, 0, 4, 5, 6,
- 0,
- 46,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv2i32), 0,
- 1, MVT::v2i32, 6, 0, 3, 1, 4, 5, 6,
- 92,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 42,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv2i32), 0,
- 1, MVT::v2i32, 6, 3, 0, 1, 4, 5, 6,
- 42,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv2i32), 0,
- 1, MVT::v2i32, 6, 3, 2, 0, 4, 5, 6,
- 0,
- 46,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv8i16), 0,
- 1, MVT::v8i16, 6, 0, 3, 1, 4, 5, 6,
- 92,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 42,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv8i16), 0,
- 1, MVT::v8i16, 6, 3, 0, 1, 4, 5, 6,
- 42,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv8i16), 0,
- 1, MVT::v8i16, 6, 3, 2, 0, 4, 5, 6,
- 0,
- 46,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv4i32), 0,
- 1, MVT::v4i32, 6, 0, 3, 1, 4, 5, 6,
- 92,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 42,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv4i32), 0,
- 1, MVT::v4i32, 6, 3, 0, 1, 4, 5, 6,
- 42,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv4i32), 0,
- 1, MVT::v4i32, 6, 3, 2, 0, 4, 5, 6,
- 0,
- 47|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 110,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 50,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitNodeXForm, 6, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v4i16, 2, 2, 5,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitNodeXForm, 7, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv8i16), 0,
- 1, MVT::v8i16, 6, 0, 1, 6, 8, 9, 10,
- 50,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitNodeXForm, 8, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2i32, 2, 2, 5,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitNodeXForm, 9, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv4i32), 0,
- 1, MVT::v4i32, 6, 0, 1, 6, 8, 9, 10,
- 0,
- 56,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 6, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v4i16, 2, 1, 5,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 7, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv8i16), 0,
- 1, MVT::v8i16, 6, 0, 3, 6, 8, 9, 10,
- 0,
- 122,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 57,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v8i16,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 6, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v4i16, 2, 1, 5,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 7, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv8i16), 0,
- 1, MVT::v8i16, 6, 3, 0, 6, 8, 9, 10,
- 57,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v8i16,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 6, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v4i16, 2, 0, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 7, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv8i16), 0,
- 1, MVT::v8i16, 6, 3, 2, 6, 8, 9, 10,
- 0,
- 61,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 8, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2i32, 2, 1, 5,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 9, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv4i32), 0,
- 1, MVT::v4i32, 6, 0, 3, 6, 8, 9, 10,
- 122,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 57,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4i32,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 8, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2i32, 2, 1, 5,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 9, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv4i32), 0,
- 1, MVT::v4i32, 6, 3, 0, 6, 8, 9, 10,
- 57,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4i32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 8, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2i32, 2, 0, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 9, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslv4i32), 0,
- 1, MVT::v4i32, 6, 3, 2, 6, 8, 9, 10,
- 0,
- 57|128,6,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 74|128,1, ARMISD::VSHRs,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAsv8i8), 0,
- 1, MVT::v8i8, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAsv4i16), 0,
- 1, MVT::v4i16, 5, 0, 1, 3, 4, 5,
- 22, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAsv2i32), 0,
- 1, MVT::v2i32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAsv1i64), 0,
- 1, MVT::v1i64, 5, 0, 1, 3, 4, 5,
- 22, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAsv16i8), 0,
- 1, MVT::v16i8, 5, 0, 1, 3, 4, 5,
- 22, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAsv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAsv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAsv2i64), 0,
- 1, MVT::v2i64, 5, 0, 1, 3, 4, 5,
- 0,
- 74|128,1, ARMISD::VSHRu,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAuv8i8), 0,
- 1, MVT::v8i8, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAuv4i16), 0,
- 1, MVT::v4i16, 5, 0, 1, 3, 4, 5,
- 22, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAuv2i32), 0,
- 1, MVT::v2i32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAuv1i64), 0,
- 1, MVT::v1i64, 5, 0, 1, 3, 4, 5,
- 22, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAuv16i8), 0,
- 1, MVT::v16i8, 5, 0, 1, 3, 4, 5,
- 22, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAuv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAuv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAuv2i64), 0,
- 1, MVT::v2i64, 5, 0, 1, 3, 4, 5,
- 0,
- 74|128,1, ARMISD::VRSHRs,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAsv8i8), 0,
- 1, MVT::v8i8, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAsv4i16), 0,
- 1, MVT::v4i16, 5, 0, 1, 3, 4, 5,
- 22, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAsv2i32), 0,
- 1, MVT::v2i32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAsv1i64), 0,
- 1, MVT::v1i64, 5, 0, 1, 3, 4, 5,
- 22, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAsv16i8), 0,
- 1, MVT::v16i8, 5, 0, 1, 3, 4, 5,
- 22, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAsv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAsv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAsv2i64), 0,
- 1, MVT::v2i64, 5, 0, 1, 3, 4, 5,
- 0,
- 74|128,1, ARMISD::VRSHRu,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAuv8i8), 0,
- 1, MVT::v8i8, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAuv4i16), 0,
- 1, MVT::v4i16, 5, 0, 1, 3, 4, 5,
- 22, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAuv2i32), 0,
- 1, MVT::v2i32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAuv1i64), 0,
- 1, MVT::v1i64, 5, 0, 1, 3, 4, 5,
- 22, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAuv16i8), 0,
- 1, MVT::v16i8, 5, 0, 1, 3, 4, 5,
- 22, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAuv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAuv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAuv2i64), 0,
- 1, MVT::v2i64, 5, 0, 1, 3, 4, 5,
- 0,
- 0,
- 60|128,6,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 75|128,1, ARMISD::VSHRs,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAsv8i8), 0,
- 1, MVT::v8i8, 5, 2, 0, 3, 4, 5,
- 22, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAsv4i16), 0,
- 1, MVT::v4i16, 5, 2, 0, 3, 4, 5,
- 22, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAsv2i32), 0,
- 1, MVT::v2i32, 5, 2, 0, 3, 4, 5,
- 22, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAsv1i64), 0,
- 1, MVT::v1i64, 5, 2, 0, 3, 4, 5,
- 22, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAsv16i8), 0,
- 1, MVT::v16i8, 5, 2, 0, 3, 4, 5,
- 22, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAsv8i16), 0,
- 1, MVT::v8i16, 5, 2, 0, 3, 4, 5,
- 22, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAsv4i32), 0,
- 1, MVT::v4i32, 5, 2, 0, 3, 4, 5,
- 22, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAsv2i64), 0,
- 1, MVT::v2i64, 5, 2, 0, 3, 4, 5,
- 0,
- 75|128,1, ARMISD::VSHRu,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAuv8i8), 0,
- 1, MVT::v8i8, 5, 2, 0, 3, 4, 5,
- 22, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAuv4i16), 0,
- 1, MVT::v4i16, 5, 2, 0, 3, 4, 5,
- 22, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAuv2i32), 0,
- 1, MVT::v2i32, 5, 2, 0, 3, 4, 5,
- 22, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAuv1i64), 0,
- 1, MVT::v1i64, 5, 2, 0, 3, 4, 5,
- 22, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAuv16i8), 0,
- 1, MVT::v16i8, 5, 2, 0, 3, 4, 5,
- 22, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAuv8i16), 0,
- 1, MVT::v8i16, 5, 2, 0, 3, 4, 5,
- 22, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAuv4i32), 0,
- 1, MVT::v4i32, 5, 2, 0, 3, 4, 5,
- 22, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRAuv2i64), 0,
- 1, MVT::v2i64, 5, 2, 0, 3, 4, 5,
- 0,
- 75|128,1, ARMISD::VRSHRs,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAsv8i8), 0,
- 1, MVT::v8i8, 5, 2, 0, 3, 4, 5,
- 22, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAsv4i16), 0,
- 1, MVT::v4i16, 5, 2, 0, 3, 4, 5,
- 22, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAsv2i32), 0,
- 1, MVT::v2i32, 5, 2, 0, 3, 4, 5,
- 22, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAsv1i64), 0,
- 1, MVT::v1i64, 5, 2, 0, 3, 4, 5,
- 22, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAsv16i8), 0,
- 1, MVT::v16i8, 5, 2, 0, 3, 4, 5,
- 22, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAsv8i16), 0,
- 1, MVT::v8i16, 5, 2, 0, 3, 4, 5,
- 22, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAsv4i32), 0,
- 1, MVT::v4i32, 5, 2, 0, 3, 4, 5,
- 22, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAsv2i64), 0,
- 1, MVT::v2i64, 5, 2, 0, 3, 4, 5,
- 0,
- 75|128,1, ARMISD::VRSHRu,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAuv8i8), 0,
- 1, MVT::v8i8, 5, 2, 0, 3, 4, 5,
- 22, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAuv4i16), 0,
- 1, MVT::v4i16, 5, 2, 0, 3, 4, 5,
- 22, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAuv2i32), 0,
- 1, MVT::v2i32, 5, 2, 0, 3, 4, 5,
- 22, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAuv1i64), 0,
- 1, MVT::v1i64, 5, 2, 0, 3, 4, 5,
- 22, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAuv16i8), 0,
- 1, MVT::v16i8, 5, 2, 0, 3, 4, 5,
- 22, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAuv8i16), 0,
- 1, MVT::v8i16, 5, 2, 0, 3, 4, 5,
- 22, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAuv4i32), 0,
- 1, MVT::v4i32, 5, 2, 0, 3, 4, 5,
- 22, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSRAuv2i64), 0,
- 1, MVT::v2i64, 5, 2, 0, 3, 4, 5,
- 0,
- 0,
- 82,
- OPC_RecordChild0,
- OPC_Scope, 39,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABB), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 38,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 10, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SUBri), 0,
- 1, MVT::i32, 5, 0, 3, 4, 5, 6,
- 0,
- 40,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMLABB), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 127|128,4,
- OPC_RecordChild0,
- OPC_Scope, 60|128,4,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 30,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADDri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 33,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 11, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SUBri), 0,
- 1, MVT::i32, 5, 0, 3, 4, 5, 6,
- 30,
- OPC_CheckPredicate, 13,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tADDi3), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 30,
- OPC_CheckPredicate, 14,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tADDi8), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 33,
- OPC_CheckPredicate, 15,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 10, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tSUBi3), 0,
- 1, MVT::i32, 5, 2, 0, 4, 5, 6,
- 33,
- OPC_CheckPredicate, 16,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 10, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tSUBi8), 0,
- 1, MVT::i32, 5, 2, 0, 4, 5, 6,
- 30,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADDri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 30,
- OPC_CheckPredicate, 17,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADDri12), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 33,
- OPC_CheckPredicate, 18,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 12, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SUBri), 0,
- 1, MVT::i32, 5, 0, 3, 4, 5, 6,
- 33,
- OPC_CheckPredicate, 19,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 10, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SUBri12), 0,
- 1, MVT::i32, 5, 0, 3, 4, 5, 6,
- 59,
- OPC_CheckPredicate, 7,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 2, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::ADDri), 0,
- 1, MVT::i32, 5, 0, 3, 4, 5, 6,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 3, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADDri), 0,
- 1, MVT::i32, 5, 7, 9, 10, 11, 12,
- 59,
- OPC_CheckPredicate, 20,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 13, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::SUBri), 0,
- 1, MVT::i32, 5, 0, 3, 4, 5, 6,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 14, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SUBri), 0,
- 1, MVT::i32, 5, 7, 9, 10, 11, 12,
- 59,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 4, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::t2ADDri), 0,
- 1, MVT::i32, 5, 0, 3, 4, 5, 6,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 5, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADDri), 0,
- 1, MVT::i32, 5, 7, 9, 10, 11, 12,
- 59,
- OPC_CheckPredicate, 21,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 15, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::t2SUBri), 0,
- 1, MVT::i32, 5, 0, 3, 4, 5, 6,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 16, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SUBri), 0,
- 1, MVT::i32, 5, 7, 9, 10, 11, 12,
- 0,
- 61,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_Scope, 25,
- OPC_CheckValueType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SXTABrr), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 25,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SXTAHrr), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 64,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 30, ISD::MUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MLA), 0,
- 1, MVT::i32, 6, 0, 1, 2, 3, 4, 5,
- 26, ISD::MULHS,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMMLA), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 0,
- 62,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_Scope, 25,
- OPC_CheckValueType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SXTABrr), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 25,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SXTAHrr), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 0,
- 60,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 26, ISD::MUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MLA), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 26, ISD::MULHS,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMMLA), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 0,
- 14|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_SwitchType , 20, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAv8i8), 0,
- 1, MVT::v8i8, 5, 0, 1, 2, 3, 4,
- 20, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAv4i16), 0,
- 1, MVT::v4i16, 5, 0, 1, 2, 3, 4,
- 20, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAv2i32), 0,
- 1, MVT::v2i32, 5, 0, 1, 2, 3, 4,
- 20, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAv16i8), 0,
- 1, MVT::v16i8, 5, 0, 1, 2, 3, 4,
- 20, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 2, 3, 4,
- 20, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 2, 3, 4,
- 0,
- 63,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_Scope, 26,
- OPC_CheckValueType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SXTABrr), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 26,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SXTAHrr), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 0,
- 63,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 29, ISD::MUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MLA), 0,
- 1, MVT::i32, 6, 1, 2, 0, 3, 4, 5,
- 25, ISD::MULHS,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMMLA), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 0,
- 63,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_Scope, 26,
- OPC_CheckValueType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SXTABrr), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 26,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SXTAHrr), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 0,
- 59,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 25, ISD::MUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MLA), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 25, ISD::MULHS,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMMLA), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 0,
- 14|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAv8i8), 0,
- 1, MVT::v8i8, 5, 2, 0, 1, 3, 4,
- 20, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAv4i16), 0,
- 1, MVT::v4i16, 5, 2, 0, 1, 3, 4,
- 20, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAv2i32), 0,
- 1, MVT::v2i32, 5, 2, 0, 1, 3, 4,
- 20, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAv16i8), 0,
- 1, MVT::v16i8, 5, 2, 0, 1, 3, 4,
- 20, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAv8i16), 0,
- 1, MVT::v8i16, 5, 2, 0, 1, 3, 4,
- 20, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAv4i32), 0,
- 1, MVT::v4i32, 5, 2, 0, 1, 3, 4,
- 0,
- 120|128,1,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_SwitchType , 74, MVT::i32,
- OPC_Scope, 23,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADDrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tADDrr), 0,
- 1, MVT::i32, 5, 2, 0, 1, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADDrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 0,
- 19, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 19, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 19, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 19, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 19, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 19, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 19, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDv1i64), 0,
- 1, MVT::v1i64, 4, 0, 1, 2, 3,
- 19, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 0,
- 80|128,11, ISD::MUL,
- OPC_Scope, 50|128,4,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 65|128,3, ISD::SRA,
- OPC_Scope, 116,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_Scope, 57,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_Scope, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULBB), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 11,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULBB), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 0,
- 30,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULBT), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 0,
- 70,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_Scope, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULTB), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 11,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULBT), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 0,
- 55,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULTB), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 73|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 93, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 19,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULTT), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 19,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMULTT), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 19,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULTT), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 19,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMULTT), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 0,
- 91, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 19,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULTB), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 19,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMULTB), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 19,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULBT), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 19,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMULBT), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 0,
- 0,
- 0,
- 104, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 19,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULBT), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 19,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMULBT), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 19,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULTB), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 19,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMULTB), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 0,
- 0,
- 40,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULBT), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 55,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_Scope, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULTB), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 11,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULBT), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 0,
- 40,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULTB), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 66,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 19,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULBB), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 19,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMULBB), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 0,
- 0|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 59,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 22, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslv4i16), 0,
- 1, MVT::v4i16, 5, 0, 1, 3, 4, 5,
- 22, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 3, 4, 5,
- 0,
- 59,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 22, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslv2i32), 0,
- 1, MVT::v2i32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 3, 4, 5,
- 0,
- 0,
- 1|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 60,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 22, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslv4i16), 0,
- 1, MVT::v4i16, 5, 2, 0, 3, 4, 5,
- 22, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslv8i16), 0,
- 1, MVT::v8i16, 5, 2, 0, 3, 4, 5,
- 0,
- 60,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 22, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslv2i32), 0,
- 1, MVT::v2i32, 5, 2, 0, 3, 4, 5,
- 22, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslv4i32), 0,
- 1, MVT::v4i32, 5, 2, 0, 3, 4, 5,
- 0,
- 0,
- 106,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 48,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 6, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v4i16, 2, 1, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 7, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslv8i16), 0,
- 1, MVT::v8i16, 5, 0, 5, 7, 8, 9,
- 48,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 8, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2i32, 2, 1, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 9, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslv4i32), 0,
- 1, MVT::v4i32, 5, 0, 5, 7, 8, 9,
- 0,
- 107,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 49,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v8i16,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 6, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v4i16, 2, 0, 4,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 7, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslv8i16), 0,
- 1, MVT::v8i16, 5, 2, 5, 7, 8, 9,
- 49,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4i32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 8, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2i32, 2, 0, 4,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 9, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslv4i32), 0,
- 1, MVT::v4i32, 5, 2, 5, 7, 8, 9,
- 0,
- 111|128,1,
- OPC_RecordChild0,
- OPC_Scope, 32,
- OPC_MoveChild, 0,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULBB), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 73|128,1,
- OPC_RecordChild1,
- OPC_SwitchType , 70, MVT::i32,
- OPC_Scope, 23,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MUL), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tMUL), 0,
- 1, MVT::i32, 5, 2, 0, 1, 3, 4,
- 19,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MUL), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 0,
- 19, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 19, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 19, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 19, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 19, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 19, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 0,
- 120|128,16, ISD::AND,
- OPC_Scope, 3|128,1,
- OPC_CheckAndImm, 127|128,1|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 59, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 24,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::UXTB16r_rot), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 22,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 24,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2UXTB16r_rot), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 0,
- 59, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::UXTB16r_rot), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 22,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2UXTB16r_rot), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 42,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::UXTBr_rot), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 43,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::UXTHr_rot), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 44,
- OPC_CheckAndImm, 127|128,1|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::UXTB16r_rot), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 42,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2UXTBr_rot), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 43,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2UXTHr_rot), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 44,
- OPC_CheckAndImm, 127|128,1|128,124|128,7,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2UXTB16r_rot), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 24,
- OPC_CheckAndImm, 127|128,1,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::UXTBr), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 25,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::UXTHr), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 26,
- OPC_CheckAndImm, 127|128,1|128,124|128,7,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::UXTB16r), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 24,
- OPC_CheckAndImm, 127|128,1,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2UXTBr), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 25,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2UXTHr), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 26,
- OPC_CheckAndImm, 127|128,1|128,124|128,7,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2UXTB16r), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 51,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BICrs), 0,
- 1, MVT::i32, 7, 0, 2, 3, 4, 5, 6, 7,
- 51,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BICrs), 0,
- 1, MVT::i32, 7, 1, 2, 3, 4, 5, 6, 7,
- 50,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2BICrs), 0,
- 1, MVT::i32, 6, 0, 2, 3, 4, 5, 6,
- 50,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2BICrs), 0,
- 1, MVT::i32, 6, 1, 2, 3, 4, 5, 6,
- 98|128,1,
- OPC_RecordChild0,
- OPC_Scope, 31,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ANDrs), 0,
- 1, MVT::i32, 7, 0, 2, 3, 4, 5, 6, 7,
- 103,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 45,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BICri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 45,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2BICri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 0,
- 31,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ANDrs), 0,
- 1, MVT::i32, 7, 1, 2, 3, 4, 5, 6, 7,
- 54,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BICri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 0,
- 110,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 51,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BICri), 0,
- 1, MVT::i32, 5, 1, 2, 3, 4, 5,
- 51,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BICri), 0,
- 1, MVT::i32, 5, 1, 2, 3, 4, 5,
- 0,
- 55,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2BICri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 110,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 51,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2BICri), 0,
- 1, MVT::i32, 5, 1, 2, 3, 4, 5,
- 51,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2BICri), 0,
- 1, MVT::i32, 5, 1, 2, 3, 4, 5,
- 0,
- 32|128,1,
- OPC_RecordChild0,
- OPC_Scope, 59,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 25,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ANDrs), 0,
- 1, MVT::i32, 6, 0, 2, 3, 4, 5, 6,
- 25,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ANDrs), 0,
- 1, MVT::i32, 6, 1, 2, 3, 4, 5, 6,
- 0,
- 96,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 23,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BICrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBIC), 0,
- 1, MVT::i32, 5, 2, 0, 1, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2BICrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 0,
- 0,
- 97,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 23,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BICrr), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBIC), 0,
- 1, MVT::i32, 5, 2, 1, 0, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2BICrr), 0,
- 1, MVT::i32, 5, 1, 0, 2, 3, 4,
- 0,
- 92,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 53,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 19, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBICd), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 19, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBICq), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 0,
- 30,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBICd), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 0,
- 70,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 31,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBICd), 0,
- 1, MVT::v2i32, 4, 1, 0, 2, 3,
- 31,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBICd), 0,
- 1, MVT::v2i32, 4, 1, 0, 2, 3,
- 0,
- 35,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBICq), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 70,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 31,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBICq), 0,
- 1, MVT::v4i32, 4, 1, 0, 2, 3,
- 31,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VBICq), 0,
- 1, MVT::v4i32, 4, 1, 0, 2, 3,
- 0,
- 24,
- OPC_CheckAndImm, 127|128,1,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tUXTB), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 25,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tUXTH), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 61|128,2,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 62|128,1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 30,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ANDri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 26,
- OPC_CheckPredicate, 22,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BFC), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 33,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 17, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BICri), 0,
- 1, MVT::i32, 5, 0, 3, 4, 5, 6,
- 30,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ANDri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 26,
- OPC_CheckPredicate, 22,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2BFC), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 33,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 1, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2BICri), 0,
- 1, MVT::i32, 5, 0, 3, 4, 5, 6,
- 0,
- 76,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 23,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ANDrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tAND), 0,
- 1, MVT::i32, 5, 2, 0, 1, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ANDrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 0,
- 21,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VANDd), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 21,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VANDq), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 94|128,3, ISD::SIGN_EXTEND_INREG,
- OPC_Scope, 81|128,2,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 80|128,1, ISD::OR,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 100, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REVSH), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREVSH), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REVSH), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 100, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckAndImm, 0|128,126|128,3,
- OPC_MoveChild, 0,
- OPC_CheckSame, 0,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REVSH), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREVSH), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REVSH), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 0,
- 120, ISD::ROTR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_Scope, 51,
- OPC_CheckValueType, MVT::i8,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 21,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SXTBr_rot), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 21,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SXTBr_rot), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 0,
- 51,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 21,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SXTHr_rot), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 21,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SXTHr_rot), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 0,
- 0,
- 0,
- 7|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_Scope, 64,
- OPC_CheckValueType, MVT::i8,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SXTBr), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tSXTB), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SXTBr), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 64,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SXTHr), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tSXTH), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SXTHr), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 0,
- 0,
- 115|128,4, ISD::SRA,
- OPC_Scope, 126|128,2,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 55,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULWB), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 55,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULWB), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 66,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 19,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULWT), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 19,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMULWT), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 0,
- 66,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRA,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 19,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULWT), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 19,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMULWT), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 0,
- 64,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 19,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULWB), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 19,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMULWB), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 0,
- 64,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 19,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULWB), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 19,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMULWB), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 0,
- 0,
- 30,
- OPC_RecordNode,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MOVs), 0,
- 1, MVT::i32, 6, 1, 2, 3, 4, 5, 6,
- 79,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::MUL,
- OPC_RecordChild0,
- OPC_Scope, 35,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULWB), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 35,
- OPC_MoveChild, 0,
- OPC_CheckPredicate, 10,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 16,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMULWB), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 0,
- 0|128,1,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 68,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 30,
- OPC_CheckPredicate, 24,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ASRri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 28,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tASRri), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 0,
- 54,
- OPC_CheckChild1Type, MVT::i32,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 23,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tASRrr), 0,
- 1, MVT::i32, 5, 2, 0, 1, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ASRrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 0,
- 0,
- 0,
- 17|128,1, ARMISD::BR_JT,
- OPC_RecordNode,
- OPC_Scope, 89,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 47, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_CheckPredicate, 25,
- OPC_CheckPredicate, 26,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::TargetJumpTable,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/2, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BR_JTm), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 5, 6, 7, 3, 8,
- 34, ISD::ADD,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::TargetJumpTable,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BR_JTadd), 0|OPFL_Chain,
- 0, 4, 1, 2, 3, 5,
- 0,
- 51,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::TargetJumpTable,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_Scope, 16,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BR_JTr), 0|OPFL_Chain,
- 0, 3, 1, 2, 4,
- 16,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBR_JTr), 0|OPFL_Chain,
- 0, 3, 1, 2, 4,
- 0,
- 0,
- 87|128,17, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_Scope, 60|128,15,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_CheckPredicate, 25,
- OPC_Scope, 29,
- OPC_CheckPredicate, 26,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/3, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PICLDR), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 62,
- OPC_CheckPredicate, 27,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 27,
- OPC_CheckPredicate, 28,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/3, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PICLDRH), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 27,
- OPC_CheckPredicate, 29,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/3, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PICLDRB), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 0,
- 119,
- OPC_CheckPredicate, 30,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 27,
- OPC_CheckPredicate, 31,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/3, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PICLDRSH), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 56,
- OPC_CheckPredicate, 32,
- OPC_Scope, 25,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/3, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PICLDRSB), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 25,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/4, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLDRSB), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 0,
- 27,
- OPC_CheckPredicate, 31,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/4, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLDRSH), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 0,
- 30,
- OPC_CheckPredicate, 26,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::LDR), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 64,
- OPC_CheckPredicate, 27,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 28,
- OPC_CheckPredicate, 28,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/5, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::LDRH), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 28,
- OPC_CheckPredicate, 29,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::LDRB), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 0,
- 64,
- OPC_CheckPredicate, 30,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 28,
- OPC_CheckPredicate, 31,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/5, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::LDRSH), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 28,
- OPC_CheckPredicate, 32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/5, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::LDRSB), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 0,
- 32,
- OPC_CheckPredicate, 27,
- OPC_CheckPredicate, 33,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::LDRB), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 93,
- OPC_CheckPredicate, 34,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 28,
- OPC_CheckPredicate, 35,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::LDRB), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 28,
- OPC_CheckPredicate, 36,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::LDRB), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 28,
- OPC_CheckPredicate, 37,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/5, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::LDRH), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 0,
- 30,
- OPC_CheckPredicate, 26,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/6, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLDR), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 93,
- OPC_CheckPredicate, 27,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 28,
- OPC_CheckPredicate, 29,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/7, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLDRB), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 28,
- OPC_CheckPredicate, 28,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/8, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLDRH), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 28,
- OPC_CheckPredicate, 33,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/7, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLDRB), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 0,
- 93,
- OPC_CheckPredicate, 34,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 28,
- OPC_CheckPredicate, 35,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/7, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLDRB), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 28,
- OPC_CheckPredicate, 36,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/7, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLDRB), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 28,
- OPC_CheckPredicate, 37,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/8, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLDRH), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 0,
- 30,
- OPC_CheckPredicate, 26,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/9, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRs), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 64,
- OPC_CheckPredicate, 27,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 28,
- OPC_CheckPredicate, 28,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/9, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRHs), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 28,
- OPC_CheckPredicate, 29,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/9, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRBs), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 0,
- 64,
- OPC_CheckPredicate, 30,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 28,
- OPC_CheckPredicate, 31,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/9, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRSHs), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 28,
- OPC_CheckPredicate, 32,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/9, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRSBs), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 0,
- 32,
- OPC_CheckPredicate, 27,
- OPC_CheckPredicate, 33,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/9, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRBs), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 93,
- OPC_CheckPredicate, 34,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 28,
- OPC_CheckPredicate, 35,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/9, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRBs), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 28,
- OPC_CheckPredicate, 36,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/9, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRBs), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 28,
- OPC_CheckPredicate, 37,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/9, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRHs), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 0,
- 6|128,2,
- OPC_CheckPredicate, 30,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 47,
- OPC_CheckPredicate, 32,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/7, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::tLDRB), 0|OPFL_Chain,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::tSXTB), 0|OPFL_MemRefs,
- 1, MVT::i32, 3, 7, 8, 9,
- OPC_CompleteMatch, 1, 10,
-
- 47,
- OPC_CheckPredicate, 31,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/8, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::tLDRH), 0|OPFL_Chain,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::tSXTH), 0|OPFL_MemRefs,
- 1, MVT::i32, 3, 7, 8, 9,
- OPC_CompleteMatch, 1, 10,
-
- 79,
- OPC_CheckPredicate, 32,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/7, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::tLDRB), 0|OPFL_Chain,
- 1, MVT::i32, 5, 2, 3, 4, 7, 8,
- OPC_EmitInteger, MVT::i32, 24,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::tLSLri), 0,
- 1, MVT::i32, 5, 6, 9, 10, 11, 12,
- OPC_EmitInteger, MVT::i32, 24,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::tASRri), 0|OPFL_MemRefs,
- 1, MVT::i32, 5, 5, 13, 14, 15, 16,
- OPC_CompleteMatch, 1, 17,
-
- 79,
- OPC_CheckPredicate, 31,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/7, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::tLDRH), 0|OPFL_Chain,
- 1, MVT::i32, 5, 2, 3, 4, 7, 8,
- OPC_EmitInteger, MVT::i32, 16,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::tLSLri), 0,
- 1, MVT::i32, 5, 6, 9, 10, 11, 12,
- OPC_EmitInteger, MVT::i32, 16,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::tASRri), 0|OPFL_MemRefs,
- 1, MVT::i32, 5, 5, 13, 14, 15, 16,
- OPC_CompleteMatch, 1, 17,
-
- 0,
- 62,
- OPC_CheckPredicate, 34,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 27,
- OPC_CheckPredicate, 36,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/3, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PICLDRB), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 27,
- OPC_CheckPredicate, 37,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/3, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PICLDRH), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 0,
- 85,
- OPC_CheckPredicate, 26,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 25,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/10, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLDRspi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 52,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 23,
- OPC_CheckComplexPat, /*CP*/11, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRi12), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 23,
- OPC_CheckComplexPat, /*CP*/12, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 0,
- 0,
- 116,
- OPC_CheckPredicate, 27,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 54,
- OPC_CheckPredicate, 28,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 23,
- OPC_CheckComplexPat, /*CP*/11, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRHi12), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 23,
- OPC_CheckComplexPat, /*CP*/12, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRHi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 0,
- 54,
- OPC_CheckPredicate, 29,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 23,
- OPC_CheckComplexPat, /*CP*/11, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRBi12), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 23,
- OPC_CheckComplexPat, /*CP*/12, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRBi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 0,
- 0,
- 116,
- OPC_CheckPredicate, 30,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 54,
- OPC_CheckPredicate, 31,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 23,
- OPC_CheckComplexPat, /*CP*/11, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRSHi12), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 23,
- OPC_CheckComplexPat, /*CP*/12, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRSHi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 0,
- 54,
- OPC_CheckPredicate, 32,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 23,
- OPC_CheckComplexPat, /*CP*/11, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRSBi12), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 23,
- OPC_CheckComplexPat, /*CP*/12, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRSBi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 0,
- 0,
- 58,
- OPC_CheckPredicate, 27,
- OPC_CheckPredicate, 33,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 23,
- OPC_CheckComplexPat, /*CP*/11, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRBi12), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 23,
- OPC_CheckComplexPat, /*CP*/12, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRBi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 0,
- 43|128,1,
- OPC_CheckPredicate, 34,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 54,
- OPC_CheckPredicate, 35,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 23,
- OPC_CheckComplexPat, /*CP*/11, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRBi12), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 23,
- OPC_CheckComplexPat, /*CP*/12, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRBi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 0,
- 54,
- OPC_CheckPredicate, 36,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 23,
- OPC_CheckComplexPat, /*CP*/11, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRBi12), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 23,
- OPC_CheckComplexPat, /*CP*/12, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRBi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 0,
- 54,
- OPC_CheckPredicate, 37,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 23,
- OPC_CheckComplexPat, /*CP*/11, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRHi12), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 23,
- OPC_CheckComplexPat, /*CP*/12, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRHi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 0,
- 0,
- 85,
- OPC_CheckPredicate, 26,
- OPC_SwitchType , 25, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/13, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VLDRD), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 4, 2, 3, 4, 5,
- 25, MVT::f32,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/13, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VLDRS), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 4, 2, 3, 4, 5,
- 25, MVT::v2f64,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/14, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VLDRQ), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 4, 2, 3, 4, 5,
- 0,
- 0,
- 19|128,2,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::Wrapper,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::TargetConstantPool,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 25,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 48,
- OPC_CheckPredicate, 26,
- OPC_Scope, 21,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLDRpci), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 3, 1, 2, 3,
- 21,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRpci), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 3, 1, 2, 3,
- 0,
- 52,
- OPC_CheckPredicate, 27,
- OPC_Scope, 23,
- OPC_CheckPredicate, 28,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRHpci), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 3, 1, 2, 3,
- 23,
- OPC_CheckPredicate, 29,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRBpci), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 3, 1, 2, 3,
- 0,
- 52,
- OPC_CheckPredicate, 30,
- OPC_Scope, 23,
- OPC_CheckPredicate, 31,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRSHpci), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 3, 1, 2, 3,
- 23,
- OPC_CheckPredicate, 32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRSBpci), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 3, 1, 2, 3,
- 0,
- 25,
- OPC_CheckPredicate, 27,
- OPC_CheckPredicate, 33,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRBpci), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 3, 1, 2, 3,
- 76,
- OPC_CheckPredicate, 34,
- OPC_Scope, 23,
- OPC_CheckPredicate, 35,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRBpci), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 3, 1, 2, 3,
- 23,
- OPC_CheckPredicate, 36,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRBpci), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 3, 1, 2, 3,
- 23,
- OPC_CheckPredicate, 37,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRHpci), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 3, 1, 2, 3,
- 0,
- 0,
- 0,
- 83|128,8, ISD::STORE,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_Scope, 99|128,7,
- OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_Scope, 115|128,2,
- OPC_CheckChild2Type, MVT::i32,
- OPC_CheckPredicate, 38,
- OPC_Scope, 27,
- OPC_CheckPredicate, 39,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/3, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PICSTR), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 1, 3, 4, 5, 6,
- 60,
- OPC_CheckPredicate, 40,
- OPC_Scope, 27,
- OPC_CheckPredicate, 41,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/3, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PICSTRH), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 1, 3, 4, 5, 6,
- 27,
- OPC_CheckPredicate, 42,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/3, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PICSTRB), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 1, 3, 4, 5, 6,
- 0,
- 28,
- OPC_CheckPredicate, 39,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/2, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::STR), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 1, 3, 4, 5, 6, 7,
- 62,
- OPC_CheckPredicate, 40,
- OPC_Scope, 28,
- OPC_CheckPredicate, 41,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/5, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::STRH), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 1, 3, 4, 5, 6, 7,
- 28,
- OPC_CheckPredicate, 42,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/2, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::STRB), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 1, 3, 4, 5, 6, 7,
- 0,
- 28,
- OPC_CheckPredicate, 39,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/6, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tSTR), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 1, 3, 4, 5, 6, 7,
- 62,
- OPC_CheckPredicate, 40,
- OPC_Scope, 28,
- OPC_CheckPredicate, 42,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/7, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tSTRB), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 1, 3, 4, 5, 6, 7,
- 28,
- OPC_CheckPredicate, 41,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/8, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tSTRH), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 1, 3, 4, 5, 6, 7,
- 0,
- 28,
- OPC_CheckPredicate, 39,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/9, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2STRs), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 1, 3, 4, 5, 6, 7,
- 62,
- OPC_CheckPredicate, 40,
- OPC_Scope, 28,
- OPC_CheckPredicate, 42,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/9, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2STRBs), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 1, 3, 4, 5, 6, 7,
- 28,
- OPC_CheckPredicate, 41,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/9, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2STRHs), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 1, 3, 4, 5, 6, 7,
- 0,
- 0,
- 78|128,1,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::i32,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 64,
- OPC_CheckPredicate, 43,
- OPC_Scope, 29,
- OPC_CheckPredicate, 44,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/15, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::STR_PRE), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 1, 2, 4, 5, 6, 7,
- 29,
- OPC_CheckPredicate, 45,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/15, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::STR_POST), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 1, 2, 4, 5, 6, 7,
- 0,
- 4|128,1,
- OPC_CheckPredicate, 46,
- OPC_Scope, 31,
- OPC_CheckPredicate, 47,
- OPC_CheckPredicate, 48,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/16, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::STRH_PRE), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 1, 2, 4, 5, 6, 7,
- 31,
- OPC_CheckPredicate, 49,
- OPC_CheckPredicate, 50,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/16, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::STRH_POST), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 1, 2, 4, 5, 6, 7,
- 31,
- OPC_CheckPredicate, 47,
- OPC_CheckPredicate, 51,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/15, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::STRB_PRE), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 1, 2, 4, 5, 6, 7,
- 31,
- OPC_CheckPredicate, 49,
- OPC_CheckPredicate, 52,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/15, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::STRB_POST), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 1, 2, 4, 5, 6, 7,
- 0,
- 0,
- 77|128,1,
- OPC_CheckChild2Type, MVT::i32,
- OPC_CheckPredicate, 38,
- OPC_Scope, 83,
- OPC_CheckPredicate, 39,
- OPC_Scope, 25,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/10, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tSTRspi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 1, 3, 4, 5, 6,
- 52,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 23,
- OPC_CheckComplexPat, /*CP*/11, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2STRi12), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 1, 3, 4, 5, 6,
- 23,
- OPC_CheckComplexPat, /*CP*/12, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2STRi8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 1, 3, 4, 5, 6,
- 0,
- 0,
- 114,
- OPC_CheckPredicate, 40,
- OPC_Scope, 54,
- OPC_CheckPredicate, 42,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 23,
- OPC_CheckComplexPat, /*CP*/11, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2STRBi12), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 1, 3, 4, 5, 6,
- 23,
- OPC_CheckComplexPat, /*CP*/12, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2STRBi8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 1, 3, 4, 5, 6,
- 0,
- 54,
- OPC_CheckPredicate, 41,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 23,
- OPC_CheckComplexPat, /*CP*/11, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2STRHi12), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 1, 3, 4, 5, 6,
- 23,
- OPC_CheckComplexPat, /*CP*/12, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2STRHi8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 1, 3, 4, 5, 6,
- 0,
- 0,
- 0,
- 72|128,1,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::i32,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 62,
- OPC_CheckPredicate, 43,
- OPC_Scope, 28,
- OPC_CheckPredicate, 44,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/17, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2STR_PRE), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 1, 2, 4, 5, 6,
- 28,
- OPC_CheckPredicate, 45,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/17, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2STR_POST), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 1, 2, 4, 5, 6,
- 0,
- 0|128,1,
- OPC_CheckPredicate, 46,
- OPC_Scope, 30,
- OPC_CheckPredicate, 47,
- OPC_CheckPredicate, 48,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/17, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2STRH_PRE), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 1, 2, 4, 5, 6,
- 30,
- OPC_CheckPredicate, 49,
- OPC_CheckPredicate, 50,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/17, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2STRH_POST), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 1, 2, 4, 5, 6,
- 30,
- OPC_CheckPredicate, 47,
- OPC_CheckPredicate, 51,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/17, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2STRB_PRE), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 1, 2, 4, 5, 6,
- 30,
- OPC_CheckPredicate, 49,
- OPC_CheckPredicate, 52,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/17, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2STRB_POST), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 1, 2, 4, 5, 6,
- 0,
- 0,
- 0,
- 34,
- OPC_CheckChild1Type, MVT::f64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::i32,
- OPC_CheckPredicate, 38,
- OPC_CheckPredicate, 39,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/13, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSTRD), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 1, 3, 4, 5, 6,
- 34,
- OPC_CheckChild1Type, MVT::f32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::i32,
- OPC_CheckPredicate, 38,
- OPC_CheckPredicate, 39,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/13, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSTRS), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 1, 3, 4, 5, 6,
- 34,
- OPC_CheckChild1Type, MVT::v2f64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::i32,
- OPC_CheckPredicate, 38,
- OPC_CheckPredicate, 39,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/14, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSTRQ), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 1, 3, 4, 5, 6,
- 0,
- 71|128,10, ARMISD::CMPZ,
- OPC_Scope, 74,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 33, ISD::AND,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::TSTrs), 0|OPFL_FlagOutput,
- 0, 6, 0, 2, 3, 4, 5, 6,
- 33, ISD::XOR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::TEQrs), 0|OPFL_FlagOutput,
- 0, 6, 0, 2, 3, 4, 5, 6,
- 0,
- 37,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i32,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::CMNzrs), 0|OPFL_FlagOutput,
- 0, 6, 0, 2, 3, 4, 5, 6,
- 109,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 33, ISD::AND,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::TSTrs), 0|OPFL_FlagOutput,
- 0, 6, 1, 2, 3, 4, 5, 6,
- 33, ISD::XOR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::TEQrs), 0|OPFL_FlagOutput,
- 0, 6, 1, 2, 3, 4, 5, 6,
- 33, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::CMNzrs), 0|OPFL_FlagOutput,
- 0, 6, 1, 2, 3, 4, 5, 6,
- 0,
- 36,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i32,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2CMNzrs), 0|OPFL_FlagOutput,
- 0, 5, 0, 2, 3, 4, 5,
- 104|128,1,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 95, ISD::AND,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 54,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 20,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2TSTrs), 0|OPFL_FlagOutput,
- 0, 5, 0, 2, 3, 4, 5,
- 20,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2TSTrs), 0|OPFL_FlagOutput,
- 0, 5, 1, 2, 3, 4, 5,
- 0,
- 35,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::TSTri), 0|OPFL_FlagOutput,
- 0, 4, 0, 2, 3, 4,
- 0,
- 95, ISD::XOR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 54,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 20,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2TEQrs), 0|OPFL_FlagOutput,
- 0, 5, 0, 2, 3, 4, 5,
- 20,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2TEQrs), 0|OPFL_FlagOutput,
- 0, 5, 1, 2, 3, 4, 5,
- 0,
- 35,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::TEQri), 0|OPFL_FlagOutput,
- 0, 4, 0, 2, 3, 4,
- 0,
- 32, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2CMNzrs), 0|OPFL_FlagOutput,
- 0, 5, 1, 2, 3, 4, 5,
- 0,
- 97,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i32,
- OPC_Scope, 24,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::CMPzrs), 0|OPFL_FlagOutput,
- 0, 6, 0, 2, 3, 4, 5, 6,
- 66,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 24,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::CMNzri), 0|OPFL_FlagOutput,
- 0, 4, 0, 2, 3, 4,
- 24,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2CMNzri), 0|OPFL_FlagOutput,
- 0, 4, 0, 2, 3, 4,
- 0,
- 0,
- 82,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 37, ISD::AND,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2TSTri), 0|OPFL_FlagOutput,
- 0, 4, 0, 2, 3, 4,
- 37, ISD::XOR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2TEQri), 0|OPFL_FlagOutput,
- 0, 4, 0, 2, 3, 4,
- 0,
- 27,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::CMPzrs), 0|OPFL_FlagOutput,
- 0, 6, 1, 2, 3, 4, 5, 6,
- 72,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 27,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::CMNzri), 0|OPFL_FlagOutput,
- 0, 4, 1, 2, 3, 4,
- 27,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2CMNzri), 0|OPFL_FlagOutput,
- 0, 4, 1, 2, 3, 4,
- 0,
- 50,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 20,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2CMPzrs), 0|OPFL_FlagOutput,
- 0, 5, 0, 2, 3, 4, 5,
- 20,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2CMPzrs), 0|OPFL_FlagOutput,
- 0, 5, 1, 2, 3, 4, 5,
- 0,
- 64,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 28, ISD::AND,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::TSTrr), 0|OPFL_FlagOutput,
- 0, 4, 0, 1, 2, 3,
- 28, ISD::XOR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::TEQrr), 0|OPFL_FlagOutput,
- 0, 4, 0, 1, 2, 3,
- 0,
- 54,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i32,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::CMNzrr), 0|OPFL_FlagOutput,
- 0, 4, 0, 1, 2, 3,
- 18,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tCMNz), 0|OPFL_FlagOutput,
- 0, 4, 0, 1, 2, 3,
- 0,
- 32,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::AND,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tTST), 0|OPFL_FlagOutput,
- 0, 4, 0, 1, 2, 3,
- 32,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i32,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2CMNzrr), 0|OPFL_FlagOutput,
- 0, 4, 0, 1, 2, 3,
- 7|128,1,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 28, ISD::AND,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2TSTrr), 0|OPFL_FlagOutput,
- 0, 4, 0, 1, 2, 3,
- 28, ISD::XOR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2TEQrr), 0|OPFL_FlagOutput,
- 0, 4, 0, 1, 2, 3,
- 69, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::CMNzrr), 0|OPFL_FlagOutput,
- 0, 4, 1, 0, 2, 3,
- 18,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tCMNz), 0|OPFL_FlagOutput,
- 0, 4, 1, 0, 2, 3,
- 18,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2CMNzrr), 0|OPFL_FlagOutput,
- 0, 4, 1, 0, 2, 3,
- 0,
- 0,
- 69|128,1,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i32,
- OPC_RecordChild1,
- OPC_Scope, 4|128,1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 23,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::CMPzri), 0|OPFL_FlagOutput,
- 0, 4, 0, 2, 3, 4,
- 26,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 11, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::CMNzri), 0|OPFL_FlagOutput,
- 0, 4, 0, 3, 4, 5,
- 23,
- OPC_CheckPredicate, 53,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tCMPzi8), 0|OPFL_FlagOutput,
- 0, 4, 0, 2, 3, 4,
- 23,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2CMPzri), 0|OPFL_FlagOutput,
- 0, 4, 0, 2, 3, 4,
- 26,
- OPC_CheckPredicate, 18,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 12, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2CMNzri), 0|OPFL_FlagOutput,
- 0, 4, 0, 3, 4, 5,
- 0,
- 18,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::CMPzrr), 0|OPFL_FlagOutput,
- 0, 4, 0, 1, 2, 3,
- 18,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tCMPzr), 0|OPFL_FlagOutput,
- 0, 4, 0, 1, 2, 3,
- 18,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2CMPzrr), 0|OPFL_FlagOutput,
- 0, 4, 0, 1, 2, 3,
- 0,
- 0,
- 45|128,2, ISD::INTRINSIC_W_CHAIN,
- OPC_RecordNode,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::i32,
- OPC_SwitchType , 27, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VLD1d8), 0|OPFL_Chain,
- 1, MVT::v8i8, 6, 2, 3, 4, 5, 6, 7,
- 27, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VLD1d16), 0|OPFL_Chain,
- 1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
- 27, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VLD1d32), 0|OPFL_Chain,
- 1, MVT::v2i32, 6, 2, 3, 4, 5, 6, 7,
- 27, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VLD1df), 0|OPFL_Chain,
- 1, MVT::v2f32, 6, 2, 3, 4, 5, 6, 7,
- 27, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VLD1d64), 0|OPFL_Chain,
- 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
- 27, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VLD1q8), 0|OPFL_Chain,
- 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
- 27, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VLD1q16), 0|OPFL_Chain,
- 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
- 27, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VLD1q32), 0|OPFL_Chain,
- 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
- 27, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VLD1qf), 0|OPFL_Chain,
- 1, MVT::v4f32, 6, 2, 3, 4, 5, 6, 7,
- 27, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VLD1q64), 0|OPFL_Chain,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 56|128,2, ISD::INTRINSIC_VOID,
- OPC_RecordNode,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 105,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::i32,
- OPC_RecordChild3,
- OPC_Scope, 29,
- OPC_CheckChild3Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VST1d8), 0|OPFL_Chain,
- 0, 7, 3, 4, 5, 6, 2, 7, 8,
- 29,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VST1d16), 0|OPFL_Chain,
- 0, 7, 3, 4, 5, 6, 2, 7, 8,
- 29,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VST1d32), 0|OPFL_Chain,
- 0, 7, 3, 4, 5, 6, 2, 7, 8,
- 29,
- OPC_CheckChild3Type, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VST1df), 0|OPFL_Chain,
- 0, 7, 3, 4, 5, 6, 2, 7, 8,
- 29,
- OPC_CheckChild3Type, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VST1d64), 0|OPFL_Chain,
- 0, 7, 3, 4, 5, 6, 2, 7, 8,
- 29,
- OPC_CheckChild3Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VST1q8), 0|OPFL_Chain,
- 0, 7, 3, 4, 5, 6, 2, 7, 8,
- 29,
- OPC_CheckChild3Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VST1q16), 0|OPFL_Chain,
- 0, 7, 3, 4, 5, 6, 2, 7, 8,
- 29,
- OPC_CheckChild3Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VST1q32), 0|OPFL_Chain,
- 0, 7, 3, 4, 5, 6, 2, 7, 8,
- 29,
- OPC_CheckChild3Type, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VST1qf), 0|OPFL_Chain,
- 0, 7, 3, 4, 5, 6, 2, 7, 8,
- 29,
- OPC_CheckChild3Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/18, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VST1q64), 0|OPFL_Chain,
- 0, 7, 3, 4, 5, 6, 2, 7, 8,
- 0,
- 5|128,7, ISD::XOR,
- OPC_Scope, 55|128,1,
- OPC_RecordChild0,
- OPC_Scope, 69,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 27,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MVNs), 0,
- 1, MVT::i32, 6, 1, 2, 3, 4, 5, 6,
- 22,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MVNs), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 0,
- 61,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_Scope, 26,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::EORrs), 0,
- 1, MVT::i32, 7, 0, 2, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::EORrs), 0,
- 1, MVT::i32, 7, 1, 2, 3, 4, 5, 6, 7,
- 0,
- 47,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MVNi), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 0,
- 48,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MVNi), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 56|128,2,
- OPC_RecordChild0,
- OPC_Scope, 59,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 25,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2EORrs), 0,
- 1, MVT::i32, 6, 0, 2, 3, 4, 5, 6,
- 25,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2EORrs), 0,
- 1, MVT::i32, 6, 1, 2, 3, 4, 5, 6,
- 0,
- 83,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MVNr), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 22,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MVNr), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 22,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tMVN), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 0,
- 69,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 30,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::EORri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 30,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2EORri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 0,
- 94,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 45, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 54,
- OPC_MoveParent,
- OPC_SwitchType , 18, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMVNd), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 18, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMVNq), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 0,
- 41, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_SwitchType , 16, MVT::v2i32,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMVNd), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 16, MVT::v4i32,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMVNq), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 0,
- 0,
- 0,
- 96,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 46, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 54,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 18, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMVNd), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 18, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMVNq), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 0,
- 42, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 16, MVT::v2i32,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMVNd), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 16, MVT::v4i32,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMVNq), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 0,
- 0,
- 124|128,1,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 126,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 59,
- OPC_CheckPredicate, 7,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 2, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::EORri), 0,
- 1, MVT::i32, 5, 0, 3, 4, 5, 6,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 3, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::EORri), 0,
- 1, MVT::i32, 5, 7, 9, 10, 11, 12,
- 59,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 4, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::t2EORri), 0,
- 1, MVT::i32, 5, 0, 3, 4, 5, 6,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 5, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2EORri), 0,
- 1, MVT::i32, 5, 7, 9, 10, 11, 12,
- 0,
- 76,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 23,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::EORrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tEOR), 0,
- 1, MVT::i32, 5, 2, 0, 1, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2EORrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 0,
- 21,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VEORd), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 21,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VEORq), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 106|128,3, ISD::ADDE,
- OPC_CaptureFlagInput,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 32,
- OPC_CheckPredicate, 55,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADCrs), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 7, 0, 2, 3, 4, 5, 6, 7,
- 20,
- OPC_CheckPredicate, 56,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADCSSrs), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 32,
- OPC_CheckPredicate, 55,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADCrs), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 7, 1, 2, 3, 4, 5, 6, 7,
- 20,
- OPC_CheckPredicate, 56,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADCSSrs), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 31,
- OPC_CheckPredicate, 55,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADCrs), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 6, 0, 2, 3, 4, 5, 6,
- 31,
- OPC_CheckPredicate, 56,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADCSrs), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 6, 0, 2, 3, 4, 5, 6,
- 31,
- OPC_CheckPredicate, 55,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADCrs), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 6, 1, 2, 3, 4, 5, 6,
- 31,
- OPC_CheckPredicate, 56,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADCSrs), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 6, 1, 2, 3, 4, 5, 6,
- 122,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 51,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 27,
- OPC_CheckPredicate, 55,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADCri), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 15,
- OPC_CheckPredicate, 56,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADCSSri), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 2,
- 0,
- 63,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 27,
- OPC_CheckPredicate, 55,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADCri), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 27,
- OPC_CheckPredicate, 56,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADCSri), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 0,
- 0,
- 27,
- OPC_CheckPredicate, 55,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADCrr), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 15,
- OPC_CheckPredicate, 56,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADCSSrr), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 1,
- 27,
- OPC_CheckPredicate, 55,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADCrr), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 27,
- OPC_CheckPredicate, 56,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADCSrr), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 25,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tADC), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 5, 2, 0, 1, 3, 4,
- 0,
- 96|128,3, ISD::SUBE,
- OPC_CaptureFlagInput,
- OPC_RecordChild0,
- OPC_Scope, 96|128,1,
- OPC_RecordChild1,
- OPC_Scope, 32,
- OPC_CheckPredicate, 57,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SBCrs), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 7, 0, 2, 3, 4, 5, 6, 7,
- 20,
- OPC_CheckPredicate, 58,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SBCSSrs), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 78,
- OPC_CheckPredicate, 57,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 43,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_Scope, 23,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::RSCrs), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 7, 1, 2, 3, 4, 5, 6, 7,
- 11,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::RSCSrs), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 0,
- 27,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SBCrs), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 6, 0, 2, 3, 4, 5, 6,
- 0,
- 31,
- OPC_CheckPredicate, 58,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SBCSrs), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 6, 0, 2, 3, 4, 5, 6,
- 55,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 27,
- OPC_CheckPredicate, 57,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SBCri), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 15,
- OPC_CheckPredicate, 58,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SBCSSri), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 2,
- 0,
- 0,
- 50,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckPredicate, 57,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 0,
- OPC_Scope, 21,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::RSCri), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 5, 1, 2, 3, 4, 5,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::RSCSri), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 2, 1, 2,
- 0,
- 69|128,1,
- OPC_RecordChild1,
- OPC_Scope, 67,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 27,
- OPC_CheckPredicate, 57,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SBCri), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 27,
- OPC_CheckPredicate, 58,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SBCSri), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 0,
- 27,
- OPC_CheckPredicate, 57,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SBCrr), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 15,
- OPC_CheckPredicate, 58,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SBCSSrr), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 1,
- 27,
- OPC_CheckPredicate, 57,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SBCrr), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 27,
- OPC_CheckPredicate, 58,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SBCSrr), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 25,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tSBC), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 5, 2, 0, 1, 3, 4,
- 0,
- 0,
- 118, ARMISD::PIC_ADD,
- OPC_Scope, 67,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::Wrapper,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::TargetConstantPool,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 25,
- OPC_CheckPredicate, 26,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 16,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLDRpci_pic), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 3,
- 16,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LDRpci_pic), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 3,
- 0,
- 47,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 21,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::PICADD), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 13,
- OPC_CheckPatternPredicate, 9,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tPICADD), 0,
- 1, MVT::i32, 2, 0, 2,
- 0,
- 0,
- 50|128,12, ISD::SUB,
- OPC_Scope, 113|128,4,
- OPC_RecordChild0,
- OPC_Scope, 117,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 58,
- OPC_CheckPatternPredicate, 5,
- OPC_Scope, 26,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SUBrs), 0,
- 1, MVT::i32, 7, 0, 2, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::RSBrs), 0,
- 1, MVT::i32, 7, 1, 2, 3, 4, 5, 6, 7,
- 0,
- 52,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 25,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SUBrs), 0,
- 1, MVT::i32, 6, 0, 2, 3, 4, 5, 6,
- 21,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2RSBrs), 0,
- 1, MVT::i32, 5, 1, 2, 3, 4, 5,
- 0,
- 0,
- 118|128,3,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::MUL,
- OPC_Scope, 6|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 62,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 23, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslv4i16), 0,
- 1, MVT::v4i16, 6, 0, 1, 2, 4, 5, 6,
- 23, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslv8i16), 0,
- 1, MVT::v8i16, 6, 0, 1, 2, 4, 5, 6,
- 0,
- 62,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 23, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslv2i32), 0,
- 1, MVT::v2i32, 6, 0, 1, 2, 4, 5, 6,
- 23, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslv4i32), 0,
- 1, MVT::v4i32, 6, 0, 1, 2, 4, 5, 6,
- 0,
- 0,
- 7|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 63,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_SwitchType , 23, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslv4i16), 0,
- 1, MVT::v4i16, 6, 0, 3, 1, 4, 5, 6,
- 23, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslv8i16), 0,
- 1, MVT::v8i16, 6, 0, 3, 1, 4, 5, 6,
- 0,
- 63,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_SwitchType , 23, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslv2i32), 0,
- 1, MVT::v2i32, 6, 0, 3, 1, 4, 5, 6,
- 23, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslv4i32), 0,
- 1, MVT::v4i32, 6, 0, 3, 1, 4, 5, 6,
- 0,
- 0,
- 110,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 50,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitNodeXForm, 6, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v4i16, 2, 2, 5,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitNodeXForm, 7, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslv8i16), 0,
- 1, MVT::v8i16, 6, 0, 1, 6, 8, 9, 10,
- 50,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitNodeXForm, 8, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2i32, 2, 2, 5,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitNodeXForm, 9, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslv4i32), 0,
- 1, MVT::v4i32, 6, 0, 1, 6, 8, 9, 10,
- 0,
- 111,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 51,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 6, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v4i16, 2, 1, 5,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 7, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslv8i16), 0,
- 1, MVT::v8i16, 6, 0, 3, 6, 8, 9, 10,
- 51,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 8, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2i32, 2, 1, 5,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 9, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslv4i32), 0,
- 1, MVT::v4i32, 6, 0, 3, 6, 8, 9, 10,
- 0,
- 0,
- 0,
- 30,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tRSB), 0,
- 1, MVT::i32, 4, 1, 0, 2, 3,
- 49|128,1,
- OPC_RecordChild0,
- OPC_Scope, 35,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SUBri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 35,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::RSBri), 0,
- 1, MVT::i32, 5, 1, 2, 3, 4, 5,
- 69,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 30,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SUBri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 30,
- OPC_CheckPredicate, 17,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SUBri12), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 0,
- 31,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2RSBri), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 0,
- 120|128,1,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 126, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 59,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 18, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNEGs8d), 0,
- 1, MVT::v8i8, 3, 0, 1, 2,
- 18, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNEGs16d), 0,
- 1, MVT::v4i16, 3, 0, 1, 2,
- 18, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNEGs32d), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 18, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNEGs8q), 0,
- 1, MVT::v16i8, 3, 0, 1, 2,
- 18, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNEGs16q), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 18, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNEGs32q), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 0,
- 114, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 60,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 16, MVT::v8i8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNEGs8d), 0,
- 1, MVT::v8i8, 3, 0, 1, 2,
- 16, MVT::v4i16,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNEGs16d), 0,
- 1, MVT::v4i16, 3, 0, 1, 2,
- 16, MVT::v2i32,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNEGs32d), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 16, MVT::v16i8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNEGs8q), 0,
- 1, MVT::v16i8, 3, 0, 1, 2,
- 16, MVT::v8i16,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNEGs16q), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 16, MVT::v4i32,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNEGs32q), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 0,
- 0,
- 111|128,3,
- OPC_RecordChild0,
- OPC_Scope, 113|128,1,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 55|128,1, ISD::MUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_SwitchType , 44, MVT::i32,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MLS), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 20,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MLS), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 0,
- 20, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSv8i8), 0,
- 1, MVT::v8i8, 5, 0, 1, 2, 3, 4,
- 20, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSv4i16), 0,
- 1, MVT::v4i16, 5, 0, 1, 2, 3, 4,
- 20, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSv2i32), 0,
- 1, MVT::v2i32, 5, 0, 1, 2, 3, 4,
- 20, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSv16i8), 0,
- 1, MVT::v16i8, 5, 0, 1, 2, 3, 4,
- 20, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 2, 3, 4,
- 20, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 2, 3, 4,
- 0,
- 49, ISD::MULHS,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMMLS), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 20,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMMLS), 0,
- 1, MVT::i32, 5, 1, 2, 0, 3, 4,
- 0,
- 0,
- 119|128,1,
- OPC_RecordChild1,
- OPC_SwitchType , 74, MVT::i32,
- OPC_Scope, 23,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SUBrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tSUBrr), 0,
- 1, MVT::i32, 5, 2, 0, 1, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SUBrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 0,
- 19, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 19, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 19, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 19, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 19, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 19, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 19, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBv1i64), 0,
- 1, MVT::v1i64, 4, 0, 1, 2, 3,
- 19, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 0,
- 0,
- 114|128,2, ISD::ADDC,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 104,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 50,
- OPC_CheckPatternPredicate, 5,
- OPC_Scope, 22,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADDSrs), 0|OPFL_FlagOutput,
- 1, MVT::i32, 6, 0, 2, 3, 4, 5, 6,
- 22,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADDSrs), 0|OPFL_FlagOutput,
- 1, MVT::i32, 6, 1, 2, 3, 4, 5, 6,
- 0,
- 48,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 21,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADDSrs), 0|OPFL_FlagOutput,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 21,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADDSrs), 0|OPFL_FlagOutput,
- 1, MVT::i32, 5, 1, 2, 3, 4, 5,
- 0,
- 0,
- 62|128,1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 26,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADDSri), 0|OPFL_FlagOutput,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 30,
- OPC_CheckPredicate, 13,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tADDi3), 0|OPFL_FlagOutput,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 30,
- OPC_CheckPredicate, 14,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tADDi8), 0|OPFL_FlagOutput,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 33,
- OPC_CheckPredicate, 15,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 10, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tSUBi3), 0|OPFL_FlagOutput,
- 1, MVT::i32, 5, 2, 0, 4, 5, 6,
- 33,
- OPC_CheckPredicate, 16,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 10, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tSUBi8), 0|OPFL_FlagOutput,
- 1, MVT::i32, 5, 2, 0, 4, 5, 6,
- 26,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADDSri), 0|OPFL_FlagOutput,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 0,
- 68,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 19,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADDSrr), 0|OPFL_FlagOutput,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 23,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tADDrr), 0|OPFL_FlagOutput,
- 1, MVT::i32, 5, 2, 0, 1, 3, 4,
- 19,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2ADDSrr), 0|OPFL_FlagOutput,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 46|128,2, ISD::SUBC,
- OPC_RecordChild0,
- OPC_Scope, 7|128,1,
- OPC_RecordChild1,
- OPC_Scope, 100,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 50,
- OPC_CheckPatternPredicate, 5,
- OPC_Scope, 22,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SUBSrs), 0|OPFL_FlagOutput,
- 1, MVT::i32, 6, 0, 2, 3, 4, 5, 6,
- 22,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::RSBSrs), 0|OPFL_FlagOutput,
- 1, MVT::i32, 6, 1, 2, 3, 4, 5, 6,
- 0,
- 44,
- OPC_CheckPatternPredicate, 2,
- OPC_Scope, 21,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SUBSrs), 0|OPFL_FlagOutput,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 17,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2RSBSrs), 0|OPFL_FlagOutput,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 0,
- 0,
- 30,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SUBSri), 0|OPFL_FlagOutput,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 0,
- 31,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::RSBSri), 0|OPFL_FlagOutput,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 31,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SUBSri), 0|OPFL_FlagOutput,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 27,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2RSBSri), 0|OPFL_FlagOutput,
- 1, MVT::i32, 3, 1, 2, 3,
- 69,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 19,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SUBSrr), 0|OPFL_FlagOutput,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 23,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tSUBrr), 0|OPFL_FlagOutput,
- 1, MVT::i32, 5, 2, 0, 1, 3, 4,
- 19,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SUBSrr), 0|OPFL_FlagOutput,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 61|128,1, ARMISD::CMP,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i32,
- OPC_RecordChild1,
- OPC_Scope, 23,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::CMPrs), 0|OPFL_FlagOutput,
- 0, 6, 0, 2, 3, 4, 5, 6,
- 22,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2CMPrs), 0|OPFL_FlagOutput,
- 0, 5, 0, 2, 3, 4, 5,
- 78,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 23,
- OPC_CheckPredicate, 5,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::CMPri), 0|OPFL_FlagOutput,
- 0, 4, 0, 2, 3, 4,
- 23,
- OPC_CheckPredicate, 53,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tCMPi8), 0|OPFL_FlagOutput,
- 0, 4, 0, 2, 3, 4,
- 23,
- OPC_CheckPredicate, 4,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2CMPri), 0|OPFL_FlagOutput,
- 0, 4, 0, 2, 3, 4,
- 0,
- 18,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::CMPrr), 0|OPFL_FlagOutput,
- 0, 4, 0, 1, 2, 3,
- 18,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tCMPr), 0|OPFL_FlagOutput,
- 0, 4, 0, 1, 2, 3,
- 18,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2CMPrr), 0|OPFL_FlagOutput,
- 0, 4, 0, 1, 2, 3,
- 0,
- 47|128,111, ISD::INTRINSIC_WO_CHAIN,
- OPC_MoveChild, 0,
- OPC_Scope, 37|128,5,
- OPC_CheckInteger, 68,
- OPC_MoveParent,
- OPC_Scope, 47|128,1,
- OPC_RecordChild1,
- OPC_Scope, 42,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULHslv4i16), 0,
- 1, MVT::v4i16, 5, 0, 1, 3, 4, 5,
- 42,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULHslv2i32), 0,
- 1, MVT::v2i32, 5, 0, 1, 3, 4, 5,
- 42,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULHslv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 3, 4, 5,
- 42,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULHslv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 3, 4, 5,
- 0,
- 21|128,1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 70,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 28, MVT::v4i16,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULHslv4i16), 0,
- 1, MVT::v4i16, 5, 2, 0, 3, 4, 5,
- 28, MVT::v8i16,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULHslv8i16), 0,
- 1, MVT::v8i16, 5, 2, 0, 3, 4, 5,
- 0,
- 70,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 28, MVT::v2i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULHslv2i32), 0,
- 1, MVT::v2i32, 5, 2, 0, 3, 4, 5,
- 28, MVT::v4i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULHslv4i32), 0,
- 1, MVT::v4i32, 5, 2, 0, 3, 4, 5,
- 0,
- 0,
- 119,
- OPC_RecordChild1,
- OPC_Scope, 57,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 6, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v4i16, 2, 1, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 7, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULHslv8i16), 0,
- 1, MVT::v8i16, 5, 0, 5, 7, 8, 9,
- 57,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 8, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2i32, 2, 1, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 9, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULHslv4i32), 0,
- 1, MVT::v4i32, 5, 0, 5, 7, 8, 9,
- 0,
- 115,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 53,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckType, MVT::v8i16,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 6, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v4i16, 2, 0, 4,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 7, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULHslv8i16), 0,
- 1, MVT::v8i16, 5, 2, 5, 7, 8, 9,
- 53,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckType, MVT::v4i32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 8, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2i32, 2, 0, 4,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 9, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULHslv4i32), 0,
- 1, MVT::v4i32, 5, 2, 5, 7, 8, 9,
- 0,
- 107,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULHv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULHv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULHv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULHv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 37|128,5,
- OPC_CheckInteger, 74,
- OPC_MoveParent,
- OPC_Scope, 47|128,1,
- OPC_RecordChild1,
- OPC_Scope, 42,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRDMULHslv4i16), 0,
- 1, MVT::v4i16, 5, 0, 1, 3, 4, 5,
- 42,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRDMULHslv2i32), 0,
- 1, MVT::v2i32, 5, 0, 1, 3, 4, 5,
- 42,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRDMULHslv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 3, 4, 5,
- 42,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRDMULHslv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 3, 4, 5,
- 0,
- 21|128,1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 70,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 28, MVT::v4i16,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRDMULHslv4i16), 0,
- 1, MVT::v4i16, 5, 2, 0, 3, 4, 5,
- 28, MVT::v8i16,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRDMULHslv8i16), 0,
- 1, MVT::v8i16, 5, 2, 0, 3, 4, 5,
- 0,
- 70,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 28, MVT::v2i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRDMULHslv2i32), 0,
- 1, MVT::v2i32, 5, 2, 0, 3, 4, 5,
- 28, MVT::v4i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRDMULHslv4i32), 0,
- 1, MVT::v4i32, 5, 2, 0, 3, 4, 5,
- 0,
- 0,
- 119,
- OPC_RecordChild1,
- OPC_Scope, 57,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 6, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v4i16, 2, 1, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 7, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRDMULHslv8i16), 0,
- 1, MVT::v8i16, 5, 0, 5, 7, 8, 9,
- 57,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 8, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2i32, 2, 1, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 9, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRDMULHslv4i32), 0,
- 1, MVT::v4i32, 5, 0, 5, 7, 8, 9,
- 0,
- 115,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 53,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckType, MVT::v8i16,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 6, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v4i16, 2, 0, 4,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 7, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRDMULHslv8i16), 0,
- 1, MVT::v8i16, 5, 2, 5, 7, 8, 9,
- 53,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckType, MVT::v4i32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 8, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2i32, 2, 0, 4,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 9, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRDMULHslv4i32), 0,
- 1, MVT::v4i32, 5, 2, 5, 7, 8, 9,
- 0,
- 107,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRDMULHv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRDMULHv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRDMULHv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRDMULHv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 7|128,2,
- OPC_CheckInteger, 51,
- OPC_MoveParent,
- OPC_Scope, 89,
- OPC_RecordChild1,
- OPC_Scope, 42,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULLslsv4i16), 0,
- 1, MVT::v4i32, 5, 0, 1, 3, 4, 5,
- 42,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULLslsv2i32), 0,
- 1, MVT::v2i64, 5, 0, 1, 3, 4, 5,
- 0,
- 85,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 38,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULLslsv4i16), 0,
- 1, MVT::v4i32, 5, 2, 0, 3, 4, 5,
- 38,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULLslsv2i32), 0,
- 1, MVT::v2i64, 5, 2, 0, 3, 4, 5,
- 0,
- 81,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULLsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULLsv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULLsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 0,
- 0,
- 7|128,2,
- OPC_CheckInteger, 52,
- OPC_MoveParent,
- OPC_Scope, 89,
- OPC_RecordChild1,
- OPC_Scope, 42,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULLsluv4i16), 0,
- 1, MVT::v4i32, 5, 0, 1, 3, 4, 5,
- 42,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULLsluv2i32), 0,
- 1, MVT::v2i64, 5, 0, 1, 3, 4, 5,
- 0,
- 85,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 38,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULLsluv4i16), 0,
- 1, MVT::v4i32, 5, 2, 0, 3, 4, 5,
- 38,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULLsluv2i32), 0,
- 1, MVT::v2i64, 5, 2, 0, 3, 4, 5,
- 0,
- 81,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULLuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULLuv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULLuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 0,
- 0,
- 109|128,1,
- OPC_CheckInteger, 69,
- OPC_MoveParent,
- OPC_Scope, 89,
- OPC_RecordChild1,
- OPC_Scope, 42,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULLslv4i16), 0,
- 1, MVT::v4i32, 5, 0, 1, 3, 4, 5,
- 42,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULLslv2i32), 0,
- 1, MVT::v2i64, 5, 0, 1, 3, 4, 5,
- 0,
- 85,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 38,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULLslv4i16), 0,
- 1, MVT::v4i32, 5, 2, 0, 3, 4, 5,
- 38,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULLslv2i32), 0,
- 1, MVT::v2i64, 5, 2, 0, 3, 4, 5,
- 0,
- 55,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULLv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMULLv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 0,
- 116|128,2,
- OPC_CheckInteger, 43,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 75,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALslsv4i16), 0,
- 1, MVT::v4i32, 6, 0, 1, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALsv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALslsv2i32), 0,
- 1, MVT::v2i64, 6, 0, 1, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALsv2i64), 0,
- 1, MVT::v2i64, 5, 0, 1, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALslsv4i16), 0,
- 1, MVT::v4i32, 6, 1, 0, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALsv4i32), 0,
- 1, MVT::v4i32, 5, 1, 0, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALslsv2i32), 0,
- 1, MVT::v2i64, 6, 1, 0, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALsv2i64), 0,
- 1, MVT::v2i64, 5, 1, 0, 2, 3, 4,
- 0,
- 30,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v8i8,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALsv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 2, 3, 4,
- 30,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v8i8,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALsv8i16), 0,
- 1, MVT::v8i16, 5, 1, 0, 2, 3, 4,
- 0,
- 116|128,2,
- OPC_CheckInteger, 44,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 75,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALsluv4i16), 0,
- 1, MVT::v4i32, 6, 0, 1, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALuv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALsluv2i32), 0,
- 1, MVT::v2i64, 6, 0, 1, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALuv2i64), 0,
- 1, MVT::v2i64, 5, 0, 1, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALsluv4i16), 0,
- 1, MVT::v4i32, 6, 1, 0, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALuv4i32), 0,
- 1, MVT::v4i32, 5, 1, 0, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALsluv2i32), 0,
- 1, MVT::v2i64, 6, 1, 0, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALuv2i64), 0,
- 1, MVT::v2i64, 5, 1, 0, 2, 3, 4,
- 0,
- 30,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v8i8,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALuv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 2, 3, 4,
- 30,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v8i8,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLALuv8i16), 0,
- 1, MVT::v8i16, 5, 1, 0, 2, 3, 4,
- 0,
- 54|128,2,
- OPC_CheckInteger, 66,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 75,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMLALslv4i16), 0,
- 1, MVT::v4i32, 6, 0, 1, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMLALv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMLALslv2i32), 0,
- 1, MVT::v2i64, 6, 0, 1, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMLALv2i64), 0,
- 1, MVT::v2i64, 5, 0, 1, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMLALslv4i16), 0,
- 1, MVT::v4i32, 6, 1, 0, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMLALv4i32), 0,
- 1, MVT::v4i32, 5, 1, 0, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMLALslv2i32), 0,
- 1, MVT::v2i64, 6, 1, 0, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMLALv2i64), 0,
- 1, MVT::v2i64, 5, 1, 0, 2, 3, 4,
- 0,
- 0,
- 116|128,2,
- OPC_CheckInteger, 45,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 75,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLslsv4i16), 0,
- 1, MVT::v4i32, 6, 0, 1, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLsv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLslsv2i32), 0,
- 1, MVT::v2i64, 6, 0, 1, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLsv2i64), 0,
- 1, MVT::v2i64, 5, 0, 1, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLslsv4i16), 0,
- 1, MVT::v4i32, 6, 1, 0, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLsv4i32), 0,
- 1, MVT::v4i32, 5, 1, 0, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLslsv2i32), 0,
- 1, MVT::v2i64, 6, 1, 0, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLsv2i64), 0,
- 1, MVT::v2i64, 5, 1, 0, 2, 3, 4,
- 0,
- 30,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v8i8,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLsv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 2, 3, 4,
- 30,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v8i8,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLsv8i16), 0,
- 1, MVT::v8i16, 5, 1, 0, 2, 3, 4,
- 0,
- 116|128,2,
- OPC_CheckInteger, 46,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 75,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLsluv4i16), 0,
- 1, MVT::v4i32, 6, 0, 1, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLuv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLsluv2i32), 0,
- 1, MVT::v2i64, 6, 0, 1, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLuv2i64), 0,
- 1, MVT::v2i64, 5, 0, 1, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLsluv4i16), 0,
- 1, MVT::v4i32, 6, 1, 0, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLuv4i32), 0,
- 1, MVT::v4i32, 5, 1, 0, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLsluv2i32), 0,
- 1, MVT::v2i64, 6, 1, 0, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLuv2i64), 0,
- 1, MVT::v2i64, 5, 1, 0, 2, 3, 4,
- 0,
- 30,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v8i8,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLuv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 2, 3, 4,
- 30,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v8i8,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSLuv8i16), 0,
- 1, MVT::v8i16, 5, 1, 0, 2, 3, 4,
- 0,
- 54|128,2,
- OPC_CheckInteger, 67,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 75,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMLSLslv4i16), 0,
- 1, MVT::v4i32, 6, 0, 1, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMLSLv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMLSLslv2i32), 0,
- 1, MVT::v2i64, 6, 0, 1, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMLSLv2i64), 0,
- 1, MVT::v2i64, 5, 0, 1, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMLSLslv4i16), 0,
- 1, MVT::v4i32, 6, 1, 0, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMLSLv4i32), 0,
- 1, MVT::v4i32, 5, 1, 0, 2, 3, 4,
- 0,
- 75,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_Scope, 41,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMLSLslv2i32), 0,
- 1, MVT::v2i64, 6, 1, 0, 2, 4, 5, 6,
- 25,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQDMLSLv2i64), 0,
- 1, MVT::v2i64, 5, 1, 0, 2, 3, 4,
- 0,
- 0,
- 70,
- OPC_CheckInteger, 24,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 31,
- OPC_CheckChild1Type, MVT::v2f32,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTf2xsd), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild1Type, MVT::v4f32,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTf2xsq), 0,
- 1, MVT::v4i32, 4, 0, 2, 3, 4,
- 0,
- 70,
- OPC_CheckInteger, 25,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 31,
- OPC_CheckChild1Type, MVT::v2f32,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTf2xud), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild1Type, MVT::v4f32,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTf2xuq), 0,
- 1, MVT::v4i32, 4, 0, 2, 3, 4,
- 0,
- 70,
- OPC_CheckInteger, 26,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 31,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTxs2fd), 0,
- 1, MVT::v2f32, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTxs2fq), 0,
- 1, MVT::v4f32, 4, 0, 2, 3, 4,
- 0,
- 70,
- OPC_CheckInteger, 27,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 31,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTxu2fd), 0,
- 1, MVT::v2f32, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTxu2fq), 0,
- 1, MVT::v4f32, 4, 0, 2, 3, 4,
- 0,
- 84,
- OPC_CheckInteger, 17,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDLsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDLsv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDLsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 0,
- 84,
- OPC_CheckInteger, 18,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDLuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDLuv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDLuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 0,
- 40|128,1,
- OPC_CheckInteger, 19,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 52, MVT::v8i16,
- OPC_Scope, 24,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDWsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDWsv8i16), 0,
- 1, MVT::v8i16, 4, 1, 0, 2, 3,
- 0,
- 52, MVT::v4i32,
- OPC_Scope, 24,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDWsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDWsv4i32), 0,
- 1, MVT::v4i32, 4, 1, 0, 2, 3,
- 0,
- 52, MVT::v2i64,
- OPC_Scope, 24,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDWsv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 24,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDWsv2i64), 0,
- 1, MVT::v2i64, 4, 1, 0, 2, 3,
- 0,
- 0,
- 40|128,1,
- OPC_CheckInteger, 20,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 52, MVT::v8i16,
- OPC_Scope, 24,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDWuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDWuv8i16), 0,
- 1, MVT::v8i16, 4, 1, 0, 2, 3,
- 0,
- 52, MVT::v4i32,
- OPC_Scope, 24,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDWuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDWuv4i32), 0,
- 1, MVT::v4i32, 4, 1, 0, 2, 3,
- 0,
- 52, MVT::v2i64,
- OPC_Scope, 24,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDWuv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 24,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDWuv2i64), 0,
- 1, MVT::v2i64, 4, 1, 0, 2, 3,
- 0,
- 0,
- 34|128,1,
- OPC_CheckInteger, 28,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHADDsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHADDsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHADDsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHADDsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHADDsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHADDsv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 0,
- 34|128,1,
- OPC_CheckInteger, 29,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHADDuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHADDuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHADDuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHADDuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHADDuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHADDuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 0,
- 34|128,1,
- OPC_CheckInteger, 91,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRHADDsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRHADDsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRHADDsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRHADDsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRHADDsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRHADDsv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 0,
- 34|128,1,
- OPC_CheckInteger, 92,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRHADDuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRHADDuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRHADDuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRHADDuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRHADDuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRHADDuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 0,
- 86|128,1,
- OPC_CheckInteger, 64,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQADDsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQADDsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQADDsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQADDsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQADDsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQADDsv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 24, MVT::v1i64,
- OPC_CheckChild1Type, MVT::v1i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQADDsv1i64), 0,
- 1, MVT::v1i64, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQADDsv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 86|128,1,
- OPC_CheckInteger, 65,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQADDuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQADDuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQADDuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQADDuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQADDuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQADDuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 24, MVT::v1i64,
- OPC_CheckChild1Type, MVT::v1i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQADDuv1i64), 0,
- 1, MVT::v1i64, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQADDuv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 84,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDHNv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDHNv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDHNv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 0,
- 84,
- OPC_CheckInteger, 88,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRADDHNv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRADDHNv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRADDHNv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 0,
- 58,
- OPC_CheckInteger, 53,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULpd), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULpq), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 0,
- 30,
- OPC_CheckInteger, 50,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULLp), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 84,
- OPC_CheckInteger, 113,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBLsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBLsv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBLsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 0,
- 84,
- OPC_CheckInteger, 114,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBLuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBLuv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBLuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 0,
- 84,
- OPC_CheckInteger, 115,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBWsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBWsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBWsv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 84,
- OPC_CheckInteger, 116,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBWuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBWuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBWuv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 34|128,1,
- OPC_CheckInteger, 30,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHSUBsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHSUBsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHSUBsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHSUBsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHSUBsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHSUBsv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 0,
- 34|128,1,
- OPC_CheckInteger, 31,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHSUBuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHSUBuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHSUBuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHSUBuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHSUBuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VHSUBuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 0,
- 86|128,1,
- OPC_CheckInteger, 86,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSUBsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSUBsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSUBsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSUBsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSUBsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSUBsv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 24, MVT::v1i64,
- OPC_CheckChild1Type, MVT::v1i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSUBsv1i64), 0,
- 1, MVT::v1i64, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSUBsv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 86|128,1,
- OPC_CheckInteger, 87,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSUBuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSUBuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSUBuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSUBuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSUBuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSUBuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 24, MVT::v1i64,
- OPC_CheckChild1Type, MVT::v1i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSUBuv1i64), 0,
- 1, MVT::v1i64, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSUBuv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 84,
- OPC_CheckInteger, 112,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBHNv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBHNv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBHNv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 0,
- 84,
- OPC_CheckInteger, 98,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSUBHNv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSUBHNv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSUBHNv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 0,
- 24,
- OPC_CheckInteger, 12,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VACGEd), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24,
- OPC_CheckInteger, 13,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VACGEq), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24,
- OPC_CheckInteger, 14,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VACGTd), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24,
- OPC_CheckInteger, 15,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VACGTq), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 86|128,1,
- OPC_CheckInteger, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDsv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 24, MVT::v2f32,
- OPC_CheckChild1Type, MVT::v2f32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDfd), 0,
- 1, MVT::v2f32, 4, 0, 1, 2, 3,
- 24, MVT::v4f32,
- OPC_CheckChild1Type, MVT::v4f32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDfq), 0,
- 1, MVT::v4f32, 4, 0, 1, 2, 3,
- 0,
- 34|128,1,
- OPC_CheckInteger, 10,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 0,
- 84,
- OPC_CheckInteger, 7,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDLsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDLsv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDLsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 0,
- 84,
- OPC_CheckInteger, 8,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDLuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDLuv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABDLuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 0,
- 58|128,1,
- OPC_CheckInteger, 5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 28, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABAsv8i8), 0,
- 1, MVT::v8i8, 5, 0, 1, 2, 3, 4,
- 28, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABAsv4i16), 0,
- 1, MVT::v4i16, 5, 0, 1, 2, 3, 4,
- 28, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABAsv2i32), 0,
- 1, MVT::v2i32, 5, 0, 1, 2, 3, 4,
- 28, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABAsv16i8), 0,
- 1, MVT::v16i8, 5, 0, 1, 2, 3, 4,
- 28, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABAsv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 2, 3, 4,
- 28, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABAsv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 2, 3, 4,
- 0,
- 58|128,1,
- OPC_CheckInteger, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 28, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABAuv8i8), 0,
- 1, MVT::v8i8, 5, 0, 1, 2, 3, 4,
- 28, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABAuv4i16), 0,
- 1, MVT::v4i16, 5, 0, 1, 2, 3, 4,
- 28, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABAuv2i32), 0,
- 1, MVT::v2i32, 5, 0, 1, 2, 3, 4,
- 28, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABAuv16i8), 0,
- 1, MVT::v16i8, 5, 0, 1, 2, 3, 4,
- 28, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABAuv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 2, 3, 4,
- 28, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABAuv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 2, 3, 4,
- 0,
- 96,
- OPC_CheckInteger, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 28, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABALsv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 2, 3, 4,
- 28, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABALsv2i64), 0,
- 1, MVT::v2i64, 5, 0, 1, 2, 3, 4,
- 28, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABALsv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 2, 3, 4,
- 0,
- 96,
- OPC_CheckInteger, 4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 28, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABALuv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 2, 3, 4,
- 28, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABALuv2i64), 0,
- 1, MVT::v2i64, 5, 0, 1, 2, 3, 4,
- 28, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_RecordChild3,
- OPC_CheckChild3Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABALuv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 2, 3, 4,
- 0,
- 86|128,1,
- OPC_CheckInteger, 39,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMAXsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMAXsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMAXsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMAXsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMAXsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMAXsv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 24, MVT::v2f32,
- OPC_CheckChild1Type, MVT::v2f32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMAXfd), 0,
- 1, MVT::v2f32, 4, 0, 1, 2, 3,
- 24, MVT::v4f32,
- OPC_CheckChild1Type, MVT::v4f32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMAXfq), 0,
- 1, MVT::v4f32, 4, 0, 1, 2, 3,
- 0,
- 34|128,1,
- OPC_CheckInteger, 40,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMAXuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMAXuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMAXuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMAXuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMAXuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMAXuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 0,
- 86|128,1,
- OPC_CheckInteger, 41,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMINsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMINsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMINsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMINsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMINsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMINsv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 24, MVT::v2f32,
- OPC_CheckChild1Type, MVT::v2f32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMINfd), 0,
- 1, MVT::v2f32, 4, 0, 1, 2, 3,
- 24, MVT::v4f32,
- OPC_CheckChild1Type, MVT::v4f32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMINfq), 0,
- 1, MVT::v4f32, 4, 0, 1, 2, 3,
- 0,
- 34|128,1,
- OPC_CheckInteger, 42,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMINuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMINuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMINuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMINuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMINuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMINuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 0,
- 110,
- OPC_CheckInteger, 56,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADDi8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADDi16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADDi32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v2f32,
- OPC_CheckChild1Type, MVT::v2f32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADDf), 0,
- 1, MVT::v2f32, 4, 0, 1, 2, 3,
- 0,
- 10|128,1,
- OPC_CheckInteger, 57,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADDLsv8i8), 0,
- 1, MVT::v4i16, 3, 0, 1, 2,
- 20, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADDLsv4i16), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 20, MVT::v1i64,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADDLsv2i32), 0,
- 1, MVT::v1i64, 3, 0, 1, 2,
- 20, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADDLsv16i8), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 20, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADDLsv8i16), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 20, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADDLsv4i32), 0,
- 1, MVT::v2i64, 3, 0, 1, 2,
- 0,
- 10|128,1,
- OPC_CheckInteger, 58,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADDLuv8i8), 0,
- 1, MVT::v4i16, 3, 0, 1, 2,
- 20, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADDLuv4i16), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 20, MVT::v1i64,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADDLuv2i32), 0,
- 1, MVT::v1i64, 3, 0, 1, 2,
- 20, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADDLuv16i8), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 20, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADDLuv8i16), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 20, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADDLuv4i32), 0,
- 1, MVT::v2i64, 3, 0, 1, 2,
- 0,
- 34|128,1,
- OPC_CheckInteger, 54,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADALsv8i8), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADALsv4i16), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v1i64,
- OPC_CheckChild1Type, MVT::v1i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADALsv2i32), 0,
- 1, MVT::v1i64, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADALsv16i8), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADALsv8i16), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADALsv4i32), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 34|128,1,
- OPC_CheckInteger, 55,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADALuv8i8), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADALuv4i16), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v1i64,
- OPC_CheckChild1Type, MVT::v1i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADALuv2i32), 0,
- 1, MVT::v1i64, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADALuv16i8), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADALuv8i16), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPADALuv4i32), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 110,
- OPC_CheckInteger, 59,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPMAXs8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPMAXs16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPMAXs32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v2f32,
- OPC_CheckChild1Type, MVT::v2f32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPMAXf), 0,
- 1, MVT::v2f32, 4, 0, 1, 2, 3,
- 0,
- 84,
- OPC_CheckInteger, 60,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPMAXu8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPMAXu16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPMAXu32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 0,
- 110,
- OPC_CheckInteger, 61,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPMINs8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPMINs16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPMINs32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v2f32,
- OPC_CheckChild1Type, MVT::v2f32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPMINf), 0,
- 1, MVT::v2f32, 4, 0, 1, 2, 3,
- 0,
- 84,
- OPC_CheckInteger, 62,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPMINu8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPMINu16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VPMINu32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 0,
- 94,
- OPC_CheckInteger, 89,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRECPEd), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 20, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRECPEq), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 20, MVT::v2f32,
- OPC_CheckChild1Type, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRECPEfd), 0,
- 1, MVT::v2f32, 3, 0, 1, 2,
- 20, MVT::v4f32,
- OPC_CheckChild1Type, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRECPEfq), 0,
- 1, MVT::v4f32, 3, 0, 1, 2,
- 0,
- 58,
- OPC_CheckInteger, 90,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v2f32,
- OPC_CheckChild1Type, MVT::v2f32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRECPSfd), 0,
- 1, MVT::v2f32, 4, 0, 1, 2, 3,
- 24, MVT::v4f32,
- OPC_CheckChild1Type, MVT::v4f32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRECPSfq), 0,
- 1, MVT::v4f32, 4, 0, 1, 2, 3,
- 0,
- 94,
- OPC_CheckInteger, 96,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSQRTEd), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 20, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSQRTEq), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 20, MVT::v2f32,
- OPC_CheckChild1Type, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSQRTEfd), 0,
- 1, MVT::v2f32, 3, 0, 1, 2,
- 20, MVT::v4f32,
- OPC_CheckChild1Type, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSQRTEfq), 0,
- 1, MVT::v4f32, 3, 0, 1, 2,
- 0,
- 58,
- OPC_CheckInteger, 97,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v2f32,
- OPC_CheckChild1Type, MVT::v2f32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSQRTSfd), 0,
- 1, MVT::v2f32, 4, 0, 1, 2, 3,
- 24, MVT::v4f32,
- OPC_CheckChild1Type, MVT::v4f32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSQRTSfq), 0,
- 1, MVT::v4f32, 4, 0, 1, 2, 3,
- 0,
- 86|128,1,
- OPC_CheckInteger, 103,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLsv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 24, MVT::v1i64,
- OPC_CheckChild1Type, MVT::v1i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLsv1i64), 0,
- 1, MVT::v1i64, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLsv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 86|128,1,
- OPC_CheckInteger, 104,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 24, MVT::v1i64,
- OPC_CheckChild1Type, MVT::v1i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLuv1i64), 0,
- 1, MVT::v1i64, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLuv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 86|128,1,
- OPC_CheckInteger, 94,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHLsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHLsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHLsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHLsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHLsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHLsv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 24, MVT::v1i64,
- OPC_CheckChild1Type, MVT::v1i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHLsv1i64), 0,
- 1, MVT::v1i64, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHLsv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 86|128,1,
- OPC_CheckInteger, 95,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHLuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHLuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHLuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHLuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHLuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHLuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 24, MVT::v1i64,
- OPC_CheckChild1Type, MVT::v1i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHLuv1i64), 0,
- 1, MVT::v1i64, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHLuv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 86|128,1,
- OPC_CheckInteger, 83,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 24, MVT::v1i64,
- OPC_CheckChild1Type, MVT::v1i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsv1i64), 0,
- 1, MVT::v1i64, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 86|128,1,
- OPC_CheckInteger, 85,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 24, MVT::v1i64,
- OPC_CheckChild1Type, MVT::v1i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLuv1i64), 0,
- 1, MVT::v1i64, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLuv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 86|128,1,
- OPC_CheckInteger, 78,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHLsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHLsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHLsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHLsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHLsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHLsv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 24, MVT::v1i64,
- OPC_CheckChild1Type, MVT::v1i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHLsv1i64), 0,
- 1, MVT::v1i64, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHLsv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 86|128,1,
- OPC_CheckInteger, 79,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 24, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHLuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 24, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHLuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHLuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 24, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHLuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 24, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHLuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 24, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHLuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 24, MVT::v1i64,
- OPC_CheckChild1Type, MVT::v1i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHLuv1i64), 0,
- 1, MVT::v1i64, 4, 0, 1, 2, 3,
- 24, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHLuv2i64), 0,
- 1, MVT::v2i64, 4, 0, 1, 2, 3,
- 0,
- 54|128,1,
- OPC_CheckInteger, 11,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABSv8i8), 0,
- 1, MVT::v8i8, 3, 0, 1, 2,
- 20, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABSv4i16), 0,
- 1, MVT::v4i16, 3, 0, 1, 2,
- 20, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABSv2i32), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 20, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABSv16i8), 0,
- 1, MVT::v16i8, 3, 0, 1, 2,
- 20, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABSv8i16), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 20, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABSv4i32), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 20, MVT::v2f32,
- OPC_CheckChild1Type, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABSfd), 0,
- 1, MVT::v2f32, 3, 0, 1, 2,
- 20, MVT::v4f32,
- OPC_CheckChild1Type, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABSfq), 0,
- 1, MVT::v4f32, 3, 0, 1, 2,
- 0,
- 10|128,1,
- OPC_CheckInteger, 63,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQABSv8i8), 0,
- 1, MVT::v8i8, 3, 0, 1, 2,
- 20, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQABSv4i16), 0,
- 1, MVT::v4i16, 3, 0, 1, 2,
- 20, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQABSv2i32), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 20, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQABSv16i8), 0,
- 1, MVT::v16i8, 3, 0, 1, 2,
- 20, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQABSv8i16), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 20, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQABSv4i32), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 0,
- 10|128,1,
- OPC_CheckInteger, 73,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQNEGv8i8), 0,
- 1, MVT::v8i8, 3, 0, 1, 2,
- 20, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQNEGv4i16), 0,
- 1, MVT::v4i16, 3, 0, 1, 2,
- 20, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQNEGv2i32), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 20, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQNEGv16i8), 0,
- 1, MVT::v16i8, 3, 0, 1, 2,
- 20, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQNEGv8i16), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 20, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQNEGv4i32), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 0,
- 10|128,1,
- OPC_CheckInteger, 21,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCLSv8i8), 0,
- 1, MVT::v8i8, 3, 0, 1, 2,
- 20, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCLSv4i16), 0,
- 1, MVT::v4i16, 3, 0, 1, 2,
- 20, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCLSv2i32), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 20, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCLSv16i8), 0,
- 1, MVT::v16i8, 3, 0, 1, 2,
- 20, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCLSv8i16), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 20, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCLSv4i32), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 0,
- 10|128,1,
- OPC_CheckInteger, 22,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCLZv8i8), 0,
- 1, MVT::v8i8, 3, 0, 1, 2,
- 20, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCLZv4i16), 0,
- 1, MVT::v4i16, 3, 0, 1, 2,
- 20, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCLZv2i32), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 20, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCLZv16i8), 0,
- 1, MVT::v16i8, 3, 0, 1, 2,
- 20, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCLZv8i16), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 20, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCLZv4i32), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 0,
- 50,
- OPC_CheckInteger, 23,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCNTd), 0,
- 1, MVT::v8i8, 3, 0, 1, 2,
- 20, MVT::v16i8,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCNTq), 0,
- 1, MVT::v16i8, 3, 0, 1, 2,
- 0,
- 72,
- OPC_CheckInteger, 49,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVNv8i8), 0,
- 1, MVT::v8i8, 3, 0, 1, 2,
- 20, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVNv4i16), 0,
- 1, MVT::v4i16, 3, 0, 1, 2,
- 20, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVNv2i32), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 0,
- 72,
- OPC_CheckInteger, 70,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQMOVNsv8i8), 0,
- 1, MVT::v8i8, 3, 0, 1, 2,
- 20, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQMOVNsv4i16), 0,
- 1, MVT::v4i16, 3, 0, 1, 2,
- 20, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQMOVNsv2i32), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 0,
- 72,
- OPC_CheckInteger, 72,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQMOVNuv8i8), 0,
- 1, MVT::v8i8, 3, 0, 1, 2,
- 20, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQMOVNuv4i16), 0,
- 1, MVT::v4i16, 3, 0, 1, 2,
- 20, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQMOVNuv2i32), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 0,
- 72,
- OPC_CheckInteger, 71,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v8i8,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQMOVNsuv8i8), 0,
- 1, MVT::v8i8, 3, 0, 1, 2,
- 20, MVT::v4i16,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQMOVNsuv4i16), 0,
- 1, MVT::v4i16, 3, 0, 1, 2,
- 20, MVT::v2i32,
- OPC_CheckChild1Type, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQMOVNsuv2i32), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 0,
- 72,
- OPC_CheckInteger, 47,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVLsv8i16), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 20, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVLsv4i32), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 20, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVLsv2i64), 0,
- 1, MVT::v2i64, 3, 0, 1, 2,
- 0,
- 72,
- OPC_CheckInteger, 48,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v8i16,
- OPC_CheckChild1Type, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVLuv8i16), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 20, MVT::v4i32,
- OPC_CheckChild1Type, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVLuv4i32), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 20, MVT::v2i64,
- OPC_CheckChild1Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVLuv2i64), 0,
- 1, MVT::v2i64, 3, 0, 1, 2,
- 0,
- 24,
- OPC_CheckInteger, 117,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTBL1), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 26,
- OPC_CheckInteger, 118,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTBL2), 0,
- 1, MVT::v8i8, 5, 0, 1, 2, 3, 4,
- 28,
- OPC_CheckInteger, 119,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_RecordChild4,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTBL3), 0,
- 1, MVT::v8i8, 6, 0, 1, 2, 3, 4, 5,
- 30,
- OPC_CheckInteger, 120,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_RecordChild4,
- OPC_RecordChild5,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTBL4), 0,
- 1, MVT::v8i8, 7, 0, 1, 2, 3, 4, 5, 6,
- 26,
- OPC_CheckInteger, 121,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTBX1), 0,
- 1, MVT::v8i8, 5, 0, 1, 2, 3, 4,
- 28,
- OPC_CheckInteger, 122,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_RecordChild4,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTBX2), 0,
- 1, MVT::v8i8, 6, 0, 1, 2, 3, 4, 5,
- 30,
- OPC_CheckInteger, 123,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_RecordChild4,
- OPC_RecordChild5,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTBX3), 0,
- 1, MVT::v8i8, 7, 0, 1, 2, 3, 4, 5, 6,
- 32,
- OPC_CheckInteger, 124,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_RecordChild4,
- OPC_RecordChild5,
- OPC_RecordChild6,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTBX4), 0,
- 1, MVT::v8i8, 8, 0, 1, 2, 3, 4, 5, 6, 7,
- 0,
- 35|128,1, ISD::SHL,
- OPC_Scope, 30,
- OPC_RecordNode,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MOVs), 0,
- 1, MVT::i32, 6, 1, 2, 3, 4, 5, 6,
- 0|128,1,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 68,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 30,
- OPC_CheckPredicate, 24,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LSLri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 28,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLSLri), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 0,
- 54,
- OPC_CheckChild1Type, MVT::i32,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 23,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLSLrr), 0,
- 1, MVT::i32, 5, 2, 0, 1, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LSLrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 0,
- 0,
- 0,
- 35|128,1, ISD::SRL,
- OPC_Scope, 30,
- OPC_RecordNode,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MOVs), 0,
- 1, MVT::i32, 6, 1, 2, 3, 4, 5, 6,
- 0|128,1,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 68,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 30,
- OPC_CheckPredicate, 24,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LSRri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 28,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLSRri), 0,
- 1, MVT::i32, 5, 2, 0, 3, 4, 5,
- 0,
- 54,
- OPC_CheckChild1Type, MVT::i32,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 23,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLSRrr), 0,
- 1, MVT::i32, 5, 2, 0, 1, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LSRrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 0,
- 0,
- 0,
- 2|128,1, ISD::ROTR,
- OPC_Scope, 30,
- OPC_RecordNode,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 5,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MOVs), 0,
- 1, MVT::i32, 6, 1, 2, 3, 4, 5, 6,
- 96,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 36,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 24,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2RORri), 0,
- 1, MVT::i32, 5, 0, 2, 3, 4, 5,
- 54,
- OPC_CheckChild1Type, MVT::i32,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 23,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tROR), 0,
- 1, MVT::i32, 5, 2, 0, 1, 3, 4,
- 23,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2RORrr), 0,
- 1, MVT::i32, 5, 0, 1, 2, 3, 4,
- 0,
- 0,
- 0,
- 69|128,8, ISD::FADD,
- OPC_Scope, 118,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_Scope, 68,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 23, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslfd), 0,
- 1, MVT::v2f32, 6, 0, 1, 2, 4, 5, 6,
- 23, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslfq), 0,
- 1, MVT::v4f32, 6, 0, 1, 2, 4, 5, 6,
- 0,
- 41,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslfd), 0,
- 1, MVT::v2f32, 6, 0, 3, 1, 4, 5, 6,
- 0,
- 92,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_Scope, 42,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslfd), 0,
- 1, MVT::v2f32, 6, 3, 0, 1, 4, 5, 6,
- 42,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslfd), 0,
- 1, MVT::v2f32, 6, 3, 2, 0, 4, 5, 6,
- 0,
- 46,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslfq), 0,
- 1, MVT::v4f32, 6, 0, 3, 1, 4, 5, 6,
- 92,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_Scope, 42,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslfq), 0,
- 1, MVT::v4f32, 6, 3, 0, 1, 4, 5, 6,
- 42,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslfq), 0,
- 1, MVT::v4f32, 6, 3, 2, 0, 4, 5, 6,
- 0,
- 121,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_Scope, 56,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitNodeXForm, 8, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2f32, 2, 2, 5,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitNodeXForm, 9, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslfq), 0,
- 1, MVT::v4f32, 6, 0, 1, 6, 8, 9, 10,
- 56,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 8, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2f32, 2, 1, 5,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 9, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslfq), 0,
- 1, MVT::v4f32, 6, 0, 3, 6, 8, 9, 10,
- 0,
- 53|128,1,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 118, ISD::FMUL,
- OPC_Scope, 57,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4f32,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 8, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2f32, 2, 1, 5,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 9, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslfq), 0,
- 1, MVT::v4f32, 6, 3, 0, 6, 8, 9, 10,
- 57,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4f32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 8, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2f32, 2, 0, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 9, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAslfq), 0,
- 1, MVT::v4f32, 6, 3, 2, 6, 8, 9, 10,
- 0,
- 55, ISD::FNEG,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSD), 0,
- 1, MVT::f64, 5, 2, 0, 1, 3, 4,
- 20, MVT::f32,
- OPC_CheckPatternPredicate, 10,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSS), 0,
- 1, MVT::f32, 5, 2, 0, 1, 3, 4,
- 0,
- 0,
- 59,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::FNEG,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 20, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSD), 0,
- 1, MVT::f64, 5, 0, 1, 2, 3, 4,
- 20, MVT::f32,
- OPC_CheckPatternPredicate, 10,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSS), 0,
- 1, MVT::f32, 5, 0, 1, 2, 3, 4,
- 0,
- 54,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAD), 0,
- 1, MVT::f64, 5, 2, 0, 1, 3, 4,
- 20, MVT::f32,
- OPC_CheckPatternPredicate, 10,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAS), 0,
- 1, MVT::f32, 5, 2, 0, 1, 3, 4,
- 0,
- 98,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_SwitchType , 20, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAfd), 0,
- 1, MVT::v2f32, 5, 0, 1, 2, 3, 4,
- 20, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAfq), 0,
- 1, MVT::v4f32, 5, 0, 1, 2, 3, 4,
- 20, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAD), 0,
- 1, MVT::f64, 5, 0, 1, 2, 3, 4,
- 20, MVT::f32,
- OPC_CheckPatternPredicate, 10,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAS), 0,
- 1, MVT::f32, 5, 0, 1, 2, 3, 4,
- 0,
- 54,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAfd), 0,
- 1, MVT::v2f32, 5, 2, 0, 1, 3, 4,
- 20, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLAfq), 0,
- 1, MVT::v4f32, 5, 2, 0, 1, 3, 4,
- 0,
- 35|128,1,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_SwitchType , 19, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDD), 0,
- 1, MVT::f64, 4, 0, 1, 2, 3,
- 94, MVT::f32,
- OPC_Scope, 19,
- OPC_CheckPatternPredicate, 10,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDS), 0,
- 1, MVT::f32, 4, 0, 1, 2, 3,
- 71,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::f64, 3, 2, 0, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::f64, 3, 5, 1, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VADDfd_sfp), 0,
- 1, MVT::f64, 4, 4, 7, 8, 9,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 10, 11,
- 0,
- 19, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDfd), 0,
- 1, MVT::v2f32, 4, 0, 1, 2, 3,
- 19, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VADDfq), 0,
- 1, MVT::v4f32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 4|128,5, ISD::FSUB,
- OPC_Scope, 3|128,2,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_Scope, 68,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 23, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslfd), 0,
- 1, MVT::v2f32, 6, 0, 1, 2, 4, 5, 6,
- 23, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslfq), 0,
- 1, MVT::v4f32, 6, 0, 1, 2, 4, 5, 6,
- 0,
- 68,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_SwitchType , 23, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslfd), 0,
- 1, MVT::v2f32, 6, 0, 3, 1, 4, 5, 6,
- 23, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslfq), 0,
- 1, MVT::v4f32, 6, 0, 3, 1, 4, 5, 6,
- 0,
- 56,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitNodeXForm, 8, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2f32, 2, 2, 5,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitNodeXForm, 9, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslfq), 0,
- 1, MVT::v4f32, 6, 0, 1, 6, 8, 9, 10,
- 56,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 8, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2f32, 2, 1, 5,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 9, 7,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSslfq), 0,
- 1, MVT::v4f32, 6, 0, 3, 6, 8, 9, 10,
- 0,
- 113,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 55, ISD::FNEG,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNMLAD), 0,
- 1, MVT::f64, 5, 2, 0, 1, 3, 4,
- 20, MVT::f32,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNMLAS), 0,
- 1, MVT::f32, 5, 2, 0, 1, 3, 4,
- 0,
- 50, ISD::FMUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNMLSD), 0,
- 1, MVT::f64, 5, 2, 0, 1, 3, 4,
- 20, MVT::f32,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNMLSS), 0,
- 1, MVT::f32, 5, 2, 0, 1, 3, 4,
- 0,
- 0,
- 9|128,2,
- OPC_RecordChild0,
- OPC_Scope, 97,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_SwitchType , 20, MVT::f64,
- OPC_CheckPatternPredicate, 12,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSD), 0,
- 1, MVT::f64, 5, 0, 1, 2, 3, 4,
- 20, MVT::f32,
- OPC_CheckPatternPredicate, 12,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSS), 0,
- 1, MVT::f32, 5, 0, 1, 2, 3, 4,
- 20, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSfd), 0,
- 1, MVT::v2f32, 5, 0, 1, 2, 3, 4,
- 20, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMLSfq), 0,
- 1, MVT::v4f32, 5, 0, 1, 2, 3, 4,
- 0,
- 34|128,1,
- OPC_RecordChild1,
- OPC_SwitchType , 19, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBD), 0,
- 1, MVT::f64, 4, 0, 1, 2, 3,
- 94, MVT::f32,
- OPC_Scope, 19,
- OPC_CheckPatternPredicate, 10,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBS), 0,
- 1, MVT::f32, 4, 0, 1, 2, 3,
- 71,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::f64, 3, 2, 0, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::f64, 3, 5, 1, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VSUBfd_sfp), 0,
- 1, MVT::f64, 4, 4, 7, 8, 9,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 10, 11,
- 0,
- 19, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBfd), 0,
- 1, MVT::v2f32, 4, 0, 1, 2, 3,
- 19, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSUBfq), 0,
- 1, MVT::v4f32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 0,
- 61, ISD::CALLSEQ_END,
- OPC_RecordNode,
- OPC_CaptureFlagInput,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 26, ISD::TargetConstant,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::TargetConstant,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADJCALLSTACKUP), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 4, 1, 2, 3, 4,
- 24, ISD::Constant,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tADJCALLSTACKUP), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 2, 3, 4,
- 0,
- 82, ARMISD::WrapperJT,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::TargetJumpTable,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 21,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::LEApcrelJT), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 21,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLEApcrelJT), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 21,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LEApcrelJT), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 0,
- 34, ARMISD::BR2_JT,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::TargetJumpTable,
- OPC_MoveParent,
- OPC_RecordChild4,
- OPC_MoveChild, 4,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2BR_JT), 0|OPFL_Chain,
- 0, 4, 1, 2, 3, 5,
- 3|128,4, ISD::FMUL,
- OPC_Scope, 65,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 22, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslfd), 0,
- 1, MVT::v2f32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslfq), 0,
- 1, MVT::v4f32, 5, 0, 1, 3, 4, 5,
- 0,
- 65,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 22, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslfd), 0,
- 1, MVT::v2f32, 5, 2, 0, 3, 4, 5,
- 22, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslfq), 0,
- 1, MVT::v4f32, 5, 2, 0, 3, 4, 5,
- 0,
- 54,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 8, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2f32, 2, 1, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 9, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslfq), 0,
- 1, MVT::v4f32, 5, 0, 5, 7, 8, 9,
- 105,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 50, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4f32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 8, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2f32, 2, 0, 4,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 9, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULslfq), 0,
- 1, MVT::v4f32, 5, 2, 5, 7, 8, 9,
- 47, ISD::FNEG,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 19, MVT::f64,
- OPC_CheckPatternPredicate, 13,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNMULD), 0,
- 1, MVT::f64, 4, 0, 1, 2, 3,
- 19, MVT::f32,
- OPC_CheckPatternPredicate, 13,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNMULS), 0,
- 1, MVT::f32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 90|128,1,
- OPC_RecordChild0,
- OPC_Scope, 50,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::FNEG,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_SwitchType , 19, MVT::f64,
- OPC_CheckPatternPredicate, 13,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNMULD), 0,
- 1, MVT::f64, 4, 1, 0, 2, 3,
- 19, MVT::f32,
- OPC_CheckPatternPredicate, 13,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNMULS), 0,
- 1, MVT::f32, 4, 1, 0, 2, 3,
- 0,
- 34|128,1,
- OPC_RecordChild1,
- OPC_SwitchType , 19, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULD), 0,
- 1, MVT::f64, 4, 0, 1, 2, 3,
- 94, MVT::f32,
- OPC_Scope, 19,
- OPC_CheckPatternPredicate, 10,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULS), 0,
- 1, MVT::f32, 4, 0, 1, 2, 3,
- 71,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::f64, 3, 2, 0, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::f64, 3, 5, 1, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VMULfd_sfp), 0,
- 1, MVT::f64, 4, 4, 7, 8, 9,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 10, 11,
- 0,
- 19, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULfd), 0,
- 1, MVT::v2f32, 4, 0, 1, 2, 3,
- 19, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMULfq), 0,
- 1, MVT::v4f32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 0,
- 44, ISD::CALLSEQ_START,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 19, ISD::TargetConstant,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ADJCALLSTACKDOWN), 0|OPFL_Chain|OPFL_FlagOutput,
- 0, 3, 1, 2, 3,
- 15, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tADJCALLSTACKDOWN), 0|OPFL_Chain|OPFL_FlagOutput,
- 0, 1, 2,
- 0,
- 53|128,1, ARMISD::CALL,
- OPC_RecordNode,
- OPC_CaptureFlagInput,
- OPC_RecordChild1,
- OPC_Scope, 118,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 55, ISD::TargetGlobalAddress,
- OPC_MoveParent,
- OPC_Scope, 12,
- OPC_CheckPatternPredicate, 14,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 15,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BLr9), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 16,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBLXi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 17,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBLXi_r9), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 0,
- 55, ISD::TargetExternalSymbol,
- OPC_MoveParent,
- OPC_Scope, 12,
- OPC_CheckPatternPredicate, 14,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 15,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BLr9), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 16,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBLXi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 17,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBLXi_r9), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 0,
- 0,
- 56,
- OPC_CheckChild1Type, MVT::i32,
- OPC_Scope, 12,
- OPC_CheckPatternPredicate, 18,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BLX), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 19,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BLXr9), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 16,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBLXr), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 17,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBLXr_r9), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 0,
- 0,
- 52, ARMISD::CALL_PRED,
- OPC_RecordNode,
- OPC_CaptureFlagInput,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TargetGlobalAddress,
- OPC_MoveParent,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 14,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BL_pred), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 3, 1, 2, 3,
- 20,
- OPC_CheckPatternPredicate, 15,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BLr9_pred), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 3, 1, 2, 3,
- 0,
- 43|128,1, ARMISD::Wrapper,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 100, ISD::TargetGlobalAddress,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 20,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::LEApcrel), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 21,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MOVi32imm), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLEApcrel), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 22,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LEApcrel), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 23,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MOVi32imm), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 62, ISD::TargetConstantPool,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::LEApcrel), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLEApcrel), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2LEApcrel), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 0,
- 103, ARMISD::tCALL,
- OPC_RecordNode,
- OPC_CaptureFlagInput,
- OPC_RecordChild1,
- OPC_Scope, 66,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 29, ISD::TargetGlobalAddress,
- OPC_MoveParent,
- OPC_Scope, 12,
- OPC_CheckPatternPredicate, 24,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 25,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBLr9), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 0,
- 29, ISD::TargetExternalSymbol,
- OPC_MoveParent,
- OPC_Scope, 12,
- OPC_CheckPatternPredicate, 24,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 25,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBLr9), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 0,
- 0,
- 30,
- OPC_CheckChild1Type, MVT::i32,
- OPC_Scope, 12,
- OPC_CheckPatternPredicate, 16,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBLXr), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 17,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBLXr_r9), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 0,
- 0,
- 65|128,1, ISD::FNEG,
- OPC_Scope, 51,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_SwitchType , 19, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNMULD), 0,
- 1, MVT::f64, 4, 0, 1, 2, 3,
- 19, MVT::f32,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNMULS), 0,
- 1, MVT::f32, 4, 0, 1, 2, 3,
- 0,
- 9|128,1,
- OPC_RecordChild0,
- OPC_SwitchType , 18, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNEGD), 0,
- 1, MVT::f64, 3, 0, 1, 2,
- 72, MVT::f32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 10,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNEGS), 0,
- 1, MVT::f32, 3, 0, 1, 2,
- 50,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::f64, 3, 1, 0, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VNEGfd_sfp), 0,
- 1, MVT::f64, 3, 3, 4, 5,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 6, 7,
- 0,
- 18, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNEGfd), 0,
- 1, MVT::v2f32, 3, 0, 1, 2,
- 18, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VNEGf32q), 0,
- 1, MVT::v4f32, 3, 0, 1, 2,
- 0,
- 0,
- 65|128,1, ARMISD::VSHL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 21, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLiv8i8), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 21, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLiv4i16), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 21, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLiv2i32), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 21, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLiv1i64), 0,
- 1, MVT::v1i64, 4, 0, 2, 3, 4,
- 21, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLiv16i8), 0,
- 1, MVT::v16i8, 4, 0, 2, 3, 4,
- 21, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLiv8i16), 0,
- 1, MVT::v8i16, 4, 0, 2, 3, 4,
- 21, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLiv4i32), 0,
- 1, MVT::v4i32, 4, 0, 2, 3, 4,
- 21, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLiv2i64), 0,
- 1, MVT::v2i64, 4, 0, 2, 3, 4,
- 0,
- 65|128,1, ARMISD::VSHRs,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 21, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 21, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 21, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 21, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRsv1i64), 0,
- 1, MVT::v1i64, 4, 0, 2, 3, 4,
- 21, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRsv16i8), 0,
- 1, MVT::v16i8, 4, 0, 2, 3, 4,
- 21, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 2, 3, 4,
- 21, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 2, 3, 4,
- 21, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRsv2i64), 0,
- 1, MVT::v2i64, 4, 0, 2, 3, 4,
- 0,
- 65|128,1, ARMISD::VSHRu,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 21, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 21, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 21, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 21, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRuv1i64), 0,
- 1, MVT::v1i64, 4, 0, 2, 3, 4,
- 21, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 2, 3, 4,
- 21, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 2, 3, 4,
- 21, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 2, 3, 4,
- 21, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRuv2i64), 0,
- 1, MVT::v2i64, 4, 0, 2, 3, 4,
- 0,
- 99, ARMISD::VSHLLs,
- OPC_RecordChild0,
- OPC_Scope, 31,
- OPC_CheckChild0Type, MVT::v8i8,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLLsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLLsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLLsv2i64), 0,
- 1, MVT::v2i64, 4, 0, 2, 3, 4,
- 0,
- 99, ARMISD::VSHLLu,
- OPC_RecordChild0,
- OPC_Scope, 31,
- OPC_CheckChild0Type, MVT::v8i8,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLLuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLLuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLLuv2i64), 0,
- 1, MVT::v2i64, 4, 0, 2, 3, 4,
- 0,
- 99, ARMISD::VSHLLi,
- OPC_RecordChild0,
- OPC_Scope, 31,
- OPC_CheckChild0Type, MVT::v8i8,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLLi8), 0,
- 1, MVT::v8i16, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLLi16), 0,
- 1, MVT::v4i32, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHLLi32), 0,
- 1, MVT::v2i64, 4, 0, 2, 3, 4,
- 0,
- 99, ARMISD::VSHRN,
- OPC_RecordChild0,
- OPC_Scope, 31,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRNv8i8), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRNv4i16), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v2i64,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSHRNv2i32), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 0,
- 65|128,1, ARMISD::VRSHRs,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 21, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 21, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 21, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 21, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRsv1i64), 0,
- 1, MVT::v1i64, 4, 0, 2, 3, 4,
- 21, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRsv16i8), 0,
- 1, MVT::v16i8, 4, 0, 2, 3, 4,
- 21, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 2, 3, 4,
- 21, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 2, 3, 4,
- 21, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRsv2i64), 0,
- 1, MVT::v2i64, 4, 0, 2, 3, 4,
- 0,
- 65|128,1, ARMISD::VRSHRu,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 21, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 21, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 21, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 21, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRuv1i64), 0,
- 1, MVT::v1i64, 4, 0, 2, 3, 4,
- 21, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 2, 3, 4,
- 21, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 2, 3, 4,
- 21, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 2, 3, 4,
- 21, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRuv2i64), 0,
- 1, MVT::v2i64, 4, 0, 2, 3, 4,
- 0,
- 99, ARMISD::VRSHRN,
- OPC_RecordChild0,
- OPC_Scope, 31,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRNv8i8), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRNv4i16), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v2i64,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VRSHRNv2i32), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 0,
- 65|128,1, ARMISD::VQSHLs,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 21, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsiv8i8), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 21, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsiv4i16), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 21, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsiv2i32), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 21, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsiv1i64), 0,
- 1, MVT::v1i64, 4, 0, 2, 3, 4,
- 21, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsiv16i8), 0,
- 1, MVT::v16i8, 4, 0, 2, 3, 4,
- 21, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsiv8i16), 0,
- 1, MVT::v8i16, 4, 0, 2, 3, 4,
- 21, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsiv4i32), 0,
- 1, MVT::v4i32, 4, 0, 2, 3, 4,
- 21, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsiv2i64), 0,
- 1, MVT::v2i64, 4, 0, 2, 3, 4,
- 0,
- 65|128,1, ARMISD::VQSHLu,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 21, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLuiv8i8), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 21, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLuiv4i16), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 21, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLuiv2i32), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 21, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLuiv1i64), 0,
- 1, MVT::v1i64, 4, 0, 2, 3, 4,
- 21, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLuiv16i8), 0,
- 1, MVT::v16i8, 4, 0, 2, 3, 4,
- 21, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLuiv8i16), 0,
- 1, MVT::v8i16, 4, 0, 2, 3, 4,
- 21, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLuiv4i32), 0,
- 1, MVT::v4i32, 4, 0, 2, 3, 4,
- 21, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLuiv2i64), 0,
- 1, MVT::v2i64, 4, 0, 2, 3, 4,
- 0,
- 65|128,1, ARMISD::VQSHLsu,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 21, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 21, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 21, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 21, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsuv1i64), 0,
- 1, MVT::v1i64, 4, 0, 2, 3, 4,
- 21, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 2, 3, 4,
- 21, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 2, 3, 4,
- 21, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 2, 3, 4,
- 21, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHLsuv2i64), 0,
- 1, MVT::v2i64, 4, 0, 2, 3, 4,
- 0,
- 99, ARMISD::VQSHRNs,
- OPC_RecordChild0,
- OPC_Scope, 31,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHRNsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHRNsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v2i64,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHRNsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 0,
- 99, ARMISD::VQSHRNu,
- OPC_RecordChild0,
- OPC_Scope, 31,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHRNuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHRNuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v2i64,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHRNuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 0,
- 99, ARMISD::VQSHRNsu,
- OPC_RecordChild0,
- OPC_Scope, 31,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHRUNv8i8), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHRUNv4i16), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v2i64,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQSHRUNv2i32), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 0,
- 99, ARMISD::VQRSHRNs,
- OPC_RecordChild0,
- OPC_Scope, 31,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHRNsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHRNsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v2i64,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHRNsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 0,
- 99, ARMISD::VQRSHRNu,
- OPC_RecordChild0,
- OPC_Scope, 31,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHRNuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHRNuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v2i64,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHRNuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 0,
- 99, ARMISD::VQRSHRNsu,
- OPC_RecordChild0,
- OPC_Scope, 31,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHRUNv8i8), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHRUNv4i16), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 31,
- OPC_CheckChild0Type, MVT::v2i64,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VQRSHRUNv2i32), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 0,
- 74|128,1, ARMISD::VSLI,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSLIv8i8), 0,
- 1, MVT::v8i8, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSLIv4i16), 0,
- 1, MVT::v4i16, 5, 0, 1, 3, 4, 5,
- 22, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSLIv2i32), 0,
- 1, MVT::v2i32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSLIv1i64), 0,
- 1, MVT::v1i64, 5, 0, 1, 3, 4, 5,
- 22, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSLIv16i8), 0,
- 1, MVT::v16i8, 5, 0, 1, 3, 4, 5,
- 22, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSLIv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSLIv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSLIv2i64), 0,
- 1, MVT::v2i64, 5, 0, 1, 3, 4, 5,
- 0,
- 74|128,1, ARMISD::VSRI,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRIv8i8), 0,
- 1, MVT::v8i8, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRIv4i16), 0,
- 1, MVT::v4i16, 5, 0, 1, 3, 4, 5,
- 22, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRIv2i32), 0,
- 1, MVT::v2i32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRIv1i64), 0,
- 1, MVT::v1i64, 5, 0, 1, 3, 4, 5,
- 22, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRIv16i8), 0,
- 1, MVT::v16i8, 5, 0, 1, 3, 4, 5,
- 22, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRIv8i16), 0,
- 1, MVT::v8i16, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRIv4i32), 0,
- 1, MVT::v4i32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSRIv2i64), 0,
- 1, MVT::v2i64, 5, 0, 1, 3, 4, 5,
- 0,
- 25|128,1, ARMISD::VGETLANEs,
- OPC_RecordChild0,
- OPC_Scope, 29,
- OPC_CheckChild0Type, MVT::v8i8,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VGETLNs8), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 29,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VGETLNs16), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 44,
- OPC_CheckChild0Type, MVT::v16i8,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 18, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v8i8, 2, 0, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 19, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VGETLNs8), 0,
- 1, MVT::i32, 4, 4, 6, 7, 8,
- 44,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 6, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v4i16, 2, 0, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 7, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VGETLNs16), 0,
- 1, MVT::i32, 4, 4, 6, 7, 8,
- 0,
- 25|128,1, ARMISD::VGETLANEu,
- OPC_RecordChild0,
- OPC_Scope, 29,
- OPC_CheckChild0Type, MVT::v8i8,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VGETLNu8), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 29,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VGETLNu16), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 44,
- OPC_CheckChild0Type, MVT::v16i8,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 18, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v8i8, 2, 0, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 19, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VGETLNu8), 0,
- 1, MVT::i32, 4, 4, 6, 7, 8,
- 44,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 6, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v4i16, 2, 0, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 7, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VGETLNu16), 0,
- 1, MVT::i32, 4, 4, 6, 7, 8,
- 0,
- 53|128,1, ISD::EXTRACT_VECTOR_ELT,
- OPC_RecordChild0,
- OPC_Scope, 31,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VGETLNi32), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 24,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::f64,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 20, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f64, 2, 0, 3,
- 46,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 8, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2i32, 2, 0, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 9, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VGETLNi32), 0,
- 1, MVT::i32, 4, 4, 6, 7, 8,
- 36,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::f32,
- OPC_EmitInteger, MVT::i32, ARM::DPR_VFP2RegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::v2f32, 2, 0, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 21, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 3, 5,
- 36,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::f32,
- OPC_EmitInteger, MVT::i32, ARM::QPR_VFP2RegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::v4f32, 2, 0, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 21, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 3, 5,
- 0,
- 95|128,2, ISD::INSERT_VECTOR_ELT,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 58,
- OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSETLNi8), 0,
- 1, MVT::v8i8, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSETLNi16), 0,
- 1, MVT::v4i16, 5, 0, 1, 3, 4, 5,
- 0,
- 107,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 22, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSETLNi32), 0,
- 1, MVT::v2i32, 5, 0, 1, 3, 4, 5,
- 15, MVT::v2f64,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 20, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::v2f64, 3, 0, 1, 4,
- 27, MVT::v2f32,
- OPC_EmitInteger, MVT::i32, ARM::DPR_VFP2RegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::v2f32, 2, 0, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 21, 5,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::v2f32, 3, 4, 1, 6,
- 27, MVT::v4f32,
- OPC_EmitInteger, MVT::i32, ARM::QPR_VFP2RegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::v4f32, 2, 0, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 21, 5,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::v4f32, 3, 4, 1, 6,
- 0,
- 118,
- OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 52, MVT::v16i8,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 18, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v8i8, 2, 0, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 19, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VSETLNi8), 0,
- 1, MVT::f64, 5, 5, 1, 7, 8, 9,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 18, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::v16i8, 3, 0, 10, 12,
- 52, MVT::v8i16,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 6, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v4i16, 2, 0, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 7, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VSETLNi16), 0,
- 1, MVT::f64, 5, 5, 1, 7, 8, 9,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 6, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::v8i16, 3, 0, 10, 12,
- 0,
- 60,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 8, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2i32, 2, 0, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 9, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VSETLNi32), 0,
- 1, MVT::f64, 5, 5, 1, 7, 8, 9,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 8, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::v4i32, 3, 0, 10, 12,
- 0,
- 102|128,1, ARMISD::VDUP,
- OPC_Scope, 52,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i32,
- OPC_CheckType, MVT::f32,
- OPC_MoveParent,
- OPC_SwitchType , 18, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUPfd), 0,
- 1, MVT::v2f32, 3, 0, 1, 2,
- 18, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUPfq), 0,
- 1, MVT::v4f32, 3, 0, 1, 2,
- 0,
- 45|128,1,
- OPC_RecordChild0,
- OPC_Scope, 124,
- OPC_CheckChild0Type, MVT::i32,
- OPC_SwitchType , 18, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUP8d), 0,
- 1, MVT::v8i8, 3, 0, 1, 2,
- 18, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUP16d), 0,
- 1, MVT::v4i16, 3, 0, 1, 2,
- 18, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUP32d), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 18, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUP8q), 0,
- 1, MVT::v16i8, 3, 0, 1, 2,
- 18, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUP16q), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 18, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUP32q), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 0,
- 44,
- OPC_CheckChild0Type, MVT::f32,
- OPC_SwitchType , 18, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUPfdf), 0,
- 1, MVT::v2f32, 3, 0, 1, 2,
- 18, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUPfqf), 0,
- 1, MVT::v4f32, 3, 0, 1, 2,
- 0,
- 0,
- 0,
- 115|128,3, ARMISD::VDUPLANE,
- OPC_RecordChild0,
- OPC_Scope, 56,
- OPC_CheckChild0Type, MVT::v8i8,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 21, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUPLN8d), 0,
- 1, MVT::v8i8, 4, 0, 2, 3, 4,
- 21, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUPLN8q), 0,
- 1, MVT::v16i8, 4, 0, 2, 3, 4,
- 0,
- 56,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 21, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUPLN16d), 0,
- 1, MVT::v4i16, 4, 0, 2, 3, 4,
- 21, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUPLN16q), 0,
- 1, MVT::v8i16, 4, 0, 2, 3, 4,
- 0,
- 56,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 21, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUPLN32d), 0,
- 1, MVT::v2i32, 4, 0, 2, 3, 4,
- 21, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUPLN32q), 0,
- 1, MVT::v4i32, 4, 0, 2, 3, 4,
- 0,
- 56,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 21, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUPLNfd), 0,
- 1, MVT::v2f32, 4, 0, 2, 3, 4,
- 21, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUPLNfq), 0,
- 1, MVT::v4f32, 4, 0, 2, 3, 4,
- 0,
- 46,
- OPC_CheckChild0Type, MVT::v16i8,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v16i8,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 18, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v8i8, 2, 0, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 19, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUPLN8q), 0,
- 1, MVT::v16i8, 4, 4, 6, 7, 8,
- 46,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 6, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v4i16, 2, 0, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 7, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUPLN16q), 0,
- 1, MVT::v8i16, 4, 4, 6, 7, 8,
- 46,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 8, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2i32, 2, 0, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 9, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUPLN32q), 0,
- 1, MVT::v4i32, 4, 4, 6, 7, 8,
- 46,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 8, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::v2f32, 2, 0, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 9, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDUPLNfq), 0,
- 1, MVT::v4f32, 4, 4, 6, 7, 8,
- 39,
- OPC_CheckChild0Type, MVT::v2i64,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 20, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i64, 2, 0, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 22, 5,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::v2i64, 3, 0, 4, 6,
- 39,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2f64,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 20, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f64, 2, 0, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 22, 5,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::v2f64, 3, 0, 4, 6,
- 0,
- 74|128,1, ARMISD::VEXT,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VEXTd8), 0,
- 1, MVT::v8i8, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VEXTd16), 0,
- 1, MVT::v4i16, 5, 0, 1, 3, 4, 5,
- 22, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VEXTd32), 0,
- 1, MVT::v2i32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VEXTdf), 0,
- 1, MVT::v2f32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VEXTq8), 0,
- 1, MVT::v16i8, 5, 0, 1, 3, 4, 5,
- 22, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VEXTq16), 0,
- 1, MVT::v8i16, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VEXTq32), 0,
- 1, MVT::v4i32, 5, 0, 1, 3, 4, 5,
- 22, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VEXTqf), 0,
- 1, MVT::v4f32, 5, 0, 1, 3, 4, 5,
- 0,
- 110|128,2, ISD::Constant,
- OPC_RecordNode,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 26,
- OPC_CheckPredicate, 4,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MOVi), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 26,
- OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MOVi), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 22,
- OPC_CheckPredicate, 61,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MOVi16), 0,
- 1, MVT::i32, 3, 1, 2, 3,
- 29,
- OPC_CheckPredicate, 23,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitNodeXForm, 17, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MVNi), 0,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 22,
- OPC_CheckPredicate, 7,
- OPC_CheckPatternPredicate, 26,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MOVi2pieces), 0,
- 1, MVT::i32, 3, 1, 2, 3,
- 26,
- OPC_CheckPredicate, 53,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tMOVi8), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 22,
- OPC_CheckPredicate, 61,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MOVi16), 0,
- 1, MVT::i32, 3, 1, 2, 3,
- 29,
- OPC_CheckPredicate, 6,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitNodeXForm, 1, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MVNi), 0,
- 1, MVT::i32, 4, 2, 3, 4, 5,
- 55,
- OPC_CheckPredicate, 62,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitNodeXForm, 23, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::tMOVi8), 0,
- 1, MVT::i32, 4, 2, 4, 5, 6,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitNodeXForm, 24, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tLSLri), 0,
- 1, MVT::i32, 5, 1, 7, 9, 10, 11,
- 49,
- OPC_CheckPredicate, 63,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitRegister, MVT::i32, ARM::CPSR,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitNodeXForm, 25, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::tMOVi8), 0,
- 1, MVT::i32, 4, 2, 4, 5, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tMVN), 0,
- 1, MVT::i32, 4, 1, 7, 8, 9,
- 44,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MOVi32imm), 0,
- 1, MVT::i32, 3, 1, 2, 3,
- 20,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MOVi32imm), 0,
- 1, MVT::i32, 3, 1, 2, 3,
- 0,
- 0,
- 51, ISD::ConstantFP,
- OPC_RecordNode,
- OPC_SwitchType , 22, MVT::f64,
- OPC_CheckPredicate, 64,
- OPC_CheckPatternPredicate, 27,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::FCONSTD), 0,
- 1, MVT::f64, 3, 1, 2, 3,
- 22, MVT::f32,
- OPC_CheckPredicate, 65,
- OPC_CheckPatternPredicate, 27,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::FCONSTS), 0,
- 1, MVT::f32, 3, 1, 2, 3,
- 0,
- 79|128,1, ISD::BUILD_VECTOR,
- OPC_RecordNode,
- OPC_Scope, 50,
- OPC_CheckPredicate, 66,
- OPC_SwitchType , 21, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitNodeXForm, 26, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVv8i8), 0,
- 1, MVT::v8i8, 3, 1, 2, 3,
- 21, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitNodeXForm, 26, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVv16i8), 0,
- 1, MVT::v16i8, 3, 1, 2, 3,
- 0,
- 50,
- OPC_CheckPredicate, 67,
- OPC_SwitchType , 21, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitNodeXForm, 27, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVv4i16), 0,
- 1, MVT::v4i16, 3, 1, 2, 3,
- 21, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitNodeXForm, 27, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVv8i16), 0,
- 1, MVT::v8i16, 3, 1, 2, 3,
- 0,
- 50,
- OPC_CheckPredicate, 68,
- OPC_SwitchType , 21, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitNodeXForm, 28, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVv2i32), 0,
- 1, MVT::v2i32, 3, 1, 2, 3,
- 21, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitNodeXForm, 28, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVv4i32), 0,
- 1, MVT::v4i32, 3, 1, 2, 3,
- 0,
- 50,
- OPC_CheckPredicate, 69,
- OPC_SwitchType , 21, MVT::v1i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitNodeXForm, 29, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVv1i64), 0,
- 1, MVT::v1i64, 3, 1, 2, 3,
- 21, MVT::v2i64,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitNodeXForm, 29, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVv2i64), 0,
- 1, MVT::v2i64, 3, 1, 2, 3,
- 0,
- 0,
- 55, ISD::ATOMIC_LOAD_ADD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 14,
- OPC_CheckPredicate, 70,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_ADD_I8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 14,
- OPC_CheckPredicate, 71,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_ADD_I16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 14,
- OPC_CheckPredicate, 72,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_ADD_I32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 0,
- 55, ISD::ATOMIC_LOAD_SUB,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 14,
- OPC_CheckPredicate, 73,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_SUB_I8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 14,
- OPC_CheckPredicate, 74,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_SUB_I16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 14,
- OPC_CheckPredicate, 75,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_SUB_I32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 0,
- 55, ISD::ATOMIC_LOAD_AND,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 14,
- OPC_CheckPredicate, 76,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_AND_I8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 14,
- OPC_CheckPredicate, 77,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_AND_I16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 14,
- OPC_CheckPredicate, 78,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_AND_I32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 0,
- 55, ISD::ATOMIC_LOAD_OR,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 14,
- OPC_CheckPredicate, 79,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_OR_I8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 14,
- OPC_CheckPredicate, 80,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_OR_I16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 14,
- OPC_CheckPredicate, 81,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_OR_I32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 0,
- 55, ISD::ATOMIC_LOAD_XOR,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 14,
- OPC_CheckPredicate, 82,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_XOR_I8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 14,
- OPC_CheckPredicate, 83,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_XOR_I16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 14,
- OPC_CheckPredicate, 84,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_XOR_I32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 0,
- 55, ISD::ATOMIC_LOAD_NAND,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 14,
- OPC_CheckPredicate, 85,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_NAND_I8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 14,
- OPC_CheckPredicate, 86,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_NAND_I16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 14,
- OPC_CheckPredicate, 87,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_LOAD_NAND_I32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 0,
- 55, ISD::ATOMIC_SWAP,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 14,
- OPC_CheckPredicate, 88,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_SWAP_I8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 14,
- OPC_CheckPredicate, 89,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_SWAP_I16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 14,
- OPC_CheckPredicate, 90,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_SWAP_I32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 1, 2,
- 0,
- 59, ISD::ATOMIC_CMP_SWAP,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 15,
- OPC_CheckPredicate, 91,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_CMP_SWAP_I8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 3, 1, 2, 3,
- 15,
- OPC_CheckPredicate, 92,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_CMP_SWAP_I16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 3, 1, 2, 3,
- 15,
- OPC_CheckPredicate, 93,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::ATOMIC_CMP_SWAP_I32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 3, 1, 2, 3,
- 0,
- 25|128,3, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_Scope, 29,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_SwitchType , 3, MVT::v1i64,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4i16,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v8i8,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::f64,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2f32,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 29,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_SwitchType , 3, MVT::v1i64,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2i32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v8i8,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::f64,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2f32,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 29,
- OPC_CheckChild0Type, MVT::v8i8,
- OPC_SwitchType , 3, MVT::v1i64,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2i32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4i16,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::f64,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2f32,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 29,
- OPC_CheckChild0Type, MVT::f64,
- OPC_SwitchType , 3, MVT::v1i64,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2i32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4i16,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v8i8,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2f32,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 29,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_SwitchType , 3, MVT::v1i64,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2i32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4i16,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v8i8,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::f64,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 29,
- OPC_CheckChild0Type, MVT::v1i64,
- OPC_SwitchType , 3, MVT::v2i32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4i16,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v8i8,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::f64,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2f32,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 29,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_SwitchType , 3, MVT::v2i64,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v8i16,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v16i8,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4f32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2f64,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 29,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_SwitchType , 3, MVT::v2i64,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4i32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v16i8,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4f32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2f64,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 29,
- OPC_CheckChild0Type, MVT::v16i8,
- OPC_SwitchType , 3, MVT::v2i64,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4i32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v8i16,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4f32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2f64,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 29,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_SwitchType , 3, MVT::v2i64,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4i32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v8i16,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v16i8,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4f32,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 29,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_SwitchType , 3, MVT::v2i64,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4i32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v8i16,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v16i8,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2f64,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 29,
- OPC_CheckChild0Type, MVT::v2i64,
- OPC_SwitchType , 3, MVT::v4i32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v8i16,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v16i8,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4f32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2f64,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 22,
- OPC_CheckChild0Type, MVT::f32,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVRS), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 22,
- OPC_CheckChild0Type, MVT::i32,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVSR), 0,
- 1, MVT::f32, 3, 0, 1, 2,
- 0,
- 56, ARMISD::RET_FLAG,
- OPC_RecordNode,
- OPC_CaptureFlagInput,
- OPC_Scope, 19,
- OPC_CheckPatternPredicate, 28,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BX_RET), 0|OPFL_Chain|OPFL_FlagInput,
- 0, 2, 1, 2,
- 19,
- OPC_CheckPatternPredicate, 29,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MOVPCLR), 0|OPFL_Chain|OPFL_FlagInput,
- 0, 2, 1, 2,
- 11,
- OPC_CheckPatternPredicate, 9,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBX_RET), 0|OPFL_Chain|OPFL_FlagInput,
- 0, 0,
- 0,
- 45, ISD::BRIND,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_Scope, 12,
- OPC_CheckPatternPredicate, 28,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BRIND), 0|OPFL_Chain,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 29,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MOVPCRX), 0|OPFL_Chain,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 9,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBRIND), 0|OPFL_Chain,
- 0, 1, 1,
- 0,
- 85, ARMISD::CALL_NOLINK,
- OPC_RecordNode,
- OPC_CaptureFlagInput,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_Scope, 12,
- OPC_CheckPatternPredicate, 30,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BX), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 31,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BMOVPCRX), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 32,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BXr9), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 33,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::BMOVPCRXr9), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 34,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBX), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 35,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tBXr9), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 0,
- 48, ISD::BR,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BasicBlock,
- OPC_MoveParent,
- OPC_Scope, 12,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::B), 0|OPFL_Chain,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tB), 0|OPFL_Chain,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2B), 0|OPFL_Chain,
- 0, 1, 1,
- 0,
- 52, ARMISD::RRX,
- OPC_CaptureFlagInput,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MOVrx), 0|OPFL_FlagInput,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 22,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MOVrx), 0|OPFL_FlagInput,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 0,
- 35, ARMISD::SRL_FLAG,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MOVsrl_flag), 0|OPFL_FlagOutput,
- 1, MVT::i32, 3, 0, 1, 2,
- 10,
- OPC_CheckPatternPredicate, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MOVsrl_flag), 0|OPFL_FlagOutput,
- 1, MVT::i32, 1, 0,
- 0,
- 35, ARMISD::SRA_FLAG,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 5,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::MOVsra_flag), 0|OPFL_FlagOutput,
- 1, MVT::i32, 3, 0, 1, 2,
- 10,
- OPC_CheckPatternPredicate, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2MOVsra_flag), 0|OPFL_FlagOutput,
- 1, MVT::i32, 1, 0,
- 0,
- 46, ISD::MULHS,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 19,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::SMMUL), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 19,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2SMMUL), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 0,
- 43, ISD::CTLZ,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 36,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::CLZ), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2CLZ), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 43, ARMISD::RBIT,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::RBIT), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2RBIT), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 62, ISD::BSWAP,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::REV), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tREV), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 18,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2REV), 0,
- 1, MVT::i32, 3, 0, 1, 2,
- 0,
- 43, ARMISD::MEMBARRIER,
- OPC_RecordNode,
- OPC_Scope, 11,
- OPC_CheckPatternPredicate, 37,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::Int_MemBarrierV7), 0|OPFL_Chain,
- 0, 0,
- 15,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::Int_MemBarrierV6), 0|OPFL_Chain,
- 0, 1, 1,
- 11,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2Int_MemBarrierV7), 0|OPFL_Chain,
- 0, 0,
- 0,
- 43, ARMISD::SYNCBARRIER,
- OPC_RecordNode,
- OPC_Scope, 11,
- OPC_CheckPatternPredicate, 37,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::Int_SyncBarrierV7), 0|OPFL_Chain,
- 0, 0,
- 15,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::Int_SyncBarrierV6), 0|OPFL_Chain,
- 0, 1, 1,
- 11,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2Int_SyncBarrierV7), 0|OPFL_Chain,
- 0, 0,
- 0,
- 34, ARMISD::THREAD_POINTER,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 9,
- OPC_CheckPatternPredicate, 5,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::TPsoft), 0,
- 1, MVT::i32, 0,
- 9,
- OPC_CheckPatternPredicate, 9,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tTPsoft), 0,
- 1, MVT::i32, 0,
- 9,
- OPC_CheckPatternPredicate, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2TPsoft), 0,
- 1, MVT::i32, 0,
- 0,
- 46, ARMISD::EH_SJLJ_SETJMP,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i32,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_CheckType, MVT::i32,
- OPC_Scope, 11,
- OPC_CheckPatternPredicate, 5,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::Int_eh_sjlj_setjmp), 0,
- 1, MVT::i32, 2, 0, 1,
- 11,
- OPC_CheckPatternPredicate, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::tInt_eh_sjlj_setjmp), 0,
- 1, MVT::i32, 2, 0, 1,
- 11,
- OPC_CheckPatternPredicate, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::t2Int_eh_sjlj_setjmp), 0,
- 1, MVT::i32, 2, 0, 1,
- 0,
- 47, ARMISD::CMPFP,
- OPC_RecordChild0,
- OPC_Scope, 21,
- OPC_CheckChild0Type, MVT::f64,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCMPED), 0|OPFL_FlagOutput,
- 0, 4, 0, 1, 2, 3,
- 21,
- OPC_CheckChild0Type, MVT::f32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCMPES), 0|OPFL_FlagOutput,
- 0, 4, 0, 1, 2, 3,
- 0,
- 46, ISD::FDIV,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_SwitchType , 19, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDIVD), 0,
- 1, MVT::f64, 4, 0, 1, 2, 3,
- 19, MVT::f32,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VDIVS), 0,
- 1, MVT::f32, 4, 0, 1, 2, 3,
- 0,
- 97, ISD::FABS,
- OPC_RecordChild0,
- OPC_SwitchType , 18, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABSD), 0,
- 1, MVT::f64, 3, 0, 1, 2,
- 72, MVT::f32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 10,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VABSS), 0,
- 1, MVT::f32, 3, 0, 1, 2,
- 50,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::f64, 3, 1, 0, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VABSfd_sfp), 0,
- 1, MVT::f64, 3, 3, 4, 5,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 6, 7,
- 0,
- 0,
- 43, ARMISD::CMPFPw0,
- OPC_RecordChild0,
- OPC_Scope, 19,
- OPC_CheckChild0Type, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCMPEZD), 0|OPFL_FlagOutput,
- 0, 3, 0, 1, 2,
- 19,
- OPC_CheckChild0Type, MVT::f32,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCMPEZS), 0|OPFL_FlagOutput,
- 0, 3, 0, 1, 2,
- 0,
- 23, ISD::FP_EXTEND,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::f32,
- OPC_CheckType, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTDS), 0,
- 1, MVT::f64, 3, 0, 1, 2,
- 23, ISD::FP_ROUND,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::f64,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTSD), 0,
- 1, MVT::f32, 3, 0, 1, 2,
- 43, ISD::FSQRT,
- OPC_RecordChild0,
- OPC_SwitchType , 18, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSQRTD), 0,
- 1, MVT::f64, 3, 0, 1, 2,
- 18, MVT::f32,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSQRTS), 0,
- 1, MVT::f32, 3, 0, 1, 2,
- 0,
- 21, ARMISD::VMOVDRR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VMOVDRR), 0,
- 1, MVT::f64, 4, 0, 1, 2, 3,
- 97, ARMISD::SITOF,
- OPC_RecordChild0,
- OPC_SwitchType , 18, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSITOD), 0,
- 1, MVT::f64, 3, 0, 1, 2,
- 72, MVT::f32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 10,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSITOS), 0,
- 1, MVT::f32, 3, 0, 1, 2,
- 50,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2i32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::f64, 3, 1, 0, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VCVTs2fd_sfp), 0,
- 1, MVT::f64, 3, 3, 4, 5,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 6, 7,
- 0,
- 0,
- 97, ARMISD::UITOF,
- OPC_RecordChild0,
- OPC_SwitchType , 18, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VUITOD), 0,
- 1, MVT::f64, 3, 0, 1, 2,
- 72, MVT::f32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 10,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VUITOS), 0,
- 1, MVT::f32, 3, 0, 1, 2,
- 50,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2i32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::f64, 3, 1, 0, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VCVTu2fd_sfp), 0,
- 1, MVT::f64, 3, 3, 4, 5,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 6, 7,
- 0,
- 0,
- 99, ARMISD::FTOSI,
- OPC_RecordChild0,
- OPC_Scope, 20,
- OPC_CheckChild0Type, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTOSIZD), 0,
- 1, MVT::f32, 3, 0, 1, 2,
- 74,
- OPC_CheckChild0Type, MVT::f32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 10,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTOSIZS), 0,
- 1, MVT::f32, 3, 0, 1, 2,
- 50,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::f64, 3, 1, 0, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VCVTf2sd_sfp), 0,
- 1, MVT::f64, 3, 3, 4, 5,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 6, 7,
- 0,
- 0,
- 99, ARMISD::FTOUI,
- OPC_RecordChild0,
- OPC_Scope, 20,
- OPC_CheckChild0Type, MVT::f64,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTOUIZD), 0,
- 1, MVT::f32, 3, 0, 1, 2,
- 74,
- OPC_CheckChild0Type, MVT::f32,
- OPC_Scope, 18,
- OPC_CheckPatternPredicate, 10,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTOUIZS), 0,
- 1, MVT::f32, 3, 0, 1, 2,
- 50,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::f64, 3, 1, 0, 2,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VCVTf2ud_sfp), 0,
- 1, MVT::f64, 3, 3, 4, 5,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 6, 7,
- 0,
- 0,
- 17, ARMISD::FMSTAT,
- OPC_CaptureFlagInput,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::FMSTAT), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 2, 0, 1,
- 71|128,1, ARMISD::VCEQ,
- OPC_RecordChild0,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckChild0Type, MVT::v8i8,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCEQv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 22, MVT::v4i16,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCEQv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 48, MVT::v2i32,
- OPC_Scope, 22,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCEQv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 22,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCEQfd), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 0,
- 22, MVT::v16i8,
- OPC_CheckChild0Type, MVT::v16i8,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCEQv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 22, MVT::v8i16,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCEQv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 48, MVT::v4i32,
- OPC_Scope, 22,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCEQv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 22,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCEQfq), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 71|128,1, ARMISD::VCGE,
- OPC_RecordChild0,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckChild0Type, MVT::v8i8,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGEsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 22, MVT::v4i16,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGEsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 48, MVT::v2i32,
- OPC_Scope, 22,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGEsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 22,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGEfd), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 0,
- 22, MVT::v16i8,
- OPC_CheckChild0Type, MVT::v16i8,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGEsv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 22, MVT::v8i16,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGEsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 48, MVT::v4i32,
- OPC_Scope, 22,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGEsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 22,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGEfq), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 19|128,1, ARMISD::VCGEU,
- OPC_RecordChild0,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckChild0Type, MVT::v8i8,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGEuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 22, MVT::v4i16,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGEuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 22, MVT::v2i32,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGEuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 22, MVT::v16i8,
- OPC_CheckChild0Type, MVT::v16i8,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGEuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 22, MVT::v8i16,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGEuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 22, MVT::v4i32,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGEuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 0,
- 71|128,1, ARMISD::VCGT,
- OPC_RecordChild0,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckChild0Type, MVT::v8i8,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGTsv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 22, MVT::v4i16,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGTsv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 48, MVT::v2i32,
- OPC_Scope, 22,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGTsv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 22,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGTfd), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 0,
- 22, MVT::v16i8,
- OPC_CheckChild0Type, MVT::v16i8,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGTsv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 22, MVT::v8i16,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGTsv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 48, MVT::v4i32,
- OPC_Scope, 22,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGTsv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 22,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGTfq), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 0,
- 0,
- 19|128,1, ARMISD::VCGTU,
- OPC_RecordChild0,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckChild0Type, MVT::v8i8,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGTuv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 22, MVT::v4i16,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGTuv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 22, MVT::v2i32,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGTuv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 22, MVT::v16i8,
- OPC_CheckChild0Type, MVT::v16i8,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGTuv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 22, MVT::v8i16,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGTuv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 22, MVT::v4i32,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCGTuv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 0,
- 19|128,1, ARMISD::VTST,
- OPC_RecordChild0,
- OPC_SwitchType , 22, MVT::v8i8,
- OPC_CheckChild0Type, MVT::v8i8,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTSTv8i8), 0,
- 1, MVT::v8i8, 4, 0, 1, 2, 3,
- 22, MVT::v4i16,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTSTv4i16), 0,
- 1, MVT::v4i16, 4, 0, 1, 2, 3,
- 22, MVT::v2i32,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTSTv2i32), 0,
- 1, MVT::v2i32, 4, 0, 1, 2, 3,
- 22, MVT::v16i8,
- OPC_CheckChild0Type, MVT::v16i8,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTSTv16i8), 0,
- 1, MVT::v16i8, 4, 0, 1, 2, 3,
- 22, MVT::v8i16,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTSTv8i16), 0,
- 1, MVT::v8i16, 4, 0, 1, 2, 3,
- 22, MVT::v4i32,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VTSTv4i32), 0,
- 1, MVT::v4i32, 4, 0, 1, 2, 3,
- 0,
- 47, ISD::FP_TO_SINT,
- OPC_RecordChild0,
- OPC_SwitchType , 20, MVT::v2i32,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTf2sd), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 20, MVT::v4i32,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTf2sq), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 0,
- 47, ISD::FP_TO_UINT,
- OPC_RecordChild0,
- OPC_SwitchType , 20, MVT::v2i32,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTf2ud), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 20, MVT::v4i32,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTf2uq), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 0,
- 47, ISD::SINT_TO_FP,
- OPC_RecordChild0,
- OPC_SwitchType , 20, MVT::v2f32,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTs2fd), 0,
- 1, MVT::v2f32, 3, 0, 1, 2,
- 20, MVT::v4f32,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTs2fq), 0,
- 1, MVT::v4f32, 3, 0, 1, 2,
- 0,
- 47, ISD::UINT_TO_FP,
- OPC_RecordChild0,
- OPC_SwitchType , 20, MVT::v2f32,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTu2fd), 0,
- 1, MVT::v2f32, 3, 0, 1, 2,
- 20, MVT::v4f32,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VCVTu2fq), 0,
- 1, MVT::v4f32, 3, 0, 1, 2,
- 0,
- 35|128,1, ARMISD::VREV64,
- OPC_RecordChild0,
- OPC_SwitchType , 18, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VREV64d8), 0,
- 1, MVT::v8i8, 3, 0, 1, 2,
- 18, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VREV64d16), 0,
- 1, MVT::v4i16, 3, 0, 1, 2,
- 18, MVT::v2i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VREV64d32), 0,
- 1, MVT::v2i32, 3, 0, 1, 2,
- 18, MVT::v2f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VREV64df), 0,
- 1, MVT::v2f32, 3, 0, 1, 2,
- 18, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VREV64q8), 0,
- 1, MVT::v16i8, 3, 0, 1, 2,
- 18, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VREV64q16), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 18, MVT::v4i32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VREV64q32), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 18, MVT::v4f32,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VREV64qf), 0,
- 1, MVT::v4f32, 3, 0, 1, 2,
- 0,
- 83, ARMISD::VREV32,
- OPC_RecordChild0,
- OPC_SwitchType , 18, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VREV32d8), 0,
- 1, MVT::v8i8, 3, 0, 1, 2,
- 18, MVT::v4i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VREV32d16), 0,
- 1, MVT::v4i16, 3, 0, 1, 2,
- 18, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VREV32q8), 0,
- 1, MVT::v16i8, 3, 0, 1, 2,
- 18, MVT::v8i16,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VREV32q16), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 0,
- 43, ARMISD::VREV16,
- OPC_RecordChild0,
- OPC_SwitchType , 18, MVT::v8i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VREV16d8), 0,
- 1, MVT::v8i8, 3, 0, 1, 2,
- 18, MVT::v16i8,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VREV16q8), 0,
- 1, MVT::v16i8, 3, 0, 1, 2,
- 0,
- 67|128,2, ISD::SCALAR_TO_VECTOR,
- OPC_RecordChild0,
- OPC_Scope, 48,
- OPC_CheckChild0Type, MVT::f32,
- OPC_SwitchType , 20, MVT::v2f32,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::v2f32, 3, 1, 0, 2,
- 20, MVT::v4f32,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v4f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::v4f32, 3, 1, 0, 2,
- 0,
- 24,
- OPC_CheckChild0Type, MVT::f64,
- OPC_CheckType, MVT::v2f64,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f64, 0,
- OPC_EmitInteger, MVT::i32, 5,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::v2f64, 3, 1, 0, 2,
- 116|128,1,
- OPC_CheckChild0Type, MVT::i32,
- OPC_SwitchType , 28, MVT::v8i8,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v8i8, 0,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSETLNi8), 0,
- 1, MVT::v8i8, 5, 1, 0, 2, 3, 4,
- 28, MVT::v4i16,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v4i16, 0,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSETLNi16), 0,
- 1, MVT::v4i16, 5, 1, 0, 2, 3, 4,
- 28, MVT::v2i32,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2i32, 0,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(ARM::VSETLNi32), 0,
- 1, MVT::v2i32, 5, 1, 0, 2, 3, 4,
- 48, MVT::v16i8,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v16i8, 0,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v8i8, 0,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VSETLNi8), 0,
- 1, MVT::f64, 5, 2, 0, 3, 4, 5,
- OPC_EmitInteger, MVT::i32, 5,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::v16i8, 3, 1, 6, 7,
- 48, MVT::v8i16,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v8i16, 0,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v4i16, 0,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VSETLNi16), 0,
- 1, MVT::f64, 5, 2, 0, 3, 4, 5,
- OPC_EmitInteger, MVT::i32, 5,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::v8i16, 3, 1, 6, 7,
- 48, MVT::v4i32,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v4i32, 0,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2i32, 0,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VSETLNi32), 0,
- 1, MVT::f64, 5, 2, 0, 3, 4, 5,
- OPC_EmitInteger, MVT::i32, 5,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::v4i32, 3, 1, 6, 7,
- 0,
- 0,
- 73, ARMISD::FMAX,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::f64, 3, 2, 0, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::f64, 3, 5, 1, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VMAXfd_sfp), 0,
- 1, MVT::f64, 4, 4, 7, 8, 9,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 10, 11,
- 73, ARMISD::FMIN,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::f64, 3, 2, 0, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::f64, 3, 5, 1, 6,
- OPC_EmitInteger, MVT::i32, 14,
- OPC_EmitRegister, MVT::i32, 0 ,
- OPC_EmitNode, TARGET_OPCODE(ARM::VMINfd_sfp), 0,
- 1, MVT::f64, 4, 4, 7, 8, 9,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 10, 11,
- 0,
- 0
- }; // Total Array size is 62431 bytes
-
- #undef TARGET_OPCODE
- return SelectCodeCommon(N, MatcherTable,sizeof(MatcherTable));
-}
-
-bool CheckPatternPredicate(unsigned PredNo) const {
- switch (PredNo) {
- default: assert(0 && "Invalid predicate in table?");
- case 0: return (!Subtarget->isThumb()) && (Subtarget->hasV6Ops());
- case 1: return (Subtarget->isThumb1Only()) && (Subtarget->hasV6Ops());
- case 2: return (Subtarget->isThumb2());
- case 3: return (Subtarget->hasNEON());
- case 4: return (!Subtarget->isThumb()) && (Subtarget->hasV6T2Ops());
- case 5: return (!Subtarget->isThumb());
- case 6: return (Subtarget->isThumb1Only());
- case 7: return (!Subtarget->isThumb()) && (Subtarget->hasV5TEOps());
- case 8: return (Subtarget->hasVFP2());
- case 9: return (Subtarget->isThumb());
- case 10: return (Subtarget->hasVFP2()) && (!Subtarget->useNEONForSinglePrecisionFP());
- case 11: return (Subtarget->hasNEON()) && (Subtarget->useNEONForSinglePrecisionFP());
- case 12: return (!Subtarget->useNEONForSinglePrecisionFP());
- case 13: return (!HonorSignDependentRoundingFPMath());
- case 14: return (!Subtarget->isThumb()) && (!Subtarget->isTargetDarwin());
- case 15: return (!Subtarget->isThumb()) && (Subtarget->isTargetDarwin());
- case 16: return (Subtarget->isThumb()) && (Subtarget->hasV5TOps()) && (!Subtarget->isTargetDarwin());
- case 17: return (Subtarget->isThumb()) && (Subtarget->hasV5TOps()) && (Subtarget->isTargetDarwin());
- case 18: return (!Subtarget->isThumb()) && (Subtarget->hasV5TOps()) && (!Subtarget->isTargetDarwin());
- case 19: return (!Subtarget->isThumb()) && (Subtarget->hasV5TOps()) && (Subtarget->isTargetDarwin());
- case 20: return (!Subtarget->isThumb()) && (!Subtarget->useMovt());
- case 21: return (!Subtarget->isThumb()) && (Subtarget->useMovt());
- case 22: return (Subtarget->isThumb2()) && (!Subtarget->useMovt());
- case 23: return (Subtarget->isThumb2()) && (Subtarget->useMovt());
- case 24: return (Subtarget->isThumb()) && (!Subtarget->isTargetDarwin());
- case 25: return (Subtarget->isThumb()) && (Subtarget->isTargetDarwin());
- case 26: return (!Subtarget->isThumb()) && (!Subtarget->hasV6T2Ops());
- case 27: return (Subtarget->hasVFP3());
- case 28: return (!Subtarget->isThumb()) && (Subtarget->hasV4TOps());
- case 29: return (!Subtarget->isThumb()) && (!Subtarget->hasV4TOps());
- case 30: return (!Subtarget->isThumb()) && (Subtarget->hasV4TOps()) && (!Subtarget->isTargetDarwin());
- case 31: return (!Subtarget->isThumb()) && (!Subtarget->hasV4TOps()) && (!Subtarget->isTargetDarwin());
- case 32: return (!Subtarget->isThumb()) && (Subtarget->hasV4TOps()) && (Subtarget->isTargetDarwin());
- case 33: return (!Subtarget->isThumb()) && (!Subtarget->hasV4TOps()) && (Subtarget->isTargetDarwin());
- case 34: return (Subtarget->isThumb1Only()) && (!Subtarget->isTargetDarwin());
- case 35: return (Subtarget->isThumb1Only()) && (Subtarget->isTargetDarwin());
- case 36: return (!Subtarget->isThumb()) && (Subtarget->hasV5TOps());
- case 37: return (!Subtarget->isThumb()) && (Subtarget->hasV7Ops());
- }
-}
-
-bool CheckNodePredicate(SDNode *Node, unsigned PredNo) const {
- switch (PredNo) {
- default: assert(0 && "Invalid predicate in table?");
- case 0: { // Predicate_imm16_31
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return (int32_t)N->getZExtValue() >= 16 && (int32_t)N->getZExtValue() < 32;
-
- }
- case 1: { // Predicate_imm1_15
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return (int32_t)N->getZExtValue() >= 1 && (int32_t)N->getZExtValue() < 16;
-
- }
- case 2: { // Predicate_immAllOnesV_bc
- SDNode *N = Node;
-
- return ISD::isBuildVectorAllOnes(N);
-
- }
- case 3: { // Predicate_lo16AllZero
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- // Returns true if all low 16-bits are 0.
- return (((uint32_t)N->getZExtValue()) & 0xFFFFUL) == 0;
-
- }
- case 4: { // Predicate_t2_so_imm
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return ARM_AM::getT2SOImmVal((uint32_t)N->getZExtValue()) != -1;
-
- }
- case 5: { // Predicate_so_imm
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return ARM_AM::getSOImmVal(N->getZExtValue()) != -1;
-
- }
- case 6: { // Predicate_t2_so_imm_not
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return ARM_AM::getT2SOImmVal(~((uint32_t)N->getZExtValue())) != -1;
-
- }
- case 7: { // Predicate_so_imm2part
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return ARM_AM::isSOImmTwoPartVal((unsigned)N->getZExtValue());
-
- }
- case 8: { // Predicate_t2_so_imm2part
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return ARM_AM::isT2SOImmTwoPartVal((unsigned)N->getZExtValue());
-
- }
- case 9: { // Predicate_rot_imm
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- int32_t v = (int32_t)N->getZExtValue();
- return v == 8 || v == 16 || v == 24;
-
- }
- case 10: { // Predicate_sext_16_node
- SDNode *N = Node;
-
- return CurDAG->ComputeNumSignBits(SDValue(N,0)) >= 17;
-
- }
- case 11: { // Predicate_imm0_255_neg
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return (uint32_t)(-N->getZExtValue()) < 255;
-
- }
- case 12: { // Predicate_so_imm_neg
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return ARM_AM::getSOImmVal(-(int)N->getZExtValue()) != -1;
-
- }
- case 13: { // Predicate_imm0_7
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return (uint32_t)N->getZExtValue() < 8;
-
- }
- case 14: { // Predicate_imm8_255
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return (uint32_t)N->getZExtValue() >= 8 && (uint32_t)N->getZExtValue() < 256;
-
- }
- case 15: { // Predicate_imm0_7_neg
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return (uint32_t)-N->getZExtValue() < 8;
-
- }
- case 16: { // Predicate_imm8_255_neg
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- unsigned Val = -N->getZExtValue();
- return Val >= 8 && Val < 256;
-
- }
- case 17: { // Predicate_imm0_4095
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return (uint32_t)N->getZExtValue() < 4096;
-
- }
- case 18: { // Predicate_t2_so_imm_neg
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return ARM_AM::getT2SOImmVal(-((int)N->getZExtValue())) != -1;
-
- }
- case 19: { // Predicate_imm0_4095_neg
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return (uint32_t)(-N->getZExtValue()) < 4096;
-
- }
- case 20: { // Predicate_so_neg_imm2part
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return ARM_AM::isSOImmTwoPartVal(-(int)N->getZExtValue());
-
- }
- case 21: { // Predicate_t2_so_neg_imm2part
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return ARM_AM::isT2SOImmTwoPartVal(-(int)N->getZExtValue());
-
- }
- case 22: { // Predicate_bf_inv_mask_imm
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- uint32_t v = (uint32_t)N->getZExtValue();
- if (v == 0xffffffff)
- return 0;
- // there can be 1's on either or both "outsides", all the "inside"
- // bits must be 0's
- unsigned int lsb = 0, msb = 31;
- while (v & (1 << msb)) --msb;
- while (v & (1 << lsb)) ++lsb;
- for (unsigned int i = lsb; i <= msb; ++i) {
- if (v & (1 << i))
- return 0;
- }
- return 1;
-
- }
- case 23: { // Predicate_so_imm_not
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return ARM_AM::getSOImmVal(~(int)N->getZExtValue()) != -1;
-
- }
- case 24: { // Predicate_imm1_31
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return (int32_t)N->getZExtValue() >= 1 && (int32_t)N->getZExtValue() < 32;
-
- }
- case 25: { // Predicate_unindexedload
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
-
- }
- case 26: { // Predicate_load
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
-
- }
- case 27: { // Predicate_zextload
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
-
- }
- case 28: { // Predicate_zextloadi16
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
-
- }
- case 29: { // Predicate_zextloadi8
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
-
- }
- case 30: { // Predicate_sextload
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
-
- }
- case 31: { // Predicate_sextloadi16
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
-
- }
- case 32: { // Predicate_sextloadi8
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
-
- }
- case 33: { // Predicate_zextloadi1
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
-
- }
- case 34: { // Predicate_extload
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
-
- }
- case 35: { // Predicate_extloadi1
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
-
- }
- case 36: { // Predicate_extloadi8
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
-
- }
- case 37: { // Predicate_extloadi16
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
-
- }
- case 38: { // Predicate_unindexedstore
- SDNode *N = Node;
-
- return cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
-
- }
- case 39: { // Predicate_store
- SDNode *N = Node;
-
- return !cast<StoreSDNode>(N)->isTruncatingStore();
-
- }
- case 40: { // Predicate_truncstore
- SDNode *N = Node;
-
- return cast<StoreSDNode>(N)->isTruncatingStore();
-
- }
- case 41: { // Predicate_truncstorei16
- SDNode *N = Node;
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
-
- }
- case 42: { // Predicate_truncstorei8
- SDNode *N = Node;
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
-
- }
- case 43: { // Predicate_istore
- SDNode *N = Node;
-
- return !cast<StoreSDNode>(N)->isTruncatingStore();
-
- }
- case 44: { // Predicate_pre_store
- SDNode *N = Node;
-
- ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
- return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
-
- }
- case 45: { // Predicate_post_store
- SDNode *N = Node;
-
- ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
- return AM == ISD::POST_INC || AM == ISD::POST_DEC;
-
- }
- case 46: { // Predicate_itruncstore
- SDNode *N = Node;
-
- return cast<StoreSDNode>(N)->isTruncatingStore();
-
- }
- case 47: { // Predicate_pre_truncst
- SDNode *N = Node;
-
- ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
- return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
-
- }
- case 48: { // Predicate_pre_truncsti16
- SDNode *N = Node;
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
-
- }
- case 49: { // Predicate_post_truncst
- SDNode *N = Node;
-
- ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
- return AM == ISD::POST_INC || AM == ISD::POST_DEC;
-
- }
- case 50: { // Predicate_post_truncsti16
- SDNode *N = Node;
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
-
- }
- case 51: { // Predicate_pre_truncsti8
- SDNode *N = Node;
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
-
- }
- case 52: { // Predicate_post_truncsti8
- SDNode *N = Node;
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
-
- }
- case 53: { // Predicate_imm0_255
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return (uint32_t)N->getZExtValue() < 256;
-
- }
- case 54: { // Predicate_immAllOnesV
- SDNode *N = Node;
-
- return ISD::isBuildVectorAllOnes(N);
-
- }
- case 55: { // Predicate_adde_dead_carry
- SDNode *N = Node;
-return !N->hasAnyUseOfValue(1);
- }
- case 56: { // Predicate_adde_live_carry
- SDNode *N = Node;
-return N->hasAnyUseOfValue(1);
- }
- case 57: { // Predicate_sube_dead_carry
- SDNode *N = Node;
-return !N->hasAnyUseOfValue(1);
- }
- case 58: { // Predicate_sube_live_carry
- SDNode *N = Node;
-return N->hasAnyUseOfValue(1);
- }
- case 59: { // Predicate_immAllZerosV
- SDNode *N = Node;
-
- return ISD::isBuildVectorAllZeros(N);
-
- }
- case 60: { // Predicate_immAllZerosV_bc
- SDNode *N = Node;
-
- return ISD::isBuildVectorAllZeros(N);
-
- }
- case 61: { // Predicate_imm0_65535
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return (uint32_t)N->getZExtValue() < 65536;
-
- }
- case 62: { // Predicate_thumb_immshifted
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return ARM_AM::isThumbImmShiftedVal((unsigned)N->getZExtValue());
-
- }
- case 63: { // Predicate_imm0_255_comp
- ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return ~((uint32_t)N->getZExtValue()) < 256;
-
- }
- case 64: { // Predicate_vfp_f64imm
- ConstantFPSDNode*N = cast<ConstantFPSDNode>(Node);
-
- return ARM::getVFPf64Imm(N->getValueAPF()) != -1;
-
- }
- case 65: { // Predicate_vfp_f32imm
- ConstantFPSDNode*N = cast<ConstantFPSDNode>(Node);
-
- return ARM::getVFPf32Imm(N->getValueAPF()) != -1;
-
- }
- case 66: { // Predicate_vmovImm8
- SDNode *N = Node;
-
- return ARM::getVMOVImm(N, 1, *CurDAG).getNode() != 0;
-
- }
- case 67: { // Predicate_vmovImm16
- SDNode *N = Node;
-
- return ARM::getVMOVImm(N, 2, *CurDAG).getNode() != 0;
-
- }
- case 68: { // Predicate_vmovImm32
- SDNode *N = Node;
-
- return ARM::getVMOVImm(N, 4, *CurDAG).getNode() != 0;
-
- }
- case 69: { // Predicate_vmovImm64
- SDNode *N = Node;
-
- return ARM::getVMOVImm(N, 8, *CurDAG).getNode() != 0;
-
- }
- case 70: { // Predicate_atomic_load_add_8
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
- }
- case 71: { // Predicate_atomic_load_add_16
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
- }
- case 72: { // Predicate_atomic_load_add_32
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
- }
- case 73: { // Predicate_atomic_load_sub_8
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
- }
- case 74: { // Predicate_atomic_load_sub_16
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
- }
- case 75: { // Predicate_atomic_load_sub_32
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
- }
- case 76: { // Predicate_atomic_load_and_8
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
- }
- case 77: { // Predicate_atomic_load_and_16
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
- }
- case 78: { // Predicate_atomic_load_and_32
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
- }
- case 79: { // Predicate_atomic_load_or_8
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
- }
- case 80: { // Predicate_atomic_load_or_16
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
- }
- case 81: { // Predicate_atomic_load_or_32
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
- }
- case 82: { // Predicate_atomic_load_xor_8
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
- }
- case 83: { // Predicate_atomic_load_xor_16
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
- }
- case 84: { // Predicate_atomic_load_xor_32
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
- }
- case 85: { // Predicate_atomic_load_nand_8
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
- }
- case 86: { // Predicate_atomic_load_nand_16
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
- }
- case 87: { // Predicate_atomic_load_nand_32
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
- }
- case 88: { // Predicate_atomic_swap_8
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
- }
- case 89: { // Predicate_atomic_swap_16
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
- }
- case 90: { // Predicate_atomic_swap_32
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
- }
- case 91: { // Predicate_atomic_cmp_swap_8
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
- }
- case 92: { // Predicate_atomic_cmp_swap_16
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
- }
- case 93: { // Predicate_atomic_cmp_swap_32
- SDNode *N = Node;
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
- }
- }
-}
-
-bool CheckComplexPattern(SDNode *Root, SDValue N,
- unsigned PatternNo, SmallVectorImpl<SDValue> &Result) {
- switch (PatternNo) {
- default: assert(0 && "Invalid pattern # in table?");
- case 0:
- Result.resize(Result.size()+2);
- return SelectT2ShifterOperandReg(Root, N, Result[Result.size()-2], Result[Result.size()-1]);
- case 1:
- Result.resize(Result.size()+3);
- return SelectShifterOperandReg(Root, N, Result[Result.size()-3], Result[Result.size()-2], Result[Result.size()-1]);
- case 2:
- Result.resize(Result.size()+3);
- return SelectAddrMode2(Root, N, Result[Result.size()-3], Result[Result.size()-2], Result[Result.size()-1]);
- case 3:
- Result.resize(Result.size()+2);
- return SelectAddrModePC(Root, N, Result[Result.size()-2], Result[Result.size()-1]);
- case 4:
- Result.resize(Result.size()+2);
- return SelectThumbAddrModeRR(Root, N, Result[Result.size()-2], Result[Result.size()-1]);
- case 5:
- Result.resize(Result.size()+3);
- return SelectAddrMode3(Root, N, Result[Result.size()-3], Result[Result.size()-2], Result[Result.size()-1]);
- case 6:
- Result.resize(Result.size()+3);
- return SelectThumbAddrModeS4(Root, N, Result[Result.size()-3], Result[Result.size()-2], Result[Result.size()-1]);
- case 7:
- Result.resize(Result.size()+3);
- return SelectThumbAddrModeS1(Root, N, Result[Result.size()-3], Result[Result.size()-2], Result[Result.size()-1]);
- case 8:
- Result.resize(Result.size()+3);
- return SelectThumbAddrModeS2(Root, N, Result[Result.size()-3], Result[Result.size()-2], Result[Result.size()-1]);
- case 9:
- Result.resize(Result.size()+3);
- return SelectT2AddrModeSoReg(Root, N, Result[Result.size()-3], Result[Result.size()-2], Result[Result.size()-1]);
- case 10:
- Result.resize(Result.size()+2);
- return SelectThumbAddrModeSP(Root, N, Result[Result.size()-2], Result[Result.size()-1]);
- case 11:
- Result.resize(Result.size()+2);
- return SelectT2AddrModeImm12(Root, N, Result[Result.size()-2], Result[Result.size()-1]);
- case 12:
- Result.resize(Result.size()+2);
- return SelectT2AddrModeImm8(Root, N, Result[Result.size()-2], Result[Result.size()-1]);
- case 13:
- Result.resize(Result.size()+2);
- return SelectAddrMode5(Root, N, Result[Result.size()-2], Result[Result.size()-1]);
- case 14:
- Result.resize(Result.size()+2);
- return SelectAddrMode4(Root, N, Result[Result.size()-2], Result[Result.size()-1]);
- case 15:
- Result.resize(Result.size()+2);
- return SelectAddrMode2Offset(Root, N, Result[Result.size()-2], Result[Result.size()-1]);
- case 16:
- Result.resize(Result.size()+2);
- return SelectAddrMode3Offset(Root, N, Result[Result.size()-2], Result[Result.size()-1]);
- case 17:
- Result.resize(Result.size()+1);
- return SelectT2AddrModeImm8Offset(Root, N, Result[Result.size()-1]);
- case 18:
- Result.resize(Result.size()+4);
- return SelectAddrMode6(Root, N, Result[Result.size()-4], Result[Result.size()-3], Result[Result.size()-2], Result[Result.size()-1]);
- }
-}
-
-SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {
- switch (XFormNo) {
- default: assert(0 && "Invalid xform # in table?");
- case 0: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- return CurDAG->getTargetConstant((uint32_t)N->getZExtValue() >> 16, MVT::i32);
-
- }
- case 1: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), MVT::i32);
-
- }
- case 2: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- unsigned V = ARM_AM::getSOImmTwoPartFirst((unsigned)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-
- }
- case 3: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- unsigned V = ARM_AM::getSOImmTwoPartSecond((unsigned)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-
- }
- case 4: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- unsigned V = ARM_AM::getT2SOImmTwoPartFirst((unsigned)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-
- }
- case 5: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- unsigned V = ARM_AM::getT2SOImmTwoPartSecond((unsigned)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-
- }
- case 6: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- return CurDAG->getTargetConstant(5 + N->getZExtValue() / 4, MVT::i32);
-
- }
- case 7: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- return CurDAG->getTargetConstant(N->getZExtValue() & 3, MVT::i32);
-
- }
- case 8: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- return CurDAG->getTargetConstant(5 + N->getZExtValue() / 2, MVT::i32);
-
- }
- case 9: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- return CurDAG->getTargetConstant(N->getZExtValue() & 1, MVT::i32);
-
- }
- case 10: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- return CurDAG->getTargetConstant(-(int)N->getZExtValue(), MVT::i32);
-
- }
- case 11: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- return CurDAG->getTargetConstant(-(int)N->getZExtValue(), MVT::i32);
-
- }
- case 12: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- return CurDAG->getTargetConstant(-((int)N->getZExtValue()), MVT::i32);
-
- }
- case 13: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- unsigned V = ARM_AM::getSOImmTwoPartFirst(-(int)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-
- }
- case 14: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- unsigned V = ARM_AM::getSOImmTwoPartSecond(-(int)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-
- }
- case 15: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- unsigned V = ARM_AM::getT2SOImmTwoPartFirst(-(int)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-
- }
- case 16: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- unsigned V = ARM_AM::getT2SOImmTwoPartSecond(-(int)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-
- }
- case 17: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- return CurDAG->getTargetConstant(~(int)N->getZExtValue(), MVT::i32);
-
- }
- case 18: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- return CurDAG->getTargetConstant(5 + N->getZExtValue() / 8, MVT::i32);
-
- }
- case 19: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- return CurDAG->getTargetConstant(N->getZExtValue() & 7, MVT::i32);
-
- }
- case 20: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- return CurDAG->getTargetConstant(5 + N->getZExtValue(), MVT::i32);
-
- }
- case 21: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- return CurDAG->getTargetConstant(1 + N->getZExtValue(), MVT::i32);
-
- }
- case 22: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- return CurDAG->getTargetConstant(5 + (1 - N->getZExtValue()), MVT::i32);
-
- }
- case 23: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- unsigned V = ARM_AM::getThumbImmNonShiftedVal((unsigned)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-
- }
- case 24: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- unsigned V = ARM_AM::getThumbImmValShift((unsigned)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-
- }
- case 25: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
-
- return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), MVT::i32);
-
- }
- case 26: {
- SDNode *N = V.getNode();
-
- return ARM::getVMOVImm(N, 1, *CurDAG);
-
- }
- case 27: {
- SDNode *N = V.getNode();
-
- return ARM::getVMOVImm(N, 2, *CurDAG);
-
- }
- case 28: {
- SDNode *N = V.getNode();
-
- return ARM::getVMOVImm(N, 4, *CurDAG);
-
- }
- case 29: {
- SDNode *N = V.getNode();
-
- return ARM::getVMOVImm(N, 8, *CurDAG);
-
- }
- }
-}
-
diff --git a/libclamav/c++/ARMGenInstrInfo.inc b/libclamav/c++/ARMGenInstrInfo.inc
deleted file mode 100644
index 8348acf..0000000
--- a/libclamav/c++/ARMGenInstrInfo.inc
+++ /dev/null
@@ -1,2154 +0,0 @@
-//===- TableGen'erated file -------------------------------------*- C++ -*-===//
-//
-// Target Instruction Descriptors
-//
-// Automatically generated file, do not edit!
-//
-//===----------------------------------------------------------------------===//
-
-namespace llvm {
-
-static const unsigned ImplicitList1[] = { ARM::CPSR, 0 };
-static const TargetRegisterClass* Barriers1[] = { &ARM::CCRRegClass, NULL };
-static const unsigned ImplicitList2[] = { ARM::SP, 0 };
-static const TargetRegisterClass* Barriers2[] = { &ARM::CCRRegClass, &ARM::DPR_8RegClass, NULL };
-static const unsigned ImplicitList3[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3, ARM::R12, ARM::LR, ARM::D0, ARM::D1, ARM::D2, ARM::D3, ARM::D4, ARM::D5, ARM::D6, ARM::D7, ARM::D16, ARM::D17, ARM::D18, ARM::D19, ARM::D20, ARM::D21, ARM::D22, ARM::D23, ARM::D24, ARM::D25, ARM::D26, ARM::D27, ARM::D28, ARM::D29, ARM::D30, ARM::D31, ARM::CPSR, ARM::FPSCR, 0 };
-static const unsigned ImplicitList4[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3, ARM::R9, ARM::R12, ARM::LR, ARM::D0, ARM::D1, ARM::D2, ARM::D3, ARM::D4, ARM::D5, ARM::D6, ARM::D7, ARM::D16, ARM::D17, ARM::D18, ARM::D19, ARM::D20, ARM::D21, ARM::D22, ARM::D23, ARM::D24, ARM::D25, ARM::D26, ARM::D27, ARM::D28, ARM::D29, ARM::D30, ARM::D31, ARM::CPSR, ARM::FPSCR, 0 };
-static const unsigned ImplicitList5[] = { ARM::FPSCR, 0 };
-static const TargetRegisterClass* Barriers3[] = { &ARM::DPRRegClass, &ARM::DPR_8RegClass, &ARM::DPR_VFP2RegClass, &ARM::tGPRRegClass, NULL };
-static const unsigned ImplicitList6[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3, ARM::R4, ARM::R5, ARM::R6, ARM::R7, ARM::R8, ARM::R9, ARM::R10, ARM::R11, ARM::R12, ARM::LR, ARM::D0, ARM::D1, ARM::D2, ARM::D3, ARM::D4, ARM::D5, ARM::D6, ARM::D7, ARM::D8, ARM::D9, ARM::D10, ARM::D11, ARM::D12, ARM::D13, ARM::D14, ARM::D15, ARM::D16, ARM::D17, ARM::D18, ARM::D19, ARM::D20, ARM::D21, ARM::D22, ARM::D23, ARM::D24, ARM::D25, ARM::D26, ARM::D27, ARM::D28, ARM::D29, ARM::D30, ARM::D31, 0 };
-static const unsigned ImplicitList7[] = { ARM::R0, ARM::R12, ARM::LR, ARM::CPSR, 0 };
-static const unsigned ImplicitList8[] = { ARM::LR, 0 };
-static const TargetRegisterClass* Barriers4[] = { &ARM::tGPRRegClass, NULL };
-static const unsigned ImplicitList9[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3, ARM::R4, ARM::R5, ARM::R6, ARM::R7, ARM::R12, 0 };
-static const unsigned ImplicitList10[] = { ARM::R0, ARM::LR, 0 };
-
-static const TargetOperandInfo OperandInfo2[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo3[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo4[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo5[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }, };
-static const TargetOperandInfo OperandInfo6[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }, };
-static const TargetOperandInfo OperandInfo7[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }, };
-static const TargetOperandInfo OperandInfo8[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo9[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo10[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo11[] = { { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo12[] = { { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo13[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo14[] = { { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo15[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo16[] = { { ARM::GPRRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo17[] = { { ARM::tGPRRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo18[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo19[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo20[] = { { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo21[] = { { ARM::GPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo22[] = { { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo23[] = { { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo24[] = { { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo25[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo26[] = { { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo27[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo28[] = { { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo29[] = { { ARM::DPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo30[] = { { ARM::SPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo31[] = { { 0, 0, 0 }, { 0, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo32[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo33[] = { { 0, 0, 0 }, { 0, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo34[] = { { 0, 0, 0 }, { 0, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo35[] = { { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo36[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, ((1 << 16) | (1 << TOI::TIED_TO)) }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo37[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, ((2 << 16) | (1 << TOI::TIED_TO)) }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo38[] = { { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo39[] = { { 0, 0, 0 }, { 0, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo40[] = { { 0, 0, 0 }, { 0, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo41[] = { { 0, 0, 0 }, { 0, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo42[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }, };
-static const TargetOperandInfo OperandInfo43[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo44[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo45[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo46[] = { { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }, };
-static const TargetOperandInfo OperandInfo47[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }, };
-static const TargetOperandInfo OperandInfo48[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }, };
-static const TargetOperandInfo OperandInfo49[] = { { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo50[] = { { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo51[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo52[] = { { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo53[] = { { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo54[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo55[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo56[] = { { ARM::GPRRegClassID, 0, (1 << TOI::EARLY_CLOBBER) }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo57[] = { { ARM::GPRRegClassID, 0, (1 << TOI::EARLY_CLOBBER) }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo58[] = { { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo59[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo60[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo61[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo62[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo63[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo64[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo65[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo66[] = { { ARM::SPRRegClassID, 0, 0 }, { ARM::SPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo67[] = { { ARM::DPR_VFP2RegClassID, 0, 0 }, { ARM::DPR_VFP2RegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo68[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo69[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo70[] = { { ARM::SPRRegClassID, 0, 0 }, { ARM::SPRRegClassID, 0, 0 }, { ARM::SPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo71[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo72[] = { { ARM::DPR_VFP2RegClassID, 0, 0 }, { ARM::DPR_VFP2RegClassID, 0, 0 }, { ARM::DPR_VFP2RegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo73[] = { { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo74[] = { { ARM::SPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo75[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::SPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo76[] = { { ARM::SPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo77[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo78[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo79[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo80[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo81[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo82[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::SPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo83[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo84[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo85[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo86[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo87[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo88[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo89[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo90[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { ARM::DPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, ((1 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo91[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo92[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { ARM::DPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, ((1 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, ((2 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo93[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, ((3 << 16) | (1 << TOI::TIED_TO)) }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo94[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { ARM::DPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, ((1 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, ((2 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, ((3 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo95[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, ((4 << 16) | (1 << TOI::TIED_TO)) }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo96[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo97[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo98[] = { { ARM::SPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo99[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPR_VFP2RegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo100[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPR_8RegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo101[] = { { ARM::SPRRegClassID, 0, 0 }, { ARM::SPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::SPRRegClassID, 0, 0 }, { ARM::SPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo102[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPR_VFP2RegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo103[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::QPRRegClassID, 0, 0 }, { ARM::DPR_VFP2RegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo104[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPR_8RegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo105[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::QPRRegClassID, 0, 0 }, { ARM::DPR_8RegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo106[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo107[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo108[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo109[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo110[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo111[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::SPRRegClassID, 0, 0 }, { ARM::SPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo112[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::SPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo113[] = { { ARM::SPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo114[] = { { ARM::SPRRegClassID, 0, 0 }, { ARM::SPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo115[] = { { ARM::SPRRegClassID, 0, 0 }, { ARM::SPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::SPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo116[] = { { ARM::QPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo117[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPR_VFP2RegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo118[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPR_8RegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo119[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPR_VFP2RegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo120[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, 0 }, { ARM::DPR_VFP2RegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo121[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPR_8RegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo122[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, 0 }, { ARM::DPR_8RegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo123[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::QPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo124[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo125[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::QPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo126[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo127[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo128[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo129[] = { { ARM::SPRRegClassID, 0, 0 }, { ARM::SPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo130[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo131[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo132[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo133[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { ARM::QPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo134[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo135[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo136[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo137[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo138[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo139[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo140[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo141[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo142[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo143[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo144[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo145[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo146[] = { { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, 0 }, { ARM::DPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::DPRRegClassID, 0, ((1 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo147[] = { { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, 0 }, { ARM::QPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::QPRRegClassID, 0, ((1 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo148[] = { { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo149[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::tGPRRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo150[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, ((1 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo151[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo152[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }, };
-static const TargetOperandInfo OperandInfo153[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }, };
-static const TargetOperandInfo OperandInfo154[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo155[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo156[] = { { ARM::tGPRRegClassID, 0, 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }, { ARM::tGPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::tGPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo157[] = { { ARM::tGPRRegClassID, 0, 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }, { ARM::tGPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo158[] = { { ARM::tGPRRegClassID, 0, 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }, { ARM::tGPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo159[] = { { ARM::tGPRRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo160[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { ARM::GPRRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo161[] = { { ARM::tGPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo162[] = { { ARM::tGPRRegClassID, 0, 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }, { ARM::tGPRRegClassID, 0, 0 }, { ARM::tGPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo163[] = { { ARM::GPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo164[] = { { ARM::tGPRRegClassID, 0, 0 }, { ARM::tGPRRegClassID, 0, 0 }, { ARM::tGPRRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo165[] = { { ARM::tGPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo166[] = { { ARM::tGPRRegClassID, 0, 0 }, { ARM::tGPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo167[] = { { ARM::tGPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo168[] = { { ARM::tGPRRegClassID, 0, 0 }, { ARM::tGPRRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo169[] = { { ARM::tGPRRegClassID, 0, 0 }, { ARM::tGPRRegClassID, 0, 0 }, { 0, 0, 0 }, { ARM::tGPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo170[] = { { ARM::tGPRRegClassID, 0, 0 }, { ARM::tGPRRegClassID, 0, 0 }, { ARM::tGPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo171[] = { { ARM::tGPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo172[] = { { ARM::tGPRRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo173[] = { { ARM::tGPRRegClassID, 0, 0 }, { ARM::tGPRRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo174[] = { { ARM::tGPRRegClassID, 0, 0 }, { ARM::GPRRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo175[] = { { ARM::tGPRRegClassID, 0, 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo176[] = { { ARM::tGPRRegClassID, 0, 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }, { ARM::tGPRRegClassID, 0, 0 }, { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo177[] = { { 0, 0|(1<<TOI::Predicate), 0 }, { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }, { 0, 0, 0 }, };
-
-static const TargetInstrDesc ARMInsts[] = {
- { 0, 0, 0, 128, "PHI", 0|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, 0 }, // Inst #0 = PHI
- { 1, 0, 0, 128, "INLINEASM", 0|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, 0 }, // Inst #1 = INLINEASM
- { 2, 1, 0, 128, "DBG_LABEL", 0|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo14 }, // Inst #2 = DBG_LABEL
- { 3, 1, 0, 128, "EH_LABEL", 0|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo14 }, // Inst #3 = EH_LABEL
- { 4, 1, 0, 128, "GC_LABEL", 0|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo14 }, // Inst #4 = GC_LABEL
- { 5, 0, 0, 128, "KILL", 0|(1<<TID::Variadic), 0, NULL, NULL, NULL, 0 }, // Inst #5 = KILL
- { 6, 3, 1, 128, "EXTRACT_SUBREG", 0, 0, NULL, NULL, NULL, OperandInfo28 }, // Inst #6 = EXTRACT_SUBREG
- { 7, 4, 1, 128, "INSERT_SUBREG", 0, 0, NULL, NULL, NULL, OperandInfo31 }, // Inst #7 = INSERT_SUBREG
- { 8, 1, 1, 128, "IMPLICIT_DEF", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0, NULL, NULL, NULL, OperandInfo14 }, // Inst #8 = IMPLICIT_DEF
- { 9, 4, 1, 128, "SUBREG_TO_REG", 0, 0, NULL, NULL, NULL, OperandInfo58 }, // Inst #9 = SUBREG_TO_REG
- { 10, 3, 1, 128, "COPY_TO_REGCLASS", 0|(1<<TID::CheapAsAMove), 0, NULL, NULL, NULL, OperandInfo28 }, // Inst #10 = COPY_TO_REGCLASS
- { 11, 0, 0, 128, "DBG_VALUE", 0|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::CheapAsAMove), 0, NULL, NULL, NULL, 0 }, // Inst #11 = DBG_VALUE
- { 12, 3, 1, 88, "ADCSSri", 0, 0|1|(3<<4)|(4<<9), ImplicitList1, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #12 = ADCSSri
- { 13, 3, 1, 89, "ADCSSrr", 0, 0|1|(3<<4)|(4<<9), ImplicitList1, ImplicitList1, Barriers1, OperandInfo3 }, // Inst #13 = ADCSSrr
- { 14, 5, 1, 91, "ADCSSrs", 0, 0|1|(3<<4)|(5<<9), ImplicitList1, ImplicitList1, Barriers1, OperandInfo4 }, // Inst #14 = ADCSSrs
- { 15, 6, 1, 88, "ADCri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #15 = ADCri
- { 16, 6, 1, 89, "ADCrr", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), ImplicitList1, NULL, NULL, OperandInfo6 }, // Inst #16 = ADCrr
- { 17, 8, 1, 91, "ADCrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(5<<9), ImplicitList1, NULL, NULL, OperandInfo7 }, // Inst #17 = ADCrs
- { 18, 5, 1, 88, "ADDSri", 0|(1<<TID::Predicable), 0|1|(3<<4)|(4<<9), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #18 = ADDSri
- { 19, 5, 1, 89, "ADDSrr", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|1|(3<<4)|(4<<9), NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #19 = ADDSrr
- { 20, 7, 1, 91, "ADDSrs", 0|(1<<TID::Predicable), 0|1|(3<<4)|(5<<9), NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #20 = ADDSrs
- { 21, 6, 1, 88, "ADDri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #21 = ADDri
- { 22, 6, 1, 89, "ADDrr", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #22 = ADDrr
- { 23, 8, 1, 91, "ADDrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(5<<9), NULL, NULL, NULL, OperandInfo7 }, // Inst #23 = ADDrs
- { 24, 3, 0, 128, "ADJCALLSTACKDOWN", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(1<<4), ImplicitList2, ImplicitList2, NULL, OperandInfo11 }, // Inst #24 = ADJCALLSTACKDOWN
- { 25, 4, 0, 128, "ADJCALLSTACKUP", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(1<<4), ImplicitList2, ImplicitList2, NULL, OperandInfo12 }, // Inst #25 = ADJCALLSTACKUP
- { 26, 6, 1, 88, "ANDri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #26 = ANDri
- { 27, 6, 1, 89, "ANDrr", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #27 = ANDrr
- { 28, 8, 1, 91, "ANDrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(5<<9), NULL, NULL, NULL, OperandInfo7 }, // Inst #28 = ANDrs
- { 29, 4, 1, 128, "ATOMIC_CMP_SWAP_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #29 = ATOMIC_CMP_SWAP_I16
- { 30, 4, 1, 128, "ATOMIC_CMP_SWAP_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #30 = ATOMIC_CMP_SWAP_I32
- { 31, 4, 1, 128, "ATOMIC_CMP_SWAP_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #31 = ATOMIC_CMP_SWAP_I8
- { 32, 3, 1, 128, "ATOMIC_LOAD_ADD_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #32 = ATOMIC_LOAD_ADD_I16
- { 33, 3, 1, 128, "ATOMIC_LOAD_ADD_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #33 = ATOMIC_LOAD_ADD_I32
- { 34, 3, 1, 128, "ATOMIC_LOAD_ADD_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #34 = ATOMIC_LOAD_ADD_I8
- { 35, 3, 1, 128, "ATOMIC_LOAD_AND_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #35 = ATOMIC_LOAD_AND_I16
- { 36, 3, 1, 128, "ATOMIC_LOAD_AND_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #36 = ATOMIC_LOAD_AND_I32
- { 37, 3, 1, 128, "ATOMIC_LOAD_AND_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #37 = ATOMIC_LOAD_AND_I8
- { 38, 3, 1, 128, "ATOMIC_LOAD_NAND_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #38 = ATOMIC_LOAD_NAND_I16
- { 39, 3, 1, 128, "ATOMIC_LOAD_NAND_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #39 = ATOMIC_LOAD_NAND_I32
- { 40, 3, 1, 128, "ATOMIC_LOAD_NAND_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #40 = ATOMIC_LOAD_NAND_I8
- { 41, 3, 1, 128, "ATOMIC_LOAD_OR_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #41 = ATOMIC_LOAD_OR_I16
- { 42, 3, 1, 128, "ATOMIC_LOAD_OR_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #42 = ATOMIC_LOAD_OR_I32
- { 43, 3, 1, 128, "ATOMIC_LOAD_OR_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #43 = ATOMIC_LOAD_OR_I8
- { 44, 3, 1, 128, "ATOMIC_LOAD_SUB_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #44 = ATOMIC_LOAD_SUB_I16
- { 45, 3, 1, 128, "ATOMIC_LOAD_SUB_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #45 = ATOMIC_LOAD_SUB_I32
- { 46, 3, 1, 128, "ATOMIC_LOAD_SUB_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #46 = ATOMIC_LOAD_SUB_I8
- { 47, 3, 1, 128, "ATOMIC_LOAD_XOR_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #47 = ATOMIC_LOAD_XOR_I16
- { 48, 3, 1, 128, "ATOMIC_LOAD_XOR_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #48 = ATOMIC_LOAD_XOR_I32
- { 49, 3, 1, 128, "ATOMIC_LOAD_XOR_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #49 = ATOMIC_LOAD_XOR_I8
- { 50, 3, 1, 128, "ATOMIC_SWAP_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #50 = ATOMIC_SWAP_I16
- { 51, 3, 1, 128, "ATOMIC_SWAP_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #51 = ATOMIC_SWAP_I32
- { 52, 3, 1, 128, "ATOMIC_SWAP_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0|(1<<4), ImplicitList1, NULL, NULL, OperandInfo3 }, // Inst #52 = ATOMIC_SWAP_I8
- { 53, 1, 0, 0, "B", 0|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Predicable)|(1<<TID::Terminator), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo14 }, // Inst #53 = B
- { 54, 5, 1, 126, "BFC", 0|(1<<TID::Predicable), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo15 }, // Inst #54 = BFC
- { 55, 5, 1, 126, "BFI", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #55 = BFI
- { 56, 6, 1, 88, "BICri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #56 = BICri
- { 57, 6, 1, 89, "BICrr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #57 = BICrr
- { 58, 8, 1, 91, "BICrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(5<<9), NULL, NULL, NULL, OperandInfo7 }, // Inst #58 = BICrs
- { 59, 3, 0, 128, "BKPT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, OperandInfo11 }, // Inst #59 = BKPT
- { 60, 1, 0, 0, "BL", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(3<<4)|(2<<9), NULL, ImplicitList3, Barriers2, OperandInfo14 }, // Inst #60 = BL
- { 61, 1, 0, 0, "BLX", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(3<<4)|(3<<9), NULL, ImplicitList3, Barriers2, OperandInfo16 }, // Inst #61 = BLX
- { 62, 1, 0, 0, "BLXr9", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(3<<4)|(3<<9), NULL, ImplicitList4, Barriers2, OperandInfo16 }, // Inst #62 = BLXr9
- { 63, 3, 0, 0, "BL_pred", 0|(1<<TID::Call)|(1<<TID::Predicable)|(1<<TID::Variadic), 0|(3<<4)|(2<<9), NULL, ImplicitList3, Barriers2, OperandInfo11 }, // Inst #63 = BL_pred
- { 64, 1, 0, 0, "BLr9", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(3<<4)|(2<<9), NULL, ImplicitList4, Barriers2, OperandInfo14 }, // Inst #64 = BLr9
- { 65, 3, 0, 0, "BLr9_pred", 0|(1<<TID::Call)|(1<<TID::Predicable)|(1<<TID::Variadic), 0|(3<<4)|(2<<9), NULL, ImplicitList4, Barriers2, OperandInfo11 }, // Inst #65 = BLr9_pred
- { 66, 1, 0, 0, "BMOVPCRX", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(2<<4)|(3<<9), NULL, ImplicitList3, Barriers2, OperandInfo17 }, // Inst #66 = BMOVPCRX
- { 67, 1, 0, 0, "BMOVPCRXr9", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(2<<4)|(3<<9), NULL, ImplicitList4, Barriers2, OperandInfo17 }, // Inst #67 = BMOVPCRXr9
- { 68, 1, 0, 0, "BRIND", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator), 0|(3<<4)|(3<<9), NULL, NULL, NULL, OperandInfo16 }, // Inst #68 = BRIND
- { 69, 4, 0, 0, "BR_JTadd", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::NotDuplicable), 0|(1<<4)|(3<<9), NULL, NULL, NULL, OperandInfo18 }, // Inst #69 = BR_JTadd
- { 70, 5, 0, 0, "BR_JTm", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::MayLoad)|(1<<TID::Terminator)|(1<<TID::NotDuplicable), 0|(1<<4)|(3<<9), NULL, NULL, NULL, OperandInfo19 }, // Inst #70 = BR_JTm
- { 71, 3, 0, 0, "BR_JTr", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::NotDuplicable), 0|(1<<4)|(3<<9), NULL, NULL, NULL, OperandInfo20 }, // Inst #71 = BR_JTr
- { 72, 1, 0, 0, "BX", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(2<<4)|(3<<9), NULL, ImplicitList3, Barriers2, OperandInfo17 }, // Inst #72 = BX
- { 73, 3, 0, 128, "BXJ", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo21 }, // Inst #73 = BXJ
- { 74, 2, 0, 0, "BX_RET", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Predicable)|(1<<TID::Terminator), 0|(3<<4)|(3<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #74 = BX_RET
- { 75, 1, 0, 0, "BXr9", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(2<<4)|(3<<9), NULL, ImplicitList4, Barriers2, OperandInfo17 }, // Inst #75 = BXr9
- { 76, 3, 0, 0, "Bcc", 0|(1<<TID::Branch)|(1<<TID::Predicable)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo11 }, // Inst #76 = Bcc
- { 77, 8, 0, 128, "CDP", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo23 }, // Inst #77 = CDP
- { 78, 6, 0, 128, "CDP2", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo24 }, // Inst #78 = CDP2
- { 79, 0, 0, 128, "CLREX", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #79 = CLREX
- { 80, 4, 1, 125, "CLZ", 0|(1<<TID::Predicable), 0|(3<<4)|(11<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #80 = CLZ
- { 81, 4, 0, 97, "CMNzri", 0|(1<<TID::Predicable), 0|1|(3<<4)|(4<<9), NULL, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #81 = CMNzri
- { 82, 4, 0, 98, "CMNzrr", 0|(1<<TID::Predicable), 0|1|(3<<4)|(4<<9), NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #82 = CMNzrr
- { 83, 6, 0, 100, "CMNzrs", 0|(1<<TID::Predicable), 0|1|(3<<4)|(5<<9), NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #83 = CMNzrs
- { 84, 4, 0, 97, "CMPri", 0|(1<<TID::Predicable), 0|1|(3<<4)|(4<<9), NULL, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #84 = CMPri
- { 85, 4, 0, 98, "CMPrr", 0|(1<<TID::Predicable), 0|1|(3<<4)|(4<<9), NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #85 = CMPrr
- { 86, 6, 0, 100, "CMPrs", 0|(1<<TID::Predicable), 0|1|(3<<4)|(5<<9), NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #86 = CMPrs
- { 87, 4, 0, 97, "CMPzri", 0|(1<<TID::Predicable), 0|1|(3<<4)|(4<<9), NULL, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #87 = CMPzri
- { 88, 4, 0, 98, "CMPzrr", 0|(1<<TID::Predicable), 0|1|(3<<4)|(4<<9), NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #88 = CMPzrr
- { 89, 6, 0, 100, "CMPzrs", 0|(1<<TID::Predicable), 0|1|(3<<4)|(5<<9), NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #89 = CMPzrs
- { 90, 3, 0, 128, "CONSTPOOL_ENTRY", 0|(1<<TID::NotDuplicable), 0|(1<<4), NULL, NULL, NULL, OperandInfo28 }, // Inst #90 = CONSTPOOL_ENTRY
- { 91, 1, 0, 128, "CPS", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, OperandInfo14 }, // Inst #91 = CPS
- { 92, 3, 0, 128, "DBG", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, OperandInfo11 }, // Inst #92 = DBG
- { 93, 0, 0, 128, "DMBish", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #93 = DMBish
- { 94, 0, 0, 128, "DMBishst", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #94 = DMBishst
- { 95, 0, 0, 128, "DMBnsh", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #95 = DMBnsh
- { 96, 0, 0, 128, "DMBnshst", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #96 = DMBnshst
- { 97, 0, 0, 128, "DMBosh", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #97 = DMBosh
- { 98, 0, 0, 128, "DMBoshst", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #98 = DMBoshst
- { 99, 0, 0, 128, "DMBst", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #99 = DMBst
- { 100, 0, 0, 128, "DSBish", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #100 = DSBish
- { 101, 0, 0, 128, "DSBishst", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #101 = DSBishst
- { 102, 0, 0, 128, "DSBnsh", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #102 = DSBnsh
- { 103, 0, 0, 128, "DSBnshst", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #103 = DSBnshst
- { 104, 0, 0, 128, "DSBosh", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #104 = DSBosh
- { 105, 0, 0, 128, "DSBoshst", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #105 = DSBoshst
- { 106, 0, 0, 128, "DSBst", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #106 = DSBst
- { 107, 6, 1, 88, "EORri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #107 = EORri
- { 108, 6, 1, 89, "EORrr", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #108 = EORrr
- { 109, 8, 1, 91, "EORrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(5<<9), NULL, NULL, NULL, OperandInfo7 }, // Inst #109 = EORrs
- { 110, 4, 1, 26, "FCONSTD", 0|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|(3<<4)|(22<<9)|(1<<17), NULL, NULL, NULL, OperandInfo29 }, // Inst #110 = FCONSTD
- { 111, 4, 1, 26, "FCONSTS", 0|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|(3<<4)|(22<<9)|(1<<17), NULL, NULL, NULL, OperandInfo30 }, // Inst #111 = FCONSTS
- { 112, 2, 0, 82, "FMSTAT", 0|(1<<TID::Predicable), 0|(3<<4)|(22<<9)|(1<<17), ImplicitList5, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #112 = FMSTAT
- { 113, 0, 0, 128, "ISBsy", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #113 = ISBsy
- { 114, 1, 0, 128, "Int_MemBarrierV6", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4), NULL, NULL, NULL, OperandInfo16 }, // Inst #114 = Int_MemBarrierV6
- { 115, 0, 0, 128, "Int_MemBarrierV7", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4), NULL, NULL, NULL, 0 }, // Inst #115 = Int_MemBarrierV7
- { 116, 1, 0, 128, "Int_SyncBarrierV6", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4), NULL, NULL, NULL, OperandInfo16 }, // Inst #116 = Int_SyncBarrierV6
- { 117, 0, 0, 128, "Int_SyncBarrierV7", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4), NULL, NULL, NULL, 0 }, // Inst #117 = Int_SyncBarrierV7
- { 118, 2, 0, 128, "Int_eh_sjlj_setjmp", 0, 0|(1<<4), NULL, ImplicitList6, Barriers3, OperandInfo32 }, // Inst #118 = Int_eh_sjlj_setjmp
- { 119, 7, 0, 128, "LDC2L_OFFSET", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #119 = LDC2L_OFFSET
- { 120, 6, 0, 128, "LDC2L_OPTION", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo34 }, // Inst #120 = LDC2L_OPTION
- { 121, 7, 0, 128, "LDC2L_POST", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #121 = LDC2L_POST
- { 122, 7, 0, 128, "LDC2L_PRE", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #122 = LDC2L_PRE
- { 123, 7, 0, 128, "LDC2_OFFSET", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #123 = LDC2_OFFSET
- { 124, 6, 0, 128, "LDC2_OPTION", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo34 }, // Inst #124 = LDC2_OPTION
- { 125, 7, 0, 128, "LDC2_POST", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #125 = LDC2_POST
- { 126, 7, 0, 128, "LDC2_PRE", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #126 = LDC2_PRE
- { 127, 7, 0, 128, "LDCL_OFFSET", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #127 = LDCL_OFFSET
- { 128, 6, 0, 128, "LDCL_OPTION", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo34 }, // Inst #128 = LDCL_OPTION
- { 129, 7, 0, 128, "LDCL_POST", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #129 = LDCL_POST
- { 130, 7, 0, 128, "LDCL_PRE", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #130 = LDCL_PRE
- { 131, 7, 0, 128, "LDC_OFFSET", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #131 = LDC_OFFSET
- { 132, 6, 0, 128, "LDC_OPTION", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo34 }, // Inst #132 = LDC_OPTION
- { 133, 7, 0, 128, "LDC_POST", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #133 = LDC_POST
- { 134, 7, 0, 128, "LDC_PRE", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #134 = LDC_PRE
- { 135, 5, 0, 103, "LDM", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|4|(3<<4)|(10<<9), NULL, NULL, NULL, OperandInfo35 }, // Inst #135 = LDM
- { 136, 5, 0, 0, "LDM_RET", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|4|(3<<4)|(10<<9), NULL, NULL, NULL, OperandInfo35 }, // Inst #136 = LDM_RET
- { 137, 6, 1, 104, "LDR", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|2|(3<<4)|(6<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #137 = LDR
- { 138, 6, 1, 104, "LDRB", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|2|(3<<4)|(6<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #138 = LDRB
- { 139, 7, 2, 105, "LDRBT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|2|(3<<4)|(2<<7)|(6<<9), NULL, NULL, NULL, OperandInfo36 }, // Inst #139 = LDRBT
- { 140, 7, 2, 105, "LDRB_POST", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|2|(3<<4)|(2<<7)|(6<<9), NULL, NULL, NULL, OperandInfo36 }, // Inst #140 = LDRB_POST
- { 141, 7, 2, 105, "LDRB_PRE", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|2|(3<<4)|(1<<7)|(6<<9), NULL, NULL, NULL, OperandInfo36 }, // Inst #141 = LDRB_PRE
- { 142, 7, 2, 104, "LDRD", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|3|(3<<4)|(8<<9), NULL, NULL, NULL, OperandInfo10 }, // Inst #142 = LDRD
- { 143, 8, 3, 104, "LDRD_POST", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|3|(3<<4)|(2<<7)|(8<<9), NULL, NULL, NULL, OperandInfo37 }, // Inst #143 = LDRD_POST
- { 144, 8, 3, 104, "LDRD_PRE", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|3|(3<<4)|(1<<7)|(8<<9), NULL, NULL, NULL, OperandInfo37 }, // Inst #144 = LDRD_PRE
- { 145, 4, 1, 128, "LDREX", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(28<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #145 = LDREX
- { 146, 4, 1, 128, "LDREXB", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(28<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #146 = LDREXB
- { 147, 5, 2, 128, "LDREXD", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(28<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #147 = LDREXD
- { 148, 4, 1, 128, "LDREXH", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(28<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #148 = LDREXH
- { 149, 6, 1, 104, "LDRH", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|3|(3<<4)|(8<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #149 = LDRH
- { 150, 7, 2, 105, "LDRHT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|3|(3<<4)|(2<<7)|(8<<9), NULL, NULL, NULL, OperandInfo36 }, // Inst #150 = LDRHT
- { 151, 7, 2, 105, "LDRH_POST", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|3|(3<<4)|(2<<7)|(8<<9), NULL, NULL, NULL, OperandInfo36 }, // Inst #151 = LDRH_POST
- { 152, 7, 2, 105, "LDRH_PRE", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|3|(3<<4)|(1<<7)|(8<<9), NULL, NULL, NULL, OperandInfo36 }, // Inst #152 = LDRH_PRE
- { 153, 6, 1, 104, "LDRSB", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|3|(3<<4)|(8<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #153 = LDRSB
- { 154, 7, 2, 105, "LDRSBT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|3|(3<<4)|(2<<7)|(8<<9), NULL, NULL, NULL, OperandInfo36 }, // Inst #154 = LDRSBT
- { 155, 7, 2, 105, "LDRSB_POST", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|3|(3<<4)|(2<<7)|(8<<9), NULL, NULL, NULL, OperandInfo36 }, // Inst #155 = LDRSB_POST
- { 156, 7, 2, 105, "LDRSB_PRE", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|3|(3<<4)|(1<<7)|(8<<9), NULL, NULL, NULL, OperandInfo36 }, // Inst #156 = LDRSB_PRE
- { 157, 6, 1, 104, "LDRSH", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|3|(3<<4)|(8<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #157 = LDRSH
- { 158, 7, 2, 105, "LDRSHT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|3|(3<<4)|(2<<7)|(8<<9), NULL, NULL, NULL, OperandInfo36 }, // Inst #158 = LDRSHT
- { 159, 7, 2, 105, "LDRSH_POST", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|3|(3<<4)|(2<<7)|(8<<9), NULL, NULL, NULL, OperandInfo36 }, // Inst #159 = LDRSH_POST
- { 160, 7, 2, 105, "LDRSH_PRE", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|3|(3<<4)|(1<<7)|(8<<9), NULL, NULL, NULL, OperandInfo36 }, // Inst #160 = LDRSH_PRE
- { 161, 7, 2, 105, "LDRT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|2|(3<<4)|(2<<7)|(6<<9), NULL, NULL, NULL, OperandInfo36 }, // Inst #161 = LDRT
- { 162, 7, 2, 105, "LDR_POST", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|2|(3<<4)|(2<<7)|(6<<9), NULL, NULL, NULL, OperandInfo36 }, // Inst #162 = LDR_POST
- { 163, 7, 2, 105, "LDR_PRE", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|2|(3<<4)|(1<<7)|(6<<9), NULL, NULL, NULL, OperandInfo36 }, // Inst #163 = LDR_PRE
- { 164, 6, 1, 104, "LDRcp", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Rematerializable)|(1<<TID::UnmodeledSideEffects), 0|2|(3<<4)|(6<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #164 = LDRcp
- { 165, 4, 1, 88, "LEApcrel", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|1|(3<<4), NULL, NULL, NULL, OperandInfo26 }, // Inst #165 = LEApcrel
- { 166, 5, 1, 88, "LEApcrelJT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|1|(3<<4), NULL, NULL, NULL, OperandInfo38 }, // Inst #166 = LEApcrelJT
- { 167, 8, 0, 128, "MCR", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo39 }, // Inst #167 = MCR
- { 168, 6, 0, 128, "MCR2", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo40 }, // Inst #168 = MCR2
- { 169, 7, 0, 128, "MCRR", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #169 = MCRR
- { 170, 5, 0, 128, "MCRR2", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo41 }, // Inst #170 = MCRR2
- { 171, 7, 1, 109, "MLA", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo42 }, // Inst #171 = MLA
- { 172, 6, 1, 109, "MLS", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #172 = MLS
- { 173, 5, 1, 93, "MOVCCi", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|1|(3<<4)|(4<<9)|(1<<15), NULL, NULL, NULL, OperandInfo15 }, // Inst #173 = MOVCCi
- { 174, 5, 1, 94, "MOVCCr", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|1|(3<<4)|(4<<9)|(1<<15), NULL, NULL, NULL, OperandInfo44 }, // Inst #174 = MOVCCr
- { 175, 7, 1, 96, "MOVCCs", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|1|(3<<4)|(5<<9)|(1<<15), NULL, NULL, NULL, OperandInfo45 }, // Inst #175 = MOVCCs
- { 176, 2, 0, 0, "MOVPCLR", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Predicable)|(1<<TID::Terminator), 0|(3<<4)|(3<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #176 = MOVPCLR
- { 177, 1, 0, 0, "MOVPCRX", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator), 0|(3<<4)|(3<<9), NULL, NULL, NULL, OperandInfo16 }, // Inst #177 = MOVPCRX
- { 178, 5, 1, 111, "MOVTi16", 0|(1<<TID::Predicable), 0|1|(3<<4)|(4<<9)|(1<<15), NULL, NULL, NULL, OperandInfo15 }, // Inst #178 = MOVTi16
- { 179, 5, 1, 111, "MOVi", 0|(1<<TID::Predicable)|(1<<TID::Rematerializable)|(1<<TID::HasOptionalDef)|(1<<TID::CheapAsAMove), 0|1|(3<<4)|(4<<9)|(1<<15), NULL, NULL, NULL, OperandInfo46 }, // Inst #179 = MOVi
- { 180, 4, 1, 111, "MOVi16", 0|(1<<TID::Predicable)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0|1|(3<<4)|(4<<9)|(1<<15), NULL, NULL, NULL, OperandInfo26 }, // Inst #180 = MOVi16
- { 181, 4, 1, 111, "MOVi2pieces", 0|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|1|(2<<4), NULL, NULL, NULL, OperandInfo26 }, // Inst #181 = MOVi2pieces
- { 182, 4, 1, 111, "MOVi32imm", 0|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|1|(2<<4), NULL, NULL, NULL, OperandInfo26 }, // Inst #182 = MOVi32imm
- { 183, 5, 1, 112, "MOVr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9)|(1<<15), NULL, NULL, NULL, OperandInfo47 }, // Inst #183 = MOVr
- { 184, 5, 1, 113, "MOVrx", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(1<<15), ImplicitList1, NULL, NULL, OperandInfo47 }, // Inst #184 = MOVrx
- { 185, 7, 1, 114, "MOVs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(5<<9)|(1<<15), NULL, NULL, NULL, OperandInfo48 }, // Inst #185 = MOVs
- { 186, 4, 1, 113, "MOVsra_flag", 0|(1<<TID::Predicable), 0|1|(3<<4)|(1<<15), NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #186 = MOVsra_flag
- { 187, 4, 1, 113, "MOVsrl_flag", 0|(1<<TID::Predicable), 0|1|(3<<4)|(1<<15), NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #187 = MOVsrl_flag
- { 188, 8, 0, 128, "MRC", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo39 }, // Inst #188 = MRC
- { 189, 6, 0, 128, "MRC2", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo40 }, // Inst #189 = MRC2
- { 190, 7, 0, 128, "MRRC", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #190 = MRRC
- { 191, 5, 0, 128, "MRRC2", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo41 }, // Inst #191 = MRRC2
- { 192, 3, 1, 128, "MRS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo21 }, // Inst #192 = MRS
- { 193, 3, 1, 128, "MRSsys", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo21 }, // Inst #193 = MRSsys
- { 194, 3, 0, 128, "MSR", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo21 }, // Inst #194 = MSR
- { 195, 3, 0, 128, "MSRi", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo11 }, // Inst #195 = MSRi
- { 196, 3, 0, 128, "MSRsys", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo21 }, // Inst #196 = MSRsys
- { 197, 3, 0, 128, "MSRsysi", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo11 }, // Inst #197 = MSRsysi
- { 198, 6, 1, 116, "MUL", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #198 = MUL
- { 199, 5, 1, 111, "MVNi", 0|(1<<TID::Predicable)|(1<<TID::Rematerializable)|(1<<TID::HasOptionalDef)|(1<<TID::CheapAsAMove), 0|1|(3<<4)|(4<<9)|(1<<15), NULL, NULL, NULL, OperandInfo46 }, // Inst #199 = MVNi
- { 200, 5, 1, 112, "MVNr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9)|(1<<15), NULL, NULL, NULL, OperandInfo47 }, // Inst #200 = MVNr
- { 201, 7, 1, 114, "MVNs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(5<<9)|(1<<15), NULL, NULL, NULL, OperandInfo48 }, // Inst #201 = MVNs
- { 202, 2, 0, 128, "NOP", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #202 = NOP
- { 203, 6, 1, 88, "ORRri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #203 = ORRri
- { 204, 6, 1, 89, "ORRrr", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #204 = ORRrr
- { 205, 8, 1, 91, "ORRrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(5<<9), NULL, NULL, NULL, OperandInfo7 }, // Inst #205 = ORRrs
- { 206, 5, 1, 89, "PICADD", 0|(1<<TID::Predicable)|(1<<TID::NotDuplicable), 0|1|(3<<4), NULL, NULL, NULL, OperandInfo8 }, // Inst #206 = PICADD
- { 207, 5, 1, 104, "PICLDR", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::NotDuplicable), 0|2|(3<<4), NULL, NULL, NULL, OperandInfo8 }, // Inst #207 = PICLDR
- { 208, 5, 1, 104, "PICLDRB", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::NotDuplicable), 0|2|(3<<4), NULL, NULL, NULL, OperandInfo8 }, // Inst #208 = PICLDRB
- { 209, 5, 1, 104, "PICLDRH", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::NotDuplicable), 0|3|(3<<4), NULL, NULL, NULL, OperandInfo8 }, // Inst #209 = PICLDRH
- { 210, 5, 1, 104, "PICLDRSB", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::NotDuplicable), 0|3|(3<<4), NULL, NULL, NULL, OperandInfo8 }, // Inst #210 = PICLDRSB
- { 211, 5, 1, 104, "PICLDRSH", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::NotDuplicable), 0|3|(3<<4), NULL, NULL, NULL, OperandInfo8 }, // Inst #211 = PICLDRSH
- { 212, 5, 0, 121, "PICSTR", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::NotDuplicable), 0|2|(3<<4), NULL, NULL, NULL, OperandInfo8 }, // Inst #212 = PICSTR
- { 213, 5, 0, 121, "PICSTRB", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::NotDuplicable), 0|2|(3<<4), NULL, NULL, NULL, OperandInfo8 }, // Inst #213 = PICSTRB
- { 214, 5, 0, 121, "PICSTRH", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::NotDuplicable), 0|3|(3<<4), NULL, NULL, NULL, OperandInfo8 }, // Inst #214 = PICSTRH
- { 215, 6, 1, 90, "PKHBT", 0|(1<<TID::Predicable), 0|(3<<4)|(11<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #215 = PKHBT
- { 216, 6, 1, 90, "PKHTB", 0|(1<<TID::Predicable), 0|(3<<4)|(11<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #216 = PKHTB
- { 217, 2, 0, 128, "PLDWi", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, OperandInfo49 }, // Inst #217 = PLDWi
- { 218, 3, 0, 128, "PLDWr", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, OperandInfo2 }, // Inst #218 = PLDWr
- { 219, 2, 0, 128, "PLDi", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, OperandInfo49 }, // Inst #219 = PLDi
- { 220, 3, 0, 128, "PLDr", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, OperandInfo2 }, // Inst #220 = PLDr
- { 221, 2, 0, 128, "PLIi", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, OperandInfo49 }, // Inst #221 = PLIi
- { 222, 3, 0, 128, "PLIr", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, OperandInfo2 }, // Inst #222 = PLIr
- { 223, 5, 1, 89, "QADD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #223 = QADD
- { 224, 5, 1, 89, "QADD16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #224 = QADD16
- { 225, 5, 1, 89, "QADD8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #225 = QADD8
- { 226, 5, 1, 89, "QASX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #226 = QASX
- { 227, 5, 1, 89, "QDADD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #227 = QDADD
- { 228, 5, 1, 89, "QDSUB", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #228 = QDSUB
- { 229, 5, 1, 89, "QSAX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #229 = QSAX
- { 230, 5, 1, 89, "QSUB", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #230 = QSUB
- { 231, 5, 1, 89, "QSUB16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #231 = QSUB16
- { 232, 5, 1, 89, "QSUB8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #232 = QSUB8
- { 233, 4, 1, 125, "RBIT", 0|(1<<TID::Predicable), 0|(3<<4)|(11<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #233 = RBIT
- { 234, 4, 1, 125, "REV", 0|(1<<TID::Predicable), 0|(3<<4)|(11<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #234 = REV
- { 235, 4, 1, 125, "REV16", 0|(1<<TID::Predicable), 0|(3<<4)|(11<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #235 = REV16
- { 236, 4, 1, 125, "REVSH", 0|(1<<TID::Predicable), 0|(3<<4)|(11<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #236 = REVSH
- { 237, 3, 0, 128, "RFE", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo50 }, // Inst #237 = RFE
- { 238, 3, 0, 128, "RFEW", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo50 }, // Inst #238 = RFEW
- { 239, 5, 1, 88, "RSBSri", 0|(1<<TID::Predicable), 0|1|(3<<4)|(4<<9), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #239 = RSBSri
- { 240, 7, 1, 91, "RSBSrs", 0|(1<<TID::Predicable), 0|1|(3<<4)|(5<<9), NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #240 = RSBSrs
- { 241, 6, 1, 88, "RSBri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #241 = RSBri
- { 242, 8, 1, 91, "RSBrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(5<<9), NULL, NULL, NULL, OperandInfo7 }, // Inst #242 = RSBrs
- { 243, 3, 1, 88, "RSCSri", 0, 0|1|(3<<4)|(4<<9), ImplicitList1, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #243 = RSCSri
- { 244, 5, 1, 91, "RSCSrs", 0, 0|1|(3<<4)|(5<<9), ImplicitList1, ImplicitList1, Barriers1, OperandInfo4 }, // Inst #244 = RSCSrs
- { 245, 6, 1, 88, "RSCri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #245 = RSCri
- { 246, 8, 1, 91, "RSCrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(5<<9), ImplicitList1, NULL, NULL, OperandInfo7 }, // Inst #246 = RSCrs
- { 247, 5, 1, 89, "SADD16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #247 = SADD16
- { 248, 5, 1, 89, "SADD8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #248 = SADD8
- { 249, 5, 1, 89, "SASX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #249 = SASX
- { 250, 3, 1, 88, "SBCSSri", 0, 0|1|(3<<4)|(4<<9), ImplicitList1, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #250 = SBCSSri
- { 251, 3, 1, 89, "SBCSSrr", 0, 0|1|(3<<4)|(4<<9), ImplicitList1, ImplicitList1, Barriers1, OperandInfo3 }, // Inst #251 = SBCSSrr
- { 252, 5, 1, 91, "SBCSSrs", 0, 0|1|(3<<4)|(5<<9), ImplicitList1, ImplicitList1, Barriers1, OperandInfo4 }, // Inst #252 = SBCSSrs
- { 253, 6, 1, 88, "SBCri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #253 = SBCri
- { 254, 6, 1, 89, "SBCrr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), ImplicitList1, NULL, NULL, OperandInfo6 }, // Inst #254 = SBCrr
- { 255, 8, 1, 91, "SBCrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(5<<9), ImplicitList1, NULL, NULL, OperandInfo7 }, // Inst #255 = SBCrs
- { 256, 6, 1, 88, "SBFX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo51 }, // Inst #256 = SBFX
- { 257, 5, 1, 128, "SEL", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #257 = SEL
- { 258, 0, 0, 128, "SETENDBE", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #258 = SETENDBE
- { 259, 0, 0, 128, "SETENDLE", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, 0 }, // Inst #259 = SETENDLE
- { 260, 2, 0, 128, "SEV", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #260 = SEV
- { 261, 5, 1, 89, "SHADD16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #261 = SHADD16
- { 262, 5, 1, 89, "SHADD8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #262 = SHADD8
- { 263, 5, 1, 89, "SHASX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #263 = SHASX
- { 264, 5, 1, 89, "SHSAX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #264 = SHSAX
- { 265, 5, 1, 89, "SHSUB16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #265 = SHSUB16
- { 266, 5, 1, 89, "SHSUB8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #266 = SHSUB8
- { 267, 3, 0, 128, "SMC", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo11 }, // Inst #267 = SMC
- { 268, 6, 1, 108, "SMLABB", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #268 = SMLABB
- { 269, 6, 1, 108, "SMLABT", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #269 = SMLABT
- { 270, 6, 1, 128, "SMLAD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #270 = SMLAD
- { 271, 6, 1, 128, "SMLADX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #271 = SMLADX
- { 272, 7, 2, 110, "SMLAL", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo42 }, // Inst #272 = SMLAL
- { 273, 6, 2, 110, "SMLALBB", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #273 = SMLALBB
- { 274, 6, 2, 110, "SMLALBT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #274 = SMLALBT
- { 275, 6, 2, 128, "SMLALD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #275 = SMLALD
- { 276, 6, 2, 128, "SMLALDX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #276 = SMLALDX
- { 277, 6, 2, 110, "SMLALTB", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #277 = SMLALTB
- { 278, 6, 2, 110, "SMLALTT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #278 = SMLALTT
- { 279, 6, 1, 108, "SMLATB", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #279 = SMLATB
- { 280, 6, 1, 108, "SMLATT", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #280 = SMLATT
- { 281, 6, 1, 108, "SMLAWB", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #281 = SMLAWB
- { 282, 6, 1, 108, "SMLAWT", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #282 = SMLAWT
- { 283, 6, 1, 128, "SMLSD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #283 = SMLSD
- { 284, 6, 1, 128, "SMLSDX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #284 = SMLSDX
- { 285, 6, 2, 128, "SMLSLD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #285 = SMLSLD
- { 286, 6, 2, 128, "SMLSLDX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #286 = SMLSLDX
- { 287, 6, 1, 109, "SMMLA", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #287 = SMMLA
- { 288, 6, 1, 109, "SMMLAR", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #288 = SMMLAR
- { 289, 6, 1, 109, "SMMLS", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #289 = SMMLS
- { 290, 6, 1, 109, "SMMLSR", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #290 = SMMLSR
- { 291, 5, 1, 116, "SMMUL", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #291 = SMMUL
- { 292, 5, 1, 116, "SMMULR", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #292 = SMMULR
- { 293, 5, 1, 128, "SMUAD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #293 = SMUAD
- { 294, 5, 1, 128, "SMUADX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #294 = SMUADX
- { 295, 5, 1, 116, "SMULBB", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #295 = SMULBB
- { 296, 5, 1, 116, "SMULBT", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #296 = SMULBT
- { 297, 7, 2, 117, "SMULL", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo42 }, // Inst #297 = SMULL
- { 298, 5, 1, 116, "SMULTB", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #298 = SMULTB
- { 299, 5, 1, 116, "SMULTT", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #299 = SMULTT
- { 300, 5, 1, 115, "SMULWB", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #300 = SMULWB
- { 301, 5, 1, 115, "SMULWT", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #301 = SMULWT
- { 302, 5, 1, 128, "SMUSD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #302 = SMUSD
- { 303, 5, 1, 128, "SMUSDX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #303 = SMUSDX
- { 304, 3, 0, 128, "SRS", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo20 }, // Inst #304 = SRS
- { 305, 3, 0, 128, "SRSW", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo20 }, // Inst #305 = SRSW
- { 306, 5, 1, 128, "SSAT16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo52 }, // Inst #306 = SSAT16
- { 307, 6, 1, 128, "SSATasr", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo53 }, // Inst #307 = SSATasr
- { 308, 6, 1, 128, "SSATlsl", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo53 }, // Inst #308 = SSATlsl
- { 309, 5, 1, 89, "SSAX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #309 = SSAX
- { 310, 5, 1, 89, "SSUB16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #310 = SSUB16
- { 311, 5, 1, 89, "SSUB8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #311 = SSUB8
- { 312, 7, 0, 128, "STC2L_OFFSET", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #312 = STC2L_OFFSET
- { 313, 6, 0, 128, "STC2L_OPTION", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo34 }, // Inst #313 = STC2L_OPTION
- { 314, 7, 0, 128, "STC2L_POST", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #314 = STC2L_POST
- { 315, 7, 0, 128, "STC2L_PRE", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #315 = STC2L_PRE
- { 316, 7, 0, 128, "STC2_OFFSET", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #316 = STC2_OFFSET
- { 317, 6, 0, 128, "STC2_OPTION", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo34 }, // Inst #317 = STC2_OPTION
- { 318, 7, 0, 128, "STC2_POST", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #318 = STC2_POST
- { 319, 7, 0, 128, "STC2_PRE", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #319 = STC2_PRE
- { 320, 7, 0, 128, "STCL_OFFSET", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #320 = STCL_OFFSET
- { 321, 6, 0, 128, "STCL_OPTION", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo34 }, // Inst #321 = STCL_OPTION
- { 322, 7, 0, 128, "STCL_POST", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #322 = STCL_POST
- { 323, 7, 0, 128, "STCL_PRE", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #323 = STCL_PRE
- { 324, 7, 0, 128, "STC_OFFSET", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #324 = STC_OFFSET
- { 325, 6, 0, 128, "STC_OPTION", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo34 }, // Inst #325 = STC_OPTION
- { 326, 7, 0, 128, "STC_POST", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #326 = STC_POST
- { 327, 7, 0, 128, "STC_PRE", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo33 }, // Inst #327 = STC_PRE
- { 328, 5, 0, 120, "STM", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|4|(3<<4)|(10<<9), NULL, NULL, NULL, OperandInfo35 }, // Inst #328 = STM
- { 329, 6, 0, 121, "STR", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|2|(3<<4)|(7<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #329 = STR
- { 330, 6, 0, 121, "STRB", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|2|(3<<4)|(7<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #330 = STRB
- { 331, 7, 1, 122, "STRBT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|2|(3<<4)|(2<<7)|(7<<9), NULL, NULL, NULL, OperandInfo54 }, // Inst #331 = STRBT
- { 332, 7, 1, 122, "STRB_POST", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|2|(3<<4)|(2<<7)|(7<<9), NULL, NULL, NULL, OperandInfo54 }, // Inst #332 = STRB_POST
- { 333, 7, 1, 122, "STRB_PRE", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|2|(3<<4)|(1<<7)|(7<<9), NULL, NULL, NULL, OperandInfo54 }, // Inst #333 = STRB_PRE
- { 334, 7, 0, 121, "STRD", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|3|(3<<4)|(9<<9), NULL, NULL, NULL, OperandInfo10 }, // Inst #334 = STRD
- { 335, 8, 1, 122, "STRD_POST", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|3|(3<<4)|(2<<7)|(9<<9), NULL, NULL, NULL, OperandInfo55 }, // Inst #335 = STRD_POST
- { 336, 8, 1, 122, "STRD_PRE", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|3|(3<<4)|(1<<7)|(9<<9), NULL, NULL, NULL, OperandInfo55 }, // Inst #336 = STRD_PRE
- { 337, 5, 1, 128, "STREX", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(28<<9), NULL, NULL, NULL, OperandInfo56 }, // Inst #337 = STREX
- { 338, 5, 1, 128, "STREXB", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(28<<9), NULL, NULL, NULL, OperandInfo56 }, // Inst #338 = STREXB
- { 339, 6, 1, 128, "STREXD", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(28<<9), NULL, NULL, NULL, OperandInfo57 }, // Inst #339 = STREXD
- { 340, 5, 1, 128, "STREXH", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(28<<9), NULL, NULL, NULL, OperandInfo56 }, // Inst #340 = STREXH
- { 341, 6, 0, 121, "STRH", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|3|(3<<4)|(9<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #341 = STRH
- { 342, 7, 1, 122, "STRHT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|3|(3<<4)|(2<<7)|(9<<9), NULL, NULL, NULL, OperandInfo54 }, // Inst #342 = STRHT
- { 343, 7, 1, 122, "STRH_POST", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|3|(3<<4)|(2<<7)|(9<<9), NULL, NULL, NULL, OperandInfo54 }, // Inst #343 = STRH_POST
- { 344, 7, 1, 122, "STRH_PRE", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|3|(3<<4)|(1<<7)|(9<<9), NULL, NULL, NULL, OperandInfo54 }, // Inst #344 = STRH_PRE
- { 345, 7, 1, 122, "STRT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|2|(3<<4)|(2<<7)|(7<<9), NULL, NULL, NULL, OperandInfo54 }, // Inst #345 = STRT
- { 346, 7, 1, 122, "STR_POST", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|2|(3<<4)|(2<<7)|(7<<9), NULL, NULL, NULL, OperandInfo54 }, // Inst #346 = STR_POST
- { 347, 7, 1, 122, "STR_PRE", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|2|(3<<4)|(1<<7)|(7<<9), NULL, NULL, NULL, OperandInfo54 }, // Inst #347 = STR_PRE
- { 348, 5, 1, 88, "SUBSri", 0|(1<<TID::Predicable), 0|1|(3<<4)|(4<<9), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #348 = SUBSri
- { 349, 5, 1, 89, "SUBSrr", 0|(1<<TID::Predicable), 0|1|(3<<4)|(4<<9), NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #349 = SUBSrr
- { 350, 7, 1, 91, "SUBSrs", 0|(1<<TID::Predicable), 0|1|(3<<4)|(5<<9), NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #350 = SUBSrs
- { 351, 6, 1, 88, "SUBri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #351 = SUBri
- { 352, 6, 1, 89, "SUBrr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #352 = SUBrr
- { 353, 8, 1, 91, "SUBrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|1|(3<<4)|(5<<9), NULL, NULL, NULL, OperandInfo7 }, // Inst #353 = SUBrs
- { 354, 3, 0, 0, "SVC", 0|(1<<TID::Call)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(2<<9), NULL, NULL, NULL, OperandInfo11 }, // Inst #354 = SVC
- { 355, 5, 1, 128, "SWP", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(28<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #355 = SWP
- { 356, 5, 1, 128, "SWPB", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(28<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #356 = SWPB
- { 357, 5, 1, 89, "SXTAB16rr", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #357 = SXTAB16rr
- { 358, 6, 1, 90, "SXTAB16rr_rot", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #358 = SXTAB16rr_rot
- { 359, 5, 1, 89, "SXTABrr", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #359 = SXTABrr
- { 360, 6, 1, 90, "SXTABrr_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #360 = SXTABrr_rot
- { 361, 5, 1, 89, "SXTAHrr", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #361 = SXTAHrr
- { 362, 6, 1, 90, "SXTAHrr_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #362 = SXTAHrr_rot
- { 363, 4, 1, 125, "SXTB16r", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #363 = SXTB16r
- { 364, 5, 1, 126, "SXTB16r_rot", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #364 = SXTB16r_rot
- { 365, 4, 1, 125, "SXTBr", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #365 = SXTBr
- { 366, 5, 1, 126, "SXTBr_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #366 = SXTBr_rot
- { 367, 4, 1, 125, "SXTHr", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #367 = SXTHr
- { 368, 5, 1, 126, "SXTHr_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #368 = SXTHr_rot
- { 369, 4, 0, 97, "TEQri", 0|(1<<TID::Predicable), 0|1|(3<<4)|(4<<9), NULL, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #369 = TEQri
- { 370, 4, 0, 98, "TEQrr", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|1|(3<<4)|(4<<9), NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #370 = TEQrr
- { 371, 6, 0, 100, "TEQrs", 0|(1<<TID::Predicable), 0|1|(3<<4)|(5<<9), NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #371 = TEQrs
- { 372, 0, 0, 0, "TPsoft", 0|(1<<TID::Call), 0|(3<<4)|(2<<9), NULL, ImplicitList7, Barriers1, 0 }, // Inst #372 = TPsoft
- { 373, 2, 0, 128, "TRAP", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #373 = TRAP
- { 374, 4, 0, 97, "TSTri", 0|(1<<TID::Predicable), 0|1|(3<<4)|(4<<9), NULL, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #374 = TSTri
- { 375, 4, 0, 98, "TSTrr", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|1|(3<<4)|(4<<9), NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #375 = TSTrr
- { 376, 6, 0, 100, "TSTrs", 0|(1<<TID::Predicable), 0|1|(3<<4)|(5<<9), NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #376 = TSTrs
- { 377, 5, 1, 89, "UADD16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #377 = UADD16
- { 378, 5, 1, 89, "UADD8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #378 = UADD8
- { 379, 5, 1, 89, "UASX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #379 = UASX
- { 380, 6, 1, 88, "UBFX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|1|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo51 }, // Inst #380 = UBFX
- { 381, 5, 1, 89, "UHADD16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #381 = UHADD16
- { 382, 5, 1, 89, "UHADD8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #382 = UHADD8
- { 383, 5, 1, 89, "UHASX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #383 = UHASX
- { 384, 5, 1, 89, "UHSAX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #384 = UHSAX
- { 385, 5, 1, 89, "UHSUB16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #385 = UHSUB16
- { 386, 5, 1, 89, "UHSUB8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #386 = UHSUB8
- { 387, 6, 2, 110, "UMAAL", 0|(1<<TID::Predicable), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #387 = UMAAL
- { 388, 7, 2, 110, "UMLAL", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo42 }, // Inst #388 = UMLAL
- { 389, 7, 2, 117, "UMULL", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo42 }, // Inst #389 = UMULL
- { 390, 5, 1, 89, "UQADD16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #390 = UQADD16
- { 391, 5, 1, 89, "UQADD8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #391 = UQADD8
- { 392, 5, 1, 89, "UQASX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #392 = UQASX
- { 393, 5, 1, 89, "UQSAX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #393 = UQSAX
- { 394, 5, 1, 89, "UQSUB16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #394 = UQSUB16
- { 395, 5, 1, 89, "UQSUB8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #395 = UQSUB8
- { 396, 5, 1, 128, "USAD8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #396 = USAD8
- { 397, 6, 1, 128, "USADA8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(1<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #397 = USADA8
- { 398, 5, 1, 128, "USAT16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo52 }, // Inst #398 = USAT16
- { 399, 6, 1, 128, "USATasr", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo53 }, // Inst #399 = USATasr
- { 400, 6, 1, 128, "USATlsl", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo53 }, // Inst #400 = USATlsl
- { 401, 5, 1, 89, "USAX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #401 = USAX
- { 402, 5, 1, 89, "USUB16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #402 = USUB16
- { 403, 5, 1, 89, "USUB8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(4<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #403 = USUB8
- { 404, 5, 1, 89, "UXTAB16rr", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #404 = UXTAB16rr
- { 405, 6, 1, 90, "UXTAB16rr_rot", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #405 = UXTAB16rr_rot
- { 406, 5, 1, 89, "UXTABrr", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #406 = UXTABrr
- { 407, 6, 1, 90, "UXTABrr_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #407 = UXTABrr_rot
- { 408, 5, 1, 89, "UXTAHrr", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #408 = UXTAHrr
- { 409, 6, 1, 90, "UXTAHrr_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #409 = UXTAHrr_rot
- { 410, 4, 1, 125, "UXTB16r", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #410 = UXTB16r
- { 411, 5, 1, 126, "UXTB16r_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #411 = UXTB16r_rot
- { 412, 4, 1, 125, "UXTBr", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #412 = UXTBr
- { 413, 5, 1, 126, "UXTBr_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #413 = UXTBr_rot
- { 414, 4, 1, 125, "UXTHr", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #414 = UXTHr
- { 415, 5, 1, 126, "UXTHr_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(12<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #415 = UXTHr_rot
- { 416, 6, 1, 17, "VABALsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #416 = VABALsv2i64
- { 417, 6, 1, 17, "VABALsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #417 = VABALsv4i32
- { 418, 6, 1, 17, "VABALsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #418 = VABALsv8i16
- { 419, 6, 1, 17, "VABALuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #419 = VABALuv2i64
- { 420, 6, 1, 17, "VABALuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #420 = VABALuv4i32
- { 421, 6, 1, 17, "VABALuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #421 = VABALuv8i16
- { 422, 6, 1, 18, "VABAsv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #422 = VABAsv16i8
- { 423, 6, 1, 19, "VABAsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #423 = VABAsv2i32
- { 424, 6, 1, 17, "VABAsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #424 = VABAsv4i16
- { 425, 6, 1, 20, "VABAsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #425 = VABAsv4i32
- { 426, 6, 1, 18, "VABAsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #426 = VABAsv8i16
- { 427, 6, 1, 17, "VABAsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #427 = VABAsv8i8
- { 428, 6, 1, 18, "VABAuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #428 = VABAuv16i8
- { 429, 6, 1, 19, "VABAuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #429 = VABAuv2i32
- { 430, 6, 1, 17, "VABAuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #430 = VABAuv4i16
- { 431, 6, 1, 20, "VABAuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #431 = VABAuv4i32
- { 432, 6, 1, 18, "VABAuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #432 = VABAuv8i16
- { 433, 6, 1, 17, "VABAuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #433 = VABAuv8i8
- { 434, 5, 1, 4, "VABDLsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #434 = VABDLsv2i64
- { 435, 5, 1, 4, "VABDLsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #435 = VABDLsv4i32
- { 436, 5, 1, 4, "VABDLsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #436 = VABDLsv8i16
- { 437, 5, 1, 4, "VABDLuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #437 = VABDLuv2i64
- { 438, 5, 1, 4, "VABDLuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #438 = VABDLuv4i32
- { 439, 5, 1, 4, "VABDLuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #439 = VABDLuv8i16
- { 440, 5, 1, 1, "VABDfd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #440 = VABDfd
- { 441, 5, 1, 2, "VABDfq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #441 = VABDfq
- { 442, 5, 1, 4, "VABDsv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #442 = VABDsv16i8
- { 443, 5, 1, 3, "VABDsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #443 = VABDsv2i32
- { 444, 5, 1, 3, "VABDsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #444 = VABDsv4i16
- { 445, 5, 1, 4, "VABDsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #445 = VABDsv4i32
- { 446, 5, 1, 4, "VABDsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #446 = VABDsv8i16
- { 447, 5, 1, 3, "VABDsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #447 = VABDsv8i8
- { 448, 5, 1, 4, "VABDuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #448 = VABDuv16i8
- { 449, 5, 1, 3, "VABDuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #449 = VABDuv2i32
- { 450, 5, 1, 3, "VABDuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #450 = VABDuv4i16
- { 451, 5, 1, 4, "VABDuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #451 = VABDuv4i32
- { 452, 5, 1, 4, "VABDuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #452 = VABDuv8i16
- { 453, 5, 1, 3, "VABDuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #453 = VABDuv8i8
- { 454, 4, 1, 87, "VABSD", 0|(1<<TID::Predicable), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #454 = VABSD
- { 455, 4, 1, 86, "VABSS", 0|(1<<TID::Predicable), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo66 }, // Inst #455 = VABSS
- { 456, 4, 1, 57, "VABSfd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #456 = VABSfd
- { 457, 4, 1, 57, "VABSfd_sfp", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo67 }, // Inst #457 = VABSfd_sfp
- { 458, 4, 1, 58, "VABSfq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #458 = VABSfq
- { 459, 4, 1, 60, "VABSv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #459 = VABSv16i8
- { 460, 4, 1, 59, "VABSv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #460 = VABSv2i32
- { 461, 4, 1, 59, "VABSv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #461 = VABSv4i16
- { 462, 4, 1, 60, "VABSv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #462 = VABSv4i32
- { 463, 4, 1, 60, "VABSv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #463 = VABSv8i16
- { 464, 4, 1, 59, "VABSv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #464 = VABSv8i8
- { 465, 5, 1, 1, "VACGEd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #465 = VACGEd
- { 466, 5, 1, 2, "VACGEq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #466 = VACGEq
- { 467, 5, 1, 1, "VACGTd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #467 = VACGTd
- { 468, 5, 1, 2, "VACGTq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #468 = VACGTq
- { 469, 5, 1, 62, "VADDD", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #469 = VADDD
- { 470, 5, 1, 3, "VADDHNv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo69 }, // Inst #470 = VADDHNv2i32
- { 471, 5, 1, 3, "VADDHNv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo69 }, // Inst #471 = VADDHNv4i16
- { 472, 5, 1, 3, "VADDHNv8i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo69 }, // Inst #472 = VADDHNv8i8
- { 473, 5, 1, 44, "VADDLsv2i64", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #473 = VADDLsv2i64
- { 474, 5, 1, 44, "VADDLsv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #474 = VADDLsv4i32
- { 475, 5, 1, 44, "VADDLsv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #475 = VADDLsv8i16
- { 476, 5, 1, 44, "VADDLuv2i64", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #476 = VADDLuv2i64
- { 477, 5, 1, 44, "VADDLuv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #477 = VADDLuv4i32
- { 478, 5, 1, 44, "VADDLuv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #478 = VADDLuv8i16
- { 479, 5, 1, 61, "VADDS", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo70 }, // Inst #479 = VADDS
- { 480, 5, 1, 47, "VADDWsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo71 }, // Inst #480 = VADDWsv2i64
- { 481, 5, 1, 47, "VADDWsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo71 }, // Inst #481 = VADDWsv4i32
- { 482, 5, 1, 47, "VADDWsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo71 }, // Inst #482 = VADDWsv8i16
- { 483, 5, 1, 47, "VADDWuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo71 }, // Inst #483 = VADDWuv2i64
- { 484, 5, 1, 47, "VADDWuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo71 }, // Inst #484 = VADDWuv4i32
- { 485, 5, 1, 47, "VADDWuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo71 }, // Inst #485 = VADDWuv8i16
- { 486, 5, 1, 1, "VADDfd", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #486 = VADDfd
- { 487, 5, 1, 1, "VADDfd_sfp", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo72 }, // Inst #487 = VADDfd_sfp
- { 488, 5, 1, 2, "VADDfq", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #488 = VADDfq
- { 489, 5, 1, 6, "VADDv16i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #489 = VADDv16i8
- { 490, 5, 1, 5, "VADDv1i64", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #490 = VADDv1i64
- { 491, 5, 1, 5, "VADDv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #491 = VADDv2i32
- { 492, 5, 1, 6, "VADDv2i64", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #492 = VADDv2i64
- { 493, 5, 1, 5, "VADDv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #493 = VADDv4i16
- { 494, 5, 1, 6, "VADDv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #494 = VADDv4i32
- { 495, 5, 1, 6, "VADDv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #495 = VADDv8i16
- { 496, 5, 1, 5, "VADDv8i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #496 = VADDv8i8
- { 497, 5, 1, 5, "VANDd", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #497 = VANDd
- { 498, 5, 1, 6, "VANDq", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #498 = VANDq
- { 499, 5, 1, 5, "VBICd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #499 = VBICd
- { 500, 5, 1, 6, "VBICq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #500 = VBICq
- { 501, 6, 1, 5, "VBIFd", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #501 = VBIFd
- { 502, 6, 1, 6, "VBIFq", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #502 = VBIFq
- { 503, 6, 1, 5, "VBITd", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #503 = VBITd
- { 504, 6, 1, 6, "VBITq", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #504 = VBITq
- { 505, 6, 1, 7, "VBSLd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #505 = VBSLd
- { 506, 6, 1, 8, "VBSLq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #506 = VBSLq
- { 507, 5, 1, 1, "VCEQfd", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #507 = VCEQfd
- { 508, 5, 1, 2, "VCEQfq", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #508 = VCEQfq
- { 509, 5, 1, 4, "VCEQv16i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #509 = VCEQv16i8
- { 510, 5, 1, 3, "VCEQv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #510 = VCEQv2i32
- { 511, 5, 1, 3, "VCEQv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #511 = VCEQv4i16
- { 512, 5, 1, 4, "VCEQv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #512 = VCEQv4i32
- { 513, 5, 1, 4, "VCEQv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #513 = VCEQv8i16
- { 514, 5, 1, 3, "VCEQv8i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #514 = VCEQv8i8
- { 515, 4, 1, 128, "VCEQzv16i8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #515 = VCEQzv16i8
- { 516, 4, 1, 128, "VCEQzv2f32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #516 = VCEQzv2f32
- { 517, 4, 1, 128, "VCEQzv2i32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #517 = VCEQzv2i32
- { 518, 4, 1, 128, "VCEQzv4f32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #518 = VCEQzv4f32
- { 519, 4, 1, 128, "VCEQzv4i16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #519 = VCEQzv4i16
- { 520, 4, 1, 128, "VCEQzv4i32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #520 = VCEQzv4i32
- { 521, 4, 1, 128, "VCEQzv8i16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #521 = VCEQzv8i16
- { 522, 4, 1, 128, "VCEQzv8i8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #522 = VCEQzv8i8
- { 523, 5, 1, 1, "VCGEfd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #523 = VCGEfd
- { 524, 5, 1, 2, "VCGEfq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #524 = VCGEfq
- { 525, 5, 1, 4, "VCGEsv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #525 = VCGEsv16i8
- { 526, 5, 1, 3, "VCGEsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #526 = VCGEsv2i32
- { 527, 5, 1, 3, "VCGEsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #527 = VCGEsv4i16
- { 528, 5, 1, 4, "VCGEsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #528 = VCGEsv4i32
- { 529, 5, 1, 4, "VCGEsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #529 = VCGEsv8i16
- { 530, 5, 1, 3, "VCGEsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #530 = VCGEsv8i8
- { 531, 5, 1, 4, "VCGEuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #531 = VCGEuv16i8
- { 532, 5, 1, 3, "VCGEuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #532 = VCGEuv2i32
- { 533, 5, 1, 3, "VCGEuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #533 = VCGEuv4i16
- { 534, 5, 1, 4, "VCGEuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #534 = VCGEuv4i32
- { 535, 5, 1, 4, "VCGEuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #535 = VCGEuv8i16
- { 536, 5, 1, 3, "VCGEuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #536 = VCGEuv8i8
- { 537, 4, 1, 128, "VCGEzv16i8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #537 = VCGEzv16i8
- { 538, 4, 1, 128, "VCGEzv2f32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #538 = VCGEzv2f32
- { 539, 4, 1, 128, "VCGEzv2i32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #539 = VCGEzv2i32
- { 540, 4, 1, 128, "VCGEzv4f32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #540 = VCGEzv4f32
- { 541, 4, 1, 128, "VCGEzv4i16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #541 = VCGEzv4i16
- { 542, 4, 1, 128, "VCGEzv4i32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #542 = VCGEzv4i32
- { 543, 4, 1, 128, "VCGEzv8i16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #543 = VCGEzv8i16
- { 544, 4, 1, 128, "VCGEzv8i8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #544 = VCGEzv8i8
- { 545, 5, 1, 1, "VCGTfd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #545 = VCGTfd
- { 546, 5, 1, 2, "VCGTfq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #546 = VCGTfq
- { 547, 5, 1, 4, "VCGTsv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #547 = VCGTsv16i8
- { 548, 5, 1, 3, "VCGTsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #548 = VCGTsv2i32
- { 549, 5, 1, 3, "VCGTsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #549 = VCGTsv4i16
- { 550, 5, 1, 4, "VCGTsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #550 = VCGTsv4i32
- { 551, 5, 1, 4, "VCGTsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #551 = VCGTsv8i16
- { 552, 5, 1, 3, "VCGTsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #552 = VCGTsv8i8
- { 553, 5, 1, 4, "VCGTuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #553 = VCGTuv16i8
- { 554, 5, 1, 3, "VCGTuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #554 = VCGTuv2i32
- { 555, 5, 1, 3, "VCGTuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #555 = VCGTuv4i16
- { 556, 5, 1, 4, "VCGTuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #556 = VCGTuv4i32
- { 557, 5, 1, 4, "VCGTuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #557 = VCGTuv8i16
- { 558, 5, 1, 3, "VCGTuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #558 = VCGTuv8i8
- { 559, 4, 1, 128, "VCGTzv16i8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #559 = VCGTzv16i8
- { 560, 4, 1, 128, "VCGTzv2f32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #560 = VCGTzv2f32
- { 561, 4, 1, 128, "VCGTzv2i32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #561 = VCGTzv2i32
- { 562, 4, 1, 128, "VCGTzv4f32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #562 = VCGTzv4f32
- { 563, 4, 1, 128, "VCGTzv4i16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #563 = VCGTzv4i16
- { 564, 4, 1, 128, "VCGTzv4i32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #564 = VCGTzv4i32
- { 565, 4, 1, 128, "VCGTzv8i16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #565 = VCGTzv8i16
- { 566, 4, 1, 128, "VCGTzv8i8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #566 = VCGTzv8i8
- { 567, 4, 1, 128, "VCLEzv16i8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #567 = VCLEzv16i8
- { 568, 4, 1, 128, "VCLEzv2f32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #568 = VCLEzv2f32
- { 569, 4, 1, 128, "VCLEzv2i32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #569 = VCLEzv2i32
- { 570, 4, 1, 128, "VCLEzv4f32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #570 = VCLEzv4f32
- { 571, 4, 1, 128, "VCLEzv4i16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #571 = VCLEzv4i16
- { 572, 4, 1, 128, "VCLEzv4i32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #572 = VCLEzv4i32
- { 573, 4, 1, 128, "VCLEzv8i16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #573 = VCLEzv8i16
- { 574, 4, 1, 128, "VCLEzv8i8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #574 = VCLEzv8i8
- { 575, 4, 1, 8, "VCLSv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #575 = VCLSv16i8
- { 576, 4, 1, 7, "VCLSv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #576 = VCLSv2i32
- { 577, 4, 1, 7, "VCLSv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #577 = VCLSv4i16
- { 578, 4, 1, 8, "VCLSv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #578 = VCLSv4i32
- { 579, 4, 1, 8, "VCLSv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #579 = VCLSv8i16
- { 580, 4, 1, 7, "VCLSv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #580 = VCLSv8i8
- { 581, 4, 1, 128, "VCLTzv16i8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #581 = VCLTzv16i8
- { 582, 4, 1, 128, "VCLTzv2f32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #582 = VCLTzv2f32
- { 583, 4, 1, 128, "VCLTzv2i32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #583 = VCLTzv2i32
- { 584, 4, 1, 128, "VCLTzv4f32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #584 = VCLTzv4f32
- { 585, 4, 1, 128, "VCLTzv4i16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #585 = VCLTzv4i16
- { 586, 4, 1, 128, "VCLTzv4i32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #586 = VCLTzv4i32
- { 587, 4, 1, 128, "VCLTzv8i16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #587 = VCLTzv8i16
- { 588, 4, 1, 128, "VCLTzv8i8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #588 = VCLTzv8i8
- { 589, 4, 1, 8, "VCLZv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #589 = VCLZv16i8
- { 590, 4, 1, 7, "VCLZv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #590 = VCLZv2i32
- { 591, 4, 1, 7, "VCLZv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #591 = VCLZv4i16
- { 592, 4, 1, 8, "VCLZv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #592 = VCLZv4i32
- { 593, 4, 1, 8, "VCLZv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #593 = VCLZv8i16
- { 594, 4, 1, 7, "VCLZv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #594 = VCLZv8i8
- { 595, 4, 0, 64, "VCMPD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(13<<9)|(1<<17), NULL, ImplicitList5, NULL, OperandInfo65 }, // Inst #595 = VCMPD
- { 596, 4, 0, 64, "VCMPED", 0|(1<<TID::Predicable), 0|(3<<4)|(13<<9)|(1<<17), NULL, ImplicitList5, NULL, OperandInfo65 }, // Inst #596 = VCMPED
- { 597, 4, 0, 63, "VCMPES", 0|(1<<TID::Predicable), 0|(3<<4)|(13<<9)|(1<<17), NULL, ImplicitList5, NULL, OperandInfo66 }, // Inst #597 = VCMPES
- { 598, 3, 0, 64, "VCMPEZD", 0|(1<<TID::Predicable), 0|(3<<4)|(13<<9)|(1<<17), NULL, ImplicitList5, NULL, OperandInfo73 }, // Inst #598 = VCMPEZD
- { 599, 3, 0, 63, "VCMPEZS", 0|(1<<TID::Predicable), 0|(3<<4)|(13<<9)|(1<<17), NULL, ImplicitList5, NULL, OperandInfo74 }, // Inst #599 = VCMPEZS
- { 600, 4, 0, 63, "VCMPS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(13<<9)|(1<<17), NULL, ImplicitList5, NULL, OperandInfo66 }, // Inst #600 = VCMPS
- { 601, 3, 0, 64, "VCMPZD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(13<<9)|(1<<17), NULL, ImplicitList5, NULL, OperandInfo73 }, // Inst #601 = VCMPZD
- { 602, 3, 0, 63, "VCMPZS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(13<<9)|(1<<17), NULL, ImplicitList5, NULL, OperandInfo74 }, // Inst #602 = VCMPZS
- { 603, 4, 1, 7, "VCNTd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #603 = VCNTd
- { 604, 4, 1, 8, "VCNTq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #604 = VCNTq
- { 605, 4, 1, 66, "VCVTBHS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo66 }, // Inst #605 = VCVTBHS
- { 606, 4, 1, 66, "VCVTBSH", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo66 }, // Inst #606 = VCVTBSH
- { 607, 4, 1, 66, "VCVTDS", 0|(1<<TID::Predicable), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo75 }, // Inst #607 = VCVTDS
- { 608, 4, 1, 69, "VCVTSD", 0|(1<<TID::Predicable), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo76 }, // Inst #608 = VCVTSD
- { 609, 4, 1, 66, "VCVTTHS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo66 }, // Inst #609 = VCVTTHS
- { 610, 4, 1, 66, "VCVTTSH", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo66 }, // Inst #610 = VCVTTSH
- { 611, 4, 1, 57, "VCVTf2sd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #611 = VCVTf2sd
- { 612, 4, 1, 57, "VCVTf2sd_sfp", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo67 }, // Inst #612 = VCVTf2sd_sfp
- { 613, 4, 1, 58, "VCVTf2sq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #613 = VCVTf2sq
- { 614, 4, 1, 57, "VCVTf2ud", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #614 = VCVTf2ud
- { 615, 4, 1, 57, "VCVTf2ud_sfp", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo67 }, // Inst #615 = VCVTf2ud_sfp
- { 616, 4, 1, 58, "VCVTf2uq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #616 = VCVTf2uq
- { 617, 5, 1, 57, "VCVTf2xsd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #617 = VCVTf2xsd
- { 618, 5, 1, 58, "VCVTf2xsq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #618 = VCVTf2xsq
- { 619, 5, 1, 57, "VCVTf2xud", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #619 = VCVTf2xud
- { 620, 5, 1, 58, "VCVTf2xuq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #620 = VCVTf2xuq
- { 621, 4, 1, 57, "VCVTs2fd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #621 = VCVTs2fd
- { 622, 4, 1, 57, "VCVTs2fd_sfp", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo67 }, // Inst #622 = VCVTs2fd_sfp
- { 623, 4, 1, 58, "VCVTs2fq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #623 = VCVTs2fq
- { 624, 4, 1, 57, "VCVTu2fd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #624 = VCVTu2fd
- { 625, 4, 1, 57, "VCVTu2fd_sfp", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo67 }, // Inst #625 = VCVTu2fd_sfp
- { 626, 4, 1, 58, "VCVTu2fq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #626 = VCVTu2fq
- { 627, 5, 1, 57, "VCVTxs2fd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #627 = VCVTxs2fd
- { 628, 5, 1, 58, "VCVTxs2fq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #628 = VCVTxs2fq
- { 629, 5, 1, 57, "VCVTxu2fd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #629 = VCVTxu2fd
- { 630, 5, 1, 58, "VCVTxu2fq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #630 = VCVTxu2fq
- { 631, 5, 1, 72, "VDIVD", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #631 = VDIVD
- { 632, 5, 1, 71, "VDIVS", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo70 }, // Inst #632 = VDIVS
- { 633, 4, 1, 24, "VDUP16d", 0|(1<<TID::Predicable), 0|(3<<4)|(27<<9), NULL, NULL, NULL, OperandInfo79 }, // Inst #633 = VDUP16d
- { 634, 4, 1, 24, "VDUP16q", 0|(1<<TID::Predicable), 0|(3<<4)|(27<<9), NULL, NULL, NULL, OperandInfo80 }, // Inst #634 = VDUP16q
- { 635, 4, 1, 24, "VDUP32d", 0|(1<<TID::Predicable), 0|(3<<4)|(27<<9), NULL, NULL, NULL, OperandInfo79 }, // Inst #635 = VDUP32d
- { 636, 4, 1, 24, "VDUP32q", 0|(1<<TID::Predicable), 0|(3<<4)|(27<<9), NULL, NULL, NULL, OperandInfo80 }, // Inst #636 = VDUP32q
- { 637, 4, 1, 24, "VDUP8d", 0|(1<<TID::Predicable), 0|(3<<4)|(27<<9), NULL, NULL, NULL, OperandInfo79 }, // Inst #637 = VDUP8d
- { 638, 4, 1, 24, "VDUP8q", 0|(1<<TID::Predicable), 0|(3<<4)|(27<<9), NULL, NULL, NULL, OperandInfo80 }, // Inst #638 = VDUP8q
- { 639, 5, 1, 21, "VDUPLN16d", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #639 = VDUPLN16d
- { 640, 5, 1, 21, "VDUPLN16q", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo81 }, // Inst #640 = VDUPLN16q
- { 641, 5, 1, 21, "VDUPLN32d", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #641 = VDUPLN32d
- { 642, 5, 1, 21, "VDUPLN32q", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo81 }, // Inst #642 = VDUPLN32q
- { 643, 5, 1, 21, "VDUPLN8d", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #643 = VDUPLN8d
- { 644, 5, 1, 21, "VDUPLN8q", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo81 }, // Inst #644 = VDUPLN8q
- { 645, 5, 1, 21, "VDUPLNfd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #645 = VDUPLNfd
- { 646, 5, 1, 21, "VDUPLNfq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo81 }, // Inst #646 = VDUPLNfq
- { 647, 4, 1, 24, "VDUPfd", 0|(1<<TID::Predicable), 0|(3<<4)|(27<<9), NULL, NULL, NULL, OperandInfo79 }, // Inst #647 = VDUPfd
- { 648, 4, 1, 21, "VDUPfdf", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo75 }, // Inst #648 = VDUPfdf
- { 649, 4, 1, 24, "VDUPfq", 0|(1<<TID::Predicable), 0|(3<<4)|(27<<9), NULL, NULL, NULL, OperandInfo80 }, // Inst #649 = VDUPfq
- { 650, 4, 1, 21, "VDUPfqf", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo82 }, // Inst #650 = VDUPfqf
- { 651, 5, 1, 5, "VEORd", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #651 = VEORd
- { 652, 5, 1, 6, "VEORq", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #652 = VEORq
- { 653, 6, 1, 9, "VEXTd16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo83 }, // Inst #653 = VEXTd16
- { 654, 6, 1, 9, "VEXTd32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo83 }, // Inst #654 = VEXTd32
- { 655, 6, 1, 9, "VEXTd8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo83 }, // Inst #655 = VEXTd8
- { 656, 6, 1, 9, "VEXTdf", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo83 }, // Inst #656 = VEXTdf
- { 657, 6, 1, 10, "VEXTq16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo84 }, // Inst #657 = VEXTq16
- { 658, 6, 1, 10, "VEXTq32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo84 }, // Inst #658 = VEXTq32
- { 659, 6, 1, 10, "VEXTq8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo84 }, // Inst #659 = VEXTq8
- { 660, 6, 1, 10, "VEXTqf", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo84 }, // Inst #660 = VEXTqf
- { 661, 5, 1, 28, "VGETLNi32", 0|(1<<TID::Predicable), 0|(3<<4)|(25<<9), NULL, NULL, NULL, OperandInfo85 }, // Inst #661 = VGETLNi32
- { 662, 5, 1, 28, "VGETLNs16", 0|(1<<TID::Predicable), 0|(3<<4)|(25<<9), NULL, NULL, NULL, OperandInfo85 }, // Inst #662 = VGETLNs16
- { 663, 5, 1, 28, "VGETLNs8", 0|(1<<TID::Predicable), 0|(3<<4)|(25<<9), NULL, NULL, NULL, OperandInfo85 }, // Inst #663 = VGETLNs8
- { 664, 5, 1, 28, "VGETLNu16", 0|(1<<TID::Predicable), 0|(3<<4)|(25<<9), NULL, NULL, NULL, OperandInfo85 }, // Inst #664 = VGETLNu16
- { 665, 5, 1, 28, "VGETLNu8", 0|(1<<TID::Predicable), 0|(3<<4)|(25<<9), NULL, NULL, NULL, OperandInfo85 }, // Inst #665 = VGETLNu8
- { 666, 5, 1, 4, "VHADDsv16i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #666 = VHADDsv16i8
- { 667, 5, 1, 3, "VHADDsv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #667 = VHADDsv2i32
- { 668, 5, 1, 3, "VHADDsv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #668 = VHADDsv4i16
- { 669, 5, 1, 4, "VHADDsv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #669 = VHADDsv4i32
- { 670, 5, 1, 4, "VHADDsv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #670 = VHADDsv8i16
- { 671, 5, 1, 3, "VHADDsv8i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #671 = VHADDsv8i8
- { 672, 5, 1, 4, "VHADDuv16i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #672 = VHADDuv16i8
- { 673, 5, 1, 3, "VHADDuv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #673 = VHADDuv2i32
- { 674, 5, 1, 3, "VHADDuv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #674 = VHADDuv4i16
- { 675, 5, 1, 4, "VHADDuv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #675 = VHADDuv4i32
- { 676, 5, 1, 4, "VHADDuv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #676 = VHADDuv8i16
- { 677, 5, 1, 3, "VHADDuv8i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #677 = VHADDuv8i8
- { 678, 5, 1, 4, "VHSUBsv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #678 = VHSUBsv16i8
- { 679, 5, 1, 3, "VHSUBsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #679 = VHSUBsv2i32
- { 680, 5, 1, 3, "VHSUBsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #680 = VHSUBsv4i16
- { 681, 5, 1, 4, "VHSUBsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #681 = VHSUBsv4i32
- { 682, 5, 1, 4, "VHSUBsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #682 = VHSUBsv8i16
- { 683, 5, 1, 3, "VHSUBsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #683 = VHSUBsv8i8
- { 684, 5, 1, 4, "VHSUBuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #684 = VHSUBuv16i8
- { 685, 5, 1, 3, "VHSUBuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #685 = VHSUBuv2i32
- { 686, 5, 1, 3, "VHSUBuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #686 = VHSUBuv4i16
- { 687, 5, 1, 4, "VHSUBuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #687 = VHSUBuv4i32
- { 688, 5, 1, 4, "VHSUBuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #688 = VHSUBuv8i16
- { 689, 5, 1, 3, "VHSUBuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #689 = VHSUBuv8i8
- { 690, 7, 1, 11, "VLD1d16", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo86 }, // Inst #690 = VLD1d16
- { 691, 10, 4, 11, "VLD1d16Q", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo87 }, // Inst #691 = VLD1d16Q
- { 692, 9, 3, 11, "VLD1d16T", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo88 }, // Inst #692 = VLD1d16T
- { 693, 7, 1, 11, "VLD1d32", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo86 }, // Inst #693 = VLD1d32
- { 694, 10, 4, 11, "VLD1d32Q", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo87 }, // Inst #694 = VLD1d32Q
- { 695, 9, 3, 11, "VLD1d32T", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo88 }, // Inst #695 = VLD1d32T
- { 696, 7, 1, 11, "VLD1d64", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo86 }, // Inst #696 = VLD1d64
- { 697, 7, 1, 11, "VLD1d8", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo86 }, // Inst #697 = VLD1d8
- { 698, 10, 4, 11, "VLD1d8Q", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo87 }, // Inst #698 = VLD1d8Q
- { 699, 9, 3, 11, "VLD1d8T", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo88 }, // Inst #699 = VLD1d8T
- { 700, 7, 1, 11, "VLD1df", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo86 }, // Inst #700 = VLD1df
- { 701, 7, 1, 11, "VLD1q16", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo89 }, // Inst #701 = VLD1q16
- { 702, 7, 1, 11, "VLD1q32", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo89 }, // Inst #702 = VLD1q32
- { 703, 7, 1, 11, "VLD1q64", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo89 }, // Inst #703 = VLD1q64
- { 704, 7, 1, 11, "VLD1q8", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo89 }, // Inst #704 = VLD1q8
- { 705, 7, 1, 11, "VLD1qf", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo89 }, // Inst #705 = VLD1qf
- { 706, 11, 2, 12, "VLD2LNd16", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo90 }, // Inst #706 = VLD2LNd16
- { 707, 11, 2, 12, "VLD2LNd32", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo90 }, // Inst #707 = VLD2LNd32
- { 708, 11, 2, 12, "VLD2LNd8", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo90 }, // Inst #708 = VLD2LNd8
- { 709, 11, 2, 12, "VLD2LNq16a", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo90 }, // Inst #709 = VLD2LNq16a
- { 710, 11, 2, 12, "VLD2LNq16b", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo90 }, // Inst #710 = VLD2LNq16b
- { 711, 11, 2, 12, "VLD2LNq32a", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo90 }, // Inst #711 = VLD2LNq32a
- { 712, 11, 2, 12, "VLD2LNq32b", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo90 }, // Inst #712 = VLD2LNq32b
- { 713, 8, 2, 12, "VLD2d16", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo91 }, // Inst #713 = VLD2d16
- { 714, 8, 2, 12, "VLD2d16D", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo91 }, // Inst #714 = VLD2d16D
- { 715, 8, 2, 12, "VLD2d32", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo91 }, // Inst #715 = VLD2d32
- { 716, 8, 2, 12, "VLD2d32D", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo91 }, // Inst #716 = VLD2d32D
- { 717, 8, 2, 11, "VLD2d64", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo91 }, // Inst #717 = VLD2d64
- { 718, 8, 2, 12, "VLD2d8", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo91 }, // Inst #718 = VLD2d8
- { 719, 8, 2, 12, "VLD2d8D", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo91 }, // Inst #719 = VLD2d8D
- { 720, 10, 4, 12, "VLD2q16", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo87 }, // Inst #720 = VLD2q16
- { 721, 10, 4, 12, "VLD2q32", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo87 }, // Inst #721 = VLD2q32
- { 722, 10, 4, 12, "VLD2q8", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo87 }, // Inst #722 = VLD2q8
- { 723, 13, 3, 13, "VLD3LNd16", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo92 }, // Inst #723 = VLD3LNd16
- { 724, 13, 3, 13, "VLD3LNd32", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo92 }, // Inst #724 = VLD3LNd32
- { 725, 13, 3, 13, "VLD3LNd8", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo92 }, // Inst #725 = VLD3LNd8
- { 726, 13, 3, 13, "VLD3LNq16a", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo92 }, // Inst #726 = VLD3LNq16a
- { 727, 13, 3, 13, "VLD3LNq16b", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo92 }, // Inst #727 = VLD3LNq16b
- { 728, 13, 3, 13, "VLD3LNq32a", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo92 }, // Inst #728 = VLD3LNq32a
- { 729, 13, 3, 13, "VLD3LNq32b", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo92 }, // Inst #729 = VLD3LNq32b
- { 730, 9, 3, 13, "VLD3d16", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo88 }, // Inst #730 = VLD3d16
- { 731, 9, 3, 13, "VLD3d32", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo88 }, // Inst #731 = VLD3d32
- { 732, 9, 3, 11, "VLD3d64", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo88 }, // Inst #732 = VLD3d64
- { 733, 9, 3, 13, "VLD3d8", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo88 }, // Inst #733 = VLD3d8
- { 734, 10, 4, 13, "VLD3q16a", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo93 }, // Inst #734 = VLD3q16a
- { 735, 10, 4, 13, "VLD3q16b", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo93 }, // Inst #735 = VLD3q16b
- { 736, 10, 4, 13, "VLD3q32a", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo93 }, // Inst #736 = VLD3q32a
- { 737, 10, 4, 13, "VLD3q32b", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo93 }, // Inst #737 = VLD3q32b
- { 738, 10, 4, 13, "VLD3q8a", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo93 }, // Inst #738 = VLD3q8a
- { 739, 10, 4, 13, "VLD3q8b", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo93 }, // Inst #739 = VLD3q8b
- { 740, 15, 4, 14, "VLD4LNd16", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo94 }, // Inst #740 = VLD4LNd16
- { 741, 15, 4, 14, "VLD4LNd32", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo94 }, // Inst #741 = VLD4LNd32
- { 742, 15, 4, 14, "VLD4LNd8", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo94 }, // Inst #742 = VLD4LNd8
- { 743, 15, 4, 14, "VLD4LNq16a", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo94 }, // Inst #743 = VLD4LNq16a
- { 744, 15, 4, 14, "VLD4LNq16b", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo94 }, // Inst #744 = VLD4LNq16b
- { 745, 15, 4, 14, "VLD4LNq32a", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo94 }, // Inst #745 = VLD4LNq32a
- { 746, 15, 4, 14, "VLD4LNq32b", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo94 }, // Inst #746 = VLD4LNq32b
- { 747, 10, 4, 14, "VLD4d16", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo87 }, // Inst #747 = VLD4d16
- { 748, 10, 4, 14, "VLD4d32", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo87 }, // Inst #748 = VLD4d32
- { 749, 10, 4, 11, "VLD4d64", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo87 }, // Inst #749 = VLD4d64
- { 750, 10, 4, 14, "VLD4d8", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo87 }, // Inst #750 = VLD4d8
- { 751, 11, 5, 14, "VLD4q16a", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo95 }, // Inst #751 = VLD4q16a
- { 752, 11, 5, 14, "VLD4q16b", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo95 }, // Inst #752 = VLD4q16b
- { 753, 11, 5, 14, "VLD4q32a", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo95 }, // Inst #753 = VLD4q32a
- { 754, 11, 5, 14, "VLD4q32b", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo95 }, // Inst #754 = VLD4q32b
- { 755, 11, 5, 14, "VLD4q8a", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo95 }, // Inst #755 = VLD4q8a
- { 756, 11, 5, 14, "VLD4q8b", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo95 }, // Inst #756 = VLD4q8b
- { 757, 5, 0, 75, "VLDMD", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|5|(3<<4)|(21<<9)|(3<<17), NULL, NULL, NULL, OperandInfo35 }, // Inst #757 = VLDMD
- { 758, 5, 0, 75, "VLDMS", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|5|(3<<4)|(21<<9)|(1<<17), NULL, NULL, NULL, OperandInfo35 }, // Inst #758 = VLDMS
- { 759, 5, 1, 74, "VLDRD", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|5|(3<<4)|(20<<9)|(3<<17), NULL, NULL, NULL, OperandInfo96 }, // Inst #759 = VLDRD
- { 760, 5, 1, 75, "VLDRQ", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|4|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo97 }, // Inst #760 = VLDRQ
- { 761, 5, 1, 73, "VLDRS", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|5|(3<<4)|(20<<9)|(1<<17), NULL, NULL, NULL, OperandInfo98 }, // Inst #761 = VLDRS
- { 762, 5, 1, 1, "VMAXfd", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #762 = VMAXfd
- { 763, 5, 1, 1, "VMAXfd_sfp", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo72 }, // Inst #763 = VMAXfd_sfp
- { 764, 5, 1, 2, "VMAXfq", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #764 = VMAXfq
- { 765, 5, 1, 4, "VMAXsv16i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #765 = VMAXsv16i8
- { 766, 5, 1, 3, "VMAXsv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #766 = VMAXsv2i32
- { 767, 5, 1, 3, "VMAXsv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #767 = VMAXsv4i16
- { 768, 5, 1, 4, "VMAXsv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #768 = VMAXsv4i32
- { 769, 5, 1, 4, "VMAXsv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #769 = VMAXsv8i16
- { 770, 5, 1, 3, "VMAXsv8i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #770 = VMAXsv8i8
- { 771, 5, 1, 4, "VMAXuv16i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #771 = VMAXuv16i8
- { 772, 5, 1, 3, "VMAXuv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #772 = VMAXuv2i32
- { 773, 5, 1, 3, "VMAXuv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #773 = VMAXuv4i16
- { 774, 5, 1, 4, "VMAXuv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #774 = VMAXuv4i32
- { 775, 5, 1, 4, "VMAXuv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #775 = VMAXuv8i16
- { 776, 5, 1, 3, "VMAXuv8i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #776 = VMAXuv8i8
- { 777, 5, 1, 1, "VMINfd", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #777 = VMINfd
- { 778, 5, 1, 1, "VMINfd_sfp", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo72 }, // Inst #778 = VMINfd_sfp
- { 779, 5, 1, 2, "VMINfq", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #779 = VMINfq
- { 780, 5, 1, 4, "VMINsv16i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #780 = VMINsv16i8
- { 781, 5, 1, 3, "VMINsv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #781 = VMINsv2i32
- { 782, 5, 1, 3, "VMINsv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #782 = VMINsv4i16
- { 783, 5, 1, 4, "VMINsv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #783 = VMINsv4i32
- { 784, 5, 1, 4, "VMINsv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #784 = VMINsv8i16
- { 785, 5, 1, 3, "VMINsv8i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #785 = VMINsv8i8
- { 786, 5, 1, 4, "VMINuv16i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #786 = VMINuv16i8
- { 787, 5, 1, 3, "VMINuv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #787 = VMINuv2i32
- { 788, 5, 1, 3, "VMINuv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #788 = VMINuv4i16
- { 789, 5, 1, 4, "VMINuv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #789 = VMINuv4i32
- { 790, 5, 1, 4, "VMINuv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #790 = VMINuv8i16
- { 791, 5, 1, 3, "VMINuv8i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #791 = VMINuv8i8
- { 792, 6, 1, 77, "VMLAD", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #792 = VMLAD
- { 793, 7, 1, 19, "VMLALslsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo99 }, // Inst #793 = VMLALslsv2i32
- { 794, 7, 1, 17, "VMLALslsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo100 }, // Inst #794 = VMLALslsv4i16
- { 795, 7, 1, 19, "VMLALsluv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo99 }, // Inst #795 = VMLALsluv2i32
- { 796, 7, 1, 17, "VMLALsluv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo100 }, // Inst #796 = VMLALsluv4i16
- { 797, 6, 1, 17, "VMLALsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #797 = VMLALsv2i64
- { 798, 6, 1, 17, "VMLALsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #798 = VMLALsv4i32
- { 799, 6, 1, 17, "VMLALsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #799 = VMLALsv8i16
- { 800, 6, 1, 17, "VMLALuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #800 = VMLALuv2i64
- { 801, 6, 1, 17, "VMLALuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #801 = VMLALuv4i32
- { 802, 6, 1, 17, "VMLALuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #802 = VMLALuv8i16
- { 803, 6, 1, 76, "VMLAS", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo101 }, // Inst #803 = VMLAS
- { 804, 6, 1, 15, "VMLAfd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #804 = VMLAfd
- { 805, 6, 1, 16, "VMLAfq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #805 = VMLAfq
- { 806, 7, 1, 15, "VMLAslfd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo102 }, // Inst #806 = VMLAslfd
- { 807, 7, 1, 16, "VMLAslfq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo103 }, // Inst #807 = VMLAslfq
- { 808, 7, 1, 19, "VMLAslv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo102 }, // Inst #808 = VMLAslv2i32
- { 809, 7, 1, 17, "VMLAslv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo104 }, // Inst #809 = VMLAslv4i16
- { 810, 7, 1, 20, "VMLAslv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo103 }, // Inst #810 = VMLAslv4i32
- { 811, 7, 1, 18, "VMLAslv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo105 }, // Inst #811 = VMLAslv8i16
- { 812, 6, 1, 18, "VMLAv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #812 = VMLAv16i8
- { 813, 6, 1, 19, "VMLAv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #813 = VMLAv2i32
- { 814, 6, 1, 17, "VMLAv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #814 = VMLAv4i16
- { 815, 6, 1, 20, "VMLAv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #815 = VMLAv4i32
- { 816, 6, 1, 18, "VMLAv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #816 = VMLAv8i16
- { 817, 6, 1, 17, "VMLAv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #817 = VMLAv8i8
- { 818, 6, 1, 77, "VMLSD", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #818 = VMLSD
- { 819, 7, 1, 19, "VMLSLslsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo99 }, // Inst #819 = VMLSLslsv2i32
- { 820, 7, 1, 17, "VMLSLslsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo100 }, // Inst #820 = VMLSLslsv4i16
- { 821, 7, 1, 19, "VMLSLsluv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo99 }, // Inst #821 = VMLSLsluv2i32
- { 822, 7, 1, 17, "VMLSLsluv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo100 }, // Inst #822 = VMLSLsluv4i16
- { 823, 6, 1, 17, "VMLSLsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #823 = VMLSLsv2i64
- { 824, 6, 1, 17, "VMLSLsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #824 = VMLSLsv4i32
- { 825, 6, 1, 17, "VMLSLsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #825 = VMLSLsv8i16
- { 826, 6, 1, 17, "VMLSLuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #826 = VMLSLuv2i64
- { 827, 6, 1, 17, "VMLSLuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #827 = VMLSLuv4i32
- { 828, 6, 1, 17, "VMLSLuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #828 = VMLSLuv8i16
- { 829, 6, 1, 76, "VMLSS", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo101 }, // Inst #829 = VMLSS
- { 830, 6, 1, 15, "VMLSfd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #830 = VMLSfd
- { 831, 6, 1, 16, "VMLSfq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #831 = VMLSfq
- { 832, 7, 1, 15, "VMLSslfd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo102 }, // Inst #832 = VMLSslfd
- { 833, 7, 1, 16, "VMLSslfq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo103 }, // Inst #833 = VMLSslfq
- { 834, 7, 1, 19, "VMLSslv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo102 }, // Inst #834 = VMLSslv2i32
- { 835, 7, 1, 17, "VMLSslv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo104 }, // Inst #835 = VMLSslv4i16
- { 836, 7, 1, 20, "VMLSslv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo103 }, // Inst #836 = VMLSslv4i32
- { 837, 7, 1, 18, "VMLSslv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo105 }, // Inst #837 = VMLSslv8i16
- { 838, 6, 1, 18, "VMLSv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #838 = VMLSv16i8
- { 839, 6, 1, 19, "VMLSv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #839 = VMLSv2i32
- { 840, 6, 1, 17, "VMLSv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #840 = VMLSv4i16
- { 841, 6, 1, 20, "VMLSv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #841 = VMLSv4i32
- { 842, 6, 1, 18, "VMLSv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo60 }, // Inst #842 = VMLSv8i16
- { 843, 6, 1, 17, "VMLSv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #843 = VMLSv8i8
- { 844, 4, 1, 87, "VMOVD", 0|(1<<TID::Predicable), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #844 = VMOVD
- { 845, 5, 1, 23, "VMOVDRR", 0|(1<<TID::Predicable), 0|(3<<4)|(19<<9)|(1<<17), NULL, NULL, NULL, OperandInfo106 }, // Inst #845 = VMOVDRR
- { 846, 5, 1, 87, "VMOVDcc", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo107 }, // Inst #846 = VMOVDcc
- { 847, 4, 1, 21, "VMOVDneon", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #847 = VMOVDneon
- { 848, 4, 1, 38, "VMOVLsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo108 }, // Inst #848 = VMOVLsv2i64
- { 849, 4, 1, 38, "VMOVLsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo108 }, // Inst #849 = VMOVLsv4i32
- { 850, 4, 1, 38, "VMOVLsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo108 }, // Inst #850 = VMOVLsv8i16
- { 851, 4, 1, 38, "VMOVLuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo108 }, // Inst #851 = VMOVLuv2i64
- { 852, 4, 1, 38, "VMOVLuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo108 }, // Inst #852 = VMOVLuv4i32
- { 853, 4, 1, 38, "VMOVLuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo108 }, // Inst #853 = VMOVLuv8i16
- { 854, 4, 1, 21, "VMOVNv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo109 }, // Inst #854 = VMOVNv2i32
- { 855, 4, 1, 21, "VMOVNv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo109 }, // Inst #855 = VMOVNv4i16
- { 856, 4, 1, 21, "VMOVNv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo109 }, // Inst #856 = VMOVNv8i8
- { 857, 4, 1, 21, "VMOVQ", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #857 = VMOVQ
- { 858, 5, 2, 22, "VMOVRRD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(17<<9)|(1<<17), NULL, NULL, NULL, OperandInfo110 }, // Inst #858 = VMOVRRD
- { 859, 6, 2, 22, "VMOVRRS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(17<<9)|(1<<17), NULL, NULL, NULL, OperandInfo111 }, // Inst #859 = VMOVRRS
- { 860, 4, 1, 28, "VMOVRS", 0|(1<<TID::Predicable), 0|(3<<4)|(16<<9)|(1<<17), NULL, NULL, NULL, OperandInfo112 }, // Inst #860 = VMOVRS
- { 861, 4, 1, 86, "VMOVS", 0|(1<<TID::Predicable), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo66 }, // Inst #861 = VMOVS
- { 862, 4, 1, 24, "VMOVSR", 0|(1<<TID::Predicable), 0|(3<<4)|(18<<9)|(1<<17), NULL, NULL, NULL, OperandInfo113 }, // Inst #862 = VMOVSR
- { 863, 6, 2, 23, "VMOVSRR", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(19<<9)|(1<<17), NULL, NULL, NULL, OperandInfo114 }, // Inst #863 = VMOVSRR
- { 864, 5, 1, 86, "VMOVScc", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo115 }, // Inst #864 = VMOVScc
- { 865, 4, 1, 26, "VMOVv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo116 }, // Inst #865 = VMOVv16i8
- { 866, 4, 1, 26, "VMOVv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo29 }, // Inst #866 = VMOVv1i64
- { 867, 4, 1, 26, "VMOVv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo29 }, // Inst #867 = VMOVv2i32
- { 868, 4, 1, 26, "VMOVv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo116 }, // Inst #868 = VMOVv2i64
- { 869, 4, 1, 26, "VMOVv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo29 }, // Inst #869 = VMOVv4i16
- { 870, 4, 1, 26, "VMOVv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo116 }, // Inst #870 = VMOVv4i32
- { 871, 4, 1, 26, "VMOVv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo116 }, // Inst #871 = VMOVv8i16
- { 872, 4, 1, 26, "VMOVv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo29 }, // Inst #872 = VMOVv8i8
- { 873, 3, 1, 82, "VMRS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(22<<9)|(1<<17), ImplicitList5, NULL, NULL, OperandInfo21 }, // Inst #873 = VMRS
- { 874, 3, 0, 82, "VMSR", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(22<<9)|(1<<17), NULL, ImplicitList5, NULL, OperandInfo21 }, // Inst #874 = VMSR
- { 875, 5, 1, 79, "VMULD", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #875 = VMULD
- { 876, 5, 1, 29, "VMULLp", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #876 = VMULLp
- { 877, 6, 1, 29, "VMULLslsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo117 }, // Inst #877 = VMULLslsv2i32
- { 878, 6, 1, 29, "VMULLslsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo118 }, // Inst #878 = VMULLslsv4i16
- { 879, 6, 1, 29, "VMULLsluv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo117 }, // Inst #879 = VMULLsluv2i32
- { 880, 6, 1, 29, "VMULLsluv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo118 }, // Inst #880 = VMULLsluv4i16
- { 881, 5, 1, 29, "VMULLsv2i64", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #881 = VMULLsv2i64
- { 882, 5, 1, 29, "VMULLsv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #882 = VMULLsv4i32
- { 883, 5, 1, 29, "VMULLsv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #883 = VMULLsv8i16
- { 884, 5, 1, 29, "VMULLuv2i64", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #884 = VMULLuv2i64
- { 885, 5, 1, 29, "VMULLuv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #885 = VMULLuv4i32
- { 886, 5, 1, 29, "VMULLuv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #886 = VMULLuv8i16
- { 887, 5, 1, 78, "VMULS", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo70 }, // Inst #887 = VMULS
- { 888, 5, 1, 1, "VMULfd", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #888 = VMULfd
- { 889, 5, 1, 1, "VMULfd_sfp", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo72 }, // Inst #889 = VMULfd_sfp
- { 890, 5, 1, 2, "VMULfq", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #890 = VMULfq
- { 891, 5, 1, 29, "VMULpd", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #891 = VMULpd
- { 892, 5, 1, 30, "VMULpq", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #892 = VMULpq
- { 893, 6, 1, 1, "VMULslfd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo119 }, // Inst #893 = VMULslfd
- { 894, 6, 1, 2, "VMULslfq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo120 }, // Inst #894 = VMULslfq
- { 895, 6, 1, 31, "VMULslv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo119 }, // Inst #895 = VMULslv2i32
- { 896, 6, 1, 29, "VMULslv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo121 }, // Inst #896 = VMULslv4i16
- { 897, 6, 1, 32, "VMULslv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo120 }, // Inst #897 = VMULslv4i32
- { 898, 6, 1, 30, "VMULslv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo122 }, // Inst #898 = VMULslv8i16
- { 899, 5, 1, 30, "VMULv16i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #899 = VMULv16i8
- { 900, 5, 1, 31, "VMULv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #900 = VMULv2i32
- { 901, 5, 1, 29, "VMULv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #901 = VMULv4i16
- { 902, 5, 1, 32, "VMULv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #902 = VMULv4i32
- { 903, 5, 1, 30, "VMULv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #903 = VMULv8i16
- { 904, 5, 1, 29, "VMULv8i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #904 = VMULv8i8
- { 905, 4, 1, 44, "VMVNd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #905 = VMVNd
- { 906, 4, 1, 44, "VMVNq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #906 = VMVNq
- { 907, 4, 1, 87, "VNEGD", 0|(1<<TID::Predicable), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #907 = VNEGD
- { 908, 5, 1, 87, "VNEGDcc", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo107 }, // Inst #908 = VNEGDcc
- { 909, 4, 1, 86, "VNEGS", 0|(1<<TID::Predicable), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo66 }, // Inst #909 = VNEGS
- { 910, 5, 1, 86, "VNEGScc", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo115 }, // Inst #910 = VNEGScc
- { 911, 4, 1, 58, "VNEGf32q", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #911 = VNEGf32q
- { 912, 4, 1, 57, "VNEGfd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #912 = VNEGfd
- { 913, 4, 1, 57, "VNEGfd_sfp", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo67 }, // Inst #913 = VNEGfd_sfp
- { 914, 4, 1, 44, "VNEGs16d", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #914 = VNEGs16d
- { 915, 4, 1, 44, "VNEGs16q", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #915 = VNEGs16q
- { 916, 4, 1, 44, "VNEGs32d", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #916 = VNEGs32d
- { 917, 4, 1, 44, "VNEGs32q", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #917 = VNEGs32q
- { 918, 4, 1, 44, "VNEGs8d", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #918 = VNEGs8d
- { 919, 4, 1, 44, "VNEGs8q", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #919 = VNEGs8q
- { 920, 6, 1, 77, "VNMLAD", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #920 = VNMLAD
- { 921, 6, 1, 76, "VNMLAS", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo101 }, // Inst #921 = VNMLAS
- { 922, 6, 1, 77, "VNMLSD", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #922 = VNMLSD
- { 923, 6, 1, 76, "VNMLSS", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo101 }, // Inst #923 = VNMLSS
- { 924, 5, 1, 79, "VNMULD", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #924 = VNMULD
- { 925, 5, 1, 78, "VNMULS", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo70 }, // Inst #925 = VNMULS
- { 926, 5, 1, 5, "VORNd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #926 = VORNd
- { 927, 5, 1, 6, "VORNq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #927 = VORNq
- { 928, 5, 1, 5, "VORRd", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #928 = VORRd
- { 929, 5, 1, 6, "VORRq", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #929 = VORRq
- { 930, 5, 1, 34, "VPADALsv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo123 }, // Inst #930 = VPADALsv16i8
- { 931, 5, 1, 33, "VPADALsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo107 }, // Inst #931 = VPADALsv2i32
- { 932, 5, 1, 33, "VPADALsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo107 }, // Inst #932 = VPADALsv4i16
- { 933, 5, 1, 34, "VPADALsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo123 }, // Inst #933 = VPADALsv4i32
- { 934, 5, 1, 34, "VPADALsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo123 }, // Inst #934 = VPADALsv8i16
- { 935, 5, 1, 33, "VPADALsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo107 }, // Inst #935 = VPADALsv8i8
- { 936, 5, 1, 34, "VPADALuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo123 }, // Inst #936 = VPADALuv16i8
- { 937, 5, 1, 33, "VPADALuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo107 }, // Inst #937 = VPADALuv2i32
- { 938, 5, 1, 33, "VPADALuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo107 }, // Inst #938 = VPADALuv4i16
- { 939, 5, 1, 34, "VPADALuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo123 }, // Inst #939 = VPADALuv4i32
- { 940, 5, 1, 34, "VPADALuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo123 }, // Inst #940 = VPADALuv8i16
- { 941, 5, 1, 33, "VPADALuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo107 }, // Inst #941 = VPADALuv8i8
- { 942, 4, 1, 44, "VPADDLsv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #942 = VPADDLsv16i8
- { 943, 4, 1, 44, "VPADDLsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #943 = VPADDLsv2i32
- { 944, 4, 1, 44, "VPADDLsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #944 = VPADDLsv4i16
- { 945, 4, 1, 44, "VPADDLsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #945 = VPADDLsv4i32
- { 946, 4, 1, 44, "VPADDLsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #946 = VPADDLsv8i16
- { 947, 4, 1, 44, "VPADDLsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #947 = VPADDLsv8i8
- { 948, 4, 1, 44, "VPADDLuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #948 = VPADDLuv16i8
- { 949, 4, 1, 44, "VPADDLuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #949 = VPADDLuv2i32
- { 950, 4, 1, 44, "VPADDLuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #950 = VPADDLuv4i16
- { 951, 4, 1, 44, "VPADDLuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #951 = VPADDLuv4i32
- { 952, 4, 1, 44, "VPADDLuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #952 = VPADDLuv8i16
- { 953, 4, 1, 44, "VPADDLuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #953 = VPADDLuv8i8
- { 954, 5, 1, 1, "VPADDf", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #954 = VPADDf
- { 955, 5, 1, 5, "VPADDi16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #955 = VPADDi16
- { 956, 5, 1, 5, "VPADDi32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #956 = VPADDi32
- { 957, 5, 1, 5, "VPADDi8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #957 = VPADDi8
- { 958, 5, 1, 3, "VPMAXf", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #958 = VPMAXf
- { 959, 5, 1, 3, "VPMAXs16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #959 = VPMAXs16
- { 960, 5, 1, 3, "VPMAXs32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #960 = VPMAXs32
- { 961, 5, 1, 3, "VPMAXs8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #961 = VPMAXs8
- { 962, 5, 1, 3, "VPMAXu16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #962 = VPMAXu16
- { 963, 5, 1, 3, "VPMAXu32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #963 = VPMAXu32
- { 964, 5, 1, 3, "VPMAXu8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #964 = VPMAXu8
- { 965, 5, 1, 3, "VPMINf", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #965 = VPMINf
- { 966, 5, 1, 3, "VPMINs16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #966 = VPMINs16
- { 967, 5, 1, 3, "VPMINs32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #967 = VPMINs32
- { 968, 5, 1, 3, "VPMINs8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #968 = VPMINs8
- { 969, 5, 1, 3, "VPMINu16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #969 = VPMINu16
- { 970, 5, 1, 3, "VPMINu32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #970 = VPMINu32
- { 971, 5, 1, 3, "VPMINu8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #971 = VPMINu8
- { 972, 4, 1, 39, "VQABSv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #972 = VQABSv16i8
- { 973, 4, 1, 38, "VQABSv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #973 = VQABSv2i32
- { 974, 4, 1, 38, "VQABSv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #974 = VQABSv4i16
- { 975, 4, 1, 39, "VQABSv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #975 = VQABSv4i32
- { 976, 4, 1, 39, "VQABSv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #976 = VQABSv8i16
- { 977, 4, 1, 38, "VQABSv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #977 = VQABSv8i8
- { 978, 5, 1, 4, "VQADDsv16i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #978 = VQADDsv16i8
- { 979, 5, 1, 3, "VQADDsv1i64", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #979 = VQADDsv1i64
- { 980, 5, 1, 3, "VQADDsv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #980 = VQADDsv2i32
- { 981, 5, 1, 4, "VQADDsv2i64", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #981 = VQADDsv2i64
- { 982, 5, 1, 3, "VQADDsv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #982 = VQADDsv4i16
- { 983, 5, 1, 4, "VQADDsv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #983 = VQADDsv4i32
- { 984, 5, 1, 4, "VQADDsv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #984 = VQADDsv8i16
- { 985, 5, 1, 3, "VQADDsv8i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #985 = VQADDsv8i8
- { 986, 5, 1, 4, "VQADDuv16i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #986 = VQADDuv16i8
- { 987, 5, 1, 3, "VQADDuv1i64", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #987 = VQADDuv1i64
- { 988, 5, 1, 3, "VQADDuv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #988 = VQADDuv2i32
- { 989, 5, 1, 4, "VQADDuv2i64", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #989 = VQADDuv2i64
- { 990, 5, 1, 3, "VQADDuv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #990 = VQADDuv4i16
- { 991, 5, 1, 4, "VQADDuv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #991 = VQADDuv4i32
- { 992, 5, 1, 4, "VQADDuv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #992 = VQADDuv8i16
- { 993, 5, 1, 3, "VQADDuv8i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #993 = VQADDuv8i8
- { 994, 7, 1, 19, "VQDMLALslv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo99 }, // Inst #994 = VQDMLALslv2i32
- { 995, 7, 1, 17, "VQDMLALslv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo100 }, // Inst #995 = VQDMLALslv4i16
- { 996, 6, 1, 17, "VQDMLALv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #996 = VQDMLALv2i64
- { 997, 6, 1, 17, "VQDMLALv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #997 = VQDMLALv4i32
- { 998, 7, 1, 19, "VQDMLSLslv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo99 }, // Inst #998 = VQDMLSLslv2i32
- { 999, 7, 1, 17, "VQDMLSLslv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo100 }, // Inst #999 = VQDMLSLslv4i16
- { 1000, 6, 1, 17, "VQDMLSLv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #1000 = VQDMLSLv2i64
- { 1001, 6, 1, 17, "VQDMLSLv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo59 }, // Inst #1001 = VQDMLSLv4i32
- { 1002, 6, 1, 31, "VQDMULHslv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo119 }, // Inst #1002 = VQDMULHslv2i32
- { 1003, 6, 1, 29, "VQDMULHslv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo121 }, // Inst #1003 = VQDMULHslv4i16
- { 1004, 6, 1, 32, "VQDMULHslv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo120 }, // Inst #1004 = VQDMULHslv4i32
- { 1005, 6, 1, 30, "VQDMULHslv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo122 }, // Inst #1005 = VQDMULHslv8i16
- { 1006, 5, 1, 31, "VQDMULHv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1006 = VQDMULHv2i32
- { 1007, 5, 1, 29, "VQDMULHv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1007 = VQDMULHv4i16
- { 1008, 5, 1, 32, "VQDMULHv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1008 = VQDMULHv4i32
- { 1009, 5, 1, 30, "VQDMULHv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1009 = VQDMULHv8i16
- { 1010, 6, 1, 29, "VQDMULLslv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo117 }, // Inst #1010 = VQDMULLslv2i32
- { 1011, 6, 1, 29, "VQDMULLslv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo118 }, // Inst #1011 = VQDMULLslv4i16
- { 1012, 5, 1, 29, "VQDMULLv2i64", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #1012 = VQDMULLv2i64
- { 1013, 5, 1, 29, "VQDMULLv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #1013 = VQDMULLv4i32
- { 1014, 4, 1, 38, "VQMOVNsuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo109 }, // Inst #1014 = VQMOVNsuv2i32
- { 1015, 4, 1, 38, "VQMOVNsuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo109 }, // Inst #1015 = VQMOVNsuv4i16
- { 1016, 4, 1, 38, "VQMOVNsuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo109 }, // Inst #1016 = VQMOVNsuv8i8
- { 1017, 4, 1, 38, "VQMOVNsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo109 }, // Inst #1017 = VQMOVNsv2i32
- { 1018, 4, 1, 38, "VQMOVNsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo109 }, // Inst #1018 = VQMOVNsv4i16
- { 1019, 4, 1, 38, "VQMOVNsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo109 }, // Inst #1019 = VQMOVNsv8i8
- { 1020, 4, 1, 38, "VQMOVNuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo109 }, // Inst #1020 = VQMOVNuv2i32
- { 1021, 4, 1, 38, "VQMOVNuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo109 }, // Inst #1021 = VQMOVNuv4i16
- { 1022, 4, 1, 38, "VQMOVNuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo109 }, // Inst #1022 = VQMOVNuv8i8
- { 1023, 4, 1, 39, "VQNEGv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #1023 = VQNEGv16i8
- { 1024, 4, 1, 38, "VQNEGv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #1024 = VQNEGv2i32
- { 1025, 4, 1, 38, "VQNEGv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #1025 = VQNEGv4i16
- { 1026, 4, 1, 39, "VQNEGv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #1026 = VQNEGv4i32
- { 1027, 4, 1, 39, "VQNEGv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #1027 = VQNEGv8i16
- { 1028, 4, 1, 38, "VQNEGv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #1028 = VQNEGv8i8
- { 1029, 6, 1, 31, "VQRDMULHslv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo119 }, // Inst #1029 = VQRDMULHslv2i32
- { 1030, 6, 1, 29, "VQRDMULHslv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo121 }, // Inst #1030 = VQRDMULHslv4i16
- { 1031, 6, 1, 32, "VQRDMULHslv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo120 }, // Inst #1031 = VQRDMULHslv4i32
- { 1032, 6, 1, 30, "VQRDMULHslv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo122 }, // Inst #1032 = VQRDMULHslv8i16
- { 1033, 5, 1, 31, "VQRDMULHv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1033 = VQRDMULHv2i32
- { 1034, 5, 1, 29, "VQRDMULHv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1034 = VQRDMULHv4i16
- { 1035, 5, 1, 32, "VQRDMULHv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1035 = VQRDMULHv4i32
- { 1036, 5, 1, 30, "VQRDMULHv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1036 = VQRDMULHv8i16
- { 1037, 5, 1, 43, "VQRSHLsv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1037 = VQRSHLsv16i8
- { 1038, 5, 1, 42, "VQRSHLsv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1038 = VQRSHLsv1i64
- { 1039, 5, 1, 42, "VQRSHLsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1039 = VQRSHLsv2i32
- { 1040, 5, 1, 43, "VQRSHLsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1040 = VQRSHLsv2i64
- { 1041, 5, 1, 42, "VQRSHLsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1041 = VQRSHLsv4i16
- { 1042, 5, 1, 43, "VQRSHLsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1042 = VQRSHLsv4i32
- { 1043, 5, 1, 43, "VQRSHLsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1043 = VQRSHLsv8i16
- { 1044, 5, 1, 42, "VQRSHLsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1044 = VQRSHLsv8i8
- { 1045, 5, 1, 43, "VQRSHLuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1045 = VQRSHLuv16i8
- { 1046, 5, 1, 42, "VQRSHLuv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1046 = VQRSHLuv1i64
- { 1047, 5, 1, 42, "VQRSHLuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1047 = VQRSHLuv2i32
- { 1048, 5, 1, 43, "VQRSHLuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1048 = VQRSHLuv2i64
- { 1049, 5, 1, 42, "VQRSHLuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1049 = VQRSHLuv4i16
- { 1050, 5, 1, 43, "VQRSHLuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1050 = VQRSHLuv4i32
- { 1051, 5, 1, 43, "VQRSHLuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1051 = VQRSHLuv8i16
- { 1052, 5, 1, 42, "VQRSHLuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1052 = VQRSHLuv8i8
- { 1053, 5, 1, 42, "VQRSHRNsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1053 = VQRSHRNsv2i32
- { 1054, 5, 1, 42, "VQRSHRNsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1054 = VQRSHRNsv4i16
- { 1055, 5, 1, 42, "VQRSHRNsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1055 = VQRSHRNsv8i8
- { 1056, 5, 1, 42, "VQRSHRNuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1056 = VQRSHRNuv2i32
- { 1057, 5, 1, 42, "VQRSHRNuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1057 = VQRSHRNuv4i16
- { 1058, 5, 1, 42, "VQRSHRNuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1058 = VQRSHRNuv8i8
- { 1059, 5, 1, 42, "VQRSHRUNv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1059 = VQRSHRUNv2i32
- { 1060, 5, 1, 42, "VQRSHRUNv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1060 = VQRSHRUNv4i16
- { 1061, 5, 1, 42, "VQRSHRUNv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1061 = VQRSHRUNv8i8
- { 1062, 5, 1, 42, "VQSHLsiv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1062 = VQSHLsiv16i8
- { 1063, 5, 1, 42, "VQSHLsiv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1063 = VQSHLsiv1i64
- { 1064, 5, 1, 42, "VQSHLsiv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1064 = VQSHLsiv2i32
- { 1065, 5, 1, 42, "VQSHLsiv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1065 = VQSHLsiv2i64
- { 1066, 5, 1, 42, "VQSHLsiv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1066 = VQSHLsiv4i16
- { 1067, 5, 1, 42, "VQSHLsiv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1067 = VQSHLsiv4i32
- { 1068, 5, 1, 42, "VQSHLsiv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1068 = VQSHLsiv8i16
- { 1069, 5, 1, 42, "VQSHLsiv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1069 = VQSHLsiv8i8
- { 1070, 5, 1, 42, "VQSHLsuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1070 = VQSHLsuv16i8
- { 1071, 5, 1, 42, "VQSHLsuv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1071 = VQSHLsuv1i64
- { 1072, 5, 1, 42, "VQSHLsuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1072 = VQSHLsuv2i32
- { 1073, 5, 1, 42, "VQSHLsuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1073 = VQSHLsuv2i64
- { 1074, 5, 1, 42, "VQSHLsuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1074 = VQSHLsuv4i16
- { 1075, 5, 1, 42, "VQSHLsuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1075 = VQSHLsuv4i32
- { 1076, 5, 1, 42, "VQSHLsuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1076 = VQSHLsuv8i16
- { 1077, 5, 1, 42, "VQSHLsuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1077 = VQSHLsuv8i8
- { 1078, 5, 1, 43, "VQSHLsv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1078 = VQSHLsv16i8
- { 1079, 5, 1, 42, "VQSHLsv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1079 = VQSHLsv1i64
- { 1080, 5, 1, 42, "VQSHLsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1080 = VQSHLsv2i32
- { 1081, 5, 1, 43, "VQSHLsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1081 = VQSHLsv2i64
- { 1082, 5, 1, 42, "VQSHLsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1082 = VQSHLsv4i16
- { 1083, 5, 1, 43, "VQSHLsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1083 = VQSHLsv4i32
- { 1084, 5, 1, 43, "VQSHLsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1084 = VQSHLsv8i16
- { 1085, 5, 1, 42, "VQSHLsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1085 = VQSHLsv8i8
- { 1086, 5, 1, 42, "VQSHLuiv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1086 = VQSHLuiv16i8
- { 1087, 5, 1, 42, "VQSHLuiv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1087 = VQSHLuiv1i64
- { 1088, 5, 1, 42, "VQSHLuiv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1088 = VQSHLuiv2i32
- { 1089, 5, 1, 42, "VQSHLuiv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1089 = VQSHLuiv2i64
- { 1090, 5, 1, 42, "VQSHLuiv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1090 = VQSHLuiv4i16
- { 1091, 5, 1, 42, "VQSHLuiv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1091 = VQSHLuiv4i32
- { 1092, 5, 1, 42, "VQSHLuiv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1092 = VQSHLuiv8i16
- { 1093, 5, 1, 42, "VQSHLuiv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1093 = VQSHLuiv8i8
- { 1094, 5, 1, 43, "VQSHLuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1094 = VQSHLuv16i8
- { 1095, 5, 1, 42, "VQSHLuv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1095 = VQSHLuv1i64
- { 1096, 5, 1, 42, "VQSHLuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1096 = VQSHLuv2i32
- { 1097, 5, 1, 43, "VQSHLuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1097 = VQSHLuv2i64
- { 1098, 5, 1, 42, "VQSHLuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1098 = VQSHLuv4i16
- { 1099, 5, 1, 43, "VQSHLuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1099 = VQSHLuv4i32
- { 1100, 5, 1, 43, "VQSHLuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1100 = VQSHLuv8i16
- { 1101, 5, 1, 42, "VQSHLuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1101 = VQSHLuv8i8
- { 1102, 5, 1, 42, "VQSHRNsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1102 = VQSHRNsv2i32
- { 1103, 5, 1, 42, "VQSHRNsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1103 = VQSHRNsv4i16
- { 1104, 5, 1, 42, "VQSHRNsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1104 = VQSHRNsv8i8
- { 1105, 5, 1, 42, "VQSHRNuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1105 = VQSHRNuv2i32
- { 1106, 5, 1, 42, "VQSHRNuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1106 = VQSHRNuv4i16
- { 1107, 5, 1, 42, "VQSHRNuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1107 = VQSHRNuv8i8
- { 1108, 5, 1, 42, "VQSHRUNv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1108 = VQSHRUNv2i32
- { 1109, 5, 1, 42, "VQSHRUNv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1109 = VQSHRUNv4i16
- { 1110, 5, 1, 42, "VQSHRUNv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1110 = VQSHRUNv8i8
- { 1111, 5, 1, 4, "VQSUBsv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1111 = VQSUBsv16i8
- { 1112, 5, 1, 3, "VQSUBsv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1112 = VQSUBsv1i64
- { 1113, 5, 1, 3, "VQSUBsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1113 = VQSUBsv2i32
- { 1114, 5, 1, 4, "VQSUBsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1114 = VQSUBsv2i64
- { 1115, 5, 1, 3, "VQSUBsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1115 = VQSUBsv4i16
- { 1116, 5, 1, 4, "VQSUBsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1116 = VQSUBsv4i32
- { 1117, 5, 1, 4, "VQSUBsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1117 = VQSUBsv8i16
- { 1118, 5, 1, 3, "VQSUBsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1118 = VQSUBsv8i8
- { 1119, 5, 1, 4, "VQSUBuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1119 = VQSUBuv16i8
- { 1120, 5, 1, 3, "VQSUBuv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1120 = VQSUBuv1i64
- { 1121, 5, 1, 3, "VQSUBuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1121 = VQSUBuv2i32
- { 1122, 5, 1, 4, "VQSUBuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1122 = VQSUBuv2i64
- { 1123, 5, 1, 3, "VQSUBuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1123 = VQSUBuv4i16
- { 1124, 5, 1, 4, "VQSUBuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1124 = VQSUBuv4i32
- { 1125, 5, 1, 4, "VQSUBuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1125 = VQSUBuv8i16
- { 1126, 5, 1, 3, "VQSUBuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1126 = VQSUBuv8i8
- { 1127, 5, 1, 3, "VRADDHNv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo69 }, // Inst #1127 = VRADDHNv2i32
- { 1128, 5, 1, 3, "VRADDHNv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo69 }, // Inst #1128 = VRADDHNv4i16
- { 1129, 5, 1, 3, "VRADDHNv8i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo69 }, // Inst #1129 = VRADDHNv8i8
- { 1130, 4, 1, 57, "VRECPEd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #1130 = VRECPEd
- { 1131, 4, 1, 57, "VRECPEfd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #1131 = VRECPEfd
- { 1132, 4, 1, 58, "VRECPEfq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #1132 = VRECPEfq
- { 1133, 4, 1, 58, "VRECPEq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #1133 = VRECPEq
- { 1134, 5, 1, 40, "VRECPSfd", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1134 = VRECPSfd
- { 1135, 5, 1, 41, "VRECPSfq", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1135 = VRECPSfq
- { 1136, 4, 1, 21, "VREV16d8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #1136 = VREV16d8
- { 1137, 4, 1, 21, "VREV16q8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #1137 = VREV16q8
- { 1138, 4, 1, 21, "VREV32d16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #1138 = VREV32d16
- { 1139, 4, 1, 21, "VREV32d8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #1139 = VREV32d8
- { 1140, 4, 1, 21, "VREV32q16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #1140 = VREV32q16
- { 1141, 4, 1, 21, "VREV32q8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #1141 = VREV32q8
- { 1142, 4, 1, 21, "VREV64d16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #1142 = VREV64d16
- { 1143, 4, 1, 21, "VREV64d32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #1143 = VREV64d32
- { 1144, 4, 1, 21, "VREV64d8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #1144 = VREV64d8
- { 1145, 4, 1, 21, "VREV64df", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #1145 = VREV64df
- { 1146, 4, 1, 21, "VREV64q16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #1146 = VREV64q16
- { 1147, 4, 1, 21, "VREV64q32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #1147 = VREV64q32
- { 1148, 4, 1, 21, "VREV64q8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #1148 = VREV64q8
- { 1149, 4, 1, 21, "VREV64qf", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #1149 = VREV64qf
- { 1150, 5, 1, 4, "VRHADDsv16i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1150 = VRHADDsv16i8
- { 1151, 5, 1, 3, "VRHADDsv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1151 = VRHADDsv2i32
- { 1152, 5, 1, 3, "VRHADDsv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1152 = VRHADDsv4i16
- { 1153, 5, 1, 4, "VRHADDsv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1153 = VRHADDsv4i32
- { 1154, 5, 1, 4, "VRHADDsv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1154 = VRHADDsv8i16
- { 1155, 5, 1, 3, "VRHADDsv8i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1155 = VRHADDsv8i8
- { 1156, 5, 1, 4, "VRHADDuv16i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1156 = VRHADDuv16i8
- { 1157, 5, 1, 3, "VRHADDuv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1157 = VRHADDuv2i32
- { 1158, 5, 1, 3, "VRHADDuv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1158 = VRHADDuv4i16
- { 1159, 5, 1, 4, "VRHADDuv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1159 = VRHADDuv4i32
- { 1160, 5, 1, 4, "VRHADDuv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1160 = VRHADDuv8i16
- { 1161, 5, 1, 3, "VRHADDuv8i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1161 = VRHADDuv8i8
- { 1162, 5, 1, 43, "VRSHLsv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1162 = VRSHLsv16i8
- { 1163, 5, 1, 42, "VRSHLsv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1163 = VRSHLsv1i64
- { 1164, 5, 1, 42, "VRSHLsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1164 = VRSHLsv2i32
- { 1165, 5, 1, 43, "VRSHLsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1165 = VRSHLsv2i64
- { 1166, 5, 1, 42, "VRSHLsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1166 = VRSHLsv4i16
- { 1167, 5, 1, 43, "VRSHLsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1167 = VRSHLsv4i32
- { 1168, 5, 1, 43, "VRSHLsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1168 = VRSHLsv8i16
- { 1169, 5, 1, 42, "VRSHLsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1169 = VRSHLsv8i8
- { 1170, 5, 1, 43, "VRSHLuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1170 = VRSHLuv16i8
- { 1171, 5, 1, 42, "VRSHLuv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1171 = VRSHLuv1i64
- { 1172, 5, 1, 42, "VRSHLuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1172 = VRSHLuv2i32
- { 1173, 5, 1, 43, "VRSHLuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1173 = VRSHLuv2i64
- { 1174, 5, 1, 42, "VRSHLuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1174 = VRSHLuv4i16
- { 1175, 5, 1, 43, "VRSHLuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1175 = VRSHLuv4i32
- { 1176, 5, 1, 43, "VRSHLuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1176 = VRSHLuv8i16
- { 1177, 5, 1, 42, "VRSHLuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1177 = VRSHLuv8i8
- { 1178, 5, 1, 42, "VRSHRNv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1178 = VRSHRNv2i32
- { 1179, 5, 1, 42, "VRSHRNv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1179 = VRSHRNv4i16
- { 1180, 5, 1, 42, "VRSHRNv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1180 = VRSHRNv8i8
- { 1181, 5, 1, 42, "VRSHRsv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1181 = VRSHRsv16i8
- { 1182, 5, 1, 42, "VRSHRsv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1182 = VRSHRsv1i64
- { 1183, 5, 1, 42, "VRSHRsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1183 = VRSHRsv2i32
- { 1184, 5, 1, 42, "VRSHRsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1184 = VRSHRsv2i64
- { 1185, 5, 1, 42, "VRSHRsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1185 = VRSHRsv4i16
- { 1186, 5, 1, 42, "VRSHRsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1186 = VRSHRsv4i32
- { 1187, 5, 1, 42, "VRSHRsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1187 = VRSHRsv8i16
- { 1188, 5, 1, 42, "VRSHRsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1188 = VRSHRsv8i8
- { 1189, 5, 1, 42, "VRSHRuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1189 = VRSHRuv16i8
- { 1190, 5, 1, 42, "VRSHRuv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1190 = VRSHRuv1i64
- { 1191, 5, 1, 42, "VRSHRuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1191 = VRSHRuv2i32
- { 1192, 5, 1, 42, "VRSHRuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1192 = VRSHRuv2i64
- { 1193, 5, 1, 42, "VRSHRuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1193 = VRSHRuv4i16
- { 1194, 5, 1, 42, "VRSHRuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1194 = VRSHRuv4i32
- { 1195, 5, 1, 42, "VRSHRuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1195 = VRSHRuv8i16
- { 1196, 5, 1, 42, "VRSHRuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1196 = VRSHRuv8i8
- { 1197, 4, 1, 57, "VRSQRTEd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #1197 = VRSQRTEd
- { 1198, 4, 1, 57, "VRSQRTEfd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #1198 = VRSQRTEfd
- { 1199, 4, 1, 58, "VRSQRTEfq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #1199 = VRSQRTEfq
- { 1200, 4, 1, 58, "VRSQRTEq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #1200 = VRSQRTEq
- { 1201, 5, 1, 40, "VRSQRTSfd", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1201 = VRSQRTSfd
- { 1202, 5, 1, 41, "VRSQRTSfq", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1202 = VRSQRTSfq
- { 1203, 6, 1, 33, "VRSRAsv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1203 = VRSRAsv16i8
- { 1204, 6, 1, 33, "VRSRAsv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1204 = VRSRAsv1i64
- { 1205, 6, 1, 33, "VRSRAsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1205 = VRSRAsv2i32
- { 1206, 6, 1, 33, "VRSRAsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1206 = VRSRAsv2i64
- { 1207, 6, 1, 33, "VRSRAsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1207 = VRSRAsv4i16
- { 1208, 6, 1, 33, "VRSRAsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1208 = VRSRAsv4i32
- { 1209, 6, 1, 33, "VRSRAsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1209 = VRSRAsv8i16
- { 1210, 6, 1, 33, "VRSRAsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1210 = VRSRAsv8i8
- { 1211, 6, 1, 33, "VRSRAuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1211 = VRSRAuv16i8
- { 1212, 6, 1, 33, "VRSRAuv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1212 = VRSRAuv1i64
- { 1213, 6, 1, 33, "VRSRAuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1213 = VRSRAuv2i32
- { 1214, 6, 1, 33, "VRSRAuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1214 = VRSRAuv2i64
- { 1215, 6, 1, 33, "VRSRAuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1215 = VRSRAuv4i16
- { 1216, 6, 1, 33, "VRSRAuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1216 = VRSRAuv4i32
- { 1217, 6, 1, 33, "VRSRAuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1217 = VRSRAuv8i16
- { 1218, 6, 1, 33, "VRSRAuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1218 = VRSRAuv8i8
- { 1219, 5, 1, 3, "VRSUBHNv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo69 }, // Inst #1219 = VRSUBHNv2i32
- { 1220, 5, 1, 3, "VRSUBHNv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo69 }, // Inst #1220 = VRSUBHNv4i16
- { 1221, 5, 1, 3, "VRSUBHNv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo69 }, // Inst #1221 = VRSUBHNv8i8
- { 1222, 6, 1, 25, "VSETLNi16", 0|(1<<TID::Predicable), 0|(3<<4)|(26<<9), NULL, NULL, NULL, OperandInfo127 }, // Inst #1222 = VSETLNi16
- { 1223, 6, 1, 25, "VSETLNi32", 0|(1<<TID::Predicable), 0|(3<<4)|(26<<9), NULL, NULL, NULL, OperandInfo127 }, // Inst #1223 = VSETLNi32
- { 1224, 6, 1, 25, "VSETLNi8", 0|(1<<TID::Predicable), 0|(3<<4)|(26<<9), NULL, NULL, NULL, OperandInfo127 }, // Inst #1224 = VSETLNi8
- { 1225, 5, 1, 44, "VSHLLi16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo81 }, // Inst #1225 = VSHLLi16
- { 1226, 5, 1, 44, "VSHLLi32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo81 }, // Inst #1226 = VSHLLi32
- { 1227, 5, 1, 44, "VSHLLi8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo81 }, // Inst #1227 = VSHLLi8
- { 1228, 5, 1, 44, "VSHLLsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo81 }, // Inst #1228 = VSHLLsv2i64
- { 1229, 5, 1, 44, "VSHLLsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo81 }, // Inst #1229 = VSHLLsv4i32
- { 1230, 5, 1, 44, "VSHLLsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo81 }, // Inst #1230 = VSHLLsv8i16
- { 1231, 5, 1, 44, "VSHLLuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo81 }, // Inst #1231 = VSHLLuv2i64
- { 1232, 5, 1, 44, "VSHLLuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo81 }, // Inst #1232 = VSHLLuv4i32
- { 1233, 5, 1, 44, "VSHLLuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo81 }, // Inst #1233 = VSHLLuv8i16
- { 1234, 5, 1, 44, "VSHLiv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1234 = VSHLiv16i8
- { 1235, 5, 1, 44, "VSHLiv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1235 = VSHLiv1i64
- { 1236, 5, 1, 44, "VSHLiv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1236 = VSHLiv2i32
- { 1237, 5, 1, 44, "VSHLiv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1237 = VSHLiv2i64
- { 1238, 5, 1, 44, "VSHLiv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1238 = VSHLiv4i16
- { 1239, 5, 1, 44, "VSHLiv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1239 = VSHLiv4i32
- { 1240, 5, 1, 44, "VSHLiv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1240 = VSHLiv8i16
- { 1241, 5, 1, 44, "VSHLiv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1241 = VSHLiv8i8
- { 1242, 5, 1, 45, "VSHLsv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1242 = VSHLsv16i8
- { 1243, 5, 1, 44, "VSHLsv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1243 = VSHLsv1i64
- { 1244, 5, 1, 44, "VSHLsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1244 = VSHLsv2i32
- { 1245, 5, 1, 45, "VSHLsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1245 = VSHLsv2i64
- { 1246, 5, 1, 44, "VSHLsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1246 = VSHLsv4i16
- { 1247, 5, 1, 45, "VSHLsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1247 = VSHLsv4i32
- { 1248, 5, 1, 45, "VSHLsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1248 = VSHLsv8i16
- { 1249, 5, 1, 44, "VSHLsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1249 = VSHLsv8i8
- { 1250, 5, 1, 45, "VSHLuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1250 = VSHLuv16i8
- { 1251, 5, 1, 44, "VSHLuv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1251 = VSHLuv1i64
- { 1252, 5, 1, 44, "VSHLuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1252 = VSHLuv2i32
- { 1253, 5, 1, 45, "VSHLuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1253 = VSHLuv2i64
- { 1254, 5, 1, 44, "VSHLuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1254 = VSHLuv4i16
- { 1255, 5, 1, 45, "VSHLuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1255 = VSHLuv4i32
- { 1256, 5, 1, 45, "VSHLuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1256 = VSHLuv8i16
- { 1257, 5, 1, 44, "VSHLuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1257 = VSHLuv8i8
- { 1258, 5, 1, 44, "VSHRNv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1258 = VSHRNv2i32
- { 1259, 5, 1, 44, "VSHRNv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1259 = VSHRNv4i16
- { 1260, 5, 1, 44, "VSHRNv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo124 }, // Inst #1260 = VSHRNv8i8
- { 1261, 5, 1, 44, "VSHRsv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1261 = VSHRsv16i8
- { 1262, 5, 1, 44, "VSHRsv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1262 = VSHRsv1i64
- { 1263, 5, 1, 44, "VSHRsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1263 = VSHRsv2i32
- { 1264, 5, 1, 44, "VSHRsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1264 = VSHRsv2i64
- { 1265, 5, 1, 44, "VSHRsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1265 = VSHRsv4i16
- { 1266, 5, 1, 44, "VSHRsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1266 = VSHRsv4i32
- { 1267, 5, 1, 44, "VSHRsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1267 = VSHRsv8i16
- { 1268, 5, 1, 44, "VSHRsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1268 = VSHRsv8i8
- { 1269, 5, 1, 44, "VSHRuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1269 = VSHRuv16i8
- { 1270, 5, 1, 44, "VSHRuv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1270 = VSHRuv1i64
- { 1271, 5, 1, 44, "VSHRuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1271 = VSHRuv2i32
- { 1272, 5, 1, 44, "VSHRuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1272 = VSHRuv2i64
- { 1273, 5, 1, 44, "VSHRuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1273 = VSHRuv4i16
- { 1274, 5, 1, 44, "VSHRuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1274 = VSHRuv4i32
- { 1275, 5, 1, 44, "VSHRuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo78 }, // Inst #1275 = VSHRuv8i16
- { 1276, 5, 1, 44, "VSHRuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo77 }, // Inst #1276 = VSHRuv8i8
- { 1277, 5, 1, 67, "VSHTOD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo128 }, // Inst #1277 = VSHTOD
- { 1278, 5, 1, 68, "VSHTOS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo129 }, // Inst #1278 = VSHTOS
- { 1279, 4, 1, 67, "VSITOD", 0|(1<<TID::Predicable), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo75 }, // Inst #1279 = VSITOD
- { 1280, 4, 1, 68, "VSITOS", 0|(1<<TID::Predicable), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo66 }, // Inst #1280 = VSITOS
- { 1281, 6, 1, 45, "VSLIv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1281 = VSLIv16i8
- { 1282, 6, 1, 44, "VSLIv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1282 = VSLIv1i64
- { 1283, 6, 1, 44, "VSLIv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1283 = VSLIv2i32
- { 1284, 6, 1, 45, "VSLIv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1284 = VSLIv2i64
- { 1285, 6, 1, 44, "VSLIv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1285 = VSLIv4i16
- { 1286, 6, 1, 45, "VSLIv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1286 = VSLIv4i32
- { 1287, 6, 1, 45, "VSLIv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1287 = VSLIv8i16
- { 1288, 6, 1, 44, "VSLIv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1288 = VSLIv8i8
- { 1289, 5, 1, 67, "VSLTOD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo128 }, // Inst #1289 = VSLTOD
- { 1290, 5, 1, 68, "VSLTOS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo129 }, // Inst #1290 = VSLTOS
- { 1291, 4, 1, 81, "VSQRTD", 0|(1<<TID::Predicable), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #1291 = VSQRTD
- { 1292, 4, 1, 80, "VSQRTS", 0|(1<<TID::Predicable), 0|(3<<4)|(13<<9)|(1<<17), NULL, NULL, NULL, OperandInfo66 }, // Inst #1292 = VSQRTS
- { 1293, 6, 1, 33, "VSRAsv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1293 = VSRAsv16i8
- { 1294, 6, 1, 33, "VSRAsv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1294 = VSRAsv1i64
- { 1295, 6, 1, 33, "VSRAsv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1295 = VSRAsv2i32
- { 1296, 6, 1, 33, "VSRAsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1296 = VSRAsv2i64
- { 1297, 6, 1, 33, "VSRAsv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1297 = VSRAsv4i16
- { 1298, 6, 1, 33, "VSRAsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1298 = VSRAsv4i32
- { 1299, 6, 1, 33, "VSRAsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1299 = VSRAsv8i16
- { 1300, 6, 1, 33, "VSRAsv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1300 = VSRAsv8i8
- { 1301, 6, 1, 33, "VSRAuv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1301 = VSRAuv16i8
- { 1302, 6, 1, 33, "VSRAuv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1302 = VSRAuv1i64
- { 1303, 6, 1, 33, "VSRAuv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1303 = VSRAuv2i32
- { 1304, 6, 1, 33, "VSRAuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1304 = VSRAuv2i64
- { 1305, 6, 1, 33, "VSRAuv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1305 = VSRAuv4i16
- { 1306, 6, 1, 33, "VSRAuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1306 = VSRAuv4i32
- { 1307, 6, 1, 33, "VSRAuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1307 = VSRAuv8i16
- { 1308, 6, 1, 33, "VSRAuv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1308 = VSRAuv8i8
- { 1309, 6, 1, 45, "VSRIv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1309 = VSRIv16i8
- { 1310, 6, 1, 44, "VSRIv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1310 = VSRIv1i64
- { 1311, 6, 1, 44, "VSRIv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1311 = VSRIv2i32
- { 1312, 6, 1, 45, "VSRIv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1312 = VSRIv2i64
- { 1313, 6, 1, 44, "VSRIv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1313 = VSRIv4i16
- { 1314, 6, 1, 45, "VSRIv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1314 = VSRIv4i32
- { 1315, 6, 1, 45, "VSRIv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo125 }, // Inst #1315 = VSRIv8i16
- { 1316, 6, 1, 44, "VSRIv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo126 }, // Inst #1316 = VSRIv8i8
- { 1317, 7, 0, 46, "VST1d16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo130 }, // Inst #1317 = VST1d16
- { 1318, 10, 0, 46, "VST1d16Q", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo131 }, // Inst #1318 = VST1d16Q
- { 1319, 9, 0, 46, "VST1d16T", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo132 }, // Inst #1319 = VST1d16T
- { 1320, 7, 0, 46, "VST1d32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo130 }, // Inst #1320 = VST1d32
- { 1321, 10, 0, 46, "VST1d32Q", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo131 }, // Inst #1321 = VST1d32Q
- { 1322, 9, 0, 46, "VST1d32T", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo132 }, // Inst #1322 = VST1d32T
- { 1323, 7, 0, 46, "VST1d64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo130 }, // Inst #1323 = VST1d64
- { 1324, 7, 0, 46, "VST1d8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo130 }, // Inst #1324 = VST1d8
- { 1325, 10, 0, 46, "VST1d8Q", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo131 }, // Inst #1325 = VST1d8Q
- { 1326, 9, 0, 46, "VST1d8T", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo132 }, // Inst #1326 = VST1d8T
- { 1327, 7, 0, 46, "VST1df", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo130 }, // Inst #1327 = VST1df
- { 1328, 7, 0, 46, "VST1q16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo133 }, // Inst #1328 = VST1q16
- { 1329, 7, 0, 46, "VST1q32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo133 }, // Inst #1329 = VST1q32
- { 1330, 7, 0, 46, "VST1q64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo133 }, // Inst #1330 = VST1q64
- { 1331, 7, 0, 46, "VST1q8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo133 }, // Inst #1331 = VST1q8
- { 1332, 7, 0, 46, "VST1qf", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo133 }, // Inst #1332 = VST1qf
- { 1333, 9, 0, 46, "VST2LNd16", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo134 }, // Inst #1333 = VST2LNd16
- { 1334, 9, 0, 46, "VST2LNd32", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo134 }, // Inst #1334 = VST2LNd32
- { 1335, 9, 0, 46, "VST2LNd8", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo134 }, // Inst #1335 = VST2LNd8
- { 1336, 9, 0, 46, "VST2LNq16a", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo134 }, // Inst #1336 = VST2LNq16a
- { 1337, 9, 0, 46, "VST2LNq16b", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo134 }, // Inst #1337 = VST2LNq16b
- { 1338, 9, 0, 46, "VST2LNq32a", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo134 }, // Inst #1338 = VST2LNq32a
- { 1339, 9, 0, 46, "VST2LNq32b", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo134 }, // Inst #1339 = VST2LNq32b
- { 1340, 8, 0, 46, "VST2d16", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo135 }, // Inst #1340 = VST2d16
- { 1341, 8, 0, 46, "VST2d16D", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo135 }, // Inst #1341 = VST2d16D
- { 1342, 8, 0, 46, "VST2d32", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo135 }, // Inst #1342 = VST2d32
- { 1343, 8, 0, 46, "VST2d32D", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo135 }, // Inst #1343 = VST2d32D
- { 1344, 8, 0, 46, "VST2d64", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo135 }, // Inst #1344 = VST2d64
- { 1345, 8, 0, 46, "VST2d8", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo135 }, // Inst #1345 = VST2d8
- { 1346, 8, 0, 46, "VST2d8D", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo135 }, // Inst #1346 = VST2d8D
- { 1347, 10, 0, 46, "VST2q16", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo131 }, // Inst #1347 = VST2q16
- { 1348, 10, 0, 46, "VST2q32", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo131 }, // Inst #1348 = VST2q32
- { 1349, 10, 0, 46, "VST2q8", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo131 }, // Inst #1349 = VST2q8
- { 1350, 10, 0, 46, "VST3LNd16", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo136 }, // Inst #1350 = VST3LNd16
- { 1351, 10, 0, 46, "VST3LNd32", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo136 }, // Inst #1351 = VST3LNd32
- { 1352, 10, 0, 46, "VST3LNd8", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo136 }, // Inst #1352 = VST3LNd8
- { 1353, 10, 0, 46, "VST3LNq16a", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo136 }, // Inst #1353 = VST3LNq16a
- { 1354, 10, 0, 46, "VST3LNq16b", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo136 }, // Inst #1354 = VST3LNq16b
- { 1355, 10, 0, 46, "VST3LNq32a", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo136 }, // Inst #1355 = VST3LNq32a
- { 1356, 10, 0, 46, "VST3LNq32b", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo136 }, // Inst #1356 = VST3LNq32b
- { 1357, 9, 0, 46, "VST3d16", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo132 }, // Inst #1357 = VST3d16
- { 1358, 9, 0, 46, "VST3d32", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo132 }, // Inst #1358 = VST3d32
- { 1359, 9, 0, 46, "VST3d64", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo132 }, // Inst #1359 = VST3d64
- { 1360, 9, 0, 46, "VST3d8", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo132 }, // Inst #1360 = VST3d8
- { 1361, 10, 1, 46, "VST3q16a", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo137 }, // Inst #1361 = VST3q16a
- { 1362, 10, 1, 46, "VST3q16b", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo137 }, // Inst #1362 = VST3q16b
- { 1363, 10, 1, 46, "VST3q32a", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo137 }, // Inst #1363 = VST3q32a
- { 1364, 10, 1, 46, "VST3q32b", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo137 }, // Inst #1364 = VST3q32b
- { 1365, 10, 1, 46, "VST3q8a", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo137 }, // Inst #1365 = VST3q8a
- { 1366, 10, 1, 46, "VST3q8b", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo137 }, // Inst #1366 = VST3q8b
- { 1367, 11, 0, 46, "VST4LNd16", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo138 }, // Inst #1367 = VST4LNd16
- { 1368, 11, 0, 46, "VST4LNd32", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo138 }, // Inst #1368 = VST4LNd32
- { 1369, 11, 0, 46, "VST4LNd8", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo138 }, // Inst #1369 = VST4LNd8
- { 1370, 11, 0, 46, "VST4LNq16a", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo138 }, // Inst #1370 = VST4LNq16a
- { 1371, 11, 0, 46, "VST4LNq16b", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo138 }, // Inst #1371 = VST4LNq16b
- { 1372, 11, 0, 46, "VST4LNq32a", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo138 }, // Inst #1372 = VST4LNq32a
- { 1373, 11, 0, 46, "VST4LNq32b", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo138 }, // Inst #1373 = VST4LNq32b
- { 1374, 10, 0, 46, "VST4d16", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo131 }, // Inst #1374 = VST4d16
- { 1375, 10, 0, 46, "VST4d32", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo131 }, // Inst #1375 = VST4d32
- { 1376, 10, 0, 46, "VST4d64", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo131 }, // Inst #1376 = VST4d64
- { 1377, 10, 0, 46, "VST4d8", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo131 }, // Inst #1377 = VST4d8
- { 1378, 11, 1, 46, "VST4q16a", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo139 }, // Inst #1378 = VST4q16a
- { 1379, 11, 1, 46, "VST4q16b", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo139 }, // Inst #1379 = VST4q16b
- { 1380, 11, 1, 46, "VST4q32a", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo139 }, // Inst #1380 = VST4q32a
- { 1381, 11, 1, 46, "VST4q32b", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo139 }, // Inst #1381 = VST4q32b
- { 1382, 11, 1, 46, "VST4q8a", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo139 }, // Inst #1382 = VST4q8a
- { 1383, 11, 1, 46, "VST4q8b", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|6|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo139 }, // Inst #1383 = VST4q8b
- { 1384, 5, 0, 85, "VSTMD", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|5|(3<<4)|(21<<9)|(3<<17), NULL, NULL, NULL, OperandInfo35 }, // Inst #1384 = VSTMD
- { 1385, 5, 0, 85, "VSTMS", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|5|(3<<4)|(21<<9)|(1<<17), NULL, NULL, NULL, OperandInfo35 }, // Inst #1385 = VSTMS
- { 1386, 5, 0, 84, "VSTRD", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|5|(3<<4)|(20<<9)|(3<<17), NULL, NULL, NULL, OperandInfo96 }, // Inst #1386 = VSTRD
- { 1387, 5, 0, 85, "VSTRQ", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|4|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo97 }, // Inst #1387 = VSTRQ
- { 1388, 5, 0, 83, "VSTRS", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|5|(3<<4)|(20<<9)|(1<<17), NULL, NULL, NULL, OperandInfo98 }, // Inst #1388 = VSTRS
- { 1389, 5, 1, 62, "VSUBD", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1389 = VSUBD
- { 1390, 5, 1, 3, "VSUBHNv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo69 }, // Inst #1390 = VSUBHNv2i32
- { 1391, 5, 1, 3, "VSUBHNv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo69 }, // Inst #1391 = VSUBHNv4i16
- { 1392, 5, 1, 3, "VSUBHNv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo69 }, // Inst #1392 = VSUBHNv8i8
- { 1393, 5, 1, 44, "VSUBLsv2i64", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #1393 = VSUBLsv2i64
- { 1394, 5, 1, 44, "VSUBLsv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #1394 = VSUBLsv4i32
- { 1395, 5, 1, 44, "VSUBLsv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #1395 = VSUBLsv8i16
- { 1396, 5, 1, 44, "VSUBLuv2i64", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #1396 = VSUBLuv2i64
- { 1397, 5, 1, 44, "VSUBLuv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #1397 = VSUBLuv4i32
- { 1398, 5, 1, 44, "VSUBLuv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo62 }, // Inst #1398 = VSUBLuv8i16
- { 1399, 5, 1, 61, "VSUBS", 0|(1<<TID::Predicable), 0|(3<<4)|(14<<9)|(1<<17), NULL, NULL, NULL, OperandInfo70 }, // Inst #1399 = VSUBS
- { 1400, 5, 1, 47, "VSUBWsv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo71 }, // Inst #1400 = VSUBWsv2i64
- { 1401, 5, 1, 47, "VSUBWsv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo71 }, // Inst #1401 = VSUBWsv4i32
- { 1402, 5, 1, 47, "VSUBWsv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo71 }, // Inst #1402 = VSUBWsv8i16
- { 1403, 5, 1, 47, "VSUBWuv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo71 }, // Inst #1403 = VSUBWuv2i64
- { 1404, 5, 1, 47, "VSUBWuv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo71 }, // Inst #1404 = VSUBWuv4i32
- { 1405, 5, 1, 47, "VSUBWuv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo71 }, // Inst #1405 = VSUBWuv8i16
- { 1406, 5, 1, 1, "VSUBfd", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1406 = VSUBfd
- { 1407, 5, 1, 1, "VSUBfd_sfp", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo72 }, // Inst #1407 = VSUBfd_sfp
- { 1408, 5, 1, 2, "VSUBfq", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1408 = VSUBfq
- { 1409, 5, 1, 48, "VSUBv16i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1409 = VSUBv16i8
- { 1410, 5, 1, 47, "VSUBv1i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1410 = VSUBv1i64
- { 1411, 5, 1, 47, "VSUBv2i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1411 = VSUBv2i32
- { 1412, 5, 1, 48, "VSUBv2i64", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1412 = VSUBv2i64
- { 1413, 5, 1, 47, "VSUBv4i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1413 = VSUBv4i16
- { 1414, 5, 1, 48, "VSUBv4i32", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1414 = VSUBv4i32
- { 1415, 5, 1, 48, "VSUBv8i16", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1415 = VSUBv8i16
- { 1416, 5, 1, 47, "VSUBv8i8", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1416 = VSUBv8i8
- { 1417, 4, 1, 128, "VSWPd", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo65 }, // Inst #1417 = VSWPd
- { 1418, 4, 1, 128, "VSWPq", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo68 }, // Inst #1418 = VSWPq
- { 1419, 5, 1, 49, "VTBL1", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1419 = VTBL1
- { 1420, 6, 1, 50, "VTBL2", 0|(1<<TID::Predicable)|(1<<TID::ExtraSrcRegAllocReq), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo140 }, // Inst #1420 = VTBL2
- { 1421, 7, 1, 51, "VTBL3", 0|(1<<TID::Predicable)|(1<<TID::ExtraSrcRegAllocReq), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo141 }, // Inst #1421 = VTBL3
- { 1422, 8, 1, 52, "VTBL4", 0|(1<<TID::Predicable)|(1<<TID::ExtraSrcRegAllocReq), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo142 }, // Inst #1422 = VTBL4
- { 1423, 6, 1, 53, "VTBX1", 0|(1<<TID::Predicable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo61 }, // Inst #1423 = VTBX1
- { 1424, 7, 1, 54, "VTBX2", 0|(1<<TID::Predicable)|(1<<TID::ExtraSrcRegAllocReq), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo143 }, // Inst #1424 = VTBX2
- { 1425, 8, 1, 55, "VTBX3", 0|(1<<TID::Predicable)|(1<<TID::ExtraSrcRegAllocReq), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo144 }, // Inst #1425 = VTBX3
- { 1426, 9, 1, 56, "VTBX4", 0|(1<<TID::Predicable)|(1<<TID::ExtraSrcRegAllocReq), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo145 }, // Inst #1426 = VTBX4
- { 1427, 5, 1, 65, "VTOSHD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo128 }, // Inst #1427 = VTOSHD
- { 1428, 5, 1, 70, "VTOSHS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo129 }, // Inst #1428 = VTOSHS
- { 1429, 4, 1, 65, "VTOSIRD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo76 }, // Inst #1429 = VTOSIRD
- { 1430, 4, 1, 70, "VTOSIRS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo66 }, // Inst #1430 = VTOSIRS
- { 1431, 4, 1, 65, "VTOSIZD", 0|(1<<TID::Predicable), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo76 }, // Inst #1431 = VTOSIZD
- { 1432, 4, 1, 70, "VTOSIZS", 0|(1<<TID::Predicable), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo66 }, // Inst #1432 = VTOSIZS
- { 1433, 5, 1, 65, "VTOSLD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo128 }, // Inst #1433 = VTOSLD
- { 1434, 5, 1, 70, "VTOSLS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo129 }, // Inst #1434 = VTOSLS
- { 1435, 5, 1, 65, "VTOUHD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo128 }, // Inst #1435 = VTOUHD
- { 1436, 5, 1, 70, "VTOUHS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo129 }, // Inst #1436 = VTOUHS
- { 1437, 4, 1, 65, "VTOUIRD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo76 }, // Inst #1437 = VTOUIRD
- { 1438, 4, 1, 70, "VTOUIRS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo66 }, // Inst #1438 = VTOUIRS
- { 1439, 4, 1, 65, "VTOUIZD", 0|(1<<TID::Predicable), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo76 }, // Inst #1439 = VTOUIZD
- { 1440, 4, 1, 70, "VTOUIZS", 0|(1<<TID::Predicable), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo66 }, // Inst #1440 = VTOUIZS
- { 1441, 5, 1, 65, "VTOULD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo128 }, // Inst #1441 = VTOULD
- { 1442, 5, 1, 70, "VTOULS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo129 }, // Inst #1442 = VTOULS
- { 1443, 6, 2, 35, "VTRNd16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo146 }, // Inst #1443 = VTRNd16
- { 1444, 6, 2, 35, "VTRNd32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo146 }, // Inst #1444 = VTRNd32
- { 1445, 6, 2, 35, "VTRNd8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo146 }, // Inst #1445 = VTRNd8
- { 1446, 6, 2, 36, "VTRNq16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo147 }, // Inst #1446 = VTRNq16
- { 1447, 6, 2, 36, "VTRNq32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo147 }, // Inst #1447 = VTRNq32
- { 1448, 6, 2, 36, "VTRNq8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo147 }, // Inst #1448 = VTRNq8
- { 1449, 5, 1, 4, "VTSTv16i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1449 = VTSTv16i8
- { 1450, 5, 1, 3, "VTSTv2i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1450 = VTSTv2i32
- { 1451, 5, 1, 3, "VTSTv4i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1451 = VTSTv4i16
- { 1452, 5, 1, 4, "VTSTv4i32", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1452 = VTSTv4i32
- { 1453, 5, 1, 4, "VTSTv8i16", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo64 }, // Inst #1453 = VTSTv8i16
- { 1454, 5, 1, 3, "VTSTv8i8", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo63 }, // Inst #1454 = VTSTv8i8
- { 1455, 5, 1, 67, "VUHTOD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo128 }, // Inst #1455 = VUHTOD
- { 1456, 5, 1, 68, "VUHTOS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo129 }, // Inst #1456 = VUHTOS
- { 1457, 4, 1, 67, "VUITOD", 0|(1<<TID::Predicable), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo75 }, // Inst #1457 = VUITOD
- { 1458, 4, 1, 68, "VUITOS", 0|(1<<TID::Predicable), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo66 }, // Inst #1458 = VUITOS
- { 1459, 5, 1, 67, "VULTOD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo128 }, // Inst #1459 = VULTOD
- { 1460, 5, 1, 68, "VULTOS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(15<<9)|(1<<17), NULL, NULL, NULL, OperandInfo129 }, // Inst #1460 = VULTOS
- { 1461, 6, 2, 35, "VUZPd16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo146 }, // Inst #1461 = VUZPd16
- { 1462, 6, 2, 35, "VUZPd32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo146 }, // Inst #1462 = VUZPd32
- { 1463, 6, 2, 35, "VUZPd8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo146 }, // Inst #1463 = VUZPd8
- { 1464, 6, 2, 37, "VUZPq16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo147 }, // Inst #1464 = VUZPq16
- { 1465, 6, 2, 37, "VUZPq32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo147 }, // Inst #1465 = VUZPq32
- { 1466, 6, 2, 37, "VUZPq8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo147 }, // Inst #1466 = VUZPq8
- { 1467, 6, 2, 35, "VZIPd16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo146 }, // Inst #1467 = VZIPd16
- { 1468, 6, 2, 35, "VZIPd32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo146 }, // Inst #1468 = VZIPd32
- { 1469, 6, 2, 35, "VZIPd8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo146 }, // Inst #1469 = VZIPd8
- { 1470, 6, 2, 37, "VZIPq16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo147 }, // Inst #1470 = VZIPq16
- { 1471, 6, 2, 37, "VZIPq32", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo147 }, // Inst #1471 = VZIPq32
- { 1472, 6, 2, 37, "VZIPq8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(24<<9)|(2<<17), NULL, NULL, NULL, OperandInfo147 }, // Inst #1472 = VZIPq8
- { 1473, 2, 0, 128, "WFE", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1473 = WFE
- { 1474, 2, 0, 128, "WFI", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1474 = WFI
- { 1475, 2, 0, 128, "YIELD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(29<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1475 = YIELD
- { 1476, 6, 1, 88, "t2ADCSri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), ImplicitList1, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #1476 = t2ADCSri
- { 1477, 6, 1, 89, "t2ADCSrr", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1477 = t2ADCSrr
- { 1478, 7, 1, 90, "t2ADCSrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), ImplicitList1, ImplicitList1, Barriers1, OperandInfo48 }, // Inst #1478 = t2ADCSrs
- { 1479, 6, 1, 88, "t2ADCri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #1479 = t2ADCri
- { 1480, 6, 1, 89, "t2ADCrr", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), ImplicitList1, NULL, NULL, OperandInfo6 }, // Inst #1480 = t2ADCrr
- { 1481, 7, 1, 90, "t2ADCrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), ImplicitList1, NULL, NULL, OperandInfo48 }, // Inst #1481 = t2ADCrs
- { 1482, 5, 1, 88, "t2ADDSri", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #1482 = t2ADDSri
- { 1483, 5, 1, 89, "t2ADDSrr", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #1483 = t2ADDSrr
- { 1484, 6, 1, 90, "t2ADDSrs", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #1484 = t2ADDSrs
- { 1485, 6, 1, 88, "t2ADDrSPi", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #1485 = t2ADDrSPi
- { 1486, 5, 1, 88, "t2ADDrSPi12", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1486 = t2ADDrSPi12
- { 1487, 7, 1, 90, "t2ADDrSPs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo48 }, // Inst #1487 = t2ADDrSPs
- { 1488, 6, 1, 88, "t2ADDri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #1488 = t2ADDri
- { 1489, 6, 1, 88, "t2ADDri12", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #1489 = t2ADDri12
- { 1490, 6, 1, 89, "t2ADDrr", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #1490 = t2ADDrr
- { 1491, 7, 1, 90, "t2ADDrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo48 }, // Inst #1491 = t2ADDrs
- { 1492, 6, 1, 88, "t2ANDri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #1492 = t2ANDri
- { 1493, 6, 1, 89, "t2ANDrr", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #1493 = t2ANDrr
- { 1494, 7, 1, 90, "t2ANDrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo48 }, // Inst #1494 = t2ANDrs
- { 1495, 6, 1, 113, "t2ASRri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #1495 = t2ASRri
- { 1496, 6, 1, 114, "t2ASRrr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #1496 = t2ASRrr
- { 1497, 1, 0, 0, "t2B", 0|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Predicable)|(1<<TID::Terminator), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo14 }, // Inst #1497 = t2B
- { 1498, 5, 1, 126, "t2BFC", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo15 }, // Inst #1498 = t2BFC
- { 1499, 6, 1, 88, "t2BFI", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo51 }, // Inst #1499 = t2BFI
- { 1500, 6, 1, 88, "t2BICri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #1500 = t2BICri
- { 1501, 6, 1, 89, "t2BICrr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #1501 = t2BICrr
- { 1502, 7, 1, 90, "t2BICrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo48 }, // Inst #1502 = t2BICrs
- { 1503, 4, 0, 0, "t2BR_JT", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::NotDuplicable), 0|(1<<4)|(23<<9), NULL, NULL, NULL, OperandInfo18 }, // Inst #1503 = t2BR_JT
- { 1504, 3, 0, 128, "t2BXJ", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo21 }, // Inst #1504 = t2BXJ
- { 1505, 3, 0, 0, "t2Bcc", 0|(1<<TID::Branch)|(1<<TID::Predicable)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo11 }, // Inst #1505 = t2Bcc
- { 1506, 2, 0, 128, "t2CLREX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1506 = t2CLREX
- { 1507, 4, 1, 125, "t2CLZ", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1507 = t2CLZ
- { 1508, 4, 0, 97, "t2CMNzri", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #1508 = t2CMNzri
- { 1509, 4, 0, 98, "t2CMNzrr", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #1509 = t2CMNzrr
- { 1510, 5, 0, 99, "t2CMNzrs", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #1510 = t2CMNzrs
- { 1511, 4, 0, 97, "t2CMPri", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #1511 = t2CMPri
- { 1512, 4, 0, 98, "t2CMPrr", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #1512 = t2CMPrr
- { 1513, 5, 0, 99, "t2CMPrs", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #1513 = t2CMPrs
- { 1514, 4, 0, 97, "t2CMPzri", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #1514 = t2CMPzri
- { 1515, 4, 0, 98, "t2CMPzrr", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #1515 = t2CMPzrr
- { 1516, 5, 0, 99, "t2CMPzrs", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #1516 = t2CMPzrs
- { 1517, 1, 0, 128, "t2CPS", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo14 }, // Inst #1517 = t2CPS
- { 1518, 3, 0, 128, "t2DBG", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo11 }, // Inst #1518 = t2DBG
- { 1519, 2, 0, 128, "t2DMBish", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1519 = t2DMBish
- { 1520, 2, 0, 128, "t2DMBishst", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1520 = t2DMBishst
- { 1521, 2, 0, 128, "t2DMBnsh", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1521 = t2DMBnsh
- { 1522, 2, 0, 128, "t2DMBnshst", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1522 = t2DMBnshst
- { 1523, 2, 0, 128, "t2DMBosh", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1523 = t2DMBosh
- { 1524, 2, 0, 128, "t2DMBoshst", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1524 = t2DMBoshst
- { 1525, 2, 0, 128, "t2DMBst", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1525 = t2DMBst
- { 1526, 2, 0, 128, "t2DSBish", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1526 = t2DSBish
- { 1527, 2, 0, 128, "t2DSBishst", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1527 = t2DSBishst
- { 1528, 2, 0, 128, "t2DSBnsh", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1528 = t2DSBnsh
- { 1529, 2, 0, 128, "t2DSBnshst", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1529 = t2DSBnshst
- { 1530, 2, 0, 128, "t2DSBosh", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1530 = t2DSBosh
- { 1531, 2, 0, 128, "t2DSBoshst", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1531 = t2DSBoshst
- { 1532, 2, 0, 128, "t2DSBst", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1532 = t2DSBst
- { 1533, 6, 1, 88, "t2EORri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #1533 = t2EORri
- { 1534, 6, 1, 89, "t2EORrr", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #1534 = t2EORrr
- { 1535, 7, 1, 90, "t2EORrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo48 }, // Inst #1535 = t2EORrs
- { 1536, 2, 0, 128, "t2ISBsy", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1536 = t2ISBsy
- { 1537, 2, 0, 92, "t2IT", 0|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo148 }, // Inst #1537 = t2IT
- { 1538, 0, 0, 128, "t2Int_MemBarrierV7", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4), NULL, NULL, NULL, 0 }, // Inst #1538 = t2Int_MemBarrierV7
- { 1539, 0, 0, 128, "t2Int_SyncBarrierV7", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<4), NULL, NULL, NULL, 0 }, // Inst #1539 = t2Int_SyncBarrierV7
- { 1540, 2, 0, 128, "t2Int_eh_sjlj_setjmp", 0, 0|(1<<4)|(23<<9), NULL, ImplicitList6, Barriers3, OperandInfo149 }, // Inst #1540 = t2Int_eh_sjlj_setjmp
- { 1541, 5, 0, 103, "t2LDM", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo35 }, // Inst #1541 = t2LDM
- { 1542, 5, 0, 0, "t2LDM_RET", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo35 }, // Inst #1542 = t2LDM_RET
- { 1543, 5, 1, 101, "t2LDRBT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1543 = t2LDRBT
- { 1544, 6, 2, 102, "t2LDRB_POST", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(2<<7)|(23<<9), NULL, NULL, NULL, OperandInfo150 }, // Inst #1544 = t2LDRB_POST
- { 1545, 6, 2, 102, "t2LDRB_PRE", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(1<<7)|(23<<9), NULL, NULL, NULL, OperandInfo150 }, // Inst #1545 = t2LDRB_PRE
- { 1546, 5, 1, 101, "t2LDRBi12", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|11|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1546 = t2LDRBi12
- { 1547, 5, 1, 101, "t2LDRBi8", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|12|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1547 = t2LDRBi8
- { 1548, 4, 1, 101, "t2LDRBpci", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|14|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1548 = t2LDRBpci
- { 1549, 6, 1, 104, "t2LDRBs", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|13|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1549 = t2LDRBs
- { 1550, 6, 2, 101, "t2LDRDi8", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|15|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1550 = t2LDRDi8
- { 1551, 5, 2, 101, "t2LDRDpci", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|15|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1551 = t2LDRDpci
- { 1552, 4, 1, 128, "t2LDREX", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1552 = t2LDREX
- { 1553, 4, 1, 128, "t2LDREXB", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1553 = t2LDREXB
- { 1554, 5, 2, 128, "t2LDREXD", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1554 = t2LDREXD
- { 1555, 4, 1, 128, "t2LDREXH", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1555 = t2LDREXH
- { 1556, 5, 1, 101, "t2LDRHT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1556 = t2LDRHT
- { 1557, 6, 2, 102, "t2LDRH_POST", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(2<<7)|(23<<9), NULL, NULL, NULL, OperandInfo150 }, // Inst #1557 = t2LDRH_POST
- { 1558, 6, 2, 102, "t2LDRH_PRE", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(1<<7)|(23<<9), NULL, NULL, NULL, OperandInfo150 }, // Inst #1558 = t2LDRH_PRE
- { 1559, 5, 1, 101, "t2LDRHi12", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|11|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1559 = t2LDRHi12
- { 1560, 5, 1, 101, "t2LDRHi8", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|12|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1560 = t2LDRHi8
- { 1561, 4, 1, 101, "t2LDRHpci", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|14|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1561 = t2LDRHpci
- { 1562, 6, 1, 104, "t2LDRHs", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|13|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1562 = t2LDRHs
- { 1563, 5, 1, 101, "t2LDRSBT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1563 = t2LDRSBT
- { 1564, 6, 2, 102, "t2LDRSB_POST", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(2<<7)|(23<<9), NULL, NULL, NULL, OperandInfo150 }, // Inst #1564 = t2LDRSB_POST
- { 1565, 6, 2, 102, "t2LDRSB_PRE", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(1<<7)|(23<<9), NULL, NULL, NULL, OperandInfo150 }, // Inst #1565 = t2LDRSB_PRE
- { 1566, 5, 1, 101, "t2LDRSBi12", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|11|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1566 = t2LDRSBi12
- { 1567, 5, 1, 101, "t2LDRSBi8", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|12|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1567 = t2LDRSBi8
- { 1568, 4, 1, 101, "t2LDRSBpci", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|14|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1568 = t2LDRSBpci
- { 1569, 6, 1, 104, "t2LDRSBs", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|13|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1569 = t2LDRSBs
- { 1570, 5, 1, 101, "t2LDRSHT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1570 = t2LDRSHT
- { 1571, 6, 2, 102, "t2LDRSH_POST", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(2<<7)|(23<<9), NULL, NULL, NULL, OperandInfo150 }, // Inst #1571 = t2LDRSH_POST
- { 1572, 6, 2, 102, "t2LDRSH_PRE", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(1<<7)|(23<<9), NULL, NULL, NULL, OperandInfo150 }, // Inst #1572 = t2LDRSH_PRE
- { 1573, 5, 1, 101, "t2LDRSHi12", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|11|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1573 = t2LDRSHi12
- { 1574, 5, 1, 101, "t2LDRSHi8", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|12|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1574 = t2LDRSHi8
- { 1575, 4, 1, 101, "t2LDRSHpci", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|14|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1575 = t2LDRSHpci
- { 1576, 6, 1, 104, "t2LDRSHs", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|13|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1576 = t2LDRSHs
- { 1577, 5, 1, 101, "t2LDRT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1577 = t2LDRT
- { 1578, 6, 2, 102, "t2LDR_POST", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(2<<7)|(23<<9), NULL, NULL, NULL, OperandInfo150 }, // Inst #1578 = t2LDR_POST
- { 1579, 6, 2, 102, "t2LDR_PRE", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(1<<7)|(23<<9), NULL, NULL, NULL, OperandInfo150 }, // Inst #1579 = t2LDR_PRE
- { 1580, 5, 1, 101, "t2LDRi12", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|11|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1580 = t2LDRi12
- { 1581, 5, 1, 101, "t2LDRi8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|12|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1581 = t2LDRi8
- { 1582, 4, 1, 101, "t2LDRpci", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|14|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1582 = t2LDRpci
- { 1583, 3, 1, 128, "t2LDRpci_pic", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|(1<<4), NULL, NULL, NULL, OperandInfo20 }, // Inst #1583 = t2LDRpci_pic
- { 1584, 6, 1, 104, "t2LDRs", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|13|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1584 = t2LDRs
- { 1585, 4, 1, 88, "t2LEApcrel", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1585 = t2LEApcrel
- { 1586, 5, 1, 88, "t2LEApcrelJT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo38 }, // Inst #1586 = t2LEApcrelJT
- { 1587, 6, 1, 113, "t2LSLri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #1587 = t2LSLri
- { 1588, 6, 1, 114, "t2LSLrr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #1588 = t2LSLrr
- { 1589, 6, 1, 113, "t2LSRri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #1589 = t2LSRri
- { 1590, 6, 1, 114, "t2LSRrr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #1590 = t2LSRrr
- { 1591, 6, 1, 109, "t2MLA", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1591 = t2MLA
- { 1592, 6, 1, 109, "t2MLS", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1592 = t2MLS
- { 1593, 6, 1, 95, "t2MOVCCasr", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo151 }, // Inst #1593 = t2MOVCCasr
- { 1594, 5, 1, 93, "t2MOVCCi", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo15 }, // Inst #1594 = t2MOVCCi
- { 1595, 6, 1, 95, "t2MOVCClsl", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo151 }, // Inst #1595 = t2MOVCClsl
- { 1596, 6, 1, 95, "t2MOVCClsr", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo151 }, // Inst #1596 = t2MOVCClsr
- { 1597, 5, 1, 94, "t2MOVCCr", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo44 }, // Inst #1597 = t2MOVCCr
- { 1598, 6, 1, 95, "t2MOVCCror", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo151 }, // Inst #1598 = t2MOVCCror
- { 1599, 5, 1, 111, "t2MOVTi16", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo15 }, // Inst #1599 = t2MOVTi16
- { 1600, 5, 1, 111, "t2MOVi", 0|(1<<TID::Predicable)|(1<<TID::Rematerializable)|(1<<TID::HasOptionalDef)|(1<<TID::CheapAsAMove), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo46 }, // Inst #1600 = t2MOVi
- { 1601, 4, 1, 111, "t2MOVi16", 0|(1<<TID::Predicable)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1601 = t2MOVi16
- { 1602, 4, 1, 111, "t2MOVi32imm", 0|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|(2<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1602 = t2MOVi32imm
- { 1603, 5, 1, 112, "t2MOVr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo47 }, // Inst #1603 = t2MOVr
- { 1604, 5, 1, 113, "t2MOVrx", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), ImplicitList1, NULL, NULL, OperandInfo47 }, // Inst #1604 = t2MOVrx
- { 1605, 2, 1, 113, "t2MOVsra_flag", 0, 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo32 }, // Inst #1605 = t2MOVsra_flag
- { 1606, 2, 1, 113, "t2MOVsrl_flag", 0, 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo32 }, // Inst #1606 = t2MOVsrl_flag
- { 1607, 3, 1, 128, "t2MRS", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo21 }, // Inst #1607 = t2MRS
- { 1608, 3, 1, 128, "t2MRSsys", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo21 }, // Inst #1608 = t2MRSsys
- { 1609, 3, 0, 128, "t2MSR", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo21 }, // Inst #1609 = t2MSR
- { 1610, 3, 0, 128, "t2MSRsys", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo21 }, // Inst #1610 = t2MSRsys
- { 1611, 5, 1, 116, "t2MUL", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1611 = t2MUL
- { 1612, 5, 1, 111, "t2MVNi", 0|(1<<TID::Predicable)|(1<<TID::Rematerializable)|(1<<TID::HasOptionalDef)|(1<<TID::CheapAsAMove), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo46 }, // Inst #1612 = t2MVNi
- { 1613, 4, 1, 112, "t2MVNr", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1613 = t2MVNr
- { 1614, 5, 1, 113, "t2MVNs", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1614 = t2MVNs
- { 1615, 2, 0, 128, "t2NOP", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1615 = t2NOP
- { 1616, 6, 1, 88, "t2ORNri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #1616 = t2ORNri
- { 1617, 6, 1, 89, "t2ORNrr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #1617 = t2ORNrr
- { 1618, 7, 1, 90, "t2ORNrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo48 }, // Inst #1618 = t2ORNrs
- { 1619, 6, 1, 88, "t2ORRri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #1619 = t2ORRri
- { 1620, 6, 1, 89, "t2ORRrr", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #1620 = t2ORRrr
- { 1621, 7, 1, 90, "t2ORRrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo48 }, // Inst #1621 = t2ORRrs
- { 1622, 6, 1, 90, "t2PKHBT", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1622 = t2PKHBT
- { 1623, 6, 1, 90, "t2PKHTB", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1623 = t2PKHTB
- { 1624, 4, 0, 101, "t2PLDWi12", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1624 = t2PLDWi12
- { 1625, 4, 0, 101, "t2PLDWi8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1625 = t2PLDWi8
- { 1626, 4, 0, 101, "t2PLDWpci", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1626 = t2PLDWpci
- { 1627, 4, 0, 101, "t2PLDWr", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1627 = t2PLDWr
- { 1628, 5, 0, 101, "t2PLDWs", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1628 = t2PLDWs
- { 1629, 4, 0, 101, "t2PLDi12", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1629 = t2PLDi12
- { 1630, 4, 0, 101, "t2PLDi8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1630 = t2PLDi8
- { 1631, 4, 0, 101, "t2PLDpci", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1631 = t2PLDpci
- { 1632, 4, 0, 101, "t2PLDr", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1632 = t2PLDr
- { 1633, 5, 0, 101, "t2PLDs", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1633 = t2PLDs
- { 1634, 4, 0, 101, "t2PLIi12", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1634 = t2PLIi12
- { 1635, 4, 0, 101, "t2PLIi8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1635 = t2PLIi8
- { 1636, 4, 0, 101, "t2PLIpci", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo26 }, // Inst #1636 = t2PLIpci
- { 1637, 4, 0, 101, "t2PLIr", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1637 = t2PLIr
- { 1638, 5, 0, 101, "t2PLIs", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1638 = t2PLIs
- { 1639, 5, 1, 128, "t2QADD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1639 = t2QADD
- { 1640, 5, 1, 128, "t2QADD16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1640 = t2QADD16
- { 1641, 5, 1, 128, "t2QADD8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1641 = t2QADD8
- { 1642, 5, 1, 128, "t2QASX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1642 = t2QASX
- { 1643, 5, 1, 128, "t2QDADD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1643 = t2QDADD
- { 1644, 5, 1, 128, "t2QDSUB", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1644 = t2QDSUB
- { 1645, 5, 1, 128, "t2QSAX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1645 = t2QSAX
- { 1646, 5, 1, 128, "t2QSUB", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1646 = t2QSUB
- { 1647, 5, 1, 128, "t2QSUB16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1647 = t2QSUB16
- { 1648, 5, 1, 128, "t2QSUB8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1648 = t2QSUB8
- { 1649, 4, 1, 125, "t2RBIT", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1649 = t2RBIT
- { 1650, 4, 1, 125, "t2REV", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1650 = t2REV
- { 1651, 4, 1, 125, "t2REV16", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1651 = t2REV16
- { 1652, 4, 1, 125, "t2REVSH", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1652 = t2REVSH
- { 1653, 3, 0, 128, "t2RFEDB", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo21 }, // Inst #1653 = t2RFEDB
- { 1654, 3, 0, 128, "t2RFEDBW", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo21 }, // Inst #1654 = t2RFEDBW
- { 1655, 3, 0, 128, "t2RFEIA", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo21 }, // Inst #1655 = t2RFEIA
- { 1656, 3, 0, 128, "t2RFEIAW", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo21 }, // Inst #1656 = t2RFEIAW
- { 1657, 6, 1, 113, "t2RORri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #1657 = t2RORri
- { 1658, 6, 1, 114, "t2RORrr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #1658 = t2RORrr
- { 1659, 4, 1, 88, "t2RSBSri", 0|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo152 }, // Inst #1659 = t2RSBSri
- { 1660, 5, 1, 90, "t2RSBSrs", 0|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo153 }, // Inst #1660 = t2RSBSrs
- { 1661, 5, 1, 88, "t2RSBri", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1661 = t2RSBri
- { 1662, 6, 1, 90, "t2RSBrs", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1662 = t2RSBrs
- { 1663, 5, 1, 128, "t2SADD16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1663 = t2SADD16
- { 1664, 5, 1, 128, "t2SADD8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1664 = t2SADD8
- { 1665, 5, 1, 128, "t2SASX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1665 = t2SASX
- { 1666, 6, 1, 88, "t2SBCSri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), ImplicitList1, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #1666 = t2SBCSri
- { 1667, 6, 1, 89, "t2SBCSrr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1667 = t2SBCSrr
- { 1668, 7, 1, 90, "t2SBCSrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), ImplicitList1, ImplicitList1, Barriers1, OperandInfo48 }, // Inst #1668 = t2SBCSrs
- { 1669, 6, 1, 88, "t2SBCri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #1669 = t2SBCri
- { 1670, 6, 1, 89, "t2SBCrr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), ImplicitList1, NULL, NULL, OperandInfo6 }, // Inst #1670 = t2SBCrr
- { 1671, 7, 1, 90, "t2SBCrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), ImplicitList1, NULL, NULL, OperandInfo48 }, // Inst #1671 = t2SBCrs
- { 1672, 6, 1, 88, "t2SBFX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo51 }, // Inst #1672 = t2SBFX
- { 1673, 5, 1, 88, "t2SDIV", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1673 = t2SDIV
- { 1674, 5, 1, 128, "t2SEL", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1674 = t2SEL
- { 1675, 2, 0, 128, "t2SEV", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1675 = t2SEV
- { 1676, 5, 1, 128, "t2SHADD16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1676 = t2SHADD16
- { 1677, 5, 1, 128, "t2SHADD8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1677 = t2SHADD8
- { 1678, 5, 1, 128, "t2SHASX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1678 = t2SHASX
- { 1679, 5, 1, 128, "t2SHSAX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1679 = t2SHSAX
- { 1680, 5, 1, 128, "t2SHSUB16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1680 = t2SHSUB16
- { 1681, 5, 1, 128, "t2SHSUB8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1681 = t2SHSUB8
- { 1682, 3, 0, 128, "t2SMC", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo11 }, // Inst #1682 = t2SMC
- { 1683, 6, 1, 108, "t2SMLABB", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1683 = t2SMLABB
- { 1684, 6, 1, 108, "t2SMLABT", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1684 = t2SMLABT
- { 1685, 6, 1, 109, "t2SMLAD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1685 = t2SMLAD
- { 1686, 6, 1, 109, "t2SMLADX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1686 = t2SMLADX
- { 1687, 6, 2, 110, "t2SMLAL", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1687 = t2SMLAL
- { 1688, 6, 2, 110, "t2SMLALBB", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1688 = t2SMLALBB
- { 1689, 6, 2, 110, "t2SMLALBT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1689 = t2SMLALBT
- { 1690, 6, 2, 110, "t2SMLALD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1690 = t2SMLALD
- { 1691, 6, 2, 110, "t2SMLALDX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1691 = t2SMLALDX
- { 1692, 6, 2, 110, "t2SMLALTB", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1692 = t2SMLALTB
- { 1693, 6, 2, 110, "t2SMLALTT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1693 = t2SMLALTT
- { 1694, 6, 1, 108, "t2SMLATB", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1694 = t2SMLATB
- { 1695, 6, 1, 108, "t2SMLATT", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1695 = t2SMLATT
- { 1696, 6, 1, 108, "t2SMLAWB", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1696 = t2SMLAWB
- { 1697, 6, 1, 108, "t2SMLAWT", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1697 = t2SMLAWT
- { 1698, 6, 1, 109, "t2SMLSD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1698 = t2SMLSD
- { 1699, 6, 1, 109, "t2SMLSDX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1699 = t2SMLSDX
- { 1700, 6, 2, 110, "t2SMLSLD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1700 = t2SMLSLD
- { 1701, 6, 2, 110, "t2SMLSLDX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1701 = t2SMLSLDX
- { 1702, 6, 1, 109, "t2SMMLA", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1702 = t2SMMLA
- { 1703, 6, 1, 109, "t2SMMLAR", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1703 = t2SMMLAR
- { 1704, 6, 1, 109, "t2SMMLS", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1704 = t2SMMLS
- { 1705, 6, 1, 109, "t2SMMLSR", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1705 = t2SMMLSR
- { 1706, 5, 1, 116, "t2SMMUL", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1706 = t2SMMUL
- { 1707, 5, 1, 116, "t2SMMULR", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1707 = t2SMMULR
- { 1708, 5, 1, 109, "t2SMUAD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1708 = t2SMUAD
- { 1709, 5, 1, 109, "t2SMUADX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1709 = t2SMUADX
- { 1710, 5, 1, 116, "t2SMULBB", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1710 = t2SMULBB
- { 1711, 5, 1, 116, "t2SMULBT", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1711 = t2SMULBT
- { 1712, 6, 2, 117, "t2SMULL", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1712 = t2SMULL
- { 1713, 5, 1, 116, "t2SMULTB", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1713 = t2SMULTB
- { 1714, 5, 1, 116, "t2SMULTT", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1714 = t2SMULTT
- { 1715, 5, 1, 115, "t2SMULWB", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1715 = t2SMULWB
- { 1716, 5, 1, 115, "t2SMULWT", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1716 = t2SMULWT
- { 1717, 5, 1, 109, "t2SMUSD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1717 = t2SMUSD
- { 1718, 5, 1, 109, "t2SMUSDX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1718 = t2SMUSDX
- { 1719, 3, 0, 128, "t2SRSDB", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo11 }, // Inst #1719 = t2SRSDB
- { 1720, 3, 0, 128, "t2SRSDBW", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo11 }, // Inst #1720 = t2SRSDBW
- { 1721, 3, 0, 128, "t2SRSIA", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo11 }, // Inst #1721 = t2SRSIA
- { 1722, 3, 0, 128, "t2SRSIAW", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo11 }, // Inst #1722 = t2SRSIAW
- { 1723, 5, 1, 128, "t2SSAT16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo52 }, // Inst #1723 = t2SSAT16
- { 1724, 6, 1, 128, "t2SSATasr", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo53 }, // Inst #1724 = t2SSATasr
- { 1725, 6, 1, 128, "t2SSATlsl", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo53 }, // Inst #1725 = t2SSATlsl
- { 1726, 5, 1, 128, "t2SSAX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1726 = t2SSAX
- { 1727, 5, 1, 128, "t2SSUB16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1727 = t2SSUB16
- { 1728, 5, 1, 128, "t2SSUB8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1728 = t2SSUB8
- { 1729, 5, 0, 120, "t2STM", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo35 }, // Inst #1729 = t2STM
- { 1730, 5, 1, 118, "t2STRBT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1730 = t2STRBT
- { 1731, 6, 1, 119, "t2STRB_POST", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|12|(3<<4)|(2<<7)|(23<<9), NULL, NULL, NULL, OperandInfo154 }, // Inst #1731 = t2STRB_POST
- { 1732, 6, 1, 119, "t2STRB_PRE", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|12|(3<<4)|(1<<7)|(23<<9), NULL, NULL, NULL, OperandInfo154 }, // Inst #1732 = t2STRB_PRE
- { 1733, 5, 0, 118, "t2STRBi12", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|11|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1733 = t2STRBi12
- { 1734, 5, 0, 118, "t2STRBi8", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|12|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1734 = t2STRBi8
- { 1735, 6, 0, 121, "t2STRBs", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|13|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1735 = t2STRBs
- { 1736, 6, 0, 121, "t2STRDi8", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|15|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1736 = t2STRDi8
- { 1737, 5, 1, 128, "t2STREX", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo56 }, // Inst #1737 = t2STREX
- { 1738, 5, 1, 128, "t2STREXB", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo56 }, // Inst #1738 = t2STREXB
- { 1739, 6, 1, 128, "t2STREXD", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo57 }, // Inst #1739 = t2STREXD
- { 1740, 5, 1, 128, "t2STREXH", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo56 }, // Inst #1740 = t2STREXH
- { 1741, 5, 1, 118, "t2STRHT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1741 = t2STRHT
- { 1742, 6, 1, 119, "t2STRH_POST", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|12|(3<<4)|(2<<7)|(23<<9), NULL, NULL, NULL, OperandInfo154 }, // Inst #1742 = t2STRH_POST
- { 1743, 6, 1, 119, "t2STRH_PRE", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|12|(3<<4)|(1<<7)|(23<<9), NULL, NULL, NULL, OperandInfo154 }, // Inst #1743 = t2STRH_PRE
- { 1744, 5, 0, 118, "t2STRHi12", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|11|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1744 = t2STRHi12
- { 1745, 5, 0, 118, "t2STRHi8", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|12|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1745 = t2STRHi8
- { 1746, 6, 0, 121, "t2STRHs", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|13|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1746 = t2STRHs
- { 1747, 5, 1, 118, "t2STRT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|12|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1747 = t2STRT
- { 1748, 6, 1, 119, "t2STR_POST", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|12|(3<<4)|(2<<7)|(23<<9), NULL, NULL, NULL, OperandInfo154 }, // Inst #1748 = t2STR_POST
- { 1749, 6, 1, 119, "t2STR_PRE", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|12|(3<<4)|(1<<7)|(23<<9), NULL, NULL, NULL, OperandInfo154 }, // Inst #1749 = t2STR_PRE
- { 1750, 5, 0, 118, "t2STRi12", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|11|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1750 = t2STRi12
- { 1751, 5, 0, 118, "t2STRi8", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|12|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1751 = t2STRi8
- { 1752, 6, 0, 121, "t2STRs", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|13|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1752 = t2STRs
- { 1753, 5, 1, 88, "t2SUBSri", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #1753 = t2SUBSri
- { 1754, 5, 1, 89, "t2SUBSrr", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #1754 = t2SUBSrr
- { 1755, 6, 1, 90, "t2SUBSrs", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #1755 = t2SUBSrs
- { 1756, 6, 1, 88, "t2SUBrSPi", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #1756 = t2SUBrSPi
- { 1757, 5, 1, 88, "t2SUBrSPi12", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1757 = t2SUBrSPi12
- { 1758, 3, 1, 128, "t2SUBrSPi12_", 0|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0|(1<<4), NULL, NULL, NULL, OperandInfo2 }, // Inst #1758 = t2SUBrSPi12_
- { 1759, 3, 1, 128, "t2SUBrSPi_", 0|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0|(1<<4), NULL, NULL, NULL, OperandInfo2 }, // Inst #1759 = t2SUBrSPi_
- { 1760, 7, 1, 90, "t2SUBrSPs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo48 }, // Inst #1760 = t2SUBrSPs
- { 1761, 4, 1, 128, "t2SUBrSPs_", 0|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0|(1<<4), NULL, NULL, NULL, OperandInfo155 }, // Inst #1761 = t2SUBrSPs_
- { 1762, 6, 1, 88, "t2SUBri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #1762 = t2SUBri
- { 1763, 6, 1, 88, "t2SUBri12", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo5 }, // Inst #1763 = t2SUBri12
- { 1764, 6, 1, 89, "t2SUBrr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo6 }, // Inst #1764 = t2SUBrr
- { 1765, 7, 1, 90, "t2SUBrs", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo48 }, // Inst #1765 = t2SUBrs
- { 1766, 5, 1, 89, "t2SXTAB16rr", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1766 = t2SXTAB16rr
- { 1767, 6, 1, 91, "t2SXTAB16rr_rot", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1767 = t2SXTAB16rr_rot
- { 1768, 5, 1, 89, "t2SXTABrr", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1768 = t2SXTABrr
- { 1769, 6, 1, 91, "t2SXTABrr_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1769 = t2SXTABrr_rot
- { 1770, 5, 1, 89, "t2SXTAHrr", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1770 = t2SXTAHrr
- { 1771, 6, 1, 91, "t2SXTAHrr_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1771 = t2SXTAHrr_rot
- { 1772, 4, 1, 125, "t2SXTB16r", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1772 = t2SXTB16r
- { 1773, 5, 1, 126, "t2SXTB16r_rot", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1773 = t2SXTB16r_rot
- { 1774, 4, 1, 125, "t2SXTBr", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1774 = t2SXTBr
- { 1775, 5, 1, 126, "t2SXTBr_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1775 = t2SXTBr_rot
- { 1776, 4, 1, 125, "t2SXTHr", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1776 = t2SXTHr
- { 1777, 5, 1, 126, "t2SXTHr_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1777 = t2SXTHr_rot
- { 1778, 3, 0, 0, "t2TBB", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0|(1<<4)|(23<<9), NULL, NULL, NULL, OperandInfo28 }, // Inst #1778 = t2TBB
- { 1779, 4, 0, 0, "t2TBBgen", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Predicable)|(1<<TID::Terminator)|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1779 = t2TBBgen
- { 1780, 3, 0, 0, "t2TBH", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0|(1<<4)|(23<<9), NULL, NULL, NULL, OperandInfo28 }, // Inst #1780 = t2TBH
- { 1781, 4, 0, 0, "t2TBHgen", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Predicable)|(1<<TID::Terminator)|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1781 = t2TBHgen
- { 1782, 4, 0, 97, "t2TEQri", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #1782 = t2TEQri
- { 1783, 4, 0, 98, "t2TEQrr", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #1783 = t2TEQrr
- { 1784, 5, 0, 99, "t2TEQrs", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #1784 = t2TEQrs
- { 1785, 0, 0, 0, "t2TPsoft", 0|(1<<TID::Call), 0|(3<<4)|(23<<9), NULL, ImplicitList7, Barriers1, 0 }, // Inst #1785 = t2TPsoft
- { 1786, 4, 0, 97, "t2TSTri", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #1786 = t2TSTri
- { 1787, 4, 0, 98, "t2TSTrr", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #1787 = t2TSTrr
- { 1788, 5, 0, 99, "t2TSTrs", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #1788 = t2TSTrs
- { 1789, 5, 1, 128, "t2UADD16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1789 = t2UADD16
- { 1790, 5, 1, 128, "t2UADD8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1790 = t2UADD8
- { 1791, 5, 1, 128, "t2UASX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1791 = t2UASX
- { 1792, 6, 1, 88, "t2UBFX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo51 }, // Inst #1792 = t2UBFX
- { 1793, 5, 1, 88, "t2UDIV", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1793 = t2UDIV
- { 1794, 5, 1, 128, "t2UHADD16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1794 = t2UHADD16
- { 1795, 5, 1, 128, "t2UHADD8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1795 = t2UHADD8
- { 1796, 5, 1, 128, "t2UHASX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1796 = t2UHASX
- { 1797, 5, 1, 128, "t2UHSAX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1797 = t2UHSAX
- { 1798, 5, 1, 128, "t2UHSUB16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1798 = t2UHSUB16
- { 1799, 5, 1, 128, "t2UHSUB8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1799 = t2UHSUB8
- { 1800, 6, 2, 110, "t2UMAAL", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1800 = t2UMAAL
- { 1801, 6, 2, 110, "t2UMLAL", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1801 = t2UMLAL
- { 1802, 6, 2, 117, "t2UMULL", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1802 = t2UMULL
- { 1803, 5, 1, 128, "t2UQADD16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1803 = t2UQADD16
- { 1804, 5, 1, 128, "t2UQADD8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1804 = t2UQADD8
- { 1805, 5, 1, 128, "t2UQASX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1805 = t2UQASX
- { 1806, 5, 1, 128, "t2UQSAX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1806 = t2UQSAX
- { 1807, 5, 1, 128, "t2UQSUB16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1807 = t2UQSUB16
- { 1808, 5, 1, 128, "t2UQSUB8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1808 = t2UQSUB8
- { 1809, 5, 1, 128, "t2USAD8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1809 = t2USAD8
- { 1810, 6, 1, 128, "t2USADA8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo43 }, // Inst #1810 = t2USADA8
- { 1811, 5, 1, 128, "t2USAT16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo52 }, // Inst #1811 = t2USAT16
- { 1812, 6, 1, 128, "t2USATasr", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo53 }, // Inst #1812 = t2USATasr
- { 1813, 6, 1, 128, "t2USATlsl", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo53 }, // Inst #1813 = t2USATlsl
- { 1814, 5, 1, 128, "t2USAX", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1814 = t2USAX
- { 1815, 5, 1, 128, "t2USUB16", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1815 = t2USUB16
- { 1816, 5, 1, 128, "t2USUB8", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1816 = t2USUB8
- { 1817, 5, 1, 89, "t2UXTAB16rr", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1817 = t2UXTAB16rr
- { 1818, 6, 1, 91, "t2UXTAB16rr_rot", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1818 = t2UXTAB16rr_rot
- { 1819, 5, 1, 89, "t2UXTABrr", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1819 = t2UXTABrr
- { 1820, 6, 1, 91, "t2UXTABrr_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1820 = t2UXTABrr_rot
- { 1821, 5, 1, 89, "t2UXTAHrr", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo9 }, // Inst #1821 = t2UXTAHrr
- { 1822, 6, 1, 91, "t2UXTAHrr_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo27 }, // Inst #1822 = t2UXTAHrr_rot
- { 1823, 4, 1, 125, "t2UXTB16r", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1823 = t2UXTB16r
- { 1824, 5, 1, 126, "t2UXTB16r_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1824 = t2UXTB16r_rot
- { 1825, 4, 1, 125, "t2UXTBr", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1825 = t2UXTBr
- { 1826, 5, 1, 126, "t2UXTBr_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1826 = t2UXTBr_rot
- { 1827, 4, 1, 125, "t2UXTHr", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo25 }, // Inst #1827 = t2UXTHr
- { 1828, 5, 1, 126, "t2UXTHr_rot", 0|(1<<TID::Predicable), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo8 }, // Inst #1828 = t2UXTHr_rot
- { 1829, 2, 0, 128, "t2WFE", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1829 = t2WFE
- { 1830, 2, 0, 128, "t2WFI", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1830 = t2WFI
- { 1831, 2, 0, 128, "t2YIELD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1831 = t2YIELD
- { 1832, 6, 2, 89, "tADC", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), ImplicitList1, NULL, NULL, OperandInfo156 }, // Inst #1832 = tADC
- { 1833, 5, 1, 89, "tADDhirr", 0|(1<<TID::Predicable), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo44 }, // Inst #1833 = tADDhirr
- { 1834, 6, 2, 88, "tADDi3", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo157 }, // Inst #1834 = tADDi3
- { 1835, 6, 2, 88, "tADDi8", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo158 }, // Inst #1835 = tADDi8
- { 1836, 2, 1, 88, "tADDrPCi", 0|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo159 }, // Inst #1836 = tADDrPCi
- { 1837, 3, 1, 89, "tADDrSP", 0|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo160 }, // Inst #1837 = tADDrSP
- { 1838, 3, 1, 88, "tADDrSPi", 0|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo161 }, // Inst #1838 = tADDrSPi
- { 1839, 6, 2, 89, "tADDrr", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo162 }, // Inst #1839 = tADDrr
- { 1840, 3, 1, 88, "tADDspi", 0|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo163 }, // Inst #1840 = tADDspi
- { 1841, 3, 1, 89, "tADDspr", 0|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo160 }, // Inst #1841 = tADDspr
- { 1842, 3, 1, 128, "tADDspr_", 0|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0|(1<<4), NULL, NULL, NULL, OperandInfo3 }, // Inst #1842 = tADDspr_
- { 1843, 1, 0, 128, "tADJCALLSTACKDOWN", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<4), ImplicitList2, ImplicitList2, NULL, OperandInfo14 }, // Inst #1843 = tADJCALLSTACKDOWN
- { 1844, 2, 0, 128, "tADJCALLSTACKUP", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<4), ImplicitList2, ImplicitList2, NULL, OperandInfo148 }, // Inst #1844 = tADJCALLSTACKUP
- { 1845, 6, 2, 89, "tAND", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo156 }, // Inst #1845 = tAND
- { 1846, 3, 1, 128, "tANDsp", 0|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0|(1<<4), NULL, ImplicitList1, Barriers1, OperandInfo164 }, // Inst #1846 = tANDsp
- { 1847, 6, 2, 113, "tASRri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo157 }, // Inst #1847 = tASRri
- { 1848, 6, 2, 114, "tASRrr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo156 }, // Inst #1848 = tASRrr
- { 1849, 1, 0, 0, "tB", 0|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Predicable)|(1<<TID::Terminator), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo14 }, // Inst #1849 = tB
- { 1850, 6, 2, 89, "tBIC", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo156 }, // Inst #1850 = tBIC
- { 1851, 1, 0, 128, "tBKPT", 0|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo14 }, // Inst #1851 = tBKPT
- { 1852, 1, 0, 0, "tBL", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(3<<4)|(23<<9), NULL, ImplicitList3, Barriers2, OperandInfo14 }, // Inst #1852 = tBL
- { 1853, 1, 0, 0, "tBLXi", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(3<<4)|(23<<9), NULL, ImplicitList3, Barriers2, OperandInfo14 }, // Inst #1853 = tBLXi
- { 1854, 1, 0, 0, "tBLXi_r9", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(3<<4)|(23<<9), NULL, ImplicitList4, Barriers2, OperandInfo14 }, // Inst #1854 = tBLXi_r9
- { 1855, 1, 0, 0, "tBLXr", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(4<<4)|(23<<9), NULL, ImplicitList3, Barriers2, OperandInfo16 }, // Inst #1855 = tBLXr
- { 1856, 1, 0, 0, "tBLXr_r9", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(4<<4)|(23<<9), NULL, ImplicitList4, Barriers2, OperandInfo16 }, // Inst #1856 = tBLXr_r9
- { 1857, 1, 0, 0, "tBLr9", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(3<<4)|(23<<9), NULL, ImplicitList4, Barriers2, OperandInfo14 }, // Inst #1857 = tBLr9
- { 1858, 1, 0, 0, "tBRIND", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo16 }, // Inst #1858 = tBRIND
- { 1859, 3, 0, 0, "tBR_JTr", 0|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Terminator), 0|(1<<4)|(23<<9), NULL, NULL, NULL, OperandInfo165 }, // Inst #1859 = tBR_JTr
- { 1860, 1, 0, 0, "tBX", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(3<<4)|(23<<9), NULL, ImplicitList3, Barriers2, OperandInfo17 }, // Inst #1860 = tBX
- { 1861, 0, 0, 0, "tBX_RET", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Terminator), 0|(4<<4)|(23<<9), NULL, NULL, NULL, 0 }, // Inst #1861 = tBX_RET
- { 1862, 1, 0, 0, "tBX_RET_vararg", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo17 }, // Inst #1862 = tBX_RET_vararg
- { 1863, 1, 0, 0, "tBXr9", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(3<<4)|(23<<9), NULL, ImplicitList4, Barriers2, OperandInfo17 }, // Inst #1863 = tBXr9
- { 1864, 3, 0, 0, "tBcc", 0|(1<<TID::Branch)|(1<<TID::Predicable)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo11 }, // Inst #1864 = tBcc
- { 1865, 1, 0, 0, "tBfar", 0|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|(3<<4)|(23<<9), NULL, ImplicitList8, NULL, OperandInfo14 }, // Inst #1865 = tBfar
- { 1866, 2, 0, 0, "tCBNZ", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo159 }, // Inst #1866 = tCBNZ
- { 1867, 2, 0, 0, "tCBZ", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo159 }, // Inst #1867 = tCBZ
- { 1868, 4, 0, 98, "tCMNz", 0|(1<<TID::Predicable), 0|(4<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo166 }, // Inst #1868 = tCMNz
- { 1869, 4, 0, 98, "tCMPhir", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #1869 = tCMPhir
- { 1870, 4, 0, 97, "tCMPi8", 0|(1<<TID::Predicable), 0|(4<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo167 }, // Inst #1870 = tCMPi8
- { 1871, 4, 0, 98, "tCMPr", 0|(1<<TID::Predicable), 0|(4<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo166 }, // Inst #1871 = tCMPr
- { 1872, 4, 0, 98, "tCMPzhir", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #1872 = tCMPzhir
- { 1873, 4, 0, 97, "tCMPzi8", 0|(1<<TID::Predicable), 0|(4<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo167 }, // Inst #1873 = tCMPzi8
- { 1874, 4, 0, 98, "tCMPzr", 0|(1<<TID::Predicable), 0|(4<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo166 }, // Inst #1874 = tCMPzr
- { 1875, 1, 0, 128, "tCPS", 0|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo14 }, // Inst #1875 = tCPS
- { 1876, 6, 2, 89, "tEOR", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo156 }, // Inst #1876 = tEOR
- { 1877, 2, 0, 128, "tInt_eh_sjlj_setjmp", 0, 0|(1<<4)|(23<<9), NULL, ImplicitList9, Barriers4, OperandInfo168 }, // Inst #1877 = tInt_eh_sjlj_setjmp
- { 1878, 5, 0, 103, "tLDM", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo35 }, // Inst #1878 = tLDM
- { 1879, 6, 1, 104, "tLDR", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|9|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo169 }, // Inst #1879 = tLDR
- { 1880, 6, 1, 104, "tLDRB", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|7|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo169 }, // Inst #1880 = tLDRB
- { 1881, 6, 1, 104, "tLDRBi", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|7|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo169 }, // Inst #1881 = tLDRBi
- { 1882, 6, 1, 104, "tLDRH", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|8|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo169 }, // Inst #1882 = tLDRH
- { 1883, 6, 1, 104, "tLDRHi", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|8|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo169 }, // Inst #1883 = tLDRHi
- { 1884, 5, 1, 104, "tLDRSB", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|7|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo170 }, // Inst #1884 = tLDRSB
- { 1885, 5, 1, 104, "tLDRSH", 0|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|8|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo170 }, // Inst #1885 = tLDRSH
- { 1886, 4, 1, 101, "tLDRcp", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Rematerializable)|(1<<TID::UnmodeledSideEffects), 0|10|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo167 }, // Inst #1886 = tLDRcp
- { 1887, 6, 1, 104, "tLDRi", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|9|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo169 }, // Inst #1887 = tLDRi
- { 1888, 4, 1, 101, "tLDRpci", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Rematerializable), 0|10|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo167 }, // Inst #1888 = tLDRpci
- { 1889, 3, 1, 128, "tLDRpci_pic", 0|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|(1<<4), NULL, NULL, NULL, OperandInfo20 }, // Inst #1889 = tLDRpci_pic
- { 1890, 5, 1, 101, "tLDRspi", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Predicable), 0|10|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo171 }, // Inst #1890 = tLDRspi
- { 1891, 4, 1, 88, "tLEApcrel", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo167 }, // Inst #1891 = tLEApcrel
- { 1892, 5, 1, 88, "tLEApcrelJT", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo172 }, // Inst #1892 = tLEApcrelJT
- { 1893, 6, 2, 113, "tLSLri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo157 }, // Inst #1893 = tLSLri
- { 1894, 6, 2, 114, "tLSLrr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo156 }, // Inst #1894 = tLSLrr
- { 1895, 6, 2, 113, "tLSRri", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo157 }, // Inst #1895 = tLSRri
- { 1896, 6, 2, 114, "tLSRrr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo156 }, // Inst #1896 = tLSRrr
- { 1897, 5, 1, 93, "tMOVCCi", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo173 }, // Inst #1897 = tMOVCCi
- { 1898, 5, 1, 94, "tMOVCCr", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo44 }, // Inst #1898 = tMOVCCr
- { 1899, 5, 1, 128, "tMOVCCr_pseudo", 0|(1<<TID::Predicable)|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0|(1<<4), NULL, NULL, NULL, OperandInfo170 }, // Inst #1899 = tMOVCCr_pseudo
- { 1900, 2, 1, 112, "tMOVSr", 0, 0|(4<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo168 }, // Inst #1900 = tMOVSr
- { 1901, 2, 1, 112, "tMOVgpr2gpr", 0, 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo32 }, // Inst #1901 = tMOVgpr2gpr
- { 1902, 2, 1, 112, "tMOVgpr2tgpr", 0, 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo174 }, // Inst #1902 = tMOVgpr2tgpr
- { 1903, 5, 2, 111, "tMOVi8", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo175 }, // Inst #1903 = tMOVi8
- { 1904, 2, 1, 112, "tMOVr", 0, 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo168 }, // Inst #1904 = tMOVr
- { 1905, 2, 1, 112, "tMOVtgpr2gpr", 0, 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo149 }, // Inst #1905 = tMOVtgpr2gpr
- { 1906, 6, 2, 116, "tMUL", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo156 }, // Inst #1906 = tMUL
- { 1907, 5, 2, 112, "tMVN", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo176 }, // Inst #1907 = tMVN
- { 1908, 2, 0, 128, "tNOP", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1908 = tNOP
- { 1909, 6, 2, 89, "tORR", 0|(1<<TID::Predicable)|(1<<TID::Commutable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo156 }, // Inst #1909 = tORR
- { 1910, 3, 1, 89, "tPICADD", 0|(1<<TID::NotDuplicable), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo163 }, // Inst #1910 = tPICADD
- { 1911, 3, 0, 0, "tPOP", 0|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|(4<<4)|(23<<9), ImplicitList2, ImplicitList2, NULL, OperandInfo177 }, // Inst #1911 = tPOP
- { 1912, 3, 0, 0, "tPOP_RET", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraDefRegAllocReq), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo177 }, // Inst #1912 = tPOP_RET
- { 1913, 3, 0, 0, "tPUSH", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|(4<<4)|(23<<9), ImplicitList2, ImplicitList2, NULL, OperandInfo177 }, // Inst #1913 = tPUSH
- { 1914, 4, 1, 125, "tREV", 0|(1<<TID::Predicable), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo166 }, // Inst #1914 = tREV
- { 1915, 4, 1, 125, "tREV16", 0|(1<<TID::Predicable), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo166 }, // Inst #1915 = tREV16
- { 1916, 4, 1, 125, "tREVSH", 0|(1<<TID::Predicable), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo166 }, // Inst #1916 = tREVSH
- { 1917, 6, 2, 114, "tROR", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo156 }, // Inst #1917 = tROR
- { 1918, 5, 2, 88, "tRSB", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo176 }, // Inst #1918 = tRSB
- { 1919, 5, 1, 101, "tRestore", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|10|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo171 }, // Inst #1919 = tRestore
- { 1920, 6, 2, 89, "tSBC", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), ImplicitList1, NULL, NULL, OperandInfo156 }, // Inst #1920 = tSBC
- { 1921, 0, 0, 128, "tSETENDBE", 0|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, 0 }, // Inst #1921 = tSETENDBE
- { 1922, 0, 0, 128, "tSETENDLE", 0|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, 0 }, // Inst #1922 = tSETENDLE
- { 1923, 2, 0, 128, "tSEV", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1923 = tSEV
- { 1924, 5, 0, 120, "tSTM", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::ExtraSrcRegAllocReq), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo35 }, // Inst #1924 = tSTM
- { 1925, 6, 0, 121, "tSTR", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|9|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo169 }, // Inst #1925 = tSTR
- { 1926, 6, 0, 121, "tSTRB", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|7|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo169 }, // Inst #1926 = tSTRB
- { 1927, 6, 0, 121, "tSTRBi", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|7|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo169 }, // Inst #1927 = tSTRBi
- { 1928, 6, 0, 121, "tSTRH", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|8|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo169 }, // Inst #1928 = tSTRH
- { 1929, 6, 0, 121, "tSTRHi", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|8|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo169 }, // Inst #1929 = tSTRHi
- { 1930, 6, 0, 121, "tSTRi", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|9|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo169 }, // Inst #1930 = tSTRi
- { 1931, 5, 0, 118, "tSTRspi", 0|(1<<TID::MayStore)|(1<<TID::Predicable), 0|10|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo171 }, // Inst #1931 = tSTRspi
- { 1932, 6, 2, 88, "tSUBi3", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo157 }, // Inst #1932 = tSUBi3
- { 1933, 6, 2, 88, "tSUBi8", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo158 }, // Inst #1933 = tSUBi8
- { 1934, 6, 2, 89, "tSUBrr", 0|(1<<TID::Predicable)|(1<<TID::HasOptionalDef), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo162 }, // Inst #1934 = tSUBrr
- { 1935, 3, 1, 88, "tSUBspi", 0|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo163 }, // Inst #1935 = tSUBspi
- { 1936, 3, 1, 128, "tSUBspi_", 0|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0|(1<<4), NULL, NULL, NULL, OperandInfo2 }, // Inst #1936 = tSUBspi_
- { 1937, 3, 0, 0, "tSVC", 0|(1<<TID::Call)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo11 }, // Inst #1937 = tSVC
- { 1938, 4, 1, 125, "tSXTB", 0|(1<<TID::Predicable), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo166 }, // Inst #1938 = tSXTB
- { 1939, 4, 1, 125, "tSXTH", 0|(1<<TID::Predicable), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo166 }, // Inst #1939 = tSXTH
- { 1940, 5, 0, 118, "tSpill", 0|(1<<TID::MayStore)|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|10|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo171 }, // Inst #1940 = tSpill
- { 1941, 0, 0, 0, "tTPsoft", 0|(1<<TID::Call), 0|(3<<4)|(23<<9), NULL, ImplicitList10, NULL, 0 }, // Inst #1941 = tTPsoft
- { 1942, 0, 0, 0, "tTRAP", 0|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, 0 }, // Inst #1942 = tTRAP
- { 1943, 4, 0, 98, "tTST", 0|(1<<TID::Predicable)|(1<<TID::Commutable), 0|(4<<4)|(23<<9), NULL, ImplicitList1, Barriers1, OperandInfo166 }, // Inst #1943 = tTST
- { 1944, 4, 1, 125, "tUXTB", 0|(1<<TID::Predicable), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo166 }, // Inst #1944 = tUXTB
- { 1945, 4, 1, 125, "tUXTH", 0|(1<<TID::Predicable), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo166 }, // Inst #1945 = tUXTH
- { 1946, 2, 0, 128, "tWFE", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1946 = tWFE
- { 1947, 2, 0, 128, "tWFI", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1947 = tWFI
- { 1948, 2, 0, 128, "tYIELD", 0|(1<<TID::Predicable)|(1<<TID::UnmodeledSideEffects), 0|(4<<4)|(23<<9), NULL, NULL, NULL, OperandInfo22 }, // Inst #1948 = tYIELD
-};
-} // End llvm namespace
diff --git a/libclamav/c++/ARMGenInstrNames.inc b/libclamav/c++/ARMGenInstrNames.inc
deleted file mode 100644
index db1f6bb..0000000
--- a/libclamav/c++/ARMGenInstrNames.inc
+++ /dev/null
@@ -1,1965 +0,0 @@
-//===- TableGen'erated file -------------------------------------*- C++ -*-===//
-//
-// Target Instruction Enum Values
-//
-// Automatically generated file, do not edit!
-//
-//===----------------------------------------------------------------------===//
-
-namespace llvm {
-
-namespace ARM {
- enum {
- PHI = 0,
- INLINEASM = 1,
- DBG_LABEL = 2,
- EH_LABEL = 3,
- GC_LABEL = 4,
- KILL = 5,
- EXTRACT_SUBREG = 6,
- INSERT_SUBREG = 7,
- IMPLICIT_DEF = 8,
- SUBREG_TO_REG = 9,
- COPY_TO_REGCLASS = 10,
- DBG_VALUE = 11,
- ADCSSri = 12,
- ADCSSrr = 13,
- ADCSSrs = 14,
- ADCri = 15,
- ADCrr = 16,
- ADCrs = 17,
- ADDSri = 18,
- ADDSrr = 19,
- ADDSrs = 20,
- ADDri = 21,
- ADDrr = 22,
- ADDrs = 23,
- ADJCALLSTACKDOWN = 24,
- ADJCALLSTACKUP = 25,
- ANDri = 26,
- ANDrr = 27,
- ANDrs = 28,
- ATOMIC_CMP_SWAP_I16 = 29,
- ATOMIC_CMP_SWAP_I32 = 30,
- ATOMIC_CMP_SWAP_I8 = 31,
- ATOMIC_LOAD_ADD_I16 = 32,
- ATOMIC_LOAD_ADD_I32 = 33,
- ATOMIC_LOAD_ADD_I8 = 34,
- ATOMIC_LOAD_AND_I16 = 35,
- ATOMIC_LOAD_AND_I32 = 36,
- ATOMIC_LOAD_AND_I8 = 37,
- ATOMIC_LOAD_NAND_I16 = 38,
- ATOMIC_LOAD_NAND_I32 = 39,
- ATOMIC_LOAD_NAND_I8 = 40,
- ATOMIC_LOAD_OR_I16 = 41,
- ATOMIC_LOAD_OR_I32 = 42,
- ATOMIC_LOAD_OR_I8 = 43,
- ATOMIC_LOAD_SUB_I16 = 44,
- ATOMIC_LOAD_SUB_I32 = 45,
- ATOMIC_LOAD_SUB_I8 = 46,
- ATOMIC_LOAD_XOR_I16 = 47,
- ATOMIC_LOAD_XOR_I32 = 48,
- ATOMIC_LOAD_XOR_I8 = 49,
- ATOMIC_SWAP_I16 = 50,
- ATOMIC_SWAP_I32 = 51,
- ATOMIC_SWAP_I8 = 52,
- B = 53,
- BFC = 54,
- BFI = 55,
- BICri = 56,
- BICrr = 57,
- BICrs = 58,
- BKPT = 59,
- BL = 60,
- BLX = 61,
- BLXr9 = 62,
- BL_pred = 63,
- BLr9 = 64,
- BLr9_pred = 65,
- BMOVPCRX = 66,
- BMOVPCRXr9 = 67,
- BRIND = 68,
- BR_JTadd = 69,
- BR_JTm = 70,
- BR_JTr = 71,
- BX = 72,
- BXJ = 73,
- BX_RET = 74,
- BXr9 = 75,
- Bcc = 76,
- CDP = 77,
- CDP2 = 78,
- CLREX = 79,
- CLZ = 80,
- CMNzri = 81,
- CMNzrr = 82,
- CMNzrs = 83,
- CMPri = 84,
- CMPrr = 85,
- CMPrs = 86,
- CMPzri = 87,
- CMPzrr = 88,
- CMPzrs = 89,
- CONSTPOOL_ENTRY = 90,
- CPS = 91,
- DBG = 92,
- DMBish = 93,
- DMBishst = 94,
- DMBnsh = 95,
- DMBnshst = 96,
- DMBosh = 97,
- DMBoshst = 98,
- DMBst = 99,
- DSBish = 100,
- DSBishst = 101,
- DSBnsh = 102,
- DSBnshst = 103,
- DSBosh = 104,
- DSBoshst = 105,
- DSBst = 106,
- EORri = 107,
- EORrr = 108,
- EORrs = 109,
- FCONSTD = 110,
- FCONSTS = 111,
- FMSTAT = 112,
- ISBsy = 113,
- Int_MemBarrierV6 = 114,
- Int_MemBarrierV7 = 115,
- Int_SyncBarrierV6 = 116,
- Int_SyncBarrierV7 = 117,
- Int_eh_sjlj_setjmp = 118,
- LDC2L_OFFSET = 119,
- LDC2L_OPTION = 120,
- LDC2L_POST = 121,
- LDC2L_PRE = 122,
- LDC2_OFFSET = 123,
- LDC2_OPTION = 124,
- LDC2_POST = 125,
- LDC2_PRE = 126,
- LDCL_OFFSET = 127,
- LDCL_OPTION = 128,
- LDCL_POST = 129,
- LDCL_PRE = 130,
- LDC_OFFSET = 131,
- LDC_OPTION = 132,
- LDC_POST = 133,
- LDC_PRE = 134,
- LDM = 135,
- LDM_RET = 136,
- LDR = 137,
- LDRB = 138,
- LDRBT = 139,
- LDRB_POST = 140,
- LDRB_PRE = 141,
- LDRD = 142,
- LDRD_POST = 143,
- LDRD_PRE = 144,
- LDREX = 145,
- LDREXB = 146,
- LDREXD = 147,
- LDREXH = 148,
- LDRH = 149,
- LDRHT = 150,
- LDRH_POST = 151,
- LDRH_PRE = 152,
- LDRSB = 153,
- LDRSBT = 154,
- LDRSB_POST = 155,
- LDRSB_PRE = 156,
- LDRSH = 157,
- LDRSHT = 158,
- LDRSH_POST = 159,
- LDRSH_PRE = 160,
- LDRT = 161,
- LDR_POST = 162,
- LDR_PRE = 163,
- LDRcp = 164,
- LEApcrel = 165,
- LEApcrelJT = 166,
- MCR = 167,
- MCR2 = 168,
- MCRR = 169,
- MCRR2 = 170,
- MLA = 171,
- MLS = 172,
- MOVCCi = 173,
- MOVCCr = 174,
- MOVCCs = 175,
- MOVPCLR = 176,
- MOVPCRX = 177,
- MOVTi16 = 178,
- MOVi = 179,
- MOVi16 = 180,
- MOVi2pieces = 181,
- MOVi32imm = 182,
- MOVr = 183,
- MOVrx = 184,
- MOVs = 185,
- MOVsra_flag = 186,
- MOVsrl_flag = 187,
- MRC = 188,
- MRC2 = 189,
- MRRC = 190,
- MRRC2 = 191,
- MRS = 192,
- MRSsys = 193,
- MSR = 194,
- MSRi = 195,
- MSRsys = 196,
- MSRsysi = 197,
- MUL = 198,
- MVNi = 199,
- MVNr = 200,
- MVNs = 201,
- NOP = 202,
- ORRri = 203,
- ORRrr = 204,
- ORRrs = 205,
- PICADD = 206,
- PICLDR = 207,
- PICLDRB = 208,
- PICLDRH = 209,
- PICLDRSB = 210,
- PICLDRSH = 211,
- PICSTR = 212,
- PICSTRB = 213,
- PICSTRH = 214,
- PKHBT = 215,
- PKHTB = 216,
- PLDWi = 217,
- PLDWr = 218,
- PLDi = 219,
- PLDr = 220,
- PLIi = 221,
- PLIr = 222,
- QADD = 223,
- QADD16 = 224,
- QADD8 = 225,
- QASX = 226,
- QDADD = 227,
- QDSUB = 228,
- QSAX = 229,
- QSUB = 230,
- QSUB16 = 231,
- QSUB8 = 232,
- RBIT = 233,
- REV = 234,
- REV16 = 235,
- REVSH = 236,
- RFE = 237,
- RFEW = 238,
- RSBSri = 239,
- RSBSrs = 240,
- RSBri = 241,
- RSBrs = 242,
- RSCSri = 243,
- RSCSrs = 244,
- RSCri = 245,
- RSCrs = 246,
- SADD16 = 247,
- SADD8 = 248,
- SASX = 249,
- SBCSSri = 250,
- SBCSSrr = 251,
- SBCSSrs = 252,
- SBCri = 253,
- SBCrr = 254,
- SBCrs = 255,
- SBFX = 256,
- SEL = 257,
- SETENDBE = 258,
- SETENDLE = 259,
- SEV = 260,
- SHADD16 = 261,
- SHADD8 = 262,
- SHASX = 263,
- SHSAX = 264,
- SHSUB16 = 265,
- SHSUB8 = 266,
- SMC = 267,
- SMLABB = 268,
- SMLABT = 269,
- SMLAD = 270,
- SMLADX = 271,
- SMLAL = 272,
- SMLALBB = 273,
- SMLALBT = 274,
- SMLALD = 275,
- SMLALDX = 276,
- SMLALTB = 277,
- SMLALTT = 278,
- SMLATB = 279,
- SMLATT = 280,
- SMLAWB = 281,
- SMLAWT = 282,
- SMLSD = 283,
- SMLSDX = 284,
- SMLSLD = 285,
- SMLSLDX = 286,
- SMMLA = 287,
- SMMLAR = 288,
- SMMLS = 289,
- SMMLSR = 290,
- SMMUL = 291,
- SMMULR = 292,
- SMUAD = 293,
- SMUADX = 294,
- SMULBB = 295,
- SMULBT = 296,
- SMULL = 297,
- SMULTB = 298,
- SMULTT = 299,
- SMULWB = 300,
- SMULWT = 301,
- SMUSD = 302,
- SMUSDX = 303,
- SRS = 304,
- SRSW = 305,
- SSAT16 = 306,
- SSATasr = 307,
- SSATlsl = 308,
- SSAX = 309,
- SSUB16 = 310,
- SSUB8 = 311,
- STC2L_OFFSET = 312,
- STC2L_OPTION = 313,
- STC2L_POST = 314,
- STC2L_PRE = 315,
- STC2_OFFSET = 316,
- STC2_OPTION = 317,
- STC2_POST = 318,
- STC2_PRE = 319,
- STCL_OFFSET = 320,
- STCL_OPTION = 321,
- STCL_POST = 322,
- STCL_PRE = 323,
- STC_OFFSET = 324,
- STC_OPTION = 325,
- STC_POST = 326,
- STC_PRE = 327,
- STM = 328,
- STR = 329,
- STRB = 330,
- STRBT = 331,
- STRB_POST = 332,
- STRB_PRE = 333,
- STRD = 334,
- STRD_POST = 335,
- STRD_PRE = 336,
- STREX = 337,
- STREXB = 338,
- STREXD = 339,
- STREXH = 340,
- STRH = 341,
- STRHT = 342,
- STRH_POST = 343,
- STRH_PRE = 344,
- STRT = 345,
- STR_POST = 346,
- STR_PRE = 347,
- SUBSri = 348,
- SUBSrr = 349,
- SUBSrs = 350,
- SUBri = 351,
- SUBrr = 352,
- SUBrs = 353,
- SVC = 354,
- SWP = 355,
- SWPB = 356,
- SXTAB16rr = 357,
- SXTAB16rr_rot = 358,
- SXTABrr = 359,
- SXTABrr_rot = 360,
- SXTAHrr = 361,
- SXTAHrr_rot = 362,
- SXTB16r = 363,
- SXTB16r_rot = 364,
- SXTBr = 365,
- SXTBr_rot = 366,
- SXTHr = 367,
- SXTHr_rot = 368,
- TEQri = 369,
- TEQrr = 370,
- TEQrs = 371,
- TPsoft = 372,
- TRAP = 373,
- TSTri = 374,
- TSTrr = 375,
- TSTrs = 376,
- UADD16 = 377,
- UADD8 = 378,
- UASX = 379,
- UBFX = 380,
- UHADD16 = 381,
- UHADD8 = 382,
- UHASX = 383,
- UHSAX = 384,
- UHSUB16 = 385,
- UHSUB8 = 386,
- UMAAL = 387,
- UMLAL = 388,
- UMULL = 389,
- UQADD16 = 390,
- UQADD8 = 391,
- UQASX = 392,
- UQSAX = 393,
- UQSUB16 = 394,
- UQSUB8 = 395,
- USAD8 = 396,
- USADA8 = 397,
- USAT16 = 398,
- USATasr = 399,
- USATlsl = 400,
- USAX = 401,
- USUB16 = 402,
- USUB8 = 403,
- UXTAB16rr = 404,
- UXTAB16rr_rot = 405,
- UXTABrr = 406,
- UXTABrr_rot = 407,
- UXTAHrr = 408,
- UXTAHrr_rot = 409,
- UXTB16r = 410,
- UXTB16r_rot = 411,
- UXTBr = 412,
- UXTBr_rot = 413,
- UXTHr = 414,
- UXTHr_rot = 415,
- VABALsv2i64 = 416,
- VABALsv4i32 = 417,
- VABALsv8i16 = 418,
- VABALuv2i64 = 419,
- VABALuv4i32 = 420,
- VABALuv8i16 = 421,
- VABAsv16i8 = 422,
- VABAsv2i32 = 423,
- VABAsv4i16 = 424,
- VABAsv4i32 = 425,
- VABAsv8i16 = 426,
- VABAsv8i8 = 427,
- VABAuv16i8 = 428,
- VABAuv2i32 = 429,
- VABAuv4i16 = 430,
- VABAuv4i32 = 431,
- VABAuv8i16 = 432,
- VABAuv8i8 = 433,
- VABDLsv2i64 = 434,
- VABDLsv4i32 = 435,
- VABDLsv8i16 = 436,
- VABDLuv2i64 = 437,
- VABDLuv4i32 = 438,
- VABDLuv8i16 = 439,
- VABDfd = 440,
- VABDfq = 441,
- VABDsv16i8 = 442,
- VABDsv2i32 = 443,
- VABDsv4i16 = 444,
- VABDsv4i32 = 445,
- VABDsv8i16 = 446,
- VABDsv8i8 = 447,
- VABDuv16i8 = 448,
- VABDuv2i32 = 449,
- VABDuv4i16 = 450,
- VABDuv4i32 = 451,
- VABDuv8i16 = 452,
- VABDuv8i8 = 453,
- VABSD = 454,
- VABSS = 455,
- VABSfd = 456,
- VABSfd_sfp = 457,
- VABSfq = 458,
- VABSv16i8 = 459,
- VABSv2i32 = 460,
- VABSv4i16 = 461,
- VABSv4i32 = 462,
- VABSv8i16 = 463,
- VABSv8i8 = 464,
- VACGEd = 465,
- VACGEq = 466,
- VACGTd = 467,
- VACGTq = 468,
- VADDD = 469,
- VADDHNv2i32 = 470,
- VADDHNv4i16 = 471,
- VADDHNv8i8 = 472,
- VADDLsv2i64 = 473,
- VADDLsv4i32 = 474,
- VADDLsv8i16 = 475,
- VADDLuv2i64 = 476,
- VADDLuv4i32 = 477,
- VADDLuv8i16 = 478,
- VADDS = 479,
- VADDWsv2i64 = 480,
- VADDWsv4i32 = 481,
- VADDWsv8i16 = 482,
- VADDWuv2i64 = 483,
- VADDWuv4i32 = 484,
- VADDWuv8i16 = 485,
- VADDfd = 486,
- VADDfd_sfp = 487,
- VADDfq = 488,
- VADDv16i8 = 489,
- VADDv1i64 = 490,
- VADDv2i32 = 491,
- VADDv2i64 = 492,
- VADDv4i16 = 493,
- VADDv4i32 = 494,
- VADDv8i16 = 495,
- VADDv8i8 = 496,
- VANDd = 497,
- VANDq = 498,
- VBICd = 499,
- VBICq = 500,
- VBIFd = 501,
- VBIFq = 502,
- VBITd = 503,
- VBITq = 504,
- VBSLd = 505,
- VBSLq = 506,
- VCEQfd = 507,
- VCEQfq = 508,
- VCEQv16i8 = 509,
- VCEQv2i32 = 510,
- VCEQv4i16 = 511,
- VCEQv4i32 = 512,
- VCEQv8i16 = 513,
- VCEQv8i8 = 514,
- VCEQzv16i8 = 515,
- VCEQzv2f32 = 516,
- VCEQzv2i32 = 517,
- VCEQzv4f32 = 518,
- VCEQzv4i16 = 519,
- VCEQzv4i32 = 520,
- VCEQzv8i16 = 521,
- VCEQzv8i8 = 522,
- VCGEfd = 523,
- VCGEfq = 524,
- VCGEsv16i8 = 525,
- VCGEsv2i32 = 526,
- VCGEsv4i16 = 527,
- VCGEsv4i32 = 528,
- VCGEsv8i16 = 529,
- VCGEsv8i8 = 530,
- VCGEuv16i8 = 531,
- VCGEuv2i32 = 532,
- VCGEuv4i16 = 533,
- VCGEuv4i32 = 534,
- VCGEuv8i16 = 535,
- VCGEuv8i8 = 536,
- VCGEzv16i8 = 537,
- VCGEzv2f32 = 538,
- VCGEzv2i32 = 539,
- VCGEzv4f32 = 540,
- VCGEzv4i16 = 541,
- VCGEzv4i32 = 542,
- VCGEzv8i16 = 543,
- VCGEzv8i8 = 544,
- VCGTfd = 545,
- VCGTfq = 546,
- VCGTsv16i8 = 547,
- VCGTsv2i32 = 548,
- VCGTsv4i16 = 549,
- VCGTsv4i32 = 550,
- VCGTsv8i16 = 551,
- VCGTsv8i8 = 552,
- VCGTuv16i8 = 553,
- VCGTuv2i32 = 554,
- VCGTuv4i16 = 555,
- VCGTuv4i32 = 556,
- VCGTuv8i16 = 557,
- VCGTuv8i8 = 558,
- VCGTzv16i8 = 559,
- VCGTzv2f32 = 560,
- VCGTzv2i32 = 561,
- VCGTzv4f32 = 562,
- VCGTzv4i16 = 563,
- VCGTzv4i32 = 564,
- VCGTzv8i16 = 565,
- VCGTzv8i8 = 566,
- VCLEzv16i8 = 567,
- VCLEzv2f32 = 568,
- VCLEzv2i32 = 569,
- VCLEzv4f32 = 570,
- VCLEzv4i16 = 571,
- VCLEzv4i32 = 572,
- VCLEzv8i16 = 573,
- VCLEzv8i8 = 574,
- VCLSv16i8 = 575,
- VCLSv2i32 = 576,
- VCLSv4i16 = 577,
- VCLSv4i32 = 578,
- VCLSv8i16 = 579,
- VCLSv8i8 = 580,
- VCLTzv16i8 = 581,
- VCLTzv2f32 = 582,
- VCLTzv2i32 = 583,
- VCLTzv4f32 = 584,
- VCLTzv4i16 = 585,
- VCLTzv4i32 = 586,
- VCLTzv8i16 = 587,
- VCLTzv8i8 = 588,
- VCLZv16i8 = 589,
- VCLZv2i32 = 590,
- VCLZv4i16 = 591,
- VCLZv4i32 = 592,
- VCLZv8i16 = 593,
- VCLZv8i8 = 594,
- VCMPD = 595,
- VCMPED = 596,
- VCMPES = 597,
- VCMPEZD = 598,
- VCMPEZS = 599,
- VCMPS = 600,
- VCMPZD = 601,
- VCMPZS = 602,
- VCNTd = 603,
- VCNTq = 604,
- VCVTBHS = 605,
- VCVTBSH = 606,
- VCVTDS = 607,
- VCVTSD = 608,
- VCVTTHS = 609,
- VCVTTSH = 610,
- VCVTf2sd = 611,
- VCVTf2sd_sfp = 612,
- VCVTf2sq = 613,
- VCVTf2ud = 614,
- VCVTf2ud_sfp = 615,
- VCVTf2uq = 616,
- VCVTf2xsd = 617,
- VCVTf2xsq = 618,
- VCVTf2xud = 619,
- VCVTf2xuq = 620,
- VCVTs2fd = 621,
- VCVTs2fd_sfp = 622,
- VCVTs2fq = 623,
- VCVTu2fd = 624,
- VCVTu2fd_sfp = 625,
- VCVTu2fq = 626,
- VCVTxs2fd = 627,
- VCVTxs2fq = 628,
- VCVTxu2fd = 629,
- VCVTxu2fq = 630,
- VDIVD = 631,
- VDIVS = 632,
- VDUP16d = 633,
- VDUP16q = 634,
- VDUP32d = 635,
- VDUP32q = 636,
- VDUP8d = 637,
- VDUP8q = 638,
- VDUPLN16d = 639,
- VDUPLN16q = 640,
- VDUPLN32d = 641,
- VDUPLN32q = 642,
- VDUPLN8d = 643,
- VDUPLN8q = 644,
- VDUPLNfd = 645,
- VDUPLNfq = 646,
- VDUPfd = 647,
- VDUPfdf = 648,
- VDUPfq = 649,
- VDUPfqf = 650,
- VEORd = 651,
- VEORq = 652,
- VEXTd16 = 653,
- VEXTd32 = 654,
- VEXTd8 = 655,
- VEXTdf = 656,
- VEXTq16 = 657,
- VEXTq32 = 658,
- VEXTq8 = 659,
- VEXTqf = 660,
- VGETLNi32 = 661,
- VGETLNs16 = 662,
- VGETLNs8 = 663,
- VGETLNu16 = 664,
- VGETLNu8 = 665,
- VHADDsv16i8 = 666,
- VHADDsv2i32 = 667,
- VHADDsv4i16 = 668,
- VHADDsv4i32 = 669,
- VHADDsv8i16 = 670,
- VHADDsv8i8 = 671,
- VHADDuv16i8 = 672,
- VHADDuv2i32 = 673,
- VHADDuv4i16 = 674,
- VHADDuv4i32 = 675,
- VHADDuv8i16 = 676,
- VHADDuv8i8 = 677,
- VHSUBsv16i8 = 678,
- VHSUBsv2i32 = 679,
- VHSUBsv4i16 = 680,
- VHSUBsv4i32 = 681,
- VHSUBsv8i16 = 682,
- VHSUBsv8i8 = 683,
- VHSUBuv16i8 = 684,
- VHSUBuv2i32 = 685,
- VHSUBuv4i16 = 686,
- VHSUBuv4i32 = 687,
- VHSUBuv8i16 = 688,
- VHSUBuv8i8 = 689,
- VLD1d16 = 690,
- VLD1d16Q = 691,
- VLD1d16T = 692,
- VLD1d32 = 693,
- VLD1d32Q = 694,
- VLD1d32T = 695,
- VLD1d64 = 696,
- VLD1d8 = 697,
- VLD1d8Q = 698,
- VLD1d8T = 699,
- VLD1df = 700,
- VLD1q16 = 701,
- VLD1q32 = 702,
- VLD1q64 = 703,
- VLD1q8 = 704,
- VLD1qf = 705,
- VLD2LNd16 = 706,
- VLD2LNd32 = 707,
- VLD2LNd8 = 708,
- VLD2LNq16a = 709,
- VLD2LNq16b = 710,
- VLD2LNq32a = 711,
- VLD2LNq32b = 712,
- VLD2d16 = 713,
- VLD2d16D = 714,
- VLD2d32 = 715,
- VLD2d32D = 716,
- VLD2d64 = 717,
- VLD2d8 = 718,
- VLD2d8D = 719,
- VLD2q16 = 720,
- VLD2q32 = 721,
- VLD2q8 = 722,
- VLD3LNd16 = 723,
- VLD3LNd32 = 724,
- VLD3LNd8 = 725,
- VLD3LNq16a = 726,
- VLD3LNq16b = 727,
- VLD3LNq32a = 728,
- VLD3LNq32b = 729,
- VLD3d16 = 730,
- VLD3d32 = 731,
- VLD3d64 = 732,
- VLD3d8 = 733,
- VLD3q16a = 734,
- VLD3q16b = 735,
- VLD3q32a = 736,
- VLD3q32b = 737,
- VLD3q8a = 738,
- VLD3q8b = 739,
- VLD4LNd16 = 740,
- VLD4LNd32 = 741,
- VLD4LNd8 = 742,
- VLD4LNq16a = 743,
- VLD4LNq16b = 744,
- VLD4LNq32a = 745,
- VLD4LNq32b = 746,
- VLD4d16 = 747,
- VLD4d32 = 748,
- VLD4d64 = 749,
- VLD4d8 = 750,
- VLD4q16a = 751,
- VLD4q16b = 752,
- VLD4q32a = 753,
- VLD4q32b = 754,
- VLD4q8a = 755,
- VLD4q8b = 756,
- VLDMD = 757,
- VLDMS = 758,
- VLDRD = 759,
- VLDRQ = 760,
- VLDRS = 761,
- VMAXfd = 762,
- VMAXfd_sfp = 763,
- VMAXfq = 764,
- VMAXsv16i8 = 765,
- VMAXsv2i32 = 766,
- VMAXsv4i16 = 767,
- VMAXsv4i32 = 768,
- VMAXsv8i16 = 769,
- VMAXsv8i8 = 770,
- VMAXuv16i8 = 771,
- VMAXuv2i32 = 772,
- VMAXuv4i16 = 773,
- VMAXuv4i32 = 774,
- VMAXuv8i16 = 775,
- VMAXuv8i8 = 776,
- VMINfd = 777,
- VMINfd_sfp = 778,
- VMINfq = 779,
- VMINsv16i8 = 780,
- VMINsv2i32 = 781,
- VMINsv4i16 = 782,
- VMINsv4i32 = 783,
- VMINsv8i16 = 784,
- VMINsv8i8 = 785,
- VMINuv16i8 = 786,
- VMINuv2i32 = 787,
- VMINuv4i16 = 788,
- VMINuv4i32 = 789,
- VMINuv8i16 = 790,
- VMINuv8i8 = 791,
- VMLAD = 792,
- VMLALslsv2i32 = 793,
- VMLALslsv4i16 = 794,
- VMLALsluv2i32 = 795,
- VMLALsluv4i16 = 796,
- VMLALsv2i64 = 797,
- VMLALsv4i32 = 798,
- VMLALsv8i16 = 799,
- VMLALuv2i64 = 800,
- VMLALuv4i32 = 801,
- VMLALuv8i16 = 802,
- VMLAS = 803,
- VMLAfd = 804,
- VMLAfq = 805,
- VMLAslfd = 806,
- VMLAslfq = 807,
- VMLAslv2i32 = 808,
- VMLAslv4i16 = 809,
- VMLAslv4i32 = 810,
- VMLAslv8i16 = 811,
- VMLAv16i8 = 812,
- VMLAv2i32 = 813,
- VMLAv4i16 = 814,
- VMLAv4i32 = 815,
- VMLAv8i16 = 816,
- VMLAv8i8 = 817,
- VMLSD = 818,
- VMLSLslsv2i32 = 819,
- VMLSLslsv4i16 = 820,
- VMLSLsluv2i32 = 821,
- VMLSLsluv4i16 = 822,
- VMLSLsv2i64 = 823,
- VMLSLsv4i32 = 824,
- VMLSLsv8i16 = 825,
- VMLSLuv2i64 = 826,
- VMLSLuv4i32 = 827,
- VMLSLuv8i16 = 828,
- VMLSS = 829,
- VMLSfd = 830,
- VMLSfq = 831,
- VMLSslfd = 832,
- VMLSslfq = 833,
- VMLSslv2i32 = 834,
- VMLSslv4i16 = 835,
- VMLSslv4i32 = 836,
- VMLSslv8i16 = 837,
- VMLSv16i8 = 838,
- VMLSv2i32 = 839,
- VMLSv4i16 = 840,
- VMLSv4i32 = 841,
- VMLSv8i16 = 842,
- VMLSv8i8 = 843,
- VMOVD = 844,
- VMOVDRR = 845,
- VMOVDcc = 846,
- VMOVDneon = 847,
- VMOVLsv2i64 = 848,
- VMOVLsv4i32 = 849,
- VMOVLsv8i16 = 850,
- VMOVLuv2i64 = 851,
- VMOVLuv4i32 = 852,
- VMOVLuv8i16 = 853,
- VMOVNv2i32 = 854,
- VMOVNv4i16 = 855,
- VMOVNv8i8 = 856,
- VMOVQ = 857,
- VMOVRRD = 858,
- VMOVRRS = 859,
- VMOVRS = 860,
- VMOVS = 861,
- VMOVSR = 862,
- VMOVSRR = 863,
- VMOVScc = 864,
- VMOVv16i8 = 865,
- VMOVv1i64 = 866,
- VMOVv2i32 = 867,
- VMOVv2i64 = 868,
- VMOVv4i16 = 869,
- VMOVv4i32 = 870,
- VMOVv8i16 = 871,
- VMOVv8i8 = 872,
- VMRS = 873,
- VMSR = 874,
- VMULD = 875,
- VMULLp = 876,
- VMULLslsv2i32 = 877,
- VMULLslsv4i16 = 878,
- VMULLsluv2i32 = 879,
- VMULLsluv4i16 = 880,
- VMULLsv2i64 = 881,
- VMULLsv4i32 = 882,
- VMULLsv8i16 = 883,
- VMULLuv2i64 = 884,
- VMULLuv4i32 = 885,
- VMULLuv8i16 = 886,
- VMULS = 887,
- VMULfd = 888,
- VMULfd_sfp = 889,
- VMULfq = 890,
- VMULpd = 891,
- VMULpq = 892,
- VMULslfd = 893,
- VMULslfq = 894,
- VMULslv2i32 = 895,
- VMULslv4i16 = 896,
- VMULslv4i32 = 897,
- VMULslv8i16 = 898,
- VMULv16i8 = 899,
- VMULv2i32 = 900,
- VMULv4i16 = 901,
- VMULv4i32 = 902,
- VMULv8i16 = 903,
- VMULv8i8 = 904,
- VMVNd = 905,
- VMVNq = 906,
- VNEGD = 907,
- VNEGDcc = 908,
- VNEGS = 909,
- VNEGScc = 910,
- VNEGf32q = 911,
- VNEGfd = 912,
- VNEGfd_sfp = 913,
- VNEGs16d = 914,
- VNEGs16q = 915,
- VNEGs32d = 916,
- VNEGs32q = 917,
- VNEGs8d = 918,
- VNEGs8q = 919,
- VNMLAD = 920,
- VNMLAS = 921,
- VNMLSD = 922,
- VNMLSS = 923,
- VNMULD = 924,
- VNMULS = 925,
- VORNd = 926,
- VORNq = 927,
- VORRd = 928,
- VORRq = 929,
- VPADALsv16i8 = 930,
- VPADALsv2i32 = 931,
- VPADALsv4i16 = 932,
- VPADALsv4i32 = 933,
- VPADALsv8i16 = 934,
- VPADALsv8i8 = 935,
- VPADALuv16i8 = 936,
- VPADALuv2i32 = 937,
- VPADALuv4i16 = 938,
- VPADALuv4i32 = 939,
- VPADALuv8i16 = 940,
- VPADALuv8i8 = 941,
- VPADDLsv16i8 = 942,
- VPADDLsv2i32 = 943,
- VPADDLsv4i16 = 944,
- VPADDLsv4i32 = 945,
- VPADDLsv8i16 = 946,
- VPADDLsv8i8 = 947,
- VPADDLuv16i8 = 948,
- VPADDLuv2i32 = 949,
- VPADDLuv4i16 = 950,
- VPADDLuv4i32 = 951,
- VPADDLuv8i16 = 952,
- VPADDLuv8i8 = 953,
- VPADDf = 954,
- VPADDi16 = 955,
- VPADDi32 = 956,
- VPADDi8 = 957,
- VPMAXf = 958,
- VPMAXs16 = 959,
- VPMAXs32 = 960,
- VPMAXs8 = 961,
- VPMAXu16 = 962,
- VPMAXu32 = 963,
- VPMAXu8 = 964,
- VPMINf = 965,
- VPMINs16 = 966,
- VPMINs32 = 967,
- VPMINs8 = 968,
- VPMINu16 = 969,
- VPMINu32 = 970,
- VPMINu8 = 971,
- VQABSv16i8 = 972,
- VQABSv2i32 = 973,
- VQABSv4i16 = 974,
- VQABSv4i32 = 975,
- VQABSv8i16 = 976,
- VQABSv8i8 = 977,
- VQADDsv16i8 = 978,
- VQADDsv1i64 = 979,
- VQADDsv2i32 = 980,
- VQADDsv2i64 = 981,
- VQADDsv4i16 = 982,
- VQADDsv4i32 = 983,
- VQADDsv8i16 = 984,
- VQADDsv8i8 = 985,
- VQADDuv16i8 = 986,
- VQADDuv1i64 = 987,
- VQADDuv2i32 = 988,
- VQADDuv2i64 = 989,
- VQADDuv4i16 = 990,
- VQADDuv4i32 = 991,
- VQADDuv8i16 = 992,
- VQADDuv8i8 = 993,
- VQDMLALslv2i32 = 994,
- VQDMLALslv4i16 = 995,
- VQDMLALv2i64 = 996,
- VQDMLALv4i32 = 997,
- VQDMLSLslv2i32 = 998,
- VQDMLSLslv4i16 = 999,
- VQDMLSLv2i64 = 1000,
- VQDMLSLv4i32 = 1001,
- VQDMULHslv2i32 = 1002,
- VQDMULHslv4i16 = 1003,
- VQDMULHslv4i32 = 1004,
- VQDMULHslv8i16 = 1005,
- VQDMULHv2i32 = 1006,
- VQDMULHv4i16 = 1007,
- VQDMULHv4i32 = 1008,
- VQDMULHv8i16 = 1009,
- VQDMULLslv2i32 = 1010,
- VQDMULLslv4i16 = 1011,
- VQDMULLv2i64 = 1012,
- VQDMULLv4i32 = 1013,
- VQMOVNsuv2i32 = 1014,
- VQMOVNsuv4i16 = 1015,
- VQMOVNsuv8i8 = 1016,
- VQMOVNsv2i32 = 1017,
- VQMOVNsv4i16 = 1018,
- VQMOVNsv8i8 = 1019,
- VQMOVNuv2i32 = 1020,
- VQMOVNuv4i16 = 1021,
- VQMOVNuv8i8 = 1022,
- VQNEGv16i8 = 1023,
- VQNEGv2i32 = 1024,
- VQNEGv4i16 = 1025,
- VQNEGv4i32 = 1026,
- VQNEGv8i16 = 1027,
- VQNEGv8i8 = 1028,
- VQRDMULHslv2i32 = 1029,
- VQRDMULHslv4i16 = 1030,
- VQRDMULHslv4i32 = 1031,
- VQRDMULHslv8i16 = 1032,
- VQRDMULHv2i32 = 1033,
- VQRDMULHv4i16 = 1034,
- VQRDMULHv4i32 = 1035,
- VQRDMULHv8i16 = 1036,
- VQRSHLsv16i8 = 1037,
- VQRSHLsv1i64 = 1038,
- VQRSHLsv2i32 = 1039,
- VQRSHLsv2i64 = 1040,
- VQRSHLsv4i16 = 1041,
- VQRSHLsv4i32 = 1042,
- VQRSHLsv8i16 = 1043,
- VQRSHLsv8i8 = 1044,
- VQRSHLuv16i8 = 1045,
- VQRSHLuv1i64 = 1046,
- VQRSHLuv2i32 = 1047,
- VQRSHLuv2i64 = 1048,
- VQRSHLuv4i16 = 1049,
- VQRSHLuv4i32 = 1050,
- VQRSHLuv8i16 = 1051,
- VQRSHLuv8i8 = 1052,
- VQRSHRNsv2i32 = 1053,
- VQRSHRNsv4i16 = 1054,
- VQRSHRNsv8i8 = 1055,
- VQRSHRNuv2i32 = 1056,
- VQRSHRNuv4i16 = 1057,
- VQRSHRNuv8i8 = 1058,
- VQRSHRUNv2i32 = 1059,
- VQRSHRUNv4i16 = 1060,
- VQRSHRUNv8i8 = 1061,
- VQSHLsiv16i8 = 1062,
- VQSHLsiv1i64 = 1063,
- VQSHLsiv2i32 = 1064,
- VQSHLsiv2i64 = 1065,
- VQSHLsiv4i16 = 1066,
- VQSHLsiv4i32 = 1067,
- VQSHLsiv8i16 = 1068,
- VQSHLsiv8i8 = 1069,
- VQSHLsuv16i8 = 1070,
- VQSHLsuv1i64 = 1071,
- VQSHLsuv2i32 = 1072,
- VQSHLsuv2i64 = 1073,
- VQSHLsuv4i16 = 1074,
- VQSHLsuv4i32 = 1075,
- VQSHLsuv8i16 = 1076,
- VQSHLsuv8i8 = 1077,
- VQSHLsv16i8 = 1078,
- VQSHLsv1i64 = 1079,
- VQSHLsv2i32 = 1080,
- VQSHLsv2i64 = 1081,
- VQSHLsv4i16 = 1082,
- VQSHLsv4i32 = 1083,
- VQSHLsv8i16 = 1084,
- VQSHLsv8i8 = 1085,
- VQSHLuiv16i8 = 1086,
- VQSHLuiv1i64 = 1087,
- VQSHLuiv2i32 = 1088,
- VQSHLuiv2i64 = 1089,
- VQSHLuiv4i16 = 1090,
- VQSHLuiv4i32 = 1091,
- VQSHLuiv8i16 = 1092,
- VQSHLuiv8i8 = 1093,
- VQSHLuv16i8 = 1094,
- VQSHLuv1i64 = 1095,
- VQSHLuv2i32 = 1096,
- VQSHLuv2i64 = 1097,
- VQSHLuv4i16 = 1098,
- VQSHLuv4i32 = 1099,
- VQSHLuv8i16 = 1100,
- VQSHLuv8i8 = 1101,
- VQSHRNsv2i32 = 1102,
- VQSHRNsv4i16 = 1103,
- VQSHRNsv8i8 = 1104,
- VQSHRNuv2i32 = 1105,
- VQSHRNuv4i16 = 1106,
- VQSHRNuv8i8 = 1107,
- VQSHRUNv2i32 = 1108,
- VQSHRUNv4i16 = 1109,
- VQSHRUNv8i8 = 1110,
- VQSUBsv16i8 = 1111,
- VQSUBsv1i64 = 1112,
- VQSUBsv2i32 = 1113,
- VQSUBsv2i64 = 1114,
- VQSUBsv4i16 = 1115,
- VQSUBsv4i32 = 1116,
- VQSUBsv8i16 = 1117,
- VQSUBsv8i8 = 1118,
- VQSUBuv16i8 = 1119,
- VQSUBuv1i64 = 1120,
- VQSUBuv2i32 = 1121,
- VQSUBuv2i64 = 1122,
- VQSUBuv4i16 = 1123,
- VQSUBuv4i32 = 1124,
- VQSUBuv8i16 = 1125,
- VQSUBuv8i8 = 1126,
- VRADDHNv2i32 = 1127,
- VRADDHNv4i16 = 1128,
- VRADDHNv8i8 = 1129,
- VRECPEd = 1130,
- VRECPEfd = 1131,
- VRECPEfq = 1132,
- VRECPEq = 1133,
- VRECPSfd = 1134,
- VRECPSfq = 1135,
- VREV16d8 = 1136,
- VREV16q8 = 1137,
- VREV32d16 = 1138,
- VREV32d8 = 1139,
- VREV32q16 = 1140,
- VREV32q8 = 1141,
- VREV64d16 = 1142,
- VREV64d32 = 1143,
- VREV64d8 = 1144,
- VREV64df = 1145,
- VREV64q16 = 1146,
- VREV64q32 = 1147,
- VREV64q8 = 1148,
- VREV64qf = 1149,
- VRHADDsv16i8 = 1150,
- VRHADDsv2i32 = 1151,
- VRHADDsv4i16 = 1152,
- VRHADDsv4i32 = 1153,
- VRHADDsv8i16 = 1154,
- VRHADDsv8i8 = 1155,
- VRHADDuv16i8 = 1156,
- VRHADDuv2i32 = 1157,
- VRHADDuv4i16 = 1158,
- VRHADDuv4i32 = 1159,
- VRHADDuv8i16 = 1160,
- VRHADDuv8i8 = 1161,
- VRSHLsv16i8 = 1162,
- VRSHLsv1i64 = 1163,
- VRSHLsv2i32 = 1164,
- VRSHLsv2i64 = 1165,
- VRSHLsv4i16 = 1166,
- VRSHLsv4i32 = 1167,
- VRSHLsv8i16 = 1168,
- VRSHLsv8i8 = 1169,
- VRSHLuv16i8 = 1170,
- VRSHLuv1i64 = 1171,
- VRSHLuv2i32 = 1172,
- VRSHLuv2i64 = 1173,
- VRSHLuv4i16 = 1174,
- VRSHLuv4i32 = 1175,
- VRSHLuv8i16 = 1176,
- VRSHLuv8i8 = 1177,
- VRSHRNv2i32 = 1178,
- VRSHRNv4i16 = 1179,
- VRSHRNv8i8 = 1180,
- VRSHRsv16i8 = 1181,
- VRSHRsv1i64 = 1182,
- VRSHRsv2i32 = 1183,
- VRSHRsv2i64 = 1184,
- VRSHRsv4i16 = 1185,
- VRSHRsv4i32 = 1186,
- VRSHRsv8i16 = 1187,
- VRSHRsv8i8 = 1188,
- VRSHRuv16i8 = 1189,
- VRSHRuv1i64 = 1190,
- VRSHRuv2i32 = 1191,
- VRSHRuv2i64 = 1192,
- VRSHRuv4i16 = 1193,
- VRSHRuv4i32 = 1194,
- VRSHRuv8i16 = 1195,
- VRSHRuv8i8 = 1196,
- VRSQRTEd = 1197,
- VRSQRTEfd = 1198,
- VRSQRTEfq = 1199,
- VRSQRTEq = 1200,
- VRSQRTSfd = 1201,
- VRSQRTSfq = 1202,
- VRSRAsv16i8 = 1203,
- VRSRAsv1i64 = 1204,
- VRSRAsv2i32 = 1205,
- VRSRAsv2i64 = 1206,
- VRSRAsv4i16 = 1207,
- VRSRAsv4i32 = 1208,
- VRSRAsv8i16 = 1209,
- VRSRAsv8i8 = 1210,
- VRSRAuv16i8 = 1211,
- VRSRAuv1i64 = 1212,
- VRSRAuv2i32 = 1213,
- VRSRAuv2i64 = 1214,
- VRSRAuv4i16 = 1215,
- VRSRAuv4i32 = 1216,
- VRSRAuv8i16 = 1217,
- VRSRAuv8i8 = 1218,
- VRSUBHNv2i32 = 1219,
- VRSUBHNv4i16 = 1220,
- VRSUBHNv8i8 = 1221,
- VSETLNi16 = 1222,
- VSETLNi32 = 1223,
- VSETLNi8 = 1224,
- VSHLLi16 = 1225,
- VSHLLi32 = 1226,
- VSHLLi8 = 1227,
- VSHLLsv2i64 = 1228,
- VSHLLsv4i32 = 1229,
- VSHLLsv8i16 = 1230,
- VSHLLuv2i64 = 1231,
- VSHLLuv4i32 = 1232,
- VSHLLuv8i16 = 1233,
- VSHLiv16i8 = 1234,
- VSHLiv1i64 = 1235,
- VSHLiv2i32 = 1236,
- VSHLiv2i64 = 1237,
- VSHLiv4i16 = 1238,
- VSHLiv4i32 = 1239,
- VSHLiv8i16 = 1240,
- VSHLiv8i8 = 1241,
- VSHLsv16i8 = 1242,
- VSHLsv1i64 = 1243,
- VSHLsv2i32 = 1244,
- VSHLsv2i64 = 1245,
- VSHLsv4i16 = 1246,
- VSHLsv4i32 = 1247,
- VSHLsv8i16 = 1248,
- VSHLsv8i8 = 1249,
- VSHLuv16i8 = 1250,
- VSHLuv1i64 = 1251,
- VSHLuv2i32 = 1252,
- VSHLuv2i64 = 1253,
- VSHLuv4i16 = 1254,
- VSHLuv4i32 = 1255,
- VSHLuv8i16 = 1256,
- VSHLuv8i8 = 1257,
- VSHRNv2i32 = 1258,
- VSHRNv4i16 = 1259,
- VSHRNv8i8 = 1260,
- VSHRsv16i8 = 1261,
- VSHRsv1i64 = 1262,
- VSHRsv2i32 = 1263,
- VSHRsv2i64 = 1264,
- VSHRsv4i16 = 1265,
- VSHRsv4i32 = 1266,
- VSHRsv8i16 = 1267,
- VSHRsv8i8 = 1268,
- VSHRuv16i8 = 1269,
- VSHRuv1i64 = 1270,
- VSHRuv2i32 = 1271,
- VSHRuv2i64 = 1272,
- VSHRuv4i16 = 1273,
- VSHRuv4i32 = 1274,
- VSHRuv8i16 = 1275,
- VSHRuv8i8 = 1276,
- VSHTOD = 1277,
- VSHTOS = 1278,
- VSITOD = 1279,
- VSITOS = 1280,
- VSLIv16i8 = 1281,
- VSLIv1i64 = 1282,
- VSLIv2i32 = 1283,
- VSLIv2i64 = 1284,
- VSLIv4i16 = 1285,
- VSLIv4i32 = 1286,
- VSLIv8i16 = 1287,
- VSLIv8i8 = 1288,
- VSLTOD = 1289,
- VSLTOS = 1290,
- VSQRTD = 1291,
- VSQRTS = 1292,
- VSRAsv16i8 = 1293,
- VSRAsv1i64 = 1294,
- VSRAsv2i32 = 1295,
- VSRAsv2i64 = 1296,
- VSRAsv4i16 = 1297,
- VSRAsv4i32 = 1298,
- VSRAsv8i16 = 1299,
- VSRAsv8i8 = 1300,
- VSRAuv16i8 = 1301,
- VSRAuv1i64 = 1302,
- VSRAuv2i32 = 1303,
- VSRAuv2i64 = 1304,
- VSRAuv4i16 = 1305,
- VSRAuv4i32 = 1306,
- VSRAuv8i16 = 1307,
- VSRAuv8i8 = 1308,
- VSRIv16i8 = 1309,
- VSRIv1i64 = 1310,
- VSRIv2i32 = 1311,
- VSRIv2i64 = 1312,
- VSRIv4i16 = 1313,
- VSRIv4i32 = 1314,
- VSRIv8i16 = 1315,
- VSRIv8i8 = 1316,
- VST1d16 = 1317,
- VST1d16Q = 1318,
- VST1d16T = 1319,
- VST1d32 = 1320,
- VST1d32Q = 1321,
- VST1d32T = 1322,
- VST1d64 = 1323,
- VST1d8 = 1324,
- VST1d8Q = 1325,
- VST1d8T = 1326,
- VST1df = 1327,
- VST1q16 = 1328,
- VST1q32 = 1329,
- VST1q64 = 1330,
- VST1q8 = 1331,
- VST1qf = 1332,
- VST2LNd16 = 1333,
- VST2LNd32 = 1334,
- VST2LNd8 = 1335,
- VST2LNq16a = 1336,
- VST2LNq16b = 1337,
- VST2LNq32a = 1338,
- VST2LNq32b = 1339,
- VST2d16 = 1340,
- VST2d16D = 1341,
- VST2d32 = 1342,
- VST2d32D = 1343,
- VST2d64 = 1344,
- VST2d8 = 1345,
- VST2d8D = 1346,
- VST2q16 = 1347,
- VST2q32 = 1348,
- VST2q8 = 1349,
- VST3LNd16 = 1350,
- VST3LNd32 = 1351,
- VST3LNd8 = 1352,
- VST3LNq16a = 1353,
- VST3LNq16b = 1354,
- VST3LNq32a = 1355,
- VST3LNq32b = 1356,
- VST3d16 = 1357,
- VST3d32 = 1358,
- VST3d64 = 1359,
- VST3d8 = 1360,
- VST3q16a = 1361,
- VST3q16b = 1362,
- VST3q32a = 1363,
- VST3q32b = 1364,
- VST3q8a = 1365,
- VST3q8b = 1366,
- VST4LNd16 = 1367,
- VST4LNd32 = 1368,
- VST4LNd8 = 1369,
- VST4LNq16a = 1370,
- VST4LNq16b = 1371,
- VST4LNq32a = 1372,
- VST4LNq32b = 1373,
- VST4d16 = 1374,
- VST4d32 = 1375,
- VST4d64 = 1376,
- VST4d8 = 1377,
- VST4q16a = 1378,
- VST4q16b = 1379,
- VST4q32a = 1380,
- VST4q32b = 1381,
- VST4q8a = 1382,
- VST4q8b = 1383,
- VSTMD = 1384,
- VSTMS = 1385,
- VSTRD = 1386,
- VSTRQ = 1387,
- VSTRS = 1388,
- VSUBD = 1389,
- VSUBHNv2i32 = 1390,
- VSUBHNv4i16 = 1391,
- VSUBHNv8i8 = 1392,
- VSUBLsv2i64 = 1393,
- VSUBLsv4i32 = 1394,
- VSUBLsv8i16 = 1395,
- VSUBLuv2i64 = 1396,
- VSUBLuv4i32 = 1397,
- VSUBLuv8i16 = 1398,
- VSUBS = 1399,
- VSUBWsv2i64 = 1400,
- VSUBWsv4i32 = 1401,
- VSUBWsv8i16 = 1402,
- VSUBWuv2i64 = 1403,
- VSUBWuv4i32 = 1404,
- VSUBWuv8i16 = 1405,
- VSUBfd = 1406,
- VSUBfd_sfp = 1407,
- VSUBfq = 1408,
- VSUBv16i8 = 1409,
- VSUBv1i64 = 1410,
- VSUBv2i32 = 1411,
- VSUBv2i64 = 1412,
- VSUBv4i16 = 1413,
- VSUBv4i32 = 1414,
- VSUBv8i16 = 1415,
- VSUBv8i8 = 1416,
- VSWPd = 1417,
- VSWPq = 1418,
- VTBL1 = 1419,
- VTBL2 = 1420,
- VTBL3 = 1421,
- VTBL4 = 1422,
- VTBX1 = 1423,
- VTBX2 = 1424,
- VTBX3 = 1425,
- VTBX4 = 1426,
- VTOSHD = 1427,
- VTOSHS = 1428,
- VTOSIRD = 1429,
- VTOSIRS = 1430,
- VTOSIZD = 1431,
- VTOSIZS = 1432,
- VTOSLD = 1433,
- VTOSLS = 1434,
- VTOUHD = 1435,
- VTOUHS = 1436,
- VTOUIRD = 1437,
- VTOUIRS = 1438,
- VTOUIZD = 1439,
- VTOUIZS = 1440,
- VTOULD = 1441,
- VTOULS = 1442,
- VTRNd16 = 1443,
- VTRNd32 = 1444,
- VTRNd8 = 1445,
- VTRNq16 = 1446,
- VTRNq32 = 1447,
- VTRNq8 = 1448,
- VTSTv16i8 = 1449,
- VTSTv2i32 = 1450,
- VTSTv4i16 = 1451,
- VTSTv4i32 = 1452,
- VTSTv8i16 = 1453,
- VTSTv8i8 = 1454,
- VUHTOD = 1455,
- VUHTOS = 1456,
- VUITOD = 1457,
- VUITOS = 1458,
- VULTOD = 1459,
- VULTOS = 1460,
- VUZPd16 = 1461,
- VUZPd32 = 1462,
- VUZPd8 = 1463,
- VUZPq16 = 1464,
- VUZPq32 = 1465,
- VUZPq8 = 1466,
- VZIPd16 = 1467,
- VZIPd32 = 1468,
- VZIPd8 = 1469,
- VZIPq16 = 1470,
- VZIPq32 = 1471,
- VZIPq8 = 1472,
- WFE = 1473,
- WFI = 1474,
- YIELD = 1475,
- t2ADCSri = 1476,
- t2ADCSrr = 1477,
- t2ADCSrs = 1478,
- t2ADCri = 1479,
- t2ADCrr = 1480,
- t2ADCrs = 1481,
- t2ADDSri = 1482,
- t2ADDSrr = 1483,
- t2ADDSrs = 1484,
- t2ADDrSPi = 1485,
- t2ADDrSPi12 = 1486,
- t2ADDrSPs = 1487,
- t2ADDri = 1488,
- t2ADDri12 = 1489,
- t2ADDrr = 1490,
- t2ADDrs = 1491,
- t2ANDri = 1492,
- t2ANDrr = 1493,
- t2ANDrs = 1494,
- t2ASRri = 1495,
- t2ASRrr = 1496,
- t2B = 1497,
- t2BFC = 1498,
- t2BFI = 1499,
- t2BICri = 1500,
- t2BICrr = 1501,
- t2BICrs = 1502,
- t2BR_JT = 1503,
- t2BXJ = 1504,
- t2Bcc = 1505,
- t2CLREX = 1506,
- t2CLZ = 1507,
- t2CMNzri = 1508,
- t2CMNzrr = 1509,
- t2CMNzrs = 1510,
- t2CMPri = 1511,
- t2CMPrr = 1512,
- t2CMPrs = 1513,
- t2CMPzri = 1514,
- t2CMPzrr = 1515,
- t2CMPzrs = 1516,
- t2CPS = 1517,
- t2DBG = 1518,
- t2DMBish = 1519,
- t2DMBishst = 1520,
- t2DMBnsh = 1521,
- t2DMBnshst = 1522,
- t2DMBosh = 1523,
- t2DMBoshst = 1524,
- t2DMBst = 1525,
- t2DSBish = 1526,
- t2DSBishst = 1527,
- t2DSBnsh = 1528,
- t2DSBnshst = 1529,
- t2DSBosh = 1530,
- t2DSBoshst = 1531,
- t2DSBst = 1532,
- t2EORri = 1533,
- t2EORrr = 1534,
- t2EORrs = 1535,
- t2ISBsy = 1536,
- t2IT = 1537,
- t2Int_MemBarrierV7 = 1538,
- t2Int_SyncBarrierV7 = 1539,
- t2Int_eh_sjlj_setjmp = 1540,
- t2LDM = 1541,
- t2LDM_RET = 1542,
- t2LDRBT = 1543,
- t2LDRB_POST = 1544,
- t2LDRB_PRE = 1545,
- t2LDRBi12 = 1546,
- t2LDRBi8 = 1547,
- t2LDRBpci = 1548,
- t2LDRBs = 1549,
- t2LDRDi8 = 1550,
- t2LDRDpci = 1551,
- t2LDREX = 1552,
- t2LDREXB = 1553,
- t2LDREXD = 1554,
- t2LDREXH = 1555,
- t2LDRHT = 1556,
- t2LDRH_POST = 1557,
- t2LDRH_PRE = 1558,
- t2LDRHi12 = 1559,
- t2LDRHi8 = 1560,
- t2LDRHpci = 1561,
- t2LDRHs = 1562,
- t2LDRSBT = 1563,
- t2LDRSB_POST = 1564,
- t2LDRSB_PRE = 1565,
- t2LDRSBi12 = 1566,
- t2LDRSBi8 = 1567,
- t2LDRSBpci = 1568,
- t2LDRSBs = 1569,
- t2LDRSHT = 1570,
- t2LDRSH_POST = 1571,
- t2LDRSH_PRE = 1572,
- t2LDRSHi12 = 1573,
- t2LDRSHi8 = 1574,
- t2LDRSHpci = 1575,
- t2LDRSHs = 1576,
- t2LDRT = 1577,
- t2LDR_POST = 1578,
- t2LDR_PRE = 1579,
- t2LDRi12 = 1580,
- t2LDRi8 = 1581,
- t2LDRpci = 1582,
- t2LDRpci_pic = 1583,
- t2LDRs = 1584,
- t2LEApcrel = 1585,
- t2LEApcrelJT = 1586,
- t2LSLri = 1587,
- t2LSLrr = 1588,
- t2LSRri = 1589,
- t2LSRrr = 1590,
- t2MLA = 1591,
- t2MLS = 1592,
- t2MOVCCasr = 1593,
- t2MOVCCi = 1594,
- t2MOVCClsl = 1595,
- t2MOVCClsr = 1596,
- t2MOVCCr = 1597,
- t2MOVCCror = 1598,
- t2MOVTi16 = 1599,
- t2MOVi = 1600,
- t2MOVi16 = 1601,
- t2MOVi32imm = 1602,
- t2MOVr = 1603,
- t2MOVrx = 1604,
- t2MOVsra_flag = 1605,
- t2MOVsrl_flag = 1606,
- t2MRS = 1607,
- t2MRSsys = 1608,
- t2MSR = 1609,
- t2MSRsys = 1610,
- t2MUL = 1611,
- t2MVNi = 1612,
- t2MVNr = 1613,
- t2MVNs = 1614,
- t2NOP = 1615,
- t2ORNri = 1616,
- t2ORNrr = 1617,
- t2ORNrs = 1618,
- t2ORRri = 1619,
- t2ORRrr = 1620,
- t2ORRrs = 1621,
- t2PKHBT = 1622,
- t2PKHTB = 1623,
- t2PLDWi12 = 1624,
- t2PLDWi8 = 1625,
- t2PLDWpci = 1626,
- t2PLDWr = 1627,
- t2PLDWs = 1628,
- t2PLDi12 = 1629,
- t2PLDi8 = 1630,
- t2PLDpci = 1631,
- t2PLDr = 1632,
- t2PLDs = 1633,
- t2PLIi12 = 1634,
- t2PLIi8 = 1635,
- t2PLIpci = 1636,
- t2PLIr = 1637,
- t2PLIs = 1638,
- t2QADD = 1639,
- t2QADD16 = 1640,
- t2QADD8 = 1641,
- t2QASX = 1642,
- t2QDADD = 1643,
- t2QDSUB = 1644,
- t2QSAX = 1645,
- t2QSUB = 1646,
- t2QSUB16 = 1647,
- t2QSUB8 = 1648,
- t2RBIT = 1649,
- t2REV = 1650,
- t2REV16 = 1651,
- t2REVSH = 1652,
- t2RFEDB = 1653,
- t2RFEDBW = 1654,
- t2RFEIA = 1655,
- t2RFEIAW = 1656,
- t2RORri = 1657,
- t2RORrr = 1658,
- t2RSBSri = 1659,
- t2RSBSrs = 1660,
- t2RSBri = 1661,
- t2RSBrs = 1662,
- t2SADD16 = 1663,
- t2SADD8 = 1664,
- t2SASX = 1665,
- t2SBCSri = 1666,
- t2SBCSrr = 1667,
- t2SBCSrs = 1668,
- t2SBCri = 1669,
- t2SBCrr = 1670,
- t2SBCrs = 1671,
- t2SBFX = 1672,
- t2SDIV = 1673,
- t2SEL = 1674,
- t2SEV = 1675,
- t2SHADD16 = 1676,
- t2SHADD8 = 1677,
- t2SHASX = 1678,
- t2SHSAX = 1679,
- t2SHSUB16 = 1680,
- t2SHSUB8 = 1681,
- t2SMC = 1682,
- t2SMLABB = 1683,
- t2SMLABT = 1684,
- t2SMLAD = 1685,
- t2SMLADX = 1686,
- t2SMLAL = 1687,
- t2SMLALBB = 1688,
- t2SMLALBT = 1689,
- t2SMLALD = 1690,
- t2SMLALDX = 1691,
- t2SMLALTB = 1692,
- t2SMLALTT = 1693,
- t2SMLATB = 1694,
- t2SMLATT = 1695,
- t2SMLAWB = 1696,
- t2SMLAWT = 1697,
- t2SMLSD = 1698,
- t2SMLSDX = 1699,
- t2SMLSLD = 1700,
- t2SMLSLDX = 1701,
- t2SMMLA = 1702,
- t2SMMLAR = 1703,
- t2SMMLS = 1704,
- t2SMMLSR = 1705,
- t2SMMUL = 1706,
- t2SMMULR = 1707,
- t2SMUAD = 1708,
- t2SMUADX = 1709,
- t2SMULBB = 1710,
- t2SMULBT = 1711,
- t2SMULL = 1712,
- t2SMULTB = 1713,
- t2SMULTT = 1714,
- t2SMULWB = 1715,
- t2SMULWT = 1716,
- t2SMUSD = 1717,
- t2SMUSDX = 1718,
- t2SRSDB = 1719,
- t2SRSDBW = 1720,
- t2SRSIA = 1721,
- t2SRSIAW = 1722,
- t2SSAT16 = 1723,
- t2SSATasr = 1724,
- t2SSATlsl = 1725,
- t2SSAX = 1726,
- t2SSUB16 = 1727,
- t2SSUB8 = 1728,
- t2STM = 1729,
- t2STRBT = 1730,
- t2STRB_POST = 1731,
- t2STRB_PRE = 1732,
- t2STRBi12 = 1733,
- t2STRBi8 = 1734,
- t2STRBs = 1735,
- t2STRDi8 = 1736,
- t2STREX = 1737,
- t2STREXB = 1738,
- t2STREXD = 1739,
- t2STREXH = 1740,
- t2STRHT = 1741,
- t2STRH_POST = 1742,
- t2STRH_PRE = 1743,
- t2STRHi12 = 1744,
- t2STRHi8 = 1745,
- t2STRHs = 1746,
- t2STRT = 1747,
- t2STR_POST = 1748,
- t2STR_PRE = 1749,
- t2STRi12 = 1750,
- t2STRi8 = 1751,
- t2STRs = 1752,
- t2SUBSri = 1753,
- t2SUBSrr = 1754,
- t2SUBSrs = 1755,
- t2SUBrSPi = 1756,
- t2SUBrSPi12 = 1757,
- t2SUBrSPi12_ = 1758,
- t2SUBrSPi_ = 1759,
- t2SUBrSPs = 1760,
- t2SUBrSPs_ = 1761,
- t2SUBri = 1762,
- t2SUBri12 = 1763,
- t2SUBrr = 1764,
- t2SUBrs = 1765,
- t2SXTAB16rr = 1766,
- t2SXTAB16rr_rot = 1767,
- t2SXTABrr = 1768,
- t2SXTABrr_rot = 1769,
- t2SXTAHrr = 1770,
- t2SXTAHrr_rot = 1771,
- t2SXTB16r = 1772,
- t2SXTB16r_rot = 1773,
- t2SXTBr = 1774,
- t2SXTBr_rot = 1775,
- t2SXTHr = 1776,
- t2SXTHr_rot = 1777,
- t2TBB = 1778,
- t2TBBgen = 1779,
- t2TBH = 1780,
- t2TBHgen = 1781,
- t2TEQri = 1782,
- t2TEQrr = 1783,
- t2TEQrs = 1784,
- t2TPsoft = 1785,
- t2TSTri = 1786,
- t2TSTrr = 1787,
- t2TSTrs = 1788,
- t2UADD16 = 1789,
- t2UADD8 = 1790,
- t2UASX = 1791,
- t2UBFX = 1792,
- t2UDIV = 1793,
- t2UHADD16 = 1794,
- t2UHADD8 = 1795,
- t2UHASX = 1796,
- t2UHSAX = 1797,
- t2UHSUB16 = 1798,
- t2UHSUB8 = 1799,
- t2UMAAL = 1800,
- t2UMLAL = 1801,
- t2UMULL = 1802,
- t2UQADD16 = 1803,
- t2UQADD8 = 1804,
- t2UQASX = 1805,
- t2UQSAX = 1806,
- t2UQSUB16 = 1807,
- t2UQSUB8 = 1808,
- t2USAD8 = 1809,
- t2USADA8 = 1810,
- t2USAT16 = 1811,
- t2USATasr = 1812,
- t2USATlsl = 1813,
- t2USAX = 1814,
- t2USUB16 = 1815,
- t2USUB8 = 1816,
- t2UXTAB16rr = 1817,
- t2UXTAB16rr_rot = 1818,
- t2UXTABrr = 1819,
- t2UXTABrr_rot = 1820,
- t2UXTAHrr = 1821,
- t2UXTAHrr_rot = 1822,
- t2UXTB16r = 1823,
- t2UXTB16r_rot = 1824,
- t2UXTBr = 1825,
- t2UXTBr_rot = 1826,
- t2UXTHr = 1827,
- t2UXTHr_rot = 1828,
- t2WFE = 1829,
- t2WFI = 1830,
- t2YIELD = 1831,
- tADC = 1832,
- tADDhirr = 1833,
- tADDi3 = 1834,
- tADDi8 = 1835,
- tADDrPCi = 1836,
- tADDrSP = 1837,
- tADDrSPi = 1838,
- tADDrr = 1839,
- tADDspi = 1840,
- tADDspr = 1841,
- tADDspr_ = 1842,
- tADJCALLSTACKDOWN = 1843,
- tADJCALLSTACKUP = 1844,
- tAND = 1845,
- tANDsp = 1846,
- tASRri = 1847,
- tASRrr = 1848,
- tB = 1849,
- tBIC = 1850,
- tBKPT = 1851,
- tBL = 1852,
- tBLXi = 1853,
- tBLXi_r9 = 1854,
- tBLXr = 1855,
- tBLXr_r9 = 1856,
- tBLr9 = 1857,
- tBRIND = 1858,
- tBR_JTr = 1859,
- tBX = 1860,
- tBX_RET = 1861,
- tBX_RET_vararg = 1862,
- tBXr9 = 1863,
- tBcc = 1864,
- tBfar = 1865,
- tCBNZ = 1866,
- tCBZ = 1867,
- tCMNz = 1868,
- tCMPhir = 1869,
- tCMPi8 = 1870,
- tCMPr = 1871,
- tCMPzhir = 1872,
- tCMPzi8 = 1873,
- tCMPzr = 1874,
- tCPS = 1875,
- tEOR = 1876,
- tInt_eh_sjlj_setjmp = 1877,
- tLDM = 1878,
- tLDR = 1879,
- tLDRB = 1880,
- tLDRBi = 1881,
- tLDRH = 1882,
- tLDRHi = 1883,
- tLDRSB = 1884,
- tLDRSH = 1885,
- tLDRcp = 1886,
- tLDRi = 1887,
- tLDRpci = 1888,
- tLDRpci_pic = 1889,
- tLDRspi = 1890,
- tLEApcrel = 1891,
- tLEApcrelJT = 1892,
- tLSLri = 1893,
- tLSLrr = 1894,
- tLSRri = 1895,
- tLSRrr = 1896,
- tMOVCCi = 1897,
- tMOVCCr = 1898,
- tMOVCCr_pseudo = 1899,
- tMOVSr = 1900,
- tMOVgpr2gpr = 1901,
- tMOVgpr2tgpr = 1902,
- tMOVi8 = 1903,
- tMOVr = 1904,
- tMOVtgpr2gpr = 1905,
- tMUL = 1906,
- tMVN = 1907,
- tNOP = 1908,
- tORR = 1909,
- tPICADD = 1910,
- tPOP = 1911,
- tPOP_RET = 1912,
- tPUSH = 1913,
- tREV = 1914,
- tREV16 = 1915,
- tREVSH = 1916,
- tROR = 1917,
- tRSB = 1918,
- tRestore = 1919,
- tSBC = 1920,
- tSETENDBE = 1921,
- tSETENDLE = 1922,
- tSEV = 1923,
- tSTM = 1924,
- tSTR = 1925,
- tSTRB = 1926,
- tSTRBi = 1927,
- tSTRH = 1928,
- tSTRHi = 1929,
- tSTRi = 1930,
- tSTRspi = 1931,
- tSUBi3 = 1932,
- tSUBi8 = 1933,
- tSUBrr = 1934,
- tSUBspi = 1935,
- tSUBspi_ = 1936,
- tSVC = 1937,
- tSXTB = 1938,
- tSXTH = 1939,
- tSpill = 1940,
- tTPsoft = 1941,
- tTRAP = 1942,
- tTST = 1943,
- tUXTB = 1944,
- tUXTH = 1945,
- tWFE = 1946,
- tWFI = 1947,
- tYIELD = 1948,
- INSTRUCTION_LIST_END = 1949
- };
-}
-} // End llvm namespace
diff --git a/libclamav/c++/ARMGenRegisterInfo.h.inc b/libclamav/c++/ARMGenRegisterInfo.h.inc
deleted file mode 100644
index 1e26e88..0000000
--- a/libclamav/c++/ARMGenRegisterInfo.h.inc
+++ /dev/null
@@ -1,111 +0,0 @@
-//===- TableGen'erated file -------------------------------------*- C++ -*-===//
-//
-// Register Information Header Fragment
-//
-// Automatically generated file, do not edit!
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Target/TargetRegisterInfo.h"
-#include <string>
-
-namespace llvm {
-
-struct ARMGenRegisterInfo : public TargetRegisterInfo {
- explicit ARMGenRegisterInfo(int CallFrameSetupOpcode = -1, int CallFrameDestroyOpcode = -1);
- virtual int getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) const;
- virtual int getDwarfRegNum(unsigned RegNum, bool isEH) const = 0;
- virtual bool needsStackRealignment(const MachineFunction &) const
- { return false; }
- unsigned getSubReg(unsigned RegNo, unsigned Index) const;
- unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const;
-};
-
-namespace ARM { // Register classes
- enum {
- CCRRegClassID = 1,
- DPRRegClassID = 2,
- DPR_8RegClassID = 3,
- DPR_VFP2RegClassID = 4,
- GPRRegClassID = 5,
- QPRRegClassID = 6,
- QPR_8RegClassID = 7,
- QPR_VFP2RegClassID = 8,
- SPRRegClassID = 9,
- SPR_8RegClassID = 10,
- SPR_INVALIDRegClassID = 11,
- tGPRRegClassID = 12
- };
-
- struct CCRClass : public TargetRegisterClass {
- CCRClass();
- };
- extern CCRClass CCRRegClass;
- static TargetRegisterClass * const CCRRegisterClass = &CCRRegClass;
- struct DPRClass : public TargetRegisterClass {
- DPRClass();
-
- iterator allocation_order_begin(const MachineFunction &MF) const;
- iterator allocation_order_end(const MachineFunction &MF) const;
- };
- extern DPRClass DPRRegClass;
- static TargetRegisterClass * const DPRRegisterClass = &DPRRegClass;
- struct DPR_8Class : public TargetRegisterClass {
- DPR_8Class();
- };
- extern DPR_8Class DPR_8RegClass;
- static TargetRegisterClass * const DPR_8RegisterClass = &DPR_8RegClass;
- struct DPR_VFP2Class : public TargetRegisterClass {
- DPR_VFP2Class();
- };
- extern DPR_VFP2Class DPR_VFP2RegClass;
- static TargetRegisterClass * const DPR_VFP2RegisterClass = &DPR_VFP2RegClass;
- struct GPRClass : public TargetRegisterClass {
- GPRClass();
-
- iterator allocation_order_begin(const MachineFunction &MF) const;
- iterator allocation_order_end(const MachineFunction &MF) const;
- };
- extern GPRClass GPRRegClass;
- static TargetRegisterClass * const GPRRegisterClass = &GPRRegClass;
- struct QPRClass : public TargetRegisterClass {
- QPRClass();
- };
- extern QPRClass QPRRegClass;
- static TargetRegisterClass * const QPRRegisterClass = &QPRRegClass;
- struct QPR_8Class : public TargetRegisterClass {
- QPR_8Class();
- };
- extern QPR_8Class QPR_8RegClass;
- static TargetRegisterClass * const QPR_8RegisterClass = &QPR_8RegClass;
- struct QPR_VFP2Class : public TargetRegisterClass {
- QPR_VFP2Class();
- };
- extern QPR_VFP2Class QPR_VFP2RegClass;
- static TargetRegisterClass * const QPR_VFP2RegisterClass = &QPR_VFP2RegClass;
- struct SPRClass : public TargetRegisterClass {
- SPRClass();
- };
- extern SPRClass SPRRegClass;
- static TargetRegisterClass * const SPRRegisterClass = &SPRRegClass;
- struct SPR_8Class : public TargetRegisterClass {
- SPR_8Class();
- };
- extern SPR_8Class SPR_8RegClass;
- static TargetRegisterClass * const SPR_8RegisterClass = &SPR_8RegClass;
- struct SPR_INVALIDClass : public TargetRegisterClass {
- SPR_INVALIDClass();
- };
- extern SPR_INVALIDClass SPR_INVALIDRegClass;
- static TargetRegisterClass * const SPR_INVALIDRegisterClass = &SPR_INVALIDRegClass;
- struct tGPRClass : public TargetRegisterClass {
- tGPRClass();
-
- iterator allocation_order_begin(const MachineFunction &MF) const;
- iterator allocation_order_end(const MachineFunction &MF) const;
- };
- extern tGPRClass tGPRRegClass;
- static TargetRegisterClass * const tGPRRegisterClass = &tGPRRegClass;
-} // end of namespace ARM
-
-} // End llvm namespace
diff --git a/libclamav/c++/ARMGenRegisterInfo.inc b/libclamav/c++/ARMGenRegisterInfo.inc
deleted file mode 100644
index 9e40132..0000000
--- a/libclamav/c++/ARMGenRegisterInfo.inc
+++ /dev/null
@@ -1,3707 +0,0 @@
-//===- TableGen'erated file -------------------------------------*- C++ -*-===//
-//
-// Register Information Source Fragment
-//
-// Automatically generated file, do not edit!
-//
-//===----------------------------------------------------------------------===//
-
-namespace llvm {
-
-namespace { // Register classes...
- // CCR Register Class...
- static const unsigned CCR[] = {
- ARM::CPSR,
- };
-
- // DPR Register Class...
- static const unsigned DPR[] = {
- ARM::D0, ARM::D1, ARM::D2, ARM::D3, ARM::D4, ARM::D5, ARM::D6, ARM::D7, ARM::D8, ARM::D9, ARM::D10, ARM::D11, ARM::D12, ARM::D13, ARM::D14, ARM::D15, ARM::D16, ARM::D17, ARM::D18, ARM::D19, ARM::D20, ARM::D21, ARM::D22, ARM::D23, ARM::D24, ARM::D25, ARM::D26, ARM::D27, ARM::D28, ARM::D29, ARM::D30, ARM::D31,
- };
-
- // DPR_8 Register Class...
- static const unsigned DPR_8[] = {
- ARM::D0, ARM::D1, ARM::D2, ARM::D3, ARM::D4, ARM::D5, ARM::D6, ARM::D7,
- };
-
- // DPR_VFP2 Register Class...
- static const unsigned DPR_VFP2[] = {
- ARM::D0, ARM::D1, ARM::D2, ARM::D3, ARM::D4, ARM::D5, ARM::D6, ARM::D7, ARM::D8, ARM::D9, ARM::D10, ARM::D11, ARM::D12, ARM::D13, ARM::D14, ARM::D15,
- };
-
- // GPR Register Class...
- static const unsigned GPR[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3, ARM::R4, ARM::R5, ARM::R6, ARM::R7, ARM::R8, ARM::R9, ARM::R10, ARM::R11, ARM::R12, ARM::SP, ARM::LR, ARM::PC,
- };
-
- // QPR Register Class...
- static const unsigned QPR[] = {
- ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3, ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, ARM::Q8, ARM::Q9, ARM::Q10, ARM::Q11, ARM::Q12, ARM::Q13, ARM::Q14, ARM::Q15,
- };
-
- // QPR_8 Register Class...
- static const unsigned QPR_8[] = {
- ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3,
- };
-
- // QPR_VFP2 Register Class...
- static const unsigned QPR_VFP2[] = {
- ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3, ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7,
- };
-
- // SPR Register Class...
- static const unsigned SPR[] = {
- ARM::S0, ARM::S1, ARM::S2, ARM::S3, ARM::S4, ARM::S5, ARM::S6, ARM::S7, ARM::S8, ARM::S9, ARM::S10, ARM::S11, ARM::S12, ARM::S13, ARM::S14, ARM::S15, ARM::S16, ARM::S17, ARM::S18, ARM::S19, ARM::S20, ARM::S21, ARM::S22, ARM::S23, ARM::S24, ARM::S25, ARM::S26, ARM::S27, ARM::S28, ARM::S29, ARM::S30, ARM::S31,
- };
-
- // SPR_8 Register Class...
- static const unsigned SPR_8[] = {
- ARM::S0, ARM::S1, ARM::S2, ARM::S3, ARM::S4, ARM::S5, ARM::S6, ARM::S7, ARM::S8, ARM::S9, ARM::S10, ARM::S11, ARM::S12, ARM::S13, ARM::S14, ARM::S15,
- };
-
- // SPR_INVALID Register Class...
- static const unsigned SPR_INVALID[] = {
- ARM::SDummy,
- };
-
- // tGPR Register Class...
- static const unsigned tGPR[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3, ARM::R4, ARM::R5, ARM::R6, ARM::R7,
- };
-
- // CCRVTs Register Class Value Types...
- static const EVT CCRVTs[] = {
- MVT::i32, MVT::Other
- };
-
- // DPRVTs Register Class Value Types...
- static const EVT DPRVTs[] = {
- MVT::f64, MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v1i64, MVT::v2f32, MVT::Other
- };
-
- // DPR_8VTs Register Class Value Types...
- static const EVT DPR_8VTs[] = {
- MVT::f64, MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v1i64, MVT::v2f32, MVT::Other
- };
-
- // DPR_VFP2VTs Register Class Value Types...
- static const EVT DPR_VFP2VTs[] = {
- MVT::f64, MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v1i64, MVT::v2f32, MVT::Other
- };
-
- // GPRVTs Register Class Value Types...
- static const EVT GPRVTs[] = {
- MVT::i32, MVT::Other
- };
-
- // QPRVTs Register Class Value Types...
- static const EVT QPRVTs[] = {
- MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64, MVT::Other
- };
-
- // QPR_8VTs Register Class Value Types...
- static const EVT QPR_8VTs[] = {
- MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64, MVT::Other
- };
-
- // QPR_VFP2VTs Register Class Value Types...
- static const EVT QPR_VFP2VTs[] = {
- MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32, MVT::v2f64, MVT::Other
- };
-
- // SPRVTs Register Class Value Types...
- static const EVT SPRVTs[] = {
- MVT::f32, MVT::Other
- };
-
- // SPR_8VTs Register Class Value Types...
- static const EVT SPR_8VTs[] = {
- MVT::f32, MVT::Other
- };
-
- // SPR_INVALIDVTs Register Class Value Types...
- static const EVT SPR_INVALIDVTs[] = {
- MVT::f32, MVT::Other
- };
-
- // tGPRVTs Register Class Value Types...
- static const EVT tGPRVTs[] = {
- MVT::i32, MVT::Other
- };
-
-} // end anonymous namespace
-
-namespace ARM { // Register class instances
- CCRClass CCRRegClass;
- DPRClass DPRRegClass;
- DPR_8Class DPR_8RegClass;
- DPR_VFP2Class DPR_VFP2RegClass;
- GPRClass GPRRegClass;
- QPRClass QPRRegClass;
- QPR_8Class QPR_8RegClass;
- QPR_VFP2Class QPR_VFP2RegClass;
- SPRClass SPRRegClass;
- SPR_8Class SPR_8RegClass;
- SPR_INVALIDClass SPR_INVALIDRegClass;
- tGPRClass tGPRRegClass;
-
- // CCR Sub-register Classes...
- static const TargetRegisterClass* const CCRSubRegClasses[] = {
- NULL
- };
-
- // DPR Sub-register Classes...
- static const TargetRegisterClass* const DPRSubRegClasses[] = {
- &ARM::SPR_INVALIDRegClass, &ARM::SPR_INVALIDRegClass, NULL
- };
-
- // DPR_8 Sub-register Classes...
- static const TargetRegisterClass* const DPR_8SubRegClasses[] = {
- &ARM::SPR_8RegClass, &ARM::SPR_8RegClass, NULL
- };
-
- // DPR_VFP2 Sub-register Classes...
- static const TargetRegisterClass* const DPR_VFP2SubRegClasses[] = {
- &ARM::SPRRegClass, &ARM::SPRRegClass, NULL
- };
-
- // GPR Sub-register Classes...
- static const TargetRegisterClass* const GPRSubRegClasses[] = {
- NULL
- };
-
- // QPR Sub-register Classes...
- static const TargetRegisterClass* const QPRSubRegClasses[] = {
- &ARM::SPR_INVALIDRegClass, &ARM::SPR_INVALIDRegClass, &ARM::SPR_INVALIDRegClass, &ARM::SPR_INVALIDRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, NULL
- };
-
- // QPR_8 Sub-register Classes...
- static const TargetRegisterClass* const QPR_8SubRegClasses[] = {
- &ARM::SPR_8RegClass, &ARM::SPR_8RegClass, &ARM::SPR_8RegClass, &ARM::SPR_8RegClass, &ARM::DPR_8RegClass, &ARM::DPR_8RegClass, NULL
- };
-
- // QPR_VFP2 Sub-register Classes...
- static const TargetRegisterClass* const QPR_VFP2SubRegClasses[] = {
- &ARM::SPRRegClass, &ARM::SPRRegClass, &ARM::SPRRegClass, &ARM::SPRRegClass, &ARM::DPR_VFP2RegClass, &ARM::DPR_VFP2RegClass, NULL
- };
-
- // SPR Sub-register Classes...
- static const TargetRegisterClass* const SPRSubRegClasses[] = {
- NULL
- };
-
- // SPR_8 Sub-register Classes...
- static const TargetRegisterClass* const SPR_8SubRegClasses[] = {
- NULL
- };
-
- // SPR_INVALID Sub-register Classes...
- static const TargetRegisterClass* const SPR_INVALIDSubRegClasses[] = {
- NULL
- };
-
- // tGPR Sub-register Classes...
- static const TargetRegisterClass* const tGPRSubRegClasses[] = {
- NULL
- };
-
- // CCR Super-register Classes...
- static const TargetRegisterClass* const CCRSuperRegClasses[] = {
- NULL
- };
-
- // DPR Super-register Classes...
- static const TargetRegisterClass* const DPRSuperRegClasses[] = {
- &ARM::QPRRegClass, NULL
- };
-
- // DPR_8 Super-register Classes...
- static const TargetRegisterClass* const DPR_8SuperRegClasses[] = {
- &ARM::QPR_8RegClass, NULL
- };
-
- // DPR_VFP2 Super-register Classes...
- static const TargetRegisterClass* const DPR_VFP2SuperRegClasses[] = {
- &ARM::QPR_VFP2RegClass, NULL
- };
-
- // GPR Super-register Classes...
- static const TargetRegisterClass* const GPRSuperRegClasses[] = {
- NULL
- };
-
- // QPR Super-register Classes...
- static const TargetRegisterClass* const QPRSuperRegClasses[] = {
- NULL
- };
-
- // QPR_8 Super-register Classes...
- static const TargetRegisterClass* const QPR_8SuperRegClasses[] = {
- NULL
- };
-
- // QPR_VFP2 Super-register Classes...
- static const TargetRegisterClass* const QPR_VFP2SuperRegClasses[] = {
- NULL
- };
-
- // SPR Super-register Classes...
- static const TargetRegisterClass* const SPRSuperRegClasses[] = {
- &ARM::DPR_VFP2RegClass, &ARM::QPR_VFP2RegClass, NULL
- };
-
- // SPR_8 Super-register Classes...
- static const TargetRegisterClass* const SPR_8SuperRegClasses[] = {
- &ARM::DPR_8RegClass, &ARM::QPR_8RegClass, NULL
- };
-
- // SPR_INVALID Super-register Classes...
- static const TargetRegisterClass* const SPR_INVALIDSuperRegClasses[] = {
- &ARM::DPRRegClass, &ARM::QPRRegClass, NULL
- };
-
- // tGPR Super-register Classes...
- static const TargetRegisterClass* const tGPRSuperRegClasses[] = {
- NULL
- };
-
- // CCR Register Class sub-classes...
- static const TargetRegisterClass* const CCRSubclasses[] = {
- NULL
- };
-
- // DPR Register Class sub-classes...
- static const TargetRegisterClass* const DPRSubclasses[] = {
- &ARM::DPR_8RegClass, &ARM::DPR_VFP2RegClass, NULL
- };
-
- // DPR_8 Register Class sub-classes...
- static const TargetRegisterClass* const DPR_8Subclasses[] = {
- NULL
- };
-
- // DPR_VFP2 Register Class sub-classes...
- static const TargetRegisterClass* const DPR_VFP2Subclasses[] = {
- &ARM::DPR_8RegClass, NULL
- };
-
- // GPR Register Class sub-classes...
- static const TargetRegisterClass* const GPRSubclasses[] = {
- &ARM::tGPRRegClass, NULL
- };
-
- // QPR Register Class sub-classes...
- static const TargetRegisterClass* const QPRSubclasses[] = {
- &ARM::QPR_8RegClass, &ARM::QPR_VFP2RegClass, NULL
- };
-
- // QPR_8 Register Class sub-classes...
- static const TargetRegisterClass* const QPR_8Subclasses[] = {
- NULL
- };
-
- // QPR_VFP2 Register Class sub-classes...
- static const TargetRegisterClass* const QPR_VFP2Subclasses[] = {
- &ARM::QPR_8RegClass, NULL
- };
-
- // SPR Register Class sub-classes...
- static const TargetRegisterClass* const SPRSubclasses[] = {
- &ARM::SPR_8RegClass, NULL
- };
-
- // SPR_8 Register Class sub-classes...
- static const TargetRegisterClass* const SPR_8Subclasses[] = {
- NULL
- };
-
- // SPR_INVALID Register Class sub-classes...
- static const TargetRegisterClass* const SPR_INVALIDSubclasses[] = {
- NULL
- };
-
- // tGPR Register Class sub-classes...
- static const TargetRegisterClass* const tGPRSubclasses[] = {
- NULL
- };
-
- // CCR Register Class super-classes...
- static const TargetRegisterClass* const CCRSuperclasses[] = {
- NULL
- };
-
- // DPR Register Class super-classes...
- static const TargetRegisterClass* const DPRSuperclasses[] = {
- NULL
- };
-
- // DPR_8 Register Class super-classes...
- static const TargetRegisterClass* const DPR_8Superclasses[] = {
- &ARM::DPRRegClass, &ARM::DPR_VFP2RegClass, NULL
- };
-
- // DPR_VFP2 Register Class super-classes...
- static const TargetRegisterClass* const DPR_VFP2Superclasses[] = {
- &ARM::DPRRegClass, NULL
- };
-
- // GPR Register Class super-classes...
- static const TargetRegisterClass* const GPRSuperclasses[] = {
- NULL
- };
-
- // QPR Register Class super-classes...
- static const TargetRegisterClass* const QPRSuperclasses[] = {
- NULL
- };
-
- // QPR_8 Register Class super-classes...
- static const TargetRegisterClass* const QPR_8Superclasses[] = {
- &ARM::QPRRegClass, &ARM::QPR_VFP2RegClass, NULL
- };
-
- // QPR_VFP2 Register Class super-classes...
- static const TargetRegisterClass* const QPR_VFP2Superclasses[] = {
- &ARM::QPRRegClass, NULL
- };
-
- // SPR Register Class super-classes...
- static const TargetRegisterClass* const SPRSuperclasses[] = {
- NULL
- };
-
- // SPR_8 Register Class super-classes...
- static const TargetRegisterClass* const SPR_8Superclasses[] = {
- &ARM::SPRRegClass, NULL
- };
-
- // SPR_INVALID Register Class super-classes...
- static const TargetRegisterClass* const SPR_INVALIDSuperclasses[] = {
- NULL
- };
-
- // tGPR Register Class super-classes...
- static const TargetRegisterClass* const tGPRSuperclasses[] = {
- &ARM::GPRRegClass, NULL
- };
-
-
-CCRClass::CCRClass() : TargetRegisterClass(CCRRegClassID, "CCR", CCRVTs, CCRSubclasses, CCRSuperclasses, CCRSubRegClasses, CCRSuperRegClasses, 4, 4, 1, CCR, CCR + 1) {}
-
- // VFP2
- static const unsigned ARM_DPR_VFP2[] = {
- ARM::D0, ARM::D1, ARM::D2, ARM::D3,
- ARM::D4, ARM::D5, ARM::D6, ARM::D7,
- ARM::D8, ARM::D9, ARM::D10, ARM::D11,
- ARM::D12, ARM::D13, ARM::D14, ARM::D15 };
- // VFP3
- static const unsigned ARM_DPR_VFP3[] = {
- ARM::D0, ARM::D1, ARM::D2, ARM::D3,
- ARM::D4, ARM::D5, ARM::D6, ARM::D7,
- ARM::D8, ARM::D9, ARM::D10, ARM::D11,
- ARM::D12, ARM::D13, ARM::D14, ARM::D15,
- ARM::D16, ARM::D17, ARM::D18, ARM::D19,
- ARM::D20, ARM::D21, ARM::D22, ARM::D23,
- ARM::D24, ARM::D25, ARM::D26, ARM::D27,
- ARM::D28, ARM::D29, ARM::D30, ARM::D31 };
- DPRClass::iterator
- DPRClass::allocation_order_begin(const MachineFunction &MF) const {
- const TargetMachine &TM = MF.getTarget();
- const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
- if (Subtarget.hasVFP3())
- return ARM_DPR_VFP3;
- return ARM_DPR_VFP2;
- }
-
- DPRClass::iterator
- DPRClass::allocation_order_end(const MachineFunction &MF) const {
- const TargetMachine &TM = MF.getTarget();
- const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
- if (Subtarget.hasVFP3())
- return ARM_DPR_VFP3 + (sizeof(ARM_DPR_VFP3)/sizeof(unsigned));
- else
- return ARM_DPR_VFP2 + (sizeof(ARM_DPR_VFP2)/sizeof(unsigned));
- }
-
-DPRClass::DPRClass() : TargetRegisterClass(DPRRegClassID, "DPR", DPRVTs, DPRSubclasses, DPRSuperclasses, DPRSubRegClasses, DPRSuperRegClasses, 8, 8, 1, DPR, DPR + 32) {}
-
-DPR_8Class::DPR_8Class() : TargetRegisterClass(DPR_8RegClassID, "DPR_8", DPR_8VTs, DPR_8Subclasses, DPR_8Superclasses, DPR_8SubRegClasses, DPR_8SuperRegClasses, 8, 8, 1, DPR_8, DPR_8 + 8) {}
-
-DPR_VFP2Class::DPR_VFP2Class() : TargetRegisterClass(DPR_VFP2RegClassID, "DPR_VFP2", DPR_VFP2VTs, DPR_VFP2Subclasses, DPR_VFP2Superclasses, DPR_VFP2SubRegClasses, DPR_VFP2SuperRegClasses, 8, 8, 1, DPR_VFP2, DPR_VFP2 + 16) {}
-
- // FP is R11, R9 is available.
- static const unsigned ARM_GPR_AO_1[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3,
- ARM::R12,ARM::LR,
- ARM::R4, ARM::R5, ARM::R6, ARM::R7,
- ARM::R8, ARM::R9, ARM::R10,
- ARM::R11 };
- // FP is R11, R9 is not available.
- static const unsigned ARM_GPR_AO_2[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3,
- ARM::R12,ARM::LR,
- ARM::R4, ARM::R5, ARM::R6, ARM::R7,
- ARM::R8, ARM::R10,
- ARM::R11 };
- // FP is R7, R9 is available as non-callee-saved register.
- // This is used by Darwin.
- static const unsigned ARM_GPR_AO_3[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3,
- ARM::R9, ARM::R12,ARM::LR,
- ARM::R4, ARM::R5, ARM::R6,
- ARM::R8, ARM::R10,ARM::R11,ARM::R7 };
- // FP is R7, R9 is not available.
- static const unsigned ARM_GPR_AO_4[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3,
- ARM::R12,ARM::LR,
- ARM::R4, ARM::R5, ARM::R6,
- ARM::R8, ARM::R10,ARM::R11,
- ARM::R7 };
- // FP is R7, R9 is available as callee-saved register.
- // This is used by non-Darwin platform in Thumb mode.
- static const unsigned ARM_GPR_AO_5[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3,
- ARM::R12,ARM::LR,
- ARM::R4, ARM::R5, ARM::R6,
- ARM::R8, ARM::R9, ARM::R10,ARM::R11,ARM::R7 };
-
- // For Thumb1 mode, we don't want to allocate hi regs at all, as we
- // don't know how to spill them. If we make our prologue/epilogue code
- // smarter at some point, we can go back to using the above allocation
- // orders for the Thumb1 instructions that know how to use hi regs.
- static const unsigned THUMB_GPR_AO[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3,
- ARM::R4, ARM::R5, ARM::R6, ARM::R7 };
-
- GPRClass::iterator
- GPRClass::allocation_order_begin(const MachineFunction &MF) const {
- const TargetMachine &TM = MF.getTarget();
- const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
- if (Subtarget.isThumb1Only())
- return THUMB_GPR_AO;
- if (Subtarget.isTargetDarwin()) {
- if (Subtarget.isR9Reserved())
- return ARM_GPR_AO_4;
- else
- return ARM_GPR_AO_3;
- } else {
- if (Subtarget.isR9Reserved())
- return ARM_GPR_AO_2;
- else if (Subtarget.isThumb())
- return ARM_GPR_AO_5;
- else
- return ARM_GPR_AO_1;
- }
- }
-
- GPRClass::iterator
- GPRClass::allocation_order_end(const MachineFunction &MF) const {
- const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
- GPRClass::iterator I;
-
- if (Subtarget.isThumb1Only()) {
- I = THUMB_GPR_AO + (sizeof(THUMB_GPR_AO)/sizeof(unsigned));
- // Mac OS X requires FP not to be clobbered for backtracing purpose.
- return (Subtarget.isTargetDarwin() || RI->hasFP(MF)) ? I-1 : I;
- }
-
- if (Subtarget.isTargetDarwin()) {
- if (Subtarget.isR9Reserved())
- I = ARM_GPR_AO_4 + (sizeof(ARM_GPR_AO_4)/sizeof(unsigned));
- else
- I = ARM_GPR_AO_3 + (sizeof(ARM_GPR_AO_3)/sizeof(unsigned));
- } else {
- if (Subtarget.isR9Reserved())
- I = ARM_GPR_AO_2 + (sizeof(ARM_GPR_AO_2)/sizeof(unsigned));
- else if (Subtarget.isThumb())
- I = ARM_GPR_AO_5 + (sizeof(ARM_GPR_AO_5)/sizeof(unsigned));
- else
- I = ARM_GPR_AO_1 + (sizeof(ARM_GPR_AO_1)/sizeof(unsigned));
- }
-
- // Mac OS X requires FP not to be clobbered for backtracing purpose.
- return (Subtarget.isTargetDarwin() || RI->hasFP(MF)) ? I-1 : I;
- }
-
-GPRClass::GPRClass() : TargetRegisterClass(GPRRegClassID, "GPR", GPRVTs, GPRSubclasses, GPRSuperclasses, GPRSubRegClasses, GPRSuperRegClasses, 4, 4, 1, GPR, GPR + 16) {}
-
-QPRClass::QPRClass() : TargetRegisterClass(QPRRegClassID, "QPR", QPRVTs, QPRSubclasses, QPRSuperclasses, QPRSubRegClasses, QPRSuperRegClasses, 16, 16, 1, QPR, QPR + 16) {}
-
-QPR_8Class::QPR_8Class() : TargetRegisterClass(QPR_8RegClassID, "QPR_8", QPR_8VTs, QPR_8Subclasses, QPR_8Superclasses, QPR_8SubRegClasses, QPR_8SuperRegClasses, 16, 16, 1, QPR_8, QPR_8 + 4) {}
-
-QPR_VFP2Class::QPR_VFP2Class() : TargetRegisterClass(QPR_VFP2RegClassID, "QPR_VFP2", QPR_VFP2VTs, QPR_VFP2Subclasses, QPR_VFP2Superclasses, QPR_VFP2SubRegClasses, QPR_VFP2SuperRegClasses, 16, 16, 1, QPR_VFP2, QPR_VFP2 + 8) {}
-
-SPRClass::SPRClass() : TargetRegisterClass(SPRRegClassID, "SPR", SPRVTs, SPRSubclasses, SPRSuperclasses, SPRSubRegClasses, SPRSuperRegClasses, 4, 4, 1, SPR, SPR + 32) {}
-
-SPR_8Class::SPR_8Class() : TargetRegisterClass(SPR_8RegClassID, "SPR_8", SPR_8VTs, SPR_8Subclasses, SPR_8Superclasses, SPR_8SubRegClasses, SPR_8SuperRegClasses, 4, 4, 1, SPR_8, SPR_8 + 16) {}
-
-SPR_INVALIDClass::SPR_INVALIDClass() : TargetRegisterClass(SPR_INVALIDRegClassID, "SPR_INVALID", SPR_INVALIDVTs, SPR_INVALIDSubclasses, SPR_INVALIDSuperclasses, SPR_INVALIDSubRegClasses, SPR_INVALIDSuperRegClasses, 4, 4, -1, SPR_INVALID, SPR_INVALID + 1) {}
-
- static const unsigned THUMB_tGPR_AO[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3,
- ARM::R4, ARM::R5, ARM::R6, ARM::R7 };
-
- // FP is R7, only low registers available.
- tGPRClass::iterator
- tGPRClass::allocation_order_begin(const MachineFunction &MF) const {
- return THUMB_tGPR_AO;
- }
-
- tGPRClass::iterator
- tGPRClass::allocation_order_end(const MachineFunction &MF) const {
- const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
- tGPRClass::iterator I =
- THUMB_tGPR_AO + (sizeof(THUMB_tGPR_AO)/sizeof(unsigned));
- // Mac OS X requires FP not to be clobbered for backtracing purpose.
- return (Subtarget.isTargetDarwin() || RI->hasFP(MF)) ? I-1 : I;
- }
-
-tGPRClass::tGPRClass() : TargetRegisterClass(tGPRRegClassID, "tGPR", tGPRVTs, tGPRSubclasses, tGPRSuperclasses, tGPRSubRegClasses, tGPRSuperRegClasses, 4, 4, 1, tGPR, tGPR + 8) {}
-}
-
-namespace {
- const TargetRegisterClass* const RegisterClasses[] = {
- &ARM::CCRRegClass,
- &ARM::DPRRegClass,
- &ARM::DPR_8RegClass,
- &ARM::DPR_VFP2RegClass,
- &ARM::GPRRegClass,
- &ARM::QPRRegClass,
- &ARM::QPR_8RegClass,
- &ARM::QPR_VFP2RegClass,
- &ARM::SPRRegClass,
- &ARM::SPR_8RegClass,
- &ARM::SPR_INVALIDRegClass,
- &ARM::tGPRRegClass,
- };
-
-
- // Number of hash collisions: 6
- const unsigned SubregHashTable[] = { ARM::NoRegister, ARM::NoRegister,
- ARM::Q2, ARM::S10,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D12, ARM::S24,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q5, ARM::S23,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q2, ARM::S8,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q15, ARM::D31,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q9, ARM::D19,
- ARM::D5, ARM::S11,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D4, ARM::S9,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q2, ARM::S11,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D12, ARM::S25,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q1, ARM::D2,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q6, ARM::S24,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q2, ARM::S9,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q2, ARM::D4,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D6, ARM::S12,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q3, ARM::S12,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D13, ARM::S26,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q10, ARM::D20,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q6, ARM::S25,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q2, ARM::D5,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D6, ARM::S13,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q0, ARM::D0,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q3, ARM::S13,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D13, ARM::S27,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q10, ARM::D21,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q6, ARM::S26,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q3, ARM::D6,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D7, ARM::S14,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q0, ARM::D1,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q3, ARM::S14,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D14, ARM::S28,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q11, ARM::D22,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q6, ARM::S27,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q3, ARM::D7,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D7, ARM::S15,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q3, ARM::S15,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D14, ARM::S29,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q11, ARM::D23,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q5, ARM::D10,
- ARM::Q7, ARM::S28,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q4, ARM::D8,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D8, ARM::S16,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D1, ARM::S3,
- ARM::Q4, ARM::S16,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q12, ARM::D24,
- ARM::Q5, ARM::D11,
- ARM::Q7, ARM::S29,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q4, ARM::D9,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D8, ARM::S17,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q0, ARM::S3,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q4, ARM::S17,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D15, ARM::S30,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q12, ARM::D25,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q6, ARM::D12,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D9, ARM::S18,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q4, ARM::S18,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D15, ARM::S31,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q13, ARM::D26,
- ARM::Q6, ARM::D13,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q7, ARM::S30,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D9, ARM::S19,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D1, ARM::S2,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q4, ARM::S19,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q13, ARM::D27,
- ARM::Q7, ARM::S31,
- ARM::D2, ARM::S4,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q7, ARM::D14,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q0, ARM::S2,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D10, ARM::S20,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q1, ARM::S4,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q14, ARM::D28,
- ARM::D2, ARM::S5,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q7, ARM::D15,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D0, ARM::S0,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D10, ARM::S21,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q1, ARM::S5,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q5, ARM::S20,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q14, ARM::D29,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q8, ARM::D16,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D3, ARM::S6,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D0, ARM::S1,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q0, ARM::S0,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D11, ARM::S22,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q1, ARM::S6,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q5, ARM::S21,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q1, ARM::D3,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q8, ARM::D17,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D3, ARM::S7,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q0, ARM::S1,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D11, ARM::S23,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q1, ARM::S7,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q5, ARM::S22,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q15, ARM::D30,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q9, ARM::D18,
- ARM::D5, ARM::S10,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D4, ARM::S8,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
-ARM::NoRegister, ARM::NoRegister };
- const unsigned SubregHashTableSize = 512;
-
-
- // Number of hash collisions: 18
- const unsigned SuperregHashTable[] = { ARM::D24, ARM::Q12,
- ARM::D25, ARM::Q12,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D26, ARM::Q13,
- ARM::D27, ARM::Q13,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D28, ARM::Q14,
- ARM::D29, ARM::Q14,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S4, ARM::D2,
- ARM::S5, ARM::D2,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S8, ARM::D4,
- ARM::S9, ARM::D4,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S10, ARM::D5,
- ARM::D30, ARM::Q15,
- ARM::D31, ARM::Q15,
- ARM::S11, ARM::D5,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S0, ARM::D0,
- ARM::S1, ARM::D0,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S12, ARM::D6,
- ARM::D4, ARM::Q2,
- ARM::D5, ARM::Q2,
- ARM::S13, ARM::D6,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S2, ARM::D1,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S14, ARM::D7,
- ARM::D6, ARM::Q3,
- ARM::D7, ARM::Q3,
- ARM::S11, ARM::Q2,
- ARM::S10, ARM::Q2,
- ARM::S3, ARM::D1,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S15, ARM::D7,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S8, ARM::Q2,
- ARM::S9, ARM::Q2,
- ARM::S20, ARM::D10,
- ARM::S21, ARM::D10,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S16, ARM::D8,
- ARM::D8, ARM::Q4,
- ARM::D9, ARM::Q4,
- ARM::S13, ARM::Q3,
- ARM::S12, ARM::Q3,
- ARM::S15, ARM::Q3,
- ARM::S14, ARM::Q3,
- ARM::S17, ARM::D8,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D10, ARM::Q5,
- ARM::D11, ARM::Q5,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S22, ARM::D11,
- ARM::S23, ARM::D11,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S18, ARM::D9,
- ARM::S19, ARM::D9,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S16, ARM::Q4,
- ARM::S17, ARM::Q4,
- ARM::S18, ARM::Q4,
- ARM::S19, ARM::Q4,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D12, ARM::Q6,
- ARM::D13, ARM::Q6,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S24, ARM::D12,
- ARM::S25, ARM::D12,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S20, ARM::Q5,
- ARM::S21, ARM::Q5,
- ARM::S22, ARM::Q5,
- ARM::D14, ARM::Q7,
- ARM::D15, ARM::Q7,
- ARM::S23, ARM::Q5,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S26, ARM::D13,
- ARM::S27, ARM::D13,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D0, ARM::Q0,
- ARM::D1, ARM::Q0,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S24, ARM::Q6,
- ARM::D16, ARM::Q8,
- ARM::D17, ARM::Q8,
- ARM::S25, ARM::Q6,
- ARM::S26, ARM::Q6,
- ARM::S27, ARM::Q6,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S28, ARM::D14,
- ARM::S29, ARM::D14,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D2, ARM::Q1,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D18, ARM::Q9,
- ARM::D19, ARM::Q9,
- ARM::S29, ARM::Q7,
- ARM::S28, ARM::Q7,
- ARM::S30, ARM::Q7,
- ARM::S31, ARM::Q7,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D3, ARM::Q1,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S0, ARM::Q0,
- ARM::S1, ARM::Q0,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S2, ARM::Q0,
- ARM::S31, ARM::D15,
- ARM::S30, ARM::D15,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D20, ARM::Q10,
- ARM::D21, ARM::Q10,
- ARM::S3, ARM::Q0,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D22, ARM::Q11,
- ARM::D23, ARM::Q11,
- ARM::S5, ARM::Q1,
- ARM::S4, ARM::Q1,
- ARM::S7, ARM::Q1,
- ARM::S6, ARM::Q1,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S6, ARM::D3,
- ARM::S7, ARM::D3,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
-ARM::NoRegister, ARM::NoRegister };
- const unsigned SuperregHashTableSize = 512;
-
-
- // Number of hash collisions: 38
- const unsigned AliasesHashTable[] = { ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D12, ARM::S24,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q5, ARM::S23,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q15, ARM::D31,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D12, ARM::S25,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q6, ARM::S24,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q2, ARM::D4,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D13, ARM::S26,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q6, ARM::S25,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q2, ARM::D5,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S8, ARM::D4,
- ARM::S9, ARM::D4,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q0, ARM::D0,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D13, ARM::S27,
- ARM::S11, ARM::D5,
- ARM::S10, ARM::D5,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q6, ARM::S26,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q3, ARM::D6,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S0, ARM::D0,
- ARM::S1, ARM::D0,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q0, ARM::D1,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D14, ARM::S28,
- ARM::S12, ARM::D6,
- ARM::S13, ARM::D6,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q6, ARM::S27,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q3, ARM::D7,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S2, ARM::D1,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D14, ARM::S29,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S14, ARM::D7,
- ARM::Q5, ARM::D10,
- ARM::Q7, ARM::S28,
- ARM::S15, ARM::D7,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S3, ARM::D1,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q4, ARM::D8,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D1, ARM::S3,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S20, ARM::D10,
- ARM::S21, ARM::D10,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q5, ARM::D11,
- ARM::Q7, ARM::S29,
- ARM::S17, ARM::D8,
- ARM::S16, ARM::D8,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q4, ARM::D9,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q0, ARM::S3,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S22, ARM::D11,
- ARM::D15, ARM::S30,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S23, ARM::D11,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q6, ARM::D12,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S18, ARM::D9,
- ARM::S19, ARM::D9,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D15, ARM::S31,
- ARM::S24, ARM::D12,
- ARM::S25, ARM::D12,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q6, ARM::D13,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q7, ARM::S30,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S26, ARM::D13,
- ARM::Q7, ARM::S31,
- ARM::D2, ARM::S4,
- ARM::D0, ARM::Q0,
- ARM::D1, ARM::Q0,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S27, ARM::D13,
- ARM::Q7, ARM::D14,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q1, ARM::S4,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D2, ARM::S5,
- ARM::S29, ARM::D14,
- ARM::Q7, ARM::D15,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S28, ARM::D14,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D0, ARM::S0,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D2, ARM::Q1,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D3, ARM::Q1,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q1, ARM::S5,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S0, ARM::Q0,
- ARM::S1, ARM::Q0,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q8, ARM::D16,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S2, ARM::Q0,
- ARM::S31, ARM::D15,
- ARM::S30, ARM::D15,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D3, ARM::S6,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D0, ARM::S1,
- ARM::D20, ARM::Q10,
- ARM::D21, ARM::Q10,
- ARM::S3, ARM::Q0,
- ARM::Q0, ARM::S0,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q1, ARM::S6,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q8, ARM::D17,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D3, ARM::S7,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q0, ARM::S1,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D22, ARM::Q11,
- ARM::D23, ARM::Q11,
- ARM::S5, ARM::Q1,
- ARM::S4, ARM::Q1,
- ARM::S7, ARM::Q1,
- ARM::S6, ARM::Q1,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q1, ARM::S7,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q9, ARM::D18,
- ARM::D5, ARM::S10,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D4, ARM::S8,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D24, ARM::Q12,
- ARM::D25, ARM::Q12,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q2, ARM::S10,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q2, ARM::S8,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q9, ARM::D19,
- ARM::D5, ARM::S11,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D4, ARM::S9,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q2, ARM::S11,
- ARM::D26, ARM::Q13,
- ARM::D27, ARM::Q13,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q1, ARM::D2,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q2, ARM::S9,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D6, ARM::S12,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q3, ARM::S12,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D28, ARM::Q14,
- ARM::D29, ARM::Q14,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q10, ARM::D20,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D6, ARM::S13,
- ARM::S4, ARM::D2,
- ARM::S5, ARM::D2,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q3, ARM::S13,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D30, ARM::Q15,
- ARM::D31, ARM::Q15,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q10, ARM::D21,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D7, ARM::S14,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q3, ARM::S14,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D4, ARM::Q2,
- ARM::D5, ARM::Q2,
- ARM::Q11, ARM::D22,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D7, ARM::S15,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q3, ARM::S15,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q11, ARM::D23,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D6, ARM::Q3,
- ARM::D7, ARM::Q3,
- ARM::S11, ARM::Q2,
- ARM::S10, ARM::Q2,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D8, ARM::S16,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q4, ARM::S16,
- ARM::S9, ARM::Q2,
- ARM::S8, ARM::Q2,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q12, ARM::D24,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D8, ARM::Q4,
- ARM::D9, ARM::Q4,
- ARM::S13, ARM::Q3,
- ARM::S12, ARM::Q3,
- ARM::S15, ARM::Q3,
- ARM::S14, ARM::Q3,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D10, ARM::Q5,
- ARM::D11, ARM::Q5,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D8, ARM::S17,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q4, ARM::S17,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q12, ARM::D25,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S16, ARM::Q4,
- ARM::S17, ARM::Q4,
- ARM::S18, ARM::Q4,
- ARM::S19, ARM::Q4,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D12, ARM::Q6,
- ARM::D13, ARM::Q6,
- ARM::D9, ARM::S18,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q4, ARM::S18,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q13, ARM::D26,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S20, ARM::Q5,
- ARM::S21, ARM::Q5,
- ARM::S22, ARM::Q5,
- ARM::D14, ARM::Q7,
- ARM::D15, ARM::Q7,
- ARM::D9, ARM::S19,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S23, ARM::Q5,
- ARM::D1, ARM::S2,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q4, ARM::S19,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q13, ARM::D27,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S24, ARM::Q6,
- ARM::D16, ARM::Q8,
- ARM::D17, ARM::Q8,
- ARM::Q0, ARM::S2,
- ARM::S26, ARM::Q6,
- ARM::S27, ARM::Q6,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D10, ARM::S20,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S25, ARM::Q6,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q14, ARM::D28,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D18, ARM::Q9,
- ARM::D19, ARM::Q9,
- ARM::S29, ARM::Q7,
- ARM::S28, ARM::Q7,
- ARM::D10, ARM::S21,
- ARM::S31, ARM::Q7,
- ARM::S30, ARM::Q7,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q5, ARM::S20,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q14, ARM::D29,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D11, ARM::S22,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q5, ARM::S21,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q1, ARM::D3,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::D11, ARM::S23,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q5, ARM::S22,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::Q15, ARM::D30,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
- ARM::S6, ARM::D3,
- ARM::S7, ARM::D3,
- ARM::NoRegister, ARM::NoRegister,
- ARM::NoRegister, ARM::NoRegister,
-ARM::NoRegister, ARM::NoRegister };
- const unsigned AliasesHashTableSize = 1024;
-
-
- // Register Alias Sets...
- const unsigned Empty_AliasSet[] = { 0 };
- const unsigned CPSR_AliasSet[] = { 0 };
- const unsigned D0_AliasSet[] = { ARM::S0, ARM::S1, ARM::Q0, 0 };
- const unsigned D1_AliasSet[] = { ARM::S2, ARM::S3, ARM::Q0, 0 };
- const unsigned D10_AliasSet[] = { ARM::S20, ARM::S21, ARM::Q5, 0 };
- const unsigned D11_AliasSet[] = { ARM::S22, ARM::S23, ARM::Q5, 0 };
- const unsigned D12_AliasSet[] = { ARM::S24, ARM::S25, ARM::Q6, 0 };
- const unsigned D13_AliasSet[] = { ARM::S26, ARM::S27, ARM::Q6, 0 };
- const unsigned D14_AliasSet[] = { ARM::S28, ARM::S29, ARM::Q7, 0 };
- const unsigned D15_AliasSet[] = { ARM::S30, ARM::S31, ARM::Q7, 0 };
- const unsigned D16_AliasSet[] = { ARM::Q8, 0 };
- const unsigned D17_AliasSet[] = { ARM::Q8, 0 };
- const unsigned D18_AliasSet[] = { ARM::Q9, 0 };
- const unsigned D19_AliasSet[] = { ARM::Q9, 0 };
- const unsigned D2_AliasSet[] = { ARM::S4, ARM::S5, ARM::Q1, 0 };
- const unsigned D20_AliasSet[] = { ARM::Q10, 0 };
- const unsigned D21_AliasSet[] = { ARM::Q10, 0 };
- const unsigned D22_AliasSet[] = { ARM::Q11, 0 };
- const unsigned D23_AliasSet[] = { ARM::Q11, 0 };
- const unsigned D24_AliasSet[] = { ARM::Q12, 0 };
- const unsigned D25_AliasSet[] = { ARM::Q12, 0 };
- const unsigned D26_AliasSet[] = { ARM::Q13, 0 };
- const unsigned D27_AliasSet[] = { ARM::Q13, 0 };
- const unsigned D28_AliasSet[] = { ARM::Q14, 0 };
- const unsigned D29_AliasSet[] = { ARM::Q14, 0 };
- const unsigned D3_AliasSet[] = { ARM::S6, ARM::S7, ARM::Q1, 0 };
- const unsigned D30_AliasSet[] = { ARM::Q15, 0 };
- const unsigned D31_AliasSet[] = { ARM::Q15, 0 };
- const unsigned D4_AliasSet[] = { ARM::S8, ARM::S9, ARM::Q2, 0 };
- const unsigned D5_AliasSet[] = { ARM::S10, ARM::S11, ARM::Q2, 0 };
- const unsigned D6_AliasSet[] = { ARM::S12, ARM::S13, ARM::Q3, 0 };
- const unsigned D7_AliasSet[] = { ARM::S14, ARM::S15, ARM::Q3, 0 };
- const unsigned D8_AliasSet[] = { ARM::S16, ARM::S17, ARM::Q4, 0 };
- const unsigned D9_AliasSet[] = { ARM::S18, ARM::S19, ARM::Q4, 0 };
- const unsigned FPSCR_AliasSet[] = { 0 };
- const unsigned LR_AliasSet[] = { 0 };
- const unsigned PC_AliasSet[] = { 0 };
- const unsigned Q0_AliasSet[] = { ARM::S0, ARM::S1, ARM::S2, ARM::S3, ARM::D0, ARM::D1, 0 };
- const unsigned Q1_AliasSet[] = { ARM::S4, ARM::S5, ARM::S6, ARM::S7, ARM::D2, ARM::D3, 0 };
- const unsigned Q10_AliasSet[] = { ARM::D20, ARM::D21, 0 };
- const unsigned Q11_AliasSet[] = { ARM::D22, ARM::D23, 0 };
- const unsigned Q12_AliasSet[] = { ARM::D24, ARM::D25, 0 };
- const unsigned Q13_AliasSet[] = { ARM::D26, ARM::D27, 0 };
- const unsigned Q14_AliasSet[] = { ARM::D28, ARM::D29, 0 };
- const unsigned Q15_AliasSet[] = { ARM::D30, ARM::D31, 0 };
- const unsigned Q2_AliasSet[] = { ARM::S8, ARM::S9, ARM::S10, ARM::S11, ARM::D4, ARM::D5, 0 };
- const unsigned Q3_AliasSet[] = { ARM::S12, ARM::S13, ARM::S14, ARM::S15, ARM::D6, ARM::D7, 0 };
- const unsigned Q4_AliasSet[] = { ARM::S16, ARM::S17, ARM::S18, ARM::S19, ARM::D8, ARM::D9, 0 };
- const unsigned Q5_AliasSet[] = { ARM::S20, ARM::S21, ARM::S22, ARM::S23, ARM::D10, ARM::D11, 0 };
- const unsigned Q6_AliasSet[] = { ARM::S24, ARM::S25, ARM::S26, ARM::S27, ARM::D12, ARM::D13, 0 };
- const unsigned Q7_AliasSet[] = { ARM::S28, ARM::S29, ARM::S30, ARM::S31, ARM::D14, ARM::D15, 0 };
- const unsigned Q8_AliasSet[] = { ARM::D16, ARM::D17, 0 };
- const unsigned Q9_AliasSet[] = { ARM::D18, ARM::D19, 0 };
- const unsigned R0_AliasSet[] = { 0 };
- const unsigned R1_AliasSet[] = { 0 };
- const unsigned R10_AliasSet[] = { 0 };
- const unsigned R11_AliasSet[] = { 0 };
- const unsigned R12_AliasSet[] = { 0 };
- const unsigned R2_AliasSet[] = { 0 };
- const unsigned R3_AliasSet[] = { 0 };
- const unsigned R4_AliasSet[] = { 0 };
- const unsigned R5_AliasSet[] = { 0 };
- const unsigned R6_AliasSet[] = { 0 };
- const unsigned R7_AliasSet[] = { 0 };
- const unsigned R8_AliasSet[] = { 0 };
- const unsigned R9_AliasSet[] = { 0 };
- const unsigned S0_AliasSet[] = { ARM::D0, ARM::Q0, 0 };
- const unsigned S1_AliasSet[] = { ARM::D0, ARM::Q0, 0 };
- const unsigned S10_AliasSet[] = { ARM::D5, ARM::Q2, 0 };
- const unsigned S11_AliasSet[] = { ARM::D5, ARM::Q2, 0 };
- const unsigned S12_AliasSet[] = { ARM::D6, ARM::Q3, 0 };
- const unsigned S13_AliasSet[] = { ARM::D6, ARM::Q3, 0 };
- const unsigned S14_AliasSet[] = { ARM::D7, ARM::Q3, 0 };
- const unsigned S15_AliasSet[] = { ARM::D7, ARM::Q3, 0 };
- const unsigned S16_AliasSet[] = { ARM::D8, ARM::Q4, 0 };
- const unsigned S17_AliasSet[] = { ARM::D8, ARM::Q4, 0 };
- const unsigned S18_AliasSet[] = { ARM::D9, ARM::Q4, 0 };
- const unsigned S19_AliasSet[] = { ARM::D9, ARM::Q4, 0 };
- const unsigned S2_AliasSet[] = { ARM::D1, ARM::Q0, 0 };
- const unsigned S20_AliasSet[] = { ARM::D10, ARM::Q5, 0 };
- const unsigned S21_AliasSet[] = { ARM::D10, ARM::Q5, 0 };
- const unsigned S22_AliasSet[] = { ARM::D11, ARM::Q5, 0 };
- const unsigned S23_AliasSet[] = { ARM::D11, ARM::Q5, 0 };
- const unsigned S24_AliasSet[] = { ARM::D12, ARM::Q6, 0 };
- const unsigned S25_AliasSet[] = { ARM::D12, ARM::Q6, 0 };
- const unsigned S26_AliasSet[] = { ARM::D13, ARM::Q6, 0 };
- const unsigned S27_AliasSet[] = { ARM::D13, ARM::Q6, 0 };
- const unsigned S28_AliasSet[] = { ARM::D14, ARM::Q7, 0 };
- const unsigned S29_AliasSet[] = { ARM::D14, ARM::Q7, 0 };
- const unsigned S3_AliasSet[] = { ARM::D1, ARM::Q0, 0 };
- const unsigned S30_AliasSet[] = { ARM::D15, ARM::Q7, 0 };
- const unsigned S31_AliasSet[] = { ARM::D15, ARM::Q7, 0 };
- const unsigned S4_AliasSet[] = { ARM::D2, ARM::Q1, 0 };
- const unsigned S5_AliasSet[] = { ARM::D2, ARM::Q1, 0 };
- const unsigned S6_AliasSet[] = { ARM::D3, ARM::Q1, 0 };
- const unsigned S7_AliasSet[] = { ARM::D3, ARM::Q1, 0 };
- const unsigned S8_AliasSet[] = { ARM::D4, ARM::Q2, 0 };
- const unsigned S9_AliasSet[] = { ARM::D4, ARM::Q2, 0 };
- const unsigned SDummy_AliasSet[] = { 0 };
- const unsigned SP_AliasSet[] = { 0 };
-
-
- // Register Sub-registers Sets...
- const unsigned Empty_SubRegsSet[] = { 0 };
- const unsigned CPSR_SubRegsSet[] = { 0 };
- const unsigned D0_SubRegsSet[] = { ARM::S0, ARM::S1, 0 };
- const unsigned D1_SubRegsSet[] = { ARM::S2, ARM::S3, 0 };
- const unsigned D10_SubRegsSet[] = { ARM::S20, ARM::S21, 0 };
- const unsigned D11_SubRegsSet[] = { ARM::S22, ARM::S23, 0 };
- const unsigned D12_SubRegsSet[] = { ARM::S24, ARM::S25, 0 };
- const unsigned D13_SubRegsSet[] = { ARM::S26, ARM::S27, 0 };
- const unsigned D14_SubRegsSet[] = { ARM::S28, ARM::S29, 0 };
- const unsigned D15_SubRegsSet[] = { ARM::S30, ARM::S31, 0 };
- const unsigned D16_SubRegsSet[] = { 0 };
- const unsigned D17_SubRegsSet[] = { 0 };
- const unsigned D18_SubRegsSet[] = { 0 };
- const unsigned D19_SubRegsSet[] = { 0 };
- const unsigned D2_SubRegsSet[] = { ARM::S4, ARM::S5, 0 };
- const unsigned D20_SubRegsSet[] = { 0 };
- const unsigned D21_SubRegsSet[] = { 0 };
- const unsigned D22_SubRegsSet[] = { 0 };
- const unsigned D23_SubRegsSet[] = { 0 };
- const unsigned D24_SubRegsSet[] = { 0 };
- const unsigned D25_SubRegsSet[] = { 0 };
- const unsigned D26_SubRegsSet[] = { 0 };
- const unsigned D27_SubRegsSet[] = { 0 };
- const unsigned D28_SubRegsSet[] = { 0 };
- const unsigned D29_SubRegsSet[] = { 0 };
- const unsigned D3_SubRegsSet[] = { ARM::S6, ARM::S7, 0 };
- const unsigned D30_SubRegsSet[] = { 0 };
- const unsigned D31_SubRegsSet[] = { 0 };
- const unsigned D4_SubRegsSet[] = { ARM::S8, ARM::S9, 0 };
- const unsigned D5_SubRegsSet[] = { ARM::S10, ARM::S11, 0 };
- const unsigned D6_SubRegsSet[] = { ARM::S12, ARM::S13, 0 };
- const unsigned D7_SubRegsSet[] = { ARM::S14, ARM::S15, 0 };
- const unsigned D8_SubRegsSet[] = { ARM::S16, ARM::S17, 0 };
- const unsigned D9_SubRegsSet[] = { ARM::S18, ARM::S19, 0 };
- const unsigned FPSCR_SubRegsSet[] = { 0 };
- const unsigned LR_SubRegsSet[] = { 0 };
- const unsigned PC_SubRegsSet[] = { 0 };
- const unsigned Q0_SubRegsSet[] = { ARM::S0, ARM::S1, ARM::D1, ARM::S2, ARM::S3, ARM::D0, 0 };
- const unsigned Q1_SubRegsSet[] = { ARM::S4, ARM::S5, ARM::D3, ARM::S6, ARM::S7, ARM::D2, 0 };
- const unsigned Q10_SubRegsSet[] = { ARM::D20, ARM::D21, 0 };
- const unsigned Q11_SubRegsSet[] = { ARM::D22, ARM::D23, 0 };
- const unsigned Q12_SubRegsSet[] = { ARM::D24, ARM::D25, 0 };
- const unsigned Q13_SubRegsSet[] = { ARM::D26, ARM::D27, 0 };
- const unsigned Q14_SubRegsSet[] = { ARM::D28, ARM::D29, 0 };
- const unsigned Q15_SubRegsSet[] = { ARM::D30, ARM::D31, 0 };
- const unsigned Q2_SubRegsSet[] = { ARM::S8, ARM::S9, ARM::D5, ARM::S10, ARM::S11, ARM::D4, 0 };
- const unsigned Q3_SubRegsSet[] = { ARM::S12, ARM::S13, ARM::D7, ARM::S14, ARM::S15, ARM::D6, 0 };
- const unsigned Q4_SubRegsSet[] = { ARM::S16, ARM::S17, ARM::D9, ARM::S18, ARM::S19, ARM::D8, 0 };
- const unsigned Q5_SubRegsSet[] = { ARM::S20, ARM::S21, ARM::D11, ARM::S22, ARM::S23, ARM::D10, 0 };
- const unsigned Q6_SubRegsSet[] = { ARM::S24, ARM::S25, ARM::D13, ARM::S26, ARM::S27, ARM::D12, 0 };
- const unsigned Q7_SubRegsSet[] = { ARM::S28, ARM::S29, ARM::D15, ARM::S30, ARM::S31, ARM::D14, 0 };
- const unsigned Q8_SubRegsSet[] = { ARM::D16, ARM::D17, 0 };
- const unsigned Q9_SubRegsSet[] = { ARM::D18, ARM::D19, 0 };
- const unsigned R0_SubRegsSet[] = { 0 };
- const unsigned R1_SubRegsSet[] = { 0 };
- const unsigned R10_SubRegsSet[] = { 0 };
- const unsigned R11_SubRegsSet[] = { 0 };
- const unsigned R12_SubRegsSet[] = { 0 };
- const unsigned R2_SubRegsSet[] = { 0 };
- const unsigned R3_SubRegsSet[] = { 0 };
- const unsigned R4_SubRegsSet[] = { 0 };
- const unsigned R5_SubRegsSet[] = { 0 };
- const unsigned R6_SubRegsSet[] = { 0 };
- const unsigned R7_SubRegsSet[] = { 0 };
- const unsigned R8_SubRegsSet[] = { 0 };
- const unsigned R9_SubRegsSet[] = { 0 };
- const unsigned S0_SubRegsSet[] = { 0 };
- const unsigned S1_SubRegsSet[] = { 0 };
- const unsigned S10_SubRegsSet[] = { 0 };
- const unsigned S11_SubRegsSet[] = { 0 };
- const unsigned S12_SubRegsSet[] = { 0 };
- const unsigned S13_SubRegsSet[] = { 0 };
- const unsigned S14_SubRegsSet[] = { 0 };
- const unsigned S15_SubRegsSet[] = { 0 };
- const unsigned S16_SubRegsSet[] = { 0 };
- const unsigned S17_SubRegsSet[] = { 0 };
- const unsigned S18_SubRegsSet[] = { 0 };
- const unsigned S19_SubRegsSet[] = { 0 };
- const unsigned S2_SubRegsSet[] = { 0 };
- const unsigned S20_SubRegsSet[] = { 0 };
- const unsigned S21_SubRegsSet[] = { 0 };
- const unsigned S22_SubRegsSet[] = { 0 };
- const unsigned S23_SubRegsSet[] = { 0 };
- const unsigned S24_SubRegsSet[] = { 0 };
- const unsigned S25_SubRegsSet[] = { 0 };
- const unsigned S26_SubRegsSet[] = { 0 };
- const unsigned S27_SubRegsSet[] = { 0 };
- const unsigned S28_SubRegsSet[] = { 0 };
- const unsigned S29_SubRegsSet[] = { 0 };
- const unsigned S3_SubRegsSet[] = { 0 };
- const unsigned S30_SubRegsSet[] = { 0 };
- const unsigned S31_SubRegsSet[] = { 0 };
- const unsigned S4_SubRegsSet[] = { 0 };
- const unsigned S5_SubRegsSet[] = { 0 };
- const unsigned S6_SubRegsSet[] = { 0 };
- const unsigned S7_SubRegsSet[] = { 0 };
- const unsigned S8_SubRegsSet[] = { 0 };
- const unsigned S9_SubRegsSet[] = { 0 };
- const unsigned SDummy_SubRegsSet[] = { 0 };
- const unsigned SP_SubRegsSet[] = { 0 };
-
-
- // Register Super-registers Sets...
- const unsigned Empty_SuperRegsSet[] = { 0 };
- const unsigned CPSR_SuperRegsSet[] = { 0 };
- const unsigned D0_SuperRegsSet[] = { ARM::Q0, 0 };
- const unsigned D1_SuperRegsSet[] = { ARM::Q0, 0 };
- const unsigned D10_SuperRegsSet[] = { ARM::Q5, 0 };
- const unsigned D11_SuperRegsSet[] = { ARM::Q5, 0 };
- const unsigned D12_SuperRegsSet[] = { ARM::Q6, 0 };
- const unsigned D13_SuperRegsSet[] = { ARM::Q6, 0 };
- const unsigned D14_SuperRegsSet[] = { ARM::Q7, 0 };
- const unsigned D15_SuperRegsSet[] = { ARM::Q7, 0 };
- const unsigned D16_SuperRegsSet[] = { ARM::Q8, 0 };
- const unsigned D17_SuperRegsSet[] = { ARM::Q8, 0 };
- const unsigned D18_SuperRegsSet[] = { ARM::Q9, 0 };
- const unsigned D19_SuperRegsSet[] = { ARM::Q9, 0 };
- const unsigned D2_SuperRegsSet[] = { ARM::Q1, 0 };
- const unsigned D20_SuperRegsSet[] = { ARM::Q10, 0 };
- const unsigned D21_SuperRegsSet[] = { ARM::Q10, 0 };
- const unsigned D22_SuperRegsSet[] = { ARM::Q11, 0 };
- const unsigned D23_SuperRegsSet[] = { ARM::Q11, 0 };
- const unsigned D24_SuperRegsSet[] = { ARM::Q12, 0 };
- const unsigned D25_SuperRegsSet[] = { ARM::Q12, 0 };
- const unsigned D26_SuperRegsSet[] = { ARM::Q13, 0 };
- const unsigned D27_SuperRegsSet[] = { ARM::Q13, 0 };
- const unsigned D28_SuperRegsSet[] = { ARM::Q14, 0 };
- const unsigned D29_SuperRegsSet[] = { ARM::Q14, 0 };
- const unsigned D3_SuperRegsSet[] = { ARM::Q1, 0 };
- const unsigned D30_SuperRegsSet[] = { ARM::Q15, 0 };
- const unsigned D31_SuperRegsSet[] = { ARM::Q15, 0 };
- const unsigned D4_SuperRegsSet[] = { ARM::Q2, 0 };
- const unsigned D5_SuperRegsSet[] = { ARM::Q2, 0 };
- const unsigned D6_SuperRegsSet[] = { ARM::Q3, 0 };
- const unsigned D7_SuperRegsSet[] = { ARM::Q3, 0 };
- const unsigned D8_SuperRegsSet[] = { ARM::Q4, 0 };
- const unsigned D9_SuperRegsSet[] = { ARM::Q4, 0 };
- const unsigned FPSCR_SuperRegsSet[] = { 0 };
- const unsigned LR_SuperRegsSet[] = { 0 };
- const unsigned PC_SuperRegsSet[] = { 0 };
- const unsigned Q0_SuperRegsSet[] = { 0 };
- const unsigned Q1_SuperRegsSet[] = { 0 };
- const unsigned Q10_SuperRegsSet[] = { 0 };
- const unsigned Q11_SuperRegsSet[] = { 0 };
- const unsigned Q12_SuperRegsSet[] = { 0 };
- const unsigned Q13_SuperRegsSet[] = { 0 };
- const unsigned Q14_SuperRegsSet[] = { 0 };
- const unsigned Q15_SuperRegsSet[] = { 0 };
- const unsigned Q2_SuperRegsSet[] = { 0 };
- const unsigned Q3_SuperRegsSet[] = { 0 };
- const unsigned Q4_SuperRegsSet[] = { 0 };
- const unsigned Q5_SuperRegsSet[] = { 0 };
- const unsigned Q6_SuperRegsSet[] = { 0 };
- const unsigned Q7_SuperRegsSet[] = { 0 };
- const unsigned Q8_SuperRegsSet[] = { 0 };
- const unsigned Q9_SuperRegsSet[] = { 0 };
- const unsigned R0_SuperRegsSet[] = { 0 };
- const unsigned R1_SuperRegsSet[] = { 0 };
- const unsigned R10_SuperRegsSet[] = { 0 };
- const unsigned R11_SuperRegsSet[] = { 0 };
- const unsigned R12_SuperRegsSet[] = { 0 };
- const unsigned R2_SuperRegsSet[] = { 0 };
- const unsigned R3_SuperRegsSet[] = { 0 };
- const unsigned R4_SuperRegsSet[] = { 0 };
- const unsigned R5_SuperRegsSet[] = { 0 };
- const unsigned R6_SuperRegsSet[] = { 0 };
- const unsigned R7_SuperRegsSet[] = { 0 };
- const unsigned R8_SuperRegsSet[] = { 0 };
- const unsigned R9_SuperRegsSet[] = { 0 };
- const unsigned S0_SuperRegsSet[] = { ARM::Q0, ARM::D0, 0 };
- const unsigned S1_SuperRegsSet[] = { ARM::Q0, ARM::D0, 0 };
- const unsigned S10_SuperRegsSet[] = { ARM::Q2, ARM::D5, 0 };
- const unsigned S11_SuperRegsSet[] = { ARM::Q2, ARM::D5, 0 };
- const unsigned S12_SuperRegsSet[] = { ARM::Q3, ARM::D6, 0 };
- const unsigned S13_SuperRegsSet[] = { ARM::Q3, ARM::D6, 0 };
- const unsigned S14_SuperRegsSet[] = { ARM::Q3, ARM::D7, 0 };
- const unsigned S15_SuperRegsSet[] = { ARM::Q3, ARM::D7, 0 };
- const unsigned S16_SuperRegsSet[] = { ARM::Q4, ARM::D8, 0 };
- const unsigned S17_SuperRegsSet[] = { ARM::Q4, ARM::D8, 0 };
- const unsigned S18_SuperRegsSet[] = { ARM::Q4, ARM::D9, 0 };
- const unsigned S19_SuperRegsSet[] = { ARM::Q4, ARM::D9, 0 };
- const unsigned S2_SuperRegsSet[] = { ARM::Q0, ARM::D1, 0 };
- const unsigned S20_SuperRegsSet[] = { ARM::Q5, ARM::D10, 0 };
- const unsigned S21_SuperRegsSet[] = { ARM::Q5, ARM::D10, 0 };
- const unsigned S22_SuperRegsSet[] = { ARM::Q5, ARM::D11, 0 };
- const unsigned S23_SuperRegsSet[] = { ARM::Q5, ARM::D11, 0 };
- const unsigned S24_SuperRegsSet[] = { ARM::Q6, ARM::D12, 0 };
- const unsigned S25_SuperRegsSet[] = { ARM::Q6, ARM::D12, 0 };
- const unsigned S26_SuperRegsSet[] = { ARM::Q6, ARM::D13, 0 };
- const unsigned S27_SuperRegsSet[] = { ARM::Q6, ARM::D13, 0 };
- const unsigned S28_SuperRegsSet[] = { ARM::Q7, ARM::D14, 0 };
- const unsigned S29_SuperRegsSet[] = { ARM::Q7, ARM::D14, 0 };
- const unsigned S3_SuperRegsSet[] = { ARM::Q0, ARM::D1, 0 };
- const unsigned S30_SuperRegsSet[] = { ARM::Q7, ARM::D15, 0 };
- const unsigned S31_SuperRegsSet[] = { ARM::Q7, ARM::D15, 0 };
- const unsigned S4_SuperRegsSet[] = { ARM::Q1, ARM::D2, 0 };
- const unsigned S5_SuperRegsSet[] = { ARM::Q1, ARM::D2, 0 };
- const unsigned S6_SuperRegsSet[] = { ARM::Q1, ARM::D3, 0 };
- const unsigned S7_SuperRegsSet[] = { ARM::Q1, ARM::D3, 0 };
- const unsigned S8_SuperRegsSet[] = { ARM::Q2, ARM::D4, 0 };
- const unsigned S9_SuperRegsSet[] = { ARM::Q2, ARM::D4, 0 };
- const unsigned SDummy_SuperRegsSet[] = { 0 };
- const unsigned SP_SuperRegsSet[] = { 0 };
-
- const TargetRegisterDesc RegisterDescriptors[] = { // Descriptors
- { "NOREG", 0, 0, 0 },
- { "CPSR", CPSR_AliasSet, CPSR_SubRegsSet, CPSR_SuperRegsSet },
- { "D0", D0_AliasSet, D0_SubRegsSet, D0_SuperRegsSet },
- { "D1", D1_AliasSet, D1_SubRegsSet, D1_SuperRegsSet },
- { "D10", D10_AliasSet, D10_SubRegsSet, D10_SuperRegsSet },
- { "D11", D11_AliasSet, D11_SubRegsSet, D11_SuperRegsSet },
- { "D12", D12_AliasSet, D12_SubRegsSet, D12_SuperRegsSet },
- { "D13", D13_AliasSet, D13_SubRegsSet, D13_SuperRegsSet },
- { "D14", D14_AliasSet, D14_SubRegsSet, D14_SuperRegsSet },
- { "D15", D15_AliasSet, D15_SubRegsSet, D15_SuperRegsSet },
- { "D16", D16_AliasSet, D16_SubRegsSet, D16_SuperRegsSet },
- { "D17", D17_AliasSet, D17_SubRegsSet, D17_SuperRegsSet },
- { "D18", D18_AliasSet, D18_SubRegsSet, D18_SuperRegsSet },
- { "D19", D19_AliasSet, D19_SubRegsSet, D19_SuperRegsSet },
- { "D2", D2_AliasSet, D2_SubRegsSet, D2_SuperRegsSet },
- { "D20", D20_AliasSet, D20_SubRegsSet, D20_SuperRegsSet },
- { "D21", D21_AliasSet, D21_SubRegsSet, D21_SuperRegsSet },
- { "D22", D22_AliasSet, D22_SubRegsSet, D22_SuperRegsSet },
- { "D23", D23_AliasSet, D23_SubRegsSet, D23_SuperRegsSet },
- { "D24", D24_AliasSet, D24_SubRegsSet, D24_SuperRegsSet },
- { "D25", D25_AliasSet, D25_SubRegsSet, D25_SuperRegsSet },
- { "D26", D26_AliasSet, D26_SubRegsSet, D26_SuperRegsSet },
- { "D27", D27_AliasSet, D27_SubRegsSet, D27_SuperRegsSet },
- { "D28", D28_AliasSet, D28_SubRegsSet, D28_SuperRegsSet },
- { "D29", D29_AliasSet, D29_SubRegsSet, D29_SuperRegsSet },
- { "D3", D3_AliasSet, D3_SubRegsSet, D3_SuperRegsSet },
- { "D30", D30_AliasSet, D30_SubRegsSet, D30_SuperRegsSet },
- { "D31", D31_AliasSet, D31_SubRegsSet, D31_SuperRegsSet },
- { "D4", D4_AliasSet, D4_SubRegsSet, D4_SuperRegsSet },
- { "D5", D5_AliasSet, D5_SubRegsSet, D5_SuperRegsSet },
- { "D6", D6_AliasSet, D6_SubRegsSet, D6_SuperRegsSet },
- { "D7", D7_AliasSet, D7_SubRegsSet, D7_SuperRegsSet },
- { "D8", D8_AliasSet, D8_SubRegsSet, D8_SuperRegsSet },
- { "D9", D9_AliasSet, D9_SubRegsSet, D9_SuperRegsSet },
- { "FPSCR", FPSCR_AliasSet, FPSCR_SubRegsSet, FPSCR_SuperRegsSet },
- { "LR", LR_AliasSet, LR_SubRegsSet, LR_SuperRegsSet },
- { "PC", PC_AliasSet, PC_SubRegsSet, PC_SuperRegsSet },
- { "Q0", Q0_AliasSet, Q0_SubRegsSet, Q0_SuperRegsSet },
- { "Q1", Q1_AliasSet, Q1_SubRegsSet, Q1_SuperRegsSet },
- { "Q10", Q10_AliasSet, Q10_SubRegsSet, Q10_SuperRegsSet },
- { "Q11", Q11_AliasSet, Q11_SubRegsSet, Q11_SuperRegsSet },
- { "Q12", Q12_AliasSet, Q12_SubRegsSet, Q12_SuperRegsSet },
- { "Q13", Q13_AliasSet, Q13_SubRegsSet, Q13_SuperRegsSet },
- { "Q14", Q14_AliasSet, Q14_SubRegsSet, Q14_SuperRegsSet },
- { "Q15", Q15_AliasSet, Q15_SubRegsSet, Q15_SuperRegsSet },
- { "Q2", Q2_AliasSet, Q2_SubRegsSet, Q2_SuperRegsSet },
- { "Q3", Q3_AliasSet, Q3_SubRegsSet, Q3_SuperRegsSet },
- { "Q4", Q4_AliasSet, Q4_SubRegsSet, Q4_SuperRegsSet },
- { "Q5", Q5_AliasSet, Q5_SubRegsSet, Q5_SuperRegsSet },
- { "Q6", Q6_AliasSet, Q6_SubRegsSet, Q6_SuperRegsSet },
- { "Q7", Q7_AliasSet, Q7_SubRegsSet, Q7_SuperRegsSet },
- { "Q8", Q8_AliasSet, Q8_SubRegsSet, Q8_SuperRegsSet },
- { "Q9", Q9_AliasSet, Q9_SubRegsSet, Q9_SuperRegsSet },
- { "R0", R0_AliasSet, R0_SubRegsSet, R0_SuperRegsSet },
- { "R1", R1_AliasSet, R1_SubRegsSet, R1_SuperRegsSet },
- { "R10", R10_AliasSet, R10_SubRegsSet, R10_SuperRegsSet },
- { "R11", R11_AliasSet, R11_SubRegsSet, R11_SuperRegsSet },
- { "R12", R12_AliasSet, R12_SubRegsSet, R12_SuperRegsSet },
- { "R2", R2_AliasSet, R2_SubRegsSet, R2_SuperRegsSet },
- { "R3", R3_AliasSet, R3_SubRegsSet, R3_SuperRegsSet },
- { "R4", R4_AliasSet, R4_SubRegsSet, R4_SuperRegsSet },
- { "R5", R5_AliasSet, R5_SubRegsSet, R5_SuperRegsSet },
- { "R6", R6_AliasSet, R6_SubRegsSet, R6_SuperRegsSet },
- { "R7", R7_AliasSet, R7_SubRegsSet, R7_SuperRegsSet },
- { "R8", R8_AliasSet, R8_SubRegsSet, R8_SuperRegsSet },
- { "R9", R9_AliasSet, R9_SubRegsSet, R9_SuperRegsSet },
- { "S0", S0_AliasSet, S0_SubRegsSet, S0_SuperRegsSet },
- { "S1", S1_AliasSet, S1_SubRegsSet, S1_SuperRegsSet },
- { "S10", S10_AliasSet, S10_SubRegsSet, S10_SuperRegsSet },
- { "S11", S11_AliasSet, S11_SubRegsSet, S11_SuperRegsSet },
- { "S12", S12_AliasSet, S12_SubRegsSet, S12_SuperRegsSet },
- { "S13", S13_AliasSet, S13_SubRegsSet, S13_SuperRegsSet },
- { "S14", S14_AliasSet, S14_SubRegsSet, S14_SuperRegsSet },
- { "S15", S15_AliasSet, S15_SubRegsSet, S15_SuperRegsSet },
- { "S16", S16_AliasSet, S16_SubRegsSet, S16_SuperRegsSet },
- { "S17", S17_AliasSet, S17_SubRegsSet, S17_SuperRegsSet },
- { "S18", S18_AliasSet, S18_SubRegsSet, S18_SuperRegsSet },
- { "S19", S19_AliasSet, S19_SubRegsSet, S19_SuperRegsSet },
- { "S2", S2_AliasSet, S2_SubRegsSet, S2_SuperRegsSet },
- { "S20", S20_AliasSet, S20_SubRegsSet, S20_SuperRegsSet },
- { "S21", S21_AliasSet, S21_SubRegsSet, S21_SuperRegsSet },
- { "S22", S22_AliasSet, S22_SubRegsSet, S22_SuperRegsSet },
- { "S23", S23_AliasSet, S23_SubRegsSet, S23_SuperRegsSet },
- { "S24", S24_AliasSet, S24_SubRegsSet, S24_SuperRegsSet },
- { "S25", S25_AliasSet, S25_SubRegsSet, S25_SuperRegsSet },
- { "S26", S26_AliasSet, S26_SubRegsSet, S26_SuperRegsSet },
- { "S27", S27_AliasSet, S27_SubRegsSet, S27_SuperRegsSet },
- { "S28", S28_AliasSet, S28_SubRegsSet, S28_SuperRegsSet },
- { "S29", S29_AliasSet, S29_SubRegsSet, S29_SuperRegsSet },
- { "S3", S3_AliasSet, S3_SubRegsSet, S3_SuperRegsSet },
- { "S30", S30_AliasSet, S30_SubRegsSet, S30_SuperRegsSet },
- { "S31", S31_AliasSet, S31_SubRegsSet, S31_SuperRegsSet },
- { "S4", S4_AliasSet, S4_SubRegsSet, S4_SuperRegsSet },
- { "S5", S5_AliasSet, S5_SubRegsSet, S5_SuperRegsSet },
- { "S6", S6_AliasSet, S6_SubRegsSet, S6_SuperRegsSet },
- { "S7", S7_AliasSet, S7_SubRegsSet, S7_SuperRegsSet },
- { "S8", S8_AliasSet, S8_SubRegsSet, S8_SuperRegsSet },
- { "S9", S9_AliasSet, S9_SubRegsSet, S9_SuperRegsSet },
- { "SDummy", SDummy_AliasSet, SDummy_SubRegsSet, SDummy_SuperRegsSet },
- { "SP", SP_AliasSet, SP_SubRegsSet, SP_SuperRegsSet },
- };
-}
-
-unsigned ARMGenRegisterInfo::getSubReg(unsigned RegNo, unsigned Index) const {
- switch (RegNo) {
- default:
- return 0;
- case ARM::D0:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S0;
- case 2: return ARM::S1;
- };
- break;
- case ARM::D1:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S2;
- case 2: return ARM::S3;
- };
- break;
- case ARM::D2:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S4;
- case 2: return ARM::S5;
- };
- break;
- case ARM::D3:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S6;
- case 2: return ARM::S7;
- };
- break;
- case ARM::D4:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S8;
- case 2: return ARM::S9;
- };
- break;
- case ARM::D5:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S10;
- case 2: return ARM::S11;
- };
- break;
- case ARM::D6:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S12;
- case 2: return ARM::S13;
- };
- break;
- case ARM::D7:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S14;
- case 2: return ARM::S15;
- };
- break;
- case ARM::D8:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S16;
- case 2: return ARM::S17;
- };
- break;
- case ARM::D9:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S18;
- case 2: return ARM::S19;
- };
- break;
- case ARM::D10:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S20;
- case 2: return ARM::S21;
- };
- break;
- case ARM::D11:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S22;
- case 2: return ARM::S23;
- };
- break;
- case ARM::D12:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S24;
- case 2: return ARM::S25;
- };
- break;
- case ARM::D13:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S26;
- case 2: return ARM::S27;
- };
- break;
- case ARM::D14:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S28;
- case 2: return ARM::S29;
- };
- break;
- case ARM::D15:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S30;
- case 2: return ARM::S31;
- };
- break;
- case ARM::Q0:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S0;
- case 2: return ARM::S1;
- case 3: return ARM::S2;
- case 4: return ARM::S3;
- case 5: return ARM::D0;
- case 6: return ARM::D1;
- };
- break;
- case ARM::Q1:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S4;
- case 2: return ARM::S5;
- case 3: return ARM::S6;
- case 4: return ARM::S7;
- case 5: return ARM::D2;
- case 6: return ARM::D3;
- };
- break;
- case ARM::Q2:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S8;
- case 2: return ARM::S9;
- case 3: return ARM::S10;
- case 4: return ARM::S11;
- case 5: return ARM::D4;
- case 6: return ARM::D5;
- };
- break;
- case ARM::Q3:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S12;
- case 2: return ARM::S13;
- case 3: return ARM::S14;
- case 4: return ARM::S15;
- case 5: return ARM::D6;
- case 6: return ARM::D7;
- };
- break;
- case ARM::Q4:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S16;
- case 2: return ARM::S17;
- case 3: return ARM::S18;
- case 4: return ARM::S19;
- case 5: return ARM::D8;
- case 6: return ARM::D9;
- };
- break;
- case ARM::Q5:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S20;
- case 2: return ARM::S21;
- case 3: return ARM::S22;
- case 4: return ARM::S23;
- case 5: return ARM::D10;
- case 6: return ARM::D11;
- };
- break;
- case ARM::Q6:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S24;
- case 2: return ARM::S25;
- case 3: return ARM::S26;
- case 4: return ARM::S27;
- case 5: return ARM::D12;
- case 6: return ARM::D13;
- };
- break;
- case ARM::Q7:
- switch (Index) {
- default: return 0;
- case 1: return ARM::S28;
- case 2: return ARM::S29;
- case 3: return ARM::S30;
- case 4: return ARM::S31;
- case 5: return ARM::D14;
- case 6: return ARM::D15;
- };
- break;
- case ARM::Q8:
- switch (Index) {
- default: return 0;
- case 5: return ARM::D16;
- case 6: return ARM::D17;
- };
- break;
- case ARM::Q9:
- switch (Index) {
- default: return 0;
- case 5: return ARM::D18;
- case 6: return ARM::D19;
- };
- break;
- case ARM::Q10:
- switch (Index) {
- default: return 0;
- case 5: return ARM::D20;
- case 6: return ARM::D21;
- };
- break;
- case ARM::Q11:
- switch (Index) {
- default: return 0;
- case 5: return ARM::D22;
- case 6: return ARM::D23;
- };
- break;
- case ARM::Q12:
- switch (Index) {
- default: return 0;
- case 5: return ARM::D24;
- case 6: return ARM::D25;
- };
- break;
- case ARM::Q13:
- switch (Index) {
- default: return 0;
- case 5: return ARM::D26;
- case 6: return ARM::D27;
- };
- break;
- case ARM::Q14:
- switch (Index) {
- default: return 0;
- case 5: return ARM::D28;
- case 6: return ARM::D29;
- };
- break;
- case ARM::Q15:
- switch (Index) {
- default: return 0;
- case 5: return ARM::D30;
- case 6: return ARM::D31;
- };
- break;
- };
- return 0;
-}
-
-unsigned ARMGenRegisterInfo::getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const {
- switch (RegNo) {
- default:
- return 0;
- case ARM::D0:
- if (SubRegNo == ARM::S0) return 1;
- if (SubRegNo == ARM::S1) return 2;
- return 0;
- case ARM::D1:
- if (SubRegNo == ARM::S2) return 1;
- if (SubRegNo == ARM::S3) return 2;
- return 0;
- case ARM::D2:
- if (SubRegNo == ARM::S4) return 1;
- if (SubRegNo == ARM::S5) return 2;
- return 0;
- case ARM::D3:
- if (SubRegNo == ARM::S6) return 1;
- if (SubRegNo == ARM::S7) return 2;
- return 0;
- case ARM::D4:
- if (SubRegNo == ARM::S8) return 1;
- if (SubRegNo == ARM::S9) return 2;
- return 0;
- case ARM::D5:
- if (SubRegNo == ARM::S10) return 1;
- if (SubRegNo == ARM::S11) return 2;
- return 0;
- case ARM::D6:
- if (SubRegNo == ARM::S12) return 1;
- if (SubRegNo == ARM::S13) return 2;
- return 0;
- case ARM::D7:
- if (SubRegNo == ARM::S14) return 1;
- if (SubRegNo == ARM::S15) return 2;
- return 0;
- case ARM::D8:
- if (SubRegNo == ARM::S16) return 1;
- if (SubRegNo == ARM::S17) return 2;
- return 0;
- case ARM::D9:
- if (SubRegNo == ARM::S18) return 1;
- if (SubRegNo == ARM::S19) return 2;
- return 0;
- case ARM::D10:
- if (SubRegNo == ARM::S20) return 1;
- if (SubRegNo == ARM::S21) return 2;
- return 0;
- case ARM::D11:
- if (SubRegNo == ARM::S22) return 1;
- if (SubRegNo == ARM::S23) return 2;
- return 0;
- case ARM::D12:
- if (SubRegNo == ARM::S24) return 1;
- if (SubRegNo == ARM::S25) return 2;
- return 0;
- case ARM::D13:
- if (SubRegNo == ARM::S26) return 1;
- if (SubRegNo == ARM::S27) return 2;
- return 0;
- case ARM::D14:
- if (SubRegNo == ARM::S28) return 1;
- if (SubRegNo == ARM::S29) return 2;
- return 0;
- case ARM::D15:
- if (SubRegNo == ARM::S30) return 1;
- if (SubRegNo == ARM::S31) return 2;
- return 0;
- case ARM::Q0:
- if (SubRegNo == ARM::S0) return 1;
- if (SubRegNo == ARM::S1) return 2;
- if (SubRegNo == ARM::S2) return 3;
- if (SubRegNo == ARM::S3) return 4;
- if (SubRegNo == ARM::D0) return 5;
- if (SubRegNo == ARM::D1) return 6;
- return 0;
- case ARM::Q1:
- if (SubRegNo == ARM::S4) return 1;
- if (SubRegNo == ARM::S5) return 2;
- if (SubRegNo == ARM::S6) return 3;
- if (SubRegNo == ARM::S7) return 4;
- if (SubRegNo == ARM::D2) return 5;
- if (SubRegNo == ARM::D3) return 6;
- return 0;
- case ARM::Q2:
- if (SubRegNo == ARM::S8) return 1;
- if (SubRegNo == ARM::S9) return 2;
- if (SubRegNo == ARM::S10) return 3;
- if (SubRegNo == ARM::S11) return 4;
- if (SubRegNo == ARM::D4) return 5;
- if (SubRegNo == ARM::D5) return 6;
- return 0;
- case ARM::Q3:
- if (SubRegNo == ARM::S12) return 1;
- if (SubRegNo == ARM::S13) return 2;
- if (SubRegNo == ARM::S14) return 3;
- if (SubRegNo == ARM::S15) return 4;
- if (SubRegNo == ARM::D6) return 5;
- if (SubRegNo == ARM::D7) return 6;
- return 0;
- case ARM::Q4:
- if (SubRegNo == ARM::S16) return 1;
- if (SubRegNo == ARM::S17) return 2;
- if (SubRegNo == ARM::S18) return 3;
- if (SubRegNo == ARM::S19) return 4;
- if (SubRegNo == ARM::D8) return 5;
- if (SubRegNo == ARM::D9) return 6;
- return 0;
- case ARM::Q5:
- if (SubRegNo == ARM::S20) return 1;
- if (SubRegNo == ARM::S21) return 2;
- if (SubRegNo == ARM::S22) return 3;
- if (SubRegNo == ARM::S23) return 4;
- if (SubRegNo == ARM::D10) return 5;
- if (SubRegNo == ARM::D11) return 6;
- return 0;
- case ARM::Q6:
- if (SubRegNo == ARM::S24) return 1;
- if (SubRegNo == ARM::S25) return 2;
- if (SubRegNo == ARM::S26) return 3;
- if (SubRegNo == ARM::S27) return 4;
- if (SubRegNo == ARM::D12) return 5;
- if (SubRegNo == ARM::D13) return 6;
- return 0;
- case ARM::Q7:
- if (SubRegNo == ARM::S28) return 1;
- if (SubRegNo == ARM::S29) return 2;
- if (SubRegNo == ARM::S30) return 3;
- if (SubRegNo == ARM::S31) return 4;
- if (SubRegNo == ARM::D14) return 5;
- if (SubRegNo == ARM::D15) return 6;
- return 0;
- case ARM::Q8:
- if (SubRegNo == ARM::D16) return 5;
- if (SubRegNo == ARM::D17) return 6;
- return 0;
- case ARM::Q9:
- if (SubRegNo == ARM::D18) return 5;
- if (SubRegNo == ARM::D19) return 6;
- return 0;
- case ARM::Q10:
- if (SubRegNo == ARM::D20) return 5;
- if (SubRegNo == ARM::D21) return 6;
- return 0;
- case ARM::Q11:
- if (SubRegNo == ARM::D22) return 5;
- if (SubRegNo == ARM::D23) return 6;
- return 0;
- case ARM::Q12:
- if (SubRegNo == ARM::D24) return 5;
- if (SubRegNo == ARM::D25) return 6;
- return 0;
- case ARM::Q13:
- if (SubRegNo == ARM::D26) return 5;
- if (SubRegNo == ARM::D27) return 6;
- return 0;
- case ARM::Q14:
- if (SubRegNo == ARM::D28) return 5;
- if (SubRegNo == ARM::D29) return 6;
- return 0;
- case ARM::Q15:
- if (SubRegNo == ARM::D30) return 5;
- if (SubRegNo == ARM::D31) return 6;
- return 0;
- };
- return 0;
-}
-
-ARMGenRegisterInfo::ARMGenRegisterInfo(int CallFrameSetupOpcode, int CallFrameDestroyOpcode)
- : TargetRegisterInfo(RegisterDescriptors, 100, RegisterClasses, RegisterClasses+12,
- CallFrameSetupOpcode, CallFrameDestroyOpcode,
- SubregHashTable, SubregHashTableSize,
- SuperregHashTable, SuperregHashTableSize,
- AliasesHashTable, AliasesHashTableSize) {
-}
-
-int ARMGenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) const {
- switch (Flavour) {
- default:
- assert(0 && "Unknown DWARF flavour");
- return -1;
- case 0:
- switch (RegNum) {
- default:
- assert(0 && "Invalid RegNum");
- return -1;
- case ARM::CPSR:
- return -1;
- case ARM::D0:
- return -1;
- case ARM::D1:
- return -1;
- case ARM::D10:
- return -1;
- case ARM::D11:
- return -1;
- case ARM::D12:
- return -1;
- case ARM::D13:
- return -1;
- case ARM::D14:
- return -1;
- case ARM::D15:
- return -1;
- case ARM::D16:
- return -1;
- case ARM::D17:
- return -1;
- case ARM::D18:
- return -1;
- case ARM::D19:
- return -1;
- case ARM::D2:
- return -1;
- case ARM::D20:
- return -1;
- case ARM::D21:
- return -1;
- case ARM::D22:
- return -1;
- case ARM::D23:
- return -1;
- case ARM::D24:
- return -1;
- case ARM::D25:
- return -1;
- case ARM::D26:
- return -1;
- case ARM::D27:
- return -1;
- case ARM::D28:
- return -1;
- case ARM::D29:
- return -1;
- case ARM::D3:
- return -1;
- case ARM::D30:
- return -1;
- case ARM::D31:
- return -1;
- case ARM::D4:
- return -1;
- case ARM::D5:
- return -1;
- case ARM::D6:
- return -1;
- case ARM::D7:
- return -1;
- case ARM::D8:
- return -1;
- case ARM::D9:
- return -1;
- case ARM::FPSCR:
- return -1;
- case ARM::LR:
- return 14;
- case ARM::PC:
- return 15;
- case ARM::Q0:
- return -1;
- case ARM::Q1:
- return -1;
- case ARM::Q10:
- return -1;
- case ARM::Q11:
- return -1;
- case ARM::Q12:
- return -1;
- case ARM::Q13:
- return -1;
- case ARM::Q14:
- return -1;
- case ARM::Q15:
- return -1;
- case ARM::Q2:
- return -1;
- case ARM::Q3:
- return -1;
- case ARM::Q4:
- return -1;
- case ARM::Q5:
- return -1;
- case ARM::Q6:
- return -1;
- case ARM::Q7:
- return -1;
- case ARM::Q8:
- return -1;
- case ARM::Q9:
- return -1;
- case ARM::R0:
- return 0;
- case ARM::R1:
- return 1;
- case ARM::R10:
- return 10;
- case ARM::R11:
- return 11;
- case ARM::R12:
- return 12;
- case ARM::R2:
- return 2;
- case ARM::R3:
- return 3;
- case ARM::R4:
- return 4;
- case ARM::R5:
- return 5;
- case ARM::R6:
- return 6;
- case ARM::R7:
- return 7;
- case ARM::R8:
- return 8;
- case ARM::R9:
- return 9;
- case ARM::S0:
- return -1;
- case ARM::S1:
- return -1;
- case ARM::S10:
- return -1;
- case ARM::S11:
- return -1;
- case ARM::S12:
- return -1;
- case ARM::S13:
- return -1;
- case ARM::S14:
- return -1;
- case ARM::S15:
- return -1;
- case ARM::S16:
- return -1;
- case ARM::S17:
- return -1;
- case ARM::S18:
- return -1;
- case ARM::S19:
- return -1;
- case ARM::S2:
- return -1;
- case ARM::S20:
- return -1;
- case ARM::S21:
- return -1;
- case ARM::S22:
- return -1;
- case ARM::S23:
- return -1;
- case ARM::S24:
- return -1;
- case ARM::S25:
- return -1;
- case ARM::S26:
- return -1;
- case ARM::S27:
- return -1;
- case ARM::S28:
- return -1;
- case ARM::S29:
- return -1;
- case ARM::S3:
- return -1;
- case ARM::S30:
- return -1;
- case ARM::S31:
- return -1;
- case ARM::S4:
- return -1;
- case ARM::S5:
- return -1;
- case ARM::S6:
- return -1;
- case ARM::S7:
- return -1;
- case ARM::S8:
- return -1;
- case ARM::S9:
- return -1;
- case ARM::SDummy:
- return -1;
- case ARM::SP:
- return 13;
- };
- };
-}
-
-} // End llvm namespace
diff --git a/libclamav/c++/ARMGenRegisterNames.inc b/libclamav/c++/ARMGenRegisterNames.inc
deleted file mode 100644
index 17ad868..0000000
--- a/libclamav/c++/ARMGenRegisterNames.inc
+++ /dev/null
@@ -1,116 +0,0 @@
-//===- TableGen'erated file -------------------------------------*- C++ -*-===//
-//
-// Target Register Enum Values
-//
-// Automatically generated file, do not edit!
-//
-//===----------------------------------------------------------------------===//
-
-namespace llvm {
-
-namespace ARM {
- enum {
- NoRegister,
- CPSR, // 1
- D0, // 2
- D1, // 3
- D10, // 4
- D11, // 5
- D12, // 6
- D13, // 7
- D14, // 8
- D15, // 9
- D16, // 10
- D17, // 11
- D18, // 12
- D19, // 13
- D2, // 14
- D20, // 15
- D21, // 16
- D22, // 17
- D23, // 18
- D24, // 19
- D25, // 20
- D26, // 21
- D27, // 22
- D28, // 23
- D29, // 24
- D3, // 25
- D30, // 26
- D31, // 27
- D4, // 28
- D5, // 29
- D6, // 30
- D7, // 31
- D8, // 32
- D9, // 33
- FPSCR, // 34
- LR, // 35
- PC, // 36
- Q0, // 37
- Q1, // 38
- Q10, // 39
- Q11, // 40
- Q12, // 41
- Q13, // 42
- Q14, // 43
- Q15, // 44
- Q2, // 45
- Q3, // 46
- Q4, // 47
- Q5, // 48
- Q6, // 49
- Q7, // 50
- Q8, // 51
- Q9, // 52
- R0, // 53
- R1, // 54
- R10, // 55
- R11, // 56
- R12, // 57
- R2, // 58
- R3, // 59
- R4, // 60
- R5, // 61
- R6, // 62
- R7, // 63
- R8, // 64
- R9, // 65
- S0, // 66
- S1, // 67
- S10, // 68
- S11, // 69
- S12, // 70
- S13, // 71
- S14, // 72
- S15, // 73
- S16, // 74
- S17, // 75
- S18, // 76
- S19, // 77
- S2, // 78
- S20, // 79
- S21, // 80
- S22, // 81
- S23, // 82
- S24, // 83
- S25, // 84
- S26, // 85
- S27, // 86
- S28, // 87
- S29, // 88
- S3, // 89
- S30, // 90
- S31, // 91
- S4, // 92
- S5, // 93
- S6, // 94
- S7, // 95
- S8, // 96
- S9, // 97
- SDummy, // 98
- SP, // 99
- NUM_TARGET_REGS // 100
- };
-}
-} // End llvm namespace
diff --git a/libclamav/c++/ARMGenSubtarget.inc b/libclamav/c++/ARMGenSubtarget.inc
deleted file mode 100644
index c70c950..0000000
--- a/libclamav/c++/ARMGenSubtarget.inc
+++ /dev/null
@@ -1,703 +0,0 @@
-//===- TableGen'erated file -------------------------------------*- C++ -*-===//
-//
-// Subtarget Enumeration Source Fragment
-//
-// Automatically generated file, do not edit!
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/SubtargetFeature.h"
-#include "llvm/Target/TargetInstrItineraries.h"
-
-enum {
- FU_Issue = 1 << 0,
- FU_LdSt0 = 1 << 1,
- FU_LdSt1 = 1 << 2,
- FU_NLSPipe = 1 << 3,
- FU_NPipe = 1 << 4,
- FU_Pipe0 = 1 << 5,
- FU_Pipe1 = 1 << 6
-};
-
-enum {
- ArchV4T = 1 << 0,
- ArchV5T = 1 << 1,
- ArchV5TE = 1 << 2,
- ArchV6 = 1 << 3,
- ArchV6T2 = 1 << 4,
- ArchV7A = 1 << 5,
- FeatureNEON = 1 << 6,
- FeatureThumb2 = 1 << 7,
- FeatureVFP2 = 1 << 8,
- FeatureVFP3 = 1 << 9
-};
-
-// Sorted (by key) array of values for CPU features.
-static const llvm::SubtargetFeatureKV FeatureKV[] = {
- { "neon", "Enable NEON instructions", FeatureNEON, 0 },
- { "thumb2", "Enable Thumb2 instructions", FeatureThumb2, 0 },
- { "v4t", "ARM v4T", ArchV4T, 0 },
- { "v5t", "ARM v5T", ArchV5T, 0 },
- { "v5te", "ARM v5TE, v5TEj, v5TExp", ArchV5TE, 0 },
- { "v6", "ARM v6", ArchV6, 0 },
- { "v6t2", "ARM v6t2", ArchV6T2, 0 },
- { "v7a", "ARM v7A", ArchV7A, 0 },
- { "vfp2", "Enable VFP2 instructions", FeatureVFP2, 0 },
- { "vfp3", "Enable VFP3 instructions", FeatureVFP3, 0 }
-};
-
-enum {
- FeatureKVSize = sizeof(FeatureKV)/sizeof(llvm::SubtargetFeatureKV)
-};
-
-// Sorted (by key) array of values for CPU subtype.
-static const llvm::SubtargetFeatureKV SubTypeKV[] = {
- { "arm1020e", "Select the arm1020e processor", ArchV5TE, 0 },
- { "arm1020t", "Select the arm1020t processor", ArchV5T, 0 },
- { "arm1022e", "Select the arm1022e processor", ArchV5TE, 0 },
- { "arm10e", "Select the arm10e processor", ArchV5TE, 0 },
- { "arm10tdmi", "Select the arm10tdmi processor", ArchV5T, 0 },
- { "arm1136j-s", "Select the arm1136j-s processor", ArchV6, 0 },
- { "arm1136jf-s", "Select the arm1136jf-s processor", ArchV6 | FeatureVFP2, 0 },
- { "arm1156t2-s", "Select the arm1156t2-s processor", ArchV6T2 | FeatureThumb2, 0 },
- { "arm1156t2f-s", "Select the arm1156t2f-s processor", ArchV6T2 | FeatureThumb2 | FeatureVFP2, 0 },
- { "arm1176jz-s", "Select the arm1176jz-s processor", ArchV6, 0 },
- { "arm1176jzf-s", "Select the arm1176jzf-s processor", ArchV6 | FeatureVFP2, 0 },
- { "arm710t", "Select the arm710t processor", ArchV4T, 0 },
- { "arm720t", "Select the arm720t processor", ArchV4T, 0 },
- { "arm7tdmi", "Select the arm7tdmi processor", ArchV4T, 0 },
- { "arm7tdmi-s", "Select the arm7tdmi-s processor", ArchV4T, 0 },
- { "arm8", "Select the arm8 processor", 0, 0 },
- { "arm810", "Select the arm810 processor", 0, 0 },
- { "arm9", "Select the arm9 processor", ArchV4T, 0 },
- { "arm920", "Select the arm920 processor", ArchV4T, 0 },
- { "arm920t", "Select the arm920t processor", ArchV4T, 0 },
- { "arm922t", "Select the arm922t processor", ArchV4T, 0 },
- { "arm926ej-s", "Select the arm926ej-s processor", ArchV5TE, 0 },
- { "arm940t", "Select the arm940t processor", ArchV4T, 0 },
- { "arm946e-s", "Select the arm946e-s processor", ArchV5TE, 0 },
- { "arm966e-s", "Select the arm966e-s processor", ArchV5TE, 0 },
- { "arm968e-s", "Select the arm968e-s processor", ArchV5TE, 0 },
- { "arm9e", "Select the arm9e processor", ArchV5TE, 0 },
- { "arm9tdmi", "Select the arm9tdmi processor", ArchV4T, 0 },
- { "cortex-a8", "Select the cortex-a8 processor", ArchV7A | FeatureThumb2 | FeatureNEON, 0 },
- { "cortex-a9", "Select the cortex-a9 processor", ArchV7A | FeatureThumb2 | FeatureNEON, 0 },
- { "ep9312", "Select the ep9312 processor", ArchV4T, 0 },
- { "generic", "Select the generic processor", 0, 0 },
- { "iwmmxt", "Select the iwmmxt processor", ArchV5TE, 0 },
- { "mpcore", "Select the mpcore processor", ArchV6 | FeatureVFP2, 0 },
- { "mpcorenovfp", "Select the mpcorenovfp processor", ArchV6, 0 },
- { "strongarm", "Select the strongarm processor", 0, 0 },
- { "strongarm110", "Select the strongarm110 processor", 0, 0 },
- { "strongarm1100", "Select the strongarm1100 processor", 0, 0 },
- { "strongarm1110", "Select the strongarm1110 processor", 0, 0 },
- { "xscale", "Select the xscale processor", ArchV5TE, 0 }
-};
-
-enum {
- SubTypeKVSize = sizeof(SubTypeKV)/sizeof(llvm::SubtargetFeatureKV)
-};
-
-
-enum {
- ItinClassesSize = 129
-};
-static const llvm::InstrStage Stages[] = {
- { 0, 0, 0 }, // No itinerary
- { 1, FU_Pipe0, -1 }, // 1
- { 2, FU_Pipe0, -1 }, // 2
- { 3, FU_Pipe0, -1 }, // 3
- { 15, FU_Pipe0, -1 }, // 4
- { 29, FU_Pipe0, -1 }, // 5
- { 1, FU_Pipe0 | FU_Pipe1, -1 }, // 6
- { 1, FU_Pipe1, 0 }, { 2, FU_Pipe0, -1 }, // 7
- { 2, FU_Pipe1, 0 }, { 3, FU_Pipe0, -1 }, // 8
- { 1, FU_Issue, 0 }, { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 1, FU_LdSt0, -1 }, // 9
- { 2, FU_Issue, 0 }, { 1, FU_Pipe0, 0 }, { 1, FU_Pipe1, -1 }, { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 1, FU_LdSt0, -1 }, // 10
- { 2, FU_Issue, 0 }, { 2, FU_Pipe0, 0 }, { 2, FU_Pipe1, -1 }, { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 1, FU_LdSt0, -1 }, // 11
- { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 1, FU_NLSPipe, -1 }, // 12
- { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 1, FU_NPipe, -1 }, // 13
- { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 4, FU_NPipe, 0 }, { 4, FU_NLSPipe, -1 }, // 14
- { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 7, FU_NPipe, 0 }, { 7, FU_NLSPipe, -1 }, // 15
- { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 5, FU_NPipe, 0 }, { 5, FU_NLSPipe, -1 }, // 16
- { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 8, FU_NPipe, 0 }, { 8, FU_NLSPipe, -1 }, // 17
- { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 9, FU_NPipe, 0 }, { 9, FU_NLSPipe, -1 }, // 18
- { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 11, FU_NPipe, 0 }, { 11, FU_NLSPipe, -1 }, // 19
- { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 19, FU_NPipe, 0 }, { 19, FU_NLSPipe, -1 }, // 20
- { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 20, FU_NPipe, 0 }, { 20, FU_NLSPipe, -1 }, // 21
- { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 29, FU_NPipe, 0 }, { 29, FU_NLSPipe, -1 }, // 22
- { 1, FU_Issue, 0 }, { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 1, FU_LdSt0, 0 }, { 1, FU_NLSPipe, -1 }, // 23
- { 2, FU_Issue, 0 }, { 1, FU_Pipe0, 0 }, { 1, FU_Pipe1, -1 }, { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 1, FU_LdSt0, 0 }, { 1, FU_NLSPipe, -1 }, // 24
- { 3, FU_Issue, 0 }, { 2, FU_Pipe0, 0 }, { 2, FU_Pipe1, -1 }, { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 1, FU_LdSt0, 0 }, { 1, FU_NLSPipe, -1 }, // 25
- { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 2, FU_NPipe, -1 }, // 26
- { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 2, FU_NLSPipe, -1 }, // 27
- { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 1, FU_NLSPipe, -1 }, { 1, FU_NPipe, 0 }, { 2, FU_NLSPipe, -1 }, // 28
- { 1, FU_Pipe0 | FU_Pipe1, -1 }, { 1, FU_NPipe, -1 }, { 2, FU_NLSPipe, 0 }, { 3, FU_NPipe, -1 }, // 29
- { 0, 0, 0 } // End itinerary
-};
-static const unsigned OperandCycles[] = {
- 0, // No itinerary
- 2, 2, // 1
- 2, 2, 2, // 2
- 2, 2, 1, // 3
- 3, 3, 2, 1, // 4
- 2, 1, // 5
- 3, 2, 1, // 6
- 2, // 7
- 3, // 8
- 3, 2, // 9
- 3, 1, // 10
- 4, 2, 1, // 11
- 4, 1, 1, // 12
- 4, 1, 1, 2, // 13
- 5, 1, 1, // 14
- 5, 1, 1, 2, // 15
- 6, 1, 1, // 16
- 6, 1, 1, 2, // 17
- 4, 1, // 18
- 5, 2, 1, // 19
- 4, 2, 1, 1, // 20
- 5, 2, 2, 1, // 21
- 2, 1, 1, // 22
- 2, 2, 1, 1, // 23
- 2, 2, 2, 1, // 24
- 5, 2, // 25
- 9, 2, // 26
- 9, 2, 2, // 27
- 9, 2, 2, 2, // 28
- 20, 2, 2, // 29
- 34, 2, 2, // 30
- 5, 2, 2, // 31
- 1, // 32
- 1, 1, // 33
- 1, 1, 1, // 34
- 6, 1, 1, 4, // 35
- 6, 6, 1, 1, // 36
- 3, 1, 1, // 37
- 3, 2, 1, 1, // 38
- 4, 3, 1, 1, // 39
- 2, 3, 1, // 40
- 2, 3, 1, 1, // 41
- 3, 3, 1, 1, // 42
- 7, 1, // 43
- 5, 1, // 44
- 8, 1, // 45
- 7, 1, 1, // 46
- 9, 1, 1, // 47
- 11, 1, 1, // 48
- 7, 2, 1, 1, // 49
- 19, 2, 1, 1, // 50
- 20, 1, 1, // 51
- 29, 1, 1, // 52
- 19, 1, // 53
- 29, 1, // 54
- 2, 2, 2, 2, 1, // 55
- 6, 2, // 56
- 6, 2, 2, // 57
- 20, 1, // 58
- 20, 20, 1, // 59
- 4, 4, 1, 1, // 60
- 9, 2, 2, 3, // 61
- 10, 2, 2, 3, // 62
- 10, 2, 2, // 63
- 3, 2, 2, // 64
- 4, 2, 2, // 65
- 4, 2, // 66
- 6, 3, 2, 1, // 67
- 7, 3, 2, 1, // 68
- 7, 2, 1, // 69
- 7, 2, 2, // 70
- 9, 2, 1, // 71
- 6, 2, 2, 3, // 72
- 7, 2, 1, 3, // 73
- 7, 2, 2, 3, // 74
- 9, 2, 1, 3, // 75
- 3, 2, 2, 1, // 76
- 4, 2, 2, 3, 1, // 77
- 4, 2, 2, 3, 3, 1, // 78
- 3, 1, 2, 1, // 79
- 3, 1, 2, 2, 1, // 80
- 4, 1, 2, 2, 3, 1, // 81
- 4, 1, 2, 2, 3, 3, 1, // 82
- 0 // End itinerary
-};
-
-enum {
- StagesSize = sizeof(Stages)/sizeof(llvm::InstrStage),
- OperandCyclesSize = sizeof(OperandCycles)/sizeof(unsigned)
-};
-
-static const llvm::InstrItinerary ARMV6Itineraries[] = {
- { 1, 2, 0, 0 }, // 0
- { 0, 0, 0, 0 }, // 1
- { 0, 0, 0, 0 }, // 2
- { 0, 0, 0, 0 }, // 3
- { 0, 0, 0, 0 }, // 4
- { 0, 0, 0, 0 }, // 5
- { 0, 0, 0, 0 }, // 6
- { 0, 0, 0, 0 }, // 7
- { 0, 0, 0, 0 }, // 8
- { 0, 0, 0, 0 }, // 9
- { 0, 0, 0, 0 }, // 10
- { 0, 0, 0, 0 }, // 11
- { 0, 0, 0, 0 }, // 12
- { 0, 0, 0, 0 }, // 13
- { 0, 0, 0, 0 }, // 14
- { 0, 0, 0, 0 }, // 15
- { 0, 0, 0, 0 }, // 16
- { 0, 0, 0, 0 }, // 17
- { 0, 0, 0, 0 }, // 18
- { 0, 0, 0, 0 }, // 19
- { 0, 0, 0, 0 }, // 20
- { 0, 0, 0, 0 }, // 21
- { 0, 0, 0, 0 }, // 22
- { 0, 0, 0, 0 }, // 23
- { 0, 0, 0, 0 }, // 24
- { 0, 0, 0, 0 }, // 25
- { 0, 0, 0, 0 }, // 26
- { 0, 0, 0, 0 }, // 27
- { 0, 0, 0, 0 }, // 28
- { 0, 0, 0, 0 }, // 29
- { 0, 0, 0, 0 }, // 30
- { 0, 0, 0, 0 }, // 31
- { 0, 0, 0, 0 }, // 32
- { 0, 0, 0, 0 }, // 33
- { 0, 0, 0, 0 }, // 34
- { 0, 0, 0, 0 }, // 35
- { 0, 0, 0, 0 }, // 36
- { 0, 0, 0, 0 }, // 37
- { 0, 0, 0, 0 }, // 38
- { 0, 0, 0, 0 }, // 39
- { 0, 0, 0, 0 }, // 40
- { 0, 0, 0, 0 }, // 41
- { 0, 0, 0, 0 }, // 42
- { 0, 0, 0, 0 }, // 43
- { 0, 0, 0, 0 }, // 44
- { 0, 0, 0, 0 }, // 45
- { 0, 0, 0, 0 }, // 46
- { 0, 0, 0, 0 }, // 47
- { 0, 0, 0, 0 }, // 48
- { 0, 0, 0, 0 }, // 49
- { 0, 0, 0, 0 }, // 50
- { 0, 0, 0, 0 }, // 51
- { 0, 0, 0, 0 }, // 52
- { 0, 0, 0, 0 }, // 53
- { 0, 0, 0, 0 }, // 54
- { 0, 0, 0, 0 }, // 55
- { 0, 0, 0, 0 }, // 56
- { 0, 0, 0, 0 }, // 57
- { 0, 0, 0, 0 }, // 58
- { 0, 0, 0, 0 }, // 59
- { 0, 0, 0, 0 }, // 60
- { 1, 2, 76, 79 }, // 61
- { 1, 2, 76, 79 }, // 62
- { 1, 2, 1, 3 }, // 63
- { 1, 2, 1, 3 }, // 64
- { 1, 2, 74, 76 }, // 65
- { 1, 2, 72, 74 }, // 66
- { 1, 2, 74, 76 }, // 67
- { 1, 2, 74, 76 }, // 68
- { 1, 2, 72, 74 }, // 69
- { 1, 2, 74, 76 }, // 70
- { 4, 5, 83, 86 }, // 71
- { 5, 6, 86, 89 }, // 72
- { 1, 2, 89, 92 }, // 73
- { 1, 2, 89, 92 }, // 74
- { 3, 4, 0, 0 }, // 75
- { 1, 2, 79, 83 }, // 76
- { 2, 3, 79, 83 }, // 77
- { 1, 2, 76, 79 }, // 78
- { 2, 3, 76, 79 }, // 79
- { 4, 5, 83, 86 }, // 80
- { 5, 6, 86, 89 }, // 81
- { 1, 2, 19, 20 }, // 82
- { 1, 2, 3, 6 }, // 83
- { 1, 2, 3, 6 }, // 84
- { 3, 4, 0, 0 }, // 85
- { 1, 2, 72, 74 }, // 86
- { 1, 2, 72, 74 }, // 87
- { 1, 2, 1, 3 }, // 88
- { 1, 2, 3, 6 }, // 89
- { 1, 2, 6, 9 }, // 90
- { 2, 3, 9, 13 }, // 91
- { 1, 2, 0, 0 }, // 92
- { 1, 2, 19, 20 }, // 93
- { 1, 2, 20, 22 }, // 94
- { 1, 2, 22, 24 }, // 95
- { 1, 2, 24, 27 }, // 96
- { 1, 2, 18, 19 }, // 97
- { 1, 2, 1, 3 }, // 98
- { 1, 2, 13, 15 }, // 99
- { 2, 3, 15, 18 }, // 100
- { 1, 2, 48, 50 }, // 101
- { 1, 2, 24, 27 }, // 102
- { 3, 4, 0, 0 }, // 103
- { 1, 2, 27, 30 }, // 104
- { 1, 2, 53, 57 }, // 105
- { 2, 3, 50, 53 }, // 106
- { 2, 3, 57, 61 }, // 107
- { 1, 2, 30, 34 }, // 108
- { 2, 3, 37, 41 }, // 109
- { 3, 4, 44, 48 }, // 110
- { 1, 2, 18, 19 }, // 111
- { 1, 2, 1, 3 }, // 112
- { 1, 2, 13, 15 }, // 113
- { 2, 3, 15, 18 }, // 114
- { 1, 2, 27, 30 }, // 115
- { 2, 3, 34, 37 }, // 116
- { 3, 4, 41, 44 }, // 117
- { 1, 2, 13, 15 }, // 118
- { 1, 2, 6, 9 }, // 119
- { 3, 4, 0, 0 }, // 120
- { 1, 2, 61, 64 }, // 121
- { 1, 2, 64, 68 }, // 122
- { 2, 3, 6, 9 }, // 123
- { 2, 3, 68, 72 }, // 124
- { 1, 2, 1, 3 }, // 125
- { 1, 2, 13, 15 }, // 126
- { 2, 3, 15, 18 }, // 127
- { 0, 0, 0, 0 }, // 128
- { ~0U, ~0U, ~0U, ~0U } // end marker
-};
-
-static const llvm::InstrItinerary CortexA8Itineraries[] = {
- { 6, 7, 0, 0 }, // 0
- { 26, 28, 89, 92 }, // 1
- { 71, 73, 168, 171 }, // 2
- { 26, 28, 24, 27 }, // 3
- { 26, 28, 24, 27 }, // 4
- { 26, 28, 191, 194 }, // 5
- { 26, 28, 191, 194 }, // 6
- { 26, 28, 191, 194 }, // 7
- { 71, 73, 194, 197 }, // 8
- { 24, 26, 61, 64 }, // 9
- { 73, 75, 106, 109 }, // 10
- { 55, 59, 0, 0 }, // 11
- { 55, 59, 6, 9 }, // 12
- { 55, 59, 68, 72 }, // 13
- { 55, 59, 161, 166 }, // 14
- { 26, 28, 180, 184 }, // 15
- { 71, 73, 184, 188 }, // 16
- { 26, 28, 216, 220 }, // 17
- { 71, 73, 224, 228 }, // 18
- { 71, 73, 220, 224 }, // 19
- { 79, 83, 228, 232 }, // 20
- { 24, 26, 13, 15 }, // 21
- { 24, 26, 173, 176 }, // 22
- { 24, 26, 61, 64 }, // 23
- { 24, 26, 13, 15 }, // 24
- { 73, 75, 106, 109 }, // 25
- { 26, 28, 19, 20 }, // 26
- { 73, 75, 22, 24 }, // 27
- { 24, 26, 171, 173 }, // 28
- { 26, 28, 168, 171 }, // 29
- { 71, 73, 210, 213 }, // 30
- { 71, 73, 207, 210 }, // 31
- { 79, 83, 213, 216 }, // 32
- { 26, 28, 199, 203 }, // 33
- { 71, 73, 203, 207 }, // 34
- { 24, 26, 64, 68 }, // 35
- { 73, 75, 124, 128 }, // 36
- { 75, 79, 176, 180 }, // 37
- { 26, 28, 48, 50 }, // 38
- { 26, 28, 48, 50 }, // 39
- { 26, 28, 76, 79 }, // 40
- { 71, 73, 188, 191 }, // 41
- { 26, 28, 27, 30 }, // 42
- { 71, 73, 34, 37 }, // 43
- { 26, 28, 106, 109 }, // 44
- { 71, 73, 27, 30 }, // 45
- { 55, 59, 0, 0 }, // 46
- { 26, 28, 15, 18 }, // 47
- { 26, 28, 15, 18 }, // 48
- { 73, 75, 15, 18 }, // 49
- { 73, 75, 232, 236 }, // 50
- { 75, 79, 236, 241 }, // 51
- { 75, 79, 241, 247 }, // 52
- { 73, 75, 247, 251 }, // 53
- { 73, 75, 251, 256 }, // 54
- { 75, 79, 256, 262 }, // 55
- { 75, 79, 262, 269 }, // 56
- { 26, 28, 72, 74 }, // 57
- { 71, 73, 166, 168 }, // 58
- { 26, 28, 197, 199 }, // 59
- { 26, 28, 197, 199 }, // 60
- { 26, 28, 134, 137 }, // 61
- { 40, 43, 137, 140 }, // 62
- { 26, 28, 93, 95 }, // 63
- { 28, 31, 48, 50 }, // 64
- { 37, 40, 132, 134 }, // 65
- { 34, 37, 130, 132 }, // 66
- { 37, 40, 132, 134 }, // 67
- { 26, 28, 128, 130 }, // 68
- { 31, 34, 128, 130 }, // 69
- { 26, 28, 128, 130 }, // 70
- { 49, 52, 151, 154 }, // 71
- { 52, 55, 154, 157 }, // 72
- { 55, 59, 0, 0 }, // 73
- { 59, 65, 0, 0 }, // 74
- { 65, 71, 0, 0 }, // 75
- { 26, 28, 143, 147 }, // 76
- { 46, 49, 147, 151 }, // 77
- { 26, 28, 134, 137 }, // 78
- { 43, 46, 140, 143 }, // 79
- { 46, 49, 157, 159 }, // 80
- { 52, 55, 159, 161 }, // 81
- { 24, 26, 0, 0 }, // 82
- { 55, 59, 0, 0 }, // 83
- { 59, 65, 0, 0 }, // 84
- { 65, 71, 0, 0 }, // 85
- { 26, 28, 128, 130 }, // 86
- { 28, 31, 48, 50 }, // 87
- { 6, 7, 1, 3 }, // 88
- { 6, 7, 3, 6 }, // 89
- { 6, 7, 6, 9 }, // 90
- { 6, 7, 64, 68 }, // 91
- { 6, 7, 0, 0 }, // 92
- { 6, 7, 18, 19 }, // 93
- { 6, 7, 13, 15 }, // 94
- { 6, 7, 13, 15 }, // 95
- { 6, 7, 61, 64 }, // 96
- { 6, 7, 18, 19 }, // 97
- { 6, 7, 1, 3 }, // 98
- { 6, 7, 13, 15 }, // 99
- { 6, 7, 61, 64 }, // 100
- { 11, 14, 22, 24 }, // 101
- { 11, 14, 15, 18 }, // 102
- { 19, 24, 0, 0 }, // 103
- { 11, 14, 106, 109 }, // 104
- { 11, 14, 109, 113 }, // 105
- { 14, 19, 27, 30 }, // 106
- { 14, 19, 113, 117 }, // 107
- { 7, 9, 98, 102 }, // 108
- { 7, 9, 98, 102 }, // 109
- { 9, 11, 102, 106 }, // 110
- { 6, 7, 92, 93 }, // 111
- { 6, 7, 93, 95 }, // 112
- { 6, 7, 93, 95 }, // 113
- { 6, 7, 95, 98 }, // 114
- { 1, 2, 34, 37 }, // 115
- { 7, 9, 41, 44 }, // 116
- { 9, 11, 102, 106 }, // 117
- { 11, 14, 22, 24 }, // 118
- { 11, 14, 117, 120 }, // 119
- { 19, 24, 0, 0 }, // 120
- { 11, 14, 106, 109 }, // 121
- { 11, 14, 120, 124 }, // 122
- { 14, 19, 106, 109 }, // 123
- { 14, 19, 124, 128 }, // 124
- { 6, 7, 1, 3 }, // 125
- { 6, 7, 13, 15 }, // 126
- { 6, 7, 61, 64 }, // 127
- { 0, 0, 0, 0 }, // 128
- { ~0U, ~0U, ~0U, ~0U } // end marker
-};
-
-static const llvm::InstrItinerary GenericItineraries[] = {
- { 0, 0, 0, 0 }, // 0
- { 0, 0, 0, 0 }, // 1
- { 0, 0, 0, 0 }, // 2
- { 0, 0, 0, 0 }, // 3
- { 0, 0, 0, 0 }, // 4
- { 0, 0, 0, 0 }, // 5
- { 0, 0, 0, 0 }, // 6
- { 0, 0, 0, 0 }, // 7
- { 0, 0, 0, 0 }, // 8
- { 0, 0, 0, 0 }, // 9
- { 0, 0, 0, 0 }, // 10
- { 0, 0, 0, 0 }, // 11
- { 0, 0, 0, 0 }, // 12
- { 0, 0, 0, 0 }, // 13
- { 0, 0, 0, 0 }, // 14
- { 0, 0, 0, 0 }, // 15
- { 0, 0, 0, 0 }, // 16
- { 0, 0, 0, 0 }, // 17
- { 0, 0, 0, 0 }, // 18
- { 0, 0, 0, 0 }, // 19
- { 0, 0, 0, 0 }, // 20
- { 0, 0, 0, 0 }, // 21
- { 0, 0, 0, 0 }, // 22
- { 0, 0, 0, 0 }, // 23
- { 0, 0, 0, 0 }, // 24
- { 0, 0, 0, 0 }, // 25
- { 0, 0, 0, 0 }, // 26
- { 0, 0, 0, 0 }, // 27
- { 0, 0, 0, 0 }, // 28
- { 0, 0, 0, 0 }, // 29
- { 0, 0, 0, 0 }, // 30
- { 0, 0, 0, 0 }, // 31
- { 0, 0, 0, 0 }, // 32
- { 0, 0, 0, 0 }, // 33
- { 0, 0, 0, 0 }, // 34
- { 0, 0, 0, 0 }, // 35
- { 0, 0, 0, 0 }, // 36
- { 0, 0, 0, 0 }, // 37
- { 0, 0, 0, 0 }, // 38
- { 0, 0, 0, 0 }, // 39
- { 0, 0, 0, 0 }, // 40
- { 0, 0, 0, 0 }, // 41
- { 0, 0, 0, 0 }, // 42
- { 0, 0, 0, 0 }, // 43
- { 0, 0, 0, 0 }, // 44
- { 0, 0, 0, 0 }, // 45
- { 0, 0, 0, 0 }, // 46
- { 0, 0, 0, 0 }, // 47
- { 0, 0, 0, 0 }, // 48
- { 0, 0, 0, 0 }, // 49
- { 0, 0, 0, 0 }, // 50
- { 0, 0, 0, 0 }, // 51
- { 0, 0, 0, 0 }, // 52
- { 0, 0, 0, 0 }, // 53
- { 0, 0, 0, 0 }, // 54
- { 0, 0, 0, 0 }, // 55
- { 0, 0, 0, 0 }, // 56
- { 0, 0, 0, 0 }, // 57
- { 0, 0, 0, 0 }, // 58
- { 0, 0, 0, 0 }, // 59
- { 0, 0, 0, 0 }, // 60
- { 0, 0, 0, 0 }, // 61
- { 0, 0, 0, 0 }, // 62
- { 0, 0, 0, 0 }, // 63
- { 0, 0, 0, 0 }, // 64
- { 0, 0, 0, 0 }, // 65
- { 0, 0, 0, 0 }, // 66
- { 0, 0, 0, 0 }, // 67
- { 0, 0, 0, 0 }, // 68
- { 0, 0, 0, 0 }, // 69
- { 0, 0, 0, 0 }, // 70
- { 0, 0, 0, 0 }, // 71
- { 0, 0, 0, 0 }, // 72
- { 0, 0, 0, 0 }, // 73
- { 0, 0, 0, 0 }, // 74
- { 0, 0, 0, 0 }, // 75
- { 0, 0, 0, 0 }, // 76
- { 0, 0, 0, 0 }, // 77
- { 0, 0, 0, 0 }, // 78
- { 0, 0, 0, 0 }, // 79
- { 0, 0, 0, 0 }, // 80
- { 0, 0, 0, 0 }, // 81
- { 0, 0, 0, 0 }, // 82
- { 0, 0, 0, 0 }, // 83
- { 0, 0, 0, 0 }, // 84
- { 0, 0, 0, 0 }, // 85
- { 0, 0, 0, 0 }, // 86
- { 0, 0, 0, 0 }, // 87
- { 0, 0, 0, 0 }, // 88
- { 0, 0, 0, 0 }, // 89
- { 0, 0, 0, 0 }, // 90
- { 0, 0, 0, 0 }, // 91
- { 0, 0, 0, 0 }, // 92
- { 0, 0, 0, 0 }, // 93
- { 0, 0, 0, 0 }, // 94
- { 0, 0, 0, 0 }, // 95
- { 0, 0, 0, 0 }, // 96
- { 0, 0, 0, 0 }, // 97
- { 0, 0, 0, 0 }, // 98
- { 0, 0, 0, 0 }, // 99
- { 0, 0, 0, 0 }, // 100
- { 0, 0, 0, 0 }, // 101
- { 0, 0, 0, 0 }, // 102
- { 0, 0, 0, 0 }, // 103
- { 0, 0, 0, 0 }, // 104
- { 0, 0, 0, 0 }, // 105
- { 0, 0, 0, 0 }, // 106
- { 0, 0, 0, 0 }, // 107
- { 0, 0, 0, 0 }, // 108
- { 0, 0, 0, 0 }, // 109
- { 0, 0, 0, 0 }, // 110
- { 0, 0, 0, 0 }, // 111
- { 0, 0, 0, 0 }, // 112
- { 0, 0, 0, 0 }, // 113
- { 0, 0, 0, 0 }, // 114
- { 0, 0, 0, 0 }, // 115
- { 0, 0, 0, 0 }, // 116
- { 0, 0, 0, 0 }, // 117
- { 0, 0, 0, 0 }, // 118
- { 0, 0, 0, 0 }, // 119
- { 0, 0, 0, 0 }, // 120
- { 0, 0, 0, 0 }, // 121
- { 0, 0, 0, 0 }, // 122
- { 0, 0, 0, 0 }, // 123
- { 0, 0, 0, 0 }, // 124
- { 0, 0, 0, 0 }, // 125
- { 0, 0, 0, 0 }, // 126
- { 0, 0, 0, 0 }, // 127
- { 0, 0, 0, 0 }, // 128
- { ~0U, ~0U, ~0U, ~0U } // end marker
-};
-
-// Sorted (by key) array of itineraries for CPU subtype.
-static const llvm::SubtargetInfoKV ProcItinKV[] = {
- { "arm1020e", (void *)&GenericItineraries },
- { "arm1020t", (void *)&GenericItineraries },
- { "arm1022e", (void *)&GenericItineraries },
- { "arm10e", (void *)&GenericItineraries },
- { "arm10tdmi", (void *)&GenericItineraries },
- { "arm1136j-s", (void *)&ARMV6Itineraries },
- { "arm1136jf-s", (void *)&ARMV6Itineraries },
- { "arm1156t2-s", (void *)&ARMV6Itineraries },
- { "arm1156t2f-s", (void *)&ARMV6Itineraries },
- { "arm1176jz-s", (void *)&ARMV6Itineraries },
- { "arm1176jzf-s", (void *)&ARMV6Itineraries },
- { "arm710t", (void *)&GenericItineraries },
- { "arm720t", (void *)&GenericItineraries },
- { "arm7tdmi", (void *)&GenericItineraries },
- { "arm7tdmi-s", (void *)&GenericItineraries },
- { "arm8", (void *)&GenericItineraries },
- { "arm810", (void *)&GenericItineraries },
- { "arm9", (void *)&GenericItineraries },
- { "arm920", (void *)&GenericItineraries },
- { "arm920t", (void *)&GenericItineraries },
- { "arm922t", (void *)&GenericItineraries },
- { "arm926ej-s", (void *)&GenericItineraries },
- { "arm940t", (void *)&GenericItineraries },
- { "arm946e-s", (void *)&GenericItineraries },
- { "arm966e-s", (void *)&GenericItineraries },
- { "arm968e-s", (void *)&GenericItineraries },
- { "arm9e", (void *)&GenericItineraries },
- { "arm9tdmi", (void *)&GenericItineraries },
- { "cortex-a8", (void *)&CortexA8Itineraries },
- { "cortex-a9", (void *)&GenericItineraries },
- { "ep9312", (void *)&GenericItineraries },
- { "generic", (void *)&GenericItineraries },
- { "iwmmxt", (void *)&GenericItineraries },
- { "mpcore", (void *)&ARMV6Itineraries },
- { "mpcorenovfp", (void *)&ARMV6Itineraries },
- { "strongarm", (void *)&GenericItineraries },
- { "strongarm110", (void *)&GenericItineraries },
- { "strongarm1100", (void *)&GenericItineraries },
- { "strongarm1110", (void *)&GenericItineraries },
- { "xscale", (void *)&GenericItineraries }
-};
-
-enum {
- ProcItinKVSize = sizeof(ProcItinKV)/sizeof(llvm::SubtargetInfoKV)
-};
-
-// ParseSubtargetFeatures - Parses features string setting specified
-// subtarget options.
-std::string llvm::ARMSubtarget::ParseSubtargetFeatures(const std::string &FS,
- const std::string &CPU) {
- DEBUG(dbgs() << "\nFeatures:" << FS);
- DEBUG(dbgs() << "\nCPU:" << CPU);
- SubtargetFeatures Features(FS);
- Features.setCPUIfNone(CPU);
- uint32_t Bits = Features.getBits(SubTypeKV, SubTypeKVSize,
- FeatureKV, FeatureKVSize);
- if ((Bits & ArchV4T) != 0 && ARMArchVersion < V4T) ARMArchVersion = V4T;
- if ((Bits & ArchV5T) != 0 && ARMArchVersion < V5T) ARMArchVersion = V5T;
- if ((Bits & ArchV5TE) != 0 && ARMArchVersion < V5TE) ARMArchVersion = V5TE;
- if ((Bits & ArchV6) != 0 && ARMArchVersion < V6) ARMArchVersion = V6;
- if ((Bits & ArchV6T2) != 0 && ARMArchVersion < V6T2) ARMArchVersion = V6T2;
- if ((Bits & ArchV7A) != 0 && ARMArchVersion < V7A) ARMArchVersion = V7A;
- if ((Bits & FeatureNEON) != 0 && ARMFPUType < NEON) ARMFPUType = NEON;
- if ((Bits & FeatureThumb2) != 0 && ThumbMode < Thumb2) ThumbMode = Thumb2;
- if ((Bits & FeatureVFP2) != 0 && ARMFPUType < VFPv2) ARMFPUType = VFPv2;
- if ((Bits & FeatureVFP3) != 0 && ARMFPUType < VFPv3) ARMFPUType = VFPv3;
-
- InstrItinerary *Itinerary = (InstrItinerary *)Features.getInfo(ProcItinKV, ProcItinKVSize);
- InstrItins = InstrItineraryData(Stages, OperandCycles, Itinerary);
- return Features.getCPU();
-}
diff --git a/libclamav/c++/ClamBCRTChecks.cpp b/libclamav/c++/ClamBCRTChecks.cpp
index 6243c0a..d3def6a 100644
--- a/libclamav/c++/ClamBCRTChecks.cpp
+++ b/libclamav/c++/ClamBCRTChecks.cpp
@@ -54,6 +54,7 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/Debug.h"
+#define LLVM28
#ifdef LLVM28
#define DEFINEPASS(passname) passname() : FunctionPass(ID)
#else
diff --git a/libclamav/c++/Makefile.am b/libclamav/c++/Makefile.am
index c3a4a3a..ec42741 100644
--- a/libclamav/c++/Makefile.am
+++ b/libclamav/c++/Makefile.am
@@ -42,7 +42,6 @@ else
AM_CPPFLAGS += $(LLVM_INCLUDES) $(LLVM_DEFS)
noinst_LTLIBRARIES = libclamavcxx.la libllvmsystem.la\
libllvmcodegen.la libllvmjit.la
-lli_LDADD=libllvmbitreader.la libllvmfullcodegen.la libllvmjit.la
libclamavcxx_la_LIBADD=libllvmjit.la
libclamavcxx_la_DEPENDENCIES=libllvmjit.la libllvmcodegen.la libllvmsystem.la
libclamavcxx_la_LDFLAGS=-no-undefined
@@ -52,21 +51,13 @@ libclamavcxx_la_CXXFLAGS = $(LLVM_CXXFLAGS)
if BUILD_X86
libclamavcxx_la_LIBADD+=libllvmx86codegen.la
libclamavcxx_la_DEPENDENCIES+=libllvmx86codegen.la
-lli_LDADD+=libllvmx86codegen.la
noinst_LTLIBRARIES+=libllvmx86codegen.la
endif
if BUILD_PPC
libclamavcxx_la_LIBADD+=libllvmpowerpccodegen.la
libclamavcxx_la_DEPENDENCIES+=libllvmpowerpccodegen.la
-lli_LDADD+=libllvmpowerpccodegen.la
noinst_LTLIBRARIES+=libllvmpowerpccodegen.la
endif
-if BUILD_ARM
-libclamavcxx_la_LIBADD+=libllvmarmcodegen.la
-libclamavcxx_la_DEPENDENCIES+=libllvmarmcodegen.la
-lli_LDADD+=libllvmarmcodegen.la
-noinst_LTLIBRARIES+=libllvmarmcodegen.la
-endif
libclamavcxx_la_LIBADD+=libllvmcodegen.la libllvmsystem.la
@@ -74,10 +65,9 @@ LLVM_CXXFLAGS=-Woverloaded-virtual -pedantic -Wno-long-long -Wall -W -Wno-unused
unittest_CXXFLAGS=@NO_VARIADIC_MACROS@ @NO_MISSING_FIELD_INITIALIZERS@ -DGTEST_HAS_TR1_TUPLE=0
TBLGENFILES=llvm/include/llvm/Intrinsics.gen X86GenRegisterInfo.h.inc X86GenRegisterNames.inc X86GenRegisterInfo.inc X86GenInstrNames.inc X86GenInstrInfo.inc\
- X86GenAsmWriter.inc X86GenAsmWriter1.inc X86GenAsmMatcher.inc X86GenDAGISel.inc X86GenFastISel.inc X86GenCallingConv.inc\
- X86GenSubtarget.inc PPCGenInstrNames.inc PPCGenRegisterNames.inc PPCGenAsmWriter.inc PPCGenCodeEmitter.inc PPCGenRegisterInfo.h.inc PPCGenRegisterInfo.inc\
- PPCGenInstrInfo.inc PPCGenDAGISel.inc PPCGenCallingConv.inc PPCGenSubtarget.inc ARMGenRegisterInfo.h.inc ARMGenRegisterNames.inc ARMGenRegisterInfo.inc ARMGenInstrNames.inc ARMGenInstrInfo.inc ARMGenCodeEmitter.inc\
- ARMGenAsmWriter.inc ARMGenDAGISel.inc ARMGenCallingConv.inc ARMGenSubtarget.inc
+ X86GenDAGISel.inc X86GenFastISel.inc X86GenCallingConv.inc\
+ X86GenSubtarget.inc PPCGenInstrNames.inc PPCGenRegisterNames.inc PPCGenCodeEmitter.inc PPCGenRegisterInfo.h.inc PPCGenRegisterInfo.inc\
+ PPCGenInstrInfo.inc PPCGenDAGISel.inc PPCGenCallingConv.inc PPCGenSubtarget.inc
BUILT_SOURCES=
EXTRA_DIST=$(top_srcdir)/llvm llvmcheck.sh $(TBLGENFILES)
@@ -101,74 +91,8 @@ libllvmsystem_la_SOURCES=\
llvm/lib/System/Signals.cpp\
llvm/lib/System/ThreadLocal.cpp\
llvm/lib/System/Threading.cpp\
- llvm/lib/System/TimeValue.cpp
-
-# support is split into 2:
-# a full llvmsupport, and another that contains only objs
-# that aren't already contained in llvmjit
-libllvmsupport_la_SOURCES=\
- llvm/lib/Support/APFloat.cpp\
- llvm/lib/Support/APInt.cpp\
- llvm/lib/Support/APSInt.cpp\
- llvm/lib/Support/Allocator.cpp\
- llvm/lib/Support/CommandLine.cpp\
- llvm/lib/Support/ConstantRange.cpp\
- llvm/lib/Support/Debug.cpp\
- llvm/lib/Support/DeltaAlgorithm.cpp\
- llvm/lib/Support/Dwarf.cpp\
- llvm/lib/Support/ErrorHandling.cpp\
- llvm/lib/Support/FileUtilities.cpp\
- llvm/lib/Support/FoldingSet.cpp\
- llvm/lib/Support/FormattedStream.cpp\
- llvm/lib/Support/GraphWriter.cpp\
- llvm/lib/Support/IsInf.cpp\
- llvm/lib/Support/IsNAN.cpp\
- llvm/lib/Support/ManagedStatic.cpp\
- llvm/lib/Support/MemoryBuffer.cpp\
- llvm/lib/Support/MemoryObject.cpp\
- llvm/lib/Support/PluginLoader.cpp\
- llvm/lib/Support/PrettyStackTrace.cpp\
- llvm/lib/Support/Regex.cpp\
- llvm/lib/Support/SlowOperationInformer.cpp\
- llvm/lib/Support/SmallPtrSet.cpp\
- llvm/lib/Support/SmallVector.cpp\
- llvm/lib/Support/SourceMgr.cpp\
- llvm/lib/Support/Statistic.cpp\
- llvm/lib/Support/StringExtras.cpp\
- llvm/lib/Support/StringMap.cpp\
- llvm/lib/Support/StringPool.cpp\
- llvm/lib/Support/StringRef.cpp\
- llvm/lib/Support/SystemUtils.cpp\
- llvm/lib/Support/TargetRegistry.cpp\
- llvm/lib/Support/Timer.cpp\
- llvm/lib/Support/Triple.cpp\
- llvm/lib/Support/Twine.cpp\
- llvm/lib/Support/circular_raw_ostream.cpp\
- llvm/lib/Support/raw_os_ostream.cpp\
- llvm/lib/Support/raw_ostream.cpp\
- llvm/lib/Support/regcomp.c\
- llvm/lib/Support/regerror.c\
- llvm/lib/Support/regexec.c\
- llvm/lib/Support/regfree.c\
- llvm/lib/Support/regstrlcpy.c
-
-libllvmsupport_nodups_la_SOURCES=\
- llvm/lib/Support/APSInt.cpp\
- llvm/lib/Support/DeltaAlgorithm.cpp\
- llvm/lib/Support/FileUtilities.cpp\
- llvm/lib/Support/IsInf.cpp\
- llvm/lib/Support/IsNAN.cpp\
- llvm/lib/Support/MemoryObject.cpp\
- llvm/lib/Support/PluginLoader.cpp\
- llvm/lib/Support/Regex.cpp\
- llvm/lib/Support/SlowOperationInformer.cpp\
- llvm/lib/Support/SystemUtils.cpp\
- llvm/lib/Support/raw_os_ostream.cpp\
- llvm/lib/Support/regcomp.c\
- llvm/lib/Support/regerror.c\
- llvm/lib/Support/regexec.c\
- llvm/lib/Support/regfree.c\
- llvm/lib/Support/regstrlcpy.c
+ llvm/lib/System/TimeValue.cpp\
+ llvm/lib/System/Valgrind.cpp
if MAINTAINER_MODE
BUILT_SOURCES+=$(TBLGENFILES)
@@ -181,38 +105,38 @@ tblgen_LDFLAGS=@THREAD_LIBS@ -Wl,--version-script,@top_srcdir@/llvm/autoconf/Exp
# since tblgen is only a maintainer-mode tool, build these files twice (once for
# libllvmsupport.la -fno-rtti, and once here, with defaults (rtti)).
tblgen_SOURCES=\
- llvm/utils/TableGen/AsmMatcherEmitter.cpp\
- llvm/utils/TableGen/AsmWriterEmitter.cpp\
- llvm/utils/TableGen/AsmWriterInst.cpp\
- llvm/utils/TableGen/CallingConvEmitter.cpp\
- llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp\
- llvm/utils/TableGen/CodeEmitterGen.cpp\
- llvm/utils/TableGen/CodeGenDAGPatterns.cpp\
- llvm/utils/TableGen/CodeGenInstruction.cpp\
- llvm/utils/TableGen/CodeGenTarget.cpp\
- llvm/utils/TableGen/DAGISelEmitter.cpp\
- llvm/utils/TableGen/DAGISelMatcher.cpp\
- llvm/utils/TableGen/DAGISelMatcherEmitter.cpp\
- llvm/utils/TableGen/DAGISelMatcherGen.cpp\
- llvm/utils/TableGen/DAGISelMatcherOpt.cpp\
- llvm/utils/TableGen/DisassemblerEmitter.cpp\
- llvm/utils/TableGen/EDEmitter.cpp\
- llvm/utils/TableGen/FastISelEmitter.cpp\
- llvm/utils/TableGen/InstrEnumEmitter.cpp\
- llvm/utils/TableGen/InstrInfoEmitter.cpp\
- llvm/utils/TableGen/IntrinsicEmitter.cpp\
- llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp\
- llvm/utils/TableGen/OptParserEmitter.cpp\
- llvm/utils/TableGen/Record.cpp\
- llvm/utils/TableGen/RegisterInfoEmitter.cpp\
- llvm/utils/TableGen/SubtargetEmitter.cpp\
- llvm/utils/TableGen/TGLexer.cpp\
- llvm/utils/TableGen/TGParser.cpp\
- llvm/utils/TableGen/TGValueTypes.cpp\
- llvm/utils/TableGen/TableGen.cpp\
- llvm/utils/TableGen/TableGenBackend.cpp\
- llvm/utils/TableGen/X86DisassemblerTables.cpp\
- llvm/utils/TableGen/X86RecognizableInstr.cpp \
+ llvm/utils/TableGen/ARMDecoderEmitter.cpp\
+ llvm/utils/TableGen/AsmMatcherEmitter.cpp\
+ llvm/utils/TableGen/AsmWriterInst.cpp\
+ llvm/utils/TableGen/CallingConvEmitter.cpp\
+ llvm/utils/TableGen/ClangASTNodesEmitter.cpp\
+ llvm/utils/TableGen/ClangAttrEmitter.cpp\
+ llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp\
+ llvm/utils/TableGen/CodeEmitterGen.cpp\
+ llvm/utils/TableGen/CodeGenDAGPatterns.cpp\
+ llvm/utils/TableGen/CodeGenInstruction.cpp\
+ llvm/utils/TableGen/CodeGenTarget.cpp\
+ llvm/utils/TableGen/DAGISelEmitter.cpp\
+ llvm/utils/TableGen/DAGISelMatcher.cpp\
+ llvm/utils/TableGen/DAGISelMatcherEmitter.cpp\
+ llvm/utils/TableGen/DAGISelMatcherGen.cpp\
+ llvm/utils/TableGen/DAGISelMatcherOpt.cpp\
+ llvm/utils/TableGen/EDEmitter.cpp\
+ llvm/utils/TableGen/FastISelEmitter.cpp\
+ llvm/utils/TableGen/InstrEnumEmitter.cpp\
+ llvm/utils/TableGen/InstrInfoEmitter.cpp\
+ llvm/utils/TableGen/IntrinsicEmitter.cpp\
+ llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp\
+ llvm/utils/TableGen/NeonEmitter.cpp\
+ llvm/utils/TableGen/OptParserEmitter.cpp\
+ llvm/utils/TableGen/Record.cpp\
+ llvm/utils/TableGen/RegisterInfoEmitter.cpp\
+ llvm/utils/TableGen/SubtargetEmitter.cpp\
+ llvm/utils/TableGen/TableGenBackend.cpp\
+ llvm/utils/TableGen/TableGen.cpp\
+ llvm/utils/TableGen/TGLexer.cpp\
+ llvm/utils/TableGen/TGParser.cpp\
+ llvm/utils/TableGen/TGValueTypes.cpp\
llvm/lib/System/Alarm.cpp\
llvm/lib/System/Atomic.cpp\
llvm/lib/System/Disassembler.cpp\
@@ -230,6 +154,7 @@ tblgen_SOURCES=\
llvm/lib/System/ThreadLocal.cpp\
llvm/lib/System/Threading.cpp\
llvm/lib/System/TimeValue.cpp\
+ llvm/lib/System/Valgrind.cpp\
llvm/lib/Support/APFloat.cpp\
llvm/lib/Support/APInt.cpp\
llvm/lib/Support/APSInt.cpp\
@@ -252,7 +177,6 @@ tblgen_SOURCES=\
llvm/lib/Support/PluginLoader.cpp\
llvm/lib/Support/PrettyStackTrace.cpp\
llvm/lib/Support/Regex.cpp\
- llvm/lib/Support/SlowOperationInformer.cpp\
llvm/lib/Support/SmallPtrSet.cpp\
llvm/lib/Support/SmallVector.cpp\
llvm/lib/Support/SourceMgr.cpp\
@@ -299,15 +223,6 @@ X86GenInstrNames.inc: llvm/lib/Target/X86/X86.td $(TBLGEN)
X86GenInstrInfo.inc: llvm/lib/Target/X86/X86.td $(TBLGEN)
$(TBLGEN_V) $(TBLGEN_FLAGS_X86) -gen-instr-desc -o $@ $<
-X86GenAsmWriter.inc: llvm/lib/Target/X86/X86.td $(TBLGEN)
- $(TBLGEN_V) $(TBLGEN_FLAGS_X86) -gen-asm-writer -o $@ $<
-
-X86GenAsmWriter1.inc: llvm/lib/Target/X86/X86.td $(TBLGEN)
- $(TBLGEN_V) $(TBLGEN_FLAGS_X86) -gen-asm-writer -asmwriternum=1 -o $@ $<
-
-X86GenAsmMatcher.inc: llvm/lib/Target/X86/X86.td $(TBLGEN)
- $(TBLGEN_V) $(TBLGEN_FLAGS_X86) -gen-asm-matcher -o $@ $<
-
X86GenDAGISel.inc: llvm/lib/Target/X86/X86.td $(TBLGEN)
$(TBLGEN_V) $(TBLGEN_FLAGS_X86) -gen-dag-isel -o $@ $<
@@ -328,9 +243,6 @@ PPCGenInstrNames.inc: llvm/lib/Target/PowerPC/PPC.td $(TBLGEN)
PPCGenRegisterNames.inc: llvm/lib/Target/PowerPC/PPC.td $(TBLGEN)
$(TBLGEN_V) $(TBLGEN_FLAGS_PPC) -gen-register-enums -o $@ $<
-PPCGenAsmWriter.inc: llvm/lib/Target/PowerPC/PPC.td $(TBLGEN)
- $(TBLGEN_V) $(TBLGEN_FLAGS_PPC) -gen-asm-writer -o $@ $<
-
PPCGenCodeEmitter.inc: llvm/lib/Target/PowerPC/PPC.td $(TBLGEN)
$(TBLGEN_V) $(TBLGEN_FLAGS_PPC) -gen-emitter -o $@ $<
@@ -355,46 +267,24 @@ PPCGenCallingConv.inc: llvm/lib/Target/PowerPC/PPC.td $(TBLGEN)
PPCGenSubtarget.inc: llvm/lib/Target/PowerPC/PPC.td $(TBLGEN)
$(TBLGEN_V) $(TBLGEN_FLAGS_PPC) -gen-subtarget -o $@ $<
-# ARM Target
-TBLGEN_FLAGS_ARM= $(TBLGEN_FLAGS) -I$(top_srcdir)/llvm/lib/Target/ARM
-ARMGenRegisterInfo.h.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
- $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-register-desc-header -o $@ $<
-
-ARMGenRegisterNames.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
- $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-register-enums -o $@ $<
-
-ARMGenRegisterInfo.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
- $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-register-desc -o $@ $<
-
-ARMGenInstrNames.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
- $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-instr-enums -o $@ $<
-
-ARMGenInstrInfo.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
- $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-instr-desc -o $@ $<
-
-ARMGenCodeEmitter.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
- $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-emitter -o $@ $<
-
-ARMGenAsmWriter.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
- $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-asm-writer -o $@ $<
-
-ARMGenDAGISel.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
- $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-dag-isel -o $@ $<
-
-ARMGenCallingConv.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
- $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-callingconv -o $@ $<
-
-ARMGenSubtarget.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
- $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-subtarget -o $@ $<
endif
if BUILD_X86
libllvmx86codegen_la_CPPFLAGS=$(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_builddir) -I$(top_srcdir)/llvm/lib/Target/X86
libllvmx86codegen_la_SOURCES=\
+ llvm/lib/MC/ELFObjectWriter.cpp\
llvm/lib/MC/MCAsmInfoCOFF.cpp\
llvm/lib/MC/MCCodeEmitter.cpp\
+ llvm/lib/MC/MCELFStreamer.cpp\
+ llvm/lib/MC/MCMachOStreamer.cpp\
+ llvm/lib/MC/MCObjectStreamer.cpp\
+ llvm/lib/MC/MCObjectWriter.cpp\
+ llvm/lib/MC/MachObjectWriter.cpp\
llvm/lib/MC/TargetAsmBackend.cpp\
+ llvm/lib/MC/WinCOFFObjectWriter.cpp\
+ llvm/lib/MC/WinCOFFStreamer.cpp\
llvm/lib/Target/TargetELFWriterInfo.cpp\
+ llvm/lib/Target/X86/SSEDomainFix.cpp\
llvm/lib/Target/X86/TargetInfo/X86TargetInfo.cpp\
llvm/lib/Target/X86/X86AsmBackend.cpp\
llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp\
@@ -402,15 +292,14 @@ libllvmx86codegen_la_SOURCES=\
llvm/lib/Target/X86/X86ELFWriterInfo.cpp\
llvm/lib/Target/X86/X86FastISel.cpp\
llvm/lib/Target/X86/X86FloatingPoint.cpp\
- llvm/lib/Target/X86/X86FloatingPointRegKill.cpp\
llvm/lib/Target/X86/X86ISelDAGToDAG.cpp\
llvm/lib/Target/X86/X86ISelLowering.cpp\
llvm/lib/Target/X86/X86InstrInfo.cpp\
llvm/lib/Target/X86/X86JITInfo.cpp\
llvm/lib/Target/X86/X86MCAsmInfo.cpp\
llvm/lib/Target/X86/X86MCCodeEmitter.cpp\
- llvm/lib/Target/X86/X86MCTargetExpr.cpp\
llvm/lib/Target/X86/X86RegisterInfo.cpp\
+ llvm/lib/Target/X86/X86SelectionDAGInfo.cpp\
llvm/lib/Target/X86/X86Subtarget.cpp\
llvm/lib/Target/X86/X86TargetMachine.cpp\
llvm/lib/Target/X86/X86TargetObjectFile.cpp
@@ -429,65 +318,44 @@ libllvmpowerpccodegen_la_SOURCES=\
llvm/lib/Target/PowerPC/PPCMCAsmInfo.cpp\
llvm/lib/Target/PowerPC/PPCPredicates.cpp\
llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp\
+ llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp\
llvm/lib/Target/PowerPC/PPCSubtarget.cpp\
llvm/lib/Target/PowerPC/PPCTargetMachine.cpp\
llvm/lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp
endif
-if BUILD_ARM
-
-libllvmarmcodegen_la_CPPFLAGS=$(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_builddir) -I$(top_srcdir)/llvm/lib/Target/ARM
-libllvmarmcodegen_la_SOURCES=\
- llvm/lib/CodeGen/IfConversion.cpp\
- llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp\
- llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp\
- llvm/lib/Target/ARM/ARMCodeEmitter.cpp\
- llvm/lib/Target/ARM/ARMConstantIslandPass.cpp\
- llvm/lib/Target/ARM/ARMConstantPoolValue.cpp\
- llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp\
- llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp\
- llvm/lib/Target/ARM/ARMISelLowering.cpp\
- llvm/lib/Target/ARM/ARMInstrInfo.cpp\
- llvm/lib/Target/ARM/ARMJITInfo.cpp\
- llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp\
- llvm/lib/Target/ARM/ARMMCAsmInfo.cpp\
- llvm/lib/Target/ARM/ARMRegisterInfo.cpp\
- llvm/lib/Target/ARM/ARMSubtarget.cpp\
- llvm/lib/Target/ARM/ARMTargetMachine.cpp\
- llvm/lib/Target/ARM/NEONMoveFix.cpp\
- llvm/lib/Target/ARM/NEONPreAllocPass.cpp\
- llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp\
- llvm/lib/Target/ARM/Thumb1InstrInfo.cpp\
- llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp\
- llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp\
- llvm/lib/Target/ARM/Thumb2InstrInfo.cpp\
- llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp\
- llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
-endif
-
# End of Targets
libllvmjit_la_SOURCES=\
- llvm/lib/Analysis/IPA/CallGraph.cpp\
llvm/lib/Analysis/AliasAnalysis.cpp\
+ llvm/lib/Analysis/AliasSetTracker.cpp\
llvm/lib/Analysis/BasicAliasAnalysis.cpp\
llvm/lib/Analysis/CaptureTracking.cpp\
llvm/lib/Analysis/DebugInfo.cpp\
+ llvm/lib/Analysis/IPA/CallGraph.cpp\
llvm/lib/Analysis/MemoryBuiltins.cpp\
llvm/lib/Analysis/PointerTracking.cpp\
+ llvm/lib/Analysis/ProfileInfo.cpp\
llvm/lib/Analysis/ValueTracking.cpp\
llvm/lib/CodeGen/ELFCodeEmitter.cpp\
llvm/lib/CodeGen/ELFWriter.cpp\
+ llvm/lib/CodeGen/LiveVariables.cpp\
llvm/lib/CodeGen/MachineBasicBlock.cpp\
+ llvm/lib/CodeGen/MachineDominators.cpp\
llvm/lib/CodeGen/MachineFunction.cpp\
llvm/lib/CodeGen/MachineFunctionAnalysis.cpp\
llvm/lib/CodeGen/MachineFunctionPass.cpp\
+ llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp\
llvm/lib/CodeGen/MachineInstr.cpp\
+ llvm/lib/CodeGen/MachineLoopInfo.cpp\
llvm/lib/CodeGen/MachineModuleInfo.cpp\
llvm/lib/CodeGen/MachineRegisterInfo.cpp\
llvm/lib/CodeGen/ObjectCodeEmitter.cpp\
+ llvm/lib/CodeGen/PostRAHazardRecognizer.cpp\
llvm/lib/CodeGen/PseudoSourceValue.cpp\
+ llvm/lib/CodeGen/ScheduleDAG.cpp\
llvm/lib/CodeGen/TargetInstrInfoImpl.cpp\
+ llvm/lib/CodeGen/UnreachableBlockElim.cpp\
llvm/lib/ExecutionEngine/ExecutionEngine.cpp\
llvm/lib/ExecutionEngine/JIT/Intercept.cpp\
llvm/lib/ExecutionEngine/JIT/JIT.cpp\
@@ -498,10 +366,14 @@ libllvmjit_la_SOURCES=\
llvm/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp\
llvm/lib/ExecutionEngine/JIT/TargetSelect.cpp\
llvm/lib/MC/MCAsmInfo.cpp\
+ llvm/lib/MC/MCAssembler.cpp\
llvm/lib/MC/MCContext.cpp\
llvm/lib/MC/MCExpr.cpp\
+ llvm/lib/MC/MCInst.cpp\
llvm/lib/MC/MCSection.cpp\
+ llvm/lib/MC/MCSectionCOFF.cpp\
llvm/lib/MC/MCSectionELF.cpp\
+ llvm/lib/MC/MCSectionMachO.cpp\
llvm/lib/MC/MCSymbol.cpp\
llvm/lib/Support/APFloat.cpp\
llvm/lib/Support/APInt.cpp\
@@ -513,7 +385,6 @@ libllvmjit_la_SOURCES=\
llvm/lib/Support/ErrorHandling.cpp\
llvm/lib/Support/FoldingSet.cpp\
llvm/lib/Support/FormattedStream.cpp\
- llvm/lib/Support/GraphWriter.cpp\
llvm/lib/Support/ManagedStatic.cpp\
llvm/lib/Support/MemoryBuffer.cpp\
llvm/lib/Support/PrettyStackTrace.cpp\
@@ -529,8 +400,8 @@ libllvmjit_la_SOURCES=\
llvm/lib/Support/Timer.cpp\
llvm/lib/Support/Triple.cpp\
llvm/lib/Support/Twine.cpp\
- llvm/lib/Support/circular_raw_ostream.cpp\
llvm/lib/Support/raw_ostream.cpp\
+ llvm/lib/Support/circular_raw_ostream.cpp\
llvm/lib/Target/Mangler.cpp\
llvm/lib/Target/SubtargetFeature.cpp\
llvm/lib/Target/TargetData.cpp\
@@ -538,13 +409,8 @@ libllvmjit_la_SOURCES=\
llvm/lib/Target/TargetLoweringObjectFile.cpp\
llvm/lib/Target/TargetMachine.cpp\
llvm/lib/Target/TargetRegisterInfo.cpp\
- llvm/lib/Transforms/Scalar/ADCE.cpp\
llvm/lib/Transforms/Scalar/DCE.cpp\
- llvm/lib/Transforms/Scalar/SCCP.cpp\
llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp\
- llvm/lib/Transforms/IPO/ConstantMerge.cpp\
- llvm/lib/Transforms/IPO/GlobalOpt.cpp\
- llvm/lib/Transforms/IPO/GlobalDCE.cpp\
llvm/lib/VMCore/AsmWriter.cpp\
llvm/lib/VMCore/Attributes.cpp\
llvm/lib/VMCore/AutoUpgrade.cpp\
@@ -552,6 +418,7 @@ libllvmjit_la_SOURCES=\
llvm/lib/VMCore/ConstantFold.cpp\
llvm/lib/VMCore/Constants.cpp\
llvm/lib/VMCore/Core.cpp\
+ llvm/lib/VMCore/DebugLoc.cpp\
llvm/lib/VMCore/Dominators.cpp\
llvm/lib/VMCore/Function.cpp\
llvm/lib/VMCore/GVMaterializer.cpp\
@@ -568,6 +435,7 @@ libllvmjit_la_SOURCES=\
llvm/lib/VMCore/Module.cpp\
llvm/lib/VMCore/Pass.cpp\
llvm/lib/VMCore/PassManager.cpp\
+ llvm/lib/VMCore/PassRegistry.cpp\
llvm/lib/VMCore/PrintModulePass.cpp\
llvm/lib/VMCore/Type.cpp\
llvm/lib/VMCore/TypeSymbolTable.cpp\
@@ -578,66 +446,57 @@ libllvmjit_la_SOURCES=\
llvm/lib/VMCore/Verifier.cpp
libllvmcodegen_la_SOURCES=\
- llvm/lib/Analysis/AliasSetTracker.cpp\
llvm/lib/Analysis/ConstantFolding.cpp\
llvm/lib/Analysis/IVUsers.cpp\
llvm/lib/Analysis/InstructionSimplify.cpp\
+ llvm/lib/Analysis/Loads.cpp\
llvm/lib/Analysis/LoopInfo.cpp\
llvm/lib/Analysis/LoopPass.cpp\
llvm/lib/Analysis/MemoryDependenceAnalysis.cpp\
llvm/lib/Analysis/PHITransAddr.cpp\
- llvm/lib/Analysis/ProfileInfo.cpp\
llvm/lib/Analysis/ScalarEvolution.cpp\
llvm/lib/Analysis/ScalarEvolutionExpander.cpp\
+ llvm/lib/Analysis/ScalarEvolutionNormalization.cpp\
llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp\
- llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp\
- llvm/lib/CodeGen/AsmPrinter/DIE.cpp\
- llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp\
- llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp\
- llvm/lib/CodeGen/AsmPrinter/DwarfLabel.cpp\
- llvm/lib/CodeGen/AsmPrinter/DwarfPrinter.cpp\
- llvm/lib/CodeGen/AsmPrinter/DwarfWriter.cpp\
+ llvm/lib/CodeGen/Analysis.cpp\
llvm/lib/CodeGen/BranchFolding.cpp\
llvm/lib/CodeGen/CalcSpillWeights.cpp\
+ llvm/lib/CodeGen/CallingConvLower.cpp\
llvm/lib/CodeGen/CodePlacementOpt.cpp\
llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp\
llvm/lib/CodeGen/DeadMachineInstructionElim.cpp\
llvm/lib/CodeGen/DwarfEHPrepare.cpp\
- llvm/lib/CodeGen/ExactHazardRecognizer.cpp\
llvm/lib/CodeGen/GCMetadata.cpp\
llvm/lib/CodeGen/GCStrategy.cpp\
+ llvm/lib/CodeGen/InlineSpiller.cpp\
llvm/lib/CodeGen/LLVMTargetMachine.cpp\
llvm/lib/CodeGen/LatencyPriorityQueue.cpp\
llvm/lib/CodeGen/LiveInterval.cpp\
llvm/lib/CodeGen/LiveIntervalAnalysis.cpp\
llvm/lib/CodeGen/LiveStackAnalysis.cpp\
- llvm/lib/CodeGen/LiveVariables.cpp\
+ llvm/lib/CodeGen/LocalStackSlotAllocation.cpp\
llvm/lib/CodeGen/LowerSubregs.cpp\
llvm/lib/CodeGen/MachineCSE.cpp\
- llvm/lib/CodeGen/MachineDominators.cpp\
llvm/lib/CodeGen/MachineLICM.cpp\
- llvm/lib/CodeGen/MachineLoopInfo.cpp\
llvm/lib/CodeGen/MachineModuleInfoImpls.cpp\
llvm/lib/CodeGen/MachinePassRegistry.cpp\
llvm/lib/CodeGen/MachineSSAUpdater.cpp\
llvm/lib/CodeGen/MachineSink.cpp\
llvm/lib/CodeGen/MachineVerifier.cpp\
- llvm/lib/CodeGen/OptimizeExts.cpp\
llvm/lib/CodeGen/OptimizePHIs.cpp\
llvm/lib/CodeGen/PHIElimination.cpp\
llvm/lib/CodeGen/Passes.cpp\
+ llvm/lib/CodeGen/PeepholeOptimizer.cpp\
llvm/lib/CodeGen/PostRASchedulerList.cpp\
llvm/lib/CodeGen/PreAllocSplitting.cpp\
llvm/lib/CodeGen/ProcessImplicitDefs.cpp\
llvm/lib/CodeGen/PrologEpilogInserter.cpp\
+ llvm/lib/CodeGen/RegAllocFast.cpp\
llvm/lib/CodeGen/RegAllocLinearScan.cpp\
llvm/lib/CodeGen/RegisterCoalescer.cpp\
llvm/lib/CodeGen/RegisterScavenging.cpp\
- llvm/lib/CodeGen/ScheduleDAG.cpp\
llvm/lib/CodeGen/ScheduleDAGEmit.cpp\
llvm/lib/CodeGen/ScheduleDAGInstrs.cpp\
- llvm/lib/CodeGen/ScheduleDAGPrinter.cpp\
- llvm/lib/CodeGen/SelectionDAG/CallingConvLower.cpp\
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp\
llvm/lib/CodeGen/SelectionDAG/FastISel.cpp\
llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp\
@@ -649,7 +508,6 @@ libllvmcodegen_la_SOURCES=\
llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp\
llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp\
llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp\
- llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp\
llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp\
llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp\
llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp\
@@ -658,37 +516,42 @@ libllvmcodegen_la_SOURCES=\
llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp\
llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp\
llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp\
+ llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp\
+ llvm/lib/CodeGen/ScheduleDAGPrinter.cpp\
llvm/lib/CodeGen/ShrinkWrapping.cpp\
llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp\
llvm/lib/CodeGen/SjLjEHPrepare.cpp\
llvm/lib/CodeGen/SlotIndexes.cpp\
llvm/lib/CodeGen/Spiller.cpp\
+ llvm/lib/CodeGen/SplitKit.cpp\
llvm/lib/CodeGen/StackProtector.cpp\
llvm/lib/CodeGen/StackSlotColoring.cpp\
llvm/lib/CodeGen/StrongPHIElimination.cpp\
llvm/lib/CodeGen/TailDuplication.cpp\
llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp\
llvm/lib/CodeGen/TwoAddressInstructionPass.cpp\
- llvm/lib/CodeGen/UnreachableBlockElim.cpp\
llvm/lib/CodeGen/VirtRegMap.cpp\
llvm/lib/CodeGen/VirtRegRewriter.cpp\
llvm/lib/MC/MCAsmInfoDarwin.cpp\
llvm/lib/MC/MCAsmStreamer.cpp\
- llvm/lib/MC/MCAssembler.cpp\
- llvm/lib/MC/MCInst.cpp\
- llvm/lib/MC/MCMachOStreamer.cpp\
+ llvm/lib/MC/MCInstPrinter.cpp\
+ llvm/lib/MC/MCLoggingStreamer.cpp\
llvm/lib/MC/MCNullStreamer.cpp\
- llvm/lib/MC/MCSectionMachO.cpp\
llvm/lib/MC/MCStreamer.cpp\
+ llvm/lib/Support/GraphWriter.cpp\
llvm/lib/Target/TargetFrameInfo.cpp\
llvm/lib/Target/TargetSubtarget.cpp\
+ llvm/lib/Transforms/IPO/ConstantMerge.cpp\
+ llvm/lib/Transforms/IPO/GlobalOpt.cpp\
llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp\
llvm/lib/Transforms/Scalar/GEPSplitter.cpp\
llvm/lib/Transforms/Scalar/GVN.cpp\
llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp\
+ llvm/lib/Transforms/Scalar/SCCP.cpp\
llvm/lib/Transforms/Utils/AddrModeMatcher.cpp\
llvm/lib/Transforms/Utils/BasicBlockUtils.cpp\
llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp\
+ llvm/lib/Transforms/Utils/BuildLibCalls.cpp\
llvm/lib/Transforms/Utils/DemoteRegToStack.cpp\
llvm/lib/Transforms/Utils/LCSSA.cpp\
llvm/lib/Transforms/Utils/Local.cpp\
@@ -701,189 +564,6 @@ libllvmcodegen_la_SOURCES=\
llvm/lib/Transforms/Utils/SimplifyCFG.cpp\
llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
-
-
-# Used only by make check
-
-libllvmbitreader_la_SOURCES=\
- llvm/lib/Bitcode/Reader/BitReader.cpp\
- llvm/lib/Bitcode/Reader/BitcodeReader.cpp
-
-libllvmbitwriter_la_SOURCES=\
- llvm/lib/Bitcode/Writer/BitWriter.cpp\
- llvm/lib/Bitcode/Writer/BitcodeWriter.cpp\
- llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp\
- llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
-
-libllvmasmparser_la_SOURCES=\
- llvm/lib/AsmParser/LLLexer.cpp\
- llvm/lib/AsmParser/LLParser.cpp\
- llvm/lib/AsmParser/Parser.cpp
-
-libllvminterpreter_la_SOURCES=\
- llvm/lib/ExecutionEngine/Interpreter/Execution.cpp\
- llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp\
- llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
-
-libgoogletest_la_CPPFLAGS=$(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_srcdir)/llvm/utils/unittest/googletest/include
-libgoogletest_la_CXXFLAGS=$(unittest_CXXFLAGS)
-libgoogletest_la_SOURCES=\
- llvm/utils/unittest/googletest/gtest-death-test.cc\
- llvm/utils/unittest/googletest/gtest-filepath.cc\
- llvm/utils/unittest/googletest/gtest-port.cc\
- llvm/utils/unittest/googletest/gtest-test-part.cc\
- llvm/utils/unittest/googletest/gtest-typed-test.cc\
- llvm/utils/unittest/googletest/gtest.cc\
- llvm/utils/unittest/UnitTestMain/TestMain.cpp
-
-llvmunittest_ADT_CPPFLAGS=$(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_srcdir)/llvm/utils/unittest/googletest/include
-llvmunittest_ADT_CXXFLAGS=$(unittest_CXXFLAGS)
-llvmunittest_ADT_LDADD=libgoogletest.la libllvmsupport_nodups.la libllvmjit.la libllvmsystem.la
-llvmunittest_ADT_SOURCES=\
- llvm/unittests/ADT/APFloatTest.cpp\
- llvm/unittests/ADT/APIntTest.cpp\
- llvm/unittests/ADT/DenseMapTest.cpp\
- llvm/unittests/ADT/DenseSetTest.cpp\
- llvm/unittests/ADT/ImmutableSetTest.cpp\
- llvm/unittests/ADT/SmallStringTest.cpp\
- llvm/unittests/ADT/SmallVectorTest.cpp\
- llvm/unittests/ADT/SparseBitVectorTest.cpp\
- llvm/unittests/ADT/StringMapTest.cpp\
- llvm/unittests/ADT/StringRefTest.cpp\
- llvm/unittests/ADT/TripleTest.cpp\
- llvm/unittests/ADT/TwineTest.cpp
-
-llvmunittest_Support_CPPFLAGS=$(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_srcdir)/llvm/utils/unittest/googletest/include
-llvmunittest_Support_CXXFLAGS=$(unittest_CXXFLAGS)
-llvmunittest_Support_LDADD=libgoogletest.la libllvmsupport_nodups.la libllvmjit.la libllvmsystem.la
-llvmunittest_Support_SOURCES=\
- llvm/unittests/Support/AllocatorTest.cpp\
- llvm/unittests/Support/ConstantRangeTest.cpp\
- llvm/unittests/Support/MathExtrasTest.cpp\
- llvm/unittests/Support/RegexTest.cpp\
- llvm/unittests/Support/TypeBuilderTest.cpp\
- llvm/unittests/Support/ValueHandleTest.cpp\
- llvm/unittests/Support/raw_ostream_test.cpp
-
-llvmunittest_VMCore_CPPFLAGS=$(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_srcdir)/llvm/utils/unittest/googletest/include
-llvmunittest_VMCore_CXXFLAGS=$(unittest_CXXFLAGS)
-llvmunittest_VMCore_LDADD=libgoogletest.la libllvmsupport_nodups.la libllvmjit.la libllvmsystem.la
-llvmunittest_VMCore_SOURCES=\
- llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp\
- llvm/lib/Analysis/LoopInfo.cpp\
- llvm/lib/Analysis/LoopPass.cpp\
- llvm/unittests/VMCore/ConstantsTest.cpp\
- llvm/unittests/VMCore/MetadataTest.cpp\
- llvm/unittests/VMCore/PassManagerTest.cpp
-
-llvmunittest_JIT_CPPFLAGS=$(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_srcdir)/llvm/utils/unittest/googletest/include
-llvmunittest_JIT_CXXFLAGS=$(unittest_CXXFLAGS)
-llvmunittest_JIT_LDADD=libgoogletest.la libllvmasmparser.la $(lli_LDADD)
-llvmunittest_JIT_SOURCES=\
- llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp\
- llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp\
- llvm/unittests/ExecutionEngine/JIT/JITTest.cpp
-
-llvmunittest_ExecutionEngine_CPPFLAGS=$(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_srcdir)/llvm/utils/unittest/googletest/include
-llvmunittest_ExecutionEngine_CXXFLAGS=$(unittest_CXXFLAGS)
-llvmunittest_ExecutionEngine_LDADD=libgoogletest.la libllvminterpreter.la libllvmsupport_nodups.la $(libclamavcxx_la_LIBADD) libllvmsystem.la
-llvmunittest_ExecutionEngine_SOURCES=\
- llvm/lib/CodeGen/IntrinsicLowering.cpp\
- llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp
-
-count_CPPFLAGS=$(LLVM_INCLUDES) $(LLVM_DEFS)
-count_SOURCES=llvm/utils/count/count.c
-count_LDADD=libllvmsystem.la
-not_CPPFLAGS=$(LLVM_INCLUDES) $(LLVM_DEFS)
-not_CXXFLAGS=$(LLVM_CXXFLAGS)
-not_SOURCES=llvm/utils/not/not.cpp
-not_LDADD=libllvmsystem.la
-
-FileCheck_CPPFLAGS=$(LLVM_INCLUDES) $(LLVM_DEFS)
-FileCheck_CXXFLAGS=$(LLVM_CXXFLAGS)
-FileCheck_LDADD=libllvmsupport.la libllvmsystem.la
-FileCheck_SOURCES=llvm/utils/FileCheck/FileCheck.cpp
-
-check_LTLIBRARIES=libllvmbitreader.la libllvmsupport_nodups.la libllvmsupport.la libllvmfullcodegen.la libllvmasmprinter.la libllvmbitwriter.la libllvmasmparser.la libgoogletest.la libllvminterpreter.la
-check_PROGRAMS=count not lli llc llvm-as llvm-dis llvmunittest_ADT llvmunittest_Support llvmunittest_VMCore llvmunittest_ExecutionEngine llvmunittest_JIT FileCheck
-check_SCRIPTS=llvmcheck.sh
-TESTS_ENVIRONMENT=export GMAKE=@GMAKE@;
-TESTS=llvmunittest_ADT llvmunittest_Support llvmunittest_VMCore llvmunittest_ExecutionEngine llvmunittest_JIT
-@ifGNUmake@ TESTS+=llvmcheck.sh
-
-libllvmasmprinter_la_CPPFLAGS=$(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_srcdir)/llvm/lib/Target/X86 \
- -I$(top_srcdir)/llvm/lib/Target/PowerPC -I$(top_srcdir)/llvm/lib/Target/ARM
-libllvmasmprinter_la_SOURCES=\
- llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp\
- llvm/lib/CodeGen/ELFCodeEmitter.cpp\
- llvm/lib/CodeGen/ELFWriter.cpp
-
-if BUILD_X86
-libllvmasmprinter_la_SOURCES+=llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp\
- llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp\
- llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp\
- llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp\
- llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp\
- llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp
-endif
-
-if BUILD_PPC
-libllvmasmprinter_la_SOURCES+= llvm/lib/Target/PowerPC/AsmPrinter/PPCAsmPrinter.cpp
-endif
-
-if BUILD_ARM
-libllvmasmprinter_la_SOURCES+= llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp \
- llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp\
- llvm/lib/Target/ARM/AsmPrinter/ARMMCInstLower.cpp
-endif
-
-libllvmfullcodegen_la_SOURCES=\
- llvm/lib/CodeGen/GCMetadataPrinter.cpp\
- llvm/lib/CodeGen/IfConversion.cpp\
- llvm/lib/CodeGen/IntrinsicLowering.cpp\
- llvm/lib/CodeGen/OcamlGC.cpp\
- llvm/lib/CodeGen/RegAllocLocal.cpp\
- llvm/lib/CodeGen/RegAllocPBQP.cpp\
- llvm/lib/CodeGen/ShadowStackGC.cpp\
- llvm/lib/ExecutionEngine/Interpreter/Execution.cpp\
- llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp\
- llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp\
- llvm/lib/Target/Target.cpp\
- llvm/lib/Target/TargetAsmLexer.cpp\
- llvm/lib/Target/TargetELFWriterInfo.cpp\
- llvm/lib/Target/TargetIntrinsicInfo.cpp
-
-lli_LDADD+=libllvmfullcodegen.la libllvmcodegen.la libllvmsupport_nodups.la libllvmjit.la libllvmsystem.la
-lli_SOURCES=\
- llvm/tools/lli/lli.cpp
-
-llc_CPPFLAGS=$(LLVM_INCLUDES) $(LLVM_DEFS)
-llc_CXXFLAGS=$(LLVM_CXXFLAGS)
-llc_LDADD=libllvmbitreader.la libllvmasmprinter.la libllvmasmparser.la $(lli_LDADD)
-llc_SOURCES=\
- llvm/lib/MC/MCInstPrinter.cpp\
- llvm/tools/llc/llc.cpp
-
-llvm_as_CPPFLAGS=$(LLVM_INCLUDES) $(LLVM_DEFS)
-llvm_as_CXXFLAGS=$(LLVM_CXXFLAGS)
-llvm_as_LDADD=libllvmasmparser.la libllvmbitwriter.la libllvmsupport_nodups.la libllvmjit.la libllvmsystem.la
-llvm_as_SOURCES=\
- llvm/tools/llvm-as/llvm-as.cpp
-
-llvm_dis_CPPFLAGS=$(LLVM_INCLUDES) $(LLVM_DEFS)
-llvm_dis_CXXFLAGS=$(LLVM_CXXFLAGS)
-llvm_dis_LDADD=libllvmasmparser.la libllvmbitreader.la libllvmsupport_nodups.la libllvmjit.la libllvmsystem.la
-llvm_dis_SOURCES=\
- llvm/tools/llvm-dis/llvm-dis.cpp
-
-#bytecode2llvm.cpp: build-llvm
-
-build-llvm:
- +$(GMAKE) -C llvm OPTIMIZE_OPTION=-O2 libs-only
-
-build-llvm-for-check:
- +$(GMAKE) -C llvm OPTIMIZE_OPTION=-O2 tools-only
-
# Don't use make -C here, otherwise there's a racecondition between distclean
# and clean (distclean removes all Makefiles in llvm/)
clean-local:
@@ -894,6 +574,7 @@ if MAINTAINER_MODE
rm -f llvm/include/llvm/Intrinsics.gen
endif
rm -f llvm/test/site.exp llvm/test/site.bak llvm/test/*.out llvm/test/*.sum llvm/test/*.log
+ rm -f $(top_srcdir)/llvm/utils/lit/lit/*.pyc
distclean-local:
rm -f llvm/docs/doxygen.cfg llvm/test/Unit/.dir llvm/test/Unit/lit.site.cfg
@@ -901,7 +582,7 @@ distclean-local:
rm -f llvm/mklib llvm/tools/llvmc/llvm-config.in
rm -f llvm/Makefile.config llvm/config.log
rm -f llvm/llvm.spec llvm/include/llvm/Config/AsmPrinters.def llvm/include/llvm/Config/config.h
- rm -f llvm/include/llvm/Config/Disassemblers.def
+ rm -f llvm/include/llvm/Config/Disassemblers.def llvm/include/llvm/Config/llvm-config.h
rm -f llvm/include/llvm/System/DataTypes.h llvm/include/llvm/Config/Targets.def
rm -f llvm/tools/llvmc/plugins/Base/Base.td llvm/tools/llvm-config/llvm-config.in
rm -f llvm/include/llvm/Config/AsmParsers.def
@@ -911,10 +592,6 @@ distclean-local:
llvm/Makefile.common llvm/unittests/Makefile;\
fi
-check-llvm: build-llvm-for-check
- +$(GMAKE) -C llvm check
- +$(GMAKE) -C llvm unittests
-
# Rule to rerun LLVM's configure if it changed, before building anything else
# LLVM
BUILT_SOURCES += llvm/config.status
@@ -924,9 +601,8 @@ $(top_builddir)/llvm/config.status: llvm/configure
endif
# rm configure generated files
-dist-hook:
+dist-hook: clean-local
make -C llvm dist-hook
rm -f $(distdir)/llvm/include/llvm/Config/*.h $(distdir)/llvm/include/llvm/Config/*.def $(distdir)/llvm/Makefile.config $(distdir)/llvm/llvm.spec
rm -f $(distdir)/llvm/docs/doxygen.cfg $(distdir)/llvm/tools/llvmc/plugins/Base/Base.td $(distdir)/llvm/tools/llvm-config/llvm-config.in
rm -f $(distdir)/llvm/include/llvm/System/DataTypes.h $(distdir)/llvm/config.log $(distdir)/llvm/config.status
-
diff --git a/libclamav/c++/Makefile.in b/libclamav/c++/Makefile.in
index 0c7548b..8a38e38 100644
--- a/libclamav/c++/Makefile.in
+++ b/libclamav/c++/Makefile.in
@@ -55,44 +55,11 @@ target_triplet = @target@
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@am__append_2 = libllvmx86codegen.la
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@am__append_3 = libllvmx86codegen.la
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@am__append_4 = libllvmx86codegen.la
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@am__append_5 = libllvmx86codegen.la
+@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@am__append_5 = libllvmpowerpccodegen.la
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@am__append_6 = libllvmpowerpccodegen.la
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@am__append_7 = libllvmpowerpccodegen.la
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@am__append_8 = libllvmpowerpccodegen.la
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@am__append_9 = libllvmpowerpccodegen.la
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@am__append_10 = libllvmarmcodegen.la
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@am__append_11 = libllvmarmcodegen.la
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@am__append_12 = libllvmarmcodegen.la
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@am__append_13 = libllvmarmcodegen.la
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@am__append_14 = $(TBLGENFILES)
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@am__append_8 = $(TBLGENFILES)
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@noinst_PROGRAMS = tblgen$(EXEEXT)
-@BUILD_EXTERNAL_LLVM_FALSE@check_PROGRAMS = count$(EXEEXT) \
-@BUILD_EXTERNAL_LLVM_FALSE@ not$(EXEEXT) lli$(EXEEXT) \
-@BUILD_EXTERNAL_LLVM_FALSE@ llc$(EXEEXT) llvm-as$(EXEEXT) \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm-dis$(EXEEXT) \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_ADT$(EXEEXT) \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_Support$(EXEEXT) \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_VMCore$(EXEEXT) \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_ExecutionEngine$(EXEEXT) \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_JIT$(EXEEXT) \
-@BUILD_EXTERNAL_LLVM_FALSE@ FileCheck$(EXEEXT)
-@BUILD_EXTERNAL_LLVM_FALSE@TESTS = llvmunittest_ADT$(EXEEXT) \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_Support$(EXEEXT) \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_VMCore$(EXEEXT) \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_ExecutionEngine$(EXEEXT) \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_JIT$(EXEEXT)
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@am__append_15 = llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp
-
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@am__append_16 = llvm/lib/Target/PowerPC/AsmPrinter/PPCAsmPrinter.cpp
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@am__append_17 = llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/AsmPrinter/ARMMCInstLower.cpp
-
subdir = .
DIST_COMMON = $(am__configure_deps) $(srcdir)/Makefile.am \
$(srcdir)/Makefile.in $(srcdir)/clamavcxx-config.h.in \
@@ -126,201 +93,58 @@ libclamavcxx_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(libclamavcxx_la_LDFLAGS) $(LDFLAGS) -o $@
@BUILD_EXTERNAL_LLVM_FALSE@am_libclamavcxx_la_rpath =
@BUILD_EXTERNAL_LLVM_TRUE@am_libclamavcxx_la_rpath =
-libgoogletest_la_LIBADD =
-am__libgoogletest_la_SOURCES_DIST = \
- llvm/utils/unittest/googletest/gtest-death-test.cc \
- llvm/utils/unittest/googletest/gtest-filepath.cc \
- llvm/utils/unittest/googletest/gtest-port.cc \
- llvm/utils/unittest/googletest/gtest-test-part.cc \
- llvm/utils/unittest/googletest/gtest-typed-test.cc \
- llvm/utils/unittest/googletest/gtest.cc \
- llvm/utils/unittest/UnitTestMain/TestMain.cpp
-@BUILD_EXTERNAL_LLVM_FALSE@am_libgoogletest_la_OBJECTS = libgoogletest_la-gtest-death-test.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ libgoogletest_la-gtest-filepath.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ libgoogletest_la-gtest-port.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ libgoogletest_la-gtest-test-part.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ libgoogletest_la-gtest-typed-test.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ libgoogletest_la-gtest.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ libgoogletest_la-TestMain.lo
-libgoogletest_la_OBJECTS = $(am_libgoogletest_la_OBJECTS)
-libgoogletest_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
- $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
- $(libgoogletest_la_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
- $(LDFLAGS) -o $@
-@BUILD_EXTERNAL_LLVM_FALSE@am_libgoogletest_la_rpath =
-libllvmarmcodegen_la_LIBADD =
-am__libllvmarmcodegen_la_SOURCES_DIST = \
- llvm/lib/CodeGen/IfConversion.cpp \
- llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp \
- llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp \
- llvm/lib/Target/ARM/ARMCodeEmitter.cpp \
- llvm/lib/Target/ARM/ARMConstantIslandPass.cpp \
- llvm/lib/Target/ARM/ARMConstantPoolValue.cpp \
- llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp \
- llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp \
- llvm/lib/Target/ARM/ARMISelLowering.cpp \
- llvm/lib/Target/ARM/ARMInstrInfo.cpp \
- llvm/lib/Target/ARM/ARMJITInfo.cpp \
- llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp \
- llvm/lib/Target/ARM/ARMMCAsmInfo.cpp \
- llvm/lib/Target/ARM/ARMRegisterInfo.cpp \
- llvm/lib/Target/ARM/ARMSubtarget.cpp \
- llvm/lib/Target/ARM/ARMTargetMachine.cpp \
- llvm/lib/Target/ARM/NEONMoveFix.cpp \
- llvm/lib/Target/ARM/NEONPreAllocPass.cpp \
- llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp \
- llvm/lib/Target/ARM/Thumb1InstrInfo.cpp \
- llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp \
- llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp \
- llvm/lib/Target/ARM/Thumb2InstrInfo.cpp \
- llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp \
- llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@am_libllvmarmcodegen_la_OBJECTS = libllvmarmcodegen_la-IfConversion.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-ARMBaseInstrInfo.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-ARMBaseRegisterInfo.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-ARMCodeEmitter.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-ARMConstantIslandPass.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-ARMConstantPoolValue.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-ARMExpandPseudoInsts.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-ARMISelDAGToDAG.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-ARMISelLowering.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-ARMInstrInfo.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-ARMJITInfo.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-ARMLoadStoreOptimizer.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-ARMMCAsmInfo.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-ARMRegisterInfo.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-ARMSubtarget.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-ARMTargetMachine.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-NEONMoveFix.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-NEONPreAllocPass.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-ARMTargetInfo.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-Thumb1InstrInfo.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-Thumb1RegisterInfo.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-Thumb2ITBlockPass.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-Thumb2InstrInfo.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-Thumb2RegisterInfo.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmarmcodegen_la-Thumb2SizeReduction.lo
-libllvmarmcodegen_la_OBJECTS = $(am_libllvmarmcodegen_la_OBJECTS)
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@am_libllvmarmcodegen_la_rpath =
-libllvmasmparser_la_LIBADD =
-am__libllvmasmparser_la_SOURCES_DIST = llvm/lib/AsmParser/LLLexer.cpp \
- llvm/lib/AsmParser/LLParser.cpp llvm/lib/AsmParser/Parser.cpp
-@BUILD_EXTERNAL_LLVM_FALSE@am_libllvmasmparser_la_OBJECTS = \
-@BUILD_EXTERNAL_LLVM_FALSE@ LLLexer.lo LLParser.lo Parser.lo
-libllvmasmparser_la_OBJECTS = $(am_libllvmasmparser_la_OBJECTS)
-@BUILD_EXTERNAL_LLVM_FALSE@am_libllvmasmparser_la_rpath =
-libllvmasmprinter_la_LIBADD =
-am__libllvmasmprinter_la_SOURCES_DIST = \
- llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp \
- llvm/lib/CodeGen/ELFCodeEmitter.cpp \
- llvm/lib/CodeGen/ELFWriter.cpp \
- llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp \
- llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp \
- llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp \
- llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp \
- llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp \
- llvm/lib/Target/PowerPC/AsmPrinter/PPCAsmPrinter.cpp \
- llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp \
- llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp \
- llvm/lib/Target/ARM/AsmPrinter/ARMMCInstLower.cpp
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@am__objects_1 = libllvmasmprinter_la-X86AsmPrinter.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmasmprinter_la-X86ATTInstPrinter.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmasmprinter_la-X86IntelInstPrinter.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmasmprinter_la-X86AsmPrinter.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmasmprinter_la-X86MCInstLower.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmasmprinter_la-X86COFFMachineModuleInfo.lo
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@am__objects_2 = libllvmasmprinter_la-PPCAsmPrinter.lo
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@am__objects_3 = libllvmasmprinter_la-ARMAsmPrinter.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmasmprinter_la-ARMInstPrinter.lo \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ libllvmasmprinter_la-ARMMCInstLower.lo
-@BUILD_EXTERNAL_LLVM_FALSE@am_libllvmasmprinter_la_OBJECTS = libllvmasmprinter_la-OcamlGCPrinter.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ libllvmasmprinter_la-ELFCodeEmitter.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ libllvmasmprinter_la-ELFWriter.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ $(am__objects_1) $(am__objects_2) \
-@BUILD_EXTERNAL_LLVM_FALSE@ $(am__objects_3)
-libllvmasmprinter_la_OBJECTS = $(am_libllvmasmprinter_la_OBJECTS)
-@BUILD_EXTERNAL_LLVM_FALSE@am_libllvmasmprinter_la_rpath =
-libllvmbitreader_la_LIBADD =
-am__libllvmbitreader_la_SOURCES_DIST = \
- llvm/lib/Bitcode/Reader/BitReader.cpp \
- llvm/lib/Bitcode/Reader/BitcodeReader.cpp
-@BUILD_EXTERNAL_LLVM_FALSE@am_libllvmbitreader_la_OBJECTS = \
-@BUILD_EXTERNAL_LLVM_FALSE@ BitReader.lo BitcodeReader.lo
-libllvmbitreader_la_OBJECTS = $(am_libllvmbitreader_la_OBJECTS)
-@BUILD_EXTERNAL_LLVM_FALSE@am_libllvmbitreader_la_rpath =
-libllvmbitwriter_la_LIBADD =
-am__libllvmbitwriter_la_SOURCES_DIST = \
- llvm/lib/Bitcode/Writer/BitWriter.cpp \
- llvm/lib/Bitcode/Writer/BitcodeWriter.cpp \
- llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp \
- llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
-@BUILD_EXTERNAL_LLVM_FALSE@am_libllvmbitwriter_la_OBJECTS = \
-@BUILD_EXTERNAL_LLVM_FALSE@ BitWriter.lo BitcodeWriter.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ BitcodeWriterPass.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ ValueEnumerator.lo
-libllvmbitwriter_la_OBJECTS = $(am_libllvmbitwriter_la_OBJECTS)
-@BUILD_EXTERNAL_LLVM_FALSE@am_libllvmbitwriter_la_rpath =
libllvmcodegen_la_LIBADD =
am__libllvmcodegen_la_SOURCES_DIST = \
- llvm/lib/Analysis/AliasSetTracker.cpp \
llvm/lib/Analysis/ConstantFolding.cpp \
llvm/lib/Analysis/IVUsers.cpp \
llvm/lib/Analysis/InstructionSimplify.cpp \
- llvm/lib/Analysis/LoopInfo.cpp llvm/lib/Analysis/LoopPass.cpp \
+ llvm/lib/Analysis/Loads.cpp llvm/lib/Analysis/LoopInfo.cpp \
+ llvm/lib/Analysis/LoopPass.cpp \
llvm/lib/Analysis/MemoryDependenceAnalysis.cpp \
llvm/lib/Analysis/PHITransAddr.cpp \
- llvm/lib/Analysis/ProfileInfo.cpp \
llvm/lib/Analysis/ScalarEvolution.cpp \
llvm/lib/Analysis/ScalarEvolutionExpander.cpp \
+ llvm/lib/Analysis/ScalarEvolutionNormalization.cpp \
llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp \
- llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp \
- llvm/lib/CodeGen/AsmPrinter/DIE.cpp \
- llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp \
- llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp \
- llvm/lib/CodeGen/AsmPrinter/DwarfLabel.cpp \
- llvm/lib/CodeGen/AsmPrinter/DwarfPrinter.cpp \
- llvm/lib/CodeGen/AsmPrinter/DwarfWriter.cpp \
+ llvm/lib/CodeGen/Analysis.cpp \
llvm/lib/CodeGen/BranchFolding.cpp \
llvm/lib/CodeGen/CalcSpillWeights.cpp \
+ llvm/lib/CodeGen/CallingConvLower.cpp \
llvm/lib/CodeGen/CodePlacementOpt.cpp \
llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp \
llvm/lib/CodeGen/DeadMachineInstructionElim.cpp \
llvm/lib/CodeGen/DwarfEHPrepare.cpp \
- llvm/lib/CodeGen/ExactHazardRecognizer.cpp \
llvm/lib/CodeGen/GCMetadata.cpp \
llvm/lib/CodeGen/GCStrategy.cpp \
+ llvm/lib/CodeGen/InlineSpiller.cpp \
llvm/lib/CodeGen/LLVMTargetMachine.cpp \
llvm/lib/CodeGen/LatencyPriorityQueue.cpp \
llvm/lib/CodeGen/LiveInterval.cpp \
llvm/lib/CodeGen/LiveIntervalAnalysis.cpp \
llvm/lib/CodeGen/LiveStackAnalysis.cpp \
- llvm/lib/CodeGen/LiveVariables.cpp \
+ llvm/lib/CodeGen/LocalStackSlotAllocation.cpp \
llvm/lib/CodeGen/LowerSubregs.cpp \
llvm/lib/CodeGen/MachineCSE.cpp \
- llvm/lib/CodeGen/MachineDominators.cpp \
llvm/lib/CodeGen/MachineLICM.cpp \
- llvm/lib/CodeGen/MachineLoopInfo.cpp \
llvm/lib/CodeGen/MachineModuleInfoImpls.cpp \
llvm/lib/CodeGen/MachinePassRegistry.cpp \
llvm/lib/CodeGen/MachineSSAUpdater.cpp \
llvm/lib/CodeGen/MachineSink.cpp \
llvm/lib/CodeGen/MachineVerifier.cpp \
- llvm/lib/CodeGen/OptimizeExts.cpp \
llvm/lib/CodeGen/OptimizePHIs.cpp \
llvm/lib/CodeGen/PHIElimination.cpp \
llvm/lib/CodeGen/Passes.cpp \
+ llvm/lib/CodeGen/PeepholeOptimizer.cpp \
llvm/lib/CodeGen/PostRASchedulerList.cpp \
llvm/lib/CodeGen/PreAllocSplitting.cpp \
llvm/lib/CodeGen/ProcessImplicitDefs.cpp \
llvm/lib/CodeGen/PrologEpilogInserter.cpp \
+ llvm/lib/CodeGen/RegAllocFast.cpp \
llvm/lib/CodeGen/RegAllocLinearScan.cpp \
llvm/lib/CodeGen/RegisterCoalescer.cpp \
llvm/lib/CodeGen/RegisterScavenging.cpp \
- llvm/lib/CodeGen/ScheduleDAG.cpp \
llvm/lib/CodeGen/ScheduleDAGEmit.cpp \
llvm/lib/CodeGen/ScheduleDAGInstrs.cpp \
- llvm/lib/CodeGen/ScheduleDAGPrinter.cpp \
- llvm/lib/CodeGen/SelectionDAG/CallingConvLower.cpp \
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp \
llvm/lib/CodeGen/SelectionDAG/FastISel.cpp \
llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp \
@@ -332,7 +156,6 @@ am__libllvmcodegen_la_SOURCES_DIST = \
llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp \
llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp \
llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp \
- llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp \
llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp \
llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp \
llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp \
@@ -341,32 +164,39 @@ am__libllvmcodegen_la_SOURCES_DIST = \
llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp \
llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp \
llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp \
+ llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp \
+ llvm/lib/CodeGen/ScheduleDAGPrinter.cpp \
llvm/lib/CodeGen/ShrinkWrapping.cpp \
llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp \
llvm/lib/CodeGen/SjLjEHPrepare.cpp \
llvm/lib/CodeGen/SlotIndexes.cpp llvm/lib/CodeGen/Spiller.cpp \
+ llvm/lib/CodeGen/SplitKit.cpp \
llvm/lib/CodeGen/StackProtector.cpp \
llvm/lib/CodeGen/StackSlotColoring.cpp \
llvm/lib/CodeGen/StrongPHIElimination.cpp \
llvm/lib/CodeGen/TailDuplication.cpp \
llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp \
llvm/lib/CodeGen/TwoAddressInstructionPass.cpp \
- llvm/lib/CodeGen/UnreachableBlockElim.cpp \
llvm/lib/CodeGen/VirtRegMap.cpp \
llvm/lib/CodeGen/VirtRegRewriter.cpp \
llvm/lib/MC/MCAsmInfoDarwin.cpp llvm/lib/MC/MCAsmStreamer.cpp \
- llvm/lib/MC/MCAssembler.cpp llvm/lib/MC/MCInst.cpp \
- llvm/lib/MC/MCMachOStreamer.cpp llvm/lib/MC/MCNullStreamer.cpp \
- llvm/lib/MC/MCSectionMachO.cpp llvm/lib/MC/MCStreamer.cpp \
+ llvm/lib/MC/MCInstPrinter.cpp \
+ llvm/lib/MC/MCLoggingStreamer.cpp \
+ llvm/lib/MC/MCNullStreamer.cpp llvm/lib/MC/MCStreamer.cpp \
+ llvm/lib/Support/GraphWriter.cpp \
llvm/lib/Target/TargetFrameInfo.cpp \
llvm/lib/Target/TargetSubtarget.cpp \
+ llvm/lib/Transforms/IPO/ConstantMerge.cpp \
+ llvm/lib/Transforms/IPO/GlobalOpt.cpp \
llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp \
llvm/lib/Transforms/Scalar/GEPSplitter.cpp \
llvm/lib/Transforms/Scalar/GVN.cpp \
llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp \
+ llvm/lib/Transforms/Scalar/SCCP.cpp \
llvm/lib/Transforms/Utils/AddrModeMatcher.cpp \
llvm/lib/Transforms/Utils/BasicBlockUtils.cpp \
llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp \
+ llvm/lib/Transforms/Utils/BuildLibCalls.cpp \
llvm/lib/Transforms/Utils/DemoteRegToStack.cpp \
llvm/lib/Transforms/Utils/LCSSA.cpp \
llvm/lib/Transforms/Utils/Local.cpp \
@@ -379,50 +209,46 @@ am__libllvmcodegen_la_SOURCES_DIST = \
llvm/lib/Transforms/Utils/SimplifyCFG.cpp \
llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
@BUILD_EXTERNAL_LLVM_FALSE@am_libllvmcodegen_la_OBJECTS = \
-@BUILD_EXTERNAL_LLVM_FALSE@ AliasSetTracker.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ ConstantFolding.lo IVUsers.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ InstructionSimplify.lo LoopInfo.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ LoopPass.lo \
+@BUILD_EXTERNAL_LLVM_FALSE@ InstructionSimplify.lo Loads.lo \
+@BUILD_EXTERNAL_LLVM_FALSE@ LoopInfo.lo LoopPass.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ MemoryDependenceAnalysis.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ PHITransAddr.lo ProfileInfo.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ ScalarEvolution.lo \
+@BUILD_EXTERNAL_LLVM_FALSE@ PHITransAddr.lo ScalarEvolution.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ ScalarEvolutionExpander.lo \
+@BUILD_EXTERNAL_LLVM_FALSE@ ScalarEvolutionNormalization.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ AggressiveAntiDepBreaker.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ AsmPrinter.lo DIE.lo DwarfDebug.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ DwarfException.lo DwarfLabel.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ DwarfPrinter.lo DwarfWriter.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ BranchFolding.lo \
+@BUILD_EXTERNAL_LLVM_FALSE@ Analysis.lo BranchFolding.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ CalcSpillWeights.lo \
+@BUILD_EXTERNAL_LLVM_FALSE@ CallingConvLower.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ CodePlacementOpt.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ CriticalAntiDepBreaker.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ DeadMachineInstructionElim.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ DwarfEHPrepare.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ ExactHazardRecognizer.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ GCMetadata.lo GCStrategy.lo \
+@BUILD_EXTERNAL_LLVM_FALSE@ DwarfEHPrepare.lo GCMetadata.lo \
+@BUILD_EXTERNAL_LLVM_FALSE@ GCStrategy.lo InlineSpiller.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ LLVMTargetMachine.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ LatencyPriorityQueue.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ LiveInterval.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ LiveIntervalAnalysis.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ LiveStackAnalysis.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ LiveVariables.lo LowerSubregs.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ MachineCSE.lo MachineDominators.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ MachineLICM.lo MachineLoopInfo.lo \
+@BUILD_EXTERNAL_LLVM_FALSE@ LocalStackSlotAllocation.lo \
+@BUILD_EXTERNAL_LLVM_FALSE@ LowerSubregs.lo MachineCSE.lo \
+@BUILD_EXTERNAL_LLVM_FALSE@ MachineLICM.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ MachineModuleInfoImpls.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ MachinePassRegistry.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ MachineSSAUpdater.lo MachineSink.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ MachineVerifier.lo OptimizeExts.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ OptimizePHIs.lo PHIElimination.lo \
-@BUILD_EXTERNAL_LLVM_FALSE@ Passes.lo PostRASchedulerList.lo \
+@BUILD_EXTERNAL_LLVM_FALSE@ MachineVerifier.lo OptimizePHIs.lo \
+@BUILD_EXTERNAL_LLVM_FALSE@ PHIElimination.lo Passes.lo \
+@BUILD_EXTERNAL_LLVM_FALSE@ PeepholeOptimizer.lo \
+@BUILD_EXTERNAL_LLVM_FALSE@ PostRASchedulerList.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ PreAllocSplitting.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ ProcessImplicitDefs.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ PrologEpilogInserter.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ RegAllocFast.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ RegAllocLinearScan.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ RegisterCoalescer.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ RegisterScavenging.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ ScheduleDAG.lo ScheduleDAGEmit.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ ScheduleDAGInstrs.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ ScheduleDAGPrinter.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ CallingConvLower.lo DAGCombiner.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ ScheduleDAGEmit.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ ScheduleDAGInstrs.lo DAGCombiner.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ FastISel.lo FunctionLoweringInfo.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ InstrEmitter.lo LegalizeDAG.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ LegalizeFloatTypes.lo \
@@ -431,7 +257,6 @@ am__libllvmcodegen_la_SOURCES_DIST = \
@BUILD_EXTERNAL_LLVM_FALSE@ LegalizeTypesGeneric.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ LegalizeVectorOps.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ LegalizeVectorTypes.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ ScheduleDAGFast.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ ScheduleDAGList.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ ScheduleDAGRRList.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ ScheduleDAGSDNodes.lo \
@@ -439,28 +264,33 @@ am__libllvmcodegen_la_SOURCES_DIST = \
@BUILD_EXTERNAL_LLVM_FALSE@ SelectionDAGBuilder.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ SelectionDAGISel.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ SelectionDAGPrinter.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ TargetLowering.lo ShrinkWrapping.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ TargetLowering.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ TargetSelectionDAGInfo.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ ScheduleDAGPrinter.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ ShrinkWrapping.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ SimpleRegisterCoalescing.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ SjLjEHPrepare.lo SlotIndexes.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ Spiller.lo StackProtector.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ Spiller.lo SplitKit.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ StackProtector.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ StackSlotColoring.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ StrongPHIElimination.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ TailDuplication.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ TargetLoweringObjectFileImpl.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ TwoAddressInstructionPass.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ UnreachableBlockElim.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ VirtRegMap.lo VirtRegRewriter.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ MCAsmInfoDarwin.lo MCAsmStreamer.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ MCAssembler.lo MCInst.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ MCMachOStreamer.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ MCNullStreamer.lo MCSectionMachO.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ MCStreamer.lo TargetFrameInfo.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ TargetSubtarget.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ CodeGenPrepare.lo GEPSplitter.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ GVN.lo LoopStrengthReduce.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ MCInstPrinter.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ MCLoggingStreamer.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ MCNullStreamer.lo MCStreamer.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ GraphWriter.lo TargetFrameInfo.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ TargetSubtarget.lo ConstantMerge.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ GlobalOpt.lo CodeGenPrepare.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ GEPSplitter.lo GVN.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ LoopStrengthReduce.lo SCCP.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ AddrModeMatcher.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ BasicBlockUtils.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ BreakCriticalEdges.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ BuildLibCalls.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ DemoteRegToStack.lo LCSSA.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ Local.lo LoopSimplify.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ LowerInvoke.lo LowerSwitch.lo \
@@ -470,64 +300,36 @@ am__libllvmcodegen_la_SOURCES_DIST = \
@BUILD_EXTERNAL_LLVM_FALSE@ UnifyFunctionExitNodes.lo
libllvmcodegen_la_OBJECTS = $(am_libllvmcodegen_la_OBJECTS)
@BUILD_EXTERNAL_LLVM_FALSE at am_libllvmcodegen_la_rpath =
-libllvmfullcodegen_la_LIBADD =
-am__libllvmfullcodegen_la_SOURCES_DIST = \
- llvm/lib/CodeGen/GCMetadataPrinter.cpp \
- llvm/lib/CodeGen/IfConversion.cpp \
- llvm/lib/CodeGen/IntrinsicLowering.cpp \
- llvm/lib/CodeGen/OcamlGC.cpp \
- llvm/lib/CodeGen/RegAllocLocal.cpp \
- llvm/lib/CodeGen/RegAllocPBQP.cpp \
- llvm/lib/CodeGen/ShadowStackGC.cpp \
- llvm/lib/ExecutionEngine/Interpreter/Execution.cpp \
- llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp \
- llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp \
- llvm/lib/Target/Target.cpp llvm/lib/Target/TargetAsmLexer.cpp \
- llvm/lib/Target/TargetELFWriterInfo.cpp \
- llvm/lib/Target/TargetIntrinsicInfo.cpp
- at BUILD_EXTERNAL_LLVM_FALSE@am_libllvmfullcodegen_la_OBJECTS = \
- at BUILD_EXTERNAL_LLVM_FALSE@ GCMetadataPrinter.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ IfConversion.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ IntrinsicLowering.lo OcamlGC.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ RegAllocLocal.lo RegAllocPBQP.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ ShadowStackGC.lo Execution.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ ExternalFunctions.lo Interpreter.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ Target.lo TargetAsmLexer.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ TargetELFWriterInfo.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ TargetIntrinsicInfo.lo
-libllvmfullcodegen_la_OBJECTS = $(am_libllvmfullcodegen_la_OBJECTS)
- at BUILD_EXTERNAL_LLVM_FALSE@am_libllvmfullcodegen_la_rpath =
-libllvminterpreter_la_LIBADD =
-am__libllvminterpreter_la_SOURCES_DIST = \
- llvm/lib/ExecutionEngine/Interpreter/Execution.cpp \
- llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp \
- llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
- at BUILD_EXTERNAL_LLVM_FALSE@am_libllvminterpreter_la_OBJECTS = \
- at BUILD_EXTERNAL_LLVM_FALSE@ Execution.lo ExternalFunctions.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ Interpreter.lo
-libllvminterpreter_la_OBJECTS = $(am_libllvminterpreter_la_OBJECTS)
- at BUILD_EXTERNAL_LLVM_FALSE@am_libllvminterpreter_la_rpath =
libllvmjit_la_LIBADD =
-am__libllvmjit_la_SOURCES_DIST = llvm/lib/Analysis/IPA/CallGraph.cpp \
- llvm/lib/Analysis/AliasAnalysis.cpp \
+am__libllvmjit_la_SOURCES_DIST = llvm/lib/Analysis/AliasAnalysis.cpp \
+ llvm/lib/Analysis/AliasSetTracker.cpp \
llvm/lib/Analysis/BasicAliasAnalysis.cpp \
llvm/lib/Analysis/CaptureTracking.cpp \
llvm/lib/Analysis/DebugInfo.cpp \
+ llvm/lib/Analysis/IPA/CallGraph.cpp \
llvm/lib/Analysis/MemoryBuiltins.cpp \
llvm/lib/Analysis/PointerTracking.cpp \
+ llvm/lib/Analysis/ProfileInfo.cpp \
llvm/lib/Analysis/ValueTracking.cpp \
llvm/lib/CodeGen/ELFCodeEmitter.cpp \
llvm/lib/CodeGen/ELFWriter.cpp \
+ llvm/lib/CodeGen/LiveVariables.cpp \
llvm/lib/CodeGen/MachineBasicBlock.cpp \
+ llvm/lib/CodeGen/MachineDominators.cpp \
llvm/lib/CodeGen/MachineFunction.cpp \
llvm/lib/CodeGen/MachineFunctionAnalysis.cpp \
llvm/lib/CodeGen/MachineFunctionPass.cpp \
+ llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp \
llvm/lib/CodeGen/MachineInstr.cpp \
+ llvm/lib/CodeGen/MachineLoopInfo.cpp \
llvm/lib/CodeGen/MachineModuleInfo.cpp \
llvm/lib/CodeGen/MachineRegisterInfo.cpp \
llvm/lib/CodeGen/ObjectCodeEmitter.cpp \
+ llvm/lib/CodeGen/PostRAHazardRecognizer.cpp \
llvm/lib/CodeGen/PseudoSourceValue.cpp \
+ llvm/lib/CodeGen/ScheduleDAG.cpp \
llvm/lib/CodeGen/TargetInstrInfoImpl.cpp \
+ llvm/lib/CodeGen/UnreachableBlockElim.cpp \
llvm/lib/ExecutionEngine/ExecutionEngine.cpp \
llvm/lib/ExecutionEngine/JIT/Intercept.cpp \
llvm/lib/ExecutionEngine/JIT/JIT.cpp \
@@ -537,9 +339,11 @@ am__libllvmjit_la_SOURCES_DIST = llvm/lib/Analysis/IPA/CallGraph.cpp \
llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp \
llvm/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp \
llvm/lib/ExecutionEngine/JIT/TargetSelect.cpp \
- llvm/lib/MC/MCAsmInfo.cpp llvm/lib/MC/MCContext.cpp \
- llvm/lib/MC/MCExpr.cpp llvm/lib/MC/MCSection.cpp \
- llvm/lib/MC/MCSectionELF.cpp llvm/lib/MC/MCSymbol.cpp \
+ llvm/lib/MC/MCAsmInfo.cpp llvm/lib/MC/MCAssembler.cpp \
+ llvm/lib/MC/MCContext.cpp llvm/lib/MC/MCExpr.cpp \
+ llvm/lib/MC/MCInst.cpp llvm/lib/MC/MCSection.cpp \
+ llvm/lib/MC/MCSectionCOFF.cpp llvm/lib/MC/MCSectionELF.cpp \
+ llvm/lib/MC/MCSectionMachO.cpp llvm/lib/MC/MCSymbol.cpp \
llvm/lib/Support/APFloat.cpp llvm/lib/Support/APInt.cpp \
llvm/lib/Support/Allocator.cpp \
llvm/lib/Support/CommandLine.cpp \
@@ -547,7 +351,6 @@ am__libllvmjit_la_SOURCES_DIST = llvm/lib/Analysis/IPA/CallGraph.cpp \
llvm/lib/Support/Dwarf.cpp llvm/lib/Support/ErrorHandling.cpp \
llvm/lib/Support/FoldingSet.cpp \
llvm/lib/Support/FormattedStream.cpp \
- llvm/lib/Support/GraphWriter.cpp \
llvm/lib/Support/ManagedStatic.cpp \
llvm/lib/Support/MemoryBuffer.cpp \
llvm/lib/Support/PrettyStackTrace.cpp \
@@ -559,26 +362,22 @@ am__libllvmjit_la_SOURCES_DIST = llvm/lib/Analysis/IPA/CallGraph.cpp \
llvm/lib/Support/StringRef.cpp \
llvm/lib/Support/TargetRegistry.cpp llvm/lib/Support/Timer.cpp \
llvm/lib/Support/Triple.cpp llvm/lib/Support/Twine.cpp \
+ llvm/lib/Support/raw_ostream.cpp \
llvm/lib/Support/circular_raw_ostream.cpp \
- llvm/lib/Support/raw_ostream.cpp llvm/lib/Target/Mangler.cpp \
+ llvm/lib/Target/Mangler.cpp \
llvm/lib/Target/SubtargetFeature.cpp \
llvm/lib/Target/TargetData.cpp \
llvm/lib/Target/TargetInstrInfo.cpp \
llvm/lib/Target/TargetLoweringObjectFile.cpp \
llvm/lib/Target/TargetMachine.cpp \
llvm/lib/Target/TargetRegisterInfo.cpp \
- llvm/lib/Transforms/Scalar/ADCE.cpp \
llvm/lib/Transforms/Scalar/DCE.cpp \
- llvm/lib/Transforms/Scalar/SCCP.cpp \
llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp \
- llvm/lib/Transforms/IPO/ConstantMerge.cpp \
- llvm/lib/Transforms/IPO/GlobalOpt.cpp \
- llvm/lib/Transforms/IPO/GlobalDCE.cpp \
llvm/lib/VMCore/AsmWriter.cpp llvm/lib/VMCore/Attributes.cpp \
llvm/lib/VMCore/AutoUpgrade.cpp llvm/lib/VMCore/BasicBlock.cpp \
llvm/lib/VMCore/ConstantFold.cpp llvm/lib/VMCore/Constants.cpp \
- llvm/lib/VMCore/Core.cpp llvm/lib/VMCore/Dominators.cpp \
- llvm/lib/VMCore/Function.cpp \
+ llvm/lib/VMCore/Core.cpp llvm/lib/VMCore/DebugLoc.cpp \
+ llvm/lib/VMCore/Dominators.cpp llvm/lib/VMCore/Function.cpp \
llvm/lib/VMCore/GVMaterializer.cpp llvm/lib/VMCore/Globals.cpp \
llvm/lib/VMCore/IRBuilder.cpp llvm/lib/VMCore/InlineAsm.cpp \
llvm/lib/VMCore/Instruction.cpp \
@@ -589,67 +388,73 @@ am__libllvmjit_la_SOURCES_DIST = llvm/lib/Analysis/IPA/CallGraph.cpp \
llvm/lib/VMCore/LeakDetector.cpp llvm/lib/VMCore/Metadata.cpp \
llvm/lib/VMCore/Module.cpp llvm/lib/VMCore/Pass.cpp \
llvm/lib/VMCore/PassManager.cpp \
+ llvm/lib/VMCore/PassRegistry.cpp \
llvm/lib/VMCore/PrintModulePass.cpp llvm/lib/VMCore/Type.cpp \
llvm/lib/VMCore/TypeSymbolTable.cpp llvm/lib/VMCore/Use.cpp \
llvm/lib/VMCore/Value.cpp llvm/lib/VMCore/ValueSymbolTable.cpp \
llvm/lib/VMCore/ValueTypes.cpp llvm/lib/VMCore/Verifier.cpp
- at BUILD_EXTERNAL_LLVM_FALSE@am_libllvmjit_la_OBJECTS = CallGraph.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ AliasAnalysis.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@am_libllvmjit_la_OBJECTS = \
+ at BUILD_EXTERNAL_LLVM_FALSE@ AliasAnalysis.lo AliasSetTracker.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ BasicAliasAnalysis.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ CaptureTracking.lo DebugInfo.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ MemoryBuiltins.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ PointerTracking.lo ValueTracking.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ ELFCodeEmitter.lo ELFWriter.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ CallGraph.lo MemoryBuiltins.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ PointerTracking.lo ProfileInfo.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ ValueTracking.lo ELFCodeEmitter.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ ELFWriter.lo LiveVariables.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ MachineBasicBlock.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ MachineDominators.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ MachineFunction.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ MachineFunctionAnalysis.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ MachineFunctionPass.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ MachineInstr.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ MachineFunctionPrinterPass.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ MachineInstr.lo MachineLoopInfo.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ MachineModuleInfo.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ MachineRegisterInfo.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ ObjectCodeEmitter.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ PseudoSourceValue.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ PostRAHazardRecognizer.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ PseudoSourceValue.lo ScheduleDAG.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ TargetInstrInfoImpl.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ UnreachableBlockElim.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ ExecutionEngine.lo Intercept.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ JIT.lo JITDebugRegisterer.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ JITDwarfEmitter.lo JITEmitter.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ JITMemoryManager.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ OProfileJITEventListener.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ TargetSelect.lo MCAsmInfo.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ MCContext.lo MCExpr.lo MCSection.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ MCSectionELF.lo MCSymbol.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ MCAssembler.lo MCContext.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ MCExpr.lo MCInst.lo MCSection.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ MCSectionCOFF.lo MCSectionELF.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ MCSectionMachO.lo MCSymbol.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ APFloat.lo APInt.lo Allocator.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ CommandLine.lo ConstantRange.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ Debug.lo Dwarf.lo ErrorHandling.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ FoldingSet.lo FormattedStream.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ GraphWriter.lo ManagedStatic.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ MemoryBuffer.lo PrettyStackTrace.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ SmallPtrSet.lo SmallVector.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ SourceMgr.lo Statistic.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ StringExtras.lo StringMap.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ StringPool.lo StringRef.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ TargetRegistry.lo Timer.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ Triple.lo Twine.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ circular_raw_ostream.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ raw_ostream.lo Mangler.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ ManagedStatic.lo MemoryBuffer.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ PrettyStackTrace.lo SmallPtrSet.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ SmallVector.lo SourceMgr.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ Statistic.lo StringExtras.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ StringMap.lo StringPool.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ StringRef.lo TargetRegistry.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ Timer.lo Triple.lo Twine.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ raw_ostream.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ circular_raw_ostream.lo Mangler.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ SubtargetFeature.lo TargetData.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ TargetInstrInfo.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ TargetLoweringObjectFile.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ TargetMachine.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ TargetRegisterInfo.lo ADCE.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ DCE.lo SCCP.lo SimplifyCFGPass.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ ConstantMerge.lo GlobalOpt.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ GlobalDCE.lo AsmWriter.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ TargetRegisterInfo.lo DCE.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ SimplifyCFGPass.lo AsmWriter.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ Attributes.lo AutoUpgrade.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ BasicBlock.lo ConstantFold.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ Constants.lo Core.lo Dominators.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ Function.lo GVMaterializer.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ Globals.lo IRBuilder.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ InlineAsm.lo Instruction.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ Instructions.lo IntrinsicInst.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ LLVMContext.lo LLVMContextImpl.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ LeakDetector.lo Metadata.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ Module.lo Pass.lo PassManager.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ Constants.lo Core.lo DebugLoc.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ Dominators.lo Function.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ GVMaterializer.lo Globals.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ IRBuilder.lo InlineAsm.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ Instruction.lo Instructions.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ IntrinsicInst.lo LLVMContext.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ LLVMContextImpl.lo LeakDetector.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ Metadata.lo Module.lo Pass.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@ PassManager.lo PassRegistry.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ PrintModulePass.lo Type.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ TypeSymbolTable.lo Use.lo Value.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ ValueSymbolTable.lo ValueTypes.lo \
@@ -668,6 +473,7 @@ am__libllvmpowerpccodegen_la_SOURCES_DIST = \
llvm/lib/Target/PowerPC/PPCMCAsmInfo.cpp \
llvm/lib/Target/PowerPC/PPCPredicates.cpp \
llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp \
+ llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp \
llvm/lib/Target/PowerPC/PPCSubtarget.cpp \
llvm/lib/Target/PowerPC/PPCTargetMachine.cpp \
llvm/lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp
@@ -681,91 +487,13 @@ am__libllvmpowerpccodegen_la_SOURCES_DIST = \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@ libllvmpowerpccodegen_la-PPCMCAsmInfo.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@ libllvmpowerpccodegen_la-PPCPredicates.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@ libllvmpowerpccodegen_la-PPCRegisterInfo.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@ libllvmpowerpccodegen_la-PPCSelectionDAGInfo.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@ libllvmpowerpccodegen_la-PPCSubtarget.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@ libllvmpowerpccodegen_la-PPCTargetMachine.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@ libllvmpowerpccodegen_la-PowerPCTargetInfo.lo
libllvmpowerpccodegen_la_OBJECTS = \
$(am_libllvmpowerpccodegen_la_OBJECTS)
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE at am_libllvmpowerpccodegen_la_rpath =
-libllvmsupport_la_LIBADD =
-am__libllvmsupport_la_SOURCES_DIST = llvm/lib/Support/APFloat.cpp \
- llvm/lib/Support/APInt.cpp llvm/lib/Support/APSInt.cpp \
- llvm/lib/Support/Allocator.cpp \
- llvm/lib/Support/CommandLine.cpp \
- llvm/lib/Support/ConstantRange.cpp llvm/lib/Support/Debug.cpp \
- llvm/lib/Support/DeltaAlgorithm.cpp llvm/lib/Support/Dwarf.cpp \
- llvm/lib/Support/ErrorHandling.cpp \
- llvm/lib/Support/FileUtilities.cpp \
- llvm/lib/Support/FoldingSet.cpp \
- llvm/lib/Support/FormattedStream.cpp \
- llvm/lib/Support/GraphWriter.cpp llvm/lib/Support/IsInf.cpp \
- llvm/lib/Support/IsNAN.cpp llvm/lib/Support/ManagedStatic.cpp \
- llvm/lib/Support/MemoryBuffer.cpp \
- llvm/lib/Support/MemoryObject.cpp \
- llvm/lib/Support/PluginLoader.cpp \
- llvm/lib/Support/PrettyStackTrace.cpp \
- llvm/lib/Support/Regex.cpp \
- llvm/lib/Support/SlowOperationInformer.cpp \
- llvm/lib/Support/SmallPtrSet.cpp \
- llvm/lib/Support/SmallVector.cpp \
- llvm/lib/Support/SourceMgr.cpp llvm/lib/Support/Statistic.cpp \
- llvm/lib/Support/StringExtras.cpp \
- llvm/lib/Support/StringMap.cpp llvm/lib/Support/StringPool.cpp \
- llvm/lib/Support/StringRef.cpp \
- llvm/lib/Support/SystemUtils.cpp \
- llvm/lib/Support/TargetRegistry.cpp llvm/lib/Support/Timer.cpp \
- llvm/lib/Support/Triple.cpp llvm/lib/Support/Twine.cpp \
- llvm/lib/Support/circular_raw_ostream.cpp \
- llvm/lib/Support/raw_os_ostream.cpp \
- llvm/lib/Support/raw_ostream.cpp llvm/lib/Support/regcomp.c \
- llvm/lib/Support/regerror.c llvm/lib/Support/regexec.c \
- llvm/lib/Support/regfree.c llvm/lib/Support/regstrlcpy.c
- at BUILD_EXTERNAL_LLVM_FALSE@am_libllvmsupport_la_OBJECTS = APFloat.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ APInt.lo APSInt.lo Allocator.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ CommandLine.lo ConstantRange.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ Debug.lo DeltaAlgorithm.lo Dwarf.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ ErrorHandling.lo FileUtilities.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ FoldingSet.lo FormattedStream.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ GraphWriter.lo IsInf.lo IsNAN.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ ManagedStatic.lo MemoryBuffer.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ MemoryObject.lo PluginLoader.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ PrettyStackTrace.lo Regex.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ SlowOperationInformer.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ SmallPtrSet.lo SmallVector.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ SourceMgr.lo Statistic.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ StringExtras.lo StringMap.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ StringPool.lo StringRef.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ SystemUtils.lo TargetRegistry.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ Timer.lo Triple.lo Twine.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ circular_raw_ostream.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ raw_os_ostream.lo raw_ostream.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ regcomp.lo regerror.lo regexec.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ regfree.lo regstrlcpy.lo
-libllvmsupport_la_OBJECTS = $(am_libllvmsupport_la_OBJECTS)
- at BUILD_EXTERNAL_LLVM_FALSE@am_libllvmsupport_la_rpath =
-libllvmsupport_nodups_la_LIBADD =
-am__libllvmsupport_nodups_la_SOURCES_DIST = \
- llvm/lib/Support/APSInt.cpp \
- llvm/lib/Support/DeltaAlgorithm.cpp \
- llvm/lib/Support/FileUtilities.cpp llvm/lib/Support/IsInf.cpp \
- llvm/lib/Support/IsNAN.cpp llvm/lib/Support/MemoryObject.cpp \
- llvm/lib/Support/PluginLoader.cpp llvm/lib/Support/Regex.cpp \
- llvm/lib/Support/SlowOperationInformer.cpp \
- llvm/lib/Support/SystemUtils.cpp \
- llvm/lib/Support/raw_os_ostream.cpp llvm/lib/Support/regcomp.c \
- llvm/lib/Support/regerror.c llvm/lib/Support/regexec.c \
- llvm/lib/Support/regfree.c llvm/lib/Support/regstrlcpy.c
- at BUILD_EXTERNAL_LLVM_FALSE@am_libllvmsupport_nodups_la_OBJECTS = \
- at BUILD_EXTERNAL_LLVM_FALSE@ APSInt.lo DeltaAlgorithm.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ FileUtilities.lo IsInf.lo IsNAN.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ MemoryObject.lo PluginLoader.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ Regex.lo SlowOperationInformer.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ SystemUtils.lo raw_os_ostream.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ regcomp.lo regerror.lo regexec.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ regfree.lo regstrlcpy.lo
-libllvmsupport_nodups_la_OBJECTS = \
- $(am_libllvmsupport_nodups_la_OBJECTS)
- at BUILD_EXTERNAL_LLVM_FALSE@am_libllvmsupport_nodups_la_rpath =
libllvmsystem_la_DEPENDENCIES =
am__libllvmsystem_la_SOURCES_DIST = llvm/lib/System/Alarm.cpp \
llvm/lib/System/Atomic.cpp llvm/lib/System/Disassembler.cpp \
@@ -776,7 +504,7 @@ am__libllvmsystem_la_SOURCES_DIST = llvm/lib/System/Alarm.cpp \
llvm/lib/System/Program.cpp llvm/lib/System/RWMutex.cpp \
llvm/lib/System/Signals.cpp llvm/lib/System/ThreadLocal.cpp \
llvm/lib/System/Threading.cpp llvm/lib/System/TimeValue.cpp \
- llvm/config.status
+ llvm/lib/System/Valgrind.cpp llvm/config.status
@BUILD_EXTERNAL_LLVM_FALSE at am_libllvmsystem_la_OBJECTS = Alarm.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ Atomic.lo Disassembler.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ DynamicLibrary.lo Errno.lo Host.lo \
@@ -784,7 +512,7 @@ am__libllvmsystem_la_SOURCES_DIST = llvm/lib/System/Alarm.cpp \
@BUILD_EXTERNAL_LLVM_FALSE@ Path.lo Process.lo Program.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ RWMutex.lo Signals.lo \
@BUILD_EXTERNAL_LLVM_FALSE@ ThreadLocal.lo Threading.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@ TimeValue.lo
+ at BUILD_EXTERNAL_LLVM_FALSE@ TimeValue.lo Valgrind.lo
libllvmsystem_la_OBJECTS = $(am_libllvmsystem_la_OBJECTS)
libllvmsystem_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
@@ -792,9 +520,18 @@ libllvmsystem_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
$(LDFLAGS) -o $@
@BUILD_EXTERNAL_LLVM_FALSE at am_libllvmsystem_la_rpath =
libllvmx86codegen_la_LIBADD =
-am__libllvmx86codegen_la_SOURCES_DIST = llvm/lib/MC/MCAsmInfoCOFF.cpp \
- llvm/lib/MC/MCCodeEmitter.cpp llvm/lib/MC/TargetAsmBackend.cpp \
+am__libllvmx86codegen_la_SOURCES_DIST = \
+ llvm/lib/MC/ELFObjectWriter.cpp llvm/lib/MC/MCAsmInfoCOFF.cpp \
+ llvm/lib/MC/MCCodeEmitter.cpp llvm/lib/MC/MCELFStreamer.cpp \
+ llvm/lib/MC/MCMachOStreamer.cpp \
+ llvm/lib/MC/MCObjectStreamer.cpp \
+ llvm/lib/MC/MCObjectWriter.cpp \
+ llvm/lib/MC/MachObjectWriter.cpp \
+ llvm/lib/MC/TargetAsmBackend.cpp \
+ llvm/lib/MC/WinCOFFObjectWriter.cpp \
+ llvm/lib/MC/WinCOFFStreamer.cpp \
llvm/lib/Target/TargetELFWriterInfo.cpp \
+ llvm/lib/Target/X86/SSEDomainFix.cpp \
llvm/lib/Target/X86/TargetInfo/X86TargetInfo.cpp \
llvm/lib/Target/X86/X86AsmBackend.cpp \
llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp \
@@ -802,22 +539,30 @@ am__libllvmx86codegen_la_SOURCES_DIST = llvm/lib/MC/MCAsmInfoCOFF.cpp \
llvm/lib/Target/X86/X86ELFWriterInfo.cpp \
llvm/lib/Target/X86/X86FastISel.cpp \
llvm/lib/Target/X86/X86FloatingPoint.cpp \
- llvm/lib/Target/X86/X86FloatingPointRegKill.cpp \
llvm/lib/Target/X86/X86ISelDAGToDAG.cpp \
llvm/lib/Target/X86/X86ISelLowering.cpp \
llvm/lib/Target/X86/X86InstrInfo.cpp \
llvm/lib/Target/X86/X86JITInfo.cpp \
llvm/lib/Target/X86/X86MCAsmInfo.cpp \
llvm/lib/Target/X86/X86MCCodeEmitter.cpp \
- llvm/lib/Target/X86/X86MCTargetExpr.cpp \
llvm/lib/Target/X86/X86RegisterInfo.cpp \
+ llvm/lib/Target/X86/X86SelectionDAGInfo.cpp \
llvm/lib/Target/X86/X86Subtarget.cpp \
llvm/lib/Target/X86/X86TargetMachine.cpp \
llvm/lib/Target/X86/X86TargetObjectFile.cpp
- at BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE at am_libllvmx86codegen_la_OBJECTS = libllvmx86codegen_la-MCAsmInfoCOFF.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE at am_libllvmx86codegen_la_OBJECTS = libllvmx86codegen_la-ELFObjectWriter.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-MCAsmInfoCOFF.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-MCCodeEmitter.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-MCELFStreamer.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-MCMachOStreamer.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-MCObjectStreamer.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-MCObjectWriter.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-MachObjectWriter.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-TargetAsmBackend.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-WinCOFFObjectWriter.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-WinCOFFStreamer.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-TargetELFWriterInfo.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-SSEDomainFix.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86TargetInfo.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86AsmBackend.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86COFFMachineModuleInfo.lo \
@@ -825,202 +570,26 @@ am__libllvmx86codegen_la_SOURCES_DIST = llvm/lib/MC/MCAsmInfoCOFF.cpp \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86ELFWriterInfo.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86FastISel.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86FloatingPoint.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86FloatingPointRegKill.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86ISelDAGToDAG.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86ISelLowering.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86InstrInfo.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86JITInfo.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86MCAsmInfo.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86MCCodeEmitter.lo \
- at BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86MCTargetExpr.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86RegisterInfo.lo \
+ at BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86SelectionDAGInfo.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86Subtarget.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86TargetMachine.lo \
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ libllvmx86codegen_la-X86TargetObjectFile.lo
libllvmx86codegen_la_OBJECTS = $(am_libllvmx86codegen_la_OBJECTS)
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE at am_libllvmx86codegen_la_rpath =
PROGRAMS = $(noinst_PROGRAMS)
-am__FileCheck_SOURCES_DIST = llvm/utils/FileCheck/FileCheck.cpp
- at BUILD_EXTERNAL_LLVM_FALSE@am_FileCheck_OBJECTS = \
- at BUILD_EXTERNAL_LLVM_FALSE@ FileCheck-FileCheck.$(OBJEXT)
-FileCheck_OBJECTS = $(am_FileCheck_OBJECTS)
- at BUILD_EXTERNAL_LLVM_FALSE@FileCheck_DEPENDENCIES = libllvmsupport.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmsystem.la
-FileCheck_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
- $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(FileCheck_CXXFLAGS) \
- $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
-am__count_SOURCES_DIST = llvm/utils/count/count.c
- at BUILD_EXTERNAL_LLVM_FALSE@am_count_OBJECTS = count-count.$(OBJEXT)
-count_OBJECTS = $(am_count_OBJECTS)
- at BUILD_EXTERNAL_LLVM_FALSE@count_DEPENDENCIES = libllvmsystem.la
-am__llc_SOURCES_DIST = llvm/lib/MC/MCInstPrinter.cpp \
- llvm/tools/llc/llc.cpp
- at BUILD_EXTERNAL_LLVM_FALSE@am_llc_OBJECTS = \
- at BUILD_EXTERNAL_LLVM_FALSE@ llc-MCInstPrinter.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llc-llc.$(OBJEXT)
-llc_OBJECTS = $(am_llc_OBJECTS)
- at BUILD_EXTERNAL_LLVM_FALSE@llc_DEPENDENCIES = libllvmbitreader.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmasmprinter.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmasmparser.la $(lli_LDADD)
-llc_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
- $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(llc_CXXFLAGS) \
- $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
-am__lli_SOURCES_DIST = llvm/tools/lli/lli.cpp
- at BUILD_EXTERNAL_LLVM_FALSE@am_lli_OBJECTS = lli.$(OBJEXT)
-lli_OBJECTS = $(am_lli_OBJECTS)
- at BUILD_EXTERNAL_LLVM_FALSE@lli_DEPENDENCIES = libllvmbitreader.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmfullcodegen.la libllvmjit.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ $(am__append_4) $(am__append_8) \
- at BUILD_EXTERNAL_LLVM_FALSE@ $(am__append_12) \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmfullcodegen.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmcodegen.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmsupport_nodups.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmjit.la libllvmsystem.la
-am__llvm_as_SOURCES_DIST = llvm/tools/llvm-as/llvm-as.cpp
- at BUILD_EXTERNAL_LLVM_FALSE@am_llvm_as_OBJECTS = \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvm_as-llvm-as.$(OBJEXT)
-llvm_as_OBJECTS = $(am_llvm_as_OBJECTS)
- at BUILD_EXTERNAL_LLVM_FALSE@llvm_as_DEPENDENCIES = libllvmasmparser.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmbitwriter.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmsupport_nodups.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmjit.la libllvmsystem.la
-llvm_as_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
- $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(llvm_as_CXXFLAGS) \
- $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
-am__llvm_dis_SOURCES_DIST = llvm/tools/llvm-dis/llvm-dis.cpp
- at BUILD_EXTERNAL_LLVM_FALSE@am_llvm_dis_OBJECTS = \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvm_dis-llvm-dis.$(OBJEXT)
-llvm_dis_OBJECTS = $(am_llvm_dis_OBJECTS)
- at BUILD_EXTERNAL_LLVM_FALSE@llvm_dis_DEPENDENCIES = \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmasmparser.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmbitreader.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmsupport_nodups.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmjit.la libllvmsystem.la
-llvm_dis_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
- $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(llvm_dis_CXXFLAGS) \
- $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
-am__llvmunittest_ADT_SOURCES_DIST = \
- llvm/unittests/ADT/APFloatTest.cpp \
- llvm/unittests/ADT/APIntTest.cpp \
- llvm/unittests/ADT/DenseMapTest.cpp \
- llvm/unittests/ADT/DenseSetTest.cpp \
- llvm/unittests/ADT/ImmutableSetTest.cpp \
- llvm/unittests/ADT/SmallStringTest.cpp \
- llvm/unittests/ADT/SmallVectorTest.cpp \
- llvm/unittests/ADT/SparseBitVectorTest.cpp \
- llvm/unittests/ADT/StringMapTest.cpp \
- llvm/unittests/ADT/StringRefTest.cpp \
- llvm/unittests/ADT/TripleTest.cpp \
- llvm/unittests/ADT/TwineTest.cpp
- at BUILD_EXTERNAL_LLVM_FALSE@am_llvmunittest_ADT_OBJECTS = llvmunittest_ADT-APFloatTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_ADT-APIntTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_ADT-DenseMapTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_ADT-DenseSetTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_ADT-ImmutableSetTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_ADT-SmallStringTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_ADT-SmallVectorTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_ADT-SparseBitVectorTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_ADT-StringMapTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_ADT-StringRefTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_ADT-TripleTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_ADT-TwineTest.$(OBJEXT)
-llvmunittest_ADT_OBJECTS = $(am_llvmunittest_ADT_OBJECTS)
- at BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_ADT_DEPENDENCIES = \
- at BUILD_EXTERNAL_LLVM_FALSE@ libgoogletest.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmsupport_nodups.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmjit.la libllvmsystem.la
-llvmunittest_ADT_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
- $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
- $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
- $(LDFLAGS) -o $@
-am__llvmunittest_ExecutionEngine_SOURCES_DIST = \
- llvm/lib/CodeGen/IntrinsicLowering.cpp \
- llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp
- at BUILD_EXTERNAL_LLVM_FALSE@am_llvmunittest_ExecutionEngine_OBJECTS = llvmunittest_ExecutionEngine-IntrinsicLowering.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_ExecutionEngine-ExecutionEngineTest.$(OBJEXT)
-llvmunittest_ExecutionEngine_OBJECTS = \
- $(am_llvmunittest_ExecutionEngine_OBJECTS)
- at BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_ExecutionEngine_DEPENDENCIES = \
- at BUILD_EXTERNAL_LLVM_FALSE@ libgoogletest.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvminterpreter.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmsupport_nodups.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ $(libclamavcxx_la_LIBADD) \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmsystem.la
-llvmunittest_ExecutionEngine_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
- $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
- $(llvmunittest_ExecutionEngine_CXXFLAGS) $(CXXFLAGS) \
- $(AM_LDFLAGS) $(LDFLAGS) -o $@
-am__llvmunittest_JIT_SOURCES_DIST = \
- llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp \
- llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp \
- llvm/unittests/ExecutionEngine/JIT/JITTest.cpp
- at BUILD_EXTERNAL_LLVM_FALSE@am_llvmunittest_JIT_OBJECTS = llvmunittest_JIT-JITEventListenerTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_JIT-JITMemoryManagerTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_JIT-JITTest.$(OBJEXT)
-llvmunittest_JIT_OBJECTS = $(am_llvmunittest_JIT_OBJECTS)
- at BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_JIT_DEPENDENCIES = \
- at BUILD_EXTERNAL_LLVM_FALSE@ libgoogletest.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmasmparser.la $(lli_LDADD)
-llvmunittest_JIT_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
- $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
- $(llvmunittest_JIT_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
- $(LDFLAGS) -o $@
-am__llvmunittest_Support_SOURCES_DIST = \
- llvm/unittests/Support/AllocatorTest.cpp \
- llvm/unittests/Support/ConstantRangeTest.cpp \
- llvm/unittests/Support/MathExtrasTest.cpp \
- llvm/unittests/Support/RegexTest.cpp \
- llvm/unittests/Support/TypeBuilderTest.cpp \
- llvm/unittests/Support/ValueHandleTest.cpp \
- llvm/unittests/Support/raw_ostream_test.cpp
- at BUILD_EXTERNAL_LLVM_FALSE@am_llvmunittest_Support_OBJECTS = llvmunittest_Support-AllocatorTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_Support-ConstantRangeTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_Support-MathExtrasTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_Support-RegexTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_Support-TypeBuilderTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_Support-ValueHandleTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_Support-raw_ostream_test.$(OBJEXT)
-llvmunittest_Support_OBJECTS = $(am_llvmunittest_Support_OBJECTS)
- at BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_Support_DEPENDENCIES = \
- at BUILD_EXTERNAL_LLVM_FALSE@ libgoogletest.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmsupport_nodups.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmjit.la libllvmsystem.la
-llvmunittest_Support_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
- $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
- $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
- $(LDFLAGS) -o $@
-am__llvmunittest_VMCore_SOURCES_DIST = \
- llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp \
- llvm/lib/Analysis/LoopInfo.cpp llvm/lib/Analysis/LoopPass.cpp \
- llvm/unittests/VMCore/ConstantsTest.cpp \
- llvm/unittests/VMCore/MetadataTest.cpp \
- llvm/unittests/VMCore/PassManagerTest.cpp
- at BUILD_EXTERNAL_LLVM_FALSE@am_llvmunittest_VMCore_OBJECTS = llvmunittest_VMCore-CallGraphSCCPass.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_VMCore-LoopInfo.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_VMCore-LoopPass.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_VMCore-ConstantsTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_VMCore-MetadataTest.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@ llvmunittest_VMCore-PassManagerTest.$(OBJEXT)
-llvmunittest_VMCore_OBJECTS = $(am_llvmunittest_VMCore_OBJECTS)
- at BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_VMCore_DEPENDENCIES = \
- at BUILD_EXTERNAL_LLVM_FALSE@ libgoogletest.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmsupport_nodups.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmjit.la libllvmsystem.la
-llvmunittest_VMCore_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \
- $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \
- $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
- $(LDFLAGS) -o $@
-am__not_SOURCES_DIST = llvm/utils/not/not.cpp
- at BUILD_EXTERNAL_LLVM_FALSE@am_not_OBJECTS = not-not.$(OBJEXT)
-not_OBJECTS = $(am_not_OBJECTS)
- at BUILD_EXTERNAL_LLVM_FALSE@not_DEPENDENCIES = libllvmsystem.la
-not_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
- $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(not_CXXFLAGS) \
- $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
-am__tblgen_SOURCES_DIST = llvm/utils/TableGen/AsmMatcherEmitter.cpp \
- llvm/utils/TableGen/AsmWriterEmitter.cpp \
+am__tblgen_SOURCES_DIST = llvm/utils/TableGen/ARMDecoderEmitter.cpp \
+ llvm/utils/TableGen/AsmMatcherEmitter.cpp \
llvm/utils/TableGen/AsmWriterInst.cpp \
llvm/utils/TableGen/CallingConvEmitter.cpp \
+ llvm/utils/TableGen/ClangASTNodesEmitter.cpp \
+ llvm/utils/TableGen/ClangAttrEmitter.cpp \
llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp \
llvm/utils/TableGen/CodeEmitterGen.cpp \
llvm/utils/TableGen/CodeGenDAGPatterns.cpp \
@@ -1031,26 +600,23 @@ am__tblgen_SOURCES_DIST = llvm/utils/TableGen/AsmMatcherEmitter.cpp \
llvm/utils/TableGen/DAGISelMatcherEmitter.cpp \
llvm/utils/TableGen/DAGISelMatcherGen.cpp \
llvm/utils/TableGen/DAGISelMatcherOpt.cpp \
- llvm/utils/TableGen/DisassemblerEmitter.cpp \
llvm/utils/TableGen/EDEmitter.cpp \
llvm/utils/TableGen/FastISelEmitter.cpp \
llvm/utils/TableGen/InstrEnumEmitter.cpp \
llvm/utils/TableGen/InstrInfoEmitter.cpp \
llvm/utils/TableGen/IntrinsicEmitter.cpp \
llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp \
+ llvm/utils/TableGen/NeonEmitter.cpp \
llvm/utils/TableGen/OptParserEmitter.cpp \
llvm/utils/TableGen/Record.cpp \
llvm/utils/TableGen/RegisterInfoEmitter.cpp \
llvm/utils/TableGen/SubtargetEmitter.cpp \
+ llvm/utils/TableGen/TableGenBackend.cpp \
+ llvm/utils/TableGen/TableGen.cpp \
llvm/utils/TableGen/TGLexer.cpp \
llvm/utils/TableGen/TGParser.cpp \
- llvm/utils/TableGen/TGValueTypes.cpp \
- llvm/utils/TableGen/TableGen.cpp \
- llvm/utils/TableGen/TableGenBackend.cpp \
- llvm/utils/TableGen/X86DisassemblerTables.cpp \
- llvm/utils/TableGen/X86RecognizableInstr.cpp \
- llvm/lib/System/Alarm.cpp llvm/lib/System/Atomic.cpp \
- llvm/lib/System/Disassembler.cpp \
+ llvm/utils/TableGen/TGValueTypes.cpp llvm/lib/System/Alarm.cpp \
+ llvm/lib/System/Atomic.cpp llvm/lib/System/Disassembler.cpp \
llvm/lib/System/DynamicLibrary.cpp llvm/lib/System/Errno.cpp \
llvm/lib/System/Host.cpp llvm/lib/System/IncludeFile.cpp \
llvm/lib/System/Memory.cpp llvm/lib/System/Mutex.cpp \
@@ -1058,8 +624,9 @@ am__tblgen_SOURCES_DIST = llvm/utils/TableGen/AsmMatcherEmitter.cpp \
llvm/lib/System/Program.cpp llvm/lib/System/RWMutex.cpp \
llvm/lib/System/Signals.cpp llvm/lib/System/ThreadLocal.cpp \
llvm/lib/System/Threading.cpp llvm/lib/System/TimeValue.cpp \
- llvm/lib/Support/APFloat.cpp llvm/lib/Support/APInt.cpp \
- llvm/lib/Support/APSInt.cpp llvm/lib/Support/Allocator.cpp \
+ llvm/lib/System/Valgrind.cpp llvm/lib/Support/APFloat.cpp \
+ llvm/lib/Support/APInt.cpp llvm/lib/Support/APSInt.cpp \
+ llvm/lib/Support/Allocator.cpp \
llvm/lib/Support/CommandLine.cpp \
llvm/lib/Support/ConstantRange.cpp llvm/lib/Support/Debug.cpp \
llvm/lib/Support/DeltaAlgorithm.cpp llvm/lib/Support/Dwarf.cpp \
@@ -1073,9 +640,7 @@ am__tblgen_SOURCES_DIST = llvm/utils/TableGen/AsmMatcherEmitter.cpp \
llvm/lib/Support/MemoryObject.cpp \
llvm/lib/Support/PluginLoader.cpp \
llvm/lib/Support/PrettyStackTrace.cpp \
- llvm/lib/Support/Regex.cpp \
- llvm/lib/Support/SlowOperationInformer.cpp \
- llvm/lib/Support/SmallPtrSet.cpp \
+ llvm/lib/Support/Regex.cpp llvm/lib/Support/SmallPtrSet.cpp \
llvm/lib/Support/SmallVector.cpp \
llvm/lib/Support/SourceMgr.cpp llvm/lib/Support/Statistic.cpp \
llvm/lib/Support/StringExtras.cpp \
@@ -1089,10 +654,12 @@ am__tblgen_SOURCES_DIST = llvm/utils/TableGen/AsmMatcherEmitter.cpp \
llvm/lib/Support/raw_ostream.cpp llvm/lib/Support/regcomp.c \
llvm/lib/Support/regerror.c llvm/lib/Support/regexec.c \
llvm/lib/Support/regfree.c llvm/lib/Support/regstrlcpy.c
- at BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE at am_tblgen_OBJECTS = tblgen-AsmMatcherEmitter.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-AsmWriterEmitter.$(OBJEXT) \
+ at BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE at am_tblgen_OBJECTS = tblgen-ARMDecoderEmitter.$(OBJEXT) \
+ at BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-AsmMatcherEmitter.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-AsmWriterInst.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-CallingConvEmitter.$(OBJEXT) \
+ at BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-ClangASTNodesEmitter.$(OBJEXT) \
+ at BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-ClangAttrEmitter.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-ClangDiagnosticsEmitter.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-CodeEmitterGen.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-CodeGenDAGPatterns.$(OBJEXT) \
@@ -1103,24 +670,22 @@ am__tblgen_SOURCES_DIST = llvm/utils/TableGen/AsmMatcherEmitter.cpp \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-DAGISelMatcherEmitter.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-DAGISelMatcherGen.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-DAGISelMatcherOpt.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-DisassemblerEmitter.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-EDEmitter.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-FastISelEmitter.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-InstrEnumEmitter.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-InstrInfoEmitter.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-IntrinsicEmitter.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-LLVMCConfigurationEmitter.$(OBJEXT) \
+ at BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-NeonEmitter.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-OptParserEmitter.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-Record.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-RegisterInfoEmitter.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-SubtargetEmitter.$(OBJEXT) \
+ at BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-TableGenBackend.$(OBJEXT) \
+ at BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-TableGen.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-TGLexer.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-TGParser.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-TGValueTypes.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-TableGen.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-TableGenBackend.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-X86DisassemblerTables.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-X86RecognizableInstr.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-Alarm.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-Atomic.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-Disassembler.$(OBJEXT) \
@@ -1138,6 +703,7 @@ am__tblgen_SOURCES_DIST = llvm/utils/TableGen/AsmMatcherEmitter.cpp \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-ThreadLocal.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-Threading.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-TimeValue.$(OBJEXT) \
+ at BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-Valgrind.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-APFloat.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-APInt.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-APSInt.$(OBJEXT) \
@@ -1160,7 +726,6 @@ am__tblgen_SOURCES_DIST = llvm/utils/TableGen/AsmMatcherEmitter.cpp \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-PluginLoader.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-PrettyStackTrace.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-Regex.$(OBJEXT) \
- at BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-SlowOperationInformer.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-SmallPtrSet.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-SmallVector.$(OBJEXT) \
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ tblgen-SourceMgr.$(OBJEXT) \
@@ -1229,65 +794,19 @@ am__v_CXXLD_0 = @echo " CXXLD " $@;
AM_V_GEN = $(am__v_GEN_$(V))
am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY))
am__v_GEN_0 = @echo " GEN " $@;
-SOURCES = $(libclamavcxx_la_SOURCES) $(libgoogletest_la_SOURCES) \
- $(libllvmarmcodegen_la_SOURCES) $(libllvmasmparser_la_SOURCES) \
- $(libllvmasmprinter_la_SOURCES) $(libllvmbitreader_la_SOURCES) \
- $(libllvmbitwriter_la_SOURCES) $(libllvmcodegen_la_SOURCES) \
- $(libllvmfullcodegen_la_SOURCES) \
- $(libllvminterpreter_la_SOURCES) $(libllvmjit_la_SOURCES) \
- $(libllvmpowerpccodegen_la_SOURCES) \
- $(libllvmsupport_la_SOURCES) \
- $(libllvmsupport_nodups_la_SOURCES) \
+SOURCES = $(libclamavcxx_la_SOURCES) $(libllvmcodegen_la_SOURCES) \
+ $(libllvmjit_la_SOURCES) $(libllvmpowerpccodegen_la_SOURCES) \
$(libllvmsystem_la_SOURCES) $(libllvmx86codegen_la_SOURCES) \
- $(FileCheck_SOURCES) $(count_SOURCES) $(llc_SOURCES) \
- $(lli_SOURCES) $(llvm_as_SOURCES) $(llvm_dis_SOURCES) \
- $(llvmunittest_ADT_SOURCES) \
- $(llvmunittest_ExecutionEngine_SOURCES) \
- $(llvmunittest_JIT_SOURCES) $(llvmunittest_Support_SOURCES) \
- $(llvmunittest_VMCore_SOURCES) $(not_SOURCES) \
$(tblgen_SOURCES)
DIST_SOURCES = $(libclamavcxx_la_SOURCES) \
- $(am__libgoogletest_la_SOURCES_DIST) \
- $(am__libllvmarmcodegen_la_SOURCES_DIST) \
- $(am__libllvmasmparser_la_SOURCES_DIST) \
- $(am__libllvmasmprinter_la_SOURCES_DIST) \
- $(am__libllvmbitreader_la_SOURCES_DIST) \
- $(am__libllvmbitwriter_la_SOURCES_DIST) \
$(am__libllvmcodegen_la_SOURCES_DIST) \
- $(am__libllvmfullcodegen_la_SOURCES_DIST) \
- $(am__libllvminterpreter_la_SOURCES_DIST) \
$(am__libllvmjit_la_SOURCES_DIST) \
$(am__libllvmpowerpccodegen_la_SOURCES_DIST) \
- $(am__libllvmsupport_la_SOURCES_DIST) \
- $(am__libllvmsupport_nodups_la_SOURCES_DIST) \
$(am__libllvmsystem_la_SOURCES_DIST) \
$(am__libllvmx86codegen_la_SOURCES_DIST) \
- $(am__FileCheck_SOURCES_DIST) $(am__count_SOURCES_DIST) \
- $(am__llc_SOURCES_DIST) $(am__lli_SOURCES_DIST) \
- $(am__llvm_as_SOURCES_DIST) $(am__llvm_dis_SOURCES_DIST) \
- $(am__llvmunittest_ADT_SOURCES_DIST) \
- $(am__llvmunittest_ExecutionEngine_SOURCES_DIST) \
- $(am__llvmunittest_JIT_SOURCES_DIST) \
- $(am__llvmunittest_Support_SOURCES_DIST) \
- $(am__llvmunittest_VMCore_SOURCES_DIST) \
- $(am__not_SOURCES_DIST) $(am__tblgen_SOURCES_DIST)
+ $(am__tblgen_SOURCES_DIST)
ETAGS = etags
CTAGS = ctags
-# If stdout is a non-dumb tty, use colors. If test -t is not supported,
-# then this fails; a conservative approach. Of course do not redirect
-# stdout here, just stderr.
-am__tty_colors = \
-red=; grn=; lgn=; blu=; std=; \
-test "X$(AM_COLOR_TESTS)" != Xno \
-&& test "X$$TERM" != Xdumb \
-&& { test "X$(AM_COLOR_TESTS)" = Xalways || test -t 1 2>/dev/null; } \
-&& { \
- red='[0;31m'; \
- grn='[0;32m'; \
- lgn='[1;32m'; \
- blu='[1;34m'; \
- std='[m'; \
-}
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
distdir = $(PACKAGE)-$(VERSION)
top_distdir = $(distdir)
@@ -1457,42 +976,30 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE at libclamavcxx_la_DEPENDENCIES = \
@BUILD_EXTERNAL_LLVM_FALSE@ libllvmjit.la libllvmcodegen.la \
@BUILD_EXTERNAL_LLVM_FALSE@ libllvmsystem.la $(am__append_3) \
- at BUILD_EXTERNAL_LLVM_FALSE@ $(am__append_7) $(am__append_11)
+ at BUILD_EXTERNAL_LLVM_FALSE@ $(am__append_6)
@BUILD_EXTERNAL_LLVM_TRUE at libclamavcxx_la_DEPENDENCIES = \
@BUILD_EXTERNAL_LLVM_TRUE@ @LLVMCONFIG_LIBFILES@ \
- at BUILD_EXTERNAL_LLVM_TRUE@ $(am__append_3) $(am__append_7) \
- at BUILD_EXTERNAL_LLVM_TRUE@ $(am__append_11)
+ at BUILD_EXTERNAL_LLVM_TRUE@ $(am__append_3) $(am__append_6)
@BUILD_EXTERNAL_LLVM_FALSE at noinst_LTLIBRARIES = libclamavcxx.la \
@BUILD_EXTERNAL_LLVM_FALSE@ libllvmsystem.la libllvmcodegen.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmjit.la $(am__append_5) \
- at BUILD_EXTERNAL_LLVM_FALSE@ $(am__append_9) $(am__append_13)
+ at BUILD_EXTERNAL_LLVM_FALSE@ libllvmjit.la $(am__append_4) \
+ at BUILD_EXTERNAL_LLVM_FALSE@ $(am__append_7)
@BUILD_EXTERNAL_LLVM_TRUE at noinst_LTLIBRARIES = libclamavcxx.la \
- at BUILD_EXTERNAL_LLVM_TRUE@ $(am__append_5) $(am__append_9) \
- at BUILD_EXTERNAL_LLVM_TRUE@ $(am__append_13)
- at BUILD_EXTERNAL_LLVM_FALSE@lli_LDADD = libllvmbitreader.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmfullcodegen.la libllvmjit.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ $(am__append_4) $(am__append_8) \
- at BUILD_EXTERNAL_LLVM_FALSE@ $(am__append_12) \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmfullcodegen.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmcodegen.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmsupport_nodups.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmjit.la libllvmsystem.la
+ at BUILD_EXTERNAL_LLVM_TRUE@ $(am__append_4) $(am__append_7)
@BUILD_EXTERNAL_LLVM_FALSE at libclamavcxx_la_LIBADD = libllvmjit.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ $(am__append_2) $(am__append_6) \
- at BUILD_EXTERNAL_LLVM_FALSE@ $(am__append_10) libllvmcodegen.la \
- at BUILD_EXTERNAL_LLVM_FALSE@ libllvmsystem.la
+ at BUILD_EXTERNAL_LLVM_FALSE@ $(am__append_2) $(am__append_5) \
+ at BUILD_EXTERNAL_LLVM_FALSE@ libllvmcodegen.la libllvmsystem.la
@BUILD_EXTERNAL_LLVM_FALSE at LLVM_CXXFLAGS = -Woverloaded-virtual -pedantic -Wno-long-long -Wall -W -Wno-unused-parameter -Wwrite-strings
@BUILD_EXTERNAL_LLVM_FALSE at unittest_CXXFLAGS = @NO_VARIADIC_MACROS@ @NO_MISSING_FIELD_INITIALIZERS@ -DGTEST_HAS_TR1_TUPLE=0
@BUILD_EXTERNAL_LLVM_FALSE at TBLGENFILES = llvm/include/llvm/Intrinsics.gen X86GenRegisterInfo.h.inc X86GenRegisterNames.inc X86GenRegisterInfo.inc X86GenInstrNames.inc X86GenInstrInfo.inc\
- at BUILD_EXTERNAL_LLVM_FALSE@ X86GenAsmWriter.inc X86GenAsmWriter1.inc X86GenAsmMatcher.inc X86GenDAGISel.inc X86GenFastISel.inc X86GenCallingConv.inc\
- at BUILD_EXTERNAL_LLVM_FALSE@ X86GenSubtarget.inc PPCGenInstrNames.inc PPCGenRegisterNames.inc PPCGenAsmWriter.inc PPCGenCodeEmitter.inc PPCGenRegisterInfo.h.inc PPCGenRegisterInfo.inc\
- at BUILD_EXTERNAL_LLVM_FALSE@ PPCGenInstrInfo.inc PPCGenDAGISel.inc PPCGenCallingConv.inc PPCGenSubtarget.inc ARMGenRegisterInfo.h.inc ARMGenRegisterNames.inc ARMGenRegisterInfo.inc ARMGenInstrNames.inc ARMGenInstrInfo.inc ARMGenCodeEmitter.inc\
- at BUILD_EXTERNAL_LLVM_FALSE@ ARMGenAsmWriter.inc ARMGenDAGISel.inc ARMGenCallingConv.inc ARMGenSubtarget.inc
+ at BUILD_EXTERNAL_LLVM_FALSE@ X86GenDAGISel.inc X86GenFastISel.inc X86GenCallingConv.inc\
+ at BUILD_EXTERNAL_LLVM_FALSE@ X86GenSubtarget.inc PPCGenInstrNames.inc PPCGenRegisterNames.inc PPCGenCodeEmitter.inc PPCGenRegisterInfo.h.inc PPCGenRegisterInfo.inc\
+ at BUILD_EXTERNAL_LLVM_FALSE@ PPCGenInstrInfo.inc PPCGenDAGISel.inc PPCGenCallingConv.inc PPCGenSubtarget.inc
# Rule to rerun LLVM's configure if it changed, before building anything else
# LLVM
- at BUILD_EXTERNAL_LLVM_FALSE@BUILT_SOURCES = $(am__append_14) \
+ at BUILD_EXTERNAL_LLVM_FALSE@BUILT_SOURCES = $(am__append_8) \
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/config.status
@BUILD_EXTERNAL_LLVM_FALSE at EXTRA_DIST = $(top_srcdir)/llvm llvmcheck.sh $(TBLGENFILES)
@BUILD_EXTERNAL_LLVM_FALSE at libllvmsystem_la_LDFLAGS = @THREAD_LIBS@
@@ -1515,75 +1022,8 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/System/ThreadLocal.cpp \
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/System/Threading.cpp \
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/System/TimeValue.cpp \
+ at BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/System/Valgrind.cpp \
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/config.status
-
-# support is split into 2:
-# a full llvmsupport, and another that contains only objs
-# that aren't already contained in llvmjit
-@BUILD_EXTERNAL_LLVM_FALSE@libllvmsupport_la_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/APFloat.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/APInt.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/APSInt.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/Allocator.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/CommandLine.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/ConstantRange.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/Debug.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/DeltaAlgorithm.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/Dwarf.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/ErrorHandling.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/FileUtilities.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/FoldingSet.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/FormattedStream.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/GraphWriter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/IsInf.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/IsNAN.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/ManagedStatic.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/MemoryBuffer.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/MemoryObject.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/PluginLoader.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/PrettyStackTrace.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/Regex.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/SlowOperationInformer.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/SmallPtrSet.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/SmallVector.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/SourceMgr.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/Statistic.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/StringExtras.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/StringMap.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/StringPool.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/StringRef.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/SystemUtils.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/TargetRegistry.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/Timer.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/Triple.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/Twine.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/circular_raw_ostream.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/raw_os_ostream.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/raw_ostream.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/regcomp.c\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/regerror.c\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/regexec.c\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/regfree.c\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/regstrlcpy.c
-
-@BUILD_EXTERNAL_LLVM_FALSE@libllvmsupport_nodups_la_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/APSInt.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/DeltaAlgorithm.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/FileUtilities.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/IsInf.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/IsNAN.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/MemoryObject.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/PluginLoader.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/Regex.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/SlowOperationInformer.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/SystemUtils.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/raw_os_ostream.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/regcomp.c\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/regerror.c\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/regexec.c\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/regfree.c\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/regstrlcpy.c
-
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@tblgen_CXXFLAGS = $(LLVM_CXXFLAGS)
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@tblgen_CFLAGS =
#TODO: if VERSIONSCRIPT
@@ -1592,38 +1032,38 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
# since tblgen is only a maintainer-mode tool, build these files twice (once for
# libllvmsupport.la -fno-rtti, and once here, with defaults (rtti)).
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@tblgen_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/AsmMatcherEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/AsmWriterEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/AsmWriterInst.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/CallingConvEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/CodeEmitterGen.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/CodeGenDAGPatterns.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/CodeGenInstruction.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/CodeGenTarget.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/DAGISelEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/DAGISelMatcher.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/DAGISelMatcherEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/DAGISelMatcherGen.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/DAGISelMatcherOpt.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/DisassemblerEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/EDEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/FastISelEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/InstrEnumEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/InstrInfoEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/IntrinsicEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/OptParserEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/Record.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/RegisterInfoEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/SubtargetEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/TGLexer.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/TGParser.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/TGValueTypes.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/TableGen.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/TableGenBackend.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/X86DisassemblerTables.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/X86RecognizableInstr.cpp \
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/ARMDecoderEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/AsmMatcherEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/AsmWriterInst.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/CallingConvEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/ClangASTNodesEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/ClangAttrEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/CodeEmitterGen.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/CodeGenDAGPatterns.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/CodeGenInstruction.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/CodeGenTarget.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/DAGISelEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/DAGISelMatcher.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/DAGISelMatcherEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/DAGISelMatcherGen.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/DAGISelMatcherOpt.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/EDEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/FastISelEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/InstrEnumEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/InstrInfoEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/IntrinsicEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/NeonEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/OptParserEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/Record.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/RegisterInfoEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/SubtargetEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/TableGenBackend.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/TableGen.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/TGLexer.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/TGParser.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/utils/TableGen/TGValueTypes.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/System/Alarm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/System/Atomic.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/System/Disassembler.cpp\
@@ -1641,6 +1081,7 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/System/ThreadLocal.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/System/Threading.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/System/TimeValue.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/System/Valgrind.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/Support/APFloat.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/Support/APInt.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/Support/APSInt.cpp\
@@ -1663,7 +1104,6 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/Support/PluginLoader.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/Support/PrettyStackTrace.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/Support/Regex.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/Support/SlowOperationInformer.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/Support/SmallPtrSet.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/Support/SmallVector.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ llvm/lib/Support/SourceMgr.cpp\
@@ -1695,15 +1135,21 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
# PPC Target
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@TBLGEN_FLAGS_PPC = $(TBLGEN_FLAGS) -I$(top_srcdir)/llvm/lib/Target/PowerPC
-
-# ARM Target
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@TBLGEN_FLAGS_ARM = $(TBLGEN_FLAGS) -I$(top_srcdir)/llvm/lib/Target/ARM
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@libllvmx86codegen_la_CPPFLAGS = $(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_builddir) -I$(top_srcdir)/llvm/lib/Target/X86
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@libllvmx86codegen_la_SOURCES = \
+@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/MC/ELFObjectWriter.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/MC/MCAsmInfoCOFF.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/MC/MCCodeEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/MC/MCELFStreamer.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/MC/MCMachOStreamer.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/MC/MCObjectStreamer.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/MC/MCObjectWriter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/MC/MachObjectWriter.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/MC/TargetAsmBackend.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/MC/WinCOFFObjectWriter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/MC/WinCOFFStreamer.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/TargetELFWriterInfo.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/SSEDomainFix.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/TargetInfo/X86TargetInfo.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86AsmBackend.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp\
@@ -1711,15 +1157,14 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86ELFWriterInfo.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86FastISel.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86FloatingPoint.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86FloatingPointRegKill.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86ISelDAGToDAG.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86ISelLowering.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86InstrInfo.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86JITInfo.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86MCAsmInfo.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86MCCodeEmitter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86MCTargetExpr.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86RegisterInfo.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86SelectionDAGInfo.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86Subtarget.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86TargetMachine.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_X86_TRUE@ llvm/lib/Target/X86/X86TargetObjectFile.cpp
@@ -1736,61 +1181,43 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@ llvm/lib/Target/PowerPC/PPCMCAsmInfo.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@ llvm/lib/Target/PowerPC/PPCPredicates.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@ llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@ llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@ llvm/lib/Target/PowerPC/PPCSubtarget.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@ llvm/lib/Target/PowerPC/PPCTargetMachine.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@@BUILD_PPC_TRUE@ llvm/lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@libllvmarmcodegen_la_CPPFLAGS = $(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_builddir) -I$(top_srcdir)/llvm/lib/Target/ARM
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@libllvmarmcodegen_la_SOURCES = \
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/IfConversion.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/ARMCodeEmitter.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/ARMConstantIslandPass.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/ARMConstantPoolValue.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/ARMISelLowering.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/ARMInstrInfo.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/ARMJITInfo.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/ARMMCAsmInfo.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/ARMRegisterInfo.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/ARMSubtarget.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/ARMTargetMachine.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/NEONMoveFix.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/NEONPreAllocPass.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/Thumb1InstrInfo.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/Thumb2InstrInfo.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp\
-@BUILD_ARM_TRUE@@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
-
# End of Targets
@BUILD_EXTERNAL_LLVM_FALSE@libllvmjit_la_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/IPA/CallGraph.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/AliasAnalysis.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/AliasSetTracker.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/BasicAliasAnalysis.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/CaptureTracking.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/DebugInfo.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/IPA/CallGraph.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/MemoryBuiltins.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/PointerTracking.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/ProfileInfo.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/ValueTracking.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/ELFCodeEmitter.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/ELFWriter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/LiveVariables.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineBasicBlock.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineDominators.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineFunction.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineFunctionAnalysis.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineFunctionPass.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineInstr.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineLoopInfo.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineModuleInfo.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineRegisterInfo.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/ObjectCodeEmitter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/PostRAHazardRecognizer.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/PseudoSourceValue.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/ScheduleDAG.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/TargetInstrInfoImpl.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/UnreachableBlockElim.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/ExecutionEngine/ExecutionEngine.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/ExecutionEngine/JIT/Intercept.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/ExecutionEngine/JIT/JIT.cpp\
@@ -1801,10 +1228,14 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/ExecutionEngine/JIT/TargetSelect.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCAsmInfo.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCAssembler.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCContext.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCExpr.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCInst.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCSection.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCSectionCOFF.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCSectionELF.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCSectionMachO.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCSymbol.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/APFloat.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/APInt.cpp\
@@ -1816,7 +1247,6 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/ErrorHandling.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/FoldingSet.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/FormattedStream.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/GraphWriter.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/ManagedStatic.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/MemoryBuffer.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/PrettyStackTrace.cpp\
@@ -1832,8 +1262,8 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/Timer.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/Triple.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/Twine.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/circular_raw_ostream.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/raw_ostream.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/circular_raw_ostream.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/Mangler.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/SubtargetFeature.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/TargetData.cpp\
@@ -1841,13 +1271,8 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/TargetLoweringObjectFile.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/TargetMachine.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/TargetRegisterInfo.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Scalar/ADCE.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Scalar/DCE.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Scalar/SCCP.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/IPO/ConstantMerge.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/IPO/GlobalOpt.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/IPO/GlobalDCE.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/AsmWriter.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/Attributes.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/AutoUpgrade.cpp\
@@ -1855,6 +1280,7 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/ConstantFold.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/Constants.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/Core.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/DebugLoc.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/Dominators.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/Function.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/GVMaterializer.cpp\
@@ -1871,6 +1297,7 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/Module.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/Pass.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/PassManager.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/PassRegistry.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/PrintModulePass.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/Type.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/TypeSymbolTable.cpp\
@@ -1881,66 +1308,57 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/VMCore/Verifier.cpp
@BUILD_EXTERNAL_LLVM_FALSE@libllvmcodegen_la_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/AliasSetTracker.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/ConstantFolding.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/IVUsers.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/InstructionSimplify.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/Loads.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/LoopInfo.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/LoopPass.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/MemoryDependenceAnalysis.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/PHITransAddr.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/ProfileInfo.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/ScalarEvolution.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/ScalarEvolutionExpander.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/ScalarEvolutionNormalization.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/AsmPrinter/DIE.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/AsmPrinter/DwarfLabel.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/AsmPrinter/DwarfPrinter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/AsmPrinter/DwarfWriter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/Analysis.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/BranchFolding.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/CalcSpillWeights.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/CallingConvLower.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/CodePlacementOpt.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/DeadMachineInstructionElim.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/DwarfEHPrepare.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/ExactHazardRecognizer.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/GCMetadata.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/GCStrategy.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/InlineSpiller.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/LLVMTargetMachine.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/LatencyPriorityQueue.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/LiveInterval.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/LiveIntervalAnalysis.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/LiveStackAnalysis.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/LiveVariables.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/LocalStackSlotAllocation.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/LowerSubregs.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineCSE.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineDominators.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineLICM.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineLoopInfo.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineModuleInfoImpls.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachinePassRegistry.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineSSAUpdater.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineSink.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/MachineVerifier.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/OptimizeExts.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/OptimizePHIs.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/PHIElimination.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/Passes.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/PeepholeOptimizer.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/PostRASchedulerList.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/PreAllocSplitting.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/ProcessImplicitDefs.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/PrologEpilogInserter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/RegAllocFast.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/RegAllocLinearScan.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/RegisterCoalescer.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/RegisterScavenging.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/ScheduleDAG.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/ScheduleDAGEmit.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/ScheduleDAGInstrs.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/ScheduleDAGPrinter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SelectionDAG/CallingConvLower.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SelectionDAG/FastISel.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp\
@@ -1952,7 +1370,6 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp\
@@ -1961,37 +1378,42 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/ScheduleDAGPrinter.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/ShrinkWrapping.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SjLjEHPrepare.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SlotIndexes.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/Spiller.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/SplitKit.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/StackProtector.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/StackSlotColoring.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/StrongPHIElimination.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/TailDuplication.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/TwoAddressInstructionPass.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/UnreachableBlockElim.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/VirtRegMap.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/VirtRegRewriter.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCAsmInfoDarwin.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCAsmStreamer.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCAssembler.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCInst.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCMachOStreamer.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCInstPrinter.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCLoggingStreamer.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCNullStreamer.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCSectionMachO.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCStreamer.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Support/GraphWriter.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/TargetFrameInfo.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/TargetSubtarget.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/IPO/ConstantMerge.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/IPO/GlobalOpt.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Scalar/GEPSplitter.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Scalar/GVN.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Scalar/SCCP.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Utils/AddrModeMatcher.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Utils/BasicBlockUtils.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp\
+@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Utils/BuildLibCalls.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Utils/DemoteRegToStack.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Utils/LCSSA.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Utils/Local.cpp\
@@ -2004,159 +1426,11 @@ libclamavcxx_la_SOURCES = bytecode2llvm.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Utils/SimplifyCFG.cpp\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
-
-# Used only by make check
-@BUILD_EXTERNAL_LLVM_FALSE@libllvmbitreader_la_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Bitcode/Reader/BitReader.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Bitcode/Reader/BitcodeReader.cpp
-
-@BUILD_EXTERNAL_LLVM_FALSE@libllvmbitwriter_la_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Bitcode/Writer/BitWriter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Bitcode/Writer/BitcodeWriter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
-
-@BUILD_EXTERNAL_LLVM_FALSE@libllvmasmparser_la_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/AsmParser/LLLexer.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/AsmParser/LLParser.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/AsmParser/Parser.cpp
-
-@BUILD_EXTERNAL_LLVM_FALSE@libllvminterpreter_la_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/ExecutionEngine/Interpreter/Execution.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
-
-@BUILD_EXTERNAL_LLVM_FALSE@libgoogletest_la_CPPFLAGS = $(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_srcdir)/llvm/utils/unittest/googletest/include
-@BUILD_EXTERNAL_LLVM_FALSE@libgoogletest_la_CXXFLAGS = $(unittest_CXXFLAGS)
-@BUILD_EXTERNAL_LLVM_FALSE@libgoogletest_la_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/utils/unittest/googletest/gtest-death-test.cc\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/utils/unittest/googletest/gtest-filepath.cc\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/utils/unittest/googletest/gtest-port.cc\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/utils/unittest/googletest/gtest-test-part.cc\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/utils/unittest/googletest/gtest-typed-test.cc\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/utils/unittest/googletest/gtest.cc\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/utils/unittest/UnitTestMain/TestMain.cpp
-
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_ADT_CPPFLAGS = $(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_srcdir)/llvm/utils/unittest/googletest/include
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_ADT_CXXFLAGS = $(unittest_CXXFLAGS)
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_ADT_LDADD = libgoogletest.la libllvmsupport_nodups.la libllvmjit.la libllvmsystem.la
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_ADT_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/ADT/APFloatTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/ADT/APIntTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/ADT/DenseMapTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/ADT/DenseSetTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/ADT/ImmutableSetTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/ADT/SmallStringTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/ADT/SmallVectorTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/ADT/SparseBitVectorTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/ADT/StringMapTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/ADT/StringRefTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/ADT/TripleTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/ADT/TwineTest.cpp
-
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_Support_CPPFLAGS = $(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_srcdir)/llvm/utils/unittest/googletest/include
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_Support_CXXFLAGS = $(unittest_CXXFLAGS)
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_Support_LDADD = libgoogletest.la libllvmsupport_nodups.la libllvmjit.la libllvmsystem.la
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_Support_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/Support/AllocatorTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/Support/ConstantRangeTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/Support/MathExtrasTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/Support/RegexTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/Support/TypeBuilderTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/Support/ValueHandleTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/Support/raw_ostream_test.cpp
-
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_VMCore_CPPFLAGS = $(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_srcdir)/llvm/utils/unittest/googletest/include
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_VMCore_CXXFLAGS = $(unittest_CXXFLAGS)
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_VMCore_LDADD = libgoogletest.la libllvmsupport_nodups.la libllvmjit.la libllvmsystem.la
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_VMCore_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/LoopInfo.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Analysis/LoopPass.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/VMCore/ConstantsTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/VMCore/MetadataTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/VMCore/PassManagerTest.cpp
-
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_JIT_CPPFLAGS = $(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_srcdir)/llvm/utils/unittest/googletest/include
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_JIT_CXXFLAGS = $(unittest_CXXFLAGS)
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_JIT_LDADD = libgoogletest.la libllvmasmparser.la $(lli_LDADD)
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_JIT_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/ExecutionEngine/JIT/JITTest.cpp
-
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_ExecutionEngine_CPPFLAGS = $(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_srcdir)/llvm/utils/unittest/googletest/include
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_ExecutionEngine_CXXFLAGS = $(unittest_CXXFLAGS)
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_ExecutionEngine_LDADD = libgoogletest.la libllvminterpreter.la libllvmsupport_nodups.la $(libclamavcxx_la_LIBADD) libllvmsystem.la
-@BUILD_EXTERNAL_LLVM_FALSE@llvmunittest_ExecutionEngine_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/IntrinsicLowering.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp
-
-@BUILD_EXTERNAL_LLVM_FALSE@count_CPPFLAGS = $(LLVM_INCLUDES) $(LLVM_DEFS)
-@BUILD_EXTERNAL_LLVM_FALSE@count_SOURCES = llvm/utils/count/count.c
-@BUILD_EXTERNAL_LLVM_FALSE@count_LDADD = libllvmsystem.la
-@BUILD_EXTERNAL_LLVM_FALSE@not_CPPFLAGS = $(LLVM_INCLUDES) $(LLVM_DEFS)
-@BUILD_EXTERNAL_LLVM_FALSE@not_CXXFLAGS = $(LLVM_CXXFLAGS)
-@BUILD_EXTERNAL_LLVM_FALSE@not_SOURCES = llvm/utils/not/not.cpp
-@BUILD_EXTERNAL_LLVM_FALSE@not_LDADD = libllvmsystem.la
-@BUILD_EXTERNAL_LLVM_FALSE@FileCheck_CPPFLAGS = $(LLVM_INCLUDES) $(LLVM_DEFS)
-@BUILD_EXTERNAL_LLVM_FALSE@FileCheck_CXXFLAGS = $(LLVM_CXXFLAGS)
-@BUILD_EXTERNAL_LLVM_FALSE@FileCheck_LDADD = libllvmsupport.la libllvmsystem.la
-@BUILD_EXTERNAL_LLVM_FALSE@FileCheck_SOURCES = llvm/utils/FileCheck/FileCheck.cpp
-@BUILD_EXTERNAL_LLVM_FALSE@check_LTLIBRARIES = libllvmbitreader.la libllvmsupport_nodups.la libllvmsupport.la libllvmfullcodegen.la libllvmasmprinter.la libllvmbitwriter.la libllvmasmparser.la libgoogletest.la libllvminterpreter.la
-@BUILD_EXTERNAL_LLVM_FALSE@check_SCRIPTS = llvmcheck.sh
-@BUILD_EXTERNAL_LLVM_FALSE@TESTS_ENVIRONMENT = export GMAKE=@GMAKE@;
-@BUILD_EXTERNAL_LLVM_FALSE@libllvmasmprinter_la_CPPFLAGS = $(LLVM_INCLUDES) $(LLVM_DEFS) -I$(top_srcdir)/llvm/lib/Target/X86 \
-@BUILD_EXTERNAL_LLVM_FALSE@ -I$(top_srcdir)/llvm/lib/Target/PowerPC -I$(top_srcdir)/llvm/lib/Target/ARM
-
-@BUILD_EXTERNAL_LLVM_FALSE@libllvmasmprinter_la_SOURCES = llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/ELFCodeEmitter.cpp \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/ELFWriter.cpp \
-@BUILD_EXTERNAL_LLVM_FALSE@ $(am__append_15) $(am__append_16) \
-@BUILD_EXTERNAL_LLVM_FALSE@ $(am__append_17)
-@BUILD_EXTERNAL_LLVM_FALSE@libllvmfullcodegen_la_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/GCMetadataPrinter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/IfConversion.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/IntrinsicLowering.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/OcamlGC.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/RegAllocLocal.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/RegAllocPBQP.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/CodeGen/ShadowStackGC.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/ExecutionEngine/Interpreter/Execution.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/Target.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/TargetAsmLexer.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/TargetELFWriterInfo.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/Target/TargetIntrinsicInfo.cpp
-
-@BUILD_EXTERNAL_LLVM_FALSE@lli_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/tools/lli/lli.cpp
-
-@BUILD_EXTERNAL_LLVM_FALSE@llc_CPPFLAGS = $(LLVM_INCLUDES) $(LLVM_DEFS)
-@BUILD_EXTERNAL_LLVM_FALSE@llc_CXXFLAGS = $(LLVM_CXXFLAGS)
-@BUILD_EXTERNAL_LLVM_FALSE@llc_LDADD = libllvmbitreader.la libllvmasmprinter.la libllvmasmparser.la $(lli_LDADD)
-@BUILD_EXTERNAL_LLVM_FALSE@llc_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/lib/MC/MCInstPrinter.cpp\
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/tools/llc/llc.cpp
-
-@BUILD_EXTERNAL_LLVM_FALSE@llvm_as_CPPFLAGS = $(LLVM_INCLUDES) $(LLVM_DEFS)
-@BUILD_EXTERNAL_LLVM_FALSE@llvm_as_CXXFLAGS = $(LLVM_CXXFLAGS)
-@BUILD_EXTERNAL_LLVM_FALSE@llvm_as_LDADD = libllvmasmparser.la libllvmbitwriter.la libllvmsupport_nodups.la libllvmjit.la libllvmsystem.la
-@BUILD_EXTERNAL_LLVM_FALSE@llvm_as_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/tools/llvm-as/llvm-as.cpp
-
-@BUILD_EXTERNAL_LLVM_FALSE@llvm_dis_CPPFLAGS = $(LLVM_INCLUDES) $(LLVM_DEFS)
-@BUILD_EXTERNAL_LLVM_FALSE@llvm_dis_CXXFLAGS = $(LLVM_CXXFLAGS)
-@BUILD_EXTERNAL_LLVM_FALSE@llvm_dis_LDADD = libllvmasmparser.la libllvmbitreader.la libllvmsupport_nodups.la libllvmjit.la libllvmsystem.la
-@BUILD_EXTERNAL_LLVM_FALSE@llvm_dis_SOURCES = \
-@BUILD_EXTERNAL_LLVM_FALSE@ llvm/tools/llvm-dis/llvm-dis.cpp
-
all: $(BUILT_SOURCES) clamavcxx-config.h
$(MAKE) $(AM_MAKEFLAGS) all-am
.SUFFIXES:
-.SUFFIXES: .c .cc .cpp .lo .o .obj
+.SUFFIXES: .c .cpp .lo .o .obj
am--refresh:
@:
$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
@@ -2209,15 +1483,6 @@ $(srcdir)/clamavcxx-config.h.in: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
distclean-hdr:
-rm -f clamavcxx-config.h stamp-h1
-clean-checkLTLIBRARIES:
- -test -z "$(check_LTLIBRARIES)" || rm -f $(check_LTLIBRARIES)
- @list='$(check_LTLIBRARIES)'; for p in $$list; do \
- dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
- test "$$dir" != "$$p" || dir=.; \
- echo "rm -f \"$${dir}/so_locations\""; \
- rm -f "$${dir}/so_locations"; \
- done
-
clean-noinstLTLIBRARIES:
-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
@list='$(noinst_LTLIBRARIES)'; for p in $$list; do \
@@ -2228,46 +1493,17 @@ clean-noinstLTLIBRARIES:
done
libclamavcxx.la: $(libclamavcxx_la_OBJECTS) $(libclamavcxx_la_DEPENDENCIES)
$(AM_V_CXXLD)$(libclamavcxx_la_LINK) $(am_libclamavcxx_la_rpath) $(libclamavcxx_la_OBJECTS) $(libclamavcxx_la_LIBADD) $(LIBS)
-libgoogletest.la: $(libgoogletest_la_OBJECTS) $(libgoogletest_la_DEPENDENCIES)
- $(AM_V_CXXLD)$(libgoogletest_la_LINK) $(am_libgoogletest_la_rpath) $(libgoogletest_la_OBJECTS) $(libgoogletest_la_LIBADD) $(LIBS)
-libllvmarmcodegen.la: $(libllvmarmcodegen_la_OBJECTS) $(libllvmarmcodegen_la_DEPENDENCIES)
- $(AM_V_CXXLD)$(CXXLINK) $(am_libllvmarmcodegen_la_rpath) $(libllvmarmcodegen_la_OBJECTS) $(libllvmarmcodegen_la_LIBADD) $(LIBS)
-libllvmasmparser.la: $(libllvmasmparser_la_OBJECTS) $(libllvmasmparser_la_DEPENDENCIES)
- $(AM_V_CXXLD)$(CXXLINK) $(am_libllvmasmparser_la_rpath) $(libllvmasmparser_la_OBJECTS) $(libllvmasmparser_la_LIBADD) $(LIBS)
-libllvmasmprinter.la: $(libllvmasmprinter_la_OBJECTS) $(libllvmasmprinter_la_DEPENDENCIES)
- $(AM_V_CXXLD)$(CXXLINK) $(am_libllvmasmprinter_la_rpath) $(libllvmasmprinter_la_OBJECTS) $(libllvmasmprinter_la_LIBADD) $(LIBS)
-libllvmbitreader.la: $(libllvmbitreader_la_OBJECTS) $(libllvmbitreader_la_DEPENDENCIES)
- $(AM_V_CXXLD)$(CXXLINK) $(am_libllvmbitreader_la_rpath) $(libllvmbitreader_la_OBJECTS) $(libllvmbitreader_la_LIBADD) $(LIBS)
-libllvmbitwriter.la: $(libllvmbitwriter_la_OBJECTS) $(libllvmbitwriter_la_DEPENDENCIES)
- $(AM_V_CXXLD)$(CXXLINK) $(am_libllvmbitwriter_la_rpath) $(libllvmbitwriter_la_OBJECTS) $(libllvmbitwriter_la_LIBADD) $(LIBS)
libllvmcodegen.la: $(libllvmcodegen_la_OBJECTS) $(libllvmcodegen_la_DEPENDENCIES)
$(AM_V_CXXLD)$(CXXLINK) $(am_libllvmcodegen_la_rpath) $(libllvmcodegen_la_OBJECTS) $(libllvmcodegen_la_LIBADD) $(LIBS)
-libllvmfullcodegen.la: $(libllvmfullcodegen_la_OBJECTS) $(libllvmfullcodegen_la_DEPENDENCIES)
- $(AM_V_CXXLD)$(CXXLINK) $(am_libllvmfullcodegen_la_rpath) $(libllvmfullcodegen_la_OBJECTS) $(libllvmfullcodegen_la_LIBADD) $(LIBS)
-libllvminterpreter.la: $(libllvminterpreter_la_OBJECTS) $(libllvminterpreter_la_DEPENDENCIES)
- $(AM_V_CXXLD)$(CXXLINK) $(am_libllvminterpreter_la_rpath) $(libllvminterpreter_la_OBJECTS) $(libllvminterpreter_la_LIBADD) $(LIBS)
libllvmjit.la: $(libllvmjit_la_OBJECTS) $(libllvmjit_la_DEPENDENCIES)
$(AM_V_CXXLD)$(CXXLINK) $(am_libllvmjit_la_rpath) $(libllvmjit_la_OBJECTS) $(libllvmjit_la_LIBADD) $(LIBS)
libllvmpowerpccodegen.la: $(libllvmpowerpccodegen_la_OBJECTS) $(libllvmpowerpccodegen_la_DEPENDENCIES)
$(AM_V_CXXLD)$(CXXLINK) $(am_libllvmpowerpccodegen_la_rpath) $(libllvmpowerpccodegen_la_OBJECTS) $(libllvmpowerpccodegen_la_LIBADD) $(LIBS)
-libllvmsupport.la: $(libllvmsupport_la_OBJECTS) $(libllvmsupport_la_DEPENDENCIES)
- $(AM_V_CXXLD)$(CXXLINK) $(am_libllvmsupport_la_rpath) $(libllvmsupport_la_OBJECTS) $(libllvmsupport_la_LIBADD) $(LIBS)
-libllvmsupport_nodups.la: $(libllvmsupport_nodups_la_OBJECTS) $(libllvmsupport_nodups_la_DEPENDENCIES)
- $(AM_V_CXXLD)$(CXXLINK) $(am_libllvmsupport_nodups_la_rpath) $(libllvmsupport_nodups_la_OBJECTS) $(libllvmsupport_nodups_la_LIBADD) $(LIBS)
libllvmsystem.la: $(libllvmsystem_la_OBJECTS) $(libllvmsystem_la_DEPENDENCIES)
$(AM_V_CXXLD)$(libllvmsystem_la_LINK) $(am_libllvmsystem_la_rpath) $(libllvmsystem_la_OBJECTS) $(libllvmsystem_la_LIBADD) $(LIBS)
libllvmx86codegen.la: $(libllvmx86codegen_la_OBJECTS) $(libllvmx86codegen_la_DEPENDENCIES)
$(AM_V_CXXLD)$(CXXLINK) $(am_libllvmx86codegen_la_rpath) $(libllvmx86codegen_la_OBJECTS) $(libllvmx86codegen_la_LIBADD) $(LIBS)
-clean-checkPROGRAMS:
- @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \
- echo " rm -f" $$list; \
- rm -f $$list || exit $$?; \
- test -n "$(EXEEXT)" || exit 0; \
- list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
- echo " rm -f" $$list; \
- rm -f $$list
-
clean-noinstPROGRAMS:
@list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \
echo " rm -f" $$list; \
@@ -2276,42 +1512,6 @@ clean-noinstPROGRAMS:
list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
echo " rm -f" $$list; \
rm -f $$list
-FileCheck$(EXEEXT): $(FileCheck_OBJECTS) $(FileCheck_DEPENDENCIES)
- @rm -f FileCheck$(EXEEXT)
- $(AM_V_CXXLD)$(FileCheck_LINK) $(FileCheck_OBJECTS) $(FileCheck_LDADD) $(LIBS)
-count$(EXEEXT): $(count_OBJECTS) $(count_DEPENDENCIES)
- @rm -f count$(EXEEXT)
- $(AM_V_CCLD)$(LINK) $(count_OBJECTS) $(count_LDADD) $(LIBS)
-llc$(EXEEXT): $(llc_OBJECTS) $(llc_DEPENDENCIES)
- @rm -f llc$(EXEEXT)
- $(AM_V_CXXLD)$(llc_LINK) $(llc_OBJECTS) $(llc_LDADD) $(LIBS)
-lli$(EXEEXT): $(lli_OBJECTS) $(lli_DEPENDENCIES)
- @rm -f lli$(EXEEXT)
- $(AM_V_CXXLD)$(CXXLINK) $(lli_OBJECTS) $(lli_LDADD) $(LIBS)
-llvm-as$(EXEEXT): $(llvm_as_OBJECTS) $(llvm_as_DEPENDENCIES)
- @rm -f llvm-as$(EXEEXT)
- $(AM_V_CXXLD)$(llvm_as_LINK) $(llvm_as_OBJECTS) $(llvm_as_LDADD) $(LIBS)
-llvm-dis$(EXEEXT): $(llvm_dis_OBJECTS) $(llvm_dis_DEPENDENCIES)
- @rm -f llvm-dis$(EXEEXT)
- $(AM_V_CXXLD)$(llvm_dis_LINK) $(llvm_dis_OBJECTS) $(llvm_dis_LDADD) $(LIBS)
-llvmunittest_ADT$(EXEEXT): $(llvmunittest_ADT_OBJECTS) $(llvmunittest_ADT_DEPENDENCIES)
- @rm -f llvmunittest_ADT$(EXEEXT)
- $(AM_V_CXXLD)$(llvmunittest_ADT_LINK) $(llvmunittest_ADT_OBJECTS) $(llvmunittest_ADT_LDADD) $(LIBS)
-llvmunittest_ExecutionEngine$(EXEEXT): $(llvmunittest_ExecutionEngine_OBJECTS) $(llvmunittest_ExecutionEngine_DEPENDENCIES)
- @rm -f llvmunittest_ExecutionEngine$(EXEEXT)
- $(AM_V_CXXLD)$(llvmunittest_ExecutionEngine_LINK) $(llvmunittest_ExecutionEngine_OBJECTS) $(llvmunittest_ExecutionEngine_LDADD) $(LIBS)
-llvmunittest_JIT$(EXEEXT): $(llvmunittest_JIT_OBJECTS) $(llvmunittest_JIT_DEPENDENCIES)
- @rm -f llvmunittest_JIT$(EXEEXT)
- $(AM_V_CXXLD)$(llvmunittest_JIT_LINK) $(llvmunittest_JIT_OBJECTS) $(llvmunittest_JIT_LDADD) $(LIBS)
-llvmunittest_Support$(EXEEXT): $(llvmunittest_Support_OBJECTS) $(llvmunittest_Support_DEPENDENCIES)
- @rm -f llvmunittest_Support$(EXEEXT)
- $(AM_V_CXXLD)$(llvmunittest_Support_LINK) $(llvmunittest_Support_OBJECTS) $(llvmunittest_Support_LDADD) $(LIBS)
-llvmunittest_VMCore$(EXEEXT): $(llvmunittest_VMCore_OBJECTS) $(llvmunittest_VMCore_DEPENDENCIES)
- @rm -f llvmunittest_VMCore$(EXEEXT)
- $(AM_V_CXXLD)$(llvmunittest_VMCore_LINK) $(llvmunittest_VMCore_OBJECTS) $(llvmunittest_VMCore_LDADD) $(LIBS)
-not$(EXEEXT): $(not_OBJECTS) $(not_DEPENDENCIES)
- @rm -f not$(EXEEXT)
- $(AM_V_CXXLD)$(not_LINK) $(not_OBJECTS) $(not_LDADD) $(LIBS)
tblgen$(EXEEXT): $(tblgen_OBJECTS) $(tblgen_DEPENDENCIES)
@rm -f tblgen$(EXEEXT)
$(AM_V_CXXLD)$(tblgen_LINK) $(tblgen_OBJECTS) $(tblgen_LDADD) $(LIBS)
@@ -2322,17 +1522,15 @@ mostlyclean-compile:
distclean-compile:
-rm -f *.tab.c
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ADCE.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/APFloat.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/APInt.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/APSInt.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/AddrModeMatcher.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/AggressiveAntiDepBreaker.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Alarm.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/AliasAnalysis.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/AliasSetTracker.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Allocator.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/AsmPrinter.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Analysis.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/AsmWriter.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Atomic.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Attributes.Plo@am__quote@
@@ -2340,13 +1538,9 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BasicAliasAnalysis.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BasicBlock.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BasicBlockUtils.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BitReader.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BitWriter.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BitcodeReader.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BitcodeWriter.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BitcodeWriterPass.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BranchFolding.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BreakCriticalEdges.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BuildLibCalls.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/CalcSpillWeights.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/CallGraph.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/CallingConvLower.Plo@am__quote@
@@ -2363,71 +1557,52 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/CriticalAntiDepBreaker.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DAGCombiner.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DCE.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DIE.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DeadMachineInstructionElim.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Debug.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DebugInfo.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DeltaAlgorithm.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DebugLoc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DemoteRegToStack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Disassembler.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Dominators.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Dwarf.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DwarfDebug.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DwarfEHPrepare.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DwarfException.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DwarfLabel.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DwarfPrinter.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DwarfWriter.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/DynamicLibrary.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ELFCodeEmitter.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ELFWriter.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Errno.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ErrorHandling.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ExactHazardRecognizer.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Execution.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ExecutionEngine.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ExternalFunctions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/FastISel.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/FileCheck-FileCheck.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/FileUtilities.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/FoldingSet.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/FormattedStream.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Function.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/FunctionLoweringInfo.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/GCMetadata.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/GCMetadataPrinter.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/GCStrategy.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/GEPSplitter.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/GVMaterializer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/GVN.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/GlobalDCE.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/GlobalOpt.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Globals.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/GraphWriter.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Host.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/IRBuilder.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/IVUsers.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/IfConversion.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/IncludeFile.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/InlineAsm.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/InlineSpiller.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/InstrEmitter.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Instruction.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/InstructionSimplify.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Instructions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Intercept.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Interpreter.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/IntrinsicInst.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/IntrinsicLowering.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/IsInf.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/IsNAN.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JIT.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JITDebugRegisterer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JITDwarfEmitter.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JITEmitter.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/JITMemoryManager.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LCSSA.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LLLexer.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LLParser.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LLVMContext.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LLVMContextImpl.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LLVMTargetMachine.Plo@am__quote@
@@ -2444,7 +1619,9 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LiveIntervalAnalysis.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LiveStackAnalysis.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LiveVariables.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Loads.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Local.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LocalStackSlotAllocation.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LoopInfo.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LoopPass.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/LoopSimplify.Plo@am__quote@
@@ -2459,9 +1636,11 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MCContext.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MCExpr.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MCInst.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MCMachOStreamer.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MCInstPrinter.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MCLoggingStreamer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MCNullStreamer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MCSection.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MCSectionCOFF.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MCSectionELF.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MCSectionMachO.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MCStreamer.Plo@am__quote@
@@ -2472,6 +1651,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MachineFunction.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MachineFunctionAnalysis.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MachineFunctionPass.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MachineFunctionPrinterPass.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MachineInstr.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MachineLICM.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MachineLoopInfo.Plo@am__quote@
@@ -2489,24 +1669,22 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MemoryBuffer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MemoryBuiltins.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MemoryDependenceAnalysis.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MemoryObject.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Metadata.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Module.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Mutex.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/OProfileJITEventListener.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ObjectCodeEmitter.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/OcamlGC.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/OptimizeExts.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/OptimizePHIs.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/PHIElimination.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/PHITransAddr.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Parser.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Pass.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/PassManager.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/PassRegistry.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Passes.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Path.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/PluginLoader.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/PeepholeOptimizer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/PointerTracking.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/PostRAHazardRecognizer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/PostRASchedulerList.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/PreAllocSplitting.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/PrettyStackTrace.Plo@am__quote@
@@ -2519,19 +1697,17 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/PromoteMemoryToRegister.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/PseudoSourceValue.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/RWMutex.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/RegAllocFast.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/RegAllocLinearScan.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/RegAllocLocal.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/RegAllocPBQP.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Regex.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/RegisterCoalescer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/RegisterScavenging.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SCCP.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SSAUpdater.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ScalarEvolution.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ScalarEvolutionExpander.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ScalarEvolutionNormalization.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ScheduleDAG.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ScheduleDAGEmit.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ScheduleDAGFast.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ScheduleDAGInstrs.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ScheduleDAGList.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ScheduleDAGPrinter.Plo@am__quote@
@@ -2541,7 +1717,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SelectionDAGBuilder.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SelectionDAGISel.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SelectionDAGPrinter.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ShadowStackGC.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ShrinkWrapping.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Signals.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SimpleRegisterCoalescing.Plo@am__quote@
@@ -2549,11 +1724,11 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SimplifyCFGPass.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SjLjEHPrepare.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SlotIndexes.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SlowOperationInformer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SmallPtrSet.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SmallVector.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SourceMgr.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Spiller.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SplitKit.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/StackProtector.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/StackSlotColoring.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Statistic.Plo@am__quote@
@@ -2563,16 +1738,11 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/StringRef.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/StrongPHIElimination.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SubtargetFeature.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/SystemUtils.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TailDuplication.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Target.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetAsmLexer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetData.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetELFWriterInfo.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetFrameInfo.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetInstrInfo.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetInstrInfoImpl.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetIntrinsicInfo.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetLowering.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetLoweringObjectFile.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetLoweringObjectFileImpl.Plo@am__quote@
@@ -2580,6 +1750,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetRegisterInfo.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetRegistry.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetSelect.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetSelectionDAGInfo.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TargetSubtarget.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ThreadLocal.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Threading.Plo@am__quote@
@@ -2593,8 +1764,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/UnifyFunctionExitNodes.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/UnreachableBlockElim.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Use.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Valgrind.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Value.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ValueEnumerator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ValueSymbolTable.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ValueTracking.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ValueTypes.Plo@am__quote@
@@ -2602,54 +1773,9 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/VirtRegMap.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/VirtRegRewriter.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/circular_raw_ostream.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/count-count.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libclamavcxx_la-ClamBCRTChecks.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libclamavcxx_la-bytecode2llvm.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libclamavcxx_la-detect.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libgoogletest_la-TestMain.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libgoogletest_la-gtest-death-test.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libgoogletest_la-gtest-filepath.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libgoogletest_la-gtest-port.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libgoogletest_la-gtest-test-part.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libgoogletest_la-gtest-typed-test.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libgoogletest_la-gtest.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-ARMBaseInstrInfo.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-ARMBaseRegisterInfo.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-ARMCodeEmitter.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-ARMConstantIslandPass.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-ARMConstantPoolValue.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-ARMExpandPseudoInsts.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-ARMISelDAGToDAG.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-ARMISelLowering.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-ARMInstrInfo.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-ARMJITInfo.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-ARMLoadStoreOptimizer.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-ARMMCAsmInfo.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-ARMRegisterInfo.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-ARMSubtarget.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-ARMTargetInfo.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-ARMTargetMachine.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-IfConversion.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-NEONMoveFix.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-NEONPreAllocPass.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-Thumb1InstrInfo.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-Thumb1RegisterInfo.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-Thumb2ITBlockPass.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-Thumb2InstrInfo.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-Thumb2RegisterInfo.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmarmcodegen_la-Thumb2SizeReduction.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmasmprinter_la-ARMAsmPrinter.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmasmprinter_la-ARMInstPrinter.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmasmprinter_la-ARMMCInstLower.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmasmprinter_la-ELFCodeEmitter.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmasmprinter_la-ELFWriter.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmasmprinter_la-OcamlGCPrinter.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmasmprinter_la-PPCAsmPrinter.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmasmprinter_la-X86ATTInstPrinter.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmasmprinter_la-X86AsmPrinter.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmasmprinter_la-X86COFFMachineModuleInfo.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmasmprinter_la-X86IntelInstPrinter.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmasmprinter_la-X86MCInstLower.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmpowerpccodegen_la-PPCBranchSelector.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmpowerpccodegen_la-PPCCodeEmitter.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmpowerpccodegen_la-PPCHazardRecognizers.Plo at am__quote@
@@ -2660,85 +1786,54 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmpowerpccodegen_la-PPCMCAsmInfo.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmpowerpccodegen_la-PPCPredicates.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmpowerpccodegen_la-PPCRegisterInfo.Plo at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmpowerpccodegen_la-PPCSelectionDAGInfo.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmpowerpccodegen_la-PPCSubtarget.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmpowerpccodegen_la-PPCTargetMachine.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmpowerpccodegen_la-PowerPCTargetInfo.Plo at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-ELFObjectWriter.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-MCAsmInfoCOFF.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-MCCodeEmitter.Plo at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-MCELFStreamer.Plo at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-MCMachOStreamer.Plo at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-MCObjectStreamer.Plo at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-MCObjectWriter.Plo at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-MachObjectWriter.Plo at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-SSEDomainFix.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-TargetAsmBackend.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-TargetELFWriterInfo.Plo at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-WinCOFFObjectWriter.Plo at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-WinCOFFStreamer.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86AsmBackend.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86COFFMachineModuleInfo.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86CodeEmitter.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86ELFWriterInfo.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86FastISel.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86FloatingPoint.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86FloatingPointRegKill.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86ISelDAGToDAG.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86ISelLowering.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86InstrInfo.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86JITInfo.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86MCAsmInfo.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86MCCodeEmitter.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86MCTargetExpr.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86RegisterInfo.Plo at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86SelectionDAGInfo.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86Subtarget.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86TargetInfo.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86TargetMachine.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/libllvmx86codegen_la-X86TargetObjectFile.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llc-MCInstPrinter.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llc-llc.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/lli.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvm_as-llvm-as.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvm_dis-llvm-dis.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_ADT-APFloatTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_ADT-APIntTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_ADT-DenseMapTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_ADT-DenseSetTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_ADT-ImmutableSetTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_ADT-SmallStringTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_ADT-SmallVectorTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_ADT-SparseBitVectorTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_ADT-StringMapTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_ADT-StringRefTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_ADT-TripleTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_ADT-TwineTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_ExecutionEngine-ExecutionEngineTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_ExecutionEngine-IntrinsicLowering.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_JIT-JITEventListenerTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_JIT-JITMemoryManagerTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_JIT-JITTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_Support-AllocatorTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_Support-ConstantRangeTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_Support-MathExtrasTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_Support-RegexTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_Support-TypeBuilderTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_Support-ValueHandleTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_Support-raw_ostream_test.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_VMCore-CallGraphSCCPass.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_VMCore-ConstantsTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_VMCore-LoopInfo.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_VMCore-LoopPass.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_VMCore-MetadataTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/llvmunittest_VMCore-PassManagerTest.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/not-not.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/raw_os_ostream.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/raw_ostream.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/regcomp.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/regerror.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/regexec.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/regfree.Plo at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/regstrlcpy.Plo at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-APFloat.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-APInt.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-APSInt.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-ARMDecoderEmitter.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-Alarm.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-Allocator.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-AsmMatcherEmitter.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-AsmWriterEmitter.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-AsmWriterInst.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-Atomic.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-CallingConvEmitter.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-ClangASTNodesEmitter.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-ClangAttrEmitter.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-ClangDiagnosticsEmitter.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-CodeEmitterGen.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-CodeGenDAGPatterns.Po at am__quote@
@@ -2754,7 +1849,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-Debug.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-DeltaAlgorithm.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-Disassembler.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-DisassemblerEmitter.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-Dwarf.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-DynamicLibrary.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-EDEmitter.Po at am__quote@
@@ -2778,6 +1872,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-MemoryBuffer.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-MemoryObject.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-Mutex.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-NeonEmitter.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-OptParserEmitter.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-Path.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-PluginLoader.Po at am__quote@
@@ -2789,7 +1884,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-Regex.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-RegisterInfoEmitter.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-Signals.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-SlowOperationInformer.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-SmallPtrSet.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-SmallVector.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-SourceMgr.Po at am__quote@
@@ -2812,8 +1906,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-Timer.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-Triple.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-Twine.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-X86DisassemblerTables.Po at am__quote@
- at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-X86RecognizableInstr.Po at am__quote@
+ at AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-Valgrind.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-circular_raw_ostream.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-raw_os_ostream.Po at am__quote@
@AMDEP_TRUE@@am__include@ @am__quote at ./$(DEPDIR)/tblgen-raw_ostream.Po at am__quote@
@@ -2847,62 +1940,6 @@ distclean-compile:
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $<
-regcomp.lo: llvm/lib/Support/regcomp.c
- at am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT regcomp.lo -MD -MP -MF $(DEPDIR)/regcomp.Tpo -c -o regcomp.lo `test -f 'llvm/lib/Support/regcomp.c' || echo '$(srcdir)/'`llvm/lib/Support/regcomp.c
- at am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/regcomp.Tpo $(DEPDIR)/regcomp.Plo
- at am__fastdepCC_FALSE@ $(AM_V_CC) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCC_FALSE@ source='llvm/lib/Support/regcomp.c' object='regcomp.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCC_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o regcomp.lo `test -f 'llvm/lib/Support/regcomp.c' || echo '$(srcdir)/'`llvm/lib/Support/regcomp.c
-
-regerror.lo: llvm/lib/Support/regerror.c
- at am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT regerror.lo -MD -MP -MF $(DEPDIR)/regerror.Tpo -c -o regerror.lo `test -f 'llvm/lib/Support/regerror.c' || echo '$(srcdir)/'`llvm/lib/Support/regerror.c
- at am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/regerror.Tpo $(DEPDIR)/regerror.Plo
- at am__fastdepCC_FALSE@ $(AM_V_CC) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCC_FALSE@ source='llvm/lib/Support/regerror.c' object='regerror.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCC_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o regerror.lo `test -f 'llvm/lib/Support/regerror.c' || echo '$(srcdir)/'`llvm/lib/Support/regerror.c
-
-regexec.lo: llvm/lib/Support/regexec.c
- at am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT regexec.lo -MD -MP -MF $(DEPDIR)/regexec.Tpo -c -o regexec.lo `test -f 'llvm/lib/Support/regexec.c' || echo '$(srcdir)/'`llvm/lib/Support/regexec.c
- at am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/regexec.Tpo $(DEPDIR)/regexec.Plo
- at am__fastdepCC_FALSE@ $(AM_V_CC) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCC_FALSE@ source='llvm/lib/Support/regexec.c' object='regexec.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCC_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o regexec.lo `test -f 'llvm/lib/Support/regexec.c' || echo '$(srcdir)/'`llvm/lib/Support/regexec.c
-
-regfree.lo: llvm/lib/Support/regfree.c
- at am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT regfree.lo -MD -MP -MF $(DEPDIR)/regfree.Tpo -c -o regfree.lo `test -f 'llvm/lib/Support/regfree.c' || echo '$(srcdir)/'`llvm/lib/Support/regfree.c
- at am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/regfree.Tpo $(DEPDIR)/regfree.Plo
- at am__fastdepCC_FALSE@ $(AM_V_CC) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCC_FALSE@ source='llvm/lib/Support/regfree.c' object='regfree.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCC_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o regfree.lo `test -f 'llvm/lib/Support/regfree.c' || echo '$(srcdir)/'`llvm/lib/Support/regfree.c
-
-regstrlcpy.lo: llvm/lib/Support/regstrlcpy.c
- at am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT regstrlcpy.lo -MD -MP -MF $(DEPDIR)/regstrlcpy.Tpo -c -o regstrlcpy.lo `test -f 'llvm/lib/Support/regstrlcpy.c' || echo '$(srcdir)/'`llvm/lib/Support/regstrlcpy.c
- at am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/regstrlcpy.Tpo $(DEPDIR)/regstrlcpy.Plo
- at am__fastdepCC_FALSE@ $(AM_V_CC) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCC_FALSE@ source='llvm/lib/Support/regstrlcpy.c' object='regstrlcpy.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCC_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o regstrlcpy.lo `test -f 'llvm/lib/Support/regstrlcpy.c' || echo '$(srcdir)/'`llvm/lib/Support/regstrlcpy.c
-
-count-count.o: llvm/utils/count/count.c
- at am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(count_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT count-count.o -MD -MP -MF $(DEPDIR)/count-count.Tpo -c -o count-count.o `test -f 'llvm/utils/count/count.c' || echo '$(srcdir)/'`llvm/utils/count/count.c
- at am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/count-count.Tpo $(DEPDIR)/count-count.Po
- at am__fastdepCC_FALSE@ $(AM_V_CC) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCC_FALSE@ source='llvm/utils/count/count.c' object='count-count.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(count_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o count-count.o `test -f 'llvm/utils/count/count.c' || echo '$(srcdir)/'`llvm/utils/count/count.c
-
-count-count.obj: llvm/utils/count/count.c
- at am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(count_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT count-count.obj -MD -MP -MF $(DEPDIR)/count-count.Tpo -c -o count-count.obj `if test -f 'llvm/utils/count/count.c'; then $(CYGPATH_W) 'llvm/utils/count/count.c'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/count/count.c'; fi`
- at am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/count-count.Tpo $(DEPDIR)/count-count.Po
- at am__fastdepCC_FALSE@ $(AM_V_CC) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCC_FALSE@ source='llvm/utils/count/count.c' object='count-count.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(count_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o count-count.obj `if test -f 'llvm/utils/count/count.c'; then $(CYGPATH_W) 'llvm/utils/count/count.c'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/count/count.c'; fi`
-
tblgen-regcomp.o: llvm/lib/Support/regcomp.c
@am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CFLAGS) $(CFLAGS) -MT tblgen-regcomp.o -MD -MP -MF $(DEPDIR)/tblgen-regcomp.Tpo -c -o tblgen-regcomp.o `test -f 'llvm/lib/Support/regcomp.c' || echo '$(srcdir)/'`llvm/lib/Support/regcomp.c
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-regcomp.Tpo $(DEPDIR)/tblgen-regcomp.Po
@@ -2983,7 +2020,7 @@ tblgen-regstrlcpy.obj: llvm/lib/Support/regstrlcpy.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CFLAGS) $(CFLAGS) -c -o tblgen-regstrlcpy.obj `if test -f 'llvm/lib/Support/regstrlcpy.c'; then $(CYGPATH_W) 'llvm/lib/Support/regstrlcpy.c'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/Support/regstrlcpy.c'; fi`
-.cc.o:
+.cpp.o:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
@@ -2991,7 +2028,7 @@ tblgen-regstrlcpy.obj: llvm/lib/Support/regstrlcpy.c
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $<
-.cc.obj:
+.cpp.obj:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
@@ -2999,7 +2036,7 @@ tblgen-regstrlcpy.obj: llvm/lib/Support/regstrlcpy.c
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
-.cc.lo:
+.cpp.lo:
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
@@ -3031,438 +2068,6 @@ libclamavcxx_la-detect.lo: detect.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libclamavcxx_la_CXXFLAGS) $(CXXFLAGS) -c -o libclamavcxx_la-detect.lo `test -f 'detect.cpp' || echo '$(srcdir)/'`detect.cpp
-libgoogletest_la-gtest-death-test.lo: llvm/utils/unittest/googletest/gtest-death-test.cc
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libgoogletest_la_CPPFLAGS) $(CPPFLAGS) $(libgoogletest_la_CXXFLAGS) $(CXXFLAGS) -MT libgoogletest_la-gtest-death-test.lo -MD -MP -MF $(DEPDIR)/libgoogletest_la-gtest-death-test.Tpo -c -o libgoogletest_la-gtest-death-test.lo `test -f 'llvm/utils/unittest/googletest/gtest-death-test.cc' || echo '$(srcdir)/'`llvm/utils/unittest/googletest/gtest-death-test.cc
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libgoogletest_la-gtest-death-test.Tpo $(DEPDIR)/libgoogletest_la-gtest-death-test.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/unittest/googletest/gtest-death-test.cc' object='libgoogletest_la-gtest-death-test.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libgoogletest_la_CPPFLAGS) $(CPPFLAGS) $(libgoogletest_la_CXXFLAGS) $(CXXFLAGS) -c -o libgoogletest_la-gtest-death-test.lo `test -f 'llvm/utils/unittest/googletest/gtest-death-test.cc' || echo '$(srcdir)/'`llvm/utils/unittest/googletest/gtest-death-test.cc
-
-libgoogletest_la-gtest-filepath.lo: llvm/utils/unittest/googletest/gtest-filepath.cc
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libgoogletest_la_CPPFLAGS) $(CPPFLAGS) $(libgoogletest_la_CXXFLAGS) $(CXXFLAGS) -MT libgoogletest_la-gtest-filepath.lo -MD -MP -MF $(DEPDIR)/libgoogletest_la-gtest-filepath.Tpo -c -o libgoogletest_la-gtest-filepath.lo `test -f 'llvm/utils/unittest/googletest/gtest-filepath.cc' || echo '$(srcdir)/'`llvm/utils/unittest/googletest/gtest-filepath.cc
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libgoogletest_la-gtest-filepath.Tpo $(DEPDIR)/libgoogletest_la-gtest-filepath.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/unittest/googletest/gtest-filepath.cc' object='libgoogletest_la-gtest-filepath.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libgoogletest_la_CPPFLAGS) $(CPPFLAGS) $(libgoogletest_la_CXXFLAGS) $(CXXFLAGS) -c -o libgoogletest_la-gtest-filepath.lo `test -f 'llvm/utils/unittest/googletest/gtest-filepath.cc' || echo '$(srcdir)/'`llvm/utils/unittest/googletest/gtest-filepath.cc
-
-libgoogletest_la-gtest-port.lo: llvm/utils/unittest/googletest/gtest-port.cc
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libgoogletest_la_CPPFLAGS) $(CPPFLAGS) $(libgoogletest_la_CXXFLAGS) $(CXXFLAGS) -MT libgoogletest_la-gtest-port.lo -MD -MP -MF $(DEPDIR)/libgoogletest_la-gtest-port.Tpo -c -o libgoogletest_la-gtest-port.lo `test -f 'llvm/utils/unittest/googletest/gtest-port.cc' || echo '$(srcdir)/'`llvm/utils/unittest/googletest/gtest-port.cc
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libgoogletest_la-gtest-port.Tpo $(DEPDIR)/libgoogletest_la-gtest-port.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/unittest/googletest/gtest-port.cc' object='libgoogletest_la-gtest-port.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libgoogletest_la_CPPFLAGS) $(CPPFLAGS) $(libgoogletest_la_CXXFLAGS) $(CXXFLAGS) -c -o libgoogletest_la-gtest-port.lo `test -f 'llvm/utils/unittest/googletest/gtest-port.cc' || echo '$(srcdir)/'`llvm/utils/unittest/googletest/gtest-port.cc
-
-libgoogletest_la-gtest-test-part.lo: llvm/utils/unittest/googletest/gtest-test-part.cc
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libgoogletest_la_CPPFLAGS) $(CPPFLAGS) $(libgoogletest_la_CXXFLAGS) $(CXXFLAGS) -MT libgoogletest_la-gtest-test-part.lo -MD -MP -MF $(DEPDIR)/libgoogletest_la-gtest-test-part.Tpo -c -o libgoogletest_la-gtest-test-part.lo `test -f 'llvm/utils/unittest/googletest/gtest-test-part.cc' || echo '$(srcdir)/'`llvm/utils/unittest/googletest/gtest-test-part.cc
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libgoogletest_la-gtest-test-part.Tpo $(DEPDIR)/libgoogletest_la-gtest-test-part.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/unittest/googletest/gtest-test-part.cc' object='libgoogletest_la-gtest-test-part.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libgoogletest_la_CPPFLAGS) $(CPPFLAGS) $(libgoogletest_la_CXXFLAGS) $(CXXFLAGS) -c -o libgoogletest_la-gtest-test-part.lo `test -f 'llvm/utils/unittest/googletest/gtest-test-part.cc' || echo '$(srcdir)/'`llvm/utils/unittest/googletest/gtest-test-part.cc
-
-libgoogletest_la-gtest-typed-test.lo: llvm/utils/unittest/googletest/gtest-typed-test.cc
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libgoogletest_la_CPPFLAGS) $(CPPFLAGS) $(libgoogletest_la_CXXFLAGS) $(CXXFLAGS) -MT libgoogletest_la-gtest-typed-test.lo -MD -MP -MF $(DEPDIR)/libgoogletest_la-gtest-typed-test.Tpo -c -o libgoogletest_la-gtest-typed-test.lo `test -f 'llvm/utils/unittest/googletest/gtest-typed-test.cc' || echo '$(srcdir)/'`llvm/utils/unittest/googletest/gtest-typed-test.cc
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libgoogletest_la-gtest-typed-test.Tpo $(DEPDIR)/libgoogletest_la-gtest-typed-test.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/unittest/googletest/gtest-typed-test.cc' object='libgoogletest_la-gtest-typed-test.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libgoogletest_la_CPPFLAGS) $(CPPFLAGS) $(libgoogletest_la_CXXFLAGS) $(CXXFLAGS) -c -o libgoogletest_la-gtest-typed-test.lo `test -f 'llvm/utils/unittest/googletest/gtest-typed-test.cc' || echo '$(srcdir)/'`llvm/utils/unittest/googletest/gtest-typed-test.cc
-
-libgoogletest_la-gtest.lo: llvm/utils/unittest/googletest/gtest.cc
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libgoogletest_la_CPPFLAGS) $(CPPFLAGS) $(libgoogletest_la_CXXFLAGS) $(CXXFLAGS) -MT libgoogletest_la-gtest.lo -MD -MP -MF $(DEPDIR)/libgoogletest_la-gtest.Tpo -c -o libgoogletest_la-gtest.lo `test -f 'llvm/utils/unittest/googletest/gtest.cc' || echo '$(srcdir)/'`llvm/utils/unittest/googletest/gtest.cc
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libgoogletest_la-gtest.Tpo $(DEPDIR)/libgoogletest_la-gtest.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/unittest/googletest/gtest.cc' object='libgoogletest_la-gtest.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libgoogletest_la_CPPFLAGS) $(CPPFLAGS) $(libgoogletest_la_CXXFLAGS) $(CXXFLAGS) -c -o libgoogletest_la-gtest.lo `test -f 'llvm/utils/unittest/googletest/gtest.cc' || echo '$(srcdir)/'`llvm/utils/unittest/googletest/gtest.cc
-
-libgoogletest_la-TestMain.lo: llvm/utils/unittest/UnitTestMain/TestMain.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libgoogletest_la_CPPFLAGS) $(CPPFLAGS) $(libgoogletest_la_CXXFLAGS) $(CXXFLAGS) -MT libgoogletest_la-TestMain.lo -MD -MP -MF $(DEPDIR)/libgoogletest_la-TestMain.Tpo -c -o libgoogletest_la-TestMain.lo `test -f 'llvm/utils/unittest/UnitTestMain/TestMain.cpp' || echo '$(srcdir)/'`llvm/utils/unittest/UnitTestMain/TestMain.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libgoogletest_la-TestMain.Tpo $(DEPDIR)/libgoogletest_la-TestMain.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/unittest/UnitTestMain/TestMain.cpp' object='libgoogletest_la-TestMain.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libgoogletest_la_CPPFLAGS) $(CPPFLAGS) $(libgoogletest_la_CXXFLAGS) $(CXXFLAGS) -c -o libgoogletest_la-TestMain.lo `test -f 'llvm/utils/unittest/UnitTestMain/TestMain.cpp' || echo '$(srcdir)/'`llvm/utils/unittest/UnitTestMain/TestMain.cpp
-
-libllvmarmcodegen_la-IfConversion.lo: llvm/lib/CodeGen/IfConversion.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-IfConversion.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-IfConversion.Tpo -c -o libllvmarmcodegen_la-IfConversion.lo `test -f 'llvm/lib/CodeGen/IfConversion.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/IfConversion.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-IfConversion.Tpo $(DEPDIR)/libllvmarmcodegen_la-IfConversion.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/IfConversion.cpp' object='libllvmarmcodegen_la-IfConversion.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-IfConversion.lo `test -f 'llvm/lib/CodeGen/IfConversion.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/IfConversion.cpp
-
-libllvmarmcodegen_la-ARMBaseInstrInfo.lo: llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-ARMBaseInstrInfo.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-ARMBaseInstrInfo.Tpo -c -o libllvmarmcodegen_la-ARMBaseInstrInfo.lo `test -f 'llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-ARMBaseInstrInfo.Tpo $(DEPDIR)/libllvmarmcodegen_la-ARMBaseInstrInfo.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp' object='libllvmarmcodegen_la-ARMBaseInstrInfo.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-ARMBaseInstrInfo.lo `test -f 'llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
-
-libllvmarmcodegen_la-ARMBaseRegisterInfo.lo: llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-ARMBaseRegisterInfo.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-ARMBaseRegisterInfo.Tpo -c -o libllvmarmcodegen_la-ARMBaseRegisterInfo.lo `test -f 'llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-ARMBaseRegisterInfo.Tpo $(DEPDIR)/libllvmarmcodegen_la-ARMBaseRegisterInfo.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp' object='libllvmarmcodegen_la-ARMBaseRegisterInfo.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-ARMBaseRegisterInfo.lo `test -f 'llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
-
-libllvmarmcodegen_la-ARMCodeEmitter.lo: llvm/lib/Target/ARM/ARMCodeEmitter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-ARMCodeEmitter.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-ARMCodeEmitter.Tpo -c -o libllvmarmcodegen_la-ARMCodeEmitter.lo `test -f 'llvm/lib/Target/ARM/ARMCodeEmitter.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMCodeEmitter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-ARMCodeEmitter.Tpo $(DEPDIR)/libllvmarmcodegen_la-ARMCodeEmitter.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/ARMCodeEmitter.cpp' object='libllvmarmcodegen_la-ARMCodeEmitter.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-ARMCodeEmitter.lo `test -f 'llvm/lib/Target/ARM/ARMCodeEmitter.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMCodeEmitter.cpp
-
-libllvmarmcodegen_la-ARMConstantIslandPass.lo: llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-ARMConstantIslandPass.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-ARMConstantIslandPass.Tpo -c -o libllvmarmcodegen_la-ARMConstantIslandPass.lo `test -f 'llvm/lib/Target/ARM/ARMConstantIslandPass.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-ARMConstantIslandPass.Tpo $(DEPDIR)/libllvmarmcodegen_la-ARMConstantIslandPass.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/ARMConstantIslandPass.cpp' object='libllvmarmcodegen_la-ARMConstantIslandPass.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-ARMConstantIslandPass.lo `test -f 'llvm/lib/Target/ARM/ARMConstantIslandPass.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
-
-libllvmarmcodegen_la-ARMConstantPoolValue.lo: llvm/lib/Target/ARM/ARMConstantPoolValue.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-ARMConstantPoolValue.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-ARMConstantPoolValue.Tpo -c -o libllvmarmcodegen_la-ARMConstantPoolValue.lo `test -f 'llvm/lib/Target/ARM/ARMConstantPoolValue.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMConstantPoolValue.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-ARMConstantPoolValue.Tpo $(DEPDIR)/libllvmarmcodegen_la-ARMConstantPoolValue.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/ARMConstantPoolValue.cpp' object='libllvmarmcodegen_la-ARMConstantPoolValue.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-ARMConstantPoolValue.lo `test -f 'llvm/lib/Target/ARM/ARMConstantPoolValue.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMConstantPoolValue.cpp
-
-libllvmarmcodegen_la-ARMExpandPseudoInsts.lo: llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-ARMExpandPseudoInsts.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-ARMExpandPseudoInsts.Tpo -c -o libllvmarmcodegen_la-ARMExpandPseudoInsts.lo `test -f 'llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-ARMExpandPseudoInsts.Tpo $(DEPDIR)/libllvmarmcodegen_la-ARMExpandPseudoInsts.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp' object='libllvmarmcodegen_la-ARMExpandPseudoInsts.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-ARMExpandPseudoInsts.lo `test -f 'llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
-
-libllvmarmcodegen_la-ARMISelDAGToDAG.lo: llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-ARMISelDAGToDAG.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-ARMISelDAGToDAG.Tpo -c -o libllvmarmcodegen_la-ARMISelDAGToDAG.lo `test -f 'llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-ARMISelDAGToDAG.Tpo $(DEPDIR)/libllvmarmcodegen_la-ARMISelDAGToDAG.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp' object='libllvmarmcodegen_la-ARMISelDAGToDAG.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-ARMISelDAGToDAG.lo `test -f 'llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
-
-libllvmarmcodegen_la-ARMISelLowering.lo: llvm/lib/Target/ARM/ARMISelLowering.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-ARMISelLowering.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-ARMISelLowering.Tpo -c -o libllvmarmcodegen_la-ARMISelLowering.lo `test -f 'llvm/lib/Target/ARM/ARMISelLowering.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMISelLowering.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-ARMISelLowering.Tpo $(DEPDIR)/libllvmarmcodegen_la-ARMISelLowering.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/ARMISelLowering.cpp' object='libllvmarmcodegen_la-ARMISelLowering.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-ARMISelLowering.lo `test -f 'llvm/lib/Target/ARM/ARMISelLowering.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMISelLowering.cpp
-
-libllvmarmcodegen_la-ARMInstrInfo.lo: llvm/lib/Target/ARM/ARMInstrInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-ARMInstrInfo.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-ARMInstrInfo.Tpo -c -o libllvmarmcodegen_la-ARMInstrInfo.lo `test -f 'llvm/lib/Target/ARM/ARMInstrInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMInstrInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-ARMInstrInfo.Tpo $(DEPDIR)/libllvmarmcodegen_la-ARMInstrInfo.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/ARMInstrInfo.cpp' object='libllvmarmcodegen_la-ARMInstrInfo.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-ARMInstrInfo.lo `test -f 'llvm/lib/Target/ARM/ARMInstrInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMInstrInfo.cpp
-
-libllvmarmcodegen_la-ARMJITInfo.lo: llvm/lib/Target/ARM/ARMJITInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-ARMJITInfo.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-ARMJITInfo.Tpo -c -o libllvmarmcodegen_la-ARMJITInfo.lo `test -f 'llvm/lib/Target/ARM/ARMJITInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMJITInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-ARMJITInfo.Tpo $(DEPDIR)/libllvmarmcodegen_la-ARMJITInfo.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/ARMJITInfo.cpp' object='libllvmarmcodegen_la-ARMJITInfo.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-ARMJITInfo.lo `test -f 'llvm/lib/Target/ARM/ARMJITInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMJITInfo.cpp
-
-libllvmarmcodegen_la-ARMLoadStoreOptimizer.lo: llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-ARMLoadStoreOptimizer.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-ARMLoadStoreOptimizer.Tpo -c -o libllvmarmcodegen_la-ARMLoadStoreOptimizer.lo `test -f 'llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-ARMLoadStoreOptimizer.Tpo $(DEPDIR)/libllvmarmcodegen_la-ARMLoadStoreOptimizer.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp' object='libllvmarmcodegen_la-ARMLoadStoreOptimizer.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-ARMLoadStoreOptimizer.lo `test -f 'llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
-
-libllvmarmcodegen_la-ARMMCAsmInfo.lo: llvm/lib/Target/ARM/ARMMCAsmInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-ARMMCAsmInfo.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-ARMMCAsmInfo.Tpo -c -o libllvmarmcodegen_la-ARMMCAsmInfo.lo `test -f 'llvm/lib/Target/ARM/ARMMCAsmInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMMCAsmInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-ARMMCAsmInfo.Tpo $(DEPDIR)/libllvmarmcodegen_la-ARMMCAsmInfo.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/ARMMCAsmInfo.cpp' object='libllvmarmcodegen_la-ARMMCAsmInfo.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-ARMMCAsmInfo.lo `test -f 'llvm/lib/Target/ARM/ARMMCAsmInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMMCAsmInfo.cpp
-
-libllvmarmcodegen_la-ARMRegisterInfo.lo: llvm/lib/Target/ARM/ARMRegisterInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-ARMRegisterInfo.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-ARMRegisterInfo.Tpo -c -o libllvmarmcodegen_la-ARMRegisterInfo.lo `test -f 'llvm/lib/Target/ARM/ARMRegisterInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMRegisterInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-ARMRegisterInfo.Tpo $(DEPDIR)/libllvmarmcodegen_la-ARMRegisterInfo.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/ARMRegisterInfo.cpp' object='libllvmarmcodegen_la-ARMRegisterInfo.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-ARMRegisterInfo.lo `test -f 'llvm/lib/Target/ARM/ARMRegisterInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMRegisterInfo.cpp
-
-libllvmarmcodegen_la-ARMSubtarget.lo: llvm/lib/Target/ARM/ARMSubtarget.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-ARMSubtarget.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-ARMSubtarget.Tpo -c -o libllvmarmcodegen_la-ARMSubtarget.lo `test -f 'llvm/lib/Target/ARM/ARMSubtarget.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMSubtarget.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-ARMSubtarget.Tpo $(DEPDIR)/libllvmarmcodegen_la-ARMSubtarget.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/ARMSubtarget.cpp' object='libllvmarmcodegen_la-ARMSubtarget.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-ARMSubtarget.lo `test -f 'llvm/lib/Target/ARM/ARMSubtarget.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMSubtarget.cpp
-
-libllvmarmcodegen_la-ARMTargetMachine.lo: llvm/lib/Target/ARM/ARMTargetMachine.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-ARMTargetMachine.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-ARMTargetMachine.Tpo -c -o libllvmarmcodegen_la-ARMTargetMachine.lo `test -f 'llvm/lib/Target/ARM/ARMTargetMachine.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMTargetMachine.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-ARMTargetMachine.Tpo $(DEPDIR)/libllvmarmcodegen_la-ARMTargetMachine.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/ARMTargetMachine.cpp' object='libllvmarmcodegen_la-ARMTargetMachine.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-ARMTargetMachine.lo `test -f 'llvm/lib/Target/ARM/ARMTargetMachine.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/ARMTargetMachine.cpp
-
-libllvmarmcodegen_la-NEONMoveFix.lo: llvm/lib/Target/ARM/NEONMoveFix.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-NEONMoveFix.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-NEONMoveFix.Tpo -c -o libllvmarmcodegen_la-NEONMoveFix.lo `test -f 'llvm/lib/Target/ARM/NEONMoveFix.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/NEONMoveFix.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-NEONMoveFix.Tpo $(DEPDIR)/libllvmarmcodegen_la-NEONMoveFix.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/NEONMoveFix.cpp' object='libllvmarmcodegen_la-NEONMoveFix.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-NEONMoveFix.lo `test -f 'llvm/lib/Target/ARM/NEONMoveFix.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/NEONMoveFix.cpp
-
-libllvmarmcodegen_la-NEONPreAllocPass.lo: llvm/lib/Target/ARM/NEONPreAllocPass.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-NEONPreAllocPass.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-NEONPreAllocPass.Tpo -c -o libllvmarmcodegen_la-NEONPreAllocPass.lo `test -f 'llvm/lib/Target/ARM/NEONPreAllocPass.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/NEONPreAllocPass.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-NEONPreAllocPass.Tpo $(DEPDIR)/libllvmarmcodegen_la-NEONPreAllocPass.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/NEONPreAllocPass.cpp' object='libllvmarmcodegen_la-NEONPreAllocPass.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-NEONPreAllocPass.lo `test -f 'llvm/lib/Target/ARM/NEONPreAllocPass.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/NEONPreAllocPass.cpp
-
-libllvmarmcodegen_la-ARMTargetInfo.lo: llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-ARMTargetInfo.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-ARMTargetInfo.Tpo -c -o libllvmarmcodegen_la-ARMTargetInfo.lo `test -f 'llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-ARMTargetInfo.Tpo $(DEPDIR)/libllvmarmcodegen_la-ARMTargetInfo.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp' object='libllvmarmcodegen_la-ARMTargetInfo.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-ARMTargetInfo.lo `test -f 'llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp
-
-libllvmarmcodegen_la-Thumb1InstrInfo.lo: llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-Thumb1InstrInfo.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-Thumb1InstrInfo.Tpo -c -o libllvmarmcodegen_la-Thumb1InstrInfo.lo `test -f 'llvm/lib/Target/ARM/Thumb1InstrInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-Thumb1InstrInfo.Tpo $(DEPDIR)/libllvmarmcodegen_la-Thumb1InstrInfo.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/Thumb1InstrInfo.cpp' object='libllvmarmcodegen_la-Thumb1InstrInfo.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-Thumb1InstrInfo.lo `test -f 'llvm/lib/Target/ARM/Thumb1InstrInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
-
-libllvmarmcodegen_la-Thumb1RegisterInfo.lo: llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-Thumb1RegisterInfo.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-Thumb1RegisterInfo.Tpo -c -o libllvmarmcodegen_la-Thumb1RegisterInfo.lo `test -f 'llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-Thumb1RegisterInfo.Tpo $(DEPDIR)/libllvmarmcodegen_la-Thumb1RegisterInfo.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp' object='libllvmarmcodegen_la-Thumb1RegisterInfo.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-Thumb1RegisterInfo.lo `test -f 'llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
-
-libllvmarmcodegen_la-Thumb2ITBlockPass.lo: llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-Thumb2ITBlockPass.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-Thumb2ITBlockPass.Tpo -c -o libllvmarmcodegen_la-Thumb2ITBlockPass.lo `test -f 'llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-Thumb2ITBlockPass.Tpo $(DEPDIR)/libllvmarmcodegen_la-Thumb2ITBlockPass.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp' object='libllvmarmcodegen_la-Thumb2ITBlockPass.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-Thumb2ITBlockPass.lo `test -f 'llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
-
-libllvmarmcodegen_la-Thumb2InstrInfo.lo: llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-Thumb2InstrInfo.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-Thumb2InstrInfo.Tpo -c -o libllvmarmcodegen_la-Thumb2InstrInfo.lo `test -f 'llvm/lib/Target/ARM/Thumb2InstrInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-Thumb2InstrInfo.Tpo $(DEPDIR)/libllvmarmcodegen_la-Thumb2InstrInfo.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/Thumb2InstrInfo.cpp' object='libllvmarmcodegen_la-Thumb2InstrInfo.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-Thumb2InstrInfo.lo `test -f 'llvm/lib/Target/ARM/Thumb2InstrInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
-
-libllvmarmcodegen_la-Thumb2RegisterInfo.lo: llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-Thumb2RegisterInfo.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-Thumb2RegisterInfo.Tpo -c -o libllvmarmcodegen_la-Thumb2RegisterInfo.lo `test -f 'llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-Thumb2RegisterInfo.Tpo $(DEPDIR)/libllvmarmcodegen_la-Thumb2RegisterInfo.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp' object='libllvmarmcodegen_la-Thumb2RegisterInfo.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-Thumb2RegisterInfo.lo `test -f 'llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp
-
-libllvmarmcodegen_la-Thumb2SizeReduction.lo: llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmarmcodegen_la-Thumb2SizeReduction.lo -MD -MP -MF $(DEPDIR)/libllvmarmcodegen_la-Thumb2SizeReduction.Tpo -c -o libllvmarmcodegen_la-Thumb2SizeReduction.lo `test -f 'llvm/lib/Target/ARM/Thumb2SizeReduction.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmarmcodegen_la-Thumb2SizeReduction.Tpo $(DEPDIR)/libllvmarmcodegen_la-Thumb2SizeReduction.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/Thumb2SizeReduction.cpp' object='libllvmarmcodegen_la-Thumb2SizeReduction.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmarmcodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmarmcodegen_la-Thumb2SizeReduction.lo `test -f 'llvm/lib/Target/ARM/Thumb2SizeReduction.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
-
-LLLexer.lo: llvm/lib/AsmParser/LLLexer.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT LLLexer.lo -MD -MP -MF $(DEPDIR)/LLLexer.Tpo -c -o LLLexer.lo `test -f 'llvm/lib/AsmParser/LLLexer.cpp' || echo '$(srcdir)/'`llvm/lib/AsmParser/LLLexer.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/LLLexer.Tpo $(DEPDIR)/LLLexer.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/AsmParser/LLLexer.cpp' object='LLLexer.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o LLLexer.lo `test -f 'llvm/lib/AsmParser/LLLexer.cpp' || echo '$(srcdir)/'`llvm/lib/AsmParser/LLLexer.cpp
-
-LLParser.lo: llvm/lib/AsmParser/LLParser.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT LLParser.lo -MD -MP -MF $(DEPDIR)/LLParser.Tpo -c -o LLParser.lo `test -f 'llvm/lib/AsmParser/LLParser.cpp' || echo '$(srcdir)/'`llvm/lib/AsmParser/LLParser.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/LLParser.Tpo $(DEPDIR)/LLParser.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/AsmParser/LLParser.cpp' object='LLParser.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o LLParser.lo `test -f 'llvm/lib/AsmParser/LLParser.cpp' || echo '$(srcdir)/'`llvm/lib/AsmParser/LLParser.cpp
-
-Parser.lo: llvm/lib/AsmParser/Parser.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT Parser.lo -MD -MP -MF $(DEPDIR)/Parser.Tpo -c -o Parser.lo `test -f 'llvm/lib/AsmParser/Parser.cpp' || echo '$(srcdir)/'`llvm/lib/AsmParser/Parser.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/Parser.Tpo $(DEPDIR)/Parser.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/AsmParser/Parser.cpp' object='Parser.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o Parser.lo `test -f 'llvm/lib/AsmParser/Parser.cpp' || echo '$(srcdir)/'`llvm/lib/AsmParser/Parser.cpp
-
-libllvmasmprinter_la-OcamlGCPrinter.lo: llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmasmprinter_la-OcamlGCPrinter.lo -MD -MP -MF $(DEPDIR)/libllvmasmprinter_la-OcamlGCPrinter.Tpo -c -o libllvmasmprinter_la-OcamlGCPrinter.lo `test -f 'llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmasmprinter_la-OcamlGCPrinter.Tpo $(DEPDIR)/libllvmasmprinter_la-OcamlGCPrinter.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp' object='libllvmasmprinter_la-OcamlGCPrinter.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmasmprinter_la-OcamlGCPrinter.lo `test -f 'llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
-
-libllvmasmprinter_la-ELFCodeEmitter.lo: llvm/lib/CodeGen/ELFCodeEmitter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmasmprinter_la-ELFCodeEmitter.lo -MD -MP -MF $(DEPDIR)/libllvmasmprinter_la-ELFCodeEmitter.Tpo -c -o libllvmasmprinter_la-ELFCodeEmitter.lo `test -f 'llvm/lib/CodeGen/ELFCodeEmitter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ELFCodeEmitter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmasmprinter_la-ELFCodeEmitter.Tpo $(DEPDIR)/libllvmasmprinter_la-ELFCodeEmitter.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/ELFCodeEmitter.cpp' object='libllvmasmprinter_la-ELFCodeEmitter.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmasmprinter_la-ELFCodeEmitter.lo `test -f 'llvm/lib/CodeGen/ELFCodeEmitter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ELFCodeEmitter.cpp
-
-libllvmasmprinter_la-ELFWriter.lo: llvm/lib/CodeGen/ELFWriter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmasmprinter_la-ELFWriter.lo -MD -MP -MF $(DEPDIR)/libllvmasmprinter_la-ELFWriter.Tpo -c -o libllvmasmprinter_la-ELFWriter.lo `test -f 'llvm/lib/CodeGen/ELFWriter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ELFWriter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmasmprinter_la-ELFWriter.Tpo $(DEPDIR)/libllvmasmprinter_la-ELFWriter.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/ELFWriter.cpp' object='libllvmasmprinter_la-ELFWriter.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmasmprinter_la-ELFWriter.lo `test -f 'llvm/lib/CodeGen/ELFWriter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ELFWriter.cpp
-
-libllvmasmprinter_la-X86AsmPrinter.lo: llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmasmprinter_la-X86AsmPrinter.lo -MD -MP -MF $(DEPDIR)/libllvmasmprinter_la-X86AsmPrinter.Tpo -c -o libllvmasmprinter_la-X86AsmPrinter.lo `test -f 'llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmasmprinter_la-X86AsmPrinter.Tpo $(DEPDIR)/libllvmasmprinter_la-X86AsmPrinter.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp' object='libllvmasmprinter_la-X86AsmPrinter.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmasmprinter_la-X86AsmPrinter.lo `test -f 'llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp
-
-libllvmasmprinter_la-X86ATTInstPrinter.lo: llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmasmprinter_la-X86ATTInstPrinter.lo -MD -MP -MF $(DEPDIR)/libllvmasmprinter_la-X86ATTInstPrinter.Tpo -c -o libllvmasmprinter_la-X86ATTInstPrinter.lo `test -f 'llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmasmprinter_la-X86ATTInstPrinter.Tpo $(DEPDIR)/libllvmasmprinter_la-X86ATTInstPrinter.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp' object='libllvmasmprinter_la-X86ATTInstPrinter.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmasmprinter_la-X86ATTInstPrinter.lo `test -f 'llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
-
-libllvmasmprinter_la-X86IntelInstPrinter.lo: llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmasmprinter_la-X86IntelInstPrinter.lo -MD -MP -MF $(DEPDIR)/libllvmasmprinter_la-X86IntelInstPrinter.Tpo -c -o libllvmasmprinter_la-X86IntelInstPrinter.lo `test -f 'llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmasmprinter_la-X86IntelInstPrinter.Tpo $(DEPDIR)/libllvmasmprinter_la-X86IntelInstPrinter.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp' object='libllvmasmprinter_la-X86IntelInstPrinter.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmasmprinter_la-X86IntelInstPrinter.lo `test -f 'llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp
-
-libllvmasmprinter_la-X86MCInstLower.lo: llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmasmprinter_la-X86MCInstLower.lo -MD -MP -MF $(DEPDIR)/libllvmasmprinter_la-X86MCInstLower.Tpo -c -o libllvmasmprinter_la-X86MCInstLower.lo `test -f 'llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmasmprinter_la-X86MCInstLower.Tpo $(DEPDIR)/libllvmasmprinter_la-X86MCInstLower.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp' object='libllvmasmprinter_la-X86MCInstLower.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmasmprinter_la-X86MCInstLower.lo `test -f 'llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp
-
-libllvmasmprinter_la-X86COFFMachineModuleInfo.lo: llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmasmprinter_la-X86COFFMachineModuleInfo.lo -MD -MP -MF $(DEPDIR)/libllvmasmprinter_la-X86COFFMachineModuleInfo.Tpo -c -o libllvmasmprinter_la-X86COFFMachineModuleInfo.lo `test -f 'llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmasmprinter_la-X86COFFMachineModuleInfo.Tpo $(DEPDIR)/libllvmasmprinter_la-X86COFFMachineModuleInfo.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp' object='libllvmasmprinter_la-X86COFFMachineModuleInfo.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmasmprinter_la-X86COFFMachineModuleInfo.lo `test -f 'llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp
-
-libllvmasmprinter_la-PPCAsmPrinter.lo: llvm/lib/Target/PowerPC/AsmPrinter/PPCAsmPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmasmprinter_la-PPCAsmPrinter.lo -MD -MP -MF $(DEPDIR)/libllvmasmprinter_la-PPCAsmPrinter.Tpo -c -o libllvmasmprinter_la-PPCAsmPrinter.lo `test -f 'llvm/lib/Target/PowerPC/AsmPrinter/PPCAsmPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/Target/PowerPC/AsmPrinter/PPCAsmPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmasmprinter_la-PPCAsmPrinter.Tpo $(DEPDIR)/libllvmasmprinter_la-PPCAsmPrinter.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/PowerPC/AsmPrinter/PPCAsmPrinter.cpp' object='libllvmasmprinter_la-PPCAsmPrinter.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmasmprinter_la-PPCAsmPrinter.lo `test -f 'llvm/lib/Target/PowerPC/AsmPrinter/PPCAsmPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/Target/PowerPC/AsmPrinter/PPCAsmPrinter.cpp
-
-libllvmasmprinter_la-ARMAsmPrinter.lo: llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmasmprinter_la-ARMAsmPrinter.lo -MD -MP -MF $(DEPDIR)/libllvmasmprinter_la-ARMAsmPrinter.Tpo -c -o libllvmasmprinter_la-ARMAsmPrinter.lo `test -f 'llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmasmprinter_la-ARMAsmPrinter.Tpo $(DEPDIR)/libllvmasmprinter_la-ARMAsmPrinter.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp' object='libllvmasmprinter_la-ARMAsmPrinter.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmasmprinter_la-ARMAsmPrinter.lo `test -f 'llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp
-
-libllvmasmprinter_la-ARMInstPrinter.lo: llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmasmprinter_la-ARMInstPrinter.lo -MD -MP -MF $(DEPDIR)/libllvmasmprinter_la-ARMInstPrinter.Tpo -c -o libllvmasmprinter_la-ARMInstPrinter.lo `test -f 'llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmasmprinter_la-ARMInstPrinter.Tpo $(DEPDIR)/libllvmasmprinter_la-ARMInstPrinter.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp' object='libllvmasmprinter_la-ARMInstPrinter.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmasmprinter_la-ARMInstPrinter.lo `test -f 'llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp
-
-libllvmasmprinter_la-ARMMCInstLower.lo: llvm/lib/Target/ARM/AsmPrinter/ARMMCInstLower.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmasmprinter_la-ARMMCInstLower.lo -MD -MP -MF $(DEPDIR)/libllvmasmprinter_la-ARMMCInstLower.Tpo -c -o libllvmasmprinter_la-ARMMCInstLower.lo `test -f 'llvm/lib/Target/ARM/AsmPrinter/ARMMCInstLower.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/AsmPrinter/ARMMCInstLower.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmasmprinter_la-ARMMCInstLower.Tpo $(DEPDIR)/libllvmasmprinter_la-ARMMCInstLower.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/ARM/AsmPrinter/ARMMCInstLower.cpp' object='libllvmasmprinter_la-ARMMCInstLower.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmasmprinter_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmasmprinter_la-ARMMCInstLower.lo `test -f 'llvm/lib/Target/ARM/AsmPrinter/ARMMCInstLower.cpp' || echo '$(srcdir)/'`llvm/lib/Target/ARM/AsmPrinter/ARMMCInstLower.cpp
-
-BitReader.lo: llvm/lib/Bitcode/Reader/BitReader.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT BitReader.lo -MD -MP -MF $(DEPDIR)/BitReader.Tpo -c -o BitReader.lo `test -f 'llvm/lib/Bitcode/Reader/BitReader.cpp' || echo '$(srcdir)/'`llvm/lib/Bitcode/Reader/BitReader.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/BitReader.Tpo $(DEPDIR)/BitReader.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Bitcode/Reader/BitReader.cpp' object='BitReader.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o BitReader.lo `test -f 'llvm/lib/Bitcode/Reader/BitReader.cpp' || echo '$(srcdir)/'`llvm/lib/Bitcode/Reader/BitReader.cpp
-
-BitcodeReader.lo: llvm/lib/Bitcode/Reader/BitcodeReader.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT BitcodeReader.lo -MD -MP -MF $(DEPDIR)/BitcodeReader.Tpo -c -o BitcodeReader.lo `test -f 'llvm/lib/Bitcode/Reader/BitcodeReader.cpp' || echo '$(srcdir)/'`llvm/lib/Bitcode/Reader/BitcodeReader.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/BitcodeReader.Tpo $(DEPDIR)/BitcodeReader.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Bitcode/Reader/BitcodeReader.cpp' object='BitcodeReader.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o BitcodeReader.lo `test -f 'llvm/lib/Bitcode/Reader/BitcodeReader.cpp' || echo '$(srcdir)/'`llvm/lib/Bitcode/Reader/BitcodeReader.cpp
-
-BitWriter.lo: llvm/lib/Bitcode/Writer/BitWriter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT BitWriter.lo -MD -MP -MF $(DEPDIR)/BitWriter.Tpo -c -o BitWriter.lo `test -f 'llvm/lib/Bitcode/Writer/BitWriter.cpp' || echo '$(srcdir)/'`llvm/lib/Bitcode/Writer/BitWriter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/BitWriter.Tpo $(DEPDIR)/BitWriter.Plo
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Bitcode/Writer/BitWriter.cpp' object='BitWriter.lo' libtool=yes @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o BitWriter.lo `test -f 'llvm/lib/Bitcode/Writer/BitWriter.cpp' || echo '$(srcdir)/'`llvm/lib/Bitcode/Writer/BitWriter.cpp
-
-BitcodeWriter.lo: llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT BitcodeWriter.lo -MD -MP -MF $(DEPDIR)/BitcodeWriter.Tpo -c -o BitcodeWriter.lo `test -f 'llvm/lib/Bitcode/Writer/BitcodeWriter.cpp' || echo '$(srcdir)/'`llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/BitcodeWriter.Tpo $(DEPDIR)/BitcodeWriter.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Bitcode/Writer/BitcodeWriter.cpp' object='BitcodeWriter.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o BitcodeWriter.lo `test -f 'llvm/lib/Bitcode/Writer/BitcodeWriter.cpp' || echo '$(srcdir)/'`llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
-
-BitcodeWriterPass.lo: llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT BitcodeWriterPass.lo -MD -MP -MF $(DEPDIR)/BitcodeWriterPass.Tpo -c -o BitcodeWriterPass.lo `test -f 'llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp' || echo '$(srcdir)/'`llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/BitcodeWriterPass.Tpo $(DEPDIR)/BitcodeWriterPass.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp' object='BitcodeWriterPass.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o BitcodeWriterPass.lo `test -f 'llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp' || echo '$(srcdir)/'`llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp
-
-ValueEnumerator.lo: llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ValueEnumerator.lo -MD -MP -MF $(DEPDIR)/ValueEnumerator.Tpo -c -o ValueEnumerator.lo `test -f 'llvm/lib/Bitcode/Writer/ValueEnumerator.cpp' || echo '$(srcdir)/'`llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ValueEnumerator.Tpo $(DEPDIR)/ValueEnumerator.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Bitcode/Writer/ValueEnumerator.cpp' object='ValueEnumerator.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ValueEnumerator.lo `test -f 'llvm/lib/Bitcode/Writer/ValueEnumerator.cpp' || echo '$(srcdir)/'`llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
-
-AliasSetTracker.lo: llvm/lib/Analysis/AliasSetTracker.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT AliasSetTracker.lo -MD -MP -MF $(DEPDIR)/AliasSetTracker.Tpo -c -o AliasSetTracker.lo `test -f 'llvm/lib/Analysis/AliasSetTracker.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/AliasSetTracker.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/AliasSetTracker.Tpo $(DEPDIR)/AliasSetTracker.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Analysis/AliasSetTracker.cpp' object='AliasSetTracker.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o AliasSetTracker.lo `test -f 'llvm/lib/Analysis/AliasSetTracker.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/AliasSetTracker.cpp
-
ConstantFolding.lo: llvm/lib/Analysis/ConstantFolding.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ConstantFolding.lo -MD -MP -MF $(DEPDIR)/ConstantFolding.Tpo -c -o ConstantFolding.lo `test -f 'llvm/lib/Analysis/ConstantFolding.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/ConstantFolding.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ConstantFolding.Tpo $(DEPDIR)/ConstantFolding.Plo
@@ -3487,6 +2092,14 @@ InstructionSimplify.lo: llvm/lib/Analysis/InstructionSimplify.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o InstructionSimplify.lo `test -f 'llvm/lib/Analysis/InstructionSimplify.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/InstructionSimplify.cpp
+Loads.lo: llvm/lib/Analysis/Loads.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT Loads.lo -MD -MP -MF $(DEPDIR)/Loads.Tpo -c -o Loads.lo `test -f 'llvm/lib/Analysis/Loads.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/Loads.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/Loads.Tpo $(DEPDIR)/Loads.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Analysis/Loads.cpp' object='Loads.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o Loads.lo `test -f 'llvm/lib/Analysis/Loads.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/Loads.cpp
+
LoopInfo.lo: llvm/lib/Analysis/LoopInfo.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT LoopInfo.lo -MD -MP -MF $(DEPDIR)/LoopInfo.Tpo -c -o LoopInfo.lo `test -f 'llvm/lib/Analysis/LoopInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/LoopInfo.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/LoopInfo.Tpo $(DEPDIR)/LoopInfo.Plo
@@ -3519,14 +2132,6 @@ PHITransAddr.lo: llvm/lib/Analysis/PHITransAddr.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o PHITransAddr.lo `test -f 'llvm/lib/Analysis/PHITransAddr.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/PHITransAddr.cpp
-ProfileInfo.lo: llvm/lib/Analysis/ProfileInfo.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ProfileInfo.lo -MD -MP -MF $(DEPDIR)/ProfileInfo.Tpo -c -o ProfileInfo.lo `test -f 'llvm/lib/Analysis/ProfileInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/ProfileInfo.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ProfileInfo.Tpo $(DEPDIR)/ProfileInfo.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Analysis/ProfileInfo.cpp' object='ProfileInfo.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ProfileInfo.lo `test -f 'llvm/lib/Analysis/ProfileInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/ProfileInfo.cpp
-
ScalarEvolution.lo: llvm/lib/Analysis/ScalarEvolution.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ScalarEvolution.lo -MD -MP -MF $(DEPDIR)/ScalarEvolution.Tpo -c -o ScalarEvolution.lo `test -f 'llvm/lib/Analysis/ScalarEvolution.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/ScalarEvolution.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ScalarEvolution.Tpo $(DEPDIR)/ScalarEvolution.Plo
@@ -3543,6 +2148,14 @@ ScalarEvolutionExpander.lo: llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ScalarEvolutionExpander.lo `test -f 'llvm/lib/Analysis/ScalarEvolutionExpander.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+ScalarEvolutionNormalization.lo: llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ScalarEvolutionNormalization.lo -MD -MP -MF $(DEPDIR)/ScalarEvolutionNormalization.Tpo -c -o ScalarEvolutionNormalization.lo `test -f 'llvm/lib/Analysis/ScalarEvolutionNormalization.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ScalarEvolutionNormalization.Tpo $(DEPDIR)/ScalarEvolutionNormalization.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Analysis/ScalarEvolutionNormalization.cpp' object='ScalarEvolutionNormalization.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ScalarEvolutionNormalization.lo `test -f 'llvm/lib/Analysis/ScalarEvolutionNormalization.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
+
AggressiveAntiDepBreaker.lo: llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT AggressiveAntiDepBreaker.lo -MD -MP -MF $(DEPDIR)/AggressiveAntiDepBreaker.Tpo -c -o AggressiveAntiDepBreaker.lo `test -f 'llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/AggressiveAntiDepBreaker.Tpo $(DEPDIR)/AggressiveAntiDepBreaker.Plo
@@ -3551,61 +2164,13 @@ AggressiveAntiDepBreaker.lo: llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o AggressiveAntiDepBreaker.lo `test -f 'llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
-AsmPrinter.lo: llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT AsmPrinter.lo -MD -MP -MF $(DEPDIR)/AsmPrinter.Tpo -c -o AsmPrinter.lo `test -f 'llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/AsmPrinter.Tpo $(DEPDIR)/AsmPrinter.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp' object='AsmPrinter.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o AsmPrinter.lo `test -f 'llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
-
-DIE.lo: llvm/lib/CodeGen/AsmPrinter/DIE.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT DIE.lo -MD -MP -MF $(DEPDIR)/DIE.Tpo -c -o DIE.lo `test -f 'llvm/lib/CodeGen/AsmPrinter/DIE.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AsmPrinter/DIE.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DIE.Tpo $(DEPDIR)/DIE.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/AsmPrinter/DIE.cpp' object='DIE.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o DIE.lo `test -f 'llvm/lib/CodeGen/AsmPrinter/DIE.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AsmPrinter/DIE.cpp
-
-DwarfDebug.lo: llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT DwarfDebug.lo -MD -MP -MF $(DEPDIR)/DwarfDebug.Tpo -c -o DwarfDebug.lo `test -f 'llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DwarfDebug.Tpo $(DEPDIR)/DwarfDebug.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp' object='DwarfDebug.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o DwarfDebug.lo `test -f 'llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
-
-DwarfException.lo: llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT DwarfException.lo -MD -MP -MF $(DEPDIR)/DwarfException.Tpo -c -o DwarfException.lo `test -f 'llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DwarfException.Tpo $(DEPDIR)/DwarfException.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp' object='DwarfException.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o DwarfException.lo `test -f 'llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp
-
-DwarfLabel.lo: llvm/lib/CodeGen/AsmPrinter/DwarfLabel.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT DwarfLabel.lo -MD -MP -MF $(DEPDIR)/DwarfLabel.Tpo -c -o DwarfLabel.lo `test -f 'llvm/lib/CodeGen/AsmPrinter/DwarfLabel.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AsmPrinter/DwarfLabel.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DwarfLabel.Tpo $(DEPDIR)/DwarfLabel.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/AsmPrinter/DwarfLabel.cpp' object='DwarfLabel.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o DwarfLabel.lo `test -f 'llvm/lib/CodeGen/AsmPrinter/DwarfLabel.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AsmPrinter/DwarfLabel.cpp
-
-DwarfPrinter.lo: llvm/lib/CodeGen/AsmPrinter/DwarfPrinter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT DwarfPrinter.lo -MD -MP -MF $(DEPDIR)/DwarfPrinter.Tpo -c -o DwarfPrinter.lo `test -f 'llvm/lib/CodeGen/AsmPrinter/DwarfPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AsmPrinter/DwarfPrinter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DwarfPrinter.Tpo $(DEPDIR)/DwarfPrinter.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/AsmPrinter/DwarfPrinter.cpp' object='DwarfPrinter.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o DwarfPrinter.lo `test -f 'llvm/lib/CodeGen/AsmPrinter/DwarfPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AsmPrinter/DwarfPrinter.cpp
-
-DwarfWriter.lo: llvm/lib/CodeGen/AsmPrinter/DwarfWriter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT DwarfWriter.lo -MD -MP -MF $(DEPDIR)/DwarfWriter.Tpo -c -o DwarfWriter.lo `test -f 'llvm/lib/CodeGen/AsmPrinter/DwarfWriter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AsmPrinter/DwarfWriter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DwarfWriter.Tpo $(DEPDIR)/DwarfWriter.Plo
+Analysis.lo: llvm/lib/CodeGen/Analysis.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT Analysis.lo -MD -MP -MF $(DEPDIR)/Analysis.Tpo -c -o Analysis.lo `test -f 'llvm/lib/CodeGen/Analysis.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/Analysis.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/Analysis.Tpo $(DEPDIR)/Analysis.Plo
@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/AsmPrinter/DwarfWriter.cpp' object='DwarfWriter.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/Analysis.cpp' object='Analysis.lo' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o DwarfWriter.lo `test -f 'llvm/lib/CodeGen/AsmPrinter/DwarfWriter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/AsmPrinter/DwarfWriter.cpp
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o Analysis.lo `test -f 'llvm/lib/CodeGen/Analysis.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/Analysis.cpp
BranchFolding.lo: llvm/lib/CodeGen/BranchFolding.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT BranchFolding.lo -MD -MP -MF $(DEPDIR)/BranchFolding.Tpo -c -o BranchFolding.lo `test -f 'llvm/lib/CodeGen/BranchFolding.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/BranchFolding.cpp
@@ -3623,6 +2188,14 @@ CalcSpillWeights.lo: llvm/lib/CodeGen/CalcSpillWeights.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o CalcSpillWeights.lo `test -f 'llvm/lib/CodeGen/CalcSpillWeights.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/CalcSpillWeights.cpp
+CallingConvLower.lo: llvm/lib/CodeGen/CallingConvLower.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT CallingConvLower.lo -MD -MP -MF $(DEPDIR)/CallingConvLower.Tpo -c -o CallingConvLower.lo `test -f 'llvm/lib/CodeGen/CallingConvLower.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/CallingConvLower.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/CallingConvLower.Tpo $(DEPDIR)/CallingConvLower.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/CallingConvLower.cpp' object='CallingConvLower.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o CallingConvLower.lo `test -f 'llvm/lib/CodeGen/CallingConvLower.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/CallingConvLower.cpp
+
CodePlacementOpt.lo: llvm/lib/CodeGen/CodePlacementOpt.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT CodePlacementOpt.lo -MD -MP -MF $(DEPDIR)/CodePlacementOpt.Tpo -c -o CodePlacementOpt.lo `test -f 'llvm/lib/CodeGen/CodePlacementOpt.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/CodePlacementOpt.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/CodePlacementOpt.Tpo $(DEPDIR)/CodePlacementOpt.Plo
@@ -3655,14 +2228,6 @@ DwarfEHPrepare.lo: llvm/lib/CodeGen/DwarfEHPrepare.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o DwarfEHPrepare.lo `test -f 'llvm/lib/CodeGen/DwarfEHPrepare.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/DwarfEHPrepare.cpp
-ExactHazardRecognizer.lo: llvm/lib/CodeGen/ExactHazardRecognizer.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ExactHazardRecognizer.lo -MD -MP -MF $(DEPDIR)/ExactHazardRecognizer.Tpo -c -o ExactHazardRecognizer.lo `test -f 'llvm/lib/CodeGen/ExactHazardRecognizer.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ExactHazardRecognizer.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ExactHazardRecognizer.Tpo $(DEPDIR)/ExactHazardRecognizer.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/ExactHazardRecognizer.cpp' object='ExactHazardRecognizer.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ExactHazardRecognizer.lo `test -f 'llvm/lib/CodeGen/ExactHazardRecognizer.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ExactHazardRecognizer.cpp
-
GCMetadata.lo: llvm/lib/CodeGen/GCMetadata.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT GCMetadata.lo -MD -MP -MF $(DEPDIR)/GCMetadata.Tpo -c -o GCMetadata.lo `test -f 'llvm/lib/CodeGen/GCMetadata.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/GCMetadata.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/GCMetadata.Tpo $(DEPDIR)/GCMetadata.Plo
@@ -3679,6 +2244,14 @@ GCStrategy.lo: llvm/lib/CodeGen/GCStrategy.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o GCStrategy.lo `test -f 'llvm/lib/CodeGen/GCStrategy.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/GCStrategy.cpp
+InlineSpiller.lo: llvm/lib/CodeGen/InlineSpiller.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT InlineSpiller.lo -MD -MP -MF $(DEPDIR)/InlineSpiller.Tpo -c -o InlineSpiller.lo `test -f 'llvm/lib/CodeGen/InlineSpiller.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/InlineSpiller.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/InlineSpiller.Tpo $(DEPDIR)/InlineSpiller.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/InlineSpiller.cpp' object='InlineSpiller.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o InlineSpiller.lo `test -f 'llvm/lib/CodeGen/InlineSpiller.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/InlineSpiller.cpp
+
LLVMTargetMachine.lo: llvm/lib/CodeGen/LLVMTargetMachine.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT LLVMTargetMachine.lo -MD -MP -MF $(DEPDIR)/LLVMTargetMachine.Tpo -c -o LLVMTargetMachine.lo `test -f 'llvm/lib/CodeGen/LLVMTargetMachine.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/LLVMTargetMachine.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/LLVMTargetMachine.Tpo $(DEPDIR)/LLVMTargetMachine.Plo
@@ -3719,13 +2292,13 @@ LiveStackAnalysis.lo: llvm/lib/CodeGen/LiveStackAnalysis.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o LiveStackAnalysis.lo `test -f 'llvm/lib/CodeGen/LiveStackAnalysis.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/LiveStackAnalysis.cpp
-LiveVariables.lo: llvm/lib/CodeGen/LiveVariables.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT LiveVariables.lo -MD -MP -MF $(DEPDIR)/LiveVariables.Tpo -c -o LiveVariables.lo `test -f 'llvm/lib/CodeGen/LiveVariables.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/LiveVariables.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/LiveVariables.Tpo $(DEPDIR)/LiveVariables.Plo
+LocalStackSlotAllocation.lo: llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT LocalStackSlotAllocation.lo -MD -MP -MF $(DEPDIR)/LocalStackSlotAllocation.Tpo -c -o LocalStackSlotAllocation.lo `test -f 'llvm/lib/CodeGen/LocalStackSlotAllocation.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/LocalStackSlotAllocation.Tpo $(DEPDIR)/LocalStackSlotAllocation.Plo
@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/LiveVariables.cpp' object='LiveVariables.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/LocalStackSlotAllocation.cpp' object='LocalStackSlotAllocation.lo' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o LiveVariables.lo `test -f 'llvm/lib/CodeGen/LiveVariables.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/LiveVariables.cpp
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o LocalStackSlotAllocation.lo `test -f 'llvm/lib/CodeGen/LocalStackSlotAllocation.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
LowerSubregs.lo: llvm/lib/CodeGen/LowerSubregs.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT LowerSubregs.lo -MD -MP -MF $(DEPDIR)/LowerSubregs.Tpo -c -o LowerSubregs.lo `test -f 'llvm/lib/CodeGen/LowerSubregs.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/LowerSubregs.cpp
@@ -3743,14 +2316,6 @@ MachineCSE.lo: llvm/lib/CodeGen/MachineCSE.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MachineCSE.lo `test -f 'llvm/lib/CodeGen/MachineCSE.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineCSE.cpp
-MachineDominators.lo: llvm/lib/CodeGen/MachineDominators.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MachineDominators.lo -MD -MP -MF $(DEPDIR)/MachineDominators.Tpo -c -o MachineDominators.lo `test -f 'llvm/lib/CodeGen/MachineDominators.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineDominators.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MachineDominators.Tpo $(DEPDIR)/MachineDominators.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/MachineDominators.cpp' object='MachineDominators.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MachineDominators.lo `test -f 'llvm/lib/CodeGen/MachineDominators.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineDominators.cpp
-
MachineLICM.lo: llvm/lib/CodeGen/MachineLICM.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MachineLICM.lo -MD -MP -MF $(DEPDIR)/MachineLICM.Tpo -c -o MachineLICM.lo `test -f 'llvm/lib/CodeGen/MachineLICM.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineLICM.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MachineLICM.Tpo $(DEPDIR)/MachineLICM.Plo
@@ -3759,14 +2324,6 @@ MachineLICM.lo: llvm/lib/CodeGen/MachineLICM.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MachineLICM.lo `test -f 'llvm/lib/CodeGen/MachineLICM.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineLICM.cpp
-MachineLoopInfo.lo: llvm/lib/CodeGen/MachineLoopInfo.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MachineLoopInfo.lo -MD -MP -MF $(DEPDIR)/MachineLoopInfo.Tpo -c -o MachineLoopInfo.lo `test -f 'llvm/lib/CodeGen/MachineLoopInfo.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineLoopInfo.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MachineLoopInfo.Tpo $(DEPDIR)/MachineLoopInfo.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/MachineLoopInfo.cpp' object='MachineLoopInfo.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MachineLoopInfo.lo `test -f 'llvm/lib/CodeGen/MachineLoopInfo.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineLoopInfo.cpp
-
MachineModuleInfoImpls.lo: llvm/lib/CodeGen/MachineModuleInfoImpls.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MachineModuleInfoImpls.lo -MD -MP -MF $(DEPDIR)/MachineModuleInfoImpls.Tpo -c -o MachineModuleInfoImpls.lo `test -f 'llvm/lib/CodeGen/MachineModuleInfoImpls.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineModuleInfoImpls.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MachineModuleInfoImpls.Tpo $(DEPDIR)/MachineModuleInfoImpls.Plo
@@ -3807,14 +2364,6 @@ MachineVerifier.lo: llvm/lib/CodeGen/MachineVerifier.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MachineVerifier.lo `test -f 'llvm/lib/CodeGen/MachineVerifier.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineVerifier.cpp
-OptimizeExts.lo: llvm/lib/CodeGen/OptimizeExts.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT OptimizeExts.lo -MD -MP -MF $(DEPDIR)/OptimizeExts.Tpo -c -o OptimizeExts.lo `test -f 'llvm/lib/CodeGen/OptimizeExts.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/OptimizeExts.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/OptimizeExts.Tpo $(DEPDIR)/OptimizeExts.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/OptimizeExts.cpp' object='OptimizeExts.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o OptimizeExts.lo `test -f 'llvm/lib/CodeGen/OptimizeExts.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/OptimizeExts.cpp
-
OptimizePHIs.lo: llvm/lib/CodeGen/OptimizePHIs.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT OptimizePHIs.lo -MD -MP -MF $(DEPDIR)/OptimizePHIs.Tpo -c -o OptimizePHIs.lo `test -f 'llvm/lib/CodeGen/OptimizePHIs.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/OptimizePHIs.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/OptimizePHIs.Tpo $(DEPDIR)/OptimizePHIs.Plo
@@ -3839,6 +2388,14 @@ Passes.lo: llvm/lib/CodeGen/Passes.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o Passes.lo `test -f 'llvm/lib/CodeGen/Passes.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/Passes.cpp
+PeepholeOptimizer.lo: llvm/lib/CodeGen/PeepholeOptimizer.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT PeepholeOptimizer.lo -MD -MP -MF $(DEPDIR)/PeepholeOptimizer.Tpo -c -o PeepholeOptimizer.lo `test -f 'llvm/lib/CodeGen/PeepholeOptimizer.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/PeepholeOptimizer.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/PeepholeOptimizer.Tpo $(DEPDIR)/PeepholeOptimizer.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/PeepholeOptimizer.cpp' object='PeepholeOptimizer.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o PeepholeOptimizer.lo `test -f 'llvm/lib/CodeGen/PeepholeOptimizer.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/PeepholeOptimizer.cpp
+
PostRASchedulerList.lo: llvm/lib/CodeGen/PostRASchedulerList.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT PostRASchedulerList.lo -MD -MP -MF $(DEPDIR)/PostRASchedulerList.Tpo -c -o PostRASchedulerList.lo `test -f 'llvm/lib/CodeGen/PostRASchedulerList.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/PostRASchedulerList.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/PostRASchedulerList.Tpo $(DEPDIR)/PostRASchedulerList.Plo
@@ -3871,6 +2428,14 @@ PrologEpilogInserter.lo: llvm/lib/CodeGen/PrologEpilogInserter.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o PrologEpilogInserter.lo `test -f 'llvm/lib/CodeGen/PrologEpilogInserter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/PrologEpilogInserter.cpp
+RegAllocFast.lo: llvm/lib/CodeGen/RegAllocFast.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT RegAllocFast.lo -MD -MP -MF $(DEPDIR)/RegAllocFast.Tpo -c -o RegAllocFast.lo `test -f 'llvm/lib/CodeGen/RegAllocFast.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/RegAllocFast.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/RegAllocFast.Tpo $(DEPDIR)/RegAllocFast.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/RegAllocFast.cpp' object='RegAllocFast.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o RegAllocFast.lo `test -f 'llvm/lib/CodeGen/RegAllocFast.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/RegAllocFast.cpp
+
RegAllocLinearScan.lo: llvm/lib/CodeGen/RegAllocLinearScan.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT RegAllocLinearScan.lo -MD -MP -MF $(DEPDIR)/RegAllocLinearScan.Tpo -c -o RegAllocLinearScan.lo `test -f 'llvm/lib/CodeGen/RegAllocLinearScan.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/RegAllocLinearScan.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/RegAllocLinearScan.Tpo $(DEPDIR)/RegAllocLinearScan.Plo
@@ -3895,14 +2460,6 @@ RegisterScavenging.lo: llvm/lib/CodeGen/RegisterScavenging.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o RegisterScavenging.lo `test -f 'llvm/lib/CodeGen/RegisterScavenging.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/RegisterScavenging.cpp
-ScheduleDAG.lo: llvm/lib/CodeGen/ScheduleDAG.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ScheduleDAG.lo -MD -MP -MF $(DEPDIR)/ScheduleDAG.Tpo -c -o ScheduleDAG.lo `test -f 'llvm/lib/CodeGen/ScheduleDAG.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ScheduleDAG.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ScheduleDAG.Tpo $(DEPDIR)/ScheduleDAG.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/ScheduleDAG.cpp' object='ScheduleDAG.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ScheduleDAG.lo `test -f 'llvm/lib/CodeGen/ScheduleDAG.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ScheduleDAG.cpp
-
ScheduleDAGEmit.lo: llvm/lib/CodeGen/ScheduleDAGEmit.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ScheduleDAGEmit.lo -MD -MP -MF $(DEPDIR)/ScheduleDAGEmit.Tpo -c -o ScheduleDAGEmit.lo `test -f 'llvm/lib/CodeGen/ScheduleDAGEmit.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ScheduleDAGEmit.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ScheduleDAGEmit.Tpo $(DEPDIR)/ScheduleDAGEmit.Plo
@@ -3919,22 +2476,6 @@ ScheduleDAGInstrs.lo: llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ScheduleDAGInstrs.lo `test -f 'llvm/lib/CodeGen/ScheduleDAGInstrs.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
-ScheduleDAGPrinter.lo: llvm/lib/CodeGen/ScheduleDAGPrinter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ScheduleDAGPrinter.lo -MD -MP -MF $(DEPDIR)/ScheduleDAGPrinter.Tpo -c -o ScheduleDAGPrinter.lo `test -f 'llvm/lib/CodeGen/ScheduleDAGPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ScheduleDAGPrinter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ScheduleDAGPrinter.Tpo $(DEPDIR)/ScheduleDAGPrinter.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/ScheduleDAGPrinter.cpp' object='ScheduleDAGPrinter.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ScheduleDAGPrinter.lo `test -f 'llvm/lib/CodeGen/ScheduleDAGPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ScheduleDAGPrinter.cpp
-
-CallingConvLower.lo: llvm/lib/CodeGen/SelectionDAG/CallingConvLower.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT CallingConvLower.lo -MD -MP -MF $(DEPDIR)/CallingConvLower.Tpo -c -o CallingConvLower.lo `test -f 'llvm/lib/CodeGen/SelectionDAG/CallingConvLower.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/SelectionDAG/CallingConvLower.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/CallingConvLower.Tpo $(DEPDIR)/CallingConvLower.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/SelectionDAG/CallingConvLower.cpp' object='CallingConvLower.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o CallingConvLower.lo `test -f 'llvm/lib/CodeGen/SelectionDAG/CallingConvLower.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/SelectionDAG/CallingConvLower.cpp
-
DAGCombiner.lo: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT DAGCombiner.lo -MD -MP -MF $(DEPDIR)/DAGCombiner.Tpo -c -o DAGCombiner.lo `test -f 'llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DAGCombiner.Tpo $(DEPDIR)/DAGCombiner.Plo
@@ -4023,14 +2564,6 @@ LegalizeVectorTypes.lo: llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o LegalizeVectorTypes.lo `test -f 'llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
-ScheduleDAGFast.lo: llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ScheduleDAGFast.lo -MD -MP -MF $(DEPDIR)/ScheduleDAGFast.Tpo -c -o ScheduleDAGFast.lo `test -f 'llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ScheduleDAGFast.Tpo $(DEPDIR)/ScheduleDAGFast.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp' object='ScheduleDAGFast.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ScheduleDAGFast.lo `test -f 'llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
-
ScheduleDAGList.lo: llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ScheduleDAGList.lo -MD -MP -MF $(DEPDIR)/ScheduleDAGList.Tpo -c -o ScheduleDAGList.lo `test -f 'llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ScheduleDAGList.Tpo $(DEPDIR)/ScheduleDAGList.Plo
@@ -4095,6 +2628,22 @@ TargetLowering.lo: llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o TargetLowering.lo `test -f 'llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+TargetSelectionDAGInfo.lo: llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT TargetSelectionDAGInfo.lo -MD -MP -MF $(DEPDIR)/TargetSelectionDAGInfo.Tpo -c -o TargetSelectionDAGInfo.lo `test -f 'llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/TargetSelectionDAGInfo.Tpo $(DEPDIR)/TargetSelectionDAGInfo.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp' object='TargetSelectionDAGInfo.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o TargetSelectionDAGInfo.lo `test -f 'llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
+
+ScheduleDAGPrinter.lo: llvm/lib/CodeGen/ScheduleDAGPrinter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ScheduleDAGPrinter.lo -MD -MP -MF $(DEPDIR)/ScheduleDAGPrinter.Tpo -c -o ScheduleDAGPrinter.lo `test -f 'llvm/lib/CodeGen/ScheduleDAGPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ScheduleDAGPrinter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ScheduleDAGPrinter.Tpo $(DEPDIR)/ScheduleDAGPrinter.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/ScheduleDAGPrinter.cpp' object='ScheduleDAGPrinter.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ScheduleDAGPrinter.lo `test -f 'llvm/lib/CodeGen/ScheduleDAGPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ScheduleDAGPrinter.cpp
+
ShrinkWrapping.lo: llvm/lib/CodeGen/ShrinkWrapping.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ShrinkWrapping.lo -MD -MP -MF $(DEPDIR)/ShrinkWrapping.Tpo -c -o ShrinkWrapping.lo `test -f 'llvm/lib/CodeGen/ShrinkWrapping.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ShrinkWrapping.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ShrinkWrapping.Tpo $(DEPDIR)/ShrinkWrapping.Plo
@@ -4135,6 +2684,14 @@ Spiller.lo: llvm/lib/CodeGen/Spiller.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o Spiller.lo `test -f 'llvm/lib/CodeGen/Spiller.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/Spiller.cpp
+SplitKit.lo: llvm/lib/CodeGen/SplitKit.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT SplitKit.lo -MD -MP -MF $(DEPDIR)/SplitKit.Tpo -c -o SplitKit.lo `test -f 'llvm/lib/CodeGen/SplitKit.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/SplitKit.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/SplitKit.Tpo $(DEPDIR)/SplitKit.Plo
+@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/CodeGen/SplitKit.cpp' object='SplitKit.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o SplitKit.lo `test -f 'llvm/lib/CodeGen/SplitKit.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/SplitKit.cpp
+
StackProtector.lo: llvm/lib/CodeGen/StackProtector.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT StackProtector.lo -MD -MP -MF $(DEPDIR)/StackProtector.Tpo -c -o StackProtector.lo `test -f 'llvm/lib/CodeGen/StackProtector.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/StackProtector.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/StackProtector.Tpo $(DEPDIR)/StackProtector.Plo
@@ -4183,14 +2740,6 @@ TwoAddressInstructionPass.lo: llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o TwoAddressInstructionPass.lo `test -f 'llvm/lib/CodeGen/TwoAddressInstructionPass.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
-UnreachableBlockElim.lo: llvm/lib/CodeGen/UnreachableBlockElim.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT UnreachableBlockElim.lo -MD -MP -MF $(DEPDIR)/UnreachableBlockElim.Tpo -c -o UnreachableBlockElim.lo `test -f 'llvm/lib/CodeGen/UnreachableBlockElim.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/UnreachableBlockElim.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/UnreachableBlockElim.Tpo $(DEPDIR)/UnreachableBlockElim.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/CodeGen/UnreachableBlockElim.cpp' object='UnreachableBlockElim.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o UnreachableBlockElim.lo `test -f 'llvm/lib/CodeGen/UnreachableBlockElim.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/UnreachableBlockElim.cpp
-
VirtRegMap.lo: llvm/lib/CodeGen/VirtRegMap.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT VirtRegMap.lo -MD -MP -MF $(DEPDIR)/VirtRegMap.Tpo -c -o VirtRegMap.lo `test -f 'llvm/lib/CodeGen/VirtRegMap.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/VirtRegMap.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/VirtRegMap.Tpo $(DEPDIR)/VirtRegMap.Plo
@@ -4223,29 +2772,21 @@ MCAsmStreamer.lo: llvm/lib/MC/MCAsmStreamer.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCAsmStreamer.lo `test -f 'llvm/lib/MC/MCAsmStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCAsmStreamer.cpp
-MCAssembler.lo: llvm/lib/MC/MCAssembler.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MCAssembler.lo -MD -MP -MF $(DEPDIR)/MCAssembler.Tpo -c -o MCAssembler.lo `test -f 'llvm/lib/MC/MCAssembler.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCAssembler.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/MCAssembler.Tpo $(DEPDIR)/MCAssembler.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/MC/MCAssembler.cpp' object='MCAssembler.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCAssembler.lo `test -f 'llvm/lib/MC/MCAssembler.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCAssembler.cpp
-
-MCInst.lo: llvm/lib/MC/MCInst.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MCInst.lo -MD -MP -MF $(DEPDIR)/MCInst.Tpo -c -o MCInst.lo `test -f 'llvm/lib/MC/MCInst.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCInst.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/MCInst.Tpo $(DEPDIR)/MCInst.Plo
+MCInstPrinter.lo: llvm/lib/MC/MCInstPrinter.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MCInstPrinter.lo -MD -MP -MF $(DEPDIR)/MCInstPrinter.Tpo -c -o MCInstPrinter.lo `test -f 'llvm/lib/MC/MCInstPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCInstPrinter.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/MCInstPrinter.Tpo $(DEPDIR)/MCInstPrinter.Plo
@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/MC/MCInst.cpp' object='MCInst.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/MC/MCInstPrinter.cpp' object='MCInstPrinter.lo' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCInst.lo `test -f 'llvm/lib/MC/MCInst.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCInst.cpp
+@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCInstPrinter.lo `test -f 'llvm/lib/MC/MCInstPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCInstPrinter.cpp
-MCMachOStreamer.lo: llvm/lib/MC/MCMachOStreamer.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MCMachOStreamer.lo -MD -MP -MF $(DEPDIR)/MCMachOStreamer.Tpo -c -o MCMachOStreamer.lo `test -f 'llvm/lib/MC/MCMachOStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCMachOStreamer.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/MCMachOStreamer.Tpo $(DEPDIR)/MCMachOStreamer.Plo
+MCLoggingStreamer.lo: llvm/lib/MC/MCLoggingStreamer.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MCLoggingStreamer.lo -MD -MP -MF $(DEPDIR)/MCLoggingStreamer.Tpo -c -o MCLoggingStreamer.lo `test -f 'llvm/lib/MC/MCLoggingStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCLoggingStreamer.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/MCLoggingStreamer.Tpo $(DEPDIR)/MCLoggingStreamer.Plo
@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/MC/MCMachOStreamer.cpp' object='MCMachOStreamer.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/MC/MCLoggingStreamer.cpp' object='MCLoggingStreamer.lo' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCMachOStreamer.lo `test -f 'llvm/lib/MC/MCMachOStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCMachOStreamer.cpp
+@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCLoggingStreamer.lo `test -f 'llvm/lib/MC/MCLoggingStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCLoggingStreamer.cpp
MCNullStreamer.lo: llvm/lib/MC/MCNullStreamer.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MCNullStreamer.lo -MD -MP -MF $(DEPDIR)/MCNullStreamer.Tpo -c -o MCNullStreamer.lo `test -f 'llvm/lib/MC/MCNullStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCNullStreamer.cpp
@@ -4255,14 +2796,6 @@ MCNullStreamer.lo: llvm/lib/MC/MCNullStreamer.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCNullStreamer.lo `test -f 'llvm/lib/MC/MCNullStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCNullStreamer.cpp
-MCSectionMachO.lo: llvm/lib/MC/MCSectionMachO.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MCSectionMachO.lo -MD -MP -MF $(DEPDIR)/MCSectionMachO.Tpo -c -o MCSectionMachO.lo `test -f 'llvm/lib/MC/MCSectionMachO.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCSectionMachO.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/MCSectionMachO.Tpo $(DEPDIR)/MCSectionMachO.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/MC/MCSectionMachO.cpp' object='MCSectionMachO.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCSectionMachO.lo `test -f 'llvm/lib/MC/MCSectionMachO.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCSectionMachO.cpp
-
MCStreamer.lo: llvm/lib/MC/MCStreamer.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MCStreamer.lo -MD -MP -MF $(DEPDIR)/MCStreamer.Tpo -c -o MCStreamer.lo `test -f 'llvm/lib/MC/MCStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCStreamer.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MCStreamer.Tpo $(DEPDIR)/MCStreamer.Plo
@@ -4271,6 +2804,14 @@ MCStreamer.lo: llvm/lib/MC/MCStreamer.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCStreamer.lo `test -f 'llvm/lib/MC/MCStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCStreamer.cpp
+GraphWriter.lo: llvm/lib/Support/GraphWriter.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT GraphWriter.lo -MD -MP -MF $(DEPDIR)/GraphWriter.Tpo -c -o GraphWriter.lo `test -f 'llvm/lib/Support/GraphWriter.cpp' || echo '$(srcdir)/'`llvm/lib/Support/GraphWriter.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/GraphWriter.Tpo $(DEPDIR)/GraphWriter.Plo
+@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/Support/GraphWriter.cpp' object='GraphWriter.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o GraphWriter.lo `test -f 'llvm/lib/Support/GraphWriter.cpp' || echo '$(srcdir)/'`llvm/lib/Support/GraphWriter.cpp
+
TargetFrameInfo.lo: llvm/lib/Target/TargetFrameInfo.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT TargetFrameInfo.lo -MD -MP -MF $(DEPDIR)/TargetFrameInfo.Tpo -c -o TargetFrameInfo.lo `test -f 'llvm/lib/Target/TargetFrameInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/TargetFrameInfo.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/TargetFrameInfo.Tpo $(DEPDIR)/TargetFrameInfo.Plo
@@ -4287,6 +2828,22 @@ TargetSubtarget.lo: llvm/lib/Target/TargetSubtarget.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o TargetSubtarget.lo `test -f 'llvm/lib/Target/TargetSubtarget.cpp' || echo '$(srcdir)/'`llvm/lib/Target/TargetSubtarget.cpp
+ConstantMerge.lo: llvm/lib/Transforms/IPO/ConstantMerge.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ConstantMerge.lo -MD -MP -MF $(DEPDIR)/ConstantMerge.Tpo -c -o ConstantMerge.lo `test -f 'llvm/lib/Transforms/IPO/ConstantMerge.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/IPO/ConstantMerge.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/ConstantMerge.Tpo $(DEPDIR)/ConstantMerge.Plo
+@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/Transforms/IPO/ConstantMerge.cpp' object='ConstantMerge.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ConstantMerge.lo `test -f 'llvm/lib/Transforms/IPO/ConstantMerge.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/IPO/ConstantMerge.cpp
+
+GlobalOpt.lo: llvm/lib/Transforms/IPO/GlobalOpt.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT GlobalOpt.lo -MD -MP -MF $(DEPDIR)/GlobalOpt.Tpo -c -o GlobalOpt.lo `test -f 'llvm/lib/Transforms/IPO/GlobalOpt.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/IPO/GlobalOpt.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/GlobalOpt.Tpo $(DEPDIR)/GlobalOpt.Plo
+@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/Transforms/IPO/GlobalOpt.cpp' object='GlobalOpt.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o GlobalOpt.lo `test -f 'llvm/lib/Transforms/IPO/GlobalOpt.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/IPO/GlobalOpt.cpp
+
CodeGenPrepare.lo: llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT CodeGenPrepare.lo -MD -MP -MF $(DEPDIR)/CodeGenPrepare.Tpo -c -o CodeGenPrepare.lo `test -f 'llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/CodeGenPrepare.Tpo $(DEPDIR)/CodeGenPrepare.Plo
@@ -4319,6 +2876,14 @@ LoopStrengthReduce.lo: llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o LoopStrengthReduce.lo `test -f 'llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+SCCP.lo: llvm/lib/Transforms/Scalar/SCCP.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT SCCP.lo -MD -MP -MF $(DEPDIR)/SCCP.Tpo -c -o SCCP.lo `test -f 'llvm/lib/Transforms/Scalar/SCCP.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Scalar/SCCP.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/SCCP.Tpo $(DEPDIR)/SCCP.Plo
+@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/Transforms/Scalar/SCCP.cpp' object='SCCP.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o SCCP.lo `test -f 'llvm/lib/Transforms/Scalar/SCCP.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Scalar/SCCP.cpp
+
AddrModeMatcher.lo: llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT AddrModeMatcher.lo -MD -MP -MF $(DEPDIR)/AddrModeMatcher.Tpo -c -o AddrModeMatcher.lo `test -f 'llvm/lib/Transforms/Utils/AddrModeMatcher.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/AddrModeMatcher.Tpo $(DEPDIR)/AddrModeMatcher.Plo
@@ -4343,6 +2908,14 @@ BreakCriticalEdges.lo: llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o BreakCriticalEdges.lo `test -f 'llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
+BuildLibCalls.lo: llvm/lib/Transforms/Utils/BuildLibCalls.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT BuildLibCalls.lo -MD -MP -MF $(DEPDIR)/BuildLibCalls.Tpo -c -o BuildLibCalls.lo `test -f 'llvm/lib/Transforms/Utils/BuildLibCalls.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Utils/BuildLibCalls.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/BuildLibCalls.Tpo $(DEPDIR)/BuildLibCalls.Plo
+@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/Transforms/Utils/BuildLibCalls.cpp' object='BuildLibCalls.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o BuildLibCalls.lo `test -f 'llvm/lib/Transforms/Utils/BuildLibCalls.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Utils/BuildLibCalls.cpp
+
DemoteRegToStack.lo: llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT DemoteRegToStack.lo -MD -MP -MF $(DEPDIR)/DemoteRegToStack.Tpo -c -o DemoteRegToStack.lo `test -f 'llvm/lib/Transforms/Utils/DemoteRegToStack.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DemoteRegToStack.Tpo $(DEPDIR)/DemoteRegToStack.Plo
@@ -4431,126 +3004,6 @@ UnifyFunctionExitNodes.lo: llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o UnifyFunctionExitNodes.lo `test -f 'llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
-GCMetadataPrinter.lo: llvm/lib/CodeGen/GCMetadataPrinter.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT GCMetadataPrinter.lo -MD -MP -MF $(DEPDIR)/GCMetadataPrinter.Tpo -c -o GCMetadataPrinter.lo `test -f 'llvm/lib/CodeGen/GCMetadataPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/GCMetadataPrinter.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/GCMetadataPrinter.Tpo $(DEPDIR)/GCMetadataPrinter.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/CodeGen/GCMetadataPrinter.cpp' object='GCMetadataPrinter.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o GCMetadataPrinter.lo `test -f 'llvm/lib/CodeGen/GCMetadataPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/GCMetadataPrinter.cpp
-
-IfConversion.lo: llvm/lib/CodeGen/IfConversion.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT IfConversion.lo -MD -MP -MF $(DEPDIR)/IfConversion.Tpo -c -o IfConversion.lo `test -f 'llvm/lib/CodeGen/IfConversion.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/IfConversion.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/IfConversion.Tpo $(DEPDIR)/IfConversion.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/CodeGen/IfConversion.cpp' object='IfConversion.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o IfConversion.lo `test -f 'llvm/lib/CodeGen/IfConversion.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/IfConversion.cpp
-
-IntrinsicLowering.lo: llvm/lib/CodeGen/IntrinsicLowering.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT IntrinsicLowering.lo -MD -MP -MF $(DEPDIR)/IntrinsicLowering.Tpo -c -o IntrinsicLowering.lo `test -f 'llvm/lib/CodeGen/IntrinsicLowering.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/IntrinsicLowering.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/IntrinsicLowering.Tpo $(DEPDIR)/IntrinsicLowering.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/CodeGen/IntrinsicLowering.cpp' object='IntrinsicLowering.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o IntrinsicLowering.lo `test -f 'llvm/lib/CodeGen/IntrinsicLowering.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/IntrinsicLowering.cpp
-
-OcamlGC.lo: llvm/lib/CodeGen/OcamlGC.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT OcamlGC.lo -MD -MP -MF $(DEPDIR)/OcamlGC.Tpo -c -o OcamlGC.lo `test -f 'llvm/lib/CodeGen/OcamlGC.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/OcamlGC.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/OcamlGC.Tpo $(DEPDIR)/OcamlGC.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/CodeGen/OcamlGC.cpp' object='OcamlGC.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o OcamlGC.lo `test -f 'llvm/lib/CodeGen/OcamlGC.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/OcamlGC.cpp
-
-RegAllocLocal.lo: llvm/lib/CodeGen/RegAllocLocal.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT RegAllocLocal.lo -MD -MP -MF $(DEPDIR)/RegAllocLocal.Tpo -c -o RegAllocLocal.lo `test -f 'llvm/lib/CodeGen/RegAllocLocal.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/RegAllocLocal.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/RegAllocLocal.Tpo $(DEPDIR)/RegAllocLocal.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/CodeGen/RegAllocLocal.cpp' object='RegAllocLocal.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o RegAllocLocal.lo `test -f 'llvm/lib/CodeGen/RegAllocLocal.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/RegAllocLocal.cpp
-
-RegAllocPBQP.lo: llvm/lib/CodeGen/RegAllocPBQP.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT RegAllocPBQP.lo -MD -MP -MF $(DEPDIR)/RegAllocPBQP.Tpo -c -o RegAllocPBQP.lo `test -f 'llvm/lib/CodeGen/RegAllocPBQP.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/RegAllocPBQP.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/RegAllocPBQP.Tpo $(DEPDIR)/RegAllocPBQP.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/CodeGen/RegAllocPBQP.cpp' object='RegAllocPBQP.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o RegAllocPBQP.lo `test -f 'llvm/lib/CodeGen/RegAllocPBQP.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/RegAllocPBQP.cpp
-
-ShadowStackGC.lo: llvm/lib/CodeGen/ShadowStackGC.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ShadowStackGC.lo -MD -MP -MF $(DEPDIR)/ShadowStackGC.Tpo -c -o ShadowStackGC.lo `test -f 'llvm/lib/CodeGen/ShadowStackGC.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ShadowStackGC.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/ShadowStackGC.Tpo $(DEPDIR)/ShadowStackGC.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/CodeGen/ShadowStackGC.cpp' object='ShadowStackGC.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ShadowStackGC.lo `test -f 'llvm/lib/CodeGen/ShadowStackGC.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ShadowStackGC.cpp
-
-Execution.lo: llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT Execution.lo -MD -MP -MF $(DEPDIR)/Execution.Tpo -c -o Execution.lo `test -f 'llvm/lib/ExecutionEngine/Interpreter/Execution.cpp' || echo '$(srcdir)/'`llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/Execution.Tpo $(DEPDIR)/Execution.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/ExecutionEngine/Interpreter/Execution.cpp' object='Execution.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o Execution.lo `test -f 'llvm/lib/ExecutionEngine/Interpreter/Execution.cpp' || echo '$(srcdir)/'`llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
-
-ExternalFunctions.lo: llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ExternalFunctions.lo -MD -MP -MF $(DEPDIR)/ExternalFunctions.Tpo -c -o ExternalFunctions.lo `test -f 'llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp' || echo '$(srcdir)/'`llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/ExternalFunctions.Tpo $(DEPDIR)/ExternalFunctions.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp' object='ExternalFunctions.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ExternalFunctions.lo `test -f 'llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp' || echo '$(srcdir)/'`llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
-
-Interpreter.lo: llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT Interpreter.lo -MD -MP -MF $(DEPDIR)/Interpreter.Tpo -c -o Interpreter.lo `test -f 'llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp' || echo '$(srcdir)/'`llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/Interpreter.Tpo $(DEPDIR)/Interpreter.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp' object='Interpreter.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o Interpreter.lo `test -f 'llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp' || echo '$(srcdir)/'`llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
-
-Target.lo: llvm/lib/Target/Target.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT Target.lo -MD -MP -MF $(DEPDIR)/Target.Tpo -c -o Target.lo `test -f 'llvm/lib/Target/Target.cpp' || echo '$(srcdir)/'`llvm/lib/Target/Target.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/Target.Tpo $(DEPDIR)/Target.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/Target/Target.cpp' object='Target.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o Target.lo `test -f 'llvm/lib/Target/Target.cpp' || echo '$(srcdir)/'`llvm/lib/Target/Target.cpp
-
-TargetAsmLexer.lo: llvm/lib/Target/TargetAsmLexer.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT TargetAsmLexer.lo -MD -MP -MF $(DEPDIR)/TargetAsmLexer.Tpo -c -o TargetAsmLexer.lo `test -f 'llvm/lib/Target/TargetAsmLexer.cpp' || echo '$(srcdir)/'`llvm/lib/Target/TargetAsmLexer.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/TargetAsmLexer.Tpo $(DEPDIR)/TargetAsmLexer.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/Target/TargetAsmLexer.cpp' object='TargetAsmLexer.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o TargetAsmLexer.lo `test -f 'llvm/lib/Target/TargetAsmLexer.cpp' || echo '$(srcdir)/'`llvm/lib/Target/TargetAsmLexer.cpp
-
-TargetELFWriterInfo.lo: llvm/lib/Target/TargetELFWriterInfo.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT TargetELFWriterInfo.lo -MD -MP -MF $(DEPDIR)/TargetELFWriterInfo.Tpo -c -o TargetELFWriterInfo.lo `test -f 'llvm/lib/Target/TargetELFWriterInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/TargetELFWriterInfo.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/TargetELFWriterInfo.Tpo $(DEPDIR)/TargetELFWriterInfo.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/Target/TargetELFWriterInfo.cpp' object='TargetELFWriterInfo.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o TargetELFWriterInfo.lo `test -f 'llvm/lib/Target/TargetELFWriterInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/TargetELFWriterInfo.cpp
-
-TargetIntrinsicInfo.lo: llvm/lib/Target/TargetIntrinsicInfo.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT TargetIntrinsicInfo.lo -MD -MP -MF $(DEPDIR)/TargetIntrinsicInfo.Tpo -c -o TargetIntrinsicInfo.lo `test -f 'llvm/lib/Target/TargetIntrinsicInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/TargetIntrinsicInfo.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/TargetIntrinsicInfo.Tpo $(DEPDIR)/TargetIntrinsicInfo.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/Target/TargetIntrinsicInfo.cpp' object='TargetIntrinsicInfo.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o TargetIntrinsicInfo.lo `test -f 'llvm/lib/Target/TargetIntrinsicInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/TargetIntrinsicInfo.cpp
-
-CallGraph.lo: llvm/lib/Analysis/IPA/CallGraph.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT CallGraph.lo -MD -MP -MF $(DEPDIR)/CallGraph.Tpo -c -o CallGraph.lo `test -f 'llvm/lib/Analysis/IPA/CallGraph.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/IPA/CallGraph.cpp
-@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/CallGraph.Tpo $(DEPDIR)/CallGraph.Plo
-@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/Analysis/IPA/CallGraph.cpp' object='CallGraph.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o CallGraph.lo `test -f 'llvm/lib/Analysis/IPA/CallGraph.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/IPA/CallGraph.cpp
-
AliasAnalysis.lo: llvm/lib/Analysis/AliasAnalysis.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT AliasAnalysis.lo -MD -MP -MF $(DEPDIR)/AliasAnalysis.Tpo -c -o AliasAnalysis.lo `test -f 'llvm/lib/Analysis/AliasAnalysis.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/AliasAnalysis.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/AliasAnalysis.Tpo $(DEPDIR)/AliasAnalysis.Plo
@@ -4559,6 +3012,14 @@ AliasAnalysis.lo: llvm/lib/Analysis/AliasAnalysis.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o AliasAnalysis.lo `test -f 'llvm/lib/Analysis/AliasAnalysis.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/AliasAnalysis.cpp
+AliasSetTracker.lo: llvm/lib/Analysis/AliasSetTracker.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT AliasSetTracker.lo -MD -MP -MF $(DEPDIR)/AliasSetTracker.Tpo -c -o AliasSetTracker.lo `test -f 'llvm/lib/Analysis/AliasSetTracker.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/AliasSetTracker.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/AliasSetTracker.Tpo $(DEPDIR)/AliasSetTracker.Plo
+@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/Analysis/AliasSetTracker.cpp' object='AliasSetTracker.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o AliasSetTracker.lo `test -f 'llvm/lib/Analysis/AliasSetTracker.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/AliasSetTracker.cpp
+
BasicAliasAnalysis.lo: llvm/lib/Analysis/BasicAliasAnalysis.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT BasicAliasAnalysis.lo -MD -MP -MF $(DEPDIR)/BasicAliasAnalysis.Tpo -c -o BasicAliasAnalysis.lo `test -f 'llvm/lib/Analysis/BasicAliasAnalysis.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/BasicAliasAnalysis.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/BasicAliasAnalysis.Tpo $(DEPDIR)/BasicAliasAnalysis.Plo
@@ -4583,6 +3044,14 @@ DebugInfo.lo: llvm/lib/Analysis/DebugInfo.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o DebugInfo.lo `test -f 'llvm/lib/Analysis/DebugInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/DebugInfo.cpp
+CallGraph.lo: llvm/lib/Analysis/IPA/CallGraph.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT CallGraph.lo -MD -MP -MF $(DEPDIR)/CallGraph.Tpo -c -o CallGraph.lo `test -f 'llvm/lib/Analysis/IPA/CallGraph.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/IPA/CallGraph.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/CallGraph.Tpo $(DEPDIR)/CallGraph.Plo
+@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/Analysis/IPA/CallGraph.cpp' object='CallGraph.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o CallGraph.lo `test -f 'llvm/lib/Analysis/IPA/CallGraph.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/IPA/CallGraph.cpp
+
MemoryBuiltins.lo: llvm/lib/Analysis/MemoryBuiltins.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MemoryBuiltins.lo -MD -MP -MF $(DEPDIR)/MemoryBuiltins.Tpo -c -o MemoryBuiltins.lo `test -f 'llvm/lib/Analysis/MemoryBuiltins.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/MemoryBuiltins.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MemoryBuiltins.Tpo $(DEPDIR)/MemoryBuiltins.Plo
@@ -4599,6 +3068,14 @@ PointerTracking.lo: llvm/lib/Analysis/PointerTracking.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o PointerTracking.lo `test -f 'llvm/lib/Analysis/PointerTracking.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/PointerTracking.cpp
+ProfileInfo.lo: llvm/lib/Analysis/ProfileInfo.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ProfileInfo.lo -MD -MP -MF $(DEPDIR)/ProfileInfo.Tpo -c -o ProfileInfo.lo `test -f 'llvm/lib/Analysis/ProfileInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/ProfileInfo.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/ProfileInfo.Tpo $(DEPDIR)/ProfileInfo.Plo
+@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/Analysis/ProfileInfo.cpp' object='ProfileInfo.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ProfileInfo.lo `test -f 'llvm/lib/Analysis/ProfileInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/ProfileInfo.cpp
+
ValueTracking.lo: llvm/lib/Analysis/ValueTracking.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ValueTracking.lo -MD -MP -MF $(DEPDIR)/ValueTracking.Tpo -c -o ValueTracking.lo `test -f 'llvm/lib/Analysis/ValueTracking.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/ValueTracking.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ValueTracking.Tpo $(DEPDIR)/ValueTracking.Plo
@@ -4623,6 +3100,14 @@ ELFWriter.lo: llvm/lib/CodeGen/ELFWriter.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ELFWriter.lo `test -f 'llvm/lib/CodeGen/ELFWriter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ELFWriter.cpp
+LiveVariables.lo: llvm/lib/CodeGen/LiveVariables.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT LiveVariables.lo -MD -MP -MF $(DEPDIR)/LiveVariables.Tpo -c -o LiveVariables.lo `test -f 'llvm/lib/CodeGen/LiveVariables.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/LiveVariables.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/LiveVariables.Tpo $(DEPDIR)/LiveVariables.Plo
+@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/CodeGen/LiveVariables.cpp' object='LiveVariables.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o LiveVariables.lo `test -f 'llvm/lib/CodeGen/LiveVariables.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/LiveVariables.cpp
+
MachineBasicBlock.lo: llvm/lib/CodeGen/MachineBasicBlock.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MachineBasicBlock.lo -MD -MP -MF $(DEPDIR)/MachineBasicBlock.Tpo -c -o MachineBasicBlock.lo `test -f 'llvm/lib/CodeGen/MachineBasicBlock.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineBasicBlock.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MachineBasicBlock.Tpo $(DEPDIR)/MachineBasicBlock.Plo
@@ -4631,6 +3116,14 @@ MachineBasicBlock.lo: llvm/lib/CodeGen/MachineBasicBlock.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MachineBasicBlock.lo `test -f 'llvm/lib/CodeGen/MachineBasicBlock.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineBasicBlock.cpp
+MachineDominators.lo: llvm/lib/CodeGen/MachineDominators.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MachineDominators.lo -MD -MP -MF $(DEPDIR)/MachineDominators.Tpo -c -o MachineDominators.lo `test -f 'llvm/lib/CodeGen/MachineDominators.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineDominators.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/MachineDominators.Tpo $(DEPDIR)/MachineDominators.Plo
+@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/CodeGen/MachineDominators.cpp' object='MachineDominators.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MachineDominators.lo `test -f 'llvm/lib/CodeGen/MachineDominators.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineDominators.cpp
+
MachineFunction.lo: llvm/lib/CodeGen/MachineFunction.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MachineFunction.lo -MD -MP -MF $(DEPDIR)/MachineFunction.Tpo -c -o MachineFunction.lo `test -f 'llvm/lib/CodeGen/MachineFunction.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineFunction.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MachineFunction.Tpo $(DEPDIR)/MachineFunction.Plo
@@ -4655,6 +3148,14 @@ MachineFunctionPass.lo: llvm/lib/CodeGen/MachineFunctionPass.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MachineFunctionPass.lo `test -f 'llvm/lib/CodeGen/MachineFunctionPass.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineFunctionPass.cpp
+MachineFunctionPrinterPass.lo: llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MachineFunctionPrinterPass.lo -MD -MP -MF $(DEPDIR)/MachineFunctionPrinterPass.Tpo -c -o MachineFunctionPrinterPass.lo `test -f 'llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/MachineFunctionPrinterPass.Tpo $(DEPDIR)/MachineFunctionPrinterPass.Plo
+@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp' object='MachineFunctionPrinterPass.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MachineFunctionPrinterPass.lo `test -f 'llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp
+
MachineInstr.lo: llvm/lib/CodeGen/MachineInstr.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MachineInstr.lo -MD -MP -MF $(DEPDIR)/MachineInstr.Tpo -c -o MachineInstr.lo `test -f 'llvm/lib/CodeGen/MachineInstr.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineInstr.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MachineInstr.Tpo $(DEPDIR)/MachineInstr.Plo
@@ -4663,6 +3164,14 @@ MachineInstr.lo: llvm/lib/CodeGen/MachineInstr.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MachineInstr.lo `test -f 'llvm/lib/CodeGen/MachineInstr.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineInstr.cpp
+MachineLoopInfo.lo: llvm/lib/CodeGen/MachineLoopInfo.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MachineLoopInfo.lo -MD -MP -MF $(DEPDIR)/MachineLoopInfo.Tpo -c -o MachineLoopInfo.lo `test -f 'llvm/lib/CodeGen/MachineLoopInfo.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineLoopInfo.cpp
+@am__fastdepCXX_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/MachineLoopInfo.Tpo $(DEPDIR)/MachineLoopInfo.Plo
+@am__fastdepCXX_FALSE@	$(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	source='llvm/lib/CodeGen/MachineLoopInfo.cpp' object='MachineLoopInfo.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@	DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@	$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MachineLoopInfo.lo `test -f 'llvm/lib/CodeGen/MachineLoopInfo.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineLoopInfo.cpp
+
MachineModuleInfo.lo: llvm/lib/CodeGen/MachineModuleInfo.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MachineModuleInfo.lo -MD -MP -MF $(DEPDIR)/MachineModuleInfo.Tpo -c -o MachineModuleInfo.lo `test -f 'llvm/lib/CodeGen/MachineModuleInfo.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/MachineModuleInfo.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MachineModuleInfo.Tpo $(DEPDIR)/MachineModuleInfo.Plo
@@ -4687,6 +3196,14 @@ ObjectCodeEmitter.lo: llvm/lib/CodeGen/ObjectCodeEmitter.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ObjectCodeEmitter.lo `test -f 'llvm/lib/CodeGen/ObjectCodeEmitter.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ObjectCodeEmitter.cpp
+PostRAHazardRecognizer.lo: llvm/lib/CodeGen/PostRAHazardRecognizer.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT PostRAHazardRecognizer.lo -MD -MP -MF $(DEPDIR)/PostRAHazardRecognizer.Tpo -c -o PostRAHazardRecognizer.lo `test -f 'llvm/lib/CodeGen/PostRAHazardRecognizer.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/PostRAHazardRecognizer.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/PostRAHazardRecognizer.Tpo $(DEPDIR)/PostRAHazardRecognizer.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/PostRAHazardRecognizer.cpp' object='PostRAHazardRecognizer.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o PostRAHazardRecognizer.lo `test -f 'llvm/lib/CodeGen/PostRAHazardRecognizer.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/PostRAHazardRecognizer.cpp
+
PseudoSourceValue.lo: llvm/lib/CodeGen/PseudoSourceValue.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT PseudoSourceValue.lo -MD -MP -MF $(DEPDIR)/PseudoSourceValue.Tpo -c -o PseudoSourceValue.lo `test -f 'llvm/lib/CodeGen/PseudoSourceValue.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/PseudoSourceValue.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/PseudoSourceValue.Tpo $(DEPDIR)/PseudoSourceValue.Plo
@@ -4695,6 +3212,14 @@ PseudoSourceValue.lo: llvm/lib/CodeGen/PseudoSourceValue.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o PseudoSourceValue.lo `test -f 'llvm/lib/CodeGen/PseudoSourceValue.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/PseudoSourceValue.cpp
+ScheduleDAG.lo: llvm/lib/CodeGen/ScheduleDAG.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ScheduleDAG.lo -MD -MP -MF $(DEPDIR)/ScheduleDAG.Tpo -c -o ScheduleDAG.lo `test -f 'llvm/lib/CodeGen/ScheduleDAG.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ScheduleDAG.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ScheduleDAG.Tpo $(DEPDIR)/ScheduleDAG.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/ScheduleDAG.cpp' object='ScheduleDAG.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ScheduleDAG.lo `test -f 'llvm/lib/CodeGen/ScheduleDAG.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/ScheduleDAG.cpp
+
TargetInstrInfoImpl.lo: llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT TargetInstrInfoImpl.lo -MD -MP -MF $(DEPDIR)/TargetInstrInfoImpl.Tpo -c -o TargetInstrInfoImpl.lo `test -f 'llvm/lib/CodeGen/TargetInstrInfoImpl.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/TargetInstrInfoImpl.Tpo $(DEPDIR)/TargetInstrInfoImpl.Plo
@@ -4703,6 +3228,14 @@ TargetInstrInfoImpl.lo: llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o TargetInstrInfoImpl.lo `test -f 'llvm/lib/CodeGen/TargetInstrInfoImpl.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
+UnreachableBlockElim.lo: llvm/lib/CodeGen/UnreachableBlockElim.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT UnreachableBlockElim.lo -MD -MP -MF $(DEPDIR)/UnreachableBlockElim.Tpo -c -o UnreachableBlockElim.lo `test -f 'llvm/lib/CodeGen/UnreachableBlockElim.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/UnreachableBlockElim.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/UnreachableBlockElim.Tpo $(DEPDIR)/UnreachableBlockElim.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/UnreachableBlockElim.cpp' object='UnreachableBlockElim.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o UnreachableBlockElim.lo `test -f 'llvm/lib/CodeGen/UnreachableBlockElim.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/UnreachableBlockElim.cpp
+
ExecutionEngine.lo: llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ExecutionEngine.lo -MD -MP -MF $(DEPDIR)/ExecutionEngine.Tpo -c -o ExecutionEngine.lo `test -f 'llvm/lib/ExecutionEngine/ExecutionEngine.cpp' || echo '$(srcdir)/'`llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ExecutionEngine.Tpo $(DEPDIR)/ExecutionEngine.Plo
@@ -4783,6 +3316,14 @@ MCAsmInfo.lo: llvm/lib/MC/MCAsmInfo.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCAsmInfo.lo `test -f 'llvm/lib/MC/MCAsmInfo.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCAsmInfo.cpp
+MCAssembler.lo: llvm/lib/MC/MCAssembler.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MCAssembler.lo -MD -MP -MF $(DEPDIR)/MCAssembler.Tpo -c -o MCAssembler.lo `test -f 'llvm/lib/MC/MCAssembler.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCAssembler.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MCAssembler.Tpo $(DEPDIR)/MCAssembler.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/MC/MCAssembler.cpp' object='MCAssembler.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCAssembler.lo `test -f 'llvm/lib/MC/MCAssembler.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCAssembler.cpp
+
MCContext.lo: llvm/lib/MC/MCContext.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MCContext.lo -MD -MP -MF $(DEPDIR)/MCContext.Tpo -c -o MCContext.lo `test -f 'llvm/lib/MC/MCContext.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCContext.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MCContext.Tpo $(DEPDIR)/MCContext.Plo
@@ -4799,6 +3340,14 @@ MCExpr.lo: llvm/lib/MC/MCExpr.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCExpr.lo `test -f 'llvm/lib/MC/MCExpr.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCExpr.cpp
+MCInst.lo: llvm/lib/MC/MCInst.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MCInst.lo -MD -MP -MF $(DEPDIR)/MCInst.Tpo -c -o MCInst.lo `test -f 'llvm/lib/MC/MCInst.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCInst.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MCInst.Tpo $(DEPDIR)/MCInst.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/MC/MCInst.cpp' object='MCInst.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCInst.lo `test -f 'llvm/lib/MC/MCInst.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCInst.cpp
+
MCSection.lo: llvm/lib/MC/MCSection.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MCSection.lo -MD -MP -MF $(DEPDIR)/MCSection.Tpo -c -o MCSection.lo `test -f 'llvm/lib/MC/MCSection.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCSection.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MCSection.Tpo $(DEPDIR)/MCSection.Plo
@@ -4807,6 +3356,14 @@ MCSection.lo: llvm/lib/MC/MCSection.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCSection.lo `test -f 'llvm/lib/MC/MCSection.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCSection.cpp
+MCSectionCOFF.lo: llvm/lib/MC/MCSectionCOFF.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MCSectionCOFF.lo -MD -MP -MF $(DEPDIR)/MCSectionCOFF.Tpo -c -o MCSectionCOFF.lo `test -f 'llvm/lib/MC/MCSectionCOFF.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCSectionCOFF.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MCSectionCOFF.Tpo $(DEPDIR)/MCSectionCOFF.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/MC/MCSectionCOFF.cpp' object='MCSectionCOFF.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCSectionCOFF.lo `test -f 'llvm/lib/MC/MCSectionCOFF.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCSectionCOFF.cpp
+
MCSectionELF.lo: llvm/lib/MC/MCSectionELF.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MCSectionELF.lo -MD -MP -MF $(DEPDIR)/MCSectionELF.Tpo -c -o MCSectionELF.lo `test -f 'llvm/lib/MC/MCSectionELF.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCSectionELF.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MCSectionELF.Tpo $(DEPDIR)/MCSectionELF.Plo
@@ -4815,6 +3372,14 @@ MCSectionELF.lo: llvm/lib/MC/MCSectionELF.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCSectionELF.lo `test -f 'llvm/lib/MC/MCSectionELF.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCSectionELF.cpp
+MCSectionMachO.lo: llvm/lib/MC/MCSectionMachO.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MCSectionMachO.lo -MD -MP -MF $(DEPDIR)/MCSectionMachO.Tpo -c -o MCSectionMachO.lo `test -f 'llvm/lib/MC/MCSectionMachO.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCSectionMachO.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MCSectionMachO.Tpo $(DEPDIR)/MCSectionMachO.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/MC/MCSectionMachO.cpp' object='MCSectionMachO.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MCSectionMachO.lo `test -f 'llvm/lib/MC/MCSectionMachO.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCSectionMachO.cpp
+
MCSymbol.lo: llvm/lib/MC/MCSymbol.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MCSymbol.lo -MD -MP -MF $(DEPDIR)/MCSymbol.Tpo -c -o MCSymbol.lo `test -f 'llvm/lib/MC/MCSymbol.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCSymbol.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MCSymbol.Tpo $(DEPDIR)/MCSymbol.Plo
@@ -4903,14 +3468,6 @@ FormattedStream.lo: llvm/lib/Support/FormattedStream.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o FormattedStream.lo `test -f 'llvm/lib/Support/FormattedStream.cpp' || echo '$(srcdir)/'`llvm/lib/Support/FormattedStream.cpp
-GraphWriter.lo: llvm/lib/Support/GraphWriter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT GraphWriter.lo -MD -MP -MF $(DEPDIR)/GraphWriter.Tpo -c -o GraphWriter.lo `test -f 'llvm/lib/Support/GraphWriter.cpp' || echo '$(srcdir)/'`llvm/lib/Support/GraphWriter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/GraphWriter.Tpo $(DEPDIR)/GraphWriter.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Support/GraphWriter.cpp' object='GraphWriter.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o GraphWriter.lo `test -f 'llvm/lib/Support/GraphWriter.cpp' || echo '$(srcdir)/'`llvm/lib/Support/GraphWriter.cpp
-
ManagedStatic.lo: llvm/lib/Support/ManagedStatic.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ManagedStatic.lo -MD -MP -MF $(DEPDIR)/ManagedStatic.Tpo -c -o ManagedStatic.lo `test -f 'llvm/lib/Support/ManagedStatic.cpp' || echo '$(srcdir)/'`llvm/lib/Support/ManagedStatic.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ManagedStatic.Tpo $(DEPDIR)/ManagedStatic.Plo
@@ -5031,14 +3588,6 @@ Twine.lo: llvm/lib/Support/Twine.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o Twine.lo `test -f 'llvm/lib/Support/Twine.cpp' || echo '$(srcdir)/'`llvm/lib/Support/Twine.cpp
-circular_raw_ostream.lo: llvm/lib/Support/circular_raw_ostream.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT circular_raw_ostream.lo -MD -MP -MF $(DEPDIR)/circular_raw_ostream.Tpo -c -o circular_raw_ostream.lo `test -f 'llvm/lib/Support/circular_raw_ostream.cpp' || echo '$(srcdir)/'`llvm/lib/Support/circular_raw_ostream.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/circular_raw_ostream.Tpo $(DEPDIR)/circular_raw_ostream.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Support/circular_raw_ostream.cpp' object='circular_raw_ostream.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o circular_raw_ostream.lo `test -f 'llvm/lib/Support/circular_raw_ostream.cpp' || echo '$(srcdir)/'`llvm/lib/Support/circular_raw_ostream.cpp
-
raw_ostream.lo: llvm/lib/Support/raw_ostream.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT raw_ostream.lo -MD -MP -MF $(DEPDIR)/raw_ostream.Tpo -c -o raw_ostream.lo `test -f 'llvm/lib/Support/raw_ostream.cpp' || echo '$(srcdir)/'`llvm/lib/Support/raw_ostream.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/raw_ostream.Tpo $(DEPDIR)/raw_ostream.Plo
@@ -5047,6 +3596,14 @@ raw_ostream.lo: llvm/lib/Support/raw_ostream.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o raw_ostream.lo `test -f 'llvm/lib/Support/raw_ostream.cpp' || echo '$(srcdir)/'`llvm/lib/Support/raw_ostream.cpp
+circular_raw_ostream.lo: llvm/lib/Support/circular_raw_ostream.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT circular_raw_ostream.lo -MD -MP -MF $(DEPDIR)/circular_raw_ostream.Tpo -c -o circular_raw_ostream.lo `test -f 'llvm/lib/Support/circular_raw_ostream.cpp' || echo '$(srcdir)/'`llvm/lib/Support/circular_raw_ostream.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/circular_raw_ostream.Tpo $(DEPDIR)/circular_raw_ostream.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Support/circular_raw_ostream.cpp' object='circular_raw_ostream.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o circular_raw_ostream.lo `test -f 'llvm/lib/Support/circular_raw_ostream.cpp' || echo '$(srcdir)/'`llvm/lib/Support/circular_raw_ostream.cpp
+
Mangler.lo: llvm/lib/Target/Mangler.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT Mangler.lo -MD -MP -MF $(DEPDIR)/Mangler.Tpo -c -o Mangler.lo `test -f 'llvm/lib/Target/Mangler.cpp' || echo '$(srcdir)/'`llvm/lib/Target/Mangler.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/Mangler.Tpo $(DEPDIR)/Mangler.Plo
@@ -5103,14 +3660,6 @@ TargetRegisterInfo.lo: llvm/lib/Target/TargetRegisterInfo.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o TargetRegisterInfo.lo `test -f 'llvm/lib/Target/TargetRegisterInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/TargetRegisterInfo.cpp
-ADCE.lo: llvm/lib/Transforms/Scalar/ADCE.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ADCE.lo -MD -MP -MF $(DEPDIR)/ADCE.Tpo -c -o ADCE.lo `test -f 'llvm/lib/Transforms/Scalar/ADCE.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Scalar/ADCE.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ADCE.Tpo $(DEPDIR)/ADCE.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Transforms/Scalar/ADCE.cpp' object='ADCE.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ADCE.lo `test -f 'llvm/lib/Transforms/Scalar/ADCE.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Scalar/ADCE.cpp
-
DCE.lo: llvm/lib/Transforms/Scalar/DCE.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT DCE.lo -MD -MP -MF $(DEPDIR)/DCE.Tpo -c -o DCE.lo `test -f 'llvm/lib/Transforms/Scalar/DCE.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Scalar/DCE.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DCE.Tpo $(DEPDIR)/DCE.Plo
@@ -5119,14 +3668,6 @@ DCE.lo: llvm/lib/Transforms/Scalar/DCE.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o DCE.lo `test -f 'llvm/lib/Transforms/Scalar/DCE.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Scalar/DCE.cpp
-SCCP.lo: llvm/lib/Transforms/Scalar/SCCP.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT SCCP.lo -MD -MP -MF $(DEPDIR)/SCCP.Tpo -c -o SCCP.lo `test -f 'llvm/lib/Transforms/Scalar/SCCP.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Scalar/SCCP.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/SCCP.Tpo $(DEPDIR)/SCCP.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Transforms/Scalar/SCCP.cpp' object='SCCP.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o SCCP.lo `test -f 'llvm/lib/Transforms/Scalar/SCCP.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Scalar/SCCP.cpp
-
SimplifyCFGPass.lo: llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT SimplifyCFGPass.lo -MD -MP -MF $(DEPDIR)/SimplifyCFGPass.Tpo -c -o SimplifyCFGPass.lo `test -f 'llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/SimplifyCFGPass.Tpo $(DEPDIR)/SimplifyCFGPass.Plo
@@ -5135,30 +3676,6 @@ SimplifyCFGPass.lo: llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o SimplifyCFGPass.lo `test -f 'llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
-ConstantMerge.lo: llvm/lib/Transforms/IPO/ConstantMerge.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT ConstantMerge.lo -MD -MP -MF $(DEPDIR)/ConstantMerge.Tpo -c -o ConstantMerge.lo `test -f 'llvm/lib/Transforms/IPO/ConstantMerge.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/IPO/ConstantMerge.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/ConstantMerge.Tpo $(DEPDIR)/ConstantMerge.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Transforms/IPO/ConstantMerge.cpp' object='ConstantMerge.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o ConstantMerge.lo `test -f 'llvm/lib/Transforms/IPO/ConstantMerge.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/IPO/ConstantMerge.cpp
-
-GlobalOpt.lo: llvm/lib/Transforms/IPO/GlobalOpt.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT GlobalOpt.lo -MD -MP -MF $(DEPDIR)/GlobalOpt.Tpo -c -o GlobalOpt.lo `test -f 'llvm/lib/Transforms/IPO/GlobalOpt.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/IPO/GlobalOpt.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/GlobalOpt.Tpo $(DEPDIR)/GlobalOpt.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Transforms/IPO/GlobalOpt.cpp' object='GlobalOpt.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o GlobalOpt.lo `test -f 'llvm/lib/Transforms/IPO/GlobalOpt.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/IPO/GlobalOpt.cpp
-
-GlobalDCE.lo: llvm/lib/Transforms/IPO/GlobalDCE.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT GlobalDCE.lo -MD -MP -MF $(DEPDIR)/GlobalDCE.Tpo -c -o GlobalDCE.lo `test -f 'llvm/lib/Transforms/IPO/GlobalDCE.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/IPO/GlobalDCE.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/GlobalDCE.Tpo $(DEPDIR)/GlobalDCE.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Transforms/IPO/GlobalDCE.cpp' object='GlobalDCE.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o GlobalDCE.lo `test -f 'llvm/lib/Transforms/IPO/GlobalDCE.cpp' || echo '$(srcdir)/'`llvm/lib/Transforms/IPO/GlobalDCE.cpp
-
AsmWriter.lo: llvm/lib/VMCore/AsmWriter.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT AsmWriter.lo -MD -MP -MF $(DEPDIR)/AsmWriter.Tpo -c -o AsmWriter.lo `test -f 'llvm/lib/VMCore/AsmWriter.cpp' || echo '$(srcdir)/'`llvm/lib/VMCore/AsmWriter.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/AsmWriter.Tpo $(DEPDIR)/AsmWriter.Plo
@@ -5215,6 +3732,14 @@ Core.lo: llvm/lib/VMCore/Core.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o Core.lo `test -f 'llvm/lib/VMCore/Core.cpp' || echo '$(srcdir)/'`llvm/lib/VMCore/Core.cpp
+DebugLoc.lo: llvm/lib/VMCore/DebugLoc.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT DebugLoc.lo -MD -MP -MF $(DEPDIR)/DebugLoc.Tpo -c -o DebugLoc.lo `test -f 'llvm/lib/VMCore/DebugLoc.cpp' || echo '$(srcdir)/'`llvm/lib/VMCore/DebugLoc.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DebugLoc.Tpo $(DEPDIR)/DebugLoc.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/VMCore/DebugLoc.cpp' object='DebugLoc.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o DebugLoc.lo `test -f 'llvm/lib/VMCore/DebugLoc.cpp' || echo '$(srcdir)/'`llvm/lib/VMCore/DebugLoc.cpp
+
Dominators.lo: llvm/lib/VMCore/Dominators.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT Dominators.lo -MD -MP -MF $(DEPDIR)/Dominators.Tpo -c -o Dominators.lo `test -f 'llvm/lib/VMCore/Dominators.cpp' || echo '$(srcdir)/'`llvm/lib/VMCore/Dominators.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/Dominators.Tpo $(DEPDIR)/Dominators.Plo
@@ -5343,6 +3868,14 @@ PassManager.lo: llvm/lib/VMCore/PassManager.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o PassManager.lo `test -f 'llvm/lib/VMCore/PassManager.cpp' || echo '$(srcdir)/'`llvm/lib/VMCore/PassManager.cpp
+PassRegistry.lo: llvm/lib/VMCore/PassRegistry.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT PassRegistry.lo -MD -MP -MF $(DEPDIR)/PassRegistry.Tpo -c -o PassRegistry.lo `test -f 'llvm/lib/VMCore/PassRegistry.cpp' || echo '$(srcdir)/'`llvm/lib/VMCore/PassRegistry.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/PassRegistry.Tpo $(DEPDIR)/PassRegistry.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/VMCore/PassRegistry.cpp' object='PassRegistry.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o PassRegistry.lo `test -f 'llvm/lib/VMCore/PassRegistry.cpp' || echo '$(srcdir)/'`llvm/lib/VMCore/PassRegistry.cpp
+
PrintModulePass.lo: llvm/lib/VMCore/PrintModulePass.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT PrintModulePass.lo -MD -MP -MF $(DEPDIR)/PrintModulePass.Tpo -c -o PrintModulePass.lo `test -f 'llvm/lib/VMCore/PrintModulePass.cpp' || echo '$(srcdir)/'`llvm/lib/VMCore/PrintModulePass.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/PrintModulePass.Tpo $(DEPDIR)/PrintModulePass.Plo
@@ -5487,6 +4020,14 @@ libllvmpowerpccodegen_la-PPCRegisterInfo.lo: llvm/lib/Target/PowerPC/PPCRegister
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmpowerpccodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmpowerpccodegen_la-PPCRegisterInfo.lo `test -f 'llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+libllvmpowerpccodegen_la-PPCSelectionDAGInfo.lo: llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmpowerpccodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmpowerpccodegen_la-PPCSelectionDAGInfo.lo -MD -MP -MF $(DEPDIR)/libllvmpowerpccodegen_la-PPCSelectionDAGInfo.Tpo -c -o libllvmpowerpccodegen_la-PPCSelectionDAGInfo.lo `test -f 'llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmpowerpccodegen_la-PPCSelectionDAGInfo.Tpo $(DEPDIR)/libllvmpowerpccodegen_la-PPCSelectionDAGInfo.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp' object='libllvmpowerpccodegen_la-PPCSelectionDAGInfo.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmpowerpccodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmpowerpccodegen_la-PPCSelectionDAGInfo.lo `test -f 'llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp
+
libllvmpowerpccodegen_la-PPCSubtarget.lo: llvm/lib/Target/PowerPC/PPCSubtarget.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmpowerpccodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmpowerpccodegen_la-PPCSubtarget.lo -MD -MP -MF $(DEPDIR)/libllvmpowerpccodegen_la-PPCSubtarget.Tpo -c -o libllvmpowerpccodegen_la-PPCSubtarget.lo `test -f 'llvm/lib/Target/PowerPC/PPCSubtarget.cpp' || echo '$(srcdir)/'`llvm/lib/Target/PowerPC/PPCSubtarget.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmpowerpccodegen_la-PPCSubtarget.Tpo $(DEPDIR)/libllvmpowerpccodegen_la-PPCSubtarget.Plo
@@ -5511,94 +4052,6 @@ libllvmpowerpccodegen_la-PowerPCTargetInfo.lo: llvm/lib/Target/PowerPC/TargetInf
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmpowerpccodegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmpowerpccodegen_la-PowerPCTargetInfo.lo `test -f 'llvm/lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/PowerPC/TargetInfo/PowerPCTargetInfo.cpp
-APSInt.lo: llvm/lib/Support/APSInt.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT APSInt.lo -MD -MP -MF $(DEPDIR)/APSInt.Tpo -c -o APSInt.lo `test -f 'llvm/lib/Support/APSInt.cpp' || echo '$(srcdir)/'`llvm/lib/Support/APSInt.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/APSInt.Tpo $(DEPDIR)/APSInt.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Support/APSInt.cpp' object='APSInt.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o APSInt.lo `test -f 'llvm/lib/Support/APSInt.cpp' || echo '$(srcdir)/'`llvm/lib/Support/APSInt.cpp
-
-DeltaAlgorithm.lo: llvm/lib/Support/DeltaAlgorithm.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT DeltaAlgorithm.lo -MD -MP -MF $(DEPDIR)/DeltaAlgorithm.Tpo -c -o DeltaAlgorithm.lo `test -f 'llvm/lib/Support/DeltaAlgorithm.cpp' || echo '$(srcdir)/'`llvm/lib/Support/DeltaAlgorithm.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/DeltaAlgorithm.Tpo $(DEPDIR)/DeltaAlgorithm.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Support/DeltaAlgorithm.cpp' object='DeltaAlgorithm.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o DeltaAlgorithm.lo `test -f 'llvm/lib/Support/DeltaAlgorithm.cpp' || echo '$(srcdir)/'`llvm/lib/Support/DeltaAlgorithm.cpp
-
-FileUtilities.lo: llvm/lib/Support/FileUtilities.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT FileUtilities.lo -MD -MP -MF $(DEPDIR)/FileUtilities.Tpo -c -o FileUtilities.lo `test -f 'llvm/lib/Support/FileUtilities.cpp' || echo '$(srcdir)/'`llvm/lib/Support/FileUtilities.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/FileUtilities.Tpo $(DEPDIR)/FileUtilities.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Support/FileUtilities.cpp' object='FileUtilities.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o FileUtilities.lo `test -f 'llvm/lib/Support/FileUtilities.cpp' || echo '$(srcdir)/'`llvm/lib/Support/FileUtilities.cpp
-
-IsInf.lo: llvm/lib/Support/IsInf.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT IsInf.lo -MD -MP -MF $(DEPDIR)/IsInf.Tpo -c -o IsInf.lo `test -f 'llvm/lib/Support/IsInf.cpp' || echo '$(srcdir)/'`llvm/lib/Support/IsInf.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/IsInf.Tpo $(DEPDIR)/IsInf.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Support/IsInf.cpp' object='IsInf.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o IsInf.lo `test -f 'llvm/lib/Support/IsInf.cpp' || echo '$(srcdir)/'`llvm/lib/Support/IsInf.cpp
-
-IsNAN.lo: llvm/lib/Support/IsNAN.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT IsNAN.lo -MD -MP -MF $(DEPDIR)/IsNAN.Tpo -c -o IsNAN.lo `test -f 'llvm/lib/Support/IsNAN.cpp' || echo '$(srcdir)/'`llvm/lib/Support/IsNAN.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/IsNAN.Tpo $(DEPDIR)/IsNAN.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Support/IsNAN.cpp' object='IsNAN.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o IsNAN.lo `test -f 'llvm/lib/Support/IsNAN.cpp' || echo '$(srcdir)/'`llvm/lib/Support/IsNAN.cpp
-
-MemoryObject.lo: llvm/lib/Support/MemoryObject.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MemoryObject.lo -MD -MP -MF $(DEPDIR)/MemoryObject.Tpo -c -o MemoryObject.lo `test -f 'llvm/lib/Support/MemoryObject.cpp' || echo '$(srcdir)/'`llvm/lib/Support/MemoryObject.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/MemoryObject.Tpo $(DEPDIR)/MemoryObject.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Support/MemoryObject.cpp' object='MemoryObject.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MemoryObject.lo `test -f 'llvm/lib/Support/MemoryObject.cpp' || echo '$(srcdir)/'`llvm/lib/Support/MemoryObject.cpp
-
-PluginLoader.lo: llvm/lib/Support/PluginLoader.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT PluginLoader.lo -MD -MP -MF $(DEPDIR)/PluginLoader.Tpo -c -o PluginLoader.lo `test -f 'llvm/lib/Support/PluginLoader.cpp' || echo '$(srcdir)/'`llvm/lib/Support/PluginLoader.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/PluginLoader.Tpo $(DEPDIR)/PluginLoader.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Support/PluginLoader.cpp' object='PluginLoader.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o PluginLoader.lo `test -f 'llvm/lib/Support/PluginLoader.cpp' || echo '$(srcdir)/'`llvm/lib/Support/PluginLoader.cpp
-
-Regex.lo: llvm/lib/Support/Regex.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT Regex.lo -MD -MP -MF $(DEPDIR)/Regex.Tpo -c -o Regex.lo `test -f 'llvm/lib/Support/Regex.cpp' || echo '$(srcdir)/'`llvm/lib/Support/Regex.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/Regex.Tpo $(DEPDIR)/Regex.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Support/Regex.cpp' object='Regex.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o Regex.lo `test -f 'llvm/lib/Support/Regex.cpp' || echo '$(srcdir)/'`llvm/lib/Support/Regex.cpp
-
-SlowOperationInformer.lo: llvm/lib/Support/SlowOperationInformer.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT SlowOperationInformer.lo -MD -MP -MF $(DEPDIR)/SlowOperationInformer.Tpo -c -o SlowOperationInformer.lo `test -f 'llvm/lib/Support/SlowOperationInformer.cpp' || echo '$(srcdir)/'`llvm/lib/Support/SlowOperationInformer.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/SlowOperationInformer.Tpo $(DEPDIR)/SlowOperationInformer.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Support/SlowOperationInformer.cpp' object='SlowOperationInformer.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o SlowOperationInformer.lo `test -f 'llvm/lib/Support/SlowOperationInformer.cpp' || echo '$(srcdir)/'`llvm/lib/Support/SlowOperationInformer.cpp
-
-SystemUtils.lo: llvm/lib/Support/SystemUtils.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT SystemUtils.lo -MD -MP -MF $(DEPDIR)/SystemUtils.Tpo -c -o SystemUtils.lo `test -f 'llvm/lib/Support/SystemUtils.cpp' || echo '$(srcdir)/'`llvm/lib/Support/SystemUtils.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/SystemUtils.Tpo $(DEPDIR)/SystemUtils.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Support/SystemUtils.cpp' object='SystemUtils.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o SystemUtils.lo `test -f 'llvm/lib/Support/SystemUtils.cpp' || echo '$(srcdir)/'`llvm/lib/Support/SystemUtils.cpp
-
-raw_os_ostream.lo: llvm/lib/Support/raw_os_ostream.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT raw_os_ostream.lo -MD -MP -MF $(DEPDIR)/raw_os_ostream.Tpo -c -o raw_os_ostream.lo `test -f 'llvm/lib/Support/raw_os_ostream.cpp' || echo '$(srcdir)/'`llvm/lib/Support/raw_os_ostream.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/raw_os_ostream.Tpo $(DEPDIR)/raw_os_ostream.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Support/raw_os_ostream.cpp' object='raw_os_ostream.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o raw_os_ostream.lo `test -f 'llvm/lib/Support/raw_os_ostream.cpp' || echo '$(srcdir)/'`llvm/lib/Support/raw_os_ostream.cpp
-
Alarm.lo: llvm/lib/System/Alarm.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT Alarm.lo -MD -MP -MF $(DEPDIR)/Alarm.Tpo -c -o Alarm.lo `test -f 'llvm/lib/System/Alarm.cpp' || echo '$(srcdir)/'`llvm/lib/System/Alarm.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/Alarm.Tpo $(DEPDIR)/Alarm.Plo
@@ -5735,6 +4188,22 @@ TimeValue.lo: llvm/lib/System/TimeValue.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o TimeValue.lo `test -f 'llvm/lib/System/TimeValue.cpp' || echo '$(srcdir)/'`llvm/lib/System/TimeValue.cpp
+Valgrind.lo: llvm/lib/System/Valgrind.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT Valgrind.lo -MD -MP -MF $(DEPDIR)/Valgrind.Tpo -c -o Valgrind.lo `test -f 'llvm/lib/System/Valgrind.cpp' || echo '$(srcdir)/'`llvm/lib/System/Valgrind.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/Valgrind.Tpo $(DEPDIR)/Valgrind.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/System/Valgrind.cpp' object='Valgrind.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o Valgrind.lo `test -f 'llvm/lib/System/Valgrind.cpp' || echo '$(srcdir)/'`llvm/lib/System/Valgrind.cpp
+
+libllvmx86codegen_la-ELFObjectWriter.lo: llvm/lib/MC/ELFObjectWriter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-ELFObjectWriter.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-ELFObjectWriter.Tpo -c -o libllvmx86codegen_la-ELFObjectWriter.lo `test -f 'llvm/lib/MC/ELFObjectWriter.cpp' || echo '$(srcdir)/'`llvm/lib/MC/ELFObjectWriter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-ELFObjectWriter.Tpo $(DEPDIR)/libllvmx86codegen_la-ELFObjectWriter.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/MC/ELFObjectWriter.cpp' object='libllvmx86codegen_la-ELFObjectWriter.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-ELFObjectWriter.lo `test -f 'llvm/lib/MC/ELFObjectWriter.cpp' || echo '$(srcdir)/'`llvm/lib/MC/ELFObjectWriter.cpp
+
libllvmx86codegen_la-MCAsmInfoCOFF.lo: llvm/lib/MC/MCAsmInfoCOFF.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-MCAsmInfoCOFF.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-MCAsmInfoCOFF.Tpo -c -o libllvmx86codegen_la-MCAsmInfoCOFF.lo `test -f 'llvm/lib/MC/MCAsmInfoCOFF.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCAsmInfoCOFF.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-MCAsmInfoCOFF.Tpo $(DEPDIR)/libllvmx86codegen_la-MCAsmInfoCOFF.Plo
@@ -5751,6 +4220,46 @@ libllvmx86codegen_la-MCCodeEmitter.lo: llvm/lib/MC/MCCodeEmitter.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-MCCodeEmitter.lo `test -f 'llvm/lib/MC/MCCodeEmitter.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCCodeEmitter.cpp
+libllvmx86codegen_la-MCELFStreamer.lo: llvm/lib/MC/MCELFStreamer.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-MCELFStreamer.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-MCELFStreamer.Tpo -c -o libllvmx86codegen_la-MCELFStreamer.lo `test -f 'llvm/lib/MC/MCELFStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCELFStreamer.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-MCELFStreamer.Tpo $(DEPDIR)/libllvmx86codegen_la-MCELFStreamer.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/MC/MCELFStreamer.cpp' object='libllvmx86codegen_la-MCELFStreamer.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-MCELFStreamer.lo `test -f 'llvm/lib/MC/MCELFStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCELFStreamer.cpp
+
+libllvmx86codegen_la-MCMachOStreamer.lo: llvm/lib/MC/MCMachOStreamer.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-MCMachOStreamer.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-MCMachOStreamer.Tpo -c -o libllvmx86codegen_la-MCMachOStreamer.lo `test -f 'llvm/lib/MC/MCMachOStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCMachOStreamer.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-MCMachOStreamer.Tpo $(DEPDIR)/libllvmx86codegen_la-MCMachOStreamer.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/MC/MCMachOStreamer.cpp' object='libllvmx86codegen_la-MCMachOStreamer.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-MCMachOStreamer.lo `test -f 'llvm/lib/MC/MCMachOStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCMachOStreamer.cpp
+
+libllvmx86codegen_la-MCObjectStreamer.lo: llvm/lib/MC/MCObjectStreamer.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-MCObjectStreamer.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-MCObjectStreamer.Tpo -c -o libllvmx86codegen_la-MCObjectStreamer.lo `test -f 'llvm/lib/MC/MCObjectStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCObjectStreamer.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-MCObjectStreamer.Tpo $(DEPDIR)/libllvmx86codegen_la-MCObjectStreamer.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/MC/MCObjectStreamer.cpp' object='libllvmx86codegen_la-MCObjectStreamer.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-MCObjectStreamer.lo `test -f 'llvm/lib/MC/MCObjectStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCObjectStreamer.cpp
+
+libllvmx86codegen_la-MCObjectWriter.lo: llvm/lib/MC/MCObjectWriter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-MCObjectWriter.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-MCObjectWriter.Tpo -c -o libllvmx86codegen_la-MCObjectWriter.lo `test -f 'llvm/lib/MC/MCObjectWriter.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCObjectWriter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-MCObjectWriter.Tpo $(DEPDIR)/libllvmx86codegen_la-MCObjectWriter.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/MC/MCObjectWriter.cpp' object='libllvmx86codegen_la-MCObjectWriter.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-MCObjectWriter.lo `test -f 'llvm/lib/MC/MCObjectWriter.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCObjectWriter.cpp
+
+libllvmx86codegen_la-MachObjectWriter.lo: llvm/lib/MC/MachObjectWriter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-MachObjectWriter.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-MachObjectWriter.Tpo -c -o libllvmx86codegen_la-MachObjectWriter.lo `test -f 'llvm/lib/MC/MachObjectWriter.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MachObjectWriter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-MachObjectWriter.Tpo $(DEPDIR)/libllvmx86codegen_la-MachObjectWriter.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/MC/MachObjectWriter.cpp' object='libllvmx86codegen_la-MachObjectWriter.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-MachObjectWriter.lo `test -f 'llvm/lib/MC/MachObjectWriter.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MachObjectWriter.cpp
+
libllvmx86codegen_la-TargetAsmBackend.lo: llvm/lib/MC/TargetAsmBackend.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-TargetAsmBackend.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-TargetAsmBackend.Tpo -c -o libllvmx86codegen_la-TargetAsmBackend.lo `test -f 'llvm/lib/MC/TargetAsmBackend.cpp' || echo '$(srcdir)/'`llvm/lib/MC/TargetAsmBackend.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-TargetAsmBackend.Tpo $(DEPDIR)/libllvmx86codegen_la-TargetAsmBackend.Plo
@@ -5759,6 +4268,22 @@ libllvmx86codegen_la-TargetAsmBackend.lo: llvm/lib/MC/TargetAsmBackend.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-TargetAsmBackend.lo `test -f 'llvm/lib/MC/TargetAsmBackend.cpp' || echo '$(srcdir)/'`llvm/lib/MC/TargetAsmBackend.cpp
+libllvmx86codegen_la-WinCOFFObjectWriter.lo: llvm/lib/MC/WinCOFFObjectWriter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-WinCOFFObjectWriter.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-WinCOFFObjectWriter.Tpo -c -o libllvmx86codegen_la-WinCOFFObjectWriter.lo `test -f 'llvm/lib/MC/WinCOFFObjectWriter.cpp' || echo '$(srcdir)/'`llvm/lib/MC/WinCOFFObjectWriter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-WinCOFFObjectWriter.Tpo $(DEPDIR)/libllvmx86codegen_la-WinCOFFObjectWriter.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/MC/WinCOFFObjectWriter.cpp' object='libllvmx86codegen_la-WinCOFFObjectWriter.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-WinCOFFObjectWriter.lo `test -f 'llvm/lib/MC/WinCOFFObjectWriter.cpp' || echo '$(srcdir)/'`llvm/lib/MC/WinCOFFObjectWriter.cpp
+
+libllvmx86codegen_la-WinCOFFStreamer.lo: llvm/lib/MC/WinCOFFStreamer.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-WinCOFFStreamer.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-WinCOFFStreamer.Tpo -c -o libllvmx86codegen_la-WinCOFFStreamer.lo `test -f 'llvm/lib/MC/WinCOFFStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/WinCOFFStreamer.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-WinCOFFStreamer.Tpo $(DEPDIR)/libllvmx86codegen_la-WinCOFFStreamer.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/MC/WinCOFFStreamer.cpp' object='libllvmx86codegen_la-WinCOFFStreamer.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-WinCOFFStreamer.lo `test -f 'llvm/lib/MC/WinCOFFStreamer.cpp' || echo '$(srcdir)/'`llvm/lib/MC/WinCOFFStreamer.cpp
+
libllvmx86codegen_la-TargetELFWriterInfo.lo: llvm/lib/Target/TargetELFWriterInfo.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-TargetELFWriterInfo.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-TargetELFWriterInfo.Tpo -c -o libllvmx86codegen_la-TargetELFWriterInfo.lo `test -f 'llvm/lib/Target/TargetELFWriterInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/TargetELFWriterInfo.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-TargetELFWriterInfo.Tpo $(DEPDIR)/libllvmx86codegen_la-TargetELFWriterInfo.Plo
@@ -5767,6 +4292,14 @@ libllvmx86codegen_la-TargetELFWriterInfo.lo: llvm/lib/Target/TargetELFWriterInfo
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-TargetELFWriterInfo.lo `test -f 'llvm/lib/Target/TargetELFWriterInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/TargetELFWriterInfo.cpp
+libllvmx86codegen_la-SSEDomainFix.lo: llvm/lib/Target/X86/SSEDomainFix.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-SSEDomainFix.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-SSEDomainFix.Tpo -c -o libllvmx86codegen_la-SSEDomainFix.lo `test -f 'llvm/lib/Target/X86/SSEDomainFix.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/SSEDomainFix.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-SSEDomainFix.Tpo $(DEPDIR)/libllvmx86codegen_la-SSEDomainFix.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/X86/SSEDomainFix.cpp' object='libllvmx86codegen_la-SSEDomainFix.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-SSEDomainFix.lo `test -f 'llvm/lib/Target/X86/SSEDomainFix.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/SSEDomainFix.cpp
+
libllvmx86codegen_la-X86TargetInfo.lo: llvm/lib/Target/X86/TargetInfo/X86TargetInfo.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-X86TargetInfo.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-X86TargetInfo.Tpo -c -o libllvmx86codegen_la-X86TargetInfo.lo `test -f 'llvm/lib/Target/X86/TargetInfo/X86TargetInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/TargetInfo/X86TargetInfo.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-X86TargetInfo.Tpo $(DEPDIR)/libllvmx86codegen_la-X86TargetInfo.Plo
@@ -5823,14 +4356,6 @@ libllvmx86codegen_la-X86FloatingPoint.lo: llvm/lib/Target/X86/X86FloatingPoint.c
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-X86FloatingPoint.lo `test -f 'llvm/lib/Target/X86/X86FloatingPoint.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/X86FloatingPoint.cpp
-libllvmx86codegen_la-X86FloatingPointRegKill.lo: llvm/lib/Target/X86/X86FloatingPointRegKill.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-X86FloatingPointRegKill.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-X86FloatingPointRegKill.Tpo -c -o libllvmx86codegen_la-X86FloatingPointRegKill.lo `test -f 'llvm/lib/Target/X86/X86FloatingPointRegKill.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/X86FloatingPointRegKill.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-X86FloatingPointRegKill.Tpo $(DEPDIR)/libllvmx86codegen_la-X86FloatingPointRegKill.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/X86/X86FloatingPointRegKill.cpp' object='libllvmx86codegen_la-X86FloatingPointRegKill.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-X86FloatingPointRegKill.lo `test -f 'llvm/lib/Target/X86/X86FloatingPointRegKill.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/X86FloatingPointRegKill.cpp
-
libllvmx86codegen_la-X86ISelDAGToDAG.lo: llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-X86ISelDAGToDAG.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-X86ISelDAGToDAG.Tpo -c -o libllvmx86codegen_la-X86ISelDAGToDAG.lo `test -f 'llvm/lib/Target/X86/X86ISelDAGToDAG.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-X86ISelDAGToDAG.Tpo $(DEPDIR)/libllvmx86codegen_la-X86ISelDAGToDAG.Plo
@@ -5879,14 +4404,6 @@ libllvmx86codegen_la-X86MCCodeEmitter.lo: llvm/lib/Target/X86/X86MCCodeEmitter.c
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-X86MCCodeEmitter.lo `test -f 'llvm/lib/Target/X86/X86MCCodeEmitter.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/X86MCCodeEmitter.cpp
-libllvmx86codegen_la-X86MCTargetExpr.lo: llvm/lib/Target/X86/X86MCTargetExpr.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-X86MCTargetExpr.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-X86MCTargetExpr.Tpo -c -o libllvmx86codegen_la-X86MCTargetExpr.lo `test -f 'llvm/lib/Target/X86/X86MCTargetExpr.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/X86MCTargetExpr.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-X86MCTargetExpr.Tpo $(DEPDIR)/libllvmx86codegen_la-X86MCTargetExpr.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/X86/X86MCTargetExpr.cpp' object='libllvmx86codegen_la-X86MCTargetExpr.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-X86MCTargetExpr.lo `test -f 'llvm/lib/Target/X86/X86MCTargetExpr.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/X86MCTargetExpr.cpp
-
libllvmx86codegen_la-X86RegisterInfo.lo: llvm/lib/Target/X86/X86RegisterInfo.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-X86RegisterInfo.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-X86RegisterInfo.Tpo -c -o libllvmx86codegen_la-X86RegisterInfo.lo `test -f 'llvm/lib/Target/X86/X86RegisterInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/X86RegisterInfo.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-X86RegisterInfo.Tpo $(DEPDIR)/libllvmx86codegen_la-X86RegisterInfo.Plo
@@ -5895,6 +4412,14 @@ libllvmx86codegen_la-X86RegisterInfo.lo: llvm/lib/Target/X86/X86RegisterInfo.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-X86RegisterInfo.lo `test -f 'llvm/lib/Target/X86/X86RegisterInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/X86RegisterInfo.cpp
+libllvmx86codegen_la-X86SelectionDAGInfo.lo: llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-X86SelectionDAGInfo.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-X86SelectionDAGInfo.Tpo -c -o libllvmx86codegen_la-X86SelectionDAGInfo.lo `test -f 'llvm/lib/Target/X86/X86SelectionDAGInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-X86SelectionDAGInfo.Tpo $(DEPDIR)/libllvmx86codegen_la-X86SelectionDAGInfo.Plo
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Target/X86/X86SelectionDAGInfo.cpp' object='libllvmx86codegen_la-X86SelectionDAGInfo.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-X86SelectionDAGInfo.lo `test -f 'llvm/lib/Target/X86/X86SelectionDAGInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
+
libllvmx86codegen_la-X86Subtarget.lo: llvm/lib/Target/X86/X86Subtarget.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT libllvmx86codegen_la-X86Subtarget.lo -MD -MP -MF $(DEPDIR)/libllvmx86codegen_la-X86Subtarget.Tpo -c -o libllvmx86codegen_la-X86Subtarget.lo `test -f 'llvm/lib/Target/X86/X86Subtarget.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/X86Subtarget.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libllvmx86codegen_la-X86Subtarget.Tpo $(DEPDIR)/libllvmx86codegen_la-X86Subtarget.Plo
@@ -5919,597 +4444,21 @@ libllvmx86codegen_la-X86TargetObjectFile.lo: llvm/lib/Target/X86/X86TargetObject
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libllvmx86codegen_la_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o libllvmx86codegen_la-X86TargetObjectFile.lo `test -f 'llvm/lib/Target/X86/X86TargetObjectFile.cpp' || echo '$(srcdir)/'`llvm/lib/Target/X86/X86TargetObjectFile.cpp
-FileCheck-FileCheck.o: llvm/utils/FileCheck/FileCheck.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(FileCheck_CPPFLAGS) $(CPPFLAGS) $(FileCheck_CXXFLAGS) $(CXXFLAGS) -MT FileCheck-FileCheck.o -MD -MP -MF $(DEPDIR)/FileCheck-FileCheck.Tpo -c -o FileCheck-FileCheck.o `test -f 'llvm/utils/FileCheck/FileCheck.cpp' || echo '$(srcdir)/'`llvm/utils/FileCheck/FileCheck.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/FileCheck-FileCheck.Tpo $(DEPDIR)/FileCheck-FileCheck.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/FileCheck/FileCheck.cpp' object='FileCheck-FileCheck.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(FileCheck_CPPFLAGS) $(CPPFLAGS) $(FileCheck_CXXFLAGS) $(CXXFLAGS) -c -o FileCheck-FileCheck.o `test -f 'llvm/utils/FileCheck/FileCheck.cpp' || echo '$(srcdir)/'`llvm/utils/FileCheck/FileCheck.cpp
-
-FileCheck-FileCheck.obj: llvm/utils/FileCheck/FileCheck.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(FileCheck_CPPFLAGS) $(CPPFLAGS) $(FileCheck_CXXFLAGS) $(CXXFLAGS) -MT FileCheck-FileCheck.obj -MD -MP -MF $(DEPDIR)/FileCheck-FileCheck.Tpo -c -o FileCheck-FileCheck.obj `if test -f 'llvm/utils/FileCheck/FileCheck.cpp'; then $(CYGPATH_W) 'llvm/utils/FileCheck/FileCheck.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/FileCheck/FileCheck.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/FileCheck-FileCheck.Tpo $(DEPDIR)/FileCheck-FileCheck.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/FileCheck/FileCheck.cpp' object='FileCheck-FileCheck.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(FileCheck_CPPFLAGS) $(CPPFLAGS) $(FileCheck_CXXFLAGS) $(CXXFLAGS) -c -o FileCheck-FileCheck.obj `if test -f 'llvm/utils/FileCheck/FileCheck.cpp'; then $(CYGPATH_W) 'llvm/utils/FileCheck/FileCheck.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/FileCheck/FileCheck.cpp'; fi`
-
-llc-MCInstPrinter.o: llvm/lib/MC/MCInstPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llc_CPPFLAGS) $(CPPFLAGS) $(llc_CXXFLAGS) $(CXXFLAGS) -MT llc-MCInstPrinter.o -MD -MP -MF $(DEPDIR)/llc-MCInstPrinter.Tpo -c -o llc-MCInstPrinter.o `test -f 'llvm/lib/MC/MCInstPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCInstPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llc-MCInstPrinter.Tpo $(DEPDIR)/llc-MCInstPrinter.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/MC/MCInstPrinter.cpp' object='llc-MCInstPrinter.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llc_CPPFLAGS) $(CPPFLAGS) $(llc_CXXFLAGS) $(CXXFLAGS) -c -o llc-MCInstPrinter.o `test -f 'llvm/lib/MC/MCInstPrinter.cpp' || echo '$(srcdir)/'`llvm/lib/MC/MCInstPrinter.cpp
-
-llc-MCInstPrinter.obj: llvm/lib/MC/MCInstPrinter.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llc_CPPFLAGS) $(CPPFLAGS) $(llc_CXXFLAGS) $(CXXFLAGS) -MT llc-MCInstPrinter.obj -MD -MP -MF $(DEPDIR)/llc-MCInstPrinter.Tpo -c -o llc-MCInstPrinter.obj `if test -f 'llvm/lib/MC/MCInstPrinter.cpp'; then $(CYGPATH_W) 'llvm/lib/MC/MCInstPrinter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/MC/MCInstPrinter.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llc-MCInstPrinter.Tpo $(DEPDIR)/llc-MCInstPrinter.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/MC/MCInstPrinter.cpp' object='llc-MCInstPrinter.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llc_CPPFLAGS) $(CPPFLAGS) $(llc_CXXFLAGS) $(CXXFLAGS) -c -o llc-MCInstPrinter.obj `if test -f 'llvm/lib/MC/MCInstPrinter.cpp'; then $(CYGPATH_W) 'llvm/lib/MC/MCInstPrinter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/MC/MCInstPrinter.cpp'; fi`
-
-llc-llc.o: llvm/tools/llc/llc.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llc_CPPFLAGS) $(CPPFLAGS) $(llc_CXXFLAGS) $(CXXFLAGS) -MT llc-llc.o -MD -MP -MF $(DEPDIR)/llc-llc.Tpo -c -o llc-llc.o `test -f 'llvm/tools/llc/llc.cpp' || echo '$(srcdir)/'`llvm/tools/llc/llc.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llc-llc.Tpo $(DEPDIR)/llc-llc.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/tools/llc/llc.cpp' object='llc-llc.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llc_CPPFLAGS) $(CPPFLAGS) $(llc_CXXFLAGS) $(CXXFLAGS) -c -o llc-llc.o `test -f 'llvm/tools/llc/llc.cpp' || echo '$(srcdir)/'`llvm/tools/llc/llc.cpp
-
-llc-llc.obj: llvm/tools/llc/llc.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llc_CPPFLAGS) $(CPPFLAGS) $(llc_CXXFLAGS) $(CXXFLAGS) -MT llc-llc.obj -MD -MP -MF $(DEPDIR)/llc-llc.Tpo -c -o llc-llc.obj `if test -f 'llvm/tools/llc/llc.cpp'; then $(CYGPATH_W) 'llvm/tools/llc/llc.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/tools/llc/llc.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llc-llc.Tpo $(DEPDIR)/llc-llc.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/tools/llc/llc.cpp' object='llc-llc.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llc_CPPFLAGS) $(CPPFLAGS) $(llc_CXXFLAGS) $(CXXFLAGS) -c -o llc-llc.obj `if test -f 'llvm/tools/llc/llc.cpp'; then $(CYGPATH_W) 'llvm/tools/llc/llc.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/tools/llc/llc.cpp'; fi`
-
-lli.o: llvm/tools/lli/lli.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT lli.o -MD -MP -MF $(DEPDIR)/lli.Tpo -c -o lli.o `test -f 'llvm/tools/lli/lli.cpp' || echo '$(srcdir)/'`llvm/tools/lli/lli.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/lli.Tpo $(DEPDIR)/lli.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/tools/lli/lli.cpp' object='lli.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o lli.o `test -f 'llvm/tools/lli/lli.cpp' || echo '$(srcdir)/'`llvm/tools/lli/lli.cpp
-
-lli.obj: llvm/tools/lli/lli.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT lli.obj -MD -MP -MF $(DEPDIR)/lli.Tpo -c -o lli.obj `if test -f 'llvm/tools/lli/lli.cpp'; then $(CYGPATH_W) 'llvm/tools/lli/lli.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/tools/lli/lli.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/lli.Tpo $(DEPDIR)/lli.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/tools/lli/lli.cpp' object='lli.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o lli.obj `if test -f 'llvm/tools/lli/lli.cpp'; then $(CYGPATH_W) 'llvm/tools/lli/lli.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/tools/lli/lli.cpp'; fi`
-
-llvm_as-llvm-as.o: llvm/tools/llvm-as/llvm-as.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvm_as_CPPFLAGS) $(CPPFLAGS) $(llvm_as_CXXFLAGS) $(CXXFLAGS) -MT llvm_as-llvm-as.o -MD -MP -MF $(DEPDIR)/llvm_as-llvm-as.Tpo -c -o llvm_as-llvm-as.o `test -f 'llvm/tools/llvm-as/llvm-as.cpp' || echo '$(srcdir)/'`llvm/tools/llvm-as/llvm-as.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvm_as-llvm-as.Tpo $(DEPDIR)/llvm_as-llvm-as.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/tools/llvm-as/llvm-as.cpp' object='llvm_as-llvm-as.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvm_as_CPPFLAGS) $(CPPFLAGS) $(llvm_as_CXXFLAGS) $(CXXFLAGS) -c -o llvm_as-llvm-as.o `test -f 'llvm/tools/llvm-as/llvm-as.cpp' || echo '$(srcdir)/'`llvm/tools/llvm-as/llvm-as.cpp
-
-llvm_as-llvm-as.obj: llvm/tools/llvm-as/llvm-as.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvm_as_CPPFLAGS) $(CPPFLAGS) $(llvm_as_CXXFLAGS) $(CXXFLAGS) -MT llvm_as-llvm-as.obj -MD -MP -MF $(DEPDIR)/llvm_as-llvm-as.Tpo -c -o llvm_as-llvm-as.obj `if test -f 'llvm/tools/llvm-as/llvm-as.cpp'; then $(CYGPATH_W) 'llvm/tools/llvm-as/llvm-as.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/tools/llvm-as/llvm-as.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvm_as-llvm-as.Tpo $(DEPDIR)/llvm_as-llvm-as.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/tools/llvm-as/llvm-as.cpp' object='llvm_as-llvm-as.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvm_as_CPPFLAGS) $(CPPFLAGS) $(llvm_as_CXXFLAGS) $(CXXFLAGS) -c -o llvm_as-llvm-as.obj `if test -f 'llvm/tools/llvm-as/llvm-as.cpp'; then $(CYGPATH_W) 'llvm/tools/llvm-as/llvm-as.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/tools/llvm-as/llvm-as.cpp'; fi`
-
-llvm_dis-llvm-dis.o: llvm/tools/llvm-dis/llvm-dis.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvm_dis_CPPFLAGS) $(CPPFLAGS) $(llvm_dis_CXXFLAGS) $(CXXFLAGS) -MT llvm_dis-llvm-dis.o -MD -MP -MF $(DEPDIR)/llvm_dis-llvm-dis.Tpo -c -o llvm_dis-llvm-dis.o `test -f 'llvm/tools/llvm-dis/llvm-dis.cpp' || echo '$(srcdir)/'`llvm/tools/llvm-dis/llvm-dis.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvm_dis-llvm-dis.Tpo $(DEPDIR)/llvm_dis-llvm-dis.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/tools/llvm-dis/llvm-dis.cpp' object='llvm_dis-llvm-dis.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvm_dis_CPPFLAGS) $(CPPFLAGS) $(llvm_dis_CXXFLAGS) $(CXXFLAGS) -c -o llvm_dis-llvm-dis.o `test -f 'llvm/tools/llvm-dis/llvm-dis.cpp' || echo '$(srcdir)/'`llvm/tools/llvm-dis/llvm-dis.cpp
-
-llvm_dis-llvm-dis.obj: llvm/tools/llvm-dis/llvm-dis.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvm_dis_CPPFLAGS) $(CPPFLAGS) $(llvm_dis_CXXFLAGS) $(CXXFLAGS) -MT llvm_dis-llvm-dis.obj -MD -MP -MF $(DEPDIR)/llvm_dis-llvm-dis.Tpo -c -o llvm_dis-llvm-dis.obj `if test -f 'llvm/tools/llvm-dis/llvm-dis.cpp'; then $(CYGPATH_W) 'llvm/tools/llvm-dis/llvm-dis.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/tools/llvm-dis/llvm-dis.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvm_dis-llvm-dis.Tpo $(DEPDIR)/llvm_dis-llvm-dis.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/tools/llvm-dis/llvm-dis.cpp' object='llvm_dis-llvm-dis.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvm_dis_CPPFLAGS) $(CPPFLAGS) $(llvm_dis_CXXFLAGS) $(CXXFLAGS) -c -o llvm_dis-llvm-dis.obj `if test -f 'llvm/tools/llvm-dis/llvm-dis.cpp'; then $(CYGPATH_W) 'llvm/tools/llvm-dis/llvm-dis.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/tools/llvm-dis/llvm-dis.cpp'; fi`
-
-llvmunittest_ADT-APFloatTest.o: llvm/unittests/ADT/APFloatTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-APFloatTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-APFloatTest.Tpo -c -o llvmunittest_ADT-APFloatTest.o `test -f 'llvm/unittests/ADT/APFloatTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/APFloatTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-APFloatTest.Tpo $(DEPDIR)/llvmunittest_ADT-APFloatTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/APFloatTest.cpp' object='llvmunittest_ADT-APFloatTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-APFloatTest.o `test -f 'llvm/unittests/ADT/APFloatTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/APFloatTest.cpp
-
-llvmunittest_ADT-APFloatTest.obj: llvm/unittests/ADT/APFloatTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-APFloatTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-APFloatTest.Tpo -c -o llvmunittest_ADT-APFloatTest.obj `if test -f 'llvm/unittests/ADT/APFloatTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/APFloatTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/APFloatTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-APFloatTest.Tpo $(DEPDIR)/llvmunittest_ADT-APFloatTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/APFloatTest.cpp' object='llvmunittest_ADT-APFloatTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-APFloatTest.obj `if test -f 'llvm/unittests/ADT/APFloatTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/APFloatTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/APFloatTest.cpp'; fi`
-
-llvmunittest_ADT-APIntTest.o: llvm/unittests/ADT/APIntTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-APIntTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-APIntTest.Tpo -c -o llvmunittest_ADT-APIntTest.o `test -f 'llvm/unittests/ADT/APIntTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/APIntTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-APIntTest.Tpo $(DEPDIR)/llvmunittest_ADT-APIntTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/APIntTest.cpp' object='llvmunittest_ADT-APIntTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-APIntTest.o `test -f 'llvm/unittests/ADT/APIntTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/APIntTest.cpp
-
-llvmunittest_ADT-APIntTest.obj: llvm/unittests/ADT/APIntTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-APIntTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-APIntTest.Tpo -c -o llvmunittest_ADT-APIntTest.obj `if test -f 'llvm/unittests/ADT/APIntTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/APIntTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/APIntTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-APIntTest.Tpo $(DEPDIR)/llvmunittest_ADT-APIntTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/APIntTest.cpp' object='llvmunittest_ADT-APIntTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-APIntTest.obj `if test -f 'llvm/unittests/ADT/APIntTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/APIntTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/APIntTest.cpp'; fi`
-
-llvmunittest_ADT-DenseMapTest.o: llvm/unittests/ADT/DenseMapTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-DenseMapTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-DenseMapTest.Tpo -c -o llvmunittest_ADT-DenseMapTest.o `test -f 'llvm/unittests/ADT/DenseMapTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/DenseMapTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-DenseMapTest.Tpo $(DEPDIR)/llvmunittest_ADT-DenseMapTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/DenseMapTest.cpp' object='llvmunittest_ADT-DenseMapTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-DenseMapTest.o `test -f 'llvm/unittests/ADT/DenseMapTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/DenseMapTest.cpp
-
-llvmunittest_ADT-DenseMapTest.obj: llvm/unittests/ADT/DenseMapTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-DenseMapTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-DenseMapTest.Tpo -c -o llvmunittest_ADT-DenseMapTest.obj `if test -f 'llvm/unittests/ADT/DenseMapTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/DenseMapTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/DenseMapTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-DenseMapTest.Tpo $(DEPDIR)/llvmunittest_ADT-DenseMapTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/DenseMapTest.cpp' object='llvmunittest_ADT-DenseMapTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-DenseMapTest.obj `if test -f 'llvm/unittests/ADT/DenseMapTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/DenseMapTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/DenseMapTest.cpp'; fi`
-
-llvmunittest_ADT-DenseSetTest.o: llvm/unittests/ADT/DenseSetTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-DenseSetTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-DenseSetTest.Tpo -c -o llvmunittest_ADT-DenseSetTest.o `test -f 'llvm/unittests/ADT/DenseSetTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/DenseSetTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-DenseSetTest.Tpo $(DEPDIR)/llvmunittest_ADT-DenseSetTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/DenseSetTest.cpp' object='llvmunittest_ADT-DenseSetTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-DenseSetTest.o `test -f 'llvm/unittests/ADT/DenseSetTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/DenseSetTest.cpp
-
-llvmunittest_ADT-DenseSetTest.obj: llvm/unittests/ADT/DenseSetTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-DenseSetTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-DenseSetTest.Tpo -c -o llvmunittest_ADT-DenseSetTest.obj `if test -f 'llvm/unittests/ADT/DenseSetTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/DenseSetTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/DenseSetTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-DenseSetTest.Tpo $(DEPDIR)/llvmunittest_ADT-DenseSetTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/DenseSetTest.cpp' object='llvmunittest_ADT-DenseSetTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-DenseSetTest.obj `if test -f 'llvm/unittests/ADT/DenseSetTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/DenseSetTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/DenseSetTest.cpp'; fi`
-
-llvmunittest_ADT-ImmutableSetTest.o: llvm/unittests/ADT/ImmutableSetTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-ImmutableSetTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-ImmutableSetTest.Tpo -c -o llvmunittest_ADT-ImmutableSetTest.o `test -f 'llvm/unittests/ADT/ImmutableSetTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/ImmutableSetTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-ImmutableSetTest.Tpo $(DEPDIR)/llvmunittest_ADT-ImmutableSetTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/ImmutableSetTest.cpp' object='llvmunittest_ADT-ImmutableSetTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-ImmutableSetTest.o `test -f 'llvm/unittests/ADT/ImmutableSetTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/ImmutableSetTest.cpp
-
-llvmunittest_ADT-ImmutableSetTest.obj: llvm/unittests/ADT/ImmutableSetTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-ImmutableSetTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-ImmutableSetTest.Tpo -c -o llvmunittest_ADT-ImmutableSetTest.obj `if test -f 'llvm/unittests/ADT/ImmutableSetTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/ImmutableSetTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/ImmutableSetTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-ImmutableSetTest.Tpo $(DEPDIR)/llvmunittest_ADT-ImmutableSetTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/ImmutableSetTest.cpp' object='llvmunittest_ADT-ImmutableSetTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-ImmutableSetTest.obj `if test -f 'llvm/unittests/ADT/ImmutableSetTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/ImmutableSetTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/ImmutableSetTest.cpp'; fi`
-
-llvmunittest_ADT-SmallStringTest.o: llvm/unittests/ADT/SmallStringTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-SmallStringTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-SmallStringTest.Tpo -c -o llvmunittest_ADT-SmallStringTest.o `test -f 'llvm/unittests/ADT/SmallStringTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/SmallStringTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-SmallStringTest.Tpo $(DEPDIR)/llvmunittest_ADT-SmallStringTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/SmallStringTest.cpp' object='llvmunittest_ADT-SmallStringTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-SmallStringTest.o `test -f 'llvm/unittests/ADT/SmallStringTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/SmallStringTest.cpp
-
-llvmunittest_ADT-SmallStringTest.obj: llvm/unittests/ADT/SmallStringTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-SmallStringTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-SmallStringTest.Tpo -c -o llvmunittest_ADT-SmallStringTest.obj `if test -f 'llvm/unittests/ADT/SmallStringTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/SmallStringTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/SmallStringTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-SmallStringTest.Tpo $(DEPDIR)/llvmunittest_ADT-SmallStringTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/SmallStringTest.cpp' object='llvmunittest_ADT-SmallStringTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-SmallStringTest.obj `if test -f 'llvm/unittests/ADT/SmallStringTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/SmallStringTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/SmallStringTest.cpp'; fi`
-
-llvmunittest_ADT-SmallVectorTest.o: llvm/unittests/ADT/SmallVectorTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-SmallVectorTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-SmallVectorTest.Tpo -c -o llvmunittest_ADT-SmallVectorTest.o `test -f 'llvm/unittests/ADT/SmallVectorTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/SmallVectorTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-SmallVectorTest.Tpo $(DEPDIR)/llvmunittest_ADT-SmallVectorTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/SmallVectorTest.cpp' object='llvmunittest_ADT-SmallVectorTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-SmallVectorTest.o `test -f 'llvm/unittests/ADT/SmallVectorTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/SmallVectorTest.cpp
-
-llvmunittest_ADT-SmallVectorTest.obj: llvm/unittests/ADT/SmallVectorTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-SmallVectorTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-SmallVectorTest.Tpo -c -o llvmunittest_ADT-SmallVectorTest.obj `if test -f 'llvm/unittests/ADT/SmallVectorTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/SmallVectorTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/SmallVectorTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-SmallVectorTest.Tpo $(DEPDIR)/llvmunittest_ADT-SmallVectorTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/SmallVectorTest.cpp' object='llvmunittest_ADT-SmallVectorTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-SmallVectorTest.obj `if test -f 'llvm/unittests/ADT/SmallVectorTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/SmallVectorTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/SmallVectorTest.cpp'; fi`
-
-llvmunittest_ADT-SparseBitVectorTest.o: llvm/unittests/ADT/SparseBitVectorTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-SparseBitVectorTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-SparseBitVectorTest.Tpo -c -o llvmunittest_ADT-SparseBitVectorTest.o `test -f 'llvm/unittests/ADT/SparseBitVectorTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/SparseBitVectorTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-SparseBitVectorTest.Tpo $(DEPDIR)/llvmunittest_ADT-SparseBitVectorTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/SparseBitVectorTest.cpp' object='llvmunittest_ADT-SparseBitVectorTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-SparseBitVectorTest.o `test -f 'llvm/unittests/ADT/SparseBitVectorTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/SparseBitVectorTest.cpp
-
-llvmunittest_ADT-SparseBitVectorTest.obj: llvm/unittests/ADT/SparseBitVectorTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-SparseBitVectorTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-SparseBitVectorTest.Tpo -c -o llvmunittest_ADT-SparseBitVectorTest.obj `if test -f 'llvm/unittests/ADT/SparseBitVectorTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/SparseBitVectorTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/SparseBitVectorTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-SparseBitVectorTest.Tpo $(DEPDIR)/llvmunittest_ADT-SparseBitVectorTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/SparseBitVectorTest.cpp' object='llvmunittest_ADT-SparseBitVectorTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-SparseBitVectorTest.obj `if test -f 'llvm/unittests/ADT/SparseBitVectorTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/SparseBitVectorTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/SparseBitVectorTest.cpp'; fi`
-
-llvmunittest_ADT-StringMapTest.o: llvm/unittests/ADT/StringMapTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-StringMapTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-StringMapTest.Tpo -c -o llvmunittest_ADT-StringMapTest.o `test -f 'llvm/unittests/ADT/StringMapTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/StringMapTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-StringMapTest.Tpo $(DEPDIR)/llvmunittest_ADT-StringMapTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/StringMapTest.cpp' object='llvmunittest_ADT-StringMapTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-StringMapTest.o `test -f 'llvm/unittests/ADT/StringMapTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/StringMapTest.cpp
-
-llvmunittest_ADT-StringMapTest.obj: llvm/unittests/ADT/StringMapTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-StringMapTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-StringMapTest.Tpo -c -o llvmunittest_ADT-StringMapTest.obj `if test -f 'llvm/unittests/ADT/StringMapTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/StringMapTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/StringMapTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-StringMapTest.Tpo $(DEPDIR)/llvmunittest_ADT-StringMapTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/StringMapTest.cpp' object='llvmunittest_ADT-StringMapTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-StringMapTest.obj `if test -f 'llvm/unittests/ADT/StringMapTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/StringMapTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/StringMapTest.cpp'; fi`
-
-llvmunittest_ADT-StringRefTest.o: llvm/unittests/ADT/StringRefTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-StringRefTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-StringRefTest.Tpo -c -o llvmunittest_ADT-StringRefTest.o `test -f 'llvm/unittests/ADT/StringRefTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/StringRefTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-StringRefTest.Tpo $(DEPDIR)/llvmunittest_ADT-StringRefTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/StringRefTest.cpp' object='llvmunittest_ADT-StringRefTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-StringRefTest.o `test -f 'llvm/unittests/ADT/StringRefTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/StringRefTest.cpp
-
-llvmunittest_ADT-StringRefTest.obj: llvm/unittests/ADT/StringRefTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-StringRefTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-StringRefTest.Tpo -c -o llvmunittest_ADT-StringRefTest.obj `if test -f 'llvm/unittests/ADT/StringRefTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/StringRefTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/StringRefTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-StringRefTest.Tpo $(DEPDIR)/llvmunittest_ADT-StringRefTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/StringRefTest.cpp' object='llvmunittest_ADT-StringRefTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-StringRefTest.obj `if test -f 'llvm/unittests/ADT/StringRefTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/StringRefTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/StringRefTest.cpp'; fi`
-
-llvmunittest_ADT-TripleTest.o: llvm/unittests/ADT/TripleTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-TripleTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-TripleTest.Tpo -c -o llvmunittest_ADT-TripleTest.o `test -f 'llvm/unittests/ADT/TripleTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/TripleTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-TripleTest.Tpo $(DEPDIR)/llvmunittest_ADT-TripleTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/TripleTest.cpp' object='llvmunittest_ADT-TripleTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-TripleTest.o `test -f 'llvm/unittests/ADT/TripleTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/TripleTest.cpp
-
-llvmunittest_ADT-TripleTest.obj: llvm/unittests/ADT/TripleTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-TripleTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-TripleTest.Tpo -c -o llvmunittest_ADT-TripleTest.obj `if test -f 'llvm/unittests/ADT/TripleTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/TripleTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/TripleTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-TripleTest.Tpo $(DEPDIR)/llvmunittest_ADT-TripleTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/TripleTest.cpp' object='llvmunittest_ADT-TripleTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-TripleTest.obj `if test -f 'llvm/unittests/ADT/TripleTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/TripleTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/TripleTest.cpp'; fi`
-
-llvmunittest_ADT-TwineTest.o: llvm/unittests/ADT/TwineTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-TwineTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-TwineTest.Tpo -c -o llvmunittest_ADT-TwineTest.o `test -f 'llvm/unittests/ADT/TwineTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/TwineTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-TwineTest.Tpo $(DEPDIR)/llvmunittest_ADT-TwineTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/TwineTest.cpp' object='llvmunittest_ADT-TwineTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-TwineTest.o `test -f 'llvm/unittests/ADT/TwineTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ADT/TwineTest.cpp
-
-llvmunittest_ADT-TwineTest.obj: llvm/unittests/ADT/TwineTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ADT-TwineTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_ADT-TwineTest.Tpo -c -o llvmunittest_ADT-TwineTest.obj `if test -f 'llvm/unittests/ADT/TwineTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/TwineTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/TwineTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ADT-TwineTest.Tpo $(DEPDIR)/llvmunittest_ADT-TwineTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ADT/TwineTest.cpp' object='llvmunittest_ADT-TwineTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ADT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ADT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ADT-TwineTest.obj `if test -f 'llvm/unittests/ADT/TwineTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ADT/TwineTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ADT/TwineTest.cpp'; fi`
-
-llvmunittest_ExecutionEngine-IntrinsicLowering.o: llvm/lib/CodeGen/IntrinsicLowering.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ExecutionEngine_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ExecutionEngine_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ExecutionEngine-IntrinsicLowering.o -MD -MP -MF $(DEPDIR)/llvmunittest_ExecutionEngine-IntrinsicLowering.Tpo -c -o llvmunittest_ExecutionEngine-IntrinsicLowering.o `test -f 'llvm/lib/CodeGen/IntrinsicLowering.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/IntrinsicLowering.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ExecutionEngine-IntrinsicLowering.Tpo $(DEPDIR)/llvmunittest_ExecutionEngine-IntrinsicLowering.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/IntrinsicLowering.cpp' object='llvmunittest_ExecutionEngine-IntrinsicLowering.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ExecutionEngine_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ExecutionEngine_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ExecutionEngine-IntrinsicLowering.o `test -f 'llvm/lib/CodeGen/IntrinsicLowering.cpp' || echo '$(srcdir)/'`llvm/lib/CodeGen/IntrinsicLowering.cpp
-
-llvmunittest_ExecutionEngine-IntrinsicLowering.obj: llvm/lib/CodeGen/IntrinsicLowering.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ExecutionEngine_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ExecutionEngine_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ExecutionEngine-IntrinsicLowering.obj -MD -MP -MF $(DEPDIR)/llvmunittest_ExecutionEngine-IntrinsicLowering.Tpo -c -o llvmunittest_ExecutionEngine-IntrinsicLowering.obj `if test -f 'llvm/lib/CodeGen/IntrinsicLowering.cpp'; then $(CYGPATH_W) 'llvm/lib/CodeGen/IntrinsicLowering.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/CodeGen/IntrinsicLowering.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ExecutionEngine-IntrinsicLowering.Tpo $(DEPDIR)/llvmunittest_ExecutionEngine-IntrinsicLowering.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/CodeGen/IntrinsicLowering.cpp' object='llvmunittest_ExecutionEngine-IntrinsicLowering.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ExecutionEngine_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ExecutionEngine_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ExecutionEngine-IntrinsicLowering.obj `if test -f 'llvm/lib/CodeGen/IntrinsicLowering.cpp'; then $(CYGPATH_W) 'llvm/lib/CodeGen/IntrinsicLowering.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/CodeGen/IntrinsicLowering.cpp'; fi`
-
-llvmunittest_ExecutionEngine-ExecutionEngineTest.o: llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ExecutionEngine_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ExecutionEngine_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ExecutionEngine-ExecutionEngineTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_ExecutionEngine-ExecutionEngineTest.Tpo -c -o llvmunittest_ExecutionEngine-ExecutionEngineTest.o `test -f 'llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ExecutionEngine-ExecutionEngineTest.Tpo $(DEPDIR)/llvmunittest_ExecutionEngine-ExecutionEngineTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp' object='llvmunittest_ExecutionEngine-ExecutionEngineTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ExecutionEngine_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ExecutionEngine_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ExecutionEngine-ExecutionEngineTest.o `test -f 'llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp
-
-llvmunittest_ExecutionEngine-ExecutionEngineTest.obj: llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ExecutionEngine_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ExecutionEngine_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_ExecutionEngine-ExecutionEngineTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_ExecutionEngine-ExecutionEngineTest.Tpo -c -o llvmunittest_ExecutionEngine-ExecutionEngineTest.obj `if test -f 'llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_ExecutionEngine-ExecutionEngineTest.Tpo $(DEPDIR)/llvmunittest_ExecutionEngine-ExecutionEngineTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp' object='llvmunittest_ExecutionEngine-ExecutionEngineTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_ExecutionEngine_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_ExecutionEngine_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_ExecutionEngine-ExecutionEngineTest.obj `if test -f 'llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp'; fi`
-
-llvmunittest_JIT-JITEventListenerTest.o: llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_JIT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_JIT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_JIT-JITEventListenerTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_JIT-JITEventListenerTest.Tpo -c -o llvmunittest_JIT-JITEventListenerTest.o `test -f 'llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_JIT-JITEventListenerTest.Tpo $(DEPDIR)/llvmunittest_JIT-JITEventListenerTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp' object='llvmunittest_JIT-JITEventListenerTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_JIT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_JIT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_JIT-JITEventListenerTest.o `test -f 'llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp
-
-llvmunittest_JIT-JITEventListenerTest.obj: llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_JIT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_JIT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_JIT-JITEventListenerTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_JIT-JITEventListenerTest.Tpo -c -o llvmunittest_JIT-JITEventListenerTest.obj `if test -f 'llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_JIT-JITEventListenerTest.Tpo $(DEPDIR)/llvmunittest_JIT-JITEventListenerTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp' object='llvmunittest_JIT-JITEventListenerTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_JIT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_JIT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_JIT-JITEventListenerTest.obj `if test -f 'llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp'; fi`
-
-llvmunittest_JIT-JITMemoryManagerTest.o: llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_JIT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_JIT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_JIT-JITMemoryManagerTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_JIT-JITMemoryManagerTest.Tpo -c -o llvmunittest_JIT-JITMemoryManagerTest.o `test -f 'llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_JIT-JITMemoryManagerTest.Tpo $(DEPDIR)/llvmunittest_JIT-JITMemoryManagerTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp' object='llvmunittest_JIT-JITMemoryManagerTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_JIT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_JIT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_JIT-JITMemoryManagerTest.o `test -f 'llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp
-
-llvmunittest_JIT-JITMemoryManagerTest.obj: llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_JIT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_JIT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_JIT-JITMemoryManagerTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_JIT-JITMemoryManagerTest.Tpo -c -o llvmunittest_JIT-JITMemoryManagerTest.obj `if test -f 'llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_JIT-JITMemoryManagerTest.Tpo $(DEPDIR)/llvmunittest_JIT-JITMemoryManagerTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp' object='llvmunittest_JIT-JITMemoryManagerTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_JIT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_JIT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_JIT-JITMemoryManagerTest.obj `if test -f 'llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp'; fi`
-
-llvmunittest_JIT-JITTest.o: llvm/unittests/ExecutionEngine/JIT/JITTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_JIT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_JIT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_JIT-JITTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_JIT-JITTest.Tpo -c -o llvmunittest_JIT-JITTest.o `test -f 'llvm/unittests/ExecutionEngine/JIT/JITTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ExecutionEngine/JIT/JITTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_JIT-JITTest.Tpo $(DEPDIR)/llvmunittest_JIT-JITTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ExecutionEngine/JIT/JITTest.cpp' object='llvmunittest_JIT-JITTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_JIT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_JIT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_JIT-JITTest.o `test -f 'llvm/unittests/ExecutionEngine/JIT/JITTest.cpp' || echo '$(srcdir)/'`llvm/unittests/ExecutionEngine/JIT/JITTest.cpp
-
-llvmunittest_JIT-JITTest.obj: llvm/unittests/ExecutionEngine/JIT/JITTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_JIT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_JIT_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_JIT-JITTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_JIT-JITTest.Tpo -c -o llvmunittest_JIT-JITTest.obj `if test -f 'llvm/unittests/ExecutionEngine/JIT/JITTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ExecutionEngine/JIT/JITTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ExecutionEngine/JIT/JITTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_JIT-JITTest.Tpo $(DEPDIR)/llvmunittest_JIT-JITTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/ExecutionEngine/JIT/JITTest.cpp' object='llvmunittest_JIT-JITTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_JIT_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_JIT_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_JIT-JITTest.obj `if test -f 'llvm/unittests/ExecutionEngine/JIT/JITTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/ExecutionEngine/JIT/JITTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/ExecutionEngine/JIT/JITTest.cpp'; fi`
-
-llvmunittest_Support-AllocatorTest.o: llvm/unittests/Support/AllocatorTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_Support-AllocatorTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_Support-AllocatorTest.Tpo -c -o llvmunittest_Support-AllocatorTest.o `test -f 'llvm/unittests/Support/AllocatorTest.cpp' || echo '$(srcdir)/'`llvm/unittests/Support/AllocatorTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_Support-AllocatorTest.Tpo $(DEPDIR)/llvmunittest_Support-AllocatorTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/Support/AllocatorTest.cpp' object='llvmunittest_Support-AllocatorTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_Support-AllocatorTest.o `test -f 'llvm/unittests/Support/AllocatorTest.cpp' || echo '$(srcdir)/'`llvm/unittests/Support/AllocatorTest.cpp
-
-llvmunittest_Support-AllocatorTest.obj: llvm/unittests/Support/AllocatorTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_Support-AllocatorTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_Support-AllocatorTest.Tpo -c -o llvmunittest_Support-AllocatorTest.obj `if test -f 'llvm/unittests/Support/AllocatorTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/Support/AllocatorTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/Support/AllocatorTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_Support-AllocatorTest.Tpo $(DEPDIR)/llvmunittest_Support-AllocatorTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/Support/AllocatorTest.cpp' object='llvmunittest_Support-AllocatorTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_Support-AllocatorTest.obj `if test -f 'llvm/unittests/Support/AllocatorTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/Support/AllocatorTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/Support/AllocatorTest.cpp'; fi`
-
-llvmunittest_Support-ConstantRangeTest.o: llvm/unittests/Support/ConstantRangeTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_Support-ConstantRangeTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_Support-ConstantRangeTest.Tpo -c -o llvmunittest_Support-ConstantRangeTest.o `test -f 'llvm/unittests/Support/ConstantRangeTest.cpp' || echo '$(srcdir)/'`llvm/unittests/Support/ConstantRangeTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_Support-ConstantRangeTest.Tpo $(DEPDIR)/llvmunittest_Support-ConstantRangeTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/Support/ConstantRangeTest.cpp' object='llvmunittest_Support-ConstantRangeTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_Support-ConstantRangeTest.o `test -f 'llvm/unittests/Support/ConstantRangeTest.cpp' || echo '$(srcdir)/'`llvm/unittests/Support/ConstantRangeTest.cpp
-
-llvmunittest_Support-ConstantRangeTest.obj: llvm/unittests/Support/ConstantRangeTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_Support-ConstantRangeTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_Support-ConstantRangeTest.Tpo -c -o llvmunittest_Support-ConstantRangeTest.obj `if test -f 'llvm/unittests/Support/ConstantRangeTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/Support/ConstantRangeTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/Support/ConstantRangeTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_Support-ConstantRangeTest.Tpo $(DEPDIR)/llvmunittest_Support-ConstantRangeTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/Support/ConstantRangeTest.cpp' object='llvmunittest_Support-ConstantRangeTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_Support-ConstantRangeTest.obj `if test -f 'llvm/unittests/Support/ConstantRangeTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/Support/ConstantRangeTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/Support/ConstantRangeTest.cpp'; fi`
-
-llvmunittest_Support-MathExtrasTest.o: llvm/unittests/Support/MathExtrasTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_Support-MathExtrasTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_Support-MathExtrasTest.Tpo -c -o llvmunittest_Support-MathExtrasTest.o `test -f 'llvm/unittests/Support/MathExtrasTest.cpp' || echo '$(srcdir)/'`llvm/unittests/Support/MathExtrasTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_Support-MathExtrasTest.Tpo $(DEPDIR)/llvmunittest_Support-MathExtrasTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/Support/MathExtrasTest.cpp' object='llvmunittest_Support-MathExtrasTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_Support-MathExtrasTest.o `test -f 'llvm/unittests/Support/MathExtrasTest.cpp' || echo '$(srcdir)/'`llvm/unittests/Support/MathExtrasTest.cpp
-
-llvmunittest_Support-MathExtrasTest.obj: llvm/unittests/Support/MathExtrasTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_Support-MathExtrasTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_Support-MathExtrasTest.Tpo -c -o llvmunittest_Support-MathExtrasTest.obj `if test -f 'llvm/unittests/Support/MathExtrasTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/Support/MathExtrasTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/Support/MathExtrasTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_Support-MathExtrasTest.Tpo $(DEPDIR)/llvmunittest_Support-MathExtrasTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/Support/MathExtrasTest.cpp' object='llvmunittest_Support-MathExtrasTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_Support-MathExtrasTest.obj `if test -f 'llvm/unittests/Support/MathExtrasTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/Support/MathExtrasTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/Support/MathExtrasTest.cpp'; fi`
-
-llvmunittest_Support-RegexTest.o: llvm/unittests/Support/RegexTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_Support-RegexTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_Support-RegexTest.Tpo -c -o llvmunittest_Support-RegexTest.o `test -f 'llvm/unittests/Support/RegexTest.cpp' || echo '$(srcdir)/'`llvm/unittests/Support/RegexTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_Support-RegexTest.Tpo $(DEPDIR)/llvmunittest_Support-RegexTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/Support/RegexTest.cpp' object='llvmunittest_Support-RegexTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_Support-RegexTest.o `test -f 'llvm/unittests/Support/RegexTest.cpp' || echo '$(srcdir)/'`llvm/unittests/Support/RegexTest.cpp
-
-llvmunittest_Support-RegexTest.obj: llvm/unittests/Support/RegexTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_Support-RegexTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_Support-RegexTest.Tpo -c -o llvmunittest_Support-RegexTest.obj `if test -f 'llvm/unittests/Support/RegexTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/Support/RegexTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/Support/RegexTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_Support-RegexTest.Tpo $(DEPDIR)/llvmunittest_Support-RegexTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/Support/RegexTest.cpp' object='llvmunittest_Support-RegexTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_Support-RegexTest.obj `if test -f 'llvm/unittests/Support/RegexTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/Support/RegexTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/Support/RegexTest.cpp'; fi`
-
-llvmunittest_Support-TypeBuilderTest.o: llvm/unittests/Support/TypeBuilderTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_Support-TypeBuilderTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_Support-TypeBuilderTest.Tpo -c -o llvmunittest_Support-TypeBuilderTest.o `test -f 'llvm/unittests/Support/TypeBuilderTest.cpp' || echo '$(srcdir)/'`llvm/unittests/Support/TypeBuilderTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_Support-TypeBuilderTest.Tpo $(DEPDIR)/llvmunittest_Support-TypeBuilderTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/Support/TypeBuilderTest.cpp' object='llvmunittest_Support-TypeBuilderTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_Support-TypeBuilderTest.o `test -f 'llvm/unittests/Support/TypeBuilderTest.cpp' || echo '$(srcdir)/'`llvm/unittests/Support/TypeBuilderTest.cpp
-
-llvmunittest_Support-TypeBuilderTest.obj: llvm/unittests/Support/TypeBuilderTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_Support-TypeBuilderTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_Support-TypeBuilderTest.Tpo -c -o llvmunittest_Support-TypeBuilderTest.obj `if test -f 'llvm/unittests/Support/TypeBuilderTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/Support/TypeBuilderTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/Support/TypeBuilderTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_Support-TypeBuilderTest.Tpo $(DEPDIR)/llvmunittest_Support-TypeBuilderTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/Support/TypeBuilderTest.cpp' object='llvmunittest_Support-TypeBuilderTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_Support-TypeBuilderTest.obj `if test -f 'llvm/unittests/Support/TypeBuilderTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/Support/TypeBuilderTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/Support/TypeBuilderTest.cpp'; fi`
-
-llvmunittest_Support-ValueHandleTest.o: llvm/unittests/Support/ValueHandleTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_Support-ValueHandleTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_Support-ValueHandleTest.Tpo -c -o llvmunittest_Support-ValueHandleTest.o `test -f 'llvm/unittests/Support/ValueHandleTest.cpp' || echo '$(srcdir)/'`llvm/unittests/Support/ValueHandleTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_Support-ValueHandleTest.Tpo $(DEPDIR)/llvmunittest_Support-ValueHandleTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/Support/ValueHandleTest.cpp' object='llvmunittest_Support-ValueHandleTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_Support-ValueHandleTest.o `test -f 'llvm/unittests/Support/ValueHandleTest.cpp' || echo '$(srcdir)/'`llvm/unittests/Support/ValueHandleTest.cpp
-
-llvmunittest_Support-ValueHandleTest.obj: llvm/unittests/Support/ValueHandleTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_Support-ValueHandleTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_Support-ValueHandleTest.Tpo -c -o llvmunittest_Support-ValueHandleTest.obj `if test -f 'llvm/unittests/Support/ValueHandleTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/Support/ValueHandleTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/Support/ValueHandleTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_Support-ValueHandleTest.Tpo $(DEPDIR)/llvmunittest_Support-ValueHandleTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/Support/ValueHandleTest.cpp' object='llvmunittest_Support-ValueHandleTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_Support-ValueHandleTest.obj `if test -f 'llvm/unittests/Support/ValueHandleTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/Support/ValueHandleTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/Support/ValueHandleTest.cpp'; fi`
-
-llvmunittest_Support-raw_ostream_test.o: llvm/unittests/Support/raw_ostream_test.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_Support-raw_ostream_test.o -MD -MP -MF $(DEPDIR)/llvmunittest_Support-raw_ostream_test.Tpo -c -o llvmunittest_Support-raw_ostream_test.o `test -f 'llvm/unittests/Support/raw_ostream_test.cpp' || echo '$(srcdir)/'`llvm/unittests/Support/raw_ostream_test.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_Support-raw_ostream_test.Tpo $(DEPDIR)/llvmunittest_Support-raw_ostream_test.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/Support/raw_ostream_test.cpp' object='llvmunittest_Support-raw_ostream_test.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_Support-raw_ostream_test.o `test -f 'llvm/unittests/Support/raw_ostream_test.cpp' || echo '$(srcdir)/'`llvm/unittests/Support/raw_ostream_test.cpp
-
-llvmunittest_Support-raw_ostream_test.obj: llvm/unittests/Support/raw_ostream_test.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_Support-raw_ostream_test.obj -MD -MP -MF $(DEPDIR)/llvmunittest_Support-raw_ostream_test.Tpo -c -o llvmunittest_Support-raw_ostream_test.obj `if test -f 'llvm/unittests/Support/raw_ostream_test.cpp'; then $(CYGPATH_W) 'llvm/unittests/Support/raw_ostream_test.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/Support/raw_ostream_test.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_Support-raw_ostream_test.Tpo $(DEPDIR)/llvmunittest_Support-raw_ostream_test.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/Support/raw_ostream_test.cpp' object='llvmunittest_Support-raw_ostream_test.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_Support_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_Support_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_Support-raw_ostream_test.obj `if test -f 'llvm/unittests/Support/raw_ostream_test.cpp'; then $(CYGPATH_W) 'llvm/unittests/Support/raw_ostream_test.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/Support/raw_ostream_test.cpp'; fi`
-
-llvmunittest_VMCore-CallGraphSCCPass.o: llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_VMCore-CallGraphSCCPass.o -MD -MP -MF $(DEPDIR)/llvmunittest_VMCore-CallGraphSCCPass.Tpo -c -o llvmunittest_VMCore-CallGraphSCCPass.o `test -f 'llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_VMCore-CallGraphSCCPass.Tpo $(DEPDIR)/llvmunittest_VMCore-CallGraphSCCPass.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp' object='llvmunittest_VMCore-CallGraphSCCPass.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_VMCore-CallGraphSCCPass.o `test -f 'llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
-
-llvmunittest_VMCore-CallGraphSCCPass.obj: llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_VMCore-CallGraphSCCPass.obj -MD -MP -MF $(DEPDIR)/llvmunittest_VMCore-CallGraphSCCPass.Tpo -c -o llvmunittest_VMCore-CallGraphSCCPass.obj `if test -f 'llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp'; then $(CYGPATH_W) 'llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_VMCore-CallGraphSCCPass.Tpo $(DEPDIR)/llvmunittest_VMCore-CallGraphSCCPass.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp' object='llvmunittest_VMCore-CallGraphSCCPass.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_VMCore-CallGraphSCCPass.obj `if test -f 'llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp'; then $(CYGPATH_W) 'llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp'; fi`
-
-llvmunittest_VMCore-LoopInfo.o: llvm/lib/Analysis/LoopInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_VMCore-LoopInfo.o -MD -MP -MF $(DEPDIR)/llvmunittest_VMCore-LoopInfo.Tpo -c -o llvmunittest_VMCore-LoopInfo.o `test -f 'llvm/lib/Analysis/LoopInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/LoopInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_VMCore-LoopInfo.Tpo $(DEPDIR)/llvmunittest_VMCore-LoopInfo.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Analysis/LoopInfo.cpp' object='llvmunittest_VMCore-LoopInfo.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_VMCore-LoopInfo.o `test -f 'llvm/lib/Analysis/LoopInfo.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/LoopInfo.cpp
-
-llvmunittest_VMCore-LoopInfo.obj: llvm/lib/Analysis/LoopInfo.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_VMCore-LoopInfo.obj -MD -MP -MF $(DEPDIR)/llvmunittest_VMCore-LoopInfo.Tpo -c -o llvmunittest_VMCore-LoopInfo.obj `if test -f 'llvm/lib/Analysis/LoopInfo.cpp'; then $(CYGPATH_W) 'llvm/lib/Analysis/LoopInfo.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/Analysis/LoopInfo.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_VMCore-LoopInfo.Tpo $(DEPDIR)/llvmunittest_VMCore-LoopInfo.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Analysis/LoopInfo.cpp' object='llvmunittest_VMCore-LoopInfo.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_VMCore-LoopInfo.obj `if test -f 'llvm/lib/Analysis/LoopInfo.cpp'; then $(CYGPATH_W) 'llvm/lib/Analysis/LoopInfo.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/Analysis/LoopInfo.cpp'; fi`
-
-llvmunittest_VMCore-LoopPass.o: llvm/lib/Analysis/LoopPass.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_VMCore-LoopPass.o -MD -MP -MF $(DEPDIR)/llvmunittest_VMCore-LoopPass.Tpo -c -o llvmunittest_VMCore-LoopPass.o `test -f 'llvm/lib/Analysis/LoopPass.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/LoopPass.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_VMCore-LoopPass.Tpo $(DEPDIR)/llvmunittest_VMCore-LoopPass.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Analysis/LoopPass.cpp' object='llvmunittest_VMCore-LoopPass.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_VMCore-LoopPass.o `test -f 'llvm/lib/Analysis/LoopPass.cpp' || echo '$(srcdir)/'`llvm/lib/Analysis/LoopPass.cpp
-
-llvmunittest_VMCore-LoopPass.obj: llvm/lib/Analysis/LoopPass.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_VMCore-LoopPass.obj -MD -MP -MF $(DEPDIR)/llvmunittest_VMCore-LoopPass.Tpo -c -o llvmunittest_VMCore-LoopPass.obj `if test -f 'llvm/lib/Analysis/LoopPass.cpp'; then $(CYGPATH_W) 'llvm/lib/Analysis/LoopPass.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/Analysis/LoopPass.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_VMCore-LoopPass.Tpo $(DEPDIR)/llvmunittest_VMCore-LoopPass.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Analysis/LoopPass.cpp' object='llvmunittest_VMCore-LoopPass.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_VMCore-LoopPass.obj `if test -f 'llvm/lib/Analysis/LoopPass.cpp'; then $(CYGPATH_W) 'llvm/lib/Analysis/LoopPass.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/Analysis/LoopPass.cpp'; fi`
-
-llvmunittest_VMCore-ConstantsTest.o: llvm/unittests/VMCore/ConstantsTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_VMCore-ConstantsTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_VMCore-ConstantsTest.Tpo -c -o llvmunittest_VMCore-ConstantsTest.o `test -f 'llvm/unittests/VMCore/ConstantsTest.cpp' || echo '$(srcdir)/'`llvm/unittests/VMCore/ConstantsTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_VMCore-ConstantsTest.Tpo $(DEPDIR)/llvmunittest_VMCore-ConstantsTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/VMCore/ConstantsTest.cpp' object='llvmunittest_VMCore-ConstantsTest.o' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_VMCore-ConstantsTest.o `test -f 'llvm/unittests/VMCore/ConstantsTest.cpp' || echo '$(srcdir)/'`llvm/unittests/VMCore/ConstantsTest.cpp
-
-llvmunittest_VMCore-ConstantsTest.obj: llvm/unittests/VMCore/ConstantsTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_VMCore-ConstantsTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_VMCore-ConstantsTest.Tpo -c -o llvmunittest_VMCore-ConstantsTest.obj `if test -f 'llvm/unittests/VMCore/ConstantsTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/VMCore/ConstantsTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/VMCore/ConstantsTest.cpp'; fi`
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_VMCore-ConstantsTest.Tpo $(DEPDIR)/llvmunittest_VMCore-ConstantsTest.Po
- at am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/VMCore/ConstantsTest.cpp' object='llvmunittest_VMCore-ConstantsTest.obj' libtool=no @AMDEPBACKSLASH@
- at AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
- at am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_VMCore-ConstantsTest.obj `if test -f 'llvm/unittests/VMCore/ConstantsTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/VMCore/ConstantsTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/VMCore/ConstantsTest.cpp'; fi`
-
-llvmunittest_VMCore-MetadataTest.o: llvm/unittests/VMCore/MetadataTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_VMCore-MetadataTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_VMCore-MetadataTest.Tpo -c -o llvmunittest_VMCore-MetadataTest.o `test -f 'llvm/unittests/VMCore/MetadataTest.cpp' || echo '$(srcdir)/'`llvm/unittests/VMCore/MetadataTest.cpp
- at am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_VMCore-MetadataTest.Tpo $(DEPDIR)/llvmunittest_VMCore-MetadataTest.Po
+tblgen-ARMDecoderEmitter.o: llvm/utils/TableGen/ARMDecoderEmitter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-ARMDecoderEmitter.o -MD -MP -MF $(DEPDIR)/tblgen-ARMDecoderEmitter.Tpo -c -o tblgen-ARMDecoderEmitter.o `test -f 'llvm/utils/TableGen/ARMDecoderEmitter.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/ARMDecoderEmitter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-ARMDecoderEmitter.Tpo $(DEPDIR)/tblgen-ARMDecoderEmitter.Po
@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/VMCore/MetadataTest.cpp' object='llvmunittest_VMCore-MetadataTest.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/ARMDecoderEmitter.cpp' object='tblgen-ARMDecoderEmitter.o' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_VMCore-MetadataTest.o `test -f 'llvm/unittests/VMCore/MetadataTest.cpp' || echo '$(srcdir)/'`llvm/unittests/VMCore/MetadataTest.cpp
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-ARMDecoderEmitter.o `test -f 'llvm/utils/TableGen/ARMDecoderEmitter.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/ARMDecoderEmitter.cpp
-llvmunittest_VMCore-MetadataTest.obj: llvm/unittests/VMCore/MetadataTest.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_VMCore-MetadataTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_VMCore-MetadataTest.Tpo -c -o llvmunittest_VMCore-MetadataTest.obj `if test -f 'llvm/unittests/VMCore/MetadataTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/VMCore/MetadataTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/VMCore/MetadataTest.cpp'; fi`
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_VMCore-MetadataTest.Tpo $(DEPDIR)/llvmunittest_VMCore-MetadataTest.Po
+tblgen-ARMDecoderEmitter.obj: llvm/utils/TableGen/ARMDecoderEmitter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-ARMDecoderEmitter.obj -MD -MP -MF $(DEPDIR)/tblgen-ARMDecoderEmitter.Tpo -c -o tblgen-ARMDecoderEmitter.obj `if test -f 'llvm/utils/TableGen/ARMDecoderEmitter.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/ARMDecoderEmitter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/ARMDecoderEmitter.cpp'; fi`
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-ARMDecoderEmitter.Tpo $(DEPDIR)/tblgen-ARMDecoderEmitter.Po
@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/VMCore/MetadataTest.cpp' object='llvmunittest_VMCore-MetadataTest.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/ARMDecoderEmitter.cpp' object='tblgen-ARMDecoderEmitter.obj' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_VMCore-MetadataTest.obj `if test -f 'llvm/unittests/VMCore/MetadataTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/VMCore/MetadataTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/VMCore/MetadataTest.cpp'; fi`
-
-llvmunittest_VMCore-PassManagerTest.o: llvm/unittests/VMCore/PassManagerTest.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_VMCore-PassManagerTest.o -MD -MP -MF $(DEPDIR)/llvmunittest_VMCore-PassManagerTest.Tpo -c -o llvmunittest_VMCore-PassManagerTest.o `test -f 'llvm/unittests/VMCore/PassManagerTest.cpp' || echo '$(srcdir)/'`llvm/unittests/VMCore/PassManagerTest.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_VMCore-PassManagerTest.Tpo $(DEPDIR)/llvmunittest_VMCore-PassManagerTest.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/VMCore/PassManagerTest.cpp' object='llvmunittest_VMCore-PassManagerTest.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_VMCore-PassManagerTest.o `test -f 'llvm/unittests/VMCore/PassManagerTest.cpp' || echo '$(srcdir)/'`llvm/unittests/VMCore/PassManagerTest.cpp
-
-llvmunittest_VMCore-PassManagerTest.obj: llvm/unittests/VMCore/PassManagerTest.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -MT llvmunittest_VMCore-PassManagerTest.obj -MD -MP -MF $(DEPDIR)/llvmunittest_VMCore-PassManagerTest.Tpo -c -o llvmunittest_VMCore-PassManagerTest.obj `if test -f 'llvm/unittests/VMCore/PassManagerTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/VMCore/PassManagerTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/VMCore/PassManagerTest.cpp'; fi`
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/llvmunittest_VMCore-PassManagerTest.Tpo $(DEPDIR)/llvmunittest_VMCore-PassManagerTest.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/unittests/VMCore/PassManagerTest.cpp' object='llvmunittest_VMCore-PassManagerTest.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(llvmunittest_VMCore_CPPFLAGS) $(CPPFLAGS) $(llvmunittest_VMCore_CXXFLAGS) $(CXXFLAGS) -c -o llvmunittest_VMCore-PassManagerTest.obj `if test -f 'llvm/unittests/VMCore/PassManagerTest.cpp'; then $(CYGPATH_W) 'llvm/unittests/VMCore/PassManagerTest.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/unittests/VMCore/PassManagerTest.cpp'; fi`
-
-not-not.o: llvm/utils/not/not.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(not_CPPFLAGS) $(CPPFLAGS) $(not_CXXFLAGS) $(CXXFLAGS) -MT not-not.o -MD -MP -MF $(DEPDIR)/not-not.Tpo -c -o not-not.o `test -f 'llvm/utils/not/not.cpp' || echo '$(srcdir)/'`llvm/utils/not/not.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/not-not.Tpo $(DEPDIR)/not-not.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/not/not.cpp' object='not-not.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(not_CPPFLAGS) $(CPPFLAGS) $(not_CXXFLAGS) $(CXXFLAGS) -c -o not-not.o `test -f 'llvm/utils/not/not.cpp' || echo '$(srcdir)/'`llvm/utils/not/not.cpp
-
-not-not.obj: llvm/utils/not/not.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(not_CPPFLAGS) $(CPPFLAGS) $(not_CXXFLAGS) $(CXXFLAGS) -MT not-not.obj -MD -MP -MF $(DEPDIR)/not-not.Tpo -c -o not-not.obj `if test -f 'llvm/utils/not/not.cpp'; then $(CYGPATH_W) 'llvm/utils/not/not.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/not/not.cpp'; fi`
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/not-not.Tpo $(DEPDIR)/not-not.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/not/not.cpp' object='not-not.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(not_CPPFLAGS) $(CPPFLAGS) $(not_CXXFLAGS) $(CXXFLAGS) -c -o not-not.obj `if test -f 'llvm/utils/not/not.cpp'; then $(CYGPATH_W) 'llvm/utils/not/not.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/not/not.cpp'; fi`
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-ARMDecoderEmitter.obj `if test -f 'llvm/utils/TableGen/ARMDecoderEmitter.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/ARMDecoderEmitter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/ARMDecoderEmitter.cpp'; fi`
tblgen-AsmMatcherEmitter.o: llvm/utils/TableGen/AsmMatcherEmitter.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-AsmMatcherEmitter.o -MD -MP -MF $(DEPDIR)/tblgen-AsmMatcherEmitter.Tpo -c -o tblgen-AsmMatcherEmitter.o `test -f 'llvm/utils/TableGen/AsmMatcherEmitter.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -6527,22 +4476,6 @@ tblgen-AsmMatcherEmitter.obj: llvm/utils/TableGen/AsmMatcherEmitter.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-AsmMatcherEmitter.obj `if test -f 'llvm/utils/TableGen/AsmMatcherEmitter.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/AsmMatcherEmitter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/AsmMatcherEmitter.cpp'; fi`
-tblgen-AsmWriterEmitter.o: llvm/utils/TableGen/AsmWriterEmitter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-AsmWriterEmitter.o -MD -MP -MF $(DEPDIR)/tblgen-AsmWriterEmitter.Tpo -c -o tblgen-AsmWriterEmitter.o `test -f 'llvm/utils/TableGen/AsmWriterEmitter.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/AsmWriterEmitter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-AsmWriterEmitter.Tpo $(DEPDIR)/tblgen-AsmWriterEmitter.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/AsmWriterEmitter.cpp' object='tblgen-AsmWriterEmitter.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-AsmWriterEmitter.o `test -f 'llvm/utils/TableGen/AsmWriterEmitter.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/AsmWriterEmitter.cpp
-
-tblgen-AsmWriterEmitter.obj: llvm/utils/TableGen/AsmWriterEmitter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-AsmWriterEmitter.obj -MD -MP -MF $(DEPDIR)/tblgen-AsmWriterEmitter.Tpo -c -o tblgen-AsmWriterEmitter.obj `if test -f 'llvm/utils/TableGen/AsmWriterEmitter.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/AsmWriterEmitter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/AsmWriterEmitter.cpp'; fi`
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-AsmWriterEmitter.Tpo $(DEPDIR)/tblgen-AsmWriterEmitter.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/AsmWriterEmitter.cpp' object='tblgen-AsmWriterEmitter.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-AsmWriterEmitter.obj `if test -f 'llvm/utils/TableGen/AsmWriterEmitter.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/AsmWriterEmitter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/AsmWriterEmitter.cpp'; fi`
-
tblgen-AsmWriterInst.o: llvm/utils/TableGen/AsmWriterInst.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-AsmWriterInst.o -MD -MP -MF $(DEPDIR)/tblgen-AsmWriterInst.Tpo -c -o tblgen-AsmWriterInst.o `test -f 'llvm/utils/TableGen/AsmWriterInst.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/AsmWriterInst.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-AsmWriterInst.Tpo $(DEPDIR)/tblgen-AsmWriterInst.Po
@@ -6575,6 +4508,38 @@ tblgen-CallingConvEmitter.obj: llvm/utils/TableGen/CallingConvEmitter.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-CallingConvEmitter.obj `if test -f 'llvm/utils/TableGen/CallingConvEmitter.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/CallingConvEmitter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/CallingConvEmitter.cpp'; fi`
+tblgen-ClangASTNodesEmitter.o: llvm/utils/TableGen/ClangASTNodesEmitter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-ClangASTNodesEmitter.o -MD -MP -MF $(DEPDIR)/tblgen-ClangASTNodesEmitter.Tpo -c -o tblgen-ClangASTNodesEmitter.o `test -f 'llvm/utils/TableGen/ClangASTNodesEmitter.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/ClangASTNodesEmitter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-ClangASTNodesEmitter.Tpo $(DEPDIR)/tblgen-ClangASTNodesEmitter.Po
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/ClangASTNodesEmitter.cpp' object='tblgen-ClangASTNodesEmitter.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-ClangASTNodesEmitter.o `test -f 'llvm/utils/TableGen/ClangASTNodesEmitter.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/ClangASTNodesEmitter.cpp
+
+tblgen-ClangASTNodesEmitter.obj: llvm/utils/TableGen/ClangASTNodesEmitter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-ClangASTNodesEmitter.obj -MD -MP -MF $(DEPDIR)/tblgen-ClangASTNodesEmitter.Tpo -c -o tblgen-ClangASTNodesEmitter.obj `if test -f 'llvm/utils/TableGen/ClangASTNodesEmitter.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/ClangASTNodesEmitter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/ClangASTNodesEmitter.cpp'; fi`
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-ClangASTNodesEmitter.Tpo $(DEPDIR)/tblgen-ClangASTNodesEmitter.Po
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/ClangASTNodesEmitter.cpp' object='tblgen-ClangASTNodesEmitter.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-ClangASTNodesEmitter.obj `if test -f 'llvm/utils/TableGen/ClangASTNodesEmitter.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/ClangASTNodesEmitter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/ClangASTNodesEmitter.cpp'; fi`
+
+tblgen-ClangAttrEmitter.o: llvm/utils/TableGen/ClangAttrEmitter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-ClangAttrEmitter.o -MD -MP -MF $(DEPDIR)/tblgen-ClangAttrEmitter.Tpo -c -o tblgen-ClangAttrEmitter.o `test -f 'llvm/utils/TableGen/ClangAttrEmitter.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/ClangAttrEmitter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-ClangAttrEmitter.Tpo $(DEPDIR)/tblgen-ClangAttrEmitter.Po
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/ClangAttrEmitter.cpp' object='tblgen-ClangAttrEmitter.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-ClangAttrEmitter.o `test -f 'llvm/utils/TableGen/ClangAttrEmitter.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/ClangAttrEmitter.cpp
+
+tblgen-ClangAttrEmitter.obj: llvm/utils/TableGen/ClangAttrEmitter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-ClangAttrEmitter.obj -MD -MP -MF $(DEPDIR)/tblgen-ClangAttrEmitter.Tpo -c -o tblgen-ClangAttrEmitter.obj `if test -f 'llvm/utils/TableGen/ClangAttrEmitter.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/ClangAttrEmitter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/ClangAttrEmitter.cpp'; fi`
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-ClangAttrEmitter.Tpo $(DEPDIR)/tblgen-ClangAttrEmitter.Po
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/ClangAttrEmitter.cpp' object='tblgen-ClangAttrEmitter.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-ClangAttrEmitter.obj `if test -f 'llvm/utils/TableGen/ClangAttrEmitter.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/ClangAttrEmitter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/ClangAttrEmitter.cpp'; fi`
+
tblgen-ClangDiagnosticsEmitter.o: llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-ClangDiagnosticsEmitter.o -MD -MP -MF $(DEPDIR)/tblgen-ClangDiagnosticsEmitter.Tpo -c -o tblgen-ClangDiagnosticsEmitter.o `test -f 'llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-ClangDiagnosticsEmitter.Tpo $(DEPDIR)/tblgen-ClangDiagnosticsEmitter.Po
@@ -6735,22 +4700,6 @@ tblgen-DAGISelMatcherOpt.obj: llvm/utils/TableGen/DAGISelMatcherOpt.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-DAGISelMatcherOpt.obj `if test -f 'llvm/utils/TableGen/DAGISelMatcherOpt.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/DAGISelMatcherOpt.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/DAGISelMatcherOpt.cpp'; fi`
-tblgen-DisassemblerEmitter.o: llvm/utils/TableGen/DisassemblerEmitter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-DisassemblerEmitter.o -MD -MP -MF $(DEPDIR)/tblgen-DisassemblerEmitter.Tpo -c -o tblgen-DisassemblerEmitter.o `test -f 'llvm/utils/TableGen/DisassemblerEmitter.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/DisassemblerEmitter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-DisassemblerEmitter.Tpo $(DEPDIR)/tblgen-DisassemblerEmitter.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/DisassemblerEmitter.cpp' object='tblgen-DisassemblerEmitter.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-DisassemblerEmitter.o `test -f 'llvm/utils/TableGen/DisassemblerEmitter.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/DisassemblerEmitter.cpp
-
-tblgen-DisassemblerEmitter.obj: llvm/utils/TableGen/DisassemblerEmitter.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-DisassemblerEmitter.obj -MD -MP -MF $(DEPDIR)/tblgen-DisassemblerEmitter.Tpo -c -o tblgen-DisassemblerEmitter.obj `if test -f 'llvm/utils/TableGen/DisassemblerEmitter.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/DisassemblerEmitter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/DisassemblerEmitter.cpp'; fi`
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-DisassemblerEmitter.Tpo $(DEPDIR)/tblgen-DisassemblerEmitter.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/DisassemblerEmitter.cpp' object='tblgen-DisassemblerEmitter.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-DisassemblerEmitter.obj `if test -f 'llvm/utils/TableGen/DisassemblerEmitter.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/DisassemblerEmitter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/DisassemblerEmitter.cpp'; fi`
-
tblgen-EDEmitter.o: llvm/utils/TableGen/EDEmitter.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-EDEmitter.o -MD -MP -MF $(DEPDIR)/tblgen-EDEmitter.Tpo -c -o tblgen-EDEmitter.o `test -f 'llvm/utils/TableGen/EDEmitter.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/EDEmitter.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-EDEmitter.Tpo $(DEPDIR)/tblgen-EDEmitter.Po
@@ -6847,6 +4796,22 @@ tblgen-LLVMCConfigurationEmitter.obj: llvm/utils/TableGen/LLVMCConfigurationEmit
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-LLVMCConfigurationEmitter.obj `if test -f 'llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp'; fi`
+tblgen-NeonEmitter.o: llvm/utils/TableGen/NeonEmitter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-NeonEmitter.o -MD -MP -MF $(DEPDIR)/tblgen-NeonEmitter.Tpo -c -o tblgen-NeonEmitter.o `test -f 'llvm/utils/TableGen/NeonEmitter.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/NeonEmitter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-NeonEmitter.Tpo $(DEPDIR)/tblgen-NeonEmitter.Po
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/NeonEmitter.cpp' object='tblgen-NeonEmitter.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-NeonEmitter.o `test -f 'llvm/utils/TableGen/NeonEmitter.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/NeonEmitter.cpp
+
+tblgen-NeonEmitter.obj: llvm/utils/TableGen/NeonEmitter.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-NeonEmitter.obj -MD -MP -MF $(DEPDIR)/tblgen-NeonEmitter.Tpo -c -o tblgen-NeonEmitter.obj `if test -f 'llvm/utils/TableGen/NeonEmitter.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/NeonEmitter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/NeonEmitter.cpp'; fi`
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-NeonEmitter.Tpo $(DEPDIR)/tblgen-NeonEmitter.Po
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/NeonEmitter.cpp' object='tblgen-NeonEmitter.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-NeonEmitter.obj `if test -f 'llvm/utils/TableGen/NeonEmitter.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/NeonEmitter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/NeonEmitter.cpp'; fi`
+
tblgen-OptParserEmitter.o: llvm/utils/TableGen/OptParserEmitter.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-OptParserEmitter.o -MD -MP -MF $(DEPDIR)/tblgen-OptParserEmitter.Tpo -c -o tblgen-OptParserEmitter.o `test -f 'llvm/utils/TableGen/OptParserEmitter.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/OptParserEmitter.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-OptParserEmitter.Tpo $(DEPDIR)/tblgen-OptParserEmitter.Po
@@ -6911,6 +4876,38 @@ tblgen-SubtargetEmitter.obj: llvm/utils/TableGen/SubtargetEmitter.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-SubtargetEmitter.obj `if test -f 'llvm/utils/TableGen/SubtargetEmitter.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/SubtargetEmitter.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/SubtargetEmitter.cpp'; fi`
+tblgen-TableGenBackend.o: llvm/utils/TableGen/TableGenBackend.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-TableGenBackend.o -MD -MP -MF $(DEPDIR)/tblgen-TableGenBackend.Tpo -c -o tblgen-TableGenBackend.o `test -f 'llvm/utils/TableGen/TableGenBackend.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/TableGenBackend.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-TableGenBackend.Tpo $(DEPDIR)/tblgen-TableGenBackend.Po
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/TableGenBackend.cpp' object='tblgen-TableGenBackend.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-TableGenBackend.o `test -f 'llvm/utils/TableGen/TableGenBackend.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/TableGenBackend.cpp
+
+tblgen-TableGenBackend.obj: llvm/utils/TableGen/TableGenBackend.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-TableGenBackend.obj -MD -MP -MF $(DEPDIR)/tblgen-TableGenBackend.Tpo -c -o tblgen-TableGenBackend.obj `if test -f 'llvm/utils/TableGen/TableGenBackend.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/TableGenBackend.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/TableGenBackend.cpp'; fi`
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-TableGenBackend.Tpo $(DEPDIR)/tblgen-TableGenBackend.Po
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/TableGenBackend.cpp' object='tblgen-TableGenBackend.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-TableGenBackend.obj `if test -f 'llvm/utils/TableGen/TableGenBackend.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/TableGenBackend.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/TableGenBackend.cpp'; fi`
+
+tblgen-TableGen.o: llvm/utils/TableGen/TableGen.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-TableGen.o -MD -MP -MF $(DEPDIR)/tblgen-TableGen.Tpo -c -o tblgen-TableGen.o `test -f 'llvm/utils/TableGen/TableGen.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/TableGen.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-TableGen.Tpo $(DEPDIR)/tblgen-TableGen.Po
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/TableGen.cpp' object='tblgen-TableGen.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-TableGen.o `test -f 'llvm/utils/TableGen/TableGen.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/TableGen.cpp
+
+tblgen-TableGen.obj: llvm/utils/TableGen/TableGen.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-TableGen.obj -MD -MP -MF $(DEPDIR)/tblgen-TableGen.Tpo -c -o tblgen-TableGen.obj `if test -f 'llvm/utils/TableGen/TableGen.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/TableGen.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/TableGen.cpp'; fi`
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-TableGen.Tpo $(DEPDIR)/tblgen-TableGen.Po
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/TableGen.cpp' object='tblgen-TableGen.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-TableGen.obj `if test -f 'llvm/utils/TableGen/TableGen.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/TableGen.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/TableGen.cpp'; fi`
+
tblgen-TGLexer.o: llvm/utils/TableGen/TGLexer.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-TGLexer.o -MD -MP -MF $(DEPDIR)/tblgen-TGLexer.Tpo -c -o tblgen-TGLexer.o `test -f 'llvm/utils/TableGen/TGLexer.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/TGLexer.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-TGLexer.Tpo $(DEPDIR)/tblgen-TGLexer.Po
@@ -6959,70 +4956,6 @@ tblgen-TGValueTypes.obj: llvm/utils/TableGen/TGValueTypes.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-TGValueTypes.obj `if test -f 'llvm/utils/TableGen/TGValueTypes.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/TGValueTypes.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/TGValueTypes.cpp'; fi`
-tblgen-TableGen.o: llvm/utils/TableGen/TableGen.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-TableGen.o -MD -MP -MF $(DEPDIR)/tblgen-TableGen.Tpo -c -o tblgen-TableGen.o `test -f 'llvm/utils/TableGen/TableGen.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/TableGen.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-TableGen.Tpo $(DEPDIR)/tblgen-TableGen.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/TableGen.cpp' object='tblgen-TableGen.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-TableGen.o `test -f 'llvm/utils/TableGen/TableGen.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/TableGen.cpp
-
-tblgen-TableGen.obj: llvm/utils/TableGen/TableGen.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-TableGen.obj -MD -MP -MF $(DEPDIR)/tblgen-TableGen.Tpo -c -o tblgen-TableGen.obj `if test -f 'llvm/utils/TableGen/TableGen.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/TableGen.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/TableGen.cpp'; fi`
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-TableGen.Tpo $(DEPDIR)/tblgen-TableGen.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/TableGen.cpp' object='tblgen-TableGen.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-TableGen.obj `if test -f 'llvm/utils/TableGen/TableGen.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/TableGen.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/TableGen.cpp'; fi`
-
-tblgen-TableGenBackend.o: llvm/utils/TableGen/TableGenBackend.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-TableGenBackend.o -MD -MP -MF $(DEPDIR)/tblgen-TableGenBackend.Tpo -c -o tblgen-TableGenBackend.o `test -f 'llvm/utils/TableGen/TableGenBackend.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/TableGenBackend.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-TableGenBackend.Tpo $(DEPDIR)/tblgen-TableGenBackend.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/TableGenBackend.cpp' object='tblgen-TableGenBackend.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-TableGenBackend.o `test -f 'llvm/utils/TableGen/TableGenBackend.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/TableGenBackend.cpp
-
-tblgen-TableGenBackend.obj: llvm/utils/TableGen/TableGenBackend.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-TableGenBackend.obj -MD -MP -MF $(DEPDIR)/tblgen-TableGenBackend.Tpo -c -o tblgen-TableGenBackend.obj `if test -f 'llvm/utils/TableGen/TableGenBackend.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/TableGenBackend.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/TableGenBackend.cpp'; fi`
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-TableGenBackend.Tpo $(DEPDIR)/tblgen-TableGenBackend.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/TableGenBackend.cpp' object='tblgen-TableGenBackend.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-TableGenBackend.obj `if test -f 'llvm/utils/TableGen/TableGenBackend.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/TableGenBackend.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/TableGenBackend.cpp'; fi`
-
-tblgen-X86DisassemblerTables.o: llvm/utils/TableGen/X86DisassemblerTables.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-X86DisassemblerTables.o -MD -MP -MF $(DEPDIR)/tblgen-X86DisassemblerTables.Tpo -c -o tblgen-X86DisassemblerTables.o `test -f 'llvm/utils/TableGen/X86DisassemblerTables.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/X86DisassemblerTables.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-X86DisassemblerTables.Tpo $(DEPDIR)/tblgen-X86DisassemblerTables.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/X86DisassemblerTables.cpp' object='tblgen-X86DisassemblerTables.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-X86DisassemblerTables.o `test -f 'llvm/utils/TableGen/X86DisassemblerTables.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/X86DisassemblerTables.cpp
-
-tblgen-X86DisassemblerTables.obj: llvm/utils/TableGen/X86DisassemblerTables.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-X86DisassemblerTables.obj -MD -MP -MF $(DEPDIR)/tblgen-X86DisassemblerTables.Tpo -c -o tblgen-X86DisassemblerTables.obj `if test -f 'llvm/utils/TableGen/X86DisassemblerTables.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/X86DisassemblerTables.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/X86DisassemblerTables.cpp'; fi`
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-X86DisassemblerTables.Tpo $(DEPDIR)/tblgen-X86DisassemblerTables.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/X86DisassemblerTables.cpp' object='tblgen-X86DisassemblerTables.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-X86DisassemblerTables.obj `if test -f 'llvm/utils/TableGen/X86DisassemblerTables.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/X86DisassemblerTables.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/X86DisassemblerTables.cpp'; fi`
-
-tblgen-X86RecognizableInstr.o: llvm/utils/TableGen/X86RecognizableInstr.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-X86RecognizableInstr.o -MD -MP -MF $(DEPDIR)/tblgen-X86RecognizableInstr.Tpo -c -o tblgen-X86RecognizableInstr.o `test -f 'llvm/utils/TableGen/X86RecognizableInstr.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/X86RecognizableInstr.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-X86RecognizableInstr.Tpo $(DEPDIR)/tblgen-X86RecognizableInstr.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/X86RecognizableInstr.cpp' object='tblgen-X86RecognizableInstr.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-X86RecognizableInstr.o `test -f 'llvm/utils/TableGen/X86RecognizableInstr.cpp' || echo '$(srcdir)/'`llvm/utils/TableGen/X86RecognizableInstr.cpp
-
-tblgen-X86RecognizableInstr.obj: llvm/utils/TableGen/X86RecognizableInstr.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-X86RecognizableInstr.obj -MD -MP -MF $(DEPDIR)/tblgen-X86RecognizableInstr.Tpo -c -o tblgen-X86RecognizableInstr.obj `if test -f 'llvm/utils/TableGen/X86RecognizableInstr.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/X86RecognizableInstr.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/X86RecognizableInstr.cpp'; fi`
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-X86RecognizableInstr.Tpo $(DEPDIR)/tblgen-X86RecognizableInstr.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/utils/TableGen/X86RecognizableInstr.cpp' object='tblgen-X86RecognizableInstr.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-X86RecognizableInstr.obj `if test -f 'llvm/utils/TableGen/X86RecognizableInstr.cpp'; then $(CYGPATH_W) 'llvm/utils/TableGen/X86RecognizableInstr.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/utils/TableGen/X86RecognizableInstr.cpp'; fi`
-
tblgen-Alarm.o: llvm/lib/System/Alarm.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-Alarm.o -MD -MP -MF $(DEPDIR)/tblgen-Alarm.Tpo -c -o tblgen-Alarm.o `test -f 'llvm/lib/System/Alarm.cpp' || echo '$(srcdir)/'`llvm/lib/System/Alarm.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-Alarm.Tpo $(DEPDIR)/tblgen-Alarm.Po
@@ -7295,6 +5228,22 @@ tblgen-TimeValue.obj: llvm/lib/System/TimeValue.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-TimeValue.obj `if test -f 'llvm/lib/System/TimeValue.cpp'; then $(CYGPATH_W) 'llvm/lib/System/TimeValue.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/System/TimeValue.cpp'; fi`
+tblgen-Valgrind.o: llvm/lib/System/Valgrind.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-Valgrind.o -MD -MP -MF $(DEPDIR)/tblgen-Valgrind.Tpo -c -o tblgen-Valgrind.o `test -f 'llvm/lib/System/Valgrind.cpp' || echo '$(srcdir)/'`llvm/lib/System/Valgrind.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-Valgrind.Tpo $(DEPDIR)/tblgen-Valgrind.Po
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/System/Valgrind.cpp' object='tblgen-Valgrind.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-Valgrind.o `test -f 'llvm/lib/System/Valgrind.cpp' || echo '$(srcdir)/'`llvm/lib/System/Valgrind.cpp
+
+tblgen-Valgrind.obj: llvm/lib/System/Valgrind.cpp
+@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-Valgrind.obj -MD -MP -MF $(DEPDIR)/tblgen-Valgrind.Tpo -c -o tblgen-Valgrind.obj `if test -f 'llvm/lib/System/Valgrind.cpp'; then $(CYGPATH_W) 'llvm/lib/System/Valgrind.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/System/Valgrind.cpp'; fi`
+@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-Valgrind.Tpo $(DEPDIR)/tblgen-Valgrind.Po
+@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/System/Valgrind.cpp' object='tblgen-Valgrind.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-Valgrind.obj `if test -f 'llvm/lib/System/Valgrind.cpp'; then $(CYGPATH_W) 'llvm/lib/System/Valgrind.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/System/Valgrind.cpp'; fi`
+
tblgen-APFloat.o: llvm/lib/Support/APFloat.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-APFloat.o -MD -MP -MF $(DEPDIR)/tblgen-APFloat.Tpo -c -o tblgen-APFloat.o `test -f 'llvm/lib/Support/APFloat.cpp' || echo '$(srcdir)/'`llvm/lib/Support/APFloat.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-APFloat.Tpo $(DEPDIR)/tblgen-APFloat.Po
@@ -7647,22 +5596,6 @@ tblgen-Regex.obj: llvm/lib/Support/Regex.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-Regex.obj `if test -f 'llvm/lib/Support/Regex.cpp'; then $(CYGPATH_W) 'llvm/lib/Support/Regex.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/Support/Regex.cpp'; fi`
-tblgen-SlowOperationInformer.o: llvm/lib/Support/SlowOperationInformer.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-SlowOperationInformer.o -MD -MP -MF $(DEPDIR)/tblgen-SlowOperationInformer.Tpo -c -o tblgen-SlowOperationInformer.o `test -f 'llvm/lib/Support/SlowOperationInformer.cpp' || echo '$(srcdir)/'`llvm/lib/Support/SlowOperationInformer.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-SlowOperationInformer.Tpo $(DEPDIR)/tblgen-SlowOperationInformer.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Support/SlowOperationInformer.cpp' object='tblgen-SlowOperationInformer.o' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-SlowOperationInformer.o `test -f 'llvm/lib/Support/SlowOperationInformer.cpp' || echo '$(srcdir)/'`llvm/lib/Support/SlowOperationInformer.cpp
-
-tblgen-SlowOperationInformer.obj: llvm/lib/Support/SlowOperationInformer.cpp
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-SlowOperationInformer.obj -MD -MP -MF $(DEPDIR)/tblgen-SlowOperationInformer.Tpo -c -o tblgen-SlowOperationInformer.obj `if test -f 'llvm/lib/Support/SlowOperationInformer.cpp'; then $(CYGPATH_W) 'llvm/lib/Support/SlowOperationInformer.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/Support/SlowOperationInformer.cpp'; fi`
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-SlowOperationInformer.Tpo $(DEPDIR)/tblgen-SlowOperationInformer.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='llvm/lib/Support/SlowOperationInformer.cpp' object='tblgen-SlowOperationInformer.obj' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-SlowOperationInformer.obj `if test -f 'llvm/lib/Support/SlowOperationInformer.cpp'; then $(CYGPATH_W) 'llvm/lib/Support/SlowOperationInformer.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/Support/SlowOperationInformer.cpp'; fi`
-
tblgen-SmallPtrSet.o: llvm/lib/Support/SmallPtrSet.cpp
@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -MT tblgen-SmallPtrSet.o -MD -MP -MF $(DEPDIR)/tblgen-SmallPtrSet.Tpo -c -o tblgen-SmallPtrSet.o `test -f 'llvm/lib/Support/SmallPtrSet.cpp' || echo '$(srcdir)/'`llvm/lib/Support/SmallPtrSet.cpp
@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tblgen-SmallPtrSet.Tpo $(DEPDIR)/tblgen-SmallPtrSet.Po
@@ -7919,30 +5852,6 @@ tblgen-raw_ostream.obj: llvm/lib/Support/raw_ostream.cpp
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tblgen_CXXFLAGS) $(CXXFLAGS) -c -o tblgen-raw_ostream.obj `if test -f 'llvm/lib/Support/raw_ostream.cpp'; then $(CYGPATH_W) 'llvm/lib/Support/raw_ostream.cpp'; else $(CYGPATH_W) '$(srcdir)/llvm/lib/Support/raw_ostream.cpp'; fi`
-.cpp.o:
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $<
-
-.cpp.obj:
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
-
-.cpp.lo:
-@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
-@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
-@am__fastdepCXX_FALSE@ $(AM_V_CXX) @AM_BACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $<
-
mostlyclean-libtool:
-rm -f *.lo
@@ -8004,98 +5913,6 @@ GTAGS:
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
-check-TESTS: $(TESTS)
- @failed=0; all=0; xfail=0; xpass=0; skip=0; \
- srcdir=$(srcdir); export srcdir; \
- list=' $(TESTS) '; \
- $(am__tty_colors); \
- if test -n "$$list"; then \
- for tst in $$list; do \
- if test -f ./$$tst; then dir=./; \
- elif test -f $$tst; then dir=; \
- else dir="$(srcdir)/"; fi; \
- if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \
- all=`expr $$all + 1`; \
- case " $(XFAIL_TESTS) " in \
- *[\ \ ]$$tst[\ \ ]*) \
- xpass=`expr $$xpass + 1`; \
- failed=`expr $$failed + 1`; \
- col=$$red; res=XPASS; \
- ;; \
- *) \
- col=$$grn; res=PASS; \
- ;; \
- esac; \
- elif test $$? -ne 77; then \
- all=`expr $$all + 1`; \
- case " $(XFAIL_TESTS) " in \
- *[\ \ ]$$tst[\ \ ]*) \
- xfail=`expr $$xfail + 1`; \
- col=$$lgn; res=XFAIL; \
- ;; \
- *) \
- failed=`expr $$failed + 1`; \
- col=$$red; res=FAIL; \
- ;; \
- esac; \
- else \
- skip=`expr $$skip + 1`; \
- col=$$blu; res=SKIP; \
- fi; \
- echo "$${col}$$res$${std}: $$tst"; \
- done; \
- if test "$$all" -eq 1; then \
- tests="test"; \
- All=""; \
- else \
- tests="tests"; \
- All="All "; \
- fi; \
- if test "$$failed" -eq 0; then \
- if test "$$xfail" -eq 0; then \
- banner="$$All$$all $$tests passed"; \
- else \
- if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \
- banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \
- fi; \
- else \
- if test "$$xpass" -eq 0; then \
- banner="$$failed of $$all $$tests failed"; \
- else \
- if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \
- banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \
- fi; \
- fi; \
- dashes="$$banner"; \
- skipped=""; \
- if test "$$skip" -ne 0; then \
- if test "$$skip" -eq 1; then \
- skipped="($$skip test was not run)"; \
- else \
- skipped="($$skip tests were not run)"; \
- fi; \
- test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \
- dashes="$$skipped"; \
- fi; \
- report=""; \
- if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \
- report="Please report to $(PACKAGE_BUGREPORT)"; \
- test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \
- dashes="$$report"; \
- fi; \
- dashes=`echo "$$dashes" | sed s/./=/g`; \
- if test "$$failed" -eq 0; then \
- echo "$$grn$$dashes"; \
- else \
- echo "$$red$$dashes"; \
- fi; \
- echo "$$banner"; \
- test -z "$$skipped" || echo "$$skipped"; \
- test -z "$$report" || echo "$$report"; \
- echo "$$dashes$$std"; \
- test "$$failed" -eq 0; \
- else :; fi
-
distdir: $(DISTFILES)
$(am__remove_distdir)
test -d "$(distdir)" || mkdir "$(distdir)"
@@ -8247,9 +6064,6 @@ distcleancheck: distclean
$(distcleancheck_listfiles) ; \
exit 1; } >&2
check-am: all-am
- $(MAKE) $(AM_MAKEFLAGS) $(check_LTLIBRARIES) $(check_PROGRAMS) \
- $(check_SCRIPTS)
- $(MAKE) $(AM_MAKEFLAGS) check-TESTS
check: $(BUILT_SOURCES)
$(MAKE) $(AM_MAKEFLAGS) check-am
all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) clamavcxx-config.h
@@ -8285,9 +6099,8 @@ maintainer-clean-generic:
@BUILD_EXTERNAL_LLVM_TRUE@clean-local:
clean: clean-am
-clean-am: clean-checkLTLIBRARIES clean-checkPROGRAMS clean-generic \
- clean-libtool clean-local clean-noinstLTLIBRARIES \
- clean-noinstPROGRAMS mostlyclean-am
+clean-am: clean-generic clean-libtool clean-local \
+ clean-noinstLTLIBRARIES clean-noinstPROGRAMS mostlyclean-am
distclean: distclean-am
-rm -f $(am__CONFIG_DISTCLEAN_FILES)
@@ -8358,15 +6171,15 @@ ps-am:
uninstall-am:
-.MAKE: all check check-am install install-am install-strip
+.MAKE: all check install install-am install-strip
-.PHONY: CTAGS GTAGS all all-am am--refresh check check-TESTS check-am \
- clean clean-checkLTLIBRARIES clean-checkPROGRAMS clean-generic \
- clean-libtool clean-local clean-noinstLTLIBRARIES \
- clean-noinstPROGRAMS ctags dist dist-all dist-bzip2 dist-gzip \
- dist-hook dist-lzma dist-shar dist-tarZ dist-xz dist-zip \
- distcheck distclean distclean-compile distclean-generic \
- distclean-hdr distclean-libtool distclean-local distclean-tags \
+.PHONY: CTAGS GTAGS all all-am am--refresh check check-am clean \
+ clean-generic clean-libtool clean-local \
+ clean-noinstLTLIBRARIES clean-noinstPROGRAMS ctags dist \
+ dist-all dist-bzip2 dist-gzip dist-hook dist-lzma dist-shar \
+ dist-tarZ dist-xz dist-zip distcheck distclean \
+ distclean-compile distclean-generic distclean-hdr \
+ distclean-libtool distclean-local distclean-tags \
distcleancheck distdir distuninstallcheck dvi dvi-am html \
html-am info info-am install install-am install-data \
install-data-am install-dvi install-dvi-am install-exec \
@@ -8396,15 +6209,6 @@ uninstall-am:
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@X86GenInstrInfo.inc: llvm/lib/Target/X86/X86.td $(TBLGEN)
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_X86) -gen-instr-desc -o $@ $<
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@X86GenAsmWriter.inc: llvm/lib/Target/X86/X86.td $(TBLGEN)
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_X86) -gen-asm-writer -o $@ $<
-
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@X86GenAsmWriter1.inc: llvm/lib/Target/X86/X86.td $(TBLGEN)
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_X86) -gen-asm-writer -asmwriternum=1 -o $@ $<
-
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@X86GenAsmMatcher.inc: llvm/lib/Target/X86/X86.td $(TBLGEN)
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_X86) -gen-asm-matcher -o $@ $<
-
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@X86GenDAGISel.inc: llvm/lib/Target/X86/X86.td $(TBLGEN)
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_X86) -gen-dag-isel -o $@ $<
@@ -8422,9 +6226,6 @@ uninstall-am:
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@PPCGenRegisterNames.inc: llvm/lib/Target/PowerPC/PPC.td $(TBLGEN)
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_PPC) -gen-register-enums -o $@ $<
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@PPCGenAsmWriter.inc: llvm/lib/Target/PowerPC/PPC.td $(TBLGEN)
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_PPC) -gen-asm-writer -o $@ $<
-
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@PPCGenCodeEmitter.inc: llvm/lib/Target/PowerPC/PPC.td $(TBLGEN)
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_PPC) -gen-emitter -o $@ $<
@@ -8448,44 +6249,6 @@ uninstall-am:
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@PPCGenSubtarget.inc: llvm/lib/Target/PowerPC/PPC.td $(TBLGEN)
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_PPC) -gen-subtarget -o $@ $<
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ARMGenRegisterInfo.h.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-register-desc-header -o $@ $<
-
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ARMGenRegisterNames.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-register-enums -o $@ $<
-
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ARMGenRegisterInfo.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-register-desc -o $@ $<
-
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ARMGenInstrNames.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-instr-enums -o $@ $<
-
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ARMGenInstrInfo.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-instr-desc -o $@ $<
-
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ARMGenCodeEmitter.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-emitter -o $@ $<
-
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ARMGenAsmWriter.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-asm-writer -o $@ $<
-
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ARMGenDAGISel.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-dag-isel -o $@ $<
-
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ARMGenCallingConv.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-callingconv -o $@ $<
-
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ARMGenSubtarget.inc: llvm/lib/Target/ARM/ARM.td $(TBLGEN)
-@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ $(TBLGEN_V) $(TBLGEN_FLAGS_ARM) -gen-subtarget -o $@ $<
-@BUILD_EXTERNAL_LLVM_FALSE@@ifGNUmake@ TESTS+=llvmcheck.sh
-
-#bytecode2llvm.cpp: build-llvm
-
-@BUILD_EXTERNAL_LLVM_FALSE@build-llvm:
-@BUILD_EXTERNAL_LLVM_FALSE@ +$(GMAKE) -C llvm OPTIMIZE_OPTION=-O2 libs-only
-
-@BUILD_EXTERNAL_LLVM_FALSE@build-llvm-for-check:
-@BUILD_EXTERNAL_LLVM_FALSE@ +$(GMAKE) -C llvm OPTIMIZE_OPTION=-O2 tools-only
# Don't use make -C here, otherwise there's a race condition between distclean
# and clean (distclean removes all Makefiles in llvm/)
@@ -8495,6 +6258,7 @@ uninstall-am:
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ rm -f *.inc
@BUILD_EXTERNAL_LLVM_FALSE@@MAINTAINER_MODE_TRUE@ rm -f llvm/include/llvm/Intrinsics.gen
@BUILD_EXTERNAL_LLVM_FALSE@ rm -f llvm/test/site.exp llvm/test/site.bak llvm/test/*.out llvm/test/*.sum llvm/test/*.log
+@BUILD_EXTERNAL_LLVM_FALSE@ rm -f $(top_srcdir)/llvm/utils/lit/lit/*.pyc
@BUILD_EXTERNAL_LLVM_FALSE@distclean-local:
@BUILD_EXTERNAL_LLVM_FALSE@ rm -f llvm/docs/doxygen.cfg llvm/test/Unit/.dir llvm/test/Unit/lit.site.cfg
@@ -8502,7 +6266,7 @@ uninstall-am:
@BUILD_EXTERNAL_LLVM_FALSE@ rm -f llvm/mklib llvm/tools/llvmc/llvm-config.in
@BUILD_EXTERNAL_LLVM_FALSE@ rm -f llvm/Makefile.config llvm/config.log
@BUILD_EXTERNAL_LLVM_FALSE@ rm -f llvm/llvm.spec llvm/include/llvm/Config/AsmPrinters.def llvm/include/llvm/Config/config.h
-@BUILD_EXTERNAL_LLVM_FALSE@ rm -f llvm/include/llvm/Config/Disassemblers.def
+@BUILD_EXTERNAL_LLVM_FALSE@ rm -f llvm/include/llvm/Config/Disassemblers.def llvm/include/llvm/Config/llvm-config.h
@BUILD_EXTERNAL_LLVM_FALSE@ rm -f llvm/include/llvm/System/DataTypes.h llvm/include/llvm/Config/Targets.def
@BUILD_EXTERNAL_LLVM_FALSE@ rm -f llvm/tools/llvmc/plugins/Base/Base.td llvm/tools/llvm-config/llvm-config.in
@BUILD_EXTERNAL_LLVM_FALSE@ rm -f llvm/include/llvm/Config/AsmParsers.def
@@ -8511,15 +6275,11 @@ uninstall-am:
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/test/Makefile.tests llvm/tools/Makefile llvm/utils/Makefile\
@BUILD_EXTERNAL_LLVM_FALSE@ llvm/Makefile.common llvm/unittests/Makefile;\
@BUILD_EXTERNAL_LLVM_FALSE@ fi
-
-@BUILD_EXTERNAL_LLVM_FALSE@check-llvm: build-llvm-for-check
-@BUILD_EXTERNAL_LLVM_FALSE@ +$(GMAKE) -C llvm check
-@BUILD_EXTERNAL_LLVM_FALSE@ +$(GMAKE) -C llvm unittests
@BUILD_EXTERNAL_LLVM_FALSE@$(top_builddir)/llvm/config.status: llvm/configure
@BUILD_EXTERNAL_LLVM_FALSE@ (cd llvm; ./config.status --recheck; ./config.status)
# rm configure generated files
-dist-hook:
+dist-hook: clean-local
make -C llvm dist-hook
rm -f $(distdir)/llvm/include/llvm/Config/*.h $(distdir)/llvm/include/llvm/Config/*.def $(distdir)/llvm/Makefile.config $(distdir)/llvm/llvm.spec
rm -f $(distdir)/llvm/docs/doxygen.cfg $(distdir)/llvm/tools/llvmc/plugins/Base/Base.td $(distdir)/llvm/tools/llvm-config/llvm-config.in
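The diff below re-emits PPCGenCodeEmitter.inc, the TableGen-produced binary encoder: getBinaryCodeForInstr() looks up a 32-bit base encoding per opcode and then ORs operand values into fixed bit fields (e.g. "Value |= (op & 31U) << 16"), calling report_fatal_error() for unsupported instructions. The minimal, self-contained C++ sketch here shows only that general pattern; the opcodes, base encodings and field offsets are invented for illustration and are not the real PPC tables.

#include <cstdint>
#include <cstdio>

// Toy TableGen-style encoder: a base-encoding table indexed by opcode, with
// operand register numbers OR'd into fixed 5-bit fields. All values invented.
enum ToyOpcode { TOY_ADD = 0, TOY_SUB = 1 };

static const uint32_t BaseEncoding[] = {
  0x7C000214u, // TOY_ADD (made-up base bits)
  0x7C000050u, // TOY_SUB (made-up base bits)
};

static uint32_t encode(ToyOpcode Op, unsigned RD, unsigned RA, unsigned RB) {
  uint32_t Value = BaseEncoding[Op];
  Value |= (RD & 31u) << 21; // destination register field
  Value |= (RA & 31u) << 16; // first source register field
  Value |= (RB & 31u) << 11; // second source register field
  return Value;
}

int main(void) {
  std::printf("TOY_ADD r3,r4,r5 -> 0x%08x\n", encode(TOY_ADD, 3, 4, 5));
  return 0;
}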
diff --git a/libclamav/c++/PPCGenCodeEmitter.inc b/libclamav/c++/PPCGenCodeEmitter.inc
index 8283292..2679956 100644
--- a/libclamav/c++/PPCGenCodeEmitter.inc
+++ b/libclamav/c++/PPCGenCodeEmitter.inc
@@ -20,6 +20,8 @@ unsigned PPCCodeEmitter::getBinaryCodeForInstr(const MachineInstr &MI) {
0U,
0U,
0U,
+ 0U,
+ 0U,
2080375316U, // ADD4
2080375316U, // ADD8
2080374804U, // ADDC
@@ -156,7 +158,6 @@ unsigned PPCCodeEmitter::getBinaryCodeForInstr(const MachineInstr &MI) {
4227858490U, // FMADD
3959423034U, // FMADDS
4227858576U, // FMR
- 4227858576U, // FMRSD
4227858488U, // FMSUB
3959423032U, // FMSUBS
4227858482U, // FMUL
@@ -231,7 +232,7 @@ unsigned PPCCodeEmitter::getBinaryCodeForInstr(const MachineInstr &MI) {
2080374830U, // LWZX
2080374830U, // LWZX8
1275068416U, // MCRF
- 2080374822U, // MFCR
+ 2080374822U, // MFCRpseud
2080965286U, // MFCTR
2080965286U, // MFCTR8
4227859598U, // MFFS
@@ -1153,7 +1154,6 @@ unsigned PPCCodeEmitter::getBinaryCodeForInstr(const MachineInstr &MI) {
case PPC::FCTIDZ:
case PPC::FCTIWZ:
case PPC::FMR:
- case PPC::FMRSD:
case PPC::FNABSD:
case PPC::FNABSS:
case PPC::FNEGD:
@@ -1209,7 +1209,7 @@ unsigned PPCCodeEmitter::getBinaryCodeForInstr(const MachineInstr &MI) {
Value |= (op & 31U) << 16;
break;
}
- case PPC::MFCR:
+ case PPC::MFCRpseud:
case PPC::MFCTR:
case PPC::MFCTR8:
case PPC::MFLR:
@@ -1557,7 +1557,7 @@ unsigned PPCCodeEmitter::getBinaryCodeForInstr(const MachineInstr &MI) {
std::string msg;
raw_string_ostream Msg(msg);
Msg << "Not supported instr: " << MI;
- llvm_report_error(Msg.str());
+ report_fatal_error(Msg.str());
}
return Value;
}
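The PPCGenDAGISel.inc diff that follows drops the per-pattern Predicate_* helpers and re-emits the MatcherTable, a byte-coded program walked by SelectCode(). Two encodings recur in that table: target opcodes wider than one byte are split by TARGET_OPCODE(X) into the low byte followed by the high byte, and larger integers are stored base-128 with the high bit as a continuation flag (so an entry written as "67|128,2" appears to decode to 67 + 2*128 = 323). A short C++ sketch of those two encodings, using invented values:

#include <cstdint>
#include <cstdio>
#include <vector>

// Sketch of the byte encodings used in the matcher table: a 16-bit opcode is
// emitted low byte first (as TARGET_OPCODE does: X & 255, X >> 8), and larger
// integers use 7 value bits per byte with the high bit meaning "more follows".
static void emitOpcode(std::vector<uint8_t> &Table, uint16_t Opc) {
  Table.push_back(static_cast<uint8_t>(Opc & 255)); // low byte
  Table.push_back(static_cast<uint8_t>(Opc >> 8));  // high byte
}

static uint64_t decodeVarInt(const std::vector<uint8_t> &Table, size_t &Idx) {
  uint64_t Val = 0;
  unsigned Shift = 0;
  for (;;) {
    uint8_t B = Table[Idx++];
    Val |= static_cast<uint64_t>(B & 127) << Shift;
    if (!(B & 128))
      return Val; // high bit clear: last byte of this value
    Shift += 7;
  }
}

int main(void) {
  std::vector<uint8_t> Table;
  emitOpcode(Table, 0x1234); // invented opcode -> bytes 0x34, 0x12
  Table.push_back(67 | 128); // "67|128,2" style entry: continuation byte...
  Table.push_back(2);        // ...then the final byte; decodes to 323
  size_t Idx = 2;
  std::printf("opcode bytes: 0x%02x 0x%02x, integer: %llu\n",
              Table[0], Table[1],
              static_cast<unsigned long long>(decodeVarInt(Table, Idx)));
  return 0;
}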
diff --git a/libclamav/c++/PPCGenDAGISel.inc b/libclamav/c++/PPCGenDAGISel.inc
index cd4615a..3ff9afd 100644
--- a/libclamav/c++/PPCGenDAGISel.inc
+++ b/libclamav/c++/PPCGenDAGISel.inc
@@ -9,1003 +9,291 @@
// *** NOTE: This file is #included into the middle of the target
// *** instruction selector class. These functions are really methods.
-
-// Predicate functions.
-inline bool Predicate_V_immneg0(SDNode *N) const {
-
- return PPC::isAllNegativeZeroVector(N);
-
-}
-inline bool Predicate_atomic_cmp_swap_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_cmp_swap_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_cmp_swap_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_cmp_swap_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_add_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_add_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_add_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_add_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_and_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_and_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_and_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_and_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_max_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_max_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_max_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_max_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_min_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_min_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_min_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_min_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_nand_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_nand_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_nand_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_nand_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_or_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_or_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_or_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_or_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_sub_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_sub_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_sub_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_sub_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_umax_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_umax_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_umax_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_umax_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_umin_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_umin_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_umin_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_umin_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_xor_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_xor_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_xor_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_xor_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_swap_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_swap_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_swap_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_swap_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_cvtff(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FF;
-
-}
-inline bool Predicate_cvtfs(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FS;
-
-}
-inline bool Predicate_cvtfu(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FU;
-
-}
-inline bool Predicate_cvtsf(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SF;
-
-}
-inline bool Predicate_cvtss(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SS;
-
-}
-inline bool Predicate_cvtsu(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SU;
-
-}
-inline bool Predicate_cvtuf(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_UF;
-
-}
-inline bool Predicate_cvtus(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_US;
-
-}
-inline bool Predicate_cvtuu(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_UU;
-
-}
-inline bool Predicate_extload(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
-
-}
-inline bool Predicate_extloadf32(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f32;
-
-}
-inline bool Predicate_extloadf64(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f64;
-
-}
-inline bool Predicate_extloadi1(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
-
-}
-inline bool Predicate_extloadi16(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_extloadi32(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_extloadi8(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_imm16ShiftedSExt(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- // imm16ShiftedSExt predicate - True if only bits in the top 16-bits of the
- // immediate are set. Used by instructions like 'addis'. Identical to
- // imm16ShiftedZExt in 32-bit mode.
- if (N->getZExtValue() & 0xFFFF) return false;
- if (N->getValueType(0) == MVT::i32)
- return true;
- // For 64-bit, make sure it is sext right.
- return N->getZExtValue() == (uint64_t)(int)N->getZExtValue();
-
-}
-inline bool Predicate_imm16ShiftedZExt(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- // imm16ShiftedZExt predicate - True if only bits in the top 16-bits of the
- // immediate are set. Used by instructions like 'xoris'.
- return (N->getZExtValue() & ~uint64_t(0xFFFF0000)) == 0;
-
-}
-inline bool Predicate_immAllOnesV(SDNode *N) const {
-
- return ISD::isBuildVectorAllOnes(N);
-
-}
-inline bool Predicate_immAllOnesV_bc(SDNode *N) const {
-
- return ISD::isBuildVectorAllOnes(N);
-
-}
-inline bool Predicate_immAllZerosV(SDNode *N) const {
-
- return ISD::isBuildVectorAllZeros(N);
-
-}
-inline bool Predicate_immAllZerosV_bc(SDNode *N) const {
-
- return ISD::isBuildVectorAllZeros(N);
-
-}
-inline bool Predicate_immSExt16(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- // immSExt16 predicate - True if the immediate fits in a 16-bit sign extended
- // field. Used by instructions like 'addi'.
- if (N->getValueType(0) == MVT::i32)
- return (int32_t)N->getZExtValue() == (short)N->getZExtValue();
- else
- return (int64_t)N->getZExtValue() == (short)N->getZExtValue();
-
-}
-inline bool Predicate_immZExt16(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- // immZExt16 predicate - True if the immediate fits in a 16-bit zero extended
- // field. Used by instructions like 'ori'.
- return (uint64_t)N->getZExtValue() == (unsigned short)N->getZExtValue();
-
-}
-inline bool Predicate_istore(SDNode *N) const {
-
- return !cast<StoreSDNode>(N)->isTruncatingStore();
-
-}
-inline bool Predicate_itruncstore(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->isTruncatingStore();
-
-}
-inline bool Predicate_load(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
-
-}
-inline bool Predicate_maskimm32(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- // maskImm predicate - True if immediate is a run of ones.
- unsigned mb, me;
- if (N->getValueType(0) == MVT::i32)
- return isRunOfOnes((unsigned)N->getZExtValue(), mb, me);
- else
- return false;
-
-}
-inline bool Predicate_post_store(SDNode *N) const {
-
- ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
- return AM == ISD::POST_INC || AM == ISD::POST_DEC;
-
-}
-inline bool Predicate_post_truncst(SDNode *N) const {
-
- ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
- return AM == ISD::POST_INC || AM == ISD::POST_DEC;
-
-}
-inline bool Predicate_post_truncstf32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
-
-}
-inline bool Predicate_post_truncsti1(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
-
-}
-inline bool Predicate_post_truncsti16(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_post_truncsti32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_post_truncsti8(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_pre_store(SDNode *N) const {
-
- ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
- return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
-
-}
-inline bool Predicate_pre_truncst(SDNode *N) const {
-
- ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
- return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
-
-}
-inline bool Predicate_pre_truncstf32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
-
-}
-inline bool Predicate_pre_truncsti1(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
-
-}
-inline bool Predicate_pre_truncsti16(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_pre_truncsti32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_pre_truncsti8(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_sextload(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
-
-}
-inline bool Predicate_sextloadi1(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
-
-}
-inline bool Predicate_sextloadi16(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_sextloadi32(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_sextloadi8(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_store(SDNode *N) const {
-
- return !cast<StoreSDNode>(N)->isTruncatingStore();
-
-}
-inline bool Predicate_truncstore(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->isTruncatingStore();
-
-}
-inline bool Predicate_truncstoref32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
-
-}
-inline bool Predicate_truncstoref64(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f64;
-
-}
-inline bool Predicate_truncstorei16(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_truncstorei32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_truncstorei8(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_unindexedload(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
-
-}
-inline bool Predicate_unindexedstore(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
-
-}
-inline bool Predicate_vecspltisb(SDNode *N) const {
-
- return PPC::get_VSPLTI_elt(N, 1, *CurDAG).getNode() != 0;
-
-}
-inline bool Predicate_vecspltish(SDNode *N) const {
-
- return PPC::get_VSPLTI_elt(N, 2, *CurDAG).getNode() != 0;
-
-}
-inline bool Predicate_vecspltisw(SDNode *N) const {
-
- return PPC::get_VSPLTI_elt(N, 4, *CurDAG).getNode() != 0;
-
-}
-inline bool Predicate_vmrghb_shuffle(SDNode *N) const {
-
- return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, false);
-
-}
-inline bool Predicate_vmrghb_unary_shuffle(SDNode *N) const {
-
- return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, true);
-
-}
-inline bool Predicate_vmrghh_shuffle(SDNode *N) const {
-
- return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, false);
-
-}
-inline bool Predicate_vmrghh_unary_shuffle(SDNode *N) const {
-
- return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, true);
-
-}
-inline bool Predicate_vmrghw_shuffle(SDNode *N) const {
-
- return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, false);
-
-}
-inline bool Predicate_vmrghw_unary_shuffle(SDNode *N) const {
-
- return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, true);
-
-}
-inline bool Predicate_vmrglb_shuffle(SDNode *N) const {
-
- return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, false);
-
-}
-inline bool Predicate_vmrglb_unary_shuffle(SDNode *N) const {
-
- return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, true);
-
-}
-inline bool Predicate_vmrglh_shuffle(SDNode *N) const {
-
- return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, false);
-
-}
-inline bool Predicate_vmrglh_unary_shuffle(SDNode *N) const {
-
- return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, true);
-
-}
-inline bool Predicate_vmrglw_shuffle(SDNode *N) const {
-
- return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, false);
-
-}
-inline bool Predicate_vmrglw_unary_shuffle(SDNode *N) const {
-
- return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, true);
-
-}
-inline bool Predicate_vpkuhum_shuffle(SDNode *N) const {
-
- return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), false);
-
-}
-inline bool Predicate_vpkuhum_unary_shuffle(SDNode *N) const {
-
- return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), true);
-
-}
-inline bool Predicate_vpkuwum_shuffle(SDNode *N) const {
-
- return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), false);
-
-}
-inline bool Predicate_vpkuwum_unary_shuffle(SDNode *N) const {
-
- return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), true);
-
-}
-inline bool Predicate_vsldoi_shuffle(SDNode *N) const {
-
- return PPC::isVSLDOIShuffleMask(N, false) != -1;
-
-}
-inline bool Predicate_vsldoi_unary_shuffle(SDNode *N) const {
-
- return PPC::isVSLDOIShuffleMask(N, true) != -1;
-
-}
-inline bool Predicate_vspltb_shuffle(SDNode *N) const {
-
- return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 1);
-
-}
-inline bool Predicate_vsplth_shuffle(SDNode *N) const {
-
- return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 2);
-
-}
-inline bool Predicate_vspltw_shuffle(SDNode *N) const {
-
- return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 4);
-
-}
-inline bool Predicate_vtFP(SDNode *inN) const {
- VTSDNode *N = cast<VTSDNode>(inN);
- return N->getVT().isFloatingPoint();
-}
-inline bool Predicate_vtInt(SDNode *inN) const {
- VTSDNode *N = cast<VTSDNode>(inN);
- return N->getVT().isInteger();
-}
-inline bool Predicate_zextload(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
-
-}
-inline bool Predicate_zextloadi1(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
-
-}
-inline bool Predicate_zextloadi16(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_zextloadi32(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_zextloadi8(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-
-
// The main instruction selector code.
SDNode *SelectCode(SDNode *N) {
// Opcodes are emitted as 2 bytes, TARGET_OPCODE handles this.
#define TARGET_OPCODE(X) X & 255, unsigned(X) >> 8
static const unsigned char MatcherTable[] = {
- OPC_SwitchOpcode , 37, ISD::MEMBARRIER,
+ OPC_SwitchOpcode , 40, TARGET_OPCODE(ISD::MEMBARRIER),
OPC_RecordNode,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_MoveChild, 4,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_MoveChild, 5,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::SYNC), 0|OPFL_Chain,
0, 0,
- 114|128,4, ISD::INTRINSIC_VOID,
+ 73|128,4, TARGET_OPCODE(ISD::INTRINSIC_VOID),
OPC_RecordNode,
OPC_MoveChild, 1,
- OPC_Scope, 19,
- OPC_CheckInteger, 80|128,2,
+ OPC_Scope, 17,
+ OPC_CheckInteger, 67|128,2,
OPC_MoveParent,
OPC_RecordChild2,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DCBA), 0|OPFL_Chain,
0, 2, 2, 3,
- 19,
- OPC_CheckInteger, 81|128,2,
+ 17,
+ OPC_CheckInteger, 68|128,2,
OPC_MoveParent,
OPC_RecordChild2,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DCBF), 0|OPFL_Chain,
0, 2, 2, 3,
- 19,
- OPC_CheckInteger, 82|128,2,
+ 17,
+ OPC_CheckInteger, 69|128,2,
OPC_MoveParent,
OPC_RecordChild2,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DCBI), 0|OPFL_Chain,
0, 2, 2, 3,
- 19,
- OPC_CheckInteger, 83|128,2,
+ 17,
+ OPC_CheckInteger, 70|128,2,
OPC_MoveParent,
OPC_RecordChild2,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DCBST), 0|OPFL_Chain,
0, 2, 2, 3,
- 19,
- OPC_CheckInteger, 84|128,2,
+ 17,
+ OPC_CheckInteger, 71|128,2,
OPC_MoveParent,
OPC_RecordChild2,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DCBT), 0|OPFL_Chain,
0, 2, 2, 3,
- 19,
- OPC_CheckInteger, 85|128,2,
+ 17,
+ OPC_CheckInteger, 72|128,2,
OPC_MoveParent,
OPC_RecordChild2,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DCBTST), 0|OPFL_Chain,
0, 2, 2, 3,
- 19,
- OPC_CheckInteger, 86|128,2,
+ 17,
+ OPC_CheckInteger, 73|128,2,
OPC_MoveParent,
OPC_RecordChild2,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DCBZ), 0|OPFL_Chain,
0, 2, 2, 3,
- 19,
- OPC_CheckInteger, 87|128,2,
+ 17,
+ OPC_CheckInteger, 74|128,2,
OPC_MoveParent,
OPC_RecordChild2,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DCBZL), 0|OPFL_Chain,
0, 2, 2, 3,
- 21,
- OPC_CheckInteger, 76|128,1,
+ 19,
+ OPC_CheckInteger, 63|128,1,
OPC_MoveParent,
OPC_RecordChild2,
OPC_RecordChild3,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STVEBX), 0|OPFL_Chain,
0, 3, 1, 3, 4,
- 21,
- OPC_CheckInteger, 77|128,1,
+ 19,
+ OPC_CheckInteger, 64|128,1,
OPC_MoveParent,
OPC_RecordChild2,
OPC_RecordChild3,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STVEHX), 0|OPFL_Chain,
0, 3, 1, 3, 4,
- 21,
- OPC_CheckInteger, 78|128,1,
+ 19,
+ OPC_CheckInteger, 65|128,1,
OPC_MoveParent,
OPC_RecordChild2,
OPC_RecordChild3,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STVEWX), 0|OPFL_Chain,
0, 3, 1, 3, 4,
- 21,
- OPC_CheckInteger, 79|128,1,
+ 19,
+ OPC_CheckInteger, 66|128,1,
OPC_MoveParent,
OPC_RecordChild2,
OPC_RecordChild3,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STVX), 0|OPFL_Chain,
0, 3, 1, 3, 4,
- 21,
- OPC_CheckInteger, 80|128,1,
+ 19,
+ OPC_CheckInteger, 67|128,1,
OPC_MoveParent,
OPC_RecordChild2,
OPC_RecordChild3,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STVXL), 0|OPFL_Chain,
0, 3, 1, 3, 4,
- 34,
- OPC_CheckInteger, 61|128,1,
+ 33,
+ OPC_CheckInteger, 48|128,1,
OPC_MoveParent,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitInteger, MVT::i32, 0,
OPC_EmitConvertToTarget, 1,
OPC_EmitInteger, MVT::i32, 0,
OPC_EmitInteger, MVT::i32, 0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DSS), 0|OPFL_Chain,
0, 4, 2, 3, 4, 5,
- 63,
- OPC_CheckInteger, 63|128,1,
+ 61,
+ OPC_CheckInteger, 50|128,1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_Scope, 27,
+ OPC_Scope, 26,
OPC_CheckChild2Type, MVT::i32,
OPC_RecordChild3,
OPC_RecordChild4,
OPC_MoveChild, 4,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitInteger, MVT::i32, 0,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DST), 0|OPFL_Chain,
0, 4, 4, 5, 1, 2,
- 27,
+ 26,
OPC_CheckChild2Type, MVT::i64,
OPC_RecordChild3,
OPC_RecordChild4,
OPC_MoveChild, 4,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitInteger, MVT::i32, 0,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DST64), 0|OPFL_Chain,
0, 4, 4, 5, 1, 2,
0,
- 63,
- OPC_CheckInteger, 66|128,1,
+ 61,
+ OPC_CheckInteger, 53|128,1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_Scope, 27,
+ OPC_Scope, 26,
OPC_CheckChild2Type, MVT::i32,
OPC_RecordChild3,
OPC_RecordChild4,
OPC_MoveChild, 4,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitInteger, MVT::i32, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DSTT), 0|OPFL_Chain,
0, 4, 4, 5, 1, 2,
- 27,
+ 26,
OPC_CheckChild2Type, MVT::i64,
OPC_RecordChild3,
OPC_RecordChild4,
OPC_MoveChild, 4,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitInteger, MVT::i32, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DSTT64), 0|OPFL_Chain,
0, 4, 4, 5, 1, 2,
0,
- 63,
- OPC_CheckInteger, 64|128,1,
+ 61,
+ OPC_CheckInteger, 51|128,1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_Scope, 27,
+ OPC_Scope, 26,
OPC_CheckChild2Type, MVT::i32,
OPC_RecordChild3,
OPC_RecordChild4,
OPC_MoveChild, 4,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitInteger, MVT::i32, 0,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DSTST), 0|OPFL_Chain,
0, 4, 4, 5, 1, 2,
- 27,
+ 26,
OPC_CheckChild2Type, MVT::i64,
OPC_RecordChild3,
OPC_RecordChild4,
OPC_MoveChild, 4,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitInteger, MVT::i32, 0,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DSTST64), 0|OPFL_Chain,
0, 4, 4, 5, 1, 2,
0,
- 63,
- OPC_CheckInteger, 65|128,1,
+ 61,
+ OPC_CheckInteger, 52|128,1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_Scope, 27,
+ OPC_Scope, 26,
OPC_CheckChild2Type, MVT::i32,
OPC_RecordChild3,
OPC_RecordChild4,
OPC_MoveChild, 4,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitInteger, MVT::i32, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DSTSTT), 0|OPFL_Chain,
0, 4, 4, 5, 1, 2,
- 27,
+ 26,
OPC_CheckChild2Type, MVT::i64,
OPC_RecordChild3,
OPC_RecordChild4,
OPC_MoveChild, 4,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitInteger, MVT::i32, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DSTSTT64), 0|OPFL_Chain,
0, 4, 4, 5, 1, 2,
0,
- 13,
- OPC_CheckInteger, 88|128,2,
+ 11,
+ OPC_CheckInteger, 75|128,2,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::SYNC), 0|OPFL_Chain,
0, 0,
- 15,
- OPC_CheckInteger, 75|128,1,
+ 13,
+ OPC_CheckInteger, 62|128,1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::MTVSCR), 0|OPFL_Chain,
0, 1, 1,
- 29,
- OPC_CheckInteger, 62|128,1,
+ 27,
+ OPC_CheckInteger, 49|128,1,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitInteger, MVT::i32, 1,
OPC_EmitInteger, MVT::i32, 0,
OPC_EmitInteger, MVT::i32, 0,
@@ -1013,803 +301,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DSSALL), 0|OPFL_Chain,
0, 4, 1, 2, 3, 4,
0,
- 125, ISD::INTRINSIC_W_CHAIN,
- OPC_RecordNode,
- OPC_MoveChild, 1,
- OPC_Scope, 20,
- OPC_CheckInteger, 67|128,1,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LVEBX), 0|OPFL_Chain,
- 1, MVT::v16i8, 2, 2, 3,
- 20,
- OPC_CheckInteger, 68|128,1,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LVEHX), 0|OPFL_Chain,
- 1, MVT::v8i16, 2, 2, 3,
- 20,
- OPC_CheckInteger, 69|128,1,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LVEWX), 0|OPFL_Chain,
- 1, MVT::v4i32, 2, 2, 3,
- 20,
- OPC_CheckInteger, 72|128,1,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LVX), 0|OPFL_Chain,
- 1, MVT::v4i32, 2, 2, 3,
- 20,
- OPC_CheckInteger, 73|128,1,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LVXL), 0|OPFL_Chain,
- 1, MVT::v4i32, 2, 2, 3,
- 14,
- OPC_CheckInteger, 74|128,1,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::MFVSCR), 0|OPFL_Chain,
- 1, MVT::v8i16, 0,
- 0,
- 18|128,13, ISD::INTRINSIC_WO_CHAIN,
- OPC_MoveChild, 0,
- OPC_Scope, 17,
- OPC_CheckInteger, 70|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LVSL), 0,
- 1, MVT::v16i8, 2, 1, 2,
- 17,
- OPC_CheckInteger, 71|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LVSR), 0,
- 1, MVT::v16i8, 2, 1, 2,
- 22,
- OPC_CheckInteger, 94|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCFSX), 0,
- 1, MVT::v4f32, 2, 2, 0,
- 22,
- OPC_CheckInteger, 95|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCFUX), 0,
- 1, MVT::v4f32, 2, 2, 0,
- 22,
- OPC_CheckInteger, 122|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCTSXS), 0,
- 1, MVT::v4i32, 2, 2, 0,
- 22,
- OPC_CheckInteger, 123|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCTUXS), 0,
- 1, MVT::v4i32, 2, 2, 0,
- 17,
- OPC_CheckInteger, 6|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMHADDSHS), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 17,
- OPC_CheckInteger, 7|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMHRADDSHS), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 17,
- OPC_CheckInteger, 15|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMLADDUHM), 0,
- 1, MVT::v8i16, 3, 0, 1, 2,
- 17,
- OPC_CheckInteger, 31|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPERM), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 17,
- OPC_CheckInteger, 48|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSEL), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 15,
- OPC_CheckInteger, 81|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDCUW), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 82|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDSBS), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 83|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDSHS), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 84|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDSWS), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 85|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDUBS), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 86|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDUHS), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 87|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDUWS), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 13,
- OPC_CheckInteger, 124|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VEXPTEFP), 0,
- 1, MVT::v4f32, 1, 0,
- 13,
- OPC_CheckInteger, 125|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VLOGEFP), 0,
- 1, MVT::v4f32, 1, 0,
- 15,
- OPC_CheckInteger, 88|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VAVGSB), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 89|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VAVGSH), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 90|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VAVGSW), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 91|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VAVGUB), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 92|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VAVGUH), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 93|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VAVGUW), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 127|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMAXFP), 0,
- 1, MVT::v4f32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 0|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMAXSB), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 1|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMAXSH), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 2|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMAXSW), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 3|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMAXUB), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 4|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMAXUH), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 5|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMAXUW), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 8|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMINFP), 0,
- 1, MVT::v4f32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 9|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMINSB), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 10|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMINSH), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 11|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMINSW), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 12|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMINUB), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 13|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMINUH), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 14|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMINUW), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 17,
- OPC_CheckInteger, 16|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMSUMMBM), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 17,
- OPC_CheckInteger, 17|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMSUMSHM), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 17,
- OPC_CheckInteger, 18|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMSUMSHS), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 17,
- OPC_CheckInteger, 19|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMSUMUBM), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 17,
- OPC_CheckInteger, 20|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMSUMUHM), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 17,
- OPC_CheckInteger, 21|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMSUMUHS), 0,
- 1, MVT::v4i32, 3, 0, 1, 2,
- 15,
- OPC_CheckInteger, 22|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMULESB), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 23|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMULESH), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 24|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMULEUB), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 25|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMULEUH), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 26|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMULOSB), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 27|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMULOSH), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 28|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMULOUB), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 29|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMULOUH), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 13,
- OPC_CheckInteger, 39|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VREFP), 0,
- 1, MVT::v4f32, 1, 0,
- 13,
- OPC_CheckInteger, 40|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VRFIM), 0,
- 1, MVT::v4f32, 1, 0,
- 13,
- OPC_CheckInteger, 41|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VRFIN), 0,
- 1, MVT::v4f32, 1, 0,
- 13,
- OPC_CheckInteger, 42|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VRFIP), 0,
- 1, MVT::v4f32, 1, 0,
- 13,
- OPC_CheckInteger, 43|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VRFIZ), 0,
- 1, MVT::v4f32, 1, 0,
- 13,
- OPC_CheckInteger, 47|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VRSQRTEFP), 0,
- 1, MVT::v4f32, 1, 0,
- 15,
- OPC_CheckInteger, 62|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBCUW), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 63|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBSBS), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 64|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBSHS), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 65|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBSWS), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 66|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBUBS), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 67|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBUHS), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 68|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBUWS), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 73|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUMSWS), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 69|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUM2SWS), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 70|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUM4SBS), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 71|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUM4SHS), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 72|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUM4UBS), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 44|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VRLB), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 45|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VRLH), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 46|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VRLW), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 49|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSL), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 52|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSLO), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 50|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSLB), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 51|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSLH), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 53|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSLW), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 54|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSR), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 60|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRO), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 55|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRAB), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 56|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRAH), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 57|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRAW), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 58|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRB), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 59|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRH), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 61|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRW), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
- OPC_CheckInteger, 32|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKPX), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 33|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKSHSS), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 34|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKSHUS), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 35|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKSWSS), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 36|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKSWUS), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
- OPC_CheckInteger, 37|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKUHUS), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
- OPC_CheckInteger, 38|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKUWUS), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 13,
- OPC_CheckInteger, 74|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VUPKHPX), 0,
- 1, MVT::v4i32, 1, 0,
- 13,
- OPC_CheckInteger, 75|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VUPKHSB), 0,
- 1, MVT::v8i16, 1, 0,
- 13,
- OPC_CheckInteger, 76|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VUPKHSH), 0,
- 1, MVT::v4i32, 1, 0,
- 13,
- OPC_CheckInteger, 77|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VUPKLPX), 0,
- 1, MVT::v4i32, 1, 0,
- 13,
- OPC_CheckInteger, 78|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VUPKLSB), 0,
- 1, MVT::v8i16, 1, 0,
- 13,
- OPC_CheckInteger, 79|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VUPKLSH), 0,
- 1, MVT::v4i32, 1, 0,
- 17,
- OPC_CheckInteger, 126|128,1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMADDFP), 0,
- 1, MVT::v4f32, 3, 0, 1, 2,
- 17,
- OPC_CheckInteger, 30|128,2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VNMSUBFP), 0,
- 1, MVT::v4f32, 3, 0, 1, 2,
- 0,
- 17|128,4, ISD::ADD,
- OPC_Scope, 42|128,1,
+ 32|128,4, TARGET_OPCODE(ISD::ADD),
+ OPC_Scope, 49|128,1,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_SwitchOpcode , 23, PPCISD::Lo,
+ OPC_SwitchOpcode , 24, TARGET_OPCODE(PPCISD::Lo),
OPC_RecordChild0,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::TargetGlobalAddress,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::TargetGlobalAddress),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -1818,10 +317,10 @@ SDNode *SelectCode(SDNode *N) {
OPC_CheckType, MVT::i32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LA), 0,
1, MVT::i32, 2, 0, 1,
- 9|128,1, PPCISD::Hi,
+ 13|128,1, TARGET_OPCODE(PPCISD::Hi),
OPC_RecordChild0,
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 31, ISD::TargetGlobalAddress,
+ OPC_SwitchOpcode , 31, TARGET_OPCODE(ISD::TargetGlobalAddress),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -1834,7 +333,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDIS8), 0,
1, MVT::i64, 2, 0, 1,
0,
- 31, ISD::TargetConstantPool,
+ 31, TARGET_OPCODE(ISD::TargetConstantPool),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -1847,7 +346,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDIS8), 0,
1, MVT::i64, 2, 0, 1,
0,
- 31, ISD::TargetJumpTable,
+ 31, TARGET_OPCODE(ISD::TargetJumpTable),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -1860,7 +359,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDIS8), 0,
1, MVT::i64, 2, 0, 1,
0,
- 31, ISD::TargetBlockAddress,
+ 31, TARGET_OPCODE(ISD::TargetBlockAddress),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -1875,12 +374,12 @@ SDNode *SelectCode(SDNode *N) {
0,
0,
0,
- 46|128,1,
+ 53|128,1,
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 24, PPCISD::Lo,
+ OPC_SwitchOpcode , 25, TARGET_OPCODE(PPCISD::Lo),
OPC_RecordChild0,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::TargetGlobalAddress,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::TargetGlobalAddress),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -1890,10 +389,10 @@ SDNode *SelectCode(SDNode *N) {
OPC_CheckType, MVT::i32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LA), 0,
1, MVT::i32, 2, 1, 0,
- 13|128,1, PPCISD::Hi,
+ 17|128,1, TARGET_OPCODE(PPCISD::Hi),
OPC_RecordChild0,
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 32, ISD::TargetGlobalAddress,
+ OPC_SwitchOpcode , 32, TARGET_OPCODE(ISD::TargetGlobalAddress),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -1907,7 +406,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDIS), 0,
1, MVT::i32, 2, 1, 0,
0,
- 32, ISD::TargetConstantPool,
+ 32, TARGET_OPCODE(ISD::TargetConstantPool),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -1921,7 +420,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDIS), 0,
1, MVT::i32, 2, 1, 0,
0,
- 32, ISD::TargetJumpTable,
+ 32, TARGET_OPCODE(ISD::TargetJumpTable),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -1935,7 +434,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDIS), 0,
1, MVT::i32, 2, 1, 0,
0,
- 32, ISD::TargetBlockAddress,
+ 32, TARGET_OPCODE(ISD::TargetBlockAddress),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -1951,12 +450,12 @@ SDNode *SelectCode(SDNode *N) {
0,
0,
0,
- 49|128,1,
+ 50|128,1,
OPC_RecordChild0,
OPC_RecordChild1,
- OPC_Scope, 112,
+ OPC_Scope, 113,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_Scope, 16,
OPC_CheckPredicate, 0,
OPC_MoveParent,
@@ -2004,6 +503,10 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADD4), 0,
1, MVT::i32, 2, 0, 1,
11,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADD8), 0,
+ 1, MVT::i64, 2, 0, 1,
+ 11,
OPC_CheckType, MVT::v16i8,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDUBM), 0,
1, MVT::v16i8, 2, 0, 1,
@@ -2015,1026 +518,899 @@ SDNode *SelectCode(SDNode *N) {
OPC_CheckType, MVT::v4i32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDUWM), 0,
1, MVT::v4i32, 2, 0, 1,
- 11,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADD8), 0,
- 1, MVT::i64, 2, 0, 1,
0,
0,
- 27|128,7, ISD::LOAD,
+ 72|128,6, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 21,
+ OPC_Scope, 19,
OPC_CheckPredicate, 3,
OPC_CheckPredicate, 4,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZ), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 2, 2, 3,
- 21,
+ 19,
OPC_CheckPredicate, 5,
OPC_CheckPredicate, 6,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LHA), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 2, 2, 3,
- 21,
+ 19,
OPC_CheckPredicate, 3,
OPC_CheckPredicate, 7,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LHZ), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 2, 2, 3,
- 55,
+ 17,
OPC_CheckPredicate, 8,
- OPC_SwitchType , 15, MVT::i32,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LWZ), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 2, 3,
- 15, MVT::f32,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LFS), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 2, 2, 3,
- 15, MVT::f64,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LFD), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 2, 2, 3,
- 0,
- 21,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LWZ), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 2, 2, 3,
+ 19,
OPC_CheckPredicate, 3,
OPC_CheckPredicate, 4,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZX), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 2, 2, 3,
- 21,
+ 19,
OPC_CheckPredicate, 5,
OPC_CheckPredicate, 6,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LHAX), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 2, 2, 3,
- 21,
+ 19,
OPC_CheckPredicate, 3,
OPC_CheckPredicate, 7,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LHZX), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 2, 2, 3,
- 55,
+ 17,
OPC_CheckPredicate, 8,
- OPC_SwitchType , 15, MVT::i32,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LWZX), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 2, 2, 3,
- 15, MVT::f32,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LFSX), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 2, 2, 3,
- 15, MVT::f64,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LFDX), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 2, 2, 3,
- 0,
- 40,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LWZX), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 2, 2, 3,
+ 36,
OPC_CheckPredicate, 3,
OPC_CheckPredicate, 9,
OPC_CheckType, MVT::i32,
- OPC_Scope, 15,
+ OPC_Scope, 13,
OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZ), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 2, 2, 3,
- 15,
+ 13,
OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZX), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 2, 2, 3,
0,
- 117,
+ 105,
OPC_CheckPredicate, 10,
OPC_CheckType, MVT::i32,
- OPC_Scope, 36,
+ OPC_Scope, 32,
OPC_CheckPredicate, 11,
- OPC_Scope, 15,
+ OPC_Scope, 13,
OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZ), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 2, 2, 3,
- 15,
+ 13,
OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZX), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 2, 2, 3,
0,
- 36,
+ 32,
OPC_CheckPredicate, 12,
- OPC_Scope, 15,
+ OPC_Scope, 13,
OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZ), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 2, 2, 3,
- 15,
+ 13,
OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZX), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 2, 2, 3,
0,
- 36,
+ 32,
OPC_CheckPredicate, 13,
- OPC_Scope, 15,
+ OPC_Scope, 13,
OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LHZ), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 2, 2, 3,
- 15,
+ 13,
OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LHZX), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 2, 2, 3,
0,
0,
- 19,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LVX), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 2, 2, 3,
- 78,
+ 70,
OPC_CheckPredicate, 5,
OPC_CheckType, MVT::i64,
- OPC_Scope, 17,
+ OPC_Scope, 15,
OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LHA8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 2, 2, 3,
- 17,
+ 15,
OPC_CheckPredicate, 14,
OPC_CheckComplexPat, /*CP*/3, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LWA), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 2, 2, 3,
- 17,
+ 15,
OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LHAX8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 2, 2, 3,
- 17,
+ 15,
OPC_CheckPredicate, 14,
OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LWAX), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 2, 2, 3,
0,
- 114,
+ 102,
OPC_CheckPredicate, 3,
OPC_CheckType, MVT::i64,
- OPC_Scope, 17,
+ OPC_Scope, 15,
OPC_CheckPredicate, 4,
OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZ8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 2, 2, 3,
- 17,
+ 15,
OPC_CheckPredicate, 7,
OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LHZ8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 2, 2, 3,
- 17,
+ 15,
OPC_CheckPredicate, 15,
OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LWZ8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 2, 2, 3,
- 17,
+ 15,
OPC_CheckPredicate, 4,
OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZX8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 2, 2, 3,
- 17,
+ 15,
OPC_CheckPredicate, 7,
OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LHZX8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 2, 2, 3,
- 17,
+ 15,
OPC_CheckPredicate, 15,
OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LWZX8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 2, 2, 3,
0,
- 38,
+ 34,
OPC_CheckPredicate, 8,
OPC_CheckType, MVT::i64,
- OPC_Scope, 15,
+ OPC_Scope, 13,
OPC_CheckComplexPat, /*CP*/3, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LD), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 2, 2, 3,
- 15,
+ 13,
OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LDX), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 2, 2, 3,
0,
- 40,
+ 36,
OPC_CheckPredicate, 3,
OPC_CheckPredicate, 9,
OPC_CheckType, MVT::i64,
- OPC_Scope, 15,
+ OPC_Scope, 13,
OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZ8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 2, 2, 3,
- 15,
+ 13,
OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZX8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 2, 2, 3,
0,
- 89|128,1,
+ 10|128,1,
OPC_CheckPredicate, 10,
- OPC_SwitchType , 22|128,1, MVT::i64,
- OPC_Scope, 36,
- OPC_CheckPredicate, 11,
- OPC_Scope, 15,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZ8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 2, 2, 3,
- 15,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZX8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 2, 2, 3,
- 0,
- 36,
- OPC_CheckPredicate, 12,
- OPC_Scope, 15,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZ8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 2, 2, 3,
- 15,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZX8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 2, 2, 3,
- 0,
- 36,
- OPC_CheckPredicate, 13,
- OPC_Scope, 15,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LHZ8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 2, 2, 3,
- 15,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LHZX8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 2, 2, 3,
- 0,
- 36,
- OPC_CheckPredicate, 16,
- OPC_Scope, 15,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LWZ8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 2, 2, 3,
- 15,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LWZX8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 2, 2, 3,
- 0,
+ OPC_CheckType, MVT::i64,
+ OPC_Scope, 32,
+ OPC_CheckPredicate, 11,
+ OPC_Scope, 13,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZ8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 2, 2, 3,
+ 13,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZX8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 2, 2, 3,
+ 0,
+ 32,
+ OPC_CheckPredicate, 12,
+ OPC_Scope, 13,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZ8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 2, 2, 3,
+ 13,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LBZX8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 2, 2, 3,
0,
- 58, MVT::f64,
- OPC_CheckPredicate, 17,
- OPC_Scope, 26,
+ 32,
+ OPC_CheckPredicate, 13,
+ OPC_Scope, 13,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LHZ8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 2, 2, 3,
+ 13,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LHZX8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 2, 2, 3,
+ 0,
+ 32,
+ OPC_CheckPredicate, 16,
+ OPC_Scope, 13,
OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitNode, TARGET_OPCODE(PPC::LFS), 0|OPFL_Chain,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LWZ8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 2, 2, 3,
+ 13,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LWZX8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 2, 2, 3,
+ 0,
+ 0,
+ 68,
+ OPC_CheckPredicate, 8,
+ OPC_SwitchType , 30, MVT::f32,
+ OPC_Scope, 13,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LFS), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f32, 2, 2, 3,
- OPC_EmitNode, TARGET_OPCODE(PPC::FMRSD), 0|OPFL_MemRefs,
- 1, MVT::f64, 1, 4,
- OPC_CompleteMatch, 1, 5,
-
- 26,
+ 13,
OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitNode, TARGET_OPCODE(PPC::LFSX), 0|OPFL_Chain,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LFSX), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f32, 2, 2, 3,
- OPC_EmitNode, TARGET_OPCODE(PPC::FMRSD), 0|OPFL_MemRefs,
- 1, MVT::f64, 1, 4,
- OPC_CompleteMatch, 1, 5,
-
+ 0,
+ 30, MVT::f64,
+ OPC_Scope, 13,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LFD), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 2, 2, 3,
+ 13,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LFDX), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 2, 2, 3,
0,
0,
+ 60,
+ OPC_CheckPredicate, 10,
+ OPC_CheckPredicate, 17,
+ OPC_CheckType, MVT::f64,
+ OPC_Scope, 25,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitNode, TARGET_OPCODE(PPC::LFS), 0|OPFL_Chain,
+ 1, MVT::f32, 2, 2, 3,
+ OPC_EmitInteger, MVT::i32, PPC::F8RCRegClassID,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 2, 4, 5,
+ 25,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitNode, TARGET_OPCODE(PPC::LFSX), 0|OPFL_Chain,
+ 1, MVT::f32, 2, 2, 3,
+ OPC_EmitInteger, MVT::i32, PPC::F8RCRegClassID,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 2, 4, 5,
+ 0,
+ 17,
+ OPC_CheckPredicate, 8,
+ OPC_CheckType, MVT::v4i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LVX), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 2, 2, 3,
0,
- 83|128,4, ISD::STORE,
+ 29|128,4, TARGET_OPCODE(ISD::STORE),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_RecordChild1,
- OPC_Scope, 70|128,1,
+ OPC_Scope, 52|128,1,
OPC_CheckChild1Type, MVT::i32,
OPC_RecordChild2,
- OPC_Scope, 122,
+ OPC_Scope, 110,
OPC_CheckPredicate, 18,
- OPC_Scope, 40,
+ OPC_Scope, 36,
OPC_CheckPredicate, 19,
- OPC_Scope, 17,
+ OPC_Scope, 15,
OPC_CheckPredicate, 20,
OPC_CheckComplexPat, /*CP*/1, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STB), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
- 17,
+ 15,
OPC_CheckPredicate, 21,
OPC_CheckComplexPat, /*CP*/1, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STH), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
0,
- 17,
+ 15,
OPC_CheckPredicate, 22,
OPC_CheckComplexPat, /*CP*/1, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STW), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
- 40,
+ 36,
OPC_CheckPredicate, 19,
- OPC_Scope, 17,
+ OPC_Scope, 15,
OPC_CheckPredicate, 20,
OPC_CheckComplexPat, /*CP*/2, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STBX), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
- 17,
+ 15,
OPC_CheckPredicate, 21,
OPC_CheckComplexPat, /*CP*/2, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STHX), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
0,
- 17,
+ 15,
OPC_CheckPredicate, 22,
OPC_CheckComplexPat, /*CP*/2, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STWX), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
0,
- 69,
+ 63,
OPC_RecordChild3,
- OPC_Scope, 44,
+ OPC_Scope, 40,
OPC_CheckPredicate, 23,
OPC_CheckPredicate, 24,
- OPC_Scope, 18,
+ OPC_Scope, 16,
OPC_CheckPredicate, 25,
OPC_CheckComplexPat, /*CP*/4, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STBU), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::iPTR, 3, 1, 4, 2,
- 18,
+ 16,
OPC_CheckPredicate, 26,
OPC_CheckComplexPat, /*CP*/4, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STHU), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::iPTR, 3, 1, 4, 2,
0,
- 20,
+ 18,
OPC_CheckPredicate, 27,
OPC_CheckPredicate, 28,
OPC_CheckComplexPat, /*CP*/4, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STWU), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::iPTR, 3, 1, 4, 2,
0,
0,
- 66,
+ 60,
OPC_CheckChild1Type, MVT::f32,
OPC_RecordChild2,
- OPC_Scope, 38,
+ OPC_Scope, 34,
OPC_CheckPredicate, 18,
OPC_CheckPredicate, 22,
- OPC_Scope, 15,
+ OPC_Scope, 13,
OPC_CheckComplexPat, /*CP*/1, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STFS), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
- 15,
+ 13,
OPC_CheckComplexPat, /*CP*/2, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STFSX), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
0,
- 21,
+ 19,
OPC_RecordChild3,
OPC_CheckPredicate, 27,
OPC_CheckPredicate, 28,
OPC_CheckComplexPat, /*CP*/4, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STFSU), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::iPTR, 3, 1, 4, 2,
0,
- 66,
+ 60,
OPC_CheckChild1Type, MVT::f64,
OPC_RecordChild2,
- OPC_Scope, 38,
+ OPC_Scope, 34,
OPC_CheckPredicate, 18,
OPC_CheckPredicate, 22,
- OPC_Scope, 15,
+ OPC_Scope, 13,
OPC_CheckComplexPat, /*CP*/1, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STFD), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
- 15,
+ 13,
OPC_CheckComplexPat, /*CP*/2, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STFDX), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
0,
- 21,
+ 19,
OPC_RecordChild3,
OPC_CheckPredicate, 27,
OPC_CheckPredicate, 28,
OPC_CheckComplexPat, /*CP*/4, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STFDU), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::iPTR, 3, 1, 4, 2,
0,
- 22,
+ 20,
OPC_CheckChild1Type, MVT::v4i32,
OPC_RecordChild2,
OPC_CheckPredicate, 18,
OPC_CheckPredicate, 22,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STVX), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
- 103|128,1,
+ 81|128,1,
OPC_CheckChild1Type, MVT::i64,
OPC_RecordChild2,
- OPC_Scope, 26|128,1,
+ OPC_Scope, 10|128,1,
OPC_CheckPredicate, 18,
- OPC_Scope, 112,
+ OPC_Scope, 100,
OPC_CheckPredicate, 19,
- OPC_Scope, 17,
+ OPC_Scope, 15,
OPC_CheckPredicate, 20,
OPC_CheckComplexPat, /*CP*/1, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STB8), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
- 17,
+ 15,
OPC_CheckPredicate, 21,
OPC_CheckComplexPat, /*CP*/1, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STH8), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
- 17,
+ 15,
OPC_CheckPredicate, 29,
OPC_CheckComplexPat, /*CP*/1, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STW8), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
- 17,
+ 15,
OPC_CheckPredicate, 20,
OPC_CheckComplexPat, /*CP*/2, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STBX8), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
- 17,
+ 15,
OPC_CheckPredicate, 21,
OPC_CheckComplexPat, /*CP*/2, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STHX8), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
- 17,
+ 15,
OPC_CheckPredicate, 29,
OPC_CheckComplexPat, /*CP*/2, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STWX8), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
0,
- 36,
+ 32,
OPC_CheckPredicate, 22,
- OPC_Scope, 15,
+ OPC_Scope, 13,
OPC_CheckComplexPat, /*CP*/3, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STD), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
- 15,
+ 13,
OPC_CheckComplexPat, /*CP*/2, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STDX), 0|OPFL_Chain|OPFL_MemRefs,
0, 3, 1, 3, 4,
0,
0,
- 69,
+ 63,
OPC_RecordChild3,
- OPC_Scope, 44,
+ OPC_Scope, 40,
OPC_CheckPredicate, 23,
OPC_CheckPredicate, 24,
- OPC_Scope, 18,
+ OPC_Scope, 16,
OPC_CheckPredicate, 25,
OPC_CheckComplexPat, /*CP*/4, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STBU8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::iPTR, 3, 1, 4, 2,
- 18,
+ 16,
OPC_CheckPredicate, 26,
OPC_CheckComplexPat, /*CP*/4, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STHU8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::iPTR, 3, 1, 4, 2,
0,
- 20,
+ 18,
OPC_CheckPredicate, 27,
OPC_CheckPredicate, 28,
OPC_CheckComplexPat, /*CP*/4, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::STDU), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::iPTR, 3, 1, 4, 2,
0,
0,
0,
- 26|128,1, ISD::FSUB,
- OPC_Scope, 72,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 30, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 30,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::FSUB,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VNMSUBFP), 0,
- 1, MVT::v4f32, 3, 0, 1, 2,
- 34, ISD::FMUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 12, MVT::f64,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMSUB), 0,
- 1, MVT::f64, 3, 0, 1, 2,
- 12, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMSUBS), 0,
- 1, MVT::f32, 3, 0, 1, 2,
- 0,
- 0,
- 78,
- OPC_RecordChild0,
- OPC_Scope, 37,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_SwitchType , 12, MVT::f64,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FNMSUB), 0,
- 1, MVT::f64, 3, 1, 2, 0,
- 12, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FNMSUBS), 0,
- 1, MVT::f32, 3, 1, 2, 0,
- 0,
- 36,
- OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::f64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FSUB), 0,
- 1, MVT::f64, 2, 0, 1,
- 9, MVT::f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FSUBS), 0,
- 1, MVT::f32, 2, 0, 1,
- 9, MVT::v4f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBFP), 0,
- 1, MVT::v4f32, 2, 0, 1,
- 0,
- 0,
- 0,
- 87, ISD::ATOMIC_LOAD_ADD,
+ 79, TARGET_OPCODE(ISD::ATOMIC_LOAD_ADD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_SwitchType , 59, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPredicate, 31,
+ OPC_SwitchType , 53, MVT::i32,
+ OPC_Scope, 16,
+ OPC_CheckPredicate, 30,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_ADD_I8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
- 18,
- OPC_CheckPredicate, 32,
+ 16,
+ OPC_CheckPredicate, 31,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_ADD_I16), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
- 18,
- OPC_CheckPredicate, 33,
+ 16,
+ OPC_CheckPredicate, 32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_ADD_I32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
0,
- 18, MVT::i64,
- OPC_CheckPredicate, 34,
+ 16, MVT::i64,
+ OPC_CheckPredicate, 33,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_ADD_I64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 3, 3, 4, 2,
0,
- 87, ISD::ATOMIC_LOAD_SUB,
+ 79, TARGET_OPCODE(ISD::ATOMIC_LOAD_SUB),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_SwitchType , 59, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPredicate, 35,
+ OPC_SwitchType , 53, MVT::i32,
+ OPC_Scope, 16,
+ OPC_CheckPredicate, 34,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_SUB_I8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
- 18,
- OPC_CheckPredicate, 36,
+ 16,
+ OPC_CheckPredicate, 35,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_SUB_I16), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
- 18,
- OPC_CheckPredicate, 37,
+ 16,
+ OPC_CheckPredicate, 36,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_SUB_I32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
0,
- 18, MVT::i64,
- OPC_CheckPredicate, 38,
+ 16, MVT::i64,
+ OPC_CheckPredicate, 37,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_SUB_I64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 3, 3, 4, 2,
0,
- 87, ISD::ATOMIC_LOAD_AND,
+ 79, TARGET_OPCODE(ISD::ATOMIC_LOAD_AND),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_SwitchType , 59, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPredicate, 39,
+ OPC_SwitchType , 53, MVT::i32,
+ OPC_Scope, 16,
+ OPC_CheckPredicate, 38,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_AND_I8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
- 18,
- OPC_CheckPredicate, 40,
+ 16,
+ OPC_CheckPredicate, 39,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_AND_I16), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
- 18,
- OPC_CheckPredicate, 41,
+ 16,
+ OPC_CheckPredicate, 40,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_AND_I32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
0,
- 18, MVT::i64,
- OPC_CheckPredicate, 42,
+ 16, MVT::i64,
+ OPC_CheckPredicate, 41,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_AND_I64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 3, 3, 4, 2,
0,
- 87, ISD::ATOMIC_LOAD_OR,
+ 79, TARGET_OPCODE(ISD::ATOMIC_LOAD_OR),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_SwitchType , 59, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPredicate, 43,
+ OPC_SwitchType , 53, MVT::i32,
+ OPC_Scope, 16,
+ OPC_CheckPredicate, 42,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_OR_I8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
- 18,
- OPC_CheckPredicate, 44,
+ 16,
+ OPC_CheckPredicate, 43,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_OR_I16), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
- 18,
- OPC_CheckPredicate, 45,
+ 16,
+ OPC_CheckPredicate, 44,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_OR_I32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
0,
- 18, MVT::i64,
- OPC_CheckPredicate, 46,
+ 16, MVT::i64,
+ OPC_CheckPredicate, 45,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_OR_I64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 3, 3, 4, 2,
0,
- 87, ISD::ATOMIC_LOAD_XOR,
+ 79, TARGET_OPCODE(ISD::ATOMIC_LOAD_XOR),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_SwitchType , 59, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPredicate, 47,
+ OPC_SwitchType , 53, MVT::i32,
+ OPC_Scope, 16,
+ OPC_CheckPredicate, 46,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_XOR_I8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
- 18,
- OPC_CheckPredicate, 48,
+ 16,
+ OPC_CheckPredicate, 47,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_XOR_I16), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
- 18,
- OPC_CheckPredicate, 49,
+ 16,
+ OPC_CheckPredicate, 48,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_XOR_I32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
0,
- 18, MVT::i64,
- OPC_CheckPredicate, 50,
+ 16, MVT::i64,
+ OPC_CheckPredicate, 49,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_XOR_I64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 3, 3, 4, 2,
0,
- 87, ISD::ATOMIC_LOAD_NAND,
+ 79, TARGET_OPCODE(ISD::ATOMIC_LOAD_NAND),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_SwitchType , 59, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPredicate, 51,
+ OPC_SwitchType , 53, MVT::i32,
+ OPC_Scope, 16,
+ OPC_CheckPredicate, 50,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_NAND_I8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
- 18,
- OPC_CheckPredicate, 52,
+ 16,
+ OPC_CheckPredicate, 51,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_NAND_I16), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
- 18,
- OPC_CheckPredicate, 53,
+ 16,
+ OPC_CheckPredicate, 52,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_NAND_I32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
0,
- 18, MVT::i64,
- OPC_CheckPredicate, 54,
+ 16, MVT::i64,
+ OPC_CheckPredicate, 53,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_LOAD_NAND_I64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 3, 3, 4, 2,
0,
- 92, ISD::ATOMIC_CMP_SWAP,
+ 84, TARGET_OPCODE(ISD::ATOMIC_CMP_SWAP),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_RecordChild3,
- OPC_SwitchType , 62, MVT::i32,
- OPC_Scope, 19,
- OPC_CheckPredicate, 55,
+ OPC_SwitchType , 56, MVT::i32,
+ OPC_Scope, 17,
+ OPC_CheckPredicate, 54,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_CMP_SWAP_I8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 4, 4, 5, 2, 3,
- 19,
- OPC_CheckPredicate, 56,
+ 17,
+ OPC_CheckPredicate, 55,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_CMP_SWAP_I16), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 4, 4, 5, 2, 3,
- 19,
- OPC_CheckPredicate, 57,
+ 17,
+ OPC_CheckPredicate, 56,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_CMP_SWAP_I32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 4, 4, 5, 2, 3,
0,
- 19, MVT::i64,
- OPC_CheckPredicate, 58,
+ 17, MVT::i64,
+ OPC_CheckPredicate, 57,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_CMP_SWAP_I64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 4, 4, 5, 2, 3,
0,
- 87, ISD::ATOMIC_SWAP,
+ 79, TARGET_OPCODE(ISD::ATOMIC_SWAP),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_SwitchType , 59, MVT::i32,
- OPC_Scope, 18,
- OPC_CheckPredicate, 59,
+ OPC_SwitchType , 53, MVT::i32,
+ OPC_Scope, 16,
+ OPC_CheckPredicate, 58,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_SWAP_I8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
- 18,
- OPC_CheckPredicate, 60,
+ 16,
+ OPC_CheckPredicate, 59,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_SWAP_I16), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
- 18,
- OPC_CheckPredicate, 61,
+ 16,
+ OPC_CheckPredicate, 60,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_SWAP_I32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 3, 3, 4, 2,
0,
- 18, MVT::i64,
- OPC_CheckPredicate, 62,
+ 16, MVT::i64,
+ OPC_CheckPredicate, 61,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ATOMIC_SWAP_I64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i64, 3, 3, 4, 2,
0,
- 50, PPCISD::DYNALLOC,
+ 48, TARGET_OPCODE(PPCISD::DYNALLOC),
OPC_RecordNode,
OPC_RecordChild1,
- OPC_SwitchType , 21, MVT::i32,
+ OPC_SwitchType , 20, MVT::i32,
OPC_CheckChild1Type, MVT::i32,
OPC_RecordChild2,
OPC_CheckChild2Type, MVT::iPTR,
OPC_CheckComplexPat, /*CP*/1, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DYNALLOC), 0|OPFL_Chain,
- 1, MVT::i32, 3, 1, 3, 4,
- 21, MVT::i64,
+ 2, MVT::i32, MVT::i32, 3, 1, 3, 4,
+ 20, MVT::i64,
OPC_CheckChild1Type, MVT::i64,
OPC_RecordChild2,
OPC_CheckChild2Type, MVT::iPTR,
OPC_CheckComplexPat, /*CP*/1, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::DYNALLOC8), 0|OPFL_Chain,
- 1, MVT::i64, 3, 1, 3, 4,
+ 2, MVT::i64, MVT::i64, 3, 1, 3, 4,
0,
- 38, PPCISD::LARX,
+ 34, TARGET_OPCODE(PPCISD::LARX),
OPC_RecordNode,
OPC_RecordChild1,
- OPC_SwitchType , 15, MVT::i32,
+ OPC_SwitchType , 13, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LWARX), 0|OPFL_Chain,
1, MVT::i32, 2, 2, 3,
- 15, MVT::i64,
+ 13, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LDARX), 0|OPFL_Chain,
1, MVT::i64, 2, 2, 3,
0,
- 42, PPCISD::STCX,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_Scope, 18,
- OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::STWCX), 0|OPFL_Chain,
- 0, 3, 1, 3, 4,
- 18,
- OPC_CheckChild1Type, MVT::i64,
- OPC_RecordChild2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::STDCX), 0|OPFL_Chain,
- 0, 3, 1, 3, 4,
- 0,
- 44, PPCISD::LBRX,
+ 40, TARGET_OPCODE(PPCISD::LBRX),
OPC_RecordNode,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_Scope, 18,
+ OPC_Scope, 16,
OPC_CheckValueType, MVT::i16,
OPC_MoveParent,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LHBRX), 0|OPFL_Chain,
1, MVT::i32, 2, 2, 3,
- 18,
+ 16,
OPC_CheckValueType, MVT::i32,
OPC_MoveParent,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LWBRX), 0|OPFL_Chain,
1, MVT::i32, 2, 2, 3,
0,
- 45, PPCISD::STBRX,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 3,
- OPC_Scope, 18,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::STHBRX), 0|OPFL_Chain,
- 0, 3, 1, 3, 4,
- 18,
- OPC_CheckValueType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::STWBRX), 0|OPFL_Chain,
- 0, 3, 1, 3, 4,
- 0,
- 18, PPCISD::STFIWX,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::STFIWX), 0|OPFL_Chain,
- 0, 3, 1, 3, 4,
- 41, PPCISD::LOAD,
+ 37, TARGET_OPCODE(PPCISD::LOAD),
OPC_RecordNode,
OPC_CaptureFlagInput,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::iPTR,
OPC_CheckType, MVT::i64,
- OPC_Scope, 15,
+ OPC_Scope, 13,
OPC_CheckComplexPat, /*CP*/3, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LD), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
1, MVT::i64, 2, 2, 3,
- 15,
+ 13,
OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LDX), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
1, MVT::i64, 2, 2, 3,
0,
- 39, PPCISD::STD_32,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_Scope, 15,
- OPC_CheckComplexPat, /*CP*/3, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::STD_32), 0|OPFL_Chain,
- 0, 3, 1, 3, 4,
- 15,
- OPC_CheckComplexPat, /*CP*/2, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::STDX_32), 0|OPFL_Chain,
- 0, 3, 1, 3, 4,
- 0,
- 127|128,4, ISD::XOR,
- OPC_Scope, 36|128,1,
+ 84|128,4, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 39|128,1,
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 41, ISD::AND,
+ OPC_SwitchOpcode , 41, TARGET_OPCODE(ISD::AND),
OPC_RecordChild0,
OPC_RecordChild1,
OPC_MoveParent,
@@ -3048,7 +1424,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::NAND8), 0,
1, MVT::i64, 2, 0, 1,
0,
- 41, ISD::OR,
+ 41, TARGET_OPCODE(ISD::OR),
OPC_RecordChild0,
OPC_RecordChild1,
OPC_MoveParent,
@@ -3062,7 +1438,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::NOR8), 0,
1, MVT::i64, 2, 0, 1,
0,
- 72, ISD::XOR,
+ 72, TARGET_OPCODE(ISD::XOR),
OPC_RecordChild0,
OPC_Scope, 40,
OPC_RecordChild1,
@@ -3088,10 +1464,10 @@ SDNode *SelectCode(SDNode *N) {
1, MVT::i32, 2, 0, 1,
0,
0,
- 32,
+ 33,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
OPC_RecordChild0,
OPC_MoveChild, 1,
OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
@@ -3100,9 +1476,9 @@ SDNode *SelectCode(SDNode *N) {
OPC_CheckType, MVT::i32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::EQV), 0,
1, MVT::i32, 2, 1, 0,
- 32,
+ 33,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
OPC_RecordChild0,
OPC_MoveChild, 1,
OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
@@ -3112,54 +1488,112 @@ SDNode *SelectCode(SDNode *N) {
OPC_CheckType, MVT::i64,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::EQV8), 0,
1, MVT::i64, 2, 0, 1,
- 32,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
+ 84|128,1,
OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::EQV8), 0,
- 1, MVT::i64, 2, 1, 0,
- 91,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 39, ISD::OR,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
+ OPC_Scope, 59,
OPC_MoveChild, 1,
- OPC_SwitchOpcode , 14, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 63,
+ OPC_Scope, 30,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VNOR), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 14, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 64,
OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VNOR), 0,
- 1, MVT::v4i32, 2, 0, 1,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::EQV8), 0,
+ 1, MVT::i64, 2, 1, 0,
+ 23,
+ OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::NOR), 0,
+ 1, MVT::i32, 2, 0, 0,
0,
- 21, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 63,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
+ 19|128,1,
+ OPC_RecordChild1,
+ OPC_Scope, 119,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 19,
+ OPC_CheckPredicate, 62,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 1, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::XORI), 0,
+ 1, MVT::i32, 2, 0, 3,
+ 19,
+ OPC_CheckPredicate, 63,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::XORIS), 0,
+ 1, MVT::i32, 2, 0, 3,
+ 19,
+ OPC_CheckPredicate, 62,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 1, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::XORI8), 0,
+ 1, MVT::i64, 2, 0, 3,
+ 19,
+ OPC_CheckPredicate, 63,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::XORIS8), 0,
+ 1, MVT::i64, 2, 0, 3,
+ 31,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 1, 2,
+ OPC_EmitNode, TARGET_OPCODE(PPC::XORI), 0,
+ 1, MVT::i32, 2, 0, 3,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 0, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::XORIS), 0,
+ 1, MVT::i32, 2, 4, 6,
+ 0,
+ 11,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::XOR), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 11,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::XOR8), 0,
+ 1, MVT::i64, 2, 0, 1,
+ 0,
+ 0,
+ 70,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 30, TARGET_OPCODE(ISD::OR),
OPC_RecordChild0,
OPC_RecordChild1,
OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 64,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VNOR), 0,
1, MVT::v4i32, 2, 0, 1,
- 21, ISD::BIT_CONVERT,
+ 30, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
OPC_CheckPredicate, 64,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
OPC_MoveParent,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::OR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::OR),
OPC_RecordChild0,
OPC_RecordChild1,
OPC_MoveParent,
@@ -3167,125 +1601,44 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VNOR), 0,
1, MVT::v4i32, 2, 0, 1,
0,
- 34|128,1,
+ 28,
OPC_RecordChild0,
- OPC_Scope, 25,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::NOR), 0,
- 1, MVT::i32, 2, 0, 0,
- 47,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 19,
- OPC_CheckPredicate, 65,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 1, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::XORI), 0,
- 1, MVT::i32, 2, 0, 3,
- 19,
- OPC_CheckPredicate, 66,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::XORIS), 0,
- 1, MVT::i32, 2, 0, 3,
- 0,
- 36,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 14, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 63,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VNOR), 0,
- 1, MVT::v4i32, 2, 0, 0,
- 14, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 64,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VNOR), 0,
- 1, MVT::v4i32, 2, 0, 0,
- 0,
- 47,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 19,
- OPC_CheckPredicate, 65,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 1, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::XORI8), 0,
- 1, MVT::i64, 2, 0, 3,
- 19,
- OPC_CheckPredicate, 66,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::XORIS8), 0,
- 1, MVT::i64, 2, 0, 3,
- 0,
- 0,
- 38,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 15, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 63,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VNOR), 0,
- 1, MVT::v4i32, 2, 0, 0,
- 15, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 64,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VNOR), 0,
- 1, MVT::v4i32, 2, 0, 0,
- 0,
- 76,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 64,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VNOR), 0,
+ 1, MVT::v4i32, 2, 0, 0,
+ 28,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 64,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VNOR), 0,
+ 1, MVT::v4i32, 2, 0, 0,
+ 13,
OPC_RecordChild0,
OPC_RecordChild1,
- OPC_Scope, 35,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 1, 2,
- OPC_EmitNode, TARGET_OPCODE(PPC::XORI), 0,
- 1, MVT::i32, 2, 0, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 0, 5,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::XORIS), 0,
- 1, MVT::i32, 2, 4, 6,
- 11,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::XOR), 0,
- 1, MVT::i32, 2, 0, 1,
- 11,
- OPC_CheckType, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VXOR), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 11,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::XOR8), 0,
- 1, MVT::i64, 2, 0, 1,
- 0,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VXOR), 0,
+ 1, MVT::v4i32, 2, 0, 1,
0,
- 75|128,3, ISD::AND,
- OPC_Scope, 45,
+ 9|128,4, TARGET_OPCODE(ISD::AND),
+ OPC_Scope, 46,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
OPC_RecordChild0,
OPC_MoveChild, 1,
OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
@@ -3298,9 +1651,9 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ANDC8), 0,
1, MVT::i64, 2, 0, 1,
0,
- 85,
+ 88,
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 41, ISD::XOR,
+ OPC_SwitchOpcode , 41, TARGET_OPCODE(ISD::XOR),
OPC_RecordChild0,
OPC_MoveChild, 1,
OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
@@ -3314,15 +1667,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ANDC8), 0,
1, MVT::i64, 2, 1, 0,
0,
- 36, ISD::ROTL,
+ 37, TARGET_OPCODE(ISD::ROTL),
OPC_RecordChild0,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i32,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 67,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 65,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_EmitConvertToTarget, 2,
@@ -3332,57 +1685,114 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLWNM), 0,
1, MVT::i32, 4, 0, 1, 4, 6,
0,
- 68,
+ 63|128,1,
OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 39,
- OPC_RecordChild0,
+ OPC_Scope, 119,
+ OPC_RecordChild1,
+ OPC_Scope, 91,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 20,
+ OPC_CheckPredicate, 62,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 1, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ANDIo), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 3,
+ 20,
+ OPC_CheckPredicate, 63,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ANDISo), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 3,
+ 20,
+ OPC_CheckPredicate, 62,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 1, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ANDIo8), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 3,
+ 20,
+ OPC_CheckPredicate, 63,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ANDISo8), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 3,
+ 0,
+ 11,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::AND), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 11,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::AND8), 0,
+ 1, MVT::i64, 2, 0, 1,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_SwitchOpcode , 15, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 63,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 29,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 64,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VANDC), 0,
1, MVT::v4i32, 2, 0, 1,
- 15, ISD::BIT_CONVERT,
+ 29,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
OPC_CheckPredicate, 64,
+ OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VANDC), 0,
1, MVT::v4i32, 2, 0, 1,
0,
- 20,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 63,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VANDC), 0,
- 1, MVT::v4i32, 2, 0, 1,
0,
- 50,
+ 69,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 21,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 30,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 63,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 64,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckType, MVT::v4i32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VANDC), 0,
1, MVT::v4i32, 2, 1, 0,
- 21,
+ 30,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 63,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 64,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
@@ -3391,37 +1801,49 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VANDC), 0,
1, MVT::v4i32, 2, 1, 0,
0,
- 25,
+ 35,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
OPC_CheckPredicate, 64,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VANDC), 0,
1, MVT::v4i32, 2, 0, 1,
- 50,
+ 69,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 21,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 30,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
OPC_CheckPredicate, 64,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckType, MVT::v4i32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VANDC), 0,
1, MVT::v4i32, 2, 1, 0,
- 21,
+ 30,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
OPC_CheckPredicate, 64,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
@@ -3430,64 +1852,18 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VANDC), 0,
1, MVT::v4i32, 2, 1, 0,
0,
- 127,
+ 13,
OPC_RecordChild0,
OPC_RecordChild1,
- OPC_Scope, 86,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 19,
- OPC_CheckPredicate, 65,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 1, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ANDIo), 0,
- 1, MVT::i32, 2, 0, 3,
- 19,
- OPC_CheckPredicate, 66,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ANDISo), 0,
- 1, MVT::i32, 2, 0, 3,
- 19,
- OPC_CheckPredicate, 65,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 1, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ANDIo8), 0,
- 1, MVT::i64, 2, 0, 3,
- 19,
- OPC_CheckPredicate, 66,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ANDISo8), 0,
- 1, MVT::i64, 2, 0, 3,
- 0,
- 11,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::AND), 0,
- 1, MVT::i32, 2, 0, 1,
- 11,
- OPC_CheckType, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VAND), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 11,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::AND8), 0,
- 1, MVT::i64, 2, 0, 1,
- 0,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VAND), 0,
+ 1, MVT::v4i32, 2, 0, 1,
0,
- 127|128,1, ISD::OR,
- OPC_Scope, 45,
+ 2|128,2, TARGET_OPCODE(ISD::OR),
+ OPC_Scope, 46,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
OPC_RecordChild0,
OPC_MoveChild, 1,
OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
@@ -3500,9 +1876,9 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ORC8), 0,
1, MVT::i64, 2, 0, 1,
0,
- 45,
+ 46,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
OPC_RecordChild0,
OPC_MoveChild, 1,
OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
@@ -3516,14 +1892,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ORC8), 0,
1, MVT::i64, 2, 1, 0,
0,
- 31|128,1,
+ 32|128,1,
OPC_RecordChild0,
OPC_RecordChild1,
- OPC_Scope, 118,
+ OPC_Scope, 119,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_Scope, 19,
- OPC_CheckPredicate, 65,
+ OPC_CheckPredicate, 62,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_EmitConvertToTarget, 1,
@@ -3531,7 +1907,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ORI), 0,
1, MVT::i32, 2, 0, 3,
19,
- OPC_CheckPredicate, 66,
+ OPC_CheckPredicate, 63,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_EmitConvertToTarget, 1,
@@ -3539,7 +1915,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ORIS), 0,
1, MVT::i32, 2, 0, 3,
19,
- OPC_CheckPredicate, 65,
+ OPC_CheckPredicate, 62,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_EmitConvertToTarget, 1,
@@ -3547,7 +1923,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ORI8), 0,
1, MVT::i64, 2, 0, 3,
19,
- OPC_CheckPredicate, 66,
+ OPC_CheckPredicate, 63,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_EmitConvertToTarget, 1,
@@ -3571,19 +1947,19 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::OR), 0,
1, MVT::i32, 2, 0, 1,
11,
- OPC_CheckType, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VOR), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 11,
OPC_CheckType, MVT::i64,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::OR8), 0,
1, MVT::i64, 2, 0, 1,
+ 11,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VOR), 0,
+ 1, MVT::v4i32, 2, 0, 1,
0,
0,
- 125, PPCISD::Hi,
+ 1|128,1, TARGET_OPCODE(PPCISD::Hi),
OPC_RecordChild0,
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 28, ISD::TargetGlobalAddress,
+ OPC_SwitchOpcode , 28, TARGET_OPCODE(ISD::TargetGlobalAddress),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -3595,7 +1971,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LIS8), 0,
1, MVT::i64, 1, 0,
0,
- 28, ISD::TargetConstantPool,
+ 28, TARGET_OPCODE(ISD::TargetConstantPool),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -3607,7 +1983,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LIS8), 0,
1, MVT::i64, 1, 0,
0,
- 28, ISD::TargetJumpTable,
+ 28, TARGET_OPCODE(ISD::TargetJumpTable),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -3619,7 +1995,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LIS8), 0,
1, MVT::i64, 1, 0,
0,
- 28, ISD::TargetBlockAddress,
+ 28, TARGET_OPCODE(ISD::TargetBlockAddress),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -3632,10 +2008,10 @@ SDNode *SelectCode(SDNode *N) {
1, MVT::i64, 1, 0,
0,
0,
- 125, PPCISD::Lo,
+ 1|128,1, TARGET_OPCODE(PPCISD::Lo),
OPC_RecordChild0,
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 28, ISD::TargetGlobalAddress,
+ OPC_SwitchOpcode , 28, TARGET_OPCODE(ISD::TargetGlobalAddress),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -3647,7 +2023,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LI8), 0,
1, MVT::i64, 1, 0,
0,
- 28, ISD::TargetConstantPool,
+ 28, TARGET_OPCODE(ISD::TargetConstantPool),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -3659,7 +2035,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LI8), 0,
1, MVT::i64, 1, 0,
0,
- 28, ISD::TargetJumpTable,
+ 28, TARGET_OPCODE(ISD::TargetJumpTable),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -3671,7 +2047,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LI8), 0,
1, MVT::i64, 1, 0,
0,
- 28, ISD::TargetBlockAddress,
+ 28, TARGET_OPCODE(ISD::TargetBlockAddress),
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -3684,123 +2060,565 @@ SDNode *SelectCode(SDNode *N) {
1, MVT::i64, 1, 0,
0,
0,
- 25, ISD::CALLSEQ_END,
- OPC_RecordNode,
- OPC_CaptureFlagInput,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TargetConstant,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::TargetConstant,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADJCALLSTACKUP), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 2, 1, 2,
- 75|128,1, PPCISD::TC_RETURN,
- OPC_RecordNode,
+ 118, TARGET_OPCODE(ISD::SUB),
+ OPC_Scope, 28,
+ OPC_MoveChild, 0,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::NEG), 0,
+ 1, MVT::i32, 1, 0,
+ 8, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::NEG8), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 86,
+ OPC_RecordChild0,
+ OPC_Scope, 23,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 0,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i32,
+ OPC_EmitConvertToTarget, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFIC), 0,
+ 2, MVT::i32, MVT::i32, 2, 1, 2,
+ 58,
+ OPC_RecordChild1,
+ OPC_SwitchType , 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBF), 0,
+ 1, MVT::i32, 2, 1, 0,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBF8), 0,
+ 1, MVT::i64, 2, 1, 0,
+ 9, MVT::v16i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBUBM), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 9, MVT::v8i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBUHM), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 9, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBUWM), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
+ 0,
+ 0,
+ 102, TARGET_OPCODE(ISD::ADDE),
OPC_CaptureFlagInput,
- OPC_RecordChild1,
- OPC_Scope, 24|128,1,
+ OPC_RecordChild0,
+ OPC_Scope, 69,
OPC_MoveChild, 1,
- OPC_SwitchOpcode , 50, ISD::Constant,
- OPC_SwitchType , 22, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::TCRETURNai), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
- 0, 2, 3, 4,
- 22, MVT::i64,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::TCRETURNai8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
- 0, 2, 3, 4,
+ OPC_Scope, 36,
+ OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
+ OPC_MoveParent,
+ OPC_SwitchType , 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDME), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDME8), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 1, 0,
0,
- 46, ISD::TargetGlobalAddress,
- OPC_SwitchType , 20, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::TCRETURNdi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
- 0, 2, 1, 3,
- 20, MVT::i64,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::TCRETURNdi8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
- 0, 2, 1, 3,
+ 27,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_SwitchType , 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDZE), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDZE8), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 1, 0,
0,
- 46, ISD::TargetExternalSymbol,
- OPC_SwitchType , 20, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::TCRETURNdi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
- 0, 2, 1, 3,
- 20, MVT::i64,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::TCRETURNdi8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
- 0, 2, 1, 3,
+ 0,
+ 27,
+ OPC_RecordChild1,
+ OPC_SwitchType , 10, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDE), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 10, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDE8), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 0,
+ 0,
+ 104, TARGET_OPCODE(ISD::SUBE),
+ OPC_CaptureFlagInput,
+ OPC_Scope, 71,
+ OPC_MoveChild, 0,
+ OPC_Scope, 37,
+ OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFME), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFME8), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 0,
+ 28,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFZE), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFZE8), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 1, 0,
0,
0,
- 21,
+ 28,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 10, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFE), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 1, 0,
+ 10, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFE8), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 1, 0,
+ 0,
+ 0,
+ 69, TARGET_OPCODE(ISD::ADDC),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_Scope, 38,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 0,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDIC), 0|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDIC8), 0|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 12,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDC), 0|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDC8), 0|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 0,
+ 50, TARGET_OPCODE(ISD::MUL),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_Scope, 21,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 0,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::MULLI), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 11,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::MULLW), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 11,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::MULLD), 0,
+ 1, MVT::i64, 2, 0, 1,
+ 0,
+ 71, TARGET_OPCODE(ISD::SUBC),
+ OPC_RecordChild0,
+ OPC_Scope, 39,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 0,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 12, MVT::i32,
+ OPC_EmitConvertToTarget, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFIC), 0|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 1, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFIC8), 0|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 1, 2,
+ 0,
+ 27,
+ OPC_RecordChild1,
+ OPC_SwitchType , 10, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFC), 0|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 1, 0,
+ 10, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFC8), 0|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 1, 0,
+ 0,
+ 0,
+ 114, TARGET_OPCODE(ISD::SRA),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_Scope, 38,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i32,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRAWI), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRADI), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 28,
OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_SwitchType , 10, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRAW), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 10, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRAD), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 0,
+ 13,
+ OPC_CheckChild1Type, MVT::v16i8,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRAB), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 13,
+ OPC_CheckChild1Type, MVT::v8i16,
+ OPC_CheckType, MVT::v8i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRAH), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 13,
+ OPC_CheckChild1Type, MVT::v4i32,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRAW), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
+ 126, TARGET_OPCODE(ISD::SHL),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_Scope, 52,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i32,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::TCRETURNri), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
- 0, 2, 1, 3,
- 21,
- OPC_CheckChild1Type, MVT::i64,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_SwitchType , 21, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitInteger, MVT::i32, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 5, 4,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLWINM), 0,
+ 1, MVT::i32, 4, 0, 2, 3, 5,
+ 17, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 6, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLDICR), 0,
+ 1, MVT::i64, 3, 0, 2, 4,
+ 0,
+ 26,
+ OPC_CheckChild1Type, MVT::i32,
+ OPC_SwitchType , 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SLW), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SLD), 0,
+ 1, MVT::i64, 2, 0, 1,
+ 0,
+ 13,
+ OPC_CheckChild1Type, MVT::v16i8,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSLB), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 13,
+ OPC_CheckChild1Type, MVT::v8i16,
+ OPC_CheckType, MVT::v8i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSLH), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 13,
+ OPC_CheckChild1Type, MVT::v4i32,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSLW), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
+ 126, TARGET_OPCODE(ISD::SRL),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_Scope, 52,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i32,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::TCRETURNri8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
- 0, 2, 1, 3,
+ OPC_SwitchType , 21, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 7, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitInteger, MVT::i32, 31,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLWINM), 0,
+ 1, MVT::i32, 4, 0, 3, 4, 5,
+ 17, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 8, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLDICL), 0,
+ 1, MVT::i64, 3, 0, 3, 4,
+ 0,
+ 26,
+ OPC_CheckChild1Type, MVT::i32,
+ OPC_SwitchType , 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRW), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRD), 0,
+ 1, MVT::i64, 2, 0, 1,
+ 0,
+ 13,
+ OPC_CheckChild1Type, MVT::v16i8,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRB), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 13,
+ OPC_CheckChild1Type, MVT::v8i16,
+ OPC_CheckType, MVT::v8i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRH), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 13,
+ OPC_CheckChild1Type, MVT::v4i32,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRW), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
+ 92, TARGET_OPCODE(ISD::ROTL),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_Scope, 48,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i32,
+ OPC_MoveParent,
+ OPC_SwitchType , 19, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitInteger, MVT::i32, 0,
+ OPC_EmitInteger, MVT::i32, 31,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLWINM), 0,
+ 1, MVT::i32, 4, 0, 2, 3, 4,
+ 15, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitInteger, MVT::i32, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLDICL), 0,
+ 1, MVT::i64, 3, 0, 2, 3,
+ 0,
+ 38,
+ OPC_CheckChild1Type, MVT::i32,
+ OPC_SwitchType , 17, MVT::i32,
+ OPC_EmitInteger, MVT::i32, 0,
+ OPC_EmitInteger, MVT::i32, 31,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLWNM), 0,
+ 1, MVT::i32, 4, 0, 1, 2, 3,
+ 13, MVT::i64,
+ OPC_EmitInteger, MVT::i32, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLDCL), 0,
+ 1, MVT::i64, 3, 0, 1, 2,
+ 0,
+ 0,
+ 19, TARGET_OPCODE(PPCISD::TOC_ENTRY),
+ OPC_RecordChild0,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::TargetGlobalAddress),
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LDtoc), 0,
+ 1, MVT::i64, 2, 0, 1,
+ 97, TARGET_OPCODE(ISD::Constant),
+ OPC_RecordNode,
+ OPC_SwitchType , 59, MVT::i32,
+ OPC_Scope, 12,
+ OPC_CheckPredicate, 0,
+ OPC_EmitConvertToTarget, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LI), 0,
+ 1, MVT::i32, 1, 1,
+ 15,
+ OPC_CheckPredicate, 1,
+ OPC_EmitConvertToTarget, 0,
+ OPC_EmitNodeXForm, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LIS), 0,
+ 1, MVT::i32, 1, 2,
+ 27,
+ OPC_EmitConvertToTarget, 0,
+ OPC_EmitNodeXForm, 0, 1,
+ OPC_EmitNode, TARGET_OPCODE(PPC::LIS), 0,
+ 1, MVT::i32, 1, 2,
+ OPC_EmitConvertToTarget, 0,
+ OPC_EmitNodeXForm, 1, 4,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ORI), 0,
+ 1, MVT::i32, 2, 3, 5,
+ 0,
+ 31, MVT::i64,
+ OPC_Scope, 12,
+ OPC_CheckPredicate, 0,
+ OPC_EmitConvertToTarget, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LI8), 0,
+ 1, MVT::i64, 1, 1,
+ 15,
+ OPC_CheckPredicate, 1,
+ OPC_EmitConvertToTarget, 0,
+ OPC_EmitNodeXForm, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LIS8), 0,
+ 1, MVT::i64, 1, 2,
+ 0,
0,
- 55|128,1, ISD::FNEG,
- OPC_Scope, 27|128,1,
+ 28, TARGET_OPCODE(PPCISD::SHL),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_CheckChild1Type, MVT::i32,
+ OPC_SwitchType , 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SLW), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SLD), 0,
+ 1, MVT::i64, 2, 0, 1,
+ 0,
+ 28, TARGET_OPCODE(PPCISD::SRL),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_CheckChild1Type, MVT::i32,
+ OPC_SwitchType , 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRW), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRD), 0,
+ 1, MVT::i64, 2, 0, 1,
+ 0,
+ 30, TARGET_OPCODE(PPCISD::SRA),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_CheckChild1Type, MVT::i32,
+ OPC_SwitchType , 10, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRAW), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 10, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRAD), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 0,
+ 23, TARGET_OPCODE(ISD::CTLZ),
+ OPC_RecordChild0,
+ OPC_SwitchType , 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::CNTLZW), 0,
+ 1, MVT::i32, 1, 0,
+ 8, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::CNTLZD), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 69, TARGET_OPCODE(ISD::SIGN_EXTEND_INREG),
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_Scope, 25,
+ OPC_CheckValueType, MVT::i8,
+ OPC_MoveParent,
+ OPC_SwitchType , 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::EXTSB), 0,
+ 1, MVT::i32, 1, 0,
+ 8, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::EXTSB8), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 25,
+ OPC_CheckValueType, MVT::i16,
+ OPC_MoveParent,
+ OPC_SwitchType , 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::EXTSH), 0,
+ 1, MVT::i32, 1, 0,
+ 8, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::EXTSH8), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 11,
+ OPC_CheckValueType, MVT::i32,
+ OPC_MoveParent,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::EXTSW), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 26, TARGET_OPCODE(ISD::SDIV),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::DIVW), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::DIVD), 0,
+ 1, MVT::i64, 2, 0, 1,
+ 0,
+ 26, TARGET_OPCODE(ISD::UDIV),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::DIVWU), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::DIVDU), 0,
+ 1, MVT::i64, 2, 0, 1,
+ 0,
+ 26, TARGET_OPCODE(ISD::MULHS),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::MULHW), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::MULHD), 0,
+ 1, MVT::i64, 2, 0, 1,
+ 0,
+ 26, TARGET_OPCODE(ISD::MULHU),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::MULHWU), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::MULHDU), 0,
+ 1, MVT::i64, 2, 0, 1,
+ 0,
+ 11, TARGET_OPCODE(PPCISD::EXTSW_32),
+ OPC_RecordChild0,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::EXTSW_32), 0,
+ 1, MVT::i32, 1, 0,
+ 13, TARGET_OPCODE(ISD::SIGN_EXTEND),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::EXTSW_32_64), 0,
+ 1, MVT::i64, 1, 0,
+ 14, TARGET_OPCODE(ISD::ANY_EXTEND),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::OR4To8), 0,
+ 1, MVT::i64, 2, 0, 0,
+ 12, TARGET_OPCODE(ISD::TRUNCATE),
+ OPC_RecordChild0,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::OR8To4), 0,
+ 1, MVT::i32, 2, 0, 0,
+ 30, TARGET_OPCODE(ISD::ZERO_EXTEND),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitNode, TARGET_OPCODE(PPC::OR4To8), 0,
+ 1, MVT::i64, 2, 0, 0,
+ OPC_EmitInteger, MVT::i32, 0,
+ OPC_EmitInteger, MVT::i32, 32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLDICL), 0,
+ 1, MVT::i64, 3, 1, 2, 3,
+ 61|128,1, TARGET_OPCODE(ISD::FNEG),
+ OPC_Scope, 33|128,1,
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 82, ISD::FADD,
- OPC_Scope, 39,
+ OPC_SwitchOpcode , 84, TARGET_OPCODE(ISD::FADD),
+ OPC_Scope, 40,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::FMUL,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::FMUL),
OPC_RecordChild0,
OPC_RecordChild1,
OPC_MoveParent,
@@ -3815,10 +2633,10 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::FNMADDS), 0,
1, MVT::f32, 3, 0, 1, 2,
0,
- 39,
+ 40,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::FMUL,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::FMUL),
OPC_RecordChild0,
OPC_RecordChild1,
OPC_MoveParent,
@@ -3833,9 +2651,9 @@ SDNode *SelectCode(SDNode *N) {
1, MVT::f32, 3, 1, 2, 0,
0,
0,
- 39, ISD::FSUB,
+ 40, TARGET_OPCODE(ISD::FSUB),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::FMUL,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::FMUL),
OPC_RecordChild0,
OPC_RecordChild1,
OPC_MoveParent,
@@ -3850,7 +2668,7 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(PPC::FNMSUBS), 0,
1, MVT::f32, 3, 0, 1, 2,
0,
- 24, ISD::FABS,
+ 24, TARGET_OPCODE(ISD::FABS),
OPC_RecordChild0,
OPC_MoveParent,
OPC_SwitchType , 8, MVT::f32,
@@ -3871,154 +2689,1215 @@ SDNode *SelectCode(SDNode *N) {
1, MVT::f64, 1, 0,
0,
0,
- 116, ISD::SUB,
- OPC_Scope, 28,
+ 24, TARGET_OPCODE(PPCISD::MTFSF),
+ OPC_CaptureFlagInput,
+ OPC_RecordChild0,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i32,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_EmitConvertToTarget, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::MTFSF), 0|OPFL_FlagInput,
+ 1, MVT::f64, 3, 3, 1, 2,
+ 47|128,1, TARGET_OPCODE(ISD::FADD),
+ OPC_Scope, 39,
OPC_MoveChild, 0,
- OPC_CheckInteger, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::FMUL),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::NEG), 0,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::NEG8), 0,
- 1, MVT::i64, 1, 0,
+ OPC_SwitchType , 12, MVT::f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMADD), 0,
+ 1, MVT::f64, 3, 0, 1, 2,
+ 12, MVT::f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMADDS), 0,
+ 1, MVT::f32, 3, 0, 1, 2,
0,
- 84,
+ 68,
OPC_RecordChild0,
- OPC_Scope, 21,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_EmitConvertToTarget, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFIC), 0,
- 1, MVT::i32, 2, 1, 2,
- 58,
+ OPC_Scope, 38,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::FMUL),
+ OPC_RecordChild0,
OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBF), 0,
- 1, MVT::i32, 2, 1, 0,
- 9, MVT::v16i8,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBUBM), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 9, MVT::v8i16,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBUHM), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 9, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBUWM), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBF8), 0,
- 1, MVT::i64, 2, 1, 0,
- 0,
- 0,
- 0,
- 96, ISD::ADDE,
- OPC_CaptureFlagInput,
- OPC_RecordChild0,
- OPC_Scope, 65,
- OPC_MoveChild, 1,
- OPC_Scope, 34,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
OPC_MoveParent,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDME), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDME8), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i64, 1, 0,
+ OPC_SwitchType , 12, MVT::f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMADD), 0,
+ 1, MVT::f64, 3, 1, 2, 0,
+ 12, MVT::f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMADDS), 0,
+ 1, MVT::f32, 3, 1, 2, 0,
0,
25,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDZE), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDZE8), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i64, 1, 0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 9, MVT::f64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FADD), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 9, MVT::f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FADDS), 0,
+ 1, MVT::f32, 2, 0, 1,
0,
0,
- 25,
+ 23,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::FMUL),
+ OPC_RecordChild0,
OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDE), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 1,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDE8), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i64, 2, 0, 1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v4f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMADDFP), 0,
+ 1, MVT::v4f32, 3, 0, 1, 2,
+ 39,
+ OPC_RecordChild0,
+ OPC_Scope, 22,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::FMUL),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMADDFP), 0,
+ 1, MVT::v4f32, 3, 1, 2, 0,
+ 12,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v4f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDFP), 0,
+ 1, MVT::v4f32, 2, 0, 1,
0,
0,
- 98, ISD::SUBE,
- OPC_CaptureFlagInput,
- OPC_Scope, 67,
+ 35|128,1, TARGET_OPCODE(ISD::FSUB),
+ OPC_Scope, 39,
OPC_MoveChild, 0,
- OPC_Scope, 35,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::FMUL),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 12, MVT::f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMSUB), 0,
+ 1, MVT::f64, 3, 0, 1, 2,
+ 12, MVT::f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMSUBS), 0,
+ 1, MVT::f32, 3, 0, 1, 2,
+ 0,
+ 68,
+ OPC_RecordChild0,
+ OPC_Scope, 38,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::FMUL),
+ OPC_RecordChild0,
OPC_RecordChild1,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFME), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFME8), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i64, 1, 0,
- 0,
- 26,
- OPC_CheckInteger, 0,
OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FNMSUB), 0,
+ 1, MVT::f64, 3, 1, 2, 0,
+ 12, MVT::f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FNMSUBS), 0,
+ 1, MVT::f32, 3, 1, 2, 0,
+ 0,
+ 25,
OPC_RecordChild1,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFZE), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFZE8), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i64, 1, 0,
+ OPC_SwitchType , 9, MVT::f64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FSUB), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 9, MVT::f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FSUBS), 0,
+ 1, MVT::f32, 2, 0, 1,
0,
0,
- 26,
+ 37,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 66,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::FSUB),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::FMUL),
OPC_RecordChild0,
OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFE), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 2, 1, 0,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFE8), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i64, 2, 1, 0,
- 0,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VNMSUBFP), 0,
+ 1, MVT::v4f32, 3, 0, 1, 2,
+ 13,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v4f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBFP), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 11, TARGET_OPCODE(PPCISD::FCTIWZ),
+ OPC_RecordChild0,
+ OPC_CheckType, MVT::f64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FCTIWZ), 0,
+ 1, MVT::f64, 1, 0,
+ 9, TARGET_OPCODE(ISD::FP_ROUND),
+ OPC_RecordChild0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FRSP), 0,
+ 1, MVT::f32, 1, 0,
+ 23, TARGET_OPCODE(ISD::FSQRT),
+ OPC_RecordChild0,
+ OPC_SwitchType , 8, MVT::f64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FSQRT), 0,
+ 1, MVT::f64, 1, 0,
+ 8, MVT::f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FSQRTS), 0,
+ 1, MVT::f32, 1, 0,
0,
- 84|128,1, PPCISD::VCMP,
+ 23, TARGET_OPCODE(ISD::FABS),
+ OPC_RecordChild0,
+ OPC_SwitchType , 8, MVT::f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FABSS), 0,
+ 1, MVT::f32, 1, 0,
+ 8, MVT::f64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FABSD), 0,
+ 1, MVT::f64, 1, 0,
+ 0,
+ 7, TARGET_OPCODE(PPCISD::MFFS),
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::MFFS), 0|OPFL_FlagOutput,
+ 1, MVT::f64, 0,
+ 14, TARGET_OPCODE(PPCISD::FADDRTZ),
+ OPC_CaptureFlagInput,
OPC_RecordChild0,
OPC_RecordChild1,
- OPC_MoveChild, 2,
- OPC_Scope, 15,
- OPC_CheckInteger, 70|128,7,
+ OPC_CheckType, MVT::f64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FADDrtz), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 1, MVT::f64, 2, 0, 1,
+ 29, TARGET_OPCODE(PPCISD::FSEL),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_SwitchType , 10, MVT::f64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FSELD), 0,
+ 1, MVT::f64, 3, 0, 1, 2,
+ 10, MVT::f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FSELS), 0,
+ 1, MVT::f32, 3, 0, 1, 2,
+ 0,
+ 26, TARGET_OPCODE(ISD::FDIV),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 9, MVT::f64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FDIV), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 9, MVT::f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FDIVS), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 0,
+ 45, TARGET_OPCODE(ISD::FMUL),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 9, MVT::f64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMUL), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 9, MVT::f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMULS), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 17, MVT::v4f32,
+ OPC_EmitNode, TARGET_OPCODE(PPC::V_SET0), 0,
+ 1, MVT::v4i32, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMADDFP), 0,
+ 1, MVT::v4f32, 3, 0, 1, 2,
+ 0,
+ 13, TARGET_OPCODE(ISD::FP_EXTEND),
+ OPC_RecordChild0,
+ OPC_EmitInteger, MVT::i32, PPC::F8RCRegClassID,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 11, TARGET_OPCODE(PPCISD::FCFID),
+ OPC_RecordChild0,
+ OPC_CheckType, MVT::f64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FCFID), 0,
+ 1, MVT::f64, 1, 0,
+ 11, TARGET_OPCODE(PPCISD::FCTIDZ),
+ OPC_RecordChild0,
+ OPC_CheckType, MVT::f64,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::FCTIDZ), 0,
+ 1, MVT::f64, 1, 0,
+ 113, TARGET_OPCODE(ISD::INTRINSIC_W_CHAIN),
+ OPC_RecordNode,
+ OPC_MoveChild, 1,
+ OPC_Scope, 18,
+ OPC_CheckInteger, 54|128,1,
OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPBFP), 0,
- 1, MVT::v4f32, 2, 0, 1,
+ OPC_RecordChild2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LVEBX), 0|OPFL_Chain,
+ 1, MVT::v16i8, 2, 2, 3,
+ 18,
+ OPC_CheckInteger, 55|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LVEHX), 0|OPFL_Chain,
+ 1, MVT::v8i16, 2, 2, 3,
+ 18,
+ OPC_CheckInteger, 56|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LVEWX), 0|OPFL_Chain,
+ 1, MVT::v4i32, 2, 2, 3,
+ 18,
+ OPC_CheckInteger, 59|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LVX), 0|OPFL_Chain,
+ 1, MVT::v4i32, 2, 2, 3,
+ 18,
+ OPC_CheckInteger, 60|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LVXL), 0|OPFL_Chain,
+ 1, MVT::v4i32, 2, 2, 3,
+ 12,
+ OPC_CheckInteger, 61|128,1,
+ OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::MFVSCR), 0|OPFL_Chain,
+ 1, MVT::v8i16, 0,
+ 0,
+ 22|128,13, TARGET_OPCODE(ISD::INTRINSIC_WO_CHAIN),
+ OPC_MoveChild, 0,
+ OPC_Scope, 17,
+ OPC_CheckInteger, 57|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LVSL), 0,
+ 1, MVT::v16i8, 2, 1, 2,
+ 17,
+ OPC_CheckInteger, 58|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::LVSR), 0,
+ 1, MVT::v16i8, 2, 1, 2,
+ 23,
+ OPC_CheckInteger, 109|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCTSXS), 0,
+ 1, MVT::v4i32, 2, 2, 0,
+ 23,
+ OPC_CheckInteger, 110|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCTUXS), 0,
+ 1, MVT::v4i32, 2, 2, 0,
+ 17,
+ OPC_CheckInteger, 121|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMHADDSHS), 0,
+ 1, MVT::v8i16, 3, 0, 1, 2,
+ 17,
+ OPC_CheckInteger, 122|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMHRADDSHS), 0,
+ 1, MVT::v8i16, 3, 0, 1, 2,
+ 17,
+ OPC_CheckInteger, 2|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMLADDUHM), 0,
+ 1, MVT::v8i16, 3, 0, 1, 2,
+ 17,
+ OPC_CheckInteger, 18|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPERM), 0,
+ 1, MVT::v4i32, 3, 0, 1, 2,
+ 17,
+ OPC_CheckInteger, 35|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSEL), 0,
+ 1, MVT::v4i32, 3, 0, 1, 2,
+ 15,
+ OPC_CheckInteger, 68|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDCUW), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 69|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDSBS), 0,
+ 1, MVT::v16i8, 2, 0, 1,
15,
OPC_CheckInteger, 70|128,1,
OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPEQFP), 0,
- 1, MVT::v4f32, 2, 0, 1,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDSHS), 0,
+ 1, MVT::v8i16, 2, 0, 1,
15,
- OPC_CheckInteger, 70|128,3,
+ OPC_CheckInteger, 71|128,1,
OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPGEFP), 0,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDSWS), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 72|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDUBS), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 73|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDUHS), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 74|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDUWS), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 75|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VAVGSB), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 76|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VAVGSH), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 77|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VAVGSW), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 78|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VAVGUB), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 79|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VAVGUH), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 80|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VAVGUW), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 115|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMAXSB), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 116|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMAXSH), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 117|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMAXSW), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 118|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMAXUB), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 119|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMAXUH), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 120|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMAXUW), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 124|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMINSB), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 125|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMINSH), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 126|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMINSW), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 127|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMINUB), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 0|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMINUH), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 1|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMINUW), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 17,
+ OPC_CheckInteger, 3|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMSUMMBM), 0,
+ 1, MVT::v4i32, 3, 0, 1, 2,
+ 17,
+ OPC_CheckInteger, 4|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMSUMSHM), 0,
+ 1, MVT::v4i32, 3, 0, 1, 2,
+ 17,
+ OPC_CheckInteger, 5|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMSUMSHS), 0,
+ 1, MVT::v4i32, 3, 0, 1, 2,
+ 17,
+ OPC_CheckInteger, 6|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMSUMUBM), 0,
+ 1, MVT::v4i32, 3, 0, 1, 2,
+ 17,
+ OPC_CheckInteger, 7|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMSUMUHM), 0,
+ 1, MVT::v4i32, 3, 0, 1, 2,
+ 17,
+ OPC_CheckInteger, 8|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMSUMUHS), 0,
+ 1, MVT::v4i32, 3, 0, 1, 2,
+ 15,
+ OPC_CheckInteger, 9|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMULESB), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 10|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMULESH), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 11|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMULEUB), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 12|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMULEUH), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 13|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMULOSB), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 14|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMULOSH), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 15|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMULOUB), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 16|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMULOUH), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 49|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBCUW), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 50|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBSBS), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 51|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBSHS), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 52|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBSWS), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 53|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBUBS), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 54|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBUHS), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 55|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUBUWS), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 60|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUMSWS), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 56|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUM2SWS), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 57|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUM4SBS), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 58|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUM4SHS), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 59|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSUM4UBS), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 31|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VRLB), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 32|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VRLH), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 33|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VRLW), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 36|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSL), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 39|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSLO), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 37|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSLB), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 38|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSLH), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 40|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSLW), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 41|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSR), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 47|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRO), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 42|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRAB), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 43|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRAH), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 44|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRAW), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 45|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRB), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 46|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRH), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 48|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRW), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 19|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKPX), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 20|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKSHSS), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 21|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKSHUS), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 22|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKSWSS), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 23|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKSWUS), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 24|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKUHUS), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 15,
+ OPC_CheckInteger, 25|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKUWUS), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 13,
+ OPC_CheckInteger, 61|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VUPKHPX), 0,
+ 1, MVT::v4i32, 1, 0,
+ 13,
+ OPC_CheckInteger, 62|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VUPKHSB), 0,
+ 1, MVT::v8i16, 1, 0,
+ 13,
+ OPC_CheckInteger, 63|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VUPKHSH), 0,
+ 1, MVT::v4i32, 1, 0,
+ 13,
+ OPC_CheckInteger, 64|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VUPKLPX), 0,
+ 1, MVT::v4i32, 1, 0,
+ 13,
+ OPC_CheckInteger, 65|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VUPKLSB), 0,
+ 1, MVT::v8i16, 1, 0,
+ 13,
+ OPC_CheckInteger, 66|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VUPKLSH), 0,
+ 1, MVT::v4i32, 1, 0,
+ 23,
+ OPC_CheckInteger, 81|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCFSX), 0,
+ 1, MVT::v4f32, 2, 2, 0,
+ 23,
+ OPC_CheckInteger, 82|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCFUX), 0,
+ 1, MVT::v4f32, 2, 2, 0,
+ 13,
+ OPC_CheckInteger, 111|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VEXPTEFP), 0,
+ 1, MVT::v4f32, 1, 0,
+ 13,
+ OPC_CheckInteger, 112|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VLOGEFP), 0,
+ 1, MVT::v4f32, 1, 0,
+ 15,
+ OPC_CheckInteger, 114|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMAXFP), 0,
1, MVT::v4f32, 2, 0, 1,
15,
- OPC_CheckInteger, 70|128,5,
+ OPC_CheckInteger, 123|128,1,
OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPGTFP), 0,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMINFP), 0,
1, MVT::v4f32, 2, 0, 1,
- 14,
+ 13,
+ OPC_CheckInteger, 26|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VREFP), 0,
+ 1, MVT::v4f32, 1, 0,
+ 13,
+ OPC_CheckInteger, 27|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VRFIM), 0,
+ 1, MVT::v4f32, 1, 0,
+ 13,
+ OPC_CheckInteger, 28|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VRFIN), 0,
+ 1, MVT::v4f32, 1, 0,
+ 13,
+ OPC_CheckInteger, 29|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VRFIP), 0,
+ 1, MVT::v4f32, 1, 0,
+ 13,
+ OPC_CheckInteger, 30|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VRFIZ), 0,
+ 1, MVT::v4f32, 1, 0,
+ 13,
+ OPC_CheckInteger, 34|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VRSQRTEFP), 0,
+ 1, MVT::v4f32, 1, 0,
+ 17,
+ OPC_CheckInteger, 113|128,1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMADDFP), 0,
+ 1, MVT::v4f32, 3, 0, 1, 2,
+ 17,
+ OPC_CheckInteger, 17|128,2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VNMSUBFP), 0,
+ 1, MVT::v4f32, 3, 0, 1, 2,
+ 0,
+ 40, TARGET_OPCODE(PPCISD::STCX),
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_Scope, 17,
+ OPC_CheckChild1Type, MVT::i32,
+ OPC_RecordChild2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::STWCX), 0|OPFL_Chain,
+ 1, MVT::i32, 3, 1, 3, 4,
+ 17,
+ OPC_CheckChild1Type, MVT::i64,
+ OPC_RecordChild2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::STDCX), 0|OPFL_Chain,
+ 1, MVT::i32, 3, 1, 3, 4,
+ 0,
+ 41, TARGET_OPCODE(PPCISD::STBRX),
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 3,
+ OPC_Scope, 16,
+ OPC_CheckValueType, MVT::i16,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::STHBRX), 0|OPFL_Chain,
+ 0, 3, 1, 3, 4,
+ 16,
+ OPC_CheckValueType, MVT::i32,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::STWBRX), 0|OPFL_Chain,
+ 0, 3, 1, 3, 4,
+ 0,
+ 16, TARGET_OPCODE(PPCISD::STFIWX),
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::STFIWX), 0|OPFL_Chain,
+ 0, 3, 1, 3, 4,
+ 35, TARGET_OPCODE(PPCISD::STD_32),
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_CheckChild1Type, MVT::i32,
+ OPC_RecordChild2,
+ OPC_Scope, 13,
+ OPC_CheckComplexPat, /*CP*/3, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::STD_32), 0|OPFL_Chain,
+ 0, 3, 1, 3, 4,
+ 13,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::STDX_32), 0|OPFL_Chain,
+ 0, 3, 1, 3, 4,
+ 0,
+ 26, TARGET_OPCODE(ISD::CALLSEQ_END),
+ OPC_RecordNode,
+ OPC_CaptureFlagInput,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::TargetConstant),
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::TargetConstant),
+ OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADJCALLSTACKUP), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
+ 1, MVT::i32, 2, 1, 2,
+ 70|128,1, TARGET_OPCODE(PPCISD::TC_RETURN),
+ OPC_RecordNode,
+ OPC_CaptureFlagInput,
+ OPC_RecordChild1,
+ OPC_Scope, 21|128,1,
+ OPC_MoveChild, 1,
+ OPC_SwitchOpcode , 48, TARGET_OPCODE(ISD::Constant),
+ OPC_SwitchType , 21, MVT::i32,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::TCRETURNai), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
+ 0, 2, 3, 4,
+ 21, MVT::i64,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::TCRETURNai8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
+ 0, 2, 3, 4,
+ 0,
+ 44, TARGET_OPCODE(ISD::TargetGlobalAddress),
+ OPC_SwitchType , 19, MVT::i32,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::TCRETURNdi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
+ 0, 2, 1, 3,
+ 19, MVT::i64,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::TCRETURNdi8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
+ 0, 2, 1, 3,
+ 0,
+ 44, TARGET_OPCODE(ISD::TargetExternalSymbol),
+ OPC_SwitchType , 19, MVT::i32,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::TCRETURNdi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
+ 0, 2, 1, 3,
+ 19, MVT::i64,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::TCRETURNdi8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
+ 0, 2, 1, 3,
+ 0,
+ 0,
+ 20,
+ OPC_CheckChild1Type, MVT::i32,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::TCRETURNri), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
+ 0, 2, 1, 3,
+ 20,
+ OPC_CheckChild1Type, MVT::i64,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::TCRETURNri8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
+ 0, 2, 1, 3,
+ 0,
+ 84|128,1, TARGET_OPCODE(PPCISD::VCMP),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_MoveChild, 2,
+ OPC_Scope, 14,
OPC_CheckInteger, 6,
OPC_MoveParent,
OPC_CheckType, MVT::v16i8,
@@ -4072,229 +3951,179 @@ SDNode *SelectCode(SDNode *N) {
OPC_CheckType, MVT::v4i32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPGTUW), 0,
1, MVT::v4i32, 2, 0, 1,
- 0,
- 84|128,1, PPCISD::VCMPo,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 2,
- OPC_Scope, 15,
+ 15,
OPC_CheckInteger, 70|128,7,
OPC_MoveParent,
OPC_CheckType, MVT::v4f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPBFPo), 0|OPFL_FlagOutput,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPBFP), 0,
1, MVT::v4f32, 2, 0, 1,
15,
OPC_CheckInteger, 70|128,1,
OPC_MoveParent,
OPC_CheckType, MVT::v4f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPEQFPo), 0|OPFL_FlagOutput,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPEQFP), 0,
1, MVT::v4f32, 2, 0, 1,
15,
OPC_CheckInteger, 70|128,3,
OPC_MoveParent,
OPC_CheckType, MVT::v4f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPGEFPo), 0|OPFL_FlagOutput,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPGEFP), 0,
1, MVT::v4f32, 2, 0, 1,
15,
OPC_CheckInteger, 70|128,5,
OPC_MoveParent,
OPC_CheckType, MVT::v4f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPGTFPo), 0|OPFL_FlagOutput,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPGTFP), 0,
1, MVT::v4f32, 2, 0, 1,
- 14,
+ 0,
+ 97|128,1, TARGET_OPCODE(PPCISD::VCMPo),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_MoveChild, 2,
+ OPC_Scope, 15,
OPC_CheckInteger, 6,
OPC_MoveParent,
OPC_CheckType, MVT::v16i8,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPEQUBo), 0|OPFL_FlagOutput,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
+ 2, MVT::v16i8, MVT::i32, 2, 0, 1,
+ 16,
OPC_CheckInteger, 6|128,6,
OPC_MoveParent,
OPC_CheckType, MVT::v16i8,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPGTSBo), 0|OPFL_FlagOutput,
- 1, MVT::v16i8, 2, 0, 1,
- 15,
+ 2, MVT::v16i8, MVT::i32, 2, 0, 1,
+ 16,
OPC_CheckInteger, 6|128,4,
OPC_MoveParent,
OPC_CheckType, MVT::v16i8,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPGTUBo), 0|OPFL_FlagOutput,
- 1, MVT::v16i8, 2, 0, 1,
- 14,
+ 2, MVT::v16i8, MVT::i32, 2, 0, 1,
+ 15,
OPC_CheckInteger, 70,
OPC_MoveParent,
OPC_CheckType, MVT::v8i16,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPEQUHo), 0|OPFL_FlagOutput,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
+ 2, MVT::v8i16, MVT::i32, 2, 0, 1,
+ 16,
OPC_CheckInteger, 70|128,6,
OPC_MoveParent,
OPC_CheckType, MVT::v8i16,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPGTSHo), 0|OPFL_FlagOutput,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
+ 2, MVT::v8i16, MVT::i32, 2, 0, 1,
+ 16,
OPC_CheckInteger, 70|128,4,
OPC_MoveParent,
OPC_CheckType, MVT::v8i16,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPGTUHo), 0|OPFL_FlagOutput,
- 1, MVT::v8i16, 2, 0, 1,
- 15,
+ 2, MVT::v8i16, MVT::i32, 2, 0, 1,
+ 16,
OPC_CheckInteger, 6|128,1,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPEQUWo), 0|OPFL_FlagOutput,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
+ 2, MVT::v4i32, MVT::i32, 2, 0, 1,
+ 16,
OPC_CheckInteger, 6|128,7,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPGTSWo), 0|OPFL_FlagOutput,
- 1, MVT::v4i32, 2, 0, 1,
- 15,
+ 2, MVT::v4i32, MVT::i32, 2, 0, 1,
+ 16,
OPC_CheckInteger, 6|128,5,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPGTUWo), 0|OPFL_FlagOutput,
- 1, MVT::v4i32, 2, 0, 1,
- 0,
- 64, ISD::ADDC,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 35,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 0,
+ 2, MVT::v4i32, MVT::i32, 2, 0, 1,
+ 16,
+ OPC_CheckInteger, 70|128,7,
OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDIC), 0|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 2,
- 11, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDIC8), 0|OPFL_FlagOutput,
- 1, MVT::i64, 2, 0, 2,
- 0,
- 11,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDC), 0|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 1,
- 11,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADDC8), 0|OPFL_FlagOutput,
- 1, MVT::i64, 2, 0, 1,
- 0,
- 49, ISD::MUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 20,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 0,
+ OPC_CheckType, MVT::v4f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPBFPo), 0|OPFL_FlagOutput,
+ 2, MVT::v4f32, MVT::i32, 2, 0, 1,
+ 16,
+ OPC_CheckInteger, 70|128,1,
OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::MULLI), 0,
- 1, MVT::i32, 2, 0, 2,
- 11,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::MULLW), 0,
- 1, MVT::i32, 2, 0, 1,
- 11,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::MULLD), 0,
- 1, MVT::i64, 2, 0, 1,
- 0,
- 66, ISD::SUBC,
- OPC_RecordChild0,
- OPC_Scope, 36,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 0,
+ OPC_CheckType, MVT::v4f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPEQFPo), 0|OPFL_FlagOutput,
+ 2, MVT::v4f32, MVT::i32, 2, 0, 1,
+ 16,
+ OPC_CheckInteger, 70|128,3,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 11, MVT::i32,
- OPC_EmitConvertToTarget, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFIC), 0|OPFL_FlagOutput,
- 1, MVT::i32, 2, 1, 2,
- 11, MVT::i64,
- OPC_EmitConvertToTarget, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFIC8), 0|OPFL_FlagOutput,
- 1, MVT::i64, 2, 1, 2,
- 0,
- 25,
- OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFC), 0|OPFL_FlagOutput,
- 1, MVT::i32, 2, 1, 0,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SUBFC8), 0|OPFL_FlagOutput,
- 1, MVT::i64, 2, 1, 0,
- 0,
+ OPC_CheckType, MVT::v4f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPGEFPo), 0|OPFL_FlagOutput,
+ 2, MVT::v4f32, MVT::i32, 2, 0, 1,
+ 16,
+ OPC_CheckInteger, 70|128,5,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VCMPGTFPo), 0|OPFL_FlagOutput,
+ 2, MVT::v4f32, MVT::i32, 2, 0, 1,
0,
- 50|128,2, ISD::VECTOR_SHUFFLE,
- OPC_Scope, 72,
+ 52|128,2, TARGET_OPCODE(ISD::VECTOR_SHUFFLE),
+ OPC_Scope, 73,
OPC_RecordNode,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
OPC_MoveParent,
OPC_CheckType, MVT::v16i8,
OPC_Scope, 14,
- OPC_CheckPredicate, 68,
- OPC_EmitNodeXForm, 5, 0,
+ OPC_CheckPredicate, 67,
+ OPC_EmitNodeXForm, 9, 0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSPLTB), 0,
1, MVT::v16i8, 2, 2, 1,
14,
- OPC_CheckPredicate, 69,
- OPC_EmitNodeXForm, 6, 0,
+ OPC_CheckPredicate, 68,
+ OPC_EmitNodeXForm, 10, 0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSPLTH), 0,
1, MVT::v16i8, 2, 2, 1,
14,
- OPC_CheckPredicate, 70,
- OPC_EmitNodeXForm, 7, 0,
+ OPC_CheckPredicate, 69,
+ OPC_EmitNodeXForm, 11, 0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSPLTW), 0,
1, MVT::v16i8, 2, 2, 1,
15,
- OPC_CheckPredicate, 71,
- OPC_EmitNodeXForm, 8, 0,
+ OPC_CheckPredicate, 70,
+ OPC_EmitNodeXForm, 12, 0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSLDOI), 0,
1, MVT::v16i8, 3, 1, 1, 2,
0,
- 106,
+ 107,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
OPC_MoveParent,
OPC_CheckType, MVT::v16i8,
OPC_Scope, 11,
- OPC_CheckPredicate, 72,
+ OPC_CheckPredicate, 71,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKUWUM), 0,
1, MVT::v16i8, 2, 0, 0,
11,
- OPC_CheckPredicate, 73,
+ OPC_CheckPredicate, 72,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKUHUM), 0,
1, MVT::v16i8, 2, 0, 0,
11,
- OPC_CheckPredicate, 74,
+ OPC_CheckPredicate, 73,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMRGLB), 0,
1, MVT::v16i8, 2, 0, 0,
11,
- OPC_CheckPredicate, 75,
+ OPC_CheckPredicate, 74,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMRGLH), 0,
1, MVT::v16i8, 2, 0, 0,
11,
- OPC_CheckPredicate, 76,
+ OPC_CheckPredicate, 75,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMRGLW), 0,
1, MVT::v16i8, 2, 0, 0,
11,
- OPC_CheckPredicate, 77,
+ OPC_CheckPredicate, 76,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMRGHB), 0,
1, MVT::v16i8, 2, 0, 0,
11,
- OPC_CheckPredicate, 78,
+ OPC_CheckPredicate, 77,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMRGHH), 0,
1, MVT::v16i8, 2, 0, 0,
11,
- OPC_CheckPredicate, 79,
+ OPC_CheckPredicate, 78,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMRGHW), 0,
1, MVT::v16i8, 2, 0, 0,
0,
@@ -4302,9 +4131,9 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordNode,
OPC_RecordChild0,
OPC_RecordChild1,
- OPC_CheckPredicate, 80,
+ OPC_CheckPredicate, 79,
OPC_CheckType, MVT::v16i8,
- OPC_EmitNodeXForm, 9, 0,
+ OPC_EmitNodeXForm, 13, 0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSLDOI), 0,
1, MVT::v16i8, 3, 1, 2, 3,
102,
@@ -4312,466 +4141,182 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild1,
OPC_CheckType, MVT::v16i8,
OPC_Scope, 11,
- OPC_CheckPredicate, 81,
+ OPC_CheckPredicate, 80,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMRGHB), 0,
1, MVT::v16i8, 2, 0, 1,
11,
- OPC_CheckPredicate, 82,
+ OPC_CheckPredicate, 81,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMRGHH), 0,
1, MVT::v16i8, 2, 0, 1,
11,
- OPC_CheckPredicate, 83,
+ OPC_CheckPredicate, 82,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMRGHW), 0,
1, MVT::v16i8, 2, 0, 1,
11,
- OPC_CheckPredicate, 84,
+ OPC_CheckPredicate, 83,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMRGLB), 0,
1, MVT::v16i8, 2, 0, 1,
11,
- OPC_CheckPredicate, 85,
+ OPC_CheckPredicate, 84,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMRGLH), 0,
1, MVT::v16i8, 2, 0, 1,
11,
- OPC_CheckPredicate, 86,
+ OPC_CheckPredicate, 85,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMRGLW), 0,
1, MVT::v16i8, 2, 0, 1,
11,
- OPC_CheckPredicate, 87,
+ OPC_CheckPredicate, 86,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKUHUM), 0,
1, MVT::v16i8, 2, 0, 1,
11,
- OPC_CheckPredicate, 88,
+ OPC_CheckPredicate, 87,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPKUWUM), 0,
1, MVT::v16i8, 2, 0, 1,
0,
0,
- 17, ISD::CALLSEQ_START,
+ 17, TARGET_OPCODE(ISD::CALLSEQ_START),
OPC_RecordNode,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TargetConstant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::TargetConstant),
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::ADJCALLSTACKDOWN), 0|OPFL_Chain|OPFL_FlagOutput,
- 0, 1, 1,
- 101, PPCISD::CALL_Darwin,
+ 1, MVT::i32, 1, 1,
+ 98, TARGET_OPCODE(PPCISD::CALL_Darwin),
OPC_RecordNode,
OPC_CaptureFlagInput,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_SwitchOpcode , 32, ISD::Constant,
- OPC_SwitchType , 13, MVT::i32,
+ OPC_SwitchOpcode , 30, TARGET_OPCODE(ISD::Constant),
+ OPC_SwitchType , 12, MVT::i32,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::BLA_Darwin), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 2,
- 13, MVT::i64,
+ 1, MVT::i32, 1, 2,
+ 12, MVT::i64,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::BLA8_Darwin), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 2,
+ 1, MVT::i64, 1, 2,
0,
- 28, ISD::TargetGlobalAddress,
- OPC_SwitchType , 11, MVT::i32,
+ 26, TARGET_OPCODE(ISD::TargetGlobalAddress),
+ OPC_SwitchType , 10, MVT::i32,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::BL_Darwin), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 11, MVT::i64,
+ 1, MVT::i32, 1, 1,
+ 10, MVT::i64,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::BL8_Darwin), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
+ 1, MVT::i64, 1, 1,
0,
- 28, ISD::TargetExternalSymbol,
- OPC_SwitchType , 11, MVT::i32,
+ 26, TARGET_OPCODE(ISD::TargetExternalSymbol),
+ OPC_SwitchType , 10, MVT::i32,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::BL_Darwin), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 11, MVT::i64,
+ 1, MVT::i32, 1, 1,
+ 10, MVT::i64,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::BL8_Darwin), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
+ 1, MVT::i64, 1, 1,
0,
0,
- 101, PPCISD::CALL_SVR4,
+ 98, TARGET_OPCODE(PPCISD::CALL_SVR4),
OPC_RecordNode,
OPC_CaptureFlagInput,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_SwitchOpcode , 32, ISD::Constant,
- OPC_SwitchType , 13, MVT::i32,
+ OPC_SwitchOpcode , 30, TARGET_OPCODE(ISD::Constant),
+ OPC_SwitchType , 12, MVT::i32,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::BLA_SVR4), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 2,
- 13, MVT::i64,
+ 1, MVT::i32, 1, 2,
+ 12, MVT::i64,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::BLA8_ELF), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 2,
+ 1, MVT::i64, 1, 2,
0,
- 28, ISD::TargetGlobalAddress,
- OPC_SwitchType , 11, MVT::i32,
+ 26, TARGET_OPCODE(ISD::TargetGlobalAddress),
+ OPC_SwitchType , 10, MVT::i32,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::BL_SVR4), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 11, MVT::i64,
+ 1, MVT::i32, 1, 1,
+ 10, MVT::i64,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::BL8_ELF), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
+ 1, MVT::i64, 1, 1,
0,
- 28, ISD::TargetExternalSymbol,
- OPC_SwitchType , 11, MVT::i32,
+ 26, TARGET_OPCODE(ISD::TargetExternalSymbol),
+ OPC_SwitchType , 10, MVT::i32,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::BL_SVR4), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 11, MVT::i64,
+ 1, MVT::i32, 1, 1,
+ 10, MVT::i64,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::BL8_ELF), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 0,
- 0,
- 109, ISD::SRA,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 35,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRAWI), 0,
- 1, MVT::i32, 2, 0, 2,
- 11, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRADI), 0,
- 1, MVT::i64, 2, 0, 2,
- 0,
- 26,
- OPC_CheckChild1Type, MVT::i32,
- OPC_SwitchType , 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRAW), 0,
- 1, MVT::i32, 2, 0, 1,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRAD), 0,
- 1, MVT::i64, 2, 0, 1,
+ 1, MVT::i64, 1, 1,
0,
- 13,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_CheckType, MVT::v16i8,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRAB), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 13,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_CheckType, MVT::v8i16,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRAH), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 13,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_CheckType, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRAW), 0,
- 1, MVT::v4i32, 2, 0, 1,
0,
- 18, PPCISD::MTFSB0,
+ 19, TARGET_OPCODE(PPCISD::MTFSB0),
OPC_CaptureFlagInput,
OPC_RecordChild0,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_EmitConvertToTarget, 0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::MTFSB0), 0|OPFL_FlagInput|OPFL_FlagOutput,
0, 1, 1,
- 18, PPCISD::MTFSB1,
+ 19, TARGET_OPCODE(PPCISD::MTFSB1),
OPC_CaptureFlagInput,
OPC_RecordChild0,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_EmitConvertToTarget, 0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::MTFSB1), 0|OPFL_FlagInput|OPFL_FlagOutput,
0, 1, 1,
- 23, PPCISD::MTFSF,
- OPC_CaptureFlagInput,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_EmitConvertToTarget, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::MTFSF), 0|OPFL_FlagInput,
- 1, MVT::f64, 3, 3, 1, 2,
- 20|128,1, ISD::FADD,
- OPC_Scope, 52,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 12, MVT::f64,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMADD), 0,
- 1, MVT::f64, 3, 0, 1, 2,
- 12, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMADDS), 0,
- 1, MVT::f32, 3, 0, 1, 2,
- 12, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMADDFP), 0,
- 1, MVT::v4f32, 3, 0, 1, 2,
- 0,
- 92,
- OPC_RecordChild0,
- OPC_Scope, 51,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::FMUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_SwitchType , 12, MVT::f64,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMADD), 0,
- 1, MVT::f64, 3, 1, 2, 0,
- 12, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMADDS), 0,
- 1, MVT::f32, 3, 1, 2, 0,
- 12, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMADDFP), 0,
- 1, MVT::v4f32, 3, 1, 2, 0,
- 0,
- 36,
- OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::f64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FADD), 0,
- 1, MVT::f64, 2, 0, 1,
- 9, MVT::f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FADDS), 0,
- 1, MVT::f32, 2, 0, 1,
- 9, MVT::v4f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VADDFP), 0,
- 1, MVT::v4f32, 2, 0, 1,
- 0,
- 0,
- 0,
- 125, ISD::SHL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 51,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_SwitchType , 21, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 10, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLWINM), 0,
- 1, MVT::i32, 4, 0, 2, 3, 5,
- 17, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 11, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLDICR), 0,
- 1, MVT::i64, 3, 0, 2, 4,
- 0,
- 26,
- OPC_CheckChild1Type, MVT::i32,
- OPC_SwitchType , 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SLW), 0,
- 1, MVT::i32, 2, 0, 1,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SLD), 0,
- 1, MVT::i64, 2, 0, 1,
- 0,
- 13,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_CheckType, MVT::v16i8,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSLB), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 13,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_CheckType, MVT::v8i16,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSLH), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 13,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_CheckType, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSLW), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 0,
- 125, ISD::SRL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 51,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_SwitchType , 21, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 12, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 31,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLWINM), 0,
- 1, MVT::i32, 4, 0, 3, 4, 5,
- 17, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 13, 2,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLDICL), 0,
- 1, MVT::i64, 3, 0, 3, 4,
- 0,
- 26,
- OPC_CheckChild1Type, MVT::i32,
- OPC_SwitchType , 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRW), 0,
- 1, MVT::i32, 2, 0, 1,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRD), 0,
- 1, MVT::i64, 2, 0, 1,
- 0,
- 13,
- OPC_CheckChild1Type, MVT::v16i8,
- OPC_CheckType, MVT::v16i8,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRB), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 13,
- OPC_CheckChild1Type, MVT::v8i16,
- OPC_CheckType, MVT::v8i16,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRH), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 13,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_CheckType, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSRW), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 0,
- 91, ISD::ROTL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 47,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_SwitchType , 19, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitInteger, MVT::i32, 31,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLWINM), 0,
- 1, MVT::i32, 4, 0, 2, 3, 4,
- 15, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLDICL), 0,
- 1, MVT::i64, 3, 0, 2, 3,
- 0,
- 38,
- OPC_CheckChild1Type, MVT::i32,
- OPC_SwitchType , 17, MVT::i32,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitInteger, MVT::i32, 31,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLWNM), 0,
- 1, MVT::i32, 4, 0, 1, 2, 3,
- 13, MVT::i64,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLDCL), 0,
- 1, MVT::i64, 3, 0, 1, 2,
- 0,
- 0,
- 18, PPCISD::TOC_ENTRY,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::TargetGlobalAddress,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LDtoc), 0,
- 1, MVT::i64, 2, 0, 1,
- 97, ISD::Constant,
- OPC_RecordNode,
- OPC_SwitchType , 59, MVT::i32,
- OPC_Scope, 12,
- OPC_CheckPredicate, 0,
- OPC_EmitConvertToTarget, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LI), 0,
- 1, MVT::i32, 1, 1,
- 15,
- OPC_CheckPredicate, 1,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitNodeXForm, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LIS), 0,
- 1, MVT::i32, 1, 2,
- 27,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitNodeXForm, 0, 1,
- OPC_EmitNode, TARGET_OPCODE(PPC::LIS), 0,
- 1, MVT::i32, 1, 2,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitNodeXForm, 1, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::ORI), 0,
- 1, MVT::i32, 2, 3, 5,
- 0,
- 31, MVT::i64,
- OPC_Scope, 12,
- OPC_CheckPredicate, 0,
- OPC_EmitConvertToTarget, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LI8), 0,
- 1, MVT::i64, 1, 1,
- 15,
- OPC_CheckPredicate, 1,
- OPC_EmitConvertToTarget, 0,
- OPC_EmitNodeXForm, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::LIS8), 0,
- 1, MVT::i64, 1, 2,
- 0,
- 0,
- 63, ISD::BUILD_VECTOR,
+ 63, TARGET_OPCODE(ISD::BUILD_VECTOR),
OPC_Scope, 48,
OPC_RecordNode,
OPC_SwitchType , 13, MVT::v16i8,
- OPC_CheckPredicate, 89,
+ OPC_CheckPredicate, 88,
OPC_EmitNodeXForm, 14, 0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSPLTISB), 0,
1, MVT::v16i8, 1, 1,
13, MVT::v8i16,
- OPC_CheckPredicate, 90,
+ OPC_CheckPredicate, 89,
OPC_EmitNodeXForm, 15, 0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSPLTISH), 0,
1, MVT::v8i16, 1, 1,
13, MVT::v4i32,
- OPC_CheckPredicate, 91,
+ OPC_CheckPredicate, 90,
OPC_EmitNodeXForm, 16, 0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VSPLTISW), 0,
1, MVT::v4i32, 1, 1,
0,
11,
- OPC_CheckPredicate, 92,
+ OPC_CheckPredicate, 91,
OPC_CheckType, MVT::v4i32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::V_SET0), 0,
1, MVT::v4i32, 0,
0,
- 83, ISD::BIT_CONVERT,
+ 83, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_RecordChild0,
OPC_Scope, 19,
OPC_CheckChild0Type, MVT::v8i16,
@@ -4822,349 +4367,113 @@ SDNode *SelectCode(SDNode *N) {
0,
0,
- 19, PPCISD::RET_FLAG,
+ 17, TARGET_OPCODE(PPCISD::RET_FLAG),
OPC_RecordNode,
OPC_CaptureFlagInput,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitInteger, MVT::i32, 20,
OPC_EmitRegister, MVT::i32, 0 ,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::BLR), 0|OPFL_Chain|OPFL_FlagInput,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::BLR), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic0,
0, 2, 1, 2,
- 17, ISD::BR,
+ 16, TARGET_OPCODE(ISD::BR),
OPC_RecordNode,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BasicBlock,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BasicBlock),
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::B), 0|OPFL_Chain,
0, 1, 1,
- 28, PPCISD::BCTRL_Darwin,
+ 26, TARGET_OPCODE(PPCISD::BCTRL_Darwin),
OPC_RecordNode,
OPC_CaptureFlagInput,
- OPC_Scope, 11,
+ OPC_Scope, 10,
OPC_CheckPatternPredicate, 1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::BCTRL_Darwin), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic0,
- 0, 0,
- 11,
+ 1, MVT::i32, 0,
+ 10,
OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::BCTRL8_Darwin), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic0,
- 0, 0,
+ 1, MVT::i64, 0,
0,
- 28, PPCISD::BCTRL_SVR4,
+ 26, TARGET_OPCODE(PPCISD::BCTRL_SVR4),
OPC_RecordNode,
OPC_CaptureFlagInput,
- OPC_Scope, 11,
+ OPC_Scope, 10,
OPC_CheckPatternPredicate, 1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::BCTRL_SVR4), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic0,
- 0, 0,
- 11,
+ 1, MVT::i32, 0,
+ 10,
OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::BCTRL8_ELF), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic0,
- 0, 0,
+ 1, MVT::i64, 0,
0,
- 10, ISD::TRAP,
+ 8, TARGET_OPCODE(ISD::TRAP),
OPC_RecordNode,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::TRAP), 0|OPFL_Chain,
0, 0,
- 28, PPCISD::SHL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_SwitchType , 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SLW), 0,
- 1, MVT::i32, 2, 0, 1,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SLD), 0,
- 1, MVT::i64, 2, 0, 1,
- 0,
- 28, PPCISD::SRL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_SwitchType , 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRW), 0,
- 1, MVT::i32, 2, 0, 1,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRD), 0,
- 1, MVT::i64, 2, 0, 1,
- 0,
- 28, PPCISD::SRA,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i32,
- OPC_SwitchType , 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRAW), 0,
- 1, MVT::i32, 2, 0, 1,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::SRAD), 0,
- 1, MVT::i64, 2, 0, 1,
- 0,
- 23, ISD::CTLZ,
- OPC_RecordChild0,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::CNTLZW), 0,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::CNTLZD), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 71, ISD::SIGN_EXTEND_INREG,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_Scope, 25,
- OPC_CheckValueType, MVT::i8,
- OPC_MoveParent,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::EXTSB), 0,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::EXTSB8), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 25,
- OPC_CheckValueType, MVT::i16,
- OPC_MoveParent,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::EXTSH), 0,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::EXTSH8), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 13,
- OPC_CheckValueType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::EXTSW), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 11, PPCISD::FCTIWZ,
- OPC_RecordChild0,
- OPC_CheckType, MVT::f64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FCTIWZ), 0,
- 1, MVT::f64, 1, 0,
- 13, ISD::FP_ROUND,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::f64,
- OPC_CheckType, MVT::f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FRSP), 0,
- 1, MVT::f32, 1, 0,
- 23, ISD::FSQRT,
- OPC_RecordChild0,
- OPC_SwitchType , 8, MVT::f64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FSQRT), 0,
- 1, MVT::f64, 1, 0,
- 8, MVT::f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FSQRTS), 0,
- 1, MVT::f32, 1, 0,
- 0,
- 13, ISD::FP_EXTEND,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::f32,
- OPC_CheckType, MVT::f64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMRSD), 0,
- 1, MVT::f64, 1, 0,
- 23, ISD::FABS,
- OPC_RecordChild0,
- OPC_SwitchType , 8, MVT::f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FABSS), 0,
- 1, MVT::f32, 1, 0,
- 8, MVT::f64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FABSD), 0,
- 1, MVT::f64, 1, 0,
- 0,
- 31, PPCISD::MTCTR,
+ 29, TARGET_OPCODE(PPCISD::MTCTR),
OPC_RecordNode,
OPC_CaptureFlagInput,
OPC_RecordChild1,
- OPC_Scope, 12,
+ OPC_Scope, 11,
OPC_CheckChild1Type, MVT::i32,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::MTCTR), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 1, 1,
- 12,
+ 1, MVT::i32, 1, 1,
+ 11,
OPC_CheckChild1Type, MVT::i64,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::MTCTR8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 1, 1,
- 0,
- 7, PPCISD::MFFS,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::MFFS), 0|OPFL_FlagOutput,
- 1, MVT::f64, 0,
- 14, PPCISD::FADDRTZ,
- OPC_CaptureFlagInput,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_CheckType, MVT::f64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FADDrtz), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::f64, 2, 0, 1,
- 26, ISD::SDIV,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::DIVW), 0,
- 1, MVT::i32, 2, 0, 1,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::DIVD), 0,
- 1, MVT::i64, 2, 0, 1,
- 0,
- 26, ISD::UDIV,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::DIVWU), 0,
- 1, MVT::i32, 2, 0, 1,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::DIVDU), 0,
- 1, MVT::i64, 2, 0, 1,
+ 1, MVT::i64, 1, 1,
0,
- 26, ISD::MULHS,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::MULHW), 0,
- 1, MVT::i32, 2, 0, 1,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::MULHD), 0,
- 1, MVT::i64, 2, 0, 1,
- 0,
- 26, ISD::MULHU,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::MULHWU), 0,
- 1, MVT::i32, 2, 0, 1,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::MULHDU), 0,
- 1, MVT::i64, 2, 0, 1,
- 0,
- 29, PPCISD::FSEL,
+ 15, TARGET_OPCODE(PPCISD::VPERM),
OPC_RecordChild0,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_SwitchType , 10, MVT::f64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FSELD), 0,
- 1, MVT::f64, 3, 0, 1, 2,
- 10, MVT::f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FSELS), 0,
- 1, MVT::f32, 3, 0, 1, 2,
- 0,
- 26, ISD::FDIV,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::f64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FDIV), 0,
- 1, MVT::f64, 2, 0, 1,
- 9, MVT::f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FDIVS), 0,
- 1, MVT::f32, 2, 0, 1,
- 0,
- 45, ISD::FMUL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::f64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMUL), 0,
- 1, MVT::f64, 2, 0, 1,
- 9, MVT::f32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FMULS), 0,
- 1, MVT::f32, 2, 0, 1,
- 17, MVT::v4f32,
- OPC_EmitNode, TARGET_OPCODE(PPC::V_SET0), 0,
- 1, MVT::v4i32, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMADDFP), 0,
- 1, MVT::v4f32, 3, 0, 1, 2,
- 0,
- 15, PPCISD::VMADDFP,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPERM), 0,
+ 1, MVT::v16i8, 3, 0, 1, 2,
+ 15, TARGET_OPCODE(PPCISD::VMADDFP),
OPC_RecordChild0,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_CheckType, MVT::v4f32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VMADDFP), 0,
1, MVT::v4f32, 3, 0, 1, 2,
- 15, PPCISD::VNMSUBFP,
+ 15, TARGET_OPCODE(PPCISD::VNMSUBFP),
OPC_RecordChild0,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_CheckType, MVT::v4f32,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::VNMSUBFP), 0,
1, MVT::v4f32, 3, 0, 1, 2,
- 15, PPCISD::VPERM,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckType, MVT::v16i8,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::VPERM), 0,
- 1, MVT::v16i8, 3, 0, 1, 2,
- 7, PPCISD::NOP,
+ 7, TARGET_OPCODE(PPCISD::NOP),
OPC_CaptureFlagInput,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::NOP), 0|OPFL_FlagInput|OPFL_FlagOutput,
0, 0,
- 11, PPCISD::EXTSW_32,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::EXTSW_32), 0,
- 1, MVT::i32, 1, 0,
- 13, ISD::SIGN_EXTEND,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i32,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::EXTSW_32_64), 0,
- 1, MVT::i64, 1, 0,
- 15, PPCISD::LOAD_TOC,
+ 13, TARGET_OPCODE(PPCISD::LOAD_TOC),
OPC_RecordNode,
OPC_CaptureFlagInput,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i64,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LDinto_toc), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
0, 1, 1,
- 11, PPCISD::TOC_RESTORE,
+ 9, TARGET_OPCODE(PPCISD::TOC_RESTORE),
OPC_RecordNode,
OPC_CaptureFlagInput,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(PPC::LDtoc_restore), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
0, 0,
- 11, PPCISD::FCFID,
- OPC_RecordChild0,
- OPC_CheckType, MVT::f64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FCFID), 0,
- 1, MVT::f64, 1, 0,
- 11, PPCISD::FCTIDZ,
- OPC_RecordChild0,
- OPC_CheckType, MVT::f64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::FCTIDZ), 0,
- 1, MVT::f64, 1, 0,
- 14, ISD::ANY_EXTEND,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i32,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::OR4To8), 0,
- 1, MVT::i64, 2, 0, 0,
- 14, ISD::TRUNCATE,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i64,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::OR8To4), 0,
- 1, MVT::i32, 2, 0, 0,
- 30, ISD::ZERO_EXTEND,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i32,
- OPC_CheckType, MVT::i64,
- OPC_EmitNode, TARGET_OPCODE(PPC::OR4To8), 0,
- 1, MVT::i64, 2, 0, 0,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitInteger, MVT::i32, 32,
- OPC_MorphNodeTo, TARGET_OPCODE(PPC::RLDICL), 0,
- 1, MVT::i64, 3, 1, 2, 3,
0,
0
- }; // Total Array size is 11066 bytes
+ }; // Total Array size is 10997 bytes
#undef TARGET_OPCODE
return SelectCodeCommon(N, MatcherTable,sizeof(MatcherTable));
@@ -5376,217 +4685,199 @@ bool CheckNodePredicate(SDNode *Node, unsigned PredNo) const {
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 30: { // Predicate_V_immneg0
- SDNode *N = Node;
-
- return PPC::isAllNegativeZeroVector(N);
-
- }
- case 31: { // Predicate_atomic_load_add_8
+ case 30: { // Predicate_atomic_load_add_8
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 32: { // Predicate_atomic_load_add_16
+ case 31: { // Predicate_atomic_load_add_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 33: { // Predicate_atomic_load_add_32
+ case 32: { // Predicate_atomic_load_add_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 34: { // Predicate_atomic_load_add_64
+ case 33: { // Predicate_atomic_load_add_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 35: { // Predicate_atomic_load_sub_8
+ case 34: { // Predicate_atomic_load_sub_8
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 36: { // Predicate_atomic_load_sub_16
+ case 35: { // Predicate_atomic_load_sub_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 37: { // Predicate_atomic_load_sub_32
+ case 36: { // Predicate_atomic_load_sub_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 38: { // Predicate_atomic_load_sub_64
+ case 37: { // Predicate_atomic_load_sub_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 39: { // Predicate_atomic_load_and_8
+ case 38: { // Predicate_atomic_load_and_8
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 40: { // Predicate_atomic_load_and_16
+ case 39: { // Predicate_atomic_load_and_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 41: { // Predicate_atomic_load_and_32
+ case 40: { // Predicate_atomic_load_and_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 42: { // Predicate_atomic_load_and_64
+ case 41: { // Predicate_atomic_load_and_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 43: { // Predicate_atomic_load_or_8
+ case 42: { // Predicate_atomic_load_or_8
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 44: { // Predicate_atomic_load_or_16
+ case 43: { // Predicate_atomic_load_or_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 45: { // Predicate_atomic_load_or_32
+ case 44: { // Predicate_atomic_load_or_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 46: { // Predicate_atomic_load_or_64
+ case 45: { // Predicate_atomic_load_or_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 47: { // Predicate_atomic_load_xor_8
+ case 46: { // Predicate_atomic_load_xor_8
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 48: { // Predicate_atomic_load_xor_16
+ case 47: { // Predicate_atomic_load_xor_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 49: { // Predicate_atomic_load_xor_32
+ case 48: { // Predicate_atomic_load_xor_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 50: { // Predicate_atomic_load_xor_64
+ case 49: { // Predicate_atomic_load_xor_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 51: { // Predicate_atomic_load_nand_8
+ case 50: { // Predicate_atomic_load_nand_8
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 52: { // Predicate_atomic_load_nand_16
+ case 51: { // Predicate_atomic_load_nand_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 53: { // Predicate_atomic_load_nand_32
+ case 52: { // Predicate_atomic_load_nand_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 54: { // Predicate_atomic_load_nand_64
+ case 53: { // Predicate_atomic_load_nand_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 55: { // Predicate_atomic_cmp_swap_8
+ case 54: { // Predicate_atomic_cmp_swap_8
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 56: { // Predicate_atomic_cmp_swap_16
+ case 55: { // Predicate_atomic_cmp_swap_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 57: { // Predicate_atomic_cmp_swap_32
+ case 56: { // Predicate_atomic_cmp_swap_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 58: { // Predicate_atomic_cmp_swap_64
+ case 57: { // Predicate_atomic_cmp_swap_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 59: { // Predicate_atomic_swap_8
+ case 58: { // Predicate_atomic_swap_8
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 60: { // Predicate_atomic_swap_16
+ case 59: { // Predicate_atomic_swap_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 61: { // Predicate_atomic_swap_32
+ case 60: { // Predicate_atomic_swap_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 62: { // Predicate_atomic_swap_64
+ case 61: { // Predicate_atomic_swap_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 63: { // Predicate_immAllOnesV
- SDNode *N = Node;
-
- return ISD::isBuildVectorAllOnes(N);
-
- }
- case 64: { // Predicate_immAllOnesV_bc
- SDNode *N = Node;
-
- return ISD::isBuildVectorAllOnes(N);
-
- }
- case 65: { // Predicate_immZExt16
+ case 62: { // Predicate_immZExt16
ConstantSDNode*N = cast<ConstantSDNode>(Node);
// immZExt16 predicate - True if the immediate fits in a 16-bit zero extended
@@ -5594,7 +4885,7 @@ bool CheckNodePredicate(SDNode *Node, unsigned PredNo) const {
return (uint64_t)N->getZExtValue() == (unsigned short)N->getZExtValue();
}
- case 66: { // Predicate_imm16ShiftedZExt
+ case 63: { // Predicate_imm16ShiftedZExt
ConstantSDNode*N = cast<ConstantSDNode>(Node);
// imm16ShiftedZExt predicate - True if only bits in the top 16-bits of the
@@ -5602,7 +4893,13 @@ bool CheckNodePredicate(SDNode *Node, unsigned PredNo) const {
return (N->getZExtValue() & ~uint64_t(0xFFFF0000)) == 0;
}
- case 67: { // Predicate_maskimm32
+ case 64: { // Predicate_immAllOnesV
+ SDNode *N = Node;
+
+ return ISD::isBuildVectorAllOnes(N);
+
+ }
+ case 65: { // Predicate_maskimm32
ConstantSDNode*N = cast<ConstantSDNode>(Node);
// maskImm predicate - True if immediate is a run of ones.
@@ -5613,151 +4910,157 @@ bool CheckNodePredicate(SDNode *Node, unsigned PredNo) const {
return false;
}
- case 68: { // Predicate_vspltb_shuffle
+ case 66: { // Predicate_V_immneg0
+ SDNode *N = Node;
+
+ return PPC::isAllNegativeZeroVector(N);
+
+ }
+ case 67: { // Predicate_vspltb_shuffle
SDNode *N = Node;
return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 1);
}
- case 69: { // Predicate_vsplth_shuffle
+ case 68: { // Predicate_vsplth_shuffle
SDNode *N = Node;
return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 2);
}
- case 70: { // Predicate_vspltw_shuffle
+ case 69: { // Predicate_vspltw_shuffle
SDNode *N = Node;
return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 4);
}
- case 71: { // Predicate_vsldoi_unary_shuffle
+ case 70: { // Predicate_vsldoi_unary_shuffle
SDNode *N = Node;
return PPC::isVSLDOIShuffleMask(N, true) != -1;
}
- case 72: { // Predicate_vpkuwum_unary_shuffle
+ case 71: { // Predicate_vpkuwum_unary_shuffle
SDNode *N = Node;
return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), true);
}
- case 73: { // Predicate_vpkuhum_unary_shuffle
+ case 72: { // Predicate_vpkuhum_unary_shuffle
SDNode *N = Node;
return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), true);
}
- case 74: { // Predicate_vmrglb_unary_shuffle
+ case 73: { // Predicate_vmrglb_unary_shuffle
SDNode *N = Node;
return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, true);
}
- case 75: { // Predicate_vmrglh_unary_shuffle
+ case 74: { // Predicate_vmrglh_unary_shuffle
SDNode *N = Node;
return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, true);
}
- case 76: { // Predicate_vmrglw_unary_shuffle
+ case 75: { // Predicate_vmrglw_unary_shuffle
SDNode *N = Node;
return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, true);
}
- case 77: { // Predicate_vmrghb_unary_shuffle
+ case 76: { // Predicate_vmrghb_unary_shuffle
SDNode *N = Node;
return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, true);
}
- case 78: { // Predicate_vmrghh_unary_shuffle
+ case 77: { // Predicate_vmrghh_unary_shuffle
SDNode *N = Node;
return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, true);
}
- case 79: { // Predicate_vmrghw_unary_shuffle
+ case 78: { // Predicate_vmrghw_unary_shuffle
SDNode *N = Node;
return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, true);
}
- case 80: { // Predicate_vsldoi_shuffle
+ case 79: { // Predicate_vsldoi_shuffle
SDNode *N = Node;
return PPC::isVSLDOIShuffleMask(N, false) != -1;
}
- case 81: { // Predicate_vmrghb_shuffle
+ case 80: { // Predicate_vmrghb_shuffle
SDNode *N = Node;
return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, false);
}
- case 82: { // Predicate_vmrghh_shuffle
+ case 81: { // Predicate_vmrghh_shuffle
SDNode *N = Node;
return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, false);
}
- case 83: { // Predicate_vmrghw_shuffle
+ case 82: { // Predicate_vmrghw_shuffle
SDNode *N = Node;
return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, false);
}
- case 84: { // Predicate_vmrglb_shuffle
+ case 83: { // Predicate_vmrglb_shuffle
SDNode *N = Node;
return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, false);
}
- case 85: { // Predicate_vmrglh_shuffle
+ case 84: { // Predicate_vmrglh_shuffle
SDNode *N = Node;
return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, false);
}
- case 86: { // Predicate_vmrglw_shuffle
+ case 85: { // Predicate_vmrglw_shuffle
SDNode *N = Node;
return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, false);
}
- case 87: { // Predicate_vpkuhum_shuffle
+ case 86: { // Predicate_vpkuhum_shuffle
SDNode *N = Node;
return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), false);
}
- case 88: { // Predicate_vpkuwum_shuffle
+ case 87: { // Predicate_vpkuwum_shuffle
SDNode *N = Node;
return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), false);
}
- case 89: { // Predicate_vecspltisb
+ case 88: { // Predicate_vecspltisb
SDNode *N = Node;
return PPC::get_VSPLTI_elt(N, 1, *CurDAG).getNode() != 0;
}
- case 90: { // Predicate_vecspltish
+ case 89: { // Predicate_vecspltish
SDNode *N = Node;
return PPC::get_VSPLTI_elt(N, 2, *CurDAG).getNode() != 0;
}
- case 91: { // Predicate_vecspltisw
+ case 90: { // Predicate_vecspltisw
SDNode *N = Node;
return PPC::get_VSPLTI_elt(N, 4, *CurDAG).getNode() != 0;
}
- case 92: { // Predicate_immAllZerosV
+ case 91: { // Predicate_immAllZerosV
SDNode *N = Node;
return ISD::isBuildVectorAllZeros(N);
@@ -5768,23 +5071,24 @@ bool CheckNodePredicate(SDNode *Node, unsigned PredNo) const {
bool CheckComplexPattern(SDNode *Root, SDValue N,
unsigned PatternNo, SmallVectorImpl<SDValue> &Result) {
+ unsigned NextRes = Result.size();
switch (PatternNo) {
default: assert(0 && "Invalid pattern # in table?");
case 0:
- Result.resize(Result.size()+2);
- return SelectAddrIdxOnly(Root, N, Result[Result.size()-2], Result[Result.size()-1]);
+ Result.resize(NextRes+2);
+ return SelectAddrIdxOnly(Root, N, Result[NextRes+0], Result[NextRes+1]);
case 1:
- Result.resize(Result.size()+2);
- return SelectAddrImm(Root, N, Result[Result.size()-2], Result[Result.size()-1]);
+ Result.resize(NextRes+2);
+ return SelectAddrImm(Root, N, Result[NextRes+0], Result[NextRes+1]);
case 2:
- Result.resize(Result.size()+2);
- return SelectAddrIdx(Root, N, Result[Result.size()-2], Result[Result.size()-1]);
+ Result.resize(NextRes+2);
+ return SelectAddrIdx(Root, N, Result[NextRes+0], Result[NextRes+1]);
case 3:
- Result.resize(Result.size()+2);
- return SelectAddrImmShift(Root, N, Result[Result.size()-2], Result[Result.size()-1]);
+ Result.resize(NextRes+2);
+ return SelectAddrImmShift(Root, N, Result[NextRes+0], Result[NextRes+1]);
case 4:
- Result.resize(Result.size()+1);
- return SelectAddrImmOffs(Root, N, Result[Result.size()-1]);
+ Result.resize(NextRes+1);
+ return SelectAddrImmOffs(Root, N, Result[NextRes+0]);
}
}
@@ -5832,61 +5136,61 @@ SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {
}
case 5: {
- SDNode *N = V.getNode();
+ ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
- return getI32Imm(PPC::getVSPLTImmediate(N, 1));
+ // Transformation function: 31 - imm
+ return getI32Imm(31 - N->getZExtValue());
}
case 6: {
- SDNode *N = V.getNode();
+ ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
- return getI32Imm(PPC::getVSPLTImmediate(N, 2));
+ // Transformation function: 63 - imm
+ return getI32Imm(63 - N->getZExtValue());
}
case 7: {
- SDNode *N = V.getNode();
+ ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
- return getI32Imm(PPC::getVSPLTImmediate(N, 4));
+ // Transformation function: 32 - imm
+ return N->getZExtValue() ? getI32Imm(32 - N->getZExtValue()) : getI32Imm(0);
}
case 8: {
- SDNode *N = V.getNode();
+ ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
- return getI32Imm(PPC::isVSLDOIShuffleMask(N, true));
+ // Transformation function: 64 - imm
+ return N->getZExtValue() ? getI32Imm(64 - N->getZExtValue()) : getI32Imm(0);
}
case 9: {
SDNode *N = V.getNode();
- return getI32Imm(PPC::isVSLDOIShuffleMask(N, false));
+ return getI32Imm(PPC::getVSPLTImmediate(N, 1));
}
case 10: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
+ SDNode *N = V.getNode();
- // Transformation function: 31 - imm
- return getI32Imm(31 - N->getZExtValue());
+ return getI32Imm(PPC::getVSPLTImmediate(N, 2));
}
case 11: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
+ SDNode *N = V.getNode();
- // Transformation function: 63 - imm
- return getI32Imm(63 - N->getZExtValue());
+ return getI32Imm(PPC::getVSPLTImmediate(N, 4));
}
case 12: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
+ SDNode *N = V.getNode();
- // Transformation function: 32 - imm
- return N->getZExtValue() ? getI32Imm(32 - N->getZExtValue()) : getI32Imm(0);
+ return getI32Imm(PPC::isVSLDOIShuffleMask(N, true));
}
case 13: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
+ SDNode *N = V.getNode();
- // Transformation function: 64 - imm
- return N->getZExtValue() ? getI32Imm(64 - N->getZExtValue()) : getI32Imm(0);
+ return getI32Imm(PPC::isVSLDOIShuffleMask(N, false));
}
case 14: {
diff --git a/libclamav/c++/PPCGenInstrInfo.inc b/libclamav/c++/PPCGenInstrInfo.inc
index 5d204ac..cc311a1 100644
--- a/libclamav/c++/PPCGenInstrInfo.inc
+++ b/libclamav/c++/PPCGenInstrInfo.inc
@@ -30,615 +30,615 @@ static const TargetRegisterClass* Barriers4[] = { &PPC::CTRRCRegClass, NULL };
static const TargetRegisterClass* Barriers5[] = { &PPC::CTRRC8RegClass, NULL };
static const unsigned ImplicitList16[] = { PPC::CR6, 0 };
-static const TargetOperandInfo OperandInfo2[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo3[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo4[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo5[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo6[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo7[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo8[] = { { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo9[] = { { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo10[] = { { PPC::GPRCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo11[] = { { PPC::G8RCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo12[] = { { PPC::GPRCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo13[] = { { PPC::G8RCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { PPC::G8RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo14[] = { { 0, 0|(1<<TOI::Predicate), 0 }, { PPC::CRRCRegClassID, 0|(1<<TOI::Predicate), 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo15[] = { { 0, 0|(1<<TOI::Predicate), 0 }, { PPC::CRRCRegClassID, 0|(1<<TOI::Predicate), 0 }, };
-static const TargetOperandInfo OperandInfo16[] = { { PPC::CRRCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo17[] = { { PPC::CRRCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo18[] = { { PPC::CRRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo19[] = { { PPC::CRRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo20[] = { { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo21[] = { { PPC::CRBITRCRegClassID, 0, 0 }, { PPC::CRBITRCRegClassID, 0, 0 }, { PPC::CRBITRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo22[] = { { PPC::CRBITRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo23[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
-static const TargetOperandInfo OperandInfo24[] = { { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo25[] = { { 0, 0, 0 }, { 0, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo26[] = { { 0, 0, 0 }, { 0, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo27[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
-static const TargetOperandInfo OperandInfo28[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
-static const TargetOperandInfo OperandInfo29[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo30[] = { { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo31[] = { { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo32[] = { { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo33[] = { { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo34[] = { { PPC::CRRCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo35[] = { { PPC::CRRCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo36[] = { { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo37[] = { { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo38[] = { { PPC::F8RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo2[] = { { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo3[] = { { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo4[] = { { -1, 0, 0 }, { -1, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo5[] = { { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo6[] = { { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo7[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo8[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo9[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo10[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo11[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo12[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo13[] = { { PPC::GPRCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo14[] = { { PPC::G8RCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo15[] = { { PPC::GPRCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo16[] = { { PPC::G8RCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { PPC::G8RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo17[] = { { -1, 0|(1<<TOI::Predicate), 0 }, { PPC::CRRCRegClassID, 0|(1<<TOI::Predicate), 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo18[] = { { -1, 0|(1<<TOI::Predicate), 0 }, { PPC::CRRCRegClassID, 0|(1<<TOI::Predicate), 0 }, };
+static const TargetOperandInfo OperandInfo19[] = { { PPC::CRRCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo20[] = { { PPC::CRRCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo21[] = { { PPC::CRRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo22[] = { { PPC::CRRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo23[] = { { PPC::CRBITRCRegClassID, 0, 0 }, { PPC::CRBITRCRegClassID, 0, 0 }, { PPC::CRBITRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo24[] = { { PPC::CRBITRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo25[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
+static const TargetOperandInfo OperandInfo26[] = { { -1, 0, 0 }, { -1, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo27[] = { { -1, 0, 0 }, { -1, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo28[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { -1, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
+static const TargetOperandInfo OperandInfo29[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { -1, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
+static const TargetOperandInfo OperandInfo30[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo31[] = { { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo32[] = { { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo33[] = { { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo34[] = { { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo35[] = { { PPC::CRRCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo36[] = { { PPC::CRRCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo37[] = { { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo38[] = { { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, };
static const TargetOperandInfo OperandInfo39[] = { { PPC::F4RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, };
static const TargetOperandInfo OperandInfo40[] = { { PPC::F4RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo41[] = { { 0, 0, 0 }, { 0, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo42[] = { { PPC::GPRCRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
-static const TargetOperandInfo OperandInfo43[] = { { PPC::G8RCRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
-static const TargetOperandInfo OperandInfo44[] = { { PPC::GPRCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), ((1 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo45[] = { { PPC::G8RCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), ((1 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo46[] = { { PPC::GPRCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
-static const TargetOperandInfo OperandInfo47[] = { { PPC::G8RCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
-static const TargetOperandInfo OperandInfo48[] = { { PPC::G8RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo49[] = { { PPC::G8RCRegClassID, 0, 0 }, { 0, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo50[] = { { PPC::F8RCRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
-static const TargetOperandInfo OperandInfo51[] = { { PPC::F8RCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), ((1 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo52[] = { { PPC::F8RCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
-static const TargetOperandInfo OperandInfo53[] = { { PPC::F4RCRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
-static const TargetOperandInfo OperandInfo54[] = { { PPC::F4RCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), ((1 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo55[] = { { PPC::F4RCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
-static const TargetOperandInfo OperandInfo56[] = { { PPC::GPRCRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo57[] = { { PPC::G8RCRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo58[] = { { PPC::VRRCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
-static const TargetOperandInfo OperandInfo59[] = { { PPC::CRRCRegClassID, 0, 0 }, { PPC::CRRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo60[] = { { PPC::GPRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo61[] = { { PPC::F8RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo62[] = { { PPC::VRRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo63[] = { { 0, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo64[] = { { PPC::F8RCRegClassID, 0, 0 }, { 0, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo65[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo66[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo67[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo68[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo69[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { PPC::G8RCRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo70[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { PPC::GPRCRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo71[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo72[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo73[] = { { PPC::F4RCRegClassID, 0, 0 }, { PPC::CRRCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo74[] = { { PPC::F8RCRegClassID, 0, 0 }, { PPC::CRRCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo75[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::CRRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo76[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::CRRCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo77[] = { { PPC::VRRCRegClassID, 0, 0 }, { PPC::CRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo78[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo79[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { PPC::GPRCRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), ((0 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo80[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { PPC::G8RCRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), ((0 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo81[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { PPC::F8RCRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), ((0 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo82[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { PPC::F4RCRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), ((0 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo83[] = { { PPC::CTRRCRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo84[] = { { PPC::CTRRC8RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo85[] = { { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo86[] = { { PPC::VRRCRegClassID, 0, 0 }, { 0, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo87[] = { { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo88[] = { { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo89[] = { { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo90[] = { { PPC::VRRCRegClassID, 0, 0 }, { 0, 0, 0 }, };
+static const TargetOperandInfo OperandInfo41[] = { { PPC::GPRCRegClassID, 0, 0 }, { -1, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
+static const TargetOperandInfo OperandInfo42[] = { { PPC::G8RCRegClassID, 0, 0 }, { -1, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
+static const TargetOperandInfo OperandInfo43[] = { { PPC::GPRCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), ((1 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo44[] = { { PPC::G8RCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), ((1 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo45[] = { { PPC::GPRCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
+static const TargetOperandInfo OperandInfo46[] = { { PPC::G8RCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
+static const TargetOperandInfo OperandInfo47[] = { { PPC::G8RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo48[] = { { PPC::G8RCRegClassID, 0, 0 }, { -1, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo49[] = { { PPC::F8RCRegClassID, 0, 0 }, { -1, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
+static const TargetOperandInfo OperandInfo50[] = { { PPC::F8RCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), ((1 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo51[] = { { PPC::F8RCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
+static const TargetOperandInfo OperandInfo52[] = { { PPC::F4RCRegClassID, 0, 0 }, { -1, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
+static const TargetOperandInfo OperandInfo53[] = { { PPC::F4RCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), ((1 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo54[] = { { PPC::F4RCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
+static const TargetOperandInfo OperandInfo55[] = { { PPC::GPRCRegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo56[] = { { PPC::G8RCRegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo57[] = { { PPC::VRRCRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, };
+static const TargetOperandInfo OperandInfo58[] = { { PPC::CRRCRegClassID, 0, 0 }, { PPC::CRRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo59[] = { { PPC::GPRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo60[] = { { PPC::F8RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo61[] = { { PPC::VRRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo62[] = { { -1, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo63[] = { { PPC::F8RCRegClassID, 0, 0 }, { -1, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo64[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo65[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo66[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo67[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo68[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { PPC::G8RCRegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo69[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { PPC::GPRCRegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo70[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo71[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo72[] = { { PPC::F4RCRegClassID, 0, 0 }, { PPC::CRRCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, { PPC::F4RCRegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo73[] = { { PPC::F8RCRegClassID, 0, 0 }, { PPC::CRRCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, { PPC::F8RCRegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo74[] = { { PPC::GPRCRegClassID, 0, 0 }, { PPC::CRRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo75[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::CRRCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo76[] = { { PPC::VRRCRegClassID, 0, 0 }, { PPC::CRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo77[] = { { PPC::G8RCRegClassID, 0, 0 }, { PPC::G8RCRegClassID, 0, 0 }, { PPC::GPRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo78[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { PPC::GPRCRegClassID, 0, 0 }, { -1, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), ((0 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo79[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { PPC::G8RCRegClassID, 0, 0 }, { -1, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), ((0 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo80[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { PPC::F8RCRegClassID, 0, 0 }, { -1, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), ((0 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo81[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { PPC::F4RCRegClassID, 0, 0 }, { -1, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), ((0 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo82[] = { { PPC::CTRRCRegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo83[] = { { PPC::CTRRC8RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo84[] = { { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo85[] = { { PPC::VRRCRegClassID, 0, 0 }, { -1, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo86[] = { { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo87[] = { { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo88[] = { { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, { PPC::VRRCRegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo89[] = { { PPC::VRRCRegClassID, 0, 0 }, { -1, 0, 0 }, };
static const TargetInstrDesc PPCInsts[] = {
- { 0, 0, 0, 52, "PHI", 0|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, 0 }, // Inst #0 = PHI
- { 1, 0, 0, 52, "INLINEASM", 0|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, 0 }, // Inst #1 = INLINEASM
- { 2, 1, 0, 52, "DBG_LABEL", 0|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo8 }, // Inst #2 = DBG_LABEL
- { 3, 1, 0, 52, "EH_LABEL", 0|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo8 }, // Inst #3 = EH_LABEL
- { 4, 1, 0, 52, "GC_LABEL", 0|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo8 }, // Inst #4 = GC_LABEL
- { 5, 0, 0, 52, "KILL", 0|(1<<TID::Variadic), 0, NULL, NULL, NULL, 0 }, // Inst #5 = KILL
- { 6, 3, 1, 52, "EXTRACT_SUBREG", 0, 0, NULL, NULL, NULL, OperandInfo20 }, // Inst #6 = EXTRACT_SUBREG
- { 7, 4, 1, 52, "INSERT_SUBREG", 0, 0, NULL, NULL, NULL, OperandInfo41 }, // Inst #7 = INSERT_SUBREG
- { 8, 1, 1, 52, "IMPLICIT_DEF", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0, NULL, NULL, NULL, OperandInfo8 }, // Inst #8 = IMPLICIT_DEF
- { 9, 4, 1, 52, "SUBREG_TO_REG", 0, 0, NULL, NULL, NULL, OperandInfo24 }, // Inst #9 = SUBREG_TO_REG
- { 10, 3, 1, 52, "COPY_TO_REGCLASS", 0|(1<<TID::CheapAsAMove), 0, NULL, NULL, NULL, OperandInfo20 }, // Inst #10 = COPY_TO_REGCLASS
- { 11, 0, 0, 52, "DBG_VALUE", 0|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::CheapAsAMove), 0, NULL, NULL, NULL, 0 }, // Inst #11 = DBG_VALUE
- { 12, 3, 1, 14, "ADD4", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #12 = ADD4
- { 13, 3, 1, 14, "ADD8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo3 }, // Inst #13 = ADD8
- { 14, 3, 1, 14, "ADDC", 0, 0|(1<<2)|(1<<3), NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #14 = ADDC
- { 15, 3, 1, 14, "ADDC8", 0, 0|(1<<2)|(1<<3), NULL, ImplicitList1, Barriers1, OperandInfo3 }, // Inst #15 = ADDC8
- { 16, 3, 1, 14, "ADDE", 0, 0|(1<<3), ImplicitList1, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #16 = ADDE
- { 17, 3, 1, 14, "ADDE8", 0, 0|(1<<3), ImplicitList1, ImplicitList1, Barriers1, OperandInfo3 }, // Inst #17 = ADDE8
- { 18, 3, 1, 14, "ADDI", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo4 }, // Inst #18 = ADDI
- { 19, 3, 1, 14, "ADDI8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo5 }, // Inst #19 = ADDI8
- { 20, 3, 1, 14, "ADDIC", 0, 0|(1<<2)|(1<<3), NULL, ImplicitList1, Barriers1, OperandInfo4 }, // Inst #20 = ADDIC
- { 21, 3, 1, 14, "ADDIC8", 0, 0|(1<<3), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #21 = ADDIC8
- { 22, 3, 1, 14, "ADDICo", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, ImplicitList1, Barriers1, OperandInfo4 }, // Inst #22 = ADDICo
- { 23, 3, 1, 14, "ADDIS", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo4 }, // Inst #23 = ADDIS
- { 24, 3, 1, 14, "ADDIS8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo5 }, // Inst #24 = ADDIS8
- { 25, 2, 1, 14, "ADDME", 0, 0|(1<<3), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #25 = ADDME
- { 26, 2, 1, 14, "ADDME8", 0, 0|(1<<3), ImplicitList1, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #26 = ADDME8
- { 27, 2, 1, 14, "ADDZE", 0, 0|(1<<3), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #27 = ADDZE
- { 28, 2, 1, 14, "ADDZE8", 0, 0|(1<<3), ImplicitList1, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #28 = ADDZE8
- { 29, 1, 0, 52, "ADJCALLSTACKDOWN", 0, 0, ImplicitList2, ImplicitList2, NULL, OperandInfo8 }, // Inst #29 = ADJCALLSTACKDOWN
- { 30, 2, 0, 52, "ADJCALLSTACKUP", 0, 0, ImplicitList2, ImplicitList2, NULL, OperandInfo9 }, // Inst #30 = ADJCALLSTACKUP
- { 31, 3, 1, 14, "AND", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #31 = AND
- { 32, 3, 1, 14, "AND8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo3 }, // Inst #32 = AND8
- { 33, 3, 1, 14, "ANDC", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #33 = ANDC
- { 34, 3, 1, 14, "ANDC8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo3 }, // Inst #34 = ANDC8
- { 35, 3, 1, 14, "ANDISo", 0, 0|(1<<3), NULL, ImplicitList3, NULL, OperandInfo4 }, // Inst #35 = ANDISo
- { 36, 3, 1, 14, "ANDISo8", 0, 0|(1<<3), NULL, ImplicitList3, NULL, OperandInfo5 }, // Inst #36 = ANDISo8
- { 37, 3, 1, 14, "ANDIo", 0, 0|(1<<3), NULL, ImplicitList3, NULL, OperandInfo4 }, // Inst #37 = ANDIo
- { 38, 3, 1, 14, "ANDIo8", 0, 0|(1<<3), NULL, ImplicitList3, NULL, OperandInfo5 }, // Inst #38 = ANDIo8
- { 39, 5, 1, 52, "ATOMIC_CMP_SWAP_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo10 }, // Inst #39 = ATOMIC_CMP_SWAP_I16
- { 40, 5, 1, 52, "ATOMIC_CMP_SWAP_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo10 }, // Inst #40 = ATOMIC_CMP_SWAP_I32
- { 41, 5, 1, 52, "ATOMIC_CMP_SWAP_I64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo11 }, // Inst #41 = ATOMIC_CMP_SWAP_I64
- { 42, 5, 1, 52, "ATOMIC_CMP_SWAP_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo10 }, // Inst #42 = ATOMIC_CMP_SWAP_I8
- { 43, 4, 1, 52, "ATOMIC_LOAD_ADD_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #43 = ATOMIC_LOAD_ADD_I16
- { 44, 4, 1, 52, "ATOMIC_LOAD_ADD_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #44 = ATOMIC_LOAD_ADD_I32
- { 45, 4, 1, 52, "ATOMIC_LOAD_ADD_I64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo13 }, // Inst #45 = ATOMIC_LOAD_ADD_I64
- { 46, 4, 1, 52, "ATOMIC_LOAD_ADD_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #46 = ATOMIC_LOAD_ADD_I8
- { 47, 4, 1, 52, "ATOMIC_LOAD_AND_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #47 = ATOMIC_LOAD_AND_I16
- { 48, 4, 1, 52, "ATOMIC_LOAD_AND_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #48 = ATOMIC_LOAD_AND_I32
- { 49, 4, 1, 52, "ATOMIC_LOAD_AND_I64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo13 }, // Inst #49 = ATOMIC_LOAD_AND_I64
- { 50, 4, 1, 52, "ATOMIC_LOAD_AND_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #50 = ATOMIC_LOAD_AND_I8
- { 51, 4, 1, 52, "ATOMIC_LOAD_NAND_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #51 = ATOMIC_LOAD_NAND_I16
- { 52, 4, 1, 52, "ATOMIC_LOAD_NAND_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #52 = ATOMIC_LOAD_NAND_I32
- { 53, 4, 1, 52, "ATOMIC_LOAD_NAND_I64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo13 }, // Inst #53 = ATOMIC_LOAD_NAND_I64
- { 54, 4, 1, 52, "ATOMIC_LOAD_NAND_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #54 = ATOMIC_LOAD_NAND_I8
- { 55, 4, 1, 52, "ATOMIC_LOAD_OR_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #55 = ATOMIC_LOAD_OR_I16
- { 56, 4, 1, 52, "ATOMIC_LOAD_OR_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #56 = ATOMIC_LOAD_OR_I32
- { 57, 4, 1, 52, "ATOMIC_LOAD_OR_I64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo13 }, // Inst #57 = ATOMIC_LOAD_OR_I64
- { 58, 4, 1, 52, "ATOMIC_LOAD_OR_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #58 = ATOMIC_LOAD_OR_I8
- { 59, 4, 1, 52, "ATOMIC_LOAD_SUB_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #59 = ATOMIC_LOAD_SUB_I16
- { 60, 4, 1, 52, "ATOMIC_LOAD_SUB_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #60 = ATOMIC_LOAD_SUB_I32
- { 61, 4, 1, 52, "ATOMIC_LOAD_SUB_I64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo13 }, // Inst #61 = ATOMIC_LOAD_SUB_I64
- { 62, 4, 1, 52, "ATOMIC_LOAD_SUB_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #62 = ATOMIC_LOAD_SUB_I8
- { 63, 4, 1, 52, "ATOMIC_LOAD_XOR_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #63 = ATOMIC_LOAD_XOR_I16
- { 64, 4, 1, 52, "ATOMIC_LOAD_XOR_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #64 = ATOMIC_LOAD_XOR_I32
- { 65, 4, 1, 52, "ATOMIC_LOAD_XOR_I64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo13 }, // Inst #65 = ATOMIC_LOAD_XOR_I64
- { 66, 4, 1, 52, "ATOMIC_LOAD_XOR_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #66 = ATOMIC_LOAD_XOR_I8
- { 67, 4, 1, 52, "ATOMIC_SWAP_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #67 = ATOMIC_SWAP_I16
- { 68, 4, 1, 52, "ATOMIC_SWAP_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #68 = ATOMIC_SWAP_I32
- { 69, 4, 1, 52, "ATOMIC_SWAP_I64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo13 }, // Inst #69 = ATOMIC_SWAP_I64
- { 70, 4, 1, 52, "ATOMIC_SWAP_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, ImplicitList3, NULL, NULL, OperandInfo12 }, // Inst #70 = ATOMIC_SWAP_I8
- { 71, 1, 0, 0, "B", 0|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Terminator), 0|(7<<3), NULL, NULL, NULL, OperandInfo8 }, // Inst #71 = B
- { 72, 3, 0, 0, "BCC", 0|(1<<TID::Branch)|(1<<TID::Predicable)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|(7<<3), NULL, NULL, NULL, OperandInfo14 }, // Inst #72 = BCC
- { 73, 0, 0, 0, "BCTR", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|(7<<3), ImplicitList4, NULL, NULL, 0 }, // Inst #73 = BCTR
- { 74, 0, 0, 0, "BCTRL8_Darwin", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(7<<3), ImplicitList5, ImplicitList6, Barriers2, 0 }, // Inst #74 = BCTRL8_Darwin
- { 75, 0, 0, 0, "BCTRL8_ELF", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(7<<3), ImplicitList5, ImplicitList6, Barriers2, 0 }, // Inst #75 = BCTRL8_ELF
- { 76, 0, 0, 0, "BCTRL_Darwin", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(7<<3), ImplicitList7, ImplicitList8, Barriers3, 0 }, // Inst #76 = BCTRL_Darwin
- { 77, 0, 0, 0, "BCTRL_SVR4", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(7<<3), ImplicitList7, ImplicitList9, Barriers3, 0 }, // Inst #77 = BCTRL_SVR4
- { 78, 1, 0, 0, "BL8_Darwin", 0|(1<<TID::Call)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0|(7<<3), ImplicitList10, ImplicitList6, Barriers2, OperandInfo8 }, // Inst #78 = BL8_Darwin
- { 79, 1, 0, 0, "BL8_ELF", 0|(1<<TID::Call)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0|(7<<3), ImplicitList10, ImplicitList6, Barriers2, OperandInfo8 }, // Inst #79 = BL8_ELF
- { 80, 1, 0, 0, "BLA8_Darwin", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(7<<3), ImplicitList10, ImplicitList6, Barriers2, OperandInfo8 }, // Inst #80 = BLA8_Darwin
- { 81, 1, 0, 0, "BLA8_ELF", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(7<<3), ImplicitList10, ImplicitList6, Barriers2, OperandInfo8 }, // Inst #81 = BLA8_ELF
- { 82, 1, 0, 0, "BLA_Darwin", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(7<<3), ImplicitList10, ImplicitList8, Barriers3, OperandInfo8 }, // Inst #82 = BLA_Darwin
- { 83, 1, 0, 0, "BLA_SVR4", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|(7<<3), ImplicitList10, ImplicitList9, Barriers3, OperandInfo8 }, // Inst #83 = BLA_SVR4
- { 84, 2, 0, 0, "BLR", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Predicable)|(1<<TID::Terminator), 0|(7<<3), ImplicitList11, NULL, NULL, OperandInfo15 }, // Inst #84 = BLR
- { 85, 1, 0, 0, "BL_Darwin", 0|(1<<TID::Call)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0|(7<<3), ImplicitList10, ImplicitList8, Barriers3, OperandInfo8 }, // Inst #85 = BL_Darwin
- { 86, 1, 0, 0, "BL_SVR4", 0|(1<<TID::Call)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0|(7<<3), ImplicitList10, ImplicitList9, Barriers3, OperandInfo8 }, // Inst #86 = BL_SVR4
- { 87, 3, 1, 11, "CMPD", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, OperandInfo16 }, // Inst #87 = CMPD
- { 88, 3, 1, 11, "CMPDI", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, OperandInfo17 }, // Inst #88 = CMPDI
- { 89, 3, 1, 11, "CMPLD", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, OperandInfo16 }, // Inst #89 = CMPLD
- { 90, 3, 1, 11, "CMPLDI", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, OperandInfo17 }, // Inst #90 = CMPLDI
- { 91, 3, 1, 11, "CMPLW", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, OperandInfo18 }, // Inst #91 = CMPLW
- { 92, 3, 1, 11, "CMPLWI", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, OperandInfo19 }, // Inst #92 = CMPLWI
- { 93, 3, 1, 11, "CMPW", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, OperandInfo18 }, // Inst #93 = CMPW
- { 94, 3, 1, 11, "CMPWI", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, OperandInfo19 }, // Inst #94 = CMPWI
- { 95, 2, 1, 14, "CNTLZD", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo7 }, // Inst #95 = CNTLZD
- { 96, 2, 1, 14, "CNTLZW", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo6 }, // Inst #96 = CNTLZW
- { 97, 3, 1, 1, "CREQV", 0|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo21 }, // Inst #97 = CREQV
- { 98, 3, 1, 1, "CROR", 0|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo21 }, // Inst #98 = CROR
- { 99, 1, 1, 1, "CRSET", 0|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo22 }, // Inst #99 = CRSET
- { 100, 2, 0, 30, "DCBA", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|(1<<1), NULL, NULL, NULL, OperandInfo23 }, // Inst #100 = DCBA
- { 101, 2, 0, 30, "DCBF", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|(1<<1), NULL, NULL, NULL, OperandInfo23 }, // Inst #101 = DCBF
- { 102, 2, 0, 30, "DCBI", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|(1<<1), NULL, NULL, NULL, OperandInfo23 }, // Inst #102 = DCBI
- { 103, 2, 0, 30, "DCBST", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|(1<<1), NULL, NULL, NULL, OperandInfo23 }, // Inst #103 = DCBST
- { 104, 2, 0, 30, "DCBT", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|(1<<1), NULL, NULL, NULL, OperandInfo23 }, // Inst #104 = DCBT
- { 105, 2, 0, 30, "DCBTST", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|(1<<1), NULL, NULL, NULL, OperandInfo23 }, // Inst #105 = DCBTST
- { 106, 2, 0, 30, "DCBZ", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|(1<<1), NULL, NULL, NULL, OperandInfo23 }, // Inst #106 = DCBZ
- { 107, 2, 0, 30, "DCBZL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|(1<<1), NULL, NULL, NULL, OperandInfo23 }, // Inst #107 = DCBZL
- { 108, 3, 1, 12, "DIVD", 0, 0|1|(1<<2)|(1<<3), NULL, NULL, NULL, OperandInfo3 }, // Inst #108 = DIVD
- { 109, 3, 1, 12, "DIVDU", 0, 0|1|(1<<2)|(1<<3), NULL, NULL, NULL, OperandInfo3 }, // Inst #109 = DIVDU
- { 110, 3, 1, 13, "DIVW", 0, 0|1|(1<<2)|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #110 = DIVW
- { 111, 3, 1, 13, "DIVWU", 0, 0|1|(1<<2)|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #111 = DIVWU
- { 112, 4, 0, 33, "DSS", 0|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo24 }, // Inst #112 = DSS
- { 113, 4, 0, 33, "DSSALL", 0|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo24 }, // Inst #113 = DSSALL
- { 114, 4, 0, 33, "DST", 0|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo25 }, // Inst #114 = DST
- { 115, 4, 0, 33, "DST64", 0|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo26 }, // Inst #115 = DST64
- { 116, 4, 0, 33, "DSTST", 0|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo25 }, // Inst #116 = DSTST
- { 117, 4, 0, 33, "DSTST64", 0|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo26 }, // Inst #117 = DSTST64
- { 118, 4, 0, 33, "DSTSTT", 0|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo25 }, // Inst #118 = DSTSTT
- { 119, 4, 0, 33, "DSTSTT64", 0|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo26 }, // Inst #119 = DSTSTT64
- { 120, 4, 0, 33, "DSTT", 0|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo25 }, // Inst #120 = DSTT
- { 121, 4, 0, 33, "DSTT64", 0|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo26 }, // Inst #121 = DSTT64
- { 122, 4, 1, 52, "DYNALLOC", 0, 0, ImplicitList2, ImplicitList2, NULL, OperandInfo27 }, // Inst #122 = DYNALLOC
- { 123, 4, 1, 52, "DYNALLOC8", 0, 0, ImplicitList12, ImplicitList12, NULL, OperandInfo28 }, // Inst #123 = DYNALLOC8
- { 124, 3, 1, 14, "EQV", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #124 = EQV
- { 125, 3, 1, 14, "EQV8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo3 }, // Inst #125 = EQV8
- { 126, 2, 1, 14, "EXTSB", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo6 }, // Inst #126 = EXTSB
- { 127, 2, 1, 14, "EXTSB8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo7 }, // Inst #127 = EXTSB8
- { 128, 2, 1, 14, "EXTSH", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo6 }, // Inst #128 = EXTSH
- { 129, 2, 1, 14, "EXTSH8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo7 }, // Inst #129 = EXTSH8
- { 130, 2, 1, 14, "EXTSW", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo7 }, // Inst #130 = EXTSW
- { 131, 2, 1, 14, "EXTSW_32", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo6 }, // Inst #131 = EXTSW_32
- { 132, 2, 1, 14, "EXTSW_32_64", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo29 }, // Inst #132 = EXTSW_32_64
- { 133, 2, 1, 8, "FABSD", 0, 0|(3<<3), NULL, NULL, NULL, OperandInfo30 }, // Inst #133 = FABSD
- { 134, 2, 1, 8, "FABSS", 0, 0|(3<<3), NULL, NULL, NULL, OperandInfo31 }, // Inst #134 = FABSS
- { 135, 3, 1, 8, "FADD", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo32 }, // Inst #135 = FADD
- { 136, 3, 1, 8, "FADDS", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo33 }, // Inst #136 = FADDS
- { 137, 3, 1, 8, "FADDrtz", 0, 0|(1<<1)|(3<<3), ImplicitList10, NULL, NULL, OperandInfo32 }, // Inst #137 = FADDrtz
- { 138, 2, 1, 8, "FCFID", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo30 }, // Inst #138 = FCFID
- { 139, 3, 1, 4, "FCMPUD", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<3), NULL, NULL, NULL, OperandInfo34 }, // Inst #139 = FCMPUD
- { 140, 3, 1, 4, "FCMPUS", 0|(1<<TID::UnmodeledSideEffects), 0|(3<<3), NULL, NULL, NULL, OperandInfo35 }, // Inst #140 = FCMPUS
- { 141, 2, 1, 8, "FCTIDZ", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo30 }, // Inst #141 = FCTIDZ
- { 142, 2, 1, 8, "FCTIWZ", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo30 }, // Inst #142 = FCTIWZ
- { 143, 3, 1, 5, "FDIV", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo32 }, // Inst #143 = FDIV
- { 144, 3, 1, 6, "FDIVS", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo33 }, // Inst #144 = FDIVS
- { 145, 4, 1, 7, "FMADD", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo36 }, // Inst #145 = FMADD
- { 146, 4, 1, 8, "FMADDS", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo37 }, // Inst #146 = FMADDS
- { 147, 2, 1, 8, "FMR", 0|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo31 }, // Inst #147 = FMR
- { 148, 2, 1, 8, "FMRSD", 0, 0, NULL, NULL, NULL, OperandInfo38 }, // Inst #148 = FMRSD
- { 149, 4, 1, 7, "FMSUB", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo36 }, // Inst #149 = FMSUB
- { 150, 4, 1, 8, "FMSUBS", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo37 }, // Inst #150 = FMSUBS
- { 151, 3, 1, 7, "FMUL", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo32 }, // Inst #151 = FMUL
- { 152, 3, 1, 8, "FMULS", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo33 }, // Inst #152 = FMULS
- { 153, 2, 1, 8, "FNABSD", 0, 0|(3<<3), NULL, NULL, NULL, OperandInfo30 }, // Inst #153 = FNABSD
- { 154, 2, 1, 8, "FNABSS", 0, 0|(3<<3), NULL, NULL, NULL, OperandInfo31 }, // Inst #154 = FNABSS
- { 155, 2, 1, 8, "FNEGD", 0, 0|(3<<3), NULL, NULL, NULL, OperandInfo30 }, // Inst #155 = FNEGD
- { 156, 2, 1, 8, "FNEGS", 0, 0|(3<<3), NULL, NULL, NULL, OperandInfo31 }, // Inst #156 = FNEGS
- { 157, 4, 1, 7, "FNMADD", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo36 }, // Inst #157 = FNMADD
- { 158, 4, 1, 8, "FNMADDS", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo37 }, // Inst #158 = FNMADDS
- { 159, 4, 1, 7, "FNMSUB", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo36 }, // Inst #159 = FNMSUB
- { 160, 4, 1, 8, "FNMSUBS", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo37 }, // Inst #160 = FNMSUBS
- { 161, 2, 1, 8, "FRSP", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo39 }, // Inst #161 = FRSP
- { 162, 4, 1, 8, "FSELD", 0, 0|(3<<3), NULL, NULL, NULL, OperandInfo36 }, // Inst #162 = FSELD
- { 163, 4, 1, 8, "FSELS", 0, 0|(3<<3), NULL, NULL, NULL, OperandInfo40 }, // Inst #163 = FSELS
- { 164, 2, 1, 10, "FSQRT", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo30 }, // Inst #164 = FSQRT
- { 165, 2, 1, 10, "FSQRTS", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo31 }, // Inst #165 = FSQRTS
- { 166, 3, 1, 8, "FSUB", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo32 }, // Inst #166 = FSUB
- { 167, 3, 1, 8, "FSUBS", 0, 0|(3<<3), ImplicitList10, NULL, NULL, OperandInfo33 }, // Inst #167 = FSUBS
- { 168, 3, 1, 14, "LA", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo4 }, // Inst #168 = LA
- { 169, 3, 1, 33, "LBZ", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo42 }, // Inst #169 = LBZ
- { 170, 3, 1, 33, "LBZ8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo43 }, // Inst #170 = LBZ8
- { 171, 4, 2, 33, "LBZU", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo44 }, // Inst #171 = LBZU
- { 172, 4, 2, 33, "LBZU8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo45 }, // Inst #172 = LBZU8
- { 173, 3, 1, 33, "LBZX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo46 }, // Inst #173 = LBZX
- { 174, 3, 1, 33, "LBZX8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo47 }, // Inst #174 = LBZX8
- { 175, 3, 1, 35, "LD", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo43 }, // Inst #175 = LD
- { 176, 3, 1, 36, "LDARX", 0|(1<<TID::MayLoad), 0, NULL, NULL, NULL, OperandInfo47 }, // Inst #176 = LDARX
- { 177, 4, 2, 35, "LDU", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo45 }, // Inst #177 = LDU
- { 178, 3, 1, 35, "LDX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo47 }, // Inst #178 = LDX
- { 179, 1, 0, 35, "LDinto_toc", 0|(1<<TID::FoldableAsLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo48 }, // Inst #179 = LDinto_toc
- { 180, 3, 1, 35, "LDtoc", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo49 }, // Inst #180 = LDtoc
- { 181, 0, 0, 35, "LDtoc_restore", 0|(1<<TID::FoldableAsLoad)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, 0 }, // Inst #181 = LDtoc_restore
- { 182, 3, 1, 37, "LFD", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo50 }, // Inst #182 = LFD
- { 183, 4, 2, 37, "LFDU", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo51 }, // Inst #183 = LFDU
- { 184, 3, 1, 38, "LFDX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo52 }, // Inst #184 = LFDX
- { 185, 3, 1, 38, "LFS", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo53 }, // Inst #185 = LFS
- { 186, 4, 2, 38, "LFSU", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo54 }, // Inst #186 = LFSU
- { 187, 3, 1, 38, "LFSX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo55 }, // Inst #187 = LFSX
- { 188, 3, 1, 39, "LHA", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(1<<2)|(2<<3), NULL, NULL, NULL, OperandInfo42 }, // Inst #188 = LHA
- { 189, 3, 1, 39, "LHA8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(1<<2)|(2<<3), NULL, NULL, NULL, OperandInfo43 }, // Inst #189 = LHA8
- { 190, 4, 2, 33, "LHAU", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo44 }, // Inst #190 = LHAU
- { 191, 4, 2, 33, "LHAU8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo45 }, // Inst #191 = LHAU8
- { 192, 3, 1, 39, "LHAX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(1<<2)|(2<<3), NULL, NULL, NULL, OperandInfo46 }, // Inst #192 = LHAX
- { 193, 3, 1, 39, "LHAX8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(1<<2)|(2<<3), NULL, NULL, NULL, OperandInfo47 }, // Inst #193 = LHAX8
- { 194, 3, 1, 33, "LHBRX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo46 }, // Inst #194 = LHBRX
- { 195, 3, 1, 33, "LHZ", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo42 }, // Inst #195 = LHZ
- { 196, 3, 1, 33, "LHZ8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo43 }, // Inst #196 = LHZ8
- { 197, 4, 2, 33, "LHZU", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo44 }, // Inst #197 = LHZU
- { 198, 4, 2, 33, "LHZU8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo45 }, // Inst #198 = LHZU8
- { 199, 3, 1, 33, "LHZX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo46 }, // Inst #199 = LHZX
- { 200, 3, 1, 33, "LHZX8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo47 }, // Inst #200 = LHZX8
- { 201, 2, 1, 14, "LI", 0|(1<<TID::Rematerializable), 0|(1<<3), NULL, NULL, NULL, OperandInfo56 }, // Inst #201 = LI
- { 202, 2, 1, 14, "LI8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo57 }, // Inst #202 = LI8
- { 203, 2, 1, 14, "LIS", 0|(1<<TID::Rematerializable), 0|(1<<3), NULL, NULL, NULL, OperandInfo56 }, // Inst #203 = LIS
- { 204, 2, 1, 14, "LIS8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo57 }, // Inst #204 = LIS8
- { 205, 3, 1, 33, "LVEBX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo58 }, // Inst #205 = LVEBX
- { 206, 3, 1, 33, "LVEHX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo58 }, // Inst #206 = LVEHX
- { 207, 3, 1, 33, "LVEWX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo58 }, // Inst #207 = LVEWX
- { 208, 3, 1, 33, "LVSL", 0, 0|(2<<3), NULL, NULL, NULL, OperandInfo58 }, // Inst #208 = LVSL
- { 209, 3, 1, 33, "LVSR", 0, 0|(2<<3), NULL, NULL, NULL, OperandInfo58 }, // Inst #209 = LVSR
- { 210, 3, 1, 33, "LVX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo58 }, // Inst #210 = LVX
- { 211, 3, 1, 33, "LVXL", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo58 }, // Inst #211 = LVXL
- { 212, 3, 1, 42, "LWA", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(1<<2)|(2<<3), NULL, NULL, NULL, OperandInfo43 }, // Inst #212 = LWA
- { 213, 3, 1, 43, "LWARX", 0|(1<<TID::MayLoad), 0, NULL, NULL, NULL, OperandInfo46 }, // Inst #213 = LWARX
- { 214, 3, 1, 39, "LWAX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(1<<2)|(2<<3), NULL, NULL, NULL, OperandInfo47 }, // Inst #214 = LWAX
- { 215, 3, 1, 33, "LWBRX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo46 }, // Inst #215 = LWBRX
- { 216, 3, 1, 33, "LWZ", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo42 }, // Inst #216 = LWZ
- { 217, 3, 1, 33, "LWZ8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo43 }, // Inst #217 = LWZ8
- { 218, 4, 2, 33, "LWZU", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo44 }, // Inst #218 = LWZU
- { 219, 4, 2, 33, "LWZU8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo45 }, // Inst #219 = LWZU8
- { 220, 3, 1, 33, "LWZX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo46 }, // Inst #220 = LWZX
- { 221, 3, 1, 33, "LWZX8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(2<<3), NULL, NULL, NULL, OperandInfo47 }, // Inst #221 = LWZX8
- { 222, 2, 1, 2, "MCRF", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<3), NULL, NULL, NULL, OperandInfo59 }, // Inst #222 = MCRF
- { 223, 1, 1, 54, "MFCR", 0|(1<<TID::UnmodeledSideEffects), 0|(4<<3), NULL, NULL, NULL, OperandInfo60 }, // Inst #223 = MFCR
- { 224, 1, 1, 56, "MFCTR", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<3), ImplicitList4, NULL, NULL, OperandInfo60 }, // Inst #224 = MFCTR
- { 225, 1, 1, 56, "MFCTR8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<3), ImplicitList13, NULL, NULL, OperandInfo48 }, // Inst #225 = MFCTR8
- { 226, 1, 1, 15, "MFFS", 0, 0|(1<<1)|(3<<3), ImplicitList10, NULL, NULL, OperandInfo61 }, // Inst #226 = MFFS
- { 227, 1, 1, 56, "MFLR", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<3), ImplicitList14, NULL, NULL, OperandInfo60 }, // Inst #227 = MFLR
- { 228, 1, 1, 56, "MFLR8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<3), ImplicitList15, NULL, NULL, OperandInfo48 }, // Inst #228 = MFLR8
- { 229, 2, 1, 54, "MFOCRF", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<3), NULL, NULL, NULL, OperandInfo56 }, // Inst #229 = MFOCRF
- { 230, 1, 1, 14, "MFVRSAVE", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<3), NULL, NULL, NULL, OperandInfo60 }, // Inst #230 = MFVRSAVE
- { 231, 1, 1, 33, "MFVSCR", 0|(1<<TID::MayLoad), 0, NULL, NULL, NULL, OperandInfo62 }, // Inst #231 = MFVSCR
- { 232, 2, 0, 3, "MTCRF", 0|(1<<TID::UnmodeledSideEffects), 0|(4<<3), NULL, NULL, NULL, OperandInfo63 }, // Inst #232 = MTCRF
- { 233, 1, 0, 60, "MTCTR", 0, 0|1|(1<<3), NULL, ImplicitList4, Barriers4, OperandInfo60 }, // Inst #233 = MTCTR
- { 234, 1, 0, 60, "MTCTR8", 0, 0|1|(1<<3), NULL, ImplicitList13, Barriers5, OperandInfo48 }, // Inst #234 = MTCTR8
- { 235, 1, 0, 17, "MTFSB0", 0, 0|(1<<1)|(3<<3), ImplicitList10, ImplicitList10, NULL, OperandInfo8 }, // Inst #235 = MTFSB0
- { 236, 1, 0, 17, "MTFSB1", 0, 0|(1<<1)|(3<<3), ImplicitList10, ImplicitList10, NULL, OperandInfo8 }, // Inst #236 = MTFSB1
- { 237, 4, 1, 17, "MTFSF", 0, 0|(1<<1)|(3<<3), ImplicitList10, ImplicitList10, NULL, OperandInfo64 }, // Inst #237 = MTFSF
- { 238, 1, 0, 60, "MTLR", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<3), NULL, ImplicitList14, NULL, OperandInfo60 }, // Inst #238 = MTLR
- { 239, 1, 0, 60, "MTLR8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<3), NULL, ImplicitList15, NULL, OperandInfo48 }, // Inst #239 = MTLR8
- { 240, 1, 0, 14, "MTVRSAVE", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<1)|(1<<3), NULL, NULL, NULL, OperandInfo60 }, // Inst #240 = MTVRSAVE
- { 241, 1, 0, 33, "MTVSCR", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo62 }, // Inst #241 = MTVSCR
- { 242, 3, 1, 20, "MULHD", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo3 }, // Inst #242 = MULHD
- { 243, 3, 1, 21, "MULHDU", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo3 }, // Inst #243 = MULHDU
- { 244, 3, 1, 20, "MULHW", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #244 = MULHW
- { 245, 3, 1, 21, "MULHWU", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #245 = MULHWU
- { 246, 3, 1, 19, "MULLD", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo3 }, // Inst #246 = MULLD
- { 247, 3, 1, 22, "MULLI", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo4 }, // Inst #247 = MULLI
- { 248, 3, 1, 20, "MULLW", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #248 = MULLW
- { 249, 1, 0, 52, "MovePCtoLR", 0|(1<<TID::UnmodeledSideEffects), 0|(7<<3), NULL, ImplicitList14, NULL, OperandInfo8 }, // Inst #249 = MovePCtoLR
- { 250, 1, 0, 52, "MovePCtoLR8", 0|(1<<TID::UnmodeledSideEffects), 0|(7<<3), NULL, ImplicitList15, NULL, OperandInfo8 }, // Inst #250 = MovePCtoLR8
- { 251, 3, 1, 14, "NAND", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #251 = NAND
- { 252, 3, 1, 14, "NAND8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo3 }, // Inst #252 = NAND8
- { 253, 2, 1, 14, "NEG", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo6 }, // Inst #253 = NEG
- { 254, 2, 1, 14, "NEG8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo7 }, // Inst #254 = NEG8
- { 255, 0, 0, 14, "NOP", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, 0 }, // Inst #255 = NOP
- { 256, 3, 1, 14, "NOR", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #256 = NOR
- { 257, 3, 1, 14, "NOR8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo3 }, // Inst #257 = NOR8
- { 258, 3, 1, 14, "OR", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #258 = OR
- { 259, 3, 1, 14, "OR4To8", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, OperandInfo65 }, // Inst #259 = OR4To8
- { 260, 3, 1, 14, "OR8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo3 }, // Inst #260 = OR8
- { 261, 3, 1, 14, "OR8To4", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, OperandInfo66 }, // Inst #261 = OR8To4
- { 262, 3, 1, 14, "ORC", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #262 = ORC
- { 263, 3, 1, 14, "ORC8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo3 }, // Inst #263 = ORC8
- { 264, 3, 1, 14, "ORI", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo4 }, // Inst #264 = ORI
- { 265, 3, 1, 14, "ORI8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo5 }, // Inst #265 = ORI8
- { 266, 3, 1, 14, "ORIS", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo4 }, // Inst #266 = ORIS
- { 267, 3, 1, 14, "ORIS8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo5 }, // Inst #267 = ORIS8
- { 268, 4, 1, 25, "RLDCL", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, OperandInfo67 }, // Inst #268 = RLDCL
- { 269, 4, 1, 25, "RLDICL", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, OperandInfo68 }, // Inst #269 = RLDICL
- { 270, 4, 1, 25, "RLDICR", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, OperandInfo68 }, // Inst #270 = RLDICR
- { 271, 5, 1, 25, "RLDIMI", 0|(1<<TID::Commutable)|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, OperandInfo69 }, // Inst #271 = RLDIMI
- { 272, 6, 1, 24, "RLWIMI", 0|(1<<TID::Commutable)|(1<<TID::UnmodeledSideEffects), 0|(1<<2)|(1<<3), NULL, NULL, NULL, OperandInfo70 }, // Inst #272 = RLWIMI
- { 273, 5, 1, 14, "RLWINM", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, OperandInfo71 }, // Inst #273 = RLWINM
- { 274, 5, 1, 14, "RLWINMo", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<2)|(1<<3), NULL, ImplicitList3, NULL, OperandInfo71 }, // Inst #274 = RLWINMo
- { 275, 5, 1, 14, "RLWNM", 0|(1<<TID::UnmodeledSideEffects), 0|(1<<3), NULL, NULL, NULL, OperandInfo72 }, // Inst #275 = RLWNM
- { 276, 5, 1, 52, "SELECT_CC_F4", 0|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0|(1<<1), NULL, NULL, NULL, OperandInfo73 }, // Inst #276 = SELECT_CC_F4
- { 277, 5, 1, 52, "SELECT_CC_F8", 0|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0|(1<<1), NULL, NULL, NULL, OperandInfo74 }, // Inst #277 = SELECT_CC_F8
- { 278, 5, 1, 52, "SELECT_CC_I4", 0|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0|(1<<1), NULL, NULL, NULL, OperandInfo75 }, // Inst #278 = SELECT_CC_I4
- { 279, 5, 1, 52, "SELECT_CC_I8", 0|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0|(1<<1), NULL, NULL, NULL, OperandInfo76 }, // Inst #279 = SELECT_CC_I8
- { 280, 5, 1, 52, "SELECT_CC_VRRC", 0|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0|(1<<1), NULL, NULL, NULL, OperandInfo77 }, // Inst #280 = SELECT_CC_VRRC
- { 281, 3, 1, 25, "SLD", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo78 }, // Inst #281 = SLD
- { 282, 3, 1, 14, "SLW", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #282 = SLW
- { 283, 3, 0, 52, "SPILL_CR", 0|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo42 }, // Inst #283 = SPILL_CR
- { 284, 3, 1, 25, "SRAD", 0, 0|(1<<3), NULL, ImplicitList1, Barriers1, OperandInfo78 }, // Inst #284 = SRAD
- { 285, 3, 1, 25, "SRADI", 0, 0|(1<<3), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #285 = SRADI
- { 286, 3, 1, 26, "SRAW", 0, 0|(1<<3), NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #286 = SRAW
- { 287, 3, 1, 26, "SRAWI", 0, 0|(1<<3), NULL, ImplicitList1, Barriers1, OperandInfo4 }, // Inst #287 = SRAWI
- { 288, 3, 1, 25, "SRD", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo78 }, // Inst #288 = SRD
- { 289, 3, 1, 14, "SRW", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #289 = SRW
- { 290, 3, 0, 33, "STB", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo42 }, // Inst #290 = STB
- { 291, 3, 0, 33, "STB8", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo43 }, // Inst #291 = STB8
- { 292, 4, 1, 33, "STBU", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo79 }, // Inst #292 = STBU
- { 293, 4, 1, 33, "STBU8", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo80 }, // Inst #293 = STBU8
- { 294, 3, 0, 33, "STBX", 0|(1<<TID::MayStore), 0|(1<<2)|(2<<3), NULL, NULL, NULL, OperandInfo46 }, // Inst #294 = STBX
- { 295, 3, 0, 33, "STBX8", 0|(1<<TID::MayStore), 0|(1<<2)|(2<<3), NULL, NULL, NULL, OperandInfo47 }, // Inst #295 = STBX8
- { 296, 3, 0, 46, "STD", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo43 }, // Inst #296 = STD
- { 297, 3, 0, 47, "STDCX", 0|(1<<TID::MayStore), 0, NULL, ImplicitList3, NULL, OperandInfo47 }, // Inst #297 = STDCX
- { 298, 4, 1, 46, "STDU", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo80 }, // Inst #298 = STDU
- { 299, 3, 0, 46, "STDUX", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo47 }, // Inst #299 = STDUX
- { 300, 3, 0, 46, "STDX", 0|(1<<TID::MayStore), 0|(1<<2)|(2<<3), NULL, NULL, NULL, OperandInfo47 }, // Inst #300 = STDX
- { 301, 3, 0, 46, "STDX_32", 0|(1<<TID::MayStore), 0|(1<<2)|(2<<3), NULL, NULL, NULL, OperandInfo46 }, // Inst #301 = STDX_32
- { 302, 3, 0, 46, "STD_32", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo42 }, // Inst #302 = STD_32
- { 303, 3, 0, 51, "STFD", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo50 }, // Inst #303 = STFD
- { 304, 4, 1, 33, "STFDU", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo81 }, // Inst #304 = STFDU
- { 305, 3, 0, 51, "STFDX", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo52 }, // Inst #305 = STFDX
- { 306, 3, 0, 51, "STFIWX", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo52 }, // Inst #306 = STFIWX
- { 307, 3, 0, 51, "STFS", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo53 }, // Inst #307 = STFS
- { 308, 4, 1, 33, "STFSU", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo82 }, // Inst #308 = STFSU
- { 309, 3, 0, 51, "STFSX", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo55 }, // Inst #309 = STFSX
- { 310, 3, 0, 33, "STH", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo42 }, // Inst #310 = STH
- { 311, 3, 0, 33, "STH8", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo43 }, // Inst #311 = STH8
- { 312, 3, 0, 33, "STHBRX", 0|(1<<TID::MayStore), 0|(1<<2)|(2<<3), NULL, NULL, NULL, OperandInfo46 }, // Inst #312 = STHBRX
- { 313, 4, 1, 33, "STHU", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo79 }, // Inst #313 = STHU
- { 314, 4, 1, 33, "STHU8", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo80 }, // Inst #314 = STHU8
- { 315, 3, 0, 33, "STHX", 0|(1<<TID::MayStore), 0|(1<<2)|(2<<3), NULL, NULL, NULL, OperandInfo46 }, // Inst #315 = STHX
- { 316, 3, 0, 33, "STHX8", 0|(1<<TID::MayStore), 0|(1<<2)|(2<<3), NULL, NULL, NULL, OperandInfo47 }, // Inst #316 = STHX8
- { 317, 3, 0, 33, "STVEBX", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo58 }, // Inst #317 = STVEBX
- { 318, 3, 0, 33, "STVEHX", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo58 }, // Inst #318 = STVEHX
- { 319, 3, 0, 33, "STVEWX", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo58 }, // Inst #319 = STVEWX
- { 320, 3, 0, 33, "STVX", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo58 }, // Inst #320 = STVX
- { 321, 3, 0, 33, "STVXL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo58 }, // Inst #321 = STVXL
- { 322, 3, 0, 33, "STW", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo42 }, // Inst #322 = STW
- { 323, 3, 0, 33, "STW8", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo43 }, // Inst #323 = STW8
- { 324, 3, 0, 33, "STWBRX", 0|(1<<TID::MayStore), 0|(1<<2)|(2<<3), NULL, NULL, NULL, OperandInfo46 }, // Inst #324 = STWBRX
- { 325, 3, 0, 49, "STWCX", 0|(1<<TID::MayStore), 0, NULL, ImplicitList3, NULL, OperandInfo46 }, // Inst #325 = STWCX
- { 326, 4, 1, 33, "STWU", 0|(1<<TID::MayStore), 0|(2<<3), NULL, NULL, NULL, OperandInfo79 }, // Inst #326 = STWU
- { 327, 3, 0, 33, "STWUX", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|(2<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #327 = STWUX
- { 328, 3, 0, 33, "STWX", 0|(1<<TID::MayStore), 0|(1<<2)|(2<<3), NULL, NULL, NULL, OperandInfo46 }, // Inst #328 = STWX
- { 329, 3, 0, 33, "STWX8", 0|(1<<TID::MayStore), 0|(1<<2)|(2<<3), NULL, NULL, NULL, OperandInfo47 }, // Inst #329 = STWX8
- { 330, 3, 1, 14, "SUBF", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #330 = SUBF
- { 331, 3, 1, 14, "SUBF8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo3 }, // Inst #331 = SUBF8
- { 332, 3, 1, 14, "SUBFC", 0, 0|(1<<2)|(1<<3), NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #332 = SUBFC
- { 333, 3, 1, 14, "SUBFC8", 0, 0|(1<<2)|(1<<3), NULL, ImplicitList1, Barriers1, OperandInfo3 }, // Inst #333 = SUBFC8
- { 334, 3, 1, 14, "SUBFE", 0, 0|(1<<3), ImplicitList1, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #334 = SUBFE
- { 335, 3, 1, 14, "SUBFE8", 0, 0|(1<<3), ImplicitList1, ImplicitList1, Barriers1, OperandInfo3 }, // Inst #335 = SUBFE8
- { 336, 3, 1, 14, "SUBFIC", 0, 0|(1<<3), NULL, ImplicitList1, Barriers1, OperandInfo4 }, // Inst #336 = SUBFIC
- { 337, 3, 1, 14, "SUBFIC8", 0, 0|(1<<3), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #337 = SUBFIC8
- { 338, 2, 1, 14, "SUBFME", 0, 0|(1<<3), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #338 = SUBFME
- { 339, 2, 1, 14, "SUBFME8", 0, 0|(1<<3), ImplicitList1, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #339 = SUBFME8
- { 340, 2, 1, 14, "SUBFZE", 0, 0|(1<<3), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #340 = SUBFZE
- { 341, 2, 1, 14, "SUBFZE8", 0, 0|(1<<3), ImplicitList1, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #341 = SUBFZE8
- { 342, 0, 0, 50, "SYNC", 0|(1<<TID::Barrier)|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, 0 }, // Inst #342 = SYNC
- { 343, 1, 0, 0, "TAILB", 0|(1<<TID::Return)|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|(7<<3), ImplicitList10, NULL, NULL, OperandInfo8 }, // Inst #343 = TAILB
- { 344, 1, 0, 0, "TAILB8", 0|(1<<TID::Return)|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|(7<<3), ImplicitList10, NULL, NULL, OperandInfo8 }, // Inst #344 = TAILB8
- { 345, 1, 0, 0, "TAILBA", 0|(1<<TID::Return)|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|(7<<3), ImplicitList10, NULL, NULL, OperandInfo8 }, // Inst #345 = TAILBA
- { 346, 1, 0, 0, "TAILBA8", 0|(1<<TID::Return)|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|(7<<3), ImplicitList10, NULL, NULL, OperandInfo8 }, // Inst #346 = TAILBA8
- { 347, 0, 0, 0, "TAILBCTR", 0|(1<<TID::Return)|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|(7<<3), ImplicitList7, NULL, NULL, 0 }, // Inst #347 = TAILBCTR
- { 348, 0, 0, 0, "TAILBCTR8", 0|(1<<TID::Return)|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|(7<<3), ImplicitList7, NULL, NULL, 0 }, // Inst #348 = TAILBCTR8
- { 349, 2, 0, 52, "TCRETURNai", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic), 0, ImplicitList10, NULL, NULL, OperandInfo9 }, // Inst #349 = TCRETURNai
- { 350, 2, 0, 52, "TCRETURNai8", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic), 0, ImplicitList10, NULL, NULL, OperandInfo9 }, // Inst #350 = TCRETURNai8
- { 351, 2, 0, 52, "TCRETURNdi", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0, ImplicitList10, NULL, NULL, OperandInfo9 }, // Inst #351 = TCRETURNdi
- { 352, 2, 0, 52, "TCRETURNdi8", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0, ImplicitList10, NULL, NULL, OperandInfo9 }, // Inst #352 = TCRETURNdi8
- { 353, 2, 0, 52, "TCRETURNri", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0, ImplicitList10, NULL, NULL, OperandInfo83 }, // Inst #353 = TCRETURNri
- { 354, 2, 0, 52, "TCRETURNri8", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0, ImplicitList10, NULL, NULL, OperandInfo84 }, // Inst #354 = TCRETURNri8
- { 355, 0, 0, 33, "TRAP", 0|(1<<TID::Barrier)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, 0 }, // Inst #355 = TRAP
- { 356, 2, 1, 52, "UPDATE_VRSAVE", 0|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo6 }, // Inst #356 = UPDATE_VRSAVE
- { 357, 3, 1, 67, "VADDCUW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #357 = VADDCUW
- { 358, 3, 1, 67, "VADDFP", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #358 = VADDFP
- { 359, 3, 1, 67, "VADDSBS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #359 = VADDSBS
- { 360, 3, 1, 67, "VADDSHS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #360 = VADDSHS
- { 361, 3, 1, 67, "VADDSWS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #361 = VADDSWS
- { 362, 3, 1, 70, "VADDUBM", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #362 = VADDUBM
- { 363, 3, 1, 67, "VADDUBS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #363 = VADDUBS
- { 364, 3, 1, 70, "VADDUHM", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #364 = VADDUHM
- { 365, 3, 1, 67, "VADDUHS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #365 = VADDUHS
- { 366, 3, 1, 70, "VADDUWM", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #366 = VADDUWM
- { 367, 3, 1, 67, "VADDUWS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #367 = VADDUWS
- { 368, 3, 1, 67, "VAND", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #368 = VAND
- { 369, 3, 1, 67, "VANDC", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #369 = VANDC
- { 370, 3, 1, 67, "VAVGSB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #370 = VAVGSB
- { 371, 3, 1, 67, "VAVGSH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #371 = VAVGSH
- { 372, 3, 1, 67, "VAVGSW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #372 = VAVGSW
- { 373, 3, 1, 67, "VAVGUB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #373 = VAVGUB
- { 374, 3, 1, 67, "VAVGUH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #374 = VAVGUH
- { 375, 3, 1, 67, "VAVGUW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #375 = VAVGUW
- { 376, 3, 1, 67, "VCFSX", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo86 }, // Inst #376 = VCFSX
- { 377, 3, 1, 67, "VCFUX", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo86 }, // Inst #377 = VCFUX
- { 378, 3, 1, 68, "VCMPBFP", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #378 = VCMPBFP
- { 379, 3, 1, 68, "VCMPBFPo", 0, 0|(5<<3), NULL, ImplicitList16, NULL, OperandInfo85 }, // Inst #379 = VCMPBFPo
- { 380, 3, 1, 68, "VCMPEQFP", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #380 = VCMPEQFP
- { 381, 3, 1, 68, "VCMPEQFPo", 0, 0|(5<<3), NULL, ImplicitList16, NULL, OperandInfo85 }, // Inst #381 = VCMPEQFPo
- { 382, 3, 1, 68, "VCMPEQUB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #382 = VCMPEQUB
- { 383, 3, 1, 68, "VCMPEQUBo", 0, 0|(5<<3), NULL, ImplicitList16, NULL, OperandInfo85 }, // Inst #383 = VCMPEQUBo
- { 384, 3, 1, 68, "VCMPEQUH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #384 = VCMPEQUH
- { 385, 3, 1, 68, "VCMPEQUHo", 0, 0|(5<<3), NULL, ImplicitList16, NULL, OperandInfo85 }, // Inst #385 = VCMPEQUHo
- { 386, 3, 1, 68, "VCMPEQUW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #386 = VCMPEQUW
- { 387, 3, 1, 68, "VCMPEQUWo", 0, 0|(5<<3), NULL, ImplicitList16, NULL, OperandInfo85 }, // Inst #387 = VCMPEQUWo
- { 388, 3, 1, 68, "VCMPGEFP", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #388 = VCMPGEFP
- { 389, 3, 1, 68, "VCMPGEFPo", 0, 0|(5<<3), NULL, ImplicitList16, NULL, OperandInfo85 }, // Inst #389 = VCMPGEFPo
- { 390, 3, 1, 68, "VCMPGTFP", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #390 = VCMPGTFP
- { 391, 3, 1, 68, "VCMPGTFPo", 0, 0|(5<<3), NULL, ImplicitList16, NULL, OperandInfo85 }, // Inst #391 = VCMPGTFPo
- { 392, 3, 1, 68, "VCMPGTSB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #392 = VCMPGTSB
- { 393, 3, 1, 68, "VCMPGTSBo", 0, 0|(5<<3), NULL, ImplicitList16, NULL, OperandInfo85 }, // Inst #393 = VCMPGTSBo
- { 394, 3, 1, 68, "VCMPGTSH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #394 = VCMPGTSH
- { 395, 3, 1, 68, "VCMPGTSHo", 0, 0|(5<<3), NULL, ImplicitList16, NULL, OperandInfo85 }, // Inst #395 = VCMPGTSHo
- { 396, 3, 1, 68, "VCMPGTSW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #396 = VCMPGTSW
- { 397, 3, 1, 68, "VCMPGTSWo", 0, 0|(5<<3), NULL, ImplicitList16, NULL, OperandInfo85 }, // Inst #397 = VCMPGTSWo
- { 398, 3, 1, 68, "VCMPGTUB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #398 = VCMPGTUB
- { 399, 3, 1, 68, "VCMPGTUBo", 0, 0|(5<<3), NULL, ImplicitList16, NULL, OperandInfo85 }, // Inst #399 = VCMPGTUBo
- { 400, 3, 1, 68, "VCMPGTUH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #400 = VCMPGTUH
- { 401, 3, 1, 68, "VCMPGTUHo", 0, 0|(5<<3), NULL, ImplicitList16, NULL, OperandInfo85 }, // Inst #401 = VCMPGTUHo
- { 402, 3, 1, 68, "VCMPGTUW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #402 = VCMPGTUW
- { 403, 3, 1, 68, "VCMPGTUWo", 0, 0|(5<<3), NULL, ImplicitList16, NULL, OperandInfo85 }, // Inst #403 = VCMPGTUWo
- { 404, 3, 1, 67, "VCTSXS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo86 }, // Inst #404 = VCTSXS
- { 405, 3, 1, 67, "VCTUXS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo86 }, // Inst #405 = VCTUXS
- { 406, 2, 1, 67, "VEXPTEFP", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo87 }, // Inst #406 = VEXPTEFP
- { 407, 2, 1, 67, "VLOGEFP", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo87 }, // Inst #407 = VLOGEFP
- { 408, 4, 1, 67, "VMADDFP", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo88 }, // Inst #408 = VMADDFP
- { 409, 3, 1, 67, "VMAXFP", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #409 = VMAXFP
- { 410, 3, 1, 67, "VMAXSB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #410 = VMAXSB
- { 411, 3, 1, 67, "VMAXSH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #411 = VMAXSH
- { 412, 3, 1, 67, "VMAXSW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #412 = VMAXSW
- { 413, 3, 1, 67, "VMAXUB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #413 = VMAXUB
- { 414, 3, 1, 67, "VMAXUH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #414 = VMAXUH
- { 415, 3, 1, 67, "VMAXUW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #415 = VMAXUW
- { 416, 4, 1, 67, "VMHADDSHS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo88 }, // Inst #416 = VMHADDSHS
- { 417, 4, 1, 67, "VMHRADDSHS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo88 }, // Inst #417 = VMHRADDSHS
- { 418, 3, 1, 67, "VMINFP", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #418 = VMINFP
- { 419, 3, 1, 67, "VMINSB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #419 = VMINSB
- { 420, 3, 1, 67, "VMINSH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #420 = VMINSH
- { 421, 3, 1, 67, "VMINSW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #421 = VMINSW
- { 422, 3, 1, 67, "VMINUB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #422 = VMINUB
- { 423, 3, 1, 67, "VMINUH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #423 = VMINUH
- { 424, 3, 1, 67, "VMINUW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #424 = VMINUW
- { 425, 4, 1, 67, "VMLADDUHM", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo88 }, // Inst #425 = VMLADDUHM
- { 426, 3, 1, 67, "VMRGHB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #426 = VMRGHB
- { 427, 3, 1, 67, "VMRGHH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #427 = VMRGHH
- { 428, 3, 1, 67, "VMRGHW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #428 = VMRGHW
- { 429, 3, 1, 67, "VMRGLB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #429 = VMRGLB
- { 430, 3, 1, 67, "VMRGLH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #430 = VMRGLH
- { 431, 3, 1, 67, "VMRGLW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #431 = VMRGLW
- { 432, 4, 1, 67, "VMSUMMBM", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo88 }, // Inst #432 = VMSUMMBM
- { 433, 4, 1, 67, "VMSUMSHM", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo88 }, // Inst #433 = VMSUMSHM
- { 434, 4, 1, 67, "VMSUMSHS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo88 }, // Inst #434 = VMSUMSHS
- { 435, 4, 1, 67, "VMSUMUBM", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo88 }, // Inst #435 = VMSUMUBM
- { 436, 4, 1, 67, "VMSUMUHM", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo88 }, // Inst #436 = VMSUMUHM
- { 437, 4, 1, 67, "VMSUMUHS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo88 }, // Inst #437 = VMSUMUHS
- { 438, 3, 1, 67, "VMULESB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #438 = VMULESB
- { 439, 3, 1, 67, "VMULESH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #439 = VMULESH
- { 440, 3, 1, 67, "VMULEUB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #440 = VMULEUB
- { 441, 3, 1, 67, "VMULEUH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #441 = VMULEUH
- { 442, 3, 1, 67, "VMULOSB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #442 = VMULOSB
- { 443, 3, 1, 67, "VMULOSH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #443 = VMULOSH
- { 444, 3, 1, 67, "VMULOUB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #444 = VMULOUB
- { 445, 3, 1, 67, "VMULOUH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #445 = VMULOUH
- { 446, 4, 1, 67, "VNMSUBFP", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo88 }, // Inst #446 = VNMSUBFP
- { 447, 3, 1, 67, "VNOR", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #447 = VNOR
- { 448, 3, 1, 67, "VOR", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #448 = VOR
- { 449, 4, 1, 67, "VPERM", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo88 }, // Inst #449 = VPERM
- { 450, 3, 1, 67, "VPKPX", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #450 = VPKPX
- { 451, 3, 1, 67, "VPKSHSS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #451 = VPKSHSS
- { 452, 3, 1, 67, "VPKSHUS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #452 = VPKSHUS
- { 453, 3, 1, 67, "VPKSWSS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #453 = VPKSWSS
- { 454, 3, 1, 67, "VPKSWUS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #454 = VPKSWUS
- { 455, 3, 1, 67, "VPKUHUM", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #455 = VPKUHUM
- { 456, 3, 1, 67, "VPKUHUS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #456 = VPKUHUS
- { 457, 3, 1, 67, "VPKUWUM", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #457 = VPKUWUM
- { 458, 3, 1, 67, "VPKUWUS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #458 = VPKUWUS
- { 459, 2, 1, 67, "VREFP", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo87 }, // Inst #459 = VREFP
- { 460, 2, 1, 67, "VRFIM", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo87 }, // Inst #460 = VRFIM
- { 461, 2, 1, 67, "VRFIN", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo87 }, // Inst #461 = VRFIN
- { 462, 2, 1, 67, "VRFIP", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo87 }, // Inst #462 = VRFIP
- { 463, 2, 1, 67, "VRFIZ", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo87 }, // Inst #463 = VRFIZ
- { 464, 3, 1, 67, "VRLB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #464 = VRLB
- { 465, 3, 1, 67, "VRLH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #465 = VRLH
- { 466, 3, 1, 67, "VRLW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #466 = VRLW
- { 467, 2, 1, 67, "VRSQRTEFP", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo87 }, // Inst #467 = VRSQRTEFP
- { 468, 4, 1, 67, "VSEL", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo88 }, // Inst #468 = VSEL
- { 469, 3, 1, 67, "VSL", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #469 = VSL
- { 470, 3, 1, 67, "VSLB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #470 = VSLB
- { 471, 4, 1, 67, "VSLDOI", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo89 }, // Inst #471 = VSLDOI
- { 472, 3, 1, 67, "VSLH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #472 = VSLH
- { 473, 3, 1, 67, "VSLO", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #473 = VSLO
- { 474, 3, 1, 67, "VSLW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #474 = VSLW
- { 475, 3, 1, 71, "VSPLTB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo86 }, // Inst #475 = VSPLTB
- { 476, 3, 1, 71, "VSPLTH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo86 }, // Inst #476 = VSPLTH
- { 477, 2, 1, 71, "VSPLTISB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo90 }, // Inst #477 = VSPLTISB
- { 478, 2, 1, 71, "VSPLTISH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo90 }, // Inst #478 = VSPLTISH
- { 479, 2, 1, 71, "VSPLTISW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo90 }, // Inst #479 = VSPLTISW
- { 480, 3, 1, 71, "VSPLTW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo86 }, // Inst #480 = VSPLTW
- { 481, 3, 1, 67, "VSR", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #481 = VSR
- { 482, 3, 1, 67, "VSRAB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #482 = VSRAB
- { 483, 3, 1, 67, "VSRAH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #483 = VSRAH
- { 484, 3, 1, 67, "VSRAW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #484 = VSRAW
- { 485, 3, 1, 67, "VSRB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #485 = VSRB
- { 486, 3, 1, 67, "VSRH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #486 = VSRH
- { 487, 3, 1, 67, "VSRO", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #487 = VSRO
- { 488, 3, 1, 67, "VSRW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #488 = VSRW
- { 489, 3, 1, 67, "VSUBCUW", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #489 = VSUBCUW
- { 490, 3, 1, 70, "VSUBFP", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #490 = VSUBFP
- { 491, 3, 1, 67, "VSUBSBS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #491 = VSUBSBS
- { 492, 3, 1, 67, "VSUBSHS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #492 = VSUBSHS
- { 493, 3, 1, 67, "VSUBSWS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #493 = VSUBSWS
- { 494, 3, 1, 70, "VSUBUBM", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #494 = VSUBUBM
- { 495, 3, 1, 67, "VSUBUBS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #495 = VSUBUBS
- { 496, 3, 1, 70, "VSUBUHM", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #496 = VSUBUHM
- { 497, 3, 1, 67, "VSUBUHS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #497 = VSUBUHS
- { 498, 3, 1, 70, "VSUBUWM", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #498 = VSUBUWM
- { 499, 3, 1, 67, "VSUBUWS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #499 = VSUBUWS
- { 500, 3, 1, 67, "VSUM2SWS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #500 = VSUM2SWS
- { 501, 3, 1, 67, "VSUM4SBS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #501 = VSUM4SBS
- { 502, 3, 1, 67, "VSUM4SHS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #502 = VSUM4SHS
- { 503, 3, 1, 67, "VSUM4UBS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #503 = VSUM4UBS
- { 504, 3, 1, 67, "VSUMSWS", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #504 = VSUMSWS
- { 505, 2, 1, 67, "VUPKHPX", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo87 }, // Inst #505 = VUPKHPX
- { 506, 2, 1, 67, "VUPKHSB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo87 }, // Inst #506 = VUPKHSB
- { 507, 2, 1, 67, "VUPKHSH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo87 }, // Inst #507 = VUPKHSH
- { 508, 2, 1, 67, "VUPKLPX", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo87 }, // Inst #508 = VUPKLPX
- { 509, 2, 1, 67, "VUPKLSB", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo87 }, // Inst #509 = VUPKLSB
- { 510, 2, 1, 67, "VUPKLSH", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo87 }, // Inst #510 = VUPKLSH
- { 511, 3, 1, 67, "VXOR", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo85 }, // Inst #511 = VXOR
- { 512, 1, 1, 67, "V_SET0", 0, 0|(5<<3), NULL, NULL, NULL, OperandInfo62 }, // Inst #512 = V_SET0
- { 513, 3, 1, 14, "XOR", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo2 }, // Inst #513 = XOR
- { 514, 3, 1, 14, "XOR8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo3 }, // Inst #514 = XOR8
- { 515, 3, 1, 14, "XORI", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo4 }, // Inst #515 = XORI
- { 516, 3, 1, 14, "XORI8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo5 }, // Inst #516 = XORI8
- { 517, 3, 1, 14, "XORIS", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo4 }, // Inst #517 = XORIS
- { 518, 3, 1, 14, "XORIS8", 0, 0|(1<<3), NULL, NULL, NULL, OperandInfo5 }, // Inst #518 = XORIS8
+ { 0, 0, 0, 52, "PHI", 0|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, 0 }, // Inst #0 = PHI
+ { 1, 0, 0, 52, "INLINEASM", 0|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, 0 }, // Inst #1 = INLINEASM
+ { 2, 1, 0, 52, "PROLOG_LABEL", 0|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #2 = PROLOG_LABEL
+ { 3, 1, 0, 52, "EH_LABEL", 0|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #3 = EH_LABEL
+ { 4, 1, 0, 52, "GC_LABEL", 0|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #4 = GC_LABEL
+ { 5, 0, 0, 52, "KILL", 0|(1<<TID::Variadic), 0x0ULL, NULL, NULL, NULL, 0 }, // Inst #5 = KILL
+ { 6, 3, 1, 52, "EXTRACT_SUBREG", 0, 0x0ULL, NULL, NULL, NULL, OperandInfo3 }, // Inst #6 = EXTRACT_SUBREG
+ { 7, 4, 1, 52, "INSERT_SUBREG", 0, 0x0ULL, NULL, NULL, NULL, OperandInfo4 }, // Inst #7 = INSERT_SUBREG
+ { 8, 1, 1, 52, "IMPLICIT_DEF", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0x0ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #8 = IMPLICIT_DEF
+ { 9, 4, 1, 52, "SUBREG_TO_REG", 0, 0x0ULL, NULL, NULL, NULL, OperandInfo5 }, // Inst #9 = SUBREG_TO_REG
+ { 10, 3, 1, 52, "COPY_TO_REGCLASS", 0|(1<<TID::CheapAsAMove), 0x0ULL, NULL, NULL, NULL, OperandInfo3 }, // Inst #10 = COPY_TO_REGCLASS
+ { 11, 0, 0, 52, "DBG_VALUE", 0|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::CheapAsAMove), 0x0ULL, NULL, NULL, NULL, 0 }, // Inst #11 = DBG_VALUE
+ { 12, 1, 1, 52, "REG_SEQUENCE", 0|(1<<TID::Variadic)|(1<<TID::CheapAsAMove), 0x0ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #12 = REG_SEQUENCE
+ { 13, 2, 1, 52, "COPY", 0|(1<<TID::CheapAsAMove), 0x0ULL, NULL, NULL, NULL, OperandInfo6 }, // Inst #13 = COPY
+ { 14, 3, 1, 14, "ADD4", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #14 = ADD4
+ { 15, 3, 1, 14, "ADD8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #15 = ADD8
+ { 16, 3, 1, 14, "ADDC", 0, 0xcULL, NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #16 = ADDC
+ { 17, 3, 1, 14, "ADDC8", 0, 0xcULL, NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #17 = ADDC8
+ { 18, 3, 1, 14, "ADDE", 0, 0x8ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #18 = ADDE
+ { 19, 3, 1, 14, "ADDE8", 0, 0x8ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #19 = ADDE8
+ { 20, 3, 1, 14, "ADDI", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo9 }, // Inst #20 = ADDI
+ { 21, 3, 1, 14, "ADDI8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo10 }, // Inst #21 = ADDI8
+ { 22, 3, 1, 14, "ADDIC", 0, 0xcULL, NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #22 = ADDIC
+ { 23, 3, 1, 14, "ADDIC8", 0, 0x8ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #23 = ADDIC8
+ { 24, 3, 1, 14, "ADDICo", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #24 = ADDICo
+ { 25, 3, 1, 14, "ADDIS", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo9 }, // Inst #25 = ADDIS
+ { 26, 3, 1, 14, "ADDIS8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo10 }, // Inst #26 = ADDIS8
+ { 27, 2, 1, 14, "ADDME", 0, 0x8ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #27 = ADDME
+ { 28, 2, 1, 14, "ADDME8", 0, 0x8ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #28 = ADDME8
+ { 29, 2, 1, 14, "ADDZE", 0, 0x8ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #29 = ADDZE
+ { 30, 2, 1, 14, "ADDZE8", 0, 0x8ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #30 = ADDZE8
+ { 31, 1, 0, 52, "ADJCALLSTACKDOWN", 0, 0x0ULL, ImplicitList2, ImplicitList2, NULL, OperandInfo2 }, // Inst #31 = ADJCALLSTACKDOWN
+ { 32, 2, 0, 52, "ADJCALLSTACKUP", 0, 0x0ULL, ImplicitList2, ImplicitList2, NULL, OperandInfo6 }, // Inst #32 = ADJCALLSTACKUP
+ { 33, 3, 1, 14, "AND", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #33 = AND
+ { 34, 3, 1, 14, "AND8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #34 = AND8
+ { 35, 3, 1, 14, "ANDC", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #35 = ANDC
+ { 36, 3, 1, 14, "ANDC8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #36 = ANDC8
+ { 37, 3, 1, 14, "ANDISo", 0, 0x8ULL, NULL, ImplicitList3, NULL, OperandInfo9 }, // Inst #37 = ANDISo
+ { 38, 3, 1, 14, "ANDISo8", 0, 0x8ULL, NULL, ImplicitList3, NULL, OperandInfo10 }, // Inst #38 = ANDISo8
+ { 39, 3, 1, 14, "ANDIo", 0, 0x8ULL, NULL, ImplicitList3, NULL, OperandInfo9 }, // Inst #39 = ANDIo
+ { 40, 3, 1, 14, "ANDIo8", 0, 0x8ULL, NULL, ImplicitList3, NULL, OperandInfo10 }, // Inst #40 = ANDIo8
+ { 41, 5, 1, 52, "ATOMIC_CMP_SWAP_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo13 }, // Inst #41 = ATOMIC_CMP_SWAP_I16
+ { 42, 5, 1, 52, "ATOMIC_CMP_SWAP_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo13 }, // Inst #42 = ATOMIC_CMP_SWAP_I32
+ { 43, 5, 1, 52, "ATOMIC_CMP_SWAP_I64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo14 }, // Inst #43 = ATOMIC_CMP_SWAP_I64
+ { 44, 5, 1, 52, "ATOMIC_CMP_SWAP_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo13 }, // Inst #44 = ATOMIC_CMP_SWAP_I8
+ { 45, 4, 1, 52, "ATOMIC_LOAD_ADD_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #45 = ATOMIC_LOAD_ADD_I16
+ { 46, 4, 1, 52, "ATOMIC_LOAD_ADD_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #46 = ATOMIC_LOAD_ADD_I32
+ { 47, 4, 1, 52, "ATOMIC_LOAD_ADD_I64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo16 }, // Inst #47 = ATOMIC_LOAD_ADD_I64
+ { 48, 4, 1, 52, "ATOMIC_LOAD_ADD_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #48 = ATOMIC_LOAD_ADD_I8
+ { 49, 4, 1, 52, "ATOMIC_LOAD_AND_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #49 = ATOMIC_LOAD_AND_I16
+ { 50, 4, 1, 52, "ATOMIC_LOAD_AND_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #50 = ATOMIC_LOAD_AND_I32
+ { 51, 4, 1, 52, "ATOMIC_LOAD_AND_I64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo16 }, // Inst #51 = ATOMIC_LOAD_AND_I64
+ { 52, 4, 1, 52, "ATOMIC_LOAD_AND_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #52 = ATOMIC_LOAD_AND_I8
+ { 53, 4, 1, 52, "ATOMIC_LOAD_NAND_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #53 = ATOMIC_LOAD_NAND_I16
+ { 54, 4, 1, 52, "ATOMIC_LOAD_NAND_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #54 = ATOMIC_LOAD_NAND_I32
+ { 55, 4, 1, 52, "ATOMIC_LOAD_NAND_I64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo16 }, // Inst #55 = ATOMIC_LOAD_NAND_I64
+ { 56, 4, 1, 52, "ATOMIC_LOAD_NAND_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #56 = ATOMIC_LOAD_NAND_I8
+ { 57, 4, 1, 52, "ATOMIC_LOAD_OR_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #57 = ATOMIC_LOAD_OR_I16
+ { 58, 4, 1, 52, "ATOMIC_LOAD_OR_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #58 = ATOMIC_LOAD_OR_I32
+ { 59, 4, 1, 52, "ATOMIC_LOAD_OR_I64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo16 }, // Inst #59 = ATOMIC_LOAD_OR_I64
+ { 60, 4, 1, 52, "ATOMIC_LOAD_OR_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #60 = ATOMIC_LOAD_OR_I8
+ { 61, 4, 1, 52, "ATOMIC_LOAD_SUB_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #61 = ATOMIC_LOAD_SUB_I16
+ { 62, 4, 1, 52, "ATOMIC_LOAD_SUB_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #62 = ATOMIC_LOAD_SUB_I32
+ { 63, 4, 1, 52, "ATOMIC_LOAD_SUB_I64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo16 }, // Inst #63 = ATOMIC_LOAD_SUB_I64
+ { 64, 4, 1, 52, "ATOMIC_LOAD_SUB_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #64 = ATOMIC_LOAD_SUB_I8
+ { 65, 4, 1, 52, "ATOMIC_LOAD_XOR_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #65 = ATOMIC_LOAD_XOR_I16
+ { 66, 4, 1, 52, "ATOMIC_LOAD_XOR_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #66 = ATOMIC_LOAD_XOR_I32
+ { 67, 4, 1, 52, "ATOMIC_LOAD_XOR_I64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo16 }, // Inst #67 = ATOMIC_LOAD_XOR_I64
+ { 68, 4, 1, 52, "ATOMIC_LOAD_XOR_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #68 = ATOMIC_LOAD_XOR_I8
+ { 69, 4, 1, 52, "ATOMIC_SWAP_I16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #69 = ATOMIC_SWAP_I16
+ { 70, 4, 1, 52, "ATOMIC_SWAP_I32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #70 = ATOMIC_SWAP_I32
+ { 71, 4, 1, 52, "ATOMIC_SWAP_I64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo16 }, // Inst #71 = ATOMIC_SWAP_I64
+ { 72, 4, 1, 52, "ATOMIC_SWAP_I8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList3, NULL, NULL, OperandInfo15 }, // Inst #72 = ATOMIC_SWAP_I8
+ { 73, 1, 0, 0, "B", 0|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Terminator), 0x38ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #73 = B
+ { 74, 3, 0, 0, "BCC", 0|(1<<TID::Branch)|(1<<TID::Predicable)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x38ULL, NULL, NULL, NULL, OperandInfo17 }, // Inst #74 = BCC
+ { 75, 0, 0, 0, "BCTR", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x38ULL, ImplicitList4, NULL, NULL, 0 }, // Inst #75 = BCTR
+ { 76, 0, 0, 0, "BCTRL8_Darwin", 0|(1<<TID::Call)|(1<<TID::Variadic), 0x38ULL, ImplicitList5, ImplicitList6, Barriers2, 0 }, // Inst #76 = BCTRL8_Darwin
+ { 77, 0, 0, 0, "BCTRL8_ELF", 0|(1<<TID::Call)|(1<<TID::Variadic), 0x38ULL, ImplicitList5, ImplicitList6, Barriers2, 0 }, // Inst #77 = BCTRL8_ELF
+ { 78, 0, 0, 0, "BCTRL_Darwin", 0|(1<<TID::Call)|(1<<TID::Variadic), 0x38ULL, ImplicitList7, ImplicitList8, Barriers3, 0 }, // Inst #78 = BCTRL_Darwin
+ { 79, 0, 0, 0, "BCTRL_SVR4", 0|(1<<TID::Call)|(1<<TID::Variadic), 0x38ULL, ImplicitList7, ImplicitList9, Barriers3, 0 }, // Inst #79 = BCTRL_SVR4
+ { 80, 1, 0, 0, "BL8_Darwin", 0|(1<<TID::Call)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x38ULL, ImplicitList10, ImplicitList6, Barriers2, OperandInfo2 }, // Inst #80 = BL8_Darwin
+ { 81, 1, 0, 0, "BL8_ELF", 0|(1<<TID::Call)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x38ULL, ImplicitList10, ImplicitList6, Barriers2, OperandInfo2 }, // Inst #81 = BL8_ELF
+ { 82, 1, 0, 0, "BLA8_Darwin", 0|(1<<TID::Call)|(1<<TID::Variadic), 0x38ULL, ImplicitList10, ImplicitList6, Barriers2, OperandInfo2 }, // Inst #82 = BLA8_Darwin
+ { 83, 1, 0, 0, "BLA8_ELF", 0|(1<<TID::Call)|(1<<TID::Variadic), 0x38ULL, ImplicitList10, ImplicitList6, Barriers2, OperandInfo2 }, // Inst #83 = BLA8_ELF
+ { 84, 1, 0, 0, "BLA_Darwin", 0|(1<<TID::Call)|(1<<TID::Variadic), 0x38ULL, ImplicitList10, ImplicitList8, Barriers3, OperandInfo2 }, // Inst #84 = BLA_Darwin
+ { 85, 1, 0, 0, "BLA_SVR4", 0|(1<<TID::Call)|(1<<TID::Variadic), 0x38ULL, ImplicitList10, ImplicitList9, Barriers3, OperandInfo2 }, // Inst #85 = BLA_SVR4
+ { 86, 2, 0, 0, "BLR", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Predicable)|(1<<TID::Terminator)|(1<<TID::Variadic), 0x38ULL, ImplicitList11, NULL, NULL, OperandInfo18 }, // Inst #86 = BLR
+ { 87, 1, 0, 0, "BL_Darwin", 0|(1<<TID::Call)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x38ULL, ImplicitList10, ImplicitList8, Barriers3, OperandInfo2 }, // Inst #87 = BL_Darwin
+ { 88, 1, 0, 0, "BL_SVR4", 0|(1<<TID::Call)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x38ULL, ImplicitList10, ImplicitList9, Barriers3, OperandInfo2 }, // Inst #88 = BL_SVR4
+ { 89, 3, 1, 11, "CMPD", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, OperandInfo19 }, // Inst #89 = CMPD
+ { 90, 3, 1, 11, "CMPDI", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, OperandInfo20 }, // Inst #90 = CMPDI
+ { 91, 3, 1, 11, "CMPLD", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, OperandInfo19 }, // Inst #91 = CMPLD
+ { 92, 3, 1, 11, "CMPLDI", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, OperandInfo20 }, // Inst #92 = CMPLDI
+ { 93, 3, 1, 11, "CMPLW", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, OperandInfo21 }, // Inst #93 = CMPLW
+ { 94, 3, 1, 11, "CMPLWI", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, OperandInfo22 }, // Inst #94 = CMPLWI
+ { 95, 3, 1, 11, "CMPW", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, OperandInfo21 }, // Inst #95 = CMPW
+ { 96, 3, 1, 11, "CMPWI", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, OperandInfo22 }, // Inst #96 = CMPWI
+ { 97, 2, 1, 14, "CNTLZD", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo12 }, // Inst #97 = CNTLZD
+ { 98, 2, 1, 14, "CNTLZW", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo11 }, // Inst #98 = CNTLZW
+ { 99, 3, 1, 1, "CREQV", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo23 }, // Inst #99 = CREQV
+ { 100, 3, 1, 1, "CROR", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo23 }, // Inst #100 = CROR
+ { 101, 1, 1, 1, "CRSET", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo24 }, // Inst #101 = CRSET
+ { 102, 2, 0, 30, "DCBA", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x2ULL, NULL, NULL, NULL, OperandInfo25 }, // Inst #102 = DCBA
+ { 103, 2, 0, 30, "DCBF", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x2ULL, NULL, NULL, NULL, OperandInfo25 }, // Inst #103 = DCBF
+ { 104, 2, 0, 30, "DCBI", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x2ULL, NULL, NULL, NULL, OperandInfo25 }, // Inst #104 = DCBI
+ { 105, 2, 0, 30, "DCBST", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x2ULL, NULL, NULL, NULL, OperandInfo25 }, // Inst #105 = DCBST
+ { 106, 2, 0, 30, "DCBT", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x2ULL, NULL, NULL, NULL, OperandInfo25 }, // Inst #106 = DCBT
+ { 107, 2, 0, 30, "DCBTST", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x2ULL, NULL, NULL, NULL, OperandInfo25 }, // Inst #107 = DCBTST
+ { 108, 2, 0, 30, "DCBZ", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x2ULL, NULL, NULL, NULL, OperandInfo25 }, // Inst #108 = DCBZ
+ { 109, 2, 0, 30, "DCBZL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x2ULL, NULL, NULL, NULL, OperandInfo25 }, // Inst #109 = DCBZL
+ { 110, 3, 1, 12, "DIVD", 0, 0xdULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #110 = DIVD
+ { 111, 3, 1, 12, "DIVDU", 0, 0xdULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #111 = DIVDU
+ { 112, 3, 1, 13, "DIVW", 0, 0xdULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #112 = DIVW
+ { 113, 3, 1, 13, "DIVWU", 0, 0xdULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #113 = DIVWU
+ { 114, 4, 0, 33, "DSS", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo5 }, // Inst #114 = DSS
+ { 115, 4, 0, 33, "DSSALL", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo5 }, // Inst #115 = DSSALL
+ { 116, 4, 0, 33, "DST", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo26 }, // Inst #116 = DST
+ { 117, 4, 0, 33, "DST64", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo27 }, // Inst #117 = DST64
+ { 118, 4, 0, 33, "DSTST", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo26 }, // Inst #118 = DSTST
+ { 119, 4, 0, 33, "DSTST64", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo27 }, // Inst #119 = DSTST64
+ { 120, 4, 0, 33, "DSTSTT", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo26 }, // Inst #120 = DSTSTT
+ { 121, 4, 0, 33, "DSTSTT64", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo27 }, // Inst #121 = DSTSTT64
+ { 122, 4, 0, 33, "DSTT", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo26 }, // Inst #122 = DSTT
+ { 123, 4, 0, 33, "DSTT64", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo27 }, // Inst #123 = DSTT64
+ { 124, 4, 1, 52, "DYNALLOC", 0, 0x0ULL, ImplicitList2, ImplicitList2, NULL, OperandInfo28 }, // Inst #124 = DYNALLOC
+ { 125, 4, 1, 52, "DYNALLOC8", 0, 0x0ULL, ImplicitList12, ImplicitList12, NULL, OperandInfo29 }, // Inst #125 = DYNALLOC8
+ { 126, 3, 1, 14, "EQV", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #126 = EQV
+ { 127, 3, 1, 14, "EQV8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #127 = EQV8
+ { 128, 2, 1, 14, "EXTSB", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo11 }, // Inst #128 = EXTSB
+ { 129, 2, 1, 14, "EXTSB8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo12 }, // Inst #129 = EXTSB8
+ { 130, 2, 1, 14, "EXTSH", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo11 }, // Inst #130 = EXTSH
+ { 131, 2, 1, 14, "EXTSH8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo12 }, // Inst #131 = EXTSH8
+ { 132, 2, 1, 14, "EXTSW", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo12 }, // Inst #132 = EXTSW
+ { 133, 2, 1, 14, "EXTSW_32", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo11 }, // Inst #133 = EXTSW_32
+ { 134, 2, 1, 14, "EXTSW_32_64", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo30 }, // Inst #134 = EXTSW_32_64
+ { 135, 2, 1, 8, "FABSD", 0, 0x18ULL, NULL, NULL, NULL, OperandInfo31 }, // Inst #135 = FABSD
+ { 136, 2, 1, 8, "FABSS", 0, 0x18ULL, NULL, NULL, NULL, OperandInfo32 }, // Inst #136 = FABSS
+ { 137, 3, 1, 8, "FADD", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo33 }, // Inst #137 = FADD
+ { 138, 3, 1, 8, "FADDS", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo34 }, // Inst #138 = FADDS
+ { 139, 3, 1, 8, "FADDrtz", 0, 0x1aULL, ImplicitList10, NULL, NULL, OperandInfo33 }, // Inst #139 = FADDrtz
+ { 140, 2, 1, 8, "FCFID", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo31 }, // Inst #140 = FCFID
+ { 141, 3, 1, 4, "FCMPUD", 0|(1<<TID::UnmodeledSideEffects), 0x18ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #141 = FCMPUD
+ { 142, 3, 1, 4, "FCMPUS", 0|(1<<TID::UnmodeledSideEffects), 0x18ULL, NULL, NULL, NULL, OperandInfo36 }, // Inst #142 = FCMPUS
+ { 143, 2, 1, 8, "FCTIDZ", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo31 }, // Inst #143 = FCTIDZ
+ { 144, 2, 1, 8, "FCTIWZ", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo31 }, // Inst #144 = FCTIWZ
+ { 145, 3, 1, 5, "FDIV", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo33 }, // Inst #145 = FDIV
+ { 146, 3, 1, 6, "FDIVS", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo34 }, // Inst #146 = FDIVS
+ { 147, 4, 1, 7, "FMADD", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo37 }, // Inst #147 = FMADD
+ { 148, 4, 1, 8, "FMADDS", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo38 }, // Inst #148 = FMADDS
+ { 149, 2, 1, 8, "FMR", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo32 }, // Inst #149 = FMR
+ { 150, 4, 1, 7, "FMSUB", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo37 }, // Inst #150 = FMSUB
+ { 151, 4, 1, 8, "FMSUBS", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo38 }, // Inst #151 = FMSUBS
+ { 152, 3, 1, 7, "FMUL", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo33 }, // Inst #152 = FMUL
+ { 153, 3, 1, 8, "FMULS", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo34 }, // Inst #153 = FMULS
+ { 154, 2, 1, 8, "FNABSD", 0, 0x18ULL, NULL, NULL, NULL, OperandInfo31 }, // Inst #154 = FNABSD
+ { 155, 2, 1, 8, "FNABSS", 0, 0x18ULL, NULL, NULL, NULL, OperandInfo32 }, // Inst #155 = FNABSS
+ { 156, 2, 1, 8, "FNEGD", 0, 0x18ULL, NULL, NULL, NULL, OperandInfo31 }, // Inst #156 = FNEGD
+ { 157, 2, 1, 8, "FNEGS", 0, 0x18ULL, NULL, NULL, NULL, OperandInfo32 }, // Inst #157 = FNEGS
+ { 158, 4, 1, 7, "FNMADD", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo37 }, // Inst #158 = FNMADD
+ { 159, 4, 1, 8, "FNMADDS", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo38 }, // Inst #159 = FNMADDS
+ { 160, 4, 1, 7, "FNMSUB", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo37 }, // Inst #160 = FNMSUB
+ { 161, 4, 1, 8, "FNMSUBS", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo38 }, // Inst #161 = FNMSUBS
+ { 162, 2, 1, 8, "FRSP", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo39 }, // Inst #162 = FRSP
+ { 163, 4, 1, 8, "FSELD", 0, 0x18ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #163 = FSELD
+ { 164, 4, 1, 8, "FSELS", 0, 0x18ULL, NULL, NULL, NULL, OperandInfo40 }, // Inst #164 = FSELS
+ { 165, 2, 1, 10, "FSQRT", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo31 }, // Inst #165 = FSQRT
+ { 166, 2, 1, 10, "FSQRTS", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo32 }, // Inst #166 = FSQRTS
+ { 167, 3, 1, 8, "FSUB", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo33 }, // Inst #167 = FSUB
+ { 168, 3, 1, 8, "FSUBS", 0, 0x18ULL, ImplicitList10, NULL, NULL, OperandInfo34 }, // Inst #168 = FSUBS
+ { 169, 3, 1, 14, "LA", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo9 }, // Inst #169 = LA
+ { 170, 3, 1, 33, "LBZ", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #170 = LBZ
+ { 171, 3, 1, 33, "LBZ8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #171 = LBZ8
+ { 172, 4, 2, 33, "LBZU", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #172 = LBZU
+ { 173, 4, 2, 33, "LBZU8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #173 = LBZU8
+ { 174, 3, 1, 33, "LBZX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #174 = LBZX
+ { 175, 3, 1, 33, "LBZX8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo46 }, // Inst #175 = LBZX8
+ { 176, 3, 1, 35, "LD", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #176 = LD
+ { 177, 3, 1, 36, "LDARX", 0|(1<<TID::MayLoad), 0x0ULL, NULL, NULL, NULL, OperandInfo46 }, // Inst #177 = LDARX
+ { 178, 4, 2, 35, "LDU", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #178 = LDU
+ { 179, 3, 1, 35, "LDX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo46 }, // Inst #179 = LDX
+ { 180, 1, 0, 35, "LDinto_toc", 0|(1<<TID::FoldableAsLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo47 }, // Inst #180 = LDinto_toc
+ { 181, 3, 1, 35, "LDtoc", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo48 }, // Inst #181 = LDtoc
+ { 182, 0, 0, 35, "LDtoc_restore", 0|(1<<TID::FoldableAsLoad)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, 0 }, // Inst #182 = LDtoc_restore
+ { 183, 3, 1, 37, "LFD", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo49 }, // Inst #183 = LFD
+ { 184, 4, 2, 37, "LFDU", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo50 }, // Inst #184 = LFDU
+ { 185, 3, 1, 38, "LFDX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo51 }, // Inst #185 = LFDX
+ { 186, 3, 1, 38, "LFS", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo52 }, // Inst #186 = LFS
+ { 187, 4, 2, 38, "LFSU", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #187 = LFSU
+ { 188, 3, 1, 38, "LFSX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #188 = LFSX
+ { 189, 3, 1, 39, "LHA", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x14ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #189 = LHA
+ { 190, 3, 1, 39, "LHA8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x14ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #190 = LHA8
+ { 191, 4, 2, 33, "LHAU", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #191 = LHAU
+ { 192, 4, 2, 33, "LHAU8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #192 = LHAU8
+ { 193, 3, 1, 39, "LHAX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x14ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #193 = LHAX
+ { 194, 3, 1, 39, "LHAX8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x14ULL, NULL, NULL, NULL, OperandInfo46 }, // Inst #194 = LHAX8
+ { 195, 3, 1, 33, "LHBRX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #195 = LHBRX
+ { 196, 3, 1, 33, "LHZ", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #196 = LHZ
+ { 197, 3, 1, 33, "LHZ8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #197 = LHZ8
+ { 198, 4, 2, 33, "LHZU", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #198 = LHZU
+ { 199, 4, 2, 33, "LHZU8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #199 = LHZU8
+ { 200, 3, 1, 33, "LHZX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #200 = LHZX
+ { 201, 3, 1, 33, "LHZX8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo46 }, // Inst #201 = LHZX8
+ { 202, 2, 1, 14, "LI", 0|(1<<TID::Rematerializable), 0x8ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #202 = LI
+ { 203, 2, 1, 14, "LI8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo56 }, // Inst #203 = LI8
+ { 204, 2, 1, 14, "LIS", 0|(1<<TID::Rematerializable), 0x8ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #204 = LIS
+ { 205, 2, 1, 14, "LIS8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo56 }, // Inst #205 = LIS8
+ { 206, 3, 1, 33, "LVEBX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #206 = LVEBX
+ { 207, 3, 1, 33, "LVEHX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #207 = LVEHX
+ { 208, 3, 1, 33, "LVEWX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #208 = LVEWX
+ { 209, 3, 1, 33, "LVSL", 0, 0x10ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #209 = LVSL
+ { 210, 3, 1, 33, "LVSR", 0, 0x10ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #210 = LVSR
+ { 211, 3, 1, 33, "LVX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #211 = LVX
+ { 212, 3, 1, 33, "LVXL", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #212 = LVXL
+ { 213, 3, 1, 42, "LWA", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x14ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #213 = LWA
+ { 214, 3, 1, 43, "LWARX", 0|(1<<TID::MayLoad), 0x0ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #214 = LWARX
+ { 215, 3, 1, 39, "LWAX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x14ULL, NULL, NULL, NULL, OperandInfo46 }, // Inst #215 = LWAX
+ { 216, 3, 1, 33, "LWBRX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #216 = LWBRX
+ { 217, 3, 1, 33, "LWZ", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #217 = LWZ
+ { 218, 3, 1, 33, "LWZ8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #218 = LWZ8
+ { 219, 4, 2, 33, "LWZU", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #219 = LWZU
+ { 220, 4, 2, 33, "LWZU8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #220 = LWZU8
+ { 221, 3, 1, 33, "LWZX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #221 = LWZX
+ { 222, 3, 1, 33, "LWZX8", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10ULL, NULL, NULL, NULL, OperandInfo46 }, // Inst #222 = LWZX8
+ { 223, 2, 1, 2, "MCRF", 0|(1<<TID::UnmodeledSideEffects), 0x21ULL, NULL, NULL, NULL, OperandInfo58 }, // Inst #223 = MCRF
+ { 224, 2, 1, 54, "MFCRpseud", 0|(1<<TID::UnmodeledSideEffects), 0x20ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #224 = MFCRpseud
+ { 225, 1, 1, 56, "MFCTR", 0|(1<<TID::UnmodeledSideEffects), 0x9ULL, ImplicitList4, NULL, NULL, OperandInfo59 }, // Inst #225 = MFCTR
+ { 226, 1, 1, 56, "MFCTR8", 0|(1<<TID::UnmodeledSideEffects), 0x9ULL, ImplicitList13, NULL, NULL, OperandInfo47 }, // Inst #226 = MFCTR8
+ { 227, 1, 1, 15, "MFFS", 0, 0x1aULL, ImplicitList10, NULL, NULL, OperandInfo60 }, // Inst #227 = MFFS
+ { 228, 1, 1, 56, "MFLR", 0|(1<<TID::UnmodeledSideEffects), 0x9ULL, ImplicitList14, NULL, NULL, OperandInfo59 }, // Inst #228 = MFLR
+ { 229, 1, 1, 56, "MFLR8", 0|(1<<TID::UnmodeledSideEffects), 0x9ULL, ImplicitList15, NULL, NULL, OperandInfo47 }, // Inst #229 = MFLR8
+ { 230, 2, 1, 54, "MFOCRF", 0|(1<<TID::UnmodeledSideEffects), 0x21ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #230 = MFOCRF
+ { 231, 1, 1, 14, "MFVRSAVE", 0|(1<<TID::UnmodeledSideEffects), 0x9ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #231 = MFVRSAVE
+ { 232, 1, 1, 33, "MFVSCR", 0|(1<<TID::MayLoad), 0x0ULL, NULL, NULL, NULL, OperandInfo61 }, // Inst #232 = MFVSCR
+ { 233, 2, 0, 3, "MTCRF", 0|(1<<TID::UnmodeledSideEffects), 0x20ULL, NULL, NULL, NULL, OperandInfo62 }, // Inst #233 = MTCRF
+ { 234, 1, 0, 60, "MTCTR", 0, 0x9ULL, NULL, ImplicitList4, Barriers4, OperandInfo59 }, // Inst #234 = MTCTR
+ { 235, 1, 0, 60, "MTCTR8", 0, 0x9ULL, NULL, ImplicitList13, Barriers5, OperandInfo47 }, // Inst #235 = MTCTR8
+ { 236, 1, 0, 17, "MTFSB0", 0, 0x1aULL, ImplicitList10, ImplicitList10, NULL, OperandInfo2 }, // Inst #236 = MTFSB0
+ { 237, 1, 0, 17, "MTFSB1", 0, 0x1aULL, ImplicitList10, ImplicitList10, NULL, OperandInfo2 }, // Inst #237 = MTFSB1
+ { 238, 4, 1, 17, "MTFSF", 0, 0x1aULL, ImplicitList10, ImplicitList10, NULL, OperandInfo63 }, // Inst #238 = MTFSF
+ { 239, 1, 0, 60, "MTLR", 0|(1<<TID::UnmodeledSideEffects), 0x9ULL, NULL, ImplicitList14, NULL, OperandInfo59 }, // Inst #239 = MTLR
+ { 240, 1, 0, 60, "MTLR8", 0|(1<<TID::UnmodeledSideEffects), 0x9ULL, NULL, ImplicitList15, NULL, OperandInfo47 }, // Inst #240 = MTLR8
+ { 241, 1, 0, 14, "MTVRSAVE", 0|(1<<TID::UnmodeledSideEffects), 0xaULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #241 = MTVRSAVE
+ { 242, 1, 0, 33, "MTVSCR", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo61 }, // Inst #242 = MTVSCR
+ { 243, 3, 1, 20, "MULHD", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #243 = MULHD
+ { 244, 3, 1, 21, "MULHDU", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #244 = MULHDU
+ { 245, 3, 1, 20, "MULHW", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #245 = MULHW
+ { 246, 3, 1, 21, "MULHWU", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #246 = MULHWU
+ { 247, 3, 1, 19, "MULLD", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #247 = MULLD
+ { 248, 3, 1, 22, "MULLI", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo9 }, // Inst #248 = MULLI
+ { 249, 3, 1, 20, "MULLW", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #249 = MULLW
+ { 250, 1, 0, 52, "MovePCtoLR", 0|(1<<TID::UnmodeledSideEffects), 0x38ULL, NULL, ImplicitList14, NULL, OperandInfo2 }, // Inst #250 = MovePCtoLR
+ { 251, 1, 0, 52, "MovePCtoLR8", 0|(1<<TID::UnmodeledSideEffects), 0x38ULL, NULL, ImplicitList15, NULL, OperandInfo2 }, // Inst #251 = MovePCtoLR8
+ { 252, 3, 1, 14, "NAND", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #252 = NAND
+ { 253, 3, 1, 14, "NAND8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #253 = NAND8
+ { 254, 2, 1, 14, "NEG", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo11 }, // Inst #254 = NEG
+ { 255, 2, 1, 14, "NEG8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo12 }, // Inst #255 = NEG8
+ { 256, 0, 0, 14, "NOP", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, 0 }, // Inst #256 = NOP
+ { 257, 3, 1, 14, "NOR", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #257 = NOR
+ { 258, 3, 1, 14, "NOR8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #258 = NOR8
+ { 259, 3, 1, 14, "OR", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #259 = OR
+ { 260, 3, 1, 14, "OR4To8", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, OperandInfo64 }, // Inst #260 = OR4To8
+ { 261, 3, 1, 14, "OR8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #261 = OR8
+ { 262, 3, 1, 14, "OR8To4", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, OperandInfo65 }, // Inst #262 = OR8To4
+ { 263, 3, 1, 14, "ORC", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #263 = ORC
+ { 264, 3, 1, 14, "ORC8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #264 = ORC8
+ { 265, 3, 1, 14, "ORI", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo9 }, // Inst #265 = ORI
+ { 266, 3, 1, 14, "ORI8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo10 }, // Inst #266 = ORI8
+ { 267, 3, 1, 14, "ORIS", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo9 }, // Inst #267 = ORIS
+ { 268, 3, 1, 14, "ORIS8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo10 }, // Inst #268 = ORIS8
+ { 269, 4, 1, 25, "RLDCL", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, OperandInfo66 }, // Inst #269 = RLDCL
+ { 270, 4, 1, 25, "RLDICL", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, OperandInfo67 }, // Inst #270 = RLDICL
+ { 271, 4, 1, 25, "RLDICR", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, OperandInfo67 }, // Inst #271 = RLDICR
+ { 272, 5, 1, 25, "RLDIMI", 0|(1<<TID::Commutable)|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, OperandInfo68 }, // Inst #272 = RLDIMI
+ { 273, 6, 1, 24, "RLWIMI", 0|(1<<TID::Commutable)|(1<<TID::UnmodeledSideEffects), 0xcULL, NULL, NULL, NULL, OperandInfo69 }, // Inst #273 = RLWIMI
+ { 274, 5, 1, 14, "RLWINM", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, OperandInfo70 }, // Inst #274 = RLWINM
+ { 275, 5, 1, 14, "RLWINMo", 0|(1<<TID::UnmodeledSideEffects), 0xcULL, NULL, ImplicitList3, NULL, OperandInfo70 }, // Inst #275 = RLWINMo
+ { 276, 5, 1, 14, "RLWNM", 0|(1<<TID::UnmodeledSideEffects), 0x8ULL, NULL, NULL, NULL, OperandInfo71 }, // Inst #276 = RLWNM
+ { 277, 5, 1, 52, "SELECT_CC_F4", 0|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0x2ULL, NULL, NULL, NULL, OperandInfo72 }, // Inst #277 = SELECT_CC_F4
+ { 278, 5, 1, 52, "SELECT_CC_F8", 0|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0x2ULL, NULL, NULL, NULL, OperandInfo73 }, // Inst #278 = SELECT_CC_F8
+ { 279, 5, 1, 52, "SELECT_CC_I4", 0|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0x2ULL, NULL, NULL, NULL, OperandInfo74 }, // Inst #279 = SELECT_CC_I4
+ { 280, 5, 1, 52, "SELECT_CC_I8", 0|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0x2ULL, NULL, NULL, NULL, OperandInfo75 }, // Inst #280 = SELECT_CC_I8
+ { 281, 5, 1, 52, "SELECT_CC_VRRC", 0|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0x2ULL, NULL, NULL, NULL, OperandInfo76 }, // Inst #281 = SELECT_CC_VRRC
+ { 282, 3, 1, 25, "SLD", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo77 }, // Inst #282 = SLD
+ { 283, 3, 1, 14, "SLW", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #283 = SLW
+ { 284, 3, 0, 52, "SPILL_CR", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #284 = SPILL_CR
+ { 285, 3, 1, 25, "SRAD", 0, 0x8ULL, NULL, ImplicitList1, Barriers1, OperandInfo77 }, // Inst #285 = SRAD
+ { 286, 3, 1, 25, "SRADI", 0, 0x8ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #286 = SRADI
+ { 287, 3, 1, 26, "SRAW", 0, 0x8ULL, NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #287 = SRAW
+ { 288, 3, 1, 26, "SRAWI", 0, 0x8ULL, NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #288 = SRAWI
+ { 289, 3, 1, 25, "SRD", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo77 }, // Inst #289 = SRD
+ { 290, 3, 1, 14, "SRW", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #290 = SRW
+ { 291, 3, 0, 33, "STB", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #291 = STB
+ { 292, 3, 0, 33, "STB8", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #292 = STB8
+ { 293, 4, 1, 33, "STBU", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo78 }, // Inst #293 = STBU
+ { 294, 4, 1, 33, "STBU8", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo79 }, // Inst #294 = STBU8
+ { 295, 3, 0, 33, "STBX", 0|(1<<TID::MayStore), 0x14ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #295 = STBX
+ { 296, 3, 0, 33, "STBX8", 0|(1<<TID::MayStore), 0x14ULL, NULL, NULL, NULL, OperandInfo46 }, // Inst #296 = STBX8
+ { 297, 3, 0, 46, "STD", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #297 = STD
+ { 298, 3, 0, 47, "STDCX", 0|(1<<TID::MayStore), 0x0ULL, NULL, ImplicitList3, NULL, OperandInfo46 }, // Inst #298 = STDCX
+ { 299, 4, 1, 46, "STDU", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo79 }, // Inst #299 = STDU
+ { 300, 3, 0, 46, "STDUX", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo46 }, // Inst #300 = STDUX
+ { 301, 3, 0, 46, "STDX", 0|(1<<TID::MayStore), 0x14ULL, NULL, NULL, NULL, OperandInfo46 }, // Inst #301 = STDX
+ { 302, 3, 0, 46, "STDX_32", 0|(1<<TID::MayStore), 0x14ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #302 = STDX_32
+ { 303, 3, 0, 46, "STD_32", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #303 = STD_32
+ { 304, 3, 0, 51, "STFD", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo49 }, // Inst #304 = STFD
+ { 305, 4, 1, 33, "STFDU", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #305 = STFDU
+ { 306, 3, 0, 51, "STFDX", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo51 }, // Inst #306 = STFDX
+ { 307, 3, 0, 51, "STFIWX", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo51 }, // Inst #307 = STFIWX
+ { 308, 3, 0, 51, "STFS", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo52 }, // Inst #308 = STFS
+ { 309, 4, 1, 33, "STFSU", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo81 }, // Inst #309 = STFSU
+ { 310, 3, 0, 51, "STFSX", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #310 = STFSX
+ { 311, 3, 0, 33, "STH", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #311 = STH
+ { 312, 3, 0, 33, "STH8", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #312 = STH8
+ { 313, 3, 0, 33, "STHBRX", 0|(1<<TID::MayStore), 0x14ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #313 = STHBRX
+ { 314, 4, 1, 33, "STHU", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo78 }, // Inst #314 = STHU
+ { 315, 4, 1, 33, "STHU8", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo79 }, // Inst #315 = STHU8
+ { 316, 3, 0, 33, "STHX", 0|(1<<TID::MayStore), 0x14ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #316 = STHX
+ { 317, 3, 0, 33, "STHX8", 0|(1<<TID::MayStore), 0x14ULL, NULL, NULL, NULL, OperandInfo46 }, // Inst #317 = STHX8
+ { 318, 3, 0, 33, "STVEBX", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #318 = STVEBX
+ { 319, 3, 0, 33, "STVEHX", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #319 = STVEHX
+ { 320, 3, 0, 33, "STVEWX", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #320 = STVEWX
+ { 321, 3, 0, 33, "STVX", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #321 = STVX
+ { 322, 3, 0, 33, "STVXL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #322 = STVXL
+ { 323, 3, 0, 33, "STW", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #323 = STW
+ { 324, 3, 0, 33, "STW8", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #324 = STW8
+ { 325, 3, 0, 33, "STWBRX", 0|(1<<TID::MayStore), 0x14ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #325 = STWBRX
+ { 326, 3, 0, 49, "STWCX", 0|(1<<TID::MayStore), 0x0ULL, NULL, ImplicitList3, NULL, OperandInfo45 }, // Inst #326 = STWCX
+ { 327, 4, 1, 33, "STWU", 0|(1<<TID::MayStore), 0x10ULL, NULL, NULL, NULL, OperandInfo78 }, // Inst #327 = STWU
+ { 328, 3, 0, 33, "STWUX", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x10ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #328 = STWUX
+ { 329, 3, 0, 33, "STWX", 0|(1<<TID::MayStore), 0x14ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #329 = STWX
+ { 330, 3, 0, 33, "STWX8", 0|(1<<TID::MayStore), 0x14ULL, NULL, NULL, NULL, OperandInfo46 }, // Inst #330 = STWX8
+ { 331, 3, 1, 14, "SUBF", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #331 = SUBF
+ { 332, 3, 1, 14, "SUBF8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #332 = SUBF8
+ { 333, 3, 1, 14, "SUBFC", 0, 0xcULL, NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #333 = SUBFC
+ { 334, 3, 1, 14, "SUBFC8", 0, 0xcULL, NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #334 = SUBFC8
+ { 335, 3, 1, 14, "SUBFE", 0, 0x8ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #335 = SUBFE
+ { 336, 3, 1, 14, "SUBFE8", 0, 0x8ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #336 = SUBFE8
+ { 337, 3, 1, 14, "SUBFIC", 0, 0x8ULL, NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #337 = SUBFIC
+ { 338, 3, 1, 14, "SUBFIC8", 0, 0x8ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #338 = SUBFIC8
+ { 339, 2, 1, 14, "SUBFME", 0, 0x8ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #339 = SUBFME
+ { 340, 2, 1, 14, "SUBFME8", 0, 0x8ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #340 = SUBFME8
+ { 341, 2, 1, 14, "SUBFZE", 0, 0x8ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #341 = SUBFZE
+ { 342, 2, 1, 14, "SUBFZE8", 0, 0x8ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #342 = SUBFZE8
+ { 343, 0, 0, 50, "SYNC", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, 0 }, // Inst #343 = SYNC
+ { 344, 1, 0, 0, "TAILB", 0|(1<<TID::Return)|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x38ULL, ImplicitList10, NULL, NULL, OperandInfo2 }, // Inst #344 = TAILB
+ { 345, 1, 0, 0, "TAILB8", 0|(1<<TID::Return)|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x38ULL, ImplicitList10, NULL, NULL, OperandInfo2 }, // Inst #345 = TAILB8
+ { 346, 1, 0, 0, "TAILBA", 0|(1<<TID::Return)|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x38ULL, ImplicitList10, NULL, NULL, OperandInfo2 }, // Inst #346 = TAILBA
+ { 347, 1, 0, 0, "TAILBA8", 0|(1<<TID::Return)|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x38ULL, ImplicitList10, NULL, NULL, OperandInfo2 }, // Inst #347 = TAILBA8
+ { 348, 0, 0, 0, "TAILBCTR", 0|(1<<TID::Return)|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x38ULL, ImplicitList7, NULL, NULL, 0 }, // Inst #348 = TAILBCTR
+ { 349, 0, 0, 0, "TAILBCTR8", 0|(1<<TID::Return)|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x38ULL, ImplicitList7, NULL, NULL, 0 }, // Inst #349 = TAILBCTR8
+ { 350, 2, 0, 52, "TCRETURNai", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic), 0x0ULL, ImplicitList10, NULL, NULL, OperandInfo6 }, // Inst #350 = TCRETURNai
+ { 351, 2, 0, 52, "TCRETURNai8", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic), 0x0ULL, ImplicitList10, NULL, NULL, OperandInfo6 }, // Inst #351 = TCRETURNai8
+ { 352, 2, 0, 52, "TCRETURNdi", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList10, NULL, NULL, OperandInfo6 }, // Inst #352 = TCRETURNdi
+ { 353, 2, 0, 52, "TCRETURNdi8", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList10, NULL, NULL, OperandInfo6 }, // Inst #353 = TCRETURNdi8
+ { 354, 2, 0, 52, "TCRETURNri", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList10, NULL, NULL, OperandInfo82 }, // Inst #354 = TCRETURNri
+ { 355, 2, 0, 52, "TCRETURNri8", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList10, NULL, NULL, OperandInfo83 }, // Inst #355 = TCRETURNri8
+ { 356, 0, 0, 33, "TRAP", 0|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, 0 }, // Inst #356 = TRAP
+ { 357, 2, 1, 52, "UPDATE_VRSAVE", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo11 }, // Inst #357 = UPDATE_VRSAVE
+ { 358, 3, 1, 67, "VADDCUW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #358 = VADDCUW
+ { 359, 3, 1, 67, "VADDFP", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #359 = VADDFP
+ { 360, 3, 1, 67, "VADDSBS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #360 = VADDSBS
+ { 361, 3, 1, 67, "VADDSHS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #361 = VADDSHS
+ { 362, 3, 1, 67, "VADDSWS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #362 = VADDSWS
+ { 363, 3, 1, 70, "VADDUBM", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #363 = VADDUBM
+ { 364, 3, 1, 67, "VADDUBS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #364 = VADDUBS
+ { 365, 3, 1, 70, "VADDUHM", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #365 = VADDUHM
+ { 366, 3, 1, 67, "VADDUHS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #366 = VADDUHS
+ { 367, 3, 1, 70, "VADDUWM", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #367 = VADDUWM
+ { 368, 3, 1, 67, "VADDUWS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #368 = VADDUWS
+ { 369, 3, 1, 67, "VAND", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #369 = VAND
+ { 370, 3, 1, 67, "VANDC", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #370 = VANDC
+ { 371, 3, 1, 67, "VAVGSB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #371 = VAVGSB
+ { 372, 3, 1, 67, "VAVGSH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #372 = VAVGSH
+ { 373, 3, 1, 67, "VAVGSW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #373 = VAVGSW
+ { 374, 3, 1, 67, "VAVGUB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #374 = VAVGUB
+ { 375, 3, 1, 67, "VAVGUH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #375 = VAVGUH
+ { 376, 3, 1, 67, "VAVGUW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #376 = VAVGUW
+ { 377, 3, 1, 67, "VCFSX", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo85 }, // Inst #377 = VCFSX
+ { 378, 3, 1, 67, "VCFUX", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo85 }, // Inst #378 = VCFUX
+ { 379, 3, 1, 68, "VCMPBFP", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #379 = VCMPBFP
+ { 380, 3, 1, 68, "VCMPBFPo", 0, 0x28ULL, NULL, ImplicitList16, NULL, OperandInfo84 }, // Inst #380 = VCMPBFPo
+ { 381, 3, 1, 68, "VCMPEQFP", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #381 = VCMPEQFP
+ { 382, 3, 1, 68, "VCMPEQFPo", 0, 0x28ULL, NULL, ImplicitList16, NULL, OperandInfo84 }, // Inst #382 = VCMPEQFPo
+ { 383, 3, 1, 68, "VCMPEQUB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #383 = VCMPEQUB
+ { 384, 3, 1, 68, "VCMPEQUBo", 0, 0x28ULL, NULL, ImplicitList16, NULL, OperandInfo84 }, // Inst #384 = VCMPEQUBo
+ { 385, 3, 1, 68, "VCMPEQUH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #385 = VCMPEQUH
+ { 386, 3, 1, 68, "VCMPEQUHo", 0, 0x28ULL, NULL, ImplicitList16, NULL, OperandInfo84 }, // Inst #386 = VCMPEQUHo
+ { 387, 3, 1, 68, "VCMPEQUW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #387 = VCMPEQUW
+ { 388, 3, 1, 68, "VCMPEQUWo", 0, 0x28ULL, NULL, ImplicitList16, NULL, OperandInfo84 }, // Inst #388 = VCMPEQUWo
+ { 389, 3, 1, 68, "VCMPGEFP", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #389 = VCMPGEFP
+ { 390, 3, 1, 68, "VCMPGEFPo", 0, 0x28ULL, NULL, ImplicitList16, NULL, OperandInfo84 }, // Inst #390 = VCMPGEFPo
+ { 391, 3, 1, 68, "VCMPGTFP", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #391 = VCMPGTFP
+ { 392, 3, 1, 68, "VCMPGTFPo", 0, 0x28ULL, NULL, ImplicitList16, NULL, OperandInfo84 }, // Inst #392 = VCMPGTFPo
+ { 393, 3, 1, 68, "VCMPGTSB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #393 = VCMPGTSB
+ { 394, 3, 1, 68, "VCMPGTSBo", 0, 0x28ULL, NULL, ImplicitList16, NULL, OperandInfo84 }, // Inst #394 = VCMPGTSBo
+ { 395, 3, 1, 68, "VCMPGTSH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #395 = VCMPGTSH
+ { 396, 3, 1, 68, "VCMPGTSHo", 0, 0x28ULL, NULL, ImplicitList16, NULL, OperandInfo84 }, // Inst #396 = VCMPGTSHo
+ { 397, 3, 1, 68, "VCMPGTSW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #397 = VCMPGTSW
+ { 398, 3, 1, 68, "VCMPGTSWo", 0, 0x28ULL, NULL, ImplicitList16, NULL, OperandInfo84 }, // Inst #398 = VCMPGTSWo
+ { 399, 3, 1, 68, "VCMPGTUB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #399 = VCMPGTUB
+ { 400, 3, 1, 68, "VCMPGTUBo", 0, 0x28ULL, NULL, ImplicitList16, NULL, OperandInfo84 }, // Inst #400 = VCMPGTUBo
+ { 401, 3, 1, 68, "VCMPGTUH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #401 = VCMPGTUH
+ { 402, 3, 1, 68, "VCMPGTUHo", 0, 0x28ULL, NULL, ImplicitList16, NULL, OperandInfo84 }, // Inst #402 = VCMPGTUHo
+ { 403, 3, 1, 68, "VCMPGTUW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #403 = VCMPGTUW
+ { 404, 3, 1, 68, "VCMPGTUWo", 0, 0x28ULL, NULL, ImplicitList16, NULL, OperandInfo84 }, // Inst #404 = VCMPGTUWo
+ { 405, 3, 1, 67, "VCTSXS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo85 }, // Inst #405 = VCTSXS
+ { 406, 3, 1, 67, "VCTUXS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo85 }, // Inst #406 = VCTUXS
+ { 407, 2, 1, 67, "VEXPTEFP", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo86 }, // Inst #407 = VEXPTEFP
+ { 408, 2, 1, 67, "VLOGEFP", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo86 }, // Inst #408 = VLOGEFP
+ { 409, 4, 1, 67, "VMADDFP", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo87 }, // Inst #409 = VMADDFP
+ { 410, 3, 1, 67, "VMAXFP", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #410 = VMAXFP
+ { 411, 3, 1, 67, "VMAXSB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #411 = VMAXSB
+ { 412, 3, 1, 67, "VMAXSH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #412 = VMAXSH
+ { 413, 3, 1, 67, "VMAXSW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #413 = VMAXSW
+ { 414, 3, 1, 67, "VMAXUB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #414 = VMAXUB
+ { 415, 3, 1, 67, "VMAXUH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #415 = VMAXUH
+ { 416, 3, 1, 67, "VMAXUW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #416 = VMAXUW
+ { 417, 4, 1, 67, "VMHADDSHS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo87 }, // Inst #417 = VMHADDSHS
+ { 418, 4, 1, 67, "VMHRADDSHS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo87 }, // Inst #418 = VMHRADDSHS
+ { 419, 3, 1, 67, "VMINFP", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #419 = VMINFP
+ { 420, 3, 1, 67, "VMINSB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #420 = VMINSB
+ { 421, 3, 1, 67, "VMINSH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #421 = VMINSH
+ { 422, 3, 1, 67, "VMINSW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #422 = VMINSW
+ { 423, 3, 1, 67, "VMINUB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #423 = VMINUB
+ { 424, 3, 1, 67, "VMINUH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #424 = VMINUH
+ { 425, 3, 1, 67, "VMINUW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #425 = VMINUW
+ { 426, 4, 1, 67, "VMLADDUHM", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo87 }, // Inst #426 = VMLADDUHM
+ { 427, 3, 1, 67, "VMRGHB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #427 = VMRGHB
+ { 428, 3, 1, 67, "VMRGHH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #428 = VMRGHH
+ { 429, 3, 1, 67, "VMRGHW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #429 = VMRGHW
+ { 430, 3, 1, 67, "VMRGLB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #430 = VMRGLB
+ { 431, 3, 1, 67, "VMRGLH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #431 = VMRGLH
+ { 432, 3, 1, 67, "VMRGLW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #432 = VMRGLW
+ { 433, 4, 1, 67, "VMSUMMBM", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo87 }, // Inst #433 = VMSUMMBM
+ { 434, 4, 1, 67, "VMSUMSHM", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo87 }, // Inst #434 = VMSUMSHM
+ { 435, 4, 1, 67, "VMSUMSHS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo87 }, // Inst #435 = VMSUMSHS
+ { 436, 4, 1, 67, "VMSUMUBM", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo87 }, // Inst #436 = VMSUMUBM
+ { 437, 4, 1, 67, "VMSUMUHM", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo87 }, // Inst #437 = VMSUMUHM
+ { 438, 4, 1, 67, "VMSUMUHS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo87 }, // Inst #438 = VMSUMUHS
+ { 439, 3, 1, 67, "VMULESB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #439 = VMULESB
+ { 440, 3, 1, 67, "VMULESH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #440 = VMULESH
+ { 441, 3, 1, 67, "VMULEUB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #441 = VMULEUB
+ { 442, 3, 1, 67, "VMULEUH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #442 = VMULEUH
+ { 443, 3, 1, 67, "VMULOSB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #443 = VMULOSB
+ { 444, 3, 1, 67, "VMULOSH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #444 = VMULOSH
+ { 445, 3, 1, 67, "VMULOUB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #445 = VMULOUB
+ { 446, 3, 1, 67, "VMULOUH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #446 = VMULOUH
+ { 447, 4, 1, 67, "VNMSUBFP", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo87 }, // Inst #447 = VNMSUBFP
+ { 448, 3, 1, 67, "VNOR", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #448 = VNOR
+ { 449, 3, 1, 67, "VOR", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #449 = VOR
+ { 450, 4, 1, 67, "VPERM", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo87 }, // Inst #450 = VPERM
+ { 451, 3, 1, 67, "VPKPX", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #451 = VPKPX
+ { 452, 3, 1, 67, "VPKSHSS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #452 = VPKSHSS
+ { 453, 3, 1, 67, "VPKSHUS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #453 = VPKSHUS
+ { 454, 3, 1, 67, "VPKSWSS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #454 = VPKSWSS
+ { 455, 3, 1, 67, "VPKSWUS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #455 = VPKSWUS
+ { 456, 3, 1, 67, "VPKUHUM", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #456 = VPKUHUM
+ { 457, 3, 1, 67, "VPKUHUS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #457 = VPKUHUS
+ { 458, 3, 1, 67, "VPKUWUM", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #458 = VPKUWUM
+ { 459, 3, 1, 67, "VPKUWUS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #459 = VPKUWUS
+ { 460, 2, 1, 67, "VREFP", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo86 }, // Inst #460 = VREFP
+ { 461, 2, 1, 67, "VRFIM", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo86 }, // Inst #461 = VRFIM
+ { 462, 2, 1, 67, "VRFIN", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo86 }, // Inst #462 = VRFIN
+ { 463, 2, 1, 67, "VRFIP", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo86 }, // Inst #463 = VRFIP
+ { 464, 2, 1, 67, "VRFIZ", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo86 }, // Inst #464 = VRFIZ
+ { 465, 3, 1, 67, "VRLB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #465 = VRLB
+ { 466, 3, 1, 67, "VRLH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #466 = VRLH
+ { 467, 3, 1, 67, "VRLW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #467 = VRLW
+ { 468, 2, 1, 67, "VRSQRTEFP", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo86 }, // Inst #468 = VRSQRTEFP
+ { 469, 4, 1, 67, "VSEL", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo87 }, // Inst #469 = VSEL
+ { 470, 3, 1, 67, "VSL", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #470 = VSL
+ { 471, 3, 1, 67, "VSLB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #471 = VSLB
+ { 472, 4, 1, 67, "VSLDOI", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo88 }, // Inst #472 = VSLDOI
+ { 473, 3, 1, 67, "VSLH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #473 = VSLH
+ { 474, 3, 1, 67, "VSLO", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #474 = VSLO
+ { 475, 3, 1, 67, "VSLW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #475 = VSLW
+ { 476, 3, 1, 71, "VSPLTB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo85 }, // Inst #476 = VSPLTB
+ { 477, 3, 1, 71, "VSPLTH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo85 }, // Inst #477 = VSPLTH
+ { 478, 2, 1, 71, "VSPLTISB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo89 }, // Inst #478 = VSPLTISB
+ { 479, 2, 1, 71, "VSPLTISH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo89 }, // Inst #479 = VSPLTISH
+ { 480, 2, 1, 71, "VSPLTISW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo89 }, // Inst #480 = VSPLTISW
+ { 481, 3, 1, 71, "VSPLTW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo85 }, // Inst #481 = VSPLTW
+ { 482, 3, 1, 67, "VSR", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #482 = VSR
+ { 483, 3, 1, 67, "VSRAB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #483 = VSRAB
+ { 484, 3, 1, 67, "VSRAH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #484 = VSRAH
+ { 485, 3, 1, 67, "VSRAW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #485 = VSRAW
+ { 486, 3, 1, 67, "VSRB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #486 = VSRB
+ { 487, 3, 1, 67, "VSRH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #487 = VSRH
+ { 488, 3, 1, 67, "VSRO", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #488 = VSRO
+ { 489, 3, 1, 67, "VSRW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #489 = VSRW
+ { 490, 3, 1, 67, "VSUBCUW", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #490 = VSUBCUW
+ { 491, 3, 1, 70, "VSUBFP", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #491 = VSUBFP
+ { 492, 3, 1, 67, "VSUBSBS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #492 = VSUBSBS
+ { 493, 3, 1, 67, "VSUBSHS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #493 = VSUBSHS
+ { 494, 3, 1, 67, "VSUBSWS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #494 = VSUBSWS
+ { 495, 3, 1, 70, "VSUBUBM", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #495 = VSUBUBM
+ { 496, 3, 1, 67, "VSUBUBS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #496 = VSUBUBS
+ { 497, 3, 1, 70, "VSUBUHM", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #497 = VSUBUHM
+ { 498, 3, 1, 67, "VSUBUHS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #498 = VSUBUHS
+ { 499, 3, 1, 70, "VSUBUWM", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #499 = VSUBUWM
+ { 500, 3, 1, 67, "VSUBUWS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #500 = VSUBUWS
+ { 501, 3, 1, 67, "VSUM2SWS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #501 = VSUM2SWS
+ { 502, 3, 1, 67, "VSUM4SBS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #502 = VSUM4SBS
+ { 503, 3, 1, 67, "VSUM4SHS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #503 = VSUM4SHS
+ { 504, 3, 1, 67, "VSUM4UBS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #504 = VSUM4UBS
+ { 505, 3, 1, 67, "VSUMSWS", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #505 = VSUMSWS
+ { 506, 2, 1, 67, "VUPKHPX", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo86 }, // Inst #506 = VUPKHPX
+ { 507, 2, 1, 67, "VUPKHSB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo86 }, // Inst #507 = VUPKHSB
+ { 508, 2, 1, 67, "VUPKHSH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo86 }, // Inst #508 = VUPKHSH
+ { 509, 2, 1, 67, "VUPKLPX", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo86 }, // Inst #509 = VUPKLPX
+ { 510, 2, 1, 67, "VUPKLSB", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo86 }, // Inst #510 = VUPKLSB
+ { 511, 2, 1, 67, "VUPKLSH", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo86 }, // Inst #511 = VUPKLSH
+ { 512, 3, 1, 67, "VXOR", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #512 = VXOR
+ { 513, 1, 1, 67, "V_SET0", 0, 0x28ULL, NULL, NULL, NULL, OperandInfo61 }, // Inst #513 = V_SET0
+ { 514, 3, 1, 14, "XOR", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #514 = XOR
+ { 515, 3, 1, 14, "XOR8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #515 = XOR8
+ { 516, 3, 1, 14, "XORI", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo9 }, // Inst #516 = XORI
+ { 517, 3, 1, 14, "XORI8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo10 }, // Inst #517 = XORI8
+ { 518, 3, 1, 14, "XORIS", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo9 }, // Inst #518 = XORIS
+ { 519, 3, 1, 14, "XORIS8", 0, 0x8ULL, NULL, NULL, NULL, OperandInfo10 }, // Inst #519 = XORIS8
};
} // End llvm namespace
diff --git a/libclamav/c++/PPCGenInstrNames.inc b/libclamav/c++/PPCGenInstrNames.inc
index d9c1a24..d062dc5 100644
--- a/libclamav/c++/PPCGenInstrNames.inc
+++ b/libclamav/c++/PPCGenInstrNames.inc
@@ -12,7 +12,7 @@ namespace PPC {
enum {
PHI = 0,
INLINEASM = 1,
- DBG_LABEL = 2,
+ PROLOG_LABEL = 2,
EH_LABEL = 3,
GC_LABEL = 4,
KILL = 5,
@@ -22,514 +22,515 @@ namespace PPC {
SUBREG_TO_REG = 9,
COPY_TO_REGCLASS = 10,
DBG_VALUE = 11,
- ADD4 = 12,
- ADD8 = 13,
- ADDC = 14,
- ADDC8 = 15,
- ADDE = 16,
- ADDE8 = 17,
- ADDI = 18,
- ADDI8 = 19,
- ADDIC = 20,
- ADDIC8 = 21,
- ADDICo = 22,
- ADDIS = 23,
- ADDIS8 = 24,
- ADDME = 25,
- ADDME8 = 26,
- ADDZE = 27,
- ADDZE8 = 28,
- ADJCALLSTACKDOWN = 29,
- ADJCALLSTACKUP = 30,
- AND = 31,
- AND8 = 32,
- ANDC = 33,
- ANDC8 = 34,
- ANDISo = 35,
- ANDISo8 = 36,
- ANDIo = 37,
- ANDIo8 = 38,
- ATOMIC_CMP_SWAP_I16 = 39,
- ATOMIC_CMP_SWAP_I32 = 40,
- ATOMIC_CMP_SWAP_I64 = 41,
- ATOMIC_CMP_SWAP_I8 = 42,
- ATOMIC_LOAD_ADD_I16 = 43,
- ATOMIC_LOAD_ADD_I32 = 44,
- ATOMIC_LOAD_ADD_I64 = 45,
- ATOMIC_LOAD_ADD_I8 = 46,
- ATOMIC_LOAD_AND_I16 = 47,
- ATOMIC_LOAD_AND_I32 = 48,
- ATOMIC_LOAD_AND_I64 = 49,
- ATOMIC_LOAD_AND_I8 = 50,
- ATOMIC_LOAD_NAND_I16 = 51,
- ATOMIC_LOAD_NAND_I32 = 52,
- ATOMIC_LOAD_NAND_I64 = 53,
- ATOMIC_LOAD_NAND_I8 = 54,
- ATOMIC_LOAD_OR_I16 = 55,
- ATOMIC_LOAD_OR_I32 = 56,
- ATOMIC_LOAD_OR_I64 = 57,
- ATOMIC_LOAD_OR_I8 = 58,
- ATOMIC_LOAD_SUB_I16 = 59,
- ATOMIC_LOAD_SUB_I32 = 60,
- ATOMIC_LOAD_SUB_I64 = 61,
- ATOMIC_LOAD_SUB_I8 = 62,
- ATOMIC_LOAD_XOR_I16 = 63,
- ATOMIC_LOAD_XOR_I32 = 64,
- ATOMIC_LOAD_XOR_I64 = 65,
- ATOMIC_LOAD_XOR_I8 = 66,
- ATOMIC_SWAP_I16 = 67,
- ATOMIC_SWAP_I32 = 68,
- ATOMIC_SWAP_I64 = 69,
- ATOMIC_SWAP_I8 = 70,
- B = 71,
- BCC = 72,
- BCTR = 73,
- BCTRL8_Darwin = 74,
- BCTRL8_ELF = 75,
- BCTRL_Darwin = 76,
- BCTRL_SVR4 = 77,
- BL8_Darwin = 78,
- BL8_ELF = 79,
- BLA8_Darwin = 80,
- BLA8_ELF = 81,
- BLA_Darwin = 82,
- BLA_SVR4 = 83,
- BLR = 84,
- BL_Darwin = 85,
- BL_SVR4 = 86,
- CMPD = 87,
- CMPDI = 88,
- CMPLD = 89,
- CMPLDI = 90,
- CMPLW = 91,
- CMPLWI = 92,
- CMPW = 93,
- CMPWI = 94,
- CNTLZD = 95,
- CNTLZW = 96,
- CREQV = 97,
- CROR = 98,
- CRSET = 99,
- DCBA = 100,
- DCBF = 101,
- DCBI = 102,
- DCBST = 103,
- DCBT = 104,
- DCBTST = 105,
- DCBZ = 106,
- DCBZL = 107,
- DIVD = 108,
- DIVDU = 109,
- DIVW = 110,
- DIVWU = 111,
- DSS = 112,
- DSSALL = 113,
- DST = 114,
- DST64 = 115,
- DSTST = 116,
- DSTST64 = 117,
- DSTSTT = 118,
- DSTSTT64 = 119,
- DSTT = 120,
- DSTT64 = 121,
- DYNALLOC = 122,
- DYNALLOC8 = 123,
- EQV = 124,
- EQV8 = 125,
- EXTSB = 126,
- EXTSB8 = 127,
- EXTSH = 128,
- EXTSH8 = 129,
- EXTSW = 130,
- EXTSW_32 = 131,
- EXTSW_32_64 = 132,
- FABSD = 133,
- FABSS = 134,
- FADD = 135,
- FADDS = 136,
- FADDrtz = 137,
- FCFID = 138,
- FCMPUD = 139,
- FCMPUS = 140,
- FCTIDZ = 141,
- FCTIWZ = 142,
- FDIV = 143,
- FDIVS = 144,
- FMADD = 145,
- FMADDS = 146,
- FMR = 147,
- FMRSD = 148,
- FMSUB = 149,
- FMSUBS = 150,
- FMUL = 151,
- FMULS = 152,
- FNABSD = 153,
- FNABSS = 154,
- FNEGD = 155,
- FNEGS = 156,
- FNMADD = 157,
- FNMADDS = 158,
- FNMSUB = 159,
- FNMSUBS = 160,
- FRSP = 161,
- FSELD = 162,
- FSELS = 163,
- FSQRT = 164,
- FSQRTS = 165,
- FSUB = 166,
- FSUBS = 167,
- LA = 168,
- LBZ = 169,
- LBZ8 = 170,
- LBZU = 171,
- LBZU8 = 172,
- LBZX = 173,
- LBZX8 = 174,
- LD = 175,
- LDARX = 176,
- LDU = 177,
- LDX = 178,
- LDinto_toc = 179,
- LDtoc = 180,
- LDtoc_restore = 181,
- LFD = 182,
- LFDU = 183,
- LFDX = 184,
- LFS = 185,
- LFSU = 186,
- LFSX = 187,
- LHA = 188,
- LHA8 = 189,
- LHAU = 190,
- LHAU8 = 191,
- LHAX = 192,
- LHAX8 = 193,
- LHBRX = 194,
- LHZ = 195,
- LHZ8 = 196,
- LHZU = 197,
- LHZU8 = 198,
- LHZX = 199,
- LHZX8 = 200,
- LI = 201,
- LI8 = 202,
- LIS = 203,
- LIS8 = 204,
- LVEBX = 205,
- LVEHX = 206,
- LVEWX = 207,
- LVSL = 208,
- LVSR = 209,
- LVX = 210,
- LVXL = 211,
- LWA = 212,
- LWARX = 213,
- LWAX = 214,
- LWBRX = 215,
- LWZ = 216,
- LWZ8 = 217,
- LWZU = 218,
- LWZU8 = 219,
- LWZX = 220,
- LWZX8 = 221,
- MCRF = 222,
- MFCR = 223,
- MFCTR = 224,
- MFCTR8 = 225,
- MFFS = 226,
- MFLR = 227,
- MFLR8 = 228,
- MFOCRF = 229,
- MFVRSAVE = 230,
- MFVSCR = 231,
- MTCRF = 232,
- MTCTR = 233,
- MTCTR8 = 234,
- MTFSB0 = 235,
- MTFSB1 = 236,
- MTFSF = 237,
- MTLR = 238,
- MTLR8 = 239,
- MTVRSAVE = 240,
- MTVSCR = 241,
- MULHD = 242,
- MULHDU = 243,
- MULHW = 244,
- MULHWU = 245,
- MULLD = 246,
- MULLI = 247,
- MULLW = 248,
- MovePCtoLR = 249,
- MovePCtoLR8 = 250,
- NAND = 251,
- NAND8 = 252,
- NEG = 253,
- NEG8 = 254,
- NOP = 255,
- NOR = 256,
- NOR8 = 257,
- OR = 258,
- OR4To8 = 259,
- OR8 = 260,
- OR8To4 = 261,
- ORC = 262,
- ORC8 = 263,
- ORI = 264,
- ORI8 = 265,
- ORIS = 266,
- ORIS8 = 267,
- RLDCL = 268,
- RLDICL = 269,
- RLDICR = 270,
- RLDIMI = 271,
- RLWIMI = 272,
- RLWINM = 273,
- RLWINMo = 274,
- RLWNM = 275,
- SELECT_CC_F4 = 276,
- SELECT_CC_F8 = 277,
- SELECT_CC_I4 = 278,
- SELECT_CC_I8 = 279,
- SELECT_CC_VRRC = 280,
- SLD = 281,
- SLW = 282,
- SPILL_CR = 283,
- SRAD = 284,
- SRADI = 285,
- SRAW = 286,
- SRAWI = 287,
- SRD = 288,
- SRW = 289,
- STB = 290,
- STB8 = 291,
- STBU = 292,
- STBU8 = 293,
- STBX = 294,
- STBX8 = 295,
- STD = 296,
- STDCX = 297,
- STDU = 298,
- STDUX = 299,
- STDX = 300,
- STDX_32 = 301,
- STD_32 = 302,
- STFD = 303,
- STFDU = 304,
- STFDX = 305,
- STFIWX = 306,
- STFS = 307,
- STFSU = 308,
- STFSX = 309,
- STH = 310,
- STH8 = 311,
- STHBRX = 312,
- STHU = 313,
- STHU8 = 314,
- STHX = 315,
- STHX8 = 316,
- STVEBX = 317,
- STVEHX = 318,
- STVEWX = 319,
- STVX = 320,
- STVXL = 321,
- STW = 322,
- STW8 = 323,
- STWBRX = 324,
- STWCX = 325,
- STWU = 326,
- STWUX = 327,
- STWX = 328,
- STWX8 = 329,
- SUBF = 330,
- SUBF8 = 331,
- SUBFC = 332,
- SUBFC8 = 333,
- SUBFE = 334,
- SUBFE8 = 335,
- SUBFIC = 336,
- SUBFIC8 = 337,
- SUBFME = 338,
- SUBFME8 = 339,
- SUBFZE = 340,
- SUBFZE8 = 341,
- SYNC = 342,
- TAILB = 343,
- TAILB8 = 344,
- TAILBA = 345,
- TAILBA8 = 346,
- TAILBCTR = 347,
- TAILBCTR8 = 348,
- TCRETURNai = 349,
- TCRETURNai8 = 350,
- TCRETURNdi = 351,
- TCRETURNdi8 = 352,
- TCRETURNri = 353,
- TCRETURNri8 = 354,
- TRAP = 355,
- UPDATE_VRSAVE = 356,
- VADDCUW = 357,
- VADDFP = 358,
- VADDSBS = 359,
- VADDSHS = 360,
- VADDSWS = 361,
- VADDUBM = 362,
- VADDUBS = 363,
- VADDUHM = 364,
- VADDUHS = 365,
- VADDUWM = 366,
- VADDUWS = 367,
- VAND = 368,
- VANDC = 369,
- VAVGSB = 370,
- VAVGSH = 371,
- VAVGSW = 372,
- VAVGUB = 373,
- VAVGUH = 374,
- VAVGUW = 375,
- VCFSX = 376,
- VCFUX = 377,
- VCMPBFP = 378,
- VCMPBFPo = 379,
- VCMPEQFP = 380,
- VCMPEQFPo = 381,
- VCMPEQUB = 382,
- VCMPEQUBo = 383,
- VCMPEQUH = 384,
- VCMPEQUHo = 385,
- VCMPEQUW = 386,
- VCMPEQUWo = 387,
- VCMPGEFP = 388,
- VCMPGEFPo = 389,
- VCMPGTFP = 390,
- VCMPGTFPo = 391,
- VCMPGTSB = 392,
- VCMPGTSBo = 393,
- VCMPGTSH = 394,
- VCMPGTSHo = 395,
- VCMPGTSW = 396,
- VCMPGTSWo = 397,
- VCMPGTUB = 398,
- VCMPGTUBo = 399,
- VCMPGTUH = 400,
- VCMPGTUHo = 401,
- VCMPGTUW = 402,
- VCMPGTUWo = 403,
- VCTSXS = 404,
- VCTUXS = 405,
- VEXPTEFP = 406,
- VLOGEFP = 407,
- VMADDFP = 408,
- VMAXFP = 409,
- VMAXSB = 410,
- VMAXSH = 411,
- VMAXSW = 412,
- VMAXUB = 413,
- VMAXUH = 414,
- VMAXUW = 415,
- VMHADDSHS = 416,
- VMHRADDSHS = 417,
- VMINFP = 418,
- VMINSB = 419,
- VMINSH = 420,
- VMINSW = 421,
- VMINUB = 422,
- VMINUH = 423,
- VMINUW = 424,
- VMLADDUHM = 425,
- VMRGHB = 426,
- VMRGHH = 427,
- VMRGHW = 428,
- VMRGLB = 429,
- VMRGLH = 430,
- VMRGLW = 431,
- VMSUMMBM = 432,
- VMSUMSHM = 433,
- VMSUMSHS = 434,
- VMSUMUBM = 435,
- VMSUMUHM = 436,
- VMSUMUHS = 437,
- VMULESB = 438,
- VMULESH = 439,
- VMULEUB = 440,
- VMULEUH = 441,
- VMULOSB = 442,
- VMULOSH = 443,
- VMULOUB = 444,
- VMULOUH = 445,
- VNMSUBFP = 446,
- VNOR = 447,
- VOR = 448,
- VPERM = 449,
- VPKPX = 450,
- VPKSHSS = 451,
- VPKSHUS = 452,
- VPKSWSS = 453,
- VPKSWUS = 454,
- VPKUHUM = 455,
- VPKUHUS = 456,
- VPKUWUM = 457,
- VPKUWUS = 458,
- VREFP = 459,
- VRFIM = 460,
- VRFIN = 461,
- VRFIP = 462,
- VRFIZ = 463,
- VRLB = 464,
- VRLH = 465,
- VRLW = 466,
- VRSQRTEFP = 467,
- VSEL = 468,
- VSL = 469,
- VSLB = 470,
- VSLDOI = 471,
- VSLH = 472,
- VSLO = 473,
- VSLW = 474,
- VSPLTB = 475,
- VSPLTH = 476,
- VSPLTISB = 477,
- VSPLTISH = 478,
- VSPLTISW = 479,
- VSPLTW = 480,
- VSR = 481,
- VSRAB = 482,
- VSRAH = 483,
- VSRAW = 484,
- VSRB = 485,
- VSRH = 486,
- VSRO = 487,
- VSRW = 488,
- VSUBCUW = 489,
- VSUBFP = 490,
- VSUBSBS = 491,
- VSUBSHS = 492,
- VSUBSWS = 493,
- VSUBUBM = 494,
- VSUBUBS = 495,
- VSUBUHM = 496,
- VSUBUHS = 497,
- VSUBUWM = 498,
- VSUBUWS = 499,
- VSUM2SWS = 500,
- VSUM4SBS = 501,
- VSUM4SHS = 502,
- VSUM4UBS = 503,
- VSUMSWS = 504,
- VUPKHPX = 505,
- VUPKHSB = 506,
- VUPKHSH = 507,
- VUPKLPX = 508,
- VUPKLSB = 509,
- VUPKLSH = 510,
- VXOR = 511,
- V_SET0 = 512,
- XOR = 513,
- XOR8 = 514,
- XORI = 515,
- XORI8 = 516,
- XORIS = 517,
- XORIS8 = 518,
- INSTRUCTION_LIST_END = 519
+ REG_SEQUENCE = 12,
+ COPY = 13,
+ ADD4 = 14,
+ ADD8 = 15,
+ ADDC = 16,
+ ADDC8 = 17,
+ ADDE = 18,
+ ADDE8 = 19,
+ ADDI = 20,
+ ADDI8 = 21,
+ ADDIC = 22,
+ ADDIC8 = 23,
+ ADDICo = 24,
+ ADDIS = 25,
+ ADDIS8 = 26,
+ ADDME = 27,
+ ADDME8 = 28,
+ ADDZE = 29,
+ ADDZE8 = 30,
+ ADJCALLSTACKDOWN = 31,
+ ADJCALLSTACKUP = 32,
+ AND = 33,
+ AND8 = 34,
+ ANDC = 35,
+ ANDC8 = 36,
+ ANDISo = 37,
+ ANDISo8 = 38,
+ ANDIo = 39,
+ ANDIo8 = 40,
+ ATOMIC_CMP_SWAP_I16 = 41,
+ ATOMIC_CMP_SWAP_I32 = 42,
+ ATOMIC_CMP_SWAP_I64 = 43,
+ ATOMIC_CMP_SWAP_I8 = 44,
+ ATOMIC_LOAD_ADD_I16 = 45,
+ ATOMIC_LOAD_ADD_I32 = 46,
+ ATOMIC_LOAD_ADD_I64 = 47,
+ ATOMIC_LOAD_ADD_I8 = 48,
+ ATOMIC_LOAD_AND_I16 = 49,
+ ATOMIC_LOAD_AND_I32 = 50,
+ ATOMIC_LOAD_AND_I64 = 51,
+ ATOMIC_LOAD_AND_I8 = 52,
+ ATOMIC_LOAD_NAND_I16 = 53,
+ ATOMIC_LOAD_NAND_I32 = 54,
+ ATOMIC_LOAD_NAND_I64 = 55,
+ ATOMIC_LOAD_NAND_I8 = 56,
+ ATOMIC_LOAD_OR_I16 = 57,
+ ATOMIC_LOAD_OR_I32 = 58,
+ ATOMIC_LOAD_OR_I64 = 59,
+ ATOMIC_LOAD_OR_I8 = 60,
+ ATOMIC_LOAD_SUB_I16 = 61,
+ ATOMIC_LOAD_SUB_I32 = 62,
+ ATOMIC_LOAD_SUB_I64 = 63,
+ ATOMIC_LOAD_SUB_I8 = 64,
+ ATOMIC_LOAD_XOR_I16 = 65,
+ ATOMIC_LOAD_XOR_I32 = 66,
+ ATOMIC_LOAD_XOR_I64 = 67,
+ ATOMIC_LOAD_XOR_I8 = 68,
+ ATOMIC_SWAP_I16 = 69,
+ ATOMIC_SWAP_I32 = 70,
+ ATOMIC_SWAP_I64 = 71,
+ ATOMIC_SWAP_I8 = 72,
+ B = 73,
+ BCC = 74,
+ BCTR = 75,
+ BCTRL8_Darwin = 76,
+ BCTRL8_ELF = 77,
+ BCTRL_Darwin = 78,
+ BCTRL_SVR4 = 79,
+ BL8_Darwin = 80,
+ BL8_ELF = 81,
+ BLA8_Darwin = 82,
+ BLA8_ELF = 83,
+ BLA_Darwin = 84,
+ BLA_SVR4 = 85,
+ BLR = 86,
+ BL_Darwin = 87,
+ BL_SVR4 = 88,
+ CMPD = 89,
+ CMPDI = 90,
+ CMPLD = 91,
+ CMPLDI = 92,
+ CMPLW = 93,
+ CMPLWI = 94,
+ CMPW = 95,
+ CMPWI = 96,
+ CNTLZD = 97,
+ CNTLZW = 98,
+ CREQV = 99,
+ CROR = 100,
+ CRSET = 101,
+ DCBA = 102,
+ DCBF = 103,
+ DCBI = 104,
+ DCBST = 105,
+ DCBT = 106,
+ DCBTST = 107,
+ DCBZ = 108,
+ DCBZL = 109,
+ DIVD = 110,
+ DIVDU = 111,
+ DIVW = 112,
+ DIVWU = 113,
+ DSS = 114,
+ DSSALL = 115,
+ DST = 116,
+ DST64 = 117,
+ DSTST = 118,
+ DSTST64 = 119,
+ DSTSTT = 120,
+ DSTSTT64 = 121,
+ DSTT = 122,
+ DSTT64 = 123,
+ DYNALLOC = 124,
+ DYNALLOC8 = 125,
+ EQV = 126,
+ EQV8 = 127,
+ EXTSB = 128,
+ EXTSB8 = 129,
+ EXTSH = 130,
+ EXTSH8 = 131,
+ EXTSW = 132,
+ EXTSW_32 = 133,
+ EXTSW_32_64 = 134,
+ FABSD = 135,
+ FABSS = 136,
+ FADD = 137,
+ FADDS = 138,
+ FADDrtz = 139,
+ FCFID = 140,
+ FCMPUD = 141,
+ FCMPUS = 142,
+ FCTIDZ = 143,
+ FCTIWZ = 144,
+ FDIV = 145,
+ FDIVS = 146,
+ FMADD = 147,
+ FMADDS = 148,
+ FMR = 149,
+ FMSUB = 150,
+ FMSUBS = 151,
+ FMUL = 152,
+ FMULS = 153,
+ FNABSD = 154,
+ FNABSS = 155,
+ FNEGD = 156,
+ FNEGS = 157,
+ FNMADD = 158,
+ FNMADDS = 159,
+ FNMSUB = 160,
+ FNMSUBS = 161,
+ FRSP = 162,
+ FSELD = 163,
+ FSELS = 164,
+ FSQRT = 165,
+ FSQRTS = 166,
+ FSUB = 167,
+ FSUBS = 168,
+ LA = 169,
+ LBZ = 170,
+ LBZ8 = 171,
+ LBZU = 172,
+ LBZU8 = 173,
+ LBZX = 174,
+ LBZX8 = 175,
+ LD = 176,
+ LDARX = 177,
+ LDU = 178,
+ LDX = 179,
+ LDinto_toc = 180,
+ LDtoc = 181,
+ LDtoc_restore = 182,
+ LFD = 183,
+ LFDU = 184,
+ LFDX = 185,
+ LFS = 186,
+ LFSU = 187,
+ LFSX = 188,
+ LHA = 189,
+ LHA8 = 190,
+ LHAU = 191,
+ LHAU8 = 192,
+ LHAX = 193,
+ LHAX8 = 194,
+ LHBRX = 195,
+ LHZ = 196,
+ LHZ8 = 197,
+ LHZU = 198,
+ LHZU8 = 199,
+ LHZX = 200,
+ LHZX8 = 201,
+ LI = 202,
+ LI8 = 203,
+ LIS = 204,
+ LIS8 = 205,
+ LVEBX = 206,
+ LVEHX = 207,
+ LVEWX = 208,
+ LVSL = 209,
+ LVSR = 210,
+ LVX = 211,
+ LVXL = 212,
+ LWA = 213,
+ LWARX = 214,
+ LWAX = 215,
+ LWBRX = 216,
+ LWZ = 217,
+ LWZ8 = 218,
+ LWZU = 219,
+ LWZU8 = 220,
+ LWZX = 221,
+ LWZX8 = 222,
+ MCRF = 223,
+ MFCRpseud = 224,
+ MFCTR = 225,
+ MFCTR8 = 226,
+ MFFS = 227,
+ MFLR = 228,
+ MFLR8 = 229,
+ MFOCRF = 230,
+ MFVRSAVE = 231,
+ MFVSCR = 232,
+ MTCRF = 233,
+ MTCTR = 234,
+ MTCTR8 = 235,
+ MTFSB0 = 236,
+ MTFSB1 = 237,
+ MTFSF = 238,
+ MTLR = 239,
+ MTLR8 = 240,
+ MTVRSAVE = 241,
+ MTVSCR = 242,
+ MULHD = 243,
+ MULHDU = 244,
+ MULHW = 245,
+ MULHWU = 246,
+ MULLD = 247,
+ MULLI = 248,
+ MULLW = 249,
+ MovePCtoLR = 250,
+ MovePCtoLR8 = 251,
+ NAND = 252,
+ NAND8 = 253,
+ NEG = 254,
+ NEG8 = 255,
+ NOP = 256,
+ NOR = 257,
+ NOR8 = 258,
+ OR = 259,
+ OR4To8 = 260,
+ OR8 = 261,
+ OR8To4 = 262,
+ ORC = 263,
+ ORC8 = 264,
+ ORI = 265,
+ ORI8 = 266,
+ ORIS = 267,
+ ORIS8 = 268,
+ RLDCL = 269,
+ RLDICL = 270,
+ RLDICR = 271,
+ RLDIMI = 272,
+ RLWIMI = 273,
+ RLWINM = 274,
+ RLWINMo = 275,
+ RLWNM = 276,
+ SELECT_CC_F4 = 277,
+ SELECT_CC_F8 = 278,
+ SELECT_CC_I4 = 279,
+ SELECT_CC_I8 = 280,
+ SELECT_CC_VRRC = 281,
+ SLD = 282,
+ SLW = 283,
+ SPILL_CR = 284,
+ SRAD = 285,
+ SRADI = 286,
+ SRAW = 287,
+ SRAWI = 288,
+ SRD = 289,
+ SRW = 290,
+ STB = 291,
+ STB8 = 292,
+ STBU = 293,
+ STBU8 = 294,
+ STBX = 295,
+ STBX8 = 296,
+ STD = 297,
+ STDCX = 298,
+ STDU = 299,
+ STDUX = 300,
+ STDX = 301,
+ STDX_32 = 302,
+ STD_32 = 303,
+ STFD = 304,
+ STFDU = 305,
+ STFDX = 306,
+ STFIWX = 307,
+ STFS = 308,
+ STFSU = 309,
+ STFSX = 310,
+ STH = 311,
+ STH8 = 312,
+ STHBRX = 313,
+ STHU = 314,
+ STHU8 = 315,
+ STHX = 316,
+ STHX8 = 317,
+ STVEBX = 318,
+ STVEHX = 319,
+ STVEWX = 320,
+ STVX = 321,
+ STVXL = 322,
+ STW = 323,
+ STW8 = 324,
+ STWBRX = 325,
+ STWCX = 326,
+ STWU = 327,
+ STWUX = 328,
+ STWX = 329,
+ STWX8 = 330,
+ SUBF = 331,
+ SUBF8 = 332,
+ SUBFC = 333,
+ SUBFC8 = 334,
+ SUBFE = 335,
+ SUBFE8 = 336,
+ SUBFIC = 337,
+ SUBFIC8 = 338,
+ SUBFME = 339,
+ SUBFME8 = 340,
+ SUBFZE = 341,
+ SUBFZE8 = 342,
+ SYNC = 343,
+ TAILB = 344,
+ TAILB8 = 345,
+ TAILBA = 346,
+ TAILBA8 = 347,
+ TAILBCTR = 348,
+ TAILBCTR8 = 349,
+ TCRETURNai = 350,
+ TCRETURNai8 = 351,
+ TCRETURNdi = 352,
+ TCRETURNdi8 = 353,
+ TCRETURNri = 354,
+ TCRETURNri8 = 355,
+ TRAP = 356,
+ UPDATE_VRSAVE = 357,
+ VADDCUW = 358,
+ VADDFP = 359,
+ VADDSBS = 360,
+ VADDSHS = 361,
+ VADDSWS = 362,
+ VADDUBM = 363,
+ VADDUBS = 364,
+ VADDUHM = 365,
+ VADDUHS = 366,
+ VADDUWM = 367,
+ VADDUWS = 368,
+ VAND = 369,
+ VANDC = 370,
+ VAVGSB = 371,
+ VAVGSH = 372,
+ VAVGSW = 373,
+ VAVGUB = 374,
+ VAVGUH = 375,
+ VAVGUW = 376,
+ VCFSX = 377,
+ VCFUX = 378,
+ VCMPBFP = 379,
+ VCMPBFPo = 380,
+ VCMPEQFP = 381,
+ VCMPEQFPo = 382,
+ VCMPEQUB = 383,
+ VCMPEQUBo = 384,
+ VCMPEQUH = 385,
+ VCMPEQUHo = 386,
+ VCMPEQUW = 387,
+ VCMPEQUWo = 388,
+ VCMPGEFP = 389,
+ VCMPGEFPo = 390,
+ VCMPGTFP = 391,
+ VCMPGTFPo = 392,
+ VCMPGTSB = 393,
+ VCMPGTSBo = 394,
+ VCMPGTSH = 395,
+ VCMPGTSHo = 396,
+ VCMPGTSW = 397,
+ VCMPGTSWo = 398,
+ VCMPGTUB = 399,
+ VCMPGTUBo = 400,
+ VCMPGTUH = 401,
+ VCMPGTUHo = 402,
+ VCMPGTUW = 403,
+ VCMPGTUWo = 404,
+ VCTSXS = 405,
+ VCTUXS = 406,
+ VEXPTEFP = 407,
+ VLOGEFP = 408,
+ VMADDFP = 409,
+ VMAXFP = 410,
+ VMAXSB = 411,
+ VMAXSH = 412,
+ VMAXSW = 413,
+ VMAXUB = 414,
+ VMAXUH = 415,
+ VMAXUW = 416,
+ VMHADDSHS = 417,
+ VMHRADDSHS = 418,
+ VMINFP = 419,
+ VMINSB = 420,
+ VMINSH = 421,
+ VMINSW = 422,
+ VMINUB = 423,
+ VMINUH = 424,
+ VMINUW = 425,
+ VMLADDUHM = 426,
+ VMRGHB = 427,
+ VMRGHH = 428,
+ VMRGHW = 429,
+ VMRGLB = 430,
+ VMRGLH = 431,
+ VMRGLW = 432,
+ VMSUMMBM = 433,
+ VMSUMSHM = 434,
+ VMSUMSHS = 435,
+ VMSUMUBM = 436,
+ VMSUMUHM = 437,
+ VMSUMUHS = 438,
+ VMULESB = 439,
+ VMULESH = 440,
+ VMULEUB = 441,
+ VMULEUH = 442,
+ VMULOSB = 443,
+ VMULOSH = 444,
+ VMULOUB = 445,
+ VMULOUH = 446,
+ VNMSUBFP = 447,
+ VNOR = 448,
+ VOR = 449,
+ VPERM = 450,
+ VPKPX = 451,
+ VPKSHSS = 452,
+ VPKSHUS = 453,
+ VPKSWSS = 454,
+ VPKSWUS = 455,
+ VPKUHUM = 456,
+ VPKUHUS = 457,
+ VPKUWUM = 458,
+ VPKUWUS = 459,
+ VREFP = 460,
+ VRFIM = 461,
+ VRFIN = 462,
+ VRFIP = 463,
+ VRFIZ = 464,
+ VRLB = 465,
+ VRLH = 466,
+ VRLW = 467,
+ VRSQRTEFP = 468,
+ VSEL = 469,
+ VSL = 470,
+ VSLB = 471,
+ VSLDOI = 472,
+ VSLH = 473,
+ VSLO = 474,
+ VSLW = 475,
+ VSPLTB = 476,
+ VSPLTH = 477,
+ VSPLTISB = 478,
+ VSPLTISH = 479,
+ VSPLTISW = 480,
+ VSPLTW = 481,
+ VSR = 482,
+ VSRAB = 483,
+ VSRAH = 484,
+ VSRAW = 485,
+ VSRB = 486,
+ VSRH = 487,
+ VSRO = 488,
+ VSRW = 489,
+ VSUBCUW = 490,
+ VSUBFP = 491,
+ VSUBSBS = 492,
+ VSUBSHS = 493,
+ VSUBSWS = 494,
+ VSUBUBM = 495,
+ VSUBUBS = 496,
+ VSUBUHM = 497,
+ VSUBUHS = 498,
+ VSUBUWM = 499,
+ VSUBUWS = 500,
+ VSUM2SWS = 501,
+ VSUM4SBS = 502,
+ VSUM4SHS = 503,
+ VSUM4UBS = 504,
+ VSUMSWS = 505,
+ VUPKHPX = 506,
+ VUPKHSB = 507,
+ VUPKHSH = 508,
+ VUPKLPX = 509,
+ VUPKLSB = 510,
+ VUPKLSH = 511,
+ VXOR = 512,
+ V_SET0 = 513,
+ XOR = 514,
+ XOR8 = 515,
+ XORI = 516,
+ XORI8 = 517,
+ XORIS = 518,
+ XORIS8 = 519,
+ INSTRUCTION_LIST_END = 520
};
}
} // End llvm namespace
diff --git a/libclamav/c++/PPCGenRegisterInfo.h.inc b/libclamav/c++/PPCGenRegisterInfo.h.inc
index 9997c92..aca71df 100644
--- a/libclamav/c++/PPCGenRegisterInfo.h.inc
+++ b/libclamav/c++/PPCGenRegisterInfo.h.inc
@@ -19,21 +19,22 @@ struct PPCGenRegisterInfo : public TargetRegisterInfo {
{ return false; }
unsigned getSubReg(unsigned RegNo, unsigned Index) const;
unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const;
+ unsigned composeSubRegIndices(unsigned, unsigned) const;
};
namespace PPC { // Register classes
enum {
- CARRYRCRegClassID = 1,
- CRBITRCRegClassID = 2,
- CRRCRegClassID = 3,
- CTRRCRegClassID = 4,
- CTRRC8RegClassID = 5,
- F4RCRegClassID = 6,
- F8RCRegClassID = 7,
- G8RCRegClassID = 8,
- GPRCRegClassID = 9,
- VRRCRegClassID = 10,
- VRSAVERCRegClassID = 11
+ CARRYRCRegClassID = 0,
+ CRBITRCRegClassID = 1,
+ CRRCRegClassID = 2,
+ CTRRCRegClassID = 3,
+ CTRRC8RegClassID = 4,
+ F4RCRegClassID = 5,
+ F8RCRegClassID = 6,
+ G8RCRegClassID = 7,
+ GPRCRegClassID = 8,
+ VRRCRegClassID = 9,
+ VRSAVERCRegClassID = 10
};
struct CARRYRCClass : public TargetRegisterClass {
diff --git a/libclamav/c++/PPCGenRegisterInfo.inc b/libclamav/c++/PPCGenRegisterInfo.inc
index 3a88052..87c97bc 100644
--- a/libclamav/c++/PPCGenRegisterInfo.inc
+++ b/libclamav/c++/PPCGenRegisterInfo.inc
@@ -136,57 +136,57 @@ namespace PPC { // Register class instances
// CARRYRC Sub-register Classes...
static const TargetRegisterClass* const CARRYRCSubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0
};
// CRBITRC Sub-register Classes...
static const TargetRegisterClass* const CRBITRCSubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0
};
// CRRC Sub-register Classes...
static const TargetRegisterClass* const CRRCSubRegClasses[] = {
- &PPC::CRBITRCRegClass, &PPC::CRBITRCRegClass, &PPC::CRBITRCRegClass, &PPC::CRBITRCRegClass, NULL
+ 0, &PPC::CRBITRCRegClass, &PPC::CRBITRCRegClass, &PPC::CRBITRCRegClass, &PPC::CRBITRCRegClass
};
// CTRRC Sub-register Classes...
static const TargetRegisterClass* const CTRRCSubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0
};
// CTRRC8 Sub-register Classes...
static const TargetRegisterClass* const CTRRC8SubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0
};
// F4RC Sub-register Classes...
static const TargetRegisterClass* const F4RCSubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0
};
// F8RC Sub-register Classes...
static const TargetRegisterClass* const F8RCSubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0
};
// G8RC Sub-register Classes...
static const TargetRegisterClass* const G8RCSubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0
};
// GPRC Sub-register Classes...
static const TargetRegisterClass* const GPRCSubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0
};
// VRRC Sub-register Classes...
static const TargetRegisterClass* const VRRCSubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0
};
// VRSAVERC Sub-register Classes...
static const TargetRegisterClass* const VRSAVERCSubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0
};
// CARRYRC Super-register Classes...
@@ -451,7 +451,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X22, PPC::R22,
+ PPC::X15, PPC::R15,
PPC::NoRegister, PPC::NoRegister,
PPC::CR2, PPC::CR2GT,
PPC::NoRegister, PPC::NoRegister,
@@ -469,9 +469,9 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X10, PPC::R10,
+ PPC::X2, PPC::R2,
PPC::NoRegister, PPC::NoRegister,
- PPC::X7, PPC::R7,
+ PPC::X29, PPC::R29,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::CR5, PPC::CR5EQ,
@@ -489,7 +489,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X23, PPC::R23,
+ PPC::X16, PPC::R16,
PPC::CR2, PPC::CR2LT,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -507,9 +507,9 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X11, PPC::R11,
+ PPC::X3, PPC::R3,
PPC::NoRegister, PPC::NoRegister,
- PPC::X8, PPC::R8,
+ PPC::X30, PPC::R30,
PPC::NoRegister, PPC::NoRegister,
PPC::CR5, PPC::CR5GT,
PPC::NoRegister, PPC::NoRegister,
@@ -529,7 +529,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::CR2, PPC::CR2UN,
PPC::NoRegister, PPC::NoRegister,
- PPC::X24, PPC::R24,
+ PPC::X17, PPC::R17,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -545,9 +545,9 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X12, PPC::R12,
+ PPC::X4, PPC::R4,
PPC::NoRegister, PPC::NoRegister,
- PPC::X9, PPC::R9,
+ PPC::X31, PPC::R31,
PPC::CR0, PPC::CR0EQ,
PPC::NoRegister, PPC::NoRegister,
PPC::CR5, PPC::CR5LT,
@@ -565,7 +565,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X25, PPC::R25,
+ PPC::X18, PPC::R18,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -583,7 +583,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X13, PPC::R13,
+ PPC::X5, PPC::R5,
PPC::NoRegister, PPC::NoRegister,
PPC::CR0, PPC::CR0GT,
PPC::NoRegister, PPC::NoRegister,
@@ -603,7 +603,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X26, PPC::R26,
+ PPC::X19, PPC::R19,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::CR3, PPC::CR3EQ,
@@ -621,7 +621,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X14, PPC::R14,
+ PPC::X6, PPC::R6,
PPC::CR0, PPC::CR0LT,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -641,7 +641,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X27, PPC::R27,
+ PPC::X20, PPC::R20,
PPC::NoRegister, PPC::NoRegister,
PPC::CR3, PPC::CR3GT,
PPC::NoRegister, PPC::NoRegister,
@@ -661,7 +661,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::CR0, PPC::CR0UN,
PPC::NoRegister, PPC::NoRegister,
- PPC::X15, PPC::R15,
+ PPC::X7, PPC::R7,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::CR6, PPC::CR6EQ,
@@ -679,7 +679,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X28, PPC::R28,
+ PPC::X21, PPC::R21,
PPC::CR3, PPC::CR3LT,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -697,7 +697,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X16, PPC::R16,
+ PPC::X8, PPC::R8,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -719,7 +719,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::CR3, PPC::CR3UN,
PPC::NoRegister, PPC::NoRegister,
- PPC::X29, PPC::R29,
+ PPC::X22, PPC::R22,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -735,7 +735,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X17, PPC::R17,
+ PPC::X9, PPC::R9,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::CR1, PPC::CR1EQ,
@@ -755,7 +755,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X3, PPC::R3,
+ PPC::X23, PPC::R23,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -773,7 +773,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X18, PPC::R18,
+ PPC::X10, PPC::R10,
PPC::NoRegister, PPC::NoRegister,
PPC::CR1, PPC::CR1GT,
PPC::NoRegister, PPC::NoRegister,
@@ -793,7 +793,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X30, PPC::R30,
+ PPC::X24, PPC::R24,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::CR4, PPC::CR4EQ,
@@ -811,7 +811,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X19, PPC::R19,
+ PPC::X11, PPC::R11,
PPC::CR1, PPC::CR1LT,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -831,7 +831,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X31, PPC::R31,
+ PPC::X25, PPC::R25,
PPC::NoRegister, PPC::NoRegister,
PPC::CR4, PPC::CR4GT,
PPC::NoRegister, PPC::NoRegister,
@@ -851,7 +851,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::CR1, PPC::CR1UN,
PPC::NoRegister, PPC::NoRegister,
- PPC::X2, PPC::R2,
+ PPC::X12, PPC::R12,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::CR7, PPC::CR7EQ,
@@ -869,7 +869,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X4, PPC::R4,
+ PPC::X26, PPC::R26,
PPC::CR4, PPC::CR4LT,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -887,7 +887,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X20, PPC::R20,
+ PPC::X13, PPC::R13,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -909,7 +909,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::CR4, PPC::CR4UN,
PPC::NoRegister, PPC::NoRegister,
- PPC::X5, PPC::R5,
+ PPC::X27, PPC::R27,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -925,7 +925,7 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X21, PPC::R21,
+ PPC::X14, PPC::R14,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::CR2, PPC::CR2EQ,
@@ -945,527 +945,11 @@ namespace {
PPC::NoRegister, PPC::NoRegister,
PPC::X1, PPC::R1,
PPC::NoRegister, PPC::NoRegister,
- PPC::X6, PPC::R6,
+ PPC::X28, PPC::R28,
PPC::NoRegister, PPC::NoRegister };
const unsigned SubregHashTableSize = 512;
- // Number of hash collisions: 10
- const unsigned SuperregHashTable[] = { PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R14, PPC::X14,
- PPC::CR5EQ, PPC::CR5,
- PPC::CR5GT, PPC::CR5,
- PPC::CR5LT, PPC::CR5,
- PPC::CR5UN, PPC::CR5,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R27, PPC::X27,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R15, PPC::X15,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R28, PPC::X28,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::CR0EQ, PPC::CR0,
- PPC::CR0GT, PPC::CR0,
- PPC::CR0LT, PPC::CR0,
- PPC::CR0UN, PPC::CR0,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R16, PPC::X16,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R29, PPC::X29,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R17, PPC::X17,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::CR3EQ, PPC::CR3,
- PPC::CR3GT, PPC::CR3,
- PPC::CR3LT, PPC::CR3,
- PPC::CR3UN, PPC::CR3,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R3, PPC::X3,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R18, PPC::X18,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R30, PPC::X30,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R19, PPC::X19,
- PPC::CR6EQ, PPC::CR6,
- PPC::CR6GT, PPC::CR6,
- PPC::CR6LT, PPC::CR6,
- PPC::CR6UN, PPC::CR6,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R31, PPC::X31,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R2, PPC::X2,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R4, PPC::X4,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::CR1EQ, PPC::CR1,
- PPC::CR1GT, PPC::CR1,
- PPC::CR1LT, PPC::CR1,
- PPC::CR1UN, PPC::CR1,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R20, PPC::X20,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R0, PPC::X0,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R5, PPC::X5,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R21, PPC::X21,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R1, PPC::X1,
- PPC::CR4EQ, PPC::CR4,
- PPC::CR4GT, PPC::CR4,
- PPC::CR4LT, PPC::CR4,
- PPC::CR4UN, PPC::CR4,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R6, PPC::X6,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R22, PPC::X22,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R10, PPC::X10,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R7, PPC::X7,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R23, PPC::X23,
- PPC::CR7EQ, PPC::CR7,
- PPC::CR7GT, PPC::CR7,
- PPC::CR7LT, PPC::CR7,
- PPC::CR7UN, PPC::CR7,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R11, PPC::X11,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R8, PPC::X8,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R24, PPC::X24,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R12, PPC::X12,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R9, PPC::X9,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::CR2EQ, PPC::CR2,
- PPC::CR2GT, PPC::CR2,
- PPC::CR2LT, PPC::CR2,
- PPC::CR2UN, PPC::CR2,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R25, PPC::X25,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R13, PPC::X13,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::R26, PPC::X26,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
- PPC::NoRegister, PPC::NoRegister,
-PPC::NoRegister, PPC::NoRegister };
- const unsigned SuperregHashTableSize = 512;
-
-
// Number of hash collisions: 11
const unsigned AliasesHashTable[] = { PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1489,7 +973,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R27, PPC::X27,
+ PPC::R20, PPC::X20,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1501,9 +985,9 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X10, PPC::R10,
+ PPC::X2, PPC::R2,
PPC::NoRegister, PPC::NoRegister,
- PPC::X7, PPC::R7,
+ PPC::X29, PPC::R29,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::CR5, PPC::CR5EQ,
@@ -1527,7 +1011,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R28, PPC::X28,
+ PPC::R21, PPC::X21,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1539,9 +1023,9 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X11, PPC::R11,
+ PPC::X3, PPC::R3,
PPC::NoRegister, PPC::NoRegister,
- PPC::X8, PPC::R8,
+ PPC::X30, PPC::R30,
PPC::NoRegister, PPC::NoRegister,
PPC::CR5, PPC::CR5GT,
PPC::CR0EQ, PPC::CR0,
@@ -1565,7 +1049,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R29, PPC::X29,
+ PPC::R22, PPC::X22,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1577,9 +1061,9 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X12, PPC::R12,
+ PPC::X4, PPC::R4,
PPC::NoRegister, PPC::NoRegister,
- PPC::X9, PPC::R9,
+ PPC::X31, PPC::R31,
PPC::CR0, PPC::CR0EQ,
PPC::NoRegister, PPC::NoRegister,
PPC::CR5, PPC::CR5LT,
@@ -1603,7 +1087,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R3, PPC::X3,
+ PPC::R23, PPC::X23,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1615,7 +1099,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X13, PPC::R13,
+ PPC::X5, PPC::R5,
PPC::NoRegister, PPC::NoRegister,
PPC::CR0, PPC::CR0GT,
PPC::NoRegister, PPC::NoRegister,
@@ -1641,7 +1125,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R30, PPC::X30,
+ PPC::R24, PPC::X24,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1653,7 +1137,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X14, PPC::R14,
+ PPC::X6, PPC::R6,
PPC::CR0, PPC::CR0LT,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1679,7 +1163,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R31, PPC::X31,
+ PPC::R25, PPC::X25,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1693,7 +1177,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::CR0, PPC::CR0UN,
PPC::NoRegister, PPC::NoRegister,
- PPC::X15, PPC::R15,
+ PPC::X7, PPC::R7,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::CR6, PPC::CR6EQ,
@@ -1717,7 +1201,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R4, PPC::X4,
+ PPC::R26, PPC::X26,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1729,7 +1213,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X16, PPC::R16,
+ PPC::X8, PPC::R8,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1755,7 +1239,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::R0, PPC::X0,
PPC::NoRegister, PPC::NoRegister,
- PPC::R5, PPC::X5,
+ PPC::R27, PPC::X27,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1767,7 +1251,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X17, PPC::R17,
+ PPC::X9, PPC::R9,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::CR1, PPC::CR1EQ,
@@ -1793,7 +1277,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::R1, PPC::X1,
PPC::NoRegister, PPC::NoRegister,
- PPC::R6, PPC::X6,
+ PPC::R28, PPC::X28,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1805,7 +1289,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X18, PPC::R18,
+ PPC::X10, PPC::R10,
PPC::NoRegister, PPC::NoRegister,
PPC::CR1, PPC::CR1GT,
PPC::NoRegister, PPC::NoRegister,
@@ -1829,9 +1313,9 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R10, PPC::X10,
+ PPC::R2, PPC::X2,
PPC::NoRegister, PPC::NoRegister,
- PPC::R7, PPC::X7,
+ PPC::R29, PPC::X29,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1843,7 +1327,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X19, PPC::R19,
+ PPC::X11, PPC::R11,
PPC::CR1, PPC::CR1LT,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1867,9 +1351,9 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R11, PPC::X11,
+ PPC::R3, PPC::X3,
PPC::NoRegister, PPC::NoRegister,
- PPC::R8, PPC::X8,
+ PPC::R30, PPC::X30,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1883,7 +1367,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::CR1, PPC::CR1UN,
PPC::NoRegister, PPC::NoRegister,
- PPC::X2, PPC::R2,
+ PPC::X12, PPC::R12,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::CR7, PPC::CR7EQ,
@@ -1905,9 +1389,9 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R12, PPC::X12,
+ PPC::R4, PPC::X4,
PPC::NoRegister, PPC::NoRegister,
- PPC::R9, PPC::X9,
+ PPC::R31, PPC::X31,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1919,7 +1403,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X20, PPC::R20,
+ PPC::X13, PPC::R13,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1943,7 +1427,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R13, PPC::X13,
+ PPC::R5, PPC::X5,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1957,7 +1441,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X21, PPC::R21,
+ PPC::X14, PPC::R14,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::CR2, PPC::CR2EQ,
@@ -1981,7 +1465,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R14, PPC::X14,
+ PPC::R6, PPC::X6,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -1995,7 +1479,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X22, PPC::R22,
+ PPC::X15, PPC::R15,
PPC::NoRegister, PPC::NoRegister,
PPC::CR2, PPC::CR2GT,
PPC::NoRegister, PPC::NoRegister,
@@ -2019,7 +1503,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R15, PPC::X15,
+ PPC::R7, PPC::X7,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2033,7 +1517,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X23, PPC::R23,
+ PPC::X16, PPC::R16,
PPC::CR2, PPC::CR2LT,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2057,7 +1541,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R16, PPC::X16,
+ PPC::R8, PPC::X8,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2073,7 +1557,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::CR2, PPC::CR2UN,
PPC::NoRegister, PPC::NoRegister,
- PPC::X24, PPC::R24,
+ PPC::X17, PPC::R17,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2095,7 +1579,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R17, PPC::X17,
+ PPC::R9, PPC::X9,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2109,7 +1593,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X25, PPC::R25,
+ PPC::X18, PPC::R18,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2133,7 +1617,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R18, PPC::X18,
+ PPC::R10, PPC::X10,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2147,7 +1631,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X26, PPC::R26,
+ PPC::X19, PPC::R19,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::CR3, PPC::CR3EQ,
@@ -2171,7 +1655,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R19, PPC::X19,
+ PPC::R11, PPC::X11,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2185,7 +1669,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X27, PPC::R27,
+ PPC::X20, PPC::R20,
PPC::NoRegister, PPC::NoRegister,
PPC::CR3, PPC::CR3GT,
PPC::NoRegister, PPC::NoRegister,
@@ -2209,7 +1693,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R2, PPC::X2,
+ PPC::R12, PPC::X12,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2223,7 +1707,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X28, PPC::R28,
+ PPC::X21, PPC::R21,
PPC::CR3, PPC::CR3LT,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2247,7 +1731,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R20, PPC::X20,
+ PPC::R13, PPC::X13,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2263,7 +1747,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::CR3, PPC::CR3UN,
PPC::NoRegister, PPC::NoRegister,
- PPC::X29, PPC::R29,
+ PPC::X22, PPC::R22,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2285,7 +1769,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R21, PPC::X21,
+ PPC::R14, PPC::X14,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2299,7 +1783,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X3, PPC::R3,
+ PPC::X23, PPC::R23,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2323,7 +1807,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R22, PPC::X22,
+ PPC::R15, PPC::X15,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2337,7 +1821,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X30, PPC::R30,
+ PPC::X24, PPC::R24,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::CR4, PPC::CR4EQ,
@@ -2361,7 +1845,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R23, PPC::X23,
+ PPC::R16, PPC::X16,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2375,7 +1859,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X31, PPC::R31,
+ PPC::X25, PPC::R25,
PPC::NoRegister, PPC::NoRegister,
PPC::CR4, PPC::CR4GT,
PPC::NoRegister, PPC::NoRegister,
@@ -2399,7 +1883,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R24, PPC::X24,
+ PPC::R17, PPC::X17,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2413,7 +1897,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::X4, PPC::R4,
+ PPC::X26, PPC::R26,
PPC::CR4, PPC::CR4LT,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2437,7 +1921,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R25, PPC::X25,
+ PPC::R18, PPC::X18,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2453,7 +1937,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::CR4, PPC::CR4UN,
PPC::NoRegister, PPC::NoRegister,
- PPC::X5, PPC::R5,
+ PPC::X27, PPC::R27,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2475,7 +1959,7 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
- PPC::R26, PPC::X26,
+ PPC::R19, PPC::X19,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
PPC::NoRegister, PPC::NoRegister,
@@ -2489,14 +1973,13 @@ PPC::NoRegister, PPC::NoRegister };
PPC::NoRegister, PPC::NoRegister,
PPC::X1, PPC::R1,
PPC::NoRegister, PPC::NoRegister,
- PPC::X6, PPC::R6,
+ PPC::X28, PPC::R28,
PPC::NoRegister, PPC::NoRegister };
const unsigned AliasesHashTableSize = 1024;
// Register Alias Sets...
const unsigned Empty_AliasSet[] = { 0 };
- const unsigned CARRY_AliasSet[] = { 0 };
const unsigned CR0_AliasSet[] = { PPC::CR0LT, PPC::CR0GT, PPC::CR0EQ, PPC::CR0UN, 0 };
const unsigned CR0EQ_AliasSet[] = { PPC::CR0, 0 };
const unsigned CR0GT_AliasSet[] = { PPC::CR0, 0 };
@@ -2537,44 +2020,16 @@ PPC::NoRegister, PPC::NoRegister };
const unsigned CR7GT_AliasSet[] = { PPC::CR7, 0 };
const unsigned CR7LT_AliasSet[] = { PPC::CR7, 0 };
const unsigned CR7UN_AliasSet[] = { PPC::CR7, 0 };
- const unsigned CTR_AliasSet[] = { 0 };
- const unsigned CTR8_AliasSet[] = { 0 };
- const unsigned F0_AliasSet[] = { 0 };
- const unsigned F1_AliasSet[] = { 0 };
- const unsigned F10_AliasSet[] = { 0 };
- const unsigned F11_AliasSet[] = { 0 };
- const unsigned F12_AliasSet[] = { 0 };
- const unsigned F13_AliasSet[] = { 0 };
- const unsigned F14_AliasSet[] = { 0 };
- const unsigned F15_AliasSet[] = { 0 };
- const unsigned F16_AliasSet[] = { 0 };
- const unsigned F17_AliasSet[] = { 0 };
- const unsigned F18_AliasSet[] = { 0 };
- const unsigned F19_AliasSet[] = { 0 };
- const unsigned F2_AliasSet[] = { 0 };
- const unsigned F20_AliasSet[] = { 0 };
- const unsigned F21_AliasSet[] = { 0 };
- const unsigned F22_AliasSet[] = { 0 };
- const unsigned F23_AliasSet[] = { 0 };
- const unsigned F24_AliasSet[] = { 0 };
- const unsigned F25_AliasSet[] = { 0 };
- const unsigned F26_AliasSet[] = { 0 };
- const unsigned F27_AliasSet[] = { 0 };
- const unsigned F28_AliasSet[] = { 0 };
- const unsigned F29_AliasSet[] = { 0 };
- const unsigned F3_AliasSet[] = { 0 };
- const unsigned F30_AliasSet[] = { 0 };
- const unsigned F31_AliasSet[] = { 0 };
- const unsigned F4_AliasSet[] = { 0 };
- const unsigned F5_AliasSet[] = { 0 };
- const unsigned F6_AliasSet[] = { 0 };
- const unsigned F7_AliasSet[] = { 0 };
- const unsigned F8_AliasSet[] = { 0 };
- const unsigned F9_AliasSet[] = { 0 };
- const unsigned LR_AliasSet[] = { 0 };
- const unsigned LR8_AliasSet[] = { 0 };
const unsigned R0_AliasSet[] = { PPC::X0, 0 };
const unsigned R1_AliasSet[] = { PPC::X1, 0 };
+ const unsigned R2_AliasSet[] = { PPC::X2, 0 };
+ const unsigned R3_AliasSet[] = { PPC::X3, 0 };
+ const unsigned R4_AliasSet[] = { PPC::X4, 0 };
+ const unsigned R5_AliasSet[] = { PPC::X5, 0 };
+ const unsigned R6_AliasSet[] = { PPC::X6, 0 };
+ const unsigned R7_AliasSet[] = { PPC::X7, 0 };
+ const unsigned R8_AliasSet[] = { PPC::X8, 0 };
+ const unsigned R9_AliasSet[] = { PPC::X9, 0 };
const unsigned R10_AliasSet[] = { PPC::X10, 0 };
const unsigned R11_AliasSet[] = { PPC::X11, 0 };
const unsigned R12_AliasSet[] = { PPC::X12, 0 };
@@ -2585,7 +2040,6 @@ PPC::NoRegister, PPC::NoRegister };
const unsigned R17_AliasSet[] = { PPC::X17, 0 };
const unsigned R18_AliasSet[] = { PPC::X18, 0 };
const unsigned R19_AliasSet[] = { PPC::X19, 0 };
- const unsigned R2_AliasSet[] = { PPC::X2, 0 };
const unsigned R20_AliasSet[] = { PPC::X20, 0 };
const unsigned R21_AliasSet[] = { PPC::X21, 0 };
const unsigned R22_AliasSet[] = { PPC::X22, 0 };
@@ -2596,51 +2050,18 @@ PPC::NoRegister, PPC::NoRegister };
const unsigned R27_AliasSet[] = { PPC::X27, 0 };
const unsigned R28_AliasSet[] = { PPC::X28, 0 };
const unsigned R29_AliasSet[] = { PPC::X29, 0 };
- const unsigned R3_AliasSet[] = { PPC::X3, 0 };
const unsigned R30_AliasSet[] = { PPC::X30, 0 };
const unsigned R31_AliasSet[] = { PPC::X31, 0 };
- const unsigned R4_AliasSet[] = { PPC::X4, 0 };
- const unsigned R5_AliasSet[] = { PPC::X5, 0 };
- const unsigned R6_AliasSet[] = { PPC::X6, 0 };
- const unsigned R7_AliasSet[] = { PPC::X7, 0 };
- const unsigned R8_AliasSet[] = { PPC::X8, 0 };
- const unsigned R9_AliasSet[] = { PPC::X9, 0 };
- const unsigned RM_AliasSet[] = { 0 };
- const unsigned V0_AliasSet[] = { 0 };
- const unsigned V1_AliasSet[] = { 0 };
- const unsigned V10_AliasSet[] = { 0 };
- const unsigned V11_AliasSet[] = { 0 };
- const unsigned V12_AliasSet[] = { 0 };
- const unsigned V13_AliasSet[] = { 0 };
- const unsigned V14_AliasSet[] = { 0 };
- const unsigned V15_AliasSet[] = { 0 };
- const unsigned V16_AliasSet[] = { 0 };
- const unsigned V17_AliasSet[] = { 0 };
- const unsigned V18_AliasSet[] = { 0 };
- const unsigned V19_AliasSet[] = { 0 };
- const unsigned V2_AliasSet[] = { 0 };
- const unsigned V20_AliasSet[] = { 0 };
- const unsigned V21_AliasSet[] = { 0 };
- const unsigned V22_AliasSet[] = { 0 };
- const unsigned V23_AliasSet[] = { 0 };
- const unsigned V24_AliasSet[] = { 0 };
- const unsigned V25_AliasSet[] = { 0 };
- const unsigned V26_AliasSet[] = { 0 };
- const unsigned V27_AliasSet[] = { 0 };
- const unsigned V28_AliasSet[] = { 0 };
- const unsigned V29_AliasSet[] = { 0 };
- const unsigned V3_AliasSet[] = { 0 };
- const unsigned V30_AliasSet[] = { 0 };
- const unsigned V31_AliasSet[] = { 0 };
- const unsigned V4_AliasSet[] = { 0 };
- const unsigned V5_AliasSet[] = { 0 };
- const unsigned V6_AliasSet[] = { 0 };
- const unsigned V7_AliasSet[] = { 0 };
- const unsigned V8_AliasSet[] = { 0 };
- const unsigned V9_AliasSet[] = { 0 };
- const unsigned VRSAVE_AliasSet[] = { 0 };
const unsigned X0_AliasSet[] = { PPC::R0, 0 };
const unsigned X1_AliasSet[] = { PPC::R1, 0 };
+ const unsigned X2_AliasSet[] = { PPC::R2, 0 };
+ const unsigned X3_AliasSet[] = { PPC::R3, 0 };
+ const unsigned X4_AliasSet[] = { PPC::R4, 0 };
+ const unsigned X5_AliasSet[] = { PPC::R5, 0 };
+ const unsigned X6_AliasSet[] = { PPC::R6, 0 };
+ const unsigned X7_AliasSet[] = { PPC::R7, 0 };
+ const unsigned X8_AliasSet[] = { PPC::R8, 0 };
+ const unsigned X9_AliasSet[] = { PPC::R9, 0 };
const unsigned X10_AliasSet[] = { PPC::R10, 0 };
const unsigned X11_AliasSet[] = { PPC::R11, 0 };
const unsigned X12_AliasSet[] = { PPC::R12, 0 };
@@ -2651,7 +2072,6 @@ PPC::NoRegister, PPC::NoRegister };
const unsigned X17_AliasSet[] = { PPC::R17, 0 };
const unsigned X18_AliasSet[] = { PPC::R18, 0 };
const unsigned X19_AliasSet[] = { PPC::R19, 0 };
- const unsigned X2_AliasSet[] = { PPC::R2, 0 };
const unsigned X20_AliasSet[] = { PPC::R20, 0 };
const unsigned X21_AliasSet[] = { PPC::R21, 0 };
const unsigned X22_AliasSet[] = { PPC::R22, 0 };
@@ -2662,164 +2082,30 @@ PPC::NoRegister, PPC::NoRegister };
const unsigned X27_AliasSet[] = { PPC::R27, 0 };
const unsigned X28_AliasSet[] = { PPC::R28, 0 };
const unsigned X29_AliasSet[] = { PPC::R29, 0 };
- const unsigned X3_AliasSet[] = { PPC::R3, 0 };
const unsigned X30_AliasSet[] = { PPC::R30, 0 };
const unsigned X31_AliasSet[] = { PPC::R31, 0 };
- const unsigned X4_AliasSet[] = { PPC::R4, 0 };
- const unsigned X5_AliasSet[] = { PPC::R5, 0 };
- const unsigned X6_AliasSet[] = { PPC::R6, 0 };
- const unsigned X7_AliasSet[] = { PPC::R7, 0 };
- const unsigned X8_AliasSet[] = { PPC::R8, 0 };
- const unsigned X9_AliasSet[] = { PPC::R9, 0 };
// Register Sub-registers Sets...
const unsigned Empty_SubRegsSet[] = { 0 };
- const unsigned CARRY_SubRegsSet[] = { 0 };
const unsigned CR0_SubRegsSet[] = { PPC::CR0LT, PPC::CR0GT, PPC::CR0EQ, PPC::CR0UN, 0 };
- const unsigned CR0EQ_SubRegsSet[] = { 0 };
- const unsigned CR0GT_SubRegsSet[] = { 0 };
- const unsigned CR0LT_SubRegsSet[] = { 0 };
- const unsigned CR0UN_SubRegsSet[] = { 0 };
const unsigned CR1_SubRegsSet[] = { PPC::CR1LT, PPC::CR1GT, PPC::CR1EQ, PPC::CR1UN, 0 };
- const unsigned CR1EQ_SubRegsSet[] = { 0 };
- const unsigned CR1GT_SubRegsSet[] = { 0 };
- const unsigned CR1LT_SubRegsSet[] = { 0 };
- const unsigned CR1UN_SubRegsSet[] = { 0 };
const unsigned CR2_SubRegsSet[] = { PPC::CR2LT, PPC::CR2GT, PPC::CR2EQ, PPC::CR2UN, 0 };
- const unsigned CR2EQ_SubRegsSet[] = { 0 };
- const unsigned CR2GT_SubRegsSet[] = { 0 };
- const unsigned CR2LT_SubRegsSet[] = { 0 };
- const unsigned CR2UN_SubRegsSet[] = { 0 };
const unsigned CR3_SubRegsSet[] = { PPC::CR3LT, PPC::CR3GT, PPC::CR3EQ, PPC::CR3UN, 0 };
- const unsigned CR3EQ_SubRegsSet[] = { 0 };
- const unsigned CR3GT_SubRegsSet[] = { 0 };
- const unsigned CR3LT_SubRegsSet[] = { 0 };
- const unsigned CR3UN_SubRegsSet[] = { 0 };
const unsigned CR4_SubRegsSet[] = { PPC::CR4LT, PPC::CR4GT, PPC::CR4EQ, PPC::CR4UN, 0 };
- const unsigned CR4EQ_SubRegsSet[] = { 0 };
- const unsigned CR4GT_SubRegsSet[] = { 0 };
- const unsigned CR4LT_SubRegsSet[] = { 0 };
- const unsigned CR4UN_SubRegsSet[] = { 0 };
const unsigned CR5_SubRegsSet[] = { PPC::CR5LT, PPC::CR5GT, PPC::CR5EQ, PPC::CR5UN, 0 };
- const unsigned CR5EQ_SubRegsSet[] = { 0 };
- const unsigned CR5GT_SubRegsSet[] = { 0 };
- const unsigned CR5LT_SubRegsSet[] = { 0 };
- const unsigned CR5UN_SubRegsSet[] = { 0 };
const unsigned CR6_SubRegsSet[] = { PPC::CR6LT, PPC::CR6GT, PPC::CR6EQ, PPC::CR6UN, 0 };
- const unsigned CR6EQ_SubRegsSet[] = { 0 };
- const unsigned CR6GT_SubRegsSet[] = { 0 };
- const unsigned CR6LT_SubRegsSet[] = { 0 };
- const unsigned CR6UN_SubRegsSet[] = { 0 };
const unsigned CR7_SubRegsSet[] = { PPC::CR7LT, PPC::CR7GT, PPC::CR7EQ, PPC::CR7UN, 0 };
- const unsigned CR7EQ_SubRegsSet[] = { 0 };
- const unsigned CR7GT_SubRegsSet[] = { 0 };
- const unsigned CR7LT_SubRegsSet[] = { 0 };
- const unsigned CR7UN_SubRegsSet[] = { 0 };
- const unsigned CTR_SubRegsSet[] = { 0 };
- const unsigned CTR8_SubRegsSet[] = { 0 };
- const unsigned F0_SubRegsSet[] = { 0 };
- const unsigned F1_SubRegsSet[] = { 0 };
- const unsigned F10_SubRegsSet[] = { 0 };
- const unsigned F11_SubRegsSet[] = { 0 };
- const unsigned F12_SubRegsSet[] = { 0 };
- const unsigned F13_SubRegsSet[] = { 0 };
- const unsigned F14_SubRegsSet[] = { 0 };
- const unsigned F15_SubRegsSet[] = { 0 };
- const unsigned F16_SubRegsSet[] = { 0 };
- const unsigned F17_SubRegsSet[] = { 0 };
- const unsigned F18_SubRegsSet[] = { 0 };
- const unsigned F19_SubRegsSet[] = { 0 };
- const unsigned F2_SubRegsSet[] = { 0 };
- const unsigned F20_SubRegsSet[] = { 0 };
- const unsigned F21_SubRegsSet[] = { 0 };
- const unsigned F22_SubRegsSet[] = { 0 };
- const unsigned F23_SubRegsSet[] = { 0 };
- const unsigned F24_SubRegsSet[] = { 0 };
- const unsigned F25_SubRegsSet[] = { 0 };
- const unsigned F26_SubRegsSet[] = { 0 };
- const unsigned F27_SubRegsSet[] = { 0 };
- const unsigned F28_SubRegsSet[] = { 0 };
- const unsigned F29_SubRegsSet[] = { 0 };
- const unsigned F3_SubRegsSet[] = { 0 };
- const unsigned F30_SubRegsSet[] = { 0 };
- const unsigned F31_SubRegsSet[] = { 0 };
- const unsigned F4_SubRegsSet[] = { 0 };
- const unsigned F5_SubRegsSet[] = { 0 };
- const unsigned F6_SubRegsSet[] = { 0 };
- const unsigned F7_SubRegsSet[] = { 0 };
- const unsigned F8_SubRegsSet[] = { 0 };
- const unsigned F9_SubRegsSet[] = { 0 };
- const unsigned LR_SubRegsSet[] = { 0 };
- const unsigned LR8_SubRegsSet[] = { 0 };
- const unsigned R0_SubRegsSet[] = { 0 };
- const unsigned R1_SubRegsSet[] = { 0 };
- const unsigned R10_SubRegsSet[] = { 0 };
- const unsigned R11_SubRegsSet[] = { 0 };
- const unsigned R12_SubRegsSet[] = { 0 };
- const unsigned R13_SubRegsSet[] = { 0 };
- const unsigned R14_SubRegsSet[] = { 0 };
- const unsigned R15_SubRegsSet[] = { 0 };
- const unsigned R16_SubRegsSet[] = { 0 };
- const unsigned R17_SubRegsSet[] = { 0 };
- const unsigned R18_SubRegsSet[] = { 0 };
- const unsigned R19_SubRegsSet[] = { 0 };
- const unsigned R2_SubRegsSet[] = { 0 };
- const unsigned R20_SubRegsSet[] = { 0 };
- const unsigned R21_SubRegsSet[] = { 0 };
- const unsigned R22_SubRegsSet[] = { 0 };
- const unsigned R23_SubRegsSet[] = { 0 };
- const unsigned R24_SubRegsSet[] = { 0 };
- const unsigned R25_SubRegsSet[] = { 0 };
- const unsigned R26_SubRegsSet[] = { 0 };
- const unsigned R27_SubRegsSet[] = { 0 };
- const unsigned R28_SubRegsSet[] = { 0 };
- const unsigned R29_SubRegsSet[] = { 0 };
- const unsigned R3_SubRegsSet[] = { 0 };
- const unsigned R30_SubRegsSet[] = { 0 };
- const unsigned R31_SubRegsSet[] = { 0 };
- const unsigned R4_SubRegsSet[] = { 0 };
- const unsigned R5_SubRegsSet[] = { 0 };
- const unsigned R6_SubRegsSet[] = { 0 };
- const unsigned R7_SubRegsSet[] = { 0 };
- const unsigned R8_SubRegsSet[] = { 0 };
- const unsigned R9_SubRegsSet[] = { 0 };
- const unsigned RM_SubRegsSet[] = { 0 };
- const unsigned V0_SubRegsSet[] = { 0 };
- const unsigned V1_SubRegsSet[] = { 0 };
- const unsigned V10_SubRegsSet[] = { 0 };
- const unsigned V11_SubRegsSet[] = { 0 };
- const unsigned V12_SubRegsSet[] = { 0 };
- const unsigned V13_SubRegsSet[] = { 0 };
- const unsigned V14_SubRegsSet[] = { 0 };
- const unsigned V15_SubRegsSet[] = { 0 };
- const unsigned V16_SubRegsSet[] = { 0 };
- const unsigned V17_SubRegsSet[] = { 0 };
- const unsigned V18_SubRegsSet[] = { 0 };
- const unsigned V19_SubRegsSet[] = { 0 };
- const unsigned V2_SubRegsSet[] = { 0 };
- const unsigned V20_SubRegsSet[] = { 0 };
- const unsigned V21_SubRegsSet[] = { 0 };
- const unsigned V22_SubRegsSet[] = { 0 };
- const unsigned V23_SubRegsSet[] = { 0 };
- const unsigned V24_SubRegsSet[] = { 0 };
- const unsigned V25_SubRegsSet[] = { 0 };
- const unsigned V26_SubRegsSet[] = { 0 };
- const unsigned V27_SubRegsSet[] = { 0 };
- const unsigned V28_SubRegsSet[] = { 0 };
- const unsigned V29_SubRegsSet[] = { 0 };
- const unsigned V3_SubRegsSet[] = { 0 };
- const unsigned V30_SubRegsSet[] = { 0 };
- const unsigned V31_SubRegsSet[] = { 0 };
- const unsigned V4_SubRegsSet[] = { 0 };
- const unsigned V5_SubRegsSet[] = { 0 };
- const unsigned V6_SubRegsSet[] = { 0 };
- const unsigned V7_SubRegsSet[] = { 0 };
- const unsigned V8_SubRegsSet[] = { 0 };
- const unsigned V9_SubRegsSet[] = { 0 };
- const unsigned VRSAVE_SubRegsSet[] = { 0 };
const unsigned X0_SubRegsSet[] = { PPC::R0, 0 };
const unsigned X1_SubRegsSet[] = { PPC::R1, 0 };
+ const unsigned X2_SubRegsSet[] = { PPC::R2, 0 };
+ const unsigned X3_SubRegsSet[] = { PPC::R3, 0 };
+ const unsigned X4_SubRegsSet[] = { PPC::R4, 0 };
+ const unsigned X5_SubRegsSet[] = { PPC::R5, 0 };
+ const unsigned X6_SubRegsSet[] = { PPC::R6, 0 };
+ const unsigned X7_SubRegsSet[] = { PPC::R7, 0 };
+ const unsigned X8_SubRegsSet[] = { PPC::R8, 0 };
+ const unsigned X9_SubRegsSet[] = { PPC::R9, 0 };
const unsigned X10_SubRegsSet[] = { PPC::R10, 0 };
const unsigned X11_SubRegsSet[] = { PPC::R11, 0 };
const unsigned X12_SubRegsSet[] = { PPC::R12, 0 };
@@ -2830,7 +2116,6 @@ PPC::NoRegister, PPC::NoRegister };
const unsigned X17_SubRegsSet[] = { PPC::R17, 0 };
const unsigned X18_SubRegsSet[] = { PPC::R18, 0 };
const unsigned X19_SubRegsSet[] = { PPC::R19, 0 };
- const unsigned X2_SubRegsSet[] = { PPC::R2, 0 };
const unsigned X20_SubRegsSet[] = { PPC::R20, 0 };
const unsigned X21_SubRegsSet[] = { PPC::R21, 0 };
const unsigned X22_SubRegsSet[] = { PPC::R22, 0 };
@@ -2841,98 +2126,54 @@ PPC::NoRegister, PPC::NoRegister };
const unsigned X27_SubRegsSet[] = { PPC::R27, 0 };
const unsigned X28_SubRegsSet[] = { PPC::R28, 0 };
const unsigned X29_SubRegsSet[] = { PPC::R29, 0 };
- const unsigned X3_SubRegsSet[] = { PPC::R3, 0 };
const unsigned X30_SubRegsSet[] = { PPC::R30, 0 };
const unsigned X31_SubRegsSet[] = { PPC::R31, 0 };
- const unsigned X4_SubRegsSet[] = { PPC::R4, 0 };
- const unsigned X5_SubRegsSet[] = { PPC::R5, 0 };
- const unsigned X6_SubRegsSet[] = { PPC::R6, 0 };
- const unsigned X7_SubRegsSet[] = { PPC::R7, 0 };
- const unsigned X8_SubRegsSet[] = { PPC::R8, 0 };
- const unsigned X9_SubRegsSet[] = { PPC::R9, 0 };
// Register Super-registers Sets...
const unsigned Empty_SuperRegsSet[] = { 0 };
- const unsigned CARRY_SuperRegsSet[] = { 0 };
- const unsigned CR0_SuperRegsSet[] = { 0 };
const unsigned CR0EQ_SuperRegsSet[] = { PPC::CR0, 0 };
const unsigned CR0GT_SuperRegsSet[] = { PPC::CR0, 0 };
const unsigned CR0LT_SuperRegsSet[] = { PPC::CR0, 0 };
const unsigned CR0UN_SuperRegsSet[] = { PPC::CR0, 0 };
- const unsigned CR1_SuperRegsSet[] = { 0 };
const unsigned CR1EQ_SuperRegsSet[] = { PPC::CR1, 0 };
const unsigned CR1GT_SuperRegsSet[] = { PPC::CR1, 0 };
const unsigned CR1LT_SuperRegsSet[] = { PPC::CR1, 0 };
const unsigned CR1UN_SuperRegsSet[] = { PPC::CR1, 0 };
- const unsigned CR2_SuperRegsSet[] = { 0 };
const unsigned CR2EQ_SuperRegsSet[] = { PPC::CR2, 0 };
const unsigned CR2GT_SuperRegsSet[] = { PPC::CR2, 0 };
const unsigned CR2LT_SuperRegsSet[] = { PPC::CR2, 0 };
const unsigned CR2UN_SuperRegsSet[] = { PPC::CR2, 0 };
- const unsigned CR3_SuperRegsSet[] = { 0 };
const unsigned CR3EQ_SuperRegsSet[] = { PPC::CR3, 0 };
const unsigned CR3GT_SuperRegsSet[] = { PPC::CR3, 0 };
const unsigned CR3LT_SuperRegsSet[] = { PPC::CR3, 0 };
const unsigned CR3UN_SuperRegsSet[] = { PPC::CR3, 0 };
- const unsigned CR4_SuperRegsSet[] = { 0 };
const unsigned CR4EQ_SuperRegsSet[] = { PPC::CR4, 0 };
const unsigned CR4GT_SuperRegsSet[] = { PPC::CR4, 0 };
const unsigned CR4LT_SuperRegsSet[] = { PPC::CR4, 0 };
const unsigned CR4UN_SuperRegsSet[] = { PPC::CR4, 0 };
- const unsigned CR5_SuperRegsSet[] = { 0 };
const unsigned CR5EQ_SuperRegsSet[] = { PPC::CR5, 0 };
const unsigned CR5GT_SuperRegsSet[] = { PPC::CR5, 0 };
const unsigned CR5LT_SuperRegsSet[] = { PPC::CR5, 0 };
const unsigned CR5UN_SuperRegsSet[] = { PPC::CR5, 0 };
- const unsigned CR6_SuperRegsSet[] = { 0 };
const unsigned CR6EQ_SuperRegsSet[] = { PPC::CR6, 0 };
const unsigned CR6GT_SuperRegsSet[] = { PPC::CR6, 0 };
const unsigned CR6LT_SuperRegsSet[] = { PPC::CR6, 0 };
const unsigned CR6UN_SuperRegsSet[] = { PPC::CR6, 0 };
- const unsigned CR7_SuperRegsSet[] = { 0 };
const unsigned CR7EQ_SuperRegsSet[] = { PPC::CR7, 0 };
const unsigned CR7GT_SuperRegsSet[] = { PPC::CR7, 0 };
const unsigned CR7LT_SuperRegsSet[] = { PPC::CR7, 0 };
const unsigned CR7UN_SuperRegsSet[] = { PPC::CR7, 0 };
- const unsigned CTR_SuperRegsSet[] = { 0 };
- const unsigned CTR8_SuperRegsSet[] = { 0 };
- const unsigned F0_SuperRegsSet[] = { 0 };
- const unsigned F1_SuperRegsSet[] = { 0 };
- const unsigned F10_SuperRegsSet[] = { 0 };
- const unsigned F11_SuperRegsSet[] = { 0 };
- const unsigned F12_SuperRegsSet[] = { 0 };
- const unsigned F13_SuperRegsSet[] = { 0 };
- const unsigned F14_SuperRegsSet[] = { 0 };
- const unsigned F15_SuperRegsSet[] = { 0 };
- const unsigned F16_SuperRegsSet[] = { 0 };
- const unsigned F17_SuperRegsSet[] = { 0 };
- const unsigned F18_SuperRegsSet[] = { 0 };
- const unsigned F19_SuperRegsSet[] = { 0 };
- const unsigned F2_SuperRegsSet[] = { 0 };
- const unsigned F20_SuperRegsSet[] = { 0 };
- const unsigned F21_SuperRegsSet[] = { 0 };
- const unsigned F22_SuperRegsSet[] = { 0 };
- const unsigned F23_SuperRegsSet[] = { 0 };
- const unsigned F24_SuperRegsSet[] = { 0 };
- const unsigned F25_SuperRegsSet[] = { 0 };
- const unsigned F26_SuperRegsSet[] = { 0 };
- const unsigned F27_SuperRegsSet[] = { 0 };
- const unsigned F28_SuperRegsSet[] = { 0 };
- const unsigned F29_SuperRegsSet[] = { 0 };
- const unsigned F3_SuperRegsSet[] = { 0 };
- const unsigned F30_SuperRegsSet[] = { 0 };
- const unsigned F31_SuperRegsSet[] = { 0 };
- const unsigned F4_SuperRegsSet[] = { 0 };
- const unsigned F5_SuperRegsSet[] = { 0 };
- const unsigned F6_SuperRegsSet[] = { 0 };
- const unsigned F7_SuperRegsSet[] = { 0 };
- const unsigned F8_SuperRegsSet[] = { 0 };
- const unsigned F9_SuperRegsSet[] = { 0 };
- const unsigned LR_SuperRegsSet[] = { 0 };
- const unsigned LR8_SuperRegsSet[] = { 0 };
const unsigned R0_SuperRegsSet[] = { PPC::X0, 0 };
const unsigned R1_SuperRegsSet[] = { PPC::X1, 0 };
+ const unsigned R2_SuperRegsSet[] = { PPC::X2, 0 };
+ const unsigned R3_SuperRegsSet[] = { PPC::X3, 0 };
+ const unsigned R4_SuperRegsSet[] = { PPC::X4, 0 };
+ const unsigned R5_SuperRegsSet[] = { PPC::X5, 0 };
+ const unsigned R6_SuperRegsSet[] = { PPC::X6, 0 };
+ const unsigned R7_SuperRegsSet[] = { PPC::X7, 0 };
+ const unsigned R8_SuperRegsSet[] = { PPC::X8, 0 };
+ const unsigned R9_SuperRegsSet[] = { PPC::X9, 0 };
const unsigned R10_SuperRegsSet[] = { PPC::X10, 0 };
const unsigned R11_SuperRegsSet[] = { PPC::X11, 0 };
const unsigned R12_SuperRegsSet[] = { PPC::X12, 0 };
@@ -2943,7 +2184,6 @@ PPC::NoRegister, PPC::NoRegister };
const unsigned R17_SuperRegsSet[] = { PPC::X17, 0 };
const unsigned R18_SuperRegsSet[] = { PPC::X18, 0 };
const unsigned R19_SuperRegsSet[] = { PPC::X19, 0 };
- const unsigned R2_SuperRegsSet[] = { PPC::X2, 0 };
const unsigned R20_SuperRegsSet[] = { PPC::X20, 0 };
const unsigned R21_SuperRegsSet[] = { PPC::X21, 0 };
const unsigned R22_SuperRegsSet[] = { PPC::X22, 0 };
@@ -2954,260 +2194,190 @@ PPC::NoRegister, PPC::NoRegister };
const unsigned R27_SuperRegsSet[] = { PPC::X27, 0 };
const unsigned R28_SuperRegsSet[] = { PPC::X28, 0 };
const unsigned R29_SuperRegsSet[] = { PPC::X29, 0 };
- const unsigned R3_SuperRegsSet[] = { PPC::X3, 0 };
const unsigned R30_SuperRegsSet[] = { PPC::X30, 0 };
const unsigned R31_SuperRegsSet[] = { PPC::X31, 0 };
- const unsigned R4_SuperRegsSet[] = { PPC::X4, 0 };
- const unsigned R5_SuperRegsSet[] = { PPC::X5, 0 };
- const unsigned R6_SuperRegsSet[] = { PPC::X6, 0 };
- const unsigned R7_SuperRegsSet[] = { PPC::X7, 0 };
- const unsigned R8_SuperRegsSet[] = { PPC::X8, 0 };
- const unsigned R9_SuperRegsSet[] = { PPC::X9, 0 };
- const unsigned RM_SuperRegsSet[] = { 0 };
- const unsigned V0_SuperRegsSet[] = { 0 };
- const unsigned V1_SuperRegsSet[] = { 0 };
- const unsigned V10_SuperRegsSet[] = { 0 };
- const unsigned V11_SuperRegsSet[] = { 0 };
- const unsigned V12_SuperRegsSet[] = { 0 };
- const unsigned V13_SuperRegsSet[] = { 0 };
- const unsigned V14_SuperRegsSet[] = { 0 };
- const unsigned V15_SuperRegsSet[] = { 0 };
- const unsigned V16_SuperRegsSet[] = { 0 };
- const unsigned V17_SuperRegsSet[] = { 0 };
- const unsigned V18_SuperRegsSet[] = { 0 };
- const unsigned V19_SuperRegsSet[] = { 0 };
- const unsigned V2_SuperRegsSet[] = { 0 };
- const unsigned V20_SuperRegsSet[] = { 0 };
- const unsigned V21_SuperRegsSet[] = { 0 };
- const unsigned V22_SuperRegsSet[] = { 0 };
- const unsigned V23_SuperRegsSet[] = { 0 };
- const unsigned V24_SuperRegsSet[] = { 0 };
- const unsigned V25_SuperRegsSet[] = { 0 };
- const unsigned V26_SuperRegsSet[] = { 0 };
- const unsigned V27_SuperRegsSet[] = { 0 };
- const unsigned V28_SuperRegsSet[] = { 0 };
- const unsigned V29_SuperRegsSet[] = { 0 };
- const unsigned V3_SuperRegsSet[] = { 0 };
- const unsigned V30_SuperRegsSet[] = { 0 };
- const unsigned V31_SuperRegsSet[] = { 0 };
- const unsigned V4_SuperRegsSet[] = { 0 };
- const unsigned V5_SuperRegsSet[] = { 0 };
- const unsigned V6_SuperRegsSet[] = { 0 };
- const unsigned V7_SuperRegsSet[] = { 0 };
- const unsigned V8_SuperRegsSet[] = { 0 };
- const unsigned V9_SuperRegsSet[] = { 0 };
- const unsigned VRSAVE_SuperRegsSet[] = { 0 };
- const unsigned X0_SuperRegsSet[] = { 0 };
- const unsigned X1_SuperRegsSet[] = { 0 };
- const unsigned X10_SuperRegsSet[] = { 0 };
- const unsigned X11_SuperRegsSet[] = { 0 };
- const unsigned X12_SuperRegsSet[] = { 0 };
- const unsigned X13_SuperRegsSet[] = { 0 };
- const unsigned X14_SuperRegsSet[] = { 0 };
- const unsigned X15_SuperRegsSet[] = { 0 };
- const unsigned X16_SuperRegsSet[] = { 0 };
- const unsigned X17_SuperRegsSet[] = { 0 };
- const unsigned X18_SuperRegsSet[] = { 0 };
- const unsigned X19_SuperRegsSet[] = { 0 };
- const unsigned X2_SuperRegsSet[] = { 0 };
- const unsigned X20_SuperRegsSet[] = { 0 };
- const unsigned X21_SuperRegsSet[] = { 0 };
- const unsigned X22_SuperRegsSet[] = { 0 };
- const unsigned X23_SuperRegsSet[] = { 0 };
- const unsigned X24_SuperRegsSet[] = { 0 };
- const unsigned X25_SuperRegsSet[] = { 0 };
- const unsigned X26_SuperRegsSet[] = { 0 };
- const unsigned X27_SuperRegsSet[] = { 0 };
- const unsigned X28_SuperRegsSet[] = { 0 };
- const unsigned X29_SuperRegsSet[] = { 0 };
- const unsigned X3_SuperRegsSet[] = { 0 };
- const unsigned X30_SuperRegsSet[] = { 0 };
- const unsigned X31_SuperRegsSet[] = { 0 };
- const unsigned X4_SuperRegsSet[] = { 0 };
- const unsigned X5_SuperRegsSet[] = { 0 };
- const unsigned X6_SuperRegsSet[] = { 0 };
- const unsigned X7_SuperRegsSet[] = { 0 };
- const unsigned X8_SuperRegsSet[] = { 0 };
- const unsigned X9_SuperRegsSet[] = { 0 };
const TargetRegisterDesc RegisterDescriptors[] = { // Descriptors
{ "NOREG", 0, 0, 0 },
- { "CARRY", CARRY_AliasSet, CARRY_SubRegsSet, CARRY_SuperRegsSet },
- { "CR0", CR0_AliasSet, CR0_SubRegsSet, CR0_SuperRegsSet },
- { "CR0EQ", CR0EQ_AliasSet, CR0EQ_SubRegsSet, CR0EQ_SuperRegsSet },
- { "CR0GT", CR0GT_AliasSet, CR0GT_SubRegsSet, CR0GT_SuperRegsSet },
- { "CR0LT", CR0LT_AliasSet, CR0LT_SubRegsSet, CR0LT_SuperRegsSet },
- { "CR0UN", CR0UN_AliasSet, CR0UN_SubRegsSet, CR0UN_SuperRegsSet },
- { "CR1", CR1_AliasSet, CR1_SubRegsSet, CR1_SuperRegsSet },
- { "CR1EQ", CR1EQ_AliasSet, CR1EQ_SubRegsSet, CR1EQ_SuperRegsSet },
- { "CR1GT", CR1GT_AliasSet, CR1GT_SubRegsSet, CR1GT_SuperRegsSet },
- { "CR1LT", CR1LT_AliasSet, CR1LT_SubRegsSet, CR1LT_SuperRegsSet },
- { "CR1UN", CR1UN_AliasSet, CR1UN_SubRegsSet, CR1UN_SuperRegsSet },
- { "CR2", CR2_AliasSet, CR2_SubRegsSet, CR2_SuperRegsSet },
- { "CR2EQ", CR2EQ_AliasSet, CR2EQ_SubRegsSet, CR2EQ_SuperRegsSet },
- { "CR2GT", CR2GT_AliasSet, CR2GT_SubRegsSet, CR2GT_SuperRegsSet },
- { "CR2LT", CR2LT_AliasSet, CR2LT_SubRegsSet, CR2LT_SuperRegsSet },
- { "CR2UN", CR2UN_AliasSet, CR2UN_SubRegsSet, CR2UN_SuperRegsSet },
- { "CR3", CR3_AliasSet, CR3_SubRegsSet, CR3_SuperRegsSet },
- { "CR3EQ", CR3EQ_AliasSet, CR3EQ_SubRegsSet, CR3EQ_SuperRegsSet },
- { "CR3GT", CR3GT_AliasSet, CR3GT_SubRegsSet, CR3GT_SuperRegsSet },
- { "CR3LT", CR3LT_AliasSet, CR3LT_SubRegsSet, CR3LT_SuperRegsSet },
- { "CR3UN", CR3UN_AliasSet, CR3UN_SubRegsSet, CR3UN_SuperRegsSet },
- { "CR4", CR4_AliasSet, CR4_SubRegsSet, CR4_SuperRegsSet },
- { "CR4EQ", CR4EQ_AliasSet, CR4EQ_SubRegsSet, CR4EQ_SuperRegsSet },
- { "CR4GT", CR4GT_AliasSet, CR4GT_SubRegsSet, CR4GT_SuperRegsSet },
- { "CR4LT", CR4LT_AliasSet, CR4LT_SubRegsSet, CR4LT_SuperRegsSet },
- { "CR4UN", CR4UN_AliasSet, CR4UN_SubRegsSet, CR4UN_SuperRegsSet },
- { "CR5", CR5_AliasSet, CR5_SubRegsSet, CR5_SuperRegsSet },
- { "CR5EQ", CR5EQ_AliasSet, CR5EQ_SubRegsSet, CR5EQ_SuperRegsSet },
- { "CR5GT", CR5GT_AliasSet, CR5GT_SubRegsSet, CR5GT_SuperRegsSet },
- { "CR5LT", CR5LT_AliasSet, CR5LT_SubRegsSet, CR5LT_SuperRegsSet },
- { "CR5UN", CR5UN_AliasSet, CR5UN_SubRegsSet, CR5UN_SuperRegsSet },
- { "CR6", CR6_AliasSet, CR6_SubRegsSet, CR6_SuperRegsSet },
- { "CR6EQ", CR6EQ_AliasSet, CR6EQ_SubRegsSet, CR6EQ_SuperRegsSet },
- { "CR6GT", CR6GT_AliasSet, CR6GT_SubRegsSet, CR6GT_SuperRegsSet },
- { "CR6LT", CR6LT_AliasSet, CR6LT_SubRegsSet, CR6LT_SuperRegsSet },
- { "CR6UN", CR6UN_AliasSet, CR6UN_SubRegsSet, CR6UN_SuperRegsSet },
- { "CR7", CR7_AliasSet, CR7_SubRegsSet, CR7_SuperRegsSet },
- { "CR7EQ", CR7EQ_AliasSet, CR7EQ_SubRegsSet, CR7EQ_SuperRegsSet },
- { "CR7GT", CR7GT_AliasSet, CR7GT_SubRegsSet, CR7GT_SuperRegsSet },
- { "CR7LT", CR7LT_AliasSet, CR7LT_SubRegsSet, CR7LT_SuperRegsSet },
- { "CR7UN", CR7UN_AliasSet, CR7UN_SubRegsSet, CR7UN_SuperRegsSet },
- { "CTR", CTR_AliasSet, CTR_SubRegsSet, CTR_SuperRegsSet },
- { "CTR8", CTR8_AliasSet, CTR8_SubRegsSet, CTR8_SuperRegsSet },
- { "F0", F0_AliasSet, F0_SubRegsSet, F0_SuperRegsSet },
- { "F1", F1_AliasSet, F1_SubRegsSet, F1_SuperRegsSet },
- { "F10", F10_AliasSet, F10_SubRegsSet, F10_SuperRegsSet },
- { "F11", F11_AliasSet, F11_SubRegsSet, F11_SuperRegsSet },
- { "F12", F12_AliasSet, F12_SubRegsSet, F12_SuperRegsSet },
- { "F13", F13_AliasSet, F13_SubRegsSet, F13_SuperRegsSet },
- { "F14", F14_AliasSet, F14_SubRegsSet, F14_SuperRegsSet },
- { "F15", F15_AliasSet, F15_SubRegsSet, F15_SuperRegsSet },
- { "F16", F16_AliasSet, F16_SubRegsSet, F16_SuperRegsSet },
- { "F17", F17_AliasSet, F17_SubRegsSet, F17_SuperRegsSet },
- { "F18", F18_AliasSet, F18_SubRegsSet, F18_SuperRegsSet },
- { "F19", F19_AliasSet, F19_SubRegsSet, F19_SuperRegsSet },
- { "F2", F2_AliasSet, F2_SubRegsSet, F2_SuperRegsSet },
- { "F20", F20_AliasSet, F20_SubRegsSet, F20_SuperRegsSet },
- { "F21", F21_AliasSet, F21_SubRegsSet, F21_SuperRegsSet },
- { "F22", F22_AliasSet, F22_SubRegsSet, F22_SuperRegsSet },
- { "F23", F23_AliasSet, F23_SubRegsSet, F23_SuperRegsSet },
- { "F24", F24_AliasSet, F24_SubRegsSet, F24_SuperRegsSet },
- { "F25", F25_AliasSet, F25_SubRegsSet, F25_SuperRegsSet },
- { "F26", F26_AliasSet, F26_SubRegsSet, F26_SuperRegsSet },
- { "F27", F27_AliasSet, F27_SubRegsSet, F27_SuperRegsSet },
- { "F28", F28_AliasSet, F28_SubRegsSet, F28_SuperRegsSet },
- { "F29", F29_AliasSet, F29_SubRegsSet, F29_SuperRegsSet },
- { "F3", F3_AliasSet, F3_SubRegsSet, F3_SuperRegsSet },
- { "F30", F30_AliasSet, F30_SubRegsSet, F30_SuperRegsSet },
- { "F31", F31_AliasSet, F31_SubRegsSet, F31_SuperRegsSet },
- { "F4", F4_AliasSet, F4_SubRegsSet, F4_SuperRegsSet },
- { "F5", F5_AliasSet, F5_SubRegsSet, F5_SuperRegsSet },
- { "F6", F6_AliasSet, F6_SubRegsSet, F6_SuperRegsSet },
- { "F7", F7_AliasSet, F7_SubRegsSet, F7_SuperRegsSet },
- { "F8", F8_AliasSet, F8_SubRegsSet, F8_SuperRegsSet },
- { "F9", F9_AliasSet, F9_SubRegsSet, F9_SuperRegsSet },
- { "LR", LR_AliasSet, LR_SubRegsSet, LR_SuperRegsSet },
- { "LR8", LR8_AliasSet, LR8_SubRegsSet, LR8_SuperRegsSet },
- { "R0", R0_AliasSet, R0_SubRegsSet, R0_SuperRegsSet },
- { "R1", R1_AliasSet, R1_SubRegsSet, R1_SuperRegsSet },
- { "R10", R10_AliasSet, R10_SubRegsSet, R10_SuperRegsSet },
- { "R11", R11_AliasSet, R11_SubRegsSet, R11_SuperRegsSet },
- { "R12", R12_AliasSet, R12_SubRegsSet, R12_SuperRegsSet },
- { "R13", R13_AliasSet, R13_SubRegsSet, R13_SuperRegsSet },
- { "R14", R14_AliasSet, R14_SubRegsSet, R14_SuperRegsSet },
- { "R15", R15_AliasSet, R15_SubRegsSet, R15_SuperRegsSet },
- { "R16", R16_AliasSet, R16_SubRegsSet, R16_SuperRegsSet },
- { "R17", R17_AliasSet, R17_SubRegsSet, R17_SuperRegsSet },
- { "R18", R18_AliasSet, R18_SubRegsSet, R18_SuperRegsSet },
- { "R19", R19_AliasSet, R19_SubRegsSet, R19_SuperRegsSet },
- { "R2", R2_AliasSet, R2_SubRegsSet, R2_SuperRegsSet },
- { "R20", R20_AliasSet, R20_SubRegsSet, R20_SuperRegsSet },
- { "R21", R21_AliasSet, R21_SubRegsSet, R21_SuperRegsSet },
- { "R22", R22_AliasSet, R22_SubRegsSet, R22_SuperRegsSet },
- { "R23", R23_AliasSet, R23_SubRegsSet, R23_SuperRegsSet },
- { "R24", R24_AliasSet, R24_SubRegsSet, R24_SuperRegsSet },
- { "R25", R25_AliasSet, R25_SubRegsSet, R25_SuperRegsSet },
- { "R26", R26_AliasSet, R26_SubRegsSet, R26_SuperRegsSet },
- { "R27", R27_AliasSet, R27_SubRegsSet, R27_SuperRegsSet },
- { "R28", R28_AliasSet, R28_SubRegsSet, R28_SuperRegsSet },
- { "R29", R29_AliasSet, R29_SubRegsSet, R29_SuperRegsSet },
- { "R3", R3_AliasSet, R3_SubRegsSet, R3_SuperRegsSet },
- { "R30", R30_AliasSet, R30_SubRegsSet, R30_SuperRegsSet },
- { "R31", R31_AliasSet, R31_SubRegsSet, R31_SuperRegsSet },
- { "R4", R4_AliasSet, R4_SubRegsSet, R4_SuperRegsSet },
- { "R5", R5_AliasSet, R5_SubRegsSet, R5_SuperRegsSet },
- { "R6", R6_AliasSet, R6_SubRegsSet, R6_SuperRegsSet },
- { "R7", R7_AliasSet, R7_SubRegsSet, R7_SuperRegsSet },
- { "R8", R8_AliasSet, R8_SubRegsSet, R8_SuperRegsSet },
- { "R9", R9_AliasSet, R9_SubRegsSet, R9_SuperRegsSet },
- { "RM", RM_AliasSet, RM_SubRegsSet, RM_SuperRegsSet },
- { "V0", V0_AliasSet, V0_SubRegsSet, V0_SuperRegsSet },
- { "V1", V1_AliasSet, V1_SubRegsSet, V1_SuperRegsSet },
- { "V10", V10_AliasSet, V10_SubRegsSet, V10_SuperRegsSet },
- { "V11", V11_AliasSet, V11_SubRegsSet, V11_SuperRegsSet },
- { "V12", V12_AliasSet, V12_SubRegsSet, V12_SuperRegsSet },
- { "V13", V13_AliasSet, V13_SubRegsSet, V13_SuperRegsSet },
- { "V14", V14_AliasSet, V14_SubRegsSet, V14_SuperRegsSet },
- { "V15", V15_AliasSet, V15_SubRegsSet, V15_SuperRegsSet },
- { "V16", V16_AliasSet, V16_SubRegsSet, V16_SuperRegsSet },
- { "V17", V17_AliasSet, V17_SubRegsSet, V17_SuperRegsSet },
- { "V18", V18_AliasSet, V18_SubRegsSet, V18_SuperRegsSet },
- { "V19", V19_AliasSet, V19_SubRegsSet, V19_SuperRegsSet },
- { "V2", V2_AliasSet, V2_SubRegsSet, V2_SuperRegsSet },
- { "V20", V20_AliasSet, V20_SubRegsSet, V20_SuperRegsSet },
- { "V21", V21_AliasSet, V21_SubRegsSet, V21_SuperRegsSet },
- { "V22", V22_AliasSet, V22_SubRegsSet, V22_SuperRegsSet },
- { "V23", V23_AliasSet, V23_SubRegsSet, V23_SuperRegsSet },
- { "V24", V24_AliasSet, V24_SubRegsSet, V24_SuperRegsSet },
- { "V25", V25_AliasSet, V25_SubRegsSet, V25_SuperRegsSet },
- { "V26", V26_AliasSet, V26_SubRegsSet, V26_SuperRegsSet },
- { "V27", V27_AliasSet, V27_SubRegsSet, V27_SuperRegsSet },
- { "V28", V28_AliasSet, V28_SubRegsSet, V28_SuperRegsSet },
- { "V29", V29_AliasSet, V29_SubRegsSet, V29_SuperRegsSet },
- { "V3", V3_AliasSet, V3_SubRegsSet, V3_SuperRegsSet },
- { "V30", V30_AliasSet, V30_SubRegsSet, V30_SuperRegsSet },
- { "V31", V31_AliasSet, V31_SubRegsSet, V31_SuperRegsSet },
- { "V4", V4_AliasSet, V4_SubRegsSet, V4_SuperRegsSet },
- { "V5", V5_AliasSet, V5_SubRegsSet, V5_SuperRegsSet },
- { "V6", V6_AliasSet, V6_SubRegsSet, V6_SuperRegsSet },
- { "V7", V7_AliasSet, V7_SubRegsSet, V7_SuperRegsSet },
- { "V8", V8_AliasSet, V8_SubRegsSet, V8_SuperRegsSet },
- { "V9", V9_AliasSet, V9_SubRegsSet, V9_SuperRegsSet },
- { "VRSAVE", VRSAVE_AliasSet, VRSAVE_SubRegsSet, VRSAVE_SuperRegsSet },
- { "X0", X0_AliasSet, X0_SubRegsSet, X0_SuperRegsSet },
- { "X1", X1_AliasSet, X1_SubRegsSet, X1_SuperRegsSet },
- { "X10", X10_AliasSet, X10_SubRegsSet, X10_SuperRegsSet },
- { "X11", X11_AliasSet, X11_SubRegsSet, X11_SuperRegsSet },
- { "X12", X12_AliasSet, X12_SubRegsSet, X12_SuperRegsSet },
- { "X13", X13_AliasSet, X13_SubRegsSet, X13_SuperRegsSet },
- { "X14", X14_AliasSet, X14_SubRegsSet, X14_SuperRegsSet },
- { "X15", X15_AliasSet, X15_SubRegsSet, X15_SuperRegsSet },
- { "X16", X16_AliasSet, X16_SubRegsSet, X16_SuperRegsSet },
- { "X17", X17_AliasSet, X17_SubRegsSet, X17_SuperRegsSet },
- { "X18", X18_AliasSet, X18_SubRegsSet, X18_SuperRegsSet },
- { "X19", X19_AliasSet, X19_SubRegsSet, X19_SuperRegsSet },
- { "X2", X2_AliasSet, X2_SubRegsSet, X2_SuperRegsSet },
- { "X20", X20_AliasSet, X20_SubRegsSet, X20_SuperRegsSet },
- { "X21", X21_AliasSet, X21_SubRegsSet, X21_SuperRegsSet },
- { "X22", X22_AliasSet, X22_SubRegsSet, X22_SuperRegsSet },
- { "X23", X23_AliasSet, X23_SubRegsSet, X23_SuperRegsSet },
- { "X24", X24_AliasSet, X24_SubRegsSet, X24_SuperRegsSet },
- { "X25", X25_AliasSet, X25_SubRegsSet, X25_SuperRegsSet },
- { "X26", X26_AliasSet, X26_SubRegsSet, X26_SuperRegsSet },
- { "X27", X27_AliasSet, X27_SubRegsSet, X27_SuperRegsSet },
- { "X28", X28_AliasSet, X28_SubRegsSet, X28_SuperRegsSet },
- { "X29", X29_AliasSet, X29_SubRegsSet, X29_SuperRegsSet },
- { "X3", X3_AliasSet, X3_SubRegsSet, X3_SuperRegsSet },
- { "X30", X30_AliasSet, X30_SubRegsSet, X30_SuperRegsSet },
- { "X31", X31_AliasSet, X31_SubRegsSet, X31_SuperRegsSet },
- { "X4", X4_AliasSet, X4_SubRegsSet, X4_SuperRegsSet },
- { "X5", X5_AliasSet, X5_SubRegsSet, X5_SuperRegsSet },
- { "X6", X6_AliasSet, X6_SubRegsSet, X6_SuperRegsSet },
- { "X7", X7_AliasSet, X7_SubRegsSet, X7_SuperRegsSet },
- { "X8", X8_AliasSet, X8_SubRegsSet, X8_SuperRegsSet },
- { "X9", X9_AliasSet, X9_SubRegsSet, X9_SuperRegsSet },
- };
+ { "CARRY", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "CR0", CR0_AliasSet, CR0_SubRegsSet, Empty_SuperRegsSet },
+ { "CR0EQ", CR0EQ_AliasSet, Empty_SubRegsSet, CR0EQ_SuperRegsSet },
+ { "CR0GT", CR0GT_AliasSet, Empty_SubRegsSet, CR0GT_SuperRegsSet },
+ { "CR0LT", CR0LT_AliasSet, Empty_SubRegsSet, CR0LT_SuperRegsSet },
+ { "CR0UN", CR0UN_AliasSet, Empty_SubRegsSet, CR0UN_SuperRegsSet },
+ { "CR1", CR1_AliasSet, CR1_SubRegsSet, Empty_SuperRegsSet },
+ { "CR1EQ", CR1EQ_AliasSet, Empty_SubRegsSet, CR1EQ_SuperRegsSet },
+ { "CR1GT", CR1GT_AliasSet, Empty_SubRegsSet, CR1GT_SuperRegsSet },
+ { "CR1LT", CR1LT_AliasSet, Empty_SubRegsSet, CR1LT_SuperRegsSet },
+ { "CR1UN", CR1UN_AliasSet, Empty_SubRegsSet, CR1UN_SuperRegsSet },
+ { "CR2", CR2_AliasSet, CR2_SubRegsSet, Empty_SuperRegsSet },
+ { "CR2EQ", CR2EQ_AliasSet, Empty_SubRegsSet, CR2EQ_SuperRegsSet },
+ { "CR2GT", CR2GT_AliasSet, Empty_SubRegsSet, CR2GT_SuperRegsSet },
+ { "CR2LT", CR2LT_AliasSet, Empty_SubRegsSet, CR2LT_SuperRegsSet },
+ { "CR2UN", CR2UN_AliasSet, Empty_SubRegsSet, CR2UN_SuperRegsSet },
+ { "CR3", CR3_AliasSet, CR3_SubRegsSet, Empty_SuperRegsSet },
+ { "CR3EQ", CR3EQ_AliasSet, Empty_SubRegsSet, CR3EQ_SuperRegsSet },
+ { "CR3GT", CR3GT_AliasSet, Empty_SubRegsSet, CR3GT_SuperRegsSet },
+ { "CR3LT", CR3LT_AliasSet, Empty_SubRegsSet, CR3LT_SuperRegsSet },
+ { "CR3UN", CR3UN_AliasSet, Empty_SubRegsSet, CR3UN_SuperRegsSet },
+ { "CR4", CR4_AliasSet, CR4_SubRegsSet, Empty_SuperRegsSet },
+ { "CR4EQ", CR4EQ_AliasSet, Empty_SubRegsSet, CR4EQ_SuperRegsSet },
+ { "CR4GT", CR4GT_AliasSet, Empty_SubRegsSet, CR4GT_SuperRegsSet },
+ { "CR4LT", CR4LT_AliasSet, Empty_SubRegsSet, CR4LT_SuperRegsSet },
+ { "CR4UN", CR4UN_AliasSet, Empty_SubRegsSet, CR4UN_SuperRegsSet },
+ { "CR5", CR5_AliasSet, CR5_SubRegsSet, Empty_SuperRegsSet },
+ { "CR5EQ", CR5EQ_AliasSet, Empty_SubRegsSet, CR5EQ_SuperRegsSet },
+ { "CR5GT", CR5GT_AliasSet, Empty_SubRegsSet, CR5GT_SuperRegsSet },
+ { "CR5LT", CR5LT_AliasSet, Empty_SubRegsSet, CR5LT_SuperRegsSet },
+ { "CR5UN", CR5UN_AliasSet, Empty_SubRegsSet, CR5UN_SuperRegsSet },
+ { "CR6", CR6_AliasSet, CR6_SubRegsSet, Empty_SuperRegsSet },
+ { "CR6EQ", CR6EQ_AliasSet, Empty_SubRegsSet, CR6EQ_SuperRegsSet },
+ { "CR6GT", CR6GT_AliasSet, Empty_SubRegsSet, CR6GT_SuperRegsSet },
+ { "CR6LT", CR6LT_AliasSet, Empty_SubRegsSet, CR6LT_SuperRegsSet },
+ { "CR6UN", CR6UN_AliasSet, Empty_SubRegsSet, CR6UN_SuperRegsSet },
+ { "CR7", CR7_AliasSet, CR7_SubRegsSet, Empty_SuperRegsSet },
+ { "CR7EQ", CR7EQ_AliasSet, Empty_SubRegsSet, CR7EQ_SuperRegsSet },
+ { "CR7GT", CR7GT_AliasSet, Empty_SubRegsSet, CR7GT_SuperRegsSet },
+ { "CR7LT", CR7LT_AliasSet, Empty_SubRegsSet, CR7LT_SuperRegsSet },
+ { "CR7UN", CR7UN_AliasSet, Empty_SubRegsSet, CR7UN_SuperRegsSet },
+ { "CTR", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "CTR8", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F0", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F1", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F2", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F3", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F4", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F5", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F6", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F7", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F8", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F9", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F10", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F11", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F12", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F13", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F14", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F15", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F16", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F17", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F18", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F19", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F20", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F21", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F22", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F23", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F24", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F25", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F26", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F27", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F28", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F29", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F30", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "F31", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "LR", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "LR8", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "R0", R0_AliasSet, Empty_SubRegsSet, R0_SuperRegsSet },
+ { "R1", R1_AliasSet, Empty_SubRegsSet, R1_SuperRegsSet },
+ { "R2", R2_AliasSet, Empty_SubRegsSet, R2_SuperRegsSet },
+ { "R3", R3_AliasSet, Empty_SubRegsSet, R3_SuperRegsSet },
+ { "R4", R4_AliasSet, Empty_SubRegsSet, R4_SuperRegsSet },
+ { "R5", R5_AliasSet, Empty_SubRegsSet, R5_SuperRegsSet },
+ { "R6", R6_AliasSet, Empty_SubRegsSet, R6_SuperRegsSet },
+ { "R7", R7_AliasSet, Empty_SubRegsSet, R7_SuperRegsSet },
+ { "R8", R8_AliasSet, Empty_SubRegsSet, R8_SuperRegsSet },
+ { "R9", R9_AliasSet, Empty_SubRegsSet, R9_SuperRegsSet },
+ { "R10", R10_AliasSet, Empty_SubRegsSet, R10_SuperRegsSet },
+ { "R11", R11_AliasSet, Empty_SubRegsSet, R11_SuperRegsSet },
+ { "R12", R12_AliasSet, Empty_SubRegsSet, R12_SuperRegsSet },
+ { "R13", R13_AliasSet, Empty_SubRegsSet, R13_SuperRegsSet },
+ { "R14", R14_AliasSet, Empty_SubRegsSet, R14_SuperRegsSet },
+ { "R15", R15_AliasSet, Empty_SubRegsSet, R15_SuperRegsSet },
+ { "R16", R16_AliasSet, Empty_SubRegsSet, R16_SuperRegsSet },
+ { "R17", R17_AliasSet, Empty_SubRegsSet, R17_SuperRegsSet },
+ { "R18", R18_AliasSet, Empty_SubRegsSet, R18_SuperRegsSet },
+ { "R19", R19_AliasSet, Empty_SubRegsSet, R19_SuperRegsSet },
+ { "R20", R20_AliasSet, Empty_SubRegsSet, R20_SuperRegsSet },
+ { "R21", R21_AliasSet, Empty_SubRegsSet, R21_SuperRegsSet },
+ { "R22", R22_AliasSet, Empty_SubRegsSet, R22_SuperRegsSet },
+ { "R23", R23_AliasSet, Empty_SubRegsSet, R23_SuperRegsSet },
+ { "R24", R24_AliasSet, Empty_SubRegsSet, R24_SuperRegsSet },
+ { "R25", R25_AliasSet, Empty_SubRegsSet, R25_SuperRegsSet },
+ { "R26", R26_AliasSet, Empty_SubRegsSet, R26_SuperRegsSet },
+ { "R27", R27_AliasSet, Empty_SubRegsSet, R27_SuperRegsSet },
+ { "R28", R28_AliasSet, Empty_SubRegsSet, R28_SuperRegsSet },
+ { "R29", R29_AliasSet, Empty_SubRegsSet, R29_SuperRegsSet },
+ { "R30", R30_AliasSet, Empty_SubRegsSet, R30_SuperRegsSet },
+ { "R31", R31_AliasSet, Empty_SubRegsSet, R31_SuperRegsSet },
+ { "RM", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V0", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V1", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V2", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V3", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V4", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V5", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V6", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V7", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V8", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V9", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V10", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V11", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V12", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V13", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V14", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V15", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V16", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V17", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V18", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V19", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V20", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V21", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V22", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V23", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V24", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V25", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V26", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V27", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V28", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V29", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V30", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "V31", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "VRSAVE", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "X0", X0_AliasSet, X0_SubRegsSet, Empty_SuperRegsSet },
+ { "X1", X1_AliasSet, X1_SubRegsSet, Empty_SuperRegsSet },
+ { "X2", X2_AliasSet, X2_SubRegsSet, Empty_SuperRegsSet },
+ { "X3", X3_AliasSet, X3_SubRegsSet, Empty_SuperRegsSet },
+ { "X4", X4_AliasSet, X4_SubRegsSet, Empty_SuperRegsSet },
+ { "X5", X5_AliasSet, X5_SubRegsSet, Empty_SuperRegsSet },
+ { "X6", X6_AliasSet, X6_SubRegsSet, Empty_SuperRegsSet },
+ { "X7", X7_AliasSet, X7_SubRegsSet, Empty_SuperRegsSet },
+ { "X8", X8_AliasSet, X8_SubRegsSet, Empty_SuperRegsSet },
+ { "X9", X9_AliasSet, X9_SubRegsSet, Empty_SuperRegsSet },
+ { "X10", X10_AliasSet, X10_SubRegsSet, Empty_SuperRegsSet },
+ { "X11", X11_AliasSet, X11_SubRegsSet, Empty_SuperRegsSet },
+ { "X12", X12_AliasSet, X12_SubRegsSet, Empty_SuperRegsSet },
+ { "X13", X13_AliasSet, X13_SubRegsSet, Empty_SuperRegsSet },
+ { "X14", X14_AliasSet, X14_SubRegsSet, Empty_SuperRegsSet },
+ { "X15", X15_AliasSet, X15_SubRegsSet, Empty_SuperRegsSet },
+ { "X16", X16_AliasSet, X16_SubRegsSet, Empty_SuperRegsSet },
+ { "X17", X17_AliasSet, X17_SubRegsSet, Empty_SuperRegsSet },
+ { "X18", X18_AliasSet, X18_SubRegsSet, Empty_SuperRegsSet },
+ { "X19", X19_AliasSet, X19_SubRegsSet, Empty_SuperRegsSet },
+ { "X20", X20_AliasSet, X20_SubRegsSet, Empty_SuperRegsSet },
+ { "X21", X21_AliasSet, X21_SubRegsSet, Empty_SuperRegsSet },
+ { "X22", X22_AliasSet, X22_SubRegsSet, Empty_SuperRegsSet },
+ { "X23", X23_AliasSet, X23_SubRegsSet, Empty_SuperRegsSet },
+ { "X24", X24_AliasSet, X24_SubRegsSet, Empty_SuperRegsSet },
+ { "X25", X25_AliasSet, X25_SubRegsSet, Empty_SuperRegsSet },
+ { "X26", X26_AliasSet, X26_SubRegsSet, Empty_SuperRegsSet },
+ { "X27", X27_AliasSet, X27_SubRegsSet, Empty_SuperRegsSet },
+ { "X28", X28_AliasSet, X28_SubRegsSet, Empty_SuperRegsSet },
+ { "X29", X29_AliasSet, X29_SubRegsSet, Empty_SuperRegsSet },
+ { "X30", X30_AliasSet, X30_SubRegsSet, Empty_SuperRegsSet },
+ { "X31", X31_AliasSet, X31_SubRegsSet, Empty_SuperRegsSet },
+ };
+
+ const char *const SubRegIndexTable[] = { "sub_32", "sub_eq", "sub_gt", "sub_lt", "sub_un" };
+
}
unsigned PPCGenRegisterInfo::getSubReg(unsigned RegNo, unsigned Index) const {
@@ -3217,73 +2387,265 @@ unsigned PPCGenRegisterInfo::getSubReg(unsigned RegNo, unsigned Index) const {
case PPC::CR0:
switch (Index) {
default: return 0;
- case 1: return PPC::CR0LT;
- case 2: return PPC::CR0GT;
- case 3: return PPC::CR0EQ;
- case 4: return PPC::CR0UN;
+ case PPC::sub_eq: return PPC::CR0EQ;
+ case PPC::sub_gt: return PPC::CR0GT;
+ case PPC::sub_lt: return PPC::CR0LT;
+ case PPC::sub_un: return PPC::CR0UN;
};
break;
case PPC::CR1:
switch (Index) {
default: return 0;
- case 1: return PPC::CR1LT;
- case 2: return PPC::CR1GT;
- case 3: return PPC::CR1EQ;
- case 4: return PPC::CR1UN;
+ case PPC::sub_eq: return PPC::CR1EQ;
+ case PPC::sub_gt: return PPC::CR1GT;
+ case PPC::sub_lt: return PPC::CR1LT;
+ case PPC::sub_un: return PPC::CR1UN;
};
break;
case PPC::CR2:
switch (Index) {
default: return 0;
- case 1: return PPC::CR2LT;
- case 2: return PPC::CR2GT;
- case 3: return PPC::CR2EQ;
- case 4: return PPC::CR2UN;
+ case PPC::sub_eq: return PPC::CR2EQ;
+ case PPC::sub_gt: return PPC::CR2GT;
+ case PPC::sub_lt: return PPC::CR2LT;
+ case PPC::sub_un: return PPC::CR2UN;
};
break;
case PPC::CR3:
switch (Index) {
default: return 0;
- case 1: return PPC::CR3LT;
- case 2: return PPC::CR3GT;
- case 3: return PPC::CR3EQ;
- case 4: return PPC::CR3UN;
+ case PPC::sub_eq: return PPC::CR3EQ;
+ case PPC::sub_gt: return PPC::CR3GT;
+ case PPC::sub_lt: return PPC::CR3LT;
+ case PPC::sub_un: return PPC::CR3UN;
};
break;
case PPC::CR4:
switch (Index) {
default: return 0;
- case 1: return PPC::CR4LT;
- case 2: return PPC::CR4GT;
- case 3: return PPC::CR4EQ;
- case 4: return PPC::CR4UN;
+ case PPC::sub_eq: return PPC::CR4EQ;
+ case PPC::sub_gt: return PPC::CR4GT;
+ case PPC::sub_lt: return PPC::CR4LT;
+ case PPC::sub_un: return PPC::CR4UN;
};
break;
case PPC::CR5:
switch (Index) {
default: return 0;
- case 1: return PPC::CR5LT;
- case 2: return PPC::CR5GT;
- case 3: return PPC::CR5EQ;
- case 4: return PPC::CR5UN;
+ case PPC::sub_eq: return PPC::CR5EQ;
+ case PPC::sub_gt: return PPC::CR5GT;
+ case PPC::sub_lt: return PPC::CR5LT;
+ case PPC::sub_un: return PPC::CR5UN;
};
break;
case PPC::CR6:
switch (Index) {
default: return 0;
- case 1: return PPC::CR6LT;
- case 2: return PPC::CR6GT;
- case 3: return PPC::CR6EQ;
- case 4: return PPC::CR6UN;
+ case PPC::sub_eq: return PPC::CR6EQ;
+ case PPC::sub_gt: return PPC::CR6GT;
+ case PPC::sub_lt: return PPC::CR6LT;
+ case PPC::sub_un: return PPC::CR6UN;
};
break;
case PPC::CR7:
switch (Index) {
default: return 0;
- case 1: return PPC::CR7LT;
- case 2: return PPC::CR7GT;
- case 3: return PPC::CR7EQ;
- case 4: return PPC::CR7UN;
+ case PPC::sub_eq: return PPC::CR7EQ;
+ case PPC::sub_gt: return PPC::CR7GT;
+ case PPC::sub_lt: return PPC::CR7LT;
+ case PPC::sub_un: return PPC::CR7UN;
+ };
+ break;
+ case PPC::X0:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R0;
+ };
+ break;
+ case PPC::X1:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R1;
+ };
+ break;
+ case PPC::X2:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R2;
+ };
+ break;
+ case PPC::X3:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R3;
+ };
+ break;
+ case PPC::X4:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R4;
+ };
+ break;
+ case PPC::X5:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R5;
+ };
+ break;
+ case PPC::X6:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R6;
+ };
+ break;
+ case PPC::X7:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R7;
+ };
+ break;
+ case PPC::X8:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R8;
+ };
+ break;
+ case PPC::X9:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R9;
+ };
+ break;
+ case PPC::X10:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R10;
+ };
+ break;
+ case PPC::X11:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R11;
+ };
+ break;
+ case PPC::X12:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R12;
+ };
+ break;
+ case PPC::X13:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R13;
+ };
+ break;
+ case PPC::X14:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R14;
+ };
+ break;
+ case PPC::X15:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R15;
+ };
+ break;
+ case PPC::X16:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R16;
+ };
+ break;
+ case PPC::X17:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R17;
+ };
+ break;
+ case PPC::X18:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R18;
+ };
+ break;
+ case PPC::X19:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R19;
+ };
+ break;
+ case PPC::X20:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R20;
+ };
+ break;
+ case PPC::X21:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R21;
+ };
+ break;
+ case PPC::X22:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R22;
+ };
+ break;
+ case PPC::X23:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R23;
+ };
+ break;
+ case PPC::X24:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R24;
+ };
+ break;
+ case PPC::X25:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R25;
+ };
+ break;
+ case PPC::X26:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R26;
+ };
+ break;
+ case PPC::X27:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R27;
+ };
+ break;
+ case PPC::X28:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R28;
+ };
+ break;
+ case PPC::X29:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R29;
+ };
+ break;
+ case PPC::X30:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R30;
+ };
+ break;
+ case PPC::X31:
+ switch (Index) {
+ default: return 0;
+ case PPC::sub_32: return PPC::R31;
};
break;
};
@@ -3295,62 +2657,165 @@ unsigned PPCGenRegisterInfo::getSubRegIndex(unsigned RegNo, unsigned SubRegNo) c
default:
return 0;
case PPC::CR0:
- if (SubRegNo == PPC::CR0LT) return 1;
- if (SubRegNo == PPC::CR0GT) return 2;
- if (SubRegNo == PPC::CR0EQ) return 3;
- if (SubRegNo == PPC::CR0UN) return 4;
+ if (SubRegNo == PPC::CR0EQ) return PPC::sub_eq;
+ if (SubRegNo == PPC::CR0GT) return PPC::sub_gt;
+ if (SubRegNo == PPC::CR0LT) return PPC::sub_lt;
+ if (SubRegNo == PPC::CR0UN) return PPC::sub_un;
return 0;
case PPC::CR1:
- if (SubRegNo == PPC::CR1LT) return 1;
- if (SubRegNo == PPC::CR1GT) return 2;
- if (SubRegNo == PPC::CR1EQ) return 3;
- if (SubRegNo == PPC::CR1UN) return 4;
+ if (SubRegNo == PPC::CR1EQ) return PPC::sub_eq;
+ if (SubRegNo == PPC::CR1GT) return PPC::sub_gt;
+ if (SubRegNo == PPC::CR1LT) return PPC::sub_lt;
+ if (SubRegNo == PPC::CR1UN) return PPC::sub_un;
return 0;
case PPC::CR2:
- if (SubRegNo == PPC::CR2LT) return 1;
- if (SubRegNo == PPC::CR2GT) return 2;
- if (SubRegNo == PPC::CR2EQ) return 3;
- if (SubRegNo == PPC::CR2UN) return 4;
+ if (SubRegNo == PPC::CR2EQ) return PPC::sub_eq;
+ if (SubRegNo == PPC::CR2GT) return PPC::sub_gt;
+ if (SubRegNo == PPC::CR2LT) return PPC::sub_lt;
+ if (SubRegNo == PPC::CR2UN) return PPC::sub_un;
return 0;
case PPC::CR3:
- if (SubRegNo == PPC::CR3LT) return 1;
- if (SubRegNo == PPC::CR3GT) return 2;
- if (SubRegNo == PPC::CR3EQ) return 3;
- if (SubRegNo == PPC::CR3UN) return 4;
+ if (SubRegNo == PPC::CR3EQ) return PPC::sub_eq;
+ if (SubRegNo == PPC::CR3GT) return PPC::sub_gt;
+ if (SubRegNo == PPC::CR3LT) return PPC::sub_lt;
+ if (SubRegNo == PPC::CR3UN) return PPC::sub_un;
return 0;
case PPC::CR4:
- if (SubRegNo == PPC::CR4LT) return 1;
- if (SubRegNo == PPC::CR4GT) return 2;
- if (SubRegNo == PPC::CR4EQ) return 3;
- if (SubRegNo == PPC::CR4UN) return 4;
+ if (SubRegNo == PPC::CR4EQ) return PPC::sub_eq;
+ if (SubRegNo == PPC::CR4GT) return PPC::sub_gt;
+ if (SubRegNo == PPC::CR4LT) return PPC::sub_lt;
+ if (SubRegNo == PPC::CR4UN) return PPC::sub_un;
return 0;
case PPC::CR5:
- if (SubRegNo == PPC::CR5LT) return 1;
- if (SubRegNo == PPC::CR5GT) return 2;
- if (SubRegNo == PPC::CR5EQ) return 3;
- if (SubRegNo == PPC::CR5UN) return 4;
+ if (SubRegNo == PPC::CR5EQ) return PPC::sub_eq;
+ if (SubRegNo == PPC::CR5GT) return PPC::sub_gt;
+ if (SubRegNo == PPC::CR5LT) return PPC::sub_lt;
+ if (SubRegNo == PPC::CR5UN) return PPC::sub_un;
return 0;
case PPC::CR6:
- if (SubRegNo == PPC::CR6LT) return 1;
- if (SubRegNo == PPC::CR6GT) return 2;
- if (SubRegNo == PPC::CR6EQ) return 3;
- if (SubRegNo == PPC::CR6UN) return 4;
+ if (SubRegNo == PPC::CR6EQ) return PPC::sub_eq;
+ if (SubRegNo == PPC::CR6GT) return PPC::sub_gt;
+ if (SubRegNo == PPC::CR6LT) return PPC::sub_lt;
+ if (SubRegNo == PPC::CR6UN) return PPC::sub_un;
return 0;
case PPC::CR7:
- if (SubRegNo == PPC::CR7LT) return 1;
- if (SubRegNo == PPC::CR7GT) return 2;
- if (SubRegNo == PPC::CR7EQ) return 3;
- if (SubRegNo == PPC::CR7UN) return 4;
+ if (SubRegNo == PPC::CR7EQ) return PPC::sub_eq;
+ if (SubRegNo == PPC::CR7GT) return PPC::sub_gt;
+ if (SubRegNo == PPC::CR7LT) return PPC::sub_lt;
+ if (SubRegNo == PPC::CR7UN) return PPC::sub_un;
+ return 0;
+ case PPC::X0:
+ if (SubRegNo == PPC::R0) return PPC::sub_32;
+ return 0;
+ case PPC::X1:
+ if (SubRegNo == PPC::R1) return PPC::sub_32;
+ return 0;
+ case PPC::X2:
+ if (SubRegNo == PPC::R2) return PPC::sub_32;
+ return 0;
+ case PPC::X3:
+ if (SubRegNo == PPC::R3) return PPC::sub_32;
+ return 0;
+ case PPC::X4:
+ if (SubRegNo == PPC::R4) return PPC::sub_32;
+ return 0;
+ case PPC::X5:
+ if (SubRegNo == PPC::R5) return PPC::sub_32;
+ return 0;
+ case PPC::X6:
+ if (SubRegNo == PPC::R6) return PPC::sub_32;
+ return 0;
+ case PPC::X7:
+ if (SubRegNo == PPC::R7) return PPC::sub_32;
+ return 0;
+ case PPC::X8:
+ if (SubRegNo == PPC::R8) return PPC::sub_32;
+ return 0;
+ case PPC::X9:
+ if (SubRegNo == PPC::R9) return PPC::sub_32;
+ return 0;
+ case PPC::X10:
+ if (SubRegNo == PPC::R10) return PPC::sub_32;
+ return 0;
+ case PPC::X11:
+ if (SubRegNo == PPC::R11) return PPC::sub_32;
+ return 0;
+ case PPC::X12:
+ if (SubRegNo == PPC::R12) return PPC::sub_32;
+ return 0;
+ case PPC::X13:
+ if (SubRegNo == PPC::R13) return PPC::sub_32;
+ return 0;
+ case PPC::X14:
+ if (SubRegNo == PPC::R14) return PPC::sub_32;
+ return 0;
+ case PPC::X15:
+ if (SubRegNo == PPC::R15) return PPC::sub_32;
+ return 0;
+ case PPC::X16:
+ if (SubRegNo == PPC::R16) return PPC::sub_32;
+ return 0;
+ case PPC::X17:
+ if (SubRegNo == PPC::R17) return PPC::sub_32;
+ return 0;
+ case PPC::X18:
+ if (SubRegNo == PPC::R18) return PPC::sub_32;
+ return 0;
+ case PPC::X19:
+ if (SubRegNo == PPC::R19) return PPC::sub_32;
+ return 0;
+ case PPC::X20:
+ if (SubRegNo == PPC::R20) return PPC::sub_32;
+ return 0;
+ case PPC::X21:
+ if (SubRegNo == PPC::R21) return PPC::sub_32;
+ return 0;
+ case PPC::X22:
+ if (SubRegNo == PPC::R22) return PPC::sub_32;
+ return 0;
+ case PPC::X23:
+ if (SubRegNo == PPC::R23) return PPC::sub_32;
+ return 0;
+ case PPC::X24:
+ if (SubRegNo == PPC::R24) return PPC::sub_32;
+ return 0;
+ case PPC::X25:
+ if (SubRegNo == PPC::R25) return PPC::sub_32;
+ return 0;
+ case PPC::X26:
+ if (SubRegNo == PPC::R26) return PPC::sub_32;
+ return 0;
+ case PPC::X27:
+ if (SubRegNo == PPC::R27) return PPC::sub_32;
+ return 0;
+ case PPC::X28:
+ if (SubRegNo == PPC::R28) return PPC::sub_32;
+ return 0;
+ case PPC::X29:
+ if (SubRegNo == PPC::R29) return PPC::sub_32;
+ return 0;
+ case PPC::X30:
+ if (SubRegNo == PPC::R30) return PPC::sub_32;
+ return 0;
+ case PPC::X31:
+ if (SubRegNo == PPC::R31) return PPC::sub_32;
return 0;
};
return 0;
}
+unsigned PPCGenRegisterInfo::composeSubRegIndices(unsigned IdxA, unsigned IdxB) const {
+ switch (IdxA) {
+ default:
+ return IdxB;
+ }
+}
+
PPCGenRegisterInfo::PPCGenRegisterInfo(int CallFrameSetupOpcode, int CallFrameDestroyOpcode)
: TargetRegisterInfo(RegisterDescriptors, 176, RegisterClasses, RegisterClasses+11,
- CallFrameSetupOpcode, CallFrameDestroyOpcode,
+ SubRegIndexTable,
+ CallFrameSetupOpcode, CallFrameDestroyOpcode,
SubregHashTable, SubregHashTableSize,
- SuperregHashTable, SuperregHashTableSize,
AliasesHashTable, AliasesHashTableSize) {
}
@@ -3454,6 +2919,22 @@ int PPCGenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 32;
case PPC::F1:
return 33;
+ case PPC::F2:
+ return 34;
+ case PPC::F3:
+ return 35;
+ case PPC::F4:
+ return 36;
+ case PPC::F5:
+ return 37;
+ case PPC::F6:
+ return 38;
+ case PPC::F7:
+ return 39;
+ case PPC::F8:
+ return 40;
+ case PPC::F9:
+ return 41;
case PPC::F10:
return 42;
case PPC::F11:
@@ -3474,8 +2955,6 @@ int PPCGenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 50;
case PPC::F19:
return 51;
- case PPC::F2:
- return 34;
case PPC::F20:
return 52;
case PPC::F21:
@@ -3496,24 +2975,10 @@ int PPCGenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 60;
case PPC::F29:
return 61;
- case PPC::F3:
- return 35;
case PPC::F30:
return 62;
case PPC::F31:
return 63;
- case PPC::F4:
- return 36;
- case PPC::F5:
- return 37;
- case PPC::F6:
- return 38;
- case PPC::F7:
- return 39;
- case PPC::F8:
- return 40;
- case PPC::F9:
- return 41;
case PPC::LR:
return 65;
case PPC::LR8:
@@ -3522,6 +2987,22 @@ int PPCGenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 0;
case PPC::R1:
return 1;
+ case PPC::R2:
+ return 2;
+ case PPC::R3:
+ return 3;
+ case PPC::R4:
+ return 4;
+ case PPC::R5:
+ return 5;
+ case PPC::R6:
+ return 6;
+ case PPC::R7:
+ return 7;
+ case PPC::R8:
+ return 8;
+ case PPC::R9:
+ return 9;
case PPC::R10:
return 10;
case PPC::R11:
@@ -3542,8 +3023,6 @@ int PPCGenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 18;
case PPC::R19:
return 19;
- case PPC::R2:
- return 2;
case PPC::R20:
return 20;
case PPC::R21:
@@ -3564,30 +3043,32 @@ int PPCGenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 28;
case PPC::R29:
return 29;
- case PPC::R3:
- return 3;
case PPC::R30:
return 30;
case PPC::R31:
return 31;
- case PPC::R4:
- return 4;
- case PPC::R5:
- return 5;
- case PPC::R6:
- return 6;
- case PPC::R7:
- return 7;
- case PPC::R8:
- return 8;
- case PPC::R9:
- return 9;
case PPC::RM:
return 0;
case PPC::V0:
return 77;
case PPC::V1:
return 78;
+ case PPC::V2:
+ return 79;
+ case PPC::V3:
+ return 80;
+ case PPC::V4:
+ return 81;
+ case PPC::V5:
+ return 82;
+ case PPC::V6:
+ return 83;
+ case PPC::V7:
+ return 84;
+ case PPC::V8:
+ return 85;
+ case PPC::V9:
+ return 86;
case PPC::V10:
return 87;
case PPC::V11:
@@ -3608,8 +3089,6 @@ int PPCGenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 95;
case PPC::V19:
return 96;
- case PPC::V2:
- return 79;
case PPC::V20:
return 97;
case PPC::V21:
@@ -3630,30 +3109,32 @@ int PPCGenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 105;
case PPC::V29:
return 106;
- case PPC::V3:
- return 80;
case PPC::V30:
return 107;
case PPC::V31:
return 108;
- case PPC::V4:
- return 81;
- case PPC::V5:
- return 82;
- case PPC::V6:
- return 83;
- case PPC::V7:
- return 84;
- case PPC::V8:
- return 85;
- case PPC::V9:
- return 86;
case PPC::VRSAVE:
return 107;
case PPC::X0:
return 0;
case PPC::X1:
return 1;
+ case PPC::X2:
+ return 2;
+ case PPC::X3:
+ return 3;
+ case PPC::X4:
+ return 4;
+ case PPC::X5:
+ return 5;
+ case PPC::X6:
+ return 6;
+ case PPC::X7:
+ return 7;
+ case PPC::X8:
+ return 8;
+ case PPC::X9:
+ return 9;
case PPC::X10:
return 10;
case PPC::X11:
@@ -3674,8 +3155,6 @@ int PPCGenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 18;
case PPC::X19:
return 19;
- case PPC::X2:
- return 2;
case PPC::X20:
return 20;
case PPC::X21:
@@ -3696,24 +3175,10 @@ int PPCGenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 28;
case PPC::X29:
return 29;
- case PPC::X3:
- return 3;
case PPC::X30:
return 30;
case PPC::X31:
return 31;
- case PPC::X4:
- return 4;
- case PPC::X5:
- return 5;
- case PPC::X6:
- return 6;
- case PPC::X7:
- return 7;
- case PPC::X8:
- return 8;
- case PPC::X9:
- return 9;
};
};
}
diff --git a/libclamav/c++/PPCGenRegisterNames.inc b/libclamav/c++/PPCGenRegisterNames.inc
index 26d9cb3..8271218 100644
--- a/libclamav/c++/PPCGenRegisterNames.inc
+++ b/libclamav/c++/PPCGenRegisterNames.inc
@@ -9,184 +9,197 @@
namespace llvm {
namespace PPC {
- enum {
- NoRegister,
- CARRY, // 1
- CR0, // 2
- CR0EQ, // 3
- CR0GT, // 4
- CR0LT, // 5
- CR0UN, // 6
- CR1, // 7
- CR1EQ, // 8
- CR1GT, // 9
- CR1LT, // 10
- CR1UN, // 11
- CR2, // 12
- CR2EQ, // 13
- CR2GT, // 14
- CR2LT, // 15
- CR2UN, // 16
- CR3, // 17
- CR3EQ, // 18
- CR3GT, // 19
- CR3LT, // 20
- CR3UN, // 21
- CR4, // 22
- CR4EQ, // 23
- CR4GT, // 24
- CR4LT, // 25
- CR4UN, // 26
- CR5, // 27
- CR5EQ, // 28
- CR5GT, // 29
- CR5LT, // 30
- CR5UN, // 31
- CR6, // 32
- CR6EQ, // 33
- CR6GT, // 34
- CR6LT, // 35
- CR6UN, // 36
- CR7, // 37
- CR7EQ, // 38
- CR7GT, // 39
- CR7LT, // 40
- CR7UN, // 41
- CTR, // 42
- CTR8, // 43
- F0, // 44
- F1, // 45
- F10, // 46
- F11, // 47
- F12, // 48
- F13, // 49
- F14, // 50
- F15, // 51
- F16, // 52
- F17, // 53
- F18, // 54
- F19, // 55
- F2, // 56
- F20, // 57
- F21, // 58
- F22, // 59
- F23, // 60
- F24, // 61
- F25, // 62
- F26, // 63
- F27, // 64
- F28, // 65
- F29, // 66
- F3, // 67
- F30, // 68
- F31, // 69
- F4, // 70
- F5, // 71
- F6, // 72
- F7, // 73
- F8, // 74
- F9, // 75
- LR, // 76
- LR8, // 77
- R0, // 78
- R1, // 79
- R10, // 80
- R11, // 81
- R12, // 82
- R13, // 83
- R14, // 84
- R15, // 85
- R16, // 86
- R17, // 87
- R18, // 88
- R19, // 89
- R2, // 90
- R20, // 91
- R21, // 92
- R22, // 93
- R23, // 94
- R24, // 95
- R25, // 96
- R26, // 97
- R27, // 98
- R28, // 99
- R29, // 100
- R3, // 101
- R30, // 102
- R31, // 103
- R4, // 104
- R5, // 105
- R6, // 106
- R7, // 107
- R8, // 108
- R9, // 109
- RM, // 110
- V0, // 111
- V1, // 112
- V10, // 113
- V11, // 114
- V12, // 115
- V13, // 116
- V14, // 117
- V15, // 118
- V16, // 119
- V17, // 120
- V18, // 121
- V19, // 122
- V2, // 123
- V20, // 124
- V21, // 125
- V22, // 126
- V23, // 127
- V24, // 128
- V25, // 129
- V26, // 130
- V27, // 131
- V28, // 132
- V29, // 133
- V3, // 134
- V30, // 135
- V31, // 136
- V4, // 137
- V5, // 138
- V6, // 139
- V7, // 140
- V8, // 141
- V9, // 142
- VRSAVE, // 143
- X0, // 144
- X1, // 145
- X10, // 146
- X11, // 147
- X12, // 148
- X13, // 149
- X14, // 150
- X15, // 151
- X16, // 152
- X17, // 153
- X18, // 154
- X19, // 155
- X2, // 156
- X20, // 157
- X21, // 158
- X22, // 159
- X23, // 160
- X24, // 161
- X25, // 162
- X26, // 163
- X27, // 164
- X28, // 165
- X29, // 166
- X3, // 167
- X30, // 168
- X31, // 169
- X4, // 170
- X5, // 171
- X6, // 172
- X7, // 173
- X8, // 174
- X9, // 175
- NUM_TARGET_REGS // 176
- };
+enum {
+ NoRegister,
+ CARRY, // 1
+ CR0, // 2
+ CR0EQ, // 3
+ CR0GT, // 4
+ CR0LT, // 5
+ CR0UN, // 6
+ CR1, // 7
+ CR1EQ, // 8
+ CR1GT, // 9
+ CR1LT, // 10
+ CR1UN, // 11
+ CR2, // 12
+ CR2EQ, // 13
+ CR2GT, // 14
+ CR2LT, // 15
+ CR2UN, // 16
+ CR3, // 17
+ CR3EQ, // 18
+ CR3GT, // 19
+ CR3LT, // 20
+ CR3UN, // 21
+ CR4, // 22
+ CR4EQ, // 23
+ CR4GT, // 24
+ CR4LT, // 25
+ CR4UN, // 26
+ CR5, // 27
+ CR5EQ, // 28
+ CR5GT, // 29
+ CR5LT, // 30
+ CR5UN, // 31
+ CR6, // 32
+ CR6EQ, // 33
+ CR6GT, // 34
+ CR6LT, // 35
+ CR6UN, // 36
+ CR7, // 37
+ CR7EQ, // 38
+ CR7GT, // 39
+ CR7LT, // 40
+ CR7UN, // 41
+ CTR, // 42
+ CTR8, // 43
+ F0, // 44
+ F1, // 45
+ F2, // 46
+ F3, // 47
+ F4, // 48
+ F5, // 49
+ F6, // 50
+ F7, // 51
+ F8, // 52
+ F9, // 53
+ F10, // 54
+ F11, // 55
+ F12, // 56
+ F13, // 57
+ F14, // 58
+ F15, // 59
+ F16, // 60
+ F17, // 61
+ F18, // 62
+ F19, // 63
+ F20, // 64
+ F21, // 65
+ F22, // 66
+ F23, // 67
+ F24, // 68
+ F25, // 69
+ F26, // 70
+ F27, // 71
+ F28, // 72
+ F29, // 73
+ F30, // 74
+ F31, // 75
+ LR, // 76
+ LR8, // 77
+ R0, // 78
+ R1, // 79
+ R2, // 80
+ R3, // 81
+ R4, // 82
+ R5, // 83
+ R6, // 84
+ R7, // 85
+ R8, // 86
+ R9, // 87
+ R10, // 88
+ R11, // 89
+ R12, // 90
+ R13, // 91
+ R14, // 92
+ R15, // 93
+ R16, // 94
+ R17, // 95
+ R18, // 96
+ R19, // 97
+ R20, // 98
+ R21, // 99
+ R22, // 100
+ R23, // 101
+ R24, // 102
+ R25, // 103
+ R26, // 104
+ R27, // 105
+ R28, // 106
+ R29, // 107
+ R30, // 108
+ R31, // 109
+ RM, // 110
+ V0, // 111
+ V1, // 112
+ V2, // 113
+ V3, // 114
+ V4, // 115
+ V5, // 116
+ V6, // 117
+ V7, // 118
+ V8, // 119
+ V9, // 120
+ V10, // 121
+ V11, // 122
+ V12, // 123
+ V13, // 124
+ V14, // 125
+ V15, // 126
+ V16, // 127
+ V17, // 128
+ V18, // 129
+ V19, // 130
+ V20, // 131
+ V21, // 132
+ V22, // 133
+ V23, // 134
+ V24, // 135
+ V25, // 136
+ V26, // 137
+ V27, // 138
+ V28, // 139
+ V29, // 140
+ V30, // 141
+ V31, // 142
+ VRSAVE, // 143
+ X0, // 144
+ X1, // 145
+ X2, // 146
+ X3, // 147
+ X4, // 148
+ X5, // 149
+ X6, // 150
+ X7, // 151
+ X8, // 152
+ X9, // 153
+ X10, // 154
+ X11, // 155
+ X12, // 156
+ X13, // 157
+ X14, // 158
+ X15, // 159
+ X16, // 160
+ X17, // 161
+ X18, // 162
+ X19, // 163
+ X20, // 164
+ X21, // 165
+ X22, // 166
+ X23, // 167
+ X24, // 168
+ X25, // 169
+ X26, // 170
+ X27, // 171
+ X28, // 172
+ X29, // 173
+ X30, // 174
+ X31, // 175
+ NUM_TARGET_REGS // 176
+};
+}
+
+// Subregister indices
+namespace PPC {
+enum {
+ NoSubRegister,
+ sub_32, // 1
+ sub_eq, // 2
+ sub_gt, // 3
+ sub_lt, // 4
+ sub_un, // 5
+ NUM_TARGET_SUBREGS = 6
+};
}
} // End llvm namespace
diff --git a/libclamav/c++/PPCGenSubtarget.inc b/libclamav/c++/PPCGenSubtarget.inc
index a0be949..df9cf24 100644
--- a/libclamav/c++/PPCGenSubtarget.inc
+++ b/libclamav/c++/PPCGenSubtarget.inc
@@ -12,32 +12,16 @@
#include "llvm/Target/TargetInstrItineraries.h"
enum {
- BPU = 1 << 0,
- FPU1 = 1 << 1,
- FPU2 = 1 << 2,
- IU1 = 1 << 3,
- IU2 = 1 << 4,
- IU3 = 1 << 5,
- IU4 = 1 << 6,
- SLU = 1 << 7,
- SRU = 1 << 8,
- VFPU = 1 << 9,
- VIU1 = 1 << 10,
- VIU2 = 1 << 11,
- VPU = 1 << 12
-};
-
-enum {
Directive32 = 1 << 0,
- Directive601 = 1 << 1,
- Directive602 = 1 << 2,
- Directive603 = 1 << 3,
- Directive604 = 1 << 4,
- Directive620 = 1 << 5,
- Directive64 = 1 << 6,
- Directive7400 = 1 << 7,
- Directive750 = 1 << 8,
- Directive970 = 1 << 9,
+ Directive64 = 1 << 1,
+ Directive601 = 1 << 2,
+ Directive602 = 1 << 3,
+ Directive603 = 1 << 4,
+ Directive604 = 1 << 5,
+ Directive620 = 1 << 6,
+ Directive750 = 1 << 7,
+ Directive970 = 1 << 8,
+ Directive7400 = 1 << 9,
Feature64Bit = 1 << 10,
Feature64BitRegs = 1 << 11,
FeatureAltivec = 1 << 12,
@@ -91,80 +75,168 @@ enum {
enum {
ItinClassesSize = 74
};
+
+// Functional units for itineraries "G3Itineraries"
+namespace G3ItinerariesFU {
+ const unsigned IU1 = 1 << 0;
+ const unsigned IU2 = 1 << 1;
+ const unsigned FPU1 = 1 << 2;
+ const unsigned BPU = 1 << 3;
+ const unsigned SRU = 1 << 4;
+ const unsigned SLU = 1 << 5;
+}
+
+// Functional units for itineraries "G4Itineraries"
+namespace G4ItinerariesFU {
+ const unsigned IU1 = 1 << 0;
+ const unsigned IU2 = 1 << 1;
+ const unsigned SLU = 1 << 2;
+ const unsigned SRU = 1 << 3;
+ const unsigned BPU = 1 << 4;
+ const unsigned FPU1 = 1 << 5;
+ const unsigned VIU1 = 1 << 6;
+ const unsigned VIU2 = 1 << 7;
+ const unsigned VPU = 1 << 8;
+ const unsigned VFPU = 1 << 9;
+}
+
+// Functional units for itineraries "G4PlusItineraries"
+namespace G4PlusItinerariesFU {
+ const unsigned IU1 = 1 << 0;
+ const unsigned IU2 = 1 << 1;
+ const unsigned IU3 = 1 << 2;
+ const unsigned IU4 = 1 << 3;
+ const unsigned BPU = 1 << 4;
+ const unsigned SLU = 1 << 5;
+ const unsigned FPU1 = 1 << 6;
+ const unsigned VFPU = 1 << 7;
+ const unsigned VIU1 = 1 << 8;
+ const unsigned VIU2 = 1 << 9;
+ const unsigned VPU = 1 << 10;
+}
+
+// Functional units for itineraries "G5Itineraries"
+namespace G5ItinerariesFU {
+ const unsigned IU1 = 1 << 0;
+ const unsigned IU2 = 1 << 1;
+ const unsigned SLU = 1 << 2;
+ const unsigned BPU = 1 << 3;
+ const unsigned FPU1 = 1 << 4;
+ const unsigned FPU2 = 1 << 5;
+ const unsigned VFPU = 1 << 6;
+ const unsigned VIU1 = 1 << 7;
+ const unsigned VIU2 = 1 << 8;
+ const unsigned VPU = 1 << 9;
+}
+
static const llvm::InstrStage Stages[] = {
- { 0, 0, 0 }, // No itinerary
- { 1, IU1 | IU2, -1 }, // 1
- { 19, IU1, -1 }, // 2
- { 1, FPU1, -1 }, // 3
- { 3, FPU1, -1 }, // 4
- { 5, IU1, -1 }, // 5
- { 6, IU1, -1 }, // 6
- { 3, IU1, -1 }, // 7
- { 2, IU1 | IU2, -1 }, // 8
- { 1, BPU, -1 }, // 9
- { 1, SRU, -1 }, // 10
- { 2, SLU, -1 }, // 11
- { 3, SLU, -1 }, // 12
- { 34, SLU, -1 }, // 13
- { 8, SLU, -1 }, // 14
- { 2, SRU, -1 }, // 15
- { 3, SRU, -1 }, // 16
- { 31, FPU1, -1 }, // 17
- { 17, FPU1, -1 }, // 18
- { 2, FPU1, -1 }, // 19
- { 10, FPU1, -1 }, // 20
- { 1, VIU1, -1 }, // 21
- { 5, SLU, -1 }, // 22
- { 8, SRU, -1 }, // 23
- { 4, VFPU, -1 }, // 24
- { 3, VIU2, -1 }, // 25
- { 1, VPU, -1 }, // 26
- { 1, IU1 | IU2 | IU3 | IU4, -1 }, // 27
- { 23, IU2, -1 }, // 28
- { 5, FPU1, -1 }, // 29
- { 2, VFPU, -1 }, // 30
- { 4, IU2, -1 }, // 31
- { 3, IU2, -1 }, // 32
- { 2, IU1 | IU2 | IU3 | IU4, -1 }, // 33
- { 2, IU2, -1 }, // 34
- { 4, SLU, -1 }, // 35
- { 37, SLU, -1 }, // 36
- { 35, SLU, -1 }, // 37
- { 0, IU1 | IU2 | IU3 | IU4, -1 }, // 38
- { 5, IU2, -1 }, // 39
- { 35, FPU1, -1 }, // 40
- { 21, FPU1, -1 }, // 41
- { 14, FPU1, -1 }, // 42
- { 4, VIU2, -1 }, // 43
- { 2, VPU, -1 }, // 44
- { 4, VIU1, -1 }, // 45
- { 3, IU1 | IU2, -1 }, // 46
- { 68, IU1, -1 }, // 47
- { 36, IU1, -1 }, // 48
- { 6, IU2, -1 }, // 49
- { 1, VFPU, -1 }, // 50
- { 6, FPU1 | FPU2, -1 }, // 51
- { 7, IU1 | IU2, -1 }, // 52
- { 5, IU1 | IU2, -1 }, // 53
- { 4, IU1 | IU2, -1 }, // 54
- { 1, IU2, -1 }, // 55
- { 4, BPU, -1 }, // 56
- { 2, BPU, -1 }, // 57
- { 3, BPU, -1 }, // 58
- { 10, SLU, -1 }, // 59
- { 40, SLU, -1 }, // 60
- { 11, SLU, -1 }, // 61
- { 64, SLU, -1 }, // 62
- { 10, IU2, -1 }, // 63
- { 8, IU2, -1 }, // 64
- { 8, FPU1 | FPU2, -1 }, // 65
- { 33, FPU1 | FPU2, -1 }, // 66
- { 40, FPU1 | FPU2, -1 }, // 67
- { 2, VIU1, -1 }, // 68
- { 8, VFPU, -1 }, // 69
- { 5, VIU2, -1 }, // 70
- { 3, VPU, -1 }, // 71
- { 0, 0, 0 } // End itinerary
+ { 0, 0, 0, llvm::InstrStage::Required }, // No itinerary
+ { 1, G3ItinerariesFU::IU1 | G3ItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 1
+ { 19, G3ItinerariesFU::IU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 2
+ { 1, G3ItinerariesFU::FPU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 3
+ { 3, G3ItinerariesFU::FPU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 4
+ { 5, G3ItinerariesFU::IU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 5
+ { 6, G3ItinerariesFU::IU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 6
+ { 3, G3ItinerariesFU::IU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 7
+ { 2, G3ItinerariesFU::IU1 | G3ItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 8
+ { 1, G3ItinerariesFU::BPU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 9
+ { 1, G3ItinerariesFU::SRU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 10
+ { 2, G3ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 11
+ { 3, G3ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 12
+ { 34, G3ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 13
+ { 8, G3ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 14
+ { 2, G3ItinerariesFU::SRU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 15
+ { 3, G3ItinerariesFU::SRU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 16
+ { 31, G3ItinerariesFU::FPU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 17
+ { 17, G3ItinerariesFU::FPU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 18
+ { 2, G3ItinerariesFU::FPU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 19
+ { 10, G3ItinerariesFU::FPU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 20
+ { 1, G4ItinerariesFU::IU1 | G4ItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 21
+ { 19, G4ItinerariesFU::IU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 22
+ { 3, G4ItinerariesFU::FPU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 23
+ { 1, G4ItinerariesFU::VIU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 24
+ { 5, G4ItinerariesFU::IU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 25
+ { 6, G4ItinerariesFU::IU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 26
+ { 3, G4ItinerariesFU::IU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 27
+ { 2, G4ItinerariesFU::IU1 | G4ItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 28
+ { 1, G4ItinerariesFU::BPU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 29
+ { 1, G4ItinerariesFU::SRU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 30
+ { 2, G4ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 31
+ { 34, G4ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 32
+ { 3, G4ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 33
+ { 5, G4ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 34
+ { 8, G4ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 35
+ { 2, G4ItinerariesFU::SRU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 36
+ { 3, G4ItinerariesFU::SRU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 37
+ { 8, G4ItinerariesFU::SRU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 38
+ { 1, G4ItinerariesFU::FPU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 39
+ { 31, G4ItinerariesFU::FPU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 40
+ { 17, G4ItinerariesFU::FPU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 41
+ { 10, G4ItinerariesFU::FPU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 42
+ { 4, G4ItinerariesFU::VFPU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 43
+ { 3, G4ItinerariesFU::VIU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 44
+ { 1, G4ItinerariesFU::VPU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 45
+ { 1, G4PlusItinerariesFU::IU1 | G4PlusItinerariesFU::IU2 | G4PlusItinerariesFU::IU3 | G4PlusItinerariesFU::IU4, -1, (llvm::InstrStage::ReservationKinds)0 }, // 46
+ { 23, G4PlusItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 47
+ { 5, G4PlusItinerariesFU::FPU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 48
+ { 2, G4PlusItinerariesFU::VFPU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 49
+ { 4, G4PlusItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 50
+ { 3, G4PlusItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 51
+ { 2, G4PlusItinerariesFU::IU1 | G4PlusItinerariesFU::IU2 | G4PlusItinerariesFU::IU3 | G4PlusItinerariesFU::IU4, -1, (llvm::InstrStage::ReservationKinds)0 }, // 52
+ { 1, G4PlusItinerariesFU::BPU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 53
+ { 2, G4PlusItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 54
+ { 3, G4PlusItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 55
+ { 4, G4PlusItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 56
+ { 37, G4PlusItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 57
+ { 35, G4PlusItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 58
+ { 0, G4PlusItinerariesFU::IU1 | G4PlusItinerariesFU::IU2 | G4PlusItinerariesFU::IU3 | G4PlusItinerariesFU::IU4, -1, (llvm::InstrStage::ReservationKinds)0 }, // 59
+ { 5, G4PlusItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 60
+ { 35, G4PlusItinerariesFU::FPU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 61
+ { 21, G4PlusItinerariesFU::FPU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 62
+ { 14, G4PlusItinerariesFU::FPU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 63
+ { 1, G4PlusItinerariesFU::VIU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 64
+ { 4, G4PlusItinerariesFU::VFPU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 65
+ { 4, G4PlusItinerariesFU::VIU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 66
+ { 2, G4PlusItinerariesFU::VPU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 67
+ { 4, G4PlusItinerariesFU::VIU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 68
+ { 2, G5ItinerariesFU::IU1 | G5ItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 69
+ { 3, G5ItinerariesFU::IU1 | G5ItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 70
+ { 68, G5ItinerariesFU::IU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 71
+ { 36, G5ItinerariesFU::IU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 72
+ { 6, G5ItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 73
+ { 1, G5ItinerariesFU::VFPU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 74
+ { 6, G5ItinerariesFU::FPU1 | G5ItinerariesFU::FPU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 75
+ { 7, G5ItinerariesFU::IU1 | G5ItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 76
+ { 5, G5ItinerariesFU::IU1 | G5ItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 77
+ { 4, G5ItinerariesFU::IU1 | G5ItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 78
+ { 1, G5ItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 79
+ { 1, G5ItinerariesFU::IU1 | G5ItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 80
+ { 1, G5ItinerariesFU::BPU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 81
+ { 4, G5ItinerariesFU::BPU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 82
+ { 2, G5ItinerariesFU::BPU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 83
+ { 3, G5ItinerariesFU::BPU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 84
+ { 3, G5ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 85
+ { 10, G5ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 86
+ { 40, G5ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 87
+ { 4, G5ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 88
+ { 11, G5ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 89
+ { 5, G5ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 90
+ { 64, G5ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 91
+ { 2, G5ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 92
+ { 35, G5ItinerariesFU::SLU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 93
+ { 2, G5ItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 94
+ { 3, G5ItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 95
+ { 10, G5ItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 96
+ { 8, G5ItinerariesFU::IU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 97
+ { 8, G5ItinerariesFU::FPU1 | G5ItinerariesFU::FPU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 98
+ { 33, G5ItinerariesFU::FPU1 | G5ItinerariesFU::FPU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 99
+ { 40, G5ItinerariesFU::FPU1 | G5ItinerariesFU::FPU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 100
+ { 2, G5ItinerariesFU::VIU1, -1, (llvm::InstrStage::ReservationKinds)0 }, // 101
+ { 8, G5ItinerariesFU::VFPU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 102
+ { 2, G5ItinerariesFU::VFPU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 103
+ { 5, G5ItinerariesFU::VIU2, -1, (llvm::InstrStage::ReservationKinds)0 }, // 104
+ { 3, G5ItinerariesFU::VPU, -1, (llvm::InstrStage::ReservationKinds)0 }, // 105
+ { 0, 0, 0, llvm::InstrStage::Required } // End itinerary
};
static const unsigned OperandCycles[] = {
0, // No itinerary
@@ -255,236 +327,236 @@ static const llvm::InstrItinerary G3Itineraries[] = {
};
static const llvm::InstrItinerary G4Itineraries[] = {
- { 9, 10, 0, 0 }, // 0
- { 10, 11, 0, 0 }, // 1
- { 10, 11, 0, 0 }, // 2
- { 10, 11, 0, 0 }, // 3
- { 3, 4, 0, 0 }, // 4
- { 17, 18, 0, 0 }, // 5
- { 18, 19, 0, 0 }, // 6
- { 3, 4, 0, 0 }, // 7
- { 3, 4, 0, 0 }, // 8
- { 20, 21, 0, 0 }, // 9
+ { 29, 30, 0, 0 }, // 0
+ { 30, 31, 0, 0 }, // 1
+ { 30, 31, 0, 0 }, // 2
+ { 30, 31, 0, 0 }, // 3
+ { 39, 40, 0, 0 }, // 4
+ { 40, 41, 0, 0 }, // 5
+ { 41, 42, 0, 0 }, // 6
+ { 39, 40, 0, 0 }, // 7
+ { 39, 40, 0, 0 }, // 8
+ { 42, 43, 0, 0 }, // 9
{ 0, 0, 0, 0 }, // 10
- { 1, 2, 0, 0 }, // 11
+ { 21, 22, 0, 0 }, // 11
{ 0, 0, 0, 0 }, // 12
- { 2, 3, 0, 0 }, // 13
- { 1, 2, 0, 0 }, // 14
- { 4, 5, 0, 0 }, // 15
- { 21, 22, 0, 0 }, // 16
- { 4, 5, 0, 0 }, // 17
+ { 22, 23, 0, 0 }, // 13
+ { 21, 22, 0, 0 }, // 14
+ { 23, 24, 0, 0 }, // 15
+ { 24, 25, 0, 0 }, // 16
+ { 23, 24, 0, 0 }, // 17
{ 0, 0, 0, 0 }, // 18
{ 0, 0, 0, 0 }, // 19
- { 5, 6, 0, 0 }, // 20
- { 6, 7, 0, 0 }, // 21
- { 7, 8, 0, 0 }, // 22
+ { 25, 26, 0, 0 }, // 20
+ { 26, 27, 0, 0 }, // 21
+ { 27, 28, 0, 0 }, // 22
{ 0, 0, 0, 0 }, // 23
- { 1, 2, 0, 0 }, // 24
+ { 21, 22, 0, 0 }, // 24
{ 0, 0, 0, 0 }, // 25
- { 1, 2, 0, 0 }, // 26
+ { 21, 22, 0, 0 }, // 26
{ 0, 0, 0, 0 }, // 27
- { 8, 9, 0, 0 }, // 28
+ { 28, 29, 0, 0 }, // 28
{ 0, 0, 0, 0 }, // 29
- { 11, 12, 0, 0 }, // 30
- { 11, 12, 0, 0 }, // 31
- { 11, 12, 0, 0 }, // 32
- { 11, 12, 0, 0 }, // 33
- { 11, 12, 0, 0 }, // 34
+ { 31, 32, 0, 0 }, // 30
+ { 31, 32, 0, 0 }, // 31
+ { 31, 32, 0, 0 }, // 32
+ { 31, 32, 0, 0 }, // 33
+ { 31, 32, 0, 0 }, // 34
{ 0, 0, 0, 0 }, // 35
{ 0, 0, 0, 0 }, // 36
- { 11, 12, 0, 0 }, // 37
- { 11, 12, 0, 0 }, // 38
- { 11, 12, 0, 0 }, // 39
- { 13, 14, 0, 0 }, // 40
- { 11, 12, 0, 0 }, // 41
+ { 31, 32, 0, 0 }, // 37
+ { 31, 32, 0, 0 }, // 38
+ { 31, 32, 0, 0 }, // 39
+ { 32, 33, 0, 0 }, // 40
+ { 31, 32, 0, 0 }, // 41
{ 0, 0, 0, 0 }, // 42
- { 12, 13, 0, 0 }, // 43
+ { 33, 34, 0, 0 }, // 43
{ 0, 0, 0, 0 }, // 44
{ 0, 0, 0, 0 }, // 45
{ 0, 0, 0, 0 }, // 46
{ 0, 0, 0, 0 }, // 47
- { 11, 12, 0, 0 }, // 48
- { 22, 23, 0, 0 }, // 49
- { 14, 15, 0, 0 }, // 50
- { 11, 12, 0, 0 }, // 51
+ { 31, 32, 0, 0 }, // 48
+ { 34, 35, 0, 0 }, // 49
+ { 35, 36, 0, 0 }, // 50
+ { 31, 32, 0, 0 }, // 51
{ 0, 0, 0, 0 }, // 52
- { 15, 16, 0, 0 }, // 53
- { 10, 11, 0, 0 }, // 54
- { 10, 11, 0, 0 }, // 55
- { 16, 17, 0, 0 }, // 56
- { 16, 17, 0, 0 }, // 57
- { 10, 11, 0, 0 }, // 58
- { 10, 11, 0, 0 }, // 59
- { 15, 16, 0, 0 }, // 60
- { 15, 16, 0, 0 }, // 61
- { 15, 16, 0, 0 }, // 62
- { 15, 16, 0, 0 }, // 63
- { 15, 16, 0, 0 }, // 64
- { 23, 24, 0, 0 }, // 65
- { 25, 26, 0, 0 }, // 66
- { 24, 25, 0, 0 }, // 67
- { 21, 22, 0, 0 }, // 68
- { 24, 25, 0, 0 }, // 69
- { 21, 22, 0, 0 }, // 70
- { 26, 27, 0, 0 }, // 71
- { 21, 22, 0, 0 }, // 72
- { 21, 22, 0, 0 }, // 73
+ { 36, 37, 0, 0 }, // 53
+ { 30, 31, 0, 0 }, // 54
+ { 30, 31, 0, 0 }, // 55
+ { 37, 38, 0, 0 }, // 56
+ { 37, 38, 0, 0 }, // 57
+ { 30, 31, 0, 0 }, // 58
+ { 30, 31, 0, 0 }, // 59
+ { 36, 37, 0, 0 }, // 60
+ { 36, 37, 0, 0 }, // 61
+ { 36, 37, 0, 0 }, // 62
+ { 36, 37, 0, 0 }, // 63
+ { 36, 37, 0, 0 }, // 64
+ { 38, 39, 0, 0 }, // 65
+ { 44, 45, 0, 0 }, // 66
+ { 43, 44, 0, 0 }, // 67
+ { 24, 25, 0, 0 }, // 68
+ { 43, 44, 0, 0 }, // 69
+ { 24, 25, 0, 0 }, // 70
+ { 45, 46, 0, 0 }, // 71
+ { 24, 25, 0, 0 }, // 72
+ { 24, 25, 0, 0 }, // 73
{ ~0U, ~0U, ~0U, ~0U } // end marker
};
static const llvm::InstrItinerary G4PlusItineraries[] = {
- { 9, 10, 0, 0 }, // 0
- { 34, 35, 0, 0 }, // 1
- { 34, 35, 0, 0 }, // 2
- { 34, 35, 0, 0 }, // 3
- { 29, 30, 0, 0 }, // 4
- { 40, 41, 0, 0 }, // 5
- { 41, 42, 0, 0 }, // 6
- { 29, 30, 0, 0 }, // 7
- { 29, 30, 0, 0 }, // 8
- { 42, 43, 0, 0 }, // 9
+ { 53, 54, 0, 0 }, // 0
+ { 54, 55, 0, 0 }, // 1
+ { 54, 55, 0, 0 }, // 2
+ { 54, 55, 0, 0 }, // 3
+ { 48, 49, 0, 0 }, // 4
+ { 61, 62, 0, 0 }, // 5
+ { 62, 63, 0, 0 }, // 6
+ { 48, 49, 0, 0 }, // 7
+ { 48, 49, 0, 0 }, // 8
+ { 63, 64, 0, 0 }, // 9
{ 0, 0, 0, 0 }, // 10
- { 27, 28, 0, 0 }, // 11
+ { 46, 47, 0, 0 }, // 11
{ 0, 0, 0, 0 }, // 12
- { 28, 29, 0, 0 }, // 13
- { 27, 28, 0, 0 }, // 14
- { 29, 30, 0, 0 }, // 15
- { 30, 31, 0, 0 }, // 16
- { 29, 30, 0, 0 }, // 17
+ { 47, 48, 0, 0 }, // 13
+ { 46, 47, 0, 0 }, // 14
+ { 48, 49, 0, 0 }, // 15
+ { 49, 50, 0, 0 }, // 16
+ { 48, 49, 0, 0 }, // 17
{ 0, 0, 0, 0 }, // 18
{ 0, 0, 0, 0 }, // 19
- { 31, 32, 0, 0 }, // 20
- { 31, 32, 0, 0 }, // 21
- { 32, 33, 0, 0 }, // 22
+ { 50, 51, 0, 0 }, // 20
+ { 50, 51, 0, 0 }, // 21
+ { 51, 52, 0, 0 }, // 22
{ 0, 0, 0, 0 }, // 23
- { 27, 28, 0, 0 }, // 24
+ { 46, 47, 0, 0 }, // 24
{ 0, 0, 0, 0 }, // 25
- { 33, 34, 0, 0 }, // 26
+ { 52, 53, 0, 0 }, // 26
{ 0, 0, 0, 0 }, // 27
- { 33, 34, 0, 0 }, // 28
+ { 52, 53, 0, 0 }, // 28
{ 0, 0, 0, 0 }, // 29
- { 12, 13, 0, 0 }, // 30
- { 12, 13, 0, 0 }, // 31
- { 12, 13, 0, 0 }, // 32
- { 12, 13, 0, 0 }, // 33
- { 32, 33, 0, 0 }, // 34
+ { 55, 56, 0, 0 }, // 30
+ { 55, 56, 0, 0 }, // 31
+ { 55, 56, 0, 0 }, // 32
+ { 55, 56, 0, 0 }, // 33
+ { 51, 52, 0, 0 }, // 34
{ 0, 0, 0, 0 }, // 35
{ 0, 0, 0, 0 }, // 36
- { 35, 36, 0, 0 }, // 37
- { 35, 36, 0, 0 }, // 38
- { 12, 13, 0, 0 }, // 39
- { 36, 37, 0, 0 }, // 40
- { 12, 13, 0, 0 }, // 41
- { 12, 13, 0, 0 }, // 42
- { 12, 13, 0, 0 }, // 43
+ { 56, 57, 0, 0 }, // 37
+ { 56, 57, 0, 0 }, // 38
+ { 55, 56, 0, 0 }, // 39
+ { 57, 58, 0, 0 }, // 40
+ { 55, 56, 0, 0 }, // 41
+ { 55, 56, 0, 0 }, // 42
+ { 55, 56, 0, 0 }, // 43
{ 0, 0, 0, 0 }, // 44
{ 0, 0, 0, 0 }, // 45
- { 12, 13, 0, 0 }, // 46
- { 12, 13, 0, 0 }, // 47
- { 12, 13, 0, 0 }, // 48
- { 12, 13, 0, 0 }, // 49
- { 37, 38, 0, 0 }, // 50
- { 12, 13, 0, 0 }, // 51
+ { 55, 56, 0, 0 }, // 46
+ { 55, 56, 0, 0 }, // 47
+ { 55, 56, 0, 0 }, // 48
+ { 55, 56, 0, 0 }, // 49
+ { 58, 59, 0, 0 }, // 50
+ { 55, 56, 0, 0 }, // 51
{ 0, 0, 0, 0 }, // 52
- { 38, 39, 0, 0 }, // 53
- { 34, 35, 0, 0 }, // 54
- { 32, 33, 0, 0 }, // 55
- { 31, 32, 0, 0 }, // 56
- { 31, 32, 0, 0 }, // 57
- { 39, 40, 0, 0 }, // 58
- { 34, 35, 0, 0 }, // 59
- { 34, 35, 0, 0 }, // 60
- { 34, 35, 0, 0 }, // 61
- { 34, 35, 0, 0 }, // 62
- { 27, 28, 0, 0 }, // 63
- { 38, 39, 0, 0 }, // 64
- { 12, 13, 0, 0 }, // 65
- { 43, 44, 0, 0 }, // 66
- { 24, 25, 0, 0 }, // 67
- { 30, 31, 0, 0 }, // 68
- { 45, 46, 0, 0 }, // 69
- { 21, 22, 0, 0 }, // 70
- { 44, 45, 0, 0 }, // 71
- { 44, 45, 0, 0 }, // 72
- { 44, 45, 0, 0 }, // 73
+ { 59, 60, 0, 0 }, // 53
+ { 54, 55, 0, 0 }, // 54
+ { 51, 52, 0, 0 }, // 55
+ { 50, 51, 0, 0 }, // 56
+ { 50, 51, 0, 0 }, // 57
+ { 60, 61, 0, 0 }, // 58
+ { 54, 55, 0, 0 }, // 59
+ { 54, 55, 0, 0 }, // 60
+ { 54, 55, 0, 0 }, // 61
+ { 54, 55, 0, 0 }, // 62
+ { 46, 47, 0, 0 }, // 63
+ { 59, 60, 0, 0 }, // 64
+ { 55, 56, 0, 0 }, // 65
+ { 66, 67, 0, 0 }, // 66
+ { 65, 66, 0, 0 }, // 67
+ { 49, 50, 0, 0 }, // 68
+ { 68, 69, 0, 0 }, // 69
+ { 64, 65, 0, 0 }, // 70
+ { 67, 68, 0, 0 }, // 71
+ { 67, 68, 0, 0 }, // 72
+ { 67, 68, 0, 0 }, // 73
{ ~0U, ~0U, ~0U, ~0U } // end marker
};
static const llvm::InstrItinerary G5Itineraries[] = {
- { 9, 10, 0, 0 }, // 0
- { 56, 57, 0, 0 }, // 1
- { 57, 58, 0, 0 }, // 2
- { 58, 59, 0, 0 }, // 3
- { 65, 66, 0, 0 }, // 4
- { 66, 67, 0, 0 }, // 5
- { 66, 67, 0, 0 }, // 6
- { 51, 52, 0, 0 }, // 7
- { 51, 52, 0, 0 }, // 8
- { 51, 52, 0, 0 }, // 9
- { 67, 68, 0, 0 }, // 10
- { 46, 47, 0, 0 }, // 11
- { 47, 48, 0, 0 }, // 12
- { 48, 49, 0, 0 }, // 13
- { 8, 9, 0, 0 }, // 14
- { 49, 50, 0, 0 }, // 15
- { 50, 51, 0, 0 }, // 16
- { 51, 52, 0, 0 }, // 17
+ { 81, 82, 0, 0 }, // 0
+ { 82, 83, 0, 0 }, // 1
+ { 83, 84, 0, 0 }, // 2
+ { 84, 85, 0, 0 }, // 3
+ { 98, 99, 0, 0 }, // 4
+ { 99, 100, 0, 0 }, // 5
+ { 99, 100, 0, 0 }, // 6
+ { 75, 76, 0, 0 }, // 7
+ { 75, 76, 0, 0 }, // 8
+ { 75, 76, 0, 0 }, // 9
+ { 100, 101, 0, 0 }, // 10
+ { 70, 71, 0, 0 }, // 11
+ { 71, 72, 0, 0 }, // 12
+ { 72, 73, 0, 0 }, // 13
+ { 69, 70, 0, 0 }, // 14
+ { 73, 74, 0, 0 }, // 15
+ { 74, 75, 0, 0 }, // 16
+ { 75, 76, 0, 0 }, // 17
{ 0, 0, 0, 0 }, // 18
- { 52, 53, 0, 0 }, // 19
- { 53, 54, 0, 0 }, // 20
- { 53, 54, 0, 0 }, // 21
- { 54, 55, 0, 0 }, // 22
- { 55, 56, 0, 0 }, // 23
- { 54, 55, 0, 0 }, // 24
- { 8, 9, 0, 0 }, // 25
- { 8, 9, 0, 0 }, // 26
- { 1, 2, 0, 0 }, // 27
- { 1, 2, 0, 0 }, // 28
+ { 76, 77, 0, 0 }, // 19
+ { 77, 78, 0, 0 }, // 20
+ { 77, 78, 0, 0 }, // 21
+ { 78, 79, 0, 0 }, // 22
+ { 79, 80, 0, 0 }, // 23
+ { 78, 79, 0, 0 }, // 24
+ { 69, 70, 0, 0 }, // 25
+ { 69, 70, 0, 0 }, // 26
+ { 80, 81, 0, 0 }, // 27
+ { 80, 81, 0, 0 }, // 28
{ 0, 0, 0, 0 }, // 29
- { 12, 13, 0, 0 }, // 30
+ { 85, 86, 0, 0 }, // 30
{ 0, 0, 0, 0 }, // 31
- { 59, 60, 0, 0 }, // 32
- { 12, 13, 0, 0 }, // 33
- { 60, 61, 0, 0 }, // 34
- { 12, 13, 0, 0 }, // 35
- { 61, 62, 0, 0 }, // 36
- { 12, 13, 0, 0 }, // 37
- { 22, 23, 0, 0 }, // 38
- { 22, 23, 0, 0 }, // 39
- { 62, 63, 0, 0 }, // 40
- { 12, 13, 0, 0 }, // 41
- { 22, 23, 0, 0 }, // 42
- { 61, 62, 0, 0 }, // 43
- { 60, 61, 0, 0 }, // 44
- { 11, 12, 0, 0 }, // 45
- { 12, 13, 0, 0 }, // 46
- { 61, 62, 0, 0 }, // 47
- { 22, 23, 0, 0 }, // 48
- { 61, 62, 0, 0 }, // 49
- { 37, 38, 0, 0 }, // 50
- { 35, 36, 0, 0 }, // 51
+ { 86, 87, 0, 0 }, // 32
+ { 85, 86, 0, 0 }, // 33
+ { 87, 88, 0, 0 }, // 34
+ { 85, 86, 0, 0 }, // 35
+ { 89, 90, 0, 0 }, // 36
+ { 85, 86, 0, 0 }, // 37
+ { 90, 91, 0, 0 }, // 38
+ { 90, 91, 0, 0 }, // 39
+ { 91, 92, 0, 0 }, // 40
+ { 85, 86, 0, 0 }, // 41
+ { 90, 91, 0, 0 }, // 42
+ { 89, 90, 0, 0 }, // 43
+ { 87, 88, 0, 0 }, // 44
+ { 92, 93, 0, 0 }, // 45
+ { 85, 86, 0, 0 }, // 46
+ { 89, 90, 0, 0 }, // 47
+ { 90, 91, 0, 0 }, // 48
+ { 89, 90, 0, 0 }, // 49
+ { 93, 94, 0, 0 }, // 50
+ { 88, 89, 0, 0 }, // 51
{ 0, 0, 0, 0 }, // 52
- { 60, 61, 0, 0 }, // 53
- { 34, 35, 0, 0 }, // 54
- { 32, 33, 0, 0 }, // 55
- { 32, 33, 0, 0 }, // 56
- { 12, 13, 0, 0 }, // 57
- { 63, 64, 0, 0 }, // 58
- { 12, 13, 0, 0 }, // 59
- { 64, 65, 0, 0 }, // 60
- { 12, 13, 0, 0 }, // 61
+ { 87, 88, 0, 0 }, // 53
+ { 94, 95, 0, 0 }, // 54
+ { 95, 96, 0, 0 }, // 55
+ { 95, 96, 0, 0 }, // 56
+ { 85, 86, 0, 0 }, // 57
+ { 96, 97, 0, 0 }, // 58
+ { 85, 86, 0, 0 }, // 59
+ { 97, 98, 0, 0 }, // 60
+ { 85, 86, 0, 0 }, // 61
{ 0, 0, 0, 0 }, // 62
{ 0, 0, 0, 0 }, // 63
- { 55, 56, 0, 0 }, // 64
- { 12, 13, 0, 0 }, // 65
- { 70, 71, 0, 0 }, // 66
- { 69, 70, 0, 0 }, // 67
- { 30, 31, 0, 0 }, // 68
- { 69, 70, 0, 0 }, // 69
- { 68, 69, 0, 0 }, // 70
- { 71, 72, 0, 0 }, // 71
- { 68, 69, 0, 0 }, // 72
- { 71, 72, 0, 0 }, // 73
+ { 79, 80, 0, 0 }, // 64
+ { 85, 86, 0, 0 }, // 65
+ { 104, 105, 0, 0 }, // 66
+ { 102, 103, 0, 0 }, // 67
+ { 103, 104, 0, 0 }, // 68
+ { 102, 103, 0, 0 }, // 69
+ { 101, 102, 0, 0 }, // 70
+ { 105, 106, 0, 0 }, // 71
+ { 101, 102, 0, 0 }, // 72
+ { 105, 106, 0, 0 }, // 73
{ ~0U, ~0U, ~0U, ~0U } // end marker
};
@@ -526,15 +598,15 @@ std::string llvm::PPCSubtarget::ParseSubtargetFeatures(const std::string &FS,
uint32_t Bits = Features.getBits(SubTypeKV, SubTypeKVSize,
FeatureKV, FeatureKVSize);
if ((Bits & Directive32) != 0 && DarwinDirective < PPC::DIR_32) DarwinDirective = PPC::DIR_32;
+ if ((Bits & Directive64) != 0 && DarwinDirective < PPC::DIR_64) DarwinDirective = PPC::DIR_64;
if ((Bits & Directive601) != 0 && DarwinDirective < PPC::DIR_601) DarwinDirective = PPC::DIR_601;
if ((Bits & Directive602) != 0 && DarwinDirective < PPC::DIR_602) DarwinDirective = PPC::DIR_602;
if ((Bits & Directive603) != 0 && DarwinDirective < PPC::DIR_603) DarwinDirective = PPC::DIR_603;
if ((Bits & Directive604) != 0 && DarwinDirective < PPC::DIR_603) DarwinDirective = PPC::DIR_603;
if ((Bits & Directive620) != 0 && DarwinDirective < PPC::DIR_603) DarwinDirective = PPC::DIR_603;
- if ((Bits & Directive64) != 0 && DarwinDirective < PPC::DIR_64) DarwinDirective = PPC::DIR_64;
- if ((Bits & Directive7400) != 0 && DarwinDirective < PPC::DIR_7400) DarwinDirective = PPC::DIR_7400;
if ((Bits & Directive750) != 0 && DarwinDirective < PPC::DIR_750) DarwinDirective = PPC::DIR_750;
if ((Bits & Directive970) != 0 && DarwinDirective < PPC::DIR_970) DarwinDirective = PPC::DIR_970;
+ if ((Bits & Directive7400) != 0 && DarwinDirective < PPC::DIR_7400) DarwinDirective = PPC::DIR_7400;
if ((Bits & Feature64Bit) != 0) Has64BitSupport = true;
if ((Bits & Feature64BitRegs) != 0) Use64BitRegs = true;
if ((Bits & FeatureAltivec) != 0) HasAltivec = true;
diff --git a/libclamav/c++/X86GenCallingConv.inc b/libclamav/c++/X86GenCallingConv.inc
index ec2d76e..504917d 100644
--- a/libclamav/c++/X86GenCallingConv.inc
+++ b/libclamav/c++/X86GenCallingConv.inc
@@ -21,6 +21,9 @@ static bool CC_X86_32_FastCall(unsigned ValNo, EVT ValVT,
static bool CC_X86_32_GHC(unsigned ValNo, EVT ValVT,
EVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State);
+static bool CC_X86_32_ThisCall(unsigned ValNo, EVT ValVT,
+ EVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State);
static bool CC_X86_64_C(unsigned ValNo, EVT ValVT,
EVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State);
@@ -128,8 +131,7 @@ static bool CC_X86_32_Common(unsigned ValNo, EVT ValVT,
if (!State.isVarArg()) {
if (LocVT == MVT::v8i8 ||
LocVT == MVT::v4i16 ||
- LocVT == MVT::v2i32 ||
- LocVT == MVT::v2f32) {
+ LocVT == MVT::v2i32) {
static const unsigned RegList2[] = {
X86::MM0, X86::MM1, X86::MM2
};
@@ -177,14 +179,44 @@ static bool CC_X86_32_Common(unsigned ValNo, EVT ValVT,
}
}
+ if (!State.isVarArg()) {
+ if (LocVT == MVT::v32i8 ||
+ LocVT == MVT::v16i16 ||
+ LocVT == MVT::v8i32 ||
+ LocVT == MVT::v4i64 ||
+ LocVT == MVT::v8f32 ||
+ LocVT == MVT::v4f64) {
+ if (State.getTarget().getSubtarget<X86Subtarget>().hasAVX()) {
+ static const unsigned RegList7[] = {
+ X86::YMM0, X86::YMM1, X86::YMM2, X86::YMM3
+ };
+ if (unsigned Reg = State.AllocateReg(RegList7, 4)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+ }
+ }
+ }
+
if (LocVT == MVT::v16i8 ||
LocVT == MVT::v8i16 ||
LocVT == MVT::v4i32 ||
LocVT == MVT::v2i64 ||
LocVT == MVT::v4f32 ||
LocVT == MVT::v2f64) {
- unsigned Offset7 = State.AllocateStack(16, 16);
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset7, LocVT, LocInfo));
+ unsigned Offset8 = State.AllocateStack(16, 16);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset8, LocVT, LocInfo));
+ return false;
+ }
+
+ if (LocVT == MVT::v32i8 ||
+ LocVT == MVT::v16i16 ||
+ LocVT == MVT::v8i32 ||
+ LocVT == MVT::v4i64 ||
+ LocVT == MVT::v8f32 ||
+ LocVT == MVT::v4f64) {
+ unsigned Offset9 = State.AllocateStack(32, 32);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset9, LocVT, LocInfo));
return false;
}
@@ -192,8 +224,8 @@ static bool CC_X86_32_Common(unsigned ValNo, EVT ValVT,
LocVT == MVT::v4i16 ||
LocVT == MVT::v2i32 ||
LocVT == MVT::v1i64) {
- unsigned Offset8 = State.AllocateStack(8, 4);
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset8, LocVT, LocInfo));
+ unsigned Offset10 = State.AllocateStack(8, 4);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset10, LocVT, LocInfo));
return false;
}
@@ -334,6 +366,42 @@ static bool CC_X86_32_GHC(unsigned ValNo, EVT ValVT,
}
+static bool CC_X86_32_ThisCall(unsigned ValNo, EVT ValVT,
+ EVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State) {
+
+ if (LocVT == MVT::i8 ||
+ LocVT == MVT::i16) {
+ LocVT = MVT::i32;
+ if (ArgFlags.isSExt())
+ LocInfo = CCValAssign::SExt;
+ else if (ArgFlags.isZExt())
+ LocInfo = CCValAssign::ZExt;
+ else
+ LocInfo = CCValAssign::AExt;
+ }
+
+ if (ArgFlags.isNest()) {
+ if (unsigned Reg = State.AllocateReg(X86::EAX)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+ }
+
+ if (LocVT == MVT::i32) {
+ if (unsigned Reg = State.AllocateReg(X86::ECX)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+ }
+
+ if (!CC_X86_32_Common(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
+ return false;
+
+ return true; // CC didn't match.
+}
+
+
static bool CC_X86_64_C(unsigned ValNo, EVT ValVT,
EVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) {
@@ -390,8 +458,7 @@ static bool CC_X86_64_C(unsigned ValNo, EVT ValVT,
if (LocVT == MVT::v8i8 ||
LocVT == MVT::v4i16 ||
- LocVT == MVT::v2i32 ||
- LocVT == MVT::v2f32) {
+ LocVT == MVT::v2i32) {
if (State.getTarget().getSubtarget<X86Subtarget>().isTargetDarwin()) {
if (State.getTarget().getSubtarget<X86Subtarget>().hasSSE2()) {
LocVT = MVT::v2i64;
@@ -424,20 +491,37 @@ static bool CC_X86_64_C(unsigned ValNo, EVT ValVT,
}
}
+ if (LocVT == MVT::v32i8 ||
+ LocVT == MVT::v16i16 ||
+ LocVT == MVT::v8i32 ||
+ LocVT == MVT::v4i64 ||
+ LocVT == MVT::v8f32 ||
+ LocVT == MVT::v4f64) {
+ if (State.getTarget().getSubtarget<X86Subtarget>().hasAVX()) {
+ static const unsigned RegList4[] = {
+ X86::YMM0, X86::YMM1, X86::YMM2, X86::YMM3, X86::YMM4, X86::YMM5, X86::YMM6, X86::YMM7
+ };
+ if (unsigned Reg = State.AllocateReg(RegList4, 8)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+ }
+ }
+
if (LocVT == MVT::i32 ||
LocVT == MVT::i64 ||
LocVT == MVT::f32 ||
LocVT == MVT::f64) {
- unsigned Offset4 = State.AllocateStack(8, 8);
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
+ unsigned Offset5 = State.AllocateStack(8, 8);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
return false;
}
if (LocVT == MVT::f80) {
- unsigned Offset5 = State.AllocateStack(
+ unsigned Offset6 = State.AllocateStack(
State.getTarget().getTargetData()->getTypeAllocSize(LocVT.getTypeForEVT(State.getContext())),
State.getTarget().getTargetData()->getABITypeAlignment(LocVT.getTypeForEVT(State.getContext())));
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset6, LocVT, LocInfo));
return false;
}
@@ -447,18 +531,28 @@ static bool CC_X86_64_C(unsigned ValNo, EVT ValVT,
LocVT == MVT::v2i64 ||
LocVT == MVT::v4f32 ||
LocVT == MVT::v2f64) {
- unsigned Offset6 = State.AllocateStack(16, 16);
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset6, LocVT, LocInfo));
+ unsigned Offset7 = State.AllocateStack(16, 16);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset7, LocVT, LocInfo));
+ return false;
+ }
+
+ if (LocVT == MVT::v32i8 ||
+ LocVT == MVT::v16i16 ||
+ LocVT == MVT::v8i32 ||
+ LocVT == MVT::v4i64 ||
+ LocVT == MVT::v8f32 ||
+ LocVT == MVT::v4f64) {
+ unsigned Offset8 = State.AllocateStack(32, 32);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset8, LocVT, LocInfo));
return false;
}
if (LocVT == MVT::v8i8 ||
LocVT == MVT::v4i16 ||
LocVT == MVT::v2i32 ||
- LocVT == MVT::v1i64 ||
- LocVT == MVT::v2f32) {
- unsigned Offset7 = State.AllocateStack(8, 8);
- State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset7, LocVT, LocInfo));
+ LocVT == MVT::v1i64) {
+ unsigned Offset9 = State.AllocateStack(8, 8);
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset9, LocVT, LocInfo));
return false;
}
@@ -550,8 +644,7 @@ static bool CC_X86_Win64_C(unsigned ValNo, EVT ValVT,
if (LocVT == MVT::v8i8 ||
LocVT == MVT::v4i16 ||
LocVT == MVT::v2i32 ||
- LocVT == MVT::v1i64 ||
- LocVT == MVT::v2f32) {
+ LocVT == MVT::v1i64) {
LocVT = MVT::i64;
LocInfo = CCValAssign::BCvt;
}
@@ -707,11 +800,27 @@ static bool RetCC_X86Common(unsigned ValNo, EVT ValVT,
}
}
+ if (LocVT == MVT::v32i8 ||
+ LocVT == MVT::v16i16 ||
+ LocVT == MVT::v8i32 ||
+ LocVT == MVT::v4i64 ||
+ LocVT == MVT::v8f32 ||
+ LocVT == MVT::v4f64) {
+ if (State.getTarget().getSubtarget<X86Subtarget>().hasAVX()) {
+ static const unsigned RegList6[] = {
+ X86::YMM0, X86::YMM1, X86::YMM2, X86::YMM3
+ };
+ if (unsigned Reg = State.AllocateReg(RegList6, 4)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+ }
+ }
+
if (LocVT == MVT::v8i8 ||
LocVT == MVT::v4i16 ||
LocVT == MVT::v2i32 ||
- LocVT == MVT::v1i64 ||
- LocVT == MVT::v2f32) {
+ LocVT == MVT::v1i64) {
if (unsigned Reg = State.AllocateReg(X86::MM0)) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
return false;
@@ -719,10 +828,10 @@ static bool RetCC_X86Common(unsigned ValNo, EVT ValVT,
}
if (LocVT == MVT::f80) {
- static const unsigned RegList6[] = {
+ static const unsigned RegList7[] = {
X86::ST0, X86::ST1
};
- if (unsigned Reg = State.AllocateReg(RegList6, 2)) {
+ if (unsigned Reg = State.AllocateReg(RegList7, 2)) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
return false;
}
@@ -899,8 +1008,7 @@ static bool RetCC_X86_64_C(unsigned ValNo, EVT ValVT,
if (LocVT == MVT::v8i8 ||
LocVT == MVT::v4i16 ||
- LocVT == MVT::v2i32 ||
- LocVT == MVT::v2f32) {
+ LocVT == MVT::v2i32) {
static const unsigned RegList3[] = {
X86::XMM0, X86::XMM1
};
diff --git a/libclamav/c++/X86GenDAGISel.inc b/libclamav/c++/X86GenDAGISel.inc
index ee2a218..2853698 100644
--- a/libclamav/c++/X86GenDAGISel.inc
+++ b/libclamav/c++/X86GenDAGISel.inc
@@ -9,2238 +9,131 @@
// *** NOTE: This file is #included into the middle of the target
// *** instruction selector class. These functions are really methods.
-
-// Predicate functions.
-inline bool Predicate_alignedload(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getAlignment() >= 16;
-
-}
-inline bool Predicate_alignednontemporalstore(SDNode *N) const {
-
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return ST->isNonTemporal() && !ST->isTruncatingStore() &&
- ST->getAddressingMode() == ISD::UNINDEXED &&
- ST->getAlignment() >= 16;
- return false;
-
-}
-inline bool Predicate_alignedstore(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getAlignment() >= 16;
-
-}
-inline bool Predicate_and_su(SDNode *N) const {
-
- return N->hasOneUse();
-
-}
-inline bool Predicate_atomic_cmp_swap_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_cmp_swap_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_cmp_swap_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_cmp_swap_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_add_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_add_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_add_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_add_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_and_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_and_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_and_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_and_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_max_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_max_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_max_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_max_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_min_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_min_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_min_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_min_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_nand_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_nand_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_nand_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_nand_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_or_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_or_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_or_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_or_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_sub_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_sub_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_sub_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_sub_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_umax_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_umax_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_umax_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_umax_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_umin_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_umin_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_umin_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_umin_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_load_xor_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_load_xor_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_load_xor_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_load_xor_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_atomic_swap_16(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_atomic_swap_32(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_atomic_swap_64(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
-
-}
-inline bool Predicate_atomic_swap_8(SDNode *N) const {
-
- return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_cvtff(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FF;
-
-}
-inline bool Predicate_cvtfs(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FS;
-
-}
-inline bool Predicate_cvtfu(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FU;
-
-}
-inline bool Predicate_cvtsf(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SF;
-
-}
-inline bool Predicate_cvtss(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SS;
-
-}
-inline bool Predicate_cvtsu(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SU;
-
-}
-inline bool Predicate_cvtuf(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_UF;
-
-}
-inline bool Predicate_cvtus(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_US;
-
-}
-inline bool Predicate_cvtuu(SDNode *N) const {
-
- return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_UU;
-
-}
-inline bool Predicate_def32(SDNode *N) const {
-
- return N->getOpcode() != ISD::TRUNCATE &&
- N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
- N->getOpcode() != ISD::CopyFromReg &&
- N->getOpcode() != X86ISD::CMOV;
-
-}
-inline bool Predicate_dsload(SDNode *N) const {
-
- if (const Value *Src = cast<LoadSDNode>(N)->getSrcValue())
- if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
- if (PT->getAddressSpace() > 255)
- return false;
- return true;
-
-}
-inline bool Predicate_extload(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
-
-}
-inline bool Predicate_extloadf32(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f32;
-
-}
-inline bool Predicate_extloadf64(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f64;
-
-}
-inline bool Predicate_extloadi1(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
-
-}
-inline bool Predicate_extloadi16(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_extloadi32(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_extloadi8(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_fp32imm0(SDNode *inN) const {
- ConstantFPSDNode *N = cast<ConstantFPSDNode>(inN);
-
- return N->isExactlyValue(+0.0);
-
-}
-inline bool Predicate_fpimm0(SDNode *inN) const {
- ConstantFPSDNode *N = cast<ConstantFPSDNode>(inN);
-
- return N->isExactlyValue(+0.0);
-
-}
-inline bool Predicate_fpimm1(SDNode *inN) const {
- ConstantFPSDNode *N = cast<ConstantFPSDNode>(inN);
-
- return N->isExactlyValue(+1.0);
-
-}
-inline bool Predicate_fpimmneg0(SDNode *inN) const {
- ConstantFPSDNode *N = cast<ConstantFPSDNode>(inN);
-
- return N->isExactlyValue(-0.0);
-
-}
-inline bool Predicate_fpimmneg1(SDNode *inN) const {
- ConstantFPSDNode *N = cast<ConstantFPSDNode>(inN);
-
- return N->isExactlyValue(-1.0);
-
-}
-inline bool Predicate_fsload(SDNode *N) const {
-
- if (const Value *Src = cast<LoadSDNode>(N)->getSrcValue())
- if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
- return PT->getAddressSpace() == 257;
- return false;
-
-}
-inline bool Predicate_gsload(SDNode *N) const {
-
- if (const Value *Src = cast<LoadSDNode>(N)->getSrcValue())
- if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
- return PT->getAddressSpace() == 256;
- return false;
-
-}
-inline bool Predicate_i64immSExt32(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
- // sign extended field.
- return (int64_t)N->getZExtValue() == (int32_t)N->getZExtValue();
-
-}
-inline bool Predicate_i64immZExt32(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- // i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
- // unsignedsign extended field.
- return (uint64_t)N->getZExtValue() == (uint32_t)N->getZExtValue();
-
-}
-inline bool Predicate_immAllOnesV(SDNode *N) const {
-
- return ISD::isBuildVectorAllOnes(N);
-
-}
-inline bool Predicate_immAllOnesV_bc(SDNode *N) const {
-
- return ISD::isBuildVectorAllOnes(N);
-
-}
-inline bool Predicate_immAllZerosV(SDNode *N) const {
-
- return ISD::isBuildVectorAllZeros(N);
-
-}
-inline bool Predicate_immAllZerosV_bc(SDNode *N) const {
-
- return ISD::isBuildVectorAllZeros(N);
-
-}
-inline bool Predicate_immSext8(SDNode *inN) const {
- ConstantSDNode *N = cast<ConstantSDNode>(inN);
-
- return N->getSExtValue() == (int8_t)N->getSExtValue();
-
-}
-inline bool Predicate_istore(SDNode *N) const {
-
- return !cast<StoreSDNode>(N)->isTruncatingStore();
-
-}
-inline bool Predicate_itruncstore(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->isTruncatingStore();
-
-}
-inline bool Predicate_load(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
-
-}
-inline bool Predicate_loadi16(SDNode *N) const {
-
- LoadSDNode *LD = cast<LoadSDNode>(N);
- if (const Value *Src = LD->getSrcValue())
- if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
- if (PT->getAddressSpace() > 255)
- return false;
- ISD::LoadExtType ExtType = LD->getExtensionType();
- if (ExtType == ISD::NON_EXTLOAD)
- return true;
- if (ExtType == ISD::EXTLOAD)
- return LD->getAlignment() >= 2 && !LD->isVolatile();
- return false;
-
-}
-inline bool Predicate_loadi16_anyext(SDNode *N) const {
-
- LoadSDNode *LD = cast<LoadSDNode>(N);
- if (const Value *Src = LD->getSrcValue())
- if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
- if (PT->getAddressSpace() > 255)
- return false;
- ISD::LoadExtType ExtType = LD->getExtensionType();
- if (ExtType == ISD::EXTLOAD)
- return LD->getAlignment() >= 2 && !LD->isVolatile();
- return false;
-
-}
-inline bool Predicate_loadi32(SDNode *N) const {
-
- LoadSDNode *LD = cast<LoadSDNode>(N);
- if (const Value *Src = LD->getSrcValue())
- if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
- if (PT->getAddressSpace() > 255)
- return false;
- ISD::LoadExtType ExtType = LD->getExtensionType();
- if (ExtType == ISD::NON_EXTLOAD)
- return true;
- if (ExtType == ISD::EXTLOAD)
- return LD->getAlignment() >= 4 && !LD->isVolatile();
- return false;
-
-}
-inline bool Predicate_memop(SDNode *N) const {
-
- return Subtarget->hasVectorUAMem()
- || cast<LoadSDNode>(N)->getAlignment() >= 16;
-
-}
-inline bool Predicate_memop64(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getAlignment() >= 8;
-
-}
-inline bool Predicate_mmx_pshufw(SDNode *N) const {
-
- return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_mmx_unpckh(SDNode *N) const {
-
- return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_mmx_unpckh_undef(SDNode *N) const {
-
- return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_mmx_unpckl(SDNode *N) const {
-
- return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_mmx_unpckl_undef(SDNode *N) const {
-
- return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_movddup(SDNode *N) const {
-
- return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_movhlps(SDNode *N) const {
-
- return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_movhlps_undef(SDNode *N) const {
-
- return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_movl(SDNode *N) const {
-
- return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_movlhps(SDNode *N) const {
-
- return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_movlp(SDNode *N) const {
-
- return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_movshdup(SDNode *N) const {
-
- return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_movsldup(SDNode *N) const {
-
- return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_nontemporalstore(SDNode *N) const {
-
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return ST->isNonTemporal();
- return false;
-
-}
-inline bool Predicate_or_is_add(SDNode *N) const {
-
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
- return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
- else {
- unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
- APInt Mask = APInt::getAllOnesValue(BitWidth);
- APInt KnownZero0, KnownOne0;
- CurDAG->ComputeMaskedBits(N->getOperand(0), Mask, KnownZero0, KnownOne0, 0);
- APInt KnownZero1, KnownOne1;
- CurDAG->ComputeMaskedBits(N->getOperand(1), Mask, KnownZero1, KnownOne1, 0);
- return (~KnownZero0 & ~KnownZero1) == 0;
- }
-
-}
-inline bool Predicate_palign(SDNode *N) const {
-
- return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_post_store(SDNode *N) const {
-
- ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
- return AM == ISD::POST_INC || AM == ISD::POST_DEC;
-
-}
-inline bool Predicate_post_truncst(SDNode *N) const {
-
- ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
- return AM == ISD::POST_INC || AM == ISD::POST_DEC;
-
-}
-inline bool Predicate_post_truncstf32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
-
-}
-inline bool Predicate_post_truncsti1(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
-
-}
-inline bool Predicate_post_truncsti16(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_post_truncsti32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_post_truncsti8(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_pre_store(SDNode *N) const {
-
- ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
- return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
-
-}
-inline bool Predicate_pre_truncst(SDNode *N) const {
-
- ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
- return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
-
-}
-inline bool Predicate_pre_truncstf32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
-
-}
-inline bool Predicate_pre_truncsti1(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
-
-}
-inline bool Predicate_pre_truncsti16(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_pre_truncsti32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_pre_truncsti8(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_pshufd(SDNode *N) const {
-
- return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_pshufhw(SDNode *N) const {
-
- return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_pshuflw(SDNode *N) const {
-
- return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_sextload(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
-
-}
-inline bool Predicate_sextloadi1(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
-
-}
-inline bool Predicate_sextloadi16(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_sextloadi32(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_sextloadi8(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_shld(SDNode *N) const {
-
- assert(N->getOpcode() == ISD::OR);
- return N->getOperand(0).getOpcode() == ISD::SHL &&
- N->getOperand(1).getOpcode() == ISD::SRL &&
- isa<ConstantSDNode>(N->getOperand(0).getOperand(1)) &&
- isa<ConstantSDNode>(N->getOperand(1).getOperand(1)) &&
- N->getOperand(0).getConstantOperandVal(1) ==
- N->getValueSizeInBits(0) - N->getOperand(1).getConstantOperandVal(1);
-
-}
-inline bool Predicate_shrd(SDNode *N) const {
-
- assert(N->getOpcode() == ISD::OR);
- return N->getOperand(0).getOpcode() == ISD::SRL &&
- N->getOperand(1).getOpcode() == ISD::SHL &&
- isa<ConstantSDNode>(N->getOperand(0).getOperand(1)) &&
- isa<ConstantSDNode>(N->getOperand(1).getOperand(1)) &&
- N->getOperand(0).getConstantOperandVal(1) ==
- N->getValueSizeInBits(0) - N->getOperand(1).getConstantOperandVal(1);
-
-}
-inline bool Predicate_shufp(SDNode *N) const {
-
- return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_splat_lo(SDNode *N) const {
-
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
- return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
-
-}
-inline bool Predicate_srl_su(SDNode *N) const {
-
- return N->hasOneUse();
-
-}
-inline bool Predicate_store(SDNode *N) const {
-
- return !cast<StoreSDNode>(N)->isTruncatingStore();
-
-}
-inline bool Predicate_trunc_su(SDNode *N) const {
-
- return N->hasOneUse();
-
-}
-inline bool Predicate_truncstore(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->isTruncatingStore();
-
-}
-inline bool Predicate_truncstoref32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
-
-}
-inline bool Predicate_truncstoref64(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f64;
-
-}
-inline bool Predicate_truncstorei16(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_truncstorei32(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_truncstorei8(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-inline bool Predicate_unalignednontemporalstore(SDNode *N) const {
-
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return ST->isNonTemporal() &&
- ST->getAlignment() < 16;
- return false;
-
-}
-inline bool Predicate_unindexedload(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
-
-}
-inline bool Predicate_unindexedstore(SDNode *N) const {
-
- return cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
-
-}
-inline bool Predicate_unpckh(SDNode *N) const {
-
- return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_unpckh_undef(SDNode *N) const {
-
- return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_unpckl(SDNode *N) const {
-
- return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_unpckl_undef(SDNode *N) const {
-
- return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-
-}
-inline bool Predicate_vtFP(SDNode *inN) const {
- VTSDNode *N = cast<VTSDNode>(inN);
- return N->getVT().isFloatingPoint();
-}
-inline bool Predicate_vtInt(SDNode *inN) const {
- VTSDNode *N = cast<VTSDNode>(inN);
- return N->getVT().isInteger();
-}
-inline bool Predicate_zextload(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
-
-}
-inline bool Predicate_zextloadi1(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
-
-}
-inline bool Predicate_zextloadi16(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
-
-}
-inline bool Predicate_zextloadi32(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
-
-}
-inline bool Predicate_zextloadi8(SDNode *N) const {
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
-
-}
-
-
// The main instruction selector code.
SDNode *SelectCode(SDNode *N) {
// Opcodes are emitted as 2 bytes, TARGET_OPCODE handles this.
#define TARGET_OPCODE(X) X & 255, unsigned(X) >> 8
static const unsigned char MatcherTable[] = {
- OPC_SwitchOpcode , 117|128,6|128,1, ISD::STORE,
+ OPC_SwitchOpcode , 86|128,98, TARGET_OPCODE(ISD::STORE),
OPC_RecordMemRef,
OPC_RecordNode,
- OPC_Scope, 43|128,1,
+ OPC_Scope, 34|128,2,
OPC_RecordChild1,
- OPC_Scope, 49,
+ OPC_Scope, 80,
OPC_CheckChild1Type, MVT::v4f32,
OPC_RecordChild2,
OPC_CheckPredicate, 0,
- OPC_Scope, 20,
+ OPC_Scope, 34,
OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
+ OPC_Scope, 12,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVNTPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 12,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVNTDQmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 0,
+ 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTPSmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 20,
- OPC_CheckPatternPredicate, 1,
+ 18,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTDQmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 41,
+ 77,
OPC_CheckChild1Type, MVT::v2f64,
OPC_RecordChild2,
OPC_CheckPredicate, 0,
- OPC_CheckPatternPredicate, 1,
+ OPC_Scope, 34,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_Scope, 12,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVNTPDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 12,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVNTDQ_64mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 0,
+ 34,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_Scope, 12,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTPDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 12,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTDQ_64mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 0,
+ 0,
+ 39,
+ OPC_CheckChild1Type, MVT::v8f32,
+ OPC_RecordChild2,
+ OPC_CheckPredicate, 0,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_Scope, 12,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTDQ_64mr), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVNTPSYmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
12,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTPDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVNTDQYmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 25,
+ 39,
+ OPC_CheckChild1Type, MVT::v4f64,
+ OPC_RecordChild2,
+ OPC_CheckPredicate, 0,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_Scope, 12,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVNTPDYmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 12,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVNTDQY_64mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 0,
+ 23,
OPC_CheckChild1Type, MVT::i32,
OPC_RecordChild2,
OPC_CheckPredicate, 1,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTImr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 25,
+ 23,
OPC_CheckChild1Type, MVT::i64,
OPC_RecordChild2,
OPC_CheckPredicate, 1,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTI_64mr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 23,
- OPC_CheckChild1Type, MVT::v4i32,
- OPC_RecordChild2,
- OPC_CheckPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTDQmr), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 1, 3, 4, 5, 6, 7,
0,
- 104|128,124,
+ 33|128,86,
OPC_MoveChild, 1,
- OPC_SwitchOpcode , 68|128,20, ISD::OR,
- OPC_Scope, 46|128,19,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 118|128,7, ISD::SRL,
- OPC_Scope, 44|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 79,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_RecordChild0,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i32,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitCopyToReg, 4, X86::ECX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 6, 7, 8, 9, 10, 5,
- 79,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_RecordChild0,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i16,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitCopyToReg, 4, X86::CX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 6, 7, 8, 9, 10, 5,
- 0,
- 39|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_Scope, 76,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 5,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/5,
- OPC_EmitMergeInputChains, 2, 0, 4,
- OPC_EmitCopyToReg, 3, X86::ECX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 6, 7, 8, 9, 10, 1,
- 76,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 5,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/5,
- OPC_EmitMergeInputChains, 2, 0, 4,
- OPC_EmitCopyToReg, 3, X86::CX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 6, 7, 8, 9, 10, 1,
- 0,
- 16|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 65,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitCopyToReg, 4, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 6, 7, 8, 9, 10, 5,
- 65,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitCopyToReg, 4, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 6, 7, 8, 9, 10, 5,
- 0,
- 15|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_Scope, 66,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 5,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/5,
- OPC_EmitMergeInputChains, 2, 0, 4,
- OPC_EmitCopyToReg, 3, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 6, 7, 8, 9, 10, 1,
- 66,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 5,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/5,
- OPC_EmitMergeInputChains, 2, 0, 4,
- OPC_EmitCopyToReg, 3, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 6, 7, 8, 9, 10, 1,
- 0,
- 72|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 61,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 7,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 4, 10,
- 61,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 7,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 4, 10,
- 63,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 7,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD64mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 4, 10,
- 0,
- 46|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 48,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 10,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_EmitConvertToTarget, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 1, 10,
- 48,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 10,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_EmitConvertToTarget, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 1, 10,
- 50,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 10,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_EmitConvertToTarget, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD64mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 1, 10,
- 0,
- 0,
- 118|128,7, ISD::SHL,
- OPC_Scope, 44|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 79,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_RecordChild0,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i32,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitCopyToReg, 4, X86::ECX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 6, 7, 8, 9, 10, 5,
- 79,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_RecordChild0,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::i16,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitCopyToReg, 4, X86::CX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 6, 7, 8, 9, 10, 5,
- 0,
- 39|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_Scope, 76,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 5,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/5,
- OPC_EmitMergeInputChains, 2, 0, 4,
- OPC_EmitCopyToReg, 3, X86::ECX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 6, 7, 8, 9, 10, 1,
- 76,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 5,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/5,
- OPC_EmitMergeInputChains, 2, 0, 4,
- OPC_EmitCopyToReg, 3, X86::CX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 6, 7, 8, 9, 10, 1,
- 0,
- 16|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 65,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitCopyToReg, 4, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 6, 7, 8, 9, 10, 5,
- 65,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitCopyToReg, 4, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 6, 7, 8, 9, 10, 5,
- 0,
- 15|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_Scope, 66,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 5,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/5,
- OPC_EmitMergeInputChains, 2, 0, 4,
- OPC_EmitCopyToReg, 3, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 6, 7, 8, 9, 10, 1,
- 66,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 5,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/5,
- OPC_EmitMergeInputChains, 2, 0, 4,
- OPC_EmitCopyToReg, 3, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 6, 7, 8, 9, 10, 1,
- 0,
- 72|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 61,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 10,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 4, 10,
- 61,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 10,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 4, 10,
- 63,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 10,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD64mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 4, 10,
- 0,
- 46|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 48,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 7,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_EmitConvertToTarget, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 1, 10,
- 48,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 7,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_EmitConvertToTarget, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 1, 10,
- 50,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 7,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_EmitConvertToTarget, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD64mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 1, 10,
- 0,
- 0,
- 53|128,3, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 75|128,1,
- OPC_CheckPredicate, 8,
- OPC_Scope, 113,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_SwitchType , 32, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 32, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 32, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 0,
- 84,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 37,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64mi32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 35,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 0,
- 0,
- 43,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 43,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 6|128,1,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 30, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 30, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 30, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 30, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 0,
- 0,
- 0,
- 16|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_SwitchType , 30, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 30, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 30, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 30, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 0,
- 0,
- 61|128,5, ISD::SHL,
+ OPC_SwitchOpcode , 82|128,5, TARGET_OPCODE(ISD::SHL),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 46,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckAndImm, 31,
@@ -2252,15 +145,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL8mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 44,
- OPC_CheckPredicate, 6,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 45,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckAndImm, 31,
@@ -2272,15 +165,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL16mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 44,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 45,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckAndImm, 31,
@@ -2292,20 +185,20 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL32mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 83,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 85,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckType, MVT::i8,
- OPC_Scope, 37,
+ OPC_Scope, 38,
OPC_CheckAndImm, 63,
OPC_RecordChild0,
OPC_MoveParent,
@@ -2314,14 +207,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL64mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 33,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 34,
OPC_CheckInteger, 1,
OPC_MoveParent,
OPC_CheckType, MVT::i8,
@@ -2329,15 +222,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL8m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
0,
- 40,
- OPC_CheckPredicate, 6,
+ 41,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -2348,14 +241,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL16m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 40,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 41,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -2366,17 +259,17 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL32m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 87,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 90,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_Scope, 37,
+ OPC_Scope, 38,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
OPC_CheckType, MVT::i8,
@@ -2386,16 +279,16 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL64m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 41,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 43,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i8,
@@ -2403,20 +296,20 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL8mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
0,
- 44,
- OPC_CheckPredicate, 6,
+ 46,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i16,
@@ -2424,19 +317,19 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL16mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
- 44,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 46,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
@@ -2444,21 +337,21 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL32mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
- 85,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 88,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 40,
+ OPC_Scope, 42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
@@ -2466,30 +359,30 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL64mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
- 35,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 36,
OPC_CheckChild1Type, MVT::i8,
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL8mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
0,
- 39,
- OPC_CheckPredicate, 6,
+ 40,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i8,
@@ -2498,15 +391,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL16mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 39,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 40,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i8,
@@ -2515,16 +408,16 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL32mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 41,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 42,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i8,
@@ -2533,25 +426,25 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL64mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
0,
- 61|128,5, ISD::SRL,
+ 82|128,5, TARGET_OPCODE(ISD::SRL),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 46,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckAndImm, 31,
@@ -2563,15 +456,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR8mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 44,
- OPC_CheckPredicate, 6,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 45,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckAndImm, 31,
@@ -2583,15 +476,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR16mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 44,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 45,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckAndImm, 31,
@@ -2603,20 +496,20 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR32mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 83,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 85,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckType, MVT::i8,
- OPC_Scope, 37,
+ OPC_Scope, 38,
OPC_CheckAndImm, 63,
OPC_RecordChild0,
OPC_MoveParent,
@@ -2625,14 +518,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR64mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 33,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 34,
OPC_CheckInteger, 1,
OPC_MoveParent,
OPC_CheckType, MVT::i8,
@@ -2640,15 +533,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR8m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
0,
- 40,
- OPC_CheckPredicate, 6,
+ 41,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -2659,14 +552,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR16m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 40,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 41,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -2677,17 +570,17 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR32m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 87,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 90,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_Scope, 37,
+ OPC_Scope, 38,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
OPC_CheckType, MVT::i8,
@@ -2697,16 +590,16 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR64m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 41,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 43,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i8,
@@ -2714,20 +607,20 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR8mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
0,
- 44,
- OPC_CheckPredicate, 6,
+ 46,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i16,
@@ -2735,19 +628,19 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR16mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
- 44,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 46,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
@@ -2755,21 +648,21 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR32mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
- 85,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 88,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 40,
+ OPC_Scope, 42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
@@ -2777,30 +670,30 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR64mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
- 35,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 36,
OPC_CheckChild1Type, MVT::i8,
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR8mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
0,
- 39,
- OPC_CheckPredicate, 6,
+ 40,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i8,
@@ -2809,15 +702,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR16mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 39,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 40,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i8,
@@ -2826,16 +719,16 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR32mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 41,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 42,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i8,
@@ -2844,25 +737,25 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR64mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
0,
- 61|128,5, ISD::SRA,
+ 82|128,5, TARGET_OPCODE(ISD::SRA),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 46,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckAndImm, 31,
@@ -2874,15 +767,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR8mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 44,
- OPC_CheckPredicate, 6,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 45,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckAndImm, 31,
@@ -2894,15 +787,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR16mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 44,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 45,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckAndImm, 31,
@@ -2914,20 +807,20 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR32mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 83,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 85,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckType, MVT::i8,
- OPC_Scope, 37,
+ OPC_Scope, 38,
OPC_CheckAndImm, 63,
OPC_RecordChild0,
OPC_MoveParent,
@@ -2936,14 +829,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR64mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 33,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 34,
OPC_CheckInteger, 1,
OPC_MoveParent,
OPC_CheckType, MVT::i8,
@@ -2951,15 +844,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR8m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
0,
- 40,
- OPC_CheckPredicate, 6,
+ 41,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -2970,14 +863,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR16m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 40,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 41,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -2988,17 +881,17 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR32m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 87,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 90,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_Scope, 37,
+ OPC_Scope, 38,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
OPC_CheckType, MVT::i8,
@@ -3008,16 +901,16 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR64m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 41,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 43,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i8,
@@ -3025,20 +918,20 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR8mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
0,
- 44,
- OPC_CheckPredicate, 6,
+ 46,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i16,
@@ -3046,19 +939,19 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR16mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
- 44,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 46,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
@@ -3066,21 +959,21 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR32mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
- 85,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 88,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 40,
+ OPC_Scope, 42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
@@ -3088,30 +981,30 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR64mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
- 35,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 36,
OPC_CheckChild1Type, MVT::i8,
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR8mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
0,
- 39,
- OPC_CheckPredicate, 6,
+ 40,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i8,
@@ -3120,15 +1013,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR16mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 39,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 40,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i8,
@@ -3137,16 +1030,16 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR32mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 41,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 42,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i8,
@@ -3155,109 +1048,109 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR64mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
0,
- 95|128,4, ISD::SUB,
+ 101|128,4, TARGET_OPCODE(ISD::SUB),
OPC_MoveChild, 0,
- OPC_Scope, 31|128,1,
+ OPC_Scope, 32|128,1,
OPC_CheckInteger, 0,
OPC_MoveParent,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
OPC_Scope, 36,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::NEG8m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 3, 4, 5, 6, 7,
34,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_CheckType, MVT::i16,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::NEG16m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 3, 4, 5, 6, 7,
34,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::NEG32m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 3, 4, 5, 6, 7,
36,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::NEG64m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 3, 4, 5, 6, 7,
0,
- 56|128,3,
- OPC_CheckOpcode, ISD::LOAD,
+ 61|128,3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 76|128,1,
- OPC_CheckPredicate, 8,
- OPC_Scope, 26|128,1,
+ OPC_Scope, 78|128,1,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 27|128,1,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_Scope, 107,
- OPC_CheckPredicate, 11,
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_SwitchType , 32, MVT::i16,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
@@ -3268,8 +1161,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
@@ -3280,8 +1173,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
@@ -3289,81 +1182,81 @@ SDNode *SelectCode(SDNode *N) {
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
0,
37,
- OPC_CheckPredicate, 12,
+ OPC_CheckPredicate, 10,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64mi32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
0,
- 43,
- OPC_CheckPredicate, 9,
+ 44,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB8mi), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
0,
- 43,
- OPC_CheckPredicate, 6,
+ 44,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i16,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16mi), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 43,
- OPC_CheckPredicate, 3,
+ 44,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32mi), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
6|128,1,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_RecordChild1,
OPC_SwitchType , 30, MVT::i8,
@@ -3371,8 +1264,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB8mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -3382,8 +1275,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -3393,8 +1286,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -3404,8 +1297,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -3413,18 +1306,18 @@ SDNode *SelectCode(SDNode *N) {
0,
0,
0,
- 17|128,6, ISD::XOR,
- OPC_Scope, 123|128,4,
+ 23|128,6, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 0|128,5,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
OPC_Scope, 49,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
@@ -3434,14 +1327,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::NOT8m), 0|OPFL_Chain|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
47,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
@@ -3451,14 +1344,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::NOT16m), 0|OPFL_Chain|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
47,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
@@ -3468,16 +1361,16 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::NOT32m), 0|OPFL_Chain|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 123|128,1,
- OPC_CheckPredicate, 8,
+ 125|128,1,
+ OPC_CheckPredicate, 3,
OPC_Scope, 47,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
@@ -3487,26 +1380,26 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::NOT64m), 0|OPFL_Chain|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 113,
+ 114,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_SwitchType , 32, MVT::i16,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
@@ -3517,8 +1410,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
@@ -3529,30 +1422,30 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64mi8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
0,
- 84,
- OPC_CheckPredicate, 9,
+ 85,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_Scope, 37,
- OPC_CheckPredicate, 12,
+ OPC_CheckPredicate, 10,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
@@ -3565,8 +1458,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
@@ -3574,46 +1467,46 @@ SDNode *SelectCode(SDNode *N) {
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
0,
0,
- 43,
- OPC_CheckPredicate, 6,
+ 44,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i16,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16mi), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 43,
- OPC_CheckPredicate, 3,
+ 44,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32mi), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
6|128,1,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_RecordChild1,
OPC_SwitchType , 30, MVT::i8,
@@ -3621,8 +1514,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -3632,8 +1525,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -3643,8 +1536,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -3654,32 +1547,32 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64mr), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
0,
0,
- 16|128,1,
+ 17|128,1,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_SwitchType , 30, MVT::i8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -3689,8 +1582,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -3700,8 +1593,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -3711,26 +1604,26 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64mr), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
0,
0,
- 80|128,10, ISD::ADD,
- OPC_Scope, 58|128,9,
+ 90|128,10, TARGET_OPCODE(ISD::ADD),
+ OPC_Scope, 67|128,9,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
OPC_Scope, 41,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -3740,14 +1633,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::INC8m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 3, 4, 5, 6, 7,
41,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -3757,15 +1650,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 2,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 3,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::INC16m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 3, 4, 5, 6, 7,
41,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -3775,16 +1668,16 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 2,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 3,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::INC32m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 3, 4, 5, 6, 7,
50,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
@@ -3794,14 +1687,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC8m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 3, 4, 5, 6, 7,
50,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
@@ -3811,15 +1704,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 2,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 3,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC16m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 3, 4, 5, 6, 7,
50,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
@@ -3829,16 +1722,16 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 2,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 3,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC32m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 3, 4, 5, 6, 7,
88,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_Scope, 34,
@@ -3849,8 +1742,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::INC64m), 0|OPFL_Chain|OPFL_MemRefs,
@@ -3863,15 +1756,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC64m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 3, 4, 5, 6, 7,
0,
41,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -3881,15 +1774,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 3,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 4,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::INC64_16m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 3, 4, 5, 6, 7,
41,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -3899,15 +1792,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 3,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 4,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::INC64_32m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 3, 4, 5, 6, 7,
50,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
@@ -3917,15 +1810,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 3,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 4,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC64_16m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 3, 4, 5, 6, 7,
50,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
@@ -3935,15 +1828,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 3,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 4,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC64_32m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 3, 4, 5, 6, 7,
- 52,
- OPC_CheckPredicate, 6,
+ 53,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0|128,1,
@@ -3953,15 +1846,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitInteger, MVT::i16, 0|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 8,
- 52,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
+ 53,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0|128,1,
@@ -3971,20 +1864,20 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitInteger, MVT::i32, 0|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 8,
- 56|128,2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 107,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
+ 60|128,2,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 109,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_MoveChild, 1,
- OPC_Scope, 47,
+ OPC_Scope, 48,
OPC_CheckInteger, 0|128,1,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
@@ -3992,14 +1885,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitInteger, MVT::i64, 0|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 8,
- 51,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
+ 52,
OPC_CheckInteger, 0|128,0|128,0|128,0|128,0|128,1,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
@@ -4007,29 +1900,29 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitInteger, MVT::i64, 0|128,0|128,0|128,0|128,120|128,127|128,127|128,127|128,127|128,1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64mi32), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 8,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
0,
- 26|128,1,
+ 27|128,1,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_Scope, 107,
- OPC_CheckPredicate, 11,
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_SwitchType , 32, MVT::i16,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
@@ -4040,8 +1933,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
@@ -4052,8 +1945,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
@@ -4061,81 +1954,81 @@ SDNode *SelectCode(SDNode *N) {
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
0,
37,
- OPC_CheckPredicate, 12,
+ OPC_CheckPredicate, 10,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64mi32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
0,
- 43,
- OPC_CheckPredicate, 9,
+ 44,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8mi), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
0,
- 43,
- OPC_CheckPredicate, 6,
+ 44,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i16,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16mi), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 43,
- OPC_CheckPredicate, 3,
+ 44,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32mi), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
6|128,1,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_RecordChild1,
OPC_SwitchType , 30, MVT::i8,
@@ -4143,8 +2036,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -4154,8 +2047,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -4165,8 +2058,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -4176,32 +2069,32 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64mr), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
0,
0,
- 16|128,1,
+ 17|128,1,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_SwitchType , 30, MVT::i8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -4211,8 +2104,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -4222,8 +2115,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -4233,25 +2126,25 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64mr), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
0,
0,
- 11|128,4, ISD::ROTL,
+ 28|128,4, TARGET_OPCODE(ISD::ROTL),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 42,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_Scope, 43,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -4262,14 +2155,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL8m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 40,
- OPC_CheckPredicate, 6,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 41,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -4280,14 +2173,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL16m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 40,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 41,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -4298,17 +2191,17 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL32m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 87,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 90,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_Scope, 37,
+ OPC_Scope, 38,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
OPC_CheckType, MVT::i8,
@@ -4318,16 +2211,16 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL64m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 41,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 43,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i8,
@@ -4335,20 +2228,20 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL8mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
0,
- 44,
- OPC_CheckPredicate, 6,
+ 46,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i16,
@@ -4356,19 +2249,19 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL16mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
- 44,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 46,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
@@ -4376,21 +2269,21 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL32mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
- 85,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 88,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 40,
+ OPC_Scope, 42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
@@ -4398,30 +2291,30 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL64mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
- 35,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 36,
OPC_CheckChild1Type, MVT::i8,
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL8mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
0,
- 39,
- OPC_CheckPredicate, 6,
+ 40,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i8,
@@ -4430,15 +2323,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL16mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 39,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 40,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i8,
@@ -4447,16 +2340,16 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL32mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 41,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 42,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i8,
@@ -4465,25 +2358,25 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL64mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
0,
- 11|128,4, ISD::ROTR,
+ 28|128,4, TARGET_OPCODE(ISD::ROTR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 42,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_Scope, 43,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -4494,14 +2387,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR8m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 40,
- OPC_CheckPredicate, 6,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 41,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -4512,14 +2405,14 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR16m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 40,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 41,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
@@ -4530,17 +2423,17 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR32m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 87,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 90,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_Scope, 37,
+ OPC_Scope, 38,
OPC_MoveChild, 1,
OPC_CheckInteger, 1,
OPC_CheckType, MVT::i8,
@@ -4550,16 +2443,16 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR64m1), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 41,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 43,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i8,
@@ -4567,20 +2460,20 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR8mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
0,
- 44,
- OPC_CheckPredicate, 6,
+ 46,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i16,
@@ -4588,19 +2481,19 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR16mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
- 44,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 46,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
@@ -4608,21 +2501,21 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR32mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
- 85,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 88,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 40,
+ OPC_Scope, 42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
@@ -4630,30 +2523,30 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR64mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 4, 5, 6, 7, 8, 9,
- 35,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 36,
OPC_CheckChild1Type, MVT::i8,
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR8mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
0,
- 39,
- OPC_CheckPredicate, 6,
+ 40,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i8,
@@ -4662,15 +2555,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR16mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 39,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 40,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i8,
@@ -4679,16 +2572,16 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR32mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
- 41,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
+ 42,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckChild1Type, MVT::i8,
@@ -4697,39 +2590,39 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 3, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR64mCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 5, 4, 5, 6, 7, 8,
+ 1, MVT::i32, 5, 4, 5, 6, 7, 8,
0,
- 79|128,4, ISD::AND,
- OPC_Scope, 57|128,3,
+ 85|128,4, TARGET_OPCODE(ISD::AND),
+ OPC_Scope, 62|128,3,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 75|128,1,
- OPC_CheckPredicate, 8,
- OPC_Scope, 113,
+ OPC_Scope, 77|128,1,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 114,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_SwitchType , 32, MVT::i16,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
@@ -4740,8 +2633,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
@@ -4752,30 +2645,30 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64mi8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
0,
- 84,
- OPC_CheckPredicate, 9,
+ 85,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_Scope, 37,
- OPC_CheckPredicate, 12,
+ OPC_CheckPredicate, 10,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
@@ -4788,8 +2681,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
@@ -4797,46 +2690,46 @@ SDNode *SelectCode(SDNode *N) {
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
0,
0,
- 43,
- OPC_CheckPredicate, 6,
+ 44,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i16,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16mi), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 43,
- OPC_CheckPredicate, 3,
+ 44,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32mi), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
6|128,1,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_RecordChild1,
OPC_SwitchType , 30, MVT::i8,
@@ -4844,8 +2737,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -4855,8 +2748,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -4866,8 +2759,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -4877,32 +2770,32 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64mr), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
0,
0,
- 16|128,1,
+ 17|128,1,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_SwitchType , 30, MVT::i8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -4912,8 +2805,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -4923,8 +2816,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32mr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -4934,1789 +2827,741 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64mr), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
0,
0,
- 112|128,4, ISD::ADDE,
+ 85|128,4, TARGET_OPCODE(ISD::OR),
+ OPC_Scope, 62|128,3,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_Scope, 77|128,1,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 114,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_SwitchType , 32, MVT::i16,
+ OPC_MoveParent,
+ OPC_MoveChild, 2,
+ OPC_CheckSame, 2,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16mi8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 32, MVT::i32,
+ OPC_MoveParent,
+ OPC_MoveChild, 2,
+ OPC_CheckSame, 2,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32mi8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 32, MVT::i64,
+ OPC_MoveParent,
+ OPC_MoveChild, 2,
+ OPC_CheckSame, 2,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64mi8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 0,
+ 85,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 37,
+ OPC_CheckPredicate, 10,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_MoveParent,
+ OPC_MoveChild, 2,
+ OPC_CheckSame, 2,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64mi32), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 35,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_MoveChild, 2,
+ OPC_CheckSame, 2,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8mi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 0,
+ 0,
+ 44,
+ OPC_CheckPredicate, 7,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i16,
+ OPC_MoveParent,
+ OPC_MoveChild, 2,
+ OPC_CheckSame, 2,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16mi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 44,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_MoveParent,
+ OPC_MoveChild, 2,
+ OPC_CheckSame, 2,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32mi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 6|128,1,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 30, MVT::i8,
+ OPC_MoveParent,
+ OPC_MoveChild, 2,
+ OPC_CheckSame, 2,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
+ 30, MVT::i16,
+ OPC_MoveParent,
+ OPC_MoveChild, 2,
+ OPC_CheckSame, 2,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
+ 30, MVT::i32,
+ OPC_MoveParent,
+ OPC_MoveChild, 2,
+ OPC_CheckSame, 2,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
+ 30, MVT::i64,
+ OPC_MoveParent,
+ OPC_MoveChild, 2,
+ OPC_CheckSame, 2,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
+ 0,
+ 0,
+ 17|128,1,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_SwitchType , 30, MVT::i8,
+ OPC_MoveParent,
+ OPC_MoveChild, 2,
+ OPC_CheckSame, 3,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 2, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
+ 30, MVT::i16,
+ OPC_MoveParent,
+ OPC_MoveChild, 2,
+ OPC_CheckSame, 3,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 2, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
+ 30, MVT::i32,
+ OPC_MoveParent,
+ OPC_MoveChild, 2,
+ OPC_CheckSame, 3,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 2, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
+ 30, MVT::i64,
+ OPC_MoveParent,
+ OPC_MoveChild, 2,
+ OPC_CheckSame, 3,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 2, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
+ 0,
+ 0,
+ 5|128,5, TARGET_OPCODE(ISD::ADDE),
OPC_RecordNode,
OPC_CaptureFlagInput,
- OPC_Scope, 80|128,3,
+ OPC_Scope, 96|128,3,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 86|128,1,
- OPC_CheckPredicate, 8,
- OPC_Scope, 34|128,1,
+ OPC_Scope, 93|128,1,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 39|128,1,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 113,
- OPC_CheckPredicate, 11,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 116,
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
- OPC_SwitchType , 34, MVT::i16,
+ OPC_SwitchType , 35, MVT::i16,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_EmitConvertToTarget, 4,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC16mi8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 10,
- 34, MVT::i32,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 10,
+ 35, MVT::i32,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_EmitConvertToTarget, 4,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC32mi8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 10,
- 34, MVT::i64,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 10,
+ 35, MVT::i64,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_EmitConvertToTarget, 4,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC64mi8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 10,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 10,
0,
- 39,
- OPC_CheckPredicate, 12,
+ 40,
+ OPC_CheckPredicate, 10,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_EmitConvertToTarget, 4,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC64mi32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 10,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 10,
0,
- 45,
- OPC_CheckPredicate, 9,
+ 47,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_EmitConvertToTarget, 4,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC8mi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 10,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 10,
0,
- 45,
- OPC_CheckPredicate, 6,
+ 47,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i16,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_EmitConvertToTarget, 4,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC16mi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 10,
- 45,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 10,
+ 47,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_EmitConvertToTarget, 4,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC32mi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 10,
- 14|128,1,
- OPC_CheckPredicate, 8,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 10,
+ 18|128,1,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 32, MVT::i8,
+ OPC_SwitchType , 33, MVT::i8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC8mr), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 4,
- 32, MVT::i16,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 4,
+ 33, MVT::i16,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC16mr), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 4,
- 32, MVT::i32,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 4,
+ 33, MVT::i32,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC32mr), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 4,
- 32, MVT::i64,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 4,
+ 33, MVT::i64,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC64mr), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 4,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 4,
0,
0,
- 24|128,1,
+ 29|128,1,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_SwitchType , 32, MVT::i8,
+ OPC_SwitchType , 33, MVT::i8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 4,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/4,
OPC_EmitMergeInputChains, 2, 0, 3,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC8mr), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 2,
- 32, MVT::i16,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 2,
+ 33, MVT::i16,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 4,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/4,
OPC_EmitMergeInputChains, 2, 0, 3,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC16mr), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 2,
- 32, MVT::i32,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 2,
+ 33, MVT::i32,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 4,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/4,
OPC_EmitMergeInputChains, 2, 0, 3,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC32mr), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 2,
- 32, MVT::i64,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 2,
+ 33, MVT::i64,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 4,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/4,
OPC_EmitMergeInputChains, 2, 0, 3,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC64mr), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 2,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 2,
0,
0,
- 82|128,3, ISD::SUBE,
+ 98|128,3, TARGET_OPCODE(ISD::SUBE),
OPC_RecordNode,
OPC_CaptureFlagInput,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 86|128,1,
- OPC_CheckPredicate, 8,
- OPC_Scope, 34|128,1,
+ OPC_Scope, 93|128,1,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 39|128,1,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 113,
- OPC_CheckPredicate, 11,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 116,
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
- OPC_SwitchType , 34, MVT::i16,
+ OPC_SwitchType , 35, MVT::i16,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_EmitConvertToTarget, 4,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB16mi8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 10,
- 34, MVT::i32,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 10,
+ 35, MVT::i32,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_EmitConvertToTarget, 4,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB32mi8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 10,
- 34, MVT::i64,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 10,
+ 35, MVT::i64,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_EmitConvertToTarget, 4,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB64mi8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 10,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 10,
0,
- 39,
- OPC_CheckPredicate, 12,
+ 40,
+ OPC_CheckPredicate, 10,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_EmitConvertToTarget, 4,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB64mi32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 10,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 10,
0,
- 45,
- OPC_CheckPredicate, 9,
+ 47,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_EmitConvertToTarget, 4,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB8mi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 10,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 10,
0,
- 45,
- OPC_CheckPredicate, 6,
+ 47,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i16,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_EmitConvertToTarget, 4,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB16mi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 10,
- 45,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 10,
+ 47,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_EmitConvertToTarget, 4,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB32mi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 10,
- 14|128,1,
- OPC_CheckPredicate, 8,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 10,
+ 18|128,1,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 32, MVT::i8,
+ OPC_SwitchType , 33, MVT::i8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB8mr), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 4,
- 32, MVT::i16,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 4,
+ 33, MVT::i16,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB16mr), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 4,
- 32, MVT::i32,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 4,
+ 33, MVT::i32,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB32mr), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 4,
- 32, MVT::i64,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 4,
+ 33, MVT::i64,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
OPC_EmitMergeInputChains, 2, 0, 2,
OPC_MarkFlagResults, 1, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB64mr), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 4,
- 0,
- 0,
- 124|128,4, X86ISD::ADD,
- OPC_Scope, 85|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 45,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 45,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 124,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 37,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 37,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64mi32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 35,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 0,
- 43,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 43,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 38,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 36,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 36,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 38,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 0,
- 33|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 37,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 35,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 35,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 37,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 4,
0,
0,
- 85|128,3, X86ISD::SUB,
+ 40|128,1, TARGET_OPCODE(ISD::VECTOR_SHUFFLE),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 45,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 45,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 124,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 37,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 37,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64mi32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 35,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB8mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 0,
- 43,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 43,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 38,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB8mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 36,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 36,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 38,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 0,
- 124|128,4, X86ISD::OR,
- OPC_Scope, 85|128,3,
+ OPC_SwitchOpcode , 51, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 45,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 45,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 124,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 37,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 37,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64mi32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 35,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 0,
- 43,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 43,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 38,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 36,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 36,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 38,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 0,
- 33|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 37,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 35,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 35,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 37,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 0,
- 0,
- 124|128,4, X86ISD::XOR,
- OPC_Scope, 85|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 45,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 45,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 124,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 37,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 37,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64mi32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 35,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 0,
- 43,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 43,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 38,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 36,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 36,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 38,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 0,
- 33|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 37,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 35,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 35,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 37,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 0,
- 0,
- 124|128,4, X86ISD::AND,
- OPC_Scope, 85|128,3,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 45,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 45,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 124,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 37,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 37,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64mi32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 35,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 0,
- 43,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 43,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
- 38,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 36,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 36,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 38,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 3,
- 0,
- 33|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 37,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 35,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 35,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 37,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 3,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 4, 5, 6, 7, 8, 1,
- 0,
- 0,
- 37|128,1, ISD::VECTOR_SHUFFLE,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 50, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckPredicate, 13,
+ OPC_CheckPredicate, 11,
OPC_CheckType, MVT::v4i32,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPSmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 4, 5, 6, 7, 8, 3,
- 107, ISD::LOAD,
+ 107, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckPredicate, 13,
+ OPC_CheckPredicate, 11,
OPC_SwitchType , 29, MVT::v4f32,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPSmr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -6726,8 +3571,8 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPDmr), 0|OPFL_Chain|OPFL_MemRefs,
@@ -6737,29 +3582,29 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPDmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 4, 5, 6, 7, 8, 3,
0,
0,
- 27|128,2, X86ISD::SHLD,
+ 37|128,2, TARGET_OPCODE(X86ISD::SHLD),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 46,
- OPC_CheckPredicate, 3,
+ OPC_Scope, 48,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
@@ -6767,20 +3612,20 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 4,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 3, 10,
- 46,
- OPC_CheckPredicate, 6,
+ 1, MVT::i32, 7, 5, 6, 7, 8, 9, 3, 10,
+ 48,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i16,
@@ -6788,21 +3633,21 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 4,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 3, 10,
- 48,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 7, 5, 6, 7, 8, 9, 3, 10,
+ 50,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
@@ -6810,15 +3655,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 4,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD64mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 3, 10,
- 41,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 7, 5, 6, 7, 8, 9, 3, 10,
+ 42,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
@@ -6828,15 +3673,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 4, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 3,
- 41,
- OPC_CheckPredicate, 6,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 3,
+ 42,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
@@ -6846,16 +3691,16 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 4, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 3,
- 43,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 3,
+ 44,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
@@ -6865,29 +3710,29 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 4, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD64mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 3,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 3,
0,
- 27|128,2, X86ISD::SHRD,
+ 37|128,2, TARGET_OPCODE(X86ISD::SHRD),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 46,
- OPC_CheckPredicate, 3,
+ OPC_Scope, 48,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
@@ -6895,20 +3740,20 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 4,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 3, 10,
- 46,
- OPC_CheckPredicate, 6,
+ 1, MVT::i32, 7, 5, 6, 7, 8, 9, 3, 10,
+ 48,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i16,
@@ -6916,21 +3761,21 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 4,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 3, 10,
- 48,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 7, 5, 6, 7, 8, 9, 3, 10,
+ 50,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
@@ -6938,15 +3783,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitConvertToTarget, 4,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD64mri8), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 5, 6, 7, 8, 9, 3, 10,
- 41,
- OPC_CheckPredicate, 3,
+ 1, MVT::i32, 7, 5, 6, 7, 8, 9, 3, 10,
+ 42,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
@@ -6956,15 +3801,15 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 4, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 3,
- 41,
- OPC_CheckPredicate, 6,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 3,
+ 42,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
@@ -6974,16 +3819,16 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 4, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 3,
- 43,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 3,
+ 44,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
@@ -6993,234 +3838,109 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
OPC_EmitCopyToReg, 4, X86::CL,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD64mrCL), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 0, 6, 5, 6, 7, 8, 9, 3,
+ 1, MVT::i32, 6, 5, 6, 7, 8, 9, 3,
0,
- 116|128,1, X86ISD::INC,
+ 100, TARGET_OPCODE(X86ISD::MOVLPS),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_SwitchType , 36, MVT::i8,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_SwitchOpcode , 49, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
- OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v4i32,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::INC8m), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 3, 4, 5, 6, 7,
- 76, MVT::i16,
- OPC_Scope, 36,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::INC16m), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 3, 4, 5, 6, 7,
- 36,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::INC64_16m), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 3, 4, 5, 6, 7,
- 0,
- 76, MVT::i32,
- OPC_Scope, 36,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::INC32m), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 3, 4, 5, 6, 7,
- 36,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::INC64_32m), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 3, 4, 5, 6, 7,
- 0,
- 36, MVT::i64,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 4, 5, 6, 7, 8, 3,
+ 41, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_CheckType, MVT::i64,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v4f32,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::INC64m), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 4, 5, 6, 7, 8, 3,
0,
- 116|128,1, X86ISD::DEC,
+ 79, TARGET_OPCODE(X86ISD::MOVLPD),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_SwitchType , 36, MVT::i8,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 29, MVT::v2f64,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC8m), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 3, 4, 5, 6, 7,
- 76, MVT::i16,
- OPC_Scope, 36,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC16m), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 3, 4, 5, 6, 7,
- 36,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC64_16m), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 3, 4, 5, 6, 7,
- 0,
- 76, MVT::i32,
- OPC_Scope, 36,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC32m), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 3, 4, 5, 6, 7,
- 36,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckSame, 2,
- OPC_MoveParent,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC64_32m), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 3, 4, 5, 6, 7,
- 0,
- 36, MVT::i64,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 4, 5, 6, 7, 8, 3,
+ 29, MVT::v2i64,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckSame, 2,
OPC_MoveParent,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC64m), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 4, 5, 6, 7, 8, 3,
0,
- 83|128,3, ISD::EXTRACT_VECTOR_ELT,
- OPC_Scope, 60|128,1,
+ 102|128,5, TARGET_OPCODE(ISD::EXTRACT_VECTOR_ELT),
+ OPC_Scope, 106|128,2,
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 99, ISD::VECTOR_SHUFFLE,
- OPC_Scope, 51,
+ OPC_SwitchOpcode , 14|128,1, TARGET_OPCODE(ISD::VECTOR_SHUFFLE),
+ OPC_Scope, 73,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_RecordChild0,
OPC_CheckChild0Type, MVT::v4f32,
OPC_MoveParent,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
OPC_MoveParent,
- OPC_CheckPredicate, 14,
+ OPC_CheckPredicate, 12,
OPC_CheckType, MVT::v2f64,
OPC_MoveParent,
OPC_MoveChild, 1,
@@ -7229,19 +3949,27 @@ SDNode *SelectCode(SDNode *N) {
OPC_CheckType, MVT::f64,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHPSmr), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 1,
- 44,
+ OPC_CheckPredicate, 6,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVHPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 0,
+ 65,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
OPC_MoveParent,
- OPC_CheckPredicate, 14,
+ OPC_CheckPredicate, 12,
OPC_CheckType, MVT::v2f64,
OPC_MoveParent,
OPC_MoveChild, 1,
@@ -7250,18 +3978,64 @@ SDNode *SelectCode(SDNode *N) {
OPC_CheckType, MVT::f64,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHPDmr), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 1,
+ OPC_CheckPredicate, 6,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVHPDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHPDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 0,
0,
- 81, ISD::BIT_CONVERT,
+ 39, TARGET_OPCODE(X86ISD::UNPCKHPS),
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::f64,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 39, TARGET_OPCODE(X86ISD::UNPCKHPD),
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::f64,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHPDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 125, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_RecordChild0,
OPC_CheckChild0Type, MVT::v4f32,
- OPC_SwitchType , 34, MVT::v2f64,
+ OPC_SwitchType , 54, MVT::v2f64,
OPC_MoveParent,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -7269,35 +4043,52 @@ SDNode *SelectCode(SDNode *N) {
OPC_CheckType, MVT::f64,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPSmr), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 1,
- 38, MVT::v4i32,
+ OPC_CheckPredicate, 6,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVLPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 0,
+ 62, MVT::v4i32,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::EXTRACTPSmr), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 4, 5, 6, 7, 8, 1, 9,
+ OPC_CheckPredicate, 6,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VEXTRACTPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 7, 4, 5, 6, 7, 8, 1, 9,
+ 21,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::EXTRACTPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 7, 4, 5, 6, 7, 8, 1, 9,
+ 0,
0,
0,
- 17|128,2,
+ 118|128,2,
OPC_RecordChild0,
- OPC_Scope, 69,
+ OPC_Scope, 84,
OPC_CheckChild0Type, MVT::v2f64,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -7305,90 +4096,130 @@ SDNode *SelectCode(SDNode *N) {
OPC_CheckType, MVT::f64,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPredicate, 6,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVLPDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 18,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPDmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 30,
+ 28,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitInteger, MVT::i32, X86::sub_sd,
OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
1, MVT::f64, 2, 1, 8,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 9,
0,
- 76,
- OPC_CheckChild0Type, MVT::v2i64,
- OPC_Scope, 33,
+ 120,
+ OPC_CheckChild0Type, MVT::v4i32,
+ OPC_Scope, 53,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
OPC_MoveParent,
- OPC_CheckType, MVT::i64,
+ OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVPQI2QImr), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 1,
- 37,
+ OPC_CheckPredicate, 6,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVPDI2DImr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVPDI2DImr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 0,
+ 61,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckType, MVT::i64,
+ OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PEXTRQmr), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 4, 5, 6, 7, 8, 1, 9,
+ OPC_CheckPredicate, 6,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPEXTRDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 7, 4, 5, 6, 7, 8, 1, 9,
+ 21,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PEXTRDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 7, 4, 5, 6, 7, 8, 1, 9,
+ 0,
0,
- 76,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_Scope, 33,
+ 120,
+ OPC_CheckChild0Type, MVT::v2i64,
+ OPC_Scope, 53,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
OPC_MoveParent,
- OPC_CheckType, MVT::i32,
+ OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVPDI2DImr), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 1,
- 37,
+ OPC_CheckPredicate, 6,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVPQI2QImr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVPQI2QImr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 0,
+ 61,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckType, MVT::i32,
+ OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PEXTRDmr), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 7, 4, 5, 6, 7, 8, 1, 9,
+ OPC_CheckPredicate, 6,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPEXTRQmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 7, 4, 5, 6, 7, 8, 1, 9,
+ 21,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PEXTRQmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 7, 4, 5, 6, 7, 8, 1, 9,
+ 0,
0,
- 45,
+ 43,
OPC_CheckChild0Type, MVT::v4f32,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
@@ -7396,6395 +4227,6380 @@ SDNode *SelectCode(SDNode *N) {
OPC_CheckType, MVT::f32,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitInteger, MVT::i32, X86::sub_ss,
OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
1, MVT::f32, 2, 1, 8,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 9,
0,
0,
- 57|128,1, ISD::TRUNCATE,
+ 52|128,1, TARGET_OPCODE(ISD::TRUNCATE),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SRL),
OPC_RecordChild0,
OPC_MoveChild, 1,
OPC_CheckInteger, 8,
OPC_CheckType, MVT::i8,
OPC_MoveParent,
- OPC_CheckPredicate, 15,
- OPC_SwitchType , 53, MVT::i64,
+ OPC_CheckPredicate, 13,
+ OPC_SwitchType , 51, MVT::i64,
OPC_MoveParent,
- OPC_CheckPredicate, 16,
+ OPC_CheckPredicate, 14,
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitInteger, MVT::i32, X86::GR64_ABCDRegClassID,
OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
1, MVT::i64, 2, 1, 8,
- OPC_EmitInteger, MVT::i32, 2,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
1, MVT::i8, 2, 9, 10,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV8mr_NOREX), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 11,
- 55, MVT::i32,
+ 53, MVT::i32,
OPC_MoveParent,
- OPC_CheckPredicate, 16,
+ OPC_CheckPredicate, 14,
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 3,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 4,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitInteger, MVT::i32, X86::GR32_ABCDRegClassID,
OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
1, MVT::i32, 2, 1, 8,
- OPC_EmitInteger, MVT::i32, 2,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
1, MVT::i8, 2, 9, 10,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV8mr_NOREX), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 11,
- 55, MVT::i16,
+ 53, MVT::i16,
OPC_MoveParent,
- OPC_CheckPredicate, 16,
+ OPC_CheckPredicate, 14,
OPC_CheckType, MVT::i8,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 3,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 4,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
1, MVT::i16, 2, 1, 8,
- OPC_EmitInteger, MVT::i32, 2,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
1, MVT::i8, 2, 9, 10,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV8mr_NOREX), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 11,
0,
- 54, ISD::BIT_CONVERT,
+ 55, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::EXTRACT_VECTOR_ELT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::EXTRACT_VECTOR_ELT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_RecordChild0,
OPC_CheckChild0Type, MVT::v4f32,
OPC_CheckType, MVT::v4i32,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_CheckType, MVT::f32,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::EXTRACTPSmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 7, 4, 5, 6, 7, 8, 1, 9,
- 116|128,3, X86ISD::SETCC,
+ 84|128,3, TARGET_OPCODE(X86ISD::SETCC),
OPC_MoveChild, 0,
- OPC_Scope, 30,
+ OPC_Scope, 28,
OPC_CheckInteger, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 1, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SETEm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 30,
+ 28,
OPC_CheckInteger, 9,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 1, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SETNEm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 30,
+ 28,
OPC_CheckInteger, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 1, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SETLm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 30,
+ 28,
OPC_CheckInteger, 6,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 1, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SETGEm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 30,
+ 28,
OPC_CheckInteger, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 1, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SETLEm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 30,
+ 28,
OPC_CheckInteger, 5,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 1, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SETGm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 30,
+ 28,
OPC_CheckInteger, 2,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 1, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SETBm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 30,
+ 28,
OPC_CheckInteger, 1,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 1, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SETAEm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 30,
+ 28,
OPC_CheckInteger, 3,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 1, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SETBEm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 30,
+ 28,
OPC_CheckInteger, 0,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 1, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SETAm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 30,
+ 28,
OPC_CheckInteger, 15,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 1, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SETSm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 30,
+ 28,
OPC_CheckInteger, 12,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 1, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SETNSm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 30,
+ 28,
OPC_CheckInteger, 14,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 1, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SETPm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 30,
+ 28,
OPC_CheckInteger, 11,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 1, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SETNPm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 30,
+ 28,
OPC_CheckInteger, 13,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 1, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SETOm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
- 30,
+ 28,
OPC_CheckInteger, 10,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 1, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SETNOm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
0, 5, 3, 4, 5, 6, 7,
0,
- 116|128,1, X86ISD::Wrapper,
+ 105|128,1, TARGET_OPCODE(X86ISD::Wrapper),
OPC_RecordChild0,
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 57, ISD::TargetGlobalAddress,
+ OPC_SwitchOpcode , 53, TARGET_OPCODE(ISD::TargetGlobalAddress),
OPC_MoveParent,
- OPC_SwitchType , 24, MVT::i32,
+ OPC_SwitchType , 22, MVT::i32,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32mi), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 26, MVT::i64,
+ 24, MVT::i64,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64mi32), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 57, ISD::TargetExternalSymbol,
+ 53, TARGET_OPCODE(ISD::TargetExternalSymbol),
OPC_MoveParent,
- OPC_SwitchType , 24, MVT::i32,
+ OPC_SwitchType , 22, MVT::i32,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32mi), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 26, MVT::i64,
+ 24, MVT::i64,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64mi32), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 57, ISD::TargetBlockAddress,
+ 53, TARGET_OPCODE(ISD::TargetBlockAddress),
OPC_MoveParent,
- OPC_SwitchType , 24, MVT::i32,
+ OPC_SwitchType , 22, MVT::i32,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32mi), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 26, MVT::i64,
+ 24, MVT::i64,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64mi32), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 29, ISD::TargetConstantPool,
+ 27, TARGET_OPCODE(ISD::TargetConstantPool),
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64mi32), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 29, ISD::TargetJumpTable,
+ 27, TARGET_OPCODE(ISD::TargetJumpTable),
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64mi32), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
0,
0,
- 21|128,1,
+ 114,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_SwitchType , 28, MVT::i64,
- OPC_CheckPredicate, 12,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_SwitchType , 26, MVT::i64,
+ OPC_CheckPredicate, 10,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64mi32), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 8,
- 26, MVT::i8,
+ 24, MVT::i8,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV8mi), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 8,
- 26, MVT::i16,
+ 24, MVT::i16,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV16mi), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 8,
- 54, MVT::i32,
+ 24, MVT::i32,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
- OPC_Scope, 22,
- OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32mi), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 8,
+ 0,
+ 86,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_SwitchType , 26, MVT::i64,
+ OPC_CheckChild0Type, MVT::f64,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDto64mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 48, MVT::i32,
+ OPC_CheckChild0Type, MVT::f32,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 8,
- 24,
- OPC_CheckPredicate, 17,
- OPC_CheckPredicate, 18,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVSS2DImr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 18,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV16mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 8,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSS2DImr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
0,
0,
- 35,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::f64,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDto64mr), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 1,
- 35,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 17,
- OPC_CheckPredicate, 18,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV16mi), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 8,
- 35,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::f32,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSS2DImr), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 1,
- 85|128,6,
+ 63|128,8,
OPC_RecordChild1,
- OPC_Scope, 51,
+ OPC_Scope, 66,
OPC_CheckChild1Type, MVT::f32,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 6,
+ OPC_CheckPredicate, 6,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ST_Fp32m), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 20,
- OPC_CheckPatternPredicate, 0,
+ 18,
+ OPC_CheckPatternPredicate, 1,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
+ 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVSSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
0,
- 79,
+ 92,
OPC_CheckChild1Type, MVT::f64,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
- OPC_Scope, 24,
- OPC_CheckPredicate, 17,
- OPC_CheckPredicate, 19,
- OPC_CheckPatternPredicate, 7,
+ OPC_CheckPredicate, 5,
+ OPC_Scope, 22,
+ OPC_CheckPredicate, 15,
+ OPC_CheckPredicate, 16,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ST_Fp64m32), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 46,
- OPC_CheckPredicate, 5,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 7,
+ 61,
+ OPC_CheckPredicate, 6,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ST_Fp64m), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 20,
- OPC_CheckPatternPredicate, 1,
+ 18,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
+ 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVSDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
0,
0,
- 75,
+ 69,
OPC_CheckChild1Type, MVT::f80,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
- OPC_Scope, 46,
- OPC_CheckPredicate, 17,
- OPC_Scope, 20,
- OPC_CheckPredicate, 19,
+ OPC_CheckPredicate, 5,
+ OPC_Scope, 42,
+ OPC_CheckPredicate, 15,
+ OPC_Scope, 18,
+ OPC_CheckPredicate, 16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ST_Fp80m32), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 20,
- OPC_CheckPredicate, 20,
+ 18,
+ OPC_CheckPredicate, 17,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ST_Fp80m64), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 20,
- OPC_CheckPredicate, 5,
+ 18,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ST_FpP80m), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 25,
+ 23,
OPC_CheckChild1Type, MVT::i8,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV8mr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 25,
+ 23,
OPC_CheckChild1Type, MVT::i16,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV16mr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 63,
+ 23,
OPC_CheckChild1Type, MVT::i32,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 23,
+ OPC_CheckChild1Type, MVT::i64,
+ OPC_RecordChild2,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 89,
+ OPC_CheckChild1Type, MVT::v4f32,
+ OPC_RecordChild2,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_Scope, 20,
- OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 18,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32mr), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVAPSmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 34,
- OPC_CheckPredicate, 17,
+ 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 20,
OPC_CheckPredicate, 18,
+ OPC_CheckPatternPredicate, 1,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i16, 2, 1, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV16mr), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 9,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVAPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
0,
- 63,
- OPC_CheckChild1Type, MVT::i64,
+ 89,
+ OPC_CheckChild1Type, MVT::v2f64,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
+ OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_Scope, 20,
- OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 18,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64mr), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVAPDmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 34,
- OPC_CheckPredicate, 17,
+ 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 20,
OPC_CheckPredicate, 18,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i16, 2, 1, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV16mr), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 9,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVAPDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
0,
- 53,
- OPC_CheckChild1Type, MVT::v4f32,
+ 49,
+ OPC_CheckChild1Type, MVT::v8f32,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_Scope, 22,
- OPC_CheckPredicate, 21,
+ OPC_CheckPredicate, 6,
+ OPC_Scope, 20,
+ OPC_CheckPredicate, 18,
OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVAPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVAPSYmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 20,
+ 18,
OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPSYmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 53,
- OPC_CheckChild1Type, MVT::v2f64,
+ 49,
+ OPC_CheckChild1Type, MVT::v4f64,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_Scope, 22,
- OPC_CheckPredicate, 21,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPredicate, 6,
+ OPC_Scope, 20,
+ OPC_CheckPredicate, 18,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVAPDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVAPDYmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 20,
- OPC_CheckPatternPredicate, 1,
+ 18,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPDmr), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPDYmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 49,
+ 89,
OPC_CheckChild1Type, MVT::v2i64,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_Scope, 20,
- OPC_CheckPredicate, 21,
+ OPC_CheckPredicate, 18,
+ OPC_CheckPatternPredicate, 1,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVAPSmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
18,
+ OPC_CheckPatternPredicate, 1,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPSmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
+ 20,
+ OPC_CheckPredicate, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVAPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
0,
- 49,
+ 89,
OPC_CheckChild1Type, MVT::v4i32,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_Scope, 20,
- OPC_CheckPredicate, 21,
+ OPC_CheckPredicate, 18,
+ OPC_CheckPatternPredicate, 1,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVAPSmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
18,
+ OPC_CheckPatternPredicate, 1,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPSmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
+ 20,
+ OPC_CheckPredicate, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVAPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
0,
- 49,
+ 89,
OPC_CheckChild1Type, MVT::v8i16,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_Scope, 20,
- OPC_CheckPredicate, 21,
+ OPC_CheckPredicate, 18,
+ OPC_CheckPatternPredicate, 1,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVAPSmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
18,
+ OPC_CheckPatternPredicate, 1,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPSmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
+ 20,
+ OPC_CheckPredicate, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVAPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
0,
- 49,
+ 89,
OPC_CheckChild1Type, MVT::v16i8,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_Scope, 20,
- OPC_CheckPredicate, 21,
+ OPC_CheckPredicate, 18,
+ OPC_CheckPatternPredicate, 1,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVAPSmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
18,
+ OPC_CheckPatternPredicate, 1,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPSmr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
+ 20,
+ OPC_CheckPredicate, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVAPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
+ 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPSmr), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 6, 3, 4, 5, 6, 7, 1,
0,
- 49,
+ 45,
OPC_CheckChild1Type, MVT::v1i64,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPredicate, 6,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ64mr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 18,
+ 16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ64mr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 25,
+ 23,
OPC_CheckChild1Type, MVT::v8i8,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ64mr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 25,
+ 23,
OPC_CheckChild1Type, MVT::v4i16,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ64mr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
- 25,
+ 23,
OPC_CheckChild1Type, MVT::v2i32,
OPC_RecordChild2,
- OPC_CheckPredicate, 4,
- OPC_CheckPredicate, 5,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ64mr), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 6, 3, 4, 5, 6, 7, 1,
- 25,
- OPC_CheckChild1Type, MVT::v2f32,
- OPC_RecordChild2,
- OPC_CheckPredicate, 4,
OPC_CheckPredicate, 5,
+ OPC_CheckPredicate, 6,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ64mr), 0|OPFL_Chain|OPFL_MemRefs,
0, 6, 3, 4, 5, 6, 7, 1,
0,
0,
- 7|128,24, ISD::VECTOR_SHUFFLE,
- OPC_Scope, 82,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ 82|128,12, TARGET_OPCODE(X86ISD::CMP),
+ OPC_Scope, 121|128,5,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::f64,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2f64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_Scope, 23,
- OPC_CheckPredicate, 13,
- OPC_CheckPatternPredicate, 0,
+ OPC_SwitchOpcode , 58|128,3, TARGET_OPCODE(ISD::AND),
+ OPC_Scope, 35|128,1,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_Scope, 73,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 30,
+ OPC_CheckPredicate, 10,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST64mi32), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
+ 28,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST8mi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
+ 0,
+ 37,
+ OPC_CheckPredicate, 7,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i16,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST16mi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
+ 37,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST32mi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
+ 0,
+ 6|128,1,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_Scope, 30,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 28,
+ OPC_CheckPredicate, 7,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i16,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 28,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 30,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 9|128,1,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_Scope, 31,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 29,
+ OPC_CheckPredicate, 7,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i16,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 29,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i32,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 31,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i64,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 51|128,2, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_Scope, 33,
+ OPC_CheckPredicate, 7,
+ OPC_CheckType, MVT::i16,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP16mi8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
+ 33,
+ OPC_CheckPredicate, 8,
+ OPC_CheckType, MVT::i32,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP32mi8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
+ 92,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_SwitchType , 55, MVT::i64,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 22,
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP64mi8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
+ 22,
+ OPC_CheckPredicate, 10,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP64mi32), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
+ 0,
+ 27, MVT::i8,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP8mi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
+ 0,
+ 31,
+ OPC_CheckPredicate, 7,
+ OPC_CheckType, MVT::i16,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP16mi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
+ 31,
+ OPC_CheckPredicate, 8,
+ OPC_CheckType, MVT::i32,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP32mi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
+ 25,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP8mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 23,
+ OPC_CheckPredicate, 7,
+ OPC_CheckType, MVT::i16,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP16mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 23,
+ OPC_CheckPredicate, 8,
+ OPC_CheckType, MVT::i32,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP32mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 0,
+ 0,
+ 107,
+ OPC_RecordChild0,
+ OPC_Scope, 35,
+ OPC_CheckChild0Type, MVT::i8,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
- 23,
- OPC_CheckPredicate, 22,
- OPC_CheckPatternPredicate, 0,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 33,
+ OPC_CheckChild0Type, MVT::i16,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 7,
+ OPC_MoveParent,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 33,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
0,
- 77,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ 36,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v2i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::i64,
OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_Scope, 22,
- OPC_CheckPredicate, 24,
- OPC_CheckPatternPredicate, 9,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSHDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 22,
- OPC_CheckPredicate, 25,
- OPC_CheckPatternPredicate, 9,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSLDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 0,
- 48|128,1,
+ OPC_RecordChild1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP64mr), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 33|128,1,
OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 70, ISD::SCALAR_TO_VECTOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_Scope, 35,
+ OPC_CheckChild0Type, MVT::i64,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::f64,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 60,
+ OPC_CheckChild0Type, MVT::f32,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_CheckType, MVT::v2f64,
- OPC_Scope, 23,
- OPC_CheckPredicate, 13,
- OPC_CheckPatternPredicate, 1,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
- 23,
- OPC_CheckPredicate, 22,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUCOMISSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
OPC_CheckPatternPredicate, 1,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UCOMISSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
0,
- 97, ISD::LOAD,
+ 60,
+ OPC_CheckChild0Type, MVT::f64,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_CheckPredicate, 13,
- OPC_SwitchType , 19, MVT::v4f32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::v2f64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::v4i32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::v2i64,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUCOMISDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UCOMISDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
0,
0,
- 56,
- OPC_RecordNode,
+ 49|128,1,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v2i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
- OPC_MoveParent,
- OPC_CheckPredicate, 26,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 3, 4, 5, 6, 7, 8,
- 86,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::AND),
+ OPC_RecordChild0,
OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_SwitchType , 31, MVT::v2f64,
- OPC_MoveParent,
- OPC_MoveParent,
+ OPC_Scope, 93,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
- OPC_MoveParent,
- OPC_CheckPredicate, 27,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 9,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
- 31, MVT::v2i64,
- OPC_MoveParent,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 62,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 19,
+ OPC_SwitchType , 17, MVT::i8,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST8ri), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 17, MVT::i16,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST16ri), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 17, MVT::i32,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST32ri), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 0,
+ 22,
+ OPC_CheckPredicate, 10,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST64ri32), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 0,
+ 55,
+ OPC_CheckPredicate, 19,
+ OPC_SwitchType , 15, MVT::i8,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST8rr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 15, MVT::i16,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST16rr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 15, MVT::i32,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST32rr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 0,
+ 17,
+ OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
+ OPC_CheckInteger, 0,
OPC_MoveParent,
- OPC_CheckPredicate, 27,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 9,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST64rr), 0,
+ 1, MVT::i32, 2, 0, 1,
0,
- 122,
+ 108|128,2,
OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_SwitchType , 50, MVT::v4f32,
- OPC_Scope, 23,
- OPC_CheckPredicate, 14,
+ OPC_Scope, 51,
+ OPC_CheckChild0Type, MVT::i8,
+ OPC_Scope, 14,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST8rr), 0,
+ 1, MVT::i32, 2, 0, 0,
+ 31,
+ OPC_RecordChild1,
+ OPC_Scope, 17,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP8ri), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP8rr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 0,
+ 0,
+ 69,
+ OPC_CheckChild0Type, MVT::i16,
+ OPC_Scope, 14,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST16rr), 0,
+ 1, MVT::i32, 2, 0, 0,
+ 49,
+ OPC_RecordChild1,
+ OPC_Scope, 35,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 14,
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP16ri8), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 12,
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP16ri), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 0,
+ 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP16rr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 0,
+ 0,
+ 69,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_Scope, 14,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST32rr), 0,
+ 1, MVT::i32, 2, 0, 0,
+ 49,
+ OPC_RecordChild1,
+ OPC_Scope, 35,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 14,
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP32ri8), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 12,
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP32ri), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 0,
+ 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP32rr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 0,
+ 0,
+ 71,
+ OPC_CheckChild0Type, MVT::i64,
+ OPC_Scope, 14,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST64rr), 0,
+ 1, MVT::i32, 2, 0, 0,
+ 51,
+ OPC_RecordChild1,
+ OPC_Scope, 37,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 14,
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP64ri8), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 14,
+ OPC_CheckPredicate, 10,
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP64ri32), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 0,
+ 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP64rr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 0,
+ 0,
+ 41,
+ OPC_CheckChild0Type, MVT::f32,
+ OPC_RecordChild1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UCOM_FpIr32), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 11,
OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKHPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
- 23,
- OPC_CheckPredicate, 28,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUCOMISSrr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UCOMISSrr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 0,
+ 41,
+ OPC_CheckChild0Type, MVT::f64,
+ OPC_RecordChild1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UCOM_FpIr64), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 11,
OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUCOMISDrr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UCOMISDrr), 0,
+ 1, MVT::i32, 2, 0, 1,
0,
- 50, MVT::v2f64,
+ 12,
+ OPC_CheckChild0Type, MVT::f80,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UCOM_FpIr80), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 0,
+ 0,
+ 73|128,90|128,1, TARGET_OPCODE(ISD::INTRINSIC_WO_CHAIN),
+ OPC_MoveChild, 0,
+ OPC_Scope, 122,
+ OPC_CheckInteger, 12|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 71,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
OPC_Scope, 23,
- OPC_CheckPredicate, 14,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKHPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPISTRIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
23,
- OPC_CheckPredicate, 28,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPatternPredicate, 10,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 42,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 15,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPISTRIrr), 0,
+ 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
+ 15,
+ OPC_CheckPatternPredicate, 10,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRIrr), 0,
+ 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
0,
0,
- 126,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 54, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ 122,
+ OPC_CheckInteger, 13|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 71,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
+ OPC_Scope, 23,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPISTRIArm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 23,
+ OPC_CheckPatternPredicate, 10,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRIArm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 42,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPredicate, 27,
- OPC_CheckType, MVT::v2f64,
- OPC_CheckPatternPredicate, 9,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
- 64, ISD::LOAD,
+ OPC_Scope, 15,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPISTRIArr), 0,
+ 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
+ 15,
+ OPC_CheckPatternPredicate, 10,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRIArr), 0,
+ 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
+ 0,
+ 0,
+ 122,
+ OPC_CheckInteger, 14|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 71,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPredicate, 27,
- OPC_SwitchType , 20, MVT::v2f64,
- OPC_CheckPatternPredicate, 9,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
- 20, MVT::v2i64,
- OPC_CheckPatternPredicate, 9,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ OPC_Scope, 23,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPISTRICrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 23,
+ OPC_CheckPatternPredicate, 10,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRICrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 42,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 15,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPISTRICrr), 0,
+ 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
+ 15,
+ OPC_CheckPatternPredicate, 10,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRICrr), 0,
+ 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
0,
0,
- 86,
- OPC_RecordNode,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v2i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_Scope, 26,
- OPC_CheckPredicate, 29,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitNodeXForm, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFHWmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 3, 4, 5, 6, 7, 8,
- 26,
- OPC_CheckPredicate, 30,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitNodeXForm, 2, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFLWmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 3, 4, 5, 6, 7, 8,
- 0,
- 51,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::f64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
+ 122,
+ OPC_CheckInteger, 15|128,6,
OPC_MoveParent,
- OPC_CheckPredicate, 27,
- OPC_CheckType, MVT::v2f64,
- OPC_CheckPatternPredicate, 9,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
- 95,
- OPC_RecordNode,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_SwitchType , 37, MVT::v4f32,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
- OPC_MoveParent,
- OPC_CheckPredicate, 26,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 3, 4, 5, 6, 7, 8,
- 35, MVT::v1i64,
- OPC_MoveParent,
+ OPC_Scope, 71,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPredicate, 31,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitNodeXForm, 3, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSHUFWmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 3, 4, 5, 6, 7, 8,
- 0,
- 45|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v2i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_Scope, 73,
- OPC_CheckPredicate, 28,
- OPC_SwitchType , 21, MVT::v16i8,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLBWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
+ OPC_Scope, 23,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLWDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPISTRIOrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 23,
+ OPC_CheckPatternPredicate, 10,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLDQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRIOrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
0,
- 73,
- OPC_CheckPredicate, 14,
- OPC_SwitchType , 21, MVT::v16i8,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHBWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHWDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHDQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 42,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 15,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPISTRIOrr), 0,
+ 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
+ 15,
+ OPC_CheckPatternPredicate, 10,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRIOrr), 0,
+ 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
0,
0,
- 70,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
+ 122,
+ OPC_CheckInteger, 16|128,6,
OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_Scope, 22,
- OPC_CheckPredicate, 24,
- OPC_CheckPatternPredicate, 9,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSHDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
- 22,
- OPC_CheckPredicate, 25,
- OPC_CheckPatternPredicate, 9,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSLDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
- 0,
- 53,
- OPC_RecordNode,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v2i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 32,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 1, 2,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 7, 1, 4, 5, 6, 7, 8, 9,
- 12|128,2,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 73|128,1, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 34|128,1, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_Scope, 73,
- OPC_CheckPredicate, 33,
- OPC_SwitchType , 21, MVT::v8i8,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHBWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v4i16,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHWDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v2i32,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHDQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 73,
- OPC_CheckPredicate, 34,
- OPC_SwitchType , 21, MVT::v8i8,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLBWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v4i16,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLWDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v2i32,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLDQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 0,
- 30, X86ISD::VZEXT_LOAD,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 22,
- OPC_CheckType, MVT::v4i32,
+ OPC_Scope, 71,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 23,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHPSrm), 0|OPFL_Chain,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPISTRISrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 23,
+ OPC_CheckPatternPredicate, 10,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRISrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
0,
- 57, ISD::UNDEF,
+ 42,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_Scope, 26,
- OPC_CheckPredicate, 27,
- OPC_SwitchType , 9, MVT::v4f32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLHPSrr), 0,
- 1, MVT::v4f32, 2, 0, 0,
- 9, MVT::v2i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLHPSrr), 0,
- 1, MVT::v2i64, 2, 0, 0,
- 0,
- 26,
- OPC_CheckPredicate, 35,
- OPC_SwitchType , 9, MVT::v4f32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHLPSrr), 0,
- 1, MVT::v4f32, 2, 0, 0,
- 9, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHLPSrr), 0,
- 1, MVT::v4i32, 2, 0, 0,
- 0,
+ OPC_Scope, 15,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPISTRISrr), 0,
+ 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
+ 15,
+ OPC_CheckPatternPredicate, 10,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRISrr), 0,
+ 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
0,
0,
- 75,
- OPC_RecordNode,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ 122,
+ OPC_CheckInteger, 17|128,6,
OPC_MoveParent,
- OPC_CheckPredicate, 32,
- OPC_SwitchType , 25, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 1, 2,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 7, 1, 4, 5, 6, 7, 8, 9,
- 25, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 1, 2,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 7, 1, 4, 5, 6, 7, 8, 9,
- 0,
- 7|128,1,
- OPC_RecordChild0,
- OPC_Scope, 67,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_RecordChild1,
+ OPC_Scope, 71,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
OPC_Scope, 23,
- OPC_CheckPredicate, 28,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLQDQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPISTRIZrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
23,
- OPC_CheckPredicate, 14,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPatternPredicate, 10,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHQDQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRIZrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
0,
- 63,
- OPC_RecordChild1,
- OPC_SwitchType , 30, MVT::v4f32,
- OPC_Scope, 13,
- OPC_CheckPredicate, 22,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLHPSrr), 0,
- 1, MVT::v4f32, 2, 0, 1,
- 13,
- OPC_CheckPredicate, 36,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHLPSrr), 0,
- 1, MVT::v4f32, 2, 0, 1,
- 0,
- 26, MVT::v4i32,
- OPC_Scope, 11,
- OPC_CheckPredicate, 22,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLHPSrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 11,
- OPC_CheckPredicate, 36,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHLPSrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 0,
+ 42,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 15,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPISTRIZrr), 0,
+ 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
+ 15,
+ OPC_CheckPatternPredicate, 10,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRIZrr), 0,
+ 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
0,
0,
- 22,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 37,
+ 25|128,1,
+ OPC_CheckInteger, 4|128,6,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckPredicate, 38,
- OPC_CheckType, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZPQILo2PQIrr), 0,
- 1, MVT::v2f64, 1, 0,
- 36,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_Scope, 12,
- OPC_CheckPredicate, 24,
- OPC_CheckPatternPredicate, 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSHDUPrr), 0,
- 1, MVT::v4i32, 1, 0,
- 12,
- OPC_CheckPredicate, 25,
- OPC_CheckPatternPredicate, 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSLDUPrr), 0,
- 1, MVT::v4i32, 1, 0,
- 0,
- 83,
- OPC_RecordNode,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
- OPC_MoveParent,
- OPC_Scope, 36,
- OPC_CheckPredicate, 39,
- OPC_SwitchType , 14, MVT::v4i32,
- OPC_CheckPatternPredicate, 10,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDri), 0,
- 1, MVT::v4i32, 2, 1, 2,
- 14, MVT::v4f32,
+ OPC_RecordChild2,
+ OPC_Scope, 88,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild4,
+ OPC_RecordChild5,
+ OPC_MoveChild, 5,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 31,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitConvertToTarget, 5,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 4, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPESTRIrm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
+ 31,
OPC_CheckPatternPredicate, 10,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDri), 0,
- 1, MVT::v4f32, 2, 1, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitConvertToTarget, 5,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 4, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRIrm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
0,
- 36,
- OPC_CheckPredicate, 40,
- OPC_SwitchType , 14, MVT::v4i32,
- OPC_CheckPatternPredicate, 10,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDri), 0,
- 1, MVT::v4i32, 2, 1, 2,
- 14, MVT::v4f32,
+ 55,
+ OPC_RecordChild3,
+ OPC_RecordChild4,
+ OPC_RecordChild5,
+ OPC_MoveChild, 5,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 4,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 3, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPESTRIrr), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
+ 21,
OPC_CheckPatternPredicate, 10,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDri), 0,
- 1, MVT::v4f32, 2, 1, 2,
+ OPC_EmitConvertToTarget, 4,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 3, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRIrr), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
0,
0,
- 91|128,3,
- OPC_RecordChild0,
- OPC_Scope, 27|128,1,
+ 25|128,1,
+ OPC_CheckInteger, 5|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 88,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_Scope, 96,
- OPC_CheckPredicate, 38,
- OPC_SwitchType , 21, MVT::v4f32,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 1, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrr), 0,
- 1, MVT::v4f32, 2, 0, 3,
- 21, MVT::v2f64,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f64, 2, 1, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
- 1, MVT::v2f64, 2, 0, 3,
- 21, MVT::v4i32,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 1, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrr), 0,
- 1, MVT::v4i32, 2, 0, 3,
- 21, MVT::v2i64,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f64, 2, 1, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
- 1, MVT::v2i64, 2, 0, 3,
- 0,
- 54,
- OPC_CheckPredicate, 13,
- OPC_SwitchType , 23, MVT::v4f32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f64, 2, 1, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
- 1, MVT::v4f32, 2, 0, 3,
- 23, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f64, 2, 1, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
- 1, MVT::v4i32, 2, 0, 3,
- 0,
- 0,
- 117|128,1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_Scope, 15,
- OPC_CheckPredicate, 41,
- OPC_CheckType, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPDrr), 0,
- 1, MVT::v2f64, 2, 0, 0,
- 15,
- OPC_CheckPredicate, 14,
- OPC_CheckType, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKHPDrr), 0,
- 1, MVT::v2f64, 2, 0, 0,
- 15,
- OPC_CheckPredicate, 41,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLQDQrr), 0,
- 1, MVT::v2i64, 2, 0, 0,
- 15,
- OPC_CheckPredicate, 14,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHQDQrr), 0,
- 1, MVT::v2i64, 2, 0, 0,
- 48,
- OPC_CheckPredicate, 39,
- OPC_SwitchType , 9, MVT::v4f32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPSrr), 0,
- 1, MVT::v4f32, 2, 0, 0,
- 9, MVT::v16i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLBWrr), 0,
- 1, MVT::v16i8, 2, 0, 0,
- 9, MVT::v8i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLWDrr), 0,
- 1, MVT::v8i16, 2, 0, 0,
- 9, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLDQrr), 0,
- 1, MVT::v4i32, 2, 0, 0,
- 0,
- 48,
- OPC_CheckPredicate, 40,
- OPC_SwitchType , 9, MVT::v4f32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKHPSrr), 0,
- 1, MVT::v4f32, 2, 0, 0,
- 9, MVT::v16i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHBWrr), 0,
- 1, MVT::v16i8, 2, 0, 0,
- 9, MVT::v8i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHWDrr), 0,
- 1, MVT::v8i16, 2, 0, 0,
- 9, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHDQrr), 0,
- 1, MVT::v4i32, 2, 0, 0,
- 0,
- 37,
- OPC_CheckPredicate, 42,
- OPC_SwitchType , 9, MVT::v8i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLBWrr), 0,
- 1, MVT::v8i8, 2, 0, 0,
- 9, MVT::v4i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLWDrr), 0,
- 1, MVT::v4i16, 2, 0, 0,
- 9, MVT::v2i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLDQrr), 0,
- 1, MVT::v2i32, 2, 0, 0,
- 0,
- 37,
- OPC_CheckPredicate, 43,
- OPC_SwitchType , 9, MVT::v8i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHBWrr), 0,
- 1, MVT::v8i8, 2, 0, 0,
- 9, MVT::v4i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHWDrr), 0,
- 1, MVT::v4i16, 2, 0, 0,
- 9, MVT::v2i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHDQrr), 0,
- 1, MVT::v2i32, 2, 0, 0,
- 0,
+ OPC_RecordChild4,
+ OPC_RecordChild5,
+ OPC_MoveChild, 5,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 31,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitConvertToTarget, 5,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 4, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPESTRIArm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
+ 31,
+ OPC_CheckPatternPredicate, 10,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitConvertToTarget, 5,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 4, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRIArm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
0,
- 67,
- OPC_RecordChild1,
- OPC_SwitchType , 30, MVT::v4f32,
- OPC_Scope, 13,
- OPC_CheckPredicate, 14,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKHPSrr), 0,
- 1, MVT::v4f32, 2, 0, 1,
- 13,
- OPC_CheckPredicate, 28,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPSrr), 0,
- 1, MVT::v4f32, 2, 0, 1,
- 0,
- 30, MVT::v2f64,
- OPC_Scope, 13,
- OPC_CheckPredicate, 14,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKHPDrr), 0,
- 1, MVT::v2f64, 2, 0, 1,
- 13,
- OPC_CheckPredicate, 28,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPDrr), 0,
- 1, MVT::v2f64, 2, 0, 1,
- 0,
+ 55,
+ OPC_RecordChild3,
+ OPC_RecordChild4,
+ OPC_RecordChild5,
+ OPC_MoveChild, 5,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 4,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 3, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPESTRIArr), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
+ 21,
+ OPC_CheckPatternPredicate, 10,
+ OPC_EmitConvertToTarget, 4,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 3, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRIArr), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
0,
0,
- 36,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 44,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v8i8,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
+ 25|128,1,
+ OPC_CheckInteger, 6|128,6,
OPC_MoveParent,
- OPC_CheckPredicate, 38,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2DQrr), 0,
- 1, MVT::v2i64, 1, 0,
- 120,
- OPC_RecordNode,
- OPC_RecordChild0,
- OPC_Scope, 41,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 88,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 26,
- OPC_SwitchType , 14, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDri), 0,
- 1, MVT::v4i32, 2, 1, 2,
- 14, MVT::v4f32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDri), 0,
- 1, MVT::v4f32, 2, 1, 2,
+ OPC_RecordChild4,
+ OPC_RecordChild5,
+ OPC_MoveChild, 5,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 31,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitConvertToTarget, 5,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 4, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPESTRICrm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
+ 31,
+ OPC_CheckPatternPredicate, 10,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitConvertToTarget, 5,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 4, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRICrm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
0,
- 73,
- OPC_RecordChild1,
- OPC_CheckPredicate, 45,
- OPC_SwitchType , 15, MVT::v4i32,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitNodeXForm, 4, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR128rr), 0,
- 1, MVT::v4i32, 3, 2, 1, 3,
- 15, MVT::v4f32,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitNodeXForm, 4, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR128rr), 0,
- 1, MVT::v4f32, 3, 2, 1, 3,
- 15, MVT::v8i16,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitNodeXForm, 4, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR128rr), 0,
- 1, MVT::v8i16, 3, 2, 1, 3,
- 15, MVT::v16i8,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitNodeXForm, 4, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR128rr), 0,
- 1, MVT::v16i8, 3, 2, 1, 3,
+ 55,
+ OPC_RecordChild3,
+ OPC_RecordChild4,
+ OPC_RecordChild5,
+ OPC_MoveChild, 5,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 4,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 3, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPESTRICrr), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
+ 21,
+ OPC_CheckPatternPredicate, 10,
+ OPC_EmitConvertToTarget, 4,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 3, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRICrr), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
0,
0,
- 46,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
- OPC_RecordChild0,
- OPC_Scope, 18,
- OPC_CheckChild0Type, MVT::f32,
+ 25|128,1,
+ OPC_CheckInteger, 7|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 88,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_CheckPredicate, 38,
- OPC_CheckType, MVT::v16i8,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 18,
- OPC_CheckChild0Type, MVT::f64,
+ OPC_RecordChild4,
+ OPC_RecordChild5,
+ OPC_MoveChild, 5,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPredicate, 38,
- OPC_CheckType, MVT::v16i8,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
+ OPC_Scope, 31,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitConvertToTarget, 5,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 4, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPESTRIOrm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
+ 31,
+ OPC_CheckPatternPredicate, 10,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitConvertToTarget, 5,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 4, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRIOrm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
+ 0,
+ 55,
+ OPC_RecordChild3,
+ OPC_RecordChild4,
+ OPC_RecordChild5,
+ OPC_MoveChild, 5,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 4,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 3, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPESTRIOrr), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
+ 21,
+ OPC_CheckPatternPredicate, 10,
+ OPC_EmitConvertToTarget, 4,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 3, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRIOrr), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
+ 0,
0,
- 45,
- OPC_RecordNode,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
+ 25|128,1,
+ OPC_CheckInteger, 8|128,6,
OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_Scope, 16,
- OPC_CheckPredicate, 29,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitNodeXForm, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFHWri), 0,
- 1, MVT::v8i16, 2, 1, 2,
- 16,
- OPC_CheckPredicate, 30,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitNodeXForm, 2, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFLWri), 0,
- 1, MVT::v8i16, 2, 1, 2,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 88,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild4,
+ OPC_RecordChild5,
+ OPC_MoveChild, 5,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 31,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitConvertToTarget, 5,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 4, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPESTRISrm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
+ 31,
+ OPC_CheckPatternPredicate, 10,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitConvertToTarget, 5,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 4, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRISrm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
+ 0,
+ 55,
+ OPC_RecordChild3,
+ OPC_RecordChild4,
+ OPC_RecordChild5,
+ OPC_MoveChild, 5,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 4,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 3, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPESTRISrr), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
+ 21,
+ OPC_CheckPatternPredicate, 10,
+ OPC_EmitConvertToTarget, 4,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 3, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRISrr), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
+ 0,
0,
- 52,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
+ 25|128,1,
+ OPC_CheckInteger, 9|128,6,
OPC_MoveParent,
- OPC_SwitchType , 28, MVT::v4f32,
- OPC_Scope, 12,
- OPC_CheckPredicate, 24,
- OPC_CheckPatternPredicate, 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSHDUPrr), 0,
- 1, MVT::v4f32, 1, 0,
- 12,
- OPC_CheckPredicate, 25,
- OPC_CheckPatternPredicate, 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSLDUPrr), 0,
- 1, MVT::v4f32, 1, 0,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 88,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild4,
+ OPC_RecordChild5,
+ OPC_MoveChild, 5,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 31,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitConvertToTarget, 5,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 4, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPESTRIZrm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
+ 31,
+ OPC_CheckPatternPredicate, 10,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitConvertToTarget, 5,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 4, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRIZrm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
+ 0,
+ 55,
+ OPC_RecordChild3,
+ OPC_RecordChild4,
+ OPC_RecordChild5,
+ OPC_MoveChild, 5,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 4,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 3, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPESTRIZrr), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
+ 21,
+ OPC_CheckPatternPredicate, 10,
+ OPC_EmitConvertToTarget, 4,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 3, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRIZrr), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
0,
- 12, MVT::v2f64,
- OPC_CheckPredicate, 27,
- OPC_CheckPatternPredicate, 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrr), 0,
- 1, MVT::v2f64, 1, 0,
0,
- 125,
- OPC_RecordNode,
- OPC_RecordChild0,
- OPC_Scope, 80,
+ 87,
+ OPC_CheckInteger, 34|128,6,
+ OPC_MoveParent,
+ OPC_Scope, 54,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::UNDEF,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_Scope, 53,
- OPC_CheckPredicate, 26,
- OPC_SwitchType , 13, MVT::v4f32,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPSrri), 0,
- 1, MVT::v4f32, 3, 1, 1, 2,
- 15, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPDrri), 0,
- 1, MVT::v2i64, 3, 1, 1, 2,
- 15, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPDrri), 0,
- 1, MVT::v2f64, 3, 1, 1, 2,
- 0,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSS2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
18,
- OPC_CheckPredicate, 31,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitNodeXForm, 3, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSHUFWri), 0,
- 1, MVT::v4i16, 2, 1, 2,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSS2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
0,
- 39,
+ 25,
OPC_RecordChild1,
- OPC_CheckPredicate, 32,
- OPC_SwitchType , 15, MVT::v4f32,
+ OPC_Scope, 10,
OPC_CheckPatternPredicate, 0,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPSrri), 0,
- 1, MVT::v4f32, 3, 1, 2, 3,
- 15, MVT::v2f64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSS2SIrr), 0,
+ 1, MVT::i32, 1, 0,
+ 10,
OPC_CheckPatternPredicate, 1,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPDrri), 0,
- 1, MVT::v2f64, 3, 1, 2, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSS2SIrr), 0,
+ 1, MVT::i32, 1, 0,
0,
0,
- 118,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 56,
- OPC_CheckPredicate, 28,
- OPC_SwitchType , 11, MVT::v16i8,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLBWrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 11, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLWDrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 11, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLDQrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 11, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLQDQrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 0,
- 56,
- OPC_CheckPredicate, 14,
- OPC_SwitchType , 11, MVT::v16i8,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHBWrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 11, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHWDrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 11, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHDQrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 11, MVT::v2i64,
+ 87,
+ OPC_CheckInteger, 35|128,6,
+ OPC_MoveParent,
+ OPC_Scope, 54,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSS2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 18,
OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHQDQrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSS2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
0,
- 0,
- 62,
- OPC_RecordNode,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 38,
- OPC_CheckPredicate, 32,
- OPC_SwitchType , 15, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPSrri), 0,
- 1, MVT::v4i32, 3, 1, 2, 3,
- 15, MVT::v2i64,
+ 25,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSS2SI64rr), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
OPC_CheckPatternPredicate, 1,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPDrri), 0,
- 1, MVT::v2i64, 3, 1, 2, 3,
- 0,
- 17,
- OPC_CheckPredicate, 13,
- OPC_CheckType, MVT::v4f32,
- OPC_EmitNodeXForm, 0, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPSrri), 0,
- 1, MVT::v4f32, 3, 2, 1, 3,
- 0,
- 92,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 43,
- OPC_CheckPredicate, 33,
- OPC_SwitchType , 11, MVT::v8i8,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHBWrr), 0,
- 1, MVT::v8i8, 2, 0, 1,
- 11, MVT::v4i16,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHWDrr), 0,
- 1, MVT::v4i16, 2, 0, 1,
- 11, MVT::v2i32,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHDQrr), 0,
- 1, MVT::v2i32, 2, 0, 1,
- 0,
- 43,
- OPC_CheckPredicate, 34,
- OPC_SwitchType , 11, MVT::v8i8,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLBWrr), 0,
- 1, MVT::v8i8, 2, 0, 1,
- 11, MVT::v4i16,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLWDrr), 0,
- 1, MVT::v4i16, 2, 0, 1,
- 11, MVT::v2i32,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLDQrr), 0,
- 1, MVT::v2i32, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSS2SI64rr), 0,
+ 1, MVT::i64, 1, 0,
0,
0,
- 0,
- 83|128,5, X86ISD::VZEXT_MOVL,
- OPC_Scope, 101|128,4,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 30|128,2, ISD::SCALAR_TO_VECTOR,
- OPC_Scope, 60|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 28,
- OPC_CheckPredicate, 3,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 30,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZQI2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 28,
- OPC_CheckPredicate, 3,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVZDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 5, 2, 3, 4, 5, 6,
- 86,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_SwitchType , 38, MVT::f32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitNode, TARGET_OPCODE(X86::MOVSSrm), 0|OPFL_Chain,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 3, 7, 8, 9,
- 38, MVT::f64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2f64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i64, 0,
- OPC_EmitNode, TARGET_OPCODE(X86::MOVSDrm), 0|OPFL_Chain,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 3, 7, 8, 9,
- 0,
- 0,
- 93,
- OPC_RecordChild0,
- OPC_Scope, 29,
- OPC_CheckChild0Type, MVT::i32,
- OPC_MoveParent,
- OPC_SwitchType , 10, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZDI2PDIrr), 0,
- 1, MVT::v4i32, 1, 0,
- 10, MVT::v2i32,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVZDI2PDIrr), 0,
- 1, MVT::v2i32, 1, 0,
- 0,
- 15,
- OPC_CheckChild0Type, MVT::i64,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZQI2PQIrr), 0,
- 1, MVT::v2i64, 1, 0,
- 21,
- OPC_CheckChild0Type, MVT::f64,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2f64,
- OPC_EmitNode, TARGET_OPCODE(X86::V_SET0), 0,
- 1, MVT::v2f64, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
- 1, MVT::v2f64, 2, 1, 0,
- 21,
- OPC_CheckChild0Type, MVT::f32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_EmitNode, TARGET_OPCODE(X86::V_SET0), 0,
- 1, MVT::v4f32, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrr), 0,
- 1, MVT::v4f32, 2, 1, 0,
- 0,
- 0,
- 40|128,1, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ 87,
+ OPC_CheckInteger, 114|128,4,
+ OPC_MoveParent,
+ OPC_Scope, 54,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_SwitchType , 80, MVT::v4f32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 18, MVT::v4i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 18, MVT::v2i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZQI2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 34, MVT::v2f64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i64, 0,
- OPC_EmitNode, TARGET_OPCODE(X86::MOVSDrm), 0|OPFL_Chain,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 3, 7, 8, 9,
- 0,
- 22, MVT::v2i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 22, MVT::v4i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZPQILo2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 22, MVT::v1i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSD2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVZDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 5, 2, 3, 4, 5, 6,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSD2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSD2SIrr), 0,
+ 1, MVT::i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSD2SIrr), 0,
+ 1, MVT::i32, 1, 0,
0,
- 18|128,1, ISD::LOAD,
+ 0,
+ 87,
+ OPC_CheckInteger, 115|128,4,
+ OPC_MoveParent,
+ OPC_Scope, 54,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_SwitchType , 18, MVT::v4i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 42, MVT::v2i64,
- OPC_Scope, 18,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZQI2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 20,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZPQILo2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 0,
- 34, MVT::v4f32,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitNode, TARGET_OPCODE(X86::MOVSSrm), 0|OPFL_Chain,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 3, 7, 8, 9,
- 34, MVT::v2f64,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSD2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i64, 0,
- OPC_EmitNode, TARGET_OPCODE(X86::MOVSDrm), 0|OPFL_Chain,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 3, 7, 8, 9,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSD2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSD2SI64rr), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSD2SI64rr), 0,
+ 1, MVT::i64, 1, 0,
0,
0,
- 105,
- OPC_RecordChild0,
- OPC_SwitchType , 10, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZPQILo2PQIrr), 0,
- 1, MVT::v2i64, 1, 0,
- 16, MVT::v2i32,
- OPC_EmitNode, TARGET_OPCODE(X86::MMX_V_SET0), 0,
- 1, MVT::v8i8, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLDQrr), 0,
- 1, MVT::v2i32, 2, 0, 1,
- 28, MVT::v4f32,
- OPC_EmitNode, TARGET_OPCODE(X86::V_SET0), 0,
- 1, MVT::v4f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrr), 0,
- 1, MVT::v4f32, 2, 1, 3,
- 28, MVT::v4i32,
- OPC_EmitNode, TARGET_OPCODE(X86::V_SET0), 0,
- 1, MVT::v4i32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 0, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrr), 0,
- 1, MVT::v4i32, 2, 1, 3,
- 10, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZPQILo2PQIrr), 0,
- 1, MVT::v2f64, 1, 0,
- 0,
- 0,
- 108|128,2, ISD::SCALAR_TO_VECTOR,
- OPC_Scope, 120|128,1,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 50|128,1, ISD::LOAD,
+ 87,
+ OPC_CheckInteger, 38|128,6,
+ OPC_MoveParent,
+ OPC_Scope, 54,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 84,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_SwitchType , 37, MVT::f32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i32, 0,
- OPC_EmitNode, TARGET_OPCODE(X86::MOVSSrm), 0|OPFL_Chain,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 3, 7, 8, 9,
- 37, MVT::f64,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2f64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i64, 0,
- OPC_EmitNode, TARGET_OPCODE(X86::MOVSDrm), 0|OPFL_Chain,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 3, 7, 8, 9,
- 0,
- 27,
- OPC_CheckPredicate, 3,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 29,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTTSS2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 18,
OPC_CheckPatternPredicate, 1,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVQI2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 27,
- OPC_CheckPredicate, 3,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 5, 2, 3, 4, 5, 6,
- 0,
- 61, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i64,
- OPC_Scope, 13,
- OPC_CheckChild0Type, MVT::v8i8,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2DQrr), 0,
- 1, MVT::v2i64, 1, 0,
- 13,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2DQrr), 0,
- 1, MVT::v2i64, 1, 0,
- 13,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2DQrr), 0,
- 1, MVT::v2i64, 1, 0,
- 13,
- OPC_CheckChild0Type, MVT::v1i64,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2DQrr), 0,
- 1, MVT::v2i64, 1, 0,
- 0,
- 0,
- 111,
- OPC_RecordChild0,
- OPC_Scope, 28,
- OPC_CheckChild0Type, MVT::i64,
- OPC_SwitchType , 10, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64toPQIrr), 0,
- 1, MVT::v2i64, 1, 0,
- 10, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64rrv164), 0,
- 1, MVT::v1i64, 1, 0,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTSS2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
0,
- 28,
- OPC_CheckChild0Type, MVT::i32,
- OPC_SwitchType , 10, MVT::v4i32,
+ 25,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTTSS2SIrr), 0,
+ 1, MVT::i32, 1, 0,
+ 10,
OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDI2PDIrr), 0,
- 1, MVT::v4i32, 1, 0,
- 10, MVT::v2i32,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64rr), 0,
- 1, MVT::v2i32, 1, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTSS2SIrr), 0,
+ 1, MVT::i32, 1, 0,
0,
- 24,
- OPC_CheckChild0Type, MVT::f32,
- OPC_CheckType, MVT::v4f32,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v4f32, 0,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::v4f32, 3, 1, 0, 2,
- 24,
- OPC_CheckChild0Type, MVT::f64,
- OPC_CheckType, MVT::v2f64,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
- 1, MVT::v2f64, 0,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
- 1, MVT::v2f64, 3, 1, 0, 2,
0,
- 0,
- 90|128,11, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 62|128,1,
- OPC_CheckPredicate, 8,
- OPC_Scope, 24,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- 22,
- OPC_CheckPredicate, 46,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::GS_MOV32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 22,
- OPC_CheckPredicate, 47,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FS_MOV32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 22,
- OPC_CheckPredicate, 46,
- OPC_CheckType, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64GSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
- 22,
- OPC_CheckPredicate, 47,
- OPC_CheckType, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64FSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
- 68,
- OPC_CheckPredicate, 9,
- OPC_SwitchType , 20, MVT::f32,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp32m), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
- 20, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 87,
+ OPC_CheckInteger, 39|128,6,
+ OPC_MoveParent,
+ OPC_Scope, 54,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp64m), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- 18, MVT::f80,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTTSS2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 1,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp80m), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f80, 5, 2, 3, 4, 5, 6,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTSS2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
0,
- 0,
- 74,
- OPC_CheckPredicate, 48,
- OPC_SwitchType , 22, MVT::f64,
- OPC_CheckPredicate, 49,
- OPC_CheckPatternPredicate, 7,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp32m64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- 44, MVT::f80,
- OPC_Scope, 20,
- OPC_CheckPredicate, 50,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp64m80), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f80, 5, 2, 3, 4, 5, 6,
- 20,
- OPC_CheckPredicate, 49,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp32m80), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f80, 5, 2, 3, 4, 5, 6,
+ 25,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTTSS2SI64rr), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTSS2SI64rr), 0,
+ 1, MVT::i64, 1, 0,
0,
0,
- 24,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i8, 5, 2, 3, 4, 5, 6,
- 22,
- OPC_CheckPredicate, 6,
- OPC_CheckType, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i16, 5, 2, 3, 4, 5, 6,
- 22,
- OPC_CheckPredicate, 3,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 72,
- OPC_CheckPredicate, 51,
- OPC_Scope, 44,
- OPC_CheckPredicate, 52,
- OPC_SwitchType , 18, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX16rm8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i16, 5, 2, 3, 4, 5, 6,
- 18, MVT::i32,
+ 87,
+ OPC_CheckInteger, 122|128,4,
+ OPC_MoveParent,
+ OPC_Scope, 54,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX32rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTTSD2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 0,
- 22,
- OPC_CheckPredicate, 53,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX32rm16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 0,
- 9|128,1,
- OPC_CheckPredicate, 54,
- OPC_Scope, 44,
- OPC_CheckPredicate, 55,
- OPC_SwitchType , 18, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX16rm8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i16, 5, 2, 3, 4, 5, 6,
- 18, MVT::i32,
+ 18,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTSD2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 5, 2, 3, 4, 5, 6,
0,
- 22,
- OPC_CheckPredicate, 56,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rm16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 64,
- OPC_CheckPredicate, 57,
- OPC_SwitchType , 18, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i8, 5, 2, 3, 4, 5, 6,
- 18, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX16rm8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i16, 5, 2, 3, 4, 5, 6,
- 18, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rm8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 25,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTTSD2SIrr), 0,
+ 1, MVT::i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTSD2SIrr), 0,
+ 1, MVT::i32, 1, 0,
0,
0,
- 9|128,1,
- OPC_CheckPredicate, 48,
- OPC_Scope, 64,
- OPC_CheckPredicate, 58,
- OPC_SwitchType , 18, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i8, 5, 2, 3, 4, 5, 6,
- 18, MVT::i16,
+ 87,
+ OPC_CheckInteger, 123|128,4,
+ OPC_MoveParent,
+ OPC_Scope, 54,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX16rm8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i16, 5, 2, 3, 4, 5, 6,
- 18, MVT::i32,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTTSD2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rm8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTSD2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
0,
- 44,
- OPC_CheckPredicate, 59,
- OPC_SwitchType , 18, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX16rm8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i16, 5, 2, 3, 4, 5, 6,
- 18, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rm8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 25,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTTSD2SI64rr), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTSD2SI64rr), 0,
+ 1, MVT::i64, 1, 0,
0,
- 22,
- OPC_CheckPredicate, 60,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rm16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
0,
- 24,
- OPC_CheckPredicate, 51,
- OPC_CheckPredicate, 53,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX32rm16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 24,
- OPC_CheckPredicate, 54,
- OPC_CheckPredicate, 56,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rm16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 24,
- OPC_CheckPredicate, 48,
- OPC_CheckPredicate, 60,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rm16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 22,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
- 69,
- OPC_CheckPredicate, 51,
- OPC_CheckType, MVT::i64,
- OPC_Scope, 20,
- OPC_CheckPredicate, 52,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rm8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
- 20,
- OPC_CheckPredicate, 53,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rm16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
- 20,
- OPC_CheckPredicate, 61,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rm32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 54,
+ OPC_CheckInteger, 1|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 33,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 10,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC32m8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 10,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC32r8), 0,
+ 1, MVT::i32, 2, 0, 1,
0,
- 90,
- OPC_CheckPredicate, 54,
- OPC_CheckType, MVT::i64,
- OPC_Scope, 20,
- OPC_CheckPredicate, 55,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rm8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
- 20,
- OPC_CheckPredicate, 56,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rm16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
- 20,
- OPC_CheckPredicate, 62,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rm32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
- 20,
- OPC_CheckPredicate, 57,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rm8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 54,
+ OPC_CheckInteger, 127|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 33,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 10,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC32m16), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 10,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC32r16), 0,
+ 1, MVT::i32, 2, 0, 1,
0,
- 69,
- OPC_CheckPredicate, 48,
- OPC_CheckType, MVT::i64,
- OPC_Scope, 20,
- OPC_CheckPredicate, 58,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rm8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
- 20,
- OPC_CheckPredicate, 59,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rm8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
- 20,
- OPC_CheckPredicate, 60,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rm16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 54,
+ OPC_CheckInteger, 0|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 33,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 10,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC32m32), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 10,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC32r32), 0,
+ 1, MVT::i32, 2, 0, 1,
0,
- 24,
- OPC_CheckPredicate, 51,
- OPC_CheckPredicate, 53,
- OPC_CheckType, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rm16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
- 24,
- OPC_CheckPredicate, 54,
- OPC_CheckPredicate, 56,
- OPC_CheckType, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rm16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
- 24,
- OPC_CheckPredicate, 48,
- OPC_CheckPredicate, 60,
- OPC_CheckType, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rm16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
- 101,
- OPC_CheckPredicate, 8,
- OPC_Scope, 24,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
- 48,
- OPC_CheckPredicate, 63,
- OPC_SwitchType , 20, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsMOVAPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
- 20, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVAPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
- 0,
- 22,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 54,
+ OPC_CheckInteger, 3|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 33,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 10,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC64m8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 10,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC64r8), 0,
+ 1, MVT::i64, 2, 0, 1,
0,
- 26,
- OPC_CheckPredicate, 48,
- OPC_CheckPredicate, 49,
- OPC_CheckType, MVT::f64,
- OPC_CheckPatternPredicate, 12,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSS2SDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- 61|128,1,
- OPC_CheckPredicate, 8,
- OPC_Scope, 48,
- OPC_CheckPredicate, 63,
- OPC_SwitchType , 20, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsMOVAPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- 20, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVAPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
- 0,
- 22,
- OPC_CheckType, MVT::v2f64,
+ 54,
+ OPC_CheckInteger, 2|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 33,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 10,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC64m64), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 10,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC64r64), 0,
+ 1, MVT::i64, 2, 0, 1,
+ 0,
+ 29,
+ OPC_CheckInteger, 47|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
- 44,
- OPC_CheckType, MVT::v4i32,
- OPC_Scope, 20,
- OPC_CheckPredicate, 63,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVAPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 18,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 0,
- 44,
- OPC_CheckType, MVT::v2i64,
- OPC_Scope, 20,
- OPC_CheckPredicate, 63,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVAPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 18,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 0,
- 22,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 5, 2, 3, 4, 5, 6,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVMSKPSrr), 0,
+ 1, MVT::i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVMSKPSrr), 0,
+ 1, MVT::i32, 1, 0,
0,
- 77,
- OPC_CheckPredicate, 48,
- OPC_SwitchType , 36, MVT::i64,
- OPC_CheckPredicate, 64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i64, 0,
- OPC_EmitNode, TARGET_OPCODE(X86::MOV32rm), 0|OPFL_Chain,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- OPC_EmitInteger, MVT::i32, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 3, 7, 8, 9,
- 33, MVT::f64,
- OPC_CheckPredicate, 49,
- OPC_CheckPatternPredicate, 13,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitNode, TARGET_OPCODE(X86::MOVSSrm), 0|OPFL_Chain,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
- OPC_EmitNode, TARGET_OPCODE(X86::CVTSS2SDrr), 0|OPFL_MemRefs,
- 1, MVT::f64, 1, 7,
- OPC_CompleteMatch, 1, 8,
-
+ 29,
+ OPC_CheckInteger, 6|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVMSKPDrr), 0,
+ 1, MVT::i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVMSKPDrr), 0,
+ 1, MVT::i32, 1, 0,
0,
- 0,
- 60, X86ISD::VZEXT_LOAD,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_SwitchType , 18, MVT::v2i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZQI2PQIrm), 0|OPFL_Chain,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 34, MVT::v2f64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitInteger, MVT::i64, 0,
- OPC_EmitNode, TARGET_OPCODE(X86::MOVSDrm), 0|OPFL_Chain,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain,
- 1, MVT::v2f64, 3, 7, 8, 9,
- 0,
- 9|128,9|128,1, ISD::INTRINSIC_WO_CHAIN,
- OPC_MoveChild, 0,
- OPC_Scope, 20|128,1,
- OPC_CheckInteger, 126|128,4,
+ 15,
+ OPC_CheckInteger, 97|128,3,
OPC_MoveParent,
- OPC_Scope, 0|128,1,
+ OPC_RecordChild1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVMSKPSYrr), 0,
+ 1, MVT::i32, 1, 0,
+ 15,
+ OPC_CheckInteger, 96|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVMSKPDYrr), 0,
+ 1, MVT::i32, 1, 0,
+ 29,
+ OPC_CheckInteger, 31|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVMSKBrr), 0,
+ 1, MVT::i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVMSKBrr), 0,
+ 1, MVT::i32, 1, 0,
+ 0,
+ 15,
+ OPC_CheckInteger, 56|128,4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMOVMSKBrr), 0,
+ 1, MVT::i32, 1, 0,
+ 57|128,1,
+ OPC_CheckInteger, 109|128,5,
+ OPC_MoveParent,
+ OPC_Scope, 23|128,1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 46, X86ISD::VZEXT_MOVL,
+ OPC_SwitchOpcode , 46, TARGET_OPCODE(X86ISD::VZEXT_MOVL),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXBWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i16, 5, 2, 3, 4, 5, 6,
- 41, ISD::SCALAR_TO_VECTOR,
+ 62, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXBWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 5, 2, 3, 4, 5, 6,
- 27, X86ISD::VZEXT_LOAD,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVSXBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25, TARGET_OPCODE(X86ISD::VZEXT_LOAD),
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXBWrm), 0|OPFL_Chain,
1, MVT::v8i16, 5, 2, 3, 4, 5, 6,
0,
- 11,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXBWrr), 0,
- 1, MVT::v8i16, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVSXBWrr), 0,
+ 1, MVT::v8i16, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXBWrr), 0,
+ 1, MVT::v8i16, 1, 0,
+ 0,
0,
- 20|128,1,
- OPC_CheckInteger, 0|128,5,
+ 57|128,1,
+ OPC_CheckInteger, 111|128,5,
OPC_MoveParent,
- OPC_Scope, 0|128,1,
+ OPC_Scope, 23|128,1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 46, X86ISD::VZEXT_MOVL,
+ OPC_SwitchOpcode , 46, TARGET_OPCODE(X86ISD::VZEXT_MOVL),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXWDrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 41, ISD::SCALAR_TO_VECTOR,
+ 62, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXWDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 27, X86ISD::VZEXT_LOAD,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVSXWDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXWDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25, TARGET_OPCODE(X86ISD::VZEXT_LOAD),
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXWDrm), 0|OPFL_Chain,
1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
0,
- 11,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXWDrr), 0,
- 1, MVT::v4i32, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVSXWDrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXWDrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 0,
0,
- 20|128,1,
- OPC_CheckInteger, 127|128,4,
+ 57|128,1,
+ OPC_CheckInteger, 110|128,5,
OPC_MoveParent,
- OPC_Scope, 0|128,1,
+ OPC_Scope, 23|128,1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 46, X86ISD::VZEXT_MOVL,
+ OPC_SwitchOpcode , 46, TARGET_OPCODE(X86ISD::VZEXT_MOVL),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXDQrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 41, ISD::SCALAR_TO_VECTOR,
+ 62, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXDQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 27, X86ISD::VZEXT_LOAD,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVSXDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25, TARGET_OPCODE(X86ISD::VZEXT_LOAD),
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXDQrm), 0|OPFL_Chain,
1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
0,
- 11,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXDQrr), 0,
- 1, MVT::v2i64, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVSXDQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXDQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 0,
0,
- 20|128,1,
- OPC_CheckInteger, 4|128,5,
+ 57|128,1,
+ OPC_CheckInteger, 115|128,5,
OPC_MoveParent,
- OPC_Scope, 0|128,1,
+ OPC_Scope, 23|128,1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 46, X86ISD::VZEXT_MOVL,
+ OPC_SwitchOpcode , 46, TARGET_OPCODE(X86ISD::VZEXT_MOVL),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXBWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i16, 5, 2, 3, 4, 5, 6,
- 41, ISD::SCALAR_TO_VECTOR,
+ 62, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXBWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 5, 2, 3, 4, 5, 6,
- 27, X86ISD::VZEXT_LOAD,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVZXBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25, TARGET_OPCODE(X86ISD::VZEXT_LOAD),
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXBWrm), 0|OPFL_Chain,
1, MVT::v8i16, 5, 2, 3, 4, 5, 6,
0,
- 11,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXBWrr), 0,
- 1, MVT::v8i16, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVZXBWrr), 0,
+ 1, MVT::v8i16, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXBWrr), 0,
+ 1, MVT::v8i16, 1, 0,
+ 0,
0,
- 20|128,1,
- OPC_CheckInteger, 6|128,5,
+ 57|128,1,
+ OPC_CheckInteger, 117|128,5,
OPC_MoveParent,
- OPC_Scope, 0|128,1,
+ OPC_Scope, 23|128,1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 46, X86ISD::VZEXT_MOVL,
+ OPC_SwitchOpcode , 46, TARGET_OPCODE(X86ISD::VZEXT_MOVL),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXWDrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 41, ISD::SCALAR_TO_VECTOR,
+ 62, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXWDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 27, X86ISD::VZEXT_LOAD,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVZXWDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXWDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25, TARGET_OPCODE(X86ISD::VZEXT_LOAD),
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXWDrm), 0|OPFL_Chain,
1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
0,
- 11,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXWDrr), 0,
- 1, MVT::v4i32, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVZXWDrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXWDrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 0,
0,
- 20|128,1,
- OPC_CheckInteger, 5|128,5,
+ 57|128,1,
+ OPC_CheckInteger, 116|128,5,
OPC_MoveParent,
- OPC_Scope, 0|128,1,
+ OPC_Scope, 23|128,1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 46, X86ISD::VZEXT_MOVL,
+ OPC_SwitchOpcode , 46, TARGET_OPCODE(X86ISD::VZEXT_MOVL),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXDQrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 41, ISD::SCALAR_TO_VECTOR,
+ 62, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXDQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 27, X86ISD::VZEXT_LOAD,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVZXDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25, TARGET_OPCODE(X86ISD::VZEXT_LOAD),
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXDQrm), 0|OPFL_Chain,
1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
0,
- 11,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXDQrr), 0,
- 1, MVT::v2i64, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVZXDQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXDQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 0,
0,
- 114,
- OPC_CheckInteger, 124|128,4,
+ 24|128,1,
+ OPC_CheckInteger, 107|128,5,
OPC_MoveParent,
- OPC_Scope, 95,
+ OPC_Scope, 119,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 44, X86ISD::VZEXT_MOVL,
+ OPC_SwitchOpcode , 44, TARGET_OPCODE(X86ISD::VZEXT_MOVL),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXBDrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 39, ISD::SCALAR_TO_VECTOR,
+ 60, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXBDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVSXBDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXBDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 0,
0,
- 11,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXBDrr), 0,
- 1, MVT::v4i32, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVSXBDrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXBDrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 0,
0,
- 114,
- OPC_CheckInteger, 1|128,5,
+ 24|128,1,
+ OPC_CheckInteger, 112|128,5,
OPC_MoveParent,
- OPC_Scope, 95,
+ OPC_Scope, 119,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 44, X86ISD::VZEXT_MOVL,
+ OPC_SwitchOpcode , 44, TARGET_OPCODE(X86ISD::VZEXT_MOVL),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXWQrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 39, ISD::SCALAR_TO_VECTOR,
+ 60, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXWQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVSXWQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXWQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 0,
0,
- 11,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXWQrr), 0,
- 1, MVT::v2i64, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVSXWQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXWQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 0,
0,
- 114,
- OPC_CheckInteger, 2|128,5,
+ 24|128,1,
+ OPC_CheckInteger, 113|128,5,
OPC_MoveParent,
- OPC_Scope, 95,
+ OPC_Scope, 119,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 44, X86ISD::VZEXT_MOVL,
+ OPC_SwitchOpcode , 44, TARGET_OPCODE(X86ISD::VZEXT_MOVL),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXBDrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 39, ISD::SCALAR_TO_VECTOR,
+ 60, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXBDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVZXBDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXBDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 0,
0,
- 11,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXBDrr), 0,
- 1, MVT::v4i32, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVZXBDrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXBDrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 0,
0,
- 114,
- OPC_CheckInteger, 7|128,5,
+ 24|128,1,
+ OPC_CheckInteger, 118|128,5,
OPC_MoveParent,
- OPC_Scope, 95,
+ OPC_Scope, 119,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 44, X86ISD::VZEXT_MOVL,
+ OPC_SwitchOpcode , 44, TARGET_OPCODE(X86ISD::VZEXT_MOVL),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXWQrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 39, ISD::SCALAR_TO_VECTOR,
+ 60, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXWQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVZXWQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXWQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 0,
0,
- 11,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXWQrr), 0,
- 1, MVT::v2i64, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVZXWQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXWQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 0,
0,
- 114,
- OPC_CheckInteger, 125|128,4,
+ 24|128,1,
+ OPC_CheckInteger, 108|128,5,
OPC_MoveParent,
- OPC_Scope, 95,
+ OPC_Scope, 119,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 44, X86ISD::VZEXT_MOVL,
+ OPC_SwitchOpcode , 44, TARGET_OPCODE(X86ISD::VZEXT_MOVL),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXBQrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 39, ISD::SCALAR_TO_VECTOR,
+ 60, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 65,
+ OPC_CheckPredicate, 20,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXBQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVSXBQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXBQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 0,
0,
- 11,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXBQrr), 0,
- 1, MVT::v2i64, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVSXBQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVSXBQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 0,
0,
- 114,
- OPC_CheckInteger, 3|128,5,
+ 24|128,1,
+ OPC_CheckInteger, 114|128,5,
OPC_MoveParent,
- OPC_Scope, 95,
+ OPC_Scope, 119,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 44, X86ISD::VZEXT_MOVL,
+ OPC_SwitchOpcode , 44, TARGET_OPCODE(X86ISD::VZEXT_MOVL),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXBQrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 39, ISD::SCALAR_TO_VECTOR,
+ 60, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 65,
+ OPC_CheckPredicate, 20,
OPC_CheckType, MVT::i32,
OPC_MoveParent,
OPC_CheckType, MVT::v4i32,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXBQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVZXBQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXBQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 0,
0,
- 11,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXBQrr), 0,
- 1, MVT::v2i64, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMOVZXBQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVZXBQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 0,
0,
- 82,
- OPC_CheckInteger, 99|128,4,
+ 0|128,1,
+ OPC_CheckInteger, 93|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 52,
+ OPC_Scope, 79,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild3,
OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BLENDPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
- 21,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPBLENDWrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PBLENDWrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 40,
OPC_RecordChild2,
OPC_RecordChild3,
OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BLENDPSrri), 0,
- 1, MVT::v4f32, 3, 0, 1, 3,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPBLENDWrri), 0,
+ 1, MVT::v8i16, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PBLENDWrri), 0,
+ 1, MVT::v8i16, 3, 0, 1, 3,
+ 0,
0,
- 82,
- OPC_CheckInteger, 98|128,4,
+ 102,
+ OPC_CheckInteger, 56|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 52,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
- OPC_MoveParent,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BLENDPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 7, 0, 4, 5, 6, 7, 8, 9,
- 21,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSUBSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BLENDPDrri), 0,
- 1, MVT::v2f64, 3, 0, 1, 3,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSUBSBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBSBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
0,
- 82,
- OPC_CheckInteger, 110|128,4,
+ 102,
+ OPC_CheckInteger, 57|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 52,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PBLENDWrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 7, 0, 4, 5, 6, 7, 8, 9,
- 21,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PBLENDWrri), 0,
- 1, MVT::v8i16, 3, 0, 1, 3,
- 0,
- 9|128,1,
- OPC_CheckInteger, 103|128,4,
- OPC_MoveParent,
- OPC_Scope, 53,
- OPC_RecordChild1,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DPPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
- 53,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DPPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 7, 2, 4, 5, 6, 7, 8, 9,
- 22,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DPPSrri), 0,
- 1, MVT::v4f32, 3, 0, 1, 3,
- 0,
- 9|128,1,
- OPC_CheckInteger, 102|128,4,
- OPC_MoveParent,
- OPC_Scope, 53,
- OPC_RecordChild1,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DPPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 7, 0, 4, 5, 6, 7, 8, 9,
- 53,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DPPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 7, 2, 4, 5, 6, 7, 8, 9,
- 22,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DPPDrri), 0,
- 1, MVT::v2f64, 3, 0, 1, 3,
- 0,
- 9|128,1,
- OPC_CheckInteger, 107|128,4,
- OPC_MoveParent,
- OPC_Scope, 53,
- OPC_RecordChild1,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MPSADBWrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 7, 0, 4, 5, 6, 7, 8, 9,
- 53,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MPSADBWrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 7, 2, 4, 5, 6, 7, 8, 9,
- 22,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MPSADBWrri), 0,
- 1, MVT::v16i8, 3, 0, 1, 3,
- 0,
- 73,
- OPC_CheckInteger, 38|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 43,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CMPSSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
- 21,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CMPSSrr), 0,
- 1, MVT::v4f32, 3, 0, 1, 3,
- 0,
- 75,
- OPC_CheckInteger, 37|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 45,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMPPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
- 21,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSUBSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMPPSrri), 0,
- 1, MVT::v4f32, 3, 0, 1, 3,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSUBSWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBSWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 73,
- OPC_CheckInteger, 118|128,3,
+ 102,
+ OPC_CheckInteger, 58|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CMPSDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 7, 0, 4, 5, 6, 7, 8, 9,
- 21,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CMPSDrr), 0,
- 1, MVT::v2f64, 3, 0, 1, 3,
- 0,
- 61,
- OPC_CheckInteger, 126|128,3,
- OPC_MoveParent,
- OPC_Scope, 42,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v2i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTDQ2PSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
- 11,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTDQ2PSrr), 0,
- 1, MVT::v4f32, 1, 0,
- 0,
- 61,
- OPC_CheckInteger, 125|128,3,
- OPC_MoveParent,
- OPC_Scope, 42,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTDQ2PDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
- 11,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTDQ2PDrr), 0,
- 1, MVT::v2f64, 1, 0,
- 0,
- 75,
- OPC_CheckInteger, 117|128,3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 45,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMPPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 7, 0, 4, 5, 6, 7, 8, 9,
- 21,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSUBUSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBUSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMPPDrri), 0,
- 1, MVT::v2f64, 3, 0, 1, 3,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSUBUSBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBUSBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 31|128,4,
+ 102,
+ OPC_CheckInteger, 59|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
- OPC_RecordChild1,
+ OPC_RecordChild1,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v2i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDSBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 44,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSUBUSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBUSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDSBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
- 13,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDSBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSUBUSWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBUSWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 32|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 14|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDSWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDSWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDSWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDSBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDSBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 33|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 15|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDUSBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDUSBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDUSBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDSWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDSWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 34|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 16|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDUSWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDUSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDUSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDUSWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDUSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDUSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDUSWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDUSBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDUSBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 73|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 17|128,5,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 43,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
+ OPC_Scope, 67,
OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v2i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBSBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBSBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 0,
- 64,
- OPC_CheckInteger, 74|128,4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 43,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBSWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBSWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 0,
- 64,
- OPC_CheckInteger, 75|128,4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 43,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDUSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDUSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBUSBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBUSBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 0,
- 64,
- OPC_CheckInteger, 76|128,4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 43,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDUSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDUSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v2i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBUSWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBUSWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDUSWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDUSWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 50|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 33|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHUWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULHUWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHUWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHUWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULHUWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHUWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHUWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULHUWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHUWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 49|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 32|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULHWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULHWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULHWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 51|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 34|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULUDQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULUDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULUDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULUDQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULUDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULUDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULUDQrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULUDQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULUDQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 43|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 26|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMADDWDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMADDWDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMADDWDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMADDWDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMADDWDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMADDWDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMADDWDrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMADDWDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMADDWDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 35|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 18|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PAVGBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPAVGBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PAVGBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PAVGBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPAVGBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PAVGBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PAVGBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPAVGBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PAVGBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 36|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 19|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PAVGWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPAVGWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PAVGWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PAVGWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPAVGWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PAVGWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PAVGWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPAVGWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PAVGWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 47|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 30|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINUBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINUBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINUBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 46|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 29|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINSWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 45|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 28|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXUBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXUBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXUBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 44|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 27|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXSWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 52|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 35|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSADBWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSADBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSADBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSADBWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSADBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSADBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSADBWrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSADBWrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSADBWrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 57|128,4,
+ 102,
+ OPC_CheckInteger, 40|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSLLWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSLLWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 53|128,4,
+ 102,
+ OPC_CheckInteger, 36|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSLLDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLDrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSLLDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 56|128,4,
+ 102,
+ OPC_CheckInteger, 52|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRLWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLQrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRLWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 69|128,4,
+ 102,
+ OPC_CheckInteger, 48|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRLDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRLDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 65|128,4,
+ 102,
+ OPC_CheckInteger, 45|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRAWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRAWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLDrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRAWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRAWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 68|128,4,
+ 102,
+ OPC_CheckInteger, 44|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRADrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRADrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLQrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRADrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRADrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 62|128,4,
+ 102,
+ OPC_CheckInteger, 20|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRAWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPEQBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRAWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPEQBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 61|128,4,
+ 102,
+ OPC_CheckInteger, 22|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRADrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPEQWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRADrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPEQWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 37|128,4,
+ 102,
+ OPC_CheckInteger, 21|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPEQDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPEQDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 39|128,4,
+ 102,
+ OPC_CheckInteger, 23|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPGTBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPGTBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 38|128,4,
+ 102,
+ OPC_CheckInteger, 25|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPGTWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQDrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPGTWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 40|128,4,
+ 102,
+ OPC_CheckInteger, 24|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPGTDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPGTDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 42|128,4,
+ 102,
+ OPC_CheckInteger, 12|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPACKSSWBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PACKSSWBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPACKSSWBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PACKSSWBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 41|128,4,
+ 102,
+ OPC_CheckInteger, 11|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPACKSSDWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PACKSSDWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTDrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPACKSSDWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PACKSSDWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 29|128,4,
+ 102,
+ OPC_CheckInteger, 13|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PACKSSWBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPACKUSWBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PACKUSWBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PACKSSWBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPACKUSWBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PACKUSWBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 28|128,4,
+ 102,
+ OPC_CheckInteger, 77|128,6,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v2i64,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PACKSSDWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPHADDWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PACKSSDWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPHADDWrr128), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDWrr128), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 30|128,4,
+ 102,
+ OPC_CheckInteger, 73|128,6,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v2i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PACKUSWBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PACKUSWBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 0,
- 61,
- OPC_CheckInteger, 83|128,5,
- OPC_MoveParent,
- OPC_Scope, 42,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v8i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSBrm64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i8, 5, 2, 3, 4, 5, 6,
- 11,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSBrr64), 0,
- 1, MVT::v8i8, 1, 0,
- 0,
- 61,
- OPC_CheckInteger, 84|128,5,
- OPC_MoveParent,
- OPC_Scope, 42,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSBrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 5, 2, 3, 4, 5, 6,
- 11,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSBrr128), 0,
- 1, MVT::v16i8, 1, 0,
- 0,
- 61,
- OPC_CheckInteger, 87|128,5,
- OPC_MoveParent,
- OPC_Scope, 42,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSWrm64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 5, 2, 3, 4, 5, 6,
- 11,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSWrr64), 0,
- 1, MVT::v4i16, 1, 0,
- 0,
- 61,
- OPC_CheckInteger, 88|128,5,
- OPC_MoveParent,
- OPC_Scope, 42,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v8i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 5, 2, 3, 4, 5, 6,
- 11,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSWrr128), 0,
- 1, MVT::v8i16, 1, 0,
- 0,
- 61,
- OPC_CheckInteger, 85|128,5,
- OPC_MoveParent,
- OPC_Scope, 42,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSDrm64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 5, 2, 3, 4, 5, 6,
- 11,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSDrr64), 0,
- 1, MVT::v2i32, 1, 0,
- 0,
- 61,
- OPC_CheckInteger, 86|128,5,
- OPC_MoveParent,
- OPC_Scope, 42,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSDrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 11,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSDrr128), 0,
- 1, MVT::v4i32, 1, 0,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPHADDDrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDDrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPHADDDrr128), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDDrr128), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 95|128,5,
+ 102,
+ OPC_CheckInteger, 75|128,6,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v4i16,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDWrm64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPHADDSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDWrr64), 0,
- 1, MVT::v4i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPHADDSWrr128), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDSWrr128), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 96|128,5,
+ 102,
+ OPC_CheckInteger, 83|128,6,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v8i16,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDWrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPHSUBWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDWrr128), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPHSUBWrr128), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBWrr128), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 91|128,5,
+ 102,
+ OPC_CheckInteger, 79|128,6,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v2i32,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDDrm64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPHSUBDrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBDrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDDrr64), 0,
- 1, MVT::v2i32, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPHSUBDrr128), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBDrr128), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 92|128,5,
+ 102,
+ OPC_CheckInteger, 81|128,6,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v4i32,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDDrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPHSUBSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDDrr128), 0,
- 1, MVT::v4i32, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPHSUBSWrr128), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBSWrr128), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 93|128,5,
+ 102,
+ OPC_CheckInteger, 85|128,6,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v4i16,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDSWrm64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMADDUBSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMADDUBSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDSWrr64), 0,
- 1, MVT::v4i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMADDUBSWrr128), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMADDUBSWrr128), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 94|128,5,
+ 102,
+ OPC_CheckInteger, 96|128,6,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v8i16,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSIGNWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDSWrr128), 0,
- 1, MVT::v4i32, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSIGNWrr128), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNWrr128), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 101|128,5,
+ 102,
+ OPC_CheckInteger, 94|128,6,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v4i16,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBWrm64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSIGNDrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNDrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBWrr64), 0,
- 1, MVT::v4i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSIGNDrr128), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNDrr128), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 102|128,5,
+ 43|128,1,
+ OPC_CheckInteger, 87|128,6,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 67,
+ OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v8i16,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBWrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBWrr128), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 0,
- 64,
- OPC_CheckInteger, 97|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 43,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULHRSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHRSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v2i32,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBDrm64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBDrr64), 0,
- 1, MVT::v2i32, 2, 0, 1,
- 0,
- 64,
- OPC_CheckInteger, 98|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 43,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULHRSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHRSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBDrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBDrr128), 0,
- 1, MVT::v4i32, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULHRSWrr128), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHRSWrr128), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
64,
- OPC_CheckInteger, 99|128,5,
+ OPC_CheckInteger, 76|128,6,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 43,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v4i16,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
+ OPC_CheckType, MVT::v8i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckPatternPredicate, 11,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBSWrm64), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDWrm64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBSWrr64), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDWrr64), 0,
1, MVT::v4i16, 2, 0, 1,
0,
64,
- OPC_CheckInteger, 100|128,5,
+ OPC_CheckInteger, 72|128,6,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 43,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v8i16,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
+ OPC_CheckType, MVT::v8i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckPatternPredicate, 11,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDDrm64), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBSWrr128), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDDrr64), 0,
+ 1, MVT::v2i32, 2, 0, 1,
0,
64,
- OPC_CheckInteger, 103|128,5,
+ OPC_CheckInteger, 74|128,6,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 43,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
OPC_CheckType, MVT::v8i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckPatternPredicate, 11,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMADDUBSWrm64), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDSWrm64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMADDUBSWrr64), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHADDSWrr64), 0,
1, MVT::v4i16, 2, 0, 1,
0,
64,
- OPC_CheckInteger, 104|128,5,
+ OPC_CheckInteger, 82|128,6,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 43,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMADDUBSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMADDUBSWrr128), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 0,
- 110,
- OPC_CheckInteger, 105|128,5,
- OPC_MoveParent,
- OPC_Scope, 44,
- OPC_RecordChild1,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v4i16,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
+ OPC_CheckType, MVT::v8i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckPatternPredicate, 11,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHRSWrm64), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBWrm64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
- 44,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHRSWrm64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
- 13,
- OPC_RecordChild1,
+ 12,
OPC_RecordChild2,
OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHRSWrr64), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBWrr64), 0,
1, MVT::v4i16, 2, 0, 1,
0,
- 110,
- OPC_CheckInteger, 106|128,5,
- OPC_MoveParent,
- OPC_Scope, 44,
- OPC_RecordChild1,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v8i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHRSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 44,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v8i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHRSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
- 13,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHRSWrr128), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 0,
64,
- OPC_CheckInteger, 107|128,5,
+ OPC_CheckInteger, 78|128,6,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 43,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
OPC_CheckType, MVT::v8i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckPatternPredicate, 11,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFBrm64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFBrr64), 0,
- 1, MVT::v8i8, 2, 0, 1,
- 0,
- 64,
- OPC_CheckInteger, 108|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 43,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFBrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBDrm64), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFBrr128), 0,
- 1, MVT::v16i8, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBDrr64), 0,
+ 1, MVT::v2i32, 2, 0, 1,
0,
64,
- OPC_CheckInteger, 109|128,5,
+ OPC_CheckInteger, 80|128,6,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 43,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
OPC_CheckType, MVT::v8i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckPatternPredicate, 11,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNBrm64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBSWrm64), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNBrr64), 0,
- 1, MVT::v8i8, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHSUBSWrr64), 0,
+ 1, MVT::v4i16, 2, 0, 1,
0,
64,
- OPC_CheckInteger, 110|128,5,
+ OPC_CheckInteger, 84|128,6,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 43,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
+ OPC_CheckType, MVT::v8i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckPatternPredicate, 11,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNBrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMADDUBSWrm64), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNBrr128), 0,
- 1, MVT::v16i8, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMADDUBSWrr64), 0,
+ 1, MVT::v4i16, 2, 0, 1,
0,
64,
- OPC_CheckInteger, 113|128,5,
+ OPC_CheckInteger, 95|128,6,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 43,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v4i16,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
+ OPC_CheckType, MVT::v8i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckPatternPredicate, 11,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNWrm64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
12,
@@ -13794,57 +10610,27 @@ SDNode *SelectCode(SDNode *N) {
1, MVT::v4i16, 2, 0, 1,
0,
64,
- OPC_CheckInteger, 114|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 43,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v8i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNWrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNWrr128), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 0,
- 64,
- OPC_CheckInteger, 111|128,5,
+ OPC_CheckInteger, 93|128,6,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 43,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v2i32,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
+ OPC_CheckType, MVT::v8i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckPatternPredicate, 11,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNDrm64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
12,
@@ -13853,7554 +10639,7756 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNDrr64), 0,
1, MVT::v2i32, 2, 0, 1,
0,
- 64,
- OPC_CheckInteger, 112|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 43,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNDrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNDrr128), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 0,
- 81,
- OPC_CheckInteger, 89|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 48,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitNodeXForm, 5, 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 7, 0, 4, 5, 6, 7, 8, 10,
- 24,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 5, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR64rr), 0,
- 1, MVT::v1i64, 3, 0, 1, 4,
- 0,
- 81,
- OPC_CheckInteger, 90|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 48,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_EmitNodeXForm, 5, 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR128rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 7, 0, 4, 5, 6, 7, 8, 10,
- 24,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitNodeXForm, 5, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR128rr), 0,
- 1, MVT::v2i64, 3, 0, 1, 4,
- 0,
- 72,
- OPC_CheckInteger, 14|128,5,
- OPC_MoveParent,
- OPC_Scope, 44,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROUNDPSm_Int), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 3, 4, 5, 6, 7, 8,
- 20,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROUNDPSr_Int), 0,
- 1, MVT::v4f32, 2, 0, 2,
- 0,
- 72,
- OPC_CheckInteger, 13|128,5,
- OPC_MoveParent,
- OPC_Scope, 44,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROUNDPDm_Int), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 3, 4, 5, 6, 7, 8,
- 20,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROUNDPDr_Int), 0,
- 1, MVT::v2f64, 2, 0, 2,
- 0,
- 61,
- OPC_CheckInteger, 115|128,4,
- OPC_MoveParent,
- OPC_Scope, 42,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 66,
- OPC_CheckType, MVT::v8i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHMINPOSUWrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 5, 2, 3, 4, 5, 6,
- 11,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PHMINPOSUWrr128), 0,
- 1, MVT::v8i16, 1, 0,
- 0,
110,
- OPC_CheckInteger, 111|128,4,
+ OPC_CheckInteger, 86|128,6,
OPC_MoveParent,
OPC_Scope, 44,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
+ OPC_CheckType, MVT::v8i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 11,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHRSWrm64), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
44,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
+ OPC_CheckType, MVT::v8i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckPatternPredicate, 11,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHRSWrm64), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
13,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQQrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULHRSWrr64), 0,
+ 1, MVT::v4i16, 2, 0, 1,
0,
- 64,
- OPC_CheckInteger, 108|128,4,
+ 102,
+ OPC_CheckInteger, 91|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PACKUSDWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPACKUSDWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PACKUSDWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PACKUSDWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPACKUSDWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PACKUSDWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 120|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 94|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPEQQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPEQQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPEQQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 121|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 104|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSDrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINSDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 122|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 105|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINUDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINUDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUDrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINUDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 123|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 106|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINUWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINUWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINUWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINUWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 116|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 100|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXSDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 117|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 101|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXUDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXUDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSDrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXUDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 118|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 102|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXUWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXUWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUDrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXUWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 119|128,4,
+ 43|128,1,
+ OPC_CheckInteger, 119|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 67,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXUWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULDQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULDQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
0,
- 110,
- OPC_CheckInteger, 8|128,5,
+ 66|128,1,
+ OPC_CheckInteger, 90|128,5,
OPC_MoveParent,
- OPC_Scope, 44,
+ OPC_Scope, 72,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULDQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 44,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMPSADBWrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MPSADBWrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 72,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULDQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 13,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULDQrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 0,
- 72,
- OPC_CheckInteger, 100|128,4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 47,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
- OPC_MoveParent,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_RecordChild3,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitCopyToReg, 3, X86::XMM0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BLENDVPDrm0), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 4, 5, 6, 7, 8,
- 16,
OPC_RecordChild2,
OPC_RecordChild3,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitCopyToReg, 2, X86::XMM0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BLENDVPDrr0), 0|OPFL_FlagInput,
- 1, MVT::v2f64, 2, 0, 1,
- 0,
- 72,
- OPC_CheckInteger, 101|128,4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 47,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
- OPC_MoveParent,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_RecordChild3,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitCopyToReg, 3, X86::XMM0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BLENDVPSrm0), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 4, 5, 6, 7, 8,
- 16,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMPSADBWrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 7, 2, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MPSADBWrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 7, 2, 4, 5, 6, 7, 8, 9,
+ 0,
+ 41,
+ OPC_RecordChild1,
OPC_RecordChild2,
OPC_RecordChild3,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitCopyToReg, 2, X86::XMM0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BLENDVPSrr0), 0|OPFL_FlagInput,
- 1, MVT::v4f32, 2, 0, 1,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMPSADBWrri), 0,
+ 1, MVT::v16i8, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MPSADBWrri), 0,
+ 1, MVT::v16i8, 3, 0, 1, 3,
+ 0,
0,
- 72,
- OPC_CheckInteger, 109|128,4,
+ 102,
+ OPC_CheckInteger, 11|128,6,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 47,
+ OPC_Scope, 66,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_RecordChild3,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitCopyToReg, 3, X86::XMM0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PBLENDVBrm0), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 4, 5, 6, 7, 8,
- 16,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPGTQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 10,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitCopyToReg, 2, X86::XMM0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PBLENDVBrr0), 0|OPFL_FlagInput,
- 1, MVT::v16i8, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPGTQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 10,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
0,
- 64,
- OPC_CheckInteger, 28|128,5,
+ 17|128,1,
+ OPC_CheckInteger, 55|128,3,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 99,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v16i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_SwitchOpcode , 61, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 12,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VAESENCrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 13,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESENCrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 28, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESENCrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 37,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 14,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTQrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 12,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VAESENCrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 13,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESENCrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESENCrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
0,
- 75,
- OPC_CheckInteger, 29|128,5,
+ 17|128,1,
+ OPC_CheckInteger, 56|128,3,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 44,
+ OPC_Scope, 99,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
- 22,
+ OPC_SwitchOpcode , 61, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 12,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VAESENCLASTrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 13,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESENCLASTrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 28, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESENCLASTrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 37,
OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRIrr), 0,
- 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 12,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VAESENCLASTrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 13,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESENCLASTrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESENCLASTrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
0,
- 75,
- OPC_CheckInteger, 30|128,5,
+ 17|128,1,
+ OPC_CheckInteger, 53|128,3,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 44,
+ OPC_Scope, 99,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRIArm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
- 22,
+ OPC_SwitchOpcode , 61, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 12,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VAESDECrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 13,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESDECrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 28, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESDECrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 37,
OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRIArr), 0,
- 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 12,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VAESDECrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 13,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESDECrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESDECrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
0,
- 75,
- OPC_CheckInteger, 31|128,5,
+ 17|128,1,
+ OPC_CheckInteger, 54|128,3,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 44,
+ OPC_Scope, 99,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRICrm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
- 22,
+ OPC_SwitchOpcode , 61, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 12,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VAESDECLASTrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 13,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESDECLASTrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 28, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESDECLASTrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 37,
OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRICrr), 0,
- 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 12,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VAESDECLASTrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 13,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESDECLASTrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESDECLASTrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
0,
- 75,
- OPC_CheckInteger, 32|128,5,
+ 115,
+ OPC_CheckInteger, 58|128,3,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 44,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_Scope, 69,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRIOrm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
- 22,
OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRIOrr), 0,
- 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
- 0,
- 75,
- OPC_CheckInteger, 33|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 44,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRISrm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
- 22,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 12,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VAESKEYGENASSIST128rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 3, 4, 5, 6, 7, 8,
+ 21,
+ OPC_CheckPatternPredicate, 13,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESKEYGENASSIST128rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 3, 4, 5, 6, 7, 8,
+ 0,
+ 38,
+ OPC_RecordChild1,
OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRISrr), 0,
- 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 12,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VAESKEYGENASSIST128rr), 0,
+ 1, MVT::v2i64, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 13,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESKEYGENASSIST128rr), 0,
+ 1, MVT::v2i64, 2, 0, 2,
+ 0,
0,
- 75,
- OPC_CheckInteger, 34|128,5,
+ 72,
+ OPC_CheckInteger, 126|128,3,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 44,
+ OPC_Scope, 43,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
OPC_RecordChild3,
OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRIZrm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
- 22,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERM2F128rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 20,
OPC_RecordChild2,
OPC_RecordChild3,
OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRIZrr), 0,
- 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
- 0,
- 90,
- OPC_CheckInteger, 21|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_Scope, 51,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_RecordChild4,
- OPC_RecordChild5,
- OPC_MoveChild, 5,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 1, 2,
- OPC_EmitConvertToTarget, 5,
- OPC_EmitCopyToReg, 1, X86::EAX,
- OPC_EmitCopyToReg, 4, X86::EDX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRIrm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
- 29,
- OPC_RecordChild3,
- OPC_RecordChild4,
- OPC_RecordChild5,
- OPC_MoveChild, 5,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_EmitConvertToTarget, 4,
- OPC_EmitCopyToReg, 1, X86::EAX,
- OPC_EmitCopyToReg, 3, X86::EDX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRIrr), 0|OPFL_FlagInput,
- 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
- 0,
- 90,
- OPC_CheckInteger, 22|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_Scope, 51,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_RecordChild4,
- OPC_RecordChild5,
- OPC_MoveChild, 5,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 1, 2,
- OPC_EmitConvertToTarget, 5,
- OPC_EmitCopyToReg, 1, X86::EAX,
- OPC_EmitCopyToReg, 4, X86::EDX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRIArm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
- 29,
- OPC_RecordChild3,
- OPC_RecordChild4,
- OPC_RecordChild5,
- OPC_MoveChild, 5,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_EmitConvertToTarget, 4,
- OPC_EmitCopyToReg, 1, X86::EAX,
- OPC_EmitCopyToReg, 3, X86::EDX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRIArr), 0|OPFL_FlagInput,
- 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
- 0,
- 90,
- OPC_CheckInteger, 23|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_Scope, 51,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_RecordChild4,
- OPC_RecordChild5,
- OPC_MoveChild, 5,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 1, 2,
- OPC_EmitConvertToTarget, 5,
- OPC_EmitCopyToReg, 1, X86::EAX,
- OPC_EmitCopyToReg, 4, X86::EDX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRICrm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
- 29,
- OPC_RecordChild3,
- OPC_RecordChild4,
- OPC_RecordChild5,
- OPC_MoveChild, 5,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_EmitConvertToTarget, 4,
- OPC_EmitCopyToReg, 1, X86::EAX,
- OPC_EmitCopyToReg, 3, X86::EDX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRICrr), 0|OPFL_FlagInput,
- 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
- 0,
- 90,
- OPC_CheckInteger, 24|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_Scope, 51,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_RecordChild4,
- OPC_RecordChild5,
- OPC_MoveChild, 5,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 1, 2,
- OPC_EmitConvertToTarget, 5,
- OPC_EmitCopyToReg, 1, X86::EAX,
- OPC_EmitCopyToReg, 4, X86::EDX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRIOrm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
- 29,
- OPC_RecordChild3,
- OPC_RecordChild4,
- OPC_RecordChild5,
- OPC_MoveChild, 5,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_EmitConvertToTarget, 4,
- OPC_EmitCopyToReg, 1, X86::EAX,
- OPC_EmitCopyToReg, 3, X86::EDX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRIOrr), 0|OPFL_FlagInput,
- 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
- 0,
- 90,
- OPC_CheckInteger, 25|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_Scope, 51,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_RecordChild4,
- OPC_RecordChild5,
- OPC_MoveChild, 5,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 1, 2,
- OPC_EmitConvertToTarget, 5,
- OPC_EmitCopyToReg, 1, X86::EAX,
- OPC_EmitCopyToReg, 4, X86::EDX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRISrm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
- 29,
- OPC_RecordChild3,
- OPC_RecordChild4,
- OPC_RecordChild5,
- OPC_MoveChild, 5,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_EmitConvertToTarget, 4,
- OPC_EmitCopyToReg, 1, X86::EAX,
- OPC_EmitCopyToReg, 3, X86::EDX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRISrr), 0|OPFL_FlagInput,
- 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
- 0,
- 90,
- OPC_CheckInteger, 26|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_Scope, 51,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_RecordChild4,
- OPC_RecordChild5,
- OPC_MoveChild, 5,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 1, 2,
- OPC_EmitConvertToTarget, 5,
- OPC_EmitCopyToReg, 1, X86::EAX,
- OPC_EmitCopyToReg, 4, X86::EDX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRIZrm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
- 29,
- OPC_RecordChild3,
- OPC_RecordChild4,
- OPC_RecordChild5,
- OPC_MoveChild, 5,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_EmitConvertToTarget, 4,
- OPC_EmitCopyToReg, 1, X86::EAX,
- OPC_EmitCopyToReg, 3, X86::EDX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRIZrr), 0|OPFL_FlagInput,
- 2, MVT::i32, MVT::i32, 3, 0, 2, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERM2F128rr), 0,
+ 1, MVT::v8i32, 3, 0, 1, 3,
0,
106,
- OPC_CheckInteger, 73|128,3,
+ OPC_CheckInteger, 35|128,4,
OPC_MoveParent,
OPC_Scope, 42,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDSBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDSBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 2, 3, 4, 5, 6, 7,
13,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDSBrr), 0,
1, MVT::v8i8, 2, 0, 1,
0,
106,
- OPC_CheckInteger, 74|128,3,
+ OPC_CheckInteger, 36|128,4,
OPC_MoveParent,
OPC_Scope, 42,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDSWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDSWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
13,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDSWrr), 0,
1, MVT::v4i16, 2, 0, 1,
0,
106,
- OPC_CheckInteger, 75|128,3,
+ OPC_CheckInteger, 37|128,4,
OPC_MoveParent,
OPC_Scope, 42,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDUSBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDUSBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 2, 3, 4, 5, 6, 7,
13,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDUSBrr), 0,
1, MVT::v8i8, 2, 0, 1,
0,
106,
- OPC_CheckInteger, 76|128,3,
+ OPC_CheckInteger, 38|128,4,
OPC_MoveParent,
OPC_Scope, 42,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDUSWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDUSWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
13,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDUSWrr), 0,
1, MVT::v4i16, 2, 0, 1,
0,
62,
- OPC_CheckInteger, 111|128,3,
+ OPC_CheckInteger, 83|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 41,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBSBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBSBrr), 0,
1, MVT::v8i8, 2, 0, 1,
0,
62,
- OPC_CheckInteger, 112|128,3,
+ OPC_CheckInteger, 84|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 41,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBSWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBSWrr), 0,
1, MVT::v4i16, 2, 0, 1,
0,
62,
- OPC_CheckInteger, 113|128,3,
+ OPC_CheckInteger, 85|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 41,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBUSBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBUSBrr), 0,
1, MVT::v8i8, 2, 0, 1,
0,
62,
- OPC_CheckInteger, 114|128,3,
+ OPC_CheckInteger, 86|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 41,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBUSWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBUSWrr), 0,
1, MVT::v4i16, 2, 0, 1,
0,
106,
- OPC_CheckInteger, 91|128,3,
+ OPC_CheckInteger, 57|128,4,
OPC_MoveParent,
OPC_Scope, 42,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMULHWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMULHWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
13,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMULHWrr), 0,
1, MVT::v4i16, 2, 0, 1,
0,
106,
- OPC_CheckInteger, 92|128,3,
+ OPC_CheckInteger, 58|128,4,
OPC_MoveParent,
OPC_Scope, 42,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMULHUWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMULHUWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
13,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMULHUWrr), 0,
1, MVT::v4i16, 2, 0, 1,
0,
106,
- OPC_CheckInteger, 93|128,3,
+ OPC_CheckInteger, 60|128,4,
OPC_MoveParent,
OPC_Scope, 42,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMULUDQrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMULUDQrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i32, 6, 2, 3, 4, 5, 6, 7,
13,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMULUDQrr), 0,
1, MVT::v2i32, 2, 0, 1,
0,
106,
- OPC_CheckInteger, 85|128,3,
+ OPC_CheckInteger, 51|128,4,
OPC_MoveParent,
OPC_Scope, 42,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMADDWDrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMADDWDrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i32, 6, 2, 3, 4, 5, 6, 7,
13,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMADDWDrr), 0,
1, MVT::v2i32, 2, 0, 1,
0,
106,
- OPC_CheckInteger, 77|128,3,
+ OPC_CheckInteger, 41|128,4,
OPC_MoveParent,
OPC_Scope, 42,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PAVGBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PAVGBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 2, 3, 4, 5, 6, 7,
13,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PAVGBrr), 0,
1, MVT::v8i8, 2, 0, 1,
0,
106,
- OPC_CheckInteger, 78|128,3,
+ OPC_CheckInteger, 42|128,4,
OPC_MoveParent,
OPC_Scope, 42,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PAVGWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PAVGWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
13,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PAVGWrr), 0,
1, MVT::v4i16, 2, 0, 1,
0,
106,
- OPC_CheckInteger, 89|128,3,
+ OPC_CheckInteger, 55|128,4,
OPC_MoveParent,
OPC_Scope, 42,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMINUBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMINUBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 2, 3, 4, 5, 6, 7,
13,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMINUBrr), 0,
1, MVT::v8i8, 2, 0, 1,
0,
106,
- OPC_CheckInteger, 88|128,3,
+ OPC_CheckInteger, 54|128,4,
OPC_MoveParent,
OPC_Scope, 42,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMINSWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMINSWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
13,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMINSWrr), 0,
1, MVT::v4i16, 2, 0, 1,
0,
106,
- OPC_CheckInteger, 87|128,3,
+ OPC_CheckInteger, 53|128,4,
OPC_MoveParent,
OPC_Scope, 42,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMAXUBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMAXUBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 2, 3, 4, 5, 6, 7,
13,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMAXUBrr), 0,
1, MVT::v8i8, 2, 0, 1,
0,
106,
- OPC_CheckInteger, 86|128,3,
+ OPC_CheckInteger, 52|128,4,
OPC_MoveParent,
OPC_Scope, 42,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMAXSWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMAXSWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
13,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMAXSWrr), 0,
1, MVT::v4i16, 2, 0, 1,
0,
106,
- OPC_CheckInteger, 94|128,3,
+ OPC_CheckInteger, 62|128,4,
OPC_MoveParent,
OPC_Scope, 42,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSADBWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
42,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSADBWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
13,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSADBWrr), 0,
1, MVT::v4i16, 2, 0, 1,
0,
62,
- OPC_CheckInteger, 107|128,3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 41,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRLWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRLWrr), 0,
- 1, MVT::v4i16, 2, 0, 1,
- 0,
- 62,
- OPC_CheckInteger, 105|128,3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 41,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRLDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRLDrr), 0,
- 1, MVT::v2i32, 2, 0, 1,
- 0,
- 62,
- OPC_CheckInteger, 106|128,3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 41,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRLQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRLQrr), 0,
- 1, MVT::v1i64, 2, 0, 1,
- 0,
- 62,
- OPC_CheckInteger, 97|128,3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 41,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSLLWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSLLWrr), 0,
- 1, MVT::v4i16, 2, 0, 1,
- 0,
- 62,
- OPC_CheckInteger, 95|128,3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 41,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSLLDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSLLDrr), 0,
- 1, MVT::v2i32, 2, 0, 1,
- 0,
- 62,
- OPC_CheckInteger, 96|128,3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 41,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSLLQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSLLQrr), 0,
- 1, MVT::v1i64, 2, 0, 1,
- 0,
- 62,
- OPC_CheckInteger, 102|128,3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 41,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRAWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRAWrr), 0,
- 1, MVT::v4i16, 2, 0, 1,
- 0,
- 62,
- OPC_CheckInteger, 101|128,3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 41,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRADrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRADrr), 0,
- 1, MVT::v2i32, 2, 0, 1,
- 0,
- 62,
- OPC_CheckInteger, 79|128,3,
+ OPC_CheckInteger, 43|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 41,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQBrr), 0,
1, MVT::v8i8, 2, 0, 1,
0,
62,
- OPC_CheckInteger, 81|128,3,
+ OPC_CheckInteger, 45|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 41,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQWrr), 0,
1, MVT::v4i16, 2, 0, 1,
0,
62,
- OPC_CheckInteger, 80|128,3,
+ OPC_CheckInteger, 44|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 41,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQDrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQDrr), 0,
1, MVT::v2i32, 2, 0, 1,
0,
62,
- OPC_CheckInteger, 82|128,3,
+ OPC_CheckInteger, 46|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 41,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTBrr), 0,
1, MVT::v8i8, 2, 0, 1,
0,
62,
- OPC_CheckInteger, 84|128,3,
+ OPC_CheckInteger, 48|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 41,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTWrr), 0,
1, MVT::v4i16, 2, 0, 1,
0,
62,
- OPC_CheckInteger, 83|128,3,
+ OPC_CheckInteger, 47|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 41,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTDrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTDrr), 0,
1, MVT::v2i32, 2, 0, 1,
0,
62,
- OPC_CheckInteger, 71|128,3,
+ OPC_CheckInteger, 29|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 41,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PACKSSWBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PACKSSWBrr), 0,
1, MVT::v8i8, 2, 0, 1,
0,
62,
- OPC_CheckInteger, 70|128,3,
+ OPC_CheckInteger, 28|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 41,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PACKSSDWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PACKSSDWrr), 0,
1, MVT::v4i16, 2, 0, 1,
0,
62,
- OPC_CheckInteger, 72|128,3,
+ OPC_CheckInteger, 30|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_Scope, 41,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PACKUSWBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PACKUSWBrr), 0,
1, MVT::v8i8, 2, 0, 1,
0,
- 73,
- OPC_CheckInteger, 35|128,5,
+ 122,
+ OPC_CheckInteger, 18|128,6,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 43,
+ OPC_Scope, 71,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_RecordChild3,
OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRM128MEM), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 7, 0, 4, 5, 6, 7, 8, 9,
- 21,
+ OPC_Scope, 23,
+ OPC_CheckPatternPredicate, 10,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRM128MEM), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::v16i8, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 23,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPISTRM128MEM), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::v16i8, MVT::i32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 42,
OPC_RecordChild2,
OPC_RecordChild3,
OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRM128REG), 0,
- 1, MVT::v16i8, 3, 0, 1, 3,
+ OPC_Scope, 15,
+ OPC_CheckPatternPredicate, 10,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPISTRM128REG), 0,
+ 2, MVT::v16i8, MVT::i32, 3, 0, 1, 3,
+ 15,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPISTRM128REG), 0,
+ 2, MVT::v16i8, MVT::i32, 3, 0, 1, 3,
+ 0,
0,
- 88,
- OPC_CheckInteger, 27|128,5,
+ 25|128,1,
+ OPC_CheckInteger, 10|128,6,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_Scope, 50,
+ OPC_Scope, 88,
OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_RecordChild4,
OPC_RecordChild5,
OPC_MoveChild, 5,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_CheckComplexPat, /*CP*/0, /*#*/3,
- OPC_EmitMergeInputChains, 1, 2,
- OPC_EmitConvertToTarget, 5,
- OPC_EmitCopyToReg, 1, X86::EAX,
- OPC_EmitCopyToReg, 4, X86::EDX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRM128MEM), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 1, MVT::v16i8, 7, 0, 6, 7, 8, 9, 10, 11,
- 28,
+ OPC_Scope, 31,
+ OPC_CheckPatternPredicate, 10,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitConvertToTarget, 5,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 4, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRM128MEM), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 2, MVT::v16i8, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
+ 31,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitConvertToTarget, 5,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 4, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPESTRM128MEM), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 2, MVT::v16i8, MVT::i32, 7, 0, 6, 7, 8, 9, 10, 11,
+ 0,
+ 55,
OPC_RecordChild3,
OPC_RecordChild4,
OPC_RecordChild5,
OPC_MoveChild, 5,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_EmitConvertToTarget, 4,
- OPC_EmitCopyToReg, 1, X86::EAX,
- OPC_EmitCopyToReg, 3, X86::EDX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRM128REG), 0|OPFL_FlagInput,
- 1, MVT::v16i8, 3, 0, 2, 5,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 10,
+ OPC_EmitConvertToTarget, 4,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 3, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPESTRM128REG), 0|OPFL_FlagInput,
+ 2, MVT::v16i8, MVT::i32, 3, 0, 2, 5,
+ 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 4,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 3, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPCMPESTRM128REG), 0|OPFL_FlagInput,
+ 2, MVT::v16i8, MVT::i32, 3, 0, 2, 5,
+ 0,
0,
- 52,
- OPC_CheckInteger, 4|128,4,
+ 51,
+ OPC_CheckInteger, 31|128,6,
OPC_MoveParent,
- OPC_Scope, 33,
+ OPC_Scope, 32,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_CheckPatternPredicate, 1,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSD2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPS2PIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 5, 2, 3, 4, 5, 6,
11,
OPC_RecordChild1,
OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSD2SI64rr), 0,
- 1, MVT::i64, 1, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPS2PIrr), 0,
+ 1, MVT::v2i32, 1, 0,
0,
- 52,
- OPC_CheckInteger, 12|128,4,
+ 53,
+ OPC_CheckInteger, 28|128,6,
OPC_MoveParent,
- OPC_Scope, 33,
+ OPC_Scope, 34,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTSD2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPD2PIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 5, 2, 3, 4, 5, 6,
11,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTSD2SI64rr), 0,
- 1, MVT::i64, 1, 0,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPD2PIrr), 0,
+ 1, MVT::v2i32, 1, 0,
0,
- 57,
- OPC_CheckInteger, 7|128,4,
+ 51,
+ OPC_CheckInteger, 37|128,6,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 36,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_Scope, 32,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSI2SD64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSI2SD64rr), 0,
- 1, MVT::v2f64, 2, 0, 1,
- 0,
- 57,
- OPC_CheckInteger, 50|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 36,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPS2PIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 5, 2, 3, 4, 5, 6,
+ 11,
OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSI2SS64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSI2SS64rr), 0,
- 1, MVT::v4f32, 2, 0, 1,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPS2PIrr), 0,
+ 1, MVT::v2i32, 1, 0,
0,
- 52,
- OPC_CheckInteger, 52|128,5,
+ 53,
+ OPC_CheckInteger, 36|128,6,
OPC_MoveParent,
- OPC_Scope, 33,
+ OPC_Scope, 34,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSS2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPD2PIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 5, 2, 3, 4, 5, 6,
11,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSS2SI64rr), 0,
- 1, MVT::i64, 1, 0,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPD2PIrr), 0,
+ 1, MVT::v2i32, 1, 0,
0,
- 52,
- OPC_CheckInteger, 56|128,5,
+ 89,
+ OPC_CheckInteger, 112|128,4,
OPC_MoveParent,
- OPC_Scope, 33,
+ OPC_Scope, 56,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTSS2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
- 11,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTPS2DQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPS2DQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTSS2SI64rr), 0,
- 1, MVT::i64, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTPS2DQrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPS2DQrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 0,
0,
- 52,
- OPC_CheckInteger, 51|128,5,
+ 89,
+ OPC_CheckInteger, 110|128,4,
OPC_MoveParent,
- OPC_Scope, 33,
+ OPC_Scope, 56,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSS2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 11,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTPD2DQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPD2DQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSS2SIrr), 0,
- 1, MVT::i32, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTPD2DQrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPD2DQrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 0,
0,
- 52,
- OPC_CheckInteger, 48|128,5,
+ 89,
+ OPC_CheckInteger, 121|128,4,
OPC_MoveParent,
- OPC_Scope, 33,
+ OPC_Scope, 56,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPS2PIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 5, 2, 3, 4, 5, 6,
- 11,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTTPS2DQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPS2DQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPS2PIrr), 0,
- 1, MVT::v2i32, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTTPS2DQrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPS2DQrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 0,
0,
- 52,
- OPC_CheckInteger, 54|128,5,
+ 89,
+ OPC_CheckInteger, 120|128,4,
OPC_MoveParent,
- OPC_Scope, 33,
+ OPC_Scope, 56,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPS2PIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 5, 2, 3, 4, 5, 6,
- 11,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTTPD2DQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPD2DQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPS2PIrr), 0,
- 1, MVT::v2i32, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTTPD2DQrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPD2DQrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 0,
0,
- 55,
- OPC_CheckInteger, 47|128,5,
+ 49,
+ OPC_CheckInteger, 70|128,3,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 34,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_Scope, 32,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPI2PSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPI2PSrr), 0,
- 1, MVT::v4f32, 2, 0, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTPS2DQYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i32, 5, 2, 3, 4, 5, 6,
+ 9,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTPS2DQYrr), 0,
+ 1, MVT::v8i32, 1, 0,
0,
- 52,
- OPC_CheckInteger, 55|128,5,
+ 49,
+ OPC_CheckInteger, 73|128,3,
OPC_MoveParent,
- OPC_Scope, 33,
+ OPC_Scope, 32,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTSS2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 11,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTTPD2DQYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 9,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTSS2SIrr), 0,
- 1, MVT::i32, 1, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTTPD2DQYrr), 0,
+ 1, MVT::v4i32, 1, 0,
0,
- 55,
- OPC_CheckInteger, 49|128,5,
+ 49,
+ OPC_CheckInteger, 74|128,3,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 34,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_Scope, 32,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSI2SSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSI2SSrr), 0,
- 1, MVT::v4f32, 2, 0, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTTPS2DQYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i32, 5, 2, 3, 4, 5, 6,
+ 9,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTTPS2DQYrr), 0,
+ 1, MVT::v8i32, 1, 0,
0,
- 57,
- OPC_CheckInteger, 60|128,5,
+ 94,
+ OPC_CheckInteger, 39|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 36,
+ OPC_Scope, 58,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXPSrm_Int), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSLLQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXPSrr_Int), 0,
- 1, MVT::v4f32, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSLLQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
0,
- 57,
- OPC_CheckInteger, 62|128,5,
+ 94,
+ OPC_CheckInteger, 51|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 36,
+ OPC_Scope, 58,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MINPSrm_Int), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRLQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MINPSrr_Int), 0,
- 1, MVT::v4f32, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRLQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
0,
- 54,
- OPC_CheckInteger, 72|128,5,
+ 49,
+ OPC_CheckInteger, 68|128,3,
OPC_MoveParent,
- OPC_Scope, 35,
+ OPC_Scope, 32,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTPSm_Int), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
- 11,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTPD2DQYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 9,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTPSr_Int), 0,
- 1, MVT::v4f32, 1, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTPD2DQYrr), 0,
+ 1, MVT::v4i32, 1, 0,
0,
- 54,
- OPC_CheckInteger, 69|128,5,
+ 89,
+ OPC_CheckInteger, 67|128,6,
OPC_MoveParent,
- OPC_Scope, 35,
+ OPC_Scope, 56,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::RSQRTPSm_Int), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
- 11,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPABSBrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSBrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::RSQRTPSr_Int), 0,
- 1, MVT::v4f32, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPABSBrr128), 0,
+ 1, MVT::v16i8, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSBrr128), 0,
+ 1, MVT::v16i8, 1, 0,
+ 0,
0,
- 54,
- OPC_CheckInteger, 67|128,5,
+ 89,
+ OPC_CheckInteger, 71|128,6,
OPC_MoveParent,
- OPC_Scope, 35,
+ OPC_Scope, 56,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::RCPPSm_Int), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
- 11,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPABSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::RCPPSr_Int), 0,
- 1, MVT::v4f32, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPABSWrr128), 0,
+ 1, MVT::v8i16, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSWrr128), 0,
+ 1, MVT::v8i16, 1, 0,
+ 0,
0,
- 52,
- OPC_CheckInteger, 3|128,4,
+ 89,
+ OPC_CheckInteger, 69|128,6,
OPC_MoveParent,
- OPC_Scope, 33,
+ OPC_Scope, 56,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSD2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 11,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPABSDrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSDrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSD2SIrr), 0,
- 1, MVT::i32, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPABSDrr128), 0,
+ 1, MVT::v4i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSDrr128), 0,
+ 1, MVT::v4i32, 1, 0,
+ 0,
0,
- 54,
- OPC_CheckInteger, 45|128,5,
+ 53,
+ OPC_CheckInteger, 66|128,6,
OPC_MoveParent,
- OPC_Scope, 35,
+ OPC_Scope, 34,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPatternPredicate, 11,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPD2PIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 5, 2, 3, 4, 5, 6,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSBrm64), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i8, 5, 2, 3, 4, 5, 6,
11,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPD2PIrr), 0,
- 1, MVT::v2i32, 1, 0,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSBrr64), 0,
+ 1, MVT::v8i8, 1, 0,
0,
- 54,
- OPC_CheckInteger, 53|128,5,
+ 53,
+ OPC_CheckInteger, 70|128,6,
OPC_MoveParent,
- OPC_Scope, 35,
+ OPC_Scope, 34,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPatternPredicate, 11,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPD2PIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 5, 2, 3, 4, 5, 6,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSWrm64), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 5, 2, 3, 4, 5, 6,
11,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPD2PIrr), 0,
- 1, MVT::v2i32, 1, 0,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSWrr64), 0,
+ 1, MVT::v4i16, 1, 0,
0,
- 52,
- OPC_CheckInteger, 46|128,5,
+ 53,
+ OPC_CheckInteger, 68|128,6,
OPC_MoveParent,
- OPC_Scope, 33,
+ OPC_Scope, 34,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPatternPredicate, 11,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPI2PDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSDrm64), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 5, 2, 3, 4, 5, 6,
11,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPI2PDrr), 0,
- 1, MVT::v2f64, 1, 0,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PABSDrr64), 0,
+ 1, MVT::v2i32, 1, 0,
0,
- 52,
- OPC_CheckInteger, 11|128,4,
+ 94,
+ OPC_CheckInteger, 89|128,6,
OPC_MoveParent,
- OPC_Scope, 33,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_RecordChild1,
+ OPC_Scope, 58,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTSD2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 11,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTSD2SIrr), 0,
- 1, MVT::i32, 1, 0,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSHUFBrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFBrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSHUFBrr128), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFBrr128), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
0,
- 57,
- OPC_CheckInteger, 18|128,4,
+ 94,
+ OPC_CheckInteger, 92|128,6,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 36,
+ OPC_Scope, 58,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXPDrm_Int), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSIGNBrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNBrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXPDrr_Int), 0,
- 1, MVT::v2f64, 2, 0, 1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSIGNBrr128), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNBrr128), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
0,
- 57,
- OPC_CheckInteger, 21|128,4,
+ 56,
+ OPC_CheckInteger, 88|128,6,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 36,
+ OPC_Scope, 35,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPatternPredicate, 11,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MINPDrm_Int), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFBrm64), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MINPDrr_Int), 0,
- 1, MVT::v2f64, 2, 0, 1,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFBrr64), 0,
+ 1, MVT::v8i8, 2, 0, 1,
0,
- 54,
- OPC_CheckInteger, 1|128,4,
+ 56,
+ OPC_CheckInteger, 91|128,6,
OPC_MoveParent,
+ OPC_RecordChild1,
OPC_Scope, 35,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPS2DQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 11,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPS2DQrr), 0,
- 1, MVT::v4i32, 1, 0,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNBrm64), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSIGNBrr64), 0,
+ 1, MVT::v8i8, 2, 0, 1,
0,
- 54,
- OPC_CheckInteger, 10|128,4,
+ 89,
+ OPC_CheckInteger, 98|128,5,
OPC_MoveParent,
- OPC_Scope, 35,
+ OPC_Scope, 56,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 22,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPS2DQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 11,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPHMINPOSUWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHMINPOSUWrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPS2DQrr), 0,
- 1, MVT::v4i32, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPHMINPOSUWrr128), 0,
+ 1, MVT::v8i16, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PHMINPOSUWrr128), 0,
+ 1, MVT::v8i16, 1, 0,
+ 0,
0,
- 54,
- OPC_CheckInteger, 127|128,3,
+ 27|128,1,
+ OPC_CheckInteger, 103|128,5,
OPC_MoveParent,
- OPC_Scope, 35,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPD2DQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 11,
+ OPC_Scope, 59,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPD2DQrr), 0,
- 1, MVT::v4i32, 1, 0,
- 0,
- 54,
- OPC_CheckInteger, 9|128,4,
- OPC_MoveParent,
- OPC_Scope, 35,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPD2DQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
- 11,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPD2DQrr), 0,
- 1, MVT::v4i32, 1, 0,
- 0,
- 52,
- OPC_CheckInteger, 2|128,4,
- OPC_MoveParent,
- OPC_Scope, 33,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 59,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPS2PDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
- 11,
+ OPC_RecordChild2,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPS2PDrr), 0,
- 1, MVT::v2f64, 1, 0,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMINSBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMINSBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
0,
- 54,
- OPC_CheckInteger, 0|128,4,
+ 27|128,1,
+ OPC_CheckInteger, 99|128,5,
OPC_MoveParent,
- OPC_Scope, 35,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPD2PSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
- 11,
+ OPC_Scope, 59,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPD2PSrr), 0,
- 1, MVT::v4f32, 1, 0,
- 0,
- 55,
- OPC_CheckInteger, 6|128,4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 34,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSI2SDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSI2SDrr), 0,
- 1, MVT::v2f64, 2, 0, 1,
- 0,
- 55,
- OPC_CheckInteger, 5|128,4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 34,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 59,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSD2SSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
- 12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSD2SSrr), 0,
- 1, MVT::v4f32, 2, 0, 1,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 28,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMAXSBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMAXSBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
0,
- 55,
- OPC_CheckInteger, 8|128,4,
+ 104,
+ OPC_CheckInteger, 92|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 34,
+ OPC_Scope, 63,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSS2SDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_RecordChild3,
+ OPC_Scope, 20,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPBLENDVBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 7, 0, 4, 5, 6, 7, 8, 3,
+ 22,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitCopyToReg, 3, X86::XMM0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PBLENDVBrm0), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 4, 5, 6, 7, 8,
+ 0,
+ 32,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSS2SDrr), 0,
- 1, MVT::v2f64, 2, 0, 1,
+ OPC_RecordChild3,
+ OPC_Scope, 12,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPBLENDVBrr), 0,
+ 1, MVT::v16i8, 3, 0, 1, 2,
+ 14,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitCopyToReg, 2, X86::XMM0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PBLENDVBrr0), 0|OPFL_FlagInput,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
0,
- 54,
- OPC_CheckInteger, 77|128,4,
+ 89,
+ OPC_CheckInteger, 57|128,3,
OPC_MoveParent,
- OPC_Scope, 35,
+ OPC_Scope, 56,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTPDm_Int), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
- 11,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 12,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VAESIMCrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 13,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESIMCrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTPDr_Int), 0,
- 1, MVT::v2f64, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 12,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VAESIMCrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 13,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AESIMCrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 0,
0,
- 57,
- OPC_CheckInteger, 90|128,4,
+ 54,
+ OPC_CheckInteger, 75|128,4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 36,
+ OPC_Scope, 33,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSUBPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRLWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
OPC_CheckPatternPredicate, 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSUBPSrr), 0,
- 1, MVT::v4f32, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRLWrr), 0,
+ 1, MVT::v4i16, 2, 0, 1,
0,
- 57,
- OPC_CheckInteger, 89|128,4,
+ 54,
+ OPC_CheckInteger, 73|128,4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 36,
+ OPC_Scope, 33,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSUBPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRLDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
OPC_CheckPatternPredicate, 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSUBPDrr), 0,
- 1, MVT::v2f64, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRLDrr), 0,
+ 1, MVT::v2i32, 2, 0, 1,
0,
- 57,
- OPC_CheckInteger, 92|128,4,
+ 54,
+ OPC_CheckInteger, 74|128,4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 36,
+ OPC_Scope, 33,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::HADDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRLQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
OPC_CheckPatternPredicate, 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::HADDPSrr), 0,
- 1, MVT::v4f32, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRLQrr), 0,
+ 1, MVT::v1i64, 2, 0, 1,
0,
- 57,
- OPC_CheckInteger, 91|128,4,
+ 54,
+ OPC_CheckInteger, 65|128,4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 36,
+ OPC_Scope, 33,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::HADDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSLLWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
OPC_CheckPatternPredicate, 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::HADDPDrr), 0,
- 1, MVT::v2f64, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSLLWrr), 0,
+ 1, MVT::v4i16, 2, 0, 1,
0,
- 57,
- OPC_CheckInteger, 94|128,4,
+ 54,
+ OPC_CheckInteger, 63|128,4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 36,
+ OPC_Scope, 33,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::HSUBPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSLLDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
OPC_CheckPatternPredicate, 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::HSUBPSrr), 0,
- 1, MVT::v4f32, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSLLDrr), 0,
+ 1, MVT::v2i32, 2, 0, 1,
0,
- 57,
- OPC_CheckInteger, 93|128,4,
+ 54,
+ OPC_CheckInteger, 64|128,4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 36,
+ OPC_Scope, 33,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::HSUBPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSLLQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
OPC_CheckPatternPredicate, 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::HSUBPDrr), 0,
- 1, MVT::v2f64, 2, 0, 1,
- 0,
- 96,
- OPC_CheckInteger, 9|128,5,
- OPC_MoveParent,
- OPC_Scope, 37,
- OPC_RecordChild1,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULLDrm_int), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 37,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULLDrm_int), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
- 13,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULLDrr_int), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 0,
- 55,
- OPC_CheckInteger, 20|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 34,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC32m8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 14,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC32r8), 0,
- 1, MVT::i32, 2, 0, 1,
- 0,
- 55,
- OPC_CheckInteger, 17|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_Scope, 34,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC32m16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 14,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC32r16), 0,
- 1, MVT::i32, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSLLQrr), 0,
+ 1, MVT::v1i64, 2, 0, 1,
0,
- 55,
- OPC_CheckInteger, 18|128,5,
+ 54,
+ OPC_CheckInteger, 70|128,4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 34,
+ OPC_Scope, 33,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC32m32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRAWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 14,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC32r32), 0,
- 1, MVT::i32, 2, 0, 1,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRAWrr), 0,
+ 1, MVT::v4i16, 2, 0, 1,
0,
- 55,
- OPC_CheckInteger, 19|128,5,
+ 54,
+ OPC_CheckInteger, 69|128,4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 34,
+ OPC_Scope, 33,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 14,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC64m64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRADrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 14,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CRC64r64), 0,
- 1, MVT::i64, 2, 0, 1,
- 0,
- 52,
- OPC_CheckInteger, 16|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_Scope, 22,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 8,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROUNDSSm_Int), 0|OPFL_Chain,
- 1, MVT::v4f32, 7, 0, 3, 4, 5, 6, 7, 9,
- 12,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROUNDSSr_Int), 0,
- 1, MVT::v4f32, 3, 0, 1, 3,
- 0,
- 52,
- OPC_CheckInteger, 15|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_Scope, 22,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 8,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROUNDSDm_Int), 0|OPFL_Chain,
- 1, MVT::v2f64, 7, 0, 3, 4, 5, 6, 7, 9,
- 12,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROUNDSDr_Int), 0,
- 1, MVT::v2f64, 3, 0, 1, 3,
- 0,
- 40,
- OPC_CheckInteger, 36|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 0,
- OPC_Scope, 19,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSSrm_Int), 0|OPFL_Chain,
- 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSSrr_Int), 0,
- 1, MVT::v4f32, 2, 0, 1,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRADrr), 0,
+ 1, MVT::v2i32, 2, 0, 1,
0,
- 40,
- OPC_CheckInteger, 66|128,5,
+ 42,
+ OPC_CheckInteger, 43|128,5,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 0,
- OPC_Scope, 19,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSSrm_Int), 0|OPFL_Chain,
- 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSSrr_Int), 0,
- 1, MVT::v4f32, 2, 0, 1,
- 0,
- 40,
- OPC_CheckInteger, 76|128,5,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 0,
- OPC_Scope, 19,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBSSrm_Int), 0|OPFL_Chain,
- 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBSSrr_Int), 0,
- 1, MVT::v4f32, 2, 0, 1,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSLLWri), 0,
+ 1, MVT::v8i16, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLWri), 0,
+ 1, MVT::v8i16, 2, 0, 2,
0,
- 40,
- OPC_CheckInteger, 57|128,5,
+ 42,
+ OPC_CheckInteger, 41|128,5,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 0,
- OPC_Scope, 19,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVSSrm_Int), 0|OPFL_Chain,
- 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVSSrr_Int), 0,
- 1, MVT::v4f32, 2, 0, 1,
- 0,
- 40,
- OPC_CheckInteger, 61|128,5,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 0,
- OPC_Scope, 19,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXSSrm_Int), 0|OPFL_Chain,
- 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXSSrr_Int), 0,
- 1, MVT::v4f32, 2, 0, 1,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSLLDri), 0,
+ 1, MVT::v4i32, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLDri), 0,
+ 1, MVT::v4i32, 2, 0, 2,
0,
- 40,
- OPC_CheckInteger, 63|128,5,
+ 42,
+ OPC_CheckInteger, 42|128,5,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 0,
- OPC_Scope, 19,
- OPC_CheckComplexPat, /*CP*/1, /*#*/1,
- OPC_EmitMergeInputChains, 1, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MINSSrm_Int), 0|OPFL_Chain,
- 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MINSSrr_Int), 0,
- 1, MVT::v4f32, 2, 0, 1,
- 0,
- 37,
- OPC_CheckInteger, 73|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 0,
- OPC_Scope, 18,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitMergeInputChains, 1, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTSSm_Int), 0|OPFL_Chain,
- 1, MVT::v4f32, 5, 1, 2, 3, 4, 5,
- 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTSSr_Int), 0,
- 1, MVT::v4f32, 1, 0,
- 0,
- 37,
- OPC_CheckInteger, 70|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 0,
- OPC_Scope, 18,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitMergeInputChains, 1, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::RSQRTSSm_Int), 0|OPFL_Chain,
- 1, MVT::v4f32, 5, 1, 2, 3, 4, 5,
- 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::RSQRTSSr_Int), 0,
- 1, MVT::v4f32, 1, 0,
- 0,
- 37,
- OPC_CheckInteger, 68|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 0,
- OPC_Scope, 18,
- OPC_CheckComplexPat, /*CP*/1, /*#*/0,
- OPC_EmitMergeInputChains, 1, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::RCPSSm_Int), 0|OPFL_Chain,
- 1, MVT::v4f32, 5, 1, 2, 3, 4, 5,
- 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::RCPSSr_Int), 0,
- 1, MVT::v4f32, 1, 0,
- 0,
- 40,
- OPC_CheckInteger, 115|128,3,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_Scope, 19,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSDrm_Int), 0|OPFL_Chain,
- 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSDrr_Int), 0,
- 1, MVT::v2f64, 2, 0, 1,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSLLQri), 0,
+ 1, MVT::v2i64, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLQri), 0,
+ 1, MVT::v2i64, 2, 0, 2,
0,
- 40,
- OPC_CheckInteger, 27|128,4,
+ 42,
+ OPC_CheckInteger, 55|128,5,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_Scope, 19,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSDrm_Int), 0|OPFL_Chain,
- 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSDrr_Int), 0,
- 1, MVT::v2f64, 2, 0, 1,
- 0,
- 40,
- OPC_CheckInteger, 82|128,4,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_Scope, 19,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBSDrm_Int), 0|OPFL_Chain,
- 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBSDrr_Int), 0,
- 1, MVT::v2f64, 2, 0, 1,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRLWri), 0,
+ 1, MVT::v8i16, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLWri), 0,
+ 1, MVT::v8i16, 2, 0, 2,
0,
- 40,
- OPC_CheckInteger, 13|128,4,
+ 42,
+ OPC_CheckInteger, 53|128,5,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_Scope, 19,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVSDrm_Int), 0|OPFL_Chain,
- 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVSDrr_Int), 0,
- 1, MVT::v2f64, 2, 0, 1,
- 0,
- 40,
- OPC_CheckInteger, 19|128,4,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_Scope, 19,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXSDrm_Int), 0|OPFL_Chain,
- 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXSDrr_Int), 0,
- 1, MVT::v2f64, 2, 0, 1,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRLDri), 0,
+ 1, MVT::v4i32, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLDri), 0,
+ 1, MVT::v4i32, 2, 0, 2,
0,
- 40,
- OPC_CheckInteger, 22|128,4,
+ 42,
+ OPC_CheckInteger, 54|128,5,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
- OPC_Scope, 19,
- OPC_CheckComplexPat, /*CP*/2, /*#*/1,
- OPC_EmitMergeInputChains, 1, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MINSDrm_Int), 0|OPFL_Chain,
- 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MINSDrr_Int), 0,
- 1, MVT::v2f64, 2, 0, 1,
- 0,
- 37,
- OPC_CheckInteger, 78|128,4,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_Scope, 18,
- OPC_CheckComplexPat, /*CP*/2, /*#*/0,
- OPC_EmitMergeInputChains, 1, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTSDm_Int), 0|OPFL_Chain,
- 1, MVT::v2f64, 5, 1, 2, 3, 4, 5,
- 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTSDr_Int), 0,
- 1, MVT::v2f64, 1, 0,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRLQri), 0,
+ 1, MVT::v2i64, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLQri), 0,
+ 1, MVT::v2i64, 2, 0, 2,
0,
- 24,
- OPC_CheckInteger, 60|128,4,
+ 42,
+ OPC_CheckInteger, 47|128,5,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLWri), 0,
- 1, MVT::v8i16, 2, 0, 2,
- 24,
- OPC_CheckInteger, 58|128,4,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRAWri), 0,
+ 1, MVT::v8i16, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRAWri), 0,
+ 1, MVT::v8i16, 2, 0, 2,
+ 0,
+ 42,
+ OPC_CheckInteger, 46|128,5,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLDri), 0,
- 1, MVT::v4i32, 2, 0, 2,
- 24,
- OPC_CheckInteger, 59|128,4,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRADri), 0,
+ 1, MVT::v4i32, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRADri), 0,
+ 1, MVT::v4i32, 2, 0, 2,
+ 0,
+ 48,
+ OPC_CheckInteger, 37|128,5,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLQri), 0,
- 1, MVT::v2i64, 2, 0, 2,
- 24,
- OPC_CheckInteger, 72|128,4,
+ OPC_Scope, 16,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSLLDQri), 0,
+ 1, MVT::v2i64, 2, 0, 3,
+ 16,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLDQri), 0,
+ 1, MVT::v2i64, 2, 0, 3,
+ 0,
+ 48,
+ OPC_CheckInteger, 49|128,5,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLWri), 0,
- 1, MVT::v8i16, 2, 0, 2,
- 24,
- OPC_CheckInteger, 70|128,4,
+ OPC_Scope, 16,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRLDQri), 0,
+ 1, MVT::v2i64, 2, 0, 3,
+ 16,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLDQri), 0,
+ 1, MVT::v2i64, 2, 0, 3,
+ 0,
+ 42,
+ OPC_CheckInteger, 38|128,5,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLDri), 0,
- 1, MVT::v4i32, 2, 0, 2,
- 24,
- OPC_CheckInteger, 71|128,4,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSLLDQri), 0,
+ 1, MVT::v2i64, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLDQri), 0,
+ 1, MVT::v2i64, 2, 0, 2,
+ 0,
+ 42,
+ OPC_CheckInteger, 50|128,5,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLQri), 0,
- 1, MVT::v2i64, 2, 0, 2,
- 24,
- OPC_CheckInteger, 64|128,4,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRLDQri), 0,
+ 1, MVT::v2i64, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLDQri), 0,
+ 1, MVT::v2i64, 2, 0, 2,
+ 0,
+ 25,
+ OPC_CheckInteger, 123|128,3,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRAWri), 0,
- 1, MVT::v8i16, 2, 0, 2,
- 24,
- OPC_CheckInteger, 63|128,4,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VINSERTF128rr), 0,
+ 1, MVT::v8i32, 3, 0, 1, 3,
+ 23,
+ OPC_CheckInteger, 120|128,3,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRADri), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VEXTRACTF128rr), 0,
1, MVT::v4i32, 2, 0, 2,
- 27,
- OPC_CheckInteger, 54|128,4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 5, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLDQri), 0,
- 1, MVT::v2i64, 2, 0, 3,
- 27,
- OPC_CheckInteger, 66|128,4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 5, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLDQri), 0,
- 1, MVT::v2i64, 2, 0, 3,
- 24,
- OPC_CheckInteger, 55|128,4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLDQri), 0,
- 1, MVT::v2i64, 2, 0, 2,
- 24,
- OPC_CheckInteger, 67|128,4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLDQri), 0,
- 1, MVT::v2i64, 2, 0, 2,
- 24,
- OPC_CheckInteger, 105|128,4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::INSERTPSrr), 0,
- 1, MVT::v4f32, 3, 0, 1, 3,
- 24,
- OPC_CheckInteger, 110|128,3,
+ 25,
+ OPC_CheckInteger, 78|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRLWri), 0,
1, MVT::v4i16, 2, 0, 2,
- 24,
- OPC_CheckInteger, 108|128,3,
+ 25,
+ OPC_CheckInteger, 76|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRLDri), 0,
1, MVT::v2i32, 2, 0, 2,
- 24,
- OPC_CheckInteger, 109|128,3,
+ 25,
+ OPC_CheckInteger, 77|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRLQri), 0,
1, MVT::v1i64, 2, 0, 2,
- 24,
- OPC_CheckInteger, 100|128,3,
+ 25,
+ OPC_CheckInteger, 68|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSLLWri), 0,
1, MVT::v4i16, 2, 0, 2,
- 24,
- OPC_CheckInteger, 98|128,3,
+ 25,
+ OPC_CheckInteger, 66|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSLLDri), 0,
1, MVT::v2i32, 2, 0, 2,
- 24,
- OPC_CheckInteger, 99|128,3,
+ 25,
+ OPC_CheckInteger, 67|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSLLQri), 0,
1, MVT::v1i64, 2, 0, 2,
- 24,
- OPC_CheckInteger, 104|128,3,
+ 25,
+ OPC_CheckInteger, 72|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRAWri), 0,
1, MVT::v4i16, 2, 0, 2,
- 24,
- OPC_CheckInteger, 103|128,3,
+ 25,
+ OPC_CheckInteger, 71|128,4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRADri), 0,
1, MVT::v2i32, 2, 0, 2,
- 15,
- OPC_CheckInteger, 64|128,5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVMSKPSrr), 0,
- 1, MVT::i32, 1, 0,
- 15,
- OPC_CheckInteger, 23|128,4,
+ 0|128,1,
+ OPC_CheckInteger, 82|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVMSKPDrr), 0,
- 1, MVT::i32, 1, 0,
- 15,
- OPC_CheckInteger, 48|128,4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMOVMSKBrr), 0,
- 1, MVT::i32, 1, 0,
- 15,
- OPC_CheckInteger, 90|128,3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMOVMSKBrr), 0,
- 1, MVT::i32, 1, 0,
- 0,
- 65|128,32, ISD::AND,
- OPC_Scope, 118|128,14,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 33|128,7, ISD::XOR,
- OPC_Scope, 45|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_Scope, 57,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 57,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 50,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 12|128,2,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 44|128,1, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_SwitchType , 41, MVT::v4i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 41, MVT::v8i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 41, MVT::v16i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 37, MVT::v2i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 82, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 68,
- OPC_SwitchType , 37, MVT::v4i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
- 37, MVT::v8i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 0,
- 72|128,2,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 50, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 13|128,2, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 48|128,1, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_SwitchType , 42, MVT::v4i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 42, MVT::v8i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 42, MVT::v16i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 38, MVT::v2i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 84, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 68,
- OPC_SwitchType , 38, MVT::v4i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
- 38, MVT::v8i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 0,
- 0,
- 75,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 26,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 24,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 75,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 26,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 24,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 0,
- 114|128,6, ISD::LOAD,
+ OPC_Scope, 79,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 34|128,3,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 39|128,1,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 124, ISD::BIT_CONVERT,
- OPC_Scope, 42,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 42,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 35,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 35, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 39,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 39,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 39,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v8i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 39,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v8i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 39,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v16i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 39,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v16i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 107|128,1,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 37,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
- 37,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v2i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
- 37,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 68,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
- 37,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 68,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
- 37,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 68,
- OPC_CheckType, MVT::v8i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
- 37,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 68,
- OPC_CheckType, MVT::v8i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 75,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 32,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 32,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 73,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 32,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
- 32,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 64,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_Scope, 26,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 0,
- 87, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_Scope, 41,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 22,
OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 41,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBLENDPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BLENDPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 40,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBLENDPSrri), 0,
+ 1, MVT::v4f32, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BLENDPSrri), 0,
+ 1, MVT::v4f32, 3, 0, 1, 3,
0,
0,
- 46|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
+ 0|128,1,
+ OPC_CheckInteger, 81|128,5,
+ OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 27,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_Scope, 79,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 25,
- OPC_CheckPredicate, 6,
OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 25,
- OPC_CheckPredicate, 3,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 80,
- OPC_CheckPredicate, 8,
- OPC_Scope, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 24,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBLENDPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BLENDPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 40,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBLENDPDrri), 0,
+ 1, MVT::v2f64, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BLENDPDrri), 0,
+ 1, MVT::v2f64, 3, 0, 1, 3,
0,
0,
- 51|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
+ 84,
+ OPC_CheckInteger, 62|128,3,
+ OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 28,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_Scope, 53,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v32i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBLENDPSYrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBLENDPSYrri), 0,
+ 1, MVT::v8f32, 3, 0, 1, 3,
+ 0,
+ 84,
+ OPC_CheckInteger, 61|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 53,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v32i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBLENDPDYrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBLENDPDYrri), 0,
+ 1, MVT::v4f64, 3, 0, 1, 3,
+ 0,
+ 82|128,1,
+ OPC_CheckInteger, 86|128,5,
+ OPC_MoveParent,
+ OPC_Scope, 80,
OPC_RecordChild1,
- OPC_CheckType, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 26,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDPPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DPPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 80,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 83,
- OPC_CheckPredicate, 8,
- OPC_Scope, 24,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 27,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 25,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDPPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 2, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 5,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DPPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 2, 4, 5, 6, 7, 8, 9,
0,
- 0,
- 16|128,1,
- OPC_CheckAndImm, 127|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_CheckPredicate, 15,
- OPC_MoveParent,
- OPC_SwitchType , 72, MVT::i32,
- OPC_Scope, 34,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, X86::GR32_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i32, 2, 0, 1,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 2, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr8), 0,
- 1, MVT::i32, 1, 4,
- 34,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, X86::GR32_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i32, 2, 0, 1,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 2, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32_NOREXrr8), 0,
- 1, MVT::i32, 1, 4,
+ 41,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDPPSrri), 0,
+ 1, MVT::v4f32, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DPPSrri), 0,
+ 1, MVT::v4f32, 3, 0, 1, 3,
0,
- 48, MVT::i64,
- OPC_EmitInteger, MVT::i64, 0,
- OPC_EmitInteger, MVT::i32, X86::GR64_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i64, 2, 0, 2,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 3, 4,
- OPC_EmitNode, TARGET_OPCODE(X86::MOVZX32_NOREXrr8), 0,
- 1, MVT::i32, 1, 5,
- OPC_EmitInteger, MVT::i32, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0,
- 1, MVT::i64, 3, 1, 6, 7,
0,
- 73|128,2,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 126|128,1, ISD::XOR,
- OPC_Scope, 74,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 33, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPDrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 33, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPDrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 0,
- 16|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 93, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_SwitchType , 32, MVT::v4i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2i64,
- OPC_Scope, 11,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPSrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 11,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 0,
- 17, MVT::v8i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 17, MVT::v16i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 15, MVT::v2i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v1i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
- 1, MVT::v1i64, 2, 0, 1,
- 0,
- 38, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 68,
- OPC_SwitchType , 15, MVT::v4i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v1i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
- 1, MVT::v1i64, 2, 0, 1,
- 15, MVT::v8i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v1i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
- 1, MVT::v1i64, 2, 0, 1,
- 0,
- 0,
- 30,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2i64,
+ 82|128,1,
+ OPC_CheckInteger, 85|128,5,
+ OPC_MoveParent,
+ OPC_Scope, 80,
+ OPC_RecordChild1,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 22,
OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPSrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDPPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DPPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 7, 0, 4, 5, 6, 7, 8, 9,
0,
- 66, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
+ 80,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 25, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPDrr), 0,
- 1, MVT::v2i64, 2, 1, 0,
- 25, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPDrr), 0,
- 1, MVT::v2i64, 2, 1, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDPPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 7, 2, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DPPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 7, 2, 4, 5, 6, 7, 8, 9,
+ 0,
+ 41,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDPPDrri), 0,
+ 1, MVT::v2f64, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DPPDrri), 0,
+ 1, MVT::v2f64, 3, 0, 1, 3,
0,
0,
- 67,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 29,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ 84,
+ OPC_CheckInteger, 75|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 53,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v4i32,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v32i8,
OPC_MoveParent,
OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPSrr), 0,
- 1, MVT::v2i64, 2, 1, 0,
- 29,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDPPSYrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPSrr), 0,
- 1, MVT::v2i64, 2, 1, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDPPSYrri), 0,
+ 1, MVT::v8f32, 3, 0, 1, 3,
0,
- 34,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
+ 97,
+ OPC_CheckInteger, 109|128,4,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 67,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 29,
- OPC_RecordChild0,
+ OPC_Scope, 64,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
- 1, MVT::v2i64, 2, 1, 0,
- 29,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTDQ2PSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTDQ2PSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTDQ2PSrr), 0,
+ 1, MVT::v4f32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTDQ2PSrr), 0,
+ 1, MVT::v4f32, 1, 0,
+ 0,
+ 0,
+ 97,
+ OPC_CheckInteger, 108|128,4,
+ OPC_MoveParent,
+ OPC_Scope, 64,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v4i32,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
OPC_MoveParent,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTDQ2PDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTDQ2PDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
OPC_RecordChild1,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
- 1, MVT::v2i64, 2, 1, 0,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTDQ2PDrr), 0,
+ 1, MVT::v2f64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTDQ2PDrr), 0,
+ 1, MVT::v2f64, 1, 0,
+ 0,
0,
- 34,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v8i16,
- OPC_MoveParent,
+ 118,
+ OPC_CheckInteger, 21|128,6,
OPC_MoveParent,
OPC_RecordChild1,
+ OPC_Scope, 69,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCMPSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CMPSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 40,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCMPSSrr), 0,
+ 1, MVT::v4f32, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 1,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CMPSSrr), 0,
+ 1, MVT::v4f32, 3, 0, 1, 3,
+ 0,
+ 0,
+ 118,
+ OPC_CheckInteger, 101|128,4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 67,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 29,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v8i16,
+ OPC_Scope, 69,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCMPSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CMPSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 40,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
- 1, MVT::v2i64, 2, 1, 0,
- 29,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v8i16,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCMPSDrr), 0,
+ 1, MVT::v2f64, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CMPSDrr), 0,
+ 1, MVT::v2f64, 3, 0, 1, 3,
+ 0,
+ 0,
+ 120,
+ OPC_CheckInteger, 20|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 71,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_RecordChild1,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCMPPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMPPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 40,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
- 1, MVT::v2i64, 2, 1, 0,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCMPPSrri), 0,
+ 1, MVT::v4f32, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 1,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMPPSrri), 0,
+ 1, MVT::v4f32, 3, 0, 1, 3,
+ 0,
0,
- 34,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v16i8,
+ 120,
+ OPC_CheckInteger, 100|128,4,
OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 71,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCMPPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMPPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 40,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCMPPDrri), 0,
+ 1, MVT::v2f64, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMPPDrri), 0,
+ 1, MVT::v2f64, 3, 0, 1, 3,
+ 0,
+ 0,
+ 76,
+ OPC_CheckInteger, 66|128,3,
OPC_MoveParent,
OPC_RecordChild1,
+ OPC_Scope, 45,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCMPPSYrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCMPPSYrri), 0,
+ 1, MVT::v8f32, 3, 0, 1, 3,
+ 0,
+ 76,
+ OPC_CheckInteger, 65|128,3,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 67,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 29,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v16i8,
+ OPC_Scope, 45,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCMPPDYrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
- 1, MVT::v2i64, 2, 1, 0,
- 29,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v16i8,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCMPPDYrri), 0,
+ 1, MVT::v4f64, 3, 0, 1, 3,
+ 0,
+ 115,
+ OPC_CheckInteger, 124|128,5,
+ OPC_MoveParent,
+ OPC_Scope, 69,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VROUNDPSm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 3, 4, 5, 6, 7, 8,
+ 21,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROUNDPSm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 3, 4, 5, 6, 7, 8,
+ 0,
+ 38,
OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
- 1, MVT::v2i64, 2, 1, 0,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VROUNDPSr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROUNDPSr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 2,
+ 0,
0,
- 32,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v2i32,
+ 115,
+ OPC_CheckInteger, 123|128,5,
OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v1i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
- 1, MVT::v1i64, 2, 0, 1,
- 63,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 27,
- OPC_RecordChild0,
+ OPC_Scope, 69,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v2i32,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VROUNDPDm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 3, 4, 5, 6, 7, 8,
+ 21,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROUNDPDm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 3, 4, 5, 6, 7, 8,
+ 0,
+ 38,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
- 1, MVT::v1i64, 2, 1, 0,
- 27,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_CheckType, MVT::v2i32,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VROUNDPDr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROUNDPDr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 2,
+ 0,
+ 0,
+ 73,
+ OPC_CheckInteger, 106|128,3,
+ OPC_MoveParent,
+ OPC_Scope, 44,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VROUNDYPSm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 3, 4, 5, 6, 7, 8,
+ 21,
OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
- 1, MVT::v1i64, 2, 1, 0,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VROUNDYPSr_Int), 0,
+ 1, MVT::v8f32, 2, 0, 2,
0,
- 32,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 68,
- OPC_CheckType, MVT::v4i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
+ 73,
+ OPC_CheckInteger, 105|128,3,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v1i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
- 1, MVT::v1i64, 2, 0, 1,
- 63,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 27,
- OPC_RecordChild0,
+ OPC_Scope, 44,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 68,
- OPC_CheckType, MVT::v4i16,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VROUNDYPDm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 3, 4, 5, 6, 7, 8,
+ 21,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
- 1, MVT::v1i64, 2, 1, 0,
- 27,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VROUNDYPDr_Int), 0,
+ 1, MVT::v4f64, 2, 0, 2,
+ 0,
+ 112,
+ OPC_CheckInteger, 83|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 71,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 68,
- OPC_CheckType, MVT::v4i16,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_Scope, 20,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBLENDVPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 7, 0, 4, 5, 6, 7, 8, 3,
+ 22,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitCopyToReg, 3, X86::XMM0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BLENDVPDrm0), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 4, 5, 6, 7, 8,
+ 0,
+ 32,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_Scope, 12,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBLENDVPDrr), 0,
+ 1, MVT::v2f64, 3, 0, 1, 2,
+ 14,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitCopyToReg, 2, X86::XMM0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BLENDVPDrr0), 0|OPFL_FlagInput,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 0,
+ 112,
+ OPC_CheckInteger, 84|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 71,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v16i8,
OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
- 1, MVT::v1i64, 2, 1, 0,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_Scope, 20,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBLENDVPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 3,
+ 22,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitCopyToReg, 3, X86::XMM0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BLENDVPSrm0), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 4, 5, 6, 7, 8,
+ 0,
+ 32,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_Scope, 12,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBLENDVPSrr), 0,
+ 1, MVT::v4f32, 3, 0, 1, 2,
+ 14,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitCopyToReg, 2, X86::XMM0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BLENDVPSrr0), 0|OPFL_FlagInput,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
0,
- 32,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 68,
- OPC_CheckType, MVT::v8i8,
- OPC_MoveParent,
+ 68,
+ OPC_CheckInteger, 63|128,3,
OPC_MoveParent,
OPC_RecordChild1,
+ OPC_Scope, 45,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v32i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBLENDVPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 7, 0, 4, 5, 6, 7, 8, 3,
+ 14,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBLENDVPDYrr), 0,
+ 1, MVT::v4f64, 3, 0, 1, 2,
+ 0,
+ 68,
+ OPC_CheckInteger, 64|128,3,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckType, MVT::v1i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
- 1, MVT::v1i64, 2, 0, 1,
- 63,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 27,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_Scope, 45,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 68,
- OPC_CheckType, MVT::v8i8,
- OPC_MoveParent,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v32i8,
OPC_MoveParent,
OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
- 1, MVT::v1i64, 2, 1, 0,
- 27,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_CheckPredicate, 68,
- OPC_CheckType, MVT::v8i8,
+ OPC_RecordChild3,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBLENDVPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 7, 0, 4, 5, 6, 7, 8, 3,
+ 14,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBLENDVPSYrr), 0,
+ 1, MVT::v8f32, 3, 0, 1, 2,
+ 0,
+ 73,
+ OPC_CheckInteger, 1|128,4,
+ OPC_MoveParent,
+ OPC_Scope, 44,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERMILPSmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 3, 4, 5, 6, 7, 8,
+ 21,
OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
- 1, MVT::v1i64, 2, 1, 0,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERMILPSri), 0,
+ 1, MVT::v4f32, 2, 0, 2,
0,
- 69,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 38,
- OPC_RecordChild0,
+ 73,
+ OPC_CheckInteger, 2|128,4,
+ OPC_MoveParent,
+ OPC_Scope, 44,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERMILPSYmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 3, 4, 5, 6, 7, 8,
+ 21,
OPC_RecordChild1,
- OPC_SwitchType , 11, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 11, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
- 1, MVT::v1i64, 2, 0, 1,
- 0,
- 23,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERMILPSYri), 0,
+ 1, MVT::v8f32, 2, 0, 2,
+ 0,
+ 73,
+ OPC_CheckInteger, 127|128,3,
+ OPC_MoveParent,
+ OPC_Scope, 44,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERMILPDmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 3, 4, 5, 6, 7, 8,
+ 21,
OPC_RecordChild1,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERMILPDri), 0,
+ 1, MVT::v2f64, 2, 0, 2,
0,
- 53,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 22,
- OPC_RecordChild0,
+ 73,
+ OPC_CheckInteger, 0|128,4,
+ OPC_MoveParent,
+ OPC_Scope, 44,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
- OPC_MoveParent,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
- 1, MVT::v2i64, 2, 1, 0,
- 22,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERMILPDYmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 3, 4, 5, 6, 7, 8,
+ 21,
OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
- 1, MVT::v2i64, 2, 1, 0,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERMILPDYri), 0,
+ 1, MVT::v4f64, 2, 0, 2,
0,
- 27,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::XOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
+ 72,
+ OPC_CheckInteger, 125|128,3,
OPC_MoveParent,
OPC_RecordChild1,
+ OPC_Scope, 43,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERM2F128rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 20,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERM2F128rr), 0,
+ 1, MVT::v8f32, 3, 0, 1, 3,
+ 0,
+ 72,
+ OPC_CheckInteger, 124|128,3,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
- 1, MVT::v1i64, 2, 0, 1,
- 53,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::XOR,
- OPC_Scope, 22,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
+ OPC_Scope, 43,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
- 1, MVT::v1i64, 2, 1, 0,
- 22,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BUILD_VECTOR,
- OPC_CheckPredicate, 67,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERM2F128rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 7, 0, 4, 5, 6, 7, 8, 9,
+ 20,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERM2F128rr), 0,
+ 1, MVT::v4f64, 3, 0, 1, 3,
+ 0,
+ 92,
+ OPC_CheckInteger, 32|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 56,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
- 1, MVT::v1i64, 2, 1, 0,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSI2SSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSI2SSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSI2SSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSI2SSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
0,
- 29,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
+ 94,
+ OPC_CheckInteger, 33|128,6,
OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
+ OPC_RecordChild1,
+ OPC_Scope, 58,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSI2SS64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSI2SS64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSI2SS64rr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSI2SS64rr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 0,
+ 92,
+ OPC_CheckInteger, 117|128,4,
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDPDrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 27,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i32,
- OPC_EmitInteger, MVT::i32, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i16, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr16), 0,
- 1, MVT::i32, 1, 2,
- 29,
- OPC_CheckAndImm, 127|128,127|128,127|128,127|128,15,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i64,
- OPC_EmitInteger, MVT::i32, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i32, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rr32), 0,
- 1, MVT::i64, 1, 2,
- 27,
- OPC_CheckAndImm, 127|128,127|128,3,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i64,
- OPC_EmitInteger, MVT::i32, 3,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i16, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rr16), 0,
- 1, MVT::i64, 1, 2,
- 24|128,1,
- OPC_CheckAndImm, 127|128,1,
- OPC_RecordChild0,
- OPC_SwitchType , 20, MVT::i64,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rr8), 0,
- 1, MVT::i64, 1, 2,
- 60, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr8), 0,
- 1, MVT::i32, 1, 2,
- 34,
+ OPC_RecordChild1,
+ OPC_Scope, 56,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSI2SDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, X86::GR32_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i32, 2, 0, 1,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 2, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr8), 0,
- 1, MVT::i32, 1, 4,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSI2SDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
0,
- 60, MVT::i16,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX16rr8), 0,
- 1, MVT::i16, 1, 2,
- 34,
+ 27,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSI2SDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i16, 2, 0, 1,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 2, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX16rr8), 0,
- 1, MVT::i16, 1, 4,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSI2SDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
0,
0,
- 11|128,2,
- OPC_RecordChild0,
+ 94,
+ OPC_CheckInteger, 118|128,4,
+ OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 38|128,1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 47,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_SwitchType , 12, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16ri8), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 2,
- 12, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32ri8), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 2,
- 12, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64ri8), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 2,
- 0,
- 17,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64ri32), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 2,
- 47,
- OPC_CheckPredicate, 69,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitInteger, MVT::i64, 0,
- OPC_EmitInteger, MVT::i32, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i32, 2, 0, 3,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 6, 5,
- OPC_EmitNode, TARGET_OPCODE(X86::AND32ri), 0,
- 1, MVT::i32, 2, 4, 6,
- OPC_EmitInteger, MVT::i32, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0,
- 1, MVT::i64, 3, 2, 7, 8,
- 45,
- OPC_MoveParent,
- OPC_SwitchType , 12, MVT::i8,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8ri), 0,
- 2, MVT::i8, MVT::i32, 2, 0, 2,
- 12, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16ri), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 2,
- 12, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32ri), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 2,
- 0,
+ OPC_Scope, 58,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSI2SD64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSI2SD64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
0,
+ 27,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSI2SD64rr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSI2SD64rr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 0,
+ 51,
+ OPC_CheckInteger, 29|128,6,
+ OPC_MoveParent,
+ OPC_Scope, 32,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPI2PDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 11,
+ OPC_RecordChild1,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPI2PDrr), 0,
+ 1, MVT::v2f64, 1, 0,
+ 0,
+ 54,
+ OPC_CheckInteger, 30|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 33,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPI2PSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
12,
- OPC_CheckType, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8rr), 0,
- 2, MVT::i8, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16rr), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32rr), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64rr), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 1,
- 28,
- OPC_CheckType, MVT::v2i64,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPI2PSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 92,
+ OPC_CheckInteger, 116|128,4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 56,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSD2SSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSD2SSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild2,
OPC_Scope, 11,
OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDPSrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSD2SSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
11,
OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSD2SSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
0,
- 13,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDrr), 0,
- 1, MVT::v1i64, 2, 0, 1,
0,
- 0,
- 42|128,12, X86ISD::CMP,
- OPC_Scope, 23|128,6,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 76|128,3, ISD::AND,
- OPC_Scope, 39|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 76,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 32,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST64mi32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 30,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST8mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 0,
- 38,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST16mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 38,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST32mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 0,
- 13|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 32,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 30,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 30,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 32,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 16|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 33,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 31,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 31,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 33,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 0,
+ 92,
+ OPC_CheckInteger, 119|128,4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 56,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSS2SDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSS2SDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTSS2SDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTSS2SDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
0,
- 65|128,2, ISD::LOAD,
+ 0,
+ 87,
+ OPC_CheckInteger, 113|128,4,
+ OPC_MoveParent,
+ OPC_Scope, 54,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 34,
- OPC_CheckPredicate, 6,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP16mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP32mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 96,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_SwitchType , 58, MVT::i64,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 24,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP64mi8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 24,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP64mi32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 0,
- 28, MVT::i8,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP8mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 0,
- 32,
- OPC_CheckPredicate, 6,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP16mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 32,
- OPC_CheckPredicate, 3,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP32mi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 27,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_RecordChild1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTPS2PDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP8mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
- 25,
- OPC_CheckPredicate, 6,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_RecordChild1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPS2PDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTPS2PDrr), 0,
+ 1, MVT::v2f64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPS2PDrr), 0,
+ 1, MVT::v2f64, 1, 0,
+ 0,
+ 0,
+ 89,
+ OPC_CheckInteger, 111|128,4,
+ OPC_MoveParent,
+ OPC_Scope, 56,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP16mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
- 25,
- OPC_CheckPredicate, 3,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTPD2PSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP32mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPD2PSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCVTPD2PSrr), 0,
+ 1, MVT::v4f32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPD2PSrr), 0,
+ 1, MVT::v4f32, 1, 0,
0,
0,
- 110,
- OPC_RecordChild0,
- OPC_Scope, 36,
- OPC_CheckChild0Type, MVT::i8,
+ 49,
+ OPC_CheckInteger, 72|128,3,
+ OPC_MoveParent,
+ OPC_Scope, 32,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 34,
- OPC_CheckChild0Type, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTDQ2PSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 5, 2, 3, 4, 5, 6,
+ 9,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTDQ2PSYrr), 0,
+ 1, MVT::v8f32, 1, 0,
+ 0,
+ 49,
+ OPC_CheckInteger, 67|128,3,
+ OPC_MoveParent,
+ OPC_Scope, 32,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 34,
- OPC_CheckChild0Type, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTPD2PSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 9,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTPD2PSYrr), 0,
+ 1, MVT::v4f32, 1, 0,
+ 0,
+ 49,
+ OPC_CheckInteger, 69|128,3,
+ OPC_MoveParent,
+ OPC_Scope, 32,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTPS2PDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 5, 2, 3, 4, 5, 6,
+ 9,
+ OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTPS2PDYrr), 0,
+ 1, MVT::v4f64, 1, 0,
0,
- 37,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
+ 94,
+ OPC_CheckInteger, 43|128,6,
+ OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::i64,
+ OPC_Scope, 58,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXPSrm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXPSrm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXPSrr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXPSrr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 0,
+ 94,
+ OPC_CheckInteger, 1|128,5,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP64mr), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
- 118,
- OPC_RecordChild0,
- OPC_Scope, 36,
- OPC_CheckChild0Type, MVT::i64,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_Scope, 58,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXPDrm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXPDrm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXPDrr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXPDrr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 0,
+ 56,
+ OPC_CheckInteger, 93|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 35,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 38,
- OPC_CheckChild0Type, MVT::f32,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXPSYrm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXPSYrr_Int), 0,
+ 1, MVT::v8f32, 2, 0, 1,
+ 0,
+ 56,
+ OPC_CheckInteger, 92|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 35,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UCOMISSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 38,
- OPC_CheckChild0Type, MVT::f64,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXPDYrm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXPDYrr_Int), 0,
+ 1, MVT::v4f64, 2, 0, 1,
+ 0,
+ 94,
+ OPC_CheckInteger, 45|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 58,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINPSrm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MINPSrm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINPSrr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MINPSrr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 0,
+ 94,
+ OPC_CheckInteger, 4|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 58,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINPDrm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MINPDrm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINPDrr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MINPDrr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 0,
+ 56,
+ OPC_CheckInteger, 95|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 35,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UCOMISDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINPSYrm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINPSYrr_Int), 0,
+ 1, MVT::v8f32, 2, 0, 1,
0,
- 47|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::AND,
- OPC_RecordChild0,
+ 56,
+ OPC_CheckInteger, 94|128,3,
+ OPC_MoveParent,
OPC_RecordChild1,
- OPC_Scope, 92,
+ OPC_Scope, 35,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINPDYrm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINPDYrr_Int), 0,
+ 1, MVT::v4f64, 2, 0, 1,
+ 0,
+ 89,
+ OPC_CheckInteger, 55|128,6,
+ OPC_MoveParent,
+ OPC_Scope, 56,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 62,
- OPC_MoveParent,
- OPC_CheckPredicate, 70,
- OPC_SwitchType , 17, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST8ri), 0,
- 1, MVT::i32, 2, 0, 2,
- 17, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST16ri), 0,
- 1, MVT::i32, 2, 0, 2,
- 17, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST32ri), 0,
- 1, MVT::i32, 2, 0, 2,
- 0,
- 22,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST64ri32), 0,
- 1, MVT::i32, 2, 0, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTPSm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTPSm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
0,
- 55,
- OPC_CheckPredicate, 70,
- OPC_SwitchType , 15, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST8rr), 0,
- 1, MVT::i32, 2, 0, 1,
- 15, MVT::i16,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST16rr), 0,
- 1, MVT::i32, 2, 0, 1,
- 15, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST32rr), 0,
- 1, MVT::i32, 2, 0, 1,
+ 25,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTPSr_Int), 0,
+ 1, MVT::v4f32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTPSr_Int), 0,
+ 1, MVT::v4f32, 1, 0,
0,
- 17,
- OPC_CheckType, MVT::i64,
+ 0,
+ 89,
+ OPC_CheckInteger, 60|128,5,
+ OPC_MoveParent,
+ OPC_Scope, 56,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTPDm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTPDm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTPDr_Int), 0,
+ 1, MVT::v2f64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTPDr_Int), 0,
+ 1, MVT::v2f64, 1, 0,
+ 0,
+ 0,
+ 53,
+ OPC_CheckInteger, 109|128,3,
+ OPC_MoveParent,
+ OPC_Scope, 34,
OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST64rr), 0,
- 1, MVT::i32, 2, 0, 1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTPSYm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 5, 2, 3, 4, 5, 6,
+ 11,
+ OPC_RecordChild1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTPSYr_Int), 0,
+ 1, MVT::v8f32, 1, 0,
0,
- 80|128,2,
- OPC_RecordChild0,
- OPC_Scope, 50,
- OPC_CheckChild0Type, MVT::i8,
- OPC_Scope, 14,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST8rr), 0,
- 1, MVT::i32, 2, 0, 0,
- 30,
- OPC_RecordChild1,
- OPC_Scope, 16,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP8ri), 0,
- 1, MVT::i32, 2, 0, 2,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP8rr), 0,
- 1, MVT::i32, 2, 0, 1,
- 0,
+ 53,
+ OPC_CheckInteger, 108|128,3,
+ OPC_MoveParent,
+ OPC_Scope, 34,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTPDYm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 5, 2, 3, 4, 5, 6,
+ 11,
+ OPC_RecordChild1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTPDYr_Int), 0,
+ 1, MVT::v4f64, 1, 0,
+ 0,
+ 53,
+ OPC_CheckInteger, 107|128,3,
+ OPC_MoveParent,
+ OPC_Scope, 34,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRSQRTPSYm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 5, 2, 3, 4, 5, 6,
+ 11,
+ OPC_RecordChild1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRSQRTPSYr_Int), 0,
+ 1, MVT::v8f32, 1, 0,
+ 0,
+ 89,
+ OPC_CheckInteger, 52|128,6,
+ OPC_MoveParent,
+ OPC_Scope, 56,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRSQRTPSm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::RSQRTPSm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
0,
- 68,
- OPC_CheckChild0Type, MVT::i16,
- OPC_Scope, 14,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST16rr), 0,
- 1, MVT::i32, 2, 0, 0,
- 48,
- OPC_RecordChild1,
- OPC_Scope, 34,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 14,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP16ri8), 0,
- 1, MVT::i32, 2, 0, 2,
- 12,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP16ri), 0,
- 1, MVT::i32, 2, 0, 2,
- 0,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP16rr), 0,
- 1, MVT::i32, 2, 0, 1,
- 0,
+ 25,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRSQRTPSr_Int), 0,
+ 1, MVT::v4f32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::RSQRTPSr_Int), 0,
+ 1, MVT::v4f32, 1, 0,
0,
- 68,
- OPC_CheckChild0Type, MVT::i32,
- OPC_Scope, 14,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST32rr), 0,
- 1, MVT::i32, 2, 0, 0,
- 48,
- OPC_RecordChild1,
- OPC_Scope, 34,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 14,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP32ri8), 0,
- 1, MVT::i32, 2, 0, 2,
- 12,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP32ri), 0,
- 1, MVT::i32, 2, 0, 2,
- 0,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP32rr), 0,
- 1, MVT::i32, 2, 0, 1,
- 0,
+ 0,
+ 53,
+ OPC_CheckInteger, 104|128,3,
+ OPC_MoveParent,
+ OPC_Scope, 34,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRCPPSYm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 5, 2, 3, 4, 5, 6,
+ 11,
+ OPC_RecordChild1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRCPPSYr_Int), 0,
+ 1, MVT::v8f32, 1, 0,
+ 0,
+ 89,
+ OPC_CheckInteger, 50|128,6,
+ OPC_MoveParent,
+ OPC_Scope, 56,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRCPPSm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::RCPPSm_Int), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
0,
- 70,
- OPC_CheckChild0Type, MVT::i64,
- OPC_Scope, 14,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TEST64rr), 0,
- 1, MVT::i32, 2, 0, 0,
- 50,
- OPC_RecordChild1,
- OPC_Scope, 36,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 14,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP64ri8), 0,
- 1, MVT::i32, 2, 0, 2,
- 14,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP64ri32), 0,
- 1, MVT::i32, 2, 0, 2,
- 0,
- 9,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMP64rr), 0,
- 1, MVT::i32, 2, 0, 1,
- 0,
+ 25,
+ OPC_RecordChild1,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRCPPSr_Int), 0,
+ 1, MVT::v4f32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::RCPPSr_Int), 0,
+ 1, MVT::v4f32, 1, 0,
0,
- 29,
- OPC_CheckChild0Type, MVT::f32,
+ 0,
+ 49,
+ OPC_CheckInteger, 71|128,3,
+ OPC_MoveParent,
+ OPC_Scope, 32,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTDQ2PDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 5, 2, 3, 4, 5, 6,
+ 9,
OPC_RecordChild1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTDQ2PDYrr), 0,
+ 1, MVT::v4f64, 1, 0,
+ 0,
+ 94,
+ OPC_CheckInteger, 73|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 58,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSUBPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 14,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSUBPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild2,
OPC_Scope, 11,
- OPC_CheckPatternPredicate, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UCOM_FpIr32), 0,
- 1, MVT::i32, 2, 0, 1,
- 11,
OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UCOMISSrr), 0,
- 1, MVT::i32, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSUBPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 14,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSUBPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
0,
- 29,
- OPC_CheckChild0Type, MVT::f64,
+ 0,
+ 94,
+ OPC_CheckInteger, 72|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 58,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSUBPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 14,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSUBPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild2,
OPC_Scope, 11,
- OPC_CheckPatternPredicate, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UCOM_FpIr64), 0,
- 1, MVT::i32, 2, 0, 1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSUBPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
11,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UCOMISDrr), 0,
- 1, MVT::i32, 2, 0, 1,
+ OPC_CheckPatternPredicate, 14,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSUBPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
0,
+ 0,
+ 56,
+ OPC_CheckInteger, 60|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 35,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSUBPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 0, 3, 4, 5, 6, 7,
12,
- OPC_CheckChild0Type, MVT::f80,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSUBPSYrr), 0,
+ 1, MVT::v8f32, 2, 0, 1,
+ 0,
+ 56,
+ OPC_CheckInteger, 59|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 35,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::UCOM_FpIr80), 0,
- 1, MVT::i32, 2, 0, 1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSUBPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSUBPDYrr), 0,
+ 1, MVT::v4f64, 2, 0, 1,
0,
- 0,
- 78, X86ISD::INSERTPS,
- OPC_RecordChild0,
- OPC_Scope, 52,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
+ 94,
+ OPC_CheckInteger, 75|128,5,
+ OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::f32,
+ OPC_Scope, 58,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VHADDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 14,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::HADDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VHADDPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 14,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::HADDPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 0,
+ 94,
+ OPC_CheckInteger, 74|128,5,
OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 58,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VHADDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 14,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::HADDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VHADDPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 14,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::HADDPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 0,
+ 94,
+ OPC_CheckInteger, 77|128,5,
OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_RecordChild1,
+ OPC_Scope, 58,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VHSUBPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 14,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::HSUBPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VHSUBPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 14,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::HSUBPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 0,
+ 94,
+ OPC_CheckInteger, 76|128,5,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::INSERTPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
- 21,
OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_Scope, 58,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VHSUBPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 14,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::HSUBPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild2,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VHSUBPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 14,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::HSUBPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 0,
+ 56,
+ OPC_CheckInteger, 77|128,3,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::INSERTPSrr), 0,
- 1, MVT::v4f32, 3, 0, 1, 3,
- 0,
- 76, X86ISD::MMX_PINSRW,
- OPC_RecordChild0,
- OPC_Scope, 50,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::ANY_EXTEND,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 6,
- OPC_CheckType, MVT::i16,
+ OPC_Scope, 35,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VHADDPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VHADDPSYrr), 0,
+ 1, MVT::v8f32, 2, 0, 1,
+ 0,
+ 56,
+ OPC_CheckInteger, 76|128,3,
OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 35,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VHADDPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VHADDPDYrr), 0,
+ 1, MVT::v4f64, 2, 0, 1,
+ 0,
+ 56,
+ OPC_CheckInteger, 79|128,3,
OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_RecordChild1,
+ OPC_Scope, 35,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VHSUBPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VHSUBPSYrr), 0,
+ 1, MVT::v8f32, 2, 0, 1,
+ 0,
+ 56,
+ OPC_CheckInteger, 78|128,3,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PINSRWrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 7, 0, 4, 5, 6, 7, 8, 9,
- 21,
OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_Scope, 35,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VHSUBPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VHSUBPDYrr), 0,
+ 1, MVT::v4f64, 2, 0, 1,
+ 0,
+ 56,
+ OPC_CheckInteger, 5|128,4,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PINSRWrri), 0,
- 1, MVT::v4i16, 3, 0, 1, 3,
- 0,
- 88, X86ISD::MOVQ2DQ,
- OPC_Scope, 75,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 41, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SCALAR_TO_VECTOR,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_RecordChild1,
+ OPC_Scope, 35,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
OPC_CheckPredicate, 3,
- OPC_CheckType, MVT::i32,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERMILPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERMILPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 56,
+ OPC_CheckInteger, 6|128,4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 35,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERMILPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERMILPSYrr), 0,
+ 1, MVT::v8f32, 2, 0, 1,
+ 0,
+ 56,
+ OPC_CheckInteger, 3|128,4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 35,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
- 26, ISD::LOAD,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERMILPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERMILPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 56,
+ OPC_CheckInteger, 4|128,4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 35,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVQI2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERMILPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPERMILPDYrr), 0,
+ 1, MVT::v4f64, 2, 0, 1,
0,
- 9,
- OPC_RecordChild0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2DQrr), 0,
- 1, MVT::v2i64, 1, 0,
+ 95,
+ OPC_CheckInteger, 126|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 24,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 8,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VROUNDSSm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 7, 0, 3, 4, 5, 6, 7, 9,
+ 24,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 8,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROUNDSSm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 7, 0, 3, 4, 5, 6, 7, 9,
+ 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VROUNDSSr_Int), 0,
+ 1, MVT::v4f32, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROUNDSSr_Int), 0,
+ 1, MVT::v4f32, 3, 0, 1, 3,
+ 0,
+ 95,
+ OPC_CheckInteger, 125|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 24,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 8,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VROUNDSDm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 7, 0, 3, 4, 5, 6, 7, 9,
+ 24,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 8,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROUNDSDm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 7, 0, 3, 4, 5, 6, 7, 9,
+ 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VROUNDSDr_Int), 0,
+ 1, MVT::v2f64, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROUNDSDr_Int), 0,
+ 1, MVT::v2f64, 3, 0, 1, 3,
+ 0,
+ 76,
+ OPC_CheckInteger, 19|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSSrm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
+ 21,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSSrm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSSrr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSSrr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 76,
+ OPC_CheckInteger, 98|128,4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSDrm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
+ 21,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSDrm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSDrr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSDrr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 76,
+ OPC_CheckInteger, 49|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULSSrm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
+ 21,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSSrm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULSSrr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSSrr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 76,
+ OPC_CheckInteger, 10|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULSDrm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
+ 21,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSDrm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULSDrr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSDrr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 76,
+ OPC_CheckInteger, 59|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSUBSSrm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
+ 21,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBSSrm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSUBSSrr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBSSrr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 76,
+ OPC_CheckInteger, 65|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSUBSDrm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
+ 21,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBSDrm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSUBSDrr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBSDrr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 76,
+ OPC_CheckInteger, 40|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDIVSSrm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
+ 21,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVSSrm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDIVSSrr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVSSrr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 76,
+ OPC_CheckInteger, 124|128,4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDIVSDrm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
+ 21,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVSDrm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDIVSDrr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVSDrr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 76,
+ OPC_CheckInteger, 44|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXSSrm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
+ 21,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXSSrm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXSSrr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXSSrr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 76,
+ OPC_CheckInteger, 2|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXSDrm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
+ 21,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXSDrm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXSDrr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXSDrr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 76,
+ OPC_CheckInteger, 46|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINSSrm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
+ 21,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MINSSrm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 6, 0, 2, 3, 4, 5, 6,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINSSrr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MINSSrr_Int), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 76,
+ OPC_CheckInteger, 5|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINSDrm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
+ 21,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/1,
+ OPC_EmitMergeInputChains, 1, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MINSDrm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 6, 0, 2, 3, 4, 5, 6,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINSDrr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MINSDrr_Int), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 71,
+ OPC_CheckInteger, 56|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 20,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/0,
+ OPC_EmitMergeInputChains, 1, 6,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTSSm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 5, 1, 2, 3, 4, 5,
+ 20,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/0,
+ OPC_EmitMergeInputChains, 1, 6,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTSSm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 5, 1, 2, 3, 4, 5,
+ 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTSSr_Int), 0,
+ 1, MVT::v4f32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTSSr_Int), 0,
+ 1, MVT::v4f32, 1, 0,
+ 0,
+ 71,
+ OPC_CheckInteger, 61|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 20,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/0,
+ OPC_EmitMergeInputChains, 1, 6,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTSDm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 5, 1, 2, 3, 4, 5,
+ 20,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/2, /*#*/0,
+ OPC_EmitMergeInputChains, 1, 6,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTSDm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 5, 1, 2, 3, 4, 5,
+ 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTSDr_Int), 0,
+ 1, MVT::v2f64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTSDr_Int), 0,
+ 1, MVT::v2f64, 1, 0,
+ 0,
+ 71,
+ OPC_CheckInteger, 53|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 20,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/0,
+ OPC_EmitMergeInputChains, 1, 6,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRSQRTSSm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 5, 1, 2, 3, 4, 5,
+ 20,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/0,
+ OPC_EmitMergeInputChains, 1, 6,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::RSQRTSSm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 5, 1, 2, 3, 4, 5,
+ 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRSQRTSSr_Int), 0,
+ 1, MVT::v4f32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::RSQRTSSr_Int), 0,
+ 1, MVT::v4f32, 1, 0,
+ 0,
+ 71,
+ OPC_CheckInteger, 51|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_Scope, 20,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/0,
+ OPC_EmitMergeInputChains, 1, 6,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRCPSSm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 5, 1, 2, 3, 4, 5,
+ 20,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/1, /*#*/0,
+ OPC_EmitMergeInputChains, 1, 6,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::RCPSSm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 5, 1, 2, 3, 4, 5,
+ 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRCPSSr_Int), 0,
+ 1, MVT::v4f32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::RCPSSr_Int), 0,
+ 1, MVT::v4f32, 1, 0,
+ 0,
+ 45,
+ OPC_CheckInteger, 88|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VINSERTPSrr), 0,
+ 1, MVT::v4f32, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::INSERTPSrr), 0,
+ 1, MVT::v4f32, 3, 0, 1, 3,
+ 0,
+ 25,
+ OPC_CheckInteger, 121|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VINSERTF128rr), 0,
+ 1, MVT::v4f64, 3, 0, 1, 3,
+ 25,
+ OPC_CheckInteger, 122|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_MoveChild, 3,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VINSERTF128rr), 0,
+ 1, MVT::v8f32, 3, 0, 1, 3,
+ 23,
+ OPC_CheckInteger, 118|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VEXTRACTF128rr), 0,
+ 1, MVT::v2f64, 2, 0, 2,
+ 23,
+ OPC_CheckInteger, 119|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VEXTRACTF128rr), 0,
+ 1, MVT::v4f32, 2, 0, 2,
0,
- 25|128,35, X86ISD::CMOV,
- OPC_Scope, 77|128,8,
+ 32|128,36, TARGET_OPCODE(X86ISD::CMOV),
+ OPC_Scope, 78|128,8,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
OPC_Scope, 33,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 2,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVB16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 2,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVB32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 1,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVAE16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 1,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVAE32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 4,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVE16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 4,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVE32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 9,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 9,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 3,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 3,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 0,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVA16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 0,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVA32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 7,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVL16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 7,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVL32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 6,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVGE16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 6,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVGE32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 8,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVLE16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 8,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVLE32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 5,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVG16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 5,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVG32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 15,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVS16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 15,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVS32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 12,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNS16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 12,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNS32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 14,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 14,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 11,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 11,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 13,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVO16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 13,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVO32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 10,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNO16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 0, 4, 5, 6, 7, 8,
33,
- OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_MoveChild, 2,
OPC_CheckInteger, 10,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNO32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 0, 4, 5, 6, 7, 8,
0,
- 108|128,8,
+ 45|128,8,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 34,
- OPC_CheckPredicate, 6,
+ OPC_Scope, 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21409,12 +18397,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVAE16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21423,12 +18411,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVAE32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 6,
+ 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21437,12 +18425,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVB16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21451,12 +18439,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVB32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 6,
+ 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21465,12 +18453,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21479,12 +18467,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 6,
+ 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21493,12 +18481,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVE16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21507,12 +18495,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVE32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 6,
+ 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21521,12 +18509,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVA16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21535,12 +18523,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVA32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 6,
+ 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21549,12 +18537,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21563,12 +18551,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 6,
+ 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21577,12 +18565,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVGE16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21591,12 +18579,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVGE32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 6,
+ 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21605,12 +18593,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVL16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21619,12 +18607,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVL32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 6,
+ 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21633,12 +18621,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVG16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21647,12 +18635,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVG32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 6,
+ 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21661,12 +18649,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVLE16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21675,12 +18663,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVLE32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 6,
+ 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21689,12 +18677,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21703,12 +18691,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 6,
+ 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21717,12 +18705,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21731,12 +18719,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 6,
+ 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21745,12 +18733,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNS16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21759,12 +18747,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNS32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 6,
+ 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21773,12 +18761,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVS16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21787,12 +18775,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVS32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 6,
+ 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21801,12 +18789,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNO16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21815,12 +18803,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNO32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 6,
+ 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21829,12 +18817,12 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVO16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i16, 6, 2, 4, 5, 6, 7, 8,
- 34,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
@@ -21843,387 +18831,375 @@ SDNode *SelectCode(SDNode *N) {
OPC_RecordChild3,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVO32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i32, 6, 2, 4, 5, 6, 7, 8,
0,
- 100|128,3,
+ 69|128,3,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_MoveChild, 2,
- OPC_Scope, 28,
+ OPC_Scope, 26,
OPC_CheckInteger, 2,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVB64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 0, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 1,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVAE64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 0, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 4,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVE64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 0, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 9,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 0, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 3,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 0, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 0,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVA64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 0, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 7,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVL64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 0, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 6,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVGE64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 0, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 8,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVLE64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 0, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 5,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVG64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 0, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 15,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVS64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 0, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 12,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNS64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 0, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 14,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 0, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 11,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 0, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 13,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVO64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 0, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 10,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNO64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 0, 4, 5, 6, 7, 8,
0,
- 100|128,3,
+ 69|128,3,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_Scope, 28,
+ OPC_Scope, 26,
OPC_CheckInteger, 2,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVAE64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 2, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 1,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVB64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 2, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 4,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 2, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 9,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVE64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 2, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 3,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVA64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 2, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 0,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 2, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 7,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVGE64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 2, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 6,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVL64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 2, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 8,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVG64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 2, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 5,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVLE64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 2, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 14,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 2, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 11,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 2, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 15,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNS64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 2, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 12,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVS64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 2, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 13,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNO64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 2, 4, 5, 6, 7, 8,
- 28,
+ 26,
OPC_CheckInteger, 10,
OPC_MoveParent,
OPC_RecordChild3,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVO64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
1, MVT::i64, 6, 2, 4, 5, 6, 7, 8,
0,
- 12|128,10,
+ 15|128,12,
OPC_RecordChild0,
OPC_RecordChild1,
- OPC_Scope, 4|128,9,
+ OPC_Scope, 84|128,6,
OPC_MoveChild, 2,
- OPC_Scope, 94,
+ OPC_Scope, 52,
OPC_CheckInteger, 2,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_SwitchType , 14, MVT::f32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVB_Fp32), 0|OPFL_FlagInput,
- 1, MVT::f32, 2, 0, 1,
- 14, MVT::f64,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVB_Fp64), 0|OPFL_FlagInput,
- 1, MVT::f64, 2, 0, 1,
- 12, MVT::f80,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVB_Fp80), 0|OPFL_FlagInput,
- 1, MVT::f80, 2, 0, 1,
- 12, MVT::i16,
+ OPC_SwitchType , 14, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVB16rr), 0|OPFL_FlagInput,
1, MVT::i16, 2, 0, 1,
- 12, MVT::i32,
+ 14, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVB32rr), 0|OPFL_FlagInput,
1, MVT::i32, 2, 0, 1,
@@ -22232,60 +19208,36 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVB64rr), 0|OPFL_FlagInput,
1, MVT::i64, 2, 0, 1,
0,
- 94,
- OPC_CheckInteger, 3,
+ 52,
+ OPC_CheckInteger, 1,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_SwitchType , 14, MVT::f32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE_Fp32), 0|OPFL_FlagInput,
- 1, MVT::f32, 2, 0, 1,
- 14, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ OPC_SwitchType , 14, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE_Fp64), 0|OPFL_FlagInput,
- 1, MVT::f64, 2, 0, 1,
- 12, MVT::f80,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE_Fp80), 0|OPFL_FlagInput,
- 1, MVT::f80, 2, 0, 1,
- 12, MVT::i16,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE16rr), 0|OPFL_FlagInput,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVAE16rr), 0|OPFL_FlagInput,
1, MVT::i16, 2, 0, 1,
- 12, MVT::i32,
+ 14, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE32rr), 0|OPFL_FlagInput,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVAE32rr), 0|OPFL_FlagInput,
1, MVT::i32, 2, 0, 1,
12, MVT::i64,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE64rr), 0|OPFL_FlagInput,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVAE64rr), 0|OPFL_FlagInput,
1, MVT::i64, 2, 0, 1,
0,
- 94,
+ 52,
OPC_CheckInteger, 4,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_SwitchType , 14, MVT::f32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVE_Fp32), 0|OPFL_FlagInput,
- 1, MVT::f32, 2, 0, 1,
- 14, MVT::f64,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVE_Fp64), 0|OPFL_FlagInput,
- 1, MVT::f64, 2, 0, 1,
- 12, MVT::f80,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVE_Fp80), 0|OPFL_FlagInput,
- 1, MVT::f80, 2, 0, 1,
- 12, MVT::i16,
+ OPC_SwitchType , 14, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVE16rr), 0|OPFL_FlagInput,
1, MVT::i16, 2, 0, 1,
- 12, MVT::i32,
+ 14, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVE32rr), 0|OPFL_FlagInput,
1, MVT::i32, 2, 0, 1,
@@ -22294,91 +19246,55 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVE64rr), 0|OPFL_FlagInput,
1, MVT::i64, 2, 0, 1,
0,
- 94,
- OPC_CheckInteger, 14,
+ 52,
+ OPC_CheckInteger, 9,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_SwitchType , 14, MVT::f32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP_Fp32), 0|OPFL_FlagInput,
- 1, MVT::f32, 2, 0, 1,
- 14, MVT::f64,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP_Fp64), 0|OPFL_FlagInput,
- 1, MVT::f64, 2, 0, 1,
- 12, MVT::f80,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP_Fp80), 0|OPFL_FlagInput,
- 1, MVT::f80, 2, 0, 1,
- 12, MVT::i16,
+ OPC_SwitchType , 14, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP16rr), 0|OPFL_FlagInput,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE16rr), 0|OPFL_FlagInput,
1, MVT::i16, 2, 0, 1,
- 12, MVT::i32,
+ 14, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP32rr), 0|OPFL_FlagInput,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE32rr), 0|OPFL_FlagInput,
1, MVT::i32, 2, 0, 1,
12, MVT::i64,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP64rr), 0|OPFL_FlagInput,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE64rr), 0|OPFL_FlagInput,
1, MVT::i64, 2, 0, 1,
0,
- 94,
- OPC_CheckInteger, 1,
+ 52,
+ OPC_CheckInteger, 3,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_SwitchType , 14, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 14, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNB_Fp32), 0|OPFL_FlagInput,
- 1, MVT::f32, 2, 0, 1,
- 14, MVT::f64,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNB_Fp64), 0|OPFL_FlagInput,
- 1, MVT::f64, 2, 0, 1,
- 12, MVT::f80,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNB_Fp80), 0|OPFL_FlagInput,
- 1, MVT::f80, 2, 0, 1,
- 12, MVT::i16,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVAE16rr), 0|OPFL_FlagInput,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE16rr), 0|OPFL_FlagInput,
1, MVT::i16, 2, 0, 1,
- 12, MVT::i32,
+ 14, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVAE32rr), 0|OPFL_FlagInput,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE32rr), 0|OPFL_FlagInput,
1, MVT::i32, 2, 0, 1,
12, MVT::i64,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVAE64rr), 0|OPFL_FlagInput,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE64rr), 0|OPFL_FlagInput,
1, MVT::i64, 2, 0, 1,
0,
- 94,
+ 52,
OPC_CheckInteger, 0,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_SwitchType , 14, MVT::f32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNBE_Fp32), 0|OPFL_FlagInput,
- 1, MVT::f32, 2, 0, 1,
- 14, MVT::f64,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNBE_Fp64), 0|OPFL_FlagInput,
- 1, MVT::f64, 2, 0, 1,
- 12, MVT::f80,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNBE_Fp80), 0|OPFL_FlagInput,
- 1, MVT::f80, 2, 0, 1,
- 12, MVT::i16,
+ OPC_SwitchType , 14, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVA16rr), 0|OPFL_FlagInput,
1, MVT::i16, 2, 0, 1,
- 12, MVT::i32,
+ 14, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVA32rr), 0|OPFL_FlagInput,
1, MVT::i32, 2, 0, 1,
@@ -22387,77 +19303,17 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVA64rr), 0|OPFL_FlagInput,
1, MVT::i64, 2, 0, 1,
0,
- 94,
- OPC_CheckInteger, 9,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_SwitchType , 14, MVT::f32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE_Fp32), 0|OPFL_FlagInput,
- 1, MVT::f32, 2, 0, 1,
- 14, MVT::f64,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE_Fp64), 0|OPFL_FlagInput,
- 1, MVT::f64, 2, 0, 1,
- 12, MVT::f80,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE_Fp80), 0|OPFL_FlagInput,
- 1, MVT::f80, 2, 0, 1,
- 12, MVT::i16,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE16rr), 0|OPFL_FlagInput,
- 1, MVT::i16, 2, 0, 1,
- 12, MVT::i32,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE32rr), 0|OPFL_FlagInput,
- 1, MVT::i32, 2, 0, 1,
- 12, MVT::i64,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE64rr), 0|OPFL_FlagInput,
- 1, MVT::i64, 2, 0, 1,
- 0,
- 94,
- OPC_CheckInteger, 11,
- OPC_MoveParent,
- OPC_RecordChild3,
- OPC_SwitchType , 14, MVT::f32,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP_Fp32), 0|OPFL_FlagInput,
- 1, MVT::f32, 2, 0, 1,
- 14, MVT::f64,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP_Fp64), 0|OPFL_FlagInput,
- 1, MVT::f64, 2, 0, 1,
- 12, MVT::f80,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP_Fp80), 0|OPFL_FlagInput,
- 1, MVT::f80, 2, 0, 1,
- 12, MVT::i16,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP16rr), 0|OPFL_FlagInput,
- 1, MVT::i16, 2, 0, 1,
- 12, MVT::i32,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP32rr), 0|OPFL_FlagInput,
- 1, MVT::i32, 2, 0, 1,
- 12, MVT::i64,
- OPC_EmitCopyToReg, 2, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP64rr), 0|OPFL_FlagInput,
- 1, MVT::i64, 2, 0, 1,
- 0,
- 48,
+ 52,
OPC_CheckInteger, 7,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_SwitchType , 12, MVT::i16,
+ OPC_SwitchType , 14, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVL16rr), 0|OPFL_FlagInput,
1, MVT::i16, 2, 0, 1,
- 12, MVT::i32,
+ 14, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVL32rr), 0|OPFL_FlagInput,
1, MVT::i32, 2, 0, 1,
@@ -22466,15 +19322,17 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVL64rr), 0|OPFL_FlagInput,
1, MVT::i64, 2, 0, 1,
0,
- 48,
+ 52,
OPC_CheckInteger, 6,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_SwitchType , 12, MVT::i16,
+ OPC_SwitchType , 14, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVGE16rr), 0|OPFL_FlagInput,
1, MVT::i16, 2, 0, 1,
- 12, MVT::i32,
+ 14, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVGE32rr), 0|OPFL_FlagInput,
1, MVT::i32, 2, 0, 1,
@@ -22483,15 +19341,17 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVGE64rr), 0|OPFL_FlagInput,
1, MVT::i64, 2, 0, 1,
0,
- 48,
+ 52,
OPC_CheckInteger, 8,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_SwitchType , 12, MVT::i16,
+ OPC_SwitchType , 14, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVLE16rr), 0|OPFL_FlagInput,
1, MVT::i16, 2, 0, 1,
- 12, MVT::i32,
+ 14, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVLE32rr), 0|OPFL_FlagInput,
1, MVT::i32, 2, 0, 1,
@@ -22500,15 +19360,17 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVLE64rr), 0|OPFL_FlagInput,
1, MVT::i64, 2, 0, 1,
0,
- 48,
+ 52,
OPC_CheckInteger, 5,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_SwitchType , 12, MVT::i16,
+ OPC_SwitchType , 14, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVG16rr), 0|OPFL_FlagInput,
1, MVT::i16, 2, 0, 1,
- 12, MVT::i32,
+ 14, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVG32rr), 0|OPFL_FlagInput,
1, MVT::i32, 2, 0, 1,
@@ -22517,15 +19379,17 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVG64rr), 0|OPFL_FlagInput,
1, MVT::i64, 2, 0, 1,
0,
- 48,
+ 52,
OPC_CheckInteger, 15,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_SwitchType , 12, MVT::i16,
+ OPC_SwitchType , 14, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVS16rr), 0|OPFL_FlagInput,
1, MVT::i16, 2, 0, 1,
- 12, MVT::i32,
+ 14, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVS32rr), 0|OPFL_FlagInput,
1, MVT::i32, 2, 0, 1,
@@ -22534,15 +19398,17 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVS64rr), 0|OPFL_FlagInput,
1, MVT::i64, 2, 0, 1,
0,
- 48,
+ 52,
OPC_CheckInteger, 12,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_SwitchType , 12, MVT::i16,
+ OPC_SwitchType , 14, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNS16rr), 0|OPFL_FlagInput,
1, MVT::i16, 2, 0, 1,
- 12, MVT::i32,
+ 14, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNS32rr), 0|OPFL_FlagInput,
1, MVT::i32, 2, 0, 1,
@@ -22551,15 +19417,55 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNS64rr), 0|OPFL_FlagInput,
1, MVT::i64, 2, 0, 1,
0,
- 48,
+ 52,
+ OPC_CheckInteger, 14,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_SwitchType , 14, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP16rr), 0|OPFL_FlagInput,
+ 1, MVT::i16, 2, 0, 1,
+ 14, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP32rr), 0|OPFL_FlagInput,
+ 1, MVT::i32, 2, 0, 1,
+ 12, MVT::i64,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP64rr), 0|OPFL_FlagInput,
+ 1, MVT::i64, 2, 0, 1,
+ 0,
+ 52,
+ OPC_CheckInteger, 11,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_SwitchType , 14, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP16rr), 0|OPFL_FlagInput,
+ 1, MVT::i16, 2, 0, 1,
+ 14, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP32rr), 0|OPFL_FlagInput,
+ 1, MVT::i32, 2, 0, 1,
+ 12, MVT::i64,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP64rr), 0|OPFL_FlagInput,
+ 1, MVT::i64, 2, 0, 1,
+ 0,
+ 52,
OPC_CheckInteger, 13,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_SwitchType , 12, MVT::i16,
+ OPC_SwitchType , 14, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVO16rr), 0|OPFL_FlagInput,
1, MVT::i16, 2, 0, 1,
- 12, MVT::i32,
+ 14, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVO32rr), 0|OPFL_FlagInput,
1, MVT::i32, 2, 0, 1,
@@ -22568,15 +19474,17 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVO64rr), 0|OPFL_FlagInput,
1, MVT::i64, 2, 0, 1,
0,
- 48,
+ 52,
OPC_CheckInteger, 10,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_SwitchType , 12, MVT::i16,
+ OPC_SwitchType , 14, MVT::i16,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNO16rr), 0|OPFL_FlagInput,
1, MVT::i16, 2, 0, 1,
- 12, MVT::i32,
+ 14, MVT::i32,
+ OPC_CheckPatternPredicate, 15,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNO32rr), 0|OPFL_FlagInput,
1, MVT::i32, 2, 0, 1,
@@ -22586,37 +19494,231 @@ SDNode *SelectCode(SDNode *N) {
1, MVT::i64, 2, 0, 1,
0,
0,
- 0|128,1,
+ 68,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_RecordChild3,
- OPC_SwitchType , 15, MVT::i8,
+ OPC_SwitchType , 16, MVT::i8,
OPC_EmitConvertToTarget, 2,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOV_GR8), 0|OPFL_FlagInput,
- 1, MVT::i8, 3, 0, 1, 4,
- 15, MVT::f32,
- OPC_EmitConvertToTarget, 2,
- OPC_EmitCopyToReg, 3, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOV_FR32), 0|OPFL_FlagInput,
- 1, MVT::f32, 3, 0, 1, 4,
- 15, MVT::f64,
+ 2, MVT::i8, MVT::i32, 3, 0, 1, 4,
+ 18, MVT::i32,
+ OPC_CheckPatternPredicate, 16,
OPC_EmitConvertToTarget, 2,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOV_FR64), 0|OPFL_FlagInput,
- 1, MVT::f64, 3, 0, 1, 4,
- 15, MVT::v4f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOV_GR32), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 3, 0, 1, 4,
+ 18, MVT::i16,
+ OPC_CheckPatternPredicate, 16,
OPC_EmitConvertToTarget, 2,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOV_V4F32), 0|OPFL_FlagInput,
- 1, MVT::v4f32, 3, 0, 1, 4,
- 15, MVT::v2f64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOV_GR16), 0|OPFL_FlagInput,
+ 2, MVT::i16, MVT::i32, 3, 0, 1, 4,
+ 0,
+ 60|128,3,
+ OPC_MoveChild, 2,
+ OPC_Scope, 54,
+ OPC_CheckInteger, 2,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_SwitchType , 14, MVT::f32,
+ OPC_CheckPatternPredicate, 17,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVB_Fp32), 0|OPFL_FlagInput,
+ 1, MVT::f32, 2, 0, 1,
+ 14, MVT::f64,
+ OPC_CheckPatternPredicate, 18,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVB_Fp64), 0|OPFL_FlagInput,
+ 1, MVT::f64, 2, 0, 1,
+ 14, MVT::f80,
+ OPC_CheckPatternPredicate, 15,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVB_Fp80), 0|OPFL_FlagInput,
+ 1, MVT::f80, 2, 0, 1,
+ 0,
+ 54,
+ OPC_CheckInteger, 3,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_SwitchType , 14, MVT::f32,
+ OPC_CheckPatternPredicate, 17,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE_Fp32), 0|OPFL_FlagInput,
+ 1, MVT::f32, 2, 0, 1,
+ 14, MVT::f64,
+ OPC_CheckPatternPredicate, 18,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE_Fp64), 0|OPFL_FlagInput,
+ 1, MVT::f64, 2, 0, 1,
+ 14, MVT::f80,
+ OPC_CheckPatternPredicate, 15,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVBE_Fp80), 0|OPFL_FlagInput,
+ 1, MVT::f80, 2, 0, 1,
+ 0,
+ 54,
+ OPC_CheckInteger, 4,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_SwitchType , 14, MVT::f32,
+ OPC_CheckPatternPredicate, 17,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVE_Fp32), 0|OPFL_FlagInput,
+ 1, MVT::f32, 2, 0, 1,
+ 14, MVT::f64,
+ OPC_CheckPatternPredicate, 18,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVE_Fp64), 0|OPFL_FlagInput,
+ 1, MVT::f64, 2, 0, 1,
+ 14, MVT::f80,
+ OPC_CheckPatternPredicate, 15,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVE_Fp80), 0|OPFL_FlagInput,
+ 1, MVT::f80, 2, 0, 1,
+ 0,
+ 54,
+ OPC_CheckInteger, 14,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_SwitchType , 14, MVT::f32,
+ OPC_CheckPatternPredicate, 17,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP_Fp32), 0|OPFL_FlagInput,
+ 1, MVT::f32, 2, 0, 1,
+ 14, MVT::f64,
+ OPC_CheckPatternPredicate, 18,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP_Fp64), 0|OPFL_FlagInput,
+ 1, MVT::f64, 2, 0, 1,
+ 14, MVT::f80,
+ OPC_CheckPatternPredicate, 15,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVP_Fp80), 0|OPFL_FlagInput,
+ 1, MVT::f80, 2, 0, 1,
+ 0,
+ 54,
+ OPC_CheckInteger, 1,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_SwitchType , 14, MVT::f32,
+ OPC_CheckPatternPredicate, 17,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNB_Fp32), 0|OPFL_FlagInput,
+ 1, MVT::f32, 2, 0, 1,
+ 14, MVT::f64,
+ OPC_CheckPatternPredicate, 18,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNB_Fp64), 0|OPFL_FlagInput,
+ 1, MVT::f64, 2, 0, 1,
+ 14, MVT::f80,
+ OPC_CheckPatternPredicate, 15,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNB_Fp80), 0|OPFL_FlagInput,
+ 1, MVT::f80, 2, 0, 1,
+ 0,
+ 54,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_SwitchType , 14, MVT::f32,
+ OPC_CheckPatternPredicate, 17,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNBE_Fp32), 0|OPFL_FlagInput,
+ 1, MVT::f32, 2, 0, 1,
+ 14, MVT::f64,
+ OPC_CheckPatternPredicate, 18,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNBE_Fp64), 0|OPFL_FlagInput,
+ 1, MVT::f64, 2, 0, 1,
+ 14, MVT::f80,
+ OPC_CheckPatternPredicate, 15,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNBE_Fp80), 0|OPFL_FlagInput,
+ 1, MVT::f80, 2, 0, 1,
+ 0,
+ 54,
+ OPC_CheckInteger, 9,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_SwitchType , 14, MVT::f32,
+ OPC_CheckPatternPredicate, 17,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE_Fp32), 0|OPFL_FlagInput,
+ 1, MVT::f32, 2, 0, 1,
+ 14, MVT::f64,
+ OPC_CheckPatternPredicate, 18,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE_Fp64), 0|OPFL_FlagInput,
+ 1, MVT::f64, 2, 0, 1,
+ 14, MVT::f80,
+ OPC_CheckPatternPredicate, 15,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNE_Fp80), 0|OPFL_FlagInput,
+ 1, MVT::f80, 2, 0, 1,
+ 0,
+ 54,
+ OPC_CheckInteger, 11,
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_SwitchType , 14, MVT::f32,
+ OPC_CheckPatternPredicate, 17,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP_Fp32), 0|OPFL_FlagInput,
+ 1, MVT::f32, 2, 0, 1,
+ 14, MVT::f64,
+ OPC_CheckPatternPredicate, 18,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP_Fp64), 0|OPFL_FlagInput,
+ 1, MVT::f64, 2, 0, 1,
+ 14, MVT::f80,
+ OPC_CheckPatternPredicate, 15,
+ OPC_EmitCopyToReg, 2, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOVNP_Fp80), 0|OPFL_FlagInput,
+ 1, MVT::f80, 2, 0, 1,
+ 0,
+ 0,
+ 48|128,1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_RecordChild3,
+ OPC_SwitchType , 37, MVT::f32,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 16,
+ OPC_EmitConvertToTarget, 2,
+ OPC_EmitCopyToReg, 3, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOV_RFP32), 0|OPFL_FlagInput,
+ 2, MVT::f32, MVT::i32, 3, 0, 1, 4,
+ 15,
+ OPC_EmitConvertToTarget, 2,
+ OPC_EmitCopyToReg, 3, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOV_FR32), 0|OPFL_FlagInput,
+ 1, MVT::f32, 3, 0, 1, 4,
+ 0,
+ 37, MVT::f64,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 16,
+ OPC_EmitConvertToTarget, 2,
+ OPC_EmitCopyToReg, 3, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOV_RFP64), 0|OPFL_FlagInput,
+ 2, MVT::f64, MVT::i32, 3, 0, 1, 4,
+ 15,
+ OPC_EmitConvertToTarget, 2,
+ OPC_EmitCopyToReg, 3, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOV_FR64), 0|OPFL_FlagInput,
+ 1, MVT::f64, 3, 0, 1, 4,
+ 0,
+ 18, MVT::f80,
+ OPC_CheckPatternPredicate, 16,
OPC_EmitConvertToTarget, 2,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOV_V2F64), 0|OPFL_FlagInput,
- 1, MVT::v2f64, 3, 0, 1, 4,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOV_RFP80), 0|OPFL_FlagInput,
+ 2, MVT::f80, MVT::i32, 3, 0, 1, 4,
15, MVT::v2i64,
OPC_EmitConvertToTarget, 2,
OPC_EmitCopyToReg, 3, X86::EFLAGS,
@@ -22627,374 +19729,261 @@ SDNode *SelectCode(SDNode *N) {
OPC_EmitCopyToReg, 3, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOV_V1I64), 0|OPFL_FlagInput,
1, MVT::v1i64, 3, 0, 1, 4,
+ 15, MVT::v4f32,
+ OPC_EmitConvertToTarget, 2,
+ OPC_EmitCopyToReg, 3, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOV_V4F32), 0|OPFL_FlagInput,
+ 1, MVT::v4f32, 3, 0, 1, 4,
+ 15, MVT::v2f64,
+ OPC_EmitConvertToTarget, 2,
+ OPC_EmitCopyToReg, 3, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMOV_V2F64), 0|OPFL_FlagInput,
+ 1, MVT::v2f64, 3, 0, 1, 4,
0,
0,
0,
- 88|128,6, ISD::MUL,
- OPC_Scope, 50|128,1,
+ 3|128,4, TARGET_OPCODE(X86ISD::SMUL),
+ OPC_Scope, 40|128,1,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 77,
- OPC_CheckPredicate, 11,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 71,
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
- OPC_SwitchType , 22, MVT::i16,
+ OPC_SwitchType , 20, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rmi8), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i16, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 22, MVT::i32,
+ 20, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL32rmi8), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i32, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 22, MVT::i64,
+ 20, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rmi8), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i64, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
0,
- 27,
- OPC_CheckPredicate, 12,
+ 25,
+ OPC_CheckPredicate, 10,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rmi32), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i64, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 51,
+ 47,
OPC_MoveParent,
- OPC_SwitchType , 22, MVT::i16,
+ OPC_SwitchType , 20, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rmi), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i16, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 22, MVT::i32,
+ 20, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL32rmi), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i32, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
0,
0,
- 75,
+ 77,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_SwitchType , 27, MVT::v2i64,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_SwitchType , 18, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULLWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 25, MVT::v1i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 8,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMULLWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
0,
- 76,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ 77,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_SwitchType , 28, MVT::v2i64,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 18, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULLWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
- 26, MVT::v1i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 8,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 18, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMULLWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 18, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
0,
- 12|128,1,
+ 57|128,1,
OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 27,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitCopyToReg, 0, X86::AL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL8m), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 5, 3, 4, 5, 6, 7,
- 69,
- OPC_MoveParent,
- OPC_SwitchType , 20, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 20, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 20, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 26,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULLDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 113,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 70,
+ OPC_Scope, 31,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 2,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 20, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 20, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ OPC_SwitchType , 10, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16rr), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 0,
+ 10, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rr), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 0,
0,
- 27,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
+ 20|128,1,
OPC_RecordChild1,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULLDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 57,
- OPC_RecordNode,
- OPC_SwitchType , 36, MVT::i32,
- OPC_Scope, 16,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/3, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA32r), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 16,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/3, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64_32r), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 0,
- 14, MVT::i64,
- OPC_CheckComplexPat, /*CP*/4, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64r), 0,
- 1, MVT::i64, 4, 1, 2, 3, 4,
- 0,
- 77|128,1,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_Scope, 104,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 47,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_SwitchType , 12, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rri8), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 2,
- 12, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL32rri8), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 2,
- 12, MVT::i64,
+ OPC_Scope, 105,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rri8), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL32rri8), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rri8), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 17,
+ OPC_CheckPredicate, 10,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rri8), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rri32), 0,
2, MVT::i64, MVT::i32, 2, 0, 2,
+ 31,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rri), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL32rri), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 0,
0,
- 17,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
+ 12,
+ OPC_CheckType, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rr), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL32rr), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 12,
OPC_CheckType, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rri32), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 2,
- 31,
- OPC_MoveParent,
- OPC_SwitchType , 12, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rri), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 2,
- 12, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL32rri), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 2,
- 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rr), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
0,
- 14,
- OPC_CheckType, MVT::i8,
- OPC_EmitCopyToReg, 0, X86::AL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL8r), 0|OPFL_FlagInput,
- 2, MVT::i8, MVT::i32, 1, 1,
- 12,
- OPC_CheckType, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rr), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL32rr), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rr), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULLWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULLDrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMULLWrr), 0,
- 1, MVT::v4i16, 2, 0, 1,
0,
0,
- 100|128,1, X86ISD::BT,
- OPC_Scope, 116,
+ 101|128,1, TARGET_OPCODE(X86ISD::BT),
+ OPC_Scope, 114,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_SwitchType , 32, MVT::i16,
- OPC_CheckPredicate, 6,
+ OPC_SwitchType , 31, MVT::i16,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::BT16mi8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 32, MVT::i32,
- OPC_CheckPredicate, 3,
+ 31, MVT::i32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::BT32mi8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 34, MVT::i64,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 33, MVT::i64,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::BT64mi8), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
0,
- 108,
+ 111,
OPC_RecordChild0,
- OPC_Scope, 34,
+ OPC_Scope, 35,
OPC_CheckChild0Type, MVT::i16,
OPC_RecordChild1,
- OPC_Scope, 18,
+ OPC_Scope, 19,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::BT16ri8), 0,
@@ -23003,13 +19992,13 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::BT16rr), 0,
1, MVT::i32, 2, 0, 1,
0,
- 34,
+ 35,
OPC_CheckChild0Type, MVT::i32,
OPC_RecordChild1,
- OPC_Scope, 18,
+ OPC_Scope, 19,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::BT32ri8), 0,
@@ -23018,13 +20007,13 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::BT32rr), 0,
1, MVT::i32, 2, 0, 1,
0,
- 34,
+ 35,
OPC_CheckChild0Type, MVT::i64,
OPC_RecordChild1,
- OPC_Scope, 18,
+ OPC_Scope, 19,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_EmitConvertToTarget, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::BT64ri8), 0,
@@ -23035,188 +20024,206 @@ SDNode *SelectCode(SDNode *N) {
0,
0,
0,
- 89|128,4, X86ISD::SMUL,
- OPC_Scope, 93|128,1,
+ 124|128,7, TARGET_OPCODE(ISD::MUL),
+ OPC_Scope, 87|128,1,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 35,
- OPC_CheckPredicate, 6,
+ OPC_Scope, 34,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rmi8), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i16, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 35,
- OPC_CheckPredicate, 3,
+ 34,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL32rmi8), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i32, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 68,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 65,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 27,
- OPC_CheckPredicate, 11,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 25,
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rmi8), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i64, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 27,
- OPC_CheckPredicate, 12,
+ 25,
+ OPC_CheckPredicate, 10,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rmi32), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i64, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
0,
- 33,
- OPC_CheckPredicate, 6,
+ 32,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rmi), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i16, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
- 33,
- OPC_CheckPredicate, 3,
+ 32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL32rmi), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i32, MVT::i32, 6, 3, 4, 5, 6, 7, 8,
0,
- 93,
+ 116,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 25,
- OPC_CheckPredicate, 6,
+ OPC_Scope, 27,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitCopyToReg, 0, X86::AL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL8m), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 23,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rm), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 25,
- OPC_CheckPredicate, 3,
+ 23,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL32rm), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 27,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 25,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rm), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
0,
- 95,
+ 90,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 26,
- OPC_CheckPredicate, 6,
+ OPC_Scope, 24,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rm), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 3,
+ 24,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL32rm), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 28,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 26,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rm), 0|OPFL_Chain|OPFL_MemRefs,
2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
0,
- 56|128,1,
- OPC_RecordChild0,
- OPC_Scope, 31,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 2,
- OPC_MoveParent,
- OPC_SwitchType , 10, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16rr), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 0,
- 10, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rr), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 0,
+ 60,
+ OPC_RecordNode,
+ OPC_SwitchType , 38, MVT::i32,
+ OPC_Scope, 17,
+ OPC_CheckPatternPredicate, 3,
+ OPC_CheckComplexPat, /*CP*/3, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA32r), 0,
+ 1, MVT::i32, 5, 1, 2, 3, 4, 5,
+ 17,
+ OPC_CheckPatternPredicate, 4,
+ OPC_CheckComplexPat, /*CP*/3, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64_32r), 0,
+ 1, MVT::i32, 5, 1, 2, 3, 4, 5,
0,
- 19|128,1,
+ 15, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/4, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64r), 0,
+ 1, MVT::i64, 5, 1, 2, 3, 4, 5,
+ 0,
+ 55|128,2,
+ OPC_RecordChild0,
+ OPC_Scope, 35|128,1,
OPC_RecordChild1,
- OPC_Scope, 104,
+ OPC_Scope, 105,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_Scope, 47,
- OPC_CheckPredicate, 11,
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_SwitchType , 12, MVT::i16,
OPC_EmitConvertToTarget, 1,
@@ -23232,7 +20239,7 @@ SDNode *SelectCode(SDNode *N) {
2, MVT::i64, MVT::i32, 2, 0, 2,
0,
17,
- OPC_CheckPredicate, 12,
+ OPC_CheckPredicate, 10,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_EmitConvertToTarget, 1,
@@ -23250,6 +20257,11 @@ SDNode *SelectCode(SDNode *N) {
2, MVT::i32, MVT::i32, 2, 0, 2,
0,
0,
+ 14,
+ OPC_CheckType, MVT::i8,
+ OPC_EmitCopyToReg, 0, X86::AL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL8r), 0|OPFL_FlagInput,
+ 2, MVT::i8, MVT::i32, 1, 1,
12,
OPC_CheckType, MVT::i16,
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL16rr), 0,
@@ -23263,903 +20275,1011 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::IMUL64rr), 0,
2, MVT::i64, MVT::i32, 2, 0, 1,
0,
- 0,
- 0,
- 105, ISD::PREFETCH,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_MoveChild, 3,
- OPC_Scope, 22,
- OPC_CheckInteger, 3,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PREFETCHT0), 0|OPFL_Chain,
- 0, 5, 2, 3, 4, 5, 6,
- 22,
- OPC_CheckInteger, 2,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PREFETCHT1), 0|OPFL_Chain,
- 0, 5, 2, 3, 4, 5, 6,
- 22,
- OPC_CheckInteger, 1,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PREFETCHT2), 0|OPFL_Chain,
- 0, 5, 2, 3, 4, 5, 6,
- 22,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PREFETCHNTA), 0|OPFL_Chain,
- 0, 5, 2, 3, 4, 5, 6,
- 0,
- 4|128,1, ISD::INSERT_VECTOR_ELT,
- OPC_RecordChild0,
- OPC_Scope, 86,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 37,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PINSRQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 7, 0, 4, 5, 6, 7, 8, 9,
- 35,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PINSRDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 7, 0, 4, 5, 6, 7, 8, 9,
- 0,
- 41,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_SwitchType , 14, MVT::v2i64,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PINSRQrr), 0,
- 1, MVT::v2i64, 3, 0, 1, 3,
- 14, MVT::v4i32,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PINSRDrr), 0,
- 1, MVT::v4i32, 3, 0, 1, 3,
- 0,
- 0,
- 60|128,15, ISD::OR,
- OPC_Scope, 39|128,1,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 87, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_Scope, 41,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 41,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 72, ISD::LOAD,
+ 13|128,1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_Scope, 26,
- OPC_CheckChild0Type, MVT::v4f32,
+ OPC_CheckPredicate, 3,
+ OPC_SwitchType , 94, MVT::v2i64,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckChild0Type, MVT::v2f64,
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ OPC_SwitchType , 42, MVT::v8i16,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULLWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULLWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::v4i32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULLDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULLDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 23, MVT::v1i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i16,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMULLWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
0,
0,
- 31|128,1,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ 15|128,1,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 91,
+ OPC_CheckPredicate, 3,
+ OPC_SwitchType , 95, MVT::v2i64,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_SwitchType , 20, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 20, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 20, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 20, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 42, MVT::v8i16,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULLWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULLWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::v4i32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULLDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULLDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
0,
- 26,
- OPC_CheckPredicate, 23,
+ 24, MVT::v1i64,
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PORrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 24,
OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PORrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 28|128,9,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 29|128,1, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 92,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::i8,
+ OPC_CheckType, MVT::v4i16,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMULLWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 73,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 26, MVT::v8i16,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULLWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULLWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
+ 26, MVT::v4i32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPMULLDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PMULLDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
+ 11, MVT::v4i16,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PMULLWrr), 0,
+ 1, MVT::v4i16, 2, 0, 1,
+ 0,
+ 0,
+ 33|128,12, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_Scope, 102|128,5,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_Scope, 108,
+ OPC_CheckPredicate, 3,
+ OPC_SwitchType , 40, MVT::i32,
+ OPC_Scope, 18,
+ OPC_CheckPredicate, 23,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 20, MVT::i16,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::GS_MOV32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPredicate, 24,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 20, MVT::i32,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FS_MOV32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 40, MVT::i64,
+ OPC_Scope, 18,
+ OPC_CheckPredicate, 23,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 20, MVT::i64,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64GSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPredicate, 24,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64FSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
0,
- 27,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
+ 18, MVT::i8,
+ OPC_CheckPredicate, 4,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PORrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 25,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i8, 5, 2, 3, 4, 5, 6,
+ 0,
+ 20,
+ OPC_CheckPredicate, 7,
+ OPC_CheckType, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i16, 5, 2, 3, 4, 5, 6,
+ 20,
+ OPC_CheckPredicate, 8,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 66,
+ OPC_CheckPredicate, 25,
+ OPC_Scope, 40,
+ OPC_CheckPredicate, 26,
+ OPC_SwitchType , 16, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX16rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i16, 5, 2, 3, 4, 5, 6,
+ 16, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX32rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 20,
+ OPC_CheckPredicate, 27,
+ OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PORrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX32rm16), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
0,
- 121|128,3, ISD::SRL,
- OPC_RecordChild0,
- OPC_Scope, 82|128,1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_Scope, 104,
- OPC_RecordChild0,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i8,
- OPC_Scope, 48,
- OPC_CheckChild0Type, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_EmitCopyToReg, 2, X86::ECX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32rrCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 2, 0, 3,
- 48,
- OPC_CheckChild0Type, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_EmitCopyToReg, 2, X86::CX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16rrCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 2, 0, 3,
- 0,
- 98,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_Scope, 44,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_EmitCopyToReg, 2, X86::ECX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32rrCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 2, 3, 0,
- 44,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_EmitCopyToReg, 2, X86::CX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16rrCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 2, 3, 0,
- 0,
+ 125,
+ OPC_CheckPredicate, 28,
+ OPC_Scope, 40,
+ OPC_CheckPredicate, 29,
+ OPC_SwitchType , 16, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX16rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i16, 5, 2, 3, 4, 5, 6,
+ 16, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
0,
- 72,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_Scope, 26,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_EmitCopyToReg, 2, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32rrCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 2, 0, 3,
- 26,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_EmitCopyToReg, 2, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16rrCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 2, 0, 3,
+ 20,
+ OPC_CheckPredicate, 30,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rm16), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 58,
+ OPC_CheckPredicate, 31,
+ OPC_SwitchType , 16, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i8, 5, 2, 3, 4, 5, 6,
+ 16, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX16rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i16, 5, 2, 3, 4, 5, 6,
+ 16, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
0,
- 78,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_Scope, 34,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_EmitCopyToReg, 2, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32rrCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 2, 3, 0,
- 34,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_EmitCopyToReg, 2, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16rrCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 2, 3, 0,
+ 0,
+ 125,
+ OPC_CheckPredicate, 32,
+ OPC_Scope, 58,
+ OPC_CheckPredicate, 33,
+ OPC_SwitchType , 16, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i8, 5, 2, 3, 4, 5, 6,
+ 16, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX16rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i16, 5, 2, 3, 4, 5, 6,
+ 16, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
0,
- 68,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 7,
- OPC_SwitchType , 12, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32rri8), 0,
- 1, MVT::i32, 3, 0, 2, 3,
- 12, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16rri8), 0,
- 1, MVT::i16, 3, 0, 2, 3,
- 12, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD64rri8), 0,
- 1, MVT::i64, 3, 0, 2, 3,
+ 40,
+ OPC_CheckPredicate, 34,
+ OPC_SwitchType , 16, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX16rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i16, 5, 2, 3, 4, 5, 6,
+ 16, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
0,
- 68,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SHL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 10,
- OPC_SwitchType , 12, MVT::i32,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32rri8), 0,
- 1, MVT::i32, 3, 1, 0, 3,
- 12, MVT::i16,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16rri8), 0,
- 1, MVT::i16, 3, 1, 0, 3,
- 12, MVT::i64,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD64rri8), 0,
- 1, MVT::i64, 3, 1, 0, 3,
+ 20,
+ OPC_CheckPredicate, 35,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rm16), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 20,
+ OPC_CheckPredicate, 3,
+ OPC_CheckType, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 63,
+ OPC_CheckPredicate, 25,
+ OPC_CheckType, MVT::i64,
+ OPC_Scope, 18,
+ OPC_CheckPredicate, 26,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPredicate, 27,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rm16), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPredicate, 36,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rm32), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 82,
+ OPC_CheckPredicate, 28,
+ OPC_CheckType, MVT::i64,
+ OPC_Scope, 18,
+ OPC_CheckPredicate, 29,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPredicate, 30,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rm16), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPredicate, 37,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rm32), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPredicate, 31,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 98,
+ OPC_CheckPredicate, 32,
+ OPC_CheckType, MVT::i64,
+ OPC_Scope, 18,
+ OPC_CheckPredicate, 33,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPredicate, 34,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rm8), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPredicate, 35,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rm16), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 34,
+ OPC_CheckPredicate, 38,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitInteger, MVT::i64, 0,
+ OPC_EmitNode, TARGET_OPCODE(X86::MOV32rm), 0|OPFL_Chain,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ OPC_EmitInteger, MVT::i32, X86::sub_32bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 3, 7, 8, 9,
+ 0,
+ 0,
+ 30,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(X86ISD::Wrapper),
+ OPC_RecordChild0,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::TargetGlobalTLSAddress),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 1, 1,
+ 20|128,6,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_Scope, 105,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_SwitchType , 59, MVT::f64,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp64m), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
0,
+ 18, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp32m), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ 16, MVT::f80,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp80m), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f80, 5, 2, 3, 4, 5, 6,
0,
- 121|128,3, ISD::SHL,
- OPC_RecordChild0,
- OPC_Scope, 82|128,1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_Scope, 104,
- OPC_RecordChild0,
- OPC_RecordChild0,
- OPC_CheckType, MVT::i8,
- OPC_Scope, 48,
- OPC_CheckChild0Type, MVT::i32,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_EmitCopyToReg, 2, X86::ECX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32rrCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 2, 0, 3,
- 48,
- OPC_CheckChild0Type, MVT::i16,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_EmitCopyToReg, 2, X86::CX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16rrCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 2, 0, 3,
- 0,
- 98,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_Scope, 44,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_EmitCopyToReg, 2, X86::ECX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32rrCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 2, 3, 0,
- 44,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TRUNCATE,
- OPC_MoveChild, 0,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_EmitCopyToReg, 2, X86::CX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16rrCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 2, 3, 0,
+ 68,
+ OPC_CheckPredicate, 32,
+ OPC_SwitchType , 20, MVT::f64,
+ OPC_CheckPredicate, 39,
+ OPC_CheckPatternPredicate, 8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp32m64), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ 40, MVT::f80,
+ OPC_Scope, 18,
+ OPC_CheckPredicate, 40,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp64m80), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f80, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPredicate, 39,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp32m80), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f80, 5, 2, 3, 4, 5, 6,
+ 0,
+ 0,
+ 46,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::f32,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 24,
+ OPC_CheckPredicate, 32,
+ OPC_CheckPredicate, 39,
+ OPC_CheckType, MVT::f64,
+ OPC_CheckPatternPredicate, 19,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSS2SDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ 46,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 41,
+ OPC_SwitchType , 18, MVT::f32,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsMOVAPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ 18, MVT::f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsMOVAPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 35,
+ OPC_CheckPredicate, 32,
+ OPC_CheckPredicate, 39,
+ OPC_CheckType, MVT::f64,
+ OPC_CheckPatternPredicate, 20,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitNode, TARGET_OPCODE(X86::MOVSSrm), 0|OPFL_Chain,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ OPC_EmitNode, TARGET_OPCODE(X86::CVTSS2SDrr), 0|OPFL_MemRefs,
+ 1, MVT::f64, 1, 7,
+ OPC_CompleteMatch, 1, 8,
+
+ 67|128,3,
+ OPC_CheckPredicate, 3,
+ OPC_SwitchType , 85, MVT::v4i32,
+ OPC_Scope, 20,
+ OPC_CheckPredicate, 41,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVAPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 61,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 20,
+ OPC_CheckPredicate, 41,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVAPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
0,
0,
- 72,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckChild1Type, MVT::i8,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_Scope, 26,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_EmitCopyToReg, 2, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32rrCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 2, 0, 3,
- 26,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_EmitCopyToReg, 2, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16rrCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 2, 0, 3,
+ 82, MVT::v2i64,
+ OPC_Scope, 20,
+ OPC_CheckPredicate, 41,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVAPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 20,
+ OPC_CheckPredicate, 41,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVAPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
0,
- 78,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SUB,
- OPC_MoveChild, 0,
- OPC_Scope, 34,
- OPC_CheckInteger, 32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_EmitCopyToReg, 2, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32rrCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 2, 3, 0,
- 34,
- OPC_CheckInteger, 16,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckSame, 1,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_EmitCopyToReg, 2, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16rrCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 2, 3, 0,
+ 18, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 5, 2, 3, 4, 5, 6,
+ 82, MVT::v4f32,
+ OPC_Scope, 20,
+ OPC_CheckPredicate, 41,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVAPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 20,
+ OPC_CheckPredicate, 41,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVAPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
0,
- 68,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 10,
- OPC_SwitchType , 12, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32rri8), 0,
- 1, MVT::i32, 3, 0, 2, 3,
- 12, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16rri8), 0,
- 1, MVT::i16, 3, 0, 2, 3,
- 12, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD64rri8), 0,
- 1, MVT::i64, 3, 0, 2, 3,
+ 82, MVT::v2f64,
+ OPC_Scope, 20,
+ OPC_CheckPredicate, 41,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVAPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 20,
+ OPC_CheckPredicate, 41,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVAPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
0,
- 68,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPredicate, 7,
- OPC_SwitchType , 12, MVT::i32,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32rri8), 0,
- 1, MVT::i32, 3, 1, 0, 3,
- 12, MVT::i16,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16rri8), 0,
- 1, MVT::i16, 3, 1, 0, 3,
- 12, MVT::i64,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD64rri8), 0,
- 1, MVT::i64, 3, 1, 0, 3,
+ 42, MVT::v8f32,
+ OPC_Scope, 20,
+ OPC_CheckPredicate, 41,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVAPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 42, MVT::v4f64,
+ OPC_Scope, 20,
+ OPC_CheckPredicate, 41,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVAPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 5, 2, 3, 4, 5, 6,
0,
0,
0,
- 57,
+ 0,
+ 4|128,1, TARGET_OPCODE(X86ISD::BSF),
+ OPC_Scope, 86,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
OPC_RecordNode,
- OPC_SwitchType , 36, MVT::i32,
- OPC_Scope, 16,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/3, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA32r), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 16,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/3, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64_32r), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 0,
- 14, MVT::i64,
- OPC_CheckComplexPat, /*CP*/4, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64r), 0,
- 1, MVT::i64, 4, 1, 2, 3, 4,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_SwitchType , 22, MVT::i16,
+ OPC_CheckPredicate, 7,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BSF16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 22, MVT::i32,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BSF32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 24, MVT::i64,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BSF64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 5, 2, 3, 4, 5, 6,
0,
- 28|128,1,
+ 42,
OPC_RecordChild0,
+ OPC_SwitchType , 11, MVT::i16,
+ OPC_CheckChild0Type, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BSF16rr), 0,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 11, MVT::i32,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BSF32rr), 0,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 11, MVT::i64,
+ OPC_CheckChild0Type, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BSF64rr), 0,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 0,
+ 0,
+ 4|128,1, TARGET_OPCODE(X86ISD::BSR),
+ OPC_Scope, 86,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
OPC_RecordChild1,
- OPC_Scope, 110,
+ OPC_CheckPredicate, 2,
+ OPC_SwitchType , 22, MVT::i16,
+ OPC_CheckPredicate, 7,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BSR16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 22, MVT::i32,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BSR32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 24, MVT::i64,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BSR64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 42,
+ OPC_RecordChild0,
+ OPC_SwitchType , 11, MVT::i16,
+ OPC_CheckChild0Type, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BSR16rr), 0,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 11, MVT::i32,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BSR32rr), 0,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 11, MVT::i64,
+ OPC_CheckChild0Type, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BSR64rr), 0,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 0,
+ 0,
+ 25|128,3, TARGET_OPCODE(X86ISD::AND),
+ OPC_Scope, 112,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_Scope, 25,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 23,
+ OPC_CheckPredicate, 7,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 23,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 23,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 115,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_Scope, 26,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 24,
+ OPC_CheckPredicate, 7,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 24,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 24,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 48|128,1,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_Scope, 119,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 49,
- OPC_CheckPredicate, 11,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
- OPC_CheckPredicate, 71,
OPC_SwitchType , 12, MVT::i16,
OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16ri8), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16ri8), 0,
2, MVT::i16, MVT::i32, 2, 0, 2,
12, MVT::i32,
OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri8), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32ri8), 0,
2, MVT::i32, MVT::i32, 2, 0, 2,
12, MVT::i64,
OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64ri8), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64ri8), 0,
2, MVT::i64, MVT::i32, 2, 0, 2,
0,
- 19,
- OPC_CheckPredicate, 12,
+ 17,
+ OPC_CheckPredicate, 10,
OPC_MoveParent,
- OPC_CheckPredicate, 71,
OPC_CheckType, MVT::i64,
OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64ri32), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64ri32), 0,
2, MVT::i64, MVT::i32, 2, 0, 2,
- 33,
+ 45,
OPC_MoveParent,
- OPC_CheckPredicate, 71,
- OPC_SwitchType , 12, MVT::i16,
+ OPC_SwitchType , 12, MVT::i8,
OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16ri), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8ri), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 2,
+ 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16ri), 0,
2, MVT::i16, MVT::i32, 2, 0, 2,
12, MVT::i32,
OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32ri), 0,
2, MVT::i32, MVT::i32, 2, 0, 2,
0,
0,
- 40,
- OPC_CheckPredicate, 71,
- OPC_SwitchType , 10, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16rr), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 1,
- 10, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rr), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 1,
- 10, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rr), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 1,
- 0,
+ 12,
+ OPC_CheckType, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8rr), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16rr), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32rr), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64rr), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
0,
- 29,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ 0,
+ 120|128,2, TARGET_OPCODE(X86ISD::OR),
+ OPC_Scope, 97,
OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ORPDrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 90|128,1,
+ OPC_SwitchType , 18, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 97,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 18, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 18, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 18, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 18, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 48|128,1,
OPC_RecordChild0,
OPC_RecordChild1,
- OPC_Scope, 118,
+ OPC_Scope, 119,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_Scope, 47,
- OPC_CheckPredicate, 11,
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_SwitchType , 12, MVT::i16,
OPC_EmitConvertToTarget, 1,
@@ -24175,7 +21295,7 @@ SDNode *SelectCode(SDNode *N) {
2, MVT::i64, MVT::i32, 2, 0, 2,
0,
17,
- OPC_CheckPredicate, 12,
+ OPC_CheckPredicate, 10,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_EmitConvertToTarget, 1,
@@ -24213,245 +21333,81 @@ SDNode *SelectCode(SDNode *N) {
OPC_CheckType, MVT::i64,
OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64rr), 0,
2, MVT::i64, MVT::i32, 2, 0, 1,
- 28,
- OPC_CheckType, MVT::v2i64,
- OPC_Scope, 11,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ORPSrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 11,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PORrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 0,
- 13,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PORrr), 0,
- 1, MVT::v1i64, 2, 0, 1,
0,
0,
- 35|128,6, ISD::XOR,
- OPC_Scope, 39|128,1,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 87, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_Scope, 41,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 41,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 72, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_Scope, 26,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 0,
- 31|128,1,
+ 120|128,2, TARGET_OPCODE(X86ISD::XOR),
+ OPC_Scope, 97,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 91,
- OPC_MoveParent,
- OPC_SwitchType , 20, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 20, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 20, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 20, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 26,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_SwitchType , 18, MVT::i8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PXORrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 24,
- OPC_MoveParent,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PXORrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
0,
- 33|128,1,
+ 97,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 92,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 20, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 20, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 20, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 27,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 18, MVT::i8,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PXORrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
- 25,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 18, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PXORrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 57,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
- OPC_MoveParent,
- OPC_SwitchType , 8, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::NOT8r), 0,
- 1, MVT::i8, 1, 0,
- 8, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::NOT16r), 0,
- 1, MVT::i16, 1, 0,
- 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::NOT32r), 0,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::NOT64r), 0,
- 1, MVT::i64, 1, 0,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 18, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 18, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
0,
- 29,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XORPDrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 90|128,1,
+ 48|128,1,
OPC_RecordChild0,
OPC_RecordChild1,
- OPC_Scope, 118,
+ OPC_Scope, 119,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_Scope, 47,
- OPC_CheckPredicate, 11,
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_SwitchType , 12, MVT::i16,
OPC_EmitConvertToTarget, 1,
@@ -24467,7 +21423,7 @@ SDNode *SelectCode(SDNode *N) {
2, MVT::i64, MVT::i32, 2, 0, 2,
0,
17,
- OPC_CheckPredicate, 12,
+ OPC_CheckPredicate, 10,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_EmitConvertToTarget, 1,
@@ -24505,432 +21461,831 @@ SDNode *SelectCode(SDNode *N) {
OPC_CheckType, MVT::i64,
OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64rr), 0,
2, MVT::i64, MVT::i32, 2, 0, 1,
- 28,
- OPC_CheckType, MVT::v2i64,
- OPC_Scope, 11,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XORPSrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 11,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PXORrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 0,
- 13,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PXORrr), 0,
- 1, MVT::v1i64, 2, 0, 1,
0,
0,
- 73, X86ISD::CMPPS,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_Scope, 45,
+ 120|128,2, TARGET_OPCODE(X86ISD::ADD),
+ OPC_Scope, 97,
+ OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_SwitchType , 18, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 97,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMPPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 7, 0, 4, 5, 6, 7, 8, 9,
- 21,
OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_SwitchType , 18, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 18, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 18, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 18, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 48|128,1,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_Scope, 119,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16ri8), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri8), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64ri8), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 17,
+ OPC_CheckPredicate, 10,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64ri32), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 45,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8ri), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 2,
+ 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16ri), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 0,
+ 0,
+ 12,
+ OPC_CheckType, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8rr), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16rr), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rr), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rr), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 0,
+ 0,
+ 121|128,2, TARGET_OPCODE(ISD::ADDE),
+ OPC_CaptureFlagInput,
+ OPC_Scope, 97,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMPPSrri), 0,
- 1, MVT::v4i32, 3, 0, 1, 3,
+ OPC_SwitchType , 18, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC8rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 97,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 18, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC8rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 18, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 18, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 18, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 48|128,1,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_Scope, 119,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC16ri8), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC32ri8), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC64ri8), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 17,
+ OPC_CheckPredicate, 10,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC64ri32), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 45,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC8ri), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i8, MVT::i32, 2, 0, 2,
+ 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC16ri), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC32ri), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 0,
+ 0,
+ 12,
+ OPC_CheckType, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC8rr), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i8, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC16rr), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i16, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC32rr), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC64rr), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 0,
0,
- 73, X86ISD::CMPPD,
+ 21|128,2, TARGET_OPCODE(X86ISD::SUB),
OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_Scope, 45,
+ OPC_Scope, 96,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMPPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 7, 0, 4, 5, 6, 7, 8, 9,
- 21,
+ OPC_SwitchType , 18, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 47|128,1,
OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_Scope, 119,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16ri8), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32ri8), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64ri8), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 17,
+ OPC_CheckPredicate, 10,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64ri32), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 45,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB8ri), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 2,
+ 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16ri), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32ri), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 0,
+ 0,
+ 12,
+ OPC_CheckType, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB8rr), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16rr), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32rr), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64rr), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 0,
+ 0,
+ 22|128,2, TARGET_OPCODE(ISD::SUBE),
+ OPC_CaptureFlagInput,
+ OPC_RecordChild0,
+ OPC_Scope, 96,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CMPPDrri), 0,
- 1, MVT::v2i64, 3, 0, 1, 3,
+ OPC_SwitchType , 18, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB8rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 47|128,1,
+ OPC_RecordChild1,
+ OPC_Scope, 119,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB16ri8), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB32ri8), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB64ri8), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 17,
+ OPC_CheckPredicate, 10,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB64ri32), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 45,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB8ri), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i8, MVT::i32, 2, 0, 2,
+ 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB16ri), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB32ri), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 0,
+ 0,
+ 12,
+ OPC_CheckType, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB8rr), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i8, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB16rr), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i16, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB32rr), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB64rr), 0|OPFL_FlagInput|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 0,
0,
- 77|128,11, ISD::ADD,
- OPC_Scope, 66|128,1,
+ 97|128,1, TARGET_OPCODE(ISD::ADDC),
+ OPC_Scope, 57,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_SwitchType , 75, MVT::v2i64,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 21, MVT::v16i8,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 96, MVT::v1i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_SwitchType , 21, MVT::v8i8,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v4i16,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v2i32,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
- 0,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_SwitchType , 18, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rm), 0|OPFL_Chain|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rm), 0|OPFL_Chain|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
0,
- 67|128,1,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ 57,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_SwitchType , 76, MVT::v2i64,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 21, MVT::v16i8,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 18, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rm), 0|OPFL_Chain|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 18, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rm), 0|OPFL_Chain|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 106,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_Scope, 75,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 33,
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri8), 0|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64ri8), 0|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 17,
+ OPC_CheckPredicate, 10,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64ri32), 0|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 15,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri), 0|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
0,
- 97, MVT::v1i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 21, MVT::v8i8,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i8, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::v4i16,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::v2i32,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
+ 12,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rr), 0|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rr), 0|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 0,
+ 0,
+ 36|128,1, TARGET_OPCODE(ISD::SUBC),
+ OPC_RecordChild0,
+ OPC_Scope, 56,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_SwitchType , 18, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32rm), 0|OPFL_Chain|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 18, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64rm), 0|OPFL_Chain|OPFL_FlagOutput|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 103,
+ OPC_RecordChild1,
+ OPC_Scope, 73,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 33,
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32ri8), 0|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64ri8), 0|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 31,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32ri), 0|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64ri32), 0|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
0,
+ 12,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32rr), 0|OPFL_FlagOutput,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64rr), 0|OPFL_FlagOutput,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
0,
- 6|128,1,
+ 0,
+ 26|128,14, TARGET_OPCODE(ISD::ADD),
+ OPC_Scope, 114,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 91,
+ OPC_Scope, 25,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_SwitchType , 20, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 20, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 20, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 20, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 26,
- OPC_CheckPredicate, 23,
+ OPC_CheckType, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 23,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckType, MVT::i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 23,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 25,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
0,
- 7|128,1,
+ 117,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 92,
+ OPC_Scope, 26,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 20, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 20, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 20, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 20, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 27,
- OPC_CheckPredicate, 23,
+ OPC_CheckType, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 24,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckType, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 24,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 26,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
0,
- 57,
+ 60,
OPC_RecordNode,
- OPC_SwitchType , 36, MVT::i32,
- OPC_Scope, 16,
- OPC_CheckPatternPredicate, 2,
+ OPC_SwitchType , 38, MVT::i32,
+ OPC_Scope, 17,
+ OPC_CheckPatternPredicate, 3,
OPC_CheckComplexPat, /*CP*/3, /*#*/0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA32r), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 16,
- OPC_CheckPatternPredicate, 3,
+ 1, MVT::i32, 5, 1, 2, 3, 4, 5,
+ 17,
+ OPC_CheckPatternPredicate, 4,
OPC_CheckComplexPat, /*CP*/3, /*#*/0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64_32r), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
+ 1, MVT::i32, 5, 1, 2, 3, 4, 5,
0,
- 14, MVT::i64,
+ 15, MVT::i64,
OPC_CheckComplexPat, /*CP*/4, /*#*/0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64r), 0,
- 1, MVT::i64, 4, 1, 2, 3, 4,
+ 1, MVT::i64, 5, 1, 2, 3, 4, 5,
0,
- 85,
+ 113,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, X86ISD::Wrapper,
+ OPC_CheckOpcode, TARGET_OPCODE(X86ISD::Wrapper),
OPC_RecordChild0,
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 13, ISD::TargetConstantPool,
+ OPC_SwitchOpcode , 14, TARGET_OPCODE(ISD::TargetConstantPool),
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri), 0,
- 1, MVT::i32, 2, 0, 1,
- 13, ISD::TargetJumpTable,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 14, TARGET_OPCODE(ISD::TargetJumpTable),
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri), 0,
- 1, MVT::i32, 2, 0, 1,
- 13, ISD::TargetGlobalAddress,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 14, TARGET_OPCODE(ISD::TargetGlobalAddress),
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri), 0,
- 1, MVT::i32, 2, 0, 1,
- 13, ISD::TargetExternalSymbol,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 14, TARGET_OPCODE(ISD::TargetExternalSymbol),
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri), 0,
- 1, MVT::i32, 2, 0, 1,
- 13, ISD::TargetBlockAddress,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 14, TARGET_OPCODE(ISD::TargetBlockAddress),
OPC_MoveParent,
OPC_MoveParent,
OPC_CheckType, MVT::i32,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri), 0,
- 1, MVT::i32, 2, 0, 1,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 14, TARGET_OPCODE(ISD::TargetGlobalTLSAddress),
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64ri32), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
0,
- 89,
+ 118,
OPC_MoveChild, 0,
- OPC_CheckOpcode, X86ISD::Wrapper,
+ OPC_CheckOpcode, TARGET_OPCODE(X86ISD::Wrapper),
OPC_RecordChild0,
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 14, ISD::TargetConstantPool,
+ OPC_SwitchOpcode , 15, TARGET_OPCODE(ISD::TargetGlobalTLSAddress),
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64ri32), 0,
+ 2, MVT::i64, MVT::i32, 2, 1, 0,
+ 15, TARGET_OPCODE(ISD::TargetConstantPool),
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckType, MVT::i32,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri), 0,
- 1, MVT::i32, 2, 1, 0,
- 14, ISD::TargetJumpTable,
+ 2, MVT::i32, MVT::i32, 2, 1, 0,
+ 15, TARGET_OPCODE(ISD::TargetJumpTable),
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckType, MVT::i32,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri), 0,
- 1, MVT::i32, 2, 1, 0,
- 14, ISD::TargetGlobalAddress,
+ 2, MVT::i32, MVT::i32, 2, 1, 0,
+ 15, TARGET_OPCODE(ISD::TargetGlobalAddress),
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckType, MVT::i32,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri), 0,
- 1, MVT::i32, 2, 1, 0,
- 14, ISD::TargetExternalSymbol,
+ 2, MVT::i32, MVT::i32, 2, 1, 0,
+ 15, TARGET_OPCODE(ISD::TargetExternalSymbol),
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckType, MVT::i32,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri), 0,
- 1, MVT::i32, 2, 1, 0,
- 14, ISD::TargetBlockAddress,
+ 2, MVT::i32, MVT::i32, 2, 1, 0,
+ 15, TARGET_OPCODE(ISD::TargetBlockAddress),
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckType, MVT::i32,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri), 0,
- 1, MVT::i32, 2, 1, 0,
+ 2, MVT::i32, MVT::i32, 2, 1, 0,
0,
- 69|128,4,
+ 66|128,5,
OPC_RecordChild0,
- OPC_Scope, 32|128,2,
+ OPC_Scope, 36|128,2,
OPC_MoveChild, 1,
OPC_Scope, 83,
OPC_CheckInteger, 1,
OPC_MoveParent,
OPC_SwitchType , 26, MVT::i16,
OPC_Scope, 11,
- OPC_CheckPatternPredicate, 2,
+ OPC_CheckPatternPredicate, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::INC16r), 0,
2, MVT::i16, MVT::i32, 1, 0,
11,
- OPC_CheckPatternPredicate, 3,
+ OPC_CheckPatternPredicate, 4,
OPC_MorphNodeTo, TARGET_OPCODE(X86::INC64_16r), 0,
2, MVT::i16, MVT::i32, 1, 0,
0,
26, MVT::i32,
OPC_Scope, 11,
- OPC_CheckPatternPredicate, 2,
+ OPC_CheckPatternPredicate, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::INC32r), 0,
2, MVT::i32, MVT::i32, 1, 0,
11,
- OPC_CheckPatternPredicate, 3,
+ OPC_CheckPatternPredicate, 4,
OPC_MorphNodeTo, TARGET_OPCODE(X86::INC64_32r), 0,
2, MVT::i32, MVT::i32, 1, 0,
0,
@@ -24946,21 +22301,21 @@ SDNode *SelectCode(SDNode *N) {
OPC_MoveParent,
OPC_SwitchType , 26, MVT::i16,
OPC_Scope, 11,
- OPC_CheckPatternPredicate, 2,
+ OPC_CheckPatternPredicate, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC16r), 0,
2, MVT::i16, MVT::i32, 1, 0,
11,
- OPC_CheckPatternPredicate, 3,
+ OPC_CheckPatternPredicate, 4,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC64_16r), 0,
2, MVT::i16, MVT::i32, 1, 0,
0,
26, MVT::i32,
OPC_Scope, 11,
- OPC_CheckPatternPredicate, 2,
+ OPC_CheckPatternPredicate, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC32r), 0,
2, MVT::i32, MVT::i32, 1, 0,
11,
- OPC_CheckPatternPredicate, 3,
+ OPC_CheckPatternPredicate, 4,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC64_32r), 0,
2, MVT::i32, MVT::i32, 1, 0,
0,
@@ -24971,37 +22326,37 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC64r), 0,
2, MVT::i64, MVT::i32, 1, 0,
0,
- 75,
+ 78,
OPC_CheckInteger, 0|128,1,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::i16,
+ OPC_SwitchType , 22, MVT::i16,
OPC_EmitInteger, MVT::i16, 0|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16ri8), 0,
- 1, MVT::i16, 2, 0, 1,
- 21, MVT::i32,
+ 2, MVT::i16, MVT::i32, 2, 0, 1,
+ 22, MVT::i32,
OPC_EmitInteger, MVT::i32, 0|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32ri8), 0,
- 1, MVT::i32, 2, 0, 1,
- 21, MVT::i64,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 22, MVT::i64,
OPC_EmitInteger, MVT::i64, 0|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64ri8), 0,
- 1, MVT::i64, 2, 0, 1,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
0,
- 30,
+ 31,
OPC_CheckInteger, 0|128,0|128,0|128,0|128,8,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_EmitInteger, MVT::i64, 0|128,0|128,0|128,0|128,120|128,127|128,127|128,127|128,127|128,1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64ri32), 0,
- 1, MVT::i64, 2, 0, 1,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
0,
- 30|128,2,
+ 47|128,1,
OPC_RecordChild1,
- OPC_Scope, 118,
+ OPC_Scope, 119,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_Scope, 47,
- OPC_CheckPredicate, 11,
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_SwitchType , 12, MVT::i16,
OPC_EmitConvertToTarget, 1,
@@ -25017,7 +22372,7 @@ SDNode *SelectCode(SDNode *N) {
2, MVT::i64, MVT::i32, 2, 0, 2,
0,
17,
- OPC_CheckPredicate, 12,
+ OPC_CheckPredicate, 10,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_EmitConvertToTarget, 1,
@@ -25055,173 +22410,369 @@ SDNode *SelectCode(SDNode *N) {
OPC_CheckType, MVT::i64,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rr), 0,
2, MVT::i64, MVT::i32, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v16i8,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDDrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDQrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v8i8,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDBrr), 0,
- 1, MVT::v8i8, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDWrr), 0,
- 1, MVT::v4i16, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDDrr), 0,
- 1, MVT::v2i32, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDQrr), 0,
- 1, MVT::v1i64, 2, 0, 1,
0,
- 0,
- 0,
- 93|128,5, ISD::SUB,
- OPC_Scope, 73|128,2,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 61|128,1, ISD::BIT_CONVERT,
+ 102|128,1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_SwitchType , 75, MVT::v2i64,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_SwitchType , 10|128,1, MVT::v2i64,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::v16i8,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_SwitchType , 42, MVT::v16i8,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::v8i16,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::v4i32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
0,
- 96, MVT::v1i64,
+ 67, MVT::v1i64,
OPC_MoveParent,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::v8i8,
- OPC_CheckPatternPredicate, 8,
+ OPC_SwitchType , 19, MVT::v8i8,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDBrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v4i16,
- OPC_CheckPatternPredicate, 8,
+ 19, MVT::v4i16,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDWrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v2i32,
- OPC_CheckPatternPredicate, 8,
+ 19, MVT::v2i32,
+ OPC_CheckPatternPredicate, 9,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDDrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBQrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
0,
0,
- 1|128,1, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
+ 0,
+ 104|128,1,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_SwitchType , 11|128,1, MVT::v2i64,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 91,
- OPC_MoveParent,
- OPC_SwitchType , 20, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 20, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 20, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 20, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_SwitchType , 42, MVT::v16i8,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
0,
- 26,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
+ 42, MVT::v8i16,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::v4i32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 68, MVT::v1i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 19, MVT::v8i8,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i8, 6, 2, 3, 4, 5, 6, 7,
+ 19, MVT::v4i16,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
+ 19, MVT::v2i32,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 87,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDQrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
0,
+ 22,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
0,
- 57,
+ 88,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
OPC_RecordNode,
- OPC_SwitchType , 36, MVT::i32,
- OPC_Scope, 16,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 48,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v2i64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 23,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 40|128,1,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 26, MVT::v16i8,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
+ 26, MVT::v8i16,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
+ 26, MVT::v4i32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
+ 26, MVT::v2i64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPADDQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PADDQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
+ 11, MVT::v8i8,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDBrr), 0,
+ 1, MVT::v8i8, 2, 0, 1,
+ 11, MVT::v4i16,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDWrr), 0,
+ 1, MVT::v4i16, 2, 0, 1,
+ 11, MVT::v2i32,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDDrr), 0,
+ 1, MVT::v2i32, 2, 0, 1,
+ 11, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PADDQrr), 0,
+ 1, MVT::v1i64, 2, 0, 1,
+ 0,
+ 0,
+ 5|128,7, TARGET_OPCODE(ISD::SUB),
+ OPC_Scope, 114,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_Scope, 25,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 23,
+ OPC_CheckPredicate, 7,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 23,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 25,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 60,
+ OPC_RecordNode,
+ OPC_SwitchType , 38, MVT::i32,
+ OPC_Scope, 17,
+ OPC_CheckPatternPredicate, 3,
OPC_CheckComplexPat, /*CP*/3, /*#*/0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA32r), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 16,
- OPC_CheckPatternPredicate, 3,
+ 1, MVT::i32, 5, 1, 2, 3, 4, 5,
+ 17,
+ OPC_CheckPatternPredicate, 4,
OPC_CheckComplexPat, /*CP*/3, /*#*/0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64_32r), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
+ 1, MVT::i32, 5, 1, 2, 3, 4, 5,
0,
- 14, MVT::i64,
+ 15, MVT::i64,
OPC_CheckComplexPat, /*CP*/4, /*#*/0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64r), 0,
- 1, MVT::i64, 4, 1, 2, 3, 4,
+ 1, MVT::i64, 5, 1, 2, 3, 4, 5,
0,
52,
OPC_MoveChild, 0,
@@ -25241,1474 +22792,5939 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::NEG64r), 0,
2, MVT::i64, MVT::i32, 1, 0,
0,
- 31|128,2,
+ 28|128,5,
+ OPC_RecordChild0,
+ OPC_Scope, 47|128,1,
+ OPC_RecordChild1,
+ OPC_Scope, 119,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16ri8), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32ri8), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64ri8), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 17,
+ OPC_CheckPredicate, 10,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64ri32), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 45,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB8ri), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 2,
+ 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16ri), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32ri), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 0,
+ 0,
+ 12,
+ OPC_CheckType, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB8rr), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16rr), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32rr), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64rr), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 0,
+ 61|128,2,
+ OPC_MoveChild, 1,
+ OPC_SwitchOpcode , 97|128,1, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_SwitchType , 10|128,1, MVT::v2i64,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_SwitchType , 42, MVT::v16i8,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSUBBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::v8i16,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSUBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::v4i32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSUBDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 67, MVT::v1i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_SwitchType , 19, MVT::v8i8,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v4i16,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v2i32,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 81, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSUBQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 22,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 39|128,1,
+ OPC_RecordChild1,
+ OPC_SwitchType , 26, MVT::v16i8,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSUBBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
+ 26, MVT::v8i16,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSUBWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
+ 26, MVT::v4i32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSUBDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
+ 26, MVT::v2i64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSUBQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
+ 11, MVT::v8i8,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBBrr), 0,
+ 1, MVT::v8i8, 2, 0, 1,
+ 11, MVT::v4i16,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBWrr), 0,
+ 1, MVT::v4i16, 2, 0, 1,
+ 11, MVT::v2i32,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBDrr), 0,
+ 1, MVT::v2i32, 2, 0, 1,
+ 11, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBQrr), 0,
+ 1, MVT::v1i64, 2, 0, 1,
+ 0,
+ 0,
+ 0,
+ 35|128,8, TARGET_OPCODE(ISD::OR),
+ OPC_Scope, 114,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_Scope, 25,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 23,
+ OPC_CheckPredicate, 7,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 23,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 25,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 117,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_Scope, 26,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 24,
+ OPC_CheckPredicate, 7,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 24,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 26,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 60,
+ OPC_RecordNode,
+ OPC_SwitchType , 38, MVT::i32,
+ OPC_Scope, 17,
+ OPC_CheckPatternPredicate, 3,
+ OPC_CheckComplexPat, /*CP*/3, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA32r), 0,
+ 1, MVT::i32, 5, 1, 2, 3, 4, 5,
+ 17,
+ OPC_CheckPatternPredicate, 4,
+ OPC_CheckComplexPat, /*CP*/3, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64_32r), 0,
+ 1, MVT::i32, 5, 1, 2, 3, 4, 5,
+ 0,
+ 15, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/4, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64r), 0,
+ 1, MVT::i64, 5, 1, 2, 3, 4, 5,
+ 0,
+ 73|128,2,
OPC_RecordChild0,
OPC_RecordChild1,
- OPC_Scope, 118,
+ OPC_Scope, 111,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 49,
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 42,
+ OPC_SwitchType , 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16ri8), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri8), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64ri8), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 19,
+ OPC_CheckPredicate, 10,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 42,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64ri32), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 33,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 42,
+ OPC_SwitchType , 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16ri), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 0,
+ 0,
+ 40,
+ OPC_CheckPredicate, 42,
+ OPC_SwitchType , 10, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16rr), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 1,
+ 10, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rr), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 10, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rr), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 0,
+ 119,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_Scope, 47,
- OPC_CheckPredicate, 11,
+ OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_SwitchType , 12, MVT::i16,
OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16ri8), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16ri8), 0,
2, MVT::i16, MVT::i32, 2, 0, 2,
12, MVT::i32,
OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32ri8), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32ri8), 0,
2, MVT::i32, MVT::i32, 2, 0, 2,
12, MVT::i64,
OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64ri8), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64ri8), 0,
2, MVT::i64, MVT::i32, 2, 0, 2,
0,
17,
- OPC_CheckPredicate, 12,
+ OPC_CheckPredicate, 10,
OPC_MoveParent,
OPC_CheckType, MVT::i64,
OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64ri32), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64ri32), 0,
2, MVT::i64, MVT::i32, 2, 0, 2,
45,
OPC_MoveParent,
OPC_SwitchType , 12, MVT::i8,
OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB8ri), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8ri), 0,
2, MVT::i8, MVT::i32, 2, 0, 2,
12, MVT::i16,
OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16ri), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16ri), 0,
2, MVT::i16, MVT::i32, 2, 0, 2,
12, MVT::i32,
OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32ri), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32ri), 0,
2, MVT::i32, MVT::i32, 2, 0, 2,
0,
0,
12,
OPC_CheckType, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB8rr), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8rr), 0,
2, MVT::i8, MVT::i32, 2, 0, 1,
12,
OPC_CheckType, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16rr), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16rr), 0,
2, MVT::i16, MVT::i32, 2, 0, 1,
12,
OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32rr), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32rr), 0,
2, MVT::i32, MVT::i32, 2, 0, 1,
12,
OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64rr), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64rr), 0,
2, MVT::i64, MVT::i32, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v16i8,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBDrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 13,
+ 0,
+ 36|128,1,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 85, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_Scope, 40,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 40,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 69, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_Scope, 24,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 24,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 87,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
OPC_CheckType, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSUBQrr), 0,
- 1, MVT::v2i64, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v8i8,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBBrr), 0,
- 1, MVT::v8i8, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBWrr), 0,
- 1, MVT::v4i16, 2, 0, 1,
- 13,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBDrr), 0,
- 1, MVT::v2i32, 2, 0, 1,
- 13,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPORrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PORrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 22,
+ OPC_MoveParent,
OPC_CheckType, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSUBQrr), 0,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PORrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 119,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 83, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 48,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v2i64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPORrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PORrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 23,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PORrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 26, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ORPDrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
+ 57,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 38, MVT::v2i64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ORPSrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPORrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PORrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
+ 11, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PORrr), 0,
1, MVT::v1i64, 2, 0, 1,
0,
0,
- 71, X86ISD::PINSRW,
- OPC_RecordChild0,
- OPC_Scope, 45,
+ 10|128,7, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 114,
+ OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 48,
- OPC_CheckPredicate, 60,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PINSRWrmi), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 7, 0, 4, 5, 6, 7, 8, 9,
- 21,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PINSRWrri), 0,
- 1, MVT::v8i16, 3, 0, 1, 3,
- 0,
- 122, ISD::MEMBARRIER,
- OPC_RecordNode,
- OPC_MoveChild, 1,
- OPC_CheckType, MVT::i8,
- OPC_Scope, 32,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_MoveChild, 3,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_MoveChild, 4,
- OPC_CheckInteger, 1,
- OPC_MoveParent,
- OPC_MoveChild, 5,
- OPC_CheckInteger, 1,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SFENCE), 0|OPFL_Chain,
- 0, 0,
- 32,
- OPC_CheckInteger, 1,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_MoveChild, 3,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_MoveChild, 4,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_MoveChild, 5,
- OPC_CheckInteger, 1,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LFENCE), 0|OPFL_Chain,
- 0, 0,
- 48,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveChild, 4,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_MoveChild, 5,
- OPC_Scope, 12,
- OPC_CheckInteger, 0,
+ OPC_Scope, 25,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::NOOP), 0|OPFL_Chain,
- 0, 0,
- 12,
- OPC_CheckInteger, 1,
+ OPC_CheckType, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 23,
+ OPC_CheckPredicate, 7,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MFENCE), 0|OPFL_Chain,
- 0, 0,
+ OPC_CheckType, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 23,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 25,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
0,
- 0,
- 60, X86ISD::PSHUFB,
- OPC_RecordChild0,
- OPC_Scope, 43,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ 117,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_CheckType, MVT::v2i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 11,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFBrm128), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 11,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFBrr128), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 0,
- 71, X86ISD::PINSRB,
- OPC_RecordChild0,
- OPC_Scope, 45,
+ OPC_Scope, 26,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 24,
+ OPC_CheckPredicate, 7,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 24,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 26,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 109|128,1,
+ OPC_RecordChild0,
+ OPC_Scope, 56,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,127|128,1,
+ OPC_MoveParent,
+ OPC_SwitchType , 8, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::NOT8r), 0,
+ 1, MVT::i8, 1, 0,
+ 8, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::NOT16r), 0,
+ 1, MVT::i16, 1, 0,
+ 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::NOT32r), 0,
+ 1, MVT::i32, 1, 0,
+ 8, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::NOT64r), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 47|128,1,
+ OPC_RecordChild1,
+ OPC_Scope, 119,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16ri8), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32ri8), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64ri8), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 17,
+ OPC_CheckPredicate, 10,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64ri32), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 45,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8ri), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 2,
+ 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16ri), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32ri), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 0,
+ 0,
+ 12,
+ OPC_CheckType, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8rr), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16rr), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32rr), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64rr), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 0,
+ 0,
+ 36|128,1,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 85, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_Scope, 40,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 40,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 69, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_Scope, 24,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 24,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 87,
+ OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 48,
- OPC_CheckPredicate, 59,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_EmitConvertToTarget, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PINSRBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 7, 0, 4, 5, 6, 7, 8, 9,
- 21,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPXORrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PXORrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 22,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PXORrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 119,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 83, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 48,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v2i64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPXORrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PXORrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 23,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PXORrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 26, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XORPDrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
+ 57,
+ OPC_RecordChild0,
OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PINSRBrr), 0,
- 1, MVT::v16i8, 3, 0, 1, 3,
+ OPC_SwitchType , 38, MVT::v2i64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XORPSrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPXORrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PXORrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
+ 11, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PXORrr), 0,
+ 1, MVT::v1i64, 2, 0, 1,
+ 0,
0,
- 63|128,1, X86ISD::PCMPEQB,
- OPC_Scope, 42,
+ 97|128,30, TARGET_OPCODE(ISD::AND),
+ OPC_Scope, 114,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
- 42,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_Scope, 25,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 23,
+ OPC_CheckPredicate, 7,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 23,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 25,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 117,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
+ OPC_Scope, 26,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 24,
+ OPC_CheckPredicate, 7,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 24,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 26,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 17|128,1,
+ OPC_CheckAndImm, 127|128,1,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SRL),
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 8,
+ OPC_CheckType, MVT::i8,
OPC_MoveParent,
+ OPC_CheckPredicate, 13,
OPC_MoveParent,
+ OPC_SwitchType , 72, MVT::i32,
+ OPC_Scope, 34,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitInteger, MVT::i32, X86::GR32_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i32, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr8), 0,
+ 1, MVT::i32, 1, 4,
+ 34,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitInteger, MVT::i32, X86::GR32_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i32, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32_NOREXrr8), 0,
+ 1, MVT::i32, 1, 4,
+ 0,
+ 48, MVT::i64,
+ OPC_EmitInteger, MVT::i64, 0,
+ OPC_EmitInteger, MVT::i32, X86::GR64_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i64, 2, 0, 2,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 3, 4,
+ OPC_EmitNode, TARGET_OPCODE(X86::MOVZX32_NOREXrr8), 0,
+ 1, MVT::i32, 1, 5,
+ OPC_EmitInteger, MVT::i32, X86::sub_32bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0,
+ 1, MVT::i64, 3, 1, 6, 7,
+ 0,
+ 27,
+ OPC_CheckAndImm, 127|128,127|128,3,
+ OPC_RecordChild0,
+ OPC_CheckType, MVT::i32,
+ OPC_EmitInteger, MVT::i32, X86::sub_16bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i16, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr16), 0,
+ 1, MVT::i32, 1, 2,
+ 29,
+ OPC_CheckAndImm, 127|128,127|128,127|128,127|128,15,
+ OPC_RecordChild0,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitInteger, MVT::i32, X86::sub_32bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i32, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rr32), 0,
+ 1, MVT::i64, 1, 2,
+ 27,
+ OPC_CheckAndImm, 127|128,127|128,3,
+ OPC_RecordChild0,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitInteger, MVT::i32, X86::sub_16bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i16, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rr16), 0,
+ 1, MVT::i64, 1, 2,
+ 24|128,1,
+ OPC_CheckAndImm, 127|128,1,
+ OPC_RecordChild0,
+ OPC_SwitchType , 20, MVT::i64,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rr8), 0,
+ 1, MVT::i64, 1, 2,
+ 60, MVT::i32,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr8), 0,
+ 1, MVT::i32, 1, 2,
+ 34,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitInteger, MVT::i32, X86::GR32_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i32, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr8), 0,
+ 1, MVT::i32, 1, 4,
+ 0,
+ 60, MVT::i16,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX16rr8), 0,
+ 1, MVT::i16, 1, 2,
+ 34,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i16, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX16rr8), 0,
+ 1, MVT::i16, 1, 4,
+ 0,
+ 0,
+ 98|128,1,
+ OPC_RecordChild0,
OPC_RecordChild1,
- OPC_CheckType, MVT::v8i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i8, 6, 2, 3, 4, 5, 6, 7,
- 37,
+ OPC_Scope, 40|128,1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 9,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16ri8), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32ri8), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64ri8), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 17,
+ OPC_CheckPredicate, 10,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64ri32), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 48,
+ OPC_CheckPredicate, 43,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitInteger, MVT::i64, 0,
+ OPC_EmitInteger, MVT::i32, X86::sub_32bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i32, 2, 0, 3,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 1, 5,
+ OPC_EmitNode, TARGET_OPCODE(X86::AND32ri), 0,
+ 2, MVT::i32, MVT::i32, 2, 4, 6,
+ OPC_EmitInteger, MVT::i32, X86::sub_32bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0,
+ 1, MVT::i64, 3, 2, 7, 9,
+ 45,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8ri), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 2,
+ 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16ri), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32ri), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 0,
+ 0,
+ 12,
+ OPC_CheckType, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8rr), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16rr), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32rr), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 12,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64rr), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 0,
+ 22|128,13,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 27|128,6, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 48|128,1,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_Scope, 58,
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 58,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 50,
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 51|128,1,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_SwitchType , 40, MVT::v4i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 40, MVT::v8i16,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 40, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 36, MVT::v2i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 110|128,1,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 50, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 49|128,1, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_SwitchType , 41, MVT::v4i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 41, MVT::v8i16,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 41, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 37, MVT::v2i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 96,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 22,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 96,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 22,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 23|128,6, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 35|128,3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 39|128,1,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 123, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_Scope, 42,
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 42,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 34,
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 34, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 39,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 39,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 39,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v8i16,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 39,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v8i16,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 39,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 39,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 84,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 37,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v2i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
+ 37,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v2i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 10|128,1,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 31,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 31,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 31,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 31,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 72,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 31,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
+ 31,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 61,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_Scope, 24,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 24,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 85, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_Scope, 40,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 40,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 87,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v16i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPANDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 22,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 5|128,3,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 83, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 48,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v2i64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPANDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 23,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v1i64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 93|128,1, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 80,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 35, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPDrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 35, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPDrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
+ 104,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_SwitchType , 32, MVT::v4i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v2i64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPSrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
+ 17, MVT::v8i16,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 17, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 15, MVT::v2i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v1i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
+ 1, MVT::v1i64, 2, 0, 1,
+ 0,
+ 32,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPSrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
+ 71, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 26, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPDrr), 0,
+ 1, MVT::v2i64, 2, 1, 0,
+ 26, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPDrr), 0,
+ 1, MVT::v2i64, 2, 1, 0,
+ 0,
+ 0,
+ 72,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 31,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPSrr), 0,
+ 1, MVT::v2i64, 2, 1, 0,
+ 31,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDNPSrr), 0,
+ 1, MVT::v2i64, 2, 1, 0,
+ 0,
37,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckType, MVT::v16i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
- 26,
- OPC_RecordChild0,
+ OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::v16i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 9, MVT::v8i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQBrr), 0,
- 1, MVT::v8i8, 2, 0, 1,
- 0,
- 0,
- 63|128,1, X86ISD::PCMPEQW,
- OPC_Scope, 42,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 72,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 31,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
+ 1, MVT::v2i64, 2, 1, 0,
+ 31,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
+ 1, MVT::v2i64, 2, 1, 0,
+ 0,
+ 37,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
- OPC_MoveParent,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
- 42,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v8i16,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
- 37,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 72,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 31,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v8i16,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
+ 1, MVT::v2i64, 2, 1, 0,
+ 31,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v8i16,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
+ 1, MVT::v2i64, 2, 1, 0,
+ 0,
37,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckType, MVT::v8i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
- 26,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 72,
OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 31,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
+ 1, MVT::v2i64, 2, 1, 0,
+ 31,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
+ 1, MVT::v2i64, 2, 1, 0,
+ 0,
+ 35,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v2i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::v8i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 9, MVT::v4i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQWrr), 0,
- 1, MVT::v4i16, 2, 0, 1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v1i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
+ 1, MVT::v1i64, 2, 0, 1,
+ 68,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 29,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v2i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v1i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
+ 1, MVT::v1i64, 2, 1, 0,
+ 29,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_CheckType, MVT::v2i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v1i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
+ 1, MVT::v1i64, 2, 1, 0,
0,
- 0,
- 63|128,1, X86ISD::PCMPEQD,
- OPC_Scope, 42,
+ 87,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 54,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 26, MVT::v2i64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPANDNrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
+ 11, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
+ 1, MVT::v1i64, 2, 0, 1,
+ 0,
+ 24,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPANDNrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
+ 56,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 23,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPANDNrr), 0,
+ 1, MVT::v2i64, 2, 1, 0,
+ 23,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPANDNrr), 0,
+ 1, MVT::v2i64, 2, 1, 0,
+ 0,
+ 29,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
OPC_MoveParent,
+ OPC_RecordChild1,
OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
- 42,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 56,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 23,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
+ 1, MVT::v2i64, 2, 1, 0,
+ 23,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDNrr), 0,
+ 1, MVT::v2i64, 2, 1, 0,
+ 0,
+ 29,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
OPC_MoveParent,
+ OPC_RecordChild1,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 6, 2, 3, 4, 5, 6, 7,
- 37,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
+ 1, MVT::v1i64, 2, 0, 1,
+ 56,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::XOR),
+ OPC_Scope, 23,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
+ 1, MVT::v1i64, 2, 1, 0,
+ 23,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 44,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDNrr), 0,
+ 1, MVT::v1i64, 2, 1, 0,
+ 0,
+ 31,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
- 37,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDPDrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 57,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 38, MVT::v2i64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ANDPSrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPANDrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PANDrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
+ 11, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PANDrr), 0,
+ 1, MVT::v1i64, 2, 0, 1,
+ 0,
+ 0,
+ 91|128,2, TARGET_OPCODE(ISD::FP_TO_SINT),
+ OPC_Scope, 66|128,1,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
- OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
- 26,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_SwitchType , 87, MVT::f32,
+ OPC_MoveParent,
+ OPC_SwitchType , 40, MVT::i32,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTTSS2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTTSS2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 40, MVT::i64,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTTSS2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTTSS2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 0,
+ 87, MVT::f64,
+ OPC_MoveParent,
+ OPC_SwitchType , 40, MVT::i32,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTTSD2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTTSD2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 40, MVT::i64,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTTSD2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTTSD2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 0,
+ 0,
+ 19|128,1,
OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQDrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 9, MVT::v2i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQDrr), 0,
- 1, MVT::v2i32, 2, 0, 1,
+ OPC_Scope, 56,
+ OPC_CheckChild0Type, MVT::f32,
+ OPC_SwitchType , 24, MVT::i32,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTTSS2SIrr), 0,
+ 1, MVT::i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTTSS2SIrr), 0,
+ 1, MVT::i32, 1, 0,
+ 0,
+ 24, MVT::i64,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTTSS2SI64rr), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTTSS2SI64rr), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 0,
+ 56,
+ OPC_CheckChild0Type, MVT::f64,
+ OPC_SwitchType , 24, MVT::i32,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTTSD2SIrr), 0,
+ 1, MVT::i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTTSD2SIrr), 0,
+ 1, MVT::i32, 1, 0,
+ 0,
+ 24, MVT::i64,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VCVTTSD2SI64rr), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTTSD2SI64rr), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 0,
+ 14,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_CheckType, MVT::v4i32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPS2DQrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 14,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_CheckType, MVT::v2i32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPD2PIrr), 0,
+ 1, MVT::v2i32, 1, 0,
0,
0,
- 107, X86ISD::PCMPGTB,
+ 55|128,1, TARGET_OPCODE(X86ISD::UCOMI),
OPC_RecordChild0,
- OPC_Scope, 77,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 37, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_Scope, 89,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_Scope, 56,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VUCOMISSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_UCOMISSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VUCOMISSrr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_UCOMISSrr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 0,
+ 0,
+ 89,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_Scope, 56,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_CheckType, MVT::v8i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
- 32, ISD::LOAD,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VUCOMISDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_UCOMISDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VUCOMISDrr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_UCOMISDrr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 0,
+ 0,
+ 0,
+ 55|128,1, TARGET_OPCODE(X86ISD::COMI),
+ OPC_RecordChild0,
+ OPC_Scope, 89,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_Scope, 56,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_CheckType, MVT::v16i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTBrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCOMISSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_COMISSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCOMISSrr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_COMISSrr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 0,
0,
- 25,
- OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::v16i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTBrr), 0,
- 1, MVT::v16i8, 2, 0, 1,
- 9, MVT::v8i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTBrr), 0,
- 1, MVT::v8i8, 2, 0, 1,
+ 89,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_Scope, 56,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCOMISDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_COMISDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_VCOMISDrr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_COMISDrr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 0,
0,
0,
- 107, X86ISD::PCMPGTW,
+ 21|128,1, TARGET_OPCODE(X86ISD::PTEST),
OPC_RecordChild0,
- OPC_Scope, 77,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 37, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_Scope, 91,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_Scope, 58,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
- OPC_MoveParent,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckType, MVT::v4i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
- 32, ISD::LOAD,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPTESTrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PTESTrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild1,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPTESTrr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 5,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PTESTrr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 0,
+ 0,
+ 53,
+ OPC_CheckChild0Type, MVT::v4i64,
+ OPC_Scope, 35,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckType, MVT::v8i16,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTWrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 25,
- OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::v8i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTWrr), 0,
- 1, MVT::v8i16, 2, 0, 1,
- 9, MVT::v4i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTWrr), 0,
- 1, MVT::v4i16, 2, 0, 1,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPTESTYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPTESTYrr), 0,
+ 1, MVT::i32, 2, 0, 1,
0,
0,
- 107, X86ISD::PCMPGTD,
+ 91|128,1, TARGET_OPCODE(X86ISD::TESTP),
OPC_RecordChild0,
- OPC_Scope, 77,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 37, ISD::BIT_CONVERT,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_Scope, 53,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_Scope, 35,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckType, MVT::v1i64,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VTESTPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VTESTPSrr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 0,
+ 53,
+ OPC_CheckChild0Type, MVT::v8f32,
+ OPC_Scope, 35,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckType, MVT::v2i32,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
- 32, ISD::LOAD,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VTESTPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VTESTPSYrr), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 0,
+ 53,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_Scope, 35,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckType, MVT::v4i32,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VTESTPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VTESTPDrr), 0,
+ 1, MVT::i32, 2, 0, 1,
0,
- 25,
- OPC_RecordChild1,
- OPC_SwitchType , 9, MVT::v4i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTDrr), 0,
- 1, MVT::v4i32, 2, 0, 1,
- 9, MVT::v2i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTDrr), 0,
- 1, MVT::v2i32, 2, 0, 1,
+ 53,
+ OPC_CheckChild0Type, MVT::v4f64,
+ OPC_Scope, 35,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VTESTPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VTESTPDYrr), 0,
+ 1, MVT::i32, 2, 0, 1,
0,
0,
- 110, X86ISD::LCMPXCHG_DAG,
+ 90, TARGET_OPCODE(ISD::ATOMIC_SWAP),
+ OPC_RecordMemRef,
OPC_RecordNode,
- OPC_CaptureFlagInput,
OPC_RecordChild1,
OPC_RecordChild2,
- OPC_Scope, 25,
- OPC_CheckChild2Type, MVT::i32,
- OPC_MoveChild, 3,
- OPC_CheckInteger, 4,
- OPC_MoveParent,
+ OPC_SwitchType , 19, MVT::i32,
+ OPC_CheckPredicate, 45,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LCMPXCHG32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 6, 3, 4, 5, 6, 7, 2,
- 25,
- OPC_CheckChild2Type, MVT::i16,
- OPC_MoveChild, 3,
- OPC_CheckInteger, 2,
- OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XCHG32rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 19, MVT::i16,
+ OPC_CheckPredicate, 46,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LCMPXCHG16), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 6, 3, 4, 5, 6, 7, 2,
- 25,
- OPC_CheckChild2Type, MVT::i8,
- OPC_MoveChild, 3,
- OPC_CheckInteger, 1,
- OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XCHG16rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i16, 6, 2, 3, 4, 5, 6, 7,
+ 19, MVT::i8,
+ OPC_CheckPredicate, 47,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LCMPXCHG8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 6, 3, 4, 5, 6, 7, 2,
- 25,
- OPC_CheckChild2Type, MVT::i64,
- OPC_MoveChild, 3,
- OPC_CheckInteger, 8,
- OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XCHG8rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i8, 6, 2, 3, 4, 5, 6, 7,
+ 19, MVT::i64,
+ OPC_CheckPredicate, 48,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LCMPXCHG64), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 6, 3, 4, 5, 6, 7, 2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::XCHG64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::i64, 6, 2, 3, 4, 5, 6, 7,
0,
- 7|128,1, ISD::INTRINSIC_W_CHAIN,
+ 94, TARGET_OPCODE(ISD::ATOMIC_LOAD_ADD),
+ OPC_RecordMemRef,
OPC_RecordNode,
- OPC_MoveChild, 1,
- OPC_Scope, 25,
- OPC_CheckInteger, 59|128,5,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 0,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_SwitchType , 20, MVT::i32,
+ OPC_CheckPredicate, 49,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPSrm_Int), 0|OPFL_Chain,
- 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
- 25,
- OPC_CheckInteger, 16|128,4,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LXADD32), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 20, MVT::i16,
+ OPC_CheckPredicate, 50,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPDrm_Int), 0|OPFL_Chain,
- 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
- 25,
- OPC_CheckInteger, 15|128,4,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LXADD16), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 20, MVT::i8,
+ OPC_CheckPredicate, 51,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDQUrm_Int), 0|OPFL_Chain,
- 1, MVT::v16i8, 5, 2, 3, 4, 5, 6,
- 25,
- OPC_CheckInteger, 95|128,4,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 9,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LXADD8), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 20, MVT::i64,
+ OPC_CheckPredicate, 52,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LDDQUrm), 0|OPFL_Chain,
- 1, MVT::v16i8, 5, 2, 3, 4, 5, 6,
- 25,
- OPC_CheckInteger, 106|128,4,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 4,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LXADD64), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 94, TARGET_OPCODE(ISD::ATOMIC_LOAD_AND),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_SwitchType , 20, MVT::i32,
+ OPC_CheckPredicate, 53,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTDQArm), 0|OPFL_Chain,
- 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMAND32), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i16,
+ OPC_CheckPredicate, 54,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMAND16), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i8,
+ OPC_CheckPredicate, 55,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMAND8), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i64,
+ OPC_CheckPredicate, 56,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMAND64), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
0,
- 41|128,4, ISD::INTRINSIC_VOID,
+ 94, TARGET_OPCODE(ISD::ATOMIC_LOAD_OR),
+ OPC_RecordMemRef,
OPC_RecordNode,
- OPC_MoveChild, 1,
- OPC_Scope, 26,
- OPC_CheckInteger, 75|128,5,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_CheckPatternPredicate, 0,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_SwitchType , 20, MVT::i32,
+ OPC_CheckPredicate, 57,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPSmr_Int), 0|OPFL_Chain,
- 0, 6, 3, 4, 5, 6, 7, 2,
- 26,
- OPC_CheckInteger, 65|128,5,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_CheckPatternPredicate, 0,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMOR32), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i16,
+ OPC_CheckPredicate, 58,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTPSmr_Int), 0|OPFL_Chain,
- 0, 6, 3, 4, 5, 6, 7, 2,
- 24,
- OPC_CheckInteger, 58|128,5,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 0,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMOR16), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i8,
+ OPC_CheckPredicate, 59,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LDMXCSR), 0|OPFL_Chain,
- 0, 5, 2, 3, 4, 5, 6,
- 24,
- OPC_CheckInteger, 74|128,5,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 0,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMOR8), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i64,
+ OPC_CheckPredicate, 60,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::STMXCSR), 0|OPFL_Chain,
- 0, 5, 2, 3, 4, 5, 6,
- 26,
- OPC_CheckInteger, 81|128,4,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_CheckPatternPredicate, 1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMOR64), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 0,
+ 94, TARGET_OPCODE(ISD::ATOMIC_LOAD_XOR),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_SwitchType , 20, MVT::i32,
+ OPC_CheckPredicate, 61,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPDmr_Int), 0|OPFL_Chain,
- 0, 6, 3, 4, 5, 6, 7, 2,
- 26,
- OPC_CheckInteger, 80|128,4,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_CheckPatternPredicate, 1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMXOR32), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i16,
+ OPC_CheckPredicate, 62,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDQUmr_Int), 0|OPFL_Chain,
- 0, 6, 3, 4, 5, 6, 7, 2,
- 26,
- OPC_CheckInteger, 26|128,4,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_CheckPatternPredicate, 1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMXOR16), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i8,
+ OPC_CheckPredicate, 63,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTPDmr_Int), 0|OPFL_Chain,
- 0, 6, 3, 4, 5, 6, 7, 2,
- 26,
- OPC_CheckInteger, 24|128,4,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_CheckPatternPredicate, 1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMXOR8), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i64,
+ OPC_CheckPredicate, 64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTDQmr_Int), 0|OPFL_Chain,
- 0, 6, 3, 4, 5, 6, 7, 2,
- 26,
- OPC_CheckInteger, 25|128,4,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_CheckPatternPredicate, 1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMXOR64), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 0,
+ 94, TARGET_OPCODE(ISD::ATOMIC_LOAD_NAND),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_SwitchType , 20, MVT::i32,
+ OPC_CheckPredicate, 65,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTImr_Int), 0|OPFL_Chain,
- 0, 6, 3, 4, 5, 6, 7, 2,
- 24,
- OPC_CheckInteger, 116|128,3,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckPatternPredicate, 1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMNAND32), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i16,
+ OPC_CheckPredicate, 66,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CLFLUSH), 0|OPFL_Chain,
- 0, 5, 2, 3, 4, 5, 6,
- 26,
- OPC_CheckInteger, 79|128,4,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_CheckPatternPredicate, 1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMNAND16), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i8,
+ OPC_CheckPredicate, 67,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLQ128mr), 0|OPFL_Chain,
- 0, 6, 3, 4, 5, 6, 7, 2,
- 26,
- OPC_CheckInteger, 69|128,3,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_CheckPatternPredicate, 8,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMNAND8), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i8, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i64,
+ OPC_CheckPredicate, 68,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVNTQmr), 0|OPFL_Chain,
- 0, 6, 3, 4, 5, 6, 7, 2,
- 15,
- OPC_CheckInteger, 71|128,5,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMNAND64), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 0,
+ 72, TARGET_OPCODE(ISD::ATOMIC_LOAD_MIN),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_SwitchType , 20, MVT::i32,
+ OPC_CheckPredicate, 69,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMMIN32), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i16,
+ OPC_CheckPredicate, 70,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMMIN16), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i64,
+ OPC_CheckPredicate, 71,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMMIN64), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 0,
+ 72, TARGET_OPCODE(ISD::ATOMIC_LOAD_MAX),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_SwitchType , 20, MVT::i32,
+ OPC_CheckPredicate, 72,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMMAX32), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i16,
+ OPC_CheckPredicate, 73,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMMAX16), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i64,
+ OPC_CheckPredicate, 74,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMMAX64), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 0,
+ 72, TARGET_OPCODE(ISD::ATOMIC_LOAD_UMIN),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_SwitchType , 20, MVT::i32,
+ OPC_CheckPredicate, 75,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMUMIN32), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i16,
+ OPC_CheckPredicate, 76,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMUMIN16), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i64,
+ OPC_CheckPredicate, 77,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMUMIN64), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 0,
+ 72, TARGET_OPCODE(ISD::ATOMIC_LOAD_UMAX),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_SwitchType , 20, MVT::i32,
+ OPC_CheckPredicate, 78,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMUMAX32), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i32, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i16,
+ OPC_CheckPredicate, 79,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMUMAX16), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i16, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 20, MVT::i64,
+ OPC_CheckPredicate, 80,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMUMAX64), 0|OPFL_Chain|OPFL_MemRefs,
+ 2, MVT::i64, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 0,
+ 60, TARGET_OPCODE(X86ISD::MUL_IMM),
+ OPC_RecordNode,
+ OPC_SwitchType , 38, MVT::i32,
+ OPC_Scope, 17,
+ OPC_CheckPatternPredicate, 3,
+ OPC_CheckComplexPat, /*CP*/3, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA32r), 0,
+ 1, MVT::i32, 5, 1, 2, 3, 4, 5,
+ 17,
+ OPC_CheckPatternPredicate, 4,
+ OPC_CheckComplexPat, /*CP*/3, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64_32r), 0,
+ 1, MVT::i32, 5, 1, 2, 3, 4, 5,
+ 0,
+ 15, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/4, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64r), 0,
+ 1, MVT::i64, 5, 1, 2, 3, 4, 5,
+ 0,
+ 75|128,2, TARGET_OPCODE(ISD::SHL),
+ OPC_Scope, 60,
+ OPC_RecordNode,
+ OPC_SwitchType , 38, MVT::i32,
+ OPC_Scope, 17,
+ OPC_CheckPatternPredicate, 3,
+ OPC_CheckComplexPat, /*CP*/3, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA32r), 0,
+ 1, MVT::i32, 5, 1, 2, 3, 4, 5,
+ 17,
+ OPC_CheckPatternPredicate, 4,
+ OPC_CheckComplexPat, /*CP*/3, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64_32r), 0,
+ 1, MVT::i32, 5, 1, 2, 3, 4, 5,
+ 0,
+ 15, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/4, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64r), 0,
+ 1, MVT::i64, 5, 1, 2, 3, 4, 5,
+ 0,
+ 10|128,2,
+ OPC_RecordChild0,
+ OPC_Scope, 0|128,1,
+ OPC_MoveChild, 1,
+ OPC_CheckType, MVT::i8,
+ OPC_Scope, 48,
+ OPC_CheckAndImm, 31,
+ OPC_RecordChild0,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL8rCL), 0|OPFL_FlagInput,
+ 2, MVT::i8, MVT::i32, 1, 0,
+ 12, MVT::i16,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL16rCL), 0|OPFL_FlagInput,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 12, MVT::i32,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL32rCL), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 0,
+ 18,
+ OPC_CheckAndImm, 63,
+ OPC_RecordChild0,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL64rCL), 0|OPFL_FlagInput,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 53,
+ OPC_CheckInteger, 1,
+ OPC_MoveParent,
+ OPC_SwitchType , 10, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8rr), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 0,
+ 10, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16rr), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 0,
+ 10, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rr), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 0,
+ 10, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rr), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 0,
+ 0,
+ 0,
+ 3|128,1,
+ OPC_RecordChild1,
+ OPC_Scope, 66,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL8ri), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 2,
+ 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL16ri), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL32ri), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL64ri), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 60,
+ OPC_CheckChild1Type, MVT::i8,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL8rCL), 0|OPFL_FlagInput,
+ 2, MVT::i8, MVT::i32, 1, 0,
+ 12, MVT::i16,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL16rCL), 0|OPFL_FlagInput,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 12, MVT::i32,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL32rCL), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 12, MVT::i64,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL64rCL), 0|OPFL_FlagInput,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 60, TARGET_OPCODE(ISD::FrameIndex),
+ OPC_RecordNode,
+ OPC_SwitchType , 38, MVT::i32,
+ OPC_Scope, 17,
+ OPC_CheckPatternPredicate, 3,
+ OPC_CheckComplexPat, /*CP*/3, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA32r), 0,
+ 1, MVT::i32, 5, 1, 2, 3, 4, 5,
+ 17,
+ OPC_CheckPatternPredicate, 4,
+ OPC_CheckComplexPat, /*CP*/3, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64_32r), 0,
+ 1, MVT::i32, 5, 1, 2, 3, 4, 5,
+ 0,
+ 15, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/4, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64r), 0,
+ 1, MVT::i64, 5, 1, 2, 3, 4, 5,
+ 0,
+ 18, TARGET_OPCODE(X86ISD::WrapperRIP),
+ OPC_RecordNode,
+ OPC_CheckType, MVT::i64,
+ OPC_CheckComplexPat, /*CP*/4, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64r), 0,
+ 1, MVT::i64, 5, 1, 2, 3, 4, 5,
+ 82|128,3, TARGET_OPCODE(ISD::SRL),
+ OPC_Scope, 97,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::AND),
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0|128,126|128,3,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SFENCE), 0|OPFL_Chain,
- 0, 0,
- 47,
- OPC_CheckInteger, 17|128,4,
+ OPC_CheckPredicate, 19,
OPC_MoveParent,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_RecordChild4,
- OPC_Scope, 18,
- OPC_CheckChild4Type, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitCopyToReg, 3, X86::EDI,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MASKMOVDQU), 0|OPFL_Chain|OPFL_FlagInput,
- 0, 2, 1, 2,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 8,
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_Scope, 34,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitInteger, MVT::i32, X86::GR32_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i32, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr8), 0,
+ 1, MVT::i32, 1, 4,
+ 34,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitInteger, MVT::i32, X86::GR32_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i32, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32_NOREXrr8), 0,
+ 1, MVT::i32, 1, 4,
+ 0,
+ 108|128,2,
+ OPC_RecordChild0,
+ OPC_Scope, 98|128,1,
+ OPC_MoveChild, 1,
+ OPC_CheckType, MVT::i8,
+ OPC_Scope, 48,
+ OPC_CheckAndImm, 31,
+ OPC_RecordChild0,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR8rCL), 0|OPFL_FlagInput,
+ 2, MVT::i8, MVT::i32, 1, 0,
+ 12, MVT::i16,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR16rCL), 0|OPFL_FlagInput,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 12, MVT::i32,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR32rCL), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 0,
+ 18,
+ OPC_CheckAndImm, 63,
+ OPC_RecordChild0,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR64rCL), 0|OPFL_FlagInput,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 49,
+ OPC_CheckInteger, 1,
+ OPC_MoveParent,
+ OPC_SwitchType , 9, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR8r1), 0,
+ 2, MVT::i8, MVT::i32, 1, 0,
+ 9, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR16r1), 0,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR32r1), 0,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR64r1), 0,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 0,
+ 101,
+ OPC_CheckInteger, 8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i16,
+ OPC_Scope, 46,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i16, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ OPC_EmitNode, TARGET_OPCODE(X86::MOVZX32rr8), 0,
+ 1, MVT::i32, 1, 4,
+ OPC_EmitInteger, MVT::i32, X86::sub_16bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i16, 2, 5, 6,
+ 46,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i16, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ OPC_EmitNode, TARGET_OPCODE(X86::MOVZX32_NOREXrr8), 0,
+ 1, MVT::i32, 1, 4,
+ OPC_EmitInteger, MVT::i32, X86::sub_16bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i16, 2, 5, 6,
+ 0,
+ 0,
+ 3|128,1,
+ OPC_RecordChild1,
+ OPC_Scope, 66,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR8ri), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 2,
+ 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR16ri), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR32ri), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR64ri), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 60,
+ OPC_CheckChild1Type, MVT::i8,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR8rCL), 0|OPFL_FlagInput,
+ 2, MVT::i8, MVT::i32, 1, 0,
+ 12, MVT::i16,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR16rCL), 0|OPFL_FlagInput,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 12, MVT::i32,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR32rCL), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 12, MVT::i64,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR64rCL), 0|OPFL_FlagInput,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 120|128,1, TARGET_OPCODE(ISD::TRUNCATE),
+ OPC_Scope, 79,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SRL),
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 8,
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 13,
+ OPC_SwitchType , 29, MVT::i16,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i8,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i16, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ 29, MVT::i32,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i8,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitInteger, MVT::i32, X86::GR32_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i32, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ 0,
+ 36|128,1,
+ OPC_RecordChild0,
+ OPC_Scope, 64,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_SwitchType , 12, MVT::i16,
+ OPC_EmitInteger, MVT::i32, X86::sub_16bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i16, 2, 0, 1,
+ 44, MVT::i8,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 0, 1,
+ 26,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitInteger, MVT::i32, X86::GR32_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i32, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ 0,
+ 0,
+ 46,
+ OPC_CheckChild0Type, MVT::i64,
+ OPC_SwitchType , 12, MVT::i32,
+ OPC_EmitInteger, MVT::i32, X86::sub_32bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i32, 2, 0, 1,
+ 12, MVT::i16,
+ OPC_EmitInteger, MVT::i32, X86::sub_16bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i16, 2, 0, 1,
+ 12, MVT::i8,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 0, 1,
+ 0,
+ 48,
+ OPC_CheckChild0Type, MVT::i16,
+ OPC_CheckType, MVT::i8,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 0, 1,
+ 26,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i16, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ 0,
+ 0,
+ 0,
+ 123|128,1, TARGET_OPCODE(ISD::ZERO_EXTEND),
+ OPC_Scope, 16|128,1,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SRL),
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 8,
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 13,
+ OPC_CheckType, MVT::i16,
+ OPC_MoveParent,
+ OPC_SwitchType , 72, MVT::i32,
+ OPC_Scope, 34,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i16, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr8), 0,
+ 1, MVT::i32, 1, 4,
+ 34,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i16, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32_NOREXrr8), 0,
+ 1, MVT::i32, 1, 4,
+ 0,
+ 48, MVT::i64,
+ OPC_EmitInteger, MVT::i64, 0,
+ OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i16, 2, 0, 2,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 3, 4,
+ OPC_EmitNode, TARGET_OPCODE(X86::MOVZX32_NOREXrr8), 0,
+ 1, MVT::i32, 1, 5,
+ OPC_EmitInteger, MVT::i32, X86::sub_32bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0,
+ 1, MVT::i64, 3, 1, 6, 7,
+ 0,
+ 102,
+ OPC_RecordChild0,
+ OPC_Scope, 25,
+ OPC_MoveChild, 0,
+ OPC_CheckPredicate, 81,
+ OPC_CheckType, MVT::i32,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitInteger, MVT::i64, 0,
+ OPC_EmitInteger, MVT::i32, X86::sub_32bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0,
+ 1, MVT::i64, 3, 1, 0, 2,
+ 34,
+ OPC_CheckChild0Type, MVT::i8,
+ OPC_SwitchType , 8, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX16rr8), 0,
+ 1, MVT::i16, 1, 0,
+ 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr8), 0,
+ 1, MVT::i32, 1, 0,
+ 8, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rr8), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 24,
+ OPC_CheckChild0Type, MVT::i16,
+ OPC_SwitchType , 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr16), 0,
+ 1, MVT::i32, 1, 0,
+ 8, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rr16), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 12,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rr32), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 0,
+ 63|128,2, TARGET_OPCODE(ISD::ANY_EXTEND),
+ OPC_Scope, 90|128,1,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 11|128,1, TARGET_OPCODE(ISD::SRL),
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 8,
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 13,
+ OPC_CheckType, MVT::i16,
+ OPC_MoveParent,
+ OPC_SwitchType , 72, MVT::i32,
+ OPC_Scope, 34,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i16, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr8), 0,
+ 1, MVT::i32, 1, 4,
+ 34,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i16, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32_NOREXrr8), 0,
+ 1, MVT::i32, 1, 4,
+ 0,
+ 48, MVT::i64,
+ OPC_EmitInteger, MVT::i64, 0,
+ OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i16, 2, 0, 2,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit_hi,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 3, 4,
+ OPC_EmitNode, TARGET_OPCODE(X86::MOVZX32_NOREXrr8), 0,
+ 1, MVT::i32, 1, 5,
+ OPC_EmitInteger, MVT::i32, X86::sub_32bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0,
+ 1, MVT::i64, 3, 1, 6, 7,
+ 0,
+ 68, TARGET_OPCODE(X86ISD::SETCC_CARRY),
+ OPC_MoveChild, 0,
+ OPC_CheckInteger, 2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 42, MVT::i8,
+ OPC_MoveParent,
+ OPC_SwitchType , 11, MVT::i16,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETB_C16r), 0|OPFL_FlagInput,
+ 2, MVT::i16, MVT::i32, 0,
+ 11, MVT::i32,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETB_C32r), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 0,
+ 11, MVT::i64,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETB_C64r), 0|OPFL_FlagInput,
+ 2, MVT::i64, MVT::i32, 0,
+ 0,
+ 14, MVT::i16,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETB_C32r), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 0,
+ 0,
+ 0,
+ 96,
+ OPC_RecordChild0,
+ OPC_Scope, 20,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitInteger, MVT::i64, 0,
+ OPC_EmitInteger, MVT::i32, X86::sub_32bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0,
+ 1, MVT::i64, 3, 1, 0, 2,
+ 34,
+ OPC_CheckChild0Type, MVT::i8,
+ OPC_SwitchType , 8, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX16rr8), 0,
+ 1, MVT::i16, 1, 0,
+ 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr8), 0,
+ 1, MVT::i32, 1, 0,
+ 8, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rr8), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 36,
+ OPC_CheckChild0Type, MVT::i16,
+ OPC_SwitchType , 8, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rr16), 0,
+ 1, MVT::i64, 1, 0,
+ 20, MVT::i32,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
+ 1, MVT::i32, 0,
+ OPC_EmitInteger, MVT::i32, X86::sub_16bit,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
+ 1, MVT::i32, 3, 1, 0, 2,
+ 0,
+ 0,
+ 0,
+ 5|128,2, TARGET_OPCODE(ISD::SRA),
+ OPC_RecordChild0,
+ OPC_Scope, 124,
+ OPC_MoveChild, 1,
+ OPC_CheckType, MVT::i8,
+ OPC_Scope, 48,
+ OPC_CheckAndImm, 31,
+ OPC_RecordChild0,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR8rCL), 0|OPFL_FlagInput,
+ 2, MVT::i8, MVT::i32, 1, 0,
+ 12, MVT::i16,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR16rCL), 0|OPFL_FlagInput,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 12, MVT::i32,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR32rCL), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 0,
18,
- OPC_CheckChild4Type, MVT::i64,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitCopyToReg, 3, X86::RDI,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MASKMOVDQU64), 0|OPFL_Chain|OPFL_FlagInput,
- 0, 2, 1, 2,
+ OPC_CheckAndImm, 63,
+ OPC_RecordChild0,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR64rCL), 0|OPFL_FlagInput,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 49,
+ OPC_CheckInteger, 1,
+ OPC_MoveParent,
+ OPC_SwitchType , 9, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR8r1), 0,
+ 2, MVT::i8, MVT::i32, 1, 0,
+ 9, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR16r1), 0,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR32r1), 0,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR64r1), 0,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 0,
0,
- 15,
- OPC_CheckInteger, 14|128,4,
+ 3|128,1,
+ OPC_RecordChild1,
+ OPC_Scope, 66,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR8ri), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 2,
+ 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR16ri), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR32ri), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR64ri), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 60,
+ OPC_CheckChild1Type, MVT::i8,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR8rCL), 0|OPFL_FlagInput,
+ 2, MVT::i8, MVT::i32, 1, 0,
+ 12, MVT::i16,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR16rCL), 0|OPFL_FlagInput,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 12, MVT::i32,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR32rCL), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 12, MVT::i64,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR64rCL), 0|OPFL_FlagInput,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 0,
+ 0,
+ 0,
+ 114|128,1, TARGET_OPCODE(ISD::EXTRACT_VECTOR_ELT),
+ OPC_Scope, 50,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_CheckType, MVT::v4i32,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LFENCE), 0|OPFL_Chain,
- 0, 0,
- 15,
- OPC_CheckInteger, 20|128,4,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MFENCE), 0|OPFL_Chain,
- 0, 0,
- 29,
- OPC_CheckInteger, 96|128,4,
+ OPC_CheckType, MVT::i32,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VEXTRACTPSrr), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::EXTRACTPSrr), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 0,
+ 59|128,1,
+ OPC_RecordChild0,
+ OPC_Scope, 62,
+ OPC_CheckChild0Type, MVT::v2i64,
+ OPC_Scope, 17,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVPQIto64rr), 0,
+ 1, MVT::i64, 1, 0,
+ 39,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPEXTRQrr), 0,
+ 1, MVT::i64, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PEXTRQrr), 0,
+ 1, MVT::i64, 2, 0, 2,
+ 0,
+ 0,
+ 76,
+ OPC_CheckChild0Type, MVT::v4i32,
+ OPC_Scope, 31,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVPDI2DIrr), 0,
+ 1, MVT::i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVPDI2DIrr), 0,
+ 1, MVT::i32, 1, 0,
+ 0,
+ 39,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i32,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPEXTRDrr), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PEXTRDrr), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 0,
+ 0,
+ 21,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::f32,
+ OPC_EmitInteger, MVT::i32, X86::sub_ss,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 21,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::f64,
+ OPC_EmitInteger, MVT::i32, X86::sub_sd,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 0,
+ 0,
+ 62|128,1, TARGET_OPCODE(ISD::ROTL),
+ OPC_RecordChild0,
+ OPC_Scope, 53,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 1,
+ OPC_CheckType, MVT::i8,
OPC_MoveParent,
- OPC_RecordChild2,
- OPC_CheckChild2Type, MVT::i32,
- OPC_RecordChild3,
- OPC_RecordChild4,
- OPC_CheckPatternPredicate, 9,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitCopyToReg, 1, X86::EAX,
- OPC_EmitCopyToReg, 2, X86::ECX,
- OPC_EmitCopyToReg, 3, X86::EDX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MONITOR), 0|OPFL_Chain|OPFL_FlagInput,
- 0, 0,
- 23,
- OPC_CheckInteger, 97|128,4,
+ OPC_SwitchType , 9, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL8r1), 0,
+ 2, MVT::i8, MVT::i32, 1, 0,
+ 9, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL16r1), 0,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL32r1), 0,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL64r1), 0,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 0,
+ 3|128,1,
+ OPC_RecordChild1,
+ OPC_Scope, 66,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL8ri), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 2,
+ 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL16ri), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL32ri), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL64ri), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 60,
+ OPC_CheckChild1Type, MVT::i8,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL8rCL), 0|OPFL_FlagInput,
+ 2, MVT::i8, MVT::i32, 1, 0,
+ 12, MVT::i16,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL16rCL), 0|OPFL_FlagInput,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 12, MVT::i32,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL32rCL), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 12, MVT::i64,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL64rCL), 0|OPFL_FlagInput,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 0,
+ 0,
+ 0,
+ 62|128,1, TARGET_OPCODE(ISD::ROTR),
+ OPC_RecordChild0,
+ OPC_Scope, 53,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 1,
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_SwitchType , 9, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR8r1), 0,
+ 2, MVT::i8, MVT::i32, 1, 0,
+ 9, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR16r1), 0,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 9, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR32r1), 0,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 9, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR64r1), 0,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 0,
+ 3|128,1,
+ OPC_RecordChild1,
+ OPC_Scope, 66,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR8ri), 0,
+ 2, MVT::i8, MVT::i32, 2, 0, 2,
+ 12, MVT::i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR16ri), 0,
+ 2, MVT::i16, MVT::i32, 2, 0, 2,
+ 12, MVT::i32,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR32ri), 0,
+ 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 12, MVT::i64,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR64ri), 0,
+ 2, MVT::i64, MVT::i32, 2, 0, 2,
+ 0,
+ 60,
+ OPC_CheckChild1Type, MVT::i8,
+ OPC_SwitchType , 12, MVT::i8,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR8rCL), 0|OPFL_FlagInput,
+ 2, MVT::i8, MVT::i32, 1, 0,
+ 12, MVT::i16,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR16rCL), 0|OPFL_FlagInput,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 12, MVT::i32,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR32rCL), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 12, MVT::i64,
+ OPC_EmitCopyToReg, 1, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR64rCL), 0|OPFL_FlagInput,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 0,
+ 0,
+ 0,
+ 60, TARGET_OPCODE(X86ISD::SETCC_CARRY),
+ OPC_MoveChild, 0,
+ OPC_CheckInteger, 2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 11, MVT::i8,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETB_C8r), 0|OPFL_FlagInput,
+ 2, MVT::i8, MVT::i32, 0,
+ 11, MVT::i16,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETB_C16r), 0|OPFL_FlagInput,
+ 2, MVT::i16, MVT::i32, 0,
+ 11, MVT::i32,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETB_C32r), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 0,
+ 11, MVT::i64,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETB_C64r), 0|OPFL_FlagInput,
+ 2, MVT::i64, MVT::i32, 0,
+ 0,
+ 116|128,1, TARGET_OPCODE(X86ISD::SETCC),
+ OPC_MoveChild, 0,
+ OPC_Scope, 14,
+ OPC_CheckInteger, 4,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETEr), 0|OPFL_FlagInput,
+ 1, MVT::i8, 0,
+ 14,
+ OPC_CheckInteger, 9,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETNEr), 0|OPFL_FlagInput,
+ 1, MVT::i8, 0,
+ 14,
+ OPC_CheckInteger, 7,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETLr), 0|OPFL_FlagInput,
+ 1, MVT::i8, 0,
+ 14,
+ OPC_CheckInteger, 6,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETGEr), 0|OPFL_FlagInput,
+ 1, MVT::i8, 0,
+ 14,
+ OPC_CheckInteger, 8,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETLEr), 0|OPFL_FlagInput,
+ 1, MVT::i8, 0,
+ 14,
+ OPC_CheckInteger, 5,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETGr), 0|OPFL_FlagInput,
+ 1, MVT::i8, 0,
+ 14,
+ OPC_CheckInteger, 2,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETBr), 0|OPFL_FlagInput,
+ 1, MVT::i8, 0,
+ 14,
+ OPC_CheckInteger, 1,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETAEr), 0|OPFL_FlagInput,
+ 1, MVT::i8, 0,
+ 14,
+ OPC_CheckInteger, 3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETBEr), 0|OPFL_FlagInput,
+ 1, MVT::i8, 0,
+ 14,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETAr), 0|OPFL_FlagInput,
+ 1, MVT::i8, 0,
+ 14,
+ OPC_CheckInteger, 15,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETSr), 0|OPFL_FlagInput,
+ 1, MVT::i8, 0,
+ 14,
+ OPC_CheckInteger, 12,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETNSr), 0|OPFL_FlagInput,
+ 1, MVT::i8, 0,
+ 14,
+ OPC_CheckInteger, 14,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETPr), 0|OPFL_FlagInput,
+ 1, MVT::i8, 0,
+ 14,
+ OPC_CheckInteger, 11,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETNPr), 0|OPFL_FlagInput,
+ 1, MVT::i8, 0,
+ 14,
+ OPC_CheckInteger, 13,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETOr), 0|OPFL_FlagInput,
+ 1, MVT::i8, 0,
+ 14,
+ OPC_CheckInteger, 10,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_EmitCopyToReg, 0, X86::EFLAGS,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SETNOr), 0|OPFL_FlagInput,
+ 1, MVT::i8, 0,
+ 0,
+ 111, TARGET_OPCODE(X86ISD::SHLD),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 55,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_SwitchType , 13, MVT::i32,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32rri8), 0,
+ 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
+ 13, MVT::i16,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16rri8), 0,
+ 2, MVT::i16, MVT::i32, 3, 0, 1, 3,
+ 13, MVT::i64,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD64rri8), 0,
+ 2, MVT::i64, MVT::i32, 3, 0, 1, 3,
+ 0,
+ 49,
+ OPC_CheckChild2Type, MVT::i8,
+ OPC_SwitchType , 13, MVT::i32,
+ OPC_EmitCopyToReg, 2, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32rrCL), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 13, MVT::i16,
+ OPC_EmitCopyToReg, 2, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16rrCL), 0|OPFL_FlagInput,
+ 2, MVT::i16, MVT::i32, 2, 0, 1,
+ 13, MVT::i64,
+ OPC_EmitCopyToReg, 2, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD64rrCL), 0|OPFL_FlagInput,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 0,
+ 0,
+ 111, TARGET_OPCODE(X86ISD::SHRD),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 55,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_SwitchType , 13, MVT::i32,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32rri8), 0,
+ 2, MVT::i32, MVT::i32, 3, 0, 1, 3,
+ 13, MVT::i16,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16rri8), 0,
+ 2, MVT::i16, MVT::i32, 3, 0, 1, 3,
+ 13, MVT::i64,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD64rri8), 0,
+ 2, MVT::i64, MVT::i32, 3, 0, 1, 3,
+ 0,
+ 49,
+ OPC_CheckChild2Type, MVT::i8,
+ OPC_SwitchType , 13, MVT::i32,
+ OPC_EmitCopyToReg, 2, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32rrCL), 0|OPFL_FlagInput,
+ 2, MVT::i32, MVT::i32, 2, 0, 1,
+ 13, MVT::i16,
+ OPC_EmitCopyToReg, 2, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16rrCL), 0|OPFL_FlagInput,
+ 2, MVT::i16, MVT::i32, 2, 0, 1,
+ 13, MVT::i64,
+ OPC_EmitCopyToReg, 2, X86::CL,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD64rrCL), 0|OPFL_FlagInput,
+ 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 0,
+ 0,
+ 40|128,2, TARGET_OPCODE(X86ISD::Wrapper),
+ OPC_RecordChild0,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 50, TARGET_OPCODE(ISD::TargetConstantPool),
+ OPC_MoveParent,
+ OPC_SwitchType , 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32ri), 0,
+ 1, MVT::i32, 1, 0,
+ 35, MVT::i64,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 21,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 22,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri64i32), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 23,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri32), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 0,
+ 50, TARGET_OPCODE(ISD::TargetJumpTable),
+ OPC_MoveParent,
+ OPC_SwitchType , 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32ri), 0,
+ 1, MVT::i32, 1, 0,
+ 35, MVT::i64,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 21,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 22,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri64i32), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 23,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri32), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 0,
+ 23, TARGET_OPCODE(ISD::TargetGlobalTLSAddress),
+ OPC_MoveParent,
+ OPC_SwitchType , 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32ri), 0,
+ 1, MVT::i32, 1, 0,
+ 8, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 50, TARGET_OPCODE(ISD::TargetGlobalAddress),
+ OPC_MoveParent,
+ OPC_SwitchType , 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32ri), 0,
+ 1, MVT::i32, 1, 0,
+ 35, MVT::i64,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 21,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 22,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri64i32), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 23,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri32), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 0,
+ 50, TARGET_OPCODE(ISD::TargetExternalSymbol),
+ OPC_MoveParent,
+ OPC_SwitchType , 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32ri), 0,
+ 1, MVT::i32, 1, 0,
+ 35, MVT::i64,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 21,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 22,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri64i32), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 23,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri32), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 0,
+ 50, TARGET_OPCODE(ISD::TargetBlockAddress),
+ OPC_MoveParent,
+ OPC_SwitchType , 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32ri), 0,
+ 1, MVT::i32, 1, 0,
+ 35, MVT::i64,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 21,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 22,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri64i32), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 23,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri32), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 0,
+ 0,
+ 0|128,1, TARGET_OPCODE(ISD::Constant),
+ OPC_Scope, 44,
+ OPC_CheckInteger, 0,
+ OPC_SwitchType , 8, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64r0), 0,
+ 2, MVT::i64, MVT::i32, 0,
+ 8, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV8r0), 0,
+ 2, MVT::i8, MVT::i32, 0,
+ 8, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV16r0), 0,
+ 2, MVT::i16, MVT::i32, 0,
+ 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32r0), 0,
+ 2, MVT::i32, MVT::i32, 0,
+ 0,
+ 80,
+ OPC_RecordNode,
+ OPC_SwitchType , 39, MVT::i64,
+ OPC_Scope, 12,
+ OPC_CheckPredicate, 43,
+ OPC_EmitConvertToTarget, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri64i32), 0,
+ 1, MVT::i64, 1, 1,
+ 12,
+ OPC_CheckPredicate, 10,
+ OPC_EmitConvertToTarget, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri32), 0,
+ 1, MVT::i64, 1, 1,
+ 10,
+ OPC_EmitConvertToTarget, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri), 0,
+ 1, MVT::i64, 1, 1,
+ 0,
+ 10, MVT::i8,
+ OPC_EmitConvertToTarget, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV8ri), 0,
+ 1, MVT::i8, 1, 1,
+ 10, MVT::i16,
+ OPC_EmitConvertToTarget, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV16ri), 0,
+ 1, MVT::i16, 1, 1,
+ 10, MVT::i32,
+ OPC_EmitConvertToTarget, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32ri), 0,
+ 1, MVT::i32, 1, 1,
+ 0,
+ 0,
+ 66, TARGET_OPCODE(X86ISD::PEXTRW),
+ OPC_RecordChild0,
+ OPC_Scope, 39,
+ OPC_CheckChild0Type, MVT::v8i16,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPEXTRWri), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PEXTRWri), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 0,
+ 22,
+ OPC_CheckChild0Type, MVT::v4i16,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_RecordChild2,
- OPC_RecordChild3,
OPC_CheckPatternPredicate, 9,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitCopyToReg, 1, X86::ECX,
- OPC_EmitCopyToReg, 2, X86::EAX,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MWAIT), 0|OPFL_Chain|OPFL_FlagInput,
- 0, 0,
- 15,
- OPC_CheckInteger, 66|128,3,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PEXTRWri), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 0,
+ 40, TARGET_OPCODE(X86ISD::PEXTRB),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v16i8,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPEXTRBrr), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 13,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PEXTRBrr), 0,
+ 1, MVT::i32, 2, 0, 2,
+ 0,
+ 89, TARGET_OPCODE(X86ISD::INC),
+ OPC_RecordChild0,
+ OPC_SwitchType , 28, MVT::i16,
+ OPC_CheckChild0Type, MVT::i16,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::INC16r), 0,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 11,
+ OPC_CheckPatternPredicate, 4,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::INC64_16r), 0,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 0,
+ 28, MVT::i32,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::INC32r), 0,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 11,
+ OPC_CheckPatternPredicate, 4,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::INC64_32r), 0,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 0,
+ 11, MVT::i8,
+ OPC_CheckChild0Type, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::INC8r), 0,
+ 2, MVT::i8, MVT::i32, 1, 0,
+ 11, MVT::i64,
+ OPC_CheckChild0Type, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::INC64r), 0,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 0,
+ 89, TARGET_OPCODE(X86ISD::DEC),
+ OPC_RecordChild0,
+ OPC_SwitchType , 28, MVT::i16,
+ OPC_CheckChild0Type, MVT::i16,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC16r), 0,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 11,
+ OPC_CheckPatternPredicate, 4,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC64_16r), 0,
+ 2, MVT::i16, MVT::i32, 1, 0,
+ 0,
+ 28, MVT::i32,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC32r), 0,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 11,
+ OPC_CheckPatternPredicate, 4,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC64_32r), 0,
+ 2, MVT::i32, MVT::i32, 1, 0,
+ 0,
+ 11, MVT::i8,
+ OPC_CheckChild0Type, MVT::i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC8r), 0,
+ 2, MVT::i8, MVT::i32, 1, 0,
+ 11, MVT::i64,
+ OPC_CheckChild0Type, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC64r), 0,
+ 2, MVT::i64, MVT::i32, 1, 0,
+ 0,
+ 23, TARGET_OPCODE(ISD::BSWAP),
+ OPC_RecordChild0,
+ OPC_SwitchType , 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BSWAP32r), 0,
+ 1, MVT::i32, 1, 0,
+ 8, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::BSWAP64r), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 76, TARGET_OPCODE(ISD::SIGN_EXTEND),
+ OPC_RecordChild0,
+ OPC_Scope, 34,
+ OPC_CheckChild0Type, MVT::i8,
+ OPC_SwitchType , 8, MVT::i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX16rr8), 0,
+ 1, MVT::i16, 1, 0,
+ 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX32rr8), 0,
+ 1, MVT::i32, 1, 0,
+ 8, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rr8), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 24,
+ OPC_CheckChild0Type, MVT::i16,
+ OPC_SwitchType , 8, MVT::i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX32rr16), 0,
+ 1, MVT::i32, 1, 0,
+ 8, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rr16), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 12,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_CheckType, MVT::i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rr32), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 111|128,5, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_Scope, 92,
+ OPC_RecordChild0,
+ OPC_SwitchType , 59, MVT::i64,
+ OPC_Scope, 12,
+ OPC_CheckChild0Type, MVT::f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDto64rr), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckChild0Type, MVT::v1i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64from64rr), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckChild0Type, MVT::v2i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64from64rr), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckChild0Type, MVT::v4i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64from64rr), 0,
+ 1, MVT::i64, 1, 0,
+ 10,
+ OPC_CheckChild0Type, MVT::v8i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64from64rr), 0,
+ 1, MVT::i64, 1, 0,
+ 0,
+ 26, MVT::i32,
+ OPC_CheckChild0Type, MVT::f32,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVSS2DIrr), 0,
+ 1, MVT::i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSS2DIrr), 0,
+ 1, MVT::i32, 1, 0,
+ 0,
+ 0,
+ 86,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_SwitchType , 25, MVT::i64,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::f64,
+ OPC_CheckPatternPredicate, 14,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64toSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ 45, MVT::i32,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::f32,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVDI2SSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDI2SSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 0,
+ 92,
+ OPC_RecordChild0,
+ OPC_SwitchType , 59, MVT::f64,
+ OPC_Scope, 12,
+ OPC_CheckChild0Type, MVT::i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64toSDrr), 0,
+ 1, MVT::f64, 1, 0,
+ 10,
+ OPC_CheckChild0Type, MVT::v1i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2FR64rr), 0,
+ 1, MVT::f64, 1, 0,
+ 10,
+ OPC_CheckChild0Type, MVT::v2i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2FR64rr), 0,
+ 1, MVT::f64, 1, 0,
+ 10,
+ OPC_CheckChild0Type, MVT::v4i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2FR64rr), 0,
+ 1, MVT::f64, 1, 0,
+ 10,
+ OPC_CheckChild0Type, MVT::v8i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2FR64rr), 0,
+ 1, MVT::f64, 1, 0,
+ 0,
+ 26, MVT::f32,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVDI2SSrr), 0,
+ 1, MVT::f32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDI2SSrr), 0,
+ 1, MVT::f32, 1, 0,
+ 0,
+ 0,
+ 60,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::EXTRACT_VECTOR_ELT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2i64,
+ OPC_MoveChild, 1,
+ OPC_CheckInteger, 0,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_EMMS), 0|OPFL_Chain,
- 0, 0,
- 15,
- OPC_CheckInteger, 67|128,3,
+ OPC_CheckType, MVT::i64,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_FEMMS), 0|OPFL_Chain,
- 0, 0,
- 47,
- OPC_CheckInteger, 68|128,3,
+ OPC_SwitchType , 10, MVT::v1i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVDQ2Qrr), 0,
+ 1, MVT::v1i64, 1, 0,
+ 8, MVT::v2i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVDQ2Qrr), 0,
+ 1, MVT::v2i32, 1, 0,
+ 8, MVT::v4i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVDQ2Qrr), 0,
+ 1, MVT::v4i16, 1, 0,
+ 8, MVT::v8i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVDQ2Qrr), 0,
+ 1, MVT::v8i8, 1, 0,
+ 0,
+ 29|128,3,
+ OPC_RecordChild0,
+ OPC_Scope, 39,
+ OPC_CheckChild0Type, MVT::v4i32,
+ OPC_SwitchType , 5, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v8i16,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v16i8,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v4f32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v2f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 0,
+ 39,
+ OPC_CheckChild0Type, MVT::v8i16,
+ OPC_SwitchType , 5, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v4i32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v16i8,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v4f32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v2f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 0,
+ 39,
+ OPC_CheckChild0Type, MVT::v16i8,
+ OPC_SwitchType , 5, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v4i32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v8i16,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v4f32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v2f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 0,
+ 39,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_SwitchType , 5, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v4i32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v8i16,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v16i8,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v4f32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 0,
+ 39,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_SwitchType , 5, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v4i32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v8i16,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v16i8,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v2f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 0,
+ 39,
+ OPC_CheckChild0Type, MVT::v2i64,
+ OPC_SwitchType , 5, MVT::v4i32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v8i16,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v16i8,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v4f32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 5, MVT::v2f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CompleteMatch, 1, 0,
+
+ 0,
+ 19,
+ OPC_CheckChild0Type, MVT::v1i64,
+ OPC_SwitchType , 3, MVT::v8i8,
+ OPC_CompleteMatch, 1, 0,
+
+ 3, MVT::v4i16,
+ OPC_CompleteMatch, 1, 0,
+
+ 3, MVT::v2i32,
+ OPC_CompleteMatch, 1, 0,
+
+ 0,
+ 19,
+ OPC_CheckChild0Type, MVT::v2i32,
+ OPC_SwitchType , 3, MVT::v8i8,
+ OPC_CompleteMatch, 1, 0,
+
+ 3, MVT::v4i16,
+ OPC_CompleteMatch, 1, 0,
+
+ 3, MVT::v1i64,
+ OPC_CompleteMatch, 1, 0,
+
+ 0,
+ 19,
+ OPC_CheckChild0Type, MVT::v4i16,
+ OPC_SwitchType , 3, MVT::v8i8,
+ OPC_CompleteMatch, 1, 0,
+
+ 3, MVT::v2i32,
+ OPC_CompleteMatch, 1, 0,
+
+ 3, MVT::v1i64,
+ OPC_CompleteMatch, 1, 0,
+
+ 0,
+ 19,
+ OPC_CheckChild0Type, MVT::v8i8,
+ OPC_SwitchType , 3, MVT::v4i16,
+ OPC_CompleteMatch, 1, 0,
+
+ 3, MVT::v2i32,
+ OPC_CompleteMatch, 1, 0,
+
+ 3, MVT::v1i64,
+ OPC_CompleteMatch, 1, 0,
+
+ 0,
+ 44,
+ OPC_CheckChild0Type, MVT::i64,
+ OPC_SwitchType , 8, MVT::v1i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64to64rr), 0,
+ 1, MVT::v1i64, 1, 0,
+ 8, MVT::v2i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64to64rr), 0,
+ 1, MVT::v2i32, 1, 0,
+ 8, MVT::v4i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64to64rr), 0,
+ 1, MVT::v4i16, 1, 0,
+ 8, MVT::v8i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64to64rr), 0,
+ 1, MVT::v8i8, 1, 0,
+ 0,
+ 44,
+ OPC_CheckChild0Type, MVT::f64,
+ OPC_SwitchType , 8, MVT::v1i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVFR642Qrr), 0,
+ 1, MVT::v1i64, 1, 0,
+ 8, MVT::v2i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVFR642Qrr), 0,
+ 1, MVT::v2i32, 1, 0,
+ 8, MVT::v4i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVFR642Qrr), 0,
+ 1, MVT::v4i16, 1, 0,
+ 8, MVT::v8i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVFR642Qrr), 0,
+ 1, MVT::v8i8, 1, 0,
+ 0,
+ 0,
+ 0,
+ 106|128,1, TARGET_OPCODE(ISD::SIGN_EXTEND_INREG),
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_Scope, 49,
+ OPC_CheckValueType, MVT::i16,
OPC_MoveParent,
- OPC_RecordChild2,
- OPC_RecordChild3,
- OPC_RecordChild4,
- OPC_Scope, 18,
- OPC_CheckChild4Type, MVT::i32,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitCopyToReg, 3, X86::EDI,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MASKMOVQ), 0|OPFL_Chain|OPFL_FlagInput,
- 0, 2, 1, 2,
- 18,
- OPC_CheckChild4Type, MVT::i64,
- OPC_CheckPatternPredicate, 15,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitCopyToReg, 3, X86::RDI,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MASKMOVQ64), 0|OPFL_Chain|OPFL_FlagInput,
- 0, 2, 1, 2,
+ OPC_SwitchType , 20, MVT::i32,
+ OPC_EmitInteger, MVT::i32, X86::sub_16bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i16, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX32rr16), 0,
+ 1, MVT::i32, 1, 2,
+ 20, MVT::i64,
+ OPC_EmitInteger, MVT::i32, X86::sub_16bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i16, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rr16), 0,
+ 1, MVT::i64, 1, 2,
+ 0,
+ 25,
+ OPC_CheckValueType, MVT::i32,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::i64,
+ OPC_EmitInteger, MVT::i32, X86::sub_32bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i32, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rr32), 0,
+ 1, MVT::i64, 1, 2,
+ 23|128,1,
+ OPC_CheckValueType, MVT::i8,
+ OPC_MoveParent,
+ OPC_SwitchType , 20, MVT::i64,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rr8), 0,
+ 1, MVT::i64, 1, 2,
+ 60, MVT::i32,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX32rr8), 0,
+ 1, MVT::i32, 1, 2,
+ 34,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitInteger, MVT::i32, X86::GR32_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i32, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX32rr8), 0,
+ 1, MVT::i32, 1, 4,
+ 0,
+ 60, MVT::i16,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX16rr8), 0,
+ 1, MVT::i16, 1, 2,
+ 34,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::i16, 2, 0, 1,
+ OPC_EmitInteger, MVT::i32, X86::sub_8bit,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::i8, 2, 2, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX16rr8), 0,
+ 1, MVT::i16, 1, 4,
+ 0,
0,
0,
- 72|128,7, ISD::FADD,
- OPC_Scope, 19|128,1,
+ 7|128,10, TARGET_OPCODE(ISD::FADD),
+ OPC_Scope, 10|128,1,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 53,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_Scope, 49,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 19, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_Fp32m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_Fp64m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
0,
- 79,
- OPC_CheckPredicate, 48,
- OPC_Scope, 49,
- OPC_CheckPredicate, 49,
+ 73,
+ OPC_CheckPredicate, 32,
+ OPC_Scope, 45,
+ OPC_CheckPredicate, 39,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ OPC_SwitchType , 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_Fp64m32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::f80,
+ 17, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_Fp80m32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
- 24,
- OPC_CheckPredicate, 50,
+ 22,
+ OPC_CheckPredicate, 40,
OPC_MoveParent,
OPC_CheckType, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_Fp80m64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
0,
- 21|128,1,
+ 12|128,1,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 54,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_Scope, 50,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 19, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_Fp32m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_Fp64m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
0,
- 81,
- OPC_CheckPredicate, 48,
- OPC_Scope, 50,
- OPC_CheckPredicate, 49,
+ 75,
+ OPC_CheckPredicate, 32,
+ OPC_Scope, 46,
+ OPC_CheckPredicate, 39,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ OPC_SwitchType , 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_Fp64m32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
- 19, MVT::f80,
+ 17, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_Fp80m32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f80, 6, 2, 3, 4, 5, 6, 7,
0,
- 25,
- OPC_CheckPredicate, 50,
+ 23,
+ OPC_CheckPredicate, 40,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckType, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_Fp80m64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f80, 6, 2, 3, 4, 5, 6, 7,
0,
0,
- 119,
+ 105,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 24,
- OPC_MoveParent,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
- 24,
- OPC_MoveParent,
- OPC_CheckType, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_SwitchType , 42, MVT::f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
0,
- 122,
+ 105,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 25,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
- 27,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 2, 3, 4, 5, 6, 7,
- 25,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
- 27,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 2, 3, 4, 5, 6, 7,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 42, MVT::f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
0,
- 32|128,1,
+ 21|128,1,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, X86ISD::FILD,
+ OPC_CheckOpcode, TARGET_OPCODE(X86ISD::FILD),
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_Scope, 73,
+ OPC_Scope, 67,
OPC_CheckValueType, MVT::i16,
OPC_MoveParent,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 19, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_FpI16m32), 0|OPFL_Chain,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_FpI16m64), 0|OPFL_Chain,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::f80,
+ 17, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_FpI16m80), 0|OPFL_Chain,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
- 73,
+ 67,
OPC_CheckValueType, MVT::i32,
OPC_MoveParent,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 19, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_FpI32m32), 0|OPFL_Chain,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_FpI32m64), 0|OPFL_Chain,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::f80,
+ 17, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_FpI32m80), 0|OPFL_Chain,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
0,
- 33|128,1,
+ 22|128,1,
OPC_MoveChild, 0,
- OPC_CheckOpcode, X86ISD::FILD,
+ OPC_CheckOpcode, TARGET_OPCODE(X86ISD::FILD),
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_Scope, 74,
+ OPC_Scope, 68,
OPC_CheckValueType, MVT::i16,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 19, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_FpI16m32), 0|OPFL_Chain,
1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_FpI16m64), 0|OPFL_Chain,
1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
- 19, MVT::f80,
+ 17, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_FpI16m80), 0|OPFL_Chain,
1, MVT::f80, 6, 2, 3, 4, 5, 6, 7,
0,
- 74,
+ 68,
OPC_CheckValueType, MVT::i32,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 19, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_FpI32m32), 0|OPFL_Chain,
1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_FpI32m64), 0|OPFL_Chain,
1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
- 19, MVT::f80,
+ 17, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_FpI32m80), 0|OPFL_Chain,
1, MVT::f80, 6, 2, 3, 4, 5, 6, 7,
0,
0,
- 97,
+ 120|128,1,
OPC_RecordChild0,
+ OPC_Scope, 94,
+ OPC_RecordChild1,
+ OPC_SwitchType , 38, MVT::f32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_Fp32), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSSrr), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSSrr), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 0,
+ 38, MVT::f64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_Fp64), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDSDrr), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSDrr), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 0,
+ 9, MVT::f80,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_Fp80), 0,
+ 1, MVT::f80, 2, 0, 1,
+ 0,
+ 20|128,1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_SwitchType , 42, MVT::v4f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::v2f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 19, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 21|128,1,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_SwitchType , 26, MVT::f32,
- OPC_Scope, 11,
- OPC_CheckPatternPredicate, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_Fp32), 0,
- 1, MVT::f32, 2, 0, 1,
- 11,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 42, MVT::v4f32,
+ OPC_Scope, 19,
OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSSrr), 0,
- 1, MVT::f32, 2, 0, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 2, 3, 4, 5, 6, 7,
0,
- 26, MVT::f64,
+ 42, MVT::v2f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 19, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 2, 3, 4, 5, 6, 7,
+ 19, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 86,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 26, MVT::v4f32,
OPC_Scope, 11,
- OPC_CheckPatternPredicate, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_Fp64), 0,
- 1, MVT::f64, 2, 0, 1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
11,
OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDSDrr), 0,
- 1, MVT::f64, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
0,
- 9, MVT::f80,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD_Fp80), 0,
- 1, MVT::f80, 2, 0, 1,
- 11, MVT::v4f32,
+ 26, MVT::v2f64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 11, MVT::v8f32,
OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDPSrr), 0,
- 1, MVT::v4f32, 2, 0, 1,
- 11, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADDPDrr), 0,
- 1, MVT::v2f64, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDPSYrr), 0,
+ 1, MVT::v8f32, 2, 0, 1,
+ 11, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VADDPDYrr), 0,
+ 1, MVT::v4f64, 2, 0, 1,
0,
0,
- 114|128,5, ISD::FSUB,
+ 47|128,7, TARGET_OPCODE(ISD::FSUB),
OPC_RecordChild0,
- OPC_Scope, 12|128,5,
+ OPC_Scope, 95|128,4,
OPC_MoveChild, 1,
- OPC_SwitchOpcode , 1|128,3, ISD::LOAD,
+ OPC_SwitchOpcode , 94|128,2, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 53,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_Scope, 49,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 19, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB_Fp32m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB_Fp64m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
0,
- 79,
- OPC_CheckPredicate, 48,
- OPC_Scope, 49,
- OPC_CheckPredicate, 49,
+ 73,
+ OPC_CheckPredicate, 32,
+ OPC_Scope, 45,
+ OPC_CheckPredicate, 39,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ OPC_SwitchType , 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB_Fp64m32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::f80,
+ 17, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB_Fp80m32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
- 24,
- OPC_CheckPredicate, 50,
+ 22,
+ OPC_CheckPredicate, 40,
OPC_MoveParent,
OPC_CheckType, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB_Fp80m64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
- 53,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 49,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 19, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBR_Fp32m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBR_Fp64m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
0,
- 79,
- OPC_CheckPredicate, 48,
- OPC_Scope, 49,
- OPC_CheckPredicate, 49,
+ 73,
+ OPC_CheckPredicate, 32,
+ OPC_Scope, 45,
+ OPC_CheckPredicate, 39,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ OPC_SwitchType , 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBR_Fp64m32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::f80,
+ 17, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBR_Fp80m32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
- 24,
- OPC_CheckPredicate, 50,
+ 22,
+ OPC_CheckPredicate, 40,
OPC_MoveParent,
OPC_CheckType, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBR_Fp80m64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
- 108,
- OPC_CheckPredicate, 8,
- OPC_Scope, 24,
- OPC_MoveParent,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBSSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
- 24,
- OPC_MoveParent,
- OPC_CheckType, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBSDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 93,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_SwitchType , 42, MVT::f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSUBSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSUBSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
0,
0,
- 1|128,2, X86ISD::FILD,
+ 117|128,1, TARGET_OPCODE(X86ISD::FILD),
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_Scope, 124,
+ OPC_Scope, 118,
OPC_CheckValueType, MVT::i16,
OPC_MoveParent,
OPC_MoveParent,
- OPC_SwitchType , 38, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 36, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_Scope, 13,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB_FpI16m32), 0|OPFL_Chain,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
@@ -26716,10 +28732,10 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBR_FpI16m32), 0|OPFL_Chain,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
0,
- 38, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 36, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_Scope, 13,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB_FpI16m64), 0|OPFL_Chain,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
@@ -26727,9 +28743,9 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBR_FpI16m64), 0|OPFL_Chain,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
0,
- 36, MVT::f80,
+ 34, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_Scope, 13,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB_FpI16m80), 0|OPFL_Chain,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
@@ -26738,14 +28754,14 @@ SDNode *SelectCode(SDNode *N) {
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
0,
- 124,
+ 118,
OPC_CheckValueType, MVT::i32,
OPC_MoveParent,
OPC_MoveParent,
- OPC_SwitchType , 38, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 36, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_Scope, 13,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB_FpI32m32), 0|OPFL_Chain,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
@@ -26753,10 +28769,10 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBR_FpI32m32), 0|OPFL_Chain,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
0,
- 38, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 36, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_Scope, 13,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB_FpI32m64), 0|OPFL_Chain,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
@@ -26764,9 +28780,9 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBR_FpI32m64), 0|OPFL_Chain,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
0,
- 36, MVT::f80,
+ 34, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_Scope, 13,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB_FpI32m80), 0|OPFL_Chain,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
@@ -26777,533 +28793,737 @@ SDNode *SelectCode(SDNode *N) {
0,
0,
0,
- 96,
+ 94,
OPC_RecordChild1,
- OPC_SwitchType , 26, MVT::f32,
+ OPC_SwitchType , 38, MVT::f32,
OPC_Scope, 11,
- OPC_CheckPatternPredicate, 6,
+ OPC_CheckPatternPredicate, 7,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB_Fp32), 0,
1, MVT::f32, 2, 0, 1,
11,
OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSUBSSrr), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBSSrr), 0,
1, MVT::f32, 2, 0, 1,
0,
- 26, MVT::f64,
+ 38, MVT::f64,
OPC_Scope, 11,
- OPC_CheckPatternPredicate, 7,
+ OPC_CheckPatternPredicate, 8,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB_Fp64), 0,
1, MVT::f64, 2, 0, 1,
11,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSUBSDrr), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBSDrr), 0,
1, MVT::f64, 2, 0, 1,
0,
9, MVT::f80,
OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB_Fp80), 0,
1, MVT::f80, 2, 0, 1,
- 11, MVT::v4f32,
+ 0,
+ 20|128,1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_SwitchType , 42, MVT::v4f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSUBPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::v2f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSUBPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 19, MVT::v8f32,
OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBPSrr), 0,
- 1, MVT::v4f32, 2, 0, 1,
- 11, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBPDrr), 0,
- 1, MVT::v2f64, 2, 0, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSUBPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSUBPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 85,
+ OPC_RecordChild1,
+ OPC_SwitchType , 26, MVT::v4f32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSUBPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 26, MVT::v2f64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSUBPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SUBPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 11, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSUBPSYrr), 0,
+ 1, MVT::v8f32, 2, 0, 1,
+ 11, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSUBPDYrr), 0,
+ 1, MVT::v4f64, 2, 0, 1,
0,
0,
- 72|128,7, ISD::FMUL,
- OPC_Scope, 19|128,1,
+ 7|128,10, TARGET_OPCODE(ISD::FMUL),
+ OPC_Scope, 10|128,1,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 53,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_Scope, 49,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 19, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_Fp32m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_Fp64m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
0,
- 79,
- OPC_CheckPredicate, 48,
- OPC_Scope, 49,
- OPC_CheckPredicate, 49,
+ 73,
+ OPC_CheckPredicate, 32,
+ OPC_Scope, 45,
+ OPC_CheckPredicate, 39,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ OPC_SwitchType , 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_Fp64m32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::f80,
+ 17, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_Fp80m32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
- 24,
- OPC_CheckPredicate, 50,
+ 22,
+ OPC_CheckPredicate, 40,
OPC_MoveParent,
OPC_CheckType, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_Fp80m64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
0,
- 21|128,1,
+ 12|128,1,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 54,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_Scope, 50,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 19, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_Fp32m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_Fp64m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
0,
- 81,
- OPC_CheckPredicate, 48,
- OPC_Scope, 50,
- OPC_CheckPredicate, 49,
+ 75,
+ OPC_CheckPredicate, 32,
+ OPC_Scope, 46,
+ OPC_CheckPredicate, 39,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ OPC_SwitchType , 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_Fp64m32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
- 19, MVT::f80,
+ 17, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_Fp80m32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f80, 6, 2, 3, 4, 5, 6, 7,
0,
- 25,
- OPC_CheckPredicate, 50,
+ 23,
+ OPC_CheckPredicate, 40,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckType, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_Fp80m64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f80, 6, 2, 3, 4, 5, 6, 7,
0,
0,
- 119,
+ 105,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 24,
- OPC_MoveParent,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MULPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
- 24,
- OPC_MoveParent,
- OPC_CheckType, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MULPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_SwitchType , 42, MVT::f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
0,
- 122,
+ 105,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 25,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
- 27,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MULPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 2, 3, 4, 5, 6, 7,
- 25,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
- 27,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MULPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 2, 3, 4, 5, 6, 7,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 42, MVT::f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
0,
- 32|128,1,
+ 21|128,1,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, X86ISD::FILD,
+ OPC_CheckOpcode, TARGET_OPCODE(X86ISD::FILD),
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_Scope, 73,
+ OPC_Scope, 67,
OPC_CheckValueType, MVT::i16,
OPC_MoveParent,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 19, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_FpI16m32), 0|OPFL_Chain,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_FpI16m64), 0|OPFL_Chain,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::f80,
+ 17, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_FpI16m80), 0|OPFL_Chain,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
- 73,
+ 67,
OPC_CheckValueType, MVT::i32,
OPC_MoveParent,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 19, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_FpI32m32), 0|OPFL_Chain,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_FpI32m64), 0|OPFL_Chain,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::f80,
+ 17, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_FpI32m80), 0|OPFL_Chain,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
0,
- 33|128,1,
+ 22|128,1,
OPC_MoveChild, 0,
- OPC_CheckOpcode, X86ISD::FILD,
+ OPC_CheckOpcode, TARGET_OPCODE(X86ISD::FILD),
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_Scope, 74,
+ OPC_Scope, 68,
OPC_CheckValueType, MVT::i16,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 19, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_FpI16m32), 0|OPFL_Chain,
1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_FpI16m64), 0|OPFL_Chain,
1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
- 19, MVT::f80,
+ 17, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_FpI16m80), 0|OPFL_Chain,
1, MVT::f80, 6, 2, 3, 4, 5, 6, 7,
0,
- 74,
+ 68,
OPC_CheckValueType, MVT::i32,
OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 19, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_FpI32m32), 0|OPFL_Chain,
1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_FpI32m64), 0|OPFL_Chain,
1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
- 19, MVT::f80,
+ 17, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_FpI32m80), 0|OPFL_Chain,
1, MVT::f80, 6, 2, 3, 4, 5, 6, 7,
0,
0,
- 97,
+ 120|128,1,
OPC_RecordChild0,
+ OPC_Scope, 94,
+ OPC_RecordChild1,
+ OPC_SwitchType , 38, MVT::f32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_Fp32), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULSSrr), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSSrr), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 0,
+ 38, MVT::f64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_Fp64), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULSDrr), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSDrr), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 0,
+ 9, MVT::f80,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_Fp80), 0,
+ 1, MVT::f80, 2, 0, 1,
+ 0,
+ 20|128,1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_SwitchType , 42, MVT::v4f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MULPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::v2f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MULPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 19, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 21|128,1,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_SwitchType , 26, MVT::f32,
- OPC_Scope, 11,
- OPC_CheckPatternPredicate, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_Fp32), 0,
- 1, MVT::f32, 2, 0, 1,
- 11,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 42, MVT::v4f32,
+ OPC_Scope, 19,
OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSSrr), 0,
- 1, MVT::f32, 2, 0, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MULPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 2, 3, 4, 5, 6, 7,
0,
- 26, MVT::f64,
+ 42, MVT::v2f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MULPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 19, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 2, 3, 4, 5, 6, 7,
+ 19, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 86,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 26, MVT::v4f32,
OPC_Scope, 11,
- OPC_CheckPatternPredicate, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_Fp64), 0,
- 1, MVT::f64, 2, 0, 1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
11,
OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MULSDrr), 0,
- 1, MVT::f64, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MULPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
0,
- 9, MVT::f80,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MUL_Fp80), 0,
- 1, MVT::f80, 2, 0, 1,
- 11, MVT::v4f32,
+ 26, MVT::v2f64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MULPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 11, MVT::v8f32,
OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MULPSrr), 0,
- 1, MVT::v4f32, 2, 0, 1,
- 11, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MULPDrr), 0,
- 1, MVT::v2f64, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULPSYrr), 0,
+ 1, MVT::v8f32, 2, 0, 1,
+ 11, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMULPDYrr), 0,
+ 1, MVT::v4f64, 2, 0, 1,
0,
0,
- 114|128,5, ISD::FDIV,
+ 47|128,7, TARGET_OPCODE(ISD::FDIV),
OPC_RecordChild0,
- OPC_Scope, 12|128,5,
+ OPC_Scope, 95|128,4,
OPC_MoveChild, 1,
- OPC_SwitchOpcode , 1|128,3, ISD::LOAD,
+ OPC_SwitchOpcode , 94|128,2, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 53,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_Scope, 49,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 19, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIV_Fp32m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIV_Fp64m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
0,
- 79,
- OPC_CheckPredicate, 48,
- OPC_Scope, 49,
- OPC_CheckPredicate, 49,
+ 73,
+ OPC_CheckPredicate, 32,
+ OPC_Scope, 45,
+ OPC_CheckPredicate, 39,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ OPC_SwitchType , 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIV_Fp64m32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::f80,
+ 17, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIV_Fp80m32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
- 24,
- OPC_CheckPredicate, 50,
+ 22,
+ OPC_CheckPredicate, 40,
OPC_MoveParent,
OPC_CheckType, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIV_Fp80m64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
- 53,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ 49,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 19, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVR_Fp32m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVR_Fp64m), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
0,
- 79,
- OPC_CheckPredicate, 48,
- OPC_Scope, 49,
- OPC_CheckPredicate, 49,
+ 73,
+ OPC_CheckPredicate, 32,
+ OPC_Scope, 45,
+ OPC_CheckPredicate, 39,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ OPC_SwitchType , 19, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVR_Fp64m32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::f80,
+ 17, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVR_Fp80m32), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
- 24,
- OPC_CheckPredicate, 50,
+ 22,
+ OPC_CheckPredicate, 40,
OPC_MoveParent,
OPC_CheckType, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVR_Fp80m64), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
- 108,
- OPC_CheckPredicate, 8,
- OPC_Scope, 24,
- OPC_MoveParent,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVSSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
- 24,
- OPC_MoveParent,
- OPC_CheckType, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVSDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 93,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_SwitchType , 42, MVT::f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDIVSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDIVSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
0,
0,
- 1|128,2, X86ISD::FILD,
+ 117|128,1, TARGET_OPCODE(X86ISD::FILD),
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_MoveChild, 2,
- OPC_Scope, 124,
+ OPC_Scope, 118,
OPC_CheckValueType, MVT::i16,
OPC_MoveParent,
OPC_MoveParent,
- OPC_SwitchType , 38, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 36, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_Scope, 13,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIV_FpI16m32), 0|OPFL_Chain,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
@@ -27311,10 +29531,10 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVR_FpI16m32), 0|OPFL_Chain,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
0,
- 38, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 36, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_Scope, 13,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIV_FpI16m64), 0|OPFL_Chain,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
@@ -27322,9 +29542,9 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVR_FpI16m64), 0|OPFL_Chain,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
0,
- 36, MVT::f80,
+ 34, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_Scope, 13,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIV_FpI16m80), 0|OPFL_Chain,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
@@ -27333,14 +29553,14 @@ SDNode *SelectCode(SDNode *N) {
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
0,
0,
- 124,
+ 118,
OPC_CheckValueType, MVT::i32,
OPC_MoveParent,
OPC_MoveParent,
- OPC_SwitchType , 38, MVT::f32,
- OPC_CheckPatternPredicate, 6,
+ OPC_SwitchType , 36, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_Scope, 13,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIV_FpI32m32), 0|OPFL_Chain,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
@@ -27348,10 +29568,10 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVR_FpI32m32), 0|OPFL_Chain,
1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
0,
- 38, MVT::f64,
- OPC_CheckPatternPredicate, 7,
+ 36, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_Scope, 13,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIV_FpI32m64), 0|OPFL_Chain,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
@@ -27359,9 +29579,9 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVR_FpI32m64), 0|OPFL_Chain,
1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
0,
- 36, MVT::f80,
+ 34, MVT::f80,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_Scope, 13,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIV_FpI32m80), 0|OPFL_Chain,
1, MVT::f80, 6, 0, 3, 4, 5, 6, 7,
@@ -27372,2581 +29592,6137 @@ SDNode *SelectCode(SDNode *N) {
0,
0,
0,
- 96,
+ 94,
OPC_RecordChild1,
- OPC_SwitchType , 26, MVT::f32,
+ OPC_SwitchType , 38, MVT::f32,
OPC_Scope, 11,
- OPC_CheckPatternPredicate, 6,
+ OPC_CheckPatternPredicate, 7,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIV_Fp32), 0,
1, MVT::f32, 2, 0, 1,
11,
OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDIVSSrr), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVSSrr), 0,
1, MVT::f32, 2, 0, 1,
0,
- 26, MVT::f64,
+ 38, MVT::f64,
OPC_Scope, 11,
- OPC_CheckPatternPredicate, 7,
+ OPC_CheckPatternPredicate, 8,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIV_Fp64), 0,
1, MVT::f64, 2, 0, 1,
11,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDIVSDrr), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVSDrr), 0,
1, MVT::f64, 2, 0, 1,
0,
9, MVT::f80,
OPC_MorphNodeTo, TARGET_OPCODE(X86::DIV_Fp80), 0,
1, MVT::f80, 2, 0, 1,
- 11, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVPSrr), 0,
- 1, MVT::v4f32, 2, 0, 1,
- 11, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVPDrr), 0,
- 1, MVT::v2f64, 2, 0, 1,
0,
- 0,
- 94, ISD::BRIND,
- OPC_RecordNode,
- OPC_Scope, 60,
+ 20|128,1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_SwitchType , 21, MVT::i32,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_SwitchType , 42, MVT::v4f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDIVPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::v2f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDIVPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 19, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::JMP32m), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
- 23, MVT::i64,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDIVPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::JMP64m), 0|OPFL_Chain|OPFL_MemRefs,
- 0, 5, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDIVPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 0, 3, 4, 5, 6, 7,
0,
- 29,
+ 85,
OPC_RecordChild1,
- OPC_Scope, 12,
- OPC_CheckChild1Type, MVT::i32,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::JMP32r), 0|OPFL_Chain,
- 0, 1, 1,
- 12,
- OPC_CheckChild1Type, MVT::i64,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::JMP64r), 0|OPFL_Chain,
- 0, 1, 1,
+ OPC_SwitchType , 26, MVT::v4f32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDIVPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 26, MVT::v2f64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDIVPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::DIVPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 11, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDIVPSYrr), 0,
+ 1, MVT::v8f32, 2, 0, 1,
+ 11, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VDIVPDYrr), 0,
+ 1, MVT::v4f64, 2, 0, 1,
0,
0,
- 4|128,2, X86ISD::CALL,
- OPC_RecordNode,
- OPC_CaptureFlagInput,
- OPC_Scope, 86,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ 87|128,1, TARGET_OPCODE(ISD::SINT_TO_FP),
+ OPC_Scope, 120,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
- OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_SwitchType , 21, MVT::i32,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CALL32m), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs|OPFL_Variadic1,
- 0, 5, 3, 4, 5, 6, 7,
- 49, MVT::i64,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CALL64m), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs|OPFL_Variadic1,
- 0, 5, 3, 4, 5, 6, 7,
- 20,
- OPC_CheckPatternPredicate, 17,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::WINCALL64m), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs|OPFL_Variadic1,
- 0, 5, 3, 4, 5, 6, 7,
+ OPC_SwitchType , 50, MVT::i32,
+ OPC_Scope, 23,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::f32,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSI2SSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ 23,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSI2SDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
0,
- 0,
- 39|128,1,
- OPC_RecordChild1,
- OPC_Scope, 119,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 46, ISD::TargetGlobalAddress,
- OPC_SwitchType , 11, MVT::i32,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CALLpcrel32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 29, MVT::i64,
- OPC_MoveParent,
- OPC_Scope, 12,
- OPC_CheckPatternPredicate, 16,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CALL64pcrel32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 17,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::WINCALL64pcrel32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 0,
- 0,
- 46, ISD::TargetExternalSymbol,
- OPC_SwitchType , 11, MVT::i32,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CALLpcrel32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 29, MVT::i64,
- OPC_MoveParent,
- OPC_Scope, 12,
- OPC_CheckPatternPredicate, 16,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CALL64pcrel32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 12,
- OPC_CheckPatternPredicate, 17,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::WINCALL64pcrel32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 0,
- 0,
- 17, ISD::Constant,
- OPC_CheckType, MVT::i32,
+ 54, MVT::i64,
+ OPC_Scope, 25,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 18,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CALLpcrel32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 2,
+ OPC_CheckType, MVT::f32,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSI2SS64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ 25,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSI2SD64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
0,
- 12,
- OPC_CheckChild1Type, MVT::i32,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CALL32r), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
- 30,
- OPC_CheckChild1Type, MVT::i64,
- OPC_Scope, 12,
- OPC_CheckPatternPredicate, 16,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CALL64r), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
+ 0,
+ 91,
+ OPC_RecordChild0,
+ OPC_Scope, 28,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_SwitchType , 10, MVT::f32,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSI2SSrr), 0,
+ 1, MVT::f32, 1, 0,
+ 10, MVT::f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSI2SDrr), 0,
+ 1, MVT::f64, 1, 0,
+ 0,
+ 28,
+ OPC_CheckChild0Type, MVT::i64,
+ OPC_SwitchType , 10, MVT::f32,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSI2SS64rr), 0,
+ 1, MVT::f32, 1, 0,
+ 10, MVT::f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSI2SD64rr), 0,
+ 1, MVT::f64, 1, 0,
+ 0,
+ 14,
+ OPC_CheckChild0Type, MVT::v4i32,
+ OPC_CheckType, MVT::v4f32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTDQ2PSrr), 0,
+ 1, MVT::v4f32, 1, 0,
+ 14,
+ OPC_CheckChild0Type, MVT::v2i32,
+ OPC_CheckType, MVT::v2f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPI2PDrr), 0,
+ 1, MVT::v2f64, 1, 0,
+ 0,
+ 0,
+ 111, TARGET_OPCODE(ISD::FP_ROUND),
+ OPC_Scope, 37,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::f64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::f32,
+ OPC_CheckPatternPredicate, 19,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSD2SSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ 70,
+ OPC_RecordChild0,
+ OPC_SwitchType , 49, MVT::f32,
+ OPC_Scope, 16,
+ OPC_CheckChild0Type, MVT::f64,
+ OPC_CheckPatternPredicate, 7,
+ OPC_EmitInteger, MVT::i32, X86::RFP32RegClassID,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 16,
+ OPC_CheckChild0Type, MVT::f80,
+ OPC_CheckPatternPredicate, 7,
+ OPC_EmitInteger, MVT::i32, X86::RFP32RegClassID,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::f32, 2, 0, 1,
12,
- OPC_CheckPatternPredicate, 17,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::WINCALL64r), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
- 0, 1, 1,
+ OPC_CheckChild0Type, MVT::f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSD2SSrr), 0,
+ 1, MVT::f32, 1, 0,
0,
+ 14, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
+ OPC_EmitInteger, MVT::i32, X86::RFP64RegClassID,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::f64, 2, 0, 1,
0,
0,
- 0|128,1, X86ISD::BSF,
- OPC_Scope, 88,
+ 23|128,2, TARGET_OPCODE(X86ISD::FAND),
+ OPC_Scope, 107,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_SwitchType , 42, MVT::f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsANDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsANDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsANDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsANDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 107,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 24,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BSF16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 5, 2, 3, 4, 5, 6,
- 24,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BSF32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 5, 2, 3, 4, 5, 6,
- 26,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BSF64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 5, 2, 3, 4, 5, 6,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 42, MVT::f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsANDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsANDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsANDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsANDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
0,
- 36,
+ 60,
OPC_RecordChild0,
- OPC_SwitchType , 9, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BSF16rr), 0,
- 2, MVT::i16, MVT::i32, 1, 0,
- 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BSF32rr), 0,
- 2, MVT::i32, MVT::i32, 1, 0,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BSF64rr), 0,
- 2, MVT::i64, MVT::i32, 1, 0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 26, MVT::f32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsANDPSrr), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsANDPSrr), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 0,
+ 26, MVT::f64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsANDPDrr), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsANDPDrr), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 0,
0,
0,
- 0|128,1, X86ISD::BSR,
- OPC_Scope, 88,
+ 23|128,2, TARGET_OPCODE(X86ISD::FOR),
+ OPC_Scope, 107,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_SwitchType , 42, MVT::f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 107,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 24,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BSR16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 5, 2, 3, 4, 5, 6,
- 24,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BSR32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 5, 2, 3, 4, 5, 6,
- 26,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BSR64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 5, 2, 3, 4, 5, 6,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_SwitchType , 42, MVT::f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
0,
- 36,
+ 60,
OPC_RecordChild0,
- OPC_SwitchType , 9, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BSR16rr), 0,
- 2, MVT::i16, MVT::i32, 1, 0,
- 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BSR32rr), 0,
- 2, MVT::i32, MVT::i32, 1, 0,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BSR64rr), 0,
- 2, MVT::i64, MVT::i32, 1, 0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 26, MVT::f32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsORPSrr), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsORPSrr), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 0,
+ 26, MVT::f64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsORPDrr), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsORPDrr), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 0,
0,
0,
- 115|128,2, ISD::ADDE,
- OPC_CaptureFlagInput,
- OPC_Scope, 100,
+ 23|128,2, TARGET_OPCODE(X86ISD::FXOR),
+ OPC_Scope, 107,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_SwitchType , 19, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC8rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i8, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i16, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_SwitchType , 42, MVT::f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsXORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsXORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsXORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsXORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
0,
- 100,
+ 107,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 19, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC8rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i8, 6, 2, 3, 4, 5, 6, 7,
- 19, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i16, 6, 2, 3, 4, 5, 6, 7,
- 19, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 19, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i64, 6, 2, 3, 4, 5, 6, 7,
+ OPC_SwitchType , 42, MVT::f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsXORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsXORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsXORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsXORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
+ 0,
0,
- 36|128,1,
+ 60,
OPC_RecordChild0,
OPC_RecordChild1,
- OPC_Scope, 111,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 44,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC16ri8), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i16, 2, 0, 2,
- 11, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC32ri8), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 2,
- 11, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC64ri8), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i64, 2, 0, 2,
- 0,
- 16,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC64ri32), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i64, 2, 0, 2,
- 42,
- OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i8,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC8ri), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i8, 2, 0, 2,
- 11, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC16ri), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i16, 2, 0, 2,
- 11, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC32ri), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 2,
- 0,
+ OPC_SwitchType , 26, MVT::f32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsXORPSrr), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsXORPSrr), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 0,
+ 26, MVT::f64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VFsXORPDrr), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsXORPDrr), 0,
+ 1, MVT::f64, 2, 0, 1,
0,
- 11,
- OPC_CheckType, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC8rr), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i8, 2, 0, 1,
- 11,
- OPC_CheckType, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC16rr), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i16, 2, 0, 1,
- 11,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC32rr), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 1,
- 11,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADC64rr), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i64, 2, 0, 1,
0,
0,
- 13|128,2, ISD::SUBE,
- OPC_CaptureFlagInput,
+ 20|128,3, TARGET_OPCODE(X86ISD::FMAX),
OPC_RecordChild0,
- OPC_Scope, 99,
+ OPC_Scope, 104,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_SwitchType , 19, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB8rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i8, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB16rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i16, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::i32,
+ OPC_SwitchType , 42, MVT::f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 59,
+ OPC_RecordChild1,
+ OPC_SwitchType , 26, MVT::f32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXSSrr), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXSSrr), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 0,
+ 26, MVT::f64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXSDrr), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXSDrr), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 0,
+ 0,
+ 20|128,1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_SwitchType , 42, MVT::v4f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::v2f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 19, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB32rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::i64,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB64rm), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 0, 3, 4, 5, 6, 7,
0,
- 35|128,1,
+ 85,
OPC_RecordChild1,
- OPC_Scope, 111,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 44,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB16ri8), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i16, 2, 0, 2,
- 11, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB32ri8), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 2,
- 11, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB64ri8), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i64, 2, 0, 2,
- 0,
- 16,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB64ri32), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i64, 2, 0, 2,
- 42,
- OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i8,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB8ri), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i8, 2, 0, 2,
- 11, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB16ri), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i16, 2, 0, 2,
- 11, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB32ri), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 2,
- 0,
+ OPC_SwitchType , 26, MVT::v4f32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
0,
- 11,
- OPC_CheckType, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB8rr), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i8, 2, 0, 1,
- 11,
- OPC_CheckType, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB16rr), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i16, 2, 0, 1,
- 11,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB32rr), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 1,
- 11,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SBB64rr), 0|OPFL_FlagInput|OPFL_FlagOutput,
- 1, MVT::i64, 2, 0, 1,
+ 26, MVT::v2f64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 11, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXPSYrr), 0,
+ 1, MVT::v8f32, 2, 0, 1,
+ 11, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMAXPDYrr), 0,
+ 1, MVT::v4f64, 2, 0, 1,
0,
0,
- 92|128,1, ISD::ADDC,
- OPC_Scope, 58,
- OPC_RecordChild0,
+ 20|128,3, TARGET_OPCODE(X86ISD::FMIN),
+ OPC_RecordChild0,
+ OPC_Scope, 104,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_SwitchType , 42, MVT::f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MINSSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MINSDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 59,
+ OPC_RecordChild1,
+ OPC_SwitchType , 26, MVT::f32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINSSrr), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MINSSrr), 0,
+ 1, MVT::f32, 2, 0, 1,
+ 0,
+ 26, MVT::f64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINSDrr), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MINSDrr), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 0,
+ 0,
+ 20|128,1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_SwitchType , 19, MVT::i32,
+ OPC_SwitchType , 42, MVT::v4f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MINPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 42, MVT::v2f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 19,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MINPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 19, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rm), 0|OPFL_Chain|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::i64,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rm), 0|OPFL_Chain|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 85,
+ OPC_RecordChild1,
+ OPC_SwitchType , 26, MVT::v4f32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MINPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 26, MVT::v2f64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MINPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 11, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINPSYrr), 0,
+ 1, MVT::v8f32, 2, 0, 1,
+ 11, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMINPDYrr), 0,
+ 1, MVT::v4f64, 2, 0, 1,
0,
- 58,
+ 0,
+ 91|128,2, TARGET_OPCODE(ISD::FSQRT),
+ OPC_Scope, 55,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
- OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
+ OPC_SwitchType , 18, MVT::f32,
+ OPC_CheckPatternPredicate, 24,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTSSm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ 18, MVT::f64,
+ OPC_CheckPatternPredicate, 19,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTSDm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 65,
+ OPC_RecordChild0,
+ OPC_SwitchType , 24, MVT::f32,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRT_Fp32), 0,
+ 1, MVT::f32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTSSr), 0,
+ 1, MVT::f32, 1, 0,
+ 0,
+ 24, MVT::f64,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRT_Fp64), 0,
+ 1, MVT::f64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTSDr), 0,
+ 1, MVT::f64, 1, 0,
+ 0,
+ 8, MVT::f80,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRT_Fp80), 0,
+ 1, MVT::f80, 1, 0,
+ 0,
+ 13|128,1,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
OPC_RecordChild1,
- OPC_SwitchType , 19, MVT::i64,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_SwitchType , 40, MVT::v4f32,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTPSm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTPSm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 40, MVT::v2f64,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTPDm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTPDm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 18, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rm), 0|OPFL_Chain|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i64, 6, 2, 3, 4, 5, 6, 7,
- 19, MVT::i32,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTPSYm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 5, 2, 3, 4, 5, 6,
+ 18, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rm), 0|OPFL_Chain|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTPDYm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 79,
+ OPC_RecordChild0,
+ OPC_SwitchType , 24, MVT::v4f32,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTPSr), 0,
+ 1, MVT::v4f32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTPSr), 0,
+ 1, MVT::v4f32, 1, 0,
+ 0,
+ 24, MVT::v2f64,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTPDr), 0,
+ 1, MVT::v2f64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTPDr), 0,
+ 1, MVT::v2f64, 1, 0,
+ 0,
+ 10, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTPSYr), 0,
+ 1, MVT::v8f32, 1, 0,
+ 10, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSQRTPDYr), 0,
+ 1, MVT::v4f64, 1, 0,
0,
- 99,
+ 0,
+ 44|128,1, TARGET_OPCODE(X86ISD::FRSQRT),
+ OPC_Scope, 33,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::f32,
+ OPC_CheckPatternPredicate, 24,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::RSQRTSSm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ 13,
OPC_RecordChild0,
+ OPC_CheckType, MVT::f32,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::RSQRTSSr), 0,
+ 1, MVT::f32, 1, 0,
+ 79,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
OPC_RecordChild1,
- OPC_Scope, 70,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 31,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri8), 0|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 2,
- 11, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64ri8), 0|OPFL_FlagOutput,
- 1, MVT::i64, 2, 0, 2,
- 0,
- 16,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64ri32), 0|OPFL_FlagOutput,
- 1, MVT::i64, 2, 0, 2,
- 14,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri), 0|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 2,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_SwitchType , 40, MVT::v4f32,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRSQRTPSm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::RSQRTPSm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
0,
- 11,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rr), 0|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 1,
- 11,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rr), 0|OPFL_FlagOutput,
- 1, MVT::i64, 2, 0, 1,
+ 18, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRSQRTPSYm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 41,
+ OPC_RecordChild0,
+ OPC_SwitchType , 24, MVT::v4f32,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRSQRTPSr), 0,
+ 1, MVT::v4f32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::RSQRTPSr), 0,
+ 1, MVT::v4f32, 1, 0,
+ 0,
+ 10, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRSQRTPSYr), 0,
+ 1, MVT::v8f32, 1, 0,
0,
0,
- 30|128,1, ISD::SUBC,
- OPC_RecordChild0,
- OPC_Scope, 57,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ 44|128,1, TARGET_OPCODE(X86ISD::FRCP),
+ OPC_Scope, 33,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
- OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_SwitchType , 19, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32rm), 0|OPFL_Chain|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 19, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64rm), 0|OPFL_Chain|OPFL_FlagOutput|OPFL_MemRefs,
- 1, MVT::i64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_CheckType, MVT::f32,
+ OPC_CheckPatternPredicate, 24,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::RCPSSm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ 13,
+ OPC_RecordChild0,
+ OPC_CheckType, MVT::f32,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::RCPSSr), 0,
+ 1, MVT::f32, 1, 0,
+ 79,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_SwitchType , 40, MVT::v4f32,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRCPPSm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::RCPPSm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 18, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRCPPSYm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 5, 2, 3, 4, 5, 6,
0,
- 96,
+ 41,
+ OPC_RecordChild0,
+ OPC_SwitchType , 24, MVT::v4f32,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRCPPSr), 0,
+ 1, MVT::v4f32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::RCPPSr), 0,
+ 1, MVT::v4f32, 1, 0,
+ 0,
+ 10, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VRCPPSYr), 0,
+ 1, MVT::v8f32, 1, 0,
+ 0,
+ 0,
+ 114, TARGET_OPCODE(ISD::FP_EXTEND),
+ OPC_Scope, 37,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
OPC_RecordChild1,
- OPC_Scope, 68,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 31,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32ri8), 0|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 2,
- 11, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64ri8), 0|OPFL_FlagOutput,
- 1, MVT::i64, 2, 0, 2,
- 0,
- 29,
- OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32ri), 0|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 2,
- 11, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64ri32), 0|OPFL_FlagOutput,
- 1, MVT::i64, 2, 0, 2,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::f32,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSS2SDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ 73,
+ OPC_RecordChild0,
+ OPC_Scope, 50,
+ OPC_CheckChild0Type, MVT::f32,
+ OPC_SwitchType , 28, MVT::f64,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 7,
+ OPC_EmitInteger, MVT::i32, X86::RFP64RegClassID,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::f64, 2, 0, 1,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSS2SDrr), 0,
+ 1, MVT::f64, 1, 0,
0,
+ 14, MVT::f80,
+ OPC_CheckPatternPredicate, 7,
+ OPC_EmitInteger, MVT::i32, X86::RFP80RegClassID,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::f80, 2, 0, 1,
0,
+ 18,
+ OPC_CheckChild0Type, MVT::f64,
+ OPC_CheckType, MVT::f80,
+ OPC_CheckPatternPredicate, 8,
+ OPC_EmitInteger, MVT::i32, X86::RFP80RegClassID,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
+ 1, MVT::f80, 2, 0, 1,
+ 0,
+ 0,
+ 70|128,1, TARGET_OPCODE(X86ISD::FILD),
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_MoveChild, 2,
+ OPC_Scope, 63,
+ OPC_CheckValueType, MVT::i16,
+ OPC_MoveParent,
+ OPC_SwitchType , 18, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp16m32), 0|OPFL_Chain,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ 18, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp16m64), 0|OPFL_Chain,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ 16, MVT::f80,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp16m80), 0|OPFL_Chain,
+ 1, MVT::f80, 5, 2, 3, 4, 5, 6,
+ 0,
+ 63,
+ OPC_CheckValueType, MVT::i32,
+ OPC_MoveParent,
+ OPC_SwitchType , 18, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp32m32), 0|OPFL_Chain,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ 18, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp32m64), 0|OPFL_Chain,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ 16, MVT::f80,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp32m80), 0|OPFL_Chain,
+ 1, MVT::f80, 5, 2, 3, 4, 5, 6,
+ 0,
+ 63,
+ OPC_CheckValueType, MVT::i64,
+ OPC_MoveParent,
+ OPC_SwitchType , 18, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp64m32), 0|OPFL_Chain,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ 18, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp64m64), 0|OPFL_Chain,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ 16, MVT::f80,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp64m80), 0|OPFL_Chain,
+ 1, MVT::f80, 5, 2, 3, 4, 5, 6,
+ 0,
+ 0,
+ 72, TARGET_OPCODE(X86ISD::FLD),
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_MoveChild, 2,
+ OPC_Scope, 21,
+ OPC_CheckValueType, MVT::f32,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::f32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp32m), 0|OPFL_Chain,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ 21,
+ OPC_CheckValueType, MVT::f64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::f64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp64m), 0|OPFL_Chain,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ 21,
+ OPC_CheckValueType, MVT::f80,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::f80,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp80m), 0|OPFL_Chain,
+ 1, MVT::f80, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25, TARGET_OPCODE(X86ISD::FILD_FLAG),
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_MoveChild, 2,
+ OPC_CheckValueType, MVT::i64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::f64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp64m64), 0|OPFL_Chain|OPFL_FlagOutput,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ 94|128,1, TARGET_OPCODE(ISD::ConstantFP),
+ OPC_SwitchType , 78, MVT::f32,
+ OPC_Scope, 11,
+ OPC_CheckPredicate, 82,
+ OPC_CheckPatternPredicate, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp032), 0,
+ 1, MVT::f32, 0,
11,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32rr), 0|OPFL_FlagOutput,
- 1, MVT::i32, 2, 0, 1,
+ OPC_CheckPredicate, 83,
+ OPC_CheckPatternPredicate, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp132), 0,
+ 1, MVT::f32, 0,
11,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64rr), 0|OPFL_FlagOutput,
- 1, MVT::i64, 2, 0, 1,
+ OPC_CheckPredicate, 84,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsFLD0SS), 0,
+ 1, MVT::f32, 0,
+ 19,
+ OPC_CheckPredicate, 85,
+ OPC_CheckPatternPredicate, 7,
+ OPC_EmitNode, TARGET_OPCODE(X86::LD_Fp032), 0,
+ 1, MVT::f32, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp32), 0,
+ 1, MVT::f32, 1, 0,
+ 19,
+ OPC_CheckPredicate, 86,
+ OPC_CheckPatternPredicate, 7,
+ OPC_EmitNode, TARGET_OPCODE(X86::LD_Fp132), 0,
+ 1, MVT::f32, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp32), 0,
+ 1, MVT::f32, 1, 0,
+ 0,
+ 78, MVT::f64,
+ OPC_Scope, 11,
+ OPC_CheckPredicate, 82,
+ OPC_CheckPatternPredicate, 8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp064), 0,
+ 1, MVT::f64, 0,
+ 11,
+ OPC_CheckPredicate, 83,
+ OPC_CheckPatternPredicate, 8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp164), 0,
+ 1, MVT::f64, 0,
+ 11,
+ OPC_CheckPredicate, 82,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FsFLD0SD), 0,
+ 1, MVT::f64, 0,
+ 19,
+ OPC_CheckPredicate, 85,
+ OPC_CheckPatternPredicate, 8,
+ OPC_EmitNode, TARGET_OPCODE(X86::LD_Fp064), 0,
+ 1, MVT::f64, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp64), 0,
+ 1, MVT::f64, 1, 0,
+ 19,
+ OPC_CheckPredicate, 86,
+ OPC_CheckPatternPredicate, 8,
+ OPC_EmitNode, TARGET_OPCODE(X86::LD_Fp164), 0,
+ 1, MVT::f64, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp64), 0,
+ 1, MVT::f64, 1, 0,
0,
+ 58, MVT::f80,
+ OPC_Scope, 9,
+ OPC_CheckPredicate, 82,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp080), 0,
+ 1, MVT::f80, 0,
+ 9,
+ OPC_CheckPredicate, 83,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp180), 0,
+ 1, MVT::f80, 0,
+ 17,
+ OPC_CheckPredicate, 85,
+ OPC_EmitNode, TARGET_OPCODE(X86::LD_Fp080), 0,
+ 1, MVT::f80, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp80), 0,
+ 1, MVT::f80, 1, 0,
+ 17,
+ OPC_CheckPredicate, 86,
+ OPC_EmitNode, TARGET_OPCODE(X86::LD_Fp180), 0,
+ 1, MVT::f80, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp80), 0,
+ 1, MVT::f80, 1, 0,
+ 0,
+ 0,
+ 37, TARGET_OPCODE(ISD::FNEG),
+ OPC_RecordChild0,
+ OPC_SwitchType , 10, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp32), 0,
+ 1, MVT::f32, 1, 0,
+ 10, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp64), 0,
+ 1, MVT::f64, 1, 0,
+ 8, MVT::f80,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp80), 0,
+ 1, MVT::f80, 1, 0,
+ 0,
+ 37, TARGET_OPCODE(ISD::FABS),
+ OPC_RecordChild0,
+ OPC_SwitchType , 10, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ABS_Fp32), 0,
+ 1, MVT::f32, 1, 0,
+ 10, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ABS_Fp64), 0,
+ 1, MVT::f64, 1, 0,
+ 8, MVT::f80,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ABS_Fp80), 0,
+ 1, MVT::f80, 1, 0,
+ 0,
+ 37, TARGET_OPCODE(ISD::FSIN),
+ OPC_RecordChild0,
+ OPC_SwitchType , 10, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SIN_Fp32), 0,
+ 1, MVT::f32, 1, 0,
+ 10, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SIN_Fp64), 0,
+ 1, MVT::f64, 1, 0,
+ 8, MVT::f80,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SIN_Fp80), 0,
+ 1, MVT::f80, 1, 0,
+ 0,
+ 37, TARGET_OPCODE(ISD::FCOS),
+ OPC_RecordChild0,
+ OPC_SwitchType , 10, MVT::f32,
+ OPC_CheckPatternPredicate, 7,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::COS_Fp32), 0,
+ 1, MVT::f32, 1, 0,
+ 10, MVT::f64,
+ OPC_CheckPatternPredicate, 8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::COS_Fp64), 0,
+ 1, MVT::f64, 1, 0,
+ 8, MVT::f80,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::COS_Fp80), 0,
+ 1, MVT::f80, 1, 0,
0,
- 42|128,3, X86ISD::ADD,
- OPC_Scope, 121,
+ 121|128,33, TARGET_OPCODE(ISD::VECTOR_SHUFFLE),
+ OPC_Scope, 76,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
+ OPC_Scope, 20,
+ OPC_CheckPredicate, 87,
+ OPC_CheckPatternPredicate, 14,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSHDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 20,
+ OPC_CheckPredicate, 88,
+ OPC_CheckPatternPredicate, 14,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSLDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 57,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 27,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 25,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 25,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 11,
+ OPC_SwitchType , 17, MVT::v4i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 27,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 17, MVT::v2i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
0,
- 124,
+ 83,
+ OPC_RecordNode,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 28,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_CheckPredicate, 89,
+ OPC_CheckType, MVT::v4i32,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSHUFDmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 3, 4, 5, 6, 7, 8,
+ 22,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 3, 4, 5, 6, 7, 8,
+ 0,
+ 96,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 47, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i16,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_CheckPredicate, 90,
+ OPC_CheckType, MVT::v4i32,
+ OPC_CheckPatternPredicate, 14,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 26,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 39, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 28,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i64,
+ OPC_CheckPredicate, 90,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 14,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
0,
- 47|128,1,
- OPC_RecordChild0,
+ 86|128,1,
+ OPC_RecordNode,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_Scope, 118,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 47,
- OPC_CheckPredicate, 11,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_Scope, 26|128,1,
+ OPC_CheckPredicate, 21,
+ OPC_SwitchType , 112, MVT::v2i64,
OPC_MoveParent,
- OPC_SwitchType , 12, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16ri8), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 2,
- 12, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri8), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 2,
- 12, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64ri8), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 2,
- 0,
- 17,
- OPC_CheckPredicate, 12,
OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64ri32), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 2,
- 45,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
OPC_MoveParent,
- OPC_SwitchType , 12, MVT::i8,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8ri), 0,
- 2, MVT::i8, MVT::i32, 2, 0, 2,
- 12, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16ri), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 2,
- 12, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32ri), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 2,
+ OPC_CheckType, MVT::v8i16,
+ OPC_Scope, 24,
+ OPC_CheckPredicate, 91,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitNodeXForm, 3, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSHUFHWmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 3, 4, 5, 6, 7, 8,
+ 24,
+ OPC_CheckPredicate, 92,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitNodeXForm, 4, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSHUFLWmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 3, 4, 5, 6, 7, 8,
+ 24,
+ OPC_CheckPredicate, 91,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitNodeXForm, 3, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFHWmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 3, 4, 5, 6, 7, 8,
+ 24,
+ OPC_CheckPredicate, 92,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitNodeXForm, 4, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFLWmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 3, 4, 5, 6, 7, 8,
0,
+ 34, MVT::v4f32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_CheckPredicate, 89,
+ OPC_CheckType, MVT::v4i32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 3, 4, 5, 6, 7, 8,
0,
- 12,
- OPC_CheckType, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8rr), 0,
- 2, MVT::i8, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16rr), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rr), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rr), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 36,
+ OPC_CheckType, MVT::v1i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_CheckPredicate, 93,
+ OPC_CheckType, MVT::v4i16,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitNodeXForm, 5, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSHUFWmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 3, 4, 5, 6, 7, 8,
0,
- 0,
- 44|128,2, X86ISD::SUB,
- OPC_RecordChild0,
- OPC_Scope, 120,
+ 43|128,2,
+ OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 27,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 25,
- OPC_CheckPredicate, 6,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 25,
- OPC_CheckPredicate, 3,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 27,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_Scope, 67,
+ OPC_CheckPredicate, 94,
+ OPC_SwitchType , 19, MVT::v16i8,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPUNPCKLBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v8i16,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPUNPCKLWDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v4i32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPUNPCKLDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
+ OPC_CheckPredicate, 12,
+ OPC_SwitchType , 19, MVT::v16i8,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPUNPCKHBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v8i16,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPUNPCKHWDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v4i32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPUNPCKHDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
+ OPC_CheckPredicate, 94,
+ OPC_SwitchType , 19, MVT::v16i8,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v8i16,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLWDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v4i32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
+ OPC_CheckPredicate, 12,
+ OPC_SwitchType , 19, MVT::v16i8,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v8i16,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHWDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v4i32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
0,
- 46|128,1,
+ 55,
+ OPC_RecordNode,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_Scope, 118,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 95,
+ OPC_CheckType, MVT::v4i32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 7, 1, 4, 5, 6, 7, 8, 9,
+ 23|128,3,
+ OPC_RecordChild0,
+ OPC_Scope, 79|128,2,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 47,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_SwitchType , 12, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16ri8), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 2,
- 12, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32ri8), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 2,
- 12, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64ri8), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 2,
+ OPC_SwitchOpcode , 61|128,1, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 22|128,1, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckType, MVT::v1i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_Scope, 67,
+ OPC_CheckPredicate, 96,
+ OPC_SwitchType , 19, MVT::v8i8,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v4i16,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHWDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v2i32,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 67,
+ OPC_CheckPredicate, 97,
+ OPC_SwitchType , 19, MVT::v8i8,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v4i16,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLWDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v2i32,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 28, TARGET_OPCODE(X86ISD::VZEXT_LOAD),
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 98,
+ OPC_CheckType, MVT::v4i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHPSrm), 0|OPFL_Chain,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
0,
- 17,
- OPC_CheckPredicate, 12,
+ 29, TARGET_OPCODE(ISD::UNDEF),
OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64ri32), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 2,
- 45,
+ OPC_SwitchType , 11, MVT::v2i64,
+ OPC_CheckPredicate, 90,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLHPSrr), 0,
+ 1, MVT::v2i64, 2, 0, 0,
+ 11, MVT::v4i32,
+ OPC_CheckPredicate, 99,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHLPSrr), 0,
+ 1, MVT::v4i32, 2, 0, 0,
+ 0,
+ 103, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_SwitchType , 12, MVT::i8,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB8ri), 0,
- 2, MVT::i8, MVT::i32, 2, 0, 2,
- 12, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16ri), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 2,
- 12, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32ri), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 2,
+ OPC_CheckType, MVT::v2i64,
+ OPC_Scope, 21,
+ OPC_CheckPredicate, 94,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPUNPCKLQDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 21,
+ OPC_CheckPredicate, 12,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPUNPCKHQDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 21,
+ OPC_CheckPredicate, 94,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLQDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 21,
+ OPC_CheckPredicate, 12,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHQDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
0,
0,
- 12,
- OPC_CheckType, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB8rr), 0,
- 2, MVT::i8, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB16rr), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB32rr), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SUB64rr), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 29,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v4i32,
+ OPC_Scope, 11,
+ OPC_CheckPredicate, 98,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLHPSrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11,
+ OPC_CheckPredicate, 100,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHLPSrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
+ 36,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
+ OPC_Scope, 12,
+ OPC_CheckPredicate, 87,
+ OPC_CheckPatternPredicate, 14,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSHDUPrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 12,
+ OPC_CheckPredicate, 88,
+ OPC_CheckPatternPredicate, 14,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSLDUPrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 0,
0,
- 0,
- 42|128,3, X86ISD::OR,
- OPC_Scope, 121,
+ 46,
+ OPC_RecordNode,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
+ OPC_Scope, 16,
+ OPC_CheckPredicate, 101,
+ OPC_CheckPatternPredicate, 25,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDri), 0,
+ 1, MVT::v4i32, 2, 1, 2,
+ 16,
+ OPC_CheckPredicate, 102,
+ OPC_CheckPatternPredicate, 25,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDri), 0,
+ 1, MVT::v4i32, 2, 1, 2,
+ 0,
+ 24|128,2,
+ OPC_RecordChild0,
+ OPC_Scope, 82,
+ OPC_RecordChild1,
+ OPC_Scope, 50,
+ OPC_CheckPredicate, 103,
+ OPC_SwitchType , 21, MVT::v4i32,
+ OPC_EmitInteger, MVT::i32, X86::sub_ss,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::f32, 2, 1, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrr), 0,
+ 1, MVT::v4i32, 2, 0, 3,
+ 21, MVT::v2i64,
+ OPC_EmitInteger, MVT::i32, X86::sub_sd,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::f64, 2, 1, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
+ 1, MVT::v2i64, 2, 0, 3,
+ 0,
+ 27,
+ OPC_CheckPredicate, 11,
+ OPC_CheckType, MVT::v4i32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitInteger, MVT::i32, X86::sub_sd,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::f64, 2, 1, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
+ 1, MVT::v4i32, 2, 0, 3,
+ 0,
+ 64|128,1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_Scope, 15,
+ OPC_CheckPredicate, 104,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLQDQrr), 0,
+ 1, MVT::v2i64, 2, 0, 0,
+ 15,
+ OPC_CheckPredicate, 12,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHQDQrr), 0,
+ 1, MVT::v2i64, 2, 0, 0,
+ 37,
+ OPC_CheckPredicate, 101,
+ OPC_SwitchType , 9, MVT::v16i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLBWrr), 0,
+ 1, MVT::v16i8, 2, 0, 0,
+ 9, MVT::v8i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLWDrr), 0,
+ 1, MVT::v8i16, 2, 0, 0,
+ 9, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLDQrr), 0,
+ 1, MVT::v4i32, 2, 0, 0,
+ 0,
+ 37,
+ OPC_CheckPredicate, 102,
+ OPC_SwitchType , 9, MVT::v16i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHBWrr), 0,
+ 1, MVT::v16i8, 2, 0, 0,
+ 9, MVT::v8i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHWDrr), 0,
+ 1, MVT::v8i16, 2, 0, 0,
+ 9, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHDQrr), 0,
+ 1, MVT::v4i32, 2, 0, 0,
+ 0,
+ 37,
+ OPC_CheckPredicate, 105,
+ OPC_SwitchType , 9, MVT::v8i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLBWrr), 0,
+ 1, MVT::v8i8, 2, 0, 0,
+ 9, MVT::v4i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLWDrr), 0,
+ 1, MVT::v4i16, 2, 0, 0,
+ 9, MVT::v2i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLDQrr), 0,
+ 1, MVT::v2i32, 2, 0, 0,
+ 0,
+ 37,
+ OPC_CheckPredicate, 106,
+ OPC_SwitchType , 9, MVT::v8i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHBWrr), 0,
+ 1, MVT::v8i8, 2, 0, 0,
+ 9, MVT::v4i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHWDrr), 0,
+ 1, MVT::v4i16, 2, 0, 0,
+ 9, MVT::v2i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHDQrr), 0,
+ 1, MVT::v2i32, 2, 0, 0,
+ 0,
+ 0,
+ 0,
+ 39,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_CheckPredicate, 107,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v1i64,
+ OPC_CheckType, MVT::i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 103,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2DQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 44|128,1,
OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 27,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_RecordChild0,
+ OPC_Scope, 42,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 25,
- OPC_CheckPredicate, 6,
+ OPC_CheckPredicate, 89,
+ OPC_CheckType, MVT::v4i32,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSHUFDri), 0,
+ 1, MVT::v4i32, 2, 1, 2,
+ 14,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDri), 0,
+ 1, MVT::v4i32, 2, 1, 2,
+ 0,
+ 124,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 108,
+ OPC_SwitchType , 15, MVT::v1i64,
+ OPC_CheckPatternPredicate, 11,
+ OPC_EmitNodeXForm, 6, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR64rr), 0,
+ 1, MVT::v1i64, 3, 2, 1, 3,
+ 15, MVT::v2i32,
+ OPC_CheckPatternPredicate, 11,
+ OPC_EmitNodeXForm, 6, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR64rr), 0,
+ 1, MVT::v2i32, 3, 2, 1, 3,
+ 15, MVT::v4i16,
+ OPC_CheckPatternPredicate, 11,
+ OPC_EmitNodeXForm, 6, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR64rr), 0,
+ 1, MVT::v4i16, 3, 2, 1, 3,
+ 15, MVT::v8i8,
+ OPC_CheckPatternPredicate, 11,
+ OPC_EmitNodeXForm, 6, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR64rr), 0,
+ 1, MVT::v8i8, 3, 2, 1, 3,
+ 15, MVT::v4i32,
+ OPC_CheckPatternPredicate, 11,
+ OPC_EmitNodeXForm, 6, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR128rr), 0,
+ 1, MVT::v4i32, 3, 2, 1, 3,
+ 15, MVT::v8i16,
+ OPC_CheckPatternPredicate, 11,
+ OPC_EmitNodeXForm, 6, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR128rr), 0,
+ 1, MVT::v8i16, 3, 2, 1, 3,
+ 15, MVT::v16i8,
+ OPC_CheckPatternPredicate, 11,
+ OPC_EmitNodeXForm, 6, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR128rr), 0,
+ 1, MVT::v16i8, 3, 2, 1, 3,
+ 0,
+ 0,
+ 39|128,2,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_SwitchOpcode , 119, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::f64,
OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 25,
+ OPC_CheckType, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4f32,
+ OPC_Scope, 21,
+ OPC_CheckPredicate, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 21,
+ OPC_CheckPredicate, 98,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVHPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 21,
+ OPC_CheckPredicate, 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 21,
+ OPC_CheckPredicate, 98,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 111, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::f64,
OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 27,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_CheckType, MVT::v2f64,
+ OPC_Scope, 21,
+ OPC_CheckPredicate, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVLPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 21,
+ OPC_CheckPredicate, 98,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVHPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 21,
+ OPC_CheckPredicate, 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 21,
+ OPC_CheckPredicate, 98,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 51, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 11,
+ OPC_SwitchType , 17, MVT::v4f32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 17, MVT::v2f64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
0,
- 124,
+ 52,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 28,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_CheckPredicate, 90,
+ OPC_CheckType, MVT::v4f32,
+ OPC_CheckPatternPredicate, 14,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 45|128,2,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_Scope, 46,
+ OPC_CheckPredicate, 12,
+ OPC_SwitchType , 19, MVT::v4f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKHPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v2f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKHPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 46,
+ OPC_CheckPredicate, 94,
+ OPC_SwitchType , 19, MVT::v4f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v2f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKLPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 46,
+ OPC_CheckPredicate, 12,
+ OPC_SwitchType , 19, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKHPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKHPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 46,
+ OPC_CheckPredicate, 94,
+ OPC_SwitchType , 19, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKLPSYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKLPDYrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 46,
+ OPC_CheckPredicate, 12,
+ OPC_SwitchType , 19, MVT::v4f32,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKHPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v2f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKHPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 46,
+ OPC_CheckPredicate, 94,
+ OPC_SwitchType , 19, MVT::v4f32,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 19, MVT::v2f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 0,
+ 11|128,2,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 55, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::i64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_CheckPredicate, 90,
+ OPC_CheckType, MVT::v2f64,
+ OPC_CheckPatternPredicate, 14,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 6,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 1|128,1, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_SwitchType , 20, MVT::v2f64,
+ OPC_CheckPredicate, 90,
+ OPC_CheckPatternPredicate, 14,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 86, MVT::v4f32,
+ OPC_Scope, 20,
+ OPC_CheckPredicate, 87,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVSHDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 20,
+ OPC_CheckPredicate, 88,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVSLDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 20,
+ OPC_CheckPredicate, 87,
+ OPC_CheckPatternPredicate, 14,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSHDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 20,
+ OPC_CheckPredicate, 88,
+ OPC_CheckPatternPredicate, 14,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSLDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 0,
+ 69, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_CheckType, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 26,
+ OPC_CheckPredicate, 2,
OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::f64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_CheckPredicate, 90,
+ OPC_CheckType, MVT::v2f64,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 14,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 0,
+ 35,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4f32,
+ OPC_Scope, 11,
+ OPC_CheckPredicate, 90,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLHPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 0,
+ 11,
+ OPC_CheckPredicate, 99,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHLPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 0,
+ 0,
+ 60|128,1,
+ OPC_RecordNode,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 95,
+ OPC_SwitchType , 54, MVT::v4f32,
+ OPC_Scope, 25,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSHUFPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 1, 4, 5, 6, 7, 8, 9,
+ 25,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 1, 4, 5, 6, 7, 8, 9,
+ 0,
+ 25, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSHUFPSYrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8f32, 7, 1, 4, 5, 6, 7, 8, 9,
+ 54, MVT::v2f64,
+ OPC_Scope, 25,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSHUFPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 7, 1, 4, 5, 6, 7, 8, 9,
+ 25,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 7, 1, 4, 5, 6, 7, 8, 9,
+ 0,
+ 25, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/3,
+ OPC_EmitMergeInputChains, 1, 2,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSHUFPDYrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f64, 7, 1, 4, 5, 6, 7, 8, 9,
+ 0,
+ 62,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v4f32,
+ OPC_Scope, 13,
+ OPC_CheckPredicate, 98,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVLHPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 13,
+ OPC_CheckPredicate, 100,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVHLPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 13,
+ OPC_CheckPredicate, 98,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLHPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 13,
+ OPC_CheckPredicate, 100,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHLPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 46,
+ OPC_RecordNode,
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4f32,
+ OPC_Scope, 16,
+ OPC_CheckPredicate, 101,
+ OPC_CheckPatternPredicate, 25,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDri), 0,
+ 1, MVT::v4f32, 2, 1, 2,
+ 16,
+ OPC_CheckPredicate, 102,
+ OPC_CheckPatternPredicate, 25,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDri), 0,
+ 1, MVT::v4f32, 2, 1, 2,
+ 0,
+ 90|128,2,
+ OPC_RecordChild0,
+ OPC_Scope, 82,
+ OPC_RecordChild1,
+ OPC_Scope, 50,
+ OPC_CheckPredicate, 103,
+ OPC_SwitchType , 21, MVT::v4f32,
+ OPC_EmitInteger, MVT::i32, X86::sub_ss,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::f32, 2, 1, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrr), 0,
+ 1, MVT::v4f32, 2, 0, 3,
+ 21, MVT::v2f64,
+ OPC_EmitInteger, MVT::i32, X86::sub_sd,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::f64, 2, 1, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
+ 1, MVT::v2f64, 2, 0, 3,
+ 0,
+ 27,
+ OPC_CheckPredicate, 11,
+ OPC_CheckType, MVT::v4f32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitInteger, MVT::i32, X86::sub_sd,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::f64, 2, 1, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
+ 1, MVT::v4f32, 2, 0, 3,
+ 0,
+ 68,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
OPC_MoveParent,
+ OPC_SwitchType , 30, MVT::v2f64,
+ OPC_Scope, 13,
+ OPC_CheckPredicate, 104,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 0,
+ 13,
+ OPC_CheckPredicate, 12,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKHPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 0,
+ 0,
+ 26, MVT::v4f32,
+ OPC_Scope, 11,
+ OPC_CheckPredicate, 101,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 0,
+ 11,
+ OPC_CheckPredicate, 102,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKHPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 0,
+ 0,
+ 0,
+ 61|128,1,
OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 28,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_Scope, 30,
+ OPC_CheckPredicate, 12,
+ OPC_SwitchType , 11, MVT::v4f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKHPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11, MVT::v2f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKHPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 30,
+ OPC_CheckPredicate, 94,
+ OPC_SwitchType , 11, MVT::v4f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKLPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11, MVT::v2f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKLPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 30,
+ OPC_CheckPredicate, 12,
+ OPC_SwitchType , 11, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKHPSYrr), 0,
+ 1, MVT::v8f32, 2, 0, 1,
+ 11, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKHPDYrr), 0,
+ 1, MVT::v4f64, 2, 0, 1,
+ 0,
+ 30,
+ OPC_CheckPredicate, 94,
+ OPC_SwitchType , 11, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKLPSYrr), 0,
+ 1, MVT::v8f32, 2, 0, 1,
+ 11, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKLPDYrr), 0,
+ 1, MVT::v4f64, 2, 0, 1,
+ 0,
+ 30,
+ OPC_CheckPredicate, 12,
+ OPC_SwitchType , 11, MVT::v4f32,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKHPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11, MVT::v2f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKHPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 30,
+ OPC_CheckPredicate, 94,
+ OPC_SwitchType , 11, MVT::v4f32,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11, MVT::v2f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 0,
+ 0,
+ 40|128,1,
+ OPC_RecordNode,
+ OPC_RecordChild0,
+ OPC_Scope, 24,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
OPC_MoveParent,
+ OPC_CheckPredicate, 89,
+ OPC_CheckType, MVT::v4f32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDri), 0,
+ 1, MVT::v4f32, 2, 1, 2,
+ 20,
OPC_RecordChild1,
- OPC_CheckType, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ OPC_CheckPredicate, 108,
+ OPC_CheckType, MVT::v4f32,
+ OPC_CheckPatternPredicate, 11,
+ OPC_EmitNodeXForm, 6, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR128rr), 0,
+ 1, MVT::v4f32, 3, 2, 1, 3,
+ 117,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_SwitchType , 70, MVT::v8i16,
+ OPC_Scope, 16,
+ OPC_CheckPredicate, 91,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitNodeXForm, 3, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSHUFHWri), 0,
+ 1, MVT::v8i16, 2, 1, 2,
+ 16,
+ OPC_CheckPredicate, 92,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitNodeXForm, 4, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSHUFLWri), 0,
+ 1, MVT::v8i16, 2, 1, 2,
+ 16,
+ OPC_CheckPredicate, 91,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitNodeXForm, 3, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFHWri), 0,
+ 1, MVT::v8i16, 2, 1, 2,
+ 16,
+ OPC_CheckPredicate, 92,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitNodeXForm, 4, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFLWri), 0,
+ 1, MVT::v8i16, 2, 1, 2,
+ 0,
+ 17, MVT::v2i64,
+ OPC_CheckPredicate, 89,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPDrri), 0,
+ 1, MVT::v2i64, 3, 1, 1, 2,
+ 16, MVT::v4i16,
+ OPC_CheckPredicate, 93,
+ OPC_CheckPatternPredicate, 9,
+ OPC_EmitNodeXForm, 5, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSHUFWri), 0,
+ 1, MVT::v4i16, 2, 1, 2,
+ 0,
0,
- 47|128,1,
+ 104|128,1,
OPC_RecordChild0,
OPC_RecordChild1,
- OPC_Scope, 118,
+ OPC_Scope, 56,
+ OPC_CheckPredicate, 94,
+ OPC_SwitchType , 11, MVT::v16i8,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPUNPCKLBWrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11, MVT::v8i16,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPUNPCKLWDrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11, MVT::v4i32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPUNPCKLDQrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11, MVT::v2i64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPUNPCKLQDQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
+ 56,
+ OPC_CheckPredicate, 12,
+ OPC_SwitchType , 11, MVT::v16i8,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPUNPCKHBWrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11, MVT::v8i16,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPUNPCKHWDrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11, MVT::v4i32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPUNPCKHDQrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11, MVT::v2i64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPUNPCKHQDQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
+ 56,
+ OPC_CheckPredicate, 94,
+ OPC_SwitchType , 11, MVT::v16i8,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLBWrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11, MVT::v8i16,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLWDrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11, MVT::v4i32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLDQrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLQDQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
+ 56,
+ OPC_CheckPredicate, 12,
+ OPC_SwitchType , 11, MVT::v16i8,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHBWrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 11, MVT::v8i16,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHWDrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 11, MVT::v4i32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHDQrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 11, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHQDQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 0,
+ 0,
+ 41,
+ OPC_RecordNode,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 95,
+ OPC_SwitchType , 15, MVT::v4i32,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPSrri), 0,
+ 1, MVT::v4i32, 3, 1, 2, 3,
+ 15, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPDrri), 0,
+ 1, MVT::v2i64, 3, 1, 2, 3,
+ 0,
+ 9|128,2,
+ OPC_RecordChild0,
+ OPC_Scope, 91,
+ OPC_RecordChild1,
+ OPC_Scope, 43,
+ OPC_CheckPredicate, 96,
+ OPC_SwitchType , 11, MVT::v8i8,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHBWrr), 0,
+ 1, MVT::v8i8, 2, 0, 1,
+ 11, MVT::v4i16,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHWDrr), 0,
+ 1, MVT::v4i16, 2, 0, 1,
+ 11, MVT::v2i32,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKHDQrr), 0,
+ 1, MVT::v2i32, 2, 0, 1,
+ 0,
+ 43,
+ OPC_CheckPredicate, 97,
+ OPC_SwitchType , 11, MVT::v8i8,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLBWrr), 0,
+ 1, MVT::v8i8, 2, 0, 1,
+ 11, MVT::v4i16,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLWDrr), 0,
+ 1, MVT::v4i16, 2, 0, 1,
+ 11, MVT::v2i32,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLDQrr), 0,
+ 1, MVT::v2i32, 2, 0, 1,
+ 0,
+ 0,
+ 40|128,1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 47,
- OPC_CheckPredicate, 11,
+ OPC_SwitchOpcode , 71, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_RecordChild0,
+ OPC_Scope, 33,
+ OPC_CheckChild0Type, MVT::f32,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 103,
+ OPC_CheckType, MVT::v4f32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVSSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 33,
+ OPC_CheckChild0Type, MVT::f64,
+ OPC_MoveParent,
+ OPC_CheckPredicate, 103,
+ OPC_CheckType, MVT::v2f64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVSDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 11,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 0,
+ 0,
+ 87, TARGET_OPCODE(ISD::UNDEF),
OPC_MoveParent,
- OPC_SwitchType , 12, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16ri8), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 2,
- 12, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32ri8), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 2,
- 12, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64ri8), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 2,
+ OPC_SwitchType , 54, MVT::v4f32,
+ OPC_Scope, 12,
+ OPC_CheckPredicate, 87,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVSHDUPrr), 0,
+ 1, MVT::v4f32, 1, 0,
+ 12,
+ OPC_CheckPredicate, 88,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVSLDUPrr), 0,
+ 1, MVT::v4f32, 1, 0,
+ 12,
+ OPC_CheckPredicate, 87,
+ OPC_CheckPatternPredicate, 14,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSHDUPrr), 0,
+ 1, MVT::v4f32, 1, 0,
+ 12,
+ OPC_CheckPredicate, 88,
+ OPC_CheckPatternPredicate, 14,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSLDUPrr), 0,
+ 1, MVT::v4f32, 1, 0,
+ 0,
+ 26, MVT::v2f64,
+ OPC_CheckPredicate, 90,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVDDUPrr), 0,
+ 1, MVT::v2f64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 14,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrr), 0,
+ 1, MVT::v2f64, 1, 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 53|128,1,
+ OPC_RecordNode,
+ OPC_RecordChild0,
+ OPC_Scope, 42,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::UNDEF),
+ OPC_MoveParent,
+ OPC_CheckPredicate, 89,
+ OPC_SwitchType , 13, MVT::v4f32,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPSrri), 0,
+ 1, MVT::v4f32, 3, 1, 1, 2,
+ 15, MVT::v2f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPDrri), 0,
+ 1, MVT::v2f64, 3, 1, 1, 2,
+ 0,
+ 4|128,1,
+ OPC_RecordChild1,
+ OPC_Scope, 110,
+ OPC_CheckPredicate, 95,
+ OPC_SwitchType , 34, MVT::v4f32,
+ OPC_Scope, 15,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSHUFPSrri), 0,
+ 1, MVT::v4f32, 3, 1, 2, 3,
+ 15,
+ OPC_CheckPatternPredicate, 1,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPSrri), 0,
+ 1, MVT::v4f32, 3, 1, 2, 3,
+ 0,
+ 15, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSHUFPSYrri), 0,
+ 1, MVT::v8f32, 3, 1, 2, 3,
+ 34, MVT::v2f64,
+ OPC_Scope, 15,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSHUFPDrri), 0,
+ 1, MVT::v2f64, 3, 1, 2, 3,
+ 15,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPDrri), 0,
+ 1, MVT::v2f64, 3, 1, 2, 3,
+ 0,
+ 15, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSHUFPDYrri), 0,
+ 1, MVT::v4f64, 3, 1, 2, 3,
0,
17,
- OPC_CheckPredicate, 12,
+ OPC_CheckPredicate, 11,
+ OPC_CheckType, MVT::v4f32,
+ OPC_EmitNodeXForm, 2, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPSrri), 0,
+ 1, MVT::v4f32, 3, 2, 1, 3,
+ 0,
+ 0,
+ 0,
+ 94|128,6, TARGET_OPCODE(X86ISD::VZEXT_MOVL),
+ OPC_Scope, 61|128,3,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 93|128,1, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_Scope, 11|128,1,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_SwitchType , 74, MVT::i32,
+ OPC_Scope, 46,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVZDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 24,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i32,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVZDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 48, MVT::i64,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVZQI2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZQI2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 0,
+ 77,
+ OPC_RecordChild0,
+ OPC_Scope, 43,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_MoveParent,
+ OPC_SwitchType , 24, MVT::v4i32,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVZDI2PDIrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZDI2PDIrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 0,
+ 10, MVT::v2i32,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVZDI2PDIrr), 0,
+ 1, MVT::v2i32, 1, 0,
+ 0,
+ 29,
+ OPC_CheckChild0Type, MVT::i64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVZQI2PQIrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZQI2PQIrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 0,
+ 0,
+ 0,
+ 123, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_SwitchType , 40, MVT::v4f32,
OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64ri32), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 2,
- 45,
OPC_MoveParent,
- OPC_SwitchType , 12, MVT::i8,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8ri), 0,
- 2, MVT::i8, MVT::i32, 2, 0, 2,
- 12, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16ri), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 2,
- 12, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32ri), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 2,
+ OPC_SwitchType , 16, MVT::v4i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 16, MVT::v2i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZQI2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 20, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 20, MVT::v4i32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZPQILo2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 20, MVT::v1i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVZDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 87, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_SwitchType , 16, MVT::v4i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 57, MVT::v2i64,
+ OPC_Scope, 16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZQI2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVZPQILo2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZPQILo2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
0,
0,
- 12,
- OPC_CheckType, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR8rr), 0,
- 2, MVT::i8, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR16rr), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR32rr), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::OR64rr), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 1,
0,
- 0,
- 42|128,3, X86ISD::XOR,
- OPC_Scope, 121,
+ 77,
OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 27,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_SwitchType , 24, MVT::v2i64,
+ OPC_Scope, 10,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVZPQILo2PQIrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 10,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZPQILo2PQIrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 0,
+ 16, MVT::v2i32,
+ OPC_EmitNode, TARGET_OPCODE(X86::MMX_V_SET0), 0,
+ 1, MVT::v2i32, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PUNPCKLDQrr), 0,
+ 1, MVT::v2i32, 2, 0, 1,
+ 28, MVT::v4i32,
+ OPC_EmitNode, TARGET_OPCODE(X86::V_SET0PI), 0,
+ 1, MVT::v4i32, 0,
+ OPC_EmitInteger, MVT::i32, X86::sub_ss,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::f32, 2, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrr), 0,
+ 1, MVT::v4i32, 2, 1, 3,
+ 0,
+ 31|128,2,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 16|128,1, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_Scope, 93,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_SwitchType , 36, MVT::f32,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4f32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitInteger, MVT::i32, 0,
+ OPC_EmitNode, TARGET_OPCODE(X86::MOVSSrm), 0|OPFL_Chain,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ OPC_EmitInteger, MVT::i32, X86::sub_ss,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 3, 7, 8, 9,
+ 36, MVT::f64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2f64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitInteger, MVT::i64, 0,
+ OPC_EmitNode, TARGET_OPCODE(X86::MOVSDrm), 0|OPFL_Chain,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ OPC_EmitInteger, MVT::i32, X86::sub_sd,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 3, 7, 8, 9,
+ 0,
+ 47,
+ OPC_RecordChild0,
+ OPC_Scope, 21,
+ OPC_CheckChild0Type, MVT::f64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2f64,
+ OPC_EmitNode, TARGET_OPCODE(X86::V_SET0PS), 0,
+ 1, MVT::v2f64, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
+ 1, MVT::v2f64, 2, 1, 0,
+ 21,
+ OPC_CheckChild0Type, MVT::f32,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4f32,
+ OPC_EmitNode, TARGET_OPCODE(X86::V_SET0PS), 0,
+ 1, MVT::v4f32, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrr), 0,
+ 1, MVT::v4f32, 2, 1, 0,
+ 0,
+ 0,
+ 51, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckType, MVT::v4f32,
OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 25,
- OPC_CheckPredicate, 6,
OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 25,
+ OPC_CheckType, MVT::v2f64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitInteger, MVT::i64, 0,
+ OPC_EmitNode, TARGET_OPCODE(X86::MOVSDrm), 0|OPFL_Chain,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ OPC_EmitInteger, MVT::i32, X86::sub_sd,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 3, 7, 8, 9,
+ 78, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 27,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_SwitchType , 32, MVT::v4f32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitInteger, MVT::i32, 0,
+ OPC_EmitNode, TARGET_OPCODE(X86::MOVSSrm), 0|OPFL_Chain,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ OPC_EmitInteger, MVT::i32, X86::sub_ss,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 3, 7, 8, 9,
+ 32, MVT::v2f64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitInteger, MVT::i64, 0,
+ OPC_EmitNode, TARGET_OPCODE(X86::MOVSDrm), 0|OPFL_Chain,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ OPC_EmitInteger, MVT::i32, X86::sub_sd,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 3, 7, 8, 9,
+ 0,
+ 0,
+ 45,
+ OPC_RecordChild0,
+ OPC_SwitchType , 28, MVT::v4f32,
+ OPC_EmitNode, TARGET_OPCODE(X86::V_SET0PS), 0,
+ 1, MVT::v4f32, 0,
+ OPC_EmitInteger, MVT::i32, X86::sub_ss,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::f32, 2, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrr), 0,
+ 1, MVT::v4f32, 2, 1, 3,
+ 10, MVT::v2f64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZPQILo2PQIrr), 0,
+ 1, MVT::v2f64, 1, 0,
0,
- 124,
+ 0,
+ 56, TARGET_OPCODE(X86ISD::VZEXT_LOAD),
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_SwitchType , 16, MVT::v2i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZQI2PQIrm), 0|OPFL_Chain,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 32, MVT::v2f64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitInteger, MVT::i64, 0,
+ OPC_EmitNode, TARGET_OPCODE(X86::MOVSDrm), 0|OPFL_Chain,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ OPC_EmitInteger, MVT::i32, X86::sub_sd,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain,
+ 1, MVT::v2f64, 3, 7, 8, 9,
+ 0,
+ 63|128,1, TARGET_OPCODE(X86ISD::PSHUFD),
+ OPC_Scope, 115,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 28,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_SwitchType , 57, MVT::v2i64,
+ OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 6,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 3,
+ OPC_CheckType, MVT::v4i32,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSHUFDmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 3, 4, 5, 6, 7, 8,
+ 19,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 3, 4, 5, 6, 7, 8,
+ 0,
+ 32, MVT::v4f32,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 28,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_CheckType, MVT::i64,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 3, 4, 5, 6, 7, 8,
0,
- 47|128,1,
+ 72,
OPC_RecordChild0,
OPC_RecordChild1,
- OPC_Scope, 118,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 47,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_SwitchType , 12, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16ri8), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 2,
- 12, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32ri8), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 2,
- 12, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64ri8), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 2,
- 0,
- 17,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_SwitchType , 28, MVT::v4i32,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64ri32), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 2,
- 45,
- OPC_MoveParent,
- OPC_SwitchType , 12, MVT::i8,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8ri), 0,
- 2, MVT::i8, MVT::i32, 2, 0, 2,
- 12, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16ri), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 2,
- 12, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32ri), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 2,
- 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSHUFDri), 0,
+ 1, MVT::v4i32, 2, 0, 2,
+ 11,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDri), 0,
+ 1, MVT::v4i32, 2, 0, 2,
+ 0,
+ 28, MVT::v4f32,
+ OPC_Scope, 13,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSHUFDri), 0,
+ 1, MVT::v4f32, 2, 0, 2,
+ 11,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFDri), 0,
+ 1, MVT::v4f32, 2, 0, 2,
0,
- 12,
- OPC_CheckType, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR8rr), 0,
- 2, MVT::i8, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR16rr), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR32rr), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XOR64rr), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 1,
0,
0,
- 42|128,3, X86ISD::AND,
- OPC_Scope, 121,
- OPC_RecordChild0,
+ 30|128,2, TARGET_OPCODE(X86ISD::SHUFPS),
+ OPC_RecordChild0,
+ OPC_Scope, 76|128,1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_Scope, 27,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_SwitchOpcode , 76, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 25,
- OPC_CheckPredicate, 6,
OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 25,
+ OPC_CheckType, MVT::v4i32,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSHUFPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 20,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 118, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 27,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_SwitchType , 46, MVT::v2f64,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSHUFPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 7, 0, 4, 5, 6, 7, 8, 9,
+ 20,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 46, MVT::v4f32,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSHUFPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 20,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 0,
0,
- 124,
+ 76,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_SwitchType , 30, MVT::v4i32,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSHUFPSrri), 0,
+ 1, MVT::v4i32, 3, 0, 1, 3,
+ 12,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPSrri), 0,
+ 1, MVT::v4i32, 3, 0, 1, 3,
+ 0,
+ 30, MVT::v4f32,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSHUFPSrri), 0,
+ 1, MVT::v4f32, 3, 0, 1, 3,
+ 12,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPSrri), 0,
+ 1, MVT::v4f32, 3, 0, 1, 3,
+ 0,
+ 0,
+ 0,
+ 81, TARGET_OPCODE(X86ISD::PSHUFHW),
+ OPC_Scope, 54,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_Scope, 28,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v8i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFHWmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 3, 4, 5, 6, 7, 8,
+ 23,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v8i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFHWri), 0,
+ 1, MVT::v8i16, 2, 0, 2,
+ 0,
+ 81, TARGET_OPCODE(X86ISD::PSHUFLW),
+ OPC_Scope, 54,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v8i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFLWmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 3, 4, 5, 6, 7, 8,
+ 23,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v8i16,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFLWri), 0,
+ 1, MVT::v8i16, 2, 0, 2,
+ 0,
+ 78, TARGET_OPCODE(X86ISD::MMX_PINSRW),
+ OPC_RecordChild0,
+ OPC_Scope, 51,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::ANY_EXTEND),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 7,
+ OPC_CheckType, MVT::i16,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PINSRWrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 9,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PINSRWrri), 0,
+ 1, MVT::v4i16, 3, 0, 1, 3,
+ 0,
+ 88, TARGET_OPCODE(X86ISD::MOVQ2DQ),
+ OPC_Scope, 75,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 41, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckType, MVT::i32,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i8, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 6,
+ OPC_CheckType, MVT::v2i32,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i16,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i16, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_RecordChild1,
- OPC_CheckType, MVT::i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i32, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 28,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 24, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
OPC_RecordChild1,
- OPC_CheckType, MVT::i64,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 2, MVT::i64, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVQI2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
0,
- 47|128,1,
+ 9,
OPC_RecordChild0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2DQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 0,
+ 98, TARGET_OPCODE(ISD::PREFETCH),
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i32,
+ OPC_MoveParent,
+ OPC_MoveChild, 3,
+ OPC_Scope, 20,
+ OPC_CheckInteger, 3,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PREFETCHT0), 0|OPFL_Chain,
+ 0, 5, 2, 3, 4, 5, 6,
+ 20,
+ OPC_CheckInteger, 2,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PREFETCHT1), 0|OPFL_Chain,
+ 0, 5, 2, 3, 4, 5, 6,
+ 20,
+ OPC_CheckInteger, 1,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PREFETCHT2), 0|OPFL_Chain,
+ 0, 5, 2, 3, 4, 5, 6,
+ 20,
+ OPC_CheckInteger, 0,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PREFETCHNTA), 0|OPFL_Chain,
+ 0, 5, 2, 3, 4, 5, 6,
+ 0,
+ 114|128,1, TARGET_OPCODE(X86ISD::TC_RETURN),
+ OPC_RecordNode,
+ OPC_CaptureFlagInput,
+ OPC_Scope, 73,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 24,
+ OPC_CheckPatternPredicate, 26,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TCRETURNmi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs|OPFL_Variadic2,
+ 1, MVT::i32, 6, 4, 5, 6, 7, 8, 9,
+ 24,
+ OPC_CheckPatternPredicate, 4,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TCRETURNmi64), 0|OPFL_Chain|OPFL_FlagInput|OPFL_MemRefs|OPFL_Variadic2,
+ 1, MVT::i64, 6, 4, 5, 6, 7, 8, 9,
+ 0,
+ 34|128,1,
OPC_RecordChild1,
- OPC_Scope, 118,
+ OPC_Scope, 110,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_Scope, 47,
- OPC_CheckPredicate, 11,
- OPC_MoveParent,
- OPC_SwitchType , 12, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16ri8), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 2,
- 12, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32ri8), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 2,
- 12, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64ri8), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 2,
+ OPC_SwitchOpcode , 50, TARGET_OPCODE(ISD::TargetGlobalAddress),
+ OPC_SwitchType , 22, MVT::i32,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TCRETURNdi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
+ 1, MVT::i32, 2, 1, 3,
+ 22, MVT::i64,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TCRETURNdi64), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
+ 1, MVT::i64, 2, 1, 3,
0,
- 17,
- OPC_CheckPredicate, 12,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64ri32), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 2,
- 45,
- OPC_MoveParent,
- OPC_SwitchType , 12, MVT::i8,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8ri), 0,
- 2, MVT::i8, MVT::i32, 2, 0, 2,
- 12, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16ri), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 2,
- 12, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32ri), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 2,
+ 50, TARGET_OPCODE(ISD::TargetExternalSymbol),
+ OPC_SwitchType , 22, MVT::i32,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TCRETURNdi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
+ 1, MVT::i32, 2, 1, 3,
+ 22, MVT::i64,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TCRETURNdi64), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
+ 1, MVT::i64, 2, 1, 3,
0,
0,
- 12,
- OPC_CheckType, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND8rr), 0,
- 2, MVT::i8, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND16rr), 0,
- 2, MVT::i16, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND32rr), 0,
- 2, MVT::i32, MVT::i32, 2, 0, 1,
- 12,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::AND64rr), 0,
- 2, MVT::i64, MVT::i32, 2, 0, 1,
+ 23,
+ OPC_CheckChild1Type, MVT::i32,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TCRETURNri), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
+ 1, MVT::i32, 2, 1, 3,
+ 23,
+ OPC_CheckChild1Type, MVT::i64,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TCRETURNri64), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
+ 1, MVT::i64, 2, 1, 3,
0,
0,
- 80|128,1, ISD::FP_TO_SINT,
- OPC_Scope, 113,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ 74, TARGET_OPCODE(X86ISD::CMPPS),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v4f32,
+ OPC_Scope, 45,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_SwitchType , 47, MVT::f64,
- OPC_MoveParent,
- OPC_SwitchType , 20, MVT::i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTTSD2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
- 20, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTTSD2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 0,
- 47, MVT::f32,
- OPC_MoveParent,
- OPC_SwitchType , 20, MVT::i64,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTTSS2SI64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 5, 2, 3, 4, 5, 6,
- 20, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTTSS2SIrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 5, 2, 3, 4, 5, 6,
- 0,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMPPSrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMPPSrri), 0,
+ 1, MVT::v4i32, 3, 0, 1, 3,
+ 0,
+ 74, TARGET_OPCODE(X86ISD::CMPPD),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::v2f64,
+ OPC_Scope, 45,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMPPDrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CMPPDrri), 0,
+ 1, MVT::v2i64, 3, 0, 1, 3,
+ 0,
+ 116, TARGET_OPCODE(X86ISD::PINSRW),
+ OPC_RecordChild0,
+ OPC_Scope, 71,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 32,
+ OPC_CheckPredicate, 35,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPINSRWrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PINSRWrmi), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 7, 0, 4, 5, 6, 7, 8, 9,
0,
- 91,
- OPC_RecordChild0,
- OPC_Scope, 28,
- OPC_CheckChild0Type, MVT::f64,
- OPC_SwitchType , 10, MVT::i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTTSD2SI64rr), 0,
- 1, MVT::i64, 1, 0,
- 10, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTTSD2SIrr), 0,
- 1, MVT::i32, 1, 0,
- 0,
- 28,
- OPC_CheckChild0Type, MVT::f32,
- OPC_SwitchType , 10, MVT::i64,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTTSS2SI64rr), 0,
- 1, MVT::i64, 1, 0,
- 10, MVT::i32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTTSS2SIrr), 0,
- 1, MVT::i32, 1, 0,
- 0,
- 14,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPS2DQrr), 0,
- 1, MVT::v4i32, 1, 0,
+ 40,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPINSRWrri), 0,
+ 1, MVT::v8i16, 3, 0, 1, 3,
14,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTTPD2PIrr), 0,
- 1, MVT::v2i32, 1, 0,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PINSRWrri), 0,
+ 1, MVT::v8i16, 3, 0, 1, 3,
0,
0,
- 82|128,1, ISD::SINT_TO_FP,
- OPC_Scope, 115,
+ 60, TARGET_OPCODE(X86ISD::PSHUFB),
+ OPC_RecordChild0,
+ OPC_Scope, 43,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 11,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFBrm128), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild1,
+ OPC_CheckPatternPredicate, 11,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFBrr128), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
+ 116, TARGET_OPCODE(X86ISD::PINSRB),
+ OPC_RecordChild0,
+ OPC_Scope, 71,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_SwitchType , 51, MVT::i64,
+ OPC_CheckPredicate, 32,
+ OPC_CheckPredicate, 34,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPINSRBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PINSRBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 7, 0, 4, 5, 6, 7, 8, 9,
+ 0,
+ 40,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPINSRBrr), 0,
+ 1, MVT::v16i8, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PINSRBrr), 0,
+ 1, MVT::v16i8, 3, 0, 1, 3,
+ 0,
+ 0,
+ 93|128,1, TARGET_OPCODE(ISD::INSERT_VECTOR_ELT),
+ OPC_RecordChild0,
+ OPC_Scope, 9|128,1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_Scope, 60,
OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
OPC_MoveParent,
- OPC_SwitchType , 20, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSI2SD64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- 20, MVT::f32,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
+ OPC_Scope, 22,
OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSI2SS64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPINSRDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PINSRDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 7, 0, 4, 5, 6, 7, 8, 9,
0,
- 49, MVT::i32,
+ 62,
OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_SwitchType , 20, MVT::f32,
+ OPC_CheckType, MVT::v2i64,
+ OPC_Scope, 22,
OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSI2SSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
- 20, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSI2SDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPINSRQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PINSRQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 7, 0, 4, 5, 6, 7, 8, 9,
0,
0,
- 91,
- OPC_RecordChild0,
- OPC_Scope, 28,
- OPC_CheckChild0Type, MVT::i64,
- OPC_SwitchType , 10, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSI2SD64rr), 0,
- 1, MVT::f64, 1, 0,
- 10, MVT::f32,
+ 78,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_SwitchType , 32, MVT::v4i32,
+ OPC_Scope, 14,
OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSI2SS64rr), 0,
- 1, MVT::f32, 1, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPINSRDrr), 0,
+ 1, MVT::v4i32, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PINSRDrr), 0,
+ 1, MVT::v4i32, 3, 0, 1, 3,
0,
- 28,
- OPC_CheckChild0Type, MVT::i32,
- OPC_SwitchType , 10, MVT::f32,
+ 32, MVT::v2i64,
+ OPC_Scope, 14,
OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSI2SSrr), 0,
- 1, MVT::f32, 1, 0,
- 10, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSI2SDrr), 0,
- 1, MVT::f64, 1, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPINSRQrr), 0,
+ 1, MVT::v2i64, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PINSRQrr), 0,
+ 1, MVT::v2i64, 3, 0, 1, 3,
0,
- 14,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTDQ2PSrr), 0,
- 1, MVT::v4f32, 1, 0,
- 14,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_CheckType, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_CVTPI2PDrr), 0,
- 1, MVT::v2f64, 1, 0,
0,
0,
- 61|128,5, ISD::BIT_CONVERT,
- OPC_Scope, 126,
+ 81|128,2, TARGET_OPCODE(X86ISD::MOVDDUP),
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 44|128,1, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_SwitchOpcode , 63, ISD::LOAD,
+ OPC_SwitchOpcode , 100, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_SwitchType , 27, MVT::i64,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_SwitchType , 42, MVT::v2i64,
OPC_MoveParent,
- OPC_CheckType, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64toSDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- 25, MVT::i32,
- OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDI2SSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ OPC_CheckType, MVT::v4i32,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 42, MVT::v2f64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4f32,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 0,
0,
- 55, ISD::EXTRACT_VECTOR_ELT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v2i64,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
+ 62, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_CheckType, MVT::i64,
OPC_MoveParent,
- OPC_SwitchType , 10, MVT::v1i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVDQ2Qrr), 0,
- 1, MVT::v1i64, 1, 0,
- 8, MVT::v2i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVDQ2Qrr), 0,
- 1, MVT::v2i32, 1, 0,
- 8, MVT::v4i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVDQ2Qrr), 0,
- 1, MVT::v4i16, 1, 0,
- 8, MVT::v8i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVDQ2Qrr), 0,
- 1, MVT::v8i8, 1, 0,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2f64,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
0,
0,
- 58|128,4,
- OPC_RecordChild0,
- OPC_Scope, 39,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_SwitchType , 5, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v16i8,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v4f32,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 39,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_SwitchType , 5, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v16i8,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v4f32,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 39,
- OPC_CheckChild0Type, MVT::v16i8,
- OPC_SwitchType , 5, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v4f32,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 39,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_SwitchType , 5, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v16i8,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v4f32,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 39,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_SwitchType , 5, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v16i8,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 39,
- OPC_CheckChild0Type, MVT::v2i64,
- OPC_SwitchType , 5, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v8i16,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v16i8,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v4f32,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 5, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CompleteMatch, 1, 0,
-
- 0,
- 44,
- OPC_CheckChild0Type, MVT::v1i64,
- OPC_SwitchType , 3, MVT::v8i8,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4i16,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2i32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2f32,
- OPC_CompleteMatch, 1, 0,
-
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64from64rr), 0,
- 1, MVT::i64, 1, 0,
- 8, MVT::f64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2FR64rr), 0,
- 1, MVT::f64, 1, 0,
- 0,
- 44,
- OPC_CheckChild0Type, MVT::v2i32,
- OPC_SwitchType , 3, MVT::v8i8,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4i16,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2f32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v1i64,
- OPC_CompleteMatch, 1, 0,
-
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64from64rr), 0,
- 1, MVT::i64, 1, 0,
- 8, MVT::f64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2FR64rr), 0,
- 1, MVT::f64, 1, 0,
- 0,
- 34,
- OPC_CheckChild0Type, MVT::v2f32,
- OPC_SwitchType , 3, MVT::v8i8,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v4i16,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2i32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v1i64,
- OPC_CompleteMatch, 1, 0,
-
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64from64rr), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 44,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_SwitchType , 3, MVT::v8i8,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2i32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2f32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v1i64,
- OPC_CompleteMatch, 1, 0,
-
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64from64rr), 0,
- 1, MVT::i64, 1, 0,
- 8, MVT::f64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2FR64rr), 0,
- 1, MVT::f64, 1, 0,
- 0,
- 44,
- OPC_CheckChild0Type, MVT::v8i8,
- OPC_SwitchType , 3, MVT::v4i16,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2i32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v2f32,
- OPC_CompleteMatch, 1, 0,
-
- 3, MVT::v1i64,
- OPC_CompleteMatch, 1, 0,
-
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64from64rr), 0,
- 1, MVT::i64, 1, 0,
- 8, MVT::f64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2FR64rr), 0,
- 1, MVT::f64, 1, 0,
+ 92, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_SwitchType , 38, MVT::v2i64,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
0,
- 66,
- OPC_CheckChild0Type, MVT::i64,
- OPC_SwitchType , 10, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64toSDrr), 0,
- 1, MVT::f64, 1, 0,
- 8, MVT::v1i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64to64rr), 0,
- 1, MVT::v1i64, 1, 0,
- 8, MVT::v2i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64to64rr), 0,
- 1, MVT::v2i32, 1, 0,
- 8, MVT::v2f32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64to64rr), 0,
- 1, MVT::v2f32, 1, 0,
- 8, MVT::v4i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64to64rr), 0,
- 1, MVT::v4i16, 1, 0,
- 8, MVT::v8i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64to64rr), 0,
- 1, MVT::v8i8, 1, 0,
+ 38, MVT::v2f64,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
0,
- 14,
- OPC_CheckChild0Type, MVT::f64,
- OPC_CheckType, MVT::i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDto64rr), 0,
- 1, MVT::i64, 1, 0,
- 14,
- OPC_CheckChild0Type, MVT::i32,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDI2SSrr), 0,
- 1, MVT::f32, 1, 0,
- 14,
- OPC_CheckChild0Type, MVT::f32,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSS2DIrr), 0,
- 1, MVT::i32, 1, 0,
+ 0,
+ 59, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::f64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2f64,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
0,
0,
- 109, X86ISD::UCOMI,
+ 60, TARGET_OPCODE(X86ISD::PUNPCKLBW),
OPC_RecordChild0,
- OPC_Scope, 52,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_Scope, 34,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_Scope, 43,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v16i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLBWrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
+ 60, TARGET_OPCODE(X86ISD::PUNPCKLWD),
+ OPC_RecordChild0,
+ OPC_Scope, 43,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v8i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLWDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v8i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLWDrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
+ 60, TARGET_OPCODE(X86ISD::PUNPCKLDQ),
+ OPC_RecordChild0,
+ OPC_Scope, 43,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLDQrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
+ 60, TARGET_OPCODE(X86ISD::PUNPCKHBW),
+ OPC_RecordChild0,
+ OPC_Scope, 43,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v16i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHBWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v16i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHBWrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 0,
+ 60, TARGET_OPCODE(X86ISD::PUNPCKHWD),
+ OPC_RecordChild0,
+ OPC_Scope, 43,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v8i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHWDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v8i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHWDrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 0,
+ 60, TARGET_OPCODE(X86ISD::PUNPCKHDQ),
+ OPC_RecordChild0,
+ OPC_Scope, 43,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHDQrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 0,
+ 115, TARGET_OPCODE(X86ISD::MOVSS),
+ OPC_RecordChild0,
+ OPC_Scope, 61,
+ OPC_MoveChild, 1,
+ OPC_SwitchOpcode , 36, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_UCOMISSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_UCOMISSrr), 0,
- 1, MVT::i32, 2, 0, 1,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 15, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::f32,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
0,
- 52,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_Scope, 34,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ 49,
+ OPC_RecordChild1,
+ OPC_SwitchType , 21, MVT::v4i32,
+ OPC_EmitInteger, MVT::i32, X86::sub_ss,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::f32, 2, 1, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrr), 0,
+ 1, MVT::v4i32, 2, 0, 3,
+ 21, MVT::v4f32,
+ OPC_EmitInteger, MVT::i32, X86::sub_ss,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::f32, 2, 1, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSSrr), 0,
+ 1, MVT::v4f32, 2, 0, 3,
+ 0,
+ 0,
+ 102, TARGET_OPCODE(X86ISD::MOVSHDUP),
+ OPC_Scope, 75,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 37, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_UCOMISDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSHDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 28, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_UCOMISDrr), 0,
- 1, MVT::i32, 2, 0, 1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4f32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSHDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 23,
+ OPC_RecordChild0,
+ OPC_SwitchType , 8, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSHDUPrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 8, MVT::v4f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSHDUPrr), 0,
+ 1, MVT::v4f32, 1, 0,
0,
0,
- 109, X86ISD::COMI,
- OPC_RecordChild0,
- OPC_Scope, 52,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_Scope, 34,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ 102, TARGET_OPCODE(X86ISD::MOVSLDUP),
+ OPC_Scope, 75,
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 37, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_COMISSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_COMISSrr), 0,
- 1, MVT::i32, 2, 0, 1,
- 0,
- 52,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_Scope, 34,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSLDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 28, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
- OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_COMISDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
- 12,
- OPC_RecordChild1,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_COMISDrr), 0,
- 1, MVT::i32, 2, 0, 1,
+ OPC_CheckType, MVT::v4f32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSLDUPrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 23,
+ OPC_RecordChild0,
+ OPC_SwitchType , 8, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSLDUPrr), 0,
+ 1, MVT::v4i32, 1, 0,
+ 8, MVT::v4f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSLDUPrr), 0,
+ 1, MVT::v4f32, 1, 0,
0,
0,
- 35|128,1, X86ISD::FAND,
- OPC_Scope, 64,
+ 61|128,1, TARGET_OPCODE(X86ISD::PCMPEQB),
+ OPC_Scope, 42,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsANDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsANDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v8i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
+ 42,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckType, MVT::v1i64,
+ OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsANDPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsANDPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 30,
+ OPC_CheckType, MVT::v8i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i8, 6, 2, 3, 4, 5, 6, 7,
+ 36,
OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_SwitchType , 11, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsANDPSrr), 0,
- 1, MVT::f32, 2, 0, 1,
- 11, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsANDPDrr), 0,
- 1, MVT::f64, 2, 0, 1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v16i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 36,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v16i8,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 2, 3, 4, 5, 6, 7,
+ 26,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 9, MVT::v16i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 9, MVT::v8i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQBrr), 0,
+ 1, MVT::v8i8, 2, 0, 1,
0,
0,
- 35|128,1, X86ISD::FOR,
- OPC_Scope, 64,
+ 61|128,1, TARGET_OPCODE(X86ISD::PCMPEQW),
+ OPC_Scope, 42,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
+ 42,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckType, MVT::v1i64,
+ OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 30,
+ OPC_CheckType, MVT::v4i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 2, 3, 4, 5, 6, 7,
+ 36,
OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_SwitchType , 11, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsORPSrr), 0,
- 1, MVT::f32, 2, 0, 1,
- 11, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsORPDrr), 0,
- 1, MVT::f64, 2, 0, 1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v8i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
+ 36,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v8i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 26,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 9, MVT::v8i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 9, MVT::v4i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQWrr), 0,
+ 1, MVT::v4i16, 2, 0, 1,
0,
0,
- 35|128,1, X86ISD::FXOR,
- OPC_Scope, 64,
+ 61|128,1, TARGET_OPCODE(X86ISD::PCMPEQD),
+ OPC_Scope, 42,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsXORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsXORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 0,
- 64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
+ 42,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckType, MVT::v1i64,
+ OPC_MoveParent,
OPC_MoveParent,
OPC_RecordChild1,
- OPC_SwitchType , 21, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsXORPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsXORPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 30,
+ OPC_CheckType, MVT::v2i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 6, 2, 3, 4, 5, 6, 7,
+ 36,
OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_SwitchType , 11, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsXORPSrr), 0,
- 1, MVT::f32, 2, 0, 1,
- 11, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsXORPDrr), 0,
- 1, MVT::f64, 2, 0, 1,
- 0,
- 0,
- 50|128,1, X86ISD::FMAX,
- OPC_RecordChild0,
- OPC_Scope, 118,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 24,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 36,
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v4i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 2, 3, 4, 5, 6, 7,
+ 26,
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 9, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 9, MVT::v2i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPEQDrr), 0,
+ 1, MVT::v2i32, 2, 0, 1,
+ 0,
+ 0,
+ 106, TARGET_OPCODE(X86ISD::PCMPGTB),
+ OPC_RecordChild0,
+ OPC_Scope, 76,
+ OPC_MoveChild, 1,
+ OPC_SwitchOpcode , 36, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 0,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v8i8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXSSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 23,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i8, 6, 0, 3, 4, 5, 6, 7,
+ 30, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
+ OPC_CheckType, MVT::v16i8,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
- 24,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTBrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v16i8, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 25,
+ OPC_RecordChild1,
+ OPC_SwitchType , 9, MVT::v16i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTBrr), 0,
+ 1, MVT::v16i8, 2, 0, 1,
+ 9, MVT::v8i8,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTBrr), 0,
+ 1, MVT::v8i8, 2, 0, 1,
+ 0,
+ 0,
+ 106, TARGET_OPCODE(X86ISD::PCMPGTW),
+ OPC_RecordChild0,
+ OPC_Scope, 76,
+ OPC_MoveChild, 1,
+ OPC_SwitchOpcode , 36, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
- OPC_CheckType, MVT::f64,
- OPC_CheckPatternPredicate, 1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXSDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 23,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i16, 6, 0, 3, 4, 5, 6, 7,
+ 30, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckType, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckType, MVT::v8i16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTWrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v8i16, 6, 0, 3, 4, 5, 6, 7,
0,
- 55,
+ 25,
OPC_RecordChild1,
- OPC_SwitchType , 11, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXSSrr), 0,
- 1, MVT::f32, 2, 0, 1,
- 11, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXPSrr), 0,
- 1, MVT::v4f32, 2, 0, 1,
- 11, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXSDrr), 0,
- 1, MVT::f64, 2, 0, 1,
- 11, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MAXPDrr), 0,
- 1, MVT::v2f64, 2, 0, 1,
+ OPC_SwitchType , 9, MVT::v8i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTWrr), 0,
+ 1, MVT::v8i16, 2, 0, 1,
+ 9, MVT::v4i16,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTWrr), 0,
+ 1, MVT::v4i16, 2, 0, 1,
0,
0,
- 50|128,1, X86ISD::FMIN,
+ 106, TARGET_OPCODE(X86ISD::PCMPGTD),
OPC_RecordChild0,
- OPC_Scope, 118,
+ OPC_Scope, 76,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_CheckFoldableChainNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 24,
+ OPC_SwitchOpcode , 36, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckType, MVT::v1i64,
OPC_MoveParent,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 0,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MINSSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 6, 0, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 23,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 6, 0, 3, 4, 5, 6, 7,
+ 30, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
+ OPC_CheckType, MVT::v4i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MINPSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
- 24,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 25,
+ OPC_RecordChild1,
+ OPC_SwitchType , 9, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTDrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 9, MVT::v2i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PCMPGTDrr), 0,
+ 1, MVT::v2i32, 2, 0, 1,
+ 0,
+ 0,
+ 123, TARGET_OPCODE(X86ISD::MOVLHPS),
+ OPC_RecordChild0,
+ OPC_Scope, 82,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_SwitchOpcode , 26, TARGET_OPCODE(X86ISD::VZEXT_LOAD),
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v2i64,
OPC_MoveParent,
- OPC_CheckType, MVT::f64,
- OPC_CheckPatternPredicate, 1,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MINSDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 6, 0, 3, 4, 5, 6, 7,
- 26,
- OPC_CheckPredicate, 23,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHPSrm), 0|OPFL_Chain,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 41, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::f64,
OPC_MoveParent,
OPC_CheckType, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4f32,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MINPDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
0,
- 55,
+ 36,
OPC_RecordChild1,
- OPC_SwitchType , 11, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MINSSrr), 0,
- 1, MVT::f32, 2, 0, 1,
- 11, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MINPSrr), 0,
+ OPC_SwitchType , 9, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLHPSrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 9, MVT::v2i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLHPSrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
+ 9, MVT::v4f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLHPSrr), 0,
1, MVT::v4f32, 2, 0, 1,
- 11, MVT::f64,
+ 0,
+ 0,
+ 106, TARGET_OPCODE(X86ISD::LCMPXCHG_DAG),
+ OPC_RecordNode,
+ OPC_CaptureFlagInput,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_Scope, 24,
+ OPC_CheckChild2Type, MVT::i32,
+ OPC_MoveChild, 3,
+ OPC_CheckInteger, 4,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LCMPXCHG32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
+ 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
+ 24,
+ OPC_CheckChild2Type, MVT::i16,
+ OPC_MoveChild, 3,
+ OPC_CheckInteger, 2,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LCMPXCHG16), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
+ 1, MVT::i16, 6, 3, 4, 5, 6, 7, 2,
+ 24,
+ OPC_CheckChild2Type, MVT::i8,
+ OPC_MoveChild, 3,
+ OPC_CheckInteger, 1,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LCMPXCHG8), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
+ 1, MVT::i8, 6, 3, 4, 5, 6, 7, 2,
+ 24,
+ OPC_CheckChild2Type, MVT::i64,
+ OPC_MoveChild, 3,
+ OPC_CheckInteger, 8,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LCMPXCHG64), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
+ 1, MVT::i64, 6, 3, 4, 5, 6, 7, 2,
+ 0,
+ 27|128,8, TARGET_OPCODE(ISD::INTRINSIC_VOID),
+ OPC_RecordNode,
+ OPC_MoveChild, 1,
+ OPC_Scope, 22,
+ OPC_CheckInteger, 112|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPSYmr), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 22,
+ OPC_CheckInteger, 111|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPDYmr), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 46,
+ OPC_CheckInteger, 58|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPSmr_Int), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 18,
OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MINSDrr), 0,
- 1, MVT::f64, 2, 0, 1,
- 11, MVT::v2f64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPSmr_Int), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 0,
+ 46,
+ OPC_CheckInteger, 64|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPDmr_Int), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPDmr_Int), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 0,
+ 46,
+ OPC_CheckInteger, 48|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVNTPSmr_Int), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 18,
OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MINPDrr), 0,
- 1, MVT::v2f64, 2, 0, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTPSmr_Int), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
0,
- 0,
- 78|128,1, ISD::FSQRT,
- OPC_Scope, 113,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 19,
+ 46,
+ OPC_CheckInteger, 9|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTSSm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
- 25,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVNTPDmr_Int), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTPDmr_Int), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 0,
+ 46,
+ OPC_CheckInteger, 7|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_Scope, 18,
OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTPSm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
- 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::f64,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVNTDQmr_Int), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTDQmr_Int), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 0,
+ 22,
+ OPC_CheckInteger, 98|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVNTDQYmr), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 22,
+ OPC_CheckInteger, 99|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVNTPDYmr), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 22,
+ OPC_CheckInteger, 100|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVNTPSYmr), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 24,
+ OPC_CheckInteger, 8|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTImr_Int), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 43,
+ OPC_CheckInteger, 41|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_Scope, 17,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VLDMXCSR), 0|OPFL_Chain,
+ 0, 5, 2, 3, 4, 5, 6,
+ 17,
OPC_CheckPatternPredicate, 1,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTSDm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- 25,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2f64,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LDMXCSR), 0|OPFL_Chain,
+ 0, 5, 2, 3, 4, 5, 6,
+ 0,
+ 43,
+ OPC_CheckInteger, 57|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_Scope, 17,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSTMXCSR), 0|OPFL_Chain,
+ 0, 5, 2, 3, 4, 5, 6,
+ 17,
OPC_CheckPatternPredicate, 1,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTPDm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::STMXCSR), 0|OPFL_Chain,
+ 0, 5, 2, 3, 4, 5, 6,
0,
- 89,
- OPC_RecordChild0,
- OPC_SwitchType , 24, MVT::f32,
- OPC_Scope, 10,
- OPC_CheckPatternPredicate, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRT_Fp32), 0,
- 1, MVT::f32, 1, 0,
- 10,
+ 46,
+ OPC_CheckInteger, 63|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVDQUmr_Int), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDQUmr_Int), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 0,
+ 22,
+ OPC_CheckInteger, 110|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVDQUYmr), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 46,
+ OPC_CheckInteger, 62|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVLQ128mr), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLQ128mr), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 0,
+ 22,
+ OPC_CheckInteger, 99|128,4,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CLFLUSH), 0|OPFL_Chain,
+ 0, 5, 2, 3, 4, 5, 6,
+ 26,
+ OPC_CheckInteger, 90|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_RecordChild4,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMASKMOVPSmr), 0|OPFL_Chain,
+ 0, 7, 4, 5, 6, 7, 8, 2, 3,
+ 26,
+ OPC_CheckInteger, 91|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_RecordChild4,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMASKMOVPSYmr), 0|OPFL_Chain,
+ 0, 7, 4, 5, 6, 7, 8, 2, 3,
+ 26,
+ OPC_CheckInteger, 88|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_RecordChild4,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMASKMOVPDmr), 0|OPFL_Chain,
+ 0, 7, 4, 5, 6, 7, 8, 2, 3,
+ 26,
+ OPC_CheckInteger, 89|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_RecordChild4,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMASKMOVPDYmr), 0|OPFL_Chain,
+ 0, 7, 4, 5, 6, 7, 8, 2, 3,
+ 24,
+ OPC_CheckInteger, 27|128,4,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVNTQmr), 0|OPFL_Chain,
+ 0, 6, 3, 4, 5, 6, 7, 2,
+ 37,
+ OPC_CheckInteger, 21|128,4,
+ OPC_MoveParent,
+ OPC_Scope, 12,
+ OPC_MoveChild, 2,
+ OPC_CheckInteger, 3,
+ OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::INT3), 0|OPFL_Chain,
+ 0, 0,
+ 17,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::INT), 0|OPFL_Chain,
+ 0, 1, 2,
+ 0,
+ 13,
+ OPC_CheckInteger, 54|128,6,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SFENCE), 0|OPFL_Chain,
+ 0, 0,
+ 79,
+ OPC_CheckInteger, 0|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_RecordChild4,
+ OPC_Scope, 34,
+ OPC_CheckChild4Type, MVT::i32,
+ OPC_Scope, 14,
OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTSSr), 0,
- 1, MVT::f32, 1, 0,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitCopyToReg, 3, X86::EDI,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMASKMOVDQU), 0|OPFL_Chain|OPFL_FlagInput,
+ 0, 2, 1, 2,
+ 14,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitCopyToReg, 3, X86::EDI,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MASKMOVDQU), 0|OPFL_Chain|OPFL_FlagInput,
+ 0, 2, 1, 2,
0,
- 24, MVT::f64,
- OPC_Scope, 10,
- OPC_CheckPatternPredicate, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRT_Fp64), 0,
- 1, MVT::f64, 1, 0,
- 10,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTSDr), 0,
- 1, MVT::f64, 1, 0,
+ 34,
+ OPC_CheckChild4Type, MVT::i64,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitCopyToReg, 3, X86::RDI,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMASKMOVDQU64), 0|OPFL_Chain|OPFL_FlagInput,
+ 0, 2, 1, 2,
+ 14,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitCopyToReg, 3, X86::RDI,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MASKMOVDQU64), 0|OPFL_Chain|OPFL_FlagInput,
+ 0, 2, 1, 2,
0,
- 8, MVT::f80,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRT_Fp80), 0,
- 1, MVT::f80, 1, 0,
- 10, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTPSr), 0,
- 1, MVT::v4f32, 1, 0,
- 10, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SQRTPDr), 0,
- 1, MVT::v2f64, 1, 0,
+ 0,
+ 13,
+ OPC_CheckInteger, 125|128,4,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LFENCE), 0|OPFL_Chain,
+ 0, 0,
+ 13,
+ OPC_CheckInteger, 3|128,5,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MFENCE), 0|OPFL_Chain,
+ 0, 0,
+ 27,
+ OPC_CheckInteger, 79|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckChild2Type, MVT::i32,
+ OPC_RecordChild3,
+ OPC_RecordChild4,
+ OPC_CheckPatternPredicate, 14,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitCopyToReg, 1, X86::EAX,
+ OPC_EmitCopyToReg, 2, X86::ECX,
+ OPC_EmitCopyToReg, 3, X86::EDX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MONITOR), 0|OPFL_Chain|OPFL_FlagInput,
+ 0, 0,
+ 21,
+ OPC_CheckInteger, 80|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_CheckPatternPredicate, 14,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitCopyToReg, 1, X86::ECX,
+ OPC_EmitCopyToReg, 2, X86::EAX,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MWAIT), 0|OPFL_Chain|OPFL_FlagInput,
+ 0, 0,
+ 13,
+ OPC_CheckInteger, 19|128,4,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VZEROALL), 0|OPFL_Chain,
+ 0, 0,
+ 13,
+ OPC_CheckInteger, 20|128,4,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VZEROUPPER), 0|OPFL_Chain,
+ 0, 0,
+ 13,
+ OPC_CheckInteger, 24|128,4,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 9,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_EMMS), 0|OPFL_Chain,
+ 0, 0,
+ 13,
+ OPC_CheckInteger, 25|128,4,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 9,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_FEMMS), 0|OPFL_Chain,
+ 0, 0,
+ 43,
+ OPC_CheckInteger, 26|128,4,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_RecordChild4,
+ OPC_Scope, 16,
+ OPC_CheckChild4Type, MVT::i32,
+ OPC_CheckPatternPredicate, 9,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitCopyToReg, 3, X86::EDI,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MASKMOVQ), 0|OPFL_Chain|OPFL_FlagInput,
+ 0, 2, 1, 2,
+ 16,
+ OPC_CheckChild4Type, MVT::i64,
+ OPC_CheckPatternPredicate, 27,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitCopyToReg, 3, X86::RDI,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MASKMOVQ64), 0|OPFL_Chain|OPFL_FlagInput,
+ 0, 2, 1, 2,
0,
0,
- 94, X86ISD::FRSQRT,
- OPC_Scope, 63,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 19,
+ 35|128,4, TARGET_OPCODE(ISD::INTRINSIC_W_CHAIN),
+ OPC_RecordNode,
+ OPC_MoveChild, 1,
+ OPC_Scope, 45,
+ OPC_CheckInteger, 126|128,4,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::RSQRTSSm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
- 25,
- OPC_CheckPredicate, 23,
- OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVDQUrm_Int), 0|OPFL_Chain,
+ 1, MVT::v16i8, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDQUrm_Int), 0|OPFL_Chain,
+ 1, MVT::v16i8, 5, 2, 3, 4, 5, 6,
+ 0,
+ 21,
+ OPC_CheckInteger, 81|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVDQUYrm), 0|OPFL_Chain,
+ 1, MVT::v32i8, 5, 2, 3, 4, 5, 6,
+ 45,
+ OPC_CheckInteger, 78|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_Scope, 18,
OPC_CheckPatternPredicate, 0,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::RSQRTPSm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VLDDQUrm), 0|OPFL_Chain,
+ 1, MVT::v16i8, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 14,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LDDQUrm), 0|OPFL_Chain,
+ 1, MVT::v16i8, 5, 2, 3, 4, 5, 6,
0,
- 27,
- OPC_RecordChild0,
- OPC_SwitchType , 10, MVT::f32,
+ 23,
+ OPC_CheckInteger, 80|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VLDDQUYrm), 0|OPFL_Chain,
+ 1, MVT::v32i8, 5, 2, 3, 4, 5, 6,
+ 45,
+ OPC_CheckInteger, 89|128,5,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_Scope, 18,
OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::RSQRTSSr), 0,
- 1, MVT::f32, 1, 0,
- 10, MVT::v4f32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVNTDQArm), 0|OPFL_Chain,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVNTDQArm), 0|OPFL_Chain,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 21,
+ OPC_CheckInteger, 83|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPSYrm), 0|OPFL_Chain,
+ 1, MVT::v8f32, 5, 2, 3, 4, 5, 6,
+ 21,
+ OPC_CheckInteger, 82|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPDYrm), 0|OPFL_Chain,
+ 1, MVT::v4f64, 5, 2, 3, 4, 5, 6,
+ 45,
+ OPC_CheckInteger, 42|128,6,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_Scope, 18,
OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::RSQRTPSr), 0,
- 1, MVT::v4f32, 1, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPSrm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPSrm_Int), 0|OPFL_Chain,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
0,
+ 45,
+ OPC_CheckInteger, 127|128,4,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVUPDrm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVUPDrm_Int), 0|OPFL_Chain,
+ 1, MVT::v2f64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 23,
+ OPC_CheckInteger, 116|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBROADCASTSS), 0|OPFL_Chain,
+ 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ 23,
+ OPC_CheckInteger, 117|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBROADCASTSSY), 0|OPFL_Chain,
+ 1, MVT::v8f32, 5, 2, 3, 4, 5, 6,
+ 23,
+ OPC_CheckInteger, 113|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBROADCASTSD), 0|OPFL_Chain,
+ 1, MVT::v4f64, 5, 2, 3, 4, 5, 6,
+ 23,
+ OPC_CheckInteger, 114|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBROADCASTF128), 0|OPFL_Chain,
+ 1, MVT::v4f64, 5, 2, 3, 4, 5, 6,
+ 25,
+ OPC_CheckInteger, 86|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMASKMOVPSrm), 0|OPFL_Chain,
+ 1, MVT::v4f32, 6, 2, 3, 4, 5, 6, 7,
+ 25,
+ OPC_CheckInteger, 87|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMASKMOVPSYrm), 0|OPFL_Chain,
+ 1, MVT::v8f32, 6, 2, 3, 4, 5, 6, 7,
+ 25,
+ OPC_CheckInteger, 84|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMASKMOVPDrm), 0|OPFL_Chain,
+ 1, MVT::v2f64, 6, 2, 3, 4, 5, 6, 7,
+ 25,
+ OPC_CheckInteger, 85|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_RecordChild3,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMASKMOVPDYrm), 0|OPFL_Chain,
+ 1, MVT::v4f64, 6, 2, 3, 4, 5, 6, 7,
+ 21,
+ OPC_CheckInteger, 115|128,3,
+ OPC_MoveParent,
+ OPC_RecordChild2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VBROADCASTF128), 0|OPFL_Chain,
+ 1, MVT::v8f32, 5, 2, 3, 4, 5, 6,
0,
- 94, X86ISD::FRCP,
- OPC_Scope, 63,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ 99, TARGET_OPCODE(ISD::BRIND),
+ OPC_RecordNode,
+ OPC_Scope, 65,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_Scope, 23,
+ OPC_SwitchType , 23, MVT::i32,
+ OPC_CheckPredicate, 8,
OPC_MoveParent,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 19,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::RCPSSm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
- 25,
- OPC_CheckPredicate, 23,
+ OPC_CheckPatternPredicate, 3,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::JMP32m), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 5, 3, 4, 5, 6, 7,
+ 25, MVT::i64,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
OPC_MoveParent,
- OPC_CheckType, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::RCPPSm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::v4f32, 5, 2, 3, 4, 5, 6,
+ OPC_CheckPatternPredicate, 4,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::JMP64m), 0|OPFL_Chain|OPFL_MemRefs,
+ 0, 5, 3, 4, 5, 6, 7,
0,
- 27,
- OPC_RecordChild0,
- OPC_SwitchType , 10, MVT::f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::RCPSSr), 0,
- 1, MVT::f32, 1, 0,
- 10, MVT::v4f32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::RCPPSr), 0,
- 1, MVT::v4f32, 1, 0,
+ 29,
+ OPC_RecordChild1,
+ OPC_Scope, 12,
+ OPC_CheckChild1Type, MVT::i32,
+ OPC_CheckPatternPredicate, 3,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::JMP32r), 0|OPFL_Chain,
+ 0, 1, 1,
+ 12,
+ OPC_CheckChild1Type, MVT::i64,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::JMP64r), 0|OPFL_Chain,
+ 0, 1, 1,
0,
0,
- 103, ISD::FP_ROUND,
- OPC_Scope, 38,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ 1|128,2, TARGET_OPCODE(X86ISD::CALL),
+ OPC_RecordNode,
+ OPC_CaptureFlagInput,
+ OPC_Scope, 90,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::f64,
- OPC_MoveParent,
- OPC_CheckType, MVT::f32,
- OPC_CheckPatternPredicate, 12,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSD2SSrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
- 61,
- OPC_RecordChild0,
- OPC_Scope, 28,
- OPC_CheckChild0Type, MVT::f64,
- OPC_CheckType, MVT::f32,
- OPC_Scope, 10,
- OPC_CheckPatternPredicate, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV_Fp6432), 0,
- 1, MVT::f32, 1, 0,
- 10,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSD2SSrr), 0,
- 1, MVT::f32, 1, 0,
+ OPC_SwitchType , 22, MVT::i32,
+ OPC_CheckPredicate, 8,
+ OPC_MoveParent,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CALL32m), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs|OPFL_Variadic1,
+ 1, MVT::i32, 5, 3, 4, 5, 6, 7,
+ 51, MVT::i64,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_MoveParent,
+ OPC_Scope, 21,
+ OPC_CheckPatternPredicate, 28,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CALL64m), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs|OPFL_Variadic1,
+ 1, MVT::i64, 5, 3, 4, 5, 6, 7,
+ 21,
+ OPC_CheckPatternPredicate, 29,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains, 2, 0, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::WINCALL64m), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_MemRefs|OPFL_Variadic1,
+ 1, MVT::i64, 5, 3, 4, 5, 6, 7,
0,
+ 0,
+ 32|128,1,
+ OPC_RecordChild1,
+ OPC_Scope, 115,
+ OPC_MoveChild, 1,
+ OPC_SwitchOpcode , 43, TARGET_OPCODE(ISD::TargetGlobalAddress),
+ OPC_SwitchType , 10, MVT::i32,
+ OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CALLpcrel32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
+ 1, MVT::i32, 1, 1,
+ 27, MVT::i64,
+ OPC_MoveParent,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 28,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CALL64pcrel32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
+ 1, MVT::i64, 1, 1,
+ 11,
+ OPC_CheckPatternPredicate, 29,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::WINCALL64pcrel32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
+ 1, MVT::i64, 1, 1,
+ 0,
+ 0,
+ 43, TARGET_OPCODE(ISD::TargetExternalSymbol),
+ OPC_SwitchType , 10, MVT::i32,
+ OPC_MoveParent,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CALLpcrel32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
+ 1, MVT::i32, 1, 1,
+ 27, MVT::i64,
+ OPC_MoveParent,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 28,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CALL64pcrel32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
+ 1, MVT::i64, 1, 1,
+ 11,
+ OPC_CheckPatternPredicate, 29,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::WINCALL64pcrel32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
+ 1, MVT::i64, 1, 1,
+ 0,
+ 0,
+ 16, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i32,
+ OPC_MoveParent,
+ OPC_CheckPatternPredicate, 30,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CALLpcrel32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
+ 1, MVT::i32, 1, 2,
+ 0,
+ 11,
+ OPC_CheckChild1Type, MVT::i32,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CALL32r), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
+ 1, MVT::i32, 1, 1,
28,
- OPC_CheckChild0Type, MVT::f80,
- OPC_SwitchType , 10, MVT::f32,
- OPC_CheckPatternPredicate, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV_Fp8032), 0,
- 1, MVT::f32, 1, 0,
- 10, MVT::f64,
- OPC_CheckPatternPredicate, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV_Fp8064), 0,
- 1, MVT::f64, 1, 0,
+ OPC_CheckChild1Type, MVT::i64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 28,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::CALL64r), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
+ 1, MVT::i64, 1, 1,
+ 11,
+ OPC_CheckPatternPredicate, 29,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::WINCALL64r), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput|OPFL_Variadic1,
+ 1, MVT::i64, 1, 1,
0,
0,
0,
- 103, ISD::FP_EXTEND,
- OPC_Scope, 38,
+ 30|128,3, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_Scope, 28|128,2,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 9,
- OPC_CheckType, MVT::f32,
- OPC_MoveParent,
- OPC_CheckType, MVT::f64,
- OPC_CheckPatternPredicate, 1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSS2SDrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- 61,
+ OPC_SwitchOpcode , 84|128,1, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_Scope, 47,
+ OPC_CheckPredicate, 8,
+ OPC_CheckType, MVT::i32,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4i32,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDI2PDIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 5, 2, 3, 4, 5, 6,
+ 0,
+ 49,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::i64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVQI2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 18,
+ OPC_CheckPatternPredicate, 2,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVQI2PQIrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 5, 2, 3, 4, 5, 6,
+ 0,
+ 25,
+ OPC_CheckPredicate, 8,
+ OPC_CheckType, MVT::i32,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i32,
+ OPC_CheckPatternPredicate, 9,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64rm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i32, 5, 2, 3, 4, 5, 6,
+ 80,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_SwitchType , 35, MVT::f32,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4f32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitInteger, MVT::i32, 0,
+ OPC_EmitNode, TARGET_OPCODE(X86::MOVSSrm), 0|OPFL_Chain,
+ 1, MVT::f32, 5, 2, 3, 4, 5, 6,
+ OPC_EmitInteger, MVT::i32, X86::sub_ss,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 3, 7, 8, 9,
+ 35, MVT::f64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2f64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitInteger, MVT::i64, 0,
+ OPC_EmitNode, TARGET_OPCODE(X86::MOVSDrm), 0|OPFL_Chain,
+ 1, MVT::f64, 5, 2, 3, 4, 5, 6,
+ OPC_EmitInteger, MVT::i32, X86::sub_sd,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 3, 7, 8, 9,
+ 0,
+ 0,
+ 61, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_RecordChild0,
+ OPC_CheckType, MVT::i64,
+ OPC_Scope, 13,
+ OPC_CheckChild0Type, MVT::v8i8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2DQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 13,
+ OPC_CheckChild0Type, MVT::v4i16,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2DQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 13,
+ OPC_CheckChild0Type, MVT::v2i32,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2DQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 13,
+ OPC_CheckChild0Type, MVT::v1i64,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVQ2DQrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 0,
+ 0,
+ 125,
OPC_RecordChild0,
- OPC_Scope, 42,
- OPC_CheckChild0Type, MVT::f32,
- OPC_SwitchType , 24, MVT::f64,
+ OPC_Scope, 28,
+ OPC_CheckChild0Type, MVT::i64,
+ OPC_SwitchType , 10, MVT::v2i64,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64toPQIrr), 0,
+ 1, MVT::v2i64, 1, 0,
+ 10, MVT::v1i64,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64rrv164), 0,
+ 1, MVT::v1i64, 1, 0,
+ 0,
+ 42,
+ OPC_CheckChild0Type, MVT::i32,
+ OPC_SwitchType , 24, MVT::v4i32,
OPC_Scope, 10,
- OPC_CheckPatternPredicate, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV_Fp3264), 0,
- 1, MVT::f64, 1, 0,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VMOVDI2PDIrr), 0,
+ 1, MVT::v4i32, 1, 0,
10,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CVTSS2SDrr), 0,
- 1, MVT::f64, 1, 0,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVDI2PDIrr), 0,
+ 1, MVT::v4i32, 1, 0,
0,
- 10, MVT::f80,
- OPC_CheckPatternPredicate, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV_Fp3280), 0,
- 1, MVT::f80, 1, 0,
+ 10, MVT::v2i32,
+ OPC_CheckPatternPredicate, 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_MOVD64rr), 0,
+ 1, MVT::v2i32, 1, 0,
0,
- 14,
+ 24,
+ OPC_CheckChild0Type, MVT::f32,
+ OPC_CheckType, MVT::v4f32,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
+ 1, MVT::v4f32, 0,
+ OPC_EmitInteger, MVT::i32, X86::sub_ss,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
+ 1, MVT::v4f32, 3, 1, 0, 2,
+ 24,
OPC_CheckChild0Type, MVT::f64,
- OPC_CheckType, MVT::f80,
- OPC_CheckPatternPredicate, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV_Fp6480), 0,
- 1, MVT::f80, 1, 0,
+ OPC_CheckType, MVT::v2f64,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::IMPLICIT_DEF), 0,
+ 1, MVT::v2f64, 0,
+ OPC_EmitInteger, MVT::i32, X86::sub_sd,
+ OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::INSERT_SUBREG), 0,
+ 1, MVT::v2f64, 3, 1, 0, 2,
0,
0,
- 92, X86ISD::PCMPEQQ,
- OPC_Scope, 37,
+ 90, TARGET_OPCODE(X86ISD::PCMPEQQ),
+ OPC_Scope, 36,
OPC_RecordChild0,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
OPC_CheckType, MVT::v2i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
+ OPC_EmitMergeInputChains1_1,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQQrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
- 37,
+ 36,
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
OPC_RecordChild1,
OPC_CheckType, MVT::v2i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQQrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i64, 6, 2, 3, 4, 5, 6, 7,
13,
@@ -29956,2755 +35732,1310 @@ SDNode *SelectCode(SDNode *N) {
OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPEQQrr), 0,
1, MVT::v2i64, 2, 0, 1,
0,
- 51, X86ISD::PTEST,
+ 52, TARGET_OPCODE(X86ISD::PCMPGTQ),
OPC_RecordChild0,
- OPC_Scope, 34,
+ OPC_Scope, 35,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
+ OPC_CheckType, MVT::v2i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PTESTrm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 0, 3, 4, 5, 6, 7,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild1,
- OPC_CheckPatternPredicate, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PTESTrr), 0,
- 1, MVT::i32, 2, 0, 1,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
0,
- 53, X86ISD::PCMPGTQ,
+ 52, TARGET_OPCODE(X86ISD::PUNPCKLQDQ),
OPC_RecordChild0,
- OPC_Scope, 36,
+ OPC_Scope, 35,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::LOAD,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
OPC_RecordMemRef,
OPC_RecordNode,
OPC_CheckFoldableChainNode,
OPC_RecordChild1,
OPC_CheckPredicate, 2,
- OPC_CheckPredicate, 8,
- OPC_CheckPredicate, 23,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
OPC_CheckType, MVT::v2i64,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLQDQrm), 0|OPFL_Chain|OPFL_MemRefs,
1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
12,
OPC_RecordChild1,
OPC_CheckType, MVT::v2i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PCMPGTQrr), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKLQDQrr), 0,
1, MVT::v2i64, 2, 0, 1,
0,
- 98, ISD::ATOMIC_SWAP,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_SwitchType , 21, MVT::i32,
- OPC_CheckPredicate, 72,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XCHG32rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::i16,
- OPC_CheckPredicate, 73,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XCHG16rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i16, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::i8,
- OPC_CheckPredicate, 74,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XCHG8rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i8, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::i64,
- OPC_CheckPredicate, 75,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::XCHG64rm), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 98, ISD::ATOMIC_LOAD_ADD,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_SwitchType , 21, MVT::i32,
- OPC_CheckPredicate, 76,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LXADD32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::i16,
- OPC_CheckPredicate, 77,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LXADD16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i16, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::i8,
- OPC_CheckPredicate, 78,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LXADD8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i8, 6, 2, 3, 4, 5, 6, 7,
- 21, MVT::i64,
- OPC_CheckPredicate, 79,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LXADD64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 6, 2, 3, 4, 5, 6, 7,
- 0,
- 98, ISD::ATOMIC_LOAD_AND,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_SwitchType , 21, MVT::i32,
- OPC_CheckPredicate, 80,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMAND32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i16,
- OPC_CheckPredicate, 81,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMAND16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i16, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i8,
- OPC_CheckPredicate, 82,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMAND8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i8, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i64,
- OPC_CheckPredicate, 83,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMAND64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 6, 3, 4, 5, 6, 7, 2,
- 0,
- 98, ISD::ATOMIC_LOAD_OR,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_SwitchType , 21, MVT::i32,
- OPC_CheckPredicate, 84,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMOR32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i16,
- OPC_CheckPredicate, 85,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMOR16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i16, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i8,
- OPC_CheckPredicate, 86,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMOR8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i8, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i64,
- OPC_CheckPredicate, 87,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMOR64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 6, 3, 4, 5, 6, 7, 2,
- 0,
- 98, ISD::ATOMIC_LOAD_XOR,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_SwitchType , 21, MVT::i32,
- OPC_CheckPredicate, 88,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMXOR32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i16,
- OPC_CheckPredicate, 89,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMXOR16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i16, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i8,
- OPC_CheckPredicate, 90,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMXOR8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i8, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i64,
- OPC_CheckPredicate, 91,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMXOR64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 6, 3, 4, 5, 6, 7, 2,
- 0,
- 98, ISD::ATOMIC_LOAD_NAND,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_SwitchType , 21, MVT::i32,
- OPC_CheckPredicate, 92,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMNAND32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i16,
- OPC_CheckPredicate, 93,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMNAND16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i16, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i8,
- OPC_CheckPredicate, 94,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMNAND8), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i8, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i64,
- OPC_CheckPredicate, 95,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMNAND64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 6, 3, 4, 5, 6, 7, 2,
- 0,
- 75, ISD::ATOMIC_LOAD_MIN,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_SwitchType , 21, MVT::i32,
- OPC_CheckPredicate, 96,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMMIN32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i16,
- OPC_CheckPredicate, 97,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMMIN16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i16, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i64,
- OPC_CheckPredicate, 98,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMMIN64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 6, 3, 4, 5, 6, 7, 2,
- 0,
- 75, ISD::ATOMIC_LOAD_MAX,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_SwitchType , 21, MVT::i32,
- OPC_CheckPredicate, 99,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMMAX32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i16,
- OPC_CheckPredicate, 100,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMMAX16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i16, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i64,
- OPC_CheckPredicate, 101,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMMAX64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 6, 3, 4, 5, 6, 7, 2,
- 0,
- 75, ISD::ATOMIC_LOAD_UMIN,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_SwitchType , 21, MVT::i32,
- OPC_CheckPredicate, 102,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMUMIN32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i16,
- OPC_CheckPredicate, 103,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMUMIN16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i16, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i64,
- OPC_CheckPredicate, 104,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMUMIN64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 6, 3, 4, 5, 6, 7, 2,
- 0,
- 75, ISD::ATOMIC_LOAD_UMAX,
- OPC_RecordMemRef,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_SwitchType , 21, MVT::i32,
- OPC_CheckPredicate, 105,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMUMAX32), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i32, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i16,
- OPC_CheckPredicate, 106,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMUMAX16), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i16, 6, 3, 4, 5, 6, 7, 2,
- 21, MVT::i64,
- OPC_CheckPredicate, 107,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ATOMUMAX64), 0|OPFL_Chain|OPFL_MemRefs,
- 1, MVT::i64, 6, 3, 4, 5, 6, 7, 2,
+ 52, TARGET_OPCODE(X86ISD::PUNPCKHQDQ),
+ OPC_RecordChild0,
+ OPC_Scope, 35,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHQDQrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 12,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v2i64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PUNPCKHQDQrr), 0,
+ 1, MVT::v2i64, 2, 0, 1,
0,
- 88|128,1, X86ISD::FILD,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_MoveChild, 2,
- OPC_Scope, 69,
- OPC_CheckValueType, MVT::i16,
+ 106, TARGET_OPCODE(X86ISD::MOVLPS),
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_SwitchOpcode , 49, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
OPC_MoveParent,
- OPC_SwitchType , 20, MVT::f32,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp16m32), 0|OPFL_Chain,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
- 20, MVT::f64,
- OPC_CheckPatternPredicate, 7,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp16m64), 0|OPFL_Chain,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- 18, MVT::f80,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp16m80), 0|OPFL_Chain,
- 1, MVT::f80, 5, 2, 3, 4, 5, 6,
+ OPC_SwitchType , 17, MVT::v4i32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4i32, 6, 0, 3, 4, 5, 6, 7,
+ 17, MVT::v4f32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
0,
- 69,
- OPC_CheckValueType, MVT::i32,
+ 46, TARGET_OPCODE(ISD::BIT_CONVERT),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::f64,
OPC_MoveParent,
- OPC_SwitchType , 20, MVT::f32,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp32m32), 0|OPFL_Chain,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
- 20, MVT::f64,
- OPC_CheckPatternPredicate, 7,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp32m64), 0|OPFL_Chain,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- 18, MVT::f80,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp32m80), 0|OPFL_Chain,
- 1, MVT::f80, 5, 2, 3, 4, 5, 6,
- 0,
- 69,
- OPC_CheckValueType, MVT::i64,
+ OPC_CheckType, MVT::v2f64,
OPC_MoveParent,
- OPC_SwitchType , 20, MVT::f32,
- OPC_CheckPatternPredicate, 6,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp64m32), 0|OPFL_Chain,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
- 20, MVT::f64,
- OPC_CheckPatternPredicate, 7,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp64m64), 0|OPFL_Chain,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- 18, MVT::f80,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp64m80), 0|OPFL_Chain,
- 1, MVT::f80, 5, 2, 3, 4, 5, 6,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4f32,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 98, TARGET_OPCODE(X86ISD::MOVLPD),
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_SwitchOpcode , 49, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_MoveParent,
+ OPC_SwitchType , 17, MVT::v2i64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2i64, 6, 0, 3, 4, 5, 6, 7,
+ 17, MVT::v2f64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
0,
+ 38, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::f64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2f64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVLPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
0,
- 14|128,1, X86ISD::FP_TO_INT16_IN_MEM,
+ 30, TARGET_OPCODE(X86ISD::PSHUFHW_LD),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v8i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFHWmi), 0|OPFL_Chain,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 30, TARGET_OPCODE(X86ISD::PSHUFLW_LD),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v8i16,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSHUFLWmi), 0|OPFL_Chain,
+ 1, MVT::v8i16, 6, 2, 3, 4, 5, 6, 7,
+ 2|128,1, TARGET_OPCODE(X86ISD::FP_TO_INT16_IN_MEM),
OPC_RecordNode,
OPC_RecordChild1,
- OPC_Scope, 45,
+ OPC_Scope, 41,
OPC_CheckChild1Type, MVT::f32,
OPC_RecordChild2,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 9,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 14,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ISTT_Fp16m32), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
- 18,
+ 16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::FP32_TO_INT16_IN_MEM), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 45,
+ 41,
OPC_CheckChild1Type, MVT::f64,
OPC_RecordChild2,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 9,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 14,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ISTT_Fp16m64), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
- 18,
+ 16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::FP64_TO_INT16_IN_MEM), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 45,
+ 41,
OPC_CheckChild1Type, MVT::f80,
OPC_RecordChild2,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 9,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 14,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ISTT_Fp16m80), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
- 18,
+ 16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::FP80_TO_INT16_IN_MEM), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
0,
0,
- 14|128,1, X86ISD::FP_TO_INT32_IN_MEM,
+ 2|128,1, TARGET_OPCODE(X86ISD::FP_TO_INT32_IN_MEM),
OPC_RecordNode,
OPC_RecordChild1,
- OPC_Scope, 45,
+ OPC_Scope, 41,
OPC_CheckChild1Type, MVT::f32,
OPC_RecordChild2,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 9,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 14,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ISTT_Fp32m32), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
- 18,
+ 16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::FP32_TO_INT32_IN_MEM), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 45,
+ 41,
OPC_CheckChild1Type, MVT::f64,
OPC_RecordChild2,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 9,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 14,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ISTT_Fp32m64), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
- 18,
+ 16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::FP64_TO_INT32_IN_MEM), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 45,
+ 41,
OPC_CheckChild1Type, MVT::f80,
OPC_RecordChild2,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 9,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 14,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ISTT_Fp32m80), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
- 18,
+ 16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::FP80_TO_INT32_IN_MEM), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
0,
0,
- 14|128,1, X86ISD::FP_TO_INT64_IN_MEM,
+ 2|128,1, TARGET_OPCODE(X86ISD::FP_TO_INT64_IN_MEM),
OPC_RecordNode,
OPC_RecordChild1,
- OPC_Scope, 45,
+ OPC_Scope, 41,
OPC_CheckChild1Type, MVT::f32,
OPC_RecordChild2,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 9,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 14,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ISTT_Fp64m32), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
- 18,
+ 16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::FP32_TO_INT64_IN_MEM), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 45,
+ 41,
OPC_CheckChild1Type, MVT::f64,
OPC_RecordChild2,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 9,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 14,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ISTT_Fp64m64), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
- 18,
+ 16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::FP64_TO_INT64_IN_MEM), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 45,
+ 41,
OPC_CheckChild1Type, MVT::f80,
OPC_RecordChild2,
- OPC_Scope, 20,
- OPC_CheckPatternPredicate, 9,
+ OPC_Scope, 18,
+ OPC_CheckPatternPredicate, 14,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ISTT_Fp64m80), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
- 18,
+ 16,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::FP80_TO_INT64_IN_MEM), 0|OPFL_Chain,
0, 6, 3, 4, 5, 6, 7, 1,
0,
0,
- 78, X86ISD::FLD,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_MoveChild, 2,
- OPC_Scope, 23,
- OPC_CheckValueType, MVT::f32,
- OPC_MoveParent,
- OPC_CheckType, MVT::f32,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp32m), 0|OPFL_Chain,
- 1, MVT::f32, 5, 2, 3, 4, 5, 6,
- 23,
- OPC_CheckValueType, MVT::f64,
- OPC_MoveParent,
- OPC_CheckType, MVT::f64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp64m), 0|OPFL_Chain,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- 23,
- OPC_CheckValueType, MVT::f80,
- OPC_MoveParent,
- OPC_CheckType, MVT::f80,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp80m), 0|OPFL_Chain,
- 1, MVT::f80, 5, 2, 3, 4, 5, 6,
- 0,
- 30|128,1, X86ISD::FST,
+ 18|128,1, TARGET_OPCODE(X86ISD::FST),
OPC_RecordNode,
OPC_CaptureFlagInput,
OPC_RecordChild1,
- OPC_Scope, 26,
+ OPC_Scope, 24,
OPC_CheckChild1Type, MVT::f32,
OPC_RecordChild2,
OPC_MoveChild, 3,
OPC_CheckValueType, MVT::f32,
OPC_MoveParent,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ST_Fp32m), 0|OPFL_Chain|OPFL_FlagInput,
0, 6, 3, 4, 5, 6, 7, 1,
- 51,
+ 47,
OPC_CheckChild1Type, MVT::f64,
OPC_RecordChild2,
OPC_MoveChild, 3,
- OPC_Scope, 21,
+ OPC_Scope, 19,
OPC_CheckValueType, MVT::f32,
OPC_MoveParent,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ST_Fp64m32), 0|OPFL_Chain|OPFL_FlagInput,
0, 6, 3, 4, 5, 6, 7, 1,
- 21,
+ 19,
OPC_CheckValueType, MVT::f64,
OPC_MoveParent,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ST_Fp64m), 0|OPFL_Chain|OPFL_FlagInput,
0, 6, 3, 4, 5, 6, 7, 1,
0,
- 73,
+ 67,
OPC_CheckChild1Type, MVT::f80,
OPC_RecordChild2,
OPC_MoveChild, 3,
- OPC_Scope, 21,
+ OPC_Scope, 19,
OPC_CheckValueType, MVT::f32,
OPC_MoveParent,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ST_Fp80m32), 0|OPFL_Chain|OPFL_FlagInput,
0, 6, 3, 4, 5, 6, 7, 1,
- 21,
+ 19,
OPC_CheckValueType, MVT::f64,
OPC_MoveParent,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ST_Fp80m64), 0|OPFL_Chain|OPFL_FlagInput,
0, 6, 3, 4, 5, 6, 7, 1,
- 21,
+ 19,
OPC_CheckValueType, MVT::f80,
OPC_MoveParent,
OPC_CheckComplexPat, /*CP*/0, /*#*/2,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ST_FpP80m), 0|OPFL_Chain|OPFL_FlagInput,
0, 6, 3, 4, 5, 6, 7, 1,
0,
0,
- 27, X86ISD::FILD_FLAG,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_MoveChild, 2,
- OPC_CheckValueType, MVT::i64,
- OPC_MoveParent,
- OPC_CheckType, MVT::f64,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ILD_Fp64m64), 0|OPFL_Chain|OPFL_FlagOutput,
- 1, MVT::f64, 5, 2, 3, 4, 5, 6,
- 20, X86ISD::LCMPXCHG8_DAG,
- OPC_RecordNode,
- OPC_CaptureFlagInput,
- OPC_RecordChild1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LCMPXCHG8B), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 5, 2, 3, 4, 5, 6,
- 19, X86ISD::FNSTCW16m,
- OPC_RecordNode,
- OPC_RecordChild1,
- OPC_CheckComplexPat, /*CP*/0, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FNSTCW16m), 0|OPFL_Chain,
- 0, 5, 2, 3, 4, 5, 6,
- 47, X86ISD::TLSADDR,
+ 47, TARGET_OPCODE(X86ISD::TLSADDR),
OPC_RecordNode,
OPC_CaptureFlagInput,
OPC_RecordChild1,
OPC_Scope, 20,
OPC_CheckChild1Type, MVT::i32,
- OPC_CheckPatternPredicate, 2,
+ OPC_CheckPatternPredicate, 3,
OPC_CheckComplexPat, /*CP*/5, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::TLS_addr32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 4, 2, 3, 4, 5,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
20,
OPC_CheckChild1Type, MVT::i64,
- OPC_CheckPatternPredicate, 3,
+ OPC_CheckPatternPredicate, 4,
OPC_CheckComplexPat, /*CP*/6, /*#*/1,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::TLS_addr64), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 4, 2, 3, 4, 5,
+ 1, MVT::i64, 5, 2, 3, 4, 5, 6,
0,
- 57, X86ISD::MUL_IMM,
+ 19, TARGET_OPCODE(X86ISD::LCMPXCHG8_DAG),
OPC_RecordNode,
- OPC_SwitchType , 36, MVT::i32,
- OPC_Scope, 16,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/3, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA32r), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 16,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/3, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64_32r), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 0,
- 14, MVT::i64,
- OPC_CheckComplexPat, /*CP*/4, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64r), 0,
- 1, MVT::i64, 4, 1, 2, 3, 4,
- 0,
- 53|128,2, ISD::SHL,
- OPC_Scope, 57,
- OPC_RecordNode,
- OPC_SwitchType , 36, MVT::i32,
- OPC_Scope, 16,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/3, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA32r), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 16,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/3, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64_32r), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 0,
- 14, MVT::i64,
- OPC_CheckComplexPat, /*CP*/4, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64r), 0,
- 1, MVT::i64, 4, 1, 2, 3, 4,
- 0,
- 119|128,1,
- OPC_RecordChild0,
- OPC_Scope, 120,
- OPC_MoveChild, 1,
- OPC_CheckType, MVT::i8,
- OPC_Scope, 45,
- OPC_CheckAndImm, 31,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i8,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL8rCL), 0|OPFL_FlagInput,
- 1, MVT::i8, 1, 0,
- 11, MVT::i16,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL16rCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 1, 0,
- 11, MVT::i32,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL32rCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 1, 0,
- 0,
- 17,
- OPC_CheckAndImm, 63,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL64rCL), 0|OPFL_FlagInput,
- 1, MVT::i64, 1, 0,
- 49,
- OPC_CheckInteger, 1,
- OPC_MoveParent,
- OPC_SwitchType , 9, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD8rr), 0,
- 1, MVT::i8, 2, 0, 0,
- 9, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD16rr), 0,
- 1, MVT::i16, 2, 0, 0,
- 9, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD32rr), 0,
- 1, MVT::i32, 2, 0, 0,
- 9, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADD64rr), 0,
- 1, MVT::i64, 2, 0, 0,
- 0,
- 0,
- 122,
- OPC_RecordChild1,
- OPC_Scope, 61,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i8,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL8ri), 0,
- 1, MVT::i8, 2, 0, 2,
- 11, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL16ri), 0,
- 1, MVT::i16, 2, 0, 2,
- 11, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL32ri), 0,
- 1, MVT::i32, 2, 0, 2,
- 11, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL64ri), 0,
- 1, MVT::i64, 2, 0, 2,
- 0,
- 56,
- OPC_CheckChild1Type, MVT::i8,
- OPC_SwitchType , 11, MVT::i8,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL8rCL), 0|OPFL_FlagInput,
- 1, MVT::i8, 1, 0,
- 11, MVT::i16,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL16rCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 1, 0,
- 11, MVT::i32,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL32rCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 1, 0,
- 11, MVT::i64,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHL64rCL), 0|OPFL_FlagInput,
- 1, MVT::i64, 1, 0,
- 0,
- 0,
- 0,
- 0,
- 57, ISD::FrameIndex,
+ OPC_CaptureFlagInput,
+ OPC_RecordChild1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LCMPXCHG8B), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
+ 1, MVT::i32, 5, 2, 3, 4, 5, 6,
+ 17, TARGET_OPCODE(X86ISD::FNSTCW16m),
OPC_RecordNode,
- OPC_SwitchType , 36, MVT::i32,
- OPC_Scope, 16,
- OPC_CheckPatternPredicate, 2,
- OPC_CheckComplexPat, /*CP*/3, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA32r), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 16,
- OPC_CheckPatternPredicate, 3,
- OPC_CheckComplexPat, /*CP*/3, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64_32r), 0,
- 1, MVT::i32, 4, 1, 2, 3, 4,
- 0,
- 14, MVT::i64,
- OPC_CheckComplexPat, /*CP*/4, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64r), 0,
- 1, MVT::i64, 4, 1, 2, 3, 4,
+ OPC_RecordChild1,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/1,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::FNSTCW16m), 0|OPFL_Chain,
+ 0, 5, 2, 3, 4, 5, 6,
+ 39, TARGET_OPCODE(X86ISD::TLSCALL),
+ OPC_RecordChild0,
+ OPC_Scope, 17,
+ OPC_CheckPatternPredicate, 3,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TLSCall_32), 0,
+ 1, MVT::i32, 5, 1, 2, 3, 4, 5,
+ 17,
+ OPC_CheckPatternPredicate, 4,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::TLSCall_64), 0,
+ 1, MVT::i64, 5, 1, 2, 3, 4, 5,
0,
- 17, X86ISD::WrapperRIP,
- OPC_RecordNode,
- OPC_CheckType, MVT::i64,
- OPC_CheckComplexPat, /*CP*/4, /*#*/0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LEA64r), 0,
- 1, MVT::i64, 4, 1, 2, 3, 4,
- 119|128,1, ISD::TRUNCATE,
- OPC_Scope, 78,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
+ 124, TARGET_OPCODE(X86ISD::INSERTPS),
+ OPC_RecordChild0,
+ OPC_Scope, 79,
OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_CheckPredicate, 15,
- OPC_SwitchType , 29, MVT::i16,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i32, 2, 0, 1,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 2, 3,
- 29, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i8,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, X86::GR32_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i16, 2, 0, 1,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 2, 3,
- 0,
- 36|128,1,
- OPC_RecordChild0,
- OPC_Scope, 64,
- OPC_CheckChild0Type, MVT::i32,
- OPC_SwitchType , 12, MVT::i16,
- OPC_EmitInteger, MVT::i32, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i16, 2, 0, 1,
- 44, MVT::i8,
- OPC_Scope, 14,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 0, 1,
- 26,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, X86::GR32_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i32, 2, 0, 1,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 2, 3,
- 0,
- 0,
- 46,
- OPC_CheckChild0Type, MVT::i64,
- OPC_SwitchType , 12, MVT::i32,
- OPC_EmitInteger, MVT::i32, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i32, 2, 0, 1,
- 12, MVT::i16,
- OPC_EmitInteger, MVT::i32, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i16, 2, 0, 1,
- 12, MVT::i8,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 0, 1,
- 0,
- 48,
- OPC_CheckChild0Type, MVT::i16,
- OPC_CheckType, MVT::i8,
- OPC_Scope, 14,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 0, 1,
- 26,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i16, 2, 0, 1,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 2, 3,
- 0,
- 0,
- 0,
- 122|128,1, ISD::ZERO_EXTEND,
- OPC_Scope, 15|128,1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i8,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::f32,
OPC_MoveParent,
- OPC_CheckPredicate, 15,
- OPC_CheckType, MVT::i16,
OPC_MoveParent,
- OPC_SwitchType , 72, MVT::i32,
- OPC_Scope, 34,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i16, 2, 0, 1,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 2, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr8), 0,
- 1, MVT::i32, 1, 4,
- 34,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i16, 2, 0, 1,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 2, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32_NOREXrr8), 0,
- 1, MVT::i32, 1, 4,
- 0,
- 48, MVT::i64,
- OPC_EmitInteger, MVT::i64, 0,
- OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i16, 2, 0, 2,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 3, 4,
- OPC_EmitNode, TARGET_OPCODE(X86::MOVZX32_NOREXrr8), 0,
- 1, MVT::i32, 1, 5,
- OPC_EmitInteger, MVT::i32, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0,
- 1, MVT::i64, 3, 1, 6, 7,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 22,
+ OPC_CheckPatternPredicate, 5,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::INSERTPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
+ 22,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitConvertToTarget, 3,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VINSERTPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 7, 0, 4, 5, 6, 7, 8, 9,
0,
- 102,
- OPC_RecordChild0,
- OPC_Scope, 25,
- OPC_MoveChild, 0,
- OPC_CheckPredicate, 108,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitInteger, MVT::i64, 0,
- OPC_EmitInteger, MVT::i32, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0,
- 1, MVT::i64, 3, 1, 0, 2,
- 34,
- OPC_CheckChild0Type, MVT::i8,
- OPC_SwitchType , 8, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX16rr8), 0,
- 1, MVT::i16, 1, 0,
- 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr8), 0,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rr8), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 24,
- OPC_CheckChild0Type, MVT::i16,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr16), 0,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rr16), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 12,
- OPC_CheckChild0Type, MVT::i32,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rr32), 0,
- 1, MVT::i64, 1, 0,
+ 40,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_MoveParent,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 5,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::INSERTPSrr), 0,
+ 1, MVT::v4f32, 3, 0, 1, 3,
+ 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VINSERTPSrr), 0,
+ 1, MVT::v4f32, 3, 0, 1, 3,
0,
0,
- 28|128,2, ISD::ANY_EXTEND,
- OPC_Scope, 67|128,1,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 11|128,1, ISD::SRL,
- OPC_RecordChild0,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 8,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_CheckPredicate, 15,
- OPC_CheckType, MVT::i16,
- OPC_MoveParent,
- OPC_SwitchType , 72, MVT::i32,
- OPC_Scope, 34,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i16, 2, 0, 1,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 2, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr8), 0,
- 1, MVT::i32, 1, 4,
- 34,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i16, 2, 0, 1,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 2, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32_NOREXrr8), 0,
- 1, MVT::i32, 1, 4,
- 0,
- 48, MVT::i64,
- OPC_EmitInteger, MVT::i64, 0,
- OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i16, 2, 0, 2,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 3, 4,
- OPC_EmitNode, TARGET_OPCODE(X86::MOVZX32_NOREXrr8), 0,
- 1, MVT::i32, 1, 5,
- OPC_EmitInteger, MVT::i32, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0,
- 1, MVT::i64, 3, 1, 6, 7,
- 0,
- 47, X86ISD::SETCC_CARRY,
+ 44, TARGET_OPCODE(X86ISD::MOVLHPD),
+ OPC_RecordChild0,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_MoveChild, 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::f64,
+ OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2f64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 5|128,1, TARGET_OPCODE(X86ISD::UNPCKLPD),
+ OPC_RecordChild0,
+ OPC_Scope, 101,
+ OPC_MoveChild, 1,
+ OPC_SwitchOpcode , 38, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
OPC_MoveChild, 0,
- OPC_CheckInteger, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 4,
+ OPC_CheckType, MVT::f64,
OPC_MoveParent,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2f64,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHPDrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 53, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
OPC_RecordChild1,
- OPC_CheckType, MVT::i8,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
OPC_MoveParent,
- OPC_SwitchType , 10, MVT::i16,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETB_C16r), 0|OPFL_FlagInput,
- 1, MVT::i16, 0,
- 10, MVT::i32,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETB_C32r), 0|OPFL_FlagInput,
- 1, MVT::i32, 0,
- 10, MVT::i64,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETB_C64r), 0|OPFL_FlagInput,
- 1, MVT::i64, 0,
+ OPC_CheckType, MVT::v2f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 17,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
0,
0,
- 84,
- OPC_RecordChild0,
- OPC_Scope, 20,
- OPC_CheckChild0Type, MVT::i32,
- OPC_CheckType, MVT::i64,
- OPC_EmitInteger, MVT::i64, 0,
- OPC_EmitInteger, MVT::i32, 4,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::SUBREG_TO_REG), 0,
- 1, MVT::i64, 3, 1, 0, 2,
- 34,
- OPC_CheckChild0Type, MVT::i8,
- OPC_SwitchType , 8, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX16rr8), 0,
- 1, MVT::i16, 1, 0,
- 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr8), 0,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rr8), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 24,
- OPC_CheckChild0Type, MVT::i16,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX32rr16), 0,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVZX64rr16), 0,
- 1, MVT::i64, 1, 0,
- 0,
+ 27,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v2f64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKLPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
0,
0,
- 90|128,2, ISD::SRL,
+ 90, TARGET_OPCODE(X86ISD::UNPCKLPS),
OPC_RecordChild0,
- OPC_Scope, 90|128,1,
+ OPC_Scope, 58,
OPC_MoveChild, 1,
- OPC_CheckType, MVT::i8,
- OPC_Scope, 45,
- OPC_CheckAndImm, 31,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i8,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR8rCL), 0|OPFL_FlagInput,
- 1, MVT::i8, 1, 0,
- 11, MVT::i16,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR16rCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 1, 0,
- 11, MVT::i32,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR32rCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 1, 0,
- 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
17,
- OPC_CheckAndImm, 63,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR64rCL), 0|OPFL_FlagInput,
- 1, MVT::i64, 1, 0,
- 45,
- OPC_CheckInteger, 1,
- OPC_MoveParent,
- OPC_SwitchType , 8, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR8r1), 0,
- 1, MVT::i8, 1, 0,
- 8, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR16r1), 0,
- 1, MVT::i16, 1, 0,
- 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR32r1), 0,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR64r1), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 101,
- OPC_CheckInteger, 8,
- OPC_MoveParent,
- OPC_CheckType, MVT::i16,
- OPC_Scope, 46,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i16, 2, 0, 1,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 2, 3,
- OPC_EmitNode, TARGET_OPCODE(X86::MOVZX32rr8), 0,
- 1, MVT::i32, 1, 4,
- OPC_EmitInteger, MVT::i32, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i16, 2, 5, 6,
- 46,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i16, 2, 0, 1,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 2, 3,
- OPC_EmitNode, TARGET_OPCODE(X86::MOVZX32_NOREXrr8), 0,
- 1, MVT::i32, 1, 4,
- OPC_EmitInteger, MVT::i32, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i16, 2, 5, 6,
- 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
0,
- 122,
+ 27,
OPC_RecordChild1,
- OPC_Scope, 61,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i8,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR8ri), 0,
- 1, MVT::i8, 2, 0, 2,
- 11, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR16ri), 0,
- 1, MVT::i16, 2, 0, 2,
- 11, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR32ri), 0,
- 1, MVT::i32, 2, 0, 2,
- 11, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR64ri), 0,
- 1, MVT::i64, 2, 0, 2,
- 0,
- 56,
- OPC_CheckChild1Type, MVT::i8,
- OPC_SwitchType , 11, MVT::i8,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR8rCL), 0|OPFL_FlagInput,
- 1, MVT::i8, 1, 0,
- 11, MVT::i16,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR16rCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 1, 0,
- 11, MVT::i32,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR32rCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 1, 0,
- 11, MVT::i64,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHR64rCL), 0|OPFL_FlagInput,
- 1, MVT::i64, 1, 0,
- 0,
+ OPC_CheckType, MVT::v4f32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKLPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
0,
0,
- 115|128,1, ISD::SRA,
+ 90, TARGET_OPCODE(X86ISD::UNPCKHPS),
OPC_RecordChild0,
- OPC_Scope, 116,
+ OPC_Scope, 58,
OPC_MoveChild, 1,
- OPC_CheckType, MVT::i8,
- OPC_Scope, 45,
- OPC_CheckAndImm, 31,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i8,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR8rCL), 0|OPFL_FlagInput,
- 1, MVT::i8, 1, 0,
- 11, MVT::i16,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR16rCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 1, 0,
- 11, MVT::i32,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR32rCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 1, 0,
- 0,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v4f32,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKHPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
17,
- OPC_CheckAndImm, 63,
- OPC_RecordChild0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR64rCL), 0|OPFL_FlagInput,
- 1, MVT::i64, 1, 0,
- 45,
- OPC_CheckInteger, 1,
- OPC_MoveParent,
- OPC_SwitchType , 8, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR8r1), 0,
- 1, MVT::i8, 1, 0,
- 8, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR16r1), 0,
- 1, MVT::i16, 1, 0,
- 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR32r1), 0,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR64r1), 0,
- 1, MVT::i64, 1, 0,
- 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKHPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v4f32, 6, 0, 3, 4, 5, 6, 7,
0,
- 122,
+ 27,
OPC_RecordChild1,
- OPC_Scope, 61,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i8,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR8ri), 0,
- 1, MVT::i8, 2, 0, 2,
- 11, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR16ri), 0,
- 1, MVT::i16, 2, 0, 2,
- 11, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR32ri), 0,
- 1, MVT::i32, 2, 0, 2,
- 11, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR64ri), 0,
- 1, MVT::i64, 2, 0, 2,
- 0,
- 56,
- OPC_CheckChild1Type, MVT::i8,
- OPC_SwitchType , 11, MVT::i8,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR8rCL), 0|OPFL_FlagInput,
- 1, MVT::i8, 1, 0,
- 11, MVT::i16,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR16rCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 1, 0,
- 11, MVT::i32,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR32rCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 1, 0,
- 11, MVT::i64,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SAR64rCL), 0|OPFL_FlagInput,
- 1, MVT::i64, 1, 0,
- 0,
+ OPC_CheckType, MVT::v4f32,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKHPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKHPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 0,
+ 90, TARGET_OPCODE(X86ISD::UNPCKHPD),
+ OPC_RecordChild0,
+ OPC_Scope, 58,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::LOAD),
+ OPC_RecordMemRef,
+ OPC_RecordNode,
+ OPC_CheckFoldableChainNode,
+ OPC_RecordChild1,
+ OPC_CheckPredicate, 2,
+ OPC_CheckPredicate, 3,
+ OPC_CheckPredicate, 21,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2f64,
+ OPC_Scope, 19,
+ OPC_CheckPatternPredicate, 0,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 17,
+ OPC_CheckComplexPat, /*CP*/0, /*#*/2,
+ OPC_EmitMergeInputChains1_1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKLPSrm), 0|OPFL_Chain|OPFL_MemRefs,
+ 1, MVT::v2f64, 6, 0, 3, 4, 5, 6, 7,
+ 0,
+ 27,
+ OPC_RecordChild1,
+ OPC_CheckType, MVT::v2f64,
+ OPC_Scope, 11,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VUNPCKHPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 9,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::UNPCKHPDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
0,
0,
- 44, ISD::CALLSEQ_END,
+ 44, TARGET_OPCODE(ISD::CALLSEQ_END),
OPC_RecordNode,
OPC_CaptureFlagInput,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TargetConstant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::TargetConstant),
OPC_MoveParent,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::TargetConstant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::TargetConstant),
OPC_MoveParent,
- OPC_Scope, 13,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADJCALLSTACKUP32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 2, 1, 2,
- 13,
+ OPC_Scope, 12,
OPC_CheckPatternPredicate, 3,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADJCALLSTACKUP64), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 2, 1, 2,
- 0,
- 22|128,1, X86ISD::TC_RETURN,
- OPC_RecordNode,
- OPC_CaptureFlagInput,
- OPC_RecordChild1,
- OPC_Scope, 100,
- OPC_MoveChild, 1,
- OPC_SwitchOpcode , 46, ISD::TargetGlobalAddress,
- OPC_SwitchType , 20, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TCRETURNdi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
- 0, 2, 1, 3,
- 20, MVT::i64,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TCRETURNdi64), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
- 0, 2, 1, 3,
- 0,
- 46, ISD::TargetExternalSymbol,
- OPC_SwitchType , 20, MVT::i32,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TCRETURNdi), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
- 0, 2, 1, 3,
- 20, MVT::i64,
- OPC_MoveParent,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TCRETURNdi64), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
- 0, 2, 1, 3,
- 0,
- 0,
- 21,
- OPC_CheckChild1Type, MVT::i32,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TCRETURNri), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
- 0, 2, 1, 3,
- 21,
- OPC_CheckChild1Type, MVT::i64,
- OPC_RecordChild2,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::TCRETURNri64), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic2,
- 0, 2, 1, 3,
- 0,
- 45|128,1, ISD::EXTRACT_VECTOR_ELT,
- OPC_Scope, 31,
- OPC_MoveChild, 0,
- OPC_CheckOpcode, ISD::BIT_CONVERT,
- OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_CheckType, MVT::v4i32,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADJCALLSTACKUP32), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
+ 1, MVT::i32, 2, 1, 2,
+ 12,
OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::EXTRACTPSrr), 0,
- 1, MVT::i32, 2, 0, 2,
- 9|128,1,
- OPC_RecordChild0,
- OPC_Scope, 21,
- OPC_CheckChild0Type, MVT::v4f32,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckType, MVT::f32,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f32, 2, 0, 1,
- 21,
- OPC_CheckChild0Type, MVT::v2f64,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckType, MVT::f64,
- OPC_EmitInteger, MVT::i32, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::f64, 2, 0, 1,
- 44,
- OPC_CheckChild0Type, MVT::v2i64,
- OPC_Scope, 17,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVPQIto64rr), 0,
- 1, MVT::i64, 1, 0,
- 21,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PEXTRQrr), 0,
- 1, MVT::i64, 2, 0, 2,
- 0,
- 44,
- OPC_CheckChild0Type, MVT::v4i32,
- OPC_Scope, 17,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVPDI2DIrr), 0,
- 1, MVT::i32, 1, 0,
- 21,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PEXTRDrr), 0,
- 1, MVT::i32, 2, 0, 2,
- 0,
- 0,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADJCALLSTACKUP64), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
+ 1, MVT::i64, 2, 1, 2,
0,
- 30, X86ISD::VASTART_SAVE_XMM_REGS,
+ 30, TARGET_OPCODE(X86ISD::VASTART_SAVE_XMM_REGS),
OPC_RecordNode,
OPC_RecordChild1,
OPC_RecordChild2,
OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
OPC_RecordChild3,
OPC_MoveChild, 3,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitConvertToTarget, 2,
OPC_EmitConvertToTarget, 3,
OPC_MorphNodeTo, TARGET_OPCODE(X86::VASTART_SAVE_XMM_REGS), 0|OPFL_Chain|OPFL_Variadic3,
0, 3, 1, 4, 5,
- 36, X86ISD::RET_FLAG,
+ 33, TARGET_OPCODE(X86ISD::RET_FLAG),
OPC_RecordNode,
OPC_CaptureFlagInput,
- OPC_Scope, 14,
+ OPC_Scope, 12,
OPC_MoveChild, 1,
OPC_CheckInteger, 0,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::RET), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic1,
0, 0,
- 16,
+ 15,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TargetConstant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::TargetConstant),
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::RETI), 0|OPFL_Chain|OPFL_FlagInput|OPFL_Variadic1,
0, 1, 1,
0,
- 43|128,2, X86ISD::BRCOND,
+ 12|128,2, TARGET_OPCODE(X86ISD::BRCOND),
OPC_RecordNode,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BasicBlock,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BasicBlock),
OPC_MoveParent,
OPC_MoveChild, 2,
- OPC_Scope, 17,
+ OPC_Scope, 15,
OPC_CheckInteger, 13,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JO_4), 0|OPFL_Chain|OPFL_FlagInput,
0, 1, 1,
- 17,
+ 15,
OPC_CheckInteger, 10,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JNO_4), 0|OPFL_Chain|OPFL_FlagInput,
0, 1, 1,
- 17,
+ 15,
OPC_CheckInteger, 2,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JB_4), 0|OPFL_Chain|OPFL_FlagInput,
0, 1, 1,
- 17,
+ 15,
OPC_CheckInteger, 1,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JAE_4), 0|OPFL_Chain|OPFL_FlagInput,
0, 1, 1,
- 17,
+ 15,
OPC_CheckInteger, 4,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JE_4), 0|OPFL_Chain|OPFL_FlagInput,
0, 1, 1,
- 17,
+ 15,
OPC_CheckInteger, 9,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JNE_4), 0|OPFL_Chain|OPFL_FlagInput,
0, 1, 1,
- 17,
+ 15,
OPC_CheckInteger, 3,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JBE_4), 0|OPFL_Chain|OPFL_FlagInput,
0, 1, 1,
- 17,
+ 15,
OPC_CheckInteger, 0,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JA_4), 0|OPFL_Chain|OPFL_FlagInput,
0, 1, 1,
- 17,
+ 15,
OPC_CheckInteger, 15,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JS_4), 0|OPFL_Chain|OPFL_FlagInput,
0, 1, 1,
- 17,
+ 15,
OPC_CheckInteger, 12,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JNS_4), 0|OPFL_Chain|OPFL_FlagInput,
0, 1, 1,
- 17,
+ 15,
OPC_CheckInteger, 14,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JP_4), 0|OPFL_Chain|OPFL_FlagInput,
0, 1, 1,
- 17,
+ 15,
OPC_CheckInteger, 11,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JNP_4), 0|OPFL_Chain|OPFL_FlagInput,
0, 1, 1,
- 17,
+ 15,
OPC_CheckInteger, 7,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JL_4), 0|OPFL_Chain|OPFL_FlagInput,
0, 1, 1,
- 17,
+ 15,
OPC_CheckInteger, 6,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JGE_4), 0|OPFL_Chain|OPFL_FlagInput,
0, 1, 1,
- 17,
+ 15,
OPC_CheckInteger, 8,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JLE_4), 0|OPFL_Chain|OPFL_FlagInput,
0, 1, 1,
- 17,
+ 15,
OPC_CheckInteger, 5,
OPC_MoveParent,
OPC_RecordChild3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_EmitCopyToReg, 2, X86::EFLAGS,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JG_4), 0|OPFL_Chain|OPFL_FlagInput,
0, 1, 1,
0,
- 48|128,1, ISD::ROTL,
- OPC_RecordChild0,
- OPC_Scope, 49,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 1,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_SwitchType , 8, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL8r1), 0,
- 1, MVT::i8, 1, 0,
- 8, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL16r1), 0,
- 1, MVT::i16, 1, 0,
- 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL32r1), 0,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL64r1), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 122,
- OPC_RecordChild1,
- OPC_Scope, 61,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i8,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL8ri), 0,
- 1, MVT::i8, 2, 0, 2,
- 11, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL16ri), 0,
- 1, MVT::i16, 2, 0, 2,
- 11, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL32ri), 0,
- 1, MVT::i32, 2, 0, 2,
- 11, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL64ri), 0,
- 1, MVT::i64, 2, 0, 2,
- 0,
- 56,
- OPC_CheckChild1Type, MVT::i8,
- OPC_SwitchType , 11, MVT::i8,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL8rCL), 0|OPFL_FlagInput,
- 1, MVT::i8, 1, 0,
- 11, MVT::i16,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL16rCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 1, 0,
- 11, MVT::i32,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL32rCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 1, 0,
- 11, MVT::i64,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROL64rCL), 0|OPFL_FlagInput,
- 1, MVT::i64, 1, 0,
- 0,
- 0,
- 0,
- 48|128,1, ISD::ROTR,
- OPC_RecordChild0,
- OPC_Scope, 49,
- OPC_MoveChild, 1,
- OPC_CheckInteger, 1,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_SwitchType , 8, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR8r1), 0,
- 1, MVT::i8, 1, 0,
- 8, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR16r1), 0,
- 1, MVT::i16, 1, 0,
- 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR32r1), 0,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR64r1), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 122,
- OPC_RecordChild1,
- OPC_Scope, 61,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_SwitchType , 11, MVT::i8,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR8ri), 0,
- 1, MVT::i8, 2, 0, 2,
- 11, MVT::i16,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR16ri), 0,
- 1, MVT::i16, 2, 0, 2,
- 11, MVT::i32,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR32ri), 0,
- 1, MVT::i32, 2, 0, 2,
- 11, MVT::i64,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR64ri), 0,
- 1, MVT::i64, 2, 0, 2,
- 0,
- 56,
- OPC_CheckChild1Type, MVT::i8,
- OPC_SwitchType , 11, MVT::i8,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR8rCL), 0|OPFL_FlagInput,
- 1, MVT::i8, 1, 0,
- 11, MVT::i16,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR16rCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 1, 0,
- 11, MVT::i32,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR32rCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 1, 0,
- 11, MVT::i64,
- OPC_EmitCopyToReg, 1, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ROR64rCL), 0|OPFL_FlagInput,
- 1, MVT::i64, 1, 0,
- 0,
- 0,
- 0,
- 56, X86ISD::SETCC_CARRY,
- OPC_MoveChild, 0,
- OPC_CheckInteger, 2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_SwitchType , 10, MVT::i8,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETB_C8r), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 10, MVT::i16,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETB_C16r), 0|OPFL_FlagInput,
- 1, MVT::i16, 0,
- 10, MVT::i32,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETB_C32r), 0|OPFL_FlagInput,
- 1, MVT::i32, 0,
- 10, MVT::i64,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETB_C64r), 0|OPFL_FlagInput,
- 1, MVT::i64, 0,
- 0,
- 116|128,1, X86ISD::SETCC,
- OPC_MoveChild, 0,
- OPC_Scope, 14,
- OPC_CheckInteger, 4,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETEr), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 14,
- OPC_CheckInteger, 9,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETNEr), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 14,
- OPC_CheckInteger, 7,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETLr), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 14,
- OPC_CheckInteger, 6,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETGEr), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 14,
- OPC_CheckInteger, 8,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETLEr), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 14,
- OPC_CheckInteger, 5,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETGr), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 14,
- OPC_CheckInteger, 2,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETBr), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 14,
- OPC_CheckInteger, 1,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETAEr), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 14,
- OPC_CheckInteger, 3,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETBEr), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 14,
- OPC_CheckInteger, 0,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETAr), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 14,
- OPC_CheckInteger, 15,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETSr), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 14,
- OPC_CheckInteger, 12,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETNSr), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 14,
- OPC_CheckInteger, 14,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETPr), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 14,
- OPC_CheckInteger, 11,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETNPr), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 14,
- OPC_CheckInteger, 13,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETOr), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 14,
- OPC_CheckInteger, 10,
- OPC_MoveParent,
- OPC_RecordChild1,
- OPC_EmitCopyToReg, 0, X86::EFLAGS,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SETNOr), 0|OPFL_FlagInput,
- 1, MVT::i8, 0,
- 0,
- 29, X86ISD::FSRL,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckPredicate, 11,
- OPC_CheckType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::v2f64,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 5, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLDQri), 0,
- 1, MVT::v2f64, 2, 0, 3,
- 35, ISD::CALLSEQ_START,
+ 34, TARGET_OPCODE(ISD::CALLSEQ_START),
OPC_RecordNode,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::TargetConstant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::TargetConstant),
OPC_MoveParent,
- OPC_Scope, 12,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitMergeInputChains, 1, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ADJCALLSTACKDOWN32), 0|OPFL_Chain|OPFL_FlagOutput,
- 0, 1, 1,
- 12,
+ OPC_Scope, 11,
OPC_CheckPatternPredicate, 3,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::ADJCALLSTACKDOWN32), 0|OPFL_Chain|OPFL_FlagOutput,
+ 1, MVT::i32, 1, 1,
+ 11,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::ADJCALLSTACKDOWN64), 0|OPFL_Chain|OPFL_FlagOutput,
- 0, 1, 1,
- 0,
- 104, X86ISD::SHLD,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_Scope, 51,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_SwitchType , 12, MVT::i32,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32rri8), 0,
- 1, MVT::i32, 3, 0, 1, 3,
- 12, MVT::i16,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16rri8), 0,
- 1, MVT::i16, 3, 0, 1, 3,
- 12, MVT::i64,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD64rri8), 0,
- 1, MVT::i64, 3, 0, 1, 3,
- 0,
- 46,
- OPC_CheckChild2Type, MVT::i8,
- OPC_SwitchType , 12, MVT::i32,
- OPC_EmitCopyToReg, 2, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD32rrCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 2, 0, 1,
- 12, MVT::i16,
- OPC_EmitCopyToReg, 2, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD16rrCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 2, 0, 1,
- 12, MVT::i64,
- OPC_EmitCopyToReg, 2, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHLD64rrCL), 0|OPFL_FlagInput,
- 1, MVT::i64, 2, 0, 1,
- 0,
- 0,
- 104, X86ISD::SHRD,
- OPC_RecordChild0,
- OPC_RecordChild1,
- OPC_RecordChild2,
- OPC_Scope, 51,
- OPC_MoveChild, 2,
- OPC_CheckOpcode, ISD::Constant,
- OPC_CheckType, MVT::i8,
- OPC_MoveParent,
- OPC_SwitchType , 12, MVT::i32,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32rri8), 0,
- 1, MVT::i32, 3, 0, 1, 3,
- 12, MVT::i16,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16rri8), 0,
- 1, MVT::i16, 3, 0, 1, 3,
- 12, MVT::i64,
- OPC_EmitConvertToTarget, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD64rri8), 0,
- 1, MVT::i64, 3, 0, 1, 3,
- 0,
- 46,
- OPC_CheckChild2Type, MVT::i8,
- OPC_SwitchType , 12, MVT::i32,
- OPC_EmitCopyToReg, 2, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD32rrCL), 0|OPFL_FlagInput,
- 1, MVT::i32, 2, 0, 1,
- 12, MVT::i16,
- OPC_EmitCopyToReg, 2, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD16rrCL), 0|OPFL_FlagInput,
- 1, MVT::i16, 2, 0, 1,
- 12, MVT::i64,
- OPC_EmitCopyToReg, 2, X86::CL,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SHRD64rrCL), 0|OPFL_FlagInput,
- 1, MVT::i64, 2, 0, 1,
- 0,
- 0,
- 22|128,2, X86ISD::Wrapper,
- OPC_RecordChild0,
- OPC_MoveChild, 0,
- OPC_SwitchOpcode , 50, ISD::TargetConstantPool,
- OPC_MoveParent,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32ri), 0,
- 1, MVT::i32, 1, 0,
- 35, MVT::i64,
- OPC_Scope, 10,
- OPC_CheckPatternPredicate, 20,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri), 0,
- 1, MVT::i64, 1, 0,
- 10,
- OPC_CheckPatternPredicate, 21,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri64i32), 0,
- 1, MVT::i64, 1, 0,
- 10,
- OPC_CheckPatternPredicate, 22,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri32), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 0,
- 50, ISD::TargetJumpTable,
- OPC_MoveParent,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32ri), 0,
- 1, MVT::i32, 1, 0,
- 35, MVT::i64,
- OPC_Scope, 10,
- OPC_CheckPatternPredicate, 20,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri), 0,
- 1, MVT::i64, 1, 0,
- 10,
- OPC_CheckPatternPredicate, 21,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri64i32), 0,
- 1, MVT::i64, 1, 0,
- 10,
- OPC_CheckPatternPredicate, 22,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri32), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 0,
- 11, ISD::TargetGlobalTLSAddress,
- OPC_MoveParent,
- OPC_CheckType, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32ri), 0,
- 1, MVT::i32, 1, 0,
- 50, ISD::TargetGlobalAddress,
- OPC_MoveParent,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32ri), 0,
- 1, MVT::i32, 1, 0,
- 35, MVT::i64,
- OPC_Scope, 10,
- OPC_CheckPatternPredicate, 20,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri), 0,
- 1, MVT::i64, 1, 0,
- 10,
- OPC_CheckPatternPredicate, 21,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri64i32), 0,
- 1, MVT::i64, 1, 0,
- 10,
- OPC_CheckPatternPredicate, 22,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri32), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 0,
- 50, ISD::TargetExternalSymbol,
- OPC_MoveParent,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32ri), 0,
- 1, MVT::i32, 1, 0,
- 35, MVT::i64,
- OPC_Scope, 10,
- OPC_CheckPatternPredicate, 20,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri), 0,
- 1, MVT::i64, 1, 0,
- 10,
- OPC_CheckPatternPredicate, 21,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri64i32), 0,
- 1, MVT::i64, 1, 0,
- 10,
- OPC_CheckPatternPredicate, 22,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri32), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 0,
- 50, ISD::TargetBlockAddress,
- OPC_MoveParent,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32ri), 0,
- 1, MVT::i32, 1, 0,
- 35, MVT::i64,
- OPC_Scope, 10,
- OPC_CheckPatternPredicate, 20,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri), 0,
- 1, MVT::i64, 1, 0,
- 10,
- OPC_CheckPatternPredicate, 21,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri64i32), 0,
- 1, MVT::i64, 1, 0,
- 10,
- OPC_CheckPatternPredicate, 22,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri32), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 0,
+ 1, MVT::i64, 1, 1,
0,
- 124, ISD::Constant,
- OPC_Scope, 40,
- OPC_CheckInteger, 0,
- OPC_SwitchType , 7, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64r0), 0,
- 1, MVT::i64, 0,
- 7, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV8r0), 0,
- 1, MVT::i8, 0,
- 7, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV16r0), 0,
- 1, MVT::i16, 0,
- 7, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32r0), 0,
- 1, MVT::i32, 0,
- 0,
- 80,
- OPC_RecordNode,
- OPC_SwitchType , 39, MVT::i64,
- OPC_Scope, 12,
- OPC_CheckPredicate, 69,
- OPC_EmitConvertToTarget, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri64i32), 0,
- 1, MVT::i64, 1, 1,
- 12,
- OPC_CheckPredicate, 12,
- OPC_EmitConvertToTarget, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri32), 0,
- 1, MVT::i64, 1, 1,
- 10,
- OPC_EmitConvertToTarget, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV64ri), 0,
- 1, MVT::i64, 1, 1,
- 0,
- 10, MVT::i8,
- OPC_EmitConvertToTarget, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV8ri), 0,
- 1, MVT::i8, 1, 1,
- 10, MVT::i16,
- OPC_EmitConvertToTarget, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV16ri), 0,
- 1, MVT::i16, 1, 1,
- 10, MVT::i32,
- OPC_EmitConvertToTarget, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOV32ri), 0,
- 1, MVT::i32, 1, 1,
- 0,
- 0,
- 42, X86ISD::VSHL,
+ 66, TARGET_OPCODE(X86ISD::VSHL),
OPC_RecordChild0,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
- OPC_SwitchType , 16, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 5, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLDQri), 0,
- 1, MVT::v2i64, 2, 0, 3,
- 11, MVT::v1i64,
+ OPC_SwitchType , 36, MVT::v2i64,
+ OPC_Scope, 16,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSLLDQri), 0,
+ 1, MVT::v2i64, 2, 0, 3,
+ 16,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSLLDQri), 0,
+ 1, MVT::v2i64, 2, 0, 3,
+ 0,
+ 14, MVT::v1i64,
OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 1, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSLLQri), 0,
- 1, MVT::v1i64, 2, 0, 2,
+ 1, MVT::v1i64, 2, 0, 3,
0,
- 42, X86ISD::VSRL,
+ 66, TARGET_OPCODE(X86ISD::VSRL),
OPC_RecordChild0,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
OPC_CheckType, MVT::i8,
OPC_MoveParent,
- OPC_SwitchType , 16, MVT::v2i64,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 1,
- OPC_EmitNodeXForm, 5, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLDQri), 0,
- 1, MVT::v2i64, 2, 0, 3,
- 11, MVT::v1i64,
+ OPC_SwitchType , 36, MVT::v2i64,
+ OPC_Scope, 16,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRLDQri), 0,
+ 1, MVT::v2i64, 2, 0, 3,
+ 16,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLDQri), 0,
+ 1, MVT::v2i64, 2, 0, 3,
+ 0,
+ 14, MVT::v1i64,
OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 1, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PSRLQri), 0,
- 1, MVT::v1i64, 2, 0, 2,
- 0,
- 47, X86ISD::PEXTRW,
- OPC_RecordChild0,
- OPC_Scope, 21,
- OPC_CheckChild0Type, MVT::v8i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 1,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PEXTRWri), 0,
- 1, MVT::i32, 2, 0, 2,
- 21,
- OPC_CheckChild0Type, MVT::v4i16,
- OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
- OPC_MoveParent,
- OPC_CheckPatternPredicate, 8,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_PEXTRWri), 0,
- 1, MVT::i32, 2, 0, 2,
+ 1, MVT::v1i64, 2, 0, 3,
0,
- 22, X86ISD::PEXTRB,
+ 77, TARGET_OPCODE(X86ISD::SHUFPD),
OPC_RecordChild0,
- OPC_CheckChild0Type, MVT::v16i8,
OPC_RecordChild1,
- OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::Constant,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
OPC_MoveParent,
- OPC_CheckPatternPredicate, 4,
- OPC_EmitConvertToTarget, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::PEXTRBrr), 0,
- 1, MVT::i32, 2, 0, 2,
- 94|128,1, ISD::ConstantFP,
- OPC_SwitchType , 78, MVT::f32,
- OPC_Scope, 11,
- OPC_CheckPredicate, 109,
- OPC_CheckPatternPredicate, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp032), 0,
- 1, MVT::f32, 0,
- 11,
- OPC_CheckPredicate, 110,
- OPC_CheckPatternPredicate, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp132), 0,
- 1, MVT::f32, 0,
- 11,
- OPC_CheckPredicate, 111,
+ OPC_SwitchType , 30, MVT::v2i64,
+ OPC_Scope, 14,
OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsFLD0SS), 0,
- 1, MVT::f32, 0,
- 19,
- OPC_CheckPredicate, 112,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitNode, TARGET_OPCODE(X86::LD_Fp032), 0,
- 1, MVT::f32, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp32), 0,
- 1, MVT::f32, 1, 0,
- 19,
- OPC_CheckPredicate, 113,
- OPC_CheckPatternPredicate, 6,
- OPC_EmitNode, TARGET_OPCODE(X86::LD_Fp132), 0,
- 1, MVT::f32, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp32), 0,
- 1, MVT::f32, 1, 0,
- 0,
- 78, MVT::f64,
- OPC_Scope, 11,
- OPC_CheckPredicate, 109,
- OPC_CheckPatternPredicate, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp064), 0,
- 1, MVT::f64, 0,
- 11,
- OPC_CheckPredicate, 110,
- OPC_CheckPatternPredicate, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp164), 0,
- 1, MVT::f64, 0,
- 11,
- OPC_CheckPredicate, 109,
- OPC_CheckPatternPredicate, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::FsFLD0SD), 0,
- 1, MVT::f64, 0,
- 19,
- OPC_CheckPredicate, 112,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitNode, TARGET_OPCODE(X86::LD_Fp064), 0,
- 1, MVT::f64, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp64), 0,
- 1, MVT::f64, 1, 0,
- 19,
- OPC_CheckPredicate, 113,
- OPC_CheckPatternPredicate, 7,
- OPC_EmitNode, TARGET_OPCODE(X86::LD_Fp164), 0,
- 1, MVT::f64, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp64), 0,
- 1, MVT::f64, 1, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSHUFPDrri), 0,
+ 1, MVT::v2i64, 3, 0, 1, 3,
+ 12,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPDrri), 0,
+ 1, MVT::v2i64, 3, 0, 1, 3,
0,
- 58, MVT::f80,
- OPC_Scope, 9,
- OPC_CheckPredicate, 109,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp080), 0,
- 1, MVT::f80, 0,
- 9,
- OPC_CheckPredicate, 110,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::LD_Fp180), 0,
- 1, MVT::f80, 0,
- 17,
- OPC_CheckPredicate, 112,
- OPC_EmitNode, TARGET_OPCODE(X86::LD_Fp080), 0,
- 1, MVT::f80, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp80), 0,
- 1, MVT::f80, 1, 0,
- 17,
- OPC_CheckPredicate, 113,
- OPC_EmitNode, TARGET_OPCODE(X86::LD_Fp180), 0,
- 1, MVT::f80, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp80), 0,
- 1, MVT::f80, 1, 0,
+ 30, MVT::v2f64,
+ OPC_Scope, 14,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VSHUFPDrri), 0,
+ 1, MVT::v2f64, 3, 0, 1, 3,
+ 12,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SHUFPDrri), 0,
+ 1, MVT::v2f64, 3, 0, 1, 3,
0,
0,
- 15|128,1, ISD::BUILD_VECTOR,
- OPC_Scope, 60,
- OPC_CheckPredicate, 44,
- OPC_SwitchType , 9, MVT::v4i32,
- OPC_CheckPatternPredicate, 0,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::V_SET0), 0,
- 1, MVT::v4i32, 0,
+ 125, TARGET_OPCODE(X86ISD::PALIGN),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_RecordChild2,
+ OPC_MoveChild, 2,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckType, MVT::i8,
+ OPC_MoveParent,
+ OPC_SwitchType , 12, MVT::v1i64,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR64rr), 0,
+ 1, MVT::v1i64, 3, 1, 0, 3,
+ 12, MVT::v2i32,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR64rr), 0,
+ 1, MVT::v2i32, 3, 1, 0, 3,
+ 12, MVT::v4i16,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR64rr), 0,
+ 1, MVT::v4i16, 3, 1, 0, 3,
+ 12, MVT::v8i8,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR64rr), 0,
+ 1, MVT::v8i8, 3, 1, 0, 3,
+ 12, MVT::v4i32,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR128rr), 0,
+ 1, MVT::v4i32, 3, 1, 0, 3,
+ 12, MVT::v8i16,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR128rr), 0,
+ 1, MVT::v8i16, 3, 1, 0, 3,
+ 12, MVT::v16i8,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR128rr), 0,
+ 1, MVT::v16i8, 3, 1, 0, 3,
+ 12, MVT::v4f32,
+ OPC_EmitConvertToTarget, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PALIGNR128rr), 0,
+ 1, MVT::v4f32, 3, 1, 0, 3,
+ 0,
+ 80|128,1, TARGET_OPCODE(ISD::BUILD_VECTOR),
+ OPC_Scope, 55,
+ OPC_CheckPredicate, 107,
+ OPC_SwitchType , 22, MVT::v4i32,
+ OPC_Scope, 9,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::V_SET0PI), 0,
+ 1, MVT::v4i32, 0,
+ 9,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AVX_SET0PI), 0,
+ 1, MVT::v4i32, 0,
+ 0,
7, MVT::v2i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::V_SET0), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::V_SET0PI), 0,
1, MVT::v2i64, 0,
7, MVT::v8i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::V_SET0), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::V_SET0PI), 0,
1, MVT::v8i16, 0,
7, MVT::v16i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::V_SET0), 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::V_SET0PI), 0,
1, MVT::v16i8, 0,
- 7, MVT::v2f64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::V_SET0), 0,
- 1, MVT::v2f64, 0,
- 7, MVT::v4f32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::V_SET0), 0,
- 1, MVT::v4f32, 0,
0,
13,
- OPC_CheckPredicate, 67,
+ OPC_CheckPredicate, 44,
OPC_CheckType, MVT::v4i32,
- OPC_CheckPatternPredicate, 1,
+ OPC_CheckPatternPredicate, 2,
OPC_MorphNodeTo, TARGET_OPCODE(X86::V_SETALLONES), 0,
1, MVT::v4i32, 0,
13,
- OPC_CheckPredicate, 44,
+ OPC_CheckPredicate, 107,
OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_V_SET0), 0,
1, MVT::v2i32, 0,
13,
- OPC_CheckPredicate, 67,
+ OPC_CheckPredicate, 44,
OPC_CheckType, MVT::v2i32,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_V_SETALLONES), 0,
1, MVT::v2i32, 0,
- 37,
- OPC_CheckPredicate, 44,
+ 107,
+ OPC_CheckPredicate, 107,
OPC_SwitchType , 9, MVT::v1i64,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_V_SET0), 0,
1, MVT::v1i64, 0,
9, MVT::v4i16,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_V_SET0), 0,
1, MVT::v4i16, 0,
9, MVT::v8i8,
- OPC_CheckPatternPredicate, 8,
+ OPC_CheckPatternPredicate, 9,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MMX_V_SET0), 0,
1, MVT::v8i8, 0,
+ 22, MVT::v4f32,
+ OPC_Scope, 9,
+ OPC_CheckPatternPredicate, 1,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::V_SET0PS), 0,
+ 1, MVT::v4f32, 0,
+ 9,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AVX_SET0PS), 0,
+ 1, MVT::v4f32, 0,
+ 0,
+ 22, MVT::v2f64,
+ OPC_Scope, 9,
+ OPC_CheckPatternPredicate, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::V_SET0PD), 0,
+ 1, MVT::v2f64, 0,
+ 9,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AVX_SET0PD), 0,
+ 1, MVT::v2f64, 0,
+ 0,
+ 9, MVT::v8f32,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AVX_SET0PSY), 0,
+ 1, MVT::v8f32, 0,
+ 9, MVT::v4f64,
+ OPC_CheckPatternPredicate, 0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::AVX_SET0PDY), 0,
+ 1, MVT::v4f64, 0,
0,
0,
- 37, ISD::FNEG,
- OPC_RecordChild0,
- OPC_SwitchType , 10, MVT::f32,
- OPC_CheckPatternPredicate, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp32), 0,
- 1, MVT::f32, 1, 0,
- 10, MVT::f64,
- OPC_CheckPatternPredicate, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp64), 0,
- 1, MVT::f64, 1, 0,
- 8, MVT::f80,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::CHS_Fp80), 0,
- 1, MVT::f80, 1, 0,
- 0,
- 37, ISD::FABS,
- OPC_RecordChild0,
- OPC_SwitchType , 10, MVT::f32,
- OPC_CheckPatternPredicate, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ABS_Fp32), 0,
- 1, MVT::f32, 1, 0,
- 10, MVT::f64,
- OPC_CheckPatternPredicate, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ABS_Fp64), 0,
- 1, MVT::f64, 1, 0,
- 8, MVT::f80,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::ABS_Fp80), 0,
- 1, MVT::f80, 1, 0,
- 0,
- 37, ISD::FSIN,
- OPC_RecordChild0,
- OPC_SwitchType , 10, MVT::f32,
- OPC_CheckPatternPredicate, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SIN_Fp32), 0,
- 1, MVT::f32, 1, 0,
- 10, MVT::f64,
- OPC_CheckPatternPredicate, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SIN_Fp64), 0,
- 1, MVT::f64, 1, 0,
- 8, MVT::f80,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::SIN_Fp80), 0,
- 1, MVT::f80, 1, 0,
- 0,
- 37, ISD::FCOS,
- OPC_RecordChild0,
- OPC_SwitchType , 10, MVT::f32,
- OPC_CheckPatternPredicate, 6,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::COS_Fp32), 0,
- 1, MVT::f32, 1, 0,
- 10, MVT::f64,
- OPC_CheckPatternPredicate, 7,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::COS_Fp64), 0,
- 1, MVT::f64, 1, 0,
- 8, MVT::f80,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::COS_Fp80), 0,
- 1, MVT::f80, 1, 0,
- 0,
- 89, X86ISD::INC,
- OPC_RecordChild0,
- OPC_SwitchType , 28, MVT::i16,
- OPC_CheckChild0Type, MVT::i16,
- OPC_Scope, 11,
- OPC_CheckPatternPredicate, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::INC16r), 0,
- 2, MVT::i16, MVT::i32, 1, 0,
- 11,
- OPC_CheckPatternPredicate, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::INC64_16r), 0,
- 2, MVT::i16, MVT::i32, 1, 0,
- 0,
- 28, MVT::i32,
- OPC_CheckChild0Type, MVT::i32,
- OPC_Scope, 11,
- OPC_CheckPatternPredicate, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::INC32r), 0,
- 2, MVT::i32, MVT::i32, 1, 0,
- 11,
- OPC_CheckPatternPredicate, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::INC64_32r), 0,
- 2, MVT::i32, MVT::i32, 1, 0,
- 0,
- 11, MVT::i8,
- OPC_CheckChild0Type, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::INC8r), 0,
- 2, MVT::i8, MVT::i32, 1, 0,
- 11, MVT::i64,
- OPC_CheckChild0Type, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::INC64r), 0,
- 2, MVT::i64, MVT::i32, 1, 0,
- 0,
- 89, X86ISD::DEC,
- OPC_RecordChild0,
- OPC_SwitchType , 28, MVT::i16,
- OPC_CheckChild0Type, MVT::i16,
- OPC_Scope, 11,
- OPC_CheckPatternPredicate, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC16r), 0,
- 2, MVT::i16, MVT::i32, 1, 0,
- 11,
- OPC_CheckPatternPredicate, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC64_16r), 0,
- 2, MVT::i16, MVT::i32, 1, 0,
- 0,
- 28, MVT::i32,
- OPC_CheckChild0Type, MVT::i32,
- OPC_Scope, 11,
- OPC_CheckPatternPredicate, 2,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC32r), 0,
- 2, MVT::i32, MVT::i32, 1, 0,
- 11,
- OPC_CheckPatternPredicate, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC64_32r), 0,
- 2, MVT::i32, MVT::i32, 1, 0,
- 0,
- 11, MVT::i8,
- OPC_CheckChild0Type, MVT::i8,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC8r), 0,
- 2, MVT::i8, MVT::i32, 1, 0,
- 11, MVT::i64,
- OPC_CheckChild0Type, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::DEC64r), 0,
- 2, MVT::i64, MVT::i32, 1, 0,
- 0,
- 17, ISD::BR,
+ 16, TARGET_OPCODE(ISD::BR),
OPC_RecordNode,
OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_CheckOpcode, ISD::BasicBlock,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::BasicBlock),
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::JMP_4), 0|OPFL_Chain,
0, 1, 1,
- 23, ISD::BSWAP,
- OPC_RecordChild0,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BSWAP32r), 0,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::BSWAP64r), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 58, X86ISD::REP_MOVS,
+ 54, TARGET_OPCODE(X86ISD::REP_MOVS),
OPC_RecordNode,
OPC_CaptureFlagInput,
OPC_MoveChild, 1,
- OPC_Scope, 12,
+ OPC_Scope, 11,
OPC_CheckValueType, MVT::i8,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::REP_MOVSB), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 0,
- 12,
+ 1, MVT::i32, 0,
+ 11,
OPC_CheckValueType, MVT::i16,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::REP_MOVSW), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 0,
- 12,
+ 1, MVT::i32, 0,
+ 11,
OPC_CheckValueType, MVT::i32,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::REP_MOVSD), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 0,
- 12,
+ 1, MVT::i32, 0,
+ 11,
OPC_CheckValueType, MVT::i64,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::REP_MOVSQ), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 0,
+ 1, MVT::i64, 0,
0,
- 58, X86ISD::REP_STOS,
+ 54, TARGET_OPCODE(X86ISD::REP_STOS),
OPC_RecordNode,
OPC_CaptureFlagInput,
OPC_MoveChild, 1,
- OPC_Scope, 12,
+ OPC_Scope, 11,
OPC_CheckValueType, MVT::i8,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::REP_STOSB), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 0,
- 12,
+ 1, MVT::i32, 0,
+ 11,
OPC_CheckValueType, MVT::i16,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::REP_STOSW), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 0,
- 12,
+ 1, MVT::i32, 0,
+ 11,
OPC_CheckValueType, MVT::i32,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::REP_STOSD), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 0,
- 12,
+ 1, MVT::i32, 0,
+ 11,
OPC_CheckValueType, MVT::i64,
OPC_MoveParent,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::REP_STOSQ), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 0,
+ 1, MVT::i64, 0,
0,
- 10, X86ISD::RDTSC_DAG,
+ 9, TARGET_OPCODE(X86ISD::RDTSC_DAG),
OPC_RecordNode,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::RDTSC), 0|OPFL_Chain|OPFL_FlagOutput,
- 0, 0,
- 10, ISD::TRAP,
+ 1, MVT::i64, 0,
+ 8, TARGET_OPCODE(ISD::TRAP),
OPC_RecordNode,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::TRAP), 0|OPFL_Chain,
0, 0,
- 76, ISD::SIGN_EXTEND,
- OPC_RecordChild0,
- OPC_Scope, 34,
- OPC_CheckChild0Type, MVT::i8,
- OPC_SwitchType , 8, MVT::i16,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX16rr8), 0,
- 1, MVT::i16, 1, 0,
- 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX32rr8), 0,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rr8), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 24,
- OPC_CheckChild0Type, MVT::i16,
- OPC_SwitchType , 8, MVT::i32,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX32rr16), 0,
- 1, MVT::i32, 1, 0,
- 8, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rr16), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 12,
- OPC_CheckChild0Type, MVT::i32,
- OPC_CheckType, MVT::i64,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rr32), 0,
- 1, MVT::i64, 1, 0,
- 0,
- 30, X86ISD::EH_RETURN,
+ 26, TARGET_OPCODE(X86ISD::EH_RETURN),
OPC_RecordNode,
OPC_RecordChild1,
- OPC_Scope, 12,
+ OPC_Scope, 10,
OPC_CheckChild1Type, MVT::i32,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::EH_RETURN), 0|OPFL_Chain,
0, 1, 1,
- 12,
+ 10,
OPC_CheckChild1Type, MVT::i64,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::EH_RETURN64), 0|OPFL_Chain,
0, 1, 1,
0,
- 106|128,1, ISD::SIGN_EXTEND_INREG,
+ 28, TARGET_OPCODE(X86ISD::MEMBARRIER),
+ OPC_RecordNode,
+ OPC_Scope, 9,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_MemBarrier), 0|OPFL_Chain,
+ 0, 0,
+ 14,
+ OPC_RecordChild1,
+ OPC_CheckChild1Type, MVT::i64,
+ OPC_CheckPatternPredicate, 4,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::Int_MemBarrierNoSSE64), 0|OPFL_Chain,
+ 1, MVT::i32, 1, 1,
+ 0,
+ 8, TARGET_OPCODE(X86ISD::SFENCE),
+ OPC_RecordNode,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::SFENCE), 0|OPFL_Chain,
+ 0, 0,
+ 50, TARGET_OPCODE(X86ISD::FSRL),
OPC_RecordChild0,
+ OPC_RecordChild1,
OPC_MoveChild, 1,
- OPC_Scope, 49,
- OPC_CheckValueType, MVT::i16,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::Constant),
+ OPC_CheckPredicate, 9,
+ OPC_CheckType, MVT::i32,
+ OPC_MoveParent,
+ OPC_CheckType, MVT::v2f64,
+ OPC_Scope, 16,
+ OPC_CheckPatternPredicate, 0,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::VPSRLDQri), 0,
+ 1, MVT::v2f64, 2, 0, 3,
+ 16,
+ OPC_CheckPatternPredicate, 2,
+ OPC_EmitConvertToTarget, 1,
+ OPC_EmitNodeXForm, 0, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::PSRLDQri), 0,
+ 1, MVT::v2f64, 2, 0, 3,
+ 0,
+ 120, TARGET_OPCODE(X86ISD::MOVSD),
+ OPC_RecordChild0,
+ OPC_Scope, 20,
+ OPC_MoveChild, 1,
+ OPC_CheckOpcode, TARGET_OPCODE(ISD::SCALAR_TO_VECTOR),
+ OPC_RecordChild0,
+ OPC_CheckChild0Type, MVT::f64,
OPC_MoveParent,
- OPC_SwitchType , 20, MVT::i32,
- OPC_EmitInteger, MVT::i32, 3,
+ OPC_CheckType, MVT::v2f64,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
+ 1, MVT::v2f64, 2, 0, 1,
+ 95,
+ OPC_RecordChild1,
+ OPC_SwitchType , 21, MVT::v2i64,
+ OPC_EmitInteger, MVT::i32, X86::sub_sd,
OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i16, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX32rr16), 0,
- 1, MVT::i32, 1, 2,
- 20, MVT::i64,
- OPC_EmitInteger, MVT::i32, 3,
+ 1, MVT::f64, 2, 1, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
+ 1, MVT::v2i64, 2, 0, 3,
+ 21, MVT::v4i32,
+ OPC_EmitInteger, MVT::i32, X86::sub_sd,
OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i16, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rr16), 0,
- 1, MVT::i64, 1, 2,
- 0,
- 25,
- OPC_CheckValueType, MVT::i32,
- OPC_MoveParent,
- OPC_CheckType, MVT::i64,
- OPC_EmitInteger, MVT::i32, 4,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i32, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rr32), 0,
- 1, MVT::i64, 1, 2,
- 23|128,1,
- OPC_CheckValueType, MVT::i8,
- OPC_MoveParent,
- OPC_SwitchType , 20, MVT::i64,
- OPC_EmitInteger, MVT::i32, 1,
+ 1, MVT::f64, 2, 1, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
+ 1, MVT::v4i32, 2, 0, 3,
+ 21, MVT::v2f64,
+ OPC_EmitInteger, MVT::i32, X86::sub_sd,
OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX64rr8), 0,
- 1, MVT::i64, 1, 2,
- 60, MVT::i32,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX32rr8), 0,
- 1, MVT::i32, 1, 2,
- 34,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, X86::GR32_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i32, 2, 0, 1,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 2, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX32rr8), 0,
- 1, MVT::i32, 1, 4,
- 0,
- 60, MVT::i16,
- OPC_Scope, 22,
- OPC_CheckPatternPredicate, 3,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 0, 1,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX16rr8), 0,
- 1, MVT::i16, 1, 2,
- 34,
- OPC_CheckPatternPredicate, 2,
- OPC_EmitInteger, MVT::i32, X86::GR16_ABCDRegClassID,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::COPY_TO_REGCLASS), 0,
- 1, MVT::i16, 2, 0, 1,
- OPC_EmitInteger, MVT::i32, 1,
- OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
- 1, MVT::i8, 2, 2, 3,
- OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSX16rr8), 0,
- 1, MVT::i16, 1, 4,
- 0,
+ 1, MVT::f64, 2, 1, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
+ 1, MVT::v2f64, 2, 0, 3,
+ 21, MVT::v4f32,
+ OPC_EmitInteger, MVT::i32, X86::sub_sd,
+ OPC_EmitNode, TARGET_OPCODE(TargetOpcode::EXTRACT_SUBREG), 0,
+ 1, MVT::f64, 2, 1, 2,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVSDrr), 0,
+ 1, MVT::v4f32, 2, 0, 3,
0,
0,
- 11, X86ISD::MINGW_ALLOCA,
+ 8, TARGET_OPCODE(X86ISD::LFENCE),
+ OPC_RecordNode,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::LFENCE), 0|OPFL_Chain,
+ 0, 0,
+ 8, TARGET_OPCODE(X86ISD::MFENCE),
+ OPC_RecordNode,
+ OPC_EmitMergeInputChains1_0,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MFENCE), 0|OPFL_Chain,
+ 0, 0,
+ 26, TARGET_OPCODE(X86ISD::MOVHLPS),
+ OPC_RecordChild0,
+ OPC_RecordChild1,
+ OPC_SwitchType , 9, MVT::v4i32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHLPSrr), 0,
+ 1, MVT::v4i32, 2, 0, 1,
+ 9, MVT::v4f32,
+ OPC_MorphNodeTo, TARGET_OPCODE(X86::MOVHLPSrr), 0,
+ 1, MVT::v4f32, 2, 0, 1,
+ 0,
+ 10, TARGET_OPCODE(X86ISD::MINGW_ALLOCA),
OPC_RecordNode,
OPC_CaptureFlagInput,
- OPC_EmitMergeInputChains, 1, 0,
+ OPC_EmitMergeInputChains1_0,
OPC_MorphNodeTo, TARGET_OPCODE(X86::MINGW_ALLOCA), 0|OPFL_Chain|OPFL_FlagInput|OPFL_FlagOutput,
- 0, 0,
+ 1, MVT::i32, 0,
0,
0
- }; // Total Array size is 77632 bytes
+ }; // Total Array size is 92012 bytes
#undef TARGET_OPCODE
return SelectCodeCommon(N, MatcherTable,sizeof(MatcherTable));
@@ -32713,29 +37044,37 @@ SDNode *SelectCode(SDNode *N) {
bool CheckPatternPredicate(unsigned PredNo) const {
switch (PredNo) {
default: assert(0 && "Invalid predicate in table?");
- case 0: return (Subtarget->hasSSE1());
- case 1: return (Subtarget->hasSSE2());
- case 2: return (!Subtarget->is64Bit());
- case 3: return (Subtarget->is64Bit());
- case 4: return (Subtarget->hasSSE41());
- case 5: return (TM.getCodeModel() == CodeModel::Small ||TM.getCodeModel() == CodeModel::Kernel) && (TM.getRelocationModel() == Reloc::Static);
- case 6: return (!Subtarget->hasSSE1());
- case 7: return (!Subtarget->hasSSE2());
- case 8: return (Subtarget->hasMMX());
- case 9: return (Subtarget->hasSSE3());
- case 10: return (!OptForSize) && (Subtarget->hasSSE2());
- case 11: return (Subtarget->hasSSSE3());
- case 12: return (Subtarget->hasSSE2()) && (OptForSize);
- case 13: return (Subtarget->hasSSE2()) && (!OptForSize);
- case 14: return (Subtarget->hasSSE42());
- case 15: return (Subtarget->hasMMX()) && (Subtarget->is64Bit());
- case 16: return (!Subtarget->isTargetWin64());
- case 17: return (Subtarget->isTargetWin64());
- case 18: return (Subtarget->IsLegalToCallImmediateAddr(TM));
- case 19: return (Subtarget->hasSSE1()) && (OptForSize);
- case 20: return (TM.getCodeModel() != CodeModel::Small &&TM.getCodeModel() != CodeModel::Kernel);
- case 21: return (TM.getCodeModel() == CodeModel::Small);
- case 22: return (TM.getCodeModel() == CodeModel::Kernel);
+ case 0: return (Subtarget->hasAVX());
+ case 1: return (Subtarget->hasSSE1() && !Subtarget->hasAVX());
+ case 2: return (Subtarget->hasSSE2() && !Subtarget->hasAVX());
+ case 3: return (!Subtarget->is64Bit());
+ case 4: return (Subtarget->is64Bit());
+ case 5: return (Subtarget->hasSSE41() && !Subtarget->hasAVX());
+ case 6: return (TM.getCodeModel() == CodeModel::Small ||TM.getCodeModel() == CodeModel::Kernel) && (TM.getRelocationModel() == Reloc::Static);
+ case 7: return (!Subtarget->hasSSE1());
+ case 8: return (!Subtarget->hasSSE2());
+ case 9: return (Subtarget->hasMMX() && !Subtarget->hasAVX());
+ case 10: return (Subtarget->hasSSE42() && !Subtarget->hasAVX());
+ case 11: return (Subtarget->hasSSSE3() && !Subtarget->hasAVX());
+ case 12: return (Subtarget->hasAVX()) && (Subtarget->hasAES());
+ case 13: return (Subtarget->hasAES());
+ case 14: return (Subtarget->hasSSE3() && !Subtarget->hasAVX());
+ case 15: return (Subtarget->hasCMov());
+ case 16: return (!Subtarget->hasCMov());
+ case 17: return (!Subtarget->hasSSE1()) && (Subtarget->hasCMov());
+ case 18: return (!Subtarget->hasSSE2()) && (Subtarget->hasCMov());
+ case 19: return (Subtarget->hasSSE2() && !Subtarget->hasAVX()) && (OptForSize);
+ case 20: return (Subtarget->hasSSE2() && !Subtarget->hasAVX()) && (!OptForSize);
+ case 21: return (TM.getCodeModel() != CodeModel::Small &&TM.getCodeModel() != CodeModel::Kernel);
+ case 22: return (TM.getCodeModel() == CodeModel::Small);
+ case 23: return (TM.getCodeModel() == CodeModel::Kernel);
+ case 24: return (Subtarget->hasSSE1() && !Subtarget->hasAVX()) && (OptForSize);
+ case 25: return (!OptForSize) && (Subtarget->hasSSE2() && !Subtarget->hasAVX());
+ case 26: return (!Subtarget->is64Bit()) && (TM.getRelocationModel() != Reloc::PIC_);
+ case 27: return (Subtarget->hasMMX() && !Subtarget->hasAVX()) && (Subtarget->is64Bit());
+ case 28: return (!Subtarget->isTargetWin64());
+ case 29: return (Subtarget->isTargetWin64());
+ case 30: return (Subtarget->IsLegalToCallImmediateAddr(TM));
}
}
@@ -32766,35 +37105,35 @@ bool CheckNodePredicate(SDNode *Node, unsigned PredNo) const {
return cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
}
- case 3: { // Predicate_loadi32
+ case 3: { // Predicate_load
SDNode *N = Node;
- LoadSDNode *LD = cast<LoadSDNode>(N);
- if (const Value *Src = LD->getSrcValue())
+ return cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
+
+ }
+ case 4: { // Predicate_dsload
+ SDNode *N = Node;
+
+ if (const Value *Src = cast<LoadSDNode>(N)->getSrcValue())
if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
if (PT->getAddressSpace() > 255)
return false;
- ISD::LoadExtType ExtType = LD->getExtensionType();
- if (ExtType == ISD::NON_EXTLOAD)
- return true;
- if (ExtType == ISD::EXTLOAD)
- return LD->getAlignment() >= 4 && !LD->isVolatile();
- return false;
+ return true;
}
- case 4: { // Predicate_unindexedstore
+ case 5: { // Predicate_unindexedstore
SDNode *N = Node;
return cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
}
- case 5: { // Predicate_store
+ case 6: { // Predicate_store
SDNode *N = Node;
return !cast<StoreSDNode>(N)->isTruncatingStore();
}
- case 6: { // Predicate_loadi16
+ case 7: { // Predicate_loadi16
SDNode *N = Node;
LoadSDNode *LD = cast<LoadSDNode>(N);
@@ -32810,261 +37149,112 @@ bool CheckNodePredicate(SDNode *Node, unsigned PredNo) const {
return false;
}
- case 7: { // Predicate_shrd
- SDNode *N = Node;
-
- assert(N->getOpcode() == ISD::OR);
- return N->getOperand(0).getOpcode() == ISD::SRL &&
- N->getOperand(1).getOpcode() == ISD::SHL &&
- isa<ConstantSDNode>(N->getOperand(0).getOperand(1)) &&
- isa<ConstantSDNode>(N->getOperand(1).getOperand(1)) &&
- N->getOperand(0).getConstantOperandVal(1) ==
- N->getValueSizeInBits(0) - N->getOperand(1).getConstantOperandVal(1);
-
- }
- case 8: { // Predicate_load
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
-
- }
- case 9: { // Predicate_dsload
+ case 8: { // Predicate_loadi32
SDNode *N = Node;
- if (const Value *Src = cast<LoadSDNode>(N)->getSrcValue())
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ if (const Value *Src = LD->getSrcValue())
if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
if (PT->getAddressSpace() > 255)
return false;
- return true;
-
- }
- case 10: { // Predicate_shld
- SDNode *N = Node;
-
- assert(N->getOpcode() == ISD::OR);
- return N->getOperand(0).getOpcode() == ISD::SHL &&
- N->getOperand(1).getOpcode() == ISD::SRL &&
- isa<ConstantSDNode>(N->getOperand(0).getOperand(1)) &&
- isa<ConstantSDNode>(N->getOperand(1).getOperand(1)) &&
- N->getOperand(0).getConstantOperandVal(1) ==
- N->getValueSizeInBits(0) - N->getOperand(1).getConstantOperandVal(1);
+ ISD::LoadExtType ExtType = LD->getExtensionType();
+ if (ExtType == ISD::NON_EXTLOAD)
+ return true;
+ if (ExtType == ISD::EXTLOAD)
+ return LD->getAlignment() >= 4 && !LD->isVolatile();
+ return false;
}
- case 11: { // Predicate_immSext8
+ case 9: { // Predicate_immSext8
ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- return N->getSExtValue() == (int8_t)N->getSExtValue();
-
+ return immSext8(N);
}
- case 12: { // Predicate_i64immSExt32
+ case 10: { // Predicate_i64immSExt32
ConstantSDNode*N = cast<ConstantSDNode>(Node);
-
- // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
- // sign extended field.
- return (int64_t)N->getZExtValue() == (int32_t)N->getZExtValue();
-
+ return i64immSExt32(N);
}
- case 13: { // Predicate_movlp
+ case 11: { // Predicate_movlp
SDNode *N = Node;
return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
}
- case 14: { // Predicate_unpckh
+ case 12: { // Predicate_unpckh
SDNode *N = Node;
return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}
- case 15: { // Predicate_srl_su
+ case 13: { // Predicate_srl_su
SDNode *N = Node;
return N->hasOneUse();
}
- case 16: { // Predicate_trunc_su
+ case 14: { // Predicate_trunc_su
SDNode *N = Node;
return N->hasOneUse();
}
- case 17: { // Predicate_truncstore
+ case 15: { // Predicate_truncstore
SDNode *N = Node;
return cast<StoreSDNode>(N)->isTruncatingStore();
}
- case 18: { // Predicate_truncstorei16
- SDNode *N = Node;
-
- return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
-
- }
- case 19: { // Predicate_truncstoref32
+ case 16: { // Predicate_truncstoref32
SDNode *N = Node;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
}
- case 20: { // Predicate_truncstoref64
+ case 17: { // Predicate_truncstoref64
SDNode *N = Node;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f64;
}
- case 21: { // Predicate_alignedstore
+ case 18: { // Predicate_alignedstore
SDNode *N = Node;
return cast<StoreSDNode>(N)->getAlignment() >= 16;
}
- case 22: { // Predicate_movlhps
- SDNode *N = Node;
-
- return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 23: { // Predicate_memop
- SDNode *N = Node;
-
- return Subtarget->hasVectorUAMem()
- || cast<LoadSDNode>(N)->getAlignment() >= 16;
-
- }
- case 24: { // Predicate_movshdup
- SDNode *N = Node;
-
- return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 25: { // Predicate_movsldup
- SDNode *N = Node;
-
- return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 26: { // Predicate_pshufd
- SDNode *N = Node;
-
- return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 27: { // Predicate_movddup
- SDNode *N = Node;
-
- return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 28: { // Predicate_unpckl
- SDNode *N = Node;
-
- return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 29: { // Predicate_pshufhw
- SDNode *N = Node;
-
- return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 30: { // Predicate_pshuflw
- SDNode *N = Node;
-
- return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 31: { // Predicate_mmx_pshufw
- SDNode *N = Node;
-
- return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 32: { // Predicate_shufp
+ case 19: { // Predicate_and_su
SDNode *N = Node;
- return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 33: { // Predicate_mmx_unpckh
- SDNode *N = Node;
-
- return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 34: { // Predicate_mmx_unpckl
- SDNode *N = Node;
-
- return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 35: { // Predicate_movhlps_undef
- SDNode *N = Node;
-
- return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 36: { // Predicate_movhlps
- SDNode *N = Node;
-
- return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 37: { // Predicate_immAllZerosV_bc
- SDNode *N = Node;
-
- return ISD::isBuildVectorAllZeros(N);
-
- }
- case 38: { // Predicate_movl
- SDNode *N = Node;
-
- return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 39: { // Predicate_unpckl_undef
- SDNode *N = Node;
-
- return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 40: { // Predicate_unpckh_undef
- SDNode *N = Node;
-
- return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-
- }
- case 41: { // Predicate_splat_lo
- SDNode *N = Node;
-
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
- return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
-
- }
- case 42: { // Predicate_mmx_unpckl_undef
- SDNode *N = Node;
-
- return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
+ return N->hasOneUse();
}
- case 43: { // Predicate_mmx_unpckh_undef
+ case 20: { // Predicate_loadi16_anyext
SDNode *N = Node;
- return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ if (const Value *Src = LD->getSrcValue())
+ if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
+ if (PT->getAddressSpace() > 255)
+ return false;
+ ISD::LoadExtType ExtType = LD->getExtensionType();
+ if (ExtType == ISD::EXTLOAD)
+ return LD->getAlignment() >= 2 && !LD->isVolatile();
+ return false;
}
- case 44: { // Predicate_immAllZerosV
+ case 21: { // Predicate_memop
SDNode *N = Node;
- return ISD::isBuildVectorAllZeros(N);
+ return Subtarget->hasVectorUAMem()
+ || cast<LoadSDNode>(N)->getAlignment() >= 16;
}
- case 45: { // Predicate_palign
+ case 22: { // Predicate_memop64
SDNode *N = Node;
- return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
+ return cast<LoadSDNode>(N)->getAlignment() >= 8;
}
- case 46: { // Predicate_gsload
+ case 23: { // Predicate_gsload
SDNode *N = Node;
if (const Value *Src = cast<LoadSDNode>(N)->getSrcValue())
@@ -33073,7 +37263,7 @@ bool CheckNodePredicate(SDNode *Node, unsigned PredNo) const {
return false;
}
- case 47: { // Predicate_fsload
+ case 24: { // Predicate_fsload
SDNode *N = Node;
if (const Value *Src = cast<LoadSDNode>(N)->getSrcValue())
@@ -33082,141 +37272,124 @@ bool CheckNodePredicate(SDNode *Node, unsigned PredNo) const {
return false;
}
- case 48: { // Predicate_extload
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
-
- }
- case 49: { // Predicate_extloadf32
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f32;
-
- }
- case 50: { // Predicate_extloadf64
- SDNode *N = Node;
-
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f64;
-
- }
- case 51: { // Predicate_sextload
+ case 25: { // Predicate_sextload
SDNode *N = Node;
return cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
}
- case 52: { // Predicate_sextloadi8
+ case 26: { // Predicate_sextloadi8
SDNode *N = Node;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 53: { // Predicate_sextloadi16
+ case 27: { // Predicate_sextloadi16
SDNode *N = Node;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 54: { // Predicate_zextload
+ case 28: { // Predicate_zextload
SDNode *N = Node;
return cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
}
- case 55: { // Predicate_zextloadi8
+ case 29: { // Predicate_zextloadi8
SDNode *N = Node;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 56: { // Predicate_zextloadi16
+ case 30: { // Predicate_zextloadi16
SDNode *N = Node;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 57: { // Predicate_zextloadi1
+ case 31: { // Predicate_zextloadi1
SDNode *N = Node;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
}
- case 58: { // Predicate_extloadi1
+ case 32: { // Predicate_extload
SDNode *N = Node;
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
+ return cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
}
- case 59: { // Predicate_extloadi8
+ case 33: { // Predicate_extloadi1
SDNode *N = Node;
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
}
- case 60: { // Predicate_extloadi16
+ case 34: { // Predicate_extloadi8
SDNode *N = Node;
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 61: { // Predicate_sextloadi32
+ case 35: { // Predicate_extloadi16
SDNode *N = Node;
- return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 62: { // Predicate_zextloadi32
+ case 36: { // Predicate_sextloadi32
SDNode *N = Node;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 63: { // Predicate_alignedload
+ case 37: { // Predicate_zextloadi32
SDNode *N = Node;
- return cast<LoadSDNode>(N)->getAlignment() >= 16;
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 64: { // Predicate_extloadi32
+ case 38: { // Predicate_extloadi32
SDNode *N = Node;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 65: { // Predicate_loadi16_anyext
+ case 39: { // Predicate_extloadf32
SDNode *N = Node;
- LoadSDNode *LD = cast<LoadSDNode>(N);
- if (const Value *Src = LD->getSrcValue())
- if (const PointerType *PT = dyn_cast<PointerType>(Src->getType()))
- if (PT->getAddressSpace() > 255)
- return false;
- ISD::LoadExtType ExtType = LD->getExtensionType();
- if (ExtType == ISD::EXTLOAD)
- return LD->getAlignment() >= 2 && !LD->isVolatile();
- return false;
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f32;
}
- case 66: { // Predicate_memop64
+ case 40: { // Predicate_extloadf64
SDNode *N = Node;
- return cast<LoadSDNode>(N)->getAlignment() >= 8;
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f64;
}
- case 67: { // Predicate_immAllOnesV
+ case 41: { // Predicate_alignedload
SDNode *N = Node;
- return ISD::isBuildVectorAllOnes(N);
+ return cast<LoadSDNode>(N)->getAlignment() >= 16;
}
- case 68: { // Predicate_immAllOnesV_bc
+ case 42: { // Predicate_or_is_add
SDNode *N = Node;
- return ISD::isBuildVectorAllOnes(N);
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
+ return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
+
+ unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
+ APInt Mask = APInt::getAllOnesValue(BitWidth);
+ APInt KnownZero0, KnownOne0;
+ CurDAG->ComputeMaskedBits(N->getOperand(0), Mask, KnownZero0, KnownOne0, 0);
+ APInt KnownZero1, KnownOne1;
+ CurDAG->ComputeMaskedBits(N->getOperand(1), Mask, KnownZero1, KnownOne1, 0);
+ return (~KnownZero0 & ~KnownZero1) == 0;
}
- case 69: { // Predicate_i64immZExt32
+ case 43: { // Predicate_i64immZExt32
ConstantSDNode*N = cast<ConstantSDNode>(Node);
// i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
@@ -33224,245 +37397,229 @@ bool CheckNodePredicate(SDNode *Node, unsigned PredNo) const {
return (uint64_t)N->getZExtValue() == (uint32_t)N->getZExtValue();
}
- case 70: { // Predicate_and_su
- SDNode *N = Node;
-
- return N->hasOneUse();
-
- }
- case 71: { // Predicate_or_is_add
+ case 44: { // Predicate_immAllOnesV
SDNode *N = Node;
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
- return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
- else {
- unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
- APInt Mask = APInt::getAllOnesValue(BitWidth);
- APInt KnownZero0, KnownOne0;
- CurDAG->ComputeMaskedBits(N->getOperand(0), Mask, KnownZero0, KnownOne0, 0);
- APInt KnownZero1, KnownOne1;
- CurDAG->ComputeMaskedBits(N->getOperand(1), Mask, KnownZero1, KnownOne1, 0);
- return (~KnownZero0 & ~KnownZero1) == 0;
- }
+ return ISD::isBuildVectorAllOnes(N);
}
- case 72: { // Predicate_atomic_swap_32
+ case 45: { // Predicate_atomic_swap_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 73: { // Predicate_atomic_swap_16
+ case 46: { // Predicate_atomic_swap_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 74: { // Predicate_atomic_swap_8
+ case 47: { // Predicate_atomic_swap_8
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 75: { // Predicate_atomic_swap_64
+ case 48: { // Predicate_atomic_swap_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 76: { // Predicate_atomic_load_add_32
+ case 49: { // Predicate_atomic_load_add_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 77: { // Predicate_atomic_load_add_16
+ case 50: { // Predicate_atomic_load_add_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 78: { // Predicate_atomic_load_add_8
+ case 51: { // Predicate_atomic_load_add_8
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 79: { // Predicate_atomic_load_add_64
+ case 52: { // Predicate_atomic_load_add_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 80: { // Predicate_atomic_load_and_32
+ case 53: { // Predicate_atomic_load_and_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 81: { // Predicate_atomic_load_and_16
+ case 54: { // Predicate_atomic_load_and_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 82: { // Predicate_atomic_load_and_8
+ case 55: { // Predicate_atomic_load_and_8
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 83: { // Predicate_atomic_load_and_64
+ case 56: { // Predicate_atomic_load_and_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 84: { // Predicate_atomic_load_or_32
+ case 57: { // Predicate_atomic_load_or_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 85: { // Predicate_atomic_load_or_16
+ case 58: { // Predicate_atomic_load_or_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 86: { // Predicate_atomic_load_or_8
+ case 59: { // Predicate_atomic_load_or_8
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 87: { // Predicate_atomic_load_or_64
+ case 60: { // Predicate_atomic_load_or_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 88: { // Predicate_atomic_load_xor_32
+ case 61: { // Predicate_atomic_load_xor_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 89: { // Predicate_atomic_load_xor_16
+ case 62: { // Predicate_atomic_load_xor_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 90: { // Predicate_atomic_load_xor_8
+ case 63: { // Predicate_atomic_load_xor_8
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 91: { // Predicate_atomic_load_xor_64
+ case 64: { // Predicate_atomic_load_xor_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 92: { // Predicate_atomic_load_nand_32
+ case 65: { // Predicate_atomic_load_nand_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 93: { // Predicate_atomic_load_nand_16
+ case 66: { // Predicate_atomic_load_nand_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 94: { // Predicate_atomic_load_nand_8
+ case 67: { // Predicate_atomic_load_nand_8
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}
- case 95: { // Predicate_atomic_load_nand_64
+ case 68: { // Predicate_atomic_load_nand_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 96: { // Predicate_atomic_load_min_32
+ case 69: { // Predicate_atomic_load_min_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 97: { // Predicate_atomic_load_min_16
+ case 70: { // Predicate_atomic_load_min_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 98: { // Predicate_atomic_load_min_64
+ case 71: { // Predicate_atomic_load_min_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 99: { // Predicate_atomic_load_max_32
+ case 72: { // Predicate_atomic_load_max_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 100: { // Predicate_atomic_load_max_16
+ case 73: { // Predicate_atomic_load_max_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 101: { // Predicate_atomic_load_max_64
+ case 74: { // Predicate_atomic_load_max_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 102: { // Predicate_atomic_load_umin_32
+ case 75: { // Predicate_atomic_load_umin_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 103: { // Predicate_atomic_load_umin_16
+ case 76: { // Predicate_atomic_load_umin_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 104: { // Predicate_atomic_load_umin_64
+ case 77: { // Predicate_atomic_load_umin_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 105: { // Predicate_atomic_load_umax_32
+ case 78: { // Predicate_atomic_load_umax_32
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}
- case 106: { // Predicate_atomic_load_umax_16
+ case 79: { // Predicate_atomic_load_umax_16
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}
- case 107: { // Predicate_atomic_load_umax_64
+ case 80: { // Predicate_atomic_load_umax_64
SDNode *N = Node;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}
- case 108: { // Predicate_def32
+ case 81: { // Predicate_def32
SDNode *N = Node;
return N->getOpcode() != ISD::TRUNCATE &&
@@ -33471,64 +37628,198 @@ bool CheckNodePredicate(SDNode *Node, unsigned PredNo) const {
N->getOpcode() != X86ISD::CMOV;
}
- case 109: { // Predicate_fpimm0
+ case 82: { // Predicate_fpimm0
ConstantFPSDNode*N = cast<ConstantFPSDNode>(Node);
return N->isExactlyValue(+0.0);
}
- case 110: { // Predicate_fpimm1
+ case 83: { // Predicate_fpimm1
ConstantFPSDNode*N = cast<ConstantFPSDNode>(Node);
return N->isExactlyValue(+1.0);
}
- case 111: { // Predicate_fp32imm0
+ case 84: { // Predicate_fp32imm0
ConstantFPSDNode*N = cast<ConstantFPSDNode>(Node);
return N->isExactlyValue(+0.0);
}
- case 112: { // Predicate_fpimmneg0
+ case 85: { // Predicate_fpimmneg0
ConstantFPSDNode*N = cast<ConstantFPSDNode>(Node);
return N->isExactlyValue(-0.0);
}
- case 113: { // Predicate_fpimmneg1
+ case 86: { // Predicate_fpimmneg1
ConstantFPSDNode*N = cast<ConstantFPSDNode>(Node);
return N->isExactlyValue(-1.0);
}
+ case 87: { // Predicate_movshdup
+ SDNode *N = Node;
+
+ return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 88: { // Predicate_movsldup
+ SDNode *N = Node;
+
+ return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 89: { // Predicate_pshufd
+ SDNode *N = Node;
+
+ return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 90: { // Predicate_movddup
+ SDNode *N = Node;
+
+ return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 91: { // Predicate_pshufhw
+ SDNode *N = Node;
+
+ return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 92: { // Predicate_pshuflw
+ SDNode *N = Node;
+
+ return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 93: { // Predicate_mmx_pshufw
+ SDNode *N = Node;
+
+ return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 94: { // Predicate_unpckl
+ SDNode *N = Node;
+
+ return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 95: { // Predicate_shufp
+ SDNode *N = Node;
+
+ return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 96: { // Predicate_mmx_unpckh
+ SDNode *N = Node;
+
+ return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 97: { // Predicate_mmx_unpckl
+ SDNode *N = Node;
+
+ return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 98: { // Predicate_movlhps
+ SDNode *N = Node;
+
+ return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 99: { // Predicate_movhlps_undef
+ SDNode *N = Node;
+
+ return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 100: { // Predicate_movhlps
+ SDNode *N = Node;
+
+ return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 101: { // Predicate_unpckl_undef
+ SDNode *N = Node;
+
+ return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 102: { // Predicate_unpckh_undef
+ SDNode *N = Node;
+
+ return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 103: { // Predicate_movl
+ SDNode *N = Node;
+
+ return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 104: { // Predicate_splat_lo
+ SDNode *N = Node;
+
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
+ return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
+
+ }
+ case 105: { // Predicate_mmx_unpckl_undef
+ SDNode *N = Node;
+
+ return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 106: { // Predicate_mmx_unpckh_undef
+ SDNode *N = Node;
+
+ return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
+
+ }
+ case 107: { // Predicate_immAllZerosV
+ SDNode *N = Node;
+
+ return ISD::isBuildVectorAllZeros(N);
+
+ }
+ case 108: { // Predicate_palign
+ SDNode *N = Node;
+
+ return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
+
+ }
}
}
bool CheckComplexPattern(SDNode *Root, SDValue N,
unsigned PatternNo, SmallVectorImpl<SDValue> &Result) {
+ unsigned NextRes = Result.size();
switch (PatternNo) {
default: assert(0 && "Invalid pattern # in table?");
case 0:
- Result.resize(Result.size()+5);
- return SelectAddr(Root, N, Result[Result.size()-5], Result[Result.size()-4], Result[Result.size()-3], Result[Result.size()-2], Result[Result.size()-1]);
+ Result.resize(NextRes+5);
+ return SelectAddr(Root, N, Result[NextRes+0], Result[NextRes+1], Result[NextRes+2], Result[NextRes+3], Result[NextRes+4]);
case 1:
- Result.resize(Result.size()+6);
- return SelectScalarSSELoad(Root, N, Result[Result.size()-6], Result[Result.size()-5], Result[Result.size()-4], Result[Result.size()-3], Result[Result.size()-2], Result[Result.size()-1]);
+ Result.resize(NextRes+6);
+ return SelectScalarSSELoad(Root, N, Result[NextRes+0], Result[NextRes+1], Result[NextRes+2], Result[NextRes+3], Result[NextRes+4], Result[NextRes+5]);
case 2:
- Result.resize(Result.size()+6);
- return SelectScalarSSELoad(Root, N, Result[Result.size()-6], Result[Result.size()-5], Result[Result.size()-4], Result[Result.size()-3], Result[Result.size()-2], Result[Result.size()-1]);
+ Result.resize(NextRes+6);
+ return SelectScalarSSELoad(Root, N, Result[NextRes+0], Result[NextRes+1], Result[NextRes+2], Result[NextRes+3], Result[NextRes+4], Result[NextRes+5]);
case 3:
- Result.resize(Result.size()+4);
- return SelectLEAAddr(Root, N, Result[Result.size()-4], Result[Result.size()-3], Result[Result.size()-2], Result[Result.size()-1]);
+ Result.resize(NextRes+5);
+ return SelectLEAAddr(Root, N, Result[NextRes+0], Result[NextRes+1], Result[NextRes+2], Result[NextRes+3], Result[NextRes+4]);
case 4:
- Result.resize(Result.size()+4);
- return SelectLEAAddr(Root, N, Result[Result.size()-4], Result[Result.size()-3], Result[Result.size()-2], Result[Result.size()-1]);
+ Result.resize(NextRes+5);
+ return SelectLEAAddr(Root, N, Result[NextRes+0], Result[NextRes+1], Result[NextRes+2], Result[NextRes+3], Result[NextRes+4]);
case 5:
- Result.resize(Result.size()+4);
- return SelectTLSADDRAddr(Root, N, Result[Result.size()-4], Result[Result.size()-3], Result[Result.size()-2], Result[Result.size()-1]);
+ Result.resize(NextRes+5);
+ return SelectTLSADDRAddr(Root, N, Result[NextRes+0], Result[NextRes+1], Result[NextRes+2], Result[NextRes+3], Result[NextRes+4]);
case 6:
- Result.resize(Result.size()+4);
- return SelectTLSADDRAddr(Root, N, Result[Result.size()-4], Result[Result.size()-3], Result[Result.size()-2], Result[Result.size()-1]);
+ Result.resize(NextRes+5);
+ return SelectTLSADDRAddr(Root, N, Result[NextRes+0], Result[NextRes+1], Result[NextRes+2], Result[NextRes+3], Result[NextRes+4]);
}
}
@@ -33536,47 +37827,47 @@ SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {
switch (XFormNo) {
default: assert(0 && "Invalid xform # in table?");
case 0: {
- SDNode *N = V.getNode();
+ ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
- return getI8Imm(X86::getShuffleSHUFImmediate(N));
+ // Transformation function: imm >> 3
+ return getI32Imm(N->getZExtValue() >> 3);
}
case 1: {
- SDNode *N = V.getNode();
+ ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
- return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
+ // Transformation function: get the low 32 bits.
+ return getI32Imm((unsigned)N->getZExtValue());
}
case 2: {
SDNode *N = V.getNode();
- return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
+ return getI8Imm(X86::getShuffleSHUFImmediate(N));
}
case 3: {
SDNode *N = V.getNode();
- return getI8Imm(X86::getShuffleSHUFImmediate(N));
+ return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}
case 4: {
SDNode *N = V.getNode();
- return getI8Imm(X86::getShufflePALIGNRImmediate(N));
+ return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}
case 5: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
+ SDNode *N = V.getNode();
- // Transformation function: imm >> 3
- return getI32Imm(N->getZExtValue() >> 3);
+ return getI8Imm(X86::getShuffleSHUFImmediate(N));
}
case 6: {
- ConstantSDNode *N = cast<ConstantSDNode>(V.getNode());
+ SDNode *N = V.getNode();
- // Transformation function: get the low 32 bits.
- return getI32Imm((unsigned)N->getZExtValue());
+ return getI8Imm(X86::getShufflePALIGNRImmediate(N));
}
}
diff --git a/libclamav/c++/X86GenFastISel.inc b/libclamav/c++/X86GenFastISel.inc
index dc171fc..0107a9e 100644
--- a/libclamav/c++/X86GenFastISel.inc
+++ b/libclamav/c++/X86GenFastISel.inc
@@ -53,3444 +53,3674 @@ unsigned FastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t imm0) {
// FastEmit functions for ISD::ANY_EXTEND.
-unsigned FastEmit_ISD_ANY_EXTEND_MVT_i8_MVT_i16_r(unsigned Op0) {
- return FastEmitInst_r(X86::MOVZX16rr8, X86::GR16RegisterClass, Op0);
+unsigned FastEmit_ISD_ANY_EXTEND_MVT_i8_MVT_i16_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MOVZX16rr8, X86::GR16RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_ANY_EXTEND_MVT_i8_MVT_i32_r(unsigned Op0) {
- return FastEmitInst_r(X86::MOVZX32rr8, X86::GR32RegisterClass, Op0);
+unsigned FastEmit_ISD_ANY_EXTEND_MVT_i8_MVT_i32_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MOVZX32rr8, X86::GR32RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_ANY_EXTEND_MVT_i8_MVT_i64_r(unsigned Op0) {
- return FastEmitInst_r(X86::MOVZX64rr8, X86::GR64RegisterClass, Op0);
+unsigned FastEmit_ISD_ANY_EXTEND_MVT_i8_MVT_i64_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MOVZX64rr8, X86::GR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_ANY_EXTEND_MVT_i8_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_ANY_EXTEND_MVT_i8_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::i16: return FastEmit_ISD_ANY_EXTEND_MVT_i8_MVT_i16_r(Op0);
- case MVT::i32: return FastEmit_ISD_ANY_EXTEND_MVT_i8_MVT_i32_r(Op0);
- case MVT::i64: return FastEmit_ISD_ANY_EXTEND_MVT_i8_MVT_i64_r(Op0);
+ case MVT::i16: return FastEmit_ISD_ANY_EXTEND_MVT_i8_MVT_i16_r(Op0, Op0IsKill);
+ case MVT::i32: return FastEmit_ISD_ANY_EXTEND_MVT_i8_MVT_i32_r(Op0, Op0IsKill);
+ case MVT::i64: return FastEmit_ISD_ANY_EXTEND_MVT_i8_MVT_i64_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_ANY_EXTEND_MVT_i16_MVT_i32_r(unsigned Op0) {
- return FastEmitInst_r(X86::MOVZX32rr16, X86::GR32RegisterClass, Op0);
-}
-
-unsigned FastEmit_ISD_ANY_EXTEND_MVT_i16_MVT_i64_r(unsigned Op0) {
- return FastEmitInst_r(X86::MOVZX64rr16, X86::GR64RegisterClass, Op0);
-}
-
-unsigned FastEmit_ISD_ANY_EXTEND_MVT_i16_r(MVT RetVT, unsigned Op0) {
-switch (RetVT.SimpleTy) {
- case MVT::i32: return FastEmit_ISD_ANY_EXTEND_MVT_i16_MVT_i32_r(Op0);
- case MVT::i64: return FastEmit_ISD_ANY_EXTEND_MVT_i16_MVT_i64_r(Op0);
- default: return 0;
-}
+unsigned FastEmit_ISD_ANY_EXTEND_MVT_i16_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::i64)
+ return 0;
+ return FastEmitInst_r(X86::MOVZX64rr16, X86::GR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_ANY_EXTEND_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_ANY_EXTEND_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_ANY_EXTEND_MVT_i8_r(RetVT, Op0);
- case MVT::i16: return FastEmit_ISD_ANY_EXTEND_MVT_i16_r(RetVT, Op0);
+ case MVT::i8: return FastEmit_ISD_ANY_EXTEND_MVT_i8_r(RetVT, Op0, Op0IsKill);
+ case MVT::i16: return FastEmit_ISD_ANY_EXTEND_MVT_i16_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::BIT_CONVERT.
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_i32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f32)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::MOVDI2SSrr, X86::FR32RegisterClass, Op0);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::VMOVDI2SSrr, X86::FR32RegisterClass, Op0, Op0IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::MOVDI2SSrr, X86::FR32RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_f64_r(unsigned Op0) {
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::MOV64toSDrr, X86::FR64RegisterClass, Op0);
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_f64_r(unsigned Op0, bool Op0IsKill) {
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::MOV64toSDrr, X86::FR64RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v8i8_r(unsigned Op0) {
- return FastEmitInst_r(X86::MMX_MOVD64to64rr, X86::VR64RegisterClass, Op0);
-}
-
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v4i16_r(unsigned Op0) {
- return FastEmitInst_r(X86::MMX_MOVD64to64rr, X86::VR64RegisterClass, Op0);
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v8i8_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MMX_MOVD64to64rr, X86::VR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v2i32_r(unsigned Op0) {
- return FastEmitInst_r(X86::MMX_MOVD64to64rr, X86::VR64RegisterClass, Op0);
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v4i16_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MMX_MOVD64to64rr, X86::VR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v1i64_r(unsigned Op0) {
- return FastEmitInst_r(X86::MMX_MOVD64to64rr, X86::VR64RegisterClass, Op0);
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v2i32_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MMX_MOVD64to64rr, X86::VR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v2f32_r(unsigned Op0) {
- return FastEmitInst_r(X86::MMX_MOVD64to64rr, X86::VR64RegisterClass, Op0);
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v1i64_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MMX_MOVD64to64rr, X86::VR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_i64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_i64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::f64: return FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_f64_r(Op0);
- case MVT::v8i8: return FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v8i8_r(Op0);
- case MVT::v4i16: return FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v4i16_r(Op0);
- case MVT::v2i32: return FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v2i32_r(Op0);
- case MVT::v1i64: return FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v1i64_r(Op0);
- case MVT::v2f32: return FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v2f32_r(Op0);
+ case MVT::f64: return FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_f64_r(Op0, Op0IsKill);
+ case MVT::v8i8: return FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v8i8_r(Op0, Op0IsKill);
+ case MVT::v4i16: return FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v4i16_r(Op0, Op0IsKill);
+ case MVT::v2i32: return FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v2i32_r(Op0, Op0IsKill);
+ case MVT::v1i64: return FastEmit_ISD_BIT_CONVERT_MVT_i64_MVT_v1i64_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_f32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::MOVSS2DIrr, X86::GR32RegisterClass, Op0);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::VMOVSS2DIrr, X86::GR32RegisterClass, Op0, Op0IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::MOVSS2DIrr, X86::GR32RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_f64_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::i64)
- return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::MOVSDto64rr, X86::GR64RegisterClass, Op0);
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_f64_MVT_i64_r(unsigned Op0, bool Op0IsKill) {
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::MOVSDto64rr, X86::GR64RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_v8i8_MVT_i64_r(unsigned Op0) {
- return FastEmitInst_r(X86::MMX_MOVD64from64rr, X86::GR64RegisterClass, Op0);
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_f64_MVT_v8i8_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MMX_MOVFR642Qrr, X86::VR64RegisterClass, Op0, Op0IsKill);
+}
+
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_f64_MVT_v4i16_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MMX_MOVFR642Qrr, X86::VR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_v8i8_MVT_f64_r(unsigned Op0) {
- return FastEmitInst_r(X86::MMX_MOVQ2FR64rr, X86::FR64RegisterClass, Op0);
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_f64_MVT_v2i32_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MMX_MOVFR642Qrr, X86::VR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_v8i8_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_f64_MVT_v1i64_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MMX_MOVFR642Qrr, X86::VR64RegisterClass, Op0, Op0IsKill);
+}
+
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_f64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::i64: return FastEmit_ISD_BIT_CONVERT_MVT_v8i8_MVT_i64_r(Op0);
- case MVT::f64: return FastEmit_ISD_BIT_CONVERT_MVT_v8i8_MVT_f64_r(Op0);
+ case MVT::i64: return FastEmit_ISD_BIT_CONVERT_MVT_f64_MVT_i64_r(Op0, Op0IsKill);
+ case MVT::v8i8: return FastEmit_ISD_BIT_CONVERT_MVT_f64_MVT_v8i8_r(Op0, Op0IsKill);
+ case MVT::v4i16: return FastEmit_ISD_BIT_CONVERT_MVT_f64_MVT_v4i16_r(Op0, Op0IsKill);
+ case MVT::v2i32: return FastEmit_ISD_BIT_CONVERT_MVT_f64_MVT_v2i32_r(Op0, Op0IsKill);
+ case MVT::v1i64: return FastEmit_ISD_BIT_CONVERT_MVT_f64_MVT_v1i64_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_v4i16_MVT_i64_r(unsigned Op0) {
- return FastEmitInst_r(X86::MMX_MOVD64from64rr, X86::GR64RegisterClass, Op0);
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_v8i8_MVT_i64_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MMX_MOVD64from64rr, X86::GR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_v4i16_MVT_f64_r(unsigned Op0) {
- return FastEmitInst_r(X86::MMX_MOVQ2FR64rr, X86::FR64RegisterClass, Op0);
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_v8i8_MVT_f64_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MMX_MOVQ2FR64rr, X86::FR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_v4i16_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_v8i8_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::i64: return FastEmit_ISD_BIT_CONVERT_MVT_v4i16_MVT_i64_r(Op0);
- case MVT::f64: return FastEmit_ISD_BIT_CONVERT_MVT_v4i16_MVT_f64_r(Op0);
+ case MVT::i64: return FastEmit_ISD_BIT_CONVERT_MVT_v8i8_MVT_i64_r(Op0, Op0IsKill);
+ case MVT::f64: return FastEmit_ISD_BIT_CONVERT_MVT_v8i8_MVT_f64_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_v2i32_MVT_i64_r(unsigned Op0) {
- return FastEmitInst_r(X86::MMX_MOVD64from64rr, X86::GR64RegisterClass, Op0);
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_v4i16_MVT_i64_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MMX_MOVD64from64rr, X86::GR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_v2i32_MVT_f64_r(unsigned Op0) {
- return FastEmitInst_r(X86::MMX_MOVQ2FR64rr, X86::FR64RegisterClass, Op0);
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_v4i16_MVT_f64_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MMX_MOVQ2FR64rr, X86::FR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_v2i32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_v4i16_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::i64: return FastEmit_ISD_BIT_CONVERT_MVT_v2i32_MVT_i64_r(Op0);
- case MVT::f64: return FastEmit_ISD_BIT_CONVERT_MVT_v2i32_MVT_f64_r(Op0);
+ case MVT::i64: return FastEmit_ISD_BIT_CONVERT_MVT_v4i16_MVT_i64_r(Op0, Op0IsKill);
+ case MVT::f64: return FastEmit_ISD_BIT_CONVERT_MVT_v4i16_MVT_f64_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_v1i64_MVT_i64_r(unsigned Op0) {
- return FastEmitInst_r(X86::MMX_MOVD64from64rr, X86::GR64RegisterClass, Op0);
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_v2i32_MVT_i64_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MMX_MOVD64from64rr, X86::GR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_v1i64_MVT_f64_r(unsigned Op0) {
- return FastEmitInst_r(X86::MMX_MOVQ2FR64rr, X86::FR64RegisterClass, Op0);
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_v2i32_MVT_f64_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MMX_MOVQ2FR64rr, X86::FR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_v1i64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_v2i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::i64: return FastEmit_ISD_BIT_CONVERT_MVT_v1i64_MVT_i64_r(Op0);
- case MVT::f64: return FastEmit_ISD_BIT_CONVERT_MVT_v1i64_MVT_f64_r(Op0);
+ case MVT::i64: return FastEmit_ISD_BIT_CONVERT_MVT_v2i32_MVT_i64_r(Op0, Op0IsKill);
+ case MVT::f64: return FastEmit_ISD_BIT_CONVERT_MVT_v2i32_MVT_f64_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_BIT_CONVERT_MVT_v2f32_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::i64)
- return 0;
- return FastEmitInst_r(X86::MMX_MOVD64from64rr, X86::GR64RegisterClass, Op0);
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_v1i64_MVT_i64_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MMX_MOVD64from64rr, X86::GR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_BIT_CONVERT_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_v1i64_MVT_f64_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MMX_MOVQ2FR64rr, X86::FR64RegisterClass, Op0, Op0IsKill);
+}
+
+unsigned FastEmit_ISD_BIT_CONVERT_MVT_v1i64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+switch (RetVT.SimpleTy) {
+ case MVT::i64: return FastEmit_ISD_BIT_CONVERT_MVT_v1i64_MVT_i64_r(Op0, Op0IsKill);
+ case MVT::f64: return FastEmit_ISD_BIT_CONVERT_MVT_v1i64_MVT_f64_r(Op0, Op0IsKill);
+ default: return 0;
+}
+}
+
+unsigned FastEmit_ISD_BIT_CONVERT_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::i32: return FastEmit_ISD_BIT_CONVERT_MVT_i32_r(RetVT, Op0);
- case MVT::i64: return FastEmit_ISD_BIT_CONVERT_MVT_i64_r(RetVT, Op0);
- case MVT::f32: return FastEmit_ISD_BIT_CONVERT_MVT_f32_r(RetVT, Op0);
- case MVT::f64: return FastEmit_ISD_BIT_CONVERT_MVT_f64_r(RetVT, Op0);
- case MVT::v8i8: return FastEmit_ISD_BIT_CONVERT_MVT_v8i8_r(RetVT, Op0);
- case MVT::v4i16: return FastEmit_ISD_BIT_CONVERT_MVT_v4i16_r(RetVT, Op0);
- case MVT::v2i32: return FastEmit_ISD_BIT_CONVERT_MVT_v2i32_r(RetVT, Op0);
- case MVT::v1i64: return FastEmit_ISD_BIT_CONVERT_MVT_v1i64_r(RetVT, Op0);
- case MVT::v2f32: return FastEmit_ISD_BIT_CONVERT_MVT_v2f32_r(RetVT, Op0);
+ case MVT::i32: return FastEmit_ISD_BIT_CONVERT_MVT_i32_r(RetVT, Op0, Op0IsKill);
+ case MVT::i64: return FastEmit_ISD_BIT_CONVERT_MVT_i64_r(RetVT, Op0, Op0IsKill);
+ case MVT::f32: return FastEmit_ISD_BIT_CONVERT_MVT_f32_r(RetVT, Op0, Op0IsKill);
+ case MVT::f64: return FastEmit_ISD_BIT_CONVERT_MVT_f64_r(RetVT, Op0, Op0IsKill);
+ case MVT::v8i8: return FastEmit_ISD_BIT_CONVERT_MVT_v8i8_r(RetVT, Op0, Op0IsKill);
+ case MVT::v4i16: return FastEmit_ISD_BIT_CONVERT_MVT_v4i16_r(RetVT, Op0, Op0IsKill);
+ case MVT::v2i32: return FastEmit_ISD_BIT_CONVERT_MVT_v2i32_r(RetVT, Op0, Op0IsKill);
+ case MVT::v1i64: return FastEmit_ISD_BIT_CONVERT_MVT_v1i64_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::BRIND.
-unsigned FastEmit_ISD_BRIND_MVT_i32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_BRIND_MVT_i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::isVoid)
return 0;
- return FastEmitInst_r(X86::JMP32r, X86::GR32RegisterClass, Op0);
+ if ((!Subtarget->is64Bit())) {
+ return FastEmitInst_r(X86::JMP32r, X86::GR32RegisterClass, Op0, Op0IsKill);
+ }
+ return 0;
}
-unsigned FastEmit_ISD_BRIND_MVT_i64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_BRIND_MVT_i64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::isVoid)
return 0;
- return FastEmitInst_r(X86::JMP64r, X86::GR64RegisterClass, Op0);
+ if ((Subtarget->is64Bit())) {
+ return FastEmitInst_r(X86::JMP64r, X86::GR64RegisterClass, Op0, Op0IsKill);
+ }
+ return 0;
}
-unsigned FastEmit_ISD_BRIND_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_BRIND_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::i32: return FastEmit_ISD_BRIND_MVT_i32_r(RetVT, Op0);
- case MVT::i64: return FastEmit_ISD_BRIND_MVT_i64_r(RetVT, Op0);
+ case MVT::i32: return FastEmit_ISD_BRIND_MVT_i32_r(RetVT, Op0, Op0IsKill);
+ case MVT::i64: return FastEmit_ISD_BRIND_MVT_i64_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::BSWAP.
-unsigned FastEmit_ISD_BSWAP_MVT_i32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_BSWAP_MVT_i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_r(X86::BSWAP32r, X86::GR32RegisterClass, Op0);
+ return FastEmitInst_r(X86::BSWAP32r, X86::GR32RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_BSWAP_MVT_i64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_BSWAP_MVT_i64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::i64)
return 0;
- return FastEmitInst_r(X86::BSWAP64r, X86::GR64RegisterClass, Op0);
+ return FastEmitInst_r(X86::BSWAP64r, X86::GR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_BSWAP_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_BSWAP_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::i32: return FastEmit_ISD_BSWAP_MVT_i32_r(RetVT, Op0);
- case MVT::i64: return FastEmit_ISD_BSWAP_MVT_i64_r(RetVT, Op0);
+ case MVT::i32: return FastEmit_ISD_BSWAP_MVT_i32_r(RetVT, Op0, Op0IsKill);
+ case MVT::i64: return FastEmit_ISD_BSWAP_MVT_i64_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::FABS.
-unsigned FastEmit_ISD_FABS_MVT_f32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FABS_MVT_f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f32)
return 0;
if ((!Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::ABS_Fp32, X86::RFP32RegisterClass, Op0);
+ return FastEmitInst_r(X86::ABS_Fp32, X86::RFP32RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FABS_MVT_f64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FABS_MVT_f64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f64)
return 0;
if ((!Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::ABS_Fp64, X86::RFP64RegisterClass, Op0);
+ return FastEmitInst_r(X86::ABS_Fp64, X86::RFP64RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FABS_MVT_f80_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FABS_MVT_f80_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f80)
return 0;
- return FastEmitInst_r(X86::ABS_Fp80, X86::RFP80RegisterClass, Op0);
+ return FastEmitInst_r(X86::ABS_Fp80, X86::RFP80RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_FABS_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FABS_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_ISD_FABS_MVT_f32_r(RetVT, Op0);
- case MVT::f64: return FastEmit_ISD_FABS_MVT_f64_r(RetVT, Op0);
- case MVT::f80: return FastEmit_ISD_FABS_MVT_f80_r(RetVT, Op0);
+ case MVT::f32: return FastEmit_ISD_FABS_MVT_f32_r(RetVT, Op0, Op0IsKill);
+ case MVT::f64: return FastEmit_ISD_FABS_MVT_f64_r(RetVT, Op0, Op0IsKill);
+ case MVT::f80: return FastEmit_ISD_FABS_MVT_f80_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::FCOS.
-unsigned FastEmit_ISD_FCOS_MVT_f32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FCOS_MVT_f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f32)
return 0;
if ((!Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::COS_Fp32, X86::RFP32RegisterClass, Op0);
+ return FastEmitInst_r(X86::COS_Fp32, X86::RFP32RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FCOS_MVT_f64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FCOS_MVT_f64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f64)
return 0;
if ((!Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::COS_Fp64, X86::RFP64RegisterClass, Op0);
+ return FastEmitInst_r(X86::COS_Fp64, X86::RFP64RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FCOS_MVT_f80_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FCOS_MVT_f80_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f80)
return 0;
- return FastEmitInst_r(X86::COS_Fp80, X86::RFP80RegisterClass, Op0);
+ return FastEmitInst_r(X86::COS_Fp80, X86::RFP80RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_FCOS_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FCOS_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_ISD_FCOS_MVT_f32_r(RetVT, Op0);
- case MVT::f64: return FastEmit_ISD_FCOS_MVT_f64_r(RetVT, Op0);
- case MVT::f80: return FastEmit_ISD_FCOS_MVT_f80_r(RetVT, Op0);
+ case MVT::f32: return FastEmit_ISD_FCOS_MVT_f32_r(RetVT, Op0, Op0IsKill);
+ case MVT::f64: return FastEmit_ISD_FCOS_MVT_f64_r(RetVT, Op0, Op0IsKill);
+ case MVT::f80: return FastEmit_ISD_FCOS_MVT_f80_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::FNEG.
-unsigned FastEmit_ISD_FNEG_MVT_f32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FNEG_MVT_f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f32)
return 0;
if ((!Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::CHS_Fp32, X86::RFP32RegisterClass, Op0);
+ return FastEmitInst_r(X86::CHS_Fp32, X86::RFP32RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FNEG_MVT_f64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FNEG_MVT_f64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f64)
return 0;
if ((!Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::CHS_Fp64, X86::RFP64RegisterClass, Op0);
+ return FastEmitInst_r(X86::CHS_Fp64, X86::RFP64RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FNEG_MVT_f80_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FNEG_MVT_f80_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f80)
return 0;
- return FastEmitInst_r(X86::CHS_Fp80, X86::RFP80RegisterClass, Op0);
+ return FastEmitInst_r(X86::CHS_Fp80, X86::RFP80RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_FNEG_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FNEG_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_ISD_FNEG_MVT_f32_r(RetVT, Op0);
- case MVT::f64: return FastEmit_ISD_FNEG_MVT_f64_r(RetVT, Op0);
- case MVT::f80: return FastEmit_ISD_FNEG_MVT_f80_r(RetVT, Op0);
+ case MVT::f32: return FastEmit_ISD_FNEG_MVT_f32_r(RetVT, Op0, Op0IsKill);
+ case MVT::f64: return FastEmit_ISD_FNEG_MVT_f64_r(RetVT, Op0, Op0IsKill);
+ case MVT::f80: return FastEmit_ISD_FNEG_MVT_f80_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::FP_EXTEND.
-unsigned FastEmit_ISD_FP_EXTEND_MVT_f32_MVT_f64_r(unsigned Op0) {
- if ((!Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::MOV_Fp3264, X86::RFP64RegisterClass, Op0);
- }
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::CVTSS2SDrr, X86::FR64RegisterClass, Op0);
- }
- return 0;
-}
-
-unsigned FastEmit_ISD_FP_EXTEND_MVT_f32_MVT_f80_r(unsigned Op0) {
- if ((!Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::MOV_Fp3280, X86::RFP80RegisterClass, Op0);
- }
- return 0;
-}
-
-unsigned FastEmit_ISD_FP_EXTEND_MVT_f32_r(MVT RetVT, unsigned Op0) {
-switch (RetVT.SimpleTy) {
- case MVT::f64: return FastEmit_ISD_FP_EXTEND_MVT_f32_MVT_f64_r(Op0);
- case MVT::f80: return FastEmit_ISD_FP_EXTEND_MVT_f32_MVT_f80_r(Op0);
- default: return 0;
-}
-}
-
-unsigned FastEmit_ISD_FP_EXTEND_MVT_f64_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::f80)
+unsigned FastEmit_ISD_FP_EXTEND_MVT_f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::f64)
return 0;
- if ((!Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::MOV_Fp6480, X86::RFP80RegisterClass, Op0);
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::CVTSS2SDrr, X86::FR64RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FP_EXTEND_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FP_EXTEND_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_ISD_FP_EXTEND_MVT_f32_r(RetVT, Op0);
- case MVT::f64: return FastEmit_ISD_FP_EXTEND_MVT_f64_r(RetVT, Op0);
+ case MVT::f32: return FastEmit_ISD_FP_EXTEND_MVT_f32_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::FP_ROUND.
-unsigned FastEmit_ISD_FP_ROUND_MVT_f64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FP_ROUND_MVT_f64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f32)
return 0;
- if ((!Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::MOV_Fp6432, X86::RFP32RegisterClass, Op0);
- }
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::CVTSD2SSrr, X86::FR32RegisterClass, Op0);
- }
- return 0;
-}
-
-unsigned FastEmit_ISD_FP_ROUND_MVT_f80_MVT_f32_r(unsigned Op0) {
- if ((!Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::MOV_Fp8032, X86::RFP32RegisterClass, Op0);
- }
- return 0;
-}
-
-unsigned FastEmit_ISD_FP_ROUND_MVT_f80_MVT_f64_r(unsigned Op0) {
- if ((!Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::MOV_Fp8064, X86::RFP64RegisterClass, Op0);
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::CVTSD2SSrr, X86::FR32RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FP_ROUND_MVT_f80_r(MVT RetVT, unsigned Op0) {
-switch (RetVT.SimpleTy) {
- case MVT::f32: return FastEmit_ISD_FP_ROUND_MVT_f80_MVT_f32_r(Op0);
- case MVT::f64: return FastEmit_ISD_FP_ROUND_MVT_f80_MVT_f64_r(Op0);
- default: return 0;
-}
-}
-
-unsigned FastEmit_ISD_FP_ROUND_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FP_ROUND_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::f64: return FastEmit_ISD_FP_ROUND_MVT_f64_r(RetVT, Op0);
- case MVT::f80: return FastEmit_ISD_FP_ROUND_MVT_f80_r(RetVT, Op0);
+ case MVT::f64: return FastEmit_ISD_FP_ROUND_MVT_f64_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::FP_TO_SINT.
-unsigned FastEmit_ISD_FP_TO_SINT_MVT_f32_MVT_i32_r(unsigned Op0) {
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::CVTTSS2SIrr, X86::GR32RegisterClass, Op0);
+unsigned FastEmit_ISD_FP_TO_SINT_MVT_f32_MVT_i32_r(unsigned Op0, bool Op0IsKill) {
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::VCVTTSS2SIrr, X86::GR32RegisterClass, Op0, Op0IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::CVTTSS2SIrr, X86::GR32RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FP_TO_SINT_MVT_f32_MVT_i64_r(unsigned Op0) {
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::CVTTSS2SI64rr, X86::GR64RegisterClass, Op0);
+unsigned FastEmit_ISD_FP_TO_SINT_MVT_f32_MVT_i64_r(unsigned Op0, bool Op0IsKill) {
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::VCVTTSS2SI64rr, X86::GR64RegisterClass, Op0, Op0IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::CVTTSS2SI64rr, X86::GR64RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FP_TO_SINT_MVT_f32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FP_TO_SINT_MVT_f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::i32: return FastEmit_ISD_FP_TO_SINT_MVT_f32_MVT_i32_r(Op0);
- case MVT::i64: return FastEmit_ISD_FP_TO_SINT_MVT_f32_MVT_i64_r(Op0);
+ case MVT::i32: return FastEmit_ISD_FP_TO_SINT_MVT_f32_MVT_i32_r(Op0, Op0IsKill);
+ case MVT::i64: return FastEmit_ISD_FP_TO_SINT_MVT_f32_MVT_i64_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_FP_TO_SINT_MVT_f64_MVT_i32_r(unsigned Op0) {
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::CVTTSD2SIrr, X86::GR32RegisterClass, Op0);
+unsigned FastEmit_ISD_FP_TO_SINT_MVT_f64_MVT_i32_r(unsigned Op0, bool Op0IsKill) {
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::VCVTTSD2SIrr, X86::GR32RegisterClass, Op0, Op0IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::CVTTSD2SIrr, X86::GR32RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FP_TO_SINT_MVT_f64_MVT_i64_r(unsigned Op0) {
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::CVTTSD2SI64rr, X86::GR64RegisterClass, Op0);
+unsigned FastEmit_ISD_FP_TO_SINT_MVT_f64_MVT_i64_r(unsigned Op0, bool Op0IsKill) {
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::VCVTTSD2SI64rr, X86::GR64RegisterClass, Op0, Op0IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::CVTTSD2SI64rr, X86::GR64RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FP_TO_SINT_MVT_f64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FP_TO_SINT_MVT_f64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::i32: return FastEmit_ISD_FP_TO_SINT_MVT_f64_MVT_i32_r(Op0);
- case MVT::i64: return FastEmit_ISD_FP_TO_SINT_MVT_f64_MVT_i64_r(Op0);
+ case MVT::i32: return FastEmit_ISD_FP_TO_SINT_MVT_f64_MVT_i32_r(Op0, Op0IsKill);
+ case MVT::i64: return FastEmit_ISD_FP_TO_SINT_MVT_f64_MVT_i64_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_FP_TO_SINT_MVT_v4f32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FP_TO_SINT_MVT_v4f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::v4i32)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::Int_CVTTPS2DQrr, X86::VR128RegisterClass, Op0);
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::Int_CVTTPS2DQrr, X86::VR128RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FP_TO_SINT_MVT_v2f64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FP_TO_SINT_MVT_v2f64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::v2i32)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::Int_CVTTPD2PIrr, X86::VR64RegisterClass, Op0);
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::Int_CVTTPD2PIrr, X86::VR64RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FP_TO_SINT_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FP_TO_SINT_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_ISD_FP_TO_SINT_MVT_f32_r(RetVT, Op0);
- case MVT::f64: return FastEmit_ISD_FP_TO_SINT_MVT_f64_r(RetVT, Op0);
- case MVT::v4f32: return FastEmit_ISD_FP_TO_SINT_MVT_v4f32_r(RetVT, Op0);
- case MVT::v2f64: return FastEmit_ISD_FP_TO_SINT_MVT_v2f64_r(RetVT, Op0);
+ case MVT::f32: return FastEmit_ISD_FP_TO_SINT_MVT_f32_r(RetVT, Op0, Op0IsKill);
+ case MVT::f64: return FastEmit_ISD_FP_TO_SINT_MVT_f64_r(RetVT, Op0, Op0IsKill);
+ case MVT::v4f32: return FastEmit_ISD_FP_TO_SINT_MVT_v4f32_r(RetVT, Op0, Op0IsKill);
+ case MVT::v2f64: return FastEmit_ISD_FP_TO_SINT_MVT_v2f64_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::FSIN.
-unsigned FastEmit_ISD_FSIN_MVT_f32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FSIN_MVT_f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f32)
return 0;
if ((!Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::SIN_Fp32, X86::RFP32RegisterClass, Op0);
+ return FastEmitInst_r(X86::SIN_Fp32, X86::RFP32RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FSIN_MVT_f64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FSIN_MVT_f64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f64)
return 0;
if ((!Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::SIN_Fp64, X86::RFP64RegisterClass, Op0);
+ return FastEmitInst_r(X86::SIN_Fp64, X86::RFP64RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FSIN_MVT_f80_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FSIN_MVT_f80_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f80)
return 0;
- return FastEmitInst_r(X86::SIN_Fp80, X86::RFP80RegisterClass, Op0);
+ return FastEmitInst_r(X86::SIN_Fp80, X86::RFP80RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_FSIN_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FSIN_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_ISD_FSIN_MVT_f32_r(RetVT, Op0);
- case MVT::f64: return FastEmit_ISD_FSIN_MVT_f64_r(RetVT, Op0);
- case MVT::f80: return FastEmit_ISD_FSIN_MVT_f80_r(RetVT, Op0);
+ case MVT::f32: return FastEmit_ISD_FSIN_MVT_f32_r(RetVT, Op0, Op0IsKill);
+ case MVT::f64: return FastEmit_ISD_FSIN_MVT_f64_r(RetVT, Op0, Op0IsKill);
+ case MVT::f80: return FastEmit_ISD_FSIN_MVT_f80_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::FSQRT.
-unsigned FastEmit_ISD_FSQRT_MVT_f32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FSQRT_MVT_f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f32)
return 0;
if ((!Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::SQRT_Fp32, X86::RFP32RegisterClass, Op0);
+ return FastEmitInst_r(X86::SQRT_Fp32, X86::RFP32RegisterClass, Op0, Op0IsKill);
}
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::SQRTSSr, X86::FR32RegisterClass, Op0);
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::SQRTSSr, X86::FR32RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FSQRT_MVT_f64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FSQRT_MVT_f64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f64)
return 0;
if ((!Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::SQRT_Fp64, X86::RFP64RegisterClass, Op0);
+ return FastEmitInst_r(X86::SQRT_Fp64, X86::RFP64RegisterClass, Op0, Op0IsKill);
}
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::SQRTSDr, X86::FR64RegisterClass, Op0);
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::SQRTSDr, X86::FR64RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FSQRT_MVT_f80_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FSQRT_MVT_f80_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::f80)
return 0;
- return FastEmitInst_r(X86::SQRT_Fp80, X86::RFP80RegisterClass, Op0);
+ return FastEmitInst_r(X86::SQRT_Fp80, X86::RFP80RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_FSQRT_MVT_v4f32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FSQRT_MVT_v4f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::v4f32)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::SQRTPSr, X86::VR128RegisterClass, Op0);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::VSQRTPSr, X86::VR128RegisterClass, Op0, Op0IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::SQRTPSr, X86::VR128RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FSQRT_MVT_v2f64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FSQRT_MVT_v8f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::v8f32)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::VSQRTPSYr, X86::VR256RegisterClass, Op0, Op0IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_ISD_FSQRT_MVT_v2f64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::v2f64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::SQRTPDr, X86::VR128RegisterClass, Op0);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::VSQRTPDr, X86::VR128RegisterClass, Op0, Op0IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::SQRTPDr, X86::VR128RegisterClass, Op0, Op0IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_ISD_FSQRT_MVT_v4f64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::v4f64)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::VSQRTPDYr, X86::VR256RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FSQRT_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_FSQRT_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_ISD_FSQRT_MVT_f32_r(RetVT, Op0);
- case MVT::f64: return FastEmit_ISD_FSQRT_MVT_f64_r(RetVT, Op0);
- case MVT::f80: return FastEmit_ISD_FSQRT_MVT_f80_r(RetVT, Op0);
- case MVT::v4f32: return FastEmit_ISD_FSQRT_MVT_v4f32_r(RetVT, Op0);
- case MVT::v2f64: return FastEmit_ISD_FSQRT_MVT_v2f64_r(RetVT, Op0);
+ case MVT::f32: return FastEmit_ISD_FSQRT_MVT_f32_r(RetVT, Op0, Op0IsKill);
+ case MVT::f64: return FastEmit_ISD_FSQRT_MVT_f64_r(RetVT, Op0, Op0IsKill);
+ case MVT::f80: return FastEmit_ISD_FSQRT_MVT_f80_r(RetVT, Op0, Op0IsKill);
+ case MVT::v4f32: return FastEmit_ISD_FSQRT_MVT_v4f32_r(RetVT, Op0, Op0IsKill);
+ case MVT::v8f32: return FastEmit_ISD_FSQRT_MVT_v8f32_r(RetVT, Op0, Op0IsKill);
+ case MVT::v2f64: return FastEmit_ISD_FSQRT_MVT_v2f64_r(RetVT, Op0, Op0IsKill);
+ case MVT::v4f64: return FastEmit_ISD_FSQRT_MVT_v4f64_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::SCALAR_TO_VECTOR.
-unsigned FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i32_MVT_v2i32_r(unsigned Op0) {
- if ((Subtarget->hasMMX())) {
- return FastEmitInst_r(X86::MMX_MOVD64rr, X86::VR64RegisterClass, Op0);
+unsigned FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i32_MVT_v2i32_r(unsigned Op0, bool Op0IsKill) {
+ if ((Subtarget->hasMMX() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::MMX_MOVD64rr, X86::VR64RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i32_MVT_v4i32_r(unsigned Op0) {
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::MOVDI2PDIrr, X86::VR128RegisterClass, Op0);
+unsigned FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i32_MVT_v4i32_r(unsigned Op0, bool Op0IsKill) {
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::VMOVDI2PDIrr, X86::VR128RegisterClass, Op0, Op0IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::MOVDI2PDIrr, X86::VR128RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::v2i32: return FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i32_MVT_v2i32_r(Op0);
- case MVT::v4i32: return FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i32_MVT_v4i32_r(Op0);
+ case MVT::v2i32: return FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i32_MVT_v2i32_r(Op0, Op0IsKill);
+ case MVT::v4i32: return FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i32_MVT_v4i32_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i64_MVT_v1i64_r(unsigned Op0) {
- if ((Subtarget->hasMMX())) {
- return FastEmitInst_r(X86::MMX_MOVD64rrv164, X86::VR64RegisterClass, Op0);
+unsigned FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i64_MVT_v1i64_r(unsigned Op0, bool Op0IsKill) {
+ if ((Subtarget->hasMMX() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::MMX_MOVD64rrv164, X86::VR64RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i64_MVT_v2i64_r(unsigned Op0) {
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::MOV64toPQIrr, X86::VR128RegisterClass, Op0);
+unsigned FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i64_MVT_v2i64_r(unsigned Op0, bool Op0IsKill) {
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::MOV64toPQIrr, X86::VR128RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::v1i64: return FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i64_MVT_v1i64_r(Op0);
- case MVT::v2i64: return FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i64_MVT_v2i64_r(Op0);
+ case MVT::v1i64: return FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i64_MVT_v1i64_r(Op0, Op0IsKill);
+ case MVT::v2i64: return FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i64_MVT_v2i64_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_SCALAR_TO_VECTOR_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_SCALAR_TO_VECTOR_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::i32: return FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i32_r(RetVT, Op0);
- case MVT::i64: return FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i64_r(RetVT, Op0);
+ case MVT::i32: return FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i32_r(RetVT, Op0, Op0IsKill);
+ case MVT::i64: return FastEmit_ISD_SCALAR_TO_VECTOR_MVT_i64_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::SIGN_EXTEND.
-unsigned FastEmit_ISD_SIGN_EXTEND_MVT_i8_MVT_i16_r(unsigned Op0) {
- return FastEmitInst_r(X86::MOVSX16rr8, X86::GR16RegisterClass, Op0);
+unsigned FastEmit_ISD_SIGN_EXTEND_MVT_i8_MVT_i16_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MOVSX16rr8, X86::GR16RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_SIGN_EXTEND_MVT_i8_MVT_i32_r(unsigned Op0) {
- return FastEmitInst_r(X86::MOVSX32rr8, X86::GR32RegisterClass, Op0);
+unsigned FastEmit_ISD_SIGN_EXTEND_MVT_i8_MVT_i32_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MOVSX32rr8, X86::GR32RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_SIGN_EXTEND_MVT_i8_MVT_i64_r(unsigned Op0) {
- return FastEmitInst_r(X86::MOVSX64rr8, X86::GR64RegisterClass, Op0);
+unsigned FastEmit_ISD_SIGN_EXTEND_MVT_i8_MVT_i64_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MOVSX64rr8, X86::GR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_SIGN_EXTEND_MVT_i8_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_SIGN_EXTEND_MVT_i8_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::i16: return FastEmit_ISD_SIGN_EXTEND_MVT_i8_MVT_i16_r(Op0);
- case MVT::i32: return FastEmit_ISD_SIGN_EXTEND_MVT_i8_MVT_i32_r(Op0);
- case MVT::i64: return FastEmit_ISD_SIGN_EXTEND_MVT_i8_MVT_i64_r(Op0);
+ case MVT::i16: return FastEmit_ISD_SIGN_EXTEND_MVT_i8_MVT_i16_r(Op0, Op0IsKill);
+ case MVT::i32: return FastEmit_ISD_SIGN_EXTEND_MVT_i8_MVT_i32_r(Op0, Op0IsKill);
+ case MVT::i64: return FastEmit_ISD_SIGN_EXTEND_MVT_i8_MVT_i64_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_SIGN_EXTEND_MVT_i16_MVT_i32_r(unsigned Op0) {
- return FastEmitInst_r(X86::MOVSX32rr16, X86::GR32RegisterClass, Op0);
+unsigned FastEmit_ISD_SIGN_EXTEND_MVT_i16_MVT_i32_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MOVSX32rr16, X86::GR32RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_SIGN_EXTEND_MVT_i16_MVT_i64_r(unsigned Op0) {
- return FastEmitInst_r(X86::MOVSX64rr16, X86::GR64RegisterClass, Op0);
+unsigned FastEmit_ISD_SIGN_EXTEND_MVT_i16_MVT_i64_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MOVSX64rr16, X86::GR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_SIGN_EXTEND_MVT_i16_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_SIGN_EXTEND_MVT_i16_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::i32: return FastEmit_ISD_SIGN_EXTEND_MVT_i16_MVT_i32_r(Op0);
- case MVT::i64: return FastEmit_ISD_SIGN_EXTEND_MVT_i16_MVT_i64_r(Op0);
+ case MVT::i32: return FastEmit_ISD_SIGN_EXTEND_MVT_i16_MVT_i32_r(Op0, Op0IsKill);
+ case MVT::i64: return FastEmit_ISD_SIGN_EXTEND_MVT_i16_MVT_i64_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_SIGN_EXTEND_MVT_i32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_SIGN_EXTEND_MVT_i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::i64)
return 0;
- return FastEmitInst_r(X86::MOVSX64rr32, X86::GR64RegisterClass, Op0);
+ return FastEmitInst_r(X86::MOVSX64rr32, X86::GR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_SIGN_EXTEND_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_SIGN_EXTEND_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_SIGN_EXTEND_MVT_i8_r(RetVT, Op0);
- case MVT::i16: return FastEmit_ISD_SIGN_EXTEND_MVT_i16_r(RetVT, Op0);
- case MVT::i32: return FastEmit_ISD_SIGN_EXTEND_MVT_i32_r(RetVT, Op0);
+ case MVT::i8: return FastEmit_ISD_SIGN_EXTEND_MVT_i8_r(RetVT, Op0, Op0IsKill);
+ case MVT::i16: return FastEmit_ISD_SIGN_EXTEND_MVT_i16_r(RetVT, Op0, Op0IsKill);
+ case MVT::i32: return FastEmit_ISD_SIGN_EXTEND_MVT_i32_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::SINT_TO_FP.
-unsigned FastEmit_ISD_SINT_TO_FP_MVT_i32_MVT_f32_r(unsigned Op0) {
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::CVTSI2SSrr, X86::FR32RegisterClass, Op0);
+unsigned FastEmit_ISD_SINT_TO_FP_MVT_i32_MVT_f32_r(unsigned Op0, bool Op0IsKill) {
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::CVTSI2SSrr, X86::FR32RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SINT_TO_FP_MVT_i32_MVT_f64_r(unsigned Op0) {
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::CVTSI2SDrr, X86::FR64RegisterClass, Op0);
+unsigned FastEmit_ISD_SINT_TO_FP_MVT_i32_MVT_f64_r(unsigned Op0, bool Op0IsKill) {
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::CVTSI2SDrr, X86::FR64RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SINT_TO_FP_MVT_i32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_SINT_TO_FP_MVT_i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::f32: return FastEmit_ISD_SINT_TO_FP_MVT_i32_MVT_f32_r(Op0);
- case MVT::f64: return FastEmit_ISD_SINT_TO_FP_MVT_i32_MVT_f64_r(Op0);
+ case MVT::f32: return FastEmit_ISD_SINT_TO_FP_MVT_i32_MVT_f32_r(Op0, Op0IsKill);
+ case MVT::f64: return FastEmit_ISD_SINT_TO_FP_MVT_i32_MVT_f64_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_SINT_TO_FP_MVT_i64_MVT_f32_r(unsigned Op0) {
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::CVTSI2SS64rr, X86::FR32RegisterClass, Op0);
+unsigned FastEmit_ISD_SINT_TO_FP_MVT_i64_MVT_f32_r(unsigned Op0, bool Op0IsKill) {
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::CVTSI2SS64rr, X86::FR32RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SINT_TO_FP_MVT_i64_MVT_f64_r(unsigned Op0) {
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::CVTSI2SD64rr, X86::FR64RegisterClass, Op0);
+unsigned FastEmit_ISD_SINT_TO_FP_MVT_i64_MVT_f64_r(unsigned Op0, bool Op0IsKill) {
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::CVTSI2SD64rr, X86::FR64RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SINT_TO_FP_MVT_i64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_SINT_TO_FP_MVT_i64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::f32: return FastEmit_ISD_SINT_TO_FP_MVT_i64_MVT_f32_r(Op0);
- case MVT::f64: return FastEmit_ISD_SINT_TO_FP_MVT_i64_MVT_f64_r(Op0);
+ case MVT::f32: return FastEmit_ISD_SINT_TO_FP_MVT_i64_MVT_f32_r(Op0, Op0IsKill);
+ case MVT::f64: return FastEmit_ISD_SINT_TO_FP_MVT_i64_MVT_f64_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_SINT_TO_FP_MVT_v2i32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_SINT_TO_FP_MVT_v2i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::v2f64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::Int_CVTPI2PDrr, X86::VR128RegisterClass, Op0);
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::Int_CVTPI2PDrr, X86::VR128RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SINT_TO_FP_MVT_v4i32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_SINT_TO_FP_MVT_v4i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::v4f32)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::Int_CVTDQ2PSrr, X86::VR128RegisterClass, Op0);
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::Int_CVTDQ2PSrr, X86::VR128RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SINT_TO_FP_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_SINT_TO_FP_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::i32: return FastEmit_ISD_SINT_TO_FP_MVT_i32_r(RetVT, Op0);
- case MVT::i64: return FastEmit_ISD_SINT_TO_FP_MVT_i64_r(RetVT, Op0);
- case MVT::v2i32: return FastEmit_ISD_SINT_TO_FP_MVT_v2i32_r(RetVT, Op0);
- case MVT::v4i32: return FastEmit_ISD_SINT_TO_FP_MVT_v4i32_r(RetVT, Op0);
+ case MVT::i32: return FastEmit_ISD_SINT_TO_FP_MVT_i32_r(RetVT, Op0, Op0IsKill);
+ case MVT::i64: return FastEmit_ISD_SINT_TO_FP_MVT_i64_r(RetVT, Op0, Op0IsKill);
+ case MVT::v2i32: return FastEmit_ISD_SINT_TO_FP_MVT_v2i32_r(RetVT, Op0, Op0IsKill);
+ case MVT::v4i32: return FastEmit_ISD_SINT_TO_FP_MVT_v4i32_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::TRUNCATE.
-unsigned FastEmit_ISD_TRUNCATE_MVT_i16_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_TRUNCATE_MVT_i16_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
if ((Subtarget->is64Bit())) {
- return FastEmitInst_extractsubreg(RetVT, Op0, 1);
+ return FastEmitInst_extractsubreg(RetVT, Op0, Op0IsKill, X86::sub_8bit);
}
return 0;
}
-unsigned FastEmit_ISD_TRUNCATE_MVT_i32_MVT_i8_r(unsigned Op0) {
+unsigned FastEmit_ISD_TRUNCATE_MVT_i32_MVT_i8_r(unsigned Op0, bool Op0IsKill) {
if ((Subtarget->is64Bit())) {
- return FastEmitInst_extractsubreg(MVT::i8, Op0, 1);
+ return FastEmitInst_extractsubreg(MVT::i8, Op0, Op0IsKill, X86::sub_8bit);
}
return 0;
}
-unsigned FastEmit_ISD_TRUNCATE_MVT_i32_MVT_i16_r(unsigned Op0) {
- return FastEmitInst_extractsubreg(MVT::i16, Op0, 3);
+unsigned FastEmit_ISD_TRUNCATE_MVT_i32_MVT_i16_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_extractsubreg(MVT::i16, Op0, Op0IsKill, X86::sub_16bit);
}
-unsigned FastEmit_ISD_TRUNCATE_MVT_i32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_TRUNCATE_MVT_i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_TRUNCATE_MVT_i32_MVT_i8_r(Op0);
- case MVT::i16: return FastEmit_ISD_TRUNCATE_MVT_i32_MVT_i16_r(Op0);
+ case MVT::i8: return FastEmit_ISD_TRUNCATE_MVT_i32_MVT_i8_r(Op0, Op0IsKill);
+ case MVT::i16: return FastEmit_ISD_TRUNCATE_MVT_i32_MVT_i16_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_TRUNCATE_MVT_i64_MVT_i8_r(unsigned Op0) {
- return FastEmitInst_extractsubreg(MVT::i8, Op0, 1);
+unsigned FastEmit_ISD_TRUNCATE_MVT_i64_MVT_i8_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_extractsubreg(MVT::i8, Op0, Op0IsKill, X86::sub_8bit);
}
-unsigned FastEmit_ISD_TRUNCATE_MVT_i64_MVT_i16_r(unsigned Op0) {
- return FastEmitInst_extractsubreg(MVT::i16, Op0, 3);
+unsigned FastEmit_ISD_TRUNCATE_MVT_i64_MVT_i16_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_extractsubreg(MVT::i16, Op0, Op0IsKill, X86::sub_16bit);
}
-unsigned FastEmit_ISD_TRUNCATE_MVT_i64_MVT_i32_r(unsigned Op0) {
- return FastEmitInst_extractsubreg(MVT::i32, Op0, 4);
+unsigned FastEmit_ISD_TRUNCATE_MVT_i64_MVT_i32_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_extractsubreg(MVT::i32, Op0, Op0IsKill, X86::sub_32bit);
}
-unsigned FastEmit_ISD_TRUNCATE_MVT_i64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_TRUNCATE_MVT_i64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_TRUNCATE_MVT_i64_MVT_i8_r(Op0);
- case MVT::i16: return FastEmit_ISD_TRUNCATE_MVT_i64_MVT_i16_r(Op0);
- case MVT::i32: return FastEmit_ISD_TRUNCATE_MVT_i64_MVT_i32_r(Op0);
+ case MVT::i8: return FastEmit_ISD_TRUNCATE_MVT_i64_MVT_i8_r(Op0, Op0IsKill);
+ case MVT::i16: return FastEmit_ISD_TRUNCATE_MVT_i64_MVT_i16_r(Op0, Op0IsKill);
+ case MVT::i32: return FastEmit_ISD_TRUNCATE_MVT_i64_MVT_i32_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_TRUNCATE_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_TRUNCATE_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::i16: return FastEmit_ISD_TRUNCATE_MVT_i16_r(RetVT, Op0);
- case MVT::i32: return FastEmit_ISD_TRUNCATE_MVT_i32_r(RetVT, Op0);
- case MVT::i64: return FastEmit_ISD_TRUNCATE_MVT_i64_r(RetVT, Op0);
+ case MVT::i16: return FastEmit_ISD_TRUNCATE_MVT_i16_r(RetVT, Op0, Op0IsKill);
+ case MVT::i32: return FastEmit_ISD_TRUNCATE_MVT_i32_r(RetVT, Op0, Op0IsKill);
+ case MVT::i64: return FastEmit_ISD_TRUNCATE_MVT_i64_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::ZERO_EXTEND.
-unsigned FastEmit_ISD_ZERO_EXTEND_MVT_i8_MVT_i16_r(unsigned Op0) {
- return FastEmitInst_r(X86::MOVZX16rr8, X86::GR16RegisterClass, Op0);
+unsigned FastEmit_ISD_ZERO_EXTEND_MVT_i8_MVT_i16_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MOVZX16rr8, X86::GR16RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_ZERO_EXTEND_MVT_i8_MVT_i32_r(unsigned Op0) {
- return FastEmitInst_r(X86::MOVZX32rr8, X86::GR32RegisterClass, Op0);
+unsigned FastEmit_ISD_ZERO_EXTEND_MVT_i8_MVT_i32_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MOVZX32rr8, X86::GR32RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_ZERO_EXTEND_MVT_i8_MVT_i64_r(unsigned Op0) {
- return FastEmitInst_r(X86::MOVZX64rr8, X86::GR64RegisterClass, Op0);
+unsigned FastEmit_ISD_ZERO_EXTEND_MVT_i8_MVT_i64_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MOVZX64rr8, X86::GR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_ZERO_EXTEND_MVT_i8_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_ZERO_EXTEND_MVT_i8_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::i16: return FastEmit_ISD_ZERO_EXTEND_MVT_i8_MVT_i16_r(Op0);
- case MVT::i32: return FastEmit_ISD_ZERO_EXTEND_MVT_i8_MVT_i32_r(Op0);
- case MVT::i64: return FastEmit_ISD_ZERO_EXTEND_MVT_i8_MVT_i64_r(Op0);
+ case MVT::i16: return FastEmit_ISD_ZERO_EXTEND_MVT_i8_MVT_i16_r(Op0, Op0IsKill);
+ case MVT::i32: return FastEmit_ISD_ZERO_EXTEND_MVT_i8_MVT_i32_r(Op0, Op0IsKill);
+ case MVT::i64: return FastEmit_ISD_ZERO_EXTEND_MVT_i8_MVT_i64_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_ZERO_EXTEND_MVT_i16_MVT_i32_r(unsigned Op0) {
- return FastEmitInst_r(X86::MOVZX32rr16, X86::GR32RegisterClass, Op0);
+unsigned FastEmit_ISD_ZERO_EXTEND_MVT_i16_MVT_i32_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MOVZX32rr16, X86::GR32RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_ZERO_EXTEND_MVT_i16_MVT_i64_r(unsigned Op0) {
- return FastEmitInst_r(X86::MOVZX64rr16, X86::GR64RegisterClass, Op0);
+unsigned FastEmit_ISD_ZERO_EXTEND_MVT_i16_MVT_i64_r(unsigned Op0, bool Op0IsKill) {
+ return FastEmitInst_r(X86::MOVZX64rr16, X86::GR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_ZERO_EXTEND_MVT_i16_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_ZERO_EXTEND_MVT_i16_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (RetVT.SimpleTy) {
- case MVT::i32: return FastEmit_ISD_ZERO_EXTEND_MVT_i16_MVT_i32_r(Op0);
- case MVT::i64: return FastEmit_ISD_ZERO_EXTEND_MVT_i16_MVT_i64_r(Op0);
+ case MVT::i32: return FastEmit_ISD_ZERO_EXTEND_MVT_i16_MVT_i32_r(Op0, Op0IsKill);
+ case MVT::i64: return FastEmit_ISD_ZERO_EXTEND_MVT_i16_MVT_i64_r(Op0, Op0IsKill);
default: return 0;
}
}
-unsigned FastEmit_ISD_ZERO_EXTEND_MVT_i32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_ZERO_EXTEND_MVT_i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::i64)
return 0;
- return FastEmitInst_r(X86::MOVZX64rr32, X86::GR64RegisterClass, Op0);
+ return FastEmitInst_r(X86::MOVZX64rr32, X86::GR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_ISD_ZERO_EXTEND_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_ISD_ZERO_EXTEND_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_ZERO_EXTEND_MVT_i8_r(RetVT, Op0);
- case MVT::i16: return FastEmit_ISD_ZERO_EXTEND_MVT_i16_r(RetVT, Op0);
- case MVT::i32: return FastEmit_ISD_ZERO_EXTEND_MVT_i32_r(RetVT, Op0);
+ case MVT::i8: return FastEmit_ISD_ZERO_EXTEND_MVT_i8_r(RetVT, Op0, Op0IsKill);
+ case MVT::i16: return FastEmit_ISD_ZERO_EXTEND_MVT_i16_r(RetVT, Op0, Op0IsKill);
+ case MVT::i32: return FastEmit_ISD_ZERO_EXTEND_MVT_i32_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
-// FastEmit functions for X86ISD::BSF.
-
-unsigned FastEmit_X86ISD_BSF_MVT_i16_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::i16)
- return 0;
- return FastEmitInst_r(X86::BSF16rr, X86::GR16RegisterClass, Op0);
-}
+// FastEmit functions for X86ISD::CALL.
-unsigned FastEmit_X86ISD_BSF_MVT_i32_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::i32)
+unsigned FastEmit_X86ISD_CALL_MVT_i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::isVoid)
return 0;
- return FastEmitInst_r(X86::BSF32rr, X86::GR32RegisterClass, Op0);
+ return FastEmitInst_r(X86::CALL32r, X86::GR32RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_X86ISD_BSF_MVT_i64_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::i64)
+unsigned FastEmit_X86ISD_CALL_MVT_i64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::isVoid)
return 0;
- return FastEmitInst_r(X86::BSF64rr, X86::GR64RegisterClass, Op0);
-}
-
-unsigned FastEmit_X86ISD_BSF_r(MVT VT, MVT RetVT, unsigned Op0) {
- switch (VT.SimpleTy) {
- case MVT::i16: return FastEmit_X86ISD_BSF_MVT_i16_r(RetVT, Op0);
- case MVT::i32: return FastEmit_X86ISD_BSF_MVT_i32_r(RetVT, Op0);
- case MVT::i64: return FastEmit_X86ISD_BSF_MVT_i64_r(RetVT, Op0);
- default: return 0;
+ if ((!Subtarget->isTargetWin64())) {
+ return FastEmitInst_r(X86::CALL64r, X86::GR64RegisterClass, Op0, Op0IsKill);
}
+ if ((Subtarget->isTargetWin64())) {
+ return FastEmitInst_r(X86::WINCALL64r, X86::GR64RegisterClass, Op0, Op0IsKill);
+ }
+ return 0;
}
-// FastEmit functions for X86ISD::BSR.
-
-unsigned FastEmit_X86ISD_BSR_MVT_i16_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::i16)
- return 0;
- return FastEmitInst_r(X86::BSR16rr, X86::GR16RegisterClass, Op0);
-}
-
-unsigned FastEmit_X86ISD_BSR_MVT_i32_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::i32)
- return 0;
- return FastEmitInst_r(X86::BSR32rr, X86::GR32RegisterClass, Op0);
-}
-
-unsigned FastEmit_X86ISD_BSR_MVT_i64_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::i64)
- return 0;
- return FastEmitInst_r(X86::BSR64rr, X86::GR64RegisterClass, Op0);
-}
-
-unsigned FastEmit_X86ISD_BSR_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_X86ISD_CALL_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::i16: return FastEmit_X86ISD_BSR_MVT_i16_r(RetVT, Op0);
- case MVT::i32: return FastEmit_X86ISD_BSR_MVT_i32_r(RetVT, Op0);
- case MVT::i64: return FastEmit_X86ISD_BSR_MVT_i64_r(RetVT, Op0);
+ case MVT::i32: return FastEmit_X86ISD_CALL_MVT_i32_r(RetVT, Op0, Op0IsKill);
+ case MVT::i64: return FastEmit_X86ISD_CALL_MVT_i64_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
-// FastEmit functions for X86ISD::CALL.
+// FastEmit functions for X86ISD::EH_RETURN.
-unsigned FastEmit_X86ISD_CALL_MVT_i32_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_X86ISD_EH_RETURN_MVT_i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::isVoid)
return 0;
- return FastEmitInst_r(X86::CALL32r, X86::GR32RegisterClass, Op0);
+ return FastEmitInst_r(X86::EH_RETURN, X86::GR32RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_X86ISD_CALL_MVT_i64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_X86ISD_EH_RETURN_MVT_i64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::isVoid)
return 0;
- if ((!Subtarget->isTargetWin64())) {
- return FastEmitInst_r(X86::CALL64r, X86::GR64RegisterClass, Op0);
- }
- if ((Subtarget->isTargetWin64())) {
- return FastEmitInst_r(X86::WINCALL64r, X86::GR64RegisterClass, Op0);
- }
- return 0;
+ return FastEmitInst_r(X86::EH_RETURN64, X86::GR64RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_X86ISD_CALL_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_X86ISD_EH_RETURN_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::i32: return FastEmit_X86ISD_CALL_MVT_i32_r(RetVT, Op0);
- case MVT::i64: return FastEmit_X86ISD_CALL_MVT_i64_r(RetVT, Op0);
+ case MVT::i32: return FastEmit_X86ISD_EH_RETURN_MVT_i32_r(RetVT, Op0, Op0IsKill);
+ case MVT::i64: return FastEmit_X86ISD_EH_RETURN_MVT_i64_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
-// FastEmit functions for X86ISD::DEC.
-
-unsigned FastEmit_X86ISD_DEC_MVT_i8_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::i8)
- return 0;
- return FastEmitInst_r(X86::DEC8r, X86::GR8RegisterClass, Op0);
-}
+// FastEmit functions for X86ISD::FRCP.
-unsigned FastEmit_X86ISD_DEC_MVT_i16_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::i16)
+unsigned FastEmit_X86ISD_FRCP_MVT_f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::f32)
return 0;
- if ((!Subtarget->is64Bit())) {
- return FastEmitInst_r(X86::DEC16r, X86::GR16RegisterClass, Op0);
- }
- if ((Subtarget->is64Bit())) {
- return FastEmitInst_r(X86::DEC64_16r, X86::GR16RegisterClass, Op0);
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::RCPSSr, X86::FR32RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_DEC_MVT_i32_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::i32)
+unsigned FastEmit_X86ISD_FRCP_MVT_v4f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::v4f32)
return 0;
- if ((!Subtarget->is64Bit())) {
- return FastEmitInst_r(X86::DEC32r, X86::GR32RegisterClass, Op0);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::VRCPPSr, X86::VR128RegisterClass, Op0, Op0IsKill);
}
- if ((Subtarget->is64Bit())) {
- return FastEmitInst_r(X86::DEC64_32r, X86::GR32RegisterClass, Op0);
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::RCPPSr, X86::VR128RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_DEC_MVT_i64_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::i64)
+unsigned FastEmit_X86ISD_FRCP_MVT_v8f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::v8f32)
return 0;
- return FastEmitInst_r(X86::DEC64r, X86::GR64RegisterClass, Op0);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::VRCPPSYr, X86::VR256RegisterClass, Op0, Op0IsKill);
+ }
+ return 0;
}
-unsigned FastEmit_X86ISD_DEC_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_X86ISD_FRCP_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_X86ISD_DEC_MVT_i8_r(RetVT, Op0);
- case MVT::i16: return FastEmit_X86ISD_DEC_MVT_i16_r(RetVT, Op0);
- case MVT::i32: return FastEmit_X86ISD_DEC_MVT_i32_r(RetVT, Op0);
- case MVT::i64: return FastEmit_X86ISD_DEC_MVT_i64_r(RetVT, Op0);
+ case MVT::f32: return FastEmit_X86ISD_FRCP_MVT_f32_r(RetVT, Op0, Op0IsKill);
+ case MVT::v4f32: return FastEmit_X86ISD_FRCP_MVT_v4f32_r(RetVT, Op0, Op0IsKill);
+ case MVT::v8f32: return FastEmit_X86ISD_FRCP_MVT_v8f32_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
-// FastEmit functions for X86ISD::EH_RETURN.
-
-unsigned FastEmit_X86ISD_EH_RETURN_MVT_i32_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::isVoid)
- return 0;
- return FastEmitInst_r(X86::EH_RETURN, X86::GR32RegisterClass, Op0);
-}
+// FastEmit functions for X86ISD::FRSQRT.
-unsigned FastEmit_X86ISD_EH_RETURN_MVT_i64_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::isVoid)
+unsigned FastEmit_X86ISD_FRSQRT_MVT_f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::f32)
return 0;
- return FastEmitInst_r(X86::EH_RETURN64, X86::GR64RegisterClass, Op0);
-}
-
-unsigned FastEmit_X86ISD_EH_RETURN_r(MVT VT, MVT RetVT, unsigned Op0) {
- switch (VT.SimpleTy) {
- case MVT::i32: return FastEmit_X86ISD_EH_RETURN_MVT_i32_r(RetVT, Op0);
- case MVT::i64: return FastEmit_X86ISD_EH_RETURN_MVT_i64_r(RetVT, Op0);
- default: return 0;
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::RSQRTSSr, X86::FR32RegisterClass, Op0, Op0IsKill);
}
+ return 0;
}
-// FastEmit functions for X86ISD::FRCP.
-
-unsigned FastEmit_X86ISD_FRCP_MVT_f32_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::f32)
+unsigned FastEmit_X86ISD_FRSQRT_MVT_v4f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::v4f32)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::RCPSSr, X86::FR32RegisterClass, Op0);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::VRSQRTPSr, X86::VR128RegisterClass, Op0, Op0IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::RSQRTPSr, X86::VR128RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FRCP_MVT_v4f32_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::v4f32)
+unsigned FastEmit_X86ISD_FRSQRT_MVT_v8f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::v8f32)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::RCPPSr, X86::VR128RegisterClass, Op0);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::VRSQRTPSYr, X86::VR256RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FRCP_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_X86ISD_FRSQRT_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_X86ISD_FRCP_MVT_f32_r(RetVT, Op0);
- case MVT::v4f32: return FastEmit_X86ISD_FRCP_MVT_v4f32_r(RetVT, Op0);
+ case MVT::f32: return FastEmit_X86ISD_FRSQRT_MVT_f32_r(RetVT, Op0, Op0IsKill);
+ case MVT::v4f32: return FastEmit_X86ISD_FRSQRT_MVT_v4f32_r(RetVT, Op0, Op0IsKill);
+ case MVT::v8f32: return FastEmit_X86ISD_FRSQRT_MVT_v8f32_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
-// FastEmit functions for X86ISD::FRSQRT.
+// FastEmit functions for X86ISD::MEMBARRIER.
-unsigned FastEmit_X86ISD_FRSQRT_MVT_f32_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::f32)
- return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::RSQRTSSr, X86::FR32RegisterClass, Op0);
- }
- return 0;
-}
-
-unsigned FastEmit_X86ISD_FRSQRT_MVT_v4f32_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::v4f32)
+unsigned FastEmit_X86ISD_MEMBARRIER_MVT_i64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::isVoid)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_r(X86::RSQRTPSr, X86::VR128RegisterClass, Op0);
+ if ((Subtarget->is64Bit())) {
+ return FastEmitInst_r(X86::Int_MemBarrierNoSSE64, X86::GR64RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FRSQRT_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_X86ISD_MEMBARRIER_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_X86ISD_FRSQRT_MVT_f32_r(RetVT, Op0);
- case MVT::v4f32: return FastEmit_X86ISD_FRSQRT_MVT_v4f32_r(RetVT, Op0);
+ case MVT::i64: return FastEmit_X86ISD_MEMBARRIER_MVT_i64_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
-// FastEmit functions for X86ISD::INC.
+// FastEmit functions for X86ISD::MOVQ2DQ.
-unsigned FastEmit_X86ISD_INC_MVT_i8_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::i8)
+unsigned FastEmit_X86ISD_MOVQ2DQ_MVT_v1i64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::v2i64)
return 0;
- return FastEmitInst_r(X86::INC8r, X86::GR8RegisterClass, Op0);
+ return FastEmitInst_r(X86::MMX_MOVQ2DQrr, X86::VR128RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_X86ISD_INC_MVT_i16_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::i16)
- return 0;
- if ((!Subtarget->is64Bit())) {
- return FastEmitInst_r(X86::INC16r, X86::GR16RegisterClass, Op0);
- }
- if ((Subtarget->is64Bit())) {
- return FastEmitInst_r(X86::INC64_16r, X86::GR16RegisterClass, Op0);
+unsigned FastEmit_X86ISD_MOVQ2DQ_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::v1i64: return FastEmit_X86ISD_MOVQ2DQ_MVT_v1i64_r(RetVT, Op0, Op0IsKill);
+ default: return 0;
}
- return 0;
}
-unsigned FastEmit_X86ISD_INC_MVT_i32_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::i32)
+// FastEmit functions for X86ISD::MOVSHDUP.
+
+unsigned FastEmit_X86ISD_MOVSHDUP_MVT_v4i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::v4i32)
return 0;
- if ((!Subtarget->is64Bit())) {
- return FastEmitInst_r(X86::INC32r, X86::GR32RegisterClass, Op0);
- }
- if ((Subtarget->is64Bit())) {
- return FastEmitInst_r(X86::INC64_32r, X86::GR32RegisterClass, Op0);
- }
- return 0;
+ return FastEmitInst_r(X86::MOVSHDUPrr, X86::VR128RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_X86ISD_INC_MVT_i64_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::i64)
+unsigned FastEmit_X86ISD_MOVSHDUP_MVT_v4f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::v4f32)
return 0;
- return FastEmitInst_r(X86::INC64r, X86::GR64RegisterClass, Op0);
+ return FastEmitInst_r(X86::MOVSHDUPrr, X86::VR128RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_X86ISD_INC_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_X86ISD_MOVSHDUP_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_X86ISD_INC_MVT_i8_r(RetVT, Op0);
- case MVT::i16: return FastEmit_X86ISD_INC_MVT_i16_r(RetVT, Op0);
- case MVT::i32: return FastEmit_X86ISD_INC_MVT_i32_r(RetVT, Op0);
- case MVT::i64: return FastEmit_X86ISD_INC_MVT_i64_r(RetVT, Op0);
+ case MVT::v4i32: return FastEmit_X86ISD_MOVSHDUP_MVT_v4i32_r(RetVT, Op0, Op0IsKill);
+ case MVT::v4f32: return FastEmit_X86ISD_MOVSHDUP_MVT_v4f32_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
-// FastEmit functions for X86ISD::MOVQ2DQ.
+// FastEmit functions for X86ISD::MOVSLDUP.
-unsigned FastEmit_X86ISD_MOVQ2DQ_MVT_v1i64_r(MVT RetVT, unsigned Op0) {
- if (RetVT.SimpleTy != MVT::v2i64)
+unsigned FastEmit_X86ISD_MOVSLDUP_MVT_v4i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::v4i32)
+ return 0;
+ return FastEmitInst_r(X86::MOVSLDUPrr, X86::VR128RegisterClass, Op0, Op0IsKill);
+}
+
+unsigned FastEmit_X86ISD_MOVSLDUP_MVT_v4f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
+ if (RetVT.SimpleTy != MVT::v4f32)
return 0;
- return FastEmitInst_r(X86::MMX_MOVQ2DQrr, X86::VR128RegisterClass, Op0);
+ return FastEmitInst_r(X86::MOVSLDUPrr, X86::VR128RegisterClass, Op0, Op0IsKill);
}
-unsigned FastEmit_X86ISD_MOVQ2DQ_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_X86ISD_MOVSLDUP_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::v1i64: return FastEmit_X86ISD_MOVQ2DQ_MVT_v1i64_r(RetVT, Op0);
+ case MVT::v4i32: return FastEmit_X86ISD_MOVSLDUP_MVT_v4i32_r(RetVT, Op0, Op0IsKill);
+ case MVT::v4f32: return FastEmit_X86ISD_MOVSLDUP_MVT_v4f32_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::VZEXT_MOVL.
-unsigned FastEmit_X86ISD_VZEXT_MOVL_MVT_v2i64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_X86ISD_VZEXT_MOVL_MVT_v2i64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::v2i64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::MOVZPQILo2PQIrr, X86::VR128RegisterClass, Op0);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::VMOVZPQILo2PQIrr, X86::VR128RegisterClass, Op0, Op0IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::MOVZPQILo2PQIrr, X86::VR128RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_VZEXT_MOVL_MVT_v2f64_r(MVT RetVT, unsigned Op0) {
+unsigned FastEmit_X86ISD_VZEXT_MOVL_MVT_v2f64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
if (RetVT.SimpleTy != MVT::v2f64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_r(X86::MOVZPQILo2PQIrr, X86::VR128RegisterClass, Op0);
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_r(X86::MOVZPQILo2PQIrr, X86::VR128RegisterClass, Op0, Op0IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_VZEXT_MOVL_r(MVT VT, MVT RetVT, unsigned Op0) {
+unsigned FastEmit_X86ISD_VZEXT_MOVL_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
switch (VT.SimpleTy) {
- case MVT::v2i64: return FastEmit_X86ISD_VZEXT_MOVL_MVT_v2i64_r(RetVT, Op0);
- case MVT::v2f64: return FastEmit_X86ISD_VZEXT_MOVL_MVT_v2f64_r(RetVT, Op0);
+ case MVT::v2i64: return FastEmit_X86ISD_VZEXT_MOVL_MVT_v2i64_r(RetVT, Op0, Op0IsKill);
+ case MVT::v2f64: return FastEmit_X86ISD_VZEXT_MOVL_MVT_v2f64_r(RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// Top-level FastEmit function.
-unsigned FastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0) {
+unsigned FastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill) {
switch (Opcode) {
- case ISD::ANY_EXTEND: return FastEmit_ISD_ANY_EXTEND_r(VT, RetVT, Op0);
- case ISD::BIT_CONVERT: return FastEmit_ISD_BIT_CONVERT_r(VT, RetVT, Op0);
- case ISD::BRIND: return FastEmit_ISD_BRIND_r(VT, RetVT, Op0);
- case ISD::BSWAP: return FastEmit_ISD_BSWAP_r(VT, RetVT, Op0);
- case ISD::FABS: return FastEmit_ISD_FABS_r(VT, RetVT, Op0);
- case ISD::FCOS: return FastEmit_ISD_FCOS_r(VT, RetVT, Op0);
- case ISD::FNEG: return FastEmit_ISD_FNEG_r(VT, RetVT, Op0);
- case ISD::FP_EXTEND: return FastEmit_ISD_FP_EXTEND_r(VT, RetVT, Op0);
- case ISD::FP_ROUND: return FastEmit_ISD_FP_ROUND_r(VT, RetVT, Op0);
- case ISD::FP_TO_SINT: return FastEmit_ISD_FP_TO_SINT_r(VT, RetVT, Op0);
- case ISD::FSIN: return FastEmit_ISD_FSIN_r(VT, RetVT, Op0);
- case ISD::FSQRT: return FastEmit_ISD_FSQRT_r(VT, RetVT, Op0);
- case ISD::SCALAR_TO_VECTOR: return FastEmit_ISD_SCALAR_TO_VECTOR_r(VT, RetVT, Op0);
- case ISD::SIGN_EXTEND: return FastEmit_ISD_SIGN_EXTEND_r(VT, RetVT, Op0);
- case ISD::SINT_TO_FP: return FastEmit_ISD_SINT_TO_FP_r(VT, RetVT, Op0);
- case ISD::TRUNCATE: return FastEmit_ISD_TRUNCATE_r(VT, RetVT, Op0);
- case ISD::ZERO_EXTEND: return FastEmit_ISD_ZERO_EXTEND_r(VT, RetVT, Op0);
- case X86ISD::BSF: return FastEmit_X86ISD_BSF_r(VT, RetVT, Op0);
- case X86ISD::BSR: return FastEmit_X86ISD_BSR_r(VT, RetVT, Op0);
- case X86ISD::CALL: return FastEmit_X86ISD_CALL_r(VT, RetVT, Op0);
- case X86ISD::DEC: return FastEmit_X86ISD_DEC_r(VT, RetVT, Op0);
- case X86ISD::EH_RETURN: return FastEmit_X86ISD_EH_RETURN_r(VT, RetVT, Op0);
- case X86ISD::FRCP: return FastEmit_X86ISD_FRCP_r(VT, RetVT, Op0);
- case X86ISD::FRSQRT: return FastEmit_X86ISD_FRSQRT_r(VT, RetVT, Op0);
- case X86ISD::INC: return FastEmit_X86ISD_INC_r(VT, RetVT, Op0);
- case X86ISD::MOVQ2DQ: return FastEmit_X86ISD_MOVQ2DQ_r(VT, RetVT, Op0);
- case X86ISD::VZEXT_MOVL: return FastEmit_X86ISD_VZEXT_MOVL_r(VT, RetVT, Op0);
+ case ISD::ANY_EXTEND: return FastEmit_ISD_ANY_EXTEND_r(VT, RetVT, Op0, Op0IsKill);
+ case ISD::BIT_CONVERT: return FastEmit_ISD_BIT_CONVERT_r(VT, RetVT, Op0, Op0IsKill);
+ case ISD::BRIND: return FastEmit_ISD_BRIND_r(VT, RetVT, Op0, Op0IsKill);
+ case ISD::BSWAP: return FastEmit_ISD_BSWAP_r(VT, RetVT, Op0, Op0IsKill);
+ case ISD::FABS: return FastEmit_ISD_FABS_r(VT, RetVT, Op0, Op0IsKill);
+ case ISD::FCOS: return FastEmit_ISD_FCOS_r(VT, RetVT, Op0, Op0IsKill);
+ case ISD::FNEG: return FastEmit_ISD_FNEG_r(VT, RetVT, Op0, Op0IsKill);
+ case ISD::FP_EXTEND: return FastEmit_ISD_FP_EXTEND_r(VT, RetVT, Op0, Op0IsKill);
+ case ISD::FP_ROUND: return FastEmit_ISD_FP_ROUND_r(VT, RetVT, Op0, Op0IsKill);
+ case ISD::FP_TO_SINT: return FastEmit_ISD_FP_TO_SINT_r(VT, RetVT, Op0, Op0IsKill);
+ case ISD::FSIN: return FastEmit_ISD_FSIN_r(VT, RetVT, Op0, Op0IsKill);
+ case ISD::FSQRT: return FastEmit_ISD_FSQRT_r(VT, RetVT, Op0, Op0IsKill);
+ case ISD::SCALAR_TO_VECTOR: return FastEmit_ISD_SCALAR_TO_VECTOR_r(VT, RetVT, Op0, Op0IsKill);
+ case ISD::SIGN_EXTEND: return FastEmit_ISD_SIGN_EXTEND_r(VT, RetVT, Op0, Op0IsKill);
+ case ISD::SINT_TO_FP: return FastEmit_ISD_SINT_TO_FP_r(VT, RetVT, Op0, Op0IsKill);
+ case ISD::TRUNCATE: return FastEmit_ISD_TRUNCATE_r(VT, RetVT, Op0, Op0IsKill);
+ case ISD::ZERO_EXTEND: return FastEmit_ISD_ZERO_EXTEND_r(VT, RetVT, Op0, Op0IsKill);
+ case X86ISD::CALL: return FastEmit_X86ISD_CALL_r(VT, RetVT, Op0, Op0IsKill);
+ case X86ISD::EH_RETURN: return FastEmit_X86ISD_EH_RETURN_r(VT, RetVT, Op0, Op0IsKill);
+ case X86ISD::FRCP: return FastEmit_X86ISD_FRCP_r(VT, RetVT, Op0, Op0IsKill);
+ case X86ISD::FRSQRT: return FastEmit_X86ISD_FRSQRT_r(VT, RetVT, Op0, Op0IsKill);
+ case X86ISD::MEMBARRIER: return FastEmit_X86ISD_MEMBARRIER_r(VT, RetVT, Op0, Op0IsKill);
+ case X86ISD::MOVQ2DQ: return FastEmit_X86ISD_MOVQ2DQ_r(VT, RetVT, Op0, Op0IsKill);
+ case X86ISD::MOVSHDUP: return FastEmit_X86ISD_MOVSHDUP_r(VT, RetVT, Op0, Op0IsKill);
+ case X86ISD::MOVSLDUP: return FastEmit_X86ISD_MOVSLDUP_r(VT, RetVT, Op0, Op0IsKill);
+ case X86ISD::VZEXT_MOVL: return FastEmit_X86ISD_VZEXT_MOVL_r(VT, RetVT, Op0, Op0IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::ADD.
-unsigned FastEmit_ISD_ADD_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_ADD_MVT_i8_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_ri(X86::ADD8ri, X86::GR8RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::ADD8ri, X86::GR8RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_ADD_MVT_i16_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_ADD_MVT_i16_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i16)
return 0;
- return FastEmitInst_ri(X86::ADD16ri, X86::GR16RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::ADD16ri, X86::GR16RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_ADD_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_ADD_MVT_i32_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_ri(X86::ADD32ri, X86::GR32RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::ADD32ri, X86::GR32RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_ADD_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_ADD_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_ADD_MVT_i8_ri(RetVT, Op0, imm1);
- case MVT::i16: return FastEmit_ISD_ADD_MVT_i16_ri(RetVT, Op0, imm1);
- case MVT::i32: return FastEmit_ISD_ADD_MVT_i32_ri(RetVT, Op0, imm1);
+ case MVT::i8: return FastEmit_ISD_ADD_MVT_i8_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i16: return FastEmit_ISD_ADD_MVT_i16_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i32: return FastEmit_ISD_ADD_MVT_i32_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
// FastEmit functions for ISD::ADDC.
-unsigned FastEmit_ISD_ADDC_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_ADDC_MVT_i32_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_ri(X86::ADD32ri, X86::GR32RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::ADD32ri, X86::GR32RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_ADDC_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_ADDC_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i32: return FastEmit_ISD_ADDC_MVT_i32_ri(RetVT, Op0, imm1);
+ case MVT::i32: return FastEmit_ISD_ADDC_MVT_i32_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
// FastEmit functions for ISD::ADDE.
-unsigned FastEmit_ISD_ADDE_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_ADDE_MVT_i8_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_ri(X86::ADC8ri, X86::GR8RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::ADC8ri, X86::GR8RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_ADDE_MVT_i16_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_ADDE_MVT_i16_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i16)
return 0;
- return FastEmitInst_ri(X86::ADC16ri, X86::GR16RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::ADC16ri, X86::GR16RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_ADDE_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_ADDE_MVT_i32_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_ri(X86::ADC32ri, X86::GR32RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::ADC32ri, X86::GR32RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_ADDE_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_ADDE_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_ADDE_MVT_i8_ri(RetVT, Op0, imm1);
- case MVT::i16: return FastEmit_ISD_ADDE_MVT_i16_ri(RetVT, Op0, imm1);
- case MVT::i32: return FastEmit_ISD_ADDE_MVT_i32_ri(RetVT, Op0, imm1);
+ case MVT::i8: return FastEmit_ISD_ADDE_MVT_i8_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i16: return FastEmit_ISD_ADDE_MVT_i16_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i32: return FastEmit_ISD_ADDE_MVT_i32_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
// FastEmit functions for ISD::AND.
-unsigned FastEmit_ISD_AND_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_AND_MVT_i8_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_ri(X86::AND8ri, X86::GR8RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::AND8ri, X86::GR8RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_AND_MVT_i16_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_AND_MVT_i16_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i16)
return 0;
- return FastEmitInst_ri(X86::AND16ri, X86::GR16RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::AND16ri, X86::GR16RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_AND_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_AND_MVT_i32_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_ri(X86::AND32ri, X86::GR32RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::AND32ri, X86::GR32RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_AND_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_AND_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_AND_MVT_i8_ri(RetVT, Op0, imm1);
- case MVT::i16: return FastEmit_ISD_AND_MVT_i16_ri(RetVT, Op0, imm1);
- case MVT::i32: return FastEmit_ISD_AND_MVT_i32_ri(RetVT, Op0, imm1);
+ case MVT::i8: return FastEmit_ISD_AND_MVT_i8_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i16: return FastEmit_ISD_AND_MVT_i16_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i32: return FastEmit_ISD_AND_MVT_i32_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
// FastEmit functions for ISD::MUL.
-unsigned FastEmit_ISD_MUL_MVT_i16_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_MUL_MVT_i16_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i16)
return 0;
- return FastEmitInst_ri(X86::IMUL16rri, X86::GR16RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::IMUL16rri, X86::GR16RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_MUL_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_MUL_MVT_i32_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_ri(X86::IMUL32rri, X86::GR32RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::IMUL32rri, X86::GR32RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_MUL_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_MUL_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i16: return FastEmit_ISD_MUL_MVT_i16_ri(RetVT, Op0, imm1);
- case MVT::i32: return FastEmit_ISD_MUL_MVT_i32_ri(RetVT, Op0, imm1);
+ case MVT::i16: return FastEmit_ISD_MUL_MVT_i16_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i32: return FastEmit_ISD_MUL_MVT_i32_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
// FastEmit functions for ISD::OR.
-unsigned FastEmit_ISD_OR_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_OR_MVT_i8_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_ri(X86::OR8ri, X86::GR8RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::OR8ri, X86::GR8RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_OR_MVT_i16_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_OR_MVT_i16_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i16)
return 0;
- return FastEmitInst_ri(X86::OR16ri, X86::GR16RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::OR16ri, X86::GR16RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_OR_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_OR_MVT_i32_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_ri(X86::OR32ri, X86::GR32RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::OR32ri, X86::GR32RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_OR_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_OR_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_OR_MVT_i8_ri(RetVT, Op0, imm1);
- case MVT::i16: return FastEmit_ISD_OR_MVT_i16_ri(RetVT, Op0, imm1);
- case MVT::i32: return FastEmit_ISD_OR_MVT_i32_ri(RetVT, Op0, imm1);
+ case MVT::i8: return FastEmit_ISD_OR_MVT_i8_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i16: return FastEmit_ISD_OR_MVT_i16_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i32: return FastEmit_ISD_OR_MVT_i32_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
// FastEmit functions for ISD::ROTL.
-unsigned FastEmit_ISD_ROTL_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_ROTL_MVT_i8_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_ri(X86::ROL8ri, X86::GR8RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::ROL8ri, X86::GR8RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_ROTL_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_ROTL_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_ROTL_MVT_i8_ri(RetVT, Op0, imm1);
+ case MVT::i8: return FastEmit_ISD_ROTL_MVT_i8_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
// FastEmit functions for ISD::ROTR.
-unsigned FastEmit_ISD_ROTR_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_ROTR_MVT_i8_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_ri(X86::ROR8ri, X86::GR8RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::ROR8ri, X86::GR8RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_ROTR_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_ROTR_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_ROTR_MVT_i8_ri(RetVT, Op0, imm1);
+ case MVT::i8: return FastEmit_ISD_ROTR_MVT_i8_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
// FastEmit functions for ISD::SHL.
-unsigned FastEmit_ISD_SHL_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SHL_MVT_i8_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_ri(X86::SHL8ri, X86::GR8RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::SHL8ri, X86::GR8RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_SHL_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SHL_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_SHL_MVT_i8_ri(RetVT, Op0, imm1);
+ case MVT::i8: return FastEmit_ISD_SHL_MVT_i8_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
// FastEmit functions for ISD::SRA.
-unsigned FastEmit_ISD_SRA_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SRA_MVT_i8_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_ri(X86::SAR8ri, X86::GR8RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::SAR8ri, X86::GR8RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_SRA_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SRA_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_SRA_MVT_i8_ri(RetVT, Op0, imm1);
+ case MVT::i8: return FastEmit_ISD_SRA_MVT_i8_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
// FastEmit functions for ISD::SRL.
-unsigned FastEmit_ISD_SRL_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SRL_MVT_i8_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_ri(X86::SHR8ri, X86::GR8RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::SHR8ri, X86::GR8RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_SRL_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SRL_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_SRL_MVT_i8_ri(RetVT, Op0, imm1);
+ case MVT::i8: return FastEmit_ISD_SRL_MVT_i8_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
// FastEmit functions for ISD::SUB.
-unsigned FastEmit_ISD_SUB_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SUB_MVT_i8_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_ri(X86::SUB8ri, X86::GR8RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::SUB8ri, X86::GR8RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_SUB_MVT_i16_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SUB_MVT_i16_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i16)
return 0;
- return FastEmitInst_ri(X86::SUB16ri, X86::GR16RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::SUB16ri, X86::GR16RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_SUB_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SUB_MVT_i32_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_ri(X86::SUB32ri, X86::GR32RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::SUB32ri, X86::GR32RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_SUB_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SUB_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_SUB_MVT_i8_ri(RetVT, Op0, imm1);
- case MVT::i16: return FastEmit_ISD_SUB_MVT_i16_ri(RetVT, Op0, imm1);
- case MVT::i32: return FastEmit_ISD_SUB_MVT_i32_ri(RetVT, Op0, imm1);
+ case MVT::i8: return FastEmit_ISD_SUB_MVT_i8_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i16: return FastEmit_ISD_SUB_MVT_i16_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i32: return FastEmit_ISD_SUB_MVT_i32_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
// FastEmit functions for ISD::SUBC.
-unsigned FastEmit_ISD_SUBC_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SUBC_MVT_i32_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_ri(X86::SUB32ri, X86::GR32RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::SUB32ri, X86::GR32RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_SUBC_MVT_i64_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SUBC_MVT_i64_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i64)
return 0;
- return FastEmitInst_ri(X86::SUB64ri32, X86::GR64RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::SUB64ri32, X86::GR64RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_SUBC_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SUBC_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i32: return FastEmit_ISD_SUBC_MVT_i32_ri(RetVT, Op0, imm1);
- case MVT::i64: return FastEmit_ISD_SUBC_MVT_i64_ri(RetVT, Op0, imm1);
+ case MVT::i32: return FastEmit_ISD_SUBC_MVT_i32_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i64: return FastEmit_ISD_SUBC_MVT_i64_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
// FastEmit functions for ISD::SUBE.
-unsigned FastEmit_ISD_SUBE_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SUBE_MVT_i8_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_ri(X86::SBB8ri, X86::GR8RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::SBB8ri, X86::GR8RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_SUBE_MVT_i16_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SUBE_MVT_i16_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i16)
return 0;
- return FastEmitInst_ri(X86::SBB16ri, X86::GR16RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::SBB16ri, X86::GR16RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_SUBE_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SUBE_MVT_i32_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_ri(X86::SBB32ri, X86::GR32RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::SBB32ri, X86::GR32RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_SUBE_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_SUBE_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_SUBE_MVT_i8_ri(RetVT, Op0, imm1);
- case MVT::i16: return FastEmit_ISD_SUBE_MVT_i16_ri(RetVT, Op0, imm1);
- case MVT::i32: return FastEmit_ISD_SUBE_MVT_i32_ri(RetVT, Op0, imm1);
+ case MVT::i8: return FastEmit_ISD_SUBE_MVT_i8_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i16: return FastEmit_ISD_SUBE_MVT_i16_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i32: return FastEmit_ISD_SUBE_MVT_i32_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
// FastEmit functions for ISD::XOR.
-unsigned FastEmit_ISD_XOR_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_XOR_MVT_i8_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_ri(X86::XOR8ri, X86::GR8RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::XOR8ri, X86::GR8RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_XOR_MVT_i16_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_XOR_MVT_i16_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i16)
return 0;
- return FastEmitInst_ri(X86::XOR16ri, X86::GR16RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::XOR16ri, X86::GR16RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_XOR_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_XOR_MVT_i32_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_ri(X86::XOR32ri, X86::GR32RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::XOR32ri, X86::GR32RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_ISD_XOR_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ISD_XOR_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_XOR_MVT_i8_ri(RetVT, Op0, imm1);
- case MVT::i16: return FastEmit_ISD_XOR_MVT_i16_ri(RetVT, Op0, imm1);
- case MVT::i32: return FastEmit_ISD_XOR_MVT_i32_ri(RetVT, Op0, imm1);
- default: return 0;
- }
-}
-
-// FastEmit functions for X86ISD::ADD.
-
-unsigned FastEmit_X86ISD_ADD_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::i8)
- return 0;
- return FastEmitInst_ri(X86::ADD8ri, X86::GR8RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_ADD_MVT_i16_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::i16)
- return 0;
- return FastEmitInst_ri(X86::ADD16ri, X86::GR16RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_ADD_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::i32)
- return 0;
- return FastEmitInst_ri(X86::ADD32ri, X86::GR32RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_ADD_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
- switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_X86ISD_ADD_MVT_i8_ri(RetVT, Op0, imm1);
- case MVT::i16: return FastEmit_X86ISD_ADD_MVT_i16_ri(RetVT, Op0, imm1);
- case MVT::i32: return FastEmit_X86ISD_ADD_MVT_i32_ri(RetVT, Op0, imm1);
- default: return 0;
- }
-}
-
-// FastEmit functions for X86ISD::AND.
-
-unsigned FastEmit_X86ISD_AND_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::i8)
- return 0;
- return FastEmitInst_ri(X86::AND8ri, X86::GR8RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_AND_MVT_i16_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::i16)
- return 0;
- return FastEmitInst_ri(X86::AND16ri, X86::GR16RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_AND_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::i32)
- return 0;
- return FastEmitInst_ri(X86::AND32ri, X86::GR32RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_AND_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
- switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_X86ISD_AND_MVT_i8_ri(RetVT, Op0, imm1);
- case MVT::i16: return FastEmit_X86ISD_AND_MVT_i16_ri(RetVT, Op0, imm1);
- case MVT::i32: return FastEmit_X86ISD_AND_MVT_i32_ri(RetVT, Op0, imm1);
+ case MVT::i8: return FastEmit_ISD_XOR_MVT_i8_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i16: return FastEmit_ISD_XOR_MVT_i16_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i32: return FastEmit_ISD_XOR_MVT_i32_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
// FastEmit functions for X86ISD::CMP.
-unsigned FastEmit_X86ISD_CMP_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::isVoid)
- return 0;
- return FastEmitInst_ri(X86::CMP8ri, X86::GR8RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_CMP_MVT_i16_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::isVoid)
- return 0;
- return FastEmitInst_ri(X86::CMP16ri, X86::GR16RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_CMP_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::isVoid)
- return 0;
- return FastEmitInst_ri(X86::CMP32ri, X86::GR32RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_CMP_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
- switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_X86ISD_CMP_MVT_i8_ri(RetVT, Op0, imm1);
- case MVT::i16: return FastEmit_X86ISD_CMP_MVT_i16_ri(RetVT, Op0, imm1);
- case MVT::i32: return FastEmit_X86ISD_CMP_MVT_i32_ri(RetVT, Op0, imm1);
- default: return 0;
- }
-}
-
-// FastEmit functions for X86ISD::OR.
-
-unsigned FastEmit_X86ISD_OR_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::i8)
- return 0;
- return FastEmitInst_ri(X86::OR8ri, X86::GR8RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_OR_MVT_i16_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::i16)
- return 0;
- return FastEmitInst_ri(X86::OR16ri, X86::GR16RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_OR_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_X86ISD_CMP_MVT_i8_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_ri(X86::OR32ri, X86::GR32RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_OR_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
- switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_X86ISD_OR_MVT_i8_ri(RetVT, Op0, imm1);
- case MVT::i16: return FastEmit_X86ISD_OR_MVT_i16_ri(RetVT, Op0, imm1);
- case MVT::i32: return FastEmit_X86ISD_OR_MVT_i32_ri(RetVT, Op0, imm1);
- default: return 0;
- }
-}
-
-// FastEmit functions for X86ISD::SMUL.
-
-unsigned FastEmit_X86ISD_SMUL_MVT_i16_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::i16)
- return 0;
- return FastEmitInst_ri(X86::IMUL16rri, X86::GR16RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::CMP8ri, X86::GR8RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_X86ISD_SMUL_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_X86ISD_CMP_MVT_i16_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_ri(X86::IMUL32rri, X86::GR32RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::CMP16ri, X86::GR16RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_X86ISD_SMUL_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
- switch (VT.SimpleTy) {
- case MVT::i16: return FastEmit_X86ISD_SMUL_MVT_i16_ri(RetVT, Op0, imm1);
- case MVT::i32: return FastEmit_X86ISD_SMUL_MVT_i32_ri(RetVT, Op0, imm1);
- default: return 0;
- }
-}
-
-// FastEmit functions for X86ISD::SUB.
-
-unsigned FastEmit_X86ISD_SUB_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::i8)
- return 0;
- return FastEmitInst_ri(X86::SUB8ri, X86::GR8RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_SUB_MVT_i16_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::i16)
- return 0;
- return FastEmitInst_ri(X86::SUB16ri, X86::GR16RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_SUB_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_X86ISD_CMP_MVT_i32_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_ri(X86::SUB32ri, X86::GR32RegisterClass, Op0, imm1);
+ return FastEmitInst_ri(X86::CMP32ri, X86::GR32RegisterClass, Op0, Op0IsKill, imm1);
}
-unsigned FastEmit_X86ISD_SUB_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_X86ISD_CMP_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_X86ISD_SUB_MVT_i8_ri(RetVT, Op0, imm1);
- case MVT::i16: return FastEmit_X86ISD_SUB_MVT_i16_ri(RetVT, Op0, imm1);
- case MVT::i32: return FastEmit_X86ISD_SUB_MVT_i32_ri(RetVT, Op0, imm1);
+ case MVT::i8: return FastEmit_X86ISD_CMP_MVT_i8_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i16: return FastEmit_X86ISD_CMP_MVT_i16_ri(RetVT, Op0, Op0IsKill, imm1);
+ case MVT::i32: return FastEmit_X86ISD_CMP_MVT_i32_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
// FastEmit functions for X86ISD::TC_RETURN.
-unsigned FastEmit_X86ISD_TC_RETURN_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_X86ISD_TC_RETURN_MVT_i32_ri(MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
if (RetVT.SimpleTy != MVT::isVoid)
return 0;
- return FastEmitInst_ri(X86::TCRETURNri, X86::GR32RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_TC_RETURN_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
- switch (VT.SimpleTy) {
- case MVT::i32: return FastEmit_X86ISD_TC_RETURN_MVT_i32_ri(RetVT, Op0, imm1);
- default: return 0;
+ if ((!Subtarget->is64Bit())) {
+ return FastEmitInst_ri(X86::TCRETURNri, X86::GR32_TCRegisterClass, Op0, Op0IsKill, imm1);
}
+ return 0;
}
-// FastEmit functions for X86ISD::XOR.
-
-unsigned FastEmit_X86ISD_XOR_MVT_i8_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::i8)
- return 0;
- return FastEmitInst_ri(X86::XOR8ri, X86::GR8RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_XOR_MVT_i16_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::i16)
- return 0;
- return FastEmitInst_ri(X86::XOR16ri, X86::GR16RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_XOR_MVT_i32_ri(MVT RetVT, unsigned Op0, uint64_t imm1) {
- if (RetVT.SimpleTy != MVT::i32)
- return 0;
- return FastEmitInst_ri(X86::XOR32ri, X86::GR32RegisterClass, Op0, imm1);
-}
-
-unsigned FastEmit_X86ISD_XOR_ri(MVT VT, MVT RetVT, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_X86ISD_TC_RETURN_ri(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_X86ISD_XOR_MVT_i8_ri(RetVT, Op0, imm1);
- case MVT::i16: return FastEmit_X86ISD_XOR_MVT_i16_ri(RetVT, Op0, imm1);
- case MVT::i32: return FastEmit_X86ISD_XOR_MVT_i32_ri(RetVT, Op0, imm1);
+ case MVT::i32: return FastEmit_X86ISD_TC_RETURN_MVT_i32_ri(RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
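
For X86ISD::TC_RETURN the regenerated helper now bails out on 64-bit targets and, on 32-bit, constrains the call target to the dedicated GR32_TC register class instead of plain GR32. A standalone sketch of that guard, with the enum and helper below invented purely for illustration:

// Illustration only: gate on pointer width, then restrict the register
// class used for the tail-call target.  Enum and helper are invented.
#include <cstdio>

enum class TargetClass { None, GR32_TC };

static TargetClass pickTailCallClass(bool is64Bit) {
  if (!is64Bit)
    return TargetClass::GR32_TC; // 32-bit: registers legal for the call target
  return TargetClass::None;      // 64-bit: handled elsewhere, emit nothing here
}

int main() {
  std::printf("%s\n",
              pickTailCallClass(false) == TargetClass::GR32_TC ? "GR32_TC"
                                                               : "none");
  return 0;
}
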
// Top-level FastEmit function.
-unsigned FastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, uint64_t imm1) {
+unsigned FastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill, uint64_t imm1) {
switch (Opcode) {
- case ISD::ADD: return FastEmit_ISD_ADD_ri(VT, RetVT, Op0, imm1);
- case ISD::ADDC: return FastEmit_ISD_ADDC_ri(VT, RetVT, Op0, imm1);
- case ISD::ADDE: return FastEmit_ISD_ADDE_ri(VT, RetVT, Op0, imm1);
- case ISD::AND: return FastEmit_ISD_AND_ri(VT, RetVT, Op0, imm1);
- case ISD::MUL: return FastEmit_ISD_MUL_ri(VT, RetVT, Op0, imm1);
- case ISD::OR: return FastEmit_ISD_OR_ri(VT, RetVT, Op0, imm1);
- case ISD::ROTL: return FastEmit_ISD_ROTL_ri(VT, RetVT, Op0, imm1);
- case ISD::ROTR: return FastEmit_ISD_ROTR_ri(VT, RetVT, Op0, imm1);
- case ISD::SHL: return FastEmit_ISD_SHL_ri(VT, RetVT, Op0, imm1);
- case ISD::SRA: return FastEmit_ISD_SRA_ri(VT, RetVT, Op0, imm1);
- case ISD::SRL: return FastEmit_ISD_SRL_ri(VT, RetVT, Op0, imm1);
- case ISD::SUB: return FastEmit_ISD_SUB_ri(VT, RetVT, Op0, imm1);
- case ISD::SUBC: return FastEmit_ISD_SUBC_ri(VT, RetVT, Op0, imm1);
- case ISD::SUBE: return FastEmit_ISD_SUBE_ri(VT, RetVT, Op0, imm1);
- case ISD::XOR: return FastEmit_ISD_XOR_ri(VT, RetVT, Op0, imm1);
- case X86ISD::ADD: return FastEmit_X86ISD_ADD_ri(VT, RetVT, Op0, imm1);
- case X86ISD::AND: return FastEmit_X86ISD_AND_ri(VT, RetVT, Op0, imm1);
- case X86ISD::CMP: return FastEmit_X86ISD_CMP_ri(VT, RetVT, Op0, imm1);
- case X86ISD::OR: return FastEmit_X86ISD_OR_ri(VT, RetVT, Op0, imm1);
- case X86ISD::SMUL: return FastEmit_X86ISD_SMUL_ri(VT, RetVT, Op0, imm1);
- case X86ISD::SUB: return FastEmit_X86ISD_SUB_ri(VT, RetVT, Op0, imm1);
- case X86ISD::TC_RETURN: return FastEmit_X86ISD_TC_RETURN_ri(VT, RetVT, Op0, imm1);
- case X86ISD::XOR: return FastEmit_X86ISD_XOR_ri(VT, RetVT, Op0, imm1);
+ case ISD::ADD: return FastEmit_ISD_ADD_ri(VT, RetVT, Op0, Op0IsKill, imm1);
+ case ISD::ADDC: return FastEmit_ISD_ADDC_ri(VT, RetVT, Op0, Op0IsKill, imm1);
+ case ISD::ADDE: return FastEmit_ISD_ADDE_ri(VT, RetVT, Op0, Op0IsKill, imm1);
+ case ISD::AND: return FastEmit_ISD_AND_ri(VT, RetVT, Op0, Op0IsKill, imm1);
+ case ISD::MUL: return FastEmit_ISD_MUL_ri(VT, RetVT, Op0, Op0IsKill, imm1);
+ case ISD::OR: return FastEmit_ISD_OR_ri(VT, RetVT, Op0, Op0IsKill, imm1);
+ case ISD::ROTL: return FastEmit_ISD_ROTL_ri(VT, RetVT, Op0, Op0IsKill, imm1);
+ case ISD::ROTR: return FastEmit_ISD_ROTR_ri(VT, RetVT, Op0, Op0IsKill, imm1);
+ case ISD::SHL: return FastEmit_ISD_SHL_ri(VT, RetVT, Op0, Op0IsKill, imm1);
+ case ISD::SRA: return FastEmit_ISD_SRA_ri(VT, RetVT, Op0, Op0IsKill, imm1);
+ case ISD::SRL: return FastEmit_ISD_SRL_ri(VT, RetVT, Op0, Op0IsKill, imm1);
+ case ISD::SUB: return FastEmit_ISD_SUB_ri(VT, RetVT, Op0, Op0IsKill, imm1);
+ case ISD::SUBC: return FastEmit_ISD_SUBC_ri(VT, RetVT, Op0, Op0IsKill, imm1);
+ case ISD::SUBE: return FastEmit_ISD_SUBE_ri(VT, RetVT, Op0, Op0IsKill, imm1);
+ case ISD::XOR: return FastEmit_ISD_XOR_ri(VT, RetVT, Op0, Op0IsKill, imm1);
+ case X86ISD::CMP: return FastEmit_X86ISD_CMP_ri(VT, RetVT, Op0, Op0IsKill, imm1);
+ case X86ISD::TC_RETURN: return FastEmit_X86ISD_TC_RETURN_ri(VT, RetVT, Op0, Op0IsKill, imm1);
default: return 0;
}
}
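
The hunk above shows the regenerated register/immediate selectors taking a per-operand kill flag: the top-level FastEmit_ri dispatcher accepts Op0IsKill and forwards it unchanged through the per-opcode and per-type helpers down to FastEmitInst_ri. A minimal standalone sketch of that dispatch shape, with all names below invented for illustration:

// Illustration only: threading a per-operand kill flag through a
// FastEmit-style dispatch chain.  All names here are invented.
#include <cstdint>
#include <cstdio>

// Pretend instruction emitter: the kill flag records whether the
// source virtual register dies at this use.
static unsigned EmitInst_ri(unsigned Opcode, unsigned Op0, bool Op0IsKill,
                            uint64_t Imm) {
  std::printf("opcode=%u op0=%u kill=%d imm=%llu\n", Opcode, Op0,
              (int)Op0IsKill, (unsigned long long)Imm);
  return Op0 + 1; // stand-in for the freshly created result register
}

// Per-type helper: forwards the flag unchanged, in the same shape as the
// generated FastEmit_*_MVT_*_ri functions in the hunk above.
static unsigned Emit_ADD_i32_ri(unsigned Op0, bool Op0IsKill, uint64_t Imm) {
  return EmitInst_ri(/*ADD32ri*/ 1, Op0, Op0IsKill, Imm);
}

int main() {
  Emit_ADD_i32_ri(/*vreg*/ 5, /*Op0IsKill*/ true, 42);
  return 0;
}
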
// FastEmit functions for ISD::ADD.
-unsigned FastEmit_ISD_ADD_MVT_i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADD_MVT_i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_rr(X86::ADD8rr, X86::GR8RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::ADD8rr, X86::GR8RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_ADD_MVT_i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADD_MVT_i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i16)
return 0;
- return FastEmitInst_rr(X86::ADD16rr, X86::GR16RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::ADD16rr, X86::GR16RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_ADD_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADD_MVT_i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::ADD32rr, X86::GR32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::ADD32rr, X86::GR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_ADD_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADD_MVT_i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i64)
return 0;
- return FastEmitInst_rr(X86::ADD64rr, X86::GR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::ADD64rr, X86::GR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_ADD_MVT_v8i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADD_MVT_v8i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v8i8)
return 0;
- if ((Subtarget->hasMMX())) {
- return FastEmitInst_rr(X86::MMX_PADDBrr, X86::VR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasMMX() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MMX_PADDBrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_ADD_MVT_v16i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADD_MVT_v16i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v16i8)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::PADDBrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VPADDBrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::PADDBrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_ADD_MVT_v4i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADD_MVT_v4i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v4i16)
return 0;
- if ((Subtarget->hasMMX())) {
- return FastEmitInst_rr(X86::MMX_PADDWrr, X86::VR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasMMX() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MMX_PADDWrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_ADD_MVT_v8i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADD_MVT_v8i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v8i16)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::PADDWrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VPADDWrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::PADDWrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_ADD_MVT_v2i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADD_MVT_v2i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2i32)
return 0;
- if ((Subtarget->hasMMX())) {
- return FastEmitInst_rr(X86::MMX_PADDDrr, X86::VR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasMMX() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MMX_PADDDrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_ADD_MVT_v4i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADD_MVT_v4i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v4i32)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::PADDDrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VPADDDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::PADDDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_ADD_MVT_v1i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADD_MVT_v1i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v1i64)
return 0;
- if ((Subtarget->hasMMX())) {
- return FastEmitInst_rr(X86::MMX_PADDQrr, X86::VR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasMMX() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MMX_PADDQrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_ADD_MVT_v2i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADD_MVT_v2i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2i64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::PADDQrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VPADDQrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::PADDQrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_ADD_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADD_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_ADD_MVT_i8_rr(RetVT, Op0, Op1);
- case MVT::i16: return FastEmit_ISD_ADD_MVT_i16_rr(RetVT, Op0, Op1);
- case MVT::i32: return FastEmit_ISD_ADD_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_ISD_ADD_MVT_i64_rr(RetVT, Op0, Op1);
- case MVT::v8i8: return FastEmit_ISD_ADD_MVT_v8i8_rr(RetVT, Op0, Op1);
- case MVT::v16i8: return FastEmit_ISD_ADD_MVT_v16i8_rr(RetVT, Op0, Op1);
- case MVT::v4i16: return FastEmit_ISD_ADD_MVT_v4i16_rr(RetVT, Op0, Op1);
- case MVT::v8i16: return FastEmit_ISD_ADD_MVT_v8i16_rr(RetVT, Op0, Op1);
- case MVT::v2i32: return FastEmit_ISD_ADD_MVT_v2i32_rr(RetVT, Op0, Op1);
- case MVT::v4i32: return FastEmit_ISD_ADD_MVT_v4i32_rr(RetVT, Op0, Op1);
- case MVT::v1i64: return FastEmit_ISD_ADD_MVT_v1i64_rr(RetVT, Op0, Op1);
- case MVT::v2i64: return FastEmit_ISD_ADD_MVT_v2i64_rr(RetVT, Op0, Op1);
+ case MVT::i8: return FastEmit_ISD_ADD_MVT_i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i16: return FastEmit_ISD_ADD_MVT_i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i32: return FastEmit_ISD_ADD_MVT_i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i64: return FastEmit_ISD_ADD_MVT_i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v8i8: return FastEmit_ISD_ADD_MVT_v8i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v16i8: return FastEmit_ISD_ADD_MVT_v16i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4i16: return FastEmit_ISD_ADD_MVT_v4i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v8i16: return FastEmit_ISD_ADD_MVT_v8i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2i32: return FastEmit_ISD_ADD_MVT_v2i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4i32: return FastEmit_ISD_ADD_MVT_v4i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v1i64: return FastEmit_ISD_ADD_MVT_v1i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2i64: return FastEmit_ISD_ADD_MVT_v2i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
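
In the register/register selectors above, an AVX branch is now tried first and the SSE/MMX forms carry an explicit !Subtarget->hasAVX() term, so at most one predicate can fire and the VEX-encoded opcode wins whenever AVX is available. A small standalone sketch of that ordered feature check, using invented flags and opcode numbers:

// Illustration only: ordered subtarget checks choose the first matching
// encoding; the flags and opcode numbers are invented.
#include <cstdio>

struct Features {
  bool hasAVX;
  bool hasSSE2;
};

static unsigned pickVectorAddOpcode(const Features &F) {
  if (F.hasAVX)
    return 100; // stand-in for the VEX-encoded VPADD form
  if (F.hasSSE2 && !F.hasAVX)
    return 200; // stand-in for the legacy SSE2 PADD form
  return 0;     // no suitable vector add available
}

int main() {
  Features F{/*hasAVX=*/true, /*hasSSE2=*/true};
  std::printf("selected opcode: %u\n", pickVectorAddOpcode(F));
  return 0;
}
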
// FastEmit functions for ISD::ADDC.
-unsigned FastEmit_ISD_ADDC_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADDC_MVT_i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::ADD32rr, X86::GR32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::ADD32rr, X86::GR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_ADDC_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADDC_MVT_i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i64)
return 0;
- return FastEmitInst_rr(X86::ADD64rr, X86::GR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::ADD64rr, X86::GR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_ADDC_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADDC_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::i32: return FastEmit_ISD_ADDC_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_ISD_ADDC_MVT_i64_rr(RetVT, Op0, Op1);
+ case MVT::i32: return FastEmit_ISD_ADDC_MVT_i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i64: return FastEmit_ISD_ADDC_MVT_i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::ADDE.
-unsigned FastEmit_ISD_ADDE_MVT_i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADDE_MVT_i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_rr(X86::ADC8rr, X86::GR8RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::ADC8rr, X86::GR8RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_ADDE_MVT_i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADDE_MVT_i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i16)
return 0;
- return FastEmitInst_rr(X86::ADC16rr, X86::GR16RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::ADC16rr, X86::GR16RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_ADDE_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADDE_MVT_i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::ADC32rr, X86::GR32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::ADC32rr, X86::GR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_ADDE_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADDE_MVT_i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i64)
return 0;
- return FastEmitInst_rr(X86::ADC64rr, X86::GR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::ADC64rr, X86::GR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_ADDE_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_ADDE_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_ADDE_MVT_i8_rr(RetVT, Op0, Op1);
- case MVT::i16: return FastEmit_ISD_ADDE_MVT_i16_rr(RetVT, Op0, Op1);
- case MVT::i32: return FastEmit_ISD_ADDE_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_ISD_ADDE_MVT_i64_rr(RetVT, Op0, Op1);
+ case MVT::i8: return FastEmit_ISD_ADDE_MVT_i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i16: return FastEmit_ISD_ADDE_MVT_i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i32: return FastEmit_ISD_ADDE_MVT_i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i64: return FastEmit_ISD_ADDE_MVT_i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::AND.
-unsigned FastEmit_ISD_AND_MVT_i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_AND_MVT_i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_rr(X86::AND8rr, X86::GR8RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::AND8rr, X86::GR8RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_AND_MVT_i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_AND_MVT_i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i16)
return 0;
- return FastEmitInst_rr(X86::AND16rr, X86::GR16RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::AND16rr, X86::GR16RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_AND_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_AND_MVT_i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::AND32rr, X86::GR32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::AND32rr, X86::GR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_AND_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_AND_MVT_i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i64)
return 0;
- return FastEmitInst_rr(X86::AND64rr, X86::GR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::AND64rr, X86::GR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_AND_MVT_v1i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_AND_MVT_v1i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v1i64)
return 0;
- if ((Subtarget->hasMMX())) {
- return FastEmitInst_rr(X86::MMX_PANDrr, X86::VR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasMMX() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MMX_PANDrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_AND_MVT_v2i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_AND_MVT_v2i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2i64)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::ANDPSrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VPANDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::PANDrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::ANDPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::PANDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_AND_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_AND_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_AND_MVT_i8_rr(RetVT, Op0, Op1);
- case MVT::i16: return FastEmit_ISD_AND_MVT_i16_rr(RetVT, Op0, Op1);
- case MVT::i32: return FastEmit_ISD_AND_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_ISD_AND_MVT_i64_rr(RetVT, Op0, Op1);
- case MVT::v1i64: return FastEmit_ISD_AND_MVT_v1i64_rr(RetVT, Op0, Op1);
- case MVT::v2i64: return FastEmit_ISD_AND_MVT_v2i64_rr(RetVT, Op0, Op1);
+ case MVT::i8: return FastEmit_ISD_AND_MVT_i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i16: return FastEmit_ISD_AND_MVT_i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i32: return FastEmit_ISD_AND_MVT_i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i64: return FastEmit_ISD_AND_MVT_i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v1i64: return FastEmit_ISD_AND_MVT_v1i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2i64: return FastEmit_ISD_AND_MVT_v2i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::FADD.
-unsigned FastEmit_ISD_FADD_MVT_f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FADD_MVT_f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f32)
return 0;
if ((!Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::ADD_Fp32, X86::RFP32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::ADD_Fp32, X86::RFP32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VADDSSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::ADDSSrr, X86::FR32RegisterClass, Op0, Op1);
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::ADDSSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FADD_MVT_f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FADD_MVT_f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f64)
return 0;
if ((!Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::ADD_Fp64, X86::RFP64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::ADD_Fp64, X86::RFP64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::ADDSDrr, X86::FR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VADDSDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::ADDSDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FADD_MVT_f80_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FADD_MVT_f80_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f80)
return 0;
- return FastEmitInst_rr(X86::ADD_Fp80, X86::RFP80RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::ADD_Fp80, X86::RFP80RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_FADD_MVT_v4f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FADD_MVT_v4f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v4f32)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::ADDPSrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VADDPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::ADDPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_ISD_FADD_MVT_v8f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v8f32)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VADDPSYrr, X86::VR256RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FADD_MVT_v2f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FADD_MVT_v2f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2f64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::ADDPDrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VADDPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::ADDPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_ISD_FADD_MVT_v4f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v4f64)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VADDPDYrr, X86::VR256RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FADD_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FADD_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_ISD_FADD_MVT_f32_rr(RetVT, Op0, Op1);
- case MVT::f64: return FastEmit_ISD_FADD_MVT_f64_rr(RetVT, Op0, Op1);
- case MVT::f80: return FastEmit_ISD_FADD_MVT_f80_rr(RetVT, Op0, Op1);
- case MVT::v4f32: return FastEmit_ISD_FADD_MVT_v4f32_rr(RetVT, Op0, Op1);
- case MVT::v2f64: return FastEmit_ISD_FADD_MVT_v2f64_rr(RetVT, Op0, Op1);
+ case MVT::f32: return FastEmit_ISD_FADD_MVT_f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::f64: return FastEmit_ISD_FADD_MVT_f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::f80: return FastEmit_ISD_FADD_MVT_f80_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4f32: return FastEmit_ISD_FADD_MVT_v4f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v8f32: return FastEmit_ISD_FADD_MVT_v8f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2f64: return FastEmit_ISD_FADD_MVT_v2f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4f64: return FastEmit_ISD_FADD_MVT_v4f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::FDIV.
-unsigned FastEmit_ISD_FDIV_MVT_f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FDIV_MVT_f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f32)
return 0;
if ((!Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::DIV_Fp32, X86::RFP32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::DIV_Fp32, X86::RFP32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::DIVSSrr, X86::FR32RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VDIVSSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::DIVSSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FDIV_MVT_f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FDIV_MVT_f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f64)
return 0;
if ((!Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::DIV_Fp64, X86::RFP64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::DIV_Fp64, X86::RFP64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VDIVSDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::DIVSDrr, X86::FR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::DIVSDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FDIV_MVT_f80_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FDIV_MVT_f80_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f80)
return 0;
- return FastEmitInst_rr(X86::DIV_Fp80, X86::RFP80RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::DIV_Fp80, X86::RFP80RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_FDIV_MVT_v4f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FDIV_MVT_v4f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v4f32)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::DIVPSrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VDIVPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::DIVPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_ISD_FDIV_MVT_v8f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v8f32)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VDIVPSYrr, X86::VR256RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FDIV_MVT_v2f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FDIV_MVT_v2f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2f64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::DIVPDrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VDIVPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::DIVPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_ISD_FDIV_MVT_v4f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v4f64)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VDIVPDYrr, X86::VR256RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FDIV_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FDIV_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_ISD_FDIV_MVT_f32_rr(RetVT, Op0, Op1);
- case MVT::f64: return FastEmit_ISD_FDIV_MVT_f64_rr(RetVT, Op0, Op1);
- case MVT::f80: return FastEmit_ISD_FDIV_MVT_f80_rr(RetVT, Op0, Op1);
- case MVT::v4f32: return FastEmit_ISD_FDIV_MVT_v4f32_rr(RetVT, Op0, Op1);
- case MVT::v2f64: return FastEmit_ISD_FDIV_MVT_v2f64_rr(RetVT, Op0, Op1);
+ case MVT::f32: return FastEmit_ISD_FDIV_MVT_f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::f64: return FastEmit_ISD_FDIV_MVT_f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::f80: return FastEmit_ISD_FDIV_MVT_f80_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4f32: return FastEmit_ISD_FDIV_MVT_v4f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v8f32: return FastEmit_ISD_FDIV_MVT_v8f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2f64: return FastEmit_ISD_FDIV_MVT_v2f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4f64: return FastEmit_ISD_FDIV_MVT_v4f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::FMUL.
-unsigned FastEmit_ISD_FMUL_MVT_f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FMUL_MVT_f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f32)
return 0;
if ((!Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::MUL_Fp32, X86::RFP32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::MUL_Fp32, X86::RFP32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMULSSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::MULSSrr, X86::FR32RegisterClass, Op0, Op1);
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MULSSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FMUL_MVT_f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FMUL_MVT_f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f64)
return 0;
if ((!Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::MUL_Fp64, X86::RFP64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::MUL_Fp64, X86::RFP64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMULSDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::MULSDrr, X86::FR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MULSDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FMUL_MVT_f80_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FMUL_MVT_f80_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f80)
return 0;
- return FastEmitInst_rr(X86::MUL_Fp80, X86::RFP80RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::MUL_Fp80, X86::RFP80RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_FMUL_MVT_v4f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FMUL_MVT_v4f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v4f32)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::MULPSrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMULPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MULPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FMUL_MVT_v2f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FMUL_MVT_v8f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v8f32)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMULPSYrr, X86::VR256RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_ISD_FMUL_MVT_v2f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2f64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::MULPDrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMULPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MULPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FMUL_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FMUL_MVT_v4f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v4f64)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMULPDYrr, X86::VR256RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_ISD_FMUL_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_ISD_FMUL_MVT_f32_rr(RetVT, Op0, Op1);
- case MVT::f64: return FastEmit_ISD_FMUL_MVT_f64_rr(RetVT, Op0, Op1);
- case MVT::f80: return FastEmit_ISD_FMUL_MVT_f80_rr(RetVT, Op0, Op1);
- case MVT::v4f32: return FastEmit_ISD_FMUL_MVT_v4f32_rr(RetVT, Op0, Op1);
- case MVT::v2f64: return FastEmit_ISD_FMUL_MVT_v2f64_rr(RetVT, Op0, Op1);
+ case MVT::f32: return FastEmit_ISD_FMUL_MVT_f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::f64: return FastEmit_ISD_FMUL_MVT_f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::f80: return FastEmit_ISD_FMUL_MVT_f80_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4f32: return FastEmit_ISD_FMUL_MVT_v4f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v8f32: return FastEmit_ISD_FMUL_MVT_v8f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2f64: return FastEmit_ISD_FMUL_MVT_v2f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4f64: return FastEmit_ISD_FMUL_MVT_v4f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::FSUB.
-unsigned FastEmit_ISD_FSUB_MVT_f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FSUB_MVT_f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f32)
return 0;
if ((!Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::SUB_Fp32, X86::RFP32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::SUB_Fp32, X86::RFP32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VSUBSSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::SUBSSrr, X86::FR32RegisterClass, Op0, Op1);
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::SUBSSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FSUB_MVT_f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FSUB_MVT_f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f64)
return 0;
if ((!Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::SUB_Fp64, X86::RFP64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::SUB_Fp64, X86::RFP64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::SUBSDrr, X86::FR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VSUBSDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::SUBSDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FSUB_MVT_f80_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FSUB_MVT_f80_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f80)
return 0;
- return FastEmitInst_rr(X86::SUB_Fp80, X86::RFP80RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::SUB_Fp80, X86::RFP80RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_FSUB_MVT_v4f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FSUB_MVT_v4f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v4f32)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::SUBPSrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VSUBPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::SUBPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FSUB_MVT_v2f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FSUB_MVT_v8f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v8f32)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VSUBPSYrr, X86::VR256RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_ISD_FSUB_MVT_v2f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2f64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::SUBPDrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VSUBPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::SUBPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_FSUB_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_FSUB_MVT_v4f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v4f64)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VSUBPDYrr, X86::VR256RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_ISD_FSUB_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_ISD_FSUB_MVT_f32_rr(RetVT, Op0, Op1);
- case MVT::f64: return FastEmit_ISD_FSUB_MVT_f64_rr(RetVT, Op0, Op1);
- case MVT::f80: return FastEmit_ISD_FSUB_MVT_f80_rr(RetVT, Op0, Op1);
- case MVT::v4f32: return FastEmit_ISD_FSUB_MVT_v4f32_rr(RetVT, Op0, Op1);
- case MVT::v2f64: return FastEmit_ISD_FSUB_MVT_v2f64_rr(RetVT, Op0, Op1);
+ case MVT::f32: return FastEmit_ISD_FSUB_MVT_f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::f64: return FastEmit_ISD_FSUB_MVT_f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::f80: return FastEmit_ISD_FSUB_MVT_f80_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4f32: return FastEmit_ISD_FSUB_MVT_v4f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v8f32: return FastEmit_ISD_FSUB_MVT_v8f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2f64: return FastEmit_ISD_FSUB_MVT_v2f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4f64: return FastEmit_ISD_FSUB_MVT_v4f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::MUL.
-unsigned FastEmit_ISD_MUL_MVT_i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_MUL_MVT_i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- TII.copyRegToReg(*MBB, MBB->end(), X86::AL, Op0, TM.getRegisterInfo()->getPhysicalRegisterRegClass(X86::AL), MRI.getRegClass(Op0));
- return FastEmitInst_r(X86::MUL8r, X86::GR8RegisterClass, Op1);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), X86::AL).addReg(Op0);
+ return FastEmitInst_r(X86::MUL8r, X86::GR8RegisterClass, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_MUL_MVT_i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_MUL_MVT_i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i16)
return 0;
- return FastEmitInst_rr(X86::IMUL16rr, X86::GR16RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::IMUL16rr, X86::GR16RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_MUL_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_MUL_MVT_i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::IMUL32rr, X86::GR32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::IMUL32rr, X86::GR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_MUL_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_MUL_MVT_i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i64)
return 0;
- return FastEmitInst_rr(X86::IMUL64rr, X86::GR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::IMUL64rr, X86::GR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_MUL_MVT_v4i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_MUL_MVT_v4i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v4i16)
return 0;
- if ((Subtarget->hasMMX())) {
- return FastEmitInst_rr(X86::MMX_PMULLWrr, X86::VR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasMMX() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MMX_PMULLWrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_MUL_MVT_v8i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_MUL_MVT_v8i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v8i16)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::PMULLWrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VPMULLWrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::PMULLWrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_MUL_MVT_v4i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_MUL_MVT_v4i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v4i32)
return 0;
- if ((Subtarget->hasSSE41())) {
- return FastEmitInst_rr(X86::PMULLDrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VPMULLDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE41() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::PMULLDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_MUL_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_MUL_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_MUL_MVT_i8_rr(RetVT, Op0, Op1);
- case MVT::i16: return FastEmit_ISD_MUL_MVT_i16_rr(RetVT, Op0, Op1);
- case MVT::i32: return FastEmit_ISD_MUL_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_ISD_MUL_MVT_i64_rr(RetVT, Op0, Op1);
- case MVT::v4i16: return FastEmit_ISD_MUL_MVT_v4i16_rr(RetVT, Op0, Op1);
- case MVT::v8i16: return FastEmit_ISD_MUL_MVT_v8i16_rr(RetVT, Op0, Op1);
- case MVT::v4i32: return FastEmit_ISD_MUL_MVT_v4i32_rr(RetVT, Op0, Op1);
+ case MVT::i8: return FastEmit_ISD_MUL_MVT_i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i16: return FastEmit_ISD_MUL_MVT_i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i32: return FastEmit_ISD_MUL_MVT_i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i64: return FastEmit_ISD_MUL_MVT_i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4i16: return FastEmit_ISD_MUL_MVT_v4i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v8i16: return FastEmit_ISD_MUL_MVT_v8i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4i32: return FastEmit_ISD_MUL_MVT_v4i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::OR.
-unsigned FastEmit_ISD_OR_MVT_i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_OR_MVT_i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_rr(X86::OR8rr, X86::GR8RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::OR8rr, X86::GR8RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_OR_MVT_i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_OR_MVT_i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i16)
return 0;
- return FastEmitInst_rr(X86::OR16rr, X86::GR16RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::OR16rr, X86::GR16RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_OR_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_OR_MVT_i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::OR32rr, X86::GR32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::OR32rr, X86::GR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_OR_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_OR_MVT_i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i64)
return 0;
- return FastEmitInst_rr(X86::OR64rr, X86::GR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::OR64rr, X86::GR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_OR_MVT_v1i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_OR_MVT_v1i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v1i64)
return 0;
- if ((Subtarget->hasMMX())) {
- return FastEmitInst_rr(X86::MMX_PORrr, X86::VR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasMMX() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MMX_PORrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_OR_MVT_v2i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_OR_MVT_v2i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2i64)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::ORPSrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VPORrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::ORPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::PORrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::PORrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_OR_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_OR_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_OR_MVT_i8_rr(RetVT, Op0, Op1);
- case MVT::i16: return FastEmit_ISD_OR_MVT_i16_rr(RetVT, Op0, Op1);
- case MVT::i32: return FastEmit_ISD_OR_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_ISD_OR_MVT_i64_rr(RetVT, Op0, Op1);
- case MVT::v1i64: return FastEmit_ISD_OR_MVT_v1i64_rr(RetVT, Op0, Op1);
- case MVT::v2i64: return FastEmit_ISD_OR_MVT_v2i64_rr(RetVT, Op0, Op1);
+ case MVT::i8: return FastEmit_ISD_OR_MVT_i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i16: return FastEmit_ISD_OR_MVT_i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i32: return FastEmit_ISD_OR_MVT_i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i64: return FastEmit_ISD_OR_MVT_i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v1i64: return FastEmit_ISD_OR_MVT_v1i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2i64: return FastEmit_ISD_OR_MVT_v2i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ default: return 0;
+ }
+}
+
+// FastEmit functions for ISD::ROTL.
+
+unsigned FastEmit_ISD_ROTL_MVT_i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i8)
+ return 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), X86::CL).addReg(Op1);
+ return FastEmitInst_r(X86::ROL8rCL, X86::GR8RegisterClass, Op0, Op0IsKill);
+}
+
+unsigned FastEmit_ISD_ROTL_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::i8: return FastEmit_ISD_ROTL_MVT_i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ default: return 0;
+ }
+}
+
+// FastEmit functions for ISD::ROTR.
+
+unsigned FastEmit_ISD_ROTR_MVT_i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i8)
+ return 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), X86::CL).addReg(Op1);
+ return FastEmitInst_r(X86::ROR8rCL, X86::GR8RegisterClass, Op0, Op0IsKill);
+}
+
+unsigned FastEmit_ISD_ROTR_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::i8: return FastEmit_ISD_ROTR_MVT_i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ default: return 0;
+ }
+}
+
+// FastEmit functions for ISD::SHL.
+
+unsigned FastEmit_ISD_SHL_MVT_i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i8)
+ return 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), X86::CL).addReg(Op1);
+ return FastEmitInst_r(X86::SHL8rCL, X86::GR8RegisterClass, Op0, Op0IsKill);
+}
+
+unsigned FastEmit_ISD_SHL_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::i8: return FastEmit_ISD_SHL_MVT_i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ default: return 0;
+ }
+}
+
+// FastEmit functions for ISD::SRA.
+
+unsigned FastEmit_ISD_SRA_MVT_i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i8)
+ return 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), X86::CL).addReg(Op1);
+ return FastEmitInst_r(X86::SAR8rCL, X86::GR8RegisterClass, Op0, Op0IsKill);
+}
+
+unsigned FastEmit_ISD_SRA_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::i8: return FastEmit_ISD_SRA_MVT_i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ default: return 0;
+ }
+}
+
+// FastEmit functions for ISD::SRL.
+
+unsigned FastEmit_ISD_SRL_MVT_i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i8)
+ return 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), X86::CL).addReg(Op1);
+ return FastEmitInst_r(X86::SHR8rCL, X86::GR8RegisterClass, Op0, Op0IsKill);
+}
+
+unsigned FastEmit_ISD_SRL_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::i8: return FastEmit_ISD_SRL_MVT_i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::SUB.
-unsigned FastEmit_ISD_SUB_MVT_i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUB_MVT_i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_rr(X86::SUB8rr, X86::GR8RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::SUB8rr, X86::GR8RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_SUB_MVT_i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUB_MVT_i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i16)
return 0;
- return FastEmitInst_rr(X86::SUB16rr, X86::GR16RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::SUB16rr, X86::GR16RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_SUB_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUB_MVT_i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::SUB32rr, X86::GR32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::SUB32rr, X86::GR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_SUB_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUB_MVT_i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i64)
return 0;
- return FastEmitInst_rr(X86::SUB64rr, X86::GR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::SUB64rr, X86::GR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_SUB_MVT_v8i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUB_MVT_v8i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v8i8)
return 0;
- if ((Subtarget->hasMMX())) {
- return FastEmitInst_rr(X86::MMX_PSUBBrr, X86::VR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasMMX() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MMX_PSUBBrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SUB_MVT_v16i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUB_MVT_v16i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v16i8)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::PSUBBrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VPSUBBrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::PSUBBrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SUB_MVT_v4i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUB_MVT_v4i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v4i16)
return 0;
- if ((Subtarget->hasMMX())) {
- return FastEmitInst_rr(X86::MMX_PSUBWrr, X86::VR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasMMX() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MMX_PSUBWrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SUB_MVT_v8i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUB_MVT_v8i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v8i16)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::PSUBWrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VPSUBWrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::PSUBWrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SUB_MVT_v2i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUB_MVT_v2i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2i32)
return 0;
- if ((Subtarget->hasMMX())) {
- return FastEmitInst_rr(X86::MMX_PSUBDrr, X86::VR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasMMX() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MMX_PSUBDrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SUB_MVT_v4i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUB_MVT_v4i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v4i32)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::PSUBDrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VPSUBDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::PSUBDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SUB_MVT_v1i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUB_MVT_v1i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v1i64)
return 0;
- if ((Subtarget->hasMMX())) {
- return FastEmitInst_rr(X86::MMX_PSUBQrr, X86::VR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasMMX() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MMX_PSUBQrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SUB_MVT_v2i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUB_MVT_v2i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2i64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::PSUBQrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VPSUBQrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::PSUBQrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_SUB_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUB_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_SUB_MVT_i8_rr(RetVT, Op0, Op1);
- case MVT::i16: return FastEmit_ISD_SUB_MVT_i16_rr(RetVT, Op0, Op1);
- case MVT::i32: return FastEmit_ISD_SUB_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_ISD_SUB_MVT_i64_rr(RetVT, Op0, Op1);
- case MVT::v8i8: return FastEmit_ISD_SUB_MVT_v8i8_rr(RetVT, Op0, Op1);
- case MVT::v16i8: return FastEmit_ISD_SUB_MVT_v16i8_rr(RetVT, Op0, Op1);
- case MVT::v4i16: return FastEmit_ISD_SUB_MVT_v4i16_rr(RetVT, Op0, Op1);
- case MVT::v8i16: return FastEmit_ISD_SUB_MVT_v8i16_rr(RetVT, Op0, Op1);
- case MVT::v2i32: return FastEmit_ISD_SUB_MVT_v2i32_rr(RetVT, Op0, Op1);
- case MVT::v4i32: return FastEmit_ISD_SUB_MVT_v4i32_rr(RetVT, Op0, Op1);
- case MVT::v1i64: return FastEmit_ISD_SUB_MVT_v1i64_rr(RetVT, Op0, Op1);
- case MVT::v2i64: return FastEmit_ISD_SUB_MVT_v2i64_rr(RetVT, Op0, Op1);
+ case MVT::i8: return FastEmit_ISD_SUB_MVT_i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i16: return FastEmit_ISD_SUB_MVT_i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i32: return FastEmit_ISD_SUB_MVT_i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i64: return FastEmit_ISD_SUB_MVT_i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v8i8: return FastEmit_ISD_SUB_MVT_v8i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v16i8: return FastEmit_ISD_SUB_MVT_v16i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4i16: return FastEmit_ISD_SUB_MVT_v4i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v8i16: return FastEmit_ISD_SUB_MVT_v8i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2i32: return FastEmit_ISD_SUB_MVT_v2i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4i32: return FastEmit_ISD_SUB_MVT_v4i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v1i64: return FastEmit_ISD_SUB_MVT_v1i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2i64: return FastEmit_ISD_SUB_MVT_v2i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::SUBC.
-unsigned FastEmit_ISD_SUBC_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUBC_MVT_i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::SUB32rr, X86::GR32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::SUB32rr, X86::GR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_SUBC_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUBC_MVT_i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i64)
return 0;
- return FastEmitInst_rr(X86::SUB64rr, X86::GR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::SUB64rr, X86::GR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_SUBC_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUBC_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::i32: return FastEmit_ISD_SUBC_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_ISD_SUBC_MVT_i64_rr(RetVT, Op0, Op1);
+ case MVT::i32: return FastEmit_ISD_SUBC_MVT_i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i64: return FastEmit_ISD_SUBC_MVT_i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::SUBE.
-unsigned FastEmit_ISD_SUBE_MVT_i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUBE_MVT_i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_rr(X86::SBB8rr, X86::GR8RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::SBB8rr, X86::GR8RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_SUBE_MVT_i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUBE_MVT_i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i16)
return 0;
- return FastEmitInst_rr(X86::SBB16rr, X86::GR16RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::SBB16rr, X86::GR16RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_SUBE_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUBE_MVT_i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::SBB32rr, X86::GR32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::SBB32rr, X86::GR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_SUBE_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUBE_MVT_i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i64)
return 0;
- return FastEmitInst_rr(X86::SBB64rr, X86::GR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::SBB64rr, X86::GR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_SUBE_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_SUBE_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_SUBE_MVT_i8_rr(RetVT, Op0, Op1);
- case MVT::i16: return FastEmit_ISD_SUBE_MVT_i16_rr(RetVT, Op0, Op1);
- case MVT::i32: return FastEmit_ISD_SUBE_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_ISD_SUBE_MVT_i64_rr(RetVT, Op0, Op1);
+ case MVT::i8: return FastEmit_ISD_SUBE_MVT_i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i16: return FastEmit_ISD_SUBE_MVT_i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i32: return FastEmit_ISD_SUBE_MVT_i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i64: return FastEmit_ISD_SUBE_MVT_i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for ISD::XOR.
-unsigned FastEmit_ISD_XOR_MVT_i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_XOR_MVT_i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i8)
return 0;
- return FastEmitInst_rr(X86::XOR8rr, X86::GR8RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::XOR8rr, X86::GR8RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_XOR_MVT_i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_XOR_MVT_i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i16)
return 0;
- return FastEmitInst_rr(X86::XOR16rr, X86::GR16RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::XOR16rr, X86::GR16RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_XOR_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_XOR_MVT_i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::XOR32rr, X86::GR32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::XOR32rr, X86::GR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_XOR_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_XOR_MVT_i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i64)
return 0;
- return FastEmitInst_rr(X86::XOR64rr, X86::GR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::XOR64rr, X86::GR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_ISD_XOR_MVT_v1i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_XOR_MVT_v1i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v1i64)
return 0;
- if ((Subtarget->hasMMX())) {
- return FastEmitInst_rr(X86::MMX_PXORrr, X86::VR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasMMX() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MMX_PXORrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_XOR_MVT_v2i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_XOR_MVT_v2i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2i64)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::XORPSrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VPXORrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::XORPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::PXORrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::PXORrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_ISD_XOR_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_ISD_XOR_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_ISD_XOR_MVT_i8_rr(RetVT, Op0, Op1);
- case MVT::i16: return FastEmit_ISD_XOR_MVT_i16_rr(RetVT, Op0, Op1);
- case MVT::i32: return FastEmit_ISD_XOR_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_ISD_XOR_MVT_i64_rr(RetVT, Op0, Op1);
- case MVT::v1i64: return FastEmit_ISD_XOR_MVT_v1i64_rr(RetVT, Op0, Op1);
- case MVT::v2i64: return FastEmit_ISD_XOR_MVT_v2i64_rr(RetVT, Op0, Op1);
+ case MVT::i8: return FastEmit_ISD_XOR_MVT_i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i16: return FastEmit_ISD_XOR_MVT_i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i32: return FastEmit_ISD_XOR_MVT_i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i64: return FastEmit_ISD_XOR_MVT_i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v1i64: return FastEmit_ISD_XOR_MVT_v1i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2i64: return FastEmit_ISD_XOR_MVT_v2i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
-// FastEmit functions for X86ISD::ADD.
-
-unsigned FastEmit_X86ISD_ADD_MVT_i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i8)
- return 0;
- return FastEmitInst_rr(X86::ADD8rr, X86::GR8RegisterClass, Op0, Op1);
-}
-
-unsigned FastEmit_X86ISD_ADD_MVT_i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i16)
- return 0;
- return FastEmitInst_rr(X86::ADD16rr, X86::GR16RegisterClass, Op0, Op1);
-}
+// FastEmit functions for X86ISD::BT.
-unsigned FastEmit_X86ISD_ADD_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_BT_MVT_i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::ADD32rr, X86::GR32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::BT16rr, X86::GR16RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_ADD_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i64)
- return 0;
- return FastEmitInst_rr(X86::ADD64rr, X86::GR64RegisterClass, Op0, Op1);
-}
-
-unsigned FastEmit_X86ISD_ADD_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
- switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_X86ISD_ADD_MVT_i8_rr(RetVT, Op0, Op1);
- case MVT::i16: return FastEmit_X86ISD_ADD_MVT_i16_rr(RetVT, Op0, Op1);
- case MVT::i32: return FastEmit_X86ISD_ADD_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_X86ISD_ADD_MVT_i64_rr(RetVT, Op0, Op1);
- default: return 0;
- }
-}
-
-// FastEmit functions for X86ISD::AND.
-
-unsigned FastEmit_X86ISD_AND_MVT_i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i8)
- return 0;
- return FastEmitInst_rr(X86::AND8rr, X86::GR8RegisterClass, Op0, Op1);
-}
-
-unsigned FastEmit_X86ISD_AND_MVT_i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i16)
- return 0;
- return FastEmitInst_rr(X86::AND16rr, X86::GR16RegisterClass, Op0, Op1);
-}
-
-unsigned FastEmit_X86ISD_AND_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_BT_MVT_i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::AND32rr, X86::GR32RegisterClass, Op0, Op1);
-}
-
-unsigned FastEmit_X86ISD_AND_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i64)
- return 0;
- return FastEmitInst_rr(X86::AND64rr, X86::GR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::BT32rr, X86::GR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_AND_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
- switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_X86ISD_AND_MVT_i8_rr(RetVT, Op0, Op1);
- case MVT::i16: return FastEmit_X86ISD_AND_MVT_i16_rr(RetVT, Op0, Op1);
- case MVT::i32: return FastEmit_X86ISD_AND_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_X86ISD_AND_MVT_i64_rr(RetVT, Op0, Op1);
- default: return 0;
- }
-}
-
-// FastEmit functions for X86ISD::BT.
-
-unsigned FastEmit_X86ISD_BT_MVT_i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::isVoid)
- return 0;
- return FastEmitInst_rr(X86::BT16rr, X86::GR16RegisterClass, Op0, Op1);
-}
-
-unsigned FastEmit_X86ISD_BT_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::isVoid)
- return 0;
- return FastEmitInst_rr(X86::BT32rr, X86::GR32RegisterClass, Op0, Op1);
-}
-
-unsigned FastEmit_X86ISD_BT_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::isVoid)
+unsigned FastEmit_X86ISD_BT_MVT_i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::BT64rr, X86::GR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::BT64rr, X86::GR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_BT_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_BT_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::i16: return FastEmit_X86ISD_BT_MVT_i16_rr(RetVT, Op0, Op1);
- case MVT::i32: return FastEmit_X86ISD_BT_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_X86ISD_BT_MVT_i64_rr(RetVT, Op0, Op1);
+ case MVT::i16: return FastEmit_X86ISD_BT_MVT_i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i32: return FastEmit_X86ISD_BT_MVT_i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i64: return FastEmit_X86ISD_BT_MVT_i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::CMP.
-unsigned FastEmit_X86ISD_CMP_MVT_i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::isVoid)
+unsigned FastEmit_X86ISD_CMP_MVT_i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::CMP8rr, X86::GR8RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::CMP8rr, X86::GR8RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_CMP_MVT_i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::isVoid)
+unsigned FastEmit_X86ISD_CMP_MVT_i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::CMP16rr, X86::GR16RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::CMP16rr, X86::GR16RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_CMP_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::isVoid)
+unsigned FastEmit_X86ISD_CMP_MVT_i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::CMP32rr, X86::GR32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::CMP32rr, X86::GR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_CMP_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::isVoid)
+unsigned FastEmit_X86ISD_CMP_MVT_i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::CMP64rr, X86::GR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::CMP64rr, X86::GR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_CMP_MVT_f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::isVoid)
+unsigned FastEmit_X86ISD_CMP_MVT_f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
return 0;
if ((!Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::UCOM_FpIr32, X86::RFP32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::UCOM_FpIr32, X86::RFP32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VUCOMISSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::UCOMISSrr, X86::FR32RegisterClass, Op0, Op1);
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::UCOMISSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_CMP_MVT_f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::isVoid)
+unsigned FastEmit_X86ISD_CMP_MVT_f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
return 0;
if ((!Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::UCOM_FpIr64, X86::RFP64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::UCOM_FpIr64, X86::RFP64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VUCOMISDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::UCOMISDrr, X86::FR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::UCOMISDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_CMP_MVT_f80_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::isVoid)
+unsigned FastEmit_X86ISD_CMP_MVT_f80_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::UCOM_FpIr80, X86::RFP80RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::UCOM_FpIr80, X86::RFP80RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_CMP_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_CMP_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_X86ISD_CMP_MVT_i8_rr(RetVT, Op0, Op1);
- case MVT::i16: return FastEmit_X86ISD_CMP_MVT_i16_rr(RetVT, Op0, Op1);
- case MVT::i32: return FastEmit_X86ISD_CMP_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_X86ISD_CMP_MVT_i64_rr(RetVT, Op0, Op1);
- case MVT::f32: return FastEmit_X86ISD_CMP_MVT_f32_rr(RetVT, Op0, Op1);
- case MVT::f64: return FastEmit_X86ISD_CMP_MVT_f64_rr(RetVT, Op0, Op1);
- case MVT::f80: return FastEmit_X86ISD_CMP_MVT_f80_rr(RetVT, Op0, Op1);
+ case MVT::i8: return FastEmit_X86ISD_CMP_MVT_i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i16: return FastEmit_X86ISD_CMP_MVT_i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i32: return FastEmit_X86ISD_CMP_MVT_i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::i64: return FastEmit_X86ISD_CMP_MVT_i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::f32: return FastEmit_X86ISD_CMP_MVT_f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::f64: return FastEmit_X86ISD_CMP_MVT_f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::f80: return FastEmit_X86ISD_CMP_MVT_f80_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::COMI.
-unsigned FastEmit_X86ISD_COMI_MVT_v4f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::isVoid)
+unsigned FastEmit_X86ISD_COMI_MVT_v4f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::Int_COMISSrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::Int_VCOMISSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::Int_COMISSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_COMI_MVT_v2f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::isVoid)
+unsigned FastEmit_X86ISD_COMI_MVT_v2f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::Int_COMISDrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::Int_VCOMISDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::Int_COMISDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_COMI_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_COMI_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::v4f32: return FastEmit_X86ISD_COMI_MVT_v4f32_rr(RetVT, Op0, Op1);
- case MVT::v2f64: return FastEmit_X86ISD_COMI_MVT_v2f64_rr(RetVT, Op0, Op1);
+ case MVT::v4f32: return FastEmit_X86ISD_COMI_MVT_v4f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2f64: return FastEmit_X86ISD_COMI_MVT_v2f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::FAND.
-unsigned FastEmit_X86ISD_FAND_MVT_f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FAND_MVT_f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f32)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::FsANDPSrr, X86::FR32RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VFsANDPSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::FsANDPSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FAND_MVT_f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FAND_MVT_f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::FsANDPDrr, X86::FR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VFsANDPDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::FsANDPDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FAND_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FAND_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_X86ISD_FAND_MVT_f32_rr(RetVT, Op0, Op1);
- case MVT::f64: return FastEmit_X86ISD_FAND_MVT_f64_rr(RetVT, Op0, Op1);
+ case MVT::f32: return FastEmit_X86ISD_FAND_MVT_f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::f64: return FastEmit_X86ISD_FAND_MVT_f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::FMAX.
-unsigned FastEmit_X86ISD_FMAX_MVT_f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FMAX_MVT_f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f32)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::MAXSSrr, X86::FR32RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMAXSSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MAXSSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FMAX_MVT_f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FMAX_MVT_f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::MAXSDrr, X86::FR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMAXSDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MAXSDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FMAX_MVT_v4f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FMAX_MVT_v4f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v4f32)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::MAXPSrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMAXPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MAXPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_X86ISD_FMAX_MVT_v8f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v8f32)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMAXPSYrr, X86::VR256RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FMAX_MVT_v2f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FMAX_MVT_v2f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2f64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::MAXPDrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMAXPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MAXPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_X86ISD_FMAX_MVT_v4f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v4f64)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMAXPDYrr, X86::VR256RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FMAX_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FMAX_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_X86ISD_FMAX_MVT_f32_rr(RetVT, Op0, Op1);
- case MVT::f64: return FastEmit_X86ISD_FMAX_MVT_f64_rr(RetVT, Op0, Op1);
- case MVT::v4f32: return FastEmit_X86ISD_FMAX_MVT_v4f32_rr(RetVT, Op0, Op1);
- case MVT::v2f64: return FastEmit_X86ISD_FMAX_MVT_v2f64_rr(RetVT, Op0, Op1);
+ case MVT::f32: return FastEmit_X86ISD_FMAX_MVT_f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::f64: return FastEmit_X86ISD_FMAX_MVT_f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4f32: return FastEmit_X86ISD_FMAX_MVT_v4f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v8f32: return FastEmit_X86ISD_FMAX_MVT_v8f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2f64: return FastEmit_X86ISD_FMAX_MVT_v2f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4f64: return FastEmit_X86ISD_FMAX_MVT_v4f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::FMIN.
-unsigned FastEmit_X86ISD_FMIN_MVT_f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FMIN_MVT_f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f32)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::MINSSrr, X86::FR32RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMINSSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MINSSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FMIN_MVT_f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FMIN_MVT_f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::MINSDrr, X86::FR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMINSDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MINSDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FMIN_MVT_v4f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FMIN_MVT_v4f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v4f32)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::MINPSrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMINPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MINPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_X86ISD_FMIN_MVT_v8f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v8f32)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMINPSYrr, X86::VR256RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FMIN_MVT_v2f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FMIN_MVT_v2f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2f64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::MINPDrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMINPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::MINPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_X86ISD_FMIN_MVT_v4f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v4f64)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VMINPDYrr, X86::VR256RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FMIN_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FMIN_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_X86ISD_FMIN_MVT_f32_rr(RetVT, Op0, Op1);
- case MVT::f64: return FastEmit_X86ISD_FMIN_MVT_f64_rr(RetVT, Op0, Op1);
- case MVT::v4f32: return FastEmit_X86ISD_FMIN_MVT_v4f32_rr(RetVT, Op0, Op1);
- case MVT::v2f64: return FastEmit_X86ISD_FMIN_MVT_v2f64_rr(RetVT, Op0, Op1);
+ case MVT::f32: return FastEmit_X86ISD_FMIN_MVT_f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::f64: return FastEmit_X86ISD_FMIN_MVT_f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4f32: return FastEmit_X86ISD_FMIN_MVT_v4f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v8f32: return FastEmit_X86ISD_FMIN_MVT_v8f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2f64: return FastEmit_X86ISD_FMIN_MVT_v2f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4f64: return FastEmit_X86ISD_FMIN_MVT_v4f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::FOR.
-unsigned FastEmit_X86ISD_FOR_MVT_f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FOR_MVT_f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f32)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::FsORPSrr, X86::FR32RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VFsORPSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::FsORPSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FOR_MVT_f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FOR_MVT_f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::FsORPDrr, X86::FR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VFsORPDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::FsORPDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FOR_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FOR_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_X86ISD_FOR_MVT_f32_rr(RetVT, Op0, Op1);
- case MVT::f64: return FastEmit_X86ISD_FOR_MVT_f64_rr(RetVT, Op0, Op1);
+ case MVT::f32: return FastEmit_X86ISD_FOR_MVT_f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::f64: return FastEmit_X86ISD_FOR_MVT_f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::FXOR.
-unsigned FastEmit_X86ISD_FXOR_MVT_f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FXOR_MVT_f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f32)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::FsXORPSrr, X86::FR32RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VFsXORPSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::FsXORPSrr, X86::FR32RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FXOR_MVT_f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FXOR_MVT_f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::f64)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::FsXORPDrr, X86::FR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VFsXORPDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::FsXORPDrr, X86::FR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_FXOR_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_FXOR_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::f32: return FastEmit_X86ISD_FXOR_MVT_f32_rr(RetVT, Op0, Op1);
- case MVT::f64: return FastEmit_X86ISD_FXOR_MVT_f64_rr(RetVT, Op0, Op1);
+ case MVT::f32: return FastEmit_X86ISD_FXOR_MVT_f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::f64: return FastEmit_X86ISD_FXOR_MVT_f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
-// FastEmit functions for X86ISD::OR.
+// FastEmit functions for X86ISD::MOVHLPS.
-unsigned FastEmit_X86ISD_OR_MVT_i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i8)
+unsigned FastEmit_X86ISD_MOVHLPS_MVT_v4i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v4i32)
return 0;
- return FastEmitInst_rr(X86::OR8rr, X86::GR8RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::MOVHLPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_OR_MVT_i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i16)
+unsigned FastEmit_X86ISD_MOVHLPS_MVT_v4f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v4f32)
return 0;
- return FastEmitInst_rr(X86::OR16rr, X86::GR16RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::MOVHLPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_OR_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i32)
+unsigned FastEmit_X86ISD_MOVHLPS_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::v4i32: return FastEmit_X86ISD_MOVHLPS_MVT_v4i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4f32: return FastEmit_X86ISD_MOVHLPS_MVT_v4f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ default: return 0;
+ }
+}
+
+// FastEmit functions for X86ISD::MOVLHPS.
+
+unsigned FastEmit_X86ISD_MOVLHPS_MVT_v4i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v4i32)
return 0;
- return FastEmitInst_rr(X86::OR32rr, X86::GR32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::MOVLHPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_OR_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i64)
+unsigned FastEmit_X86ISD_MOVLHPS_MVT_v2i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v2i64)
+ return 0;
+ return FastEmitInst_rr(X86::MOVLHPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+}
+
+unsigned FastEmit_X86ISD_MOVLHPS_MVT_v4f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v4f32)
return 0;
- return FastEmitInst_rr(X86::OR64rr, X86::GR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::MOVLHPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_OR_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_MOVLHPS_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_X86ISD_OR_MVT_i8_rr(RetVT, Op0, Op1);
- case MVT::i16: return FastEmit_X86ISD_OR_MVT_i16_rr(RetVT, Op0, Op1);
- case MVT::i32: return FastEmit_X86ISD_OR_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_X86ISD_OR_MVT_i64_rr(RetVT, Op0, Op1);
+ case MVT::v4i32: return FastEmit_X86ISD_MOVLHPS_MVT_v4i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2i64: return FastEmit_X86ISD_MOVLHPS_MVT_v2i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4f32: return FastEmit_X86ISD_MOVLHPS_MVT_v4f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::PCMPEQB.
-unsigned FastEmit_X86ISD_PCMPEQB_MVT_v8i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPEQB_MVT_v8i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v8i8)
return 0;
- return FastEmitInst_rr(X86::MMX_PCMPEQBrr, X86::VR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::MMX_PCMPEQBrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_PCMPEQB_MVT_v16i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPEQB_MVT_v16i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v16i8)
return 0;
- return FastEmitInst_rr(X86::PCMPEQBrr, X86::VR128RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::PCMPEQBrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_PCMPEQB_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPEQB_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::v8i8: return FastEmit_X86ISD_PCMPEQB_MVT_v8i8_rr(RetVT, Op0, Op1);
- case MVT::v16i8: return FastEmit_X86ISD_PCMPEQB_MVT_v16i8_rr(RetVT, Op0, Op1);
+ case MVT::v8i8: return FastEmit_X86ISD_PCMPEQB_MVT_v8i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v16i8: return FastEmit_X86ISD_PCMPEQB_MVT_v16i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::PCMPEQD.
-unsigned FastEmit_X86ISD_PCMPEQD_MVT_v2i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPEQD_MVT_v2i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2i32)
return 0;
- return FastEmitInst_rr(X86::MMX_PCMPEQDrr, X86::VR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::MMX_PCMPEQDrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_PCMPEQD_MVT_v4i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPEQD_MVT_v4i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v4i32)
return 0;
- return FastEmitInst_rr(X86::PCMPEQDrr, X86::VR128RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::PCMPEQDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_PCMPEQD_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPEQD_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::v2i32: return FastEmit_X86ISD_PCMPEQD_MVT_v2i32_rr(RetVT, Op0, Op1);
- case MVT::v4i32: return FastEmit_X86ISD_PCMPEQD_MVT_v4i32_rr(RetVT, Op0, Op1);
+ case MVT::v2i32: return FastEmit_X86ISD_PCMPEQD_MVT_v2i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4i32: return FastEmit_X86ISD_PCMPEQD_MVT_v4i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::PCMPEQQ.
-unsigned FastEmit_X86ISD_PCMPEQQ_MVT_v2i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPEQQ_MVT_v2i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2i64)
return 0;
- return FastEmitInst_rr(X86::PCMPEQQrr, X86::VR128RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::PCMPEQQrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_PCMPEQQ_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPEQQ_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::v2i64: return FastEmit_X86ISD_PCMPEQQ_MVT_v2i64_rr(RetVT, Op0, Op1);
+ case MVT::v2i64: return FastEmit_X86ISD_PCMPEQQ_MVT_v2i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::PCMPEQW.
-unsigned FastEmit_X86ISD_PCMPEQW_MVT_v4i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPEQW_MVT_v4i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v4i16)
return 0;
- return FastEmitInst_rr(X86::MMX_PCMPEQWrr, X86::VR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::MMX_PCMPEQWrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_PCMPEQW_MVT_v8i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPEQW_MVT_v8i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v8i16)
return 0;
- return FastEmitInst_rr(X86::PCMPEQWrr, X86::VR128RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::PCMPEQWrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_PCMPEQW_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPEQW_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::v4i16: return FastEmit_X86ISD_PCMPEQW_MVT_v4i16_rr(RetVT, Op0, Op1);
- case MVT::v8i16: return FastEmit_X86ISD_PCMPEQW_MVT_v8i16_rr(RetVT, Op0, Op1);
+ case MVT::v4i16: return FastEmit_X86ISD_PCMPEQW_MVT_v4i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v8i16: return FastEmit_X86ISD_PCMPEQW_MVT_v8i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::PCMPGTB.
-unsigned FastEmit_X86ISD_PCMPGTB_MVT_v8i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPGTB_MVT_v8i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v8i8)
return 0;
- return FastEmitInst_rr(X86::MMX_PCMPGTBrr, X86::VR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::MMX_PCMPGTBrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_PCMPGTB_MVT_v16i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPGTB_MVT_v16i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v16i8)
return 0;
- return FastEmitInst_rr(X86::PCMPGTBrr, X86::VR128RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::PCMPGTBrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_PCMPGTB_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPGTB_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::v8i8: return FastEmit_X86ISD_PCMPGTB_MVT_v8i8_rr(RetVT, Op0, Op1);
- case MVT::v16i8: return FastEmit_X86ISD_PCMPGTB_MVT_v16i8_rr(RetVT, Op0, Op1);
+ case MVT::v8i8: return FastEmit_X86ISD_PCMPGTB_MVT_v8i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v16i8: return FastEmit_X86ISD_PCMPGTB_MVT_v16i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::PCMPGTD.
-unsigned FastEmit_X86ISD_PCMPGTD_MVT_v2i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPGTD_MVT_v2i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2i32)
return 0;
- return FastEmitInst_rr(X86::MMX_PCMPGTDrr, X86::VR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::MMX_PCMPGTDrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_PCMPGTD_MVT_v4i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPGTD_MVT_v4i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v4i32)
return 0;
- return FastEmitInst_rr(X86::PCMPGTDrr, X86::VR128RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::PCMPGTDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_PCMPGTD_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPGTD_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::v2i32: return FastEmit_X86ISD_PCMPGTD_MVT_v2i32_rr(RetVT, Op0, Op1);
- case MVT::v4i32: return FastEmit_X86ISD_PCMPGTD_MVT_v4i32_rr(RetVT, Op0, Op1);
+ case MVT::v2i32: return FastEmit_X86ISD_PCMPGTD_MVT_v2i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4i32: return FastEmit_X86ISD_PCMPGTD_MVT_v4i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::PCMPGTQ.
-unsigned FastEmit_X86ISD_PCMPGTQ_MVT_v2i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPGTQ_MVT_v2i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v2i64)
return 0;
- return FastEmitInst_rr(X86::PCMPGTQrr, X86::VR128RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::PCMPGTQrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_PCMPGTQ_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPGTQ_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::v2i64: return FastEmit_X86ISD_PCMPGTQ_MVT_v2i64_rr(RetVT, Op0, Op1);
+ case MVT::v2i64: return FastEmit_X86ISD_PCMPGTQ_MVT_v2i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::PCMPGTW.
-unsigned FastEmit_X86ISD_PCMPGTW_MVT_v4i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPGTW_MVT_v4i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v4i16)
return 0;
- return FastEmitInst_rr(X86::MMX_PCMPGTWrr, X86::VR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::MMX_PCMPGTWrr, X86::VR64RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_PCMPGTW_MVT_v8i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPGTW_MVT_v8i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v8i16)
return 0;
- return FastEmitInst_rr(X86::PCMPGTWrr, X86::VR128RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::PCMPGTWrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_PCMPGTW_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PCMPGTW_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::v4i16: return FastEmit_X86ISD_PCMPGTW_MVT_v4i16_rr(RetVT, Op0, Op1);
- case MVT::v8i16: return FastEmit_X86ISD_PCMPGTW_MVT_v8i16_rr(RetVT, Op0, Op1);
+ case MVT::v4i16: return FastEmit_X86ISD_PCMPGTW_MVT_v4i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v8i16: return FastEmit_X86ISD_PCMPGTW_MVT_v8i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::PSHUFB.
-unsigned FastEmit_X86ISD_PSHUFB_MVT_v16i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PSHUFB_MVT_v16i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::v16i8)
return 0;
- if ((Subtarget->hasSSSE3())) {
- return FastEmitInst_rr(X86::PSHUFBrr128, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasSSSE3() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::PSHUFBrr128, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_PSHUFB_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PSHUFB_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::v16i8: return FastEmit_X86ISD_PSHUFB_MVT_v16i8_rr(RetVT, Op0, Op1);
+ case MVT::v16i8: return FastEmit_X86ISD_PSHUFB_MVT_v16i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::PTEST.
-unsigned FastEmit_X86ISD_PTEST_MVT_v4f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::isVoid)
+unsigned FastEmit_X86ISD_PTEST_MVT_v4i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
return 0;
- if ((Subtarget->hasSSE41())) {
- return FastEmitInst_rr(X86::PTESTrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VPTESTYrr, X86::VR256RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_PTEST_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PTEST_MVT_v4f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VPTESTrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE41() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::PTESTrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_X86ISD_PTEST_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::v4f32: return FastEmit_X86ISD_PTEST_MVT_v4f32_rr(RetVT, Op0, Op1);
+ case MVT::v4i64: return FastEmit_X86ISD_PTEST_MVT_v4i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4f32: return FastEmit_X86ISD_PTEST_MVT_v4f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
-// FastEmit functions for X86ISD::SMUL.
+// FastEmit functions for X86ISD::PUNPCKHBW.
-unsigned FastEmit_X86ISD_SMUL_MVT_i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i16)
+unsigned FastEmit_X86ISD_PUNPCKHBW_MVT_v16i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v16i8)
return 0;
- return FastEmitInst_rr(X86::IMUL16rr, X86::GR16RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::PUNPCKHBWrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_SMUL_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i32)
+unsigned FastEmit_X86ISD_PUNPCKHBW_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::v16i8: return FastEmit_X86ISD_PUNPCKHBW_MVT_v16i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ default: return 0;
+ }
+}
+
+// FastEmit functions for X86ISD::PUNPCKHDQ.
+
+unsigned FastEmit_X86ISD_PUNPCKHDQ_MVT_v4i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v4i32)
return 0;
- return FastEmitInst_rr(X86::IMUL32rr, X86::GR32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::PUNPCKHDQrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_SMUL_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i64)
+unsigned FastEmit_X86ISD_PUNPCKHDQ_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::v4i32: return FastEmit_X86ISD_PUNPCKHDQ_MVT_v4i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ default: return 0;
+ }
+}
+
+// FastEmit functions for X86ISD::PUNPCKHQDQ.
+
+unsigned FastEmit_X86ISD_PUNPCKHQDQ_MVT_v2i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v2i64)
return 0;
- return FastEmitInst_rr(X86::IMUL64rr, X86::GR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::PUNPCKHQDQrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_SMUL_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PUNPCKHQDQ_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::i16: return FastEmit_X86ISD_SMUL_MVT_i16_rr(RetVT, Op0, Op1);
- case MVT::i32: return FastEmit_X86ISD_SMUL_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_X86ISD_SMUL_MVT_i64_rr(RetVT, Op0, Op1);
+ case MVT::v2i64: return FastEmit_X86ISD_PUNPCKHQDQ_MVT_v2i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
-// FastEmit functions for X86ISD::SUB.
+// FastEmit functions for X86ISD::PUNPCKHWD.
-unsigned FastEmit_X86ISD_SUB_MVT_i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i8)
+unsigned FastEmit_X86ISD_PUNPCKHWD_MVT_v8i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v8i16)
return 0;
- return FastEmitInst_rr(X86::SUB8rr, X86::GR8RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::PUNPCKHWDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_SUB_MVT_i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i16)
+unsigned FastEmit_X86ISD_PUNPCKHWD_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::v8i16: return FastEmit_X86ISD_PUNPCKHWD_MVT_v8i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ default: return 0;
+ }
+}
+
+// FastEmit functions for X86ISD::PUNPCKLBW.
+
+unsigned FastEmit_X86ISD_PUNPCKLBW_MVT_v16i8_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v16i8)
+ return 0;
+ return FastEmitInst_rr(X86::PUNPCKLBWrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+}
+
+unsigned FastEmit_X86ISD_PUNPCKLBW_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::v16i8: return FastEmit_X86ISD_PUNPCKLBW_MVT_v16i8_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ default: return 0;
+ }
+}
+
+// FastEmit functions for X86ISD::PUNPCKLDQ.
+
+unsigned FastEmit_X86ISD_PUNPCKLDQ_MVT_v4i32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v4i32)
+ return 0;
+ return FastEmitInst_rr(X86::PUNPCKLDQrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+}
+
+unsigned FastEmit_X86ISD_PUNPCKLDQ_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::v4i32: return FastEmit_X86ISD_PUNPCKLDQ_MVT_v4i32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ default: return 0;
+ }
+}
+
+// FastEmit functions for X86ISD::PUNPCKLQDQ.
+
+unsigned FastEmit_X86ISD_PUNPCKLQDQ_MVT_v2i64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v2i64)
return 0;
- return FastEmitInst_rr(X86::SUB16rr, X86::GR16RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::PUNPCKLQDQrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
-unsigned FastEmit_X86ISD_SUB_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_PUNPCKLQDQ_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::v2i64: return FastEmit_X86ISD_PUNPCKLQDQ_MVT_v2i64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ default: return 0;
+ }
+}
+
+// FastEmit functions for X86ISD::PUNPCKLWD.
+
+unsigned FastEmit_X86ISD_PUNPCKLWD_MVT_v8i16_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v8i16)
+ return 0;
+ return FastEmitInst_rr(X86::PUNPCKLWDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+}
+
+unsigned FastEmit_X86ISD_PUNPCKLWD_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::v8i16: return FastEmit_X86ISD_PUNPCKLWD_MVT_v8i16_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ default: return 0;
+ }
+}
+
+// FastEmit functions for X86ISD::TESTP.
+
+unsigned FastEmit_X86ISD_TESTP_MVT_v4f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::SUB32rr, X86::GR32RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VTESTPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
}
-unsigned FastEmit_X86ISD_SUB_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i64)
+unsigned FastEmit_X86ISD_TESTP_MVT_v8f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
return 0;
- return FastEmitInst_rr(X86::SUB64rr, X86::GR64RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VTESTPSYrr, X86::VR256RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
}
-unsigned FastEmit_X86ISD_SUB_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_TESTP_MVT_v2f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VTESTPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_X86ISD_TESTP_MVT_v4f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
+ return 0;
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VTESTPDYrr, X86::VR256RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
+}
+
+unsigned FastEmit_X86ISD_TESTP_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_X86ISD_SUB_MVT_i8_rr(RetVT, Op0, Op1);
- case MVT::i16: return FastEmit_X86ISD_SUB_MVT_i16_rr(RetVT, Op0, Op1);
- case MVT::i32: return FastEmit_X86ISD_SUB_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_X86ISD_SUB_MVT_i64_rr(RetVT, Op0, Op1);
+ case MVT::v4f32: return FastEmit_X86ISD_TESTP_MVT_v4f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v8f32: return FastEmit_X86ISD_TESTP_MVT_v8f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2f64: return FastEmit_X86ISD_TESTP_MVT_v2f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v4f64: return FastEmit_X86ISD_TESTP_MVT_v4f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// FastEmit functions for X86ISD::UCOMI.
-unsigned FastEmit_X86ISD_UCOMI_MVT_v4f32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::isVoid)
+unsigned FastEmit_X86ISD_UCOMI_MVT_v4f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
return 0;
- if ((Subtarget->hasSSE1())) {
- return FastEmitInst_rr(X86::Int_UCOMISSrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::Int_VUCOMISSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE1() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::Int_UCOMISSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_UCOMI_MVT_v2f64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::isVoid)
+unsigned FastEmit_X86ISD_UCOMI_MVT_v2f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::i32)
return 0;
- if ((Subtarget->hasSSE2())) {
- return FastEmitInst_rr(X86::Int_UCOMISDrr, X86::VR128RegisterClass, Op0, Op1);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::Int_VUCOMISDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::Int_UCOMISDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
}
return 0;
}
-unsigned FastEmit_X86ISD_UCOMI_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_UCOMI_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::v4f32: return FastEmit_X86ISD_UCOMI_MVT_v4f32_rr(RetVT, Op0, Op1);
- case MVT::v2f64: return FastEmit_X86ISD_UCOMI_MVT_v2f64_rr(RetVT, Op0, Op1);
+ case MVT::v4f32: return FastEmit_X86ISD_UCOMI_MVT_v4f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case MVT::v2f64: return FastEmit_X86ISD_UCOMI_MVT_v2f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
-// FastEmit functions for X86ISD::XOR.
+// FastEmit functions for X86ISD::UNPCKHPD.
-unsigned FastEmit_X86ISD_XOR_MVT_i8_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i8)
+unsigned FastEmit_X86ISD_UNPCKHPD_MVT_v2f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v2f64)
return 0;
- return FastEmitInst_rr(X86::XOR8rr, X86::GR8RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::UNPCKHPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VUNPCKHPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
}
-unsigned FastEmit_X86ISD_XOR_MVT_i16_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i16)
+unsigned FastEmit_X86ISD_UNPCKHPD_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::v2f64: return FastEmit_X86ISD_UNPCKHPD_MVT_v2f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ default: return 0;
+ }
+}
+
+// FastEmit functions for X86ISD::UNPCKHPS.
+
+unsigned FastEmit_X86ISD_UNPCKHPS_MVT_v4f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v4f32)
return 0;
- return FastEmitInst_rr(X86::XOR16rr, X86::GR16RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::UNPCKHPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VUNPCKHPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
}
-unsigned FastEmit_X86ISD_XOR_MVT_i32_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i32)
+unsigned FastEmit_X86ISD_UNPCKHPS_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::v4f32: return FastEmit_X86ISD_UNPCKHPS_MVT_v4f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ default: return 0;
+ }
+}
+
+// FastEmit functions for X86ISD::UNPCKLPD.
+
+unsigned FastEmit_X86ISD_UNPCKLPD_MVT_v2f64_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v2f64)
return 0;
- return FastEmitInst_rr(X86::XOR32rr, X86::GR32RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::UNPCKLPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VUNPCKLPDrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
}
-unsigned FastEmit_X86ISD_XOR_MVT_i64_rr(MVT RetVT, unsigned Op0, unsigned Op1) {
- if (RetVT.SimpleTy != MVT::i64)
+unsigned FastEmit_X86ISD_UNPCKLPD_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ switch (VT.SimpleTy) {
+ case MVT::v2f64: return FastEmit_X86ISD_UNPCKLPD_MVT_v2f64_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ default: return 0;
+ }
+}
+
+// FastEmit functions for X86ISD::UNPCKLPS.
+
+unsigned FastEmit_X86ISD_UNPCKLPS_MVT_v4f32_rr(MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
+ if (RetVT.SimpleTy != MVT::v4f32)
return 0;
- return FastEmitInst_rr(X86::XOR64rr, X86::GR64RegisterClass, Op0, Op1);
+ return FastEmitInst_rr(X86::UNPCKLPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ if ((Subtarget->hasAVX())) {
+ return FastEmitInst_rr(X86::VUNPCKLPSrr, X86::VR128RegisterClass, Op0, Op0IsKill, Op1, Op1IsKill);
+ }
+ return 0;
}
-unsigned FastEmit_X86ISD_XOR_rr(MVT VT, MVT RetVT, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_X86ISD_UNPCKLPS_rr(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (VT.SimpleTy) {
- case MVT::i8: return FastEmit_X86ISD_XOR_MVT_i8_rr(RetVT, Op0, Op1);
- case MVT::i16: return FastEmit_X86ISD_XOR_MVT_i16_rr(RetVT, Op0, Op1);
- case MVT::i32: return FastEmit_X86ISD_XOR_MVT_i32_rr(RetVT, Op0, Op1);
- case MVT::i64: return FastEmit_X86ISD_XOR_MVT_i64_rr(RetVT, Op0, Op1);
+ case MVT::v4f32: return FastEmit_X86ISD_UNPCKLPS_MVT_v4f32_rr(RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
// Top-level FastEmit function.
-unsigned FastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, unsigned Op1) {
+unsigned FastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0, bool Op0IsKill, unsigned Op1, bool Op1IsKill) {
switch (Opcode) {
- case ISD::ADD: return FastEmit_ISD_ADD_rr(VT, RetVT, Op0, Op1);
- case ISD::ADDC: return FastEmit_ISD_ADDC_rr(VT, RetVT, Op0, Op1);
- case ISD::ADDE: return FastEmit_ISD_ADDE_rr(VT, RetVT, Op0, Op1);
- case ISD::AND: return FastEmit_ISD_AND_rr(VT, RetVT, Op0, Op1);
- case ISD::FADD: return FastEmit_ISD_FADD_rr(VT, RetVT, Op0, Op1);
- case ISD::FDIV: return FastEmit_ISD_FDIV_rr(VT, RetVT, Op0, Op1);
- case ISD::FMUL: return FastEmit_ISD_FMUL_rr(VT, RetVT, Op0, Op1);
- case ISD::FSUB: return FastEmit_ISD_FSUB_rr(VT, RetVT, Op0, Op1);
- case ISD::MUL: return FastEmit_ISD_MUL_rr(VT, RetVT, Op0, Op1);
- case ISD::OR: return FastEmit_ISD_OR_rr(VT, RetVT, Op0, Op1);
- case ISD::SUB: return FastEmit_ISD_SUB_rr(VT, RetVT, Op0, Op1);
- case ISD::SUBC: return FastEmit_ISD_SUBC_rr(VT, RetVT, Op0, Op1);
- case ISD::SUBE: return FastEmit_ISD_SUBE_rr(VT, RetVT, Op0, Op1);
- case ISD::XOR: return FastEmit_ISD_XOR_rr(VT, RetVT, Op0, Op1);
- case X86ISD::ADD: return FastEmit_X86ISD_ADD_rr(VT, RetVT, Op0, Op1);
- case X86ISD::AND: return FastEmit_X86ISD_AND_rr(VT, RetVT, Op0, Op1);
- case X86ISD::BT: return FastEmit_X86ISD_BT_rr(VT, RetVT, Op0, Op1);
- case X86ISD::CMP: return FastEmit_X86ISD_CMP_rr(VT, RetVT, Op0, Op1);
- case X86ISD::COMI: return FastEmit_X86ISD_COMI_rr(VT, RetVT, Op0, Op1);
- case X86ISD::FAND: return FastEmit_X86ISD_FAND_rr(VT, RetVT, Op0, Op1);
- case X86ISD::FMAX: return FastEmit_X86ISD_FMAX_rr(VT, RetVT, Op0, Op1);
- case X86ISD::FMIN: return FastEmit_X86ISD_FMIN_rr(VT, RetVT, Op0, Op1);
- case X86ISD::FOR: return FastEmit_X86ISD_FOR_rr(VT, RetVT, Op0, Op1);
- case X86ISD::FXOR: return FastEmit_X86ISD_FXOR_rr(VT, RetVT, Op0, Op1);
- case X86ISD::OR: return FastEmit_X86ISD_OR_rr(VT, RetVT, Op0, Op1);
- case X86ISD::PCMPEQB: return FastEmit_X86ISD_PCMPEQB_rr(VT, RetVT, Op0, Op1);
- case X86ISD::PCMPEQD: return FastEmit_X86ISD_PCMPEQD_rr(VT, RetVT, Op0, Op1);
- case X86ISD::PCMPEQQ: return FastEmit_X86ISD_PCMPEQQ_rr(VT, RetVT, Op0, Op1);
- case X86ISD::PCMPEQW: return FastEmit_X86ISD_PCMPEQW_rr(VT, RetVT, Op0, Op1);
- case X86ISD::PCMPGTB: return FastEmit_X86ISD_PCMPGTB_rr(VT, RetVT, Op0, Op1);
- case X86ISD::PCMPGTD: return FastEmit_X86ISD_PCMPGTD_rr(VT, RetVT, Op0, Op1);
- case X86ISD::PCMPGTQ: return FastEmit_X86ISD_PCMPGTQ_rr(VT, RetVT, Op0, Op1);
- case X86ISD::PCMPGTW: return FastEmit_X86ISD_PCMPGTW_rr(VT, RetVT, Op0, Op1);
- case X86ISD::PSHUFB: return FastEmit_X86ISD_PSHUFB_rr(VT, RetVT, Op0, Op1);
- case X86ISD::PTEST: return FastEmit_X86ISD_PTEST_rr(VT, RetVT, Op0, Op1);
- case X86ISD::SMUL: return FastEmit_X86ISD_SMUL_rr(VT, RetVT, Op0, Op1);
- case X86ISD::SUB: return FastEmit_X86ISD_SUB_rr(VT, RetVT, Op0, Op1);
- case X86ISD::UCOMI: return FastEmit_X86ISD_UCOMI_rr(VT, RetVT, Op0, Op1);
- case X86ISD::XOR: return FastEmit_X86ISD_XOR_rr(VT, RetVT, Op0, Op1);
+ case ISD::ADD: return FastEmit_ISD_ADD_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::ADDC: return FastEmit_ISD_ADDC_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::ADDE: return FastEmit_ISD_ADDE_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::AND: return FastEmit_ISD_AND_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::FADD: return FastEmit_ISD_FADD_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::FDIV: return FastEmit_ISD_FDIV_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::FMUL: return FastEmit_ISD_FMUL_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::FSUB: return FastEmit_ISD_FSUB_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::MUL: return FastEmit_ISD_MUL_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::OR: return FastEmit_ISD_OR_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::ROTL: return FastEmit_ISD_ROTL_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::ROTR: return FastEmit_ISD_ROTR_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::SHL: return FastEmit_ISD_SHL_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::SRA: return FastEmit_ISD_SRA_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::SRL: return FastEmit_ISD_SRL_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::SUB: return FastEmit_ISD_SUB_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::SUBC: return FastEmit_ISD_SUBC_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::SUBE: return FastEmit_ISD_SUBE_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case ISD::XOR: return FastEmit_ISD_XOR_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::BT: return FastEmit_X86ISD_BT_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::CMP: return FastEmit_X86ISD_CMP_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::COMI: return FastEmit_X86ISD_COMI_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::FAND: return FastEmit_X86ISD_FAND_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::FMAX: return FastEmit_X86ISD_FMAX_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::FMIN: return FastEmit_X86ISD_FMIN_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::FOR: return FastEmit_X86ISD_FOR_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::FXOR: return FastEmit_X86ISD_FXOR_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::MOVHLPS: return FastEmit_X86ISD_MOVHLPS_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::MOVLHPS: return FastEmit_X86ISD_MOVLHPS_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PCMPEQB: return FastEmit_X86ISD_PCMPEQB_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PCMPEQD: return FastEmit_X86ISD_PCMPEQD_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PCMPEQQ: return FastEmit_X86ISD_PCMPEQQ_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PCMPEQW: return FastEmit_X86ISD_PCMPEQW_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PCMPGTB: return FastEmit_X86ISD_PCMPGTB_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PCMPGTD: return FastEmit_X86ISD_PCMPGTD_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PCMPGTQ: return FastEmit_X86ISD_PCMPGTQ_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PCMPGTW: return FastEmit_X86ISD_PCMPGTW_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PSHUFB: return FastEmit_X86ISD_PSHUFB_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PTEST: return FastEmit_X86ISD_PTEST_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PUNPCKHBW: return FastEmit_X86ISD_PUNPCKHBW_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PUNPCKHDQ: return FastEmit_X86ISD_PUNPCKHDQ_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PUNPCKHQDQ: return FastEmit_X86ISD_PUNPCKHQDQ_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PUNPCKHWD: return FastEmit_X86ISD_PUNPCKHWD_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PUNPCKLBW: return FastEmit_X86ISD_PUNPCKLBW_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PUNPCKLDQ: return FastEmit_X86ISD_PUNPCKLDQ_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PUNPCKLQDQ: return FastEmit_X86ISD_PUNPCKLQDQ_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::PUNPCKLWD: return FastEmit_X86ISD_PUNPCKLWD_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::TESTP: return FastEmit_X86ISD_TESTP_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::UCOMI: return FastEmit_X86ISD_UCOMI_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::UNPCKHPD: return FastEmit_X86ISD_UNPCKHPD_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::UNPCKHPS: return FastEmit_X86ISD_UNPCKHPS_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::UNPCKLPD: return FastEmit_X86ISD_UNPCKLPD_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
+ case X86ISD::UNPCKLPS: return FastEmit_X86ISD_UNPCKLPS_rr(VT, RetVT, Op0, Op0IsKill, Op1, Op1IsKill);
default: return 0;
}
}
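
The regenerated FastEmit table above always dispatches in three steps: the top-level FastEmit_rr() switches on the ISD/X86ISD opcode, each per-opcode helper switches on the operand value type, and each per-type helper checks subtarget feature predicates (typically trying the AVX form first and falling back to the plain SSE form) before calling FastEmitInst_rr(); the new Op0IsKill/Op1IsKill flags are simply threaded through every level down to the emitter. Below is a minimal standalone sketch of that cascade; every name in it (ToySubtarget, fastEmit_*, emitInst_rr, the made-up machine opcode numbers) is illustrative only and not the real LLVM 2.8 API.

// Minimal standalone sketch of the three-level FastEmit dispatch shown in the
// regenerated table above: opcode -> value type -> predicate-guarded emit,
// threading the Op0IsKill/Op1IsKill flags down to the instruction emitter.
// All names below are hypothetical stand-ins, not the real LLVM classes.
#include <cstdio>

enum ToyOpcode { OP_FMIN, OP_OTHER };
enum ToyVT     { VT_v4f32, VT_other };

struct ToySubtarget {
  bool hasAVX;
  bool hasSSE1;
};

// Stand-in for FastEmitInst_rr(): prints what would be emitted and returns a
// nonzero value standing in for the result virtual register.
static unsigned emitInst_rr(unsigned MachineOpc, unsigned Op0, bool Op0IsKill,
                            unsigned Op1, bool Op1IsKill) {
  std::printf("emit opc%u: vr%u%s, vr%u%s\n", MachineOpc,
              Op0, Op0IsKill ? "<kill>" : "",
              Op1, Op1IsKill ? "<kill>" : "");
  return MachineOpc;
}

// Innermost level, one function per (opcode, type): prefer the AVX form and
// fall back to the plain SSE form, like the v4f32 FMIN helper above.
static unsigned fastEmit_FMIN_v4f32_rr(const ToySubtarget &ST,
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill) {
  if (ST.hasAVX)
    return emitInst_rr(/*"VMINPSrr"=*/101, Op0, Op0IsKill, Op1, Op1IsKill);
  if (ST.hasSSE1 && !ST.hasAVX)
    return emitInst_rr(/*"MINPSrr"=*/100, Op0, Op0IsKill, Op1, Op1IsKill);
  return 0; // 0 means "cannot fast-emit this node, let the caller bail out"
}

// Middle level, one function per opcode: switch on the operand value type.
static unsigned fastEmit_FMIN_rr(const ToySubtarget &ST, ToyVT VT,
                                 unsigned Op0, bool Op0IsKill,
                                 unsigned Op1, bool Op1IsKill) {
  switch (VT) {
  case VT_v4f32: return fastEmit_FMIN_v4f32_rr(ST, Op0, Op0IsKill, Op1, Op1IsKill);
  default:       return 0;
  }
}

// Top level: switch on the opcode, mirroring FastEmit_rr() above.
static unsigned fastEmit_rr(const ToySubtarget &ST, ToyOpcode Opc, ToyVT VT,
                            unsigned Op0, bool Op0IsKill,
                            unsigned Op1, bool Op1IsKill) {
  switch (Opc) {
  case OP_FMIN: return fastEmit_FMIN_rr(ST, VT, Op0, Op0IsKill, Op1, Op1IsKill);
  default:      return 0;
  }
}

int main() {
  ToySubtarget avx = { true,  true };
  ToySubtarget sse = { false, true };
  fastEmit_rr(avx, OP_FMIN, VT_v4f32, 5, true,  6, false); // picks the "AVX" form
  fastEmit_rr(sse, OP_FMIN, VT_v4f32, 5, false, 6, true);  // picks the "SSE" form
  return 0;
}
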
diff --git a/libclamav/c++/X86GenInstrInfo.inc b/libclamav/c++/X86GenInstrInfo.inc
index 33e0003..1d842bf 100644
--- a/libclamav/c++/X86GenInstrInfo.inc
+++ b/libclamav/c++/X86GenInstrInfo.inc
@@ -15,12 +15,12 @@ static const unsigned ImplicitList3[] = { X86::ESP, X86::EFLAGS, 0 };
static const unsigned ImplicitList4[] = { X86::RSP, 0 };
static const unsigned ImplicitList5[] = { X86::RSP, X86::EFLAGS, 0 };
static const unsigned ImplicitList6[] = { X86::EAX, X86::EBX, X86::ECX, X86::EDX, 0 };
-static const TargetRegisterClass* Barriers2[] = { &X86::CCRRegClass, &X86::GR32_ABCDRegClass, &X86::GR32_ADRegClass, NULL };
+static const TargetRegisterClass* Barriers2[] = { &X86::CCRRegClass, &X86::GR32_ABCDRegClass, &X86::GR32_ADRegClass, &X86::GR32_TCRegClass, NULL };
static const unsigned ImplicitList7[] = { X86::EFLAGS, X86::EAX, X86::EBX, X86::ECX, X86::EDX, 0 };
static const unsigned ImplicitList8[] = { X86::XMM0, 0 };
-static const TargetRegisterClass* Barriers3[] = { &X86::CCRRegClass, &X86::FR32RegClass, &X86::FR64RegClass, &X86::GR32_ADRegClass, &X86::RFP32RegClass, &X86::RFP64RegClass, &X86::RFP80RegClass, &X86::VR128RegClass, &X86::VR64RegClass, NULL };
+static const TargetRegisterClass* Barriers3[] = { &X86::CCRRegClass, &X86::FR32RegClass, &X86::FR64RegClass, &X86::GR32_ADRegClass, &X86::GR32_TCRegClass, &X86::RFP32RegClass, &X86::RFP64RegClass, &X86::RFP80RegClass, &X86::VR128RegClass, &X86::VR64RegClass, NULL };
static const unsigned ImplicitList9[] = { X86::EAX, X86::ECX, X86::EDX, X86::FP0, X86::FP1, X86::FP2, X86::FP3, X86::FP4, X86::FP5, X86::FP6, X86::ST0, X86::MM0, X86::MM1, X86::MM2, X86::MM3, X86::MM4, X86::MM5, X86::MM6, X86::MM7, X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9, X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13, X86::XMM14, X86::XMM15, X86::EFLAGS, 0 };
-static const TargetRegisterClass* Barriers4[] = { &X86::CCRRegClass, &X86::FR32RegClass, &X86::FR64RegClass, &X86::RFP32RegClass, &X86::RFP64RegClass, &X86::RFP80RegClass, &X86::VR128RegClass, &X86::VR64RegClass, NULL };
+static const TargetRegisterClass* Barriers4[] = { &X86::CCRRegClass, &X86::FR32RegClass, &X86::FR64RegClass, &X86::GR64_TCRegClass, &X86::RFP32RegClass, &X86::RFP64RegClass, &X86::RFP80RegClass, &X86::VR128RegClass, &X86::VR64RegClass, NULL };
static const unsigned ImplicitList10[] = { X86::RAX, X86::RCX, X86::RDX, X86::RSI, X86::RDI, X86::R8, X86::R9, X86::R10, X86::R11, X86::FP0, X86::FP1, X86::FP2, X86::FP3, X86::FP4, X86::FP5, X86::FP6, X86::ST0, X86::ST1, X86::MM0, X86::MM1, X86::MM2, X86::MM3, X86::MM4, X86::MM5, X86::MM6, X86::MM7, X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9, X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13, X86::XMM14, X86::XMM15, X86::EFLAGS, 0 };
static const unsigned ImplicitList11[] = { X86::AL, 0 };
static const unsigned ImplicitList12[] = { X86::AX, 0 };
@@ -36,21 +36,20 @@ static const unsigned ImplicitList19[] = { X86::RAX, X86::RDX, 0 };
static const unsigned ImplicitList20[] = { X86::AX, X86::DX, 0 };
static const unsigned ImplicitList21[] = { X86::AX, X86::DX, X86::EFLAGS, 0 };
static const unsigned ImplicitList22[] = { X86::AL, X86::EFLAGS, X86::AX, 0 };
-static const TargetRegisterClass* Barriers7[] = { &X86::RFP32RegClass, &X86::RFP64RegClass, &X86::RFP80RegClass, NULL };
-static const unsigned ImplicitList23[] = { X86::FP0, X86::FP1, X86::FP2, X86::FP3, X86::FP4, X86::FP5, X86::FP6, 0 };
-static const unsigned ImplicitList24[] = { X86::ST0, 0 };
-static const unsigned ImplicitList25[] = { X86::ST1, 0 };
-static const unsigned ImplicitList26[] = { X86::DX, 0 };
-static const unsigned ImplicitList27[] = { X86::ECX, 0 };
-static const unsigned ImplicitList28[] = { X86::AH, 0 };
-static const unsigned ImplicitList29[] = { X86::AX, X86::EFLAGS, 0 };
-static const unsigned ImplicitList30[] = { X86::EAX, X86::EFLAGS, 0 };
-static const unsigned ImplicitList31[] = { X86::RAX, X86::EFLAGS, 0 };
-static const unsigned ImplicitList32[] = { X86::AL, X86::EFLAGS, 0 };
-static const unsigned ImplicitList33[] = { X86::EBP, X86::ESP, 0 };
-static const unsigned ImplicitList34[] = { X86::RBP, X86::RSP, 0 };
-static const unsigned ImplicitList35[] = { X86::EDI, 0 };
-static const unsigned ImplicitList36[] = { X86::RDI, 0 };
+static const unsigned ImplicitList23[] = { X86::ST0, 0 };
+static const unsigned ImplicitList24[] = { X86::ST1, 0 };
+static const unsigned ImplicitList25[] = { X86::DX, 0 };
+static const unsigned ImplicitList26[] = { X86::ECX, 0 };
+static const unsigned ImplicitList27[] = { X86::AH, 0 };
+static const unsigned ImplicitList28[] = { X86::AX, X86::EFLAGS, 0 };
+static const unsigned ImplicitList29[] = { X86::EAX, X86::EFLAGS, 0 };
+static const unsigned ImplicitList30[] = { X86::RAX, X86::EFLAGS, 0 };
+static const unsigned ImplicitList31[] = { X86::AL, X86::EFLAGS, 0 };
+static const unsigned ImplicitList32[] = { X86::EBP, X86::ESP, 0 };
+static const unsigned ImplicitList33[] = { X86::RBP, X86::RSP, 0 };
+static const unsigned ImplicitList34[] = { X86::EDI, 0 };
+static const unsigned ImplicitList35[] = { X86::RDI, 0 };
+static const unsigned ImplicitList36[] = { X86::EAX, X86::ESP, X86::EFLAGS, 0 };
static const unsigned ImplicitList37[] = { X86::EDI, X86::ESI, X86::EFLAGS, 0 };
static const unsigned ImplicitList38[] = { X86::EDI, X86::ESI, 0 };
static const unsigned ImplicitList39[] = { X86::DX, X86::AX, 0 };
@@ -58,2747 +57,3937 @@ static const unsigned ImplicitList40[] = { X86::DX, X86::EAX, 0 };
static const unsigned ImplicitList41[] = { X86::DX, X86::AL, 0 };
static const unsigned ImplicitList42[] = { X86::ECX, X86::EFLAGS, 0 };
static const unsigned ImplicitList43[] = { X86::XMM0, X86::EFLAGS, 0 };
-static const unsigned ImplicitList44[] = { X86::CL, 0 };
-static const unsigned ImplicitList45[] = { X86::RAX, X86::RCX, X86::RDX, 0 };
-static const unsigned ImplicitList46[] = { X86::ECX, X86::EDI, X86::ESI, 0 };
-static const unsigned ImplicitList47[] = { X86::RCX, X86::RDI, X86::RSI, 0 };
-static const unsigned ImplicitList48[] = { X86::AL, X86::ECX, X86::EDI, 0 };
-static const unsigned ImplicitList49[] = { X86::ECX, X86::EDI, 0 };
-static const unsigned ImplicitList50[] = { X86::EAX, X86::ECX, X86::EDI, 0 };
-static const unsigned ImplicitList51[] = { X86::RAX, X86::RCX, X86::RDI, 0 };
-static const unsigned ImplicitList52[] = { X86::RCX, X86::RDI, 0 };
-static const unsigned ImplicitList53[] = { X86::AX, X86::ECX, X86::EDI, 0 };
-static const unsigned ImplicitList54[] = { X86::AL, X86::EDI, X86::EFLAGS, 0 };
-static const unsigned ImplicitList55[] = { X86::EAX, X86::EDI, X86::EFLAGS, 0 };
-static const unsigned ImplicitList56[] = { X86::AX, X86::EDI, X86::EFLAGS, 0 };
+static const TargetRegisterClass* Barriers7[] = { &X86::GR32_ABCDRegClass, &X86::GR32_ADRegClass, &X86::GR32_NOREXRegClass, &X86::GR32_TCRegClass, NULL };
+static const unsigned ImplicitList44[] = { X86::EDI, X86::ESI, X86::EBP, X86::EBX, X86::EDX, X86::ECX, X86::EAX, X86::ESP, 0 };
+static const unsigned ImplicitList45[] = { X86::CL, 0 };
+static const unsigned ImplicitList46[] = { X86::RAX, X86::RCX, X86::RDX, 0 };
+static const unsigned ImplicitList47[] = { X86::ECX, X86::EDI, X86::ESI, 0 };
+static const unsigned ImplicitList48[] = { X86::RCX, X86::RDI, X86::RSI, 0 };
+static const unsigned ImplicitList49[] = { X86::AL, X86::ECX, X86::EDI, 0 };
+static const unsigned ImplicitList50[] = { X86::ECX, X86::EDI, 0 };
+static const unsigned ImplicitList51[] = { X86::EAX, X86::ECX, X86::EDI, 0 };
+static const unsigned ImplicitList52[] = { X86::RAX, X86::RCX, X86::RDI, 0 };
+static const unsigned ImplicitList53[] = { X86::RCX, X86::RDI, 0 };
+static const unsigned ImplicitList54[] = { X86::AX, X86::ECX, X86::EDI, 0 };
+static const unsigned ImplicitList55[] = { X86::AL, X86::EDI, X86::EFLAGS, 0 };
+static const unsigned ImplicitList56[] = { X86::EAX, X86::EDI, X86::EFLAGS, 0 };
+static const unsigned ImplicitList57[] = { X86::RAX, X86::RCX, X86::RDI, X86::EFLAGS, 0 };
+static const unsigned ImplicitList58[] = { X86::AX, X86::EDI, X86::EFLAGS, 0 };
+static const unsigned ImplicitList59[] = { X86::EAX, X86::ECX, 0 };
static const TargetRegisterClass* Barriers8[] = { &X86::CCRRegClass, &X86::RFP32RegClass, &X86::RFP64RegClass, &X86::RFP80RegClass, &X86::VR64RegClass, NULL };
-static const unsigned ImplicitList57[] = { X86::RAX, X86::RCX, X86::RDX, X86::R8, X86::R9, X86::R10, X86::R11, X86::FP0, X86::FP1, X86::FP2, X86::FP3, X86::FP4, X86::FP5, X86::FP6, X86::ST0, X86::ST1, X86::MM0, X86::MM1, X86::MM2, X86::MM3, X86::MM4, X86::MM5, X86::MM6, X86::MM7, X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::EFLAGS, 0 };
+static const unsigned ImplicitList60[] = { X86::RAX, X86::RCX, X86::RDX, X86::R8, X86::R9, X86::R10, X86::R11, X86::FP0, X86::FP1, X86::FP2, X86::FP3, X86::FP4, X86::FP5, X86::FP6, X86::ST0, X86::ST1, X86::MM0, X86::MM1, X86::MM2, X86::MM3, X86::MM4, X86::MM5, X86::MM6, X86::MM7, X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::EFLAGS, 0 };
-static const TargetOperandInfo OperandInfo2[] = { { X86::RFP32RegClassID, 0, 0 }, { X86::RFP32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo3[] = { { X86::RFP64RegClassID, 0, 0 }, { X86::RFP64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo4[] = { { X86::RFP80RegClassID, 0, 0 }, { X86::RFP80RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo5[] = { { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo6[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo7[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::GR16RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo8[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo9[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo10[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR16RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo11[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo12[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo13[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo14[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo15[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo16[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo17[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo18[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo19[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo20[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::GR8RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo21[] = { { X86::GR8RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo22[] = { { X86::GR8RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo23[] = { { X86::GR8RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR8RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo24[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo25[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::VR128RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo26[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo27[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::FR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo28[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo29[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::FR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo30[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo31[] = { { X86::RSTRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo32[] = { { X86::RFP32RegClassID, 0, 0 }, { X86::RFP32RegClassID, 0, 0 }, { X86::RFP32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo33[] = { { X86::RFP32RegClassID, 0, 0 }, { X86::RFP32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo34[] = { { X86::RFP64RegClassID, 0, 0 }, { X86::RFP64RegClassID, 0, 0 }, { X86::RFP64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo35[] = { { X86::RFP64RegClassID, 0, 0 }, { X86::RFP64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo36[] = { { X86::RFP80RegClassID, 0, 0 }, { X86::RFP80RegClassID, 0, 0 }, { X86::RFP80RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo37[] = { { X86::RFP80RegClassID, 0, 0 }, { X86::RFP80RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo38[] = { { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo39[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR32RegClassID, 0, ((1 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo40[] = { { X86::GR16RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::GR16RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo41[] = { { X86::GR32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo42[] = { { X86::GR64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::GR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo43[] = { { X86::GR8RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::GR8RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo44[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo45[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::VR128RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo46[] = { { X86::GR16RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo47[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo48[] = { { X86::GR32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo49[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo50[] = { { X86::GR64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo51[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo52[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo53[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo54[] = { { X86::GR16RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo55[] = { { X86::GR32RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo56[] = { { X86::GR64RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo57[] = { { X86::GR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo58[] = { { X86::GR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo59[] = { { X86::RFP32RegClassID, 0, 0 }, { X86::RFP32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::RFP32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo60[] = { { X86::RFP64RegClassID, 0, 0 }, { X86::RFP64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::RFP64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo61[] = { { X86::RFP80RegClassID, 0, 0 }, { X86::RFP80RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::RFP80RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo62[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo63[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo64[] = { { X86::GR8RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo65[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo66[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo67[] = { { X86::GR8RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo68[] = { { X86::GR8RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo69[] = { { X86::GR8RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo70[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo71[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::FR64RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo72[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo73[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::FR32RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo74[] = { { X86::VR128RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo75[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo76[] = { { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo77[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR16RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo78[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR8RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo79[] = { { X86::GR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo80[] = { { X86::FR32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo81[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo82[] = { { X86::FR64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo83[] = { { X86::FR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo84[] = { { X86::FR64RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo85[] = { { X86::FR32RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo86[] = { { X86::FR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo87[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo88[] = { { X86::GR64RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo89[] = { { X86::GR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo90[] = { { X86::GR32RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo91[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo92[] = { { X86::GR8RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
-static const TargetOperandInfo OperandInfo93[] = { { X86::GR16RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo94[] = { { X86::GR8RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo95[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo96[] = { { X86::GR32RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo97[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::RFP32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo98[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::RFP64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo99[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::RFP80RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo100[] = { { X86::RFP32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo101[] = { { X86::RFP64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo102[] = { { X86::RFP80RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo103[] = { { X86::FR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo104[] = { { X86::FR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo105[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo106[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo107[] = { { X86::RFP32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo108[] = { { X86::RFP64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo109[] = { { X86::RFP80RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo110[] = { { X86::GR16RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo111[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo112[] = { { X86::GR32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo113[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo114[] = { { X86::GR64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo115[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo116[] = { { 0, 0, 0 }, { 0, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo117[] = { { X86::VR64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo118[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo119[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo120[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::VR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo121[] = { { X86::GR64RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo122[] = { { X86::GR32RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo123[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo124[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo125[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo126[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, { 0, 0, 0 }, { X86::GR32_NOSPRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo127[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, { 0, 0, 0 }, { X86::GR32_NOSPRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo128[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, { 0, 0, 0 }, { X86::GR64_NOSPRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo129[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo130[] = { { X86::GR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo131[] = { { X86::GR32RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo132[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo133[] = { { X86::VR64RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo134[] = { { X86::VR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo135[] = { { X86::FR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo136[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo137[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::VR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo138[] = { { X86::GR32RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo139[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo140[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR32RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo141[] = { { X86::VR64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo142[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo143[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo144[] = { { X86::VR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo145[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::SEGMENT_REGRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo146[] = { { X86::GR16RegClassID, 0, 0 }, { X86::SEGMENT_REGRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo147[] = { { X86::SEGMENT_REGRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo148[] = { { X86::SEGMENT_REGRegClassID, 0, 0 }, { X86::GR16RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo149[] = { { X86::CONTROL_REG_32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo150[] = { { X86::DEBUG_REGRegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo151[] = { { X86::GR32RegClassID, 0, 0 }, { X86::CONTROL_REG_32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo152[] = { { X86::GR32RegClassID, 0, 0 }, { X86::DEBUG_REGRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo153[] = { { X86::CONTROL_REG_64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo154[] = { { X86::DEBUG_REGRegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo155[] = { { X86::GR64RegClassID, 0, 0 }, { X86::CONTROL_REG_64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo156[] = { { X86::GR64RegClassID, 0, 0 }, { X86::DEBUG_REGRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo157[] = { { X86::GR64RegClassID, 0, 0 }, { X86::SEGMENT_REGRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo158[] = { { X86::SEGMENT_REGRegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo159[] = { { X86::VR128RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo160[] = { { X86::GR64_NOREXRegClassID, 0, 0 }, { 0, 0, 0 }, { X86::GR64_NOREX_NOSPRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::GR8_NOREXRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo161[] = { { X86::GR8_NOREXRegClassID, 0, 0 }, { X86::GR64_NOREXRegClassID, 0, 0 }, { 0, 0, 0 }, { X86::GR64_NOREX_NOSPRegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo162[] = { { X86::GR8_NOREXRegClassID, 0, 0 }, { X86::GR8_NOREXRegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo163[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo164[] = { { X86::VR128RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo165[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo166[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::FR64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo167[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo168[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::FR32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo169[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo170[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo171[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo172[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo173[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo174[] = { { X86::GR32_NOREXRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo175[] = { { X86::GR32_NOREXRegClassID, 0, 0 }, { X86::GR8RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo176[] = { { X86::RFP64RegClassID, 0, 0 }, { X86::RFP32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo177[] = { { X86::RFP80RegClassID, 0, 0 }, { X86::RFP32RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo178[] = { { X86::RFP32RegClassID, 0, 0 }, { X86::RFP64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo179[] = { { X86::RFP80RegClassID, 0, 0 }, { X86::RFP64RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo180[] = { { X86::RFP32RegClassID, 0, 0 }, { X86::RFP80RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo181[] = { { X86::RFP64RegClassID, 0, 0 }, { X86::RFP80RegClassID, 0, 0 }, };
-static const TargetOperandInfo OperandInfo182[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::VR64RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo183[] = { { X86::VR128RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo184[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo185[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo186[] = { { X86::GR64RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo187[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR32RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo188[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR64RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo189[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo190[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::GR16RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo191[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR16RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo192[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo193[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR32RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo194[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo195[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR64RegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo196[] = { { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo197[] = { { X86::GR32RegClassID, 0, 0 }, { 0, 0, 0 }, { X86::GR32_NOSPRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo198[] = { { X86::GR64RegClassID, 0, 0 }, { 0, 0, 0 }, { X86::GR64_NOSPRegClassID, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo199[] = { { X86::GR8RegClassID, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, };
-static const TargetOperandInfo OperandInfo200[] = { { X86::VR128RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo2[] = { { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo3[] = { { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo4[] = { { -1, 0, 0 }, { -1, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo5[] = { { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo6[] = { { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo7[] = { { X86::RFP32RegClassID, 0, 0 }, { X86::RFP32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo8[] = { { X86::RFP64RegClassID, 0, 0 }, { X86::RFP64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo9[] = { { X86::RFP80RegClassID, 0, 0 }, { X86::RFP80RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo10[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo11[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::GR16RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo12[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo13[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo14[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR16RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo15[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo16[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo17[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo18[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo19[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo20[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo21[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo22[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo23[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo24[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::GR8RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo25[] = { { X86::GR8RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo26[] = { { X86::GR8RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo27[] = { { X86::GR8RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR8RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo28[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo29[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::VR128RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo30[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo31[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::FR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo32[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo33[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::FR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo34[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo35[] = { { X86::RSTRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo36[] = { { X86::RFP32RegClassID, 0, 0 }, { X86::RFP32RegClassID, 0, 0 }, { X86::RFP32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo37[] = { { X86::RFP32RegClassID, 0, 0 }, { X86::RFP32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo38[] = { { X86::RFP64RegClassID, 0, 0 }, { X86::RFP64RegClassID, 0, 0 }, { X86::RFP64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo39[] = { { X86::RFP64RegClassID, 0, 0 }, { X86::RFP64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo40[] = { { X86::RFP80RegClassID, 0, 0 }, { X86::RFP80RegClassID, 0, 0 }, { X86::RFP80RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo41[] = { { X86::RFP80RegClassID, 0, 0 }, { X86::RFP80RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo42[] = { { X86::VR128RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo43[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo44[] = { { X86::VR128RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo45[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo46[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR32RegClassID, 0, ((1 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo47[] = { { X86::GR16RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::GR16RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo48[] = { { X86::GR32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo49[] = { { X86::GR64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::GR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo50[] = { { X86::GR8RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::GR8RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo51[] = { { X86::VR128RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo52[] = { { X86::VR256RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo53[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo54[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::VR128RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo55[] = { { X86::GR16RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo56[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo57[] = { { X86::GR32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo58[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo59[] = { { X86::GR64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo60[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo61[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo62[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo63[] = { { X86::GR16RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo64[] = { { X86::GR32RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo65[] = { { X86::GR64RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo66[] = { { X86::GR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo67[] = { { X86::GR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo68[] = { { X86::RFP32RegClassID, 0, 0 }, { X86::RFP32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::RFP32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo69[] = { { X86::RFP64RegClassID, 0, 0 }, { X86::RFP64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::RFP64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo70[] = { { X86::RFP80RegClassID, 0, 0 }, { X86::RFP80RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::RFP80RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo71[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo72[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo73[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo74[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo75[] = { { X86::GR8RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo76[] = { { X86::RFP32RegClassID, 0, 0 }, { X86::RFP32RegClassID, 0, 0 }, { X86::RFP32RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo77[] = { { X86::RFP64RegClassID, 0, 0 }, { X86::RFP64RegClassID, 0, 0 }, { X86::RFP64RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo78[] = { { X86::RFP80RegClassID, 0, 0 }, { X86::RFP80RegClassID, 0, 0 }, { X86::RFP80RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo79[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo80[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo81[] = { { X86::GR8RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo82[] = { { X86::GR8RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo83[] = { { X86::GR8RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo84[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo85[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::FR64RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo86[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo87[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::FR32RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo88[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR16RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo89[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR8RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo90[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR8RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo91[] = { { X86::GR64RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo92[] = { { X86::FR32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo93[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo94[] = { { X86::FR64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo95[] = { { X86::FR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo96[] = { { X86::FR64RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo97[] = { { X86::FR32RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo98[] = { { X86::FR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo99[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo100[] = { { X86::GR64RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo101[] = { { X86::GR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo102[] = { { X86::GR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo103[] = { { X86::GR32RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo104[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo105[] = { { X86::GR8RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, };
+static const TargetOperandInfo OperandInfo106[] = { { X86::GR16RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo107[] = { { X86::GR8RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo108[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo109[] = { { X86::GR32RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo110[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::RFP32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo111[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::RFP64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo112[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::RFP80RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo113[] = { { X86::RFP32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo114[] = { { X86::RFP64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo115[] = { { X86::RFP80RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo116[] = { { X86::FR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo117[] = { { X86::FR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo118[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo119[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo120[] = { { X86::RFP32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo121[] = { { X86::RFP64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo122[] = { { X86::RFP80RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo123[] = { { X86::GR16RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo124[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo125[] = { { X86::GR32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo126[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo127[] = { { X86::GR64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo128[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo129[] = { { X86::VR64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo130[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo131[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo132[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::VR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo133[] = { { X86::GR32RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo134[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo135[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo136[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo137[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo138[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo139[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo140[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo141[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo142[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, { -1, 0, 0 }, { X86::GR32_NOSPRegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo143[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo144[] = { { X86::GR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo145[] = { { X86::GR32RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo146[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo147[] = { { X86::VR64RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo148[] = { { X86::VR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo149[] = { { X86::VR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo150[] = { { X86::FR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo151[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo152[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::VR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo153[] = { { X86::GR32RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo154[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo155[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR32RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo156[] = { { X86::VR64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo157[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo158[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo159[] = { { X86::VR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo160[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::SEGMENT_REGRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo161[] = { { X86::GR16RegClassID, 0, 0 }, { X86::SEGMENT_REGRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo162[] = { { X86::SEGMENT_REGRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo163[] = { { X86::SEGMENT_REGRegClassID, 0, 0 }, { X86::GR16RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo164[] = { { X86::CONTROL_REGRegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo165[] = { { X86::DEBUG_REGRegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo166[] = { { X86::GR32_TCRegClassID, 0, 0 }, { -1, 0, 0 }, { X86::GR32_TCRegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::GR32_TCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo167[] = { { X86::GR32RegClassID, 0, 0 }, { X86::CONTROL_REGRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo168[] = { { X86::GR32RegClassID, 0, 0 }, { X86::DEBUG_REGRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo169[] = { { X86::GR32_TCRegClassID, 0, 0 }, { X86::GR32_TCRegClassID, 0, 0 }, { -1, 0, 0 }, { X86::GR32_TCRegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo170[] = { { X86::GR32_TCRegClassID, 0, 0 }, { X86::GR32_TCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo171[] = { { X86::GR32RegClassID, 0, 0 }, { X86::SEGMENT_REGRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo172[] = { { X86::SEGMENT_REGRegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo173[] = { { X86::CONTROL_REGRegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo174[] = { { X86::DEBUG_REGRegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo175[] = { { X86::GR64_TCRegClassID, 0, 0 }, { -1, 0, 0 }, { X86::GR64_TCRegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::GR64_TCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo176[] = { { X86::GR64RegClassID, 0, 0 }, { X86::CONTROL_REGRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo177[] = { { X86::GR64RegClassID, 0, 0 }, { X86::DEBUG_REGRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo178[] = { { X86::GR64_TCRegClassID, 0, 0 }, { X86::GR64_TCRegClassID, 0, 0 }, { -1, 0, 0 }, { X86::GR64_TCRegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo179[] = { { X86::GR64_TCRegClassID, 0, 0 }, { X86::GR64_TCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo180[] = { { X86::GR64RegClassID, 0, 0 }, { X86::SEGMENT_REGRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo181[] = { { X86::SEGMENT_REGRegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo182[] = { { X86::VR128RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo183[] = { { X86::GR64_NOREXRegClassID, 0, 0 }, { -1, 0, 0 }, { X86::GR64_NOREX_NOSPRegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::GR8_NOREXRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo184[] = { { X86::GR8_NOREXRegClassID, 0, 0 }, { X86::GR64_NOREXRegClassID, 0, 0 }, { -1, 0, 0 }, { X86::GR64_NOREX_NOSPRegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo185[] = { { X86::GR8_NOREXRegClassID, 0, 0 }, { X86::GR8_NOREXRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo186[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo187[] = { { X86::VR128RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo188[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo189[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::FR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo190[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo191[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::FR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo192[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo193[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo194[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo195[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo196[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR8RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo197[] = { { X86::GR32_NOREXRegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo198[] = { { X86::GR32_NOREXRegClassID, 0, 0 }, { X86::GR8RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo199[] = { { X86::RFP64RegClassID, 0, 0 }, { X86::RFP32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo200[] = { { X86::RFP80RegClassID, 0, 0 }, { X86::RFP32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo201[] = { { X86::RFP32RegClassID, 0, 0 }, { X86::RFP64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo202[] = { { X86::RFP80RegClassID, 0, 0 }, { X86::RFP64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo203[] = { { X86::RFP32RegClassID, 0, 0 }, { X86::RFP80RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo204[] = { { X86::RFP64RegClassID, 0, 0 }, { X86::RFP80RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo205[] = { { X86::VR64RegClassID, 0, 0 }, { X86::VR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::VR64RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo206[] = { { X86::GR64RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo207[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR32RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo208[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR64RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo209[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo210[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::GR16RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo211[] = { { X86::GR16RegClassID, 0, 0 }, { X86::GR16RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR16RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo212[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo213[] = { { X86::GR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR32RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo214[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo215[] = { { X86::GR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, ((0 << 16) | (1 << TOI::TIED_TO)) }, { X86::GR64RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo216[] = { { X86::GR32_TCRegClassID, 0, 0 }, { -1, 0, 0 }, { X86::GR32_TCRegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo217[] = { { X86::GR64_TCRegClassID, 0, 0 }, { -1, 0, 0 }, { X86::GR64_TCRegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo218[] = { { X86::GR32_TCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo219[] = { { X86::GR64_TCRegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo220[] = { { X86::GR32_TCRegClassID, 0, 0 }, { -1, 0, 0 }, { X86::GR32_TCRegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo221[] = { { X86::GR64_TCRegClassID, 0, 0 }, { -1, 0, 0 }, { X86::GR64_TCRegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo222[] = { { X86::GR32_TCRegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo223[] = { { X86::GR64_TCRegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo224[] = { { X86::VR256RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo225[] = { { X86::VR256RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo226[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo227[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo228[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo229[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo230[] = { { X86::GR8RegClassID, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo231[] = { { X86::VR256RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo232[] = { { X86::VR256RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo233[] = { { X86::VR256RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo234[] = { { X86::VR256RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo235[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo236[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo237[] = { { X86::VR256RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo238[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo239[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo240[] = { { X86::VR256RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo241[] = { { X86::VR256RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo242[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo243[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo244[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo245[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo246[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo247[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo248[] = { { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo249[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo250[] = { { X86::FR64RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo251[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo252[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo253[] = { { X86::VR256RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo254[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo255[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo256[] = { { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo257[] = { { X86::GR64RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo258[] = { { X86::GR32RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo259[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { X86::FR64RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo260[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { X86::FR32RegClassID, 0, 0 }, };
+static const TargetOperandInfo OperandInfo261[] = { { X86::VR256RegClassID, 0, 0 }, { 0, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { 1, 0|(1<<TOI::LookupPtrRegClass), 0 }, { -1, 0, 0 }, { -1, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo262[] = { { X86::VR256RegClassID, 0, 0 }, { X86::VR256RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo263[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { X86::GR32RegClassID, 0, 0 }, { -1, 0, 0 }, };
+static const TargetOperandInfo OperandInfo264[] = { { X86::VR128RegClassID, 0, 0 }, { X86::VR128RegClassID, 0, 0 }, { X86::GR64RegClassID, 0, 0 }, { -1, 0, 0 }, };
static const TargetInstrDesc X86Insts[] = {
- { 0, 0, 0, 0, "PHI", 0|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, 0 }, // Inst #0 = PHI
- { 1, 0, 0, 0, "INLINEASM", 0|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, 0 }, // Inst #1 = INLINEASM
- { 2, 1, 0, 0, "DBG_LABEL", 0|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo5 }, // Inst #2 = DBG_LABEL
- { 3, 1, 0, 0, "EH_LABEL", 0|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo5 }, // Inst #3 = EH_LABEL
- { 4, 1, 0, 0, "GC_LABEL", 0|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo5 }, // Inst #4 = GC_LABEL
- { 5, 0, 0, 0, "KILL", 0|(1<<TID::Variadic), 0, NULL, NULL, NULL, 0 }, // Inst #5 = KILL
- { 6, 3, 1, 0, "EXTRACT_SUBREG", 0, 0, NULL, NULL, NULL, OperandInfo76 }, // Inst #6 = EXTRACT_SUBREG
- { 7, 4, 1, 0, "INSERT_SUBREG", 0, 0, NULL, NULL, NULL, OperandInfo116 }, // Inst #7 = INSERT_SUBREG
- { 8, 1, 1, 0, "IMPLICIT_DEF", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0, NULL, NULL, NULL, OperandInfo5 }, // Inst #8 = IMPLICIT_DEF
- { 9, 4, 1, 0, "SUBREG_TO_REG", 0, 0, NULL, NULL, NULL, OperandInfo196 }, // Inst #9 = SUBREG_TO_REG
- { 10, 3, 1, 0, "COPY_TO_REGCLASS", 0|(1<<TID::CheapAsAMove), 0, NULL, NULL, NULL, OperandInfo76 }, // Inst #10 = COPY_TO_REGCLASS
- { 11, 0, 0, 0, "DBG_VALUE", 0|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::CheapAsAMove), 0, NULL, NULL, NULL, 0 }, // Inst #11 = DBG_VALUE
- { 12, 0, 0, 0, "ABS_F", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(225<<24), NULL, NULL, NULL, 0 }, // Inst #12 = ABS_F
- { 13, 2, 1, 0, "ABS_Fp32", 0, 0|(3<<16), NULL, NULL, NULL, OperandInfo2 }, // Inst #13 = ABS_Fp32
- { 14, 2, 1, 0, "ABS_Fp64", 0, 0|(3<<16), NULL, NULL, NULL, OperandInfo3 }, // Inst #14 = ABS_Fp64
- { 15, 2, 1, 0, "ABS_Fp80", 0, 0|(3<<16), NULL, NULL, NULL, OperandInfo4 }, // Inst #15 = ABS_Fp80
- { 16, 1, 0, 0, "ADC16i16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(3<<13)|(21<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #16 = ADC16i16
- { 17, 6, 0, 0, "ADC16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|26|(1<<6)|(3<<13)|(129<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #17 = ADC16mi
- { 18, 6, 0, 0, "ADC16mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|26|(1<<6)|(1<<13)|(131<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #18 = ADC16mi8
- { 19, 6, 0, 0, "ADC16mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<6)|(17<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #19 = ADC16mr
- { 20, 3, 1, 0, "ADC16ri", 0, 0|18|(1<<6)|(3<<13)|(129<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #20 = ADC16ri
- { 21, 3, 1, 0, "ADC16ri8", 0, 0|18|(1<<6)|(1<<13)|(131<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #21 = ADC16ri8
- { 22, 7, 1, 0, "ADC16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(19<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #22 = ADC16rm
- { 23, 3, 1, 0, "ADC16rr", 0|(1<<TID::Commutable), 0|3|(1<<6)|(17<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #23 = ADC16rr
- { 24, 3, 1, 0, "ADC16rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(19<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #24 = ADC16rr_REV
- { 25, 1, 0, 0, "ADC32i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<13)|(21<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #25 = ADC32i32
- { 26, 6, 0, 0, "ADC32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|26|(4<<13)|(129<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #26 = ADC32mi
- { 27, 6, 0, 0, "ADC32mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|26|(1<<13)|(131<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #27 = ADC32mi8
- { 28, 6, 0, 0, "ADC32mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(17<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #28 = ADC32mr
- { 29, 3, 1, 0, "ADC32ri", 0, 0|18|(4<<13)|(129<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #29 = ADC32ri
- { 30, 3, 1, 0, "ADC32ri8", 0, 0|18|(1<<13)|(131<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #30 = ADC32ri8
- { 31, 7, 1, 0, "ADC32rm", 0|(1<<TID::MayLoad), 0|6|(19<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #31 = ADC32rm
- { 32, 3, 1, 0, "ADC32rr", 0|(1<<TID::Commutable), 0|3|(17<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #32 = ADC32rr
- { 33, 3, 1, 0, "ADC32rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(19<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #33 = ADC32rr_REV
- { 34, 1, 0, 0, "ADC64i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(21<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #34 = ADC64i32
- { 35, 6, 0, 0, "ADC64mi32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|26|(1<<12)|(4<<13)|(129<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #35 = ADC64mi32
- { 36, 6, 0, 0, "ADC64mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|26|(1<<12)|(1<<13)|(131<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #36 = ADC64mi8
- { 37, 6, 0, 0, "ADC64mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<12)|(17<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #37 = ADC64mr
- { 38, 3, 1, 0, "ADC64ri32", 0, 0|18|(1<<12)|(4<<13)|(129<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #38 = ADC64ri32
- { 39, 3, 1, 0, "ADC64ri8", 0, 0|18|(1<<12)|(1<<13)|(131<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #39 = ADC64ri8
- { 40, 7, 1, 0, "ADC64rm", 0|(1<<TID::MayLoad), 0|6|(1<<12)|(19<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #40 = ADC64rm
- { 41, 3, 1, 0, "ADC64rr", 0|(1<<TID::Commutable), 0|3|(1<<12)|(17<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #41 = ADC64rr
- { 42, 3, 1, 0, "ADC64rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<12)|(19<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #42 = ADC64rr_REV
- { 43, 1, 0, 0, "ADC8i8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<13)|(20<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #43 = ADC8i8
- { 44, 6, 0, 0, "ADC8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|26|(1<<13)|(128<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #44 = ADC8mi
- { 45, 6, 0, 0, "ADC8mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(16<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #45 = ADC8mr
- { 46, 3, 1, 0, "ADC8ri", 0, 0|18|(1<<13)|(128<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #46 = ADC8ri
- { 47, 7, 1, 0, "ADC8rm", 0|(1<<TID::MayLoad), 0|6|(18<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #47 = ADC8rm
- { 48, 3, 1, 0, "ADC8rr", 0|(1<<TID::Commutable), 0|3|(16<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo23 }, // Inst #48 = ADC8rr
- { 49, 3, 1, 0, "ADC8rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(18<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo23 }, // Inst #49 = ADC8rr_REV
- { 50, 1, 0, 0, "ADD16i16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(3<<13)|(5<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #50 = ADD16i16
- { 51, 6, 0, 0, "ADD16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<6)|(3<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #51 = ADD16mi
- { 52, 6, 0, 0, "ADD16mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<6)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #52 = ADD16mi8
- { 53, 6, 0, 0, "ADD16mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<6)|(1<<24), NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #53 = ADD16mr
- { 54, 3, 1, 0, "ADD16mrmrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(3<<24), NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #54 = ADD16mrmrr
- { 55, 3, 1, 0, "ADD16ri", 0|(1<<TID::ConvertibleTo3Addr), 0|16|(1<<6)|(3<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #55 = ADD16ri
- { 56, 3, 1, 0, "ADD16ri8", 0|(1<<TID::ConvertibleTo3Addr), 0|16|(1<<6)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #56 = ADD16ri8
- { 57, 7, 1, 0, "ADD16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(3<<24), NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #57 = ADD16rm
- { 58, 3, 1, 0, "ADD16rr", 0|(1<<TID::ConvertibleTo3Addr)|(1<<TID::Commutable), 0|3|(1<<6)|(1<<24), NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #58 = ADD16rr
- { 59, 1, 0, 0, "ADD32i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<13)|(5<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #59 = ADD32i32
- { 60, 6, 0, 0, "ADD32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #60 = ADD32mi
- { 61, 6, 0, 0, "ADD32mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #61 = ADD32mi8
- { 62, 6, 0, 0, "ADD32mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<24), NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #62 = ADD32mr
- { 63, 3, 1, 0, "ADD32mrmrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(3<<24), NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #63 = ADD32mrmrr
- { 64, 3, 1, 0, "ADD32ri", 0|(1<<TID::ConvertibleTo3Addr), 0|16|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #64 = ADD32ri
- { 65, 3, 1, 0, "ADD32ri8", 0|(1<<TID::ConvertibleTo3Addr), 0|16|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #65 = ADD32ri8
- { 66, 7, 1, 0, "ADD32rm", 0|(1<<TID::MayLoad), 0|6|(3<<24), NULL, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #66 = ADD32rm
- { 67, 3, 1, 0, "ADD32rr", 0|(1<<TID::ConvertibleTo3Addr)|(1<<TID::Commutable), 0|3|(1<<24), NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #67 = ADD32rr
- { 68, 1, 0, 0, "ADD64i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(5<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #68 = ADD64i32
- { 69, 6, 0, 0, "ADD64mi32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<12)|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #69 = ADD64mi32
- { 70, 6, 0, 0, "ADD64mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<12)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #70 = ADD64mi8
- { 71, 6, 0, 0, "ADD64mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<12)|(1<<24), NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #71 = ADD64mr
- { 72, 3, 1, 0, "ADD64mrmrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<12)|(3<<24), NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #72 = ADD64mrmrr
- { 73, 3, 1, 0, "ADD64ri32", 0|(1<<TID::ConvertibleTo3Addr), 0|16|(1<<12)|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #73 = ADD64ri32
- { 74, 3, 1, 0, "ADD64ri8", 0|(1<<TID::ConvertibleTo3Addr), 0|16|(1<<12)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #74 = ADD64ri8
- { 75, 7, 1, 0, "ADD64rm", 0|(1<<TID::MayLoad), 0|6|(1<<12)|(3<<24), NULL, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #75 = ADD64rm
- { 76, 3, 1, 0, "ADD64rr", 0|(1<<TID::ConvertibleTo3Addr)|(1<<TID::Commutable), 0|3|(1<<12)|(1<<24), NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #76 = ADD64rr
- { 77, 1, 0, 0, "ADD8i8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<13)|(4<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #77 = ADD8i8
- { 78, 6, 0, 0, "ADD8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<13)|(128<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #78 = ADD8mi
- { 79, 6, 0, 0, "ADD8mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #79 = ADD8mr
- { 80, 3, 1, 0, "ADD8mrmrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(2<<24), NULL, ImplicitList1, Barriers1, OperandInfo23 }, // Inst #80 = ADD8mrmrr
- { 81, 3, 1, 0, "ADD8ri", 0, 0|16|(1<<13)|(128<<24), NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #81 = ADD8ri
- { 82, 7, 1, 0, "ADD8rm", 0|(1<<TID::MayLoad), 0|6|(2<<24), NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #82 = ADD8rm
- { 83, 3, 1, 0, "ADD8rr", 0|(1<<TID::Commutable), 0|3, NULL, ImplicitList1, Barriers1, OperandInfo23 }, // Inst #83 = ADD8rr
- { 84, 7, 1, 0, "ADDPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(88<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #84 = ADDPDrm
- { 85, 3, 1, 0, "ADDPDrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(88<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #85 = ADDPDrr
- { 86, 7, 1, 0, "ADDPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(88<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #86 = ADDPSrm
- { 87, 3, 1, 0, "ADDPSrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(88<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #87 = ADDPSrr
- { 88, 7, 1, 0, "ADDSDrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(88<<24), NULL, NULL, NULL, OperandInfo26 }, // Inst #88 = ADDSDrm
- { 89, 7, 1, 0, "ADDSDrm_Int", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(88<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #89 = ADDSDrm_Int
- { 90, 3, 1, 0, "ADDSDrr", 0|(1<<TID::Commutable), 0|5|(11<<8)|(88<<24), NULL, NULL, NULL, OperandInfo27 }, // Inst #90 = ADDSDrr
- { 91, 3, 1, 0, "ADDSDrr_Int", 0, 0|5|(11<<8)|(88<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #91 = ADDSDrr_Int
- { 92, 7, 1, 0, "ADDSSrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(88<<24), NULL, NULL, NULL, OperandInfo28 }, // Inst #92 = ADDSSrm
- { 93, 7, 1, 0, "ADDSSrm_Int", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(88<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #93 = ADDSSrm_Int
- { 94, 3, 1, 0, "ADDSSrr", 0|(1<<TID::Commutable), 0|5|(12<<8)|(88<<24), NULL, NULL, NULL, OperandInfo29 }, // Inst #94 = ADDSSrr
- { 95, 3, 1, 0, "ADDSSrr_Int", 0, 0|5|(12<<8)|(88<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #95 = ADDSSrr_Int
- { 96, 7, 1, 0, "ADDSUBPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(208<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #96 = ADDSUBPDrm
- { 97, 3, 1, 0, "ADDSUBPDrr", 0, 0|5|(1<<6)|(1<<8)|(208<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #97 = ADDSUBPDrr
- { 98, 7, 1, 0, "ADDSUBPSrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(208<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #98 = ADDSUBPSrm
- { 99, 3, 1, 0, "ADDSUBPSrr", 0, 0|5|(11<<8)|(208<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #99 = ADDSUBPSrr
- { 100, 5, 0, 0, "ADD_F32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|24|(216<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #100 = ADD_F32m
- { 101, 5, 0, 0, "ADD_F64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|24|(220<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #101 = ADD_F64m
- { 102, 5, 0, 0, "ADD_FI16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|24|(222<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #102 = ADD_FI16m
- { 103, 5, 0, 0, "ADD_FI32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|24|(218<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #103 = ADD_FI32m
- { 104, 1, 0, 0, "ADD_FPrST0", 0|(1<<TID::UnmodeledSideEffects), 0|2|(9<<8)|(192<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #104 = ADD_FPrST0
- { 105, 1, 0, 0, "ADD_FST0r", 0|(1<<TID::UnmodeledSideEffects), 0|2|(3<<8)|(192<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #105 = ADD_FST0r
- { 106, 3, 1, 0, "ADD_Fp32", 0, 0|(4<<16), NULL, NULL, NULL, OperandInfo32 }, // Inst #106 = ADD_Fp32
- { 107, 7, 1, 0, "ADD_Fp32m", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #107 = ADD_Fp32m
- { 108, 3, 1, 0, "ADD_Fp64", 0, 0|(4<<16), NULL, NULL, NULL, OperandInfo34 }, // Inst #108 = ADD_Fp64
- { 109, 7, 1, 0, "ADD_Fp64m", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #109 = ADD_Fp64m
- { 110, 7, 1, 0, "ADD_Fp64m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #110 = ADD_Fp64m32
- { 111, 3, 1, 0, "ADD_Fp80", 0, 0|(4<<16), NULL, NULL, NULL, OperandInfo36 }, // Inst #111 = ADD_Fp80
- { 112, 7, 1, 0, "ADD_Fp80m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #112 = ADD_Fp80m32
- { 113, 7, 1, 0, "ADD_Fp80m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #113 = ADD_Fp80m64
- { 114, 7, 1, 0, "ADD_FpI16m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #114 = ADD_FpI16m32
- { 115, 7, 1, 0, "ADD_FpI16m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #115 = ADD_FpI16m64
- { 116, 7, 1, 0, "ADD_FpI16m80", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #116 = ADD_FpI16m80
- { 117, 7, 1, 0, "ADD_FpI32m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #117 = ADD_FpI32m32
- { 118, 7, 1, 0, "ADD_FpI32m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #118 = ADD_FpI32m64
- { 119, 7, 1, 0, "ADD_FpI32m80", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #119 = ADD_FpI32m80
- { 120, 1, 0, 0, "ADD_FrST0", 0|(1<<TID::UnmodeledSideEffects), 0|2|(7<<8)|(192<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #120 = ADD_FrST0
- { 121, 1, 0, 0, "ADJCALLSTACKDOWN32", 0, 0, ImplicitList2, ImplicitList3, Barriers1, OperandInfo5 }, // Inst #121 = ADJCALLSTACKDOWN32
- { 122, 1, 0, 0, "ADJCALLSTACKDOWN64", 0, 0, ImplicitList4, ImplicitList5, Barriers1, OperandInfo5 }, // Inst #122 = ADJCALLSTACKDOWN64
- { 123, 2, 0, 0, "ADJCALLSTACKUP32", 0, 0, ImplicitList2, ImplicitList3, Barriers1, OperandInfo38 }, // Inst #123 = ADJCALLSTACKUP32
- { 124, 2, 0, 0, "ADJCALLSTACKUP64", 0, 0, ImplicitList4, ImplicitList5, Barriers1, OperandInfo38 }, // Inst #124 = ADJCALLSTACKUP64
- { 125, 1, 0, 0, "AND16i16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(3<<13)|(37<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #125 = AND16i16
- { 126, 6, 0, 0, "AND16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(1<<6)|(3<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #126 = AND16mi
- { 127, 6, 0, 0, "AND16mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(1<<6)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #127 = AND16mi8
- { 128, 6, 0, 0, "AND16mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<6)|(33<<24), NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #128 = AND16mr
- { 129, 3, 1, 0, "AND16ri", 0, 0|20|(1<<6)|(3<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #129 = AND16ri
- { 130, 3, 1, 0, "AND16ri8", 0, 0|20|(1<<6)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #130 = AND16ri8
- { 131, 7, 1, 0, "AND16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(35<<24), NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #131 = AND16rm
- { 132, 3, 1, 0, "AND16rr", 0|(1<<TID::Commutable), 0|3|(1<<6)|(33<<24), NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #132 = AND16rr
- { 133, 3, 1, 0, "AND16rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(35<<24), NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #133 = AND16rr_REV
- { 134, 1, 0, 0, "AND32i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<13)|(37<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #134 = AND32i32
- { 135, 6, 0, 0, "AND32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #135 = AND32mi
- { 136, 6, 0, 0, "AND32mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #136 = AND32mi8
- { 137, 6, 0, 0, "AND32mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(33<<24), NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #137 = AND32mr
- { 138, 3, 1, 0, "AND32ri", 0, 0|20|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #138 = AND32ri
- { 139, 3, 1, 0, "AND32ri8", 0, 0|20|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #139 = AND32ri8
- { 140, 7, 1, 0, "AND32rm", 0|(1<<TID::MayLoad), 0|6|(35<<24), NULL, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #140 = AND32rm
- { 141, 3, 1, 0, "AND32rr", 0|(1<<TID::Commutable), 0|3|(33<<24), NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #141 = AND32rr
- { 142, 3, 1, 0, "AND32rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(35<<24), NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #142 = AND32rr_REV
- { 143, 1, 0, 0, "AND64i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(37<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #143 = AND64i32
- { 144, 6, 0, 0, "AND64mi32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(1<<12)|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #144 = AND64mi32
- { 145, 6, 0, 0, "AND64mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(1<<12)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #145 = AND64mi8
- { 146, 6, 0, 0, "AND64mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<12)|(33<<24), NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #146 = AND64mr
- { 147, 3, 1, 0, "AND64ri32", 0, 0|20|(1<<12)|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #147 = AND64ri32
- { 148, 3, 1, 0, "AND64ri8", 0, 0|20|(1<<12)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #148 = AND64ri8
- { 149, 7, 1, 0, "AND64rm", 0|(1<<TID::MayLoad), 0|6|(1<<12)|(35<<24), NULL, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #149 = AND64rm
- { 150, 3, 1, 0, "AND64rr", 0|(1<<TID::Commutable), 0|3|(1<<12)|(33<<24), NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #150 = AND64rr
- { 151, 3, 1, 0, "AND64rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<12)|(35<<24), NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #151 = AND64rr_REV
- { 152, 1, 0, 0, "AND8i8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<13)|(36<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #152 = AND8i8
- { 153, 6, 0, 0, "AND8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(1<<13)|(128<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #153 = AND8mi
- { 154, 6, 0, 0, "AND8mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(32<<24), NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #154 = AND8mr
- { 155, 3, 1, 0, "AND8ri", 0, 0|20|(1<<13)|(128<<24), NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #155 = AND8ri
- { 156, 7, 1, 0, "AND8rm", 0|(1<<TID::MayLoad), 0|6|(34<<24), NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #156 = AND8rm
- { 157, 3, 1, 0, "AND8rr", 0|(1<<TID::Commutable), 0|3|(32<<24), NULL, ImplicitList1, Barriers1, OperandInfo23 }, // Inst #157 = AND8rr
- { 158, 3, 1, 0, "AND8rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(34<<24), NULL, ImplicitList1, Barriers1, OperandInfo23 }, // Inst #158 = AND8rr_REV
- { 159, 7, 1, 0, "ANDNPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(85<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #159 = ANDNPDrm
- { 160, 3, 1, 0, "ANDNPDrr", 0, 0|5|(1<<6)|(1<<8)|(85<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #160 = ANDNPDrr
- { 161, 7, 1, 0, "ANDNPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(85<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #161 = ANDNPSrm
- { 162, 3, 1, 0, "ANDNPSrr", 0, 0|5|(1<<8)|(85<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #162 = ANDNPSrr
- { 163, 7, 1, 0, "ANDPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(84<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #163 = ANDPDrm
- { 164, 3, 1, 0, "ANDPDrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(84<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #164 = ANDPDrr
- { 165, 7, 1, 0, "ANDPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(84<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #165 = ANDPSrm
- { 166, 3, 1, 0, "ANDPSrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(84<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #166 = ANDPSrr
- { 167, 9, 2, 0, "ATOMADD6432", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0, ImplicitList6, ImplicitList7, Barriers2, OperandInfo39 }, // Inst #167 = ATOMADD6432
- { 168, 7, 1, 0, "ATOMAND16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo40 }, // Inst #168 = ATOMAND16
- { 169, 7, 1, 0, "ATOMAND32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo41 }, // Inst #169 = ATOMAND32
- { 170, 7, 1, 0, "ATOMAND64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #170 = ATOMAND64
- { 171, 9, 2, 0, "ATOMAND6432", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0, ImplicitList6, ImplicitList7, Barriers2, OperandInfo39 }, // Inst #171 = ATOMAND6432
- { 172, 7, 1, 0, "ATOMAND8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #172 = ATOMAND8
- { 173, 7, 1, 0, "ATOMMAX16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo40 }, // Inst #173 = ATOMMAX16
- { 174, 7, 1, 0, "ATOMMAX32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo41 }, // Inst #174 = ATOMMAX32
- { 175, 7, 1, 0, "ATOMMAX64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #175 = ATOMMAX64
- { 176, 7, 1, 0, "ATOMMIN16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo40 }, // Inst #176 = ATOMMIN16
- { 177, 7, 1, 0, "ATOMMIN32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo41 }, // Inst #177 = ATOMMIN32
- { 178, 7, 1, 0, "ATOMMIN64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #178 = ATOMMIN64
- { 179, 7, 1, 0, "ATOMNAND16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo40 }, // Inst #179 = ATOMNAND16
- { 180, 7, 1, 0, "ATOMNAND32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo41 }, // Inst #180 = ATOMNAND32
- { 181, 7, 1, 0, "ATOMNAND64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #181 = ATOMNAND64
- { 182, 9, 2, 0, "ATOMNAND6432", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0, ImplicitList6, ImplicitList7, Barriers2, OperandInfo39 }, // Inst #182 = ATOMNAND6432
- { 183, 7, 1, 0, "ATOMNAND8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #183 = ATOMNAND8
- { 184, 7, 1, 0, "ATOMOR16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo40 }, // Inst #184 = ATOMOR16
- { 185, 7, 1, 0, "ATOMOR32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo41 }, // Inst #185 = ATOMOR32
- { 186, 7, 1, 0, "ATOMOR64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #186 = ATOMOR64
- { 187, 9, 2, 0, "ATOMOR6432", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0, ImplicitList6, ImplicitList7, Barriers2, OperandInfo39 }, // Inst #187 = ATOMOR6432
- { 188, 7, 1, 0, "ATOMOR8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #188 = ATOMOR8
- { 189, 9, 2, 0, "ATOMSUB6432", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0, ImplicitList6, ImplicitList7, Barriers2, OperandInfo39 }, // Inst #189 = ATOMSUB6432
- { 190, 9, 2, 0, "ATOMSWAP6432", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0, ImplicitList6, ImplicitList7, Barriers2, OperandInfo39 }, // Inst #190 = ATOMSWAP6432
- { 191, 7, 1, 0, "ATOMUMAX16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo40 }, // Inst #191 = ATOMUMAX16
- { 192, 7, 1, 0, "ATOMUMAX32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo41 }, // Inst #192 = ATOMUMAX32
- { 193, 7, 1, 0, "ATOMUMAX64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #193 = ATOMUMAX64
- { 194, 7, 1, 0, "ATOMUMIN16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo40 }, // Inst #194 = ATOMUMIN16
- { 195, 7, 1, 0, "ATOMUMIN32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo41 }, // Inst #195 = ATOMUMIN32
- { 196, 7, 1, 0, "ATOMUMIN64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #196 = ATOMUMIN64
- { 197, 7, 1, 0, "ATOMXOR16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo40 }, // Inst #197 = ATOMXOR16
- { 198, 7, 1, 0, "ATOMXOR32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo41 }, // Inst #198 = ATOMXOR32
- { 199, 7, 1, 0, "ATOMXOR64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #199 = ATOMXOR64
- { 200, 9, 2, 0, "ATOMXOR6432", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0, ImplicitList6, ImplicitList7, Barriers2, OperandInfo39 }, // Inst #200 = ATOMXOR6432
- { 201, 7, 1, 0, "ATOMXOR8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #201 = ATOMXOR8
- { 202, 8, 1, 0, "BLENDPDrmi", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(1<<13)|(13<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #202 = BLENDPDrmi
- { 203, 4, 1, 0, "BLENDPDrri", 0, 0|5|(1<<6)|(14<<8)|(1<<13)|(13<<24), NULL, NULL, NULL, OperandInfo45 }, // Inst #203 = BLENDPDrri
- { 204, 8, 1, 0, "BLENDPSrmi", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(1<<13)|(12<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #204 = BLENDPSrmi
- { 205, 4, 1, 0, "BLENDPSrri", 0, 0|5|(1<<6)|(14<<8)|(1<<13)|(12<<24), NULL, NULL, NULL, OperandInfo45 }, // Inst #205 = BLENDPSrri
- { 206, 7, 1, 0, "BLENDVPDrm0", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(21<<24), ImplicitList8, NULL, NULL, OperandInfo24 }, // Inst #206 = BLENDVPDrm0
- { 207, 3, 1, 0, "BLENDVPDrr0", 0, 0|5|(1<<6)|(13<<8)|(21<<24), ImplicitList8, NULL, NULL, OperandInfo25 }, // Inst #207 = BLENDVPDrr0
- { 208, 7, 1, 0, "BLENDVPSrm0", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(20<<24), ImplicitList8, NULL, NULL, OperandInfo24 }, // Inst #208 = BLENDVPSrm0
- { 209, 3, 1, 0, "BLENDVPSrr0", 0, 0|5|(1<<6)|(13<<8)|(20<<24), ImplicitList8, NULL, NULL, OperandInfo25 }, // Inst #209 = BLENDVPSrr0
- { 210, 6, 1, 0, "BSF16rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(188<<24), NULL, ImplicitList1, Barriers1, OperandInfo46 }, // Inst #210 = BSF16rm
- { 211, 2, 1, 0, "BSF16rr", 0, 0|5|(1<<8)|(188<<24), NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #211 = BSF16rr
- { 212, 6, 1, 0, "BSF32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(188<<24), NULL, ImplicitList1, Barriers1, OperandInfo48 }, // Inst #212 = BSF32rm
- { 213, 2, 1, 0, "BSF32rr", 0, 0|5|(1<<8)|(188<<24), NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #213 = BSF32rr
- { 214, 6, 1, 0, "BSF64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(188<<24), NULL, ImplicitList1, Barriers1, OperandInfo50 }, // Inst #214 = BSF64rm
- { 215, 2, 1, 0, "BSF64rr", 0, 0|5|(1<<8)|(1<<12)|(188<<24), NULL, ImplicitList1, Barriers1, OperandInfo51 }, // Inst #215 = BSF64rr
- { 216, 6, 1, 0, "BSR16rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(189<<24), NULL, ImplicitList1, Barriers1, OperandInfo46 }, // Inst #216 = BSR16rm
- { 217, 2, 1, 0, "BSR16rr", 0, 0|5|(1<<8)|(189<<24), NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #217 = BSR16rr
- { 218, 6, 1, 0, "BSR32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(189<<24), NULL, ImplicitList1, Barriers1, OperandInfo48 }, // Inst #218 = BSR32rm
- { 219, 2, 1, 0, "BSR32rr", 0, 0|5|(1<<8)|(189<<24), NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #219 = BSR32rr
- { 220, 6, 1, 0, "BSR64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(189<<24), NULL, ImplicitList1, Barriers1, OperandInfo50 }, // Inst #220 = BSR64rm
- { 221, 2, 1, 0, "BSR64rr", 0, 0|5|(1<<8)|(1<<12)|(189<<24), NULL, ImplicitList1, Barriers1, OperandInfo51 }, // Inst #221 = BSR64rr
- { 222, 2, 1, 0, "BSWAP32r", 0, 0|2|(1<<8)|(200<<24), NULL, NULL, NULL, OperandInfo52 }, // Inst #222 = BSWAP32r
- { 223, 2, 1, 0, "BSWAP64r", 0, 0|2|(1<<8)|(1<<12)|(200<<24), NULL, NULL, NULL, OperandInfo53 }, // Inst #223 = BSWAP64r
- { 224, 6, 0, 0, "BT16mi8", 0|(1<<TID::MayLoad), 0|28|(1<<6)|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #224 = BT16mi8
- { 225, 6, 0, 0, "BT16mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<6)|(1<<8)|(163<<24), NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #225 = BT16mr
- { 226, 2, 0, 0, "BT16ri8", 0, 0|20|(1<<6)|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo54 }, // Inst #226 = BT16ri8
- { 227, 2, 0, 0, "BT16rr", 0, 0|3|(1<<6)|(1<<8)|(163<<24), NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #227 = BT16rr
- { 228, 6, 0, 0, "BT32mi8", 0|(1<<TID::MayLoad), 0|28|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #228 = BT32mi8
- { 229, 6, 0, 0, "BT32mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(163<<24), NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #229 = BT32mr
- { 230, 2, 0, 0, "BT32ri8", 0, 0|20|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo55 }, // Inst #230 = BT32ri8
- { 231, 2, 0, 0, "BT32rr", 0, 0|3|(1<<8)|(163<<24), NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #231 = BT32rr
- { 232, 6, 0, 0, "BT64mi8", 0|(1<<TID::MayLoad), 0|28|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #232 = BT64mi8
- { 233, 6, 0, 0, "BT64mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(1<<12)|(163<<24), NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #233 = BT64mr
- { 234, 2, 0, 0, "BT64ri8", 0, 0|20|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo56 }, // Inst #234 = BT64ri8
- { 235, 2, 0, 0, "BT64rr", 0, 0|3|(1<<8)|(1<<12)|(163<<24), NULL, ImplicitList1, Barriers1, OperandInfo51 }, // Inst #235 = BT64rr
- { 236, 6, 0, 0, "BTC16mi8", 0|(1<<TID::UnmodeledSideEffects), 0|31|(1<<6)|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #236 = BTC16mi8
- { 237, 6, 0, 0, "BTC16mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<6)|(1<<8)|(187<<24), NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #237 = BTC16mr
- { 238, 2, 0, 0, "BTC16ri8", 0|(1<<TID::UnmodeledSideEffects), 0|23|(1<<6)|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo54 }, // Inst #238 = BTC16ri8
- { 239, 2, 0, 0, "BTC16rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<6)|(1<<8)|(187<<24), NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #239 = BTC16rr
- { 240, 6, 0, 0, "BTC32mi8", 0|(1<<TID::UnmodeledSideEffects), 0|31|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #240 = BTC32mi8
- { 241, 6, 0, 0, "BTC32mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(187<<24), NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #241 = BTC32mr
- { 242, 2, 0, 0, "BTC32ri8", 0|(1<<TID::UnmodeledSideEffects), 0|23|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo55 }, // Inst #242 = BTC32ri8
- { 243, 2, 0, 0, "BTC32rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(187<<24), NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #243 = BTC32rr
- { 244, 6, 0, 0, "BTC64mi8", 0|(1<<TID::UnmodeledSideEffects), 0|31|(1<<8)|(1<<12)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #244 = BTC64mi8
- { 245, 6, 0, 0, "BTC64mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(1<<12)|(187<<24), NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #245 = BTC64mr
- { 246, 2, 0, 0, "BTC64ri8", 0|(1<<TID::UnmodeledSideEffects), 0|23|(1<<8)|(1<<12)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo56 }, // Inst #246 = BTC64ri8
- { 247, 2, 0, 0, "BTC64rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(1<<12)|(187<<24), NULL, ImplicitList1, Barriers1, OperandInfo51 }, // Inst #247 = BTC64rr
- { 248, 6, 0, 0, "BTR16mi8", 0|(1<<TID::UnmodeledSideEffects), 0|30|(1<<6)|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #248 = BTR16mi8
- { 249, 6, 0, 0, "BTR16mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<6)|(1<<8)|(179<<24), NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #249 = BTR16mr
- { 250, 2, 0, 0, "BTR16ri8", 0|(1<<TID::UnmodeledSideEffects), 0|22|(1<<6)|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo54 }, // Inst #250 = BTR16ri8
- { 251, 2, 0, 0, "BTR16rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<6)|(1<<8)|(179<<24), NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #251 = BTR16rr
- { 252, 6, 0, 0, "BTR32mi8", 0|(1<<TID::UnmodeledSideEffects), 0|30|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #252 = BTR32mi8
- { 253, 6, 0, 0, "BTR32mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(179<<24), NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #253 = BTR32mr
- { 254, 2, 0, 0, "BTR32ri8", 0|(1<<TID::UnmodeledSideEffects), 0|22|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo55 }, // Inst #254 = BTR32ri8
- { 255, 2, 0, 0, "BTR32rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(179<<24), NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #255 = BTR32rr
- { 256, 6, 0, 0, "BTR64mi8", 0|(1<<TID::UnmodeledSideEffects), 0|30|(1<<8)|(1<<12)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #256 = BTR64mi8
- { 257, 6, 0, 0, "BTR64mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(1<<12)|(179<<24), NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #257 = BTR64mr
- { 258, 2, 0, 0, "BTR64ri8", 0|(1<<TID::UnmodeledSideEffects), 0|22|(1<<8)|(1<<12)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo56 }, // Inst #258 = BTR64ri8
- { 259, 2, 0, 0, "BTR64rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(1<<12)|(179<<24), NULL, ImplicitList1, Barriers1, OperandInfo51 }, // Inst #259 = BTR64rr
- { 260, 6, 0, 0, "BTS16mi8", 0|(1<<TID::UnmodeledSideEffects), 0|29|(1<<6)|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #260 = BTS16mi8
- { 261, 6, 0, 0, "BTS16mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<6)|(1<<8)|(171<<24), NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #261 = BTS16mr
- { 262, 2, 0, 0, "BTS16ri8", 0|(1<<TID::UnmodeledSideEffects), 0|21|(1<<6)|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo54 }, // Inst #262 = BTS16ri8
- { 263, 2, 0, 0, "BTS16rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<6)|(1<<8)|(171<<24), NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #263 = BTS16rr
- { 264, 6, 0, 0, "BTS32mi8", 0|(1<<TID::UnmodeledSideEffects), 0|29|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #264 = BTS32mi8
- { 265, 6, 0, 0, "BTS32mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(171<<24), NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #265 = BTS32mr
- { 266, 2, 0, 0, "BTS32ri8", 0|(1<<TID::UnmodeledSideEffects), 0|21|(1<<8)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo55 }, // Inst #266 = BTS32ri8
- { 267, 2, 0, 0, "BTS32rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(171<<24), NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #267 = BTS32rr
- { 268, 6, 0, 0, "BTS64mi8", 0|(1<<TID::UnmodeledSideEffects), 0|29|(1<<8)|(1<<12)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #268 = BTS64mi8
- { 269, 6, 0, 0, "BTS64mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(1<<12)|(171<<24), NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #269 = BTS64mr
- { 270, 2, 0, 0, "BTS64ri8", 0|(1<<TID::UnmodeledSideEffects), 0|21|(1<<8)|(1<<12)|(1<<13)|(186<<24), NULL, ImplicitList1, Barriers1, OperandInfo56 }, // Inst #270 = BTS64ri8
- { 271, 2, 0, 0, "BTS64rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(1<<12)|(171<<24), NULL, ImplicitList1, Barriers1, OperandInfo51 }, // Inst #271 = BTS64rr
- { 272, 5, 0, 0, "CALL32m", 0|(1<<TID::Call)|(1<<TID::MayLoad)|(1<<TID::Variadic), 0|26|(255<<24), ImplicitList2, ImplicitList9, Barriers3, OperandInfo30 }, // Inst #272 = CALL32m
- { 273, 1, 0, 0, "CALL32r", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|18|(255<<24), ImplicitList2, ImplicitList9, Barriers3, OperandInfo57 }, // Inst #273 = CALL32r
- { 274, 5, 0, 0, "CALL64m", 0|(1<<TID::Call)|(1<<TID::MayLoad)|(1<<TID::Variadic), 0|26|(255<<24), ImplicitList4, ImplicitList10, Barriers4, OperandInfo30 }, // Inst #274 = CALL64m
- { 275, 1, 0, 0, "CALL64pcrel32", 0|(1<<TID::Call)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0|1|(4<<13)|(232<<24), ImplicitList4, ImplicitList10, Barriers4, OperandInfo5 }, // Inst #275 = CALL64pcrel32
- { 276, 1, 0, 0, "CALL64r", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|18|(255<<24), ImplicitList4, ImplicitList10, Barriers4, OperandInfo58 }, // Inst #276 = CALL64r
- { 277, 1, 0, 0, "CALLpcrel32", 0|(1<<TID::Call)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0|1|(5<<13)|(232<<24), ImplicitList2, ImplicitList9, Barriers3, OperandInfo5 }, // Inst #277 = CALLpcrel32
- { 278, 0, 0, 0, "CBW", 0, 0|1|(1<<6)|(152<<24), ImplicitList11, ImplicitList12, NULL, 0 }, // Inst #278 = CBW
- { 279, 0, 0, 0, "CDQ", 0, 0|1|(153<<24), ImplicitList13, ImplicitList14, Barriers5, 0 }, // Inst #279 = CDQ
- { 280, 0, 0, 0, "CDQE", 0, 0|1|(1<<12)|(152<<24), ImplicitList13, ImplicitList15, NULL, 0 }, // Inst #280 = CDQE
- { 281, 0, 0, 0, "CHS_F", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(224<<24), NULL, NULL, NULL, 0 }, // Inst #281 = CHS_F
- { 282, 2, 1, 0, "CHS_Fp32", 0, 0|(3<<16), NULL, NULL, NULL, OperandInfo2 }, // Inst #282 = CHS_Fp32
- { 283, 2, 1, 0, "CHS_Fp64", 0, 0|(3<<16), NULL, NULL, NULL, OperandInfo3 }, // Inst #283 = CHS_Fp64
- { 284, 2, 1, 0, "CHS_Fp80", 0, 0|(3<<16), NULL, NULL, NULL, OperandInfo4 }, // Inst #284 = CHS_Fp80
- { 285, 0, 0, 0, "CLC", 0|(1<<TID::UnmodeledSideEffects), 0|1|(248<<24), NULL, NULL, NULL, 0 }, // Inst #285 = CLC
- { 286, 0, 0, 0, "CLD", 0|(1<<TID::UnmodeledSideEffects), 0|1|(252<<24), NULL, NULL, NULL, 0 }, // Inst #286 = CLD
- { 287, 5, 0, 0, "CLFLUSH", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|31|(1<<8)|(174<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #287 = CLFLUSH
- { 288, 0, 0, 0, "CLI", 0|(1<<TID::UnmodeledSideEffects), 0|1|(250<<24), NULL, NULL, NULL, 0 }, // Inst #288 = CLI
- { 289, 0, 0, 0, "CLTS", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(6<<24), NULL, NULL, NULL, 0 }, // Inst #289 = CLTS
- { 290, 0, 0, 0, "CMC", 0|(1<<TID::UnmodeledSideEffects), 0|1|(245<<24), NULL, NULL, NULL, 0 }, // Inst #290 = CMC
- { 291, 7, 1, 0, "CMOVA16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(71<<24), ImplicitList1, NULL, NULL, OperandInfo9 }, // Inst #291 = CMOVA16rm
- { 292, 3, 1, 0, "CMOVA16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(71<<24), ImplicitList1, NULL, NULL, OperandInfo10 }, // Inst #292 = CMOVA16rr
- { 293, 7, 1, 0, "CMOVA32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(71<<24), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #293 = CMOVA32rm
- { 294, 3, 1, 0, "CMOVA32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(71<<24), ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #294 = CMOVA32rr
- { 295, 7, 1, 0, "CMOVA64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(71<<24), ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #295 = CMOVA64rm
- { 296, 3, 1, 0, "CMOVA64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(71<<24), ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #296 = CMOVA64rr
- { 297, 7, 1, 0, "CMOVAE16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(67<<24), ImplicitList1, NULL, NULL, OperandInfo9 }, // Inst #297 = CMOVAE16rm
- { 298, 3, 1, 0, "CMOVAE16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(67<<24), ImplicitList1, NULL, NULL, OperandInfo10 }, // Inst #298 = CMOVAE16rr
- { 299, 7, 1, 0, "CMOVAE32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(67<<24), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #299 = CMOVAE32rm
- { 300, 3, 1, 0, "CMOVAE32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(67<<24), ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #300 = CMOVAE32rr
- { 301, 7, 1, 0, "CMOVAE64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(67<<24), ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #301 = CMOVAE64rm
- { 302, 3, 1, 0, "CMOVAE64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(67<<24), ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #302 = CMOVAE64rr
- { 303, 7, 1, 0, "CMOVB16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(66<<24), ImplicitList1, NULL, NULL, OperandInfo9 }, // Inst #303 = CMOVB16rm
- { 304, 3, 1, 0, "CMOVB16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(66<<24), ImplicitList1, NULL, NULL, OperandInfo10 }, // Inst #304 = CMOVB16rr
- { 305, 7, 1, 0, "CMOVB32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(66<<24), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #305 = CMOVB32rm
- { 306, 3, 1, 0, "CMOVB32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(66<<24), ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #306 = CMOVB32rr
- { 307, 7, 1, 0, "CMOVB64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(66<<24), ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #307 = CMOVB64rm
- { 308, 3, 1, 0, "CMOVB64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(66<<24), ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #308 = CMOVB64rr
- { 309, 7, 1, 0, "CMOVBE16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(70<<24), ImplicitList1, NULL, NULL, OperandInfo9 }, // Inst #309 = CMOVBE16rm
- { 310, 3, 1, 0, "CMOVBE16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(70<<24), ImplicitList1, NULL, NULL, OperandInfo10 }, // Inst #310 = CMOVBE16rr
- { 311, 7, 1, 0, "CMOVBE32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(70<<24), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #311 = CMOVBE32rm
- { 312, 3, 1, 0, "CMOVBE32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(70<<24), ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #312 = CMOVBE32rr
- { 313, 7, 1, 0, "CMOVBE64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(70<<24), ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #313 = CMOVBE64rm
- { 314, 3, 1, 0, "CMOVBE64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(70<<24), ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #314 = CMOVBE64rr
- { 315, 1, 1, 0, "CMOVBE_F", 0|(1<<TID::UnmodeledSideEffects), 0|2|(5<<8)|(208<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #315 = CMOVBE_F
- { 316, 3, 1, 0, "CMOVBE_Fp32", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo59 }, // Inst #316 = CMOVBE_Fp32
- { 317, 3, 1, 0, "CMOVBE_Fp64", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo60 }, // Inst #317 = CMOVBE_Fp64
- { 318, 3, 1, 0, "CMOVBE_Fp80", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo61 }, // Inst #318 = CMOVBE_Fp80
- { 319, 1, 1, 0, "CMOVB_F", 0|(1<<TID::UnmodeledSideEffects), 0|2|(5<<8)|(192<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #319 = CMOVB_F
- { 320, 3, 1, 0, "CMOVB_Fp32", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo59 }, // Inst #320 = CMOVB_Fp32
- { 321, 3, 1, 0, "CMOVB_Fp64", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo60 }, // Inst #321 = CMOVB_Fp64
- { 322, 3, 1, 0, "CMOVB_Fp80", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo61 }, // Inst #322 = CMOVB_Fp80
- { 323, 7, 1, 0, "CMOVE16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(68<<24), ImplicitList1, NULL, NULL, OperandInfo9 }, // Inst #323 = CMOVE16rm
- { 324, 3, 1, 0, "CMOVE16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(68<<24), ImplicitList1, NULL, NULL, OperandInfo10 }, // Inst #324 = CMOVE16rr
- { 325, 7, 1, 0, "CMOVE32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(68<<24), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #325 = CMOVE32rm
- { 326, 3, 1, 0, "CMOVE32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(68<<24), ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #326 = CMOVE32rr
- { 327, 7, 1, 0, "CMOVE64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(68<<24), ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #327 = CMOVE64rm
- { 328, 3, 1, 0, "CMOVE64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(68<<24), ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #328 = CMOVE64rr
- { 329, 1, 1, 0, "CMOVE_F", 0|(1<<TID::UnmodeledSideEffects), 0|2|(5<<8)|(200<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #329 = CMOVE_F
- { 330, 3, 1, 0, "CMOVE_Fp32", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo59 }, // Inst #330 = CMOVE_Fp32
- { 331, 3, 1, 0, "CMOVE_Fp64", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo60 }, // Inst #331 = CMOVE_Fp64
- { 332, 3, 1, 0, "CMOVE_Fp80", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo61 }, // Inst #332 = CMOVE_Fp80
- { 333, 7, 1, 0, "CMOVG16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(79<<24), ImplicitList1, NULL, NULL, OperandInfo9 }, // Inst #333 = CMOVG16rm
- { 334, 3, 1, 0, "CMOVG16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(79<<24), ImplicitList1, NULL, NULL, OperandInfo10 }, // Inst #334 = CMOVG16rr
- { 335, 7, 1, 0, "CMOVG32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(79<<24), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #335 = CMOVG32rm
- { 336, 3, 1, 0, "CMOVG32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(79<<24), ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #336 = CMOVG32rr
- { 337, 7, 1, 0, "CMOVG64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(79<<24), ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #337 = CMOVG64rm
- { 338, 3, 1, 0, "CMOVG64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(79<<24), ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #338 = CMOVG64rr
- { 339, 7, 1, 0, "CMOVGE16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(77<<24), ImplicitList1, NULL, NULL, OperandInfo9 }, // Inst #339 = CMOVGE16rm
- { 340, 3, 1, 0, "CMOVGE16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(77<<24), ImplicitList1, NULL, NULL, OperandInfo10 }, // Inst #340 = CMOVGE16rr
- { 341, 7, 1, 0, "CMOVGE32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(77<<24), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #341 = CMOVGE32rm
- { 342, 3, 1, 0, "CMOVGE32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(77<<24), ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #342 = CMOVGE32rr
- { 343, 7, 1, 0, "CMOVGE64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(77<<24), ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #343 = CMOVGE64rm
- { 344, 3, 1, 0, "CMOVGE64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(77<<24), ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #344 = CMOVGE64rr
- { 345, 7, 1, 0, "CMOVL16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(76<<24), ImplicitList1, NULL, NULL, OperandInfo9 }, // Inst #345 = CMOVL16rm
- { 346, 3, 1, 0, "CMOVL16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(76<<24), ImplicitList1, NULL, NULL, OperandInfo10 }, // Inst #346 = CMOVL16rr
- { 347, 7, 1, 0, "CMOVL32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(76<<24), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #347 = CMOVL32rm
- { 348, 3, 1, 0, "CMOVL32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(76<<24), ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #348 = CMOVL32rr
- { 349, 7, 1, 0, "CMOVL64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(76<<24), ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #349 = CMOVL64rm
- { 350, 3, 1, 0, "CMOVL64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(76<<24), ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #350 = CMOVL64rr
- { 351, 7, 1, 0, "CMOVLE16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(78<<24), ImplicitList1, NULL, NULL, OperandInfo9 }, // Inst #351 = CMOVLE16rm
- { 352, 3, 1, 0, "CMOVLE16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(78<<24), ImplicitList1, NULL, NULL, OperandInfo10 }, // Inst #352 = CMOVLE16rr
- { 353, 7, 1, 0, "CMOVLE32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(78<<24), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #353 = CMOVLE32rm
- { 354, 3, 1, 0, "CMOVLE32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(78<<24), ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #354 = CMOVLE32rr
- { 355, 7, 1, 0, "CMOVLE64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(78<<24), ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #355 = CMOVLE64rm
- { 356, 3, 1, 0, "CMOVLE64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(78<<24), ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #356 = CMOVLE64rr
- { 357, 1, 1, 0, "CMOVNBE_F", 0|(1<<TID::UnmodeledSideEffects), 0|2|(6<<8)|(208<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #357 = CMOVNBE_F
- { 358, 3, 1, 0, "CMOVNBE_Fp32", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo59 }, // Inst #358 = CMOVNBE_Fp32
- { 359, 3, 1, 0, "CMOVNBE_Fp64", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo60 }, // Inst #359 = CMOVNBE_Fp64
- { 360, 3, 1, 0, "CMOVNBE_Fp80", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo61 }, // Inst #360 = CMOVNBE_Fp80
- { 361, 1, 1, 0, "CMOVNB_F", 0|(1<<TID::UnmodeledSideEffects), 0|2|(6<<8)|(192<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #361 = CMOVNB_F
- { 362, 3, 1, 0, "CMOVNB_Fp32", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo59 }, // Inst #362 = CMOVNB_Fp32
- { 363, 3, 1, 0, "CMOVNB_Fp64", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo60 }, // Inst #363 = CMOVNB_Fp64
- { 364, 3, 1, 0, "CMOVNB_Fp80", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo61 }, // Inst #364 = CMOVNB_Fp80
- { 365, 7, 1, 0, "CMOVNE16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(69<<24), ImplicitList1, NULL, NULL, OperandInfo9 }, // Inst #365 = CMOVNE16rm
- { 366, 3, 1, 0, "CMOVNE16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(69<<24), ImplicitList1, NULL, NULL, OperandInfo10 }, // Inst #366 = CMOVNE16rr
- { 367, 7, 1, 0, "CMOVNE32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(69<<24), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #367 = CMOVNE32rm
- { 368, 3, 1, 0, "CMOVNE32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(69<<24), ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #368 = CMOVNE32rr
- { 369, 7, 1, 0, "CMOVNE64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(69<<24), ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #369 = CMOVNE64rm
- { 370, 3, 1, 0, "CMOVNE64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(69<<24), ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #370 = CMOVNE64rr
- { 371, 1, 1, 0, "CMOVNE_F", 0|(1<<TID::UnmodeledSideEffects), 0|2|(6<<8)|(200<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #371 = CMOVNE_F
- { 372, 3, 1, 0, "CMOVNE_Fp32", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo59 }, // Inst #372 = CMOVNE_Fp32
- { 373, 3, 1, 0, "CMOVNE_Fp64", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo60 }, // Inst #373 = CMOVNE_Fp64
- { 374, 3, 1, 0, "CMOVNE_Fp80", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo61 }, // Inst #374 = CMOVNE_Fp80
- { 375, 7, 1, 0, "CMOVNO16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(65<<24), ImplicitList1, NULL, NULL, OperandInfo9 }, // Inst #375 = CMOVNO16rm
- { 376, 3, 1, 0, "CMOVNO16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(65<<24), ImplicitList1, NULL, NULL, OperandInfo10 }, // Inst #376 = CMOVNO16rr
- { 377, 7, 1, 0, "CMOVNO32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(65<<24), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #377 = CMOVNO32rm
- { 378, 3, 1, 0, "CMOVNO32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(65<<24), ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #378 = CMOVNO32rr
- { 379, 7, 1, 0, "CMOVNO64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(65<<24), ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #379 = CMOVNO64rm
- { 380, 3, 1, 0, "CMOVNO64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(65<<24), ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #380 = CMOVNO64rr
- { 381, 7, 1, 0, "CMOVNP16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(75<<24), ImplicitList1, NULL, NULL, OperandInfo9 }, // Inst #381 = CMOVNP16rm
- { 382, 3, 1, 0, "CMOVNP16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(75<<24), ImplicitList1, NULL, NULL, OperandInfo10 }, // Inst #382 = CMOVNP16rr
- { 383, 7, 1, 0, "CMOVNP32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(75<<24), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #383 = CMOVNP32rm
- { 384, 3, 1, 0, "CMOVNP32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(75<<24), ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #384 = CMOVNP32rr
- { 385, 7, 1, 0, "CMOVNP64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(75<<24), ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #385 = CMOVNP64rm
- { 386, 3, 1, 0, "CMOVNP64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(75<<24), ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #386 = CMOVNP64rr
- { 387, 1, 1, 0, "CMOVNP_F", 0|(1<<TID::UnmodeledSideEffects), 0|2|(6<<8)|(216<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #387 = CMOVNP_F
- { 388, 3, 1, 0, "CMOVNP_Fp32", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo59 }, // Inst #388 = CMOVNP_Fp32
- { 389, 3, 1, 0, "CMOVNP_Fp64", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo60 }, // Inst #389 = CMOVNP_Fp64
- { 390, 3, 1, 0, "CMOVNP_Fp80", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo61 }, // Inst #390 = CMOVNP_Fp80
- { 391, 7, 1, 0, "CMOVNS16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(73<<24), ImplicitList1, NULL, NULL, OperandInfo9 }, // Inst #391 = CMOVNS16rm
- { 392, 3, 1, 0, "CMOVNS16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(73<<24), ImplicitList1, NULL, NULL, OperandInfo10 }, // Inst #392 = CMOVNS16rr
- { 393, 7, 1, 0, "CMOVNS32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(73<<24), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #393 = CMOVNS32rm
- { 394, 3, 1, 0, "CMOVNS32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(73<<24), ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #394 = CMOVNS32rr
- { 395, 7, 1, 0, "CMOVNS64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(73<<24), ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #395 = CMOVNS64rm
- { 396, 3, 1, 0, "CMOVNS64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(73<<24), ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #396 = CMOVNS64rr
- { 397, 7, 1, 0, "CMOVO16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(64<<24), ImplicitList1, NULL, NULL, OperandInfo9 }, // Inst #397 = CMOVO16rm
- { 398, 3, 1, 0, "CMOVO16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(64<<24), ImplicitList1, NULL, NULL, OperandInfo10 }, // Inst #398 = CMOVO16rr
- { 399, 7, 1, 0, "CMOVO32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(64<<24), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #399 = CMOVO32rm
- { 400, 3, 1, 0, "CMOVO32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(64<<24), ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #400 = CMOVO32rr
- { 401, 7, 1, 0, "CMOVO64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(64<<24), ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #401 = CMOVO64rm
- { 402, 3, 1, 0, "CMOVO64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(64<<24), ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #402 = CMOVO64rr
- { 403, 7, 1, 0, "CMOVP16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(74<<24), ImplicitList1, NULL, NULL, OperandInfo9 }, // Inst #403 = CMOVP16rm
- { 404, 3, 1, 0, "CMOVP16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(74<<24), ImplicitList1, NULL, NULL, OperandInfo10 }, // Inst #404 = CMOVP16rr
- { 405, 7, 1, 0, "CMOVP32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(74<<24), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #405 = CMOVP32rm
- { 406, 3, 1, 0, "CMOVP32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(74<<24), ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #406 = CMOVP32rr
- { 407, 7, 1, 0, "CMOVP64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(74<<24), ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #407 = CMOVP64rm
- { 408, 3, 1, 0, "CMOVP64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(74<<24), ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #408 = CMOVP64rr
- { 409, 1, 1, 0, "CMOVP_F", 0|(1<<TID::UnmodeledSideEffects), 0|2|(5<<8)|(216<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #409 = CMOVP_F
- { 410, 3, 1, 0, "CMOVP_Fp32", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo59 }, // Inst #410 = CMOVP_Fp32
- { 411, 3, 1, 0, "CMOVP_Fp64", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo60 }, // Inst #411 = CMOVP_Fp64
- { 412, 3, 1, 0, "CMOVP_Fp80", 0, 0|(6<<16), ImplicitList1, NULL, NULL, OperandInfo61 }, // Inst #412 = CMOVP_Fp80
- { 413, 7, 1, 0, "CMOVS16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(72<<24), ImplicitList1, NULL, NULL, OperandInfo9 }, // Inst #413 = CMOVS16rm
- { 414, 3, 1, 0, "CMOVS16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(72<<24), ImplicitList1, NULL, NULL, OperandInfo10 }, // Inst #414 = CMOVS16rr
- { 415, 7, 1, 0, "CMOVS32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(72<<24), ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #415 = CMOVS32rm
- { 416, 3, 1, 0, "CMOVS32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(72<<24), ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #416 = CMOVS32rr
- { 417, 7, 1, 0, "CMOVS64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(72<<24), ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #417 = CMOVS64rm
- { 418, 3, 1, 0, "CMOVS64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(72<<24), ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #418 = CMOVS64rr
- { 419, 4, 1, 0, "CMOV_FR32", 0|(1<<TID::UsesCustomInserter), 0, ImplicitList1, NULL, NULL, OperandInfo62 }, // Inst #419 = CMOV_FR32
- { 420, 4, 1, 0, "CMOV_FR64", 0|(1<<TID::UsesCustomInserter), 0, ImplicitList1, NULL, NULL, OperandInfo63 }, // Inst #420 = CMOV_FR64
- { 421, 4, 1, 0, "CMOV_GR8", 0|(1<<TID::UsesCustomInserter), 0, ImplicitList1, ImplicitList1, Barriers1, OperandInfo64 }, // Inst #421 = CMOV_GR8
- { 422, 4, 1, 0, "CMOV_V1I64", 0|(1<<TID::UsesCustomInserter), 0, ImplicitList1, NULL, NULL, OperandInfo65 }, // Inst #422 = CMOV_V1I64
- { 423, 4, 1, 0, "CMOV_V2F64", 0|(1<<TID::UsesCustomInserter), 0, ImplicitList1, NULL, NULL, OperandInfo66 }, // Inst #423 = CMOV_V2F64
- { 424, 4, 1, 0, "CMOV_V2I64", 0|(1<<TID::UsesCustomInserter), 0, ImplicitList1, NULL, NULL, OperandInfo66 }, // Inst #424 = CMOV_V2I64
- { 425, 4, 1, 0, "CMOV_V4F32", 0|(1<<TID::UsesCustomInserter), 0, ImplicitList1, NULL, NULL, OperandInfo66 }, // Inst #425 = CMOV_V4F32
- { 426, 1, 0, 0, "CMP16i16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(3<<13)|(61<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #426 = CMP16i16
- { 427, 6, 0, 0, "CMP16mi", 0|(1<<TID::MayLoad), 0|31|(1<<6)|(3<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #427 = CMP16mi
- { 428, 6, 0, 0, "CMP16mi8", 0|(1<<TID::MayLoad), 0|31|(1<<6)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #428 = CMP16mi8
- { 429, 6, 0, 0, "CMP16mr", 0|(1<<TID::MayLoad), 0|4|(1<<6)|(57<<24), NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #429 = CMP16mr
- { 430, 2, 0, 0, "CMP16mrmrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(59<<24), NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #430 = CMP16mrmrr
- { 431, 2, 0, 0, "CMP16ri", 0, 0|23|(1<<6)|(3<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo54 }, // Inst #431 = CMP16ri
- { 432, 2, 0, 0, "CMP16ri8", 0, 0|23|(1<<6)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo54 }, // Inst #432 = CMP16ri8
- { 433, 6, 0, 0, "CMP16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(59<<24), NULL, ImplicitList1, Barriers1, OperandInfo46 }, // Inst #433 = CMP16rm
- { 434, 2, 0, 0, "CMP16rr", 0, 0|3|(1<<6)|(57<<24), NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #434 = CMP16rr
- { 435, 1, 0, 0, "CMP32i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<13)|(61<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #435 = CMP32i32
- { 436, 6, 0, 0, "CMP32mi", 0|(1<<TID::MayLoad), 0|31|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #436 = CMP32mi
- { 437, 6, 0, 0, "CMP32mi8", 0|(1<<TID::MayLoad), 0|31|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #437 = CMP32mi8
- { 438, 6, 0, 0, "CMP32mr", 0|(1<<TID::MayLoad), 0|4|(57<<24), NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #438 = CMP32mr
- { 439, 2, 0, 0, "CMP32mrmrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(59<<24), NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #439 = CMP32mrmrr
- { 440, 2, 0, 0, "CMP32ri", 0, 0|23|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo55 }, // Inst #440 = CMP32ri
- { 441, 2, 0, 0, "CMP32ri8", 0, 0|23|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo55 }, // Inst #441 = CMP32ri8
- { 442, 6, 0, 0, "CMP32rm", 0|(1<<TID::MayLoad), 0|6|(59<<24), NULL, ImplicitList1, Barriers1, OperandInfo48 }, // Inst #442 = CMP32rm
- { 443, 2, 0, 0, "CMP32rr", 0, 0|3|(57<<24), NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #443 = CMP32rr
- { 444, 1, 0, 0, "CMP64i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(61<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #444 = CMP64i32
- { 445, 6, 0, 0, "CMP64mi32", 0|(1<<TID::MayLoad), 0|31|(1<<12)|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #445 = CMP64mi32
- { 446, 6, 0, 0, "CMP64mi8", 0|(1<<TID::MayLoad), 0|31|(1<<12)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #446 = CMP64mi8
- { 447, 6, 0, 0, "CMP64mr", 0|(1<<TID::MayLoad), 0|4|(1<<12)|(57<<24), NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #447 = CMP64mr
- { 448, 2, 0, 0, "CMP64mrmrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<12)|(59<<24), NULL, ImplicitList1, Barriers1, OperandInfo51 }, // Inst #448 = CMP64mrmrr
- { 449, 2, 0, 0, "CMP64ri32", 0, 0|23|(1<<12)|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo56 }, // Inst #449 = CMP64ri32
- { 450, 2, 0, 0, "CMP64ri8", 0, 0|23|(1<<12)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo56 }, // Inst #450 = CMP64ri8
- { 451, 6, 0, 0, "CMP64rm", 0|(1<<TID::MayLoad), 0|6|(1<<12)|(59<<24), NULL, ImplicitList1, Barriers1, OperandInfo50 }, // Inst #451 = CMP64rm
- { 452, 2, 0, 0, "CMP64rr", 0, 0|3|(1<<12)|(57<<24), NULL, ImplicitList1, Barriers1, OperandInfo51 }, // Inst #452 = CMP64rr
- { 453, 1, 0, 0, "CMP8i8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<13)|(60<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #453 = CMP8i8
- { 454, 6, 0, 0, "CMP8mi", 0|(1<<TID::MayLoad), 0|31|(1<<13)|(128<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #454 = CMP8mi
- { 455, 6, 0, 0, "CMP8mr", 0|(1<<TID::MayLoad), 0|4|(56<<24), NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #455 = CMP8mr
- { 456, 2, 0, 0, "CMP8mrmrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(58<<24), NULL, ImplicitList1, Barriers1, OperandInfo67 }, // Inst #456 = CMP8mrmrr
- { 457, 2, 0, 0, "CMP8ri", 0, 0|23|(1<<13)|(128<<24), NULL, ImplicitList1, Barriers1, OperandInfo68 }, // Inst #457 = CMP8ri
- { 458, 6, 0, 0, "CMP8rm", 0|(1<<TID::MayLoad), 0|6|(58<<24), NULL, ImplicitList1, Barriers1, OperandInfo69 }, // Inst #458 = CMP8rm
- { 459, 2, 0, 0, "CMP8rr", 0, 0|3|(56<<24), NULL, ImplicitList1, Barriers1, OperandInfo67 }, // Inst #459 = CMP8rr
- { 460, 8, 1, 0, "CMPPDrmi", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(1<<13)|(194<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #460 = CMPPDrmi
- { 461, 4, 1, 0, "CMPPDrri", 0, 0|5|(1<<6)|(1<<8)|(1<<13)|(194<<24), NULL, NULL, NULL, OperandInfo45 }, // Inst #461 = CMPPDrri
- { 462, 8, 1, 0, "CMPPSrmi", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<13)|(194<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #462 = CMPPSrmi
- { 463, 4, 1, 0, "CMPPSrri", 0, 0|5|(1<<8)|(1<<13)|(194<<24), NULL, NULL, NULL, OperandInfo45 }, // Inst #463 = CMPPSrri
- { 464, 0, 0, 0, "CMPS16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(167<<24), NULL, NULL, NULL, 0 }, // Inst #464 = CMPS16
- { 465, 0, 0, 0, "CMPS32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(167<<24), NULL, NULL, NULL, 0 }, // Inst #465 = CMPS32
- { 466, 0, 0, 0, "CMPS64", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(167<<24), NULL, NULL, NULL, 0 }, // Inst #466 = CMPS64
- { 467, 0, 0, 0, "CMPS8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(166<<24), NULL, NULL, NULL, 0 }, // Inst #467 = CMPS8
- { 468, 8, 1, 0, "CMPSDrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(1<<13)|(194<<24), NULL, NULL, NULL, OperandInfo70 }, // Inst #468 = CMPSDrm
- { 469, 4, 1, 0, "CMPSDrr", 0, 0|5|(11<<8)|(1<<13)|(194<<24), NULL, NULL, NULL, OperandInfo71 }, // Inst #469 = CMPSDrr
- { 470, 8, 1, 0, "CMPSSrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(1<<13)|(194<<24), NULL, NULL, NULL, OperandInfo72 }, // Inst #470 = CMPSSrm
- { 471, 4, 1, 0, "CMPSSrr", 0, 0|5|(12<<8)|(1<<13)|(194<<24), NULL, NULL, NULL, OperandInfo73 }, // Inst #471 = CMPSSrr
- { 472, 5, 0, 0, "CMPXCHG16B", 0|(1<<TID::UnmodeledSideEffects), 0|25|(1<<8)|(1<<12)|(199<<24), ImplicitList16, ImplicitList17, Barriers1, OperandInfo30 }, // Inst #472 = CMPXCHG16B
- { 473, 6, 0, 0, "CMPXCHG16rm", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<6)|(1<<8)|(177<<24), NULL, NULL, NULL, OperandInfo7 }, // Inst #473 = CMPXCHG16rm
- { 474, 2, 1, 0, "CMPXCHG16rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<6)|(1<<8)|(177<<24), NULL, NULL, NULL, OperandInfo47 }, // Inst #474 = CMPXCHG16rr
- { 475, 6, 0, 0, "CMPXCHG32rm", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(177<<24), NULL, NULL, NULL, OperandInfo11 }, // Inst #475 = CMPXCHG32rm
- { 476, 2, 1, 0, "CMPXCHG32rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(177<<24), NULL, NULL, NULL, OperandInfo49 }, // Inst #476 = CMPXCHG32rr
- { 477, 6, 0, 0, "CMPXCHG64rm", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(1<<12)|(177<<24), NULL, NULL, NULL, OperandInfo15 }, // Inst #477 = CMPXCHG64rm
- { 478, 2, 1, 0, "CMPXCHG64rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(1<<12)|(177<<24), NULL, NULL, NULL, OperandInfo51 }, // Inst #478 = CMPXCHG64rr
- { 479, 5, 0, 0, "CMPXCHG8B", 0|(1<<TID::UnmodeledSideEffects), 0|25|(1<<8)|(199<<24), ImplicitList6, ImplicitList18, Barriers6, OperandInfo30 }, // Inst #479 = CMPXCHG8B
- { 480, 6, 0, 0, "CMPXCHG8rm", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(176<<24), NULL, NULL, NULL, OperandInfo20 }, // Inst #480 = CMPXCHG8rm
- { 481, 2, 1, 0, "CMPXCHG8rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(176<<24), NULL, NULL, NULL, OperandInfo67 }, // Inst #481 = CMPXCHG8rr
- { 482, 6, 0, 0, "COMISDrm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(1<<8)|(47<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #482 = COMISDrm
- { 483, 2, 0, 0, "COMISDrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(1<<8)|(47<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #483 = COMISDrr
- { 484, 6, 0, 0, "COMISSrm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(47<<24), NULL, ImplicitList1, Barriers1, OperandInfo74 }, // Inst #484 = COMISSrm
- { 485, 2, 0, 0, "COMISSrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(47<<24), NULL, ImplicitList1, Barriers1, OperandInfo75 }, // Inst #485 = COMISSrr
- { 486, 1, 0, 0, "COMP_FST0r", 0|(1<<TID::UnmodeledSideEffects), 0|2|(3<<8)|(216<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #486 = COMP_FST0r
- { 487, 1, 0, 0, "COM_FIPr", 0|(1<<TID::UnmodeledSideEffects), 0|2|(10<<8)|(240<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #487 = COM_FIPr
- { 488, 1, 0, 0, "COM_FIr", 0|(1<<TID::UnmodeledSideEffects), 0|2|(6<<8)|(240<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #488 = COM_FIr
- { 489, 1, 0, 0, "COM_FST0r", 0|(1<<TID::UnmodeledSideEffects), 0|2|(3<<8)|(208<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #489 = COM_FST0r
- { 490, 0, 0, 0, "COS_F", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(255<<24), NULL, NULL, NULL, 0 }, // Inst #490 = COS_F
- { 491, 2, 1, 0, "COS_Fp32", 0, 0|(3<<16), NULL, NULL, NULL, OperandInfo2 }, // Inst #491 = COS_Fp32
- { 492, 2, 1, 0, "COS_Fp64", 0, 0|(3<<16), NULL, NULL, NULL, OperandInfo3 }, // Inst #492 = COS_Fp64
- { 493, 2, 1, 0, "COS_Fp80", 0, 0|(3<<16), NULL, NULL, NULL, OperandInfo4 }, // Inst #493 = COS_Fp80
- { 494, 0, 0, 0, "CPUID", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(162<<24), NULL, NULL, NULL, 0 }, // Inst #494 = CPUID
- { 495, 0, 0, 0, "CQO", 0, 0|1|(1<<12)|(153<<24), ImplicitList15, ImplicitList19, NULL, 0 }, // Inst #495 = CQO
- { 496, 7, 1, 0, "CRC32m16", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(15<<8)|(241<<24), NULL, NULL, NULL, OperandInfo13 }, // Inst #496 = CRC32m16
- { 497, 7, 1, 0, "CRC32m32", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(15<<8)|(241<<24), NULL, NULL, NULL, OperandInfo13 }, // Inst #497 = CRC32m32
- { 498, 7, 1, 0, "CRC32m8", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(15<<8)|(240<<24), NULL, NULL, NULL, OperandInfo13 }, // Inst #498 = CRC32m8
- { 499, 3, 1, 0, "CRC32r16", 0, 0|5|(1<<6)|(15<<8)|(241<<24), NULL, NULL, NULL, OperandInfo77 }, // Inst #499 = CRC32r16
- { 500, 3, 1, 0, "CRC32r32", 0, 0|5|(1<<6)|(15<<8)|(241<<24), NULL, NULL, NULL, OperandInfo14 }, // Inst #500 = CRC32r32
- { 501, 3, 1, 0, "CRC32r8", 0, 0|5|(1<<6)|(15<<8)|(240<<24), NULL, NULL, NULL, OperandInfo78 }, // Inst #501 = CRC32r8
- { 502, 7, 1, 0, "CRC64m64", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(15<<8)|(1<<12)|(240<<24), NULL, NULL, NULL, OperandInfo17 }, // Inst #502 = CRC64m64
- { 503, 3, 1, 0, "CRC64r64", 0, 0|5|(1<<6)|(15<<8)|(1<<12)|(240<<24), NULL, NULL, NULL, OperandInfo18 }, // Inst #503 = CRC64r64
- { 504, 0, 0, 0, "CS_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0|1|(46<<24), NULL, NULL, NULL, 0 }, // Inst #504 = CS_PREFIX
- { 505, 6, 1, 0, "CVTDQ2PDrm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(12<<8)|(230<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #505 = CVTDQ2PDrm
- { 506, 2, 1, 0, "CVTDQ2PDrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(12<<8)|(230<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #506 = CVTDQ2PDrr
- { 507, 6, 1, 0, "CVTDQ2PSrm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(91<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #507 = CVTDQ2PSrm
- { 508, 2, 1, 0, "CVTDQ2PSrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(91<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #508 = CVTDQ2PSrr
- { 509, 6, 1, 0, "CVTPD2DQrm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(11<<8)|(230<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #509 = CVTPD2DQrm
- { 510, 2, 1, 0, "CVTPD2DQrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(11<<8)|(230<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #510 = CVTPD2DQrr
- { 511, 6, 1, 0, "CVTPD2PSrm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(1<<8)|(90<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #511 = CVTPD2PSrm
- { 512, 2, 1, 0, "CVTPD2PSrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(1<<8)|(90<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #512 = CVTPD2PSrr
- { 513, 6, 1, 0, "CVTPS2DQrm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(1<<8)|(91<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #513 = CVTPS2DQrm
- { 514, 2, 1, 0, "CVTPS2DQrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(1<<8)|(91<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #514 = CVTPS2DQrr
- { 515, 6, 1, 0, "CVTPS2PDrm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(90<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #515 = CVTPS2PDrm
- { 516, 2, 1, 0, "CVTPS2PDrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(90<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #516 = CVTPS2PDrr
- { 517, 6, 1, 0, "CVTSD2SI64rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(11<<8)|(1<<12)|(45<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #517 = CVTSD2SI64rm
- { 518, 2, 1, 0, "CVTSD2SI64rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(11<<8)|(1<<12)|(45<<24), NULL, NULL, NULL, OperandInfo79 }, // Inst #518 = CVTSD2SI64rr
- { 519, 6, 1, 0, "CVTSD2SSrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(90<<24), NULL, NULL, NULL, OperandInfo80 }, // Inst #519 = CVTSD2SSrm
- { 520, 2, 1, 0, "CVTSD2SSrr", 0, 0|5|(11<<8)|(90<<24), NULL, NULL, NULL, OperandInfo81 }, // Inst #520 = CVTSD2SSrr
- { 521, 6, 1, 0, "CVTSI2SD64rm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(1<<12)|(42<<24), NULL, NULL, NULL, OperandInfo82 }, // Inst #521 = CVTSI2SD64rm
- { 522, 2, 1, 0, "CVTSI2SD64rr", 0, 0|5|(11<<8)|(1<<12)|(42<<24), NULL, NULL, NULL, OperandInfo83 }, // Inst #522 = CVTSI2SD64rr
- { 523, 6, 1, 0, "CVTSI2SDrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(42<<24), NULL, NULL, NULL, OperandInfo82 }, // Inst #523 = CVTSI2SDrm
- { 524, 2, 1, 0, "CVTSI2SDrr", 0, 0|5|(11<<8)|(42<<24), NULL, NULL, NULL, OperandInfo84 }, // Inst #524 = CVTSI2SDrr
- { 525, 6, 1, 0, "CVTSI2SS64rm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(1<<12)|(42<<24), NULL, NULL, NULL, OperandInfo80 }, // Inst #525 = CVTSI2SS64rm
- { 526, 2, 1, 0, "CVTSI2SS64rr", 0, 0|5|(12<<8)|(1<<12)|(42<<24), NULL, NULL, NULL, OperandInfo85 }, // Inst #526 = CVTSI2SS64rr
- { 527, 6, 1, 0, "CVTSI2SSrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(42<<24), NULL, NULL, NULL, OperandInfo80 }, // Inst #527 = CVTSI2SSrm
- { 528, 2, 1, 0, "CVTSI2SSrr", 0, 0|5|(12<<8)|(42<<24), NULL, NULL, NULL, OperandInfo86 }, // Inst #528 = CVTSI2SSrr
- { 529, 6, 1, 0, "CVTSS2SDrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(90<<24), NULL, NULL, NULL, OperandInfo82 }, // Inst #529 = CVTSS2SDrm
- { 530, 2, 1, 0, "CVTSS2SDrr", 0, 0|5|(12<<8)|(90<<24), NULL, NULL, NULL, OperandInfo87 }, // Inst #530 = CVTSS2SDrr
- { 531, 6, 1, 0, "CVTSS2SI64rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(12<<8)|(1<<12)|(45<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #531 = CVTSS2SI64rm
- { 532, 2, 1, 0, "CVTSS2SI64rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(12<<8)|(1<<12)|(45<<24), NULL, NULL, NULL, OperandInfo88 }, // Inst #532 = CVTSS2SI64rr
- { 533, 6, 1, 0, "CVTSS2SIrm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(12<<8)|(45<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #533 = CVTSS2SIrm
- { 534, 2, 1, 0, "CVTSS2SIrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(12<<8)|(45<<24), NULL, NULL, NULL, OperandInfo89 }, // Inst #534 = CVTSS2SIrr
- { 535, 6, 1, 0, "CVTTPS2DQrm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(12<<8)|(91<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #535 = CVTTPS2DQrm
- { 536, 2, 1, 0, "CVTTPS2DQrr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(12<<8)|(91<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #536 = CVTTPS2DQrr
- { 537, 6, 1, 0, "CVTTSD2SI64rm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(1<<12)|(44<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #537 = CVTTSD2SI64rm
- { 538, 2, 1, 0, "CVTTSD2SI64rr", 0, 0|5|(11<<8)|(1<<12)|(44<<24), NULL, NULL, NULL, OperandInfo79 }, // Inst #538 = CVTTSD2SI64rr
- { 539, 6, 1, 0, "CVTTSD2SIrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(44<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #539 = CVTTSD2SIrm
- { 540, 2, 1, 0, "CVTTSD2SIrr", 0, 0|5|(11<<8)|(44<<24), NULL, NULL, NULL, OperandInfo90 }, // Inst #540 = CVTTSD2SIrr
- { 541, 6, 1, 0, "CVTTSS2SI64rm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(1<<12)|(44<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #541 = CVTTSS2SI64rm
- { 542, 2, 1, 0, "CVTTSS2SI64rr", 0, 0|5|(12<<8)|(1<<12)|(44<<24), NULL, NULL, NULL, OperandInfo88 }, // Inst #542 = CVTTSS2SI64rr
- { 543, 6, 1, 0, "CVTTSS2SIrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(44<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #543 = CVTTSS2SIrm
- { 544, 2, 1, 0, "CVTTSS2SIrr", 0, 0|5|(12<<8)|(44<<24), NULL, NULL, NULL, OperandInfo89 }, // Inst #544 = CVTTSS2SIrr
- { 545, 0, 0, 0, "CWD", 0, 0|1|(1<<6)|(153<<24), ImplicitList12, ImplicitList20, NULL, 0 }, // Inst #545 = CWD
- { 546, 0, 0, 0, "CWDE", 0, 0|1|(152<<24), ImplicitList12, ImplicitList13, NULL, 0 }, // Inst #546 = CWDE
- { 547, 5, 0, 0, "DEC16m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<6)|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #547 = DEC16m
- { 548, 2, 1, 0, "DEC16r", 0|(1<<TID::ConvertibleTo3Addr), 0|2|(1<<6)|(72<<24), NULL, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #548 = DEC16r
- { 549, 5, 0, 0, "DEC32m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #549 = DEC32m
- { 550, 2, 1, 0, "DEC32r", 0|(1<<TID::ConvertibleTo3Addr), 0|2|(72<<24), NULL, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #550 = DEC32r
- { 551, 5, 0, 0, "DEC64_16m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<6)|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #551 = DEC64_16m
- { 552, 2, 1, 0, "DEC64_16r", 0|(1<<TID::ConvertibleTo3Addr), 0|17|(1<<6)|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #552 = DEC64_16r
- { 553, 5, 0, 0, "DEC64_32m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #553 = DEC64_32m
- { 554, 2, 1, 0, "DEC64_32r", 0|(1<<TID::ConvertibleTo3Addr), 0|17|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #554 = DEC64_32r
- { 555, 5, 0, 0, "DEC64m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<12)|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #555 = DEC64m
- { 556, 2, 1, 0, "DEC64r", 0|(1<<TID::ConvertibleTo3Addr), 0|17|(1<<12)|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #556 = DEC64r
- { 557, 5, 0, 0, "DEC8m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(254<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #557 = DEC8m
- { 558, 2, 1, 0, "DEC8r", 0, 0|17|(254<<24), NULL, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #558 = DEC8r
- { 559, 5, 0, 0, "DIV16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|30|(1<<6)|(247<<24), ImplicitList20, ImplicitList21, Barriers1, OperandInfo30 }, // Inst #559 = DIV16m
- { 560, 1, 0, 0, "DIV16r", 0|(1<<TID::UnmodeledSideEffects), 0|22|(1<<6)|(247<<24), ImplicitList20, ImplicitList21, Barriers1, OperandInfo93 }, // Inst #560 = DIV16r
- { 561, 5, 0, 0, "DIV32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|30|(247<<24), ImplicitList14, ImplicitList18, Barriers6, OperandInfo30 }, // Inst #561 = DIV32m
- { 562, 1, 0, 0, "DIV32r", 0|(1<<TID::UnmodeledSideEffects), 0|22|(247<<24), ImplicitList14, ImplicitList18, Barriers6, OperandInfo57 }, // Inst #562 = DIV32r
- { 563, 5, 0, 0, "DIV64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|30|(1<<12)|(247<<24), ImplicitList19, ImplicitList17, Barriers1, OperandInfo30 }, // Inst #563 = DIV64m
- { 564, 1, 0, 0, "DIV64r", 0|(1<<TID::UnmodeledSideEffects), 0|22|(1<<12)|(247<<24), ImplicitList19, ImplicitList17, Barriers1, OperandInfo58 }, // Inst #564 = DIV64r
- { 565, 5, 0, 0, "DIV8m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|30|(246<<24), ImplicitList12, ImplicitList22, Barriers1, OperandInfo30 }, // Inst #565 = DIV8m
- { 566, 1, 0, 0, "DIV8r", 0|(1<<TID::UnmodeledSideEffects), 0|22|(246<<24), ImplicitList12, ImplicitList22, Barriers1, OperandInfo94 }, // Inst #566 = DIV8r
- { 567, 7, 1, 0, "DIVPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(94<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #567 = DIVPDrm
- { 568, 3, 1, 0, "DIVPDrr", 0, 0|5|(1<<6)|(1<<8)|(94<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #568 = DIVPDrr
- { 569, 7, 1, 0, "DIVPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(94<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #569 = DIVPSrm
- { 570, 3, 1, 0, "DIVPSrr", 0, 0|5|(1<<8)|(94<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #570 = DIVPSrr
- { 571, 5, 0, 0, "DIVR_F32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|31|(216<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #571 = DIVR_F32m
- { 572, 5, 0, 0, "DIVR_F64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|31|(220<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #572 = DIVR_F64m
- { 573, 5, 0, 0, "DIVR_FI16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|31|(222<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #573 = DIVR_FI16m
- { 574, 5, 0, 0, "DIVR_FI32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|31|(218<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #574 = DIVR_FI32m
- { 575, 1, 0, 0, "DIVR_FPrST0", 0|(1<<TID::UnmodeledSideEffects), 0|2|(9<<8)|(240<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #575 = DIVR_FPrST0
- { 576, 1, 0, 0, "DIVR_FST0r", 0|(1<<TID::UnmodeledSideEffects), 0|2|(3<<8)|(248<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #576 = DIVR_FST0r
- { 577, 7, 1, 0, "DIVR_Fp32m", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #577 = DIVR_Fp32m
- { 578, 7, 1, 0, "DIVR_Fp64m", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #578 = DIVR_Fp64m
- { 579, 7, 1, 0, "DIVR_Fp64m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #579 = DIVR_Fp64m32
- { 580, 7, 1, 0, "DIVR_Fp80m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #580 = DIVR_Fp80m32
- { 581, 7, 1, 0, "DIVR_Fp80m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #581 = DIVR_Fp80m64
- { 582, 7, 1, 0, "DIVR_FpI16m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #582 = DIVR_FpI16m32
- { 583, 7, 1, 0, "DIVR_FpI16m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #583 = DIVR_FpI16m64
- { 584, 7, 1, 0, "DIVR_FpI16m80", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #584 = DIVR_FpI16m80
- { 585, 7, 1, 0, "DIVR_FpI32m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #585 = DIVR_FpI32m32
- { 586, 7, 1, 0, "DIVR_FpI32m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #586 = DIVR_FpI32m64
- { 587, 7, 1, 0, "DIVR_FpI32m80", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #587 = DIVR_FpI32m80
- { 588, 1, 0, 0, "DIVR_FrST0", 0|(1<<TID::UnmodeledSideEffects), 0|2|(7<<8)|(240<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #588 = DIVR_FrST0
- { 589, 7, 1, 0, "DIVSDrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(94<<24), NULL, NULL, NULL, OperandInfo26 }, // Inst #589 = DIVSDrm
- { 590, 7, 1, 0, "DIVSDrm_Int", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(94<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #590 = DIVSDrm_Int
- { 591, 3, 1, 0, "DIVSDrr", 0, 0|5|(11<<8)|(94<<24), NULL, NULL, NULL, OperandInfo27 }, // Inst #591 = DIVSDrr
- { 592, 3, 1, 0, "DIVSDrr_Int", 0, 0|5|(11<<8)|(94<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #592 = DIVSDrr_Int
- { 593, 7, 1, 0, "DIVSSrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(94<<24), NULL, NULL, NULL, OperandInfo28 }, // Inst #593 = DIVSSrm
- { 594, 7, 1, 0, "DIVSSrm_Int", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(94<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #594 = DIVSSrm_Int
- { 595, 3, 1, 0, "DIVSSrr", 0, 0|5|(12<<8)|(94<<24), NULL, NULL, NULL, OperandInfo29 }, // Inst #595 = DIVSSrr
- { 596, 3, 1, 0, "DIVSSrr_Int", 0, 0|5|(12<<8)|(94<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #596 = DIVSSrr_Int
- { 597, 5, 0, 0, "DIV_F32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|30|(216<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #597 = DIV_F32m
- { 598, 5, 0, 0, "DIV_F64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|30|(220<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #598 = DIV_F64m
- { 599, 5, 0, 0, "DIV_FI16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|30|(222<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #599 = DIV_FI16m
- { 600, 5, 0, 0, "DIV_FI32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|30|(218<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #600 = DIV_FI32m
- { 601, 1, 0, 0, "DIV_FPrST0", 0|(1<<TID::UnmodeledSideEffects), 0|2|(9<<8)|(248<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #601 = DIV_FPrST0
- { 602, 1, 0, 0, "DIV_FST0r", 0|(1<<TID::UnmodeledSideEffects), 0|2|(3<<8)|(240<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #602 = DIV_FST0r
- { 603, 3, 1, 0, "DIV_Fp32", 0, 0|(4<<16), NULL, NULL, NULL, OperandInfo32 }, // Inst #603 = DIV_Fp32
- { 604, 7, 1, 0, "DIV_Fp32m", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #604 = DIV_Fp32m
- { 605, 3, 1, 0, "DIV_Fp64", 0, 0|(4<<16), NULL, NULL, NULL, OperandInfo34 }, // Inst #605 = DIV_Fp64
- { 606, 7, 1, 0, "DIV_Fp64m", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #606 = DIV_Fp64m
- { 607, 7, 1, 0, "DIV_Fp64m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #607 = DIV_Fp64m32
- { 608, 3, 1, 0, "DIV_Fp80", 0, 0|(4<<16), NULL, NULL, NULL, OperandInfo36 }, // Inst #608 = DIV_Fp80
- { 609, 7, 1, 0, "DIV_Fp80m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #609 = DIV_Fp80m32
- { 610, 7, 1, 0, "DIV_Fp80m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #610 = DIV_Fp80m64
- { 611, 7, 1, 0, "DIV_FpI16m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #611 = DIV_FpI16m32
- { 612, 7, 1, 0, "DIV_FpI16m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #612 = DIV_FpI16m64
- { 613, 7, 1, 0, "DIV_FpI16m80", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #613 = DIV_FpI16m80
- { 614, 7, 1, 0, "DIV_FpI32m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #614 = DIV_FpI32m32
- { 615, 7, 1, 0, "DIV_FpI32m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #615 = DIV_FpI32m64
- { 616, 7, 1, 0, "DIV_FpI32m80", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #616 = DIV_FpI32m80
- { 617, 1, 0, 0, "DIV_FrST0", 0|(1<<TID::UnmodeledSideEffects), 0|2|(7<<8)|(248<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #617 = DIV_FrST0
- { 618, 8, 1, 0, "DPPDrmi", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(1<<13)|(65<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #618 = DPPDrmi
- { 619, 4, 1, 0, "DPPDrri", 0|(1<<TID::Commutable), 0|5|(1<<6)|(14<<8)|(1<<13)|(65<<24), NULL, NULL, NULL, OperandInfo45 }, // Inst #619 = DPPDrri
- { 620, 8, 1, 0, "DPPSrmi", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(1<<13)|(64<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #620 = DPPSrmi
- { 621, 4, 1, 0, "DPPSrri", 0|(1<<TID::Commutable), 0|5|(1<<6)|(14<<8)|(1<<13)|(64<<24), NULL, NULL, NULL, OperandInfo45 }, // Inst #621 = DPPSrri
- { 622, 0, 0, 0, "DS_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0|1|(62<<24), NULL, NULL, NULL, 0 }, // Inst #622 = DS_PREFIX
- { 623, 1, 0, 0, "EH_RETURN", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Terminator), 0|1|(195<<24), NULL, NULL, NULL, OperandInfo57 }, // Inst #623 = EH_RETURN
- { 624, 1, 0, 0, "EH_RETURN64", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Terminator), 0|1|(195<<24), NULL, NULL, NULL, OperandInfo58 }, // Inst #624 = EH_RETURN64
- { 625, 2, 0, 0, "ENTER", 0|(1<<TID::UnmodeledSideEffects), 0|1|(200<<24), NULL, NULL, NULL, OperandInfo38 }, // Inst #625 = ENTER
- { 626, 0, 0, 0, "ES_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0|1|(38<<24), NULL, NULL, NULL, 0 }, // Inst #626 = ES_PREFIX
- { 627, 7, 0, 0, "EXTRACTPSmr", 0|(1<<TID::MayStore), 0|4|(1<<6)|(14<<8)|(1<<13)|(23<<24), NULL, NULL, NULL, OperandInfo95 }, // Inst #627 = EXTRACTPSmr
- { 628, 3, 1, 0, "EXTRACTPSrr", 0, 0|3|(1<<6)|(14<<8)|(1<<13)|(23<<24), NULL, NULL, NULL, OperandInfo96 }, // Inst #628 = EXTRACTPSrr
- { 629, 0, 0, 0, "F2XM1", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(240<<24), NULL, NULL, NULL, 0 }, // Inst #629 = F2XM1
- { 630, 2, 0, 0, "FARCALL16i", 0|(1<<TID::Call)|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(154<<24), ImplicitList2, ImplicitList9, Barriers3, OperandInfo38 }, // Inst #630 = FARCALL16i
- { 631, 5, 0, 0, "FARCALL16m", 0|(1<<TID::Call)|(1<<TID::UnmodeledSideEffects), 0|27|(1<<6)|(255<<24), ImplicitList2, ImplicitList9, Barriers3, OperandInfo30 }, // Inst #631 = FARCALL16m
- { 632, 2, 0, 0, "FARCALL32i", 0|(1<<TID::Call)|(1<<TID::UnmodeledSideEffects), 0|1|(154<<24), ImplicitList2, ImplicitList9, Barriers3, OperandInfo38 }, // Inst #632 = FARCALL32i
- { 633, 5, 0, 0, "FARCALL32m", 0|(1<<TID::Call)|(1<<TID::UnmodeledSideEffects), 0|27|(255<<24), ImplicitList2, ImplicitList9, Barriers3, OperandInfo30 }, // Inst #633 = FARCALL32m
- { 634, 5, 0, 0, "FARCALL64", 0|(1<<TID::Call)|(1<<TID::UnmodeledSideEffects), 0|27|(1<<12)|(255<<24), ImplicitList4, ImplicitList10, Barriers4, OperandInfo30 }, // Inst #634 = FARCALL64
- { 635, 2, 0, 0, "FARJMP16i", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(234<<24), NULL, NULL, NULL, OperandInfo38 }, // Inst #635 = FARJMP16i
- { 636, 5, 0, 0, "FARJMP16m", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|29|(1<<6)|(255<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #636 = FARJMP16m
- { 637, 2, 0, 0, "FARJMP32i", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(234<<24), NULL, NULL, NULL, OperandInfo38 }, // Inst #637 = FARJMP32i
- { 638, 5, 0, 0, "FARJMP32m", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|29|(255<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #638 = FARJMP32m
- { 639, 5, 0, 0, "FARJMP64", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|29|(1<<12)|(255<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #639 = FARJMP64
- { 640, 5, 0, 0, "FBLDm", 0|(1<<TID::UnmodeledSideEffects), 0|28|(223<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #640 = FBLDm
- { 641, 5, 1, 0, "FBSTPm", 0|(1<<TID::UnmodeledSideEffects), 0|30|(223<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #641 = FBSTPm
- { 642, 5, 0, 0, "FCOM32m", 0|(1<<TID::UnmodeledSideEffects), 0|26|(216<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #642 = FCOM32m
- { 643, 5, 0, 0, "FCOM64m", 0|(1<<TID::UnmodeledSideEffects), 0|26|(220<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #643 = FCOM64m
- { 644, 5, 0, 0, "FCOMP32m", 0|(1<<TID::UnmodeledSideEffects), 0|27|(216<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #644 = FCOMP32m
- { 645, 5, 0, 0, "FCOMP64m", 0|(1<<TID::UnmodeledSideEffects), 0|27|(220<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #645 = FCOMP64m
- { 646, 0, 0, 0, "FCOMPP", 0|(1<<TID::UnmodeledSideEffects), 0|1|(9<<8)|(217<<24), NULL, NULL, NULL, 0 }, // Inst #646 = FCOMPP
- { 647, 0, 0, 0, "FDECSTP", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(246<<24), NULL, NULL, NULL, 0 }, // Inst #647 = FDECSTP
- { 648, 1, 0, 0, "FFREE", 0|(1<<TID::UnmodeledSideEffects), 0|2|(8<<8)|(192<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #648 = FFREE
- { 649, 5, 0, 0, "FICOM16m", 0|(1<<TID::UnmodeledSideEffects), 0|26|(222<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #649 = FICOM16m
- { 650, 5, 0, 0, "FICOM32m", 0|(1<<TID::UnmodeledSideEffects), 0|26|(218<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #650 = FICOM32m
- { 651, 5, 0, 0, "FICOMP16m", 0|(1<<TID::UnmodeledSideEffects), 0|27|(222<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #651 = FICOMP16m
- { 652, 5, 0, 0, "FICOMP32m", 0|(1<<TID::UnmodeledSideEffects), 0|27|(218<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #652 = FICOMP32m
- { 653, 0, 0, 0, "FINCSTP", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(247<<24), NULL, NULL, NULL, 0 }, // Inst #653 = FINCSTP
- { 654, 5, 0, 0, "FLDCW16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|29|(217<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #654 = FLDCW16m
- { 655, 5, 0, 0, "FLDENVm", 0|(1<<TID::UnmodeledSideEffects), 0|28|(217<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #655 = FLDENVm
- { 656, 0, 0, 0, "FLDL2E", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(234<<24), NULL, NULL, NULL, 0 }, // Inst #656 = FLDL2E
- { 657, 0, 0, 0, "FLDL2T", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(233<<24), NULL, NULL, NULL, 0 }, // Inst #657 = FLDL2T
- { 658, 0, 0, 0, "FLDLG2", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(236<<24), NULL, NULL, NULL, 0 }, // Inst #658 = FLDLG2
- { 659, 0, 0, 0, "FLDLN2", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(237<<24), NULL, NULL, NULL, 0 }, // Inst #659 = FLDLN2
- { 660, 0, 0, 0, "FLDPI", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(235<<24), NULL, NULL, NULL, 0 }, // Inst #660 = FLDPI
- { 661, 0, 0, 0, "FNCLEX", 0|(1<<TID::UnmodeledSideEffects), 0|1|(6<<8)|(226<<24), NULL, NULL, NULL, 0 }, // Inst #661 = FNCLEX
- { 662, 0, 0, 0, "FNINIT", 0|(1<<TID::UnmodeledSideEffects), 0|1|(6<<8)|(227<<24), NULL, NULL, NULL, 0 }, // Inst #662 = FNINIT
- { 663, 0, 0, 0, "FNOP", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(208<<24), NULL, NULL, NULL, 0 }, // Inst #663 = FNOP
- { 664, 5, 0, 0, "FNSTCW16m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|31|(217<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #664 = FNSTCW16m
- { 665, 0, 0, 0, "FNSTSW8r", 0|(1<<TID::UnmodeledSideEffects), 0|1|(10<<8)|(224<<24), NULL, ImplicitList12, NULL, 0 }, // Inst #665 = FNSTSW8r
- { 666, 5, 1, 0, "FNSTSWm", 0|(1<<TID::UnmodeledSideEffects), 0|31|(221<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #666 = FNSTSWm
- { 667, 6, 0, 0, "FP32_TO_INT16_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, NULL, NULL, OperandInfo97 }, // Inst #667 = FP32_TO_INT16_IN_MEM
- { 668, 6, 0, 0, "FP32_TO_INT32_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, NULL, NULL, OperandInfo97 }, // Inst #668 = FP32_TO_INT32_IN_MEM
- { 669, 6, 0, 0, "FP32_TO_INT64_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, NULL, NULL, OperandInfo97 }, // Inst #669 = FP32_TO_INT64_IN_MEM
- { 670, 6, 0, 0, "FP64_TO_INT16_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, NULL, NULL, OperandInfo98 }, // Inst #670 = FP64_TO_INT16_IN_MEM
- { 671, 6, 0, 0, "FP64_TO_INT32_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, NULL, NULL, OperandInfo98 }, // Inst #671 = FP64_TO_INT32_IN_MEM
- { 672, 6, 0, 0, "FP64_TO_INT64_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, NULL, NULL, OperandInfo98 }, // Inst #672 = FP64_TO_INT64_IN_MEM
- { 673, 6, 0, 0, "FP80_TO_INT16_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, NULL, NULL, OperandInfo99 }, // Inst #673 = FP80_TO_INT16_IN_MEM
- { 674, 6, 0, 0, "FP80_TO_INT32_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, NULL, NULL, OperandInfo99 }, // Inst #674 = FP80_TO_INT32_IN_MEM
- { 675, 6, 0, 0, "FP80_TO_INT64_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0, NULL, NULL, NULL, OperandInfo99 }, // Inst #675 = FP80_TO_INT64_IN_MEM
- { 676, 0, 0, 0, "FPATAN", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(243<<24), NULL, NULL, NULL, 0 }, // Inst #676 = FPATAN
- { 677, 0, 0, 0, "FPREM", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(248<<24), NULL, NULL, NULL, 0 }, // Inst #677 = FPREM
- { 678, 0, 0, 0, "FPREM1", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(245<<24), NULL, NULL, NULL, 0 }, // Inst #678 = FPREM1
- { 679, 0, 0, 0, "FPTAN", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(242<<24), NULL, NULL, NULL, 0 }, // Inst #679 = FPTAN
- { 680, 0, 0, 0, "FP_REG_KILL", 0|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0, NULL, ImplicitList23, Barriers7, 0 }, // Inst #680 = FP_REG_KILL
- { 681, 0, 0, 0, "FRNDINT", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(252<<24), NULL, NULL, NULL, 0 }, // Inst #681 = FRNDINT
- { 682, 5, 1, 0, "FRSTORm", 0|(1<<TID::UnmodeledSideEffects), 0|28|(221<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #682 = FRSTORm
- { 683, 5, 1, 0, "FSAVEm", 0|(1<<TID::UnmodeledSideEffects), 0|30|(221<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #683 = FSAVEm
- { 684, 0, 0, 0, "FSCALE", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(253<<24), NULL, NULL, NULL, 0 }, // Inst #684 = FSCALE
- { 685, 0, 0, 0, "FSINCOS", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(251<<24), NULL, NULL, NULL, 0 }, // Inst #685 = FSINCOS
- { 686, 5, 1, 0, "FSTENVm", 0|(1<<TID::UnmodeledSideEffects), 0|30|(217<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #686 = FSTENVm
- { 687, 6, 1, 0, "FS_MOV32rm", 0|(1<<TID::MayLoad), 0|6|(1<<20)|(139<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #687 = FS_MOV32rm
- { 688, 0, 0, 0, "FS_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0|1|(100<<24), NULL, NULL, NULL, 0 }, // Inst #688 = FS_PREFIX
- { 689, 0, 0, 0, "FXAM", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(229<<24), NULL, NULL, NULL, 0 }, // Inst #689 = FXAM
- { 690, 5, 0, 0, "FXRSTOR", 0|(1<<TID::UnmodeledSideEffects), 0|25|(1<<8)|(174<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #690 = FXRSTOR
- { 691, 5, 1, 0, "FXSAVE", 0|(1<<TID::UnmodeledSideEffects), 0|24|(1<<8)|(174<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #691 = FXSAVE
- { 692, 0, 0, 0, "FXTRACT", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(244<<24), NULL, NULL, NULL, 0 }, // Inst #692 = FXTRACT
- { 693, 0, 0, 0, "FYL2X", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(241<<24), NULL, NULL, NULL, 0 }, // Inst #693 = FYL2X
- { 694, 0, 0, 0, "FYL2XP1", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(249<<24), NULL, NULL, NULL, 0 }, // Inst #694 = FYL2XP1
- { 695, 1, 1, 0, "FpGET_ST0_32", 0|(1<<TID::UnmodeledSideEffects), 0|(7<<16), NULL, NULL, NULL, OperandInfo100 }, // Inst #695 = FpGET_ST0_32
- { 696, 1, 1, 0, "FpGET_ST0_64", 0|(1<<TID::UnmodeledSideEffects), 0|(7<<16), NULL, NULL, NULL, OperandInfo101 }, // Inst #696 = FpGET_ST0_64
- { 697, 1, 1, 0, "FpGET_ST0_80", 0|(1<<TID::UnmodeledSideEffects), 0|(7<<16), NULL, NULL, NULL, OperandInfo102 }, // Inst #697 = FpGET_ST0_80
- { 698, 1, 1, 0, "FpGET_ST1_32", 0|(1<<TID::UnmodeledSideEffects), 0|(7<<16), NULL, NULL, NULL, OperandInfo100 }, // Inst #698 = FpGET_ST1_32
- { 699, 1, 1, 0, "FpGET_ST1_64", 0|(1<<TID::UnmodeledSideEffects), 0|(7<<16), NULL, NULL, NULL, OperandInfo101 }, // Inst #699 = FpGET_ST1_64
- { 700, 1, 1, 0, "FpGET_ST1_80", 0|(1<<TID::UnmodeledSideEffects), 0|(7<<16), NULL, NULL, NULL, OperandInfo102 }, // Inst #700 = FpGET_ST1_80
- { 701, 1, 0, 0, "FpSET_ST0_32", 0|(1<<TID::UnmodeledSideEffects), 0|(7<<16), NULL, ImplicitList24, NULL, OperandInfo100 }, // Inst #701 = FpSET_ST0_32
- { 702, 1, 0, 0, "FpSET_ST0_64", 0|(1<<TID::UnmodeledSideEffects), 0|(7<<16), NULL, ImplicitList24, NULL, OperandInfo101 }, // Inst #702 = FpSET_ST0_64
- { 703, 1, 0, 0, "FpSET_ST0_80", 0|(1<<TID::UnmodeledSideEffects), 0|(7<<16), NULL, ImplicitList24, NULL, OperandInfo102 }, // Inst #703 = FpSET_ST0_80
- { 704, 1, 0, 0, "FpSET_ST1_32", 0|(1<<TID::UnmodeledSideEffects), 0|(7<<16), NULL, ImplicitList25, NULL, OperandInfo100 }, // Inst #704 = FpSET_ST1_32
- { 705, 1, 0, 0, "FpSET_ST1_64", 0|(1<<TID::UnmodeledSideEffects), 0|(7<<16), NULL, ImplicitList25, NULL, OperandInfo101 }, // Inst #705 = FpSET_ST1_64
- { 706, 1, 0, 0, "FpSET_ST1_80", 0|(1<<TID::UnmodeledSideEffects), 0|(7<<16), NULL, ImplicitList25, NULL, OperandInfo102 }, // Inst #706 = FpSET_ST1_80
- { 707, 7, 1, 0, "FsANDNPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(85<<24), NULL, NULL, NULL, OperandInfo26 }, // Inst #707 = FsANDNPDrm
- { 708, 3, 1, 0, "FsANDNPDrr", 0, 0|5|(1<<6)|(1<<8)|(85<<24), NULL, NULL, NULL, OperandInfo27 }, // Inst #708 = FsANDNPDrr
- { 709, 7, 1, 0, "FsANDNPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(85<<24), NULL, NULL, NULL, OperandInfo28 }, // Inst #709 = FsANDNPSrm
- { 710, 3, 1, 0, "FsANDNPSrr", 0, 0|5|(1<<8)|(85<<24), NULL, NULL, NULL, OperandInfo29 }, // Inst #710 = FsANDNPSrr
- { 711, 7, 1, 0, "FsANDPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(84<<24), NULL, NULL, NULL, OperandInfo26 }, // Inst #711 = FsANDPDrm
- { 712, 3, 1, 0, "FsANDPDrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(84<<24), NULL, NULL, NULL, OperandInfo27 }, // Inst #712 = FsANDPDrr
- { 713, 7, 1, 0, "FsANDPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(84<<24), NULL, NULL, NULL, OperandInfo28 }, // Inst #713 = FsANDPSrm
- { 714, 3, 1, 0, "FsANDPSrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(84<<24), NULL, NULL, NULL, OperandInfo29 }, // Inst #714 = FsANDPSrr
- { 715, 1, 1, 0, "FsFLD0SD", 0|(1<<TID::FoldableAsLoad)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0|32|(1<<6)|(1<<8)|(239<<24), NULL, NULL, NULL, OperandInfo103 }, // Inst #715 = FsFLD0SD
- { 716, 1, 1, 0, "FsFLD0SS", 0|(1<<TID::FoldableAsLoad)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0|32|(1<<6)|(1<<8)|(239<<24), NULL, NULL, NULL, OperandInfo104 }, // Inst #716 = FsFLD0SS
- { 717, 6, 1, 0, "FsMOVAPDrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|6|(1<<6)|(1<<8)|(40<<24), NULL, NULL, NULL, OperandInfo82 }, // Inst #717 = FsMOVAPDrm
- { 718, 2, 1, 0, "FsMOVAPDrr", 0, 0|5|(1<<6)|(1<<8)|(40<<24), NULL, NULL, NULL, OperandInfo105 }, // Inst #718 = FsMOVAPDrr
- { 719, 6, 1, 0, "FsMOVAPSrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|6|(1<<8)|(40<<24), NULL, NULL, NULL, OperandInfo80 }, // Inst #719 = FsMOVAPSrm
- { 720, 2, 1, 0, "FsMOVAPSrr", 0, 0|5|(1<<8)|(40<<24), NULL, NULL, NULL, OperandInfo106 }, // Inst #720 = FsMOVAPSrr
- { 721, 7, 1, 0, "FsORPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(86<<24), NULL, NULL, NULL, OperandInfo26 }, // Inst #721 = FsORPDrm
- { 722, 3, 1, 0, "FsORPDrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(86<<24), NULL, NULL, NULL, OperandInfo27 }, // Inst #722 = FsORPDrr
- { 723, 7, 1, 0, "FsORPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(86<<24), NULL, NULL, NULL, OperandInfo28 }, // Inst #723 = FsORPSrm
- { 724, 3, 1, 0, "FsORPSrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(86<<24), NULL, NULL, NULL, OperandInfo29 }, // Inst #724 = FsORPSrr
- { 725, 7, 1, 0, "FsXORPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(87<<24), NULL, NULL, NULL, OperandInfo26 }, // Inst #725 = FsXORPDrm
- { 726, 3, 1, 0, "FsXORPDrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(87<<24), NULL, NULL, NULL, OperandInfo27 }, // Inst #726 = FsXORPDrr
- { 727, 7, 1, 0, "FsXORPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(87<<24), NULL, NULL, NULL, OperandInfo28 }, // Inst #727 = FsXORPSrm
- { 728, 3, 1, 0, "FsXORPSrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(87<<24), NULL, NULL, NULL, OperandInfo29 }, // Inst #728 = FsXORPSrr
- { 729, 6, 1, 0, "GS_MOV32rm", 0|(1<<TID::MayLoad), 0|6|(2<<20)|(139<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #729 = GS_MOV32rm
- { 730, 0, 0, 0, "GS_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0|1|(101<<24), NULL, NULL, NULL, 0 }, // Inst #730 = GS_PREFIX
- { 731, 7, 1, 0, "HADDPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(124<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #731 = HADDPDrm
- { 732, 3, 1, 0, "HADDPDrr", 0, 0|5|(1<<6)|(1<<8)|(124<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #732 = HADDPDrr
- { 733, 7, 1, 0, "HADDPSrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(124<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #733 = HADDPSrm
- { 734, 3, 1, 0, "HADDPSrr", 0, 0|5|(11<<8)|(124<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #734 = HADDPSrr
- { 735, 0, 0, 0, "HLT", 0|(1<<TID::UnmodeledSideEffects), 0|1|(244<<24), NULL, NULL, NULL, 0 }, // Inst #735 = HLT
- { 736, 7, 1, 0, "HSUBPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(125<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #736 = HSUBPDrm
- { 737, 3, 1, 0, "HSUBPDrr", 0, 0|5|(1<<6)|(1<<8)|(125<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #737 = HSUBPDrr
- { 738, 7, 1, 0, "HSUBPSrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(125<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #738 = HSUBPSrm
- { 739, 3, 1, 0, "HSUBPSrr", 0, 0|5|(11<<8)|(125<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #739 = HSUBPSrr
- { 740, 5, 0, 0, "IDIV16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|31|(1<<6)|(247<<24), ImplicitList20, ImplicitList21, Barriers1, OperandInfo30 }, // Inst #740 = IDIV16m
- { 741, 1, 0, 0, "IDIV16r", 0|(1<<TID::UnmodeledSideEffects), 0|23|(1<<6)|(247<<24), ImplicitList20, ImplicitList21, Barriers1, OperandInfo93 }, // Inst #741 = IDIV16r
- { 742, 5, 0, 0, "IDIV32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|31|(247<<24), ImplicitList14, ImplicitList18, Barriers6, OperandInfo30 }, // Inst #742 = IDIV32m
- { 743, 1, 0, 0, "IDIV32r", 0|(1<<TID::UnmodeledSideEffects), 0|23|(247<<24), ImplicitList14, ImplicitList18, Barriers6, OperandInfo57 }, // Inst #743 = IDIV32r
- { 744, 5, 0, 0, "IDIV64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|31|(1<<12)|(247<<24), ImplicitList19, ImplicitList17, Barriers1, OperandInfo30 }, // Inst #744 = IDIV64m
- { 745, 1, 0, 0, "IDIV64r", 0|(1<<TID::UnmodeledSideEffects), 0|23|(1<<12)|(247<<24), ImplicitList19, ImplicitList17, Barriers1, OperandInfo58 }, // Inst #745 = IDIV64r
- { 746, 5, 0, 0, "IDIV8m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|31|(246<<24), ImplicitList12, ImplicitList22, Barriers1, OperandInfo30 }, // Inst #746 = IDIV8m
- { 747, 1, 0, 0, "IDIV8r", 0|(1<<TID::UnmodeledSideEffects), 0|23|(246<<24), ImplicitList12, ImplicitList22, Barriers1, OperandInfo94 }, // Inst #747 = IDIV8r
- { 748, 5, 0, 0, "ILD_F16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|24|(223<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #748 = ILD_F16m
- { 749, 5, 0, 0, "ILD_F32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|24|(219<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #749 = ILD_F32m
- { 750, 5, 0, 0, "ILD_F64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|29|(223<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #750 = ILD_F64m
- { 751, 6, 1, 0, "ILD_Fp16m32", 0|(1<<TID::MayLoad), 0|(1<<16), NULL, NULL, NULL, OperandInfo107 }, // Inst #751 = ILD_Fp16m32
- { 752, 6, 1, 0, "ILD_Fp16m64", 0|(1<<TID::MayLoad), 0|(1<<16), NULL, NULL, NULL, OperandInfo108 }, // Inst #752 = ILD_Fp16m64
- { 753, 6, 1, 0, "ILD_Fp16m80", 0|(1<<TID::MayLoad), 0|(1<<16), NULL, NULL, NULL, OperandInfo109 }, // Inst #753 = ILD_Fp16m80
- { 754, 6, 1, 0, "ILD_Fp32m32", 0|(1<<TID::MayLoad), 0|(1<<16), NULL, NULL, NULL, OperandInfo107 }, // Inst #754 = ILD_Fp32m32
- { 755, 6, 1, 0, "ILD_Fp32m64", 0|(1<<TID::MayLoad), 0|(1<<16), NULL, NULL, NULL, OperandInfo108 }, // Inst #755 = ILD_Fp32m64
- { 756, 6, 1, 0, "ILD_Fp32m80", 0|(1<<TID::MayLoad), 0|(1<<16), NULL, NULL, NULL, OperandInfo109 }, // Inst #756 = ILD_Fp32m80
- { 757, 6, 1, 0, "ILD_Fp64m32", 0|(1<<TID::MayLoad), 0|(1<<16), NULL, NULL, NULL, OperandInfo107 }, // Inst #757 = ILD_Fp64m32
- { 758, 6, 1, 0, "ILD_Fp64m64", 0|(1<<TID::MayLoad), 0|(1<<16), NULL, NULL, NULL, OperandInfo108 }, // Inst #758 = ILD_Fp64m64
- { 759, 6, 1, 0, "ILD_Fp64m80", 0|(1<<TID::MayLoad), 0|(1<<16), NULL, NULL, NULL, OperandInfo109 }, // Inst #759 = ILD_Fp64m80
- { 760, 5, 0, 0, "IMUL16m", 0|(1<<TID::MayLoad), 0|29|(1<<6)|(247<<24), ImplicitList12, ImplicitList21, Barriers1, OperandInfo30 }, // Inst #760 = IMUL16m
- { 761, 1, 0, 0, "IMUL16r", 0, 0|21|(1<<6)|(247<<24), ImplicitList12, ImplicitList21, Barriers1, OperandInfo93 }, // Inst #761 = IMUL16r
- { 762, 7, 1, 0, "IMUL16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(175<<24), NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #762 = IMUL16rm
- { 763, 7, 1, 0, "IMUL16rmi", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(3<<13)|(105<<24), NULL, ImplicitList1, Barriers1, OperandInfo110 }, // Inst #763 = IMUL16rmi
- { 764, 7, 1, 0, "IMUL16rmi8", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<13)|(107<<24), NULL, ImplicitList1, Barriers1, OperandInfo110 }, // Inst #764 = IMUL16rmi8
- { 765, 3, 1, 0, "IMUL16rr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(175<<24), NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #765 = IMUL16rr
- { 766, 3, 1, 0, "IMUL16rri", 0, 0|5|(1<<6)|(3<<13)|(105<<24), NULL, ImplicitList1, Barriers1, OperandInfo111 }, // Inst #766 = IMUL16rri
- { 767, 3, 1, 0, "IMUL16rri8", 0, 0|5|(1<<6)|(1<<13)|(107<<24), NULL, ImplicitList1, Barriers1, OperandInfo111 }, // Inst #767 = IMUL16rri8
- { 768, 5, 0, 0, "IMUL32m", 0|(1<<TID::MayLoad), 0|29|(247<<24), ImplicitList13, ImplicitList18, Barriers6, OperandInfo30 }, // Inst #768 = IMUL32m
- { 769, 1, 0, 0, "IMUL32r", 0, 0|21|(247<<24), ImplicitList13, ImplicitList18, Barriers6, OperandInfo57 }, // Inst #769 = IMUL32r
- { 770, 7, 1, 0, "IMUL32rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(175<<24), NULL, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #770 = IMUL32rm
- { 771, 7, 1, 0, "IMUL32rmi", 0|(1<<TID::MayLoad), 0|6|(4<<13)|(105<<24), NULL, ImplicitList1, Barriers1, OperandInfo112 }, // Inst #771 = IMUL32rmi
- { 772, 7, 1, 0, "IMUL32rmi8", 0|(1<<TID::MayLoad), 0|6|(1<<13)|(107<<24), NULL, ImplicitList1, Barriers1, OperandInfo112 }, // Inst #772 = IMUL32rmi8
- { 773, 3, 1, 0, "IMUL32rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(175<<24), NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #773 = IMUL32rr
- { 774, 3, 1, 0, "IMUL32rri", 0, 0|5|(4<<13)|(105<<24), NULL, ImplicitList1, Barriers1, OperandInfo113 }, // Inst #774 = IMUL32rri
- { 775, 3, 1, 0, "IMUL32rri8", 0, 0|5|(1<<13)|(107<<24), NULL, ImplicitList1, Barriers1, OperandInfo113 }, // Inst #775 = IMUL32rri8
- { 776, 5, 0, 0, "IMUL64m", 0|(1<<TID::MayLoad), 0|29|(1<<12)|(247<<24), ImplicitList15, ImplicitList17, Barriers1, OperandInfo30 }, // Inst #776 = IMUL64m
- { 777, 1, 0, 0, "IMUL64r", 0, 0|21|(1<<12)|(247<<24), ImplicitList15, ImplicitList17, Barriers1, OperandInfo58 }, // Inst #777 = IMUL64r
- { 778, 7, 1, 0, "IMUL64rm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(175<<24), NULL, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #778 = IMUL64rm
- { 779, 7, 1, 0, "IMUL64rmi32", 0|(1<<TID::MayLoad), 0|6|(1<<12)|(4<<13)|(105<<24), NULL, ImplicitList1, Barriers1, OperandInfo114 }, // Inst #779 = IMUL64rmi32
- { 780, 7, 1, 0, "IMUL64rmi8", 0|(1<<TID::MayLoad), 0|6|(1<<12)|(1<<13)|(107<<24), NULL, ImplicitList1, Barriers1, OperandInfo114 }, // Inst #780 = IMUL64rmi8
- { 781, 3, 1, 0, "IMUL64rr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(1<<12)|(175<<24), NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #781 = IMUL64rr
- { 782, 3, 1, 0, "IMUL64rri32", 0, 0|5|(1<<12)|(4<<13)|(105<<24), NULL, ImplicitList1, Barriers1, OperandInfo115 }, // Inst #782 = IMUL64rri32
- { 783, 3, 1, 0, "IMUL64rri8", 0, 0|5|(1<<12)|(1<<13)|(107<<24), NULL, ImplicitList1, Barriers1, OperandInfo115 }, // Inst #783 = IMUL64rri8
- { 784, 5, 0, 0, "IMUL8m", 0|(1<<TID::MayLoad), 0|29|(246<<24), ImplicitList11, ImplicitList22, Barriers1, OperandInfo30 }, // Inst #784 = IMUL8m
- { 785, 1, 0, 0, "IMUL8r", 0, 0|21|(246<<24), ImplicitList11, ImplicitList22, Barriers1, OperandInfo94 }, // Inst #785 = IMUL8r
- { 786, 0, 0, 0, "IN16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(109<<24), NULL, NULL, NULL, 0 }, // Inst #786 = IN16
- { 787, 1, 0, 0, "IN16ri", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(1<<13)|(229<<24), NULL, ImplicitList12, NULL, OperandInfo5 }, // Inst #787 = IN16ri
- { 788, 0, 0, 0, "IN16rr", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(237<<24), ImplicitList26, ImplicitList12, NULL, 0 }, // Inst #788 = IN16rr
- { 789, 0, 0, 0, "IN32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(109<<24), NULL, NULL, NULL, 0 }, // Inst #789 = IN32
- { 790, 1, 0, 0, "IN32ri", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<13)|(229<<24), NULL, ImplicitList13, NULL, OperandInfo5 }, // Inst #790 = IN32ri
- { 791, 0, 0, 0, "IN32rr", 0|(1<<TID::UnmodeledSideEffects), 0|1|(237<<24), ImplicitList26, ImplicitList13, NULL, 0 }, // Inst #791 = IN32rr
- { 792, 0, 0, 0, "IN8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(108<<24), NULL, NULL, NULL, 0 }, // Inst #792 = IN8
- { 793, 1, 0, 0, "IN8ri", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<13)|(228<<24), NULL, ImplicitList11, NULL, OperandInfo5 }, // Inst #793 = IN8ri
- { 794, 0, 0, 0, "IN8rr", 0|(1<<TID::UnmodeledSideEffects), 0|1|(236<<24), ImplicitList26, ImplicitList11, NULL, 0 }, // Inst #794 = IN8rr
- { 795, 5, 0, 0, "INC16m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<6)|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #795 = INC16m
- { 796, 2, 1, 0, "INC16r", 0|(1<<TID::ConvertibleTo3Addr), 0|2|(1<<6)|(64<<24), NULL, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #796 = INC16r
- { 797, 5, 0, 0, "INC32m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #797 = INC32m
- { 798, 2, 1, 0, "INC32r", 0|(1<<TID::ConvertibleTo3Addr), 0|2|(64<<24), NULL, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #798 = INC32r
- { 799, 5, 0, 0, "INC64_16m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<6)|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #799 = INC64_16m
- { 800, 2, 1, 0, "INC64_16r", 0|(1<<TID::ConvertibleTo3Addr), 0|16|(1<<6)|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #800 = INC64_16r
- { 801, 5, 0, 0, "INC64_32m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #801 = INC64_32m
- { 802, 2, 1, 0, "INC64_32r", 0|(1<<TID::ConvertibleTo3Addr), 0|16|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #802 = INC64_32r
- { 803, 5, 0, 0, "INC64m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<12)|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #803 = INC64m
- { 804, 2, 1, 0, "INC64r", 0|(1<<TID::ConvertibleTo3Addr), 0|16|(1<<12)|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #804 = INC64r
- { 805, 5, 0, 0, "INC8m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(254<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #805 = INC8m
- { 806, 2, 1, 0, "INC8r", 0, 0|16|(254<<24), NULL, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #806 = INC8r
- { 807, 8, 1, 0, "INSERTPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(1<<13)|(33<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #807 = INSERTPSrm
- { 808, 4, 1, 0, "INSERTPSrr", 0, 0|5|(1<<6)|(14<<8)|(1<<13)|(33<<24), NULL, NULL, NULL, OperandInfo45 }, // Inst #808 = INSERTPSrr
- { 809, 1, 0, 0, "INT", 0|(1<<TID::UnmodeledSideEffects), 0|1|(205<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #809 = INT
- { 810, 0, 0, 0, "INT3", 0|(1<<TID::UnmodeledSideEffects), 0|1|(204<<24), NULL, NULL, NULL, 0 }, // Inst #810 = INT3
- { 811, 0, 0, 0, "INVD", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(8<<24), NULL, NULL, NULL, 0 }, // Inst #811 = INVD
- { 812, 0, 0, 0, "INVEPT", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(13<<8)|(128<<24), NULL, NULL, NULL, 0 }, // Inst #812 = INVEPT
- { 813, 5, 0, 0, "INVLPG", 0|(1<<TID::UnmodeledSideEffects), 0|31|(1<<8)|(1<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #813 = INVLPG
- { 814, 0, 0, 0, "INVVPID", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(13<<8)|(129<<24), NULL, NULL, NULL, 0 }, // Inst #814 = INVVPID
- { 815, 0, 0, 0, "IRET16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(207<<24), NULL, NULL, NULL, 0 }, // Inst #815 = IRET16
- { 816, 0, 0, 0, "IRET32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(207<<24), NULL, NULL, NULL, 0 }, // Inst #816 = IRET32
- { 817, 0, 0, 0, "IRET64", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(207<<24), NULL, NULL, NULL, 0 }, // Inst #817 = IRET64
- { 818, 5, 0, 0, "ISTT_FP16m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|25|(223<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #818 = ISTT_FP16m
- { 819, 5, 0, 0, "ISTT_FP32m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|25|(219<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #819 = ISTT_FP32m
- { 820, 5, 0, 0, "ISTT_FP64m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|25|(221<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #820 = ISTT_FP64m
- { 821, 6, 0, 0, "ISTT_Fp16m32", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo97 }, // Inst #821 = ISTT_Fp16m32
- { 822, 6, 0, 0, "ISTT_Fp16m64", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo98 }, // Inst #822 = ISTT_Fp16m64
- { 823, 6, 0, 0, "ISTT_Fp16m80", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo99 }, // Inst #823 = ISTT_Fp16m80
- { 824, 6, 0, 0, "ISTT_Fp32m32", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo97 }, // Inst #824 = ISTT_Fp32m32
- { 825, 6, 0, 0, "ISTT_Fp32m64", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo98 }, // Inst #825 = ISTT_Fp32m64
- { 826, 6, 0, 0, "ISTT_Fp32m80", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo99 }, // Inst #826 = ISTT_Fp32m80
- { 827, 6, 0, 0, "ISTT_Fp64m32", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo97 }, // Inst #827 = ISTT_Fp64m32
- { 828, 6, 0, 0, "ISTT_Fp64m64", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo98 }, // Inst #828 = ISTT_Fp64m64
- { 829, 6, 0, 0, "ISTT_Fp64m80", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo99 }, // Inst #829 = ISTT_Fp64m80
- { 830, 5, 0, 0, "IST_F16m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|26|(223<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #830 = IST_F16m
- { 831, 5, 0, 0, "IST_F32m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|26|(219<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #831 = IST_F32m
- { 832, 5, 0, 0, "IST_FP16m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|27|(223<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #832 = IST_FP16m
- { 833, 5, 0, 0, "IST_FP32m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|27|(219<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #833 = IST_FP32m
- { 834, 5, 0, 0, "IST_FP64m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|31|(223<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #834 = IST_FP64m
- { 835, 6, 0, 0, "IST_Fp16m32", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo97 }, // Inst #835 = IST_Fp16m32
- { 836, 6, 0, 0, "IST_Fp16m64", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo98 }, // Inst #836 = IST_Fp16m64
- { 837, 6, 0, 0, "IST_Fp16m80", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo99 }, // Inst #837 = IST_Fp16m80
- { 838, 6, 0, 0, "IST_Fp32m32", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo97 }, // Inst #838 = IST_Fp32m32
- { 839, 6, 0, 0, "IST_Fp32m64", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo98 }, // Inst #839 = IST_Fp32m64
- { 840, 6, 0, 0, "IST_Fp32m80", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo99 }, // Inst #840 = IST_Fp32m80
- { 841, 6, 0, 0, "IST_Fp64m32", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo97 }, // Inst #841 = IST_Fp64m32
- { 842, 6, 0, 0, "IST_Fp64m64", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo98 }, // Inst #842 = IST_Fp64m64
- { 843, 6, 0, 0, "IST_Fp64m80", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo99 }, // Inst #843 = IST_Fp64m80
- { 844, 8, 1, 0, "Int_CMPSDrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(1<<13)|(194<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #844 = Int_CMPSDrm
- { 845, 4, 1, 0, "Int_CMPSDrr", 0, 0|5|(11<<8)|(1<<13)|(194<<24), NULL, NULL, NULL, OperandInfo45 }, // Inst #845 = Int_CMPSDrr
- { 846, 8, 1, 0, "Int_CMPSSrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(1<<13)|(194<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #846 = Int_CMPSSrm
- { 847, 4, 1, 0, "Int_CMPSSrr", 0, 0|5|(12<<8)|(1<<13)|(194<<24), NULL, NULL, NULL, OperandInfo45 }, // Inst #847 = Int_CMPSSrr
- { 848, 6, 0, 0, "Int_COMISDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(47<<24), NULL, ImplicitList1, Barriers1, OperandInfo74 }, // Inst #848 = Int_COMISDrm
- { 849, 2, 0, 0, "Int_COMISDrr", 0, 0|5|(1<<6)|(1<<8)|(47<<24), NULL, ImplicitList1, Barriers1, OperandInfo75 }, // Inst #849 = Int_COMISDrr
- { 850, 6, 0, 0, "Int_COMISSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(47<<24), NULL, ImplicitList1, Barriers1, OperandInfo74 }, // Inst #850 = Int_COMISSrm
- { 851, 2, 0, 0, "Int_COMISSrr", 0, 0|5|(1<<8)|(47<<24), NULL, ImplicitList1, Barriers1, OperandInfo75 }, // Inst #851 = Int_COMISSrr
- { 852, 6, 1, 0, "Int_CVTDQ2PDrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(230<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #852 = Int_CVTDQ2PDrm
- { 853, 2, 1, 0, "Int_CVTDQ2PDrr", 0, 0|5|(12<<8)|(230<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #853 = Int_CVTDQ2PDrr
- { 854, 6, 1, 0, "Int_CVTDQ2PSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(91<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #854 = Int_CVTDQ2PSrm
- { 855, 2, 1, 0, "Int_CVTDQ2PSrr", 0, 0|5|(1<<8)|(91<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #855 = Int_CVTDQ2PSrr
- { 856, 6, 1, 0, "Int_CVTPD2DQrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(230<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #856 = Int_CVTPD2DQrm
- { 857, 2, 1, 0, "Int_CVTPD2DQrr", 0, 0|5|(11<<8)|(230<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #857 = Int_CVTPD2DQrr
- { 858, 6, 1, 0, "Int_CVTPD2PIrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(45<<24), NULL, NULL, NULL, OperandInfo117 }, // Inst #858 = Int_CVTPD2PIrm
- { 859, 2, 1, 0, "Int_CVTPD2PIrr", 0, 0|5|(1<<6)|(1<<8)|(45<<24), NULL, NULL, NULL, OperandInfo118 }, // Inst #859 = Int_CVTPD2PIrr
- { 860, 6, 1, 0, "Int_CVTPD2PSrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(90<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #860 = Int_CVTPD2PSrm
- { 861, 2, 1, 0, "Int_CVTPD2PSrr", 0, 0|5|(1<<6)|(1<<8)|(90<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #861 = Int_CVTPD2PSrr
- { 862, 6, 1, 0, "Int_CVTPI2PDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(42<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #862 = Int_CVTPI2PDrm
- { 863, 2, 1, 0, "Int_CVTPI2PDrr", 0, 0|5|(1<<6)|(1<<8)|(42<<24), NULL, NULL, NULL, OperandInfo119 }, // Inst #863 = Int_CVTPI2PDrr
- { 864, 7, 1, 0, "Int_CVTPI2PSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(42<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #864 = Int_CVTPI2PSrm
- { 865, 3, 1, 0, "Int_CVTPI2PSrr", 0, 0|5|(1<<8)|(42<<24), NULL, NULL, NULL, OperandInfo120 }, // Inst #865 = Int_CVTPI2PSrr
- { 866, 6, 1, 0, "Int_CVTPS2DQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(91<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #866 = Int_CVTPS2DQrm
- { 867, 2, 1, 0, "Int_CVTPS2DQrr", 0, 0|5|(1<<6)|(1<<8)|(91<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #867 = Int_CVTPS2DQrr
- { 868, 6, 1, 0, "Int_CVTPS2PDrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(90<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #868 = Int_CVTPS2PDrm
- { 869, 2, 1, 0, "Int_CVTPS2PDrr", 0, 0|5|(1<<8)|(90<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #869 = Int_CVTPS2PDrr
- { 870, 6, 1, 0, "Int_CVTPS2PIrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(45<<24), NULL, NULL, NULL, OperandInfo117 }, // Inst #870 = Int_CVTPS2PIrm
- { 871, 2, 1, 0, "Int_CVTPS2PIrr", 0, 0|5|(1<<8)|(45<<24), NULL, NULL, NULL, OperandInfo118 }, // Inst #871 = Int_CVTPS2PIrr
- { 872, 6, 1, 0, "Int_CVTSD2SI64rm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(1<<12)|(45<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #872 = Int_CVTSD2SI64rm
- { 873, 2, 1, 0, "Int_CVTSD2SI64rr", 0, 0|5|(11<<8)|(1<<12)|(45<<24), NULL, NULL, NULL, OperandInfo121 }, // Inst #873 = Int_CVTSD2SI64rr
- { 874, 6, 1, 0, "Int_CVTSD2SIrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(45<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #874 = Int_CVTSD2SIrm
- { 875, 2, 1, 0, "Int_CVTSD2SIrr", 0, 0|5|(11<<8)|(45<<24), NULL, NULL, NULL, OperandInfo122 }, // Inst #875 = Int_CVTSD2SIrr
- { 876, 7, 1, 0, "Int_CVTSD2SSrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(90<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #876 = Int_CVTSD2SSrm
- { 877, 3, 1, 0, "Int_CVTSD2SSrr", 0, 0|5|(11<<8)|(90<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #877 = Int_CVTSD2SSrr
- { 878, 7, 1, 0, "Int_CVTSI2SD64rm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(1<<12)|(42<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #878 = Int_CVTSI2SD64rm
- { 879, 3, 1, 0, "Int_CVTSI2SD64rr", 0, 0|5|(11<<8)|(1<<12)|(42<<24), NULL, NULL, NULL, OperandInfo123 }, // Inst #879 = Int_CVTSI2SD64rr
- { 880, 7, 1, 0, "Int_CVTSI2SDrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(42<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #880 = Int_CVTSI2SDrm
- { 881, 3, 1, 0, "Int_CVTSI2SDrr", 0, 0|5|(11<<8)|(42<<24), NULL, NULL, NULL, OperandInfo124 }, // Inst #881 = Int_CVTSI2SDrr
- { 882, 7, 1, 0, "Int_CVTSI2SS64rm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(1<<12)|(42<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #882 = Int_CVTSI2SS64rm
- { 883, 3, 1, 0, "Int_CVTSI2SS64rr", 0, 0|5|(12<<8)|(1<<12)|(42<<24), NULL, NULL, NULL, OperandInfo123 }, // Inst #883 = Int_CVTSI2SS64rr
- { 884, 7, 1, 0, "Int_CVTSI2SSrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(42<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #884 = Int_CVTSI2SSrm
- { 885, 3, 1, 0, "Int_CVTSI2SSrr", 0, 0|5|(12<<8)|(42<<24), NULL, NULL, NULL, OperandInfo124 }, // Inst #885 = Int_CVTSI2SSrr
- { 886, 7, 1, 0, "Int_CVTSS2SDrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(90<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #886 = Int_CVTSS2SDrm
- { 887, 3, 1, 0, "Int_CVTSS2SDrr", 0, 0|5|(12<<8)|(90<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #887 = Int_CVTSS2SDrr
- { 888, 6, 1, 0, "Int_CVTSS2SI64rm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(1<<12)|(45<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #888 = Int_CVTSS2SI64rm
- { 889, 2, 1, 0, "Int_CVTSS2SI64rr", 0, 0|5|(12<<8)|(1<<12)|(45<<24), NULL, NULL, NULL, OperandInfo121 }, // Inst #889 = Int_CVTSS2SI64rr
- { 890, 6, 1, 0, "Int_CVTSS2SIrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(45<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #890 = Int_CVTSS2SIrm
- { 891, 2, 1, 0, "Int_CVTSS2SIrr", 0, 0|5|(12<<8)|(45<<24), NULL, NULL, NULL, OperandInfo122 }, // Inst #891 = Int_CVTSS2SIrr
- { 892, 6, 1, 0, "Int_CVTTPD2DQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(230<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #892 = Int_CVTTPD2DQrm
- { 893, 2, 1, 0, "Int_CVTTPD2DQrr", 0, 0|5|(1<<6)|(1<<8)|(230<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #893 = Int_CVTTPD2DQrr
- { 894, 6, 1, 0, "Int_CVTTPD2PIrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(44<<24), NULL, NULL, NULL, OperandInfo117 }, // Inst #894 = Int_CVTTPD2PIrm
- { 895, 2, 1, 0, "Int_CVTTPD2PIrr", 0, 0|5|(1<<6)|(1<<8)|(44<<24), NULL, NULL, NULL, OperandInfo118 }, // Inst #895 = Int_CVTTPD2PIrr
- { 896, 6, 1, 0, "Int_CVTTPS2DQrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(91<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #896 = Int_CVTTPS2DQrm
- { 897, 2, 1, 0, "Int_CVTTPS2DQrr", 0, 0|5|(12<<8)|(91<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #897 = Int_CVTTPS2DQrr
- { 898, 6, 1, 0, "Int_CVTTPS2PIrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(44<<24), NULL, NULL, NULL, OperandInfo117 }, // Inst #898 = Int_CVTTPS2PIrm
- { 899, 2, 1, 0, "Int_CVTTPS2PIrr", 0, 0|5|(1<<8)|(44<<24), NULL, NULL, NULL, OperandInfo118 }, // Inst #899 = Int_CVTTPS2PIrr
- { 900, 6, 1, 0, "Int_CVTTSD2SI64rm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(1<<12)|(44<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #900 = Int_CVTTSD2SI64rm
- { 901, 2, 1, 0, "Int_CVTTSD2SI64rr", 0, 0|5|(11<<8)|(1<<12)|(44<<24), NULL, NULL, NULL, OperandInfo121 }, // Inst #901 = Int_CVTTSD2SI64rr
- { 902, 6, 1, 0, "Int_CVTTSD2SIrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(44<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #902 = Int_CVTTSD2SIrm
- { 903, 2, 1, 0, "Int_CVTTSD2SIrr", 0, 0|5|(11<<8)|(44<<24), NULL, NULL, NULL, OperandInfo122 }, // Inst #903 = Int_CVTTSD2SIrr
- { 904, 6, 1, 0, "Int_CVTTSS2SI64rm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(1<<12)|(44<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #904 = Int_CVTTSS2SI64rm
- { 905, 2, 1, 0, "Int_CVTTSS2SI64rr", 0, 0|5|(12<<8)|(1<<12)|(44<<24), NULL, NULL, NULL, OperandInfo121 }, // Inst #905 = Int_CVTTSS2SI64rr
- { 906, 6, 1, 0, "Int_CVTTSS2SIrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(44<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #906 = Int_CVTTSS2SIrm
- { 907, 2, 1, 0, "Int_CVTTSS2SIrr", 0, 0|5|(12<<8)|(44<<24), NULL, NULL, NULL, OperandInfo122 }, // Inst #907 = Int_CVTTSS2SIrr
- { 908, 6, 0, 0, "Int_UCOMISDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(46<<24), NULL, ImplicitList1, Barriers1, OperandInfo74 }, // Inst #908 = Int_UCOMISDrm
- { 909, 2, 0, 0, "Int_UCOMISDrr", 0, 0|5|(1<<6)|(1<<8)|(46<<24), NULL, ImplicitList1, Barriers1, OperandInfo75 }, // Inst #909 = Int_UCOMISDrr
- { 910, 6, 0, 0, "Int_UCOMISSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(46<<24), NULL, ImplicitList1, Barriers1, OperandInfo74 }, // Inst #910 = Int_UCOMISSrm
- { 911, 2, 0, 0, "Int_UCOMISSrr", 0, 0|5|(1<<8)|(46<<24), NULL, ImplicitList1, Barriers1, OperandInfo75 }, // Inst #911 = Int_UCOMISSrr
- { 912, 1, 0, 0, "JAE_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(115<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #912 = JAE_1
- { 913, 1, 0, 0, "JAE_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0|1|(1<<8)|(5<<13)|(131<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #913 = JAE_4
- { 914, 1, 0, 0, "JA_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(119<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #914 = JA_1
- { 915, 1, 0, 0, "JA_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0|1|(1<<8)|(5<<13)|(135<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #915 = JA_4
- { 916, 1, 0, 0, "JBE_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(118<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #916 = JBE_1
- { 917, 1, 0, 0, "JBE_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0|1|(1<<8)|(5<<13)|(134<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #917 = JBE_4
- { 918, 1, 0, 0, "JB_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(114<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #918 = JB_1
- { 919, 1, 0, 0, "JB_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0|1|(1<<8)|(5<<13)|(130<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #919 = JB_4
- { 920, 1, 0, 0, "JCXZ8", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(227<<24), ImplicitList27, NULL, NULL, OperandInfo5 }, // Inst #920 = JCXZ8
- { 921, 1, 0, 0, "JE_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(116<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #921 = JE_1
- { 922, 1, 0, 0, "JE_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0|1|(1<<8)|(5<<13)|(132<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #922 = JE_4
- { 923, 1, 0, 0, "JGE_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(125<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #923 = JGE_1
- { 924, 1, 0, 0, "JGE_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0|1|(1<<8)|(5<<13)|(141<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #924 = JGE_4
- { 925, 1, 0, 0, "JG_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(127<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #925 = JG_1
- { 926, 1, 0, 0, "JG_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0|1|(1<<8)|(5<<13)|(143<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #926 = JG_4
- { 927, 1, 0, 0, "JLE_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(126<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #927 = JLE_1
- { 928, 1, 0, 0, "JLE_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0|1|(1<<8)|(5<<13)|(142<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #928 = JLE_4
- { 929, 1, 0, 0, "JL_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(124<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #929 = JL_1
- { 930, 1, 0, 0, "JL_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0|1|(1<<8)|(5<<13)|(140<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #930 = JL_4
- { 931, 5, 0, 0, "JMP32m", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::MayLoad)|(1<<TID::Terminator), 0|28|(255<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #931 = JMP32m
- { 932, 1, 0, 0, "JMP32r", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator), 0|20|(255<<24), NULL, NULL, NULL, OperandInfo57 }, // Inst #932 = JMP32r
- { 933, 5, 0, 0, "JMP64m", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::MayLoad)|(1<<TID::Terminator), 0|28|(255<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #933 = JMP64m
- { 934, 1, 0, 0, "JMP64pcrel32", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(233<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #934 = JMP64pcrel32
- { 935, 1, 0, 0, "JMP64r", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator), 0|20|(255<<24), NULL, NULL, NULL, OperandInfo58 }, // Inst #935 = JMP64r
- { 936, 1, 0, 0, "JMP_1", 0|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(235<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #936 = JMP_1
- { 937, 1, 0, 0, "JMP_4", 0|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Terminator), 0|1|(5<<13)|(233<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #937 = JMP_4
- { 938, 1, 0, 0, "JNE_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(117<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #938 = JNE_1
- { 939, 1, 0, 0, "JNE_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0|1|(1<<8)|(5<<13)|(133<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #939 = JNE_4
- { 940, 1, 0, 0, "JNO_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(113<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #940 = JNO_1
- { 941, 1, 0, 0, "JNO_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0|1|(1<<8)|(5<<13)|(129<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #941 = JNO_4
- { 942, 1, 0, 0, "JNP_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(123<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #942 = JNP_1
- { 943, 1, 0, 0, "JNP_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0|1|(1<<8)|(5<<13)|(139<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #943 = JNP_4
- { 944, 1, 0, 0, "JNS_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(121<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #944 = JNS_1
- { 945, 1, 0, 0, "JNS_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0|1|(1<<8)|(5<<13)|(137<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #945 = JNS_4
- { 946, 1, 0, 0, "JO_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(112<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #946 = JO_1
- { 947, 1, 0, 0, "JO_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0|1|(1<<8)|(5<<13)|(128<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #947 = JO_4
- { 948, 1, 0, 0, "JP_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(122<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #948 = JP_1
- { 949, 1, 0, 0, "JP_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0|1|(1<<8)|(5<<13)|(138<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #949 = JP_4
- { 950, 1, 0, 0, "JS_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(2<<13)|(120<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #950 = JS_1
- { 951, 1, 0, 0, "JS_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0|1|(1<<8)|(5<<13)|(136<<24), ImplicitList1, NULL, NULL, OperandInfo5 }, // Inst #951 = JS_4
- { 952, 0, 0, 0, "LAHF", 0, 0|1|(159<<24), ImplicitList1, ImplicitList28, NULL, 0 }, // Inst #952 = LAHF
- { 953, 6, 1, 0, "LAR16rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(1<<8)|(2<<24), NULL, NULL, NULL, OperandInfo46 }, // Inst #953 = LAR16rm
- { 954, 2, 1, 0, "LAR16rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(1<<8)|(2<<24), NULL, NULL, NULL, OperandInfo47 }, // Inst #954 = LAR16rr
- { 955, 6, 1, 0, "LAR32rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(2<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #955 = LAR32rm
- { 956, 2, 1, 0, "LAR32rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(2<<24), NULL, NULL, NULL, OperandInfo49 }, // Inst #956 = LAR32rr
- { 957, 6, 1, 0, "LAR64rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(1<<12)|(2<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #957 = LAR64rm
- { 958, 2, 1, 0, "LAR64rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(1<<12)|(2<<24), NULL, NULL, NULL, OperandInfo125 }, // Inst #958 = LAR64rr
- { 959, 6, 0, 0, "LCMPXCHG16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<6)|(1<<8)|(1<<19)|(177<<24), ImplicitList12, ImplicitList29, Barriers1, OperandInfo7 }, // Inst #959 = LCMPXCHG16
- { 960, 6, 0, 0, "LCMPXCHG32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<8)|(1<<19)|(177<<24), ImplicitList13, ImplicitList30, Barriers1, OperandInfo11 }, // Inst #960 = LCMPXCHG32
- { 961, 6, 0, 0, "LCMPXCHG64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<8)|(1<<12)|(1<<19)|(177<<24), ImplicitList15, ImplicitList31, Barriers1, OperandInfo15 }, // Inst #961 = LCMPXCHG64
- { 962, 6, 0, 0, "LCMPXCHG8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<8)|(1<<19)|(176<<24), ImplicitList11, ImplicitList32, Barriers1, OperandInfo20 }, // Inst #962 = LCMPXCHG8
- { 963, 5, 0, 0, "LCMPXCHG8B", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<8)|(1<<19)|(199<<24), ImplicitList6, ImplicitList18, Barriers6, OperandInfo30 }, // Inst #963 = LCMPXCHG8B
- { 964, 6, 1, 0, "LDDQUrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(240<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #964 = LDDQUrm
- { 965, 5, 0, 0, "LDMXCSR", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|26|(1<<8)|(174<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #965 = LDMXCSR
- { 966, 6, 1, 0, "LDS16rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(197<<24), NULL, NULL, NULL, OperandInfo46 }, // Inst #966 = LDS16rm
- { 967, 6, 1, 0, "LDS32rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(197<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #967 = LDS32rm
- { 968, 0, 0, 0, "LD_F0", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(238<<24), NULL, NULL, NULL, 0 }, // Inst #968 = LD_F0
- { 969, 0, 0, 0, "LD_F1", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(232<<24), NULL, NULL, NULL, 0 }, // Inst #969 = LD_F1
- { 970, 5, 0, 0, "LD_F32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|24|(217<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #970 = LD_F32m
- { 971, 5, 0, 0, "LD_F64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|24|(221<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #971 = LD_F64m
- { 972, 5, 0, 0, "LD_F80m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|29|(219<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #972 = LD_F80m
- { 973, 1, 1, 0, "LD_Fp032", 0|(1<<TID::Rematerializable), 0|(1<<16), NULL, NULL, NULL, OperandInfo100 }, // Inst #973 = LD_Fp032
- { 974, 1, 1, 0, "LD_Fp064", 0|(1<<TID::Rematerializable), 0|(1<<16), NULL, NULL, NULL, OperandInfo101 }, // Inst #974 = LD_Fp064
- { 975, 1, 1, 0, "LD_Fp080", 0|(1<<TID::Rematerializable), 0|(1<<16), NULL, NULL, NULL, OperandInfo102 }, // Inst #975 = LD_Fp080
- { 976, 1, 1, 0, "LD_Fp132", 0|(1<<TID::Rematerializable), 0|(1<<16), NULL, NULL, NULL, OperandInfo100 }, // Inst #976 = LD_Fp132
- { 977, 1, 1, 0, "LD_Fp164", 0|(1<<TID::Rematerializable), 0|(1<<16), NULL, NULL, NULL, OperandInfo101 }, // Inst #977 = LD_Fp164
- { 978, 1, 1, 0, "LD_Fp180", 0|(1<<TID::Rematerializable), 0|(1<<16), NULL, NULL, NULL, OperandInfo102 }, // Inst #978 = LD_Fp180
- { 979, 6, 1, 0, "LD_Fp32m", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(1<<16), NULL, NULL, NULL, OperandInfo107 }, // Inst #979 = LD_Fp32m
- { 980, 6, 1, 0, "LD_Fp32m64", 0|(1<<TID::MayLoad), 0|(1<<16), NULL, NULL, NULL, OperandInfo108 }, // Inst #980 = LD_Fp32m64
- { 981, 6, 1, 0, "LD_Fp32m80", 0|(1<<TID::MayLoad), 0|(1<<16), NULL, NULL, NULL, OperandInfo109 }, // Inst #981 = LD_Fp32m80
- { 982, 6, 1, 0, "LD_Fp64m", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|(1<<16), NULL, NULL, NULL, OperandInfo108 }, // Inst #982 = LD_Fp64m
- { 983, 6, 1, 0, "LD_Fp64m80", 0|(1<<TID::MayLoad), 0|(1<<16), NULL, NULL, NULL, OperandInfo109 }, // Inst #983 = LD_Fp64m80
- { 984, 6, 1, 0, "LD_Fp80m", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|(1<<16), NULL, NULL, NULL, OperandInfo109 }, // Inst #984 = LD_Fp80m
- { 985, 1, 0, 0, "LD_Frr", 0|(1<<TID::UnmodeledSideEffects), 0|2|(4<<8)|(192<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #985 = LD_Frr
- { 986, 5, 1, 0, "LEA16r", 0, 0|6|(1<<6)|(141<<24), NULL, NULL, NULL, OperandInfo126 }, // Inst #986 = LEA16r
- { 987, 5, 1, 0, "LEA32r", 0|(1<<TID::Rematerializable), 0|6|(141<<24), NULL, NULL, NULL, OperandInfo127 }, // Inst #987 = LEA32r
- { 988, 5, 1, 0, "LEA64_32r", 0, 0|6|(141<<24), NULL, NULL, NULL, OperandInfo127 }, // Inst #988 = LEA64_32r
- { 989, 5, 1, 0, "LEA64r", 0|(1<<TID::Rematerializable), 0|6|(1<<12)|(141<<24), NULL, NULL, NULL, OperandInfo128 }, // Inst #989 = LEA64r
- { 990, 0, 0, 0, "LEAVE", 0|(1<<TID::MayLoad), 0|1|(201<<24), ImplicitList33, ImplicitList33, NULL, 0 }, // Inst #990 = LEAVE
- { 991, 0, 0, 0, "LEAVE64", 0|(1<<TID::MayLoad), 0|1|(201<<24), ImplicitList34, ImplicitList34, NULL, 0 }, // Inst #991 = LEAVE64
- { 992, 6, 1, 0, "LES16rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(196<<24), NULL, NULL, NULL, OperandInfo46 }, // Inst #992 = LES16rm
- { 993, 6, 1, 0, "LES32rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(196<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #993 = LES32rm
- { 994, 0, 0, 0, "LFENCE", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|39|(1<<8)|(174<<24), NULL, NULL, NULL, 0 }, // Inst #994 = LFENCE
- { 995, 6, 1, 0, "LFS16rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(1<<8)|(180<<24), NULL, NULL, NULL, OperandInfo46 }, // Inst #995 = LFS16rm
- { 996, 6, 1, 0, "LFS32rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(180<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #996 = LFS32rm
- { 997, 6, 1, 0, "LFS64rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(1<<12)|(180<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #997 = LFS64rm
- { 998, 5, 0, 0, "LGDTm", 0|(1<<TID::UnmodeledSideEffects), 0|26|(1<<8)|(1<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #998 = LGDTm
- { 999, 6, 1, 0, "LGS16rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(1<<8)|(181<<24), NULL, NULL, NULL, OperandInfo46 }, // Inst #999 = LGS16rm
- { 1000, 6, 1, 0, "LGS32rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(181<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #1000 = LGS32rm
- { 1001, 6, 1, 0, "LGS64rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(1<<12)|(181<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #1001 = LGS64rm
- { 1002, 5, 0, 0, "LIDTm", 0|(1<<TID::UnmodeledSideEffects), 0|27|(1<<8)|(1<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #1002 = LIDTm
- { 1003, 5, 0, 0, "LLDT16m", 0|(1<<TID::UnmodeledSideEffects), 0|26|(1<<8), NULL, NULL, NULL, OperandInfo30 }, // Inst #1003 = LLDT16m
- { 1004, 1, 0, 0, "LLDT16r", 0|(1<<TID::UnmodeledSideEffects), 0|18|(1<<8), NULL, NULL, NULL, OperandInfo93 }, // Inst #1004 = LLDT16r
- { 1005, 5, 0, 0, "LMSW16m", 0|(1<<TID::UnmodeledSideEffects), 0|30|(1<<8)|(1<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #1005 = LMSW16m
- { 1006, 1, 0, 0, "LMSW16r", 0|(1<<TID::UnmodeledSideEffects), 0|22|(1<<8)|(1<<24), NULL, NULL, NULL, OperandInfo93 }, // Inst #1006 = LMSW16r
- { 1007, 6, 0, 0, "LOCK_ADD16mi", 0|(1<<TID::UnmodeledSideEffects), 0|24|(3<<13)|(1<<19)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1007 = LOCK_ADD16mi
- { 1008, 6, 0, 0, "LOCK_ADD16mi8", 0|(1<<TID::UnmodeledSideEffects), 0|24|(1<<6)|(1<<13)|(1<<19)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1008 = LOCK_ADD16mi8
- { 1009, 6, 0, 0, "LOCK_ADD16mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<6)|(1<<19)|(1<<24), NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #1009 = LOCK_ADD16mr
- { 1010, 6, 0, 0, "LOCK_ADD32mi", 0|(1<<TID::UnmodeledSideEffects), 0|24|(4<<13)|(1<<19)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1010 = LOCK_ADD32mi
- { 1011, 6, 0, 0, "LOCK_ADD32mi8", 0|(1<<TID::UnmodeledSideEffects), 0|24|(1<<13)|(1<<19)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1011 = LOCK_ADD32mi8
- { 1012, 6, 0, 0, "LOCK_ADD32mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<19)|(1<<24), NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #1012 = LOCK_ADD32mr
- { 1013, 6, 0, 0, "LOCK_ADD64mi32", 0|(1<<TID::UnmodeledSideEffects), 0|24|(1<<12)|(4<<13)|(1<<19)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1013 = LOCK_ADD64mi32
- { 1014, 6, 0, 0, "LOCK_ADD64mi8", 0|(1<<TID::UnmodeledSideEffects), 0|24|(1<<12)|(1<<13)|(1<<19)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1014 = LOCK_ADD64mi8
- { 1015, 6, 0, 0, "LOCK_ADD64mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<12)|(1<<19)|(3<<24), NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #1015 = LOCK_ADD64mr
- { 1016, 6, 0, 0, "LOCK_ADD8mi", 0|(1<<TID::UnmodeledSideEffects), 0|24|(1<<13)|(1<<19)|(128<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1016 = LOCK_ADD8mi
- { 1017, 6, 0, 0, "LOCK_ADD8mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<19), NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #1017 = LOCK_ADD8mr
- { 1018, 5, 0, 0, "LOCK_DEC16m", 0|(1<<TID::UnmodeledSideEffects), 0|25|(1<<6)|(1<<19)|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1018 = LOCK_DEC16m
- { 1019, 5, 0, 0, "LOCK_DEC32m", 0|(1<<TID::UnmodeledSideEffects), 0|25|(1<<19)|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1019 = LOCK_DEC32m
- { 1020, 5, 0, 0, "LOCK_DEC64m", 0|(1<<TID::UnmodeledSideEffects), 0|25|(1<<12)|(1<<19)|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1020 = LOCK_DEC64m
- { 1021, 5, 0, 0, "LOCK_DEC8m", 0|(1<<TID::UnmodeledSideEffects), 0|25|(1<<19)|(254<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1021 = LOCK_DEC8m
- { 1022, 5, 0, 0, "LOCK_INC16m", 0|(1<<TID::UnmodeledSideEffects), 0|24|(1<<6)|(1<<19)|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1022 = LOCK_INC16m
- { 1023, 5, 0, 0, "LOCK_INC32m", 0|(1<<TID::UnmodeledSideEffects), 0|24|(1<<19)|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1023 = LOCK_INC32m
- { 1024, 5, 0, 0, "LOCK_INC64m", 0|(1<<TID::UnmodeledSideEffects), 0|24|(1<<12)|(1<<19)|(255<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1024 = LOCK_INC64m
- { 1025, 5, 0, 0, "LOCK_INC8m", 0|(1<<TID::UnmodeledSideEffects), 0|24|(1<<19)|(254<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1025 = LOCK_INC8m
- { 1026, 0, 0, 0, "LOCK_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0|1|(240<<24), NULL, NULL, NULL, 0 }, // Inst #1026 = LOCK_PREFIX
- { 1027, 6, 0, 0, "LOCK_SUB16mi", 0|(1<<TID::UnmodeledSideEffects), 0|29|(1<<6)|(3<<13)|(1<<19)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1027 = LOCK_SUB16mi
- { 1028, 6, 0, 0, "LOCK_SUB16mi8", 0|(1<<TID::UnmodeledSideEffects), 0|29|(1<<6)|(1<<13)|(1<<19)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1028 = LOCK_SUB16mi8
- { 1029, 6, 0, 0, "LOCK_SUB16mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<6)|(1<<19)|(41<<24), NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #1029 = LOCK_SUB16mr
- { 1030, 6, 0, 0, "LOCK_SUB32mi", 0|(1<<TID::UnmodeledSideEffects), 0|29|(4<<13)|(1<<19)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1030 = LOCK_SUB32mi
- { 1031, 6, 0, 0, "LOCK_SUB32mi8", 0|(1<<TID::UnmodeledSideEffects), 0|29|(1<<13)|(1<<19)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1031 = LOCK_SUB32mi8
- { 1032, 6, 0, 0, "LOCK_SUB32mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<19)|(41<<24), NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #1032 = LOCK_SUB32mr
- { 1033, 6, 0, 0, "LOCK_SUB64mi32", 0|(1<<TID::UnmodeledSideEffects), 0|29|(1<<12)|(4<<13)|(1<<19)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1033 = LOCK_SUB64mi32
- { 1034, 6, 0, 0, "LOCK_SUB64mi8", 0|(1<<TID::UnmodeledSideEffects), 0|29|(1<<12)|(1<<13)|(1<<19)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1034 = LOCK_SUB64mi8
- { 1035, 6, 0, 0, "LOCK_SUB64mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<12)|(1<<19)|(41<<24), NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #1035 = LOCK_SUB64mr
- { 1036, 6, 0, 0, "LOCK_SUB8mi", 0|(1<<TID::UnmodeledSideEffects), 0|29|(1<<13)|(1<<19)|(128<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1036 = LOCK_SUB8mi
- { 1037, 6, 0, 0, "LOCK_SUB8mr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<19)|(40<<24), NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #1037 = LOCK_SUB8mr
- { 1038, 0, 0, 0, "LODSB", 0|(1<<TID::UnmodeledSideEffects), 0|1|(172<<24), NULL, NULL, NULL, 0 }, // Inst #1038 = LODSB
- { 1039, 0, 0, 0, "LODSD", 0|(1<<TID::UnmodeledSideEffects), 0|1|(173<<24), NULL, NULL, NULL, 0 }, // Inst #1039 = LODSD
- { 1040, 0, 0, 0, "LODSQ", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(173<<24), NULL, NULL, NULL, 0 }, // Inst #1040 = LODSQ
- { 1041, 0, 0, 0, "LODSW", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(173<<24), NULL, NULL, NULL, 0 }, // Inst #1041 = LODSW
- { 1042, 1, 1, 0, "LOOP", 0|(1<<TID::UnmodeledSideEffects), 0|1|(226<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #1042 = LOOP
- { 1043, 1, 1, 0, "LOOPE", 0|(1<<TID::UnmodeledSideEffects), 0|1|(225<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #1043 = LOOPE
- { 1044, 1, 1, 0, "LOOPNE", 0|(1<<TID::UnmodeledSideEffects), 0|1|(224<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #1044 = LOOPNE
- { 1045, 0, 0, 0, "LRET", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(7<<16)|(203<<24), NULL, NULL, NULL, 0 }, // Inst #1045 = LRET
- { 1046, 1, 0, 0, "LRETI", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0|1|(3<<13)|(7<<16)|(202<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #1046 = LRETI
- { 1047, 6, 1, 0, "LSL16rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(1<<8)|(3<<24), NULL, NULL, NULL, OperandInfo46 }, // Inst #1047 = LSL16rm
- { 1048, 2, 1, 0, "LSL16rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(1<<8)|(3<<24), NULL, NULL, NULL, OperandInfo47 }, // Inst #1048 = LSL16rr
- { 1049, 6, 1, 0, "LSL32rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(3<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #1049 = LSL32rm
- { 1050, 2, 1, 0, "LSL32rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(3<<24), NULL, NULL, NULL, OperandInfo49 }, // Inst #1050 = LSL32rr
- { 1051, 6, 1, 0, "LSL64rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(1<<12)|(3<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #1051 = LSL64rm
- { 1052, 2, 1, 0, "LSL64rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(1<<12)|(3<<24), NULL, NULL, NULL, OperandInfo51 }, // Inst #1052 = LSL64rr
- { 1053, 6, 1, 0, "LSS16rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(1<<8)|(178<<24), NULL, NULL, NULL, OperandInfo46 }, // Inst #1053 = LSS16rm
- { 1054, 6, 1, 0, "LSS32rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(178<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #1054 = LSS32rm
- { 1055, 6, 1, 0, "LSS64rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(1<<12)|(178<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #1055 = LSS64rm
- { 1056, 5, 0, 0, "LTRm", 0|(1<<TID::UnmodeledSideEffects), 0|27|(1<<8), NULL, NULL, NULL, OperandInfo30 }, // Inst #1056 = LTRm
- { 1057, 1, 0, 0, "LTRr", 0|(1<<TID::UnmodeledSideEffects), 0|19|(1<<8), NULL, NULL, NULL, OperandInfo93 }, // Inst #1057 = LTRr
- { 1058, 7, 1, 0, "LXADD16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|6|(1<<6)|(1<<8)|(1<<19)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #1058 = LXADD16
- { 1059, 7, 1, 0, "LXADD32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|6|(1<<8)|(1<<19)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #1059 = LXADD32
- { 1060, 7, 1, 0, "LXADD64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|6|(1<<8)|(1<<12)|(1<<19)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #1060 = LXADD64
- { 1061, 7, 1, 0, "LXADD8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|6|(1<<8)|(1<<19)|(192<<24), NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #1061 = LXADD8
- { 1062, 2, 0, 0, "MASKMOVDQU", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(1<<8)|(247<<24), ImplicitList35, NULL, NULL, OperandInfo75 }, // Inst #1062 = MASKMOVDQU
- { 1063, 2, 0, 0, "MASKMOVDQU64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(1<<8)|(247<<24), ImplicitList36, NULL, NULL, OperandInfo75 }, // Inst #1063 = MASKMOVDQU64
- { 1064, 7, 1, 0, "MAXPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(95<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1064 = MAXPDrm
- { 1065, 7, 1, 0, "MAXPDrm_Int", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(95<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1065 = MAXPDrm_Int
- { 1066, 3, 1, 0, "MAXPDrr", 0, 0|5|(1<<6)|(1<<8)|(95<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1066 = MAXPDrr
- { 1067, 3, 1, 0, "MAXPDrr_Int", 0, 0|5|(1<<6)|(1<<8)|(95<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1067 = MAXPDrr_Int
- { 1068, 7, 1, 0, "MAXPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(95<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1068 = MAXPSrm
- { 1069, 7, 1, 0, "MAXPSrm_Int", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(95<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1069 = MAXPSrm_Int
- { 1070, 3, 1, 0, "MAXPSrr", 0, 0|5|(1<<8)|(95<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1070 = MAXPSrr
- { 1071, 3, 1, 0, "MAXPSrr_Int", 0, 0|5|(1<<8)|(95<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1071 = MAXPSrr_Int
- { 1072, 7, 1, 0, "MAXSDrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(95<<24), NULL, NULL, NULL, OperandInfo26 }, // Inst #1072 = MAXSDrm
- { 1073, 7, 1, 0, "MAXSDrm_Int", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(95<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1073 = MAXSDrm_Int
- { 1074, 3, 1, 0, "MAXSDrr", 0, 0|5|(11<<8)|(95<<24), NULL, NULL, NULL, OperandInfo27 }, // Inst #1074 = MAXSDrr
- { 1075, 3, 1, 0, "MAXSDrr_Int", 0, 0|5|(11<<8)|(95<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1075 = MAXSDrr_Int
- { 1076, 7, 1, 0, "MAXSSrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(95<<24), NULL, NULL, NULL, OperandInfo28 }, // Inst #1076 = MAXSSrm
- { 1077, 7, 1, 0, "MAXSSrm_Int", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(95<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1077 = MAXSSrm_Int
- { 1078, 3, 1, 0, "MAXSSrr", 0, 0|5|(12<<8)|(95<<24), NULL, NULL, NULL, OperandInfo29 }, // Inst #1078 = MAXSSrr
- { 1079, 3, 1, 0, "MAXSSrr_Int", 0, 0|5|(12<<8)|(95<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1079 = MAXSSrr_Int
- { 1080, 0, 0, 0, "MFENCE", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|40|(1<<8)|(174<<24), NULL, NULL, NULL, 0 }, // Inst #1080 = MFENCE
- { 1081, 0, 0, 0, "MINGW_ALLOCA", 0|(1<<TID::UsesCustomInserter), 0, NULL, NULL, NULL, 0 }, // Inst #1081 = MINGW_ALLOCA
- { 1082, 7, 1, 0, "MINPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(93<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1082 = MINPDrm
- { 1083, 7, 1, 0, "MINPDrm_Int", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(93<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1083 = MINPDrm_Int
- { 1084, 3, 1, 0, "MINPDrr", 0, 0|5|(1<<6)|(1<<8)|(93<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1084 = MINPDrr
- { 1085, 3, 1, 0, "MINPDrr_Int", 0, 0|5|(1<<6)|(1<<8)|(93<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1085 = MINPDrr_Int
- { 1086, 7, 1, 0, "MINPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(93<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1086 = MINPSrm
- { 1087, 7, 1, 0, "MINPSrm_Int", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(93<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1087 = MINPSrm_Int
- { 1088, 3, 1, 0, "MINPSrr", 0, 0|5|(1<<8)|(93<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1088 = MINPSrr
- { 1089, 3, 1, 0, "MINPSrr_Int", 0, 0|5|(1<<8)|(93<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1089 = MINPSrr_Int
- { 1090, 7, 1, 0, "MINSDrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(93<<24), NULL, NULL, NULL, OperandInfo26 }, // Inst #1090 = MINSDrm
- { 1091, 7, 1, 0, "MINSDrm_Int", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(93<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1091 = MINSDrm_Int
- { 1092, 3, 1, 0, "MINSDrr", 0, 0|5|(11<<8)|(93<<24), NULL, NULL, NULL, OperandInfo27 }, // Inst #1092 = MINSDrr
- { 1093, 3, 1, 0, "MINSDrr_Int", 0, 0|5|(11<<8)|(93<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1093 = MINSDrr_Int
- { 1094, 7, 1, 0, "MINSSrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(93<<24), NULL, NULL, NULL, OperandInfo28 }, // Inst #1094 = MINSSrm
- { 1095, 7, 1, 0, "MINSSrm_Int", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(93<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1095 = MINSSrm_Int
- { 1096, 3, 1, 0, "MINSSrr", 0, 0|5|(12<<8)|(93<<24), NULL, NULL, NULL, OperandInfo29 }, // Inst #1096 = MINSSrr
- { 1097, 3, 1, 0, "MINSSrr_Int", 0, 0|5|(12<<8)|(93<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1097 = MINSSrr_Int
- { 1098, 6, 1, 0, "MMX_CVTPD2PIrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(45<<24), NULL, NULL, NULL, OperandInfo117 }, // Inst #1098 = MMX_CVTPD2PIrm
- { 1099, 2, 1, 0, "MMX_CVTPD2PIrr", 0, 0|5|(1<<6)|(1<<8)|(45<<24), NULL, NULL, NULL, OperandInfo118 }, // Inst #1099 = MMX_CVTPD2PIrr
- { 1100, 6, 1, 0, "MMX_CVTPI2PDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(42<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1100 = MMX_CVTPI2PDrm
- { 1101, 2, 1, 0, "MMX_CVTPI2PDrr", 0, 0|5|(1<<6)|(1<<8)|(42<<24), NULL, NULL, NULL, OperandInfo119 }, // Inst #1101 = MMX_CVTPI2PDrr
- { 1102, 6, 1, 0, "MMX_CVTPI2PSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(42<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1102 = MMX_CVTPI2PSrm
- { 1103, 2, 1, 0, "MMX_CVTPI2PSrr", 0, 0|5|(1<<8)|(42<<24), NULL, NULL, NULL, OperandInfo119 }, // Inst #1103 = MMX_CVTPI2PSrr
- { 1104, 6, 1, 0, "MMX_CVTPS2PIrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(45<<24), NULL, NULL, NULL, OperandInfo117 }, // Inst #1104 = MMX_CVTPS2PIrm
- { 1105, 2, 1, 0, "MMX_CVTPS2PIrr", 0, 0|5|(1<<8)|(45<<24), NULL, NULL, NULL, OperandInfo118 }, // Inst #1105 = MMX_CVTPS2PIrr
- { 1106, 6, 1, 0, "MMX_CVTTPD2PIrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(44<<24), NULL, NULL, NULL, OperandInfo117 }, // Inst #1106 = MMX_CVTTPD2PIrm
- { 1107, 2, 1, 0, "MMX_CVTTPD2PIrr", 0, 0|5|(1<<6)|(1<<8)|(44<<24), NULL, NULL, NULL, OperandInfo118 }, // Inst #1107 = MMX_CVTTPD2PIrr
- { 1108, 6, 1, 0, "MMX_CVTTPS2PIrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(44<<24), NULL, NULL, NULL, OperandInfo117 }, // Inst #1108 = MMX_CVTTPS2PIrm
- { 1109, 2, 1, 0, "MMX_CVTTPS2PIrr", 0, 0|5|(1<<8)|(44<<24), NULL, NULL, NULL, OperandInfo118 }, // Inst #1109 = MMX_CVTTPS2PIrr
- { 1110, 0, 0, 0, "MMX_EMMS", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(119<<24), NULL, NULL, NULL, 0 }, // Inst #1110 = MMX_EMMS
- { 1111, 0, 0, 0, "MMX_FEMMS", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(14<<24), NULL, NULL, NULL, 0 }, // Inst #1111 = MMX_FEMMS
- { 1112, 2, 0, 0, "MMX_MASKMOVQ", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(247<<24), ImplicitList35, NULL, NULL, OperandInfo129 }, // Inst #1112 = MMX_MASKMOVQ
- { 1113, 2, 0, 0, "MMX_MASKMOVQ64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(247<<24), ImplicitList36, NULL, NULL, OperandInfo129 }, // Inst #1113 = MMX_MASKMOVQ64
- { 1114, 2, 1, 0, "MMX_MOVD64from64rr", 0, 0|3|(1<<8)|(1<<12)|(126<<24), NULL, NULL, NULL, OperandInfo130 }, // Inst #1114 = MMX_MOVD64from64rr
- { 1115, 2, 0, 0, "MMX_MOVD64grr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(126<<24), NULL, NULL, NULL, OperandInfo131 }, // Inst #1115 = MMX_MOVD64grr
- { 1116, 6, 0, 0, "MMX_MOVD64mr", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(126<<24), NULL, NULL, NULL, OperandInfo132 }, // Inst #1116 = MMX_MOVD64mr
- { 1117, 6, 1, 0, "MMX_MOVD64rm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|6|(1<<8)|(110<<24), NULL, NULL, NULL, OperandInfo117 }, // Inst #1117 = MMX_MOVD64rm
- { 1118, 2, 1, 0, "MMX_MOVD64rr", 0, 0|5|(1<<8)|(110<<24), NULL, NULL, NULL, OperandInfo133 }, // Inst #1118 = MMX_MOVD64rr
- { 1119, 2, 1, 0, "MMX_MOVD64rrv164", 0, 0|5|(1<<8)|(110<<24), NULL, NULL, NULL, OperandInfo134 }, // Inst #1119 = MMX_MOVD64rrv164
- { 1120, 2, 1, 0, "MMX_MOVD64to64rr", 0, 0|5|(1<<8)|(1<<12)|(110<<24), NULL, NULL, NULL, OperandInfo134 }, // Inst #1120 = MMX_MOVD64to64rr
- { 1121, 2, 1, 0, "MMX_MOVDQ2Qrr", 0, 0|5|(11<<8)|(1<<13)|(214<<24), NULL, NULL, NULL, OperandInfo118 }, // Inst #1121 = MMX_MOVDQ2Qrr
- { 1122, 6, 0, 0, "MMX_MOVNTQmr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(231<<24), NULL, NULL, NULL, OperandInfo132 }, // Inst #1122 = MMX_MOVNTQmr
- { 1123, 2, 1, 0, "MMX_MOVQ2DQrr", 0, 0|5|(12<<8)|(1<<13)|(214<<24), NULL, NULL, NULL, OperandInfo119 }, // Inst #1123 = MMX_MOVQ2DQrr
- { 1124, 2, 1, 0, "MMX_MOVQ2FR64rr", 0, 0|5|(12<<8)|(1<<13)|(214<<24), NULL, NULL, NULL, OperandInfo135 }, // Inst #1124 = MMX_MOVQ2FR64rr
- { 1125, 6, 0, 0, "MMX_MOVQ64gmr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(1<<12)|(126<<24), NULL, NULL, NULL, OperandInfo132 }, // Inst #1125 = MMX_MOVQ64gmr
- { 1126, 6, 0, 0, "MMX_MOVQ64mr", 0|(1<<TID::MayStore), 0|4|(1<<8)|(127<<24), NULL, NULL, NULL, OperandInfo132 }, // Inst #1126 = MMX_MOVQ64mr
- { 1127, 6, 1, 0, "MMX_MOVQ64rm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|6|(1<<8)|(111<<24), NULL, NULL, NULL, OperandInfo117 }, // Inst #1127 = MMX_MOVQ64rm
- { 1128, 2, 1, 0, "MMX_MOVQ64rr", 0, 0|5|(1<<8)|(111<<24), NULL, NULL, NULL, OperandInfo129 }, // Inst #1128 = MMX_MOVQ64rr
- { 1129, 6, 1, 0, "MMX_MOVZDI2PDIrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(110<<24), NULL, NULL, NULL, OperandInfo117 }, // Inst #1129 = MMX_MOVZDI2PDIrm
- { 1130, 2, 1, 0, "MMX_MOVZDI2PDIrr", 0, 0|5|(1<<8)|(110<<24), NULL, NULL, NULL, OperandInfo133 }, // Inst #1130 = MMX_MOVZDI2PDIrr
- { 1131, 7, 1, 0, "MMX_PACKSSDWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(107<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1131 = MMX_PACKSSDWrm
- { 1132, 3, 1, 0, "MMX_PACKSSDWrr", 0, 0|5|(1<<8)|(107<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1132 = MMX_PACKSSDWrr
- { 1133, 7, 1, 0, "MMX_PACKSSWBrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(99<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1133 = MMX_PACKSSWBrm
- { 1134, 3, 1, 0, "MMX_PACKSSWBrr", 0, 0|5|(1<<8)|(99<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1134 = MMX_PACKSSWBrr
- { 1135, 7, 1, 0, "MMX_PACKUSWBrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(103<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1135 = MMX_PACKUSWBrm
- { 1136, 3, 1, 0, "MMX_PACKUSWBrr", 0, 0|5|(1<<8)|(103<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1136 = MMX_PACKUSWBrr
- { 1137, 7, 1, 0, "MMX_PADDBrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(252<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1137 = MMX_PADDBrm
- { 1138, 3, 1, 0, "MMX_PADDBrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(252<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1138 = MMX_PADDBrr
- { 1139, 7, 1, 0, "MMX_PADDDrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(254<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1139 = MMX_PADDDrm
- { 1140, 3, 1, 0, "MMX_PADDDrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(254<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1140 = MMX_PADDDrr
- { 1141, 7, 1, 0, "MMX_PADDQrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(212<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1141 = MMX_PADDQrm
- { 1142, 3, 1, 0, "MMX_PADDQrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(212<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1142 = MMX_PADDQrr
- { 1143, 7, 1, 0, "MMX_PADDSBrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(236<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1143 = MMX_PADDSBrm
- { 1144, 3, 1, 0, "MMX_PADDSBrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(236<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1144 = MMX_PADDSBrr
- { 1145, 7, 1, 0, "MMX_PADDSWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(237<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1145 = MMX_PADDSWrm
- { 1146, 3, 1, 0, "MMX_PADDSWrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(237<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1146 = MMX_PADDSWrr
- { 1147, 7, 1, 0, "MMX_PADDUSBrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(220<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1147 = MMX_PADDUSBrm
- { 1148, 3, 1, 0, "MMX_PADDUSBrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(220<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1148 = MMX_PADDUSBrr
- { 1149, 7, 1, 0, "MMX_PADDUSWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(221<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1149 = MMX_PADDUSWrm
- { 1150, 3, 1, 0, "MMX_PADDUSWrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(221<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1150 = MMX_PADDUSWrr
- { 1151, 7, 1, 0, "MMX_PADDWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(253<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1151 = MMX_PADDWrm
- { 1152, 3, 1, 0, "MMX_PADDWrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(253<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1152 = MMX_PADDWrr
- { 1153, 7, 1, 0, "MMX_PANDNrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(223<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1153 = MMX_PANDNrm
- { 1154, 3, 1, 0, "MMX_PANDNrr", 0, 0|5|(1<<8)|(223<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1154 = MMX_PANDNrr
- { 1155, 7, 1, 0, "MMX_PANDrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(219<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1155 = MMX_PANDrm
- { 1156, 3, 1, 0, "MMX_PANDrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(219<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1156 = MMX_PANDrr
- { 1157, 7, 1, 0, "MMX_PAVGBrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(224<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1157 = MMX_PAVGBrm
- { 1158, 3, 1, 0, "MMX_PAVGBrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(224<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1158 = MMX_PAVGBrr
- { 1159, 7, 1, 0, "MMX_PAVGWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(227<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1159 = MMX_PAVGWrm
- { 1160, 3, 1, 0, "MMX_PAVGWrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(227<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1160 = MMX_PAVGWrr
- { 1161, 7, 1, 0, "MMX_PCMPEQBrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(116<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1161 = MMX_PCMPEQBrm
- { 1162, 3, 1, 0, "MMX_PCMPEQBrr", 0, 0|5|(1<<8)|(116<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1162 = MMX_PCMPEQBrr
- { 1163, 7, 1, 0, "MMX_PCMPEQDrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(118<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1163 = MMX_PCMPEQDrm
- { 1164, 3, 1, 0, "MMX_PCMPEQDrr", 0, 0|5|(1<<8)|(118<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1164 = MMX_PCMPEQDrr
- { 1165, 7, 1, 0, "MMX_PCMPEQWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(117<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1165 = MMX_PCMPEQWrm
- { 1166, 3, 1, 0, "MMX_PCMPEQWrr", 0, 0|5|(1<<8)|(117<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1166 = MMX_PCMPEQWrr
- { 1167, 7, 1, 0, "MMX_PCMPGTBrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(100<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1167 = MMX_PCMPGTBrm
- { 1168, 3, 1, 0, "MMX_PCMPGTBrr", 0, 0|5|(1<<8)|(100<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1168 = MMX_PCMPGTBrr
- { 1169, 7, 1, 0, "MMX_PCMPGTDrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(102<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1169 = MMX_PCMPGTDrm
- { 1170, 3, 1, 0, "MMX_PCMPGTDrr", 0, 0|5|(1<<8)|(102<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1170 = MMX_PCMPGTDrr
- { 1171, 7, 1, 0, "MMX_PCMPGTWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(101<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1171 = MMX_PCMPGTWrm
- { 1172, 3, 1, 0, "MMX_PCMPGTWrr", 0, 0|5|(1<<8)|(101<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1172 = MMX_PCMPGTWrr
- { 1173, 3, 1, 0, "MMX_PEXTRWri", 0, 0|5|(1<<8)|(1<<13)|(197<<24), NULL, NULL, NULL, OperandInfo138 }, // Inst #1173 = MMX_PEXTRWri
- { 1174, 8, 1, 0, "MMX_PINSRWrmi", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<13)|(196<<24), NULL, NULL, NULL, OperandInfo139 }, // Inst #1174 = MMX_PINSRWrmi
- { 1175, 4, 1, 0, "MMX_PINSRWrri", 0, 0|5|(1<<8)|(1<<13)|(196<<24), NULL, NULL, NULL, OperandInfo140 }, // Inst #1175 = MMX_PINSRWrri
- { 1176, 7, 1, 0, "MMX_PMADDWDrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(245<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1176 = MMX_PMADDWDrm
- { 1177, 3, 1, 0, "MMX_PMADDWDrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(245<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1177 = MMX_PMADDWDrr
- { 1178, 7, 1, 0, "MMX_PMAXSWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(238<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1178 = MMX_PMAXSWrm
- { 1179, 3, 1, 0, "MMX_PMAXSWrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(238<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1179 = MMX_PMAXSWrr
- { 1180, 7, 1, 0, "MMX_PMAXUBrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(222<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1180 = MMX_PMAXUBrm
- { 1181, 3, 1, 0, "MMX_PMAXUBrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(222<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1181 = MMX_PMAXUBrr
- { 1182, 7, 1, 0, "MMX_PMINSWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(234<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1182 = MMX_PMINSWrm
- { 1183, 3, 1, 0, "MMX_PMINSWrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(234<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1183 = MMX_PMINSWrr
- { 1184, 7, 1, 0, "MMX_PMINUBrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(218<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1184 = MMX_PMINUBrm
- { 1185, 3, 1, 0, "MMX_PMINUBrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(218<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1185 = MMX_PMINUBrr
- { 1186, 2, 1, 0, "MMX_PMOVMSKBrr", 0, 0|5|(1<<8)|(215<<24), NULL, NULL, NULL, OperandInfo131 }, // Inst #1186 = MMX_PMOVMSKBrr
- { 1187, 7, 1, 0, "MMX_PMULHUWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(228<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1187 = MMX_PMULHUWrm
- { 1188, 3, 1, 0, "MMX_PMULHUWrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(228<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1188 = MMX_PMULHUWrr
- { 1189, 7, 1, 0, "MMX_PMULHWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(229<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1189 = MMX_PMULHWrm
- { 1190, 3, 1, 0, "MMX_PMULHWrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(229<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1190 = MMX_PMULHWrr
- { 1191, 7, 1, 0, "MMX_PMULLWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(213<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1191 = MMX_PMULLWrm
- { 1192, 3, 1, 0, "MMX_PMULLWrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(213<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1192 = MMX_PMULLWrr
- { 1193, 7, 1, 0, "MMX_PMULUDQrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(244<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1193 = MMX_PMULUDQrm
- { 1194, 3, 1, 0, "MMX_PMULUDQrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(244<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1194 = MMX_PMULUDQrr
- { 1195, 7, 1, 0, "MMX_PORrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(235<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1195 = MMX_PORrm
- { 1196, 3, 1, 0, "MMX_PORrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(235<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1196 = MMX_PORrr
- { 1197, 7, 1, 0, "MMX_PSADBWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(246<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1197 = MMX_PSADBWrm
- { 1198, 3, 1, 0, "MMX_PSADBWrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(246<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1198 = MMX_PSADBWrr
- { 1199, 7, 1, 0, "MMX_PSHUFWmi", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<13)|(112<<24), NULL, NULL, NULL, OperandInfo141 }, // Inst #1199 = MMX_PSHUFWmi
- { 1200, 3, 1, 0, "MMX_PSHUFWri", 0, 0|5|(1<<8)|(1<<13)|(112<<24), NULL, NULL, NULL, OperandInfo142 }, // Inst #1200 = MMX_PSHUFWri
- { 1201, 3, 1, 0, "MMX_PSLLDri", 0, 0|22|(1<<8)|(1<<13)|(114<<24), NULL, NULL, NULL, OperandInfo143 }, // Inst #1201 = MMX_PSLLDri
- { 1202, 7, 1, 0, "MMX_PSLLDrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(242<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1202 = MMX_PSLLDrm
- { 1203, 3, 1, 0, "MMX_PSLLDrr", 0, 0|5|(1<<8)|(242<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1203 = MMX_PSLLDrr
- { 1204, 3, 1, 0, "MMX_PSLLQri", 0, 0|22|(1<<8)|(1<<13)|(115<<24), NULL, NULL, NULL, OperandInfo143 }, // Inst #1204 = MMX_PSLLQri
- { 1205, 7, 1, 0, "MMX_PSLLQrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(243<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1205 = MMX_PSLLQrm
- { 1206, 3, 1, 0, "MMX_PSLLQrr", 0, 0|5|(1<<8)|(243<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1206 = MMX_PSLLQrr
- { 1207, 3, 1, 0, "MMX_PSLLWri", 0, 0|22|(1<<8)|(1<<13)|(113<<24), NULL, NULL, NULL, OperandInfo143 }, // Inst #1207 = MMX_PSLLWri
- { 1208, 7, 1, 0, "MMX_PSLLWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(241<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1208 = MMX_PSLLWrm
- { 1209, 3, 1, 0, "MMX_PSLLWrr", 0, 0|5|(1<<8)|(241<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1209 = MMX_PSLLWrr
- { 1210, 3, 1, 0, "MMX_PSRADri", 0, 0|20|(1<<8)|(1<<13)|(114<<24), NULL, NULL, NULL, OperandInfo143 }, // Inst #1210 = MMX_PSRADri
- { 1211, 7, 1, 0, "MMX_PSRADrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(226<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1211 = MMX_PSRADrm
- { 1212, 3, 1, 0, "MMX_PSRADrr", 0, 0|5|(1<<8)|(226<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1212 = MMX_PSRADrr
- { 1213, 3, 1, 0, "MMX_PSRAWri", 0, 0|20|(1<<8)|(1<<13)|(113<<24), NULL, NULL, NULL, OperandInfo143 }, // Inst #1213 = MMX_PSRAWri
- { 1214, 7, 1, 0, "MMX_PSRAWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(225<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1214 = MMX_PSRAWrm
- { 1215, 3, 1, 0, "MMX_PSRAWrr", 0, 0|5|(1<<8)|(225<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1215 = MMX_PSRAWrr
- { 1216, 3, 1, 0, "MMX_PSRLDri", 0, 0|18|(1<<8)|(1<<13)|(114<<24), NULL, NULL, NULL, OperandInfo143 }, // Inst #1216 = MMX_PSRLDri
- { 1217, 7, 1, 0, "MMX_PSRLDrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(210<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1217 = MMX_PSRLDrm
- { 1218, 3, 1, 0, "MMX_PSRLDrr", 0, 0|5|(1<<8)|(210<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1218 = MMX_PSRLDrr
- { 1219, 3, 1, 0, "MMX_PSRLQri", 0, 0|18|(1<<8)|(1<<13)|(115<<24), NULL, NULL, NULL, OperandInfo143 }, // Inst #1219 = MMX_PSRLQri
- { 1220, 7, 1, 0, "MMX_PSRLQrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(211<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1220 = MMX_PSRLQrm
- { 1221, 3, 1, 0, "MMX_PSRLQrr", 0, 0|5|(1<<8)|(211<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1221 = MMX_PSRLQrr
- { 1222, 3, 1, 0, "MMX_PSRLWri", 0, 0|18|(1<<8)|(1<<13)|(113<<24), NULL, NULL, NULL, OperandInfo143 }, // Inst #1222 = MMX_PSRLWri
- { 1223, 7, 1, 0, "MMX_PSRLWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(209<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1223 = MMX_PSRLWrm
- { 1224, 3, 1, 0, "MMX_PSRLWrr", 0, 0|5|(1<<8)|(209<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1224 = MMX_PSRLWrr
- { 1225, 7, 1, 0, "MMX_PSUBBrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(248<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1225 = MMX_PSUBBrm
- { 1226, 3, 1, 0, "MMX_PSUBBrr", 0, 0|5|(1<<8)|(248<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1226 = MMX_PSUBBrr
- { 1227, 7, 1, 0, "MMX_PSUBDrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(250<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1227 = MMX_PSUBDrm
- { 1228, 3, 1, 0, "MMX_PSUBDrr", 0, 0|5|(1<<8)|(250<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1228 = MMX_PSUBDrr
- { 1229, 7, 1, 0, "MMX_PSUBQrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(251<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1229 = MMX_PSUBQrm
- { 1230, 3, 1, 0, "MMX_PSUBQrr", 0, 0|5|(1<<8)|(251<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1230 = MMX_PSUBQrr
- { 1231, 7, 1, 0, "MMX_PSUBSBrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(232<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1231 = MMX_PSUBSBrm
- { 1232, 3, 1, 0, "MMX_PSUBSBrr", 0, 0|5|(1<<8)|(232<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1232 = MMX_PSUBSBrr
- { 1233, 7, 1, 0, "MMX_PSUBSWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(233<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1233 = MMX_PSUBSWrm
- { 1234, 3, 1, 0, "MMX_PSUBSWrr", 0, 0|5|(1<<8)|(233<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1234 = MMX_PSUBSWrr
- { 1235, 7, 1, 0, "MMX_PSUBUSBrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(216<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1235 = MMX_PSUBUSBrm
- { 1236, 3, 1, 0, "MMX_PSUBUSBrr", 0, 0|5|(1<<8)|(216<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1236 = MMX_PSUBUSBrr
- { 1237, 7, 1, 0, "MMX_PSUBUSWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(217<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1237 = MMX_PSUBUSWrm
- { 1238, 3, 1, 0, "MMX_PSUBUSWrr", 0, 0|5|(1<<8)|(217<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1238 = MMX_PSUBUSWrr
- { 1239, 7, 1, 0, "MMX_PSUBWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(249<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1239 = MMX_PSUBWrm
- { 1240, 3, 1, 0, "MMX_PSUBWrr", 0, 0|5|(1<<8)|(249<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1240 = MMX_PSUBWrr
- { 1241, 7, 1, 0, "MMX_PUNPCKHBWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(104<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1241 = MMX_PUNPCKHBWrm
- { 1242, 3, 1, 0, "MMX_PUNPCKHBWrr", 0, 0|5|(1<<8)|(104<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1242 = MMX_PUNPCKHBWrr
- { 1243, 7, 1, 0, "MMX_PUNPCKHDQrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(106<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1243 = MMX_PUNPCKHDQrm
- { 1244, 3, 1, 0, "MMX_PUNPCKHDQrr", 0, 0|5|(1<<8)|(106<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1244 = MMX_PUNPCKHDQrr
- { 1245, 7, 1, 0, "MMX_PUNPCKHWDrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(105<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1245 = MMX_PUNPCKHWDrm
- { 1246, 3, 1, 0, "MMX_PUNPCKHWDrr", 0, 0|5|(1<<8)|(105<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1246 = MMX_PUNPCKHWDrr
- { 1247, 7, 1, 0, "MMX_PUNPCKLBWrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(96<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1247 = MMX_PUNPCKLBWrm
- { 1248, 3, 1, 0, "MMX_PUNPCKLBWrr", 0, 0|5|(1<<8)|(96<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1248 = MMX_PUNPCKLBWrr
- { 1249, 7, 1, 0, "MMX_PUNPCKLDQrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(98<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1249 = MMX_PUNPCKLDQrm
- { 1250, 3, 1, 0, "MMX_PUNPCKLDQrr", 0, 0|5|(1<<8)|(98<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1250 = MMX_PUNPCKLDQrr
- { 1251, 7, 1, 0, "MMX_PUNPCKLWDrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(97<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1251 = MMX_PUNPCKLWDrm
- { 1252, 3, 1, 0, "MMX_PUNPCKLWDrr", 0, 0|5|(1<<8)|(97<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1252 = MMX_PUNPCKLWDrr
- { 1253, 7, 1, 0, "MMX_PXORrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(239<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1253 = MMX_PXORrm
- { 1254, 3, 1, 0, "MMX_PXORrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(239<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1254 = MMX_PXORrr
- { 1255, 1, 1, 0, "MMX_V_SET0", 0|(1<<TID::Rematerializable), 0|32|(1<<8)|(239<<24), NULL, NULL, NULL, OperandInfo144 }, // Inst #1255 = MMX_V_SET0
- { 1256, 1, 1, 0, "MMX_V_SETALLONES", 0|(1<<TID::Rematerializable), 0|32|(1<<8)|(118<<24), NULL, NULL, NULL, OperandInfo144 }, // Inst #1256 = MMX_V_SETALLONES
- { 1257, 0, 0, 0, "MONITOR", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|37|(1<<8)|(1<<24), NULL, NULL, NULL, 0 }, // Inst #1257 = MONITOR
- { 1258, 1, 1, 0, "MOV16ao16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(3<<13)|(163<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #1258 = MOV16ao16
- { 1259, 6, 0, 0, "MOV16mi", 0|(1<<TID::MayStore), 0|24|(1<<6)|(3<<13)|(199<<24), NULL, NULL, NULL, OperandInfo6 }, // Inst #1259 = MOV16mi
- { 1260, 6, 0, 0, "MOV16mr", 0|(1<<TID::MayStore), 0|4|(1<<6)|(137<<24), NULL, NULL, NULL, OperandInfo7 }, // Inst #1260 = MOV16mr
- { 1261, 6, 1, 0, "MOV16ms", 0|(1<<TID::UnmodeledSideEffects), 0|4|(140<<24), NULL, NULL, NULL, OperandInfo145 }, // Inst #1261 = MOV16ms
- { 1262, 1, 0, 0, "MOV16o16a", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(3<<13)|(161<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #1262 = MOV16o16a
- { 1263, 1, 1, 0, "MOV16r0", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0|32|(1<<6)|(49<<24), NULL, ImplicitList1, Barriers1, OperandInfo93 }, // Inst #1263 = MOV16r0
- { 1264, 2, 1, 0, "MOV16ri", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0|2|(1<<6)|(3<<13)|(184<<24), NULL, NULL, NULL, OperandInfo54 }, // Inst #1264 = MOV16ri
- { 1265, 6, 1, 0, "MOV16rm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|6|(1<<6)|(139<<24), NULL, NULL, NULL, OperandInfo46 }, // Inst #1265 = MOV16rm
- { 1266, 2, 1, 0, "MOV16rr", 0, 0|3|(1<<6)|(137<<24), NULL, NULL, NULL, OperandInfo47 }, // Inst #1266 = MOV16rr
- { 1267, 2, 1, 0, "MOV16rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(139<<24), NULL, NULL, NULL, OperandInfo47 }, // Inst #1267 = MOV16rr_REV
- { 1268, 2, 1, 0, "MOV16rs", 0|(1<<TID::UnmodeledSideEffects), 0|3|(140<<24), NULL, NULL, NULL, OperandInfo146 }, // Inst #1268 = MOV16rs
- { 1269, 6, 1, 0, "MOV16sm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(142<<24), NULL, NULL, NULL, OperandInfo147 }, // Inst #1269 = MOV16sm
- { 1270, 2, 1, 0, "MOV16sr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(142<<24), NULL, NULL, NULL, OperandInfo148 }, // Inst #1270 = MOV16sr
- { 1271, 1, 1, 0, "MOV32ao32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<13)|(163<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #1271 = MOV32ao32
- { 1272, 2, 1, 0, "MOV32cr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(34<<24), NULL, NULL, NULL, OperandInfo149 }, // Inst #1272 = MOV32cr
- { 1273, 2, 1, 0, "MOV32dr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(35<<24), NULL, NULL, NULL, OperandInfo150 }, // Inst #1273 = MOV32dr
- { 1274, 6, 0, 0, "MOV32mi", 0|(1<<TID::MayStore), 0|24|(4<<13)|(199<<24), NULL, NULL, NULL, OperandInfo6 }, // Inst #1274 = MOV32mi
- { 1275, 6, 0, 0, "MOV32mr", 0|(1<<TID::MayStore), 0|4|(137<<24), NULL, NULL, NULL, OperandInfo11 }, // Inst #1275 = MOV32mr
- { 1276, 1, 0, 0, "MOV32o32a", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<13)|(161<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #1276 = MOV32o32a
- { 1277, 1, 1, 0, "MOV32r0", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0|32|(49<<24), NULL, ImplicitList1, Barriers1, OperandInfo57 }, // Inst #1277 = MOV32r0
- { 1278, 2, 1, 0, "MOV32rc", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(32<<24), NULL, NULL, NULL, OperandInfo151 }, // Inst #1278 = MOV32rc
- { 1279, 2, 1, 0, "MOV32rd", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(33<<24), NULL, NULL, NULL, OperandInfo152 }, // Inst #1279 = MOV32rd
- { 1280, 2, 1, 0, "MOV32ri", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0|2|(4<<13)|(184<<24), NULL, NULL, NULL, OperandInfo55 }, // Inst #1280 = MOV32ri
- { 1281, 6, 1, 0, "MOV32rm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|6|(139<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #1281 = MOV32rm
- { 1282, 2, 1, 0, "MOV32rr", 0, 0|3|(137<<24), NULL, NULL, NULL, OperandInfo49 }, // Inst #1282 = MOV32rr
- { 1283, 2, 1, 0, "MOV32rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(139<<24), NULL, NULL, NULL, OperandInfo49 }, // Inst #1283 = MOV32rr_REV
- { 1284, 6, 1, 0, "MOV64FSrm", 0|(1<<TID::MayLoad), 0|6|(1<<12)|(1<<20)|(139<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #1284 = MOV64FSrm
- { 1285, 6, 1, 0, "MOV64GSrm", 0|(1<<TID::MayLoad), 0|6|(1<<12)|(2<<20)|(139<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #1285 = MOV64GSrm
- { 1286, 1, 1, 0, "MOV64ao64", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(4<<13)|(163<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #1286 = MOV64ao64
- { 1287, 1, 1, 0, "MOV64ao8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(1<<13)|(162<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #1287 = MOV64ao8
- { 1288, 2, 1, 0, "MOV64cr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(34<<24), NULL, NULL, NULL, OperandInfo153 }, // Inst #1288 = MOV64cr
- { 1289, 2, 1, 0, "MOV64dr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(35<<24), NULL, NULL, NULL, OperandInfo154 }, // Inst #1289 = MOV64dr
- { 1290, 6, 0, 0, "MOV64mi32", 0|(1<<TID::MayStore), 0|24|(1<<12)|(4<<13)|(199<<24), NULL, NULL, NULL, OperandInfo6 }, // Inst #1290 = MOV64mi32
- { 1291, 6, 0, 0, "MOV64mr", 0|(1<<TID::MayStore), 0|4|(1<<12)|(137<<24), NULL, NULL, NULL, OperandInfo15 }, // Inst #1291 = MOV64mr
- { 1292, 6, 1, 0, "MOV64ms", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<12)|(140<<24), NULL, NULL, NULL, OperandInfo145 }, // Inst #1292 = MOV64ms
- { 1293, 1, 0, 0, "MOV64o64a", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(4<<13)|(161<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #1293 = MOV64o64a
- { 1294, 1, 0, 0, "MOV64o8a", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(1<<13)|(160<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #1294 = MOV64o8a
- { 1295, 1, 1, 0, "MOV64r0", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0|32|(49<<24), NULL, ImplicitList1, Barriers1, OperandInfo58 }, // Inst #1295 = MOV64r0
- { 1296, 2, 1, 0, "MOV64rc", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(32<<24), NULL, NULL, NULL, OperandInfo155 }, // Inst #1296 = MOV64rc
- { 1297, 2, 1, 0, "MOV64rd", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(33<<24), NULL, NULL, NULL, OperandInfo156 }, // Inst #1297 = MOV64rd
- { 1298, 2, 1, 0, "MOV64ri", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0|2|(1<<12)|(6<<13)|(184<<24), NULL, NULL, NULL, OperandInfo56 }, // Inst #1298 = MOV64ri
- { 1299, 2, 1, 0, "MOV64ri32", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0|16|(1<<12)|(4<<13)|(199<<24), NULL, NULL, NULL, OperandInfo56 }, // Inst #1299 = MOV64ri32
- { 1300, 2, 1, 0, "MOV64ri64i32", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0|2|(4<<13)|(184<<24), NULL, NULL, NULL, OperandInfo56 }, // Inst #1300 = MOV64ri64i32
- { 1301, 6, 1, 0, "MOV64rm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|6|(1<<12)|(139<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #1301 = MOV64rm
- { 1302, 2, 1, 0, "MOV64rr", 0, 0|3|(1<<12)|(137<<24), NULL, NULL, NULL, OperandInfo51 }, // Inst #1302 = MOV64rr
- { 1303, 2, 1, 0, "MOV64rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<12)|(139<<24), NULL, NULL, NULL, OperandInfo51 }, // Inst #1303 = MOV64rr_REV
- { 1304, 2, 1, 0, "MOV64rs", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<12)|(140<<24), NULL, NULL, NULL, OperandInfo157 }, // Inst #1304 = MOV64rs
- { 1305, 6, 1, 0, "MOV64sm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<12)|(142<<24), NULL, NULL, NULL, OperandInfo147 }, // Inst #1305 = MOV64sm
- { 1306, 2, 1, 0, "MOV64sr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<12)|(142<<24), NULL, NULL, NULL, OperandInfo158 }, // Inst #1306 = MOV64sr
- { 1307, 2, 1, 0, "MOV64toPQIrr", 0, 0|5|(1<<6)|(1<<8)|(1<<12)|(110<<24), NULL, NULL, NULL, OperandInfo159 }, // Inst #1307 = MOV64toPQIrr
- { 1308, 6, 1, 0, "MOV64toSDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(1<<12)|(110<<24), NULL, NULL, NULL, OperandInfo82 }, // Inst #1308 = MOV64toSDrm
- { 1309, 2, 1, 0, "MOV64toSDrr", 0, 0|5|(1<<6)|(1<<8)|(1<<12)|(110<<24), NULL, NULL, NULL, OperandInfo83 }, // Inst #1309 = MOV64toSDrr
- { 1310, 1, 1, 0, "MOV8ao8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<13)|(162<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #1310 = MOV8ao8
- { 1311, 6, 0, 0, "MOV8mi", 0|(1<<TID::MayStore), 0|24|(1<<13)|(198<<24), NULL, NULL, NULL, OperandInfo6 }, // Inst #1311 = MOV8mi
- { 1312, 6, 0, 0, "MOV8mr", 0|(1<<TID::MayStore), 0|4|(136<<24), NULL, NULL, NULL, OperandInfo20 }, // Inst #1312 = MOV8mr
- { 1313, 6, 0, 0, "MOV8mr_NOREX", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|4|(136<<24), NULL, NULL, NULL, OperandInfo160 }, // Inst #1313 = MOV8mr_NOREX
- { 1314, 1, 0, 0, "MOV8o8a", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<13)|(160<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #1314 = MOV8o8a
- { 1315, 1, 1, 0, "MOV8r0", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0|32|(48<<24), NULL, ImplicitList1, Barriers1, OperandInfo94 }, // Inst #1315 = MOV8r0
- { 1316, 2, 1, 0, "MOV8ri", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0|2|(1<<13)|(176<<24), NULL, NULL, NULL, OperandInfo68 }, // Inst #1316 = MOV8ri
- { 1317, 6, 1, 0, "MOV8rm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|6|(138<<24), NULL, NULL, NULL, OperandInfo69 }, // Inst #1317 = MOV8rm
- { 1318, 6, 1, 0, "MOV8rm_NOREX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable)|(1<<TID::UnmodeledSideEffects), 0|6|(138<<24), NULL, NULL, NULL, OperandInfo161 }, // Inst #1318 = MOV8rm_NOREX
- { 1319, 2, 1, 0, "MOV8rr", 0, 0|3|(136<<24), NULL, NULL, NULL, OperandInfo67 }, // Inst #1319 = MOV8rr
- { 1320, 2, 1, 0, "MOV8rr_NOREX", 0, 0|3|(136<<24), NULL, NULL, NULL, OperandInfo162 }, // Inst #1320 = MOV8rr_NOREX
- { 1321, 2, 1, 0, "MOV8rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(138<<24), NULL, NULL, NULL, OperandInfo67 }, // Inst #1321 = MOV8rr_REV
- { 1322, 6, 0, 0, "MOVAPDmr", 0|(1<<TID::MayStore), 0|4|(1<<6)|(1<<8)|(41<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1322 = MOVAPDmr
- { 1323, 6, 1, 0, "MOVAPDrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|6|(1<<6)|(1<<8)|(40<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1323 = MOVAPDrm
- { 1324, 2, 1, 0, "MOVAPDrr", 0, 0|5|(1<<6)|(1<<8)|(40<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1324 = MOVAPDrr
- { 1325, 6, 0, 0, "MOVAPSmr", 0|(1<<TID::MayStore), 0|4|(1<<8)|(41<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1325 = MOVAPSmr
- { 1326, 6, 1, 0, "MOVAPSrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|6|(1<<8)|(40<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1326 = MOVAPSrm
- { 1327, 2, 1, 0, "MOVAPSrr", 0, 0|5|(1<<8)|(40<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1327 = MOVAPSrr
- { 1328, 6, 1, 0, "MOVDDUPrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(18<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1328 = MOVDDUPrm
- { 1329, 2, 1, 0, "MOVDDUPrr", 0, 0|5|(11<<8)|(18<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1329 = MOVDDUPrr
- { 1330, 6, 1, 0, "MOVDI2PDIrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(110<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1330 = MOVDI2PDIrm
- { 1331, 2, 1, 0, "MOVDI2PDIrr", 0, 0|5|(1<<6)|(1<<8)|(110<<24), NULL, NULL, NULL, OperandInfo164 }, // Inst #1331 = MOVDI2PDIrr
- { 1332, 6, 1, 0, "MOVDI2SSrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(110<<24), NULL, NULL, NULL, OperandInfo80 }, // Inst #1332 = MOVDI2SSrm
- { 1333, 2, 1, 0, "MOVDI2SSrr", 0, 0|5|(1<<6)|(1<<8)|(110<<24), NULL, NULL, NULL, OperandInfo86 }, // Inst #1333 = MOVDI2SSrr
- { 1334, 6, 0, 0, "MOVDQAmr", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|4|(1<<6)|(1<<8)|(127<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1334 = MOVDQAmr
- { 1335, 6, 1, 0, "MOVDQArm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(1<<8)|(111<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1335 = MOVDQArm
- { 1336, 2, 1, 0, "MOVDQArr", 0, 0|5|(1<<6)|(1<<8)|(111<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1336 = MOVDQArr
- { 1337, 6, 0, 0, "MOVDQUmr", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|4|(12<<8)|(127<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1337 = MOVDQUmr
- { 1338, 6, 0, 0, "MOVDQUmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|4|(12<<8)|(127<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1338 = MOVDQUmr_Int
- { 1339, 6, 1, 0, "MOVDQUrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|6|(12<<8)|(111<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1339 = MOVDQUrm
- { 1340, 6, 1, 0, "MOVDQUrm_Int", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|6|(12<<8)|(111<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1340 = MOVDQUrm_Int
- { 1341, 3, 1, 0, "MOVHLPSrr", 0, 0|5|(1<<8)|(18<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1341 = MOVHLPSrr
- { 1342, 6, 0, 0, "MOVHPDmr", 0|(1<<TID::MayStore), 0|4|(1<<6)|(1<<8)|(23<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1342 = MOVHPDmr
- { 1343, 7, 1, 0, "MOVHPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(22<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1343 = MOVHPDrm
- { 1344, 6, 0, 0, "MOVHPSmr", 0|(1<<TID::MayStore), 0|4|(1<<8)|(23<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1344 = MOVHPSmr
- { 1345, 7, 1, 0, "MOVHPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(22<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1345 = MOVHPSrm
- { 1346, 3, 1, 0, "MOVLHPSrr", 0, 0|5|(1<<8)|(22<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1346 = MOVLHPSrr
- { 1347, 6, 0, 0, "MOVLPDmr", 0|(1<<TID::MayStore), 0|4|(1<<6)|(1<<8)|(19<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1347 = MOVLPDmr
- { 1348, 7, 1, 0, "MOVLPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(18<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1348 = MOVLPDrm
- { 1349, 6, 0, 0, "MOVLPSmr", 0|(1<<TID::MayStore), 0|4|(1<<8)|(19<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1349 = MOVLPSmr
- { 1350, 7, 1, 0, "MOVLPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(18<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1350 = MOVLPSrm
- { 1351, 6, 0, 0, "MOVLQ128mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|4|(1<<6)|(1<<8)|(214<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1351 = MOVLQ128mr
- { 1352, 2, 1, 0, "MOVMSKPDrr", 0, 0|5|(1<<6)|(1<<8)|(80<<24), NULL, NULL, NULL, OperandInfo122 }, // Inst #1352 = MOVMSKPDrr
- { 1353, 2, 1, 0, "MOVMSKPSrr", 0, 0|5|(1<<8)|(80<<24), NULL, NULL, NULL, OperandInfo122 }, // Inst #1353 = MOVMSKPSrr
- { 1354, 6, 1, 0, "MOVNTDQArm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(42<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1354 = MOVNTDQArm
- { 1355, 6, 0, 0, "MOVNTDQ_64mr", 0|(1<<TID::MayStore), 0|4|(1<<6)|(1<<8)|(231<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1355 = MOVNTDQ_64mr
- { 1356, 6, 0, 0, "MOVNTDQmr", 0|(1<<TID::MayStore), 0|4|(1<<6)|(1<<8)|(231<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1356 = MOVNTDQmr
- { 1357, 6, 0, 0, "MOVNTDQmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|4|(1<<6)|(1<<8)|(231<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1357 = MOVNTDQmr_Int
- { 1358, 6, 0, 0, "MOVNTI_64mr", 0|(1<<TID::MayStore), 0|4|(1<<8)|(1<<12)|(195<<24), NULL, NULL, NULL, OperandInfo15 }, // Inst #1358 = MOVNTI_64mr
- { 1359, 6, 0, 0, "MOVNTImr", 0|(1<<TID::MayStore), 0|4|(1<<8)|(195<<24), NULL, NULL, NULL, OperandInfo11 }, // Inst #1359 = MOVNTImr
- { 1360, 6, 0, 0, "MOVNTImr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(195<<24), NULL, NULL, NULL, OperandInfo11 }, // Inst #1360 = MOVNTImr_Int
- { 1361, 6, 0, 0, "MOVNTPDmr", 0|(1<<TID::MayStore), 0|4|(1<<6)|(1<<8)|(43<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1361 = MOVNTPDmr
- { 1362, 6, 0, 0, "MOVNTPDmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|4|(1<<6)|(1<<8)|(43<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1362 = MOVNTPDmr_Int
- { 1363, 6, 0, 0, "MOVNTPSmr", 0|(1<<TID::MayStore), 0|4|(1<<8)|(43<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1363 = MOVNTPSmr
- { 1364, 6, 0, 0, "MOVNTPSmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(43<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1364 = MOVNTPSmr_Int
- { 1365, 2, 1, 0, "MOVPC32r", 0|(1<<TID::NotDuplicable), 0|(4<<13)|(232<<24), ImplicitList2, NULL, NULL, OperandInfo55 }, // Inst #1365 = MOVPC32r
- { 1366, 6, 0, 0, "MOVPDI2DImr", 0|(1<<TID::MayStore), 0|4|(1<<6)|(1<<8)|(126<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1366 = MOVPDI2DImr
- { 1367, 2, 1, 0, "MOVPDI2DIrr", 0, 0|3|(1<<6)|(1<<8)|(126<<24), NULL, NULL, NULL, OperandInfo122 }, // Inst #1367 = MOVPDI2DIrr
- { 1368, 6, 0, 0, "MOVPQI2QImr", 0|(1<<TID::MayStore), 0|4|(1<<6)|(1<<8)|(214<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1368 = MOVPQI2QImr
- { 1369, 2, 1, 0, "MOVPQIto64rr", 0, 0|3|(1<<6)|(1<<8)|(1<<12)|(126<<24), NULL, NULL, NULL, OperandInfo121 }, // Inst #1369 = MOVPQIto64rr
- { 1370, 6, 1, 0, "MOVQI2PQIrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(126<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1370 = MOVQI2PQIrm
- { 1371, 2, 1, 0, "MOVQxrxr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(12<<8)|(126<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1371 = MOVQxrxr
- { 1372, 0, 0, 0, "MOVSB", 0|(1<<TID::UnmodeledSideEffects), 0|1|(164<<24), ImplicitList37, ImplicitList38, NULL, 0 }, // Inst #1372 = MOVSB
- { 1373, 0, 0, 0, "MOVSD", 0|(1<<TID::UnmodeledSideEffects), 0|1|(165<<24), ImplicitList37, ImplicitList38, NULL, 0 }, // Inst #1373 = MOVSD
- { 1374, 6, 0, 0, "MOVSDmr", 0|(1<<TID::MayStore), 0|4|(11<<8)|(17<<24), NULL, NULL, NULL, OperandInfo165 }, // Inst #1374 = MOVSDmr
- { 1375, 6, 1, 0, "MOVSDrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|6|(11<<8)|(16<<24), NULL, NULL, NULL, OperandInfo82 }, // Inst #1375 = MOVSDrm
- { 1376, 3, 1, 0, "MOVSDrr", 0, 0|5|(11<<8)|(16<<24), NULL, NULL, NULL, OperandInfo166 }, // Inst #1376 = MOVSDrr
- { 1377, 6, 0, 0, "MOVSDto64mr", 0|(1<<TID::MayStore), 0|4|(1<<6)|(1<<8)|(1<<12)|(126<<24), NULL, NULL, NULL, OperandInfo165 }, // Inst #1377 = MOVSDto64mr
- { 1378, 2, 1, 0, "MOVSDto64rr", 0, 0|3|(1<<6)|(1<<8)|(1<<12)|(126<<24), NULL, NULL, NULL, OperandInfo79 }, // Inst #1378 = MOVSDto64rr
- { 1379, 6, 1, 0, "MOVSHDUPrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(22<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1379 = MOVSHDUPrm
- { 1380, 2, 1, 0, "MOVSHDUPrr", 0, 0|5|(12<<8)|(22<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1380 = MOVSHDUPrr
- { 1381, 6, 1, 0, "MOVSLDUPrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(18<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1381 = MOVSLDUPrm
- { 1382, 2, 1, 0, "MOVSLDUPrr", 0, 0|5|(12<<8)|(18<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1382 = MOVSLDUPrr
- { 1383, 6, 0, 0, "MOVSS2DImr", 0|(1<<TID::MayStore), 0|4|(1<<6)|(1<<8)|(126<<24), NULL, NULL, NULL, OperandInfo167 }, // Inst #1383 = MOVSS2DImr
- { 1384, 2, 1, 0, "MOVSS2DIrr", 0, 0|3|(1<<6)|(1<<8)|(126<<24), NULL, NULL, NULL, OperandInfo89 }, // Inst #1384 = MOVSS2DIrr
- { 1385, 6, 0, 0, "MOVSSmr", 0|(1<<TID::MayStore), 0|4|(12<<8)|(17<<24), NULL, NULL, NULL, OperandInfo167 }, // Inst #1385 = MOVSSmr
- { 1386, 6, 1, 0, "MOVSSrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|6|(12<<8)|(16<<24), NULL, NULL, NULL, OperandInfo80 }, // Inst #1386 = MOVSSrm
- { 1387, 3, 1, 0, "MOVSSrr", 0, 0|5|(12<<8)|(16<<24), NULL, NULL, NULL, OperandInfo168 }, // Inst #1387 = MOVSSrr
- { 1388, 0, 0, 0, "MOVSW", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(165<<24), ImplicitList37, ImplicitList38, NULL, 0 }, // Inst #1388 = MOVSW
- { 1389, 6, 1, 0, "MOVSX16rm8", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(190<<24), NULL, NULL, NULL, OperandInfo46 }, // Inst #1389 = MOVSX16rm8
- { 1390, 6, 1, 0, "MOVSX16rm8W", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(1<<8)|(190<<24), NULL, NULL, NULL, OperandInfo46 }, // Inst #1390 = MOVSX16rm8W
- { 1391, 2, 1, 0, "MOVSX16rr8", 0, 0|5|(1<<8)|(190<<24), NULL, NULL, NULL, OperandInfo169 }, // Inst #1391 = MOVSX16rr8
- { 1392, 2, 1, 0, "MOVSX16rr8W", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(1<<8)|(190<<24), NULL, NULL, NULL, OperandInfo169 }, // Inst #1392 = MOVSX16rr8W
- { 1393, 6, 1, 0, "MOVSX32rm16", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(191<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #1393 = MOVSX32rm16
- { 1394, 6, 1, 0, "MOVSX32rm8", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(190<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #1394 = MOVSX32rm8
- { 1395, 2, 1, 0, "MOVSX32rr16", 0, 0|5|(1<<8)|(191<<24), NULL, NULL, NULL, OperandInfo170 }, // Inst #1395 = MOVSX32rr16
- { 1396, 2, 1, 0, "MOVSX32rr8", 0, 0|5|(1<<8)|(190<<24), NULL, NULL, NULL, OperandInfo171 }, // Inst #1396 = MOVSX32rr8
- { 1397, 6, 1, 0, "MOVSX64rm16", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(191<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #1397 = MOVSX64rm16
- { 1398, 6, 1, 0, "MOVSX64rm32", 0|(1<<TID::MayLoad), 0|6|(1<<12)|(99<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #1398 = MOVSX64rm32
- { 1399, 6, 1, 0, "MOVSX64rm8", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<12)|(190<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #1399 = MOVSX64rm8
- { 1400, 2, 1, 0, "MOVSX64rr16", 0, 0|5|(1<<8)|(1<<12)|(191<<24), NULL, NULL, NULL, OperandInfo172 }, // Inst #1400 = MOVSX64rr16
- { 1401, 2, 1, 0, "MOVSX64rr32", 0, 0|5|(1<<12)|(99<<24), NULL, NULL, NULL, OperandInfo125 }, // Inst #1401 = MOVSX64rr32
- { 1402, 2, 1, 0, "MOVSX64rr8", 0, 0|5|(1<<8)|(1<<12)|(190<<24), NULL, NULL, NULL, OperandInfo173 }, // Inst #1402 = MOVSX64rr8
- { 1403, 6, 0, 0, "MOVUPDmr", 0|(1<<TID::MayStore), 0|4|(1<<6)|(1<<8)|(17<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1403 = MOVUPDmr
- { 1404, 6, 0, 0, "MOVUPDmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|4|(1<<6)|(1<<8)|(17<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1404 = MOVUPDmr_Int
- { 1405, 6, 1, 0, "MOVUPDrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(16<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1405 = MOVUPDrm
- { 1406, 6, 1, 0, "MOVUPDrm_Int", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(16<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1406 = MOVUPDrm_Int
- { 1407, 2, 1, 0, "MOVUPDrr", 0, 0|5|(1<<6)|(1<<8)|(16<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1407 = MOVUPDrr
- { 1408, 6, 0, 0, "MOVUPSmr", 0|(1<<TID::MayStore), 0|4|(1<<8)|(17<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1408 = MOVUPSmr
- { 1409, 6, 0, 0, "MOVUPSmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(17<<24), NULL, NULL, NULL, OperandInfo163 }, // Inst #1409 = MOVUPSmr_Int
- { 1410, 6, 1, 0, "MOVUPSrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|6|(1<<8)|(16<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1410 = MOVUPSrm
- { 1411, 6, 1, 0, "MOVUPSrm_Int", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0|6|(1<<8)|(16<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1411 = MOVUPSrm_Int
- { 1412, 2, 1, 0, "MOVUPSrr", 0, 0|5|(1<<8)|(16<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1412 = MOVUPSrr
- { 1413, 6, 1, 0, "MOVZDI2PDIrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(110<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1413 = MOVZDI2PDIrm
- { 1414, 2, 1, 0, "MOVZDI2PDIrr", 0, 0|5|(1<<6)|(1<<8)|(110<<24), NULL, NULL, NULL, OperandInfo164 }, // Inst #1414 = MOVZDI2PDIrr
- { 1415, 6, 1, 0, "MOVZPQILo2PQIrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(126<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1415 = MOVZPQILo2PQIrm
- { 1416, 2, 1, 0, "MOVZPQILo2PQIrr", 0, 0|5|(12<<8)|(126<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1416 = MOVZPQILo2PQIrr
- { 1417, 6, 1, 0, "MOVZQI2PQIrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(126<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1417 = MOVZQI2PQIrm
- { 1418, 2, 1, 0, "MOVZQI2PQIrr", 0, 0|5|(1<<6)|(1<<8)|(1<<12)|(110<<24), NULL, NULL, NULL, OperandInfo159 }, // Inst #1418 = MOVZQI2PQIrr
- { 1419, 6, 1, 0, "MOVZX16rm8", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(182<<24), NULL, NULL, NULL, OperandInfo46 }, // Inst #1419 = MOVZX16rm8
- { 1420, 6, 1, 0, "MOVZX16rm8W", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(1<<8)|(182<<24), NULL, NULL, NULL, OperandInfo46 }, // Inst #1420 = MOVZX16rm8W
- { 1421, 2, 1, 0, "MOVZX16rr8", 0, 0|5|(1<<8)|(182<<24), NULL, NULL, NULL, OperandInfo169 }, // Inst #1421 = MOVZX16rr8
- { 1422, 2, 1, 0, "MOVZX16rr8W", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(1<<8)|(182<<24), NULL, NULL, NULL, OperandInfo169 }, // Inst #1422 = MOVZX16rr8W
- { 1423, 6, 1, 0, "MOVZX32_NOREXrm8", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(182<<24), NULL, NULL, NULL, OperandInfo174 }, // Inst #1423 = MOVZX32_NOREXrm8
- { 1424, 2, 1, 0, "MOVZX32_NOREXrr8", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(182<<24), NULL, NULL, NULL, OperandInfo175 }, // Inst #1424 = MOVZX32_NOREXrr8
- { 1425, 6, 1, 0, "MOVZX32rm16", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(183<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #1425 = MOVZX32rm16
- { 1426, 6, 1, 0, "MOVZX32rm8", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(182<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #1426 = MOVZX32rm8
- { 1427, 2, 1, 0, "MOVZX32rr16", 0, 0|5|(1<<8)|(183<<24), NULL, NULL, NULL, OperandInfo170 }, // Inst #1427 = MOVZX32rr16
- { 1428, 2, 1, 0, "MOVZX32rr8", 0, 0|5|(1<<8)|(182<<24), NULL, NULL, NULL, OperandInfo171 }, // Inst #1428 = MOVZX32rr8
- { 1429, 6, 1, 0, "MOVZX64rm16", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(183<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #1429 = MOVZX64rm16
- { 1430, 6, 1, 0, "MOVZX64rm16_Q", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(1<<12)|(183<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #1430 = MOVZX64rm16_Q
- { 1431, 6, 1, 0, "MOVZX64rm32", 0|(1<<TID::MayLoad), 0|6|(139<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #1431 = MOVZX64rm32
- { 1432, 6, 1, 0, "MOVZX64rm8", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(182<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #1432 = MOVZX64rm8
- { 1433, 6, 1, 0, "MOVZX64rm8_Q", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(1<<12)|(182<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #1433 = MOVZX64rm8_Q
- { 1434, 2, 1, 0, "MOVZX64rr16", 0, 0|5|(1<<8)|(183<<24), NULL, NULL, NULL, OperandInfo172 }, // Inst #1434 = MOVZX64rr16
- { 1435, 2, 1, 0, "MOVZX64rr16_Q", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(1<<12)|(183<<24), NULL, NULL, NULL, OperandInfo172 }, // Inst #1435 = MOVZX64rr16_Q
- { 1436, 2, 1, 0, "MOVZX64rr32", 0, 0|3|(137<<24), NULL, NULL, NULL, OperandInfo125 }, // Inst #1436 = MOVZX64rr32
- { 1437, 2, 1, 0, "MOVZX64rr8", 0, 0|5|(1<<8)|(182<<24), NULL, NULL, NULL, OperandInfo173 }, // Inst #1437 = MOVZX64rr8
- { 1438, 2, 1, 0, "MOVZX64rr8_Q", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(1<<12)|(182<<24), NULL, NULL, NULL, OperandInfo173 }, // Inst #1438 = MOVZX64rr8_Q
- { 1439, 2, 1, 0, "MOV_Fp3232", 0, 0|(7<<16), NULL, NULL, NULL, OperandInfo2 }, // Inst #1439 = MOV_Fp3232
- { 1440, 2, 1, 0, "MOV_Fp3264", 0, 0|(7<<16), NULL, NULL, NULL, OperandInfo176 }, // Inst #1440 = MOV_Fp3264
- { 1441, 2, 1, 0, "MOV_Fp3280", 0, 0|(7<<16), NULL, NULL, NULL, OperandInfo177 }, // Inst #1441 = MOV_Fp3280
- { 1442, 2, 1, 0, "MOV_Fp6432", 0, 0|(7<<16), NULL, NULL, NULL, OperandInfo178 }, // Inst #1442 = MOV_Fp6432
- { 1443, 2, 1, 0, "MOV_Fp6464", 0, 0|(7<<16), NULL, NULL, NULL, OperandInfo3 }, // Inst #1443 = MOV_Fp6464
- { 1444, 2, 1, 0, "MOV_Fp6480", 0, 0|(7<<16), NULL, NULL, NULL, OperandInfo179 }, // Inst #1444 = MOV_Fp6480
- { 1445, 2, 1, 0, "MOV_Fp8032", 0, 0|(7<<16), NULL, NULL, NULL, OperandInfo180 }, // Inst #1445 = MOV_Fp8032
- { 1446, 2, 1, 0, "MOV_Fp8064", 0, 0|(7<<16), NULL, NULL, NULL, OperandInfo181 }, // Inst #1446 = MOV_Fp8064
- { 1447, 2, 1, 0, "MOV_Fp8080", 0, 0|(7<<16), NULL, NULL, NULL, OperandInfo4 }, // Inst #1447 = MOV_Fp8080
- { 1448, 8, 1, 0, "MPSADBWrmi", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(1<<13)|(66<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #1448 = MPSADBWrmi
- { 1449, 4, 1, 0, "MPSADBWrri", 0|(1<<TID::Commutable), 0|5|(1<<6)|(14<<8)|(1<<13)|(66<<24), NULL, NULL, NULL, OperandInfo45 }, // Inst #1449 = MPSADBWrri
- { 1450, 5, 0, 0, "MUL16m", 0|(1<<TID::MayLoad), 0|28|(1<<6)|(247<<24), ImplicitList12, ImplicitList21, Barriers1, OperandInfo30 }, // Inst #1450 = MUL16m
- { 1451, 1, 0, 0, "MUL16r", 0, 0|20|(1<<6)|(247<<24), ImplicitList12, ImplicitList21, Barriers1, OperandInfo93 }, // Inst #1451 = MUL16r
- { 1452, 5, 0, 0, "MUL32m", 0|(1<<TID::MayLoad), 0|28|(247<<24), ImplicitList13, ImplicitList18, Barriers6, OperandInfo30 }, // Inst #1452 = MUL32m
- { 1453, 1, 0, 0, "MUL32r", 0, 0|20|(247<<24), ImplicitList13, ImplicitList18, Barriers6, OperandInfo57 }, // Inst #1453 = MUL32r
- { 1454, 5, 0, 0, "MUL64m", 0|(1<<TID::MayLoad), 0|28|(1<<12)|(247<<24), ImplicitList15, ImplicitList17, Barriers1, OperandInfo30 }, // Inst #1454 = MUL64m
- { 1455, 1, 0, 0, "MUL64r", 0, 0|20|(1<<12)|(247<<24), ImplicitList15, ImplicitList17, Barriers1, OperandInfo58 }, // Inst #1455 = MUL64r
- { 1456, 5, 0, 0, "MUL8m", 0|(1<<TID::MayLoad), 0|28|(246<<24), ImplicitList11, ImplicitList22, Barriers1, OperandInfo30 }, // Inst #1456 = MUL8m
- { 1457, 1, 0, 0, "MUL8r", 0, 0|20|(246<<24), ImplicitList11, ImplicitList22, Barriers1, OperandInfo94 }, // Inst #1457 = MUL8r
- { 1458, 7, 1, 0, "MULPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(89<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1458 = MULPDrm
- { 1459, 3, 1, 0, "MULPDrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(89<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1459 = MULPDrr
- { 1460, 7, 1, 0, "MULPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(89<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1460 = MULPSrm
- { 1461, 3, 1, 0, "MULPSrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(89<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1461 = MULPSrr
- { 1462, 7, 1, 0, "MULSDrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(89<<24), NULL, NULL, NULL, OperandInfo26 }, // Inst #1462 = MULSDrm
- { 1463, 7, 1, 0, "MULSDrm_Int", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(89<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1463 = MULSDrm_Int
- { 1464, 3, 1, 0, "MULSDrr", 0|(1<<TID::Commutable), 0|5|(11<<8)|(89<<24), NULL, NULL, NULL, OperandInfo27 }, // Inst #1464 = MULSDrr
- { 1465, 3, 1, 0, "MULSDrr_Int", 0, 0|5|(11<<8)|(89<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1465 = MULSDrr_Int
- { 1466, 7, 1, 0, "MULSSrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(89<<24), NULL, NULL, NULL, OperandInfo28 }, // Inst #1466 = MULSSrm
- { 1467, 7, 1, 0, "MULSSrm_Int", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(89<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1467 = MULSSrm_Int
- { 1468, 3, 1, 0, "MULSSrr", 0|(1<<TID::Commutable), 0|5|(12<<8)|(89<<24), NULL, NULL, NULL, OperandInfo29 }, // Inst #1468 = MULSSrr
- { 1469, 3, 1, 0, "MULSSrr_Int", 0, 0|5|(12<<8)|(89<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1469 = MULSSrr_Int
- { 1470, 5, 0, 0, "MUL_F32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|25|(216<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #1470 = MUL_F32m
- { 1471, 5, 0, 0, "MUL_F64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|25|(220<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #1471 = MUL_F64m
- { 1472, 5, 0, 0, "MUL_FI16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|25|(222<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #1472 = MUL_FI16m
- { 1473, 5, 0, 0, "MUL_FI32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|25|(218<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #1473 = MUL_FI32m
- { 1474, 1, 0, 0, "MUL_FPrST0", 0|(1<<TID::UnmodeledSideEffects), 0|2|(9<<8)|(200<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #1474 = MUL_FPrST0
- { 1475, 1, 0, 0, "MUL_FST0r", 0|(1<<TID::UnmodeledSideEffects), 0|2|(3<<8)|(200<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #1475 = MUL_FST0r
- { 1476, 3, 1, 0, "MUL_Fp32", 0, 0|(4<<16), NULL, NULL, NULL, OperandInfo32 }, // Inst #1476 = MUL_Fp32
- { 1477, 7, 1, 0, "MUL_Fp32m", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #1477 = MUL_Fp32m
- { 1478, 3, 1, 0, "MUL_Fp64", 0, 0|(4<<16), NULL, NULL, NULL, OperandInfo34 }, // Inst #1478 = MUL_Fp64
- { 1479, 7, 1, 0, "MUL_Fp64m", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #1479 = MUL_Fp64m
- { 1480, 7, 1, 0, "MUL_Fp64m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #1480 = MUL_Fp64m32
- { 1481, 3, 1, 0, "MUL_Fp80", 0, 0|(4<<16), NULL, NULL, NULL, OperandInfo36 }, // Inst #1481 = MUL_Fp80
- { 1482, 7, 1, 0, "MUL_Fp80m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #1482 = MUL_Fp80m32
- { 1483, 7, 1, 0, "MUL_Fp80m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #1483 = MUL_Fp80m64
- { 1484, 7, 1, 0, "MUL_FpI16m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #1484 = MUL_FpI16m32
- { 1485, 7, 1, 0, "MUL_FpI16m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #1485 = MUL_FpI16m64
- { 1486, 7, 1, 0, "MUL_FpI16m80", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #1486 = MUL_FpI16m80
- { 1487, 7, 1, 0, "MUL_FpI32m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #1487 = MUL_FpI32m32
- { 1488, 7, 1, 0, "MUL_FpI32m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #1488 = MUL_FpI32m64
- { 1489, 7, 1, 0, "MUL_FpI32m80", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #1489 = MUL_FpI32m80
- { 1490, 1, 0, 0, "MUL_FrST0", 0|(1<<TID::UnmodeledSideEffects), 0|2|(7<<8)|(200<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #1490 = MUL_FrST0
- { 1491, 0, 0, 0, "MWAIT", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|38|(1<<8)|(1<<24), NULL, NULL, NULL, 0 }, // Inst #1491 = MWAIT
- { 1492, 5, 0, 0, "NEG16m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|27|(1<<6)|(247<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1492 = NEG16m
- { 1493, 2, 1, 0, "NEG16r", 0, 0|19|(1<<6)|(247<<24), NULL, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #1493 = NEG16r
- { 1494, 5, 0, 0, "NEG32m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|27|(247<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1494 = NEG32m
- { 1495, 2, 1, 0, "NEG32r", 0, 0|19|(247<<24), NULL, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #1495 = NEG32r
- { 1496, 5, 0, 0, "NEG64m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|27|(1<<12)|(247<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1496 = NEG64m
- { 1497, 2, 1, 0, "NEG64r", 0, 0|19|(1<<12)|(247<<24), NULL, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #1497 = NEG64r
- { 1498, 5, 0, 0, "NEG8m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|27|(246<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1498 = NEG8m
- { 1499, 2, 1, 0, "NEG8r", 0, 0|19|(246<<24), NULL, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #1499 = NEG8r
- { 1500, 0, 0, 0, "NOOP", 0, 0|1|(144<<24), NULL, NULL, NULL, 0 }, // Inst #1500 = NOOP
- { 1501, 5, 0, 0, "NOOPL", 0, 0|24|(1<<8)|(31<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #1501 = NOOPL
- { 1502, 5, 0, 0, "NOOPW", 0, 0|24|(1<<6)|(1<<8)|(31<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #1502 = NOOPW
- { 1503, 5, 0, 0, "NOT16m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|26|(1<<6)|(247<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #1503 = NOT16m
- { 1504, 2, 1, 0, "NOT16r", 0, 0|18|(1<<6)|(247<<24), NULL, NULL, NULL, OperandInfo91 }, // Inst #1504 = NOT16r
- { 1505, 5, 0, 0, "NOT32m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|26|(247<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #1505 = NOT32m
- { 1506, 2, 1, 0, "NOT32r", 0, 0|18|(247<<24), NULL, NULL, NULL, OperandInfo52 }, // Inst #1506 = NOT32r
- { 1507, 5, 0, 0, "NOT64m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|26|(1<<12)|(247<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #1507 = NOT64m
- { 1508, 2, 1, 0, "NOT64r", 0, 0|18|(1<<12)|(247<<24), NULL, NULL, NULL, OperandInfo53 }, // Inst #1508 = NOT64r
- { 1509, 5, 0, 0, "NOT8m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|26|(246<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #1509 = NOT8m
- { 1510, 2, 1, 0, "NOT8r", 0, 0|18|(246<<24), NULL, NULL, NULL, OperandInfo92 }, // Inst #1510 = NOT8r
- { 1511, 1, 0, 0, "OR16i16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(3<<13)|(13<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #1511 = OR16i16
- { 1512, 6, 0, 0, "OR16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<6)|(3<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1512 = OR16mi
- { 1513, 6, 0, 0, "OR16mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<6)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1513 = OR16mi8
- { 1514, 6, 0, 0, "OR16mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<6)|(9<<24), NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #1514 = OR16mr
- { 1515, 3, 1, 0, "OR16ri", 0, 0|17|(1<<6)|(3<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #1515 = OR16ri
- { 1516, 3, 1, 0, "OR16ri8", 0, 0|17|(1<<6)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #1516 = OR16ri8
- { 1517, 7, 1, 0, "OR16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(11<<24), NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #1517 = OR16rm
- { 1518, 3, 1, 0, "OR16rr", 0|(1<<TID::Commutable), 0|3|(1<<6)|(9<<24), NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1518 = OR16rr
- { 1519, 3, 1, 0, "OR16rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(11<<24), NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1519 = OR16rr_REV
- { 1520, 1, 0, 0, "OR32i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<13)|(13<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #1520 = OR32i32
- { 1521, 6, 0, 0, "OR32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1521 = OR32mi
- { 1522, 6, 0, 0, "OR32mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1522 = OR32mi8
- { 1523, 6, 0, 0, "OR32mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(9<<24), NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #1523 = OR32mr
- { 1524, 3, 1, 0, "OR32ri", 0, 0|17|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #1524 = OR32ri
- { 1525, 3, 1, 0, "OR32ri8", 0, 0|17|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #1525 = OR32ri8
- { 1526, 7, 1, 0, "OR32rm", 0|(1<<TID::MayLoad), 0|6|(11<<24), NULL, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #1526 = OR32rm
- { 1527, 3, 1, 0, "OR32rr", 0|(1<<TID::Commutable), 0|3|(9<<24), NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #1527 = OR32rr
- { 1528, 3, 1, 0, "OR32rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(11<<24), NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #1528 = OR32rr_REV
- { 1529, 1, 0, 0, "OR64i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(4<<13)|(13<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #1529 = OR64i32
- { 1530, 6, 0, 0, "OR64mi32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<12)|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1530 = OR64mi32
- { 1531, 6, 0, 0, "OR64mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<12)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1531 = OR64mi8
- { 1532, 6, 0, 0, "OR64mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<12)|(9<<24), NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #1532 = OR64mr
- { 1533, 3, 1, 0, "OR64ri32", 0, 0|17|(1<<12)|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #1533 = OR64ri32
- { 1534, 3, 1, 0, "OR64ri8", 0, 0|17|(1<<12)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #1534 = OR64ri8
- { 1535, 7, 1, 0, "OR64rm", 0|(1<<TID::MayLoad), 0|6|(1<<12)|(11<<24), NULL, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #1535 = OR64rm
- { 1536, 3, 1, 0, "OR64rr", 0|(1<<TID::Commutable), 0|3|(1<<12)|(9<<24), NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #1536 = OR64rr
- { 1537, 3, 1, 0, "OR64rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<12)|(11<<24), NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #1537 = OR64rr_REV
- { 1538, 1, 0, 0, "OR8i8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<13)|(12<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #1538 = OR8i8
- { 1539, 6, 0, 0, "OR8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<13)|(128<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1539 = OR8mi
- { 1540, 6, 0, 0, "OR8mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(8<<24), NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #1540 = OR8mr
- { 1541, 3, 1, 0, "OR8ri", 0, 0|17|(1<<13)|(128<<24), NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #1541 = OR8ri
- { 1542, 7, 1, 0, "OR8rm", 0|(1<<TID::MayLoad), 0|6|(10<<24), NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #1542 = OR8rm
- { 1543, 3, 1, 0, "OR8rr", 0|(1<<TID::Commutable), 0|3|(8<<24), NULL, ImplicitList1, Barriers1, OperandInfo23 }, // Inst #1543 = OR8rr
- { 1544, 3, 1, 0, "OR8rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(10<<24), NULL, ImplicitList1, Barriers1, OperandInfo23 }, // Inst #1544 = OR8rr_REV
- { 1545, 7, 1, 0, "ORPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(86<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1545 = ORPDrm
- { 1546, 3, 1, 0, "ORPDrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(86<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1546 = ORPDrr
- { 1547, 7, 1, 0, "ORPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(86<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1547 = ORPSrm
- { 1548, 3, 1, 0, "ORPSrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(86<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1548 = ORPSrr
- { 1549, 1, 0, 0, "OUT16ir", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(1<<13)|(231<<24), ImplicitList12, NULL, NULL, OperandInfo5 }, // Inst #1549 = OUT16ir
- { 1550, 0, 0, 0, "OUT16rr", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(239<<24), ImplicitList39, NULL, NULL, 0 }, // Inst #1550 = OUT16rr
- { 1551, 1, 0, 0, "OUT32ir", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<13)|(231<<24), ImplicitList13, NULL, NULL, OperandInfo5 }, // Inst #1551 = OUT32ir
- { 1552, 0, 0, 0, "OUT32rr", 0|(1<<TID::UnmodeledSideEffects), 0|1|(239<<24), ImplicitList40, NULL, NULL, 0 }, // Inst #1552 = OUT32rr
- { 1553, 1, 0, 0, "OUT8ir", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<13)|(230<<24), ImplicitList11, NULL, NULL, OperandInfo5 }, // Inst #1553 = OUT8ir
- { 1554, 0, 0, 0, "OUT8rr", 0|(1<<TID::UnmodeledSideEffects), 0|1|(238<<24), ImplicitList41, NULL, NULL, 0 }, // Inst #1554 = OUT8rr
- { 1555, 0, 0, 0, "OUTSB", 0|(1<<TID::UnmodeledSideEffects), 0|1|(110<<24), NULL, NULL, NULL, 0 }, // Inst #1555 = OUTSB
- { 1556, 0, 0, 0, "OUTSD", 0|(1<<TID::UnmodeledSideEffects), 0|1|(111<<24), NULL, NULL, NULL, 0 }, // Inst #1556 = OUTSD
- { 1557, 0, 0, 0, "OUTSW", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(111<<24), NULL, NULL, NULL, 0 }, // Inst #1557 = OUTSW
- { 1558, 6, 1, 0, "PABSBrm128", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(1<<13)|(28<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1558 = PABSBrm128
- { 1559, 6, 1, 0, "PABSBrm64", 0|(1<<TID::MayLoad), 0|6|(13<<8)|(1<<13)|(28<<24), NULL, NULL, NULL, OperandInfo117 }, // Inst #1559 = PABSBrm64
- { 1560, 2, 1, 0, "PABSBrr128", 0, 0|5|(1<<6)|(13<<8)|(1<<13)|(28<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1560 = PABSBrr128
- { 1561, 2, 1, 0, "PABSBrr64", 0, 0|5|(13<<8)|(1<<13)|(28<<24), NULL, NULL, NULL, OperandInfo129 }, // Inst #1561 = PABSBrr64
- { 1562, 6, 1, 0, "PABSDrm128", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(1<<13)|(30<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1562 = PABSDrm128
- { 1563, 6, 1, 0, "PABSDrm64", 0|(1<<TID::MayLoad), 0|6|(13<<8)|(1<<13)|(30<<24), NULL, NULL, NULL, OperandInfo117 }, // Inst #1563 = PABSDrm64
- { 1564, 2, 1, 0, "PABSDrr128", 0, 0|5|(1<<6)|(13<<8)|(1<<13)|(30<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1564 = PABSDrr128
- { 1565, 2, 1, 0, "PABSDrr64", 0, 0|5|(13<<8)|(1<<13)|(30<<24), NULL, NULL, NULL, OperandInfo129 }, // Inst #1565 = PABSDrr64
- { 1566, 6, 1, 0, "PABSWrm128", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(1<<13)|(29<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1566 = PABSWrm128
- { 1567, 6, 1, 0, "PABSWrm64", 0|(1<<TID::MayLoad), 0|6|(13<<8)|(1<<13)|(29<<24), NULL, NULL, NULL, OperandInfo117 }, // Inst #1567 = PABSWrm64
- { 1568, 2, 1, 0, "PABSWrr128", 0, 0|5|(1<<6)|(13<<8)|(1<<13)|(29<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1568 = PABSWrr128
- { 1569, 2, 1, 0, "PABSWrr64", 0, 0|5|(13<<8)|(1<<13)|(29<<24), NULL, NULL, NULL, OperandInfo129 }, // Inst #1569 = PABSWrr64
- { 1570, 7, 1, 0, "PACKSSDWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(107<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1570 = PACKSSDWrm
- { 1571, 3, 1, 0, "PACKSSDWrr", 0, 0|5|(1<<6)|(1<<8)|(107<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1571 = PACKSSDWrr
- { 1572, 7, 1, 0, "PACKSSWBrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(99<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1572 = PACKSSWBrm
- { 1573, 3, 1, 0, "PACKSSWBrr", 0, 0|5|(1<<6)|(1<<8)|(99<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1573 = PACKSSWBrr
- { 1574, 7, 1, 0, "PACKUSDWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(43<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1574 = PACKUSDWrm
- { 1575, 3, 1, 0, "PACKUSDWrr", 0, 0|5|(1<<6)|(13<<8)|(43<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1575 = PACKUSDWrr
- { 1576, 7, 1, 0, "PACKUSWBrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(103<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1576 = PACKUSWBrm
- { 1577, 3, 1, 0, "PACKUSWBrr", 0, 0|5|(1<<6)|(1<<8)|(103<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1577 = PACKUSWBrr
- { 1578, 7, 1, 0, "PADDBrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(252<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1578 = PADDBrm
- { 1579, 3, 1, 0, "PADDBrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(252<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1579 = PADDBrr
- { 1580, 7, 1, 0, "PADDDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(254<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1580 = PADDDrm
- { 1581, 3, 1, 0, "PADDDrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(254<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1581 = PADDDrr
- { 1582, 7, 1, 0, "PADDQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(212<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1582 = PADDQrm
- { 1583, 3, 1, 0, "PADDQrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(212<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1583 = PADDQrr
- { 1584, 7, 1, 0, "PADDSBrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(236<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1584 = PADDSBrm
- { 1585, 3, 1, 0, "PADDSBrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(236<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1585 = PADDSBrr
- { 1586, 7, 1, 0, "PADDSWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(237<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1586 = PADDSWrm
- { 1587, 3, 1, 0, "PADDSWrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(237<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1587 = PADDSWrr
- { 1588, 7, 1, 0, "PADDUSBrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(220<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1588 = PADDUSBrm
- { 1589, 3, 1, 0, "PADDUSBrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(220<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1589 = PADDUSBrr
- { 1590, 7, 1, 0, "PADDUSWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(221<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1590 = PADDUSWrm
- { 1591, 3, 1, 0, "PADDUSWrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(221<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1591 = PADDUSWrr
- { 1592, 7, 1, 0, "PADDWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(253<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1592 = PADDWrm
- { 1593, 3, 1, 0, "PADDWrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(253<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1593 = PADDWrr
- { 1594, 8, 1, 0, "PALIGNR128rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(14<<8)|(1<<13)|(15<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #1594 = PALIGNR128rm
- { 1595, 4, 1, 0, "PALIGNR128rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(14<<8)|(1<<13)|(15<<24), NULL, NULL, NULL, OperandInfo45 }, // Inst #1595 = PALIGNR128rr
- { 1596, 8, 1, 0, "PALIGNR64rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(14<<8)|(1<<13)|(15<<24), NULL, NULL, NULL, OperandInfo139 }, // Inst #1596 = PALIGNR64rm
- { 1597, 4, 1, 0, "PALIGNR64rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(14<<8)|(1<<13)|(15<<24), NULL, NULL, NULL, OperandInfo182 }, // Inst #1597 = PALIGNR64rr
- { 1598, 7, 1, 0, "PANDNrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(223<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1598 = PANDNrm
- { 1599, 3, 1, 0, "PANDNrr", 0, 0|5|(1<<6)|(1<<8)|(223<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1599 = PANDNrr
- { 1600, 7, 1, 0, "PANDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(219<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1600 = PANDrm
- { 1601, 3, 1, 0, "PANDrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(219<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1601 = PANDrr
- { 1602, 7, 1, 0, "PAVGBrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(224<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1602 = PAVGBrm
- { 1603, 3, 1, 0, "PAVGBrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(224<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1603 = PAVGBrr
- { 1604, 7, 1, 0, "PAVGWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(227<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1604 = PAVGWrm
- { 1605, 3, 1, 0, "PAVGWrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(227<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1605 = PAVGWrr
- { 1606, 7, 1, 0, "PBLENDVBrm0", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(16<<24), ImplicitList8, NULL, NULL, OperandInfo24 }, // Inst #1606 = PBLENDVBrm0
- { 1607, 3, 1, 0, "PBLENDVBrr0", 0, 0|5|(1<<6)|(13<<8)|(16<<24), ImplicitList8, NULL, NULL, OperandInfo25 }, // Inst #1607 = PBLENDVBrr0
- { 1608, 8, 1, 0, "PBLENDWrmi", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(1<<13)|(14<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #1608 = PBLENDWrmi
- { 1609, 4, 1, 0, "PBLENDWrri", 0, 0|5|(1<<6)|(14<<8)|(1<<13)|(14<<24), NULL, NULL, NULL, OperandInfo45 }, // Inst #1609 = PBLENDWrri
- { 1610, 7, 1, 0, "PCMPEQBrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(116<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1610 = PCMPEQBrm
- { 1611, 3, 1, 0, "PCMPEQBrr", 0, 0|5|(1<<6)|(1<<8)|(116<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1611 = PCMPEQBrr
- { 1612, 7, 1, 0, "PCMPEQDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(118<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1612 = PCMPEQDrm
- { 1613, 3, 1, 0, "PCMPEQDrr", 0, 0|5|(1<<6)|(1<<8)|(118<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1613 = PCMPEQDrr
- { 1614, 7, 1, 0, "PCMPEQQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(41<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1614 = PCMPEQQrm
- { 1615, 3, 1, 0, "PCMPEQQrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(13<<8)|(41<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1615 = PCMPEQQrr
- { 1616, 7, 1, 0, "PCMPEQWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(117<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1616 = PCMPEQWrm
- { 1617, 3, 1, 0, "PCMPEQWrr", 0, 0|5|(1<<6)|(1<<8)|(117<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1617 = PCMPEQWrr
- { 1618, 7, 0, 0, "PCMPESTRIArm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(97<<24), ImplicitList14, ImplicitList42, Barriers1, OperandInfo183 }, // Inst #1618 = PCMPESTRIArm
- { 1619, 3, 0, 0, "PCMPESTRIArr", 0, 0|5|(1<<6)|(14<<8)|(97<<24), ImplicitList14, ImplicitList42, Barriers1, OperandInfo184 }, // Inst #1619 = PCMPESTRIArr
- { 1620, 7, 0, 0, "PCMPESTRICrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(97<<24), ImplicitList14, ImplicitList42, Barriers1, OperandInfo183 }, // Inst #1620 = PCMPESTRICrm
- { 1621, 3, 0, 0, "PCMPESTRICrr", 0, 0|5|(1<<6)|(14<<8)|(97<<24), ImplicitList14, ImplicitList42, Barriers1, OperandInfo184 }, // Inst #1621 = PCMPESTRICrr
- { 1622, 7, 0, 0, "PCMPESTRIOrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(97<<24), ImplicitList14, ImplicitList42, Barriers1, OperandInfo183 }, // Inst #1622 = PCMPESTRIOrm
- { 1623, 3, 0, 0, "PCMPESTRIOrr", 0, 0|5|(1<<6)|(14<<8)|(97<<24), ImplicitList14, ImplicitList42, Barriers1, OperandInfo184 }, // Inst #1623 = PCMPESTRIOrr
- { 1624, 7, 0, 0, "PCMPESTRISrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(97<<24), ImplicitList14, ImplicitList42, Barriers1, OperandInfo183 }, // Inst #1624 = PCMPESTRISrm
- { 1625, 3, 0, 0, "PCMPESTRISrr", 0, 0|5|(1<<6)|(14<<8)|(97<<24), ImplicitList14, ImplicitList42, Barriers1, OperandInfo184 }, // Inst #1625 = PCMPESTRISrr
- { 1626, 7, 0, 0, "PCMPESTRIZrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(97<<24), ImplicitList14, ImplicitList42, Barriers1, OperandInfo183 }, // Inst #1626 = PCMPESTRIZrm
- { 1627, 3, 0, 0, "PCMPESTRIZrr", 0, 0|5|(1<<6)|(14<<8)|(97<<24), ImplicitList14, ImplicitList42, Barriers1, OperandInfo184 }, // Inst #1627 = PCMPESTRIZrr
- { 1628, 7, 0, 0, "PCMPESTRIrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(97<<24), ImplicitList14, ImplicitList42, Barriers1, OperandInfo183 }, // Inst #1628 = PCMPESTRIrm
- { 1629, 3, 0, 0, "PCMPESTRIrr", 0, 0|5|(1<<6)|(14<<8)|(97<<24), ImplicitList14, ImplicitList42, Barriers1, OperandInfo184 }, // Inst #1629 = PCMPESTRIrr
- { 1630, 8, 1, 0, "PCMPESTRM128MEM", 0|(1<<TID::MayLoad)|(1<<TID::UsesCustomInserter), 0|(1<<6)|(14<<8), ImplicitList14, ImplicitList1, Barriers1, OperandInfo185 }, // Inst #1630 = PCMPESTRM128MEM
- { 1631, 4, 1, 0, "PCMPESTRM128REG", 0|(1<<TID::UsesCustomInserter), 0|(1<<6)|(14<<8), ImplicitList14, ImplicitList1, Barriers1, OperandInfo66 }, // Inst #1631 = PCMPESTRM128REG
- { 1632, 7, 0, 0, "PCMPESTRM128rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(14<<8)|(96<<24), ImplicitList14, ImplicitList43, Barriers1, OperandInfo183 }, // Inst #1632 = PCMPESTRM128rm
- { 1633, 3, 0, 0, "PCMPESTRM128rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(14<<8)|(96<<24), ImplicitList14, ImplicitList43, Barriers1, OperandInfo184 }, // Inst #1633 = PCMPESTRM128rr
- { 1634, 7, 1, 0, "PCMPGTBrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(100<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1634 = PCMPGTBrm
- { 1635, 3, 1, 0, "PCMPGTBrr", 0, 0|5|(1<<6)|(1<<8)|(100<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1635 = PCMPGTBrr
- { 1636, 7, 1, 0, "PCMPGTDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(102<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1636 = PCMPGTDrm
- { 1637, 3, 1, 0, "PCMPGTDrr", 0, 0|5|(1<<6)|(1<<8)|(102<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1637 = PCMPGTDrr
- { 1638, 7, 1, 0, "PCMPGTQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(55<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1638 = PCMPGTQrm
- { 1639, 3, 1, 0, "PCMPGTQrr", 0, 0|5|(1<<6)|(13<<8)|(55<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1639 = PCMPGTQrr
- { 1640, 7, 1, 0, "PCMPGTWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(101<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1640 = PCMPGTWrm
- { 1641, 3, 1, 0, "PCMPGTWrr", 0, 0|5|(1<<6)|(1<<8)|(101<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1641 = PCMPGTWrr
- { 1642, 7, 0, 0, "PCMPISTRIArm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(99<<24), NULL, ImplicitList42, Barriers1, OperandInfo183 }, // Inst #1642 = PCMPISTRIArm
- { 1643, 3, 0, 0, "PCMPISTRIArr", 0, 0|5|(1<<6)|(14<<8)|(99<<24), NULL, ImplicitList42, Barriers1, OperandInfo184 }, // Inst #1643 = PCMPISTRIArr
- { 1644, 7, 0, 0, "PCMPISTRICrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(99<<24), NULL, ImplicitList42, Barriers1, OperandInfo183 }, // Inst #1644 = PCMPISTRICrm
- { 1645, 3, 0, 0, "PCMPISTRICrr", 0, 0|5|(1<<6)|(14<<8)|(99<<24), NULL, ImplicitList42, Barriers1, OperandInfo184 }, // Inst #1645 = PCMPISTRICrr
- { 1646, 7, 0, 0, "PCMPISTRIOrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(99<<24), NULL, ImplicitList42, Barriers1, OperandInfo183 }, // Inst #1646 = PCMPISTRIOrm
- { 1647, 3, 0, 0, "PCMPISTRIOrr", 0, 0|5|(1<<6)|(14<<8)|(99<<24), NULL, ImplicitList42, Barriers1, OperandInfo184 }, // Inst #1647 = PCMPISTRIOrr
- { 1648, 7, 0, 0, "PCMPISTRISrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(99<<24), NULL, ImplicitList42, Barriers1, OperandInfo183 }, // Inst #1648 = PCMPISTRISrm
- { 1649, 3, 0, 0, "PCMPISTRISrr", 0, 0|5|(1<<6)|(14<<8)|(99<<24), NULL, ImplicitList42, Barriers1, OperandInfo184 }, // Inst #1649 = PCMPISTRISrr
- { 1650, 7, 0, 0, "PCMPISTRIZrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(99<<24), NULL, ImplicitList42, Barriers1, OperandInfo183 }, // Inst #1650 = PCMPISTRIZrm
- { 1651, 3, 0, 0, "PCMPISTRIZrr", 0, 0|5|(1<<6)|(14<<8)|(99<<24), NULL, ImplicitList42, Barriers1, OperandInfo184 }, // Inst #1651 = PCMPISTRIZrr
- { 1652, 7, 0, 0, "PCMPISTRIrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(99<<24), NULL, ImplicitList42, Barriers1, OperandInfo183 }, // Inst #1652 = PCMPISTRIrm
- { 1653, 3, 0, 0, "PCMPISTRIrr", 0, 0|5|(1<<6)|(14<<8)|(99<<24), NULL, ImplicitList42, Barriers1, OperandInfo184 }, // Inst #1653 = PCMPISTRIrr
- { 1654, 8, 1, 0, "PCMPISTRM128MEM", 0|(1<<TID::MayLoad)|(1<<TID::UsesCustomInserter), 0|(1<<6)|(14<<8), NULL, ImplicitList1, Barriers1, OperandInfo185 }, // Inst #1654 = PCMPISTRM128MEM
- { 1655, 4, 1, 0, "PCMPISTRM128REG", 0|(1<<TID::UsesCustomInserter), 0|(1<<6)|(14<<8), NULL, ImplicitList1, Barriers1, OperandInfo66 }, // Inst #1655 = PCMPISTRM128REG
- { 1656, 7, 0, 0, "PCMPISTRM128rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(14<<8)|(98<<24), NULL, ImplicitList43, Barriers1, OperandInfo183 }, // Inst #1656 = PCMPISTRM128rm
- { 1657, 3, 0, 0, "PCMPISTRM128rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(14<<8)|(98<<24), NULL, ImplicitList43, Barriers1, OperandInfo184 }, // Inst #1657 = PCMPISTRM128rr
- { 1658, 7, 0, 0, "PEXTRBmr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<6)|(14<<8)|(1<<13)|(20<<24), NULL, NULL, NULL, OperandInfo95 }, // Inst #1658 = PEXTRBmr
- { 1659, 3, 1, 0, "PEXTRBrr", 0, 0|3|(1<<6)|(14<<8)|(1<<13)|(20<<24), NULL, NULL, NULL, OperandInfo96 }, // Inst #1659 = PEXTRBrr
- { 1660, 7, 0, 0, "PEXTRDmr", 0|(1<<TID::MayStore), 0|4|(1<<6)|(14<<8)|(1<<13)|(22<<24), NULL, NULL, NULL, OperandInfo95 }, // Inst #1660 = PEXTRDmr
- { 1661, 3, 1, 0, "PEXTRDrr", 0, 0|3|(1<<6)|(14<<8)|(1<<13)|(22<<24), NULL, NULL, NULL, OperandInfo96 }, // Inst #1661 = PEXTRDrr
- { 1662, 7, 0, 0, "PEXTRQmr", 0|(1<<TID::MayStore), 0|4|(1<<6)|(14<<8)|(1<<12)|(1<<13)|(22<<24), NULL, NULL, NULL, OperandInfo95 }, // Inst #1662 = PEXTRQmr
- { 1663, 3, 1, 0, "PEXTRQrr", 0, 0|3|(1<<6)|(14<<8)|(1<<12)|(1<<13)|(22<<24), NULL, NULL, NULL, OperandInfo186 }, // Inst #1663 = PEXTRQrr
- { 1664, 7, 0, 0, "PEXTRWmr", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<6)|(14<<8)|(1<<13)|(21<<24), NULL, NULL, NULL, OperandInfo95 }, // Inst #1664 = PEXTRWmr
- { 1665, 3, 1, 0, "PEXTRWri", 0, 0|5|(1<<6)|(1<<8)|(1<<13)|(197<<24), NULL, NULL, NULL, OperandInfo96 }, // Inst #1665 = PEXTRWri
- { 1666, 7, 1, 0, "PHADDDrm128", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(1<<13)|(2<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1666 = PHADDDrm128
- { 1667, 7, 1, 0, "PHADDDrm64", 0|(1<<TID::MayLoad), 0|6|(13<<8)|(1<<13)|(2<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1667 = PHADDDrm64
- { 1668, 3, 1, 0, "PHADDDrr128", 0, 0|5|(1<<6)|(13<<8)|(1<<13)|(2<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1668 = PHADDDrr128
- { 1669, 3, 1, 0, "PHADDDrr64", 0, 0|5|(13<<8)|(1<<13)|(2<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1669 = PHADDDrr64
- { 1670, 7, 1, 0, "PHADDSWrm128", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(1<<13)|(3<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1670 = PHADDSWrm128
- { 1671, 7, 1, 0, "PHADDSWrm64", 0|(1<<TID::MayLoad), 0|6|(13<<8)|(1<<13)|(3<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1671 = PHADDSWrm64
- { 1672, 3, 1, 0, "PHADDSWrr128", 0, 0|5|(1<<6)|(13<<8)|(1<<13)|(3<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1672 = PHADDSWrr128
- { 1673, 3, 1, 0, "PHADDSWrr64", 0, 0|5|(13<<8)|(1<<13)|(3<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1673 = PHADDSWrr64
- { 1674, 7, 1, 0, "PHADDWrm128", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(1<<13)|(1<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1674 = PHADDWrm128
- { 1675, 7, 1, 0, "PHADDWrm64", 0|(1<<TID::MayLoad), 0|6|(13<<8)|(1<<13)|(1<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1675 = PHADDWrm64
- { 1676, 3, 1, 0, "PHADDWrr128", 0, 0|5|(1<<6)|(13<<8)|(1<<13)|(1<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1676 = PHADDWrr128
- { 1677, 3, 1, 0, "PHADDWrr64", 0, 0|5|(13<<8)|(1<<13)|(1<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1677 = PHADDWrr64
- { 1678, 6, 1, 0, "PHMINPOSUWrm128", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(65<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1678 = PHMINPOSUWrm128
- { 1679, 2, 1, 0, "PHMINPOSUWrr128", 0, 0|5|(1<<6)|(13<<8)|(65<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1679 = PHMINPOSUWrr128
- { 1680, 7, 1, 0, "PHSUBDrm128", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(1<<13)|(6<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1680 = PHSUBDrm128
- { 1681, 7, 1, 0, "PHSUBDrm64", 0|(1<<TID::MayLoad), 0|6|(13<<8)|(1<<13)|(6<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1681 = PHSUBDrm64
- { 1682, 3, 1, 0, "PHSUBDrr128", 0, 0|5|(1<<6)|(13<<8)|(1<<13)|(6<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1682 = PHSUBDrr128
- { 1683, 3, 1, 0, "PHSUBDrr64", 0, 0|5|(13<<8)|(1<<13)|(6<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1683 = PHSUBDrr64
- { 1684, 7, 1, 0, "PHSUBSWrm128", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(1<<13)|(7<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1684 = PHSUBSWrm128
- { 1685, 7, 1, 0, "PHSUBSWrm64", 0|(1<<TID::MayLoad), 0|6|(13<<8)|(1<<13)|(7<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1685 = PHSUBSWrm64
- { 1686, 3, 1, 0, "PHSUBSWrr128", 0, 0|5|(1<<6)|(13<<8)|(1<<13)|(7<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1686 = PHSUBSWrr128
- { 1687, 3, 1, 0, "PHSUBSWrr64", 0, 0|5|(13<<8)|(1<<13)|(7<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1687 = PHSUBSWrr64
- { 1688, 7, 1, 0, "PHSUBWrm128", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(1<<13)|(5<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1688 = PHSUBWrm128
- { 1689, 7, 1, 0, "PHSUBWrm64", 0|(1<<TID::MayLoad), 0|6|(13<<8)|(1<<13)|(5<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1689 = PHSUBWrm64
- { 1690, 3, 1, 0, "PHSUBWrr128", 0, 0|5|(1<<6)|(13<<8)|(1<<13)|(5<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1690 = PHSUBWrr128
- { 1691, 3, 1, 0, "PHSUBWrr64", 0, 0|5|(13<<8)|(1<<13)|(5<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1691 = PHSUBWrr64
- { 1692, 8, 1, 0, "PINSRBrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(1<<13)|(32<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #1692 = PINSRBrm
- { 1693, 4, 1, 0, "PINSRBrr", 0, 0|5|(1<<6)|(14<<8)|(1<<13)|(32<<24), NULL, NULL, NULL, OperandInfo187 }, // Inst #1693 = PINSRBrr
- { 1694, 8, 1, 0, "PINSRDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(1<<13)|(34<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #1694 = PINSRDrm
- { 1695, 4, 1, 0, "PINSRDrr", 0, 0|5|(1<<6)|(14<<8)|(1<<13)|(34<<24), NULL, NULL, NULL, OperandInfo187 }, // Inst #1695 = PINSRDrr
- { 1696, 8, 1, 0, "PINSRQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(1<<12)|(1<<13)|(34<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #1696 = PINSRQrm
- { 1697, 4, 1, 0, "PINSRQrr", 0, 0|5|(1<<6)|(14<<8)|(1<<12)|(1<<13)|(34<<24), NULL, NULL, NULL, OperandInfo188 }, // Inst #1697 = PINSRQrr
- { 1698, 8, 1, 0, "PINSRWrmi", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(1<<13)|(196<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #1698 = PINSRWrmi
- { 1699, 4, 1, 0, "PINSRWrri", 0, 0|5|(1<<6)|(1<<8)|(1<<13)|(196<<24), NULL, NULL, NULL, OperandInfo187 }, // Inst #1699 = PINSRWrri
- { 1700, 7, 1, 0, "PMADDUBSWrm128", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(1<<13)|(4<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1700 = PMADDUBSWrm128
- { 1701, 7, 1, 0, "PMADDUBSWrm64", 0|(1<<TID::MayLoad), 0|6|(13<<8)|(1<<13)|(4<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1701 = PMADDUBSWrm64
- { 1702, 3, 1, 0, "PMADDUBSWrr128", 0, 0|5|(1<<6)|(13<<8)|(1<<13)|(4<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1702 = PMADDUBSWrr128
- { 1703, 3, 1, 0, "PMADDUBSWrr64", 0, 0|5|(13<<8)|(1<<13)|(4<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1703 = PMADDUBSWrr64
- { 1704, 7, 1, 0, "PMADDWDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(245<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1704 = PMADDWDrm
- { 1705, 3, 1, 0, "PMADDWDrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(245<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1705 = PMADDWDrr
- { 1706, 7, 1, 0, "PMAXSBrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(60<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1706 = PMAXSBrm
- { 1707, 3, 1, 0, "PMAXSBrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(13<<8)|(60<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1707 = PMAXSBrr
- { 1708, 7, 1, 0, "PMAXSDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(61<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1708 = PMAXSDrm
- { 1709, 3, 1, 0, "PMAXSDrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(13<<8)|(61<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1709 = PMAXSDrr
- { 1710, 7, 1, 0, "PMAXSWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(238<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1710 = PMAXSWrm
- { 1711, 3, 1, 0, "PMAXSWrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(238<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1711 = PMAXSWrr
- { 1712, 7, 1, 0, "PMAXUBrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(222<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1712 = PMAXUBrm
- { 1713, 3, 1, 0, "PMAXUBrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(222<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1713 = PMAXUBrr
- { 1714, 7, 1, 0, "PMAXUDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(63<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1714 = PMAXUDrm
- { 1715, 3, 1, 0, "PMAXUDrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(13<<8)|(63<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1715 = PMAXUDrr
- { 1716, 7, 1, 0, "PMAXUWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(62<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1716 = PMAXUWrm
- { 1717, 3, 1, 0, "PMAXUWrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(13<<8)|(62<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1717 = PMAXUWrr
- { 1718, 7, 1, 0, "PMINSBrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(56<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1718 = PMINSBrm
- { 1719, 3, 1, 0, "PMINSBrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(13<<8)|(56<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1719 = PMINSBrr
- { 1720, 7, 1, 0, "PMINSDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(57<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1720 = PMINSDrm
- { 1721, 3, 1, 0, "PMINSDrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(13<<8)|(57<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1721 = PMINSDrr
- { 1722, 7, 1, 0, "PMINSWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(234<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1722 = PMINSWrm
- { 1723, 3, 1, 0, "PMINSWrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(234<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1723 = PMINSWrr
- { 1724, 7, 1, 0, "PMINUBrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(218<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1724 = PMINUBrm
- { 1725, 3, 1, 0, "PMINUBrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(218<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1725 = PMINUBrr
- { 1726, 7, 1, 0, "PMINUDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(59<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1726 = PMINUDrm
- { 1727, 3, 1, 0, "PMINUDrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(13<<8)|(59<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1727 = PMINUDrr
- { 1728, 7, 1, 0, "PMINUWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(58<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1728 = PMINUWrm
- { 1729, 3, 1, 0, "PMINUWrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(13<<8)|(58<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1729 = PMINUWrr
- { 1730, 2, 1, 0, "PMOVMSKBrr", 0, 0|5|(1<<6)|(1<<8)|(215<<24), NULL, NULL, NULL, OperandInfo122 }, // Inst #1730 = PMOVMSKBrr
- { 1731, 6, 1, 0, "PMOVSXBDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(33<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1731 = PMOVSXBDrm
- { 1732, 2, 1, 0, "PMOVSXBDrr", 0, 0|5|(1<<6)|(13<<8)|(33<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1732 = PMOVSXBDrr
- { 1733, 6, 1, 0, "PMOVSXBQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(34<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1733 = PMOVSXBQrm
- { 1734, 2, 1, 0, "PMOVSXBQrr", 0, 0|5|(1<<6)|(13<<8)|(34<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1734 = PMOVSXBQrr
- { 1735, 6, 1, 0, "PMOVSXBWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(32<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1735 = PMOVSXBWrm
- { 1736, 2, 1, 0, "PMOVSXBWrr", 0, 0|5|(1<<6)|(13<<8)|(32<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1736 = PMOVSXBWrr
- { 1737, 6, 1, 0, "PMOVSXDQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(37<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1737 = PMOVSXDQrm
- { 1738, 2, 1, 0, "PMOVSXDQrr", 0, 0|5|(1<<6)|(13<<8)|(37<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1738 = PMOVSXDQrr
- { 1739, 6, 1, 0, "PMOVSXWDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(35<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1739 = PMOVSXWDrm
- { 1740, 2, 1, 0, "PMOVSXWDrr", 0, 0|5|(1<<6)|(13<<8)|(35<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1740 = PMOVSXWDrr
- { 1741, 6, 1, 0, "PMOVSXWQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(36<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1741 = PMOVSXWQrm
- { 1742, 2, 1, 0, "PMOVSXWQrr", 0, 0|5|(1<<6)|(13<<8)|(36<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1742 = PMOVSXWQrr
- { 1743, 6, 1, 0, "PMOVZXBDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(49<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1743 = PMOVZXBDrm
- { 1744, 2, 1, 0, "PMOVZXBDrr", 0, 0|5|(1<<6)|(13<<8)|(49<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1744 = PMOVZXBDrr
- { 1745, 6, 1, 0, "PMOVZXBQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(50<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1745 = PMOVZXBQrm
- { 1746, 2, 1, 0, "PMOVZXBQrr", 0, 0|5|(1<<6)|(13<<8)|(50<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1746 = PMOVZXBQrr
- { 1747, 6, 1, 0, "PMOVZXBWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(48<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1747 = PMOVZXBWrm
- { 1748, 2, 1, 0, "PMOVZXBWrr", 0, 0|5|(1<<6)|(13<<8)|(48<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1748 = PMOVZXBWrr
- { 1749, 6, 1, 0, "PMOVZXDQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(53<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1749 = PMOVZXDQrm
- { 1750, 2, 1, 0, "PMOVZXDQrr", 0, 0|5|(1<<6)|(13<<8)|(53<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1750 = PMOVZXDQrr
- { 1751, 6, 1, 0, "PMOVZXWDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(51<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1751 = PMOVZXWDrm
- { 1752, 2, 1, 0, "PMOVZXWDrr", 0, 0|5|(1<<6)|(13<<8)|(51<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1752 = PMOVZXWDrr
- { 1753, 6, 1, 0, "PMOVZXWQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(52<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1753 = PMOVZXWQrm
- { 1754, 2, 1, 0, "PMOVZXWQrr", 0, 0|5|(1<<6)|(13<<8)|(52<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1754 = PMOVZXWQrr
- { 1755, 7, 1, 0, "PMULDQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(40<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1755 = PMULDQrm
- { 1756, 3, 1, 0, "PMULDQrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(13<<8)|(40<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1756 = PMULDQrr
- { 1757, 7, 1, 0, "PMULHRSWrm128", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(1<<13)|(11<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1757 = PMULHRSWrm128
- { 1758, 7, 1, 0, "PMULHRSWrm64", 0|(1<<TID::MayLoad), 0|6|(13<<8)|(1<<13)|(11<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1758 = PMULHRSWrm64
- { 1759, 3, 1, 0, "PMULHRSWrr128", 0|(1<<TID::Commutable), 0|5|(1<<6)|(13<<8)|(1<<13)|(11<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1759 = PMULHRSWrr128
- { 1760, 3, 1, 0, "PMULHRSWrr64", 0|(1<<TID::Commutable), 0|5|(13<<8)|(1<<13)|(11<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1760 = PMULHRSWrr64
- { 1761, 7, 1, 0, "PMULHUWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(228<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1761 = PMULHUWrm
- { 1762, 3, 1, 0, "PMULHUWrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(228<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1762 = PMULHUWrr
- { 1763, 7, 1, 0, "PMULHWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(229<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1763 = PMULHWrm
- { 1764, 3, 1, 0, "PMULHWrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(229<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1764 = PMULHWrr
- { 1765, 7, 1, 0, "PMULLDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(64<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1765 = PMULLDrm
- { 1766, 7, 1, 0, "PMULLDrm_int", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(64<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1766 = PMULLDrm_int
- { 1767, 3, 1, 0, "PMULLDrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(13<<8)|(64<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1767 = PMULLDrr
- { 1768, 3, 1, 0, "PMULLDrr_int", 0|(1<<TID::Commutable), 0|5|(1<<6)|(13<<8)|(64<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1768 = PMULLDrr_int
- { 1769, 7, 1, 0, "PMULLWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(213<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1769 = PMULLWrm
- { 1770, 3, 1, 0, "PMULLWrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(213<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1770 = PMULLWrr
- { 1771, 7, 1, 0, "PMULUDQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(244<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1771 = PMULUDQrm
- { 1772, 3, 1, 0, "PMULUDQrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(244<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1772 = PMULUDQrr
- { 1773, 1, 1, 0, "POP16r", 0|(1<<TID::MayLoad), 0|2|(1<<6)|(88<<24), ImplicitList2, ImplicitList2, NULL, OperandInfo93 }, // Inst #1773 = POP16r
- { 1774, 5, 1, 0, "POP16rmm", 0|(1<<TID::MayLoad), 0|24|(1<<6)|(143<<24), ImplicitList2, ImplicitList2, NULL, OperandInfo30 }, // Inst #1774 = POP16rmm
- { 1775, 1, 1, 0, "POP16rmr", 0|(1<<TID::MayLoad), 0|16|(1<<6)|(143<<24), ImplicitList2, ImplicitList2, NULL, OperandInfo93 }, // Inst #1775 = POP16rmr
- { 1776, 1, 1, 0, "POP32r", 0|(1<<TID::MayLoad), 0|2|(88<<24), ImplicitList2, ImplicitList2, NULL, OperandInfo57 }, // Inst #1776 = POP32r
- { 1777, 5, 1, 0, "POP32rmm", 0|(1<<TID::MayLoad), 0|24|(143<<24), ImplicitList2, ImplicitList2, NULL, OperandInfo30 }, // Inst #1777 = POP32rmm
- { 1778, 1, 1, 0, "POP32rmr", 0|(1<<TID::MayLoad), 0|16|(143<<24), ImplicitList2, ImplicitList2, NULL, OperandInfo57 }, // Inst #1778 = POP32rmr
- { 1779, 1, 1, 0, "POP64r", 0|(1<<TID::MayLoad), 0|2|(88<<24), ImplicitList4, ImplicitList4, NULL, OperandInfo58 }, // Inst #1779 = POP64r
- { 1780, 5, 1, 0, "POP64rmm", 0|(1<<TID::MayLoad), 0|24|(143<<24), ImplicitList4, ImplicitList4, NULL, OperandInfo30 }, // Inst #1780 = POP64rmm
- { 1781, 1, 1, 0, "POP64rmr", 0|(1<<TID::MayLoad), 0|16|(143<<24), ImplicitList4, ImplicitList4, NULL, OperandInfo58 }, // Inst #1781 = POP64rmr
- { 1782, 6, 1, 0, "POPCNT16rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<6)|(12<<8)|(184<<24), NULL, NULL, NULL, OperandInfo46 }, // Inst #1782 = POPCNT16rm
- { 1783, 2, 1, 0, "POPCNT16rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(12<<8)|(184<<24), NULL, NULL, NULL, OperandInfo47 }, // Inst #1783 = POPCNT16rr
- { 1784, 6, 1, 0, "POPCNT32rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(12<<8)|(184<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #1784 = POPCNT32rm
- { 1785, 2, 1, 0, "POPCNT32rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(12<<8)|(184<<24), NULL, NULL, NULL, OperandInfo49 }, // Inst #1785 = POPCNT32rr
- { 1786, 6, 1, 0, "POPCNT64rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(12<<8)|(1<<12)|(184<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #1786 = POPCNT64rm
- { 1787, 2, 1, 0, "POPCNT64rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(12<<8)|(1<<12)|(184<<24), NULL, NULL, NULL, OperandInfo51 }, // Inst #1787 = POPCNT64rr
- { 1788, 0, 0, 0, "POPF", 0|(1<<TID::MayLoad), 0|1|(1<<6)|(157<<24), ImplicitList2, ImplicitList3, Barriers1, 0 }, // Inst #1788 = POPF
- { 1789, 0, 0, 0, "POPFD", 0|(1<<TID::MayLoad), 0|1|(157<<24), ImplicitList2, ImplicitList3, Barriers1, 0 }, // Inst #1789 = POPFD
- { 1790, 0, 0, 0, "POPFQ", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(157<<24), ImplicitList4, ImplicitList5, Barriers1, 0 }, // Inst #1790 = POPFQ
- { 1791, 0, 0, 0, "POPFS16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(1<<8)|(161<<24), NULL, NULL, NULL, 0 }, // Inst #1791 = POPFS16
- { 1792, 0, 0, 0, "POPFS32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(161<<24), NULL, NULL, NULL, 0 }, // Inst #1792 = POPFS32
- { 1793, 0, 0, 0, "POPFS64", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(161<<24), NULL, NULL, NULL, 0 }, // Inst #1793 = POPFS64
- { 1794, 0, 0, 0, "POPGS16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(1<<8)|(169<<24), NULL, NULL, NULL, 0 }, // Inst #1794 = POPGS16
- { 1795, 0, 0, 0, "POPGS32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(169<<24), NULL, NULL, NULL, 0 }, // Inst #1795 = POPGS32
- { 1796, 0, 0, 0, "POPGS64", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(169<<24), NULL, NULL, NULL, 0 }, // Inst #1796 = POPGS64
- { 1797, 7, 1, 0, "PORrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(235<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1797 = PORrm
- { 1798, 3, 1, 0, "PORrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(235<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1798 = PORrr
- { 1799, 5, 0, 0, "PREFETCHNTA", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<8)|(24<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #1799 = PREFETCHNTA
- { 1800, 5, 0, 0, "PREFETCHT0", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<8)|(24<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #1800 = PREFETCHT0
- { 1801, 5, 0, 0, "PREFETCHT1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|26|(1<<8)|(24<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #1801 = PREFETCHT1
- { 1802, 5, 0, 0, "PREFETCHT2", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|27|(1<<8)|(24<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #1802 = PREFETCHT2
- { 1803, 7, 1, 0, "PSADBWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(246<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1803 = PSADBWrm
- { 1804, 3, 1, 0, "PSADBWrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(246<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1804 = PSADBWrr
- { 1805, 7, 1, 0, "PSHUFBrm128", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(1<<13), NULL, NULL, NULL, OperandInfo24 }, // Inst #1805 = PSHUFBrm128
- { 1806, 7, 1, 0, "PSHUFBrm64", 0|(1<<TID::MayLoad), 0|6|(13<<8)|(1<<13), NULL, NULL, NULL, OperandInfo136 }, // Inst #1806 = PSHUFBrm64
- { 1807, 3, 1, 0, "PSHUFBrr128", 0, 0|5|(1<<6)|(13<<8)|(1<<13), NULL, NULL, NULL, OperandInfo25 }, // Inst #1807 = PSHUFBrr128
- { 1808, 3, 1, 0, "PSHUFBrr64", 0, 0|5|(13<<8)|(1<<13), NULL, NULL, NULL, OperandInfo137 }, // Inst #1808 = PSHUFBrr64
- { 1809, 7, 1, 0, "PSHUFDmi", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(1<<13)|(112<<24), NULL, NULL, NULL, OperandInfo183 }, // Inst #1809 = PSHUFDmi
- { 1810, 3, 1, 0, "PSHUFDri", 0, 0|5|(1<<6)|(1<<8)|(1<<13)|(112<<24), NULL, NULL, NULL, OperandInfo184 }, // Inst #1810 = PSHUFDri
- { 1811, 7, 1, 0, "PSHUFHWmi", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(1<<13)|(112<<24), NULL, NULL, NULL, OperandInfo183 }, // Inst #1811 = PSHUFHWmi
- { 1812, 3, 1, 0, "PSHUFHWri", 0, 0|5|(12<<8)|(1<<13)|(112<<24), NULL, NULL, NULL, OperandInfo184 }, // Inst #1812 = PSHUFHWri
- { 1813, 7, 1, 0, "PSHUFLWmi", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(1<<13)|(112<<24), NULL, NULL, NULL, OperandInfo183 }, // Inst #1813 = PSHUFLWmi
- { 1814, 3, 1, 0, "PSHUFLWri", 0, 0|5|(11<<8)|(1<<13)|(112<<24), NULL, NULL, NULL, OperandInfo184 }, // Inst #1814 = PSHUFLWri
- { 1815, 7, 1, 0, "PSIGNBrm128", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(1<<13)|(8<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1815 = PSIGNBrm128
- { 1816, 7, 1, 0, "PSIGNBrm64", 0|(1<<TID::MayLoad), 0|6|(13<<8)|(1<<13)|(8<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1816 = PSIGNBrm64
- { 1817, 3, 1, 0, "PSIGNBrr128", 0, 0|5|(1<<6)|(13<<8)|(1<<13)|(8<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1817 = PSIGNBrr128
- { 1818, 3, 1, 0, "PSIGNBrr64", 0, 0|5|(13<<8)|(1<<13)|(8<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1818 = PSIGNBrr64
- { 1819, 7, 1, 0, "PSIGNDrm128", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(1<<13)|(10<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1819 = PSIGNDrm128
- { 1820, 7, 1, 0, "PSIGNDrm64", 0|(1<<TID::MayLoad), 0|6|(13<<8)|(1<<13)|(10<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1820 = PSIGNDrm64
- { 1821, 3, 1, 0, "PSIGNDrr128", 0, 0|5|(1<<6)|(13<<8)|(1<<13)|(10<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1821 = PSIGNDrr128
- { 1822, 3, 1, 0, "PSIGNDrr64", 0, 0|5|(13<<8)|(1<<13)|(10<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1822 = PSIGNDrr64
- { 1823, 7, 1, 0, "PSIGNWrm128", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(1<<13)|(9<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1823 = PSIGNWrm128
- { 1824, 7, 1, 0, "PSIGNWrm64", 0|(1<<TID::MayLoad), 0|6|(13<<8)|(1<<13)|(9<<24), NULL, NULL, NULL, OperandInfo136 }, // Inst #1824 = PSIGNWrm64
- { 1825, 3, 1, 0, "PSIGNWrr128", 0, 0|5|(1<<6)|(13<<8)|(1<<13)|(9<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1825 = PSIGNWrr128
- { 1826, 3, 1, 0, "PSIGNWrr64", 0, 0|5|(13<<8)|(1<<13)|(9<<24), NULL, NULL, NULL, OperandInfo137 }, // Inst #1826 = PSIGNWrr64
- { 1827, 3, 1, 0, "PSLLDQri", 0, 0|23|(1<<6)|(1<<8)|(1<<13)|(115<<24), NULL, NULL, NULL, OperandInfo189 }, // Inst #1827 = PSLLDQri
- { 1828, 3, 1, 0, "PSLLDri", 0, 0|22|(1<<6)|(1<<8)|(1<<13)|(114<<24), NULL, NULL, NULL, OperandInfo189 }, // Inst #1828 = PSLLDri
- { 1829, 7, 1, 0, "PSLLDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(242<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1829 = PSLLDrm
- { 1830, 3, 1, 0, "PSLLDrr", 0, 0|5|(1<<6)|(1<<8)|(242<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1830 = PSLLDrr
- { 1831, 3, 1, 0, "PSLLQri", 0, 0|22|(1<<6)|(1<<8)|(1<<13)|(115<<24), NULL, NULL, NULL, OperandInfo189 }, // Inst #1831 = PSLLQri
- { 1832, 7, 1, 0, "PSLLQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(243<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1832 = PSLLQrm
- { 1833, 3, 1, 0, "PSLLQrr", 0, 0|5|(1<<6)|(1<<8)|(243<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1833 = PSLLQrr
- { 1834, 3, 1, 0, "PSLLWri", 0, 0|22|(1<<6)|(1<<8)|(1<<13)|(113<<24), NULL, NULL, NULL, OperandInfo189 }, // Inst #1834 = PSLLWri
- { 1835, 7, 1, 0, "PSLLWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(241<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1835 = PSLLWrm
- { 1836, 3, 1, 0, "PSLLWrr", 0, 0|5|(1<<6)|(1<<8)|(241<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1836 = PSLLWrr
- { 1837, 3, 1, 0, "PSRADri", 0, 0|20|(1<<6)|(1<<8)|(1<<13)|(114<<24), NULL, NULL, NULL, OperandInfo189 }, // Inst #1837 = PSRADri
- { 1838, 7, 1, 0, "PSRADrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(226<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1838 = PSRADrm
- { 1839, 3, 1, 0, "PSRADrr", 0, 0|5|(1<<6)|(1<<8)|(226<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1839 = PSRADrr
- { 1840, 3, 1, 0, "PSRAWri", 0, 0|20|(1<<6)|(1<<8)|(1<<13)|(113<<24), NULL, NULL, NULL, OperandInfo189 }, // Inst #1840 = PSRAWri
- { 1841, 7, 1, 0, "PSRAWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(225<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1841 = PSRAWrm
- { 1842, 3, 1, 0, "PSRAWrr", 0, 0|5|(1<<6)|(1<<8)|(225<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1842 = PSRAWrr
- { 1843, 3, 1, 0, "PSRLDQri", 0, 0|19|(1<<6)|(1<<8)|(1<<13)|(115<<24), NULL, NULL, NULL, OperandInfo189 }, // Inst #1843 = PSRLDQri
- { 1844, 3, 1, 0, "PSRLDri", 0, 0|18|(1<<6)|(1<<8)|(1<<13)|(114<<24), NULL, NULL, NULL, OperandInfo189 }, // Inst #1844 = PSRLDri
- { 1845, 7, 1, 0, "PSRLDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(210<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1845 = PSRLDrm
- { 1846, 3, 1, 0, "PSRLDrr", 0, 0|5|(1<<6)|(1<<8)|(210<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1846 = PSRLDrr
- { 1847, 3, 1, 0, "PSRLQri", 0, 0|18|(1<<6)|(1<<8)|(1<<13)|(115<<24), NULL, NULL, NULL, OperandInfo189 }, // Inst #1847 = PSRLQri
- { 1848, 7, 1, 0, "PSRLQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(211<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1848 = PSRLQrm
- { 1849, 3, 1, 0, "PSRLQrr", 0, 0|5|(1<<6)|(1<<8)|(211<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1849 = PSRLQrr
- { 1850, 3, 1, 0, "PSRLWri", 0, 0|18|(1<<6)|(1<<8)|(1<<13)|(113<<24), NULL, NULL, NULL, OperandInfo189 }, // Inst #1850 = PSRLWri
- { 1851, 7, 1, 0, "PSRLWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(209<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1851 = PSRLWrm
- { 1852, 3, 1, 0, "PSRLWrr", 0, 0|5|(1<<6)|(1<<8)|(209<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1852 = PSRLWrr
- { 1853, 7, 1, 0, "PSUBBrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(248<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1853 = PSUBBrm
- { 1854, 3, 1, 0, "PSUBBrr", 0, 0|5|(1<<6)|(1<<8)|(248<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1854 = PSUBBrr
- { 1855, 7, 1, 0, "PSUBDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(250<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1855 = PSUBDrm
- { 1856, 3, 1, 0, "PSUBDrr", 0, 0|5|(1<<6)|(1<<8)|(250<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1856 = PSUBDrr
- { 1857, 7, 1, 0, "PSUBQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(251<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1857 = PSUBQrm
- { 1858, 3, 1, 0, "PSUBQrr", 0, 0|5|(1<<6)|(1<<8)|(251<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1858 = PSUBQrr
- { 1859, 7, 1, 0, "PSUBSBrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(232<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1859 = PSUBSBrm
- { 1860, 3, 1, 0, "PSUBSBrr", 0, 0|5|(1<<6)|(1<<8)|(232<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1860 = PSUBSBrr
- { 1861, 7, 1, 0, "PSUBSWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(233<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1861 = PSUBSWrm
- { 1862, 3, 1, 0, "PSUBSWrr", 0, 0|5|(1<<6)|(1<<8)|(233<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1862 = PSUBSWrr
- { 1863, 7, 1, 0, "PSUBUSBrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(216<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1863 = PSUBUSBrm
- { 1864, 3, 1, 0, "PSUBUSBrr", 0, 0|5|(1<<6)|(1<<8)|(216<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1864 = PSUBUSBrr
- { 1865, 7, 1, 0, "PSUBUSWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(217<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1865 = PSUBUSWrm
- { 1866, 3, 1, 0, "PSUBUSWrr", 0, 0|5|(1<<6)|(1<<8)|(217<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1866 = PSUBUSWrr
- { 1867, 7, 1, 0, "PSUBWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(249<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1867 = PSUBWrm
- { 1868, 3, 1, 0, "PSUBWrr", 0, 0|5|(1<<6)|(1<<8)|(249<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1868 = PSUBWrr
- { 1869, 6, 0, 0, "PTESTrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(13<<8)|(23<<24), NULL, ImplicitList1, Barriers1, OperandInfo74 }, // Inst #1869 = PTESTrm
- { 1870, 2, 0, 0, "PTESTrr", 0, 0|5|(1<<6)|(13<<8)|(23<<24), NULL, ImplicitList1, Barriers1, OperandInfo75 }, // Inst #1870 = PTESTrr
- { 1871, 7, 1, 0, "PUNPCKHBWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(104<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1871 = PUNPCKHBWrm
- { 1872, 3, 1, 0, "PUNPCKHBWrr", 0, 0|5|(1<<6)|(1<<8)|(104<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1872 = PUNPCKHBWrr
- { 1873, 7, 1, 0, "PUNPCKHDQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(106<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1873 = PUNPCKHDQrm
- { 1874, 3, 1, 0, "PUNPCKHDQrr", 0, 0|5|(1<<6)|(1<<8)|(106<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1874 = PUNPCKHDQrr
- { 1875, 7, 1, 0, "PUNPCKHQDQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(109<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1875 = PUNPCKHQDQrm
- { 1876, 3, 1, 0, "PUNPCKHQDQrr", 0, 0|5|(1<<6)|(1<<8)|(109<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1876 = PUNPCKHQDQrr
- { 1877, 7, 1, 0, "PUNPCKHWDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(105<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1877 = PUNPCKHWDrm
- { 1878, 3, 1, 0, "PUNPCKHWDrr", 0, 0|5|(1<<6)|(1<<8)|(105<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1878 = PUNPCKHWDrr
- { 1879, 7, 1, 0, "PUNPCKLBWrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(96<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1879 = PUNPCKLBWrm
- { 1880, 3, 1, 0, "PUNPCKLBWrr", 0, 0|5|(1<<6)|(1<<8)|(96<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1880 = PUNPCKLBWrr
- { 1881, 7, 1, 0, "PUNPCKLDQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(98<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1881 = PUNPCKLDQrm
- { 1882, 3, 1, 0, "PUNPCKLDQrr", 0, 0|5|(1<<6)|(1<<8)|(98<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1882 = PUNPCKLDQrr
- { 1883, 7, 1, 0, "PUNPCKLQDQrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(108<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1883 = PUNPCKLQDQrm
- { 1884, 3, 1, 0, "PUNPCKLQDQrr", 0, 0|5|(1<<6)|(1<<8)|(108<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1884 = PUNPCKLQDQrr
- { 1885, 7, 1, 0, "PUNPCKLWDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(97<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1885 = PUNPCKLWDrm
- { 1886, 3, 1, 0, "PUNPCKLWDrr", 0, 0|5|(1<<6)|(1<<8)|(97<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1886 = PUNPCKLWDrr
- { 1887, 1, 0, 0, "PUSH16r", 0|(1<<TID::MayStore), 0|2|(1<<6)|(80<<24), ImplicitList2, ImplicitList2, NULL, OperandInfo93 }, // Inst #1887 = PUSH16r
- { 1888, 5, 0, 0, "PUSH16rmm", 0|(1<<TID::MayStore), 0|30|(1<<6)|(255<<24), ImplicitList2, ImplicitList2, NULL, OperandInfo30 }, // Inst #1888 = PUSH16rmm
- { 1889, 1, 0, 0, "PUSH16rmr", 0|(1<<TID::MayStore), 0|22|(1<<6)|(255<<24), ImplicitList2, ImplicitList2, NULL, OperandInfo93 }, // Inst #1889 = PUSH16rmr
- { 1890, 1, 0, 0, "PUSH32i16", 0|(1<<TID::MayStore), 0|1|(3<<13)|(104<<24), ImplicitList2, ImplicitList2, NULL, OperandInfo5 }, // Inst #1890 = PUSH32i16
- { 1891, 1, 0, 0, "PUSH32i32", 0|(1<<TID::MayStore), 0|1|(4<<13)|(104<<24), ImplicitList2, ImplicitList2, NULL, OperandInfo5 }, // Inst #1891 = PUSH32i32
- { 1892, 1, 0, 0, "PUSH32i8", 0|(1<<TID::MayStore), 0|1|(1<<13)|(106<<24), ImplicitList2, ImplicitList2, NULL, OperandInfo5 }, // Inst #1892 = PUSH32i8
- { 1893, 1, 0, 0, "PUSH32r", 0|(1<<TID::MayStore), 0|2|(80<<24), ImplicitList2, ImplicitList2, NULL, OperandInfo57 }, // Inst #1893 = PUSH32r
- { 1894, 5, 0, 0, "PUSH32rmm", 0|(1<<TID::MayStore), 0|30|(255<<24), ImplicitList2, ImplicitList2, NULL, OperandInfo30 }, // Inst #1894 = PUSH32rmm
- { 1895, 1, 0, 0, "PUSH32rmr", 0|(1<<TID::MayStore), 0|22|(255<<24), ImplicitList2, ImplicitList2, NULL, OperandInfo57 }, // Inst #1895 = PUSH32rmr
- { 1896, 1, 0, 0, "PUSH64i16", 0|(1<<TID::MayStore), 0|1|(3<<13)|(104<<24), ImplicitList4, ImplicitList4, NULL, OperandInfo5 }, // Inst #1896 = PUSH64i16
- { 1897, 1, 0, 0, "PUSH64i32", 0|(1<<TID::MayStore), 0|1|(4<<13)|(104<<24), ImplicitList4, ImplicitList4, NULL, OperandInfo5 }, // Inst #1897 = PUSH64i32
- { 1898, 1, 0, 0, "PUSH64i8", 0|(1<<TID::MayStore), 0|1|(1<<13)|(106<<24), ImplicitList4, ImplicitList4, NULL, OperandInfo5 }, // Inst #1898 = PUSH64i8
- { 1899, 1, 0, 0, "PUSH64r", 0|(1<<TID::MayStore), 0|2|(80<<24), ImplicitList4, ImplicitList4, NULL, OperandInfo58 }, // Inst #1899 = PUSH64r
- { 1900, 5, 0, 0, "PUSH64rmm", 0|(1<<TID::MayStore), 0|30|(255<<24), ImplicitList4, ImplicitList4, NULL, OperandInfo30 }, // Inst #1900 = PUSH64rmm
- { 1901, 1, 0, 0, "PUSH64rmr", 0|(1<<TID::MayStore), 0|22|(255<<24), ImplicitList4, ImplicitList4, NULL, OperandInfo58 }, // Inst #1901 = PUSH64rmr
- { 1902, 0, 0, 0, "PUSHF", 0|(1<<TID::MayStore), 0|1|(1<<6)|(156<<24), ImplicitList3, ImplicitList2, NULL, 0 }, // Inst #1902 = PUSHF
- { 1903, 0, 0, 0, "PUSHFD", 0|(1<<TID::MayStore), 0|1|(156<<24), ImplicitList3, ImplicitList2, NULL, 0 }, // Inst #1903 = PUSHFD
- { 1904, 0, 0, 0, "PUSHFQ64", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|1|(156<<24), ImplicitList5, ImplicitList4, NULL, 0 }, // Inst #1904 = PUSHFQ64
- { 1905, 0, 0, 0, "PUSHFS16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(1<<8)|(160<<24), NULL, NULL, NULL, 0 }, // Inst #1905 = PUSHFS16
- { 1906, 0, 0, 0, "PUSHFS32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(160<<24), NULL, NULL, NULL, 0 }, // Inst #1906 = PUSHFS32
- { 1907, 0, 0, 0, "PUSHFS64", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(160<<24), NULL, NULL, NULL, 0 }, // Inst #1907 = PUSHFS64
- { 1908, 0, 0, 0, "PUSHGS16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(1<<8)|(168<<24), NULL, NULL, NULL, 0 }, // Inst #1908 = PUSHGS16
- { 1909, 0, 0, 0, "PUSHGS32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(168<<24), NULL, NULL, NULL, 0 }, // Inst #1909 = PUSHGS32
- { 1910, 0, 0, 0, "PUSHGS64", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(168<<24), NULL, NULL, NULL, 0 }, // Inst #1910 = PUSHGS64
- { 1911, 7, 1, 0, "PXORrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(239<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #1911 = PXORrm
- { 1912, 3, 1, 0, "PXORrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(239<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #1912 = PXORrr
- { 1913, 5, 0, 0, "RCL16m1", 0|(1<<TID::UnmodeledSideEffects), 0|26|(1<<6)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1913 = RCL16m1
- { 1914, 5, 0, 0, "RCL16mCL", 0|(1<<TID::UnmodeledSideEffects), 0|26|(1<<6)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1914 = RCL16mCL
- { 1915, 6, 0, 0, "RCL16mi", 0|(1<<TID::UnmodeledSideEffects), 0|26|(1<<6)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1915 = RCL16mi
- { 1916, 2, 1, 0, "RCL16r1", 0|(1<<TID::UnmodeledSideEffects), 0|18|(1<<6)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #1916 = RCL16r1
- { 1917, 2, 1, 0, "RCL16rCL", 0|(1<<TID::UnmodeledSideEffects), 0|18|(1<<6)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #1917 = RCL16rCL
- { 1918, 3, 1, 0, "RCL16ri", 0|(1<<TID::UnmodeledSideEffects), 0|18|(1<<6)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #1918 = RCL16ri
- { 1919, 5, 0, 0, "RCL32m1", 0|(1<<TID::UnmodeledSideEffects), 0|26|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1919 = RCL32m1
- { 1920, 5, 0, 0, "RCL32mCL", 0|(1<<TID::UnmodeledSideEffects), 0|26|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1920 = RCL32mCL
- { 1921, 6, 0, 0, "RCL32mi", 0|(1<<TID::UnmodeledSideEffects), 0|26|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1921 = RCL32mi
- { 1922, 2, 1, 0, "RCL32r1", 0|(1<<TID::UnmodeledSideEffects), 0|18|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #1922 = RCL32r1
- { 1923, 2, 1, 0, "RCL32rCL", 0|(1<<TID::UnmodeledSideEffects), 0|18|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #1923 = RCL32rCL
- { 1924, 3, 1, 0, "RCL32ri", 0|(1<<TID::UnmodeledSideEffects), 0|18|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #1924 = RCL32ri
- { 1925, 5, 0, 0, "RCL64m1", 0|(1<<TID::UnmodeledSideEffects), 0|26|(1<<12)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1925 = RCL64m1
- { 1926, 5, 0, 0, "RCL64mCL", 0|(1<<TID::UnmodeledSideEffects), 0|26|(1<<12)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1926 = RCL64mCL
- { 1927, 6, 0, 0, "RCL64mi", 0|(1<<TID::UnmodeledSideEffects), 0|26|(1<<12)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1927 = RCL64mi
- { 1928, 2, 1, 0, "RCL64r1", 0|(1<<TID::UnmodeledSideEffects), 0|18|(1<<12)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #1928 = RCL64r1
- { 1929, 2, 1, 0, "RCL64rCL", 0|(1<<TID::UnmodeledSideEffects), 0|18|(1<<12)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #1929 = RCL64rCL
- { 1930, 3, 1, 0, "RCL64ri", 0|(1<<TID::UnmodeledSideEffects), 0|18|(1<<12)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #1930 = RCL64ri
- { 1931, 5, 0, 0, "RCL8m1", 0|(1<<TID::UnmodeledSideEffects), 0|26|(208<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1931 = RCL8m1
- { 1932, 5, 0, 0, "RCL8mCL", 0|(1<<TID::UnmodeledSideEffects), 0|26|(210<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1932 = RCL8mCL
- { 1933, 6, 0, 0, "RCL8mi", 0|(1<<TID::UnmodeledSideEffects), 0|26|(1<<13)|(192<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1933 = RCL8mi
- { 1934, 2, 1, 0, "RCL8r1", 0|(1<<TID::UnmodeledSideEffects), 0|18|(208<<24), NULL, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #1934 = RCL8r1
- { 1935, 2, 1, 0, "RCL8rCL", 0|(1<<TID::UnmodeledSideEffects), 0|18|(210<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #1935 = RCL8rCL
- { 1936, 3, 1, 0, "RCL8ri", 0|(1<<TID::UnmodeledSideEffects), 0|18|(1<<13)|(192<<24), NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #1936 = RCL8ri
- { 1937, 6, 1, 0, "RCPPSm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(83<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1937 = RCPPSm
- { 1938, 6, 1, 0, "RCPPSm_Int", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(83<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1938 = RCPPSm_Int
- { 1939, 2, 1, 0, "RCPPSr", 0, 0|5|(1<<8)|(83<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1939 = RCPPSr
- { 1940, 2, 1, 0, "RCPPSr_Int", 0, 0|5|(1<<8)|(83<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1940 = RCPPSr_Int
- { 1941, 6, 1, 0, "RCPSSm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(83<<24), NULL, NULL, NULL, OperandInfo80 }, // Inst #1941 = RCPSSm
- { 1942, 6, 1, 0, "RCPSSm_Int", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(83<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #1942 = RCPSSm_Int
- { 1943, 2, 1, 0, "RCPSSr", 0, 0|5|(12<<8)|(83<<24), NULL, NULL, NULL, OperandInfo106 }, // Inst #1943 = RCPSSr
- { 1944, 2, 1, 0, "RCPSSr_Int", 0, 0|5|(12<<8)|(83<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #1944 = RCPSSr_Int
- { 1945, 5, 0, 0, "RCR16m1", 0|(1<<TID::UnmodeledSideEffects), 0|27|(1<<6)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1945 = RCR16m1
- { 1946, 5, 0, 0, "RCR16mCL", 0|(1<<TID::UnmodeledSideEffects), 0|27|(1<<6)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1946 = RCR16mCL
- { 1947, 6, 0, 0, "RCR16mi", 0|(1<<TID::UnmodeledSideEffects), 0|27|(1<<6)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1947 = RCR16mi
- { 1948, 2, 1, 0, "RCR16r1", 0|(1<<TID::UnmodeledSideEffects), 0|19|(1<<6)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #1948 = RCR16r1
- { 1949, 2, 1, 0, "RCR16rCL", 0|(1<<TID::UnmodeledSideEffects), 0|19|(1<<6)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #1949 = RCR16rCL
- { 1950, 3, 1, 0, "RCR16ri", 0|(1<<TID::UnmodeledSideEffects), 0|19|(1<<6)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #1950 = RCR16ri
- { 1951, 5, 0, 0, "RCR32m1", 0|(1<<TID::UnmodeledSideEffects), 0|27|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1951 = RCR32m1
- { 1952, 5, 0, 0, "RCR32mCL", 0|(1<<TID::UnmodeledSideEffects), 0|27|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1952 = RCR32mCL
- { 1953, 6, 0, 0, "RCR32mi", 0|(1<<TID::UnmodeledSideEffects), 0|27|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1953 = RCR32mi
- { 1954, 2, 1, 0, "RCR32r1", 0|(1<<TID::UnmodeledSideEffects), 0|19|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #1954 = RCR32r1
- { 1955, 2, 1, 0, "RCR32rCL", 0|(1<<TID::UnmodeledSideEffects), 0|19|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #1955 = RCR32rCL
- { 1956, 3, 1, 0, "RCR32ri", 0|(1<<TID::UnmodeledSideEffects), 0|19|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #1956 = RCR32ri
- { 1957, 5, 0, 0, "RCR64m1", 0|(1<<TID::UnmodeledSideEffects), 0|27|(1<<12)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1957 = RCR64m1
- { 1958, 5, 0, 0, "RCR64mCL", 0|(1<<TID::UnmodeledSideEffects), 0|27|(1<<12)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1958 = RCR64mCL
- { 1959, 6, 0, 0, "RCR64mi", 0|(1<<TID::UnmodeledSideEffects), 0|27|(1<<12)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1959 = RCR64mi
- { 1960, 2, 1, 0, "RCR64r1", 0|(1<<TID::UnmodeledSideEffects), 0|19|(1<<12)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #1960 = RCR64r1
- { 1961, 2, 1, 0, "RCR64rCL", 0|(1<<TID::UnmodeledSideEffects), 0|19|(1<<12)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #1961 = RCR64rCL
- { 1962, 3, 1, 0, "RCR64ri", 0|(1<<TID::UnmodeledSideEffects), 0|19|(1<<12)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #1962 = RCR64ri
- { 1963, 5, 0, 0, "RCR8m1", 0|(1<<TID::UnmodeledSideEffects), 0|27|(208<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1963 = RCR8m1
- { 1964, 5, 0, 0, "RCR8mCL", 0|(1<<TID::UnmodeledSideEffects), 0|27|(210<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1964 = RCR8mCL
- { 1965, 6, 0, 0, "RCR8mi", 0|(1<<TID::UnmodeledSideEffects), 0|27|(1<<13)|(192<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1965 = RCR8mi
- { 1966, 2, 1, 0, "RCR8r1", 0|(1<<TID::UnmodeledSideEffects), 0|19|(208<<24), NULL, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #1966 = RCR8r1
- { 1967, 2, 1, 0, "RCR8rCL", 0|(1<<TID::UnmodeledSideEffects), 0|19|(210<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #1967 = RCR8rCL
- { 1968, 3, 1, 0, "RCR8ri", 0|(1<<TID::UnmodeledSideEffects), 0|19|(1<<13)|(192<<24), NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #1968 = RCR8ri
- { 1969, 0, 0, 0, "RDMSR", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(50<<24), NULL, NULL, NULL, 0 }, // Inst #1969 = RDMSR
- { 1970, 0, 0, 0, "RDPMC", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(51<<24), NULL, NULL, NULL, 0 }, // Inst #1970 = RDPMC
- { 1971, 0, 0, 0, "RDTSC", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(49<<24), NULL, ImplicitList19, NULL, 0 }, // Inst #1971 = RDTSC
- { 1972, 0, 0, 0, "RDTSCP", 0|(1<<TID::UnmodeledSideEffects), 0|42|(1<<8)|(1<<24), NULL, ImplicitList45, NULL, 0 }, // Inst #1972 = RDTSCP
- { 1973, 0, 0, 0, "REPNE_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0|1|(242<<24), ImplicitList42, ImplicitList27, NULL, 0 }, // Inst #1973 = REPNE_PREFIX
- { 1974, 0, 0, 0, "REP_MOVSB", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|1|(2<<8)|(164<<24), ImplicitList46, ImplicitList46, NULL, 0 }, // Inst #1974 = REP_MOVSB
- { 1975, 0, 0, 0, "REP_MOVSD", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|1|(2<<8)|(165<<24), ImplicitList46, ImplicitList46, NULL, 0 }, // Inst #1975 = REP_MOVSD
- { 1976, 0, 0, 0, "REP_MOVSQ", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|1|(2<<8)|(1<<12)|(165<<24), ImplicitList47, ImplicitList47, NULL, 0 }, // Inst #1976 = REP_MOVSQ
- { 1977, 0, 0, 0, "REP_MOVSW", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|1|(1<<6)|(2<<8)|(165<<24), ImplicitList46, ImplicitList46, NULL, 0 }, // Inst #1977 = REP_MOVSW
- { 1978, 0, 0, 0, "REP_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0|1|(243<<24), ImplicitList42, ImplicitList27, NULL, 0 }, // Inst #1978 = REP_PREFIX
- { 1979, 0, 0, 0, "REP_STOSB", 0|(1<<TID::MayStore), 0|1|(2<<8)|(170<<24), ImplicitList48, ImplicitList49, NULL, 0 }, // Inst #1979 = REP_STOSB
- { 1980, 0, 0, 0, "REP_STOSD", 0|(1<<TID::MayStore), 0|1|(2<<8)|(171<<24), ImplicitList50, ImplicitList49, NULL, 0 }, // Inst #1980 = REP_STOSD
- { 1981, 0, 0, 0, "REP_STOSQ", 0|(1<<TID::MayStore), 0|1|(2<<8)|(1<<12)|(171<<24), ImplicitList51, ImplicitList52, NULL, 0 }, // Inst #1981 = REP_STOSQ
- { 1982, 0, 0, 0, "REP_STOSW", 0|(1<<TID::MayStore), 0|1|(1<<6)|(2<<8)|(171<<24), ImplicitList53, ImplicitList49, NULL, 0 }, // Inst #1982 = REP_STOSW
- { 1983, 0, 0, 0, "RET", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::Variadic), 0|1|(7<<16)|(195<<24), NULL, NULL, NULL, 0 }, // Inst #1983 = RET
- { 1984, 1, 0, 0, "RETI", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::Variadic), 0|1|(3<<13)|(7<<16)|(194<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #1984 = RETI
- { 1985, 5, 0, 0, "ROL16m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<6)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1985 = ROL16m1
- { 1986, 5, 0, 0, "ROL16mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<6)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1986 = ROL16mCL
- { 1987, 6, 0, 0, "ROL16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<6)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1987 = ROL16mi
- { 1988, 2, 1, 0, "ROL16r1", 0, 0|16|(1<<6)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #1988 = ROL16r1
- { 1989, 2, 1, 0, "ROL16rCL", 0, 0|16|(1<<6)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #1989 = ROL16rCL
- { 1990, 3, 1, 0, "ROL16ri", 0, 0|16|(1<<6)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #1990 = ROL16ri
- { 1991, 5, 0, 0, "ROL32m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1991 = ROL32m1
- { 1992, 5, 0, 0, "ROL32mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1992 = ROL32mCL
- { 1993, 6, 0, 0, "ROL32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1993 = ROL32mi
- { 1994, 2, 1, 0, "ROL32r1", 0, 0|16|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #1994 = ROL32r1
- { 1995, 2, 1, 0, "ROL32rCL", 0, 0|16|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #1995 = ROL32rCL
- { 1996, 3, 1, 0, "ROL32ri", 0, 0|16|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #1996 = ROL32ri
- { 1997, 5, 0, 0, "ROL64m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<12)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1997 = ROL64m1
- { 1998, 5, 0, 0, "ROL64mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<12)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #1998 = ROL64mCL
- { 1999, 6, 0, 0, "ROL64mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<12)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #1999 = ROL64mi
- { 2000, 2, 1, 0, "ROL64r1", 0, 0|16|(1<<12)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #2000 = ROL64r1
- { 2001, 2, 1, 0, "ROL64rCL", 0, 0|16|(1<<12)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #2001 = ROL64rCL
- { 2002, 3, 1, 0, "ROL64ri", 0, 0|16|(1<<12)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2002 = ROL64ri
- { 2003, 5, 0, 0, "ROL8m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(208<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2003 = ROL8m1
- { 2004, 5, 0, 0, "ROL8mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(210<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2004 = ROL8mCL
- { 2005, 6, 0, 0, "ROL8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|24|(1<<13)|(192<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2005 = ROL8mi
- { 2006, 2, 1, 0, "ROL8r1", 0, 0|16|(208<<24), NULL, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #2006 = ROL8r1
- { 2007, 2, 1, 0, "ROL8rCL", 0, 0|16|(210<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #2007 = ROL8rCL
- { 2008, 3, 1, 0, "ROL8ri", 0, 0|16|(1<<13)|(192<<24), NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #2008 = ROL8ri
- { 2009, 5, 0, 0, "ROR16m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<6)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2009 = ROR16m1
- { 2010, 5, 0, 0, "ROR16mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<6)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2010 = ROR16mCL
- { 2011, 6, 0, 0, "ROR16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<6)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2011 = ROR16mi
- { 2012, 2, 1, 0, "ROR16r1", 0, 0|17|(1<<6)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #2012 = ROR16r1
- { 2013, 2, 1, 0, "ROR16rCL", 0, 0|17|(1<<6)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #2013 = ROR16rCL
- { 2014, 3, 1, 0, "ROR16ri", 0, 0|17|(1<<6)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #2014 = ROR16ri
- { 2015, 5, 0, 0, "ROR32m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2015 = ROR32m1
- { 2016, 5, 0, 0, "ROR32mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2016 = ROR32mCL
- { 2017, 6, 0, 0, "ROR32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2017 = ROR32mi
- { 2018, 2, 1, 0, "ROR32r1", 0, 0|17|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #2018 = ROR32r1
- { 2019, 2, 1, 0, "ROR32rCL", 0, 0|17|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #2019 = ROR32rCL
- { 2020, 3, 1, 0, "ROR32ri", 0, 0|17|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2020 = ROR32ri
- { 2021, 5, 0, 0, "ROR64m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<12)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2021 = ROR64m1
- { 2022, 5, 0, 0, "ROR64mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<12)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2022 = ROR64mCL
- { 2023, 6, 0, 0, "ROR64mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<12)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2023 = ROR64mi
- { 2024, 2, 1, 0, "ROR64r1", 0, 0|17|(1<<12)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #2024 = ROR64r1
- { 2025, 2, 1, 0, "ROR64rCL", 0, 0|17|(1<<12)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #2025 = ROR64rCL
- { 2026, 3, 1, 0, "ROR64ri", 0, 0|17|(1<<12)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2026 = ROR64ri
- { 2027, 5, 0, 0, "ROR8m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(208<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2027 = ROR8m1
- { 2028, 5, 0, 0, "ROR8mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(210<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2028 = ROR8mCL
- { 2029, 6, 0, 0, "ROR8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|25|(1<<13)|(192<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2029 = ROR8mi
- { 2030, 2, 1, 0, "ROR8r1", 0, 0|17|(208<<24), NULL, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #2030 = ROR8r1
- { 2031, 2, 1, 0, "ROR8rCL", 0, 0|17|(210<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #2031 = ROR8rCL
- { 2032, 3, 1, 0, "ROR8ri", 0, 0|17|(1<<13)|(192<<24), NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #2032 = ROR8ri
- { 2033, 7, 1, 0, "ROUNDPDm_Int", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(1<<13)|(9<<24), NULL, NULL, NULL, OperandInfo183 }, // Inst #2033 = ROUNDPDm_Int
- { 2034, 3, 1, 0, "ROUNDPDr_Int", 0, 0|5|(1<<6)|(14<<8)|(1<<13)|(9<<24), NULL, NULL, NULL, OperandInfo184 }, // Inst #2034 = ROUNDPDr_Int
- { 2035, 7, 1, 0, "ROUNDPSm_Int", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(1<<13)|(8<<24), NULL, NULL, NULL, OperandInfo183 }, // Inst #2035 = ROUNDPSm_Int
- { 2036, 3, 1, 0, "ROUNDPSr_Int", 0, 0|5|(1<<6)|(14<<8)|(1<<13)|(8<<24), NULL, NULL, NULL, OperandInfo184 }, // Inst #2036 = ROUNDPSr_Int
- { 2037, 8, 1, 0, "ROUNDSDm_Int", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(1<<13)|(11<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #2037 = ROUNDSDm_Int
- { 2038, 4, 1, 0, "ROUNDSDr_Int", 0, 0|5|(1<<6)|(14<<8)|(1<<13)|(11<<24), NULL, NULL, NULL, OperandInfo45 }, // Inst #2038 = ROUNDSDr_Int
- { 2039, 8, 1, 0, "ROUNDSSm_Int", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(14<<8)|(1<<13)|(10<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #2039 = ROUNDSSm_Int
- { 2040, 4, 1, 0, "ROUNDSSr_Int", 0, 0|5|(1<<6)|(14<<8)|(1<<13)|(10<<24), NULL, NULL, NULL, OperandInfo45 }, // Inst #2040 = ROUNDSSr_Int
- { 2041, 0, 0, 0, "RSM", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(170<<24), NULL, NULL, NULL, 0 }, // Inst #2041 = RSM
- { 2042, 6, 1, 0, "RSQRTPSm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(82<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #2042 = RSQRTPSm
- { 2043, 6, 1, 0, "RSQRTPSm_Int", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(82<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #2043 = RSQRTPSm_Int
- { 2044, 2, 1, 0, "RSQRTPSr", 0, 0|5|(1<<8)|(82<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #2044 = RSQRTPSr
- { 2045, 2, 1, 0, "RSQRTPSr_Int", 0, 0|5|(1<<8)|(82<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #2045 = RSQRTPSr_Int
- { 2046, 6, 1, 0, "RSQRTSSm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(82<<24), NULL, NULL, NULL, OperandInfo80 }, // Inst #2046 = RSQRTSSm
- { 2047, 6, 1, 0, "RSQRTSSm_Int", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(82<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #2047 = RSQRTSSm_Int
- { 2048, 2, 1, 0, "RSQRTSSr", 0, 0|5|(12<<8)|(82<<24), NULL, NULL, NULL, OperandInfo106 }, // Inst #2048 = RSQRTSSr
- { 2049, 2, 1, 0, "RSQRTSSr_Int", 0, 0|5|(12<<8)|(82<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #2049 = RSQRTSSr_Int
- { 2050, 0, 0, 0, "SAHF", 0, 0|1|(158<<24), ImplicitList28, ImplicitList1, Barriers1, 0 }, // Inst #2050 = SAHF
- { 2051, 5, 0, 0, "SAR16m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|31|(1<<6)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2051 = SAR16m1
- { 2052, 5, 0, 0, "SAR16mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|31|(1<<6)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2052 = SAR16mCL
- { 2053, 6, 0, 0, "SAR16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|31|(1<<6)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2053 = SAR16mi
- { 2054, 2, 1, 0, "SAR16r1", 0, 0|23|(1<<6)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #2054 = SAR16r1
- { 2055, 2, 1, 0, "SAR16rCL", 0, 0|23|(1<<6)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #2055 = SAR16rCL
- { 2056, 3, 1, 0, "SAR16ri", 0, 0|23|(1<<6)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #2056 = SAR16ri
- { 2057, 5, 0, 0, "SAR32m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|31|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2057 = SAR32m1
- { 2058, 5, 0, 0, "SAR32mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|31|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2058 = SAR32mCL
- { 2059, 6, 0, 0, "SAR32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|31|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2059 = SAR32mi
- { 2060, 2, 1, 0, "SAR32r1", 0, 0|23|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #2060 = SAR32r1
- { 2061, 2, 1, 0, "SAR32rCL", 0, 0|23|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #2061 = SAR32rCL
- { 2062, 3, 1, 0, "SAR32ri", 0, 0|23|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2062 = SAR32ri
- { 2063, 5, 0, 0, "SAR64m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|31|(1<<12)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2063 = SAR64m1
- { 2064, 5, 0, 0, "SAR64mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|31|(1<<12)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2064 = SAR64mCL
- { 2065, 6, 0, 0, "SAR64mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|31|(1<<12)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2065 = SAR64mi
- { 2066, 2, 1, 0, "SAR64r1", 0, 0|23|(1<<12)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #2066 = SAR64r1
- { 2067, 2, 1, 0, "SAR64rCL", 0, 0|23|(1<<12)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #2067 = SAR64rCL
- { 2068, 3, 1, 0, "SAR64ri", 0, 0|23|(1<<12)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2068 = SAR64ri
- { 2069, 5, 0, 0, "SAR8m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|31|(208<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2069 = SAR8m1
- { 2070, 5, 0, 0, "SAR8mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|31|(210<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2070 = SAR8mCL
- { 2071, 6, 0, 0, "SAR8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|31|(1<<13)|(192<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2071 = SAR8mi
- { 2072, 2, 1, 0, "SAR8r1", 0, 0|23|(208<<24), NULL, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #2072 = SAR8r1
- { 2073, 2, 1, 0, "SAR8rCL", 0, 0|23|(210<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #2073 = SAR8rCL
- { 2074, 3, 1, 0, "SAR8ri", 0, 0|23|(1<<13)|(192<<24), NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #2074 = SAR8ri
- { 2075, 1, 0, 0, "SBB16i16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(3<<13)|(29<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #2075 = SBB16i16
- { 2076, 6, 0, 0, "SBB16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|27|(1<<6)|(3<<13)|(129<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2076 = SBB16mi
- { 2077, 6, 0, 0, "SBB16mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|27|(1<<6)|(1<<13)|(131<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2077 = SBB16mi8
- { 2078, 6, 0, 0, "SBB16mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<6)|(25<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #2078 = SBB16mr
- { 2079, 3, 1, 0, "SBB16ri", 0, 0|19|(1<<6)|(3<<13)|(129<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #2079 = SBB16ri
- { 2080, 3, 1, 0, "SBB16ri8", 0, 0|19|(1<<6)|(1<<13)|(131<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #2080 = SBB16ri8
- { 2081, 7, 1, 0, "SBB16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(27<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #2081 = SBB16rm
- { 2082, 3, 1, 0, "SBB16rr", 0, 0|3|(1<<6)|(25<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2082 = SBB16rr
- { 2083, 3, 1, 0, "SBB16rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(27<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2083 = SBB16rr_REV
- { 2084, 1, 0, 0, "SBB32i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<13)|(29<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #2084 = SBB32i32
- { 2085, 6, 0, 0, "SBB32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|27|(4<<13)|(129<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2085 = SBB32mi
- { 2086, 6, 0, 0, "SBB32mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|27|(1<<13)|(131<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2086 = SBB32mi8
- { 2087, 6, 0, 0, "SBB32mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(25<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #2087 = SBB32mr
- { 2088, 3, 1, 0, "SBB32ri", 0, 0|19|(4<<13)|(129<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2088 = SBB32ri
- { 2089, 3, 1, 0, "SBB32ri8", 0, 0|19|(1<<13)|(131<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2089 = SBB32ri8
- { 2090, 7, 1, 0, "SBB32rm", 0|(1<<TID::MayLoad), 0|6|(27<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #2090 = SBB32rm
- { 2091, 3, 1, 0, "SBB32rr", 0, 0|3|(25<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #2091 = SBB32rr
- { 2092, 3, 1, 0, "SBB32rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(27<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #2092 = SBB32rr_REV
- { 2093, 1, 0, 0, "SBB64i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(29<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #2093 = SBB64i32
- { 2094, 6, 0, 0, "SBB64mi32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|27|(1<<12)|(4<<13)|(129<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2094 = SBB64mi32
- { 2095, 6, 0, 0, "SBB64mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|27|(1<<12)|(1<<13)|(131<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2095 = SBB64mi8
- { 2096, 6, 0, 0, "SBB64mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<12)|(25<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #2096 = SBB64mr
- { 2097, 3, 1, 0, "SBB64ri32", 0, 0|19|(1<<12)|(4<<13)|(129<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2097 = SBB64ri32
- { 2098, 3, 1, 0, "SBB64ri8", 0, 0|19|(1<<12)|(1<<13)|(131<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2098 = SBB64ri8
- { 2099, 7, 1, 0, "SBB64rm", 0|(1<<TID::MayLoad), 0|6|(1<<12)|(27<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #2099 = SBB64rm
- { 2100, 3, 1, 0, "SBB64rr", 0, 0|3|(1<<12)|(25<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #2100 = SBB64rr
- { 2101, 3, 1, 0, "SBB64rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<12)|(27<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #2101 = SBB64rr_REV
- { 2102, 1, 0, 0, "SBB8i8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<13)|(28<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #2102 = SBB8i8
- { 2103, 6, 0, 0, "SBB8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|27|(1<<13)|(128<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2103 = SBB8mi
- { 2104, 6, 0, 0, "SBB8mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(24<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #2104 = SBB8mr
- { 2105, 3, 1, 0, "SBB8ri", 0, 0|19|(1<<13)|(128<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #2105 = SBB8ri
- { 2106, 7, 1, 0, "SBB8rm", 0|(1<<TID::MayLoad), 0|6|(26<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #2106 = SBB8rm
- { 2107, 3, 1, 0, "SBB8rr", 0, 0|3|(24<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo23 }, // Inst #2107 = SBB8rr
- { 2108, 3, 1, 0, "SBB8rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(26<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo23 }, // Inst #2108 = SBB8rr_REV
- { 2109, 0, 0, 0, "SCAS16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(175<<24), NULL, NULL, NULL, 0 }, // Inst #2109 = SCAS16
- { 2110, 0, 0, 0, "SCAS32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(175<<24), NULL, NULL, NULL, 0 }, // Inst #2110 = SCAS32
- { 2111, 0, 0, 0, "SCAS64", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(175<<24), NULL, NULL, NULL, 0 }, // Inst #2111 = SCAS64
- { 2112, 0, 0, 0, "SCAS8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(174<<24), NULL, NULL, NULL, 0 }, // Inst #2112 = SCAS8
- { 2113, 5, 0, 0, "SETAEm", 0|(1<<TID::MayStore), 0|24|(1<<8)|(147<<24), ImplicitList1, NULL, NULL, OperandInfo30 }, // Inst #2113 = SETAEm
- { 2114, 1, 1, 0, "SETAEr", 0, 0|16|(1<<8)|(147<<24), ImplicitList1, NULL, NULL, OperandInfo94 }, // Inst #2114 = SETAEr
- { 2115, 5, 0, 0, "SETAm", 0|(1<<TID::MayStore), 0|24|(1<<8)|(151<<24), ImplicitList1, NULL, NULL, OperandInfo30 }, // Inst #2115 = SETAm
- { 2116, 1, 1, 0, "SETAr", 0, 0|16|(1<<8)|(151<<24), ImplicitList1, NULL, NULL, OperandInfo94 }, // Inst #2116 = SETAr
- { 2117, 5, 0, 0, "SETBEm", 0|(1<<TID::MayStore), 0|24|(1<<8)|(150<<24), ImplicitList1, NULL, NULL, OperandInfo30 }, // Inst #2117 = SETBEm
- { 2118, 1, 1, 0, "SETBEr", 0, 0|16|(1<<8)|(150<<24), ImplicitList1, NULL, NULL, OperandInfo94 }, // Inst #2118 = SETBEr
- { 2119, 1, 1, 0, "SETB_C16r", 0, 0|32|(1<<6)|(25<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo93 }, // Inst #2119 = SETB_C16r
- { 2120, 1, 1, 0, "SETB_C32r", 0, 0|32|(25<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo57 }, // Inst #2120 = SETB_C32r
- { 2121, 1, 1, 0, "SETB_C64r", 0, 0|32|(1<<12)|(25<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo58 }, // Inst #2121 = SETB_C64r
- { 2122, 1, 1, 0, "SETB_C8r", 0, 0|32|(24<<24), ImplicitList1, ImplicitList1, Barriers1, OperandInfo94 }, // Inst #2122 = SETB_C8r
- { 2123, 5, 0, 0, "SETBm", 0|(1<<TID::MayStore), 0|24|(1<<8)|(146<<24), ImplicitList1, NULL, NULL, OperandInfo30 }, // Inst #2123 = SETBm
- { 2124, 1, 1, 0, "SETBr", 0, 0|16|(1<<8)|(146<<24), ImplicitList1, NULL, NULL, OperandInfo94 }, // Inst #2124 = SETBr
- { 2125, 5, 0, 0, "SETEm", 0|(1<<TID::MayStore), 0|24|(1<<8)|(148<<24), ImplicitList1, NULL, NULL, OperandInfo30 }, // Inst #2125 = SETEm
- { 2126, 1, 1, 0, "SETEr", 0, 0|16|(1<<8)|(148<<24), ImplicitList1, NULL, NULL, OperandInfo94 }, // Inst #2126 = SETEr
- { 2127, 5, 0, 0, "SETGEm", 0|(1<<TID::MayStore), 0|24|(1<<8)|(157<<24), ImplicitList1, NULL, NULL, OperandInfo30 }, // Inst #2127 = SETGEm
- { 2128, 1, 1, 0, "SETGEr", 0, 0|16|(1<<8)|(157<<24), ImplicitList1, NULL, NULL, OperandInfo94 }, // Inst #2128 = SETGEr
- { 2129, 5, 0, 0, "SETGm", 0|(1<<TID::MayStore), 0|24|(1<<8)|(159<<24), ImplicitList1, NULL, NULL, OperandInfo30 }, // Inst #2129 = SETGm
- { 2130, 1, 1, 0, "SETGr", 0, 0|16|(1<<8)|(159<<24), ImplicitList1, NULL, NULL, OperandInfo94 }, // Inst #2130 = SETGr
- { 2131, 5, 0, 0, "SETLEm", 0|(1<<TID::MayStore), 0|24|(1<<8)|(158<<24), ImplicitList1, NULL, NULL, OperandInfo30 }, // Inst #2131 = SETLEm
- { 2132, 1, 1, 0, "SETLEr", 0, 0|16|(1<<8)|(158<<24), ImplicitList1, NULL, NULL, OperandInfo94 }, // Inst #2132 = SETLEr
- { 2133, 5, 0, 0, "SETLm", 0|(1<<TID::MayStore), 0|24|(1<<8)|(156<<24), ImplicitList1, NULL, NULL, OperandInfo30 }, // Inst #2133 = SETLm
- { 2134, 1, 1, 0, "SETLr", 0, 0|16|(1<<8)|(156<<24), ImplicitList1, NULL, NULL, OperandInfo94 }, // Inst #2134 = SETLr
- { 2135, 5, 0, 0, "SETNEm", 0|(1<<TID::MayStore), 0|24|(1<<8)|(149<<24), ImplicitList1, NULL, NULL, OperandInfo30 }, // Inst #2135 = SETNEm
- { 2136, 1, 1, 0, "SETNEr", 0, 0|16|(1<<8)|(149<<24), ImplicitList1, NULL, NULL, OperandInfo94 }, // Inst #2136 = SETNEr
- { 2137, 5, 0, 0, "SETNOm", 0|(1<<TID::MayStore), 0|24|(1<<8)|(145<<24), ImplicitList1, NULL, NULL, OperandInfo30 }, // Inst #2137 = SETNOm
- { 2138, 1, 1, 0, "SETNOr", 0, 0|16|(1<<8)|(145<<24), ImplicitList1, NULL, NULL, OperandInfo94 }, // Inst #2138 = SETNOr
- { 2139, 5, 0, 0, "SETNPm", 0|(1<<TID::MayStore), 0|24|(1<<8)|(155<<24), ImplicitList1, NULL, NULL, OperandInfo30 }, // Inst #2139 = SETNPm
- { 2140, 1, 1, 0, "SETNPr", 0, 0|16|(1<<8)|(155<<24), ImplicitList1, NULL, NULL, OperandInfo94 }, // Inst #2140 = SETNPr
- { 2141, 5, 0, 0, "SETNSm", 0|(1<<TID::MayStore), 0|24|(1<<8)|(153<<24), ImplicitList1, NULL, NULL, OperandInfo30 }, // Inst #2141 = SETNSm
- { 2142, 1, 1, 0, "SETNSr", 0, 0|16|(1<<8)|(153<<24), ImplicitList1, NULL, NULL, OperandInfo94 }, // Inst #2142 = SETNSr
- { 2143, 5, 0, 0, "SETOm", 0|(1<<TID::MayStore), 0|24|(1<<8)|(144<<24), ImplicitList1, NULL, NULL, OperandInfo30 }, // Inst #2143 = SETOm
- { 2144, 1, 1, 0, "SETOr", 0, 0|16|(1<<8)|(144<<24), ImplicitList1, NULL, NULL, OperandInfo94 }, // Inst #2144 = SETOr
- { 2145, 5, 0, 0, "SETPm", 0|(1<<TID::MayStore), 0|24|(1<<8)|(154<<24), ImplicitList1, NULL, NULL, OperandInfo30 }, // Inst #2145 = SETPm
- { 2146, 1, 1, 0, "SETPr", 0, 0|16|(1<<8)|(154<<24), ImplicitList1, NULL, NULL, OperandInfo94 }, // Inst #2146 = SETPr
- { 2147, 5, 0, 0, "SETSm", 0|(1<<TID::MayStore), 0|24|(1<<8)|(152<<24), ImplicitList1, NULL, NULL, OperandInfo30 }, // Inst #2147 = SETSm
- { 2148, 1, 1, 0, "SETSr", 0, 0|16|(1<<8)|(152<<24), ImplicitList1, NULL, NULL, OperandInfo94 }, // Inst #2148 = SETSr
- { 2149, 0, 0, 0, "SFENCE", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|23|(1<<8)|(174<<24), NULL, NULL, NULL, 0 }, // Inst #2149 = SFENCE
- { 2150, 5, 1, 0, "SGDTm", 0|(1<<TID::UnmodeledSideEffects), 0|24|(1<<8)|(1<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2150 = SGDTm
- { 2151, 5, 0, 0, "SHL16m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(1<<6)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2151 = SHL16m1
- { 2152, 5, 0, 0, "SHL16mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(1<<6)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2152 = SHL16mCL
- { 2153, 6, 0, 0, "SHL16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(1<<6)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2153 = SHL16mi
- { 2154, 2, 1, 0, "SHL16r1", 0|(1<<TID::ConvertibleTo3Addr)|(1<<TID::UnmodeledSideEffects), 0|20|(1<<6)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #2154 = SHL16r1
- { 2155, 2, 1, 0, "SHL16rCL", 0, 0|20|(1<<6)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #2155 = SHL16rCL
- { 2156, 3, 1, 0, "SHL16ri", 0|(1<<TID::ConvertibleTo3Addr), 0|20|(1<<6)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #2156 = SHL16ri
- { 2157, 5, 0, 0, "SHL32m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2157 = SHL32m1
- { 2158, 5, 0, 0, "SHL32mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2158 = SHL32mCL
- { 2159, 6, 0, 0, "SHL32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2159 = SHL32mi
- { 2160, 2, 1, 0, "SHL32r1", 0|(1<<TID::ConvertibleTo3Addr)|(1<<TID::UnmodeledSideEffects), 0|20|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #2160 = SHL32r1
- { 2161, 2, 1, 0, "SHL32rCL", 0, 0|20|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #2161 = SHL32rCL
- { 2162, 3, 1, 0, "SHL32ri", 0|(1<<TID::ConvertibleTo3Addr), 0|20|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2162 = SHL32ri
- { 2163, 5, 0, 0, "SHL64m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(1<<12)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2163 = SHL64m1
- { 2164, 5, 0, 0, "SHL64mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(1<<12)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2164 = SHL64mCL
- { 2165, 6, 0, 0, "SHL64mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(1<<12)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2165 = SHL64mi
- { 2166, 2, 1, 0, "SHL64r1", 0|(1<<TID::UnmodeledSideEffects), 0|20|(1<<12)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #2166 = SHL64r1
- { 2167, 2, 1, 0, "SHL64rCL", 0, 0|20|(1<<12)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #2167 = SHL64rCL
- { 2168, 3, 1, 0, "SHL64ri", 0|(1<<TID::ConvertibleTo3Addr), 0|20|(1<<12)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2168 = SHL64ri
- { 2169, 5, 0, 0, "SHL8m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(208<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2169 = SHL8m1
- { 2170, 5, 0, 0, "SHL8mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(210<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2170 = SHL8mCL
- { 2171, 6, 0, 0, "SHL8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|28|(1<<13)|(192<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2171 = SHL8mi
- { 2172, 2, 1, 0, "SHL8r1", 0|(1<<TID::ConvertibleTo3Addr)|(1<<TID::UnmodeledSideEffects), 0|20|(208<<24), NULL, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #2172 = SHL8r1
- { 2173, 2, 1, 0, "SHL8rCL", 0, 0|20|(210<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #2173 = SHL8rCL
- { 2174, 3, 1, 0, "SHL8ri", 0, 0|20|(1<<13)|(192<<24), NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #2174 = SHL8ri
- { 2175, 6, 0, 0, "SHLD16mrCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<6)|(1<<8)|(165<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #2175 = SHLD16mrCL
- { 2176, 7, 0, 0, "SHLD16mri8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<6)|(1<<8)|(1<<13)|(164<<24), NULL, ImplicitList1, Barriers1, OperandInfo190 }, // Inst #2176 = SHLD16mri8
- { 2177, 3, 1, 0, "SHLD16rrCL", 0, 0|3|(1<<6)|(1<<8)|(165<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2177 = SHLD16rrCL
- { 2178, 4, 1, 0, "SHLD16rri8", 0|(1<<TID::Commutable), 0|3|(1<<6)|(1<<8)|(1<<13)|(164<<24), NULL, ImplicitList1, Barriers1, OperandInfo191 }, // Inst #2178 = SHLD16rri8
- { 2179, 6, 0, 0, "SHLD32mrCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<8)|(165<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #2179 = SHLD32mrCL
- { 2180, 7, 0, 0, "SHLD32mri8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<8)|(1<<13)|(164<<24), NULL, ImplicitList1, Barriers1, OperandInfo192 }, // Inst #2180 = SHLD32mri8
- { 2181, 3, 1, 0, "SHLD32rrCL", 0, 0|3|(1<<8)|(165<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #2181 = SHLD32rrCL
- { 2182, 4, 1, 0, "SHLD32rri8", 0|(1<<TID::Commutable), 0|3|(1<<8)|(1<<13)|(164<<24), NULL, ImplicitList1, Barriers1, OperandInfo193 }, // Inst #2182 = SHLD32rri8
- { 2183, 6, 0, 0, "SHLD64mrCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<8)|(1<<12)|(165<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #2183 = SHLD64mrCL
- { 2184, 7, 0, 0, "SHLD64mri8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<8)|(1<<12)|(1<<13)|(164<<24), NULL, ImplicitList1, Barriers1, OperandInfo194 }, // Inst #2184 = SHLD64mri8
- { 2185, 3, 1, 0, "SHLD64rrCL", 0, 0|3|(1<<8)|(1<<12)|(165<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #2185 = SHLD64rrCL
- { 2186, 4, 1, 0, "SHLD64rri8", 0|(1<<TID::Commutable), 0|3|(1<<8)|(1<<12)|(1<<13)|(164<<24), NULL, ImplicitList1, Barriers1, OperandInfo195 }, // Inst #2186 = SHLD64rri8
- { 2187, 5, 0, 0, "SHR16m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(1<<6)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2187 = SHR16m1
- { 2188, 5, 0, 0, "SHR16mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(1<<6)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2188 = SHR16mCL
- { 2189, 6, 0, 0, "SHR16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(1<<6)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2189 = SHR16mi
- { 2190, 2, 1, 0, "SHR16r1", 0, 0|21|(1<<6)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #2190 = SHR16r1
- { 2191, 2, 1, 0, "SHR16rCL", 0, 0|21|(1<<6)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo91 }, // Inst #2191 = SHR16rCL
- { 2192, 3, 1, 0, "SHR16ri", 0, 0|21|(1<<6)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #2192 = SHR16ri
- { 2193, 5, 0, 0, "SHR32m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2193 = SHR32m1
- { 2194, 5, 0, 0, "SHR32mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2194 = SHR32mCL
- { 2195, 6, 0, 0, "SHR32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2195 = SHR32mi
- { 2196, 2, 1, 0, "SHR32r1", 0, 0|21|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #2196 = SHR32r1
- { 2197, 2, 1, 0, "SHR32rCL", 0, 0|21|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo52 }, // Inst #2197 = SHR32rCL
- { 2198, 3, 1, 0, "SHR32ri", 0, 0|21|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2198 = SHR32ri
- { 2199, 5, 0, 0, "SHR64m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(1<<12)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2199 = SHR64m1
- { 2200, 5, 0, 0, "SHR64mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(1<<12)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2200 = SHR64mCL
- { 2201, 6, 0, 0, "SHR64mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(1<<12)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2201 = SHR64mi
- { 2202, 2, 1, 0, "SHR64r1", 0, 0|21|(1<<12)|(209<<24), NULL, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #2202 = SHR64r1
- { 2203, 2, 1, 0, "SHR64rCL", 0, 0|21|(1<<12)|(211<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo53 }, // Inst #2203 = SHR64rCL
- { 2204, 3, 1, 0, "SHR64ri", 0, 0|21|(1<<12)|(1<<13)|(193<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2204 = SHR64ri
- { 2205, 5, 0, 0, "SHR8m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(208<<24), NULL, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2205 = SHR8m1
- { 2206, 5, 0, 0, "SHR8mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(210<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo30 }, // Inst #2206 = SHR8mCL
- { 2207, 6, 0, 0, "SHR8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(1<<13)|(192<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2207 = SHR8mi
- { 2208, 2, 1, 0, "SHR8r1", 0, 0|21|(208<<24), NULL, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #2208 = SHR8r1
- { 2209, 2, 1, 0, "SHR8rCL", 0, 0|21|(210<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #2209 = SHR8rCL
- { 2210, 3, 1, 0, "SHR8ri", 0, 0|21|(1<<13)|(192<<24), NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #2210 = SHR8ri
- { 2211, 6, 0, 0, "SHRD16mrCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<6)|(1<<8)|(173<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #2211 = SHRD16mrCL
- { 2212, 7, 0, 0, "SHRD16mri8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<6)|(1<<8)|(1<<13)|(172<<24), NULL, ImplicitList1, Barriers1, OperandInfo190 }, // Inst #2212 = SHRD16mri8
- { 2213, 3, 1, 0, "SHRD16rrCL", 0, 0|3|(1<<6)|(1<<8)|(173<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2213 = SHRD16rrCL
- { 2214, 4, 1, 0, "SHRD16rri8", 0|(1<<TID::Commutable), 0|3|(1<<6)|(1<<8)|(1<<13)|(172<<24), NULL, ImplicitList1, Barriers1, OperandInfo191 }, // Inst #2214 = SHRD16rri8
- { 2215, 6, 0, 0, "SHRD32mrCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<8)|(173<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #2215 = SHRD32mrCL
- { 2216, 7, 0, 0, "SHRD32mri8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<8)|(1<<13)|(172<<24), NULL, ImplicitList1, Barriers1, OperandInfo192 }, // Inst #2216 = SHRD32mri8
- { 2217, 3, 1, 0, "SHRD32rrCL", 0, 0|3|(1<<8)|(173<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #2217 = SHRD32rrCL
- { 2218, 4, 1, 0, "SHRD32rri8", 0|(1<<TID::Commutable), 0|3|(1<<8)|(1<<13)|(172<<24), NULL, ImplicitList1, Barriers1, OperandInfo193 }, // Inst #2218 = SHRD32rri8
- { 2219, 6, 0, 0, "SHRD64mrCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<8)|(1<<12)|(173<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #2219 = SHRD64mrCL
- { 2220, 7, 0, 0, "SHRD64mri8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<8)|(1<<12)|(1<<13)|(172<<24), NULL, ImplicitList1, Barriers1, OperandInfo194 }, // Inst #2220 = SHRD64mri8
- { 2221, 3, 1, 0, "SHRD64rrCL", 0, 0|3|(1<<8)|(1<<12)|(173<<24), ImplicitList44, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #2221 = SHRD64rrCL
- { 2222, 4, 1, 0, "SHRD64rri8", 0|(1<<TID::Commutable), 0|3|(1<<8)|(1<<12)|(1<<13)|(172<<24), NULL, ImplicitList1, Barriers1, OperandInfo195 }, // Inst #2222 = SHRD64rri8
- { 2223, 8, 1, 0, "SHUFPDrmi", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(1<<13)|(198<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #2223 = SHUFPDrmi
- { 2224, 4, 1, 0, "SHUFPDrri", 0, 0|5|(1<<6)|(1<<8)|(1<<13)|(198<<24), NULL, NULL, NULL, OperandInfo45 }, // Inst #2224 = SHUFPDrri
- { 2225, 8, 1, 0, "SHUFPSrmi", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(1<<13)|(198<<24), NULL, NULL, NULL, OperandInfo44 }, // Inst #2225 = SHUFPSrmi
- { 2226, 4, 1, 0, "SHUFPSrri", 0|(1<<TID::ConvertibleTo3Addr), 0|5|(1<<8)|(1<<13)|(198<<24), NULL, NULL, NULL, OperandInfo45 }, // Inst #2226 = SHUFPSrri
- { 2227, 5, 1, 0, "SIDTm", 0|(1<<TID::UnmodeledSideEffects), 0|25|(1<<8)|(1<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2227 = SIDTm
- { 2228, 0, 0, 0, "SIN_F", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(254<<24), NULL, NULL, NULL, 0 }, // Inst #2228 = SIN_F
- { 2229, 2, 1, 0, "SIN_Fp32", 0, 0|(3<<16), NULL, NULL, NULL, OperandInfo2 }, // Inst #2229 = SIN_Fp32
- { 2230, 2, 1, 0, "SIN_Fp64", 0, 0|(3<<16), NULL, NULL, NULL, OperandInfo3 }, // Inst #2230 = SIN_Fp64
- { 2231, 2, 1, 0, "SIN_Fp80", 0, 0|(3<<16), NULL, NULL, NULL, OperandInfo4 }, // Inst #2231 = SIN_Fp80
- { 2232, 5, 1, 0, "SLDT16m", 0|(1<<TID::UnmodeledSideEffects), 0|24|(1<<8), NULL, NULL, NULL, OperandInfo30 }, // Inst #2232 = SLDT16m
- { 2233, 1, 1, 0, "SLDT16r", 0|(1<<TID::UnmodeledSideEffects), 0|16|(1<<8), NULL, NULL, NULL, OperandInfo93 }, // Inst #2233 = SLDT16r
- { 2234, 5, 1, 0, "SLDT64m", 0|(1<<TID::UnmodeledSideEffects), 0|24|(1<<8)|(1<<12), NULL, NULL, NULL, OperandInfo30 }, // Inst #2234 = SLDT64m
- { 2235, 1, 1, 0, "SLDT64r", 0|(1<<TID::UnmodeledSideEffects), 0|16|(1<<8)|(1<<12), NULL, NULL, NULL, OperandInfo58 }, // Inst #2235 = SLDT64r
- { 2236, 5, 1, 0, "SMSW16m", 0|(1<<TID::UnmodeledSideEffects), 0|28|(1<<8)|(1<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2236 = SMSW16m
- { 2237, 1, 1, 0, "SMSW16r", 0|(1<<TID::UnmodeledSideEffects), 0|20|(1<<6)|(1<<8)|(1<<24), NULL, NULL, NULL, OperandInfo93 }, // Inst #2237 = SMSW16r
- { 2238, 1, 1, 0, "SMSW32r", 0|(1<<TID::UnmodeledSideEffects), 0|20|(1<<8)|(1<<24), NULL, NULL, NULL, OperandInfo57 }, // Inst #2238 = SMSW32r
- { 2239, 1, 1, 0, "SMSW64r", 0|(1<<TID::UnmodeledSideEffects), 0|20|(1<<8)|(1<<12)|(1<<24), NULL, NULL, NULL, OperandInfo58 }, // Inst #2239 = SMSW64r
- { 2240, 6, 1, 0, "SQRTPDm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(81<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #2240 = SQRTPDm
- { 2241, 6, 1, 0, "SQRTPDm_Int", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(81<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #2241 = SQRTPDm_Int
- { 2242, 2, 1, 0, "SQRTPDr", 0, 0|5|(1<<6)|(1<<8)|(81<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #2242 = SQRTPDr
- { 2243, 2, 1, 0, "SQRTPDr_Int", 0, 0|5|(1<<6)|(1<<8)|(81<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #2243 = SQRTPDr_Int
- { 2244, 6, 1, 0, "SQRTPSm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(81<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #2244 = SQRTPSm
- { 2245, 6, 1, 0, "SQRTPSm_Int", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(81<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #2245 = SQRTPSm_Int
- { 2246, 2, 1, 0, "SQRTPSr", 0, 0|5|(1<<8)|(81<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #2246 = SQRTPSr
- { 2247, 2, 1, 0, "SQRTPSr_Int", 0, 0|5|(1<<8)|(81<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #2247 = SQRTPSr_Int
- { 2248, 6, 1, 0, "SQRTSDm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(81<<24), NULL, NULL, NULL, OperandInfo82 }, // Inst #2248 = SQRTSDm
- { 2249, 6, 1, 0, "SQRTSDm_Int", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(81<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #2249 = SQRTSDm_Int
- { 2250, 2, 1, 0, "SQRTSDr", 0, 0|5|(11<<8)|(81<<24), NULL, NULL, NULL, OperandInfo105 }, // Inst #2250 = SQRTSDr
- { 2251, 2, 1, 0, "SQRTSDr_Int", 0, 0|5|(11<<8)|(81<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #2251 = SQRTSDr_Int
- { 2252, 6, 1, 0, "SQRTSSm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(81<<24), NULL, NULL, NULL, OperandInfo80 }, // Inst #2252 = SQRTSSm
- { 2253, 6, 1, 0, "SQRTSSm_Int", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(81<<24), NULL, NULL, NULL, OperandInfo74 }, // Inst #2253 = SQRTSSm_Int
- { 2254, 2, 1, 0, "SQRTSSr", 0, 0|5|(12<<8)|(81<<24), NULL, NULL, NULL, OperandInfo106 }, // Inst #2254 = SQRTSSr
- { 2255, 2, 1, 0, "SQRTSSr_Int", 0, 0|5|(12<<8)|(81<<24), NULL, NULL, NULL, OperandInfo75 }, // Inst #2255 = SQRTSSr_Int
- { 2256, 0, 0, 0, "SQRT_F", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(250<<24), NULL, NULL, NULL, 0 }, // Inst #2256 = SQRT_F
- { 2257, 2, 1, 0, "SQRT_Fp32", 0, 0|(3<<16), NULL, NULL, NULL, OperandInfo2 }, // Inst #2257 = SQRT_Fp32
- { 2258, 2, 1, 0, "SQRT_Fp64", 0, 0|(3<<16), NULL, NULL, NULL, OperandInfo3 }, // Inst #2258 = SQRT_Fp64
- { 2259, 2, 1, 0, "SQRT_Fp80", 0, 0|(3<<16), NULL, NULL, NULL, OperandInfo4 }, // Inst #2259 = SQRT_Fp80
- { 2260, 0, 0, 0, "SS_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0|1|(54<<24), NULL, NULL, NULL, 0 }, // Inst #2260 = SS_PREFIX
- { 2261, 0, 0, 0, "STC", 0|(1<<TID::UnmodeledSideEffects), 0|1|(249<<24), NULL, NULL, NULL, 0 }, // Inst #2261 = STC
- { 2262, 0, 0, 0, "STD", 0|(1<<TID::UnmodeledSideEffects), 0|1|(253<<24), NULL, NULL, NULL, 0 }, // Inst #2262 = STD
- { 2263, 0, 0, 0, "STI", 0|(1<<TID::UnmodeledSideEffects), 0|1|(251<<24), NULL, NULL, NULL, 0 }, // Inst #2263 = STI
- { 2264, 5, 0, 0, "STMXCSR", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|27|(1<<8)|(174<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2264 = STMXCSR
- { 2265, 0, 0, 0, "STOSB", 0|(1<<TID::UnmodeledSideEffects), 0|1|(170<<24), ImplicitList54, ImplicitList35, NULL, 0 }, // Inst #2265 = STOSB
- { 2266, 0, 0, 0, "STOSD", 0|(1<<TID::UnmodeledSideEffects), 0|1|(171<<24), ImplicitList55, ImplicitList35, NULL, 0 }, // Inst #2266 = STOSD
- { 2267, 0, 0, 0, "STOSW", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(171<<24), ImplicitList56, ImplicitList35, NULL, 0 }, // Inst #2267 = STOSW
- { 2268, 5, 1, 0, "STRm", 0|(1<<TID::UnmodeledSideEffects), 0|25|(1<<8), NULL, NULL, NULL, OperandInfo30 }, // Inst #2268 = STRm
- { 2269, 1, 1, 0, "STRr", 0|(1<<TID::UnmodeledSideEffects), 0|17|(1<<8), NULL, NULL, NULL, OperandInfo93 }, // Inst #2269 = STRr
- { 2270, 5, 0, 0, "ST_F32m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|26|(217<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2270 = ST_F32m
- { 2271, 5, 0, 0, "ST_F64m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|26|(221<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2271 = ST_F64m
- { 2272, 5, 0, 0, "ST_FP32m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|27|(217<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2272 = ST_FP32m
- { 2273, 5, 0, 0, "ST_FP64m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|27|(221<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2273 = ST_FP64m
- { 2274, 5, 0, 0, "ST_FP80m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0|31|(219<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2274 = ST_FP80m
- { 2275, 1, 0, 0, "ST_FPrr", 0|(1<<TID::UnmodeledSideEffects), 0|2|(8<<8)|(216<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #2275 = ST_FPrr
- { 2276, 6, 0, 0, "ST_Fp32m", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo97 }, // Inst #2276 = ST_Fp32m
- { 2277, 6, 0, 0, "ST_Fp64m", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo98 }, // Inst #2277 = ST_Fp64m
- { 2278, 6, 0, 0, "ST_Fp64m32", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo98 }, // Inst #2278 = ST_Fp64m32
- { 2279, 6, 0, 0, "ST_Fp80m32", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo99 }, // Inst #2279 = ST_Fp80m32
- { 2280, 6, 0, 0, "ST_Fp80m64", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo99 }, // Inst #2280 = ST_Fp80m64
- { 2281, 6, 0, 0, "ST_FpP32m", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo97 }, // Inst #2281 = ST_FpP32m
- { 2282, 6, 0, 0, "ST_FpP64m", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo98 }, // Inst #2282 = ST_FpP64m
- { 2283, 6, 0, 0, "ST_FpP64m32", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo98 }, // Inst #2283 = ST_FpP64m32
- { 2284, 6, 0, 0, "ST_FpP80m", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo99 }, // Inst #2284 = ST_FpP80m
- { 2285, 6, 0, 0, "ST_FpP80m32", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo99 }, // Inst #2285 = ST_FpP80m32
- { 2286, 6, 0, 0, "ST_FpP80m64", 0|(1<<TID::MayStore), 0|(2<<16), NULL, NULL, NULL, OperandInfo99 }, // Inst #2286 = ST_FpP80m64
- { 2287, 1, 0, 0, "ST_Frr", 0|(1<<TID::UnmodeledSideEffects), 0|2|(8<<8)|(208<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #2287 = ST_Frr
- { 2288, 1, 0, 0, "SUB16i16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(3<<13)|(45<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #2288 = SUB16i16
- { 2289, 6, 0, 0, "SUB16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(1<<6)|(3<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2289 = SUB16mi
- { 2290, 6, 0, 0, "SUB16mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(1<<6)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2290 = SUB16mi8
- { 2291, 6, 0, 0, "SUB16mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<6)|(41<<24), NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #2291 = SUB16mr
- { 2292, 3, 1, 0, "SUB16ri", 0, 0|21|(1<<6)|(3<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #2292 = SUB16ri
- { 2293, 3, 1, 0, "SUB16ri8", 0, 0|21|(1<<6)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #2293 = SUB16ri8
- { 2294, 7, 1, 0, "SUB16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(43<<24), NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #2294 = SUB16rm
- { 2295, 3, 1, 0, "SUB16rr", 0, 0|3|(1<<6)|(41<<24), NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2295 = SUB16rr
- { 2296, 3, 1, 0, "SUB16rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(43<<24), NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2296 = SUB16rr_REV
- { 2297, 1, 0, 0, "SUB32i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<13)|(45<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #2297 = SUB32i32
- { 2298, 6, 0, 0, "SUB32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2298 = SUB32mi
- { 2299, 6, 0, 0, "SUB32mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2299 = SUB32mi8
- { 2300, 6, 0, 0, "SUB32mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(41<<24), NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #2300 = SUB32mr
- { 2301, 3, 1, 0, "SUB32ri", 0, 0|21|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2301 = SUB32ri
- { 2302, 3, 1, 0, "SUB32ri8", 0, 0|21|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2302 = SUB32ri8
- { 2303, 7, 1, 0, "SUB32rm", 0|(1<<TID::MayLoad), 0|6|(43<<24), NULL, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #2303 = SUB32rm
- { 2304, 3, 1, 0, "SUB32rr", 0, 0|3|(41<<24), NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #2304 = SUB32rr
- { 2305, 3, 1, 0, "SUB32rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(43<<24), NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #2305 = SUB32rr_REV
- { 2306, 1, 0, 0, "SUB64i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(45<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #2306 = SUB64i32
- { 2307, 6, 0, 0, "SUB64mi32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(1<<12)|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2307 = SUB64mi32
- { 2308, 6, 0, 0, "SUB64mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(1<<12)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2308 = SUB64mi8
- { 2309, 6, 0, 0, "SUB64mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<12)|(41<<24), NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #2309 = SUB64mr
- { 2310, 3, 1, 0, "SUB64ri32", 0, 0|21|(1<<12)|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2310 = SUB64ri32
- { 2311, 3, 1, 0, "SUB64ri8", 0, 0|21|(1<<12)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2311 = SUB64ri8
- { 2312, 7, 1, 0, "SUB64rm", 0|(1<<TID::MayLoad), 0|6|(1<<12)|(43<<24), NULL, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #2312 = SUB64rm
- { 2313, 3, 1, 0, "SUB64rr", 0, 0|3|(1<<12)|(41<<24), NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #2313 = SUB64rr
- { 2314, 3, 1, 0, "SUB64rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<12)|(43<<24), NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #2314 = SUB64rr_REV
- { 2315, 1, 0, 0, "SUB8i8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<13)|(44<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #2315 = SUB8i8
- { 2316, 6, 0, 0, "SUB8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|29|(1<<13)|(128<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2316 = SUB8mi
- { 2317, 6, 0, 0, "SUB8mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(40<<24), NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #2317 = SUB8mr
- { 2318, 3, 1, 0, "SUB8ri", 0, 0|21|(1<<13)|(128<<24), NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #2318 = SUB8ri
- { 2319, 7, 1, 0, "SUB8rm", 0|(1<<TID::MayLoad), 0|6|(42<<24), NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #2319 = SUB8rm
- { 2320, 3, 1, 0, "SUB8rr", 0, 0|3|(40<<24), NULL, ImplicitList1, Barriers1, OperandInfo23 }, // Inst #2320 = SUB8rr
- { 2321, 3, 1, 0, "SUB8rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(42<<24), NULL, ImplicitList1, Barriers1, OperandInfo23 }, // Inst #2321 = SUB8rr_REV
- { 2322, 7, 1, 0, "SUBPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(92<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #2322 = SUBPDrm
- { 2323, 3, 1, 0, "SUBPDrr", 0, 0|5|(1<<6)|(1<<8)|(92<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #2323 = SUBPDrr
- { 2324, 7, 1, 0, "SUBPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(92<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #2324 = SUBPSrm
- { 2325, 3, 1, 0, "SUBPSrr", 0, 0|5|(1<<8)|(92<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #2325 = SUBPSrr
- { 2326, 5, 0, 0, "SUBR_F32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|29|(216<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2326 = SUBR_F32m
- { 2327, 5, 0, 0, "SUBR_F64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|29|(220<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2327 = SUBR_F64m
- { 2328, 5, 0, 0, "SUBR_FI16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|29|(222<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2328 = SUBR_FI16m
- { 2329, 5, 0, 0, "SUBR_FI32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|29|(218<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2329 = SUBR_FI32m
- { 2330, 1, 0, 0, "SUBR_FPrST0", 0|(1<<TID::UnmodeledSideEffects), 0|2|(9<<8)|(224<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #2330 = SUBR_FPrST0
- { 2331, 1, 0, 0, "SUBR_FST0r", 0|(1<<TID::UnmodeledSideEffects), 0|2|(3<<8)|(232<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #2331 = SUBR_FST0r
- { 2332, 7, 1, 0, "SUBR_Fp32m", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #2332 = SUBR_Fp32m
- { 2333, 7, 1, 0, "SUBR_Fp64m", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #2333 = SUBR_Fp64m
- { 2334, 7, 1, 0, "SUBR_Fp64m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #2334 = SUBR_Fp64m32
- { 2335, 7, 1, 0, "SUBR_Fp80m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #2335 = SUBR_Fp80m32
- { 2336, 7, 1, 0, "SUBR_Fp80m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #2336 = SUBR_Fp80m64
- { 2337, 7, 1, 0, "SUBR_FpI16m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #2337 = SUBR_FpI16m32
- { 2338, 7, 1, 0, "SUBR_FpI16m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #2338 = SUBR_FpI16m64
- { 2339, 7, 1, 0, "SUBR_FpI16m80", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #2339 = SUBR_FpI16m80
- { 2340, 7, 1, 0, "SUBR_FpI32m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #2340 = SUBR_FpI32m32
- { 2341, 7, 1, 0, "SUBR_FpI32m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #2341 = SUBR_FpI32m64
- { 2342, 7, 1, 0, "SUBR_FpI32m80", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #2342 = SUBR_FpI32m80
- { 2343, 1, 0, 0, "SUBR_FrST0", 0|(1<<TID::UnmodeledSideEffects), 0|2|(7<<8)|(224<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #2343 = SUBR_FrST0
- { 2344, 7, 1, 0, "SUBSDrm", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(92<<24), NULL, NULL, NULL, OperandInfo26 }, // Inst #2344 = SUBSDrm
- { 2345, 7, 1, 0, "SUBSDrm_Int", 0|(1<<TID::MayLoad), 0|6|(11<<8)|(92<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #2345 = SUBSDrm_Int
- { 2346, 3, 1, 0, "SUBSDrr", 0, 0|5|(11<<8)|(92<<24), NULL, NULL, NULL, OperandInfo27 }, // Inst #2346 = SUBSDrr
- { 2347, 3, 1, 0, "SUBSDrr_Int", 0, 0|5|(11<<8)|(92<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #2347 = SUBSDrr_Int
- { 2348, 7, 1, 0, "SUBSSrm", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(92<<24), NULL, NULL, NULL, OperandInfo28 }, // Inst #2348 = SUBSSrm
- { 2349, 7, 1, 0, "SUBSSrm_Int", 0|(1<<TID::MayLoad), 0|6|(12<<8)|(92<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #2349 = SUBSSrm_Int
- { 2350, 3, 1, 0, "SUBSSrr", 0, 0|5|(12<<8)|(92<<24), NULL, NULL, NULL, OperandInfo29 }, // Inst #2350 = SUBSSrr
- { 2351, 3, 1, 0, "SUBSSrr_Int", 0, 0|5|(12<<8)|(92<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #2351 = SUBSSrr_Int
- { 2352, 5, 0, 0, "SUB_F32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|28|(216<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2352 = SUB_F32m
- { 2353, 5, 0, 0, "SUB_F64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|28|(220<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2353 = SUB_F64m
- { 2354, 5, 0, 0, "SUB_FI16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|28|(222<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2354 = SUB_FI16m
- { 2355, 5, 0, 0, "SUB_FI32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0|28|(218<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2355 = SUB_FI32m
- { 2356, 1, 0, 0, "SUB_FPrST0", 0|(1<<TID::UnmodeledSideEffects), 0|2|(9<<8)|(232<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #2356 = SUB_FPrST0
- { 2357, 1, 0, 0, "SUB_FST0r", 0|(1<<TID::UnmodeledSideEffects), 0|2|(3<<8)|(224<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #2357 = SUB_FST0r
- { 2358, 3, 1, 0, "SUB_Fp32", 0, 0|(4<<16), NULL, NULL, NULL, OperandInfo32 }, // Inst #2358 = SUB_Fp32
- { 2359, 7, 1, 0, "SUB_Fp32m", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #2359 = SUB_Fp32m
- { 2360, 3, 1, 0, "SUB_Fp64", 0, 0|(4<<16), NULL, NULL, NULL, OperandInfo34 }, // Inst #2360 = SUB_Fp64
- { 2361, 7, 1, 0, "SUB_Fp64m", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #2361 = SUB_Fp64m
- { 2362, 7, 1, 0, "SUB_Fp64m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #2362 = SUB_Fp64m32
- { 2363, 3, 1, 0, "SUB_Fp80", 0, 0|(4<<16), NULL, NULL, NULL, OperandInfo36 }, // Inst #2363 = SUB_Fp80
- { 2364, 7, 1, 0, "SUB_Fp80m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #2364 = SUB_Fp80m32
- { 2365, 7, 1, 0, "SUB_Fp80m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #2365 = SUB_Fp80m64
- { 2366, 7, 1, 0, "SUB_FpI16m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #2366 = SUB_FpI16m32
- { 2367, 7, 1, 0, "SUB_FpI16m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #2367 = SUB_FpI16m64
- { 2368, 7, 1, 0, "SUB_FpI16m80", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #2368 = SUB_FpI16m80
- { 2369, 7, 1, 0, "SUB_FpI32m32", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo33 }, // Inst #2369 = SUB_FpI32m32
- { 2370, 7, 1, 0, "SUB_FpI32m64", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo35 }, // Inst #2370 = SUB_FpI32m64
- { 2371, 7, 1, 0, "SUB_FpI32m80", 0|(1<<TID::MayLoad), 0|(3<<16), NULL, NULL, NULL, OperandInfo37 }, // Inst #2371 = SUB_FpI32m80
- { 2372, 1, 0, 0, "SUB_FrST0", 0|(1<<TID::UnmodeledSideEffects), 0|2|(7<<8)|(232<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #2372 = SUB_FrST0
- { 2373, 0, 0, 0, "SWAPGS", 0|(1<<TID::UnmodeledSideEffects), 0|41|(1<<8)|(1<<24), NULL, NULL, NULL, 0 }, // Inst #2373 = SWAPGS
- { 2374, 0, 0, 0, "SYSCALL", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(5<<24), NULL, NULL, NULL, 0 }, // Inst #2374 = SYSCALL
- { 2375, 0, 0, 0, "SYSENTER", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(52<<24), NULL, NULL, NULL, 0 }, // Inst #2375 = SYSENTER
- { 2376, 0, 0, 0, "SYSEXIT", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(53<<24), NULL, NULL, NULL, 0 }, // Inst #2376 = SYSEXIT
- { 2377, 0, 0, 0, "SYSEXIT64", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(1<<12)|(53<<24), NULL, NULL, NULL, 0 }, // Inst #2377 = SYSEXIT64
- { 2378, 0, 0, 0, "SYSRET", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(7<<24), NULL, NULL, NULL, 0 }, // Inst #2378 = SYSRET
- { 2379, 1, 0, 0, "TAILJMPd", 0|(1<<TID::Return)|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0|1|(4<<13)|(233<<24), NULL, NULL, NULL, OperandInfo5 }, // Inst #2379 = TAILJMPd
- { 2380, 5, 0, 0, "TAILJMPm", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0|28|(255<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2380 = TAILJMPm
- { 2381, 1, 0, 0, "TAILJMPr", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0|20|(255<<24), NULL, NULL, NULL, OperandInfo57 }, // Inst #2381 = TAILJMPr
- { 2382, 1, 0, 0, "TAILJMPr64", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0|20|(255<<24), NULL, NULL, NULL, OperandInfo58 }, // Inst #2382 = TAILJMPr64
- { 2383, 2, 0, 0, "TCRETURNdi", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo38 }, // Inst #2383 = TCRETURNdi
- { 2384, 2, 0, 0, "TCRETURNdi64", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo38 }, // Inst #2384 = TCRETURNdi64
- { 2385, 2, 0, 0, "TCRETURNri", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo55 }, // Inst #2385 = TCRETURNri
- { 2386, 2, 0, 0, "TCRETURNri64", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0, NULL, NULL, NULL, OperandInfo56 }, // Inst #2386 = TCRETURNri64
- { 2387, 1, 0, 0, "TEST16i16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(3<<13)|(169<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #2387 = TEST16i16
- { 2388, 6, 0, 0, "TEST16mi", 0|(1<<TID::MayLoad), 0|24|(1<<6)|(3<<13)|(247<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2388 = TEST16mi
- { 2389, 2, 0, 0, "TEST16ri", 0, 0|16|(1<<6)|(3<<13)|(247<<24), NULL, ImplicitList1, Barriers1, OperandInfo54 }, // Inst #2389 = TEST16ri
- { 2390, 6, 0, 0, "TEST16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(133<<24), NULL, ImplicitList1, Barriers1, OperandInfo46 }, // Inst #2390 = TEST16rm
- { 2391, 2, 0, 0, "TEST16rr", 0|(1<<TID::Commutable), 0|3|(1<<6)|(133<<24), NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #2391 = TEST16rr
- { 2392, 1, 0, 0, "TEST32i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<13)|(169<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #2392 = TEST32i32
- { 2393, 6, 0, 0, "TEST32mi", 0|(1<<TID::MayLoad), 0|24|(4<<13)|(247<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2393 = TEST32mi
- { 2394, 2, 0, 0, "TEST32ri", 0, 0|16|(4<<13)|(247<<24), NULL, ImplicitList1, Barriers1, OperandInfo55 }, // Inst #2394 = TEST32ri
- { 2395, 6, 0, 0, "TEST32rm", 0|(1<<TID::MayLoad), 0|6|(133<<24), NULL, ImplicitList1, Barriers1, OperandInfo48 }, // Inst #2395 = TEST32rm
- { 2396, 2, 0, 0, "TEST32rr", 0|(1<<TID::Commutable), 0|3|(133<<24), NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #2396 = TEST32rr
- { 2397, 1, 0, 0, "TEST64i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(169<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #2397 = TEST64i32
- { 2398, 6, 0, 0, "TEST64mi32", 0|(1<<TID::MayLoad), 0|24|(1<<12)|(4<<13)|(247<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2398 = TEST64mi32
- { 2399, 2, 0, 0, "TEST64ri32", 0, 0|16|(1<<12)|(4<<13)|(247<<24), NULL, ImplicitList1, Barriers1, OperandInfo56 }, // Inst #2399 = TEST64ri32
- { 2400, 6, 0, 0, "TEST64rm", 0|(1<<TID::MayLoad), 0|6|(1<<12)|(133<<24), NULL, ImplicitList1, Barriers1, OperandInfo50 }, // Inst #2400 = TEST64rm
- { 2401, 2, 0, 0, "TEST64rr", 0|(1<<TID::Commutable), 0|3|(1<<12)|(133<<24), NULL, ImplicitList1, Barriers1, OperandInfo51 }, // Inst #2401 = TEST64rr
- { 2402, 1, 0, 0, "TEST8i8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<13)|(168<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #2402 = TEST8i8
- { 2403, 6, 0, 0, "TEST8mi", 0|(1<<TID::MayLoad), 0|24|(1<<13)|(246<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2403 = TEST8mi
- { 2404, 2, 0, 0, "TEST8ri", 0, 0|16|(1<<13)|(246<<24), NULL, ImplicitList1, Barriers1, OperandInfo68 }, // Inst #2404 = TEST8ri
- { 2405, 6, 0, 0, "TEST8rm", 0|(1<<TID::MayLoad), 0|6|(132<<24), NULL, ImplicitList1, Barriers1, OperandInfo69 }, // Inst #2405 = TEST8rm
- { 2406, 2, 0, 0, "TEST8rr", 0|(1<<TID::Commutable), 0|3|(132<<24), NULL, ImplicitList1, Barriers1, OperandInfo67 }, // Inst #2406 = TEST8rr
- { 2407, 4, 0, 0, "TLS_addr32", 0, 0, ImplicitList2, ImplicitList9, Barriers3, OperandInfo197 }, // Inst #2407 = TLS_addr32
- { 2408, 4, 0, 0, "TLS_addr64", 0, 0, ImplicitList4, ImplicitList10, Barriers4, OperandInfo198 }, // Inst #2408 = TLS_addr64
- { 2409, 0, 0, 0, "TRAP", 0|(1<<TID::Barrier)|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(11<<24), NULL, NULL, NULL, 0 }, // Inst #2409 = TRAP
- { 2410, 0, 0, 0, "TST_F", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<8)|(228<<24), NULL, NULL, NULL, 0 }, // Inst #2410 = TST_F
- { 2411, 1, 0, 0, "TST_Fp32", 0, 0|(2<<16), NULL, NULL, NULL, OperandInfo100 }, // Inst #2411 = TST_Fp32
- { 2412, 1, 0, 0, "TST_Fp64", 0, 0|(2<<16), NULL, NULL, NULL, OperandInfo101 }, // Inst #2412 = TST_Fp64
- { 2413, 1, 0, 0, "TST_Fp80", 0, 0|(2<<16), NULL, NULL, NULL, OperandInfo102 }, // Inst #2413 = TST_Fp80
- { 2414, 6, 0, 0, "UCOMISDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(46<<24), NULL, ImplicitList1, Barriers1, OperandInfo82 }, // Inst #2414 = UCOMISDrm
- { 2415, 2, 0, 0, "UCOMISDrr", 0, 0|5|(1<<6)|(1<<8)|(46<<24), NULL, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #2415 = UCOMISDrr
- { 2416, 6, 0, 0, "UCOMISSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(46<<24), NULL, ImplicitList1, Barriers1, OperandInfo80 }, // Inst #2416 = UCOMISSrm
- { 2417, 2, 0, 0, "UCOMISSrr", 0, 0|5|(1<<8)|(46<<24), NULL, ImplicitList1, Barriers1, OperandInfo106 }, // Inst #2417 = UCOMISSrr
- { 2418, 1, 0, 0, "UCOM_FIPr", 0|(1<<TID::UnmodeledSideEffects), 0|2|(10<<8)|(232<<24), ImplicitList24, ImplicitList1, Barriers1, OperandInfo31 }, // Inst #2418 = UCOM_FIPr
- { 2419, 1, 0, 0, "UCOM_FIr", 0|(1<<TID::UnmodeledSideEffects), 0|2|(6<<8)|(232<<24), ImplicitList24, ImplicitList1, Barriers1, OperandInfo31 }, // Inst #2419 = UCOM_FIr
- { 2420, 0, 0, 0, "UCOM_FPPr", 0|(1<<TID::UnmodeledSideEffects), 0|1|(5<<8)|(233<<24), ImplicitList24, ImplicitList1, Barriers1, 0 }, // Inst #2420 = UCOM_FPPr
- { 2421, 1, 0, 0, "UCOM_FPr", 0|(1<<TID::UnmodeledSideEffects), 0|2|(8<<8)|(232<<24), ImplicitList24, ImplicitList1, Barriers1, OperandInfo31 }, // Inst #2421 = UCOM_FPr
- { 2422, 2, 0, 0, "UCOM_FpIr32", 0, 0|(5<<16), NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #2422 = UCOM_FpIr32
- { 2423, 2, 0, 0, "UCOM_FpIr64", 0, 0|(5<<16), NULL, ImplicitList1, Barriers1, OperandInfo3 }, // Inst #2423 = UCOM_FpIr64
- { 2424, 2, 0, 0, "UCOM_FpIr80", 0, 0|(5<<16), NULL, ImplicitList1, Barriers1, OperandInfo4 }, // Inst #2424 = UCOM_FpIr80
- { 2425, 2, 0, 0, "UCOM_Fpr32", 0|(1<<TID::UnmodeledSideEffects), 0|(5<<16), NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #2425 = UCOM_Fpr32
- { 2426, 2, 0, 0, "UCOM_Fpr64", 0|(1<<TID::UnmodeledSideEffects), 0|(5<<16), NULL, ImplicitList1, Barriers1, OperandInfo3 }, // Inst #2426 = UCOM_Fpr64
- { 2427, 2, 0, 0, "UCOM_Fpr80", 0|(1<<TID::UnmodeledSideEffects), 0|(5<<16), NULL, ImplicitList1, Barriers1, OperandInfo4 }, // Inst #2427 = UCOM_Fpr80
- { 2428, 1, 0, 0, "UCOM_Fr", 0|(1<<TID::UnmodeledSideEffects), 0|2|(8<<8)|(224<<24), ImplicitList24, ImplicitList1, Barriers1, OperandInfo31 }, // Inst #2428 = UCOM_Fr
- { 2429, 7, 1, 0, "UNPCKHPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(21<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #2429 = UNPCKHPDrm
- { 2430, 3, 1, 0, "UNPCKHPDrr", 0, 0|5|(1<<6)|(1<<8)|(21<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #2430 = UNPCKHPDrr
- { 2431, 7, 1, 0, "UNPCKHPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(21<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #2431 = UNPCKHPSrm
- { 2432, 3, 1, 0, "UNPCKHPSrr", 0, 0|5|(1<<8)|(21<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #2432 = UNPCKHPSrr
- { 2433, 7, 1, 0, "UNPCKLPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(20<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #2433 = UNPCKLPDrm
- { 2434, 3, 1, 0, "UNPCKLPDrr", 0, 0|5|(1<<6)|(1<<8)|(20<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #2434 = UNPCKLPDrr
- { 2435, 7, 1, 0, "UNPCKLPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(20<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #2435 = UNPCKLPSrm
- { 2436, 3, 1, 0, "UNPCKLPSrr", 0, 0|5|(1<<8)|(20<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #2436 = UNPCKLPSrr
- { 2437, 3, 0, 0, "VASTART_SAVE_XMM_REGS", 0|(1<<TID::UsesCustomInserter)|(1<<TID::Variadic), 0, NULL, NULL, NULL, OperandInfo199 }, // Inst #2437 = VASTART_SAVE_XMM_REGS
- { 2438, 5, 0, 0, "VERRm", 0|(1<<TID::UnmodeledSideEffects), 0|28|(1<<8), NULL, NULL, NULL, OperandInfo30 }, // Inst #2438 = VERRm
- { 2439, 1, 0, 0, "VERRr", 0|(1<<TID::UnmodeledSideEffects), 0|20|(1<<8), NULL, NULL, NULL, OperandInfo93 }, // Inst #2439 = VERRr
- { 2440, 5, 0, 0, "VERWm", 0|(1<<TID::UnmodeledSideEffects), 0|29|(1<<8), NULL, NULL, NULL, OperandInfo30 }, // Inst #2440 = VERWm
- { 2441, 1, 0, 0, "VERWr", 0|(1<<TID::UnmodeledSideEffects), 0|21|(1<<8), NULL, NULL, NULL, OperandInfo93 }, // Inst #2441 = VERWr
- { 2442, 0, 0, 0, "VMCALL", 0|(1<<TID::UnmodeledSideEffects), 0|33|(1<<8)|(1<<24), NULL, NULL, NULL, 0 }, // Inst #2442 = VMCALL
- { 2443, 5, 0, 0, "VMCLEARm", 0|(1<<TID::UnmodeledSideEffects), 0|30|(1<<6)|(1<<8)|(199<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2443 = VMCLEARm
- { 2444, 0, 0, 0, "VMLAUNCH", 0|(1<<TID::UnmodeledSideEffects), 0|34|(1<<8)|(1<<24), NULL, NULL, NULL, 0 }, // Inst #2444 = VMLAUNCH
- { 2445, 5, 0, 0, "VMPTRLDm", 0|(1<<TID::UnmodeledSideEffects), 0|30|(1<<8)|(199<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2445 = VMPTRLDm
- { 2446, 5, 1, 0, "VMPTRSTm", 0|(1<<TID::UnmodeledSideEffects), 0|31|(1<<8)|(199<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2446 = VMPTRSTm
- { 2447, 6, 1, 0, "VMREAD32rm", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(120<<24), NULL, NULL, NULL, OperandInfo11 }, // Inst #2447 = VMREAD32rm
- { 2448, 2, 1, 0, "VMREAD32rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(120<<24), NULL, NULL, NULL, OperandInfo49 }, // Inst #2448 = VMREAD32rr
- { 2449, 6, 1, 0, "VMREAD64rm", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(120<<24), NULL, NULL, NULL, OperandInfo15 }, // Inst #2449 = VMREAD64rm
- { 2450, 2, 1, 0, "VMREAD64rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(120<<24), NULL, NULL, NULL, OperandInfo51 }, // Inst #2450 = VMREAD64rr
- { 2451, 0, 0, 0, "VMRESUME", 0|(1<<TID::UnmodeledSideEffects), 0|35|(1<<8)|(1<<24), NULL, NULL, NULL, 0 }, // Inst #2451 = VMRESUME
- { 2452, 6, 1, 0, "VMWRITE32rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(121<<24), NULL, NULL, NULL, OperandInfo48 }, // Inst #2452 = VMWRITE32rm
- { 2453, 2, 1, 0, "VMWRITE32rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(121<<24), NULL, NULL, NULL, OperandInfo49 }, // Inst #2453 = VMWRITE32rr
- { 2454, 6, 1, 0, "VMWRITE64rm", 0|(1<<TID::UnmodeledSideEffects), 0|6|(1<<8)|(121<<24), NULL, NULL, NULL, OperandInfo50 }, // Inst #2454 = VMWRITE64rm
- { 2455, 2, 1, 0, "VMWRITE64rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<8)|(121<<24), NULL, NULL, NULL, OperandInfo51 }, // Inst #2455 = VMWRITE64rr
- { 2456, 0, 0, 0, "VMXOFF", 0|(1<<TID::UnmodeledSideEffects), 0|36|(1<<8)|(1<<24), NULL, NULL, NULL, 0 }, // Inst #2456 = VMXOFF
- { 2457, 5, 0, 0, "VMXON", 0|(1<<TID::UnmodeledSideEffects), 0|30|(11<<8)|(199<<24), NULL, NULL, NULL, OperandInfo30 }, // Inst #2457 = VMXON
- { 2458, 1, 1, 0, "V_SET0", 0|(1<<TID::FoldableAsLoad)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0|32|(1<<8)|(87<<24), NULL, NULL, NULL, OperandInfo200 }, // Inst #2458 = V_SET0
- { 2459, 1, 1, 0, "V_SETALLONES", 0|(1<<TID::FoldableAsLoad)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0|32|(1<<6)|(1<<8)|(118<<24), NULL, NULL, NULL, OperandInfo200 }, // Inst #2459 = V_SETALLONES
- { 2460, 0, 0, 0, "WAIT", 0|(1<<TID::UnmodeledSideEffects), 0|1|(155<<24), NULL, NULL, NULL, 0 }, // Inst #2460 = WAIT
- { 2461, 0, 0, 0, "WBINVD", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(9<<24), NULL, NULL, NULL, 0 }, // Inst #2461 = WBINVD
- { 2462, 5, 0, 0, "WINCALL64m", 0|(1<<TID::Call)|(1<<TID::MayLoad)|(1<<TID::Variadic), 0|26|(255<<24), ImplicitList4, ImplicitList57, Barriers8, OperandInfo30 }, // Inst #2462 = WINCALL64m
- { 2463, 1, 0, 0, "WINCALL64pcrel32", 0|(1<<TID::Call)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0|1|(232<<24), ImplicitList4, ImplicitList57, Barriers8, OperandInfo5 }, // Inst #2463 = WINCALL64pcrel32
- { 2464, 1, 0, 0, "WINCALL64r", 0|(1<<TID::Call)|(1<<TID::Variadic), 0|18|(255<<24), ImplicitList4, ImplicitList57, Barriers8, OperandInfo58 }, // Inst #2464 = WINCALL64r
- { 2465, 0, 0, 0, "WRMSR", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<8)|(48<<24), NULL, NULL, NULL, 0 }, // Inst #2465 = WRMSR
- { 2466, 6, 0, 0, "XADD16rm", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<6)|(1<<8)|(193<<24), NULL, NULL, NULL, OperandInfo7 }, // Inst #2466 = XADD16rm
- { 2467, 2, 1, 0, "XADD16rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<6)|(1<<8)|(193<<24), NULL, NULL, NULL, OperandInfo47 }, // Inst #2467 = XADD16rr
- { 2468, 6, 0, 0, "XADD32rm", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(193<<24), NULL, NULL, NULL, OperandInfo11 }, // Inst #2468 = XADD32rm
- { 2469, 2, 1, 0, "XADD32rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(193<<24), NULL, NULL, NULL, OperandInfo49 }, // Inst #2469 = XADD32rr
- { 2470, 6, 0, 0, "XADD64rm", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(1<<12)|(193<<24), NULL, NULL, NULL, OperandInfo15 }, // Inst #2470 = XADD64rm
- { 2471, 2, 1, 0, "XADD64rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(1<<12)|(193<<24), NULL, NULL, NULL, OperandInfo51 }, // Inst #2471 = XADD64rr
- { 2472, 6, 0, 0, "XADD8rm", 0|(1<<TID::UnmodeledSideEffects), 0|4|(1<<8)|(192<<24), NULL, NULL, NULL, OperandInfo20 }, // Inst #2472 = XADD8rm
- { 2473, 2, 1, 0, "XADD8rr", 0|(1<<TID::UnmodeledSideEffects), 0|3|(1<<8)|(192<<24), NULL, NULL, NULL, OperandInfo67 }, // Inst #2473 = XADD8rr
- { 2474, 1, 0, 0, "XCHG16ar", 0|(1<<TID::UnmodeledSideEffects), 0|2|(1<<6)|(144<<24), NULL, NULL, NULL, OperandInfo93 }, // Inst #2474 = XCHG16ar
- { 2475, 7, 1, 0, "XCHG16rm", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|6|(1<<6)|(135<<24), NULL, NULL, NULL, OperandInfo9 }, // Inst #2475 = XCHG16rm
- { 2476, 3, 1, 0, "XCHG16rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(135<<24), NULL, NULL, NULL, OperandInfo10 }, // Inst #2476 = XCHG16rr
- { 2477, 1, 0, 0, "XCHG32ar", 0|(1<<TID::UnmodeledSideEffects), 0|2|(144<<24), NULL, NULL, NULL, OperandInfo57 }, // Inst #2477 = XCHG32ar
- { 2478, 7, 1, 0, "XCHG32rm", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|6|(135<<24), NULL, NULL, NULL, OperandInfo13 }, // Inst #2478 = XCHG32rm
- { 2479, 3, 1, 0, "XCHG32rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(135<<24), NULL, NULL, NULL, OperandInfo14 }, // Inst #2479 = XCHG32rr
- { 2480, 1, 0, 0, "XCHG64ar", 0|(1<<TID::UnmodeledSideEffects), 0|2|(1<<12)|(144<<24), NULL, NULL, NULL, OperandInfo58 }, // Inst #2480 = XCHG64ar
- { 2481, 7, 1, 0, "XCHG64rm", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|6|(1<<12)|(135<<24), NULL, NULL, NULL, OperandInfo17 }, // Inst #2481 = XCHG64rm
- { 2482, 3, 1, 0, "XCHG64rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<12)|(135<<24), NULL, NULL, NULL, OperandInfo18 }, // Inst #2482 = XCHG64rr
- { 2483, 7, 1, 0, "XCHG8rm", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|6|(134<<24), NULL, NULL, NULL, OperandInfo22 }, // Inst #2483 = XCHG8rm
- { 2484, 3, 1, 0, "XCHG8rr", 0|(1<<TID::UnmodeledSideEffects), 0|5|(134<<24), NULL, NULL, NULL, OperandInfo23 }, // Inst #2484 = XCHG8rr
- { 2485, 1, 0, 0, "XCH_F", 0|(1<<TID::UnmodeledSideEffects), 0|2|(4<<8)|(200<<24), NULL, NULL, NULL, OperandInfo31 }, // Inst #2485 = XCH_F
- { 2486, 0, 0, 0, "XLAT", 0|(1<<TID::UnmodeledSideEffects), 0|1|(215<<24), NULL, NULL, NULL, 0 }, // Inst #2486 = XLAT
- { 2487, 1, 0, 0, "XOR16i16", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<6)|(3<<13)|(53<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #2487 = XOR16i16
- { 2488, 6, 0, 0, "XOR16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|30|(1<<6)|(3<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2488 = XOR16mi
- { 2489, 6, 0, 0, "XOR16mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|30|(1<<6)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2489 = XOR16mi8
- { 2490, 6, 0, 0, "XOR16mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<6)|(49<<24), NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #2490 = XOR16mr
- { 2491, 3, 1, 0, "XOR16ri", 0, 0|22|(1<<6)|(3<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #2491 = XOR16ri
- { 2492, 3, 1, 0, "XOR16ri8", 0, 0|22|(1<<6)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #2492 = XOR16ri8
- { 2493, 7, 1, 0, "XOR16rm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(51<<24), NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #2493 = XOR16rm
- { 2494, 3, 1, 0, "XOR16rr", 0|(1<<TID::Commutable), 0|3|(1<<6)|(49<<24), NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2494 = XOR16rr
- { 2495, 3, 1, 0, "XOR16rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<6)|(51<<24), NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2495 = XOR16rr_REV
- { 2496, 1, 0, 0, "XOR32i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(4<<13)|(53<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #2496 = XOR32i32
- { 2497, 6, 0, 0, "XOR32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|30|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2497 = XOR32mi
- { 2498, 6, 0, 0, "XOR32mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|30|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2498 = XOR32mi8
- { 2499, 6, 0, 0, "XOR32mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(49<<24), NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #2499 = XOR32mr
- { 2500, 3, 1, 0, "XOR32ri", 0, 0|22|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2500 = XOR32ri
- { 2501, 3, 1, 0, "XOR32ri8", 0, 0|22|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2501 = XOR32ri8
- { 2502, 7, 1, 0, "XOR32rm", 0|(1<<TID::MayLoad), 0|6|(51<<24), NULL, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #2502 = XOR32rm
- { 2503, 3, 1, 0, "XOR32rr", 0|(1<<TID::Commutable), 0|3|(49<<24), NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #2503 = XOR32rr
- { 2504, 3, 1, 0, "XOR32rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(51<<24), NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #2504 = XOR32rr_REV
- { 2505, 1, 0, 0, "XOR64i32", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<12)|(4<<13)|(53<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #2505 = XOR64i32
- { 2506, 6, 0, 0, "XOR64mi32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|30|(1<<12)|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2506 = XOR64mi32
- { 2507, 6, 0, 0, "XOR64mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|30|(1<<12)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2507 = XOR64mi8
- { 2508, 6, 0, 0, "XOR64mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(1<<12)|(49<<24), NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #2508 = XOR64mr
- { 2509, 3, 1, 0, "XOR64ri32", 0, 0|22|(1<<12)|(4<<13)|(129<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2509 = XOR64ri32
- { 2510, 3, 1, 0, "XOR64ri8", 0, 0|22|(1<<12)|(1<<13)|(131<<24), NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2510 = XOR64ri8
- { 2511, 7, 1, 0, "XOR64rm", 0|(1<<TID::MayLoad), 0|6|(1<<12)|(51<<24), NULL, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #2511 = XOR64rm
- { 2512, 3, 1, 0, "XOR64rr", 0|(1<<TID::Commutable), 0|3|(1<<12)|(49<<24), NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #2512 = XOR64rr
- { 2513, 3, 1, 0, "XOR64rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(1<<12)|(51<<24), NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #2513 = XOR64rr_REV
- { 2514, 1, 0, 0, "XOR8i8", 0|(1<<TID::UnmodeledSideEffects), 0|1|(1<<13)|(52<<24), NULL, ImplicitList1, Barriers1, OperandInfo5 }, // Inst #2514 = XOR8i8
- { 2515, 6, 0, 0, "XOR8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|30|(1<<13)|(128<<24), NULL, ImplicitList1, Barriers1, OperandInfo6 }, // Inst #2515 = XOR8mi
- { 2516, 6, 0, 0, "XOR8mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0|4|(48<<24), NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #2516 = XOR8mr
- { 2517, 3, 1, 0, "XOR8ri", 0, 0|22|(1<<13)|(128<<24), NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #2517 = XOR8ri
- { 2518, 7, 1, 0, "XOR8rm", 0|(1<<TID::MayLoad), 0|6|(50<<24), NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #2518 = XOR8rm
- { 2519, 3, 1, 0, "XOR8rr", 0|(1<<TID::Commutable), 0|3|(48<<24), NULL, ImplicitList1, Barriers1, OperandInfo23 }, // Inst #2519 = XOR8rr
- { 2520, 3, 1, 0, "XOR8rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0|5|(50<<24), NULL, ImplicitList1, Barriers1, OperandInfo23 }, // Inst #2520 = XOR8rr_REV
- { 2521, 7, 1, 0, "XORPDrm", 0|(1<<TID::MayLoad), 0|6|(1<<6)|(1<<8)|(87<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #2521 = XORPDrm
- { 2522, 3, 1, 0, "XORPDrr", 0|(1<<TID::Commutable), 0|5|(1<<6)|(1<<8)|(87<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #2522 = XORPDrr
- { 2523, 7, 1, 0, "XORPSrm", 0|(1<<TID::MayLoad), 0|6|(1<<8)|(87<<24), NULL, NULL, NULL, OperandInfo24 }, // Inst #2523 = XORPSrm
- { 2524, 3, 1, 0, "XORPSrr", 0|(1<<TID::Commutable), 0|5|(1<<8)|(87<<24), NULL, NULL, NULL, OperandInfo25 }, // Inst #2524 = XORPSrr
+ { 0, 0, 0, 0, "PHI", 0|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, 0 }, // Inst #0 = PHI
+ { 1, 0, 0, 0, "INLINEASM", 0|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, 0 }, // Inst #1 = INLINEASM
+ { 2, 1, 0, 0, "PROLOG_LABEL", 0|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #2 = PROLOG_LABEL
+ { 3, 1, 0, 0, "EH_LABEL", 0|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #3 = EH_LABEL
+ { 4, 1, 0, 0, "GC_LABEL", 0|(1<<TID::NotDuplicable)|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #4 = GC_LABEL
+ { 5, 0, 0, 0, "KILL", 0|(1<<TID::Variadic), 0x0ULL, NULL, NULL, NULL, 0 }, // Inst #5 = KILL
+ { 6, 3, 1, 0, "EXTRACT_SUBREG", 0, 0x0ULL, NULL, NULL, NULL, OperandInfo3 }, // Inst #6 = EXTRACT_SUBREG
+ { 7, 4, 1, 0, "INSERT_SUBREG", 0, 0x0ULL, NULL, NULL, NULL, OperandInfo4 }, // Inst #7 = INSERT_SUBREG
+ { 8, 1, 1, 0, "IMPLICIT_DEF", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0x0ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #8 = IMPLICIT_DEF
+ { 9, 4, 1, 0, "SUBREG_TO_REG", 0, 0x0ULL, NULL, NULL, NULL, OperandInfo5 }, // Inst #9 = SUBREG_TO_REG
+ { 10, 3, 1, 0, "COPY_TO_REGCLASS", 0|(1<<TID::CheapAsAMove), 0x0ULL, NULL, NULL, NULL, OperandInfo3 }, // Inst #10 = COPY_TO_REGCLASS
+ { 11, 0, 0, 0, "DBG_VALUE", 0|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects)|(1<<TID::CheapAsAMove), 0x0ULL, NULL, NULL, NULL, 0 }, // Inst #11 = DBG_VALUE
+ { 12, 1, 1, 0, "REG_SEQUENCE", 0|(1<<TID::Variadic)|(1<<TID::CheapAsAMove), 0x0ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #12 = REG_SEQUENCE
+ { 13, 2, 1, 0, "COPY", 0|(1<<TID::CheapAsAMove), 0x0ULL, NULL, NULL, NULL, OperandInfo6 }, // Inst #13 = COPY
+ { 14, 0, 0, 0, "ABS_F", 0|(1<<TID::UnmodeledSideEffects), 0xe1000401ULL, NULL, NULL, NULL, 0 }, // Inst #14 = ABS_F
+ { 15, 2, 1, 0, "ABS_Fp32", 0, 0x30000ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #15 = ABS_Fp32
+ { 16, 2, 1, 0, "ABS_Fp64", 0, 0x30000ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #16 = ABS_Fp64
+ { 17, 2, 1, 0, "ABS_Fp80", 0, 0x30000ULL, NULL, NULL, NULL, OperandInfo9 }, // Inst #17 = ABS_Fp80
+ { 18, 1, 0, 0, "ADC16i16", 0|(1<<TID::UnmodeledSideEffects), 0x15006041ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #18 = ADC16i16
+ { 19, 6, 0, 0, "ADC16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100605aULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #19 = ADC16mi
+ { 20, 6, 0, 0, "ADC16mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8300205aULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #20 = ADC16mi8
+ { 21, 6, 0, 0, "ADC16mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x11000044ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #21 = ADC16mr
+ { 22, 3, 1, 0, "ADC16ri", 0, 0x81006052ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #22 = ADC16ri
+ { 23, 3, 1, 0, "ADC16ri8", 0, 0x83002052ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #23 = ADC16ri8
+ { 24, 7, 1, 0, "ADC16rm", 0|(1<<TID::MayLoad), 0x13000046ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #24 = ADC16rm
+ { 25, 3, 1, 0, "ADC16rr", 0|(1<<TID::Commutable), 0x11000043ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #25 = ADC16rr
+ { 26, 3, 1, 0, "ADC16rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x13000045ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #26 = ADC16rr_REV
+ { 27, 1, 0, 0, "ADC32i32", 0|(1<<TID::UnmodeledSideEffects), 0x1500a001ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #27 = ADC32i32
+ { 28, 6, 0, 0, "ADC32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100a01aULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #28 = ADC32mi
+ { 29, 6, 0, 0, "ADC32mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8300201aULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #29 = ADC32mi8
+ { 30, 6, 0, 0, "ADC32mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x11000004ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #30 = ADC32mr
+ { 31, 3, 1, 0, "ADC32ri", 0, 0x8100a012ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #31 = ADC32ri
+ { 32, 3, 1, 0, "ADC32ri8", 0, 0x83002012ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #32 = ADC32ri8
+ { 33, 7, 1, 0, "ADC32rm", 0|(1<<TID::MayLoad), 0x13000006ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #33 = ADC32rm
+ { 34, 3, 1, 0, "ADC32rr", 0|(1<<TID::Commutable), 0x11000003ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #34 = ADC32rr
+ { 35, 3, 1, 0, "ADC32rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x13000005ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #35 = ADC32rr_REV
+ { 36, 1, 0, 0, "ADC64i32", 0|(1<<TID::UnmodeledSideEffects), 0x1500b001ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #36 = ADC64i32
+ { 37, 6, 0, 0, "ADC64mi32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100b01aULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #37 = ADC64mi32
+ { 38, 6, 0, 0, "ADC64mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8300301aULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #38 = ADC64mi8
+ { 39, 6, 0, 0, "ADC64mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x11001004ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #39 = ADC64mr
+ { 40, 3, 1, 0, "ADC64ri32", 0, 0x8100b012ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #40 = ADC64ri32
+ { 41, 3, 1, 0, "ADC64ri8", 0, 0x83003012ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #41 = ADC64ri8
+ { 42, 7, 1, 0, "ADC64rm", 0|(1<<TID::MayLoad), 0x13001006ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #42 = ADC64rm
+ { 43, 3, 1, 0, "ADC64rr", 0|(1<<TID::Commutable), 0x11001003ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #43 = ADC64rr
+ { 44, 3, 1, 0, "ADC64rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x13001005ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo23 }, // Inst #44 = ADC64rr_REV
+ { 45, 1, 0, 0, "ADC8i8", 0|(1<<TID::UnmodeledSideEffects), 0x14002001ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #45 = ADC8i8
+ { 46, 6, 0, 0, "ADC8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8000201aULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #46 = ADC8mi
+ { 47, 6, 0, 0, "ADC8mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x10000004ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo24 }, // Inst #47 = ADC8mr
+ { 48, 3, 1, 0, "ADC8ri", 0, 0x80002012ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #48 = ADC8ri
+ { 49, 7, 1, 0, "ADC8rm", 0|(1<<TID::MayLoad), 0x12000006ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #49 = ADC8rm
+ { 50, 3, 1, 0, "ADC8rr", 0|(1<<TID::Commutable), 0x10000003ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #50 = ADC8rr
+ { 51, 3, 1, 0, "ADC8rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x12000005ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #51 = ADC8rr_REV
+ { 52, 1, 0, 0, "ADD16i16", 0|(1<<TID::UnmodeledSideEffects), 0x5006041ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #52 = ADD16i16
+ { 53, 6, 0, 0, "ADD16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x81006058ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #53 = ADD16mi
+ { 54, 6, 0, 0, "ADD16mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x83002058ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #54 = ADD16mi8
+ { 55, 6, 0, 0, "ADD16mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x1000044ULL, NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #55 = ADD16mr
+ { 56, 3, 1, 0, "ADD16ri", 0|(1<<TID::ConvertibleTo3Addr), 0x81006050ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #56 = ADD16ri
+ { 57, 3, 1, 0, "ADD16ri8", 0|(1<<TID::ConvertibleTo3Addr), 0x83002050ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #57 = ADD16ri8
+ { 58, 7, 1, 0, "ADD16rm", 0|(1<<TID::MayLoad), 0x3000046ULL, NULL, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #58 = ADD16rm
+ { 59, 3, 1, 0, "ADD16rr", 0|(1<<TID::ConvertibleTo3Addr)|(1<<TID::Commutable), 0x1000043ULL, NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #59 = ADD16rr
+ { 60, 3, 1, 0, "ADD16rr_alt", 0|(1<<TID::UnmodeledSideEffects), 0x3000045ULL, NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #60 = ADD16rr_alt
+ { 61, 1, 0, 0, "ADD32i32", 0|(1<<TID::UnmodeledSideEffects), 0x500a001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #61 = ADD32i32
+ { 62, 6, 0, 0, "ADD32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100a018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #62 = ADD32mi
+ { 63, 6, 0, 0, "ADD32mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x83002018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #63 = ADD32mi8
+ { 64, 6, 0, 0, "ADD32mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x1000004ULL, NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #64 = ADD32mr
+ { 65, 3, 1, 0, "ADD32ri", 0|(1<<TID::ConvertibleTo3Addr), 0x8100a010ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #65 = ADD32ri
+ { 66, 3, 1, 0, "ADD32ri8", 0|(1<<TID::ConvertibleTo3Addr), 0x83002010ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #66 = ADD32ri8
+ { 67, 7, 1, 0, "ADD32rm", 0|(1<<TID::MayLoad), 0x3000006ULL, NULL, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #67 = ADD32rm
+ { 68, 3, 1, 0, "ADD32rr", 0|(1<<TID::ConvertibleTo3Addr)|(1<<TID::Commutable), 0x1000003ULL, NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #68 = ADD32rr
+ { 69, 3, 1, 0, "ADD32rr_alt", 0|(1<<TID::UnmodeledSideEffects), 0x3000005ULL, NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #69 = ADD32rr_alt
+ { 70, 1, 0, 0, "ADD64i32", 0|(1<<TID::UnmodeledSideEffects), 0x500b001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #70 = ADD64i32
+ { 71, 6, 0, 0, "ADD64mi32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100b018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #71 = ADD64mi32
+ { 72, 6, 0, 0, "ADD64mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x83003018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #72 = ADD64mi8
+ { 73, 6, 0, 0, "ADD64mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x1001004ULL, NULL, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #73 = ADD64mr
+ { 74, 3, 1, 0, "ADD64ri32", 0|(1<<TID::ConvertibleTo3Addr), 0x8100b010ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #74 = ADD64ri32
+ { 75, 3, 1, 0, "ADD64ri8", 0|(1<<TID::ConvertibleTo3Addr), 0x83003010ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #75 = ADD64ri8
+ { 76, 7, 1, 0, "ADD64rm", 0|(1<<TID::MayLoad), 0x3001006ULL, NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #76 = ADD64rm
+ { 77, 3, 1, 0, "ADD64rr", 0|(1<<TID::ConvertibleTo3Addr)|(1<<TID::Commutable), 0x1001003ULL, NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #77 = ADD64rr
+ { 78, 3, 1, 0, "ADD64rr_alt", 0|(1<<TID::ConvertibleTo3Addr)|(1<<TID::UnmodeledSideEffects), 0x3001005ULL, NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #78 = ADD64rr_alt
+ { 79, 1, 0, 0, "ADD8i8", 0|(1<<TID::UnmodeledSideEffects), 0x4002001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #79 = ADD8i8
+ { 80, 6, 0, 0, "ADD8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x80002018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #80 = ADD8mi
+ { 81, 6, 0, 0, "ADD8mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x4ULL, NULL, ImplicitList1, Barriers1, OperandInfo24 }, // Inst #81 = ADD8mr
+ { 82, 3, 1, 0, "ADD8ri", 0, 0x80002010ULL, NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #82 = ADD8ri
+ { 83, 7, 1, 0, "ADD8rm", 0|(1<<TID::MayLoad), 0x2000006ULL, NULL, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #83 = ADD8rm
+ { 84, 3, 1, 0, "ADD8rr", 0|(1<<TID::Commutable), 0x3ULL, NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #84 = ADD8rr
+ { 85, 3, 1, 0, "ADD8rr_alt", 0|(1<<TID::UnmodeledSideEffects), 0x2000005ULL, NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #85 = ADD8rr_alt
+ { 86, 7, 1, 0, "ADDPDrm", 0|(1<<TID::MayLoad), 0x58800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #86 = ADDPDrm
+ { 87, 3, 1, 0, "ADDPDrr", 0|(1<<TID::Commutable), 0x58800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #87 = ADDPDrr
+ { 88, 7, 1, 0, "ADDPSrm", 0|(1<<TID::MayLoad), 0x58400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #88 = ADDPSrm
+ { 89, 3, 1, 0, "ADDPSrr", 0|(1<<TID::Commutable), 0x58400105ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #89 = ADDPSrr
+ { 90, 7, 1, 0, "ADDSDrm", 0|(1<<TID::MayLoad), 0x58000b06ULL, NULL, NULL, NULL, OperandInfo30 }, // Inst #90 = ADDSDrm
+ { 91, 7, 1, 0, "ADDSDrm_Int", 0|(1<<TID::MayLoad), 0x58000b06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #91 = ADDSDrm_Int
+ { 92, 3, 1, 0, "ADDSDrr", 0|(1<<TID::Commutable), 0x58000b05ULL, NULL, NULL, NULL, OperandInfo31 }, // Inst #92 = ADDSDrr
+ { 93, 3, 1, 0, "ADDSDrr_Int", 0, 0x58000b05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #93 = ADDSDrr_Int
+ { 94, 7, 1, 0, "ADDSSrm", 0|(1<<TID::MayLoad), 0x58000c06ULL, NULL, NULL, NULL, OperandInfo32 }, // Inst #94 = ADDSSrm
+ { 95, 7, 1, 0, "ADDSSrm_Int", 0|(1<<TID::MayLoad), 0x58000c06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #95 = ADDSSrm_Int
+ { 96, 3, 1, 0, "ADDSSrr", 0|(1<<TID::Commutable), 0x58000c05ULL, NULL, NULL, NULL, OperandInfo33 }, // Inst #96 = ADDSSrr
+ { 97, 3, 1, 0, "ADDSSrr_Int", 0, 0x58000c05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #97 = ADDSSrr_Int
+ { 98, 7, 1, 0, "ADDSUBPDrm", 0|(1<<TID::MayLoad), 0xd0800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #98 = ADDSUBPDrm
+ { 99, 3, 1, 0, "ADDSUBPDrr", 0, 0xd0800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #99 = ADDSUBPDrr
+ { 100, 7, 1, 0, "ADDSUBPSrm", 0|(1<<TID::MayLoad), 0xd0800b06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #100 = ADDSUBPSrm
+ { 101, 3, 1, 0, "ADDSUBPSrr", 0, 0xd0800b05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #101 = ADDSUBPSrr
+ { 102, 5, 0, 0, "ADD_F32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xd8000018ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #102 = ADD_F32m
+ { 103, 5, 0, 0, "ADD_F64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xdc000018ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #103 = ADD_F64m
+ { 104, 5, 0, 0, "ADD_FI16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xde000018ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #104 = ADD_FI16m
+ { 105, 5, 0, 0, "ADD_FI32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xda000018ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #105 = ADD_FI32m
+ { 106, 1, 0, 0, "ADD_FPrST0", 0|(1<<TID::UnmodeledSideEffects), 0xc0000902ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #106 = ADD_FPrST0
+ { 107, 1, 0, 0, "ADD_FST0r", 0|(1<<TID::UnmodeledSideEffects), 0xc0000302ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #107 = ADD_FST0r
+ { 108, 3, 1, 0, "ADD_Fp32", 0, 0x40000ULL, NULL, NULL, NULL, OperandInfo36 }, // Inst #108 = ADD_Fp32
+ { 109, 7, 1, 0, "ADD_Fp32m", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #109 = ADD_Fp32m
+ { 110, 3, 1, 0, "ADD_Fp64", 0, 0x40000ULL, NULL, NULL, NULL, OperandInfo38 }, // Inst #110 = ADD_Fp64
+ { 111, 7, 1, 0, "ADD_Fp64m", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #111 = ADD_Fp64m
+ { 112, 7, 1, 0, "ADD_Fp64m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #112 = ADD_Fp64m32
+ { 113, 3, 1, 0, "ADD_Fp80", 0, 0x40000ULL, NULL, NULL, NULL, OperandInfo40 }, // Inst #113 = ADD_Fp80
+ { 114, 7, 1, 0, "ADD_Fp80m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #114 = ADD_Fp80m32
+ { 115, 7, 1, 0, "ADD_Fp80m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #115 = ADD_Fp80m64
+ { 116, 7, 1, 0, "ADD_FpI16m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #116 = ADD_FpI16m32
+ { 117, 7, 1, 0, "ADD_FpI16m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #117 = ADD_FpI16m64
+ { 118, 7, 1, 0, "ADD_FpI16m80", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #118 = ADD_FpI16m80
+ { 119, 7, 1, 0, "ADD_FpI32m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #119 = ADD_FpI32m32
+ { 120, 7, 1, 0, "ADD_FpI32m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #120 = ADD_FpI32m64
+ { 121, 7, 1, 0, "ADD_FpI32m80", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #121 = ADD_FpI32m80
+ { 122, 1, 0, 0, "ADD_FrST0", 0|(1<<TID::UnmodeledSideEffects), 0xc0000702ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #122 = ADD_FrST0
+ { 123, 1, 0, 0, "ADJCALLSTACKDOWN32", 0, 0x0ULL, ImplicitList2, ImplicitList3, Barriers1, OperandInfo2 }, // Inst #123 = ADJCALLSTACKDOWN32
+ { 124, 1, 0, 0, "ADJCALLSTACKDOWN64", 0, 0x0ULL, ImplicitList4, ImplicitList5, Barriers1, OperandInfo2 }, // Inst #124 = ADJCALLSTACKDOWN64
+ { 125, 2, 0, 0, "ADJCALLSTACKUP32", 0, 0x0ULL, ImplicitList2, ImplicitList3, Barriers1, OperandInfo6 }, // Inst #125 = ADJCALLSTACKUP32
+ { 126, 2, 0, 0, "ADJCALLSTACKUP64", 0, 0x0ULL, ImplicitList4, ImplicitList5, Barriers1, OperandInfo6 }, // Inst #126 = ADJCALLSTACKUP64
+ { 127, 7, 1, 0, "AESDECLASTrm", 0|(1<<TID::MayLoad), 0xdfc00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #127 = AESDECLASTrm
+ { 128, 3, 1, 0, "AESDECLASTrr", 0, 0xdfc00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #128 = AESDECLASTrr
+ { 129, 7, 1, 0, "AESDECrm", 0|(1<<TID::MayLoad), 0xdec00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #129 = AESDECrm
+ { 130, 3, 1, 0, "AESDECrr", 0, 0xdec00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #130 = AESDECrr
+ { 131, 7, 1, 0, "AESENCLASTrm", 0|(1<<TID::MayLoad), 0xddc00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #131 = AESENCLASTrm
+ { 132, 3, 1, 0, "AESENCLASTrr", 0, 0xddc00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #132 = AESENCLASTrr
+ { 133, 7, 1, 0, "AESENCrm", 0|(1<<TID::MayLoad), 0xdcc00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #133 = AESENCrm
+ { 134, 3, 1, 0, "AESENCrr", 0, 0xdcc00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #134 = AESENCrr
+ { 135, 6, 1, 0, "AESIMCrm", 0|(1<<TID::MayLoad), 0xdbc00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #135 = AESIMCrm
+ { 136, 2, 1, 0, "AESIMCrr", 0, 0xdbc00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #136 = AESIMCrr
+ { 137, 7, 1, 0, "AESKEYGENASSIST128rm", 0|(1<<TID::MayLoad), 0xdfc02e46ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #137 = AESKEYGENASSIST128rm
+ { 138, 3, 1, 0, "AESKEYGENASSIST128rr", 0, 0xdfc02e45ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #138 = AESKEYGENASSIST128rr
+ { 139, 1, 0, 0, "AND16i16", 0|(1<<TID::UnmodeledSideEffects), 0x25006041ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #139 = AND16i16
+ { 140, 6, 0, 0, "AND16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100605cULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #140 = AND16mi
+ { 141, 6, 0, 0, "AND16mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8300205cULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #141 = AND16mi8
+ { 142, 6, 0, 0, "AND16mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x21000044ULL, NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #142 = AND16mr
+ { 143, 3, 1, 0, "AND16ri", 0, 0x81006054ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #143 = AND16ri
+ { 144, 3, 1, 0, "AND16ri8", 0, 0x83002054ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #144 = AND16ri8
+ { 145, 7, 1, 0, "AND16rm", 0|(1<<TID::MayLoad), 0x23000046ULL, NULL, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #145 = AND16rm
+ { 146, 3, 1, 0, "AND16rr", 0|(1<<TID::Commutable), 0x21000043ULL, NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #146 = AND16rr
+ { 147, 3, 1, 0, "AND16rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x23000045ULL, NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #147 = AND16rr_REV
+ { 148, 1, 0, 0, "AND32i32", 0|(1<<TID::UnmodeledSideEffects), 0x2500a001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #148 = AND32i32
+ { 149, 6, 0, 0, "AND32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100a01cULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #149 = AND32mi
+ { 150, 6, 0, 0, "AND32mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8300201cULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #150 = AND32mi8
+ { 151, 6, 0, 0, "AND32mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x21000004ULL, NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #151 = AND32mr
+ { 152, 3, 1, 0, "AND32ri", 0, 0x8100a014ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #152 = AND32ri
+ { 153, 3, 1, 0, "AND32ri8", 0, 0x83002014ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #153 = AND32ri8
+ { 154, 7, 1, 0, "AND32rm", 0|(1<<TID::MayLoad), 0x23000006ULL, NULL, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #154 = AND32rm
+ { 155, 3, 1, 0, "AND32rr", 0|(1<<TID::Commutable), 0x21000003ULL, NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #155 = AND32rr
+ { 156, 3, 1, 0, "AND32rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x23000005ULL, NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #156 = AND32rr_REV
+ { 157, 1, 0, 0, "AND64i32", 0|(1<<TID::UnmodeledSideEffects), 0x2500b001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #157 = AND64i32
+ { 158, 6, 0, 0, "AND64mi32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100b01cULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #158 = AND64mi32
+ { 159, 6, 0, 0, "AND64mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8300301cULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #159 = AND64mi8
+ { 160, 6, 0, 0, "AND64mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x21001004ULL, NULL, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #160 = AND64mr
+ { 161, 3, 1, 0, "AND64ri32", 0, 0x8100b014ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #161 = AND64ri32
+ { 162, 3, 1, 0, "AND64ri8", 0, 0x83003014ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #162 = AND64ri8
+ { 163, 7, 1, 0, "AND64rm", 0|(1<<TID::MayLoad), 0x23001006ULL, NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #163 = AND64rm
+ { 164, 3, 1, 0, "AND64rr", 0|(1<<TID::Commutable), 0x21001003ULL, NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #164 = AND64rr
+ { 165, 3, 1, 0, "AND64rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x23001005ULL, NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #165 = AND64rr_REV
+ { 166, 1, 0, 0, "AND8i8", 0|(1<<TID::UnmodeledSideEffects), 0x24002001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #166 = AND8i8
+ { 167, 6, 0, 0, "AND8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8000201cULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #167 = AND8mi
+ { 168, 6, 0, 0, "AND8mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x20000004ULL, NULL, ImplicitList1, Barriers1, OperandInfo24 }, // Inst #168 = AND8mr
+ { 169, 3, 1, 0, "AND8ri", 0, 0x80002014ULL, NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #169 = AND8ri
+ { 170, 7, 1, 0, "AND8rm", 0|(1<<TID::MayLoad), 0x22000006ULL, NULL, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #170 = AND8rm
+ { 171, 3, 1, 0, "AND8rr", 0|(1<<TID::Commutable), 0x20000003ULL, NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #171 = AND8rr
+ { 172, 3, 1, 0, "AND8rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x22000005ULL, NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #172 = AND8rr_REV
+ { 173, 7, 1, 0, "ANDNPDrm", 0|(1<<TID::MayLoad), 0x55800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #173 = ANDNPDrm
+ { 174, 3, 1, 0, "ANDNPDrr", 0, 0x55800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #174 = ANDNPDrr
+ { 175, 7, 1, 0, "ANDNPSrm", 0|(1<<TID::MayLoad), 0x55400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #175 = ANDNPSrm
+ { 176, 3, 1, 0, "ANDNPSrr", 0, 0x55400105ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #176 = ANDNPSrr
+ { 177, 7, 1, 0, "ANDPDrm", 0|(1<<TID::MayLoad), 0x54800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #177 = ANDPDrm
+ { 178, 3, 1, 0, "ANDPDrr", 0|(1<<TID::Commutable), 0x54800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #178 = ANDPDrr
+ { 179, 7, 1, 0, "ANDPSrm", 0|(1<<TID::MayLoad), 0x54400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #179 = ANDPSrm
+ { 180, 3, 1, 0, "ANDPSrr", 0|(1<<TID::Commutable), 0x54400105ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #180 = ANDPSrr
+ { 181, 9, 2, 0, "ATOMADD6432", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList6, ImplicitList7, Barriers2, OperandInfo46 }, // Inst #181 = ATOMADD6432
+ { 182, 7, 1, 0, "ATOMAND16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #182 = ATOMAND16
+ { 183, 7, 1, 0, "ATOMAND32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo48 }, // Inst #183 = ATOMAND32
+ { 184, 7, 1, 0, "ATOMAND64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #184 = ATOMAND64
+ { 185, 9, 2, 0, "ATOMAND6432", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList6, ImplicitList7, Barriers2, OperandInfo46 }, // Inst #185 = ATOMAND6432
+ { 186, 7, 1, 0, "ATOMAND8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo50 }, // Inst #186 = ATOMAND8
+ { 187, 7, 1, 0, "ATOMMAX16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #187 = ATOMMAX16
+ { 188, 7, 1, 0, "ATOMMAX32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo48 }, // Inst #188 = ATOMMAX32
+ { 189, 7, 1, 0, "ATOMMAX64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #189 = ATOMMAX64
+ { 190, 7, 1, 0, "ATOMMIN16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #190 = ATOMMIN16
+ { 191, 7, 1, 0, "ATOMMIN32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo48 }, // Inst #191 = ATOMMIN32
+ { 192, 7, 1, 0, "ATOMMIN64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #192 = ATOMMIN64
+ { 193, 7, 1, 0, "ATOMNAND16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #193 = ATOMNAND16
+ { 194, 7, 1, 0, "ATOMNAND32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo48 }, // Inst #194 = ATOMNAND32
+ { 195, 7, 1, 0, "ATOMNAND64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #195 = ATOMNAND64
+ { 196, 9, 2, 0, "ATOMNAND6432", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList6, ImplicitList7, Barriers2, OperandInfo46 }, // Inst #196 = ATOMNAND6432
+ { 197, 7, 1, 0, "ATOMNAND8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo50 }, // Inst #197 = ATOMNAND8
+ { 198, 7, 1, 0, "ATOMOR16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #198 = ATOMOR16
+ { 199, 7, 1, 0, "ATOMOR32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo48 }, // Inst #199 = ATOMOR32
+ { 200, 7, 1, 0, "ATOMOR64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #200 = ATOMOR64
+ { 201, 9, 2, 0, "ATOMOR6432", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList6, ImplicitList7, Barriers2, OperandInfo46 }, // Inst #201 = ATOMOR6432
+ { 202, 7, 1, 0, "ATOMOR8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo50 }, // Inst #202 = ATOMOR8
+ { 203, 9, 2, 0, "ATOMSUB6432", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList6, ImplicitList7, Barriers2, OperandInfo46 }, // Inst #203 = ATOMSUB6432
+ { 204, 9, 2, 0, "ATOMSWAP6432", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList6, ImplicitList7, Barriers2, OperandInfo46 }, // Inst #204 = ATOMSWAP6432
+ { 205, 7, 1, 0, "ATOMUMAX16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #205 = ATOMUMAX16
+ { 206, 7, 1, 0, "ATOMUMAX32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo48 }, // Inst #206 = ATOMUMAX32
+ { 207, 7, 1, 0, "ATOMUMAX64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #207 = ATOMUMAX64
+ { 208, 7, 1, 0, "ATOMUMIN16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #208 = ATOMUMIN16
+ { 209, 7, 1, 0, "ATOMUMIN32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo48 }, // Inst #209 = ATOMUMIN32
+ { 210, 7, 1, 0, "ATOMUMIN64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #210 = ATOMUMIN64
+ { 211, 7, 1, 0, "ATOMXOR16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo47 }, // Inst #211 = ATOMXOR16
+ { 212, 7, 1, 0, "ATOMXOR32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo48 }, // Inst #212 = ATOMXOR32
+ { 213, 7, 1, 0, "ATOMXOR64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo49 }, // Inst #213 = ATOMXOR64
+ { 214, 9, 2, 0, "ATOMXOR6432", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList6, ImplicitList7, Barriers2, OperandInfo46 }, // Inst #214 = ATOMXOR6432
+ { 215, 7, 1, 0, "ATOMXOR8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, ImplicitList1, Barriers1, OperandInfo50 }, // Inst #215 = ATOMXOR8
+ { 216, 1, 1, 0, "AVX_SET0PD", 0|(1<<TID::FoldableAsLoad)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0x557800160ULL, NULL, NULL, NULL, OperandInfo51 }, // Inst #216 = AVX_SET0PD
+ { 217, 1, 1, 0, "AVX_SET0PDY", 0|(1<<TID::FoldableAsLoad)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0x557800160ULL, NULL, NULL, NULL, OperandInfo52 }, // Inst #217 = AVX_SET0PDY
+ { 218, 1, 1, 0, "AVX_SET0PI", 0|(1<<TID::FoldableAsLoad)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0xefc00160ULL, NULL, NULL, NULL, OperandInfo51 }, // Inst #218 = AVX_SET0PI
+ { 219, 1, 1, 0, "AVX_SET0PS", 0|(1<<TID::FoldableAsLoad)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0x557400120ULL, NULL, NULL, NULL, OperandInfo51 }, // Inst #219 = AVX_SET0PS
+ { 220, 1, 1, 0, "AVX_SET0PSY", 0|(1<<TID::FoldableAsLoad)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0x557400120ULL, NULL, NULL, NULL, OperandInfo52 }, // Inst #220 = AVX_SET0PSY
+ { 221, 8, 1, 0, "BLENDPDrmi", 0|(1<<TID::MayLoad), 0xdc02e46ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #221 = BLENDPDrmi
+ { 222, 4, 1, 0, "BLENDPDrri", 0, 0xdc02e45ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #222 = BLENDPDrri
+ { 223, 8, 1, 0, "BLENDPSrmi", 0|(1<<TID::MayLoad), 0xcc02e46ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #223 = BLENDPSrmi
+ { 224, 4, 1, 0, "BLENDPSrri", 0, 0xcc02e45ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #224 = BLENDPSrri
+ { 225, 7, 1, 0, "BLENDVPDrm0", 0|(1<<TID::MayLoad), 0x15c00d46ULL, ImplicitList8, NULL, NULL, OperandInfo28 }, // Inst #225 = BLENDVPDrm0
+ { 226, 3, 1, 0, "BLENDVPDrr0", 0, 0x15c00d45ULL, ImplicitList8, NULL, NULL, OperandInfo29 }, // Inst #226 = BLENDVPDrr0
+ { 227, 7, 1, 0, "BLENDVPSrm0", 0|(1<<TID::MayLoad), 0x14c00d46ULL, ImplicitList8, NULL, NULL, OperandInfo28 }, // Inst #227 = BLENDVPSrm0
+ { 228, 3, 1, 0, "BLENDVPSrr0", 0, 0x14c00d45ULL, ImplicitList8, NULL, NULL, OperandInfo29 }, // Inst #228 = BLENDVPSrr0
+ { 229, 6, 1, 0, "BSF16rm", 0|(1<<TID::MayLoad), 0xbc000146ULL, NULL, ImplicitList1, Barriers1, OperandInfo55 }, // Inst #229 = BSF16rm
+ { 230, 2, 1, 0, "BSF16rr", 0, 0xbc000145ULL, NULL, ImplicitList1, Barriers1, OperandInfo56 }, // Inst #230 = BSF16rr
+ { 231, 6, 1, 0, "BSF32rm", 0|(1<<TID::MayLoad), 0xbc000106ULL, NULL, ImplicitList1, Barriers1, OperandInfo57 }, // Inst #231 = BSF32rm
+ { 232, 2, 1, 0, "BSF32rr", 0, 0xbc000105ULL, NULL, ImplicitList1, Barriers1, OperandInfo58 }, // Inst #232 = BSF32rr
+ { 233, 6, 1, 0, "BSF64rm", 0|(1<<TID::MayLoad), 0xbc001106ULL, NULL, ImplicitList1, Barriers1, OperandInfo59 }, // Inst #233 = BSF64rm
+ { 234, 2, 1, 0, "BSF64rr", 0, 0xbc001105ULL, NULL, ImplicitList1, Barriers1, OperandInfo60 }, // Inst #234 = BSF64rr
+ { 235, 6, 1, 0, "BSR16rm", 0|(1<<TID::MayLoad), 0xbd000146ULL, NULL, ImplicitList1, Barriers1, OperandInfo55 }, // Inst #235 = BSR16rm
+ { 236, 2, 1, 0, "BSR16rr", 0, 0xbd000145ULL, NULL, ImplicitList1, Barriers1, OperandInfo56 }, // Inst #236 = BSR16rr
+ { 237, 6, 1, 0, "BSR32rm", 0|(1<<TID::MayLoad), 0xbd000106ULL, NULL, ImplicitList1, Barriers1, OperandInfo57 }, // Inst #237 = BSR32rm
+ { 238, 2, 1, 0, "BSR32rr", 0, 0xbd000105ULL, NULL, ImplicitList1, Barriers1, OperandInfo58 }, // Inst #238 = BSR32rr
+ { 239, 6, 1, 0, "BSR64rm", 0|(1<<TID::MayLoad), 0xbd001106ULL, NULL, ImplicitList1, Barriers1, OperandInfo59 }, // Inst #239 = BSR64rm
+ { 240, 2, 1, 0, "BSR64rr", 0, 0xbd001105ULL, NULL, ImplicitList1, Barriers1, OperandInfo60 }, // Inst #240 = BSR64rr
+ { 241, 2, 1, 0, "BSWAP32r", 0, 0xc8000102ULL, ImplicitList1, NULL, NULL, OperandInfo61 }, // Inst #241 = BSWAP32r
+ { 242, 2, 1, 0, "BSWAP64r", 0, 0xc8001102ULL, NULL, NULL, NULL, OperandInfo62 }, // Inst #242 = BSWAP64r
+ { 243, 6, 0, 0, "BT16mi8", 0|(1<<TID::MayLoad), 0xba00215cULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #243 = BT16mi8
+ { 244, 6, 0, 0, "BT16mr", 0|(1<<TID::UnmodeledSideEffects), 0xa3000144ULL, NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #244 = BT16mr
+ { 245, 2, 0, 0, "BT16ri8", 0, 0xba002154ULL, NULL, ImplicitList1, Barriers1, OperandInfo63 }, // Inst #245 = BT16ri8
+ { 246, 2, 0, 0, "BT16rr", 0, 0xa3000143ULL, NULL, ImplicitList1, Barriers1, OperandInfo56 }, // Inst #246 = BT16rr
+ { 247, 6, 0, 0, "BT32mi8", 0|(1<<TID::MayLoad), 0xba00211cULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #247 = BT32mi8
+ { 248, 6, 0, 0, "BT32mr", 0|(1<<TID::UnmodeledSideEffects), 0xa3000104ULL, NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #248 = BT32mr
+ { 249, 2, 0, 0, "BT32ri8", 0, 0xba002114ULL, NULL, ImplicitList1, Barriers1, OperandInfo64 }, // Inst #249 = BT32ri8
+ { 250, 2, 0, 0, "BT32rr", 0, 0xa3000103ULL, NULL, ImplicitList1, Barriers1, OperandInfo58 }, // Inst #250 = BT32rr
+ { 251, 6, 0, 0, "BT64mi8", 0|(1<<TID::MayLoad), 0xba00311cULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #251 = BT64mi8
+ { 252, 6, 0, 0, "BT64mr", 0|(1<<TID::UnmodeledSideEffects), 0xa3001104ULL, NULL, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #252 = BT64mr
+ { 253, 2, 0, 0, "BT64ri8", 0, 0xba003114ULL, NULL, ImplicitList1, Barriers1, OperandInfo65 }, // Inst #253 = BT64ri8
+ { 254, 2, 0, 0, "BT64rr", 0, 0xa3001103ULL, NULL, ImplicitList1, Barriers1, OperandInfo60 }, // Inst #254 = BT64rr
+ { 255, 6, 0, 0, "BTC16mi8", 0|(1<<TID::UnmodeledSideEffects), 0xba00215fULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #255 = BTC16mi8
+ { 256, 6, 0, 0, "BTC16mr", 0|(1<<TID::UnmodeledSideEffects), 0xbb000144ULL, NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #256 = BTC16mr
+ { 257, 2, 0, 0, "BTC16ri8", 0|(1<<TID::UnmodeledSideEffects), 0xba002157ULL, NULL, ImplicitList1, Barriers1, OperandInfo63 }, // Inst #257 = BTC16ri8
+ { 258, 2, 0, 0, "BTC16rr", 0|(1<<TID::UnmodeledSideEffects), 0xbb000143ULL, NULL, ImplicitList1, Barriers1, OperandInfo56 }, // Inst #258 = BTC16rr
+ { 259, 6, 0, 0, "BTC32mi8", 0|(1<<TID::UnmodeledSideEffects), 0xba00211fULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #259 = BTC32mi8
+ { 260, 6, 0, 0, "BTC32mr", 0|(1<<TID::UnmodeledSideEffects), 0xbb000104ULL, NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #260 = BTC32mr
+ { 261, 2, 0, 0, "BTC32ri8", 0|(1<<TID::UnmodeledSideEffects), 0xba002117ULL, NULL, ImplicitList1, Barriers1, OperandInfo64 }, // Inst #261 = BTC32ri8
+ { 262, 2, 0, 0, "BTC32rr", 0|(1<<TID::UnmodeledSideEffects), 0xbb000103ULL, NULL, ImplicitList1, Barriers1, OperandInfo58 }, // Inst #262 = BTC32rr
+ { 263, 6, 0, 0, "BTC64mi8", 0|(1<<TID::UnmodeledSideEffects), 0xba00311fULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #263 = BTC64mi8
+ { 264, 6, 0, 0, "BTC64mr", 0|(1<<TID::UnmodeledSideEffects), 0xbb001104ULL, NULL, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #264 = BTC64mr
+ { 265, 2, 0, 0, "BTC64ri8", 0|(1<<TID::UnmodeledSideEffects), 0xba003117ULL, NULL, ImplicitList1, Barriers1, OperandInfo65 }, // Inst #265 = BTC64ri8
+ { 266, 2, 0, 0, "BTC64rr", 0|(1<<TID::UnmodeledSideEffects), 0xbb001103ULL, NULL, ImplicitList1, Barriers1, OperandInfo60 }, // Inst #266 = BTC64rr
+ { 267, 6, 0, 0, "BTR16mi8", 0|(1<<TID::UnmodeledSideEffects), 0xba00215eULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #267 = BTR16mi8
+ { 268, 6, 0, 0, "BTR16mr", 0|(1<<TID::UnmodeledSideEffects), 0xb3000144ULL, NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #268 = BTR16mr
+ { 269, 2, 0, 0, "BTR16ri8", 0|(1<<TID::UnmodeledSideEffects), 0xba002156ULL, NULL, ImplicitList1, Barriers1, OperandInfo63 }, // Inst #269 = BTR16ri8
+ { 270, 2, 0, 0, "BTR16rr", 0|(1<<TID::UnmodeledSideEffects), 0xb3000143ULL, NULL, ImplicitList1, Barriers1, OperandInfo56 }, // Inst #270 = BTR16rr
+ { 271, 6, 0, 0, "BTR32mi8", 0|(1<<TID::UnmodeledSideEffects), 0xba00211eULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #271 = BTR32mi8
+ { 272, 6, 0, 0, "BTR32mr", 0|(1<<TID::UnmodeledSideEffects), 0xb3000104ULL, NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #272 = BTR32mr
+ { 273, 2, 0, 0, "BTR32ri8", 0|(1<<TID::UnmodeledSideEffects), 0xba002116ULL, NULL, ImplicitList1, Barriers1, OperandInfo64 }, // Inst #273 = BTR32ri8
+ { 274, 2, 0, 0, "BTR32rr", 0|(1<<TID::UnmodeledSideEffects), 0xb3000103ULL, NULL, ImplicitList1, Barriers1, OperandInfo58 }, // Inst #274 = BTR32rr
+ { 275, 6, 0, 0, "BTR64mi8", 0|(1<<TID::UnmodeledSideEffects), 0xba00311eULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #275 = BTR64mi8
+ { 276, 6, 0, 0, "BTR64mr", 0|(1<<TID::UnmodeledSideEffects), 0xb3001104ULL, NULL, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #276 = BTR64mr
+ { 277, 2, 0, 0, "BTR64ri8", 0|(1<<TID::UnmodeledSideEffects), 0xba003116ULL, NULL, ImplicitList1, Barriers1, OperandInfo65 }, // Inst #277 = BTR64ri8
+ { 278, 2, 0, 0, "BTR64rr", 0|(1<<TID::UnmodeledSideEffects), 0xb3001103ULL, NULL, ImplicitList1, Barriers1, OperandInfo60 }, // Inst #278 = BTR64rr
+ { 279, 6, 0, 0, "BTS16mi8", 0|(1<<TID::UnmodeledSideEffects), 0xba00215dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #279 = BTS16mi8
+ { 280, 6, 0, 0, "BTS16mr", 0|(1<<TID::UnmodeledSideEffects), 0xab000144ULL, NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #280 = BTS16mr
+ { 281, 2, 0, 0, "BTS16ri8", 0|(1<<TID::UnmodeledSideEffects), 0xba002155ULL, NULL, ImplicitList1, Barriers1, OperandInfo63 }, // Inst #281 = BTS16ri8
+ { 282, 2, 0, 0, "BTS16rr", 0|(1<<TID::UnmodeledSideEffects), 0xab000143ULL, NULL, ImplicitList1, Barriers1, OperandInfo56 }, // Inst #282 = BTS16rr
+ { 283, 6, 0, 0, "BTS32mi8", 0|(1<<TID::UnmodeledSideEffects), 0xba00211dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #283 = BTS32mi8
+ { 284, 6, 0, 0, "BTS32mr", 0|(1<<TID::UnmodeledSideEffects), 0xab000104ULL, NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #284 = BTS32mr
+ { 285, 2, 0, 0, "BTS32ri8", 0|(1<<TID::UnmodeledSideEffects), 0xba002115ULL, NULL, ImplicitList1, Barriers1, OperandInfo64 }, // Inst #285 = BTS32ri8
+ { 286, 2, 0, 0, "BTS32rr", 0|(1<<TID::UnmodeledSideEffects), 0xab000103ULL, NULL, ImplicitList1, Barriers1, OperandInfo58 }, // Inst #286 = BTS32rr
+ { 287, 6, 0, 0, "BTS64mi8", 0|(1<<TID::UnmodeledSideEffects), 0xba00311dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #287 = BTS64mi8
+ { 288, 6, 0, 0, "BTS64mr", 0|(1<<TID::UnmodeledSideEffects), 0xab001104ULL, NULL, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #288 = BTS64mr
+ { 289, 2, 0, 0, "BTS64ri8", 0|(1<<TID::UnmodeledSideEffects), 0xba003115ULL, NULL, ImplicitList1, Barriers1, OperandInfo65 }, // Inst #289 = BTS64ri8
+ { 290, 2, 0, 0, "BTS64rr", 0|(1<<TID::UnmodeledSideEffects), 0xab001103ULL, NULL, ImplicitList1, Barriers1, OperandInfo60 }, // Inst #290 = BTS64rr
+ { 291, 5, 0, 0, "CALL32m", 0|(1<<TID::Call)|(1<<TID::MayLoad)|(1<<TID::Variadic), 0xff00001aULL, ImplicitList2, ImplicitList9, Barriers3, OperandInfo34 }, // Inst #291 = CALL32m
+ { 292, 1, 0, 0, "CALL32r", 0|(1<<TID::Call)|(1<<TID::Variadic), 0xff000012ULL, ImplicitList2, ImplicitList9, Barriers3, OperandInfo66 }, // Inst #292 = CALL32r
+ { 293, 5, 0, 0, "CALL64m", 0|(1<<TID::Call)|(1<<TID::MayLoad)|(1<<TID::Variadic), 0xff00001aULL, ImplicitList4, ImplicitList10, Barriers4, OperandInfo34 }, // Inst #293 = CALL64m
+ { 294, 1, 0, 0, "CALL64pcrel32", 0|(1<<TID::Call)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0xe800c001ULL, ImplicitList4, ImplicitList10, Barriers4, OperandInfo2 }, // Inst #294 = CALL64pcrel32
+ { 295, 1, 0, 0, "CALL64r", 0|(1<<TID::Call)|(1<<TID::Variadic), 0xff000012ULL, ImplicitList4, ImplicitList10, Barriers4, OperandInfo67 }, // Inst #295 = CALL64r
+ { 296, 1, 0, 0, "CALLpcrel16", 0|(1<<TID::Call)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0xe8008041ULL, ImplicitList2, ImplicitList9, Barriers3, OperandInfo2 }, // Inst #296 = CALLpcrel16
+ { 297, 1, 0, 0, "CALLpcrel32", 0|(1<<TID::Call)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0xe800c001ULL, ImplicitList2, ImplicitList9, Barriers3, OperandInfo2 }, // Inst #297 = CALLpcrel32
+ { 298, 0, 0, 0, "CBW", 0, 0x98000041ULL, ImplicitList11, ImplicitList12, NULL, 0 }, // Inst #298 = CBW
+ { 299, 0, 0, 0, "CDQ", 0, 0x99000001ULL, ImplicitList13, ImplicitList14, Barriers5, 0 }, // Inst #299 = CDQ
+ { 300, 0, 0, 0, "CDQE", 0, 0x98001001ULL, ImplicitList13, ImplicitList15, NULL, 0 }, // Inst #300 = CDQE
+ { 301, 0, 0, 0, "CHS_F", 0|(1<<TID::UnmodeledSideEffects), 0xe0000401ULL, NULL, NULL, NULL, 0 }, // Inst #301 = CHS_F
+ { 302, 2, 1, 0, "CHS_Fp32", 0, 0x30000ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #302 = CHS_Fp32
+ { 303, 2, 1, 0, "CHS_Fp64", 0, 0x30000ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #303 = CHS_Fp64
+ { 304, 2, 1, 0, "CHS_Fp80", 0, 0x30000ULL, NULL, NULL, NULL, OperandInfo9 }, // Inst #304 = CHS_Fp80
+ { 305, 0, 0, 0, "CLC", 0|(1<<TID::UnmodeledSideEffects), 0xf8000001ULL, NULL, NULL, NULL, 0 }, // Inst #305 = CLC
+ { 306, 0, 0, 0, "CLD", 0|(1<<TID::UnmodeledSideEffects), 0xfc000001ULL, NULL, NULL, NULL, 0 }, // Inst #306 = CLD
+ { 307, 5, 0, 0, "CLFLUSH", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xae00011fULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #307 = CLFLUSH
+ { 308, 0, 0, 0, "CLI", 0|(1<<TID::UnmodeledSideEffects), 0xfa000001ULL, NULL, NULL, NULL, 0 }, // Inst #308 = CLI
+ { 309, 0, 0, 0, "CLTS", 0|(1<<TID::UnmodeledSideEffects), 0x6000101ULL, NULL, NULL, NULL, 0 }, // Inst #309 = CLTS
+ { 310, 0, 0, 0, "CMC", 0|(1<<TID::UnmodeledSideEffects), 0xf5000001ULL, NULL, NULL, NULL, 0 }, // Inst #310 = CMC
+ { 311, 7, 1, 0, "CMOVA16rm", 0|(1<<TID::MayLoad), 0x47000146ULL, ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #311 = CMOVA16rm
+ { 312, 3, 1, 0, "CMOVA16rr", 0|(1<<TID::Commutable), 0x47000145ULL, ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #312 = CMOVA16rr
+ { 313, 7, 1, 0, "CMOVA32rm", 0|(1<<TID::MayLoad), 0x47000106ULL, ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #313 = CMOVA32rm
+ { 314, 3, 1, 0, "CMOVA32rr", 0|(1<<TID::Commutable), 0x47000105ULL, ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #314 = CMOVA32rr
+ { 315, 7, 1, 0, "CMOVA64rm", 0|(1<<TID::MayLoad), 0x47001106ULL, ImplicitList1, NULL, NULL, OperandInfo21 }, // Inst #315 = CMOVA64rm
+ { 316, 3, 1, 0, "CMOVA64rr", 0|(1<<TID::Commutable), 0x47001105ULL, ImplicitList1, NULL, NULL, OperandInfo22 }, // Inst #316 = CMOVA64rr
+ { 317, 7, 1, 0, "CMOVAE16rm", 0|(1<<TID::MayLoad), 0x43000146ULL, ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #317 = CMOVAE16rm
+ { 318, 3, 1, 0, "CMOVAE16rr", 0|(1<<TID::Commutable), 0x43000145ULL, ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #318 = CMOVAE16rr
+ { 319, 7, 1, 0, "CMOVAE32rm", 0|(1<<TID::MayLoad), 0x43000106ULL, ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #319 = CMOVAE32rm
+ { 320, 3, 1, 0, "CMOVAE32rr", 0|(1<<TID::Commutable), 0x43000105ULL, ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #320 = CMOVAE32rr
+ { 321, 7, 1, 0, "CMOVAE64rm", 0|(1<<TID::MayLoad), 0x43001106ULL, ImplicitList1, NULL, NULL, OperandInfo21 }, // Inst #321 = CMOVAE64rm
+ { 322, 3, 1, 0, "CMOVAE64rr", 0|(1<<TID::Commutable), 0x43001105ULL, ImplicitList1, NULL, NULL, OperandInfo22 }, // Inst #322 = CMOVAE64rr
+ { 323, 7, 1, 0, "CMOVB16rm", 0|(1<<TID::MayLoad), 0x42000146ULL, ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #323 = CMOVB16rm
+ { 324, 3, 1, 0, "CMOVB16rr", 0|(1<<TID::Commutable), 0x42000145ULL, ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #324 = CMOVB16rr
+ { 325, 7, 1, 0, "CMOVB32rm", 0|(1<<TID::MayLoad), 0x42000106ULL, ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #325 = CMOVB32rm
+ { 326, 3, 1, 0, "CMOVB32rr", 0|(1<<TID::Commutable), 0x42000105ULL, ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #326 = CMOVB32rr
+ { 327, 7, 1, 0, "CMOVB64rm", 0|(1<<TID::MayLoad), 0x42001106ULL, ImplicitList1, NULL, NULL, OperandInfo21 }, // Inst #327 = CMOVB64rm
+ { 328, 3, 1, 0, "CMOVB64rr", 0|(1<<TID::Commutable), 0x42001105ULL, ImplicitList1, NULL, NULL, OperandInfo22 }, // Inst #328 = CMOVB64rr
+ { 329, 7, 1, 0, "CMOVBE16rm", 0|(1<<TID::MayLoad), 0x46000146ULL, ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #329 = CMOVBE16rm
+ { 330, 3, 1, 0, "CMOVBE16rr", 0|(1<<TID::Commutable), 0x46000145ULL, ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #330 = CMOVBE16rr
+ { 331, 7, 1, 0, "CMOVBE32rm", 0|(1<<TID::MayLoad), 0x46000106ULL, ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #331 = CMOVBE32rm
+ { 332, 3, 1, 0, "CMOVBE32rr", 0|(1<<TID::Commutable), 0x46000105ULL, ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #332 = CMOVBE32rr
+ { 333, 7, 1, 0, "CMOVBE64rm", 0|(1<<TID::MayLoad), 0x46001106ULL, ImplicitList1, NULL, NULL, OperandInfo21 }, // Inst #333 = CMOVBE64rm
+ { 334, 3, 1, 0, "CMOVBE64rr", 0|(1<<TID::Commutable), 0x46001105ULL, ImplicitList1, NULL, NULL, OperandInfo22 }, // Inst #334 = CMOVBE64rr
+ { 335, 1, 1, 0, "CMOVBE_F", 0|(1<<TID::UnmodeledSideEffects), 0xd0000502ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #335 = CMOVBE_F
+ { 336, 3, 1, 0, "CMOVBE_Fp32", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo68 }, // Inst #336 = CMOVBE_Fp32
+ { 337, 3, 1, 0, "CMOVBE_Fp64", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo69 }, // Inst #337 = CMOVBE_Fp64
+ { 338, 3, 1, 0, "CMOVBE_Fp80", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo70 }, // Inst #338 = CMOVBE_Fp80
+ { 339, 1, 1, 0, "CMOVB_F", 0|(1<<TID::UnmodeledSideEffects), 0xc0000502ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #339 = CMOVB_F
+ { 340, 3, 1, 0, "CMOVB_Fp32", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo68 }, // Inst #340 = CMOVB_Fp32
+ { 341, 3, 1, 0, "CMOVB_Fp64", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo69 }, // Inst #341 = CMOVB_Fp64
+ { 342, 3, 1, 0, "CMOVB_Fp80", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo70 }, // Inst #342 = CMOVB_Fp80
+ { 343, 7, 1, 0, "CMOVE16rm", 0|(1<<TID::MayLoad), 0x44000146ULL, ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #343 = CMOVE16rm
+ { 344, 3, 1, 0, "CMOVE16rr", 0|(1<<TID::Commutable), 0x44000145ULL, ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #344 = CMOVE16rr
+ { 345, 7, 1, 0, "CMOVE32rm", 0|(1<<TID::MayLoad), 0x44000106ULL, ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #345 = CMOVE32rm
+ { 346, 3, 1, 0, "CMOVE32rr", 0|(1<<TID::Commutable), 0x44000105ULL, ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #346 = CMOVE32rr
+ { 347, 7, 1, 0, "CMOVE64rm", 0|(1<<TID::MayLoad), 0x44001106ULL, ImplicitList1, NULL, NULL, OperandInfo21 }, // Inst #347 = CMOVE64rm
+ { 348, 3, 1, 0, "CMOVE64rr", 0|(1<<TID::Commutable), 0x44001105ULL, ImplicitList1, NULL, NULL, OperandInfo22 }, // Inst #348 = CMOVE64rr
+ { 349, 1, 1, 0, "CMOVE_F", 0|(1<<TID::UnmodeledSideEffects), 0xc8000502ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #349 = CMOVE_F
+ { 350, 3, 1, 0, "CMOVE_Fp32", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo68 }, // Inst #350 = CMOVE_Fp32
+ { 351, 3, 1, 0, "CMOVE_Fp64", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo69 }, // Inst #351 = CMOVE_Fp64
+ { 352, 3, 1, 0, "CMOVE_Fp80", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo70 }, // Inst #352 = CMOVE_Fp80
+ { 353, 7, 1, 0, "CMOVG16rm", 0|(1<<TID::MayLoad), 0x4f000146ULL, ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #353 = CMOVG16rm
+ { 354, 3, 1, 0, "CMOVG16rr", 0|(1<<TID::Commutable), 0x4f000145ULL, ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #354 = CMOVG16rr
+ { 355, 7, 1, 0, "CMOVG32rm", 0|(1<<TID::MayLoad), 0x4f000106ULL, ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #355 = CMOVG32rm
+ { 356, 3, 1, 0, "CMOVG32rr", 0|(1<<TID::Commutable), 0x4f000105ULL, ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #356 = CMOVG32rr
+ { 357, 7, 1, 0, "CMOVG64rm", 0|(1<<TID::MayLoad), 0x4f001106ULL, ImplicitList1, NULL, NULL, OperandInfo21 }, // Inst #357 = CMOVG64rm
+ { 358, 3, 1, 0, "CMOVG64rr", 0|(1<<TID::Commutable), 0x4f001105ULL, ImplicitList1, NULL, NULL, OperandInfo22 }, // Inst #358 = CMOVG64rr
+ { 359, 7, 1, 0, "CMOVGE16rm", 0|(1<<TID::MayLoad), 0x4d000146ULL, ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #359 = CMOVGE16rm
+ { 360, 3, 1, 0, "CMOVGE16rr", 0|(1<<TID::Commutable), 0x4d000145ULL, ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #360 = CMOVGE16rr
+ { 361, 7, 1, 0, "CMOVGE32rm", 0|(1<<TID::MayLoad), 0x4d000106ULL, ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #361 = CMOVGE32rm
+ { 362, 3, 1, 0, "CMOVGE32rr", 0|(1<<TID::Commutable), 0x4d000105ULL, ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #362 = CMOVGE32rr
+ { 363, 7, 1, 0, "CMOVGE64rm", 0|(1<<TID::MayLoad), 0x4d001106ULL, ImplicitList1, NULL, NULL, OperandInfo21 }, // Inst #363 = CMOVGE64rm
+ { 364, 3, 1, 0, "CMOVGE64rr", 0|(1<<TID::Commutable), 0x4d001105ULL, ImplicitList1, NULL, NULL, OperandInfo22 }, // Inst #364 = CMOVGE64rr
+ { 365, 7, 1, 0, "CMOVL16rm", 0|(1<<TID::MayLoad), 0x4c000146ULL, ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #365 = CMOVL16rm
+ { 366, 3, 1, 0, "CMOVL16rr", 0|(1<<TID::Commutable), 0x4c000145ULL, ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #366 = CMOVL16rr
+ { 367, 7, 1, 0, "CMOVL32rm", 0|(1<<TID::MayLoad), 0x4c000106ULL, ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #367 = CMOVL32rm
+ { 368, 3, 1, 0, "CMOVL32rr", 0|(1<<TID::Commutable), 0x4c000105ULL, ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #368 = CMOVL32rr
+ { 369, 7, 1, 0, "CMOVL64rm", 0|(1<<TID::MayLoad), 0x4c001106ULL, ImplicitList1, NULL, NULL, OperandInfo21 }, // Inst #369 = CMOVL64rm
+ { 370, 3, 1, 0, "CMOVL64rr", 0|(1<<TID::Commutable), 0x4c001105ULL, ImplicitList1, NULL, NULL, OperandInfo22 }, // Inst #370 = CMOVL64rr
+ { 371, 7, 1, 0, "CMOVLE16rm", 0|(1<<TID::MayLoad), 0x4e000146ULL, ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #371 = CMOVLE16rm
+ { 372, 3, 1, 0, "CMOVLE16rr", 0|(1<<TID::Commutable), 0x4e000145ULL, ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #372 = CMOVLE16rr
+ { 373, 7, 1, 0, "CMOVLE32rm", 0|(1<<TID::MayLoad), 0x4e000106ULL, ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #373 = CMOVLE32rm
+ { 374, 3, 1, 0, "CMOVLE32rr", 0|(1<<TID::Commutable), 0x4e000105ULL, ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #374 = CMOVLE32rr
+ { 375, 7, 1, 0, "CMOVLE64rm", 0|(1<<TID::MayLoad), 0x4e001106ULL, ImplicitList1, NULL, NULL, OperandInfo21 }, // Inst #375 = CMOVLE64rm
+ { 376, 3, 1, 0, "CMOVLE64rr", 0|(1<<TID::Commutable), 0x4e001105ULL, ImplicitList1, NULL, NULL, OperandInfo22 }, // Inst #376 = CMOVLE64rr
+ { 377, 1, 1, 0, "CMOVNBE_F", 0|(1<<TID::UnmodeledSideEffects), 0xd0000602ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #377 = CMOVNBE_F
+ { 378, 3, 1, 0, "CMOVNBE_Fp32", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo68 }, // Inst #378 = CMOVNBE_Fp32
+ { 379, 3, 1, 0, "CMOVNBE_Fp64", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo69 }, // Inst #379 = CMOVNBE_Fp64
+ { 380, 3, 1, 0, "CMOVNBE_Fp80", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo70 }, // Inst #380 = CMOVNBE_Fp80
+ { 381, 1, 1, 0, "CMOVNB_F", 0|(1<<TID::UnmodeledSideEffects), 0xc0000602ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #381 = CMOVNB_F
+ { 382, 3, 1, 0, "CMOVNB_Fp32", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo68 }, // Inst #382 = CMOVNB_Fp32
+ { 383, 3, 1, 0, "CMOVNB_Fp64", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo69 }, // Inst #383 = CMOVNB_Fp64
+ { 384, 3, 1, 0, "CMOVNB_Fp80", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo70 }, // Inst #384 = CMOVNB_Fp80
+ { 385, 7, 1, 0, "CMOVNE16rm", 0|(1<<TID::MayLoad), 0x45000146ULL, ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #385 = CMOVNE16rm
+ { 386, 3, 1, 0, "CMOVNE16rr", 0|(1<<TID::Commutable), 0x45000145ULL, ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #386 = CMOVNE16rr
+ { 387, 7, 1, 0, "CMOVNE32rm", 0|(1<<TID::MayLoad), 0x45000106ULL, ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #387 = CMOVNE32rm
+ { 388, 3, 1, 0, "CMOVNE32rr", 0|(1<<TID::Commutable), 0x45000105ULL, ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #388 = CMOVNE32rr
+ { 389, 7, 1, 0, "CMOVNE64rm", 0|(1<<TID::MayLoad), 0x45001106ULL, ImplicitList1, NULL, NULL, OperandInfo21 }, // Inst #389 = CMOVNE64rm
+ { 390, 3, 1, 0, "CMOVNE64rr", 0|(1<<TID::Commutable), 0x45001105ULL, ImplicitList1, NULL, NULL, OperandInfo22 }, // Inst #390 = CMOVNE64rr
+ { 391, 1, 1, 0, "CMOVNE_F", 0|(1<<TID::UnmodeledSideEffects), 0xc8000602ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #391 = CMOVNE_F
+ { 392, 3, 1, 0, "CMOVNE_Fp32", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo68 }, // Inst #392 = CMOVNE_Fp32
+ { 393, 3, 1, 0, "CMOVNE_Fp64", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo69 }, // Inst #393 = CMOVNE_Fp64
+ { 394, 3, 1, 0, "CMOVNE_Fp80", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo70 }, // Inst #394 = CMOVNE_Fp80
+ { 395, 7, 1, 0, "CMOVNO16rm", 0|(1<<TID::MayLoad), 0x41000146ULL, ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #395 = CMOVNO16rm
+ { 396, 3, 1, 0, "CMOVNO16rr", 0|(1<<TID::Commutable), 0x41000145ULL, ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #396 = CMOVNO16rr
+ { 397, 7, 1, 0, "CMOVNO32rm", 0|(1<<TID::MayLoad), 0x41000106ULL, ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #397 = CMOVNO32rm
+ { 398, 3, 1, 0, "CMOVNO32rr", 0|(1<<TID::Commutable), 0x41000105ULL, ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #398 = CMOVNO32rr
+ { 399, 7, 1, 0, "CMOVNO64rm", 0|(1<<TID::MayLoad), 0x41001106ULL, ImplicitList1, NULL, NULL, OperandInfo21 }, // Inst #399 = CMOVNO64rm
+ { 400, 3, 1, 0, "CMOVNO64rr", 0|(1<<TID::Commutable), 0x41001105ULL, ImplicitList1, NULL, NULL, OperandInfo22 }, // Inst #400 = CMOVNO64rr
+ { 401, 7, 1, 0, "CMOVNP16rm", 0|(1<<TID::MayLoad), 0x4b000146ULL, ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #401 = CMOVNP16rm
+ { 402, 3, 1, 0, "CMOVNP16rr", 0|(1<<TID::Commutable), 0x4b000145ULL, ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #402 = CMOVNP16rr
+ { 403, 7, 1, 0, "CMOVNP32rm", 0|(1<<TID::MayLoad), 0x4b000106ULL, ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #403 = CMOVNP32rm
+ { 404, 3, 1, 0, "CMOVNP32rr", 0|(1<<TID::Commutable), 0x4b000105ULL, ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #404 = CMOVNP32rr
+ { 405, 7, 1, 0, "CMOVNP64rm", 0|(1<<TID::MayLoad), 0x4b001106ULL, ImplicitList1, NULL, NULL, OperandInfo21 }, // Inst #405 = CMOVNP64rm
+ { 406, 3, 1, 0, "CMOVNP64rr", 0|(1<<TID::Commutable), 0x4b001105ULL, ImplicitList1, NULL, NULL, OperandInfo22 }, // Inst #406 = CMOVNP64rr
+ { 407, 1, 1, 0, "CMOVNP_F", 0|(1<<TID::UnmodeledSideEffects), 0xd8000602ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #407 = CMOVNP_F
+ { 408, 3, 1, 0, "CMOVNP_Fp32", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo68 }, // Inst #408 = CMOVNP_Fp32
+ { 409, 3, 1, 0, "CMOVNP_Fp64", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo69 }, // Inst #409 = CMOVNP_Fp64
+ { 410, 3, 1, 0, "CMOVNP_Fp80", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo70 }, // Inst #410 = CMOVNP_Fp80
+ { 411, 7, 1, 0, "CMOVNS16rm", 0|(1<<TID::MayLoad), 0x49000146ULL, ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #411 = CMOVNS16rm
+ { 412, 3, 1, 0, "CMOVNS16rr", 0|(1<<TID::Commutable), 0x49000145ULL, ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #412 = CMOVNS16rr
+ { 413, 7, 1, 0, "CMOVNS32rm", 0|(1<<TID::MayLoad), 0x49000106ULL, ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #413 = CMOVNS32rm
+ { 414, 3, 1, 0, "CMOVNS32rr", 0|(1<<TID::Commutable), 0x49000105ULL, ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #414 = CMOVNS32rr
+ { 415, 7, 1, 0, "CMOVNS64rm", 0|(1<<TID::MayLoad), 0x49001106ULL, ImplicitList1, NULL, NULL, OperandInfo21 }, // Inst #415 = CMOVNS64rm
+ { 416, 3, 1, 0, "CMOVNS64rr", 0|(1<<TID::Commutable), 0x49001105ULL, ImplicitList1, NULL, NULL, OperandInfo22 }, // Inst #416 = CMOVNS64rr
+ { 417, 7, 1, 0, "CMOVO16rm", 0|(1<<TID::MayLoad), 0x40000146ULL, ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #417 = CMOVO16rm
+ { 418, 3, 1, 0, "CMOVO16rr", 0|(1<<TID::Commutable), 0x40000145ULL, ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #418 = CMOVO16rr
+ { 419, 7, 1, 0, "CMOVO32rm", 0|(1<<TID::MayLoad), 0x40000106ULL, ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #419 = CMOVO32rm
+ { 420, 3, 1, 0, "CMOVO32rr", 0|(1<<TID::Commutable), 0x40000105ULL, ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #420 = CMOVO32rr
+ { 421, 7, 1, 0, "CMOVO64rm", 0|(1<<TID::MayLoad), 0x40001106ULL, ImplicitList1, NULL, NULL, OperandInfo21 }, // Inst #421 = CMOVO64rm
+ { 422, 3, 1, 0, "CMOVO64rr", 0|(1<<TID::Commutable), 0x40001105ULL, ImplicitList1, NULL, NULL, OperandInfo22 }, // Inst #422 = CMOVO64rr
+ { 423, 7, 1, 0, "CMOVP16rm", 0|(1<<TID::MayLoad), 0x4a000146ULL, ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #423 = CMOVP16rm
+ { 424, 3, 1, 0, "CMOVP16rr", 0|(1<<TID::Commutable), 0x4a000145ULL, ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #424 = CMOVP16rr
+ { 425, 7, 1, 0, "CMOVP32rm", 0|(1<<TID::MayLoad), 0x4a000106ULL, ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #425 = CMOVP32rm
+ { 426, 3, 1, 0, "CMOVP32rr", 0|(1<<TID::Commutable), 0x4a000105ULL, ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #426 = CMOVP32rr
+ { 427, 7, 1, 0, "CMOVP64rm", 0|(1<<TID::MayLoad), 0x4a001106ULL, ImplicitList1, NULL, NULL, OperandInfo21 }, // Inst #427 = CMOVP64rm
+ { 428, 3, 1, 0, "CMOVP64rr", 0|(1<<TID::Commutable), 0x4a001105ULL, ImplicitList1, NULL, NULL, OperandInfo22 }, // Inst #428 = CMOVP64rr
+ { 429, 1, 1, 0, "CMOVP_F", 0|(1<<TID::UnmodeledSideEffects), 0xd8000502ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #429 = CMOVP_F
+ { 430, 3, 1, 0, "CMOVP_Fp32", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo68 }, // Inst #430 = CMOVP_Fp32
+ { 431, 3, 1, 0, "CMOVP_Fp64", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo69 }, // Inst #431 = CMOVP_Fp64
+ { 432, 3, 1, 0, "CMOVP_Fp80", 0, 0x60000ULL, ImplicitList1, NULL, NULL, OperandInfo70 }, // Inst #432 = CMOVP_Fp80
+ { 433, 7, 1, 0, "CMOVS16rm", 0|(1<<TID::MayLoad), 0x48000146ULL, ImplicitList1, NULL, NULL, OperandInfo13 }, // Inst #433 = CMOVS16rm
+ { 434, 3, 1, 0, "CMOVS16rr", 0|(1<<TID::Commutable), 0x48000145ULL, ImplicitList1, NULL, NULL, OperandInfo14 }, // Inst #434 = CMOVS16rr
+ { 435, 7, 1, 0, "CMOVS32rm", 0|(1<<TID::MayLoad), 0x48000106ULL, ImplicitList1, NULL, NULL, OperandInfo17 }, // Inst #435 = CMOVS32rm
+ { 436, 3, 1, 0, "CMOVS32rr", 0|(1<<TID::Commutable), 0x48000105ULL, ImplicitList1, NULL, NULL, OperandInfo18 }, // Inst #436 = CMOVS32rr
+ { 437, 7, 1, 0, "CMOVS64rm", 0|(1<<TID::MayLoad), 0x48001106ULL, ImplicitList1, NULL, NULL, OperandInfo21 }, // Inst #437 = CMOVS64rm
+ { 438, 3, 1, 0, "CMOVS64rr", 0|(1<<TID::Commutable), 0x48001105ULL, ImplicitList1, NULL, NULL, OperandInfo22 }, // Inst #438 = CMOVS64rr
+ { 439, 4, 1, 0, "CMOV_FR32", 0|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList1, NULL, NULL, OperandInfo71 }, // Inst #439 = CMOV_FR32
+ { 440, 4, 1, 0, "CMOV_FR64", 0|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList1, NULL, NULL, OperandInfo72 }, // Inst #440 = CMOV_FR64
+ { 441, 4, 1, 0, "CMOV_GR16", 0|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo73 }, // Inst #441 = CMOV_GR16
+ { 442, 4, 1, 0, "CMOV_GR32", 0|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo74 }, // Inst #442 = CMOV_GR32
+ { 443, 4, 1, 0, "CMOV_GR8", 0|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo75 }, // Inst #443 = CMOV_GR8
+ { 444, 4, 1, 0, "CMOV_RFP32", 0|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo76 }, // Inst #444 = CMOV_RFP32
+ { 445, 4, 1, 0, "CMOV_RFP64", 0|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo77 }, // Inst #445 = CMOV_RFP64
+ { 446, 4, 1, 0, "CMOV_RFP80", 0|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo78 }, // Inst #446 = CMOV_RFP80
+ { 447, 4, 1, 0, "CMOV_V1I64", 0|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList1, NULL, NULL, OperandInfo79 }, // Inst #447 = CMOV_V1I64
+ { 448, 4, 1, 0, "CMOV_V2F64", 0|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList1, NULL, NULL, OperandInfo80 }, // Inst #448 = CMOV_V2F64
+ { 449, 4, 1, 0, "CMOV_V2I64", 0|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList1, NULL, NULL, OperandInfo80 }, // Inst #449 = CMOV_V2I64
+ { 450, 4, 1, 0, "CMOV_V4F32", 0|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList1, NULL, NULL, OperandInfo80 }, // Inst #450 = CMOV_V4F32
+ { 451, 1, 0, 0, "CMP16i16", 0|(1<<TID::UnmodeledSideEffects), 0x3d006041ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #451 = CMP16i16
+ { 452, 6, 0, 0, "CMP16mi", 0|(1<<TID::MayLoad), 0x8100605fULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #452 = CMP16mi
+ { 453, 6, 0, 0, "CMP16mi8", 0|(1<<TID::MayLoad), 0x8300205fULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #453 = CMP16mi8
+ { 454, 6, 0, 0, "CMP16mr", 0|(1<<TID::MayLoad), 0x39000044ULL, NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #454 = CMP16mr
+ { 455, 2, 0, 0, "CMP16ri", 0, 0x81006057ULL, NULL, ImplicitList1, Barriers1, OperandInfo63 }, // Inst #455 = CMP16ri
+ { 456, 2, 0, 0, "CMP16ri8", 0, 0x83002057ULL, NULL, ImplicitList1, Barriers1, OperandInfo63 }, // Inst #456 = CMP16ri8
+ { 457, 6, 0, 0, "CMP16rm", 0|(1<<TID::MayLoad), 0x3b000046ULL, NULL, ImplicitList1, Barriers1, OperandInfo55 }, // Inst #457 = CMP16rm
+ { 458, 2, 0, 0, "CMP16rr", 0, 0x39000043ULL, NULL, ImplicitList1, Barriers1, OperandInfo56 }, // Inst #458 = CMP16rr
+ { 459, 2, 0, 0, "CMP16rr_alt", 0|(1<<TID::UnmodeledSideEffects), 0x3b000045ULL, NULL, ImplicitList1, Barriers1, OperandInfo56 }, // Inst #459 = CMP16rr_alt
+ { 460, 1, 0, 0, "CMP32i32", 0|(1<<TID::UnmodeledSideEffects), 0x3d00a001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #460 = CMP32i32
+ { 461, 6, 0, 0, "CMP32mi", 0|(1<<TID::MayLoad), 0x8100a01fULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #461 = CMP32mi
+ { 462, 6, 0, 0, "CMP32mi8", 0|(1<<TID::MayLoad), 0x8300201fULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #462 = CMP32mi8
+ { 463, 6, 0, 0, "CMP32mr", 0|(1<<TID::MayLoad), 0x39000004ULL, NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #463 = CMP32mr
+ { 464, 2, 0, 0, "CMP32ri", 0, 0x8100a017ULL, NULL, ImplicitList1, Barriers1, OperandInfo64 }, // Inst #464 = CMP32ri
+ { 465, 2, 0, 0, "CMP32ri8", 0, 0x83002017ULL, NULL, ImplicitList1, Barriers1, OperandInfo64 }, // Inst #465 = CMP32ri8
+ { 466, 6, 0, 0, "CMP32rm", 0|(1<<TID::MayLoad), 0x3b000006ULL, NULL, ImplicitList1, Barriers1, OperandInfo57 }, // Inst #466 = CMP32rm
+ { 467, 2, 0, 0, "CMP32rr", 0, 0x39000003ULL, NULL, ImplicitList1, Barriers1, OperandInfo58 }, // Inst #467 = CMP32rr
+ { 468, 2, 0, 0, "CMP32rr_alt", 0|(1<<TID::UnmodeledSideEffects), 0x3b000005ULL, NULL, ImplicitList1, Barriers1, OperandInfo58 }, // Inst #468 = CMP32rr_alt
+ { 469, 1, 0, 0, "CMP64i32", 0|(1<<TID::UnmodeledSideEffects), 0x3d00b001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #469 = CMP64i32
+ { 470, 6, 0, 0, "CMP64mi32", 0|(1<<TID::MayLoad), 0x8100b01fULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #470 = CMP64mi32
+ { 471, 6, 0, 0, "CMP64mi8", 0|(1<<TID::MayLoad), 0x8300301fULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #471 = CMP64mi8
+ { 472, 6, 0, 0, "CMP64mr", 0|(1<<TID::MayLoad), 0x39001004ULL, NULL, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #472 = CMP64mr
+ { 473, 2, 0, 0, "CMP64mrmrr", 0|(1<<TID::UnmodeledSideEffects), 0x3b001005ULL, NULL, ImplicitList1, Barriers1, OperandInfo60 }, // Inst #473 = CMP64mrmrr
+ { 474, 2, 0, 0, "CMP64ri32", 0, 0x8100b017ULL, NULL, ImplicitList1, Barriers1, OperandInfo65 }, // Inst #474 = CMP64ri32
+ { 475, 2, 0, 0, "CMP64ri8", 0, 0x83003017ULL, NULL, ImplicitList1, Barriers1, OperandInfo65 }, // Inst #475 = CMP64ri8
+ { 476, 6, 0, 0, "CMP64rm", 0|(1<<TID::MayLoad), 0x3b001006ULL, NULL, ImplicitList1, Barriers1, OperandInfo59 }, // Inst #476 = CMP64rm
+ { 477, 2, 0, 0, "CMP64rr", 0, 0x39001003ULL, NULL, ImplicitList1, Barriers1, OperandInfo60 }, // Inst #477 = CMP64rr
+ { 478, 1, 0, 0, "CMP8i8", 0|(1<<TID::UnmodeledSideEffects), 0x3c002001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #478 = CMP8i8
+ { 479, 6, 0, 0, "CMP8mi", 0|(1<<TID::MayLoad), 0x8000201fULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #479 = CMP8mi
+ { 480, 6, 0, 0, "CMP8mr", 0|(1<<TID::MayLoad), 0x38000004ULL, NULL, ImplicitList1, Barriers1, OperandInfo24 }, // Inst #480 = CMP8mr
+ { 481, 2, 0, 0, "CMP8ri", 0, 0x80002017ULL, NULL, ImplicitList1, Barriers1, OperandInfo81 }, // Inst #481 = CMP8ri
+ { 482, 6, 0, 0, "CMP8rm", 0|(1<<TID::MayLoad), 0x3a000006ULL, NULL, ImplicitList1, Barriers1, OperandInfo82 }, // Inst #482 = CMP8rm
+ { 483, 2, 0, 0, "CMP8rr", 0, 0x38000003ULL, NULL, ImplicitList1, Barriers1, OperandInfo83 }, // Inst #483 = CMP8rr
+ { 484, 2, 0, 0, "CMP8rr_alt", 0|(1<<TID::UnmodeledSideEffects), 0x3a000005ULL, NULL, ImplicitList1, Barriers1, OperandInfo83 }, // Inst #484 = CMP8rr_alt
+ { 485, 8, 1, 0, "CMPPDrmi", 0|(1<<TID::MayLoad), 0xc2802146ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #485 = CMPPDrmi
+ { 486, 8, 1, 0, "CMPPDrmi_alt", 0|(1<<TID::UnmodeledSideEffects), 0xc2802146ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #486 = CMPPDrmi_alt
+ { 487, 4, 1, 0, "CMPPDrri", 0, 0xc2802145ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #487 = CMPPDrri
+ { 488, 4, 1, 0, "CMPPDrri_alt", 0|(1<<TID::UnmodeledSideEffects), 0xc2802145ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #488 = CMPPDrri_alt
+ { 489, 8, 1, 0, "CMPPSrmi", 0|(1<<TID::MayLoad), 0xc2402106ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #489 = CMPPSrmi
+ { 490, 8, 1, 0, "CMPPSrmi_alt", 0|(1<<TID::UnmodeledSideEffects), 0xc2402106ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #490 = CMPPSrmi_alt
+ { 491, 4, 1, 0, "CMPPSrri", 0, 0xc2402105ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #491 = CMPPSrri
+ { 492, 4, 1, 0, "CMPPSrri_alt", 0|(1<<TID::UnmodeledSideEffects), 0xc2402105ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #492 = CMPPSrri_alt
+ { 493, 0, 0, 0, "CMPS16", 0|(1<<TID::UnmodeledSideEffects), 0xa7000041ULL, NULL, NULL, NULL, 0 }, // Inst #493 = CMPS16
+ { 494, 0, 0, 0, "CMPS32", 0|(1<<TID::UnmodeledSideEffects), 0xa7000001ULL, NULL, NULL, NULL, 0 }, // Inst #494 = CMPS32
+ { 495, 0, 0, 0, "CMPS64", 0|(1<<TID::UnmodeledSideEffects), 0xa7001001ULL, NULL, NULL, NULL, 0 }, // Inst #495 = CMPS64
+ { 496, 0, 0, 0, "CMPS8", 0|(1<<TID::UnmodeledSideEffects), 0xa6000001ULL, NULL, NULL, NULL, 0 }, // Inst #496 = CMPS8
+ { 497, 8, 1, 0, "CMPSDrm", 0|(1<<TID::MayLoad), 0xc2002b06ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #497 = CMPSDrm
+ { 498, 8, 1, 0, "CMPSDrm_alt", 0|(1<<TID::MayLoad), 0xc2002b06ULL, NULL, NULL, NULL, OperandInfo84 }, // Inst #498 = CMPSDrm_alt
+ { 499, 4, 1, 0, "CMPSDrr", 0, 0xc2002b05ULL, NULL, NULL, NULL, OperandInfo85 }, // Inst #499 = CMPSDrr
+ { 500, 4, 1, 0, "CMPSDrr_alt", 0, 0xc2002b05ULL, NULL, NULL, NULL, OperandInfo85 }, // Inst #500 = CMPSDrr_alt
+ { 501, 8, 1, 0, "CMPSSrm", 0|(1<<TID::MayLoad), 0xc2002c06ULL, NULL, NULL, NULL, OperandInfo86 }, // Inst #501 = CMPSSrm
+ { 502, 8, 1, 0, "CMPSSrm_alt", 0|(1<<TID::MayLoad), 0xc2002c06ULL, NULL, NULL, NULL, OperandInfo86 }, // Inst #502 = CMPSSrm_alt
+ { 503, 4, 1, 0, "CMPSSrr", 0, 0xc2002c05ULL, NULL, NULL, NULL, OperandInfo87 }, // Inst #503 = CMPSSrr
+ { 504, 4, 1, 0, "CMPSSrr_alt", 0, 0xc2002c05ULL, NULL, NULL, NULL, OperandInfo87 }, // Inst #504 = CMPSSrr_alt
+ { 505, 5, 0, 0, "CMPXCHG16B", 0|(1<<TID::UnmodeledSideEffects), 0xc7001119ULL, ImplicitList16, ImplicitList17, Barriers1, OperandInfo34 }, // Inst #505 = CMPXCHG16B
+ { 506, 6, 0, 0, "CMPXCHG16rm", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xb1000144ULL, NULL, NULL, NULL, OperandInfo11 }, // Inst #506 = CMPXCHG16rm
+ { 507, 2, 1, 0, "CMPXCHG16rr", 0|(1<<TID::UnmodeledSideEffects), 0xb1000143ULL, NULL, NULL, NULL, OperandInfo56 }, // Inst #507 = CMPXCHG16rr
+ { 508, 6, 0, 0, "CMPXCHG32rm", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xb1000104ULL, NULL, NULL, NULL, OperandInfo15 }, // Inst #508 = CMPXCHG32rm
+ { 509, 2, 1, 0, "CMPXCHG32rr", 0|(1<<TID::UnmodeledSideEffects), 0xb1000103ULL, NULL, NULL, NULL, OperandInfo58 }, // Inst #509 = CMPXCHG32rr
+ { 510, 6, 0, 0, "CMPXCHG64rm", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xb1001104ULL, NULL, NULL, NULL, OperandInfo19 }, // Inst #510 = CMPXCHG64rm
+ { 511, 2, 1, 0, "CMPXCHG64rr", 0|(1<<TID::UnmodeledSideEffects), 0xb1001103ULL, NULL, NULL, NULL, OperandInfo60 }, // Inst #511 = CMPXCHG64rr
+ { 512, 5, 0, 0, "CMPXCHG8B", 0|(1<<TID::UnmodeledSideEffects), 0xc7000119ULL, ImplicitList6, ImplicitList18, Barriers6, OperandInfo34 }, // Inst #512 = CMPXCHG8B
+ { 513, 6, 0, 0, "CMPXCHG8rm", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xb0000104ULL, NULL, NULL, NULL, OperandInfo24 }, // Inst #513 = CMPXCHG8rm
+ { 514, 2, 1, 0, "CMPXCHG8rr", 0|(1<<TID::UnmodeledSideEffects), 0xb0000103ULL, NULL, NULL, NULL, OperandInfo83 }, // Inst #514 = CMPXCHG8rr
+ { 515, 6, 0, 0, "COMISDrm", 0|(1<<TID::UnmodeledSideEffects), 0x2f800146ULL, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #515 = COMISDrm
+ { 516, 2, 0, 0, "COMISDrr", 0|(1<<TID::UnmodeledSideEffects), 0x2f800145ULL, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #516 = COMISDrr
+ { 517, 6, 0, 0, "COMISSrm", 0|(1<<TID::UnmodeledSideEffects), 0x2f400106ULL, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #517 = COMISSrm
+ { 518, 2, 0, 0, "COMISSrr", 0|(1<<TID::UnmodeledSideEffects), 0x2f400105ULL, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #518 = COMISSrr
+ { 519, 1, 0, 0, "COMP_FST0r", 0|(1<<TID::UnmodeledSideEffects), 0xd8000302ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #519 = COMP_FST0r
+ { 520, 1, 0, 0, "COM_FIPr", 0|(1<<TID::UnmodeledSideEffects), 0xf0000a02ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #520 = COM_FIPr
+ { 521, 1, 0, 0, "COM_FIr", 0|(1<<TID::UnmodeledSideEffects), 0xf0000602ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #521 = COM_FIr
+ { 522, 1, 0, 0, "COM_FST0r", 0|(1<<TID::UnmodeledSideEffects), 0xd0000302ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #522 = COM_FST0r
+ { 523, 0, 0, 0, "COS_F", 0|(1<<TID::UnmodeledSideEffects), 0xff000401ULL, NULL, NULL, NULL, 0 }, // Inst #523 = COS_F
+ { 524, 2, 1, 0, "COS_Fp32", 0, 0x30000ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #524 = COS_Fp32
+ { 525, 2, 1, 0, "COS_Fp64", 0, 0x30000ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #525 = COS_Fp64
+ { 526, 2, 1, 0, "COS_Fp80", 0, 0x30000ULL, NULL, NULL, NULL, OperandInfo9 }, // Inst #526 = COS_Fp80
+ { 527, 0, 0, 0, "CPUID", 0|(1<<TID::UnmodeledSideEffects), 0xa2000101ULL, NULL, NULL, NULL, 0 }, // Inst #527 = CPUID
+ { 528, 0, 0, 0, "CQO", 0, 0x99001001ULL, ImplicitList15, ImplicitList19, NULL, 0 }, // Inst #528 = CQO
+ { 529, 7, 1, 0, "CRC32m16", 0|(1<<TID::MayLoad), 0xf1000f46ULL, NULL, NULL, NULL, OperandInfo17 }, // Inst #529 = CRC32m16
+ { 530, 7, 1, 0, "CRC32m32", 0|(1<<TID::MayLoad), 0xf1000f06ULL, NULL, NULL, NULL, OperandInfo17 }, // Inst #530 = CRC32m32
+ { 531, 7, 1, 0, "CRC32m8", 0|(1<<TID::MayLoad), 0xf0000f06ULL, NULL, NULL, NULL, OperandInfo17 }, // Inst #531 = CRC32m8
+ { 532, 3, 1, 0, "CRC32r16", 0, 0xf1000f45ULL, NULL, NULL, NULL, OperandInfo88 }, // Inst #532 = CRC32r16
+ { 533, 3, 1, 0, "CRC32r32", 0, 0xf1000f05ULL, NULL, NULL, NULL, OperandInfo18 }, // Inst #533 = CRC32r32
+ { 534, 3, 1, 0, "CRC32r8", 0, 0xf0000f05ULL, NULL, NULL, NULL, OperandInfo89 }, // Inst #534 = CRC32r8
+ { 535, 7, 1, 0, "CRC64m64", 0|(1<<TID::MayLoad), 0xf1001f06ULL, NULL, NULL, NULL, OperandInfo21 }, // Inst #535 = CRC64m64
+ { 536, 7, 1, 0, "CRC64m8", 0|(1<<TID::MayLoad), 0xf0001f06ULL, NULL, NULL, NULL, OperandInfo21 }, // Inst #536 = CRC64m8
+ { 537, 3, 1, 0, "CRC64r64", 0, 0xf1001f05ULL, NULL, NULL, NULL, OperandInfo22 }, // Inst #537 = CRC64r64
+ { 538, 3, 1, 0, "CRC64r8", 0, 0xf0001f05ULL, NULL, NULL, NULL, OperandInfo90 }, // Inst #538 = CRC64r8
+ { 539, 0, 0, 0, "CS_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0x2e000001ULL, NULL, NULL, NULL, 0 }, // Inst #539 = CS_PREFIX
+ { 540, 6, 1, 0, "CVTDQ2PDrm", 0|(1<<TID::UnmodeledSideEffects), 0xe6400c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #540 = CVTDQ2PDrm
+ { 541, 2, 1, 0, "CVTDQ2PDrr", 0|(1<<TID::UnmodeledSideEffects), 0xe6400c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #541 = CVTDQ2PDrr
+ { 542, 6, 1, 0, "CVTDQ2PSrm", 0|(1<<TID::UnmodeledSideEffects), 0x5b400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #542 = CVTDQ2PSrm
+ { 543, 2, 1, 0, "CVTDQ2PSrr", 0|(1<<TID::UnmodeledSideEffects), 0x5b400105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #543 = CVTDQ2PSrr
+ { 544, 6, 1, 0, "CVTPD2DQrm", 0|(1<<TID::UnmodeledSideEffects), 0xe6800b06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #544 = CVTPD2DQrm
+ { 545, 2, 1, 0, "CVTPD2DQrr", 0|(1<<TID::UnmodeledSideEffects), 0xe6800b05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #545 = CVTPD2DQrr
+ { 546, 6, 1, 0, "CVTPD2PSrm", 0|(1<<TID::UnmodeledSideEffects), 0x5a800146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #546 = CVTPD2PSrm
+ { 547, 2, 1, 0, "CVTPD2PSrr", 0|(1<<TID::UnmodeledSideEffects), 0x5a800145ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #547 = CVTPD2PSrr
+ { 548, 6, 1, 0, "CVTPS2DQrm", 0|(1<<TID::UnmodeledSideEffects), 0x5b800146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #548 = CVTPS2DQrm
+ { 549, 2, 1, 0, "CVTPS2DQrr", 0|(1<<TID::UnmodeledSideEffects), 0x5b800145ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #549 = CVTPS2DQrr
+ { 550, 6, 1, 0, "CVTPS2PDrm", 0|(1<<TID::UnmodeledSideEffects), 0x5a000106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #550 = CVTPS2PDrm
+ { 551, 2, 1, 0, "CVTPS2PDrr", 0|(1<<TID::UnmodeledSideEffects), 0x5a000105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #551 = CVTPS2PDrr
+ { 552, 6, 1, 0, "CVTSD2SI64rm", 0|(1<<TID::UnmodeledSideEffects), 0x2d001b06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #552 = CVTSD2SI64rm
+ { 553, 2, 1, 0, "CVTSD2SI64rr", 0|(1<<TID::UnmodeledSideEffects), 0x2d001b05ULL, NULL, NULL, NULL, OperandInfo91 }, // Inst #553 = CVTSD2SI64rr
+ { 554, 6, 1, 0, "CVTSD2SSrm", 0|(1<<TID::MayLoad), 0x5a000b06ULL, NULL, NULL, NULL, OperandInfo92 }, // Inst #554 = CVTSD2SSrm
+ { 555, 2, 1, 0, "CVTSD2SSrr", 0, 0x5a000b05ULL, NULL, NULL, NULL, OperandInfo93 }, // Inst #555 = CVTSD2SSrr
+ { 556, 6, 1, 0, "CVTSI2SD64rm", 0|(1<<TID::MayLoad), 0x2a001b06ULL, NULL, NULL, NULL, OperandInfo94 }, // Inst #556 = CVTSI2SD64rm
+ { 557, 2, 1, 0, "CVTSI2SD64rr", 0, 0x2a001b05ULL, NULL, NULL, NULL, OperandInfo95 }, // Inst #557 = CVTSI2SD64rr
+ { 558, 6, 1, 0, "CVTSI2SDrm", 0|(1<<TID::MayLoad), 0x2a000b06ULL, NULL, NULL, NULL, OperandInfo94 }, // Inst #558 = CVTSI2SDrm
+ { 559, 2, 1, 0, "CVTSI2SDrr", 0, 0x2a000b05ULL, NULL, NULL, NULL, OperandInfo96 }, // Inst #559 = CVTSI2SDrr
+ { 560, 6, 1, 0, "CVTSI2SS64rm", 0|(1<<TID::MayLoad), 0x2a001c06ULL, NULL, NULL, NULL, OperandInfo92 }, // Inst #560 = CVTSI2SS64rm
+ { 561, 2, 1, 0, "CVTSI2SS64rr", 0, 0x2a001c05ULL, NULL, NULL, NULL, OperandInfo97 }, // Inst #561 = CVTSI2SS64rr
+ { 562, 6, 1, 0, "CVTSI2SSrm", 0|(1<<TID::MayLoad), 0x2a000c06ULL, NULL, NULL, NULL, OperandInfo92 }, // Inst #562 = CVTSI2SSrm
+ { 563, 2, 1, 0, "CVTSI2SSrr", 0, 0x2a000c05ULL, NULL, NULL, NULL, OperandInfo98 }, // Inst #563 = CVTSI2SSrr
+ { 564, 6, 1, 0, "CVTSS2SDrm", 0|(1<<TID::MayLoad), 0x5a000c06ULL, NULL, NULL, NULL, OperandInfo94 }, // Inst #564 = CVTSS2SDrm
+ { 565, 2, 1, 0, "CVTSS2SDrr", 0, 0x5a000c05ULL, NULL, NULL, NULL, OperandInfo99 }, // Inst #565 = CVTSS2SDrr
+ { 566, 6, 1, 0, "CVTSS2SI64rm", 0|(1<<TID::UnmodeledSideEffects), 0x2d001c06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #566 = CVTSS2SI64rm
+ { 567, 2, 1, 0, "CVTSS2SI64rr", 0|(1<<TID::UnmodeledSideEffects), 0x2d001c05ULL, NULL, NULL, NULL, OperandInfo100 }, // Inst #567 = CVTSS2SI64rr
+ { 568, 6, 1, 0, "CVTSS2SIrm", 0|(1<<TID::UnmodeledSideEffects), 0x2d000c06ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #568 = CVTSS2SIrm
+ { 569, 2, 1, 0, "CVTSS2SIrr", 0|(1<<TID::UnmodeledSideEffects), 0x2d000c05ULL, NULL, NULL, NULL, OperandInfo101 }, // Inst #569 = CVTSS2SIrr
+ { 570, 6, 1, 0, "CVTTPS2DQrm", 0|(1<<TID::UnmodeledSideEffects), 0x5b000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #570 = CVTTPS2DQrm
+ { 571, 2, 1, 0, "CVTTPS2DQrr", 0|(1<<TID::UnmodeledSideEffects), 0x5b000c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #571 = CVTTPS2DQrr
+ { 572, 6, 1, 0, "CVTTSD2SI64rm", 0|(1<<TID::MayLoad), 0x2c001b06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #572 = CVTTSD2SI64rm
+ { 573, 2, 1, 0, "CVTTSD2SI64rr", 0, 0x2c001b05ULL, NULL, NULL, NULL, OperandInfo102 }, // Inst #573 = CVTTSD2SI64rr
+ { 574, 6, 1, 0, "CVTTSD2SIrm", 0|(1<<TID::MayLoad), 0x2c000b06ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #574 = CVTTSD2SIrm
+ { 575, 2, 1, 0, "CVTTSD2SIrr", 0, 0x2c000b05ULL, NULL, NULL, NULL, OperandInfo103 }, // Inst #575 = CVTTSD2SIrr
+ { 576, 6, 1, 0, "CVTTSS2SI64rm", 0|(1<<TID::MayLoad), 0x2c001c06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #576 = CVTTSS2SI64rm
+ { 577, 2, 1, 0, "CVTTSS2SI64rr", 0, 0x2c001c05ULL, NULL, NULL, NULL, OperandInfo100 }, // Inst #577 = CVTTSS2SI64rr
+ { 578, 6, 1, 0, "CVTTSS2SIrm", 0|(1<<TID::MayLoad), 0x2c000c06ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #578 = CVTTSS2SIrm
+ { 579, 2, 1, 0, "CVTTSS2SIrr", 0, 0x2c000c05ULL, NULL, NULL, NULL, OperandInfo101 }, // Inst #579 = CVTTSS2SIrr
+ { 580, 0, 0, 0, "CWD", 0, 0x99000041ULL, ImplicitList12, ImplicitList20, NULL, 0 }, // Inst #580 = CWD
+ { 581, 0, 0, 0, "CWDE", 0, 0x98000001ULL, ImplicitList12, ImplicitList13, NULL, 0 }, // Inst #581 = CWDE
+ { 582, 5, 0, 0, "DEC16m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xff000059ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #582 = DEC16m
+ { 583, 2, 1, 0, "DEC16r", 0|(1<<TID::ConvertibleTo3Addr), 0x48000042ULL, NULL, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #583 = DEC16r
+ { 584, 5, 0, 0, "DEC32m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xff000019ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #584 = DEC32m
+ { 585, 2, 1, 0, "DEC32r", 0|(1<<TID::ConvertibleTo3Addr), 0x48000002ULL, NULL, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #585 = DEC32r
+ { 586, 5, 0, 0, "DEC64_16m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xff000059ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #586 = DEC64_16m
+ { 587, 2, 1, 0, "DEC64_16r", 0|(1<<TID::ConvertibleTo3Addr), 0xff000051ULL, NULL, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #587 = DEC64_16r
+ { 588, 5, 0, 0, "DEC64_32m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xff000019ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #588 = DEC64_32m
+ { 589, 2, 1, 0, "DEC64_32r", 0|(1<<TID::ConvertibleTo3Addr), 0xff000011ULL, NULL, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #589 = DEC64_32r
+ { 590, 5, 0, 0, "DEC64m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xff001019ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #590 = DEC64m
+ { 591, 2, 1, 0, "DEC64r", 0|(1<<TID::ConvertibleTo3Addr), 0xff001011ULL, NULL, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #591 = DEC64r
+ { 592, 5, 0, 0, "DEC8m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xfe000019ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #592 = DEC8m
+ { 593, 2, 1, 0, "DEC8r", 0, 0xfe000011ULL, NULL, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #593 = DEC8r
+ { 594, 5, 0, 0, "DIV16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xf700005eULL, ImplicitList20, ImplicitList21, Barriers1, OperandInfo34 }, // Inst #594 = DIV16m
+ { 595, 1, 0, 0, "DIV16r", 0|(1<<TID::UnmodeledSideEffects), 0xf7000056ULL, ImplicitList20, ImplicitList21, Barriers1, OperandInfo106 }, // Inst #595 = DIV16r
+ { 596, 5, 0, 0, "DIV32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xf700001eULL, ImplicitList14, ImplicitList18, Barriers6, OperandInfo34 }, // Inst #596 = DIV32m
+ { 597, 1, 0, 0, "DIV32r", 0|(1<<TID::UnmodeledSideEffects), 0xf7000016ULL, ImplicitList14, ImplicitList18, Barriers6, OperandInfo66 }, // Inst #597 = DIV32r
+ { 598, 5, 0, 0, "DIV64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xf700101eULL, ImplicitList19, ImplicitList17, Barriers1, OperandInfo34 }, // Inst #598 = DIV64m
+ { 599, 1, 0, 0, "DIV64r", 0|(1<<TID::UnmodeledSideEffects), 0xf7001016ULL, ImplicitList19, ImplicitList17, Barriers1, OperandInfo67 }, // Inst #599 = DIV64r
+ { 600, 5, 0, 0, "DIV8m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xf600001eULL, ImplicitList12, ImplicitList22, Barriers1, OperandInfo34 }, // Inst #600 = DIV8m
+ { 601, 1, 0, 0, "DIV8r", 0|(1<<TID::UnmodeledSideEffects), 0xf6000016ULL, ImplicitList12, ImplicitList22, Barriers1, OperandInfo107 }, // Inst #601 = DIV8r
+ { 602, 7, 1, 0, "DIVPDrm", 0|(1<<TID::MayLoad), 0x5e800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #602 = DIVPDrm
+ { 603, 3, 1, 0, "DIVPDrr", 0, 0x5e800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #603 = DIVPDrr
+ { 604, 7, 1, 0, "DIVPSrm", 0|(1<<TID::MayLoad), 0x5e400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #604 = DIVPSrm
+ { 605, 3, 1, 0, "DIVPSrr", 0, 0x5e400105ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #605 = DIVPSrr
+ { 606, 5, 0, 0, "DIVR_F32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xd800001fULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #606 = DIVR_F32m
+ { 607, 5, 0, 0, "DIVR_F64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xdc00001fULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #607 = DIVR_F64m
+ { 608, 5, 0, 0, "DIVR_FI16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xde00001fULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #608 = DIVR_FI16m
+ { 609, 5, 0, 0, "DIVR_FI32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xda00001fULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #609 = DIVR_FI32m
+ { 610, 1, 0, 0, "DIVR_FPrST0", 0|(1<<TID::UnmodeledSideEffects), 0xf0000902ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #610 = DIVR_FPrST0
+ { 611, 1, 0, 0, "DIVR_FST0r", 0|(1<<TID::UnmodeledSideEffects), 0xf8000302ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #611 = DIVR_FST0r
+ { 612, 7, 1, 0, "DIVR_Fp32m", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #612 = DIVR_Fp32m
+ { 613, 7, 1, 0, "DIVR_Fp64m", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #613 = DIVR_Fp64m
+ { 614, 7, 1, 0, "DIVR_Fp64m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #614 = DIVR_Fp64m32
+ { 615, 7, 1, 0, "DIVR_Fp80m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #615 = DIVR_Fp80m32
+ { 616, 7, 1, 0, "DIVR_Fp80m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #616 = DIVR_Fp80m64
+ { 617, 7, 1, 0, "DIVR_FpI16m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #617 = DIVR_FpI16m32
+ { 618, 7, 1, 0, "DIVR_FpI16m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #618 = DIVR_FpI16m64
+ { 619, 7, 1, 0, "DIVR_FpI16m80", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #619 = DIVR_FpI16m80
+ { 620, 7, 1, 0, "DIVR_FpI32m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #620 = DIVR_FpI32m32
+ { 621, 7, 1, 0, "DIVR_FpI32m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #621 = DIVR_FpI32m64
+ { 622, 7, 1, 0, "DIVR_FpI32m80", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #622 = DIVR_FpI32m80
+ { 623, 1, 0, 0, "DIVR_FrST0", 0|(1<<TID::UnmodeledSideEffects), 0xf0000702ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #623 = DIVR_FrST0
+ { 624, 7, 1, 0, "DIVSDrm", 0|(1<<TID::MayLoad), 0x5e000b06ULL, NULL, NULL, NULL, OperandInfo30 }, // Inst #624 = DIVSDrm
+ { 625, 7, 1, 0, "DIVSDrm_Int", 0|(1<<TID::MayLoad), 0x5e000b06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #625 = DIVSDrm_Int
+ { 626, 3, 1, 0, "DIVSDrr", 0, 0x5e000b05ULL, NULL, NULL, NULL, OperandInfo31 }, // Inst #626 = DIVSDrr
+ { 627, 3, 1, 0, "DIVSDrr_Int", 0, 0x5e000b05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #627 = DIVSDrr_Int
+ { 628, 7, 1, 0, "DIVSSrm", 0|(1<<TID::MayLoad), 0x5e000c06ULL, NULL, NULL, NULL, OperandInfo32 }, // Inst #628 = DIVSSrm
+ { 629, 7, 1, 0, "DIVSSrm_Int", 0|(1<<TID::MayLoad), 0x5e000c06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #629 = DIVSSrm_Int
+ { 630, 3, 1, 0, "DIVSSrr", 0, 0x5e000c05ULL, NULL, NULL, NULL, OperandInfo33 }, // Inst #630 = DIVSSrr
+ { 631, 3, 1, 0, "DIVSSrr_Int", 0, 0x5e000c05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #631 = DIVSSrr_Int
+ { 632, 5, 0, 0, "DIV_F32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xd800001eULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #632 = DIV_F32m
+ { 633, 5, 0, 0, "DIV_F64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xdc00001eULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #633 = DIV_F64m
+ { 634, 5, 0, 0, "DIV_FI16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xde00001eULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #634 = DIV_FI16m
+ { 635, 5, 0, 0, "DIV_FI32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xda00001eULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #635 = DIV_FI32m
+ { 636, 1, 0, 0, "DIV_FPrST0", 0|(1<<TID::UnmodeledSideEffects), 0xf8000902ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #636 = DIV_FPrST0
+ { 637, 1, 0, 0, "DIV_FST0r", 0|(1<<TID::UnmodeledSideEffects), 0xf0000302ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #637 = DIV_FST0r
+ { 638, 3, 1, 0, "DIV_Fp32", 0, 0x40000ULL, NULL, NULL, NULL, OperandInfo36 }, // Inst #638 = DIV_Fp32
+ { 639, 7, 1, 0, "DIV_Fp32m", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #639 = DIV_Fp32m
+ { 640, 3, 1, 0, "DIV_Fp64", 0, 0x40000ULL, NULL, NULL, NULL, OperandInfo38 }, // Inst #640 = DIV_Fp64
+ { 641, 7, 1, 0, "DIV_Fp64m", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #641 = DIV_Fp64m
+ { 642, 7, 1, 0, "DIV_Fp64m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #642 = DIV_Fp64m32
+ { 643, 3, 1, 0, "DIV_Fp80", 0, 0x40000ULL, NULL, NULL, NULL, OperandInfo40 }, // Inst #643 = DIV_Fp80
+ { 644, 7, 1, 0, "DIV_Fp80m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #644 = DIV_Fp80m32
+ { 645, 7, 1, 0, "DIV_Fp80m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #645 = DIV_Fp80m64
+ { 646, 7, 1, 0, "DIV_FpI16m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #646 = DIV_FpI16m32
+ { 647, 7, 1, 0, "DIV_FpI16m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #647 = DIV_FpI16m64
+ { 648, 7, 1, 0, "DIV_FpI16m80", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #648 = DIV_FpI16m80
+ { 649, 7, 1, 0, "DIV_FpI32m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #649 = DIV_FpI32m32
+ { 650, 7, 1, 0, "DIV_FpI32m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #650 = DIV_FpI32m64
+ { 651, 7, 1, 0, "DIV_FpI32m80", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #651 = DIV_FpI32m80
+ { 652, 1, 0, 0, "DIV_FrST0", 0|(1<<TID::UnmodeledSideEffects), 0xf8000702ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #652 = DIV_FrST0
+ { 653, 8, 1, 0, "DPPDrmi", 0|(1<<TID::MayLoad), 0x41c02e46ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #653 = DPPDrmi
+ { 654, 4, 1, 0, "DPPDrri", 0|(1<<TID::Commutable), 0x41c02e45ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #654 = DPPDrri
+ { 655, 8, 1, 0, "DPPSrmi", 0|(1<<TID::MayLoad), 0x40c02e46ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #655 = DPPSrmi
+ { 656, 4, 1, 0, "DPPSrri", 0|(1<<TID::Commutable), 0x40c02e45ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #656 = DPPSrri
+ { 657, 0, 0, 0, "DS_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0x3e000001ULL, NULL, NULL, NULL, 0 }, // Inst #657 = DS_PREFIX
+ { 658, 1, 0, 0, "EH_RETURN", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Terminator), 0xc3000001ULL, NULL, NULL, NULL, OperandInfo66 }, // Inst #658 = EH_RETURN
+ { 659, 1, 0, 0, "EH_RETURN64", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Terminator), 0xc3000001ULL, NULL, NULL, NULL, OperandInfo67 }, // Inst #659 = EH_RETURN64
+ { 660, 2, 0, 0, "ENTER", 0|(1<<TID::UnmodeledSideEffects), 0xc8000001ULL, NULL, NULL, NULL, OperandInfo6 }, // Inst #660 = ENTER
+ { 661, 0, 0, 0, "ES_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0x26000001ULL, NULL, NULL, NULL, 0 }, // Inst #661 = ES_PREFIX
+ { 662, 7, 0, 0, "EXTRACTPSmr", 0|(1<<TID::MayStore), 0x17c02e44ULL, NULL, NULL, NULL, OperandInfo108 }, // Inst #662 = EXTRACTPSmr
+ { 663, 3, 1, 0, "EXTRACTPSrr", 0, 0x17c02e43ULL, NULL, NULL, NULL, OperandInfo109 }, // Inst #663 = EXTRACTPSrr
+ { 664, 0, 0, 0, "F2XM1", 0|(1<<TID::UnmodeledSideEffects), 0xf0000401ULL, NULL, NULL, NULL, 0 }, // Inst #664 = F2XM1
+ { 665, 2, 0, 0, "FARCALL16i", 0|(1<<TID::Call)|(1<<TID::UnmodeledSideEffects), 0x9a00606bULL, ImplicitList2, ImplicitList9, Barriers3, OperandInfo6 }, // Inst #665 = FARCALL16i
+ { 666, 5, 0, 0, "FARCALL16m", 0|(1<<TID::Call)|(1<<TID::UnmodeledSideEffects), 0xff00005bULL, ImplicitList2, ImplicitList9, Barriers3, OperandInfo34 }, // Inst #666 = FARCALL16m
+ { 667, 2, 0, 0, "FARCALL32i", 0|(1<<TID::Call)|(1<<TID::UnmodeledSideEffects), 0x9a00a02bULL, ImplicitList2, ImplicitList9, Barriers3, OperandInfo6 }, // Inst #667 = FARCALL32i
+ { 668, 5, 0, 0, "FARCALL32m", 0|(1<<TID::Call)|(1<<TID::UnmodeledSideEffects), 0xff00001bULL, ImplicitList2, ImplicitList9, Barriers3, OperandInfo34 }, // Inst #668 = FARCALL32m
+ { 669, 5, 0, 0, "FARCALL64", 0|(1<<TID::Call)|(1<<TID::UnmodeledSideEffects), 0xff00101bULL, ImplicitList4, ImplicitList10, Barriers4, OperandInfo34 }, // Inst #669 = FARCALL64
+ { 670, 2, 0, 0, "FARJMP16i", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0xea00606bULL, NULL, NULL, NULL, OperandInfo6 }, // Inst #670 = FARJMP16i
+ { 671, 5, 0, 0, "FARJMP16m", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0xff00005dULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #671 = FARJMP16m
+ { 672, 2, 0, 0, "FARJMP32i", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0xea00a02bULL, NULL, NULL, NULL, OperandInfo6 }, // Inst #672 = FARJMP32i
+ { 673, 5, 0, 0, "FARJMP32m", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0xff00001dULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #673 = FARJMP32m
+ { 674, 5, 0, 0, "FARJMP64", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0xff00101dULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #674 = FARJMP64
+ { 675, 5, 0, 0, "FBLDm", 0|(1<<TID::UnmodeledSideEffects), 0xdf00001cULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #675 = FBLDm
+ { 676, 5, 1, 0, "FBSTPm", 0|(1<<TID::UnmodeledSideEffects), 0xdf00001eULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #676 = FBSTPm
+ { 677, 5, 0, 0, "FCOM32m", 0|(1<<TID::UnmodeledSideEffects), 0xd800001aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #677 = FCOM32m
+ { 678, 5, 0, 0, "FCOM64m", 0|(1<<TID::UnmodeledSideEffects), 0xdc00001aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #678 = FCOM64m
+ { 679, 5, 0, 0, "FCOMP32m", 0|(1<<TID::UnmodeledSideEffects), 0xd800001bULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #679 = FCOMP32m
+ { 680, 5, 0, 0, "FCOMP64m", 0|(1<<TID::UnmodeledSideEffects), 0xdc00001bULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #680 = FCOMP64m
+ { 681, 0, 0, 0, "FCOMPP", 0|(1<<TID::UnmodeledSideEffects), 0xd9000901ULL, NULL, NULL, NULL, 0 }, // Inst #681 = FCOMPP
+ { 682, 0, 0, 0, "FDECSTP", 0|(1<<TID::UnmodeledSideEffects), 0xf6000401ULL, NULL, NULL, NULL, 0 }, // Inst #682 = FDECSTP
+ { 683, 1, 0, 0, "FFREE", 0|(1<<TID::UnmodeledSideEffects), 0xc0000802ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #683 = FFREE
+ { 684, 5, 0, 0, "FICOM16m", 0|(1<<TID::UnmodeledSideEffects), 0xde00001aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #684 = FICOM16m
+ { 685, 5, 0, 0, "FICOM32m", 0|(1<<TID::UnmodeledSideEffects), 0xda00001aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #685 = FICOM32m
+ { 686, 5, 0, 0, "FICOMP16m", 0|(1<<TID::UnmodeledSideEffects), 0xde00001bULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #686 = FICOMP16m
+ { 687, 5, 0, 0, "FICOMP32m", 0|(1<<TID::UnmodeledSideEffects), 0xda00001bULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #687 = FICOMP32m
+ { 688, 0, 0, 0, "FINCSTP", 0|(1<<TID::UnmodeledSideEffects), 0xf7000401ULL, NULL, NULL, NULL, 0 }, // Inst #688 = FINCSTP
+ { 689, 5, 0, 0, "FLDCW16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xd900001dULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #689 = FLDCW16m
+ { 690, 5, 0, 0, "FLDENVm", 0|(1<<TID::UnmodeledSideEffects), 0xd900001cULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #690 = FLDENVm
+ { 691, 0, 0, 0, "FLDL2E", 0|(1<<TID::UnmodeledSideEffects), 0xea000401ULL, NULL, NULL, NULL, 0 }, // Inst #691 = FLDL2E
+ { 692, 0, 0, 0, "FLDL2T", 0|(1<<TID::UnmodeledSideEffects), 0xe9000401ULL, NULL, NULL, NULL, 0 }, // Inst #692 = FLDL2T
+ { 693, 0, 0, 0, "FLDLG2", 0|(1<<TID::UnmodeledSideEffects), 0xec000401ULL, NULL, NULL, NULL, 0 }, // Inst #693 = FLDLG2
+ { 694, 0, 0, 0, "FLDLN2", 0|(1<<TID::UnmodeledSideEffects), 0xed000401ULL, NULL, NULL, NULL, 0 }, // Inst #694 = FLDLN2
+ { 695, 0, 0, 0, "FLDPI", 0|(1<<TID::UnmodeledSideEffects), 0xeb000401ULL, NULL, NULL, NULL, 0 }, // Inst #695 = FLDPI
+ { 696, 0, 0, 0, "FNCLEX", 0|(1<<TID::UnmodeledSideEffects), 0xe2000601ULL, NULL, NULL, NULL, 0 }, // Inst #696 = FNCLEX
+ { 697, 0, 0, 0, "FNINIT", 0|(1<<TID::UnmodeledSideEffects), 0xe3000601ULL, NULL, NULL, NULL, 0 }, // Inst #697 = FNINIT
+ { 698, 0, 0, 0, "FNOP", 0|(1<<TID::UnmodeledSideEffects), 0xd0000401ULL, NULL, NULL, NULL, 0 }, // Inst #698 = FNOP
+ { 699, 5, 0, 0, "FNSTCW16m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xd900001fULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #699 = FNSTCW16m
+ { 700, 0, 0, 0, "FNSTSW8r", 0|(1<<TID::UnmodeledSideEffects), 0xe0000a01ULL, NULL, ImplicitList12, NULL, 0 }, // Inst #700 = FNSTSW8r
+ { 701, 5, 1, 0, "FNSTSWm", 0|(1<<TID::UnmodeledSideEffects), 0xdd00001fULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #701 = FNSTSWm
+ { 702, 6, 0, 0, "FP32_TO_INT16_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, NULL, NULL, OperandInfo110 }, // Inst #702 = FP32_TO_INT16_IN_MEM
+ { 703, 6, 0, 0, "FP32_TO_INT32_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, NULL, NULL, OperandInfo110 }, // Inst #703 = FP32_TO_INT32_IN_MEM
+ { 704, 6, 0, 0, "FP32_TO_INT64_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, NULL, NULL, OperandInfo110 }, // Inst #704 = FP32_TO_INT64_IN_MEM
+ { 705, 6, 0, 0, "FP64_TO_INT16_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, NULL, NULL, OperandInfo111 }, // Inst #705 = FP64_TO_INT16_IN_MEM
+ { 706, 6, 0, 0, "FP64_TO_INT32_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, NULL, NULL, OperandInfo111 }, // Inst #706 = FP64_TO_INT32_IN_MEM
+ { 707, 6, 0, 0, "FP64_TO_INT64_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, NULL, NULL, OperandInfo111 }, // Inst #707 = FP64_TO_INT64_IN_MEM
+ { 708, 6, 0, 0, "FP80_TO_INT16_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, NULL, NULL, OperandInfo112 }, // Inst #708 = FP80_TO_INT16_IN_MEM
+ { 709, 6, 0, 0, "FP80_TO_INT32_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, NULL, NULL, OperandInfo112 }, // Inst #709 = FP80_TO_INT32_IN_MEM
+ { 710, 6, 0, 0, "FP80_TO_INT64_IN_MEM", 0|(1<<TID::MayStore)|(1<<TID::UsesCustomInserter), 0x0ULL, NULL, NULL, NULL, OperandInfo112 }, // Inst #710 = FP80_TO_INT64_IN_MEM
+ { 711, 0, 0, 0, "FPATAN", 0|(1<<TID::UnmodeledSideEffects), 0xf3000401ULL, NULL, NULL, NULL, 0 }, // Inst #711 = FPATAN
+ { 712, 0, 0, 0, "FPREM", 0|(1<<TID::UnmodeledSideEffects), 0xf8000401ULL, NULL, NULL, NULL, 0 }, // Inst #712 = FPREM
+ { 713, 0, 0, 0, "FPREM1", 0|(1<<TID::UnmodeledSideEffects), 0xf5000401ULL, NULL, NULL, NULL, 0 }, // Inst #713 = FPREM1
+ { 714, 0, 0, 0, "FPTAN", 0|(1<<TID::UnmodeledSideEffects), 0xf2000401ULL, NULL, NULL, NULL, 0 }, // Inst #714 = FPTAN
+ { 715, 0, 0, 0, "FRNDINT", 0|(1<<TID::UnmodeledSideEffects), 0xfc000401ULL, NULL, NULL, NULL, 0 }, // Inst #715 = FRNDINT
+ { 716, 5, 1, 0, "FRSTORm", 0|(1<<TID::UnmodeledSideEffects), 0xdd00001cULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #716 = FRSTORm
+ { 717, 5, 1, 0, "FSAVEm", 0|(1<<TID::UnmodeledSideEffects), 0xdd00001eULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #717 = FSAVEm
+ { 718, 0, 0, 0, "FSCALE", 0|(1<<TID::UnmodeledSideEffects), 0xfd000401ULL, NULL, NULL, NULL, 0 }, // Inst #718 = FSCALE
+ { 719, 0, 0, 0, "FSINCOS", 0|(1<<TID::UnmodeledSideEffects), 0xfb000401ULL, NULL, NULL, NULL, 0 }, // Inst #719 = FSINCOS
+ { 720, 5, 1, 0, "FSTENVm", 0|(1<<TID::UnmodeledSideEffects), 0xd900001eULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #720 = FSTENVm
+ { 721, 6, 1, 0, "FS_MOV32rm", 0|(1<<TID::MayLoad), 0x8b100006ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #721 = FS_MOV32rm
+ { 722, 0, 0, 0, "FS_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0x64000001ULL, NULL, NULL, NULL, 0 }, // Inst #722 = FS_PREFIX
+ { 723, 0, 0, 0, "FXAM", 0|(1<<TID::UnmodeledSideEffects), 0xe5000401ULL, NULL, NULL, NULL, 0 }, // Inst #723 = FXAM
+ { 724, 5, 0, 0, "FXRSTOR", 0|(1<<TID::UnmodeledSideEffects), 0xae000119ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #724 = FXRSTOR
+ { 725, 5, 1, 0, "FXSAVE", 0|(1<<TID::UnmodeledSideEffects), 0xae000118ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #725 = FXSAVE
+ { 726, 0, 0, 0, "FXTRACT", 0|(1<<TID::UnmodeledSideEffects), 0xf4000401ULL, NULL, NULL, NULL, 0 }, // Inst #726 = FXTRACT
+ { 727, 0, 0, 0, "FYL2X", 0|(1<<TID::UnmodeledSideEffects), 0xf1000401ULL, NULL, NULL, NULL, 0 }, // Inst #727 = FYL2X
+ { 728, 0, 0, 0, "FYL2XP1", 0|(1<<TID::UnmodeledSideEffects), 0xf9000401ULL, NULL, NULL, NULL, 0 }, // Inst #728 = FYL2XP1
+ { 729, 1, 1, 0, "FpGET_ST0_32", 0|(1<<TID::UnmodeledSideEffects), 0x70000ULL, NULL, NULL, NULL, OperandInfo113 }, // Inst #729 = FpGET_ST0_32
+ { 730, 1, 1, 0, "FpGET_ST0_64", 0|(1<<TID::UnmodeledSideEffects), 0x70000ULL, NULL, NULL, NULL, OperandInfo114 }, // Inst #730 = FpGET_ST0_64
+ { 731, 1, 1, 0, "FpGET_ST0_80", 0|(1<<TID::UnmodeledSideEffects), 0x70000ULL, NULL, NULL, NULL, OperandInfo115 }, // Inst #731 = FpGET_ST0_80
+ { 732, 1, 1, 0, "FpGET_ST1_32", 0|(1<<TID::UnmodeledSideEffects), 0x70000ULL, NULL, NULL, NULL, OperandInfo113 }, // Inst #732 = FpGET_ST1_32
+ { 733, 1, 1, 0, "FpGET_ST1_64", 0|(1<<TID::UnmodeledSideEffects), 0x70000ULL, NULL, NULL, NULL, OperandInfo114 }, // Inst #733 = FpGET_ST1_64
+ { 734, 1, 1, 0, "FpGET_ST1_80", 0|(1<<TID::UnmodeledSideEffects), 0x70000ULL, NULL, NULL, NULL, OperandInfo115 }, // Inst #734 = FpGET_ST1_80
+ { 735, 1, 0, 0, "FpSET_ST0_32", 0|(1<<TID::UnmodeledSideEffects), 0x70000ULL, NULL, ImplicitList23, NULL, OperandInfo113 }, // Inst #735 = FpSET_ST0_32
+ { 736, 1, 0, 0, "FpSET_ST0_64", 0|(1<<TID::UnmodeledSideEffects), 0x70000ULL, NULL, ImplicitList23, NULL, OperandInfo114 }, // Inst #736 = FpSET_ST0_64
+ { 737, 1, 0, 0, "FpSET_ST0_80", 0|(1<<TID::UnmodeledSideEffects), 0x70000ULL, NULL, ImplicitList23, NULL, OperandInfo115 }, // Inst #737 = FpSET_ST0_80
+ { 738, 1, 0, 0, "FpSET_ST1_32", 0|(1<<TID::UnmodeledSideEffects), 0x70000ULL, NULL, ImplicitList24, NULL, OperandInfo113 }, // Inst #738 = FpSET_ST1_32
+ { 739, 1, 0, 0, "FpSET_ST1_64", 0|(1<<TID::UnmodeledSideEffects), 0x70000ULL, NULL, ImplicitList24, NULL, OperandInfo114 }, // Inst #739 = FpSET_ST1_64
+ { 740, 1, 0, 0, "FpSET_ST1_80", 0|(1<<TID::UnmodeledSideEffects), 0x70000ULL, NULL, ImplicitList24, NULL, OperandInfo115 }, // Inst #740 = FpSET_ST1_80
+ { 741, 7, 1, 0, "FsANDNPDrm", 0|(1<<TID::MayLoad), 0x55800146ULL, NULL, NULL, NULL, OperandInfo30 }, // Inst #741 = FsANDNPDrm
+ { 742, 3, 1, 0, "FsANDNPDrr", 0, 0x55800145ULL, NULL, NULL, NULL, OperandInfo31 }, // Inst #742 = FsANDNPDrr
+ { 743, 7, 1, 0, "FsANDNPSrm", 0|(1<<TID::MayLoad), 0x55400106ULL, NULL, NULL, NULL, OperandInfo32 }, // Inst #743 = FsANDNPSrm
+ { 744, 3, 1, 0, "FsANDNPSrr", 0, 0x55400105ULL, NULL, NULL, NULL, OperandInfo33 }, // Inst #744 = FsANDNPSrr
+ { 745, 7, 1, 0, "FsANDPDrm", 0|(1<<TID::MayLoad), 0x54800146ULL, NULL, NULL, NULL, OperandInfo30 }, // Inst #745 = FsANDPDrm
+ { 746, 3, 1, 0, "FsANDPDrr", 0|(1<<TID::Commutable), 0x54800145ULL, NULL, NULL, NULL, OperandInfo31 }, // Inst #746 = FsANDPDrr
+ { 747, 7, 1, 0, "FsANDPSrm", 0|(1<<TID::MayLoad), 0x54400106ULL, NULL, NULL, NULL, OperandInfo32 }, // Inst #747 = FsANDPSrm
+ { 748, 3, 1, 0, "FsANDPSrr", 0|(1<<TID::Commutable), 0x54400105ULL, NULL, NULL, NULL, OperandInfo33 }, // Inst #748 = FsANDPSrr
+ { 749, 1, 1, 0, "FsFLD0SD", 0|(1<<TID::FoldableAsLoad)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0xef000160ULL, NULL, NULL, NULL, OperandInfo116 }, // Inst #749 = FsFLD0SD
+ { 750, 1, 1, 0, "FsFLD0SS", 0|(1<<TID::FoldableAsLoad)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0xef000160ULL, NULL, NULL, NULL, OperandInfo117 }, // Inst #750 = FsFLD0SS
+ { 751, 6, 1, 0, "FsMOVAPDrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x28800146ULL, NULL, NULL, NULL, OperandInfo94 }, // Inst #751 = FsMOVAPDrm
+ { 752, 2, 1, 0, "FsMOVAPDrr", 0, 0x28800145ULL, NULL, NULL, NULL, OperandInfo118 }, // Inst #752 = FsMOVAPDrr
+ { 753, 6, 1, 0, "FsMOVAPSrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x28400106ULL, NULL, NULL, NULL, OperandInfo92 }, // Inst #753 = FsMOVAPSrm
+ { 754, 2, 1, 0, "FsMOVAPSrr", 0, 0x28400105ULL, NULL, NULL, NULL, OperandInfo119 }, // Inst #754 = FsMOVAPSrr
+ { 755, 7, 1, 0, "FsORPDrm", 0|(1<<TID::MayLoad), 0x56800146ULL, NULL, NULL, NULL, OperandInfo30 }, // Inst #755 = FsORPDrm
+ { 756, 3, 1, 0, "FsORPDrr", 0|(1<<TID::Commutable), 0x56800145ULL, NULL, NULL, NULL, OperandInfo31 }, // Inst #756 = FsORPDrr
+ { 757, 7, 1, 0, "FsORPSrm", 0|(1<<TID::MayLoad), 0x56400106ULL, NULL, NULL, NULL, OperandInfo32 }, // Inst #757 = FsORPSrm
+ { 758, 3, 1, 0, "FsORPSrr", 0|(1<<TID::Commutable), 0x56400105ULL, NULL, NULL, NULL, OperandInfo33 }, // Inst #758 = FsORPSrr
+ { 759, 7, 1, 0, "FsXORPDrm", 0|(1<<TID::MayLoad), 0x57800146ULL, NULL, NULL, NULL, OperandInfo30 }, // Inst #759 = FsXORPDrm
+ { 760, 3, 1, 0, "FsXORPDrr", 0|(1<<TID::Commutable), 0x57800145ULL, NULL, NULL, NULL, OperandInfo31 }, // Inst #760 = FsXORPDrr
+ { 761, 7, 1, 0, "FsXORPSrm", 0|(1<<TID::MayLoad), 0x57400106ULL, NULL, NULL, NULL, OperandInfo32 }, // Inst #761 = FsXORPSrm
+ { 762, 3, 1, 0, "FsXORPSrr", 0|(1<<TID::Commutable), 0x57400105ULL, NULL, NULL, NULL, OperandInfo33 }, // Inst #762 = FsXORPSrr
+ { 763, 6, 1, 0, "GS_MOV32rm", 0|(1<<TID::MayLoad), 0x8b200006ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #763 = GS_MOV32rm
+ { 764, 0, 0, 0, "GS_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0x65000001ULL, NULL, NULL, NULL, 0 }, // Inst #764 = GS_PREFIX
+ { 765, 7, 1, 0, "HADDPDrm", 0|(1<<TID::MayLoad), 0x7c800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #765 = HADDPDrm
+ { 766, 3, 1, 0, "HADDPDrr", 0, 0x7c800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #766 = HADDPDrr
+ { 767, 7, 1, 0, "HADDPSrm", 0|(1<<TID::MayLoad), 0x7c800b06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #767 = HADDPSrm
+ { 768, 3, 1, 0, "HADDPSrr", 0, 0x7c800b05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #768 = HADDPSrr
+ { 769, 0, 0, 0, "HLT", 0|(1<<TID::UnmodeledSideEffects), 0xf4000001ULL, NULL, NULL, NULL, 0 }, // Inst #769 = HLT
+ { 770, 7, 1, 0, "HSUBPDrm", 0|(1<<TID::MayLoad), 0x7d800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #770 = HSUBPDrm
+ { 771, 3, 1, 0, "HSUBPDrr", 0, 0x7d800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #771 = HSUBPDrr
+ { 772, 7, 1, 0, "HSUBPSrm", 0|(1<<TID::MayLoad), 0x7d800b06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #772 = HSUBPSrm
+ { 773, 3, 1, 0, "HSUBPSrr", 0, 0x7d800b05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #773 = HSUBPSrr
+ { 774, 5, 0, 0, "IDIV16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xf700005fULL, ImplicitList20, ImplicitList21, Barriers1, OperandInfo34 }, // Inst #774 = IDIV16m
+ { 775, 1, 0, 0, "IDIV16r", 0|(1<<TID::UnmodeledSideEffects), 0xf7000057ULL, ImplicitList20, ImplicitList21, Barriers1, OperandInfo106 }, // Inst #775 = IDIV16r
+ { 776, 5, 0, 0, "IDIV32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xf700001fULL, ImplicitList14, ImplicitList18, Barriers6, OperandInfo34 }, // Inst #776 = IDIV32m
+ { 777, 1, 0, 0, "IDIV32r", 0|(1<<TID::UnmodeledSideEffects), 0xf7000017ULL, ImplicitList14, ImplicitList18, Barriers6, OperandInfo66 }, // Inst #777 = IDIV32r
+ { 778, 5, 0, 0, "IDIV64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xf700101fULL, ImplicitList19, ImplicitList17, Barriers1, OperandInfo34 }, // Inst #778 = IDIV64m
+ { 779, 1, 0, 0, "IDIV64r", 0|(1<<TID::UnmodeledSideEffects), 0xf7001017ULL, ImplicitList19, ImplicitList17, Barriers1, OperandInfo67 }, // Inst #779 = IDIV64r
+ { 780, 5, 0, 0, "IDIV8m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xf600001fULL, ImplicitList12, ImplicitList22, Barriers1, OperandInfo34 }, // Inst #780 = IDIV8m
+ { 781, 1, 0, 0, "IDIV8r", 0|(1<<TID::UnmodeledSideEffects), 0xf6000017ULL, ImplicitList12, ImplicitList22, Barriers1, OperandInfo107 }, // Inst #781 = IDIV8r
+ { 782, 5, 0, 0, "ILD_F16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xdf000018ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #782 = ILD_F16m
+ { 783, 5, 0, 0, "ILD_F32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xdb000018ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #783 = ILD_F32m
+ { 784, 5, 0, 0, "ILD_F64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xdf00001dULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #784 = ILD_F64m
+ { 785, 6, 1, 0, "ILD_Fp16m32", 0|(1<<TID::MayLoad), 0x10000ULL, NULL, NULL, NULL, OperandInfo120 }, // Inst #785 = ILD_Fp16m32
+ { 786, 6, 1, 0, "ILD_Fp16m64", 0|(1<<TID::MayLoad), 0x10000ULL, NULL, NULL, NULL, OperandInfo121 }, // Inst #786 = ILD_Fp16m64
+ { 787, 6, 1, 0, "ILD_Fp16m80", 0|(1<<TID::MayLoad), 0x10000ULL, NULL, NULL, NULL, OperandInfo122 }, // Inst #787 = ILD_Fp16m80
+ { 788, 6, 1, 0, "ILD_Fp32m32", 0|(1<<TID::MayLoad), 0x10000ULL, NULL, NULL, NULL, OperandInfo120 }, // Inst #788 = ILD_Fp32m32
+ { 789, 6, 1, 0, "ILD_Fp32m64", 0|(1<<TID::MayLoad), 0x10000ULL, NULL, NULL, NULL, OperandInfo121 }, // Inst #789 = ILD_Fp32m64
+ { 790, 6, 1, 0, "ILD_Fp32m80", 0|(1<<TID::MayLoad), 0x10000ULL, NULL, NULL, NULL, OperandInfo122 }, // Inst #790 = ILD_Fp32m80
+ { 791, 6, 1, 0, "ILD_Fp64m32", 0|(1<<TID::MayLoad), 0x10000ULL, NULL, NULL, NULL, OperandInfo120 }, // Inst #791 = ILD_Fp64m32
+ { 792, 6, 1, 0, "ILD_Fp64m64", 0|(1<<TID::MayLoad), 0x10000ULL, NULL, NULL, NULL, OperandInfo121 }, // Inst #792 = ILD_Fp64m64
+ { 793, 6, 1, 0, "ILD_Fp64m80", 0|(1<<TID::MayLoad), 0x10000ULL, NULL, NULL, NULL, OperandInfo122 }, // Inst #793 = ILD_Fp64m80
+ { 794, 5, 0, 0, "IMUL16m", 0|(1<<TID::MayLoad), 0xf700005dULL, ImplicitList12, ImplicitList21, Barriers1, OperandInfo34 }, // Inst #794 = IMUL16m
+ { 795, 1, 0, 0, "IMUL16r", 0, 0xf7000055ULL, ImplicitList12, ImplicitList21, Barriers1, OperandInfo106 }, // Inst #795 = IMUL16r
+ { 796, 7, 1, 0, "IMUL16rm", 0|(1<<TID::MayLoad), 0xaf000146ULL, NULL, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #796 = IMUL16rm
+ { 797, 7, 1, 0, "IMUL16rmi", 0|(1<<TID::MayLoad), 0x69006046ULL, NULL, ImplicitList1, Barriers1, OperandInfo123 }, // Inst #797 = IMUL16rmi
+ { 798, 7, 1, 0, "IMUL16rmi8", 0|(1<<TID::MayLoad), 0x6b002046ULL, NULL, ImplicitList1, Barriers1, OperandInfo123 }, // Inst #798 = IMUL16rmi8
+ { 799, 3, 1, 0, "IMUL16rr", 0|(1<<TID::Commutable), 0xaf000145ULL, NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #799 = IMUL16rr
+ { 800, 3, 1, 0, "IMUL16rri", 0, 0x69006045ULL, NULL, ImplicitList1, Barriers1, OperandInfo124 }, // Inst #800 = IMUL16rri
+ { 801, 3, 1, 0, "IMUL16rri8", 0, 0x6b002045ULL, NULL, ImplicitList1, Barriers1, OperandInfo124 }, // Inst #801 = IMUL16rri8
+ { 802, 5, 0, 0, "IMUL32m", 0|(1<<TID::MayLoad), 0xf700001dULL, ImplicitList13, ImplicitList18, Barriers6, OperandInfo34 }, // Inst #802 = IMUL32m
+ { 803, 1, 0, 0, "IMUL32r", 0, 0xf7000015ULL, ImplicitList13, ImplicitList18, Barriers6, OperandInfo66 }, // Inst #803 = IMUL32r
+ { 804, 7, 1, 0, "IMUL32rm", 0|(1<<TID::MayLoad), 0xaf000106ULL, NULL, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #804 = IMUL32rm
+ { 805, 7, 1, 0, "IMUL32rmi", 0|(1<<TID::MayLoad), 0x6900a006ULL, NULL, ImplicitList1, Barriers1, OperandInfo125 }, // Inst #805 = IMUL32rmi
+ { 806, 7, 1, 0, "IMUL32rmi8", 0|(1<<TID::MayLoad), 0x6b002006ULL, NULL, ImplicitList1, Barriers1, OperandInfo125 }, // Inst #806 = IMUL32rmi8
+ { 807, 3, 1, 0, "IMUL32rr", 0|(1<<TID::Commutable), 0xaf000105ULL, NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #807 = IMUL32rr
+ { 808, 3, 1, 0, "IMUL32rri", 0, 0x6900a005ULL, NULL, ImplicitList1, Barriers1, OperandInfo126 }, // Inst #808 = IMUL32rri
+ { 809, 3, 1, 0, "IMUL32rri8", 0, 0x6b002005ULL, NULL, ImplicitList1, Barriers1, OperandInfo126 }, // Inst #809 = IMUL32rri8
+ { 810, 5, 0, 0, "IMUL64m", 0|(1<<TID::MayLoad), 0xf700101dULL, ImplicitList15, ImplicitList17, Barriers1, OperandInfo34 }, // Inst #810 = IMUL64m
+ { 811, 1, 0, 0, "IMUL64r", 0, 0xf7001015ULL, ImplicitList15, ImplicitList17, Barriers1, OperandInfo67 }, // Inst #811 = IMUL64r
+ { 812, 7, 1, 0, "IMUL64rm", 0|(1<<TID::MayLoad), 0xaf001106ULL, NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #812 = IMUL64rm
+ { 813, 7, 1, 0, "IMUL64rmi32", 0|(1<<TID::MayLoad), 0x6900b006ULL, NULL, ImplicitList1, Barriers1, OperandInfo127 }, // Inst #813 = IMUL64rmi32
+ { 814, 7, 1, 0, "IMUL64rmi8", 0|(1<<TID::MayLoad), 0x6b003006ULL, NULL, ImplicitList1, Barriers1, OperandInfo127 }, // Inst #814 = IMUL64rmi8
+ { 815, 3, 1, 0, "IMUL64rr", 0|(1<<TID::Commutable), 0xaf001105ULL, NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #815 = IMUL64rr
+ { 816, 3, 1, 0, "IMUL64rri32", 0, 0x6900b005ULL, NULL, ImplicitList1, Barriers1, OperandInfo128 }, // Inst #816 = IMUL64rri32
+ { 817, 3, 1, 0, "IMUL64rri8", 0, 0x6b003005ULL, NULL, ImplicitList1, Barriers1, OperandInfo128 }, // Inst #817 = IMUL64rri8
+ { 818, 5, 0, 0, "IMUL8m", 0|(1<<TID::MayLoad), 0xf600001dULL, ImplicitList11, ImplicitList22, Barriers1, OperandInfo34 }, // Inst #818 = IMUL8m
+ { 819, 1, 0, 0, "IMUL8r", 0, 0xf6000015ULL, ImplicitList11, ImplicitList22, Barriers1, OperandInfo107 }, // Inst #819 = IMUL8r
+ { 820, 0, 0, 0, "IN16", 0|(1<<TID::UnmodeledSideEffects), 0x6d000041ULL, NULL, NULL, NULL, 0 }, // Inst #820 = IN16
+ { 821, 1, 0, 0, "IN16ri", 0|(1<<TID::UnmodeledSideEffects), 0xe5002041ULL, NULL, ImplicitList12, NULL, OperandInfo2 }, // Inst #821 = IN16ri
+ { 822, 0, 0, 0, "IN16rr", 0|(1<<TID::UnmodeledSideEffects), 0xed000041ULL, ImplicitList25, ImplicitList12, NULL, 0 }, // Inst #822 = IN16rr
+ { 823, 0, 0, 0, "IN32", 0|(1<<TID::UnmodeledSideEffects), 0x6d000001ULL, NULL, NULL, NULL, 0 }, // Inst #823 = IN32
+ { 824, 1, 0, 0, "IN32ri", 0|(1<<TID::UnmodeledSideEffects), 0xe5002001ULL, NULL, ImplicitList13, NULL, OperandInfo2 }, // Inst #824 = IN32ri
+ { 825, 0, 0, 0, "IN32rr", 0|(1<<TID::UnmodeledSideEffects), 0xed000001ULL, ImplicitList25, ImplicitList13, NULL, 0 }, // Inst #825 = IN32rr
+ { 826, 0, 0, 0, "IN8", 0|(1<<TID::UnmodeledSideEffects), 0x6c000001ULL, NULL, NULL, NULL, 0 }, // Inst #826 = IN8
+ { 827, 1, 0, 0, "IN8ri", 0|(1<<TID::UnmodeledSideEffects), 0xe4002001ULL, NULL, ImplicitList11, NULL, OperandInfo2 }, // Inst #827 = IN8ri
+ { 828, 0, 0, 0, "IN8rr", 0|(1<<TID::UnmodeledSideEffects), 0xec000001ULL, ImplicitList25, ImplicitList11, NULL, 0 }, // Inst #828 = IN8rr
+ { 829, 5, 0, 0, "INC16m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xff000058ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #829 = INC16m
+ { 830, 2, 1, 0, "INC16r", 0|(1<<TID::ConvertibleTo3Addr), 0x40000042ULL, NULL, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #830 = INC16r
+ { 831, 5, 0, 0, "INC32m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xff000018ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #831 = INC32m
+ { 832, 2, 1, 0, "INC32r", 0|(1<<TID::ConvertibleTo3Addr), 0x40000002ULL, NULL, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #832 = INC32r
+ { 833, 5, 0, 0, "INC64_16m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xff000058ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #833 = INC64_16m
+ { 834, 2, 1, 0, "INC64_16r", 0|(1<<TID::ConvertibleTo3Addr), 0xff000050ULL, NULL, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #834 = INC64_16r
+ { 835, 5, 0, 0, "INC64_32m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xff000018ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #835 = INC64_32m
+ { 836, 2, 1, 0, "INC64_32r", 0|(1<<TID::ConvertibleTo3Addr), 0xff000010ULL, NULL, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #836 = INC64_32r
+ { 837, 5, 0, 0, "INC64m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xff001018ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #837 = INC64m
+ { 838, 2, 1, 0, "INC64r", 0|(1<<TID::ConvertibleTo3Addr), 0xff001010ULL, NULL, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #838 = INC64r
+ { 839, 5, 0, 0, "INC8m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xfe000018ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #839 = INC8m
+ { 840, 2, 1, 0, "INC8r", 0, 0xfe000010ULL, NULL, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #840 = INC8r
+ { 841, 8, 1, 0, "INSERTPSrm", 0|(1<<TID::MayLoad), 0x21c02e46ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #841 = INSERTPSrm
+ { 842, 4, 1, 0, "INSERTPSrr", 0, 0x21c02e45ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #842 = INSERTPSrr
+ { 843, 1, 0, 0, "INT", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xcd002001ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #843 = INT
+ { 844, 0, 0, 0, "INT3", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xcc000001ULL, NULL, NULL, NULL, 0 }, // Inst #844 = INT3
+ { 845, 0, 0, 0, "INTO", 0|(1<<TID::UnmodeledSideEffects), 0xce000001ULL, ImplicitList1, NULL, NULL, 0 }, // Inst #845 = INTO
+ { 846, 0, 0, 0, "INVD", 0|(1<<TID::UnmodeledSideEffects), 0x8000101ULL, NULL, NULL, NULL, 0 }, // Inst #846 = INVD
+ { 847, 0, 0, 0, "INVEPT", 0|(1<<TID::UnmodeledSideEffects), 0x80000d41ULL, NULL, NULL, NULL, 0 }, // Inst #847 = INVEPT
+ { 848, 5, 0, 0, "INVLPG", 0|(1<<TID::UnmodeledSideEffects), 0x100011fULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #848 = INVLPG
+ { 849, 0, 0, 0, "INVVPID", 0|(1<<TID::UnmodeledSideEffects), 0x81000d41ULL, NULL, NULL, NULL, 0 }, // Inst #849 = INVVPID
+ { 850, 0, 0, 0, "IRET16", 0|(1<<TID::UnmodeledSideEffects), 0xcf000041ULL, NULL, NULL, NULL, 0 }, // Inst #850 = IRET16
+ { 851, 0, 0, 0, "IRET32", 0|(1<<TID::UnmodeledSideEffects), 0xcf000001ULL, NULL, NULL, NULL, 0 }, // Inst #851 = IRET32
+ { 852, 0, 0, 0, "IRET64", 0|(1<<TID::UnmodeledSideEffects), 0xcf001001ULL, NULL, NULL, NULL, 0 }, // Inst #852 = IRET64
+ { 853, 5, 0, 0, "ISTT_FP16m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xdf000019ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #853 = ISTT_FP16m
+ { 854, 5, 0, 0, "ISTT_FP32m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xdb000019ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #854 = ISTT_FP32m
+ { 855, 5, 0, 0, "ISTT_FP64m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xdd000019ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #855 = ISTT_FP64m
+ { 856, 6, 0, 0, "ISTT_Fp16m32", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo110 }, // Inst #856 = ISTT_Fp16m32
+ { 857, 6, 0, 0, "ISTT_Fp16m64", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo111 }, // Inst #857 = ISTT_Fp16m64
+ { 858, 6, 0, 0, "ISTT_Fp16m80", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo112 }, // Inst #858 = ISTT_Fp16m80
+ { 859, 6, 0, 0, "ISTT_Fp32m32", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo110 }, // Inst #859 = ISTT_Fp32m32
+ { 860, 6, 0, 0, "ISTT_Fp32m64", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo111 }, // Inst #860 = ISTT_Fp32m64
+ { 861, 6, 0, 0, "ISTT_Fp32m80", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo112 }, // Inst #861 = ISTT_Fp32m80
+ { 862, 6, 0, 0, "ISTT_Fp64m32", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo110 }, // Inst #862 = ISTT_Fp64m32
+ { 863, 6, 0, 0, "ISTT_Fp64m64", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo111 }, // Inst #863 = ISTT_Fp64m64
+ { 864, 6, 0, 0, "ISTT_Fp64m80", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo112 }, // Inst #864 = ISTT_Fp64m80
+ { 865, 5, 0, 0, "IST_F16m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xdf00001aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #865 = IST_F16m
+ { 866, 5, 0, 0, "IST_F32m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xdb00001aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #866 = IST_F32m
+ { 867, 5, 0, 0, "IST_FP16m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xdf00001bULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #867 = IST_FP16m
+ { 868, 5, 0, 0, "IST_FP32m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xdb00001bULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #868 = IST_FP32m
+ { 869, 5, 0, 0, "IST_FP64m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xdf00001fULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #869 = IST_FP64m
+ { 870, 6, 0, 0, "IST_Fp16m32", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo110 }, // Inst #870 = IST_Fp16m32
+ { 871, 6, 0, 0, "IST_Fp16m64", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo111 }, // Inst #871 = IST_Fp16m64
+ { 872, 6, 0, 0, "IST_Fp16m80", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo112 }, // Inst #872 = IST_Fp16m80
+ { 873, 6, 0, 0, "IST_Fp32m32", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo110 }, // Inst #873 = IST_Fp32m32
+ { 874, 6, 0, 0, "IST_Fp32m64", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo111 }, // Inst #874 = IST_Fp32m64
+ { 875, 6, 0, 0, "IST_Fp32m80", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo112 }, // Inst #875 = IST_Fp32m80
+ { 876, 6, 0, 0, "IST_Fp64m32", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo110 }, // Inst #876 = IST_Fp64m32
+ { 877, 6, 0, 0, "IST_Fp64m64", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo111 }, // Inst #877 = IST_Fp64m64
+ { 878, 6, 0, 0, "IST_Fp64m80", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo112 }, // Inst #878 = IST_Fp64m80
+ { 879, 8, 1, 0, "Int_CMPSDrm", 0|(1<<TID::MayLoad), 0xc2002b06ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #879 = Int_CMPSDrm
+ { 880, 4, 1, 0, "Int_CMPSDrr", 0, 0xc2002b05ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #880 = Int_CMPSDrr
+ { 881, 8, 1, 0, "Int_CMPSSrm", 0|(1<<TID::MayLoad), 0xc2002c06ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #881 = Int_CMPSSrm
+ { 882, 4, 1, 0, "Int_CMPSSrr", 0, 0xc2002c05ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #882 = Int_CMPSSrr
+ { 883, 6, 0, 0, "Int_COMISDrm", 0|(1<<TID::MayLoad), 0x2f800146ULL, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #883 = Int_COMISDrm
+ { 884, 2, 0, 0, "Int_COMISDrr", 0, 0x2f800145ULL, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #884 = Int_COMISDrr
+ { 885, 6, 0, 0, "Int_COMISSrm", 0|(1<<TID::MayLoad), 0x2f400106ULL, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #885 = Int_COMISSrm
+ { 886, 2, 0, 0, "Int_COMISSrr", 0, 0x2f400105ULL, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #886 = Int_COMISSrr
+ { 887, 6, 1, 0, "Int_CVTDQ2PDrm", 0|(1<<TID::MayLoad), 0xe6000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #887 = Int_CVTDQ2PDrm
+ { 888, 2, 1, 0, "Int_CVTDQ2PDrr", 0, 0xe6000c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #888 = Int_CVTDQ2PDrr
+ { 889, 6, 1, 0, "Int_CVTDQ2PSrm", 0|(1<<TID::MayLoad), 0x5b000106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #889 = Int_CVTDQ2PSrm
+ { 890, 2, 1, 0, "Int_CVTDQ2PSrr", 0, 0x5b000105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #890 = Int_CVTDQ2PSrr
+ { 891, 6, 1, 0, "Int_CVTPD2DQrm", 0|(1<<TID::MayLoad), 0xe6000b06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #891 = Int_CVTPD2DQrm
+ { 892, 2, 1, 0, "Int_CVTPD2DQrr", 0, 0xe6000b05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #892 = Int_CVTPD2DQrr
+ { 893, 6, 1, 0, "Int_CVTPD2PIrm", 0|(1<<TID::MayLoad), 0x2d800146ULL, NULL, NULL, NULL, OperandInfo129 }, // Inst #893 = Int_CVTPD2PIrm
+ { 894, 2, 1, 0, "Int_CVTPD2PIrr", 0, 0x2d800145ULL, NULL, NULL, NULL, OperandInfo130 }, // Inst #894 = Int_CVTPD2PIrr
+ { 895, 6, 1, 0, "Int_CVTPD2PSrm", 0|(1<<TID::MayLoad), 0x5a800146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #895 = Int_CVTPD2PSrm
+ { 896, 2, 1, 0, "Int_CVTPD2PSrr", 0, 0x5a800145ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #896 = Int_CVTPD2PSrr
+ { 897, 6, 1, 0, "Int_CVTPI2PDrm", 0|(1<<TID::MayLoad), 0x2a800146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #897 = Int_CVTPI2PDrm
+ { 898, 2, 1, 0, "Int_CVTPI2PDrr", 0, 0x2a800145ULL, NULL, NULL, NULL, OperandInfo131 }, // Inst #898 = Int_CVTPI2PDrr
+ { 899, 7, 1, 0, "Int_CVTPI2PSrm", 0|(1<<TID::MayLoad), 0x2a400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #899 = Int_CVTPI2PSrm
+ { 900, 3, 1, 0, "Int_CVTPI2PSrr", 0, 0x2a400105ULL, NULL, NULL, NULL, OperandInfo132 }, // Inst #900 = Int_CVTPI2PSrr
+ { 901, 6, 1, 0, "Int_CVTPS2DQrm", 0|(1<<TID::MayLoad), 0x5b800146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #901 = Int_CVTPS2DQrm
+ { 902, 2, 1, 0, "Int_CVTPS2DQrr", 0, 0x5b800145ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #902 = Int_CVTPS2DQrr
+ { 903, 6, 1, 0, "Int_CVTPS2PDrm", 0|(1<<TID::MayLoad), 0x5a000106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #903 = Int_CVTPS2PDrm
+ { 904, 2, 1, 0, "Int_CVTPS2PDrr", 0, 0x5a000105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #904 = Int_CVTPS2PDrr
+ { 905, 6, 1, 0, "Int_CVTPS2PIrm", 0|(1<<TID::MayLoad), 0x2d400106ULL, NULL, NULL, NULL, OperandInfo129 }, // Inst #905 = Int_CVTPS2PIrm
+ { 906, 2, 1, 0, "Int_CVTPS2PIrr", 0, 0x2d400105ULL, NULL, NULL, NULL, OperandInfo130 }, // Inst #906 = Int_CVTPS2PIrr
+ { 907, 6, 1, 0, "Int_CVTSD2SI64rm", 0|(1<<TID::MayLoad), 0x2d001b06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #907 = Int_CVTSD2SI64rm
+ { 908, 2, 1, 0, "Int_CVTSD2SI64rr", 0, 0x2d001b05ULL, NULL, NULL, NULL, OperandInfo91 }, // Inst #908 = Int_CVTSD2SI64rr
+ { 909, 6, 1, 0, "Int_CVTSD2SIrm", 0|(1<<TID::MayLoad), 0x2d000b06ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #909 = Int_CVTSD2SIrm
+ { 910, 2, 1, 0, "Int_CVTSD2SIrr", 0, 0x2d000b05ULL, NULL, NULL, NULL, OperandInfo133 }, // Inst #910 = Int_CVTSD2SIrr
+ { 911, 7, 1, 0, "Int_CVTSD2SSrm", 0|(1<<TID::MayLoad), 0x5a000c06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #911 = Int_CVTSD2SSrm
+ { 912, 3, 1, 0, "Int_CVTSD2SSrr", 0, 0x5a000c05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #912 = Int_CVTSD2SSrr
+ { 913, 7, 1, 0, "Int_CVTSI2SD64rm", 0|(1<<TID::MayLoad), 0x2a001b06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #913 = Int_CVTSI2SD64rm
+ { 914, 3, 1, 0, "Int_CVTSI2SD64rr", 0, 0x2a001b05ULL, NULL, NULL, NULL, OperandInfo134 }, // Inst #914 = Int_CVTSI2SD64rr
+ { 915, 7, 1, 0, "Int_CVTSI2SDrm", 0|(1<<TID::MayLoad), 0x2a000b06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #915 = Int_CVTSI2SDrm
+ { 916, 3, 1, 0, "Int_CVTSI2SDrr", 0, 0x2a000b05ULL, NULL, NULL, NULL, OperandInfo135 }, // Inst #916 = Int_CVTSI2SDrr
+ { 917, 7, 1, 0, "Int_CVTSI2SS64rm", 0|(1<<TID::MayLoad), 0x2a001c06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #917 = Int_CVTSI2SS64rm
+ { 918, 3, 1, 0, "Int_CVTSI2SS64rr", 0, 0x2a001c05ULL, NULL, NULL, NULL, OperandInfo134 }, // Inst #918 = Int_CVTSI2SS64rr
+ { 919, 7, 1, 0, "Int_CVTSI2SSrm", 0|(1<<TID::MayLoad), 0x2a000c06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #919 = Int_CVTSI2SSrm
+ { 920, 3, 1, 0, "Int_CVTSI2SSrr", 0, 0x2a000c05ULL, NULL, NULL, NULL, OperandInfo135 }, // Inst #920 = Int_CVTSI2SSrr
+ { 921, 7, 1, 0, "Int_CVTSS2SDrm", 0|(1<<TID::MayLoad), 0x5a000c06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #921 = Int_CVTSS2SDrm
+ { 922, 3, 1, 0, "Int_CVTSS2SDrr", 0, 0x5a000c05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #922 = Int_CVTSS2SDrr
+ { 923, 6, 1, 0, "Int_CVTSS2SI64rm", 0|(1<<TID::MayLoad), 0x2d001c06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #923 = Int_CVTSS2SI64rm
+ { 924, 2, 1, 0, "Int_CVTSS2SI64rr", 0, 0x2d001c05ULL, NULL, NULL, NULL, OperandInfo91 }, // Inst #924 = Int_CVTSS2SI64rr
+ { 925, 6, 1, 0, "Int_CVTSS2SIrm", 0|(1<<TID::MayLoad), 0x2d000c06ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #925 = Int_CVTSS2SIrm
+ { 926, 2, 1, 0, "Int_CVTSS2SIrr", 0, 0x2d000c05ULL, NULL, NULL, NULL, OperandInfo133 }, // Inst #926 = Int_CVTSS2SIrr
+ { 927, 6, 1, 0, "Int_CVTTPD2DQrm", 0|(1<<TID::MayLoad), 0xe6800146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #927 = Int_CVTTPD2DQrm
+ { 928, 2, 1, 0, "Int_CVTTPD2DQrr", 0, 0xe6800145ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #928 = Int_CVTTPD2DQrr
+ { 929, 6, 1, 0, "Int_CVTTPD2PIrm", 0|(1<<TID::MayLoad), 0x2c800146ULL, NULL, NULL, NULL, OperandInfo129 }, // Inst #929 = Int_CVTTPD2PIrm
+ { 930, 2, 1, 0, "Int_CVTTPD2PIrr", 0, 0x2c800145ULL, NULL, NULL, NULL, OperandInfo130 }, // Inst #930 = Int_CVTTPD2PIrr
+ { 931, 6, 1, 0, "Int_CVTTPS2DQrm", 0|(1<<TID::MayLoad), 0x5b000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #931 = Int_CVTTPS2DQrm
+ { 932, 2, 1, 0, "Int_CVTTPS2DQrr", 0, 0x5b000c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #932 = Int_CVTTPS2DQrr
+ { 933, 6, 1, 0, "Int_CVTTPS2PIrm", 0|(1<<TID::MayLoad), 0x2c400106ULL, NULL, NULL, NULL, OperandInfo129 }, // Inst #933 = Int_CVTTPS2PIrm
+ { 934, 2, 1, 0, "Int_CVTTPS2PIrr", 0, 0x2c400105ULL, NULL, NULL, NULL, OperandInfo130 }, // Inst #934 = Int_CVTTPS2PIrr
+ { 935, 6, 1, 0, "Int_CVTTSD2SI64rm", 0|(1<<TID::MayLoad), 0x2c001b06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #935 = Int_CVTTSD2SI64rm
+ { 936, 2, 1, 0, "Int_CVTTSD2SI64rr", 0, 0x2c001b05ULL, NULL, NULL, NULL, OperandInfo91 }, // Inst #936 = Int_CVTTSD2SI64rr
+ { 937, 6, 1, 0, "Int_CVTTSD2SIrm", 0|(1<<TID::MayLoad), 0x2c000b06ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #937 = Int_CVTTSD2SIrm
+ { 938, 2, 1, 0, "Int_CVTTSD2SIrr", 0, 0x2c000b05ULL, NULL, NULL, NULL, OperandInfo133 }, // Inst #938 = Int_CVTTSD2SIrr
+ { 939, 6, 1, 0, "Int_CVTTSS2SI64rm", 0|(1<<TID::MayLoad), 0x2c001c06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #939 = Int_CVTTSS2SI64rm
+ { 940, 2, 1, 0, "Int_CVTTSS2SI64rr", 0, 0x2c001c05ULL, NULL, NULL, NULL, OperandInfo91 }, // Inst #940 = Int_CVTTSS2SI64rr
+ { 941, 6, 1, 0, "Int_CVTTSS2SIrm", 0|(1<<TID::MayLoad), 0x2c000c06ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #941 = Int_CVTTSS2SIrm
+ { 942, 2, 1, 0, "Int_CVTTSS2SIrr", 0, 0x2c000c05ULL, NULL, NULL, NULL, OperandInfo133 }, // Inst #942 = Int_CVTTSS2SIrr
+ { 943, 0, 0, 0, "Int_MemBarrier", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, 0 }, // Inst #943 = Int_MemBarrier
+ { 944, 1, 0, 0, "Int_MemBarrierNoSSE64", 0|(1<<TID::UnmodeledSideEffects), 0x9081011ULL, NULL, ImplicitList2, NULL, OperandInfo67 }, // Inst #944 = Int_MemBarrierNoSSE64
+ { 945, 6, 0, 0, "Int_UCOMISDrm", 0|(1<<TID::MayLoad), 0x2e800146ULL, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #945 = Int_UCOMISDrm
+ { 946, 2, 0, 0, "Int_UCOMISDrr", 0, 0x2e800145ULL, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #946 = Int_UCOMISDrr
+ { 947, 6, 0, 0, "Int_UCOMISSrm", 0|(1<<TID::MayLoad), 0x2e400106ULL, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #947 = Int_UCOMISSrm
+ { 948, 2, 0, 0, "Int_UCOMISSrr", 0, 0x2e400105ULL, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #948 = Int_UCOMISSrr
+ { 949, 8, 1, 0, "Int_VCMPSDrm", 0|(1<<TID::MayLoad), 0x5c2002b06ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #949 = Int_VCMPSDrm
+ { 950, 4, 1, 0, "Int_VCMPSDrr", 0, 0x5c2002b05ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #950 = Int_VCMPSDrr
+ { 951, 8, 1, 0, "Int_VCMPSSrm", 0|(1<<TID::MayLoad), 0x5c2002c06ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #951 = Int_VCMPSSrm
+ { 952, 4, 1, 0, "Int_VCMPSSrr", 0, 0x5c2002c05ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #952 = Int_VCMPSSrr
+ { 953, 6, 0, 0, "Int_VCOMISDrm", 0|(1<<TID::MayLoad), 0x12f800046ULL, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #953 = Int_VCOMISDrm
+ { 954, 2, 0, 0, "Int_VCOMISDrr", 0, 0x12f800045ULL, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #954 = Int_VCOMISDrr
+ { 955, 6, 0, 0, "Int_VCOMISSrm", 0|(1<<TID::MayLoad), 0x12f400006ULL, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #955 = Int_VCOMISSrm
+ { 956, 2, 0, 0, "Int_VCOMISSrr", 0, 0x12f400005ULL, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #956 = Int_VCOMISSrr
+ { 957, 6, 1, 0, "Int_VCVTDQ2PDrm", 0|(1<<TID::MayLoad), 0x1e6000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #957 = Int_VCVTDQ2PDrm
+ { 958, 2, 1, 0, "Int_VCVTDQ2PDrr", 0, 0x1e6000c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #958 = Int_VCVTDQ2PDrr
+ { 959, 6, 1, 0, "Int_VCVTDQ2PSrm", 0|(1<<TID::MayLoad), 0x15b000106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #959 = Int_VCVTDQ2PSrm
+ { 960, 2, 1, 0, "Int_VCVTDQ2PSrr", 0, 0x15b000105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #960 = Int_VCVTDQ2PSrr
+ { 961, 6, 1, 0, "Int_VCVTPD2DQrm", 0|(1<<TID::MayLoad), 0x1e6000b06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #961 = Int_VCVTPD2DQrm
+ { 962, 2, 1, 0, "Int_VCVTPD2DQrr", 0, 0x1e6000b05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #962 = Int_VCVTPD2DQrr
+ { 963, 6, 1, 0, "Int_VCVTPD2PSrm", 0|(1<<TID::MayLoad), 0x5a800046ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #963 = Int_VCVTPD2PSrm
+ { 964, 2, 1, 0, "Int_VCVTPD2PSrr", 0, 0x5a800045ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #964 = Int_VCVTPD2PSrr
+ { 965, 6, 1, 0, "Int_VCVTPS2DQrm", 0|(1<<TID::MayLoad), 0x15b800046ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #965 = Int_VCVTPS2DQrm
+ { 966, 2, 1, 0, "Int_VCVTPS2DQrr", 0, 0x15b800045ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #966 = Int_VCVTPS2DQrr
+ { 967, 6, 1, 0, "Int_VCVTPS2PDrm", 0|(1<<TID::MayLoad), 0x15a000006ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #967 = Int_VCVTPS2PDrm
+ { 968, 2, 1, 0, "Int_VCVTPS2PDrr", 0, 0x15a000005ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #968 = Int_VCVTPS2PDrr
+ { 969, 6, 1, 0, "Int_VCVTSD2SI64rm", 0|(1<<TID::MayLoad), 0x32d000b06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #969 = Int_VCVTSD2SI64rm
+ { 970, 2, 1, 0, "Int_VCVTSD2SI64rr", 0, 0x32d000b05ULL, NULL, NULL, NULL, OperandInfo91 }, // Inst #970 = Int_VCVTSD2SI64rr
+ { 971, 6, 1, 0, "Int_VCVTSD2SIrm", 0|(1<<TID::MayLoad), 0x12d000b06ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #971 = Int_VCVTSD2SIrm
+ { 972, 2, 1, 0, "Int_VCVTSD2SIrr", 0, 0x12d000b05ULL, NULL, NULL, NULL, OperandInfo133 }, // Inst #972 = Int_VCVTSD2SIrr
+ { 973, 7, 1, 0, "Int_VCVTSD2SSrm", 0|(1<<TID::MayLoad), 0x55a000c06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #973 = Int_VCVTSD2SSrm
+ { 974, 3, 1, 0, "Int_VCVTSD2SSrr", 0, 0x55a000c05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #974 = Int_VCVTSD2SSrr
+ { 975, 7, 1, 0, "Int_VCVTSI2SD64rm", 0|(1<<TID::MayLoad), 0x72a000b06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #975 = Int_VCVTSI2SD64rm
+ { 976, 3, 1, 0, "Int_VCVTSI2SD64rr", 0, 0x72a000b05ULL, NULL, NULL, NULL, OperandInfo139 }, // Inst #976 = Int_VCVTSI2SD64rr
+ { 977, 7, 1, 0, "Int_VCVTSI2SDrm", 0|(1<<TID::MayLoad), 0x52a000b06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #977 = Int_VCVTSI2SDrm
+ { 978, 3, 1, 0, "Int_VCVTSI2SDrr", 0, 0x52a000b05ULL, NULL, NULL, NULL, OperandInfo140 }, // Inst #978 = Int_VCVTSI2SDrr
+ { 979, 7, 1, 0, "Int_VCVTSI2SS64rm", 0|(1<<TID::MayLoad), 0x72a000c06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #979 = Int_VCVTSI2SS64rm
+ { 980, 3, 1, 0, "Int_VCVTSI2SS64rr", 0, 0x72a000c05ULL, NULL, NULL, NULL, OperandInfo139 }, // Inst #980 = Int_VCVTSI2SS64rr
+ { 981, 7, 1, 0, "Int_VCVTSI2SSrm", 0|(1<<TID::MayLoad), 0x52a000c06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #981 = Int_VCVTSI2SSrm
+ { 982, 3, 1, 0, "Int_VCVTSI2SSrr", 0, 0x52a000c05ULL, NULL, NULL, NULL, OperandInfo140 }, // Inst #982 = Int_VCVTSI2SSrr
+ { 983, 7, 1, 0, "Int_VCVTSS2SDrm", 0|(1<<TID::MayLoad), 0x55a000c06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #983 = Int_VCVTSS2SDrm
+ { 984, 3, 1, 0, "Int_VCVTSS2SDrr", 0, 0x55a000c05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #984 = Int_VCVTSS2SDrr
+ { 985, 6, 1, 0, "Int_VCVTSS2SI64rm", 0|(1<<TID::MayLoad), 0x32d000c06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #985 = Int_VCVTSS2SI64rm
+ { 986, 2, 1, 0, "Int_VCVTSS2SI64rr", 0, 0x32d000c05ULL, NULL, NULL, NULL, OperandInfo91 }, // Inst #986 = Int_VCVTSS2SI64rr
+ { 987, 6, 1, 0, "Int_VCVTSS2SIrm", 0|(1<<TID::MayLoad), 0x12d000c06ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #987 = Int_VCVTSS2SIrm
+ { 988, 2, 1, 0, "Int_VCVTSS2SIrr", 0, 0x12d000c05ULL, NULL, NULL, NULL, OperandInfo133 }, // Inst #988 = Int_VCVTSS2SIrr
+ { 989, 6, 1, 0, "Int_VCVTTPD2DQrm", 0|(1<<TID::MayLoad), 0x1e6800046ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #989 = Int_VCVTTPD2DQrm
+ { 990, 2, 1, 0, "Int_VCVTTPD2DQrr", 0, 0x1e6800045ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #990 = Int_VCVTTPD2DQrr
+ { 991, 6, 1, 0, "Int_VCVTTPS2DQrm", 0|(1<<TID::MayLoad), 0x15b000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #991 = Int_VCVTTPS2DQrm
+ { 992, 2, 1, 0, "Int_VCVTTPS2DQrr", 0, 0x15b000c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #992 = Int_VCVTTPS2DQrr
+ { 993, 6, 1, 0, "Int_VCVTTSD2SI64rm", 0|(1<<TID::MayLoad), 0x32c000b06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #993 = Int_VCVTTSD2SI64rm
+ { 994, 2, 1, 0, "Int_VCVTTSD2SI64rr", 0, 0x32c000b05ULL, NULL, NULL, NULL, OperandInfo91 }, // Inst #994 = Int_VCVTTSD2SI64rr
+ { 995, 6, 1, 0, "Int_VCVTTSD2SIrm", 0|(1<<TID::MayLoad), 0x12c000b06ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #995 = Int_VCVTTSD2SIrm
+ { 996, 2, 1, 0, "Int_VCVTTSD2SIrr", 0, 0x12c000b05ULL, NULL, NULL, NULL, OperandInfo133 }, // Inst #996 = Int_VCVTTSD2SIrr
+ { 997, 6, 1, 0, "Int_VCVTTSS2SI64rm", 0|(1<<TID::MayLoad), 0x32c000c06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #997 = Int_VCVTTSS2SI64rm
+ { 998, 2, 1, 0, "Int_VCVTTSS2SI64rr", 0, 0x32c000c05ULL, NULL, NULL, NULL, OperandInfo91 }, // Inst #998 = Int_VCVTTSS2SI64rr
+ { 999, 6, 1, 0, "Int_VCVTTSS2SIrm", 0|(1<<TID::MayLoad), 0x12c000c06ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #999 = Int_VCVTTSS2SIrm
+ { 1000, 2, 1, 0, "Int_VCVTTSS2SIrr", 0, 0x12c000c05ULL, NULL, NULL, NULL, OperandInfo133 }, // Inst #1000 = Int_VCVTTSS2SIrr
+ { 1001, 6, 0, 0, "Int_VUCOMISDrm", 0|(1<<TID::MayLoad), 0x12e800046ULL, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #1001 = Int_VUCOMISDrm
+ { 1002, 2, 0, 0, "Int_VUCOMISDrr", 0, 0x12e800045ULL, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #1002 = Int_VUCOMISDrr
+ { 1003, 6, 0, 0, "Int_VUCOMISSrm", 0|(1<<TID::MayLoad), 0x12e400006ULL, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #1003 = Int_VUCOMISSrm
+ { 1004, 2, 0, 0, "Int_VUCOMISSrr", 0, 0x12e400005ULL, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #1004 = Int_VUCOMISSrr
+ { 1005, 1, 0, 0, "JAE_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x73004001ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1005 = JAE_1
+ { 1006, 1, 0, 0, "JAE_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0x8300c101ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1006 = JAE_4
+ { 1007, 1, 0, 0, "JA_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x77004001ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1007 = JA_1
+ { 1008, 1, 0, 0, "JA_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0x8700c101ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1008 = JA_4
+ { 1009, 1, 0, 0, "JBE_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x76004001ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1009 = JBE_1
+ { 1010, 1, 0, 0, "JBE_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0x8600c101ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1010 = JBE_4
+ { 1011, 1, 0, 0, "JB_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x72004001ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1011 = JB_1
+ { 1012, 1, 0, 0, "JB_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0x8200c101ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1012 = JB_4
+ { 1013, 1, 0, 0, "JCXZ8", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0xe3004001ULL, ImplicitList26, NULL, NULL, OperandInfo2 }, // Inst #1013 = JCXZ8
+ { 1014, 1, 0, 0, "JE_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x74004001ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1014 = JE_1
+ { 1015, 1, 0, 0, "JE_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0x8400c101ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1015 = JE_4
+ { 1016, 1, 0, 0, "JGE_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x7d004001ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1016 = JGE_1
+ { 1017, 1, 0, 0, "JGE_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0x8d00c101ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1017 = JGE_4
+ { 1018, 1, 0, 0, "JG_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x7f004001ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1018 = JG_1
+ { 1019, 1, 0, 0, "JG_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0x8f00c101ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1019 = JG_4
+ { 1020, 1, 0, 0, "JLE_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x7e004001ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1020 = JLE_1
+ { 1021, 1, 0, 0, "JLE_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0x8e00c101ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1021 = JLE_4
+ { 1022, 1, 0, 0, "JL_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x7c004001ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1022 = JL_1
+ { 1023, 1, 0, 0, "JL_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0x8c00c101ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1023 = JL_4
+ { 1024, 5, 0, 0, "JMP32m", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::MayLoad)|(1<<TID::Terminator), 0xff00001cULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1024 = JMP32m
+ { 1025, 1, 0, 0, "JMP32r", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator), 0xff000014ULL, NULL, NULL, NULL, OperandInfo66 }, // Inst #1025 = JMP32r
+ { 1026, 5, 0, 0, "JMP64m", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::MayLoad)|(1<<TID::Terminator), 0xff00001cULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1026 = JMP64m
+ { 1027, 1, 0, 0, "JMP64pcrel32", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0xe9000001ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #1027 = JMP64pcrel32
+ { 1028, 1, 0, 0, "JMP64r", 0|(1<<TID::Branch)|(1<<TID::IndirectBranch)|(1<<TID::Barrier)|(1<<TID::Terminator), 0xff000014ULL, NULL, NULL, NULL, OperandInfo67 }, // Inst #1028 = JMP64r
+ { 1029, 1, 0, 0, "JMP_1", 0|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0xeb004001ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #1029 = JMP_1
+ { 1030, 1, 0, 0, "JMP_4", 0|(1<<TID::Branch)|(1<<TID::Barrier)|(1<<TID::Terminator), 0xe900c001ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #1030 = JMP_4
+ { 1031, 1, 0, 0, "JNE_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x75004001ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1031 = JNE_1
+ { 1032, 1, 0, 0, "JNE_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0x8500c101ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1032 = JNE_4
+ { 1033, 1, 0, 0, "JNO_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x71004001ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1033 = JNO_1
+ { 1034, 1, 0, 0, "JNO_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0x8100c101ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1034 = JNO_4
+ { 1035, 1, 0, 0, "JNP_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x7b004001ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1035 = JNP_1
+ { 1036, 1, 0, 0, "JNP_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0x8b00c101ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1036 = JNP_4
+ { 1037, 1, 0, 0, "JNS_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x79004001ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1037 = JNS_1
+ { 1038, 1, 0, 0, "JNS_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0x8900c101ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1038 = JNS_4
+ { 1039, 1, 0, 0, "JO_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x70004001ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1039 = JO_1
+ { 1040, 1, 0, 0, "JO_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0x8000c101ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1040 = JO_4
+ { 1041, 1, 0, 0, "JP_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x7a004001ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1041 = JP_1
+ { 1042, 1, 0, 0, "JP_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0x8a00c101ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1042 = JP_4
+ { 1043, 1, 0, 0, "JS_1", 0|(1<<TID::Branch)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0x78004001ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1043 = JS_1
+ { 1044, 1, 0, 0, "JS_4", 0|(1<<TID::Branch)|(1<<TID::Terminator), 0x8800c101ULL, ImplicitList1, NULL, NULL, OperandInfo2 }, // Inst #1044 = JS_4
+ { 1045, 0, 0, 0, "LAHF", 0, 0x9f000001ULL, ImplicitList1, ImplicitList27, NULL, 0 }, // Inst #1045 = LAHF
+ { 1046, 6, 1, 0, "LAR16rm", 0|(1<<TID::UnmodeledSideEffects), 0x2000146ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #1046 = LAR16rm
+ { 1047, 2, 1, 0, "LAR16rr", 0|(1<<TID::UnmodeledSideEffects), 0x2000145ULL, NULL, NULL, NULL, OperandInfo56 }, // Inst #1047 = LAR16rr
+ { 1048, 6, 1, 0, "LAR32rm", 0|(1<<TID::UnmodeledSideEffects), 0x2000106ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #1048 = LAR32rm
+ { 1049, 2, 1, 0, "LAR32rr", 0|(1<<TID::UnmodeledSideEffects), 0x2000105ULL, NULL, NULL, NULL, OperandInfo58 }, // Inst #1049 = LAR32rr
+ { 1050, 6, 1, 0, "LAR64rm", 0|(1<<TID::UnmodeledSideEffects), 0x2001106ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1050 = LAR64rm
+ { 1051, 2, 1, 0, "LAR64rr", 0|(1<<TID::UnmodeledSideEffects), 0x2001105ULL, NULL, NULL, NULL, OperandInfo141 }, // Inst #1051 = LAR64rr
+ { 1052, 6, 0, 0, "LCMPXCHG16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xb1080144ULL, ImplicitList12, ImplicitList28, Barriers1, OperandInfo11 }, // Inst #1052 = LCMPXCHG16
+ { 1053, 6, 0, 0, "LCMPXCHG32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xb1080104ULL, ImplicitList13, ImplicitList29, Barriers1, OperandInfo15 }, // Inst #1053 = LCMPXCHG32
+ { 1054, 6, 0, 0, "LCMPXCHG64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xb1081104ULL, ImplicitList15, ImplicitList30, Barriers1, OperandInfo19 }, // Inst #1054 = LCMPXCHG64
+ { 1055, 6, 0, 0, "LCMPXCHG8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xb0080104ULL, ImplicitList11, ImplicitList31, Barriers1, OperandInfo24 }, // Inst #1055 = LCMPXCHG8
+ { 1056, 5, 0, 0, "LCMPXCHG8B", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc7080119ULL, ImplicitList6, ImplicitList18, Barriers6, OperandInfo34 }, // Inst #1056 = LCMPXCHG8B
+ { 1057, 6, 1, 0, "LDDQUrm", 0|(1<<TID::MayLoad), 0xf0800b06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1057 = LDDQUrm
+ { 1058, 5, 0, 0, "LDMXCSR", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xae40011aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1058 = LDMXCSR
+ { 1059, 6, 1, 0, "LDS16rm", 0|(1<<TID::UnmodeledSideEffects), 0xc5000046ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #1059 = LDS16rm
+ { 1060, 6, 1, 0, "LDS32rm", 0|(1<<TID::UnmodeledSideEffects), 0xc5000006ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #1060 = LDS32rm
+ { 1061, 0, 0, 0, "LD_F0", 0|(1<<TID::UnmodeledSideEffects), 0xee000401ULL, NULL, NULL, NULL, 0 }, // Inst #1061 = LD_F0
+ { 1062, 0, 0, 0, "LD_F1", 0|(1<<TID::UnmodeledSideEffects), 0xe8000401ULL, NULL, NULL, NULL, 0 }, // Inst #1062 = LD_F1
+ { 1063, 5, 0, 0, "LD_F32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xd9000018ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1063 = LD_F32m
+ { 1064, 5, 0, 0, "LD_F64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xdd000018ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1064 = LD_F64m
+ { 1065, 5, 0, 0, "LD_F80m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xdb00001dULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1065 = LD_F80m
+ { 1066, 1, 1, 0, "LD_Fp032", 0|(1<<TID::Rematerializable), 0x10000ULL, NULL, NULL, NULL, OperandInfo113 }, // Inst #1066 = LD_Fp032
+ { 1067, 1, 1, 0, "LD_Fp064", 0|(1<<TID::Rematerializable), 0x10000ULL, NULL, NULL, NULL, OperandInfo114 }, // Inst #1067 = LD_Fp064
+ { 1068, 1, 1, 0, "LD_Fp080", 0|(1<<TID::Rematerializable), 0x10000ULL, NULL, NULL, NULL, OperandInfo115 }, // Inst #1068 = LD_Fp080
+ { 1069, 1, 1, 0, "LD_Fp132", 0|(1<<TID::Rematerializable), 0x10000ULL, NULL, NULL, NULL, OperandInfo113 }, // Inst #1069 = LD_Fp132
+ { 1070, 1, 1, 0, "LD_Fp164", 0|(1<<TID::Rematerializable), 0x10000ULL, NULL, NULL, NULL, OperandInfo114 }, // Inst #1070 = LD_Fp164
+ { 1071, 1, 1, 0, "LD_Fp180", 0|(1<<TID::Rematerializable), 0x10000ULL, NULL, NULL, NULL, OperandInfo115 }, // Inst #1071 = LD_Fp180
+ { 1072, 6, 1, 0, "LD_Fp32m", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10000ULL, NULL, NULL, NULL, OperandInfo120 }, // Inst #1072 = LD_Fp32m
+ { 1073, 6, 1, 0, "LD_Fp32m64", 0|(1<<TID::MayLoad), 0x10000ULL, NULL, NULL, NULL, OperandInfo121 }, // Inst #1073 = LD_Fp32m64
+ { 1074, 6, 1, 0, "LD_Fp32m80", 0|(1<<TID::MayLoad), 0x10000ULL, NULL, NULL, NULL, OperandInfo122 }, // Inst #1074 = LD_Fp32m80
+ { 1075, 6, 1, 0, "LD_Fp64m", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x10000ULL, NULL, NULL, NULL, OperandInfo121 }, // Inst #1075 = LD_Fp64m
+ { 1076, 6, 1, 0, "LD_Fp64m80", 0|(1<<TID::MayLoad), 0x10000ULL, NULL, NULL, NULL, OperandInfo122 }, // Inst #1076 = LD_Fp64m80
+ { 1077, 6, 1, 0, "LD_Fp80m", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10000ULL, NULL, NULL, NULL, OperandInfo122 }, // Inst #1077 = LD_Fp80m
+ { 1078, 1, 0, 0, "LD_Frr", 0|(1<<TID::UnmodeledSideEffects), 0xc0000402ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #1078 = LD_Frr
+ { 1079, 6, 1, 0, "LEA16r", 0, 0x8d000046ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #1079 = LEA16r
+ { 1080, 6, 1, 0, "LEA32r", 0|(1<<TID::Rematerializable), 0x8d000006ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #1080 = LEA32r
+ { 1081, 6, 1, 0, "LEA64_32r", 0, 0x8d000006ULL, NULL, NULL, NULL, OperandInfo142 }, // Inst #1081 = LEA64_32r
+ { 1082, 6, 1, 0, "LEA64r", 0|(1<<TID::Rematerializable), 0x8d001006ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1082 = LEA64r
+ { 1083, 0, 0, 0, "LEAVE", 0|(1<<TID::MayLoad), 0xc9000001ULL, ImplicitList32, ImplicitList32, NULL, 0 }, // Inst #1083 = LEAVE
+ { 1084, 0, 0, 0, "LEAVE64", 0|(1<<TID::MayLoad), 0xc9000001ULL, ImplicitList33, ImplicitList33, NULL, 0 }, // Inst #1084 = LEAVE64
+ { 1085, 6, 1, 0, "LES16rm", 0|(1<<TID::UnmodeledSideEffects), 0xc4000046ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #1085 = LES16rm
+ { 1086, 6, 1, 0, "LES32rm", 0|(1<<TID::UnmodeledSideEffects), 0xc4000006ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #1086 = LES32rm
+ { 1087, 0, 0, 0, "LFENCE", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xae000127ULL, NULL, NULL, NULL, 0 }, // Inst #1087 = LFENCE
+ { 1088, 6, 1, 0, "LFS16rm", 0|(1<<TID::UnmodeledSideEffects), 0xb4000146ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #1088 = LFS16rm
+ { 1089, 6, 1, 0, "LFS32rm", 0|(1<<TID::UnmodeledSideEffects), 0xb4000106ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #1089 = LFS32rm
+ { 1090, 6, 1, 0, "LFS64rm", 0|(1<<TID::UnmodeledSideEffects), 0xb4001106ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1090 = LFS64rm
+ { 1091, 5, 0, 0, "LGDTm", 0|(1<<TID::UnmodeledSideEffects), 0x100011aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1091 = LGDTm
+ { 1092, 6, 1, 0, "LGS16rm", 0|(1<<TID::UnmodeledSideEffects), 0xb5000146ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #1092 = LGS16rm
+ { 1093, 6, 1, 0, "LGS32rm", 0|(1<<TID::UnmodeledSideEffects), 0xb5000106ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #1093 = LGS32rm
+ { 1094, 6, 1, 0, "LGS64rm", 0|(1<<TID::UnmodeledSideEffects), 0xb5001106ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1094 = LGS64rm
+ { 1095, 5, 0, 0, "LIDTm", 0|(1<<TID::UnmodeledSideEffects), 0x100011bULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1095 = LIDTm
+ { 1096, 5, 0, 0, "LLDT16m", 0|(1<<TID::UnmodeledSideEffects), 0x11aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1096 = LLDT16m
+ { 1097, 1, 0, 0, "LLDT16r", 0|(1<<TID::UnmodeledSideEffects), 0x112ULL, NULL, NULL, NULL, OperandInfo106 }, // Inst #1097 = LLDT16r
+ { 1098, 5, 0, 0, "LMSW16m", 0|(1<<TID::UnmodeledSideEffects), 0x100011eULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1098 = LMSW16m
+ { 1099, 1, 0, 0, "LMSW16r", 0|(1<<TID::UnmodeledSideEffects), 0x1000116ULL, NULL, NULL, NULL, OperandInfo106 }, // Inst #1099 = LMSW16r
+ { 1100, 6, 0, 0, "LOCK_ADD16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x81086018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1100 = LOCK_ADD16mi
+ { 1101, 6, 0, 0, "LOCK_ADD16mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x83082058ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1101 = LOCK_ADD16mi8
+ { 1102, 6, 0, 0, "LOCK_ADD16mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x1080044ULL, NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #1102 = LOCK_ADD16mr
+ { 1103, 6, 0, 0, "LOCK_ADD32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x8108a018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1103 = LOCK_ADD32mi
+ { 1104, 6, 0, 0, "LOCK_ADD32mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x83082018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1104 = LOCK_ADD32mi8
+ { 1105, 6, 0, 0, "LOCK_ADD32mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x1080004ULL, NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #1105 = LOCK_ADD32mr
+ { 1106, 6, 0, 0, "LOCK_ADD64mi32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x8108b018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1106 = LOCK_ADD64mi32
+ { 1107, 6, 0, 0, "LOCK_ADD64mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x83083018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1107 = LOCK_ADD64mi8
+ { 1108, 6, 0, 0, "LOCK_ADD64mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x1081004ULL, NULL, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #1108 = LOCK_ADD64mr
+ { 1109, 6, 0, 0, "LOCK_ADD8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x80082018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1109 = LOCK_ADD8mi
+ { 1110, 6, 0, 0, "LOCK_ADD8mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x80004ULL, NULL, ImplicitList1, Barriers1, OperandInfo24 }, // Inst #1110 = LOCK_ADD8mr
+ { 1111, 5, 0, 0, "LOCK_DEC16m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xff080059ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #1111 = LOCK_DEC16m
+ { 1112, 5, 0, 0, "LOCK_DEC32m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xff080019ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #1112 = LOCK_DEC32m
+ { 1113, 5, 0, 0, "LOCK_DEC64m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xff081019ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #1113 = LOCK_DEC64m
+ { 1114, 5, 0, 0, "LOCK_DEC8m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xfe080019ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #1114 = LOCK_DEC8m
+ { 1115, 5, 0, 0, "LOCK_INC16m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xff080058ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #1115 = LOCK_INC16m
+ { 1116, 5, 0, 0, "LOCK_INC32m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xff080018ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #1116 = LOCK_INC32m
+ { 1117, 5, 0, 0, "LOCK_INC64m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xff081018ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #1117 = LOCK_INC64m
+ { 1118, 5, 0, 0, "LOCK_INC8m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xfe080018ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #1118 = LOCK_INC8m
+ { 1119, 0, 0, 0, "LOCK_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0xf0000001ULL, NULL, NULL, NULL, 0 }, // Inst #1119 = LOCK_PREFIX
+ { 1120, 6, 0, 0, "LOCK_SUB16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x8108605dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1120 = LOCK_SUB16mi
+ { 1121, 6, 0, 0, "LOCK_SUB16mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x8308205dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1121 = LOCK_SUB16mi8
+ { 1122, 6, 0, 0, "LOCK_SUB16mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x29080044ULL, NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #1122 = LOCK_SUB16mr
+ { 1123, 6, 0, 0, "LOCK_SUB32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x8108a01dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1123 = LOCK_SUB32mi
+ { 1124, 6, 0, 0, "LOCK_SUB32mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x8308201dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1124 = LOCK_SUB32mi8
+ { 1125, 6, 0, 0, "LOCK_SUB32mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x29080004ULL, NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #1125 = LOCK_SUB32mr
+ { 1126, 6, 0, 0, "LOCK_SUB64mi32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x8108b01dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1126 = LOCK_SUB64mi32
+ { 1127, 6, 0, 0, "LOCK_SUB64mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x8308301dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1127 = LOCK_SUB64mi8
+ { 1128, 6, 0, 0, "LOCK_SUB64mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x29081004ULL, NULL, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #1128 = LOCK_SUB64mr
+ { 1129, 6, 0, 0, "LOCK_SUB8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x8008201dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1129 = LOCK_SUB8mi
+ { 1130, 6, 0, 0, "LOCK_SUB8mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x28080004ULL, NULL, ImplicitList1, Barriers1, OperandInfo24 }, // Inst #1130 = LOCK_SUB8mr
+ { 1131, 0, 0, 0, "LODSB", 0|(1<<TID::UnmodeledSideEffects), 0xac000001ULL, NULL, NULL, NULL, 0 }, // Inst #1131 = LODSB
+ { 1132, 0, 0, 0, "LODSD", 0|(1<<TID::UnmodeledSideEffects), 0xad000001ULL, NULL, NULL, NULL, 0 }, // Inst #1132 = LODSD
+ { 1133, 0, 0, 0, "LODSQ", 0|(1<<TID::UnmodeledSideEffects), 0xad001001ULL, NULL, NULL, NULL, 0 }, // Inst #1133 = LODSQ
+ { 1134, 0, 0, 0, "LODSW", 0|(1<<TID::UnmodeledSideEffects), 0xad000041ULL, NULL, NULL, NULL, 0 }, // Inst #1134 = LODSW
+ { 1135, 1, 0, 0, "LOOP", 0|(1<<TID::UnmodeledSideEffects), 0xe2004001ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #1135 = LOOP
+ { 1136, 1, 0, 0, "LOOPE", 0|(1<<TID::UnmodeledSideEffects), 0xe1004001ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #1136 = LOOPE
+ { 1137, 1, 0, 0, "LOOPNE", 0|(1<<TID::UnmodeledSideEffects), 0xe0004001ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #1137 = LOOPNE
+ { 1138, 0, 0, 0, "LRET", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0xcb070001ULL, NULL, NULL, NULL, 0 }, // Inst #1138 = LRET
+ { 1139, 1, 0, 0, "LRETI", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0xca076001ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #1139 = LRETI
+ { 1140, 6, 1, 0, "LSL16rm", 0|(1<<TID::UnmodeledSideEffects), 0x3000146ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #1140 = LSL16rm
+ { 1141, 2, 1, 0, "LSL16rr", 0|(1<<TID::UnmodeledSideEffects), 0x3000145ULL, NULL, NULL, NULL, OperandInfo56 }, // Inst #1141 = LSL16rr
+ { 1142, 6, 1, 0, "LSL32rm", 0|(1<<TID::UnmodeledSideEffects), 0x3000106ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #1142 = LSL32rm
+ { 1143, 2, 1, 0, "LSL32rr", 0|(1<<TID::UnmodeledSideEffects), 0x3000105ULL, NULL, NULL, NULL, OperandInfo58 }, // Inst #1143 = LSL32rr
+ { 1144, 6, 1, 0, "LSL64rm", 0|(1<<TID::UnmodeledSideEffects), 0x3001106ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1144 = LSL64rm
+ { 1145, 2, 1, 0, "LSL64rr", 0|(1<<TID::UnmodeledSideEffects), 0x3001105ULL, NULL, NULL, NULL, OperandInfo60 }, // Inst #1145 = LSL64rr
+ { 1146, 6, 1, 0, "LSS16rm", 0|(1<<TID::UnmodeledSideEffects), 0xb2000146ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #1146 = LSS16rm
+ { 1147, 6, 1, 0, "LSS32rm", 0|(1<<TID::UnmodeledSideEffects), 0xb2000106ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #1147 = LSS32rm
+ { 1148, 6, 1, 0, "LSS64rm", 0|(1<<TID::UnmodeledSideEffects), 0xb2001106ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1148 = LSS64rm
+ { 1149, 5, 0, 0, "LTRm", 0|(1<<TID::UnmodeledSideEffects), 0x11bULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1149 = LTRm
+ { 1150, 1, 0, 0, "LTRr", 0|(1<<TID::UnmodeledSideEffects), 0x113ULL, NULL, NULL, NULL, OperandInfo106 }, // Inst #1150 = LTRr
+ { 1151, 7, 1, 0, "LXADD16", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc1080146ULL, NULL, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #1151 = LXADD16
+ { 1152, 7, 1, 0, "LXADD32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc1080106ULL, NULL, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #1152 = LXADD32
+ { 1153, 7, 1, 0, "LXADD64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc1081106ULL, NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #1153 = LXADD64
+ { 1154, 7, 1, 0, "LXADD8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc0080106ULL, NULL, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #1154 = LXADD8
+ { 1155, 2, 0, 0, "MASKMOVDQU", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xf7c00145ULL, ImplicitList34, NULL, NULL, OperandInfo43 }, // Inst #1155 = MASKMOVDQU
+ { 1156, 2, 0, 0, "MASKMOVDQU64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xf7c00145ULL, ImplicitList35, NULL, NULL, OperandInfo43 }, // Inst #1156 = MASKMOVDQU64
+ { 1157, 7, 1, 0, "MAXPDrm", 0|(1<<TID::MayLoad), 0x5f800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1157 = MAXPDrm
+ { 1158, 7, 1, 0, "MAXPDrm_Int", 0|(1<<TID::MayLoad), 0x5f800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1158 = MAXPDrm_Int
+ { 1159, 3, 1, 0, "MAXPDrr", 0, 0x5f800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1159 = MAXPDrr
+ { 1160, 3, 1, 0, "MAXPDrr_Int", 0, 0x5f800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1160 = MAXPDrr_Int
+ { 1161, 7, 1, 0, "MAXPSrm", 0|(1<<TID::MayLoad), 0x5f400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1161 = MAXPSrm
+ { 1162, 7, 1, 0, "MAXPSrm_Int", 0|(1<<TID::MayLoad), 0x5f400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1162 = MAXPSrm_Int
+ { 1163, 3, 1, 0, "MAXPSrr", 0, 0x5f400105ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1163 = MAXPSrr
+ { 1164, 3, 1, 0, "MAXPSrr_Int", 0, 0x5f400105ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1164 = MAXPSrr_Int
+ { 1165, 7, 1, 0, "MAXSDrm", 0|(1<<TID::MayLoad), 0x5f000b06ULL, NULL, NULL, NULL, OperandInfo30 }, // Inst #1165 = MAXSDrm
+ { 1166, 7, 1, 0, "MAXSDrm_Int", 0|(1<<TID::MayLoad), 0x5f000b06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1166 = MAXSDrm_Int
+ { 1167, 3, 1, 0, "MAXSDrr", 0, 0x5f000b05ULL, NULL, NULL, NULL, OperandInfo31 }, // Inst #1167 = MAXSDrr
+ { 1168, 3, 1, 0, "MAXSDrr_Int", 0, 0x5f000b05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1168 = MAXSDrr_Int
+ { 1169, 7, 1, 0, "MAXSSrm", 0|(1<<TID::MayLoad), 0x5f000c06ULL, NULL, NULL, NULL, OperandInfo32 }, // Inst #1169 = MAXSSrm
+ { 1170, 7, 1, 0, "MAXSSrm_Int", 0|(1<<TID::MayLoad), 0x5f000c06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1170 = MAXSSrm_Int
+ { 1171, 3, 1, 0, "MAXSSrr", 0, 0x5f000c05ULL, NULL, NULL, NULL, OperandInfo33 }, // Inst #1171 = MAXSSrr
+ { 1172, 3, 1, 0, "MAXSSrr_Int", 0, 0x5f000c05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1172 = MAXSSrr_Int
+ { 1173, 0, 0, 0, "MFENCE", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xae000128ULL, NULL, NULL, NULL, 0 }, // Inst #1173 = MFENCE
+ { 1174, 0, 0, 0, "MINGW_ALLOCA", 0|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList2, ImplicitList36, Barriers1, 0 }, // Inst #1174 = MINGW_ALLOCA
+ { 1175, 7, 1, 0, "MINPDrm", 0|(1<<TID::MayLoad), 0x5d800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1175 = MINPDrm
+ { 1176, 7, 1, 0, "MINPDrm_Int", 0|(1<<TID::MayLoad), 0x5d800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1176 = MINPDrm_Int
+ { 1177, 3, 1, 0, "MINPDrr", 0, 0x5d800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1177 = MINPDrr
+ { 1178, 3, 1, 0, "MINPDrr_Int", 0, 0x5d800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1178 = MINPDrr_Int
+ { 1179, 7, 1, 0, "MINPSrm", 0|(1<<TID::MayLoad), 0x5d400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1179 = MINPSrm
+ { 1180, 7, 1, 0, "MINPSrm_Int", 0|(1<<TID::MayLoad), 0x5d400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1180 = MINPSrm_Int
+ { 1181, 3, 1, 0, "MINPSrr", 0, 0x5d400105ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1181 = MINPSrr
+ { 1182, 3, 1, 0, "MINPSrr_Int", 0, 0x5d400105ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1182 = MINPSrr_Int
+ { 1183, 7, 1, 0, "MINSDrm", 0|(1<<TID::MayLoad), 0x5d000b06ULL, NULL, NULL, NULL, OperandInfo30 }, // Inst #1183 = MINSDrm
+ { 1184, 7, 1, 0, "MINSDrm_Int", 0|(1<<TID::MayLoad), 0x5d000b06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1184 = MINSDrm_Int
+ { 1185, 3, 1, 0, "MINSDrr", 0, 0x5d000b05ULL, NULL, NULL, NULL, OperandInfo31 }, // Inst #1185 = MINSDrr
+ { 1186, 3, 1, 0, "MINSDrr_Int", 0, 0x5d000b05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1186 = MINSDrr_Int
+ { 1187, 7, 1, 0, "MINSSrm", 0|(1<<TID::MayLoad), 0x5d000c06ULL, NULL, NULL, NULL, OperandInfo32 }, // Inst #1187 = MINSSrm
+ { 1188, 7, 1, 0, "MINSSrm_Int", 0|(1<<TID::MayLoad), 0x5d000c06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1188 = MINSSrm_Int
+ { 1189, 3, 1, 0, "MINSSrr", 0, 0x5d000c05ULL, NULL, NULL, NULL, OperandInfo33 }, // Inst #1189 = MINSSrr
+ { 1190, 3, 1, 0, "MINSSrr_Int", 0, 0x5d000c05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1190 = MINSSrr_Int
+ { 1191, 6, 1, 0, "MMX_CVTPD2PIrm", 0|(1<<TID::MayLoad), 0x2d000146ULL, NULL, NULL, NULL, OperandInfo129 }, // Inst #1191 = MMX_CVTPD2PIrm
+ { 1192, 2, 1, 0, "MMX_CVTPD2PIrr", 0, 0x2d000145ULL, NULL, NULL, NULL, OperandInfo130 }, // Inst #1192 = MMX_CVTPD2PIrr
+ { 1193, 6, 1, 0, "MMX_CVTPI2PDrm", 0|(1<<TID::MayLoad), 0x2a000146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1193 = MMX_CVTPI2PDrm
+ { 1194, 2, 1, 0, "MMX_CVTPI2PDrr", 0, 0x2a000145ULL, NULL, NULL, NULL, OperandInfo131 }, // Inst #1194 = MMX_CVTPI2PDrr
+ { 1195, 6, 1, 0, "MMX_CVTPI2PSrm", 0|(1<<TID::MayLoad), 0x2a000106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1195 = MMX_CVTPI2PSrm
+ { 1196, 2, 1, 0, "MMX_CVTPI2PSrr", 0, 0x2a000105ULL, NULL, NULL, NULL, OperandInfo131 }, // Inst #1196 = MMX_CVTPI2PSrr
+ { 1197, 6, 1, 0, "MMX_CVTPS2PIrm", 0|(1<<TID::MayLoad), 0x2d000106ULL, NULL, NULL, NULL, OperandInfo129 }, // Inst #1197 = MMX_CVTPS2PIrm
+ { 1198, 2, 1, 0, "MMX_CVTPS2PIrr", 0, 0x2d000105ULL, NULL, NULL, NULL, OperandInfo130 }, // Inst #1198 = MMX_CVTPS2PIrr
+ { 1199, 6, 1, 0, "MMX_CVTTPD2PIrm", 0|(1<<TID::MayLoad), 0x2c000146ULL, NULL, NULL, NULL, OperandInfo129 }, // Inst #1199 = MMX_CVTTPD2PIrm
+ { 1200, 2, 1, 0, "MMX_CVTTPD2PIrr", 0, 0x2c000145ULL, NULL, NULL, NULL, OperandInfo130 }, // Inst #1200 = MMX_CVTTPD2PIrr
+ { 1201, 6, 1, 0, "MMX_CVTTPS2PIrm", 0|(1<<TID::MayLoad), 0x2c000106ULL, NULL, NULL, NULL, OperandInfo129 }, // Inst #1201 = MMX_CVTTPS2PIrm
+ { 1202, 2, 1, 0, "MMX_CVTTPS2PIrr", 0, 0x2c000105ULL, NULL, NULL, NULL, OperandInfo130 }, // Inst #1202 = MMX_CVTTPS2PIrr
+ { 1203, 0, 0, 0, "MMX_EMMS", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x77000101ULL, NULL, NULL, NULL, 0 }, // Inst #1203 = MMX_EMMS
+ { 1204, 0, 0, 0, "MMX_FEMMS", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xe000101ULL, NULL, NULL, NULL, 0 }, // Inst #1204 = MMX_FEMMS
+ { 1205, 2, 0, 0, "MMX_MASKMOVQ", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xf7000105ULL, ImplicitList34, NULL, NULL, OperandInfo143 }, // Inst #1205 = MMX_MASKMOVQ
+ { 1206, 2, 0, 0, "MMX_MASKMOVQ64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xf7000105ULL, ImplicitList35, NULL, NULL, OperandInfo143 }, // Inst #1206 = MMX_MASKMOVQ64
+ { 1207, 2, 1, 0, "MMX_MOVD64from64rr", 0, 0x7e001103ULL, NULL, NULL, NULL, OperandInfo144 }, // Inst #1207 = MMX_MOVD64from64rr
+ { 1208, 2, 0, 0, "MMX_MOVD64grr", 0|(1<<TID::UnmodeledSideEffects), 0x7e000103ULL, NULL, NULL, NULL, OperandInfo145 }, // Inst #1208 = MMX_MOVD64grr
+ { 1209, 6, 0, 0, "MMX_MOVD64mr", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x7e000104ULL, NULL, NULL, NULL, OperandInfo146 }, // Inst #1209 = MMX_MOVD64mr
+ { 1210, 6, 1, 0, "MMX_MOVD64rm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x6e000106ULL, NULL, NULL, NULL, OperandInfo129 }, // Inst #1210 = MMX_MOVD64rm
+ { 1211, 2, 1, 0, "MMX_MOVD64rr", 0, 0x6e000105ULL, NULL, NULL, NULL, OperandInfo147 }, // Inst #1211 = MMX_MOVD64rr
+ { 1212, 2, 1, 0, "MMX_MOVD64rrv164", 0, 0x6e001105ULL, NULL, NULL, NULL, OperandInfo148 }, // Inst #1212 = MMX_MOVD64rrv164
+ { 1213, 2, 1, 0, "MMX_MOVD64to64rr", 0, 0x6e001105ULL, NULL, NULL, NULL, OperandInfo148 }, // Inst #1213 = MMX_MOVD64to64rr
+ { 1214, 2, 1, 0, "MMX_MOVDQ2Qrr", 0, 0xd6002b05ULL, NULL, NULL, NULL, OperandInfo130 }, // Inst #1214 = MMX_MOVDQ2Qrr
+ { 1215, 2, 1, 0, "MMX_MOVFR642Qrr", 0|(1<<TID::UnmodeledSideEffects), 0xd6002b05ULL, NULL, NULL, NULL, OperandInfo149 }, // Inst #1215 = MMX_MOVFR642Qrr
+ { 1216, 6, 0, 0, "MMX_MOVNTQmr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xe7000104ULL, NULL, NULL, NULL, OperandInfo146 }, // Inst #1216 = MMX_MOVNTQmr
+ { 1217, 2, 1, 0, "MMX_MOVQ2DQrr", 0, 0xd6002c05ULL, NULL, NULL, NULL, OperandInfo131 }, // Inst #1217 = MMX_MOVQ2DQrr
+ { 1218, 2, 1, 0, "MMX_MOVQ2FR64rr", 0, 0xd6002c05ULL, NULL, NULL, NULL, OperandInfo150 }, // Inst #1218 = MMX_MOVQ2FR64rr
+ { 1219, 6, 0, 0, "MMX_MOVQ64mr", 0|(1<<TID::MayStore), 0x7f000104ULL, NULL, NULL, NULL, OperandInfo146 }, // Inst #1219 = MMX_MOVQ64mr
+ { 1220, 6, 1, 0, "MMX_MOVQ64rm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x6f000106ULL, NULL, NULL, NULL, OperandInfo129 }, // Inst #1220 = MMX_MOVQ64rm
+ { 1221, 2, 1, 0, "MMX_MOVQ64rr", 0, 0x6f000105ULL, NULL, NULL, NULL, OperandInfo143 }, // Inst #1221 = MMX_MOVQ64rr
+ { 1222, 6, 1, 0, "MMX_MOVZDI2PDIrm", 0|(1<<TID::MayLoad), 0x6e000106ULL, NULL, NULL, NULL, OperandInfo129 }, // Inst #1222 = MMX_MOVZDI2PDIrm
+ { 1223, 2, 1, 0, "MMX_MOVZDI2PDIrr", 0, 0x6e000105ULL, NULL, NULL, NULL, OperandInfo147 }, // Inst #1223 = MMX_MOVZDI2PDIrr
+ { 1224, 7, 1, 0, "MMX_PACKSSDWrm", 0|(1<<TID::MayLoad), 0x6b000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1224 = MMX_PACKSSDWrm
+ { 1225, 3, 1, 0, "MMX_PACKSSDWrr", 0, 0x6b000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1225 = MMX_PACKSSDWrr
+ { 1226, 7, 1, 0, "MMX_PACKSSWBrm", 0|(1<<TID::MayLoad), 0x63000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1226 = MMX_PACKSSWBrm
+ { 1227, 3, 1, 0, "MMX_PACKSSWBrr", 0, 0x63000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1227 = MMX_PACKSSWBrr
+ { 1228, 7, 1, 0, "MMX_PACKUSWBrm", 0|(1<<TID::MayLoad), 0x67000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1228 = MMX_PACKUSWBrm
+ { 1229, 3, 1, 0, "MMX_PACKUSWBrr", 0, 0x67000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1229 = MMX_PACKUSWBrr
+ { 1230, 7, 1, 0, "MMX_PADDBrm", 0|(1<<TID::MayLoad), 0xfc000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1230 = MMX_PADDBrm
+ { 1231, 3, 1, 0, "MMX_PADDBrr", 0|(1<<TID::Commutable), 0xfc000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1231 = MMX_PADDBrr
+ { 1232, 7, 1, 0, "MMX_PADDDrm", 0|(1<<TID::MayLoad), 0xfe000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1232 = MMX_PADDDrm
+ { 1233, 3, 1, 0, "MMX_PADDDrr", 0|(1<<TID::Commutable), 0xfe000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1233 = MMX_PADDDrr
+ { 1234, 7, 1, 0, "MMX_PADDQrm", 0|(1<<TID::MayLoad), 0xd4000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1234 = MMX_PADDQrm
+ { 1235, 3, 1, 0, "MMX_PADDQrr", 0|(1<<TID::Commutable), 0xd4000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1235 = MMX_PADDQrr
+ { 1236, 7, 1, 0, "MMX_PADDSBrm", 0|(1<<TID::MayLoad), 0xec000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1236 = MMX_PADDSBrm
+ { 1237, 3, 1, 0, "MMX_PADDSBrr", 0|(1<<TID::Commutable), 0xec000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1237 = MMX_PADDSBrr
+ { 1238, 7, 1, 0, "MMX_PADDSWrm", 0|(1<<TID::MayLoad), 0xed000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1238 = MMX_PADDSWrm
+ { 1239, 3, 1, 0, "MMX_PADDSWrr", 0|(1<<TID::Commutable), 0xed000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1239 = MMX_PADDSWrr
+ { 1240, 7, 1, 0, "MMX_PADDUSBrm", 0|(1<<TID::MayLoad), 0xdc000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1240 = MMX_PADDUSBrm
+ { 1241, 3, 1, 0, "MMX_PADDUSBrr", 0|(1<<TID::Commutable), 0xdc000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1241 = MMX_PADDUSBrr
+ { 1242, 7, 1, 0, "MMX_PADDUSWrm", 0|(1<<TID::MayLoad), 0xdd000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1242 = MMX_PADDUSWrm
+ { 1243, 3, 1, 0, "MMX_PADDUSWrr", 0|(1<<TID::Commutable), 0xdd000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1243 = MMX_PADDUSWrr
+ { 1244, 7, 1, 0, "MMX_PADDWrm", 0|(1<<TID::MayLoad), 0xfd000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1244 = MMX_PADDWrm
+ { 1245, 3, 1, 0, "MMX_PADDWrr", 0|(1<<TID::Commutable), 0xfd000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1245 = MMX_PADDWrr
+ { 1246, 7, 1, 0, "MMX_PANDNrm", 0|(1<<TID::MayLoad), 0xdf000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1246 = MMX_PANDNrm
+ { 1247, 3, 1, 0, "MMX_PANDNrr", 0, 0xdf000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1247 = MMX_PANDNrr
+ { 1248, 7, 1, 0, "MMX_PANDrm", 0|(1<<TID::MayLoad), 0xdb000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1248 = MMX_PANDrm
+ { 1249, 3, 1, 0, "MMX_PANDrr", 0|(1<<TID::Commutable), 0xdb000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1249 = MMX_PANDrr
+ { 1250, 7, 1, 0, "MMX_PAVGBrm", 0|(1<<TID::MayLoad), 0xe0000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1250 = MMX_PAVGBrm
+ { 1251, 3, 1, 0, "MMX_PAVGBrr", 0|(1<<TID::Commutable), 0xe0000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1251 = MMX_PAVGBrr
+ { 1252, 7, 1, 0, "MMX_PAVGWrm", 0|(1<<TID::MayLoad), 0xe3000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1252 = MMX_PAVGWrm
+ { 1253, 3, 1, 0, "MMX_PAVGWrr", 0|(1<<TID::Commutable), 0xe3000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1253 = MMX_PAVGWrr
+ { 1254, 7, 1, 0, "MMX_PCMPEQBrm", 0|(1<<TID::MayLoad), 0x74000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1254 = MMX_PCMPEQBrm
+ { 1255, 3, 1, 0, "MMX_PCMPEQBrr", 0, 0x74000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1255 = MMX_PCMPEQBrr
+ { 1256, 7, 1, 0, "MMX_PCMPEQDrm", 0|(1<<TID::MayLoad), 0x76000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1256 = MMX_PCMPEQDrm
+ { 1257, 3, 1, 0, "MMX_PCMPEQDrr", 0, 0x76000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1257 = MMX_PCMPEQDrr
+ { 1258, 7, 1, 0, "MMX_PCMPEQWrm", 0|(1<<TID::MayLoad), 0x75000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1258 = MMX_PCMPEQWrm
+ { 1259, 3, 1, 0, "MMX_PCMPEQWrr", 0, 0x75000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1259 = MMX_PCMPEQWrr
+ { 1260, 7, 1, 0, "MMX_PCMPGTBrm", 0|(1<<TID::MayLoad), 0x64000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1260 = MMX_PCMPGTBrm
+ { 1261, 3, 1, 0, "MMX_PCMPGTBrr", 0, 0x64000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1261 = MMX_PCMPGTBrr
+ { 1262, 7, 1, 0, "MMX_PCMPGTDrm", 0|(1<<TID::MayLoad), 0x66000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1262 = MMX_PCMPGTDrm
+ { 1263, 3, 1, 0, "MMX_PCMPGTDrr", 0, 0x66000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1263 = MMX_PCMPGTDrr
+ { 1264, 7, 1, 0, "MMX_PCMPGTWrm", 0|(1<<TID::MayLoad), 0x65000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1264 = MMX_PCMPGTWrm
+ { 1265, 3, 1, 0, "MMX_PCMPGTWrr", 0, 0x65000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1265 = MMX_PCMPGTWrr
+ { 1266, 3, 1, 0, "MMX_PEXTRWri", 0, 0xc5002105ULL, NULL, NULL, NULL, OperandInfo153 }, // Inst #1266 = MMX_PEXTRWri
+ { 1267, 8, 1, 0, "MMX_PINSRWrmi", 0|(1<<TID::MayLoad), 0xc4002106ULL, NULL, NULL, NULL, OperandInfo154 }, // Inst #1267 = MMX_PINSRWrmi
+ { 1268, 4, 1, 0, "MMX_PINSRWrri", 0, 0xc4002105ULL, NULL, NULL, NULL, OperandInfo155 }, // Inst #1268 = MMX_PINSRWrri
+ { 1269, 7, 1, 0, "MMX_PMADDWDrm", 0|(1<<TID::MayLoad), 0xf5000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1269 = MMX_PMADDWDrm
+ { 1270, 3, 1, 0, "MMX_PMADDWDrr", 0|(1<<TID::Commutable), 0xf5000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1270 = MMX_PMADDWDrr
+ { 1271, 7, 1, 0, "MMX_PMAXSWrm", 0|(1<<TID::MayLoad), 0xee000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1271 = MMX_PMAXSWrm
+ { 1272, 3, 1, 0, "MMX_PMAXSWrr", 0|(1<<TID::Commutable), 0xee000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1272 = MMX_PMAXSWrr
+ { 1273, 7, 1, 0, "MMX_PMAXUBrm", 0|(1<<TID::MayLoad), 0xde000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1273 = MMX_PMAXUBrm
+ { 1274, 3, 1, 0, "MMX_PMAXUBrr", 0|(1<<TID::Commutable), 0xde000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1274 = MMX_PMAXUBrr
+ { 1275, 7, 1, 0, "MMX_PMINSWrm", 0|(1<<TID::MayLoad), 0xea000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1275 = MMX_PMINSWrm
+ { 1276, 3, 1, 0, "MMX_PMINSWrr", 0|(1<<TID::Commutable), 0xea000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1276 = MMX_PMINSWrr
+ { 1277, 7, 1, 0, "MMX_PMINUBrm", 0|(1<<TID::MayLoad), 0xda000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1277 = MMX_PMINUBrm
+ { 1278, 3, 1, 0, "MMX_PMINUBrr", 0|(1<<TID::Commutable), 0xda000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1278 = MMX_PMINUBrr
+ { 1279, 2, 1, 0, "MMX_PMOVMSKBrr", 0, 0xd7000105ULL, NULL, NULL, NULL, OperandInfo145 }, // Inst #1279 = MMX_PMOVMSKBrr
+ { 1280, 7, 1, 0, "MMX_PMULHUWrm", 0|(1<<TID::MayLoad), 0xe4000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1280 = MMX_PMULHUWrm
+ { 1281, 3, 1, 0, "MMX_PMULHUWrr", 0|(1<<TID::Commutable), 0xe4000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1281 = MMX_PMULHUWrr
+ { 1282, 7, 1, 0, "MMX_PMULHWrm", 0|(1<<TID::MayLoad), 0xe5000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1282 = MMX_PMULHWrm
+ { 1283, 3, 1, 0, "MMX_PMULHWrr", 0|(1<<TID::Commutable), 0xe5000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1283 = MMX_PMULHWrr
+ { 1284, 7, 1, 0, "MMX_PMULLWrm", 0|(1<<TID::MayLoad), 0xd5000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1284 = MMX_PMULLWrm
+ { 1285, 3, 1, 0, "MMX_PMULLWrr", 0|(1<<TID::Commutable), 0xd5000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1285 = MMX_PMULLWrr
+ { 1286, 7, 1, 0, "MMX_PMULUDQrm", 0|(1<<TID::MayLoad), 0xf4000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1286 = MMX_PMULUDQrm
+ { 1287, 3, 1, 0, "MMX_PMULUDQrr", 0|(1<<TID::Commutable), 0xf4000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1287 = MMX_PMULUDQrr
+ { 1288, 7, 1, 0, "MMX_PORrm", 0|(1<<TID::MayLoad), 0xeb000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1288 = MMX_PORrm
+ { 1289, 3, 1, 0, "MMX_PORrr", 0|(1<<TID::Commutable), 0xeb000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1289 = MMX_PORrr
+ { 1290, 7, 1, 0, "MMX_PSADBWrm", 0|(1<<TID::MayLoad), 0xf6000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1290 = MMX_PSADBWrm
+ { 1291, 3, 1, 0, "MMX_PSADBWrr", 0|(1<<TID::Commutable), 0xf6000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1291 = MMX_PSADBWrr
+ { 1292, 7, 1, 0, "MMX_PSHUFWmi", 0|(1<<TID::MayLoad), 0x70002106ULL, NULL, NULL, NULL, OperandInfo156 }, // Inst #1292 = MMX_PSHUFWmi
+ { 1293, 3, 1, 0, "MMX_PSHUFWri", 0, 0x70002105ULL, NULL, NULL, NULL, OperandInfo157 }, // Inst #1293 = MMX_PSHUFWri
+ { 1294, 3, 1, 0, "MMX_PSLLDri", 0, 0x72002116ULL, NULL, NULL, NULL, OperandInfo158 }, // Inst #1294 = MMX_PSLLDri
+ { 1295, 7, 1, 0, "MMX_PSLLDrm", 0|(1<<TID::MayLoad), 0xf2000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1295 = MMX_PSLLDrm
+ { 1296, 3, 1, 0, "MMX_PSLLDrr", 0, 0xf2000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1296 = MMX_PSLLDrr
+ { 1297, 3, 1, 0, "MMX_PSLLQri", 0, 0x73002116ULL, NULL, NULL, NULL, OperandInfo158 }, // Inst #1297 = MMX_PSLLQri
+ { 1298, 7, 1, 0, "MMX_PSLLQrm", 0|(1<<TID::MayLoad), 0xf3000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1298 = MMX_PSLLQrm
+ { 1299, 3, 1, 0, "MMX_PSLLQrr", 0, 0xf3000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1299 = MMX_PSLLQrr
+ { 1300, 3, 1, 0, "MMX_PSLLWri", 0, 0x71002116ULL, NULL, NULL, NULL, OperandInfo158 }, // Inst #1300 = MMX_PSLLWri
+ { 1301, 7, 1, 0, "MMX_PSLLWrm", 0|(1<<TID::MayLoad), 0xf1000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1301 = MMX_PSLLWrm
+ { 1302, 3, 1, 0, "MMX_PSLLWrr", 0, 0xf1000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1302 = MMX_PSLLWrr
+ { 1303, 3, 1, 0, "MMX_PSRADri", 0, 0x72002114ULL, NULL, NULL, NULL, OperandInfo158 }, // Inst #1303 = MMX_PSRADri
+ { 1304, 7, 1, 0, "MMX_PSRADrm", 0|(1<<TID::MayLoad), 0xe2000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1304 = MMX_PSRADrm
+ { 1305, 3, 1, 0, "MMX_PSRADrr", 0, 0xe2000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1305 = MMX_PSRADrr
+ { 1306, 3, 1, 0, "MMX_PSRAWri", 0, 0x71002114ULL, NULL, NULL, NULL, OperandInfo158 }, // Inst #1306 = MMX_PSRAWri
+ { 1307, 7, 1, 0, "MMX_PSRAWrm", 0|(1<<TID::MayLoad), 0xe1000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1307 = MMX_PSRAWrm
+ { 1308, 3, 1, 0, "MMX_PSRAWrr", 0, 0xe1000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1308 = MMX_PSRAWrr
+ { 1309, 3, 1, 0, "MMX_PSRLDri", 0, 0x72002112ULL, NULL, NULL, NULL, OperandInfo158 }, // Inst #1309 = MMX_PSRLDri
+ { 1310, 7, 1, 0, "MMX_PSRLDrm", 0|(1<<TID::MayLoad), 0xd2000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1310 = MMX_PSRLDrm
+ { 1311, 3, 1, 0, "MMX_PSRLDrr", 0, 0xd2000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1311 = MMX_PSRLDrr
+ { 1312, 3, 1, 0, "MMX_PSRLQri", 0, 0x73002112ULL, NULL, NULL, NULL, OperandInfo158 }, // Inst #1312 = MMX_PSRLQri
+ { 1313, 7, 1, 0, "MMX_PSRLQrm", 0|(1<<TID::MayLoad), 0xd3000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1313 = MMX_PSRLQrm
+ { 1314, 3, 1, 0, "MMX_PSRLQrr", 0, 0xd3000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1314 = MMX_PSRLQrr
+ { 1315, 3, 1, 0, "MMX_PSRLWri", 0, 0x71002112ULL, NULL, NULL, NULL, OperandInfo158 }, // Inst #1315 = MMX_PSRLWri
+ { 1316, 7, 1, 0, "MMX_PSRLWrm", 0|(1<<TID::MayLoad), 0xd1000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1316 = MMX_PSRLWrm
+ { 1317, 3, 1, 0, "MMX_PSRLWrr", 0, 0xd1000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1317 = MMX_PSRLWrr
+ { 1318, 7, 1, 0, "MMX_PSUBBrm", 0|(1<<TID::MayLoad), 0xf8000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1318 = MMX_PSUBBrm
+ { 1319, 3, 1, 0, "MMX_PSUBBrr", 0, 0xf8000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1319 = MMX_PSUBBrr
+ { 1320, 7, 1, 0, "MMX_PSUBDrm", 0|(1<<TID::MayLoad), 0xfa000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1320 = MMX_PSUBDrm
+ { 1321, 3, 1, 0, "MMX_PSUBDrr", 0, 0xfa000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1321 = MMX_PSUBDrr
+ { 1322, 7, 1, 0, "MMX_PSUBQrm", 0|(1<<TID::MayLoad), 0xfb000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1322 = MMX_PSUBQrm
+ { 1323, 3, 1, 0, "MMX_PSUBQrr", 0, 0xfb000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1323 = MMX_PSUBQrr
+ { 1324, 7, 1, 0, "MMX_PSUBSBrm", 0|(1<<TID::MayLoad), 0xe8000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1324 = MMX_PSUBSBrm
+ { 1325, 3, 1, 0, "MMX_PSUBSBrr", 0, 0xe8000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1325 = MMX_PSUBSBrr
+ { 1326, 7, 1, 0, "MMX_PSUBSWrm", 0|(1<<TID::MayLoad), 0xe9000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1326 = MMX_PSUBSWrm
+ { 1327, 3, 1, 0, "MMX_PSUBSWrr", 0, 0xe9000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1327 = MMX_PSUBSWrr
+ { 1328, 7, 1, 0, "MMX_PSUBUSBrm", 0|(1<<TID::MayLoad), 0xd8000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1328 = MMX_PSUBUSBrm
+ { 1329, 3, 1, 0, "MMX_PSUBUSBrr", 0, 0xd8000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1329 = MMX_PSUBUSBrr
+ { 1330, 7, 1, 0, "MMX_PSUBUSWrm", 0|(1<<TID::MayLoad), 0xd9000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1330 = MMX_PSUBUSWrm
+ { 1331, 3, 1, 0, "MMX_PSUBUSWrr", 0, 0xd9000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1331 = MMX_PSUBUSWrr
+ { 1332, 7, 1, 0, "MMX_PSUBWrm", 0|(1<<TID::MayLoad), 0xf9000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1332 = MMX_PSUBWrm
+ { 1333, 3, 1, 0, "MMX_PSUBWrr", 0, 0xf9000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1333 = MMX_PSUBWrr
+ { 1334, 7, 1, 0, "MMX_PUNPCKHBWrm", 0|(1<<TID::MayLoad), 0x68000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1334 = MMX_PUNPCKHBWrm
+ { 1335, 3, 1, 0, "MMX_PUNPCKHBWrr", 0, 0x68000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1335 = MMX_PUNPCKHBWrr
+ { 1336, 7, 1, 0, "MMX_PUNPCKHDQrm", 0|(1<<TID::MayLoad), 0x6a000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1336 = MMX_PUNPCKHDQrm
+ { 1337, 3, 1, 0, "MMX_PUNPCKHDQrr", 0, 0x6a000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1337 = MMX_PUNPCKHDQrr
+ { 1338, 7, 1, 0, "MMX_PUNPCKHWDrm", 0|(1<<TID::MayLoad), 0x69000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1338 = MMX_PUNPCKHWDrm
+ { 1339, 3, 1, 0, "MMX_PUNPCKHWDrr", 0, 0x69000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1339 = MMX_PUNPCKHWDrr
+ { 1340, 7, 1, 0, "MMX_PUNPCKLBWrm", 0|(1<<TID::MayLoad), 0x60000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1340 = MMX_PUNPCKLBWrm
+ { 1341, 3, 1, 0, "MMX_PUNPCKLBWrr", 0, 0x60000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1341 = MMX_PUNPCKLBWrr
+ { 1342, 7, 1, 0, "MMX_PUNPCKLDQrm", 0|(1<<TID::MayLoad), 0x62000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1342 = MMX_PUNPCKLDQrm
+ { 1343, 3, 1, 0, "MMX_PUNPCKLDQrr", 0, 0x62000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1343 = MMX_PUNPCKLDQrr
+ { 1344, 7, 1, 0, "MMX_PUNPCKLWDrm", 0|(1<<TID::MayLoad), 0x61000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1344 = MMX_PUNPCKLWDrm
+ { 1345, 3, 1, 0, "MMX_PUNPCKLWDrr", 0, 0x61000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1345 = MMX_PUNPCKLWDrr
+ { 1346, 7, 1, 0, "MMX_PXORrm", 0|(1<<TID::MayLoad), 0xef000106ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1346 = MMX_PXORrm
+ { 1347, 3, 1, 0, "MMX_PXORrr", 0|(1<<TID::Commutable), 0xef000105ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1347 = MMX_PXORrr
+ { 1348, 1, 1, 0, "MMX_V_SET0", 0|(1<<TID::Rematerializable), 0xef000120ULL, NULL, NULL, NULL, OperandInfo159 }, // Inst #1348 = MMX_V_SET0
+ { 1349, 1, 1, 0, "MMX_V_SETALLONES", 0|(1<<TID::Rematerializable), 0x76000120ULL, NULL, NULL, NULL, OperandInfo159 }, // Inst #1349 = MMX_V_SETALLONES
+ { 1350, 0, 0, 0, "MONITOR", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x1000125ULL, NULL, NULL, NULL, 0 }, // Inst #1350 = MONITOR
+ { 1351, 1, 1, 0, "MOV16ao16", 0|(1<<TID::UnmodeledSideEffects), 0xa300a041ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #1351 = MOV16ao16
+ { 1352, 6, 0, 0, "MOV16mi", 0|(1<<TID::MayStore), 0xc7006058ULL, NULL, NULL, NULL, OperandInfo10 }, // Inst #1352 = MOV16mi
+ { 1353, 6, 0, 0, "MOV16mr", 0|(1<<TID::MayStore), 0x89000044ULL, NULL, NULL, NULL, OperandInfo11 }, // Inst #1353 = MOV16mr
+ { 1354, 6, 1, 0, "MOV16ms", 0|(1<<TID::UnmodeledSideEffects), 0x8c000044ULL, NULL, NULL, NULL, OperandInfo160 }, // Inst #1354 = MOV16ms
+ { 1355, 1, 0, 0, "MOV16o16a", 0|(1<<TID::UnmodeledSideEffects), 0xa100a041ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #1355 = MOV16o16a
+ { 1356, 1, 1, 0, "MOV16r0", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0x31000060ULL, NULL, ImplicitList1, Barriers1, OperandInfo106 }, // Inst #1356 = MOV16r0
+ { 1357, 2, 1, 0, "MOV16ri", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0xb8006042ULL, NULL, NULL, NULL, OperandInfo63 }, // Inst #1357 = MOV16ri
+ { 1358, 6, 1, 0, "MOV16rm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x8b000046ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #1358 = MOV16rm
+ { 1359, 2, 1, 0, "MOV16rr", 0, 0x89000043ULL, NULL, NULL, NULL, OperandInfo56 }, // Inst #1359 = MOV16rr
+ { 1360, 2, 1, 0, "MOV16rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x8b000045ULL, NULL, NULL, NULL, OperandInfo56 }, // Inst #1360 = MOV16rr_REV
+ { 1361, 2, 1, 0, "MOV16rs", 0|(1<<TID::UnmodeledSideEffects), 0x8c000043ULL, NULL, NULL, NULL, OperandInfo161 }, // Inst #1361 = MOV16rs
+ { 1362, 6, 1, 0, "MOV16sm", 0|(1<<TID::UnmodeledSideEffects), 0x8e000046ULL, NULL, NULL, NULL, OperandInfo162 }, // Inst #1362 = MOV16sm
+ { 1363, 2, 1, 0, "MOV16sr", 0|(1<<TID::UnmodeledSideEffects), 0x8e000045ULL, NULL, NULL, NULL, OperandInfo163 }, // Inst #1363 = MOV16sr
+ { 1364, 1, 1, 0, "MOV32ao32", 0|(1<<TID::UnmodeledSideEffects), 0xa300a001ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #1364 = MOV32ao32
+ { 1365, 2, 1, 0, "MOV32cr", 0|(1<<TID::UnmodeledSideEffects), 0x22000105ULL, NULL, NULL, NULL, OperandInfo164 }, // Inst #1365 = MOV32cr
+ { 1366, 2, 1, 0, "MOV32dr", 0|(1<<TID::UnmodeledSideEffects), 0x23000105ULL, NULL, NULL, NULL, OperandInfo165 }, // Inst #1366 = MOV32dr
+ { 1367, 6, 0, 0, "MOV32mi", 0|(1<<TID::MayStore), 0xc700a018ULL, NULL, NULL, NULL, OperandInfo10 }, // Inst #1367 = MOV32mi
+ { 1368, 6, 0, 0, "MOV32mr", 0|(1<<TID::MayStore), 0x89000004ULL, NULL, NULL, NULL, OperandInfo15 }, // Inst #1368 = MOV32mr
+ { 1369, 6, 0, 0, "MOV32mr_TC", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x89000004ULL, NULL, NULL, NULL, OperandInfo166 }, // Inst #1369 = MOV32mr_TC
+ { 1370, 6, 1, 0, "MOV32ms", 0|(1<<TID::UnmodeledSideEffects), 0x8c000004ULL, NULL, NULL, NULL, OperandInfo160 }, // Inst #1370 = MOV32ms
+ { 1371, 1, 0, 0, "MOV32o32a", 0|(1<<TID::UnmodeledSideEffects), 0xa100a001ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #1371 = MOV32o32a
+ { 1372, 1, 1, 0, "MOV32r0", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0x31000020ULL, NULL, ImplicitList1, Barriers1, OperandInfo66 }, // Inst #1372 = MOV32r0
+ { 1373, 2, 1, 0, "MOV32rc", 0|(1<<TID::UnmodeledSideEffects), 0x20000103ULL, NULL, NULL, NULL, OperandInfo167 }, // Inst #1373 = MOV32rc
+ { 1374, 2, 1, 0, "MOV32rd", 0|(1<<TID::UnmodeledSideEffects), 0x21000103ULL, NULL, NULL, NULL, OperandInfo168 }, // Inst #1374 = MOV32rd
+ { 1375, 2, 1, 0, "MOV32ri", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0xb800a002ULL, NULL, NULL, NULL, OperandInfo64 }, // Inst #1375 = MOV32ri
+ { 1376, 6, 1, 0, "MOV32rm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x8b000006ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #1376 = MOV32rm
+ { 1377, 6, 1, 0, "MOV32rm_TC", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable)|(1<<TID::UnmodeledSideEffects), 0x8b000006ULL, NULL, NULL, NULL, OperandInfo169 }, // Inst #1377 = MOV32rm_TC
+ { 1378, 2, 1, 0, "MOV32rr", 0, 0x89000003ULL, NULL, NULL, NULL, OperandInfo58 }, // Inst #1378 = MOV32rr
+ { 1379, 2, 1, 0, "MOV32rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x8b000005ULL, NULL, NULL, NULL, OperandInfo58 }, // Inst #1379 = MOV32rr_REV
+ { 1380, 2, 1, 0, "MOV32rr_TC", 0, 0x89000003ULL, NULL, NULL, NULL, OperandInfo170 }, // Inst #1380 = MOV32rr_TC
+ { 1381, 2, 1, 0, "MOV32rs", 0|(1<<TID::UnmodeledSideEffects), 0x8c000003ULL, NULL, NULL, NULL, OperandInfo171 }, // Inst #1381 = MOV32rs
+ { 1382, 6, 1, 0, "MOV32sm", 0|(1<<TID::UnmodeledSideEffects), 0x8e000006ULL, NULL, NULL, NULL, OperandInfo162 }, // Inst #1382 = MOV32sm
+ { 1383, 2, 1, 0, "MOV32sr", 0|(1<<TID::UnmodeledSideEffects), 0x8e000005ULL, NULL, NULL, NULL, OperandInfo172 }, // Inst #1383 = MOV32sr
+ { 1384, 6, 1, 0, "MOV64FSrm", 0|(1<<TID::MayLoad), 0x8b101006ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1384 = MOV64FSrm
+ { 1385, 6, 1, 0, "MOV64GSrm", 0|(1<<TID::MayLoad), 0x8b201006ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1385 = MOV64GSrm
+ { 1386, 2, 1, 0, "MOV64cr", 0|(1<<TID::UnmodeledSideEffects), 0x22000105ULL, NULL, NULL, NULL, OperandInfo173 }, // Inst #1386 = MOV64cr
+ { 1387, 2, 1, 0, "MOV64dr", 0|(1<<TID::UnmodeledSideEffects), 0x23000105ULL, NULL, NULL, NULL, OperandInfo174 }, // Inst #1387 = MOV64dr
+ { 1388, 6, 0, 0, "MOV64mi32", 0|(1<<TID::MayStore), 0xc700b018ULL, NULL, NULL, NULL, OperandInfo10 }, // Inst #1388 = MOV64mi32
+ { 1389, 6, 0, 0, "MOV64mr", 0|(1<<TID::MayStore), 0x89001004ULL, NULL, NULL, NULL, OperandInfo19 }, // Inst #1389 = MOV64mr
+ { 1390, 6, 0, 0, "MOV64mr_TC", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x89001004ULL, NULL, NULL, NULL, OperandInfo175 }, // Inst #1390 = MOV64mr_TC
+ { 1391, 6, 1, 0, "MOV64ms", 0|(1<<TID::UnmodeledSideEffects), 0x8c001004ULL, NULL, NULL, NULL, OperandInfo160 }, // Inst #1391 = MOV64ms
+ { 1392, 1, 1, 0, "MOV64r0", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0x31000020ULL, NULL, ImplicitList1, Barriers1, OperandInfo67 }, // Inst #1392 = MOV64r0
+ { 1393, 2, 1, 0, "MOV64rc", 0|(1<<TID::UnmodeledSideEffects), 0x20000103ULL, NULL, NULL, NULL, OperandInfo176 }, // Inst #1393 = MOV64rc
+ { 1394, 2, 1, 0, "MOV64rd", 0|(1<<TID::UnmodeledSideEffects), 0x21000103ULL, NULL, NULL, NULL, OperandInfo177 }, // Inst #1394 = MOV64rd
+ { 1395, 2, 1, 0, "MOV64ri", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0xb800f002ULL, NULL, NULL, NULL, OperandInfo65 }, // Inst #1395 = MOV64ri
+ { 1396, 2, 1, 0, "MOV64ri32", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0xc700b010ULL, NULL, NULL, NULL, OperandInfo65 }, // Inst #1396 = MOV64ri32
+ { 1397, 2, 1, 0, "MOV64ri64i32", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0xb800a002ULL, NULL, NULL, NULL, OperandInfo65 }, // Inst #1397 = MOV64ri64i32
+ { 1398, 2, 1, 0, "MOV64ri_alt", 0|(1<<TID::UnmodeledSideEffects), 0xb800f002ULL, NULL, NULL, NULL, OperandInfo65 }, // Inst #1398 = MOV64ri_alt
+ { 1399, 6, 1, 0, "MOV64rm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x8b001006ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1399 = MOV64rm
+ { 1400, 6, 1, 0, "MOV64rm_TC", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable)|(1<<TID::UnmodeledSideEffects), 0x8b001006ULL, NULL, NULL, NULL, OperandInfo178 }, // Inst #1400 = MOV64rm_TC
+ { 1401, 2, 1, 0, "MOV64rr", 0, 0x89001003ULL, NULL, NULL, NULL, OperandInfo60 }, // Inst #1401 = MOV64rr
+ { 1402, 2, 1, 0, "MOV64rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x8b001005ULL, NULL, NULL, NULL, OperandInfo60 }, // Inst #1402 = MOV64rr_REV
+ { 1403, 2, 1, 0, "MOV64rr_TC", 0, 0x89001003ULL, NULL, NULL, NULL, OperandInfo179 }, // Inst #1403 = MOV64rr_TC
+ { 1404, 2, 1, 0, "MOV64rs", 0|(1<<TID::UnmodeledSideEffects), 0x8c001003ULL, NULL, NULL, NULL, OperandInfo180 }, // Inst #1404 = MOV64rs
+ { 1405, 6, 1, 0, "MOV64sm", 0|(1<<TID::UnmodeledSideEffects), 0x8e001006ULL, NULL, NULL, NULL, OperandInfo162 }, // Inst #1405 = MOV64sm
+ { 1406, 2, 1, 0, "MOV64sr", 0|(1<<TID::UnmodeledSideEffects), 0x8e001005ULL, NULL, NULL, NULL, OperandInfo181 }, // Inst #1406 = MOV64sr
+ { 1407, 2, 1, 0, "MOV64toPQIrr", 0, 0x6e801145ULL, NULL, NULL, NULL, OperandInfo182 }, // Inst #1407 = MOV64toPQIrr
+ { 1408, 6, 1, 0, "MOV64toSDrm", 0|(1<<TID::MayLoad), 0x7e400c06ULL, NULL, NULL, NULL, OperandInfo94 }, // Inst #1408 = MOV64toSDrm
+ { 1409, 2, 1, 0, "MOV64toSDrr", 0, 0x6e801145ULL, NULL, NULL, NULL, OperandInfo95 }, // Inst #1409 = MOV64toSDrr
+ { 1410, 1, 1, 0, "MOV8ao8", 0|(1<<TID::UnmodeledSideEffects), 0xa200a001ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #1410 = MOV8ao8
+ { 1411, 6, 0, 0, "MOV8mi", 0|(1<<TID::MayStore), 0xc6002018ULL, NULL, NULL, NULL, OperandInfo10 }, // Inst #1411 = MOV8mi
+ { 1412, 6, 0, 0, "MOV8mr", 0|(1<<TID::MayStore), 0x88000004ULL, NULL, NULL, NULL, OperandInfo24 }, // Inst #1412 = MOV8mr
+ { 1413, 6, 0, 0, "MOV8mr_NOREX", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x88000004ULL, NULL, NULL, NULL, OperandInfo183 }, // Inst #1413 = MOV8mr_NOREX
+ { 1414, 1, 0, 0, "MOV8o8a", 0|(1<<TID::UnmodeledSideEffects), 0xa000a001ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #1414 = MOV8o8a
+ { 1415, 1, 1, 0, "MOV8r0", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0x30000020ULL, NULL, ImplicitList1, Barriers1, OperandInfo107 }, // Inst #1415 = MOV8r0
+ { 1416, 2, 1, 0, "MOV8ri", 0|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0xb0002002ULL, NULL, NULL, NULL, OperandInfo81 }, // Inst #1416 = MOV8ri
+ { 1417, 6, 1, 0, "MOV8rm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x8a000006ULL, NULL, NULL, NULL, OperandInfo82 }, // Inst #1417 = MOV8rm
+ { 1418, 6, 1, 0, "MOV8rm_NOREX", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable)|(1<<TID::UnmodeledSideEffects), 0x8a000006ULL, NULL, NULL, NULL, OperandInfo184 }, // Inst #1418 = MOV8rm_NOREX
+ { 1419, 2, 1, 0, "MOV8rr", 0, 0x88000003ULL, NULL, NULL, NULL, OperandInfo83 }, // Inst #1419 = MOV8rr
+ { 1420, 2, 1, 0, "MOV8rr_NOREX", 0, 0x88000003ULL, NULL, NULL, NULL, OperandInfo185 }, // Inst #1420 = MOV8rr_NOREX
+ { 1421, 2, 1, 0, "MOV8rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x8a000005ULL, NULL, NULL, NULL, OperandInfo83 }, // Inst #1421 = MOV8rr_REV
+ { 1422, 6, 0, 0, "MOVAPDmr", 0|(1<<TID::MayStore), 0x29800144ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1422 = MOVAPDmr
+ { 1423, 6, 1, 0, "MOVAPDrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x28800146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1423 = MOVAPDrm
+ { 1424, 2, 1, 0, "MOVAPDrr", 0, 0x28800145ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1424 = MOVAPDrr
+ { 1425, 6, 0, 0, "MOVAPSmr", 0|(1<<TID::MayStore), 0x29400104ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1425 = MOVAPSmr
+ { 1426, 6, 1, 0, "MOVAPSrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x28400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1426 = MOVAPSrm
+ { 1427, 2, 1, 0, "MOVAPSrr", 0, 0x28400105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1427 = MOVAPSrr
+ { 1428, 6, 1, 0, "MOVDDUPrm", 0|(1<<TID::MayLoad), 0x12800b06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1428 = MOVDDUPrm
+ { 1429, 2, 1, 0, "MOVDDUPrr", 0, 0x12800b05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1429 = MOVDDUPrr
+ { 1430, 6, 1, 0, "MOVDI2PDIrm", 0|(1<<TID::MayLoad), 0x6e800146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1430 = MOVDI2PDIrm
+ { 1431, 2, 1, 0, "MOVDI2PDIrr", 0, 0x6e800145ULL, NULL, NULL, NULL, OperandInfo187 }, // Inst #1431 = MOVDI2PDIrr
+ { 1432, 6, 1, 0, "MOVDI2SSrm", 0|(1<<TID::MayLoad), 0x6e800146ULL, NULL, NULL, NULL, OperandInfo92 }, // Inst #1432 = MOVDI2SSrm
+ { 1433, 2, 1, 0, "MOVDI2SSrr", 0, 0x6e800145ULL, NULL, NULL, NULL, OperandInfo98 }, // Inst #1433 = MOVDI2SSrr
+ { 1434, 6, 0, 0, "MOVDQAmr", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x7fc00144ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1434 = MOVDQAmr
+ { 1435, 6, 1, 0, "MOVDQArm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x6fc00146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1435 = MOVDQArm
+ { 1436, 2, 1, 0, "MOVDQArr", 0, 0x6fc00145ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1436 = MOVDQArr
+ { 1437, 6, 0, 0, "MOVDQUmr", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x7fc00c04ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1437 = MOVDQUmr
+ { 1438, 6, 0, 0, "MOVDQUmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x7fc00c04ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1438 = MOVDQUmr_Int
+ { 1439, 6, 1, 0, "MOVDQUrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x6fc00c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1439 = MOVDQUrm
+ { 1440, 6, 1, 0, "MOVDQUrm_Int", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x6fc00c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1440 = MOVDQUrm_Int
+ { 1441, 3, 1, 0, "MOVHLPSrr", 0, 0x12400105ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1441 = MOVHLPSrr
+ { 1442, 6, 0, 0, "MOVHPDmr", 0|(1<<TID::MayStore), 0x17800144ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1442 = MOVHPDmr
+ { 1443, 7, 1, 0, "MOVHPDrm", 0|(1<<TID::MayLoad), 0x16800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1443 = MOVHPDrm
+ { 1444, 6, 0, 0, "MOVHPSmr", 0|(1<<TID::MayStore), 0x17400104ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1444 = MOVHPSmr
+ { 1445, 7, 1, 0, "MOVHPSrm", 0|(1<<TID::MayLoad), 0x16400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1445 = MOVHPSrm
+ { 1446, 3, 1, 0, "MOVLHPSrr", 0, 0x16400105ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1446 = MOVLHPSrr
+ { 1447, 6, 0, 0, "MOVLPDmr", 0|(1<<TID::MayStore), 0x13800144ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1447 = MOVLPDmr
+ { 1448, 7, 1, 0, "MOVLPDrm", 0|(1<<TID::MayLoad), 0x12800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1448 = MOVLPDrm
+ { 1449, 6, 0, 0, "MOVLPSmr", 0|(1<<TID::MayStore), 0x13400104ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1449 = MOVLPSmr
+ { 1450, 7, 1, 0, "MOVLPSrm", 0|(1<<TID::MayLoad), 0x12400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1450 = MOVLPSrm
+ { 1451, 6, 0, 0, "MOVLQ128mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xd6800144ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1451 = MOVLQ128mr
+ { 1452, 2, 1, 0, "MOVMSKPDrr", 0, 0x50800145ULL, NULL, NULL, NULL, OperandInfo133 }, // Inst #1452 = MOVMSKPDrr
+ { 1453, 2, 1, 0, "MOVMSKPSrr", 0, 0x50400105ULL, NULL, NULL, NULL, OperandInfo133 }, // Inst #1453 = MOVMSKPSrr
+ { 1454, 6, 1, 0, "MOVNTDQArm", 0|(1<<TID::MayLoad), 0x2ac00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1454 = MOVNTDQArm
+ { 1455, 6, 0, 0, "MOVNTDQ_64mr", 0|(1<<TID::MayStore), 0xe7800144ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1455 = MOVNTDQ_64mr
+ { 1456, 6, 0, 0, "MOVNTDQmr", 0|(1<<TID::MayStore), 0xe7c00144ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1456 = MOVNTDQmr
+ { 1457, 6, 0, 0, "MOVNTDQmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xe7c00144ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1457 = MOVNTDQmr_Int
+ { 1458, 6, 0, 0, "MOVNTI_64mr", 0|(1<<TID::MayStore), 0xc3001104ULL, NULL, NULL, NULL, OperandInfo19 }, // Inst #1458 = MOVNTI_64mr
+ { 1459, 6, 0, 0, "MOVNTImr", 0|(1<<TID::MayStore), 0xc3000104ULL, NULL, NULL, NULL, OperandInfo15 }, // Inst #1459 = MOVNTImr
+ { 1460, 6, 0, 0, "MOVNTImr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xc3000104ULL, NULL, NULL, NULL, OperandInfo15 }, // Inst #1460 = MOVNTImr_Int
+ { 1461, 6, 0, 0, "MOVNTPDmr", 0|(1<<TID::MayStore), 0x2b800144ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1461 = MOVNTPDmr
+ { 1462, 6, 0, 0, "MOVNTPDmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x2b800144ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1462 = MOVNTPDmr_Int
+ { 1463, 6, 0, 0, "MOVNTPSmr", 0|(1<<TID::MayStore), 0x2b400104ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1463 = MOVNTPSmr
+ { 1464, 6, 0, 0, "MOVNTPSmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x2b400104ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1464 = MOVNTPSmr_Int
+ { 1465, 2, 1, 0, "MOVPC32r", 0|(1<<TID::NotDuplicable), 0xe800a000ULL, ImplicitList2, NULL, NULL, OperandInfo64 }, // Inst #1465 = MOVPC32r
+ { 1466, 6, 0, 0, "MOVPDI2DImr", 0|(1<<TID::MayStore), 0x7e800144ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1466 = MOVPDI2DImr
+ { 1467, 2, 1, 0, "MOVPDI2DIrr", 0, 0x7e800143ULL, NULL, NULL, NULL, OperandInfo133 }, // Inst #1467 = MOVPDI2DIrr
+ { 1468, 6, 0, 0, "MOVPQI2QImr", 0|(1<<TID::MayStore), 0xd6800144ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1468 = MOVPQI2QImr
+ { 1469, 2, 1, 0, "MOVPQIto64rr", 0, 0x7e801143ULL, NULL, NULL, NULL, OperandInfo91 }, // Inst #1469 = MOVPQIto64rr
+ { 1470, 6, 1, 0, "MOVQI2PQIrm", 0|(1<<TID::MayLoad), 0x7e000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1470 = MOVQI2PQIrm
+ { 1471, 2, 1, 0, "MOVQxrxr", 0|(1<<TID::UnmodeledSideEffects), 0x7e000c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1471 = MOVQxrxr
+ { 1472, 0, 0, 0, "MOVSB", 0|(1<<TID::UnmodeledSideEffects), 0xa4000001ULL, ImplicitList37, ImplicitList38, NULL, 0 }, // Inst #1472 = MOVSB
+ { 1473, 0, 0, 0, "MOVSD", 0|(1<<TID::UnmodeledSideEffects), 0xa5000001ULL, ImplicitList37, ImplicitList38, NULL, 0 }, // Inst #1473 = MOVSD
+ { 1474, 6, 0, 0, "MOVSDmr", 0|(1<<TID::MayStore), 0x11000b04ULL, NULL, NULL, NULL, OperandInfo188 }, // Inst #1474 = MOVSDmr
+ { 1475, 6, 1, 0, "MOVSDrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x10000b06ULL, NULL, NULL, NULL, OperandInfo94 }, // Inst #1475 = MOVSDrm
+ { 1476, 3, 1, 0, "MOVSDrr", 0, 0x10000b05ULL, NULL, NULL, NULL, OperandInfo189 }, // Inst #1476 = MOVSDrr
+ { 1477, 6, 0, 0, "MOVSDto64mr", 0|(1<<TID::MayStore), 0x7e801144ULL, NULL, NULL, NULL, OperandInfo188 }, // Inst #1477 = MOVSDto64mr
+ { 1478, 2, 1, 0, "MOVSDto64rr", 0, 0x7e801143ULL, NULL, NULL, NULL, OperandInfo102 }, // Inst #1478 = MOVSDto64rr
+ { 1479, 6, 1, 0, "MOVSHDUPrm", 0|(1<<TID::MayLoad), 0x16400c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1479 = MOVSHDUPrm
+ { 1480, 2, 1, 0, "MOVSHDUPrr", 0, 0x16400c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1480 = MOVSHDUPrr
+ { 1481, 6, 1, 0, "MOVSLDUPrm", 0|(1<<TID::MayLoad), 0x12400c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1481 = MOVSLDUPrm
+ { 1482, 2, 1, 0, "MOVSLDUPrr", 0, 0x12400c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1482 = MOVSLDUPrr
+ { 1483, 0, 0, 0, "MOVSQ", 0|(1<<TID::UnmodeledSideEffects), 0xa5001001ULL, ImplicitList37, ImplicitList38, NULL, 0 }, // Inst #1483 = MOVSQ
+ { 1484, 6, 0, 0, "MOVSS2DImr", 0|(1<<TID::MayStore), 0x7e800144ULL, NULL, NULL, NULL, OperandInfo190 }, // Inst #1484 = MOVSS2DImr
+ { 1485, 2, 1, 0, "MOVSS2DIrr", 0, 0x7e800143ULL, NULL, NULL, NULL, OperandInfo101 }, // Inst #1485 = MOVSS2DIrr
+ { 1486, 6, 0, 0, "MOVSSmr", 0|(1<<TID::MayStore), 0x11000c04ULL, NULL, NULL, NULL, OperandInfo190 }, // Inst #1486 = MOVSSmr
+ { 1487, 6, 1, 0, "MOVSSrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x10000c06ULL, NULL, NULL, NULL, OperandInfo92 }, // Inst #1487 = MOVSSrm
+ { 1488, 3, 1, 0, "MOVSSrr", 0, 0x10000c05ULL, NULL, NULL, NULL, OperandInfo191 }, // Inst #1488 = MOVSSrr
+ { 1489, 0, 0, 0, "MOVSW", 0|(1<<TID::UnmodeledSideEffects), 0xa5000041ULL, ImplicitList37, ImplicitList38, NULL, 0 }, // Inst #1489 = MOVSW
+ { 1490, 6, 1, 0, "MOVSX16rm8", 0|(1<<TID::MayLoad), 0xbe000106ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #1490 = MOVSX16rm8
+ { 1491, 6, 1, 0, "MOVSX16rm8W", 0|(1<<TID::UnmodeledSideEffects), 0xbe000146ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #1491 = MOVSX16rm8W
+ { 1492, 2, 1, 0, "MOVSX16rr8", 0, 0xbe000105ULL, NULL, NULL, NULL, OperandInfo192 }, // Inst #1492 = MOVSX16rr8
+ { 1493, 2, 1, 0, "MOVSX16rr8W", 0|(1<<TID::UnmodeledSideEffects), 0xbe000145ULL, NULL, NULL, NULL, OperandInfo192 }, // Inst #1493 = MOVSX16rr8W
+ { 1494, 6, 1, 0, "MOVSX32rm16", 0|(1<<TID::MayLoad), 0xbf000106ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #1494 = MOVSX32rm16
+ { 1495, 6, 1, 0, "MOVSX32rm8", 0|(1<<TID::MayLoad), 0xbe000106ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #1495 = MOVSX32rm8
+ { 1496, 2, 1, 0, "MOVSX32rr16", 0, 0xbf000105ULL, NULL, NULL, NULL, OperandInfo193 }, // Inst #1496 = MOVSX32rr16
+ { 1497, 2, 1, 0, "MOVSX32rr8", 0, 0xbe000105ULL, NULL, NULL, NULL, OperandInfo194 }, // Inst #1497 = MOVSX32rr8
+ { 1498, 6, 1, 0, "MOVSX64rm16", 0|(1<<TID::MayLoad), 0xbf001106ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1498 = MOVSX64rm16
+ { 1499, 6, 1, 0, "MOVSX64rm32", 0|(1<<TID::MayLoad), 0x63001006ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1499 = MOVSX64rm32
+ { 1500, 6, 1, 0, "MOVSX64rm8", 0|(1<<TID::MayLoad), 0xbe001106ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1500 = MOVSX64rm8
+ { 1501, 2, 1, 0, "MOVSX64rr16", 0, 0xbf001105ULL, NULL, NULL, NULL, OperandInfo195 }, // Inst #1501 = MOVSX64rr16
+ { 1502, 2, 1, 0, "MOVSX64rr32", 0, 0x63001005ULL, NULL, NULL, NULL, OperandInfo141 }, // Inst #1502 = MOVSX64rr32
+ { 1503, 2, 1, 0, "MOVSX64rr8", 0, 0xbe001105ULL, NULL, NULL, NULL, OperandInfo196 }, // Inst #1503 = MOVSX64rr8
+ { 1504, 6, 0, 0, "MOVUPDmr", 0|(1<<TID::MayStore), 0x11800144ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1504 = MOVUPDmr
+ { 1505, 6, 0, 0, "MOVUPDmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x11800144ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1505 = MOVUPDmr_Int
+ { 1506, 6, 1, 0, "MOVUPDrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x10800146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1506 = MOVUPDrm
+ { 1507, 6, 1, 0, "MOVUPDrm_Int", 0|(1<<TID::MayLoad), 0x10800146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1507 = MOVUPDrm_Int
+ { 1508, 2, 1, 0, "MOVUPDrr", 0, 0x10800145ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1508 = MOVUPDrr
+ { 1509, 6, 0, 0, "MOVUPSmr", 0|(1<<TID::MayStore), 0x11400104ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1509 = MOVUPSmr
+ { 1510, 6, 0, 0, "MOVUPSmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x11400104ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #1510 = MOVUPSmr_Int
+ { 1511, 6, 1, 0, "MOVUPSrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x10400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1511 = MOVUPSrm
+ { 1512, 6, 1, 0, "MOVUPSrm_Int", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x10400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1512 = MOVUPSrm_Int
+ { 1513, 2, 1, 0, "MOVUPSrr", 0, 0x10400105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1513 = MOVUPSrr
+ { 1514, 6, 1, 0, "MOVZDI2PDIrm", 0|(1<<TID::MayLoad), 0x6e800146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1514 = MOVZDI2PDIrm
+ { 1515, 2, 1, 0, "MOVZDI2PDIrr", 0, 0x6e800145ULL, NULL, NULL, NULL, OperandInfo187 }, // Inst #1515 = MOVZDI2PDIrr
+ { 1516, 6, 1, 0, "MOVZPQILo2PQIrm", 0|(1<<TID::MayLoad), 0x7e000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1516 = MOVZPQILo2PQIrm
+ { 1517, 2, 1, 0, "MOVZPQILo2PQIrr", 0, 0x7e000c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1517 = MOVZPQILo2PQIrr
+ { 1518, 6, 1, 0, "MOVZQI2PQIrm", 0|(1<<TID::MayLoad), 0x7e000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1518 = MOVZQI2PQIrm
+ { 1519, 2, 1, 0, "MOVZQI2PQIrr", 0, 0x6e801145ULL, NULL, NULL, NULL, OperandInfo182 }, // Inst #1519 = MOVZQI2PQIrr
+ { 1520, 6, 1, 0, "MOVZX16rm8", 0|(1<<TID::MayLoad), 0xb6000106ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #1520 = MOVZX16rm8
+ { 1521, 6, 1, 0, "MOVZX16rm8W", 0|(1<<TID::UnmodeledSideEffects), 0xb6000146ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #1521 = MOVZX16rm8W
+ { 1522, 2, 1, 0, "MOVZX16rr8", 0, 0xb6000105ULL, NULL, NULL, NULL, OperandInfo192 }, // Inst #1522 = MOVZX16rr8
+ { 1523, 2, 1, 0, "MOVZX16rr8W", 0|(1<<TID::UnmodeledSideEffects), 0xb6000145ULL, NULL, NULL, NULL, OperandInfo192 }, // Inst #1523 = MOVZX16rr8W
+ { 1524, 6, 1, 0, "MOVZX32_NOREXrm8", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xb6000106ULL, NULL, NULL, NULL, OperandInfo197 }, // Inst #1524 = MOVZX32_NOREXrm8
+ { 1525, 2, 1, 0, "MOVZX32_NOREXrr8", 0|(1<<TID::UnmodeledSideEffects), 0xb6000105ULL, NULL, NULL, NULL, OperandInfo198 }, // Inst #1525 = MOVZX32_NOREXrr8
+ { 1526, 6, 1, 0, "MOVZX32rm16", 0|(1<<TID::MayLoad), 0xb7000106ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #1526 = MOVZX32rm16
+ { 1527, 6, 1, 0, "MOVZX32rm8", 0|(1<<TID::MayLoad), 0xb6000106ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #1527 = MOVZX32rm8
+ { 1528, 2, 1, 0, "MOVZX32rr16", 0, 0xb7000105ULL, NULL, NULL, NULL, OperandInfo193 }, // Inst #1528 = MOVZX32rr16
+ { 1529, 2, 1, 0, "MOVZX32rr8", 0, 0xb6000105ULL, NULL, NULL, NULL, OperandInfo194 }, // Inst #1529 = MOVZX32rr8
+ { 1530, 6, 1, 0, "MOVZX64rm16", 0|(1<<TID::MayLoad), 0xb7000106ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1530 = MOVZX64rm16
+ { 1531, 6, 1, 0, "MOVZX64rm16_Q", 0|(1<<TID::UnmodeledSideEffects), 0xb7001106ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1531 = MOVZX64rm16_Q
+ { 1532, 6, 1, 0, "MOVZX64rm32", 0|(1<<TID::MayLoad), 0x8b000006ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1532 = MOVZX64rm32
+ { 1533, 6, 1, 0, "MOVZX64rm8", 0|(1<<TID::MayLoad), 0xb6000106ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1533 = MOVZX64rm8
+ { 1534, 6, 1, 0, "MOVZX64rm8_Q", 0|(1<<TID::UnmodeledSideEffects), 0xb6001106ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1534 = MOVZX64rm8_Q
+ { 1535, 2, 1, 0, "MOVZX64rr16", 0, 0xb7000105ULL, NULL, NULL, NULL, OperandInfo195 }, // Inst #1535 = MOVZX64rr16
+ { 1536, 2, 1, 0, "MOVZX64rr16_Q", 0|(1<<TID::UnmodeledSideEffects), 0xb7001105ULL, NULL, NULL, NULL, OperandInfo195 }, // Inst #1536 = MOVZX64rr16_Q
+ { 1537, 2, 1, 0, "MOVZX64rr32", 0, 0x89000003ULL, NULL, NULL, NULL, OperandInfo141 }, // Inst #1537 = MOVZX64rr32
+ { 1538, 2, 1, 0, "MOVZX64rr8", 0, 0xb6000105ULL, NULL, NULL, NULL, OperandInfo196 }, // Inst #1538 = MOVZX64rr8
+ { 1539, 2, 1, 0, "MOVZX64rr8_Q", 0|(1<<TID::UnmodeledSideEffects), 0xb6001105ULL, NULL, NULL, NULL, OperandInfo196 }, // Inst #1539 = MOVZX64rr8_Q
+ { 1540, 2, 1, 0, "MOV_Fp3232", 0, 0x70000ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #1540 = MOV_Fp3232
+ { 1541, 2, 1, 0, "MOV_Fp3264", 0, 0x70000ULL, NULL, NULL, NULL, OperandInfo199 }, // Inst #1541 = MOV_Fp3264
+ { 1542, 2, 1, 0, "MOV_Fp3280", 0, 0x70000ULL, NULL, NULL, NULL, OperandInfo200 }, // Inst #1542 = MOV_Fp3280
+ { 1543, 2, 1, 0, "MOV_Fp6432", 0, 0x70000ULL, NULL, NULL, NULL, OperandInfo201 }, // Inst #1543 = MOV_Fp6432
+ { 1544, 2, 1, 0, "MOV_Fp6464", 0, 0x70000ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #1544 = MOV_Fp6464
+ { 1545, 2, 1, 0, "MOV_Fp6480", 0, 0x70000ULL, NULL, NULL, NULL, OperandInfo202 }, // Inst #1545 = MOV_Fp6480
+ { 1546, 2, 1, 0, "MOV_Fp8032", 0, 0x70000ULL, NULL, NULL, NULL, OperandInfo203 }, // Inst #1546 = MOV_Fp8032
+ { 1547, 2, 1, 0, "MOV_Fp8064", 0, 0x70000ULL, NULL, NULL, NULL, OperandInfo204 }, // Inst #1547 = MOV_Fp8064
+ { 1548, 2, 1, 0, "MOV_Fp8080", 0, 0x70000ULL, NULL, NULL, NULL, OperandInfo9 }, // Inst #1548 = MOV_Fp8080
+ { 1549, 8, 1, 0, "MPSADBWrmi", 0|(1<<TID::MayLoad), 0x42c02e46ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #1549 = MPSADBWrmi
+ { 1550, 4, 1, 0, "MPSADBWrri", 0, 0x42c02e45ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #1550 = MPSADBWrri
+ { 1551, 5, 0, 0, "MUL16m", 0|(1<<TID::MayLoad), 0xf700005cULL, ImplicitList12, ImplicitList21, Barriers1, OperandInfo34 }, // Inst #1551 = MUL16m
+ { 1552, 1, 0, 0, "MUL16r", 0, 0xf7000054ULL, ImplicitList12, ImplicitList21, Barriers1, OperandInfo106 }, // Inst #1552 = MUL16r
+ { 1553, 5, 0, 0, "MUL32m", 0|(1<<TID::MayLoad), 0xf700001cULL, ImplicitList13, ImplicitList18, Barriers6, OperandInfo34 }, // Inst #1553 = MUL32m
+ { 1554, 1, 0, 0, "MUL32r", 0, 0xf7000014ULL, ImplicitList13, ImplicitList18, Barriers6, OperandInfo66 }, // Inst #1554 = MUL32r
+ { 1555, 5, 0, 0, "MUL64m", 0|(1<<TID::MayLoad), 0xf700101cULL, ImplicitList15, ImplicitList17, Barriers1, OperandInfo34 }, // Inst #1555 = MUL64m
+ { 1556, 1, 0, 0, "MUL64r", 0, 0xf7001014ULL, ImplicitList15, ImplicitList17, Barriers1, OperandInfo67 }, // Inst #1556 = MUL64r
+ { 1557, 5, 0, 0, "MUL8m", 0|(1<<TID::MayLoad), 0xf600001cULL, ImplicitList11, ImplicitList22, Barriers1, OperandInfo34 }, // Inst #1557 = MUL8m
+ { 1558, 1, 0, 0, "MUL8r", 0, 0xf6000014ULL, ImplicitList11, ImplicitList22, Barriers1, OperandInfo107 }, // Inst #1558 = MUL8r
+ { 1559, 7, 1, 0, "MULPDrm", 0|(1<<TID::MayLoad), 0x59800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1559 = MULPDrm
+ { 1560, 3, 1, 0, "MULPDrr", 0|(1<<TID::Commutable), 0x59800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1560 = MULPDrr
+ { 1561, 7, 1, 0, "MULPSrm", 0|(1<<TID::MayLoad), 0x59400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1561 = MULPSrm
+ { 1562, 3, 1, 0, "MULPSrr", 0|(1<<TID::Commutable), 0x59400105ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1562 = MULPSrr
+ { 1563, 7, 1, 0, "MULSDrm", 0|(1<<TID::MayLoad), 0x59000b06ULL, NULL, NULL, NULL, OperandInfo30 }, // Inst #1563 = MULSDrm
+ { 1564, 7, 1, 0, "MULSDrm_Int", 0|(1<<TID::MayLoad), 0x59000b06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1564 = MULSDrm_Int
+ { 1565, 3, 1, 0, "MULSDrr", 0|(1<<TID::Commutable), 0x59000b05ULL, NULL, NULL, NULL, OperandInfo31 }, // Inst #1565 = MULSDrr
+ { 1566, 3, 1, 0, "MULSDrr_Int", 0, 0x59000b05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1566 = MULSDrr_Int
+ { 1567, 7, 1, 0, "MULSSrm", 0|(1<<TID::MayLoad), 0x59000c06ULL, NULL, NULL, NULL, OperandInfo32 }, // Inst #1567 = MULSSrm
+ { 1568, 7, 1, 0, "MULSSrm_Int", 0|(1<<TID::MayLoad), 0x59000c06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1568 = MULSSrm_Int
+ { 1569, 3, 1, 0, "MULSSrr", 0|(1<<TID::Commutable), 0x59000c05ULL, NULL, NULL, NULL, OperandInfo33 }, // Inst #1569 = MULSSrr
+ { 1570, 3, 1, 0, "MULSSrr_Int", 0, 0x59000c05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1570 = MULSSrr_Int
+ { 1571, 5, 0, 0, "MUL_F32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xd8000019ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1571 = MUL_F32m
+ { 1572, 5, 0, 0, "MUL_F64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xdc000019ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1572 = MUL_F64m
+ { 1573, 5, 0, 0, "MUL_FI16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xde000019ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1573 = MUL_FI16m
+ { 1574, 5, 0, 0, "MUL_FI32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xda000019ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1574 = MUL_FI32m
+ { 1575, 1, 0, 0, "MUL_FPrST0", 0|(1<<TID::UnmodeledSideEffects), 0xc8000902ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #1575 = MUL_FPrST0
+ { 1576, 1, 0, 0, "MUL_FST0r", 0|(1<<TID::UnmodeledSideEffects), 0xc8000302ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #1576 = MUL_FST0r
+ { 1577, 3, 1, 0, "MUL_Fp32", 0, 0x40000ULL, NULL, NULL, NULL, OperandInfo36 }, // Inst #1577 = MUL_Fp32
+ { 1578, 7, 1, 0, "MUL_Fp32m", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #1578 = MUL_Fp32m
+ { 1579, 3, 1, 0, "MUL_Fp64", 0, 0x40000ULL, NULL, NULL, NULL, OperandInfo38 }, // Inst #1579 = MUL_Fp64
+ { 1580, 7, 1, 0, "MUL_Fp64m", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #1580 = MUL_Fp64m
+ { 1581, 7, 1, 0, "MUL_Fp64m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #1581 = MUL_Fp64m32
+ { 1582, 3, 1, 0, "MUL_Fp80", 0, 0x40000ULL, NULL, NULL, NULL, OperandInfo40 }, // Inst #1582 = MUL_Fp80
+ { 1583, 7, 1, 0, "MUL_Fp80m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #1583 = MUL_Fp80m32
+ { 1584, 7, 1, 0, "MUL_Fp80m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #1584 = MUL_Fp80m64
+ { 1585, 7, 1, 0, "MUL_FpI16m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #1585 = MUL_FpI16m32
+ { 1586, 7, 1, 0, "MUL_FpI16m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #1586 = MUL_FpI16m64
+ { 1587, 7, 1, 0, "MUL_FpI16m80", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #1587 = MUL_FpI16m80
+ { 1588, 7, 1, 0, "MUL_FpI32m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #1588 = MUL_FpI32m32
+ { 1589, 7, 1, 0, "MUL_FpI32m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #1589 = MUL_FpI32m64
+ { 1590, 7, 1, 0, "MUL_FpI32m80", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #1590 = MUL_FpI32m80
+ { 1591, 1, 0, 0, "MUL_FrST0", 0|(1<<TID::UnmodeledSideEffects), 0xc8000702ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #1591 = MUL_FrST0
+ { 1592, 0, 0, 0, "MWAIT", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x1000126ULL, NULL, NULL, NULL, 0 }, // Inst #1592 = MWAIT
+ { 1593, 5, 0, 0, "NEG16m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xf700005bULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #1593 = NEG16m
+ { 1594, 2, 1, 0, "NEG16r", 0, 0xf7000053ULL, NULL, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #1594 = NEG16r
+ { 1595, 5, 0, 0, "NEG32m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xf700001bULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #1595 = NEG32m
+ { 1596, 2, 1, 0, "NEG32r", 0, 0xf7000013ULL, NULL, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #1596 = NEG32r
+ { 1597, 5, 0, 0, "NEG64m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xf700101bULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #1597 = NEG64m
+ { 1598, 2, 1, 0, "NEG64r", 0, 0xf7001013ULL, NULL, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #1598 = NEG64r
+ { 1599, 5, 0, 0, "NEG8m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xf600001bULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #1599 = NEG8m
+ { 1600, 2, 1, 0, "NEG8r", 0, 0xf6000013ULL, NULL, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #1600 = NEG8r
+ { 1601, 0, 0, 0, "NOOP", 0, 0x90000001ULL, NULL, NULL, NULL, 0 }, // Inst #1601 = NOOP
+ { 1602, 5, 0, 0, "NOOPL", 0, 0x1f000118ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1602 = NOOPL
+ { 1603, 5, 0, 0, "NOOPW", 0, 0x1f000158ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1603 = NOOPW
+ { 1604, 5, 0, 0, "NOT16m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xf700005aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1604 = NOT16m
+ { 1605, 2, 1, 0, "NOT16r", 0, 0xf7000052ULL, NULL, NULL, NULL, OperandInfo104 }, // Inst #1605 = NOT16r
+ { 1606, 5, 0, 0, "NOT32m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xf700001aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1606 = NOT32m
+ { 1607, 2, 1, 0, "NOT32r", 0, 0xf7000012ULL, NULL, NULL, NULL, OperandInfo61 }, // Inst #1607 = NOT32r
+ { 1608, 5, 0, 0, "NOT64m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xf700101aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1608 = NOT64m
+ { 1609, 2, 1, 0, "NOT64r", 0, 0xf7001012ULL, NULL, NULL, NULL, OperandInfo62 }, // Inst #1609 = NOT64r
+ { 1610, 5, 0, 0, "NOT8m", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xf600001aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1610 = NOT8m
+ { 1611, 2, 1, 0, "NOT8r", 0, 0xf6000012ULL, NULL, NULL, NULL, OperandInfo105 }, // Inst #1611 = NOT8r
+ { 1612, 1, 0, 0, "OR16i16", 0|(1<<TID::UnmodeledSideEffects), 0xd006041ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #1612 = OR16i16
+ { 1613, 6, 0, 0, "OR16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x81006059ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1613 = OR16mi
+ { 1614, 6, 0, 0, "OR16mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x83002059ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1614 = OR16mi8
+ { 1615, 6, 0, 0, "OR16mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x9000044ULL, NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #1615 = OR16mr
+ { 1616, 3, 1, 0, "OR16ri", 0, 0x81006051ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #1616 = OR16ri
+ { 1617, 3, 1, 0, "OR16ri8", 0, 0x83002051ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #1617 = OR16ri8
+ { 1618, 7, 1, 0, "OR16rm", 0|(1<<TID::MayLoad), 0xb000046ULL, NULL, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #1618 = OR16rm
+ { 1619, 3, 1, 0, "OR16rr", 0|(1<<TID::Commutable), 0x9000043ULL, NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #1619 = OR16rr
+ { 1620, 3, 1, 0, "OR16rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0xb000045ULL, NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #1620 = OR16rr_REV
+ { 1621, 1, 0, 0, "OR32i32", 0|(1<<TID::UnmodeledSideEffects), 0xd00a001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #1621 = OR32i32
+ { 1622, 6, 0, 0, "OR32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100a019ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1622 = OR32mi
+ { 1623, 6, 0, 0, "OR32mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x83002019ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1623 = OR32mi8
+ { 1624, 6, 0, 0, "OR32mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x9000004ULL, NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #1624 = OR32mr
+ { 1625, 6, 0, 0, "OR32mrLocked", 0|(1<<TID::UnmodeledSideEffects), 0x9080004ULL, NULL, NULL, NULL, OperandInfo15 }, // Inst #1625 = OR32mrLocked
+ { 1626, 3, 1, 0, "OR32ri", 0, 0x8100a011ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #1626 = OR32ri
+ { 1627, 3, 1, 0, "OR32ri8", 0, 0x83002011ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #1627 = OR32ri8
+ { 1628, 7, 1, 0, "OR32rm", 0|(1<<TID::MayLoad), 0xb000006ULL, NULL, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #1628 = OR32rm
+ { 1629, 3, 1, 0, "OR32rr", 0|(1<<TID::Commutable), 0x9000003ULL, NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #1629 = OR32rr
+ { 1630, 3, 1, 0, "OR32rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0xb000005ULL, NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #1630 = OR32rr_REV
+ { 1631, 1, 0, 0, "OR64i32", 0|(1<<TID::UnmodeledSideEffects), 0xd00b001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #1631 = OR64i32
+ { 1632, 6, 0, 0, "OR64mi32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100b019ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1632 = OR64mi32
+ { 1633, 6, 0, 0, "OR64mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x83003019ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1633 = OR64mi8
+ { 1634, 6, 0, 0, "OR64mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x9001004ULL, NULL, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #1634 = OR64mr
+ { 1635, 3, 1, 0, "OR64ri32", 0, 0x8100b011ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #1635 = OR64ri32
+ { 1636, 3, 1, 0, "OR64ri8", 0, 0x83003011ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #1636 = OR64ri8
+ { 1637, 7, 1, 0, "OR64rm", 0|(1<<TID::MayLoad), 0xb001006ULL, NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #1637 = OR64rm
+ { 1638, 3, 1, 0, "OR64rr", 0|(1<<TID::Commutable), 0x9001003ULL, NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #1638 = OR64rr
+ { 1639, 3, 1, 0, "OR64rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0xb001005ULL, NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #1639 = OR64rr_REV
+ { 1640, 1, 0, 0, "OR8i8", 0|(1<<TID::UnmodeledSideEffects), 0xc002001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #1640 = OR8i8
+ { 1641, 6, 0, 0, "OR8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x80002019ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #1641 = OR8mi
+ { 1642, 6, 0, 0, "OR8mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8000004ULL, NULL, ImplicitList1, Barriers1, OperandInfo24 }, // Inst #1642 = OR8mr
+ { 1643, 3, 1, 0, "OR8ri", 0, 0x80002011ULL, NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #1643 = OR8ri
+ { 1644, 7, 1, 0, "OR8rm", 0|(1<<TID::MayLoad), 0xa000006ULL, NULL, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #1644 = OR8rm
+ { 1645, 3, 1, 0, "OR8rr", 0|(1<<TID::Commutable), 0x8000003ULL, NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #1645 = OR8rr
+ { 1646, 3, 1, 0, "OR8rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0xa000005ULL, NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #1646 = OR8rr_REV
+ { 1647, 7, 1, 0, "ORPDrm", 0|(1<<TID::MayLoad), 0x56800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1647 = ORPDrm
+ { 1648, 3, 1, 0, "ORPDrr", 0|(1<<TID::Commutable), 0x56800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1648 = ORPDrr
+ { 1649, 7, 1, 0, "ORPSrm", 0|(1<<TID::MayLoad), 0x56400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1649 = ORPSrm
+ { 1650, 3, 1, 0, "ORPSrr", 0|(1<<TID::Commutable), 0x56400105ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1650 = ORPSrr
+ { 1651, 1, 0, 0, "OUT16ir", 0|(1<<TID::UnmodeledSideEffects), 0xe7002041ULL, ImplicitList12, NULL, NULL, OperandInfo2 }, // Inst #1651 = OUT16ir
+ { 1652, 0, 0, 0, "OUT16rr", 0|(1<<TID::UnmodeledSideEffects), 0xef000041ULL, ImplicitList39, NULL, NULL, 0 }, // Inst #1652 = OUT16rr
+ { 1653, 1, 0, 0, "OUT32ir", 0|(1<<TID::UnmodeledSideEffects), 0xe7002001ULL, ImplicitList13, NULL, NULL, OperandInfo2 }, // Inst #1653 = OUT32ir
+ { 1654, 0, 0, 0, "OUT32rr", 0|(1<<TID::UnmodeledSideEffects), 0xef000001ULL, ImplicitList40, NULL, NULL, 0 }, // Inst #1654 = OUT32rr
+ { 1655, 1, 0, 0, "OUT8ir", 0|(1<<TID::UnmodeledSideEffects), 0xe6002001ULL, ImplicitList11, NULL, NULL, OperandInfo2 }, // Inst #1655 = OUT8ir
+ { 1656, 0, 0, 0, "OUT8rr", 0|(1<<TID::UnmodeledSideEffects), 0xee000001ULL, ImplicitList41, NULL, NULL, 0 }, // Inst #1656 = OUT8rr
+ { 1657, 0, 0, 0, "OUTSB", 0|(1<<TID::UnmodeledSideEffects), 0x6e000001ULL, NULL, NULL, NULL, 0 }, // Inst #1657 = OUTSB
+ { 1658, 0, 0, 0, "OUTSD", 0|(1<<TID::UnmodeledSideEffects), 0x6f000001ULL, NULL, NULL, NULL, 0 }, // Inst #1658 = OUTSD
+ { 1659, 0, 0, 0, "OUTSW", 0|(1<<TID::UnmodeledSideEffects), 0x6f000041ULL, NULL, NULL, NULL, 0 }, // Inst #1659 = OUTSW
+ { 1660, 6, 1, 0, "PABSBrm128", 0|(1<<TID::MayLoad), 0x1cc02d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1660 = PABSBrm128
+ { 1661, 6, 1, 0, "PABSBrm64", 0|(1<<TID::MayLoad), 0x1cc02d06ULL, NULL, NULL, NULL, OperandInfo129 }, // Inst #1661 = PABSBrm64
+ { 1662, 2, 1, 0, "PABSBrr128", 0, 0x1cc02d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1662 = PABSBrr128
+ { 1663, 2, 1, 0, "PABSBrr64", 0, 0x1cc02d05ULL, NULL, NULL, NULL, OperandInfo143 }, // Inst #1663 = PABSBrr64
+ { 1664, 6, 1, 0, "PABSDrm128", 0|(1<<TID::MayLoad), 0x1ec02d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1664 = PABSDrm128
+ { 1665, 6, 1, 0, "PABSDrm64", 0|(1<<TID::MayLoad), 0x1ec02d06ULL, NULL, NULL, NULL, OperandInfo129 }, // Inst #1665 = PABSDrm64
+ { 1666, 2, 1, 0, "PABSDrr128", 0, 0x1ec02d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1666 = PABSDrr128
+ { 1667, 2, 1, 0, "PABSDrr64", 0, 0x1ec02d05ULL, NULL, NULL, NULL, OperandInfo143 }, // Inst #1667 = PABSDrr64
+ { 1668, 6, 1, 0, "PABSWrm128", 0|(1<<TID::MayLoad), 0x1dc02d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1668 = PABSWrm128
+ { 1669, 6, 1, 0, "PABSWrm64", 0|(1<<TID::MayLoad), 0x1dc02d06ULL, NULL, NULL, NULL, OperandInfo129 }, // Inst #1669 = PABSWrm64
+ { 1670, 2, 1, 0, "PABSWrr128", 0, 0x1dc02d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1670 = PABSWrr128
+ { 1671, 2, 1, 0, "PABSWrr64", 0, 0x1dc02d05ULL, NULL, NULL, NULL, OperandInfo143 }, // Inst #1671 = PABSWrr64
+ { 1672, 7, 1, 0, "PACKSSDWrm", 0|(1<<TID::MayLoad), 0x6bc00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1672 = PACKSSDWrm
+ { 1673, 3, 1, 0, "PACKSSDWrr", 0, 0x6bc00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1673 = PACKSSDWrr
+ { 1674, 7, 1, 0, "PACKSSWBrm", 0|(1<<TID::MayLoad), 0x63c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1674 = PACKSSWBrm
+ { 1675, 3, 1, 0, "PACKSSWBrr", 0, 0x63c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1675 = PACKSSWBrr
+ { 1676, 7, 1, 0, "PACKUSDWrm", 0|(1<<TID::MayLoad), 0x2bc00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1676 = PACKUSDWrm
+ { 1677, 3, 1, 0, "PACKUSDWrr", 0, 0x2bc00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1677 = PACKUSDWrr
+ { 1678, 7, 1, 0, "PACKUSWBrm", 0|(1<<TID::MayLoad), 0x67c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1678 = PACKUSWBrm
+ { 1679, 3, 1, 0, "PACKUSWBrr", 0, 0x67c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1679 = PACKUSWBrr
+ { 1680, 7, 1, 0, "PADDBrm", 0|(1<<TID::MayLoad), 0xfcc00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1680 = PADDBrm
+ { 1681, 3, 1, 0, "PADDBrr", 0|(1<<TID::Commutable), 0xfcc00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1681 = PADDBrr
+ { 1682, 7, 1, 0, "PADDDrm", 0|(1<<TID::MayLoad), 0xfec00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1682 = PADDDrm
+ { 1683, 3, 1, 0, "PADDDrr", 0|(1<<TID::Commutable), 0xfec00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1683 = PADDDrr
+ { 1684, 7, 1, 0, "PADDQrm", 0|(1<<TID::MayLoad), 0xd4c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1684 = PADDQrm
+ { 1685, 3, 1, 0, "PADDQrr", 0|(1<<TID::Commutable), 0xd4c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1685 = PADDQrr
+ { 1686, 7, 1, 0, "PADDSBrm", 0|(1<<TID::MayLoad), 0xecc00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1686 = PADDSBrm
+ { 1687, 3, 1, 0, "PADDSBrr", 0|(1<<TID::Commutable), 0xecc00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1687 = PADDSBrr
+ { 1688, 7, 1, 0, "PADDSWrm", 0|(1<<TID::MayLoad), 0xedc00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1688 = PADDSWrm
+ { 1689, 3, 1, 0, "PADDSWrr", 0|(1<<TID::Commutable), 0xedc00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1689 = PADDSWrr
+ { 1690, 7, 1, 0, "PADDUSBrm", 0|(1<<TID::MayLoad), 0xdcc00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1690 = PADDUSBrm
+ { 1691, 3, 1, 0, "PADDUSBrr", 0|(1<<TID::Commutable), 0xdcc00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1691 = PADDUSBrr
+ { 1692, 7, 1, 0, "PADDUSWrm", 0|(1<<TID::MayLoad), 0xddc00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1692 = PADDUSWrm
+ { 1693, 3, 1, 0, "PADDUSWrr", 0|(1<<TID::Commutable), 0xddc00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1693 = PADDUSWrr
+ { 1694, 7, 1, 0, "PADDWrm", 0|(1<<TID::MayLoad), 0xfdc00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1694 = PADDWrm
+ { 1695, 3, 1, 0, "PADDWrr", 0|(1<<TID::Commutable), 0xfdc00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1695 = PADDWrr
+ { 1696, 8, 1, 0, "PALIGNR128rm", 0|(1<<TID::UnmodeledSideEffects), 0xfc02e46ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #1696 = PALIGNR128rm
+ { 1697, 4, 1, 0, "PALIGNR128rr", 0|(1<<TID::UnmodeledSideEffects), 0xfc02e45ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #1697 = PALIGNR128rr
+ { 1698, 8, 1, 0, "PALIGNR64rm", 0|(1<<TID::UnmodeledSideEffects), 0xfc02e06ULL, NULL, NULL, NULL, OperandInfo154 }, // Inst #1698 = PALIGNR64rm
+ { 1699, 4, 1, 0, "PALIGNR64rr", 0|(1<<TID::UnmodeledSideEffects), 0xfc02e05ULL, NULL, NULL, NULL, OperandInfo205 }, // Inst #1699 = PALIGNR64rr
+ { 1700, 7, 1, 0, "PANDNrm", 0|(1<<TID::MayLoad), 0xdfc00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1700 = PANDNrm
+ { 1701, 3, 1, 0, "PANDNrr", 0, 0xdfc00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1701 = PANDNrr
+ { 1702, 7, 1, 0, "PANDrm", 0|(1<<TID::MayLoad), 0xdbc00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1702 = PANDrm
+ { 1703, 3, 1, 0, "PANDrr", 0|(1<<TID::Commutable), 0xdbc00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1703 = PANDrr
+ { 1704, 0, 0, 0, "PAUSE", 0|(1<<TID::UnmodeledSideEffects), 0x90000201ULL, NULL, NULL, NULL, 0 }, // Inst #1704 = PAUSE
+ { 1705, 7, 1, 0, "PAVGBrm", 0|(1<<TID::MayLoad), 0xe0c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1705 = PAVGBrm
+ { 1706, 3, 1, 0, "PAVGBrr", 0|(1<<TID::Commutable), 0xe0c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1706 = PAVGBrr
+ { 1707, 7, 1, 0, "PAVGWrm", 0|(1<<TID::MayLoad), 0xe3c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1707 = PAVGWrm
+ { 1708, 3, 1, 0, "PAVGWrr", 0|(1<<TID::Commutable), 0xe3c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1708 = PAVGWrr
+ { 1709, 7, 1, 0, "PBLENDVBrm0", 0|(1<<TID::MayLoad), 0x10c00d46ULL, ImplicitList8, NULL, NULL, OperandInfo28 }, // Inst #1709 = PBLENDVBrm0
+ { 1710, 3, 1, 0, "PBLENDVBrr0", 0, 0x10c00d45ULL, ImplicitList8, NULL, NULL, OperandInfo29 }, // Inst #1710 = PBLENDVBrr0
+ { 1711, 8, 1, 0, "PBLENDWrmi", 0|(1<<TID::MayLoad), 0xec02e46ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #1711 = PBLENDWrmi
+ { 1712, 4, 1, 0, "PBLENDWrri", 0, 0xec02e45ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #1712 = PBLENDWrri
+ { 1713, 7, 1, 0, "PCMPEQBrm", 0|(1<<TID::MayLoad), 0x74c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1713 = PCMPEQBrm
+ { 1714, 3, 1, 0, "PCMPEQBrr", 0|(1<<TID::Commutable), 0x74c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1714 = PCMPEQBrr
+ { 1715, 7, 1, 0, "PCMPEQDrm", 0|(1<<TID::MayLoad), 0x76c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1715 = PCMPEQDrm
+ { 1716, 3, 1, 0, "PCMPEQDrr", 0|(1<<TID::Commutable), 0x76c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1716 = PCMPEQDrr
+ { 1717, 7, 1, 0, "PCMPEQQrm", 0|(1<<TID::MayLoad), 0x29c00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1717 = PCMPEQQrm
+ { 1718, 3, 1, 0, "PCMPEQQrr", 0|(1<<TID::Commutable), 0x29c00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1718 = PCMPEQQrr
+ { 1719, 7, 1, 0, "PCMPEQWrm", 0|(1<<TID::MayLoad), 0x75c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1719 = PCMPEQWrm
+ { 1720, 3, 1, 0, "PCMPEQWrr", 0|(1<<TID::Commutable), 0x75c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1720 = PCMPEQWrr
+ { 1721, 7, 0, 0, "PCMPESTRIArm", 0|(1<<TID::MayLoad), 0x61c02e46ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #1721 = PCMPESTRIArm
+ { 1722, 3, 0, 0, "PCMPESTRIArr", 0, 0x61c02e45ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #1722 = PCMPESTRIArr
+ { 1723, 7, 0, 0, "PCMPESTRICrm", 0|(1<<TID::MayLoad), 0x61c02e46ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #1723 = PCMPESTRICrm
+ { 1724, 3, 0, 0, "PCMPESTRICrr", 0, 0x61c02e45ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #1724 = PCMPESTRICrr
+ { 1725, 7, 0, 0, "PCMPESTRIOrm", 0|(1<<TID::MayLoad), 0x61c02e46ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #1725 = PCMPESTRIOrm
+ { 1726, 3, 0, 0, "PCMPESTRIOrr", 0, 0x61c02e45ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #1726 = PCMPESTRIOrr
+ { 1727, 7, 0, 0, "PCMPESTRISrm", 0|(1<<TID::MayLoad), 0x61c02e46ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #1727 = PCMPESTRISrm
+ { 1728, 3, 0, 0, "PCMPESTRISrr", 0, 0x61c02e45ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #1728 = PCMPESTRISrr
+ { 1729, 7, 0, 0, "PCMPESTRIZrm", 0|(1<<TID::MayLoad), 0x61c02e46ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #1729 = PCMPESTRIZrm
+ { 1730, 3, 0, 0, "PCMPESTRIZrr", 0, 0x61c02e45ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #1730 = PCMPESTRIZrr
+ { 1731, 7, 0, 0, "PCMPESTRIrm", 0|(1<<TID::MayLoad), 0x61c02e46ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #1731 = PCMPESTRIrm
+ { 1732, 3, 0, 0, "PCMPESTRIrr", 0, 0x61c02e45ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #1732 = PCMPESTRIrr
+ { 1733, 8, 1, 0, "PCMPESTRM128MEM", 0|(1<<TID::MayLoad)|(1<<TID::UsesCustomInserter), 0x2000ULL, ImplicitList14, ImplicitList1, Barriers1, OperandInfo136 }, // Inst #1733 = PCMPESTRM128MEM
+ { 1734, 4, 1, 0, "PCMPESTRM128REG", 0|(1<<TID::UsesCustomInserter), 0x2000ULL, ImplicitList14, ImplicitList1, Barriers1, OperandInfo80 }, // Inst #1734 = PCMPESTRM128REG
+ { 1735, 7, 0, 0, "PCMPESTRM128rm", 0|(1<<TID::UnmodeledSideEffects), 0x60c02e46ULL, ImplicitList14, ImplicitList43, Barriers1, OperandInfo44 }, // Inst #1735 = PCMPESTRM128rm
+ { 1736, 3, 0, 0, "PCMPESTRM128rr", 0|(1<<TID::UnmodeledSideEffects), 0x60c02e45ULL, ImplicitList14, ImplicitList43, Barriers1, OperandInfo45 }, // Inst #1736 = PCMPESTRM128rr
+ { 1737, 7, 1, 0, "PCMPGTBrm", 0|(1<<TID::MayLoad), 0x64c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1737 = PCMPGTBrm
+ { 1738, 3, 1, 0, "PCMPGTBrr", 0, 0x64c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1738 = PCMPGTBrr
+ { 1739, 7, 1, 0, "PCMPGTDrm", 0|(1<<TID::MayLoad), 0x66c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1739 = PCMPGTDrm
+ { 1740, 3, 1, 0, "PCMPGTDrr", 0, 0x66c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1740 = PCMPGTDrr
+ { 1741, 7, 1, 0, "PCMPGTQrm", 0|(1<<TID::MayLoad), 0x37c00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1741 = PCMPGTQrm
+ { 1742, 3, 1, 0, "PCMPGTQrr", 0, 0x37c00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1742 = PCMPGTQrr
+ { 1743, 7, 1, 0, "PCMPGTWrm", 0|(1<<TID::MayLoad), 0x65c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1743 = PCMPGTWrm
+ { 1744, 3, 1, 0, "PCMPGTWrr", 0, 0x65c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1744 = PCMPGTWrr
+ { 1745, 7, 0, 0, "PCMPISTRIArm", 0|(1<<TID::MayLoad), 0x63c02e46ULL, NULL, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #1745 = PCMPISTRIArm
+ { 1746, 3, 0, 0, "PCMPISTRIArr", 0, 0x63c02e45ULL, NULL, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #1746 = PCMPISTRIArr
+ { 1747, 7, 0, 0, "PCMPISTRICrm", 0|(1<<TID::MayLoad), 0x63c02e46ULL, NULL, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #1747 = PCMPISTRICrm
+ { 1748, 3, 0, 0, "PCMPISTRICrr", 0, 0x63c02e45ULL, NULL, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #1748 = PCMPISTRICrr
+ { 1749, 7, 0, 0, "PCMPISTRIOrm", 0|(1<<TID::MayLoad), 0x63c02e46ULL, NULL, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #1749 = PCMPISTRIOrm
+ { 1750, 3, 0, 0, "PCMPISTRIOrr", 0, 0x63c02e45ULL, NULL, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #1750 = PCMPISTRIOrr
+ { 1751, 7, 0, 0, "PCMPISTRISrm", 0|(1<<TID::MayLoad), 0x63c02e46ULL, NULL, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #1751 = PCMPISTRISrm
+ { 1752, 3, 0, 0, "PCMPISTRISrr", 0, 0x63c02e45ULL, NULL, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #1752 = PCMPISTRISrr
+ { 1753, 7, 0, 0, "PCMPISTRIZrm", 0|(1<<TID::MayLoad), 0x63c02e46ULL, NULL, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #1753 = PCMPISTRIZrm
+ { 1754, 3, 0, 0, "PCMPISTRIZrr", 0, 0x63c02e45ULL, NULL, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #1754 = PCMPISTRIZrr
+ { 1755, 7, 0, 0, "PCMPISTRIrm", 0|(1<<TID::MayLoad), 0x63c02e46ULL, NULL, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #1755 = PCMPISTRIrm
+ { 1756, 3, 0, 0, "PCMPISTRIrr", 0, 0x63c02e45ULL, NULL, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #1756 = PCMPISTRIrr
+ { 1757, 8, 1, 0, "PCMPISTRM128MEM", 0|(1<<TID::MayLoad)|(1<<TID::UsesCustomInserter), 0x2000ULL, NULL, ImplicitList1, Barriers1, OperandInfo136 }, // Inst #1757 = PCMPISTRM128MEM
+ { 1758, 4, 1, 0, "PCMPISTRM128REG", 0|(1<<TID::UsesCustomInserter), 0x2000ULL, NULL, ImplicitList1, Barriers1, OperandInfo80 }, // Inst #1758 = PCMPISTRM128REG
+ { 1759, 7, 0, 0, "PCMPISTRM128rm", 0|(1<<TID::UnmodeledSideEffects), 0x62c02e46ULL, NULL, ImplicitList43, Barriers1, OperandInfo44 }, // Inst #1759 = PCMPISTRM128rm
+ { 1760, 3, 0, 0, "PCMPISTRM128rr", 0|(1<<TID::UnmodeledSideEffects), 0x62c02e45ULL, NULL, ImplicitList43, Barriers1, OperandInfo45 }, // Inst #1760 = PCMPISTRM128rr
+ { 1761, 7, 0, 0, "PEXTRBmr", 0|(1<<TID::UnmodeledSideEffects), 0x14c02e44ULL, NULL, NULL, NULL, OperandInfo108 }, // Inst #1761 = PEXTRBmr
+ { 1762, 3, 1, 0, "PEXTRBrr", 0, 0x14c02e43ULL, NULL, NULL, NULL, OperandInfo109 }, // Inst #1762 = PEXTRBrr
+ { 1763, 7, 0, 0, "PEXTRDmr", 0|(1<<TID::MayStore), 0x16c02e44ULL, NULL, NULL, NULL, OperandInfo108 }, // Inst #1763 = PEXTRDmr
+ { 1764, 3, 1, 0, "PEXTRDrr", 0, 0x16c02e43ULL, NULL, NULL, NULL, OperandInfo109 }, // Inst #1764 = PEXTRDrr
+ { 1765, 7, 0, 0, "PEXTRQmr", 0|(1<<TID::MayStore), 0x16c03e44ULL, NULL, NULL, NULL, OperandInfo108 }, // Inst #1765 = PEXTRQmr
+ { 1766, 3, 1, 0, "PEXTRQrr", 0, 0x16c03e43ULL, NULL, NULL, NULL, OperandInfo206 }, // Inst #1766 = PEXTRQrr
+ { 1767, 7, 0, 0, "PEXTRWmr", 0|(1<<TID::UnmodeledSideEffects), 0x15c02e44ULL, NULL, NULL, NULL, OperandInfo108 }, // Inst #1767 = PEXTRWmr
+ { 1768, 3, 1, 0, "PEXTRWri", 0, 0xc5c02145ULL, NULL, NULL, NULL, OperandInfo109 }, // Inst #1768 = PEXTRWri
+ { 1769, 7, 1, 0, "PHADDDrm128", 0|(1<<TID::MayLoad), 0x2c00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1769 = PHADDDrm128
+ { 1770, 7, 1, 0, "PHADDDrm64", 0|(1<<TID::MayLoad), 0x2c00d06ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1770 = PHADDDrm64
+ { 1771, 3, 1, 0, "PHADDDrr128", 0, 0x2c00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1771 = PHADDDrr128
+ { 1772, 3, 1, 0, "PHADDDrr64", 0, 0x2c00d05ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1772 = PHADDDrr64
+ { 1773, 7, 1, 0, "PHADDSWrm128", 0|(1<<TID::MayLoad), 0x3c00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1773 = PHADDSWrm128
+ { 1774, 7, 1, 0, "PHADDSWrm64", 0|(1<<TID::MayLoad), 0x3c00d06ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1774 = PHADDSWrm64
+ { 1775, 3, 1, 0, "PHADDSWrr128", 0, 0x3c00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1775 = PHADDSWrr128
+ { 1776, 3, 1, 0, "PHADDSWrr64", 0, 0x3c00d05ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1776 = PHADDSWrr64
+ { 1777, 7, 1, 0, "PHADDWrm128", 0|(1<<TID::MayLoad), 0x1c00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1777 = PHADDWrm128
+ { 1778, 7, 1, 0, "PHADDWrm64", 0|(1<<TID::MayLoad), 0x1c00d06ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1778 = PHADDWrm64
+ { 1779, 3, 1, 0, "PHADDWrr128", 0, 0x1c00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1779 = PHADDWrr128
+ { 1780, 3, 1, 0, "PHADDWrr64", 0, 0x1c00d05ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1780 = PHADDWrr64
+ { 1781, 6, 1, 0, "PHMINPOSUWrm128", 0|(1<<TID::MayLoad), 0x41c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1781 = PHMINPOSUWrm128
+ { 1782, 2, 1, 0, "PHMINPOSUWrr128", 0, 0x41c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1782 = PHMINPOSUWrr128
+ { 1783, 7, 1, 0, "PHSUBDrm128", 0|(1<<TID::MayLoad), 0x6c00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1783 = PHSUBDrm128
+ { 1784, 7, 1, 0, "PHSUBDrm64", 0|(1<<TID::MayLoad), 0x6c00d06ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1784 = PHSUBDrm64
+ { 1785, 3, 1, 0, "PHSUBDrr128", 0, 0x6c00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1785 = PHSUBDrr128
+ { 1786, 3, 1, 0, "PHSUBDrr64", 0, 0x6c00d05ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1786 = PHSUBDrr64
+ { 1787, 7, 1, 0, "PHSUBSWrm128", 0|(1<<TID::MayLoad), 0x7c00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1787 = PHSUBSWrm128
+ { 1788, 7, 1, 0, "PHSUBSWrm64", 0|(1<<TID::MayLoad), 0x7c00d06ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1788 = PHSUBSWrm64
+ { 1789, 3, 1, 0, "PHSUBSWrr128", 0, 0x7c00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1789 = PHSUBSWrr128
+ { 1790, 3, 1, 0, "PHSUBSWrr64", 0, 0x7c00d05ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1790 = PHSUBSWrr64
+ { 1791, 7, 1, 0, "PHSUBWrm128", 0|(1<<TID::MayLoad), 0x5c00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1791 = PHSUBWrm128
+ { 1792, 7, 1, 0, "PHSUBWrm64", 0|(1<<TID::MayLoad), 0x5c00d06ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1792 = PHSUBWrm64
+ { 1793, 3, 1, 0, "PHSUBWrr128", 0, 0x5c00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1793 = PHSUBWrr128
+ { 1794, 3, 1, 0, "PHSUBWrr64", 0, 0x5c00d05ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1794 = PHSUBWrr64
+ { 1795, 8, 1, 0, "PINSRBrm", 0|(1<<TID::MayLoad), 0x20c02e46ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #1795 = PINSRBrm
+ { 1796, 4, 1, 0, "PINSRBrr", 0, 0x20c02e45ULL, NULL, NULL, NULL, OperandInfo207 }, // Inst #1796 = PINSRBrr
+ { 1797, 8, 1, 0, "PINSRDrm", 0|(1<<TID::MayLoad), 0x22c02e46ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #1797 = PINSRDrm
+ { 1798, 4, 1, 0, "PINSRDrr", 0, 0x22c02e45ULL, NULL, NULL, NULL, OperandInfo207 }, // Inst #1798 = PINSRDrr
+ { 1799, 8, 1, 0, "PINSRQrm", 0|(1<<TID::MayLoad), 0x22c03e46ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #1799 = PINSRQrm
+ { 1800, 4, 1, 0, "PINSRQrr", 0, 0x22c03e45ULL, NULL, NULL, NULL, OperandInfo208 }, // Inst #1800 = PINSRQrr
+ { 1801, 8, 1, 0, "PINSRWrmi", 0|(1<<TID::MayLoad), 0xc4c02146ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #1801 = PINSRWrmi
+ { 1802, 4, 1, 0, "PINSRWrri", 0, 0xc4c02145ULL, NULL, NULL, NULL, OperandInfo207 }, // Inst #1802 = PINSRWrri
+ { 1803, 7, 1, 0, "PMADDUBSWrm128", 0|(1<<TID::MayLoad), 0x4c00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1803 = PMADDUBSWrm128
+ { 1804, 7, 1, 0, "PMADDUBSWrm64", 0|(1<<TID::MayLoad), 0x4c00d06ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1804 = PMADDUBSWrm64
+ { 1805, 3, 1, 0, "PMADDUBSWrr128", 0, 0x4c00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1805 = PMADDUBSWrr128
+ { 1806, 3, 1, 0, "PMADDUBSWrr64", 0, 0x4c00d05ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1806 = PMADDUBSWrr64
+ { 1807, 7, 1, 0, "PMADDWDrm", 0|(1<<TID::MayLoad), 0xf5c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1807 = PMADDWDrm
+ { 1808, 3, 1, 0, "PMADDWDrr", 0|(1<<TID::Commutable), 0xf5c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1808 = PMADDWDrr
+ { 1809, 7, 1, 0, "PMAXSBrm", 0|(1<<TID::MayLoad), 0x3cc00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1809 = PMAXSBrm
+ { 1810, 3, 1, 0, "PMAXSBrr", 0|(1<<TID::Commutable), 0x3cc00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1810 = PMAXSBrr
+ { 1811, 7, 1, 0, "PMAXSDrm", 0|(1<<TID::MayLoad), 0x3dc00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1811 = PMAXSDrm
+ { 1812, 3, 1, 0, "PMAXSDrr", 0|(1<<TID::Commutable), 0x3dc00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1812 = PMAXSDrr
+ { 1813, 7, 1, 0, "PMAXSWrm", 0|(1<<TID::MayLoad), 0xeec00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1813 = PMAXSWrm
+ { 1814, 3, 1, 0, "PMAXSWrr", 0|(1<<TID::Commutable), 0xeec00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1814 = PMAXSWrr
+ { 1815, 7, 1, 0, "PMAXUBrm", 0|(1<<TID::MayLoad), 0xdec00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1815 = PMAXUBrm
+ { 1816, 3, 1, 0, "PMAXUBrr", 0|(1<<TID::Commutable), 0xdec00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1816 = PMAXUBrr
+ { 1817, 7, 1, 0, "PMAXUDrm", 0|(1<<TID::MayLoad), 0x3fc00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1817 = PMAXUDrm
+ { 1818, 3, 1, 0, "PMAXUDrr", 0|(1<<TID::Commutable), 0x3fc00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1818 = PMAXUDrr
+ { 1819, 7, 1, 0, "PMAXUWrm", 0|(1<<TID::MayLoad), 0x3ec00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1819 = PMAXUWrm
+ { 1820, 3, 1, 0, "PMAXUWrr", 0|(1<<TID::Commutable), 0x3ec00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1820 = PMAXUWrr
+ { 1821, 7, 1, 0, "PMINSBrm", 0|(1<<TID::MayLoad), 0x38c00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1821 = PMINSBrm
+ { 1822, 3, 1, 0, "PMINSBrr", 0|(1<<TID::Commutable), 0x38c00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1822 = PMINSBrr
+ { 1823, 7, 1, 0, "PMINSDrm", 0|(1<<TID::MayLoad), 0x39c00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1823 = PMINSDrm
+ { 1824, 3, 1, 0, "PMINSDrr", 0|(1<<TID::Commutable), 0x39c00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1824 = PMINSDrr
+ { 1825, 7, 1, 0, "PMINSWrm", 0|(1<<TID::MayLoad), 0xeac00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1825 = PMINSWrm
+ { 1826, 3, 1, 0, "PMINSWrr", 0|(1<<TID::Commutable), 0xeac00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1826 = PMINSWrr
+ { 1827, 7, 1, 0, "PMINUBrm", 0|(1<<TID::MayLoad), 0xdac00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1827 = PMINUBrm
+ { 1828, 3, 1, 0, "PMINUBrr", 0|(1<<TID::Commutable), 0xdac00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1828 = PMINUBrr
+ { 1829, 7, 1, 0, "PMINUDrm", 0|(1<<TID::MayLoad), 0x3bc00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1829 = PMINUDrm
+ { 1830, 3, 1, 0, "PMINUDrr", 0|(1<<TID::Commutable), 0x3bc00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1830 = PMINUDrr
+ { 1831, 7, 1, 0, "PMINUWrm", 0|(1<<TID::MayLoad), 0x3ac00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1831 = PMINUWrm
+ { 1832, 3, 1, 0, "PMINUWrr", 0|(1<<TID::Commutable), 0x3ac00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1832 = PMINUWrr
+ { 1833, 2, 1, 0, "PMOVMSKBrr", 0, 0xd7c00145ULL, NULL, NULL, NULL, OperandInfo133 }, // Inst #1833 = PMOVMSKBrr
+ { 1834, 6, 1, 0, "PMOVSXBDrm", 0|(1<<TID::MayLoad), 0x21c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1834 = PMOVSXBDrm
+ { 1835, 2, 1, 0, "PMOVSXBDrr", 0, 0x21c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1835 = PMOVSXBDrr
+ { 1836, 6, 1, 0, "PMOVSXBQrm", 0|(1<<TID::MayLoad), 0x22c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1836 = PMOVSXBQrm
+ { 1837, 2, 1, 0, "PMOVSXBQrr", 0, 0x22c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1837 = PMOVSXBQrr
+ { 1838, 6, 1, 0, "PMOVSXBWrm", 0|(1<<TID::MayLoad), 0x20c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1838 = PMOVSXBWrm
+ { 1839, 2, 1, 0, "PMOVSXBWrr", 0, 0x20c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1839 = PMOVSXBWrr
+ { 1840, 6, 1, 0, "PMOVSXDQrm", 0|(1<<TID::MayLoad), 0x25c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1840 = PMOVSXDQrm
+ { 1841, 2, 1, 0, "PMOVSXDQrr", 0, 0x25c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1841 = PMOVSXDQrr
+ { 1842, 6, 1, 0, "PMOVSXWDrm", 0|(1<<TID::MayLoad), 0x23c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1842 = PMOVSXWDrm
+ { 1843, 2, 1, 0, "PMOVSXWDrr", 0, 0x23c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1843 = PMOVSXWDrr
+ { 1844, 6, 1, 0, "PMOVSXWQrm", 0|(1<<TID::MayLoad), 0x24c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1844 = PMOVSXWQrm
+ { 1845, 2, 1, 0, "PMOVSXWQrr", 0, 0x24c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1845 = PMOVSXWQrr
+ { 1846, 6, 1, 0, "PMOVZXBDrm", 0|(1<<TID::MayLoad), 0x31c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1846 = PMOVZXBDrm
+ { 1847, 2, 1, 0, "PMOVZXBDrr", 0, 0x31c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1847 = PMOVZXBDrr
+ { 1848, 6, 1, 0, "PMOVZXBQrm", 0|(1<<TID::MayLoad), 0x32c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1848 = PMOVZXBQrm
+ { 1849, 2, 1, 0, "PMOVZXBQrr", 0, 0x32c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1849 = PMOVZXBQrr
+ { 1850, 6, 1, 0, "PMOVZXBWrm", 0|(1<<TID::MayLoad), 0x30c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1850 = PMOVZXBWrm
+ { 1851, 2, 1, 0, "PMOVZXBWrr", 0, 0x30c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1851 = PMOVZXBWrr
+ { 1852, 6, 1, 0, "PMOVZXDQrm", 0|(1<<TID::MayLoad), 0x35c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1852 = PMOVZXDQrm
+ { 1853, 2, 1, 0, "PMOVZXDQrr", 0, 0x35c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1853 = PMOVZXDQrr
+ { 1854, 6, 1, 0, "PMOVZXWDrm", 0|(1<<TID::MayLoad), 0x33c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1854 = PMOVZXWDrm
+ { 1855, 2, 1, 0, "PMOVZXWDrr", 0, 0x33c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1855 = PMOVZXWDrr
+ { 1856, 6, 1, 0, "PMOVZXWQrm", 0|(1<<TID::MayLoad), 0x34c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #1856 = PMOVZXWQrm
+ { 1857, 2, 1, 0, "PMOVZXWQrr", 0, 0x34c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #1857 = PMOVZXWQrr
+ { 1858, 7, 1, 0, "PMULDQrm", 0|(1<<TID::MayLoad), 0x28c00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1858 = PMULDQrm
+ { 1859, 3, 1, 0, "PMULDQrr", 0|(1<<TID::Commutable), 0x28c00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1859 = PMULDQrr
+ { 1860, 7, 1, 0, "PMULHRSWrm128", 0|(1<<TID::MayLoad), 0xbc00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1860 = PMULHRSWrm128
+ { 1861, 7, 1, 0, "PMULHRSWrm64", 0|(1<<TID::MayLoad), 0xbc00d06ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1861 = PMULHRSWrm64
+ { 1862, 3, 1, 0, "PMULHRSWrr128", 0|(1<<TID::Commutable), 0xbc00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1862 = PMULHRSWrr128
+ { 1863, 3, 1, 0, "PMULHRSWrr64", 0|(1<<TID::Commutable), 0xbc00d05ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1863 = PMULHRSWrr64
+ { 1864, 7, 1, 0, "PMULHUWrm", 0|(1<<TID::MayLoad), 0xe4c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1864 = PMULHUWrm
+ { 1865, 3, 1, 0, "PMULHUWrr", 0|(1<<TID::Commutable), 0xe4c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1865 = PMULHUWrr
+ { 1866, 7, 1, 0, "PMULHWrm", 0|(1<<TID::MayLoad), 0xe5c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1866 = PMULHWrm
+ { 1867, 3, 1, 0, "PMULHWrr", 0|(1<<TID::Commutable), 0xe5c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1867 = PMULHWrr
+ { 1868, 7, 1, 0, "PMULLDrm", 0|(1<<TID::MayLoad), 0x40c00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1868 = PMULLDrm
+ { 1869, 3, 1, 0, "PMULLDrr", 0|(1<<TID::Commutable), 0x40c00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1869 = PMULLDrr
+ { 1870, 7, 1, 0, "PMULLWrm", 0|(1<<TID::MayLoad), 0xd5c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1870 = PMULLWrm
+ { 1871, 3, 1, 0, "PMULLWrr", 0|(1<<TID::Commutable), 0xd5c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1871 = PMULLWrr
+ { 1872, 7, 1, 0, "PMULUDQrm", 0|(1<<TID::MayLoad), 0xf4c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1872 = PMULUDQrm
+ { 1873, 3, 1, 0, "PMULUDQrr", 0|(1<<TID::Commutable), 0xf4c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1873 = PMULUDQrr
+ { 1874, 1, 1, 0, "POP16r", 0|(1<<TID::MayLoad), 0x58000042ULL, ImplicitList2, ImplicitList2, NULL, OperandInfo106 }, // Inst #1874 = POP16r
+ { 1875, 5, 1, 0, "POP16rmm", 0|(1<<TID::MayLoad), 0x8f000058ULL, ImplicitList2, ImplicitList2, NULL, OperandInfo34 }, // Inst #1875 = POP16rmm
+ { 1876, 1, 1, 0, "POP16rmr", 0|(1<<TID::MayLoad), 0x8f000050ULL, ImplicitList2, ImplicitList2, NULL, OperandInfo106 }, // Inst #1876 = POP16rmr
+ { 1877, 1, 1, 0, "POP32r", 0|(1<<TID::MayLoad), 0x58000002ULL, ImplicitList2, ImplicitList2, NULL, OperandInfo66 }, // Inst #1877 = POP32r
+ { 1878, 5, 1, 0, "POP32rmm", 0|(1<<TID::MayLoad), 0x8f000018ULL, ImplicitList2, ImplicitList2, NULL, OperandInfo34 }, // Inst #1878 = POP32rmm
+ { 1879, 1, 1, 0, "POP32rmr", 0|(1<<TID::MayLoad), 0x8f000010ULL, ImplicitList2, ImplicitList2, NULL, OperandInfo66 }, // Inst #1879 = POP32rmr
+ { 1880, 1, 1, 0, "POP64r", 0|(1<<TID::MayLoad), 0x58000002ULL, ImplicitList4, ImplicitList4, NULL, OperandInfo67 }, // Inst #1880 = POP64r
+ { 1881, 5, 1, 0, "POP64rmm", 0|(1<<TID::MayLoad), 0x8f000018ULL, ImplicitList4, ImplicitList4, NULL, OperandInfo34 }, // Inst #1881 = POP64rmm
+ { 1882, 1, 1, 0, "POP64rmr", 0|(1<<TID::MayLoad), 0x8f000010ULL, ImplicitList4, ImplicitList4, NULL, OperandInfo67 }, // Inst #1882 = POP64rmr
+ { 1883, 0, 0, 0, "POPA32", 0|(1<<TID::MayLoad), 0x61000001ULL, ImplicitList2, ImplicitList44, Barriers7, 0 }, // Inst #1883 = POPA32
+ { 1884, 6, 1, 0, "POPCNT16rm", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xb8000c46ULL, NULL, NULL, NULL, OperandInfo55 }, // Inst #1884 = POPCNT16rm
+ { 1885, 2, 1, 0, "POPCNT16rr", 0|(1<<TID::UnmodeledSideEffects), 0xb8000c45ULL, NULL, NULL, NULL, OperandInfo56 }, // Inst #1885 = POPCNT16rr
+ { 1886, 6, 1, 0, "POPCNT32rm", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xb8000c06ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #1886 = POPCNT32rm
+ { 1887, 2, 1, 0, "POPCNT32rr", 0|(1<<TID::UnmodeledSideEffects), 0xb8000c05ULL, NULL, NULL, NULL, OperandInfo58 }, // Inst #1887 = POPCNT32rr
+ { 1888, 6, 1, 0, "POPCNT64rm", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xb8001c06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #1888 = POPCNT64rm
+ { 1889, 2, 1, 0, "POPCNT64rr", 0|(1<<TID::UnmodeledSideEffects), 0xb8001c05ULL, NULL, NULL, NULL, OperandInfo60 }, // Inst #1889 = POPCNT64rr
+ { 1890, 0, 0, 0, "POPF16", 0|(1<<TID::MayLoad), 0x9d000041ULL, ImplicitList2, ImplicitList3, Barriers1, 0 }, // Inst #1890 = POPF16
+ { 1891, 0, 0, 0, "POPF32", 0|(1<<TID::MayLoad), 0x9d000001ULL, ImplicitList2, ImplicitList3, Barriers1, 0 }, // Inst #1891 = POPF32
+ { 1892, 0, 0, 0, "POPF64", 0|(1<<TID::MayLoad), 0x9d000001ULL, ImplicitList4, ImplicitList5, Barriers1, 0 }, // Inst #1892 = POPF64
+ { 1893, 0, 0, 0, "POPFS16", 0|(1<<TID::UnmodeledSideEffects), 0xa1000141ULL, NULL, NULL, NULL, 0 }, // Inst #1893 = POPFS16
+ { 1894, 0, 0, 0, "POPFS32", 0|(1<<TID::UnmodeledSideEffects), 0xa1000101ULL, NULL, NULL, NULL, 0 }, // Inst #1894 = POPFS32
+ { 1895, 0, 0, 0, "POPFS64", 0|(1<<TID::UnmodeledSideEffects), 0xa1000101ULL, NULL, NULL, NULL, 0 }, // Inst #1895 = POPFS64
+ { 1896, 0, 0, 0, "POPGS16", 0|(1<<TID::UnmodeledSideEffects), 0xa9000141ULL, NULL, NULL, NULL, 0 }, // Inst #1896 = POPGS16
+ { 1897, 0, 0, 0, "POPGS32", 0|(1<<TID::UnmodeledSideEffects), 0xa9000101ULL, NULL, NULL, NULL, 0 }, // Inst #1897 = POPGS32
+ { 1898, 0, 0, 0, "POPGS64", 0|(1<<TID::UnmodeledSideEffects), 0xa9000101ULL, NULL, NULL, NULL, 0 }, // Inst #1898 = POPGS64
+ { 1899, 7, 1, 0, "PORrm", 0|(1<<TID::MayLoad), 0xebc00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1899 = PORrm
+ { 1900, 3, 1, 0, "PORrr", 0|(1<<TID::Commutable), 0xebc00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1900 = PORrr
+ { 1901, 5, 0, 0, "PREFETCHNTA", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x18400118ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1901 = PREFETCHNTA
+ { 1902, 5, 0, 0, "PREFETCHT0", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x18400119ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1902 = PREFETCHT0
+ { 1903, 5, 0, 0, "PREFETCHT1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x1840011aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1903 = PREFETCHT1
+ { 1904, 5, 0, 0, "PREFETCHT2", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x1840011bULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #1904 = PREFETCHT2
+ { 1905, 7, 1, 0, "PSADBWrm", 0|(1<<TID::MayLoad), 0xf6c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1905 = PSADBWrm
+ { 1906, 3, 1, 0, "PSADBWrr", 0|(1<<TID::Commutable), 0xf6c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1906 = PSADBWrr
+ { 1907, 7, 1, 0, "PSHUFBrm128", 0|(1<<TID::MayLoad), 0xc00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1907 = PSHUFBrm128
+ { 1908, 7, 1, 0, "PSHUFBrm64", 0|(1<<TID::MayLoad), 0xc00d06ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1908 = PSHUFBrm64
+ { 1909, 3, 1, 0, "PSHUFBrr128", 0, 0xc00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1909 = PSHUFBrr128
+ { 1910, 3, 1, 0, "PSHUFBrr64", 0, 0xc00d05ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1910 = PSHUFBrr64
+ { 1911, 7, 1, 0, "PSHUFDmi", 0|(1<<TID::MayLoad), 0x70c02146ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #1911 = PSHUFDmi
+ { 1912, 3, 1, 0, "PSHUFDri", 0, 0x70c02145ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #1912 = PSHUFDri
+ { 1913, 7, 1, 0, "PSHUFHWmi", 0|(1<<TID::MayLoad), 0x70c02c06ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #1913 = PSHUFHWmi
+ { 1914, 3, 1, 0, "PSHUFHWri", 0, 0x70c02c05ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #1914 = PSHUFHWri
+ { 1915, 7, 1, 0, "PSHUFLWmi", 0|(1<<TID::MayLoad), 0x70c02b06ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #1915 = PSHUFLWmi
+ { 1916, 3, 1, 0, "PSHUFLWri", 0, 0x70c02b05ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #1916 = PSHUFLWri
+ { 1917, 7, 1, 0, "PSIGNBrm128", 0|(1<<TID::MayLoad), 0x8c00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1917 = PSIGNBrm128
+ { 1918, 7, 1, 0, "PSIGNBrm64", 0|(1<<TID::MayLoad), 0x8c00d06ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1918 = PSIGNBrm64
+ { 1919, 3, 1, 0, "PSIGNBrr128", 0, 0x8c00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1919 = PSIGNBrr128
+ { 1920, 3, 1, 0, "PSIGNBrr64", 0, 0x8c00d05ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1920 = PSIGNBrr64
+ { 1921, 7, 1, 0, "PSIGNDrm128", 0|(1<<TID::MayLoad), 0xac00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1921 = PSIGNDrm128
+ { 1922, 7, 1, 0, "PSIGNDrm64", 0|(1<<TID::MayLoad), 0xac00d06ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1922 = PSIGNDrm64
+ { 1923, 3, 1, 0, "PSIGNDrr128", 0, 0xac00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1923 = PSIGNDrr128
+ { 1924, 3, 1, 0, "PSIGNDrr64", 0, 0xac00d05ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1924 = PSIGNDrr64
+ { 1925, 7, 1, 0, "PSIGNWrm128", 0|(1<<TID::MayLoad), 0x9c00d46ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1925 = PSIGNWrm128
+ { 1926, 7, 1, 0, "PSIGNWrm64", 0|(1<<TID::MayLoad), 0x9c00d06ULL, NULL, NULL, NULL, OperandInfo151 }, // Inst #1926 = PSIGNWrm64
+ { 1927, 3, 1, 0, "PSIGNWrr128", 0, 0x9c00d45ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1927 = PSIGNWrr128
+ { 1928, 3, 1, 0, "PSIGNWrr64", 0, 0x9c00d05ULL, NULL, NULL, NULL, OperandInfo152 }, // Inst #1928 = PSIGNWrr64
+ { 1929, 3, 1, 0, "PSLLDQri", 0, 0x73c02157ULL, NULL, NULL, NULL, OperandInfo209 }, // Inst #1929 = PSLLDQri
+ { 1930, 3, 1, 0, "PSLLDri", 0, 0x72c02156ULL, NULL, NULL, NULL, OperandInfo209 }, // Inst #1930 = PSLLDri
+ { 1931, 7, 1, 0, "PSLLDrm", 0|(1<<TID::MayLoad), 0xf2c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1931 = PSLLDrm
+ { 1932, 3, 1, 0, "PSLLDrr", 0, 0xf2c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1932 = PSLLDrr
+ { 1933, 3, 1, 0, "PSLLQri", 0, 0x73c02156ULL, NULL, NULL, NULL, OperandInfo209 }, // Inst #1933 = PSLLQri
+ { 1934, 7, 1, 0, "PSLLQrm", 0|(1<<TID::MayLoad), 0xf3c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1934 = PSLLQrm
+ { 1935, 3, 1, 0, "PSLLQrr", 0, 0xf3c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1935 = PSLLQrr
+ { 1936, 3, 1, 0, "PSLLWri", 0, 0x71c02156ULL, NULL, NULL, NULL, OperandInfo209 }, // Inst #1936 = PSLLWri
+ { 1937, 7, 1, 0, "PSLLWrm", 0|(1<<TID::MayLoad), 0xf1c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1937 = PSLLWrm
+ { 1938, 3, 1, 0, "PSLLWrr", 0, 0xf1c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1938 = PSLLWrr
+ { 1939, 3, 1, 0, "PSRADri", 0, 0x72c02154ULL, NULL, NULL, NULL, OperandInfo209 }, // Inst #1939 = PSRADri
+ { 1940, 7, 1, 0, "PSRADrm", 0|(1<<TID::MayLoad), 0xe2c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1940 = PSRADrm
+ { 1941, 3, 1, 0, "PSRADrr", 0, 0xe2c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1941 = PSRADrr
+ { 1942, 3, 1, 0, "PSRAWri", 0, 0x71c02154ULL, NULL, NULL, NULL, OperandInfo209 }, // Inst #1942 = PSRAWri
+ { 1943, 7, 1, 0, "PSRAWrm", 0|(1<<TID::MayLoad), 0xe1c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1943 = PSRAWrm
+ { 1944, 3, 1, 0, "PSRAWrr", 0, 0xe1c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1944 = PSRAWrr
+ { 1945, 3, 1, 0, "PSRLDQri", 0, 0x73c02153ULL, NULL, NULL, NULL, OperandInfo209 }, // Inst #1945 = PSRLDQri
+ { 1946, 3, 1, 0, "PSRLDri", 0, 0x72c02152ULL, NULL, NULL, NULL, OperandInfo209 }, // Inst #1946 = PSRLDri
+ { 1947, 7, 1, 0, "PSRLDrm", 0|(1<<TID::MayLoad), 0xd2c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1947 = PSRLDrm
+ { 1948, 3, 1, 0, "PSRLDrr", 0, 0xd2c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1948 = PSRLDrr
+ { 1949, 3, 1, 0, "PSRLQri", 0, 0x73c02152ULL, NULL, NULL, NULL, OperandInfo209 }, // Inst #1949 = PSRLQri
+ { 1950, 7, 1, 0, "PSRLQrm", 0|(1<<TID::MayLoad), 0xd3c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1950 = PSRLQrm
+ { 1951, 3, 1, 0, "PSRLQrr", 0, 0xd3c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1951 = PSRLQrr
+ { 1952, 3, 1, 0, "PSRLWri", 0, 0x71c02152ULL, NULL, NULL, NULL, OperandInfo209 }, // Inst #1952 = PSRLWri
+ { 1953, 7, 1, 0, "PSRLWrm", 0|(1<<TID::MayLoad), 0xd1c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1953 = PSRLWrm
+ { 1954, 3, 1, 0, "PSRLWrr", 0, 0xd1c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1954 = PSRLWrr
+ { 1955, 7, 1, 0, "PSUBBrm", 0|(1<<TID::MayLoad), 0xf8c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1955 = PSUBBrm
+ { 1956, 3, 1, 0, "PSUBBrr", 0, 0xf8c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1956 = PSUBBrr
+ { 1957, 7, 1, 0, "PSUBDrm", 0|(1<<TID::MayLoad), 0xfac00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1957 = PSUBDrm
+ { 1958, 3, 1, 0, "PSUBDrr", 0, 0xfac00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1958 = PSUBDrr
+ { 1959, 7, 1, 0, "PSUBQrm", 0|(1<<TID::MayLoad), 0xfbc00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1959 = PSUBQrm
+ { 1960, 3, 1, 0, "PSUBQrr", 0, 0xfbc00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1960 = PSUBQrr
+ { 1961, 7, 1, 0, "PSUBSBrm", 0|(1<<TID::MayLoad), 0xe8c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1961 = PSUBSBrm
+ { 1962, 3, 1, 0, "PSUBSBrr", 0, 0xe8c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1962 = PSUBSBrr
+ { 1963, 7, 1, 0, "PSUBSWrm", 0|(1<<TID::MayLoad), 0xe9c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1963 = PSUBSWrm
+ { 1964, 3, 1, 0, "PSUBSWrr", 0, 0xe9c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1964 = PSUBSWrr
+ { 1965, 7, 1, 0, "PSUBUSBrm", 0|(1<<TID::MayLoad), 0xd8c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1965 = PSUBUSBrm
+ { 1966, 3, 1, 0, "PSUBUSBrr", 0, 0xd8c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1966 = PSUBUSBrr
+ { 1967, 7, 1, 0, "PSUBUSWrm", 0|(1<<TID::MayLoad), 0xd9c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1967 = PSUBUSWrm
+ { 1968, 3, 1, 0, "PSUBUSWrr", 0, 0xd9c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1968 = PSUBUSWrr
+ { 1969, 7, 1, 0, "PSUBWrm", 0|(1<<TID::MayLoad), 0xf9c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1969 = PSUBWrm
+ { 1970, 3, 1, 0, "PSUBWrr", 0, 0xf9c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1970 = PSUBWrr
+ { 1971, 6, 0, 0, "PTESTrm", 0|(1<<TID::MayLoad), 0x17c00d46ULL, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #1971 = PTESTrm
+ { 1972, 2, 0, 0, "PTESTrr", 0, 0x17c00d45ULL, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #1972 = PTESTrr
+ { 1973, 7, 1, 0, "PUNPCKHBWrm", 0|(1<<TID::MayLoad), 0x68c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1973 = PUNPCKHBWrm
+ { 1974, 3, 1, 0, "PUNPCKHBWrr", 0, 0x68c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1974 = PUNPCKHBWrr
+ { 1975, 7, 1, 0, "PUNPCKHDQrm", 0|(1<<TID::MayLoad), 0x6ac00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1975 = PUNPCKHDQrm
+ { 1976, 3, 1, 0, "PUNPCKHDQrr", 0, 0x6ac00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1976 = PUNPCKHDQrr
+ { 1977, 7, 1, 0, "PUNPCKHQDQrm", 0|(1<<TID::MayLoad), 0x6dc00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1977 = PUNPCKHQDQrm
+ { 1978, 3, 1, 0, "PUNPCKHQDQrr", 0, 0x6dc00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1978 = PUNPCKHQDQrr
+ { 1979, 7, 1, 0, "PUNPCKHWDrm", 0|(1<<TID::MayLoad), 0x69c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1979 = PUNPCKHWDrm
+ { 1980, 3, 1, 0, "PUNPCKHWDrr", 0, 0x69c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1980 = PUNPCKHWDrr
+ { 1981, 7, 1, 0, "PUNPCKLBWrm", 0|(1<<TID::MayLoad), 0x60c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1981 = PUNPCKLBWrm
+ { 1982, 3, 1, 0, "PUNPCKLBWrr", 0, 0x60c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1982 = PUNPCKLBWrr
+ { 1983, 7, 1, 0, "PUNPCKLDQrm", 0|(1<<TID::MayLoad), 0x62c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1983 = PUNPCKLDQrm
+ { 1984, 3, 1, 0, "PUNPCKLDQrr", 0, 0x62c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1984 = PUNPCKLDQrr
+ { 1985, 7, 1, 0, "PUNPCKLQDQrm", 0|(1<<TID::MayLoad), 0x6cc00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1985 = PUNPCKLQDQrm
+ { 1986, 3, 1, 0, "PUNPCKLQDQrr", 0, 0x6cc00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1986 = PUNPCKLQDQrr
+ { 1987, 7, 1, 0, "PUNPCKLWDrm", 0|(1<<TID::MayLoad), 0x61c00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #1987 = PUNPCKLWDrm
+ { 1988, 3, 1, 0, "PUNPCKLWDrr", 0, 0x61c00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #1988 = PUNPCKLWDrr
+ { 1989, 1, 0, 0, "PUSH16r", 0|(1<<TID::MayStore), 0x50000042ULL, ImplicitList2, ImplicitList2, NULL, OperandInfo106 }, // Inst #1989 = PUSH16r
+ { 1990, 5, 0, 0, "PUSH16rmm", 0|(1<<TID::MayStore), 0xff00005eULL, ImplicitList2, ImplicitList2, NULL, OperandInfo34 }, // Inst #1990 = PUSH16rmm
+ { 1991, 1, 0, 0, "PUSH16rmr", 0|(1<<TID::MayStore), 0xff000056ULL, ImplicitList2, ImplicitList2, NULL, OperandInfo106 }, // Inst #1991 = PUSH16rmr
+ { 1992, 1, 0, 0, "PUSH32r", 0|(1<<TID::MayStore), 0x50000002ULL, ImplicitList2, ImplicitList2, NULL, OperandInfo66 }, // Inst #1992 = PUSH32r
+ { 1993, 5, 0, 0, "PUSH32rmm", 0|(1<<TID::MayStore), 0xff00001eULL, ImplicitList2, ImplicitList2, NULL, OperandInfo34 }, // Inst #1993 = PUSH32rmm
+ { 1994, 1, 0, 0, "PUSH32rmr", 0|(1<<TID::MayStore), 0xff000016ULL, ImplicitList2, ImplicitList2, NULL, OperandInfo66 }, // Inst #1994 = PUSH32rmr
+ { 1995, 1, 0, 0, "PUSH64i16", 0|(1<<TID::MayStore), 0x68006001ULL, ImplicitList4, ImplicitList4, NULL, OperandInfo2 }, // Inst #1995 = PUSH64i16
+ { 1996, 1, 0, 0, "PUSH64i32", 0|(1<<TID::MayStore), 0x6800a001ULL, ImplicitList4, ImplicitList4, NULL, OperandInfo2 }, // Inst #1996 = PUSH64i32
+ { 1997, 1, 0, 0, "PUSH64i8", 0|(1<<TID::MayStore), 0x6a002001ULL, ImplicitList4, ImplicitList4, NULL, OperandInfo2 }, // Inst #1997 = PUSH64i8
+ { 1998, 1, 0, 0, "PUSH64r", 0|(1<<TID::MayStore), 0x50000002ULL, ImplicitList4, ImplicitList4, NULL, OperandInfo67 }, // Inst #1998 = PUSH64r
+ { 1999, 5, 0, 0, "PUSH64rmm", 0|(1<<TID::MayStore), 0xff00001eULL, ImplicitList4, ImplicitList4, NULL, OperandInfo34 }, // Inst #1999 = PUSH64rmm
+ { 2000, 1, 0, 0, "PUSH64rmr", 0|(1<<TID::MayStore), 0xff000016ULL, ImplicitList4, ImplicitList4, NULL, OperandInfo67 }, // Inst #2000 = PUSH64rmr
+ { 2001, 0, 0, 0, "PUSHA32", 0|(1<<TID::MayStore), 0x60000001ULL, ImplicitList44, ImplicitList2, NULL, 0 }, // Inst #2001 = PUSHA32
+ { 2002, 0, 0, 0, "PUSHF16", 0|(1<<TID::MayStore), 0x9c000041ULL, ImplicitList3, ImplicitList2, NULL, 0 }, // Inst #2002 = PUSHF16
+ { 2003, 0, 0, 0, "PUSHF32", 0|(1<<TID::MayStore), 0x9c000001ULL, ImplicitList3, ImplicitList2, NULL, 0 }, // Inst #2003 = PUSHF32
+ { 2004, 0, 0, 0, "PUSHF64", 0|(1<<TID::MayStore), 0x9c000001ULL, ImplicitList5, ImplicitList4, NULL, 0 }, // Inst #2004 = PUSHF64
+ { 2005, 0, 0, 0, "PUSHFS16", 0|(1<<TID::UnmodeledSideEffects), 0xa0000141ULL, NULL, NULL, NULL, 0 }, // Inst #2005 = PUSHFS16
+ { 2006, 0, 0, 0, "PUSHFS32", 0|(1<<TID::UnmodeledSideEffects), 0xa0000101ULL, NULL, NULL, NULL, 0 }, // Inst #2006 = PUSHFS32
+ { 2007, 0, 0, 0, "PUSHFS64", 0|(1<<TID::UnmodeledSideEffects), 0xa0000101ULL, NULL, NULL, NULL, 0 }, // Inst #2007 = PUSHFS64
+ { 2008, 0, 0, 0, "PUSHGS16", 0|(1<<TID::UnmodeledSideEffects), 0xa8000141ULL, NULL, NULL, NULL, 0 }, // Inst #2008 = PUSHGS16
+ { 2009, 0, 0, 0, "PUSHGS32", 0|(1<<TID::UnmodeledSideEffects), 0xa8000101ULL, NULL, NULL, NULL, 0 }, // Inst #2009 = PUSHGS32
+ { 2010, 0, 0, 0, "PUSHGS64", 0|(1<<TID::UnmodeledSideEffects), 0xa8000101ULL, NULL, NULL, NULL, 0 }, // Inst #2010 = PUSHGS64
+ { 2011, 1, 0, 0, "PUSHi16", 0|(1<<TID::MayStore), 0x68006041ULL, ImplicitList2, ImplicitList2, NULL, OperandInfo2 }, // Inst #2011 = PUSHi16
+ { 2012, 1, 0, 0, "PUSHi32", 0|(1<<TID::MayStore), 0x6800a001ULL, ImplicitList2, ImplicitList2, NULL, OperandInfo2 }, // Inst #2012 = PUSHi32
+ { 2013, 1, 0, 0, "PUSHi8", 0|(1<<TID::MayStore), 0x6a002001ULL, ImplicitList2, ImplicitList2, NULL, OperandInfo2 }, // Inst #2013 = PUSHi8
+ { 2014, 7, 1, 0, "PXORrm", 0|(1<<TID::MayLoad), 0xefc00146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #2014 = PXORrm
+ { 2015, 3, 1, 0, "PXORrr", 0|(1<<TID::Commutable), 0xefc00145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #2015 = PXORrr
+ { 2016, 5, 0, 0, "RCL16m1", 0|(1<<TID::UnmodeledSideEffects), 0xd100005aULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2016 = RCL16m1
+ { 2017, 5, 0, 0, "RCL16mCL", 0|(1<<TID::UnmodeledSideEffects), 0xd300005aULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2017 = RCL16mCL
+ { 2018, 6, 0, 0, "RCL16mi", 0|(1<<TID::UnmodeledSideEffects), 0xc100205aULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2018 = RCL16mi
+ { 2019, 2, 1, 0, "RCL16r1", 0|(1<<TID::UnmodeledSideEffects), 0xd1000052ULL, NULL, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #2019 = RCL16r1
+ { 2020, 2, 1, 0, "RCL16rCL", 0|(1<<TID::UnmodeledSideEffects), 0xd3000052ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #2020 = RCL16rCL
+ { 2021, 3, 1, 0, "RCL16ri", 0|(1<<TID::UnmodeledSideEffects), 0xc1002052ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2021 = RCL16ri
+ { 2022, 5, 0, 0, "RCL32m1", 0|(1<<TID::UnmodeledSideEffects), 0xd100001aULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2022 = RCL32m1
+ { 2023, 5, 0, 0, "RCL32mCL", 0|(1<<TID::UnmodeledSideEffects), 0xd300001aULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2023 = RCL32mCL
+ { 2024, 6, 0, 0, "RCL32mi", 0|(1<<TID::UnmodeledSideEffects), 0xc100201aULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2024 = RCL32mi
+ { 2025, 2, 1, 0, "RCL32r1", 0|(1<<TID::UnmodeledSideEffects), 0xd1000012ULL, NULL, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #2025 = RCL32r1
+ { 2026, 2, 1, 0, "RCL32rCL", 0|(1<<TID::UnmodeledSideEffects), 0xd3000012ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #2026 = RCL32rCL
+ { 2027, 3, 1, 0, "RCL32ri", 0|(1<<TID::UnmodeledSideEffects), 0xc1002012ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2027 = RCL32ri
+ { 2028, 5, 0, 0, "RCL64m1", 0|(1<<TID::UnmodeledSideEffects), 0xd100101aULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2028 = RCL64m1
+ { 2029, 5, 0, 0, "RCL64mCL", 0|(1<<TID::UnmodeledSideEffects), 0xd300101aULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2029 = RCL64mCL
+ { 2030, 6, 0, 0, "RCL64mi", 0|(1<<TID::UnmodeledSideEffects), 0xc100301aULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2030 = RCL64mi
+ { 2031, 2, 1, 0, "RCL64r1", 0|(1<<TID::UnmodeledSideEffects), 0xd1001012ULL, NULL, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #2031 = RCL64r1
+ { 2032, 2, 1, 0, "RCL64rCL", 0|(1<<TID::UnmodeledSideEffects), 0xd3001012ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #2032 = RCL64rCL
+ { 2033, 3, 1, 0, "RCL64ri", 0|(1<<TID::UnmodeledSideEffects), 0xc1003012ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #2033 = RCL64ri
+ { 2034, 5, 0, 0, "RCL8m1", 0|(1<<TID::UnmodeledSideEffects), 0xd000001aULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2034 = RCL8m1
+ { 2035, 5, 0, 0, "RCL8mCL", 0|(1<<TID::UnmodeledSideEffects), 0xd200001aULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2035 = RCL8mCL
+ { 2036, 6, 0, 0, "RCL8mi", 0|(1<<TID::UnmodeledSideEffects), 0xc000201aULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2036 = RCL8mi
+ { 2037, 2, 1, 0, "RCL8r1", 0|(1<<TID::UnmodeledSideEffects), 0xd0000012ULL, NULL, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #2037 = RCL8r1
+ { 2038, 2, 1, 0, "RCL8rCL", 0|(1<<TID::UnmodeledSideEffects), 0xd2000012ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #2038 = RCL8rCL
+ { 2039, 3, 1, 0, "RCL8ri", 0|(1<<TID::UnmodeledSideEffects), 0xc0002012ULL, NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #2039 = RCL8ri
+ { 2040, 6, 1, 0, "RCPPSm", 0|(1<<TID::MayLoad), 0x53400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2040 = RCPPSm
+ { 2041, 6, 1, 0, "RCPPSm_Int", 0|(1<<TID::MayLoad), 0x53400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2041 = RCPPSm_Int
+ { 2042, 2, 1, 0, "RCPPSr", 0, 0x53400105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2042 = RCPPSr
+ { 2043, 2, 1, 0, "RCPPSr_Int", 0, 0x53400105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2043 = RCPPSr_Int
+ { 2044, 6, 1, 0, "RCPSSm", 0|(1<<TID::MayLoad), 0x53000c06ULL, NULL, NULL, NULL, OperandInfo92 }, // Inst #2044 = RCPSSm
+ { 2045, 6, 1, 0, "RCPSSm_Int", 0|(1<<TID::MayLoad), 0x53000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2045 = RCPSSm_Int
+ { 2046, 2, 1, 0, "RCPSSr", 0, 0x53000c05ULL, NULL, NULL, NULL, OperandInfo119 }, // Inst #2046 = RCPSSr
+ { 2047, 2, 1, 0, "RCPSSr_Int", 0, 0x53000c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2047 = RCPSSr_Int
+ { 2048, 5, 0, 0, "RCR16m1", 0|(1<<TID::UnmodeledSideEffects), 0xd100005bULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2048 = RCR16m1
+ { 2049, 5, 0, 0, "RCR16mCL", 0|(1<<TID::UnmodeledSideEffects), 0xd300005bULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2049 = RCR16mCL
+ { 2050, 6, 0, 0, "RCR16mi", 0|(1<<TID::UnmodeledSideEffects), 0xc100205bULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2050 = RCR16mi
+ { 2051, 2, 1, 0, "RCR16r1", 0|(1<<TID::UnmodeledSideEffects), 0xd1000053ULL, NULL, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #2051 = RCR16r1
+ { 2052, 2, 1, 0, "RCR16rCL", 0|(1<<TID::UnmodeledSideEffects), 0xd3000053ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #2052 = RCR16rCL
+ { 2053, 3, 1, 0, "RCR16ri", 0|(1<<TID::UnmodeledSideEffects), 0xc1002053ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2053 = RCR16ri
+ { 2054, 5, 0, 0, "RCR32m1", 0|(1<<TID::UnmodeledSideEffects), 0xd100001bULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2054 = RCR32m1
+ { 2055, 5, 0, 0, "RCR32mCL", 0|(1<<TID::UnmodeledSideEffects), 0xd300001bULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2055 = RCR32mCL
+ { 2056, 6, 0, 0, "RCR32mi", 0|(1<<TID::UnmodeledSideEffects), 0xc100201bULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2056 = RCR32mi
+ { 2057, 2, 1, 0, "RCR32r1", 0|(1<<TID::UnmodeledSideEffects), 0xd1000013ULL, NULL, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #2057 = RCR32r1
+ { 2058, 2, 1, 0, "RCR32rCL", 0|(1<<TID::UnmodeledSideEffects), 0xd3000013ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #2058 = RCR32rCL
+ { 2059, 3, 1, 0, "RCR32ri", 0|(1<<TID::UnmodeledSideEffects), 0xc1002013ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2059 = RCR32ri
+ { 2060, 5, 0, 0, "RCR64m1", 0|(1<<TID::UnmodeledSideEffects), 0xd100101bULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2060 = RCR64m1
+ { 2061, 5, 0, 0, "RCR64mCL", 0|(1<<TID::UnmodeledSideEffects), 0xd300101bULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2061 = RCR64mCL
+ { 2062, 6, 0, 0, "RCR64mi", 0|(1<<TID::UnmodeledSideEffects), 0xc100301bULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2062 = RCR64mi
+ { 2063, 2, 1, 0, "RCR64r1", 0|(1<<TID::UnmodeledSideEffects), 0xd1001013ULL, NULL, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #2063 = RCR64r1
+ { 2064, 2, 1, 0, "RCR64rCL", 0|(1<<TID::UnmodeledSideEffects), 0xd3001013ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #2064 = RCR64rCL
+ { 2065, 3, 1, 0, "RCR64ri", 0|(1<<TID::UnmodeledSideEffects), 0xc1003013ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #2065 = RCR64ri
+ { 2066, 5, 0, 0, "RCR8m1", 0|(1<<TID::UnmodeledSideEffects), 0xd000001bULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2066 = RCR8m1
+ { 2067, 5, 0, 0, "RCR8mCL", 0|(1<<TID::UnmodeledSideEffects), 0xd200001bULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2067 = RCR8mCL
+ { 2068, 6, 0, 0, "RCR8mi", 0|(1<<TID::UnmodeledSideEffects), 0xc000201bULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2068 = RCR8mi
+ { 2069, 2, 1, 0, "RCR8r1", 0|(1<<TID::UnmodeledSideEffects), 0xd0000013ULL, NULL, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #2069 = RCR8r1
+ { 2070, 2, 1, 0, "RCR8rCL", 0|(1<<TID::UnmodeledSideEffects), 0xd2000013ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #2070 = RCR8rCL
+ { 2071, 3, 1, 0, "RCR8ri", 0|(1<<TID::UnmodeledSideEffects), 0xc0002013ULL, NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #2071 = RCR8ri
+ { 2072, 0, 0, 0, "RDMSR", 0|(1<<TID::UnmodeledSideEffects), 0x32000101ULL, NULL, NULL, NULL, 0 }, // Inst #2072 = RDMSR
+ { 2073, 0, 0, 0, "RDPMC", 0|(1<<TID::UnmodeledSideEffects), 0x33000101ULL, NULL, NULL, NULL, 0 }, // Inst #2073 = RDPMC
+ { 2074, 0, 0, 0, "RDTSC", 0|(1<<TID::UnmodeledSideEffects), 0x31000101ULL, NULL, ImplicitList19, NULL, 0 }, // Inst #2074 = RDTSC
+ { 2075, 0, 0, 0, "RDTSCP", 0|(1<<TID::UnmodeledSideEffects), 0x100012aULL, NULL, ImplicitList46, NULL, 0 }, // Inst #2075 = RDTSCP
+ { 2076, 0, 0, 0, "REPNE_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0xf2000001ULL, ImplicitList42, ImplicitList26, NULL, 0 }, // Inst #2076 = REPNE_PREFIX
+ { 2077, 0, 0, 0, "REP_MOVSB", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xa4000201ULL, ImplicitList47, ImplicitList47, NULL, 0 }, // Inst #2077 = REP_MOVSB
+ { 2078, 0, 0, 0, "REP_MOVSD", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xa5000201ULL, ImplicitList47, ImplicitList47, NULL, 0 }, // Inst #2078 = REP_MOVSD
+ { 2079, 0, 0, 0, "REP_MOVSQ", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xa5001201ULL, ImplicitList48, ImplicitList48, NULL, 0 }, // Inst #2079 = REP_MOVSQ
+ { 2080, 0, 0, 0, "REP_MOVSW", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xa5000241ULL, ImplicitList47, ImplicitList47, NULL, 0 }, // Inst #2080 = REP_MOVSW
+ { 2081, 0, 0, 0, "REP_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0xf3000001ULL, ImplicitList42, ImplicitList26, NULL, 0 }, // Inst #2081 = REP_PREFIX
+ { 2082, 0, 0, 0, "REP_STOSB", 0|(1<<TID::MayStore), 0xaa000201ULL, ImplicitList49, ImplicitList50, NULL, 0 }, // Inst #2082 = REP_STOSB
+ { 2083, 0, 0, 0, "REP_STOSD", 0|(1<<TID::MayStore), 0xab000201ULL, ImplicitList51, ImplicitList50, NULL, 0 }, // Inst #2083 = REP_STOSD
+ { 2084, 0, 0, 0, "REP_STOSQ", 0|(1<<TID::MayStore), 0xab001201ULL, ImplicitList52, ImplicitList53, NULL, 0 }, // Inst #2084 = REP_STOSQ
+ { 2085, 0, 0, 0, "REP_STOSW", 0|(1<<TID::MayStore), 0xab000241ULL, ImplicitList54, ImplicitList50, NULL, 0 }, // Inst #2085 = REP_STOSW
+ { 2086, 0, 0, 0, "RET", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::Variadic), 0xc3070001ULL, NULL, NULL, NULL, 0 }, // Inst #2086 = RET
+ { 2087, 1, 0, 0, "RETI", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::Variadic), 0xc2076001ULL, NULL, NULL, NULL, OperandInfo2 }, // Inst #2087 = RETI
+ { 2088, 5, 0, 0, "ROL16m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd1000058ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2088 = ROL16m1
+ { 2089, 5, 0, 0, "ROL16mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd3000058ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2089 = ROL16mCL
+ { 2090, 6, 0, 0, "ROL16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc1002058ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2090 = ROL16mi
+ { 2091, 2, 1, 0, "ROL16r1", 0, 0xd1000050ULL, NULL, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #2091 = ROL16r1
+ { 2092, 2, 1, 0, "ROL16rCL", 0, 0xd3000050ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #2092 = ROL16rCL
+ { 2093, 3, 1, 0, "ROL16ri", 0, 0xc1002050ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2093 = ROL16ri
+ { 2094, 5, 0, 0, "ROL32m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd1000018ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2094 = ROL32m1
+ { 2095, 5, 0, 0, "ROL32mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd3000018ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2095 = ROL32mCL
+ { 2096, 6, 0, 0, "ROL32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc1002018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2096 = ROL32mi
+ { 2097, 2, 1, 0, "ROL32r1", 0, 0xd1000010ULL, NULL, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #2097 = ROL32r1
+ { 2098, 2, 1, 0, "ROL32rCL", 0, 0xd3000010ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #2098 = ROL32rCL
+ { 2099, 3, 1, 0, "ROL32ri", 0, 0xc1002010ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2099 = ROL32ri
+ { 2100, 5, 0, 0, "ROL64m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd1001018ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2100 = ROL64m1
+ { 2101, 5, 0, 0, "ROL64mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd3001018ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2101 = ROL64mCL
+ { 2102, 6, 0, 0, "ROL64mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc1003018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2102 = ROL64mi
+ { 2103, 2, 1, 0, "ROL64r1", 0, 0xd1001010ULL, NULL, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #2103 = ROL64r1
+ { 2104, 2, 1, 0, "ROL64rCL", 0, 0xd3001010ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #2104 = ROL64rCL
+ { 2105, 3, 1, 0, "ROL64ri", 0, 0xc1003010ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #2105 = ROL64ri
+ { 2106, 5, 0, 0, "ROL8m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd0000018ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2106 = ROL8m1
+ { 2107, 5, 0, 0, "ROL8mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd2000018ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2107 = ROL8mCL
+ { 2108, 6, 0, 0, "ROL8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc0002018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2108 = ROL8mi
+ { 2109, 2, 1, 0, "ROL8r1", 0, 0xd0000010ULL, NULL, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #2109 = ROL8r1
+ { 2110, 2, 1, 0, "ROL8rCL", 0, 0xd2000010ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #2110 = ROL8rCL
+ { 2111, 3, 1, 0, "ROL8ri", 0, 0xc0002010ULL, NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #2111 = ROL8ri
+ { 2112, 5, 0, 0, "ROR16m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd1000059ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2112 = ROR16m1
+ { 2113, 5, 0, 0, "ROR16mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd3000059ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2113 = ROR16mCL
+ { 2114, 6, 0, 0, "ROR16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc1002059ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2114 = ROR16mi
+ { 2115, 2, 1, 0, "ROR16r1", 0, 0xd1000051ULL, NULL, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #2115 = ROR16r1
+ { 2116, 2, 1, 0, "ROR16rCL", 0, 0xd3000051ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #2116 = ROR16rCL
+ { 2117, 3, 1, 0, "ROR16ri", 0, 0xc1002051ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2117 = ROR16ri
+ { 2118, 5, 0, 0, "ROR32m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd1000019ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2118 = ROR32m1
+ { 2119, 5, 0, 0, "ROR32mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd3000019ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2119 = ROR32mCL
+ { 2120, 6, 0, 0, "ROR32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc1002019ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2120 = ROR32mi
+ { 2121, 2, 1, 0, "ROR32r1", 0, 0xd1000011ULL, NULL, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #2121 = ROR32r1
+ { 2122, 2, 1, 0, "ROR32rCL", 0, 0xd3000011ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #2122 = ROR32rCL
+ { 2123, 3, 1, 0, "ROR32ri", 0, 0xc1002011ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2123 = ROR32ri
+ { 2124, 5, 0, 0, "ROR64m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd1001019ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2124 = ROR64m1
+ { 2125, 5, 0, 0, "ROR64mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd3001019ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2125 = ROR64mCL
+ { 2126, 6, 0, 0, "ROR64mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc1003019ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2126 = ROR64mi
+ { 2127, 2, 1, 0, "ROR64r1", 0, 0xd1001011ULL, NULL, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #2127 = ROR64r1
+ { 2128, 2, 1, 0, "ROR64rCL", 0, 0xd3001011ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #2128 = ROR64rCL
+ { 2129, 3, 1, 0, "ROR64ri", 0, 0xc1003011ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #2129 = ROR64ri
+ { 2130, 5, 0, 0, "ROR8m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd0000019ULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2130 = ROR8m1
+ { 2131, 5, 0, 0, "ROR8mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd2000019ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2131 = ROR8mCL
+ { 2132, 6, 0, 0, "ROR8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc0002019ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2132 = ROR8mi
+ { 2133, 2, 1, 0, "ROR8r1", 0, 0xd0000011ULL, NULL, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #2133 = ROR8r1
+ { 2134, 2, 1, 0, "ROR8rCL", 0, 0xd2000011ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #2134 = ROR8rCL
+ { 2135, 3, 1, 0, "ROR8ri", 0, 0xc0002011ULL, NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #2135 = ROR8ri
+ { 2136, 7, 1, 0, "ROUNDPDm_Int", 0|(1<<TID::MayLoad), 0x9c02e46ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #2136 = ROUNDPDm_Int
+ { 2137, 3, 1, 0, "ROUNDPDr_Int", 0, 0x9c02e45ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #2137 = ROUNDPDr_Int
+ { 2138, 7, 1, 0, "ROUNDPSm_Int", 0|(1<<TID::MayLoad), 0x8002e46ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #2138 = ROUNDPSm_Int
+ { 2139, 3, 1, 0, "ROUNDPSr_Int", 0, 0x8c02e45ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #2139 = ROUNDPSr_Int
+ { 2140, 8, 1, 0, "ROUNDSDm_Int", 0|(1<<TID::MayLoad), 0xbc02e46ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #2140 = ROUNDSDm_Int
+ { 2141, 4, 1, 0, "ROUNDSDr_Int", 0, 0xbc02e45ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #2141 = ROUNDSDr_Int
+ { 2142, 8, 1, 0, "ROUNDSSm_Int", 0|(1<<TID::MayLoad), 0xac02e46ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #2142 = ROUNDSSm_Int
+ { 2143, 4, 1, 0, "ROUNDSSr_Int", 0, 0xac02e45ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #2143 = ROUNDSSr_Int
+ { 2144, 0, 0, 0, "RSM", 0|(1<<TID::UnmodeledSideEffects), 0xaa000101ULL, NULL, NULL, NULL, 0 }, // Inst #2144 = RSM
+ { 2145, 6, 1, 0, "RSQRTPSm", 0|(1<<TID::MayLoad), 0x52400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2145 = RSQRTPSm
+ { 2146, 6, 1, 0, "RSQRTPSm_Int", 0|(1<<TID::MayLoad), 0x52400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2146 = RSQRTPSm_Int
+ { 2147, 2, 1, 0, "RSQRTPSr", 0, 0x52400105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2147 = RSQRTPSr
+ { 2148, 2, 1, 0, "RSQRTPSr_Int", 0, 0x52400105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2148 = RSQRTPSr_Int
+ { 2149, 6, 1, 0, "RSQRTSSm", 0|(1<<TID::MayLoad), 0x52000c06ULL, NULL, NULL, NULL, OperandInfo92 }, // Inst #2149 = RSQRTSSm
+ { 2150, 6, 1, 0, "RSQRTSSm_Int", 0|(1<<TID::MayLoad), 0x52000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2150 = RSQRTSSm_Int
+ { 2151, 2, 1, 0, "RSQRTSSr", 0, 0x52000c05ULL, NULL, NULL, NULL, OperandInfo119 }, // Inst #2151 = RSQRTSSr
+ { 2152, 2, 1, 0, "RSQRTSSr_Int", 0, 0x52000c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2152 = RSQRTSSr_Int
+ { 2153, 0, 0, 0, "SAHF", 0, 0x9e000001ULL, ImplicitList27, ImplicitList1, Barriers1, 0 }, // Inst #2153 = SAHF
+ { 2154, 5, 0, 0, "SAR16m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd100005fULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2154 = SAR16m1
+ { 2155, 5, 0, 0, "SAR16mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd300005fULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2155 = SAR16mCL
+ { 2156, 6, 0, 0, "SAR16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc100205fULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2156 = SAR16mi
+ { 2157, 2, 1, 0, "SAR16r1", 0, 0xd1000057ULL, NULL, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #2157 = SAR16r1
+ { 2158, 2, 1, 0, "SAR16rCL", 0, 0xd3000057ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #2158 = SAR16rCL
+ { 2159, 3, 1, 0, "SAR16ri", 0, 0xc1002057ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2159 = SAR16ri
+ { 2160, 5, 0, 0, "SAR32m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd100001fULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2160 = SAR32m1
+ { 2161, 5, 0, 0, "SAR32mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd300001fULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2161 = SAR32mCL
+ { 2162, 6, 0, 0, "SAR32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc100201fULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2162 = SAR32mi
+ { 2163, 2, 1, 0, "SAR32r1", 0, 0xd1000017ULL, NULL, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #2163 = SAR32r1
+ { 2164, 2, 1, 0, "SAR32rCL", 0, 0xd3000017ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #2164 = SAR32rCL
+ { 2165, 3, 1, 0, "SAR32ri", 0, 0xc1002017ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2165 = SAR32ri
+ { 2166, 5, 0, 0, "SAR64m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd100101fULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2166 = SAR64m1
+ { 2167, 5, 0, 0, "SAR64mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd300101fULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2167 = SAR64mCL
+ { 2168, 6, 0, 0, "SAR64mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc100301fULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2168 = SAR64mi
+ { 2169, 2, 1, 0, "SAR64r1", 0, 0xd1001017ULL, NULL, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #2169 = SAR64r1
+ { 2170, 2, 1, 0, "SAR64rCL", 0, 0xd3001017ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #2170 = SAR64rCL
+ { 2171, 3, 1, 0, "SAR64ri", 0, 0xc1003017ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #2171 = SAR64ri
+ { 2172, 5, 0, 0, "SAR8m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd000001fULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2172 = SAR8m1
+ { 2173, 5, 0, 0, "SAR8mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd200001fULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2173 = SAR8mCL
+ { 2174, 6, 0, 0, "SAR8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc000201fULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2174 = SAR8mi
+ { 2175, 2, 1, 0, "SAR8r1", 0, 0xd0000017ULL, NULL, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #2175 = SAR8r1
+ { 2176, 2, 1, 0, "SAR8rCL", 0, 0xd2000017ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #2176 = SAR8rCL
+ { 2177, 3, 1, 0, "SAR8ri", 0, 0xc0002017ULL, NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #2177 = SAR8ri
+ { 2178, 1, 0, 0, "SBB16i16", 0|(1<<TID::UnmodeledSideEffects), 0x1d006041ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #2178 = SBB16i16
+ { 2179, 6, 0, 0, "SBB16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100605bULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2179 = SBB16mi
+ { 2180, 6, 0, 0, "SBB16mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8300205bULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2180 = SBB16mi8
+ { 2181, 6, 0, 0, "SBB16mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x19000044ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #2181 = SBB16mr
+ { 2182, 3, 1, 0, "SBB16ri", 0, 0x81006053ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2182 = SBB16ri
+ { 2183, 3, 1, 0, "SBB16ri8", 0, 0x83002053ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2183 = SBB16ri8
+ { 2184, 7, 1, 0, "SBB16rm", 0|(1<<TID::MayLoad), 0x1b000046ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #2184 = SBB16rm
+ { 2185, 3, 1, 0, "SBB16rr", 0, 0x19000043ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #2185 = SBB16rr
+ { 2186, 3, 1, 0, "SBB16rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x1b000045ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #2186 = SBB16rr_REV
+ { 2187, 1, 0, 0, "SBB32i32", 0|(1<<TID::UnmodeledSideEffects), 0x1d00a001ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #2187 = SBB32i32
+ { 2188, 6, 0, 0, "SBB32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100a01bULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2188 = SBB32mi
+ { 2189, 6, 0, 0, "SBB32mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8300201bULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2189 = SBB32mi8
+ { 2190, 6, 0, 0, "SBB32mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x19000004ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #2190 = SBB32mr
+ { 2191, 3, 1, 0, "SBB32ri", 0, 0x8100a013ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2191 = SBB32ri
+ { 2192, 3, 1, 0, "SBB32ri8", 0, 0x83002013ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2192 = SBB32ri8
+ { 2193, 7, 1, 0, "SBB32rm", 0|(1<<TID::MayLoad), 0x1b000006ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #2193 = SBB32rm
+ { 2194, 3, 1, 0, "SBB32rr", 0, 0x19000003ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #2194 = SBB32rr
+ { 2195, 3, 1, 0, "SBB32rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x1b000005ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #2195 = SBB32rr_REV
+ { 2196, 1, 0, 0, "SBB64i32", 0|(1<<TID::UnmodeledSideEffects), 0x1d00b001ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #2196 = SBB64i32
+ { 2197, 6, 0, 0, "SBB64mi32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100b01bULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2197 = SBB64mi32
+ { 2198, 6, 0, 0, "SBB64mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8300301bULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2198 = SBB64mi8
+ { 2199, 6, 0, 0, "SBB64mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x19001004ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #2199 = SBB64mr
+ { 2200, 3, 1, 0, "SBB64ri32", 0, 0x8100b013ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #2200 = SBB64ri32
+ { 2201, 3, 1, 0, "SBB64ri8", 0, 0x83003013ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #2201 = SBB64ri8
+ { 2202, 7, 1, 0, "SBB64rm", 0|(1<<TID::MayLoad), 0x1b001006ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #2202 = SBB64rm
+ { 2203, 3, 1, 0, "SBB64rr", 0, 0x19001003ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #2203 = SBB64rr
+ { 2204, 3, 1, 0, "SBB64rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x1b001005ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #2204 = SBB64rr_REV
+ { 2205, 1, 0, 0, "SBB8i8", 0|(1<<TID::UnmodeledSideEffects), 0x1c002001ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #2205 = SBB8i8
+ { 2206, 6, 0, 0, "SBB8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8000201bULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2206 = SBB8mi
+ { 2207, 6, 0, 0, "SBB8mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x18000004ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo24 }, // Inst #2207 = SBB8mr
+ { 2208, 3, 1, 0, "SBB8ri", 0, 0x80002013ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #2208 = SBB8ri
+ { 2209, 7, 1, 0, "SBB8rm", 0|(1<<TID::MayLoad), 0x1a000006ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #2209 = SBB8rm
+ { 2210, 3, 1, 0, "SBB8rr", 0, 0x18000003ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #2210 = SBB8rr
+ { 2211, 3, 1, 0, "SBB8rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x1a000005ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #2211 = SBB8rr_REV
+ { 2212, 0, 0, 0, "SCAS16", 0|(1<<TID::UnmodeledSideEffects), 0xaf000041ULL, NULL, NULL, NULL, 0 }, // Inst #2212 = SCAS16
+ { 2213, 0, 0, 0, "SCAS32", 0|(1<<TID::UnmodeledSideEffects), 0xaf000001ULL, NULL, NULL, NULL, 0 }, // Inst #2213 = SCAS32
+ { 2214, 0, 0, 0, "SCAS64", 0|(1<<TID::UnmodeledSideEffects), 0xaf001001ULL, NULL, NULL, NULL, 0 }, // Inst #2214 = SCAS64
+ { 2215, 0, 0, 0, "SCAS8", 0|(1<<TID::UnmodeledSideEffects), 0xae000001ULL, NULL, NULL, NULL, 0 }, // Inst #2215 = SCAS8
+ { 2216, 5, 0, 0, "SETAEm", 0|(1<<TID::MayStore), 0x93000118ULL, ImplicitList1, NULL, NULL, OperandInfo34 }, // Inst #2216 = SETAEm
+ { 2217, 1, 1, 0, "SETAEr", 0, 0x93000110ULL, ImplicitList1, NULL, NULL, OperandInfo107 }, // Inst #2217 = SETAEr
+ { 2218, 5, 0, 0, "SETAm", 0|(1<<TID::MayStore), 0x97000118ULL, ImplicitList1, NULL, NULL, OperandInfo34 }, // Inst #2218 = SETAm
+ { 2219, 1, 1, 0, "SETAr", 0, 0x97000110ULL, ImplicitList1, NULL, NULL, OperandInfo107 }, // Inst #2219 = SETAr
+ { 2220, 5, 0, 0, "SETBEm", 0|(1<<TID::MayStore), 0x96000118ULL, ImplicitList1, NULL, NULL, OperandInfo34 }, // Inst #2220 = SETBEm
+ { 2221, 1, 1, 0, "SETBEr", 0, 0x96000110ULL, ImplicitList1, NULL, NULL, OperandInfo107 }, // Inst #2221 = SETBEr
+ { 2222, 1, 1, 0, "SETB_C16r", 0, 0x19000060ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo106 }, // Inst #2222 = SETB_C16r
+ { 2223, 1, 1, 0, "SETB_C32r", 0, 0x19000020ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo66 }, // Inst #2223 = SETB_C32r
+ { 2224, 1, 1, 0, "SETB_C64r", 0, 0x19001020ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo67 }, // Inst #2224 = SETB_C64r
+ { 2225, 1, 1, 0, "SETB_C8r", 0, 0x18000020ULL, ImplicitList1, ImplicitList1, Barriers1, OperandInfo107 }, // Inst #2225 = SETB_C8r
+ { 2226, 5, 0, 0, "SETBm", 0|(1<<TID::MayStore), 0x92000118ULL, ImplicitList1, NULL, NULL, OperandInfo34 }, // Inst #2226 = SETBm
+ { 2227, 1, 1, 0, "SETBr", 0, 0x92000110ULL, ImplicitList1, NULL, NULL, OperandInfo107 }, // Inst #2227 = SETBr
+ { 2228, 5, 0, 0, "SETEm", 0|(1<<TID::MayStore), 0x94000118ULL, ImplicitList1, NULL, NULL, OperandInfo34 }, // Inst #2228 = SETEm
+ { 2229, 1, 1, 0, "SETEr", 0, 0x94000110ULL, ImplicitList1, NULL, NULL, OperandInfo107 }, // Inst #2229 = SETEr
+ { 2230, 5, 0, 0, "SETGEm", 0|(1<<TID::MayStore), 0x9d000118ULL, ImplicitList1, NULL, NULL, OperandInfo34 }, // Inst #2230 = SETGEm
+ { 2231, 1, 1, 0, "SETGEr", 0, 0x9d000110ULL, ImplicitList1, NULL, NULL, OperandInfo107 }, // Inst #2231 = SETGEr
+ { 2232, 5, 0, 0, "SETGm", 0|(1<<TID::MayStore), 0x9f000118ULL, ImplicitList1, NULL, NULL, OperandInfo34 }, // Inst #2232 = SETGm
+ { 2233, 1, 1, 0, "SETGr", 0, 0x9f000110ULL, ImplicitList1, NULL, NULL, OperandInfo107 }, // Inst #2233 = SETGr
+ { 2234, 5, 0, 0, "SETLEm", 0|(1<<TID::MayStore), 0x9e000118ULL, ImplicitList1, NULL, NULL, OperandInfo34 }, // Inst #2234 = SETLEm
+ { 2235, 1, 1, 0, "SETLEr", 0, 0x9e000110ULL, ImplicitList1, NULL, NULL, OperandInfo107 }, // Inst #2235 = SETLEr
+ { 2236, 5, 0, 0, "SETLm", 0|(1<<TID::MayStore), 0x9c000118ULL, ImplicitList1, NULL, NULL, OperandInfo34 }, // Inst #2236 = SETLm
+ { 2237, 1, 1, 0, "SETLr", 0, 0x9c000110ULL, ImplicitList1, NULL, NULL, OperandInfo107 }, // Inst #2237 = SETLr
+ { 2238, 5, 0, 0, "SETNEm", 0|(1<<TID::MayStore), 0x95000118ULL, ImplicitList1, NULL, NULL, OperandInfo34 }, // Inst #2238 = SETNEm
+ { 2239, 1, 1, 0, "SETNEr", 0, 0x95000110ULL, ImplicitList1, NULL, NULL, OperandInfo107 }, // Inst #2239 = SETNEr
+ { 2240, 5, 0, 0, "SETNOm", 0|(1<<TID::MayStore), 0x91000118ULL, ImplicitList1, NULL, NULL, OperandInfo34 }, // Inst #2240 = SETNOm
+ { 2241, 1, 1, 0, "SETNOr", 0, 0x91000110ULL, ImplicitList1, NULL, NULL, OperandInfo107 }, // Inst #2241 = SETNOr
+ { 2242, 5, 0, 0, "SETNPm", 0|(1<<TID::MayStore), 0x9b000118ULL, ImplicitList1, NULL, NULL, OperandInfo34 }, // Inst #2242 = SETNPm
+ { 2243, 1, 1, 0, "SETNPr", 0, 0x9b000110ULL, ImplicitList1, NULL, NULL, OperandInfo107 }, // Inst #2243 = SETNPr
+ { 2244, 5, 0, 0, "SETNSm", 0|(1<<TID::MayStore), 0x99000118ULL, ImplicitList1, NULL, NULL, OperandInfo34 }, // Inst #2244 = SETNSm
+ { 2245, 1, 1, 0, "SETNSr", 0, 0x99000110ULL, ImplicitList1, NULL, NULL, OperandInfo107 }, // Inst #2245 = SETNSr
+ { 2246, 5, 0, 0, "SETOm", 0|(1<<TID::MayStore), 0x90000118ULL, ImplicitList1, NULL, NULL, OperandInfo34 }, // Inst #2246 = SETOm
+ { 2247, 1, 1, 0, "SETOr", 0, 0x90000110ULL, ImplicitList1, NULL, NULL, OperandInfo107 }, // Inst #2247 = SETOr
+ { 2248, 5, 0, 0, "SETPm", 0|(1<<TID::MayStore), 0x9a000118ULL, ImplicitList1, NULL, NULL, OperandInfo34 }, // Inst #2248 = SETPm
+ { 2249, 1, 1, 0, "SETPr", 0, 0x9a000110ULL, ImplicitList1, NULL, NULL, OperandInfo107 }, // Inst #2249 = SETPr
+ { 2250, 5, 0, 0, "SETSm", 0|(1<<TID::MayStore), 0x98000118ULL, ImplicitList1, NULL, NULL, OperandInfo34 }, // Inst #2250 = SETSm
+ { 2251, 1, 1, 0, "SETSr", 0, 0x98000110ULL, ImplicitList1, NULL, NULL, OperandInfo107 }, // Inst #2251 = SETSr
+ { 2252, 0, 0, 0, "SFENCE", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xae000129ULL, NULL, NULL, NULL, 0 }, // Inst #2252 = SFENCE
+ { 2253, 5, 1, 0, "SGDTm", 0|(1<<TID::UnmodeledSideEffects), 0x1000118ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2253 = SGDTm
+ { 2254, 5, 0, 0, "SHL16m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd100005cULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2254 = SHL16m1
+ { 2255, 5, 0, 0, "SHL16mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd300005cULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2255 = SHL16mCL
+ { 2256, 6, 0, 0, "SHL16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc100205cULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2256 = SHL16mi
+ { 2257, 2, 1, 0, "SHL16r1", 0|(1<<TID::ConvertibleTo3Addr)|(1<<TID::UnmodeledSideEffects), 0xd1000054ULL, NULL, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #2257 = SHL16r1
+ { 2258, 2, 1, 0, "SHL16rCL", 0, 0xd3000054ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #2258 = SHL16rCL
+ { 2259, 3, 1, 0, "SHL16ri", 0|(1<<TID::ConvertibleTo3Addr), 0xc1002054ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2259 = SHL16ri
+ { 2260, 5, 0, 0, "SHL32m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd100001cULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2260 = SHL32m1
+ { 2261, 5, 0, 0, "SHL32mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd300001cULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2261 = SHL32mCL
+ { 2262, 6, 0, 0, "SHL32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc100201cULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2262 = SHL32mi
+ { 2263, 2, 1, 0, "SHL32r1", 0|(1<<TID::ConvertibleTo3Addr)|(1<<TID::UnmodeledSideEffects), 0xd1000014ULL, NULL, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #2263 = SHL32r1
+ { 2264, 2, 1, 0, "SHL32rCL", 0, 0xd3000014ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #2264 = SHL32rCL
+ { 2265, 3, 1, 0, "SHL32ri", 0|(1<<TID::ConvertibleTo3Addr), 0xc1002014ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2265 = SHL32ri
+ { 2266, 5, 0, 0, "SHL64m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd100101cULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2266 = SHL64m1
+ { 2267, 5, 0, 0, "SHL64mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd300101cULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2267 = SHL64mCL
+ { 2268, 6, 0, 0, "SHL64mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc100301cULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2268 = SHL64mi
+ { 2269, 2, 1, 0, "SHL64r1", 0|(1<<TID::UnmodeledSideEffects), 0xd1001014ULL, NULL, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #2269 = SHL64r1
+ { 2270, 2, 1, 0, "SHL64rCL", 0, 0xd3001014ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #2270 = SHL64rCL
+ { 2271, 3, 1, 0, "SHL64ri", 0|(1<<TID::ConvertibleTo3Addr), 0xc1003014ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #2271 = SHL64ri
+ { 2272, 5, 0, 0, "SHL8m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd000001cULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2272 = SHL8m1
+ { 2273, 5, 0, 0, "SHL8mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd200001cULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2273 = SHL8mCL
+ { 2274, 6, 0, 0, "SHL8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc000201cULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2274 = SHL8mi
+ { 2275, 2, 1, 0, "SHL8r1", 0|(1<<TID::ConvertibleTo3Addr)|(1<<TID::UnmodeledSideEffects), 0xd0000014ULL, NULL, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #2275 = SHL8r1
+ { 2276, 2, 1, 0, "SHL8rCL", 0, 0xd2000014ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #2276 = SHL8rCL
+ { 2277, 3, 1, 0, "SHL8ri", 0, 0xc0002014ULL, NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #2277 = SHL8ri
+ { 2278, 6, 0, 0, "SHLD16mrCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xa5000144ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #2278 = SHLD16mrCL
+ { 2279, 7, 0, 0, "SHLD16mri8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xa4002144ULL, NULL, ImplicitList1, Barriers1, OperandInfo210 }, // Inst #2279 = SHLD16mri8
+ { 2280, 3, 1, 0, "SHLD16rrCL", 0, 0xa5000143ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #2280 = SHLD16rrCL
+ { 2281, 4, 1, 0, "SHLD16rri8", 0|(1<<TID::Commutable), 0xa4002143ULL, NULL, ImplicitList1, Barriers1, OperandInfo211 }, // Inst #2281 = SHLD16rri8
+ { 2282, 6, 0, 0, "SHLD32mrCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xa5000104ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #2282 = SHLD32mrCL
+ { 2283, 7, 0, 0, "SHLD32mri8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xa4002104ULL, NULL, ImplicitList1, Barriers1, OperandInfo212 }, // Inst #2283 = SHLD32mri8
+ { 2284, 3, 1, 0, "SHLD32rrCL", 0, 0xa5000103ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #2284 = SHLD32rrCL
+ { 2285, 4, 1, 0, "SHLD32rri8", 0|(1<<TID::Commutable), 0xa4002103ULL, NULL, ImplicitList1, Barriers1, OperandInfo213 }, // Inst #2285 = SHLD32rri8
+ { 2286, 6, 0, 0, "SHLD64mrCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xa5001104ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #2286 = SHLD64mrCL
+ { 2287, 7, 0, 0, "SHLD64mri8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xa4003104ULL, NULL, ImplicitList1, Barriers1, OperandInfo214 }, // Inst #2287 = SHLD64mri8
+ { 2288, 3, 1, 0, "SHLD64rrCL", 0, 0xa5001103ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #2288 = SHLD64rrCL
+ { 2289, 4, 1, 0, "SHLD64rri8", 0|(1<<TID::Commutable), 0xa4003103ULL, NULL, ImplicitList1, Barriers1, OperandInfo215 }, // Inst #2289 = SHLD64rri8
+ { 2290, 5, 0, 0, "SHR16m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd100005dULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2290 = SHR16m1
+ { 2291, 5, 0, 0, "SHR16mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd300005dULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2291 = SHR16mCL
+ { 2292, 6, 0, 0, "SHR16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc100205dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2292 = SHR16mi
+ { 2293, 2, 1, 0, "SHR16r1", 0, 0xd1000055ULL, NULL, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #2293 = SHR16r1
+ { 2294, 2, 1, 0, "SHR16rCL", 0, 0xd3000055ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo104 }, // Inst #2294 = SHR16rCL
+ { 2295, 3, 1, 0, "SHR16ri", 0, 0xc1002055ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2295 = SHR16ri
+ { 2296, 5, 0, 0, "SHR32m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd100001dULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2296 = SHR32m1
+ { 2297, 5, 0, 0, "SHR32mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd300001dULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2297 = SHR32mCL
+ { 2298, 6, 0, 0, "SHR32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc100201dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2298 = SHR32mi
+ { 2299, 2, 1, 0, "SHR32r1", 0, 0xd1000015ULL, NULL, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #2299 = SHR32r1
+ { 2300, 2, 1, 0, "SHR32rCL", 0, 0xd3000015ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo61 }, // Inst #2300 = SHR32rCL
+ { 2301, 3, 1, 0, "SHR32ri", 0, 0xc1002015ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2301 = SHR32ri
+ { 2302, 5, 0, 0, "SHR64m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd100101dULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2302 = SHR64m1
+ { 2303, 5, 0, 0, "SHR64mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd300101dULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2303 = SHR64mCL
+ { 2304, 6, 0, 0, "SHR64mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc100301dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2304 = SHR64mi
+ { 2305, 2, 1, 0, "SHR64r1", 0, 0xd1001015ULL, NULL, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #2305 = SHR64r1
+ { 2306, 2, 1, 0, "SHR64rCL", 0, 0xd3001015ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo62 }, // Inst #2306 = SHR64rCL
+ { 2307, 3, 1, 0, "SHR64ri", 0, 0xc1003015ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #2307 = SHR64ri
+ { 2308, 5, 0, 0, "SHR8m1", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd000001dULL, NULL, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2308 = SHR8m1
+ { 2309, 5, 0, 0, "SHR8mCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xd200001dULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo34 }, // Inst #2309 = SHR8mCL
+ { 2310, 6, 0, 0, "SHR8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xc000201dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2310 = SHR8mi
+ { 2311, 2, 1, 0, "SHR8r1", 0, 0xd0000015ULL, NULL, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #2311 = SHR8r1
+ { 2312, 2, 1, 0, "SHR8rCL", 0, 0xd2000015ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo105 }, // Inst #2312 = SHR8rCL
+ { 2313, 3, 1, 0, "SHR8ri", 0, 0xc0002015ULL, NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #2313 = SHR8ri
+ { 2314, 6, 0, 0, "SHRD16mrCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xad000144ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #2314 = SHRD16mrCL
+ { 2315, 7, 0, 0, "SHRD16mri8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xac002144ULL, NULL, ImplicitList1, Barriers1, OperandInfo210 }, // Inst #2315 = SHRD16mri8
+ { 2316, 3, 1, 0, "SHRD16rrCL", 0, 0xad000143ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #2316 = SHRD16rrCL
+ { 2317, 4, 1, 0, "SHRD16rri8", 0|(1<<TID::Commutable), 0xac002143ULL, NULL, ImplicitList1, Barriers1, OperandInfo211 }, // Inst #2317 = SHRD16rri8
+ { 2318, 6, 0, 0, "SHRD32mrCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xad000104ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #2318 = SHRD32mrCL
+ { 2319, 7, 0, 0, "SHRD32mri8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xac002104ULL, NULL, ImplicitList1, Barriers1, OperandInfo212 }, // Inst #2319 = SHRD32mri8
+ { 2320, 3, 1, 0, "SHRD32rrCL", 0, 0xad000103ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #2320 = SHRD32rrCL
+ { 2321, 4, 1, 0, "SHRD32rri8", 0|(1<<TID::Commutable), 0xac002103ULL, NULL, ImplicitList1, Barriers1, OperandInfo213 }, // Inst #2321 = SHRD32rri8
+ { 2322, 6, 0, 0, "SHRD64mrCL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xad001104ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #2322 = SHRD64mrCL
+ { 2323, 7, 0, 0, "SHRD64mri8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0xac003104ULL, NULL, ImplicitList1, Barriers1, OperandInfo214 }, // Inst #2323 = SHRD64mri8
+ { 2324, 3, 1, 0, "SHRD64rrCL", 0, 0xad001103ULL, ImplicitList45, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #2324 = SHRD64rrCL
+ { 2325, 4, 1, 0, "SHRD64rri8", 0|(1<<TID::Commutable), 0xac003103ULL, NULL, ImplicitList1, Barriers1, OperandInfo215 }, // Inst #2325 = SHRD64rri8
+ { 2326, 8, 1, 0, "SHUFPDrmi", 0|(1<<TID::MayLoad), 0xc6802146ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #2326 = SHUFPDrmi
+ { 2327, 4, 1, 0, "SHUFPDrri", 0, 0xc6802145ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #2327 = SHUFPDrri
+ { 2328, 8, 1, 0, "SHUFPSrmi", 0|(1<<TID::MayLoad), 0xc6402106ULL, NULL, NULL, NULL, OperandInfo53 }, // Inst #2328 = SHUFPSrmi
+ { 2329, 4, 1, 0, "SHUFPSrri", 0|(1<<TID::ConvertibleTo3Addr), 0xc6402105ULL, NULL, NULL, NULL, OperandInfo54 }, // Inst #2329 = SHUFPSrri
+ { 2330, 5, 1, 0, "SIDTm", 0|(1<<TID::UnmodeledSideEffects), 0x1000119ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2330 = SIDTm
+ { 2331, 0, 0, 0, "SIN_F", 0|(1<<TID::UnmodeledSideEffects), 0xfe000401ULL, NULL, NULL, NULL, 0 }, // Inst #2331 = SIN_F
+ { 2332, 2, 1, 0, "SIN_Fp32", 0, 0x30000ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #2332 = SIN_Fp32
+ { 2333, 2, 1, 0, "SIN_Fp64", 0, 0x30000ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #2333 = SIN_Fp64
+ { 2334, 2, 1, 0, "SIN_Fp80", 0, 0x30000ULL, NULL, NULL, NULL, OperandInfo9 }, // Inst #2334 = SIN_Fp80
+ { 2335, 5, 1, 0, "SLDT16m", 0|(1<<TID::UnmodeledSideEffects), 0x118ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2335 = SLDT16m
+ { 2336, 1, 1, 0, "SLDT16r", 0|(1<<TID::UnmodeledSideEffects), 0x110ULL, NULL, NULL, NULL, OperandInfo106 }, // Inst #2336 = SLDT16r
+ { 2337, 5, 1, 0, "SLDT64m", 0|(1<<TID::UnmodeledSideEffects), 0x1118ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2337 = SLDT64m
+ { 2338, 1, 1, 0, "SLDT64r", 0|(1<<TID::UnmodeledSideEffects), 0x1110ULL, NULL, NULL, NULL, OperandInfo67 }, // Inst #2338 = SLDT64r
+ { 2339, 5, 1, 0, "SMSW16m", 0|(1<<TID::UnmodeledSideEffects), 0x100011cULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2339 = SMSW16m
+ { 2340, 1, 1, 0, "SMSW16r", 0|(1<<TID::UnmodeledSideEffects), 0x1000154ULL, NULL, NULL, NULL, OperandInfo106 }, // Inst #2340 = SMSW16r
+ { 2341, 1, 1, 0, "SMSW32r", 0|(1<<TID::UnmodeledSideEffects), 0x1000114ULL, NULL, NULL, NULL, OperandInfo66 }, // Inst #2341 = SMSW32r
+ { 2342, 1, 1, 0, "SMSW64r", 0|(1<<TID::UnmodeledSideEffects), 0x1001114ULL, NULL, NULL, NULL, OperandInfo67 }, // Inst #2342 = SMSW64r
+ { 2343, 6, 1, 0, "SQRTPDm", 0|(1<<TID::MayLoad), 0x51800146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2343 = SQRTPDm
+ { 2344, 6, 1, 0, "SQRTPDm_Int", 0|(1<<TID::MayLoad), 0x51800146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2344 = SQRTPDm_Int
+ { 2345, 2, 1, 0, "SQRTPDr", 0, 0x51800145ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2345 = SQRTPDr
+ { 2346, 2, 1, 0, "SQRTPDr_Int", 0, 0x51800145ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2346 = SQRTPDr_Int
+ { 2347, 6, 1, 0, "SQRTPSm", 0|(1<<TID::MayLoad), 0x51400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2347 = SQRTPSm
+ { 2348, 6, 1, 0, "SQRTPSm_Int", 0|(1<<TID::MayLoad), 0x51400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2348 = SQRTPSm_Int
+ { 2349, 2, 1, 0, "SQRTPSr", 0, 0x51400105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2349 = SQRTPSr
+ { 2350, 2, 1, 0, "SQRTPSr_Int", 0, 0x51400105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2350 = SQRTPSr_Int
+ { 2351, 6, 1, 0, "SQRTSDm", 0|(1<<TID::MayLoad), 0x51000b06ULL, NULL, NULL, NULL, OperandInfo94 }, // Inst #2351 = SQRTSDm
+ { 2352, 6, 1, 0, "SQRTSDm_Int", 0|(1<<TID::MayLoad), 0x51000b06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2352 = SQRTSDm_Int
+ { 2353, 2, 1, 0, "SQRTSDr", 0, 0x51000b05ULL, NULL, NULL, NULL, OperandInfo118 }, // Inst #2353 = SQRTSDr
+ { 2354, 2, 1, 0, "SQRTSDr_Int", 0, 0x51000b05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2354 = SQRTSDr_Int
+ { 2355, 6, 1, 0, "SQRTSSm", 0|(1<<TID::MayLoad), 0x51000c06ULL, NULL, NULL, NULL, OperandInfo92 }, // Inst #2355 = SQRTSSm
+ { 2356, 6, 1, 0, "SQRTSSm_Int", 0|(1<<TID::MayLoad), 0x51000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2356 = SQRTSSm_Int
+ { 2357, 2, 1, 0, "SQRTSSr", 0, 0x51000c05ULL, NULL, NULL, NULL, OperandInfo119 }, // Inst #2357 = SQRTSSr
+ { 2358, 2, 1, 0, "SQRTSSr_Int", 0, 0x51000c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2358 = SQRTSSr_Int
+ { 2359, 0, 0, 0, "SQRT_F", 0|(1<<TID::UnmodeledSideEffects), 0xfa000401ULL, NULL, NULL, NULL, 0 }, // Inst #2359 = SQRT_F
+ { 2360, 2, 1, 0, "SQRT_Fp32", 0, 0x30000ULL, NULL, NULL, NULL, OperandInfo7 }, // Inst #2360 = SQRT_Fp32
+ { 2361, 2, 1, 0, "SQRT_Fp64", 0, 0x30000ULL, NULL, NULL, NULL, OperandInfo8 }, // Inst #2361 = SQRT_Fp64
+ { 2362, 2, 1, 0, "SQRT_Fp80", 0, 0x30000ULL, NULL, NULL, NULL, OperandInfo9 }, // Inst #2362 = SQRT_Fp80
+ { 2363, 0, 0, 0, "SS_PREFIX", 0|(1<<TID::UnmodeledSideEffects), 0x36000001ULL, NULL, NULL, NULL, 0 }, // Inst #2363 = SS_PREFIX
+ { 2364, 0, 0, 0, "STC", 0|(1<<TID::UnmodeledSideEffects), 0xf9000001ULL, NULL, NULL, NULL, 0 }, // Inst #2364 = STC
+ { 2365, 0, 0, 0, "STD", 0|(1<<TID::UnmodeledSideEffects), 0xfd000001ULL, NULL, NULL, NULL, 0 }, // Inst #2365 = STD
+ { 2366, 0, 0, 0, "STI", 0|(1<<TID::UnmodeledSideEffects), 0xfb000001ULL, NULL, NULL, NULL, 0 }, // Inst #2366 = STI
+ { 2367, 5, 0, 0, "STMXCSR", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xae40011bULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2367 = STMXCSR
+ { 2368, 0, 0, 0, "STOSB", 0|(1<<TID::UnmodeledSideEffects), 0xaa000001ULL, ImplicitList55, ImplicitList34, NULL, 0 }, // Inst #2368 = STOSB
+ { 2369, 0, 0, 0, "STOSD", 0|(1<<TID::UnmodeledSideEffects), 0xab000001ULL, ImplicitList56, ImplicitList34, NULL, 0 }, // Inst #2369 = STOSD
+ { 2370, 0, 0, 0, "STOSQ", 0|(1<<TID::UnmodeledSideEffects), 0xab001001ULL, ImplicitList57, ImplicitList53, NULL, 0 }, // Inst #2370 = STOSQ
+ { 2371, 0, 0, 0, "STOSW", 0|(1<<TID::UnmodeledSideEffects), 0xab000041ULL, ImplicitList58, ImplicitList34, NULL, 0 }, // Inst #2371 = STOSW
+ { 2372, 5, 1, 0, "STRm", 0|(1<<TID::UnmodeledSideEffects), 0x119ULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2372 = STRm
+ { 2373, 1, 1, 0, "STRr", 0|(1<<TID::UnmodeledSideEffects), 0x111ULL, NULL, NULL, NULL, OperandInfo106 }, // Inst #2373 = STRr
+ { 2374, 5, 0, 0, "ST_F32m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xd900001aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2374 = ST_F32m
+ { 2375, 5, 0, 0, "ST_F64m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xdd00001aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2375 = ST_F64m
+ { 2376, 5, 0, 0, "ST_FP32m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xd900001bULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2376 = ST_FP32m
+ { 2377, 5, 0, 0, "ST_FP64m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xdd00001bULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2377 = ST_FP64m
+ { 2378, 5, 0, 0, "ST_FP80m", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xdb00001fULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2378 = ST_FP80m
+ { 2379, 1, 0, 0, "ST_FPrr", 0|(1<<TID::UnmodeledSideEffects), 0xd8000802ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #2379 = ST_FPrr
+ { 2380, 6, 0, 0, "ST_Fp32m", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo110 }, // Inst #2380 = ST_Fp32m
+ { 2381, 6, 0, 0, "ST_Fp64m", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo111 }, // Inst #2381 = ST_Fp64m
+ { 2382, 6, 0, 0, "ST_Fp64m32", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo111 }, // Inst #2382 = ST_Fp64m32
+ { 2383, 6, 0, 0, "ST_Fp80m32", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo112 }, // Inst #2383 = ST_Fp80m32
+ { 2384, 6, 0, 0, "ST_Fp80m64", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo112 }, // Inst #2384 = ST_Fp80m64
+ { 2385, 6, 0, 0, "ST_FpP32m", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo110 }, // Inst #2385 = ST_FpP32m
+ { 2386, 6, 0, 0, "ST_FpP64m", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo111 }, // Inst #2386 = ST_FpP64m
+ { 2387, 6, 0, 0, "ST_FpP64m32", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo111 }, // Inst #2387 = ST_FpP64m32
+ { 2388, 6, 0, 0, "ST_FpP80m", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo112 }, // Inst #2388 = ST_FpP80m
+ { 2389, 6, 0, 0, "ST_FpP80m32", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo112 }, // Inst #2389 = ST_FpP80m32
+ { 2390, 6, 0, 0, "ST_FpP80m64", 0|(1<<TID::MayStore), 0x20000ULL, NULL, NULL, NULL, OperandInfo112 }, // Inst #2390 = ST_FpP80m64
+ { 2391, 1, 0, 0, "ST_Frr", 0|(1<<TID::UnmodeledSideEffects), 0xd0000802ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #2391 = ST_Frr
+ { 2392, 1, 0, 0, "SUB16i16", 0|(1<<TID::UnmodeledSideEffects), 0x2d006041ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #2392 = SUB16i16
+ { 2393, 6, 0, 0, "SUB16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100605dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2393 = SUB16mi
+ { 2394, 6, 0, 0, "SUB16mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8300205dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2394 = SUB16mi8
+ { 2395, 6, 0, 0, "SUB16mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x29000044ULL, NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #2395 = SUB16mr
+ { 2396, 3, 1, 0, "SUB16ri", 0, 0x81006055ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2396 = SUB16ri
+ { 2397, 3, 1, 0, "SUB16ri8", 0, 0x83002055ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #2397 = SUB16ri8
+ { 2398, 7, 1, 0, "SUB16rm", 0|(1<<TID::MayLoad), 0x2b000046ULL, NULL, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #2398 = SUB16rm
+ { 2399, 3, 1, 0, "SUB16rr", 0, 0x29000043ULL, NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #2399 = SUB16rr
+ { 2400, 3, 1, 0, "SUB16rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x2b000045ULL, NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #2400 = SUB16rr_REV
+ { 2401, 1, 0, 0, "SUB32i32", 0|(1<<TID::UnmodeledSideEffects), 0x2d00a001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #2401 = SUB32i32
+ { 2402, 6, 0, 0, "SUB32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100a01dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2402 = SUB32mi
+ { 2403, 6, 0, 0, "SUB32mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8300201dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2403 = SUB32mi8
+ { 2404, 6, 0, 0, "SUB32mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x29000004ULL, NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #2404 = SUB32mr
+ { 2405, 3, 1, 0, "SUB32ri", 0, 0x8100a015ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2405 = SUB32ri
+ { 2406, 3, 1, 0, "SUB32ri8", 0, 0x83002015ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #2406 = SUB32ri8
+ { 2407, 7, 1, 0, "SUB32rm", 0|(1<<TID::MayLoad), 0x2b000006ULL, NULL, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #2407 = SUB32rm
+ { 2408, 3, 1, 0, "SUB32rr", 0, 0x29000003ULL, NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #2408 = SUB32rr
+ { 2409, 3, 1, 0, "SUB32rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x2b000005ULL, NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #2409 = SUB32rr_REV
+ { 2410, 1, 0, 0, "SUB64i32", 0|(1<<TID::UnmodeledSideEffects), 0x2d00b001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #2410 = SUB64i32
+ { 2411, 6, 0, 0, "SUB64mi32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100b01dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2411 = SUB64mi32
+ { 2412, 6, 0, 0, "SUB64mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8300301dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2412 = SUB64mi8
+ { 2413, 6, 0, 0, "SUB64mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x29001004ULL, NULL, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #2413 = SUB64mr
+ { 2414, 3, 1, 0, "SUB64ri32", 0, 0x8100b015ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #2414 = SUB64ri32
+ { 2415, 3, 1, 0, "SUB64ri8", 0, 0x83003015ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #2415 = SUB64ri8
+ { 2416, 7, 1, 0, "SUB64rm", 0|(1<<TID::MayLoad), 0x2b001006ULL, NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #2416 = SUB64rm
+ { 2417, 3, 1, 0, "SUB64rr", 0, 0x29001003ULL, NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #2417 = SUB64rr
+ { 2418, 3, 1, 0, "SUB64rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x2b001005ULL, NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #2418 = SUB64rr_REV
+ { 2419, 1, 0, 0, "SUB8i8", 0|(1<<TID::UnmodeledSideEffects), 0x2c002001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #2419 = SUB8i8
+ { 2420, 6, 0, 0, "SUB8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8000201dULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2420 = SUB8mi
+ { 2421, 6, 0, 0, "SUB8mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x28000004ULL, NULL, ImplicitList1, Barriers1, OperandInfo24 }, // Inst #2421 = SUB8mr
+ { 2422, 3, 1, 0, "SUB8ri", 0, 0x80002015ULL, NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #2422 = SUB8ri
+ { 2423, 7, 1, 0, "SUB8rm", 0|(1<<TID::MayLoad), 0x2a000006ULL, NULL, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #2423 = SUB8rm
+ { 2424, 3, 1, 0, "SUB8rr", 0, 0x28000003ULL, NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #2424 = SUB8rr
+ { 2425, 3, 1, 0, "SUB8rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x2a000005ULL, NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #2425 = SUB8rr_REV
+ { 2426, 7, 1, 0, "SUBPDrm", 0|(1<<TID::MayLoad), 0x5c800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #2426 = SUBPDrm
+ { 2427, 3, 1, 0, "SUBPDrr", 0, 0x5c800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #2427 = SUBPDrr
+ { 2428, 7, 1, 0, "SUBPSrm", 0|(1<<TID::MayLoad), 0x5c400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #2428 = SUBPSrm
+ { 2429, 3, 1, 0, "SUBPSrr", 0, 0x5c400105ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #2429 = SUBPSrr
+ { 2430, 5, 0, 0, "SUBR_F32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xd800001dULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2430 = SUBR_F32m
+ { 2431, 5, 0, 0, "SUBR_F64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xdc00001dULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2431 = SUBR_F64m
+ { 2432, 5, 0, 0, "SUBR_FI16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xde00001dULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2432 = SUBR_FI16m
+ { 2433, 5, 0, 0, "SUBR_FI32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xda00001dULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2433 = SUBR_FI32m
+ { 2434, 1, 0, 0, "SUBR_FPrST0", 0|(1<<TID::UnmodeledSideEffects), 0xe0000902ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #2434 = SUBR_FPrST0
+ { 2435, 1, 0, 0, "SUBR_FST0r", 0|(1<<TID::UnmodeledSideEffects), 0xe8000302ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #2435 = SUBR_FST0r
+ { 2436, 7, 1, 0, "SUBR_Fp32m", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #2436 = SUBR_Fp32m
+ { 2437, 7, 1, 0, "SUBR_Fp64m", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #2437 = SUBR_Fp64m
+ { 2438, 7, 1, 0, "SUBR_Fp64m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #2438 = SUBR_Fp64m32
+ { 2439, 7, 1, 0, "SUBR_Fp80m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #2439 = SUBR_Fp80m32
+ { 2440, 7, 1, 0, "SUBR_Fp80m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #2440 = SUBR_Fp80m64
+ { 2441, 7, 1, 0, "SUBR_FpI16m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #2441 = SUBR_FpI16m32
+ { 2442, 7, 1, 0, "SUBR_FpI16m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #2442 = SUBR_FpI16m64
+ { 2443, 7, 1, 0, "SUBR_FpI16m80", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #2443 = SUBR_FpI16m80
+ { 2444, 7, 1, 0, "SUBR_FpI32m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #2444 = SUBR_FpI32m32
+ { 2445, 7, 1, 0, "SUBR_FpI32m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #2445 = SUBR_FpI32m64
+ { 2446, 7, 1, 0, "SUBR_FpI32m80", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #2446 = SUBR_FpI32m80
+ { 2447, 1, 0, 0, "SUBR_FrST0", 0|(1<<TID::UnmodeledSideEffects), 0xe0000702ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #2447 = SUBR_FrST0
+ { 2448, 7, 1, 0, "SUBSDrm", 0|(1<<TID::MayLoad), 0x5c000b06ULL, NULL, NULL, NULL, OperandInfo30 }, // Inst #2448 = SUBSDrm
+ { 2449, 7, 1, 0, "SUBSDrm_Int", 0|(1<<TID::MayLoad), 0x5c000b06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #2449 = SUBSDrm_Int
+ { 2450, 3, 1, 0, "SUBSDrr", 0, 0x5c000b05ULL, NULL, NULL, NULL, OperandInfo31 }, // Inst #2450 = SUBSDrr
+ { 2451, 3, 1, 0, "SUBSDrr_Int", 0, 0x5c000b05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #2451 = SUBSDrr_Int
+ { 2452, 7, 1, 0, "SUBSSrm", 0|(1<<TID::MayLoad), 0x5c000c06ULL, NULL, NULL, NULL, OperandInfo32 }, // Inst #2452 = SUBSSrm
+ { 2453, 7, 1, 0, "SUBSSrm_Int", 0|(1<<TID::MayLoad), 0x5c000c06ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #2453 = SUBSSrm_Int
+ { 2454, 3, 1, 0, "SUBSSrr", 0, 0x5c000c05ULL, NULL, NULL, NULL, OperandInfo33 }, // Inst #2454 = SUBSSrr
+ { 2455, 3, 1, 0, "SUBSSrr_Int", 0, 0x5c000c05ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #2455 = SUBSSrr_Int
+ { 2456, 5, 0, 0, "SUB_F32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xd800001cULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2456 = SUB_F32m
+ { 2457, 5, 0, 0, "SUB_F64m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xdc00001cULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2457 = SUB_F64m
+ { 2458, 5, 0, 0, "SUB_FI16m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xde00001cULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2458 = SUB_FI16m
+ { 2459, 5, 0, 0, "SUB_FI32m", 0|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0xda00001cULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2459 = SUB_FI32m
+ { 2460, 1, 0, 0, "SUB_FPrST0", 0|(1<<TID::UnmodeledSideEffects), 0xe8000902ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #2460 = SUB_FPrST0
+ { 2461, 1, 0, 0, "SUB_FST0r", 0|(1<<TID::UnmodeledSideEffects), 0xe0000302ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #2461 = SUB_FST0r
+ { 2462, 3, 1, 0, "SUB_Fp32", 0, 0x40000ULL, NULL, NULL, NULL, OperandInfo36 }, // Inst #2462 = SUB_Fp32
+ { 2463, 7, 1, 0, "SUB_Fp32m", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #2463 = SUB_Fp32m
+ { 2464, 3, 1, 0, "SUB_Fp64", 0, 0x40000ULL, NULL, NULL, NULL, OperandInfo38 }, // Inst #2464 = SUB_Fp64
+ { 2465, 7, 1, 0, "SUB_Fp64m", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #2465 = SUB_Fp64m
+ { 2466, 7, 1, 0, "SUB_Fp64m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #2466 = SUB_Fp64m32
+ { 2467, 3, 1, 0, "SUB_Fp80", 0, 0x40000ULL, NULL, NULL, NULL, OperandInfo40 }, // Inst #2467 = SUB_Fp80
+ { 2468, 7, 1, 0, "SUB_Fp80m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #2468 = SUB_Fp80m32
+ { 2469, 7, 1, 0, "SUB_Fp80m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #2469 = SUB_Fp80m64
+ { 2470, 7, 1, 0, "SUB_FpI16m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #2470 = SUB_FpI16m32
+ { 2471, 7, 1, 0, "SUB_FpI16m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #2471 = SUB_FpI16m64
+ { 2472, 7, 1, 0, "SUB_FpI16m80", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #2472 = SUB_FpI16m80
+ { 2473, 7, 1, 0, "SUB_FpI32m32", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo37 }, // Inst #2473 = SUB_FpI32m32
+ { 2474, 7, 1, 0, "SUB_FpI32m64", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo39 }, // Inst #2474 = SUB_FpI32m64
+ { 2475, 7, 1, 0, "SUB_FpI32m80", 0|(1<<TID::MayLoad), 0x30000ULL, NULL, NULL, NULL, OperandInfo41 }, // Inst #2475 = SUB_FpI32m80
+ { 2476, 1, 0, 0, "SUB_FrST0", 0|(1<<TID::UnmodeledSideEffects), 0xe8000702ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #2476 = SUB_FrST0
+ { 2477, 0, 0, 0, "SWAPGS", 0|(1<<TID::UnmodeledSideEffects), 0x1000129ULL, NULL, NULL, NULL, 0 }, // Inst #2477 = SWAPGS
+ { 2478, 0, 0, 0, "SYSCALL", 0|(1<<TID::UnmodeledSideEffects), 0x5000101ULL, NULL, NULL, NULL, 0 }, // Inst #2478 = SYSCALL
+ { 2479, 0, 0, 0, "SYSENTER", 0|(1<<TID::UnmodeledSideEffects), 0x34000101ULL, NULL, NULL, NULL, 0 }, // Inst #2479 = SYSENTER
+ { 2480, 0, 0, 0, "SYSEXIT", 0|(1<<TID::UnmodeledSideEffects), 0x35000101ULL, NULL, NULL, NULL, 0 }, // Inst #2480 = SYSEXIT
+ { 2481, 0, 0, 0, "SYSEXIT64", 0|(1<<TID::UnmodeledSideEffects), 0x35001101ULL, NULL, NULL, NULL, 0 }, // Inst #2481 = SYSEXIT64
+ { 2482, 0, 0, 0, "SYSRET", 0|(1<<TID::UnmodeledSideEffects), 0x7000101ULL, NULL, NULL, NULL, 0 }, // Inst #2482 = SYSRET
+ { 2483, 1, 0, 0, "TAILJMPd", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0xe900c001ULL, ImplicitList2, ImplicitList9, Barriers3, OperandInfo2 }, // Inst #2483 = TAILJMPd
+ { 2484, 1, 0, 0, "TAILJMPd64", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0xe900c001ULL, ImplicitList4, ImplicitList10, Barriers4, OperandInfo2 }, // Inst #2484 = TAILJMPd64
+ { 2485, 5, 0, 0, "TAILJMPm", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::MayLoad)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0xff00001cULL, ImplicitList2, ImplicitList9, Barriers3, OperandInfo216 }, // Inst #2485 = TAILJMPm
+ { 2486, 5, 0, 0, "TAILJMPm64", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::MayLoad)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0xff00001cULL, ImplicitList4, ImplicitList10, Barriers4, OperandInfo217 }, // Inst #2486 = TAILJMPm64
+ { 2487, 1, 0, 0, "TAILJMPr", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0xff000014ULL, ImplicitList2, ImplicitList9, Barriers3, OperandInfo218 }, // Inst #2487 = TAILJMPr
+ { 2488, 1, 0, 0, "TAILJMPr64", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0xff000014ULL, ImplicitList4, ImplicitList10, Barriers4, OperandInfo219 }, // Inst #2488 = TAILJMPr64
+ { 2489, 2, 0, 0, "TCRETURNdi", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList2, ImplicitList9, Barriers3, OperandInfo6 }, // Inst #2489 = TCRETURNdi
+ { 2490, 2, 0, 0, "TCRETURNdi64", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList4, ImplicitList10, Barriers4, OperandInfo6 }, // Inst #2490 = TCRETURNdi64
+ { 2491, 6, 0, 0, "TCRETURNmi", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::MayLoad)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList2, ImplicitList9, Barriers3, OperandInfo220 }, // Inst #2491 = TCRETURNmi
+ { 2492, 6, 0, 0, "TCRETURNmi64", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::MayLoad)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList4, ImplicitList10, Barriers4, OperandInfo221 }, // Inst #2492 = TCRETURNmi64
+ { 2493, 2, 0, 0, "TCRETURNri", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList2, ImplicitList9, Barriers3, OperandInfo222 }, // Inst #2493 = TCRETURNri
+ { 2494, 2, 0, 0, "TCRETURNri64", 0|(1<<TID::Return)|(1<<TID::Barrier)|(1<<TID::Call)|(1<<TID::Terminator)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0x0ULL, ImplicitList4, ImplicitList10, Barriers4, OperandInfo223 }, // Inst #2494 = TCRETURNri64
+ { 2495, 1, 0, 0, "TEST16i16", 0|(1<<TID::UnmodeledSideEffects), 0xa9006041ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #2495 = TEST16i16
+ { 2496, 6, 0, 0, "TEST16mi", 0|(1<<TID::MayLoad), 0xf7006058ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2496 = TEST16mi
+ { 2497, 2, 0, 0, "TEST16ri", 0, 0xf7006050ULL, NULL, ImplicitList1, Barriers1, OperandInfo63 }, // Inst #2497 = TEST16ri
+ { 2498, 6, 0, 0, "TEST16rm", 0|(1<<TID::MayLoad), 0x85000046ULL, NULL, ImplicitList1, Barriers1, OperandInfo55 }, // Inst #2498 = TEST16rm
+ { 2499, 2, 0, 0, "TEST16rr", 0|(1<<TID::Commutable), 0x85000045ULL, NULL, ImplicitList1, Barriers1, OperandInfo56 }, // Inst #2499 = TEST16rr
+ { 2500, 1, 0, 0, "TEST32i32", 0|(1<<TID::UnmodeledSideEffects), 0xa900a001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #2500 = TEST32i32
+ { 2501, 6, 0, 0, "TEST32mi", 0|(1<<TID::MayLoad), 0xf700a018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2501 = TEST32mi
+ { 2502, 2, 0, 0, "TEST32ri", 0, 0xf700a010ULL, NULL, ImplicitList1, Barriers1, OperandInfo64 }, // Inst #2502 = TEST32ri
+ { 2503, 6, 0, 0, "TEST32rm", 0|(1<<TID::MayLoad), 0x85000006ULL, NULL, ImplicitList1, Barriers1, OperandInfo57 }, // Inst #2503 = TEST32rm
+ { 2504, 2, 0, 0, "TEST32rr", 0|(1<<TID::Commutable), 0x85000005ULL, NULL, ImplicitList1, Barriers1, OperandInfo58 }, // Inst #2504 = TEST32rr
+ { 2505, 1, 0, 0, "TEST64i32", 0|(1<<TID::UnmodeledSideEffects), 0xa900b001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #2505 = TEST64i32
+ { 2506, 6, 0, 0, "TEST64mi32", 0|(1<<TID::MayLoad), 0xf700b018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2506 = TEST64mi32
+ { 2507, 2, 0, 0, "TEST64ri32", 0, 0xf700b010ULL, NULL, ImplicitList1, Barriers1, OperandInfo65 }, // Inst #2507 = TEST64ri32
+ { 2508, 6, 0, 0, "TEST64rm", 0|(1<<TID::MayLoad), 0x85001006ULL, NULL, ImplicitList1, Barriers1, OperandInfo59 }, // Inst #2508 = TEST64rm
+ { 2509, 2, 0, 0, "TEST64rr", 0|(1<<TID::Commutable), 0x85001005ULL, NULL, ImplicitList1, Barriers1, OperandInfo60 }, // Inst #2509 = TEST64rr
+ { 2510, 1, 0, 0, "TEST8i8", 0|(1<<TID::UnmodeledSideEffects), 0xa8002001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #2510 = TEST8i8
+ { 2511, 6, 0, 0, "TEST8mi", 0|(1<<TID::MayLoad), 0xf6002018ULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #2511 = TEST8mi
+ { 2512, 2, 0, 0, "TEST8ri", 0, 0xf6002010ULL, NULL, ImplicitList1, Barriers1, OperandInfo81 }, // Inst #2512 = TEST8ri
+ { 2513, 6, 0, 0, "TEST8rm", 0|(1<<TID::MayLoad), 0x84000006ULL, NULL, ImplicitList1, Barriers1, OperandInfo82 }, // Inst #2513 = TEST8rm
+ { 2514, 2, 0, 0, "TEST8rr", 0|(1<<TID::Commutable), 0x84000005ULL, NULL, ImplicitList1, Barriers1, OperandInfo83 }, // Inst #2514 = TEST8rr
+ { 2515, 5, 0, 0, "TLSCall_32", 0|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList2, ImplicitList59, NULL, OperandInfo34 }, // Inst #2515 = TLSCall_32
+ { 2516, 5, 0, 0, "TLSCall_64", 0|(1<<TID::UsesCustomInserter), 0x0ULL, ImplicitList35, ImplicitList15, NULL, OperandInfo34 }, // Inst #2516 = TLSCall_64
+ { 2517, 5, 0, 0, "TLS_addr32", 0, 0x0ULL, ImplicitList2, ImplicitList9, Barriers3, OperandInfo34 }, // Inst #2517 = TLS_addr32
+ { 2518, 5, 0, 0, "TLS_addr64", 0, 0x0ULL, ImplicitList4, ImplicitList10, Barriers4, OperandInfo34 }, // Inst #2518 = TLS_addr64
+ { 2519, 0, 0, 0, "TRAP", 0|(1<<TID::Barrier)|(1<<TID::Terminator)|(1<<TID::UnmodeledSideEffects), 0xb000101ULL, NULL, NULL, NULL, 0 }, // Inst #2519 = TRAP
+ { 2520, 0, 0, 0, "TST_F", 0|(1<<TID::UnmodeledSideEffects), 0xe4000401ULL, NULL, NULL, NULL, 0 }, // Inst #2520 = TST_F
+ { 2521, 1, 0, 0, "TST_Fp32", 0, 0x20000ULL, NULL, NULL, NULL, OperandInfo113 }, // Inst #2521 = TST_Fp32
+ { 2522, 1, 0, 0, "TST_Fp64", 0, 0x20000ULL, NULL, NULL, NULL, OperandInfo114 }, // Inst #2522 = TST_Fp64
+ { 2523, 1, 0, 0, "TST_Fp80", 0, 0x20000ULL, NULL, NULL, NULL, OperandInfo115 }, // Inst #2523 = TST_Fp80
+ { 2524, 6, 0, 0, "UCOMISDrm", 0|(1<<TID::MayLoad), 0x2e800146ULL, NULL, ImplicitList1, Barriers1, OperandInfo94 }, // Inst #2524 = UCOMISDrm
+ { 2525, 2, 0, 0, "UCOMISDrr", 0, 0x2e800145ULL, NULL, ImplicitList1, Barriers1, OperandInfo118 }, // Inst #2525 = UCOMISDrr
+ { 2526, 6, 0, 0, "UCOMISSrm", 0|(1<<TID::MayLoad), 0x2e400106ULL, NULL, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #2526 = UCOMISSrm
+ { 2527, 2, 0, 0, "UCOMISSrr", 0, 0x2e400105ULL, NULL, ImplicitList1, Barriers1, OperandInfo119 }, // Inst #2527 = UCOMISSrr
+ { 2528, 1, 0, 0, "UCOM_FIPr", 0|(1<<TID::UnmodeledSideEffects), 0xe8000a02ULL, ImplicitList23, ImplicitList1, Barriers1, OperandInfo35 }, // Inst #2528 = UCOM_FIPr
+ { 2529, 1, 0, 0, "UCOM_FIr", 0|(1<<TID::UnmodeledSideEffects), 0xe8000602ULL, ImplicitList23, ImplicitList1, Barriers1, OperandInfo35 }, // Inst #2529 = UCOM_FIr
+ { 2530, 0, 0, 0, "UCOM_FPPr", 0|(1<<TID::UnmodeledSideEffects), 0xe9000501ULL, ImplicitList23, ImplicitList1, Barriers1, 0 }, // Inst #2530 = UCOM_FPPr
+ { 2531, 1, 0, 0, "UCOM_FPr", 0|(1<<TID::UnmodeledSideEffects), 0xe8000802ULL, ImplicitList23, ImplicitList1, Barriers1, OperandInfo35 }, // Inst #2531 = UCOM_FPr
+ { 2532, 2, 0, 0, "UCOM_FpIr32", 0, 0x50000ULL, NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #2532 = UCOM_FpIr32
+ { 2533, 2, 0, 0, "UCOM_FpIr64", 0, 0x50000ULL, NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #2533 = UCOM_FpIr64
+ { 2534, 2, 0, 0, "UCOM_FpIr80", 0, 0x50000ULL, NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #2534 = UCOM_FpIr80
+ { 2535, 2, 0, 0, "UCOM_Fpr32", 0|(1<<TID::UnmodeledSideEffects), 0x50000ULL, NULL, ImplicitList1, Barriers1, OperandInfo7 }, // Inst #2535 = UCOM_Fpr32
+ { 2536, 2, 0, 0, "UCOM_Fpr64", 0|(1<<TID::UnmodeledSideEffects), 0x50000ULL, NULL, ImplicitList1, Barriers1, OperandInfo8 }, // Inst #2536 = UCOM_Fpr64
+ { 2537, 2, 0, 0, "UCOM_Fpr80", 0|(1<<TID::UnmodeledSideEffects), 0x50000ULL, NULL, ImplicitList1, Barriers1, OperandInfo9 }, // Inst #2537 = UCOM_Fpr80
+ { 2538, 1, 0, 0, "UCOM_Fr", 0|(1<<TID::UnmodeledSideEffects), 0xe0000802ULL, ImplicitList23, ImplicitList1, Barriers1, OperandInfo35 }, // Inst #2538 = UCOM_Fr
+ { 2539, 7, 1, 0, "UNPCKHPDrm", 0|(1<<TID::MayLoad), 0x15800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #2539 = UNPCKHPDrm
+ { 2540, 3, 1, 0, "UNPCKHPDrr", 0, 0x15800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #2540 = UNPCKHPDrr
+ { 2541, 7, 1, 0, "UNPCKHPSrm", 0|(1<<TID::MayLoad), 0x15400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #2541 = UNPCKHPSrm
+ { 2542, 3, 1, 0, "UNPCKHPSrr", 0, 0x15400105ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #2542 = UNPCKHPSrr
+ { 2543, 7, 1, 0, "UNPCKLPDrm", 0|(1<<TID::MayLoad), 0x14800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #2543 = UNPCKLPDrm
+ { 2544, 3, 1, 0, "UNPCKLPDrr", 0, 0x14800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #2544 = UNPCKLPDrr
+ { 2545, 7, 1, 0, "UNPCKLPSrm", 0|(1<<TID::MayLoad), 0x14400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #2545 = UNPCKLPSrm
+ { 2546, 3, 1, 0, "UNPCKLPSrr", 0, 0x14400105ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #2546 = UNPCKLPSrr
+ { 2547, 7, 1, 0, "VADDPDYrm", 0|(1<<TID::MayLoad), 0x558800146ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2547 = VADDPDYrm
+ { 2548, 3, 1, 0, "VADDPDYrr", 0|(1<<TID::Commutable), 0x558800145ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2548 = VADDPDYrr
+ { 2549, 7, 1, 0, "VADDPDrm", 0|(1<<TID::MayLoad), 0x558800146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2549 = VADDPDrm
+ { 2550, 3, 1, 0, "VADDPDrr", 0|(1<<TID::Commutable), 0x558800145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2550 = VADDPDrr
+ { 2551, 7, 1, 0, "VADDPSYrm", 0|(1<<TID::MayLoad), 0x558400106ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2551 = VADDPSYrm
+ { 2552, 3, 1, 0, "VADDPSYrr", 0|(1<<TID::Commutable), 0x558400105ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2552 = VADDPSYrr
+ { 2553, 7, 1, 0, "VADDPSrm", 0|(1<<TID::MayLoad), 0x558400106ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2553 = VADDPSrm
+ { 2554, 3, 1, 0, "VADDPSrr", 0|(1<<TID::Commutable), 0x558400105ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2554 = VADDPSrr
+ { 2555, 7, 1, 0, "VADDSDrm", 0|(1<<TID::MayLoad), 0x558000b06ULL, NULL, NULL, NULL, OperandInfo226 }, // Inst #2555 = VADDSDrm
+ { 2556, 7, 1, 0, "VADDSDrm_Int", 0|(1<<TID::MayLoad), 0x558000b06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2556 = VADDSDrm_Int
+ { 2557, 3, 1, 0, "VADDSDrr", 0|(1<<TID::Commutable), 0x558000b05ULL, NULL, NULL, NULL, OperandInfo227 }, // Inst #2557 = VADDSDrr
+ { 2558, 3, 1, 0, "VADDSDrr_Int", 0, 0x558000b05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2558 = VADDSDrr_Int
+ { 2559, 7, 1, 0, "VADDSSrm", 0|(1<<TID::MayLoad), 0x558000c06ULL, NULL, NULL, NULL, OperandInfo228 }, // Inst #2559 = VADDSSrm
+ { 2560, 7, 1, 0, "VADDSSrm_Int", 0|(1<<TID::MayLoad), 0x558000c06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2560 = VADDSSrm_Int
+ { 2561, 3, 1, 0, "VADDSSrr", 0|(1<<TID::Commutable), 0x558000c05ULL, NULL, NULL, NULL, OperandInfo229 }, // Inst #2561 = VADDSSrr
+ { 2562, 3, 1, 0, "VADDSSrr_Int", 0, 0x558000c05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2562 = VADDSSrr_Int
+ { 2563, 7, 1, 0, "VADDSUBPDYrm", 0|(1<<TID::MayLoad), 0x5d0800046ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2563 = VADDSUBPDYrm
+ { 2564, 3, 1, 0, "VADDSUBPDYrr", 0, 0x5d0800045ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2564 = VADDSUBPDYrr
+ { 2565, 7, 1, 0, "VADDSUBPDrm", 0|(1<<TID::MayLoad), 0x5d0800046ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2565 = VADDSUBPDrm
+ { 2566, 3, 1, 0, "VADDSUBPDrr", 0, 0x5d0800045ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2566 = VADDSUBPDrr
+ { 2567, 7, 1, 0, "VADDSUBPSYrm", 0|(1<<TID::MayLoad), 0x5d0800b06ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2567 = VADDSUBPSYrm
+ { 2568, 3, 1, 0, "VADDSUBPSYrr", 0, 0x5d0800b05ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2568 = VADDSUBPSYrr
+ { 2569, 7, 1, 0, "VADDSUBPSrm", 0|(1<<TID::MayLoad), 0x5d0800b06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2569 = VADDSUBPSrm
+ { 2570, 3, 1, 0, "VADDSUBPSrr", 0, 0x5d0800b05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2570 = VADDSUBPSrr
+ { 2571, 7, 1, 0, "VAESDECLASTrm", 0|(1<<TID::MayLoad), 0x5dfc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2571 = VAESDECLASTrm
+ { 2572, 3, 1, 0, "VAESDECLASTrr", 0, 0x5dfc00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2572 = VAESDECLASTrr
+ { 2573, 7, 1, 0, "VAESDECrm", 0|(1<<TID::MayLoad), 0x5dec00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2573 = VAESDECrm
+ { 2574, 3, 1, 0, "VAESDECrr", 0, 0x5dec00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2574 = VAESDECrr
+ { 2575, 7, 1, 0, "VAESENCLASTrm", 0|(1<<TID::MayLoad), 0x5ddc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2575 = VAESENCLASTrm
+ { 2576, 3, 1, 0, "VAESENCLASTrr", 0, 0x5ddc00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2576 = VAESENCLASTrr
+ { 2577, 7, 1, 0, "VAESENCrm", 0|(1<<TID::MayLoad), 0x5dcc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2577 = VAESENCrm
+ { 2578, 3, 1, 0, "VAESENCrr", 0, 0x5dcc00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2578 = VAESENCrr
+ { 2579, 6, 1, 0, "VAESIMCrm", 0|(1<<TID::MayLoad), 0x1dbc00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2579 = VAESIMCrm
+ { 2580, 2, 1, 0, "VAESIMCrr", 0, 0x1dbc00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2580 = VAESIMCrr
+ { 2581, 7, 1, 0, "VAESKEYGENASSIST128rm", 0|(1<<TID::MayLoad), 0x1dfc02e46ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #2581 = VAESKEYGENASSIST128rm
+ { 2582, 3, 1, 0, "VAESKEYGENASSIST128rr", 0, 0x1dfc02e45ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #2582 = VAESKEYGENASSIST128rr
+ { 2583, 7, 1, 0, "VANDNPDYrm", 0|(1<<TID::UnmodeledSideEffects), 0x555800046ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2583 = VANDNPDYrm
+ { 2584, 3, 1, 0, "VANDNPDYrr", 0|(1<<TID::UnmodeledSideEffects), 0x555800045ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2584 = VANDNPDYrr
+ { 2585, 7, 1, 0, "VANDNPDrm", 0|(1<<TID::UnmodeledSideEffects), 0x555800046ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2585 = VANDNPDrm
+ { 2586, 3, 1, 0, "VANDNPDrr", 0|(1<<TID::UnmodeledSideEffects), 0x555800045ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2586 = VANDNPDrr
+ { 2587, 7, 1, 0, "VANDNPSYrm", 0|(1<<TID::UnmodeledSideEffects), 0x555400006ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2587 = VANDNPSYrm
+ { 2588, 3, 1, 0, "VANDNPSYrr", 0|(1<<TID::UnmodeledSideEffects), 0x555400005ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2588 = VANDNPSYrr
+ { 2589, 7, 1, 0, "VANDNPSrm", 0|(1<<TID::UnmodeledSideEffects), 0x555400006ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2589 = VANDNPSrm
+ { 2590, 3, 1, 0, "VANDNPSrr", 0|(1<<TID::UnmodeledSideEffects), 0x555400005ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2590 = VANDNPSrr
+ { 2591, 7, 1, 0, "VANDPDYrm", 0|(1<<TID::UnmodeledSideEffects), 0x554800046ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2591 = VANDPDYrm
+ { 2592, 3, 1, 0, "VANDPDYrr", 0|(1<<TID::Commutable)|(1<<TID::UnmodeledSideEffects), 0x554800045ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2592 = VANDPDYrr
+ { 2593, 7, 1, 0, "VANDPDrm", 0|(1<<TID::UnmodeledSideEffects), 0x554800046ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2593 = VANDPDrm
+ { 2594, 3, 1, 0, "VANDPDrr", 0|(1<<TID::Commutable)|(1<<TID::UnmodeledSideEffects), 0x554800045ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2594 = VANDPDrr
+ { 2595, 7, 1, 0, "VANDPSYrm", 0|(1<<TID::UnmodeledSideEffects), 0x554400006ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2595 = VANDPSYrm
+ { 2596, 3, 1, 0, "VANDPSYrr", 0|(1<<TID::Commutable)|(1<<TID::UnmodeledSideEffects), 0x554400005ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2596 = VANDPSYrr
+ { 2597, 7, 1, 0, "VANDPSrm", 0|(1<<TID::UnmodeledSideEffects), 0x554400006ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2597 = VANDPSrm
+ { 2598, 3, 1, 0, "VANDPSrr", 0|(1<<TID::Commutable)|(1<<TID::UnmodeledSideEffects), 0x554400005ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2598 = VANDPSrr
+ { 2599, 3, 0, 0, "VASTART_SAVE_XMM_REGS", 0|(1<<TID::UsesCustomInserter)|(1<<TID::Variadic), 0x0ULL, NULL, NULL, NULL, OperandInfo230 }, // Inst #2599 = VASTART_SAVE_XMM_REGS
+ { 2600, 8, 1, 0, "VBLENDPDYrmi", 0|(1<<TID::MayLoad), 0x50dc02e46ULL, NULL, NULL, NULL, OperandInfo231 }, // Inst #2600 = VBLENDPDYrmi
+ { 2601, 4, 1, 0, "VBLENDPDYrri", 0, 0x50dc02e45ULL, NULL, NULL, NULL, OperandInfo232 }, // Inst #2601 = VBLENDPDYrri
+ { 2602, 8, 1, 0, "VBLENDPDrmi", 0|(1<<TID::MayLoad), 0x50dc02e46ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #2602 = VBLENDPDrmi
+ { 2603, 4, 1, 0, "VBLENDPDrri", 0, 0x50dc02e45ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #2603 = VBLENDPDrri
+ { 2604, 8, 1, 0, "VBLENDPSYrmi", 0|(1<<TID::MayLoad), 0x50cc02e46ULL, NULL, NULL, NULL, OperandInfo231 }, // Inst #2604 = VBLENDPSYrmi
+ { 2605, 4, 1, 0, "VBLENDPSYrri", 0, 0x50cc02e45ULL, NULL, NULL, NULL, OperandInfo232 }, // Inst #2605 = VBLENDPSYrri
+ { 2606, 8, 1, 0, "VBLENDPSrmi", 0|(1<<TID::MayLoad), 0x50cc02e46ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #2606 = VBLENDPSrmi
+ { 2607, 4, 1, 0, "VBLENDPSrri", 0, 0x50cc02e45ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #2607 = VBLENDPSrri
+ { 2608, 8, 1, 0, "VBLENDVPDYrm", 0|(1<<TID::MayLoad), 0xd4bc00e46ULL, NULL, NULL, NULL, OperandInfo233 }, // Inst #2608 = VBLENDVPDYrm
+ { 2609, 4, 1, 0, "VBLENDVPDYrr", 0, 0xd4bc00e45ULL, NULL, NULL, NULL, OperandInfo234 }, // Inst #2609 = VBLENDVPDYrr
+ { 2610, 8, 1, 0, "VBLENDVPDrm", 0|(1<<TID::MayLoad), 0xd4bc00e46ULL, NULL, NULL, NULL, OperandInfo235 }, // Inst #2610 = VBLENDVPDrm
+ { 2611, 4, 1, 0, "VBLENDVPDrr", 0, 0xd4bc00e45ULL, NULL, NULL, NULL, OperandInfo236 }, // Inst #2611 = VBLENDVPDrr
+ { 2612, 8, 1, 0, "VBLENDVPSYrm", 0|(1<<TID::MayLoad), 0xd4ac00e46ULL, NULL, NULL, NULL, OperandInfo233 }, // Inst #2612 = VBLENDVPSYrm
+ { 2613, 4, 1, 0, "VBLENDVPSYrr", 0, 0xd4ac00e45ULL, NULL, NULL, NULL, OperandInfo234 }, // Inst #2613 = VBLENDVPSYrr
+ { 2614, 8, 1, 0, "VBLENDVPSrm", 0|(1<<TID::MayLoad), 0xd4ac00e46ULL, NULL, NULL, NULL, OperandInfo235 }, // Inst #2614 = VBLENDVPSrm
+ { 2615, 4, 1, 0, "VBLENDVPSrr", 0, 0xd4ac00e45ULL, NULL, NULL, NULL, OperandInfo236 }, // Inst #2615 = VBLENDVPSrr
+ { 2616, 6, 1, 0, "VBROADCASTF128", 0|(1<<TID::MayLoad), 0x11ac00d46ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #2616 = VBROADCASTF128
+ { 2617, 6, 1, 0, "VBROADCASTSD", 0|(1<<TID::MayLoad), 0x119c00d46ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #2617 = VBROADCASTSD
+ { 2618, 6, 1, 0, "VBROADCASTSS", 0|(1<<TID::MayLoad), 0x118c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2618 = VBROADCASTSS
+ { 2619, 6, 1, 0, "VBROADCASTSSY", 0|(1<<TID::MayLoad), 0x118c00d46ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #2619 = VBROADCASTSSY
+ { 2620, 8, 1, 0, "VCMPPDYrmi", 0|(1<<TID::MayLoad), 0x5c2802046ULL, NULL, NULL, NULL, OperandInfo231 }, // Inst #2620 = VCMPPDYrmi
+ { 2621, 8, 1, 0, "VCMPPDYrmi_alt", 0|(1<<TID::UnmodeledSideEffects), 0x5c2802046ULL, NULL, NULL, NULL, OperandInfo231 }, // Inst #2621 = VCMPPDYrmi_alt
+ { 2622, 4, 1, 0, "VCMPPDYrri", 0, 0x5c2802045ULL, NULL, NULL, NULL, OperandInfo232 }, // Inst #2622 = VCMPPDYrri
+ { 2623, 4, 1, 0, "VCMPPDYrri_alt", 0|(1<<TID::UnmodeledSideEffects), 0x5c2802045ULL, NULL, NULL, NULL, OperandInfo232 }, // Inst #2623 = VCMPPDYrri_alt
+ { 2624, 8, 1, 0, "VCMPPDrmi", 0|(1<<TID::MayLoad), 0x5c2802046ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #2624 = VCMPPDrmi
+ { 2625, 8, 1, 0, "VCMPPDrmi_alt", 0|(1<<TID::UnmodeledSideEffects), 0x5c2802046ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #2625 = VCMPPDrmi_alt
+ { 2626, 4, 1, 0, "VCMPPDrri", 0, 0x5c2802045ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #2626 = VCMPPDrri
+ { 2627, 4, 1, 0, "VCMPPDrri_alt", 0|(1<<TID::UnmodeledSideEffects), 0x5c2802045ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #2627 = VCMPPDrri_alt
+ { 2628, 8, 1, 0, "VCMPPSYrmi", 0|(1<<TID::MayLoad), 0x5c2402006ULL, NULL, NULL, NULL, OperandInfo231 }, // Inst #2628 = VCMPPSYrmi
+ { 2629, 8, 1, 0, "VCMPPSYrmi_alt", 0|(1<<TID::UnmodeledSideEffects), 0x5c2402006ULL, NULL, NULL, NULL, OperandInfo231 }, // Inst #2629 = VCMPPSYrmi_alt
+ { 2630, 4, 1, 0, "VCMPPSYrri", 0, 0x5c2402005ULL, NULL, NULL, NULL, OperandInfo232 }, // Inst #2630 = VCMPPSYrri
+ { 2631, 4, 1, 0, "VCMPPSYrri_alt", 0|(1<<TID::UnmodeledSideEffects), 0x5c2402005ULL, NULL, NULL, NULL, OperandInfo232 }, // Inst #2631 = VCMPPSYrri_alt
+ { 2632, 8, 1, 0, "VCMPPSrmi", 0|(1<<TID::MayLoad), 0x5c2402006ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #2632 = VCMPPSrmi
+ { 2633, 8, 1, 0, "VCMPPSrmi_alt", 0|(1<<TID::UnmodeledSideEffects), 0x5c2402006ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #2633 = VCMPPSrmi_alt
+ { 2634, 4, 1, 0, "VCMPPSrri", 0, 0x5c2402005ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #2634 = VCMPPSrri
+ { 2635, 4, 1, 0, "VCMPPSrri_alt", 0|(1<<TID::UnmodeledSideEffects), 0x5c2402005ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #2635 = VCMPPSrri_alt
+ { 2636, 8, 1, 0, "VCMPSDrm", 0|(1<<TID::MayLoad), 0x5c2002b06ULL, NULL, NULL, NULL, OperandInfo238 }, // Inst #2636 = VCMPSDrm
+ { 2637, 8, 1, 0, "VCMPSDrm_alt", 0|(1<<TID::MayLoad), 0x5c2002b06ULL, NULL, NULL, NULL, OperandInfo238 }, // Inst #2637 = VCMPSDrm_alt
+ { 2638, 4, 1, 0, "VCMPSDrr", 0, 0x5c2002b05ULL, NULL, NULL, NULL, OperandInfo72 }, // Inst #2638 = VCMPSDrr
+ { 2639, 4, 1, 0, "VCMPSDrr_alt", 0, 0x5c2002b05ULL, NULL, NULL, NULL, OperandInfo72 }, // Inst #2639 = VCMPSDrr_alt
+ { 2640, 8, 1, 0, "VCMPSSrm", 0|(1<<TID::MayLoad), 0x5c2002c06ULL, NULL, NULL, NULL, OperandInfo239 }, // Inst #2640 = VCMPSSrm
+ { 2641, 8, 1, 0, "VCMPSSrm_alt", 0|(1<<TID::MayLoad), 0x5c2002c06ULL, NULL, NULL, NULL, OperandInfo239 }, // Inst #2641 = VCMPSSrm_alt
+ { 2642, 4, 1, 0, "VCMPSSrr", 0, 0x5c2002c05ULL, NULL, NULL, NULL, OperandInfo71 }, // Inst #2642 = VCMPSSrr
+ { 2643, 4, 1, 0, "VCMPSSrr_alt", 0, 0x5c2002c05ULL, NULL, NULL, NULL, OperandInfo71 }, // Inst #2643 = VCMPSSrr_alt
+ { 2644, 6, 0, 0, "VCOMISDrm", 0|(1<<TID::UnmodeledSideEffects), 0x12f800046ULL, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #2644 = VCOMISDrm
+ { 2645, 2, 0, 0, "VCOMISDrr", 0|(1<<TID::UnmodeledSideEffects), 0x12f800045ULL, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #2645 = VCOMISDrr
+ { 2646, 6, 0, 0, "VCOMISSrm", 0|(1<<TID::UnmodeledSideEffects), 0x12f400006ULL, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #2646 = VCOMISSrm
+ { 2647, 2, 0, 0, "VCOMISSrr", 0|(1<<TID::UnmodeledSideEffects), 0x12f400005ULL, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #2647 = VCOMISSrr
+ { 2648, 6, 1, 0, "VCVTDQ2PDYrm", 0|(1<<TID::UnmodeledSideEffects), 0x1e6400c06ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #2648 = VCVTDQ2PDYrm
+ { 2649, 2, 1, 0, "VCVTDQ2PDYrr", 0|(1<<TID::UnmodeledSideEffects), 0x1e6400c05ULL, NULL, NULL, NULL, OperandInfo240 }, // Inst #2649 = VCVTDQ2PDYrr
+ { 2650, 6, 1, 0, "VCVTDQ2PDrm", 0|(1<<TID::UnmodeledSideEffects), 0x1e6400c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2650 = VCVTDQ2PDrm
+ { 2651, 2, 1, 0, "VCVTDQ2PDrr", 0|(1<<TID::UnmodeledSideEffects), 0x1e6400c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2651 = VCVTDQ2PDrr
+ { 2652, 6, 1, 0, "VCVTDQ2PSYrm", 0|(1<<TID::UnmodeledSideEffects), 0x15b400106ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #2652 = VCVTDQ2PSYrm
+ { 2653, 2, 1, 0, "VCVTDQ2PSYrr", 0|(1<<TID::UnmodeledSideEffects), 0x15b400105ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #2653 = VCVTDQ2PSYrr
+ { 2654, 6, 1, 0, "VCVTDQ2PSrm", 0|(1<<TID::UnmodeledSideEffects), 0x15b400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2654 = VCVTDQ2PSrm
+ { 2655, 2, 1, 0, "VCVTDQ2PSrr", 0|(1<<TID::UnmodeledSideEffects), 0x15b400105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2655 = VCVTDQ2PSrr
+ { 2656, 2, 1, 0, "VCVTPD2DQXrYr", 0|(1<<TID::UnmodeledSideEffects), 0x1e6800b05ULL, NULL, NULL, NULL, OperandInfo242 }, // Inst #2656 = VCVTPD2DQXrYr
+ { 2657, 6, 1, 0, "VCVTPD2DQXrm", 0|(1<<TID::UnmodeledSideEffects), 0x1e6800b06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2657 = VCVTPD2DQXrm
+ { 2658, 2, 1, 0, "VCVTPD2DQXrr", 0|(1<<TID::UnmodeledSideEffects), 0x1e6800b05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2658 = VCVTPD2DQXrr
+ { 2659, 6, 1, 0, "VCVTPD2DQYrm", 0|(1<<TID::UnmodeledSideEffects), 0x11e6800b06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2659 = VCVTPD2DQYrm
+ { 2660, 2, 1, 0, "VCVTPD2DQYrr", 0|(1<<TID::UnmodeledSideEffects), 0x1e6800b05ULL, NULL, NULL, NULL, OperandInfo242 }, // Inst #2660 = VCVTPD2DQYrr
+ { 2661, 2, 1, 0, "VCVTPD2DQrr", 0|(1<<TID::UnmodeledSideEffects), 0x1e6800b05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2661 = VCVTPD2DQrr
+ { 2662, 2, 1, 0, "VCVTPD2PSXrYr", 0|(1<<TID::UnmodeledSideEffects), 0x15a800045ULL, NULL, NULL, NULL, OperandInfo242 }, // Inst #2662 = VCVTPD2PSXrYr
+ { 2663, 6, 1, 0, "VCVTPD2PSXrm", 0|(1<<TID::UnmodeledSideEffects), 0x15a800046ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2663 = VCVTPD2PSXrm
+ { 2664, 2, 1, 0, "VCVTPD2PSXrr", 0|(1<<TID::UnmodeledSideEffects), 0x15a800045ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2664 = VCVTPD2PSXrr
+ { 2665, 6, 1, 0, "VCVTPD2PSYrm", 0|(1<<TID::UnmodeledSideEffects), 0x115a800046ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2665 = VCVTPD2PSYrm
+ { 2666, 2, 1, 0, "VCVTPD2PSYrr", 0|(1<<TID::UnmodeledSideEffects), 0x15a800045ULL, NULL, NULL, NULL, OperandInfo242 }, // Inst #2666 = VCVTPD2PSYrr
+ { 2667, 2, 1, 0, "VCVTPD2PSrr", 0|(1<<TID::UnmodeledSideEffects), 0x15a800045ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2667 = VCVTPD2PSrr
+ { 2668, 6, 1, 0, "VCVTPS2DQYrm", 0|(1<<TID::UnmodeledSideEffects), 0x15b800046ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #2668 = VCVTPS2DQYrm
+ { 2669, 2, 1, 0, "VCVTPS2DQYrr", 0|(1<<TID::UnmodeledSideEffects), 0x15b800045ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #2669 = VCVTPS2DQYrr
+ { 2670, 6, 1, 0, "VCVTPS2DQrm", 0|(1<<TID::UnmodeledSideEffects), 0x15b800046ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2670 = VCVTPS2DQrm
+ { 2671, 2, 1, 0, "VCVTPS2DQrr", 0|(1<<TID::UnmodeledSideEffects), 0x15b800045ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2671 = VCVTPS2DQrr
+ { 2672, 6, 1, 0, "VCVTPS2PDYrm", 0|(1<<TID::UnmodeledSideEffects), 0x15a000006ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #2672 = VCVTPS2PDYrm
+ { 2673, 2, 1, 0, "VCVTPS2PDYrr", 0|(1<<TID::UnmodeledSideEffects), 0x15a000005ULL, NULL, NULL, NULL, OperandInfo240 }, // Inst #2673 = VCVTPS2PDYrr
+ { 2674, 6, 1, 0, "VCVTPS2PDrm", 0|(1<<TID::UnmodeledSideEffects), 0x15a000006ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2674 = VCVTPS2PDrm
+ { 2675, 2, 1, 0, "VCVTPS2PDrr", 0|(1<<TID::UnmodeledSideEffects), 0x15a000005ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2675 = VCVTPS2PDrr
+ { 2676, 6, 1, 0, "VCVTSD2SI64rm", 0|(1<<TID::UnmodeledSideEffects), 0x32d000b06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #2676 = VCVTSD2SI64rm
+ { 2677, 2, 1, 0, "VCVTSD2SI64rr", 0|(1<<TID::UnmodeledSideEffects), 0x32d000b05ULL, NULL, NULL, NULL, OperandInfo102 }, // Inst #2677 = VCVTSD2SI64rr
+ { 2678, 6, 1, 0, "VCVTSD2SI_altrm", 0|(1<<TID::UnmodeledSideEffects), 0x12d000b06ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #2678 = VCVTSD2SI_altrm
+ { 2679, 2, 1, 0, "VCVTSD2SI_altrr", 0|(1<<TID::UnmodeledSideEffects), 0x12d000b05ULL, NULL, NULL, NULL, OperandInfo103 }, // Inst #2679 = VCVTSD2SI_altrr
+ { 2680, 7, 1, 0, "VCVTSD2SSrm", 0|(1<<TID::UnmodeledSideEffects), 0x55a000b06ULL, NULL, NULL, NULL, OperandInfo243 }, // Inst #2680 = VCVTSD2SSrm
+ { 2681, 3, 1, 0, "VCVTSD2SSrr", 0|(1<<TID::UnmodeledSideEffects), 0x55a000b05ULL, NULL, NULL, NULL, OperandInfo244 }, // Inst #2681 = VCVTSD2SSrr
+ { 2682, 7, 1, 0, "VCVTSI2SD64rm", 0|(1<<TID::UnmodeledSideEffects), 0x72a000b06ULL, NULL, NULL, NULL, OperandInfo226 }, // Inst #2682 = VCVTSI2SD64rm
+ { 2683, 3, 1, 0, "VCVTSI2SD64rr", 0|(1<<TID::UnmodeledSideEffects), 0x72a000b05ULL, NULL, NULL, NULL, OperandInfo245 }, // Inst #2683 = VCVTSI2SD64rr
+ { 2684, 7, 1, 0, "VCVTSI2SDLrm", 0|(1<<TID::UnmodeledSideEffects), 0x52a000b06ULL, NULL, NULL, NULL, OperandInfo226 }, // Inst #2684 = VCVTSI2SDLrm
+ { 2685, 3, 1, 0, "VCVTSI2SDLrr", 0|(1<<TID::UnmodeledSideEffects), 0x52a000b05ULL, NULL, NULL, NULL, OperandInfo246 }, // Inst #2685 = VCVTSI2SDLrr
+ { 2686, 7, 1, 0, "VCVTSI2SDrm", 0|(1<<TID::UnmodeledSideEffects), 0x52a000b06ULL, NULL, NULL, NULL, OperandInfo226 }, // Inst #2686 = VCVTSI2SDrm
+ { 2687, 3, 1, 0, "VCVTSI2SDrr", 0|(1<<TID::UnmodeledSideEffects), 0x52a000b05ULL, NULL, NULL, NULL, OperandInfo246 }, // Inst #2687 = VCVTSI2SDrr
+ { 2688, 7, 1, 0, "VCVTSI2SS64rm", 0|(1<<TID::UnmodeledSideEffects), 0x72a000c06ULL, NULL, NULL, NULL, OperandInfo228 }, // Inst #2688 = VCVTSI2SS64rm
+ { 2689, 3, 1, 0, "VCVTSI2SS64rr", 0|(1<<TID::UnmodeledSideEffects), 0x72a000c05ULL, NULL, NULL, NULL, OperandInfo247 }, // Inst #2689 = VCVTSI2SS64rr
+ { 2690, 7, 1, 0, "VCVTSI2SSrm", 0|(1<<TID::UnmodeledSideEffects), 0x52a000c06ULL, NULL, NULL, NULL, OperandInfo228 }, // Inst #2690 = VCVTSI2SSrm
+ { 2691, 3, 1, 0, "VCVTSI2SSrr", 0|(1<<TID::UnmodeledSideEffects), 0x52a000c05ULL, NULL, NULL, NULL, OperandInfo248 }, // Inst #2691 = VCVTSI2SSrr
+ { 2692, 7, 1, 0, "VCVTSS2SDrm", 0|(1<<TID::UnmodeledSideEffects), 0x55a000c06ULL, NULL, NULL, NULL, OperandInfo249 }, // Inst #2692 = VCVTSS2SDrm
+ { 2693, 3, 1, 0, "VCVTSS2SDrr", 0|(1<<TID::UnmodeledSideEffects), 0x55a000c05ULL, NULL, NULL, NULL, OperandInfo250 }, // Inst #2693 = VCVTSS2SDrr
+ { 2694, 6, 1, 0, "VCVTSS2SI64rm", 0|(1<<TID::UnmodeledSideEffects), 0x32d000c06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #2694 = VCVTSS2SI64rm
+ { 2695, 2, 1, 0, "VCVTSS2SI64rr", 0|(1<<TID::UnmodeledSideEffects), 0x32d000c05ULL, NULL, NULL, NULL, OperandInfo100 }, // Inst #2695 = VCVTSS2SI64rr
+ { 2696, 6, 1, 0, "VCVTSS2SIrm", 0|(1<<TID::UnmodeledSideEffects), 0x12d000c06ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #2696 = VCVTSS2SIrm
+ { 2697, 2, 1, 0, "VCVTSS2SIrr", 0|(1<<TID::UnmodeledSideEffects), 0x12d000c05ULL, NULL, NULL, NULL, OperandInfo101 }, // Inst #2697 = VCVTSS2SIrr
+ { 2698, 2, 1, 0, "VCVTTPD2DQXrYr", 0|(1<<TID::UnmodeledSideEffects), 0x1e6800045ULL, NULL, NULL, NULL, OperandInfo242 }, // Inst #2698 = VCVTTPD2DQXrYr
+ { 2699, 6, 1, 0, "VCVTTPD2DQXrm", 0|(1<<TID::UnmodeledSideEffects), 0x1e6800046ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2699 = VCVTTPD2DQXrm
+ { 2700, 2, 1, 0, "VCVTTPD2DQXrr", 0|(1<<TID::UnmodeledSideEffects), 0x1e6800045ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2700 = VCVTTPD2DQXrr
+ { 2701, 6, 1, 0, "VCVTTPD2DQYrm", 0|(1<<TID::UnmodeledSideEffects), 0x11e6800046ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2701 = VCVTTPD2DQYrm
+ { 2702, 2, 1, 0, "VCVTTPD2DQYrr", 0|(1<<TID::UnmodeledSideEffects), 0x1e6800045ULL, NULL, NULL, NULL, OperandInfo242 }, // Inst #2702 = VCVTTPD2DQYrr
+ { 2703, 2, 1, 0, "VCVTTPD2DQrr", 0|(1<<TID::UnmodeledSideEffects), 0x1e6800045ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2703 = VCVTTPD2DQrr
+ { 2704, 6, 1, 0, "VCVTTPS2DQYrm", 0|(1<<TID::UnmodeledSideEffects), 0x15b000c06ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #2704 = VCVTTPS2DQYrm
+ { 2705, 2, 1, 0, "VCVTTPS2DQYrr", 0|(1<<TID::UnmodeledSideEffects), 0x15b000c05ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #2705 = VCVTTPS2DQYrr
+ { 2706, 6, 1, 0, "VCVTTPS2DQrm", 0|(1<<TID::UnmodeledSideEffects), 0x15b000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2706 = VCVTTPS2DQrm
+ { 2707, 2, 1, 0, "VCVTTPS2DQrr", 0|(1<<TID::UnmodeledSideEffects), 0x15b000c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2707 = VCVTTPS2DQrr
+ { 2708, 6, 1, 0, "VCVTTSD2SI64rm", 0|(1<<TID::MayLoad), 0x32c000b06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #2708 = VCVTTSD2SI64rm
+ { 2709, 2, 1, 0, "VCVTTSD2SI64rr", 0, 0x32c000b05ULL, NULL, NULL, NULL, OperandInfo102 }, // Inst #2709 = VCVTTSD2SI64rr
+ { 2710, 6, 1, 0, "VCVTTSD2SIrm", 0|(1<<TID::MayLoad), 0x12c000b06ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #2710 = VCVTTSD2SIrm
+ { 2711, 2, 1, 0, "VCVTTSD2SIrr", 0, 0x12c000b05ULL, NULL, NULL, NULL, OperandInfo103 }, // Inst #2711 = VCVTTSD2SIrr
+ { 2712, 6, 1, 0, "VCVTTSS2SI64rm", 0|(1<<TID::MayLoad), 0x32c000c06ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #2712 = VCVTTSS2SI64rm
+ { 2713, 2, 1, 0, "VCVTTSS2SI64rr", 0, 0x32c000c05ULL, NULL, NULL, NULL, OperandInfo100 }, // Inst #2713 = VCVTTSS2SI64rr
+ { 2714, 6, 1, 0, "VCVTTSS2SIrm", 0|(1<<TID::MayLoad), 0x12c000c06ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #2714 = VCVTTSS2SIrm
+ { 2715, 2, 1, 0, "VCVTTSS2SIrr", 0, 0x12c000c05ULL, NULL, NULL, NULL, OperandInfo101 }, // Inst #2715 = VCVTTSS2SIrr
+ { 2716, 7, 1, 0, "VDIVPDYrm", 0|(1<<TID::MayLoad), 0x55e800146ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2716 = VDIVPDYrm
+ { 2717, 3, 1, 0, "VDIVPDYrr", 0, 0x55e800145ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2717 = VDIVPDYrr
+ { 2718, 7, 1, 0, "VDIVPDrm", 0|(1<<TID::MayLoad), 0x55e800146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2718 = VDIVPDrm
+ { 2719, 3, 1, 0, "VDIVPDrr", 0, 0x55e800145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2719 = VDIVPDrr
+ { 2720, 7, 1, 0, "VDIVPSYrm", 0|(1<<TID::MayLoad), 0x55e400106ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2720 = VDIVPSYrm
+ { 2721, 3, 1, 0, "VDIVPSYrr", 0, 0x55e400105ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2721 = VDIVPSYrr
+ { 2722, 7, 1, 0, "VDIVPSrm", 0|(1<<TID::MayLoad), 0x55e400106ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2722 = VDIVPSrm
+ { 2723, 3, 1, 0, "VDIVPSrr", 0, 0x55e400105ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2723 = VDIVPSrr
+ { 2724, 7, 1, 0, "VDIVSDrm", 0|(1<<TID::MayLoad), 0x55e000b06ULL, NULL, NULL, NULL, OperandInfo226 }, // Inst #2724 = VDIVSDrm
+ { 2725, 7, 1, 0, "VDIVSDrm_Int", 0|(1<<TID::MayLoad), 0x55e000b06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2725 = VDIVSDrm_Int
+ { 2726, 3, 1, 0, "VDIVSDrr", 0, 0x55e000b05ULL, NULL, NULL, NULL, OperandInfo227 }, // Inst #2726 = VDIVSDrr
+ { 2727, 3, 1, 0, "VDIVSDrr_Int", 0, 0x55e000b05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2727 = VDIVSDrr_Int
+ { 2728, 7, 1, 0, "VDIVSSrm", 0|(1<<TID::MayLoad), 0x55e000c06ULL, NULL, NULL, NULL, OperandInfo228 }, // Inst #2728 = VDIVSSrm
+ { 2729, 7, 1, 0, "VDIVSSrm_Int", 0|(1<<TID::MayLoad), 0x55e000c06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2729 = VDIVSSrm_Int
+ { 2730, 3, 1, 0, "VDIVSSrr", 0, 0x55e000c05ULL, NULL, NULL, NULL, OperandInfo229 }, // Inst #2730 = VDIVSSrr
+ { 2731, 3, 1, 0, "VDIVSSrr_Int", 0, 0x55e000c05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2731 = VDIVSSrr_Int
+ { 2732, 8, 1, 0, "VDPPDrmi", 0|(1<<TID::MayLoad), 0x541c02e46ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #2732 = VDPPDrmi
+ { 2733, 4, 1, 0, "VDPPDrri", 0|(1<<TID::Commutable), 0x541c02e45ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #2733 = VDPPDrri
+ { 2734, 8, 1, 0, "VDPPSYrmi", 0|(1<<TID::MayLoad), 0x540c02e46ULL, NULL, NULL, NULL, OperandInfo231 }, // Inst #2734 = VDPPSYrmi
+ { 2735, 4, 1, 0, "VDPPSYrri", 0|(1<<TID::Commutable), 0x540c02e45ULL, NULL, NULL, NULL, OperandInfo232 }, // Inst #2735 = VDPPSYrri
+ { 2736, 8, 1, 0, "VDPPSrmi", 0|(1<<TID::MayLoad), 0x540c02e46ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #2736 = VDPPSrmi
+ { 2737, 4, 1, 0, "VDPPSrri", 0|(1<<TID::Commutable), 0x540c02e45ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #2737 = VDPPSrri
+ { 2738, 5, 0, 0, "VERRm", 0|(1<<TID::UnmodeledSideEffects), 0x11cULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2738 = VERRm
+ { 2739, 1, 0, 0, "VERRr", 0|(1<<TID::UnmodeledSideEffects), 0x114ULL, NULL, NULL, NULL, OperandInfo106 }, // Inst #2739 = VERRr
+ { 2740, 5, 0, 0, "VERWm", 0|(1<<TID::UnmodeledSideEffects), 0x11dULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2740 = VERWm
+ { 2741, 1, 0, 0, "VERWr", 0|(1<<TID::UnmodeledSideEffects), 0x115ULL, NULL, NULL, NULL, OperandInfo106 }, // Inst #2741 = VERWr
+ { 2742, 7, 0, 0, "VEXTRACTF128mr", 0|(1<<TID::UnmodeledSideEffects), 0x119c02e44ULL, NULL, NULL, NULL, OperandInfo251 }, // Inst #2742 = VEXTRACTF128mr
+ { 2743, 3, 1, 0, "VEXTRACTF128rr", 0|(1<<TID::UnmodeledSideEffects), 0x119c02e43ULL, NULL, NULL, NULL, OperandInfo252 }, // Inst #2743 = VEXTRACTF128rr
+ { 2744, 7, 0, 0, "VEXTRACTPSmr", 0|(1<<TID::MayStore), 0x117c02e44ULL, NULL, NULL, NULL, OperandInfo108 }, // Inst #2744 = VEXTRACTPSmr
+ { 2745, 3, 1, 0, "VEXTRACTPSrr", 0, 0x117c02e43ULL, NULL, NULL, NULL, OperandInfo109 }, // Inst #2745 = VEXTRACTPSrr
+ { 2746, 3, 1, 0, "VEXTRACTPSrr64", 0|(1<<TID::UnmodeledSideEffects), 0x117c02e43ULL, NULL, NULL, NULL, OperandInfo206 }, // Inst #2746 = VEXTRACTPSrr64
+ { 2747, 7, 1, 0, "VFMADDPDr132m", 0|(1<<TID::UnmodeledSideEffects), 0x798c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2747 = VFMADDPDr132m
+ { 2748, 7, 1, 0, "VFMADDPDr132mY", 0|(1<<TID::UnmodeledSideEffects), 0x798c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2748 = VFMADDPDr132mY
+ { 2749, 3, 1, 0, "VFMADDPDr132r", 0|(1<<TID::UnmodeledSideEffects), 0x798c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2749 = VFMADDPDr132r
+ { 2750, 3, 1, 0, "VFMADDPDr132rY", 0|(1<<TID::UnmodeledSideEffects), 0x798c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2750 = VFMADDPDr132rY
+ { 2751, 7, 1, 0, "VFMADDPDr213m", 0|(1<<TID::UnmodeledSideEffects), 0x7a8c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2751 = VFMADDPDr213m
+ { 2752, 7, 1, 0, "VFMADDPDr213mY", 0|(1<<TID::UnmodeledSideEffects), 0x7a8c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2752 = VFMADDPDr213mY
+ { 2753, 3, 1, 0, "VFMADDPDr213r", 0|(1<<TID::UnmodeledSideEffects), 0x7a8c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2753 = VFMADDPDr213r
+ { 2754, 3, 1, 0, "VFMADDPDr213rY", 0|(1<<TID::UnmodeledSideEffects), 0x7a8c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2754 = VFMADDPDr213rY
+ { 2755, 7, 1, 0, "VFMADDPDr231m", 0|(1<<TID::UnmodeledSideEffects), 0x7b8c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2755 = VFMADDPDr231m
+ { 2756, 7, 1, 0, "VFMADDPDr231mY", 0|(1<<TID::UnmodeledSideEffects), 0x7b8c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2756 = VFMADDPDr231mY
+ { 2757, 3, 1, 0, "VFMADDPDr231r", 0|(1<<TID::UnmodeledSideEffects), 0x7b8c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2757 = VFMADDPDr231r
+ { 2758, 3, 1, 0, "VFMADDPDr231rY", 0|(1<<TID::UnmodeledSideEffects), 0x7b8c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2758 = VFMADDPDr231rY
+ { 2759, 7, 1, 0, "VFMADDPSr132m", 0|(1<<TID::UnmodeledSideEffects), 0x598c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2759 = VFMADDPSr132m
+ { 2760, 7, 1, 0, "VFMADDPSr132mY", 0|(1<<TID::UnmodeledSideEffects), 0x598c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2760 = VFMADDPSr132mY
+ { 2761, 3, 1, 0, "VFMADDPSr132r", 0|(1<<TID::UnmodeledSideEffects), 0x598c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2761 = VFMADDPSr132r
+ { 2762, 3, 1, 0, "VFMADDPSr132rY", 0|(1<<TID::UnmodeledSideEffects), 0x598c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2762 = VFMADDPSr132rY
+ { 2763, 7, 1, 0, "VFMADDPSr213m", 0|(1<<TID::UnmodeledSideEffects), 0x5a8c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2763 = VFMADDPSr213m
+ { 2764, 7, 1, 0, "VFMADDPSr213mY", 0|(1<<TID::UnmodeledSideEffects), 0x5a8c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2764 = VFMADDPSr213mY
+ { 2765, 3, 1, 0, "VFMADDPSr213r", 0|(1<<TID::UnmodeledSideEffects), 0x5a8c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2765 = VFMADDPSr213r
+ { 2766, 3, 1, 0, "VFMADDPSr213rY", 0|(1<<TID::UnmodeledSideEffects), 0x5a8c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2766 = VFMADDPSr213rY
+ { 2767, 7, 1, 0, "VFMADDPSr231m", 0|(1<<TID::UnmodeledSideEffects), 0x5b8c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2767 = VFMADDPSr231m
+ { 2768, 7, 1, 0, "VFMADDPSr231mY", 0|(1<<TID::UnmodeledSideEffects), 0x5b8c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2768 = VFMADDPSr231mY
+ { 2769, 3, 1, 0, "VFMADDPSr231r", 0|(1<<TID::UnmodeledSideEffects), 0x5b8c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2769 = VFMADDPSr231r
+ { 2770, 3, 1, 0, "VFMADDPSr231rY", 0|(1<<TID::UnmodeledSideEffects), 0x5b8c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2770 = VFMADDPSr231rY
+ { 2771, 7, 1, 0, "VFMADDSUBPDr132m", 0|(1<<TID::UnmodeledSideEffects), 0x796c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2771 = VFMADDSUBPDr132m
+ { 2772, 7, 1, 0, "VFMADDSUBPDr132mY", 0|(1<<TID::UnmodeledSideEffects), 0x796c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2772 = VFMADDSUBPDr132mY
+ { 2773, 3, 1, 0, "VFMADDSUBPDr132r", 0|(1<<TID::UnmodeledSideEffects), 0x796c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2773 = VFMADDSUBPDr132r
+ { 2774, 3, 1, 0, "VFMADDSUBPDr132rY", 0|(1<<TID::UnmodeledSideEffects), 0x796c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2774 = VFMADDSUBPDr132rY
+ { 2775, 7, 1, 0, "VFMADDSUBPDr213m", 0|(1<<TID::UnmodeledSideEffects), 0x7a6c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2775 = VFMADDSUBPDr213m
+ { 2776, 7, 1, 0, "VFMADDSUBPDr213mY", 0|(1<<TID::UnmodeledSideEffects), 0x7a6c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2776 = VFMADDSUBPDr213mY
+ { 2777, 3, 1, 0, "VFMADDSUBPDr213r", 0|(1<<TID::UnmodeledSideEffects), 0x7a6c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2777 = VFMADDSUBPDr213r
+ { 2778, 3, 1, 0, "VFMADDSUBPDr213rY", 0|(1<<TID::UnmodeledSideEffects), 0x7a6c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2778 = VFMADDSUBPDr213rY
+ { 2779, 7, 1, 0, "VFMADDSUBPDr231m", 0|(1<<TID::UnmodeledSideEffects), 0x7b6c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2779 = VFMADDSUBPDr231m
+ { 2780, 7, 1, 0, "VFMADDSUBPDr231mY", 0|(1<<TID::UnmodeledSideEffects), 0x7b6c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2780 = VFMADDSUBPDr231mY
+ { 2781, 3, 1, 0, "VFMADDSUBPDr231r", 0|(1<<TID::UnmodeledSideEffects), 0x7b6c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2781 = VFMADDSUBPDr231r
+ { 2782, 3, 1, 0, "VFMADDSUBPDr231rY", 0|(1<<TID::UnmodeledSideEffects), 0x7b6c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2782 = VFMADDSUBPDr231rY
+ { 2783, 7, 1, 0, "VFMADDSUBPSr132m", 0|(1<<TID::UnmodeledSideEffects), 0x596c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2783 = VFMADDSUBPSr132m
+ { 2784, 7, 1, 0, "VFMADDSUBPSr132mY", 0|(1<<TID::UnmodeledSideEffects), 0x596c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2784 = VFMADDSUBPSr132mY
+ { 2785, 3, 1, 0, "VFMADDSUBPSr132r", 0|(1<<TID::UnmodeledSideEffects), 0x596c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2785 = VFMADDSUBPSr132r
+ { 2786, 3, 1, 0, "VFMADDSUBPSr132rY", 0|(1<<TID::UnmodeledSideEffects), 0x596c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2786 = VFMADDSUBPSr132rY
+ { 2787, 7, 1, 0, "VFMADDSUBPSr213m", 0|(1<<TID::UnmodeledSideEffects), 0x5a6c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2787 = VFMADDSUBPSr213m
+ { 2788, 7, 1, 0, "VFMADDSUBPSr213mY", 0|(1<<TID::UnmodeledSideEffects), 0x5a6c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2788 = VFMADDSUBPSr213mY
+ { 2789, 3, 1, 0, "VFMADDSUBPSr213r", 0|(1<<TID::UnmodeledSideEffects), 0x5a6c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2789 = VFMADDSUBPSr213r
+ { 2790, 3, 1, 0, "VFMADDSUBPSr213rY", 0|(1<<TID::UnmodeledSideEffects), 0x5a6c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2790 = VFMADDSUBPSr213rY
+ { 2791, 7, 1, 0, "VFMADDSUBPSr231m", 0|(1<<TID::UnmodeledSideEffects), 0x5b6c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2791 = VFMADDSUBPSr231m
+ { 2792, 7, 1, 0, "VFMADDSUBPSr231mY", 0|(1<<TID::UnmodeledSideEffects), 0x5b6c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2792 = VFMADDSUBPSr231mY
+ { 2793, 3, 1, 0, "VFMADDSUBPSr231r", 0|(1<<TID::UnmodeledSideEffects), 0x5b6c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2793 = VFMADDSUBPSr231r
+ { 2794, 3, 1, 0, "VFMADDSUBPSr231rY", 0|(1<<TID::UnmodeledSideEffects), 0x5b6c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2794 = VFMADDSUBPSr231rY
+ { 2795, 7, 1, 0, "VFMSUBADDPDr132m", 0|(1<<TID::UnmodeledSideEffects), 0x797c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2795 = VFMSUBADDPDr132m
+ { 2796, 7, 1, 0, "VFMSUBADDPDr132mY", 0|(1<<TID::UnmodeledSideEffects), 0x797c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2796 = VFMSUBADDPDr132mY
+ { 2797, 3, 1, 0, "VFMSUBADDPDr132r", 0|(1<<TID::UnmodeledSideEffects), 0x797c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2797 = VFMSUBADDPDr132r
+ { 2798, 3, 1, 0, "VFMSUBADDPDr132rY", 0|(1<<TID::UnmodeledSideEffects), 0x797c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2798 = VFMSUBADDPDr132rY
+ { 2799, 7, 1, 0, "VFMSUBADDPDr213m", 0|(1<<TID::UnmodeledSideEffects), 0x7a7c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2799 = VFMSUBADDPDr213m
+ { 2800, 7, 1, 0, "VFMSUBADDPDr213mY", 0|(1<<TID::UnmodeledSideEffects), 0x7a7c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2800 = VFMSUBADDPDr213mY
+ { 2801, 3, 1, 0, "VFMSUBADDPDr213r", 0|(1<<TID::UnmodeledSideEffects), 0x7a7c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2801 = VFMSUBADDPDr213r
+ { 2802, 3, 1, 0, "VFMSUBADDPDr213rY", 0|(1<<TID::UnmodeledSideEffects), 0x7a7c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2802 = VFMSUBADDPDr213rY
+ { 2803, 7, 1, 0, "VFMSUBADDPDr231m", 0|(1<<TID::UnmodeledSideEffects), 0x7b7c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2803 = VFMSUBADDPDr231m
+ { 2804, 7, 1, 0, "VFMSUBADDPDr231mY", 0|(1<<TID::UnmodeledSideEffects), 0x7b7c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2804 = VFMSUBADDPDr231mY
+ { 2805, 3, 1, 0, "VFMSUBADDPDr231r", 0|(1<<TID::UnmodeledSideEffects), 0x7b7c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2805 = VFMSUBADDPDr231r
+ { 2806, 3, 1, 0, "VFMSUBADDPDr231rY", 0|(1<<TID::UnmodeledSideEffects), 0x7b7c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2806 = VFMSUBADDPDr231rY
+ { 2807, 7, 1, 0, "VFMSUBADDPSr132m", 0|(1<<TID::UnmodeledSideEffects), 0x597c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2807 = VFMSUBADDPSr132m
+ { 2808, 7, 1, 0, "VFMSUBADDPSr132mY", 0|(1<<TID::UnmodeledSideEffects), 0x597c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2808 = VFMSUBADDPSr132mY
+ { 2809, 3, 1, 0, "VFMSUBADDPSr132r", 0|(1<<TID::UnmodeledSideEffects), 0x597c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2809 = VFMSUBADDPSr132r
+ { 2810, 3, 1, 0, "VFMSUBADDPSr132rY", 0|(1<<TID::UnmodeledSideEffects), 0x597c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2810 = VFMSUBADDPSr132rY
+ { 2811, 7, 1, 0, "VFMSUBADDPSr213m", 0|(1<<TID::UnmodeledSideEffects), 0x5a7c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2811 = VFMSUBADDPSr213m
+ { 2812, 7, 1, 0, "VFMSUBADDPSr213mY", 0|(1<<TID::UnmodeledSideEffects), 0x5a7c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2812 = VFMSUBADDPSr213mY
+ { 2813, 3, 1, 0, "VFMSUBADDPSr213r", 0|(1<<TID::UnmodeledSideEffects), 0x5a7c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2813 = VFMSUBADDPSr213r
+ { 2814, 3, 1, 0, "VFMSUBADDPSr213rY", 0|(1<<TID::UnmodeledSideEffects), 0x5a7c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2814 = VFMSUBADDPSr213rY
+ { 2815, 7, 1, 0, "VFMSUBADDPSr231m", 0|(1<<TID::UnmodeledSideEffects), 0x5b7c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2815 = VFMSUBADDPSr231m
+ { 2816, 7, 1, 0, "VFMSUBADDPSr231mY", 0|(1<<TID::UnmodeledSideEffects), 0x5b7c00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2816 = VFMSUBADDPSr231mY
+ { 2817, 3, 1, 0, "VFMSUBADDPSr231r", 0|(1<<TID::UnmodeledSideEffects), 0x5b7c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2817 = VFMSUBADDPSr231r
+ { 2818, 3, 1, 0, "VFMSUBADDPSr231rY", 0|(1<<TID::UnmodeledSideEffects), 0x5b7c00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2818 = VFMSUBADDPSr231rY
+ { 2819, 7, 1, 0, "VFMSUBPDr132m", 0|(1<<TID::UnmodeledSideEffects), 0x79ac00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2819 = VFMSUBPDr132m
+ { 2820, 7, 1, 0, "VFMSUBPDr132mY", 0|(1<<TID::UnmodeledSideEffects), 0x79ac00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2820 = VFMSUBPDr132mY
+ { 2821, 3, 1, 0, "VFMSUBPDr132r", 0|(1<<TID::UnmodeledSideEffects), 0x79ac00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2821 = VFMSUBPDr132r
+ { 2822, 3, 1, 0, "VFMSUBPDr132rY", 0|(1<<TID::UnmodeledSideEffects), 0x79ac00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2822 = VFMSUBPDr132rY
+ { 2823, 7, 1, 0, "VFMSUBPDr213m", 0|(1<<TID::UnmodeledSideEffects), 0x7aac00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2823 = VFMSUBPDr213m
+ { 2824, 7, 1, 0, "VFMSUBPDr213mY", 0|(1<<TID::UnmodeledSideEffects), 0x7aac00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2824 = VFMSUBPDr213mY
+ { 2825, 3, 1, 0, "VFMSUBPDr213r", 0|(1<<TID::UnmodeledSideEffects), 0x7aac00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2825 = VFMSUBPDr213r
+ { 2826, 3, 1, 0, "VFMSUBPDr213rY", 0|(1<<TID::UnmodeledSideEffects), 0x7aac00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2826 = VFMSUBPDr213rY
+ { 2827, 7, 1, 0, "VFMSUBPDr231m", 0|(1<<TID::UnmodeledSideEffects), 0x7bac00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2827 = VFMSUBPDr231m
+ { 2828, 7, 1, 0, "VFMSUBPDr231mY", 0|(1<<TID::UnmodeledSideEffects), 0x7bac00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2828 = VFMSUBPDr231mY
+ { 2829, 3, 1, 0, "VFMSUBPDr231r", 0|(1<<TID::UnmodeledSideEffects), 0x7bac00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2829 = VFMSUBPDr231r
+ { 2830, 3, 1, 0, "VFMSUBPDr231rY", 0|(1<<TID::UnmodeledSideEffects), 0x7bac00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2830 = VFMSUBPDr231rY
+ { 2831, 7, 1, 0, "VFMSUBPSr132m", 0|(1<<TID::UnmodeledSideEffects), 0x59ac00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2831 = VFMSUBPSr132m
+ { 2832, 7, 1, 0, "VFMSUBPSr132mY", 0|(1<<TID::UnmodeledSideEffects), 0x59ac00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2832 = VFMSUBPSr132mY
+ { 2833, 3, 1, 0, "VFMSUBPSr132r", 0|(1<<TID::UnmodeledSideEffects), 0x59ac00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2833 = VFMSUBPSr132r
+ { 2834, 3, 1, 0, "VFMSUBPSr132rY", 0|(1<<TID::UnmodeledSideEffects), 0x59ac00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2834 = VFMSUBPSr132rY
+ { 2835, 7, 1, 0, "VFMSUBPSr213m", 0|(1<<TID::UnmodeledSideEffects), 0x5aac00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2835 = VFMSUBPSr213m
+ { 2836, 7, 1, 0, "VFMSUBPSr213mY", 0|(1<<TID::UnmodeledSideEffects), 0x5aac00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2836 = VFMSUBPSr213mY
+ { 2837, 3, 1, 0, "VFMSUBPSr213r", 0|(1<<TID::UnmodeledSideEffects), 0x5aac00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2837 = VFMSUBPSr213r
+ { 2838, 3, 1, 0, "VFMSUBPSr213rY", 0|(1<<TID::UnmodeledSideEffects), 0x5aac00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2838 = VFMSUBPSr213rY
+ { 2839, 7, 1, 0, "VFMSUBPSr231m", 0|(1<<TID::UnmodeledSideEffects), 0x5bac00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2839 = VFMSUBPSr231m
+ { 2840, 7, 1, 0, "VFMSUBPSr231mY", 0|(1<<TID::UnmodeledSideEffects), 0x5bac00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2840 = VFMSUBPSr231mY
+ { 2841, 3, 1, 0, "VFMSUBPSr231r", 0|(1<<TID::UnmodeledSideEffects), 0x5bac00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2841 = VFMSUBPSr231r
+ { 2842, 3, 1, 0, "VFMSUBPSr231rY", 0|(1<<TID::UnmodeledSideEffects), 0x5bac00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2842 = VFMSUBPSr231rY
+ { 2843, 7, 1, 0, "VFNMADDPDr132m", 0|(1<<TID::UnmodeledSideEffects), 0x79cc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2843 = VFNMADDPDr132m
+ { 2844, 7, 1, 0, "VFNMADDPDr132mY", 0|(1<<TID::UnmodeledSideEffects), 0x79cc00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2844 = VFNMADDPDr132mY
+ { 2845, 3, 1, 0, "VFNMADDPDr132r", 0|(1<<TID::UnmodeledSideEffects), 0x79cc00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2845 = VFNMADDPDr132r
+ { 2846, 3, 1, 0, "VFNMADDPDr132rY", 0|(1<<TID::UnmodeledSideEffects), 0x79cc00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2846 = VFNMADDPDr132rY
+ { 2847, 7, 1, 0, "VFNMADDPDr213m", 0|(1<<TID::UnmodeledSideEffects), 0x7acc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2847 = VFNMADDPDr213m
+ { 2848, 7, 1, 0, "VFNMADDPDr213mY", 0|(1<<TID::UnmodeledSideEffects), 0x7acc00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2848 = VFNMADDPDr213mY
+ { 2849, 3, 1, 0, "VFNMADDPDr213r", 0|(1<<TID::UnmodeledSideEffects), 0x7acc00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2849 = VFNMADDPDr213r
+ { 2850, 3, 1, 0, "VFNMADDPDr213rY", 0|(1<<TID::UnmodeledSideEffects), 0x7acc00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2850 = VFNMADDPDr213rY
+ { 2851, 7, 1, 0, "VFNMADDPDr231m", 0|(1<<TID::UnmodeledSideEffects), 0x7bcc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2851 = VFNMADDPDr231m
+ { 2852, 7, 1, 0, "VFNMADDPDr231mY", 0|(1<<TID::UnmodeledSideEffects), 0x7bcc00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2852 = VFNMADDPDr231mY
+ { 2853, 3, 1, 0, "VFNMADDPDr231r", 0|(1<<TID::UnmodeledSideEffects), 0x7bcc00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2853 = VFNMADDPDr231r
+ { 2854, 3, 1, 0, "VFNMADDPDr231rY", 0|(1<<TID::UnmodeledSideEffects), 0x7bcc00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2854 = VFNMADDPDr231rY
+ { 2855, 7, 1, 0, "VFNMADDPSr132m", 0|(1<<TID::UnmodeledSideEffects), 0x59cc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2855 = VFNMADDPSr132m
+ { 2856, 7, 1, 0, "VFNMADDPSr132mY", 0|(1<<TID::UnmodeledSideEffects), 0x59cc00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2856 = VFNMADDPSr132mY
+ { 2857, 3, 1, 0, "VFNMADDPSr132r", 0|(1<<TID::UnmodeledSideEffects), 0x59cc00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2857 = VFNMADDPSr132r
+ { 2858, 3, 1, 0, "VFNMADDPSr132rY", 0|(1<<TID::UnmodeledSideEffects), 0x59cc00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2858 = VFNMADDPSr132rY
+ { 2859, 7, 1, 0, "VFNMADDPSr213m", 0|(1<<TID::UnmodeledSideEffects), 0x5acc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2859 = VFNMADDPSr213m
+ { 2860, 7, 1, 0, "VFNMADDPSr213mY", 0|(1<<TID::UnmodeledSideEffects), 0x5acc00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2860 = VFNMADDPSr213mY
+ { 2861, 3, 1, 0, "VFNMADDPSr213r", 0|(1<<TID::UnmodeledSideEffects), 0x5acc00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2861 = VFNMADDPSr213r
+ { 2862, 3, 1, 0, "VFNMADDPSr213rY", 0|(1<<TID::UnmodeledSideEffects), 0x5acc00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2862 = VFNMADDPSr213rY
+ { 2863, 7, 1, 0, "VFNMADDPSr231m", 0|(1<<TID::UnmodeledSideEffects), 0x5bcc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2863 = VFNMADDPSr231m
+ { 2864, 7, 1, 0, "VFNMADDPSr231mY", 0|(1<<TID::UnmodeledSideEffects), 0x5bcc00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2864 = VFNMADDPSr231mY
+ { 2865, 3, 1, 0, "VFNMADDPSr231r", 0|(1<<TID::UnmodeledSideEffects), 0x5bcc00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2865 = VFNMADDPSr231r
+ { 2866, 3, 1, 0, "VFNMADDPSr231rY", 0|(1<<TID::UnmodeledSideEffects), 0x5bcc00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2866 = VFNMADDPSr231rY
+ { 2867, 7, 1, 0, "VFNMSUBPDr132m", 0|(1<<TID::UnmodeledSideEffects), 0x79ec00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2867 = VFNMSUBPDr132m
+ { 2868, 7, 1, 0, "VFNMSUBPDr132mY", 0|(1<<TID::UnmodeledSideEffects), 0x79ec00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2868 = VFNMSUBPDr132mY
+ { 2869, 3, 1, 0, "VFNMSUBPDr132r", 0|(1<<TID::UnmodeledSideEffects), 0x79ec00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2869 = VFNMSUBPDr132r
+ { 2870, 3, 1, 0, "VFNMSUBPDr132rY", 0|(1<<TID::UnmodeledSideEffects), 0x79ec00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2870 = VFNMSUBPDr132rY
+ { 2871, 7, 1, 0, "VFNMSUBPDr213m", 0|(1<<TID::UnmodeledSideEffects), 0x7aec00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2871 = VFNMSUBPDr213m
+ { 2872, 7, 1, 0, "VFNMSUBPDr213mY", 0|(1<<TID::UnmodeledSideEffects), 0x7aec00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2872 = VFNMSUBPDr213mY
+ { 2873, 3, 1, 0, "VFNMSUBPDr213r", 0|(1<<TID::UnmodeledSideEffects), 0x7aec00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2873 = VFNMSUBPDr213r
+ { 2874, 3, 1, 0, "VFNMSUBPDr213rY", 0|(1<<TID::UnmodeledSideEffects), 0x7aec00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2874 = VFNMSUBPDr213rY
+ { 2875, 7, 1, 0, "VFNMSUBPDr231m", 0|(1<<TID::UnmodeledSideEffects), 0x7bec00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2875 = VFNMSUBPDr231m
+ { 2876, 7, 1, 0, "VFNMSUBPDr231mY", 0|(1<<TID::UnmodeledSideEffects), 0x7bec00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2876 = VFNMSUBPDr231mY
+ { 2877, 3, 1, 0, "VFNMSUBPDr231r", 0|(1<<TID::UnmodeledSideEffects), 0x7bec00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2877 = VFNMSUBPDr231r
+ { 2878, 3, 1, 0, "VFNMSUBPDr231rY", 0|(1<<TID::UnmodeledSideEffects), 0x7bec00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2878 = VFNMSUBPDr231rY
+ { 2879, 7, 1, 0, "VFNMSUBPSr132m", 0|(1<<TID::UnmodeledSideEffects), 0x59ec00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2879 = VFNMSUBPSr132m
+ { 2880, 7, 1, 0, "VFNMSUBPSr132mY", 0|(1<<TID::UnmodeledSideEffects), 0x59ec00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2880 = VFNMSUBPSr132mY
+ { 2881, 3, 1, 0, "VFNMSUBPSr132r", 0|(1<<TID::UnmodeledSideEffects), 0x59ec00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2881 = VFNMSUBPSr132r
+ { 2882, 3, 1, 0, "VFNMSUBPSr132rY", 0|(1<<TID::UnmodeledSideEffects), 0x59ec00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2882 = VFNMSUBPSr132rY
+ { 2883, 7, 1, 0, "VFNMSUBPSr213m", 0|(1<<TID::UnmodeledSideEffects), 0x5aec00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2883 = VFNMSUBPSr213m
+ { 2884, 7, 1, 0, "VFNMSUBPSr213mY", 0|(1<<TID::UnmodeledSideEffects), 0x5aec00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2884 = VFNMSUBPSr213mY
+ { 2885, 3, 1, 0, "VFNMSUBPSr213r", 0|(1<<TID::UnmodeledSideEffects), 0x5aec00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2885 = VFNMSUBPSr213r
+ { 2886, 3, 1, 0, "VFNMSUBPSr213rY", 0|(1<<TID::UnmodeledSideEffects), 0x5aec00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2886 = VFNMSUBPSr213rY
+ { 2887, 7, 1, 0, "VFNMSUBPSr231m", 0|(1<<TID::UnmodeledSideEffects), 0x5bec00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2887 = VFNMSUBPSr231m
+ { 2888, 7, 1, 0, "VFNMSUBPSr231mY", 0|(1<<TID::UnmodeledSideEffects), 0x5bec00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2888 = VFNMSUBPSr231mY
+ { 2889, 3, 1, 0, "VFNMSUBPSr231r", 0|(1<<TID::UnmodeledSideEffects), 0x5bec00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2889 = VFNMSUBPSr231r
+ { 2890, 3, 1, 0, "VFNMSUBPSr231rY", 0|(1<<TID::UnmodeledSideEffects), 0x5bec00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2890 = VFNMSUBPSr231rY
+ { 2891, 7, 1, 0, "VFsANDNPDrm", 0|(1<<TID::MayLoad), 0x555800046ULL, NULL, NULL, NULL, OperandInfo226 }, // Inst #2891 = VFsANDNPDrm
+ { 2892, 3, 1, 0, "VFsANDNPDrr", 0, 0x555800045ULL, NULL, NULL, NULL, OperandInfo227 }, // Inst #2892 = VFsANDNPDrr
+ { 2893, 7, 1, 0, "VFsANDNPSrm", 0|(1<<TID::MayLoad), 0x555400006ULL, NULL, NULL, NULL, OperandInfo228 }, // Inst #2893 = VFsANDNPSrm
+ { 2894, 3, 1, 0, "VFsANDNPSrr", 0, 0x555400005ULL, NULL, NULL, NULL, OperandInfo229 }, // Inst #2894 = VFsANDNPSrr
+ { 2895, 7, 1, 0, "VFsANDPDrm", 0|(1<<TID::MayLoad), 0x554800046ULL, NULL, NULL, NULL, OperandInfo226 }, // Inst #2895 = VFsANDPDrm
+ { 2896, 3, 1, 0, "VFsANDPDrr", 0|(1<<TID::Commutable), 0x554800045ULL, NULL, NULL, NULL, OperandInfo227 }, // Inst #2896 = VFsANDPDrr
+ { 2897, 7, 1, 0, "VFsANDPSrm", 0|(1<<TID::MayLoad), 0x554400006ULL, NULL, NULL, NULL, OperandInfo228 }, // Inst #2897 = VFsANDPSrm
+ { 2898, 3, 1, 0, "VFsANDPSrr", 0|(1<<TID::Commutable), 0x554400005ULL, NULL, NULL, NULL, OperandInfo229 }, // Inst #2898 = VFsANDPSrr
+ { 2899, 7, 1, 0, "VFsORPDrm", 0|(1<<TID::MayLoad), 0x556800046ULL, NULL, NULL, NULL, OperandInfo226 }, // Inst #2899 = VFsORPDrm
+ { 2900, 3, 1, 0, "VFsORPDrr", 0|(1<<TID::Commutable), 0x556800045ULL, NULL, NULL, NULL, OperandInfo227 }, // Inst #2900 = VFsORPDrr
+ { 2901, 7, 1, 0, "VFsORPSrm", 0|(1<<TID::MayLoad), 0x556400006ULL, NULL, NULL, NULL, OperandInfo228 }, // Inst #2901 = VFsORPSrm
+ { 2902, 3, 1, 0, "VFsORPSrr", 0|(1<<TID::Commutable), 0x556400005ULL, NULL, NULL, NULL, OperandInfo229 }, // Inst #2902 = VFsORPSrr
+ { 2903, 7, 1, 0, "VFsXORPDrm", 0|(1<<TID::MayLoad), 0x557800046ULL, NULL, NULL, NULL, OperandInfo226 }, // Inst #2903 = VFsXORPDrm
+ { 2904, 3, 1, 0, "VFsXORPDrr", 0|(1<<TID::Commutable), 0x557800045ULL, NULL, NULL, NULL, OperandInfo227 }, // Inst #2904 = VFsXORPDrr
+ { 2905, 7, 1, 0, "VFsXORPSrm", 0|(1<<TID::MayLoad), 0x557400006ULL, NULL, NULL, NULL, OperandInfo228 }, // Inst #2905 = VFsXORPSrm
+ { 2906, 3, 1, 0, "VFsXORPSrr", 0|(1<<TID::Commutable), 0x557400005ULL, NULL, NULL, NULL, OperandInfo229 }, // Inst #2906 = VFsXORPSrr
+ { 2907, 7, 1, 0, "VHADDPDYrm", 0|(1<<TID::MayLoad), 0x57c800146ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2907 = VHADDPDYrm
+ { 2908, 3, 1, 0, "VHADDPDYrr", 0, 0x57c800145ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2908 = VHADDPDYrr
+ { 2909, 7, 1, 0, "VHADDPDrm", 0|(1<<TID::MayLoad), 0x57c800146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2909 = VHADDPDrm
+ { 2910, 3, 1, 0, "VHADDPDrr", 0, 0x57c800145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2910 = VHADDPDrr
+ { 2911, 7, 1, 0, "VHADDPSYrm", 0|(1<<TID::MayLoad), 0x57c800b06ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2911 = VHADDPSYrm
+ { 2912, 3, 1, 0, "VHADDPSYrr", 0, 0x57c800b05ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2912 = VHADDPSYrr
+ { 2913, 7, 1, 0, "VHADDPSrm", 0|(1<<TID::MayLoad), 0x57c800b06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2913 = VHADDPSrm
+ { 2914, 3, 1, 0, "VHADDPSrr", 0, 0x57c800b05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2914 = VHADDPSrr
+ { 2915, 7, 1, 0, "VHSUBPDYrm", 0|(1<<TID::MayLoad), 0x57d800146ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2915 = VHSUBPDYrm
+ { 2916, 3, 1, 0, "VHSUBPDYrr", 0, 0x57d800145ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2916 = VHSUBPDYrr
+ { 2917, 7, 1, 0, "VHSUBPDrm", 0|(1<<TID::MayLoad), 0x57d800146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2917 = VHSUBPDrm
+ { 2918, 3, 1, 0, "VHSUBPDrr", 0, 0x57d800145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2918 = VHSUBPDrr
+ { 2919, 7, 1, 0, "VHSUBPSYrm", 0|(1<<TID::MayLoad), 0x57d800b06ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2919 = VHSUBPSYrm
+ { 2920, 3, 1, 0, "VHSUBPSYrr", 0, 0x57d800b05ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2920 = VHSUBPSYrr
+ { 2921, 7, 1, 0, "VHSUBPSrm", 0|(1<<TID::MayLoad), 0x57d800b06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2921 = VHSUBPSrm
+ { 2922, 3, 1, 0, "VHSUBPSrr", 0, 0x57d800b05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2922 = VHSUBPSrr
+ { 2923, 8, 1, 0, "VINSERTF128rm", 0|(1<<TID::UnmodeledSideEffects), 0x518c02e46ULL, NULL, NULL, NULL, OperandInfo231 }, // Inst #2923 = VINSERTF128rm
+ { 2924, 4, 1, 0, "VINSERTF128rr", 0|(1<<TID::UnmodeledSideEffects), 0x518c02e45ULL, NULL, NULL, NULL, OperandInfo253 }, // Inst #2924 = VINSERTF128rr
+ { 2925, 8, 1, 0, "VINSERTPSrm", 0|(1<<TID::MayLoad), 0x521c02e46ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #2925 = VINSERTPSrm
+ { 2926, 4, 1, 0, "VINSERTPSrr", 0, 0x521c02e45ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #2926 = VINSERTPSrr
+ { 2927, 6, 1, 0, "VLDDQUYrm", 0|(1<<TID::MayLoad), 0x1f0800b06ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #2927 = VLDDQUYrm
+ { 2928, 6, 1, 0, "VLDDQUrm", 0|(1<<TID::MayLoad), 0x1f0800b06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2928 = VLDDQUrm
+ { 2929, 5, 0, 0, "VLDMXCSR", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x1ae40001aULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2929 = VLDMXCSR
+ { 2930, 2, 0, 0, "VMASKMOVDQU", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x1f7c00045ULL, ImplicitList34, NULL, NULL, OperandInfo43 }, // Inst #2930 = VMASKMOVDQU
+ { 2931, 2, 0, 0, "VMASKMOVDQU64", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x1f7c00045ULL, ImplicitList35, NULL, NULL, OperandInfo43 }, // Inst #2931 = VMASKMOVDQU64
+ { 2932, 7, 0, 0, "VMASKMOVPDYmr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x52fc00d44ULL, NULL, NULL, NULL, OperandInfo254 }, // Inst #2932 = VMASKMOVPDYmr
+ { 2933, 7, 1, 0, "VMASKMOVPDYrm", 0|(1<<TID::MayLoad), 0x52dc00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2933 = VMASKMOVPDYrm
+ { 2934, 7, 0, 0, "VMASKMOVPDmr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x52fc00d44ULL, NULL, NULL, NULL, OperandInfo255 }, // Inst #2934 = VMASKMOVPDmr
+ { 2935, 7, 1, 0, "VMASKMOVPDrm", 0|(1<<TID::MayLoad), 0x52dc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2935 = VMASKMOVPDrm
+ { 2936, 7, 0, 0, "VMASKMOVPSYmr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x52ec00d44ULL, NULL, NULL, NULL, OperandInfo254 }, // Inst #2936 = VMASKMOVPSYmr
+ { 2937, 7, 1, 0, "VMASKMOVPSYrm", 0|(1<<TID::MayLoad), 0x52cc00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2937 = VMASKMOVPSYrm
+ { 2938, 7, 0, 0, "VMASKMOVPSmr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x52ec00d44ULL, NULL, NULL, NULL, OperandInfo255 }, // Inst #2938 = VMASKMOVPSmr
+ { 2939, 7, 1, 0, "VMASKMOVPSrm", 0|(1<<TID::MayLoad), 0x52cc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2939 = VMASKMOVPSrm
+ { 2940, 7, 1, 0, "VMAXPDYrm", 0|(1<<TID::MayLoad), 0x55f800146ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2940 = VMAXPDYrm
+ { 2941, 7, 1, 0, "VMAXPDYrm_Int", 0|(1<<TID::MayLoad), 0x55f800146ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2941 = VMAXPDYrm_Int
+ { 2942, 3, 1, 0, "VMAXPDYrr", 0, 0x55f800145ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2942 = VMAXPDYrr
+ { 2943, 3, 1, 0, "VMAXPDYrr_Int", 0, 0x55f800145ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2943 = VMAXPDYrr_Int
+ { 2944, 7, 1, 0, "VMAXPDrm", 0|(1<<TID::MayLoad), 0x55f800146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2944 = VMAXPDrm
+ { 2945, 7, 1, 0, "VMAXPDrm_Int", 0|(1<<TID::MayLoad), 0x55f800146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2945 = VMAXPDrm_Int
+ { 2946, 3, 1, 0, "VMAXPDrr", 0, 0x55f800145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2946 = VMAXPDrr
+ { 2947, 3, 1, 0, "VMAXPDrr_Int", 0, 0x55f800145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2947 = VMAXPDrr_Int
+ { 2948, 7, 1, 0, "VMAXPSYrm", 0|(1<<TID::MayLoad), 0x55f400106ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2948 = VMAXPSYrm
+ { 2949, 7, 1, 0, "VMAXPSYrm_Int", 0|(1<<TID::MayLoad), 0x55f400106ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2949 = VMAXPSYrm_Int
+ { 2950, 3, 1, 0, "VMAXPSYrr", 0, 0x55f400105ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2950 = VMAXPSYrr
+ { 2951, 3, 1, 0, "VMAXPSYrr_Int", 0, 0x55f400105ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2951 = VMAXPSYrr_Int
+ { 2952, 7, 1, 0, "VMAXPSrm", 0|(1<<TID::MayLoad), 0x55f400106ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2952 = VMAXPSrm
+ { 2953, 7, 1, 0, "VMAXPSrm_Int", 0|(1<<TID::MayLoad), 0x55f400106ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2953 = VMAXPSrm_Int
+ { 2954, 3, 1, 0, "VMAXPSrr", 0, 0x55f400105ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2954 = VMAXPSrr
+ { 2955, 3, 1, 0, "VMAXPSrr_Int", 0, 0x55f400105ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2955 = VMAXPSrr_Int
+ { 2956, 7, 1, 0, "VMAXSDrm", 0|(1<<TID::MayLoad), 0x55f000b06ULL, NULL, NULL, NULL, OperandInfo226 }, // Inst #2956 = VMAXSDrm
+ { 2957, 7, 1, 0, "VMAXSDrm_Int", 0|(1<<TID::MayLoad), 0x55f000b06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2957 = VMAXSDrm_Int
+ { 2958, 3, 1, 0, "VMAXSDrr", 0, 0x55f000b05ULL, NULL, NULL, NULL, OperandInfo227 }, // Inst #2958 = VMAXSDrr
+ { 2959, 3, 1, 0, "VMAXSDrr_Int", 0, 0x55f000b05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2959 = VMAXSDrr_Int
+ { 2960, 7, 1, 0, "VMAXSSrm", 0|(1<<TID::MayLoad), 0x55f000c06ULL, NULL, NULL, NULL, OperandInfo228 }, // Inst #2960 = VMAXSSrm
+ { 2961, 7, 1, 0, "VMAXSSrm_Int", 0|(1<<TID::MayLoad), 0x55f000c06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2961 = VMAXSSrm_Int
+ { 2962, 3, 1, 0, "VMAXSSrr", 0, 0x55f000c05ULL, NULL, NULL, NULL, OperandInfo229 }, // Inst #2962 = VMAXSSrr
+ { 2963, 3, 1, 0, "VMAXSSrr_Int", 0, 0x55f000c05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2963 = VMAXSSrr_Int
+ { 2964, 0, 0, 0, "VMCALL", 0|(1<<TID::UnmodeledSideEffects), 0x1000121ULL, NULL, NULL, NULL, 0 }, // Inst #2964 = VMCALL
+ { 2965, 5, 0, 0, "VMCLEARm", 0|(1<<TID::UnmodeledSideEffects), 0xc700015eULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #2965 = VMCLEARm
+ { 2966, 7, 1, 0, "VMINPDYrm", 0|(1<<TID::MayLoad), 0x55d800146ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2966 = VMINPDYrm
+ { 2967, 7, 1, 0, "VMINPDYrm_Int", 0|(1<<TID::MayLoad), 0x55d800146ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2967 = VMINPDYrm_Int
+ { 2968, 3, 1, 0, "VMINPDYrr", 0, 0x55d800145ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2968 = VMINPDYrr
+ { 2969, 3, 1, 0, "VMINPDYrr_Int", 0, 0x55d800145ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2969 = VMINPDYrr_Int
+ { 2970, 7, 1, 0, "VMINPDrm", 0|(1<<TID::MayLoad), 0x55d800146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2970 = VMINPDrm
+ { 2971, 7, 1, 0, "VMINPDrm_Int", 0|(1<<TID::MayLoad), 0x55d800146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2971 = VMINPDrm_Int
+ { 2972, 3, 1, 0, "VMINPDrr", 0, 0x55d800145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2972 = VMINPDrr
+ { 2973, 3, 1, 0, "VMINPDrr_Int", 0, 0x55d800145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2973 = VMINPDrr_Int
+ { 2974, 7, 1, 0, "VMINPSYrm", 0|(1<<TID::MayLoad), 0x55d400106ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2974 = VMINPSYrm
+ { 2975, 7, 1, 0, "VMINPSYrm_Int", 0|(1<<TID::MayLoad), 0x55d400106ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #2975 = VMINPSYrm_Int
+ { 2976, 3, 1, 0, "VMINPSYrr", 0, 0x55d400105ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2976 = VMINPSYrr
+ { 2977, 3, 1, 0, "VMINPSYrr_Int", 0, 0x55d400105ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #2977 = VMINPSYrr_Int
+ { 2978, 7, 1, 0, "VMINPSrm", 0|(1<<TID::MayLoad), 0x55d400106ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2978 = VMINPSrm
+ { 2979, 7, 1, 0, "VMINPSrm_Int", 0|(1<<TID::MayLoad), 0x55d400106ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2979 = VMINPSrm_Int
+ { 2980, 3, 1, 0, "VMINPSrr", 0, 0x55d400105ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2980 = VMINPSrr
+ { 2981, 3, 1, 0, "VMINPSrr_Int", 0, 0x55d400105ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2981 = VMINPSrr_Int
+ { 2982, 7, 1, 0, "VMINSDrm", 0|(1<<TID::MayLoad), 0x55d000b06ULL, NULL, NULL, NULL, OperandInfo226 }, // Inst #2982 = VMINSDrm
+ { 2983, 7, 1, 0, "VMINSDrm_Int", 0|(1<<TID::MayLoad), 0x55d000b06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2983 = VMINSDrm_Int
+ { 2984, 3, 1, 0, "VMINSDrr", 0, 0x55d000b05ULL, NULL, NULL, NULL, OperandInfo227 }, // Inst #2984 = VMINSDrr
+ { 2985, 3, 1, 0, "VMINSDrr_Int", 0, 0x55d000b05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2985 = VMINSDrr_Int
+ { 2986, 7, 1, 0, "VMINSSrm", 0|(1<<TID::MayLoad), 0x55d000c06ULL, NULL, NULL, NULL, OperandInfo228 }, // Inst #2986 = VMINSSrm
+ { 2987, 7, 1, 0, "VMINSSrm_Int", 0|(1<<TID::MayLoad), 0x55d000c06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #2987 = VMINSSrm_Int
+ { 2988, 3, 1, 0, "VMINSSrr", 0, 0x55d000c05ULL, NULL, NULL, NULL, OperandInfo229 }, // Inst #2988 = VMINSSrr
+ { 2989, 3, 1, 0, "VMINSSrr_Int", 0, 0x55d000c05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #2989 = VMINSSrr_Int
+ { 2990, 0, 0, 0, "VMLAUNCH", 0|(1<<TID::UnmodeledSideEffects), 0x1000122ULL, NULL, NULL, NULL, 0 }, // Inst #2990 = VMLAUNCH
+ { 2991, 6, 0, 0, "VMOVAPDYmr", 0|(1<<TID::MayStore), 0x129800044ULL, NULL, NULL, NULL, OperandInfo256 }, // Inst #2991 = VMOVAPDYmr
+ { 2992, 6, 1, 0, "VMOVAPDYrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x128800046ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #2992 = VMOVAPDYrm
+ { 2993, 2, 1, 0, "VMOVAPDYrr", 0, 0x128800045ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #2993 = VMOVAPDYrr
+ { 2994, 6, 0, 0, "VMOVAPDmr", 0|(1<<TID::MayStore), 0x129800044ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #2994 = VMOVAPDmr
+ { 2995, 6, 1, 0, "VMOVAPDrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x128800046ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #2995 = VMOVAPDrm
+ { 2996, 2, 1, 0, "VMOVAPDrr", 0, 0x128800045ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #2996 = VMOVAPDrr
+ { 2997, 6, 0, 0, "VMOVAPSYmr", 0|(1<<TID::MayStore), 0x129400004ULL, NULL, NULL, NULL, OperandInfo256 }, // Inst #2997 = VMOVAPSYmr
+ { 2998, 6, 1, 0, "VMOVAPSYrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x128400006ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #2998 = VMOVAPSYrm
+ { 2999, 2, 1, 0, "VMOVAPSYrr", 0, 0x128400005ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #2999 = VMOVAPSYrr
+ { 3000, 6, 0, 0, "VMOVAPSmr", 0|(1<<TID::MayStore), 0x129400004ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3000 = VMOVAPSmr
+ { 3001, 6, 1, 0, "VMOVAPSrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x128400006ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3001 = VMOVAPSrm
+ { 3002, 2, 1, 0, "VMOVAPSrr", 0, 0x128400005ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3002 = VMOVAPSrr
+ { 3003, 6, 1, 0, "VMOVDDUPYrm", 0|(1<<TID::UnmodeledSideEffects), 0x112800b06ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #3003 = VMOVDDUPYrm
+ { 3004, 2, 1, 0, "VMOVDDUPYrr", 0|(1<<TID::UnmodeledSideEffects), 0x112800b05ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #3004 = VMOVDDUPYrr
+ { 3005, 6, 1, 0, "VMOVDDUPrm", 0|(1<<TID::MayLoad), 0x112800b06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3005 = VMOVDDUPrm
+ { 3006, 2, 1, 0, "VMOVDDUPrr", 0, 0x112800b05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3006 = VMOVDDUPrr
+ { 3007, 6, 1, 0, "VMOVDI2PDIrm", 0|(1<<TID::MayLoad), 0x16e800046ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3007 = VMOVDI2PDIrm
+ { 3008, 2, 1, 0, "VMOVDI2PDIrr", 0, 0x16e800045ULL, NULL, NULL, NULL, OperandInfo187 }, // Inst #3008 = VMOVDI2PDIrr
+ { 3009, 6, 1, 0, "VMOVDI2SSrm", 0|(1<<TID::MayLoad), 0x16e800046ULL, NULL, NULL, NULL, OperandInfo92 }, // Inst #3009 = VMOVDI2SSrm
+ { 3010, 2, 1, 0, "VMOVDI2SSrr", 0, 0x16e800045ULL, NULL, NULL, NULL, OperandInfo98 }, // Inst #3010 = VMOVDI2SSrr
+ { 3011, 6, 0, 0, "VMOVDQAYmr", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x17fc00044ULL, NULL, NULL, NULL, OperandInfo256 }, // Inst #3011 = VMOVDQAYmr
+ { 3012, 6, 1, 0, "VMOVDQAYrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x16fc00046ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #3012 = VMOVDQAYrm
+ { 3013, 2, 1, 0, "VMOVDQAYrr", 0, 0x16fc00045ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #3013 = VMOVDQAYrr
+ { 3014, 6, 0, 0, "VMOVDQAmr", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x17fc00044ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3014 = VMOVDQAmr
+ { 3015, 6, 1, 0, "VMOVDQArm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x16fc00046ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3015 = VMOVDQArm
+ { 3016, 2, 1, 0, "VMOVDQArr", 0, 0x16fc00045ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3016 = VMOVDQArr
+ { 3017, 6, 0, 0, "VMOVDQUYmr", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x17fc00c04ULL, NULL, NULL, NULL, OperandInfo256 }, // Inst #3017 = VMOVDQUYmr
+ { 3018, 6, 1, 0, "VMOVDQUYrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x16fc00c06ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #3018 = VMOVDQUYrm
+ { 3019, 2, 1, 0, "VMOVDQUYrr", 0|(1<<TID::UnmodeledSideEffects), 0x16fc00c45ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #3019 = VMOVDQUYrr
+ { 3020, 6, 0, 0, "VMOVDQUmr", 0|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x17fc00c04ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3020 = VMOVDQUmr
+ { 3021, 6, 0, 0, "VMOVDQUmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x17fc00c04ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3021 = VMOVDQUmr_Int
+ { 3022, 6, 1, 0, "VMOVDQUrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::UnmodeledSideEffects), 0x16fc00c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3022 = VMOVDQUrm
+ { 3023, 6, 1, 0, "VMOVDQUrm_Int", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x16fc00c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3023 = VMOVDQUrm_Int
+ { 3024, 2, 1, 0, "VMOVDQUrr", 0|(1<<TID::UnmodeledSideEffects), 0x16fc00c45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3024 = VMOVDQUrr
+ { 3025, 3, 1, 0, "VMOVHLPSrr", 0, 0x512400005ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3025 = VMOVHLPSrr
+ { 3026, 6, 0, 0, "VMOVHPDmr", 0|(1<<TID::MayStore), 0x117800044ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3026 = VMOVHPDmr
+ { 3027, 7, 1, 0, "VMOVHPDrm", 0|(1<<TID::MayLoad), 0x516800146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3027 = VMOVHPDrm
+ { 3028, 6, 0, 0, "VMOVHPSmr", 0|(1<<TID::MayStore), 0x117400004ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3028 = VMOVHPSmr
+ { 3029, 7, 1, 0, "VMOVHPSrm", 0|(1<<TID::MayLoad), 0x516400106ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3029 = VMOVHPSrm
+ { 3030, 3, 1, 0, "VMOVLHPSrr", 0, 0x516400005ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3030 = VMOVLHPSrr
+ { 3031, 6, 0, 0, "VMOVLPDmr", 0|(1<<TID::MayStore), 0x113800044ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3031 = VMOVLPDmr
+ { 3032, 7, 1, 0, "VMOVLPDrm", 0|(1<<TID::MayLoad), 0x512800146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3032 = VMOVLPDrm
+ { 3033, 6, 0, 0, "VMOVLPSmr", 0|(1<<TID::MayStore), 0x113400004ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3033 = VMOVLPSmr
+ { 3034, 7, 1, 0, "VMOVLPSrm", 0|(1<<TID::MayLoad), 0x512400106ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3034 = VMOVLPSrm
+ { 3035, 6, 0, 0, "VMOVLQ128mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x1d6800044ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3035 = VMOVLQ128mr
+ { 3036, 2, 1, 0, "VMOVMSKPDYr64r", 0|(1<<TID::UnmodeledSideEffects), 0x150800045ULL, NULL, NULL, NULL, OperandInfo257 }, // Inst #3036 = VMOVMSKPDYr64r
+ { 3037, 2, 1, 0, "VMOVMSKPDYrr", 0, 0x150800045ULL, NULL, NULL, NULL, OperandInfo258 }, // Inst #3037 = VMOVMSKPDYrr
+ { 3038, 2, 1, 0, "VMOVMSKPDr64r", 0|(1<<TID::UnmodeledSideEffects), 0x150800045ULL, NULL, NULL, NULL, OperandInfo91 }, // Inst #3038 = VMOVMSKPDr64r
+ { 3039, 2, 1, 0, "VMOVMSKPDrr", 0, 0x150800045ULL, NULL, NULL, NULL, OperandInfo133 }, // Inst #3039 = VMOVMSKPDrr
+ { 3040, 2, 1, 0, "VMOVMSKPSYr64r", 0|(1<<TID::UnmodeledSideEffects), 0x150400005ULL, NULL, NULL, NULL, OperandInfo257 }, // Inst #3040 = VMOVMSKPSYr64r
+ { 3041, 2, 1, 0, "VMOVMSKPSYrr", 0, 0x150400005ULL, NULL, NULL, NULL, OperandInfo258 }, // Inst #3041 = VMOVMSKPSYrr
+ { 3042, 2, 1, 0, "VMOVMSKPSr64r", 0|(1<<TID::UnmodeledSideEffects), 0x150400005ULL, NULL, NULL, NULL, OperandInfo91 }, // Inst #3042 = VMOVMSKPSr64r
+ { 3043, 2, 1, 0, "VMOVMSKPSrr", 0, 0x150400005ULL, NULL, NULL, NULL, OperandInfo133 }, // Inst #3043 = VMOVMSKPSrr
+ { 3044, 6, 1, 0, "VMOVNTDQArm", 0|(1<<TID::MayLoad), 0x12ac00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3044 = VMOVNTDQArm
+ { 3045, 6, 0, 0, "VMOVNTDQY_64mr", 0|(1<<TID::MayStore), 0x1e7800044ULL, NULL, NULL, NULL, OperandInfo256 }, // Inst #3045 = VMOVNTDQY_64mr
+ { 3046, 6, 0, 0, "VMOVNTDQYmr", 0|(1<<TID::MayStore), 0x1e7c00044ULL, NULL, NULL, NULL, OperandInfo256 }, // Inst #3046 = VMOVNTDQYmr
+ { 3047, 6, 0, 0, "VMOVNTDQ_64mr", 0|(1<<TID::MayStore), 0x1e7800044ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3047 = VMOVNTDQ_64mr
+ { 3048, 6, 0, 0, "VMOVNTDQmr", 0|(1<<TID::MayStore), 0x1e7c00044ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3048 = VMOVNTDQmr
+ { 3049, 6, 0, 0, "VMOVNTDQmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x1e7c00044ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3049 = VMOVNTDQmr_Int
+ { 3050, 6, 0, 0, "VMOVNTPDYmr", 0|(1<<TID::MayStore), 0x12b800044ULL, NULL, NULL, NULL, OperandInfo256 }, // Inst #3050 = VMOVNTPDYmr
+ { 3051, 6, 0, 0, "VMOVNTPDmr", 0|(1<<TID::MayStore), 0x12b800044ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3051 = VMOVNTPDmr
+ { 3052, 6, 0, 0, "VMOVNTPDmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x12b800044ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3052 = VMOVNTPDmr_Int
+ { 3053, 6, 0, 0, "VMOVNTPSYmr", 0|(1<<TID::MayStore), 0x12b400004ULL, NULL, NULL, NULL, OperandInfo256 }, // Inst #3053 = VMOVNTPSYmr
+ { 3054, 6, 0, 0, "VMOVNTPSmr", 0|(1<<TID::MayStore), 0x12b400004ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3054 = VMOVNTPSmr
+ { 3055, 6, 0, 0, "VMOVNTPSmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x12b400004ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3055 = VMOVNTPSmr_Int
+ { 3056, 6, 0, 0, "VMOVPDI2DImr", 0|(1<<TID::MayStore), 0x17e800044ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3056 = VMOVPDI2DImr
+ { 3057, 2, 1, 0, "VMOVPDI2DIrr", 0, 0x17e800043ULL, NULL, NULL, NULL, OperandInfo133 }, // Inst #3057 = VMOVPDI2DIrr
+ { 3058, 6, 0, 0, "VMOVPQI2QImr", 0|(1<<TID::MayStore), 0x1d6800044ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3058 = VMOVPQI2QImr
+ { 3059, 6, 1, 0, "VMOVQI2PQIrm", 0|(1<<TID::MayLoad), 0x17e000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3059 = VMOVQI2PQIrm
+ { 3060, 2, 1, 0, "VMOVQd64rr", 0|(1<<TID::UnmodeledSideEffects), 0x37e800043ULL, NULL, NULL, NULL, OperandInfo91 }, // Inst #3060 = VMOVQd64rr
+ { 3061, 2, 1, 0, "VMOVQd64rr_alt", 0|(1<<TID::UnmodeledSideEffects), 0x37e800043ULL, NULL, NULL, NULL, OperandInfo91 }, // Inst #3061 = VMOVQd64rr_alt
+ { 3062, 2, 1, 0, "VMOVQs64rr", 0|(1<<TID::UnmodeledSideEffects), 0x36e800045ULL, NULL, NULL, NULL, OperandInfo182 }, // Inst #3062 = VMOVQs64rr
+ { 3063, 2, 1, 0, "VMOVQxrxr", 0|(1<<TID::UnmodeledSideEffects), 0x17e000c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3063 = VMOVQxrxr
+ { 3064, 6, 0, 0, "VMOVSDmr", 0|(1<<TID::MayStore), 0x111000b04ULL, NULL, NULL, NULL, OperandInfo188 }, // Inst #3064 = VMOVSDmr
+ { 3065, 6, 1, 0, "VMOVSDrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x110000b06ULL, NULL, NULL, NULL, OperandInfo94 }, // Inst #3065 = VMOVSDrm
+ { 3066, 3, 1, 0, "VMOVSDrr", 0, 0x510000b05ULL, NULL, NULL, NULL, OperandInfo259 }, // Inst #3066 = VMOVSDrr
+ { 3067, 6, 1, 0, "VMOVSHDUPYrm", 0|(1<<TID::UnmodeledSideEffects), 0x116400c06ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #3067 = VMOVSHDUPYrm
+ { 3068, 2, 1, 0, "VMOVSHDUPYrr", 0|(1<<TID::UnmodeledSideEffects), 0x116400c05ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #3068 = VMOVSHDUPYrr
+ { 3069, 6, 1, 0, "VMOVSHDUPrm", 0|(1<<TID::MayLoad), 0x116400c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3069 = VMOVSHDUPrm
+ { 3070, 2, 1, 0, "VMOVSHDUPrr", 0, 0x116400c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3070 = VMOVSHDUPrr
+ { 3071, 6, 1, 0, "VMOVSLDUPYrm", 0|(1<<TID::UnmodeledSideEffects), 0x112400c06ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #3071 = VMOVSLDUPYrm
+ { 3072, 2, 1, 0, "VMOVSLDUPYrr", 0|(1<<TID::UnmodeledSideEffects), 0x112400c05ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #3072 = VMOVSLDUPYrr
+ { 3073, 6, 1, 0, "VMOVSLDUPrm", 0|(1<<TID::MayLoad), 0x112400c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3073 = VMOVSLDUPrm
+ { 3074, 2, 1, 0, "VMOVSLDUPrr", 0, 0x112400c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3074 = VMOVSLDUPrr
+ { 3075, 6, 0, 0, "VMOVSS2DImr", 0|(1<<TID::MayStore), 0x17e800044ULL, NULL, NULL, NULL, OperandInfo190 }, // Inst #3075 = VMOVSS2DImr
+ { 3076, 2, 1, 0, "VMOVSS2DIrr", 0, 0x17e800043ULL, NULL, NULL, NULL, OperandInfo101 }, // Inst #3076 = VMOVSS2DIrr
+ { 3077, 6, 0, 0, "VMOVSSmr", 0|(1<<TID::MayStore), 0x111000c04ULL, NULL, NULL, NULL, OperandInfo190 }, // Inst #3077 = VMOVSSmr
+ { 3078, 6, 1, 0, "VMOVSSrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x110000c06ULL, NULL, NULL, NULL, OperandInfo92 }, // Inst #3078 = VMOVSSrm
+ { 3079, 3, 1, 0, "VMOVSSrr", 0, 0x510000c05ULL, NULL, NULL, NULL, OperandInfo260 }, // Inst #3079 = VMOVSSrr
+ { 3080, 6, 0, 0, "VMOVUPDYmr", 0|(1<<TID::MayStore), 0x111800044ULL, NULL, NULL, NULL, OperandInfo256 }, // Inst #3080 = VMOVUPDYmr
+ { 3081, 6, 1, 0, "VMOVUPDYrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x110800046ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #3081 = VMOVUPDYrm
+ { 3082, 2, 1, 0, "VMOVUPDYrr", 0, 0x110800045ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #3082 = VMOVUPDYrr
+ { 3083, 6, 0, 0, "VMOVUPDmr", 0|(1<<TID::MayStore), 0x111800044ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3083 = VMOVUPDmr
+ { 3084, 6, 0, 0, "VMOVUPDmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x111800044ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3084 = VMOVUPDmr_Int
+ { 3085, 6, 1, 0, "VMOVUPDrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad), 0x110800046ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3085 = VMOVUPDrm
+ { 3086, 6, 1, 0, "VMOVUPDrm_Int", 0|(1<<TID::MayLoad), 0x110800046ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3086 = VMOVUPDrm_Int
+ { 3087, 2, 1, 0, "VMOVUPDrr", 0, 0x110800045ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3087 = VMOVUPDrr
+ { 3088, 6, 0, 0, "VMOVUPSYmr", 0|(1<<TID::MayStore), 0x111400004ULL, NULL, NULL, NULL, OperandInfo256 }, // Inst #3088 = VMOVUPSYmr
+ { 3089, 6, 1, 0, "VMOVUPSYrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x110400006ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #3089 = VMOVUPSYrm
+ { 3090, 2, 1, 0, "VMOVUPSYrr", 0, 0x110400005ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #3090 = VMOVUPSYrr
+ { 3091, 6, 0, 0, "VMOVUPSmr", 0|(1<<TID::MayStore), 0x111400004ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3091 = VMOVUPSmr
+ { 3092, 6, 0, 0, "VMOVUPSmr_Int", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x111400004ULL, NULL, NULL, NULL, OperandInfo186 }, // Inst #3092 = VMOVUPSmr_Int
+ { 3093, 6, 1, 0, "VMOVUPSrm", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x110400006ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3093 = VMOVUPSrm
+ { 3094, 6, 1, 0, "VMOVUPSrm_Int", 0|(1<<TID::FoldableAsLoad)|(1<<TID::MayLoad)|(1<<TID::Rematerializable), 0x110400006ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3094 = VMOVUPSrm_Int
+ { 3095, 2, 1, 0, "VMOVUPSrr", 0, 0x110400005ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3095 = VMOVUPSrr
+ { 3096, 6, 1, 0, "VMOVZDI2PDIrm", 0|(1<<TID::MayLoad), 0x16e800046ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3096 = VMOVZDI2PDIrm
+ { 3097, 2, 1, 0, "VMOVZDI2PDIrr", 0, 0x16e800045ULL, NULL, NULL, NULL, OperandInfo187 }, // Inst #3097 = VMOVZDI2PDIrr
+ { 3098, 6, 1, 0, "VMOVZPQILo2PQIrm", 0|(1<<TID::MayLoad), 0x17e000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3098 = VMOVZPQILo2PQIrm
+ { 3099, 2, 1, 0, "VMOVZPQILo2PQIrr", 0, 0x17e000c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3099 = VMOVZPQILo2PQIrr
+ { 3100, 6, 1, 0, "VMOVZQI2PQIrm", 0|(1<<TID::MayLoad), 0x17e000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3100 = VMOVZQI2PQIrm
+ { 3101, 2, 1, 0, "VMOVZQI2PQIrr", 0, 0x36e800045ULL, NULL, NULL, NULL, OperandInfo182 }, // Inst #3101 = VMOVZQI2PQIrr
+ { 3102, 8, 1, 0, "VMPSADBWrmi", 0|(1<<TID::MayLoad), 0x542c02e46ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #3102 = VMPSADBWrmi
+ { 3103, 4, 1, 0, "VMPSADBWrri", 0, 0x542c02e45ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #3103 = VMPSADBWrri
+ { 3104, 5, 0, 0, "VMPTRLDm", 0|(1<<TID::UnmodeledSideEffects), 0xc700011eULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #3104 = VMPTRLDm
+ { 3105, 5, 1, 0, "VMPTRSTm", 0|(1<<TID::UnmodeledSideEffects), 0xc700011fULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #3105 = VMPTRSTm
+ { 3106, 6, 1, 0, "VMREAD32rm", 0|(1<<TID::UnmodeledSideEffects), 0x78000104ULL, NULL, NULL, NULL, OperandInfo15 }, // Inst #3106 = VMREAD32rm
+ { 3107, 2, 1, 0, "VMREAD32rr", 0|(1<<TID::UnmodeledSideEffects), 0x78000103ULL, NULL, NULL, NULL, OperandInfo58 }, // Inst #3107 = VMREAD32rr
+ { 3108, 6, 1, 0, "VMREAD64rm", 0|(1<<TID::UnmodeledSideEffects), 0x78000104ULL, NULL, NULL, NULL, OperandInfo19 }, // Inst #3108 = VMREAD64rm
+ { 3109, 2, 1, 0, "VMREAD64rr", 0|(1<<TID::UnmodeledSideEffects), 0x78000103ULL, NULL, NULL, NULL, OperandInfo60 }, // Inst #3109 = VMREAD64rr
+ { 3110, 0, 0, 0, "VMRESUME", 0|(1<<TID::UnmodeledSideEffects), 0x1000123ULL, NULL, NULL, NULL, 0 }, // Inst #3110 = VMRESUME
+ { 3111, 7, 1, 0, "VMULPDYrm", 0|(1<<TID::MayLoad), 0x559800146ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #3111 = VMULPDYrm
+ { 3112, 3, 1, 0, "VMULPDYrr", 0|(1<<TID::Commutable), 0x559800145ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #3112 = VMULPDYrr
+ { 3113, 7, 1, 0, "VMULPDrm", 0|(1<<TID::MayLoad), 0x559800146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3113 = VMULPDrm
+ { 3114, 3, 1, 0, "VMULPDrr", 0|(1<<TID::Commutable), 0x559800145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3114 = VMULPDrr
+ { 3115, 7, 1, 0, "VMULPSYrm", 0|(1<<TID::MayLoad), 0x559400106ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #3115 = VMULPSYrm
+ { 3116, 3, 1, 0, "VMULPSYrr", 0|(1<<TID::Commutable), 0x559400105ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #3116 = VMULPSYrr
+ { 3117, 7, 1, 0, "VMULPSrm", 0|(1<<TID::MayLoad), 0x559400106ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3117 = VMULPSrm
+ { 3118, 3, 1, 0, "VMULPSrr", 0|(1<<TID::Commutable), 0x559400105ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3118 = VMULPSrr
+ { 3119, 7, 1, 0, "VMULSDrm", 0|(1<<TID::MayLoad), 0x559000b06ULL, NULL, NULL, NULL, OperandInfo226 }, // Inst #3119 = VMULSDrm
+ { 3120, 7, 1, 0, "VMULSDrm_Int", 0|(1<<TID::MayLoad), 0x559000b06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3120 = VMULSDrm_Int
+ { 3121, 3, 1, 0, "VMULSDrr", 0|(1<<TID::Commutable), 0x559000b05ULL, NULL, NULL, NULL, OperandInfo227 }, // Inst #3121 = VMULSDrr
+ { 3122, 3, 1, 0, "VMULSDrr_Int", 0, 0x559000b05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3122 = VMULSDrr_Int
+ { 3123, 7, 1, 0, "VMULSSrm", 0|(1<<TID::MayLoad), 0x559000c06ULL, NULL, NULL, NULL, OperandInfo228 }, // Inst #3123 = VMULSSrm
+ { 3124, 7, 1, 0, "VMULSSrm_Int", 0|(1<<TID::MayLoad), 0x559000c06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3124 = VMULSSrm_Int
+ { 3125, 3, 1, 0, "VMULSSrr", 0|(1<<TID::Commutable), 0x559000c05ULL, NULL, NULL, NULL, OperandInfo229 }, // Inst #3125 = VMULSSrr
+ { 3126, 3, 1, 0, "VMULSSrr_Int", 0, 0x559000c05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3126 = VMULSSrr_Int
+ { 3127, 6, 1, 0, "VMWRITE32rm", 0|(1<<TID::UnmodeledSideEffects), 0x79000106ULL, NULL, NULL, NULL, OperandInfo57 }, // Inst #3127 = VMWRITE32rm
+ { 3128, 2, 1, 0, "VMWRITE32rr", 0|(1<<TID::UnmodeledSideEffects), 0x79000105ULL, NULL, NULL, NULL, OperandInfo58 }, // Inst #3128 = VMWRITE32rr
+ { 3129, 6, 1, 0, "VMWRITE64rm", 0|(1<<TID::UnmodeledSideEffects), 0x79000106ULL, NULL, NULL, NULL, OperandInfo59 }, // Inst #3129 = VMWRITE64rm
+ { 3130, 2, 1, 0, "VMWRITE64rr", 0|(1<<TID::UnmodeledSideEffects), 0x79000105ULL, NULL, NULL, NULL, OperandInfo60 }, // Inst #3130 = VMWRITE64rr
+ { 3131, 0, 0, 0, "VMXOFF", 0|(1<<TID::UnmodeledSideEffects), 0x1000124ULL, NULL, NULL, NULL, 0 }, // Inst #3131 = VMXOFF
+ { 3132, 5, 0, 0, "VMXON", 0|(1<<TID::UnmodeledSideEffects), 0xc7000c1eULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #3132 = VMXON
+ { 3133, 7, 1, 0, "VORPDYrm", 0|(1<<TID::UnmodeledSideEffects), 0x556800046ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #3133 = VORPDYrm
+ { 3134, 3, 1, 0, "VORPDYrr", 0|(1<<TID::Commutable)|(1<<TID::UnmodeledSideEffects), 0x556800045ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #3134 = VORPDYrr
+ { 3135, 7, 1, 0, "VORPDrm", 0|(1<<TID::UnmodeledSideEffects), 0x556800046ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3135 = VORPDrm
+ { 3136, 3, 1, 0, "VORPDrr", 0|(1<<TID::Commutable)|(1<<TID::UnmodeledSideEffects), 0x556800045ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3136 = VORPDrr
+ { 3137, 7, 1, 0, "VORPSYrm", 0|(1<<TID::UnmodeledSideEffects), 0x556400006ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #3137 = VORPSYrm
+ { 3138, 3, 1, 0, "VORPSYrr", 0|(1<<TID::Commutable)|(1<<TID::UnmodeledSideEffects), 0x556400005ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #3138 = VORPSYrr
+ { 3139, 7, 1, 0, "VORPSrm", 0|(1<<TID::UnmodeledSideEffects), 0x556400006ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3139 = VORPSrm
+ { 3140, 3, 1, 0, "VORPSrr", 0|(1<<TID::Commutable)|(1<<TID::UnmodeledSideEffects), 0x556400005ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3140 = VORPSrr
+ { 3141, 6, 1, 0, "VPABSBrm128", 0|(1<<TID::MayLoad), 0x11cc02d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3141 = VPABSBrm128
+ { 3142, 2, 1, 0, "VPABSBrr128", 0, 0x11cc02d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3142 = VPABSBrr128
+ { 3143, 6, 1, 0, "VPABSDrm128", 0|(1<<TID::MayLoad), 0x11ec02d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3143 = VPABSDrm128
+ { 3144, 2, 1, 0, "VPABSDrr128", 0, 0x11ec02d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3144 = VPABSDrr128
+ { 3145, 6, 1, 0, "VPABSWrm128", 0|(1<<TID::MayLoad), 0x11dc02d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3145 = VPABSWrm128
+ { 3146, 2, 1, 0, "VPABSWrr128", 0, 0x11dc02d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3146 = VPABSWrr128
+ { 3147, 7, 1, 0, "VPACKSSDWrm", 0|(1<<TID::MayLoad), 0x56bc00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3147 = VPACKSSDWrm
+ { 3148, 3, 1, 0, "VPACKSSDWrr", 0, 0x56bc00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3148 = VPACKSSDWrr
+ { 3149, 7, 1, 0, "VPACKSSWBrm", 0|(1<<TID::MayLoad), 0x563c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3149 = VPACKSSWBrm
+ { 3150, 3, 1, 0, "VPACKSSWBrr", 0, 0x563c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3150 = VPACKSSWBrr
+ { 3151, 7, 1, 0, "VPACKUSDWrm", 0|(1<<TID::MayLoad), 0x52bc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3151 = VPACKUSDWrm
+ { 3152, 3, 1, 0, "VPACKUSDWrr", 0, 0x52bc00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3152 = VPACKUSDWrr
+ { 3153, 7, 1, 0, "VPACKUSWBrm", 0|(1<<TID::MayLoad), 0x567c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3153 = VPACKUSWBrm
+ { 3154, 3, 1, 0, "VPACKUSWBrr", 0, 0x567c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3154 = VPACKUSWBrr
+ { 3155, 7, 1, 0, "VPADDBrm", 0|(1<<TID::MayLoad), 0x5fcc00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3155 = VPADDBrm
+ { 3156, 3, 1, 0, "VPADDBrr", 0|(1<<TID::Commutable), 0x5fcc00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3156 = VPADDBrr
+ { 3157, 7, 1, 0, "VPADDDrm", 0|(1<<TID::MayLoad), 0x5fec00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3157 = VPADDDrm
+ { 3158, 3, 1, 0, "VPADDDrr", 0|(1<<TID::Commutable), 0x5fec00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3158 = VPADDDrr
+ { 3159, 7, 1, 0, "VPADDQrm", 0|(1<<TID::MayLoad), 0x5d4c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3159 = VPADDQrm
+ { 3160, 3, 1, 0, "VPADDQrr", 0|(1<<TID::Commutable), 0x5d4c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3160 = VPADDQrr
+ { 3161, 7, 1, 0, "VPADDSBrm", 0|(1<<TID::MayLoad), 0x5ecc00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3161 = VPADDSBrm
+ { 3162, 3, 1, 0, "VPADDSBrr", 0|(1<<TID::Commutable), 0x5ecc00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3162 = VPADDSBrr
+ { 3163, 7, 1, 0, "VPADDSWrm", 0|(1<<TID::MayLoad), 0x5edc00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3163 = VPADDSWrm
+ { 3164, 3, 1, 0, "VPADDSWrr", 0|(1<<TID::Commutable), 0x5edc00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3164 = VPADDSWrr
+ { 3165, 7, 1, 0, "VPADDUSBrm", 0|(1<<TID::MayLoad), 0x5dcc00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3165 = VPADDUSBrm
+ { 3166, 3, 1, 0, "VPADDUSBrr", 0|(1<<TID::Commutable), 0x5dcc00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3166 = VPADDUSBrr
+ { 3167, 7, 1, 0, "VPADDUSWrm", 0|(1<<TID::MayLoad), 0x5ddc00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3167 = VPADDUSWrm
+ { 3168, 3, 1, 0, "VPADDUSWrr", 0|(1<<TID::Commutable), 0x5ddc00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3168 = VPADDUSWrr
+ { 3169, 7, 1, 0, "VPADDWrm", 0|(1<<TID::MayLoad), 0x5fdc00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3169 = VPADDWrm
+ { 3170, 3, 1, 0, "VPADDWrr", 0|(1<<TID::Commutable), 0x5fdc00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3170 = VPADDWrr
+ { 3171, 8, 1, 0, "VPALIGNR128rm", 0|(1<<TID::UnmodeledSideEffects), 0x50fc02e46ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #3171 = VPALIGNR128rm
+ { 3172, 4, 1, 0, "VPALIGNR128rr", 0|(1<<TID::UnmodeledSideEffects), 0x50fc02e45ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #3172 = VPALIGNR128rr
+ { 3173, 7, 1, 0, "VPANDNrm", 0|(1<<TID::MayLoad), 0x5dfc00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3173 = VPANDNrm
+ { 3174, 3, 1, 0, "VPANDNrr", 0, 0x5dfc00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3174 = VPANDNrr
+ { 3175, 7, 1, 0, "VPANDrm", 0|(1<<TID::MayLoad), 0x5dbc00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3175 = VPANDrm
+ { 3176, 3, 1, 0, "VPANDrr", 0|(1<<TID::Commutable), 0x5dbc00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3176 = VPANDrr
+ { 3177, 7, 1, 0, "VPAVGBrm", 0|(1<<TID::MayLoad), 0x5e0c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3177 = VPAVGBrm
+ { 3178, 3, 1, 0, "VPAVGBrr", 0|(1<<TID::Commutable), 0x5e0c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3178 = VPAVGBrr
+ { 3179, 7, 1, 0, "VPAVGWrm", 0|(1<<TID::MayLoad), 0x5e3c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3179 = VPAVGWrm
+ { 3180, 3, 1, 0, "VPAVGWrr", 0|(1<<TID::Commutable), 0x5e3c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3180 = VPAVGWrr
+ { 3181, 8, 1, 0, "VPBLENDVBrm", 0|(1<<TID::MayLoad), 0xd4cc00e46ULL, NULL, NULL, NULL, OperandInfo235 }, // Inst #3181 = VPBLENDVBrm
+ { 3182, 4, 1, 0, "VPBLENDVBrr", 0, 0xd4cc00e45ULL, NULL, NULL, NULL, OperandInfo236 }, // Inst #3182 = VPBLENDVBrr
+ { 3183, 8, 1, 0, "VPBLENDWrmi", 0|(1<<TID::MayLoad), 0x50ec02e46ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #3183 = VPBLENDWrmi
+ { 3184, 4, 1, 0, "VPBLENDWrri", 0, 0x50ec02e45ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #3184 = VPBLENDWrri
+ { 3185, 7, 1, 0, "VPCLMULHQHQDQrm", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3185 = VPCLMULHQHQDQrm
+ { 3186, 3, 1, 0, "VPCLMULHQHQDQrr", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3186 = VPCLMULHQHQDQrr
+ { 3187, 7, 1, 0, "VPCLMULHQLQDQrm", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3187 = VPCLMULHQLQDQrm
+ { 3188, 3, 1, 0, "VPCLMULHQLQDQrr", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3188 = VPCLMULHQLQDQrr
+ { 3189, 7, 1, 0, "VPCLMULLQHQDQrm", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3189 = VPCLMULLQHQDQrm
+ { 3190, 3, 1, 0, "VPCLMULLQHQDQrr", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3190 = VPCLMULLQHQDQrr
+ { 3191, 7, 1, 0, "VPCLMULLQLQDQrm", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3191 = VPCLMULLQLQDQrm
+ { 3192, 3, 1, 0, "VPCLMULLQLQDQrr", 0|(1<<TID::UnmodeledSideEffects), 0x0ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3192 = VPCLMULLQLQDQrr
+ { 3193, 8, 1, 0, "VPCLMULQDQrm", 0|(1<<TID::UnmodeledSideEffects), 0x544c02e46ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #3193 = VPCLMULQDQrm
+ { 3194, 4, 1, 0, "VPCLMULQDQrr", 0|(1<<TID::UnmodeledSideEffects), 0x544c02e45ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #3194 = VPCLMULQDQrr
+ { 3195, 7, 1, 0, "VPCMPEQBrm", 0|(1<<TID::MayLoad), 0x574c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3195 = VPCMPEQBrm
+ { 3196, 3, 1, 0, "VPCMPEQBrr", 0|(1<<TID::Commutable), 0x574c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3196 = VPCMPEQBrr
+ { 3197, 7, 1, 0, "VPCMPEQDrm", 0|(1<<TID::MayLoad), 0x576c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3197 = VPCMPEQDrm
+ { 3198, 3, 1, 0, "VPCMPEQDrr", 0|(1<<TID::Commutable), 0x576c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3198 = VPCMPEQDrr
+ { 3199, 7, 1, 0, "VPCMPEQQrm", 0|(1<<TID::MayLoad), 0x529c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3199 = VPCMPEQQrm
+ { 3200, 3, 1, 0, "VPCMPEQQrr", 0|(1<<TID::Commutable), 0x529c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3200 = VPCMPEQQrr
+ { 3201, 7, 1, 0, "VPCMPEQWrm", 0|(1<<TID::MayLoad), 0x575c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3201 = VPCMPEQWrm
+ { 3202, 3, 1, 0, "VPCMPEQWrr", 0|(1<<TID::Commutable), 0x575c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3202 = VPCMPEQWrr
+ { 3203, 7, 0, 0, "VPCMPESTRIArm", 0|(1<<TID::MayLoad), 0x161c02e46ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #3203 = VPCMPESTRIArm
+ { 3204, 3, 0, 0, "VPCMPESTRIArr", 0, 0x161c02e45ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #3204 = VPCMPESTRIArr
+ { 3205, 7, 0, 0, "VPCMPESTRICrm", 0|(1<<TID::MayLoad), 0x161c02e46ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #3205 = VPCMPESTRICrm
+ { 3206, 3, 0, 0, "VPCMPESTRICrr", 0, 0x161c02e45ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #3206 = VPCMPESTRICrr
+ { 3207, 7, 0, 0, "VPCMPESTRIOrm", 0|(1<<TID::MayLoad), 0x161c02e46ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #3207 = VPCMPESTRIOrm
+ { 3208, 3, 0, 0, "VPCMPESTRIOrr", 0, 0x161c02e45ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #3208 = VPCMPESTRIOrr
+ { 3209, 7, 0, 0, "VPCMPESTRISrm", 0|(1<<TID::MayLoad), 0x161c02e46ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #3209 = VPCMPESTRISrm
+ { 3210, 3, 0, 0, "VPCMPESTRISrr", 0, 0x161c02e45ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #3210 = VPCMPESTRISrr
+ { 3211, 7, 0, 0, "VPCMPESTRIZrm", 0|(1<<TID::MayLoad), 0x161c02e46ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #3211 = VPCMPESTRIZrm
+ { 3212, 3, 0, 0, "VPCMPESTRIZrr", 0, 0x161c02e45ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #3212 = VPCMPESTRIZrr
+ { 3213, 7, 0, 0, "VPCMPESTRIrm", 0|(1<<TID::MayLoad), 0x161c02e46ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #3213 = VPCMPESTRIrm
+ { 3214, 3, 0, 0, "VPCMPESTRIrr", 0, 0x161c02e45ULL, ImplicitList14, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #3214 = VPCMPESTRIrr
+ { 3215, 8, 1, 0, "VPCMPESTRM128MEM", 0|(1<<TID::MayLoad)|(1<<TID::UsesCustomInserter), 0x2000ULL, ImplicitList14, ImplicitList1, Barriers1, OperandInfo136 }, // Inst #3215 = VPCMPESTRM128MEM
+ { 3216, 4, 1, 0, "VPCMPESTRM128REG", 0|(1<<TID::UsesCustomInserter), 0x2000ULL, ImplicitList14, ImplicitList1, Barriers1, OperandInfo80 }, // Inst #3216 = VPCMPESTRM128REG
+ { 3217, 7, 0, 0, "VPCMPESTRM128rm", 0|(1<<TID::UnmodeledSideEffects), 0x160c02e46ULL, ImplicitList14, ImplicitList43, Barriers1, OperandInfo44 }, // Inst #3217 = VPCMPESTRM128rm
+ { 3218, 3, 0, 0, "VPCMPESTRM128rr", 0|(1<<TID::UnmodeledSideEffects), 0x160c02e45ULL, ImplicitList14, ImplicitList43, Barriers1, OperandInfo45 }, // Inst #3218 = VPCMPESTRM128rr
+ { 3219, 7, 1, 0, "VPCMPGTBrm", 0|(1<<TID::MayLoad), 0x564c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3219 = VPCMPGTBrm
+ { 3220, 3, 1, 0, "VPCMPGTBrr", 0, 0x564c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3220 = VPCMPGTBrr
+ { 3221, 7, 1, 0, "VPCMPGTDrm", 0|(1<<TID::MayLoad), 0x566c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3221 = VPCMPGTDrm
+ { 3222, 3, 1, 0, "VPCMPGTDrr", 0, 0x566c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3222 = VPCMPGTDrr
+ { 3223, 7, 1, 0, "VPCMPGTQrm", 0|(1<<TID::MayLoad), 0x537c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3223 = VPCMPGTQrm
+ { 3224, 3, 1, 0, "VPCMPGTQrr", 0, 0x537c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3224 = VPCMPGTQrr
+ { 3225, 7, 1, 0, "VPCMPGTWrm", 0|(1<<TID::MayLoad), 0x565c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3225 = VPCMPGTWrm
+ { 3226, 3, 1, 0, "VPCMPGTWrr", 0, 0x565c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3226 = VPCMPGTWrr
+ { 3227, 7, 0, 0, "VPCMPISTRIArm", 0|(1<<TID::MayLoad), 0x163c02e46ULL, NULL, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #3227 = VPCMPISTRIArm
+ { 3228, 3, 0, 0, "VPCMPISTRIArr", 0, 0x163c02e45ULL, NULL, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #3228 = VPCMPISTRIArr
+ { 3229, 7, 0, 0, "VPCMPISTRICrm", 0|(1<<TID::MayLoad), 0x163c02e46ULL, NULL, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #3229 = VPCMPISTRICrm
+ { 3230, 3, 0, 0, "VPCMPISTRICrr", 0, 0x163c02e45ULL, NULL, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #3230 = VPCMPISTRICrr
+ { 3231, 7, 0, 0, "VPCMPISTRIOrm", 0|(1<<TID::MayLoad), 0x163c02e46ULL, NULL, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #3231 = VPCMPISTRIOrm
+ { 3232, 3, 0, 0, "VPCMPISTRIOrr", 0, 0x163c02e45ULL, NULL, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #3232 = VPCMPISTRIOrr
+ { 3233, 7, 0, 0, "VPCMPISTRISrm", 0|(1<<TID::MayLoad), 0x163c02e46ULL, NULL, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #3233 = VPCMPISTRISrm
+ { 3234, 3, 0, 0, "VPCMPISTRISrr", 0, 0x163c02e45ULL, NULL, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #3234 = VPCMPISTRISrr
+ { 3235, 7, 0, 0, "VPCMPISTRIZrm", 0|(1<<TID::MayLoad), 0x163c02e46ULL, NULL, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #3235 = VPCMPISTRIZrm
+ { 3236, 3, 0, 0, "VPCMPISTRIZrr", 0, 0x163c02e45ULL, NULL, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #3236 = VPCMPISTRIZrr
+ { 3237, 7, 0, 0, "VPCMPISTRIrm", 0|(1<<TID::MayLoad), 0x163c02e46ULL, NULL, ImplicitList42, Barriers1, OperandInfo44 }, // Inst #3237 = VPCMPISTRIrm
+ { 3238, 3, 0, 0, "VPCMPISTRIrr", 0, 0x163c02e45ULL, NULL, ImplicitList42, Barriers1, OperandInfo45 }, // Inst #3238 = VPCMPISTRIrr
+ { 3239, 8, 1, 0, "VPCMPISTRM128MEM", 0|(1<<TID::MayLoad)|(1<<TID::UsesCustomInserter), 0x2000ULL, NULL, ImplicitList1, Barriers1, OperandInfo136 }, // Inst #3239 = VPCMPISTRM128MEM
+ { 3240, 4, 1, 0, "VPCMPISTRM128REG", 0|(1<<TID::UsesCustomInserter), 0x2000ULL, NULL, ImplicitList1, Barriers1, OperandInfo80 }, // Inst #3240 = VPCMPISTRM128REG
+ { 3241, 7, 0, 0, "VPCMPISTRM128rm", 0|(1<<TID::UnmodeledSideEffects), 0x162c02e46ULL, NULL, ImplicitList43, Barriers1, OperandInfo44 }, // Inst #3241 = VPCMPISTRM128rm
+ { 3242, 3, 0, 0, "VPCMPISTRM128rr", 0|(1<<TID::UnmodeledSideEffects), 0x162c02e45ULL, NULL, ImplicitList43, Barriers1, OperandInfo45 }, // Inst #3242 = VPCMPISTRM128rr
+ { 3243, 8, 1, 0, "VPERM2F128rm", 0|(1<<TID::UnmodeledSideEffects), 0x506c02e46ULL, NULL, NULL, NULL, OperandInfo231 }, // Inst #3243 = VPERM2F128rm
+ { 3244, 4, 1, 0, "VPERM2F128rr", 0|(1<<TID::UnmodeledSideEffects), 0x506c02e45ULL, NULL, NULL, NULL, OperandInfo232 }, // Inst #3244 = VPERM2F128rr
+ { 3245, 7, 1, 0, "VPERMILPDYmi", 0|(1<<TID::MayLoad), 0x105c02e46ULL, NULL, NULL, NULL, OperandInfo261 }, // Inst #3245 = VPERMILPDYmi
+ { 3246, 3, 1, 0, "VPERMILPDYri", 0, 0x105c02e45ULL, NULL, NULL, NULL, OperandInfo262 }, // Inst #3246 = VPERMILPDYri
+ { 3247, 7, 1, 0, "VPERMILPDYrm", 0|(1<<TID::MayLoad), 0x50dc00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #3247 = VPERMILPDYrm
+ { 3248, 3, 1, 0, "VPERMILPDYrr", 0, 0x50dc00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #3248 = VPERMILPDYrr
+ { 3249, 7, 1, 0, "VPERMILPDmi", 0|(1<<TID::MayLoad), 0x105c02e46ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #3249 = VPERMILPDmi
+ { 3250, 3, 1, 0, "VPERMILPDri", 0, 0x105c02e45ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3250 = VPERMILPDri
+ { 3251, 7, 1, 0, "VPERMILPDrm", 0|(1<<TID::MayLoad), 0x50dc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3251 = VPERMILPDrm
+ { 3252, 3, 1, 0, "VPERMILPDrr", 0, 0x50dc00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3252 = VPERMILPDrr
+ { 3253, 7, 1, 0, "VPERMILPSYmi", 0|(1<<TID::MayLoad), 0x104c02e46ULL, NULL, NULL, NULL, OperandInfo261 }, // Inst #3253 = VPERMILPSYmi
+ { 3254, 3, 1, 0, "VPERMILPSYri", 0, 0x104c02e45ULL, NULL, NULL, NULL, OperandInfo262 }, // Inst #3254 = VPERMILPSYri
+ { 3255, 7, 1, 0, "VPERMILPSYrm", 0|(1<<TID::MayLoad), 0x50cc00d46ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #3255 = VPERMILPSYrm
+ { 3256, 3, 1, 0, "VPERMILPSYrr", 0, 0x50cc00d45ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #3256 = VPERMILPSYrr
+ { 3257, 7, 1, 0, "VPERMILPSmi", 0|(1<<TID::MayLoad), 0x104c02e46ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #3257 = VPERMILPSmi
+ { 3258, 3, 1, 0, "VPERMILPSri", 0, 0x104c02e45ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3258 = VPERMILPSri
+ { 3259, 7, 1, 0, "VPERMILPSrm", 0|(1<<TID::MayLoad), 0x50cc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3259 = VPERMILPSrm
+ { 3260, 3, 1, 0, "VPERMILPSrr", 0, 0x50cc00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3260 = VPERMILPSrr
+ { 3261, 7, 0, 0, "VPEXTRBmr", 0|(1<<TID::UnmodeledSideEffects), 0x114c02e44ULL, NULL, NULL, NULL, OperandInfo108 }, // Inst #3261 = VPEXTRBmr
+ { 3262, 3, 1, 0, "VPEXTRBrr", 0, 0x114c02e43ULL, NULL, NULL, NULL, OperandInfo109 }, // Inst #3262 = VPEXTRBrr
+ { 3263, 3, 1, 0, "VPEXTRBrr64", 0|(1<<TID::UnmodeledSideEffects), 0x114c02e43ULL, NULL, NULL, NULL, OperandInfo206 }, // Inst #3263 = VPEXTRBrr64
+ { 3264, 7, 0, 0, "VPEXTRDmr", 0|(1<<TID::MayStore), 0x116c02e44ULL, NULL, NULL, NULL, OperandInfo108 }, // Inst #3264 = VPEXTRDmr
+ { 3265, 3, 1, 0, "VPEXTRDrr", 0, 0x116c02e43ULL, NULL, NULL, NULL, OperandInfo109 }, // Inst #3265 = VPEXTRDrr
+ { 3266, 7, 0, 0, "VPEXTRQmr", 0|(1<<TID::MayStore), 0x316c03e44ULL, NULL, NULL, NULL, OperandInfo108 }, // Inst #3266 = VPEXTRQmr
+ { 3267, 3, 1, 0, "VPEXTRQrr", 0, 0x316c03e43ULL, NULL, NULL, NULL, OperandInfo206 }, // Inst #3267 = VPEXTRQrr
+ { 3268, 7, 0, 0, "VPEXTRWmr", 0|(1<<TID::UnmodeledSideEffects), 0x115c02e44ULL, NULL, NULL, NULL, OperandInfo108 }, // Inst #3268 = VPEXTRWmr
+ { 3269, 3, 1, 0, "VPEXTRWri", 0, 0x1c5c02045ULL, NULL, NULL, NULL, OperandInfo109 }, // Inst #3269 = VPEXTRWri
+ { 3270, 7, 1, 0, "VPHADDDrm128", 0|(1<<TID::MayLoad), 0x502c02d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3270 = VPHADDDrm128
+ { 3271, 3, 1, 0, "VPHADDDrr128", 0, 0x502c02d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3271 = VPHADDDrr128
+ { 3272, 7, 1, 0, "VPHADDSWrm128", 0|(1<<TID::MayLoad), 0x503c02d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3272 = VPHADDSWrm128
+ { 3273, 3, 1, 0, "VPHADDSWrr128", 0, 0x503c02d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3273 = VPHADDSWrr128
+ { 3274, 7, 1, 0, "VPHADDWrm128", 0|(1<<TID::MayLoad), 0x501c02d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3274 = VPHADDWrm128
+ { 3275, 3, 1, 0, "VPHADDWrr128", 0, 0x501c02d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3275 = VPHADDWrr128
+ { 3276, 6, 1, 0, "VPHMINPOSUWrm128", 0|(1<<TID::MayLoad), 0x141c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3276 = VPHMINPOSUWrm128
+ { 3277, 2, 1, 0, "VPHMINPOSUWrr128", 0, 0x141c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3277 = VPHMINPOSUWrr128
+ { 3278, 7, 1, 0, "VPHSUBDrm128", 0|(1<<TID::MayLoad), 0x506c02d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3278 = VPHSUBDrm128
+ { 3279, 3, 1, 0, "VPHSUBDrr128", 0, 0x506c02d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3279 = VPHSUBDrr128
+ { 3280, 7, 1, 0, "VPHSUBSWrm128", 0|(1<<TID::MayLoad), 0x507c02d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3280 = VPHSUBSWrm128
+ { 3281, 3, 1, 0, "VPHSUBSWrr128", 0, 0x507c02d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3281 = VPHSUBSWrr128
+ { 3282, 7, 1, 0, "VPHSUBWrm128", 0|(1<<TID::MayLoad), 0x505c02d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3282 = VPHSUBWrm128
+ { 3283, 3, 1, 0, "VPHSUBWrr128", 0, 0x505c02d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3283 = VPHSUBWrr128
+ { 3284, 8, 1, 0, "VPINSRBrm", 0|(1<<TID::MayLoad), 0x520c02e46ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #3284 = VPINSRBrm
+ { 3285, 4, 1, 0, "VPINSRBrr", 0, 0x520c02e45ULL, NULL, NULL, NULL, OperandInfo263 }, // Inst #3285 = VPINSRBrr
+ { 3286, 8, 1, 0, "VPINSRDrm", 0|(1<<TID::MayLoad), 0x522c02e46ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #3286 = VPINSRDrm
+ { 3287, 4, 1, 0, "VPINSRDrr", 0, 0x522c02e45ULL, NULL, NULL, NULL, OperandInfo263 }, // Inst #3287 = VPINSRDrr
+ { 3288, 8, 1, 0, "VPINSRQrm", 0|(1<<TID::MayLoad), 0x722c02e46ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #3288 = VPINSRQrm
+ { 3289, 4, 1, 0, "VPINSRQrr", 0, 0x722c02e45ULL, NULL, NULL, NULL, OperandInfo264 }, // Inst #3289 = VPINSRQrr
+ { 3290, 8, 1, 0, "VPINSRWrmi", 0|(1<<TID::MayLoad), 0x5c4c02046ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #3290 = VPINSRWrmi
+ { 3291, 4, 1, 0, "VPINSRWrr64i", 0|(1<<TID::UnmodeledSideEffects), 0x5c4c02045ULL, NULL, NULL, NULL, OperandInfo264 }, // Inst #3291 = VPINSRWrr64i
+ { 3292, 4, 1, 0, "VPINSRWrri", 0, 0x5c4c02045ULL, NULL, NULL, NULL, OperandInfo263 }, // Inst #3292 = VPINSRWrri
+ { 3293, 7, 1, 0, "VPMADDUBSWrm128", 0|(1<<TID::MayLoad), 0x504c02d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3293 = VPMADDUBSWrm128
+ { 3294, 3, 1, 0, "VPMADDUBSWrr128", 0, 0x504c02d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3294 = VPMADDUBSWrr128
+ { 3295, 7, 1, 0, "VPMADDWDrm", 0|(1<<TID::MayLoad), 0x5f5c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3295 = VPMADDWDrm
+ { 3296, 3, 1, 0, "VPMADDWDrr", 0|(1<<TID::Commutable), 0x5f5c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3296 = VPMADDWDrr
+ { 3297, 7, 1, 0, "VPMAXSBrm", 0|(1<<TID::MayLoad), 0x53cc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3297 = VPMAXSBrm
+ { 3298, 3, 1, 0, "VPMAXSBrr", 0|(1<<TID::Commutable), 0x53cc00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3298 = VPMAXSBrr
+ { 3299, 7, 1, 0, "VPMAXSDrm", 0|(1<<TID::MayLoad), 0x53dc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3299 = VPMAXSDrm
+ { 3300, 3, 1, 0, "VPMAXSDrr", 0|(1<<TID::Commutable), 0x53dc00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3300 = VPMAXSDrr
+ { 3301, 7, 1, 0, "VPMAXSWrm", 0|(1<<TID::MayLoad), 0x5eec00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3301 = VPMAXSWrm
+ { 3302, 3, 1, 0, "VPMAXSWrr", 0|(1<<TID::Commutable), 0x5eec00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3302 = VPMAXSWrr
+ { 3303, 7, 1, 0, "VPMAXUBrm", 0|(1<<TID::MayLoad), 0x5dec00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3303 = VPMAXUBrm
+ { 3304, 3, 1, 0, "VPMAXUBrr", 0|(1<<TID::Commutable), 0x5dec00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3304 = VPMAXUBrr
+ { 3305, 7, 1, 0, "VPMAXUDrm", 0|(1<<TID::MayLoad), 0x53fc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3305 = VPMAXUDrm
+ { 3306, 3, 1, 0, "VPMAXUDrr", 0|(1<<TID::Commutable), 0x53fc00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3306 = VPMAXUDrr
+ { 3307, 7, 1, 0, "VPMAXUWrm", 0|(1<<TID::MayLoad), 0x53ec00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3307 = VPMAXUWrm
+ { 3308, 3, 1, 0, "VPMAXUWrr", 0|(1<<TID::Commutable), 0x53ec00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3308 = VPMAXUWrr
+ { 3309, 7, 1, 0, "VPMINSBrm", 0|(1<<TID::MayLoad), 0x538c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3309 = VPMINSBrm
+ { 3310, 3, 1, 0, "VPMINSBrr", 0|(1<<TID::Commutable), 0x538c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3310 = VPMINSBrr
+ { 3311, 7, 1, 0, "VPMINSDrm", 0|(1<<TID::MayLoad), 0x539c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3311 = VPMINSDrm
+ { 3312, 3, 1, 0, "VPMINSDrr", 0|(1<<TID::Commutable), 0x539c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3312 = VPMINSDrr
+ { 3313, 7, 1, 0, "VPMINSWrm", 0|(1<<TID::MayLoad), 0x5eac00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3313 = VPMINSWrm
+ { 3314, 3, 1, 0, "VPMINSWrr", 0|(1<<TID::Commutable), 0x5eac00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3314 = VPMINSWrr
+ { 3315, 7, 1, 0, "VPMINUBrm", 0|(1<<TID::MayLoad), 0x5dac00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3315 = VPMINUBrm
+ { 3316, 3, 1, 0, "VPMINUBrr", 0|(1<<TID::Commutable), 0x5dac00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3316 = VPMINUBrr
+ { 3317, 7, 1, 0, "VPMINUDrm", 0|(1<<TID::MayLoad), 0x53bc00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3317 = VPMINUDrm
+ { 3318, 3, 1, 0, "VPMINUDrr", 0|(1<<TID::Commutable), 0x53bc00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3318 = VPMINUDrr
+ { 3319, 7, 1, 0, "VPMINUWrm", 0|(1<<TID::MayLoad), 0x53ac00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3319 = VPMINUWrm
+ { 3320, 3, 1, 0, "VPMINUWrr", 0|(1<<TID::Commutable), 0x53ac00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3320 = VPMINUWrr
+ { 3321, 2, 1, 0, "VPMOVMSKBr64r", 0|(1<<TID::UnmodeledSideEffects), 0x1d7c00045ULL, NULL, NULL, NULL, OperandInfo91 }, // Inst #3321 = VPMOVMSKBr64r
+ { 3322, 2, 1, 0, "VPMOVMSKBrr", 0, 0x1d7c00045ULL, NULL, NULL, NULL, OperandInfo133 }, // Inst #3322 = VPMOVMSKBrr
+ { 3323, 6, 1, 0, "VPMOVSXBDrm", 0|(1<<TID::MayLoad), 0x121c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3323 = VPMOVSXBDrm
+ { 3324, 2, 1, 0, "VPMOVSXBDrr", 0, 0x121c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3324 = VPMOVSXBDrr
+ { 3325, 6, 1, 0, "VPMOVSXBQrm", 0|(1<<TID::MayLoad), 0x122c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3325 = VPMOVSXBQrm
+ { 3326, 2, 1, 0, "VPMOVSXBQrr", 0, 0x122c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3326 = VPMOVSXBQrr
+ { 3327, 6, 1, 0, "VPMOVSXBWrm", 0|(1<<TID::MayLoad), 0x120c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3327 = VPMOVSXBWrm
+ { 3328, 2, 1, 0, "VPMOVSXBWrr", 0, 0x120c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3328 = VPMOVSXBWrr
+ { 3329, 6, 1, 0, "VPMOVSXDQrm", 0|(1<<TID::MayLoad), 0x125c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3329 = VPMOVSXDQrm
+ { 3330, 2, 1, 0, "VPMOVSXDQrr", 0, 0x125c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3330 = VPMOVSXDQrr
+ { 3331, 6, 1, 0, "VPMOVSXWDrm", 0|(1<<TID::MayLoad), 0x123c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3331 = VPMOVSXWDrm
+ { 3332, 2, 1, 0, "VPMOVSXWDrr", 0, 0x123c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3332 = VPMOVSXWDrr
+ { 3333, 6, 1, 0, "VPMOVSXWQrm", 0|(1<<TID::MayLoad), 0x124c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3333 = VPMOVSXWQrm
+ { 3334, 2, 1, 0, "VPMOVSXWQrr", 0, 0x124c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3334 = VPMOVSXWQrr
+ { 3335, 6, 1, 0, "VPMOVZXBDrm", 0|(1<<TID::MayLoad), 0x131c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3335 = VPMOVZXBDrm
+ { 3336, 2, 1, 0, "VPMOVZXBDrr", 0, 0x131c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3336 = VPMOVZXBDrr
+ { 3337, 6, 1, 0, "VPMOVZXBQrm", 0|(1<<TID::MayLoad), 0x132c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3337 = VPMOVZXBQrm
+ { 3338, 2, 1, 0, "VPMOVZXBQrr", 0, 0x132c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3338 = VPMOVZXBQrr
+ { 3339, 6, 1, 0, "VPMOVZXBWrm", 0|(1<<TID::MayLoad), 0x130c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3339 = VPMOVZXBWrm
+ { 3340, 2, 1, 0, "VPMOVZXBWrr", 0, 0x130c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3340 = VPMOVZXBWrr
+ { 3341, 6, 1, 0, "VPMOVZXDQrm", 0|(1<<TID::MayLoad), 0x135c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3341 = VPMOVZXDQrm
+ { 3342, 2, 1, 0, "VPMOVZXDQrr", 0, 0x135c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3342 = VPMOVZXDQrr
+ { 3343, 6, 1, 0, "VPMOVZXWDrm", 0|(1<<TID::MayLoad), 0x133c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3343 = VPMOVZXWDrm
+ { 3344, 2, 1, 0, "VPMOVZXWDrr", 0, 0x133c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3344 = VPMOVZXWDrr
+ { 3345, 6, 1, 0, "VPMOVZXWQrm", 0|(1<<TID::MayLoad), 0x134c00d46ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3345 = VPMOVZXWQrm
+ { 3346, 2, 1, 0, "VPMOVZXWQrr", 0, 0x134c00d45ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3346 = VPMOVZXWQrr
+ { 3347, 7, 1, 0, "VPMULDQrm", 0|(1<<TID::MayLoad), 0x528c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3347 = VPMULDQrm
+ { 3348, 3, 1, 0, "VPMULDQrr", 0|(1<<TID::Commutable), 0x528c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3348 = VPMULDQrr
+ { 3349, 7, 1, 0, "VPMULHRSWrm128", 0|(1<<TID::MayLoad), 0x50bc02d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3349 = VPMULHRSWrm128
+ { 3350, 3, 1, 0, "VPMULHRSWrr128", 0|(1<<TID::Commutable), 0x50bc02d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3350 = VPMULHRSWrr128
+ { 3351, 7, 1, 0, "VPMULHUWrm", 0|(1<<TID::MayLoad), 0x5e4c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3351 = VPMULHUWrm
+ { 3352, 3, 1, 0, "VPMULHUWrr", 0|(1<<TID::Commutable), 0x5e4c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3352 = VPMULHUWrr
+ { 3353, 7, 1, 0, "VPMULHWrm", 0|(1<<TID::MayLoad), 0x5e5c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3353 = VPMULHWrm
+ { 3354, 3, 1, 0, "VPMULHWrr", 0|(1<<TID::Commutable), 0x5e5c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3354 = VPMULHWrr
+ { 3355, 7, 1, 0, "VPMULLDrm", 0|(1<<TID::MayLoad), 0x540c00d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3355 = VPMULLDrm
+ { 3356, 3, 1, 0, "VPMULLDrr", 0|(1<<TID::Commutable), 0x540c00d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3356 = VPMULLDrr
+ { 3357, 7, 1, 0, "VPMULLWrm", 0|(1<<TID::MayLoad), 0x5d5c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3357 = VPMULLWrm
+ { 3358, 3, 1, 0, "VPMULLWrr", 0|(1<<TID::Commutable), 0x5d5c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3358 = VPMULLWrr
+ { 3359, 7, 1, 0, "VPMULUDQrm", 0|(1<<TID::MayLoad), 0x5f4c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3359 = VPMULUDQrm
+ { 3360, 3, 1, 0, "VPMULUDQrr", 0|(1<<TID::Commutable), 0x5f4c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3360 = VPMULUDQrr
+ { 3361, 7, 1, 0, "VPORrm", 0|(1<<TID::MayLoad), 0x5ebc00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3361 = VPORrm
+ { 3362, 3, 1, 0, "VPORrr", 0|(1<<TID::Commutable), 0x5ebc00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3362 = VPORrr
+ { 3363, 7, 1, 0, "VPSADBWrm", 0|(1<<TID::MayLoad), 0x5f6c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3363 = VPSADBWrm
+ { 3364, 3, 1, 0, "VPSADBWrr", 0|(1<<TID::Commutable), 0x5f6c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3364 = VPSADBWrr
+ { 3365, 7, 1, 0, "VPSHUFBrm128", 0|(1<<TID::MayLoad), 0x500c02d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3365 = VPSHUFBrm128
+ { 3366, 3, 1, 0, "VPSHUFBrr128", 0, 0x500c02d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3366 = VPSHUFBrr128
+ { 3367, 7, 1, 0, "VPSHUFDmi", 0|(1<<TID::MayLoad), 0x170c02046ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #3367 = VPSHUFDmi
+ { 3368, 3, 1, 0, "VPSHUFDri", 0, 0x170c02045ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3368 = VPSHUFDri
+ { 3369, 7, 1, 0, "VPSHUFHWmi", 0|(1<<TID::MayLoad), 0x170c02c06ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #3369 = VPSHUFHWmi
+ { 3370, 3, 1, 0, "VPSHUFHWri", 0, 0x170c02c05ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3370 = VPSHUFHWri
+ { 3371, 7, 1, 0, "VPSHUFLWmi", 0|(1<<TID::MayLoad), 0x170c02b06ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #3371 = VPSHUFLWmi
+ { 3372, 3, 1, 0, "VPSHUFLWri", 0, 0x170c02b05ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3372 = VPSHUFLWri
+ { 3373, 7, 1, 0, "VPSIGNBrm128", 0|(1<<TID::MayLoad), 0x508c02d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3373 = VPSIGNBrm128
+ { 3374, 3, 1, 0, "VPSIGNBrr128", 0, 0x508c02d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3374 = VPSIGNBrr128
+ { 3375, 7, 1, 0, "VPSIGNDrm128", 0|(1<<TID::MayLoad), 0x50ac02d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3375 = VPSIGNDrm128
+ { 3376, 3, 1, 0, "VPSIGNDrr128", 0, 0x50ac02d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3376 = VPSIGNDrr128
+ { 3377, 7, 1, 0, "VPSIGNWrm128", 0|(1<<TID::MayLoad), 0x509c02d46ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3377 = VPSIGNWrm128
+ { 3378, 3, 1, 0, "VPSIGNWrr128", 0, 0x509c02d45ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3378 = VPSIGNWrr128
+ { 3379, 3, 1, 0, "VPSLLDQri", 0, 0x573c02157ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3379 = VPSLLDQri
+ { 3380, 3, 1, 0, "VPSLLDri", 0, 0x572c02156ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3380 = VPSLLDri
+ { 3381, 7, 1, 0, "VPSLLDrm", 0|(1<<TID::MayLoad), 0x5f2c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3381 = VPSLLDrm
+ { 3382, 3, 1, 0, "VPSLLDrr", 0, 0x5f2c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3382 = VPSLLDrr
+ { 3383, 3, 1, 0, "VPSLLQri", 0, 0x573c02156ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3383 = VPSLLQri
+ { 3384, 7, 1, 0, "VPSLLQrm", 0|(1<<TID::MayLoad), 0x5f3c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3384 = VPSLLQrm
+ { 3385, 3, 1, 0, "VPSLLQrr", 0, 0x5f3c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3385 = VPSLLQrr
+ { 3386, 3, 1, 0, "VPSLLWri", 0, 0x571c02156ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3386 = VPSLLWri
+ { 3387, 7, 1, 0, "VPSLLWrm", 0|(1<<TID::MayLoad), 0x5f1c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3387 = VPSLLWrm
+ { 3388, 3, 1, 0, "VPSLLWrr", 0, 0x5f1c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3388 = VPSLLWrr
+ { 3389, 3, 1, 0, "VPSRADri", 0, 0x572c02154ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3389 = VPSRADri
+ { 3390, 7, 1, 0, "VPSRADrm", 0|(1<<TID::MayLoad), 0x5e2c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3390 = VPSRADrm
+ { 3391, 3, 1, 0, "VPSRADrr", 0, 0x5e2c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3391 = VPSRADrr
+ { 3392, 3, 1, 0, "VPSRAWri", 0, 0x571c02154ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3392 = VPSRAWri
+ { 3393, 7, 1, 0, "VPSRAWrm", 0|(1<<TID::MayLoad), 0x5e1c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3393 = VPSRAWrm
+ { 3394, 3, 1, 0, "VPSRAWrr", 0, 0x5e1c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3394 = VPSRAWrr
+ { 3395, 3, 1, 0, "VPSRLDQri", 0, 0x573c02153ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3395 = VPSRLDQri
+ { 3396, 3, 1, 0, "VPSRLDri", 0, 0x572c02152ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3396 = VPSRLDri
+ { 3397, 7, 1, 0, "VPSRLDrm", 0|(1<<TID::MayLoad), 0x5d2c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3397 = VPSRLDrm
+ { 3398, 3, 1, 0, "VPSRLDrr", 0, 0x5d2c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3398 = VPSRLDrr
+ { 3399, 3, 1, 0, "VPSRLQri", 0, 0x573c02152ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3399 = VPSRLQri
+ { 3400, 7, 1, 0, "VPSRLQrm", 0|(1<<TID::MayLoad), 0x5d3c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3400 = VPSRLQrm
+ { 3401, 3, 1, 0, "VPSRLQrr", 0, 0x5d3c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3401 = VPSRLQrr
+ { 3402, 3, 1, 0, "VPSRLWri", 0, 0x571c02152ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3402 = VPSRLWri
+ { 3403, 7, 1, 0, "VPSRLWrm", 0|(1<<TID::MayLoad), 0x5d1c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3403 = VPSRLWrm
+ { 3404, 3, 1, 0, "VPSRLWrr", 0, 0x5d1c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3404 = VPSRLWrr
+ { 3405, 7, 1, 0, "VPSUBBrm", 0|(1<<TID::MayLoad), 0x5f8c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3405 = VPSUBBrm
+ { 3406, 3, 1, 0, "VPSUBBrr", 0, 0x5f8c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3406 = VPSUBBrr
+ { 3407, 7, 1, 0, "VPSUBDrm", 0|(1<<TID::MayLoad), 0x5fac00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3407 = VPSUBDrm
+ { 3408, 3, 1, 0, "VPSUBDrr", 0, 0x5fac00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3408 = VPSUBDrr
+ { 3409, 7, 1, 0, "VPSUBQrm", 0|(1<<TID::MayLoad), 0x5fbc00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3409 = VPSUBQrm
+ { 3410, 3, 1, 0, "VPSUBQrr", 0, 0x5fbc00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3410 = VPSUBQrr
+ { 3411, 7, 1, 0, "VPSUBSBrm", 0|(1<<TID::MayLoad), 0x5e8c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3411 = VPSUBSBrm
+ { 3412, 3, 1, 0, "VPSUBSBrr", 0, 0x5e8c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3412 = VPSUBSBrr
+ { 3413, 7, 1, 0, "VPSUBSWrm", 0|(1<<TID::MayLoad), 0x5e9c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3413 = VPSUBSWrm
+ { 3414, 3, 1, 0, "VPSUBSWrr", 0, 0x5e9c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3414 = VPSUBSWrr
+ { 3415, 7, 1, 0, "VPSUBUSBrm", 0|(1<<TID::MayLoad), 0x5d8c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3415 = VPSUBUSBrm
+ { 3416, 3, 1, 0, "VPSUBUSBrr", 0, 0x5d8c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3416 = VPSUBUSBrr
+ { 3417, 7, 1, 0, "VPSUBUSWrm", 0|(1<<TID::MayLoad), 0x5d9c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3417 = VPSUBUSWrm
+ { 3418, 3, 1, 0, "VPSUBUSWrr", 0, 0x5d9c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3418 = VPSUBUSWrr
+ { 3419, 7, 1, 0, "VPSUBWrm", 0|(1<<TID::MayLoad), 0x5f9c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3419 = VPSUBWrm
+ { 3420, 3, 1, 0, "VPSUBWrr", 0, 0x5f9c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3420 = VPSUBWrr
+ { 3421, 6, 0, 0, "VPTESTYrm", 0|(1<<TID::MayLoad), 0x117c00d46ULL, NULL, ImplicitList1, Barriers1, OperandInfo237 }, // Inst #3421 = VPTESTYrm
+ { 3422, 2, 0, 0, "VPTESTYrr", 0, 0x117c00d45ULL, NULL, ImplicitList1, Barriers1, OperandInfo241 }, // Inst #3422 = VPTESTYrr
+ { 3423, 6, 0, 0, "VPTESTrm", 0|(1<<TID::MayLoad), 0x117c00d46ULL, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #3423 = VPTESTrm
+ { 3424, 2, 0, 0, "VPTESTrr", 0, 0x117c00d45ULL, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #3424 = VPTESTrr
+ { 3425, 7, 1, 0, "VPUNPCKHBWrm", 0|(1<<TID::MayLoad), 0x568c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3425 = VPUNPCKHBWrm
+ { 3426, 3, 1, 0, "VPUNPCKHBWrr", 0, 0x568c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3426 = VPUNPCKHBWrr
+ { 3427, 7, 1, 0, "VPUNPCKHDQrm", 0|(1<<TID::MayLoad), 0x56ac00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3427 = VPUNPCKHDQrm
+ { 3428, 3, 1, 0, "VPUNPCKHDQrr", 0, 0x56ac00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3428 = VPUNPCKHDQrr
+ { 3429, 7, 1, 0, "VPUNPCKHQDQrm", 0|(1<<TID::MayLoad), 0x56dc00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3429 = VPUNPCKHQDQrm
+ { 3430, 3, 1, 0, "VPUNPCKHQDQrr", 0, 0x56dc00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3430 = VPUNPCKHQDQrr
+ { 3431, 7, 1, 0, "VPUNPCKHWDrm", 0|(1<<TID::MayLoad), 0x569c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3431 = VPUNPCKHWDrm
+ { 3432, 3, 1, 0, "VPUNPCKHWDrr", 0, 0x569c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3432 = VPUNPCKHWDrr
+ { 3433, 7, 1, 0, "VPUNPCKLBWrm", 0|(1<<TID::MayLoad), 0x560c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3433 = VPUNPCKLBWrm
+ { 3434, 3, 1, 0, "VPUNPCKLBWrr", 0, 0x560c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3434 = VPUNPCKLBWrr
+ { 3435, 7, 1, 0, "VPUNPCKLDQrm", 0|(1<<TID::MayLoad), 0x562c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3435 = VPUNPCKLDQrm
+ { 3436, 3, 1, 0, "VPUNPCKLDQrr", 0, 0x562c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3436 = VPUNPCKLDQrr
+ { 3437, 7, 1, 0, "VPUNPCKLQDQrm", 0|(1<<TID::MayLoad), 0x56cc00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3437 = VPUNPCKLQDQrm
+ { 3438, 3, 1, 0, "VPUNPCKLQDQrr", 0, 0x56cc00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3438 = VPUNPCKLQDQrr
+ { 3439, 7, 1, 0, "VPUNPCKLWDrm", 0|(1<<TID::MayLoad), 0x561c00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3439 = VPUNPCKLWDrm
+ { 3440, 3, 1, 0, "VPUNPCKLWDrr", 0, 0x561c00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3440 = VPUNPCKLWDrr
+ { 3441, 7, 1, 0, "VPXORrm", 0|(1<<TID::MayLoad), 0x5efc00146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3441 = VPXORrm
+ { 3442, 3, 1, 0, "VPXORrr", 0|(1<<TID::Commutable), 0x5efc00145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3442 = VPXORrr
+ { 3443, 6, 1, 0, "VRCPPSYm", 0|(1<<TID::MayLoad), 0x153400106ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #3443 = VRCPPSYm
+ { 3444, 6, 1, 0, "VRCPPSYm_Int", 0|(1<<TID::MayLoad), 0x153400106ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #3444 = VRCPPSYm_Int
+ { 3445, 2, 1, 0, "VRCPPSYr", 0, 0x153400105ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #3445 = VRCPPSYr
+ { 3446, 2, 1, 0, "VRCPPSYr_Int", 0, 0x153400105ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #3446 = VRCPPSYr_Int
+ { 3447, 6, 1, 0, "VRCPPSm", 0|(1<<TID::MayLoad), 0x153400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3447 = VRCPPSm
+ { 3448, 6, 1, 0, "VRCPPSm_Int", 0|(1<<TID::MayLoad), 0x153400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3448 = VRCPPSm_Int
+ { 3449, 2, 1, 0, "VRCPPSr", 0, 0x153400105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3449 = VRCPPSr
+ { 3450, 2, 1, 0, "VRCPPSr_Int", 0, 0x153400105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3450 = VRCPPSr_Int
+ { 3451, 7, 1, 0, "VRCPSSm", 0|(1<<TID::UnmodeledSideEffects), 0x553000c06ULL, NULL, NULL, NULL, OperandInfo228 }, // Inst #3451 = VRCPSSm
+ { 3452, 6, 1, 0, "VRCPSSm_Int", 0|(1<<TID::MayLoad), 0x553000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3452 = VRCPSSm_Int
+ { 3453, 3, 1, 0, "VRCPSSr", 0|(1<<TID::UnmodeledSideEffects), 0x553000c05ULL, NULL, NULL, NULL, OperandInfo229 }, // Inst #3453 = VRCPSSr
+ { 3454, 2, 1, 0, "VRCPSSr_Int", 0, 0x553000c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3454 = VRCPSSr_Int
+ { 3455, 7, 1, 0, "VROUNDPDm", 0|(1<<TID::UnmodeledSideEffects), 0x109c02e46ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #3455 = VROUNDPDm
+ { 3456, 7, 1, 0, "VROUNDPDm_Int", 0|(1<<TID::MayLoad), 0x109c02e46ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #3456 = VROUNDPDm_Int
+ { 3457, 3, 1, 0, "VROUNDPDr", 0|(1<<TID::UnmodeledSideEffects), 0x109c02e45ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3457 = VROUNDPDr
+ { 3458, 3, 1, 0, "VROUNDPDr_Int", 0, 0x109c02e45ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3458 = VROUNDPDr_Int
+ { 3459, 7, 1, 0, "VROUNDPSm", 0|(1<<TID::UnmodeledSideEffects), 0x108002e46ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #3459 = VROUNDPSm
+ { 3460, 7, 1, 0, "VROUNDPSm_Int", 0|(1<<TID::MayLoad), 0x108002e46ULL, NULL, NULL, NULL, OperandInfo44 }, // Inst #3460 = VROUNDPSm_Int
+ { 3461, 3, 1, 0, "VROUNDPSr", 0|(1<<TID::UnmodeledSideEffects), 0x108c02e45ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3461 = VROUNDPSr
+ { 3462, 3, 1, 0, "VROUNDPSr_Int", 0, 0x108c02e45ULL, NULL, NULL, NULL, OperandInfo45 }, // Inst #3462 = VROUNDPSr_Int
+ { 3463, 8, 1, 0, "VROUNDSDm", 0|(1<<TID::UnmodeledSideEffects), 0x50bc02e46ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #3463 = VROUNDSDm
+ { 3464, 8, 1, 0, "VROUNDSDm_Int", 0|(1<<TID::MayLoad), 0x50bc02e46ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #3464 = VROUNDSDm_Int
+ { 3465, 4, 1, 0, "VROUNDSDr", 0|(1<<TID::UnmodeledSideEffects), 0x50bc02e45ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #3465 = VROUNDSDr
+ { 3466, 4, 1, 0, "VROUNDSDr_Int", 0, 0x50bc02e45ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #3466 = VROUNDSDr_Int
+ { 3467, 8, 1, 0, "VROUNDSSm", 0|(1<<TID::UnmodeledSideEffects), 0x50ac02e46ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #3467 = VROUNDSSm
+ { 3468, 8, 1, 0, "VROUNDSSm_Int", 0|(1<<TID::MayLoad), 0x50ac02e46ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #3468 = VROUNDSSm_Int
+ { 3469, 4, 1, 0, "VROUNDSSr", 0|(1<<TID::UnmodeledSideEffects), 0x50ac02e45ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #3469 = VROUNDSSr
+ { 3470, 4, 1, 0, "VROUNDSSr_Int", 0, 0x50ac02e45ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #3470 = VROUNDSSr_Int
+ { 3471, 7, 1, 0, "VROUNDYPDm", 0|(1<<TID::UnmodeledSideEffects), 0x109c02e46ULL, NULL, NULL, NULL, OperandInfo261 }, // Inst #3471 = VROUNDYPDm
+ { 3472, 7, 1, 0, "VROUNDYPDm_Int", 0|(1<<TID::MayLoad), 0x109c02e46ULL, NULL, NULL, NULL, OperandInfo261 }, // Inst #3472 = VROUNDYPDm_Int
+ { 3473, 3, 1, 0, "VROUNDYPDr", 0|(1<<TID::UnmodeledSideEffects), 0x109c02e45ULL, NULL, NULL, NULL, OperandInfo262 }, // Inst #3473 = VROUNDYPDr
+ { 3474, 3, 1, 0, "VROUNDYPDr_Int", 0, 0x109c02e45ULL, NULL, NULL, NULL, OperandInfo262 }, // Inst #3474 = VROUNDYPDr_Int
+ { 3475, 7, 1, 0, "VROUNDYPSm", 0|(1<<TID::UnmodeledSideEffects), 0x108002e46ULL, NULL, NULL, NULL, OperandInfo261 }, // Inst #3475 = VROUNDYPSm
+ { 3476, 7, 1, 0, "VROUNDYPSm_Int", 0|(1<<TID::MayLoad), 0x108002e46ULL, NULL, NULL, NULL, OperandInfo261 }, // Inst #3476 = VROUNDYPSm_Int
+ { 3477, 3, 1, 0, "VROUNDYPSr", 0|(1<<TID::UnmodeledSideEffects), 0x108c02e45ULL, NULL, NULL, NULL, OperandInfo262 }, // Inst #3477 = VROUNDYPSr
+ { 3478, 3, 1, 0, "VROUNDYPSr_Int", 0, 0x108c02e45ULL, NULL, NULL, NULL, OperandInfo262 }, // Inst #3478 = VROUNDYPSr_Int
+ { 3479, 6, 1, 0, "VRSQRTPSYm", 0|(1<<TID::MayLoad), 0x152400106ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #3479 = VRSQRTPSYm
+ { 3480, 6, 1, 0, "VRSQRTPSYm_Int", 0|(1<<TID::MayLoad), 0x152400106ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #3480 = VRSQRTPSYm_Int
+ { 3481, 2, 1, 0, "VRSQRTPSYr", 0, 0x152400105ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #3481 = VRSQRTPSYr
+ { 3482, 2, 1, 0, "VRSQRTPSYr_Int", 0, 0x152400105ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #3482 = VRSQRTPSYr_Int
+ { 3483, 6, 1, 0, "VRSQRTPSm", 0|(1<<TID::MayLoad), 0x152400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3483 = VRSQRTPSm
+ { 3484, 6, 1, 0, "VRSQRTPSm_Int", 0|(1<<TID::MayLoad), 0x152400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3484 = VRSQRTPSm_Int
+ { 3485, 2, 1, 0, "VRSQRTPSr", 0, 0x152400105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3485 = VRSQRTPSr
+ { 3486, 2, 1, 0, "VRSQRTPSr_Int", 0, 0x152400105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3486 = VRSQRTPSr_Int
+ { 3487, 7, 1, 0, "VRSQRTSSm", 0|(1<<TID::UnmodeledSideEffects), 0x552000c06ULL, NULL, NULL, NULL, OperandInfo228 }, // Inst #3487 = VRSQRTSSm
+ { 3488, 6, 1, 0, "VRSQRTSSm_Int", 0|(1<<TID::MayLoad), 0x552000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3488 = VRSQRTSSm_Int
+ { 3489, 3, 1, 0, "VRSQRTSSr", 0|(1<<TID::UnmodeledSideEffects), 0x552000c05ULL, NULL, NULL, NULL, OperandInfo229 }, // Inst #3489 = VRSQRTSSr
+ { 3490, 2, 1, 0, "VRSQRTSSr_Int", 0, 0x552000c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3490 = VRSQRTSSr_Int
+ { 3491, 8, 1, 0, "VSHUFPDYrmi", 0|(1<<TID::MayLoad), 0x5c6802046ULL, NULL, NULL, NULL, OperandInfo231 }, // Inst #3491 = VSHUFPDYrmi
+ { 3492, 4, 1, 0, "VSHUFPDYrri", 0, 0x5c6802045ULL, NULL, NULL, NULL, OperandInfo232 }, // Inst #3492 = VSHUFPDYrri
+ { 3493, 8, 1, 0, "VSHUFPDrmi", 0|(1<<TID::MayLoad), 0x5c6802046ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #3493 = VSHUFPDrmi
+ { 3494, 4, 1, 0, "VSHUFPDrri", 0, 0x5c6802045ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #3494 = VSHUFPDrri
+ { 3495, 8, 1, 0, "VSHUFPSYrmi", 0|(1<<TID::MayLoad), 0x5c6402006ULL, NULL, NULL, NULL, OperandInfo231 }, // Inst #3495 = VSHUFPSYrmi
+ { 3496, 4, 1, 0, "VSHUFPSYrri", 0, 0x5c6402005ULL, NULL, NULL, NULL, OperandInfo232 }, // Inst #3496 = VSHUFPSYrri
+ { 3497, 8, 1, 0, "VSHUFPSrmi", 0|(1<<TID::MayLoad), 0x5c6402006ULL, NULL, NULL, NULL, OperandInfo136 }, // Inst #3497 = VSHUFPSrmi
+ { 3498, 4, 1, 0, "VSHUFPSrri", 0, 0x5c6402005ULL, NULL, NULL, NULL, OperandInfo80 }, // Inst #3498 = VSHUFPSrri
+ { 3499, 6, 1, 0, "VSQRTPDYm", 0|(1<<TID::MayLoad), 0x151800146ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #3499 = VSQRTPDYm
+ { 3500, 6, 1, 0, "VSQRTPDYm_Int", 0|(1<<TID::MayLoad), 0x151800146ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #3500 = VSQRTPDYm_Int
+ { 3501, 2, 1, 0, "VSQRTPDYr", 0, 0x151800145ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #3501 = VSQRTPDYr
+ { 3502, 2, 1, 0, "VSQRTPDYr_Int", 0, 0x151800145ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #3502 = VSQRTPDYr_Int
+ { 3503, 6, 1, 0, "VSQRTPDm", 0|(1<<TID::MayLoad), 0x151800146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3503 = VSQRTPDm
+ { 3504, 6, 1, 0, "VSQRTPDm_Int", 0|(1<<TID::MayLoad), 0x151800146ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3504 = VSQRTPDm_Int
+ { 3505, 2, 1, 0, "VSQRTPDr", 0, 0x151800145ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3505 = VSQRTPDr
+ { 3506, 2, 1, 0, "VSQRTPDr_Int", 0, 0x151800145ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3506 = VSQRTPDr_Int
+ { 3507, 6, 1, 0, "VSQRTPSYm", 0|(1<<TID::MayLoad), 0x151400106ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #3507 = VSQRTPSYm
+ { 3508, 6, 1, 0, "VSQRTPSYm_Int", 0|(1<<TID::MayLoad), 0x151400106ULL, NULL, NULL, NULL, OperandInfo237 }, // Inst #3508 = VSQRTPSYm_Int
+ { 3509, 2, 1, 0, "VSQRTPSYr", 0, 0x151400105ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #3509 = VSQRTPSYr
+ { 3510, 2, 1, 0, "VSQRTPSYr_Int", 0, 0x151400105ULL, NULL, NULL, NULL, OperandInfo241 }, // Inst #3510 = VSQRTPSYr_Int
+ { 3511, 6, 1, 0, "VSQRTPSm", 0|(1<<TID::MayLoad), 0x151400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3511 = VSQRTPSm
+ { 3512, 6, 1, 0, "VSQRTPSm_Int", 0|(1<<TID::MayLoad), 0x151400106ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3512 = VSQRTPSm_Int
+ { 3513, 2, 1, 0, "VSQRTPSr", 0, 0x151400105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3513 = VSQRTPSr
+ { 3514, 2, 1, 0, "VSQRTPSr_Int", 0, 0x151400105ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3514 = VSQRTPSr_Int
+ { 3515, 7, 1, 0, "VSQRTSDm", 0|(1<<TID::UnmodeledSideEffects), 0x551000b06ULL, NULL, NULL, NULL, OperandInfo226 }, // Inst #3515 = VSQRTSDm
+ { 3516, 6, 1, 0, "VSQRTSDm_Int", 0|(1<<TID::MayLoad), 0x551000b06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3516 = VSQRTSDm_Int
+ { 3517, 3, 1, 0, "VSQRTSDr", 0|(1<<TID::UnmodeledSideEffects), 0x551000b05ULL, NULL, NULL, NULL, OperandInfo227 }, // Inst #3517 = VSQRTSDr
+ { 3518, 2, 1, 0, "VSQRTSDr_Int", 0, 0x551000b05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3518 = VSQRTSDr_Int
+ { 3519, 7, 1, 0, "VSQRTSSm", 0|(1<<TID::UnmodeledSideEffects), 0x551000c06ULL, NULL, NULL, NULL, OperandInfo228 }, // Inst #3519 = VSQRTSSm
+ { 3520, 6, 1, 0, "VSQRTSSm_Int", 0|(1<<TID::MayLoad), 0x551000c06ULL, NULL, NULL, NULL, OperandInfo42 }, // Inst #3520 = VSQRTSSm_Int
+ { 3521, 3, 1, 0, "VSQRTSSr", 0|(1<<TID::UnmodeledSideEffects), 0x551000c05ULL, NULL, NULL, NULL, OperandInfo229 }, // Inst #3521 = VSQRTSSr
+ { 3522, 2, 1, 0, "VSQRTSSr_Int", 0, 0x551000c05ULL, NULL, NULL, NULL, OperandInfo43 }, // Inst #3522 = VSQRTSSr_Int
+ { 3523, 5, 0, 0, "VSTMXCSR", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x1ae40001bULL, NULL, NULL, NULL, OperandInfo34 }, // Inst #3523 = VSTMXCSR
+ { 3524, 7, 1, 0, "VSUBPDYrm", 0|(1<<TID::MayLoad), 0x55c800146ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #3524 = VSUBPDYrm
+ { 3525, 3, 1, 0, "VSUBPDYrr", 0, 0x55c800145ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #3525 = VSUBPDYrr
+ { 3526, 7, 1, 0, "VSUBPDrm", 0|(1<<TID::MayLoad), 0x55c800146ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3526 = VSUBPDrm
+ { 3527, 3, 1, 0, "VSUBPDrr", 0, 0x55c800145ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3527 = VSUBPDrr
+ { 3528, 7, 1, 0, "VSUBPSYrm", 0|(1<<TID::MayLoad), 0x55c400106ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #3528 = VSUBPSYrm
+ { 3529, 3, 1, 0, "VSUBPSYrr", 0, 0x55c400105ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #3529 = VSUBPSYrr
+ { 3530, 7, 1, 0, "VSUBPSrm", 0|(1<<TID::MayLoad), 0x55c400106ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3530 = VSUBPSrm
+ { 3531, 3, 1, 0, "VSUBPSrr", 0, 0x55c400105ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3531 = VSUBPSrr
+ { 3532, 7, 1, 0, "VSUBSDrm", 0|(1<<TID::MayLoad), 0x55c000b06ULL, NULL, NULL, NULL, OperandInfo226 }, // Inst #3532 = VSUBSDrm
+ { 3533, 7, 1, 0, "VSUBSDrm_Int", 0|(1<<TID::MayLoad), 0x55c000b06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3533 = VSUBSDrm_Int
+ { 3534, 3, 1, 0, "VSUBSDrr", 0, 0x55c000b05ULL, NULL, NULL, NULL, OperandInfo227 }, // Inst #3534 = VSUBSDrr
+ { 3535, 3, 1, 0, "VSUBSDrr_Int", 0, 0x55c000b05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3535 = VSUBSDrr_Int
+ { 3536, 7, 1, 0, "VSUBSSrm", 0|(1<<TID::MayLoad), 0x55c000c06ULL, NULL, NULL, NULL, OperandInfo228 }, // Inst #3536 = VSUBSSrm
+ { 3537, 7, 1, 0, "VSUBSSrm_Int", 0|(1<<TID::MayLoad), 0x55c000c06ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3537 = VSUBSSrm_Int
+ { 3538, 3, 1, 0, "VSUBSSrr", 0, 0x55c000c05ULL, NULL, NULL, NULL, OperandInfo229 }, // Inst #3538 = VSUBSSrr
+ { 3539, 3, 1, 0, "VSUBSSrr_Int", 0, 0x55c000c05ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3539 = VSUBSSrr_Int
+ { 3540, 6, 0, 0, "VTESTPDYrm", 0|(1<<TID::MayLoad), 0x10fc00d46ULL, NULL, ImplicitList1, Barriers1, OperandInfo237 }, // Inst #3540 = VTESTPDYrm
+ { 3541, 2, 0, 0, "VTESTPDYrr", 0, 0x10fc00d45ULL, NULL, ImplicitList1, Barriers1, OperandInfo241 }, // Inst #3541 = VTESTPDYrr
+ { 3542, 6, 0, 0, "VTESTPDrm", 0|(1<<TID::MayLoad), 0x10fc00d46ULL, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #3542 = VTESTPDrm
+ { 3543, 2, 0, 0, "VTESTPDrr", 0, 0x10fc00d45ULL, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #3543 = VTESTPDrr
+ { 3544, 6, 0, 0, "VTESTPSYrm", 0|(1<<TID::MayLoad), 0x10ec00d46ULL, NULL, ImplicitList1, Barriers1, OperandInfo237 }, // Inst #3544 = VTESTPSYrm
+ { 3545, 2, 0, 0, "VTESTPSYrr", 0, 0x10ec00d45ULL, NULL, ImplicitList1, Barriers1, OperandInfo241 }, // Inst #3545 = VTESTPSYrr
+ { 3546, 6, 0, 0, "VTESTPSrm", 0|(1<<TID::MayLoad), 0x10ec00d46ULL, NULL, ImplicitList1, Barriers1, OperandInfo42 }, // Inst #3546 = VTESTPSrm
+ { 3547, 2, 0, 0, "VTESTPSrr", 0, 0x10ec00d45ULL, NULL, ImplicitList1, Barriers1, OperandInfo43 }, // Inst #3547 = VTESTPSrr
+ { 3548, 6, 0, 0, "VUCOMISDrm", 0|(1<<TID::MayLoad), 0x12e800046ULL, NULL, ImplicitList1, Barriers1, OperandInfo94 }, // Inst #3548 = VUCOMISDrm
+ { 3549, 2, 0, 0, "VUCOMISDrr", 0, 0x12e800045ULL, NULL, ImplicitList1, Barriers1, OperandInfo118 }, // Inst #3549 = VUCOMISDrr
+ { 3550, 6, 0, 0, "VUCOMISSrm", 0|(1<<TID::MayLoad), 0x12e400006ULL, NULL, ImplicitList1, Barriers1, OperandInfo92 }, // Inst #3550 = VUCOMISSrm
+ { 3551, 2, 0, 0, "VUCOMISSrr", 0, 0x12e400005ULL, NULL, ImplicitList1, Barriers1, OperandInfo119 }, // Inst #3551 = VUCOMISSrr
+ { 3552, 7, 1, 0, "VUNPCKHPDYrm", 0|(1<<TID::MayLoad), 0x515800046ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #3552 = VUNPCKHPDYrm
+ { 3553, 3, 1, 0, "VUNPCKHPDYrr", 0, 0x515800045ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #3553 = VUNPCKHPDYrr
+ { 3554, 7, 1, 0, "VUNPCKHPDrm", 0|(1<<TID::MayLoad), 0x515800046ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3554 = VUNPCKHPDrm
+ { 3555, 3, 1, 0, "VUNPCKHPDrr", 0, 0x515800045ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3555 = VUNPCKHPDrr
+ { 3556, 7, 1, 0, "VUNPCKHPSYrm", 0|(1<<TID::MayLoad), 0x515400006ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #3556 = VUNPCKHPSYrm
+ { 3557, 3, 1, 0, "VUNPCKHPSYrr", 0, 0x515400005ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #3557 = VUNPCKHPSYrr
+ { 3558, 7, 1, 0, "VUNPCKHPSrm", 0|(1<<TID::MayLoad), 0x515400006ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3558 = VUNPCKHPSrm
+ { 3559, 3, 1, 0, "VUNPCKHPSrr", 0, 0x515400005ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3559 = VUNPCKHPSrr
+ { 3560, 7, 1, 0, "VUNPCKLPDYrm", 0|(1<<TID::MayLoad), 0x514800046ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #3560 = VUNPCKLPDYrm
+ { 3561, 3, 1, 0, "VUNPCKLPDYrr", 0, 0x514800045ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #3561 = VUNPCKLPDYrr
+ { 3562, 7, 1, 0, "VUNPCKLPDrm", 0|(1<<TID::MayLoad), 0x514800046ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3562 = VUNPCKLPDrm
+ { 3563, 3, 1, 0, "VUNPCKLPDrr", 0, 0x514800045ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3563 = VUNPCKLPDrr
+ { 3564, 7, 1, 0, "VUNPCKLPSYrm", 0|(1<<TID::MayLoad), 0x514400006ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #3564 = VUNPCKLPSYrm
+ { 3565, 3, 1, 0, "VUNPCKLPSYrr", 0, 0x514400005ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #3565 = VUNPCKLPSYrr
+ { 3566, 7, 1, 0, "VUNPCKLPSrm", 0|(1<<TID::MayLoad), 0x514400006ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3566 = VUNPCKLPSrm
+ { 3567, 3, 1, 0, "VUNPCKLPSrr", 0, 0x514400005ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3567 = VUNPCKLPSrr
+ { 3568, 7, 1, 0, "VXORPDYrm", 0|(1<<TID::UnmodeledSideEffects), 0x557800046ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #3568 = VXORPDYrm
+ { 3569, 3, 1, 0, "VXORPDYrr", 0|(1<<TID::Commutable)|(1<<TID::UnmodeledSideEffects), 0x557800045ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #3569 = VXORPDYrr
+ { 3570, 7, 1, 0, "VXORPDrm", 0|(1<<TID::UnmodeledSideEffects), 0x557800046ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3570 = VXORPDrm
+ { 3571, 3, 1, 0, "VXORPDrr", 0|(1<<TID::Commutable)|(1<<TID::UnmodeledSideEffects), 0x557800045ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3571 = VXORPDrr
+ { 3572, 7, 1, 0, "VXORPSYrm", 0|(1<<TID::UnmodeledSideEffects), 0x557400006ULL, NULL, NULL, NULL, OperandInfo224 }, // Inst #3572 = VXORPSYrm
+ { 3573, 3, 1, 0, "VXORPSYrr", 0|(1<<TID::Commutable)|(1<<TID::UnmodeledSideEffects), 0x557400005ULL, NULL, NULL, NULL, OperandInfo225 }, // Inst #3573 = VXORPSYrr
+ { 3574, 7, 1, 0, "VXORPSrm", 0|(1<<TID::UnmodeledSideEffects), 0x557400006ULL, NULL, NULL, NULL, OperandInfo137 }, // Inst #3574 = VXORPSrm
+ { 3575, 3, 1, 0, "VXORPSrr", 0|(1<<TID::Commutable)|(1<<TID::UnmodeledSideEffects), 0x557400005ULL, NULL, NULL, NULL, OperandInfo138 }, // Inst #3575 = VXORPSrr
+ { 3576, 0, 0, 0, "VZEROALL", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x1177000001ULL, NULL, NULL, NULL, 0 }, // Inst #3576 = VZEROALL
+ { 3577, 0, 0, 0, "VZEROUPPER", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0x177000001ULL, NULL, NULL, NULL, 0 }, // Inst #3577 = VZEROUPPER
+ { 3578, 1, 1, 0, "V_SET0PD", 0|(1<<TID::FoldableAsLoad)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0x57800160ULL, NULL, NULL, NULL, OperandInfo51 }, // Inst #3578 = V_SET0PD
+ { 3579, 1, 1, 0, "V_SET0PI", 0|(1<<TID::FoldableAsLoad)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0xefc00160ULL, NULL, NULL, NULL, OperandInfo51 }, // Inst #3579 = V_SET0PI
+ { 3580, 1, 1, 0, "V_SET0PS", 0|(1<<TID::FoldableAsLoad)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0x57400120ULL, NULL, NULL, NULL, OperandInfo51 }, // Inst #3580 = V_SET0PS
+ { 3581, 1, 1, 0, "V_SETALLONES", 0|(1<<TID::FoldableAsLoad)|(1<<TID::Rematerializable)|(1<<TID::CheapAsAMove), 0x76c00160ULL, NULL, NULL, NULL, OperandInfo51 }, // Inst #3581 = V_SETALLONES
+ { 3582, 0, 0, 0, "WAIT", 0|(1<<TID::UnmodeledSideEffects), 0x9b000001ULL, NULL, NULL, NULL, 0 }, // Inst #3582 = WAIT
+ { 3583, 0, 0, 0, "WBINVD", 0|(1<<TID::UnmodeledSideEffects), 0x9000101ULL, NULL, NULL, NULL, 0 }, // Inst #3583 = WBINVD
+ { 3584, 5, 0, 0, "WINCALL64m", 0|(1<<TID::Call)|(1<<TID::MayLoad)|(1<<TID::Variadic), 0xff00001aULL, ImplicitList4, ImplicitList60, Barriers8, OperandInfo34 }, // Inst #3584 = WINCALL64m
+ { 3585, 1, 0, 0, "WINCALL64pcrel32", 0|(1<<TID::Call)|(1<<TID::Variadic)|(1<<TID::UnmodeledSideEffects), 0xe800c001ULL, ImplicitList4, ImplicitList60, Barriers8, OperandInfo2 }, // Inst #3585 = WINCALL64pcrel32
+ { 3586, 1, 0, 0, "WINCALL64r", 0|(1<<TID::Call)|(1<<TID::Variadic), 0xff000012ULL, ImplicitList4, ImplicitList60, Barriers8, OperandInfo67 }, // Inst #3586 = WINCALL64r
+ { 3587, 0, 0, 0, "WRMSR", 0|(1<<TID::UnmodeledSideEffects), 0x30000101ULL, NULL, NULL, NULL, 0 }, // Inst #3587 = WRMSR
+ { 3588, 6, 0, 0, "XADD16rm", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xc1000144ULL, NULL, NULL, NULL, OperandInfo11 }, // Inst #3588 = XADD16rm
+ { 3589, 2, 1, 0, "XADD16rr", 0|(1<<TID::UnmodeledSideEffects), 0xc1000143ULL, NULL, NULL, NULL, OperandInfo56 }, // Inst #3589 = XADD16rr
+ { 3590, 6, 0, 0, "XADD32rm", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xc1000104ULL, NULL, NULL, NULL, OperandInfo15 }, // Inst #3590 = XADD32rm
+ { 3591, 2, 1, 0, "XADD32rr", 0|(1<<TID::UnmodeledSideEffects), 0xc1000103ULL, NULL, NULL, NULL, OperandInfo58 }, // Inst #3591 = XADD32rr
+ { 3592, 6, 0, 0, "XADD64rm", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xc1001104ULL, NULL, NULL, NULL, OperandInfo19 }, // Inst #3592 = XADD64rm
+ { 3593, 2, 1, 0, "XADD64rr", 0|(1<<TID::UnmodeledSideEffects), 0xc1001103ULL, NULL, NULL, NULL, OperandInfo60 }, // Inst #3593 = XADD64rr
+ { 3594, 6, 0, 0, "XADD8rm", 0|(1<<TID::MayLoad)|(1<<TID::MayStore)|(1<<TID::UnmodeledSideEffects), 0xc0000104ULL, NULL, NULL, NULL, OperandInfo24 }, // Inst #3594 = XADD8rm
+ { 3595, 2, 1, 0, "XADD8rr", 0|(1<<TID::UnmodeledSideEffects), 0xc0000103ULL, NULL, NULL, NULL, OperandInfo83 }, // Inst #3595 = XADD8rr
+ { 3596, 1, 0, 0, "XCHG16ar", 0|(1<<TID::UnmodeledSideEffects), 0x90000042ULL, NULL, NULL, NULL, OperandInfo106 }, // Inst #3596 = XCHG16ar
+ { 3597, 7, 1, 0, "XCHG16rm", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x87000046ULL, NULL, NULL, NULL, OperandInfo13 }, // Inst #3597 = XCHG16rm
+ { 3598, 3, 1, 0, "XCHG16rr", 0|(1<<TID::UnmodeledSideEffects), 0x87000045ULL, NULL, NULL, NULL, OperandInfo14 }, // Inst #3598 = XCHG16rr
+ { 3599, 1, 0, 0, "XCHG32ar", 0|(1<<TID::UnmodeledSideEffects), 0x90000002ULL, NULL, NULL, NULL, OperandInfo66 }, // Inst #3599 = XCHG32ar
+ { 3600, 7, 1, 0, "XCHG32rm", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x87000006ULL, NULL, NULL, NULL, OperandInfo17 }, // Inst #3600 = XCHG32rm
+ { 3601, 3, 1, 0, "XCHG32rr", 0|(1<<TID::UnmodeledSideEffects), 0x87000005ULL, NULL, NULL, NULL, OperandInfo18 }, // Inst #3601 = XCHG32rr
+ { 3602, 1, 0, 0, "XCHG64ar", 0|(1<<TID::UnmodeledSideEffects), 0x90001002ULL, NULL, NULL, NULL, OperandInfo67 }, // Inst #3602 = XCHG64ar
+ { 3603, 7, 1, 0, "XCHG64rm", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x87001006ULL, NULL, NULL, NULL, OperandInfo21 }, // Inst #3603 = XCHG64rm
+ { 3604, 3, 1, 0, "XCHG64rr", 0|(1<<TID::UnmodeledSideEffects), 0x87001005ULL, NULL, NULL, NULL, OperandInfo22 }, // Inst #3604 = XCHG64rr
+ { 3605, 7, 1, 0, "XCHG8rm", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x86000006ULL, NULL, NULL, NULL, OperandInfo26 }, // Inst #3605 = XCHG8rm
+ { 3606, 3, 1, 0, "XCHG8rr", 0|(1<<TID::UnmodeledSideEffects), 0x86000005ULL, NULL, NULL, NULL, OperandInfo27 }, // Inst #3606 = XCHG8rr
+ { 3607, 1, 0, 0, "XCH_F", 0|(1<<TID::UnmodeledSideEffects), 0xc8000402ULL, NULL, NULL, NULL, OperandInfo35 }, // Inst #3607 = XCH_F
+ { 3608, 0, 0, 0, "XLAT", 0|(1<<TID::UnmodeledSideEffects), 0xd7000001ULL, NULL, NULL, NULL, 0 }, // Inst #3608 = XLAT
+ { 3609, 1, 0, 0, "XOR16i16", 0|(1<<TID::UnmodeledSideEffects), 0x35006041ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #3609 = XOR16i16
+ { 3610, 6, 0, 0, "XOR16mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100605eULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #3610 = XOR16mi
+ { 3611, 6, 0, 0, "XOR16mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8300205eULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #3611 = XOR16mi8
+ { 3612, 6, 0, 0, "XOR16mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x31000044ULL, NULL, ImplicitList1, Barriers1, OperandInfo11 }, // Inst #3612 = XOR16mr
+ { 3613, 3, 1, 0, "XOR16ri", 0, 0x81006056ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #3613 = XOR16ri
+ { 3614, 3, 1, 0, "XOR16ri8", 0, 0x83002056ULL, NULL, ImplicitList1, Barriers1, OperandInfo12 }, // Inst #3614 = XOR16ri8
+ { 3615, 7, 1, 0, "XOR16rm", 0|(1<<TID::MayLoad), 0x33000046ULL, NULL, ImplicitList1, Barriers1, OperandInfo13 }, // Inst #3615 = XOR16rm
+ { 3616, 3, 1, 0, "XOR16rr", 0|(1<<TID::Commutable), 0x31000043ULL, NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #3616 = XOR16rr
+ { 3617, 3, 1, 0, "XOR16rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x33000045ULL, NULL, ImplicitList1, Barriers1, OperandInfo14 }, // Inst #3617 = XOR16rr_REV
+ { 3618, 1, 0, 0, "XOR32i32", 0|(1<<TID::UnmodeledSideEffects), 0x3500a001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #3618 = XOR32i32
+ { 3619, 6, 0, 0, "XOR32mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100a01eULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #3619 = XOR32mi
+ { 3620, 6, 0, 0, "XOR32mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8300201eULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #3620 = XOR32mi8
+ { 3621, 6, 0, 0, "XOR32mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x31000004ULL, NULL, ImplicitList1, Barriers1, OperandInfo15 }, // Inst #3621 = XOR32mr
+ { 3622, 3, 1, 0, "XOR32ri", 0, 0x8100a016ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #3622 = XOR32ri
+ { 3623, 3, 1, 0, "XOR32ri8", 0, 0x83002016ULL, NULL, ImplicitList1, Barriers1, OperandInfo16 }, // Inst #3623 = XOR32ri8
+ { 3624, 7, 1, 0, "XOR32rm", 0|(1<<TID::MayLoad), 0x33000006ULL, NULL, ImplicitList1, Barriers1, OperandInfo17 }, // Inst #3624 = XOR32rm
+ { 3625, 3, 1, 0, "XOR32rr", 0|(1<<TID::Commutable), 0x31000003ULL, NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #3625 = XOR32rr
+ { 3626, 3, 1, 0, "XOR32rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x33000005ULL, NULL, ImplicitList1, Barriers1, OperandInfo18 }, // Inst #3626 = XOR32rr_REV
+ { 3627, 1, 0, 0, "XOR64i32", 0|(1<<TID::UnmodeledSideEffects), 0x3500b001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #3627 = XOR64i32
+ { 3628, 6, 0, 0, "XOR64mi32", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8100b01eULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #3628 = XOR64mi32
+ { 3629, 6, 0, 0, "XOR64mi8", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8300301eULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #3629 = XOR64mi8
+ { 3630, 6, 0, 0, "XOR64mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x31001004ULL, NULL, ImplicitList1, Barriers1, OperandInfo19 }, // Inst #3630 = XOR64mr
+ { 3631, 3, 1, 0, "XOR64ri32", 0, 0x8100b016ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #3631 = XOR64ri32
+ { 3632, 3, 1, 0, "XOR64ri8", 0, 0x83003016ULL, NULL, ImplicitList1, Barriers1, OperandInfo20 }, // Inst #3632 = XOR64ri8
+ { 3633, 7, 1, 0, "XOR64rm", 0|(1<<TID::MayLoad), 0x33001006ULL, NULL, ImplicitList1, Barriers1, OperandInfo21 }, // Inst #3633 = XOR64rm
+ { 3634, 3, 1, 0, "XOR64rr", 0|(1<<TID::Commutable), 0x31001003ULL, NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #3634 = XOR64rr
+ { 3635, 3, 1, 0, "XOR64rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x33001005ULL, NULL, ImplicitList1, Barriers1, OperandInfo22 }, // Inst #3635 = XOR64rr_REV
+ { 3636, 1, 0, 0, "XOR8i8", 0|(1<<TID::UnmodeledSideEffects), 0x34002001ULL, NULL, ImplicitList1, Barriers1, OperandInfo2 }, // Inst #3636 = XOR8i8
+ { 3637, 6, 0, 0, "XOR8mi", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x8000201eULL, NULL, ImplicitList1, Barriers1, OperandInfo10 }, // Inst #3637 = XOR8mi
+ { 3638, 6, 0, 0, "XOR8mr", 0|(1<<TID::MayLoad)|(1<<TID::MayStore), 0x30000004ULL, NULL, ImplicitList1, Barriers1, OperandInfo24 }, // Inst #3638 = XOR8mr
+ { 3639, 3, 1, 0, "XOR8ri", 0, 0x80002016ULL, NULL, ImplicitList1, Barriers1, OperandInfo25 }, // Inst #3639 = XOR8ri
+ { 3640, 7, 1, 0, "XOR8rm", 0|(1<<TID::MayLoad), 0x32000006ULL, NULL, ImplicitList1, Barriers1, OperandInfo26 }, // Inst #3640 = XOR8rm
+ { 3641, 3, 1, 0, "XOR8rr", 0|(1<<TID::Commutable), 0x30000003ULL, NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #3641 = XOR8rr
+ { 3642, 3, 1, 0, "XOR8rr_REV", 0|(1<<TID::UnmodeledSideEffects), 0x32000005ULL, NULL, ImplicitList1, Barriers1, OperandInfo27 }, // Inst #3642 = XOR8rr_REV
+ { 3643, 7, 1, 0, "XORPDrm", 0|(1<<TID::MayLoad), 0x57800146ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #3643 = XORPDrm
+ { 3644, 3, 1, 0, "XORPDrr", 0|(1<<TID::Commutable), 0x57800145ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #3644 = XORPDrr
+ { 3645, 7, 1, 0, "XORPSrm", 0|(1<<TID::MayLoad), 0x57400106ULL, NULL, NULL, NULL, OperandInfo28 }, // Inst #3645 = XORPSrm
+ { 3646, 3, 1, 0, "XORPSrr", 0|(1<<TID::Commutable), 0x57400105ULL, NULL, NULL, NULL, OperandInfo29 }, // Inst #3646 = XORPSrr
};
} // End llvm namespace
diff --git a/libclamav/c++/X86GenInstrNames.inc b/libclamav/c++/X86GenInstrNames.inc
index 549a87e..29326dd 100644
--- a/libclamav/c++/X86GenInstrNames.inc
+++ b/libclamav/c++/X86GenInstrNames.inc
@@ -12,7 +12,7 @@ namespace X86 {
enum {
PHI = 0,
INLINEASM = 1,
- DBG_LABEL = 2,
+ PROLOG_LABEL = 2,
EH_LABEL = 3,
GC_LABEL = 4,
KILL = 5,
@@ -22,2520 +22,3642 @@ namespace X86 {
SUBREG_TO_REG = 9,
COPY_TO_REGCLASS = 10,
DBG_VALUE = 11,
- ABS_F = 12,
- ABS_Fp32 = 13,
- ABS_Fp64 = 14,
- ABS_Fp80 = 15,
- ADC16i16 = 16,
- ADC16mi = 17,
- ADC16mi8 = 18,
- ADC16mr = 19,
- ADC16ri = 20,
- ADC16ri8 = 21,
- ADC16rm = 22,
- ADC16rr = 23,
- ADC16rr_REV = 24,
- ADC32i32 = 25,
- ADC32mi = 26,
- ADC32mi8 = 27,
- ADC32mr = 28,
- ADC32ri = 29,
- ADC32ri8 = 30,
- ADC32rm = 31,
- ADC32rr = 32,
- ADC32rr_REV = 33,
- ADC64i32 = 34,
- ADC64mi32 = 35,
- ADC64mi8 = 36,
- ADC64mr = 37,
- ADC64ri32 = 38,
- ADC64ri8 = 39,
- ADC64rm = 40,
- ADC64rr = 41,
- ADC64rr_REV = 42,
- ADC8i8 = 43,
- ADC8mi = 44,
- ADC8mr = 45,
- ADC8ri = 46,
- ADC8rm = 47,
- ADC8rr = 48,
- ADC8rr_REV = 49,
- ADD16i16 = 50,
- ADD16mi = 51,
- ADD16mi8 = 52,
- ADD16mr = 53,
- ADD16mrmrr = 54,
- ADD16ri = 55,
- ADD16ri8 = 56,
- ADD16rm = 57,
- ADD16rr = 58,
- ADD32i32 = 59,
- ADD32mi = 60,
- ADD32mi8 = 61,
- ADD32mr = 62,
- ADD32mrmrr = 63,
- ADD32ri = 64,
- ADD32ri8 = 65,
- ADD32rm = 66,
- ADD32rr = 67,
- ADD64i32 = 68,
- ADD64mi32 = 69,
- ADD64mi8 = 70,
- ADD64mr = 71,
- ADD64mrmrr = 72,
- ADD64ri32 = 73,
- ADD64ri8 = 74,
- ADD64rm = 75,
- ADD64rr = 76,
- ADD8i8 = 77,
- ADD8mi = 78,
- ADD8mr = 79,
- ADD8mrmrr = 80,
- ADD8ri = 81,
- ADD8rm = 82,
- ADD8rr = 83,
- ADDPDrm = 84,
- ADDPDrr = 85,
- ADDPSrm = 86,
- ADDPSrr = 87,
- ADDSDrm = 88,
- ADDSDrm_Int = 89,
- ADDSDrr = 90,
- ADDSDrr_Int = 91,
- ADDSSrm = 92,
- ADDSSrm_Int = 93,
- ADDSSrr = 94,
- ADDSSrr_Int = 95,
- ADDSUBPDrm = 96,
- ADDSUBPDrr = 97,
- ADDSUBPSrm = 98,
- ADDSUBPSrr = 99,
- ADD_F32m = 100,
- ADD_F64m = 101,
- ADD_FI16m = 102,
- ADD_FI32m = 103,
- ADD_FPrST0 = 104,
- ADD_FST0r = 105,
- ADD_Fp32 = 106,
- ADD_Fp32m = 107,
- ADD_Fp64 = 108,
- ADD_Fp64m = 109,
- ADD_Fp64m32 = 110,
- ADD_Fp80 = 111,
- ADD_Fp80m32 = 112,
- ADD_Fp80m64 = 113,
- ADD_FpI16m32 = 114,
- ADD_FpI16m64 = 115,
- ADD_FpI16m80 = 116,
- ADD_FpI32m32 = 117,
- ADD_FpI32m64 = 118,
- ADD_FpI32m80 = 119,
- ADD_FrST0 = 120,
- ADJCALLSTACKDOWN32 = 121,
- ADJCALLSTACKDOWN64 = 122,
- ADJCALLSTACKUP32 = 123,
- ADJCALLSTACKUP64 = 124,
- AND16i16 = 125,
- AND16mi = 126,
- AND16mi8 = 127,
- AND16mr = 128,
- AND16ri = 129,
- AND16ri8 = 130,
- AND16rm = 131,
- AND16rr = 132,
- AND16rr_REV = 133,
- AND32i32 = 134,
- AND32mi = 135,
- AND32mi8 = 136,
- AND32mr = 137,
- AND32ri = 138,
- AND32ri8 = 139,
- AND32rm = 140,
- AND32rr = 141,
- AND32rr_REV = 142,
- AND64i32 = 143,
- AND64mi32 = 144,
- AND64mi8 = 145,
- AND64mr = 146,
- AND64ri32 = 147,
- AND64ri8 = 148,
- AND64rm = 149,
- AND64rr = 150,
- AND64rr_REV = 151,
- AND8i8 = 152,
- AND8mi = 153,
- AND8mr = 154,
- AND8ri = 155,
- AND8rm = 156,
- AND8rr = 157,
- AND8rr_REV = 158,
- ANDNPDrm = 159,
- ANDNPDrr = 160,
- ANDNPSrm = 161,
- ANDNPSrr = 162,
- ANDPDrm = 163,
- ANDPDrr = 164,
- ANDPSrm = 165,
- ANDPSrr = 166,
- ATOMADD6432 = 167,
- ATOMAND16 = 168,
- ATOMAND32 = 169,
- ATOMAND64 = 170,
- ATOMAND6432 = 171,
- ATOMAND8 = 172,
- ATOMMAX16 = 173,
- ATOMMAX32 = 174,
- ATOMMAX64 = 175,
- ATOMMIN16 = 176,
- ATOMMIN32 = 177,
- ATOMMIN64 = 178,
- ATOMNAND16 = 179,
- ATOMNAND32 = 180,
- ATOMNAND64 = 181,
- ATOMNAND6432 = 182,
- ATOMNAND8 = 183,
- ATOMOR16 = 184,
- ATOMOR32 = 185,
- ATOMOR64 = 186,
- ATOMOR6432 = 187,
- ATOMOR8 = 188,
- ATOMSUB6432 = 189,
- ATOMSWAP6432 = 190,
- ATOMUMAX16 = 191,
- ATOMUMAX32 = 192,
- ATOMUMAX64 = 193,
- ATOMUMIN16 = 194,
- ATOMUMIN32 = 195,
- ATOMUMIN64 = 196,
- ATOMXOR16 = 197,
- ATOMXOR32 = 198,
- ATOMXOR64 = 199,
- ATOMXOR6432 = 200,
- ATOMXOR8 = 201,
- BLENDPDrmi = 202,
- BLENDPDrri = 203,
- BLENDPSrmi = 204,
- BLENDPSrri = 205,
- BLENDVPDrm0 = 206,
- BLENDVPDrr0 = 207,
- BLENDVPSrm0 = 208,
- BLENDVPSrr0 = 209,
- BSF16rm = 210,
- BSF16rr = 211,
- BSF32rm = 212,
- BSF32rr = 213,
- BSF64rm = 214,
- BSF64rr = 215,
- BSR16rm = 216,
- BSR16rr = 217,
- BSR32rm = 218,
- BSR32rr = 219,
- BSR64rm = 220,
- BSR64rr = 221,
- BSWAP32r = 222,
- BSWAP64r = 223,
- BT16mi8 = 224,
- BT16mr = 225,
- BT16ri8 = 226,
- BT16rr = 227,
- BT32mi8 = 228,
- BT32mr = 229,
- BT32ri8 = 230,
- BT32rr = 231,
- BT64mi8 = 232,
- BT64mr = 233,
- BT64ri8 = 234,
- BT64rr = 235,
- BTC16mi8 = 236,
- BTC16mr = 237,
- BTC16ri8 = 238,
- BTC16rr = 239,
- BTC32mi8 = 240,
- BTC32mr = 241,
- BTC32ri8 = 242,
- BTC32rr = 243,
- BTC64mi8 = 244,
- BTC64mr = 245,
- BTC64ri8 = 246,
- BTC64rr = 247,
- BTR16mi8 = 248,
- BTR16mr = 249,
- BTR16ri8 = 250,
- BTR16rr = 251,
- BTR32mi8 = 252,
- BTR32mr = 253,
- BTR32ri8 = 254,
- BTR32rr = 255,
- BTR64mi8 = 256,
- BTR64mr = 257,
- BTR64ri8 = 258,
- BTR64rr = 259,
- BTS16mi8 = 260,
- BTS16mr = 261,
- BTS16ri8 = 262,
- BTS16rr = 263,
- BTS32mi8 = 264,
- BTS32mr = 265,
- BTS32ri8 = 266,
- BTS32rr = 267,
- BTS64mi8 = 268,
- BTS64mr = 269,
- BTS64ri8 = 270,
- BTS64rr = 271,
- CALL32m = 272,
- CALL32r = 273,
- CALL64m = 274,
- CALL64pcrel32 = 275,
- CALL64r = 276,
- CALLpcrel32 = 277,
- CBW = 278,
- CDQ = 279,
- CDQE = 280,
- CHS_F = 281,
- CHS_Fp32 = 282,
- CHS_Fp64 = 283,
- CHS_Fp80 = 284,
- CLC = 285,
- CLD = 286,
- CLFLUSH = 287,
- CLI = 288,
- CLTS = 289,
- CMC = 290,
- CMOVA16rm = 291,
- CMOVA16rr = 292,
- CMOVA32rm = 293,
- CMOVA32rr = 294,
- CMOVA64rm = 295,
- CMOVA64rr = 296,
- CMOVAE16rm = 297,
- CMOVAE16rr = 298,
- CMOVAE32rm = 299,
- CMOVAE32rr = 300,
- CMOVAE64rm = 301,
- CMOVAE64rr = 302,
- CMOVB16rm = 303,
- CMOVB16rr = 304,
- CMOVB32rm = 305,
- CMOVB32rr = 306,
- CMOVB64rm = 307,
- CMOVB64rr = 308,
- CMOVBE16rm = 309,
- CMOVBE16rr = 310,
- CMOVBE32rm = 311,
- CMOVBE32rr = 312,
- CMOVBE64rm = 313,
- CMOVBE64rr = 314,
- CMOVBE_F = 315,
- CMOVBE_Fp32 = 316,
- CMOVBE_Fp64 = 317,
- CMOVBE_Fp80 = 318,
- CMOVB_F = 319,
- CMOVB_Fp32 = 320,
- CMOVB_Fp64 = 321,
- CMOVB_Fp80 = 322,
- CMOVE16rm = 323,
- CMOVE16rr = 324,
- CMOVE32rm = 325,
- CMOVE32rr = 326,
- CMOVE64rm = 327,
- CMOVE64rr = 328,
- CMOVE_F = 329,
- CMOVE_Fp32 = 330,
- CMOVE_Fp64 = 331,
- CMOVE_Fp80 = 332,
- CMOVG16rm = 333,
- CMOVG16rr = 334,
- CMOVG32rm = 335,
- CMOVG32rr = 336,
- CMOVG64rm = 337,
- CMOVG64rr = 338,
- CMOVGE16rm = 339,
- CMOVGE16rr = 340,
- CMOVGE32rm = 341,
- CMOVGE32rr = 342,
- CMOVGE64rm = 343,
- CMOVGE64rr = 344,
- CMOVL16rm = 345,
- CMOVL16rr = 346,
- CMOVL32rm = 347,
- CMOVL32rr = 348,
- CMOVL64rm = 349,
- CMOVL64rr = 350,
- CMOVLE16rm = 351,
- CMOVLE16rr = 352,
- CMOVLE32rm = 353,
- CMOVLE32rr = 354,
- CMOVLE64rm = 355,
- CMOVLE64rr = 356,
- CMOVNBE_F = 357,
- CMOVNBE_Fp32 = 358,
- CMOVNBE_Fp64 = 359,
- CMOVNBE_Fp80 = 360,
- CMOVNB_F = 361,
- CMOVNB_Fp32 = 362,
- CMOVNB_Fp64 = 363,
- CMOVNB_Fp80 = 364,
- CMOVNE16rm = 365,
- CMOVNE16rr = 366,
- CMOVNE32rm = 367,
- CMOVNE32rr = 368,
- CMOVNE64rm = 369,
- CMOVNE64rr = 370,
- CMOVNE_F = 371,
- CMOVNE_Fp32 = 372,
- CMOVNE_Fp64 = 373,
- CMOVNE_Fp80 = 374,
- CMOVNO16rm = 375,
- CMOVNO16rr = 376,
- CMOVNO32rm = 377,
- CMOVNO32rr = 378,
- CMOVNO64rm = 379,
- CMOVNO64rr = 380,
- CMOVNP16rm = 381,
- CMOVNP16rr = 382,
- CMOVNP32rm = 383,
- CMOVNP32rr = 384,
- CMOVNP64rm = 385,
- CMOVNP64rr = 386,
- CMOVNP_F = 387,
- CMOVNP_Fp32 = 388,
- CMOVNP_Fp64 = 389,
- CMOVNP_Fp80 = 390,
- CMOVNS16rm = 391,
- CMOVNS16rr = 392,
- CMOVNS32rm = 393,
- CMOVNS32rr = 394,
- CMOVNS64rm = 395,
- CMOVNS64rr = 396,
- CMOVO16rm = 397,
- CMOVO16rr = 398,
- CMOVO32rm = 399,
- CMOVO32rr = 400,
- CMOVO64rm = 401,
- CMOVO64rr = 402,
- CMOVP16rm = 403,
- CMOVP16rr = 404,
- CMOVP32rm = 405,
- CMOVP32rr = 406,
- CMOVP64rm = 407,
- CMOVP64rr = 408,
- CMOVP_F = 409,
- CMOVP_Fp32 = 410,
- CMOVP_Fp64 = 411,
- CMOVP_Fp80 = 412,
- CMOVS16rm = 413,
- CMOVS16rr = 414,
- CMOVS32rm = 415,
- CMOVS32rr = 416,
- CMOVS64rm = 417,
- CMOVS64rr = 418,
- CMOV_FR32 = 419,
- CMOV_FR64 = 420,
- CMOV_GR8 = 421,
- CMOV_V1I64 = 422,
- CMOV_V2F64 = 423,
- CMOV_V2I64 = 424,
- CMOV_V4F32 = 425,
- CMP16i16 = 426,
- CMP16mi = 427,
- CMP16mi8 = 428,
- CMP16mr = 429,
- CMP16mrmrr = 430,
- CMP16ri = 431,
- CMP16ri8 = 432,
- CMP16rm = 433,
- CMP16rr = 434,
- CMP32i32 = 435,
- CMP32mi = 436,
- CMP32mi8 = 437,
- CMP32mr = 438,
- CMP32mrmrr = 439,
- CMP32ri = 440,
- CMP32ri8 = 441,
- CMP32rm = 442,
- CMP32rr = 443,
- CMP64i32 = 444,
- CMP64mi32 = 445,
- CMP64mi8 = 446,
- CMP64mr = 447,
- CMP64mrmrr = 448,
- CMP64ri32 = 449,
- CMP64ri8 = 450,
- CMP64rm = 451,
- CMP64rr = 452,
- CMP8i8 = 453,
- CMP8mi = 454,
- CMP8mr = 455,
- CMP8mrmrr = 456,
- CMP8ri = 457,
- CMP8rm = 458,
- CMP8rr = 459,
- CMPPDrmi = 460,
- CMPPDrri = 461,
- CMPPSrmi = 462,
- CMPPSrri = 463,
- CMPS16 = 464,
- CMPS32 = 465,
- CMPS64 = 466,
- CMPS8 = 467,
- CMPSDrm = 468,
- CMPSDrr = 469,
- CMPSSrm = 470,
- CMPSSrr = 471,
- CMPXCHG16B = 472,
- CMPXCHG16rm = 473,
- CMPXCHG16rr = 474,
- CMPXCHG32rm = 475,
- CMPXCHG32rr = 476,
- CMPXCHG64rm = 477,
- CMPXCHG64rr = 478,
- CMPXCHG8B = 479,
- CMPXCHG8rm = 480,
- CMPXCHG8rr = 481,
- COMISDrm = 482,
- COMISDrr = 483,
- COMISSrm = 484,
- COMISSrr = 485,
- COMP_FST0r = 486,
- COM_FIPr = 487,
- COM_FIr = 488,
- COM_FST0r = 489,
- COS_F = 490,
- COS_Fp32 = 491,
- COS_Fp64 = 492,
- COS_Fp80 = 493,
- CPUID = 494,
- CQO = 495,
- CRC32m16 = 496,
- CRC32m32 = 497,
- CRC32m8 = 498,
- CRC32r16 = 499,
- CRC32r32 = 500,
- CRC32r8 = 501,
- CRC64m64 = 502,
- CRC64r64 = 503,
- CS_PREFIX = 504,
- CVTDQ2PDrm = 505,
- CVTDQ2PDrr = 506,
- CVTDQ2PSrm = 507,
- CVTDQ2PSrr = 508,
- CVTPD2DQrm = 509,
- CVTPD2DQrr = 510,
- CVTPD2PSrm = 511,
- CVTPD2PSrr = 512,
- CVTPS2DQrm = 513,
- CVTPS2DQrr = 514,
- CVTPS2PDrm = 515,
- CVTPS2PDrr = 516,
- CVTSD2SI64rm = 517,
- CVTSD2SI64rr = 518,
- CVTSD2SSrm = 519,
- CVTSD2SSrr = 520,
- CVTSI2SD64rm = 521,
- CVTSI2SD64rr = 522,
- CVTSI2SDrm = 523,
- CVTSI2SDrr = 524,
- CVTSI2SS64rm = 525,
- CVTSI2SS64rr = 526,
- CVTSI2SSrm = 527,
- CVTSI2SSrr = 528,
- CVTSS2SDrm = 529,
- CVTSS2SDrr = 530,
- CVTSS2SI64rm = 531,
- CVTSS2SI64rr = 532,
- CVTSS2SIrm = 533,
- CVTSS2SIrr = 534,
- CVTTPS2DQrm = 535,
- CVTTPS2DQrr = 536,
- CVTTSD2SI64rm = 537,
- CVTTSD2SI64rr = 538,
- CVTTSD2SIrm = 539,
- CVTTSD2SIrr = 540,
- CVTTSS2SI64rm = 541,
- CVTTSS2SI64rr = 542,
- CVTTSS2SIrm = 543,
- CVTTSS2SIrr = 544,
- CWD = 545,
- CWDE = 546,
- DEC16m = 547,
- DEC16r = 548,
- DEC32m = 549,
- DEC32r = 550,
- DEC64_16m = 551,
- DEC64_16r = 552,
- DEC64_32m = 553,
- DEC64_32r = 554,
- DEC64m = 555,
- DEC64r = 556,
- DEC8m = 557,
- DEC8r = 558,
- DIV16m = 559,
- DIV16r = 560,
- DIV32m = 561,
- DIV32r = 562,
- DIV64m = 563,
- DIV64r = 564,
- DIV8m = 565,
- DIV8r = 566,
- DIVPDrm = 567,
- DIVPDrr = 568,
- DIVPSrm = 569,
- DIVPSrr = 570,
- DIVR_F32m = 571,
- DIVR_F64m = 572,
- DIVR_FI16m = 573,
- DIVR_FI32m = 574,
- DIVR_FPrST0 = 575,
- DIVR_FST0r = 576,
- DIVR_Fp32m = 577,
- DIVR_Fp64m = 578,
- DIVR_Fp64m32 = 579,
- DIVR_Fp80m32 = 580,
- DIVR_Fp80m64 = 581,
- DIVR_FpI16m32 = 582,
- DIVR_FpI16m64 = 583,
- DIVR_FpI16m80 = 584,
- DIVR_FpI32m32 = 585,
- DIVR_FpI32m64 = 586,
- DIVR_FpI32m80 = 587,
- DIVR_FrST0 = 588,
- DIVSDrm = 589,
- DIVSDrm_Int = 590,
- DIVSDrr = 591,
- DIVSDrr_Int = 592,
- DIVSSrm = 593,
- DIVSSrm_Int = 594,
- DIVSSrr = 595,
- DIVSSrr_Int = 596,
- DIV_F32m = 597,
- DIV_F64m = 598,
- DIV_FI16m = 599,
- DIV_FI32m = 600,
- DIV_FPrST0 = 601,
- DIV_FST0r = 602,
- DIV_Fp32 = 603,
- DIV_Fp32m = 604,
- DIV_Fp64 = 605,
- DIV_Fp64m = 606,
- DIV_Fp64m32 = 607,
- DIV_Fp80 = 608,
- DIV_Fp80m32 = 609,
- DIV_Fp80m64 = 610,
- DIV_FpI16m32 = 611,
- DIV_FpI16m64 = 612,
- DIV_FpI16m80 = 613,
- DIV_FpI32m32 = 614,
- DIV_FpI32m64 = 615,
- DIV_FpI32m80 = 616,
- DIV_FrST0 = 617,
- DPPDrmi = 618,
- DPPDrri = 619,
- DPPSrmi = 620,
- DPPSrri = 621,
- DS_PREFIX = 622,
- EH_RETURN = 623,
- EH_RETURN64 = 624,
- ENTER = 625,
- ES_PREFIX = 626,
- EXTRACTPSmr = 627,
- EXTRACTPSrr = 628,
- F2XM1 = 629,
- FARCALL16i = 630,
- FARCALL16m = 631,
- FARCALL32i = 632,
- FARCALL32m = 633,
- FARCALL64 = 634,
- FARJMP16i = 635,
- FARJMP16m = 636,
- FARJMP32i = 637,
- FARJMP32m = 638,
- FARJMP64 = 639,
- FBLDm = 640,
- FBSTPm = 641,
- FCOM32m = 642,
- FCOM64m = 643,
- FCOMP32m = 644,
- FCOMP64m = 645,
- FCOMPP = 646,
- FDECSTP = 647,
- FFREE = 648,
- FICOM16m = 649,
- FICOM32m = 650,
- FICOMP16m = 651,
- FICOMP32m = 652,
- FINCSTP = 653,
- FLDCW16m = 654,
- FLDENVm = 655,
- FLDL2E = 656,
- FLDL2T = 657,
- FLDLG2 = 658,
- FLDLN2 = 659,
- FLDPI = 660,
- FNCLEX = 661,
- FNINIT = 662,
- FNOP = 663,
- FNSTCW16m = 664,
- FNSTSW8r = 665,
- FNSTSWm = 666,
- FP32_TO_INT16_IN_MEM = 667,
- FP32_TO_INT32_IN_MEM = 668,
- FP32_TO_INT64_IN_MEM = 669,
- FP64_TO_INT16_IN_MEM = 670,
- FP64_TO_INT32_IN_MEM = 671,
- FP64_TO_INT64_IN_MEM = 672,
- FP80_TO_INT16_IN_MEM = 673,
- FP80_TO_INT32_IN_MEM = 674,
- FP80_TO_INT64_IN_MEM = 675,
- FPATAN = 676,
- FPREM = 677,
- FPREM1 = 678,
- FPTAN = 679,
- FP_REG_KILL = 680,
- FRNDINT = 681,
- FRSTORm = 682,
- FSAVEm = 683,
- FSCALE = 684,
- FSINCOS = 685,
- FSTENVm = 686,
- FS_MOV32rm = 687,
- FS_PREFIX = 688,
- FXAM = 689,
- FXRSTOR = 690,
- FXSAVE = 691,
- FXTRACT = 692,
- FYL2X = 693,
- FYL2XP1 = 694,
- FpGET_ST0_32 = 695,
- FpGET_ST0_64 = 696,
- FpGET_ST0_80 = 697,
- FpGET_ST1_32 = 698,
- FpGET_ST1_64 = 699,
- FpGET_ST1_80 = 700,
- FpSET_ST0_32 = 701,
- FpSET_ST0_64 = 702,
- FpSET_ST0_80 = 703,
- FpSET_ST1_32 = 704,
- FpSET_ST1_64 = 705,
- FpSET_ST1_80 = 706,
- FsANDNPDrm = 707,
- FsANDNPDrr = 708,
- FsANDNPSrm = 709,
- FsANDNPSrr = 710,
- FsANDPDrm = 711,
- FsANDPDrr = 712,
- FsANDPSrm = 713,
- FsANDPSrr = 714,
- FsFLD0SD = 715,
- FsFLD0SS = 716,
- FsMOVAPDrm = 717,
- FsMOVAPDrr = 718,
- FsMOVAPSrm = 719,
- FsMOVAPSrr = 720,
- FsORPDrm = 721,
- FsORPDrr = 722,
- FsORPSrm = 723,
- FsORPSrr = 724,
- FsXORPDrm = 725,
- FsXORPDrr = 726,
- FsXORPSrm = 727,
- FsXORPSrr = 728,
- GS_MOV32rm = 729,
- GS_PREFIX = 730,
- HADDPDrm = 731,
- HADDPDrr = 732,
- HADDPSrm = 733,
- HADDPSrr = 734,
- HLT = 735,
- HSUBPDrm = 736,
- HSUBPDrr = 737,
- HSUBPSrm = 738,
- HSUBPSrr = 739,
- IDIV16m = 740,
- IDIV16r = 741,
- IDIV32m = 742,
- IDIV32r = 743,
- IDIV64m = 744,
- IDIV64r = 745,
- IDIV8m = 746,
- IDIV8r = 747,
- ILD_F16m = 748,
- ILD_F32m = 749,
- ILD_F64m = 750,
- ILD_Fp16m32 = 751,
- ILD_Fp16m64 = 752,
- ILD_Fp16m80 = 753,
- ILD_Fp32m32 = 754,
- ILD_Fp32m64 = 755,
- ILD_Fp32m80 = 756,
- ILD_Fp64m32 = 757,
- ILD_Fp64m64 = 758,
- ILD_Fp64m80 = 759,
- IMUL16m = 760,
- IMUL16r = 761,
- IMUL16rm = 762,
- IMUL16rmi = 763,
- IMUL16rmi8 = 764,
- IMUL16rr = 765,
- IMUL16rri = 766,
- IMUL16rri8 = 767,
- IMUL32m = 768,
- IMUL32r = 769,
- IMUL32rm = 770,
- IMUL32rmi = 771,
- IMUL32rmi8 = 772,
- IMUL32rr = 773,
- IMUL32rri = 774,
- IMUL32rri8 = 775,
- IMUL64m = 776,
- IMUL64r = 777,
- IMUL64rm = 778,
- IMUL64rmi32 = 779,
- IMUL64rmi8 = 780,
- IMUL64rr = 781,
- IMUL64rri32 = 782,
- IMUL64rri8 = 783,
- IMUL8m = 784,
- IMUL8r = 785,
- IN16 = 786,
- IN16ri = 787,
- IN16rr = 788,
- IN32 = 789,
- IN32ri = 790,
- IN32rr = 791,
- IN8 = 792,
- IN8ri = 793,
- IN8rr = 794,
- INC16m = 795,
- INC16r = 796,
- INC32m = 797,
- INC32r = 798,
- INC64_16m = 799,
- INC64_16r = 800,
- INC64_32m = 801,
- INC64_32r = 802,
- INC64m = 803,
- INC64r = 804,
- INC8m = 805,
- INC8r = 806,
- INSERTPSrm = 807,
- INSERTPSrr = 808,
- INT = 809,
- INT3 = 810,
- INVD = 811,
- INVEPT = 812,
- INVLPG = 813,
- INVVPID = 814,
- IRET16 = 815,
- IRET32 = 816,
- IRET64 = 817,
- ISTT_FP16m = 818,
- ISTT_FP32m = 819,
- ISTT_FP64m = 820,
- ISTT_Fp16m32 = 821,
- ISTT_Fp16m64 = 822,
- ISTT_Fp16m80 = 823,
- ISTT_Fp32m32 = 824,
- ISTT_Fp32m64 = 825,
- ISTT_Fp32m80 = 826,
- ISTT_Fp64m32 = 827,
- ISTT_Fp64m64 = 828,
- ISTT_Fp64m80 = 829,
- IST_F16m = 830,
- IST_F32m = 831,
- IST_FP16m = 832,
- IST_FP32m = 833,
- IST_FP64m = 834,
- IST_Fp16m32 = 835,
- IST_Fp16m64 = 836,
- IST_Fp16m80 = 837,
- IST_Fp32m32 = 838,
- IST_Fp32m64 = 839,
- IST_Fp32m80 = 840,
- IST_Fp64m32 = 841,
- IST_Fp64m64 = 842,
- IST_Fp64m80 = 843,
- Int_CMPSDrm = 844,
- Int_CMPSDrr = 845,
- Int_CMPSSrm = 846,
- Int_CMPSSrr = 847,
- Int_COMISDrm = 848,
- Int_COMISDrr = 849,
- Int_COMISSrm = 850,
- Int_COMISSrr = 851,
- Int_CVTDQ2PDrm = 852,
- Int_CVTDQ2PDrr = 853,
- Int_CVTDQ2PSrm = 854,
- Int_CVTDQ2PSrr = 855,
- Int_CVTPD2DQrm = 856,
- Int_CVTPD2DQrr = 857,
- Int_CVTPD2PIrm = 858,
- Int_CVTPD2PIrr = 859,
- Int_CVTPD2PSrm = 860,
- Int_CVTPD2PSrr = 861,
- Int_CVTPI2PDrm = 862,
- Int_CVTPI2PDrr = 863,
- Int_CVTPI2PSrm = 864,
- Int_CVTPI2PSrr = 865,
- Int_CVTPS2DQrm = 866,
- Int_CVTPS2DQrr = 867,
- Int_CVTPS2PDrm = 868,
- Int_CVTPS2PDrr = 869,
- Int_CVTPS2PIrm = 870,
- Int_CVTPS2PIrr = 871,
- Int_CVTSD2SI64rm = 872,
- Int_CVTSD2SI64rr = 873,
- Int_CVTSD2SIrm = 874,
- Int_CVTSD2SIrr = 875,
- Int_CVTSD2SSrm = 876,
- Int_CVTSD2SSrr = 877,
- Int_CVTSI2SD64rm = 878,
- Int_CVTSI2SD64rr = 879,
- Int_CVTSI2SDrm = 880,
- Int_CVTSI2SDrr = 881,
- Int_CVTSI2SS64rm = 882,
- Int_CVTSI2SS64rr = 883,
- Int_CVTSI2SSrm = 884,
- Int_CVTSI2SSrr = 885,
- Int_CVTSS2SDrm = 886,
- Int_CVTSS2SDrr = 887,
- Int_CVTSS2SI64rm = 888,
- Int_CVTSS2SI64rr = 889,
- Int_CVTSS2SIrm = 890,
- Int_CVTSS2SIrr = 891,
- Int_CVTTPD2DQrm = 892,
- Int_CVTTPD2DQrr = 893,
- Int_CVTTPD2PIrm = 894,
- Int_CVTTPD2PIrr = 895,
- Int_CVTTPS2DQrm = 896,
- Int_CVTTPS2DQrr = 897,
- Int_CVTTPS2PIrm = 898,
- Int_CVTTPS2PIrr = 899,
- Int_CVTTSD2SI64rm = 900,
- Int_CVTTSD2SI64rr = 901,
- Int_CVTTSD2SIrm = 902,
- Int_CVTTSD2SIrr = 903,
- Int_CVTTSS2SI64rm = 904,
- Int_CVTTSS2SI64rr = 905,
- Int_CVTTSS2SIrm = 906,
- Int_CVTTSS2SIrr = 907,
- Int_UCOMISDrm = 908,
- Int_UCOMISDrr = 909,
- Int_UCOMISSrm = 910,
- Int_UCOMISSrr = 911,
- JAE_1 = 912,
- JAE_4 = 913,
- JA_1 = 914,
- JA_4 = 915,
- JBE_1 = 916,
- JBE_4 = 917,
- JB_1 = 918,
- JB_4 = 919,
- JCXZ8 = 920,
- JE_1 = 921,
- JE_4 = 922,
- JGE_1 = 923,
- JGE_4 = 924,
- JG_1 = 925,
- JG_4 = 926,
- JLE_1 = 927,
- JLE_4 = 928,
- JL_1 = 929,
- JL_4 = 930,
- JMP32m = 931,
- JMP32r = 932,
- JMP64m = 933,
- JMP64pcrel32 = 934,
- JMP64r = 935,
- JMP_1 = 936,
- JMP_4 = 937,
- JNE_1 = 938,
- JNE_4 = 939,
- JNO_1 = 940,
- JNO_4 = 941,
- JNP_1 = 942,
- JNP_4 = 943,
- JNS_1 = 944,
- JNS_4 = 945,
- JO_1 = 946,
- JO_4 = 947,
- JP_1 = 948,
- JP_4 = 949,
- JS_1 = 950,
- JS_4 = 951,
- LAHF = 952,
- LAR16rm = 953,
- LAR16rr = 954,
- LAR32rm = 955,
- LAR32rr = 956,
- LAR64rm = 957,
- LAR64rr = 958,
- LCMPXCHG16 = 959,
- LCMPXCHG32 = 960,
- LCMPXCHG64 = 961,
- LCMPXCHG8 = 962,
- LCMPXCHG8B = 963,
- LDDQUrm = 964,
- LDMXCSR = 965,
- LDS16rm = 966,
- LDS32rm = 967,
- LD_F0 = 968,
- LD_F1 = 969,
- LD_F32m = 970,
- LD_F64m = 971,
- LD_F80m = 972,
- LD_Fp032 = 973,
- LD_Fp064 = 974,
- LD_Fp080 = 975,
- LD_Fp132 = 976,
- LD_Fp164 = 977,
- LD_Fp180 = 978,
- LD_Fp32m = 979,
- LD_Fp32m64 = 980,
- LD_Fp32m80 = 981,
- LD_Fp64m = 982,
- LD_Fp64m80 = 983,
- LD_Fp80m = 984,
- LD_Frr = 985,
- LEA16r = 986,
- LEA32r = 987,
- LEA64_32r = 988,
- LEA64r = 989,
- LEAVE = 990,
- LEAVE64 = 991,
- LES16rm = 992,
- LES32rm = 993,
- LFENCE = 994,
- LFS16rm = 995,
- LFS32rm = 996,
- LFS64rm = 997,
- LGDTm = 998,
- LGS16rm = 999,
- LGS32rm = 1000,
- LGS64rm = 1001,
- LIDTm = 1002,
- LLDT16m = 1003,
- LLDT16r = 1004,
- LMSW16m = 1005,
- LMSW16r = 1006,
- LOCK_ADD16mi = 1007,
- LOCK_ADD16mi8 = 1008,
- LOCK_ADD16mr = 1009,
- LOCK_ADD32mi = 1010,
- LOCK_ADD32mi8 = 1011,
- LOCK_ADD32mr = 1012,
- LOCK_ADD64mi32 = 1013,
- LOCK_ADD64mi8 = 1014,
- LOCK_ADD64mr = 1015,
- LOCK_ADD8mi = 1016,
- LOCK_ADD8mr = 1017,
- LOCK_DEC16m = 1018,
- LOCK_DEC32m = 1019,
- LOCK_DEC64m = 1020,
- LOCK_DEC8m = 1021,
- LOCK_INC16m = 1022,
- LOCK_INC32m = 1023,
- LOCK_INC64m = 1024,
- LOCK_INC8m = 1025,
- LOCK_PREFIX = 1026,
- LOCK_SUB16mi = 1027,
- LOCK_SUB16mi8 = 1028,
- LOCK_SUB16mr = 1029,
- LOCK_SUB32mi = 1030,
- LOCK_SUB32mi8 = 1031,
- LOCK_SUB32mr = 1032,
- LOCK_SUB64mi32 = 1033,
- LOCK_SUB64mi8 = 1034,
- LOCK_SUB64mr = 1035,
- LOCK_SUB8mi = 1036,
- LOCK_SUB8mr = 1037,
- LODSB = 1038,
- LODSD = 1039,
- LODSQ = 1040,
- LODSW = 1041,
- LOOP = 1042,
- LOOPE = 1043,
- LOOPNE = 1044,
- LRET = 1045,
- LRETI = 1046,
- LSL16rm = 1047,
- LSL16rr = 1048,
- LSL32rm = 1049,
- LSL32rr = 1050,
- LSL64rm = 1051,
- LSL64rr = 1052,
- LSS16rm = 1053,
- LSS32rm = 1054,
- LSS64rm = 1055,
- LTRm = 1056,
- LTRr = 1057,
- LXADD16 = 1058,
- LXADD32 = 1059,
- LXADD64 = 1060,
- LXADD8 = 1061,
- MASKMOVDQU = 1062,
- MASKMOVDQU64 = 1063,
- MAXPDrm = 1064,
- MAXPDrm_Int = 1065,
- MAXPDrr = 1066,
- MAXPDrr_Int = 1067,
- MAXPSrm = 1068,
- MAXPSrm_Int = 1069,
- MAXPSrr = 1070,
- MAXPSrr_Int = 1071,
- MAXSDrm = 1072,
- MAXSDrm_Int = 1073,
- MAXSDrr = 1074,
- MAXSDrr_Int = 1075,
- MAXSSrm = 1076,
- MAXSSrm_Int = 1077,
- MAXSSrr = 1078,
- MAXSSrr_Int = 1079,
- MFENCE = 1080,
- MINGW_ALLOCA = 1081,
- MINPDrm = 1082,
- MINPDrm_Int = 1083,
- MINPDrr = 1084,
- MINPDrr_Int = 1085,
- MINPSrm = 1086,
- MINPSrm_Int = 1087,
- MINPSrr = 1088,
- MINPSrr_Int = 1089,
- MINSDrm = 1090,
- MINSDrm_Int = 1091,
- MINSDrr = 1092,
- MINSDrr_Int = 1093,
- MINSSrm = 1094,
- MINSSrm_Int = 1095,
- MINSSrr = 1096,
- MINSSrr_Int = 1097,
- MMX_CVTPD2PIrm = 1098,
- MMX_CVTPD2PIrr = 1099,
- MMX_CVTPI2PDrm = 1100,
- MMX_CVTPI2PDrr = 1101,
- MMX_CVTPI2PSrm = 1102,
- MMX_CVTPI2PSrr = 1103,
- MMX_CVTPS2PIrm = 1104,
- MMX_CVTPS2PIrr = 1105,
- MMX_CVTTPD2PIrm = 1106,
- MMX_CVTTPD2PIrr = 1107,
- MMX_CVTTPS2PIrm = 1108,
- MMX_CVTTPS2PIrr = 1109,
- MMX_EMMS = 1110,
- MMX_FEMMS = 1111,
- MMX_MASKMOVQ = 1112,
- MMX_MASKMOVQ64 = 1113,
- MMX_MOVD64from64rr = 1114,
- MMX_MOVD64grr = 1115,
- MMX_MOVD64mr = 1116,
- MMX_MOVD64rm = 1117,
- MMX_MOVD64rr = 1118,
- MMX_MOVD64rrv164 = 1119,
- MMX_MOVD64to64rr = 1120,
- MMX_MOVDQ2Qrr = 1121,
- MMX_MOVNTQmr = 1122,
- MMX_MOVQ2DQrr = 1123,
- MMX_MOVQ2FR64rr = 1124,
- MMX_MOVQ64gmr = 1125,
- MMX_MOVQ64mr = 1126,
- MMX_MOVQ64rm = 1127,
- MMX_MOVQ64rr = 1128,
- MMX_MOVZDI2PDIrm = 1129,
- MMX_MOVZDI2PDIrr = 1130,
- MMX_PACKSSDWrm = 1131,
- MMX_PACKSSDWrr = 1132,
- MMX_PACKSSWBrm = 1133,
- MMX_PACKSSWBrr = 1134,
- MMX_PACKUSWBrm = 1135,
- MMX_PACKUSWBrr = 1136,
- MMX_PADDBrm = 1137,
- MMX_PADDBrr = 1138,
- MMX_PADDDrm = 1139,
- MMX_PADDDrr = 1140,
- MMX_PADDQrm = 1141,
- MMX_PADDQrr = 1142,
- MMX_PADDSBrm = 1143,
- MMX_PADDSBrr = 1144,
- MMX_PADDSWrm = 1145,
- MMX_PADDSWrr = 1146,
- MMX_PADDUSBrm = 1147,
- MMX_PADDUSBrr = 1148,
- MMX_PADDUSWrm = 1149,
- MMX_PADDUSWrr = 1150,
- MMX_PADDWrm = 1151,
- MMX_PADDWrr = 1152,
- MMX_PANDNrm = 1153,
- MMX_PANDNrr = 1154,
- MMX_PANDrm = 1155,
- MMX_PANDrr = 1156,
- MMX_PAVGBrm = 1157,
- MMX_PAVGBrr = 1158,
- MMX_PAVGWrm = 1159,
- MMX_PAVGWrr = 1160,
- MMX_PCMPEQBrm = 1161,
- MMX_PCMPEQBrr = 1162,
- MMX_PCMPEQDrm = 1163,
- MMX_PCMPEQDrr = 1164,
- MMX_PCMPEQWrm = 1165,
- MMX_PCMPEQWrr = 1166,
- MMX_PCMPGTBrm = 1167,
- MMX_PCMPGTBrr = 1168,
- MMX_PCMPGTDrm = 1169,
- MMX_PCMPGTDrr = 1170,
- MMX_PCMPGTWrm = 1171,
- MMX_PCMPGTWrr = 1172,
- MMX_PEXTRWri = 1173,
- MMX_PINSRWrmi = 1174,
- MMX_PINSRWrri = 1175,
- MMX_PMADDWDrm = 1176,
- MMX_PMADDWDrr = 1177,
- MMX_PMAXSWrm = 1178,
- MMX_PMAXSWrr = 1179,
- MMX_PMAXUBrm = 1180,
- MMX_PMAXUBrr = 1181,
- MMX_PMINSWrm = 1182,
- MMX_PMINSWrr = 1183,
- MMX_PMINUBrm = 1184,
- MMX_PMINUBrr = 1185,
- MMX_PMOVMSKBrr = 1186,
- MMX_PMULHUWrm = 1187,
- MMX_PMULHUWrr = 1188,
- MMX_PMULHWrm = 1189,
- MMX_PMULHWrr = 1190,
- MMX_PMULLWrm = 1191,
- MMX_PMULLWrr = 1192,
- MMX_PMULUDQrm = 1193,
- MMX_PMULUDQrr = 1194,
- MMX_PORrm = 1195,
- MMX_PORrr = 1196,
- MMX_PSADBWrm = 1197,
- MMX_PSADBWrr = 1198,
- MMX_PSHUFWmi = 1199,
- MMX_PSHUFWri = 1200,
- MMX_PSLLDri = 1201,
- MMX_PSLLDrm = 1202,
- MMX_PSLLDrr = 1203,
- MMX_PSLLQri = 1204,
- MMX_PSLLQrm = 1205,
- MMX_PSLLQrr = 1206,
- MMX_PSLLWri = 1207,
- MMX_PSLLWrm = 1208,
- MMX_PSLLWrr = 1209,
- MMX_PSRADri = 1210,
- MMX_PSRADrm = 1211,
- MMX_PSRADrr = 1212,
- MMX_PSRAWri = 1213,
- MMX_PSRAWrm = 1214,
- MMX_PSRAWrr = 1215,
- MMX_PSRLDri = 1216,
- MMX_PSRLDrm = 1217,
- MMX_PSRLDrr = 1218,
- MMX_PSRLQri = 1219,
- MMX_PSRLQrm = 1220,
- MMX_PSRLQrr = 1221,
- MMX_PSRLWri = 1222,
- MMX_PSRLWrm = 1223,
- MMX_PSRLWrr = 1224,
- MMX_PSUBBrm = 1225,
- MMX_PSUBBrr = 1226,
- MMX_PSUBDrm = 1227,
- MMX_PSUBDrr = 1228,
- MMX_PSUBQrm = 1229,
- MMX_PSUBQrr = 1230,
- MMX_PSUBSBrm = 1231,
- MMX_PSUBSBrr = 1232,
- MMX_PSUBSWrm = 1233,
- MMX_PSUBSWrr = 1234,
- MMX_PSUBUSBrm = 1235,
- MMX_PSUBUSBrr = 1236,
- MMX_PSUBUSWrm = 1237,
- MMX_PSUBUSWrr = 1238,
- MMX_PSUBWrm = 1239,
- MMX_PSUBWrr = 1240,
- MMX_PUNPCKHBWrm = 1241,
- MMX_PUNPCKHBWrr = 1242,
- MMX_PUNPCKHDQrm = 1243,
- MMX_PUNPCKHDQrr = 1244,
- MMX_PUNPCKHWDrm = 1245,
- MMX_PUNPCKHWDrr = 1246,
- MMX_PUNPCKLBWrm = 1247,
- MMX_PUNPCKLBWrr = 1248,
- MMX_PUNPCKLDQrm = 1249,
- MMX_PUNPCKLDQrr = 1250,
- MMX_PUNPCKLWDrm = 1251,
- MMX_PUNPCKLWDrr = 1252,
- MMX_PXORrm = 1253,
- MMX_PXORrr = 1254,
- MMX_V_SET0 = 1255,
- MMX_V_SETALLONES = 1256,
- MONITOR = 1257,
- MOV16ao16 = 1258,
- MOV16mi = 1259,
- MOV16mr = 1260,
- MOV16ms = 1261,
- MOV16o16a = 1262,
- MOV16r0 = 1263,
- MOV16ri = 1264,
- MOV16rm = 1265,
- MOV16rr = 1266,
- MOV16rr_REV = 1267,
- MOV16rs = 1268,
- MOV16sm = 1269,
- MOV16sr = 1270,
- MOV32ao32 = 1271,
- MOV32cr = 1272,
- MOV32dr = 1273,
- MOV32mi = 1274,
- MOV32mr = 1275,
- MOV32o32a = 1276,
- MOV32r0 = 1277,
- MOV32rc = 1278,
- MOV32rd = 1279,
- MOV32ri = 1280,
- MOV32rm = 1281,
- MOV32rr = 1282,
- MOV32rr_REV = 1283,
- MOV64FSrm = 1284,
- MOV64GSrm = 1285,
- MOV64ao64 = 1286,
- MOV64ao8 = 1287,
- MOV64cr = 1288,
- MOV64dr = 1289,
- MOV64mi32 = 1290,
- MOV64mr = 1291,
- MOV64ms = 1292,
- MOV64o64a = 1293,
- MOV64o8a = 1294,
- MOV64r0 = 1295,
- MOV64rc = 1296,
- MOV64rd = 1297,
- MOV64ri = 1298,
- MOV64ri32 = 1299,
- MOV64ri64i32 = 1300,
- MOV64rm = 1301,
- MOV64rr = 1302,
- MOV64rr_REV = 1303,
- MOV64rs = 1304,
- MOV64sm = 1305,
- MOV64sr = 1306,
- MOV64toPQIrr = 1307,
- MOV64toSDrm = 1308,
- MOV64toSDrr = 1309,
- MOV8ao8 = 1310,
- MOV8mi = 1311,
- MOV8mr = 1312,
- MOV8mr_NOREX = 1313,
- MOV8o8a = 1314,
- MOV8r0 = 1315,
- MOV8ri = 1316,
- MOV8rm = 1317,
- MOV8rm_NOREX = 1318,
- MOV8rr = 1319,
- MOV8rr_NOREX = 1320,
- MOV8rr_REV = 1321,
- MOVAPDmr = 1322,
- MOVAPDrm = 1323,
- MOVAPDrr = 1324,
- MOVAPSmr = 1325,
- MOVAPSrm = 1326,
- MOVAPSrr = 1327,
- MOVDDUPrm = 1328,
- MOVDDUPrr = 1329,
- MOVDI2PDIrm = 1330,
- MOVDI2PDIrr = 1331,
- MOVDI2SSrm = 1332,
- MOVDI2SSrr = 1333,
- MOVDQAmr = 1334,
- MOVDQArm = 1335,
- MOVDQArr = 1336,
- MOVDQUmr = 1337,
- MOVDQUmr_Int = 1338,
- MOVDQUrm = 1339,
- MOVDQUrm_Int = 1340,
- MOVHLPSrr = 1341,
- MOVHPDmr = 1342,
- MOVHPDrm = 1343,
- MOVHPSmr = 1344,
- MOVHPSrm = 1345,
- MOVLHPSrr = 1346,
- MOVLPDmr = 1347,
- MOVLPDrm = 1348,
- MOVLPSmr = 1349,
- MOVLPSrm = 1350,
- MOVLQ128mr = 1351,
- MOVMSKPDrr = 1352,
- MOVMSKPSrr = 1353,
- MOVNTDQArm = 1354,
- MOVNTDQ_64mr = 1355,
- MOVNTDQmr = 1356,
- MOVNTDQmr_Int = 1357,
- MOVNTI_64mr = 1358,
- MOVNTImr = 1359,
- MOVNTImr_Int = 1360,
- MOVNTPDmr = 1361,
- MOVNTPDmr_Int = 1362,
- MOVNTPSmr = 1363,
- MOVNTPSmr_Int = 1364,
- MOVPC32r = 1365,
- MOVPDI2DImr = 1366,
- MOVPDI2DIrr = 1367,
- MOVPQI2QImr = 1368,
- MOVPQIto64rr = 1369,
- MOVQI2PQIrm = 1370,
- MOVQxrxr = 1371,
- MOVSB = 1372,
- MOVSD = 1373,
- MOVSDmr = 1374,
- MOVSDrm = 1375,
- MOVSDrr = 1376,
- MOVSDto64mr = 1377,
- MOVSDto64rr = 1378,
- MOVSHDUPrm = 1379,
- MOVSHDUPrr = 1380,
- MOVSLDUPrm = 1381,
- MOVSLDUPrr = 1382,
- MOVSS2DImr = 1383,
- MOVSS2DIrr = 1384,
- MOVSSmr = 1385,
- MOVSSrm = 1386,
- MOVSSrr = 1387,
- MOVSW = 1388,
- MOVSX16rm8 = 1389,
- MOVSX16rm8W = 1390,
- MOVSX16rr8 = 1391,
- MOVSX16rr8W = 1392,
- MOVSX32rm16 = 1393,
- MOVSX32rm8 = 1394,
- MOVSX32rr16 = 1395,
- MOVSX32rr8 = 1396,
- MOVSX64rm16 = 1397,
- MOVSX64rm32 = 1398,
- MOVSX64rm8 = 1399,
- MOVSX64rr16 = 1400,
- MOVSX64rr32 = 1401,
- MOVSX64rr8 = 1402,
- MOVUPDmr = 1403,
- MOVUPDmr_Int = 1404,
- MOVUPDrm = 1405,
- MOVUPDrm_Int = 1406,
- MOVUPDrr = 1407,
- MOVUPSmr = 1408,
- MOVUPSmr_Int = 1409,
- MOVUPSrm = 1410,
- MOVUPSrm_Int = 1411,
- MOVUPSrr = 1412,
- MOVZDI2PDIrm = 1413,
- MOVZDI2PDIrr = 1414,
- MOVZPQILo2PQIrm = 1415,
- MOVZPQILo2PQIrr = 1416,
- MOVZQI2PQIrm = 1417,
- MOVZQI2PQIrr = 1418,
- MOVZX16rm8 = 1419,
- MOVZX16rm8W = 1420,
- MOVZX16rr8 = 1421,
- MOVZX16rr8W = 1422,
- MOVZX32_NOREXrm8 = 1423,
- MOVZX32_NOREXrr8 = 1424,
- MOVZX32rm16 = 1425,
- MOVZX32rm8 = 1426,
- MOVZX32rr16 = 1427,
- MOVZX32rr8 = 1428,
- MOVZX64rm16 = 1429,
- MOVZX64rm16_Q = 1430,
- MOVZX64rm32 = 1431,
- MOVZX64rm8 = 1432,
- MOVZX64rm8_Q = 1433,
- MOVZX64rr16 = 1434,
- MOVZX64rr16_Q = 1435,
- MOVZX64rr32 = 1436,
- MOVZX64rr8 = 1437,
- MOVZX64rr8_Q = 1438,
- MOV_Fp3232 = 1439,
- MOV_Fp3264 = 1440,
- MOV_Fp3280 = 1441,
- MOV_Fp6432 = 1442,
- MOV_Fp6464 = 1443,
- MOV_Fp6480 = 1444,
- MOV_Fp8032 = 1445,
- MOV_Fp8064 = 1446,
- MOV_Fp8080 = 1447,
- MPSADBWrmi = 1448,
- MPSADBWrri = 1449,
- MUL16m = 1450,
- MUL16r = 1451,
- MUL32m = 1452,
- MUL32r = 1453,
- MUL64m = 1454,
- MUL64r = 1455,
- MUL8m = 1456,
- MUL8r = 1457,
- MULPDrm = 1458,
- MULPDrr = 1459,
- MULPSrm = 1460,
- MULPSrr = 1461,
- MULSDrm = 1462,
- MULSDrm_Int = 1463,
- MULSDrr = 1464,
- MULSDrr_Int = 1465,
- MULSSrm = 1466,
- MULSSrm_Int = 1467,
- MULSSrr = 1468,
- MULSSrr_Int = 1469,
- MUL_F32m = 1470,
- MUL_F64m = 1471,
- MUL_FI16m = 1472,
- MUL_FI32m = 1473,
- MUL_FPrST0 = 1474,
- MUL_FST0r = 1475,
- MUL_Fp32 = 1476,
- MUL_Fp32m = 1477,
- MUL_Fp64 = 1478,
- MUL_Fp64m = 1479,
- MUL_Fp64m32 = 1480,
- MUL_Fp80 = 1481,
- MUL_Fp80m32 = 1482,
- MUL_Fp80m64 = 1483,
- MUL_FpI16m32 = 1484,
- MUL_FpI16m64 = 1485,
- MUL_FpI16m80 = 1486,
- MUL_FpI32m32 = 1487,
- MUL_FpI32m64 = 1488,
- MUL_FpI32m80 = 1489,
- MUL_FrST0 = 1490,
- MWAIT = 1491,
- NEG16m = 1492,
- NEG16r = 1493,
- NEG32m = 1494,
- NEG32r = 1495,
- NEG64m = 1496,
- NEG64r = 1497,
- NEG8m = 1498,
- NEG8r = 1499,
- NOOP = 1500,
- NOOPL = 1501,
- NOOPW = 1502,
- NOT16m = 1503,
- NOT16r = 1504,
- NOT32m = 1505,
- NOT32r = 1506,
- NOT64m = 1507,
- NOT64r = 1508,
- NOT8m = 1509,
- NOT8r = 1510,
- OR16i16 = 1511,
- OR16mi = 1512,
- OR16mi8 = 1513,
- OR16mr = 1514,
- OR16ri = 1515,
- OR16ri8 = 1516,
- OR16rm = 1517,
- OR16rr = 1518,
- OR16rr_REV = 1519,
- OR32i32 = 1520,
- OR32mi = 1521,
- OR32mi8 = 1522,
- OR32mr = 1523,
- OR32ri = 1524,
- OR32ri8 = 1525,
- OR32rm = 1526,
- OR32rr = 1527,
- OR32rr_REV = 1528,
- OR64i32 = 1529,
- OR64mi32 = 1530,
- OR64mi8 = 1531,
- OR64mr = 1532,
- OR64ri32 = 1533,
- OR64ri8 = 1534,
- OR64rm = 1535,
- OR64rr = 1536,
- OR64rr_REV = 1537,
- OR8i8 = 1538,
- OR8mi = 1539,
- OR8mr = 1540,
- OR8ri = 1541,
- OR8rm = 1542,
- OR8rr = 1543,
- OR8rr_REV = 1544,
- ORPDrm = 1545,
- ORPDrr = 1546,
- ORPSrm = 1547,
- ORPSrr = 1548,
- OUT16ir = 1549,
- OUT16rr = 1550,
- OUT32ir = 1551,
- OUT32rr = 1552,
- OUT8ir = 1553,
- OUT8rr = 1554,
- OUTSB = 1555,
- OUTSD = 1556,
- OUTSW = 1557,
- PABSBrm128 = 1558,
- PABSBrm64 = 1559,
- PABSBrr128 = 1560,
- PABSBrr64 = 1561,
- PABSDrm128 = 1562,
- PABSDrm64 = 1563,
- PABSDrr128 = 1564,
- PABSDrr64 = 1565,
- PABSWrm128 = 1566,
- PABSWrm64 = 1567,
- PABSWrr128 = 1568,
- PABSWrr64 = 1569,
- PACKSSDWrm = 1570,
- PACKSSDWrr = 1571,
- PACKSSWBrm = 1572,
- PACKSSWBrr = 1573,
- PACKUSDWrm = 1574,
- PACKUSDWrr = 1575,
- PACKUSWBrm = 1576,
- PACKUSWBrr = 1577,
- PADDBrm = 1578,
- PADDBrr = 1579,
- PADDDrm = 1580,
- PADDDrr = 1581,
- PADDQrm = 1582,
- PADDQrr = 1583,
- PADDSBrm = 1584,
- PADDSBrr = 1585,
- PADDSWrm = 1586,
- PADDSWrr = 1587,
- PADDUSBrm = 1588,
- PADDUSBrr = 1589,
- PADDUSWrm = 1590,
- PADDUSWrr = 1591,
- PADDWrm = 1592,
- PADDWrr = 1593,
- PALIGNR128rm = 1594,
- PALIGNR128rr = 1595,
- PALIGNR64rm = 1596,
- PALIGNR64rr = 1597,
- PANDNrm = 1598,
- PANDNrr = 1599,
- PANDrm = 1600,
- PANDrr = 1601,
- PAVGBrm = 1602,
- PAVGBrr = 1603,
- PAVGWrm = 1604,
- PAVGWrr = 1605,
- PBLENDVBrm0 = 1606,
- PBLENDVBrr0 = 1607,
- PBLENDWrmi = 1608,
- PBLENDWrri = 1609,
- PCMPEQBrm = 1610,
- PCMPEQBrr = 1611,
- PCMPEQDrm = 1612,
- PCMPEQDrr = 1613,
- PCMPEQQrm = 1614,
- PCMPEQQrr = 1615,
- PCMPEQWrm = 1616,
- PCMPEQWrr = 1617,
- PCMPESTRIArm = 1618,
- PCMPESTRIArr = 1619,
- PCMPESTRICrm = 1620,
- PCMPESTRICrr = 1621,
- PCMPESTRIOrm = 1622,
- PCMPESTRIOrr = 1623,
- PCMPESTRISrm = 1624,
- PCMPESTRISrr = 1625,
- PCMPESTRIZrm = 1626,
- PCMPESTRIZrr = 1627,
- PCMPESTRIrm = 1628,
- PCMPESTRIrr = 1629,
- PCMPESTRM128MEM = 1630,
- PCMPESTRM128REG = 1631,
- PCMPESTRM128rm = 1632,
- PCMPESTRM128rr = 1633,
- PCMPGTBrm = 1634,
- PCMPGTBrr = 1635,
- PCMPGTDrm = 1636,
- PCMPGTDrr = 1637,
- PCMPGTQrm = 1638,
- PCMPGTQrr = 1639,
- PCMPGTWrm = 1640,
- PCMPGTWrr = 1641,
- PCMPISTRIArm = 1642,
- PCMPISTRIArr = 1643,
- PCMPISTRICrm = 1644,
- PCMPISTRICrr = 1645,
- PCMPISTRIOrm = 1646,
- PCMPISTRIOrr = 1647,
- PCMPISTRISrm = 1648,
- PCMPISTRISrr = 1649,
- PCMPISTRIZrm = 1650,
- PCMPISTRIZrr = 1651,
- PCMPISTRIrm = 1652,
- PCMPISTRIrr = 1653,
- PCMPISTRM128MEM = 1654,
- PCMPISTRM128REG = 1655,
- PCMPISTRM128rm = 1656,
- PCMPISTRM128rr = 1657,
- PEXTRBmr = 1658,
- PEXTRBrr = 1659,
- PEXTRDmr = 1660,
- PEXTRDrr = 1661,
- PEXTRQmr = 1662,
- PEXTRQrr = 1663,
- PEXTRWmr = 1664,
- PEXTRWri = 1665,
- PHADDDrm128 = 1666,
- PHADDDrm64 = 1667,
- PHADDDrr128 = 1668,
- PHADDDrr64 = 1669,
- PHADDSWrm128 = 1670,
- PHADDSWrm64 = 1671,
- PHADDSWrr128 = 1672,
- PHADDSWrr64 = 1673,
- PHADDWrm128 = 1674,
- PHADDWrm64 = 1675,
- PHADDWrr128 = 1676,
- PHADDWrr64 = 1677,
- PHMINPOSUWrm128 = 1678,
- PHMINPOSUWrr128 = 1679,
- PHSUBDrm128 = 1680,
- PHSUBDrm64 = 1681,
- PHSUBDrr128 = 1682,
- PHSUBDrr64 = 1683,
- PHSUBSWrm128 = 1684,
- PHSUBSWrm64 = 1685,
- PHSUBSWrr128 = 1686,
- PHSUBSWrr64 = 1687,
- PHSUBWrm128 = 1688,
- PHSUBWrm64 = 1689,
- PHSUBWrr128 = 1690,
- PHSUBWrr64 = 1691,
- PINSRBrm = 1692,
- PINSRBrr = 1693,
- PINSRDrm = 1694,
- PINSRDrr = 1695,
- PINSRQrm = 1696,
- PINSRQrr = 1697,
- PINSRWrmi = 1698,
- PINSRWrri = 1699,
- PMADDUBSWrm128 = 1700,
- PMADDUBSWrm64 = 1701,
- PMADDUBSWrr128 = 1702,
- PMADDUBSWrr64 = 1703,
- PMADDWDrm = 1704,
- PMADDWDrr = 1705,
- PMAXSBrm = 1706,
- PMAXSBrr = 1707,
- PMAXSDrm = 1708,
- PMAXSDrr = 1709,
- PMAXSWrm = 1710,
- PMAXSWrr = 1711,
- PMAXUBrm = 1712,
- PMAXUBrr = 1713,
- PMAXUDrm = 1714,
- PMAXUDrr = 1715,
- PMAXUWrm = 1716,
- PMAXUWrr = 1717,
- PMINSBrm = 1718,
- PMINSBrr = 1719,
- PMINSDrm = 1720,
- PMINSDrr = 1721,
- PMINSWrm = 1722,
- PMINSWrr = 1723,
- PMINUBrm = 1724,
- PMINUBrr = 1725,
- PMINUDrm = 1726,
- PMINUDrr = 1727,
- PMINUWrm = 1728,
- PMINUWrr = 1729,
- PMOVMSKBrr = 1730,
- PMOVSXBDrm = 1731,
- PMOVSXBDrr = 1732,
- PMOVSXBQrm = 1733,
- PMOVSXBQrr = 1734,
- PMOVSXBWrm = 1735,
- PMOVSXBWrr = 1736,
- PMOVSXDQrm = 1737,
- PMOVSXDQrr = 1738,
- PMOVSXWDrm = 1739,
- PMOVSXWDrr = 1740,
- PMOVSXWQrm = 1741,
- PMOVSXWQrr = 1742,
- PMOVZXBDrm = 1743,
- PMOVZXBDrr = 1744,
- PMOVZXBQrm = 1745,
- PMOVZXBQrr = 1746,
- PMOVZXBWrm = 1747,
- PMOVZXBWrr = 1748,
- PMOVZXDQrm = 1749,
- PMOVZXDQrr = 1750,
- PMOVZXWDrm = 1751,
- PMOVZXWDrr = 1752,
- PMOVZXWQrm = 1753,
- PMOVZXWQrr = 1754,
- PMULDQrm = 1755,
- PMULDQrr = 1756,
- PMULHRSWrm128 = 1757,
- PMULHRSWrm64 = 1758,
- PMULHRSWrr128 = 1759,
- PMULHRSWrr64 = 1760,
- PMULHUWrm = 1761,
- PMULHUWrr = 1762,
- PMULHWrm = 1763,
- PMULHWrr = 1764,
- PMULLDrm = 1765,
- PMULLDrm_int = 1766,
- PMULLDrr = 1767,
- PMULLDrr_int = 1768,
- PMULLWrm = 1769,
- PMULLWrr = 1770,
- PMULUDQrm = 1771,
- PMULUDQrr = 1772,
- POP16r = 1773,
- POP16rmm = 1774,
- POP16rmr = 1775,
- POP32r = 1776,
- POP32rmm = 1777,
- POP32rmr = 1778,
- POP64r = 1779,
- POP64rmm = 1780,
- POP64rmr = 1781,
- POPCNT16rm = 1782,
- POPCNT16rr = 1783,
- POPCNT32rm = 1784,
- POPCNT32rr = 1785,
- POPCNT64rm = 1786,
- POPCNT64rr = 1787,
- POPF = 1788,
- POPFD = 1789,
- POPFQ = 1790,
- POPFS16 = 1791,
- POPFS32 = 1792,
- POPFS64 = 1793,
- POPGS16 = 1794,
- POPGS32 = 1795,
- POPGS64 = 1796,
- PORrm = 1797,
- PORrr = 1798,
- PREFETCHNTA = 1799,
- PREFETCHT0 = 1800,
- PREFETCHT1 = 1801,
- PREFETCHT2 = 1802,
- PSADBWrm = 1803,
- PSADBWrr = 1804,
- PSHUFBrm128 = 1805,
- PSHUFBrm64 = 1806,
- PSHUFBrr128 = 1807,
- PSHUFBrr64 = 1808,
- PSHUFDmi = 1809,
- PSHUFDri = 1810,
- PSHUFHWmi = 1811,
- PSHUFHWri = 1812,
- PSHUFLWmi = 1813,
- PSHUFLWri = 1814,
- PSIGNBrm128 = 1815,
- PSIGNBrm64 = 1816,
- PSIGNBrr128 = 1817,
- PSIGNBrr64 = 1818,
- PSIGNDrm128 = 1819,
- PSIGNDrm64 = 1820,
- PSIGNDrr128 = 1821,
- PSIGNDrr64 = 1822,
- PSIGNWrm128 = 1823,
- PSIGNWrm64 = 1824,
- PSIGNWrr128 = 1825,
- PSIGNWrr64 = 1826,
- PSLLDQri = 1827,
- PSLLDri = 1828,
- PSLLDrm = 1829,
- PSLLDrr = 1830,
- PSLLQri = 1831,
- PSLLQrm = 1832,
- PSLLQrr = 1833,
- PSLLWri = 1834,
- PSLLWrm = 1835,
- PSLLWrr = 1836,
- PSRADri = 1837,
- PSRADrm = 1838,
- PSRADrr = 1839,
- PSRAWri = 1840,
- PSRAWrm = 1841,
- PSRAWrr = 1842,
- PSRLDQri = 1843,
- PSRLDri = 1844,
- PSRLDrm = 1845,
- PSRLDrr = 1846,
- PSRLQri = 1847,
- PSRLQrm = 1848,
- PSRLQrr = 1849,
- PSRLWri = 1850,
- PSRLWrm = 1851,
- PSRLWrr = 1852,
- PSUBBrm = 1853,
- PSUBBrr = 1854,
- PSUBDrm = 1855,
- PSUBDrr = 1856,
- PSUBQrm = 1857,
- PSUBQrr = 1858,
- PSUBSBrm = 1859,
- PSUBSBrr = 1860,
- PSUBSWrm = 1861,
- PSUBSWrr = 1862,
- PSUBUSBrm = 1863,
- PSUBUSBrr = 1864,
- PSUBUSWrm = 1865,
- PSUBUSWrr = 1866,
- PSUBWrm = 1867,
- PSUBWrr = 1868,
- PTESTrm = 1869,
- PTESTrr = 1870,
- PUNPCKHBWrm = 1871,
- PUNPCKHBWrr = 1872,
- PUNPCKHDQrm = 1873,
- PUNPCKHDQrr = 1874,
- PUNPCKHQDQrm = 1875,
- PUNPCKHQDQrr = 1876,
- PUNPCKHWDrm = 1877,
- PUNPCKHWDrr = 1878,
- PUNPCKLBWrm = 1879,
- PUNPCKLBWrr = 1880,
- PUNPCKLDQrm = 1881,
- PUNPCKLDQrr = 1882,
- PUNPCKLQDQrm = 1883,
- PUNPCKLQDQrr = 1884,
- PUNPCKLWDrm = 1885,
- PUNPCKLWDrr = 1886,
- PUSH16r = 1887,
- PUSH16rmm = 1888,
- PUSH16rmr = 1889,
- PUSH32i16 = 1890,
- PUSH32i32 = 1891,
- PUSH32i8 = 1892,
- PUSH32r = 1893,
- PUSH32rmm = 1894,
- PUSH32rmr = 1895,
- PUSH64i16 = 1896,
- PUSH64i32 = 1897,
- PUSH64i8 = 1898,
- PUSH64r = 1899,
- PUSH64rmm = 1900,
- PUSH64rmr = 1901,
- PUSHF = 1902,
- PUSHFD = 1903,
- PUSHFQ64 = 1904,
- PUSHFS16 = 1905,
- PUSHFS32 = 1906,
- PUSHFS64 = 1907,
- PUSHGS16 = 1908,
- PUSHGS32 = 1909,
- PUSHGS64 = 1910,
- PXORrm = 1911,
- PXORrr = 1912,
- RCL16m1 = 1913,
- RCL16mCL = 1914,
- RCL16mi = 1915,
- RCL16r1 = 1916,
- RCL16rCL = 1917,
- RCL16ri = 1918,
- RCL32m1 = 1919,
- RCL32mCL = 1920,
- RCL32mi = 1921,
- RCL32r1 = 1922,
- RCL32rCL = 1923,
- RCL32ri = 1924,
- RCL64m1 = 1925,
- RCL64mCL = 1926,
- RCL64mi = 1927,
- RCL64r1 = 1928,
- RCL64rCL = 1929,
- RCL64ri = 1930,
- RCL8m1 = 1931,
- RCL8mCL = 1932,
- RCL8mi = 1933,
- RCL8r1 = 1934,
- RCL8rCL = 1935,
- RCL8ri = 1936,
- RCPPSm = 1937,
- RCPPSm_Int = 1938,
- RCPPSr = 1939,
- RCPPSr_Int = 1940,
- RCPSSm = 1941,
- RCPSSm_Int = 1942,
- RCPSSr = 1943,
- RCPSSr_Int = 1944,
- RCR16m1 = 1945,
- RCR16mCL = 1946,
- RCR16mi = 1947,
- RCR16r1 = 1948,
- RCR16rCL = 1949,
- RCR16ri = 1950,
- RCR32m1 = 1951,
- RCR32mCL = 1952,
- RCR32mi = 1953,
- RCR32r1 = 1954,
- RCR32rCL = 1955,
- RCR32ri = 1956,
- RCR64m1 = 1957,
- RCR64mCL = 1958,
- RCR64mi = 1959,
- RCR64r1 = 1960,
- RCR64rCL = 1961,
- RCR64ri = 1962,
- RCR8m1 = 1963,
- RCR8mCL = 1964,
- RCR8mi = 1965,
- RCR8r1 = 1966,
- RCR8rCL = 1967,
- RCR8ri = 1968,
- RDMSR = 1969,
- RDPMC = 1970,
- RDTSC = 1971,
- RDTSCP = 1972,
- REPNE_PREFIX = 1973,
- REP_MOVSB = 1974,
- REP_MOVSD = 1975,
- REP_MOVSQ = 1976,
- REP_MOVSW = 1977,
- REP_PREFIX = 1978,
- REP_STOSB = 1979,
- REP_STOSD = 1980,
- REP_STOSQ = 1981,
- REP_STOSW = 1982,
- RET = 1983,
- RETI = 1984,
- ROL16m1 = 1985,
- ROL16mCL = 1986,
- ROL16mi = 1987,
- ROL16r1 = 1988,
- ROL16rCL = 1989,
- ROL16ri = 1990,
- ROL32m1 = 1991,
- ROL32mCL = 1992,
- ROL32mi = 1993,
- ROL32r1 = 1994,
- ROL32rCL = 1995,
- ROL32ri = 1996,
- ROL64m1 = 1997,
- ROL64mCL = 1998,
- ROL64mi = 1999,
- ROL64r1 = 2000,
- ROL64rCL = 2001,
- ROL64ri = 2002,
- ROL8m1 = 2003,
- ROL8mCL = 2004,
- ROL8mi = 2005,
- ROL8r1 = 2006,
- ROL8rCL = 2007,
- ROL8ri = 2008,
- ROR16m1 = 2009,
- ROR16mCL = 2010,
- ROR16mi = 2011,
- ROR16r1 = 2012,
- ROR16rCL = 2013,
- ROR16ri = 2014,
- ROR32m1 = 2015,
- ROR32mCL = 2016,
- ROR32mi = 2017,
- ROR32r1 = 2018,
- ROR32rCL = 2019,
- ROR32ri = 2020,
- ROR64m1 = 2021,
- ROR64mCL = 2022,
- ROR64mi = 2023,
- ROR64r1 = 2024,
- ROR64rCL = 2025,
- ROR64ri = 2026,
- ROR8m1 = 2027,
- ROR8mCL = 2028,
- ROR8mi = 2029,
- ROR8r1 = 2030,
- ROR8rCL = 2031,
- ROR8ri = 2032,
- ROUNDPDm_Int = 2033,
- ROUNDPDr_Int = 2034,
- ROUNDPSm_Int = 2035,
- ROUNDPSr_Int = 2036,
- ROUNDSDm_Int = 2037,
- ROUNDSDr_Int = 2038,
- ROUNDSSm_Int = 2039,
- ROUNDSSr_Int = 2040,
- RSM = 2041,
- RSQRTPSm = 2042,
- RSQRTPSm_Int = 2043,
- RSQRTPSr = 2044,
- RSQRTPSr_Int = 2045,
- RSQRTSSm = 2046,
- RSQRTSSm_Int = 2047,
- RSQRTSSr = 2048,
- RSQRTSSr_Int = 2049,
- SAHF = 2050,
- SAR16m1 = 2051,
- SAR16mCL = 2052,
- SAR16mi = 2053,
- SAR16r1 = 2054,
- SAR16rCL = 2055,
- SAR16ri = 2056,
- SAR32m1 = 2057,
- SAR32mCL = 2058,
- SAR32mi = 2059,
- SAR32r1 = 2060,
- SAR32rCL = 2061,
- SAR32ri = 2062,
- SAR64m1 = 2063,
- SAR64mCL = 2064,
- SAR64mi = 2065,
- SAR64r1 = 2066,
- SAR64rCL = 2067,
- SAR64ri = 2068,
- SAR8m1 = 2069,
- SAR8mCL = 2070,
- SAR8mi = 2071,
- SAR8r1 = 2072,
- SAR8rCL = 2073,
- SAR8ri = 2074,
- SBB16i16 = 2075,
- SBB16mi = 2076,
- SBB16mi8 = 2077,
- SBB16mr = 2078,
- SBB16ri = 2079,
- SBB16ri8 = 2080,
- SBB16rm = 2081,
- SBB16rr = 2082,
- SBB16rr_REV = 2083,
- SBB32i32 = 2084,
- SBB32mi = 2085,
- SBB32mi8 = 2086,
- SBB32mr = 2087,
- SBB32ri = 2088,
- SBB32ri8 = 2089,
- SBB32rm = 2090,
- SBB32rr = 2091,
- SBB32rr_REV = 2092,
- SBB64i32 = 2093,
- SBB64mi32 = 2094,
- SBB64mi8 = 2095,
- SBB64mr = 2096,
- SBB64ri32 = 2097,
- SBB64ri8 = 2098,
- SBB64rm = 2099,
- SBB64rr = 2100,
- SBB64rr_REV = 2101,
- SBB8i8 = 2102,
- SBB8mi = 2103,
- SBB8mr = 2104,
- SBB8ri = 2105,
- SBB8rm = 2106,
- SBB8rr = 2107,
- SBB8rr_REV = 2108,
- SCAS16 = 2109,
- SCAS32 = 2110,
- SCAS64 = 2111,
- SCAS8 = 2112,
- SETAEm = 2113,
- SETAEr = 2114,
- SETAm = 2115,
- SETAr = 2116,
- SETBEm = 2117,
- SETBEr = 2118,
- SETB_C16r = 2119,
- SETB_C32r = 2120,
- SETB_C64r = 2121,
- SETB_C8r = 2122,
- SETBm = 2123,
- SETBr = 2124,
- SETEm = 2125,
- SETEr = 2126,
- SETGEm = 2127,
- SETGEr = 2128,
- SETGm = 2129,
- SETGr = 2130,
- SETLEm = 2131,
- SETLEr = 2132,
- SETLm = 2133,
- SETLr = 2134,
- SETNEm = 2135,
- SETNEr = 2136,
- SETNOm = 2137,
- SETNOr = 2138,
- SETNPm = 2139,
- SETNPr = 2140,
- SETNSm = 2141,
- SETNSr = 2142,
- SETOm = 2143,
- SETOr = 2144,
- SETPm = 2145,
- SETPr = 2146,
- SETSm = 2147,
- SETSr = 2148,
- SFENCE = 2149,
- SGDTm = 2150,
- SHL16m1 = 2151,
- SHL16mCL = 2152,
- SHL16mi = 2153,
- SHL16r1 = 2154,
- SHL16rCL = 2155,
- SHL16ri = 2156,
- SHL32m1 = 2157,
- SHL32mCL = 2158,
- SHL32mi = 2159,
- SHL32r1 = 2160,
- SHL32rCL = 2161,
- SHL32ri = 2162,
- SHL64m1 = 2163,
- SHL64mCL = 2164,
- SHL64mi = 2165,
- SHL64r1 = 2166,
- SHL64rCL = 2167,
- SHL64ri = 2168,
- SHL8m1 = 2169,
- SHL8mCL = 2170,
- SHL8mi = 2171,
- SHL8r1 = 2172,
- SHL8rCL = 2173,
- SHL8ri = 2174,
- SHLD16mrCL = 2175,
- SHLD16mri8 = 2176,
- SHLD16rrCL = 2177,
- SHLD16rri8 = 2178,
- SHLD32mrCL = 2179,
- SHLD32mri8 = 2180,
- SHLD32rrCL = 2181,
- SHLD32rri8 = 2182,
- SHLD64mrCL = 2183,
- SHLD64mri8 = 2184,
- SHLD64rrCL = 2185,
- SHLD64rri8 = 2186,
- SHR16m1 = 2187,
- SHR16mCL = 2188,
- SHR16mi = 2189,
- SHR16r1 = 2190,
- SHR16rCL = 2191,
- SHR16ri = 2192,
- SHR32m1 = 2193,
- SHR32mCL = 2194,
- SHR32mi = 2195,
- SHR32r1 = 2196,
- SHR32rCL = 2197,
- SHR32ri = 2198,
- SHR64m1 = 2199,
- SHR64mCL = 2200,
- SHR64mi = 2201,
- SHR64r1 = 2202,
- SHR64rCL = 2203,
- SHR64ri = 2204,
- SHR8m1 = 2205,
- SHR8mCL = 2206,
- SHR8mi = 2207,
- SHR8r1 = 2208,
- SHR8rCL = 2209,
- SHR8ri = 2210,
- SHRD16mrCL = 2211,
- SHRD16mri8 = 2212,
- SHRD16rrCL = 2213,
- SHRD16rri8 = 2214,
- SHRD32mrCL = 2215,
- SHRD32mri8 = 2216,
- SHRD32rrCL = 2217,
- SHRD32rri8 = 2218,
- SHRD64mrCL = 2219,
- SHRD64mri8 = 2220,
- SHRD64rrCL = 2221,
- SHRD64rri8 = 2222,
- SHUFPDrmi = 2223,
- SHUFPDrri = 2224,
- SHUFPSrmi = 2225,
- SHUFPSrri = 2226,
- SIDTm = 2227,
- SIN_F = 2228,
- SIN_Fp32 = 2229,
- SIN_Fp64 = 2230,
- SIN_Fp80 = 2231,
- SLDT16m = 2232,
- SLDT16r = 2233,
- SLDT64m = 2234,
- SLDT64r = 2235,
- SMSW16m = 2236,
- SMSW16r = 2237,
- SMSW32r = 2238,
- SMSW64r = 2239,
- SQRTPDm = 2240,
- SQRTPDm_Int = 2241,
- SQRTPDr = 2242,
- SQRTPDr_Int = 2243,
- SQRTPSm = 2244,
- SQRTPSm_Int = 2245,
- SQRTPSr = 2246,
- SQRTPSr_Int = 2247,
- SQRTSDm = 2248,
- SQRTSDm_Int = 2249,
- SQRTSDr = 2250,
- SQRTSDr_Int = 2251,
- SQRTSSm = 2252,
- SQRTSSm_Int = 2253,
- SQRTSSr = 2254,
- SQRTSSr_Int = 2255,
- SQRT_F = 2256,
- SQRT_Fp32 = 2257,
- SQRT_Fp64 = 2258,
- SQRT_Fp80 = 2259,
- SS_PREFIX = 2260,
- STC = 2261,
- STD = 2262,
- STI = 2263,
- STMXCSR = 2264,
- STOSB = 2265,
- STOSD = 2266,
- STOSW = 2267,
- STRm = 2268,
- STRr = 2269,
- ST_F32m = 2270,
- ST_F64m = 2271,
- ST_FP32m = 2272,
- ST_FP64m = 2273,
- ST_FP80m = 2274,
- ST_FPrr = 2275,
- ST_Fp32m = 2276,
- ST_Fp64m = 2277,
- ST_Fp64m32 = 2278,
- ST_Fp80m32 = 2279,
- ST_Fp80m64 = 2280,
- ST_FpP32m = 2281,
- ST_FpP64m = 2282,
- ST_FpP64m32 = 2283,
- ST_FpP80m = 2284,
- ST_FpP80m32 = 2285,
- ST_FpP80m64 = 2286,
- ST_Frr = 2287,
- SUB16i16 = 2288,
- SUB16mi = 2289,
- SUB16mi8 = 2290,
- SUB16mr = 2291,
- SUB16ri = 2292,
- SUB16ri8 = 2293,
- SUB16rm = 2294,
- SUB16rr = 2295,
- SUB16rr_REV = 2296,
- SUB32i32 = 2297,
- SUB32mi = 2298,
- SUB32mi8 = 2299,
- SUB32mr = 2300,
- SUB32ri = 2301,
- SUB32ri8 = 2302,
- SUB32rm = 2303,
- SUB32rr = 2304,
- SUB32rr_REV = 2305,
- SUB64i32 = 2306,
- SUB64mi32 = 2307,
- SUB64mi8 = 2308,
- SUB64mr = 2309,
- SUB64ri32 = 2310,
- SUB64ri8 = 2311,
- SUB64rm = 2312,
- SUB64rr = 2313,
- SUB64rr_REV = 2314,
- SUB8i8 = 2315,
- SUB8mi = 2316,
- SUB8mr = 2317,
- SUB8ri = 2318,
- SUB8rm = 2319,
- SUB8rr = 2320,
- SUB8rr_REV = 2321,
- SUBPDrm = 2322,
- SUBPDrr = 2323,
- SUBPSrm = 2324,
- SUBPSrr = 2325,
- SUBR_F32m = 2326,
- SUBR_F64m = 2327,
- SUBR_FI16m = 2328,
- SUBR_FI32m = 2329,
- SUBR_FPrST0 = 2330,
- SUBR_FST0r = 2331,
- SUBR_Fp32m = 2332,
- SUBR_Fp64m = 2333,
- SUBR_Fp64m32 = 2334,
- SUBR_Fp80m32 = 2335,
- SUBR_Fp80m64 = 2336,
- SUBR_FpI16m32 = 2337,
- SUBR_FpI16m64 = 2338,
- SUBR_FpI16m80 = 2339,
- SUBR_FpI32m32 = 2340,
- SUBR_FpI32m64 = 2341,
- SUBR_FpI32m80 = 2342,
- SUBR_FrST0 = 2343,
- SUBSDrm = 2344,
- SUBSDrm_Int = 2345,
- SUBSDrr = 2346,
- SUBSDrr_Int = 2347,
- SUBSSrm = 2348,
- SUBSSrm_Int = 2349,
- SUBSSrr = 2350,
- SUBSSrr_Int = 2351,
- SUB_F32m = 2352,
- SUB_F64m = 2353,
- SUB_FI16m = 2354,
- SUB_FI32m = 2355,
- SUB_FPrST0 = 2356,
- SUB_FST0r = 2357,
- SUB_Fp32 = 2358,
- SUB_Fp32m = 2359,
- SUB_Fp64 = 2360,
- SUB_Fp64m = 2361,
- SUB_Fp64m32 = 2362,
- SUB_Fp80 = 2363,
- SUB_Fp80m32 = 2364,
- SUB_Fp80m64 = 2365,
- SUB_FpI16m32 = 2366,
- SUB_FpI16m64 = 2367,
- SUB_FpI16m80 = 2368,
- SUB_FpI32m32 = 2369,
- SUB_FpI32m64 = 2370,
- SUB_FpI32m80 = 2371,
- SUB_FrST0 = 2372,
- SWAPGS = 2373,
- SYSCALL = 2374,
- SYSENTER = 2375,
- SYSEXIT = 2376,
- SYSEXIT64 = 2377,
- SYSRET = 2378,
- TAILJMPd = 2379,
- TAILJMPm = 2380,
- TAILJMPr = 2381,
- TAILJMPr64 = 2382,
- TCRETURNdi = 2383,
- TCRETURNdi64 = 2384,
- TCRETURNri = 2385,
- TCRETURNri64 = 2386,
- TEST16i16 = 2387,
- TEST16mi = 2388,
- TEST16ri = 2389,
- TEST16rm = 2390,
- TEST16rr = 2391,
- TEST32i32 = 2392,
- TEST32mi = 2393,
- TEST32ri = 2394,
- TEST32rm = 2395,
- TEST32rr = 2396,
- TEST64i32 = 2397,
- TEST64mi32 = 2398,
- TEST64ri32 = 2399,
- TEST64rm = 2400,
- TEST64rr = 2401,
- TEST8i8 = 2402,
- TEST8mi = 2403,
- TEST8ri = 2404,
- TEST8rm = 2405,
- TEST8rr = 2406,
- TLS_addr32 = 2407,
- TLS_addr64 = 2408,
- TRAP = 2409,
- TST_F = 2410,
- TST_Fp32 = 2411,
- TST_Fp64 = 2412,
- TST_Fp80 = 2413,
- UCOMISDrm = 2414,
- UCOMISDrr = 2415,
- UCOMISSrm = 2416,
- UCOMISSrr = 2417,
- UCOM_FIPr = 2418,
- UCOM_FIr = 2419,
- UCOM_FPPr = 2420,
- UCOM_FPr = 2421,
- UCOM_FpIr32 = 2422,
- UCOM_FpIr64 = 2423,
- UCOM_FpIr80 = 2424,
- UCOM_Fpr32 = 2425,
- UCOM_Fpr64 = 2426,
- UCOM_Fpr80 = 2427,
- UCOM_Fr = 2428,
- UNPCKHPDrm = 2429,
- UNPCKHPDrr = 2430,
- UNPCKHPSrm = 2431,
- UNPCKHPSrr = 2432,
- UNPCKLPDrm = 2433,
- UNPCKLPDrr = 2434,
- UNPCKLPSrm = 2435,
- UNPCKLPSrr = 2436,
- VASTART_SAVE_XMM_REGS = 2437,
- VERRm = 2438,
- VERRr = 2439,
- VERWm = 2440,
- VERWr = 2441,
- VMCALL = 2442,
- VMCLEARm = 2443,
- VMLAUNCH = 2444,
- VMPTRLDm = 2445,
- VMPTRSTm = 2446,
- VMREAD32rm = 2447,
- VMREAD32rr = 2448,
- VMREAD64rm = 2449,
- VMREAD64rr = 2450,
- VMRESUME = 2451,
- VMWRITE32rm = 2452,
- VMWRITE32rr = 2453,
- VMWRITE64rm = 2454,
- VMWRITE64rr = 2455,
- VMXOFF = 2456,
- VMXON = 2457,
- V_SET0 = 2458,
- V_SETALLONES = 2459,
- WAIT = 2460,
- WBINVD = 2461,
- WINCALL64m = 2462,
- WINCALL64pcrel32 = 2463,
- WINCALL64r = 2464,
- WRMSR = 2465,
- XADD16rm = 2466,
- XADD16rr = 2467,
- XADD32rm = 2468,
- XADD32rr = 2469,
- XADD64rm = 2470,
- XADD64rr = 2471,
- XADD8rm = 2472,
- XADD8rr = 2473,
- XCHG16ar = 2474,
- XCHG16rm = 2475,
- XCHG16rr = 2476,
- XCHG32ar = 2477,
- XCHG32rm = 2478,
- XCHG32rr = 2479,
- XCHG64ar = 2480,
- XCHG64rm = 2481,
- XCHG64rr = 2482,
- XCHG8rm = 2483,
- XCHG8rr = 2484,
- XCH_F = 2485,
- XLAT = 2486,
- XOR16i16 = 2487,
- XOR16mi = 2488,
- XOR16mi8 = 2489,
- XOR16mr = 2490,
- XOR16ri = 2491,
- XOR16ri8 = 2492,
- XOR16rm = 2493,
- XOR16rr = 2494,
- XOR16rr_REV = 2495,
- XOR32i32 = 2496,
- XOR32mi = 2497,
- XOR32mi8 = 2498,
- XOR32mr = 2499,
- XOR32ri = 2500,
- XOR32ri8 = 2501,
- XOR32rm = 2502,
- XOR32rr = 2503,
- XOR32rr_REV = 2504,
- XOR64i32 = 2505,
- XOR64mi32 = 2506,
- XOR64mi8 = 2507,
- XOR64mr = 2508,
- XOR64ri32 = 2509,
- XOR64ri8 = 2510,
- XOR64rm = 2511,
- XOR64rr = 2512,
- XOR64rr_REV = 2513,
- XOR8i8 = 2514,
- XOR8mi = 2515,
- XOR8mr = 2516,
- XOR8ri = 2517,
- XOR8rm = 2518,
- XOR8rr = 2519,
- XOR8rr_REV = 2520,
- XORPDrm = 2521,
- XORPDrr = 2522,
- XORPSrm = 2523,
- XORPSrr = 2524,
- INSTRUCTION_LIST_END = 2525
+ REG_SEQUENCE = 12,
+ COPY = 13,
+ ABS_F = 14,
+ ABS_Fp32 = 15,
+ ABS_Fp64 = 16,
+ ABS_Fp80 = 17,
+ ADC16i16 = 18,
+ ADC16mi = 19,
+ ADC16mi8 = 20,
+ ADC16mr = 21,
+ ADC16ri = 22,
+ ADC16ri8 = 23,
+ ADC16rm = 24,
+ ADC16rr = 25,
+ ADC16rr_REV = 26,
+ ADC32i32 = 27,
+ ADC32mi = 28,
+ ADC32mi8 = 29,
+ ADC32mr = 30,
+ ADC32ri = 31,
+ ADC32ri8 = 32,
+ ADC32rm = 33,
+ ADC32rr = 34,
+ ADC32rr_REV = 35,
+ ADC64i32 = 36,
+ ADC64mi32 = 37,
+ ADC64mi8 = 38,
+ ADC64mr = 39,
+ ADC64ri32 = 40,
+ ADC64ri8 = 41,
+ ADC64rm = 42,
+ ADC64rr = 43,
+ ADC64rr_REV = 44,
+ ADC8i8 = 45,
+ ADC8mi = 46,
+ ADC8mr = 47,
+ ADC8ri = 48,
+ ADC8rm = 49,
+ ADC8rr = 50,
+ ADC8rr_REV = 51,
+ ADD16i16 = 52,
+ ADD16mi = 53,
+ ADD16mi8 = 54,
+ ADD16mr = 55,
+ ADD16ri = 56,
+ ADD16ri8 = 57,
+ ADD16rm = 58,
+ ADD16rr = 59,
+ ADD16rr_alt = 60,
+ ADD32i32 = 61,
+ ADD32mi = 62,
+ ADD32mi8 = 63,
+ ADD32mr = 64,
+ ADD32ri = 65,
+ ADD32ri8 = 66,
+ ADD32rm = 67,
+ ADD32rr = 68,
+ ADD32rr_alt = 69,
+ ADD64i32 = 70,
+ ADD64mi32 = 71,
+ ADD64mi8 = 72,
+ ADD64mr = 73,
+ ADD64ri32 = 74,
+ ADD64ri8 = 75,
+ ADD64rm = 76,
+ ADD64rr = 77,
+ ADD64rr_alt = 78,
+ ADD8i8 = 79,
+ ADD8mi = 80,
+ ADD8mr = 81,
+ ADD8ri = 82,
+ ADD8rm = 83,
+ ADD8rr = 84,
+ ADD8rr_alt = 85,
+ ADDPDrm = 86,
+ ADDPDrr = 87,
+ ADDPSrm = 88,
+ ADDPSrr = 89,
+ ADDSDrm = 90,
+ ADDSDrm_Int = 91,
+ ADDSDrr = 92,
+ ADDSDrr_Int = 93,
+ ADDSSrm = 94,
+ ADDSSrm_Int = 95,
+ ADDSSrr = 96,
+ ADDSSrr_Int = 97,
+ ADDSUBPDrm = 98,
+ ADDSUBPDrr = 99,
+ ADDSUBPSrm = 100,
+ ADDSUBPSrr = 101,
+ ADD_F32m = 102,
+ ADD_F64m = 103,
+ ADD_FI16m = 104,
+ ADD_FI32m = 105,
+ ADD_FPrST0 = 106,
+ ADD_FST0r = 107,
+ ADD_Fp32 = 108,
+ ADD_Fp32m = 109,
+ ADD_Fp64 = 110,
+ ADD_Fp64m = 111,
+ ADD_Fp64m32 = 112,
+ ADD_Fp80 = 113,
+ ADD_Fp80m32 = 114,
+ ADD_Fp80m64 = 115,
+ ADD_FpI16m32 = 116,
+ ADD_FpI16m64 = 117,
+ ADD_FpI16m80 = 118,
+ ADD_FpI32m32 = 119,
+ ADD_FpI32m64 = 120,
+ ADD_FpI32m80 = 121,
+ ADD_FrST0 = 122,
+ ADJCALLSTACKDOWN32 = 123,
+ ADJCALLSTACKDOWN64 = 124,
+ ADJCALLSTACKUP32 = 125,
+ ADJCALLSTACKUP64 = 126,
+ AESDECLASTrm = 127,
+ AESDECLASTrr = 128,
+ AESDECrm = 129,
+ AESDECrr = 130,
+ AESENCLASTrm = 131,
+ AESENCLASTrr = 132,
+ AESENCrm = 133,
+ AESENCrr = 134,
+ AESIMCrm = 135,
+ AESIMCrr = 136,
+ AESKEYGENASSIST128rm = 137,
+ AESKEYGENASSIST128rr = 138,
+ AND16i16 = 139,
+ AND16mi = 140,
+ AND16mi8 = 141,
+ AND16mr = 142,
+ AND16ri = 143,
+ AND16ri8 = 144,
+ AND16rm = 145,
+ AND16rr = 146,
+ AND16rr_REV = 147,
+ AND32i32 = 148,
+ AND32mi = 149,
+ AND32mi8 = 150,
+ AND32mr = 151,
+ AND32ri = 152,
+ AND32ri8 = 153,
+ AND32rm = 154,
+ AND32rr = 155,
+ AND32rr_REV = 156,
+ AND64i32 = 157,
+ AND64mi32 = 158,
+ AND64mi8 = 159,
+ AND64mr = 160,
+ AND64ri32 = 161,
+ AND64ri8 = 162,
+ AND64rm = 163,
+ AND64rr = 164,
+ AND64rr_REV = 165,
+ AND8i8 = 166,
+ AND8mi = 167,
+ AND8mr = 168,
+ AND8ri = 169,
+ AND8rm = 170,
+ AND8rr = 171,
+ AND8rr_REV = 172,
+ ANDNPDrm = 173,
+ ANDNPDrr = 174,
+ ANDNPSrm = 175,
+ ANDNPSrr = 176,
+ ANDPDrm = 177,
+ ANDPDrr = 178,
+ ANDPSrm = 179,
+ ANDPSrr = 180,
+ ATOMADD6432 = 181,
+ ATOMAND16 = 182,
+ ATOMAND32 = 183,
+ ATOMAND64 = 184,
+ ATOMAND6432 = 185,
+ ATOMAND8 = 186,
+ ATOMMAX16 = 187,
+ ATOMMAX32 = 188,
+ ATOMMAX64 = 189,
+ ATOMMIN16 = 190,
+ ATOMMIN32 = 191,
+ ATOMMIN64 = 192,
+ ATOMNAND16 = 193,
+ ATOMNAND32 = 194,
+ ATOMNAND64 = 195,
+ ATOMNAND6432 = 196,
+ ATOMNAND8 = 197,
+ ATOMOR16 = 198,
+ ATOMOR32 = 199,
+ ATOMOR64 = 200,
+ ATOMOR6432 = 201,
+ ATOMOR8 = 202,
+ ATOMSUB6432 = 203,
+ ATOMSWAP6432 = 204,
+ ATOMUMAX16 = 205,
+ ATOMUMAX32 = 206,
+ ATOMUMAX64 = 207,
+ ATOMUMIN16 = 208,
+ ATOMUMIN32 = 209,
+ ATOMUMIN64 = 210,
+ ATOMXOR16 = 211,
+ ATOMXOR32 = 212,
+ ATOMXOR64 = 213,
+ ATOMXOR6432 = 214,
+ ATOMXOR8 = 215,
+ AVX_SET0PD = 216,
+ AVX_SET0PDY = 217,
+ AVX_SET0PI = 218,
+ AVX_SET0PS = 219,
+ AVX_SET0PSY = 220,
+ BLENDPDrmi = 221,
+ BLENDPDrri = 222,
+ BLENDPSrmi = 223,
+ BLENDPSrri = 224,
+ BLENDVPDrm0 = 225,
+ BLENDVPDrr0 = 226,
+ BLENDVPSrm0 = 227,
+ BLENDVPSrr0 = 228,
+ BSF16rm = 229,
+ BSF16rr = 230,
+ BSF32rm = 231,
+ BSF32rr = 232,
+ BSF64rm = 233,
+ BSF64rr = 234,
+ BSR16rm = 235,
+ BSR16rr = 236,
+ BSR32rm = 237,
+ BSR32rr = 238,
+ BSR64rm = 239,
+ BSR64rr = 240,
+ BSWAP32r = 241,
+ BSWAP64r = 242,
+ BT16mi8 = 243,
+ BT16mr = 244,
+ BT16ri8 = 245,
+ BT16rr = 246,
+ BT32mi8 = 247,
+ BT32mr = 248,
+ BT32ri8 = 249,
+ BT32rr = 250,
+ BT64mi8 = 251,
+ BT64mr = 252,
+ BT64ri8 = 253,
+ BT64rr = 254,
+ BTC16mi8 = 255,
+ BTC16mr = 256,
+ BTC16ri8 = 257,
+ BTC16rr = 258,
+ BTC32mi8 = 259,
+ BTC32mr = 260,
+ BTC32ri8 = 261,
+ BTC32rr = 262,
+ BTC64mi8 = 263,
+ BTC64mr = 264,
+ BTC64ri8 = 265,
+ BTC64rr = 266,
+ BTR16mi8 = 267,
+ BTR16mr = 268,
+ BTR16ri8 = 269,
+ BTR16rr = 270,
+ BTR32mi8 = 271,
+ BTR32mr = 272,
+ BTR32ri8 = 273,
+ BTR32rr = 274,
+ BTR64mi8 = 275,
+ BTR64mr = 276,
+ BTR64ri8 = 277,
+ BTR64rr = 278,
+ BTS16mi8 = 279,
+ BTS16mr = 280,
+ BTS16ri8 = 281,
+ BTS16rr = 282,
+ BTS32mi8 = 283,
+ BTS32mr = 284,
+ BTS32ri8 = 285,
+ BTS32rr = 286,
+ BTS64mi8 = 287,
+ BTS64mr = 288,
+ BTS64ri8 = 289,
+ BTS64rr = 290,
+ CALL32m = 291,
+ CALL32r = 292,
+ CALL64m = 293,
+ CALL64pcrel32 = 294,
+ CALL64r = 295,
+ CALLpcrel16 = 296,
+ CALLpcrel32 = 297,
+ CBW = 298,
+ CDQ = 299,
+ CDQE = 300,
+ CHS_F = 301,
+ CHS_Fp32 = 302,
+ CHS_Fp64 = 303,
+ CHS_Fp80 = 304,
+ CLC = 305,
+ CLD = 306,
+ CLFLUSH = 307,
+ CLI = 308,
+ CLTS = 309,
+ CMC = 310,
+ CMOVA16rm = 311,
+ CMOVA16rr = 312,
+ CMOVA32rm = 313,
+ CMOVA32rr = 314,
+ CMOVA64rm = 315,
+ CMOVA64rr = 316,
+ CMOVAE16rm = 317,
+ CMOVAE16rr = 318,
+ CMOVAE32rm = 319,
+ CMOVAE32rr = 320,
+ CMOVAE64rm = 321,
+ CMOVAE64rr = 322,
+ CMOVB16rm = 323,
+ CMOVB16rr = 324,
+ CMOVB32rm = 325,
+ CMOVB32rr = 326,
+ CMOVB64rm = 327,
+ CMOVB64rr = 328,
+ CMOVBE16rm = 329,
+ CMOVBE16rr = 330,
+ CMOVBE32rm = 331,
+ CMOVBE32rr = 332,
+ CMOVBE64rm = 333,
+ CMOVBE64rr = 334,
+ CMOVBE_F = 335,
+ CMOVBE_Fp32 = 336,
+ CMOVBE_Fp64 = 337,
+ CMOVBE_Fp80 = 338,
+ CMOVB_F = 339,
+ CMOVB_Fp32 = 340,
+ CMOVB_Fp64 = 341,
+ CMOVB_Fp80 = 342,
+ CMOVE16rm = 343,
+ CMOVE16rr = 344,
+ CMOVE32rm = 345,
+ CMOVE32rr = 346,
+ CMOVE64rm = 347,
+ CMOVE64rr = 348,
+ CMOVE_F = 349,
+ CMOVE_Fp32 = 350,
+ CMOVE_Fp64 = 351,
+ CMOVE_Fp80 = 352,
+ CMOVG16rm = 353,
+ CMOVG16rr = 354,
+ CMOVG32rm = 355,
+ CMOVG32rr = 356,
+ CMOVG64rm = 357,
+ CMOVG64rr = 358,
+ CMOVGE16rm = 359,
+ CMOVGE16rr = 360,
+ CMOVGE32rm = 361,
+ CMOVGE32rr = 362,
+ CMOVGE64rm = 363,
+ CMOVGE64rr = 364,
+ CMOVL16rm = 365,
+ CMOVL16rr = 366,
+ CMOVL32rm = 367,
+ CMOVL32rr = 368,
+ CMOVL64rm = 369,
+ CMOVL64rr = 370,
+ CMOVLE16rm = 371,
+ CMOVLE16rr = 372,
+ CMOVLE32rm = 373,
+ CMOVLE32rr = 374,
+ CMOVLE64rm = 375,
+ CMOVLE64rr = 376,
+ CMOVNBE_F = 377,
+ CMOVNBE_Fp32 = 378,
+ CMOVNBE_Fp64 = 379,
+ CMOVNBE_Fp80 = 380,
+ CMOVNB_F = 381,
+ CMOVNB_Fp32 = 382,
+ CMOVNB_Fp64 = 383,
+ CMOVNB_Fp80 = 384,
+ CMOVNE16rm = 385,
+ CMOVNE16rr = 386,
+ CMOVNE32rm = 387,
+ CMOVNE32rr = 388,
+ CMOVNE64rm = 389,
+ CMOVNE64rr = 390,
+ CMOVNE_F = 391,
+ CMOVNE_Fp32 = 392,
+ CMOVNE_Fp64 = 393,
+ CMOVNE_Fp80 = 394,
+ CMOVNO16rm = 395,
+ CMOVNO16rr = 396,
+ CMOVNO32rm = 397,
+ CMOVNO32rr = 398,
+ CMOVNO64rm = 399,
+ CMOVNO64rr = 400,
+ CMOVNP16rm = 401,
+ CMOVNP16rr = 402,
+ CMOVNP32rm = 403,
+ CMOVNP32rr = 404,
+ CMOVNP64rm = 405,
+ CMOVNP64rr = 406,
+ CMOVNP_F = 407,
+ CMOVNP_Fp32 = 408,
+ CMOVNP_Fp64 = 409,
+ CMOVNP_Fp80 = 410,
+ CMOVNS16rm = 411,
+ CMOVNS16rr = 412,
+ CMOVNS32rm = 413,
+ CMOVNS32rr = 414,
+ CMOVNS64rm = 415,
+ CMOVNS64rr = 416,
+ CMOVO16rm = 417,
+ CMOVO16rr = 418,
+ CMOVO32rm = 419,
+ CMOVO32rr = 420,
+ CMOVO64rm = 421,
+ CMOVO64rr = 422,
+ CMOVP16rm = 423,
+ CMOVP16rr = 424,
+ CMOVP32rm = 425,
+ CMOVP32rr = 426,
+ CMOVP64rm = 427,
+ CMOVP64rr = 428,
+ CMOVP_F = 429,
+ CMOVP_Fp32 = 430,
+ CMOVP_Fp64 = 431,
+ CMOVP_Fp80 = 432,
+ CMOVS16rm = 433,
+ CMOVS16rr = 434,
+ CMOVS32rm = 435,
+ CMOVS32rr = 436,
+ CMOVS64rm = 437,
+ CMOVS64rr = 438,
+ CMOV_FR32 = 439,
+ CMOV_FR64 = 440,
+ CMOV_GR16 = 441,
+ CMOV_GR32 = 442,
+ CMOV_GR8 = 443,
+ CMOV_RFP32 = 444,
+ CMOV_RFP64 = 445,
+ CMOV_RFP80 = 446,
+ CMOV_V1I64 = 447,
+ CMOV_V2F64 = 448,
+ CMOV_V2I64 = 449,
+ CMOV_V4F32 = 450,
+ CMP16i16 = 451,
+ CMP16mi = 452,
+ CMP16mi8 = 453,
+ CMP16mr = 454,
+ CMP16ri = 455,
+ CMP16ri8 = 456,
+ CMP16rm = 457,
+ CMP16rr = 458,
+ CMP16rr_alt = 459,
+ CMP32i32 = 460,
+ CMP32mi = 461,
+ CMP32mi8 = 462,
+ CMP32mr = 463,
+ CMP32ri = 464,
+ CMP32ri8 = 465,
+ CMP32rm = 466,
+ CMP32rr = 467,
+ CMP32rr_alt = 468,
+ CMP64i32 = 469,
+ CMP64mi32 = 470,
+ CMP64mi8 = 471,
+ CMP64mr = 472,
+ CMP64mrmrr = 473,
+ CMP64ri32 = 474,
+ CMP64ri8 = 475,
+ CMP64rm = 476,
+ CMP64rr = 477,
+ CMP8i8 = 478,
+ CMP8mi = 479,
+ CMP8mr = 480,
+ CMP8ri = 481,
+ CMP8rm = 482,
+ CMP8rr = 483,
+ CMP8rr_alt = 484,
+ CMPPDrmi = 485,
+ CMPPDrmi_alt = 486,
+ CMPPDrri = 487,
+ CMPPDrri_alt = 488,
+ CMPPSrmi = 489,
+ CMPPSrmi_alt = 490,
+ CMPPSrri = 491,
+ CMPPSrri_alt = 492,
+ CMPS16 = 493,
+ CMPS32 = 494,
+ CMPS64 = 495,
+ CMPS8 = 496,
+ CMPSDrm = 497,
+ CMPSDrm_alt = 498,
+ CMPSDrr = 499,
+ CMPSDrr_alt = 500,
+ CMPSSrm = 501,
+ CMPSSrm_alt = 502,
+ CMPSSrr = 503,
+ CMPSSrr_alt = 504,
+ CMPXCHG16B = 505,
+ CMPXCHG16rm = 506,
+ CMPXCHG16rr = 507,
+ CMPXCHG32rm = 508,
+ CMPXCHG32rr = 509,
+ CMPXCHG64rm = 510,
+ CMPXCHG64rr = 511,
+ CMPXCHG8B = 512,
+ CMPXCHG8rm = 513,
+ CMPXCHG8rr = 514,
+ COMISDrm = 515,
+ COMISDrr = 516,
+ COMISSrm = 517,
+ COMISSrr = 518,
+ COMP_FST0r = 519,
+ COM_FIPr = 520,
+ COM_FIr = 521,
+ COM_FST0r = 522,
+ COS_F = 523,
+ COS_Fp32 = 524,
+ COS_Fp64 = 525,
+ COS_Fp80 = 526,
+ CPUID = 527,
+ CQO = 528,
+ CRC32m16 = 529,
+ CRC32m32 = 530,
+ CRC32m8 = 531,
+ CRC32r16 = 532,
+ CRC32r32 = 533,
+ CRC32r8 = 534,
+ CRC64m64 = 535,
+ CRC64m8 = 536,
+ CRC64r64 = 537,
+ CRC64r8 = 538,
+ CS_PREFIX = 539,
+ CVTDQ2PDrm = 540,
+ CVTDQ2PDrr = 541,
+ CVTDQ2PSrm = 542,
+ CVTDQ2PSrr = 543,
+ CVTPD2DQrm = 544,
+ CVTPD2DQrr = 545,
+ CVTPD2PSrm = 546,
+ CVTPD2PSrr = 547,
+ CVTPS2DQrm = 548,
+ CVTPS2DQrr = 549,
+ CVTPS2PDrm = 550,
+ CVTPS2PDrr = 551,
+ CVTSD2SI64rm = 552,
+ CVTSD2SI64rr = 553,
+ CVTSD2SSrm = 554,
+ CVTSD2SSrr = 555,
+ CVTSI2SD64rm = 556,
+ CVTSI2SD64rr = 557,
+ CVTSI2SDrm = 558,
+ CVTSI2SDrr = 559,
+ CVTSI2SS64rm = 560,
+ CVTSI2SS64rr = 561,
+ CVTSI2SSrm = 562,
+ CVTSI2SSrr = 563,
+ CVTSS2SDrm = 564,
+ CVTSS2SDrr = 565,
+ CVTSS2SI64rm = 566,
+ CVTSS2SI64rr = 567,
+ CVTSS2SIrm = 568,
+ CVTSS2SIrr = 569,
+ CVTTPS2DQrm = 570,
+ CVTTPS2DQrr = 571,
+ CVTTSD2SI64rm = 572,
+ CVTTSD2SI64rr = 573,
+ CVTTSD2SIrm = 574,
+ CVTTSD2SIrr = 575,
+ CVTTSS2SI64rm = 576,
+ CVTTSS2SI64rr = 577,
+ CVTTSS2SIrm = 578,
+ CVTTSS2SIrr = 579,
+ CWD = 580,
+ CWDE = 581,
+ DEC16m = 582,
+ DEC16r = 583,
+ DEC32m = 584,
+ DEC32r = 585,
+ DEC64_16m = 586,
+ DEC64_16r = 587,
+ DEC64_32m = 588,
+ DEC64_32r = 589,
+ DEC64m = 590,
+ DEC64r = 591,
+ DEC8m = 592,
+ DEC8r = 593,
+ DIV16m = 594,
+ DIV16r = 595,
+ DIV32m = 596,
+ DIV32r = 597,
+ DIV64m = 598,
+ DIV64r = 599,
+ DIV8m = 600,
+ DIV8r = 601,
+ DIVPDrm = 602,
+ DIVPDrr = 603,
+ DIVPSrm = 604,
+ DIVPSrr = 605,
+ DIVR_F32m = 606,
+ DIVR_F64m = 607,
+ DIVR_FI16m = 608,
+ DIVR_FI32m = 609,
+ DIVR_FPrST0 = 610,
+ DIVR_FST0r = 611,
+ DIVR_Fp32m = 612,
+ DIVR_Fp64m = 613,
+ DIVR_Fp64m32 = 614,
+ DIVR_Fp80m32 = 615,
+ DIVR_Fp80m64 = 616,
+ DIVR_FpI16m32 = 617,
+ DIVR_FpI16m64 = 618,
+ DIVR_FpI16m80 = 619,
+ DIVR_FpI32m32 = 620,
+ DIVR_FpI32m64 = 621,
+ DIVR_FpI32m80 = 622,
+ DIVR_FrST0 = 623,
+ DIVSDrm = 624,
+ DIVSDrm_Int = 625,
+ DIVSDrr = 626,
+ DIVSDrr_Int = 627,
+ DIVSSrm = 628,
+ DIVSSrm_Int = 629,
+ DIVSSrr = 630,
+ DIVSSrr_Int = 631,
+ DIV_F32m = 632,
+ DIV_F64m = 633,
+ DIV_FI16m = 634,
+ DIV_FI32m = 635,
+ DIV_FPrST0 = 636,
+ DIV_FST0r = 637,
+ DIV_Fp32 = 638,
+ DIV_Fp32m = 639,
+ DIV_Fp64 = 640,
+ DIV_Fp64m = 641,
+ DIV_Fp64m32 = 642,
+ DIV_Fp80 = 643,
+ DIV_Fp80m32 = 644,
+ DIV_Fp80m64 = 645,
+ DIV_FpI16m32 = 646,
+ DIV_FpI16m64 = 647,
+ DIV_FpI16m80 = 648,
+ DIV_FpI32m32 = 649,
+ DIV_FpI32m64 = 650,
+ DIV_FpI32m80 = 651,
+ DIV_FrST0 = 652,
+ DPPDrmi = 653,
+ DPPDrri = 654,
+ DPPSrmi = 655,
+ DPPSrri = 656,
+ DS_PREFIX = 657,
+ EH_RETURN = 658,
+ EH_RETURN64 = 659,
+ ENTER = 660,
+ ES_PREFIX = 661,
+ EXTRACTPSmr = 662,
+ EXTRACTPSrr = 663,
+ F2XM1 = 664,
+ FARCALL16i = 665,
+ FARCALL16m = 666,
+ FARCALL32i = 667,
+ FARCALL32m = 668,
+ FARCALL64 = 669,
+ FARJMP16i = 670,
+ FARJMP16m = 671,
+ FARJMP32i = 672,
+ FARJMP32m = 673,
+ FARJMP64 = 674,
+ FBLDm = 675,
+ FBSTPm = 676,
+ FCOM32m = 677,
+ FCOM64m = 678,
+ FCOMP32m = 679,
+ FCOMP64m = 680,
+ FCOMPP = 681,
+ FDECSTP = 682,
+ FFREE = 683,
+ FICOM16m = 684,
+ FICOM32m = 685,
+ FICOMP16m = 686,
+ FICOMP32m = 687,
+ FINCSTP = 688,
+ FLDCW16m = 689,
+ FLDENVm = 690,
+ FLDL2E = 691,
+ FLDL2T = 692,
+ FLDLG2 = 693,
+ FLDLN2 = 694,
+ FLDPI = 695,
+ FNCLEX = 696,
+ FNINIT = 697,
+ FNOP = 698,
+ FNSTCW16m = 699,
+ FNSTSW8r = 700,
+ FNSTSWm = 701,
+ FP32_TO_INT16_IN_MEM = 702,
+ FP32_TO_INT32_IN_MEM = 703,
+ FP32_TO_INT64_IN_MEM = 704,
+ FP64_TO_INT16_IN_MEM = 705,
+ FP64_TO_INT32_IN_MEM = 706,
+ FP64_TO_INT64_IN_MEM = 707,
+ FP80_TO_INT16_IN_MEM = 708,
+ FP80_TO_INT32_IN_MEM = 709,
+ FP80_TO_INT64_IN_MEM = 710,
+ FPATAN = 711,
+ FPREM = 712,
+ FPREM1 = 713,
+ FPTAN = 714,
+ FRNDINT = 715,
+ FRSTORm = 716,
+ FSAVEm = 717,
+ FSCALE = 718,
+ FSINCOS = 719,
+ FSTENVm = 720,
+ FS_MOV32rm = 721,
+ FS_PREFIX = 722,
+ FXAM = 723,
+ FXRSTOR = 724,
+ FXSAVE = 725,
+ FXTRACT = 726,
+ FYL2X = 727,
+ FYL2XP1 = 728,
+ FpGET_ST0_32 = 729,
+ FpGET_ST0_64 = 730,
+ FpGET_ST0_80 = 731,
+ FpGET_ST1_32 = 732,
+ FpGET_ST1_64 = 733,
+ FpGET_ST1_80 = 734,
+ FpSET_ST0_32 = 735,
+ FpSET_ST0_64 = 736,
+ FpSET_ST0_80 = 737,
+ FpSET_ST1_32 = 738,
+ FpSET_ST1_64 = 739,
+ FpSET_ST1_80 = 740,
+ FsANDNPDrm = 741,
+ FsANDNPDrr = 742,
+ FsANDNPSrm = 743,
+ FsANDNPSrr = 744,
+ FsANDPDrm = 745,
+ FsANDPDrr = 746,
+ FsANDPSrm = 747,
+ FsANDPSrr = 748,
+ FsFLD0SD = 749,
+ FsFLD0SS = 750,
+ FsMOVAPDrm = 751,
+ FsMOVAPDrr = 752,
+ FsMOVAPSrm = 753,
+ FsMOVAPSrr = 754,
+ FsORPDrm = 755,
+ FsORPDrr = 756,
+ FsORPSrm = 757,
+ FsORPSrr = 758,
+ FsXORPDrm = 759,
+ FsXORPDrr = 760,
+ FsXORPSrm = 761,
+ FsXORPSrr = 762,
+ GS_MOV32rm = 763,
+ GS_PREFIX = 764,
+ HADDPDrm = 765,
+ HADDPDrr = 766,
+ HADDPSrm = 767,
+ HADDPSrr = 768,
+ HLT = 769,
+ HSUBPDrm = 770,
+ HSUBPDrr = 771,
+ HSUBPSrm = 772,
+ HSUBPSrr = 773,
+ IDIV16m = 774,
+ IDIV16r = 775,
+ IDIV32m = 776,
+ IDIV32r = 777,
+ IDIV64m = 778,
+ IDIV64r = 779,
+ IDIV8m = 780,
+ IDIV8r = 781,
+ ILD_F16m = 782,
+ ILD_F32m = 783,
+ ILD_F64m = 784,
+ ILD_Fp16m32 = 785,
+ ILD_Fp16m64 = 786,
+ ILD_Fp16m80 = 787,
+ ILD_Fp32m32 = 788,
+ ILD_Fp32m64 = 789,
+ ILD_Fp32m80 = 790,
+ ILD_Fp64m32 = 791,
+ ILD_Fp64m64 = 792,
+ ILD_Fp64m80 = 793,
+ IMUL16m = 794,
+ IMUL16r = 795,
+ IMUL16rm = 796,
+ IMUL16rmi = 797,
+ IMUL16rmi8 = 798,
+ IMUL16rr = 799,
+ IMUL16rri = 800,
+ IMUL16rri8 = 801,
+ IMUL32m = 802,
+ IMUL32r = 803,
+ IMUL32rm = 804,
+ IMUL32rmi = 805,
+ IMUL32rmi8 = 806,
+ IMUL32rr = 807,
+ IMUL32rri = 808,
+ IMUL32rri8 = 809,
+ IMUL64m = 810,
+ IMUL64r = 811,
+ IMUL64rm = 812,
+ IMUL64rmi32 = 813,
+ IMUL64rmi8 = 814,
+ IMUL64rr = 815,
+ IMUL64rri32 = 816,
+ IMUL64rri8 = 817,
+ IMUL8m = 818,
+ IMUL8r = 819,
+ IN16 = 820,
+ IN16ri = 821,
+ IN16rr = 822,
+ IN32 = 823,
+ IN32ri = 824,
+ IN32rr = 825,
+ IN8 = 826,
+ IN8ri = 827,
+ IN8rr = 828,
+ INC16m = 829,
+ INC16r = 830,
+ INC32m = 831,
+ INC32r = 832,
+ INC64_16m = 833,
+ INC64_16r = 834,
+ INC64_32m = 835,
+ INC64_32r = 836,
+ INC64m = 837,
+ INC64r = 838,
+ INC8m = 839,
+ INC8r = 840,
+ INSERTPSrm = 841,
+ INSERTPSrr = 842,
+ INT = 843,
+ INT3 = 844,
+ INTO = 845,
+ INVD = 846,
+ INVEPT = 847,
+ INVLPG = 848,
+ INVVPID = 849,
+ IRET16 = 850,
+ IRET32 = 851,
+ IRET64 = 852,
+ ISTT_FP16m = 853,
+ ISTT_FP32m = 854,
+ ISTT_FP64m = 855,
+ ISTT_Fp16m32 = 856,
+ ISTT_Fp16m64 = 857,
+ ISTT_Fp16m80 = 858,
+ ISTT_Fp32m32 = 859,
+ ISTT_Fp32m64 = 860,
+ ISTT_Fp32m80 = 861,
+ ISTT_Fp64m32 = 862,
+ ISTT_Fp64m64 = 863,
+ ISTT_Fp64m80 = 864,
+ IST_F16m = 865,
+ IST_F32m = 866,
+ IST_FP16m = 867,
+ IST_FP32m = 868,
+ IST_FP64m = 869,
+ IST_Fp16m32 = 870,
+ IST_Fp16m64 = 871,
+ IST_Fp16m80 = 872,
+ IST_Fp32m32 = 873,
+ IST_Fp32m64 = 874,
+ IST_Fp32m80 = 875,
+ IST_Fp64m32 = 876,
+ IST_Fp64m64 = 877,
+ IST_Fp64m80 = 878,
+ Int_CMPSDrm = 879,
+ Int_CMPSDrr = 880,
+ Int_CMPSSrm = 881,
+ Int_CMPSSrr = 882,
+ Int_COMISDrm = 883,
+ Int_COMISDrr = 884,
+ Int_COMISSrm = 885,
+ Int_COMISSrr = 886,
+ Int_CVTDQ2PDrm = 887,
+ Int_CVTDQ2PDrr = 888,
+ Int_CVTDQ2PSrm = 889,
+ Int_CVTDQ2PSrr = 890,
+ Int_CVTPD2DQrm = 891,
+ Int_CVTPD2DQrr = 892,
+ Int_CVTPD2PIrm = 893,
+ Int_CVTPD2PIrr = 894,
+ Int_CVTPD2PSrm = 895,
+ Int_CVTPD2PSrr = 896,
+ Int_CVTPI2PDrm = 897,
+ Int_CVTPI2PDrr = 898,
+ Int_CVTPI2PSrm = 899,
+ Int_CVTPI2PSrr = 900,
+ Int_CVTPS2DQrm = 901,
+ Int_CVTPS2DQrr = 902,
+ Int_CVTPS2PDrm = 903,
+ Int_CVTPS2PDrr = 904,
+ Int_CVTPS2PIrm = 905,
+ Int_CVTPS2PIrr = 906,
+ Int_CVTSD2SI64rm = 907,
+ Int_CVTSD2SI64rr = 908,
+ Int_CVTSD2SIrm = 909,
+ Int_CVTSD2SIrr = 910,
+ Int_CVTSD2SSrm = 911,
+ Int_CVTSD2SSrr = 912,
+ Int_CVTSI2SD64rm = 913,
+ Int_CVTSI2SD64rr = 914,
+ Int_CVTSI2SDrm = 915,
+ Int_CVTSI2SDrr = 916,
+ Int_CVTSI2SS64rm = 917,
+ Int_CVTSI2SS64rr = 918,
+ Int_CVTSI2SSrm = 919,
+ Int_CVTSI2SSrr = 920,
+ Int_CVTSS2SDrm = 921,
+ Int_CVTSS2SDrr = 922,
+ Int_CVTSS2SI64rm = 923,
+ Int_CVTSS2SI64rr = 924,
+ Int_CVTSS2SIrm = 925,
+ Int_CVTSS2SIrr = 926,
+ Int_CVTTPD2DQrm = 927,
+ Int_CVTTPD2DQrr = 928,
+ Int_CVTTPD2PIrm = 929,
+ Int_CVTTPD2PIrr = 930,
+ Int_CVTTPS2DQrm = 931,
+ Int_CVTTPS2DQrr = 932,
+ Int_CVTTPS2PIrm = 933,
+ Int_CVTTPS2PIrr = 934,
+ Int_CVTTSD2SI64rm = 935,
+ Int_CVTTSD2SI64rr = 936,
+ Int_CVTTSD2SIrm = 937,
+ Int_CVTTSD2SIrr = 938,
+ Int_CVTTSS2SI64rm = 939,
+ Int_CVTTSS2SI64rr = 940,
+ Int_CVTTSS2SIrm = 941,
+ Int_CVTTSS2SIrr = 942,
+ Int_MemBarrier = 943,
+ Int_MemBarrierNoSSE64 = 944,
+ Int_UCOMISDrm = 945,
+ Int_UCOMISDrr = 946,
+ Int_UCOMISSrm = 947,
+ Int_UCOMISSrr = 948,
+ Int_VCMPSDrm = 949,
+ Int_VCMPSDrr = 950,
+ Int_VCMPSSrm = 951,
+ Int_VCMPSSrr = 952,
+ Int_VCOMISDrm = 953,
+ Int_VCOMISDrr = 954,
+ Int_VCOMISSrm = 955,
+ Int_VCOMISSrr = 956,
+ Int_VCVTDQ2PDrm = 957,
+ Int_VCVTDQ2PDrr = 958,
+ Int_VCVTDQ2PSrm = 959,
+ Int_VCVTDQ2PSrr = 960,
+ Int_VCVTPD2DQrm = 961,
+ Int_VCVTPD2DQrr = 962,
+ Int_VCVTPD2PSrm = 963,
+ Int_VCVTPD2PSrr = 964,
+ Int_VCVTPS2DQrm = 965,
+ Int_VCVTPS2DQrr = 966,
+ Int_VCVTPS2PDrm = 967,
+ Int_VCVTPS2PDrr = 968,
+ Int_VCVTSD2SI64rm = 969,
+ Int_VCVTSD2SI64rr = 970,
+ Int_VCVTSD2SIrm = 971,
+ Int_VCVTSD2SIrr = 972,
+ Int_VCVTSD2SSrm = 973,
+ Int_VCVTSD2SSrr = 974,
+ Int_VCVTSI2SD64rm = 975,
+ Int_VCVTSI2SD64rr = 976,
+ Int_VCVTSI2SDrm = 977,
+ Int_VCVTSI2SDrr = 978,
+ Int_VCVTSI2SS64rm = 979,
+ Int_VCVTSI2SS64rr = 980,
+ Int_VCVTSI2SSrm = 981,
+ Int_VCVTSI2SSrr = 982,
+ Int_VCVTSS2SDrm = 983,
+ Int_VCVTSS2SDrr = 984,
+ Int_VCVTSS2SI64rm = 985,
+ Int_VCVTSS2SI64rr = 986,
+ Int_VCVTSS2SIrm = 987,
+ Int_VCVTSS2SIrr = 988,
+ Int_VCVTTPD2DQrm = 989,
+ Int_VCVTTPD2DQrr = 990,
+ Int_VCVTTPS2DQrm = 991,
+ Int_VCVTTPS2DQrr = 992,
+ Int_VCVTTSD2SI64rm = 993,
+ Int_VCVTTSD2SI64rr = 994,
+ Int_VCVTTSD2SIrm = 995,
+ Int_VCVTTSD2SIrr = 996,
+ Int_VCVTTSS2SI64rm = 997,
+ Int_VCVTTSS2SI64rr = 998,
+ Int_VCVTTSS2SIrm = 999,
+ Int_VCVTTSS2SIrr = 1000,
+ Int_VUCOMISDrm = 1001,
+ Int_VUCOMISDrr = 1002,
+ Int_VUCOMISSrm = 1003,
+ Int_VUCOMISSrr = 1004,
+ JAE_1 = 1005,
+ JAE_4 = 1006,
+ JA_1 = 1007,
+ JA_4 = 1008,
+ JBE_1 = 1009,
+ JBE_4 = 1010,
+ JB_1 = 1011,
+ JB_4 = 1012,
+ JCXZ8 = 1013,
+ JE_1 = 1014,
+ JE_4 = 1015,
+ JGE_1 = 1016,
+ JGE_4 = 1017,
+ JG_1 = 1018,
+ JG_4 = 1019,
+ JLE_1 = 1020,
+ JLE_4 = 1021,
+ JL_1 = 1022,
+ JL_4 = 1023,
+ JMP32m = 1024,
+ JMP32r = 1025,
+ JMP64m = 1026,
+ JMP64pcrel32 = 1027,
+ JMP64r = 1028,
+ JMP_1 = 1029,
+ JMP_4 = 1030,
+ JNE_1 = 1031,
+ JNE_4 = 1032,
+ JNO_1 = 1033,
+ JNO_4 = 1034,
+ JNP_1 = 1035,
+ JNP_4 = 1036,
+ JNS_1 = 1037,
+ JNS_4 = 1038,
+ JO_1 = 1039,
+ JO_4 = 1040,
+ JP_1 = 1041,
+ JP_4 = 1042,
+ JS_1 = 1043,
+ JS_4 = 1044,
+ LAHF = 1045,
+ LAR16rm = 1046,
+ LAR16rr = 1047,
+ LAR32rm = 1048,
+ LAR32rr = 1049,
+ LAR64rm = 1050,
+ LAR64rr = 1051,
+ LCMPXCHG16 = 1052,
+ LCMPXCHG32 = 1053,
+ LCMPXCHG64 = 1054,
+ LCMPXCHG8 = 1055,
+ LCMPXCHG8B = 1056,
+ LDDQUrm = 1057,
+ LDMXCSR = 1058,
+ LDS16rm = 1059,
+ LDS32rm = 1060,
+ LD_F0 = 1061,
+ LD_F1 = 1062,
+ LD_F32m = 1063,
+ LD_F64m = 1064,
+ LD_F80m = 1065,
+ LD_Fp032 = 1066,
+ LD_Fp064 = 1067,
+ LD_Fp080 = 1068,
+ LD_Fp132 = 1069,
+ LD_Fp164 = 1070,
+ LD_Fp180 = 1071,
+ LD_Fp32m = 1072,
+ LD_Fp32m64 = 1073,
+ LD_Fp32m80 = 1074,
+ LD_Fp64m = 1075,
+ LD_Fp64m80 = 1076,
+ LD_Fp80m = 1077,
+ LD_Frr = 1078,
+ LEA16r = 1079,
+ LEA32r = 1080,
+ LEA64_32r = 1081,
+ LEA64r = 1082,
+ LEAVE = 1083,
+ LEAVE64 = 1084,
+ LES16rm = 1085,
+ LES32rm = 1086,
+ LFENCE = 1087,
+ LFS16rm = 1088,
+ LFS32rm = 1089,
+ LFS64rm = 1090,
+ LGDTm = 1091,
+ LGS16rm = 1092,
+ LGS32rm = 1093,
+ LGS64rm = 1094,
+ LIDTm = 1095,
+ LLDT16m = 1096,
+ LLDT16r = 1097,
+ LMSW16m = 1098,
+ LMSW16r = 1099,
+ LOCK_ADD16mi = 1100,
+ LOCK_ADD16mi8 = 1101,
+ LOCK_ADD16mr = 1102,
+ LOCK_ADD32mi = 1103,
+ LOCK_ADD32mi8 = 1104,
+ LOCK_ADD32mr = 1105,
+ LOCK_ADD64mi32 = 1106,
+ LOCK_ADD64mi8 = 1107,
+ LOCK_ADD64mr = 1108,
+ LOCK_ADD8mi = 1109,
+ LOCK_ADD8mr = 1110,
+ LOCK_DEC16m = 1111,
+ LOCK_DEC32m = 1112,
+ LOCK_DEC64m = 1113,
+ LOCK_DEC8m = 1114,
+ LOCK_INC16m = 1115,
+ LOCK_INC32m = 1116,
+ LOCK_INC64m = 1117,
+ LOCK_INC8m = 1118,
+ LOCK_PREFIX = 1119,
+ LOCK_SUB16mi = 1120,
+ LOCK_SUB16mi8 = 1121,
+ LOCK_SUB16mr = 1122,
+ LOCK_SUB32mi = 1123,
+ LOCK_SUB32mi8 = 1124,
+ LOCK_SUB32mr = 1125,
+ LOCK_SUB64mi32 = 1126,
+ LOCK_SUB64mi8 = 1127,
+ LOCK_SUB64mr = 1128,
+ LOCK_SUB8mi = 1129,
+ LOCK_SUB8mr = 1130,
+ LODSB = 1131,
+ LODSD = 1132,
+ LODSQ = 1133,
+ LODSW = 1134,
+ LOOP = 1135,
+ LOOPE = 1136,
+ LOOPNE = 1137,
+ LRET = 1138,
+ LRETI = 1139,
+ LSL16rm = 1140,
+ LSL16rr = 1141,
+ LSL32rm = 1142,
+ LSL32rr = 1143,
+ LSL64rm = 1144,
+ LSL64rr = 1145,
+ LSS16rm = 1146,
+ LSS32rm = 1147,
+ LSS64rm = 1148,
+ LTRm = 1149,
+ LTRr = 1150,
+ LXADD16 = 1151,
+ LXADD32 = 1152,
+ LXADD64 = 1153,
+ LXADD8 = 1154,
+ MASKMOVDQU = 1155,
+ MASKMOVDQU64 = 1156,
+ MAXPDrm = 1157,
+ MAXPDrm_Int = 1158,
+ MAXPDrr = 1159,
+ MAXPDrr_Int = 1160,
+ MAXPSrm = 1161,
+ MAXPSrm_Int = 1162,
+ MAXPSrr = 1163,
+ MAXPSrr_Int = 1164,
+ MAXSDrm = 1165,
+ MAXSDrm_Int = 1166,
+ MAXSDrr = 1167,
+ MAXSDrr_Int = 1168,
+ MAXSSrm = 1169,
+ MAXSSrm_Int = 1170,
+ MAXSSrr = 1171,
+ MAXSSrr_Int = 1172,
+ MFENCE = 1173,
+ MINGW_ALLOCA = 1174,
+ MINPDrm = 1175,
+ MINPDrm_Int = 1176,
+ MINPDrr = 1177,
+ MINPDrr_Int = 1178,
+ MINPSrm = 1179,
+ MINPSrm_Int = 1180,
+ MINPSrr = 1181,
+ MINPSrr_Int = 1182,
+ MINSDrm = 1183,
+ MINSDrm_Int = 1184,
+ MINSDrr = 1185,
+ MINSDrr_Int = 1186,
+ MINSSrm = 1187,
+ MINSSrm_Int = 1188,
+ MINSSrr = 1189,
+ MINSSrr_Int = 1190,
+ MMX_CVTPD2PIrm = 1191,
+ MMX_CVTPD2PIrr = 1192,
+ MMX_CVTPI2PDrm = 1193,
+ MMX_CVTPI2PDrr = 1194,
+ MMX_CVTPI2PSrm = 1195,
+ MMX_CVTPI2PSrr = 1196,
+ MMX_CVTPS2PIrm = 1197,
+ MMX_CVTPS2PIrr = 1198,
+ MMX_CVTTPD2PIrm = 1199,
+ MMX_CVTTPD2PIrr = 1200,
+ MMX_CVTTPS2PIrm = 1201,
+ MMX_CVTTPS2PIrr = 1202,
+ MMX_EMMS = 1203,
+ MMX_FEMMS = 1204,
+ MMX_MASKMOVQ = 1205,
+ MMX_MASKMOVQ64 = 1206,
+ MMX_MOVD64from64rr = 1207,
+ MMX_MOVD64grr = 1208,
+ MMX_MOVD64mr = 1209,
+ MMX_MOVD64rm = 1210,
+ MMX_MOVD64rr = 1211,
+ MMX_MOVD64rrv164 = 1212,
+ MMX_MOVD64to64rr = 1213,
+ MMX_MOVDQ2Qrr = 1214,
+ MMX_MOVFR642Qrr = 1215,
+ MMX_MOVNTQmr = 1216,
+ MMX_MOVQ2DQrr = 1217,
+ MMX_MOVQ2FR64rr = 1218,
+ MMX_MOVQ64mr = 1219,
+ MMX_MOVQ64rm = 1220,
+ MMX_MOVQ64rr = 1221,
+ MMX_MOVZDI2PDIrm = 1222,
+ MMX_MOVZDI2PDIrr = 1223,
+ MMX_PACKSSDWrm = 1224,
+ MMX_PACKSSDWrr = 1225,
+ MMX_PACKSSWBrm = 1226,
+ MMX_PACKSSWBrr = 1227,
+ MMX_PACKUSWBrm = 1228,
+ MMX_PACKUSWBrr = 1229,
+ MMX_PADDBrm = 1230,
+ MMX_PADDBrr = 1231,
+ MMX_PADDDrm = 1232,
+ MMX_PADDDrr = 1233,
+ MMX_PADDQrm = 1234,
+ MMX_PADDQrr = 1235,
+ MMX_PADDSBrm = 1236,
+ MMX_PADDSBrr = 1237,
+ MMX_PADDSWrm = 1238,
+ MMX_PADDSWrr = 1239,
+ MMX_PADDUSBrm = 1240,
+ MMX_PADDUSBrr = 1241,
+ MMX_PADDUSWrm = 1242,
+ MMX_PADDUSWrr = 1243,
+ MMX_PADDWrm = 1244,
+ MMX_PADDWrr = 1245,
+ MMX_PANDNrm = 1246,
+ MMX_PANDNrr = 1247,
+ MMX_PANDrm = 1248,
+ MMX_PANDrr = 1249,
+ MMX_PAVGBrm = 1250,
+ MMX_PAVGBrr = 1251,
+ MMX_PAVGWrm = 1252,
+ MMX_PAVGWrr = 1253,
+ MMX_PCMPEQBrm = 1254,
+ MMX_PCMPEQBrr = 1255,
+ MMX_PCMPEQDrm = 1256,
+ MMX_PCMPEQDrr = 1257,
+ MMX_PCMPEQWrm = 1258,
+ MMX_PCMPEQWrr = 1259,
+ MMX_PCMPGTBrm = 1260,
+ MMX_PCMPGTBrr = 1261,
+ MMX_PCMPGTDrm = 1262,
+ MMX_PCMPGTDrr = 1263,
+ MMX_PCMPGTWrm = 1264,
+ MMX_PCMPGTWrr = 1265,
+ MMX_PEXTRWri = 1266,
+ MMX_PINSRWrmi = 1267,
+ MMX_PINSRWrri = 1268,
+ MMX_PMADDWDrm = 1269,
+ MMX_PMADDWDrr = 1270,
+ MMX_PMAXSWrm = 1271,
+ MMX_PMAXSWrr = 1272,
+ MMX_PMAXUBrm = 1273,
+ MMX_PMAXUBrr = 1274,
+ MMX_PMINSWrm = 1275,
+ MMX_PMINSWrr = 1276,
+ MMX_PMINUBrm = 1277,
+ MMX_PMINUBrr = 1278,
+ MMX_PMOVMSKBrr = 1279,
+ MMX_PMULHUWrm = 1280,
+ MMX_PMULHUWrr = 1281,
+ MMX_PMULHWrm = 1282,
+ MMX_PMULHWrr = 1283,
+ MMX_PMULLWrm = 1284,
+ MMX_PMULLWrr = 1285,
+ MMX_PMULUDQrm = 1286,
+ MMX_PMULUDQrr = 1287,
+ MMX_PORrm = 1288,
+ MMX_PORrr = 1289,
+ MMX_PSADBWrm = 1290,
+ MMX_PSADBWrr = 1291,
+ MMX_PSHUFWmi = 1292,
+ MMX_PSHUFWri = 1293,
+ MMX_PSLLDri = 1294,
+ MMX_PSLLDrm = 1295,
+ MMX_PSLLDrr = 1296,
+ MMX_PSLLQri = 1297,
+ MMX_PSLLQrm = 1298,
+ MMX_PSLLQrr = 1299,
+ MMX_PSLLWri = 1300,
+ MMX_PSLLWrm = 1301,
+ MMX_PSLLWrr = 1302,
+ MMX_PSRADri = 1303,
+ MMX_PSRADrm = 1304,
+ MMX_PSRADrr = 1305,
+ MMX_PSRAWri = 1306,
+ MMX_PSRAWrm = 1307,
+ MMX_PSRAWrr = 1308,
+ MMX_PSRLDri = 1309,
+ MMX_PSRLDrm = 1310,
+ MMX_PSRLDrr = 1311,
+ MMX_PSRLQri = 1312,
+ MMX_PSRLQrm = 1313,
+ MMX_PSRLQrr = 1314,
+ MMX_PSRLWri = 1315,
+ MMX_PSRLWrm = 1316,
+ MMX_PSRLWrr = 1317,
+ MMX_PSUBBrm = 1318,
+ MMX_PSUBBrr = 1319,
+ MMX_PSUBDrm = 1320,
+ MMX_PSUBDrr = 1321,
+ MMX_PSUBQrm = 1322,
+ MMX_PSUBQrr = 1323,
+ MMX_PSUBSBrm = 1324,
+ MMX_PSUBSBrr = 1325,
+ MMX_PSUBSWrm = 1326,
+ MMX_PSUBSWrr = 1327,
+ MMX_PSUBUSBrm = 1328,
+ MMX_PSUBUSBrr = 1329,
+ MMX_PSUBUSWrm = 1330,
+ MMX_PSUBUSWrr = 1331,
+ MMX_PSUBWrm = 1332,
+ MMX_PSUBWrr = 1333,
+ MMX_PUNPCKHBWrm = 1334,
+ MMX_PUNPCKHBWrr = 1335,
+ MMX_PUNPCKHDQrm = 1336,
+ MMX_PUNPCKHDQrr = 1337,
+ MMX_PUNPCKHWDrm = 1338,
+ MMX_PUNPCKHWDrr = 1339,
+ MMX_PUNPCKLBWrm = 1340,
+ MMX_PUNPCKLBWrr = 1341,
+ MMX_PUNPCKLDQrm = 1342,
+ MMX_PUNPCKLDQrr = 1343,
+ MMX_PUNPCKLWDrm = 1344,
+ MMX_PUNPCKLWDrr = 1345,
+ MMX_PXORrm = 1346,
+ MMX_PXORrr = 1347,
+ MMX_V_SET0 = 1348,
+ MMX_V_SETALLONES = 1349,
+ MONITOR = 1350,
+ MOV16ao16 = 1351,
+ MOV16mi = 1352,
+ MOV16mr = 1353,
+ MOV16ms = 1354,
+ MOV16o16a = 1355,
+ MOV16r0 = 1356,
+ MOV16ri = 1357,
+ MOV16rm = 1358,
+ MOV16rr = 1359,
+ MOV16rr_REV = 1360,
+ MOV16rs = 1361,
+ MOV16sm = 1362,
+ MOV16sr = 1363,
+ MOV32ao32 = 1364,
+ MOV32cr = 1365,
+ MOV32dr = 1366,
+ MOV32mi = 1367,
+ MOV32mr = 1368,
+ MOV32mr_TC = 1369,
+ MOV32ms = 1370,
+ MOV32o32a = 1371,
+ MOV32r0 = 1372,
+ MOV32rc = 1373,
+ MOV32rd = 1374,
+ MOV32ri = 1375,
+ MOV32rm = 1376,
+ MOV32rm_TC = 1377,
+ MOV32rr = 1378,
+ MOV32rr_REV = 1379,
+ MOV32rr_TC = 1380,
+ MOV32rs = 1381,
+ MOV32sm = 1382,
+ MOV32sr = 1383,
+ MOV64FSrm = 1384,
+ MOV64GSrm = 1385,
+ MOV64cr = 1386,
+ MOV64dr = 1387,
+ MOV64mi32 = 1388,
+ MOV64mr = 1389,
+ MOV64mr_TC = 1390,
+ MOV64ms = 1391,
+ MOV64r0 = 1392,
+ MOV64rc = 1393,
+ MOV64rd = 1394,
+ MOV64ri = 1395,
+ MOV64ri32 = 1396,
+ MOV64ri64i32 = 1397,
+ MOV64ri_alt = 1398,
+ MOV64rm = 1399,
+ MOV64rm_TC = 1400,
+ MOV64rr = 1401,
+ MOV64rr_REV = 1402,
+ MOV64rr_TC = 1403,
+ MOV64rs = 1404,
+ MOV64sm = 1405,
+ MOV64sr = 1406,
+ MOV64toPQIrr = 1407,
+ MOV64toSDrm = 1408,
+ MOV64toSDrr = 1409,
+ MOV8ao8 = 1410,
+ MOV8mi = 1411,
+ MOV8mr = 1412,
+ MOV8mr_NOREX = 1413,
+ MOV8o8a = 1414,
+ MOV8r0 = 1415,
+ MOV8ri = 1416,
+ MOV8rm = 1417,
+ MOV8rm_NOREX = 1418,
+ MOV8rr = 1419,
+ MOV8rr_NOREX = 1420,
+ MOV8rr_REV = 1421,
+ MOVAPDmr = 1422,
+ MOVAPDrm = 1423,
+ MOVAPDrr = 1424,
+ MOVAPSmr = 1425,
+ MOVAPSrm = 1426,
+ MOVAPSrr = 1427,
+ MOVDDUPrm = 1428,
+ MOVDDUPrr = 1429,
+ MOVDI2PDIrm = 1430,
+ MOVDI2PDIrr = 1431,
+ MOVDI2SSrm = 1432,
+ MOVDI2SSrr = 1433,
+ MOVDQAmr = 1434,
+ MOVDQArm = 1435,
+ MOVDQArr = 1436,
+ MOVDQUmr = 1437,
+ MOVDQUmr_Int = 1438,
+ MOVDQUrm = 1439,
+ MOVDQUrm_Int = 1440,
+ MOVHLPSrr = 1441,
+ MOVHPDmr = 1442,
+ MOVHPDrm = 1443,
+ MOVHPSmr = 1444,
+ MOVHPSrm = 1445,
+ MOVLHPSrr = 1446,
+ MOVLPDmr = 1447,
+ MOVLPDrm = 1448,
+ MOVLPSmr = 1449,
+ MOVLPSrm = 1450,
+ MOVLQ128mr = 1451,
+ MOVMSKPDrr = 1452,
+ MOVMSKPSrr = 1453,
+ MOVNTDQArm = 1454,
+ MOVNTDQ_64mr = 1455,
+ MOVNTDQmr = 1456,
+ MOVNTDQmr_Int = 1457,
+ MOVNTI_64mr = 1458,
+ MOVNTImr = 1459,
+ MOVNTImr_Int = 1460,
+ MOVNTPDmr = 1461,
+ MOVNTPDmr_Int = 1462,
+ MOVNTPSmr = 1463,
+ MOVNTPSmr_Int = 1464,
+ MOVPC32r = 1465,
+ MOVPDI2DImr = 1466,
+ MOVPDI2DIrr = 1467,
+ MOVPQI2QImr = 1468,
+ MOVPQIto64rr = 1469,
+ MOVQI2PQIrm = 1470,
+ MOVQxrxr = 1471,
+ MOVSB = 1472,
+ MOVSD = 1473,
+ MOVSDmr = 1474,
+ MOVSDrm = 1475,
+ MOVSDrr = 1476,
+ MOVSDto64mr = 1477,
+ MOVSDto64rr = 1478,
+ MOVSHDUPrm = 1479,
+ MOVSHDUPrr = 1480,
+ MOVSLDUPrm = 1481,
+ MOVSLDUPrr = 1482,
+ MOVSQ = 1483,
+ MOVSS2DImr = 1484,
+ MOVSS2DIrr = 1485,
+ MOVSSmr = 1486,
+ MOVSSrm = 1487,
+ MOVSSrr = 1488,
+ MOVSW = 1489,
+ MOVSX16rm8 = 1490,
+ MOVSX16rm8W = 1491,
+ MOVSX16rr8 = 1492,
+ MOVSX16rr8W = 1493,
+ MOVSX32rm16 = 1494,
+ MOVSX32rm8 = 1495,
+ MOVSX32rr16 = 1496,
+ MOVSX32rr8 = 1497,
+ MOVSX64rm16 = 1498,
+ MOVSX64rm32 = 1499,
+ MOVSX64rm8 = 1500,
+ MOVSX64rr16 = 1501,
+ MOVSX64rr32 = 1502,
+ MOVSX64rr8 = 1503,
+ MOVUPDmr = 1504,
+ MOVUPDmr_Int = 1505,
+ MOVUPDrm = 1506,
+ MOVUPDrm_Int = 1507,
+ MOVUPDrr = 1508,
+ MOVUPSmr = 1509,
+ MOVUPSmr_Int = 1510,
+ MOVUPSrm = 1511,
+ MOVUPSrm_Int = 1512,
+ MOVUPSrr = 1513,
+ MOVZDI2PDIrm = 1514,
+ MOVZDI2PDIrr = 1515,
+ MOVZPQILo2PQIrm = 1516,
+ MOVZPQILo2PQIrr = 1517,
+ MOVZQI2PQIrm = 1518,
+ MOVZQI2PQIrr = 1519,
+ MOVZX16rm8 = 1520,
+ MOVZX16rm8W = 1521,
+ MOVZX16rr8 = 1522,
+ MOVZX16rr8W = 1523,
+ MOVZX32_NOREXrm8 = 1524,
+ MOVZX32_NOREXrr8 = 1525,
+ MOVZX32rm16 = 1526,
+ MOVZX32rm8 = 1527,
+ MOVZX32rr16 = 1528,
+ MOVZX32rr8 = 1529,
+ MOVZX64rm16 = 1530,
+ MOVZX64rm16_Q = 1531,
+ MOVZX64rm32 = 1532,
+ MOVZX64rm8 = 1533,
+ MOVZX64rm8_Q = 1534,
+ MOVZX64rr16 = 1535,
+ MOVZX64rr16_Q = 1536,
+ MOVZX64rr32 = 1537,
+ MOVZX64rr8 = 1538,
+ MOVZX64rr8_Q = 1539,
+ MOV_Fp3232 = 1540,
+ MOV_Fp3264 = 1541,
+ MOV_Fp3280 = 1542,
+ MOV_Fp6432 = 1543,
+ MOV_Fp6464 = 1544,
+ MOV_Fp6480 = 1545,
+ MOV_Fp8032 = 1546,
+ MOV_Fp8064 = 1547,
+ MOV_Fp8080 = 1548,
+ MPSADBWrmi = 1549,
+ MPSADBWrri = 1550,
+ MUL16m = 1551,
+ MUL16r = 1552,
+ MUL32m = 1553,
+ MUL32r = 1554,
+ MUL64m = 1555,
+ MUL64r = 1556,
+ MUL8m = 1557,
+ MUL8r = 1558,
+ MULPDrm = 1559,
+ MULPDrr = 1560,
+ MULPSrm = 1561,
+ MULPSrr = 1562,
+ MULSDrm = 1563,
+ MULSDrm_Int = 1564,
+ MULSDrr = 1565,
+ MULSDrr_Int = 1566,
+ MULSSrm = 1567,
+ MULSSrm_Int = 1568,
+ MULSSrr = 1569,
+ MULSSrr_Int = 1570,
+ MUL_F32m = 1571,
+ MUL_F64m = 1572,
+ MUL_FI16m = 1573,
+ MUL_FI32m = 1574,
+ MUL_FPrST0 = 1575,
+ MUL_FST0r = 1576,
+ MUL_Fp32 = 1577,
+ MUL_Fp32m = 1578,
+ MUL_Fp64 = 1579,
+ MUL_Fp64m = 1580,
+ MUL_Fp64m32 = 1581,
+ MUL_Fp80 = 1582,
+ MUL_Fp80m32 = 1583,
+ MUL_Fp80m64 = 1584,
+ MUL_FpI16m32 = 1585,
+ MUL_FpI16m64 = 1586,
+ MUL_FpI16m80 = 1587,
+ MUL_FpI32m32 = 1588,
+ MUL_FpI32m64 = 1589,
+ MUL_FpI32m80 = 1590,
+ MUL_FrST0 = 1591,
+ MWAIT = 1592,
+ NEG16m = 1593,
+ NEG16r = 1594,
+ NEG32m = 1595,
+ NEG32r = 1596,
+ NEG64m = 1597,
+ NEG64r = 1598,
+ NEG8m = 1599,
+ NEG8r = 1600,
+ NOOP = 1601,
+ NOOPL = 1602,
+ NOOPW = 1603,
+ NOT16m = 1604,
+ NOT16r = 1605,
+ NOT32m = 1606,
+ NOT32r = 1607,
+ NOT64m = 1608,
+ NOT64r = 1609,
+ NOT8m = 1610,
+ NOT8r = 1611,
+ OR16i16 = 1612,
+ OR16mi = 1613,
+ OR16mi8 = 1614,
+ OR16mr = 1615,
+ OR16ri = 1616,
+ OR16ri8 = 1617,
+ OR16rm = 1618,
+ OR16rr = 1619,
+ OR16rr_REV = 1620,
+ OR32i32 = 1621,
+ OR32mi = 1622,
+ OR32mi8 = 1623,
+ OR32mr = 1624,
+ OR32mrLocked = 1625,
+ OR32ri = 1626,
+ OR32ri8 = 1627,
+ OR32rm = 1628,
+ OR32rr = 1629,
+ OR32rr_REV = 1630,
+ OR64i32 = 1631,
+ OR64mi32 = 1632,
+ OR64mi8 = 1633,
+ OR64mr = 1634,
+ OR64ri32 = 1635,
+ OR64ri8 = 1636,
+ OR64rm = 1637,
+ OR64rr = 1638,
+ OR64rr_REV = 1639,
+ OR8i8 = 1640,
+ OR8mi = 1641,
+ OR8mr = 1642,
+ OR8ri = 1643,
+ OR8rm = 1644,
+ OR8rr = 1645,
+ OR8rr_REV = 1646,
+ ORPDrm = 1647,
+ ORPDrr = 1648,
+ ORPSrm = 1649,
+ ORPSrr = 1650,
+ OUT16ir = 1651,
+ OUT16rr = 1652,
+ OUT32ir = 1653,
+ OUT32rr = 1654,
+ OUT8ir = 1655,
+ OUT8rr = 1656,
+ OUTSB = 1657,
+ OUTSD = 1658,
+ OUTSW = 1659,
+ PABSBrm128 = 1660,
+ PABSBrm64 = 1661,
+ PABSBrr128 = 1662,
+ PABSBrr64 = 1663,
+ PABSDrm128 = 1664,
+ PABSDrm64 = 1665,
+ PABSDrr128 = 1666,
+ PABSDrr64 = 1667,
+ PABSWrm128 = 1668,
+ PABSWrm64 = 1669,
+ PABSWrr128 = 1670,
+ PABSWrr64 = 1671,
+ PACKSSDWrm = 1672,
+ PACKSSDWrr = 1673,
+ PACKSSWBrm = 1674,
+ PACKSSWBrr = 1675,
+ PACKUSDWrm = 1676,
+ PACKUSDWrr = 1677,
+ PACKUSWBrm = 1678,
+ PACKUSWBrr = 1679,
+ PADDBrm = 1680,
+ PADDBrr = 1681,
+ PADDDrm = 1682,
+ PADDDrr = 1683,
+ PADDQrm = 1684,
+ PADDQrr = 1685,
+ PADDSBrm = 1686,
+ PADDSBrr = 1687,
+ PADDSWrm = 1688,
+ PADDSWrr = 1689,
+ PADDUSBrm = 1690,
+ PADDUSBrr = 1691,
+ PADDUSWrm = 1692,
+ PADDUSWrr = 1693,
+ PADDWrm = 1694,
+ PADDWrr = 1695,
+ PALIGNR128rm = 1696,
+ PALIGNR128rr = 1697,
+ PALIGNR64rm = 1698,
+ PALIGNR64rr = 1699,
+ PANDNrm = 1700,
+ PANDNrr = 1701,
+ PANDrm = 1702,
+ PANDrr = 1703,
+ PAUSE = 1704,
+ PAVGBrm = 1705,
+ PAVGBrr = 1706,
+ PAVGWrm = 1707,
+ PAVGWrr = 1708,
+ PBLENDVBrm0 = 1709,
+ PBLENDVBrr0 = 1710,
+ PBLENDWrmi = 1711,
+ PBLENDWrri = 1712,
+ PCMPEQBrm = 1713,
+ PCMPEQBrr = 1714,
+ PCMPEQDrm = 1715,
+ PCMPEQDrr = 1716,
+ PCMPEQQrm = 1717,
+ PCMPEQQrr = 1718,
+ PCMPEQWrm = 1719,
+ PCMPEQWrr = 1720,
+ PCMPESTRIArm = 1721,
+ PCMPESTRIArr = 1722,
+ PCMPESTRICrm = 1723,
+ PCMPESTRICrr = 1724,
+ PCMPESTRIOrm = 1725,
+ PCMPESTRIOrr = 1726,
+ PCMPESTRISrm = 1727,
+ PCMPESTRISrr = 1728,
+ PCMPESTRIZrm = 1729,
+ PCMPESTRIZrr = 1730,
+ PCMPESTRIrm = 1731,
+ PCMPESTRIrr = 1732,
+ PCMPESTRM128MEM = 1733,
+ PCMPESTRM128REG = 1734,
+ PCMPESTRM128rm = 1735,
+ PCMPESTRM128rr = 1736,
+ PCMPGTBrm = 1737,
+ PCMPGTBrr = 1738,
+ PCMPGTDrm = 1739,
+ PCMPGTDrr = 1740,
+ PCMPGTQrm = 1741,
+ PCMPGTQrr = 1742,
+ PCMPGTWrm = 1743,
+ PCMPGTWrr = 1744,
+ PCMPISTRIArm = 1745,
+ PCMPISTRIArr = 1746,
+ PCMPISTRICrm = 1747,
+ PCMPISTRICrr = 1748,
+ PCMPISTRIOrm = 1749,
+ PCMPISTRIOrr = 1750,
+ PCMPISTRISrm = 1751,
+ PCMPISTRISrr = 1752,
+ PCMPISTRIZrm = 1753,
+ PCMPISTRIZrr = 1754,
+ PCMPISTRIrm = 1755,
+ PCMPISTRIrr = 1756,
+ PCMPISTRM128MEM = 1757,
+ PCMPISTRM128REG = 1758,
+ PCMPISTRM128rm = 1759,
+ PCMPISTRM128rr = 1760,
+ PEXTRBmr = 1761,
+ PEXTRBrr = 1762,
+ PEXTRDmr = 1763,
+ PEXTRDrr = 1764,
+ PEXTRQmr = 1765,
+ PEXTRQrr = 1766,
+ PEXTRWmr = 1767,
+ PEXTRWri = 1768,
+ PHADDDrm128 = 1769,
+ PHADDDrm64 = 1770,
+ PHADDDrr128 = 1771,
+ PHADDDrr64 = 1772,
+ PHADDSWrm128 = 1773,
+ PHADDSWrm64 = 1774,
+ PHADDSWrr128 = 1775,
+ PHADDSWrr64 = 1776,
+ PHADDWrm128 = 1777,
+ PHADDWrm64 = 1778,
+ PHADDWrr128 = 1779,
+ PHADDWrr64 = 1780,
+ PHMINPOSUWrm128 = 1781,
+ PHMINPOSUWrr128 = 1782,
+ PHSUBDrm128 = 1783,
+ PHSUBDrm64 = 1784,
+ PHSUBDrr128 = 1785,
+ PHSUBDrr64 = 1786,
+ PHSUBSWrm128 = 1787,
+ PHSUBSWrm64 = 1788,
+ PHSUBSWrr128 = 1789,
+ PHSUBSWrr64 = 1790,
+ PHSUBWrm128 = 1791,
+ PHSUBWrm64 = 1792,
+ PHSUBWrr128 = 1793,
+ PHSUBWrr64 = 1794,
+ PINSRBrm = 1795,
+ PINSRBrr = 1796,
+ PINSRDrm = 1797,
+ PINSRDrr = 1798,
+ PINSRQrm = 1799,
+ PINSRQrr = 1800,
+ PINSRWrmi = 1801,
+ PINSRWrri = 1802,
+ PMADDUBSWrm128 = 1803,
+ PMADDUBSWrm64 = 1804,
+ PMADDUBSWrr128 = 1805,
+ PMADDUBSWrr64 = 1806,
+ PMADDWDrm = 1807,
+ PMADDWDrr = 1808,
+ PMAXSBrm = 1809,
+ PMAXSBrr = 1810,
+ PMAXSDrm = 1811,
+ PMAXSDrr = 1812,
+ PMAXSWrm = 1813,
+ PMAXSWrr = 1814,
+ PMAXUBrm = 1815,
+ PMAXUBrr = 1816,
+ PMAXUDrm = 1817,
+ PMAXUDrr = 1818,
+ PMAXUWrm = 1819,
+ PMAXUWrr = 1820,
+ PMINSBrm = 1821,
+ PMINSBrr = 1822,
+ PMINSDrm = 1823,
+ PMINSDrr = 1824,
+ PMINSWrm = 1825,
+ PMINSWrr = 1826,
+ PMINUBrm = 1827,
+ PMINUBrr = 1828,
+ PMINUDrm = 1829,
+ PMINUDrr = 1830,
+ PMINUWrm = 1831,
+ PMINUWrr = 1832,
+ PMOVMSKBrr = 1833,
+ PMOVSXBDrm = 1834,
+ PMOVSXBDrr = 1835,
+ PMOVSXBQrm = 1836,
+ PMOVSXBQrr = 1837,
+ PMOVSXBWrm = 1838,
+ PMOVSXBWrr = 1839,
+ PMOVSXDQrm = 1840,
+ PMOVSXDQrr = 1841,
+ PMOVSXWDrm = 1842,
+ PMOVSXWDrr = 1843,
+ PMOVSXWQrm = 1844,
+ PMOVSXWQrr = 1845,
+ PMOVZXBDrm = 1846,
+ PMOVZXBDrr = 1847,
+ PMOVZXBQrm = 1848,
+ PMOVZXBQrr = 1849,
+ PMOVZXBWrm = 1850,
+ PMOVZXBWrr = 1851,
+ PMOVZXDQrm = 1852,
+ PMOVZXDQrr = 1853,
+ PMOVZXWDrm = 1854,
+ PMOVZXWDrr = 1855,
+ PMOVZXWQrm = 1856,
+ PMOVZXWQrr = 1857,
+ PMULDQrm = 1858,
+ PMULDQrr = 1859,
+ PMULHRSWrm128 = 1860,
+ PMULHRSWrm64 = 1861,
+ PMULHRSWrr128 = 1862,
+ PMULHRSWrr64 = 1863,
+ PMULHUWrm = 1864,
+ PMULHUWrr = 1865,
+ PMULHWrm = 1866,
+ PMULHWrr = 1867,
+ PMULLDrm = 1868,
+ PMULLDrr = 1869,
+ PMULLWrm = 1870,
+ PMULLWrr = 1871,
+ PMULUDQrm = 1872,
+ PMULUDQrr = 1873,
+ POP16r = 1874,
+ POP16rmm = 1875,
+ POP16rmr = 1876,
+ POP32r = 1877,
+ POP32rmm = 1878,
+ POP32rmr = 1879,
+ POP64r = 1880,
+ POP64rmm = 1881,
+ POP64rmr = 1882,
+ POPA32 = 1883,
+ POPCNT16rm = 1884,
+ POPCNT16rr = 1885,
+ POPCNT32rm = 1886,
+ POPCNT32rr = 1887,
+ POPCNT64rm = 1888,
+ POPCNT64rr = 1889,
+ POPF16 = 1890,
+ POPF32 = 1891,
+ POPF64 = 1892,
+ POPFS16 = 1893,
+ POPFS32 = 1894,
+ POPFS64 = 1895,
+ POPGS16 = 1896,
+ POPGS32 = 1897,
+ POPGS64 = 1898,
+ PORrm = 1899,
+ PORrr = 1900,
+ PREFETCHNTA = 1901,
+ PREFETCHT0 = 1902,
+ PREFETCHT1 = 1903,
+ PREFETCHT2 = 1904,
+ PSADBWrm = 1905,
+ PSADBWrr = 1906,
+ PSHUFBrm128 = 1907,
+ PSHUFBrm64 = 1908,
+ PSHUFBrr128 = 1909,
+ PSHUFBrr64 = 1910,
+ PSHUFDmi = 1911,
+ PSHUFDri = 1912,
+ PSHUFHWmi = 1913,
+ PSHUFHWri = 1914,
+ PSHUFLWmi = 1915,
+ PSHUFLWri = 1916,
+ PSIGNBrm128 = 1917,
+ PSIGNBrm64 = 1918,
+ PSIGNBrr128 = 1919,
+ PSIGNBrr64 = 1920,
+ PSIGNDrm128 = 1921,
+ PSIGNDrm64 = 1922,
+ PSIGNDrr128 = 1923,
+ PSIGNDrr64 = 1924,
+ PSIGNWrm128 = 1925,
+ PSIGNWrm64 = 1926,
+ PSIGNWrr128 = 1927,
+ PSIGNWrr64 = 1928,
+ PSLLDQri = 1929,
+ PSLLDri = 1930,
+ PSLLDrm = 1931,
+ PSLLDrr = 1932,
+ PSLLQri = 1933,
+ PSLLQrm = 1934,
+ PSLLQrr = 1935,
+ PSLLWri = 1936,
+ PSLLWrm = 1937,
+ PSLLWrr = 1938,
+ PSRADri = 1939,
+ PSRADrm = 1940,
+ PSRADrr = 1941,
+ PSRAWri = 1942,
+ PSRAWrm = 1943,
+ PSRAWrr = 1944,
+ PSRLDQri = 1945,
+ PSRLDri = 1946,
+ PSRLDrm = 1947,
+ PSRLDrr = 1948,
+ PSRLQri = 1949,
+ PSRLQrm = 1950,
+ PSRLQrr = 1951,
+ PSRLWri = 1952,
+ PSRLWrm = 1953,
+ PSRLWrr = 1954,
+ PSUBBrm = 1955,
+ PSUBBrr = 1956,
+ PSUBDrm = 1957,
+ PSUBDrr = 1958,
+ PSUBQrm = 1959,
+ PSUBQrr = 1960,
+ PSUBSBrm = 1961,
+ PSUBSBrr = 1962,
+ PSUBSWrm = 1963,
+ PSUBSWrr = 1964,
+ PSUBUSBrm = 1965,
+ PSUBUSBrr = 1966,
+ PSUBUSWrm = 1967,
+ PSUBUSWrr = 1968,
+ PSUBWrm = 1969,
+ PSUBWrr = 1970,
+ PTESTrm = 1971,
+ PTESTrr = 1972,
+ PUNPCKHBWrm = 1973,
+ PUNPCKHBWrr = 1974,
+ PUNPCKHDQrm = 1975,
+ PUNPCKHDQrr = 1976,
+ PUNPCKHQDQrm = 1977,
+ PUNPCKHQDQrr = 1978,
+ PUNPCKHWDrm = 1979,
+ PUNPCKHWDrr = 1980,
+ PUNPCKLBWrm = 1981,
+ PUNPCKLBWrr = 1982,
+ PUNPCKLDQrm = 1983,
+ PUNPCKLDQrr = 1984,
+ PUNPCKLQDQrm = 1985,
+ PUNPCKLQDQrr = 1986,
+ PUNPCKLWDrm = 1987,
+ PUNPCKLWDrr = 1988,
+ PUSH16r = 1989,
+ PUSH16rmm = 1990,
+ PUSH16rmr = 1991,
+ PUSH32r = 1992,
+ PUSH32rmm = 1993,
+ PUSH32rmr = 1994,
+ PUSH64i16 = 1995,
+ PUSH64i32 = 1996,
+ PUSH64i8 = 1997,
+ PUSH64r = 1998,
+ PUSH64rmm = 1999,
+ PUSH64rmr = 2000,
+ PUSHA32 = 2001,
+ PUSHF16 = 2002,
+ PUSHF32 = 2003,
+ PUSHF64 = 2004,
+ PUSHFS16 = 2005,
+ PUSHFS32 = 2006,
+ PUSHFS64 = 2007,
+ PUSHGS16 = 2008,
+ PUSHGS32 = 2009,
+ PUSHGS64 = 2010,
+ PUSHi16 = 2011,
+ PUSHi32 = 2012,
+ PUSHi8 = 2013,
+ PXORrm = 2014,
+ PXORrr = 2015,
+ RCL16m1 = 2016,
+ RCL16mCL = 2017,
+ RCL16mi = 2018,
+ RCL16r1 = 2019,
+ RCL16rCL = 2020,
+ RCL16ri = 2021,
+ RCL32m1 = 2022,
+ RCL32mCL = 2023,
+ RCL32mi = 2024,
+ RCL32r1 = 2025,
+ RCL32rCL = 2026,
+ RCL32ri = 2027,
+ RCL64m1 = 2028,
+ RCL64mCL = 2029,
+ RCL64mi = 2030,
+ RCL64r1 = 2031,
+ RCL64rCL = 2032,
+ RCL64ri = 2033,
+ RCL8m1 = 2034,
+ RCL8mCL = 2035,
+ RCL8mi = 2036,
+ RCL8r1 = 2037,
+ RCL8rCL = 2038,
+ RCL8ri = 2039,
+ RCPPSm = 2040,
+ RCPPSm_Int = 2041,
+ RCPPSr = 2042,
+ RCPPSr_Int = 2043,
+ RCPSSm = 2044,
+ RCPSSm_Int = 2045,
+ RCPSSr = 2046,
+ RCPSSr_Int = 2047,
+ RCR16m1 = 2048,
+ RCR16mCL = 2049,
+ RCR16mi = 2050,
+ RCR16r1 = 2051,
+ RCR16rCL = 2052,
+ RCR16ri = 2053,
+ RCR32m1 = 2054,
+ RCR32mCL = 2055,
+ RCR32mi = 2056,
+ RCR32r1 = 2057,
+ RCR32rCL = 2058,
+ RCR32ri = 2059,
+ RCR64m1 = 2060,
+ RCR64mCL = 2061,
+ RCR64mi = 2062,
+ RCR64r1 = 2063,
+ RCR64rCL = 2064,
+ RCR64ri = 2065,
+ RCR8m1 = 2066,
+ RCR8mCL = 2067,
+ RCR8mi = 2068,
+ RCR8r1 = 2069,
+ RCR8rCL = 2070,
+ RCR8ri = 2071,
+ RDMSR = 2072,
+ RDPMC = 2073,
+ RDTSC = 2074,
+ RDTSCP = 2075,
+ REPNE_PREFIX = 2076,
+ REP_MOVSB = 2077,
+ REP_MOVSD = 2078,
+ REP_MOVSQ = 2079,
+ REP_MOVSW = 2080,
+ REP_PREFIX = 2081,
+ REP_STOSB = 2082,
+ REP_STOSD = 2083,
+ REP_STOSQ = 2084,
+ REP_STOSW = 2085,
+ RET = 2086,
+ RETI = 2087,
+ ROL16m1 = 2088,
+ ROL16mCL = 2089,
+ ROL16mi = 2090,
+ ROL16r1 = 2091,
+ ROL16rCL = 2092,
+ ROL16ri = 2093,
+ ROL32m1 = 2094,
+ ROL32mCL = 2095,
+ ROL32mi = 2096,
+ ROL32r1 = 2097,
+ ROL32rCL = 2098,
+ ROL32ri = 2099,
+ ROL64m1 = 2100,
+ ROL64mCL = 2101,
+ ROL64mi = 2102,
+ ROL64r1 = 2103,
+ ROL64rCL = 2104,
+ ROL64ri = 2105,
+ ROL8m1 = 2106,
+ ROL8mCL = 2107,
+ ROL8mi = 2108,
+ ROL8r1 = 2109,
+ ROL8rCL = 2110,
+ ROL8ri = 2111,
+ ROR16m1 = 2112,
+ ROR16mCL = 2113,
+ ROR16mi = 2114,
+ ROR16r1 = 2115,
+ ROR16rCL = 2116,
+ ROR16ri = 2117,
+ ROR32m1 = 2118,
+ ROR32mCL = 2119,
+ ROR32mi = 2120,
+ ROR32r1 = 2121,
+ ROR32rCL = 2122,
+ ROR32ri = 2123,
+ ROR64m1 = 2124,
+ ROR64mCL = 2125,
+ ROR64mi = 2126,
+ ROR64r1 = 2127,
+ ROR64rCL = 2128,
+ ROR64ri = 2129,
+ ROR8m1 = 2130,
+ ROR8mCL = 2131,
+ ROR8mi = 2132,
+ ROR8r1 = 2133,
+ ROR8rCL = 2134,
+ ROR8ri = 2135,
+ ROUNDPDm_Int = 2136,
+ ROUNDPDr_Int = 2137,
+ ROUNDPSm_Int = 2138,
+ ROUNDPSr_Int = 2139,
+ ROUNDSDm_Int = 2140,
+ ROUNDSDr_Int = 2141,
+ ROUNDSSm_Int = 2142,
+ ROUNDSSr_Int = 2143,
+ RSM = 2144,
+ RSQRTPSm = 2145,
+ RSQRTPSm_Int = 2146,
+ RSQRTPSr = 2147,
+ RSQRTPSr_Int = 2148,
+ RSQRTSSm = 2149,
+ RSQRTSSm_Int = 2150,
+ RSQRTSSr = 2151,
+ RSQRTSSr_Int = 2152,
+ SAHF = 2153,
+ SAR16m1 = 2154,
+ SAR16mCL = 2155,
+ SAR16mi = 2156,
+ SAR16r1 = 2157,
+ SAR16rCL = 2158,
+ SAR16ri = 2159,
+ SAR32m1 = 2160,
+ SAR32mCL = 2161,
+ SAR32mi = 2162,
+ SAR32r1 = 2163,
+ SAR32rCL = 2164,
+ SAR32ri = 2165,
+ SAR64m1 = 2166,
+ SAR64mCL = 2167,
+ SAR64mi = 2168,
+ SAR64r1 = 2169,
+ SAR64rCL = 2170,
+ SAR64ri = 2171,
+ SAR8m1 = 2172,
+ SAR8mCL = 2173,
+ SAR8mi = 2174,
+ SAR8r1 = 2175,
+ SAR8rCL = 2176,
+ SAR8ri = 2177,
+ SBB16i16 = 2178,
+ SBB16mi = 2179,
+ SBB16mi8 = 2180,
+ SBB16mr = 2181,
+ SBB16ri = 2182,
+ SBB16ri8 = 2183,
+ SBB16rm = 2184,
+ SBB16rr = 2185,
+ SBB16rr_REV = 2186,
+ SBB32i32 = 2187,
+ SBB32mi = 2188,
+ SBB32mi8 = 2189,
+ SBB32mr = 2190,
+ SBB32ri = 2191,
+ SBB32ri8 = 2192,
+ SBB32rm = 2193,
+ SBB32rr = 2194,
+ SBB32rr_REV = 2195,
+ SBB64i32 = 2196,
+ SBB64mi32 = 2197,
+ SBB64mi8 = 2198,
+ SBB64mr = 2199,
+ SBB64ri32 = 2200,
+ SBB64ri8 = 2201,
+ SBB64rm = 2202,
+ SBB64rr = 2203,
+ SBB64rr_REV = 2204,
+ SBB8i8 = 2205,
+ SBB8mi = 2206,
+ SBB8mr = 2207,
+ SBB8ri = 2208,
+ SBB8rm = 2209,
+ SBB8rr = 2210,
+ SBB8rr_REV = 2211,
+ SCAS16 = 2212,
+ SCAS32 = 2213,
+ SCAS64 = 2214,
+ SCAS8 = 2215,
+ SETAEm = 2216,
+ SETAEr = 2217,
+ SETAm = 2218,
+ SETAr = 2219,
+ SETBEm = 2220,
+ SETBEr = 2221,
+ SETB_C16r = 2222,
+ SETB_C32r = 2223,
+ SETB_C64r = 2224,
+ SETB_C8r = 2225,
+ SETBm = 2226,
+ SETBr = 2227,
+ SETEm = 2228,
+ SETEr = 2229,
+ SETGEm = 2230,
+ SETGEr = 2231,
+ SETGm = 2232,
+ SETGr = 2233,
+ SETLEm = 2234,
+ SETLEr = 2235,
+ SETLm = 2236,
+ SETLr = 2237,
+ SETNEm = 2238,
+ SETNEr = 2239,
+ SETNOm = 2240,
+ SETNOr = 2241,
+ SETNPm = 2242,
+ SETNPr = 2243,
+ SETNSm = 2244,
+ SETNSr = 2245,
+ SETOm = 2246,
+ SETOr = 2247,
+ SETPm = 2248,
+ SETPr = 2249,
+ SETSm = 2250,
+ SETSr = 2251,
+ SFENCE = 2252,
+ SGDTm = 2253,
+ SHL16m1 = 2254,
+ SHL16mCL = 2255,
+ SHL16mi = 2256,
+ SHL16r1 = 2257,
+ SHL16rCL = 2258,
+ SHL16ri = 2259,
+ SHL32m1 = 2260,
+ SHL32mCL = 2261,
+ SHL32mi = 2262,
+ SHL32r1 = 2263,
+ SHL32rCL = 2264,
+ SHL32ri = 2265,
+ SHL64m1 = 2266,
+ SHL64mCL = 2267,
+ SHL64mi = 2268,
+ SHL64r1 = 2269,
+ SHL64rCL = 2270,
+ SHL64ri = 2271,
+ SHL8m1 = 2272,
+ SHL8mCL = 2273,
+ SHL8mi = 2274,
+ SHL8r1 = 2275,
+ SHL8rCL = 2276,
+ SHL8ri = 2277,
+ SHLD16mrCL = 2278,
+ SHLD16mri8 = 2279,
+ SHLD16rrCL = 2280,
+ SHLD16rri8 = 2281,
+ SHLD32mrCL = 2282,
+ SHLD32mri8 = 2283,
+ SHLD32rrCL = 2284,
+ SHLD32rri8 = 2285,
+ SHLD64mrCL = 2286,
+ SHLD64mri8 = 2287,
+ SHLD64rrCL = 2288,
+ SHLD64rri8 = 2289,
+ SHR16m1 = 2290,
+ SHR16mCL = 2291,
+ SHR16mi = 2292,
+ SHR16r1 = 2293,
+ SHR16rCL = 2294,
+ SHR16ri = 2295,
+ SHR32m1 = 2296,
+ SHR32mCL = 2297,
+ SHR32mi = 2298,
+ SHR32r1 = 2299,
+ SHR32rCL = 2300,
+ SHR32ri = 2301,
+ SHR64m1 = 2302,
+ SHR64mCL = 2303,
+ SHR64mi = 2304,
+ SHR64r1 = 2305,
+ SHR64rCL = 2306,
+ SHR64ri = 2307,
+ SHR8m1 = 2308,
+ SHR8mCL = 2309,
+ SHR8mi = 2310,
+ SHR8r1 = 2311,
+ SHR8rCL = 2312,
+ SHR8ri = 2313,
+ SHRD16mrCL = 2314,
+ SHRD16mri8 = 2315,
+ SHRD16rrCL = 2316,
+ SHRD16rri8 = 2317,
+ SHRD32mrCL = 2318,
+ SHRD32mri8 = 2319,
+ SHRD32rrCL = 2320,
+ SHRD32rri8 = 2321,
+ SHRD64mrCL = 2322,
+ SHRD64mri8 = 2323,
+ SHRD64rrCL = 2324,
+ SHRD64rri8 = 2325,
+ SHUFPDrmi = 2326,
+ SHUFPDrri = 2327,
+ SHUFPSrmi = 2328,
+ SHUFPSrri = 2329,
+ SIDTm = 2330,
+ SIN_F = 2331,
+ SIN_Fp32 = 2332,
+ SIN_Fp64 = 2333,
+ SIN_Fp80 = 2334,
+ SLDT16m = 2335,
+ SLDT16r = 2336,
+ SLDT64m = 2337,
+ SLDT64r = 2338,
+ SMSW16m = 2339,
+ SMSW16r = 2340,
+ SMSW32r = 2341,
+ SMSW64r = 2342,
+ SQRTPDm = 2343,
+ SQRTPDm_Int = 2344,
+ SQRTPDr = 2345,
+ SQRTPDr_Int = 2346,
+ SQRTPSm = 2347,
+ SQRTPSm_Int = 2348,
+ SQRTPSr = 2349,
+ SQRTPSr_Int = 2350,
+ SQRTSDm = 2351,
+ SQRTSDm_Int = 2352,
+ SQRTSDr = 2353,
+ SQRTSDr_Int = 2354,
+ SQRTSSm = 2355,
+ SQRTSSm_Int = 2356,
+ SQRTSSr = 2357,
+ SQRTSSr_Int = 2358,
+ SQRT_F = 2359,
+ SQRT_Fp32 = 2360,
+ SQRT_Fp64 = 2361,
+ SQRT_Fp80 = 2362,
+ SS_PREFIX = 2363,
+ STC = 2364,
+ STD = 2365,
+ STI = 2366,
+ STMXCSR = 2367,
+ STOSB = 2368,
+ STOSD = 2369,
+ STOSQ = 2370,
+ STOSW = 2371,
+ STRm = 2372,
+ STRr = 2373,
+ ST_F32m = 2374,
+ ST_F64m = 2375,
+ ST_FP32m = 2376,
+ ST_FP64m = 2377,
+ ST_FP80m = 2378,
+ ST_FPrr = 2379,
+ ST_Fp32m = 2380,
+ ST_Fp64m = 2381,
+ ST_Fp64m32 = 2382,
+ ST_Fp80m32 = 2383,
+ ST_Fp80m64 = 2384,
+ ST_FpP32m = 2385,
+ ST_FpP64m = 2386,
+ ST_FpP64m32 = 2387,
+ ST_FpP80m = 2388,
+ ST_FpP80m32 = 2389,
+ ST_FpP80m64 = 2390,
+ ST_Frr = 2391,
+ SUB16i16 = 2392,
+ SUB16mi = 2393,
+ SUB16mi8 = 2394,
+ SUB16mr = 2395,
+ SUB16ri = 2396,
+ SUB16ri8 = 2397,
+ SUB16rm = 2398,
+ SUB16rr = 2399,
+ SUB16rr_REV = 2400,
+ SUB32i32 = 2401,
+ SUB32mi = 2402,
+ SUB32mi8 = 2403,
+ SUB32mr = 2404,
+ SUB32ri = 2405,
+ SUB32ri8 = 2406,
+ SUB32rm = 2407,
+ SUB32rr = 2408,
+ SUB32rr_REV = 2409,
+ SUB64i32 = 2410,
+ SUB64mi32 = 2411,
+ SUB64mi8 = 2412,
+ SUB64mr = 2413,
+ SUB64ri32 = 2414,
+ SUB64ri8 = 2415,
+ SUB64rm = 2416,
+ SUB64rr = 2417,
+ SUB64rr_REV = 2418,
+ SUB8i8 = 2419,
+ SUB8mi = 2420,
+ SUB8mr = 2421,
+ SUB8ri = 2422,
+ SUB8rm = 2423,
+ SUB8rr = 2424,
+ SUB8rr_REV = 2425,
+ SUBPDrm = 2426,
+ SUBPDrr = 2427,
+ SUBPSrm = 2428,
+ SUBPSrr = 2429,
+ SUBR_F32m = 2430,
+ SUBR_F64m = 2431,
+ SUBR_FI16m = 2432,
+ SUBR_FI32m = 2433,
+ SUBR_FPrST0 = 2434,
+ SUBR_FST0r = 2435,
+ SUBR_Fp32m = 2436,
+ SUBR_Fp64m = 2437,
+ SUBR_Fp64m32 = 2438,
+ SUBR_Fp80m32 = 2439,
+ SUBR_Fp80m64 = 2440,
+ SUBR_FpI16m32 = 2441,
+ SUBR_FpI16m64 = 2442,
+ SUBR_FpI16m80 = 2443,
+ SUBR_FpI32m32 = 2444,
+ SUBR_FpI32m64 = 2445,
+ SUBR_FpI32m80 = 2446,
+ SUBR_FrST0 = 2447,
+ SUBSDrm = 2448,
+ SUBSDrm_Int = 2449,
+ SUBSDrr = 2450,
+ SUBSDrr_Int = 2451,
+ SUBSSrm = 2452,
+ SUBSSrm_Int = 2453,
+ SUBSSrr = 2454,
+ SUBSSrr_Int = 2455,
+ SUB_F32m = 2456,
+ SUB_F64m = 2457,
+ SUB_FI16m = 2458,
+ SUB_FI32m = 2459,
+ SUB_FPrST0 = 2460,
+ SUB_FST0r = 2461,
+ SUB_Fp32 = 2462,
+ SUB_Fp32m = 2463,
+ SUB_Fp64 = 2464,
+ SUB_Fp64m = 2465,
+ SUB_Fp64m32 = 2466,
+ SUB_Fp80 = 2467,
+ SUB_Fp80m32 = 2468,
+ SUB_Fp80m64 = 2469,
+ SUB_FpI16m32 = 2470,
+ SUB_FpI16m64 = 2471,
+ SUB_FpI16m80 = 2472,
+ SUB_FpI32m32 = 2473,
+ SUB_FpI32m64 = 2474,
+ SUB_FpI32m80 = 2475,
+ SUB_FrST0 = 2476,
+ SWAPGS = 2477,
+ SYSCALL = 2478,
+ SYSENTER = 2479,
+ SYSEXIT = 2480,
+ SYSEXIT64 = 2481,
+ SYSRET = 2482,
+ TAILJMPd = 2483,
+ TAILJMPd64 = 2484,
+ TAILJMPm = 2485,
+ TAILJMPm64 = 2486,
+ TAILJMPr = 2487,
+ TAILJMPr64 = 2488,
+ TCRETURNdi = 2489,
+ TCRETURNdi64 = 2490,
+ TCRETURNmi = 2491,
+ TCRETURNmi64 = 2492,
+ TCRETURNri = 2493,
+ TCRETURNri64 = 2494,
+ TEST16i16 = 2495,
+ TEST16mi = 2496,
+ TEST16ri = 2497,
+ TEST16rm = 2498,
+ TEST16rr = 2499,
+ TEST32i32 = 2500,
+ TEST32mi = 2501,
+ TEST32ri = 2502,
+ TEST32rm = 2503,
+ TEST32rr = 2504,
+ TEST64i32 = 2505,
+ TEST64mi32 = 2506,
+ TEST64ri32 = 2507,
+ TEST64rm = 2508,
+ TEST64rr = 2509,
+ TEST8i8 = 2510,
+ TEST8mi = 2511,
+ TEST8ri = 2512,
+ TEST8rm = 2513,
+ TEST8rr = 2514,
+ TLSCall_32 = 2515,
+ TLSCall_64 = 2516,
+ TLS_addr32 = 2517,
+ TLS_addr64 = 2518,
+ TRAP = 2519,
+ TST_F = 2520,
+ TST_Fp32 = 2521,
+ TST_Fp64 = 2522,
+ TST_Fp80 = 2523,
+ UCOMISDrm = 2524,
+ UCOMISDrr = 2525,
+ UCOMISSrm = 2526,
+ UCOMISSrr = 2527,
+ UCOM_FIPr = 2528,
+ UCOM_FIr = 2529,
+ UCOM_FPPr = 2530,
+ UCOM_FPr = 2531,
+ UCOM_FpIr32 = 2532,
+ UCOM_FpIr64 = 2533,
+ UCOM_FpIr80 = 2534,
+ UCOM_Fpr32 = 2535,
+ UCOM_Fpr64 = 2536,
+ UCOM_Fpr80 = 2537,
+ UCOM_Fr = 2538,
+ UNPCKHPDrm = 2539,
+ UNPCKHPDrr = 2540,
+ UNPCKHPSrm = 2541,
+ UNPCKHPSrr = 2542,
+ UNPCKLPDrm = 2543,
+ UNPCKLPDrr = 2544,
+ UNPCKLPSrm = 2545,
+ UNPCKLPSrr = 2546,
+ VADDPDYrm = 2547,
+ VADDPDYrr = 2548,
+ VADDPDrm = 2549,
+ VADDPDrr = 2550,
+ VADDPSYrm = 2551,
+ VADDPSYrr = 2552,
+ VADDPSrm = 2553,
+ VADDPSrr = 2554,
+ VADDSDrm = 2555,
+ VADDSDrm_Int = 2556,
+ VADDSDrr = 2557,
+ VADDSDrr_Int = 2558,
+ VADDSSrm = 2559,
+ VADDSSrm_Int = 2560,
+ VADDSSrr = 2561,
+ VADDSSrr_Int = 2562,
+ VADDSUBPDYrm = 2563,
+ VADDSUBPDYrr = 2564,
+ VADDSUBPDrm = 2565,
+ VADDSUBPDrr = 2566,
+ VADDSUBPSYrm = 2567,
+ VADDSUBPSYrr = 2568,
+ VADDSUBPSrm = 2569,
+ VADDSUBPSrr = 2570,
+ VAESDECLASTrm = 2571,
+ VAESDECLASTrr = 2572,
+ VAESDECrm = 2573,
+ VAESDECrr = 2574,
+ VAESENCLASTrm = 2575,
+ VAESENCLASTrr = 2576,
+ VAESENCrm = 2577,
+ VAESENCrr = 2578,
+ VAESIMCrm = 2579,
+ VAESIMCrr = 2580,
+ VAESKEYGENASSIST128rm = 2581,
+ VAESKEYGENASSIST128rr = 2582,
+ VANDNPDYrm = 2583,
+ VANDNPDYrr = 2584,
+ VANDNPDrm = 2585,
+ VANDNPDrr = 2586,
+ VANDNPSYrm = 2587,
+ VANDNPSYrr = 2588,
+ VANDNPSrm = 2589,
+ VANDNPSrr = 2590,
+ VANDPDYrm = 2591,
+ VANDPDYrr = 2592,
+ VANDPDrm = 2593,
+ VANDPDrr = 2594,
+ VANDPSYrm = 2595,
+ VANDPSYrr = 2596,
+ VANDPSrm = 2597,
+ VANDPSrr = 2598,
+ VASTART_SAVE_XMM_REGS = 2599,
+ VBLENDPDYrmi = 2600,
+ VBLENDPDYrri = 2601,
+ VBLENDPDrmi = 2602,
+ VBLENDPDrri = 2603,
+ VBLENDPSYrmi = 2604,
+ VBLENDPSYrri = 2605,
+ VBLENDPSrmi = 2606,
+ VBLENDPSrri = 2607,
+ VBLENDVPDYrm = 2608,
+ VBLENDVPDYrr = 2609,
+ VBLENDVPDrm = 2610,
+ VBLENDVPDrr = 2611,
+ VBLENDVPSYrm = 2612,
+ VBLENDVPSYrr = 2613,
+ VBLENDVPSrm = 2614,
+ VBLENDVPSrr = 2615,
+ VBROADCASTF128 = 2616,
+ VBROADCASTSD = 2617,
+ VBROADCASTSS = 2618,
+ VBROADCASTSSY = 2619,
+ VCMPPDYrmi = 2620,
+ VCMPPDYrmi_alt = 2621,
+ VCMPPDYrri = 2622,
+ VCMPPDYrri_alt = 2623,
+ VCMPPDrmi = 2624,
+ VCMPPDrmi_alt = 2625,
+ VCMPPDrri = 2626,
+ VCMPPDrri_alt = 2627,
+ VCMPPSYrmi = 2628,
+ VCMPPSYrmi_alt = 2629,
+ VCMPPSYrri = 2630,
+ VCMPPSYrri_alt = 2631,
+ VCMPPSrmi = 2632,
+ VCMPPSrmi_alt = 2633,
+ VCMPPSrri = 2634,
+ VCMPPSrri_alt = 2635,
+ VCMPSDrm = 2636,
+ VCMPSDrm_alt = 2637,
+ VCMPSDrr = 2638,
+ VCMPSDrr_alt = 2639,
+ VCMPSSrm = 2640,
+ VCMPSSrm_alt = 2641,
+ VCMPSSrr = 2642,
+ VCMPSSrr_alt = 2643,
+ VCOMISDrm = 2644,
+ VCOMISDrr = 2645,
+ VCOMISSrm = 2646,
+ VCOMISSrr = 2647,
+ VCVTDQ2PDYrm = 2648,
+ VCVTDQ2PDYrr = 2649,
+ VCVTDQ2PDrm = 2650,
+ VCVTDQ2PDrr = 2651,
+ VCVTDQ2PSYrm = 2652,
+ VCVTDQ2PSYrr = 2653,
+ VCVTDQ2PSrm = 2654,
+ VCVTDQ2PSrr = 2655,
+ VCVTPD2DQXrYr = 2656,
+ VCVTPD2DQXrm = 2657,
+ VCVTPD2DQXrr = 2658,
+ VCVTPD2DQYrm = 2659,
+ VCVTPD2DQYrr = 2660,
+ VCVTPD2DQrr = 2661,
+ VCVTPD2PSXrYr = 2662,
+ VCVTPD2PSXrm = 2663,
+ VCVTPD2PSXrr = 2664,
+ VCVTPD2PSYrm = 2665,
+ VCVTPD2PSYrr = 2666,
+ VCVTPD2PSrr = 2667,
+ VCVTPS2DQYrm = 2668,
+ VCVTPS2DQYrr = 2669,
+ VCVTPS2DQrm = 2670,
+ VCVTPS2DQrr = 2671,
+ VCVTPS2PDYrm = 2672,
+ VCVTPS2PDYrr = 2673,
+ VCVTPS2PDrm = 2674,
+ VCVTPS2PDrr = 2675,
+ VCVTSD2SI64rm = 2676,
+ VCVTSD2SI64rr = 2677,
+ VCVTSD2SI_altrm = 2678,
+ VCVTSD2SI_altrr = 2679,
+ VCVTSD2SSrm = 2680,
+ VCVTSD2SSrr = 2681,
+ VCVTSI2SD64rm = 2682,
+ VCVTSI2SD64rr = 2683,
+ VCVTSI2SDLrm = 2684,
+ VCVTSI2SDLrr = 2685,
+ VCVTSI2SDrm = 2686,
+ VCVTSI2SDrr = 2687,
+ VCVTSI2SS64rm = 2688,
+ VCVTSI2SS64rr = 2689,
+ VCVTSI2SSrm = 2690,
+ VCVTSI2SSrr = 2691,
+ VCVTSS2SDrm = 2692,
+ VCVTSS2SDrr = 2693,
+ VCVTSS2SI64rm = 2694,
+ VCVTSS2SI64rr = 2695,
+ VCVTSS2SIrm = 2696,
+ VCVTSS2SIrr = 2697,
+ VCVTTPD2DQXrYr = 2698,
+ VCVTTPD2DQXrm = 2699,
+ VCVTTPD2DQXrr = 2700,
+ VCVTTPD2DQYrm = 2701,
+ VCVTTPD2DQYrr = 2702,
+ VCVTTPD2DQrr = 2703,
+ VCVTTPS2DQYrm = 2704,
+ VCVTTPS2DQYrr = 2705,
+ VCVTTPS2DQrm = 2706,
+ VCVTTPS2DQrr = 2707,
+ VCVTTSD2SI64rm = 2708,
+ VCVTTSD2SI64rr = 2709,
+ VCVTTSD2SIrm = 2710,
+ VCVTTSD2SIrr = 2711,
+ VCVTTSS2SI64rm = 2712,
+ VCVTTSS2SI64rr = 2713,
+ VCVTTSS2SIrm = 2714,
+ VCVTTSS2SIrr = 2715,
+ VDIVPDYrm = 2716,
+ VDIVPDYrr = 2717,
+ VDIVPDrm = 2718,
+ VDIVPDrr = 2719,
+ VDIVPSYrm = 2720,
+ VDIVPSYrr = 2721,
+ VDIVPSrm = 2722,
+ VDIVPSrr = 2723,
+ VDIVSDrm = 2724,
+ VDIVSDrm_Int = 2725,
+ VDIVSDrr = 2726,
+ VDIVSDrr_Int = 2727,
+ VDIVSSrm = 2728,
+ VDIVSSrm_Int = 2729,
+ VDIVSSrr = 2730,
+ VDIVSSrr_Int = 2731,
+ VDPPDrmi = 2732,
+ VDPPDrri = 2733,
+ VDPPSYrmi = 2734,
+ VDPPSYrri = 2735,
+ VDPPSrmi = 2736,
+ VDPPSrri = 2737,
+ VERRm = 2738,
+ VERRr = 2739,
+ VERWm = 2740,
+ VERWr = 2741,
+ VEXTRACTF128mr = 2742,
+ VEXTRACTF128rr = 2743,
+ VEXTRACTPSmr = 2744,
+ VEXTRACTPSrr = 2745,
+ VEXTRACTPSrr64 = 2746,
+ VFMADDPDr132m = 2747,
+ VFMADDPDr132mY = 2748,
+ VFMADDPDr132r = 2749,
+ VFMADDPDr132rY = 2750,
+ VFMADDPDr213m = 2751,
+ VFMADDPDr213mY = 2752,
+ VFMADDPDr213r = 2753,
+ VFMADDPDr213rY = 2754,
+ VFMADDPDr231m = 2755,
+ VFMADDPDr231mY = 2756,
+ VFMADDPDr231r = 2757,
+ VFMADDPDr231rY = 2758,
+ VFMADDPSr132m = 2759,
+ VFMADDPSr132mY = 2760,
+ VFMADDPSr132r = 2761,
+ VFMADDPSr132rY = 2762,
+ VFMADDPSr213m = 2763,
+ VFMADDPSr213mY = 2764,
+ VFMADDPSr213r = 2765,
+ VFMADDPSr213rY = 2766,
+ VFMADDPSr231m = 2767,
+ VFMADDPSr231mY = 2768,
+ VFMADDPSr231r = 2769,
+ VFMADDPSr231rY = 2770,
+ VFMADDSUBPDr132m = 2771,
+ VFMADDSUBPDr132mY = 2772,
+ VFMADDSUBPDr132r = 2773,
+ VFMADDSUBPDr132rY = 2774,
+ VFMADDSUBPDr213m = 2775,
+ VFMADDSUBPDr213mY = 2776,
+ VFMADDSUBPDr213r = 2777,
+ VFMADDSUBPDr213rY = 2778,
+ VFMADDSUBPDr231m = 2779,
+ VFMADDSUBPDr231mY = 2780,
+ VFMADDSUBPDr231r = 2781,
+ VFMADDSUBPDr231rY = 2782,
+ VFMADDSUBPSr132m = 2783,
+ VFMADDSUBPSr132mY = 2784,
+ VFMADDSUBPSr132r = 2785,
+ VFMADDSUBPSr132rY = 2786,
+ VFMADDSUBPSr213m = 2787,
+ VFMADDSUBPSr213mY = 2788,
+ VFMADDSUBPSr213r = 2789,
+ VFMADDSUBPSr213rY = 2790,
+ VFMADDSUBPSr231m = 2791,
+ VFMADDSUBPSr231mY = 2792,
+ VFMADDSUBPSr231r = 2793,
+ VFMADDSUBPSr231rY = 2794,
+ VFMSUBADDPDr132m = 2795,
+ VFMSUBADDPDr132mY = 2796,
+ VFMSUBADDPDr132r = 2797,
+ VFMSUBADDPDr132rY = 2798,
+ VFMSUBADDPDr213m = 2799,
+ VFMSUBADDPDr213mY = 2800,
+ VFMSUBADDPDr213r = 2801,
+ VFMSUBADDPDr213rY = 2802,
+ VFMSUBADDPDr231m = 2803,
+ VFMSUBADDPDr231mY = 2804,
+ VFMSUBADDPDr231r = 2805,
+ VFMSUBADDPDr231rY = 2806,
+ VFMSUBADDPSr132m = 2807,
+ VFMSUBADDPSr132mY = 2808,
+ VFMSUBADDPSr132r = 2809,
+ VFMSUBADDPSr132rY = 2810,
+ VFMSUBADDPSr213m = 2811,
+ VFMSUBADDPSr213mY = 2812,
+ VFMSUBADDPSr213r = 2813,
+ VFMSUBADDPSr213rY = 2814,
+ VFMSUBADDPSr231m = 2815,
+ VFMSUBADDPSr231mY = 2816,
+ VFMSUBADDPSr231r = 2817,
+ VFMSUBADDPSr231rY = 2818,
+ VFMSUBPDr132m = 2819,
+ VFMSUBPDr132mY = 2820,
+ VFMSUBPDr132r = 2821,
+ VFMSUBPDr132rY = 2822,
+ VFMSUBPDr213m = 2823,
+ VFMSUBPDr213mY = 2824,
+ VFMSUBPDr213r = 2825,
+ VFMSUBPDr213rY = 2826,
+ VFMSUBPDr231m = 2827,
+ VFMSUBPDr231mY = 2828,
+ VFMSUBPDr231r = 2829,
+ VFMSUBPDr231rY = 2830,
+ VFMSUBPSr132m = 2831,
+ VFMSUBPSr132mY = 2832,
+ VFMSUBPSr132r = 2833,
+ VFMSUBPSr132rY = 2834,
+ VFMSUBPSr213m = 2835,
+ VFMSUBPSr213mY = 2836,
+ VFMSUBPSr213r = 2837,
+ VFMSUBPSr213rY = 2838,
+ VFMSUBPSr231m = 2839,
+ VFMSUBPSr231mY = 2840,
+ VFMSUBPSr231r = 2841,
+ VFMSUBPSr231rY = 2842,
+ VFNMADDPDr132m = 2843,
+ VFNMADDPDr132mY = 2844,
+ VFNMADDPDr132r = 2845,
+ VFNMADDPDr132rY = 2846,
+ VFNMADDPDr213m = 2847,
+ VFNMADDPDr213mY = 2848,
+ VFNMADDPDr213r = 2849,
+ VFNMADDPDr213rY = 2850,
+ VFNMADDPDr231m = 2851,
+ VFNMADDPDr231mY = 2852,
+ VFNMADDPDr231r = 2853,
+ VFNMADDPDr231rY = 2854,
+ VFNMADDPSr132m = 2855,
+ VFNMADDPSr132mY = 2856,
+ VFNMADDPSr132r = 2857,
+ VFNMADDPSr132rY = 2858,
+ VFNMADDPSr213m = 2859,
+ VFNMADDPSr213mY = 2860,
+ VFNMADDPSr213r = 2861,
+ VFNMADDPSr213rY = 2862,
+ VFNMADDPSr231m = 2863,
+ VFNMADDPSr231mY = 2864,
+ VFNMADDPSr231r = 2865,
+ VFNMADDPSr231rY = 2866,
+ VFNMSUBPDr132m = 2867,
+ VFNMSUBPDr132mY = 2868,
+ VFNMSUBPDr132r = 2869,
+ VFNMSUBPDr132rY = 2870,
+ VFNMSUBPDr213m = 2871,
+ VFNMSUBPDr213mY = 2872,
+ VFNMSUBPDr213r = 2873,
+ VFNMSUBPDr213rY = 2874,
+ VFNMSUBPDr231m = 2875,
+ VFNMSUBPDr231mY = 2876,
+ VFNMSUBPDr231r = 2877,
+ VFNMSUBPDr231rY = 2878,
+ VFNMSUBPSr132m = 2879,
+ VFNMSUBPSr132mY = 2880,
+ VFNMSUBPSr132r = 2881,
+ VFNMSUBPSr132rY = 2882,
+ VFNMSUBPSr213m = 2883,
+ VFNMSUBPSr213mY = 2884,
+ VFNMSUBPSr213r = 2885,
+ VFNMSUBPSr213rY = 2886,
+ VFNMSUBPSr231m = 2887,
+ VFNMSUBPSr231mY = 2888,
+ VFNMSUBPSr231r = 2889,
+ VFNMSUBPSr231rY = 2890,
+ VFsANDNPDrm = 2891,
+ VFsANDNPDrr = 2892,
+ VFsANDNPSrm = 2893,
+ VFsANDNPSrr = 2894,
+ VFsANDPDrm = 2895,
+ VFsANDPDrr = 2896,
+ VFsANDPSrm = 2897,
+ VFsANDPSrr = 2898,
+ VFsORPDrm = 2899,
+ VFsORPDrr = 2900,
+ VFsORPSrm = 2901,
+ VFsORPSrr = 2902,
+ VFsXORPDrm = 2903,
+ VFsXORPDrr = 2904,
+ VFsXORPSrm = 2905,
+ VFsXORPSrr = 2906,
+ VHADDPDYrm = 2907,
+ VHADDPDYrr = 2908,
+ VHADDPDrm = 2909,
+ VHADDPDrr = 2910,
+ VHADDPSYrm = 2911,
+ VHADDPSYrr = 2912,
+ VHADDPSrm = 2913,
+ VHADDPSrr = 2914,
+ VHSUBPDYrm = 2915,
+ VHSUBPDYrr = 2916,
+ VHSUBPDrm = 2917,
+ VHSUBPDrr = 2918,
+ VHSUBPSYrm = 2919,
+ VHSUBPSYrr = 2920,
+ VHSUBPSrm = 2921,
+ VHSUBPSrr = 2922,
+ VINSERTF128rm = 2923,
+ VINSERTF128rr = 2924,
+ VINSERTPSrm = 2925,
+ VINSERTPSrr = 2926,
+ VLDDQUYrm = 2927,
+ VLDDQUrm = 2928,
+ VLDMXCSR = 2929,
+ VMASKMOVDQU = 2930,
+ VMASKMOVDQU64 = 2931,
+ VMASKMOVPDYmr = 2932,
+ VMASKMOVPDYrm = 2933,
+ VMASKMOVPDmr = 2934,
+ VMASKMOVPDrm = 2935,
+ VMASKMOVPSYmr = 2936,
+ VMASKMOVPSYrm = 2937,
+ VMASKMOVPSmr = 2938,
+ VMASKMOVPSrm = 2939,
+ VMAXPDYrm = 2940,
+ VMAXPDYrm_Int = 2941,
+ VMAXPDYrr = 2942,
+ VMAXPDYrr_Int = 2943,
+ VMAXPDrm = 2944,
+ VMAXPDrm_Int = 2945,
+ VMAXPDrr = 2946,
+ VMAXPDrr_Int = 2947,
+ VMAXPSYrm = 2948,
+ VMAXPSYrm_Int = 2949,
+ VMAXPSYrr = 2950,
+ VMAXPSYrr_Int = 2951,
+ VMAXPSrm = 2952,
+ VMAXPSrm_Int = 2953,
+ VMAXPSrr = 2954,
+ VMAXPSrr_Int = 2955,
+ VMAXSDrm = 2956,
+ VMAXSDrm_Int = 2957,
+ VMAXSDrr = 2958,
+ VMAXSDrr_Int = 2959,
+ VMAXSSrm = 2960,
+ VMAXSSrm_Int = 2961,
+ VMAXSSrr = 2962,
+ VMAXSSrr_Int = 2963,
+ VMCALL = 2964,
+ VMCLEARm = 2965,
+ VMINPDYrm = 2966,
+ VMINPDYrm_Int = 2967,
+ VMINPDYrr = 2968,
+ VMINPDYrr_Int = 2969,
+ VMINPDrm = 2970,
+ VMINPDrm_Int = 2971,
+ VMINPDrr = 2972,
+ VMINPDrr_Int = 2973,
+ VMINPSYrm = 2974,
+ VMINPSYrm_Int = 2975,
+ VMINPSYrr = 2976,
+ VMINPSYrr_Int = 2977,
+ VMINPSrm = 2978,
+ VMINPSrm_Int = 2979,
+ VMINPSrr = 2980,
+ VMINPSrr_Int = 2981,
+ VMINSDrm = 2982,
+ VMINSDrm_Int = 2983,
+ VMINSDrr = 2984,
+ VMINSDrr_Int = 2985,
+ VMINSSrm = 2986,
+ VMINSSrm_Int = 2987,
+ VMINSSrr = 2988,
+ VMINSSrr_Int = 2989,
+ VMLAUNCH = 2990,
+ VMOVAPDYmr = 2991,
+ VMOVAPDYrm = 2992,
+ VMOVAPDYrr = 2993,
+ VMOVAPDmr = 2994,
+ VMOVAPDrm = 2995,
+ VMOVAPDrr = 2996,
+ VMOVAPSYmr = 2997,
+ VMOVAPSYrm = 2998,
+ VMOVAPSYrr = 2999,
+ VMOVAPSmr = 3000,
+ VMOVAPSrm = 3001,
+ VMOVAPSrr = 3002,
+ VMOVDDUPYrm = 3003,
+ VMOVDDUPYrr = 3004,
+ VMOVDDUPrm = 3005,
+ VMOVDDUPrr = 3006,
+ VMOVDI2PDIrm = 3007,
+ VMOVDI2PDIrr = 3008,
+ VMOVDI2SSrm = 3009,
+ VMOVDI2SSrr = 3010,
+ VMOVDQAYmr = 3011,
+ VMOVDQAYrm = 3012,
+ VMOVDQAYrr = 3013,
+ VMOVDQAmr = 3014,
+ VMOVDQArm = 3015,
+ VMOVDQArr = 3016,
+ VMOVDQUYmr = 3017,
+ VMOVDQUYrm = 3018,
+ VMOVDQUYrr = 3019,
+ VMOVDQUmr = 3020,
+ VMOVDQUmr_Int = 3021,
+ VMOVDQUrm = 3022,
+ VMOVDQUrm_Int = 3023,
+ VMOVDQUrr = 3024,
+ VMOVHLPSrr = 3025,
+ VMOVHPDmr = 3026,
+ VMOVHPDrm = 3027,
+ VMOVHPSmr = 3028,
+ VMOVHPSrm = 3029,
+ VMOVLHPSrr = 3030,
+ VMOVLPDmr = 3031,
+ VMOVLPDrm = 3032,
+ VMOVLPSmr = 3033,
+ VMOVLPSrm = 3034,
+ VMOVLQ128mr = 3035,
+ VMOVMSKPDYr64r = 3036,
+ VMOVMSKPDYrr = 3037,
+ VMOVMSKPDr64r = 3038,
+ VMOVMSKPDrr = 3039,
+ VMOVMSKPSYr64r = 3040,
+ VMOVMSKPSYrr = 3041,
+ VMOVMSKPSr64r = 3042,
+ VMOVMSKPSrr = 3043,
+ VMOVNTDQArm = 3044,
+ VMOVNTDQY_64mr = 3045,
+ VMOVNTDQYmr = 3046,
+ VMOVNTDQ_64mr = 3047,
+ VMOVNTDQmr = 3048,
+ VMOVNTDQmr_Int = 3049,
+ VMOVNTPDYmr = 3050,
+ VMOVNTPDmr = 3051,
+ VMOVNTPDmr_Int = 3052,
+ VMOVNTPSYmr = 3053,
+ VMOVNTPSmr = 3054,
+ VMOVNTPSmr_Int = 3055,
+ VMOVPDI2DImr = 3056,
+ VMOVPDI2DIrr = 3057,
+ VMOVPQI2QImr = 3058,
+ VMOVQI2PQIrm = 3059,
+ VMOVQd64rr = 3060,
+ VMOVQd64rr_alt = 3061,
+ VMOVQs64rr = 3062,
+ VMOVQxrxr = 3063,
+ VMOVSDmr = 3064,
+ VMOVSDrm = 3065,
+ VMOVSDrr = 3066,
+ VMOVSHDUPYrm = 3067,
+ VMOVSHDUPYrr = 3068,
+ VMOVSHDUPrm = 3069,
+ VMOVSHDUPrr = 3070,
+ VMOVSLDUPYrm = 3071,
+ VMOVSLDUPYrr = 3072,
+ VMOVSLDUPrm = 3073,
+ VMOVSLDUPrr = 3074,
+ VMOVSS2DImr = 3075,
+ VMOVSS2DIrr = 3076,
+ VMOVSSmr = 3077,
+ VMOVSSrm = 3078,
+ VMOVSSrr = 3079,
+ VMOVUPDYmr = 3080,
+ VMOVUPDYrm = 3081,
+ VMOVUPDYrr = 3082,
+ VMOVUPDmr = 3083,
+ VMOVUPDmr_Int = 3084,
+ VMOVUPDrm = 3085,
+ VMOVUPDrm_Int = 3086,
+ VMOVUPDrr = 3087,
+ VMOVUPSYmr = 3088,
+ VMOVUPSYrm = 3089,
+ VMOVUPSYrr = 3090,
+ VMOVUPSmr = 3091,
+ VMOVUPSmr_Int = 3092,
+ VMOVUPSrm = 3093,
+ VMOVUPSrm_Int = 3094,
+ VMOVUPSrr = 3095,
+ VMOVZDI2PDIrm = 3096,
+ VMOVZDI2PDIrr = 3097,
+ VMOVZPQILo2PQIrm = 3098,
+ VMOVZPQILo2PQIrr = 3099,
+ VMOVZQI2PQIrm = 3100,
+ VMOVZQI2PQIrr = 3101,
+ VMPSADBWrmi = 3102,
+ VMPSADBWrri = 3103,
+ VMPTRLDm = 3104,
+ VMPTRSTm = 3105,
+ VMREAD32rm = 3106,
+ VMREAD32rr = 3107,
+ VMREAD64rm = 3108,
+ VMREAD64rr = 3109,
+ VMRESUME = 3110,
+ VMULPDYrm = 3111,
+ VMULPDYrr = 3112,
+ VMULPDrm = 3113,
+ VMULPDrr = 3114,
+ VMULPSYrm = 3115,
+ VMULPSYrr = 3116,
+ VMULPSrm = 3117,
+ VMULPSrr = 3118,
+ VMULSDrm = 3119,
+ VMULSDrm_Int = 3120,
+ VMULSDrr = 3121,
+ VMULSDrr_Int = 3122,
+ VMULSSrm = 3123,
+ VMULSSrm_Int = 3124,
+ VMULSSrr = 3125,
+ VMULSSrr_Int = 3126,
+ VMWRITE32rm = 3127,
+ VMWRITE32rr = 3128,
+ VMWRITE64rm = 3129,
+ VMWRITE64rr = 3130,
+ VMXOFF = 3131,
+ VMXON = 3132,
+ VORPDYrm = 3133,
+ VORPDYrr = 3134,
+ VORPDrm = 3135,
+ VORPDrr = 3136,
+ VORPSYrm = 3137,
+ VORPSYrr = 3138,
+ VORPSrm = 3139,
+ VORPSrr = 3140,
+ VPABSBrm128 = 3141,
+ VPABSBrr128 = 3142,
+ VPABSDrm128 = 3143,
+ VPABSDrr128 = 3144,
+ VPABSWrm128 = 3145,
+ VPABSWrr128 = 3146,
+ VPACKSSDWrm = 3147,
+ VPACKSSDWrr = 3148,
+ VPACKSSWBrm = 3149,
+ VPACKSSWBrr = 3150,
+ VPACKUSDWrm = 3151,
+ VPACKUSDWrr = 3152,
+ VPACKUSWBrm = 3153,
+ VPACKUSWBrr = 3154,
+ VPADDBrm = 3155,
+ VPADDBrr = 3156,
+ VPADDDrm = 3157,
+ VPADDDrr = 3158,
+ VPADDQrm = 3159,
+ VPADDQrr = 3160,
+ VPADDSBrm = 3161,
+ VPADDSBrr = 3162,
+ VPADDSWrm = 3163,
+ VPADDSWrr = 3164,
+ VPADDUSBrm = 3165,
+ VPADDUSBrr = 3166,
+ VPADDUSWrm = 3167,
+ VPADDUSWrr = 3168,
+ VPADDWrm = 3169,
+ VPADDWrr = 3170,
+ VPALIGNR128rm = 3171,
+ VPALIGNR128rr = 3172,
+ VPANDNrm = 3173,
+ VPANDNrr = 3174,
+ VPANDrm = 3175,
+ VPANDrr = 3176,
+ VPAVGBrm = 3177,
+ VPAVGBrr = 3178,
+ VPAVGWrm = 3179,
+ VPAVGWrr = 3180,
+ VPBLENDVBrm = 3181,
+ VPBLENDVBrr = 3182,
+ VPBLENDWrmi = 3183,
+ VPBLENDWrri = 3184,
+ VPCLMULHQHQDQrm = 3185,
+ VPCLMULHQHQDQrr = 3186,
+ VPCLMULHQLQDQrm = 3187,
+ VPCLMULHQLQDQrr = 3188,
+ VPCLMULLQHQDQrm = 3189,
+ VPCLMULLQHQDQrr = 3190,
+ VPCLMULLQLQDQrm = 3191,
+ VPCLMULLQLQDQrr = 3192,
+ VPCLMULQDQrm = 3193,
+ VPCLMULQDQrr = 3194,
+ VPCMPEQBrm = 3195,
+ VPCMPEQBrr = 3196,
+ VPCMPEQDrm = 3197,
+ VPCMPEQDrr = 3198,
+ VPCMPEQQrm = 3199,
+ VPCMPEQQrr = 3200,
+ VPCMPEQWrm = 3201,
+ VPCMPEQWrr = 3202,
+ VPCMPESTRIArm = 3203,
+ VPCMPESTRIArr = 3204,
+ VPCMPESTRICrm = 3205,
+ VPCMPESTRICrr = 3206,
+ VPCMPESTRIOrm = 3207,
+ VPCMPESTRIOrr = 3208,
+ VPCMPESTRISrm = 3209,
+ VPCMPESTRISrr = 3210,
+ VPCMPESTRIZrm = 3211,
+ VPCMPESTRIZrr = 3212,
+ VPCMPESTRIrm = 3213,
+ VPCMPESTRIrr = 3214,
+ VPCMPESTRM128MEM = 3215,
+ VPCMPESTRM128REG = 3216,
+ VPCMPESTRM128rm = 3217,
+ VPCMPESTRM128rr = 3218,
+ VPCMPGTBrm = 3219,
+ VPCMPGTBrr = 3220,
+ VPCMPGTDrm = 3221,
+ VPCMPGTDrr = 3222,
+ VPCMPGTQrm = 3223,
+ VPCMPGTQrr = 3224,
+ VPCMPGTWrm = 3225,
+ VPCMPGTWrr = 3226,
+ VPCMPISTRIArm = 3227,
+ VPCMPISTRIArr = 3228,
+ VPCMPISTRICrm = 3229,
+ VPCMPISTRICrr = 3230,
+ VPCMPISTRIOrm = 3231,
+ VPCMPISTRIOrr = 3232,
+ VPCMPISTRISrm = 3233,
+ VPCMPISTRISrr = 3234,
+ VPCMPISTRIZrm = 3235,
+ VPCMPISTRIZrr = 3236,
+ VPCMPISTRIrm = 3237,
+ VPCMPISTRIrr = 3238,
+ VPCMPISTRM128MEM = 3239,
+ VPCMPISTRM128REG = 3240,
+ VPCMPISTRM128rm = 3241,
+ VPCMPISTRM128rr = 3242,
+ VPERM2F128rm = 3243,
+ VPERM2F128rr = 3244,
+ VPERMILPDYmi = 3245,
+ VPERMILPDYri = 3246,
+ VPERMILPDYrm = 3247,
+ VPERMILPDYrr = 3248,
+ VPERMILPDmi = 3249,
+ VPERMILPDri = 3250,
+ VPERMILPDrm = 3251,
+ VPERMILPDrr = 3252,
+ VPERMILPSYmi = 3253,
+ VPERMILPSYri = 3254,
+ VPERMILPSYrm = 3255,
+ VPERMILPSYrr = 3256,
+ VPERMILPSmi = 3257,
+ VPERMILPSri = 3258,
+ VPERMILPSrm = 3259,
+ VPERMILPSrr = 3260,
+ VPEXTRBmr = 3261,
+ VPEXTRBrr = 3262,
+ VPEXTRBrr64 = 3263,
+ VPEXTRDmr = 3264,
+ VPEXTRDrr = 3265,
+ VPEXTRQmr = 3266,
+ VPEXTRQrr = 3267,
+ VPEXTRWmr = 3268,
+ VPEXTRWri = 3269,
+ VPHADDDrm128 = 3270,
+ VPHADDDrr128 = 3271,
+ VPHADDSWrm128 = 3272,
+ VPHADDSWrr128 = 3273,
+ VPHADDWrm128 = 3274,
+ VPHADDWrr128 = 3275,
+ VPHMINPOSUWrm128 = 3276,
+ VPHMINPOSUWrr128 = 3277,
+ VPHSUBDrm128 = 3278,
+ VPHSUBDrr128 = 3279,
+ VPHSUBSWrm128 = 3280,
+ VPHSUBSWrr128 = 3281,
+ VPHSUBWrm128 = 3282,
+ VPHSUBWrr128 = 3283,
+ VPINSRBrm = 3284,
+ VPINSRBrr = 3285,
+ VPINSRDrm = 3286,
+ VPINSRDrr = 3287,
+ VPINSRQrm = 3288,
+ VPINSRQrr = 3289,
+ VPINSRWrmi = 3290,
+ VPINSRWrr64i = 3291,
+ VPINSRWrri = 3292,
+ VPMADDUBSWrm128 = 3293,
+ VPMADDUBSWrr128 = 3294,
+ VPMADDWDrm = 3295,
+ VPMADDWDrr = 3296,
+ VPMAXSBrm = 3297,
+ VPMAXSBrr = 3298,
+ VPMAXSDrm = 3299,
+ VPMAXSDrr = 3300,
+ VPMAXSWrm = 3301,
+ VPMAXSWrr = 3302,
+ VPMAXUBrm = 3303,
+ VPMAXUBrr = 3304,
+ VPMAXUDrm = 3305,
+ VPMAXUDrr = 3306,
+ VPMAXUWrm = 3307,
+ VPMAXUWrr = 3308,
+ VPMINSBrm = 3309,
+ VPMINSBrr = 3310,
+ VPMINSDrm = 3311,
+ VPMINSDrr = 3312,
+ VPMINSWrm = 3313,
+ VPMINSWrr = 3314,
+ VPMINUBrm = 3315,
+ VPMINUBrr = 3316,
+ VPMINUDrm = 3317,
+ VPMINUDrr = 3318,
+ VPMINUWrm = 3319,
+ VPMINUWrr = 3320,
+ VPMOVMSKBr64r = 3321,
+ VPMOVMSKBrr = 3322,
+ VPMOVSXBDrm = 3323,
+ VPMOVSXBDrr = 3324,
+ VPMOVSXBQrm = 3325,
+ VPMOVSXBQrr = 3326,
+ VPMOVSXBWrm = 3327,
+ VPMOVSXBWrr = 3328,
+ VPMOVSXDQrm = 3329,
+ VPMOVSXDQrr = 3330,
+ VPMOVSXWDrm = 3331,
+ VPMOVSXWDrr = 3332,
+ VPMOVSXWQrm = 3333,
+ VPMOVSXWQrr = 3334,
+ VPMOVZXBDrm = 3335,
+ VPMOVZXBDrr = 3336,
+ VPMOVZXBQrm = 3337,
+ VPMOVZXBQrr = 3338,
+ VPMOVZXBWrm = 3339,
+ VPMOVZXBWrr = 3340,
+ VPMOVZXDQrm = 3341,
+ VPMOVZXDQrr = 3342,
+ VPMOVZXWDrm = 3343,
+ VPMOVZXWDrr = 3344,
+ VPMOVZXWQrm = 3345,
+ VPMOVZXWQrr = 3346,
+ VPMULDQrm = 3347,
+ VPMULDQrr = 3348,
+ VPMULHRSWrm128 = 3349,
+ VPMULHRSWrr128 = 3350,
+ VPMULHUWrm = 3351,
+ VPMULHUWrr = 3352,
+ VPMULHWrm = 3353,
+ VPMULHWrr = 3354,
+ VPMULLDrm = 3355,
+ VPMULLDrr = 3356,
+ VPMULLWrm = 3357,
+ VPMULLWrr = 3358,
+ VPMULUDQrm = 3359,
+ VPMULUDQrr = 3360,
+ VPORrm = 3361,
+ VPORrr = 3362,
+ VPSADBWrm = 3363,
+ VPSADBWrr = 3364,
+ VPSHUFBrm128 = 3365,
+ VPSHUFBrr128 = 3366,
+ VPSHUFDmi = 3367,
+ VPSHUFDri = 3368,
+ VPSHUFHWmi = 3369,
+ VPSHUFHWri = 3370,
+ VPSHUFLWmi = 3371,
+ VPSHUFLWri = 3372,
+ VPSIGNBrm128 = 3373,
+ VPSIGNBrr128 = 3374,
+ VPSIGNDrm128 = 3375,
+ VPSIGNDrr128 = 3376,
+ VPSIGNWrm128 = 3377,
+ VPSIGNWrr128 = 3378,
+ VPSLLDQri = 3379,
+ VPSLLDri = 3380,
+ VPSLLDrm = 3381,
+ VPSLLDrr = 3382,
+ VPSLLQri = 3383,
+ VPSLLQrm = 3384,
+ VPSLLQrr = 3385,
+ VPSLLWri = 3386,
+ VPSLLWrm = 3387,
+ VPSLLWrr = 3388,
+ VPSRADri = 3389,
+ VPSRADrm = 3390,
+ VPSRADrr = 3391,
+ VPSRAWri = 3392,
+ VPSRAWrm = 3393,
+ VPSRAWrr = 3394,
+ VPSRLDQri = 3395,
+ VPSRLDri = 3396,
+ VPSRLDrm = 3397,
+ VPSRLDrr = 3398,
+ VPSRLQri = 3399,
+ VPSRLQrm = 3400,
+ VPSRLQrr = 3401,
+ VPSRLWri = 3402,
+ VPSRLWrm = 3403,
+ VPSRLWrr = 3404,
+ VPSUBBrm = 3405,
+ VPSUBBrr = 3406,
+ VPSUBDrm = 3407,
+ VPSUBDrr = 3408,
+ VPSUBQrm = 3409,
+ VPSUBQrr = 3410,
+ VPSUBSBrm = 3411,
+ VPSUBSBrr = 3412,
+ VPSUBSWrm = 3413,
+ VPSUBSWrr = 3414,
+ VPSUBUSBrm = 3415,
+ VPSUBUSBrr = 3416,
+ VPSUBUSWrm = 3417,
+ VPSUBUSWrr = 3418,
+ VPSUBWrm = 3419,
+ VPSUBWrr = 3420,
+ VPTESTYrm = 3421,
+ VPTESTYrr = 3422,
+ VPTESTrm = 3423,
+ VPTESTrr = 3424,
+ VPUNPCKHBWrm = 3425,
+ VPUNPCKHBWrr = 3426,
+ VPUNPCKHDQrm = 3427,
+ VPUNPCKHDQrr = 3428,
+ VPUNPCKHQDQrm = 3429,
+ VPUNPCKHQDQrr = 3430,
+ VPUNPCKHWDrm = 3431,
+ VPUNPCKHWDrr = 3432,
+ VPUNPCKLBWrm = 3433,
+ VPUNPCKLBWrr = 3434,
+ VPUNPCKLDQrm = 3435,
+ VPUNPCKLDQrr = 3436,
+ VPUNPCKLQDQrm = 3437,
+ VPUNPCKLQDQrr = 3438,
+ VPUNPCKLWDrm = 3439,
+ VPUNPCKLWDrr = 3440,
+ VPXORrm = 3441,
+ VPXORrr = 3442,
+ VRCPPSYm = 3443,
+ VRCPPSYm_Int = 3444,
+ VRCPPSYr = 3445,
+ VRCPPSYr_Int = 3446,
+ VRCPPSm = 3447,
+ VRCPPSm_Int = 3448,
+ VRCPPSr = 3449,
+ VRCPPSr_Int = 3450,
+ VRCPSSm = 3451,
+ VRCPSSm_Int = 3452,
+ VRCPSSr = 3453,
+ VRCPSSr_Int = 3454,
+ VROUNDPDm = 3455,
+ VROUNDPDm_Int = 3456,
+ VROUNDPDr = 3457,
+ VROUNDPDr_Int = 3458,
+ VROUNDPSm = 3459,
+ VROUNDPSm_Int = 3460,
+ VROUNDPSr = 3461,
+ VROUNDPSr_Int = 3462,
+ VROUNDSDm = 3463,
+ VROUNDSDm_Int = 3464,
+ VROUNDSDr = 3465,
+ VROUNDSDr_Int = 3466,
+ VROUNDSSm = 3467,
+ VROUNDSSm_Int = 3468,
+ VROUNDSSr = 3469,
+ VROUNDSSr_Int = 3470,
+ VROUNDYPDm = 3471,
+ VROUNDYPDm_Int = 3472,
+ VROUNDYPDr = 3473,
+ VROUNDYPDr_Int = 3474,
+ VROUNDYPSm = 3475,
+ VROUNDYPSm_Int = 3476,
+ VROUNDYPSr = 3477,
+ VROUNDYPSr_Int = 3478,
+ VRSQRTPSYm = 3479,
+ VRSQRTPSYm_Int = 3480,
+ VRSQRTPSYr = 3481,
+ VRSQRTPSYr_Int = 3482,
+ VRSQRTPSm = 3483,
+ VRSQRTPSm_Int = 3484,
+ VRSQRTPSr = 3485,
+ VRSQRTPSr_Int = 3486,
+ VRSQRTSSm = 3487,
+ VRSQRTSSm_Int = 3488,
+ VRSQRTSSr = 3489,
+ VRSQRTSSr_Int = 3490,
+ VSHUFPDYrmi = 3491,
+ VSHUFPDYrri = 3492,
+ VSHUFPDrmi = 3493,
+ VSHUFPDrri = 3494,
+ VSHUFPSYrmi = 3495,
+ VSHUFPSYrri = 3496,
+ VSHUFPSrmi = 3497,
+ VSHUFPSrri = 3498,
+ VSQRTPDYm = 3499,
+ VSQRTPDYm_Int = 3500,
+ VSQRTPDYr = 3501,
+ VSQRTPDYr_Int = 3502,
+ VSQRTPDm = 3503,
+ VSQRTPDm_Int = 3504,
+ VSQRTPDr = 3505,
+ VSQRTPDr_Int = 3506,
+ VSQRTPSYm = 3507,
+ VSQRTPSYm_Int = 3508,
+ VSQRTPSYr = 3509,
+ VSQRTPSYr_Int = 3510,
+ VSQRTPSm = 3511,
+ VSQRTPSm_Int = 3512,
+ VSQRTPSr = 3513,
+ VSQRTPSr_Int = 3514,
+ VSQRTSDm = 3515,
+ VSQRTSDm_Int = 3516,
+ VSQRTSDr = 3517,
+ VSQRTSDr_Int = 3518,
+ VSQRTSSm = 3519,
+ VSQRTSSm_Int = 3520,
+ VSQRTSSr = 3521,
+ VSQRTSSr_Int = 3522,
+ VSTMXCSR = 3523,
+ VSUBPDYrm = 3524,
+ VSUBPDYrr = 3525,
+ VSUBPDrm = 3526,
+ VSUBPDrr = 3527,
+ VSUBPSYrm = 3528,
+ VSUBPSYrr = 3529,
+ VSUBPSrm = 3530,
+ VSUBPSrr = 3531,
+ VSUBSDrm = 3532,
+ VSUBSDrm_Int = 3533,
+ VSUBSDrr = 3534,
+ VSUBSDrr_Int = 3535,
+ VSUBSSrm = 3536,
+ VSUBSSrm_Int = 3537,
+ VSUBSSrr = 3538,
+ VSUBSSrr_Int = 3539,
+ VTESTPDYrm = 3540,
+ VTESTPDYrr = 3541,
+ VTESTPDrm = 3542,
+ VTESTPDrr = 3543,
+ VTESTPSYrm = 3544,
+ VTESTPSYrr = 3545,
+ VTESTPSrm = 3546,
+ VTESTPSrr = 3547,
+ VUCOMISDrm = 3548,
+ VUCOMISDrr = 3549,
+ VUCOMISSrm = 3550,
+ VUCOMISSrr = 3551,
+ VUNPCKHPDYrm = 3552,
+ VUNPCKHPDYrr = 3553,
+ VUNPCKHPDrm = 3554,
+ VUNPCKHPDrr = 3555,
+ VUNPCKHPSYrm = 3556,
+ VUNPCKHPSYrr = 3557,
+ VUNPCKHPSrm = 3558,
+ VUNPCKHPSrr = 3559,
+ VUNPCKLPDYrm = 3560,
+ VUNPCKLPDYrr = 3561,
+ VUNPCKLPDrm = 3562,
+ VUNPCKLPDrr = 3563,
+ VUNPCKLPSYrm = 3564,
+ VUNPCKLPSYrr = 3565,
+ VUNPCKLPSrm = 3566,
+ VUNPCKLPSrr = 3567,
+ VXORPDYrm = 3568,
+ VXORPDYrr = 3569,
+ VXORPDrm = 3570,
+ VXORPDrr = 3571,
+ VXORPSYrm = 3572,
+ VXORPSYrr = 3573,
+ VXORPSrm = 3574,
+ VXORPSrr = 3575,
+ VZEROALL = 3576,
+ VZEROUPPER = 3577,
+ V_SET0PD = 3578,
+ V_SET0PI = 3579,
+ V_SET0PS = 3580,
+ V_SETALLONES = 3581,
+ WAIT = 3582,
+ WBINVD = 3583,
+ WINCALL64m = 3584,
+ WINCALL64pcrel32 = 3585,
+ WINCALL64r = 3586,
+ WRMSR = 3587,
+ XADD16rm = 3588,
+ XADD16rr = 3589,
+ XADD32rm = 3590,
+ XADD32rr = 3591,
+ XADD64rm = 3592,
+ XADD64rr = 3593,
+ XADD8rm = 3594,
+ XADD8rr = 3595,
+ XCHG16ar = 3596,
+ XCHG16rm = 3597,
+ XCHG16rr = 3598,
+ XCHG32ar = 3599,
+ XCHG32rm = 3600,
+ XCHG32rr = 3601,
+ XCHG64ar = 3602,
+ XCHG64rm = 3603,
+ XCHG64rr = 3604,
+ XCHG8rm = 3605,
+ XCHG8rr = 3606,
+ XCH_F = 3607,
+ XLAT = 3608,
+ XOR16i16 = 3609,
+ XOR16mi = 3610,
+ XOR16mi8 = 3611,
+ XOR16mr = 3612,
+ XOR16ri = 3613,
+ XOR16ri8 = 3614,
+ XOR16rm = 3615,
+ XOR16rr = 3616,
+ XOR16rr_REV = 3617,
+ XOR32i32 = 3618,
+ XOR32mi = 3619,
+ XOR32mi8 = 3620,
+ XOR32mr = 3621,
+ XOR32ri = 3622,
+ XOR32ri8 = 3623,
+ XOR32rm = 3624,
+ XOR32rr = 3625,
+ XOR32rr_REV = 3626,
+ XOR64i32 = 3627,
+ XOR64mi32 = 3628,
+ XOR64mi8 = 3629,
+ XOR64mr = 3630,
+ XOR64ri32 = 3631,
+ XOR64ri8 = 3632,
+ XOR64rm = 3633,
+ XOR64rr = 3634,
+ XOR64rr_REV = 3635,
+ XOR8i8 = 3636,
+ XOR8mi = 3637,
+ XOR8mr = 3638,
+ XOR8ri = 3639,
+ XOR8rm = 3640,
+ XOR8rr = 3641,
+ XOR8rr_REV = 3642,
+ XORPDrm = 3643,
+ XORPDrr = 3644,
+ XORPSrm = 3645,
+ XORPSrr = 3646,
+ INSTRUCTION_LIST_END = 3647
};
}
} // End llvm namespace
diff --git a/libclamav/c++/X86GenRegisterInfo.h.inc b/libclamav/c++/X86GenRegisterInfo.h.inc
index 8279ef8..7661ab5 100644
--- a/libclamav/c++/X86GenRegisterInfo.h.inc
+++ b/libclamav/c++/X86GenRegisterInfo.h.inc
@@ -19,29 +19,31 @@ struct X86GenRegisterInfo : public TargetRegisterInfo {
{ return false; }
unsigned getSubReg(unsigned RegNo, unsigned Index) const;
unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const;
+ unsigned composeSubRegIndices(unsigned, unsigned) const;
};
namespace X86 { // Register classes
enum {
- CCRRegClassID = 1,
- CONTROL_REG_32RegClassID = 2,
- CONTROL_REG_64RegClassID = 3,
- DEBUG_REGRegClassID = 4,
- FR32RegClassID = 5,
- FR64RegClassID = 6,
- GR16RegClassID = 7,
- GR16_ABCDRegClassID = 8,
- GR16_NOREXRegClassID = 9,
- GR32RegClassID = 10,
- GR32_ABCDRegClassID = 11,
- GR32_ADRegClassID = 12,
- GR32_NOREXRegClassID = 13,
- GR32_NOSPRegClassID = 14,
- GR64RegClassID = 15,
- GR64_ABCDRegClassID = 16,
- GR64_NOREXRegClassID = 17,
- GR64_NOREX_NOSPRegClassID = 18,
- GR64_NOSPRegClassID = 19,
+ CCRRegClassID = 0,
+ CONTROL_REGRegClassID = 1,
+ DEBUG_REGRegClassID = 2,
+ FR32RegClassID = 3,
+ FR64RegClassID = 4,
+ GR16RegClassID = 5,
+ GR16_ABCDRegClassID = 6,
+ GR16_NOREXRegClassID = 7,
+ GR32RegClassID = 8,
+ GR32_ABCDRegClassID = 9,
+ GR32_ADRegClassID = 10,
+ GR32_NOREXRegClassID = 11,
+ GR32_NOSPRegClassID = 12,
+ GR32_TCRegClassID = 13,
+ GR64RegClassID = 14,
+ GR64_ABCDRegClassID = 15,
+ GR64_NOREXRegClassID = 16,
+ GR64_NOREX_NOSPRegClassID = 17,
+ GR64_NOSPRegClassID = 18,
+ GR64_TCRegClassID = 19,
GR8RegClassID = 20,
GR8_ABCD_HRegClassID = 21,
GR8_ABCD_LRegClassID = 22,
@@ -58,19 +60,16 @@ namespace X86 { // Register classes
struct CCRClass : public TargetRegisterClass {
CCRClass();
- };
+
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ };
extern CCRClass CCRRegClass;
static TargetRegisterClass * const CCRRegisterClass = &CCRRegClass;
- struct CONTROL_REG_32Class : public TargetRegisterClass {
- CONTROL_REG_32Class();
- };
- extern CONTROL_REG_32Class CONTROL_REG_32RegClass;
- static TargetRegisterClass * const CONTROL_REG_32RegisterClass = &CONTROL_REG_32RegClass;
- struct CONTROL_REG_64Class : public TargetRegisterClass {
- CONTROL_REG_64Class();
+ struct CONTROL_REGClass : public TargetRegisterClass {
+ CONTROL_REGClass();
};
- extern CONTROL_REG_64Class CONTROL_REG_64RegClass;
- static TargetRegisterClass * const CONTROL_REG_64RegisterClass = &CONTROL_REG_64RegClass;
+ extern CONTROL_REGClass CONTROL_REGRegClass;
+ static TargetRegisterClass * const CONTROL_REGRegisterClass = &CONTROL_REGRegClass;
struct DEBUG_REGClass : public TargetRegisterClass {
DEBUG_REGClass();
};
@@ -143,6 +142,11 @@ namespace X86 { // Register classes
};
extern GR32_NOSPClass GR32_NOSPRegClass;
static TargetRegisterClass * const GR32_NOSPRegisterClass = &GR32_NOSPRegClass;
+ struct GR32_TCClass : public TargetRegisterClass {
+ GR32_TCClass();
+ };
+ extern GR32_TCClass GR32_TCRegClass;
+ static TargetRegisterClass * const GR32_TCRegisterClass = &GR32_TCRegClass;
struct GR64Class : public TargetRegisterClass {
GR64Class();
@@ -176,6 +180,11 @@ namespace X86 { // Register classes
};
extern GR64_NOSPClass GR64_NOSPRegClass;
static TargetRegisterClass * const GR64_NOSPRegisterClass = &GR64_NOSPRegClass;
+ struct GR64_TCClass : public TargetRegisterClass {
+ GR64_TCClass();
+ };
+ extern GR64_TCClass GR64_TCRegClass;
+ static TargetRegisterClass * const GR64_TCRegisterClass = &GR64_TCRegClass;
struct GR8Class : public TargetRegisterClass {
GR8Class();
@@ -238,7 +247,9 @@ namespace X86 { // Register classes
static TargetRegisterClass * const VR128RegisterClass = &VR128RegClass;
struct VR256Class : public TargetRegisterClass {
VR256Class();
- };
+
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ };
extern VR256Class VR256RegClass;
static TargetRegisterClass * const VR256RegisterClass = &VR256RegClass;
struct VR64Class : public TargetRegisterClass {
diff --git a/libclamav/c++/X86GenRegisterInfo.inc b/libclamav/c++/X86GenRegisterInfo.inc
index d18e482..4c6ce85 100644
--- a/libclamav/c++/X86GenRegisterInfo.inc
+++ b/libclamav/c++/X86GenRegisterInfo.inc
@@ -14,14 +14,9 @@ namespace { // Register classes...
X86::EFLAGS,
};
- // CONTROL_REG_32 Register Class...
- static const unsigned CONTROL_REG_32[] = {
- X86::ECR0, X86::ECR1, X86::ECR2, X86::ECR3, X86::ECR4, X86::ECR5, X86::ECR6, X86::ECR7,
- };
-
- // CONTROL_REG_64 Register Class...
- static const unsigned CONTROL_REG_64[] = {
- X86::RCR0, X86::RCR1, X86::RCR2, X86::RCR3, X86::RCR4, X86::RCR5, X86::RCR6, X86::RCR7, X86::RCR8,
+ // CONTROL_REG Register Class...
+ static const unsigned CONTROL_REG[] = {
+ X86::CR0, X86::CR1, X86::CR2, X86::CR3, X86::CR4, X86::CR5, X86::CR6, X86::CR7, X86::CR8,
};
// DEBUG_REG Register Class...
@@ -79,6 +74,11 @@ namespace { // Register classes...
X86::EAX, X86::ECX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, X86::R8D, X86::R9D, X86::R10D, X86::R11D, X86::R14D, X86::R15D, X86::R12D, X86::R13D,
};
+ // GR32_TC Register Class...
+ static const unsigned GR32_TC[] = {
+ X86::EAX, X86::ECX, X86::EDX,
+ };
+
// GR64 Register Class...
static const unsigned GR64[] = {
X86::RAX, X86::RCX, X86::RDX, X86::RSI, X86::RDI, X86::R8, X86::R9, X86::R10, X86::R11, X86::RBX, X86::R14, X86::R15, X86::R12, X86::R13, X86::RBP, X86::RSP, X86::RIP,
@@ -104,6 +104,11 @@ namespace { // Register classes...
X86::RAX, X86::RCX, X86::RDX, X86::RSI, X86::RDI, X86::R8, X86::R9, X86::R10, X86::R11, X86::RBX, X86::R14, X86::R15, X86::R12, X86::R13, X86::RBP,
};
+ // GR64_TC Register Class...
+ static const unsigned GR64_TC[] = {
+ X86::RAX, X86::RCX, X86::RDX, X86::RSI, X86::RDI, X86::R8, X86::R9, X86::R11,
+ };
+
// GR8 Register Class...
static const unsigned GR8[] = {
X86::AL, X86::CL, X86::DL, X86::AH, X86::CH, X86::DH, X86::BL, X86::BH, X86::SIL, X86::DIL, X86::BPL, X86::SPL, X86::R8B, X86::R9B, X86::R10B, X86::R11B, X86::R14B, X86::R15B, X86::R12B, X86::R13B,
@@ -169,13 +174,8 @@ namespace { // Register classes...
MVT::i32, MVT::Other
};
- // CONTROL_REG_32VTs Register Class Value Types...
- static const EVT CONTROL_REG_32VTs[] = {
- MVT::i32, MVT::Other
- };
-
- // CONTROL_REG_64VTs Register Class Value Types...
- static const EVT CONTROL_REG_64VTs[] = {
+ // CONTROL_REGVTs Register Class Value Types...
+ static const EVT CONTROL_REGVTs[] = {
MVT::i64, MVT::Other
};
@@ -234,6 +234,11 @@ namespace { // Register classes...
MVT::i32, MVT::Other
};
+ // GR32_TCVTs Register Class Value Types...
+ static const EVT GR32_TCVTs[] = {
+ MVT::i32, MVT::Other
+ };
+
// GR64VTs Register Class Value Types...
static const EVT GR64VTs[] = {
MVT::i64, MVT::Other
@@ -259,6 +264,11 @@ namespace { // Register classes...
MVT::i64, MVT::Other
};
+ // GR64_TCVTs Register Class Value Types...
+ static const EVT GR64_TCVTs[] = {
+ MVT::i64, MVT::Other
+ };
+
// GR8VTs Register Class Value Types...
static const EVT GR8VTs[] = {
MVT::i8, MVT::Other
@@ -311,20 +321,19 @@ namespace { // Register classes...
// VR256VTs Register Class Value Types...
static const EVT VR256VTs[] = {
- MVT::v8i32, MVT::v4i64, MVT::v8f32, MVT::v4f64, MVT::Other
+ MVT::v32i8, MVT::v8i32, MVT::v4i64, MVT::v8f32, MVT::v4f64, MVT::Other
};
// VR64VTs Register Class Value Types...
static const EVT VR64VTs[] = {
- MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v1i64, MVT::v2f32, MVT::Other
+ MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v1i64, MVT::Other
};
} // end anonymous namespace
namespace X86 { // Register class instances
CCRClass CCRRegClass;
- CONTROL_REG_32Class CONTROL_REG_32RegClass;
- CONTROL_REG_64Class CONTROL_REG_64RegClass;
+ CONTROL_REGClass CONTROL_REGRegClass;
DEBUG_REGClass DEBUG_REGRegClass;
FR32Class FR32RegClass;
FR64Class FR64RegClass;
@@ -336,11 +345,13 @@ namespace X86 { // Register class instances
GR32_ADClass GR32_ADRegClass;
GR32_NOREXClass GR32_NOREXRegClass;
GR32_NOSPClass GR32_NOSPRegClass;
+ GR32_TCClass GR32_TCRegClass;
GR64Class GR64RegClass;
GR64_ABCDClass GR64_ABCDRegClass;
GR64_NOREXClass GR64_NOREXRegClass;
GR64_NOREX_NOSPClass GR64_NOREX_NOSPRegClass;
GR64_NOSPClass GR64_NOSPRegClass;
+ GR64_TCClass GR64_TCRegClass;
GR8Class GR8RegClass;
GR8_ABCD_HClass GR8_ABCD_HRegClass;
GR8_ABCD_LClass GR8_ABCD_LRegClass;
@@ -356,157 +367,162 @@ namespace X86 { // Register class instances
// CCR Sub-register Classes...
static const TargetRegisterClass* const CCRSubRegClasses[] = {
- NULL
- };
-
- // CONTROL_REG_32 Sub-register Classes...
- static const TargetRegisterClass* const CONTROL_REG_32SubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0, 0, 0
};
- // CONTROL_REG_64 Sub-register Classes...
- static const TargetRegisterClass* const CONTROL_REG_64SubRegClasses[] = {
- NULL
+ // CONTROL_REG Sub-register Classes...
+ static const TargetRegisterClass* const CONTROL_REGSubRegClasses[] = {
+ 0, 0, 0, 0, 0, 0, 0
};
// DEBUG_REG Sub-register Classes...
static const TargetRegisterClass* const DEBUG_REGSubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0, 0, 0
};
// FR32 Sub-register Classes...
static const TargetRegisterClass* const FR32SubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0, 0, 0
};
// FR64 Sub-register Classes...
static const TargetRegisterClass* const FR64SubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0, 0, 0
};
// GR16 Sub-register Classes...
static const TargetRegisterClass* const GR16SubRegClasses[] = {
- &X86::GR8RegClass, &X86::GR8RegClass, NULL
+ &X86::GR8RegClass, &X86::GR8RegClass, 0, 0, 0, 0, 0
};
// GR16_ABCD Sub-register Classes...
static const TargetRegisterClass* const GR16_ABCDSubRegClasses[] = {
- &X86::GR8_ABCD_LRegClass, &X86::GR8_ABCD_HRegClass, NULL
+ &X86::GR8_ABCD_LRegClass, &X86::GR8_ABCD_HRegClass, 0, 0, 0, 0, 0
};
// GR16_NOREX Sub-register Classes...
static const TargetRegisterClass* const GR16_NOREXSubRegClasses[] = {
- &X86::GR8_NOREXRegClass, &X86::GR8_NOREXRegClass, NULL
+ &X86::GR8_NOREXRegClass, &X86::GR8_NOREXRegClass, 0, 0, 0, 0, 0
};
// GR32 Sub-register Classes...
static const TargetRegisterClass* const GR32SubRegClasses[] = {
- &X86::GR8RegClass, &X86::GR8RegClass, &X86::GR16RegClass, NULL
+ &X86::GR8RegClass, &X86::GR8RegClass, &X86::GR16RegClass, 0, 0, 0, 0
};
// GR32_ABCD Sub-register Classes...
static const TargetRegisterClass* const GR32_ABCDSubRegClasses[] = {
- &X86::GR8_ABCD_LRegClass, &X86::GR8_ABCD_HRegClass, &X86::GR16_ABCDRegClass, NULL
+ &X86::GR8_ABCD_LRegClass, &X86::GR8_ABCD_HRegClass, &X86::GR16_ABCDRegClass, 0, 0, 0, 0
};
// GR32_AD Sub-register Classes...
static const TargetRegisterClass* const GR32_ADSubRegClasses[] = {
- &X86::GR8_ABCD_LRegClass, &X86::GR8_ABCD_HRegClass, &X86::GR16_ABCDRegClass, NULL
+ &X86::GR8_ABCD_LRegClass, &X86::GR8_ABCD_HRegClass, &X86::GR16_ABCDRegClass, 0, 0, 0, 0
};
// GR32_NOREX Sub-register Classes...
static const TargetRegisterClass* const GR32_NOREXSubRegClasses[] = {
- &X86::GR8_NOREXRegClass, &X86::GR8_NOREXRegClass, &X86::GR16_NOREXRegClass, NULL
+ &X86::GR8_NOREXRegClass, &X86::GR8_NOREXRegClass, &X86::GR16_NOREXRegClass, 0, 0, 0, 0
};
// GR32_NOSP Sub-register Classes...
static const TargetRegisterClass* const GR32_NOSPSubRegClasses[] = {
- &X86::GR8RegClass, &X86::GR8RegClass, &X86::GR16RegClass, NULL
+ &X86::GR8RegClass, &X86::GR8RegClass, &X86::GR16RegClass, 0, 0, 0, 0
+ };
+
+ // GR32_TC Sub-register Classes...
+ static const TargetRegisterClass* const GR32_TCSubRegClasses[] = {
+ &X86::GR8RegClass, &X86::GR8RegClass, &X86::GR16RegClass, 0, 0, 0, 0
};
// GR64 Sub-register Classes...
static const TargetRegisterClass* const GR64SubRegClasses[] = {
- &X86::GR8RegClass, &X86::GR8RegClass, &X86::GR16RegClass, &X86::GR32RegClass, NULL
+ &X86::GR8RegClass, &X86::GR8RegClass, &X86::GR16RegClass, &X86::GR32RegClass, 0, 0, 0
};
// GR64_ABCD Sub-register Classes...
static const TargetRegisterClass* const GR64_ABCDSubRegClasses[] = {
- &X86::GR8_ABCD_LRegClass, &X86::GR8_ABCD_HRegClass, &X86::GR16_ABCDRegClass, &X86::GR32_ABCDRegClass, NULL
+ &X86::GR8_ABCD_LRegClass, &X86::GR8_ABCD_HRegClass, &X86::GR16_ABCDRegClass, &X86::GR32_ABCDRegClass, 0, 0, 0
};
// GR64_NOREX Sub-register Classes...
static const TargetRegisterClass* const GR64_NOREXSubRegClasses[] = {
- &X86::GR8_NOREXRegClass, &X86::GR8_NOREXRegClass, &X86::GR16_NOREXRegClass, &X86::GR32_NOREXRegClass, NULL
+ &X86::GR8_NOREXRegClass, &X86::GR8_NOREXRegClass, &X86::GR16_NOREXRegClass, &X86::GR32_NOREXRegClass, 0, 0, 0
};
// GR64_NOREX_NOSP Sub-register Classes...
static const TargetRegisterClass* const GR64_NOREX_NOSPSubRegClasses[] = {
- &X86::GR8_NOREXRegClass, &X86::GR8_NOREXRegClass, &X86::GR16_NOREXRegClass, &X86::GR32_NOREXRegClass, NULL
+ &X86::GR8_NOREXRegClass, &X86::GR8_NOREXRegClass, &X86::GR16_NOREXRegClass, &X86::GR32_NOREXRegClass, 0, 0, 0
};
// GR64_NOSP Sub-register Classes...
static const TargetRegisterClass* const GR64_NOSPSubRegClasses[] = {
- &X86::GR8RegClass, &X86::GR8RegClass, &X86::GR16RegClass, &X86::GR32_NOSPRegClass, NULL
+ &X86::GR8RegClass, &X86::GR8RegClass, &X86::GR16RegClass, &X86::GR32_NOSPRegClass, 0, 0, 0
+ };
+
+ // GR64_TC Sub-register Classes...
+ static const TargetRegisterClass* const GR64_TCSubRegClasses[] = {
+ &X86::GR8RegClass, &X86::GR8RegClass, &X86::GR16RegClass, &X86::GR32_TCRegClass, 0, 0, 0
};
// GR8 Sub-register Classes...
static const TargetRegisterClass* const GR8SubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0, 0, 0
};
// GR8_ABCD_H Sub-register Classes...
static const TargetRegisterClass* const GR8_ABCD_HSubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0, 0, 0
};
// GR8_ABCD_L Sub-register Classes...
static const TargetRegisterClass* const GR8_ABCD_LSubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0, 0, 0
};
// GR8_NOREX Sub-register Classes...
static const TargetRegisterClass* const GR8_NOREXSubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0, 0, 0
};
// RFP32 Sub-register Classes...
static const TargetRegisterClass* const RFP32SubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0, 0, 0
};
// RFP64 Sub-register Classes...
static const TargetRegisterClass* const RFP64SubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0, 0, 0
};
// RFP80 Sub-register Classes...
static const TargetRegisterClass* const RFP80SubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0, 0, 0
};
// RST Sub-register Classes...
static const TargetRegisterClass* const RSTSubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0, 0, 0
};
// SEGMENT_REG Sub-register Classes...
static const TargetRegisterClass* const SEGMENT_REGSubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0, 0, 0
};
// VR128 Sub-register Classes...
static const TargetRegisterClass* const VR128SubRegClasses[] = {
- &X86::FR32RegClass, &X86::FR64RegClass, NULL
+ 0, 0, 0, 0, &X86::FR64RegClass, &X86::FR32RegClass, 0
};
// VR256 Sub-register Classes...
static const TargetRegisterClass* const VR256SubRegClasses[] = {
- &X86::FR32RegClass, &X86::FR64RegClass, &X86::VR128RegClass, NULL
+ 0, 0, 0, 0, &X86::FR64RegClass, &X86::FR32RegClass, &X86::VR128RegClass
};
// VR64 Sub-register Classes...
static const TargetRegisterClass* const VR64SubRegClasses[] = {
- NULL
+ 0, 0, 0, 0, 0, 0, 0
};
// CCR Super-register Classes...
@@ -514,13 +530,8 @@ namespace X86 { // Register class instances
NULL
};
- // CONTROL_REG_32 Super-register Classes...
- static const TargetRegisterClass* const CONTROL_REG_32SuperRegClasses[] = {
- NULL
- };
-
- // CONTROL_REG_64 Super-register Classes...
- static const TargetRegisterClass* const CONTROL_REG_64SuperRegClasses[] = {
+ // CONTROL_REG Super-register Classes...
+ static const TargetRegisterClass* const CONTROL_REGSuperRegClasses[] = {
NULL
};
@@ -541,7 +552,7 @@ namespace X86 { // Register class instances
// GR16 Super-register Classes...
static const TargetRegisterClass* const GR16SuperRegClasses[] = {
- &X86::GR32RegClass, &X86::GR32_NOSPRegClass, &X86::GR64RegClass, &X86::GR64_NOSPRegClass, NULL
+ &X86::GR32RegClass, &X86::GR32_NOSPRegClass, &X86::GR32_TCRegClass, &X86::GR64RegClass, &X86::GR64_NOSPRegClass, &X86::GR64_TCRegClass, NULL
};
// GR16_ABCD Super-register Classes...
@@ -579,6 +590,11 @@ namespace X86 { // Register class instances
&X86::GR64_NOSPRegClass, NULL
};
+ // GR32_TC Super-register Classes...
+ static const TargetRegisterClass* const GR32_TCSuperRegClasses[] = {
+ &X86::GR64_TCRegClass, NULL
+ };
+
// GR64 Super-register Classes...
static const TargetRegisterClass* const GR64SuperRegClasses[] = {
NULL
@@ -604,9 +620,14 @@ namespace X86 { // Register class instances
NULL
};
+ // GR64_TC Super-register Classes...
+ static const TargetRegisterClass* const GR64_TCSuperRegClasses[] = {
+ NULL
+ };
+
// GR8 Super-register Classes...
static const TargetRegisterClass* const GR8SuperRegClasses[] = {
- &X86::GR16RegClass, &X86::GR32RegClass, &X86::GR32_NOSPRegClass, &X86::GR64RegClass, &X86::GR64_NOSPRegClass, NULL
+ &X86::GR16RegClass, &X86::GR32RegClass, &X86::GR32_NOSPRegClass, &X86::GR32_TCRegClass, &X86::GR64RegClass, &X86::GR64_NOSPRegClass, &X86::GR64_TCRegClass, NULL
};
// GR8_ABCD_H Super-register Classes...
@@ -669,13 +690,8 @@ namespace X86 { // Register class instances
NULL
};
- // CONTROL_REG_32 Register Class sub-classes...
- static const TargetRegisterClass* const CONTROL_REG_32Subclasses[] = {
- NULL
- };
-
- // CONTROL_REG_64 Register Class sub-classes...
- static const TargetRegisterClass* const CONTROL_REG_64Subclasses[] = {
+ // CONTROL_REG Register Class sub-classes...
+ static const TargetRegisterClass* const CONTROL_REGSubclasses[] = {
NULL
};
@@ -711,12 +727,12 @@ namespace X86 { // Register class instances
// GR32 Register Class sub-classes...
static const TargetRegisterClass* const GR32Subclasses[] = {
- &X86::GR32_ABCDRegClass, &X86::GR32_ADRegClass, &X86::GR32_NOREXRegClass, &X86::GR32_NOSPRegClass, NULL
+ &X86::GR32_ABCDRegClass, &X86::GR32_ADRegClass, &X86::GR32_NOREXRegClass, &X86::GR32_NOSPRegClass, &X86::GR32_TCRegClass, NULL
};
// GR32_ABCD Register Class sub-classes...
static const TargetRegisterClass* const GR32_ABCDSubclasses[] = {
- &X86::GR32_ADRegClass, NULL
+ &X86::GR32_ADRegClass, &X86::GR32_TCRegClass, NULL
};
// GR32_AD Register Class sub-classes...
@@ -726,17 +742,22 @@ namespace X86 { // Register class instances
// GR32_NOREX Register Class sub-classes...
static const TargetRegisterClass* const GR32_NOREXSubclasses[] = {
- &X86::GR32_ABCDRegClass, &X86::GR32_ADRegClass, NULL
+ &X86::GR32_ABCDRegClass, &X86::GR32_ADRegClass, &X86::GR32_TCRegClass, NULL
};
// GR32_NOSP Register Class sub-classes...
static const TargetRegisterClass* const GR32_NOSPSubclasses[] = {
- &X86::GR32_ABCDRegClass, &X86::GR32_ADRegClass, NULL
+ &X86::GR32_ABCDRegClass, &X86::GR32_ADRegClass, &X86::GR32_TCRegClass, NULL
+ };
+
+ // GR32_TC Register Class sub-classes...
+ static const TargetRegisterClass* const GR32_TCSubclasses[] = {
+ &X86::GR32_ADRegClass, NULL
};
// GR64 Register Class sub-classes...
static const TargetRegisterClass* const GR64Subclasses[] = {
- &X86::GR64_ABCDRegClass, &X86::GR64_NOREXRegClass, &X86::GR64_NOREX_NOSPRegClass, &X86::GR64_NOSPRegClass, NULL
+ &X86::GR64_ABCDRegClass, &X86::GR64_NOREXRegClass, &X86::GR64_NOREX_NOSPRegClass, &X86::GR64_NOSPRegClass, &X86::GR64_TCRegClass, NULL
};
// GR64_ABCD Register Class sub-classes...
@@ -756,7 +777,12 @@ namespace X86 { // Register class instances
// GR64_NOSP Register Class sub-classes...
static const TargetRegisterClass* const GR64_NOSPSubclasses[] = {
- &X86::GR64_ABCDRegClass, &X86::GR64_NOREX_NOSPRegClass, NULL
+ &X86::GR64_ABCDRegClass, &X86::GR64_NOREX_NOSPRegClass, &X86::GR64_TCRegClass, NULL
+ };
+
+ // GR64_TC Register Class sub-classes...
+ static const TargetRegisterClass* const GR64_TCSubclasses[] = {
+ NULL
};
// GR8 Register Class sub-classes...
@@ -824,13 +850,8 @@ namespace X86 { // Register class instances
NULL
};
- // CONTROL_REG_32 Register Class super-classes...
- static const TargetRegisterClass* const CONTROL_REG_32Superclasses[] = {
- NULL
- };
-
- // CONTROL_REG_64 Register Class super-classes...
- static const TargetRegisterClass* const CONTROL_REG_64Superclasses[] = {
+ // CONTROL_REG Register Class super-classes...
+ static const TargetRegisterClass* const CONTROL_REGSuperclasses[] = {
NULL
};
@@ -876,7 +897,7 @@ namespace X86 { // Register class instances
// GR32_AD Register Class super-classes...
static const TargetRegisterClass* const GR32_ADSuperclasses[] = {
- &X86::GR32RegClass, &X86::GR32_ABCDRegClass, &X86::GR32_NOREXRegClass, &X86::GR32_NOSPRegClass, NULL
+ &X86::GR32RegClass, &X86::GR32_ABCDRegClass, &X86::GR32_NOREXRegClass, &X86::GR32_NOSPRegClass, &X86::GR32_TCRegClass, NULL
};
// GR32_NOREX Register Class super-classes...
@@ -889,6 +910,11 @@ namespace X86 { // Register class instances
&X86::GR32RegClass, NULL
};
+ // GR32_TC Register Class super-classes...
+ static const TargetRegisterClass* const GR32_TCSuperclasses[] = {
+ &X86::GR32RegClass, &X86::GR32_ABCDRegClass, &X86::GR32_NOREXRegClass, &X86::GR32_NOSPRegClass, NULL
+ };
+
// GR64 Register Class super-classes...
static const TargetRegisterClass* const GR64Superclasses[] = {
NULL
@@ -914,6 +940,11 @@ namespace X86 { // Register class instances
&X86::GR64RegClass, NULL
};
+ // GR64_TC Register Class super-classes...
+ static const TargetRegisterClass* const GR64_TCSuperclasses[] = {
+ &X86::GR64RegClass, &X86::GR64_NOSPRegClass, NULL
+ };
+
// GR8 Register Class super-classes...
static const TargetRegisterClass* const GR8Superclasses[] = {
NULL
@@ -975,11 +1006,14 @@ namespace X86 { // Register class instances
};
+ CCRClass::iterator
+ CCRClass::allocation_order_end(const MachineFunction &MF) const {
+ return allocation_order_begin(MF);
+ }
+
CCRClass::CCRClass() : TargetRegisterClass(CCRRegClassID, "CCR", CCRVTs, CCRSubclasses, CCRSuperclasses, CCRSubRegClasses, CCRSuperRegClasses, 4, 4, -1, CCR, CCR + 1) {}
-CONTROL_REG_32Class::CONTROL_REG_32Class() : TargetRegisterClass(CONTROL_REG_32RegClassID, "CONTROL_REG_32", CONTROL_REG_32VTs, CONTROL_REG_32Subclasses, CONTROL_REG_32Superclasses, CONTROL_REG_32SubRegClasses, CONTROL_REG_32SuperRegClasses, 4, 4, 1, CONTROL_REG_32, CONTROL_REG_32 + 8) {}
-
-CONTROL_REG_64Class::CONTROL_REG_64Class() : TargetRegisterClass(CONTROL_REG_64RegClassID, "CONTROL_REG_64", CONTROL_REG_64VTs, CONTROL_REG_64Subclasses, CONTROL_REG_64Superclasses, CONTROL_REG_64SubRegClasses, CONTROL_REG_64SuperRegClasses, 8, 8, 1, CONTROL_REG_64, CONTROL_REG_64 + 9) {}
+CONTROL_REGClass::CONTROL_REGClass() : TargetRegisterClass(CONTROL_REGRegClassID, "CONTROL_REG", CONTROL_REGVTs, CONTROL_REGSubclasses, CONTROL_REGSuperclasses, CONTROL_REGSubRegClasses, CONTROL_REGSuperRegClasses, 8, 8, 1, CONTROL_REG, CONTROL_REG + 9) {}
DEBUG_REGClass::DEBUG_REGClass() : TargetRegisterClass(DEBUG_REGRegClassID, "DEBUG_REG", DEBUG_REGVTs, DEBUG_REGSubclasses, DEBUG_REGSuperclasses, DEBUG_REGSubRegClasses, DEBUG_REGSuperRegClasses, 4, 4, 1, DEBUG_REG, DEBUG_REG + 8) {}
@@ -1028,9 +1062,10 @@ FR64Class::FR64Class() : TargetRegisterClass(FR64RegClassID, "FR64", FR64VTs, F
const TargetMachine &TM = MF.getTarget();
const TargetRegisterInfo *RI = TM.getRegisterInfo();
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
if (Subtarget.is64Bit()) {
// Does the function dedicate RBP to being a frame ptr?
- if (RI->hasFP(MF))
+ if (RI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate SP or BP.
return array_endof(X86_GR16_AO_64) - 1;
else
@@ -1038,7 +1073,7 @@ FR64Class::FR64Class() : TargetRegisterClass(FR64RegClassID, "FR64", FR64VTs, F
return array_endof(X86_GR16_AO_64);
} else {
// Does the function dedicate EBP to being a frame ptr?
- if (RI->hasFP(MF))
+ if (RI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate SP or BP.
return begin() + 6;
else
@@ -1055,8 +1090,9 @@ GR16_ABCDClass::GR16_ABCDClass() : TargetRegisterClass(GR16_ABCDRegClassID, "GR
GR16_NOREXClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
// Does the function dedicate RBP / EBP to being a frame ptr?
- if (RI->hasFP(MF))
+ if (RI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate SP or BP.
return end() - 2;
else
@@ -1087,9 +1123,10 @@ GR16_NOREXClass::GR16_NOREXClass() : TargetRegisterClass(GR16_NOREXRegClassID,
const TargetMachine &TM = MF.getTarget();
const TargetRegisterInfo *RI = TM.getRegisterInfo();
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
if (Subtarget.is64Bit()) {
// Does the function dedicate RBP to being a frame ptr?
- if (RI->hasFP(MF))
+ if (RI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate ESP or EBP.
return array_endof(X86_GR32_AO_64) - 1;
else
@@ -1097,7 +1134,7 @@ GR16_NOREXClass::GR16_NOREXClass() : TargetRegisterClass(GR16_NOREXRegClassID,
return array_endof(X86_GR32_AO_64);
} else {
// Does the function dedicate EBP to being a frame ptr?
- if (RI->hasFP(MF))
+ if (RI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate ESP or EBP.
return begin() + 6;
else
@@ -1116,8 +1153,9 @@ GR32_ADClass::GR32_ADClass() : TargetRegisterClass(GR32_ADRegClassID, "GR32_AD"
GR32_NOREXClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
// Does the function dedicate RBP / EBP to being a frame ptr?
- if (RI->hasFP(MF))
+ if (RI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate ESP or EBP.
return end() - 2;
else
@@ -1148,9 +1186,10 @@ GR32_NOREXClass::GR32_NOREXClass() : TargetRegisterClass(GR32_NOREXRegClassID,
const TargetMachine &TM = MF.getTarget();
const TargetRegisterInfo *RI = TM.getRegisterInfo();
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
if (Subtarget.is64Bit()) {
// Does the function dedicate RBP to being a frame ptr?
- if (RI->hasFP(MF))
+ if (RI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate EBP.
return array_endof(X86_GR32_NOSP_AO_64) - 1;
else
@@ -1158,7 +1197,7 @@ GR32_NOREXClass::GR32_NOREXClass() : TargetRegisterClass(GR32_NOREXRegClassID,
return array_endof(X86_GR32_NOSP_AO_64);
} else {
// Does the function dedicate EBP to being a frame ptr?
- if (RI->hasFP(MF))
+ if (RI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate EBP.
return begin() + 6;
else
@@ -1169,14 +1208,18 @@ GR32_NOREXClass::GR32_NOREXClass() : TargetRegisterClass(GR32_NOREXRegClassID,
GR32_NOSPClass::GR32_NOSPClass() : TargetRegisterClass(GR32_NOSPRegClassID, "GR32_NOSP", GR32_NOSPVTs, GR32_NOSPSubclasses, GR32_NOSPSuperclasses, GR32_NOSPSubRegClasses, GR32_NOSPSuperRegClasses, 4, 4, 1, GR32_NOSP, GR32_NOSP + 15) {}
+GR32_TCClass::GR32_TCClass() : TargetRegisterClass(GR32_TCRegClassID, "GR32_TC", GR32_TCVTs, GR32_TCSubclasses, GR32_TCSuperclasses, GR32_TCSubRegClasses, GR32_TCSuperRegClasses, 4, 4, 1, GR32_TC, GR32_TC + 3) {}
+
GR64Class::iterator
GR64Class::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
const TargetRegisterInfo *RI = TM.getRegisterInfo();
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
if (!Subtarget.is64Bit())
return begin(); // None of these are allocatable in 32-bit.
- if (RI->hasFP(MF)) // Does the function dedicate RBP to being a frame ptr?
+ // Does the function dedicate RBP to being a frame ptr?
+ if (RI->hasFP(MF) || MFI->getReserveFP())
return end()-3; // If so, don't allocate RIP, RSP or RBP
else
return end()-2; // If not, just don't allocate RIP or RSP
@@ -1190,8 +1233,9 @@ GR64_ABCDClass::GR64_ABCDClass() : TargetRegisterClass(GR64_ABCDRegClassID, "GR
GR64_NOREXClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
// Does the function dedicate RBP to being a frame ptr?
- if (RI->hasFP(MF))
+ if (RI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate RIP, RSP or RBP.
return end() - 3;
else
@@ -1206,8 +1250,9 @@ GR64_NOREXClass::GR64_NOREXClass() : TargetRegisterClass(GR64_NOREXRegClassID,
{
const TargetMachine &TM = MF.getTarget();
const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
// Does the function dedicate RBP to being a frame ptr?
- if (RI->hasFP(MF))
+ if (RI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate RBP.
return end() - 1;
else
@@ -1222,9 +1267,11 @@ GR64_NOREX_NOSPClass::GR64_NOREX_NOSPClass() : TargetRegisterClass(GR64_NOREX_N
const TargetMachine &TM = MF.getTarget();
const TargetRegisterInfo *RI = TM.getRegisterInfo();
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
if (!Subtarget.is64Bit())
return begin(); // None of these are allocatable in 32-bit.
- if (RI->hasFP(MF)) // Does the function dedicate RBP to being a frame ptr?
+ // Does the function dedicate RBP to being a frame ptr?
+ if (RI->hasFP(MF) || MFI->getReserveFP())
return end()-1; // If so, don't allocate RBP
else
return end(); // If not, any reg in this class is ok.
@@ -1232,6 +1279,8 @@ GR64_NOREX_NOSPClass::GR64_NOREX_NOSPClass() : TargetRegisterClass(GR64_NOREX_N
GR64_NOSPClass::GR64_NOSPClass() : TargetRegisterClass(GR64_NOSPRegClassID, "GR64_NOSP", GR64_NOSPVTs, GR64_NOSPSubclasses, GR64_NOSPSuperclasses, GR64_NOSPSubRegClasses, GR64_NOSPSuperRegClasses, 8, 8, 1, GR64_NOSP, GR64_NOSP + 15) {}
+GR64_TCClass::GR64_TCClass() : TargetRegisterClass(GR64_TCRegClassID, "GR64_TC", GR64_TCVTs, GR64_TCSubclasses, GR64_TCSuperclasses, GR64_TCSubRegClasses, GR64_TCSuperRegClasses, 8, 8, 1, GR64_TC, GR64_TC + 8) {}
+
static const unsigned X86_GR8_AO_64[] = {
X86::AL, X86::CL, X86::DL, X86::SIL, X86::DIL,
X86::R8B, X86::R9B, X86::R10B, X86::R11B,
@@ -1253,11 +1302,12 @@ GR64_NOSPClass::GR64_NOSPClass() : TargetRegisterClass(GR64_NOSPRegClassID, "GR
const TargetMachine &TM = MF.getTarget();
const TargetRegisterInfo *RI = TM.getRegisterInfo();
const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+ const X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
// Does the function dedicate RBP / EBP to being a frame ptr?
if (!Subtarget.is64Bit())
// In 32-mode, none of the 8-bit registers aliases EBP or ESP.
return begin() + 8;
- else if (RI->hasFP(MF))
+ else if (RI->hasFP(MF) || MFI->getReserveFP())
// If so, don't allocate SPL or BPL.
return array_endof(X86_GR8_AO_64) - 1;
else
@@ -1325,6 +1375,16 @@ SEGMENT_REGClass::SEGMENT_REGClass() : TargetRegisterClass(SEGMENT_REGRegClassI
VR128Class::VR128Class() : TargetRegisterClass(VR128RegClassID, "VR128", VR128VTs, VR128Subclasses, VR128Superclasses, VR128SubRegClasses, VR128SuperRegClasses, 16, 16, 1, VR128, VR128 + 16) {}
+ VR256Class::iterator
+ VR256Class::allocation_order_end(const MachineFunction &MF) const {
+ const TargetMachine &TM = MF.getTarget();
+ const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+ if (!Subtarget.is64Bit())
+ return end()-8; // Only YMM0 to YMM7 are available in 32-bit mode.
+ else
+ return end();
+ }
+
VR256Class::VR256Class() : TargetRegisterClass(VR256RegClassID, "VR256", VR256VTs, VR256Subclasses, VR256Superclasses, VR256SubRegClasses, VR256SuperRegClasses, 32, 32, 1, VR256, VR256 + 16) {}
VR64Class::VR64Class() : TargetRegisterClass(VR64RegClassID, "VR64", VR64VTs, VR64Subclasses, VR64Superclasses, VR64SubRegClasses, VR64SuperRegClasses, 8, 8, 1, VR64, VR64 + 8) {}
@@ -1333,8 +1393,7 @@ VR64Class::VR64Class() : TargetRegisterClass(VR64RegClassID, "VR64", VR64VTs, V
namespace {
const TargetRegisterClass* const RegisterClasses[] = {
&X86::CCRRegClass,
- &X86::CONTROL_REG_32RegClass,
- &X86::CONTROL_REG_64RegClass,
+ &X86::CONTROL_REGRegClass,
&X86::DEBUG_REGRegClass,
&X86::FR32RegClass,
&X86::FR64RegClass,
@@ -1346,11 +1405,13 @@ namespace {
&X86::GR32_ADRegClass,
&X86::GR32_NOREXRegClass,
&X86::GR32_NOSPRegClass,
+ &X86::GR32_TCRegClass,
&X86::GR64RegClass,
&X86::GR64_ABCDRegClass,
&X86::GR64_NOREXRegClass,
&X86::GR64_NOREX_NOSPRegClass,
&X86::GR64_NOSPRegClass,
+ &X86::GR64_TCRegClass,
&X86::GR8RegClass,
&X86::GR8_ABCD_HRegClass,
&X86::GR8_ABCD_LRegClass,
@@ -1366,166 +1427,34 @@ namespace {
};
- // Number of hash collisions: 20
+ // Number of hash collisions: 15
const unsigned SubregHashTable[] = { X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::R14, X86::R14B,
- X86::NoRegister, X86::NoRegister,
- X86::R14D, X86::R14B,
- X86::R14W, X86::R14B,
- X86::EDX, X86::DH,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::RDI, X86::EDI,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::R9, X86::R9W,
- X86::NoRegister, X86::NoRegister,
- X86::R9D, X86::R9W,
- X86::YMM2, X86::XMM2,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::EIP, X86::IP,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::AX, X86::AH,
- X86::NoRegister, X86::NoRegister,
- X86::R14, X86::R14D,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::EDI, X86::DI,
- X86::RCX, X86::CX,
- X86::NoRegister, X86::NoRegister,
- X86::RDX, X86::DX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::RDX, X86::EDX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::DI, X86::DIL,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::R11, X86::R11B,
- X86::NoRegister, X86::NoRegister,
- X86::R11D, X86::R11B,
- X86::EAX, X86::AH,
- X86::NoRegister, X86::NoRegister,
- X86::R11W, X86::R11B,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::YMM3, X86::XMM3,
- X86::RAX, X86::EAX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::AX, X86::AL,
- X86::NoRegister, X86::NoRegister,
- X86::R14, X86::R14W,
- X86::NoRegister, X86::NoRegister,
- X86::R14D, X86::R14W,
- X86::EDI, X86::DIL,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::RDX, X86::DH,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::R11, X86::R11D,
- X86::NoRegister, X86::NoRegister,
- X86::RIP, X86::IP,
- X86::EAX, X86::AL,
- X86::YMM4, X86::XMM4,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::DX, X86::DL,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::RBP, X86::EBP,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::RDI, X86::DI,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::EDX, X86::DL,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::RIP, X86::EIP,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::RAX, X86::AH,
- X86::NoRegister, X86::NoRegister,
X86::R11, X86::R11W,
- X86::NoRegister, X86::NoRegister,
+ X86::RDX, X86::DL,
X86::R11D, X86::R11W,
- X86::EAX, X86::AX,
+ X86::RSI, X86::SI,
X86::NoRegister, X86::NoRegister,
- X86::YMM5, X86::XMM5,
+ X86::RCX, X86::ECX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::YMM0, X86::XMM0,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::RBX, X86::EBX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::RDI, X86::DIL,
X86::NoRegister, X86::NoRegister,
+ X86::ESP, X86::SP,
+ X86::R8, X86::R8D,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::BX, X86::BH,
X86::R15, X86::R15B,
- X86::NoRegister, X86::NoRegister,
+ X86::YMM14, X86::XMM14,
X86::R15D, X86::R15B,
X86::R15W, X86::R15B,
X86::NoRegister, X86::NoRegister,
@@ -1533,20 +1462,19 @@ namespace {
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::ESI, X86::SI,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::RAX, X86::AL,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::AX, X86::AH,
X86::NoRegister, X86::NoRegister,
- X86::EBX, X86::BH,
- X86::YMM6, X86::XMM6,
X86::NoRegister, X86::NoRegister,
+ X86::RSI, X86::SIL,
+ X86::YMM1, X86::XMM1,
+ X86::SI, X86::SIL,
+ X86::RDI, X86::EDI,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -1557,12 +1485,14 @@ namespace {
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::RDX, X86::DL,
X86::NoRegister, X86::NoRegister,
+ X86::ESP, X86::SPL,
+ X86::R8, X86::R8W,
X86::NoRegister, X86::NoRegister,
- X86::BX, X86::BL,
+ X86::R8D, X86::R8W,
X86::R15, X86::R15D,
X86::NoRegister, X86::NoRegister,
+ X86::YMM15, X86::XMM15,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -1570,23 +1500,22 @@ namespace {
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::ESI, X86::SIL,
- X86::RSI, X86::ESI,
X86::NoRegister, X86::NoRegister,
+ X86::EAX, X86::AH,
X86::NoRegister, X86::NoRegister,
- X86::RAX, X86::AX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::AX, X86::AL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::R12, X86::R12B,
- X86::EBX, X86::BL,
+ X86::YMM2, X86::XMM2,
X86::R12D, X86::R12B,
X86::R12W, X86::R12B,
X86::NoRegister, X86::NoRegister,
- X86::YMM7, X86::XMM7,
- X86::NoRegister, X86::NoRegister,
+ X86::RDX, X86::EDX,
+ X86::RSP, X86::SP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -1601,27 +1530,27 @@ namespace {
X86::R15, X86::R15W,
X86::NoRegister, X86::NoRegister,
X86::R15D, X86::R15W,
- X86::RSI, X86::SI,
X86::NoRegister, X86::NoRegister,
- X86::YMM0, X86::XMM0,
X86::NoRegister, X86::NoRegister,
+ X86::EIP, X86::IP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::ESP, X86::SP,
- X86::RSP, X86::ESP,
+ X86::NoRegister, X86::NoRegister,
+ X86::NoRegister, X86::NoRegister,
+ X86::EAX, X86::AL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::RBX, X86::BH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::EBP, X86::BP,
X86::NoRegister, X86::NoRegister,
X86::R12, X86::R12D,
X86::NoRegister, X86::NoRegister,
- X86::YMM8, X86::XMM8,
+ X86::RSP, X86::SPL,
+ X86::SP, X86::SPL,
+ X86::YMM3, X86::XMM3,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -1632,72 +1561,70 @@ namespace {
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::BP, X86::BPL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RAX, X86::AH,
X86::NoRegister, X86::NoRegister,
+ X86::R9, X86::R9B,
X86::NoRegister, X86::NoRegister,
+ X86::R9D, X86::R9B,
+ X86::R9W, X86::R9B,
X86::NoRegister, X86::NoRegister,
- X86::RSI, X86::SIL,
- X86::YMM1, X86::XMM1,
- X86::SI, X86::SIL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::ESP, X86::SPL,
X86::NoRegister, X86::NoRegister,
+ X86::EAX, X86::AX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::RBX, X86::BL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::EBP, X86::BPL,
X86::NoRegister, X86::NoRegister,
X86::R12, X86::R12W,
- X86::NoRegister, X86::NoRegister,
+ X86::BX, X86::BH,
X86::R12D, X86::R12W,
- X86::YMM9, X86::XMM9,
+ X86::YMM4, X86::XMM4,
+ X86::RIP, X86::EIP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RIP, X86::IP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RAX, X86::AL,
X86::NoRegister, X86::NoRegister,
+ X86::R9, X86::R9D,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R8, X86::R8B,
- X86::YMM10, X86::XMM10,
- X86::R8D, X86::R8B,
- X86::R8W, X86::R8B,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::RSP, X86::SP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::RBP, X86::BP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::EBX, X86::BH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::EBX, X86::BX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::BX, X86::BL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::YMM5, X86::XMM5,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -1710,35 +1637,36 @@ namespace {
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RAX, X86::AX,
X86::NoRegister, X86::NoRegister,
+ X86::R9, X86::R9W,
+ X86::NoRegister, X86::NoRegister,
+ X86::R9D, X86::R9W,
X86::NoRegister, X86::NoRegister,
- X86::CX, X86::CH,
- X86::R8, X86::R8D,
X86::NoRegister, X86::NoRegister,
- X86::RSP, X86::SPL,
- X86::SP, X86::SPL,
- X86::YMM11, X86::XMM11,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::RBP, X86::BPL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::EBX, X86::BL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R13, X86::R13B,
X86::NoRegister, X86::NoRegister,
+ X86::NoRegister, X86::NoRegister,
+ X86::NoRegister, X86::NoRegister,
+ X86::R13, X86::R13B,
+ X86::YMM6, X86::XMM6,
X86::R13D, X86::R13B,
X86::R13W, X86::R13B,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::ECX, X86::CH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -1748,12 +1676,9 @@ namespace {
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RBX, X86::BH,
X86::NoRegister, X86::NoRegister,
- X86::CX, X86::CL,
- X86::R8, X86::R8W,
X86::NoRegister, X86::NoRegister,
- X86::R8D, X86::R8W,
- X86::YMM12, X86::XMM12,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -1761,21 +1686,23 @@ namespace {
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::RBX, X86::BX,
X86::NoRegister, X86::NoRegister,
+ X86::EBP, X86::BP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::BP, X86::BPL,
X86::NoRegister, X86::NoRegister,
- X86::R13, X86::R13D,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R13, X86::R13D,
X86::NoRegister, X86::NoRegister,
+ X86::RSI, X86::ESI,
X86::NoRegister, X86::NoRegister,
- X86::ECX, X86::CL,
+ X86::YMM7, X86::XMM7,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -1786,6 +1713,8 @@ namespace {
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RBX, X86::BL,
+ X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::R10, X86::R10B,
@@ -1793,10 +1722,9 @@ namespace {
X86::R10D, X86::R10B,
X86::R10W, X86::R10B,
X86::NoRegister, X86::NoRegister,
- X86::YMM13, X86::XMM13,
- X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::EBP, X86::BPL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -1804,144 +1732,127 @@ namespace {
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::ECX, X86::CX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::R13, X86::R13W,
X86::NoRegister, X86::NoRegister,
X86::R13D, X86::R13W,
+ X86::RSP, X86::ESP,
X86::NoRegister, X86::NoRegister,
- X86::RCX, X86::CH,
+ X86::EDX, X86::DX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::YMM8, X86::XMM8,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RBP, X86::BP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R10, X86::R10D,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R10, X86::R10D,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R9, X86::R9B,
- X86::YMM14, X86::XMM14,
- X86::R9D, X86::R9B,
- X86::R9W, X86::R9B,
- X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::EBX, X86::BX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::DX, X86::DH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::EDX, X86::DH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::RCX, X86::CL,
X86::NoRegister, X86::NoRegister,
+ X86::YMM9, X86::XMM9,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::ECX, X86::CX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::CX, X86::CH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RBP, X86::BPL,
X86::NoRegister, X86::NoRegister,
- X86::EDX, X86::DX,
X86::NoRegister, X86::NoRegister,
- X86::RCX, X86::ECX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::R10, X86::R10W,
X86::NoRegister, X86::NoRegister,
X86::R10D, X86::R10W,
- X86::R9, X86::R9D,
+ X86::RCX, X86::CX,
X86::NoRegister, X86::NoRegister,
- X86::YMM15, X86::XMM15,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::ECX, X86::CH,
X86::NoRegister, X86::NoRegister,
+ X86::RDX, X86::DX,
X86::NoRegister, X86::NoRegister,
- X86::DX, X86::DH,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
-X86::NoRegister, X86::NoRegister };
- const unsigned SubregHashTableSize = 512;
-
-
- // Number of hash collisions: 23
- const unsigned SuperregHashTable[] = { X86::DX, X86::RDX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::EDI, X86::DI,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R14, X86::R14B,
+ X86::YMM10, X86::XMM10,
+ X86::R14D, X86::R14B,
+ X86::R14W, X86::R14B,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::BP, X86::RBP,
- X86::BPL, X86::RBP,
+ X86::CX, X86::CL,
X86::NoRegister, X86::NoRegister,
- X86::EDX, X86::RDX,
X86::NoRegister, X86::NoRegister,
- X86::XMM14, X86::YMM14,
X86::NoRegister, X86::NoRegister,
- X86::BP, X86::EBP,
- X86::BPL, X86::EBP,
X86::NoRegister, X86::NoRegister,
- X86::DIL, X86::DI,
+ X86::RBX, X86::BX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R9B, X86::R9W,
- X86::R11B, X86::R11,
- X86::R11D, X86::R11,
- X86::R11W, X86::R11,
+ X86::DI, X86::DIL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::EBP, X86::RBP,
+ X86::RDX, X86::DH,
+ X86::RAX, X86::EAX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::ECX, X86::CL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R14D, X86::R14,
- X86::R14B, X86::R14D,
+ X86::EDI, X86::DIL,
X86::NoRegister, X86::NoRegister,
- X86::R14W, X86::R14D,
- X86::BH, X86::RBX,
- X86::BL, X86::RBX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::BX, X86::RBX,
X86::NoRegister, X86::NoRegister,
- X86::XMM15, X86::YMM15,
- X86::BH, X86::EBX,
- X86::BL, X86::EBX,
- X86::EIP, X86::RIP,
X86::NoRegister, X86::NoRegister,
- X86::BX, X86::EBX,
+ X86::R14, X86::R14D,
X86::NoRegister, X86::NoRegister,
+ X86::YMM11, X86::XMM11,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -1949,54 +1860,60 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::IP, X86::RIP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RCX, X86::CH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::EBX, X86::RBX,
X86::NoRegister, X86::NoRegister,
- X86::IP, X86::EIP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R11, X86::R11B,
X86::NoRegister, X86::NoRegister,
+ X86::R11D, X86::R11B,
+ X86::R11W, X86::R11B,
X86::NoRegister, X86::NoRegister,
+ X86::RBP, X86::EBP,
+ X86::RDI, X86::DI,
+ X86::DX, X86::DL,
X86::NoRegister, X86::NoRegister,
- X86::R14B, X86::R14W,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::EDX, X86::DL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::ESI, X86::SI,
+ X86::R14, X86::R14W,
X86::NoRegister, X86::NoRegister,
+ X86::R14D, X86::R14W,
+ X86::YMM12, X86::XMM12,
X86::NoRegister, X86::NoRegister,
- X86::XMM2, X86::YMM2,
X86::NoRegister, X86::NoRegister,
- X86::ESI, X86::RSI,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R11B, X86::R11D,
+ X86::RCX, X86::CL,
X86::NoRegister, X86::NoRegister,
- X86::R11W, X86::R11D,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R11, X86::R11D,
X86::NoRegister, X86::NoRegister,
+ X86::RDI, X86::DIL,
X86::NoRegister, X86::NoRegister,
+ X86::RBX, X86::EBX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::AH, X86::AX,
- X86::AL, X86::AX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -2004,28 +1921,39 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R15B, X86::R15,
- X86::R15D, X86::R15,
- X86::R15W, X86::R15,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::ESI, X86::SIL,
X86::NoRegister, X86::NoRegister,
+ X86::R8, X86::R8B,
X86::NoRegister, X86::NoRegister,
- X86::XMM3, X86::YMM3,
+ X86::R8D, X86::R8B,
+ X86::R8W, X86::R8B,
X86::NoRegister, X86::NoRegister,
- X86::ESP, X86::RSP,
+ X86::YMM13, X86::XMM13,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R11B, X86::R11W,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+X86::NoRegister, X86::NoRegister };
+ const unsigned SubregHashTableSize = 512;
+
+
+ // Number of hash collisions: 43
+ const unsigned AliasesHashTable[] = { X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R11, X86::R11W,
+ X86::R11B, X86::R11W,
+ X86::R11D, X86::R11W,
+ X86::RDX, X86::DL,
X86::NoRegister, X86::NoRegister,
+ X86::RSI, X86::SI,
+ X86::SIL, X86::SI,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -2037,19 +1965,18 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::ESP, X86::SP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::YMM14, X86::XMM14,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::SI, X86::RSI,
- X86::SIL, X86::RSI,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM4, X86::YMM4,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -2057,8 +1984,10 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::AX, X86::AH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RSI, X86::SIL,
X86::NoRegister, X86::NoRegister,
X86::R12B, X86::R12,
X86::R12D, X86::R12,
@@ -2066,43 +1995,43 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::SI, X86::SIL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::ESP, X86::SPL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::YMM15, X86::XMM15,
X86::NoRegister, X86::NoRegister,
- X86::R15B, X86::R15D,
- X86::NoRegister, X86::NoRegister,
- X86::R15W, X86::R15D,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::SP, X86::RSP,
- X86::SPL, X86::RSP,
X86::NoRegister, X86::NoRegister,
- X86::XMM5, X86::YMM5,
X86::NoRegister, X86::NoRegister,
- X86::SI, X86::ESI,
- X86::SIL, X86::ESI,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::XMM0, X86::YMM0,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::EAX, X86::AH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::AX, X86::AL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R12, X86::R12B,
X86::NoRegister, X86::NoRegister,
+ X86::R12D, X86::R12B,
+ X86::R12W, X86::R12B,
+ X86::SPL, X86::SP,
X86::NoRegister, X86::NoRegister,
+ X86::RSP, X86::SP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -2112,32 +2041,33 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::BPL, X86::BP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R15B, X86::R15W,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::SIL, X86::SI,
X86::NoRegister, X86::NoRegister,
+ X86::EIP, X86::IP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM6, X86::YMM6,
+ X86::XMM1, X86::YMM1,
X86::NoRegister, X86::NoRegister,
+ X86::EAX, X86::AL,
X86::NoRegister, X86::NoRegister,
- X86::SP, X86::ESP,
- X86::SPL, X86::ESP,
+ X86::AH, X86::AX,
+ X86::AL, X86::AX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R12, X86::R12D,
X86::R12B, X86::R12D,
- X86::NoRegister, X86::NoRegister,
+ X86::RSP, X86::SPL,
X86::R12W, X86::R12D,
X86::NoRegister, X86::NoRegister,
+ X86::SP, X86::SPL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -2149,6 +2079,7 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RAX, X86::AH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -2156,14 +2087,11 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R8B, X86::R8,
- X86::R8D, X86::R8,
- X86::R8W, X86::R8,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::XMM7, X86::YMM7,
+ X86::XMM2, X86::YMM2,
+ X86::EAX, X86::AX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -2171,6 +2099,9 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R12, X86::R12W,
+ X86::BX, X86::BH,
+ X86::R12D, X86::R12W,
X86::R12B, X86::R12W,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -2178,17 +2109,14 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RIP, X86::IP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM0, X86::YMM0,
- X86::NoRegister, X86::NoRegister,
- X86::BH, X86::BX,
- X86::BL, X86::BX,
- X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RAX, X86::AL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -2196,19 +2124,20 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::SPL, X86::SP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM8, X86::YMM8,
+ X86::XMM3, X86::YMM3,
X86::NoRegister, X86::NoRegister,
+ X86::EBX, X86::BH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::BX, X86::BL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -2219,36 +2148,39 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM1, X86::YMM1,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RAX, X86::AX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R8B, X86::R8D,
X86::NoRegister, X86::NoRegister,
- X86::R8W, X86::R8D,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM9, X86::YMM9,
+ X86::XMM4, X86::YMM4,
+ X86::EBX, X86::BL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::BPL, X86::BP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R13, X86::R13B,
X86::NoRegister, X86::NoRegister,
+ X86::R13D, X86::R13B,
+ X86::R13W, X86::R13B,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -2257,35 +2189,32 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM10, X86::YMM10,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RBX, X86::BH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R8B, X86::R8W,
- X86::R10B, X86::R10,
- X86::R10D, X86::R10,
- X86::R10W, X86::R10,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::CH, X86::ECX,
- X86::CL, X86::ECX,
X86::NoRegister, X86::NoRegister,
- X86::CX, X86::ECX,
+ X86::EBP, X86::BP,
+ X86::XMM5, X86::YMM5,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::BP, X86::BPL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R13, X86::R13D,
X86::R13B, X86::R13D,
X86::NoRegister, X86::NoRegister,
X86::R13W, X86::R13D,
@@ -2295,140 +2224,90 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM11, X86::YMM11,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::CH, X86::RCX,
- X86::CL, X86::RCX,
- X86::NoRegister, X86::NoRegister,
- X86::CX, X86::RCX,
- X86::R9B, X86::R9,
- X86::R9D, X86::R9,
- X86::R9W, X86::R9,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::DI, X86::EDI,
- X86::DIL, X86::EDI,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::R13B, X86::R13W,
- X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RBX, X86::BL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::ECX, X86::RCX,
- X86::NoRegister, X86::NoRegister,
- X86::XMM12, X86::YMM12,
- X86::CH, X86::CX,
- X86::CL, X86::CX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R10B, X86::R10D,
+ X86::DH, X86::DX,
X86::NoRegister, X86::NoRegister,
- X86::R10W, X86::R10D,
X86::NoRegister, X86::NoRegister,
- X86::DH, X86::DX,
- X86::DIL, X86::RDI,
- X86::DI, X86::RDI,
X86::DL, X86::DX,
X86::NoRegister, X86::NoRegister,
+ X86::EBP, X86::BPL,
X86::NoRegister, X86::NoRegister,
- X86::DH, X86::EDX,
+ X86::BH, X86::BX,
+ X86::BL, X86::BX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::DL, X86::EDX,
+ X86::XMM6, X86::YMM6,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R13, X86::R13W,
+ X86::R13B, X86::R13W,
+ X86::AH, X86::EAX,
+ X86::AL, X86::EAX,
+ X86::AX, X86::EAX,
+ X86::EDX, X86::DX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R13D, X86::R13W,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::AH, X86::RAX,
- X86::AL, X86::RAX,
- X86::AX, X86::RAX,
- X86::DX, X86::EDX,
- X86::R14W, X86::R14,
X86::NoRegister, X86::NoRegister,
- X86::R14B, X86::R14,
- X86::AH, X86::EAX,
- X86::AL, X86::EAX,
- X86::AX, X86::EAX,
+ X86::RBP, X86::BP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::EDI, X86::RDI,
X86::NoRegister, X86::NoRegister,
- X86::R10B, X86::R10W,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::DH, X86::RDX,
- X86::XMM13, X86::YMM13,
- X86::R9B, X86::R9D,
- X86::DL, X86::RDX,
- X86::R9W, X86::R9D,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::EAX, X86::RAX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
-X86::NoRegister, X86::NoRegister };
- const unsigned SuperregHashTableSize = 512;
-
-
- // Number of hash collisions: 31
- const unsigned AliasesHashTable[] = { X86::DX, X86::RDX,
+ X86::EBX, X86::BX,
X86::NoRegister, X86::NoRegister,
+ X86::XMM7, X86::YMM7,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R14, X86::R14B,
X86::NoRegister, X86::NoRegister,
- X86::R14D, X86::R14B,
- X86::R14W, X86::R14B,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::EDX, X86::RDX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R14B, X86::R14,
+ X86::R14D, X86::R14,
X86::BP, X86::EBP,
X86::BPL, X86::EBP,
+ X86::R14W, X86::R14,
+ X86::CX, X86::CH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RBP, X86::BPL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::YMM2, X86::XMM2,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::EIP, X86::IP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -2437,270 +2316,124 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::ECX, X86::CH,
X86::NoRegister, X86::NoRegister,
+ X86::RDX, X86::DX,
X86::NoRegister, X86::NoRegister,
- X86::AX, X86::AH,
+ X86::XMM8, X86::YMM8,
X86::NoRegister, X86::NoRegister,
- X86::R14, X86::R14D,
- X86::R14B, X86::R14D,
X86::NoRegister, X86::NoRegister,
- X86::R14W, X86::R14D,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::RDX, X86::DX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R14, X86::R14B,
X86::BH, X86::EBX,
X86::BL, X86::EBX,
- X86::EIP, X86::RIP,
- X86::NoRegister, X86::NoRegister,
+ X86::R14W, X86::R14B,
+ X86::R14D, X86::R14B,
X86::BX, X86::EBX,
+ X86::CX, X86::CL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::EAX, X86::AH,
- X86::NoRegister, X86::NoRegister,
- X86::IP, X86::RIP,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::YMM3, X86::XMM3,
- X86::RAX, X86::EAX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::AX, X86::AL,
- X86::NoRegister, X86::NoRegister,
- X86::R14, X86::R14W,
- X86::R14B, X86::R14W,
- X86::R14D, X86::R14W,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::ESI, X86::RSI,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::RIP, X86::IP,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::EAX, X86::AL,
- X86::YMM4, X86::XMM4,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::RBP, X86::EBP,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::AH, X86::AX,
- X86::AL, X86::AX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::R15B, X86::R15,
- X86::R15D, X86::R15,
- X86::R15W, X86::R15,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::ESP, X86::RSP,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::RAX, X86::AH,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::EAX, X86::AX,
- X86::NoRegister, X86::NoRegister,
- X86::YMM5, X86::XMM5,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::RBX, X86::EBX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::BX, X86::BH,
- X86::R15, X86::R15B,
- X86::NoRegister, X86::NoRegister,
- X86::R15D, X86::R15B,
- X86::R15W, X86::R15B,
- X86::NoRegister, X86::NoRegister,
- X86::SI, X86::RSI,
- X86::SIL, X86::RSI,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::ESI, X86::SI,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::RAX, X86::AL,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::EBX, X86::BH,
- X86::YMM6, X86::XMM6,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::BX, X86::BL,
- X86::R15, X86::R15D,
- X86::R15B, X86::R15D,
- X86::NoRegister, X86::NoRegister,
- X86::R15W, X86::R15D,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::SP, X86::RSP,
- X86::SPL, X86::RSP,
- X86::ESI, X86::SIL,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::RAX, X86::AX,
+ X86::RBX, X86::BX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::EBX, X86::BL,
X86::NoRegister, X86::NoRegister,
- X86::YMM7, X86::XMM7,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RAX, X86::EAX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::ECX, X86::CL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::XMM9, X86::YMM9,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::BPL, X86::BP,
X86::NoRegister, X86::NoRegister,
- X86::R15, X86::R15W,
- X86::R15B, X86::R15W,
- X86::R15D, X86::R15W,
- X86::RSI, X86::SI,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::SIL, X86::SI,
+ X86::R14, X86::R14D,
+ X86::R14B, X86::R14D,
X86::NoRegister, X86::NoRegister,
+ X86::R14W, X86::R14D,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::ESP, X86::SP,
+ X86::CH, X86::ECX,
+ X86::CL, X86::ECX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::RBX, X86::BH,
+ X86::RCX, X86::CH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::EBP, X86::BP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::CX, X86::ECX,
X86::NoRegister, X86::NoRegister,
- X86::YMM8, X86::XMM8,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RBP, X86::EBP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::XMM10, X86::YMM10,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::BP, X86::BPL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::RSI, X86::SIL,
- X86::NoRegister, X86::NoRegister,
+ X86::R14, X86::R14W,
+ X86::R14B, X86::R14W,
X86::R8B, X86::R8,
X86::R8D, X86::R8,
X86::R8W, X86::R8,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::ESP, X86::SPL,
- X86::SI, X86::SIL,
X86::NoRegister, X86::NoRegister,
+ X86::R14D, X86::R14W,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::RBX, X86::BL,
X86::NoRegister, X86::NoRegister,
+ X86::RCX, X86::CL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::EBP, X86::BPL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::YMM9, X86::XMM9,
X86::NoRegister, X86::NoRegister,
+ X86::DI, X86::EDI,
+ X86::DIL, X86::EDI,
X86::NoRegister, X86::NoRegister,
+ X86::RBX, X86::EBX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM0, X86::YMM0,
X86::NoRegister, X86::NoRegister,
- X86::BH, X86::BX,
- X86::BL, X86::BX,
+ X86::NoRegister, X86::NoRegister,
+ X86::XMM11, X86::YMM11,
+ X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -2712,50 +2445,47 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::R8D, X86::R8B,
X86::R8W, X86::R8B,
- X86::SPL, X86::SP,
- X86::NoRegister, X86::NoRegister,
- X86::RSP, X86::SP,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
+ X86::R15B, X86::R15,
+ X86::R15D, X86::R15,
+ X86::R15W, X86::R15,
X86::NoRegister, X86::NoRegister,
- X86::RBP, X86::BP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::EBX, X86::BX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::DH, X86::EDX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::DL, X86::EDX,
X86::NoRegister, X86::NoRegister,
+ X86::RCX, X86::ECX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM1, X86::YMM1,
X86::NoRegister, X86::NoRegister,
+ X86::YMM0, X86::XMM0,
X86::NoRegister, X86::NoRegister,
+ X86::XMM12, X86::YMM12,
X86::NoRegister, X86::NoRegister,
+ X86::DX, X86::EDX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::CX, X86::CH,
X86::R8, X86::R8D,
X86::R8B, X86::R8D,
- X86::RSP, X86::SPL,
- X86::R8W, X86::R8D,
- X86::NoRegister, X86::NoRegister,
- X86::SP, X86::SPL,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R8W, X86::R8D,
X86::NoRegister, X86::NoRegister,
- X86::RBP, X86::BPL,
+ X86::R15, X86::R15B,
+ X86::R15W, X86::R15B,
+ X86::R15D, X86::R15B,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -2769,34 +2499,33 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::AH, X86::RAX,
+ X86::AL, X86::RAX,
+ X86::AX, X86::RAX,
+ X86::RDI, X86::EDI,
X86::NoRegister, X86::NoRegister,
- X86::ECX, X86::CH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM10, X86::YMM10,
+ X86::YMM1, X86::XMM1,
X86::NoRegister, X86::NoRegister,
+ X86::XMM13, X86::YMM13,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::CX, X86::CL,
X86::R8, X86::R8W,
X86::R8B, X86::R8W,
- X86::R10B, X86::R10,
- X86::R10D, X86::R10,
- X86::R10W, X86::R10,
+ X86::R8D, X86::R8W,
+ X86::R15, X86::R15D,
+ X86::R15B, X86::R15D,
X86::NoRegister, X86::NoRegister,
+ X86::R15W, X86::R15D,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::CH, X86::ECX,
- X86::CL, X86::ECX,
X86::NoRegister, X86::NoRegister,
- X86::CX, X86::ECX,
X86::NoRegister, X86::NoRegister,
- X86::RBX, X86::BX,
- X86::R8D, X86::R8W,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -2805,14 +2534,19 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::EAX, X86::RAX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::ECX, X86::CL,
X86::NoRegister, X86::NoRegister,
+ X86::YMM2, X86::XMM2,
+ X86::NoRegister, X86::NoRegister,
+ X86::RDX, X86::EDX,
+ X86::BP, X86::RBP,
+ X86::BPL, X86::RBP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM11, X86::YMM11,
X86::NoRegister, X86::NoRegister,
+ X86::XMM14, X86::YMM14,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -2820,180 +2554,52 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R10, X86::R10B,
X86::NoRegister, X86::NoRegister,
- X86::R10D, X86::R10B,
- X86::R10W, X86::R10B,
+ X86::R15, X86::R15W,
+ X86::R15B, X86::R15W,
X86::R9B, X86::R9,
X86::R9D, X86::R9,
X86::R9W, X86::R9,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::DI, X86::EDI,
- X86::DIL, X86::EDI,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::RCX, X86::CH,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::XMM12, X86::YMM12,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::CH, X86::CX,
- X86::CL, X86::CX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::R10, X86::R10D,
- X86::R10B, X86::R10D,
- X86::NoRegister, X86::NoRegister,
- X86::R10W, X86::R10D,
- X86::NoRegister, X86::NoRegister,
- X86::R9, X86::R9B,
- X86::R9W, X86::R9B,
- X86::R9D, X86::R9B,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::DH, X86::EDX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::DL, X86::EDX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::RCX, X86::CL,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::AH, X86::RAX,
- X86::AL, X86::RAX,
- X86::AX, X86::RAX,
- X86::DX, X86::EDX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::ECX, X86::CX,
- X86::NoRegister, X86::NoRegister,
- X86::RCX, X86::ECX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::XMM13, X86::YMM13,
- X86::R10, X86::R10W,
- X86::R10B, X86::R10W,
- X86::R10D, X86::R10W,
- X86::R9, X86::R9D,
- X86::R9B, X86::R9D,
- X86::NoRegister, X86::NoRegister,
- X86::R9W, X86::R9D,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::DX, X86::DH,
- X86::NoRegister, X86::NoRegister,
- X86::EAX, X86::RAX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::EDX, X86::DH,
- X86::NoRegister, X86::NoRegister,
- X86::BP, X86::RBP,
- X86::BPL, X86::RBP,
- X86::NoRegister, X86::NoRegister,
- X86::XMM14, X86::YMM14,
- X86::NoRegister, X86::NoRegister,
- X86::RDI, X86::EDI,
- X86::NoRegister, X86::NoRegister,
+ X86::R15D, X86::R15W,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::DIL, X86::DI,
X86::NoRegister, X86::NoRegister,
- X86::R9, X86::R9W,
- X86::R9B, X86::R9W,
- X86::R11B, X86::R11,
- X86::R11D, X86::R11,
- X86::R11W, X86::R11,
+ X86::IP, X86::EIP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R9D, X86::R9W,
- X86::NoRegister, X86::NoRegister,
X86::EBP, X86::RBP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::RCX, X86::CX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::EDI, X86::DI,
X86::BH, X86::RBX,
X86::BL, X86::RBX,
- X86::NoRegister, X86::NoRegister,
+ X86::YMM3, X86::XMM3,
X86::NoRegister, X86::NoRegister,
X86::BX, X86::RBX,
X86::NoRegister, X86::NoRegister,
- X86::XMM15, X86::YMM15,
- X86::NoRegister, X86::NoRegister,
- X86::RDX, X86::EDX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::DI, X86::DIL,
- X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R11, X86::R11B,
- X86::NoRegister, X86::NoRegister,
- X86::R11D, X86::R11B,
- X86::R11W, X86::R11B,
- X86::NoRegister, X86::NoRegister,
+ X86::XMM15, X86::YMM15,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::EBX, X86::RBX,
- X86::NoRegister, X86::NoRegister,
- X86::IP, X86::EIP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R9, X86::R9B,
X86::NoRegister, X86::NoRegister,
+ X86::R9D, X86::R9B,
+ X86::R9W, X86::R9B,
X86::NoRegister, X86::NoRegister,
- X86::RDX, X86::DH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::EDI, X86::DIL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -3001,37 +2607,39 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM2, X86::YMM2,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::EBX, X86::RBX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::YMM4, X86::XMM4,
+ X86::RIP, X86::EIP,
X86::NoRegister, X86::NoRegister,
- X86::R11, X86::R11D,
- X86::R11B, X86::R11D,
X86::NoRegister, X86::NoRegister,
- X86::R11W, X86::R11D,
+ X86::CH, X86::RCX,
+ X86::CL, X86::RCX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::DX, X86::DL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::CX, X86::RCX,
+ X86::R9B, X86::R9D,
+ X86::R9, X86::R9D,
+ X86::R9W, X86::R9D,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::RDI, X86::DI,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::EDX, X86::DL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -3039,16 +2647,13 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM3, X86::YMM3,
- X86::RIP, X86::EIP,
X86::NoRegister, X86::NoRegister,
+ X86::ECX, X86::RCX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::YMM5, X86::XMM5,
X86::NoRegister, X86::NoRegister,
- X86::R11, X86::R11W,
- X86::R11B, X86::R11W,
- X86::R11D, X86::R11W,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -3062,8 +2667,12 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R9, X86::R9W,
+ X86::R9B, X86::R9W,
+ X86::DI, X86::RDI,
+ X86::DIL, X86::RDI,
+ X86::R9D, X86::R9W,
X86::NoRegister, X86::NoRegister,
- X86::RDI, X86::DIL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -3077,10 +2686,11 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM4, X86::YMM4,
+ X86::EDI, X86::RDI,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::YMM6, X86::XMM6,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -3088,9 +2698,6 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R12B, X86::R12,
- X86::R12D, X86::R12,
- X86::R12W, X86::R12,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -3098,36 +2705,38 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::DH, X86::RDX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::DL, X86::RDX,
+ X86::R10B, X86::R10,
+ X86::R10D, X86::R10,
+ X86::R10W, X86::R10,
X86::NoRegister, X86::NoRegister,
- X86::RDX, X86::DL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::DX, X86::RDX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::EDX, X86::RDX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::RSI, X86::ESI,
X86::NoRegister, X86::NoRegister,
+ X86::CH, X86::CX,
+ X86::CL, X86::CX,
X86::SI, X86::ESI,
X86::SIL, X86::ESI,
+ X86::YMM7, X86::XMM7,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM5, X86::YMM5,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::R12, X86::R12B,
- X86::NoRegister, X86::NoRegister,
- X86::R12D, X86::R12B,
- X86::R12W, X86::R12B,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -3136,7 +2745,10 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R10, X86::R10B,
X86::NoRegister, X86::NoRegister,
+ X86::R10D, X86::R10B,
+ X86::R10W, X86::R10B,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -3145,34 +2757,35 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::YMM0, X86::XMM0,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::ECX, X86::CX,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::EIP, X86::RIP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::RSP, X86::ESP,
X86::NoRegister, X86::NoRegister,
- X86::XMM6, X86::YMM6,
+ X86::YMM8, X86::XMM8,
X86::SP, X86::ESP,
X86::SPL, X86::ESP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R12, X86::R12D,
- X86::R12B, X86::R12D,
- X86::NoRegister, X86::NoRegister,
- X86::R12W, X86::R12D,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::IP, X86::RIP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R10, X86::R10D,
+ X86::R10B, X86::R10D,
X86::NoRegister, X86::NoRegister,
+ X86::R10W, X86::R10D,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -3180,27 +2793,25 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::DX, X86::DH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::YMM1, X86::XMM1,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::EDX, X86::DH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::YMM9, X86::XMM9,
X86::NoRegister, X86::NoRegister,
- X86::XMM7, X86::YMM7,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R12, X86::R12W,
- X86::R12B, X86::R12W,
- X86::R12D, X86::R12W,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -3208,6 +2819,11 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::DIL, X86::DI,
+ X86::R10B, X86::R10W,
+ X86::R10, X86::R10W,
+ X86::RCX, X86::CX,
+ X86::R10D, X86::R10W,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -3219,17 +2835,18 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::EDI, X86::DI,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::YMM10, X86::XMM10,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::ESI, X86::RSI,
X86::NoRegister, X86::NoRegister,
+ X86::YMM10, X86::XMM10,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM8, X86::YMM8,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -3238,26 +2855,30 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::DI, X86::DIL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R13B, X86::R13,
- X86::R13D, X86::R13,
- X86::R13W, X86::R13,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R11B, X86::R11,
+ X86::R11D, X86::R11,
+ X86::R11W, X86::R11,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::RDX, X86::DH,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::EDI, X86::DIL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::ESP, X86::RSP,
X86::NoRegister, X86::NoRegister,
X86::YMM11, X86::XMM11,
X86::NoRegister, X86::NoRegister,
@@ -3267,33 +2888,33 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::XMM9, X86::YMM9,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R13, X86::R13B,
X86::NoRegister, X86::NoRegister,
- X86::R13D, X86::R13B,
- X86::R13W, X86::R13B,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::R11, X86::R11B,
X86::NoRegister, X86::NoRegister,
+ X86::R11D, X86::R11B,
+ X86::R11W, X86::R11B,
X86::NoRegister, X86::NoRegister,
+ X86::SI, X86::RSI,
+ X86::RDI, X86::DI,
+ X86::DX, X86::DL,
+ X86::SIL, X86::RSI,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::EDX, X86::DL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::ESI, X86::SI,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -3313,15 +2934,15 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::R13, X86::R13D,
- X86::R13B, X86::R13D,
- X86::NoRegister, X86::NoRegister,
- X86::R13W, X86::R13D,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
+ X86::R11, X86::R11D,
+ X86::R11B, X86::R11D,
+ X86::RDI, X86::DIL,
+ X86::R11W, X86::R11D,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::SP, X86::RSP,
+ X86::SPL, X86::RSP,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
@@ -3330,12 +2951,10 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
+ X86::ESI, X86::SIL,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::CH, X86::RCX,
- X86::CL, X86::RCX,
X86::NoRegister, X86::NoRegister,
- X86::CX, X86::RCX,
X86::NoRegister, X86::NoRegister,
X86::YMM13, X86::XMM13,
X86::NoRegister, X86::NoRegister,
@@ -3348,80 +2967,6 @@ X86::NoRegister, X86::NoRegister };
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::R13, X86::R13W,
- X86::R13B, X86::R13W,
- X86::R13D, X86::R13W,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::ECX, X86::RCX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::YMM14, X86::XMM14,
- X86::DH, X86::DX,
- X86::DIL, X86::RDI,
- X86::DI, X86::RDI,
- X86::DL, X86::DX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::R14B, X86::R14,
- X86::R14D, X86::R14,
- X86::R14W, X86::R14,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::AH, X86::EAX,
- X86::AL, X86::EAX,
- X86::AX, X86::EAX,
- X86::NoRegister, X86::NoRegister,
- X86::EDX, X86::DX,
- X86::NoRegister, X86::NoRegister,
- X86::EDI, X86::RDI,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::DH, X86::RDX,
- X86::YMM15, X86::XMM15,
- X86::NoRegister, X86::NoRegister,
- X86::DL, X86::RDX,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
- X86::NoRegister, X86::NoRegister,
X86::NoRegister, X86::NoRegister };
const unsigned AliasesHashTableSize = 1024;
@@ -3438,59 +2983,30 @@ X86::NoRegister, X86::NoRegister };
const unsigned BX_AliasSet[] = { X86::BL, X86::BH, X86::EBX, X86::RBX, 0 };
const unsigned CH_AliasSet[] = { X86::CX, X86::ECX, X86::RCX, 0 };
const unsigned CL_AliasSet[] = { X86::CX, X86::ECX, X86::RCX, 0 };
- const unsigned CS_AliasSet[] = { 0 };
const unsigned CX_AliasSet[] = { X86::CL, X86::CH, X86::ECX, X86::RCX, 0 };
const unsigned DH_AliasSet[] = { X86::DX, X86::EDX, X86::RDX, 0 };
const unsigned DI_AliasSet[] = { X86::DIL, X86::EDI, X86::RDI, 0 };
const unsigned DIL_AliasSet[] = { X86::DI, X86::EDI, X86::RDI, 0 };
const unsigned DL_AliasSet[] = { X86::DX, X86::EDX, X86::RDX, 0 };
- const unsigned DR0_AliasSet[] = { 0 };
- const unsigned DR1_AliasSet[] = { 0 };
- const unsigned DR2_AliasSet[] = { 0 };
- const unsigned DR3_AliasSet[] = { 0 };
- const unsigned DR4_AliasSet[] = { 0 };
- const unsigned DR5_AliasSet[] = { 0 };
- const unsigned DR6_AliasSet[] = { 0 };
- const unsigned DR7_AliasSet[] = { 0 };
- const unsigned DS_AliasSet[] = { 0 };
const unsigned DX_AliasSet[] = { X86::DL, X86::DH, X86::EDX, X86::RDX, 0 };
const unsigned EAX_AliasSet[] = { X86::AL, X86::AH, X86::AX, X86::RAX, 0 };
const unsigned EBP_AliasSet[] = { X86::BPL, X86::BP, X86::RBP, 0 };
const unsigned EBX_AliasSet[] = { X86::BL, X86::BH, X86::BX, X86::RBX, 0 };
- const unsigned ECR0_AliasSet[] = { 0 };
- const unsigned ECR1_AliasSet[] = { 0 };
- const unsigned ECR2_AliasSet[] = { 0 };
- const unsigned ECR3_AliasSet[] = { 0 };
- const unsigned ECR4_AliasSet[] = { 0 };
- const unsigned ECR5_AliasSet[] = { 0 };
- const unsigned ECR6_AliasSet[] = { 0 };
- const unsigned ECR7_AliasSet[] = { 0 };
const unsigned ECX_AliasSet[] = { X86::CL, X86::CH, X86::CX, X86::RCX, 0 };
const unsigned EDI_AliasSet[] = { X86::DIL, X86::DI, X86::RDI, 0 };
const unsigned EDX_AliasSet[] = { X86::DL, X86::DH, X86::DX, X86::RDX, 0 };
- const unsigned EFLAGS_AliasSet[] = { 0 };
const unsigned EIP_AliasSet[] = { X86::IP, X86::RIP, 0 };
- const unsigned ES_AliasSet[] = { 0 };
const unsigned ESI_AliasSet[] = { X86::SIL, X86::SI, X86::RSI, 0 };
const unsigned ESP_AliasSet[] = { X86::SPL, X86::SP, X86::RSP, 0 };
- const unsigned FP0_AliasSet[] = { 0 };
- const unsigned FP1_AliasSet[] = { 0 };
- const unsigned FP2_AliasSet[] = { 0 };
- const unsigned FP3_AliasSet[] = { 0 };
- const unsigned FP4_AliasSet[] = { 0 };
- const unsigned FP5_AliasSet[] = { 0 };
- const unsigned FP6_AliasSet[] = { 0 };
- const unsigned FS_AliasSet[] = { 0 };
- const unsigned GS_AliasSet[] = { 0 };
const unsigned IP_AliasSet[] = { X86::EIP, X86::RIP, 0 };
- const unsigned MM0_AliasSet[] = { 0 };
- const unsigned MM1_AliasSet[] = { 0 };
- const unsigned MM2_AliasSet[] = { 0 };
- const unsigned MM3_AliasSet[] = { 0 };
- const unsigned MM4_AliasSet[] = { 0 };
- const unsigned MM5_AliasSet[] = { 0 };
- const unsigned MM6_AliasSet[] = { 0 };
- const unsigned MM7_AliasSet[] = { 0 };
+ const unsigned R8_AliasSet[] = { X86::R8B, X86::R8W, X86::R8D, 0 };
+ const unsigned R8B_AliasSet[] = { X86::R8W, X86::R8D, X86::R8, 0 };
+ const unsigned R8D_AliasSet[] = { X86::R8B, X86::R8W, X86::R8, 0 };
+ const unsigned R8W_AliasSet[] = { X86::R8B, X86::R8D, X86::R8, 0 };
+ const unsigned R9_AliasSet[] = { X86::R9B, X86::R9W, X86::R9D, 0 };
+ const unsigned R9B_AliasSet[] = { X86::R9W, X86::R9D, X86::R9, 0 };
+ const unsigned R9D_AliasSet[] = { X86::R9B, X86::R9W, X86::R9, 0 };
+ const unsigned R9W_AliasSet[] = { X86::R9B, X86::R9D, X86::R9, 0 };
const unsigned R10_AliasSet[] = { X86::R10B, X86::R10W, X86::R10D, 0 };
const unsigned R10B_AliasSet[] = { X86::R10W, X86::R10D, X86::R10, 0 };
const unsigned R10D_AliasSet[] = { X86::R10B, X86::R10W, X86::R10, 0 };
@@ -3515,26 +3031,9 @@ X86::NoRegister, X86::NoRegister };
const unsigned R15B_AliasSet[] = { X86::R15W, X86::R15D, X86::R15, 0 };
const unsigned R15D_AliasSet[] = { X86::R15B, X86::R15W, X86::R15, 0 };
const unsigned R15W_AliasSet[] = { X86::R15B, X86::R15D, X86::R15, 0 };
- const unsigned R8_AliasSet[] = { X86::R8B, X86::R8W, X86::R8D, 0 };
- const unsigned R8B_AliasSet[] = { X86::R8W, X86::R8D, X86::R8, 0 };
- const unsigned R8D_AliasSet[] = { X86::R8B, X86::R8W, X86::R8, 0 };
- const unsigned R8W_AliasSet[] = { X86::R8B, X86::R8D, X86::R8, 0 };
- const unsigned R9_AliasSet[] = { X86::R9B, X86::R9W, X86::R9D, 0 };
- const unsigned R9B_AliasSet[] = { X86::R9W, X86::R9D, X86::R9, 0 };
- const unsigned R9D_AliasSet[] = { X86::R9B, X86::R9W, X86::R9, 0 };
- const unsigned R9W_AliasSet[] = { X86::R9B, X86::R9D, X86::R9, 0 };
const unsigned RAX_AliasSet[] = { X86::AL, X86::AH, X86::AX, X86::EAX, 0 };
const unsigned RBP_AliasSet[] = { X86::BPL, X86::BP, X86::EBP, 0 };
const unsigned RBX_AliasSet[] = { X86::BL, X86::BH, X86::BX, X86::EBX, 0 };
- const unsigned RCR0_AliasSet[] = { 0 };
- const unsigned RCR1_AliasSet[] = { 0 };
- const unsigned RCR2_AliasSet[] = { 0 };
- const unsigned RCR3_AliasSet[] = { 0 };
- const unsigned RCR4_AliasSet[] = { 0 };
- const unsigned RCR5_AliasSet[] = { 0 };
- const unsigned RCR6_AliasSet[] = { 0 };
- const unsigned RCR7_AliasSet[] = { 0 };
- const unsigned RCR8_AliasSet[] = { 0 };
const unsigned RCX_AliasSet[] = { X86::CL, X86::CH, X86::CX, X86::ECX, 0 };
const unsigned RDI_AliasSet[] = { X86::DIL, X86::DI, X86::EDI, 0 };
const unsigned RDX_AliasSet[] = { X86::DL, X86::DH, X86::DX, X86::EDX, 0 };
@@ -3545,23 +3044,8 @@ X86::NoRegister, X86::NoRegister };
const unsigned SIL_AliasSet[] = { X86::SI, X86::ESI, X86::RSI, 0 };
const unsigned SP_AliasSet[] = { X86::SPL, X86::ESP, X86::RSP, 0 };
const unsigned SPL_AliasSet[] = { X86::SP, X86::ESP, X86::RSP, 0 };
- const unsigned SS_AliasSet[] = { 0 };
- const unsigned ST0_AliasSet[] = { 0 };
- const unsigned ST1_AliasSet[] = { 0 };
- const unsigned ST2_AliasSet[] = { 0 };
- const unsigned ST3_AliasSet[] = { 0 };
- const unsigned ST4_AliasSet[] = { 0 };
- const unsigned ST5_AliasSet[] = { 0 };
- const unsigned ST6_AliasSet[] = { 0 };
- const unsigned ST7_AliasSet[] = { 0 };
const unsigned XMM0_AliasSet[] = { X86::YMM0, 0 };
const unsigned XMM1_AliasSet[] = { X86::YMM1, 0 };
- const unsigned XMM10_AliasSet[] = { X86::YMM10, 0 };
- const unsigned XMM11_AliasSet[] = { X86::YMM11, 0 };
- const unsigned XMM12_AliasSet[] = { X86::YMM12, 0 };
- const unsigned XMM13_AliasSet[] = { X86::YMM13, 0 };
- const unsigned XMM14_AliasSet[] = { X86::YMM14, 0 };
- const unsigned XMM15_AliasSet[] = { X86::YMM15, 0 };
const unsigned XMM2_AliasSet[] = { X86::YMM2, 0 };
const unsigned XMM3_AliasSet[] = { X86::YMM3, 0 };
const unsigned XMM4_AliasSet[] = { X86::YMM4, 0 };
@@ -3570,14 +3054,14 @@ X86::NoRegister, X86::NoRegister };
const unsigned XMM7_AliasSet[] = { X86::YMM7, 0 };
const unsigned XMM8_AliasSet[] = { X86::YMM8, 0 };
const unsigned XMM9_AliasSet[] = { X86::YMM9, 0 };
+ const unsigned XMM10_AliasSet[] = { X86::YMM10, 0 };
+ const unsigned XMM11_AliasSet[] = { X86::YMM11, 0 };
+ const unsigned XMM12_AliasSet[] = { X86::YMM12, 0 };
+ const unsigned XMM13_AliasSet[] = { X86::YMM13, 0 };
+ const unsigned XMM14_AliasSet[] = { X86::YMM14, 0 };
+ const unsigned XMM15_AliasSet[] = { X86::YMM15, 0 };
const unsigned YMM0_AliasSet[] = { X86::XMM0, 0 };
const unsigned YMM1_AliasSet[] = { X86::XMM1, 0 };
- const unsigned YMM10_AliasSet[] = { X86::XMM10, 0 };
- const unsigned YMM11_AliasSet[] = { X86::XMM11, 0 };
- const unsigned YMM12_AliasSet[] = { X86::XMM12, 0 };
- const unsigned YMM13_AliasSet[] = { X86::XMM13, 0 };
- const unsigned YMM14_AliasSet[] = { X86::XMM14, 0 };
- const unsigned YMM15_AliasSet[] = { X86::XMM15, 0 };
const unsigned YMM2_AliasSet[] = { X86::XMM2, 0 };
const unsigned YMM3_AliasSet[] = { X86::XMM3, 0 };
const unsigned YMM4_AliasSet[] = { X86::XMM4, 0 };
@@ -3586,117 +3070,58 @@ X86::NoRegister, X86::NoRegister };
const unsigned YMM7_AliasSet[] = { X86::XMM7, 0 };
const unsigned YMM8_AliasSet[] = { X86::XMM8, 0 };
const unsigned YMM9_AliasSet[] = { X86::XMM9, 0 };
+ const unsigned YMM10_AliasSet[] = { X86::XMM10, 0 };
+ const unsigned YMM11_AliasSet[] = { X86::XMM11, 0 };
+ const unsigned YMM12_AliasSet[] = { X86::XMM12, 0 };
+ const unsigned YMM13_AliasSet[] = { X86::XMM13, 0 };
+ const unsigned YMM14_AliasSet[] = { X86::XMM14, 0 };
+ const unsigned YMM15_AliasSet[] = { X86::XMM15, 0 };
// Register Sub-registers Sets...
const unsigned Empty_SubRegsSet[] = { 0 };
- const unsigned AH_SubRegsSet[] = { 0 };
- const unsigned AL_SubRegsSet[] = { 0 };
const unsigned AX_SubRegsSet[] = { X86::AL, X86::AH, 0 };
- const unsigned BH_SubRegsSet[] = { 0 };
- const unsigned BL_SubRegsSet[] = { 0 };
const unsigned BP_SubRegsSet[] = { X86::BPL, 0 };
- const unsigned BPL_SubRegsSet[] = { 0 };
const unsigned BX_SubRegsSet[] = { X86::BL, X86::BH, 0 };
- const unsigned CH_SubRegsSet[] = { 0 };
- const unsigned CL_SubRegsSet[] = { 0 };
- const unsigned CS_SubRegsSet[] = { 0 };
const unsigned CX_SubRegsSet[] = { X86::CL, X86::CH, 0 };
- const unsigned DH_SubRegsSet[] = { 0 };
const unsigned DI_SubRegsSet[] = { X86::DIL, 0 };
- const unsigned DIL_SubRegsSet[] = { 0 };
- const unsigned DL_SubRegsSet[] = { 0 };
- const unsigned DR0_SubRegsSet[] = { 0 };
- const unsigned DR1_SubRegsSet[] = { 0 };
- const unsigned DR2_SubRegsSet[] = { 0 };
- const unsigned DR3_SubRegsSet[] = { 0 };
- const unsigned DR4_SubRegsSet[] = { 0 };
- const unsigned DR5_SubRegsSet[] = { 0 };
- const unsigned DR6_SubRegsSet[] = { 0 };
- const unsigned DR7_SubRegsSet[] = { 0 };
- const unsigned DS_SubRegsSet[] = { 0 };
const unsigned DX_SubRegsSet[] = { X86::DL, X86::DH, 0 };
const unsigned EAX_SubRegsSet[] = { X86::AX, X86::AL, X86::AH, 0 };
const unsigned EBP_SubRegsSet[] = { X86::BP, X86::BPL, 0 };
const unsigned EBX_SubRegsSet[] = { X86::BX, X86::BL, X86::BH, 0 };
- const unsigned ECR0_SubRegsSet[] = { 0 };
- const unsigned ECR1_SubRegsSet[] = { 0 };
- const unsigned ECR2_SubRegsSet[] = { 0 };
- const unsigned ECR3_SubRegsSet[] = { 0 };
- const unsigned ECR4_SubRegsSet[] = { 0 };
- const unsigned ECR5_SubRegsSet[] = { 0 };
- const unsigned ECR6_SubRegsSet[] = { 0 };
- const unsigned ECR7_SubRegsSet[] = { 0 };
const unsigned ECX_SubRegsSet[] = { X86::CX, X86::CL, X86::CH, 0 };
const unsigned EDI_SubRegsSet[] = { X86::DI, X86::DIL, 0 };
const unsigned EDX_SubRegsSet[] = { X86::DX, X86::DL, X86::DH, 0 };
- const unsigned EFLAGS_SubRegsSet[] = { 0 };
const unsigned EIP_SubRegsSet[] = { X86::IP, 0 };
- const unsigned ES_SubRegsSet[] = { 0 };
const unsigned ESI_SubRegsSet[] = { X86::SI, X86::SIL, 0 };
const unsigned ESP_SubRegsSet[] = { X86::SP, X86::SPL, 0 };
- const unsigned FP0_SubRegsSet[] = { 0 };
- const unsigned FP1_SubRegsSet[] = { 0 };
- const unsigned FP2_SubRegsSet[] = { 0 };
- const unsigned FP3_SubRegsSet[] = { 0 };
- const unsigned FP4_SubRegsSet[] = { 0 };
- const unsigned FP5_SubRegsSet[] = { 0 };
- const unsigned FP6_SubRegsSet[] = { 0 };
- const unsigned FS_SubRegsSet[] = { 0 };
- const unsigned GS_SubRegsSet[] = { 0 };
- const unsigned IP_SubRegsSet[] = { 0 };
- const unsigned MM0_SubRegsSet[] = { 0 };
- const unsigned MM1_SubRegsSet[] = { 0 };
- const unsigned MM2_SubRegsSet[] = { 0 };
- const unsigned MM3_SubRegsSet[] = { 0 };
- const unsigned MM4_SubRegsSet[] = { 0 };
- const unsigned MM5_SubRegsSet[] = { 0 };
- const unsigned MM6_SubRegsSet[] = { 0 };
- const unsigned MM7_SubRegsSet[] = { 0 };
+ const unsigned R8_SubRegsSet[] = { X86::R8D, X86::R8W, X86::R8B, 0 };
+ const unsigned R8D_SubRegsSet[] = { X86::R8W, X86::R8B, 0 };
+ const unsigned R8W_SubRegsSet[] = { X86::R8B, 0 };
+ const unsigned R9_SubRegsSet[] = { X86::R9D, X86::R9W, X86::R9B, 0 };
+ const unsigned R9D_SubRegsSet[] = { X86::R9W, X86::R9B, 0 };
+ const unsigned R9W_SubRegsSet[] = { X86::R9B, 0 };
const unsigned R10_SubRegsSet[] = { X86::R10D, X86::R10W, X86::R10B, 0 };
- const unsigned R10B_SubRegsSet[] = { 0 };
const unsigned R10D_SubRegsSet[] = { X86::R10W, X86::R10B, 0 };
const unsigned R10W_SubRegsSet[] = { X86::R10B, 0 };
const unsigned R11_SubRegsSet[] = { X86::R11D, X86::R11W, X86::R11B, 0 };
- const unsigned R11B_SubRegsSet[] = { 0 };
const unsigned R11D_SubRegsSet[] = { X86::R11W, X86::R11B, 0 };
const unsigned R11W_SubRegsSet[] = { X86::R11B, 0 };
const unsigned R12_SubRegsSet[] = { X86::R12D, X86::R12W, X86::R12B, 0 };
- const unsigned R12B_SubRegsSet[] = { 0 };
const unsigned R12D_SubRegsSet[] = { X86::R12W, X86::R12B, 0 };
const unsigned R12W_SubRegsSet[] = { X86::R12B, 0 };
const unsigned R13_SubRegsSet[] = { X86::R13D, X86::R13W, X86::R13B, 0 };
- const unsigned R13B_SubRegsSet[] = { 0 };
const unsigned R13D_SubRegsSet[] = { X86::R13W, X86::R13B, 0 };
const unsigned R13W_SubRegsSet[] = { X86::R13B, 0 };
const unsigned R14_SubRegsSet[] = { X86::R14D, X86::R14W, X86::R14B, 0 };
- const unsigned R14B_SubRegsSet[] = { 0 };
const unsigned R14D_SubRegsSet[] = { X86::R14W, X86::R14B, 0 };
const unsigned R14W_SubRegsSet[] = { X86::R14B, 0 };
const unsigned R15_SubRegsSet[] = { X86::R15D, X86::R15W, X86::R15B, 0 };
- const unsigned R15B_SubRegsSet[] = { 0 };
const unsigned R15D_SubRegsSet[] = { X86::R15W, X86::R15B, 0 };
const unsigned R15W_SubRegsSet[] = { X86::R15B, 0 };
- const unsigned R8_SubRegsSet[] = { X86::R8D, X86::R8W, X86::R8B, 0 };
- const unsigned R8B_SubRegsSet[] = { 0 };
- const unsigned R8D_SubRegsSet[] = { X86::R8W, X86::R8B, 0 };
- const unsigned R8W_SubRegsSet[] = { X86::R8B, 0 };
- const unsigned R9_SubRegsSet[] = { X86::R9D, X86::R9W, X86::R9B, 0 };
- const unsigned R9B_SubRegsSet[] = { 0 };
- const unsigned R9D_SubRegsSet[] = { X86::R9W, X86::R9B, 0 };
- const unsigned R9W_SubRegsSet[] = { X86::R9B, 0 };
const unsigned RAX_SubRegsSet[] = { X86::EAX, X86::AX, X86::AL, X86::AH, 0 };
const unsigned RBP_SubRegsSet[] = { X86::EBP, X86::BP, X86::BPL, 0 };
const unsigned RBX_SubRegsSet[] = { X86::EBX, X86::BX, X86::BL, X86::BH, 0 };
- const unsigned RCR0_SubRegsSet[] = { 0 };
- const unsigned RCR1_SubRegsSet[] = { 0 };
- const unsigned RCR2_SubRegsSet[] = { 0 };
- const unsigned RCR3_SubRegsSet[] = { 0 };
- const unsigned RCR4_SubRegsSet[] = { 0 };
- const unsigned RCR5_SubRegsSet[] = { 0 };
- const unsigned RCR6_SubRegsSet[] = { 0 };
- const unsigned RCR7_SubRegsSet[] = { 0 };
- const unsigned RCR8_SubRegsSet[] = { 0 };
const unsigned RCX_SubRegsSet[] = { X86::ECX, X86::CX, X86::CL, X86::CH, 0 };
const unsigned RDI_SubRegsSet[] = { X86::EDI, X86::DI, X86::DIL, 0 };
const unsigned RDX_SubRegsSet[] = { X86::EDX, X86::DX, X86::DL, X86::DH, 0 };
@@ -3704,42 +3129,9 @@ X86::NoRegister, X86::NoRegister };
const unsigned RSI_SubRegsSet[] = { X86::ESI, X86::SI, X86::SIL, 0 };
const unsigned RSP_SubRegsSet[] = { X86::ESP, X86::SP, X86::SPL, 0 };
const unsigned SI_SubRegsSet[] = { X86::SIL, 0 };
- const unsigned SIL_SubRegsSet[] = { 0 };
const unsigned SP_SubRegsSet[] = { X86::SPL, 0 };
- const unsigned SPL_SubRegsSet[] = { 0 };
- const unsigned SS_SubRegsSet[] = { 0 };
- const unsigned ST0_SubRegsSet[] = { 0 };
- const unsigned ST1_SubRegsSet[] = { 0 };
- const unsigned ST2_SubRegsSet[] = { 0 };
- const unsigned ST3_SubRegsSet[] = { 0 };
- const unsigned ST4_SubRegsSet[] = { 0 };
- const unsigned ST5_SubRegsSet[] = { 0 };
- const unsigned ST6_SubRegsSet[] = { 0 };
- const unsigned ST7_SubRegsSet[] = { 0 };
- const unsigned XMM0_SubRegsSet[] = { 0 };
- const unsigned XMM1_SubRegsSet[] = { 0 };
- const unsigned XMM10_SubRegsSet[] = { 0 };
- const unsigned XMM11_SubRegsSet[] = { 0 };
- const unsigned XMM12_SubRegsSet[] = { 0 };
- const unsigned XMM13_SubRegsSet[] = { 0 };
- const unsigned XMM14_SubRegsSet[] = { 0 };
- const unsigned XMM15_SubRegsSet[] = { 0 };
- const unsigned XMM2_SubRegsSet[] = { 0 };
- const unsigned XMM3_SubRegsSet[] = { 0 };
- const unsigned XMM4_SubRegsSet[] = { 0 };
- const unsigned XMM5_SubRegsSet[] = { 0 };
- const unsigned XMM6_SubRegsSet[] = { 0 };
- const unsigned XMM7_SubRegsSet[] = { 0 };
- const unsigned XMM8_SubRegsSet[] = { 0 };
- const unsigned XMM9_SubRegsSet[] = { 0 };
const unsigned YMM0_SubRegsSet[] = { X86::XMM0, 0 };
const unsigned YMM1_SubRegsSet[] = { X86::XMM1, 0 };
- const unsigned YMM10_SubRegsSet[] = { X86::XMM10, 0 };
- const unsigned YMM11_SubRegsSet[] = { X86::XMM11, 0 };
- const unsigned YMM12_SubRegsSet[] = { X86::XMM12, 0 };
- const unsigned YMM13_SubRegsSet[] = { X86::XMM13, 0 };
- const unsigned YMM14_SubRegsSet[] = { X86::XMM14, 0 };
- const unsigned YMM15_SubRegsSet[] = { X86::XMM15, 0 };
const unsigned YMM2_SubRegsSet[] = { X86::XMM2, 0 };
const unsigned YMM3_SubRegsSet[] = { X86::XMM3, 0 };
const unsigned YMM4_SubRegsSet[] = { X86::XMM4, 0 };
@@ -3748,6 +3140,12 @@ X86::NoRegister, X86::NoRegister };
const unsigned YMM7_SubRegsSet[] = { X86::XMM7, 0 };
const unsigned YMM8_SubRegsSet[] = { X86::XMM8, 0 };
const unsigned YMM9_SubRegsSet[] = { X86::XMM9, 0 };
+ const unsigned YMM10_SubRegsSet[] = { X86::XMM10, 0 };
+ const unsigned YMM11_SubRegsSet[] = { X86::XMM11, 0 };
+ const unsigned YMM12_SubRegsSet[] = { X86::XMM12, 0 };
+ const unsigned YMM13_SubRegsSet[] = { X86::XMM13, 0 };
+ const unsigned YMM14_SubRegsSet[] = { X86::XMM14, 0 };
+ const unsigned YMM15_SubRegsSet[] = { X86::XMM15, 0 };
// Register Super-registers Sets...
@@ -3762,130 +3160,52 @@ X86::NoRegister, X86::NoRegister };
const unsigned BX_SuperRegsSet[] = { X86::RBX, X86::EBX, 0 };
const unsigned CH_SuperRegsSet[] = { X86::RCX, X86::ECX, X86::CX, 0 };
const unsigned CL_SuperRegsSet[] = { X86::RCX, X86::ECX, X86::CX, 0 };
- const unsigned CS_SuperRegsSet[] = { 0 };
const unsigned CX_SuperRegsSet[] = { X86::RCX, X86::ECX, 0 };
const unsigned DH_SuperRegsSet[] = { X86::RDX, X86::EDX, X86::DX, 0 };
const unsigned DI_SuperRegsSet[] = { X86::RDI, X86::EDI, 0 };
const unsigned DIL_SuperRegsSet[] = { X86::RDI, X86::EDI, X86::DI, 0 };
const unsigned DL_SuperRegsSet[] = { X86::RDX, X86::EDX, X86::DX, 0 };
- const unsigned DR0_SuperRegsSet[] = { 0 };
- const unsigned DR1_SuperRegsSet[] = { 0 };
- const unsigned DR2_SuperRegsSet[] = { 0 };
- const unsigned DR3_SuperRegsSet[] = { 0 };
- const unsigned DR4_SuperRegsSet[] = { 0 };
- const unsigned DR5_SuperRegsSet[] = { 0 };
- const unsigned DR6_SuperRegsSet[] = { 0 };
- const unsigned DR7_SuperRegsSet[] = { 0 };
- const unsigned DS_SuperRegsSet[] = { 0 };
const unsigned DX_SuperRegsSet[] = { X86::RDX, X86::EDX, 0 };
const unsigned EAX_SuperRegsSet[] = { X86::RAX, 0 };
const unsigned EBP_SuperRegsSet[] = { X86::RBP, 0 };
const unsigned EBX_SuperRegsSet[] = { X86::RBX, 0 };
- const unsigned ECR0_SuperRegsSet[] = { 0 };
- const unsigned ECR1_SuperRegsSet[] = { 0 };
- const unsigned ECR2_SuperRegsSet[] = { 0 };
- const unsigned ECR3_SuperRegsSet[] = { 0 };
- const unsigned ECR4_SuperRegsSet[] = { 0 };
- const unsigned ECR5_SuperRegsSet[] = { 0 };
- const unsigned ECR6_SuperRegsSet[] = { 0 };
- const unsigned ECR7_SuperRegsSet[] = { 0 };
const unsigned ECX_SuperRegsSet[] = { X86::RCX, 0 };
const unsigned EDI_SuperRegsSet[] = { X86::RDI, 0 };
const unsigned EDX_SuperRegsSet[] = { X86::RDX, 0 };
- const unsigned EFLAGS_SuperRegsSet[] = { 0 };
const unsigned EIP_SuperRegsSet[] = { X86::RIP, 0 };
- const unsigned ES_SuperRegsSet[] = { 0 };
const unsigned ESI_SuperRegsSet[] = { X86::RSI, 0 };
const unsigned ESP_SuperRegsSet[] = { X86::RSP, 0 };
- const unsigned FP0_SuperRegsSet[] = { 0 };
- const unsigned FP1_SuperRegsSet[] = { 0 };
- const unsigned FP2_SuperRegsSet[] = { 0 };
- const unsigned FP3_SuperRegsSet[] = { 0 };
- const unsigned FP4_SuperRegsSet[] = { 0 };
- const unsigned FP5_SuperRegsSet[] = { 0 };
- const unsigned FP6_SuperRegsSet[] = { 0 };
- const unsigned FS_SuperRegsSet[] = { 0 };
- const unsigned GS_SuperRegsSet[] = { 0 };
const unsigned IP_SuperRegsSet[] = { X86::RIP, X86::EIP, 0 };
- const unsigned MM0_SuperRegsSet[] = { 0 };
- const unsigned MM1_SuperRegsSet[] = { 0 };
- const unsigned MM2_SuperRegsSet[] = { 0 };
- const unsigned MM3_SuperRegsSet[] = { 0 };
- const unsigned MM4_SuperRegsSet[] = { 0 };
- const unsigned MM5_SuperRegsSet[] = { 0 };
- const unsigned MM6_SuperRegsSet[] = { 0 };
- const unsigned MM7_SuperRegsSet[] = { 0 };
- const unsigned R10_SuperRegsSet[] = { 0 };
+ const unsigned R8B_SuperRegsSet[] = { X86::R8, X86::R8D, X86::R8W, 0 };
+ const unsigned R8D_SuperRegsSet[] = { X86::R8, 0 };
+ const unsigned R8W_SuperRegsSet[] = { X86::R8, X86::R8D, 0 };
+ const unsigned R9B_SuperRegsSet[] = { X86::R9, X86::R9D, X86::R9W, 0 };
+ const unsigned R9D_SuperRegsSet[] = { X86::R9, 0 };
+ const unsigned R9W_SuperRegsSet[] = { X86::R9, X86::R9D, 0 };
const unsigned R10B_SuperRegsSet[] = { X86::R10, X86::R10D, X86::R10W, 0 };
const unsigned R10D_SuperRegsSet[] = { X86::R10, 0 };
const unsigned R10W_SuperRegsSet[] = { X86::R10, X86::R10D, 0 };
- const unsigned R11_SuperRegsSet[] = { 0 };
const unsigned R11B_SuperRegsSet[] = { X86::R11, X86::R11D, X86::R11W, 0 };
const unsigned R11D_SuperRegsSet[] = { X86::R11, 0 };
const unsigned R11W_SuperRegsSet[] = { X86::R11, X86::R11D, 0 };
- const unsigned R12_SuperRegsSet[] = { 0 };
const unsigned R12B_SuperRegsSet[] = { X86::R12, X86::R12D, X86::R12W, 0 };
const unsigned R12D_SuperRegsSet[] = { X86::R12, 0 };
const unsigned R12W_SuperRegsSet[] = { X86::R12, X86::R12D, 0 };
- const unsigned R13_SuperRegsSet[] = { 0 };
const unsigned R13B_SuperRegsSet[] = { X86::R13, X86::R13D, X86::R13W, 0 };
const unsigned R13D_SuperRegsSet[] = { X86::R13, 0 };
const unsigned R13W_SuperRegsSet[] = { X86::R13, X86::R13D, 0 };
- const unsigned R14_SuperRegsSet[] = { 0 };
const unsigned R14B_SuperRegsSet[] = { X86::R14, X86::R14D, X86::R14W, 0 };
const unsigned R14D_SuperRegsSet[] = { X86::R14, 0 };
const unsigned R14W_SuperRegsSet[] = { X86::R14, X86::R14D, 0 };
- const unsigned R15_SuperRegsSet[] = { 0 };
const unsigned R15B_SuperRegsSet[] = { X86::R15, X86::R15D, X86::R15W, 0 };
const unsigned R15D_SuperRegsSet[] = { X86::R15, 0 };
const unsigned R15W_SuperRegsSet[] = { X86::R15, X86::R15D, 0 };
- const unsigned R8_SuperRegsSet[] = { 0 };
- const unsigned R8B_SuperRegsSet[] = { X86::R8, X86::R8D, X86::R8W, 0 };
- const unsigned R8D_SuperRegsSet[] = { X86::R8, 0 };
- const unsigned R8W_SuperRegsSet[] = { X86::R8, X86::R8D, 0 };
- const unsigned R9_SuperRegsSet[] = { 0 };
- const unsigned R9B_SuperRegsSet[] = { X86::R9, X86::R9D, X86::R9W, 0 };
- const unsigned R9D_SuperRegsSet[] = { X86::R9, 0 };
- const unsigned R9W_SuperRegsSet[] = { X86::R9, X86::R9D, 0 };
- const unsigned RAX_SuperRegsSet[] = { 0 };
- const unsigned RBP_SuperRegsSet[] = { 0 };
- const unsigned RBX_SuperRegsSet[] = { 0 };
- const unsigned RCR0_SuperRegsSet[] = { 0 };
- const unsigned RCR1_SuperRegsSet[] = { 0 };
- const unsigned RCR2_SuperRegsSet[] = { 0 };
- const unsigned RCR3_SuperRegsSet[] = { 0 };
- const unsigned RCR4_SuperRegsSet[] = { 0 };
- const unsigned RCR5_SuperRegsSet[] = { 0 };
- const unsigned RCR6_SuperRegsSet[] = { 0 };
- const unsigned RCR7_SuperRegsSet[] = { 0 };
- const unsigned RCR8_SuperRegsSet[] = { 0 };
- const unsigned RCX_SuperRegsSet[] = { 0 };
- const unsigned RDI_SuperRegsSet[] = { 0 };
- const unsigned RDX_SuperRegsSet[] = { 0 };
- const unsigned RIP_SuperRegsSet[] = { 0 };
- const unsigned RSI_SuperRegsSet[] = { 0 };
- const unsigned RSP_SuperRegsSet[] = { 0 };
const unsigned SI_SuperRegsSet[] = { X86::RSI, X86::ESI, 0 };
const unsigned SIL_SuperRegsSet[] = { X86::RSI, X86::ESI, X86::SI, 0 };
const unsigned SP_SuperRegsSet[] = { X86::RSP, X86::ESP, 0 };
const unsigned SPL_SuperRegsSet[] = { X86::RSP, X86::ESP, X86::SP, 0 };
- const unsigned SS_SuperRegsSet[] = { 0 };
- const unsigned ST0_SuperRegsSet[] = { 0 };
- const unsigned ST1_SuperRegsSet[] = { 0 };
- const unsigned ST2_SuperRegsSet[] = { 0 };
- const unsigned ST3_SuperRegsSet[] = { 0 };
- const unsigned ST4_SuperRegsSet[] = { 0 };
- const unsigned ST5_SuperRegsSet[] = { 0 };
- const unsigned ST6_SuperRegsSet[] = { 0 };
- const unsigned ST7_SuperRegsSet[] = { 0 };
const unsigned XMM0_SuperRegsSet[] = { X86::YMM0, 0 };
const unsigned XMM1_SuperRegsSet[] = { X86::YMM1, 0 };
- const unsigned XMM10_SuperRegsSet[] = { X86::YMM10, 0 };
- const unsigned XMM11_SuperRegsSet[] = { X86::YMM11, 0 };
- const unsigned XMM12_SuperRegsSet[] = { X86::YMM12, 0 };
- const unsigned XMM13_SuperRegsSet[] = { X86::YMM13, 0 };
- const unsigned XMM14_SuperRegsSet[] = { X86::YMM14, 0 };
- const unsigned XMM15_SuperRegsSet[] = { X86::YMM15, 0 };
const unsigned XMM2_SuperRegsSet[] = { X86::YMM2, 0 };
const unsigned XMM3_SuperRegsSet[] = { X86::YMM3, 0 };
const unsigned XMM4_SuperRegsSet[] = { X86::YMM4, 0 };
@@ -3894,184 +3214,171 @@ X86::NoRegister, X86::NoRegister };
const unsigned XMM7_SuperRegsSet[] = { X86::YMM7, 0 };
const unsigned XMM8_SuperRegsSet[] = { X86::YMM8, 0 };
const unsigned XMM9_SuperRegsSet[] = { X86::YMM9, 0 };
- const unsigned YMM0_SuperRegsSet[] = { 0 };
- const unsigned YMM1_SuperRegsSet[] = { 0 };
- const unsigned YMM10_SuperRegsSet[] = { 0 };
- const unsigned YMM11_SuperRegsSet[] = { 0 };
- const unsigned YMM12_SuperRegsSet[] = { 0 };
- const unsigned YMM13_SuperRegsSet[] = { 0 };
- const unsigned YMM14_SuperRegsSet[] = { 0 };
- const unsigned YMM15_SuperRegsSet[] = { 0 };
- const unsigned YMM2_SuperRegsSet[] = { 0 };
- const unsigned YMM3_SuperRegsSet[] = { 0 };
- const unsigned YMM4_SuperRegsSet[] = { 0 };
- const unsigned YMM5_SuperRegsSet[] = { 0 };
- const unsigned YMM6_SuperRegsSet[] = { 0 };
- const unsigned YMM7_SuperRegsSet[] = { 0 };
- const unsigned YMM8_SuperRegsSet[] = { 0 };
- const unsigned YMM9_SuperRegsSet[] = { 0 };
+ const unsigned XMM10_SuperRegsSet[] = { X86::YMM10, 0 };
+ const unsigned XMM11_SuperRegsSet[] = { X86::YMM11, 0 };
+ const unsigned XMM12_SuperRegsSet[] = { X86::YMM12, 0 };
+ const unsigned XMM13_SuperRegsSet[] = { X86::YMM13, 0 };
+ const unsigned XMM14_SuperRegsSet[] = { X86::YMM14, 0 };
+ const unsigned XMM15_SuperRegsSet[] = { X86::YMM15, 0 };
const TargetRegisterDesc RegisterDescriptors[] = { // Descriptors
{ "NOREG", 0, 0, 0 },
- { "AH", AH_AliasSet, AH_SubRegsSet, AH_SuperRegsSet },
- { "AL", AL_AliasSet, AL_SubRegsSet, AL_SuperRegsSet },
+ { "AH", AH_AliasSet, Empty_SubRegsSet, AH_SuperRegsSet },
+ { "AL", AL_AliasSet, Empty_SubRegsSet, AL_SuperRegsSet },
{ "AX", AX_AliasSet, AX_SubRegsSet, AX_SuperRegsSet },
- { "BH", BH_AliasSet, BH_SubRegsSet, BH_SuperRegsSet },
- { "BL", BL_AliasSet, BL_SubRegsSet, BL_SuperRegsSet },
+ { "BH", BH_AliasSet, Empty_SubRegsSet, BH_SuperRegsSet },
+ { "BL", BL_AliasSet, Empty_SubRegsSet, BL_SuperRegsSet },
{ "BP", BP_AliasSet, BP_SubRegsSet, BP_SuperRegsSet },
- { "BPL", BPL_AliasSet, BPL_SubRegsSet, BPL_SuperRegsSet },
+ { "BPL", BPL_AliasSet, Empty_SubRegsSet, BPL_SuperRegsSet },
{ "BX", BX_AliasSet, BX_SubRegsSet, BX_SuperRegsSet },
- { "CH", CH_AliasSet, CH_SubRegsSet, CH_SuperRegsSet },
- { "CL", CL_AliasSet, CL_SubRegsSet, CL_SuperRegsSet },
- { "CS", CS_AliasSet, CS_SubRegsSet, CS_SuperRegsSet },
+ { "CH", CH_AliasSet, Empty_SubRegsSet, CH_SuperRegsSet },
+ { "CL", CL_AliasSet, Empty_SubRegsSet, CL_SuperRegsSet },
+ { "CR0", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "CR1", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "CR2", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "CR3", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "CR4", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "CR5", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "CR6", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "CR7", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "CR8", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "CS", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
{ "CX", CX_AliasSet, CX_SubRegsSet, CX_SuperRegsSet },
- { "DH", DH_AliasSet, DH_SubRegsSet, DH_SuperRegsSet },
+ { "DH", DH_AliasSet, Empty_SubRegsSet, DH_SuperRegsSet },
{ "DI", DI_AliasSet, DI_SubRegsSet, DI_SuperRegsSet },
- { "DIL", DIL_AliasSet, DIL_SubRegsSet, DIL_SuperRegsSet },
- { "DL", DL_AliasSet, DL_SubRegsSet, DL_SuperRegsSet },
- { "DR0", DR0_AliasSet, DR0_SubRegsSet, DR0_SuperRegsSet },
- { "DR1", DR1_AliasSet, DR1_SubRegsSet, DR1_SuperRegsSet },
- { "DR2", DR2_AliasSet, DR2_SubRegsSet, DR2_SuperRegsSet },
- { "DR3", DR3_AliasSet, DR3_SubRegsSet, DR3_SuperRegsSet },
- { "DR4", DR4_AliasSet, DR4_SubRegsSet, DR4_SuperRegsSet },
- { "DR5", DR5_AliasSet, DR5_SubRegsSet, DR5_SuperRegsSet },
- { "DR6", DR6_AliasSet, DR6_SubRegsSet, DR6_SuperRegsSet },
- { "DR7", DR7_AliasSet, DR7_SubRegsSet, DR7_SuperRegsSet },
- { "DS", DS_AliasSet, DS_SubRegsSet, DS_SuperRegsSet },
+ { "DIL", DIL_AliasSet, Empty_SubRegsSet, DIL_SuperRegsSet },
+ { "DL", DL_AliasSet, Empty_SubRegsSet, DL_SuperRegsSet },
+ { "DR0", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "DR1", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "DR2", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "DR3", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "DR4", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "DR5", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "DR6", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "DR7", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "DS", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
{ "DX", DX_AliasSet, DX_SubRegsSet, DX_SuperRegsSet },
{ "EAX", EAX_AliasSet, EAX_SubRegsSet, EAX_SuperRegsSet },
{ "EBP", EBP_AliasSet, EBP_SubRegsSet, EBP_SuperRegsSet },
{ "EBX", EBX_AliasSet, EBX_SubRegsSet, EBX_SuperRegsSet },
- { "ECR0", ECR0_AliasSet, ECR0_SubRegsSet, ECR0_SuperRegsSet },
- { "ECR1", ECR1_AliasSet, ECR1_SubRegsSet, ECR1_SuperRegsSet },
- { "ECR2", ECR2_AliasSet, ECR2_SubRegsSet, ECR2_SuperRegsSet },
- { "ECR3", ECR3_AliasSet, ECR3_SubRegsSet, ECR3_SuperRegsSet },
- { "ECR4", ECR4_AliasSet, ECR4_SubRegsSet, ECR4_SuperRegsSet },
- { "ECR5", ECR5_AliasSet, ECR5_SubRegsSet, ECR5_SuperRegsSet },
- { "ECR6", ECR6_AliasSet, ECR6_SubRegsSet, ECR6_SuperRegsSet },
- { "ECR7", ECR7_AliasSet, ECR7_SubRegsSet, ECR7_SuperRegsSet },
{ "ECX", ECX_AliasSet, ECX_SubRegsSet, ECX_SuperRegsSet },
{ "EDI", EDI_AliasSet, EDI_SubRegsSet, EDI_SuperRegsSet },
{ "EDX", EDX_AliasSet, EDX_SubRegsSet, EDX_SuperRegsSet },
- { "EFLAGS", EFLAGS_AliasSet, EFLAGS_SubRegsSet, EFLAGS_SuperRegsSet },
+ { "EFLAGS", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
{ "EIP", EIP_AliasSet, EIP_SubRegsSet, EIP_SuperRegsSet },
- { "ES", ES_AliasSet, ES_SubRegsSet, ES_SuperRegsSet },
+ { "EIZ", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "ES", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
{ "ESI", ESI_AliasSet, ESI_SubRegsSet, ESI_SuperRegsSet },
{ "ESP", ESP_AliasSet, ESP_SubRegsSet, ESP_SuperRegsSet },
- { "FP0", FP0_AliasSet, FP0_SubRegsSet, FP0_SuperRegsSet },
- { "FP1", FP1_AliasSet, FP1_SubRegsSet, FP1_SuperRegsSet },
- { "FP2", FP2_AliasSet, FP2_SubRegsSet, FP2_SuperRegsSet },
- { "FP3", FP3_AliasSet, FP3_SubRegsSet, FP3_SuperRegsSet },
- { "FP4", FP4_AliasSet, FP4_SubRegsSet, FP4_SuperRegsSet },
- { "FP5", FP5_AliasSet, FP5_SubRegsSet, FP5_SuperRegsSet },
- { "FP6", FP6_AliasSet, FP6_SubRegsSet, FP6_SuperRegsSet },
- { "FS", FS_AliasSet, FS_SubRegsSet, FS_SuperRegsSet },
- { "GS", GS_AliasSet, GS_SubRegsSet, GS_SuperRegsSet },
- { "IP", IP_AliasSet, IP_SubRegsSet, IP_SuperRegsSet },
- { "MM0", MM0_AliasSet, MM0_SubRegsSet, MM0_SuperRegsSet },
- { "MM1", MM1_AliasSet, MM1_SubRegsSet, MM1_SuperRegsSet },
- { "MM2", MM2_AliasSet, MM2_SubRegsSet, MM2_SuperRegsSet },
- { "MM3", MM3_AliasSet, MM3_SubRegsSet, MM3_SuperRegsSet },
- { "MM4", MM4_AliasSet, MM4_SubRegsSet, MM4_SuperRegsSet },
- { "MM5", MM5_AliasSet, MM5_SubRegsSet, MM5_SuperRegsSet },
- { "MM6", MM6_AliasSet, MM6_SubRegsSet, MM6_SuperRegsSet },
- { "MM7", MM7_AliasSet, MM7_SubRegsSet, MM7_SuperRegsSet },
- { "R10", R10_AliasSet, R10_SubRegsSet, R10_SuperRegsSet },
- { "R10B", R10B_AliasSet, R10B_SubRegsSet, R10B_SuperRegsSet },
+ { "FP0", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "FP1", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "FP2", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "FP3", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "FP4", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "FP5", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "FP6", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "FS", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "GS", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "IP", IP_AliasSet, Empty_SubRegsSet, IP_SuperRegsSet },
+ { "MM0", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "MM1", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "MM2", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "MM3", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "MM4", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "MM5", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "MM6", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "MM7", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "R8", R8_AliasSet, R8_SubRegsSet, Empty_SuperRegsSet },
+ { "R8B", R8B_AliasSet, Empty_SubRegsSet, R8B_SuperRegsSet },
+ { "R8D", R8D_AliasSet, R8D_SubRegsSet, R8D_SuperRegsSet },
+ { "R8W", R8W_AliasSet, R8W_SubRegsSet, R8W_SuperRegsSet },
+ { "R9", R9_AliasSet, R9_SubRegsSet, Empty_SuperRegsSet },
+ { "R9B", R9B_AliasSet, Empty_SubRegsSet, R9B_SuperRegsSet },
+ { "R9D", R9D_AliasSet, R9D_SubRegsSet, R9D_SuperRegsSet },
+ { "R9W", R9W_AliasSet, R9W_SubRegsSet, R9W_SuperRegsSet },
+ { "R10", R10_AliasSet, R10_SubRegsSet, Empty_SuperRegsSet },
+ { "R10B", R10B_AliasSet, Empty_SubRegsSet, R10B_SuperRegsSet },
{ "R10D", R10D_AliasSet, R10D_SubRegsSet, R10D_SuperRegsSet },
{ "R10W", R10W_AliasSet, R10W_SubRegsSet, R10W_SuperRegsSet },
- { "R11", R11_AliasSet, R11_SubRegsSet, R11_SuperRegsSet },
- { "R11B", R11B_AliasSet, R11B_SubRegsSet, R11B_SuperRegsSet },
+ { "R11", R11_AliasSet, R11_SubRegsSet, Empty_SuperRegsSet },
+ { "R11B", R11B_AliasSet, Empty_SubRegsSet, R11B_SuperRegsSet },
{ "R11D", R11D_AliasSet, R11D_SubRegsSet, R11D_SuperRegsSet },
{ "R11W", R11W_AliasSet, R11W_SubRegsSet, R11W_SuperRegsSet },
- { "R12", R12_AliasSet, R12_SubRegsSet, R12_SuperRegsSet },
- { "R12B", R12B_AliasSet, R12B_SubRegsSet, R12B_SuperRegsSet },
+ { "R12", R12_AliasSet, R12_SubRegsSet, Empty_SuperRegsSet },
+ { "R12B", R12B_AliasSet, Empty_SubRegsSet, R12B_SuperRegsSet },
{ "R12D", R12D_AliasSet, R12D_SubRegsSet, R12D_SuperRegsSet },
{ "R12W", R12W_AliasSet, R12W_SubRegsSet, R12W_SuperRegsSet },
- { "R13", R13_AliasSet, R13_SubRegsSet, R13_SuperRegsSet },
- { "R13B", R13B_AliasSet, R13B_SubRegsSet, R13B_SuperRegsSet },
+ { "R13", R13_AliasSet, R13_SubRegsSet, Empty_SuperRegsSet },
+ { "R13B", R13B_AliasSet, Empty_SubRegsSet, R13B_SuperRegsSet },
{ "R13D", R13D_AliasSet, R13D_SubRegsSet, R13D_SuperRegsSet },
{ "R13W", R13W_AliasSet, R13W_SubRegsSet, R13W_SuperRegsSet },
- { "R14", R14_AliasSet, R14_SubRegsSet, R14_SuperRegsSet },
- { "R14B", R14B_AliasSet, R14B_SubRegsSet, R14B_SuperRegsSet },
+ { "R14", R14_AliasSet, R14_SubRegsSet, Empty_SuperRegsSet },
+ { "R14B", R14B_AliasSet, Empty_SubRegsSet, R14B_SuperRegsSet },
{ "R14D", R14D_AliasSet, R14D_SubRegsSet, R14D_SuperRegsSet },
{ "R14W", R14W_AliasSet, R14W_SubRegsSet, R14W_SuperRegsSet },
- { "R15", R15_AliasSet, R15_SubRegsSet, R15_SuperRegsSet },
- { "R15B", R15B_AliasSet, R15B_SubRegsSet, R15B_SuperRegsSet },
+ { "R15", R15_AliasSet, R15_SubRegsSet, Empty_SuperRegsSet },
+ { "R15B", R15B_AliasSet, Empty_SubRegsSet, R15B_SuperRegsSet },
{ "R15D", R15D_AliasSet, R15D_SubRegsSet, R15D_SuperRegsSet },
{ "R15W", R15W_AliasSet, R15W_SubRegsSet, R15W_SuperRegsSet },
- { "R8", R8_AliasSet, R8_SubRegsSet, R8_SuperRegsSet },
- { "R8B", R8B_AliasSet, R8B_SubRegsSet, R8B_SuperRegsSet },
- { "R8D", R8D_AliasSet, R8D_SubRegsSet, R8D_SuperRegsSet },
- { "R8W", R8W_AliasSet, R8W_SubRegsSet, R8W_SuperRegsSet },
- { "R9", R9_AliasSet, R9_SubRegsSet, R9_SuperRegsSet },
- { "R9B", R9B_AliasSet, R9B_SubRegsSet, R9B_SuperRegsSet },
- { "R9D", R9D_AliasSet, R9D_SubRegsSet, R9D_SuperRegsSet },
- { "R9W", R9W_AliasSet, R9W_SubRegsSet, R9W_SuperRegsSet },
- { "RAX", RAX_AliasSet, RAX_SubRegsSet, RAX_SuperRegsSet },
- { "RBP", RBP_AliasSet, RBP_SubRegsSet, RBP_SuperRegsSet },
- { "RBX", RBX_AliasSet, RBX_SubRegsSet, RBX_SuperRegsSet },
- { "RCR0", RCR0_AliasSet, RCR0_SubRegsSet, RCR0_SuperRegsSet },
- { "RCR1", RCR1_AliasSet, RCR1_SubRegsSet, RCR1_SuperRegsSet },
- { "RCR2", RCR2_AliasSet, RCR2_SubRegsSet, RCR2_SuperRegsSet },
- { "RCR3", RCR3_AliasSet, RCR3_SubRegsSet, RCR3_SuperRegsSet },
- { "RCR4", RCR4_AliasSet, RCR4_SubRegsSet, RCR4_SuperRegsSet },
- { "RCR5", RCR5_AliasSet, RCR5_SubRegsSet, RCR5_SuperRegsSet },
- { "RCR6", RCR6_AliasSet, RCR6_SubRegsSet, RCR6_SuperRegsSet },
- { "RCR7", RCR7_AliasSet, RCR7_SubRegsSet, RCR7_SuperRegsSet },
- { "RCR8", RCR8_AliasSet, RCR8_SubRegsSet, RCR8_SuperRegsSet },
- { "RCX", RCX_AliasSet, RCX_SubRegsSet, RCX_SuperRegsSet },
- { "RDI", RDI_AliasSet, RDI_SubRegsSet, RDI_SuperRegsSet },
- { "RDX", RDX_AliasSet, RDX_SubRegsSet, RDX_SuperRegsSet },
- { "RIP", RIP_AliasSet, RIP_SubRegsSet, RIP_SuperRegsSet },
- { "RSI", RSI_AliasSet, RSI_SubRegsSet, RSI_SuperRegsSet },
- { "RSP", RSP_AliasSet, RSP_SubRegsSet, RSP_SuperRegsSet },
+ { "RAX", RAX_AliasSet, RAX_SubRegsSet, Empty_SuperRegsSet },
+ { "RBP", RBP_AliasSet, RBP_SubRegsSet, Empty_SuperRegsSet },
+ { "RBX", RBX_AliasSet, RBX_SubRegsSet, Empty_SuperRegsSet },
+ { "RCX", RCX_AliasSet, RCX_SubRegsSet, Empty_SuperRegsSet },
+ { "RDI", RDI_AliasSet, RDI_SubRegsSet, Empty_SuperRegsSet },
+ { "RDX", RDX_AliasSet, RDX_SubRegsSet, Empty_SuperRegsSet },
+ { "RIP", RIP_AliasSet, RIP_SubRegsSet, Empty_SuperRegsSet },
+ { "RIZ", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "RSI", RSI_AliasSet, RSI_SubRegsSet, Empty_SuperRegsSet },
+ { "RSP", RSP_AliasSet, RSP_SubRegsSet, Empty_SuperRegsSet },
{ "SI", SI_AliasSet, SI_SubRegsSet, SI_SuperRegsSet },
- { "SIL", SIL_AliasSet, SIL_SubRegsSet, SIL_SuperRegsSet },
+ { "SIL", SIL_AliasSet, Empty_SubRegsSet, SIL_SuperRegsSet },
{ "SP", SP_AliasSet, SP_SubRegsSet, SP_SuperRegsSet },
- { "SPL", SPL_AliasSet, SPL_SubRegsSet, SPL_SuperRegsSet },
- { "SS", SS_AliasSet, SS_SubRegsSet, SS_SuperRegsSet },
- { "ST0", ST0_AliasSet, ST0_SubRegsSet, ST0_SuperRegsSet },
- { "ST1", ST1_AliasSet, ST1_SubRegsSet, ST1_SuperRegsSet },
- { "ST2", ST2_AliasSet, ST2_SubRegsSet, ST2_SuperRegsSet },
- { "ST3", ST3_AliasSet, ST3_SubRegsSet, ST3_SuperRegsSet },
- { "ST4", ST4_AliasSet, ST4_SubRegsSet, ST4_SuperRegsSet },
- { "ST5", ST5_AliasSet, ST5_SubRegsSet, ST5_SuperRegsSet },
- { "ST6", ST6_AliasSet, ST6_SubRegsSet, ST6_SuperRegsSet },
- { "ST7", ST7_AliasSet, ST7_SubRegsSet, ST7_SuperRegsSet },
- { "XMM0", XMM0_AliasSet, XMM0_SubRegsSet, XMM0_SuperRegsSet },
- { "XMM1", XMM1_AliasSet, XMM1_SubRegsSet, XMM1_SuperRegsSet },
- { "XMM10", XMM10_AliasSet, XMM10_SubRegsSet, XMM10_SuperRegsSet },
- { "XMM11", XMM11_AliasSet, XMM11_SubRegsSet, XMM11_SuperRegsSet },
- { "XMM12", XMM12_AliasSet, XMM12_SubRegsSet, XMM12_SuperRegsSet },
- { "XMM13", XMM13_AliasSet, XMM13_SubRegsSet, XMM13_SuperRegsSet },
- { "XMM14", XMM14_AliasSet, XMM14_SubRegsSet, XMM14_SuperRegsSet },
- { "XMM15", XMM15_AliasSet, XMM15_SubRegsSet, XMM15_SuperRegsSet },
- { "XMM2", XMM2_AliasSet, XMM2_SubRegsSet, XMM2_SuperRegsSet },
- { "XMM3", XMM3_AliasSet, XMM3_SubRegsSet, XMM3_SuperRegsSet },
- { "XMM4", XMM4_AliasSet, XMM4_SubRegsSet, XMM4_SuperRegsSet },
- { "XMM5", XMM5_AliasSet, XMM5_SubRegsSet, XMM5_SuperRegsSet },
- { "XMM6", XMM6_AliasSet, XMM6_SubRegsSet, XMM6_SuperRegsSet },
- { "XMM7", XMM7_AliasSet, XMM7_SubRegsSet, XMM7_SuperRegsSet },
- { "XMM8", XMM8_AliasSet, XMM8_SubRegsSet, XMM8_SuperRegsSet },
- { "XMM9", XMM9_AliasSet, XMM9_SubRegsSet, XMM9_SuperRegsSet },
- { "YMM0", YMM0_AliasSet, YMM0_SubRegsSet, YMM0_SuperRegsSet },
- { "YMM1", YMM1_AliasSet, YMM1_SubRegsSet, YMM1_SuperRegsSet },
- { "YMM10", YMM10_AliasSet, YMM10_SubRegsSet, YMM10_SuperRegsSet },
- { "YMM11", YMM11_AliasSet, YMM11_SubRegsSet, YMM11_SuperRegsSet },
- { "YMM12", YMM12_AliasSet, YMM12_SubRegsSet, YMM12_SuperRegsSet },
- { "YMM13", YMM13_AliasSet, YMM13_SubRegsSet, YMM13_SuperRegsSet },
- { "YMM14", YMM14_AliasSet, YMM14_SubRegsSet, YMM14_SuperRegsSet },
- { "YMM15", YMM15_AliasSet, YMM15_SubRegsSet, YMM15_SuperRegsSet },
- { "YMM2", YMM2_AliasSet, YMM2_SubRegsSet, YMM2_SuperRegsSet },
- { "YMM3", YMM3_AliasSet, YMM3_SubRegsSet, YMM3_SuperRegsSet },
- { "YMM4", YMM4_AliasSet, YMM4_SubRegsSet, YMM4_SuperRegsSet },
- { "YMM5", YMM5_AliasSet, YMM5_SubRegsSet, YMM5_SuperRegsSet },
- { "YMM6", YMM6_AliasSet, YMM6_SubRegsSet, YMM6_SuperRegsSet },
- { "YMM7", YMM7_AliasSet, YMM7_SubRegsSet, YMM7_SuperRegsSet },
- { "YMM8", YMM8_AliasSet, YMM8_SubRegsSet, YMM8_SuperRegsSet },
- { "YMM9", YMM9_AliasSet, YMM9_SubRegsSet, YMM9_SuperRegsSet },
- };
+ { "SPL", SPL_AliasSet, Empty_SubRegsSet, SPL_SuperRegsSet },
+ { "SS", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "ST0", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "ST1", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "ST2", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "ST3", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "ST4", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "ST5", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "ST6", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "ST7", Empty_AliasSet, Empty_SubRegsSet, Empty_SuperRegsSet },
+ { "XMM0", XMM0_AliasSet, Empty_SubRegsSet, XMM0_SuperRegsSet },
+ { "XMM1", XMM1_AliasSet, Empty_SubRegsSet, XMM1_SuperRegsSet },
+ { "XMM2", XMM2_AliasSet, Empty_SubRegsSet, XMM2_SuperRegsSet },
+ { "XMM3", XMM3_AliasSet, Empty_SubRegsSet, XMM3_SuperRegsSet },
+ { "XMM4", XMM4_AliasSet, Empty_SubRegsSet, XMM4_SuperRegsSet },
+ { "XMM5", XMM5_AliasSet, Empty_SubRegsSet, XMM5_SuperRegsSet },
+ { "XMM6", XMM6_AliasSet, Empty_SubRegsSet, XMM6_SuperRegsSet },
+ { "XMM7", XMM7_AliasSet, Empty_SubRegsSet, XMM7_SuperRegsSet },
+ { "XMM8", XMM8_AliasSet, Empty_SubRegsSet, XMM8_SuperRegsSet },
+ { "XMM9", XMM9_AliasSet, Empty_SubRegsSet, XMM9_SuperRegsSet },
+ { "XMM10", XMM10_AliasSet, Empty_SubRegsSet, XMM10_SuperRegsSet },
+ { "XMM11", XMM11_AliasSet, Empty_SubRegsSet, XMM11_SuperRegsSet },
+ { "XMM12", XMM12_AliasSet, Empty_SubRegsSet, XMM12_SuperRegsSet },
+ { "XMM13", XMM13_AliasSet, Empty_SubRegsSet, XMM13_SuperRegsSet },
+ { "XMM14", XMM14_AliasSet, Empty_SubRegsSet, XMM14_SuperRegsSet },
+ { "XMM15", XMM15_AliasSet, Empty_SubRegsSet, XMM15_SuperRegsSet },
+ { "YMM0", YMM0_AliasSet, YMM0_SubRegsSet, Empty_SuperRegsSet },
+ { "YMM1", YMM1_AliasSet, YMM1_SubRegsSet, Empty_SuperRegsSet },
+ { "YMM2", YMM2_AliasSet, YMM2_SubRegsSet, Empty_SuperRegsSet },
+ { "YMM3", YMM3_AliasSet, YMM3_SubRegsSet, Empty_SuperRegsSet },
+ { "YMM4", YMM4_AliasSet, YMM4_SubRegsSet, Empty_SuperRegsSet },
+ { "YMM5", YMM5_AliasSet, YMM5_SubRegsSet, Empty_SuperRegsSet },
+ { "YMM6", YMM6_AliasSet, YMM6_SubRegsSet, Empty_SuperRegsSet },
+ { "YMM7", YMM7_AliasSet, YMM7_SubRegsSet, Empty_SuperRegsSet },
+ { "YMM8", YMM8_AliasSet, YMM8_SubRegsSet, Empty_SuperRegsSet },
+ { "YMM9", YMM9_AliasSet, YMM9_SubRegsSet, Empty_SuperRegsSet },
+ { "YMM10", YMM10_AliasSet, YMM10_SubRegsSet, Empty_SuperRegsSet },
+ { "YMM11", YMM11_AliasSet, YMM11_SubRegsSet, Empty_SuperRegsSet },
+ { "YMM12", YMM12_AliasSet, YMM12_SubRegsSet, Empty_SuperRegsSet },
+ { "YMM13", YMM13_AliasSet, YMM13_SubRegsSet, Empty_SuperRegsSet },
+ { "YMM14", YMM14_AliasSet, YMM14_SubRegsSet, Empty_SuperRegsSet },
+ { "YMM15", YMM15_AliasSet, YMM15_SubRegsSet, Empty_SuperRegsSet },
+ };
+
+ const char *const SubRegIndexTable[] = { "sub_8bit", "sub_8bit_hi", "sub_16bit", "sub_32bit", "sub_sd", "sub_ss", "sub_xmm" };
+
}
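// --- Illustrative sketch (editor's addition, not part of the generated
// X86GenRegisterInfo.inc): the 0.96.5 TableGen output replaces the per-register
// empty alias/sub/super-register arrays with shared Empty_* sets and adds the
// named SubRegIndexTable above. Assuming, as the "default: return 0" cases in
// getSubReg() below suggest, that sub-register indices are 1-based with 0
// reserved for "no sub-register", a minimal standalone lookup over the same
// table shape could look like this (lookup_subreg_name and kSubRegNames are
// hypothetical names):
//
// #include <cstddef>
// static const char *const kSubRegNames[] = { "sub_8bit", "sub_8bit_hi",
//     "sub_16bit", "sub_32bit", "sub_sd", "sub_ss", "sub_xmm" };
// static const char *lookup_subreg_name(unsigned Index) {
//   const std::size_t N = sizeof(kSubRegNames) / sizeof(kSubRegNames[0]);
//   // Index 0 is the "no sub-register" sentinel; valid indices are 1..N.
//   return (Index >= 1 && Index <= N) ? kSubRegNames[Index - 1] : "<none>";
// }
// --- end of editor's sketch ---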
unsigned X86GenRegisterInfo::getSubReg(unsigned RegNo, unsigned Index) const {
@@ -4081,589 +3388,602 @@ unsigned X86GenRegisterInfo::getSubReg(unsigned RegNo, unsigned Index) const {
case X86::AX:
switch (Index) {
default: return 0;
- case 1: return X86::AL;
- case 2: return X86::AH;
+ case X86::sub_8bit: return X86::AL;
+ case X86::sub_8bit_hi: return X86::AH;
};
break;
- case X86::DX:
+ case X86::BP:
switch (Index) {
default: return 0;
- case 1: return X86::DL;
- case 2: return X86::DH;
+ case X86::sub_8bit: return X86::BPL;
};
break;
- case X86::CX:
+ case X86::BX:
switch (Index) {
default: return 0;
- case 1: return X86::CL;
- case 2: return X86::CH;
+ case X86::sub_8bit: return X86::BL;
+ case X86::sub_8bit_hi: return X86::BH;
};
break;
- case X86::BX:
+ case X86::CX:
switch (Index) {
default: return 0;
- case 1: return X86::BL;
- case 2: return X86::BH;
+ case X86::sub_8bit: return X86::CL;
+ case X86::sub_8bit_hi: return X86::CH;
};
break;
- case X86::SI:
+ case X86::DI:
switch (Index) {
default: return 0;
- case 1: return X86::SIL;
+ case X86::sub_8bit: return X86::DIL;
};
break;
- case X86::DI:
+ case X86::DX:
switch (Index) {
default: return 0;
- case 1: return X86::DIL;
+ case X86::sub_8bit: return X86::DL;
+ case X86::sub_8bit_hi: return X86::DH;
};
break;
- case X86::BP:
+ case X86::EAX:
switch (Index) {
default: return 0;
- case 1: return X86::BPL;
+ case X86::sub_8bit: return X86::AL;
+ case X86::sub_8bit_hi: return X86::AH;
+ case X86::sub_16bit: return X86::AX;
};
break;
- case X86::SP:
+ case X86::EBP:
switch (Index) {
default: return 0;
- case 1: return X86::SPL;
+ case X86::sub_8bit: return X86::BPL;
+ case X86::sub_16bit: return X86::BP;
};
break;
- case X86::R8W:
+ case X86::EBX:
switch (Index) {
default: return 0;
- case 1: return X86::R8B;
+ case X86::sub_8bit: return X86::BL;
+ case X86::sub_8bit_hi: return X86::BH;
+ case X86::sub_16bit: return X86::BX;
};
break;
- case X86::R9W:
+ case X86::ECX:
switch (Index) {
default: return 0;
- case 1: return X86::R9B;
+ case X86::sub_8bit: return X86::CL;
+ case X86::sub_8bit_hi: return X86::CH;
+ case X86::sub_16bit: return X86::CX;
};
break;
- case X86::R10W:
+ case X86::EDI:
switch (Index) {
default: return 0;
- case 1: return X86::R10B;
+ case X86::sub_8bit: return X86::DIL;
+ case X86::sub_16bit: return X86::DI;
};
break;
- case X86::R11W:
+ case X86::EDX:
switch (Index) {
default: return 0;
- case 1: return X86::R11B;
+ case X86::sub_8bit: return X86::DL;
+ case X86::sub_8bit_hi: return X86::DH;
+ case X86::sub_16bit: return X86::DX;
};
break;
- case X86::R12W:
+ case X86::EIP:
switch (Index) {
default: return 0;
- case 1: return X86::R12B;
+ case X86::sub_16bit: return X86::IP;
};
break;
- case X86::R13W:
+ case X86::ESI:
switch (Index) {
default: return 0;
- case 1: return X86::R13B;
+ case X86::sub_8bit: return X86::SIL;
+ case X86::sub_16bit: return X86::SI;
};
break;
- case X86::R14W:
+ case X86::ESP:
switch (Index) {
default: return 0;
- case 1: return X86::R14B;
+ case X86::sub_8bit: return X86::SPL;
+ case X86::sub_16bit: return X86::SP;
};
break;
- case X86::R15W:
+ case X86::R8:
switch (Index) {
default: return 0;
- case 1: return X86::R15B;
+ case X86::sub_8bit: return X86::R8B;
+ case X86::sub_16bit: return X86::R8W;
+ case X86::sub_32bit: return X86::R8D;
};
break;
- case X86::EAX:
+ case X86::R8D:
switch (Index) {
default: return 0;
- case 1: return X86::AL;
- case 2: return X86::AH;
- case 3: return X86::AX;
+ case X86::sub_8bit: return X86::R8B;
+ case X86::sub_16bit: return X86::R8W;
};
break;
- case X86::EDX:
+ case X86::R8W:
switch (Index) {
default: return 0;
- case 1: return X86::DL;
- case 2: return X86::DH;
- case 3: return X86::DX;
+ case X86::sub_8bit: return X86::R8B;
};
break;
- case X86::ECX:
+ case X86::R9:
switch (Index) {
default: return 0;
- case 1: return X86::CL;
- case 2: return X86::CH;
- case 3: return X86::CX;
+ case X86::sub_8bit: return X86::R9B;
+ case X86::sub_16bit: return X86::R9W;
+ case X86::sub_32bit: return X86::R9D;
};
break;
- case X86::EBX:
+ case X86::R9D:
switch (Index) {
default: return 0;
- case 1: return X86::BL;
- case 2: return X86::BH;
- case 3: return X86::BX;
+ case X86::sub_8bit: return X86::R9B;
+ case X86::sub_16bit: return X86::R9W;
};
break;
- case X86::ESI:
+ case X86::R9W:
switch (Index) {
default: return 0;
- case 1: return X86::SIL;
- case 3: return X86::SI;
+ case X86::sub_8bit: return X86::R9B;
};
break;
- case X86::EDI:
+ case X86::R10:
switch (Index) {
default: return 0;
- case 1: return X86::DIL;
- case 3: return X86::DI;
+ case X86::sub_8bit: return X86::R10B;
+ case X86::sub_16bit: return X86::R10W;
+ case X86::sub_32bit: return X86::R10D;
};
break;
- case X86::EBP:
+ case X86::R10D:
switch (Index) {
default: return 0;
- case 1: return X86::BPL;
- case 3: return X86::BP;
+ case X86::sub_8bit: return X86::R10B;
+ case X86::sub_16bit: return X86::R10W;
};
break;
- case X86::ESP:
+ case X86::R10W:
switch (Index) {
default: return 0;
- case 1: return X86::SPL;
- case 3: return X86::SP;
+ case X86::sub_8bit: return X86::R10B;
};
break;
- case X86::R8D:
+ case X86::R11:
switch (Index) {
default: return 0;
- case 1: return X86::R8B;
- case 3: return X86::R8W;
+ case X86::sub_8bit: return X86::R11B;
+ case X86::sub_16bit: return X86::R11W;
+ case X86::sub_32bit: return X86::R11D;
};
break;
- case X86::R9D:
+ case X86::R11D:
switch (Index) {
default: return 0;
- case 1: return X86::R9B;
- case 3: return X86::R9W;
+ case X86::sub_8bit: return X86::R11B;
+ case X86::sub_16bit: return X86::R11W;
};
break;
- case X86::R10D:
+ case X86::R11W:
switch (Index) {
default: return 0;
- case 1: return X86::R10B;
- case 3: return X86::R10W;
+ case X86::sub_8bit: return X86::R11B;
};
break;
- case X86::R11D:
+ case X86::R12:
switch (Index) {
default: return 0;
- case 1: return X86::R11B;
- case 3: return X86::R11W;
+ case X86::sub_8bit: return X86::R12B;
+ case X86::sub_16bit: return X86::R12W;
+ case X86::sub_32bit: return X86::R12D;
};
break;
case X86::R12D:
switch (Index) {
default: return 0;
- case 1: return X86::R12B;
- case 3: return X86::R12W;
+ case X86::sub_8bit: return X86::R12B;
+ case X86::sub_16bit: return X86::R12W;
+ };
+ break;
+ case X86::R12W:
+ switch (Index) {
+ default: return 0;
+ case X86::sub_8bit: return X86::R12B;
+ };
+ break;
+ case X86::R13:
+ switch (Index) {
+ default: return 0;
+ case X86::sub_8bit: return X86::R13B;
+ case X86::sub_16bit: return X86::R13W;
+ case X86::sub_32bit: return X86::R13D;
};
break;
case X86::R13D:
switch (Index) {
default: return 0;
- case 1: return X86::R13B;
- case 3: return X86::R13W;
+ case X86::sub_8bit: return X86::R13B;
+ case X86::sub_16bit: return X86::R13W;
};
break;
- case X86::R14D:
+ case X86::R13W:
switch (Index) {
default: return 0;
- case 1: return X86::R14B;
- case 3: return X86::R14W;
+ case X86::sub_8bit: return X86::R13B;
};
break;
- case X86::R15D:
+ case X86::R14:
switch (Index) {
default: return 0;
- case 1: return X86::R15B;
- case 3: return X86::R15W;
+ case X86::sub_8bit: return X86::R14B;
+ case X86::sub_16bit: return X86::R14W;
+ case X86::sub_32bit: return X86::R14D;
};
break;
- case X86::RAX:
+ case X86::R14D:
switch (Index) {
default: return 0;
- case 1: return X86::AL;
- case 2: return X86::AH;
- case 3: return X86::AX;
- case 4: return X86::EAX;
+ case X86::sub_8bit: return X86::R14B;
+ case X86::sub_16bit: return X86::R14W;
};
break;
- case X86::RDX:
+ case X86::R14W:
switch (Index) {
default: return 0;
- case 1: return X86::DL;
- case 2: return X86::DH;
- case 3: return X86::DX;
- case 4: return X86::EDX;
+ case X86::sub_8bit: return X86::R14B;
};
break;
- case X86::RCX:
+ case X86::R15:
switch (Index) {
default: return 0;
- case 1: return X86::CL;
- case 2: return X86::CH;
- case 3: return X86::CX;
- case 4: return X86::ECX;
+ case X86::sub_8bit: return X86::R15B;
+ case X86::sub_16bit: return X86::R15W;
+ case X86::sub_32bit: return X86::R15D;
};
break;
- case X86::RBX:
+ case X86::R15D:
switch (Index) {
default: return 0;
- case 1: return X86::BL;
- case 2: return X86::BH;
- case 3: return X86::BX;
- case 4: return X86::EBX;
+ case X86::sub_8bit: return X86::R15B;
+ case X86::sub_16bit: return X86::R15W;
};
break;
- case X86::RSI:
+ case X86::R15W:
switch (Index) {
default: return 0;
- case 1: return X86::SIL;
- case 3: return X86::SI;
- case 4: return X86::ESI;
+ case X86::sub_8bit: return X86::R15B;
};
break;
- case X86::RDI:
+ case X86::RAX:
switch (Index) {
default: return 0;
- case 1: return X86::DIL;
- case 3: return X86::DI;
- case 4: return X86::EDI;
+ case X86::sub_8bit: return X86::AL;
+ case X86::sub_8bit_hi: return X86::AH;
+ case X86::sub_16bit: return X86::AX;
+ case X86::sub_32bit: return X86::EAX;
};
break;
case X86::RBP:
switch (Index) {
default: return 0;
- case 1: return X86::BPL;
- case 3: return X86::BP;
- case 4: return X86::EBP;
+ case X86::sub_8bit: return X86::BPL;
+ case X86::sub_16bit: return X86::BP;
+ case X86::sub_32bit: return X86::EBP;
};
break;
- case X86::RSP:
+ case X86::RBX:
switch (Index) {
default: return 0;
- case 1: return X86::SPL;
- case 3: return X86::SP;
- case 4: return X86::ESP;
+ case X86::sub_8bit: return X86::BL;
+ case X86::sub_8bit_hi: return X86::BH;
+ case X86::sub_16bit: return X86::BX;
+ case X86::sub_32bit: return X86::EBX;
};
break;
- case X86::R8:
+ case X86::RCX:
switch (Index) {
default: return 0;
- case 1: return X86::R8B;
- case 3: return X86::R8W;
- case 4: return X86::R8D;
+ case X86::sub_8bit: return X86::CL;
+ case X86::sub_8bit_hi: return X86::CH;
+ case X86::sub_16bit: return X86::CX;
+ case X86::sub_32bit: return X86::ECX;
};
break;
- case X86::R9:
+ case X86::RDI:
switch (Index) {
default: return 0;
- case 1: return X86::R9B;
- case 3: return X86::R9W;
- case 4: return X86::R9D;
+ case X86::sub_8bit: return X86::DIL;
+ case X86::sub_16bit: return X86::DI;
+ case X86::sub_32bit: return X86::EDI;
};
break;
- case X86::R10:
+ case X86::RDX:
switch (Index) {
default: return 0;
- case 1: return X86::R10B;
- case 3: return X86::R10W;
- case 4: return X86::R10D;
+ case X86::sub_8bit: return X86::DL;
+ case X86::sub_8bit_hi: return X86::DH;
+ case X86::sub_16bit: return X86::DX;
+ case X86::sub_32bit: return X86::EDX;
};
break;
- case X86::R11:
+ case X86::RIP:
switch (Index) {
default: return 0;
- case 1: return X86::R11B;
- case 3: return X86::R11W;
- case 4: return X86::R11D;
+ case X86::sub_16bit: return X86::IP;
+ case X86::sub_32bit: return X86::EIP;
};
break;
- case X86::R12:
+ case X86::RSI:
switch (Index) {
default: return 0;
- case 1: return X86::R12B;
- case 3: return X86::R12W;
- case 4: return X86::R12D;
+ case X86::sub_8bit: return X86::SIL;
+ case X86::sub_16bit: return X86::SI;
+ case X86::sub_32bit: return X86::ESI;
};
break;
- case X86::R13:
+ case X86::RSP:
switch (Index) {
default: return 0;
- case 1: return X86::R13B;
- case 3: return X86::R13W;
- case 4: return X86::R13D;
+ case X86::sub_8bit: return X86::SPL;
+ case X86::sub_16bit: return X86::SP;
+ case X86::sub_32bit: return X86::ESP;
};
break;
- case X86::R14:
+ case X86::SI:
switch (Index) {
default: return 0;
- case 1: return X86::R14B;
- case 3: return X86::R14W;
- case 4: return X86::R14D;
+ case X86::sub_8bit: return X86::SIL;
};
break;
- case X86::R15:
+ case X86::SP:
switch (Index) {
default: return 0;
- case 1: return X86::R15B;
- case 3: return X86::R15W;
- case 4: return X86::R15D;
+ case X86::sub_8bit: return X86::SPL;
};
break;
case X86::XMM0:
switch (Index) {
default: return 0;
- case 1: return X86::XMM0;
- case 2: return X86::XMM0;
+ case X86::sub_sd: return X86::XMM0;
+ case X86::sub_ss: return X86::XMM0;
};
break;
case X86::XMM1:
switch (Index) {
default: return 0;
- case 1: return X86::XMM1;
- case 2: return X86::XMM1;
+ case X86::sub_sd: return X86::XMM1;
+ case X86::sub_ss: return X86::XMM1;
};
break;
case X86::XMM2:
switch (Index) {
default: return 0;
- case 1: return X86::XMM2;
- case 2: return X86::XMM2;
+ case X86::sub_sd: return X86::XMM2;
+ case X86::sub_ss: return X86::XMM2;
};
break;
case X86::XMM3:
switch (Index) {
default: return 0;
- case 1: return X86::XMM3;
- case 2: return X86::XMM3;
+ case X86::sub_sd: return X86::XMM3;
+ case X86::sub_ss: return X86::XMM3;
};
break;
case X86::XMM4:
switch (Index) {
default: return 0;
- case 1: return X86::XMM4;
- case 2: return X86::XMM4;
+ case X86::sub_sd: return X86::XMM4;
+ case X86::sub_ss: return X86::XMM4;
};
break;
case X86::XMM5:
switch (Index) {
default: return 0;
- case 1: return X86::XMM5;
- case 2: return X86::XMM5;
+ case X86::sub_sd: return X86::XMM5;
+ case X86::sub_ss: return X86::XMM5;
};
break;
case X86::XMM6:
switch (Index) {
default: return 0;
- case 1: return X86::XMM6;
- case 2: return X86::XMM6;
+ case X86::sub_sd: return X86::XMM6;
+ case X86::sub_ss: return X86::XMM6;
};
break;
case X86::XMM7:
switch (Index) {
default: return 0;
- case 1: return X86::XMM7;
- case 2: return X86::XMM7;
+ case X86::sub_sd: return X86::XMM7;
+ case X86::sub_ss: return X86::XMM7;
};
break;
case X86::XMM8:
switch (Index) {
default: return 0;
- case 1: return X86::XMM8;
- case 2: return X86::XMM8;
+ case X86::sub_sd: return X86::XMM8;
+ case X86::sub_ss: return X86::XMM8;
};
break;
case X86::XMM9:
switch (Index) {
default: return 0;
- case 1: return X86::XMM9;
- case 2: return X86::XMM9;
+ case X86::sub_sd: return X86::XMM9;
+ case X86::sub_ss: return X86::XMM9;
};
break;
case X86::XMM10:
switch (Index) {
default: return 0;
- case 1: return X86::XMM10;
- case 2: return X86::XMM10;
+ case X86::sub_sd: return X86::XMM10;
+ case X86::sub_ss: return X86::XMM10;
};
break;
case X86::XMM11:
switch (Index) {
default: return 0;
- case 1: return X86::XMM11;
- case 2: return X86::XMM11;
+ case X86::sub_sd: return X86::XMM11;
+ case X86::sub_ss: return X86::XMM11;
};
break;
case X86::XMM12:
switch (Index) {
default: return 0;
- case 1: return X86::XMM12;
- case 2: return X86::XMM12;
+ case X86::sub_sd: return X86::XMM12;
+ case X86::sub_ss: return X86::XMM12;
};
break;
case X86::XMM13:
switch (Index) {
default: return 0;
- case 1: return X86::XMM13;
- case 2: return X86::XMM13;
+ case X86::sub_sd: return X86::XMM13;
+ case X86::sub_ss: return X86::XMM13;
};
break;
case X86::XMM14:
switch (Index) {
default: return 0;
- case 1: return X86::XMM14;
- case 2: return X86::XMM14;
+ case X86::sub_sd: return X86::XMM14;
+ case X86::sub_ss: return X86::XMM14;
};
break;
case X86::XMM15:
switch (Index) {
default: return 0;
- case 1: return X86::XMM15;
- case 2: return X86::XMM15;
+ case X86::sub_sd: return X86::XMM15;
+ case X86::sub_ss: return X86::XMM15;
};
break;
case X86::YMM0:
switch (Index) {
default: return 0;
- case 1: return X86::XMM0;
- case 2: return X86::XMM0;
- case 3: return X86::XMM0;
+ case X86::sub_sd: return X86::XMM0;
+ case X86::sub_ss: return X86::XMM0;
+ case X86::sub_xmm: return X86::XMM0;
};
break;
case X86::YMM1:
switch (Index) {
default: return 0;
- case 1: return X86::XMM1;
- case 2: return X86::XMM1;
- case 3: return X86::XMM1;
+ case X86::sub_sd: return X86::XMM1;
+ case X86::sub_ss: return X86::XMM1;
+ case X86::sub_xmm: return X86::XMM1;
};
break;
case X86::YMM2:
switch (Index) {
default: return 0;
- case 1: return X86::XMM2;
- case 2: return X86::XMM2;
- case 3: return X86::XMM2;
+ case X86::sub_sd: return X86::XMM2;
+ case X86::sub_ss: return X86::XMM2;
+ case X86::sub_xmm: return X86::XMM2;
};
break;
case X86::YMM3:
switch (Index) {
default: return 0;
- case 1: return X86::XMM3;
- case 2: return X86::XMM3;
- case 3: return X86::XMM3;
+ case X86::sub_sd: return X86::XMM3;
+ case X86::sub_ss: return X86::XMM3;
+ case X86::sub_xmm: return X86::XMM3;
};
break;
case X86::YMM4:
switch (Index) {
default: return 0;
- case 1: return X86::XMM4;
- case 2: return X86::XMM4;
- case 3: return X86::XMM4;
+ case X86::sub_sd: return X86::XMM4;
+ case X86::sub_ss: return X86::XMM4;
+ case X86::sub_xmm: return X86::XMM4;
};
break;
case X86::YMM5:
switch (Index) {
default: return 0;
- case 1: return X86::XMM5;
- case 2: return X86::XMM5;
- case 3: return X86::XMM5;
+ case X86::sub_sd: return X86::XMM5;
+ case X86::sub_ss: return X86::XMM5;
+ case X86::sub_xmm: return X86::XMM5;
};
break;
case X86::YMM6:
switch (Index) {
default: return 0;
- case 1: return X86::XMM6;
- case 2: return X86::XMM6;
- case 3: return X86::XMM6;
+ case X86::sub_sd: return X86::XMM6;
+ case X86::sub_ss: return X86::XMM6;
+ case X86::sub_xmm: return X86::XMM6;
};
break;
case X86::YMM7:
switch (Index) {
default: return 0;
- case 1: return X86::XMM7;
- case 2: return X86::XMM7;
- case 3: return X86::XMM7;
+ case X86::sub_sd: return X86::XMM7;
+ case X86::sub_ss: return X86::XMM7;
+ case X86::sub_xmm: return X86::XMM7;
};
break;
case X86::YMM8:
switch (Index) {
default: return 0;
- case 1: return X86::XMM8;
- case 2: return X86::XMM8;
- case 3: return X86::XMM8;
+ case X86::sub_sd: return X86::XMM8;
+ case X86::sub_ss: return X86::XMM8;
+ case X86::sub_xmm: return X86::XMM8;
};
break;
case X86::YMM9:
switch (Index) {
default: return 0;
- case 1: return X86::XMM9;
- case 2: return X86::XMM9;
- case 3: return X86::XMM9;
+ case X86::sub_sd: return X86::XMM9;
+ case X86::sub_ss: return X86::XMM9;
+ case X86::sub_xmm: return X86::XMM9;
};
break;
case X86::YMM10:
switch (Index) {
default: return 0;
- case 1: return X86::XMM10;
- case 2: return X86::XMM10;
- case 3: return X86::XMM10;
+ case X86::sub_sd: return X86::XMM10;
+ case X86::sub_ss: return X86::XMM10;
+ case X86::sub_xmm: return X86::XMM10;
};
break;
case X86::YMM11:
switch (Index) {
default: return 0;
- case 1: return X86::XMM11;
- case 2: return X86::XMM11;
- case 3: return X86::XMM11;
+ case X86::sub_sd: return X86::XMM11;
+ case X86::sub_ss: return X86::XMM11;
+ case X86::sub_xmm: return X86::XMM11;
};
break;
case X86::YMM12:
switch (Index) {
default: return 0;
- case 1: return X86::XMM12;
- case 2: return X86::XMM12;
- case 3: return X86::XMM12;
+ case X86::sub_sd: return X86::XMM12;
+ case X86::sub_ss: return X86::XMM12;
+ case X86::sub_xmm: return X86::XMM12;
};
break;
case X86::YMM13:
switch (Index) {
default: return 0;
- case 1: return X86::XMM13;
- case 2: return X86::XMM13;
- case 3: return X86::XMM13;
+ case X86::sub_sd: return X86::XMM13;
+ case X86::sub_ss: return X86::XMM13;
+ case X86::sub_xmm: return X86::XMM13;
};
break;
case X86::YMM14:
switch (Index) {
default: return 0;
- case 1: return X86::XMM14;
- case 2: return X86::XMM14;
- case 3: return X86::XMM14;
+ case X86::sub_sd: return X86::XMM14;
+ case X86::sub_ss: return X86::XMM14;
+ case X86::sub_xmm: return X86::XMM14;
};
break;
case X86::YMM15:
switch (Index) {
default: return 0;
- case 1: return X86::XMM15;
- case 2: return X86::XMM15;
- case 3: return X86::XMM15;
+ case X86::sub_sd: return X86::XMM15;
+ case X86::sub_ss: return X86::XMM15;
+ case X86::sub_xmm: return X86::XMM15;
};
break;
};
@@ -4675,362 +3995,376 @@ unsigned X86GenRegisterInfo::getSubRegIndex(unsigned RegNo, unsigned SubRegNo) c
default:
return 0;
case X86::AX:
- if (SubRegNo == X86::AL) return 1;
- if (SubRegNo == X86::AH) return 2;
- return 0;
- case X86::DX:
- if (SubRegNo == X86::DL) return 1;
- if (SubRegNo == X86::DH) return 2;
+ if (SubRegNo == X86::AL) return X86::sub_8bit;
+ if (SubRegNo == X86::AH) return X86::sub_8bit_hi;
return 0;
- case X86::CX:
- if (SubRegNo == X86::CL) return 1;
- if (SubRegNo == X86::CH) return 2;
+ case X86::BP:
+ if (SubRegNo == X86::BPL) return X86::sub_8bit;
return 0;
case X86::BX:
- if (SubRegNo == X86::BL) return 1;
- if (SubRegNo == X86::BH) return 2;
+ if (SubRegNo == X86::BL) return X86::sub_8bit;
+ if (SubRegNo == X86::BH) return X86::sub_8bit_hi;
return 0;
- case X86::SI:
- if (SubRegNo == X86::SIL) return 1;
+ case X86::CX:
+ if (SubRegNo == X86::CL) return X86::sub_8bit;
+ if (SubRegNo == X86::CH) return X86::sub_8bit_hi;
return 0;
case X86::DI:
- if (SubRegNo == X86::DIL) return 1;
+ if (SubRegNo == X86::DIL) return X86::sub_8bit;
return 0;
- case X86::BP:
- if (SubRegNo == X86::BPL) return 1;
- return 0;
- case X86::SP:
- if (SubRegNo == X86::SPL) return 1;
+ case X86::DX:
+ if (SubRegNo == X86::DL) return X86::sub_8bit;
+ if (SubRegNo == X86::DH) return X86::sub_8bit_hi;
return 0;
- case X86::R8W:
- if (SubRegNo == X86::R8B) return 1;
+ case X86::EAX:
+ if (SubRegNo == X86::AL) return X86::sub_8bit;
+ if (SubRegNo == X86::AH) return X86::sub_8bit_hi;
+ if (SubRegNo == X86::AX) return X86::sub_16bit;
return 0;
- case X86::R9W:
- if (SubRegNo == X86::R9B) return 1;
+ case X86::EBP:
+ if (SubRegNo == X86::BPL) return X86::sub_8bit;
+ if (SubRegNo == X86::BP) return X86::sub_16bit;
return 0;
- case X86::R10W:
- if (SubRegNo == X86::R10B) return 1;
+ case X86::EBX:
+ if (SubRegNo == X86::BL) return X86::sub_8bit;
+ if (SubRegNo == X86::BH) return X86::sub_8bit_hi;
+ if (SubRegNo == X86::BX) return X86::sub_16bit;
return 0;
- case X86::R11W:
- if (SubRegNo == X86::R11B) return 1;
+ case X86::ECX:
+ if (SubRegNo == X86::CL) return X86::sub_8bit;
+ if (SubRegNo == X86::CH) return X86::sub_8bit_hi;
+ if (SubRegNo == X86::CX) return X86::sub_16bit;
return 0;
- case X86::R12W:
- if (SubRegNo == X86::R12B) return 1;
+ case X86::EDI:
+ if (SubRegNo == X86::DIL) return X86::sub_8bit;
+ if (SubRegNo == X86::DI) return X86::sub_16bit;
return 0;
- case X86::R13W:
- if (SubRegNo == X86::R13B) return 1;
+ case X86::EDX:
+ if (SubRegNo == X86::DL) return X86::sub_8bit;
+ if (SubRegNo == X86::DH) return X86::sub_8bit_hi;
+ if (SubRegNo == X86::DX) return X86::sub_16bit;
return 0;
- case X86::R14W:
- if (SubRegNo == X86::R14B) return 1;
+ case X86::EIP:
+ if (SubRegNo == X86::IP) return X86::sub_16bit;
return 0;
- case X86::R15W:
- if (SubRegNo == X86::R15B) return 1;
+ case X86::ESI:
+ if (SubRegNo == X86::SIL) return X86::sub_8bit;
+ if (SubRegNo == X86::SI) return X86::sub_16bit;
return 0;
- case X86::EAX:
- if (SubRegNo == X86::AL) return 1;
- if (SubRegNo == X86::AH) return 2;
- if (SubRegNo == X86::AX) return 3;
+ case X86::ESP:
+ if (SubRegNo == X86::SPL) return X86::sub_8bit;
+ if (SubRegNo == X86::SP) return X86::sub_16bit;
return 0;
- case X86::EDX:
- if (SubRegNo == X86::DL) return 1;
- if (SubRegNo == X86::DH) return 2;
- if (SubRegNo == X86::DX) return 3;
+ case X86::R8:
+ if (SubRegNo == X86::R8B) return X86::sub_8bit;
+ if (SubRegNo == X86::R8W) return X86::sub_16bit;
+ if (SubRegNo == X86::R8D) return X86::sub_32bit;
return 0;
- case X86::ECX:
- if (SubRegNo == X86::CL) return 1;
- if (SubRegNo == X86::CH) return 2;
- if (SubRegNo == X86::CX) return 3;
+ case X86::R8D:
+ if (SubRegNo == X86::R8B) return X86::sub_8bit;
+ if (SubRegNo == X86::R8W) return X86::sub_16bit;
return 0;
- case X86::EBX:
- if (SubRegNo == X86::BL) return 1;
- if (SubRegNo == X86::BH) return 2;
- if (SubRegNo == X86::BX) return 3;
+ case X86::R8W:
+ if (SubRegNo == X86::R8B) return X86::sub_8bit;
return 0;
- case X86::ESI:
- if (SubRegNo == X86::SIL) return 1;
- if (SubRegNo == X86::SI) return 3;
+ case X86::R9:
+ if (SubRegNo == X86::R9B) return X86::sub_8bit;
+ if (SubRegNo == X86::R9W) return X86::sub_16bit;
+ if (SubRegNo == X86::R9D) return X86::sub_32bit;
return 0;
- case X86::EDI:
- if (SubRegNo == X86::DIL) return 1;
- if (SubRegNo == X86::DI) return 3;
+ case X86::R9D:
+ if (SubRegNo == X86::R9B) return X86::sub_8bit;
+ if (SubRegNo == X86::R9W) return X86::sub_16bit;
return 0;
- case X86::EBP:
- if (SubRegNo == X86::BPL) return 1;
- if (SubRegNo == X86::BP) return 3;
+ case X86::R9W:
+ if (SubRegNo == X86::R9B) return X86::sub_8bit;
return 0;
- case X86::ESP:
- if (SubRegNo == X86::SPL) return 1;
- if (SubRegNo == X86::SP) return 3;
+ case X86::R10:
+ if (SubRegNo == X86::R10B) return X86::sub_8bit;
+ if (SubRegNo == X86::R10W) return X86::sub_16bit;
+ if (SubRegNo == X86::R10D) return X86::sub_32bit;
return 0;
- case X86::R8D:
- if (SubRegNo == X86::R8B) return 1;
- if (SubRegNo == X86::R8W) return 3;
+ case X86::R10D:
+ if (SubRegNo == X86::R10B) return X86::sub_8bit;
+ if (SubRegNo == X86::R10W) return X86::sub_16bit;
return 0;
- case X86::R9D:
- if (SubRegNo == X86::R9B) return 1;
- if (SubRegNo == X86::R9W) return 3;
+ case X86::R10W:
+ if (SubRegNo == X86::R10B) return X86::sub_8bit;
return 0;
- case X86::R10D:
- if (SubRegNo == X86::R10B) return 1;
- if (SubRegNo == X86::R10W) return 3;
+ case X86::R11:
+ if (SubRegNo == X86::R11B) return X86::sub_8bit;
+ if (SubRegNo == X86::R11W) return X86::sub_16bit;
+ if (SubRegNo == X86::R11D) return X86::sub_32bit;
return 0;
case X86::R11D:
- if (SubRegNo == X86::R11B) return 1;
- if (SubRegNo == X86::R11W) return 3;
+ if (SubRegNo == X86::R11B) return X86::sub_8bit;
+ if (SubRegNo == X86::R11W) return X86::sub_16bit;
+ return 0;
+ case X86::R11W:
+ if (SubRegNo == X86::R11B) return X86::sub_8bit;
+ return 0;
+ case X86::R12:
+ if (SubRegNo == X86::R12B) return X86::sub_8bit;
+ if (SubRegNo == X86::R12W) return X86::sub_16bit;
+ if (SubRegNo == X86::R12D) return X86::sub_32bit;
return 0;
case X86::R12D:
- if (SubRegNo == X86::R12B) return 1;
- if (SubRegNo == X86::R12W) return 3;
+ if (SubRegNo == X86::R12B) return X86::sub_8bit;
+ if (SubRegNo == X86::R12W) return X86::sub_16bit;
+ return 0;
+ case X86::R12W:
+ if (SubRegNo == X86::R12B) return X86::sub_8bit;
+ return 0;
+ case X86::R13:
+ if (SubRegNo == X86::R13B) return X86::sub_8bit;
+ if (SubRegNo == X86::R13W) return X86::sub_16bit;
+ if (SubRegNo == X86::R13D) return X86::sub_32bit;
return 0;
case X86::R13D:
- if (SubRegNo == X86::R13B) return 1;
- if (SubRegNo == X86::R13W) return 3;
+ if (SubRegNo == X86::R13B) return X86::sub_8bit;
+ if (SubRegNo == X86::R13W) return X86::sub_16bit;
return 0;
- case X86::R14D:
- if (SubRegNo == X86::R14B) return 1;
- if (SubRegNo == X86::R14W) return 3;
+ case X86::R13W:
+ if (SubRegNo == X86::R13B) return X86::sub_8bit;
return 0;
- case X86::R15D:
- if (SubRegNo == X86::R15B) return 1;
- if (SubRegNo == X86::R15W) return 3;
+ case X86::R14:
+ if (SubRegNo == X86::R14B) return X86::sub_8bit;
+ if (SubRegNo == X86::R14W) return X86::sub_16bit;
+ if (SubRegNo == X86::R14D) return X86::sub_32bit;
return 0;
- case X86::RAX:
- if (SubRegNo == X86::AL) return 1;
- if (SubRegNo == X86::AH) return 2;
- if (SubRegNo == X86::AX) return 3;
- if (SubRegNo == X86::EAX) return 4;
+ case X86::R14D:
+ if (SubRegNo == X86::R14B) return X86::sub_8bit;
+ if (SubRegNo == X86::R14W) return X86::sub_16bit;
return 0;
- case X86::RDX:
- if (SubRegNo == X86::DL) return 1;
- if (SubRegNo == X86::DH) return 2;
- if (SubRegNo == X86::DX) return 3;
- if (SubRegNo == X86::EDX) return 4;
+ case X86::R14W:
+ if (SubRegNo == X86::R14B) return X86::sub_8bit;
return 0;
- case X86::RCX:
- if (SubRegNo == X86::CL) return 1;
- if (SubRegNo == X86::CH) return 2;
- if (SubRegNo == X86::CX) return 3;
- if (SubRegNo == X86::ECX) return 4;
+ case X86::R15:
+ if (SubRegNo == X86::R15B) return X86::sub_8bit;
+ if (SubRegNo == X86::R15W) return X86::sub_16bit;
+ if (SubRegNo == X86::R15D) return X86::sub_32bit;
return 0;
- case X86::RBX:
- if (SubRegNo == X86::BL) return 1;
- if (SubRegNo == X86::BH) return 2;
- if (SubRegNo == X86::BX) return 3;
- if (SubRegNo == X86::EBX) return 4;
+ case X86::R15D:
+ if (SubRegNo == X86::R15B) return X86::sub_8bit;
+ if (SubRegNo == X86::R15W) return X86::sub_16bit;
return 0;
- case X86::RSI:
- if (SubRegNo == X86::SIL) return 1;
- if (SubRegNo == X86::SI) return 3;
- if (SubRegNo == X86::ESI) return 4;
+ case X86::R15W:
+ if (SubRegNo == X86::R15B) return X86::sub_8bit;
return 0;
- case X86::RDI:
- if (SubRegNo == X86::DIL) return 1;
- if (SubRegNo == X86::DI) return 3;
- if (SubRegNo == X86::EDI) return 4;
+ case X86::RAX:
+ if (SubRegNo == X86::AL) return X86::sub_8bit;
+ if (SubRegNo == X86::AH) return X86::sub_8bit_hi;
+ if (SubRegNo == X86::AX) return X86::sub_16bit;
+ if (SubRegNo == X86::EAX) return X86::sub_32bit;
return 0;
case X86::RBP:
- if (SubRegNo == X86::BPL) return 1;
- if (SubRegNo == X86::BP) return 3;
- if (SubRegNo == X86::EBP) return 4;
+ if (SubRegNo == X86::BPL) return X86::sub_8bit;
+ if (SubRegNo == X86::BP) return X86::sub_16bit;
+ if (SubRegNo == X86::EBP) return X86::sub_32bit;
return 0;
- case X86::RSP:
- if (SubRegNo == X86::SPL) return 1;
- if (SubRegNo == X86::SP) return 3;
- if (SubRegNo == X86::ESP) return 4;
+ case X86::RBX:
+ if (SubRegNo == X86::BL) return X86::sub_8bit;
+ if (SubRegNo == X86::BH) return X86::sub_8bit_hi;
+ if (SubRegNo == X86::BX) return X86::sub_16bit;
+ if (SubRegNo == X86::EBX) return X86::sub_32bit;
return 0;
- case X86::R8:
- if (SubRegNo == X86::R8B) return 1;
- if (SubRegNo == X86::R8W) return 3;
- if (SubRegNo == X86::R8D) return 4;
+ case X86::RCX:
+ if (SubRegNo == X86::CL) return X86::sub_8bit;
+ if (SubRegNo == X86::CH) return X86::sub_8bit_hi;
+ if (SubRegNo == X86::CX) return X86::sub_16bit;
+ if (SubRegNo == X86::ECX) return X86::sub_32bit;
return 0;
- case X86::R9:
- if (SubRegNo == X86::R9B) return 1;
- if (SubRegNo == X86::R9W) return 3;
- if (SubRegNo == X86::R9D) return 4;
+ case X86::RDI:
+ if (SubRegNo == X86::DIL) return X86::sub_8bit;
+ if (SubRegNo == X86::DI) return X86::sub_16bit;
+ if (SubRegNo == X86::EDI) return X86::sub_32bit;
return 0;
- case X86::R10:
- if (SubRegNo == X86::R10B) return 1;
- if (SubRegNo == X86::R10W) return 3;
- if (SubRegNo == X86::R10D) return 4;
+ case X86::RDX:
+ if (SubRegNo == X86::DL) return X86::sub_8bit;
+ if (SubRegNo == X86::DH) return X86::sub_8bit_hi;
+ if (SubRegNo == X86::DX) return X86::sub_16bit;
+ if (SubRegNo == X86::EDX) return X86::sub_32bit;
return 0;
- case X86::R11:
- if (SubRegNo == X86::R11B) return 1;
- if (SubRegNo == X86::R11W) return 3;
- if (SubRegNo == X86::R11D) return 4;
+ case X86::RIP:
+ if (SubRegNo == X86::IP) return X86::sub_16bit;
+ if (SubRegNo == X86::EIP) return X86::sub_32bit;
return 0;
- case X86::R12:
- if (SubRegNo == X86::R12B) return 1;
- if (SubRegNo == X86::R12W) return 3;
- if (SubRegNo == X86::R12D) return 4;
+ case X86::RSI:
+ if (SubRegNo == X86::SIL) return X86::sub_8bit;
+ if (SubRegNo == X86::SI) return X86::sub_16bit;
+ if (SubRegNo == X86::ESI) return X86::sub_32bit;
return 0;
- case X86::R13:
- if (SubRegNo == X86::R13B) return 1;
- if (SubRegNo == X86::R13W) return 3;
- if (SubRegNo == X86::R13D) return 4;
+ case X86::RSP:
+ if (SubRegNo == X86::SPL) return X86::sub_8bit;
+ if (SubRegNo == X86::SP) return X86::sub_16bit;
+ if (SubRegNo == X86::ESP) return X86::sub_32bit;
return 0;
- case X86::R14:
- if (SubRegNo == X86::R14B) return 1;
- if (SubRegNo == X86::R14W) return 3;
- if (SubRegNo == X86::R14D) return 4;
+ case X86::SI:
+ if (SubRegNo == X86::SIL) return X86::sub_8bit;
return 0;
- case X86::R15:
- if (SubRegNo == X86::R15B) return 1;
- if (SubRegNo == X86::R15W) return 3;
- if (SubRegNo == X86::R15D) return 4;
+ case X86::SP:
+ if (SubRegNo == X86::SPL) return X86::sub_8bit;
return 0;
case X86::XMM0:
- if (SubRegNo == X86::XMM0) return 1;
- if (SubRegNo == X86::XMM0) return 2;
+ if (SubRegNo == X86::XMM0) return X86::sub_sd;
+ if (SubRegNo == X86::XMM0) return X86::sub_ss;
return 0;
case X86::XMM1:
- if (SubRegNo == X86::XMM1) return 1;
- if (SubRegNo == X86::XMM1) return 2;
+ if (SubRegNo == X86::XMM1) return X86::sub_sd;
+ if (SubRegNo == X86::XMM1) return X86::sub_ss;
return 0;
case X86::XMM2:
- if (SubRegNo == X86::XMM2) return 1;
- if (SubRegNo == X86::XMM2) return 2;
+ if (SubRegNo == X86::XMM2) return X86::sub_sd;
+ if (SubRegNo == X86::XMM2) return X86::sub_ss;
return 0;
case X86::XMM3:
- if (SubRegNo == X86::XMM3) return 1;
- if (SubRegNo == X86::XMM3) return 2;
+ if (SubRegNo == X86::XMM3) return X86::sub_sd;
+ if (SubRegNo == X86::XMM3) return X86::sub_ss;
return 0;
case X86::XMM4:
- if (SubRegNo == X86::XMM4) return 1;
- if (SubRegNo == X86::XMM4) return 2;
+ if (SubRegNo == X86::XMM4) return X86::sub_sd;
+ if (SubRegNo == X86::XMM4) return X86::sub_ss;
return 0;
case X86::XMM5:
- if (SubRegNo == X86::XMM5) return 1;
- if (SubRegNo == X86::XMM5) return 2;
+ if (SubRegNo == X86::XMM5) return X86::sub_sd;
+ if (SubRegNo == X86::XMM5) return X86::sub_ss;
return 0;
case X86::XMM6:
- if (SubRegNo == X86::XMM6) return 1;
- if (SubRegNo == X86::XMM6) return 2;
+ if (SubRegNo == X86::XMM6) return X86::sub_sd;
+ if (SubRegNo == X86::XMM6) return X86::sub_ss;
return 0;
case X86::XMM7:
- if (SubRegNo == X86::XMM7) return 1;
- if (SubRegNo == X86::XMM7) return 2;
+ if (SubRegNo == X86::XMM7) return X86::sub_sd;
+ if (SubRegNo == X86::XMM7) return X86::sub_ss;
return 0;
case X86::XMM8:
- if (SubRegNo == X86::XMM8) return 1;
- if (SubRegNo == X86::XMM8) return 2;
+ if (SubRegNo == X86::XMM8) return X86::sub_sd;
+ if (SubRegNo == X86::XMM8) return X86::sub_ss;
return 0;
case X86::XMM9:
- if (SubRegNo == X86::XMM9) return 1;
- if (SubRegNo == X86::XMM9) return 2;
+ if (SubRegNo == X86::XMM9) return X86::sub_sd;
+ if (SubRegNo == X86::XMM9) return X86::sub_ss;
return 0;
case X86::XMM10:
- if (SubRegNo == X86::XMM10) return 1;
- if (SubRegNo == X86::XMM10) return 2;
+ if (SubRegNo == X86::XMM10) return X86::sub_sd;
+ if (SubRegNo == X86::XMM10) return X86::sub_ss;
return 0;
case X86::XMM11:
- if (SubRegNo == X86::XMM11) return 1;
- if (SubRegNo == X86::XMM11) return 2;
+ if (SubRegNo == X86::XMM11) return X86::sub_sd;
+ if (SubRegNo == X86::XMM11) return X86::sub_ss;
return 0;
case X86::XMM12:
- if (SubRegNo == X86::XMM12) return 1;
- if (SubRegNo == X86::XMM12) return 2;
+ if (SubRegNo == X86::XMM12) return X86::sub_sd;
+ if (SubRegNo == X86::XMM12) return X86::sub_ss;
return 0;
case X86::XMM13:
- if (SubRegNo == X86::XMM13) return 1;
- if (SubRegNo == X86::XMM13) return 2;
+ if (SubRegNo == X86::XMM13) return X86::sub_sd;
+ if (SubRegNo == X86::XMM13) return X86::sub_ss;
return 0;
case X86::XMM14:
- if (SubRegNo == X86::XMM14) return 1;
- if (SubRegNo == X86::XMM14) return 2;
+ if (SubRegNo == X86::XMM14) return X86::sub_sd;
+ if (SubRegNo == X86::XMM14) return X86::sub_ss;
return 0;
case X86::XMM15:
- if (SubRegNo == X86::XMM15) return 1;
- if (SubRegNo == X86::XMM15) return 2;
+ if (SubRegNo == X86::XMM15) return X86::sub_sd;
+ if (SubRegNo == X86::XMM15) return X86::sub_ss;
return 0;
case X86::YMM0:
- if (SubRegNo == X86::XMM0) return 1;
- if (SubRegNo == X86::XMM0) return 2;
- if (SubRegNo == X86::XMM0) return 3;
+ if (SubRegNo == X86::XMM0) return X86::sub_sd;
+ if (SubRegNo == X86::XMM0) return X86::sub_ss;
+ if (SubRegNo == X86::XMM0) return X86::sub_xmm;
return 0;
case X86::YMM1:
- if (SubRegNo == X86::XMM1) return 1;
- if (SubRegNo == X86::XMM1) return 2;
- if (SubRegNo == X86::XMM1) return 3;
+ if (SubRegNo == X86::XMM1) return X86::sub_sd;
+ if (SubRegNo == X86::XMM1) return X86::sub_ss;
+ if (SubRegNo == X86::XMM1) return X86::sub_xmm;
return 0;
case X86::YMM2:
- if (SubRegNo == X86::XMM2) return 1;
- if (SubRegNo == X86::XMM2) return 2;
- if (SubRegNo == X86::XMM2) return 3;
+ if (SubRegNo == X86::XMM2) return X86::sub_sd;
+ if (SubRegNo == X86::XMM2) return X86::sub_ss;
+ if (SubRegNo == X86::XMM2) return X86::sub_xmm;
return 0;
case X86::YMM3:
- if (SubRegNo == X86::XMM3) return 1;
- if (SubRegNo == X86::XMM3) return 2;
- if (SubRegNo == X86::XMM3) return 3;
+ if (SubRegNo == X86::XMM3) return X86::sub_sd;
+ if (SubRegNo == X86::XMM3) return X86::sub_ss;
+ if (SubRegNo == X86::XMM3) return X86::sub_xmm;
return 0;
case X86::YMM4:
- if (SubRegNo == X86::XMM4) return 1;
- if (SubRegNo == X86::XMM4) return 2;
- if (SubRegNo == X86::XMM4) return 3;
+ if (SubRegNo == X86::XMM4) return X86::sub_sd;
+ if (SubRegNo == X86::XMM4) return X86::sub_ss;
+ if (SubRegNo == X86::XMM4) return X86::sub_xmm;
return 0;
case X86::YMM5:
- if (SubRegNo == X86::XMM5) return 1;
- if (SubRegNo == X86::XMM5) return 2;
- if (SubRegNo == X86::XMM5) return 3;
+ if (SubRegNo == X86::XMM5) return X86::sub_sd;
+ if (SubRegNo == X86::XMM5) return X86::sub_ss;
+ if (SubRegNo == X86::XMM5) return X86::sub_xmm;
return 0;
case X86::YMM6:
- if (SubRegNo == X86::XMM6) return 1;
- if (SubRegNo == X86::XMM6) return 2;
- if (SubRegNo == X86::XMM6) return 3;
+ if (SubRegNo == X86::XMM6) return X86::sub_sd;
+ if (SubRegNo == X86::XMM6) return X86::sub_ss;
+ if (SubRegNo == X86::XMM6) return X86::sub_xmm;
return 0;
case X86::YMM7:
- if (SubRegNo == X86::XMM7) return 1;
- if (SubRegNo == X86::XMM7) return 2;
- if (SubRegNo == X86::XMM7) return 3;
+ if (SubRegNo == X86::XMM7) return X86::sub_sd;
+ if (SubRegNo == X86::XMM7) return X86::sub_ss;
+ if (SubRegNo == X86::XMM7) return X86::sub_xmm;
return 0;
case X86::YMM8:
- if (SubRegNo == X86::XMM8) return 1;
- if (SubRegNo == X86::XMM8) return 2;
- if (SubRegNo == X86::XMM8) return 3;
+ if (SubRegNo == X86::XMM8) return X86::sub_sd;
+ if (SubRegNo == X86::XMM8) return X86::sub_ss;
+ if (SubRegNo == X86::XMM8) return X86::sub_xmm;
return 0;
case X86::YMM9:
- if (SubRegNo == X86::XMM9) return 1;
- if (SubRegNo == X86::XMM9) return 2;
- if (SubRegNo == X86::XMM9) return 3;
+ if (SubRegNo == X86::XMM9) return X86::sub_sd;
+ if (SubRegNo == X86::XMM9) return X86::sub_ss;
+ if (SubRegNo == X86::XMM9) return X86::sub_xmm;
return 0;
case X86::YMM10:
- if (SubRegNo == X86::XMM10) return 1;
- if (SubRegNo == X86::XMM10) return 2;
- if (SubRegNo == X86::XMM10) return 3;
+ if (SubRegNo == X86::XMM10) return X86::sub_sd;
+ if (SubRegNo == X86::XMM10) return X86::sub_ss;
+ if (SubRegNo == X86::XMM10) return X86::sub_xmm;
return 0;
case X86::YMM11:
- if (SubRegNo == X86::XMM11) return 1;
- if (SubRegNo == X86::XMM11) return 2;
- if (SubRegNo == X86::XMM11) return 3;
+ if (SubRegNo == X86::XMM11) return X86::sub_sd;
+ if (SubRegNo == X86::XMM11) return X86::sub_ss;
+ if (SubRegNo == X86::XMM11) return X86::sub_xmm;
return 0;
case X86::YMM12:
- if (SubRegNo == X86::XMM12) return 1;
- if (SubRegNo == X86::XMM12) return 2;
- if (SubRegNo == X86::XMM12) return 3;
+ if (SubRegNo == X86::XMM12) return X86::sub_sd;
+ if (SubRegNo == X86::XMM12) return X86::sub_ss;
+ if (SubRegNo == X86::XMM12) return X86::sub_xmm;
return 0;
case X86::YMM13:
- if (SubRegNo == X86::XMM13) return 1;
- if (SubRegNo == X86::XMM13) return 2;
- if (SubRegNo == X86::XMM13) return 3;
+ if (SubRegNo == X86::XMM13) return X86::sub_sd;
+ if (SubRegNo == X86::XMM13) return X86::sub_ss;
+ if (SubRegNo == X86::XMM13) return X86::sub_xmm;
return 0;
case X86::YMM14:
- if (SubRegNo == X86::XMM14) return 1;
- if (SubRegNo == X86::XMM14) return 2;
- if (SubRegNo == X86::XMM14) return 3;
+ if (SubRegNo == X86::XMM14) return X86::sub_sd;
+ if (SubRegNo == X86::XMM14) return X86::sub_ss;
+ if (SubRegNo == X86::XMM14) return X86::sub_xmm;
return 0;
case X86::YMM15:
- if (SubRegNo == X86::XMM15) return 1;
- if (SubRegNo == X86::XMM15) return 2;
- if (SubRegNo == X86::XMM15) return 3;
+ if (SubRegNo == X86::XMM15) return X86::sub_sd;
+ if (SubRegNo == X86::XMM15) return X86::sub_ss;
+ if (SubRegNo == X86::XMM15) return X86::sub_xmm;
return 0;
};
return 0;
}
+unsigned X86GenRegisterInfo::composeSubRegIndices(unsigned IdxA, unsigned IdxB) const {
+ switch (IdxA) {
+ default:
+ return IdxB;
+ }
+}
+
X86GenRegisterInfo::X86GenRegisterInfo(int CallFrameSetupOpcode, int CallFrameDestroyOpcode)
- : TargetRegisterInfo(RegisterDescriptors, 159, RegisterClasses, RegisterClasses+31,
- CallFrameSetupOpcode, CallFrameDestroyOpcode,
+ : TargetRegisterInfo(RegisterDescriptors, 153, RegisterClasses, RegisterClasses+32,
+ SubRegIndexTable,
+ CallFrameSetupOpcode, CallFrameDestroyOpcode,
SubregHashTable, SubregHashTableSize,
- SuperregHashTable, SuperregHashTableSize,
AliasesHashTable, AliasesHashTableSize) {
}
@@ -5064,6 +4398,24 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 2;
case X86::CL:
return 2;
+ case X86::CR0:
+ return -1;
+ case X86::CR1:
+ return -1;
+ case X86::CR2:
+ return -1;
+ case X86::CR3:
+ return -1;
+ case X86::CR4:
+ return -1;
+ case X86::CR5:
+ return -1;
+ case X86::CR6:
+ return -1;
+ case X86::CR7:
+ return -1;
+ case X86::CR8:
+ return -1;
case X86::CS:
return -1;
case X86::CX:
@@ -5102,22 +4454,6 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 6;
case X86::EBX:
return 3;
- case X86::ECR0:
- return -1;
- case X86::ECR1:
- return -1;
- case X86::ECR2:
- return -1;
- case X86::ECR3:
- return -1;
- case X86::ECR4:
- return -1;
- case X86::ECR5:
- return -1;
- case X86::ECR6:
- return -1;
- case X86::ECR7:
- return -1;
case X86::ECX:
return 2;
case X86::EDI:
@@ -5128,6 +4464,8 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return -1;
case X86::EIP:
return 16;
+ case X86::EIZ:
+ return -1;
case X86::ES:
return -1;
case X86::ESI:
@@ -5170,6 +4508,22 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 47;
case X86::MM7:
return 48;
+ case X86::R8:
+ return 8;
+ case X86::R8B:
+ return 8;
+ case X86::R8D:
+ return 8;
+ case X86::R8W:
+ return 8;
+ case X86::R9:
+ return 9;
+ case X86::R9B:
+ return 9;
+ case X86::R9D:
+ return 9;
+ case X86::R9W:
+ return 9;
case X86::R10:
return 10;
case X86::R10B:
@@ -5218,46 +4572,12 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 15;
case X86::R15W:
return 15;
- case X86::R8:
- return 8;
- case X86::R8B:
- return 8;
- case X86::R8D:
- return 8;
- case X86::R8W:
- return 8;
- case X86::R9:
- return 9;
- case X86::R9B:
- return 9;
- case X86::R9D:
- return 9;
- case X86::R9W:
- return 9;
case X86::RAX:
return 0;
case X86::RBP:
return 6;
case X86::RBX:
return 3;
- case X86::RCR0:
- return -1;
- case X86::RCR1:
- return -1;
- case X86::RCR2:
- return -1;
- case X86::RCR3:
- return -1;
- case X86::RCR4:
- return -1;
- case X86::RCR5:
- return -1;
- case X86::RCR6:
- return -1;
- case X86::RCR7:
- return -1;
- case X86::RCR8:
- return -1;
case X86::RCX:
return 2;
case X86::RDI:
@@ -5266,6 +4586,8 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 1;
case X86::RIP:
return 16;
+ case X86::RIZ:
+ return -1;
case X86::RSI:
return 4;
case X86::RSP:
@@ -5300,18 +4622,6 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 17;
case X86::XMM1:
return 18;
- case X86::XMM10:
- return 27;
- case X86::XMM11:
- return 28;
- case X86::XMM12:
- return 29;
- case X86::XMM13:
- return 30;
- case X86::XMM14:
- return 31;
- case X86::XMM15:
- return 32;
case X86::XMM2:
return 19;
case X86::XMM3:
@@ -5328,22 +4638,22 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 25;
case X86::XMM9:
return 26;
- case X86::YMM0:
- return 17;
- case X86::YMM1:
- return 18;
- case X86::YMM10:
+ case X86::XMM10:
return 27;
- case X86::YMM11:
+ case X86::XMM11:
return 28;
- case X86::YMM12:
+ case X86::XMM12:
return 29;
- case X86::YMM13:
+ case X86::XMM13:
return 30;
- case X86::YMM14:
+ case X86::XMM14:
return 31;
- case X86::YMM15:
+ case X86::XMM15:
return 32;
+ case X86::YMM0:
+ return 17;
+ case X86::YMM1:
+ return 18;
case X86::YMM2:
return 19;
case X86::YMM3:
@@ -5360,6 +4670,18 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 25;
case X86::YMM9:
return 26;
+ case X86::YMM10:
+ return 27;
+ case X86::YMM11:
+ return 28;
+ case X86::YMM12:
+ return 29;
+ case X86::YMM13:
+ return 30;
+ case X86::YMM14:
+ return 31;
+ case X86::YMM15:
+ return 32;
};
case 1:
switch (RegNum) {
@@ -5386,6 +4708,24 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 1;
case X86::CL:
return 1;
+ case X86::CR0:
+ return -1;
+ case X86::CR1:
+ return -1;
+ case X86::CR2:
+ return -1;
+ case X86::CR3:
+ return -1;
+ case X86::CR4:
+ return -1;
+ case X86::CR5:
+ return -1;
+ case X86::CR6:
+ return -1;
+ case X86::CR7:
+ return -1;
+ case X86::CR8:
+ return -1;
case X86::CS:
return -1;
case X86::CX:
@@ -5424,22 +4764,6 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 4;
case X86::EBX:
return 3;
- case X86::ECR0:
- return -1;
- case X86::ECR1:
- return -1;
- case X86::ECR2:
- return -1;
- case X86::ECR3:
- return -1;
- case X86::ECR4:
- return -1;
- case X86::ECR5:
- return -1;
- case X86::ECR6:
- return -1;
- case X86::ECR7:
- return -1;
case X86::ECX:
return 1;
case X86::EDI:
@@ -5450,6 +4774,8 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return -1;
case X86::EIP:
return 8;
+ case X86::EIZ:
+ return -1;
case X86::ES:
return -1;
case X86::ESI:
@@ -5492,6 +4818,30 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 35;
case X86::MM7:
return 36;
+ case X86::R8:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::R8B:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::R8D:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::R8W:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::R9:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::R9B:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::R9D:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::R9W:
+ assert(0 && "Invalid register for this mode");
+ return -1;
case X86::R10:
assert(0 && "Invalid register for this mode");
return -1;
@@ -5564,30 +4914,6 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
case X86::R15W:
assert(0 && "Invalid register for this mode");
return -1;
- case X86::R8:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::R8B:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::R8D:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::R8W:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::R9:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::R9B:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::R9D:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::R9W:
- assert(0 && "Invalid register for this mode");
- return -1;
case X86::RAX:
assert(0 && "Invalid register for this mode");
return -1;
@@ -5597,24 +4923,6 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
case X86::RBX:
assert(0 && "Invalid register for this mode");
return -1;
- case X86::RCR0:
- return -1;
- case X86::RCR1:
- return -1;
- case X86::RCR2:
- return -1;
- case X86::RCR3:
- return -1;
- case X86::RCR4:
- return -1;
- case X86::RCR5:
- return -1;
- case X86::RCR6:
- return -1;
- case X86::RCR7:
- return -1;
- case X86::RCR8:
- return -1;
case X86::RCX:
assert(0 && "Invalid register for this mode");
return -1;
@@ -5627,6 +4935,8 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
case X86::RIP:
assert(0 && "Invalid register for this mode");
return -1;
+ case X86::RIZ:
+ return -1;
case X86::RSI:
assert(0 && "Invalid register for this mode");
return -1;
@@ -5663,24 +4973,6 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 21;
case X86::XMM1:
return 22;
- case X86::XMM10:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::XMM11:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::XMM12:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::XMM13:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::XMM14:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::XMM15:
- assert(0 && "Invalid register for this mode");
- return -1;
case X86::XMM2:
return 23;
case X86::XMM3:
@@ -5699,28 +4991,28 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
case X86::XMM9:
assert(0 && "Invalid register for this mode");
return -1;
- case X86::YMM0:
- return 21;
- case X86::YMM1:
- return 22;
- case X86::YMM10:
+ case X86::XMM10:
assert(0 && "Invalid register for this mode");
return -1;
- case X86::YMM11:
+ case X86::XMM11:
assert(0 && "Invalid register for this mode");
return -1;
- case X86::YMM12:
+ case X86::XMM12:
assert(0 && "Invalid register for this mode");
return -1;
- case X86::YMM13:
+ case X86::XMM13:
assert(0 && "Invalid register for this mode");
return -1;
- case X86::YMM14:
+ case X86::XMM14:
assert(0 && "Invalid register for this mode");
return -1;
- case X86::YMM15:
+ case X86::XMM15:
assert(0 && "Invalid register for this mode");
return -1;
+ case X86::YMM0:
+ return 21;
+ case X86::YMM1:
+ return 22;
case X86::YMM2:
return 23;
case X86::YMM3:
@@ -5739,6 +5031,24 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
case X86::YMM9:
assert(0 && "Invalid register for this mode");
return -1;
+ case X86::YMM10:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::YMM11:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::YMM12:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::YMM13:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::YMM14:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::YMM15:
+ assert(0 && "Invalid register for this mode");
+ return -1;
};
case 2:
switch (RegNum) {
@@ -5765,6 +5075,24 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 1;
case X86::CL:
return 1;
+ case X86::CR0:
+ return -1;
+ case X86::CR1:
+ return -1;
+ case X86::CR2:
+ return -1;
+ case X86::CR3:
+ return -1;
+ case X86::CR4:
+ return -1;
+ case X86::CR5:
+ return -1;
+ case X86::CR6:
+ return -1;
+ case X86::CR7:
+ return -1;
+ case X86::CR8:
+ return -1;
case X86::CS:
return -1;
case X86::CX:
@@ -5803,22 +5131,6 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 5;
case X86::EBX:
return 3;
- case X86::ECR0:
- return -1;
- case X86::ECR1:
- return -1;
- case X86::ECR2:
- return -1;
- case X86::ECR3:
- return -1;
- case X86::ECR4:
- return -1;
- case X86::ECR5:
- return -1;
- case X86::ECR6:
- return -1;
- case X86::ECR7:
- return -1;
case X86::ECX:
return 1;
case X86::EDI:
@@ -5829,6 +5141,8 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return -1;
case X86::EIP:
return 8;
+ case X86::EIZ:
+ return -1;
case X86::ES:
return -1;
case X86::ESI:
@@ -5871,6 +5185,30 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 35;
case X86::MM7:
return 36;
+ case X86::R8:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::R8B:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::R8D:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::R8W:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::R9:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::R9B:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::R9D:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::R9W:
+ assert(0 && "Invalid register for this mode");
+ return -1;
case X86::R10:
assert(0 && "Invalid register for this mode");
return -1;
@@ -5943,30 +5281,6 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
case X86::R15W:
assert(0 && "Invalid register for this mode");
return -1;
- case X86::R8:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::R8B:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::R8D:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::R8W:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::R9:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::R9B:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::R9D:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::R9W:
- assert(0 && "Invalid register for this mode");
- return -1;
case X86::RAX:
assert(0 && "Invalid register for this mode");
return -1;
@@ -5976,24 +5290,6 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
case X86::RBX:
assert(0 && "Invalid register for this mode");
return -1;
- case X86::RCR0:
- return -1;
- case X86::RCR1:
- return -1;
- case X86::RCR2:
- return -1;
- case X86::RCR3:
- return -1;
- case X86::RCR4:
- return -1;
- case X86::RCR5:
- return -1;
- case X86::RCR6:
- return -1;
- case X86::RCR7:
- return -1;
- case X86::RCR8:
- return -1;
case X86::RCX:
assert(0 && "Invalid register for this mode");
return -1;
@@ -6006,6 +5302,8 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
case X86::RIP:
assert(0 && "Invalid register for this mode");
return -1;
+ case X86::RIZ:
+ return -1;
case X86::RSI:
assert(0 && "Invalid register for this mode");
return -1;
@@ -6042,24 +5340,6 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
return 21;
case X86::XMM1:
return 22;
- case X86::XMM10:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::XMM11:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::XMM12:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::XMM13:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::XMM14:
- assert(0 && "Invalid register for this mode");
- return -1;
- case X86::XMM15:
- assert(0 && "Invalid register for this mode");
- return -1;
case X86::XMM2:
return 23;
case X86::XMM3:
@@ -6078,28 +5358,28 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
case X86::XMM9:
assert(0 && "Invalid register for this mode");
return -1;
- case X86::YMM0:
- return 21;
- case X86::YMM1:
- return 22;
- case X86::YMM10:
+ case X86::XMM10:
assert(0 && "Invalid register for this mode");
return -1;
- case X86::YMM11:
+ case X86::XMM11:
assert(0 && "Invalid register for this mode");
return -1;
- case X86::YMM12:
+ case X86::XMM12:
assert(0 && "Invalid register for this mode");
return -1;
- case X86::YMM13:
+ case X86::XMM13:
assert(0 && "Invalid register for this mode");
return -1;
- case X86::YMM14:
+ case X86::XMM14:
assert(0 && "Invalid register for this mode");
return -1;
- case X86::YMM15:
+ case X86::XMM15:
assert(0 && "Invalid register for this mode");
return -1;
+ case X86::YMM0:
+ return 21;
+ case X86::YMM1:
+ return 22;
case X86::YMM2:
return 23;
case X86::YMM3:
@@ -6118,6 +5398,24 @@ int X86GenRegisterInfo::getDwarfRegNumFull(unsigned RegNum, unsigned Flavour) co
case X86::YMM9:
assert(0 && "Invalid register for this mode");
return -1;
+ case X86::YMM10:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::YMM11:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::YMM12:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::YMM13:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::YMM14:
+ assert(0 && "Invalid register for this mode");
+ return -1;
+ case X86::YMM15:
+ assert(0 && "Invalid register for this mode");
+ return -1;
};
};
}
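
Editor's note on the hunk above: the regenerated X86GenRegisterInfo drops the hard-coded sub-register ordinals (1, 2, 3) in favour of the symbolic X86::sub_sd / sub_ss / sub_xmm indices and adds a trivial composeSubRegIndices(). A minimal sketch of how a caller would resolve a sub-register through the new indices; the helper name is illustrative only, and it assumes the X86 enums from X86GenRegisterNames.inc are in scope (e.g. via X86.h):

// Sketch (not part of the patch): sub-register lookups now use symbolic
// indices instead of the old magic numbers 1/2/3.
#include "llvm/Target/TargetRegisterInfo.h"

static unsigned xmmLaneOf(const llvm::TargetRegisterInfo &TRI, unsigned YmmReg) {
  // Per the tables above, TRI.getSubReg(X86::YMM0, X86::sub_xmm) should
  // yield X86::XMM0.
  return TRI.getSubReg(YmmReg, X86::sub_xmm);
}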
diff --git a/libclamav/c++/X86GenRegisterNames.inc b/libclamav/c++/X86GenRegisterNames.inc
index 6edb8ec..42f1fad 100644
--- a/libclamav/c++/X86GenRegisterNames.inc
+++ b/libclamav/c++/X86GenRegisterNames.inc
@@ -9,167 +9,176 @@
namespace llvm {
namespace X86 {
- enum {
- NoRegister,
- AH, // 1
- AL, // 2
- AX, // 3
- BH, // 4
- BL, // 5
- BP, // 6
- BPL, // 7
- BX, // 8
- CH, // 9
- CL, // 10
- CS, // 11
- CX, // 12
- DH, // 13
- DI, // 14
- DIL, // 15
- DL, // 16
- DR0, // 17
- DR1, // 18
- DR2, // 19
- DR3, // 20
- DR4, // 21
- DR5, // 22
- DR6, // 23
- DR7, // 24
- DS, // 25
- DX, // 26
- EAX, // 27
- EBP, // 28
- EBX, // 29
- ECR0, // 30
- ECR1, // 31
- ECR2, // 32
- ECR3, // 33
- ECR4, // 34
- ECR5, // 35
- ECR6, // 36
- ECR7, // 37
- ECX, // 38
- EDI, // 39
- EDX, // 40
- EFLAGS, // 41
- EIP, // 42
- ES, // 43
- ESI, // 44
- ESP, // 45
- FP0, // 46
- FP1, // 47
- FP2, // 48
- FP3, // 49
- FP4, // 50
- FP5, // 51
- FP6, // 52
- FS, // 53
- GS, // 54
- IP, // 55
- MM0, // 56
- MM1, // 57
- MM2, // 58
- MM3, // 59
- MM4, // 60
- MM5, // 61
- MM6, // 62
- MM7, // 63
- R10, // 64
- R10B, // 65
- R10D, // 66
- R10W, // 67
- R11, // 68
- R11B, // 69
- R11D, // 70
- R11W, // 71
- R12, // 72
- R12B, // 73
- R12D, // 74
- R12W, // 75
- R13, // 76
- R13B, // 77
- R13D, // 78
- R13W, // 79
- R14, // 80
- R14B, // 81
- R14D, // 82
- R14W, // 83
- R15, // 84
- R15B, // 85
- R15D, // 86
- R15W, // 87
- R8, // 88
- R8B, // 89
- R8D, // 90
- R8W, // 91
- R9, // 92
- R9B, // 93
- R9D, // 94
- R9W, // 95
- RAX, // 96
- RBP, // 97
- RBX, // 98
- RCR0, // 99
- RCR1, // 100
- RCR2, // 101
- RCR3, // 102
- RCR4, // 103
- RCR5, // 104
- RCR6, // 105
- RCR7, // 106
- RCR8, // 107
- RCX, // 108
- RDI, // 109
- RDX, // 110
- RIP, // 111
- RSI, // 112
- RSP, // 113
- SI, // 114
- SIL, // 115
- SP, // 116
- SPL, // 117
- SS, // 118
- ST0, // 119
- ST1, // 120
- ST2, // 121
- ST3, // 122
- ST4, // 123
- ST5, // 124
- ST6, // 125
- ST7, // 126
- XMM0, // 127
- XMM1, // 128
- XMM10, // 129
- XMM11, // 130
- XMM12, // 131
- XMM13, // 132
- XMM14, // 133
- XMM15, // 134
- XMM2, // 135
- XMM3, // 136
- XMM4, // 137
- XMM5, // 138
- XMM6, // 139
- XMM7, // 140
- XMM8, // 141
- XMM9, // 142
- YMM0, // 143
- YMM1, // 144
- YMM10, // 145
- YMM11, // 146
- YMM12, // 147
- YMM13, // 148
- YMM14, // 149
- YMM15, // 150
- YMM2, // 151
- YMM3, // 152
- YMM4, // 153
- YMM5, // 154
- YMM6, // 155
- YMM7, // 156
- YMM8, // 157
- YMM9, // 158
- NUM_TARGET_REGS // 159
- };
+enum {
+ NoRegister,
+ AH, // 1
+ AL, // 2
+ AX, // 3
+ BH, // 4
+ BL, // 5
+ BP, // 6
+ BPL, // 7
+ BX, // 8
+ CH, // 9
+ CL, // 10
+ CR0, // 11
+ CR1, // 12
+ CR2, // 13
+ CR3, // 14
+ CR4, // 15
+ CR5, // 16
+ CR6, // 17
+ CR7, // 18
+ CR8, // 19
+ CS, // 20
+ CX, // 21
+ DH, // 22
+ DI, // 23
+ DIL, // 24
+ DL, // 25
+ DR0, // 26
+ DR1, // 27
+ DR2, // 28
+ DR3, // 29
+ DR4, // 30
+ DR5, // 31
+ DR6, // 32
+ DR7, // 33
+ DS, // 34
+ DX, // 35
+ EAX, // 36
+ EBP, // 37
+ EBX, // 38
+ ECX, // 39
+ EDI, // 40
+ EDX, // 41
+ EFLAGS, // 42
+ EIP, // 43
+ EIZ, // 44
+ ES, // 45
+ ESI, // 46
+ ESP, // 47
+ FP0, // 48
+ FP1, // 49
+ FP2, // 50
+ FP3, // 51
+ FP4, // 52
+ FP5, // 53
+ FP6, // 54
+ FS, // 55
+ GS, // 56
+ IP, // 57
+ MM0, // 58
+ MM1, // 59
+ MM2, // 60
+ MM3, // 61
+ MM4, // 62
+ MM5, // 63
+ MM6, // 64
+ MM7, // 65
+ R8, // 66
+ R8B, // 67
+ R8D, // 68
+ R8W, // 69
+ R9, // 70
+ R9B, // 71
+ R9D, // 72
+ R9W, // 73
+ R10, // 74
+ R10B, // 75
+ R10D, // 76
+ R10W, // 77
+ R11, // 78
+ R11B, // 79
+ R11D, // 80
+ R11W, // 81
+ R12, // 82
+ R12B, // 83
+ R12D, // 84
+ R12W, // 85
+ R13, // 86
+ R13B, // 87
+ R13D, // 88
+ R13W, // 89
+ R14, // 90
+ R14B, // 91
+ R14D, // 92
+ R14W, // 93
+ R15, // 94
+ R15B, // 95
+ R15D, // 96
+ R15W, // 97
+ RAX, // 98
+ RBP, // 99
+ RBX, // 100
+ RCX, // 101
+ RDI, // 102
+ RDX, // 103
+ RIP, // 104
+ RIZ, // 105
+ RSI, // 106
+ RSP, // 107
+ SI, // 108
+ SIL, // 109
+ SP, // 110
+ SPL, // 111
+ SS, // 112
+ ST0, // 113
+ ST1, // 114
+ ST2, // 115
+ ST3, // 116
+ ST4, // 117
+ ST5, // 118
+ ST6, // 119
+ ST7, // 120
+ XMM0, // 121
+ XMM1, // 122
+ XMM2, // 123
+ XMM3, // 124
+ XMM4, // 125
+ XMM5, // 126
+ XMM6, // 127
+ XMM7, // 128
+ XMM8, // 129
+ XMM9, // 130
+ XMM10, // 131
+ XMM11, // 132
+ XMM12, // 133
+ XMM13, // 134
+ XMM14, // 135
+ XMM15, // 136
+ YMM0, // 137
+ YMM1, // 138
+ YMM2, // 139
+ YMM3, // 140
+ YMM4, // 141
+ YMM5, // 142
+ YMM6, // 143
+ YMM7, // 144
+ YMM8, // 145
+ YMM9, // 146
+ YMM10, // 147
+ YMM11, // 148
+ YMM12, // 149
+ YMM13, // 150
+ YMM14, // 151
+ YMM15, // 152
+ NUM_TARGET_REGS // 153
+};
+}
+
+// Subregister indices
+namespace X86 {
+enum {
+ NoSubRegister,
+ sub_8bit, // 1
+ sub_8bit_hi, // 2
+ sub_16bit, // 3
+ sub_32bit, // 4
+ sub_sd, // 5
+ sub_ss, // 6
+ sub_xmm, // 7
+ NUM_TARGET_SUBREGS = 8
+};
}
} // End llvm namespace
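
Editor's note: the register enum above is renumbered (CR0-CR8, EIZ and RIZ added; the ECR*/RCR* pseudo registers dropped), so the raw values behind every name shift, e.g. XMM0 moves from 127 to 121 and the register count falls from 159 to 153. A small consistency check, purely illustrative, matching the counts the regenerated X86GenRegisterInfo constructor now passes to TargetRegisterInfo:

// Sketch only: sanity-check that the regenerated enums agree with the
// descriptor counts used above (153 registers, 8 sub-register indices
// counting NoSubRegister). Assumes X86GenRegisterNames.inc is included.
#include <cassert>

static void checkGeneratedTables() {
  assert(llvm::X86::NUM_TARGET_REGS == 153 && "register enum out of sync");
  assert(llvm::X86::NUM_TARGET_SUBREGS == 8 && "sub-register enum out of sync");
}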
diff --git a/libclamav/c++/X86GenSubtarget.inc b/libclamav/c++/X86GenSubtarget.inc
index dbafd0c..347b9cc 100644
--- a/libclamav/c++/X86GenSubtarget.inc
+++ b/libclamav/c++/X86GenSubtarget.inc
@@ -12,35 +12,38 @@
#include "llvm/Target/TargetInstrItineraries.h"
enum {
-};
-
-enum {
Feature3DNow = 1 << 0,
Feature3DNowA = 1 << 1,
Feature64Bit = 1 << 2,
- FeatureAVX = 1 << 3,
- FeatureCMOV = 1 << 4,
- FeatureFMA3 = 1 << 5,
- FeatureFMA4 = 1 << 6,
- FeatureMMX = 1 << 7,
- FeatureSSE1 = 1 << 8,
- FeatureSSE2 = 1 << 9,
- FeatureSSE3 = 1 << 10,
- FeatureSSE41 = 1 << 11,
- FeatureSSE42 = 1 << 12,
- FeatureSSE4A = 1 << 13,
- FeatureSSSE3 = 1 << 14,
- FeatureSlowBTMem = 1 << 15,
- FeatureVectorUAMem = 1 << 16
+ FeatureAES = 1 << 3,
+ FeatureAVX = 1 << 4,
+ FeatureCLMUL = 1 << 5,
+ FeatureCMOV = 1 << 6,
+ FeatureFMA3 = 1 << 7,
+ FeatureFMA4 = 1 << 8,
+ FeatureFastUAMem = 1 << 9,
+ FeatureMMX = 1 << 10,
+ FeatureSSE1 = 1 << 11,
+ FeatureSSE2 = 1 << 12,
+ FeatureSSE3 = 1 << 13,
+ FeatureSSE41 = 1 << 14,
+ FeatureSSE42 = 1 << 15,
+ FeatureSSE4A = 1 << 16,
+ FeatureSSSE3 = 1 << 17,
+ FeatureSlowBTMem = 1 << 18,
+ FeatureVectorUAMem = 1 << 19
};
// Sorted (by key) array of values for CPU features.
static const llvm::SubtargetFeatureKV FeatureKV[] = {
{ "3dnow", "Enable 3DNow! instructions", Feature3DNow, 0 },
{ "3dnowa", "Enable 3DNow! Athlon instructions", Feature3DNowA, Feature3DNow },
- { "64bit", "Support 64-bit instructions", Feature64Bit, 0 },
+ { "64bit", "Support 64-bit instructions", Feature64Bit, FeatureCMOV },
+ { "aes", "Enable AES instructions", FeatureAES, 0 },
{ "avx", "Enable AVX instructions", FeatureAVX, 0 },
+ { "clmul", "Enable carry-less multiplication instructions", FeatureCLMUL, 0 },
{ "cmov", "Enable conditional move instructions", FeatureCMOV, 0 },
+ { "fast-unaligned-mem", "Fast unaligned memory access", FeatureFastUAMem, 0 },
{ "fma3", "Enable three-operand fused multiple-add", FeatureFMA3, 0 },
{ "fma4", "Enable four-operand fused multiple-add", FeatureFMA4, 0 },
{ "mmx", "Enable MMX instructions", FeatureMMX, 0 },
@@ -75,7 +78,7 @@ static const llvm::SubtargetFeatureKV SubTypeKV[] = {
{ "c3", "Select the c3 processor", FeatureMMX | Feature3DNow, 0 },
{ "c3-2", "Select the c3-2 processor", FeatureSSE1, 0 },
{ "core2", "Select the core2 processor", FeatureSSSE3 | Feature64Bit | FeatureSlowBTMem, 0 },
- { "corei7", "Select the corei7 processor", FeatureSSE42 | Feature64Bit | FeatureSlowBTMem, 0 },
+ { "corei7", "Select the corei7 processor", FeatureSSE42 | Feature64Bit | FeatureSlowBTMem | FeatureFastUAMem | FeatureAES, 0 },
{ "generic", "Select the generic processor", 0, 0 },
{ "i386", "Select the i386 processor", 0, 0 },
{ "i486", "Select the i486 processor", 0, 0 },
@@ -87,7 +90,7 @@ static const llvm::SubtargetFeatureKV SubTypeKV[] = {
{ "k6-3", "Select the k6-3 processor", FeatureMMX | Feature3DNow, 0 },
{ "k8", "Select the k8 processor", FeatureSSE2 | Feature3DNowA | Feature64Bit | FeatureSlowBTMem, 0 },
{ "k8-sse3", "Select the k8-sse3 processor", FeatureSSE3 | Feature3DNowA | Feature64Bit | FeatureSlowBTMem, 0 },
- { "nehalem", "Select the nehalem processor", FeatureSSE42 | Feature64Bit | FeatureSlowBTMem, 0 },
+ { "nehalem", "Select the nehalem processor", FeatureSSE42 | Feature64Bit | FeatureSlowBTMem | FeatureFastUAMem, 0 },
{ "nocona", "Select the nocona processor", FeatureSSE3 | Feature64Bit | FeatureSlowBTMem, 0 },
{ "opteron", "Select the opteron processor", FeatureSSE2 | Feature3DNowA | Feature64Bit | FeatureSlowBTMem, 0 },
{ "opteron-sse3", "Select the opteron-sse3 processor", FeatureSSE3 | Feature3DNowA | Feature64Bit | FeatureSlowBTMem, 0 },
@@ -102,6 +105,7 @@ static const llvm::SubtargetFeatureKV SubTypeKV[] = {
{ "prescott", "Select the prescott processor", FeatureSSE3 | FeatureSlowBTMem, 0 },
{ "sandybridge", "Select the sandybridge processor", FeatureSSE42 | FeatureAVX | Feature64Bit, 0 },
{ "shanghai", "Select the shanghai processor", Feature3DNowA | Feature64Bit | FeatureSSE4A | Feature3DNowA, 0 },
+ { "westmere", "Select the westmere processor", FeatureSSE42 | Feature64Bit | FeatureSlowBTMem | FeatureFastUAMem | FeatureAES, 0 },
{ "winchip-c6", "Select the winchip-c6 processor", FeatureMMX, 0 },
{ "winchip2", "Select the winchip2 processor", FeatureMMX | Feature3DNow, 0 },
{ "x86-64", "Select the x86-64 processor", FeatureSSE2 | Feature64Bit | FeatureSlowBTMem, 0 },
@@ -130,10 +134,13 @@ std::string llvm::X86Subtarget::ParseSubtargetFeatures(const std::string &FS,
if ((Bits & Feature3DNow) != 0 && X863DNowLevel < ThreeDNow) X863DNowLevel = ThreeDNow;
if ((Bits & Feature3DNowA) != 0 && X863DNowLevel < ThreeDNowA) X863DNowLevel = ThreeDNowA;
if ((Bits & Feature64Bit) != 0) HasX86_64 = true;
+ if ((Bits & FeatureAES) != 0) HasAES = true;
if ((Bits & FeatureAVX) != 0) HasAVX = true;
+ if ((Bits & FeatureCLMUL) != 0) HasCLMUL = true;
if ((Bits & FeatureCMOV) != 0) HasCMov = true;
if ((Bits & FeatureFMA3) != 0) HasFMA3 = true;
if ((Bits & FeatureFMA4) != 0) HasFMA4 = true;
+ if ((Bits & FeatureFastUAMem) != 0) IsUAMemFast = true;
if ((Bits & FeatureMMX) != 0 && X86SSELevel < MMX) X86SSELevel = MMX;
if ((Bits & FeatureSSE1) != 0 && X86SSELevel < SSE1) X86SSELevel = SSE1;
if ((Bits & FeatureSSE2) != 0 && X86SSELevel < SSE2) X86SSELevel = SSE2;
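
Editor's note: the subtarget feature table gains FeatureAES, FeatureCLMUL and FeatureFastUAMem, which renumbers every bit after Feature64Bit. As an illustration of the new layout (a sketch derived from the enum above, not code in the patch, and assuming those enum constants are in scope), the westmere entry added here expands to:

// Bit mask behind the new "westmere" CPU entry under the renumbered enum:
//   FeatureSSE42 (1<<15) | Feature64Bit (1<<2) | FeatureSlowBTMem (1<<18)
// | FeatureFastUAMem (1<<9) | FeatureAES (1<<3)
static const unsigned WestmereFeatures =
    FeatureSSE42 | Feature64Bit | FeatureSlowBTMem | FeatureFastUAMem | FeatureAES;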
diff --git a/libclamav/c++/bytecode2llvm.cpp b/libclamav/c++/bytecode2llvm.cpp
index ff4eaa8..5c19dbd 100644
--- a/libclamav/c++/bytecode2llvm.cpp
+++ b/libclamav/c++/bytecode2llvm.cpp
@@ -66,6 +66,11 @@
#include "llvm/System/Signals.h"
#include "llvm/Support/Timer.h"
#include "llvm/System/Threading.h"
+
+extern "C" {
+void LLVMInitializeX86AsmPrinter();
+void LLVMInitializePowerPCAsmPrinter();
+}
#include "llvm/Target/TargetSelect.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetOptions.h"
@@ -130,6 +135,10 @@ struct cli_bcengine {
extern "C" uint8_t cli_debug_flag;
namespace {
+#ifndef LLVM28
+#define LLVM28
+#endif
+
#ifdef LLVM28
#define llvm_report_error(x) report_fatal_error(x)
#define llvm_install_error_handler(x) install_fatal_error_handler(x)
@@ -150,25 +159,90 @@ static void UpgradeCall(CallInst *&C, Function *Intr)
UpgradeIntrinsicCall(C, New);
}
+extern "C" {
+#ifdef __GNUC__
+void cli_errmsg(const char *str, ...) __attribute__((format(printf, 1, 2)));
+#else
+void cli_errmsg(const char *str, ...);
+#endif
+
+#ifdef __GNUC__
+void cli_warnmsg(const char *str, ...) __attribute__((format(printf, 1, 2)));
+#else
+void cli_warnmsg(const char *str, ...);
+#endif
+
+#ifdef __GNUC__
+void cli_dbgmsg_internal(const char *str, ...) __attribute__((format(printf, 1, 2)));
+#else
+void cli_dbgmsg_internal(const char *str, ...);
+#endif
+}
+
+class ScopedExceptionHandler {
+ public:
+ jmp_buf &getEnv() { return env;}
+ void Set() {
+ /* set the exception handler's return location to here for the
+ * current thread */
+ ExceptionReturn.set((const jmp_buf*)&env);
+ }
+ ~ScopedExceptionHandler() {
+ /* leaving scope, remove exception handler for current thread */
+ ExceptionReturn.erase();
+ }
+ private:
+ jmp_buf env;
+};
+#define HANDLER_TRY(handler) \
+ if (setjmp(handler.getEnv()) == 0) {\
+ handler.Set();
+
+#define HANDLER_END(handler) \
+ } else cli_warnmsg("[Bytecode JIT]: recovered from error\n");
+
+
void do_shutdown() {
- llvm_shutdown();
+ ScopedExceptionHandler handler;
+ HANDLER_TRY(handler) {
+	// TODO: to be on the safe side, clear any pending error here;
+	// otherwise the raw_fd_ostream destructor calls report_fatal_error.
+ ((class raw_fd_ostream&)errs()).clear_error();
+
+ llvm_shutdown();
+
+ ((class raw_fd_ostream&)errs()).clear_error();
+ }
+ HANDLER_END(handler);
+ remove_fatal_error_handler();
}
static void NORETURN jit_exception_handler(void)
{
- longjmp(*(jmp_buf*)(ExceptionReturn.get()), 1);
+ jmp_buf* buf = const_cast<jmp_buf*>(ExceptionReturn.get());
+ if (buf) {
+ // For errors raised during bytecode generation and execution.
+ longjmp(*buf, 1);
+ } else {
+ // Oops, got no error recovery pointer set up,
+ // this is probably an error raised during shutdown.
+	cli_errmsg("[Bytecode JIT]: exception handler called, but no recovery point set up\n");
+	// This should never happen: the error handler is removed once LLVM is no
+	// longer in use, and while it is in use an error recovery point is always set.
+	llvm_unreachable("[Bytecode JIT]: no exception handler recovery installed, but exception hit!");
+ }
}
static void NORETURN jit_ssp_handler(void)
{
- errs() << "Bytecode JIT: *** stack smashing detected, bytecode aborted\n";
+ cli_errmsg("[Bytecode JIT]: *** stack smashing detected, bytecode aborted\n");
jit_exception_handler();
}
void llvm_error_handler(void *user_data, const std::string &reason)
{
// Output it to stderr, it might exceed the 1k/4k limit of cli_errmsg
- errs() << MODULE << reason;
+ cli_errmsg("[Bytecode JIT]: [LLVM error] %s\n", reason.c_str());
jit_exception_handler();
}
@@ -225,6 +299,13 @@ static void rtlib_bzero(void *s, size_t n)
memset(s, 0, n);
}
+#ifdef _WIN32
+#ifdef _WIN64
+extern "C" void __chkstk(void);
+#else
+extern "C" void _chkstk(void);
+#endif
+#endif
// Resolve integer libcalls, but nothing else.
static void* noUnknownFunctions(const std::string& name) {
void *addr =
@@ -242,6 +323,13 @@ static void* noUnknownFunctions(const std::string& name) {
.Case("memcpy", (void*)(intptr_t)memcpy)
.Case("memset", (void*)(intptr_t)memset)
.Case("abort", (void*)(intptr_t)jit_exception_handler)
+#ifdef _WIN32
+#ifdef _WIN64
+ .Case("_chkstk", (void*)(intptr_t)__chkstk)
+#else
+ .Case("_chkstk", (void*)(intptr_t)_chkstk)
+#endif
+#endif
.Default(0);
if (addr)
return addr;
@@ -259,10 +347,8 @@ public:
{
if (!cli_debug_flag)
return;
- errs() << "bytecode JIT: emitted function " << F.getName() <<
- " of " << Size << " bytes at 0x";
- errs().write_hex((uintptr_t)Code);
- errs() << "\n";
+ cli_dbgmsg_internal("[Bytecode JIT]: emitted function %s of %ld bytes at %p\n",
+ F.getNameStr().c_str(), (long)Size, Code);
}
};
@@ -710,9 +796,14 @@ private:
isa<PointerType>(Ty))
V = Builder.CreateBitCast(V, Ty);
if (V->getType() != Ty) {
- errs() << operand << " ";
- V->dump();
- Ty->dump();
+ if (cli_debug_flag) {
+ std::string str;
+ raw_string_ostream ostr(str);
+ ostr << operand << " " ;
+ V->print(ostr);
+ Ty->print(ostr);
+ cli_dbgmsg_internal("[Bytecode JIT]: %s\n", ostr.str().c_str());
+ }
llvm_report_error("(libclamav) Type mismatch converting operand");
}
return V;
@@ -873,17 +964,25 @@ public:
Value* createGEP(Value *Base, const Type *ETy, InputIterator Start, InputIterator End) {
const Type *Ty = GetElementPtrInst::getIndexedType(Base->getType(), Start, End);
if (!Ty || (ETy && (Ty != ETy && (!isa<IntegerType>(Ty) || !isa<IntegerType>(ETy))))) {
- errs() << MODULE << "Wrong indices for GEP opcode: "
- << " expected type: " << *ETy;
- if (Ty)
- errs() << " actual type: " << *Ty;
- errs() << " base: " << *Base << ";";
- Base->getType()->dump();
- errs() << "\n indices: ";
- for (InputIterator I=Start; I != End; I++) {
- errs() << **I << ", ";
+ if (cli_debug_flag) {
+ std::string str;
+ raw_string_ostream ostr(str);
+
+ ostr << "Wrong indices for GEP opcode: "
+ << " expected type: " << *ETy;
+ if (Ty)
+ ostr << " actual type: " << *Ty;
+ ostr << " base: " << *Base << ";";
+ Base->getType()->print(ostr);
+ ostr << "\n indices: ";
+ for (InputIterator I=Start; I != End; I++) {
+ ostr << **I << ", ";
+ }
+ ostr << "\n";
+ cli_dbgmsg_internal("[Bytecode JIT]: %s\n", ostr.str().c_str());
+ } else {
+ cli_warnmsg("[Bytecode JIT]: Wrong indices for GEP opcode\n");
}
- errs() << "\n";
return 0;
}
return Builder.CreateGEP(Base, Start, End);
@@ -895,7 +994,8 @@ public:
const Type *ETy = cast<PointerType>(cast<PointerType>(Values[dest]->getType())->getElementType())->getElementType();
Value *V = createGEP(Base, ETy, Start, End);
if (!V) {
- errs() << "@ " << dest << "\n";
+ if (cli_debug_flag)
+ cli_dbgmsg_internal("[Bytecode JIT] @%d\n", dest);
return false;
}
V = Builder.CreateBitCast(V, PointerType::getUnqual(ETy));
@@ -1107,8 +1207,13 @@ public:
};
globals[i] = createGEP(SpecialGV, 0, C, C+1);
if (!globals[i]) {
- errs() << i << ":" << g << ":" << bc->globals[i][0] <<"\n";
- Ty->dump();
+ if (cli_debug_flag) {
+ std::string str;
+ raw_string_ostream ostr(str);
+ ostr << i << ":" << g << ":" << bc->globals[i][0] <<"\n";
+ Ty->print(ostr);
+ cli_dbgmsg_internal("[Bytecode JIT]: %s\n", ostr.str().c_str());
+ }
llvm_report_error("(libclamav) unable to create fake global");
}
globals[i] = Builder.CreateBitCast(globals[i], Ty);
@@ -1264,7 +1369,7 @@ public:
BasicBlock *True = BB[inst->u.branch.br_true];
BasicBlock *False = BB[inst->u.branch.br_false];
if (Cond->getType() != Type::getInt1Ty(Context)) {
- errs() << MODULE << "type mismatch in condition\n";
+ cli_warnmsg("[Bytecode JIT]: type mismatch in condition");
return 0;
}
Builder.CreateCondBr(Cond, True, False);
@@ -1522,17 +1627,22 @@ public:
break;
}
default:
- errs() << MODULE << "JIT doesn't implement opcode " <<
- inst->opcode << " yet!\n";
+ cli_warnmsg("[Bytecode JIT]: JIT doesn't implement opcode %d yet!\n",
+ inst->opcode);
return 0;
}
}
}
if (verifyFunction(*F, PrintMessageAction)) {
- errs() << MODULE << "Verification failed\n";
- F->dump();
// verification failed
+ cli_warnmsg("[Bytecode JIT]: Verification failed\n");
+ if (cli_debug_flag) {
+ std::string str;
+ raw_string_ostream ostr(str);
+ F->print(ostr);
+ cli_dbgmsg_internal("[Bytecode JIT]: %s\n", ostr.str().c_str());
+ }
return 0;
}
delete [] Values;
@@ -1566,7 +1676,7 @@ public:
// If prototype matches, add to callable functions
if (Functions[0]->getFunctionType() != Callable) {
- errs() << "Wrong prototype for function 0 in bytecode " << bc->id << "\n";
+ cli_warnmsg("[Bytecode JIT]: Wrong prototype for function 0 in bytecode %d\n", bc->id);
return 0;
}
// All functions have the Fast calling convention, however
@@ -1611,11 +1721,14 @@ class LLVMApiScopedLock {
// we need to wrap all LLVM API calls with a giant mutex lock, but
// only then.
LLVMApiScopedLock() {
- if (!llvm_is_multithreaded())
+ // It is safer to just run all codegen under the mutex,
+ // it is not like we are going to codegen from multiple threads
+ // at a time anyway.
+// if (!llvm_is_multithreaded())
llvm_api_lock.acquire();
}
~LLVMApiScopedLock() {
- if (!llvm_is_multithreaded())
+// if (!llvm_is_multithreaded())
llvm_api_lock.release();
}
};
@@ -1731,26 +1844,23 @@ static void *bytecode_watchdog(void *arg)
pthread_mutex_unlock(&w->mutex);
if (ret == ETIMEDOUT) {
*w->timeout = 1;
- errs() << "Bytecode run timed out, timeout flag set\n";
+ cli_warnmsg("[Bytecode JIT]: Bytecode run timed out, timeout flag set\n");
}
return NULL;
}
static int bytecode_execute(intptr_t code, struct cli_bc_ctx *ctx)
{
- jmp_buf env;
+ ScopedExceptionHandler handler;
// execute;
- if (setjmp(env) == 0) {
+ HANDLER_TRY(handler) {
// setup exception handler to longjmp back here
- ExceptionReturn.set((const jmp_buf*)&env);
uint32_t result = ((uint32_t (*)(struct cli_bc_ctx *))(intptr_t)code)(ctx);
*(uint32_t*)ctx->values = result;
return 0;
}
- errs() << "\n";
- errs().changeColor(raw_ostream::RED, true) << MODULE
- << "*** JITed code intercepted runtime error!\n";
- errs().resetColor();
+ HANDLER_END(handler);
+ cli_warnmsg("[Bytecode JIT]: JITed code intercepted runtime error!\n");
return CL_EBYTECODE;
}
@@ -1767,10 +1877,10 @@ int cli_vm_execute_jit(const struct cli_all_bc *bcs, struct cli_bc_ctx *ctx,
// if needed.
void *code = bcs->engine->compiledFunctions[func];
if (!code) {
- errs() << MODULE << "Unable to find compiled function\n";
+ cli_warnmsg("[Bytecode JIT]: Unable to find compiled function\n");
if (func->numArgs)
- errs() << MODULE << "Function has "
- << (unsigned)func->numArgs << " arguments, it must have 0 to be called as entrypoint\n";
+ cli_warnmsg("[Bytecode JIT] Function has %d arguments, it must have 0 to be called as entrypoint\n",
+ func->numArgs);
return CL_EBYTECODE;
}
gettimeofday(&tv0, NULL);
@@ -1793,9 +1903,8 @@ int cli_vm_execute_jit(const struct cli_all_bc *bcs, struct cli_bc_ctx *ctx,
/* only spawn if timeout is set.
* we don't set timeout for selfcheck (see bb #2235) */
if ((ret = pthread_create(&thread, NULL, bytecode_watchdog, &w))) {
- errs() << "Bytecode: failed to create new thread!";
- errs() << cli_strerror(ret, buf, sizeof(buf));
- errs() << "\n";
+	    cli_warnmsg("[Bytecode JIT]: failed to create new thread: %s!\n",
+ cli_strerror(ret, buf, sizeof(buf)));
return CL_EBYTECODE;
}
}
@@ -1810,10 +1919,12 @@ int cli_vm_execute_jit(const struct cli_all_bc *bcs, struct cli_bc_ctx *ctx,
}
if (cli_debug_flag) {
+ long diff;
gettimeofday(&tv1, NULL);
tv1.tv_sec -= tv0.tv_sec;
tv1.tv_usec -= tv0.tv_usec;
- errs() << "bytecode finished in " << (tv1.tv_sec*1000000 + tv1.tv_usec) << "us\n";
+ diff = tv1.tv_sec*1000000 + tv1.tv_usec;
+ cli_dbgmsg_internal("bytecode finished in %ld us\n", diff);
}
return ctx->timeout ? CL_ETIMEOUT : ret;
}
@@ -1847,17 +1958,10 @@ int cli_bytecode_prepare_jit(struct cli_all_bc *bcs)
{
if (!bcs->engine)
return CL_EBYTECODE;
- jmp_buf env;
+ ScopedExceptionHandler handler;
LLVMApiScopedLock scopedLock;
// setup exception handler to longjmp back here
- ExceptionReturn.set((const jmp_buf*)&env);
- if (setjmp(env) != 0) {
- errs() << "\n";
- errs().changeColor(raw_ostream::RED, true) << MODULE
- << "*** FATAL error encountered during bytecode generation\n";
- errs().resetColor();
- return CL_EBYTECODE;
- }
+ HANDLER_TRY(handler) {
// LLVM itself never throws exceptions, but operator new may throw bad_alloc
try {
Module *M = new Module("ClamAV jit module", bcs->engine->Context);
@@ -1871,9 +1975,10 @@ int cli_bytecode_prepare_jit(struct cli_all_bc *bcs)
ExecutionEngine *EE = bcs->engine->EE = builder.create();
if (!EE) {
if (!ErrorMsg.empty())
- errs() << MODULE << "error creating execution engine: " << ErrorMsg << "\n";
+ cli_errmsg("[Bytecode JIT]: error creating execution engine: %s\n",
+ ErrorMsg.c_str());
else
- errs() << MODULE << "JIT not registered?\n";
+ cli_errmsg("[Bytecode JIT]: JIT not registered?\n");
return CL_EBYTECODE;
}
bcs->engine->Listener = new NotifyListener();
@@ -1978,7 +2083,7 @@ int cli_bytecode_prepare_jit(struct cli_all_bc *bcs)
OurFPM, OurFPMUnsigned, apiFuncs, apiMap);
Function *F = Codegen.generate();
if (!F) {
- errs() << MODULE << "JIT codegen failed\n";
+ cli_errmsg("[Bytecode JIT]: JIT codegen failed\n");
return CL_EBYTECODE;
}
Functions[i] = F;
@@ -2033,19 +2138,24 @@ int cli_bytecode_prepare_jit(struct cli_all_bc *bcs)
}
return CL_SUCCESS;
} catch (std::bad_alloc &badalloc) {
- errs() << MODULE << badalloc.what() << "\n";
+ cli_errmsg("[Bytecode JIT]: bad_alloc: %s\n",
+ badalloc.what());
return CL_EMEM;
} catch (...) {
- errs() << MODULE << "Unexpected unknown exception occurred.\n";
+	cli_errmsg("[Bytecode JIT]: Unexpected unknown exception occurred\n");
return CL_EBYTECODE;
}
+ return 0;
+ } HANDLER_END(handler);
+ cli_errmsg("[Bytecode JIT] *** FATAL error encountered during bytecode generation\n");
+ return CL_EBYTECODE;
}
int bytecode_init(void)
{
// If already initialized return
if (llvm_is_multithreaded()) {
- errs() << "bytecode_init: already initialized";
+ cli_warnmsg("bytecode_init: already initialized");
return CL_EARG;
}
llvm_install_error_handler(llvm_error_handler);
@@ -2176,9 +2286,9 @@ void cli_bytecode_debug_printsrc(const struct cli_bc_ctx *ctx)
}
assert(ctx->line < lines->linev.size());
+#ifndef LLVM28
int line = (int)ctx->line ? (int)ctx->line : -1;
int col = (int)ctx->col ? (int)ctx->col : -1;
-#ifndef LLVM28
//TODO: print this ourselves, instead of using SMDiagnostic
SMDiagnostic diag(ctx->file, line, col,
"", std::string(lines->linev[ctx->line-1], lines->linev[ctx->line]-1));
@@ -2210,9 +2320,11 @@ void cli_printcxxver()
namespace ClamBCModule {
void stop(const char *msg, llvm::Function* F, llvm::Instruction* I)
{
- if (F && F->hasName())
- llvm::errs() << "in function " << F->getName() << ": ";
- llvm::errs() << msg << "\n";
+ if (F && F->hasName()) {
+	cli_warnmsg("[Bytecode JIT] in function %s: %s\n", F->getNameStr().c_str(), msg);
+ } else {
+	cli_warnmsg("[Bytecode JIT] %s\n", msg);
+ }
}
}
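
Editor's note: the central change in bytecode2llvm.cpp is the ScopedExceptionHandler RAII class plus the HANDLER_TRY/HANDLER_END macros, which replace the ad-hoc setjmp/ExceptionReturn.set() calls and route diagnostics through cli_errmsg/cli_warnmsg/cli_dbgmsg_internal. A sketch of the usage pattern, mirroring bytecode_execute above; run_jitted_code is a hypothetical callee, and the snippet assumes it lives in bytecode2llvm.cpp where these helpers are visible:

// HANDLER_TRY opens a setjmp region and registers it as the per-thread
// recovery point; a longjmp from jit_exception_handler() lands in
// HANDLER_END's else branch; the ScopedExceptionHandler destructor always
// unregisters the jmp_buf on scope exit.
static int guarded_llvm_call(struct cli_bc_ctx *ctx)
{
    ScopedExceptionHandler handler;
    HANDLER_TRY(handler) {
        run_jitted_code(ctx);   /* hypothetical call that may abort via longjmp */
        return 0;
    }
    HANDLER_END(handler);
    cli_warnmsg("[Bytecode JIT]: recovered, aborting this bytecode\n");
    return CL_EBYTECODE;
}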
diff --git a/libclamav/c++/clamavcxx-config.h.in b/libclamav/c++/clamavcxx-config.h.in
index a6b475a..034aba0 100644
--- a/libclamav/c++/clamavcxx-config.h.in
+++ b/libclamav/c++/clamavcxx-config.h.in
@@ -1,5 +1,8 @@
/* clamavcxx-config.h.in. Generated from configure.ac by autoheader. */
+/* Define if building universal (internal helper macro) */
+#undef AC_APPLE_UNIVERSAL_BUILD
+
/* Define to 1 if you have the <dlfcn.h> header file. */
#undef HAVE_DLFCN_H
@@ -54,3 +57,15 @@
/* Define to 1 if you have the ANSI C header files. */
#undef STDC_HEADERS
+
+/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
+ significant byte first (like Motorola and SPARC, unlike Intel). */
+#if defined AC_APPLE_UNIVERSAL_BUILD
+# if defined __BIG_ENDIAN__
+# define WORDS_BIGENDIAN 1
+# endif
+#else
+# ifndef WORDS_BIGENDIAN
+# undef WORDS_BIGENDIAN
+# endif
+#endif
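
Editor's note: the new block lets WORDS_BIGENDIAN be resolved per-architecture in Apple universal builds, where autoheader leaves it undefined and the compiler's __BIG_ENDIAN__ decides at compile time. Consumers keep testing it the usual way; a sketch only, not part of the patch:

/* Example consumer, assuming the generated clamavcxx-config.h is included. */
#include "clamavcxx-config.h"

static int is_big_endian_build(void) {
#ifdef WORDS_BIGENDIAN
    return 1;
#else
    return 0;
#endif
}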
diff --git a/libclamav/c++/configure b/libclamav/c++/configure
index f19918a..42b73cb 100755
--- a/libclamav/c++/configure
+++ b/libclamav/c++/configure
@@ -752,8 +752,6 @@ BUILD_EXTERNAL_LLVM_FALSE
BUILD_EXTERNAL_LLVM_TRUE
NO_MISSING_FIELD_INITIALIZERS
NO_VARIADIC_MACROS
-BUILD_ARM_FALSE
-BUILD_ARM_TRUE
BUILD_PPC_FALSE
BUILD_PPC_TRUE
BUILD_X86_FALSE
@@ -5640,13 +5638,13 @@ if test "${lt_cv_nm_interface+set}" = set; then :
else
lt_cv_nm_interface="BSD nm"
echo "int some_variable = 0;" > conftest.$ac_ext
- (eval echo "\"\$as_me:5643: $ac_compile\"" >&5)
+ (eval echo "\"\$as_me:5641: $ac_compile\"" >&5)
(eval "$ac_compile" 2>conftest.err)
cat conftest.err >&5
- (eval echo "\"\$as_me:5646: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
+ (eval echo "\"\$as_me:5644: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
(eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
cat conftest.err >&5
- (eval echo "\"\$as_me:5649: output\"" >&5)
+ (eval echo "\"\$as_me:5647: output\"" >&5)
cat conftest.out >&5
if $GREP 'External.*some_variable' conftest.out > /dev/null; then
lt_cv_nm_interface="MS dumpbin"
@@ -6851,7 +6849,7 @@ ia64-*-hpux*)
;;
*-*-irix6*)
# Find out which ABI we are using.
- echo '#line 6854 "configure"' > conftest.$ac_ext
+ echo '#line 6852 "configure"' > conftest.$ac_ext
if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
(eval $ac_compile) 2>&5
ac_status=$?
@@ -8639,11 +8637,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:8642: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:8640: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
- echo "$as_me:8646: \$? = $ac_status" >&5
+ echo "$as_me:8644: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@@ -8978,11 +8976,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:8981: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:8979: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
- echo "$as_me:8985: \$? = $ac_status" >&5
+ echo "$as_me:8983: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@@ -9083,11 +9081,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:9086: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:9084: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
- echo "$as_me:9090: \$? = $ac_status" >&5
+ echo "$as_me:9088: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
@@ -9138,11 +9136,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:9141: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:9139: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
- echo "$as_me:9145: \$? = $ac_status" >&5
+ echo "$as_me:9143: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
@@ -11522,7 +11520,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 11525 "configure"
+#line 11523 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -11618,7 +11616,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 11621 "configure"
+#line 11619 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -13574,11 +13572,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:13577: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:13575: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
- echo "$as_me:13581: \$? = $ac_status" >&5
+ echo "$as_me:13579: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@@ -13673,11 +13671,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:13676: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:13674: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
- echo "$as_me:13680: \$? = $ac_status" >&5
+ echo "$as_me:13678: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
@@ -13725,11 +13723,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:13728: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:13726: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
- echo "$as_me:13732: \$? = $ac_status" >&5
+ echo "$as_me:13730: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
@@ -14806,7 +14804,7 @@ else
fi
if test "$enable_alltargets" = "yes"; then
- new_args="$ac_configure_args --enable-targets=x86,powerpc,arm --enable-bindings=none --enable-libffi=no --without-llvmgcc --without-llvmgxx"
+ new_args="$ac_configure_args --enable-targets=x86,powerpc --enable-bindings=none --enable-libffi=no --without-llvmgcc --without-llvmgxx"
else
new_args="$ac_configure_args --enable-targets=host-only --enable-bindings=none --enable-libffi=no --without-llvmgcc --without-llvmgxx"
fi
@@ -14920,7 +14918,6 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
build_x86=no
build_ppc=no
-build_arm=no
case "$target_cpu" in
i?86|amd64|x86_64)
build_x86=yes
@@ -14928,9 +14925,6 @@ case "$target_cpu" in
powerpc*)
build_ppc=yes
;;
- arm*)
- build_arm=yes
- ;;
esac
# FreeBSD is only one which needs something else than -pthread,
@@ -14955,7 +14949,6 @@ esac
if test "$enable_alltargets" = "yes"; then
build_x86=yes
build_ppc=yes
- build_arm=yes
fi
if test "$ac_cv_c_bigendian" = "universal"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: Universal build detected" >&5
@@ -14967,8 +14960,6 @@ fi
$as_echo "$as_me: Building X86 backend: $build_x86" >&6;}
{ $as_echo "$as_me:${as_lineno-$LINENO}: Building PPC backend: $build_ppc" >&5
$as_echo "$as_me: Building PPC backend: $build_ppc" >&6;}
-{ $as_echo "$as_me:${as_lineno-$LINENO}: Building ARM backend: $build_arm" >&5
-$as_echo "$as_me: Building ARM backend: $build_arm" >&6;}
if test "$build_x86" = "yes"; then
BUILD_X86_TRUE=
BUILD_X86_FALSE='#'
@@ -14985,14 +14976,6 @@ else
BUILD_PPC_FALSE=
fi
- if test "$build_arm" = "yes"; then
- BUILD_ARM_TRUE=
- BUILD_ARM_FALSE='#'
-else
- BUILD_ARM_TRUE='#'
- BUILD_ARM_FALSE=
-fi
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking optional compiler flags" >&5
$as_echo_n "checking optional compiler flags... " >&6; }
@@ -15153,10 +15136,6 @@ if test -z "${BUILD_PPC_TRUE}" && test -z "${BUILD_PPC_FALSE}"; then
as_fn_error $? "conditional \"BUILD_PPC\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
-if test -z "${BUILD_ARM_TRUE}" && test -z "${BUILD_ARM_FALSE}"; then
- as_fn_error $? "conditional \"BUILD_ARM\" was never defined.
-Usually this means the macro was only invoked conditionally." "$LINENO" 5
-fi
if test -z "${BUILD_EXTERNAL_LLVM_TRUE}" && test -z "${BUILD_EXTERNAL_LLVM_FALSE}"; then
as_fn_error $? "conditional \"BUILD_EXTERNAL_LLVM\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
diff --git a/libclamav/c++/configure.ac b/libclamav/c++/configure.ac
index 1573c6c..eaffb04 100644
--- a/libclamav/c++/configure.ac
+++ b/libclamav/c++/configure.ac
@@ -99,7 +99,7 @@ AC_ARG_ENABLE(all-jit-targets, AC_HELP_STRING([-enable-all-jit-targets],
[Build all the targets that support JIT for testing (default NO)]),
enable_alltargets=$enableval, enable_alltargets=no)
if test "$enable_alltargets" = "yes"; then
- new_args="$ac_configure_args --enable-targets=x86,powerpc,arm --enable-bindings=none --enable-libffi=no --without-llvmgcc --without-llvmgxx"
+ new_args="$ac_configure_args --enable-targets=x86,powerpc --enable-bindings=none --enable-libffi=no --without-llvmgcc --without-llvmgxx"
else
new_args="$ac_configure_args --enable-targets=host-only --enable-bindings=none --enable-libffi=no --without-llvmgcc --without-llvmgxx"
fi
@@ -174,7 +174,6 @@ AC_LANG_POP([C++])
build_x86=no
build_ppc=no
-build_arm=no
case "$target_cpu" in
i?86|amd64|x86_64)
build_x86=yes
@@ -182,9 +181,6 @@ case "$target_cpu" in
powerpc*)
build_ppc=yes
;;
- arm*)
- build_arm=yes
- ;;
esac
# FreeBSD is only one which needs something else than -pthread,
@@ -209,7 +205,6 @@ AC_SUBST([THREAD_LIBS])
if test "$enable_alltargets" = "yes"; then
build_x86=yes
build_ppc=yes
- build_arm=yes
fi
if test "$ac_cv_c_bigendian" = "universal"; then
AC_MSG_NOTICE([Universal build detected])
@@ -218,10 +213,8 @@ if test "$ac_cv_c_bigendian" = "universal"; then
fi
AC_MSG_NOTICE([Building X86 backend: $build_x86])
AC_MSG_NOTICE([Building PPC backend: $build_ppc])
-AC_MSG_NOTICE([Building ARM backend: $build_arm])
AM_CONDITIONAL(BUILD_X86, [test "$build_x86" = "yes"])
AM_CONDITIONAL(BUILD_PPC, [test "$build_ppc" = "yes"])
-AM_CONDITIONAL(BUILD_ARM, [test "$build_arm" = "yes"])
AC_MSG_CHECKING([optional compiler flags])
CXX_FLAG_CHECK(NO_VARIADIC_MACROS, [-Wno-variadic-macros])
diff --git a/libclamav/c++/llvm/CMakeLists.txt b/libclamav/c++/llvm/CMakeLists.txt
index 9e76de1..1f82b5a 100644
--- a/libclamav/c++/llvm/CMakeLists.txt
+++ b/libclamav/c++/llvm/CMakeLists.txt
@@ -1,10 +1,20 @@
# See docs/CMake.html for instructions about how to build LLVM with CMake.
project(LLVM)
-cmake_minimum_required(VERSION 2.6.1)
+cmake_minimum_required(VERSION 2.8)
+
+# Add path for custom modules
+set(CMAKE_MODULE_PATH
+ ${CMAKE_MODULE_PATH}
+ "${CMAKE_CURRENT_SOURCE_DIR}/cmake"
+ "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules"
+ )
+
+set(PACKAGE_VERSION "2.8")
+include(VersionFromVCS)
+add_version_info_from_vcs(PACKAGE_VERSION)
set(PACKAGE_NAME llvm)
-set(PACKAGE_VERSION 2.7svn)
set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}")
set(PACKAGE_BUGREPORT "llvmbugs at cs.uiuc.edu")
@@ -110,13 +120,6 @@ configure_file(
set(llvm_builded_incs_dir ${LLVM_BINARY_DIR}/include/llvm)
-# Add path for custom modules
-set(CMAKE_MODULE_PATH
- ${CMAKE_MODULE_PATH}
- "${LLVM_MAIN_SRC_DIR}/cmake"
- "${LLVM_MAIN_SRC_DIR}/cmake/modules"
- )
-
include(AddLLVMDefinitions)
if(WIN32)
@@ -200,12 +203,8 @@ if( CMAKE_SIZEOF_VOID_P EQUAL 8 AND NOT WIN32 )
endif( CMAKE_SIZEOF_VOID_P EQUAL 8 AND NOT WIN32 )
if( MSVC )
- # List of valid CRTs for MSVC
- set(MSVC_CRT
- MD
- MDd)
+ include(ChooseMSVCCRT)
- set(LLVM_USE_CRT "" CACHE STRING "Specify VC++ CRT to use for debug/release configurations.")
add_llvm_definitions( -D_CRT_SECURE_NO_DEPRECATE -D_CRT_SECURE_NO_WARNINGS )
add_llvm_definitions( -D_SCL_SECURE_NO_WARNINGS -DCRT_NONSTDC_NO_WARNINGS )
add_llvm_definitions( -D_SCL_SECURE_NO_DEPRECATE )
@@ -215,15 +214,6 @@ if( MSVC )
# Suppress 'new behavior: elements of array 'array' will be default initialized'
add_llvm_definitions( -wd4351 )
- if (NOT ${LLVM_USE_CRT} STREQUAL "")
- list(FIND MSVC_CRT ${LLVM_USE_CRT} idx)
- if (idx LESS 0)
- message(FATAL_ERROR "Invalid value for LLVM_USE_CRT: ${LLVM_USE_CRT}. Valid options are one of: ${MSVC_CRT}")
- endif (idx LESS 0)
- add_llvm_definitions("/${LLVM_USE_CRT}")
- message(STATUS "Using VC++ CRT: ${LLVM_USE_CRT}")
- endif (NOT ${LLVM_USE_CRT} STREQUAL "")
-
# Enable warnings
if (LLVM_ENABLE_WARNINGS)
add_llvm_definitions( /W4 /Wall )
@@ -291,6 +281,7 @@ add_subdirectory(lib/Analysis)
add_subdirectory(lib/Analysis/IPA)
add_subdirectory(lib/MC)
add_subdirectory(lib/MC/MCParser)
+add_subdirectory(lib/MC/MCDisassembler)
add_subdirectory(test)
add_subdirectory(utils/FileCheck)
@@ -352,6 +343,8 @@ add_subdirectory(tools)
option(LLVM_BUILD_EXAMPLES "Build LLVM example programs." OFF)
+add_subdirectory(cmake/modules)
+
install(DIRECTORY include/
DESTINATION include
FILES_MATCHING
diff --git a/libclamav/c++/llvm/CREDITS.TXT b/libclamav/c++/llvm/CREDITS.TXT
index e58b85f..aeecfe2 100644
--- a/libclamav/c++/llvm/CREDITS.TXT
+++ b/libclamav/c++/llvm/CREDITS.TXT
@@ -134,6 +134,11 @@ N: Gabor Greif
E: ggreif at gmail.com
D: Improvements for space efficiency
+N: James Grosbach
+E: grosbach at apple.com
+D: SjLj exception handling support
+D: General fixes and improvements for the ARM back-end
+
N: Lang Hames
E: lhames at gmail.com
D: PBQP-based register allocator
@@ -247,6 +252,12 @@ N: Scott Michel
E: scottm at aero.org
D: Added STI Cell SPU backend.
+N: Takumi Nakamura
+E: geek4civic at gmail.com
+E: chapuni at hf.rim.or.jp
+D: Cygwin and MinGW support.
+S: Yokohama, Japan
+
N: Edward O'Callaghan
E: eocallaghan at auroraux.org
W: http://www.auroraux.org
@@ -277,6 +288,11 @@ N: Sandeep Patel
E: deeppatel1987 at gmail.com
D: ARM calling conventions rewrite, hard float support
+N: Wesley Peck
+E: peckw at wesleypeck.com
+W: http://wesleypeck.com/
+D: MicroBlaze backend
+
N: Vladimir Prus
W: http://vladimir_prus.blogspot.com
E: ghost at cs.msu.su
@@ -288,7 +304,10 @@ D: MSIL backend
N: Duncan Sands
E: baldrick at free.fr
-D: Ada front-end, exception handling improvements
+D: Ada support in llvm-gcc
+D: Dragonegg plugin
+D: Exception handling improvements
+D: Type legalizer rewrite
N: Ruchira Sasanka
E: sasanka at uiuc.edu
@@ -306,6 +325,10 @@ N: Anand Shukla
E: ashukla at cs.uiuc.edu
D: The `paths' pass
+N: Michael J. Spencer
+E: bigcheesegs at gmail.com
+D: Shepherding Windows COFF support into MC.
+
N: Reid Spencer
E: rspencer at reidspencer.com
W: http://reidspencer.com/
@@ -329,14 +352,9 @@ E: xerxes at zafena.se
D: Cmake dependency chain and various bug fixes
N: Bill Wendling
-E: isanbard at gmail.com
+E: wendling at apple.com
D: Bunches of stuff
N: Bob Wilson
E: bob.wilson at acm.org
D: Advanced SIMD (NEON) support in the ARM backend
-
-N: Wesley Peck
-E: peckw at wesleypeck.com
-W: http://wesleypeck.com/
-D: MicroBlaze backend
diff --git a/libclamav/c++/llvm/Makefile b/libclamav/c++/llvm/Makefile
index f5a9b33..ae650b7 100644
--- a/libclamav/c++/llvm/Makefile
+++ b/libclamav/c++/llvm/Makefile
@@ -64,13 +64,23 @@ endif
ifeq ($(MAKECMDGOALS),install-clang)
DIRS := tools/clang/tools/driver tools/clang/lib/Headers \
- tools/clang/lib/Runtime tools/clang/docs
+ tools/clang/runtime tools/clang/docs \
+ tools/lto
+ OPTIONAL_DIRS :=
+ NO_INSTALL = 1
+endif
+
+ifeq ($(MAKECMDGOALS),install-clang-c)
+ DIRS := tools/clang/tools/driver tools/clang/lib/Headers \
+ tools/clang/tools/libclang tools/clang/tools/c-index-test \
+ tools/clang/include/clang-c
OPTIONAL_DIRS :=
NO_INSTALL = 1
endif
ifeq ($(MAKECMDGOALS),clang-only)
- DIRS := $(filter-out tools runtime docs unittests, $(DIRS)) tools/clang
+ DIRS := $(filter-out tools runtime docs unittests, $(DIRS)) \
+ tools/clang tools/lto
OPTIONAL_DIRS :=
endif
@@ -102,7 +112,8 @@ cross-compile-build-tools:
--host=$(BUILD_TRIPLE) --target=$(BUILD_TRIPLE); \
cd .. ; \
fi; \
- ($(MAKE) -C BuildTools \
+ (unset SDKROOT; \
+ $(MAKE) -C BuildTools \
BUILD_DIRS_ONLY=1 \
UNIVERSAL= \
ENABLE_OPTIMIZED=$(ENABLE_OPTIMIZED) \
@@ -110,6 +121,8 @@ cross-compile-build-tools:
ENABLE_COVERAGE=$(ENABLE_COVERAGE) \
DISABLE_ASSERTIONS=$(DISABLE_ASSERTIONS) \
ENABLE_EXPENSIVE_CHECKS=$(ENABLE_EXPENSIVE_CHECKS) \
+ CFLAGS= \
+ CXXFLAGS= \
) || exit 1;
endif
@@ -143,6 +156,7 @@ clang-only: all
tools-only: all
libs-only: all
install-clang: install
+install-clang-c: install
install-libs: install
#------------------------------------------------------------------------
@@ -156,7 +170,7 @@ FilesToConfig := \
include/llvm/Config/AsmParsers.def \
include/llvm/Config/Disassemblers.def \
include/llvm/System/DataTypes.h \
- tools/llvmc/plugins/Base/Base.td
+ tools/llvmc/src/Base.td
FilesToConfigPATH := $(addprefix $(LLVM_OBJ_ROOT)/,$(FilesToConfig))
all-local:: $(FilesToConfigPATH)
@@ -169,8 +183,8 @@ $(FilesToConfigPATH) : $(LLVM_OBJ_ROOT)/% : $(LLVM_SRC_ROOT)/%.in
# that it gets executed last.
ifneq ($(BUILD_DIRS_ONLY),1)
all::
- $(Echo) '*****' Completed $(BuildMode)$(AssertMode) Build
-ifeq ($(BuildMode),Debug)
+ $(Echo) '*****' Completed $(BuildMode) Build
+ifneq ($(ENABLE_OPTIMIZED),1)
$(Echo) '*****' Note: Debug build can be 10 times slower than an
$(Echo) '*****' optimized build. Use 'make ENABLE_OPTIMIZED=1' to
$(Echo) '*****' make an optimized build. Alternatively you can
@@ -181,9 +195,6 @@ endif
check-llvm2cpp:
$(Verb)$(MAKE) check TESTSUITE=Feature RUNLLVM2CPP=1
-check-one:
- $(Verb)$(MAKE) -C test check-one TESTONE=$(TESTONE)
-
srpm: $(LLVM_OBJ_ROOT)/llvm.spec
rpmbuild -bs $(LLVM_OBJ_ROOT)/llvm.spec
@@ -214,7 +225,7 @@ update:
$(SVN) $(SVN-UPDATE-OPTIONS) update $(LLVM_SRC_ROOT)
@ $(SVN) status $(LLVM_SRC_ROOT) | $(SUB-SVN-DIRS) | xargs $(SVN) $(SVN-UPDATE-OPTIONS) update
-happiness: update all check unittests
+happiness: update all check-all
.PHONY: srpm rpm update happiness
diff --git a/libclamav/c++/llvm/Makefile.config.in b/libclamav/c++/llvm/Makefile.config.in
index 1b61f09..5ebd803 100644
--- a/libclamav/c++/llvm/Makefile.config.in
+++ b/libclamav/c++/llvm/Makefile.config.in
@@ -39,14 +39,18 @@ ifndef PROJECT_NAME
PROJECT_NAME := $(LLVMPackageName)
endif
-PROJ_OBJ_DIR := $(shell $(PWD))
-PROJ_OBJ_ROOT := $(shell cd $(PROJ_OBJ_DIR)/$(LEVEL); $(PWD))
+# The macro below is expanded when 'realpath' is not built-in.
+# Built-in 'realpath' is available on GNU Make 3.81.
+realpath = $(shell cd $(1); $(PWD))
+
+PROJ_OBJ_DIR := $(call realpath, .)
+PROJ_OBJ_ROOT := $(call realpath, $(PROJ_OBJ_DIR)/$(LEVEL))
ifeq ($(PROJECT_NAME),llvm)
-LLVM_SRC_ROOT := $(shell cd @abs_top_srcdir@; $(PWD))
-LLVM_OBJ_ROOT := $(shell cd @abs_top_builddir@; $(PWD))
-PROJ_SRC_ROOT := $(shell cd $(LLVM_SRC_ROOT); $(PWD))
-PROJ_SRC_DIR := $(shell cd $(LLVM_SRC_ROOT)/$(patsubst $(PROJ_OBJ_ROOT)%,%,$(PROJ_OBJ_DIR)); $(PWD))
+LLVM_SRC_ROOT := $(call realpath, @abs_top_srcdir@)
+LLVM_OBJ_ROOT := $(call realpath, @abs_top_builddir@)
+PROJ_SRC_ROOT := $(LLVM_SRC_ROOT)
+PROJ_SRC_DIR := $(call realpath, $(LLVM_SRC_ROOT)/$(patsubst $(PROJ_OBJ_ROOT)%,%,$(PROJ_OBJ_DIR)))
prefix := @prefix@
PROJ_prefix := $(prefix)
PROJ_VERSION := $(LLVMVersion)
@@ -66,7 +70,7 @@ endif
ifndef LLVM_OBJ_ROOT
$(error Projects must define LLVM_OBJ_ROOT)
endif
-PROJ_SRC_DIR := $(shell cd $(PROJ_SRC_ROOT)/$(patsubst $(PROJ_OBJ_ROOT)%,%,$(PROJ_OBJ_DIR)); $(PWD))
+PROJ_SRC_DIR := $(call realpath, $(PROJ_SRC_ROOT)/$(patsubst $(PROJ_OBJ_ROOT)%,%,$(PROJ_OBJ_DIR)))
prefix := $(PROJ_INSTALL_ROOT)
PROJ_prefix := $(prefix)
ifndef PROJ_VERSION
@@ -156,6 +160,7 @@ TAR := @TAR@
# Paths to miscellaneous programs we hope are present but might not be
PERL := @PERL@
BZIP2 := @BZIP2@
+CAT := @CAT@
DOT := @DOT@
DOXYGEN := @DOXYGEN@
GROFF := @GROFF@
@@ -167,6 +172,7 @@ OCAMLDOC := @OCAMLDOC@
GAS := @GAS@
POD2HTML := @POD2HTML@
POD2MAN := @POD2MAN@
+PDFROFF := @PDFROFF@
RUNTEST := @RUNTEST@
TCLSH := @TCLSH@
ZIP := @ZIP@
@@ -220,8 +226,8 @@ RDYNAMIC := @RDYNAMIC@
# When ENABLE_PROFILING is enabled, profile instrumentation is done
# and output is put into the "<Flavor>+Profile" directories, where
-# <Flavor> is either Debug or Release depending on how other builkd
-# flags are set.. Otherwise, output is put in the <Flavor>
+# <Flavor> is either Debug or Release depending on how other build
+# flags are set. Otherwise, output is put in the <Flavor>
# directories.
#ENABLE_PROFILING = 1
@ENABLE_PROFILING@
@@ -268,6 +274,9 @@ ENABLE_SHARED := @ENABLE_SHARED@
# Use -fvisibility-inlines-hidden?
ENABLE_VISIBILITY_INLINES_HIDDEN := @ENABLE_VISIBILITY_INLINES_HIDDEN@
+# Do we want to allow timestamping information into builds?
+ENABLE_TIMESTAMPS := @ENABLE_TIMESTAMPS@
+
# This option tells the Makefiles to produce verbose output.
# It essentially prints the commands that make is executing
#VERBOSE = 1
@@ -315,12 +324,6 @@ endif
# Location of the plugin header file for gold.
BINUTILS_INCDIR := @BINUTILS_INCDIR@
-C_INCLUDE_DIRS := @C_INCLUDE_DIRS@
-CXX_INCLUDE_ROOT := @CXX_INCLUDE_ROOT@
-CXX_INCLUDE_ARCH := @CXX_INCLUDE_ARCH@
-CXX_INCLUDE_32BIT_DIR = @CXX_INCLUDE_32BIT_DIR@
-CXX_INCLUDE_64BIT_DIR = @CXX_INCLUDE_64BIT_DIR@
-
# When ENABLE_LLVMC_DYNAMIC is enabled, LLVMC will link libCompilerDriver
# dynamically. This is needed to make dynamic plugins work on some targets
# (Windows).
@@ -337,3 +340,7 @@ ENABLE_LLVMC_DYNAMIC_PLUGINS = 1
NO_MISSING_FIELD_INITIALIZERS = @NO_MISSING_FIELD_INITIALIZERS@
# -Wno-variadic-macros
NO_VARIADIC_MACROS = @NO_VARIADIC_MACROS@
+
+# Flags supported by the linker.
+# bfd ld / gold --version-script=file
+HAVE_LINK_VERSION_SCRIPT = @HAVE_LINK_VERSION_SCRIPT@
diff --git a/libclamav/c++/llvm/Makefile.rules b/libclamav/c++/llvm/Makefile.rules
index 1d4decc..9cff105 100644
--- a/libclamav/c++/llvm/Makefile.rules
+++ b/libclamav/c++/llvm/Makefile.rules
@@ -42,7 +42,7 @@ VPATH=$(PROJ_SRC_DIR)
# Reset the list of suffixes we know how to build.
#--------------------------------------------------------------------
.SUFFIXES:
-.SUFFIXES: .c .cpp .cc .h .hpp .o .a .bc .td .ps .dot .ll
+.SUFFIXES: .c .cpp .cc .h .hpp .o .a .bc .td .ps .dot .ll .m .mm
.SUFFIXES: $(SHLIBEXT) $(SUFFIXES)
#--------------------------------------------------------------------
@@ -196,105 +196,15 @@ install-local:: all-local
install-bytecode:: install-bytecode-local
###############################################################################
-# LLVMC: Provide rules for compiling llvmc plugins
+# LLVMC: Provide rules for compiling llvmc-based driver
###############################################################################
-ifdef LLVMC_PLUGIN
-
-LIBRARYNAME := $(patsubst %,plugin_llvmc_%,$(LLVMC_PLUGIN))
-CPP.Flags += -DLLVMC_PLUGIN_NAME=$(LLVMC_PLUGIN)
-REQUIRES_EH := 1
-
-ifeq ($(ENABLE_LLVMC_DYNAMIC),1)
- LD.Flags += -lCompilerDriver
-endif
-
-# Build a dynamic library if the user runs `make` directly from the plugin
-# directory.
-ifndef LLVMC_BUILTIN_PLUGIN
- LOADABLE_MODULE = 1
-endif
-
-# TableGen stuff...
-ifneq ($(BUILT_SOURCES),)
- LLVMC_BUILD_AUTOGENERATED_INC=1
-endif
-
-endif # LLVMC_PLUGIN
-
ifdef LLVMC_BASED_DRIVER
TOOLNAME = $(LLVMC_BASED_DRIVER)
-REQUIRES_EH := 1
-
-ifeq ($(ENABLE_LLVMC_DYNAMIC),1)
- LD.Flags += -lCompilerDriver
-else
- LLVMLIBS = CompilerDriver.a
- LINK_COMPONENTS = support system
-endif
-
-# Preprocessor magic that generates references to static variables in built-in
-# plugins.
-ifneq ($(LLVMC_BUILTIN_PLUGINS),)
-
-USEDLIBS += $(patsubst %,plugin_llvmc_%.a,$(LLVMC_BUILTIN_PLUGINS))
-
-LLVMC_BUILTIN_PLUGIN_1 = $(word 1, $(LLVMC_BUILTIN_PLUGINS))
-LLVMC_BUILTIN_PLUGIN_2 = $(word 2, $(LLVMC_BUILTIN_PLUGINS))
-LLVMC_BUILTIN_PLUGIN_3 = $(word 3, $(LLVMC_BUILTIN_PLUGINS))
-LLVMC_BUILTIN_PLUGIN_4 = $(word 4, $(LLVMC_BUILTIN_PLUGINS))
-LLVMC_BUILTIN_PLUGIN_5 = $(word 5, $(LLVMC_BUILTIN_PLUGINS))
-LLVMC_BUILTIN_PLUGIN_6 = $(word 6, $(LLVMC_BUILTIN_PLUGINS))
-LLVMC_BUILTIN_PLUGIN_7 = $(word 7, $(LLVMC_BUILTIN_PLUGINS))
-LLVMC_BUILTIN_PLUGIN_8 = $(word 8, $(LLVMC_BUILTIN_PLUGINS))
-LLVMC_BUILTIN_PLUGIN_9 = $(word 9, $(LLVMC_BUILTIN_PLUGINS))
-LLVMC_BUILTIN_PLUGIN_10 = $(word 10, $(LLVMC_BUILTIN_PLUGINS))
-
-
-ifneq ($(LLVMC_BUILTIN_PLUGIN_1),)
-CPP.Flags += -DLLVMC_BUILTIN_PLUGIN_1=$(LLVMC_BUILTIN_PLUGIN_1)
-endif
-
-ifneq ($(LLVMC_BUILTIN_PLUGIN_2),)
-CPP.Flags += -DLLVMC_BUILTIN_PLUGIN_2=$(LLVMC_BUILTIN_PLUGIN_2)
-endif
-
-ifneq ($(LLVMC_BUILTIN_PLUGIN_3),)
-CPP.Flags += -DLLVMC_BUILTIN_PLUGIN_3=$(LLVMC_BUILTIN_PLUGIN_3)
-endif
-
-ifneq ($(LLVMC_BUILTIN_PLUGIN_4),)
-CPP.Flags += -DLLVMC_BUILTIN_PLUGIN_4=$(LLVMC_BUILTIN_PLUGIN_4)
-endif
-
-ifneq ($(LLVMC_BUILTIN_PLUGIN_5),)
-CPP.Flags += -DLLVMC_BUILTIN_PLUGIN_5=$(LLVMC_BUILTIN_PLUGIN_5)
-endif
-
-ifneq ($(LLVMC_BUILTIN_PLUGIN_6),)
-CPP.Flags += -DLLVMC_BUILTIN_PLUGIN_5=$(LLVMC_BUILTIN_PLUGIN_6)
-endif
-
-ifneq ($(LLVMC_BUILTIN_PLUGIN_7),)
-CPP.Flags += -DLLVMC_BUILTIN_PLUGIN_5=$(LLVMC_BUILTIN_PLUGIN_7)
-endif
-
-ifneq ($(LLVMC_BUILTIN_PLUGIN_8),)
-CPP.Flags += -DLLVMC_BUILTIN_PLUGIN_5=$(LLVMC_BUILTIN_PLUGIN_8)
-endif
-
-ifneq ($(LLVMC_BUILTIN_PLUGIN_9),)
-CPP.Flags += -DLLVMC_BUILTIN_PLUGIN_5=$(LLVMC_BUILTIN_PLUGIN_9)
-endif
-
-ifneq ($(LLVMC_BUILTIN_PLUGIN_10),)
-CPP.Flags += -DLLVMC_BUILTIN_PLUGIN_5=$(LLVMC_BUILTIN_PLUGIN_10)
-endif
-
-
-endif
+LLVMLIBS = CompilerDriver.a
+LINK_COMPONENTS = support system
endif # LLVMC_BASED_DRIVER
@@ -398,12 +308,11 @@ endif
# If DISABLE_ASSERTIONS=1 is specified (make command line or configured),
# then disable assertions by defining the appropriate preprocessor symbols.
-ifdef DISABLE_ASSERTIONS
- # Indicate that assertions are turned off using a minus sign
- BuildMode := $(BuildMode)-Asserts
- CPP.Defines += -DNDEBUG
-else
+ifndef DISABLE_ASSERTIONS
+ BuildMode := $(BuildMode)+Asserts
CPP.Defines += -D_DEBUG
+else
+ CPP.Defines += -DNDEBUG
endif
# If ENABLE_EXPENSIVE_CHECKS=1 is specified (make command line or
@@ -447,6 +356,14 @@ else
endif
endif
+# Support makefile variable to disable any kind of timestamp/non-deterministic
+# info from being used in the build.
+ifeq ($(ENABLE_TIMESTAMPS),1)
+ DOTDIR_TIMESTAMP_COMMAND := $(DATE)
+else
+ DOTDIR_TIMESTAMP_COMMAND := echo 'Created.'
+endif
+
ifeq ($(HOST_OS),MingW)
# Work around PR4957
CPP.Defines += -D__NO_CTYPE_INLINE
@@ -494,6 +411,26 @@ LLVMToolDir := $(LLVM_OBJ_ROOT)/$(BuildMode)/bin
LLVMExmplDir:= $(LLVM_OBJ_ROOT)/$(BuildMode)/examples
#--------------------------------------------------------------------
+# Locations of shared libraries
+#--------------------------------------------------------------------
+
+SharedPrefix := lib
+SharedLibDir := $(LibDir)
+LLVMSharedLibDir := $(LLVMLibDir)
+
+# Win32.DLL prefers to be located on the "PATH" of binaries.
+ifeq ($(HOST_OS), $(filter $(HOST_OS), Cygwin MingW))
+ SharedLibDir := $(ToolDir)
+ LLVMSharedLibDir := $(LLVMToolDir)
+
+ ifeq ($(HOST_OS),Cygwin)
+ SharedPrefix := cyg
+ else
+ SharedPrefix :=
+ endif
+endif
+
+#--------------------------------------------------------------------
# LLVM Capable Compiler
#--------------------------------------------------------------------
@@ -560,38 +497,30 @@ ifeq ($(HOST_OS),Darwin)
# Get "4" out of 10.4 for later pieces in the makefile.
DARWIN_MAJVERS := $(shell echo $(DARWIN_VERSION)| sed -E 's/10.([0-9]).*/\1/')
- SharedLinkOptions=-Wl,-flat_namespace -Wl,-undefined -Wl,suppress \
+ SharedLinkOptions=-Wl,-flat_namespace -Wl,-undefined,suppress \
-dynamiclib
ifneq ($(ARCH),ARM)
SharedLinkOptions += -mmacosx-version-min=$(DARWIN_VERSION)
endif
else
- ifeq ($(HOST_OS),Cygwin)
- SharedLinkOptions=-shared -nostdlib -Wl,--export-all-symbols \
- -Wl,--enable-auto-import -Wl,--enable-auto-image-base
- else
- SharedLinkOptions=-shared
- endif
+ SharedLinkOptions=-shared
endif
ifeq ($(TARGET_OS),Darwin)
ifneq ($(ARCH),ARM)
TargetCommonOpts += -mmacosx-version-min=$(DARWIN_VERSION)
- else
- TargetCommonOpts += -marm
endif
endif
-# Adjust LD.Flags depending on the kind of library that is to be built. Note
-# that if LOADABLE_MODULE is specified then the resulting shared library can
-# be opened with dlopen.
-ifdef LOADABLE_MODULE
- LD.Flags += -module
-endif
-
ifdef SHARED_LIBRARY
+ifneq ($(HOST_OS), $(filter $(HOST_OS), Cygwin MingW))
+ifneq ($(HOST_OS),Darwin)
+ LD.Flags += $(RPATH) -Wl,'$$ORIGIN'
+else
ifneq ($(DARWIN_MAJVERS),4)
- LD.Flags += $(RPATH) -Wl,$(LibDir)
+ LD.Flags += $(RPATH) -Wl,$(SharedLibDir)
+endif
+endif
endif
endif
@@ -619,8 +548,8 @@ ifndef KEEP_SYMBOLS
endif
# Adjust linker flags for building an executable
-ifneq ($(HOST_OS),Darwin)
-ifneq ($(DARWIN_MAJVERS),4)
+ifneq ($(HOST_OS), $(filter $(HOST_OS), Cygwin MingW))
+ifneq ($(HOST_OS), Darwin)
ifdef TOOLNAME
LD.Flags += $(RPATH) -Wl,'$$ORIGIN/../lib'
ifdef EXAMPLE_TOOL
@@ -629,9 +558,14 @@ ifdef TOOLNAME
LD.Flags += $(RPATH) -Wl,$(ToolDir) $(RDYNAMIC)
endif
endif
+else
+ifneq ($(DARWIN_MAJVERS),4)
+ LD.Flags += $(RPATH) -Wl,@executable_path/../lib
+endif
endif
endif
+
#----------------------------------------------------------
# Options To Invoke Tools
#----------------------------------------------------------
@@ -641,6 +575,8 @@ CompileCommonOpts += -pedantic -Wno-long-long
endif
CompileCommonOpts += -Wall -W -Wno-unused-parameter -Wwrite-strings \
$(EXTRA_OPTIONS)
+# Enable cast-qual for C++; the workaround is to use const_cast.
+CXX.Flags += -Wcast-qual
ifeq ($(HOST_OS),HP-UX)
CompileCommonOpts := -D_REENTRANT -D_HPUX_SOURCE
@@ -786,7 +722,7 @@ $(DESTDIR)$(PROJ_bindir) $(DESTDIR)$(PROJ_libdir) $(DESTDIR)$(PROJ_includedir) $
# To create other directories, as needed, and timestamp their creation
%/.dir:
$(Verb) $(MKDIR) $* > /dev/null
- $(Verb) $(DATE) > $@
+ $(Verb) $(DOTDIR_TIMESTAMP_COMMAND) > $@
.PRECIOUS: $(ObjDir)/.dir $(LibDir)/.dir $(ToolDir)/.dir $(ExmplDir)/.dir
.PRECIOUS: $(LLVMLibDir)/.dir $(LLVMToolDir)/.dir $(LLVMExmplDir)/.dir
@@ -802,7 +738,8 @@ SubDirs += $(DIRS)
ifneq ($(PROJ_SRC_ROOT),$(PROJ_OBJ_ROOT))
$(RecursiveTargets)::
$(Verb) for dir in $(DIRS); do \
- if [ ! -f $$dir/Makefile ]; then \
+ if ([ ! -f $$dir/Makefile ] || \
+ command test $$dir/Makefile -ot $(PROJ_SRC_DIR)/$$dir/Makefile ); then \
$(MKDIR) $$dir; \
$(CP) $(PROJ_SRC_DIR)/$$dir/Makefile $$dir/Makefile; \
fi; \
@@ -824,7 +761,8 @@ endif
ifdef EXPERIMENTAL_DIRS
$(RecursiveTargets)::
$(Verb) for dir in $(EXPERIMENTAL_DIRS); do \
- if [ ! -f $$dir/Makefile ]; then \
+ if ([ ! -f $$dir/Makefile ] || \
+ command test $$dir/Makefile -ot $(PROJ_SRC_DIR)/$$dir/Makefile ); then \
$(MKDIR) $$dir; \
$(CP) $(PROJ_SRC_DIR)/$$dir/Makefile $$dir/Makefile; \
fi; \
@@ -858,7 +796,9 @@ unitcheck:: $(addsuffix /.makeunitcheck,$(PARALLEL_DIRS))
ParallelTargets := $(foreach T,$(RecursiveTargets),%/.make$(T))
$(ParallelTargets) :
- $(Verb) if [ ! -f $(@D)/Makefile ]; then \
+ $(Verb) if ([ ! -f $(@D)/Makefile ] || \
+ command test $(@D)/Makefile -ot \
+ $(PROJ_SRC_DIR)/$(@D)/Makefile ); then \
$(MKDIR) $(@D); \
$(CP) $(PROJ_SRC_DIR)/$(@D)/Makefile $(@D)/Makefile; \
fi; \
@@ -877,7 +817,8 @@ ifneq ($(PROJ_SRC_ROOT),$(PROJ_OBJ_ROOT))
$(RecursiveTargets)::
$(Verb) for dir in $(OPTIONAL_DIRS); do \
if [ -d $(PROJ_SRC_DIR)/$$dir ]; then\
- if [ ! -f $$dir/Makefile ]; then \
+ if ([ ! -f $$dir/Makefile ] || \
+ command test $$dir/Makefile -ot $(PROJ_SRC_DIR)/$$dir/Makefile ); then \
$(MKDIR) $$dir; \
$(CP) $(PROJ_SRC_DIR)/$$dir/Makefile $$dir/Makefile; \
fi; \
@@ -925,7 +866,7 @@ endif
endif
###############################################################################
-# Set up variables for building libararies
+# Set up variables for building libraries
###############################################################################
#---------------------------------------------------------
@@ -949,6 +890,13 @@ LLVMUsedLibs := $(patsubst %.a.o, lib%.a, $(addsuffix .o, $(LLVMLIBS)))
LLVMLibsPaths := $(addprefix $(LLVMLibDir)/,$(LLVMUsedLibs))
endif
+# Win32.DLL may refer to other components.
+ifeq ($(HOST_OS), $(filter $(HOST_OS), Cygwin MingW))
+ ifdef LOADABLE_MODULE
+ LINK_COMPONENTS := all
+ endif
+endif
+
ifndef IS_CLEANING_TARGET
ifdef LINK_COMPONENTS
@@ -961,16 +909,99 @@ $(LLVM_CONFIG):
$(ToolDir)/$(strip $(TOOLNAME))$(EXEEXT): $(LLVM_CONFIG)
ifeq ($(ENABLE_SHARED), 1)
+# We can take the "auto-import" feature to get rid of using dllimport.
+ifeq ($(HOST_OS), $(filter $(HOST_OS), Cygwin MingW))
+LLVMLibsOptions += -Wl,--enable-auto-import,--enable-runtime-pseudo-reloc \
+ -L $(SharedLibDir)
+endif
LLVMLibsOptions += -lLLVM-$(LLVMVersion)
-LLVMLibsPaths += $(LibDir)/libLLVM-$(LLVMVersion)$(SHLIBEXT)
+LLVMLibsPaths += $(SharedLibDir)/$(SharedPrefix)LLVM-$(LLVMVersion)$(SHLIBEXT)
else
-LLVMLibsOptions += $(shell $(LLVM_CONFIG) --libs $(LINK_COMPONENTS))
-LLVMLibsPaths += $(LLVM_CONFIG) \
- $(shell $(LLVM_CONFIG) --libfiles $(LINK_COMPONENTS))
+
+ifndef NO_LLVM_CONFIG
+LLVMConfigLibs := $(shell $(LLVM_CONFIG) --libs $(LINK_COMPONENTS) || echo Error)
+ifeq ($(LLVMConfigLibs),Error)
+$(error llvm-config --libs failed)
+endif
+LLVMLibsOptions += $(LLVMConfigLibs)
+LLVMConfigLibfiles := $(shell $(LLVM_CONFIG) --libfiles $(LINK_COMPONENTS) || echo Error)
+ifeq ($(LLVMConfigLibfiles),Error)
+$(error llvm-config --libfiles failed)
+endif
+LLVMLibsPaths += $(LLVM_CONFIG) $(LLVMConfigLibfiles)
+endif
+
endif
endif
endif
+# Set up the library exports file.
+ifdef EXPORTED_SYMBOL_FILE
+
+# First, set up the native export file, which may differ from the source
+# export file.
+
+# The option --version-script is not effective on GNU ld win32.
+ifneq (,$(filter $(HOST_OS),Cygwin MingW))
+ HAVE_LINK_VERSION_SCRIPT := 0
+endif
+
+ifeq ($(HOST_OS),Darwin)
+# Darwin convention prefixes symbols with underscores.
+NativeExportsFile := $(ObjDir)/$(notdir $(EXPORTED_SYMBOL_FILE)).sed
+$(NativeExportsFile): $(EXPORTED_SYMBOL_FILE) $(ObjDir)/.dir
+ $(Verb) sed -e 's/^/_/' < $< > $@
+clean-local::
+ -$(Verb) $(RM) -f $(NativeExportsFile)
+else
+ifeq ($(HAVE_LINK_VERSION_SCRIPT),1)
+# Gold and BFD ld require a version script rather than a plain list.
+NativeExportsFile := $(ObjDir)/$(notdir $(EXPORTED_SYMBOL_FILE)).map
+$(NativeExportsFile): $(EXPORTED_SYMBOL_FILE) $(ObjDir)/.dir
+ $(Verb) echo "{" > $@
+ $(Verb) grep -q "\<" $< && echo " global:" >> $@ || :
+ $(Verb) sed -e 's/$$/;/' -e 's/^/ /' < $< >> $@
+ $(Verb) echo " local: *;" >> $@
+ $(Verb) echo "};" >> $@
+clean-local::
+ -$(Verb) $(RM) -f $(NativeExportsFile)
+else
+ifeq ($(HOST_OS), $(filter $(HOST_OS), Cygwin MingW))
+# GNU ld Win32 accepts .DEF files that contain "DATA" entries.
+NativeExportsFile := $(ObjDir)/$(notdir $(EXPORTED_SYMBOL_FILE:.exports=.def))
+$(NativeExportsFile): $(EXPORTED_SYMBOL_FILE) $(ObjDir)/.dir
+ $(Echo) Generating $(notdir $@)
+ $(Verb) $(ECHO) "EXPORTS" > $@
+ $(Verb) $(CAT) $< >> $@
+clean-local::
+ -$(Verb) $(RM) -f $(NativeExportsFile)
+else
+# Default behavior: just use the exports file verbatim.
+NativeExportsFile := $(EXPORTED_SYMBOL_FILE)
+endif
+endif
+endif
+
+# Now add the linker command-line options to use the native export file.
+
+# Darwin
+ifeq ($(HOST_OS),Darwin)
+LLVMLibsOptions += -Wl,-exported_symbols_list,$(NativeExportsFile)
+endif
+
+# gold, bfd ld, etc.
+ifeq ($(HAVE_LINK_VERSION_SCRIPT),1)
+LLVMLibsOptions += -Wl,--version-script,$(NativeExportsFile)
+endif
+
+# Windows
+ifeq ($(HOST_OS), $(filter $(HOST_OS), Cygwin MingW))
+# LLVMLibsOptions is invalidated at processing tools/llvm-shlib.
+SharedLinkOptions += $(NativeExportsFile)
+endif
+
+endif
+
###############################################################################
# Library Build Rules: Four ways to build a library
###############################################################################
@@ -1043,10 +1074,10 @@ ifdef LIBRARYNAME
LIBRARYNAME := $(strip $(LIBRARYNAME))
ifdef LOADABLE_MODULE
LibName.A := $(LibDir)/$(LIBRARYNAME).a
-LibName.SO := $(LibDir)/$(LIBRARYNAME)$(SHLIBEXT)
+LibName.SO := $(SharedLibDir)/$(LIBRARYNAME)$(SHLIBEXT)
else
LibName.A := $(LibDir)/lib$(LIBRARYNAME).a
-LibName.SO := $(LibDir)/lib$(LIBRARYNAME)$(SHLIBEXT)
+LibName.SO := $(SharedLibDir)/$(SharedPrefix)$(LIBRARYNAME)$(SHLIBEXT)
endif
LibName.O := $(LibDir)/$(LIBRARYNAME).o
LibName.BCA:= $(LibDir)/lib$(LIBRARYNAME).bca
@@ -1061,20 +1092,24 @@ ifdef SHARED_LIBRARY
all-local:: $(LibName.SO)
+ifdef EXPORTED_SYMBOL_FILE
+$(LibName.SO): $(NativeExportsFile)
+endif
+
ifdef LINK_LIBS_IN_SHARED
ifdef LOADABLE_MODULE
SharedLibKindMessage := "Loadable Module"
else
SharedLibKindMessage := "Shared Library"
endif
-$(LibName.SO): $(ObjectsO) $(ProjLibsPaths) $(LLVMLibsPaths) $(LibDir)/.dir
+$(LibName.SO): $(ObjectsO) $(ProjLibsPaths) $(LLVMLibsPaths) $(SharedLibDir)/.dir
$(Echo) Linking $(BuildMode) $(SharedLibKindMessage) \
- $(LIBRARYNAME)$(SHLIBEXT)
+ $(notdir $@)
$(Verb) $(Link) $(SharedLinkOptions) -o $@ $(ObjectsO) \
$(ProjLibsOptions) $(LLVMLibsOptions) $(LIBS)
else
-$(LibName.SO): $(ObjectsO) $(LibDir)/.dir
- $(Echo) Linking $(BuildMode) Shared Library $(LIBRARYNAME)$(SHLIBEXT)
+$(LibName.SO): $(ObjectsO) $(SharedLibDir)/.dir
+ $(Echo) Linking $(BuildMode) Shared Library $(notdir $@)
$(Verb) $(Link) $(SharedLinkOptions) -o $@ $(ObjectsO)
endif
@@ -1089,17 +1124,24 @@ install-local::
uninstall-local::
$(Echo) Uninstall circumvented with NO_INSTALL
else
-DestSharedLib = $(DESTDIR)$(PROJ_libdir)/lib$(LIBRARYNAME)$(SHLIBEXT)
+
+# Win32.DLL prefers to be located on the "PATH" of binaries.
+ifeq ($(HOST_OS), $(filter $(HOST_OS), Cygwin MingW))
+DestSharedLibDir := $(DESTDIR)$(PROJ_bindir)
+else
+DestSharedLibDir := $(DESTDIR)$(PROJ_libdir)
+endif
+DestSharedLib := $(DestSharedLibDir)/$(SharedPrefix)$(LIBRARYNAME)$(SHLIBEXT)
install-local:: $(DestSharedLib)
-$(DestSharedLib): $(LibName.SO) $(DESTDIR)$(PROJ_libdir)
+$(DestSharedLib): $(LibName.SO) $(DestSharedLibDir)
$(Echo) Installing $(BuildMode) Shared Library $(DestSharedLib)
$(Verb) $(INSTALL) $(LibName.SO) $(DestSharedLib)
uninstall-local::
$(Echo) Uninstalling $(BuildMode) Shared Library $(DestSharedLib)
- -$(Verb) $(RM) -f $(DESTDIR)$(PROJ_libdir)/lib$(LIBRARYNAME).*
+ -$(Verb) $(RM) -f $(DestSharedLibDir)/$(SharedPrefix)$(LIBRARYNAME).*
endif
endif
@@ -1208,6 +1250,12 @@ install-local::
uninstall-local::
$(Echo) Uninstall circumvented with NO_INSTALL
else
+ifdef NO_INSTALL_ARCHIVES
+install-local::
+ $(Echo) Install circumvented with NO_INSTALL
+uninstall-local::
+ $(Echo) Uninstall circumvented with NO_INSTALL
+else
DestArchiveLib := $(DESTDIR)$(PROJ_libdir)/lib$(LIBRARYNAME).a
install-local:: $(DestArchiveLib)
@@ -1222,6 +1270,7 @@ uninstall-local::
-$(Verb) $(RM) -f $(DestArchiveLib)
endif
endif
+endif
# endif LIBRARYNAME
endif
@@ -1263,15 +1312,38 @@ ifeq ($(HOST_OS),Darwin)
# Tiger tools don't support this.
ifneq ($(DARWIN_MAJVERS),4)
-LD.Flags += -Wl,-exported_symbol -Wl,_main
+LD.Flags += -Wl,-exported_symbol,_main
endif
endif
ifeq ($(HOST_OS), $(filter $(HOST_OS), Linux NetBSD FreeBSD))
+ifneq ($(ARCH), Mips)
LD.Flags += -Wl,--version-script=$(LLVM_SRC_ROOT)/autoconf/ExportMap.map
endif
endif
+endif
+
+#---------------------------------------------------------
+# Tool Version Info Support
+#---------------------------------------------------------
+
+ifeq ($(HOST_OS),Darwin)
+ifdef TOOL_INFO_PLIST
+
+LD.Flags += -Wl,-sectcreate,__TEXT,__info_plist,$(ObjDir)/$(TOOL_INFO_PLIST)
+
+$(ToolBuildPath): $(ObjDir)/$(TOOL_INFO_PLIST)
+$(ObjDir)/$(TOOL_INFO_PLIST): $(PROJ_SRC_DIR)/$(TOOL_INFO_PLIST).in $(ObjDir)/.dir
+ $(Echo) "Creating $(TOOLNAME) '$(TOOL_INFO_PLIST)' file..."
+ $(Verb)sed -e "s#@TOOL_INFO_UTI@#$(TOOL_INFO_UTI)#g" \
+ -e "s#@TOOL_INFO_NAME@#$(TOOL_INFO_NAME)#g" \
+ -e "s#@TOOL_INFO_VERSION@#$(TOOL_INFO_VERSION)#g" \
+ -e "s#@TOOL_INFO_BUILD_VERSION@#$(TOOL_INFO_BUILD_VERSION)#g" \
+ $< > $@
+
+endif
+endif
#---------------------------------------------------------
# Provide targets for building the tools
@@ -1304,7 +1376,7 @@ $(ToolAliasBuildPath): $(ToolBuildPath)
$(Echo) Creating $(BuildMode) Alias $(TOOLALIAS) $(StripWarnMsg)
$(Verb) $(RM) -f $(ToolAliasBuildPath)
$(Verb) $(AliasTool) $(TOOLEXENAME) $(ToolAliasBuildPath)
- $(Echo) ======= Finished Creating $(BuildMode) Alias $(TOOLNAME) \
+ $(Echo) ======= Finished Creating $(BuildMode) Alias $(TOOLALIAS) \
$(StripWarnMsg)
endif
@@ -1332,7 +1404,7 @@ DestToolAlias = $(DESTDIR)$(PROJ_bindir)/$(TOOLALIAS)$(EXEEXT)
install-local:: $(DestToolAlias)
-$(DestToolAlias): $(DestTool) $(PROJ_bindir)
+$(DestToolAlias): $(DestTool)
$(Echo) Installing $(BuildMode) $(DestToolAlias)
$(Verb) $(RM) -f $(DestToolAlias)
$(Verb) $(AliasTool) $(TOOLEXENAME) $(DestToolAlias)
@@ -1374,6 +1446,11 @@ $(ObjDir)/%.o: %.cpp $(ObjDir)/.dir $(BUILT_SOURCES) $(PROJ_SRC_DIR)/Makefile
$(Verb) if $(Compile.CXX) $(DEPEND_OPTIONS) $< -o $(ObjDir)/$*.o ; \
$(DEPEND_MOVEFILE)
+$(ObjDir)/%.o: %.mm $(ObjDir)/.dir $(BUILT_SOURCES) $(PROJ_SRC_DIR)/Makefile
+ $(Echo) "Compiling $*.mm for $(BuildMode) build" $(PIC_FLAG)
+ $(Verb) if $(Compile.CXX) $(DEPEND_OPTIONS) $< -o $(ObjDir)/$*.o ; \
+ $(DEPEND_MOVEFILE)
+
$(ObjDir)/%.o: %.cc $(ObjDir)/.dir $(BUILT_SOURCES) $(PROJ_SRC_DIR)/Makefile
$(Echo) "Compiling $*.cc for $(BuildMode) build" $(PIC_FLAG)
$(Verb) if $(Compile.CXX) $(DEPEND_OPTIONS) $< -o $(ObjDir)/$*.o ; \
@@ -1384,6 +1461,11 @@ $(ObjDir)/%.o: %.c $(ObjDir)/.dir $(BUILT_SOURCES) $(PROJ_SRC_DIR)/Makefile
$(Verb) if $(Compile.C) $(DEPEND_OPTIONS) $< -o $(ObjDir)/$*.o ; \
$(DEPEND_MOVEFILE)
+$(ObjDir)/%.o: %.m $(ObjDir)/.dir $(BUILT_SOURCES) $(PROJ_SRC_DIR)/Makefile
+ $(Echo) "Compiling $*.m for $(BuildMode) build" $(PIC_FLAG)
+ $(Verb) if $(Compile.C) $(DEPEND_OPTIONS) $< -o $(ObjDir)/$*.o ; \
+ $(DEPEND_MOVEFILE)
+
#---------------------------------------------------------
# Create .bc files in the ObjDir directory from .cpp .cc and .c files...
#---------------------------------------------------------
@@ -1402,6 +1484,12 @@ $(ObjDir)/%.ll: %.cpp $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCXX)
$< -o $(ObjDir)/$*.ll -S -emit-llvm ; \
$(BC_DEPEND_MOVEFILE)
+$(ObjDir)/%.ll: %.mm $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCXX)
+ $(Echo) "Compiling $*.mm for $(BuildMode) build (bytecode)"
+ $(Verb) if $(BCCompile.CXX) $(BC_DEPEND_OPTIONS) \
+ $< -o $(ObjDir)/$*.ll -S -emit-llvm ; \
+ $(BC_DEPEND_MOVEFILE)
+
$(ObjDir)/%.ll: %.cc $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCXX)
$(Echo) "Compiling $*.cc for $(BuildMode) build (bytecode)"
$(Verb) if $(BCCompile.CXX) $(BC_DEPEND_OPTIONS) \
@@ -1414,6 +1502,12 @@ $(ObjDir)/%.ll: %.c $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCC)
$< -o $(ObjDir)/$*.ll -S -emit-llvm ; \
$(BC_DEPEND_MOVEFILE)
+$(ObjDir)/%.ll: %.m $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCC)
+ $(Echo) "Compiling $*.m for $(BuildMode) build (bytecode)"
+ $(Verb) if $(BCCompile.C) $(BC_DEPEND_OPTIONS) \
+ $< -o $(ObjDir)/$*.ll -S -emit-llvm ; \
+ $(BC_DEPEND_MOVEFILE)
+
# Provide alternate rule sets if dependencies are disabled
else
@@ -1421,6 +1515,10 @@ $(ObjDir)/%.o: %.cpp $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.cpp for $(BuildMode) build" $(PIC_FLAG)
$(Compile.CXX) $< -o $@
+$(ObjDir)/%.o: %.mm $(ObjDir)/.dir $(BUILT_SOURCES)
+ $(Echo) "Compiling $*.mm for $(BuildMode) build" $(PIC_FLAG)
+ $(Compile.CXX) $< -o $@
+
$(ObjDir)/%.o: %.cc $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.cc for $(BuildMode) build" $(PIC_FLAG)
$(Compile.CXX) $< -o $@
@@ -1429,10 +1527,18 @@ $(ObjDir)/%.o: %.c $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.c for $(BuildMode) build" $(PIC_FLAG)
$(Compile.C) $< -o $@
+$(ObjDir)/%.o: %.m $(ObjDir)/.dir $(BUILT_SOURCES)
+ $(Echo) "Compiling $*.m for $(BuildMode) build" $(PIC_FLAG)
+ $(Compile.C) $< -o $@
+
$(ObjDir)/%.ll: %.cpp $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCXX)
$(Echo) "Compiling $*.cpp for $(BuildMode) build (bytecode)"
$(BCCompile.CXX) $< -o $@ -S -emit-llvm
+$(ObjDir)/%.ll: %.mm $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCXX)
+ $(Echo) "Compiling $*.mm for $(BuildMode) build (bytecode)"
+ $(BCCompile.CXX) $< -o $@ -S -emit-llvm
+
$(ObjDir)/%.ll: %.cc $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCXX)
$(Echo) "Compiling $*.cc for $(BuildMode) build (bytecode)"
$(BCCompile.CXX) $< -o $@ -S -emit-llvm
@@ -1441,6 +1547,10 @@ $(ObjDir)/%.ll: %.c $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCC)
$(Echo) "Compiling $*.c for $(BuildMode) build (bytecode)"
$(BCCompile.C) $< -o $@ -S -emit-llvm
+$(ObjDir)/%.ll: %.m $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCC)
+ $(Echo) "Compiling $*.m for $(BuildMode) build (bytecode)"
+ $(BCCompile.C) $< -o $@ -S -emit-llvm
+
endif
@@ -1449,6 +1559,10 @@ $(BuildMode)/%.ii: %.cpp $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.cpp for $(BuildMode) build to .ii file"
$(Verb) $(Preprocess.CXX) $< -o $@
+$(BuildMode)/%.ii: %.mm $(ObjDir)/.dir $(BUILT_SOURCES)
+ $(Echo) "Compiling $*.mm for $(BuildMode) build to .ii file"
+ $(Verb) $(Preprocess.CXX) $< -o $@
+
$(BuildMode)/%.ii: %.cc $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.cc for $(BuildMode) build to .ii file"
$(Verb) $(Preprocess.CXX) $< -o $@
@@ -1457,11 +1571,19 @@ $(BuildMode)/%.i: %.c $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.c for $(BuildMode) build to .i file"
$(Verb) $(Preprocess.C) $< -o $@
+$(BuildMode)/%.i: %.m $(ObjDir)/.dir $(BUILT_SOURCES)
+ $(Echo) "Compiling $*.m for $(BuildMode) build to .i file"
+ $(Verb) $(Preprocess.C) $< -o $@
+
$(ObjDir)/%.s: %.cpp $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.cpp to asm for $(BuildMode) build" $(PIC_FLAG)
$(Compile.CXX) $< -o $@ -S
+$(ObjDir)/%.s: %.mm $(ObjDir)/.dir $(BUILT_SOURCES)
+ $(Echo) "Compiling $*.mm to asm for $(BuildMode) build" $(PIC_FLAG)
+ $(Compile.CXX) $< -o $@ -S
+
$(ObjDir)/%.s: %.cc $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.cc to asm for $(BuildMode) build" $(PIC_FLAG)
$(Compile.CXX) $< -o $@ -S
@@ -1470,6 +1592,10 @@ $(ObjDir)/%.s: %.c $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.c to asm for $(BuildMode) build" $(PIC_FLAG)
$(Compile.C) $< -o $@ -S
+$(ObjDir)/%.s: %.m $(ObjDir)/.dir $(BUILT_SOURCES)
+ $(Echo) "Compiling $*.m to asm for $(BuildMode) build" $(PIC_FLAG)
+ $(Compile.C) $< -o $@ -S
+
# make the C and C++ compilers strip debug info out of bytecode libraries.
ifdef DEBUG_RUNTIME
@@ -1499,7 +1625,7 @@ ifdef TARGET
TABLEGEN_INC_FILES_COMMON = 1
endif
-ifdef LLVMC_BUILD_AUTOGENERATED_INC
+ifdef LLVMC_BASED_DRIVER
TABLEGEN_INC_FILES_COMMON = 1
endif
@@ -1613,25 +1739,36 @@ $(ObjDir)/%GenIntrinsics.inc.tmp : %.td $(ObjDir)/.dir
$(Echo) "Building $(<F) intrinsics information with tblgen"
$(Verb) $(TableGen) -gen-tgt-intrinsic -o $(call SYSPATH, $@) $<
+$(ObjDir)/ARMGenDecoderTables.inc.tmp : ARM.td $(ObjDir)/.dir
+ $(Echo) "Building $(<F) decoder tables with tblgen"
+ $(Verb) $(TableGen) -gen-arm-decoder -o $(call SYSPATH, $@) $<
+
+
clean-local::
-$(Verb) $(RM) -f $(INCFiles)
endif # TARGET
-ifdef LLVMC_BUILD_AUTOGENERATED_INC
+ifdef LLVMC_BASED_DRIVER
+
+TDSrc := $(sort $(strip $(wildcard $(PROJ_SRC_DIR)/*.td)) \
+ $(strip $(wildcard $(PROJ_OBJ_DIR)/*.td)))
-LLVMCPluginSrc := $(sort $(strip $(wildcard $(PROJ_SRC_DIR)/*.td)) \
- $(strip $(wildcard $(PROJ_OBJ_DIR)/*.td)))
+TDCommon := $(strip $(wildcard \
+ $(LLVM_SRC_ROOT)/include/llvm/CompilerDriver/*.td))
-TDFiles := $(LLVMCPluginSrc) \
- $(strip $(wildcard $(LLVM_SRC_ROOT)/include/llvm/CompilerDriver/*.td))
+TDFiles := $(TDSrc) $(TDCommon)
+
+$(INCTMPFiles) : $(TBLGEN) $(TDFiles)
-$(ObjDir)/AutoGenerated.inc.tmp: $(LLVMCPluginSrc) $(ObjDir)/.dir \
- $(TBLGEN) $(TD_COMMON)
- $(Echo) "Building LLVMC configuration library with tblgen"
+$(ObjDir)/%.inc.tmp: %.td $(ObjDir)/.dir
+ $(Echo) "Building LLVMC compilation graph description with tblgen"
$(Verb) $(TableGen) -gen-llvmc -o $(call SYSPATH, $@) $<
-endif # LLVMC_BUILD_AUTOGENERATED_INC
+clean-local::
+ -$(Verb) $(RM) -f $(INCFiles)
+
+endif # LLVMC_BASED_DRIVER
###############################################################################
# OTHER RULES: Other rules needed
@@ -1677,7 +1814,7 @@ ifndef DISABLE_AUTO_DEPENDENCIES
ifndef IS_CLEANING_TARGET
# Get the list of dependency files
-DependSourceFiles := $(basename $(filter %.cpp %.c %.cc, $(Sources)))
+DependSourceFiles := $(basename $(filter %.cpp %.c %.cc %.m %.mm, $(Sources)))
DependFiles := $(DependSourceFiles:%=$(PROJ_OBJ_DIR)/$(BuildMode)/%.d)
# Include bitcode dependency files if using bitcode libraries
@@ -1708,11 +1845,13 @@ check::
$(EchoCmd) No test directory ; \
fi
-check-lit::
+check-lit:: check
+
+check-dg::
$(Verb) if test -d "$(PROJ_OBJ_ROOT)/test" ; then \
if test -f "$(PROJ_OBJ_ROOT)/test/Makefile" ; then \
$(EchoCmd) Running test suite ; \
- $(MAKE) -C $(PROJ_OBJ_ROOT)/test check-local-lit ; \
+ $(MAKE) -C $(PROJ_OBJ_ROOT)/test check-local-dg ; \
else \
$(EchoCmd) No Makefile in test directory ; \
fi ; \
diff --git a/libclamav/c++/llvm/README.txt b/libclamav/c++/llvm/README.txt
index 8b118db..0968e66 100644
--- a/libclamav/c++/llvm/README.txt
+++ b/libclamav/c++/llvm/README.txt
@@ -1,4 +1,4 @@
-Low Level Virtual Machine (LLVM)
+\Low Level Virtual Machine (LLVM)
================================
This directory and its subdirectories contain source code for the Low Level
@@ -12,4 +12,4 @@ Please see the HTML documentation provided in docs/index.html for further
assistance with LLVM.
If you're writing a package for LLVM, see docs/Packaging.html for our
-suggestions.
\ No newline at end of file
+suggestions.
diff --git a/libclamav/c++/llvm/autoconf/configure.ac b/libclamav/c++/llvm/autoconf/configure.ac
index ba2a044..3b1a6c0 100644
--- a/libclamav/c++/llvm/autoconf/configure.ac
+++ b/libclamav/c++/llvm/autoconf/configure.ac
@@ -31,7 +31,7 @@ dnl===
dnl===-----------------------------------------------------------------------===
dnl Initialize autoconf and define the package name, version number and
dnl email address for reporting bugs.
-AC_INIT([[llvm]],[[2.7]],[llvmbugs at cs.uiuc.edu])
+AC_INIT([[llvm]],[[2.8]],[llvmbugs at cs.uiuc.edu])
dnl Provide a copyright substitution and ensure the copyright notice is included
dnl in the output of --version option of the generated configure script.
@@ -62,26 +62,58 @@ dnl Configure all of the projects present in our source tree. While we could
dnl just AC_CONFIG_SUBDIRS on the set of directories in projects that have a
dnl configure script, that usage of the AC_CONFIG_SUBDIRS macro is deprecated.
dnl Instead we match on the known projects.
+
+dnl
+dnl One tricky part of doing this is that some projects depend upon other
+dnl projects. For example, several projects rely upon the LLVM test suite.
+dnl We want to configure those projects first so that their object trees are
+dnl created before running the configure scripts of projects that depend upon
+dnl them.
+dnl
+
+dnl Several projects use llvm-gcc, so configure that first
+if test -d ${srcdir}/projects/llvm-gcc ; then
+ AC_CONFIG_SUBDIRS([projects/llvm-gcc])
+fi
+
+dnl Several projects use the LLVM test suite, so configure it next.
+if test -d ${srcdir}/projects/test-suite ; then
+ AC_CONFIG_SUBDIRS([projects/test-suite])
+fi
+
+dnl llvm-test is the old name of the test-suite, kept here for backwards
+dnl compatibility
+if test -d ${srcdir}/projects/llvm-test ; then
+ AC_CONFIG_SUBDIRS([projects/llvm-test])
+fi
+
+dnl Some projects use poolalloc; configure that next
+if test -d ${srcdir}/projects/poolalloc ; then
+ AC_CONFIG_SUBDIRS([projects/poolalloc])
+fi
+
+if test -d ${srcdir}/projects/llvm-poolalloc ; then
+ AC_CONFIG_SUBDIRS([projects/llvm-poolalloc])
+fi
+
+dnl Check for all other projects
for i in `ls ${srcdir}/projects`
do
if test -d ${srcdir}/projects/${i} ; then
case ${i} in
- CVS) ;;
sample) AC_CONFIG_SUBDIRS([projects/sample]) ;;
privbracket) AC_CONFIG_SUBDIRS([projects/privbracket]) ;;
llvm-stacker) AC_CONFIG_SUBDIRS([projects/llvm-stacker]) ;;
- # llvm-test is the old name of the test-suite, kept here for backwards
- # compatibility
- llvm-test) AC_CONFIG_SUBDIRS([projects/llvm-test]) ;;
- test-suite) AC_CONFIG_SUBDIRS([projects/test-suite]) ;;
llvm-reopt) AC_CONFIG_SUBDIRS([projects/llvm-reopt]);;
- llvm-gcc) AC_CONFIG_SUBDIRS([projects/llvm-gcc]) ;;
llvm-java) AC_CONFIG_SUBDIRS([projects/llvm-java]) ;;
llvm-tv) AC_CONFIG_SUBDIRS([projects/llvm-tv]) ;;
- llvm-poolalloc) AC_CONFIG_SUBDIRS([projects/llvm-poolalloc]) ;;
- poolalloc) AC_CONFIG_SUBDIRS([projects/poolalloc]) ;;
safecode) AC_CONFIG_SUBDIRS([projects/safecode]) ;;
llvm-kernel) AC_CONFIG_SUBDIRS([projects/llvm-kernel]) ;;
+ llvm-gcc) ;;
+ test-suite) ;;
+ llvm-test) ;;
+ poolalloc) ;;
+ llvm-poolalloc) ;;
*)
AC_MSG_WARN([Unknown project (${i}) won't be configured automatically])
;;
@@ -126,6 +158,11 @@ AC_CACHE_CHECK([type of operating system we're going to host on],
llvm_cv_no_link_all_option="-Wl,-noall_load"
llvm_cv_os_type="Darwin"
llvm_cv_platform_type="Unix" ;;
+ *-*-minix*)
+ llvm_cv_link_all_option="-Wl,-all_load"
+ llvm_cv_no_link_all_option="-Wl,-noall_load"
+ llvm_cv_os_type="Minix"
+ llvm_cv_platform_type="Unix" ;;
*-*-freebsd*)
llvm_cv_link_all_option="-Wl,--whole-archive"
llvm_cv_no_link_all_option="-Wl,--no-whole-archive"
@@ -214,6 +251,8 @@ AC_CACHE_CHECK([type of operating system we're going to target],
llvm_cv_target_os_type="Cygwin" ;;
*-*-darwin*)
llvm_cv_target_os_type="Darwin" ;;
+ *-*-minix*)
+ llvm_cv_target_os_type="Minix" ;;
*-*-freebsd*)
llvm_cv_target_os_type="FreeBSD" ;;
*-*-openbsd*)
@@ -259,7 +298,7 @@ dnl Set the LINKALL and NOLINKALL Makefile variables based on the platform
AC_SUBST(LINKALL,$llvm_cv_link_all_option)
AC_SUBST(NOLINKALL,$llvm_cv_no_link_all_option)
-dnl Set the "LLVM_ON_*" variables based on llvm_cvs_platform_type
+dnl Set the "LLVM_ON_*" variables based on llvm_cv_platform_type
dnl This is used by lib/System to determine the basic kind of implementation
dnl to use.
case $llvm_cv_platform_type in
@@ -329,13 +368,13 @@ else
AC_SUBST(LLVM_CROSS_COMPILING, [0])
fi
-dnl Check to see if there's a "CVS" (or .svn or .git) directory indicating
-dnl that this build is being done from a checkout. This sets up several
-dnl defaults for the command line switches. When we build with a CVS directory,
+dnl Check to see if there's a .svn or .git directory indicating that this
+dnl build is being done from a checkout. This sets up several defaults for
+dnl the command line switches. When we build with a checkout directory,
dnl we get a debug with assertions turned on. Without, we assume a source
dnl release and we get an optimized build without assertions.
dnl See --enable-optimized and --enable-assertions below
-if test -d "CVS" -o -d "${srcdir}/CVS" -o -d ".svn" -o -d "${srcdir}/.svn" -o -d ".git" -o -d "${srcdir}/.git"; then
+if test -d ".svn" -o -d "${srcdir}/.svn" -o -d ".git" -o -d "${srcdir}/.git"; then
cvsbuild="yes"
optimize="no"
AC_SUBST(CVSBUILD,[[CVSBUILD=1]])
@@ -352,7 +391,7 @@ dnl===-----------------------------------------------------------------------===
dnl --enable-optimized : check whether they want to do an optimized build:
AC_ARG_ENABLE(optimized, AS_HELP_STRING(
- --enable-optimized,[Compile with optimizations enabled (default is NO)]),,enableval=$optimize)
+ --enable-optimized,[Compile with optimizations enabled (default is YES)]),,enableval="yes")
if test ${enableval} = "no" ; then
AC_SUBST(ENABLE_OPTIMIZED,[[]])
else
@@ -370,7 +409,7 @@ fi
dnl --enable-assertions : check whether they want to turn on assertions or not:
AC_ARG_ENABLE(assertions,AS_HELP_STRING(
- --enable-assertions,[Compile with assertion checks enabled (default is YES)]),, enableval="yes")
+ --enable-assertions,[Compile with assertion checks enabled (default is NO)]),, enableval="no")
if test ${enableval} = "yes" ; then
AC_SUBST(DISABLE_ASSERTIONS,[[]])
else
@@ -485,18 +524,32 @@ case "$enableval" in
*) AC_MSG_ERROR([Invalid setting for --enable-shared. Use "yes" or "no"]) ;;
esac
+dnl Enable embedding timestamp information into build.
+AC_ARG_ENABLE(timestamps,
+ AS_HELP_STRING([--enable-timestamps],
+ [Enable embedding timestamp information in build (default is YES)]),,
+ enableval=default)
+case "$enableval" in
+ yes) AC_SUBST(ENABLE_TIMESTAMPS,[1]) ;;
+ no) AC_SUBST(ENABLE_TIMESTAMPS,[0]) ;;
+ default) AC_SUBST(ENABLE_TIMESTAMPS,[1]) ;;
+ *) AC_MSG_ERROR([Invalid setting for --enable-timestamps. Use "yes" or "no"]) ;;
+esac
+AC_DEFINE_UNQUOTED([ENABLE_TIMESTAMPS],$ENABLE_TIMESTAMPS,
+ [Define if timestamp information (e.g., __DATE___) is allowed])
+
dnl Allow specific targets to be specified for building (or not)
TARGETS_TO_BUILD=""
AC_ARG_ENABLE([targets],AS_HELP_STRING([--enable-targets],
[Build specific host targets: all or target1,target2,... Valid targets are:
host, x86, x86_64, sparc, powerpc, alpha, arm, mips, spu, pic16,
- xcore, msp430, systemz, blackfin, cbe, msil, and cpp (default=all)]),,
+ xcore, msp430, systemz, blackfin, cbe, and cpp (default=all)]),,
enableval=all)
if test "$enableval" = host-only ; then
enableval=host
fi
case "$enableval" in
- all) TARGETS_TO_BUILD="X86 Sparc PowerPC Alpha ARM Mips CellSPU PIC16 XCore MSP430 SystemZ Blackfin CBackend MSIL CppBackend MBlaze" ;;
+ all) TARGETS_TO_BUILD="X86 Sparc PowerPC Alpha ARM Mips CellSPU PIC16 XCore MSP430 SystemZ Blackfin CBackend CppBackend MBlaze" ;;
*)for a_target in `echo $enableval|sed -e 's/,/ /g' ` ; do
case "$a_target" in
x86) TARGETS_TO_BUILD="X86 $TARGETS_TO_BUILD" ;;
@@ -513,7 +566,6 @@ case "$enableval" in
systemz) TARGETS_TO_BUILD="SystemZ $TARGETS_TO_BUILD" ;;
blackfin) TARGETS_TO_BUILD="Blackfin $TARGETS_TO_BUILD" ;;
cbe) TARGETS_TO_BUILD="CBackend $TARGETS_TO_BUILD" ;;
- msil) TARGETS_TO_BUILD="MSIL $TARGETS_TO_BUILD" ;;
cpp) TARGETS_TO_BUILD="CppBackend $TARGETS_TO_BUILD" ;;
mblaze) TARGETS_TO_BUILD="MBlaze $TARGETS_TO_BUILD" ;;
host) case "$llvm_cv_target_arch" in
@@ -544,9 +596,17 @@ AC_SUBST(TARGETS_TO_BUILD,$TARGETS_TO_BUILD)
# If so, define LLVM_NATIVE_ARCH to that LLVM target.
for a_target in $TARGETS_TO_BUILD; do
if test "$a_target" = "$LLVM_NATIVE_ARCH"; then
- LLVM_NATIVE_ARCHTARGET="${LLVM_NATIVE_ARCH}Target"
- AC_DEFINE_UNQUOTED(LLVM_NATIVE_ARCH,$LLVM_NATIVE_ARCHTARGET,
+ AC_DEFINE_UNQUOTED(LLVM_NATIVE_ARCH, $LLVM_NATIVE_ARCH,
[LLVM architecture name for the native architecture, if available])
+ LLVM_NATIVE_TARGET="LLVMInitialize${LLVM_NATIVE_ARCH}Target"
+ LLVM_NATIVE_TARGETINFO="LLVMInitialize${LLVM_NATIVE_ARCH}TargetInfo"
+ LLVM_NATIVE_ASMPRINTER="LLVMInitialize${LLVM_NATIVE_ARCH}AsmPrinter"
+ AC_DEFINE_UNQUOTED(LLVM_NATIVE_TARGET, $LLVM_NATIVE_TARGET,
+ [LLVM name for the native Target init function, if available])
+ AC_DEFINE_UNQUOTED(LLVM_NATIVE_TARGETINFO, $LLVM_NATIVE_TARGETINFO,
+ [LLVM name for the native TargetInfo init function, if available])
+ AC_DEFINE_UNQUOTED(LLVM_NATIVE_ASMPRINTER, $LLVM_NATIVE_ASMPRINTER,
+ [LLVM name for the native AsmPrinter init function, if available])
fi
done
@@ -688,8 +748,9 @@ AC_MSG_CHECKING([optimization flags])
case "$withval" in
default)
case "$llvm_cv_os_type" in
- MingW) optimize_option=-O3 ;;
- *) optimize_option=-O2 ;;
+ FreeBSD) optimize_option=-O2 ;;
+ MingW) optimize_option=-O2 ;;
+ *) optimize_option=-O3 ;;
esac ;;
*) optimize_option="$withval" ;;
esac
@@ -802,35 +863,6 @@ AC_ARG_ENABLE(libffi,AS_HELP_STRING(
esac],
llvm_cv_enable_libffi=no)
-dnl Only Windows needs dynamic libCompilerDriver to support plugins.
-if test "$llvm_cv_os_type" = "Win32" ; then
- llvmc_dynamic="yes"
-else
- llvmc_dynamic="no"
-fi
-
-dnl --enable-llvmc-dynamic : should LLVMC link libCompilerDriver dynamically?
-AC_ARG_ENABLE(llvmc-dynamic,AS_HELP_STRING(
---enable-llvmc-dynamic,
-[Link LLVMC dynamically (default is NO, unless on Win32)]),,
-enableval=$llvmc_dynamic)
-if test ${enableval} = "yes" && test "$ENABLE_PIC" -eq 1 ; then
- AC_SUBST(ENABLE_LLVMC_DYNAMIC,[[ENABLE_LLVMC_DYNAMIC=1]])
-else
- AC_SUBST(ENABLE_LLVMC_DYNAMIC,[[]])
-fi
-
-dnl --enable-llvmc-dynamic-plugins : should LLVMC support dynamic plugins?
-AC_ARG_ENABLE(llvmc-dynamic-plugins,AS_HELP_STRING(
---enable-llvmc-dynamic-plugins,
-[Enable dynamic LLVMC plugins (default is YES)]),,
-enableval=yes)
-if test ${enableval} = "yes" ; then
- AC_SUBST(ENABLE_LLVMC_DYNAMIC_PLUGINS,[[ENABLE_LLVMC_DYNAMIC_PLUGINS=1]])
-else
- AC_SUBST(ENABLE_LLVMC_DYNAMIC_PLUGINS,[[]])
-fi
-
dnl===-----------------------------------------------------------------------===
dnl===
dnl=== SECTION 4: Check for programs we need and that they are the right version
@@ -957,17 +989,26 @@ dnl fi
dnl Find the install program
AC_PROG_INSTALL
+dnl Prepend src dir to install path dir if it's a relative path
+dnl This is a hack for installs that take place in something other
+dnl than the top level.
+case "$INSTALL" in
+ [[\\/$]]* | ?:[[\\/]]* ) ;;
+ *) INSTALL="\\\$(TOPSRCDIR)/$INSTALL" ;;
+esac
dnl Checks for documentation and testing tools that we can do without. If these
dnl are not found then they are set to "true" which always succeeds but does
dnl nothing. This just lets the build output show that we could have done
dnl something if the tool was available.
AC_PATH_PROG(BZIP2, [bzip2])
+AC_PATH_PROG(CAT, [cat])
AC_PATH_PROG(DOXYGEN, [doxygen])
AC_PATH_PROG(GROFF, [groff])
AC_PATH_PROG(GZIP, [gzip])
AC_PATH_PROG(POD2HTML, [pod2html])
AC_PATH_PROG(POD2MAN, [pod2man])
+AC_PATH_PROG(PDFROFF, [pdfroff])
AC_PATH_PROG(RUNTEST, [runtest])
DJ_AC_PATH_TCLSH
AC_PATH_PROG(ZIP, [zip])
@@ -977,12 +1018,18 @@ AC_PATH_PROGS(OCAMLDEP, [ocamldep])
AC_PATH_PROGS(OCAMLDOC, [ocamldoc])
AC_PATH_PROGS(GAS, [gas as])
+dnl Get the version of the linker in use.
+AC_LINK_GET_VERSION
+
dnl Determine whether the linker supports the -R option.
AC_LINK_USE_R
dnl Determine whether the linker supports the -export-dynamic option.
AC_LINK_EXPORT_DYNAMIC
+dnl Determine whether the linker supports the --version-script option.
+AC_LINK_VERSION_SCRIPT
+
dnl Check for libtool and the library that has dlopen function (which must come
dnl before the AC_PROG_LIBTOOL check in order to enable dlopening libraries with
dnl libtool).
@@ -1211,6 +1258,7 @@ AC_CHECK_HEADERS([malloc.h setjmp.h signal.h stdint.h termios.h unistd.h])
AC_CHECK_HEADERS([utime.h windows.h])
AC_CHECK_HEADERS([sys/mman.h sys/param.h sys/resource.h sys/time.h])
AC_CHECK_HEADERS([sys/types.h sys/ioctl.h malloc/malloc.h mach/mach.h])
+AC_CHECK_HEADERS([valgrind/valgrind.h])
if test "$ENABLE_THREADS" -eq 1 ; then
AC_CHECK_HEADERS(pthread.h,
AC_SUBST(HAVE_PTHREAD, 1),
@@ -1224,6 +1272,9 @@ if test "$llvm_cv_enable_libffi" = "yes" ; then
AC_CHECK_HEADERS([ffi.h ffi/ffi.h])
fi
+dnl Try to find Darwin specific crash reporting library.
+AC_CHECK_HEADERS([CrashReporterClient.h])
+
dnl===-----------------------------------------------------------------------===
dnl===
dnl=== SECTION 7: Check for types and structures
@@ -1250,7 +1301,7 @@ AC_CHECK_FUNCS([backtrace ceilf floorf roundf rintf nearbyintf getcwd ])
AC_CHECK_FUNCS([powf fmodf strtof round ])
AC_CHECK_FUNCS([getpagesize getrusage getrlimit setrlimit gettimeofday ])
AC_CHECK_FUNCS([isatty mkdtemp mkstemp ])
-AC_CHECK_FUNCS([mktemp realpath sbrk setrlimit strdup ])
+AC_CHECK_FUNCS([mktemp posix_spawn realpath sbrk setrlimit strdup ])
AC_CHECK_FUNCS([strerror strerror_r strerror_s setenv ])
AC_CHECK_FUNCS([strtoll strtoq sysconf malloc_zone_statistics ])
AC_CHECK_FUNCS([setjmp longjmp sigsetjmp siglongjmp])
@@ -1282,6 +1333,8 @@ fi
dnl atomic builtins are required for threading support.
AC_MSG_CHECKING(for GCC atomic builtins)
+dnl Since we'll be using these atomic builtins in C++ files we should test
+dnl the C++ compiler.
AC_LANG_PUSH([C++])
AC_LINK_IFELSE(
AC_LANG_SOURCE(
@@ -1294,13 +1347,12 @@ AC_LINK_IFELSE(
return 0;
}
]]),
+ AC_LANG_POP([C++])
AC_MSG_RESULT(yes)
AC_DEFINE(LLVM_MULTITHREADED, 1, Build multithreading support into LLVM),
AC_MSG_RESULT(no)
AC_DEFINE(LLVM_MULTITHREADED, 0, Build multithreading support into LLVM)
AC_MSG_WARN([LLVM will be built thread-unsafe because atomic builtins are missing]))
-AC_LANG_POP([C++])
-
dnl===-----------------------------------------------------------------------===
dnl===
@@ -1488,7 +1540,11 @@ dnl WARNING: dnl If you add or remove any of the following config headers, then
dnl you MUST also update Makefile.rules so that the variable FilesToConfig
dnl contains the same list of files as AC_CONFIG_HEADERS below. This ensures the
dnl files can be updated automatically when their *.in sources change.
-AC_CONFIG_HEADERS([include/llvm/Config/config.h])
+AC_CONFIG_HEADERS([include/llvm/Config/config.h include/llvm/Config/llvm-config.h])
+AH_TOP([#ifndef CONFIG_H
+#define CONFIG_H])
+AH_BOTTOM([#endif])
+
AC_CONFIG_FILES([include/llvm/Config/Targets.def])
AC_CONFIG_FILES([include/llvm/Config/AsmPrinters.def])
AC_CONFIG_FILES([include/llvm/Config/AsmParsers.def])
@@ -1501,14 +1557,12 @@ AC_CONFIG_FILES([Makefile.config])
dnl Configure the RPM spec file for LLVM
AC_CONFIG_FILES([llvm.spec])
-dnl Configure doxygen's configuration file
-AC_CONFIG_FILES([docs/doxygen.cfg])
-
dnl Configure llvmc's Base plugin
-AC_CONFIG_FILES([tools/llvmc/plugins/Base/Base.td])
+dnl ClamAV local: no tools
+dnl AC_CONFIG_FILES([tools/llvmc/src/Base.td])
dnl Do the first stage of configuration for llvm-config.in.
-AC_CONFIG_FILES([tools/llvm-config/llvm-config.in])
+dnl AC_CONFIG_FILES([tools/llvm-config/llvm-config.in])
dnl Do special configuration of Makefiles
AC_CONFIG_COMMANDS([setup],,[llvm_src="${srcdir}"])
diff --git a/libclamav/c++/llvm/autoconf/m4/link_options.m4 b/libclamav/c++/llvm/autoconf/m4/link_options.m4
index 66036de..4c5f2f4 100644
--- a/libclamav/c++/llvm/autoconf/m4/link_options.m4
+++ b/libclamav/c++/llvm/autoconf/m4/link_options.m4
@@ -1,4 +1,25 @@
#
+# Get the linker version string.
+#
+# This macro is specific to LLVM.
+#
+AC_DEFUN([AC_LINK_GET_VERSION],
+ [AC_CACHE_CHECK([for linker version],[llvm_cv_link_version],
+ [
+ version_string="$(ld -v 2>&1 | head -1)"
+
+ # Check for ld64.
+ if (echo "$version_string" | grep -q "ld64"); then
+ llvm_cv_link_version=$(echo "$version_string" | sed -e "s#.*ld64-\([^ ]*\)#\1#")
+ else
+ llvm_cv_link_version=$(echo "$version_string" | sed -e "s#[^0-9]*\([0-9.]*\).*#\1#")
+ fi
+ ])
+ AC_DEFINE_UNQUOTED([HOST_LINK_VERSION],"$llvm_cv_link_version",
+ [Linker version detected at compile time.])
+])
+
+#
# Determine if the system can handle the -R option being passed to the linker.
#
# This macro is specific to LLVM.
@@ -8,7 +29,7 @@ AC_DEFUN([AC_LINK_USE_R],
[ AC_LANG_PUSH([C])
oldcflags="$CFLAGS"
CFLAGS="$CFLAGS -Wl,-R."
- AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[int main() { return 0; }]])],
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],
[llvm_cv_link_use_r=yes],[llvm_cv_link_use_r=no])
CFLAGS="$oldcflags"
AC_LANG_POP([C])
@@ -29,7 +50,7 @@ AC_DEFUN([AC_LINK_EXPORT_DYNAMIC],
[ AC_LANG_PUSH([C])
oldcflags="$CFLAGS"
CFLAGS="$CFLAGS -Wl,-export-dynamic"
- AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[int main() { return 0; }]])],
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],
[llvm_cv_link_use_export_dynamic=yes],[llvm_cv_link_use_export_dynamic=no])
CFLAGS="$oldcflags"
AC_LANG_POP([C])
@@ -39,3 +60,49 @@ if test "$llvm_cv_link_use_export_dynamic" = yes ; then
fi
])
+#
+# Determine if the system can handle the --version-script option being
+# passed to the linker.
+#
+# This macro is specific to LLVM.
+#
+AC_DEFUN([AC_LINK_VERSION_SCRIPT],
+[AC_CACHE_CHECK([for compiler -Wl,--version-script option],
+ [llvm_cv_link_use_version_script],
+[ AC_LANG_PUSH([C])
+ oldcflags="$CFLAGS"
+
+ # The following code is from the autoconf manual,
+ # "11.13: Limitations of Usual Tools".
+ # Create a temporary directory $tmp in $TMPDIR (default /tmp).
+ # Use mktemp if possible; otherwise fall back on mkdir,
+ # with $RANDOM to make collisions less likely.
+ : ${TMPDIR=/tmp}
+ {
+ tmp=`
+ (umask 077 && mktemp -d "$TMPDIR/fooXXXXXX") 2>/dev/null
+ ` &&
+ test -n "$tmp" && test -d "$tmp"
+ } || {
+ tmp=$TMPDIR/foo$$-$RANDOM
+ (umask 077 && mkdir "$tmp")
+ } || exit $?
+
+ echo "{" > "$tmp/export.map"
+ echo " global: main;" >> "$tmp/export.map"
+ echo " local: *;" >> "$tmp/export.map"
+ echo "};" >> "$tmp/export.map"
+
+ CFLAGS="$CFLAGS -Wl,--version-script=$tmp/export.map"
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],
+ [llvm_cv_link_use_version_script=yes],[llvm_cv_link_use_version_script=no])
+ rm "$tmp/export.map"
+ rmdir "$tmp"
+ CFLAGS="$oldcflags"
+ AC_LANG_POP([C])
+])
+if test "$llvm_cv_link_use_version_script" = yes ; then
+ AC_SUBST(HAVE_LINK_VERSION_SCRIPT,1)
+ fi
+])
+
diff --git a/libclamav/c++/llvm/cmake/config-ix.cmake b/libclamav/c++/llvm/cmake/config-ix.cmake
index 3a2b91c..f75e5df 100755
--- a/libclamav/c++/llvm/cmake/config-ix.cmake
+++ b/libclamav/c++/llvm/cmake/config-ix.cmake
@@ -4,7 +4,7 @@ include(CheckSymbolExists)
include(CheckFunctionExists)
include(CheckCXXSourceCompiles)
-if( UNIX )
+if( UNIX AND NOT BEOS )
# Used by check_symbol_exists:
set(CMAKE_REQUIRED_LIBRARIES m)
endif()
@@ -67,6 +67,7 @@ check_include_file(sys/wait.h HAVE_SYS_WAIT_H)
check_include_file(termios.h HAVE_TERMIOS_H)
check_include_file(unistd.h HAVE_UNISTD_H)
check_include_file(utime.h HAVE_UTIME_H)
+check_include_file(valgrind/valgrind.h HAVE_VALGRIND_VALGRIND_H)
check_include_file(windows.h HAVE_WINDOWS_H)
# library checks
@@ -213,6 +214,9 @@ if (LLVM_NATIVE_ARCH)
set(LLVM_NATIVE_ARCH)
else ()
message(STATUS "Native target architecture is ${LLVM_NATIVE_ARCH}")
+ set(LLVM_NATIVE_TARGET LLVMInitialize${LLVM_NATIVE_ARCH}Target)
+ set(LLVM_NATIVE_TARGETINFO LLVMInitialize${LLVM_NATIVE_ARCH}TargetInfo)
+ set(LLVM_NATIVE_ASMPRINTER LLVMInitialize${LLVM_NATIVE_ARCH}AsmPrinter)
endif ()
endif()
@@ -258,12 +262,19 @@ else( ENABLE_THREADS )
message(STATUS "Threads disabled.")
endif()
+set(LLVM_PREFIX ${CMAKE_INSTALL_PREFIX})
+
configure_file(
${LLVM_MAIN_INCLUDE_DIR}/llvm/Config/config.h.cmake
${LLVM_BINARY_DIR}/include/llvm/Config/config.h
)
configure_file(
+ ${LLVM_MAIN_INCLUDE_DIR}/llvm/Config/llvm-config.h.cmake
+ ${LLVM_BINARY_DIR}/include/llvm/Config/llvm-config.h
+ )
+
+configure_file(
${LLVM_MAIN_INCLUDE_DIR}/llvm/System/DataTypes.h.cmake
${LLVM_BINARY_DIR}/include/llvm/System/DataTypes.h
)
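Note: the three LLVM_NATIVE_* values are plain strings of the form LLVMInitialize<Arch>Target (and TargetInfo/AsmPrinter), which the new configure_file() call bakes into the generated llvm-config.h. A self-contained sketch of that substitution mechanism (the template below is illustrative, not the real llvm-config.h.cmake):

    set(LLVM_NATIVE_ARCH X86)                                  # example value
    set(LLVM_NATIVE_TARGET LLVMInitialize${LLVM_NATIVE_ARCH}Target)
    file(WRITE ${CMAKE_BINARY_DIR}/native-demo.h.in
         "#define LLVM_NATIVE_TARGET @LLVM_NATIVE_TARGET@\n")
    configure_file(${CMAKE_BINARY_DIR}/native-demo.h.in
                   ${CMAKE_BINARY_DIR}/native-demo.h @ONLY)
    # native-demo.h now contains: #define LLVM_NATIVE_TARGET LLVMInitializeX86Target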
diff --git a/libclamav/c++/llvm/cmake/modules/CMakeLists.txt b/libclamav/c++/llvm/cmake/modules/CMakeLists.txt
new file mode 100644
index 0000000..416d7f4
--- /dev/null
+++ b/libclamav/c++/llvm/cmake/modules/CMakeLists.txt
@@ -0,0 +1,12 @@
+set(llvm_cmake_builddir "${LLVM_BINARY_DIR}/share/llvm/cmake")
+
+configure_file(
+ LLVM.cmake
+ ${llvm_cmake_builddir}/LLVM.cmake
+ @ONLY)
+
+install(FILES
+ ${llvm_cmake_builddir}/LLVM.cmake
+ LLVMConfig.cmake
+ LLVMLibDeps.cmake
+ DESTINATION share/llvm/cmake)
diff --git a/libclamav/c++/llvm/cmake/modules/ChooseMSVCCRT.cmake b/libclamav/c++/llvm/cmake/modules/ChooseMSVCCRT.cmake
new file mode 100644
index 0000000..eb78f45
--- /dev/null
+++ b/libclamav/c++/llvm/cmake/modules/ChooseMSVCCRT.cmake
@@ -0,0 +1,106 @@
+# The macro choose_msvc_crt() takes a list of possible
+# C runtimes to choose from, in the form of compiler flags,
+# to present to the user. (MTd for /MTd, etc)
+#
+# The macro is invoked at the end of the file.
+#
+# CMake already sets CRT flags in the CMAKE_CXX_FLAGS_* and
+# CMAKE_C_FLAGS_* variables by default. To let the user
+# override that for each build type:
+# 1. Detect which CRT is already selected, and reflect this in
+# LLVM_USE_CRT_* so the user can have a better idea of what
+# changes they're making.
+# 2. Replace the flags in both variables with the new flag via a regex.
+# 3. set() the variables back into the cache so the changes
+# are user-visible.
+
+### Helper macros: ###
+macro(make_crt_regex regex crts)
+ set(${regex} "")
+ foreach(crt ${${crts}})
+ # Trying to match the beginning or end of the string with stuff
+ # like [ ^]+ didn't work, so use a bunch of parentheses instead.
+ set(${regex} "${${regex}}|(^| +)/${crt}($| +)")
+ endforeach(crt)
+ string(REGEX REPLACE "^\\|" "" ${regex} "${${regex}}")
+endmacro(make_crt_regex)
+
+macro(get_current_crt crt_current regex flagsvar)
+ # Find the selected-by-CMake CRT for each build type, if any.
+ # Strip off the leading slash and any whitespace.
+ string(REGEX MATCH "${${regex}}" ${crt_current} "${${flagsvar}}")
+ string(REPLACE "/" " " ${crt_current} "${${crt_current}}")
+ string(STRIP "${${crt_current}}" ${crt_current})
+endmacro(get_current_crt)
+
+# Replaces or adds a flag to a variable.
+# Expects 'flag' to be padded with spaces.
+macro(set_flag_in_var flagsvar regex flag)
+ string(REGEX MATCH "${${regex}}" current_flag "${${flagsvar}}")
+ if("${current_flag}" STREQUAL "")
+ set(${flagsvar} "${${flagsvar}}${${flag}}")
+ else()
+ string(REGEX REPLACE "${${regex}}" "${${flag}}" ${flagsvar} "${${flagsvar}}")
+ endif()
+ string(STRIP "${${flagsvar}}" ${flagsvar})
+ # Make sure this change gets reflected in the cache/gui.
+ # CMake requires the docstring parameter whenever set() touches the cache,
+ # so get the existing docstring and re-use that.
+ get_property(flagsvar_docs CACHE ${flagsvar} PROPERTY HELPSTRING)
+ set(${flagsvar} "${${flagsvar}}" CACHE STRING "${flagsvar_docs}" FORCE)
+endmacro(set_flag_in_var)
+
+
+macro(choose_msvc_crt MSVC_CRT)
+ if(LLVM_USE_CRT)
+ message(FATAL_ERROR
+ "LLVM_USE_CRT is deprecated. Use the CMAKE_BUILD_TYPE-specific
+variables (LLVM_USE_CRT_DEBUG, etc) instead.")
+ endif()
+
+ make_crt_regex(MSVC_CRT_REGEX ${MSVC_CRT})
+
+ foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
+ string(TOUPPER "${build_type}" build)
+ if (NOT LLVM_USE_CRT_${build})
+ get_current_crt(LLVM_USE_CRT_${build}
+ MSVC_CRT_REGEX
+ CMAKE_CXX_FLAGS_${build})
+ set(LLVM_USE_CRT_${build}
+ "${LLVM_USE_CRT_${build}}"
+ CACHE STRING "Specify VC++ CRT to use for ${build_type} configurations."
+ FORCE)
+ set_property(CACHE LLVM_USE_CRT_${build}
+ PROPERTY STRINGS "";${${MSVC_CRT}})
+ endif(NOT LLVM_USE_CRT_${build})
+ endforeach(build_type)
+
+ foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
+ string(TOUPPER "${build_type}" build)
+ if ("${LLVM_USE_CRT_${build}}" STREQUAL "")
+ set(flag_string " ")
+ else()
+ set(flag_string " /${LLVM_USE_CRT_${build}} ")
+ list(FIND ${MSVC_CRT} ${LLVM_USE_CRT_${build}} idx)
+ if (idx LESS 0)
+ message(FATAL_ERROR
+ "Invalid value for LLVM_USE_CRT_${build}: ${LLVM_USE_CRT_${build}}. Valid options are one of: ${${MSVC_CRT}}")
+ endif (idx LESS 0)
+ message(STATUS "Using ${build_type} VC++ CRT: ${LLVM_USE_CRT_${build}}")
+ endif()
+ foreach(lang C CXX)
+ set_flag_in_var(CMAKE_${lang}_FLAGS_${build} MSVC_CRT_REGEX flag_string)
+ endforeach(lang)
+ endforeach(build_type)
+endmacro(choose_msvc_crt MSVC_CRT)
+
+
+# List of valid CRTs for MSVC
+set(MSVC_CRT
+ MD
+ MDd
+ MT
+ MTd)
+
+choose_msvc_crt(MSVC_CRT)
+
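Note: set_flag_in_var() is the interesting part of this new module: it rewrites the CRT flag inside the cached CMAKE_<LANG>_FLAGS_<BUILD> strings and pushes the result back with the original docstring. The same round trip in isolation (build type and target flag chosen purely for illustration):

    # Swap whatever /MD[d] or /MT[d] flag CMake chose for Release builds to /MT,
    # then re-cache the edited value so it shows up in the GUI.
    string(REGEX REPLACE "(^| +)/M[DT]d?($| +)" " /MT "
           CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}")
    string(STRIP "${CMAKE_CXX_FLAGS_RELEASE}" CMAKE_CXX_FLAGS_RELEASE)
    get_property(release_docs CACHE CMAKE_CXX_FLAGS_RELEASE PROPERTY HELPSTRING)
    set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}"
        CACHE STRING "${release_docs}" FORCE)

In practice users do not call this by hand; passing e.g. -DLLVM_USE_CRT_RELEASE=MT at configure time makes choose_msvc_crt() perform the substitution for the Release configuration.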
diff --git a/libclamav/c++/llvm/cmake/modules/LLVM.cmake b/libclamav/c++/llvm/cmake/modules/LLVM.cmake
new file mode 100644
index 0000000..9621454
--- /dev/null
+++ b/libclamav/c++/llvm/cmake/modules/LLVM.cmake
@@ -0,0 +1,29 @@
+# This file provides information and services to the final user.
+
+set(LLVM_COMMON_DEPENDS @LLVM_COMMON_DEPENDS@)
+
+set(llvm_libs @llvm_libs@)
+
+set(llvm_lib_targets @llvm_lib_targets@)
+
+set(LLVM_TARGETS_TO_BUILD @LLVM_TARGETS_TO_BUILD@)
+
+set(LLVM_TOOLS_BINARY_DIR @LLVM_TOOLS_BINARY_DIR@)
+
+set(LLVM_ENABLE_THREADS @LLVM_ENABLE_THREADS@)
+
+set(LLVM_NATIVE_ARCH @LLVM_NATIVE_ARCH@)
+
+# We try to include using the current setting of CMAKE_MODULE_PATH,
+# which supposedly was filled by the user with the directory where
+# this file was installed:
+include( LLVMConfig OPTIONAL RESULT_VARIABLE LLVMCONFIG_INCLUDED )
+
+# If that failed, we assume that this is an un-installed build:
+if( NOT LLVMCONFIG_INCLUDED )
+ set(CMAKE_MODULE_PATH
+ ${CMAKE_MODULE_PATH}
+ "@LLVM_SOURCE_DIR@/cmake/modules")
+ include( LLVMConfig )
+endif()
+
diff --git a/libclamav/c++/llvm/cmake/modules/LLVMConfig.cmake b/libclamav/c++/llvm/cmake/modules/LLVMConfig.cmake
index 0744b50..e549708 100755
--- a/libclamav/c++/llvm/cmake/modules/LLVMConfig.cmake
+++ b/libclamav/c++/llvm/cmake/modules/LLVMConfig.cmake
@@ -16,6 +16,26 @@ function(get_system_libs return_var)
endfunction(get_system_libs)
+function(is_llvm_target_library library return_var)
+  # Sets variable `return_var' to ON if `library' corresponds to an
+  # LLVM-supported target, or to OFF if it doesn't.
+ set(${return_var} OFF PARENT_SCOPE)
+ string(TOUPPER "${library}" capitalized_lib)
+ string(TOUPPER "${LLVM_ALL_TARGETS}" targets)
+ foreach(t ${targets})
+ if( capitalized_lib STREQUAL "LLVM${t}" OR
+ capitalized_lib STREQUAL "LLVM${t}CODEGEN" OR
+ capitalized_lib STREQUAL "LLVM${t}ASMPARSER" OR
+ capitalized_lib STREQUAL "LLVM${t}ASMPRINTER" OR
+ capitalized_lib STREQUAL "LLVM${t}DISASSEMBLER" OR
+ capitalized_lib STREQUAL "LLVM${t}INFO" )
+ set(${return_var} ON PARENT_SCOPE)
+ break()
+ endif()
+ endforeach()
+endfunction(is_llvm_target_library)
+
+
macro(llvm_config executable)
explicit_llvm_config(${executable} ${ARGN})
endmacro(llvm_config)
@@ -29,6 +49,14 @@ function(explicit_llvm_config executable)
endfunction(explicit_llvm_config)
+# This is a variant intended for the final user:
+function(llvm_map_components_to_libraries OUT_VAR)
+ explicit_map_components_to_libraries(result ${ARGN})
+ get_system_libs(sys_result)
+ set( ${OUT_VAR} ${result} ${sys_result} PARENT_SCOPE )
+endfunction(llvm_map_components_to_libraries)
+
+
function(explicit_map_components_to_libraries out_libs)
set( link_components ${ARGN} )
foreach(c ${link_components})
@@ -86,16 +114,24 @@ function(explicit_map_components_to_libraries out_libs)
list(GET expanded_components 0 c)
string(TOUPPER "${c}" capitalized)
list(FIND capitalized_libs ${capitalized} idx)
+ set(add_it ON)
if( idx LESS 0 )
- message(FATAL_ERROR "Library ${c} not found in list of llvm libraries.")
+      # The library is unknown. Maybe it is an omitted target?
+ is_llvm_target_library(${c} iltl_result)
+ if( NOT iltl_result )
+ message(FATAL_ERROR "Library ${c} not found in list of llvm libraries.")
+ endif()
+ set(add_it OFF)
endif( idx LESS 0 )
list(GET llvm_libs ${idx} canonical_lib)
list(REMOVE_ITEM result ${canonical_lib})
- list(APPEND result ${canonical_lib})
foreach(c ${MSVC_LIB_DEPS_${canonical_lib}})
list(REMOVE_ITEM expanded_components ${c})
endforeach()
- list(APPEND expanded_components ${MSVC_LIB_DEPS_${canonical_lib}})
+ if( add_it )
+ list(APPEND result ${canonical_lib})
+ list(APPEND expanded_components ${MSVC_LIB_DEPS_${canonical_lib}})
+ endif()
list(REMOVE_AT expanded_components 0)
list(LENGTH expanded_components lst_size)
endwhile( 0 LESS ${lst_size} )
@@ -115,13 +151,13 @@ endfunction(explicit_map_components_to_libraries)
# The format generated by GenLibDeps.pl
-# LLVMARMAsmPrinter.o: LLVMARMCodeGen.o libLLVMAsmPrinter.a libLLVMCodeGen.a libLLVMCore.a libLLVMSupport.a libLLVMTarget.a
+# libLLVMARMAsmPrinter.a: libLLVMMC.a libLLVMSupport.a
# is translated to:
-# set(MSVC_LIB_DEPS_LLVMARMAsmPrinter LLVMARMCodeGen LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMSupport LLVMTarget)
+# set(MSVC_LIB_DEPS_LLVMARMAsmPrinter LLVMMC LLVMSupport)
-# It is necessary to remove the `lib' prefix and the `.a'.
+# It is necessary to remove the `lib' prefix and the `.a' suffix.
# This 'sed' script should do the trick:
# sed -e s'#\.a##g' -e 's#libLLVM#LLVM#g' -e 's#: # #' -e 's#\(.*\)#set(MSVC_LIB_DEPS_\1)#' ~/llvm/tools/llvm-config/LibDeps.txt
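Note: of these, llvm_map_components_to_libraries() is the entry point meant for out-of-tree users: it expands component names (jit, native, bitreader, ...) into concrete LLVM library names plus the required system libraries. A minimal consumer CMakeLists sketch (hypothetical project, source file and /usr/local install prefix; assumes the modules were installed to share/llvm/cmake as arranged above):

    cmake_minimum_required(VERSION 2.6)
    project(HowToUseJIT)

    # Point CMake at the installed LLVM modules and pull them in.
    set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "/usr/local/share/llvm/cmake")
    include(LLVM)

    include_directories(/usr/local/include)
    add_definitions(-D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS)

    add_executable(howtousejit HowToUseJIT.cpp)
    llvm_map_components_to_libraries(REQ_LLVM_LIBRARIES jit native bitreader)
    target_link_libraries(howtousejit ${REQ_LLVM_LIBRARIES})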
diff --git a/libclamav/c++/llvm/cmake/modules/LLVMLibDeps.cmake b/libclamav/c++/llvm/cmake/modules/LLVMLibDeps.cmake
index e1c470f..e639b04 100644
--- a/libclamav/c++/llvm/cmake/modules/LLVMLibDeps.cmake
+++ b/libclamav/c++/llvm/cmake/modules/LLVMLibDeps.cmake
@@ -1,22 +1,22 @@
-set(MSVC_LIB_DEPS_LLVMARMAsmParser LLVMARMInfo LLVMMCParser LLVMSupport)
-set(MSVC_LIB_DEPS_LLVMARMAsmPrinter LLVMARMCodeGen LLVMARMInfo LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMSupport LLVMSystem LLVMTarget)
-set(MSVC_LIB_DEPS_LLVMARMCodeGen LLVMARMInfo LLVMCodeGen LLVMCore LLVMMC LLVMSelectionDAG LLVMSupport LLVMSystem LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMARMAsmParser LLVMARMInfo LLVMMC LLVMMCParser LLVMSupport LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMARMAsmPrinter LLVMMC LLVMSupport)
+set(MSVC_LIB_DEPS_LLVMARMCodeGen LLVMARMInfo LLVMAnalysis LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMSelectionDAG LLVMSupport LLVMSystem LLVMTarget)
set(MSVC_LIB_DEPS_LLVMARMInfo LLVMSupport)
-set(MSVC_LIB_DEPS_LLVMAlphaAsmPrinter LLVMAlphaInfo LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMSupport LLVMSystem LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMAlphaAsmPrinter LLVMAlphaInfo LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMSupport LLVMTarget)
set(MSVC_LIB_DEPS_LLVMAlphaCodeGen LLVMAlphaInfo LLVMCodeGen LLVMCore LLVMMC LLVMSelectionDAG LLVMSupport LLVMSystem LLVMTarget)
set(MSVC_LIB_DEPS_LLVMAlphaInfo LLVMSupport)
set(MSVC_LIB_DEPS_LLVMAnalysis LLVMCore LLVMSupport LLVMSystem LLVMTarget)
set(MSVC_LIB_DEPS_LLVMArchive LLVMBitReader LLVMCore LLVMSupport LLVMSystem)
set(MSVC_LIB_DEPS_LLVMAsmParser LLVMCore LLVMSupport)
-set(MSVC_LIB_DEPS_LLVMAsmPrinter LLVMAnalysis LLVMCodeGen LLVMCore LLVMMC LLVMSupport LLVMSystem LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMAsmPrinter LLVMAnalysis LLVMCodeGen LLVMCore LLVMMC LLVMMCParser LLVMSupport LLVMSystem LLVMTarget)
set(MSVC_LIB_DEPS_LLVMBitReader LLVMCore LLVMSupport)
-set(MSVC_LIB_DEPS_LLVMBitWriter LLVMCore LLVMSupport LLVMSystem)
-set(MSVC_LIB_DEPS_LLVMBlackfinAsmPrinter LLVMAsmPrinter LLVMBlackfinInfo LLVMCodeGen LLVMCore LLVMMC LLVMSupport LLVMSystem LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMBitWriter LLVMCore LLVMSupport)
+set(MSVC_LIB_DEPS_LLVMBlackfinAsmPrinter LLVMAsmPrinter LLVMBlackfinInfo LLVMCodeGen LLVMCore LLVMMC LLVMSupport LLVMTarget)
set(MSVC_LIB_DEPS_LLVMBlackfinCodeGen LLVMBlackfinInfo LLVMCodeGen LLVMCore LLVMMC LLVMSelectionDAG LLVMSupport LLVMTarget)
set(MSVC_LIB_DEPS_LLVMBlackfinInfo LLVMSupport)
set(MSVC_LIB_DEPS_LLVMCBackend LLVMAnalysis LLVMCBackendInfo LLVMCodeGen LLVMCore LLVMMC LLVMScalarOpts LLVMSupport LLVMSystem LLVMTarget LLVMTransformUtils LLVMipa)
set(MSVC_LIB_DEPS_LLVMCBackendInfo LLVMSupport)
-set(MSVC_LIB_DEPS_LLVMCellSPUAsmPrinter LLVMAsmPrinter LLVMCellSPUInfo LLVMCodeGen LLVMCore LLVMMC LLVMSupport LLVMSystem LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMCellSPUAsmPrinter LLVMAsmPrinter LLVMCellSPUInfo LLVMCodeGen LLVMCore LLVMMC LLVMSupport LLVMTarget)
set(MSVC_LIB_DEPS_LLVMCellSPUCodeGen LLVMCellSPUInfo LLVMCodeGen LLVMCore LLVMMC LLVMSelectionDAG LLVMSupport LLVMTarget)
set(MSVC_LIB_DEPS_LLVMCellSPUInfo LLVMSupport)
set(MSVC_LIB_DEPS_LLVMCodeGen LLVMAnalysis LLVMCore LLVMMC LLVMScalarOpts LLVMSupport LLVMSystem LLVMTarget LLVMTransformUtils)
@@ -27,43 +27,45 @@ set(MSVC_LIB_DEPS_LLVMExecutionEngine LLVMCore LLVMSupport LLVMSystem LLVMTarget
set(MSVC_LIB_DEPS_LLVMInstCombine LLVMAnalysis LLVMCore LLVMSupport LLVMSystem LLVMTarget LLVMTransformUtils)
set(MSVC_LIB_DEPS_LLVMInstrumentation LLVMAnalysis LLVMCore LLVMSupport LLVMSystem LLVMTransformUtils)
set(MSVC_LIB_DEPS_LLVMInterpreter LLVMCodeGen LLVMCore LLVMExecutionEngine LLVMSupport LLVMSystem LLVMTarget)
-set(MSVC_LIB_DEPS_LLVMJIT LLVMAnalysis LLVMCodeGen LLVMCore LLVMExecutionEngine LLVMMC LLVMSupport LLVMSystem LLVMTarget)
-set(MSVC_LIB_DEPS_LLVMLinker LLVMArchive LLVMBitReader LLVMCore LLVMSupport LLVMSystem)
+set(MSVC_LIB_DEPS_LLVMJIT LLVMCodeGen LLVMCore LLVMExecutionEngine LLVMMC LLVMSupport LLVMSystem LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMLinker LLVMArchive LLVMBitReader LLVMCore LLVMSupport LLVMSystem LLVMTransformUtils)
+set(MSVC_LIB_DEPS_LLVMMBlazeAsmPrinter LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMBlazeCodeGen LLVMMBlazeInfo LLVMMC LLVMSupport LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMMBlazeCodeGen LLVMCodeGen LLVMCore LLVMMBlazeInfo LLVMMC LLVMSelectionDAG LLVMSupport LLVMSystem LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMMBlazeInfo LLVMSupport)
set(MSVC_LIB_DEPS_LLVMMC LLVMSupport LLVMSystem)
+set(MSVC_LIB_DEPS_LLVMMCDisassembler LLVMARMAsmParser LLVMARMCodeGen LLVMARMInfo LLVMAlphaAsmPrinter LLVMAlphaCodeGen LLVMAlphaInfo LLVMBlackfinAsmPrinter LLVMBlackfinCodeGen LLVMBlackfinInfo LLVMCBackend LLVMCBackendInfo LLVMCellSPUAsmPrinter LLVMCellSPUCodeGen LLVMCellSPUInfo LLVMCppBackend LLVMCppBackendInfo LLVMMBlazeAsmPrinter LLVMMBlazeCodeGen LLVMMBlazeInfo LLVMMC LLVMMCParser LLVMMSP430AsmPrinter LLVMMSP430CodeGen LLVMMSP430Info LLVMMipsAsmPrinter LLVMMipsCodeGen LLVMMipsInfo LLVMPIC16AsmPrinter LLVMPIC16CodeGen LLVMPIC16Info LLVMPowerPCAsmPrinter LLVMPowerPCCodeGen LLVMPowerPCInfo LLVMSparcAsmPrinter LLVMSparcCodeGen LLVMSparcInfo LLVMSupport LLVMSystem LLVMSystemZAsmPrinter LLVMSystemZCodeGen LLVMSystemZInfo LLVMX86AsmParser LLVMX86CodeGen LLVMX86Disassembler LLVMX86Info LLVMXCoreAsmPrinter LLVMXCoreCodeGen LLVMXCoreInfo)
set(MSVC_LIB_DEPS_LLVMMCParser LLVMMC LLVMSupport)
-set(MSVC_LIB_DEPS_LLVMMSIL LLVMAnalysis LLVMCodeGen LLVMCore LLVMMSILInfo LLVMScalarOpts LLVMSupport LLVMTarget LLVMTransformUtils LLVMipa)
-set(MSVC_LIB_DEPS_LLVMMSILInfo LLVMSupport)
-set(MSVC_LIB_DEPS_LLVMMSP430AsmPrinter LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMMSP430Info LLVMSupport LLVMSystem LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMMSP430AsmPrinter LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMMSP430Info LLVMSupport LLVMTarget)
set(MSVC_LIB_DEPS_LLVMMSP430CodeGen LLVMCodeGen LLVMCore LLVMMC LLVMMSP430Info LLVMSelectionDAG LLVMSupport LLVMSystem LLVMTarget)
set(MSVC_LIB_DEPS_LLVMMSP430Info LLVMSupport)
-set(MSVC_LIB_DEPS_LLVMMipsAsmPrinter LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMMipsCodeGen LLVMMipsInfo LLVMSupport LLVMSystem LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMMipsAsmPrinter LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMMipsCodeGen LLVMMipsInfo LLVMSupport LLVMTarget)
set(MSVC_LIB_DEPS_LLVMMipsCodeGen LLVMCodeGen LLVMCore LLVMMC LLVMMipsInfo LLVMSelectionDAG LLVMSupport LLVMSystem LLVMTarget)
set(MSVC_LIB_DEPS_LLVMMipsInfo LLVMSupport)
-set(MSVC_LIB_DEPS_LLVMPIC16 LLVMAnalysis LLVMCodeGen LLVMCore LLVMMC LLVMPIC16Info LLVMSelectionDAG LLVMSupport LLVMTarget)
-set(MSVC_LIB_DEPS_LLVMPIC16AsmPrinter LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMPIC16 LLVMPIC16Info LLVMSupport LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMPIC16AsmPrinter LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMPIC16CodeGen LLVMPIC16Info LLVMSupport LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMPIC16CodeGen LLVMAnalysis LLVMCodeGen LLVMCore LLVMMC LLVMPIC16Info LLVMSelectionDAG LLVMSupport LLVMTarget)
set(MSVC_LIB_DEPS_LLVMPIC16Info LLVMSupport)
-set(MSVC_LIB_DEPS_LLVMPowerPCAsmPrinter LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMPowerPCInfo LLVMSupport LLVMSystem LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMPowerPCAsmPrinter LLVMAnalysis LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMPowerPCInfo LLVMSupport LLVMTarget)
set(MSVC_LIB_DEPS_LLVMPowerPCCodeGen LLVMCodeGen LLVMCore LLVMMC LLVMPowerPCInfo LLVMSelectionDAG LLVMSupport LLVMSystem LLVMTarget)
set(MSVC_LIB_DEPS_LLVMPowerPCInfo LLVMSupport)
set(MSVC_LIB_DEPS_LLVMScalarOpts LLVMAnalysis LLVMCore LLVMInstCombine LLVMSupport LLVMSystem LLVMTarget LLVMTransformUtils)
-set(MSVC_LIB_DEPS_LLVMSelectionDAG LLVMAnalysis LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMSupport LLVMSystem LLVMTarget)
-set(MSVC_LIB_DEPS_LLVMSparcAsmPrinter LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMSparcInfo LLVMSupport LLVMSystem LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMSelectionDAG LLVMAnalysis LLVMCodeGen LLVMCore LLVMMC LLVMSupport LLVMSystem LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMSparcAsmPrinter LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMSparcInfo LLVMSupport LLVMTarget)
set(MSVC_LIB_DEPS_LLVMSparcCodeGen LLVMCodeGen LLVMCore LLVMMC LLVMSelectionDAG LLVMSparcInfo LLVMSupport LLVMSystem LLVMTarget)
set(MSVC_LIB_DEPS_LLVMSparcInfo LLVMSupport)
set(MSVC_LIB_DEPS_LLVMSupport LLVMSystem)
set(MSVC_LIB_DEPS_LLVMSystem )
-set(MSVC_LIB_DEPS_LLVMSystemZAsmPrinter LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMSupport LLVMSystem LLVMSystemZInfo LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMSystemZAsmPrinter LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMSupport LLVMSystemZInfo LLVMTarget)
set(MSVC_LIB_DEPS_LLVMSystemZCodeGen LLVMCodeGen LLVMCore LLVMMC LLVMSelectionDAG LLVMSupport LLVMSystemZInfo LLVMTarget)
set(MSVC_LIB_DEPS_LLVMSystemZInfo LLVMSupport)
set(MSVC_LIB_DEPS_LLVMTarget LLVMCore LLVMMC LLVMSupport)
set(MSVC_LIB_DEPS_LLVMTransformUtils LLVMAnalysis LLVMCore LLVMSupport LLVMSystem LLVMTarget LLVMipa)
-set(MSVC_LIB_DEPS_LLVMX86AsmParser LLVMMC LLVMMCParser LLVMSupport LLVMX86Info)
-set(MSVC_LIB_DEPS_LLVMX86AsmPrinter LLVMAnalysis LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMSupport LLVMSystem LLVMTarget LLVMX86CodeGen LLVMX86Info)
-set(MSVC_LIB_DEPS_LLVMX86CodeGen LLVMCodeGen LLVMCore LLVMMC LLVMSelectionDAG LLVMSupport LLVMSystem LLVMTarget LLVMX86Info)
+set(MSVC_LIB_DEPS_LLVMX86AsmParser LLVMMC LLVMMCParser LLVMSupport LLVMTarget LLVMX86Info)
+set(MSVC_LIB_DEPS_LLVMX86AsmPrinter LLVMMC LLVMSupport)
+set(MSVC_LIB_DEPS_LLVMX86CodeGen LLVMAnalysis LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMSelectionDAG LLVMSupport LLVMSystem LLVMTarget LLVMX86AsmPrinter LLVMX86Info)
set(MSVC_LIB_DEPS_LLVMX86Disassembler LLVMMC LLVMSupport LLVMX86Info)
set(MSVC_LIB_DEPS_LLVMX86Info LLVMSupport)
-set(MSVC_LIB_DEPS_LLVMXCore LLVMCodeGen LLVMCore LLVMMC LLVMSelectionDAG LLVMSupport LLVMTarget LLVMXCoreInfo)
-set(MSVC_LIB_DEPS_LLVMXCoreAsmPrinter LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMSupport LLVMSystem LLVMTarget LLVMXCoreInfo)
+set(MSVC_LIB_DEPS_LLVMXCoreAsmPrinter LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMSupport LLVMTarget LLVMXCoreInfo)
+set(MSVC_LIB_DEPS_LLVMXCoreCodeGen LLVMCodeGen LLVMCore LLVMMC LLVMSelectionDAG LLVMSupport LLVMTarget LLVMXCoreInfo)
set(MSVC_LIB_DEPS_LLVMXCoreInfo LLVMSupport)
set(MSVC_LIB_DEPS_LLVMipa LLVMAnalysis LLVMCore LLVMSupport LLVMSystem)
-set(MSVC_LIB_DEPS_LLVMipo LLVMAnalysis LLVMCore LLVMSupport LLVMSystem LLVMTarget LLVMTransformUtils LLVMipa)
+set(MSVC_LIB_DEPS_LLVMipo LLVMAnalysis LLVMCore LLVMScalarOpts LLVMSupport LLVMSystem LLVMTarget LLVMTransformUtils LLVMipa)
diff --git a/libclamav/c++/llvm/cmake/modules/TableGen.cmake b/libclamav/c++/llvm/cmake/modules/TableGen.cmake
index f6da1b8..cf7cd1f 100644
--- a/libclamav/c++/llvm/cmake/modules/TableGen.cmake
+++ b/libclamav/c++/llvm/cmake/modules/TableGen.cmake
@@ -6,10 +6,16 @@ macro(tablegen ofn)
file(GLOB local_tds "*.td")
file(GLOB_RECURSE global_tds "${LLVM_MAIN_SRC_DIR}/include/llvm/*.td")
+ if (IS_ABSOLUTE ${LLVM_TARGET_DEFINITIONS})
+ set(LLVM_TARGET_DEFINITIONS_ABSOLUTE ${LLVM_TARGET_DEFINITIONS})
+ else()
+ set(LLVM_TARGET_DEFINITIONS_ABSOLUTE
+ ${CMAKE_CURRENT_SOURCE_DIR}/${LLVM_TARGET_DEFINITIONS})
+ endif()
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${ofn}
COMMAND ${LLVM_TABLEGEN_EXE} ${ARGN} -I ${CMAKE_CURRENT_SOURCE_DIR}
-I ${LLVM_MAIN_SRC_DIR}/lib/Target -I ${LLVM_MAIN_INCLUDE_DIR}
- ${CMAKE_CURRENT_SOURCE_DIR}/${LLVM_TARGET_DEFINITIONS}
+ ${LLVM_TARGET_DEFINITIONS_ABSOLUTE}
-o ${CMAKE_CURRENT_BINARY_DIR}/${ofn}
DEPENDS tblgen ${local_tds} ${global_tds}
COMMENT "Building ${ofn}..."
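Note: a typical call site sets LLVM_TARGET_DEFINITIONS and then invokes the macro once per generated file; with the change above the .td file may also be given as an absolute path. Roughly (the tblgen generator flags are illustrative):

    set(LLVM_TARGET_DEFINITIONS X86.td)
    tablegen(X86GenRegisterNames.inc -gen-register-enums)
    tablegen(X86GenInstrNames.inc -gen-instr-enums)

    # After this patch a definitions file outside the current directory works too:
    set(LLVM_TARGET_DEFINITIONS ${LLVM_MAIN_SRC_DIR}/lib/Target/X86/X86.td)
    tablegen(X86GenRegisterInfo.inc -gen-register-desc)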
diff --git a/libclamav/c++/llvm/cmake/modules/VersionFromVCS.cmake b/libclamav/c++/llvm/cmake/modules/VersionFromVCS.cmake
new file mode 100644
index 0000000..1016df2
--- /dev/null
+++ b/libclamav/c++/llvm/cmake/modules/VersionFromVCS.cmake
@@ -0,0 +1,33 @@
+# Adds version control information to the variable VERS. For
+# determining the Version Control System used (if any) it inspects the
+# existence of certain subdirectories under CMAKE_CURRENT_SOURCE_DIR.
+
+function(add_version_info_from_vcs VERS)
+ set(result ${${VERS}})
+ if( EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/.svn )
+ set(result "${result}svn")
+ find_package(Subversion)
+ if( Subversion_FOUND )
+ subversion_wc_info( ${CMAKE_CURRENT_SOURCE_DIR} Project )
+ if( Project_WC_REVISION )
+ set(result "${result}-r${Project_WC_REVISION}")
+ endif()
+ endif()
+ elseif( EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/.git )
+ set(result "${result}git")
+ # Try to get a ref-id
+ find_program(git_executable NAMES git git.exe git.cmd)
+ if( git_executable )
+ execute_process(COMMAND ${git_executable} show-ref HEAD
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+ TIMEOUT 5
+ RESULT_VARIABLE git_result
+ OUTPUT_VARIABLE git_output)
+ if( git_result EQUAL 0 )
+ string(SUBSTRING ${git_output} 0 7 git_ref_id)
+ set(result "${result}-${git_ref_id}")
+ endif()
+ endif()
+ endif()
+ set(${VERS} ${result} PARENT_SCOPE)
+endfunction(add_version_info_from_vcs)
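Note: callers hand this function the name of an existing version variable and get the VCS suffix appended in place, e.g. (the revision shown is illustrative):

    set(PACKAGE_VERSION "2.8")
    add_version_info_from_vcs(PACKAGE_VERSION)
    # svn checkout: PACKAGE_VERSION becomes something like "2.8svn-r117000"
    # git checkout: "2.8git-" followed by the first 7 characters of show-ref HEAD
    # plain tarball: the value is left untouched
    message(STATUS "LLVM version: ${PACKAGE_VERSION}")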
diff --git a/libclamav/c++/llvm/configure b/libclamav/c++/llvm/configure
index ec34230..0d8450f 100755
--- a/libclamav/c++/llvm/configure
+++ b/libclamav/c++/llvm/configure
@@ -1,13 +1,13 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.65 for llvm 2.7.
+# Generated by GNU Autoconf 2.67 for llvm 2.8.
#
# Report bugs to <llvmbugs at cs.uiuc.edu>.
#
#
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
-# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
-# Inc.
+# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software
+# Foundation, Inc.
#
#
# This configure script is free software; the Free Software Foundation
@@ -321,7 +321,7 @@ $as_echo X"$as_dir" |
test -d "$as_dir" && break
done
test -z "$as_dirs" || eval "mkdir $as_dirs"
- } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
} # as_fn_mkdir_p
@@ -361,19 +361,19 @@ else
fi # as_fn_arith
-# as_fn_error ERROR [LINENO LOG_FD]
-# ---------------------------------
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
-# script with status $?, using 1 if that was 0.
+# script with STATUS, using 1 if that was 0.
as_fn_error ()
{
- as_status=$?; test $as_status -eq 0 && as_status=1
- if test "$3"; then
- as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
- $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
fi
- $as_echo "$as_me: error: $1" >&2
+ $as_echo "$as_me: error: $2" >&2
as_fn_exit $as_status
} # as_fn_error
@@ -535,7 +535,7 @@ test -n "$DJDIR" || exec 7<&0 </dev/null
exec 6>&1
# Name of the host.
-# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
+# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
# so uname gets run too.
ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
@@ -554,8 +554,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='llvm'
PACKAGE_TARNAME='-llvm-'
-PACKAGE_VERSION='2.7'
-PACKAGE_STRING='llvm 2.7'
+PACKAGE_VERSION='2.8'
+PACKAGE_STRING='llvm 2.8'
PACKAGE_BUGREPORT='llvmbugs at cs.uiuc.edu'
PACKAGE_URL=''
@@ -639,6 +639,7 @@ CONVENIENCE_LTDL_FALSE
CONVENIENCE_LTDL_TRUE
INSTALL_LTDL_FALSE
INSTALL_LTDL_TRUE
+HAVE_LINK_VERSION_SCRIPT
GAS
OCAMLDOC
OCAMLDEP
@@ -647,11 +648,13 @@ OCAMLC
ZIP
TCLSH
RUNTEST
+PDFROFF
POD2MAN
POD2HTML
GZIP
GROFF
DOXYGEN
+CAT
BZIP2
INSTALL_DATA
INSTALL_SCRIPT
@@ -683,8 +686,6 @@ NM
ac_ct_CXX
CXXFLAGS
CXX
-ENABLE_LLVMC_DYNAMIC_PLUGINS
-ENABLE_LLVMC_DYNAMIC
BINUTILS_INCDIR
EXTRA_OPTIONS
OPTIMIZE_OPTION
@@ -697,6 +698,7 @@ LLVM_ENUM_ASM_PARSERS
LLVM_ENUM_ASM_PRINTERS
LLVM_ENUM_TARGETS
TARGETS_TO_BUILD
+ENABLE_TIMESTAMPS
ENABLE_SHARED
ENABLE_PIC
ENABLE_THREADS
@@ -800,6 +802,7 @@ enable_doxygen
enable_threads
enable_pic
enable_shared
+enable_timestamps
enable_targets
enable_cbe_printf_a
with_llvmgccdir
@@ -818,8 +821,6 @@ with_cxx_include_32bit_dir
with_cxx_include_64bit_dir
with_binutils_include
enable_libffi
-enable_llvmc_dynamic
-enable_llvmc_dynamic_plugins
with_tclinclude
enable_ltdl_install
with_llvmcc
@@ -838,17 +839,17 @@ CPP
CXX
CXXFLAGS
CCC'
-ac_subdirs_all='projects/sample
+ac_subdirs_all='projects/llvm-gcc
+projects/test-suite
+projects/llvm-test
+projects/poolalloc
+projects/llvm-poolalloc
+projects/sample
projects/privbracket
projects/llvm-stacker
-projects/llvm-test
-projects/test-suite
projects/llvm-reopt
-projects/llvm-gcc
projects/llvm-java
projects/llvm-tv
-projects/llvm-poolalloc
-projects/poolalloc
projects/safecode
projects/llvm-kernel'
@@ -912,8 +913,9 @@ do
fi
case $ac_option in
- *=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
- *) ac_optarg=yes ;;
+ *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+ *=) ac_optarg= ;;
+ *) ac_optarg=yes ;;
esac
# Accept the important Cygnus configure options, so we can diagnose typos.
@@ -958,7 +960,7 @@ do
ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
# Reject names that are not valid shell variable names.
expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
- as_fn_error "invalid feature name: $ac_useropt"
+ as_fn_error $? "invalid feature name: $ac_useropt"
ac_useropt_orig=$ac_useropt
ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
case $ac_user_opts in
@@ -984,7 +986,7 @@ do
ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
# Reject names that are not valid shell variable names.
expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
- as_fn_error "invalid feature name: $ac_useropt"
+ as_fn_error $? "invalid feature name: $ac_useropt"
ac_useropt_orig=$ac_useropt
ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
case $ac_user_opts in
@@ -1188,7 +1190,7 @@ do
ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
# Reject names that are not valid shell variable names.
expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
- as_fn_error "invalid package name: $ac_useropt"
+ as_fn_error $? "invalid package name: $ac_useropt"
ac_useropt_orig=$ac_useropt
ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
case $ac_user_opts in
@@ -1204,7 +1206,7 @@ do
ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
# Reject names that are not valid shell variable names.
expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
- as_fn_error "invalid package name: $ac_useropt"
+ as_fn_error $? "invalid package name: $ac_useropt"
ac_useropt_orig=$ac_useropt
ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
case $ac_user_opts in
@@ -1234,8 +1236,8 @@ do
| --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
x_libraries=$ac_optarg ;;
- -*) as_fn_error "unrecognized option: \`$ac_option'
-Try \`$0 --help' for more information."
+ -*) as_fn_error $? "unrecognized option: \`$ac_option'
+Try \`$0 --help' for more information"
;;
*=*)
@@ -1243,7 +1245,7 @@ Try \`$0 --help' for more information."
# Reject names that are not valid shell variable names.
case $ac_envvar in #(
'' | [0-9]* | *[!_$as_cr_alnum]* )
- as_fn_error "invalid variable name: \`$ac_envvar'" ;;
+ as_fn_error $? "invalid variable name: \`$ac_envvar'" ;;
esac
eval $ac_envvar=\$ac_optarg
export $ac_envvar ;;
@@ -1261,13 +1263,13 @@ done
if test -n "$ac_prev"; then
ac_option=--`echo $ac_prev | sed 's/_/-/g'`
- as_fn_error "missing argument to $ac_option"
+ as_fn_error $? "missing argument to $ac_option"
fi
if test -n "$ac_unrecognized_opts"; then
case $enable_option_checking in
no) ;;
- fatal) as_fn_error "unrecognized options: $ac_unrecognized_opts" ;;
+ fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;;
*) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
esac
fi
@@ -1290,7 +1292,7 @@ do
[\\/$]* | ?:[\\/]* ) continue;;
NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
esac
- as_fn_error "expected an absolute directory name for --$ac_var: $ac_val"
+ as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val"
done
# There might be people who depend on the old broken behavior: `$host'
@@ -1304,8 +1306,8 @@ target=$target_alias
if test "x$host_alias" != x; then
if test "x$build_alias" = x; then
cross_compiling=maybe
- $as_echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
- If a cross compiler is detected then cross compile mode will be used." >&2
+ $as_echo "$as_me: WARNING: if you wanted to set the --build type, don't use --host.
+ If a cross compiler is detected then cross compile mode will be used" >&2
elif test "x$build_alias" != "x$host_alias"; then
cross_compiling=yes
fi
@@ -1320,9 +1322,9 @@ test "$silent" = yes && exec 6>/dev/null
ac_pwd=`pwd` && test -n "$ac_pwd" &&
ac_ls_di=`ls -di .` &&
ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
- as_fn_error "working directory cannot be determined"
+ as_fn_error $? "working directory cannot be determined"
test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
- as_fn_error "pwd does not report name of working directory"
+ as_fn_error $? "pwd does not report name of working directory"
# Find the source files, if location was not specified.
@@ -1361,11 +1363,11 @@ else
fi
if test ! -r "$srcdir/$ac_unique_file"; then
test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
- as_fn_error "cannot find sources ($ac_unique_file) in $srcdir"
+ as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir"
fi
ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
ac_abs_confdir=`(
- cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error "$ac_msg"
+ cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg"
pwd)`
# When building in place, set srcdir=.
if test "$ac_abs_confdir" = "$ac_pwd"; then
@@ -1391,7 +1393,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures llvm 2.7 to adapt to many kinds of systems.
+\`configure' configures llvm 2.8 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1405,7 +1407,7 @@ Configuration:
--help=short display options specific to this package
--help=recursive display the short help of all the included packages
-V, --version display version information and exit
- -q, --quiet, --silent do not print \`checking...' messages
+ -q, --quiet, --silent do not print \`checking ...' messages
--cache-file=FILE cache test results in FILE [disabled]
-C, --config-cache alias for \`--cache-file=config.cache'
-n, --no-create do not create output files
@@ -1457,7 +1459,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of llvm 2.7:";;
+ short | recursive ) echo "Configuration of llvm 2.8:";;
esac
cat <<\_ACEOF
@@ -1465,10 +1467,10 @@ Optional Features:
--disable-option-checking ignore unrecognized --enable/--with options
--disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
--enable-FEATURE[=ARG] include FEATURE [ARG=yes]
- --enable-optimized Compile with optimizations enabled (default is NO)
+ --enable-optimized Compile with optimizations enabled (default is YES)
--enable-profiling Compile with profiling enabled (default is NO)
--enable-assertions Compile with assertion checks enabled (default is
- YES)
+ NO)
--enable-expensive-checks
Compile with expensive debug checks enabled (default
is NO)
@@ -1483,20 +1485,18 @@ Optional Features:
is YES)
--enable-shared Build a shared library and link tools against it
(default is NO)
+ --enable-timestamps Enable embedding timestamp information in build
+ (default is YES)
--enable-targets Build specific host targets: all or
target1,target2,... Valid targets are: host, x86,
x86_64, sparc, powerpc, alpha, arm, mips, spu,
- pic16, xcore, msp430, systemz, blackfin, cbe, msil,
- and cpp (default=all)
+ pic16, xcore, msp430, systemz, blackfin, cbe, and
+ cpp (default=all)
--enable-cbe-printf-a Enable C Backend output with hex floating point via
%a (default is YES)
--enable-bindings Build specific language bindings:
all,auto,none,{binding-name} (default=auto)
--enable-libffi Check for the presence of libffi (default is NO)
- --enable-llvmc-dynamic Link LLVMC dynamically (default is NO, unless on
- Win32)
- --enable-llvmc-dynamic-plugins
- Enable dynamic LLVMC plugins (default is YES)
--enable-ltdl-install install libltdl
Optional Packages:
@@ -1612,10 +1612,10 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
-llvm configure 2.7
-generated by GNU Autoconf 2.65
+llvm configure 2.8
+generated by GNU Autoconf 2.67
-Copyright (C) 2009 Free Software Foundation, Inc.
+Copyright (C) 2010 Free Software Foundation, Inc.
This configure script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it.
@@ -1729,7 +1729,7 @@ $as_echo "$ac_try_echo"; } >&5
mv -f conftest.er1 conftest.err
fi
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
- test $ac_status = 0; } >/dev/null && {
+ test $ac_status = 0; } > conftest.i && {
test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
test ! -s conftest.err
}; then :
@@ -1754,7 +1754,7 @@ ac_fn_c_check_header_compile ()
as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
+if eval "test \"\${$3+set}\"" = set; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -1868,10 +1868,10 @@ fi
ac_fn_c_check_header_mongrel ()
{
as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
- if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
+ if eval "test \"\${$3+set}\"" = set; then :
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
+if eval "test \"\${$3+set}\"" = set; then :
$as_echo_n "(cached) " >&6
fi
eval ac_res=\$$3
@@ -1907,7 +1907,7 @@ if ac_fn_c_try_cpp "$LINENO"; then :
else
ac_header_preproc=no
fi
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.err conftest.i conftest.$ac_ext
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5
$as_echo "$ac_header_preproc" >&6; }
@@ -1930,17 +1930,15 @@ $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;}
$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;}
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
-( cat <<\_ASBOX
-## ----------------------------------- ##
+( $as_echo "## ----------------------------------- ##
## Report this to llvmbugs at cs.uiuc.edu ##
-## ----------------------------------- ##
-_ASBOX
+## ----------------------------------- ##"
) | sed "s/^/$as_me: WARNING: /" >&2
;;
esac
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
+if eval "test \"\${$3+set}\"" = set; then :
$as_echo_n "(cached) " >&6
else
eval "$3=\$ac_header_compiler"
@@ -1961,7 +1959,7 @@ ac_fn_c_check_func ()
as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
+if eval "test \"\${$3+set}\"" = set; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -2029,7 +2027,7 @@ ac_fn_c_check_type ()
as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
+if eval "test \"\${$3+set}\"" = set; then :
$as_echo_n "(cached) " >&6
else
eval "$3=no"
@@ -2115,58 +2113,12 @@ fi
as_fn_set_status $ac_retval
} # ac_fn_cxx_try_run
-
-# ac_fn_cxx_try_link LINENO
-# -------------------------
-# Try to link conftest.$ac_ext, and return whether this succeeded.
-ac_fn_cxx_try_link ()
-{
- as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
- rm -f conftest.$ac_objext conftest$ac_exeext
- if { { ac_try="$ac_link"
-case "(($ac_try" in
- *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
- *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
- (eval "$ac_link") 2>conftest.err
- ac_status=$?
- if test -s conftest.err; then
- grep -v '^ *+' conftest.err >conftest.er1
- cat conftest.er1 >&5
- mv -f conftest.er1 conftest.err
- fi
- $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
- test $ac_status = 0; } && {
- test -z "$ac_cxx_werror_flag" ||
- test ! -s conftest.err
- } && test -s conftest$ac_exeext && {
- test "$cross_compiling" = yes ||
- $as_test_x conftest$ac_exeext
- }; then :
- ac_retval=0
-else
- $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
- ac_retval=1
-fi
- # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
- # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
- # interfere with the next link command; also delete a directory that is
- # left behind by Apple's compiler. We do this before executing the actions.
- rm -rf conftest.dSYM conftest_ipa8_conftest.oo
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
- as_fn_set_status $ac_retval
-
-} # ac_fn_cxx_try_link
cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by llvm $as_me 2.7, which was
-generated by GNU Autoconf 2.65. Invocation command line was
+It was created by llvm $as_me 2.8, which was
+generated by GNU Autoconf 2.67. Invocation command line was
$ $0 $@
@@ -2276,11 +2228,9 @@ trap 'exit_status=$?
{
echo
- cat <<\_ASBOX
-## ---------------- ##
+ $as_echo "## ---------------- ##
## Cache variables. ##
-## ---------------- ##
-_ASBOX
+## ---------------- ##"
echo
# The following way of writing the cache mishandles newlines in values,
(
@@ -2314,11 +2264,9 @@ $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
)
echo
- cat <<\_ASBOX
-## ----------------- ##
+ $as_echo "## ----------------- ##
## Output variables. ##
-## ----------------- ##
-_ASBOX
+## ----------------- ##"
echo
for ac_var in $ac_subst_vars
do
@@ -2331,11 +2279,9 @@ _ASBOX
echo
if test -n "$ac_subst_files"; then
- cat <<\_ASBOX
-## ------------------- ##
+ $as_echo "## ------------------- ##
## File substitutions. ##
-## ------------------- ##
-_ASBOX
+## ------------------- ##"
echo
for ac_var in $ac_subst_files
do
@@ -2349,11 +2295,9 @@ _ASBOX
fi
if test -s confdefs.h; then
- cat <<\_ASBOX
-## ----------- ##
+ $as_echo "## ----------- ##
## confdefs.h. ##
-## ----------- ##
-_ASBOX
+## ----------- ##"
echo
cat confdefs.h
echo
@@ -2408,7 +2352,12 @@ _ACEOF
ac_site_file1=NONE
ac_site_file2=NONE
if test -n "$CONFIG_SITE"; then
- ac_site_file1=$CONFIG_SITE
+ # We do not want a PATH search for config.site.
+ case $CONFIG_SITE in #((
+ -*) ac_site_file1=./$CONFIG_SITE;;
+ */*) ac_site_file1=$CONFIG_SITE;;
+ *) ac_site_file1=./$CONFIG_SITE;;
+ esac
elif test "x$prefix" != xNONE; then
ac_site_file1=$prefix/share/config.site
ac_site_file2=$prefix/etc/config.site
@@ -2423,7 +2372,11 @@ do
{ $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
$as_echo "$as_me: loading site script $ac_site_file" >&6;}
sed 's/^/| /' "$ac_site_file" >&5
- . "$ac_site_file"
+ . "$ac_site_file" \
+ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "failed to load site script $ac_site_file
+See \`config.log' for more details" "$LINENO" 5 ; }
fi
done
@@ -2502,7 +2455,7 @@ if $ac_cache_corrupted; then
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
{ $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
- as_fn_error "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
+ as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
fi
## -------------------- ##
## Main body of script. ##
@@ -2526,16 +2479,22 @@ LLVM_COPYRIGHT="Copyright (c) 2003-2010 University of Illinois at Urbana-Champai
ac_aux_dir=
for ac_dir in autoconf "$srcdir"/autoconf; do
- for ac_t in install-sh install.sh shtool; do
- if test -f "$ac_dir/$ac_t"; then
- ac_aux_dir=$ac_dir
- ac_install_sh="$ac_aux_dir/$ac_t -c"
- break 2
- fi
- done
+ if test -f "$ac_dir/install-sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f "$ac_dir/install.sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ elif test -f "$ac_dir/shtool"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/shtool install -c"
+ break
+ fi
done
if test -z "$ac_aux_dir"; then
- as_fn_error "cannot find install-sh, install.sh, or shtool in autoconf \"$srcdir\"/autoconf" "$LINENO" 5
+ as_fn_error $? "cannot find install-sh, install.sh, or shtool in autoconf \"$srcdir\"/autoconf" "$LINENO" 5
fi
# These three variables are undocumented and unsupported,
@@ -2550,45 +2509,64 @@ ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
if test ${srcdir} != "." ; then
if test -f ${srcdir}/include/llvm/Config/config.h ; then
- as_fn_error "Already configured in ${srcdir}" "$LINENO" 5
+ as_fn_error $? "Already configured in ${srcdir}" "$LINENO" 5
fi
fi
+
+
+if test -d ${srcdir}/projects/llvm-gcc ; then
+
+
+subdirs="$subdirs projects/llvm-gcc"
+
+fi
+
+if test -d ${srcdir}/projects/test-suite ; then
+ subdirs="$subdirs projects/test-suite"
+
+fi
+
+if test -d ${srcdir}/projects/llvm-test ; then
+ subdirs="$subdirs projects/llvm-test"
+
+fi
+
+if test -d ${srcdir}/projects/poolalloc ; then
+ subdirs="$subdirs projects/poolalloc"
+
+fi
+
+if test -d ${srcdir}/projects/llvm-poolalloc ; then
+ subdirs="$subdirs projects/llvm-poolalloc"
+
+fi
+
for i in `ls ${srcdir}/projects`
do
if test -d ${srcdir}/projects/${i} ; then
case ${i} in
- CVS) ;;
- sample)
-
-subdirs="$subdirs projects/sample"
+ sample) subdirs="$subdirs projects/sample"
;;
privbracket) subdirs="$subdirs projects/privbracket"
;;
llvm-stacker) subdirs="$subdirs projects/llvm-stacker"
;;
- # llvm-test is the old name of the test-suite, kept here for backwards
- # compatibility
- llvm-test) subdirs="$subdirs projects/llvm-test"
- ;;
- test-suite) subdirs="$subdirs projects/test-suite"
- ;;
llvm-reopt) subdirs="$subdirs projects/llvm-reopt"
;;
- llvm-gcc) subdirs="$subdirs projects/llvm-gcc"
- ;;
llvm-java) subdirs="$subdirs projects/llvm-java"
;;
llvm-tv) subdirs="$subdirs projects/llvm-tv"
;;
- llvm-poolalloc) subdirs="$subdirs projects/llvm-poolalloc"
- ;;
- poolalloc) subdirs="$subdirs projects/poolalloc"
- ;;
safecode) subdirs="$subdirs projects/safecode"
;;
llvm-kernel) subdirs="$subdirs projects/llvm-kernel"
;;
+ llvm-gcc) ;;
+ test-suite) ;;
+ llvm-test) ;;
+ poolalloc) ;;
+ llvm-poolalloc) ;;
*)
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unknown project (${i}) won't be configured automatically" >&5
$as_echo "$as_me: WARNING: Unknown project (${i}) won't be configured automatically" >&2;}
@@ -2600,7 +2578,7 @@ done
# Make sure we can run config.sub.
$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
- as_fn_error "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
+ as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5
$as_echo_n "checking build system type... " >&6; }
@@ -2611,16 +2589,16 @@ else
test "x$ac_build_alias" = x &&
ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"`
test "x$ac_build_alias" = x &&
- as_fn_error "cannot guess build type; you must specify one" "$LINENO" 5
+ as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5
ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` ||
- as_fn_error "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5
+ as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5
$as_echo "$ac_cv_build" >&6; }
case $ac_cv_build in
*-*-*) ;;
-*) as_fn_error "invalid value of canonical build" "$LINENO" 5;;
+*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5 ;;
esac
build=$ac_cv_build
ac_save_IFS=$IFS; IFS='-'
@@ -2645,7 +2623,7 @@ else
ac_cv_host=$ac_cv_build
else
ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` ||
- as_fn_error "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5
+ as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5
fi
fi
@@ -2653,7 +2631,7 @@ fi
$as_echo "$ac_cv_host" >&6; }
case $ac_cv_host in
*-*-*) ;;
-*) as_fn_error "invalid value of canonical host" "$LINENO" 5;;
+*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5 ;;
esac
host=$ac_cv_host
ac_save_IFS=$IFS; IFS='-'
@@ -2678,7 +2656,7 @@ else
ac_cv_target=$ac_cv_host
else
ac_cv_target=`$SHELL "$ac_aux_dir/config.sub" $target_alias` ||
- as_fn_error "$SHELL $ac_aux_dir/config.sub $target_alias failed" "$LINENO" 5
+ as_fn_error $? "$SHELL $ac_aux_dir/config.sub $target_alias failed" "$LINENO" 5
fi
fi
@@ -2686,7 +2664,7 @@ fi
$as_echo "$ac_cv_target" >&6; }
case $ac_cv_target in
*-*-*) ;;
-*) as_fn_error "invalid value of canonical target" "$LINENO" 5;;
+*) as_fn_error $? "invalid value of canonical target" "$LINENO" 5 ;;
esac
target=$ac_cv_target
ac_save_IFS=$IFS; IFS='-'
@@ -2735,6 +2713,11 @@ else
llvm_cv_no_link_all_option="-Wl,-noall_load"
llvm_cv_os_type="Darwin"
llvm_cv_platform_type="Unix" ;;
+ *-*-minix*)
+ llvm_cv_link_all_option="-Wl,-all_load"
+ llvm_cv_no_link_all_option="-Wl,-noall_load"
+ llvm_cv_os_type="Minix"
+ llvm_cv_platform_type="Unix" ;;
*-*-freebsd*)
llvm_cv_link_all_option="-Wl,--whole-archive"
llvm_cv_no_link_all_option="-Wl,--no-whole-archive"
@@ -2829,6 +2812,8 @@ else
llvm_cv_target_os_type="Cygwin" ;;
*-*-darwin*)
llvm_cv_target_os_type="Darwin" ;;
+ *-*-minix*)
+ llvm_cv_target_os_type="Minix" ;;
*-*-freebsd*)
llvm_cv_target_os_type="FreeBSD" ;;
*-*-openbsd*)
@@ -2863,7 +2848,7 @@ fi
$as_echo "$llvm_cv_target_os_type" >&6; }
if test "$llvm_cv_os_type" = "Unknown" ; then
- as_fn_error "Operating system is unknown, configure can't continue" "$LINENO" 5
+ as_fn_error $? "Operating system is unknown, configure can't continue" "$LINENO" 5
fi
OS=$llvm_cv_os_type
@@ -3239,8 +3224,8 @@ fi
test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "no acceptable C compiler found in \$PATH
-See \`config.log' for more details." "$LINENO" 5; }
+as_fn_error $? "no acceptable C compiler found in \$PATH
+See \`config.log' for more details" "$LINENO" 5 ; }
# Provide some information about the compiler.
$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
@@ -3354,9 +3339,8 @@ sed 's/^/| /' conftest.$ac_ext >&5
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-{ as_fn_set_status 77
-as_fn_error "C compiler cannot create executables
-See \`config.log' for more details." "$LINENO" 5; }; }
+as_fn_error 77 "C compiler cannot create executables
+See \`config.log' for more details" "$LINENO" 5 ; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
@@ -3398,8 +3382,8 @@ done
else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "cannot compute suffix of executables: cannot compile and link
-See \`config.log' for more details." "$LINENO" 5; }
+as_fn_error $? "cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details" "$LINENO" 5 ; }
fi
rm -f conftest conftest$ac_cv_exeext
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
@@ -3456,9 +3440,9 @@ $as_echo "$ac_try_echo"; } >&5
else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "cannot run C compiled programs.
+as_fn_error $? "cannot run C compiled programs.
If you meant to cross compile, use \`--host'.
-See \`config.log' for more details." "$LINENO" 5; }
+See \`config.log' for more details" "$LINENO" 5 ; }
fi
fi
fi
@@ -3509,8 +3493,8 @@ sed 's/^/| /' conftest.$ac_ext >&5
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "cannot compute suffix of object files: cannot compile
-See \`config.log' for more details." "$LINENO" 5; }
+as_fn_error $? "cannot compute suffix of object files: cannot compile
+See \`config.log' for more details" "$LINENO" 5 ; }
fi
rm -f conftest.$ac_cv_objext conftest.$ac_ext
fi
@@ -3773,7 +3757,7 @@ else
# Broken: fails on valid input.
continue
fi
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.err conftest.i conftest.$ac_ext
# OK, works on sane cases. Now check whether nonexistent headers
# can be detected and how.
@@ -3789,11 +3773,11 @@ else
ac_preproc_ok=:
break
fi
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.err conftest.i conftest.$ac_ext
done
# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.i conftest.err conftest.$ac_ext
if $ac_preproc_ok; then :
break
fi
@@ -3832,7 +3816,7 @@ else
# Broken: fails on valid input.
continue
fi
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.err conftest.i conftest.$ac_ext
# OK, works on sane cases. Now check whether nonexistent headers
# can be detected and how.
@@ -3848,18 +3832,18 @@ else
ac_preproc_ok=:
break
fi
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.err conftest.i conftest.$ac_ext
done
# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.i conftest.err conftest.$ac_ext
if $ac_preproc_ok; then :
else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "C preprocessor \"$CPP\" fails sanity check
-See \`config.log' for more details." "$LINENO" 5; }
+as_fn_error $? "C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details" "$LINENO" 5 ; }
fi
ac_ext=c
@@ -3920,7 +3904,7 @@ esac
done
IFS=$as_save_IFS
if test -z "$ac_cv_path_GREP"; then
- as_fn_error "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
fi
else
ac_cv_path_GREP=$GREP
@@ -3986,7 +3970,7 @@ esac
done
IFS=$as_save_IFS
if test -z "$ac_cv_path_EGREP"; then
- as_fn_error "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
fi
else
ac_cv_path_EGREP=$EGREP
@@ -4118,8 +4102,7 @@ do :
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default
"
-eval as_val=\$$as_ac_Header
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
_ACEOF
@@ -4350,8 +4333,8 @@ $as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h
;; #(
*)
- as_fn_error "unknown endianness
- presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;;
+ as_fn_error $? "unknown endianness
+ presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;;
esac
@@ -4501,7 +4484,7 @@ fi
fi
fi
- test -z "$BUILD_CC" && as_fn_error "no acceptable cc found in \$PATH" "$LINENO" 5
+ test -z "$BUILD_CC" && as_fn_error $? "no acceptable cc found in \$PATH" "$LINENO" 5
ac_build_link='${BUILD_CC-cc} -o conftest $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS 1>&5'
rm -f conftest*
echo 'int main () { return 0; }' > conftest.$ac_ext
@@ -4518,7 +4501,7 @@ fi
esac
done
else
- as_fn_error "installation or configuration problem: compiler cannot create executables." "$LINENO" 5
+ as_fn_error $? "installation or configuration problem: compiler cannot create executables." "$LINENO" 5
fi
rm -f conftest*
test x"${ac_cv_build_exeext}" = x && ac_cv_build_exeext=blank
@@ -4669,7 +4652,7 @@ else
fi
-if test -d "CVS" -o -d "${srcdir}/CVS" -o -d ".svn" -o -d "${srcdir}/.svn" -o -d ".git" -o -d "${srcdir}/.git"; then
+if test -d ".svn" -o -d "${srcdir}/.svn" -o -d ".git" -o -d "${srcdir}/.git"; then
cvsbuild="yes"
optimize="no"
CVSBUILD=CVSBUILD=1
@@ -4684,7 +4667,7 @@ fi
if test "${enable_optimized+set}" = set; then :
enableval=$enable_optimized;
else
- enableval=$optimize
+ enableval="yes"
fi
if test ${enableval} = "no" ; then
@@ -4714,7 +4697,7 @@ fi
if test "${enable_assertions+set}" = set; then :
enableval=$enable_assertions;
else
- enableval="yes"
+ enableval="no"
fi
if test ${enableval} = "yes" ; then
@@ -4832,7 +4815,7 @@ case "$enableval" in
;;
default) ENABLE_DOXYGEN=0
;;
- *) as_fn_error "Invalid setting for --enable-doxygen. Use \"yes\" or \"no\"" "$LINENO" 5 ;;
+ *) as_fn_error $? "Invalid setting for --enable-doxygen. Use \"yes\" or \"no\"" "$LINENO" 5 ;;
esac
# Check whether --enable-threads was given.
@@ -4849,7 +4832,7 @@ case "$enableval" in
;;
default) ENABLE_THREADS=1
;;
- *) as_fn_error "Invalid setting for --enable-threads. Use \"yes\" or \"no\"" "$LINENO" 5 ;;
+ *) as_fn_error $? "Invalid setting for --enable-threads. Use \"yes\" or \"no\"" "$LINENO" 5 ;;
esac
cat >>confdefs.h <<_ACEOF
@@ -4871,7 +4854,7 @@ case "$enableval" in
;;
default) ENABLE_PIC=1
;;
- *) as_fn_error "Invalid setting for --enable-pic. Use \"yes\" or \"no\"" "$LINENO" 5 ;;
+ *) as_fn_error $? "Invalid setting for --enable-pic. Use \"yes\" or \"no\"" "$LINENO" 5 ;;
esac
cat >>confdefs.h <<_ACEOF
@@ -4893,9 +4876,31 @@ case "$enableval" in
;;
default) ENABLE_SHARED=0
;;
- *) as_fn_error "Invalid setting for --enable-shared. Use \"yes\" or \"no\"" "$LINENO" 5 ;;
+ *) as_fn_error $? "Invalid setting for --enable-shared. Use \"yes\" or \"no\"" "$LINENO" 5 ;;
esac
+# Check whether --enable-timestamps was given.
+if test "${enable_timestamps+set}" = set; then :
+ enableval=$enable_timestamps;
+else
+ enableval=default
+fi
+
+case "$enableval" in
+ yes) ENABLE_TIMESTAMPS=1
+ ;;
+ no) ENABLE_TIMESTAMPS=0
+ ;;
+ default) ENABLE_TIMESTAMPS=1
+ ;;
+ *) as_fn_error $? "Invalid setting for --enable-timestamps. Use \"yes\" or \"no\"" "$LINENO" 5 ;;
+esac
+
+cat >>confdefs.h <<_ACEOF
+#define ENABLE_TIMESTAMPS $ENABLE_TIMESTAMPS
+_ACEOF
+
+
TARGETS_TO_BUILD=""
# Check whether --enable-targets was given.
if test "${enable_targets+set}" = set; then :
@@ -4908,7 +4913,7 @@ if test "$enableval" = host-only ; then
enableval=host
fi
case "$enableval" in
- all) TARGETS_TO_BUILD="X86 Sparc PowerPC Alpha ARM Mips CellSPU PIC16 XCore MSP430 SystemZ Blackfin CBackend MSIL CppBackend MBlaze" ;;
+ all) TARGETS_TO_BUILD="X86 Sparc PowerPC Alpha ARM Mips CellSPU PIC16 XCore MSP430 SystemZ Blackfin CBackend CppBackend MBlaze" ;;
*)for a_target in `echo $enableval|sed -e 's/,/ /g' ` ; do
case "$a_target" in
x86) TARGETS_TO_BUILD="X86 $TARGETS_TO_BUILD" ;;
@@ -4925,7 +4930,6 @@ case "$enableval" in
systemz) TARGETS_TO_BUILD="SystemZ $TARGETS_TO_BUILD" ;;
blackfin) TARGETS_TO_BUILD="Blackfin $TARGETS_TO_BUILD" ;;
cbe) TARGETS_TO_BUILD="CBackend $TARGETS_TO_BUILD" ;;
- msil) TARGETS_TO_BUILD="MSIL $TARGETS_TO_BUILD" ;;
cpp) TARGETS_TO_BUILD="CppBackend $TARGETS_TO_BUILD" ;;
mblaze) TARGETS_TO_BUILD="MBlaze $TARGETS_TO_BUILD" ;;
host) case "$llvm_cv_target_arch" in
@@ -4943,9 +4947,9 @@ case "$enableval" in
MSP430) TARGETS_TO_BUILD="MSP430 $TARGETS_TO_BUILD" ;;
s390x) TARGETS_TO_BUILD="SystemZ $TARGETS_TO_BUILD" ;;
Blackfin) TARGETS_TO_BUILD="Blackfin $TARGETS_TO_BUILD" ;;
- *) as_fn_error "Can not set target to build" "$LINENO" 5 ;;
+ *) as_fn_error $? "Can not set target to build" "$LINENO" 5 ;;
esac ;;
- *) as_fn_error "Unrecognized target $a_target" "$LINENO" 5 ;;
+ *) as_fn_error $? "Unrecognized target $a_target" "$LINENO" 5 ;;
esac
done
;;
@@ -4957,10 +4961,27 @@ TARGETS_TO_BUILD=$TARGETS_TO_BUILD
# If so, define LLVM_NATIVE_ARCH to that LLVM target.
for a_target in $TARGETS_TO_BUILD; do
if test "$a_target" = "$LLVM_NATIVE_ARCH"; then
- LLVM_NATIVE_ARCHTARGET="${LLVM_NATIVE_ARCH}Target"
cat >>confdefs.h <<_ACEOF
-#define LLVM_NATIVE_ARCH $LLVM_NATIVE_ARCHTARGET
+#define LLVM_NATIVE_ARCH $LLVM_NATIVE_ARCH
+_ACEOF
+
+ LLVM_NATIVE_TARGET="LLVMInitialize${LLVM_NATIVE_ARCH}Target"
+ LLVM_NATIVE_TARGETINFO="LLVMInitialize${LLVM_NATIVE_ARCH}TargetInfo"
+ LLVM_NATIVE_ASMPRINTER="LLVMInitialize${LLVM_NATIVE_ARCH}AsmPrinter"
+
+cat >>confdefs.h <<_ACEOF
+#define LLVM_NATIVE_TARGET $LLVM_NATIVE_TARGET
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define LLVM_NATIVE_TARGETINFO $LLVM_NATIVE_TARGETINFO
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define LLVM_NATIVE_ASMPRINTER $LLVM_NATIVE_ASMPRINTER
_ACEOF
fi
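
The three LLVM_NATIVE_* macros added here replace the old LLVM_NATIVE_ARCHTARGET token-pasting scheme; the rewritten LLVMInitializeNativeTarget() in llvm-c/Target.h further down in this patch calls them directly. A sketch of the resulting defines on an x86 host (values inferred from the shell above, not copied from an actual build):

# Assuming LLVM_NATIVE_ARCH=X86, the confdefs.h additions amount to:
#   #define LLVM_NATIVE_ARCH X86
#   #define LLVM_NATIVE_TARGET LLVMInitializeX86Target
#   #define LLVM_NATIVE_TARGETINFO LLVMInitializeX86TargetInfo
#   #define LLVM_NATIVE_ASMPRINTER LLVMInitializeX86AsmPrinter
grep LLVM_NATIVE include/llvm/Config/*.h
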
@@ -5003,7 +5024,7 @@ case "$enableval" in
;;
default) ENABLE_CBE_PRINTF_A=1
;;
- *) as_fn_error "Invalid setting for --enable-cbe-printf-a. Use \"yes\" or \"no\"" "$LINENO" 5 ;;
+ *) as_fn_error $? "Invalid setting for --enable-cbe-printf-a. Use \"yes\" or \"no\"" "$LINENO" 5 ;;
esac
cat >>confdefs.h <<_ACEOF
@@ -5022,7 +5043,7 @@ fi
case "$withval" in
default) WITH_LLVMGCCDIR=default ;;
/* | [A-Za-z]:[\\/]*) WITH_LLVMGCCDIR=$withval ;;
- *) as_fn_error "Invalid path for --with-llvmgccdir. Provide full path" "$LINENO" 5 ;;
+ *) as_fn_error $? "Invalid path for --with-llvmgccdir. Provide full path" "$LINENO" 5 ;;
esac
@@ -5050,11 +5071,11 @@ if test -n "$LLVMGXX"; then
fi
if test -n "$LLVMGCC" && test -z "$LLVMGXX"; then
- as_fn_error "Invalid llvm-g++. Use --with-llvmgxx when --with-llvmgcc is used" "$LINENO" 5;
+ as_fn_error $? "Invalid llvm-g++. Use --with-llvmgxx when --with-llvmgcc is used" "$LINENO" 5 ;
fi
if test -n "$LLVMGXX" && test -z "$LLVMGCC"; then
- as_fn_error "Invalid llvm-gcc. Use --with-llvmgcc when --with-llvmgxx is used" "$LINENO" 5;
+ as_fn_error $? "Invalid llvm-gcc. Use --with-llvmgcc when --with-llvmgxx is used" "$LINENO" 5 ;
fi
@@ -5082,7 +5103,7 @@ WITH_BUILT_CLANG=0
if test "$with_clang" != "default"; then
WITH_CLANGPATH="$with_clang"
if ! test -x "$WITH_CLANGPATH"; then
- as_fn_error "invalid --with-clang, path does not specify an executable" "$LINENO" 5
+ as_fn_error $? "invalid --with-clang, path does not specify an executable" "$LINENO" 5
fi
elif test "$with_built_clang" = "yes"; then
WITH_BUILT_CLANG=1
@@ -5090,7 +5111,7 @@ elif test "$with_built_clang" = "no"; then
WITH_BUILT_CLANG=0
else
if test "$with_built_clang" != "check"; then
- as_fn_error "invalid value for --with-built-clang." "$LINENO" 5
+ as_fn_error $? "invalid value for --with-built-clang." "$LINENO" 5
fi
if test -f ${srcdir}/tools/clang/README.txt; then
@@ -5129,8 +5150,9 @@ $as_echo_n "checking optimization flags... " >&6; }
case "$withval" in
default)
case "$llvm_cv_os_type" in
- MingW) optimize_option=-O3 ;;
- *) optimize_option=-O2 ;;
+ FreeBSD) optimize_option=-O2 ;;
+ MingW) optimize_option=-O2 ;;
+ *) optimize_option=-O3 ;;
esac ;;
*) optimize_option="$withval" ;;
esac
@@ -5169,7 +5191,7 @@ case "$enableval" in
*)for a_binding in `echo $enableval|sed -e 's/,/ /g' ` ; do
case "$a_binding" in
ocaml) BINDINGS_TO_BUILD="ocaml $BINDINGS_TO_BUILD" ;;
- *) as_fn_error "Unrecognized binding $a_binding" "$LINENO" 5 ;;
+ *) as_fn_error $? "Unrecognized binding $a_binding" "$LINENO" 5 ;;
esac
done
;;
@@ -5186,7 +5208,7 @@ fi
case "$withval" in
auto) with_ocaml_libdir="$withval" ;;
/* | [A-Za-z]:[\\/]*) with_ocaml_libdir="$withval" ;;
- *) as_fn_error "Invalid path for --with-ocaml-libdir. Provide full path" "$LINENO" 5 ;;
+ *) as_fn_error $? "Invalid path for --with-ocaml-libdir. Provide full path" "$LINENO" 5 ;;
esac
@@ -5270,14 +5292,14 @@ fi
case "$withval" in
default) WITH_BINUTILS_INCDIR=default ;;
/* | [A-Za-z]:[\\/]*) WITH_BINUTILS_INCDIR=$withval ;;
- *) as_fn_error "Invalid path for --with-binutils-include. Provide full path" "$LINENO" 5 ;;
+ *) as_fn_error $? "Invalid path for --with-binutils-include. Provide full path" "$LINENO" 5 ;;
esac
if test "x$WITH_BINUTILS_INCDIR" != xdefault ; then
BINUTILS_INCDIR=$WITH_BINUTILS_INCDIR
if test ! -f "$WITH_BINUTILS_INCDIR/plugin-api.h"; then
echo "$WITH_BINUTILS_INCDIR/plugin-api.h"
- as_fn_error "Invalid path to directory containing plugin-api.h." "$LINENO" 5;
+ as_fn_error $? "Invalid path to directory containing plugin-api.h." "$LINENO" 5 ;
fi
fi
@@ -5286,49 +5308,13 @@ if test "${enable_libffi+set}" = set; then :
enableval=$enable_libffi; case "$enableval" in
yes) llvm_cv_enable_libffi="yes" ;;
no) llvm_cv_enable_libffi="no" ;;
- *) as_fn_error "Invalid setting for --enable-libffi. Use \"yes\" or \"no\"" "$LINENO" 5 ;;
+ *) as_fn_error $? "Invalid setting for --enable-libffi. Use \"yes\" or \"no\"" "$LINENO" 5 ;;
esac
else
llvm_cv_enable_libffi=no
fi
-if test "$llvm_cv_os_type" = "Win32" ; then
- llvmc_dynamic="yes"
-else
- llvmc_dynamic="no"
-fi
-
-# Check whether --enable-llvmc-dynamic was given.
-if test "${enable_llvmc_dynamic+set}" = set; then :
- enableval=$enable_llvmc_dynamic;
-else
- enableval=$llvmc_dynamic
-fi
-
-if test ${enableval} = "yes" && test "$ENABLE_PIC" -eq 1 ; then
- ENABLE_LLVMC_DYNAMIC=ENABLE_LLVMC_DYNAMIC=1
-
-else
- ENABLE_LLVMC_DYNAMIC=
-
-fi
-
-# Check whether --enable-llvmc-dynamic-plugins was given.
-if test "${enable_llvmc_dynamic_plugins+set}" = set; then :
- enableval=$enable_llvmc_dynamic_plugins;
-else
- enableval=yes
-fi
-
-if test ${enableval} = "yes" ; then
- ENABLE_LLVMC_DYNAMIC_PLUGINS=ENABLE_LLVMC_DYNAMIC_PLUGINS=1
-
-else
- ENABLE_LLVMC_DYNAMIC_PLUGINS=
-
-fi
-
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
@@ -5372,7 +5358,7 @@ else
# Broken: fails on valid input.
continue
fi
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.err conftest.i conftest.$ac_ext
# OK, works on sane cases. Now check whether nonexistent headers
# can be detected and how.
@@ -5388,11 +5374,11 @@ else
ac_preproc_ok=:
break
fi
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.err conftest.i conftest.$ac_ext
done
# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.i conftest.err conftest.$ac_ext
if $ac_preproc_ok; then :
break
fi
@@ -5431,7 +5417,7 @@ else
# Broken: fails on valid input.
continue
fi
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.err conftest.i conftest.$ac_ext
# OK, works on sane cases. Now check whether nonexistent headers
# can be detected and how.
@@ -5447,18 +5433,18 @@ else
ac_preproc_ok=:
break
fi
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.err conftest.i conftest.$ac_ext
done
# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.i conftest.err conftest.$ac_ext
if $ac_preproc_ok; then :
else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "C preprocessor \"$CPP\" fails sanity check
-See \`config.log' for more details." "$LINENO" 5; }
+as_fn_error $? "C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details" "$LINENO" 5 ; }
fi
ac_ext=c
@@ -5575,8 +5561,8 @@ fi
test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "no acceptable C compiler found in \$PATH
-See \`config.log' for more details." "$LINENO" 5; }
+as_fn_error $? "no acceptable C compiler found in \$PATH
+See \`config.log' for more details" "$LINENO" 5 ; }
# Provide some information about the compiler.
$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
@@ -7335,6 +7321,10 @@ test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+case "$INSTALL" in
+ [\\/$]* | ?:[\\/]* ) ;;
+ *) INSTALL="\\\$(TOPSRCDIR)/$INSTALL" ;;
+esac
# Extract the first word of "bzip2", so it can be a program name with args.
set dummy bzip2; ac_word=$2
@@ -7376,6 +7366,46 @@ $as_echo "no" >&6; }
fi
+# Extract the first word of "cat", so it can be a program name with args.
+set dummy cat; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_path_CAT+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $CAT in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_CAT="$CAT" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_path_CAT="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+fi
+CAT=$ac_cv_path_CAT
+if test -n "$CAT"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CAT" >&5
+$as_echo "$CAT" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
# Extract the first word of "doxygen", so it can be a program name with args.
set dummy doxygen; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
@@ -7576,6 +7606,46 @@ $as_echo "no" >&6; }
fi
+# Extract the first word of "pdfroff", so it can be a program name with args.
+set dummy pdfroff; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_path_PDFROFF+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $PDFROFF in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_PDFROFF="$PDFROFF" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ ac_cv_path_PDFROFF="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+fi
+PDFROFF=$ac_cv_path_PDFROFF
+if test -n "$PDFROFF"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PDFROFF" >&5
+$as_echo "$PDFROFF" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
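
The CAT and PDFROFF probes above are the stock expanded AC_PATH_PROG loop: keep an absolute path if the user already set the variable, otherwise walk $PATH using $PATH_SEPARATOR and take the first executable hit. Condensed into a stand-alone helper (the function name is invented, and the caching, $ac_exec_ext and $as_test_x portability handling are left out):

find_prog () {
  save_IFS=$IFS; IFS=${PATH_SEPARATOR-:}
  for dir in $PATH; do
    IFS=$save_IFS
    test -z "$dir" && dir=.
    if test -f "$dir/$1" && test -x "$dir/$1"; then
      echo "$dir/$1"
      return 0
    fi
  done
  IFS=$save_IFS
  return 1
}
CAT=`find_prog cat`       # e.g. /bin/cat
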
# Extract the first word of "runtest", so it can be a program name with args.
set dummy runtest; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
@@ -7638,7 +7708,7 @@ if test x"${with_tclinclude}" != x ; then
elif test -f ${with_tclinclude}/src/tclsh ; then
ac_cv_path_tclsh=`(cd ${with_tclinclude}/src; pwd)`
else
- as_fn_error "${with_tclinclude} directory doesn't contain tclsh" "$LINENO" 5
+ as_fn_error $? "${with_tclinclude} directory doesn't contain tclsh" "$LINENO" 5
fi
fi
fi
@@ -7970,6 +8040,31 @@ fi
done
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for linker version" >&5
+$as_echo_n "checking for linker version... " >&6; }
+if test "${llvm_cv_link_version+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ version_string="$(ld -v 2>&1 | head -1)"
+
+ # Check for ld64.
+ if (echo "$version_string" | grep -q "ld64"); then
+ llvm_cv_link_version=$(echo "$version_string" | sed -e "s#.*ld64-\([^ ]*\)#\1#")
+ else
+ llvm_cv_link_version=$(echo "$version_string" | sed -e "s#[^0-9]*\([0-9.]*\).*#\1#")
+ fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $llvm_cv_link_version" >&5
+$as_echo "$llvm_cv_link_version" >&6; }
+
+cat >>confdefs.h <<_ACEOF
+#define HOST_LINK_VERSION "$llvm_cv_link_version"
+_ACEOF
+
+
+
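
The new cached check above reduces the first line of `ld -v` to a bare version number, with a special case for Apple's ld64. The two sed branches on representative strings (the sample version lines are invented for illustration):

# GNU ld style: the leading run of non-digits is dropped, the version kept.
echo 'GNU ld (GNU Binutils for Debian) 2.20.1' \
  | sed -e 's#[^0-9]*\([0-9.]*\).*#\1#'          # prints 2.20.1

# ld64 style: everything up to "ld64-" is dropped.
echo '@(#)PROGRAM:ld  PROJECT:ld64-97.17' \
  | sed -e 's#.*ld64-\([^ ]*\)#\1#'              # prints 97.17
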
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for compiler -Wl,-R<path> option" >&5
$as_echo_n "checking for compiler -Wl,-R<path> option... " >&6; }
if test "${llvm_cv_link_use_r+set}" = set; then :
@@ -7989,7 +8084,7 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
int
main ()
{
-int main() { return 0; }
+
;
return 0;
}
@@ -8038,7 +8133,7 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
int
main ()
{
-int main() { return 0; }
+
;
return 0;
}
@@ -8068,6 +8163,78 @@ $as_echo "#define HAVE_LINK_EXPORT_DYNAMIC 1" >>confdefs.h
fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for compiler -Wl,--version-script option" >&5
+$as_echo_n "checking for compiler -Wl,--version-script option... " >&6; }
+if test "${llvm_cv_link_use_version_script+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+ oldcflags="$CFLAGS"
+
+ # The following code is from the autoconf manual,
+ # "11.13: Limitations of Usual Tools".
+ # Create a temporary directory $tmp in $TMPDIR (default /tmp).
+ # Use mktemp if possible; otherwise fall back on mkdir,
+ # with $RANDOM to make collisions less likely.
+ : ${TMPDIR=/tmp}
+ {
+ tmp=`
+ (umask 077 && mktemp -d "$TMPDIR/fooXXXXXX") 2>/dev/null
+ ` &&
+ test -n "$tmp" && test -d "$tmp"
+ } || {
+ tmp=$TMPDIR/foo$$-$RANDOM
+ (umask 077 && mkdir "$tmp")
+ } || exit $?
+
+ echo "{" > "$tmp/export.map"
+ echo " global: main;" >> "$tmp/export.map"
+ echo " local: *;" >> "$tmp/export.map"
+ echo "};" >> "$tmp/export.map"
+
+ CFLAGS="$CFLAGS -Wl,--version-script=$tmp/export.map"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ llvm_cv_link_use_version_script=yes
+else
+ llvm_cv_link_use_version_script=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+ rm "$tmp/export.map"
+ rmdir "$tmp"
+ CFLAGS="$oldcflags"
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $llvm_cv_link_use_version_script" >&5
+$as_echo "$llvm_cv_link_use_version_script" >&6; }
+if test "$llvm_cv_link_use_version_script" = yes ; then
+ HAVE_LINK_VERSION_SCRIPT=1
+
+ fi
+
+
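
The new probe above creates a scratch directory (mktemp, falling back to mkdir with $RANDOM), writes a minimal linker version script into it, and checks whether the compiler driver accepts -Wl,--version-script; HAVE_LINK_VERSION_SCRIPT is set only when the link succeeds. The same experiment outside configure, with arbitrary file names, looks roughly like this:

cat > export.map <<'EOF'
{
  global: main;
  local: *;
};
EOF

cat > conftest.c <<'EOF'
int main (void) { return 0; }
EOF

if cc conftest.c -Wl,--version-script=export.map -o conftest 2>/dev/null; then
  echo "linker accepts --version-script"
else
  echo "linker does not accept --version-script"
fi
rm -f conftest conftest.c export.map
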
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5
@@ -8155,7 +8322,7 @@ for ac_hdr in dirent.h sys/ndir.h sys/dir.h ndir.h; do
as_ac_Header=`$as_echo "ac_cv_header_dirent_$ac_hdr" | $as_tr_sh`
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_hdr that defines DIR" >&5
$as_echo_n "checking for $ac_hdr that defines DIR... " >&6; }
-if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then :
+if eval "test \"\${$as_ac_Header+set}\"" = set; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -8182,8 +8349,7 @@ fi
eval ac_res=\$$as_ac_Header
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
$as_echo "$ac_res" >&6; }
-eval as_val=\$$as_ac_Header
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_hdr" | $as_tr_cpp` 1
_ACEOF
@@ -9590,7 +9756,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<EOF
-#line 9593 "configure"
+#line 9759 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -9811,8 +9977,7 @@ for ac_func in argz_append argz_create_sep argz_insert argz_next argz_stringify
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
_ACEOF
@@ -9844,8 +10009,7 @@ for ac_header in assert.h ctype.h errno.h malloc.h memory.h stdlib.h \
do :
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
-eval as_val=\$$as_ac_Header
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
_ACEOF
@@ -9858,8 +10022,7 @@ for ac_header in dl.h sys/dl.h dld.h mach-o/dyld.h
do :
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
-eval as_val=\$$as_ac_Header
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
_ACEOF
@@ -9872,8 +10035,7 @@ for ac_header in string.h strings.h
do :
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
-eval as_val=\$$as_ac_Header
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
_ACEOF
@@ -9887,8 +10049,7 @@ for ac_func in strchr index
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
_ACEOF
@@ -9900,8 +10061,7 @@ for ac_func in strrchr rindex
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
_ACEOF
@@ -9913,8 +10073,7 @@ for ac_func in memcpy bcopy
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
_ACEOF
@@ -9926,8 +10085,7 @@ for ac_func in memmove strcmp
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
_ACEOF
@@ -9939,8 +10097,7 @@ for ac_func in closedir opendir readdir
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
_ACEOF
@@ -10073,7 +10230,7 @@ if test "$with_llvmcc" != "check"; then
if (test "$with_llvmcc" != "llvm-gcc" &&
test "$with_llvmcc" != "clang" &&
test "$with_llvmcc" != "none"); then
- as_fn_error "invalid value for --with-llvmcc, expected 'llvm-gcc', 'clang', or 'none'." "$LINENO" 5
+ as_fn_error $? "invalid value for --with-llvmcc, expected 'llvm-gcc', 'clang', or 'none'." "$LINENO" 5
fi
WITH_LLVMCC="$with_llvmcc"
elif test -n "$LLVMGCC"; then
@@ -10104,12 +10261,12 @@ esac
if test "$GCC" != "yes" && test "$ICC" != "yes"
then
- as_fn_error "gcc|icc required but not found" "$LINENO" 5
+ as_fn_error $? "gcc|icc required but not found" "$LINENO" 5
fi
if test "$GXX" != "yes" && test "$IXX" != "yes"
then
- as_fn_error "g++|icc required but not found" "$LINENO" 5
+ as_fn_error $? "g++|icc required but not found" "$LINENO" 5
fi
if test "$GCC" = "yes"
@@ -10124,14 +10281,14 @@ _ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
else
- as_fn_error "gcc 3.x required, but you have a lower version" "$LINENO" 5
+ as_fn_error $? "gcc 3.x required, but you have a lower version" "$LINENO" 5
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
fi
if test -z "$llvm_cv_gnu_make_command"
then
- as_fn_error "GNU Make required but not found" "$LINENO" 5
+ as_fn_error $? "GNU Make required but not found" "$LINENO" 5
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5
@@ -10393,7 +10550,7 @@ if test "$ac_res" != no; then :
$as_echo "#define HAVE_FFI_CALL 1" >>confdefs.h
else
- as_fn_error "libffi not found - configure without --enable-libffi to compile without it" "$LINENO" 5
+ as_fn_error $? "libffi not found - configure without --enable-libffi to compile without it" "$LINENO" 5
fi
fi
@@ -10916,7 +11073,7 @@ for ac_hdr in dirent.h sys/ndir.h sys/dir.h ndir.h; do
as_ac_Header=`$as_echo "ac_cv_header_dirent_$ac_hdr" | $as_tr_sh`
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_hdr that defines DIR" >&5
$as_echo_n "checking for $ac_hdr that defines DIR... " >&6; }
-if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then :
+if eval "test \"\${$as_ac_Header+set}\"" = set; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -10943,8 +11100,7 @@ fi
eval ac_res=\$$as_ac_Header
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
$as_echo "$ac_res" >&6; }
-eval as_val=\$$as_ac_Header
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_hdr" | $as_tr_cpp` 1
_ACEOF
@@ -11351,8 +11507,7 @@ for ac_header in dlfcn.h execinfo.h fcntl.h inttypes.h limits.h link.h
do :
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
-eval as_val=\$$as_ac_Header
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
_ACEOF
@@ -11365,8 +11520,7 @@ for ac_header in malloc.h setjmp.h signal.h stdint.h termios.h unistd.h
do :
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
-eval as_val=\$$as_ac_Header
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
_ACEOF
@@ -11379,8 +11533,7 @@ for ac_header in utime.h windows.h
do :
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
-eval as_val=\$$as_ac_Header
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
_ACEOF
@@ -11393,8 +11546,7 @@ for ac_header in sys/mman.h sys/param.h sys/resource.h sys/time.h
do :
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
-eval as_val=\$$as_ac_Header
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
_ACEOF
@@ -11407,8 +11559,7 @@ for ac_header in sys/types.h sys/ioctl.h malloc/malloc.h mach/mach.h
do :
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
-eval as_val=\$$as_ac_Header
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
_ACEOF
@@ -11417,6 +11568,18 @@ fi
done
+for ac_header in valgrind/valgrind.h
+do :
+ ac_fn_c_check_header_mongrel "$LINENO" "valgrind/valgrind.h" "ac_cv_header_valgrind_valgrind_h" "$ac_includes_default"
+if test "x$ac_cv_header_valgrind_valgrind_h" = x""yes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_VALGRIND_VALGRIND_H 1
+_ACEOF
+
+fi
+
+done
+
if test "$ENABLE_THREADS" -eq 1 ; then
for ac_header in pthread.h
do :
@@ -11444,8 +11607,7 @@ if test "$llvm_cv_enable_libffi" = "yes" ; then
do :
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
-eval as_val=\$$as_ac_Header
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
_ACEOF
@@ -11456,6 +11618,19 @@ done
fi
+for ac_header in CrashReporterClient.h
+do :
+ ac_fn_c_check_header_mongrel "$LINENO" "CrashReporterClient.h" "ac_cv_header_CrashReporterClient_h" "$ac_includes_default"
+if test "x$ac_cv_header_CrashReporterClient_h" = x""yes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_CRASHREPORTERCLIENT_H 1
+_ACEOF
+
+fi
+
+done
+
+
@@ -11581,7 +11756,7 @@ _ACEOF
else
- as_fn_error "Type int64_t required but not found" "$LINENO" 5
+ as_fn_error $? "Type int64_t required but not found" "$LINENO" 5
fi
ac_fn_c_check_type "$LINENO" "uint64_t" "ac_cv_type_uint64_t" "$ac_includes_default"
@@ -11602,7 +11777,7 @@ _ACEOF
else
- as_fn_error "Type uint64_t or u_int64_t required but not found" "$LINENO" 5
+ as_fn_error $? "Type uint64_t or u_int64_t required but not found" "$LINENO" 5
fi
fi
@@ -11613,8 +11788,7 @@ for ac_func in backtrace ceilf floorf roundf rintf nearbyintf getcwd
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
_ACEOF
@@ -11626,8 +11800,7 @@ for ac_func in powf fmodf strtof round
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
_ACEOF
@@ -11639,8 +11812,7 @@ for ac_func in getpagesize getrusage getrlimit setrlimit gettimeofday
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
_ACEOF
@@ -11652,8 +11824,7 @@ for ac_func in isatty mkdtemp mkstemp
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
_ACEOF
@@ -11661,12 +11832,11 @@ _ACEOF
fi
done
-for ac_func in mktemp realpath sbrk setrlimit strdup
+for ac_func in mktemp posix_spawn realpath sbrk setrlimit strdup
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
_ACEOF
@@ -11678,8 +11848,7 @@ for ac_func in strerror strerror_r strerror_s setenv
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
_ACEOF
@@ -11691,8 +11860,7 @@ for ac_func in strtoll strtoq sysconf malloc_zone_statistics
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
_ACEOF
@@ -11704,8 +11872,7 @@ for ac_func in setjmp longjmp sigsetjmp siglongjmp
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
_ACEOF
@@ -12145,8 +12312,7 @@ do :
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default
"
-eval as_val=\$$as_ac_Header
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
_ACEOF
@@ -12453,7 +12619,13 @@ int main() {
}
_ACEOF
-if ac_fn_cxx_try_link "$LINENO"; then :
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
@@ -12470,13 +12642,6 @@ $as_echo "$as_me: WARNING: LLVM will be built thread-unsafe because atomic built
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
if test "$llvm_cv_os_type" = "Linux" -a "$llvm_cv_target_arch" = "x86_64" ; then
@@ -12723,7 +12888,7 @@ $as_echo "$as_me: WARNING: --enable-bindings=ocaml specified, but ocamlopt not f
esac
done
if test "$binding_prereqs_failed" = 1 ; then
- as_fn_error "Prequisites for bindings not satisfied. Fix them or use configure --disable-bindings." "$LINENO" 5
+ as_fn_error $? "Prequisites for bindings not satisfied. Fix them or use configure --disable-bindings." "$LINENO" 5
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for compiler -fvisibility-inlines-hidden option" >&5
@@ -12791,7 +12956,10 @@ fi
-ac_config_headers="$ac_config_headers include/llvm/Config/config.h"
+ac_config_headers="$ac_config_headers include/llvm/Config/config.h include/llvm/Config/llvm-config.h"
+
+
+
ac_config_files="$ac_config_files include/llvm/Config/Targets.def"
@@ -12810,13 +12978,6 @@ ac_config_files="$ac_config_files Makefile.config"
ac_config_files="$ac_config_files llvm.spec"
-ac_config_files="$ac_config_files docs/doxygen.cfg"
-
-
-ac_config_files="$ac_config_files tools/llvmc/plugins/Base/Base.td"
-
-
-ac_config_files="$ac_config_files tools/llvm-config/llvm-config.in"
ac_config_commands="$ac_config_commands setup"
@@ -12944,6 +13105,7 @@ DEFS=-DHAVE_CONFIG_H
ac_libobjs=
ac_ltlibobjs=
+U=
for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
# 1. Remove the extension, and $U if already installed.
ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
@@ -12960,11 +13122,11 @@ LTLIBOBJS=$ac_ltlibobjs
if test -z "${INSTALL_LTDL_TRUE}" && test -z "${INSTALL_LTDL_FALSE}"; then
- as_fn_error "conditional \"INSTALL_LTDL\" was never defined.
+ as_fn_error $? "conditional \"INSTALL_LTDL\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${CONVENIENCE_LTDL_TRUE}" && test -z "${CONVENIENCE_LTDL_FALSE}"; then
- as_fn_error "conditional \"CONVENIENCE_LTDL\" was never defined.
+ as_fn_error $? "conditional \"CONVENIENCE_LTDL\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
@@ -13114,19 +13276,19 @@ export LANGUAGE
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
-# as_fn_error ERROR [LINENO LOG_FD]
-# ---------------------------------
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
-# script with status $?, using 1 if that was 0.
+# script with STATUS, using 1 if that was 0.
as_fn_error ()
{
- as_status=$?; test $as_status -eq 0 && as_status=1
- if test "$3"; then
- as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
- $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
fi
- $as_echo "$as_me: error: $1" >&2
+ $as_echo "$as_me: error: $2" >&2
as_fn_exit $as_status
} # as_fn_error
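
This signature change is what drives the many one-line edits earlier in the file: regenerating with Autoconf 2.67 (see the 2.65 -> 2.67 bump a few hunks down) makes as_fn_error take an explicit exit STATUS as its first argument instead of reusing $?. Side by side, the same failure reads:

# Autoconf 2.65 generated code (old): exits with whatever $? happens to be.
as_fn_error "no acceptable C compiler found in \$PATH" "$LINENO" 5

# Autoconf 2.67 generated code (new): the status is passed explicitly; a
# status of 0 is still promoted to 1 inside as_fn_error.
as_fn_error $? "no acceptable C compiler found in \$PATH" "$LINENO" 5
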
@@ -13322,7 +13484,7 @@ $as_echo X"$as_dir" |
test -d "$as_dir" && break
done
test -z "$as_dirs" || eval "mkdir $as_dirs"
- } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
} # as_fn_mkdir_p
@@ -13375,8 +13537,8 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by llvm $as_me 2.7, which was
-generated by GNU Autoconf 2.65. Invocation command line was
+This file was extended by llvm $as_me 2.8, which was
+generated by GNU Autoconf 2.67. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
CONFIG_HEADERS = $CONFIG_HEADERS
@@ -13441,11 +13603,11 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
-llvm config.status 2.7
-configured by $0, generated by GNU Autoconf 2.65,
+llvm config.status 2.8
+configured by $0, generated by GNU Autoconf 2.67,
with options \\"\$ac_cs_config\\"
-Copyright (C) 2009 Free Software Foundation, Inc.
+Copyright (C) 2010 Free Software Foundation, Inc.
This config.status script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it."
@@ -13461,11 +13623,16 @@ ac_need_defaults=:
while test $# != 0
do
case $1 in
- --*=*)
+ --*=?*)
ac_option=`expr "X$1" : 'X\([^=]*\)='`
ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
ac_shift=:
;;
+ --*=)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=
+ ac_shift=:
+ ;;
*)
ac_option=$1
ac_optarg=$2
@@ -13487,6 +13654,7 @@ do
$ac_shift
case $ac_optarg in
*\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ '') as_fn_error $? "missing file argument" ;;
esac
as_fn_append CONFIG_FILES " '$ac_optarg'"
ac_need_defaults=false;;
@@ -13499,7 +13667,7 @@ do
ac_need_defaults=false;;
--he | --h)
# Conflict between --help and --header
- as_fn_error "ambiguous option: \`$1'
+ as_fn_error $? "ambiguous option: \`$1'
Try \`$0 --help' for more information.";;
--help | --hel | -h )
$as_echo "$ac_cs_usage"; exit ;;
@@ -13508,7 +13676,7 @@ Try \`$0 --help' for more information.";;
ac_cs_silent=: ;;
# This is an error.
- -*) as_fn_error "unrecognized option: \`$1'
+ -*) as_fn_error $? "unrecognized option: \`$1'
Try \`$0 --help' for more information." ;;
*) as_fn_append ac_config_targets " $1"
@@ -13563,6 +13731,7 @@ for ac_config_target in $ac_config_targets
do
case $ac_config_target in
"include/llvm/Config/config.h") CONFIG_HEADERS="$CONFIG_HEADERS include/llvm/Config/config.h" ;;
+ "include/llvm/Config/llvm-config.h") CONFIG_HEADERS="$CONFIG_HEADERS include/llvm/Config/llvm-config.h" ;;
"include/llvm/Config/Targets.def") CONFIG_FILES="$CONFIG_FILES include/llvm/Config/Targets.def" ;;
"include/llvm/Config/AsmPrinters.def") CONFIG_FILES="$CONFIG_FILES include/llvm/Config/AsmPrinters.def" ;;
"include/llvm/Config/AsmParsers.def") CONFIG_FILES="$CONFIG_FILES include/llvm/Config/AsmParsers.def" ;;
@@ -13570,9 +13739,6 @@ do
"include/llvm/System/DataTypes.h") CONFIG_HEADERS="$CONFIG_HEADERS include/llvm/System/DataTypes.h" ;;
"Makefile.config") CONFIG_FILES="$CONFIG_FILES Makefile.config" ;;
"llvm.spec") CONFIG_FILES="$CONFIG_FILES llvm.spec" ;;
- "docs/doxygen.cfg") CONFIG_FILES="$CONFIG_FILES docs/doxygen.cfg" ;;
- "tools/llvmc/plugins/Base/Base.td") CONFIG_FILES="$CONFIG_FILES tools/llvmc/plugins/Base/Base.td" ;;
- "tools/llvm-config/llvm-config.in") CONFIG_FILES="$CONFIG_FILES tools/llvm-config/llvm-config.in" ;;
"setup") CONFIG_COMMANDS="$CONFIG_COMMANDS setup" ;;
"Makefile") CONFIG_COMMANDS="$CONFIG_COMMANDS Makefile" ;;
"Makefile.common") CONFIG_COMMANDS="$CONFIG_COMMANDS Makefile.common" ;;
@@ -13588,7 +13754,7 @@ do
"bindings/Makefile") CONFIG_COMMANDS="$CONFIG_COMMANDS bindings/Makefile" ;;
"bindings/ocaml/Makefile.ocaml") CONFIG_COMMANDS="$CONFIG_COMMANDS bindings/ocaml/Makefile.ocaml" ;;
- *) as_fn_error "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
+ *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5 ;;
esac
done
@@ -13626,7 +13792,7 @@ $debug ||
{
tmp=./conf$$-$RANDOM
(umask 077 && mkdir "$tmp")
-} || as_fn_error "cannot create a temporary directory in ." "$LINENO" 5
+} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5
# Set up the scripts for CONFIG_FILES section.
# No need to generate them if there are no CONFIG_FILES.
@@ -13643,7 +13809,7 @@ if test "x$ac_cr" = x; then
fi
ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
- ac_cs_awk_cr='\r'
+ ac_cs_awk_cr='\\r'
else
ac_cs_awk_cr=$ac_cr
fi
@@ -13657,18 +13823,18 @@ _ACEOF
echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
echo "_ACEOF"
} >conf$$subs.sh ||
- as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
-ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'`
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'`
ac_delim='%!_!# '
for ac_last_try in false false false false false :; do
. ./conf$$subs.sh ||
- as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
if test $ac_delim_n = $ac_delim_num; then
break
elif $ac_last_try; then
- as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
else
ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
fi
@@ -13757,20 +13923,28 @@ if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
else
cat
fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \
- || as_fn_error "could not setup config files machinery" "$LINENO" 5
+ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5
_ACEOF
-# VPATH may cause trouble with some makes, so we remove $(srcdir),
-# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
+# VPATH may cause trouble with some makes, so we remove sole $(srcdir),
+# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and
# trailing colons and then remove the whole line if VPATH becomes empty
# (actually we leave an empty line to preserve line numbers).
if test "x$srcdir" = x.; then
- ac_vpsub='/^[ ]*VPATH[ ]*=/{
-s/:*\$(srcdir):*/:/
-s/:*\${srcdir}:*/:/
-s/:*@srcdir@:*/:/
-s/^\([^=]*=[ ]*\):*/\1/
+ ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{
+h
+s///
+s/^/:/
+s/[ ]*$/:/
+s/:\$(srcdir):/:/g
+s/:\${srcdir}:/:/g
+s/:@srcdir@:/:/g
+s/^:*//
s/:*$//
+x
+s/\(=[ ]*\).*/\1/
+G
+s/\n//
s/^[^=]*=[ ]*$//
}'
fi
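
The rewritten ac_vpsub above still only applies when srcdir is "."; the difference is that $(srcdir), ${srcdir} and @srcdir@ are now removed only when they are sole colon-separated VPATH entries rather than stripped as substrings. A stand-alone check of the common case, reusing the sed program from the hunk above; it should print "VPATH = ../lib":

printf 'VPATH = $(srcdir):../lib\n' | sed '/^[ ]*VPATH[ ]*=[ ]*/{
h
s///
s/^/:/
s/[ ]*$/:/
s/:\$(srcdir):/:/g
s/:\${srcdir}:/:/g
s/:@srcdir@:/:/g
s/^:*//
s/:*$//
x
s/\(=[ ]*\).*/\1/
G
s/\n//
s/^[^=]*=[ ]*$//
}'
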
@@ -13798,7 +13972,7 @@ for ac_last_try in false false :; do
if test -z "$ac_t"; then
break
elif $ac_last_try; then
- as_fn_error "could not make $CONFIG_HEADERS" "$LINENO" 5
+ as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5
else
ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
fi
@@ -13883,7 +14057,7 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
_ACAWK
_ACEOF
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
- as_fn_error "could not setup config headers machinery" "$LINENO" 5
+ as_fn_error $? "could not setup config headers machinery" "$LINENO" 5
fi # test -n "$CONFIG_HEADERS"
@@ -13896,7 +14070,7 @@ do
esac
case $ac_mode$ac_tag in
:[FHL]*:*);;
- :L* | :C*:*) as_fn_error "invalid tag \`$ac_tag'" "$LINENO" 5;;
+ :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5 ;;
:[FH]-) ac_tag=-:-;;
:[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
esac
@@ -13924,7 +14098,7 @@ do
[\\/$]*) false;;
*) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
esac ||
- as_fn_error "cannot find input file: \`$ac_f'" "$LINENO" 5;;
+ as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5 ;;
esac
case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
as_fn_append ac_file_inputs " '$ac_f'"
@@ -13951,7 +14125,7 @@ $as_echo "$as_me: creating $ac_file" >&6;}
case $ac_tag in
*:-:* | *:-) cat >"$tmp/stdin" \
- || as_fn_error "could not create $ac_file" "$LINENO" 5 ;;
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;;
esac
;;
esac
@@ -14082,22 +14256,22 @@ s&@INSTALL@&$ac_INSTALL&;t t
$ac_datarootdir_hack
"
eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \
- || as_fn_error "could not create $ac_file" "$LINENO" 5
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
{ ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
{ ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
-which seems to be undefined. Please make sure it is defined." >&5
+which seems to be undefined. Please make sure it is defined" >&5
$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
-which seems to be undefined. Please make sure it is defined." >&2;}
+which seems to be undefined. Please make sure it is defined" >&2;}
rm -f "$tmp/stdin"
case $ac_file in
-) cat "$tmp/out" && rm -f "$tmp/out";;
*) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";;
esac \
- || as_fn_error "could not create $ac_file" "$LINENO" 5
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
;;
:H)
#
@@ -14108,19 +14282,19 @@ which seems to be undefined. Please make sure it is defined." >&2;}
$as_echo "/* $configure_input */" \
&& eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs"
} >"$tmp/config.h" \
- || as_fn_error "could not create $ac_file" "$LINENO" 5
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
$as_echo "$as_me: $ac_file is unchanged" >&6;}
else
rm -f "$ac_file"
mv "$tmp/config.h" "$ac_file" \
- || as_fn_error "could not create $ac_file" "$LINENO" 5
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
fi
else
$as_echo "/* $configure_input */" \
&& eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \
- || as_fn_error "could not create -" "$LINENO" 5
+ || as_fn_error $? "could not create -" "$LINENO" 5
fi
;;
@@ -14167,7 +14341,7 @@ _ACEOF
ac_clean_files=$ac_clean_files_save
test $ac_write_fail = 0 ||
- as_fn_error "write failure creating $CONFIG_STATUS" "$LINENO" 5
+ as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5
# configure is writing to config.log, and then calls config.status.
@@ -14188,7 +14362,7 @@ if test "$no_create" != yes; then
exec 5>>config.log
# Use ||, not &&, to avoid exiting from the if with $? = 1, which
# would make configure fail if this is the last instruction.
- $ac_cs_success || as_fn_exit $?
+ $ac_cs_success || as_fn_exit 1
fi
#
@@ -14329,7 +14503,7 @@ $as_echo "$as_me: running $SHELL $ac_sub_configure $ac_sub_configure_args --cach
# The eval makes quoting arguments work.
eval "\$SHELL \"\$ac_sub_configure\" $ac_sub_configure_args \
--cache-file=\"\$ac_sub_cache_file\" --srcdir=\"\$ac_srcdir\"" ||
- as_fn_error "$ac_sub_configure failed for $ac_dir" "$LINENO" 5
+ as_fn_error $? "$ac_sub_configure failed for $ac_dir" "$LINENO" 5
fi
cd "$ac_popdir"
diff --git a/libclamav/c++/llvm/include/llvm-c/Core.h b/libclamav/c++/llvm/include/llvm-c/Core.h
index 733b92c..75cee7d 100644
--- a/libclamav/c++/llvm/include/llvm-c/Core.h
+++ b/libclamav/c++/llvm/include/llvm-c/Core.h
@@ -204,8 +204,7 @@ typedef enum {
LLVMPointerTypeKind, /**< Pointers */
LLVMOpaqueTypeKind, /**< Opaque: type with unknown structure */
LLVMVectorTypeKind, /**< SIMD 'packed' format, or other vector type */
- LLVMMetadataTypeKind, /**< Metadata */
- LLVMUnionTypeKind /**< Unions */
+ LLVMMetadataTypeKind /**< Metadata */
} LLVMTypeKind;
typedef enum {
@@ -226,7 +225,10 @@ typedef enum {
LLVMExternalWeakLinkage,/**< ExternalWeak linkage description */
LLVMGhostLinkage, /**< Obsolete */
LLVMCommonLinkage, /**< Tentative definitions */
- LLVMLinkerPrivateLinkage /**< Like Private, but linker removes. */
+ LLVMLinkerPrivateLinkage, /**< Like Private, but linker removes. */
+ LLVMLinkerPrivateWeakLinkage, /**< Like LinkerPrivate, but is weak. */
+ LLVMLinkerPrivateWeakDefAutoLinkage /**< Like LinkerPrivateWeak, but possibly
+ hidden. */
} LLVMLinkage;
typedef enum {
@@ -319,6 +321,8 @@ LLVMTypeRef LLVMGetTypeByName(LLVMModuleRef M, const char *Name);
/** See Module::dump. */
void LLVMDumpModule(LLVMModuleRef M);
+/** See Module::setModuleInlineAsm. */
+void LLVMSetModuleInlineAsm(LLVMModuleRef M, const char *Asm);
/*===-- Types -------------------------------------------------------------===*/
@@ -390,13 +394,6 @@ unsigned LLVMCountStructElementTypes(LLVMTypeRef StructTy);
void LLVMGetStructElementTypes(LLVMTypeRef StructTy, LLVMTypeRef *Dest);
LLVMBool LLVMIsPackedStruct(LLVMTypeRef StructTy);
-/* Operations on union types */
-LLVMTypeRef LLVMUnionTypeInContext(LLVMContextRef C, LLVMTypeRef *ElementTypes,
- unsigned ElementCount);
-LLVMTypeRef LLVMUnionType(LLVMTypeRef *ElementTypes, unsigned ElementCount);
-unsigned LLVMCountUnionElementTypes(LLVMTypeRef UnionTy);
-void LLVMGetUnionElementTypes(LLVMTypeRef UnionTy, LLVMTypeRef *Dest);
-
/* Operations on array, pointer, and vector types (sequence types) */
LLVMTypeRef LLVMArrayType(LLVMTypeRef ElementType, unsigned ElementCount);
LLVMTypeRef LLVMPointerType(LLVMTypeRef ElementType, unsigned AddressSpace);
@@ -520,6 +517,8 @@ LLVMValueRef LLVMGetUsedValue(LLVMUseRef U);
/* Operations on Users */
LLVMValueRef LLVMGetOperand(LLVMValueRef Val, unsigned Index);
+void LLVMSetOperand(LLVMValueRef User, unsigned Index, LLVMValueRef Val);
+int LLVMGetNumOperands(LLVMValueRef Val);
/* Operations on constants of any type */
LLVMValueRef LLVMConstNull(LLVMTypeRef Ty); /* all zeroes */
@@ -567,7 +566,6 @@ LLVMValueRef LLVMConstArray(LLVMTypeRef ElementTy,
LLVMValueRef LLVMConstStruct(LLVMValueRef *ConstantVals, unsigned Count,
LLVMBool Packed);
LLVMValueRef LLVMConstVector(LLVMValueRef *ScalarConstantVals, unsigned Size);
-LLVMValueRef LLVMConstUnion(LLVMTypeRef Ty, LLVMValueRef Val);
/* Constant expressions */
LLVMOpcode LLVMGetConstOpcode(LLVMValueRef ConstantVal);
@@ -747,6 +745,9 @@ LLVMBasicBlockRef LLVMInsertBasicBlock(LLVMBasicBlockRef InsertBeforeBB,
const char *Name);
void LLVMDeleteBasicBlock(LLVMBasicBlockRef BB);
+void LLVMMoveBasicBlockBefore(LLVMBasicBlockRef BB, LLVMBasicBlockRef MovePos);
+void LLVMMoveBasicBlockAfter(LLVMBasicBlockRef BB, LLVMBasicBlockRef MovePos);
+
/* Operations on instructions */
LLVMBasicBlockRef LLVMGetInstructionParent(LLVMValueRef Inst);
LLVMValueRef LLVMGetFirstInstruction(LLVMBasicBlockRef BB);
diff --git a/libclamav/c++/llvm/include/llvm-c/EnhancedDisassembly.h b/libclamav/c++/llvm/include/llvm-c/EnhancedDisassembly.h
index 9cd1e1f..d177381 100644
--- a/libclamav/c++/llvm/include/llvm-c/EnhancedDisassembly.h
+++ b/libclamav/c++/llvm/include/llvm-c/EnhancedDisassembly.h
@@ -51,40 +51,38 @@ typedef int (*EDRegisterReaderCallback)(uint64_t *value, unsigned regID,
@typedef EDAssemblySyntax_t
An assembly syntax for use in tokenizing instructions.
*/
-typedef enum {
+enum {
/*! @constant kEDAssemblySyntaxX86Intel Intel syntax for i386 and x86_64. */
kEDAssemblySyntaxX86Intel = 0,
/*! @constant kEDAssemblySyntaxX86ATT AT&T syntax for i386 and x86_64. */
- kEDAssemblySyntaxX86ATT = 1
-} EDAssemblySyntax_t;
+ kEDAssemblySyntaxX86ATT = 1,
+ kEDAssemblySyntaxARMUAL = 2
+};
+typedef unsigned EDAssemblySyntax_t;
/*!
@typedef EDDisassemblerRef
Encapsulates a disassembler for a single CPU architecture.
*/
-struct EDDisassembler;
-typedef struct EDDisassembler *EDDisassemblerRef;
+typedef void *EDDisassemblerRef;
/*!
@typedef EDInstRef
Encapsulates a single disassembled instruction in one assembly syntax.
*/
-struct EDInst;
-typedef struct EDInst *EDInstRef;
+typedef void *EDInstRef;
/*!
@typedef EDTokenRef
Encapsulates a token from the disassembly of an instruction.
*/
-struct EDToken;
-typedef struct EDToken *EDTokenRef;
+typedef void *EDTokenRef;
/*!
@typedef EDOperandRef
Encapsulates an operand of an instruction.
*/
-struct EDOperand;
-typedef struct EDOperand *EDOperandRef;
+typedef void *EDOperandRef;
/*!
@functiongroup Getting a disassembler
diff --git a/libclamav/c++/llvm/include/llvm-c/ExecutionEngine.h b/libclamav/c++/llvm/include/llvm-c/ExecutionEngine.h
index 5a98a77..f5f4061 100644
--- a/libclamav/c++/llvm/include/llvm-c/ExecutionEngine.h
+++ b/libclamav/c++/llvm/include/llvm-c/ExecutionEngine.h
@@ -116,6 +116,8 @@ LLVMBool LLVMRemoveModuleProvider(LLVMExecutionEngineRef EE,
LLVMBool LLVMFindFunction(LLVMExecutionEngineRef EE, const char *Name,
LLVMValueRef *OutFn);
+void *LLVMRecompileAndRelinkFunction(LLVMExecutionEngineRef EE, LLVMValueRef Fn);
+
LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE);
void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,
diff --git a/libclamav/c++/llvm/include/llvm-c/Target.h b/libclamav/c++/llvm/include/llvm-c/Target.h
index e705a99..2cd15c3 100644
--- a/libclamav/c++/llvm/include/llvm-c/Target.h
+++ b/libclamav/c++/llvm/include/llvm-c/Target.h
@@ -1,26 +1,26 @@
-/*===-- llvm-c/Target.h - Target Lib C Iface --------------------*- C++ -*-===*\
-|* *|
-|* The LLVM Compiler Infrastructure *|
-|* *|
-|* This file is distributed under the University of Illinois Open Source *|
-|* License. See LICENSE.TXT for details. *|
-|* *|
-|*===----------------------------------------------------------------------===*|
-|* *|
-|* This header declares the C interface to libLLVMTarget.a, which *|
-|* implements target information. *|
-|* *|
-|* Many exotic languages can interoperate with C code but have a harder time *|
-|* with C++ due to name mangling. So in addition to C, this interface enables *|
-|* tools written in such languages. *|
-|* *|
-\*===----------------------------------------------------------------------===*/
+/*===-- llvm-c/Target.h - Target Lib C Iface --------------------*- C++ -*-===*/
+/* */
+/* The LLVM Compiler Infrastructure */
+/* */
+/* This file is distributed under the University of Illinois Open Source */
+/* License. See LICENSE.TXT for details. */
+/* */
+/*===----------------------------------------------------------------------===*/
+/* */
+/* This header declares the C interface to libLLVMTarget.a, which */
+/* implements target information. */
+/* */
+/* Many exotic languages can interoperate with C code but have a harder time */
+/* with C++ due to name mangling. So in addition to C, this interface enables */
+/* tools written in such languages. */
+/* */
+/*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_TARGET_H
#define LLVM_C_TARGET_H
#include "llvm-c/Core.h"
-#include "llvm/Config/config.h"
+#include "llvm/Config/llvm-config.h"
#ifdef __cplusplus
extern "C" {
@@ -32,18 +32,19 @@ typedef struct LLVMOpaqueTargetData *LLVMTargetDataRef;
typedef struct LLVMStructLayout *LLVMStructLayoutRef;
/* Declare all of the target-initialization functions that are available. */
-#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##TargetInfo();
+#define LLVM_TARGET(TargetName) \
+ void LLVMInitialize##TargetName##TargetInfo(void);
#include "llvm/Config/Targets.def"
#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
-#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##Target();
+#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##Target(void);
#include "llvm/Config/Targets.def"
#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
/** LLVMInitializeAllTargetInfos - The main program should call this function if
it wants access to all available targets that LLVM is configured to
support. */
-static inline void LLVMInitializeAllTargetInfos() {
+static inline void LLVMInitializeAllTargetInfos(void) {
#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##TargetInfo();
#include "llvm/Config/Targets.def"
#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
@@ -52,7 +53,7 @@ static inline void LLVMInitializeAllTargetInfos() {
/** LLVMInitializeAllTargets - The main program should call this function if it
wants to link in all available targets that LLVM is configured to
support. */
-static inline void LLVMInitializeAllTargets() {
+static inline void LLVMInitializeAllTargets(void) {
#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##Target();
#include "llvm/Config/Targets.def"
#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
@@ -61,17 +62,12 @@ static inline void LLVMInitializeAllTargets() {
/** LLVMInitializeNativeTarget - The main program should call this function to
initialize the native target corresponding to the host. This is useful
for JIT applications to ensure that the target gets linked in correctly. */
-static inline LLVMBool LLVMInitializeNativeTarget() {
+static inline LLVMBool LLVMInitializeNativeTarget(void) {
/* If we have a native target, initialize it to ensure it is linked in. */
-#ifdef LLVM_NATIVE_ARCH
-#define DoInit2(TARG) \
- LLVMInitialize ## TARG ## Info (); \
- LLVMInitialize ## TARG ()
-#define DoInit(T) DoInit2(T)
- DoInit(LLVM_NATIVE_ARCH);
+#ifdef LLVM_NATIVE_TARGET
+ LLVM_NATIVE_TARGETINFO();
+ LLVM_NATIVE_TARGET();
return 0;
-#undef DoInit
-#undef DoInit2
#else
return 1;
#endif
diff --git a/libclamav/c++/llvm/include/llvm-c/Transforms/IPO.h b/libclamav/c++/llvm/include/llvm-c/Transforms/IPO.h
index 0a94315..d16e858 100644
--- a/libclamav/c++/llvm/include/llvm-c/Transforms/IPO.h
+++ b/libclamav/c++/llvm/include/llvm-c/Transforms/IPO.h
@@ -54,6 +54,12 @@ void LLVMAddLowerSetJmpPass(LLVMPassManagerRef PM);
/** See llvm::createPruneEHPass function. */
void LLVMAddPruneEHPass(LLVMPassManagerRef PM);
+/** See llvm::createIPSCCPPass function. */
+void LLVMAddIPSCCPPass(LLVMPassManagerRef PM);
+
+/** See llvm::createInternalizePass function. */
+void LLVMAddInternalizePass(LLVMPassManagerRef, unsigned AllButMain);
+
// FIXME: Remove in LLVM 3.0.
void LLVMAddRaiseAllocationsPass(LLVMPassManagerRef PM);
diff --git a/libclamav/c++/llvm/include/llvm-c/Transforms/Scalar.h b/libclamav/c++/llvm/include/llvm-c/Transforms/Scalar.h
index 2c5a371..c94019a 100644
--- a/libclamav/c++/llvm/include/llvm-c/Transforms/Scalar.h
+++ b/libclamav/c++/llvm/include/llvm-c/Transforms/Scalar.h
@@ -79,6 +79,10 @@ void LLVMAddSCCPPass(LLVMPassManagerRef PM);
/** See llvm::createScalarReplAggregatesPass function. */
void LLVMAddScalarReplAggregatesPass(LLVMPassManagerRef PM);
+/** See llvm::createScalarReplAggregatesPass function. */
+void LLVMAddScalarReplAggregatesPassWithThreshold(LLVMPassManagerRef PM,
+ int Threshold);
+
/** See llvm::createSimplifyLibCallsPass function. */
void LLVMAddSimplifyLibCallsPass(LLVMPassManagerRef PM);
@@ -91,6 +95,9 @@ void LLVMAddConstantPropagationPass(LLVMPassManagerRef PM);
/** See llvm::demotePromoteMemoryToRegisterPass function. */
void LLVMAddDemoteMemoryToRegisterPass(LLVMPassManagerRef PM);
+/** See llvm::createVerifierPass function. */
+void LLVMAddVerifierPass(LLVMPassManagerRef PM);
+
#ifdef __cplusplus
}
#endif /* defined(__cplusplus) */
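
Similarly, the two additions to Scalar.h above expose the threshold form of scalar replacement of aggregates and the module verifier. A sketch under the same assumptions as before (module M exists; the 128-byte threshold is an arbitrary example value):

    #include "llvm-c/Core.h"
    #include "llvm-c/Transforms/Scalar.h"

    // Sketch: SROA with an explicit size threshold, followed by the verifier.
    static void runScalarOpts(LLVMModuleRef M) {
      LLVMPassManagerRef PM = LLVMCreatePassManager();
      LLVMAddScalarReplAggregatesPassWithThreshold(PM, 128);
      LLVMAddVerifierPass(PM);
      LLVMRunPassManager(PM, M);
      LLVMDisposePassManager(PM);
    }
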
diff --git a/libclamav/c++/llvm/include/llvm-c/lto.h b/libclamav/c++/llvm/include/llvm-c/lto.h
index 7cafcb2..e6f69af 100644
--- a/libclamav/c++/llvm/include/llvm-c/lto.h
+++ b/libclamav/c++/llvm/include/llvm-c/lto.h
@@ -18,6 +18,7 @@
#include <stdbool.h>
#include <stddef.h>
+#include "llvm/System/DataTypes.h"
#define LTO_API_VERSION 3
@@ -102,7 +103,7 @@ lto_module_is_object_file_in_memory(const void* mem, size_t length);
*/
extern bool
lto_module_is_object_file_in_memory_for_target(const void* mem, size_t length,
- const char* target_triple_prefix);
+ const char* target_triple_prefix);
/**
@@ -135,11 +136,17 @@ lto_module_dispose(lto_module_t mod);
extern const char*
lto_module_get_target_triple(lto_module_t mod);
+/**
+ * Sets the triple string with which the object will be codegened.
+ */
+extern void
+lto_module_set_target_triple(lto_module_t mod, const char *triple);
+
/**
* Returns the number of symbols in the object module.
*/
-extern unsigned int
+extern uint32_t
lto_module_get_num_symbols(lto_module_t mod);
@@ -147,14 +154,14 @@ lto_module_get_num_symbols(lto_module_t mod);
* Returns the name of the ith symbol in the object module.
*/
extern const char*
-lto_module_get_symbol_name(lto_module_t mod, unsigned int index);
+lto_module_get_symbol_name(lto_module_t mod, uint32_t index);
/**
* Returns the attributes of the ith symbol in the object module.
*/
extern lto_symbol_attributes
-lto_module_get_symbol_attribute(lto_module_t mod, unsigned int index);
+lto_module_get_symbol_attribute(lto_module_t mod, uint32_t index);
/**
@@ -200,11 +207,10 @@ lto_codegen_set_pic_model(lto_code_gen_t cg, lto_codegen_model);
/**
- * Sets the location of the "gcc" to run. If not set, libLTO will search for
- * "gcc" on the path.
+ * Sets the cpu to generate code for.
*/
extern void
-lto_codegen_set_gcc_path(lto_code_gen_t cg, const char* path);
+lto_codegen_set_cpu(lto_code_gen_t cg, const char *cpu);
/**
@@ -214,6 +220,12 @@ lto_codegen_set_gcc_path(lto_code_gen_t cg, const char* path);
extern void
lto_codegen_set_assembler_path(lto_code_gen_t cg, const char* path);
+/**
+ * Sets extra arguments that libLTO should pass to the assembler.
+ */
+extern void
+lto_codegen_set_assembler_args(lto_code_gen_t cg, const char **args,
+ int nargs);
/**
* Adds to a list of all global symbols that must exist in the final
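
The lto.h changes above switch the symbol-indexing API from unsigned int to the fixed-width uint32_t pulled in through llvm/System/DataTypes.h. A small sketch of symbol enumeration with the updated signatures (dumpSymbols is an illustrative name; the module handle is assumed to have been created elsewhere):

    #include "llvm-c/lto.h"
    #include <cstdio>

    // Sketch: enumerate the symbols of an already-loaded LTO module.
    static void dumpSymbols(lto_module_t mod) {
      uint32_t n = lto_module_get_num_symbols(mod);
      for (uint32_t i = 0; i != n; ++i)
        std::printf("%s (attrs 0x%x)\n",
                    lto_module_get_symbol_name(mod, i),
                    (unsigned)lto_module_get_symbol_attribute(mod, i));
    }
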
diff --git a/libclamav/c++/llvm/include/llvm/ADT/APFloat.h b/libclamav/c++/llvm/include/llvm/ADT/APFloat.h
index 3cccc81..dfe4e0f 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/APFloat.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/APFloat.h
@@ -179,7 +179,7 @@ namespace llvm {
// Constructors.
APFloat(const fltSemantics &); // Default construct to 0.0
- APFloat(const fltSemantics &, const StringRef &);
+ APFloat(const fltSemantics &, StringRef);
APFloat(const fltSemantics &, integerPart);
APFloat(const fltSemantics &, fltCategory, bool negative);
APFloat(const fltSemantics &, uninitializedTag);
@@ -282,7 +282,7 @@ namespace llvm {
bool, roundingMode);
opStatus convertFromZeroExtendedInteger(const integerPart *, unsigned int,
bool, roundingMode);
- opStatus convertFromString(const StringRef&, roundingMode);
+ opStatus convertFromString(StringRef, roundingMode);
APInt bitcastToAPInt() const;
double convertToDouble() const;
float convertToFloat() const;
@@ -386,8 +386,8 @@ namespace llvm {
roundingMode, bool *) const;
opStatus convertFromUnsignedParts(const integerPart *, unsigned int,
roundingMode);
- opStatus convertFromHexadecimalString(const StringRef&, roundingMode);
- opStatus convertFromDecimalString (const StringRef&, roundingMode);
+ opStatus convertFromHexadecimalString(StringRef, roundingMode);
+ opStatus convertFromDecimalString(StringRef, roundingMode);
char *convertNormalToHexString(char *, unsigned int, bool,
roundingMode) const;
opStatus roundSignificandWithExponent(const integerPart *, unsigned int,
diff --git a/libclamav/c++/llvm/include/llvm/ADT/APInt.h b/libclamav/c++/llvm/include/llvm/ADT/APInt.h
index 3f67ffb..8004cb4 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/APInt.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/APInt.h
@@ -162,7 +162,7 @@ class APInt {
///
/// @param radix 2, 8, 10, or 16
/// @brief Convert a char array into an APInt
- void fromString(unsigned numBits, const StringRef &str, uint8_t radix);
+ void fromString(unsigned numBits, StringRef str, uint8_t radix);
/// This is used by the toString method to divide by the radix. It simply
/// provides a more convenient form of divide for internal use since KnuthDiv
@@ -248,7 +248,7 @@ public:
/// @param str the string to be interpreted
/// @param radix the radix to use for the conversion
/// @brief Construct an APInt from a string representation.
- APInt(unsigned numBits, const StringRef &str, uint8_t radix);
+ APInt(unsigned numBits, StringRef str, uint8_t radix);
/// Simply makes *this a copy of that.
/// @brief Copy Constructor.
@@ -464,7 +464,7 @@ public:
// For small values, return quickly
if (numBits <= APINT_BITS_PER_WORD)
return APInt(numBits, ~0ULL << shiftAmt);
- return (~APInt(numBits, 0)).shl(shiftAmt);
+ return getAllOnesValue(numBits).shl(shiftAmt);
}
/// Constructs an APInt value that has the bottom loBitsSet bits set.
@@ -481,7 +481,7 @@ public:
// For small values, return quickly.
if (numBits < APINT_BITS_PER_WORD)
return APInt(numBits, (1ULL << loBitsSet) - 1);
- return (~APInt(numBits, 0)).lshr(numBits - loBitsSet);
+ return getAllOnesValue(numBits).lshr(numBits - loBitsSet);
}
/// The hash value is computed as the sum of the words and the bit width.
@@ -870,12 +870,28 @@ public:
/// @brief Unsigned less than comparison
bool ult(const APInt& RHS) const;
+ /// Regards *this as an unsigned quantity and compares it with RHS for
+ /// the validity of the less-than relationship.
+ /// @returns true if *this < RHS when considered unsigned.
+ /// @brief Unsigned less than comparison
+ bool ult(uint64_t RHS) const {
+ return ult(APInt(getBitWidth(), RHS));
+ }
+
/// Regards both *this and RHS as signed quantities and compares them for
/// validity of the less-than relationship.
/// @returns true if *this < RHS when both are considered signed.
/// @brief Signed less than comparison
bool slt(const APInt& RHS) const;
+ /// Regards *this as a signed quantity and compares it with RHS for
+ /// the validity of the less-than relationship.
+ /// @returns true if *this < RHS when considered signed.
+ /// @brief Signed less than comparison
+ bool slt(uint64_t RHS) const {
+ return slt(APInt(getBitWidth(), RHS));
+ }
+
/// Regards both *this and RHS as unsigned quantities and compares them for
/// validity of the less-or-equal relationship.
/// @returns true if *this <= RHS when both are considered unsigned.
@@ -884,6 +900,14 @@ public:
return ult(RHS) || eq(RHS);
}
+ /// Regards *this as an unsigned quantity and compares it with RHS for
+ /// the validity of the less-or-equal relationship.
+ /// @returns true if *this <= RHS when considered unsigned.
+ /// @brief Unsigned less or equal comparison
+ bool ule(uint64_t RHS) const {
+ return ule(APInt(getBitWidth(), RHS));
+ }
+
/// Regards both *this and RHS as signed quantities and compares them for
/// validity of the less-or-equal relationship.
/// @returns true if *this <= RHS when both are considered signed.
@@ -892,6 +916,14 @@ public:
return slt(RHS) || eq(RHS);
}
+ /// Regards *this as a signed quantity and compares it with RHS for
+ /// the validity of the less-or-equal relationship.
+ /// @returns true if *this <= RHS when considered signed.
+ /// @brief Signed less or equal comparison
+ bool sle(uint64_t RHS) const {
+ return sle(APInt(getBitWidth(), RHS));
+ }
+
/// Regards both *this and RHS as unsigned quantities and compares them for
/// the validity of the greater-than relationship.
/// @returns true if *this > RHS when both are considered unsigned.
@@ -900,6 +932,14 @@ public:
return !ult(RHS) && !eq(RHS);
}
+ /// Regards *this as an unsigned quantity and compares it with RHS for
+ /// the validity of the greater-than relationship.
+ /// @returns true if *this > RHS when considered unsigned.
+ /// @brief Unsigned greater than comparison
+ bool ugt(uint64_t RHS) const {
+ return ugt(APInt(getBitWidth(), RHS));
+ }
+
/// Regards both *this and RHS as signed quantities and compares them for
/// the validity of the greater-than relationship.
/// @returns true if *this > RHS when both are considered signed.
@@ -908,6 +948,14 @@ public:
return !slt(RHS) && !eq(RHS);
}
+ /// Regards *this as a signed quantity and compares it with RHS for
+ /// the validity of the greater-than relationship.
+ /// @returns true if *this > RHS when considered signed.
+ /// @brief Signed greater than comparison
+ bool sgt(uint64_t RHS) const {
+ return sgt(APInt(getBitWidth(), RHS));
+ }
+
/// Regards both *this and RHS as unsigned quantities and compares them for
/// validity of the greater-or-equal relationship.
/// @returns true if *this >= RHS when both are considered unsigned.
@@ -916,6 +964,14 @@ public:
return !ult(RHS);
}
+ /// Regards *this as an unsigned quantity and compares it with RHS for
+ /// the validity of the greater-or-equal relationship.
+ /// @returns true if *this >= RHS when considered unsigned.
+ /// @brief Unsigned greater or equal comparison
+ bool uge(uint64_t RHS) const {
+ return uge(APInt(getBitWidth(), RHS));
+ }
+
/// Regards both *this and RHS as signed quantities and compares them for
/// validity of the greater-or-equal relationship.
/// @returns true if *this >= RHS when both are considered signed.
@@ -924,6 +980,14 @@ public:
return !slt(RHS);
}
+ /// Regards *this as a signed quantity and compares it with RHS for
+ /// the validity of the greater-or-equal relationship.
+ /// @returns true if *this >= RHS when considered signed.
+ /// @brief Signed greater or equal comparison
+ bool sge(uint64_t RHS) const {
+ return sge(APInt(getBitWidth(), RHS));
+ }
+
/// This operation tests if there are any pairs of corresponding bits
/// between this APInt and RHS that are both set.
bool intersects(const APInt &RHS) const {
@@ -1089,7 +1153,7 @@ public:
/// This method determines how many bits are required to hold the APInt
/// equivalent of the string given by \arg str.
/// @brief Get bits required for string value.
- static unsigned getBitsNeeded(const StringRef& str, uint8_t radix);
+ static unsigned getBitsNeeded(StringRef str, uint8_t radix);
/// countLeadingZeros - This function is an APInt version of the
/// countLeadingZeros_{32,64} functions in MathExtras.h. It counts the number
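
The new uint64_t overloads of ult/slt/ule/sle/ugt/sgt/uge/sge above let call sites compare an APInt against a plain integer constant without spelling out a temporary APInt of the right width. A small sketch (fitsInRange is an illustrative name):

    #include "llvm/ADT/APInt.h"
    using namespace llvm;

    // Sketch: range check against plain constants; each overload internally
    // builds APInt(V.getBitWidth(), N), exactly as the old call sites did.
    static bool fitsInRange(const APInt &V) {
      return V.uge(16) && V.ule(1024);
    }
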
diff --git a/libclamav/c++/llvm/include/llvm/ADT/BitVector.h b/libclamav/c++/llvm/include/llvm/ADT/BitVector.h
index b9f2d83..9dcb9e1 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/BitVector.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/BitVector.h
@@ -49,6 +49,11 @@ public:
~reference() {}
+ reference &operator=(reference t) {
+ *this = bool(t);
+ return *this;
+ }
+
reference& operator=(bool t) {
if (t)
*WordRef |= 1L << BitPos;
@@ -329,7 +334,8 @@ public:
Size = RHS.size();
unsigned RHSWords = NumBitWords(Size);
if (Size <= Capacity * BITWORD_SIZE) {
- std::copy(RHS.Bits, &RHS.Bits[RHSWords], Bits);
+ if (Size)
+ std::copy(RHS.Bits, &RHS.Bits[RHSWords], Bits);
clear_unused_bits();
return *this;
}
diff --git a/libclamav/c++/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h b/libclamav/c++/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h
new file mode 100644
index 0000000..99ed15c
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h
@@ -0,0 +1,75 @@
+//===--- DAGDeltaAlgorithm.h - A DAG Minimization Algorithm ----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DAGDELTAALGORITHM_H
+#define LLVM_ADT_DAGDELTAALGORITHM_H
+
+#include <vector>
+#include <set>
+
+namespace llvm {
+
+/// DAGDeltaAlgorithm - Implements a "delta debugging" algorithm for minimizing
+/// directed acyclic graphs using a predicate function.
+///
+/// The result of the algorithm is a subset of the input change set which is
+/// guaranteed to satisfy the predicate, assuming that the input set did. For
+/// well formed predicates, the result set is guaranteed to be such that
+/// removing any single element not required by the dependencies on the other
+/// elements would falsify the predicate.
+///
+/// The DAG should be used to represent dependencies in the changes which are
+/// likely to hold across the predicate function. That is, for a particular
+/// changeset S and predicate P:
+///
+/// P(S) => P(S union pred(S))
+///
+/// The minimization algorithm uses this dependency information to attempt to
+/// eagerly prune large subsets of changes. As with \see DeltaAlgorithm, the DAG
+/// is not required to satisfy this property, but the algorithm will run
+/// substantially fewer tests with appropriate dependencies. \see DeltaAlgorithm
+/// for more information on the properties which the predicate function itself
+/// should satisfy.
+class DAGDeltaAlgorithm {
+public:
+ typedef unsigned change_ty;
+ typedef std::pair<change_ty, change_ty> edge_ty;
+
+ // FIXME: Use a decent data structure.
+ typedef std::set<change_ty> changeset_ty;
+ typedef std::vector<changeset_ty> changesetlist_ty;
+
+public:
+ virtual ~DAGDeltaAlgorithm() {}
+
+ /// Run - Minimize the DAG formed by the \arg Changes vertices and the \arg
+ /// Dependencies edges by executing \see ExecuteOneTest() on subsets of
+ /// changes and returning the smallest set which still satisfies the test
+ /// predicate and the input \arg Dependencies.
+ ///
+ /// \param Changes The list of changes.
+ ///
+ /// \param Dependencies The list of dependencies amongst changes. For each
+ /// (x,y) in \arg Dependencies, both x and y must be in \arg Changes. The
+ /// minimization algorithm guarantees that for each tested changed set S, x
+ /// \in S implies y \in S. It is an error to have cyclic dependencies.
+ changeset_ty Run(const changeset_ty &Changes,
+ const std::vector<edge_ty> &Dependencies);
+
+ /// UpdatedSearchState - Callback used when the search state changes.
+ virtual void UpdatedSearchState(const changeset_ty &Changes,
+ const changesetlist_ty &Sets,
+ const changeset_ty &Required) {}
+
+ /// ExecuteOneTest - Execute a single test predicate on the change set \arg S.
+ virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
+};
+
+} // end namespace llvm
+
+#endif
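
DAGDeltaAlgorithm.h above is a new header; the only thing a client has to supply is the ExecuteOneTest predicate. A hedged sketch of how a minimizer might be wired up (the predicate and the numeric change IDs are made up for illustration):

    #include "llvm/ADT/DAGDeltaAlgorithm.h"
    using namespace llvm;

    namespace {
    // Sketch: the predicate returns true iff the reduced change set still
    // reproduces whatever condition is being minimized.
    class ExampleMinimizer : public DAGDeltaAlgorithm {
      virtual bool ExecuteOneTest(const changeset_ty &S) {
        return S.count(2) && S.count(4);
      }
    };
    }

    static DAGDeltaAlgorithm::changeset_ty minimizeExample() {
      ExampleMinimizer M;
      DAGDeltaAlgorithm::changeset_ty Changes;
      for (unsigned i = 0; i != 8; ++i)
        Changes.insert(i);
      std::vector<DAGDeltaAlgorithm::edge_ty> Deps;
      Deps.push_back(DAGDeltaAlgorithm::edge_ty(3, 2));  // change 3 requires change 2
      return M.Run(Changes, Deps);
    }
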
diff --git a/libclamav/c++/llvm/include/llvm/ADT/DenseMap.h b/libclamav/c++/llvm/include/llvm/ADT/DenseMap.h
index 393473b..06a1575 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/DenseMap.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/DenseMap.h
@@ -22,6 +22,7 @@
#include <new>
#include <utility>
#include <cassert>
+#include <cstddef>
#include <cstring>
namespace llvm {
@@ -79,13 +80,14 @@ public:
typedef DenseMapIterator<KeyT, ValueT,
KeyInfoT, ValueInfoT, true> const_iterator;
inline iterator begin() {
- return iterator(Buckets, Buckets+NumBuckets);
+ // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
+ return empty() ? end() : iterator(Buckets, Buckets+NumBuckets);
}
inline iterator end() {
return iterator(Buckets+NumBuckets, Buckets+NumBuckets);
}
inline const_iterator begin() const {
- return const_iterator(Buckets, Buckets+NumBuckets);
+ return empty() ? end() : const_iterator(Buckets, Buckets+NumBuckets);
}
inline const_iterator end() const {
return const_iterator(Buckets+NumBuckets, Buckets+NumBuckets);
@@ -183,13 +185,12 @@ public:
++NumTombstones;
return true;
}
- bool erase(iterator I) {
+ void erase(iterator I) {
BucketT *TheBucket = &*I;
TheBucket->second.~ValueT();
TheBucket->first = getTombstoneKey();
--NumEntries;
++NumTombstones;
- return true;
}
void swap(DenseMap& RHS) {
diff --git a/libclamav/c++/llvm/include/llvm/ADT/DenseMapInfo.h b/libclamav/c++/llvm/include/llvm/ADT/DenseMapInfo.h
index 41197a1..5299386 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/DenseMapInfo.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/DenseMapInfo.h
@@ -92,6 +92,16 @@ template<> struct DenseMapInfo<unsigned long long> {
}
};
+// Provide DenseMapInfo for ints.
+template<> struct DenseMapInfo<int> {
+ static inline int getEmptyKey() { return 0x7fffffff; }
+ static inline int getTombstoneKey() { return -0x7fffffff - 1; }
+ static unsigned getHashValue(const int& Val) { return (unsigned)(Val * 37); }
+ static bool isEqual(const int& LHS, const int& RHS) {
+ return LHS == RHS;
+ }
+};
+
// Provide DenseMapInfo for long longs.
template<> struct DenseMapInfo<long long> {
static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; }
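
With the DenseMapInfo<int> specialization added above, plain int keys can be used directly; note that the reserved empty and tombstone keys (INT_MAX and INT_MIN) must never be inserted. A small sketch that also shows the erase(iterator) signature change from DenseMap.h earlier in this diff:

    #include "llvm/ADT/DenseMap.h"
    using namespace llvm;

    static void denseMapIntExample() {
      DenseMap<int, const char *> M;
      M[42] = "answer";
      DenseMap<int, const char *>::iterator I = M.find(42);
      if (I != M.end())
        M.erase(I);  // erase(iterator) now returns void
    }
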
diff --git a/libclamav/c++/llvm/include/llvm/ADT/DenseSet.h b/libclamav/c++/llvm/include/llvm/ADT/DenseSet.h
index 9388338..00bcf64 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/DenseSet.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/DenseSet.h
@@ -58,6 +58,7 @@ public:
class Iterator {
typename MapTy::iterator I;
+ friend class DenseSet;
public:
typedef typename MapTy::iterator::difference_type difference_type;
typedef ValueT value_type;
@@ -77,6 +78,7 @@ public:
class ConstIterator {
typename MapTy::const_iterator I;
+ friend class DenseSet;
public:
typedef typename MapTy::const_iterator::difference_type difference_type;
typedef ValueT value_type;
@@ -103,6 +105,10 @@ public:
const_iterator begin() const { return ConstIterator(TheMap.begin()); }
const_iterator end() const { return ConstIterator(TheMap.end()); }
+ iterator find(const ValueT &V) { return Iterator(TheMap.find(V)); }
+ void erase(Iterator I) { return TheMap.erase(I.I); }
+ void erase(ConstIterator CI) { return TheMap.erase(CI.I); }
+
std::pair<iterator, bool> insert(const ValueT &V) {
return TheMap.insert(std::make_pair(V, 0));
}
diff --git a/libclamav/c++/llvm/include/llvm/ADT/DepthFirstIterator.h b/libclamav/c++/llvm/include/llvm/ADT/DepthFirstIterator.h
index 5f2df2a..b9e5cbd 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/DepthFirstIterator.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/DepthFirstIterator.h
@@ -183,6 +183,16 @@ public:
inline bool nodeVisited(NodeType *Node) const {
return this->Visited.count(Node) != 0;
}
+
+ /// getPathLength - Return the length of the path from the entry node to the
+ /// current node, counting both nodes.
+ unsigned getPathLength() const { return VisitStack.size(); }
+
+ /// getPath - Return the n'th node in the path from the entry node to the
+ /// current node.
+ NodeType *getPath(unsigned n) const {
+ return VisitStack[n].first.getPointer();
+ }
};
diff --git a/libclamav/c++/llvm/include/llvm/ADT/EquivalenceClasses.h b/libclamav/c++/llvm/include/llvm/ADT/EquivalenceClasses.h
index fa4af67..07a5edf 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/EquivalenceClasses.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/EquivalenceClasses.h
@@ -16,6 +16,7 @@
#define LLVM_ADT_EQUIVALENCECLASSES_H
#include "llvm/System/DataTypes.h"
+#include <cassert>
#include <set>
namespace llvm {
@@ -168,7 +169,7 @@ public:
/// getOrInsertLeaderValue - Return the leader for the specified value that is
/// in the set. If the member is not in the set, it is inserted, then
/// returned.
- const ElemTy &getOrInsertLeaderValue(const ElemTy &V) const {
+ const ElemTy &getOrInsertLeaderValue(const ElemTy &V) {
member_iterator MI = findLeader(insert(V));
assert(MI != member_end() && "Value is not in the set!");
return *MI;
diff --git a/libclamav/c++/llvm/include/llvm/ADT/FoldingSet.h b/libclamav/c++/llvm/include/llvm/ADT/FoldingSet.h
index 81dc469..662b5e2 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/FoldingSet.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/FoldingSet.h
@@ -23,6 +23,7 @@
namespace llvm {
class APFloat;
class APInt;
+ class BumpPtrAllocator;
 /// This folding set is used for two purposes:
/// 1. Given information about a node we want to create, look up the unique
@@ -53,9 +54,9 @@ namespace llvm {
/// void Profile(FoldingSetNodeID &ID) const {
/// ID.AddString(Name);
/// ID.AddInteger(Value);
-/// }
-/// ...
-/// };
+/// }
+/// ...
+/// };
///
/// To define the folding set itself use the FoldingSet template;
///
@@ -165,6 +166,14 @@ public:
/// FindNodeOrInsertPos.
void InsertNode(Node *N, void *InsertPos);
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set.
+ void InsertNode(Node *N) {
+ Node *Inserted = GetOrInsertNode(N);
+ (void)Inserted;
+ assert(Inserted == N && "Node already inserted!");
+ }
+
/// size - Returns the number of nodes in the folding set.
unsigned size() const { return NumNodes; }
@@ -181,20 +190,97 @@ protected:
/// GetNodeProfile - Instantiations of the FoldingSet template implement
/// this function to gather data bits for the given node.
- virtual void GetNodeProfile(FoldingSetNodeID &ID, Node *N) const = 0;
+ virtual void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const = 0;
+ /// NodeEquals - Instantiations of the FoldingSet template implement
+ /// this function to compare the given node with the given ID.
+ virtual bool NodeEquals(Node *N, const FoldingSetNodeID &ID,
+ FoldingSetNodeID &TempID) const=0;
+ /// ComputeNodeHash - Instantiations of the FoldingSet template implement
+ /// this function to compute a hash value for the given node.
+ virtual unsigned ComputeNodeHash(Node *N,
+ FoldingSetNodeID &TempID) const = 0;
};
//===----------------------------------------------------------------------===//
-/// FoldingSetTrait - This trait class is used to define behavior of how
-/// to "profile" (in the FoldingSet parlance) an object of a given type.
-/// The default behavior is to invoke a 'Profile' method on an object, but
-/// through template specialization the behavior can be tailored for specific
-/// types. Combined with the FoldingSetNodeWrapper classs, one can add objects
-/// to FoldingSets that were not originally designed to have that behavior.
+
+template<typename T> struct FoldingSetTrait;
+
+/// DefaultFoldingSetTrait - This class provides default implementations
+/// for FoldingSetTrait implementations.
///
-template<typename T> struct FoldingSetTrait {
- static inline void Profile(const T& X, FoldingSetNodeID& ID) { X.Profile(ID);}
- static inline void Profile(T& X, FoldingSetNodeID& ID) { X.Profile(ID); }
+template<typename T> struct DefaultFoldingSetTrait {
+ static void Profile(const T& X, FoldingSetNodeID& ID) {
+ X.Profile(ID);
+ }
+ static void Profile(T& X, FoldingSetNodeID& ID) {
+ X.Profile(ID);
+ }
+
+ // Equals - Test if the profile for X would match ID, using TempID
+ // to compute a temporary ID if necessary. The default implementation
+ // just calls Profile and does a regular comparison. Implementations
+ // can override this to provide more efficient implementations.
+ static inline bool Equals(T &X, const FoldingSetNodeID &ID,
+ FoldingSetNodeID &TempID);
+
+ // ComputeHash - Compute a hash value for X, using TempID to
+ // compute a temporary ID if necessary. The default implementation
+ // just calls Profile and does a regular hash computation.
+ // Implementations can override this to provide more efficient
+ // implementations.
+ static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID);
+};
+
+/// FoldingSetTrait - This trait class is used to define behavior of how
+/// to "profile" (in the FoldingSet parlance) an object of a given type.
+/// The default behavior is to invoke a 'Profile' method on an object, but
+/// through template specialization the behavior can be tailored for specific
+/// types. Combined with the FoldingSetNodeWrapper class, one can add objects
+/// to FoldingSets that were not originally designed to have that behavior.
+template<typename T> struct FoldingSetTrait
+ : public DefaultFoldingSetTrait<T> {};
+
+template<typename T, typename Ctx> struct ContextualFoldingSetTrait;
+
+/// DefaultContextualFoldingSetTrait - Like DefaultFoldingSetTrait, but
+/// for ContextualFoldingSets.
+template<typename T, typename Ctx>
+struct DefaultContextualFoldingSetTrait {
+ static void Profile(T &X, FoldingSetNodeID &ID, Ctx Context) {
+ X.Profile(ID, Context);
+ }
+ static inline bool Equals(T &X, const FoldingSetNodeID &ID,
+ FoldingSetNodeID &TempID, Ctx Context);
+ static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID,
+ Ctx Context);
+};
+
+/// ContextualFoldingSetTrait - Like FoldingSetTrait, but for
+/// ContextualFoldingSets.
+template<typename T, typename Ctx> struct ContextualFoldingSetTrait
+ : public DefaultContextualFoldingSetTrait<T, Ctx> {};
+
+//===--------------------------------------------------------------------===//
+/// FoldingSetNodeIDRef - This class describes a reference to an interned
+/// FoldingSetNodeID, which can be useful for storing node id data rather
+/// than using plain FoldingSetNodeIDs, since the 32-element SmallVector
+/// is often much larger than necessary, and the possibility of heap
+/// allocation means it requires a non-trivial destructor call.
+class FoldingSetNodeIDRef {
+ const unsigned* Data;
+ size_t Size;
+public:
+ FoldingSetNodeIDRef() : Data(0), Size(0) {}
+ FoldingSetNodeIDRef(const unsigned *D, size_t S) : Data(D), Size(S) {}
+
+ /// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef,
+ /// used to lookup the node in the FoldingSetImpl.
+ unsigned ComputeHash() const;
+
+ bool operator==(FoldingSetNodeIDRef) const;
+
+ const unsigned *getData() const { return Data; }
+ size_t getSize() const { return Size; }
};
//===--------------------------------------------------------------------===//
@@ -210,11 +296,8 @@ class FoldingSetNodeID {
public:
FoldingSetNodeID() {}
- /// getRawData - Return the ith entry in the Bits data.
- ///
- unsigned getRawData(unsigned i) const {
- return Bits[i];
- }
+ FoldingSetNodeID(FoldingSetNodeIDRef Ref)
+ : Bits(Ref.getData(), Ref.getData() + Ref.getSize()) {}
/// Add* - Add various data types to Bit data.
///
@@ -232,16 +315,22 @@ public:
inline void Add(const T& x) { FoldingSetTrait<T>::Profile(x, *this); }
/// clear - Clear the accumulated profile, allowing this FoldingSetNodeID
- /// object to be used to compute a new profile.
+ /// object to be used to compute a new profile.
inline void clear() { Bits.clear(); }
/// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used
- /// to lookup the node in the FoldingSetImpl.
+ /// to lookup the node in the FoldingSetImpl.
unsigned ComputeHash() const;
/// operator== - Used to compare two nodes to each other.
///
bool operator==(const FoldingSetNodeID &RHS) const;
+ bool operator==(const FoldingSetNodeIDRef RHS) const;
+
+ /// Intern - Copy this node's data to a memory region allocated from the
+ /// given allocator and return a FoldingSetNodeIDRef describing the
+ /// interned data.
+ FoldingSetNodeIDRef Intern(BumpPtrAllocator &Allocator) const;
};
// Convenience type to hide the implementation of the folding set.
@@ -249,6 +338,39 @@ typedef FoldingSetImpl::Node FoldingSetNode;
template<class T> class FoldingSetIterator;
template<class T> class FoldingSetBucketIterator;
+// Definitions of FoldingSetTrait and ContextualFoldingSetTrait functions, which
+// require the definition of FoldingSetNodeID.
+template<typename T>
+inline bool
+DefaultFoldingSetTrait<T>::Equals(T &X, const FoldingSetNodeID &ID,
+ FoldingSetNodeID &TempID) {
+ FoldingSetTrait<T>::Profile(X, TempID);
+ return TempID == ID;
+}
+template<typename T>
+inline unsigned
+DefaultFoldingSetTrait<T>::ComputeHash(T &X, FoldingSetNodeID &TempID) {
+ FoldingSetTrait<T>::Profile(X, TempID);
+ return TempID.ComputeHash();
+}
+template<typename T, typename Ctx>
+inline bool
+DefaultContextualFoldingSetTrait<T, Ctx>::Equals(T &X,
+ const FoldingSetNodeID &ID,
+ FoldingSetNodeID &TempID,
+ Ctx Context) {
+ ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
+ return TempID == ID;
+}
+template<typename T, typename Ctx>
+inline unsigned
+DefaultContextualFoldingSetTrait<T, Ctx>::ComputeHash(T &X,
+ FoldingSetNodeID &TempID,
+ Ctx Context) {
+ ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
+ return TempID.ComputeHash();
+}
+
//===----------------------------------------------------------------------===//
/// FoldingSet - This template class is used to instantiate a specialized
/// implementation of the folding set to the node class T. T must be a
@@ -258,9 +380,23 @@ template<class T> class FoldingSet : public FoldingSetImpl {
private:
 /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
/// way to convert nodes into a unique specifier.
- virtual void GetNodeProfile(FoldingSetNodeID &ID, Node *N) const {
+ virtual void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const {
+ T *TN = static_cast<T *>(N);
+ FoldingSetTrait<T>::Profile(*TN, ID);
+ }
+ /// NodeEquals - Instantiations may optionally provide a way to compare a
+ /// node with a specified ID.
+ virtual bool NodeEquals(Node *N, const FoldingSetNodeID &ID,
+ FoldingSetNodeID &TempID) const {
+ T *TN = static_cast<T *>(N);
+ return FoldingSetTrait<T>::Equals(*TN, ID, TempID);
+ }
+ /// ComputeNodeHash - Instantiations may optionally provide a way to compute a
+ /// hash value directly from a node.
+ virtual unsigned ComputeNodeHash(Node *N,
+ FoldingSetNodeID &TempID) const {
T *TN = static_cast<T *>(N);
- FoldingSetTrait<T>::Profile(*TN,ID);
+ return FoldingSetTrait<T>::ComputeHash(*TN, TempID);
}
public:
@@ -302,6 +438,85 @@ public:
};
//===----------------------------------------------------------------------===//
+/// ContextualFoldingSet - This template class is a further refinement
+/// of FoldingSet which provides a context argument when calling
+/// Profile on its nodes. Currently, that argument is fixed at
+/// initialization time.
+///
+/// T must be a subclass of FoldingSetNode and implement a Profile
+/// function with signature
+/// void Profile(llvm::FoldingSetNodeID &, Ctx);
+template <class T, class Ctx>
+class ContextualFoldingSet : public FoldingSetImpl {
+ // Unfortunately, this can't derive from FoldingSet<T> because the
+ // construction vtable for FoldingSet<T> requires
+ // FoldingSet<T>::GetNodeProfile to be instantiated, which in turn
+ // requires a single-argument T::Profile().
+
+private:
+ Ctx Context;
+
+ /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
+ /// way to convert nodes into a unique specifier.
+ virtual void GetNodeProfile(FoldingSetImpl::Node *N,
+ FoldingSetNodeID &ID) const {
+ T *TN = static_cast<T *>(N);
+ ContextualFoldingSetTrait<T, Ctx>::Profile(*TN, ID, Context);
+ }
+ virtual bool NodeEquals(FoldingSetImpl::Node *N,
+ const FoldingSetNodeID &ID,
+ FoldingSetNodeID &TempID) const {
+ T *TN = static_cast<T *>(N);
+ return ContextualFoldingSetTrait<T, Ctx>::Equals(*TN, ID, TempID, Context);
+ }
+ virtual unsigned ComputeNodeHash(FoldingSetImpl::Node *N,
+ FoldingSetNodeID &TempID) const {
+ T *TN = static_cast<T *>(N);
+ return ContextualFoldingSetTrait<T, Ctx>::ComputeHash(*TN, TempID, Context);
+ }
+
+public:
+ explicit ContextualFoldingSet(Ctx Context, unsigned Log2InitSize = 6)
+ : FoldingSetImpl(Log2InitSize), Context(Context)
+ {}
+
+ Ctx getContext() const { return Context; }
+
+
+ typedef FoldingSetIterator<T> iterator;
+ iterator begin() { return iterator(Buckets); }
+ iterator end() { return iterator(Buckets+NumBuckets); }
+
+ typedef FoldingSetIterator<const T> const_iterator;
+ const_iterator begin() const { return const_iterator(Buckets); }
+ const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
+
+ typedef FoldingSetBucketIterator<T> bucket_iterator;
+
+ bucket_iterator bucket_begin(unsigned hash) {
+ return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
+ }
+
+ bucket_iterator bucket_end(unsigned hash) {
+ return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
+ }
+
+ /// GetOrInsertNode - If there is an existing simple Node exactly
+ /// equal to the specified node, return it. Otherwise, insert 'N'
+ /// and return it instead.
+ T *GetOrInsertNode(Node *N) {
+ return static_cast<T *>(FoldingSetImpl::GetOrInsertNode(N));
+ }
+
+ /// FindNodeOrInsertPos - Look up the node specified by ID. If it
+ /// exists, return it. If not, return the insertion token that will
+ /// make insertion faster.
+ T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
+ return static_cast<T *>(FoldingSetImpl::FindNodeOrInsertPos(ID, InsertPos));
+ }
+};
+
+//===----------------------------------------------------------------------===//
/// FoldingSetIteratorImpl - This is the common iterator support shared by all
/// folding sets, which knows how to walk the folding set hash table.
class FoldingSetIteratorImpl {
@@ -344,8 +559,8 @@ public:
//===----------------------------------------------------------------------===//
/// FoldingSetBucketIteratorImpl - This is the common bucket iterator support
-/// shared by all folding sets, which knows how to walk a particular bucket
-/// of a folding set hash table.
+/// shared by all folding sets, which knows how to walk a particular bucket
+/// of a folding set hash table.
class FoldingSetBucketIteratorImpl {
protected:
@@ -446,7 +661,7 @@ class FastFoldingSetNode : public FoldingSetNode {
protected:
explicit FastFoldingSetNode(const FoldingSetNodeID &ID) : FastID(ID) {}
public:
- void Profile(FoldingSetNodeID& ID) { ID = FastID; }
+ void Profile(FoldingSetNodeID& ID) const { ID = FastID; }
};
//===----------------------------------------------------------------------===//
@@ -456,9 +671,6 @@ template<typename T> struct FoldingSetTrait<T*> {
static inline void Profile(const T* X, FoldingSetNodeID& ID) {
ID.AddPointer(X);
}
- static inline void Profile(T* X, FoldingSetNodeID& ID) {
- ID.AddPointer(X);
- }
};
template<typename T> struct FoldingSetTrait<const T*> {
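
For orientation, the FoldingSet changes above reshuffle the virtual interface (GetNodeProfile, NodeEquals, ComputeNodeHash) but leave the client-side uniquing pattern unchanged. A sketch of that pattern, with an illustrative node type:

    #include "llvm/ADT/FoldingSet.h"
    using namespace llvm;

    namespace {
    struct UniquedPair : public FoldingSetNode {
      int First, Second;
      UniquedPair(int F, int S) : First(F), Second(S) {}
      void Profile(FoldingSetNodeID &ID) const {
        ID.AddInteger(First);
        ID.AddInteger(Second);
      }
    };
    }

    // Sketch: return the unique node for (F, S), creating it on first use.
    static UniquedPair *getOrCreate(FoldingSet<UniquedPair> &Set, int F, int S) {
      FoldingSetNodeID ID;
      ID.AddInteger(F);
      ID.AddInteger(S);
      void *InsertPos = 0;
      if (UniquedPair *P = Set.FindNodeOrInsertPos(ID, InsertPos))
        return P;
      UniquedPair *P = new UniquedPair(F, S);  // ownership left to the caller here
      Set.InsertNode(P, InsertPos);
      return P;
    }
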
diff --git a/libclamav/c++/llvm/include/llvm/ADT/ImmutableIntervalMap.h b/libclamav/c++/llvm/include/llvm/ADT/ImmutableIntervalMap.h
index f33fb1e..968ce15 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/ImmutableIntervalMap.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/ImmutableIntervalMap.h
@@ -16,14 +16,14 @@ namespace llvm {
class Interval {
private:
- uint64_t Start;
- uint64_t End;
+ int64_t Start;
+ int64_t End;
public:
- Interval(uint64_t S, uint64_t E) : Start(S), End(E) {}
+ Interval(int64_t S, int64_t E) : Start(S), End(E) {}
- uint64_t getStart() const { return Start; }
- uint64_t getEnd() const { return End; }
+ int64_t getStart() const { return Start; }
+ int64_t getEnd() const { return End; }
};
template <typename T>
@@ -125,9 +125,11 @@ private:
key_type_ref KCurrent = ImutInfo::KeyOfValue(this->Value(T));
if (ImutInfo::isLess(K, KCurrent))
- return this->Balance(Add_internal(V, this->Left(T)), this->Value(T), this->Right(T));
+ return this->Balance(Add_internal(V, this->Left(T)), this->Value(T),
+ this->Right(T));
else
- return this->Balance(this->Left(T), this->Value(T), Add_internal(V, this->Right(T)));
+ return this->Balance(this->Left(T), this->Value(T),
+ Add_internal(V, this->Right(T)));
}
// Remove all overlaps from T.
@@ -150,9 +152,11 @@ private:
// If current key does not overlap the inserted key.
if (CurrentK.getStart() > K.getEnd())
- return this->Balance(RemoveOverlap(this->Left(T), K, Changed), this->Value(T), this->Right(T));
+ return this->Balance(RemoveOverlap(this->Left(T), K, Changed),
+ this->Value(T), this->Right(T));
else if (CurrentK.getEnd() < K.getStart())
- return this->Balance(this->Left(T), this->Value(T), RemoveOverlap(this->Right(T), K, Changed));
+ return this->Balance(this->Left(T), this->Value(T),
+ RemoveOverlap(this->Right(T), K, Changed));
// Current key overlaps with the inserted key.
// Remove the current key.
diff --git a/libclamav/c++/llvm/include/llvm/ADT/ImmutableSet.h b/libclamav/c++/llvm/include/llvm/ADT/ImmutableSet.h
index 65e70e2..70c3caf 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/ImmutableSet.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/ImmutableSet.h
@@ -189,6 +189,8 @@ public:
unsigned verify() const {
unsigned HL = getLeft() ? getLeft()->verify() : 0;
unsigned HR = getRight() ? getRight()->verify() : 0;
+ (void) HL;
+ (void) HR;
assert(getHeight() == ( HL > HR ? HL : HR ) + 1
&& "Height calculation wrong");
diff --git a/libclamav/c++/llvm/include/llvm/ADT/NullablePtr.h b/libclamav/c++/llvm/include/llvm/ADT/NullablePtr.h
new file mode 100644
index 0000000..a9c47a1
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/ADT/NullablePtr.h
@@ -0,0 +1,52 @@
+//===- llvm/ADT/NullablePtr.h - A pointer that allows null ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines and implements the NullablePtr class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_NULLABLE_PTR_H
+#define LLVM_ADT_NULLABLE_PTR_H
+
+#include <cassert>
+#include <cstddef>
+
+namespace llvm {
+/// NullablePtr pointer wrapper - NullablePtr is used for APIs where a
+/// potentially-null pointer gets passed around that must be explicitly handled
+/// in lots of places. By putting a wrapper around the null pointer, it makes
+/// it more likely that the null pointer case will be handled correctly.
+template<class T>
+class NullablePtr {
+ T *Ptr;
+public:
+ NullablePtr(T *P = 0) : Ptr(P) {}
+
+ bool isNull() const { return Ptr == 0; }
+ bool isNonNull() const { return Ptr != 0; }
+
+ /// get - Return the pointer if it is non-null.
+ const T *get() const {
+ assert(Ptr && "Pointer wasn't checked for null!");
+ return Ptr;
+ }
+
+ /// get - Return the pointer if it is non-null.
+ T *get() {
+ assert(Ptr && "Pointer wasn't checked for null!");
+ return Ptr;
+ }
+
+ T *getPtrOrNull() { return Ptr; }
+ const T *getPtrOrNull() const { return Ptr; }
+};
+
+} // end namespace llvm
+
+#endif
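
NullablePtr.h is new as well; the point of the wrapper is that get() asserts the pointer was checked. A tiny sketch:

    #include "llvm/ADT/NullablePtr.h"
    using namespace llvm;

    // Sketch: callers are forced to handle the null case explicitly.
    static int valueOrZero(NullablePtr<int> P) {
      if (P.isNull())
        return 0;
      return *P.get();  // get() asserts on a null pointer
    }
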
diff --git a/libclamav/c++/llvm/include/llvm/ADT/Optional.h b/libclamav/c++/llvm/include/llvm/ADT/Optional.h
new file mode 100644
index 0000000..34e54a0
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/ADT/Optional.h
@@ -0,0 +1,66 @@
+//===-- Optional.h - Simple variant for passing optional values ---*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Optional, a template class modeled in the spirit of
+// OCaml's 'opt' variant. The idea is to strongly type whether or not
+// a value can be optional.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_OPTIONAL
+#define LLVM_ADT_OPTIONAL
+
+#include <cassert>
+
+namespace llvm {
+
+template<typename T>
+class Optional {
+ T x;
+ unsigned hasVal : 1;
+public:
+ explicit Optional() : x(), hasVal(false) {}
+ Optional(const T &y) : x(y), hasVal(true) {}
+
+ static inline Optional create(const T* y) {
+ return y ? Optional(*y) : Optional();
+ }
+
+ Optional &operator=(const T &y) {
+ x = y;
+ hasVal = true;
+ return *this;
+ }
+
+ const T* getPointer() const { assert(hasVal); return &x; }
+ const T& getValue() const { assert(hasVal); return x; }
+
+ operator bool() const { return hasVal; }
+ bool hasValue() const { return hasVal; }
+ const T* operator->() const { return getPointer(); }
+ const T& operator*() const { assert(hasVal); return x; }
+};
+
+template<typename T> struct simplify_type;
+
+template <typename T>
+struct simplify_type<const Optional<T> > {
+ typedef const T* SimpleType;
+ static SimpleType getSimplifiedValue(const Optional<T> &Val) {
+ return Val.getPointer();
+ }
+};
+
+template <typename T>
+struct simplify_type<Optional<T> >
+ : public simplify_type<const Optional<T> > {};
+
+} // end llvm namespace
+
+#endif
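
Optional.h, also new, distinguishes "no value" from every legitimate value of T. A short sketch (parseDigit is an illustrative helper):

    #include "llvm/ADT/Optional.h"
    using namespace llvm;

    // Sketch: return a digit's value, or "nothing" for a non-digit character.
    static Optional<unsigned> parseDigit(char C) {
      if (C >= '0' && C <= '9')
        return Optional<unsigned>(C - '0');
      return Optional<unsigned>();
    }

    static unsigned digitOrZero(char C) {
      Optional<unsigned> D = parseDigit(C);
      return D.hasValue() ? D.getValue() : 0;
    }
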
diff --git a/libclamav/c++/llvm/include/llvm/ADT/OwningPtr.h b/libclamav/c++/llvm/include/llvm/ADT/OwningPtr.h
index cc53c8c..6d9c305 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/OwningPtr.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/OwningPtr.h
@@ -128,7 +128,6 @@ inline void swap(OwningArrayPtr<T> &a, OwningArrayPtr<T> &b) {
a.swap(b);
}
-
} // end namespace llvm
#endif
diff --git a/libclamav/c++/llvm/include/llvm/ADT/PointerUnion.h b/libclamav/c++/llvm/include/llvm/ADT/PointerUnion.h
index 49c8940..3a514b5 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/PointerUnion.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/PointerUnion.h
@@ -124,7 +124,7 @@ namespace llvm {
}
void *getOpaqueValue() const { return Val.getOpaqueValue(); }
- static PointerUnion getFromOpaqueValue(void *VP) {
+ static inline PointerUnion getFromOpaqueValue(void *VP) {
PointerUnion V;
V.Val = ValTy::getFromOpaqueValue(VP);
return V;
@@ -227,7 +227,7 @@ namespace llvm {
}
void *getOpaqueValue() const { return Val.getOpaqueValue(); }
- static PointerUnion3 getFromOpaqueValue(void *VP) {
+ static inline PointerUnion3 getFromOpaqueValue(void *VP) {
PointerUnion3 V;
V.Val = ValTy::getFromOpaqueValue(VP);
return V;
@@ -338,7 +338,7 @@ namespace llvm {
}
void *getOpaqueValue() const { return Val.getOpaqueValue(); }
- static PointerUnion4 getFromOpaqueValue(void *VP) {
+ static inline PointerUnion4 getFromOpaqueValue(void *VP) {
PointerUnion4 V;
V.Val = ValTy::getFromOpaqueValue(VP);
return V;
diff --git a/libclamav/c++/llvm/include/llvm/ADT/PostOrderIterator.h b/libclamav/c++/llvm/include/llvm/ADT/PostOrderIterator.h
index 8315bc9..47e5b2b 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/PostOrderIterator.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/PostOrderIterator.h
@@ -19,7 +19,6 @@
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <set>
-#include <stack>
#include <vector>
namespace llvm {
@@ -52,21 +51,21 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
// VisitStack - Used to maintain the ordering. Top = current block
// First element is basic block pointer, second is the 'next child' to visit
- std::stack<std::pair<NodeType *, ChildItTy> > VisitStack;
+ std::vector<std::pair<NodeType *, ChildItTy> > VisitStack;
void traverseChild() {
- while (VisitStack.top().second != GT::child_end(VisitStack.top().first)) {
- NodeType *BB = *VisitStack.top().second++;
+ while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
+ NodeType *BB = *VisitStack.back().second++;
if (!this->Visited.count(BB)) { // If the block is not visited...
this->Visited.insert(BB);
- VisitStack.push(std::make_pair(BB, GT::child_begin(BB)));
+ VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
}
}
}
inline po_iterator(NodeType *BB) {
this->Visited.insert(BB);
- VisitStack.push(std::make_pair(BB, GT::child_begin(BB)));
+ VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
inline po_iterator() {} // End is when stack is empty.
@@ -75,7 +74,7 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
po_iterator_storage<SetType, ExtStorage>(S) {
if(!S.count(BB)) {
this->Visited.insert(BB);
- VisitStack.push(std::make_pair(BB, GT::child_begin(BB)));
+ VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
}
@@ -102,7 +101,7 @@ public:
inline bool operator!=(const _Self& x) const { return !operator==(x); }
inline pointer operator*() const {
- return VisitStack.top().first;
+ return VisitStack.back().first;
}
// This is a nonstandard operator-> that dereferences the pointer an extra
@@ -112,7 +111,7 @@ public:
inline NodeType *operator->() const { return operator*(); }
inline _Self& operator++() { // Preincrement
- VisitStack.pop();
+ VisitStack.pop_back();
if (!VisitStack.empty())
traverseChild();
return *this;
diff --git a/libclamav/c++/llvm/include/llvm/ADT/SCCIterator.h b/libclamav/c++/llvm/include/llvm/ADT/SCCIterator.h
index d38ce4c..c49d599 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/SCCIterator.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/SCCIterator.h
@@ -66,7 +66,7 @@ class scc_iterator
std::vector<unsigned> MinVisitNumStack;
// A single "visit" within the non-recursive DFS traversal.
- void DFSVisitOne(NodeType* N) {
+ void DFSVisitOne(NodeType *N) {
++visitNum; // Global counter for the visit order
nodeVisitNumbers[N] = visitNum;
SCCNodeStack.push_back(N);
@@ -83,13 +83,14 @@ class scc_iterator
// TOS has at least one more child so continue DFS
NodeType *childN = *VisitStack.back().second++;
if (!nodeVisitNumbers.count(childN)) {
- // this node has never been seen
+ // this node has never been seen.
DFSVisitOne(childN);
- } else {
- unsigned childNum = nodeVisitNumbers[childN];
- if (MinVisitNumStack.back() > childNum)
- MinVisitNumStack.back() = childNum;
+ continue;
}
+
+ unsigned childNum = nodeVisitNumbers[childN];
+ if (MinVisitNumStack.back() > childNum)
+ MinVisitNumStack.back() = childNum;
}
}
@@ -100,7 +101,7 @@ class scc_iterator
while (!VisitStack.empty()) {
DFSVisitChildren();
assert(VisitStack.back().second ==GT::child_end(VisitStack.back().first));
- NodeType* visitingN = VisitStack.back().first;
+ NodeType *visitingN = VisitStack.back().first;
unsigned minVisitNum = MinVisitNumStack.back();
VisitStack.pop_back();
MinVisitNumStack.pop_back();
@@ -111,18 +112,19 @@ class scc_iterator
// " : minVisitNum = " << minVisitNum << "; Node visit num = " <<
// nodeVisitNumbers[visitingN] << "\n";
- if (minVisitNum == nodeVisitNumbers[visitingN]) {
- // A full SCC is on the SCCNodeStack! It includes all nodes below
- // visitingN on the stack. Copy those nodes to CurrentSCC,
- // reset their minVisit values, and return (this suspends
- // the DFS traversal till the next ++).
- do {
- CurrentSCC.push_back(SCCNodeStack.back());
- SCCNodeStack.pop_back();
- nodeVisitNumbers[CurrentSCC.back()] = ~0U;
- } while (CurrentSCC.back() != visitingN);
- return;
- }
+ if (minVisitNum != nodeVisitNumbers[visitingN])
+ continue;
+
+ // A full SCC is on the SCCNodeStack! It includes all nodes below
+ // visitingN on the stack. Copy those nodes to CurrentSCC,
+ // reset their minVisit values, and return (this suspends
+ // the DFS traversal till the next ++).
+ do {
+ CurrentSCC.push_back(SCCNodeStack.back());
+ SCCNodeStack.pop_back();
+ nodeVisitNumbers[CurrentSCC.back()] = ~0U;
+ } while (CurrentSCC.back() != visitingN);
+ return;
}
}
@@ -136,11 +138,11 @@ public:
typedef scc_iterator<GraphT, GT> _Self;
// Provide static "constructors"...
- static inline _Self begin(const GraphT& G) { return _Self(GT::getEntryNode(G)); }
- static inline _Self end (const GraphT& G) { return _Self(); }
+ static inline _Self begin(const GraphT &G){return _Self(GT::getEntryNode(G));}
+ static inline _Self end (const GraphT &G) { return _Self(); }
- // Direct loop termination test (I.fini() is more efficient than I == end())
- inline bool fini() const {
+ // Direct loop termination test: I.isAtEnd() is more efficient than I == end()
+ inline bool isAtEnd() const {
assert(!CurrentSCC.empty() || VisitStack.empty());
return CurrentSCC.empty();
}
@@ -181,28 +183,36 @@ public:
return true;
return false;
}
+
+ /// ReplaceNode - This informs the scc_iterator that the specified Old node
+ /// has been deleted, and New is to be used in its place.
+ void ReplaceNode(NodeType *Old, NodeType *New) {
+ assert(nodeVisitNumbers.count(Old) && "Old not in scc_iterator?");
+ nodeVisitNumbers[New] = nodeVisitNumbers[Old];
+ nodeVisitNumbers.erase(Old);
+ }
};
// Global constructor for the SCC iterator.
template <class T>
-scc_iterator<T> scc_begin(const T& G) {
+scc_iterator<T> scc_begin(const T &G) {
return scc_iterator<T>::begin(G);
}
template <class T>
-scc_iterator<T> scc_end(const T& G) {
+scc_iterator<T> scc_end(const T &G) {
return scc_iterator<T>::end(G);
}
template <class T>
-scc_iterator<Inverse<T> > scc_begin(const Inverse<T>& G) {
- return scc_iterator<Inverse<T> >::begin(G);
+scc_iterator<Inverse<T> > scc_begin(const Inverse<T> &G) {
+ return scc_iterator<Inverse<T> >::begin(G);
}
template <class T>
-scc_iterator<Inverse<T> > scc_end(const Inverse<T>& G) {
- return scc_iterator<Inverse<T> >::end(G);
+scc_iterator<Inverse<T> > scc_end(const Inverse<T> &G) {
+ return scc_iterator<Inverse<T> >::end(G);
}
} // End llvm namespace
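
The scc_iterator changes above rename fini() to isAtEnd() and add ReplaceNode(). A hedged sketch of the usual iteration pattern over a function's CFG, assuming the GraphTraits specialization from llvm/Support/CFG.h (names are illustrative):

    #include "llvm/ADT/SCCIterator.h"
    #include "llvm/Function.h"
    #include "llvm/Support/CFG.h"
    using namespace llvm;

    // Sketch: count the SCCs of a CFG that contain a cycle, using the
    // renamed isAtEnd() termination test instead of comparing against end().
    static unsigned countCyclicSCCs(Function *F) {
      unsigned N = 0;
      for (scc_iterator<Function *> I = scc_begin(F); !I.isAtEnd(); ++I)
        if ((*I).size() > 1 || I.hasLoop())
          ++N;
      return N;
    }
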
diff --git a/libclamav/c++/llvm/include/llvm/ADT/STLExtras.h b/libclamav/c++/llvm/include/llvm/ADT/STLExtras.h
index 32cf459..0b0346b 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/STLExtras.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/STLExtras.h
@@ -225,7 +225,7 @@ inline T *array_endof(T (&x)[N]) {
/// Find the length of an array.
template<class T, std::size_t N>
-inline size_t array_lengthof(T (&x)[N]) {
+inline size_t array_lengthof(T (&)[N]) {
return N;
}
@@ -243,7 +243,7 @@ static inline int array_pod_sort_comparator(const void *P1, const void *P2) {
/// get_array_pad_sort_comparator - This is an internal helper function used to
/// get type deduction of T right.
template<typename T>
-static int (*get_array_pad_sort_comparator(const T &X))
+static int (*get_array_pad_sort_comparator(const T &))
(const void*, const void*) {
return array_pod_sort_comparator<T>;
}
@@ -279,6 +279,28 @@ static inline void array_pod_sort(IteratorTy Start, IteratorTy End,
qsort(&*Start, End-Start, sizeof(*Start), Compare);
}
+//===----------------------------------------------------------------------===//
+// Extra additions to <algorithm>
+//===----------------------------------------------------------------------===//
+
+/// For a container of pointers, deletes the pointers and then clears the
+/// container.
+template<typename Container>
+void DeleteContainerPointers(Container &C) {
+ for (typename Container::iterator I = C.begin(), E = C.end(); I != E; ++I)
+ delete *I;
+ C.clear();
+}
+
+/// In a container of pairs (usually a map) whose second element is a pointer,
+/// deletes the second elements and then clears the container.
+template<typename Container>
+void DeleteContainerSeconds(Container &C) {
+ for (typename Container::iterator I = C.begin(), E = C.end(); I != E; ++I)
+ delete I->second;
+ C.clear();
+}
+
} // End llvm namespace
#endif
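
The two helpers appended to STLExtras.h above capture a common cleanup idiom. A small sketch, assuming the two containers own disjoint objects (Widget is a placeholder type):

    #include "llvm/ADT/STLExtras.h"
    #include <map>
    #include <vector>
    using namespace llvm;

    struct Widget { int Id; };

    // Sketch: delete every owned pointer, then clear the containers.
    static void cleanup(std::vector<Widget *> &Owned,
                        std::map<int, Widget *> &ById) {
      DeleteContainerPointers(Owned);
      DeleteContainerSeconds(ById);
    }
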
diff --git a/libclamav/c++/llvm/include/llvm/ADT/ScopedHashTable.h b/libclamav/c++/llvm/include/llvm/ADT/ScopedHashTable.h
index b5ca374..c96ad19 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/ScopedHashTable.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/ScopedHashTable.h
@@ -139,7 +139,12 @@ public:
}
V lookup(const K &Key) {
- return TopLevelMap[Key]->getValue();
+ typename DenseMap<K, ScopedHashTableVal<K, V, KInfo>*, KInfo>::iterator
+ I = TopLevelMap.find(Key);
+ if (I != TopLevelMap.end())
+ return I->second->getValue();
+
+ return V();
}
void insert(const K &Key, const V &Val) {
diff --git a/libclamav/c++/llvm/include/llvm/ADT/SetVector.h b/libclamav/c++/llvm/include/llvm/ADT/SetVector.h
index fab133a..bf8286c 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/SetVector.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/SetVector.h
@@ -143,6 +143,14 @@ public:
vector_.pop_back();
}
+ bool operator==(const SetVector &that) const {
+ return vector_ == that.vector_;
+ }
+
+ bool operator!=(const SetVector &that) const {
+ return vector_ != that.vector_;
+ }
+
private:
set_type set_; ///< The set.
vector_type vector_; ///< The vector.
diff --git a/libclamav/c++/llvm/include/llvm/ADT/SmallBitVector.h b/libclamav/c++/llvm/include/llvm/ADT/SmallBitVector.h
index 5c774b9..3441d0a 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/SmallBitVector.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/SmallBitVector.h
@@ -15,7 +15,6 @@
#define LLVM_ADT_SMALLBITVECTOR_H
#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/PointerIntPair.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
@@ -32,48 +31,85 @@ class SmallBitVector {
// TODO: In "large" mode, a pointer to a BitVector is used, leading to an
// unnecessary level of indirection. It would be more efficient to use a
// pointer to memory containing size, allocation size, and the array of bits.
- PointerIntPair<BitVector *, 1, uintptr_t> X;
+ uintptr_t X;
- // The number of bits in this class.
- static const size_t NumBaseBits = sizeof(uintptr_t) * CHAR_BIT;
+ enum {
+ // The number of bits in this class.
+ NumBaseBits = sizeof(uintptr_t) * CHAR_BIT,
- // One bit is used to discriminate between small and large mode. The
- // remaining bits are used for the small-mode representation.
- static const size_t SmallNumRawBits = NumBaseBits - 1;
+ // One bit is used to discriminate between small and large mode. The
+ // remaining bits are used for the small-mode representation.
+ SmallNumRawBits = NumBaseBits - 1,
- // A few more bits are used to store the size of the bit set in small mode.
- // Theoretically this is a ceil-log2. These bits are encoded in the most
- // significant bits of the raw bits.
- static const size_t SmallNumSizeBits = (NumBaseBits == 32 ? 5 :
- NumBaseBits == 64 ? 6 :
- SmallNumRawBits);
+ // A few more bits are used to store the size of the bit set in small mode.
+ // Theoretically this is a ceil-log2. These bits are encoded in the most
+ // significant bits of the raw bits.
+ SmallNumSizeBits = (NumBaseBits == 32 ? 5 :
+ NumBaseBits == 64 ? 6 :
+ SmallNumRawBits),
- // The remaining bits are used to store the actual set in small mode.
- static const size_t SmallNumDataBits = SmallNumRawBits - SmallNumSizeBits;
+ // The remaining bits are used to store the actual set in small mode.
+ SmallNumDataBits = SmallNumRawBits - SmallNumSizeBits
+ };
+public:
+ // Encapsulation of a single bit.
+ class reference {
+ SmallBitVector &TheVector;
+ unsigned BitPos;
+
+ public:
+ reference(SmallBitVector &b, unsigned Idx) : TheVector(b), BitPos(Idx) {}
+
+ reference& operator=(reference t) {
+ *this = bool(t);
+ return *this;
+ }
+
+ reference& operator=(bool t) {
+ if (t)
+ TheVector.set(BitPos);
+ else
+ TheVector.reset(BitPos);
+ return *this;
+ }
+
+ operator bool() const {
+ return const_cast<const SmallBitVector &>(TheVector).operator[](BitPos);
+ }
+ };
+
+private:
bool isSmall() const {
- return X.getInt();
+ return X & uintptr_t(1);
+ }
+
+ BitVector *getPointer() const {
+ assert(!isSmall());
+ return reinterpret_cast<BitVector *>(X);
}
void switchToSmall(uintptr_t NewSmallBits, size_t NewSize) {
- X.setInt(true);
+ X = 1;
setSmallSize(NewSize);
setSmallBits(NewSmallBits);
}
void switchToLarge(BitVector *BV) {
- X.setInt(false);
- X.setPointer(BV);
+ X = reinterpret_cast<uintptr_t>(BV);
+ assert(!isSmall() && "Tried to use an unaligned pointer");
}
// Return all the bits used for the "small" representation; this includes
// bits for the size as well as the element bits.
uintptr_t getSmallRawBits() const {
- return reinterpret_cast<uintptr_t>(X.getPointer()) >> 1;
+ assert(isSmall());
+ return X >> 1;
}
void setSmallRawBits(uintptr_t NewRawBits) {
- return X.setPointer(reinterpret_cast<BitVector *>(NewRawBits << 1));
+ assert(isSmall());
+ X = (NewRawBits << 1) | uintptr_t(1);
}
// Return the size.
@@ -87,22 +123,22 @@ class SmallBitVector {
// Return the element bits.
uintptr_t getSmallBits() const {
- return getSmallRawBits() & ~(~uintptr_t(0) << SmallNumDataBits);
+ return getSmallRawBits() & ~(~uintptr_t(0) << getSmallSize());
}
void setSmallBits(uintptr_t NewBits) {
- setSmallRawBits((getSmallRawBits() & (~uintptr_t(0) << SmallNumDataBits)) |
- (NewBits & ~(~uintptr_t(0) << getSmallSize())));
+ setSmallRawBits((NewBits & ~(~uintptr_t(0) << getSmallSize())) |
+ (getSmallSize() << SmallNumDataBits));
}
public:
/// SmallBitVector default ctor - Creates an empty bitvector.
- SmallBitVector() : X(0, 1) {}
+ SmallBitVector() : X(1) {}
/// SmallBitVector ctor - Creates a bitvector of specified number of bits. All
/// bits are initialized to the specified value.
- explicit SmallBitVector(unsigned s, bool t = false) : X(0, 1) {
- if (s <= SmallNumRawBits)
+ explicit SmallBitVector(unsigned s, bool t = false) {
+ if (s <= SmallNumDataBits)
switchToSmall(t ? ~uintptr_t(0) : 0, s);
else
switchToLarge(new BitVector(s, t));
@@ -113,22 +149,22 @@ public:
if (RHS.isSmall())
X = RHS.X;
else
- switchToLarge(new BitVector(*RHS.X.getPointer()));
+ switchToLarge(new BitVector(*RHS.getPointer()));
}
~SmallBitVector() {
if (!isSmall())
- delete X.getPointer();
+ delete getPointer();
}
/// empty - Tests whether there are no bits in this bitvector.
bool empty() const {
- return isSmall() ? getSmallSize() == 0 : X.getPointer()->empty();
+ return isSmall() ? getSmallSize() == 0 : getPointer()->empty();
}
/// size - Returns the number of bits in this bitvector.
size_t size() const {
- return isSmall() ? getSmallSize() : X.getPointer()->size();
+ return isSmall() ? getSmallSize() : getPointer()->size();
}
/// count - Returns the number of bits which are set.
@@ -141,21 +177,21 @@ public:
return CountPopulation_64(Bits);
assert(0 && "Unsupported!");
}
- return X.getPointer()->count();
+ return getPointer()->count();
}
/// any - Returns true if any bit is set.
bool any() const {
if (isSmall())
return getSmallBits() != 0;
- return X.getPointer()->any();
+ return getPointer()->any();
}
/// none - Returns true if none of the bits are set.
bool none() const {
if (isSmall())
return getSmallBits() == 0;
- return X.getPointer()->none();
+ return getPointer()->none();
}
/// find_first - Returns the index of the first set bit, -1 if none
@@ -163,13 +199,15 @@ public:
int find_first() const {
if (isSmall()) {
uintptr_t Bits = getSmallBits();
+ if (Bits == 0)
+ return -1;
if (sizeof(uintptr_t) * CHAR_BIT == 32)
return CountTrailingZeros_32(Bits);
if (sizeof(uintptr_t) * CHAR_BIT == 64)
return CountTrailingZeros_64(Bits);
assert(0 && "Unsupported!");
}
- return X.getPointer()->find_first();
+ return getPointer()->find_first();
}
/// find_next - Returns the index of the next set bit following the
@@ -178,30 +216,33 @@ public:
if (isSmall()) {
uintptr_t Bits = getSmallBits();
// Mask off previous bits.
- Bits &= ~uintptr_t(0) << Prev;
+ Bits &= ~uintptr_t(0) << (Prev + 1);
+ if (Bits == 0 || Prev + 1 >= getSmallSize())
+ return -1;
if (sizeof(uintptr_t) * CHAR_BIT == 32)
return CountTrailingZeros_32(Bits);
if (sizeof(uintptr_t) * CHAR_BIT == 64)
return CountTrailingZeros_64(Bits);
assert(0 && "Unsupported!");
}
- return X.getPointer()->find_next(Prev);
+ return getPointer()->find_next(Prev);
}
/// clear - Clear all bits.
void clear() {
if (!isSmall())
- delete X.getPointer();
+ delete getPointer();
switchToSmall(0, 0);
}
/// resize - Grow or shrink the bitvector.
void resize(unsigned N, bool t = false) {
if (!isSmall()) {
- X.getPointer()->resize(N, t);
- } else if (getSmallSize() >= N) {
+ getPointer()->resize(N, t);
+ } else if (SmallNumDataBits >= N) {
+ uintptr_t NewBits = t ? ~uintptr_t(0) << getSmallSize() : 0;
setSmallSize(N);
- setSmallBits(getSmallBits());
+ setSmallBits(NewBits | getSmallBits());
} else {
BitVector *BV = new BitVector(N, t);
uintptr_t OldBits = getSmallBits();
@@ -224,7 +265,7 @@ public:
switchToLarge(BV);
}
} else {
- X.getPointer()->reserve(N);
+ getPointer()->reserve(N);
}
}
@@ -233,7 +274,7 @@ public:
if (isSmall())
setSmallBits(~uintptr_t(0));
else
- X.getPointer()->set();
+ getPointer()->set();
return *this;
}
@@ -241,7 +282,7 @@ public:
if (isSmall())
setSmallBits(getSmallBits() | (uintptr_t(1) << Idx));
else
- X.getPointer()->set(Idx);
+ getPointer()->set(Idx);
return *this;
}
@@ -249,7 +290,7 @@ public:
if (isSmall())
setSmallBits(0);
else
- X.getPointer()->reset();
+ getPointer()->reset();
return *this;
}
@@ -257,7 +298,7 @@ public:
if (isSmall())
setSmallBits(getSmallBits() & ~(uintptr_t(1) << Idx));
else
- X.getPointer()->reset(Idx);
+ getPointer()->reset(Idx);
return *this;
}
@@ -265,7 +306,7 @@ public:
if (isSmall())
setSmallBits(~getSmallBits());
else
- X.getPointer()->flip();
+ getPointer()->flip();
return *this;
}
@@ -273,7 +314,7 @@ public:
if (isSmall())
setSmallBits(getSmallBits() ^ (uintptr_t(1) << Idx));
else
- X.getPointer()->flip(Idx);
+ getPointer()->flip(Idx);
return *this;
}
@@ -283,12 +324,16 @@ public:
}
// Indexing.
- // TODO: Add an index operator which returns a "reference" (proxy class).
+ reference operator[](unsigned Idx) {
+ assert(Idx < size() && "Out-of-bounds Bit access.");
+ return reference(*this, Idx);
+ }
+
bool operator[](unsigned Idx) const {
assert(Idx < size() && "Out-of-bounds Bit access.");
if (isSmall())
return ((getSmallBits() >> Idx) & 1) != 0;
- return X.getPointer()->operator[](Idx);
+ return getPointer()->operator[](Idx);
}
bool test(unsigned Idx) const {
@@ -302,7 +347,7 @@ public:
if (isSmall())
return getSmallBits() == RHS.getSmallBits();
else
- return *X.getPointer() == *RHS.X.getPointer();
+ return *getPointer() == *RHS.getPointer();
}
bool operator!=(const SmallBitVector &RHS) const {
@@ -315,11 +360,11 @@ public:
if (isSmall())
setSmallBits(getSmallBits() & RHS.getSmallBits());
else if (!RHS.isSmall())
- X.getPointer()->operator&=(*RHS.X.getPointer());
+ getPointer()->operator&=(*RHS.getPointer());
else {
SmallBitVector Copy = RHS;
Copy.resize(size());
- X.getPointer()->operator&=(*Copy.X.getPointer());
+ getPointer()->operator&=(*Copy.getPointer());
}
return *this;
}
@@ -329,11 +374,11 @@ public:
if (isSmall())
setSmallBits(getSmallBits() | RHS.getSmallBits());
else if (!RHS.isSmall())
- X.getPointer()->operator|=(*RHS.X.getPointer());
+ getPointer()->operator|=(*RHS.getPointer());
else {
SmallBitVector Copy = RHS;
Copy.resize(size());
- X.getPointer()->operator|=(*Copy.X.getPointer());
+ getPointer()->operator|=(*Copy.getPointer());
}
return *this;
}
@@ -343,11 +388,11 @@ public:
if (isSmall())
setSmallBits(getSmallBits() ^ RHS.getSmallBits());
else if (!RHS.isSmall())
- X.getPointer()->operator^=(*RHS.X.getPointer());
+ getPointer()->operator^=(*RHS.getPointer());
else {
SmallBitVector Copy = RHS;
Copy.resize(size());
- X.getPointer()->operator^=(*Copy.X.getPointer());
+ getPointer()->operator^=(*Copy.getPointer());
}
return *this;
}
@@ -358,12 +403,12 @@ public:
if (RHS.isSmall())
X = RHS.X;
else
- switchToLarge(new BitVector(*RHS.X.getPointer()));
+ switchToLarge(new BitVector(*RHS.getPointer()));
} else {
if (!RHS.isSmall())
- *X.getPointer() = *RHS.X.getPointer();
+ *getPointer() = *RHS.getPointer();
else {
- delete X.getPointer();
+ delete getPointer();
X = RHS.X;
}
}
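
For readers following the SmallBitVector change above: the PointerIntPair member is replaced by a bare uintptr_t whose low bit is the small/large discriminator, with the size packed into the top bits of the small payload. A minimal standalone sketch of that tagged-word scheme (all names below are invented for illustration; this is not the header's code):

#include <cassert>
#include <cstdint>
#include <cstdio>

struct TaggedWord {
  // Bit 0 is the small/large flag; the remaining RawBits carry the payload,
  // with the size stored in the top SizeBits of that payload.
  static const unsigned RawBits  = sizeof(uintptr_t) * 8 - 1;
  static const unsigned SizeBits = (sizeof(uintptr_t) * 8 == 64) ? 6 : 5;
  static const unsigned DataBits = RawBits - SizeBits;

  uintptr_t X;

  TaggedWord() : X(1) {}                              // small and empty
  bool isSmall() const { return X & uintptr_t(1); }
  uintptr_t raw() const { return X >> 1; }
  unsigned size() const { return unsigned(raw() >> DataBits); }
  uintptr_t bits() const { return raw() & ~(~uintptr_t(0) << size()); }

  void set(unsigned Size, uintptr_t Bits) {
    assert(Size <= DataBits && "does not fit in small mode");
    uintptr_t Raw = (uintptr_t(Size) << DataBits) |
                    (Bits & ~(~uintptr_t(0) << Size));
    X = (Raw << 1) | uintptr_t(1);                    // keep the small flag
  }
};

int main() {
  TaggedWord W;
  W.set(5, 0x15);                                     // 5 bits, pattern 10101
  std::printf("small=%d size=%u bits=%#llx\n",
              int(W.isSmall()), W.size(), (unsigned long long)W.bits());
  return 0;
}
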
diff --git a/libclamav/c++/llvm/include/llvm/ADT/SmallPtrSet.h b/libclamav/c++/llvm/include/llvm/ADT/SmallPtrSet.h
index ef08125..424bdba 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/SmallPtrSet.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/SmallPtrSet.h
@@ -46,8 +46,10 @@ class SmallPtrSetIteratorImpl;
class SmallPtrSetImpl {
friend class SmallPtrSetIteratorImpl;
protected:
- /// CurArray - This is the current set of buckets. If it points to
- /// SmallArray, then the set is in 'small mode'.
+ /// SmallArray - Points to a fixed size set of buckets, used in 'small mode'.
+ const void **SmallArray;
+ /// CurArray - This is the current set of buckets. If equal to SmallArray,
+ /// then the set is in 'small mode'.
const void **CurArray;
/// CurArraySize - The allocated size of CurArray, always a power of two.
/// Note that CurArray points to an array that has CurArraySize+1 elements in
@@ -57,15 +59,13 @@ protected:
 // If small, this is # elts allocated consecutively
unsigned NumElements;
unsigned NumTombstones;
- const void *SmallArray[1]; // Must be last ivar.
// Helper to copy construct a SmallPtrSet.
- SmallPtrSetImpl(const SmallPtrSetImpl& that);
- explicit SmallPtrSetImpl(unsigned SmallSize) {
+ SmallPtrSetImpl(const void **SmallStorage, const SmallPtrSetImpl& that);
+ explicit SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize) :
+ SmallArray(SmallStorage), CurArray(SmallStorage), CurArraySize(SmallSize) {
assert(SmallSize && (SmallSize & (SmallSize-1)) == 0 &&
"Initial size must be a power of two!");
- CurArray = &SmallArray[0];
- CurArraySize = SmallSize;
// The end pointer, always valid, is set to a valid element to help the
// iterator.
CurArray[SmallSize] = 0;
@@ -123,7 +123,7 @@ protected:
}
private:
- bool isSmall() const { return CurArray == &SmallArray[0]; }
+ bool isSmall() const { return CurArray == SmallArray; }
unsigned Hash(const void *Ptr) const {
return static_cast<unsigned>(((uintptr_t)Ptr >> 4) & (CurArraySize-1));
@@ -199,29 +199,29 @@ public:
}
};
-/// NextPowerOfTwo - This is a helper template that rounds N up to the next
-/// power of two.
+/// RoundUpToPowerOfTwo - This is a helper template that rounds N up to the next
+/// power of two (which means N itself if N is already a power of two).
template<unsigned N>
-struct NextPowerOfTwo;
+struct RoundUpToPowerOfTwo;
-/// NextPowerOfTwoH - If N is not a power of two, increase it. This is a helper
-/// template used to implement NextPowerOfTwo.
+/// RoundUpToPowerOfTwoH - If N is not a power of two, increase it. This is a
+/// helper template used to implement RoundUpToPowerOfTwo.
template<unsigned N, bool isPowerTwo>
-struct NextPowerOfTwoH {
+struct RoundUpToPowerOfTwoH {
enum { Val = N };
};
template<unsigned N>
-struct NextPowerOfTwoH<N, false> {
+struct RoundUpToPowerOfTwoH<N, false> {
enum {
// We could just use NextVal = N+1, but this converges faster. N|(N-1) sets
// the right-most zero bits to one all at once, e.g. 0b0011000 -> 0b0011111.
- Val = NextPowerOfTwo<(N|(N-1)) + 1>::Val
+ Val = RoundUpToPowerOfTwo<(N|(N-1)) + 1>::Val
};
};
template<unsigned N>
-struct NextPowerOfTwo {
- enum { Val = NextPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
+struct RoundUpToPowerOfTwo {
+ enum { Val = RoundUpToPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
};
@@ -232,16 +232,17 @@ struct NextPowerOfTwo {
template<class PtrType, unsigned SmallSize>
class SmallPtrSet : public SmallPtrSetImpl {
// Make sure that SmallSize is a power of two, round up if not.
- enum { SmallSizePowTwo = NextPowerOfTwo<SmallSize>::Val };
- void *SmallArray[SmallSizePowTwo];
+ enum { SmallSizePowTwo = RoundUpToPowerOfTwo<SmallSize>::Val };
+ /// SmallStorage - Fixed size storage used in 'small mode'. The extra element
+ /// ensures that the end iterator actually points to valid memory.
+ const void *SmallStorage[SmallSizePowTwo+1];
typedef PointerLikeTypeTraits<PtrType> PtrTraits;
public:
- SmallPtrSet() : SmallPtrSetImpl(NextPowerOfTwo<SmallSizePowTwo>::Val) {}
- SmallPtrSet(const SmallPtrSet &that) : SmallPtrSetImpl(that) {}
+ SmallPtrSet() : SmallPtrSetImpl(SmallStorage, SmallSizePowTwo) {}
+ SmallPtrSet(const SmallPtrSet &that) : SmallPtrSetImpl(SmallStorage, that) {}
template<typename It>
- SmallPtrSet(It I, It E)
- : SmallPtrSetImpl(NextPowerOfTwo<SmallSizePowTwo>::Val) {
+ SmallPtrSet(It I, It E) : SmallPtrSetImpl(SmallStorage, SmallSizePowTwo) {
insert(I, E);
}
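
The rename above is purely cosmetic, but the recurrence it documents is easy to check; a standalone constexpr restatement of the same rounding rule (a sketch of the idea, not the template in SmallPtrSet.h):

#include <cassert>

// N | (N-1) sets every bit below the highest set bit, so adding one lands on
// the next power of two; a value that is already a power of two is returned
// unchanged.
constexpr unsigned roundUpToPowerOfTwo(unsigned N) {
  return (N & (N - 1)) == 0 ? N : roundUpToPowerOfTwo((N | (N - 1)) + 1);
}

static_assert(roundUpToPowerOfTwo(1) == 1, "already a power of two");
static_assert(roundUpToPowerOfTwo(5) == 8, "rounds up");
static_assert(roundUpToPowerOfTwo(8) == 8, "unchanged");
static_assert(roundUpToPowerOfTwo(9) == 16, "rounds up");

int main() { assert(roundUpToPowerOfTwo(33) == 64); }
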
diff --git a/libclamav/c++/llvm/include/llvm/ADT/SmallVector.h b/libclamav/c++/llvm/include/llvm/ADT/SmallVector.h
index 6299739..9c5d380 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/SmallVector.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/SmallVector.h
@@ -17,6 +17,8 @@
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
+#include <cstddef>
+#include <cstdlib>
#include <cstring>
#include <memory>
@@ -69,35 +71,35 @@ protected:
} FirstEl;
#endif
// Space after 'FirstEl' is clobbered, do not add any instance vars after it.
-
+
protected:
SmallVectorBase(size_t Size)
: BeginX(&FirstEl), EndX(&FirstEl), CapacityX((char*)&FirstEl+Size) {}
-
+
/// isSmall - Return true if this is a smallvector which has not had dynamic
/// memory allocated for it.
bool isSmall() const {
return BeginX == static_cast<const void*>(&FirstEl);
}
-
+
/// size_in_bytes - This returns size()*sizeof(T).
size_t size_in_bytes() const {
return size_t((char*)EndX - (char*)BeginX);
}
-
+
/// capacity_in_bytes - This returns capacity()*sizeof(T).
size_t capacity_in_bytes() const {
return size_t((char*)CapacityX - (char*)BeginX);
}
-
+
/// grow_pod - This is an implementation of the grow() method which only works
/// on POD-like datatypes and is out of line to reduce code duplication.
void grow_pod(size_t MinSizeInBytes, size_t TSize);
-
+
public:
bool empty() const { return BeginX == EndX; }
};
-
+
template <typename T>
class SmallVectorTemplateCommon : public SmallVectorBase {
@@ -105,21 +107,21 @@ protected:
void setEnd(T *P) { this->EndX = P; }
public:
SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(Size) {}
-
+
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T value_type;
typedef T *iterator;
typedef const T *const_iterator;
-
+
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
-
+
typedef T &reference;
typedef const T &const_reference;
typedef T *pointer;
typedef const T *const_pointer;
-
+
// forward iterator creation methods.
iterator begin() { return (iterator)this->BeginX; }
const_iterator begin() const { return (const_iterator)this->BeginX; }
@@ -129,7 +131,7 @@ protected:
iterator capacity_ptr() { return (iterator)this->CapacityX; }
const_iterator capacity_ptr() const { return (const_iterator)this->CapacityX;}
public:
-
+
// reverse iterator creation methods.
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
@@ -138,16 +140,16 @@ public:
size_type size() const { return end()-begin(); }
size_type max_size() const { return size_type(-1) / sizeof(T); }
-
+
/// capacity - Return the total number of elements in the currently allocated
/// buffer.
size_t capacity() const { return capacity_ptr() - begin(); }
-
+
/// data - Return a pointer to the vector's buffer, even if empty().
pointer data() { return pointer(begin()); }
/// data - Return a pointer to the vector's buffer, even if empty().
const_pointer data() const { return const_pointer(begin()); }
-
+
reference operator[](unsigned idx) {
assert(begin() + idx < end());
return begin()[idx];
@@ -171,7 +173,7 @@ public:
return end()[-1];
}
};
-
+
/// SmallVectorTemplateBase<isPodLike = false> - This is where we put method
/// implementations that are designed to work with non-POD-like T's.
template <typename T, bool isPodLike>
@@ -185,14 +187,14 @@ public:
E->~T();
}
}
-
+
/// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
std::uninitialized_copy(I, E, Dest);
}
-
+
/// grow - double the size of the allocated memory, guaranteeing space for at
/// least one more element or MinSize if specified.
void grow(size_t MinSize = 0);
@@ -203,79 +205,90 @@ template <typename T, bool isPodLike>
void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
size_t CurCapacity = this->capacity();
size_t CurSize = this->size();
- size_t NewCapacity = 2*CurCapacity;
+ size_t NewCapacity = 2*CurCapacity + 1; // Always grow, even from zero.
if (NewCapacity < MinSize)
NewCapacity = MinSize;
- T *NewElts = static_cast<T*>(operator new(NewCapacity*sizeof(T)));
-
+ T *NewElts = static_cast<T*>(malloc(NewCapacity*sizeof(T)));
+
// Copy the elements over.
this->uninitialized_copy(this->begin(), this->end(), NewElts);
-
+
// Destroy the original elements.
destroy_range(this->begin(), this->end());
-
+
// If this wasn't grown from the inline copy, deallocate the old space.
if (!this->isSmall())
- operator delete(this->begin());
-
+ free(this->begin());
+
this->setEnd(NewElts+CurSize);
this->BeginX = NewElts;
this->CapacityX = this->begin()+NewCapacity;
}
-
-
+
+
/// SmallVectorTemplateBase<isPodLike = true> - This is where we put method
/// implementations that are designed to work with POD-like T's.
template <typename T>
class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
public:
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
-
+
// No need to do a destroy loop for POD's.
static void destroy_range(T *, T *) {}
-
+
/// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
- // Use memcpy for PODs: std::uninitialized_copy optimizes to memmove, memcpy
- // is better.
- memcpy(&*Dest, &*I, (E-I)*sizeof(T));
+ // Arbitrary iterator types; just use the basic implementation.
+ std::uninitialized_copy(I, E, Dest);
}
-
+
+ /// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
+ /// starting with "Dest", constructing elements into it as needed.
+ template<typename T1, typename T2>
+ static void uninitialized_copy(T1 *I, T1 *E, T2 *Dest) {
+ // Use memcpy for PODs iterated by pointers (which includes SmallVector
+ // iterators): std::uninitialized_copy optimizes to memmove, but we can
+ // use memcpy here.
+ memcpy(Dest, I, (E-I)*sizeof(T));
+ }
+
/// grow - double the size of the allocated memory, guaranteeing space for at
/// least one more element or MinSize if specified.
void grow(size_t MinSize = 0) {
this->grow_pod(MinSize*sizeof(T), sizeof(T));
}
};
-
-
+
+
/// SmallVectorImpl - This class consists of common code factored out of the
/// SmallVector class to reduce code duplication based on the SmallVector 'N'
/// template parameter.
template <typename T>
class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
typedef SmallVectorTemplateBase<T, isPodLike<T>::value > SuperClass;
+
+ SmallVectorImpl(const SmallVectorImpl&); // DISABLED.
public:
typedef typename SuperClass::iterator iterator;
typedef typename SuperClass::size_type size_type;
-
+
// Default ctor - Initialize to empty.
explicit SmallVectorImpl(unsigned N)
: SmallVectorTemplateBase<T, isPodLike<T>::value>(N*sizeof(T)) {
}
-
+
~SmallVectorImpl() {
// Destroy the constructed elements in the vector.
this->destroy_range(this->begin(), this->end());
-
+
// If this wasn't grown from the inline copy, deallocate the old space.
if (!this->isSmall())
- operator delete(this->begin());
+ free(this->begin());
}
-
-
+
+
void clear() {
this->destroy_range(this->begin(), this->end());
this->EndX = this->BeginX;
@@ -309,7 +322,7 @@ public:
if (this->capacity() < N)
this->grow(N);
}
-
+
void push_back(const T &Elt) {
if (this->EndX < this->CapacityX) {
Retry:
@@ -320,21 +333,21 @@ public:
this->grow();
goto Retry;
}
-
+
void pop_back() {
this->setEnd(this->end()-1);
this->end()->~T();
}
-
+
T pop_back_val() {
T Result = this->back();
pop_back();
return Result;
}
-
-
+
+
void swap(SmallVectorImpl &RHS);
-
+
/// append - Add the specified range to the end of the SmallVector.
///
template<typename in_iter>
@@ -343,26 +356,26 @@ public:
// Grow allocated space if needed.
if (NumInputs > size_type(this->capacity_ptr()-this->end()))
this->grow(this->size()+NumInputs);
-
+
// Copy the new elements over.
 // TODO: Need to compile-time dispatch on whether in_iter is a random access
// iterator to use the fast uninitialized_copy.
std::uninitialized_copy(in_start, in_end, this->end());
this->setEnd(this->end() + NumInputs);
}
-
+
/// append - Add the specified range to the end of the SmallVector.
///
void append(size_type NumInputs, const T &Elt) {
// Grow allocated space if needed.
if (NumInputs > size_type(this->capacity_ptr()-this->end()))
this->grow(this->size()+NumInputs);
-
+
// Copy the new elements over.
std::uninitialized_fill_n(this->end(), NumInputs, Elt);
this->setEnd(this->end() + NumInputs);
}
-
+
void assign(unsigned NumElts, const T &Elt) {
clear();
if (this->capacity() < NumElts)
@@ -370,7 +383,7 @@ public:
this->setEnd(this->begin()+NumElts);
construct_range(this->begin(), this->end(), Elt);
}
-
+
iterator erase(iterator I) {
iterator N = I;
// Shift all elts down one.
@@ -379,7 +392,7 @@ public:
pop_back();
return(N);
}
-
+
iterator erase(iterator S, iterator E) {
iterator N = S;
// Shift all elts down.
@@ -389,13 +402,13 @@ public:
this->setEnd(I);
return(N);
}
-
+
iterator insert(iterator I, const T &Elt) {
if (I == this->end()) { // Important special case for empty vector.
push_back(Elt);
return this->end()-1;
}
-
+
if (this->EndX < this->CapacityX) {
Retry:
new (this->end()) T(this->back());
@@ -410,22 +423,22 @@ public:
I = this->begin()+EltNo;
goto Retry;
}
-
+
iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
if (I == this->end()) { // Important special case for empty vector.
append(NumToInsert, Elt);
return this->end()-1;
}
-
+
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
-
+
// Ensure there is enough space.
reserve(static_cast<unsigned>(this->size() + NumToInsert));
-
+
// Uninvalidate the iterator.
I = this->begin()+InsertElt;
-
+
// If there are more elements between the insertion point and the end of the
// range than there are being inserted, we can use a simple approach to
// insertion. Since we already reserved space, we know that this won't
@@ -433,48 +446,48 @@ public:
if (size_t(this->end()-I) >= NumToInsert) {
T *OldEnd = this->end();
append(this->end()-NumToInsert, this->end());
-
+
// Copy the existing elements that get replaced.
std::copy_backward(I, OldEnd-NumToInsert, OldEnd);
-
+
std::fill_n(I, NumToInsert, Elt);
return I;
}
-
+
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
-
+
// Copy over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten);
-
+
// Replace the overwritten part.
std::fill_n(I, NumOverwritten, Elt);
-
+
// Insert the non-overwritten middle part.
std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
return I;
}
-
+
template<typename ItTy>
iterator insert(iterator I, ItTy From, ItTy To) {
if (I == this->end()) { // Important special case for empty vector.
append(From, To);
return this->end()-1;
}
-
+
size_t NumToInsert = std::distance(From, To);
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
-
+
// Ensure there is enough space.
reserve(static_cast<unsigned>(this->size() + NumToInsert));
-
+
// Uninvalidate the iterator.
I = this->begin()+InsertElt;
-
+
// If there are more elements between the insertion point and the end of the
// range than there are being inserted, we can use a simple approach to
// insertion. Since we already reserved space, we know that this won't
@@ -482,34 +495,37 @@ public:
if (size_t(this->end()-I) >= NumToInsert) {
T *OldEnd = this->end();
append(this->end()-NumToInsert, this->end());
-
+
// Copy the existing elements that get replaced.
std::copy_backward(I, OldEnd-NumToInsert, OldEnd);
-
+
std::copy(From, To, I);
return I;
}
-
+
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
-
+
// Copy over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten);
-
+
// Replace the overwritten part.
- std::copy(From, From+NumOverwritten, I);
-
+ for (; NumOverwritten > 0; --NumOverwritten) {
+ *I = *From;
+ ++I; ++From;
+ }
+
// Insert the non-overwritten middle part.
- this->uninitialized_copy(From+NumOverwritten, To, OldEnd);
+ this->uninitialized_copy(From, To, OldEnd);
return I;
}
-
+
const SmallVectorImpl
&operator=(const SmallVectorImpl &RHS);
-
+
bool operator==(const SmallVectorImpl &RHS) const {
if (this->size() != RHS.size()) return false;
return std::equal(this->begin(), this->end(), RHS.begin());
@@ -517,12 +533,12 @@ public:
bool operator!=(const SmallVectorImpl &RHS) const {
return !(*this == RHS);
}
-
+
bool operator<(const SmallVectorImpl &RHS) const {
return std::lexicographical_compare(this->begin(), this->end(),
RHS.begin(), RHS.end());
}
-
+
/// set_size - Set the array size to \arg N, which the current array must have
/// enough capacity for.
///
@@ -536,14 +552,14 @@ public:
assert(N <= this->capacity());
this->setEnd(this->begin() + N);
}
-
+
private:
static void construct_range(T *S, T *E, const T &Elt) {
for (; S != E; ++S)
new (S) T(Elt);
}
};
-
+
template <typename T>
void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
@@ -690,6 +706,36 @@ public:
};
+/// Specialize SmallVector at N=0. This specialization guarantees
+/// that it can be instantiated at an incomplete T if none of its
+/// members are required.
+template <typename T>
+class SmallVector<T,0> : public SmallVectorImpl<T> {
+public:
+ SmallVector() : SmallVectorImpl<T>(0) {}
+
+ explicit SmallVector(unsigned Size, const T &Value = T())
+ : SmallVectorImpl<T>(0) {
+ this->reserve(Size);
+ while (Size--)
+ this->push_back(Value);
+ }
+
+ template<typename ItTy>
+ SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(0) {
+ this->append(S, E);
+ }
+
+ SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(0) {
+ SmallVectorImpl<T>::operator=(RHS);
+ }
+
+ SmallVector &operator=(const SmallVectorImpl<T> &RHS) {
+ return SmallVectorImpl<T>::operator=(RHS);
+ }
+
+};
+
} // End llvm namespace
namespace std {
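
The pair of uninitialized_copy overloads added above relies on ordinary overload resolution: the pointer form is more specialized, so raw-pointer ranges of PODs take the memcpy path while other iterators fall back to std::uninitialized_copy. A standalone sketch of that dispatch (names are illustrative, not the LLVM ones; the real code uses the vector's element type T for sizeof):

#include <cstdio>
#include <cstring>
#include <list>
#include <memory>

template <typename It1, typename It2>
void copy_uninit(It1 I, It1 E, It2 Dest) {
  std::uninitialized_copy(I, E, Dest);        // generic iterator path
  std::puts("generic uninitialized_copy");
}

template <typename T1, typename T2>
void copy_uninit(T1 *I, T1 *E, T2 *Dest) {
  std::memcpy(Dest, I, (E - I) * sizeof(T1)); // POD fast path for pointers
  std::puts("memcpy fast path");
}

int main() {
  int Src[4] = {1, 2, 3, 4}, Dst[4];
  copy_uninit(Src, Src + 4, Dst);             // picks the pointer overload

  std::list<int> L(Src, Src + 4);
  int Dst2[4];
  copy_uninit(L.begin(), L.end(), Dst2);      // picks the generic overload
}
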
diff --git a/libclamav/c++/llvm/include/llvm/ADT/SparseBitVector.h b/libclamav/c++/llvm/include/llvm/ADT/SparseBitVector.h
index 6c813ec..0862981 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/SparseBitVector.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/SparseBitVector.h
@@ -889,13 +889,17 @@ operator-(const SparseBitVector<ElementSize> &LHS,
// Dump a SparseBitVector to a stream
template <unsigned ElementSize>
void dump(const SparseBitVector<ElementSize> &LHS, raw_ostream &out) {
- out << "[ ";
-
- typename SparseBitVector<ElementSize>::iterator bi;
- for (bi = LHS.begin(); bi != LHS.end(); ++bi) {
- out << *bi << " ";
+ out << "[";
+
+ typename SparseBitVector<ElementSize>::iterator bi = LHS.begin(),
+ be = LHS.end();
+ if (bi != be) {
+ out << *bi;
+ for (++bi; bi != be; ++bi) {
+ out << " " << *bi;
+ }
}
- out << " ]\n";
+ out << "]\n";
}
} // end namespace llvm
diff --git a/libclamav/c++/llvm/include/llvm/ADT/Statistic.h b/libclamav/c++/llvm/include/llvm/ADT/Statistic.h
index 1a4833c..3a1319f 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/Statistic.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/Statistic.h
@@ -29,6 +29,7 @@
#include "llvm/System/Atomic.h"
namespace llvm {
+class raw_ostream;
class Statistic {
public:
@@ -55,6 +56,10 @@ public:
}
const Statistic &operator++() {
+ // FIXME: This function and all those that follow carefully use an
+ // atomic operation to update the value safely in the presence of
+ // concurrent accesses, but not to read the return value, so the
+ // return value is not thread safe.
sys::AtomicIncrement(&Value);
return init();
}
@@ -113,6 +118,15 @@ protected:
#define STATISTIC(VARNAME, DESC) \
static llvm::Statistic VARNAME = { DEBUG_TYPE, DESC, 0, 0 }
+/// \brief Enable the collection and printing of statistics.
+void EnableStatistics();
+
+/// \brief Print statistics to the file returned by CreateInfoOutputFile().
+void PrintStatistics();
+
+/// \brief Print statistics to the given output stream.
+void PrintStatistics(raw_ostream &OS);
+
} // End llvm namespace
#endif
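
A hedged usage sketch of the declarations added above, assuming an LLVM 2.8 build environment with these headers on the include path; the STATISTIC macro and llvm::errs() come from the surrounding tree, and the counter and function names here are invented:

#define DEBUG_TYPE "demo"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"

STATISTIC(NumWidgets, "Number of widgets processed");

void processWidgets(unsigned N) {
  llvm::EnableStatistics();            // turn collection on programmatically
  for (unsigned i = 0; i != N; ++i)
    ++NumWidgets;                      // atomic increment, per the FIXME above
  llvm::PrintStatistics(llvm::errs());
}
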
diff --git a/libclamav/c++/llvm/include/llvm/ADT/StringExtras.h b/libclamav/c++/llvm/include/llvm/ADT/StringExtras.h
index 1ea546f..3c53ade 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/StringExtras.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/StringExtras.h
@@ -57,15 +57,14 @@ static inline char *utohex_buffer(IntTy X, char *BufferEnd) {
}
static inline std::string utohexstr(uint64_t X) {
- char Buffer[40];
- return utohex_buffer(X, Buffer+40);
+ char Buffer[17];
+ return utohex_buffer(X, Buffer+17);
}
static inline std::string utostr_32(uint32_t X, bool isNeg = false) {
- char Buffer[20];
- char *BufPtr = Buffer+19;
+ char Buffer[11];
+ char *BufPtr = Buffer+11;
- *BufPtr = 0; // Null terminate buffer...
if (X == 0) *--BufPtr = '0'; // Handle special case...
while (X) {
@@ -75,17 +74,13 @@ static inline std::string utostr_32(uint32_t X, bool isNeg = false) {
if (isNeg) *--BufPtr = '-'; // Add negative sign...
- return std::string(BufPtr);
+ return std::string(BufPtr, Buffer+11);
}
static inline std::string utostr(uint64_t X, bool isNeg = false) {
- if (X == uint32_t(X))
- return utostr_32(uint32_t(X), isNeg);
+ char Buffer[21];
+ char *BufPtr = Buffer+21;
- char Buffer[40];
- char *BufPtr = Buffer+39;
-
- *BufPtr = 0; // Null terminate buffer...
if (X == 0) *--BufPtr = '0'; // Handle special case...
while (X) {
@@ -94,7 +89,7 @@ static inline std::string utostr(uint64_t X, bool isNeg = false) {
}
if (isNeg) *--BufPtr = '-'; // Add negative sign...
- return std::string(BufPtr);
+ return std::string(BufPtr, Buffer+21);
}
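
The shrunken buffers above work because the string is now built from an explicit (begin, end) range rather than relying on a NUL terminator, so the buffer only needs to hold the widest possible textual value. A standalone restatement of that pattern (not the header's code; 21 characters covers a sign plus the 20 digits of UINT64_MAX):

#include <cstdint>
#include <iostream>
#include <string>

static std::string u64_to_string(uint64_t X, bool isNeg = false) {
  char Buffer[21];
  char *BufPtr = Buffer + 21;              // one past the end; fill backwards
  if (X == 0) *--BufPtr = '0';
  while (X) {
    *--BufPtr = char('0' + X % 10);
    X /= 10;
  }
  if (isNeg) *--BufPtr = '-';
  return std::string(BufPtr, Buffer + 21); // range ctor, no NUL needed
}

int main() {
  std::cout << u64_to_string(18446744073709551615ULL) << "\n";      // 20 digits
  std::cout << u64_to_string(9223372036854775808ULL, true) << "\n"; // INT64_MIN
}
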
diff --git a/libclamav/c++/llvm/include/llvm/ADT/StringMap.h b/libclamav/c++/llvm/include/llvm/ADT/StringMap.h
index 86e8546..59ff6aa 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/StringMap.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/StringMap.h
@@ -216,6 +216,14 @@ public:
static const StringMapEntry &GetStringMapEntryFromValue(const ValueTy &V) {
return GetStringMapEntryFromValue(const_cast<ValueTy&>(V));
}
+
+ /// GetStringMapEntryFromKeyData - Given key data that is known to be embedded
+ /// into a StringMapEntry, return the StringMapEntry itself.
+ static StringMapEntry &GetStringMapEntryFromKeyData(const char *KeyData) {
+ char *Ptr = const_cast<char*>(KeyData) - sizeof(StringMapEntry<ValueTy>);
+ return *reinterpret_cast<StringMapEntry*>(Ptr);
+ }
+
/// Destroy - Destroy this StringMapEntry, releasing memory back to the
/// specified allocator.
@@ -246,6 +254,10 @@ public:
StringMap() : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {}
explicit StringMap(unsigned InitialSize)
: StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {}
+
+ explicit StringMap(AllocatorTy A)
+ : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), Allocator(A) {}
+
explicit StringMap(const StringMap &RHS)
: StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {
assert(RHS.empty() &&
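
GetStringMapEntryFromKeyData above assumes the entry header and its key bytes live in one allocation, with the key immediately after the header, so stepping back sizeof(entry) from the key pointer recovers the entry. A standalone sketch of that layout trick (all names invented; not the StringMap code):

#include <cassert>
#include <cstdlib>
#include <cstring>
#include <new>

struct Header { unsigned KeyLength; int Value; };

static Header *createEntry(const char *Key, int Value) {
  unsigned Len = unsigned(std::strlen(Key));
  void *Mem = std::malloc(sizeof(Header) + Len + 1);   // header + key bytes
  Header *H = new (Mem) Header();
  H->KeyLength = Len;
  H->Value = Value;
  std::memcpy(reinterpret_cast<char *>(H + 1), Key, Len + 1);
  return H;
}

static Header &entryFromKeyData(const char *KeyData) {
  // Key data sits right after the header, so back up by sizeof(Header).
  char *Ptr = const_cast<char *>(KeyData) - sizeof(Header);
  return *reinterpret_cast<Header *>(Ptr);
}

int main() {
  Header *H = createEntry("hello", 42);
  const char *Key = reinterpret_cast<const char *>(H + 1);
  assert(&entryFromKeyData(Key) == H && entryFromKeyData(Key).Value == 42);
  std::free(H);
}
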
diff --git a/libclamav/c++/llvm/include/llvm/ADT/StringRef.h b/libclamav/c++/llvm/include/llvm/ADT/StringRef.h
index 9257770..8386d3e 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/StringRef.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/StringRef.h
@@ -44,8 +44,8 @@ namespace llvm {
// Workaround PR5482: nearly all gcc 4.x miscompile StringRef and std::min()
// Changing the arg of min to be an integer, instead of a reference to an
// integer works around this bug.
- size_t min(size_t a, size_t b) const { return a < b ? a : b; }
- size_t max(size_t a, size_t b) const { return a > b ? a : b; }
+ static size_t min(size_t a, size_t b) { return a < b ? a : b; }
+ static size_t max(size_t a, size_t b) { return a > b ? a : b; }
public:
/// @name Constructors
@@ -128,6 +128,10 @@ namespace llvm {
/// compare_lower - Compare two strings, ignoring case.
int compare_lower(StringRef RHS) const;
+ /// compare_numeric - Compare two strings, treating sequences of digits as
+ /// numbers.
+ int compare_numeric(StringRef RHS) const;
+
/// \brief Determine the edit distance between this string and another
/// string.
///
@@ -145,7 +149,10 @@ namespace llvm {
unsigned edit_distance(StringRef Other, bool AllowReplacements = true);
/// str - Get the contents as an std::string.
- std::string str() const { return std::string(Data, Length); }
+ std::string str() const {
+ if (Data == 0) return std::string();
+ return std::string(Data, Length);
+ }
/// @}
/// @name Operator Overloads
@@ -224,12 +231,14 @@ namespace llvm {
/// find_first_of - Find the first character in the string that is \arg C,
/// or npos if not found. Same as find.
- size_type find_first_of(char C, size_t = 0) const { return find(C); }
+ size_type find_first_of(char C, size_t From = 0) const {
+ return find(C, From);
+ }
/// find_first_of - Find the first character in the string that is in \arg
/// Chars, or npos if not found.
///
- /// Note: O(size() * Chars.size())
+ /// Note: O(size() + Chars.size())
size_type find_first_of(StringRef Chars, size_t From = 0) const;
/// find_first_not_of - Find the first character in the string that is not
@@ -239,7 +248,7 @@ namespace llvm {
/// find_first_not_of - Find the first character in the string that is not
/// in the string \arg Chars, or npos if not found.
///
- /// Note: O(size() * Chars.size())
+ /// Note: O(size() + Chars.size())
size_type find_first_not_of(StringRef Chars, size_t From = 0) const;
/// @}
diff --git a/libclamav/c++/llvm/include/llvm/ADT/StringSet.h b/libclamav/c++/llvm/include/llvm/ADT/StringSet.h
index 0004836..9c55f6b 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/StringSet.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/StringSet.h
@@ -15,7 +15,6 @@
#define LLVM_ADT_STRINGSET_H
#include "llvm/ADT/StringMap.h"
-#include <cassert>
namespace llvm {
@@ -26,10 +25,10 @@ namespace llvm {
class StringSet : public llvm::StringMap<char, AllocatorTy> {
typedef llvm::StringMap<char, AllocatorTy> base;
public:
- bool insert(const std::string& InLang) {
+ bool insert(StringRef InLang) {
assert(!InLang.empty());
- const char* KeyStart = &InLang[0];
- const char* KeyEnd = KeyStart + InLang.size();
+ const char *KeyStart = InLang.data();
+ const char *KeyEnd = KeyStart + InLang.size();
return base::insert(llvm::StringMapEntry<char>::
Create(KeyStart, KeyEnd, base::getAllocator(), '+'));
}
diff --git a/libclamav/c++/llvm/include/llvm/ADT/StringSwitch.h b/libclamav/c++/llvm/include/llvm/ADT/StringSwitch.h
index 7dd5647..7480583 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/StringSwitch.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/StringSwitch.h
@@ -61,6 +61,26 @@ public:
return *this;
}
+ template<unsigned N>
+ StringSwitch& EndsWith(const char (&S)[N], const T &Value) {
+ if (!Result && Str.size() >= N-1 &&
+ std::memcmp(S, Str.data() + Str.size() + 1 - N, N-1) == 0) {
+ Result = &Value;
+ }
+
+ return *this;
+ }
+
+ template<unsigned N>
+ StringSwitch& StartsWith(const char (&S)[N], const T &Value) {
+ if (!Result && Str.size() >= N-1 &&
+ std::memcmp(S, Str.data(), N-1) == 0) {
+ Result = &Value;
+ }
+
+ return *this;
+ }
+
template<unsigned N0, unsigned N1>
StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1],
const T& Value) {
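
A hedged usage sketch of the new StartsWith/EndsWith combinators, assuming the patched llvm/ADT/StringSwitch.h is on the include path; the enum, prefixes, and suffixes here are invented:

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

enum FileKind { CSource, CHeader, Makefile, Unknown };

static FileKind classify(llvm::StringRef Name) {
  return llvm::StringSwitch<FileKind>(Name)
      .EndsWith(".c", CSource)
      .EndsWith(".h", CHeader)
      .StartsWith("Makefile", Makefile)
      .Default(Unknown);
}
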
diff --git a/libclamav/c++/llvm/include/llvm/ADT/Triple.h b/libclamav/c++/llvm/include/llvm/ADT/Triple.h
index be31ea0..8dca3c1 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/Triple.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/Triple.h
@@ -24,7 +24,7 @@ class Twine;
/// Triple - Helper class for working with target triples.
///
-/// Target triples are strings in the format of:
+/// Target triples are strings in the canonical form:
/// ARCHITECTURE-VENDOR-OPERATING_SYSTEM
/// or
/// ARCHITECTURE-VENDOR-OPERATING_SYSTEM-ENVIRONMENT
@@ -35,20 +35,11 @@ class Twine;
/// from the components of the target triple to well known IDs.
///
/// At its core the Triple class is designed to be a wrapper for a triple
-/// string; it does not normally change or normalize the triple string, instead
-/// it provides additional APIs to parse normalized parts out of the triple.
+/// string; the constructor does not change or normalize the triple string.
+/// Clients that need to handle the non-canonical triples that users often
+/// specify should use the normalize method.
///
-/// One curiosity this implies is that for some odd triples the results of,
-/// e.g., getOSName() can be very different from the result of getOS(). For
-/// example, for 'i386-mingw32', getOS() will return MinGW32, but since
-/// getOSName() is purely based on the string structure that will return the
-/// empty string.
-///
-/// Clients should generally avoid using getOSName() and related APIs unless
-/// they are familiar with the triple format (this is particularly true when
-/// rewriting a triple).
-///
-/// See autoconf/config.guess for a glimpse into what they look like in
+/// See autoconf/config.guess for a glimpse into what triples look like in
/// practice.
class Triple {
public:
@@ -100,7 +91,8 @@ public:
Psp,
Solaris,
Win32,
- Haiku
+ Haiku,
+ Minix
};
private:
@@ -116,6 +108,9 @@ private:
mutable OSType OS;
bool isInitialized() const { return Arch != InvalidArch; }
+ static ArchType ParseArch(StringRef ArchName);
+ static VendorType ParseVendor(StringRef VendorName);
+ static OSType ParseOS(StringRef OSName);
void Parse() const;
public:
@@ -133,6 +128,16 @@ public:
}
/// @}
+ /// @name Normalization
+ /// @{
+
+ /// normalize - Turn an arbitrary machine specification into the canonical
+ /// triple form (or something sensible that the Triple class understands if
+ /// nothing better can reasonably be done). In particular, it handles the
+ /// common case in which otherwise valid components are in the wrong order.
+ static std::string normalize(StringRef Str);
+
+ /// @}
/// @name Typed Component Access
/// @{
@@ -242,8 +247,8 @@ public:
/// environment components with a single string.
void setOSAndEnvironmentName(StringRef Str);
- /// getArchNameForAssembler - Get an architecture name that is understood by the
- /// target assembler.
+ /// getArchNameForAssembler - Get an architecture name that is understood by
+ /// the target assembler.
const char *getArchNameForAssembler();
/// @}
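
A hedged sketch of how the new static normalize entry point is meant to be used, per the comment above: user-supplied, possibly misordered triples are normalized before being wrapped in a Triple. It assumes the patched llvm/ADT/Triple.h; no particular canonical output is asserted here:

#include "llvm/ADT/Triple.h"
#include <string>

static llvm::Triple makeTriple(const std::string &UserSpec) {
  // normalize() reorders otherwise-valid components into the canonical
  // ARCH-VENDOR-OS[-ENVIRONMENT] form before the Triple is constructed.
  return llvm::Triple(llvm::Triple::normalize(UserSpec));
}
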
diff --git a/libclamav/c++/llvm/include/llvm/ADT/Twine.h b/libclamav/c++/llvm/include/llvm/ADT/Twine.h
index 97e9df4..b519a3e 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/Twine.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/Twine.h
@@ -99,11 +99,12 @@ namespace llvm {
/// A pointer to a StringRef instance.
StringRefKind,
- /// A pointer to an unsigned int value, to render as an unsigned decimal
- /// integer.
+ /// An unsigned int value reinterpreted as a pointer, to render as an
+ /// unsigned decimal integer.
DecUIKind,
- /// A pointer to an int value, to render as a signed decimal integer.
+ /// An int value reinterpreted as a pointer, to render as a signed
+ /// decimal integer.
DecIKind,
/// A pointer to an unsigned long value, to render as an unsigned decimal
@@ -259,13 +260,13 @@ namespace llvm {
}
/// Construct a twine to print \arg Val as an unsigned decimal integer.
- explicit Twine(const unsigned int &Val)
- : LHS(&Val), LHSKind(DecUIKind), RHSKind(EmptyKind) {
+ explicit Twine(unsigned Val)
+ : LHS((void*)(intptr_t)Val), LHSKind(DecUIKind), RHSKind(EmptyKind) {
}
/// Construct a twine to print \arg Val as a signed decimal integer.
- explicit Twine(const int &Val)
- : LHS(&Val), LHSKind(DecIKind), RHSKind(EmptyKind) {
+ explicit Twine(int Val)
+ : LHS((void*)(intptr_t)Val), LHSKind(DecIKind), RHSKind(EmptyKind) {
}
/// Construct a twine to print \arg Val as an unsigned decimal integer.
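
The Twine change above switches small integers from pointer-to-int to value-in-the-pointer-slot storage, which removes the risk of keeping a reference to a temporary such as Twine(X + 1). A standalone sketch of that encoding (invented names; not the Twine code):

#include <cassert>
#include <cstdint>

struct Slot {
  const void *Ptr;
  static Slot fromInt(int V) {
    Slot S;
    S.Ptr = (void *)(intptr_t)V;   // the value lives in the slot itself
    return S;
  }
  int toInt() const { return int((intptr_t)Ptr); }
};

int main() {
  int X = 41;
  Slot S = Slot::fromInt(X + 1);   // safe: no reference to the temporary kept
  assert(S.toInt() == 42);
}
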
diff --git a/libclamav/c++/llvm/include/llvm/ADT/ValueMap.h b/libclamav/c++/llvm/include/llvm/ADT/ValueMap.h
index 6f57fe8..ded17fc 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/ValueMap.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/ValueMap.h
@@ -59,16 +59,16 @@ struct ValueMapConfig {
struct ExtraData {};
template<typename ExtraDataT>
- static void onRAUW(const ExtraDataT &Data, KeyT Old, KeyT New) {}
+ static void onRAUW(const ExtraDataT & /*Data*/, KeyT /*Old*/, KeyT /*New*/) {}
template<typename ExtraDataT>
- static void onDelete(const ExtraDataT &Data, KeyT Old) {}
+ static void onDelete(const ExtraDataT &/*Data*/, KeyT /*Old*/) {}
/// Returns a mutex that should be acquired around any changes to the map.
/// This is only acquired from the CallbackVH (and held around calls to onRAUW
/// and onDelete) and not inside other ValueMap methods. NULL means that no
/// mutex is necessary.
template<typename ExtraDataT>
- static sys::Mutex *getMutex(const ExtraDataT &Data) { return NULL; }
+ static sys::Mutex *getMutex(const ExtraDataT &/*Data*/) { return NULL; }
};
/// See the file comment.
@@ -82,13 +82,13 @@ class ValueMap {
typedef typename Config::ExtraData ExtraData;
MapT Map;
ExtraData Data;
+ ValueMap(const ValueMap&); // DO NOT IMPLEMENT
+ ValueMap& operator=(const ValueMap&); // DO NOT IMPLEMENT
public:
typedef KeyT key_type;
typedef ValueT mapped_type;
typedef std::pair<KeyT, ValueT> value_type;
- ValueMap(const ValueMap& Other) : Map(Other.Map), Data(Other.Data) {}
-
explicit ValueMap(unsigned NumInitBuckets = 64)
: Map(NumInitBuckets), Data() {}
explicit ValueMap(const ExtraData &Data, unsigned NumInitBuckets = 64)
@@ -149,7 +149,7 @@ public:
bool erase(const KeyT &Val) {
return Map.erase(Wrap(Val));
}
- bool erase(iterator I) {
+ void erase(iterator I) {
return Map.erase(I.base());
}
@@ -161,12 +161,6 @@ public:
return Map[Wrap(Key)];
}
- ValueMap& operator=(const ValueMap& Other) {
- Map = Other.Map;
- Data = Other.Data;
- return *this;
- }
-
/// isPointerIntoBucketsArray - Return true if the specified pointer points
/// somewhere into the ValueMap's array of buckets (i.e. either to a key or
/// value in the ValueMap).
@@ -250,12 +244,6 @@ public:
}
};
-
-template<typename KeyT, typename ValueT, typename Config, typename ValueInfoT>
-struct isPodLike<ValueMapCallbackVH<KeyT, ValueT, Config, ValueInfoT> > {
- static const bool value = true;
-};
-
template<typename KeyT, typename ValueT, typename Config, typename ValueInfoT>
struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config, ValueInfoT> > {
typedef ValueMapCallbackVH<KeyT, ValueT, Config, ValueInfoT> VH;
diff --git a/libclamav/c++/llvm/include/llvm/ADT/ilist.h b/libclamav/c++/llvm/include/llvm/ADT/ilist.h
index e4d26dd..4e3afe1 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/ilist.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/ilist.h
@@ -39,6 +39,7 @@
#define LLVM_ADT_ILIST_H
#include <cassert>
+#include <cstddef>
#include <iterator>
namespace llvm {
@@ -613,7 +614,6 @@ public:
template<class Pr3> void sort(Pr3 pred);
void sort() { sort(op_less); }
- void reverse();
};
diff --git a/libclamav/c++/llvm/include/llvm/ADT/ilist_node.h b/libclamav/c++/llvm/include/llvm/ADT/ilist_node.h
index da25f95..f008003 100644
--- a/libclamav/c++/llvm/include/llvm/ADT/ilist_node.h
+++ b/libclamav/c++/llvm/include/llvm/ADT/ilist_node.h
@@ -49,6 +49,56 @@ class ilist_node : private ilist_half_node<NodeTy> {
void setNext(NodeTy *N) { Next = N; }
protected:
ilist_node() : Next(0) {}
+
+public:
+ /// @name Adjacent Node Accessors
+ /// @{
+
+ /// \brief Get the previous node, or 0 for the list head.
+ NodeTy *getPrevNode() {
+ NodeTy *Prev = this->getPrev();
+
+ // Check for sentinel.
+ if (!Prev->getNext())
+ return 0;
+
+ return Prev;
+ }
+
+ /// \brief Get the previous node, or 0 for the list head.
+ const NodeTy *getPrevNode() const {
+ const NodeTy *Prev = this->getPrev();
+
+ // Check for sentinel.
+ if (!Prev->getNext())
+ return 0;
+
+ return Prev;
+ }
+
+ /// \brief Get the next node, or 0 for the list tail.
+ NodeTy *getNextNode() {
+ NodeTy *Next = getNext();
+
+ // Check for sentinel.
+ if (!Next->getNext())
+ return 0;
+
+ return Next;
+ }
+
+ /// \brief Get the next node, or 0 for the list tail.
+ const NodeTy *getNextNode() const {
+ const NodeTy *Next = getNext();
+
+ // Check for sentinel.
+ if (!Next->getNext())
+ return 0;
+
+ return Next;
+ }
+
+ /// @}
};
} // End llvm namespace
diff --git a/libclamav/c++/llvm/include/llvm/AbstractTypeUser.h b/libclamav/c++/llvm/include/llvm/AbstractTypeUser.h
index b6cceb4..81f5c5c 100644
--- a/libclamav/c++/llvm/include/llvm/AbstractTypeUser.h
+++ b/libclamav/c++/llvm/include/llvm/AbstractTypeUser.h
@@ -146,6 +146,7 @@ class PATypeHolder {
mutable const Type *Ty;
void destroy();
public:
+ PATypeHolder() : Ty(0) {}
PATypeHolder(const Type *ty) : Ty(ty) {
addRef();
}
@@ -153,7 +154,7 @@ public:
addRef();
}
- ~PATypeHolder() { if (Ty) dropRef(); }
+ ~PATypeHolder() { dropRef(); }
operator Type *() const { return get(); }
Type *get() const;
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/AliasAnalysis.h b/libclamav/c++/llvm/include/llvm/Analysis/AliasAnalysis.h
index 9f41135..ad68d48 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/AliasAnalysis.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -18,12 +18,9 @@
//
// This API represents memory as a (Pointer, Size) pair. The Pointer component
// specifies the base memory address of the region, the Size specifies how large
-// of an area is being queried. If Size is 0, two pointers only alias if they
-// are exactly equal. If size is greater than zero, but small, the two pointers
-// alias if the areas pointed to overlap. If the size is very large (ie, ~0U),
-// then the two pointers alias if they may be pointing to components of the same
-// memory object. Pointers that point to two completely different objects in
-// memory never alias, regardless of the value of the Size component.
+// of an area is being queried, or UnknownSize if the size is not known.
+// Pointers that point to two completely different objects in memory never
+// alias, regardless of the value of the Size component.
//
//===----------------------------------------------------------------------===//
@@ -46,8 +43,11 @@ class AnalysisUsage;
class AliasAnalysis {
protected:
const TargetData *TD;
+
+private:
AliasAnalysis *AA; // Previous Alias Analysis to chain to.
+protected:
/// InitializeAliasAnalysis - Subclasses must call this method to initialize
/// the AliasAnalysis interface before any other methods are called. This is
/// typically called by the run* methods of these subclasses. This may be
@@ -64,6 +64,11 @@ public:
AliasAnalysis() : TD(0), AA(0) {}
virtual ~AliasAnalysis(); // We want to be subclassed
+ /// UnknownSize - This is a special value which can be used with the
+ /// size arguments in alias queries to indicate that the caller does not
+ /// know the sizes of the potential memory references.
+ static unsigned const UnknownSize = ~0u;
+
/// getTargetData - Return a pointer to the current TargetData object, or
/// null if no TargetData object is available.
///
@@ -84,6 +89,9 @@ public:
/// if (AA.alias(P1, P2)) { ... }
/// to check to see if two pointers might alias.
///
+ /// See docs/AliasAnalysis.html for more information on the specific meanings
+ /// of these values.
+ ///
enum AliasResult { NoAlias = 0, MayAlias = 1, MustAlias = 2 };
/// alias - The main low level interface to the alias analysis implementation.
@@ -94,6 +102,11 @@ public:
virtual AliasResult alias(const Value *V1, unsigned V1Size,
const Value *V2, unsigned V2Size);
+ /// alias - A convenience wrapper for the case where the sizes are unknown.
+ AliasResult alias(const Value *V1, const Value *V2) {
+ return alias(V1, UnknownSize, V2, UnknownSize);
+ }
+
/// isNoAlias - A trivial helper function to check to see if the specified
/// pointers are no-alias.
bool isNoAlias(const Value *V1, unsigned V1Size,
@@ -130,17 +143,11 @@ public:
// AccessesArguments - This function accesses function arguments in well
// known (possibly volatile) ways, but does not access any other memory.
- //
- // Clients may use the Info parameter of getModRefBehavior to get specific
- // information about how pointer arguments are used.
AccessesArguments,
 // AccessesArgumentsAndGlobals - This function accesses function arguments
 // and global variables in well-known (possibly volatile) ways, but does
 // not access any other memory.
- //
- // Clients may use the Info parameter of getModRefBehavior to get specific
- // information about how pointer arguments are used.
AccessesArgumentsAndGlobals,
// OnlyReadsMemory - This function does not perform any non-local stores or
@@ -154,52 +161,17 @@ public:
UnknownModRefBehavior
};
- /// PointerAccessInfo - This struct is used to return results for pointers,
- /// globals, and the return value of a function.
- struct PointerAccessInfo {
- /// V - The value this record corresponds to. This may be an Argument for
- /// the function, a GlobalVariable, or null, corresponding to the return
- /// value for the function.
- Value *V;
-
- /// ModRefInfo - Whether the pointer is loaded or stored to/from.
- ///
- ModRefResult ModRefInfo;
-
- /// AccessType - Specific fine-grained access information for the argument.
- /// If none of these classifications is general enough, the
- /// getModRefBehavior method should not return AccessesArguments*. If a
- /// record is not returned for a particular argument, the argument is never
- /// dead and never dereferenced.
- enum AccessType {
- /// ScalarAccess - The pointer is dereferenced.
- ///
- ScalarAccess,
-
- /// ArrayAccess - The pointer is indexed through as an array of elements.
- ///
- ArrayAccess,
-
- /// ElementAccess ?? P->F only?
-
- /// CallsThrough - Indirect calls are made through the specified function
- /// pointer.
- CallsThrough
- };
- };
-
/// getModRefBehavior - Return the behavior when calling the given call site.
- virtual ModRefBehavior getModRefBehavior(CallSite CS,
- std::vector<PointerAccessInfo> *Info = 0);
+ virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS);
/// getModRefBehavior - Return the behavior when calling the given function.
/// For use when the call site is not known.
- virtual ModRefBehavior getModRefBehavior(Function *F,
- std::vector<PointerAccessInfo> *Info = 0);
+ virtual ModRefBehavior getModRefBehavior(const Function *F);
- /// getModRefBehavior - Return the modref behavior of the intrinsic with the
- /// given id.
- static ModRefBehavior getModRefBehavior(unsigned iid);
+ /// getIntrinsicModRefBehavior - Return the modref behavior of the intrinsic
+ /// with the given id. Most clients won't need this, because the regular
+ /// getModRefBehavior incorporates this information.
+ static ModRefBehavior getIntrinsicModRefBehavior(unsigned iid);
/// doesNotAccessMemory - If the specified call is known to never read or
/// write memory, return true. If the call only reads from known-constant
@@ -212,14 +184,14 @@ public:
///
/// This property corresponds to the GCC 'const' attribute.
///
- bool doesNotAccessMemory(CallSite CS) {
+ bool doesNotAccessMemory(ImmutableCallSite CS) {
return getModRefBehavior(CS) == DoesNotAccessMemory;
}
/// doesNotAccessMemory - If the specified function is known to never read or
/// write memory, return true. For use when the call site is not known.
///
- bool doesNotAccessMemory(Function *F) {
+ bool doesNotAccessMemory(const Function *F) {
return getModRefBehavior(F) == DoesNotAccessMemory;
}
@@ -232,7 +204,7 @@ public:
///
/// This property corresponds to the GCC 'pure' attribute.
///
- bool onlyReadsMemory(CallSite CS) {
+ bool onlyReadsMemory(ImmutableCallSite CS) {
ModRefBehavior MRB = getModRefBehavior(CS);
return MRB == DoesNotAccessMemory || MRB == OnlyReadsMemory;
}
@@ -241,7 +213,7 @@ public:
/// non-volatile memory (or not access memory at all), return true. For use
/// when the call site is not known.
///
- bool onlyReadsMemory(Function *F) {
+ bool onlyReadsMemory(const Function *F) {
ModRefBehavior MRB = getModRefBehavior(F);
return MRB == DoesNotAccessMemory || MRB == OnlyReadsMemory;
}
@@ -255,36 +227,36 @@ public:
/// a particular call site modifies or reads the memory specified by the
/// pointer.
///
- virtual ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
+ virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size);
/// getModRefInfo - Return information about whether two call sites may refer
- /// to the same set of memory locations. This function returns NoModRef if
- /// the two calls refer to disjoint memory locations, Ref if CS1 reads memory
- /// written by CS2, Mod if CS1 writes to memory read or written by CS2, or
- /// ModRef if CS1 might read or write memory accessed by CS2.
- ///
- virtual ModRefResult getModRefInfo(CallSite CS1, CallSite CS2);
+ /// to the same set of memory locations. See
+ /// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
+ /// for details.
+ virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2);
public:
/// Convenience functions...
- ModRefResult getModRefInfo(LoadInst *L, Value *P, unsigned Size);
- ModRefResult getModRefInfo(StoreInst *S, Value *P, unsigned Size);
- ModRefResult getModRefInfo(CallInst *C, Value *P, unsigned Size) {
- return getModRefInfo(CallSite(C), P, Size);
- }
- ModRefResult getModRefInfo(InvokeInst *I, Value *P, unsigned Size) {
- return getModRefInfo(CallSite(I), P, Size);
+ ModRefResult getModRefInfo(const LoadInst *L, const Value *P, unsigned Size);
+ ModRefResult getModRefInfo(const StoreInst *S, const Value *P, unsigned Size);
+ ModRefResult getModRefInfo(const VAArgInst* I, const Value* P, unsigned Size);
+ ModRefResult getModRefInfo(const CallInst *C, const Value *P, unsigned Size) {
+ return getModRefInfo(ImmutableCallSite(C), P, Size);
}
- ModRefResult getModRefInfo(VAArgInst* I, Value* P, unsigned Size) {
- return AliasAnalysis::ModRef;
+ ModRefResult getModRefInfo(const InvokeInst *I,
+ const Value *P, unsigned Size) {
+ return getModRefInfo(ImmutableCallSite(I), P, Size);
}
- ModRefResult getModRefInfo(Instruction *I, Value *P, unsigned Size) {
+ ModRefResult getModRefInfo(const Instruction *I,
+ const Value *P, unsigned Size) {
switch (I->getOpcode()) {
- case Instruction::VAArg: return getModRefInfo((VAArgInst*)I, P, Size);
- case Instruction::Load: return getModRefInfo((LoadInst*)I, P, Size);
- case Instruction::Store: return getModRefInfo((StoreInst*)I, P, Size);
- case Instruction::Call: return getModRefInfo((CallInst*)I, P, Size);
- case Instruction::Invoke: return getModRefInfo((InvokeInst*)I, P, Size);
+ case Instruction::VAArg: return getModRefInfo((const VAArgInst*)I, P,Size);
+ case Instruction::Load: return getModRefInfo((const LoadInst*)I, P, Size);
+ case Instruction::Store: return getModRefInfo((const StoreInst*)I, P,Size);
+ case Instruction::Call: return getModRefInfo((const CallInst*)I, P, Size);
+ case Instruction::Invoke: return getModRefInfo((const InvokeInst*)I,P,Size);
default: return NoModRef;
}
}
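
A hedged usage sketch of the UnknownSize constant and the new two-argument alias() convenience wrapper, assuming the patched llvm/Analysis/AliasAnalysis.h and an AliasAnalysis instance obtained from a pass; the helper name is invented:

#include "llvm/Analysis/AliasAnalysis.h"

// Conservative check: might these two pointers overlap at all? Using the
// size-less form is equivalent to passing AliasAnalysis::UnknownSize twice.
static bool mayOverlap(llvm::AliasAnalysis &AA,
                       const llvm::Value *A, const llvm::Value *B) {
  return AA.alias(A, B) != llvm::AliasAnalysis::NoAlias;
}
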
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/AliasSetTracker.h b/libclamav/c++/llvm/include/llvm/Analysis/AliasSetTracker.h
index 09f12ad..8e2f7fd 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/AliasSetTracker.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/AliasSetTracker.h
@@ -92,7 +92,8 @@ class AliasSet : public ilist_node<AliasSet> {
AliasSet *Forward; // Forwarding pointer.
AliasSet *Next, *Prev; // Doubly linked list of AliasSets.
- std::vector<CallSite> CallSites; // All calls & invokes in this alias set.
+ // All calls & invokes in this alias set.
+ std::vector<AssertingVH<Instruction> > CallSites;
// RefCount - Number of nodes pointing to this AliasSet plus the number of
// AliasSets forwarding to it.
@@ -127,6 +128,11 @@ class AliasSet : public ilist_node<AliasSet> {
removeFromTracker(AST);
}
+ CallSite getCallSite(unsigned i) const {
+ assert(i < CallSites.size());
+ return CallSite(CallSites[i]);
+ }
+
public:
/// Accessors...
bool isRef() const { return AccessTy & Refs; }
@@ -229,7 +235,7 @@ private:
void addCallSite(CallSite CS, AliasAnalysis &AA);
void removeCallSite(CallSite CS) {
for (size_t i = 0, e = CallSites.size(); i != e; ++i)
- if (CallSites[i].getInstruction() == CS.getInstruction()) {
+ if (CallSites[i] == CS.getInstruction()) {
CallSites[i] = CallSites.back();
CallSites.pop_back();
}
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/CFGPrinter.h b/libclamav/c++/llvm/include/llvm/Analysis/CFGPrinter.h
index 6ad2e5a..ac8f596 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/CFGPrinter.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/CFGPrinter.h
@@ -1,4 +1,4 @@
-//===-- CFGPrinter.h - CFG printer external interface ------------*- C++ -*-===//
+//===-- CFGPrinter.h - CFG printer external interface -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -43,8 +43,8 @@ struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits {
return OS.str();
}
- static std::string getCompleteNodeLabel(const BasicBlock *Node,
- const Function *Graph) {
+ static std::string getCompleteNodeLabel(const BasicBlock *Node,
+ const Function *Graph) {
std::string Str;
raw_string_ostream OS(Str);
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/CallGraph.h b/libclamav/c++/llvm/include/llvm/Analysis/CallGraph.h
index 287fe4f..a4884ed 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/CallGraph.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/CallGraph.h
@@ -187,6 +187,9 @@ public:
// CallGraphNode ctor - Create a node for the specified function.
inline CallGraphNode(Function *f) : F(f), NumReferences(0) {}
+ ~CallGraphNode() {
+ assert(NumReferences == 0 && "Node deleted while references remain");
+ }
//===---------------------------------------------------------------------
// Accessor methods.
@@ -277,6 +280,11 @@ public:
/// time, so it should be used sparingly.
void replaceCallEdge(CallSite CS, CallSite NewCS, CallGraphNode *NewNode);
+ /// allReferencesDropped - This is a special function that should only be
+ /// used by the CallGraph class.
+ void allReferencesDropped() {
+ NumReferences = 0;
+ }
};
//===----------------------------------------------------------------------===//
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/CaptureTracking.h b/libclamav/c++/llvm/include/llvm/Analysis/CaptureTracking.h
index 493ecf5..b3390f4 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/CaptureTracking.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/CaptureTracking.h
@@ -21,9 +21,9 @@ namespace llvm {
/// by the enclosing function (which is required to exist). This routine can
/// be expensive, so consider caching the results. The boolean ReturnCaptures
/// specifies whether returning the value (or part of it) from the function
- /// counts as capturing it or not. The boolean StoreCaptures specified whether
- /// storing the value (or part of it) into memory anywhere automatically
- /// counts as capturing it or not.
+ /// counts as capturing it or not. The boolean StoreCaptures specifies

+ /// whether storing the value (or part of it) into memory anywhere
+ /// automatically counts as capturing it or not.
bool PointerMayBeCaptured(const Value *V,
bool ReturnCaptures,
bool StoreCaptures);
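
A minimal sketch against the PointerMayBeCaptured() signature shown above; the helper name and the chosen flag values are illustrative, not part of the patch:

    #include "llvm/Analysis/CaptureTracking.h"
    #include "llvm/Value.h"

    // Ask whether Ptr can escape its function, counting returns as captures
    // but not plain stores into memory.
    bool mayEscape(const llvm::Value *Ptr) {
      return llvm::PointerMayBeCaptured(Ptr,
                                        /*ReturnCaptures=*/true,
                                        /*StoreCaptures=*/false);
    }
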
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/CodeMetrics.h b/libclamav/c++/llvm/include/llvm/Analysis/CodeMetrics.h
new file mode 100644
index 0000000..58096f1
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/Analysis/CodeMetrics.h
@@ -0,0 +1,72 @@
+//===- CodeMetrics.h - Measures the weight of a function---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements various weight measurements for a function, helping
+// the Inliner and PartialSpecialization decide whether to duplicate its
+// contents.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CODEMETRICS_H
+#define LLVM_ANALYSIS_CODEMETRICS_H
+
+namespace llvm {
+ // CodeMetrics - Calculate size and a few similar metrics for a set of
+ // basic blocks.
+ struct CodeMetrics {
+ /// NeverInline - True if this callee should never be inlined into a
+ /// caller.
+ // bool NeverInline;
+
+ // True if this function contains a call to setjmp or _setjmp
+ bool callsSetJmp;
+
+ // True if this function calls itself
+ bool isRecursive;
+
+ // True if this function contains one or more indirect branches
+ bool containsIndirectBr;
+
+ /// usesDynamicAlloca - True if this function calls alloca (in the C sense).
+ bool usesDynamicAlloca;
+
+ /// NumInsts, NumBlocks - Keep track of how large each function is, which
+ /// is used to estimate the code size cost of inlining it.
+ unsigned NumInsts, NumBlocks;
+
+ /// NumBBInsts - Keeps track of basic block code size estimates.
+ DenseMap<const BasicBlock *, unsigned> NumBBInsts;
+
+ /// NumCalls - Keep track of the number of calls to 'big' functions.
+ unsigned NumCalls;
+
+ /// NumVectorInsts - Keep track of how many instructions produce vector
+ /// values. The inliner is being more aggressive with inlining vector
+ /// kernels.
+ unsigned NumVectorInsts;
+
+ /// NumRets - Keep track of how many Ret instructions the block contains.
+ unsigned NumRets;
+
+ CodeMetrics() : callsSetJmp(false), isRecursive(false),
+ containsIndirectBr(false), usesDynamicAlloca(false),
+ NumInsts(0), NumBlocks(0), NumCalls(0), NumVectorInsts(0),
+ NumRets(0) {}
+
+ /// analyzeBasicBlock - Add information about the specified basic block
+ /// to the current structure.
+ void analyzeBasicBlock(const BasicBlock *BB);
+
+ /// analyzeFunction - Add information about the specified function
+ /// to the current structure.
+ void analyzeFunction(Function *F);
+ };
+}
+
+#endif
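
A short sketch built only from the struct declared above: collect size metrics for a function and veto duplication when any of the new safety flags is set. The helper name and the 200-instruction cap are illustrative choices, not part of the header:

    #include "llvm/Analysis/CodeMetrics.h"
    #include "llvm/Function.h"

    bool looksDuplicatable(llvm::Function *F) {
      llvm::CodeMetrics Metrics;
      Metrics.analyzeFunction(F);           // fills NumInsts and the flags below
      if (Metrics.callsSetJmp || Metrics.isRecursive || Metrics.containsIndirectBr)
        return false;                       // the flags this header introduces
      return Metrics.NumInsts < 200;        // arbitrary size threshold
    }
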
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h b/libclamav/c++/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h
index 4828eba..d8daf51 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h
@@ -22,7 +22,7 @@ template <class Analysis, bool Simple>
struct DOTGraphTraitsViewer : public FunctionPass {
std::string Name;
- DOTGraphTraitsViewer(std::string GraphName, const void *ID) : FunctionPass(ID) {
+ DOTGraphTraitsViewer(std::string GraphName, char &ID) : FunctionPass(ID) {
Name = GraphName;
}
@@ -48,7 +48,7 @@ struct DOTGraphTraitsPrinter : public FunctionPass {
std::string Name;
- DOTGraphTraitsPrinter(std::string GraphName, const void *ID)
+ DOTGraphTraitsPrinter(std::string GraphName, char &ID)
: FunctionPass(ID) {
Name = GraphName;
}
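
These two constructors show the pass-ID migration applied throughout this patch: passes now hand a reference to their static char ID to the base class instead of a const void*. A sketch of the new convention for a made-up pass (PrintNothing exists only to show the pattern):

    #include "llvm/Pass.h"
    #include "llvm/Function.h"

    namespace {
      struct PrintNothing : public llvm::FunctionPass {
        static char ID;                              // the address is the identity
        PrintNothing() : llvm::FunctionPass(ID) {}   // pass the ID by reference
        virtual bool runOnFunction(llvm::Function &) { return false; }
      };
    }
    char PrintNothing::ID = 0;
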
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/DebugInfo.h b/libclamav/c++/llvm/include/llvm/Analysis/DebugInfo.h
index f105c20..2d1418d 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/DebugInfo.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/DebugInfo.h
@@ -31,23 +31,23 @@ namespace llvm {
class Type;
class Value;
class DbgDeclareInst;
- class DebugLoc;
- struct DebugLocTracker;
class Instruction;
class MDNode;
class LLVMContext;
+ class raw_ostream;
+
+ class DIFile;
+ class DISubprogram;
+ class DILexicalBlock;
+ class DIVariable;
+ class DIType;
/// DIDescriptor - A thin wraper around MDNode to access encoded debug info.
/// This should not be stored in a container, because underly MDNode may
/// change in certain situations.
class DIDescriptor {
protected:
- MDNode *DbgNode;
-
- /// DIDescriptor constructor. If the specified node is non-null, check
- /// to make sure that the tag in the descriptor matches 'RequiredTag'. If
- /// not, the debug info is corrupt and we ignore it.
- DIDescriptor(MDNode *N, unsigned RequiredTag);
+ const MDNode *DbgNode;
StringRef getStringField(unsigned Elt) const;
unsigned getUnsignedField(unsigned Elt) const {
@@ -58,18 +58,26 @@ namespace llvm {
template <typename DescTy>
DescTy getFieldAs(unsigned Elt) const {
- return DescTy(getDescriptorField(Elt).getNode());
+ return DescTy(getDescriptorField(Elt));
}
GlobalVariable *getGlobalVariableField(unsigned Elt) const;
+ Constant *getConstantField(unsigned Elt) const;
+ Function *getFunctionField(unsigned Elt) const;
public:
explicit DIDescriptor() : DbgNode(0) {}
- explicit DIDescriptor(MDNode *N) : DbgNode(N) {}
+ explicit DIDescriptor(const MDNode *N) : DbgNode(N) {}
+ explicit DIDescriptor(const DIFile F);
+ explicit DIDescriptor(const DISubprogram F);
+ explicit DIDescriptor(const DILexicalBlock F);
+ explicit DIDescriptor(const DIVariable F);
+ explicit DIDescriptor(const DIType F);
- bool isNull() const { return DbgNode == 0; }
+ bool Verify() const { return DbgNode != 0; }
- MDNode *getNode() const { return DbgNode; }
+ operator MDNode *() const { return const_cast<MDNode*>(DbgNode); }
+ MDNode *operator ->() const { return const_cast<MDNode*>(DbgNode); }
unsigned getVersion() const {
return getUnsignedField(0) & LLVMDebugVersionMask;
@@ -79,10 +87,10 @@ namespace llvm {
return getUnsignedField(0) & ~LLVMDebugVersionMask;
}
- /// ValidDebugInfo - Return true if N represents valid debug info value.
- static bool ValidDebugInfo(MDNode *N, unsigned OptLevel);
+ /// print - print descriptor.
+ void print(raw_ostream &OS) const;
- /// dump - print descriptor.
+ /// dump - print descriptor to dbgs() with a newline.
void dump() const;
bool isDerivedType() const;
@@ -92,6 +100,7 @@ namespace llvm {
bool isSubprogram() const;
bool isGlobalVariable() const;
bool isScope() const;
+ bool isFile() const;
bool isCompileUnit() const;
bool isNameSpace() const;
bool isLexicalBlock() const;
@@ -104,8 +113,7 @@ namespace llvm {
/// DISubrange - This is used to represent ranges, for array bounds.
class DISubrange : public DIDescriptor {
public:
- explicit DISubrange(MDNode *N = 0)
- : DIDescriptor(N, dwarf::DW_TAG_subrange_type) {}
+ explicit DISubrange(const MDNode *N = 0) : DIDescriptor(N) {}
int64_t getLo() const { return (int64_t)getUInt64Field(1); }
int64_t getHi() const { return (int64_t)getUInt64Field(2); }
@@ -114,7 +122,7 @@ namespace llvm {
/// DIArray - This descriptor holds an array of descriptors.
class DIArray : public DIDescriptor {
public:
- explicit DIArray(MDNode *N = 0)
+ explicit DIArray(const MDNode *N = 0)
: DIDescriptor(N) {}
unsigned getNumElements() const;
@@ -126,10 +134,7 @@ namespace llvm {
/// DIScope - A base class for various scopes.
class DIScope : public DIDescriptor {
public:
- explicit DIScope(MDNode *N = 0) : DIDescriptor (N) {
- if (DbgNode && !isScope())
- DbgNode = 0;
- }
+ explicit DIScope(const MDNode *N = 0) : DIDescriptor (N) {}
virtual ~DIScope() {}
StringRef getFilename() const;
@@ -139,12 +144,9 @@ namespace llvm {
/// DICompileUnit - A wrapper for a compile unit.
class DICompileUnit : public DIScope {
public:
- explicit DICompileUnit(MDNode *N = 0) : DIScope(N) {
- if (DbgNode && !isCompileUnit())
- DbgNode = 0;
- }
+ explicit DICompileUnit(const MDNode *N = 0) : DIScope(N) {}
- unsigned getLanguage() const { return getUnsignedField(2); }
+ unsigned getLanguage() const { return getUnsignedField(2); }
StringRef getFilename() const { return getStringField(3); }
StringRef getDirectory() const { return getStringField(4); }
StringRef getProducer() const { return getStringField(5); }
@@ -166,17 +168,31 @@ namespace llvm {
/// Verify - Verify that a compile unit is well formed.
bool Verify() const;
- /// dump - print compile unit.
+ /// print - print compile unit.
+ void print(raw_ostream &OS) const;
+
+ /// dump - print compile unit to dbgs() with a newline.
void dump() const;
};
+ /// DIFile - This is a wrapper for a file.
+ class DIFile : public DIScope {
+ public:
+ explicit DIFile(const MDNode *N = 0) : DIScope(N) {
+ if (DbgNode && !isFile())
+ DbgNode = 0;
+ }
+ StringRef getFilename() const { return getStringField(1); }
+ StringRef getDirectory() const { return getStringField(2); }
+ DICompileUnit getCompileUnit() const{ return getFieldAs<DICompileUnit>(3); }
+ };
+
/// DIEnumerator - A wrapper for an enumerator (e.g. X and Y in 'enum {X,Y}').
/// FIXME: it seems strange that this doesn't have either a reference to the
/// type/precision or a file/line pair for location info.
class DIEnumerator : public DIDescriptor {
public:
- explicit DIEnumerator(MDNode *N = 0)
- : DIDescriptor(N, dwarf::DW_TAG_enumerator) {}
+ explicit DIEnumerator(const MDNode *N = 0) : DIDescriptor(N) {}
StringRef getName() const { return getStringField(1); }
uint64_t getEnumValue() const { return getUInt64Field(2); }
@@ -185,7 +201,7 @@ namespace llvm {
/// DIType - This is a wrapper for a type.
/// FIXME: Types should be factored much better so that CV qualifiers and
/// others do not require a huge and empty descriptor full of zeros.
- class DIType : public DIDescriptor {
+ class DIType : public DIScope {
public:
enum {
FlagPrivate = 1 << 0,
@@ -199,24 +215,28 @@ namespace llvm {
};
protected:
- DIType(MDNode *N, unsigned Tag)
- : DIDescriptor(N, Tag) {}
// This ctor is used when the Tag has already been validated by a derived
// ctor.
- DIType(MDNode *N, bool, bool) : DIDescriptor(N) {}
+ DIType(const MDNode *N, bool, bool) : DIScope(N) {}
public:
/// Verify - Verify that a type descriptor is well formed.
bool Verify() const;
public:
- explicit DIType(MDNode *N);
+ explicit DIType(const MDNode *N);
explicit DIType() {}
virtual ~DIType() {}
- DIDescriptor getContext() const { return getDescriptorField(1); }
+ DIScope getContext() const { return getFieldAs<DIScope>(1); }
StringRef getName() const { return getStringField(2); }
- DICompileUnit getCompileUnit() const{ return getFieldAs<DICompileUnit>(3); }
+ DICompileUnit getCompileUnit() const{
+ if (getVersion() == llvm::LLVMDebugVersion7)
+ return getFieldAs<DICompileUnit>(3);
+
+ DIFile F = getFieldAs<DIFile>(3);
+ return F.getCompileUnit();
+ }
unsigned getLineNumber() const { return getUnsignedField(4); }
uint64_t getSizeInBits() const { return getUInt64Field(5); }
uint64_t getAlignInBits() const { return getUInt64Field(6); }
@@ -246,20 +266,37 @@ namespace llvm {
bool isArtificial() const {
return (getFlags() & FlagArtificial) != 0;
}
+ bool isValid() const {
+ return DbgNode && (isBasicType() || isDerivedType() || isCompositeType());
+ }
+ StringRef getFilename() const { return getCompileUnit().getFilename();}
+ StringRef getDirectory() const { return getCompileUnit().getDirectory();}
+
+ /// replaceAllUsesWith - Replace all uses of debug info referenced by
+ /// this descriptor.
+ void replaceAllUsesWith(DIDescriptor &D);
- /// dump - print type.
+ /// print - print type.
+ void print(raw_ostream &OS) const;
+
+ /// dump - print type to dbgs() with a newline.
void dump() const;
};
/// DIBasicType - A basic type, like 'int' or 'float'.
class DIBasicType : public DIType {
public:
- explicit DIBasicType(MDNode *N = 0)
- : DIType(N, dwarf::DW_TAG_base_type) {}
+ explicit DIBasicType(const MDNode *N = 0) : DIType(N) {}
unsigned getEncoding() const { return getUnsignedField(9); }
- /// dump - print basic type.
+ /// Verify - Verify that a basic type descriptor is well formed.
+ bool Verify() const;
+
+ /// print - print basic type.
+ void print(raw_ostream &OS) const;
+
+ /// dump - print basic type to dbgs() with a newline.
void dump() const;
};
@@ -267,27 +304,26 @@ namespace llvm {
/// a typedef, a pointer or reference, etc.
class DIDerivedType : public DIType {
protected:
- explicit DIDerivedType(MDNode *N, bool, bool)
+ explicit DIDerivedType(const MDNode *N, bool, bool)
: DIType(N, true, true) {}
public:
- explicit DIDerivedType(MDNode *N = 0)
- : DIType(N, true, true) {
- if (DbgNode && !isDerivedType())
- DbgNode = 0;
- }
+ explicit DIDerivedType(const MDNode *N = 0)
+ : DIType(N, true, true) {}
DIType getTypeDerivedFrom() const { return getFieldAs<DIType>(9); }
/// getOriginalTypeSize - If this type is derived from a base type then
/// return base type size.
uint64_t getOriginalTypeSize() const;
- /// dump - print derived type.
- void dump() const;
- /// replaceAllUsesWith - Replace all uses of debug info referenced by
- /// this descriptor. After this completes, the current debug info value
- /// is erased.
- void replaceAllUsesWith(DIDescriptor &D);
+ /// Verify - Verify that a derived type descriptor is well formed.
+ bool Verify() const;
+
+ /// print - print derived type.
+ void print(raw_ostream &OS) const;
+
+ /// dump - print derived type to dbgs() with a newline.
+ void dump() const;
};
/// DICompositeType - This descriptor holds a type that can refer to multiple
@@ -295,7 +331,7 @@ namespace llvm {
/// FIXME: Why is this a DIDerivedType??
class DICompositeType : public DIDerivedType {
public:
- explicit DICompositeType(MDNode *N = 0)
+ explicit DICompositeType(const MDNode *N = 0)
: DIDerivedType(N, true, true) {
if (N && !isCompositeType())
DbgNode = 0;
@@ -310,49 +346,29 @@ namespace llvm {
/// Verify - Verify that a composite type descriptor is well formed.
bool Verify() const;
- /// dump - print composite type.
- void dump() const;
- };
-
- /// DIGlobal - This is a common class for global variables and subprograms.
- class DIGlobal : public DIDescriptor {
- protected:
- explicit DIGlobal(MDNode *N, unsigned RequiredTag)
- : DIDescriptor(N, RequiredTag) {}
-
- public:
- virtual ~DIGlobal() {}
-
- DIDescriptor getContext() const { return getDescriptorField(2); }
- StringRef getName() const { return getStringField(3); }
- StringRef getDisplayName() const { return getStringField(4); }
- StringRef getLinkageName() const { return getStringField(5); }
- DICompileUnit getCompileUnit() const{ return getFieldAs<DICompileUnit>(6); }
- unsigned getLineNumber() const { return getUnsignedField(7); }
- DIType getType() const { return getFieldAs<DIType>(8); }
-
- /// isLocalToUnit - Return true if this subprogram is local to the current
- /// compile unit, like 'static' in C.
- unsigned isLocalToUnit() const { return getUnsignedField(9); }
- unsigned isDefinition() const { return getUnsignedField(10); }
+ /// print - print composite type.
+ void print(raw_ostream &OS) const;
- /// dump - print global.
+ /// dump - print composite type to dbgs() with a newline.
void dump() const;
};
/// DISubprogram - This is a wrapper for a subprogram (e.g. a function).
class DISubprogram : public DIScope {
public:
- explicit DISubprogram(MDNode *N = 0) : DIScope(N) {
- if (DbgNode && !isSubprogram())
- DbgNode = 0;
- }
+ explicit DISubprogram(const MDNode *N = 0) : DIScope(N) {}
- DIDescriptor getContext() const { return getDescriptorField(2); }
+ DIScope getContext() const { return getFieldAs<DIScope>(2); }
StringRef getName() const { return getStringField(3); }
StringRef getDisplayName() const { return getStringField(4); }
StringRef getLinkageName() const { return getStringField(5); }
- DICompileUnit getCompileUnit() const{ return getFieldAs<DICompileUnit>(6); }
+ DICompileUnit getCompileUnit() const{
+ if (getVersion() == llvm::LLVMDebugVersion7)
+ return getFieldAs<DICompileUnit>(6);
+
+ DIFile F = getFieldAs<DIFile>(6);
+ return F.getCompileUnit();
+ }
unsigned getLineNumber() const { return getUnsignedField(7); }
DICompositeType getType() const { return getFieldAs<DICompositeType>(8); }
@@ -360,9 +376,9 @@ namespace llvm {
/// DIType or as DICompositeType.
StringRef getReturnTypeName() const {
DICompositeType DCT(getFieldAs<DICompositeType>(8));
- if (!DCT.isNull()) {
+ if (DCT.Verify()) {
DIArray A = DCT.getTypeArray();
- DIType T(A.getElement(0).getNode());
+ DIType T(A.getElement(0));
return T.getName();
}
DIType T(getFieldAs<DIType>(8));
@@ -381,33 +397,72 @@ namespace llvm {
return getFieldAs<DICompositeType>(13);
}
unsigned isArtificial() const { return getUnsignedField(14); }
+ unsigned isOptimized() const;
- StringRef getFilename() const { return getCompileUnit().getFilename();}
- StringRef getDirectory() const { return getCompileUnit().getDirectory();}
+ StringRef getFilename() const {
+ if (getVersion() == llvm::LLVMDebugVersion7)
+ return getCompileUnit().getFilename();
+
+ DIFile F = getFieldAs<DIFile>(6);
+ return F.getFilename();
+ }
+
+ StringRef getDirectory() const {
+ if (getVersion() == llvm::LLVMDebugVersion7)
+ return getCompileUnit().getDirectory();
+
+ DIFile F = getFieldAs<DIFile>(6);
+ return F.getDirectory();
+ }
/// Verify - Verify that a subprogram descriptor is well formed.
bool Verify() const;
- /// dump - print subprogram.
+ /// print - print subprogram.
+ void print(raw_ostream &OS) const;
+
+ /// dump - print subprogram to dbgs() with a newline.
void dump() const;
/// describes - Return true if this subprogram provides debugging
/// information for the function F.
bool describes(const Function *F);
+
+ Function *getFunction() const { return getFunctionField(16); }
};
/// DIGlobalVariable - This is a wrapper for a global variable.
- class DIGlobalVariable : public DIGlobal {
+ class DIGlobalVariable : public DIDescriptor {
public:
- explicit DIGlobalVariable(MDNode *N = 0)
- : DIGlobal(N, dwarf::DW_TAG_variable) {}
+ explicit DIGlobalVariable(const MDNode *N = 0) : DIDescriptor(N) {}
+
+ DIScope getContext() const { return getFieldAs<DIScope>(2); }
+ StringRef getName() const { return getStringField(3); }
+ StringRef getDisplayName() const { return getStringField(4); }
+ StringRef getLinkageName() const { return getStringField(5); }
+ DICompileUnit getCompileUnit() const{
+ if (getVersion() == llvm::LLVMDebugVersion7)
+ return getFieldAs<DICompileUnit>(6);
+
+ DIFile F = getFieldAs<DIFile>(6);
+ return F.getCompileUnit();
+ }
+
+ unsigned getLineNumber() const { return getUnsignedField(7); }
+ DIType getType() const { return getFieldAs<DIType>(8); }
+ unsigned isLocalToUnit() const { return getUnsignedField(9); }
+ unsigned isDefinition() const { return getUnsignedField(10); }
GlobalVariable *getGlobal() const { return getGlobalVariableField(11); }
+ Constant *getConstant() const { return getConstantField(11); }
/// Verify - Verify that a global variable descriptor is well formed.
bool Verify() const;
- /// dump - print global variable.
+ /// print - print global variable.
+ void print(raw_ostream &OS) const;
+
+ /// dump - print global variable to dbgs() with a newline.
void dump() const;
};
@@ -415,15 +470,18 @@ namespace llvm {
/// global etc).
class DIVariable : public DIDescriptor {
public:
- explicit DIVariable(MDNode *N = 0)
- : DIDescriptor(N) {
- if (DbgNode && !isVariable())
- DbgNode = 0;
- }
+ explicit DIVariable(const MDNode *N = 0)
+ : DIDescriptor(N) {}
- DIDescriptor getContext() const { return getDescriptorField(1); }
- StringRef getName() const { return getStringField(2); }
- DICompileUnit getCompileUnit() const{ return getFieldAs<DICompileUnit>(3); }
+ DIScope getContext() const { return getFieldAs<DIScope>(1); }
+ StringRef getName() const { return getStringField(2); }
+ DICompileUnit getCompileUnit() const{
+ if (getVersion() == llvm::LLVMDebugVersion7)
+ return getFieldAs<DICompileUnit>(3);
+
+ DIFile F = getFieldAs<DIFile>(3);
+ return F.getCompileUnit();
+ }
unsigned getLineNumber() const { return getUnsignedField(4); }
DIType getType() const { return getFieldAs<DIType>(5); }
@@ -448,45 +506,60 @@ namespace llvm {
return getType().isBlockByrefStruct();
}
- /// dump - print variable.
+ /// isInlinedFnArgument - Return true if this variable provides debugging
+ /// information for an inlined function's arguments.
+ bool isInlinedFnArgument(const Function *CurFn);
+
+ /// print - print variable.
+ void print(raw_ostream &OS) const;
+
+ /// dump - print variable to dbgs() with a newline.
void dump() const;
};
/// DILexicalBlock - This is a wrapper for a lexical block.
class DILexicalBlock : public DIScope {
public:
- explicit DILexicalBlock(MDNode *N = 0) : DIScope(N) {
- if (DbgNode && !isLexicalBlock())
- DbgNode = 0;
- }
+ explicit DILexicalBlock(const MDNode *N = 0) : DIScope(N) {}
DIScope getContext() const { return getFieldAs<DIScope>(1); }
- StringRef getDirectory() const { return getContext().getDirectory(); }
- StringRef getFilename() const { return getContext().getFilename(); }
unsigned getLineNumber() const { return getUnsignedField(2); }
unsigned getColumnNumber() const { return getUnsignedField(3); }
+ StringRef getDirectory() const {
+ DIFile F = getFieldAs<DIFile>(4);
+ StringRef dir = F.getDirectory();
+ return !dir.empty() ? dir : getContext().getDirectory();
+ }
+ StringRef getFilename() const {
+ DIFile F = getFieldAs<DIFile>(4);
+ StringRef filename = F.getFilename();
+ return !filename.empty() ? filename : getContext().getFilename();
+ }
};
/// DINameSpace - A wrapper for a C++ style name space.
class DINameSpace : public DIScope {
public:
- explicit DINameSpace(MDNode *N = 0) : DIScope(N) {
- if (DbgNode && !isNameSpace())
- DbgNode = 0;
- }
-
+ explicit DINameSpace(const MDNode *N = 0) : DIScope(N) {}
DIScope getContext() const { return getFieldAs<DIScope>(1); }
StringRef getName() const { return getStringField(2); }
StringRef getDirectory() const { return getContext().getDirectory(); }
StringRef getFilename() const { return getContext().getFilename(); }
- DICompileUnit getCompileUnit() const { return getFieldAs<DICompileUnit>(3);}
+ DICompileUnit getCompileUnit() const{
+ if (getVersion() == llvm::LLVMDebugVersion7)
+ return getFieldAs<DICompileUnit>(3);
+
+ DIFile F = getFieldAs<DIFile>(3);
+ return F.getCompileUnit();
+ }
unsigned getLineNumber() const { return getUnsignedField(4); }
+ bool Verify() const;
};
/// DILocation - This object holds location information. This object
/// is not associated with any DWARF tag.
class DILocation : public DIDescriptor {
public:
- explicit DILocation(MDNode *N) : DIDescriptor(N) { }
+ explicit DILocation(const MDNode *N) : DIDescriptor(N) { }
unsigned getLineNumber() const { return getUnsignedField(0); }
unsigned getColumnNumber() const { return getUnsignedField(1); }
@@ -494,6 +567,7 @@ namespace llvm {
DILocation getOrigLocation() const { return getFieldAs<DILocation>(3); }
StringRef getFilename() const { return getScope().getFilename(); }
StringRef getDirectory() const { return getScope().getDirectory(); }
+ bool Verify() const;
};
/// DIFactory - This object assists with the construction of the various
@@ -531,19 +605,23 @@ namespace llvm {
StringRef Flags = "",
unsigned RunTimeVer = 0);
+ /// CreateFile - Create a new descriptor for the specified file.
+ DIFile CreateFile(StringRef Filename, StringRef Directory,
+ DICompileUnit CU);
+
/// CreateEnumerator - Create a single enumerator value.
DIEnumerator CreateEnumerator(StringRef Name, uint64_t Val);
/// CreateBasicType - Create a basic type like int, float, etc.
DIBasicType CreateBasicType(DIDescriptor Context, StringRef Name,
- DICompileUnit CompileUnit, unsigned LineNumber,
+ DIFile F, unsigned LineNumber,
uint64_t SizeInBits, uint64_t AlignInBits,
uint64_t OffsetInBits, unsigned Flags,
unsigned Encoding);
/// CreateBasicType - Create a basic type like int, float, etc.
DIBasicType CreateBasicTypeEx(DIDescriptor Context, StringRef Name,
- DICompileUnit CompileUnit, unsigned LineNumber,
+ DIFile F, unsigned LineNumber,
Constant *SizeInBits, Constant *AlignInBits,
Constant *OffsetInBits, unsigned Flags,
unsigned Encoding);
@@ -552,7 +630,7 @@ namespace llvm {
/// pointer, typedef, etc.
DIDerivedType CreateDerivedType(unsigned Tag, DIDescriptor Context,
StringRef Name,
- DICompileUnit CompileUnit,
+ DIFile F,
unsigned LineNumber,
uint64_t SizeInBits, uint64_t AlignInBits,
uint64_t OffsetInBits, unsigned Flags,
@@ -561,17 +639,18 @@ namespace llvm {
/// CreateDerivedType - Create a derived type like const qualified type,
/// pointer, typedef, etc.
DIDerivedType CreateDerivedTypeEx(unsigned Tag, DIDescriptor Context,
- StringRef Name,
- DICompileUnit CompileUnit,
- unsigned LineNumber,
- Constant *SizeInBits, Constant *AlignInBits,
- Constant *OffsetInBits, unsigned Flags,
- DIType DerivedFrom);
+ StringRef Name,
+ DIFile F,
+ unsigned LineNumber,
+ Constant *SizeInBits,
+ Constant *AlignInBits,
+ Constant *OffsetInBits, unsigned Flags,
+ DIType DerivedFrom);
/// CreateCompositeType - Create a composite type like array, struct, etc.
DICompositeType CreateCompositeType(unsigned Tag, DIDescriptor Context,
StringRef Name,
- DICompileUnit CompileUnit,
+ DIFile F,
unsigned LineNumber,
uint64_t SizeInBits,
uint64_t AlignInBits,
@@ -581,33 +660,40 @@ namespace llvm {
unsigned RunTimeLang = 0,
MDNode *ContainingType = 0);
+ /// CreateTemporaryType - Create a temporary forward-declared type.
+ DIType CreateTemporaryType();
+
/// CreateArtificialType - Create a new DIType with "artificial" flag set.
DIType CreateArtificialType(DIType Ty);
/// CreateCompositeType - Create a composite type like array, struct, etc.
DICompositeType CreateCompositeTypeEx(unsigned Tag, DIDescriptor Context,
- StringRef Name,
- DICompileUnit CompileUnit,
- unsigned LineNumber,
- Constant *SizeInBits,
- Constant *AlignInBits,
- Constant *OffsetInBits, unsigned Flags,
- DIType DerivedFrom,
- DIArray Elements,
- unsigned RunTimeLang = 0);
+ StringRef Name,
+ DIFile F,
+ unsigned LineNumber,
+ Constant *SizeInBits,
+ Constant *AlignInBits,
+ Constant *OffsetInBits,
+ unsigned Flags,
+ DIType DerivedFrom,
+ DIArray Elements,
+ unsigned RunTimeLang = 0,
+ MDNode *ContainingType = 0);
/// CreateSubprogram - Create a new descriptor for the specified subprogram.
/// See comments in DISubprogram for descriptions of these fields.
DISubprogram CreateSubprogram(DIDescriptor Context, StringRef Name,
StringRef DisplayName,
StringRef LinkageName,
- DICompileUnit CompileUnit, unsigned LineNo,
+ DIFile F, unsigned LineNo,
DIType Ty, bool isLocalToUnit,
bool isDefinition,
unsigned VK = 0,
unsigned VIndex = 0,
DIType = DIType(),
- bool isArtificial = 0);
+ bool isArtificial = 0,
+ bool isOptimized = false,
+ Function *Fn = 0);
/// CreateSubprogramDefinition - Create new subprogram descriptor for the
/// given declaration.
@@ -618,33 +704,42 @@ namespace llvm {
CreateGlobalVariable(DIDescriptor Context, StringRef Name,
StringRef DisplayName,
StringRef LinkageName,
- DICompileUnit CompileUnit,
+ DIFile F,
unsigned LineNo, DIType Ty, bool isLocalToUnit,
bool isDefinition, llvm::GlobalVariable *GV);
+ /// CreateGlobalVariable - Create a new descriptor for the specified constant.
+ DIGlobalVariable
+ CreateGlobalVariable(DIDescriptor Context, StringRef Name,
+ StringRef DisplayName,
+ StringRef LinkageName,
+ DIFile F,
+ unsigned LineNo, DIType Ty, bool isLocalToUnit,
+ bool isDefinition, llvm::Constant *C);
+
/// CreateVariable - Create a new descriptor for the specified variable.
DIVariable CreateVariable(unsigned Tag, DIDescriptor Context,
StringRef Name,
- DICompileUnit CompileUnit, unsigned LineNo,
- DIType Ty);
+ DIFile F, unsigned LineNo,
+ DIType Ty, bool AlwaysPreserve = false);
/// CreateComplexVariable - Create a new descriptor for the specified
/// variable which has a complex address expression for its address.
DIVariable CreateComplexVariable(unsigned Tag, DIDescriptor Context,
const std::string &Name,
- DICompileUnit CompileUnit, unsigned LineNo,
+ DIFile F, unsigned LineNo,
DIType Ty,
SmallVector<Value *, 9> &addr);
/// CreateLexicalBlock - This creates a descriptor for a lexical block
/// with the specified parent context.
- DILexicalBlock CreateLexicalBlock(DIDescriptor Context, unsigned Line = 0,
- unsigned Col = 0);
+ DILexicalBlock CreateLexicalBlock(DIDescriptor Context, DIFile F,
+ unsigned Line = 0, unsigned Col = 0);
/// CreateNameSpace - This creates new descriptor for a namespace
/// with the specified parent context.
DINameSpace CreateNameSpace(DIDescriptor Context, StringRef Name,
- DICompileUnit CU, unsigned LineNo);
+ DIFile F, unsigned LineNo);
/// CreateLocation - Creates a debug info location.
DILocation CreateLocation(unsigned LineNo, unsigned ColumnNo,
@@ -677,13 +772,8 @@ namespace llvm {
std::string &Type, unsigned &LineNo, std::string &File,
std::string &Dir);
- /// ExtractDebugLocation - Extract debug location information
- /// from DILocation.
- DebugLoc ExtractDebugLocation(DILocation &Loc,
- DebugLocTracker &DebugLocInfo);
-
/// getDISubprogram - Find subprogram that is enclosing this scope.
- DISubprogram getDISubprogram(MDNode *Scope);
+ DISubprogram getDISubprogram(const MDNode *Scope);
/// getDICompositeType - Find underlying composite type.
DICompositeType getDICompositeType(DIType T);
@@ -723,20 +813,20 @@ namespace llvm {
bool addType(DIType DT);
public:
- typedef SmallVector<MDNode *, 8>::iterator iterator;
- iterator compile_unit_begin() { return CUs.begin(); }
- iterator compile_unit_end() { return CUs.end(); }
- iterator subprogram_begin() { return SPs.begin(); }
- iterator subprogram_end() { return SPs.end(); }
- iterator global_variable_begin() { return GVs.begin(); }
- iterator global_variable_end() { return GVs.end(); }
- iterator type_begin() { return TYs.begin(); }
- iterator type_end() { return TYs.end(); }
-
- unsigned compile_unit_count() { return CUs.size(); }
- unsigned global_variable_count() { return GVs.size(); }
- unsigned subprogram_count() { return SPs.size(); }
- unsigned type_count() { return TYs.size(); }
+ typedef SmallVector<MDNode *, 8>::const_iterator iterator;
+ iterator compile_unit_begin() const { return CUs.begin(); }
+ iterator compile_unit_end() const { return CUs.end(); }
+ iterator subprogram_begin() const { return SPs.begin(); }
+ iterator subprogram_end() const { return SPs.end(); }
+ iterator global_variable_begin() const { return GVs.begin(); }
+ iterator global_variable_end() const { return GVs.end(); }
+ iterator type_begin() const { return TYs.begin(); }
+ iterator type_end() const { return TYs.end(); }
+
+ unsigned compile_unit_count() const { return CUs.size(); }
+ unsigned global_variable_count() const { return GVs.size(); }
+ unsigned subprogram_count() const { return SPs.size(); }
+ unsigned type_count() const { return TYs.size(); }
private:
SmallVector<MDNode *, 8> CUs; // Compile Units
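
A sketch of the revised DIDescriptor conventions above: wrappers are built from a const MDNode*, tested with Verify() (the old isNull() predicate is gone), and convert back to MDNode* implicitly. Only names visible in this hunk are used; the helper itself is illustrative:

    #include "llvm/Analysis/DebugInfo.h"
    #include "llvm/Support/raw_ostream.h"

    void printSubprogramLocation(const llvm::MDNode *N, llvm::raw_ostream &OS) {
      llvm::DISubprogram SP(N);
      if (!SP.Verify())                 // formerly: if (SP.isNull()) return;
        return;
      OS << SP.getName() << " (" << SP.getFilename() << ")\n";
    }
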
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/DominatorInternals.h b/libclamav/c++/llvm/include/llvm/Analysis/DominatorInternals.h
index 8cea96d..0419688 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/DominatorInternals.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/DominatorInternals.h
@@ -152,8 +152,9 @@ void Compress(DominatorTreeBase<typename GraphT::NodeType>& DT,
}
template<class GraphT>
-typename GraphT::NodeType* Eval(DominatorTreeBase<typename GraphT::NodeType>& DT,
- typename GraphT::NodeType *V) {
+typename GraphT::NodeType*
+Eval(DominatorTreeBase<typename GraphT::NodeType>& DT,
+ typename GraphT::NodeType *V) {
typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInfo =
DT.Info[V];
#if !BALANCE_IDOM_TREE
@@ -265,14 +266,17 @@ void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
// initialize the semi dominator to point to the parent node
WInfo.Semi = WInfo.Parent;
- for (typename GraphTraits<Inverse<NodeT> >::ChildIteratorType CI =
- GraphTraits<Inverse<NodeT> >::child_begin(W),
- E = GraphTraits<Inverse<NodeT> >::child_end(W); CI != E; ++CI)
- if (DT.Info.count(*CI)) { // Only if this predecessor is reachable!
- unsigned SemiU = DT.Info[Eval<GraphT>(DT, *CI)].Semi;
+ typedef GraphTraits<Inverse<NodeT> > InvTraits;
+ for (typename InvTraits::ChildIteratorType CI =
+ InvTraits::child_begin(W),
+ E = InvTraits::child_end(W); CI != E; ++CI) {
+ typename InvTraits::NodeType *N = *CI;
+ if (DT.Info.count(N)) { // Only if this predecessor is reachable!
+ unsigned SemiU = DT.Info[Eval<GraphT>(DT, N)].Semi;
if (SemiU < WInfo.Semi)
WInfo.Semi = SemiU;
}
+ }
DT.Info[DT.Vertex[WInfo.Semi]].Bucket.push_back(W);
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/Dominators.h b/libclamav/c++/llvm/include/llvm/Analysis/Dominators.h
index 1e94f30..73c6e62 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/Dominators.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/Dominators.h
@@ -116,12 +116,12 @@ public:
return true;
SmallPtrSet<NodeT *, 4> OtherChildren;
- for(iterator I = Other->begin(), E = Other->end(); I != E; ++I) {
+ for (iterator I = Other->begin(), E = Other->end(); I != E; ++I) {
NodeT *Nd = (*I)->getBlock();
OtherChildren.insert(Nd);
}
- for(iterator I = begin(), E = end(); I != E; ++I) {
+ for (iterator I = begin(), E = end(); I != E; ++I) {
NodeT *N = (*I)->getBlock();
if (OtherChildren.count(N) == 0)
return true;
@@ -240,27 +240,31 @@ protected:
template<class N, class GraphT>
void Split(DominatorTreeBase<typename GraphT::NodeType>& DT,
typename GraphT::NodeType* NewBB) {
- assert(std::distance(GraphT::child_begin(NewBB), GraphT::child_end(NewBB)) == 1
- && "NewBB should have a single successor!");
+ assert(std::distance(GraphT::child_begin(NewBB),
+ GraphT::child_end(NewBB)) == 1 &&
+ "NewBB should have a single successor!");
typename GraphT::NodeType* NewBBSucc = *GraphT::child_begin(NewBB);
std::vector<typename GraphT::NodeType*> PredBlocks;
- for (typename GraphTraits<Inverse<N> >::ChildIteratorType PI =
- GraphTraits<Inverse<N> >::child_begin(NewBB),
- PE = GraphTraits<Inverse<N> >::child_end(NewBB); PI != PE; ++PI)
+ typedef GraphTraits<Inverse<N> > InvTraits;
+ for (typename InvTraits::ChildIteratorType PI =
+ InvTraits::child_begin(NewBB),
+ PE = InvTraits::child_end(NewBB); PI != PE; ++PI)
PredBlocks.push_back(*PI);
- assert(!PredBlocks.empty() && "No predblocks??");
+ assert(!PredBlocks.empty() && "No predblocks?");
bool NewBBDominatesNewBBSucc = true;
- for (typename GraphTraits<Inverse<N> >::ChildIteratorType PI =
- GraphTraits<Inverse<N> >::child_begin(NewBBSucc),
- E = GraphTraits<Inverse<N> >::child_end(NewBBSucc); PI != E; ++PI)
- if (*PI != NewBB && !DT.dominates(NewBBSucc, *PI) &&
- DT.isReachableFromEntry(*PI)) {
+ for (typename InvTraits::ChildIteratorType PI =
+ InvTraits::child_begin(NewBBSucc),
+ E = InvTraits::child_end(NewBBSucc); PI != E; ++PI) {
+ typename InvTraits::NodeType *ND = *PI;
+ if (ND != NewBB && !DT.dominates(NewBBSucc, ND) &&
+ DT.isReachableFromEntry(ND)) {
NewBBDominatesNewBBSucc = false;
break;
}
+ }
// Find NewBB's immediate dominator and create new dominator tree node for
// NewBB.
@@ -374,8 +378,8 @@ public:
/// isReachableFromEntry - Return true if A is dominated by the entry
/// block of the function containing it.
bool isReachableFromEntry(NodeT* A) {
- assert (!this->isPostDominator()
- && "This is not implemented for post dominators");
+ assert(!this->isPostDominator() &&
+ "This is not implemented for post dominators");
return dominates(&A->getParent()->front(), A);
}
@@ -393,8 +397,9 @@ public:
// Compare the result of the tree walk and the dfs numbers, if expensive
// checks are enabled.
#ifdef XDEBUG
- assert(!DFSInfoValid
- || (dominatedBySlowTreeWalk(A, B) == B->DominatedBy(A)));
+ assert((!DFSInfoValid ||
+ (dominatedBySlowTreeWalk(A, B) == B->DominatedBy(A))) &&
+ "Tree walk disagrees with dfs numbers!");
#endif
if (DFSInfoValid)
@@ -430,16 +435,16 @@ public:
/// findNearestCommonDominator - Find nearest common dominator basic block
/// for basic block A and B. If there is no such block then return NULL.
NodeT *findNearestCommonDominator(NodeT *A, NodeT *B) {
-
- assert (!this->isPostDominator()
- && "This is not implemented for post dominators");
- assert (A->getParent() == B->getParent()
- && "Two blocks are not in same function");
-
- // If either A or B is a entry block then it is nearest common dominator.
- NodeT &Entry = A->getParent()->front();
- if (A == &Entry || B == &Entry)
- return &Entry;
+ assert(A->getParent() == B->getParent() &&
+ "Two blocks are not in same function");
+
+ // If either A or B is a entry block then it is nearest common dominator
+ // (for forward-dominators).
+ if (!this->isPostDominator()) {
+ NodeT &Entry = A->getParent()->front();
+ if (A == &Entry || B == &Entry)
+ return &Entry;
+ }
// If B dominates A then B is nearest common dominator.
if (dominates(B, A))
@@ -463,7 +468,7 @@ public:
// Walk NodeB immediate dominators chain and find common dominator node.
DomTreeNodeBase<NodeT> *IDomB = NodeB->getIDom();
- while(IDomB) {
+ while (IDomB) {
if (NodeADoms.count(IDomB) != 0)
return IDomB->getBlock();
@@ -508,8 +513,8 @@ public:
/// children list. Deletes dominator node associated with basic block BB.
void eraseNode(NodeT *BB) {
DomTreeNodeBase<NodeT> *Node = getNode(BB);
- assert (Node && "Removing node that isn't in dominator tree.");
- assert (Node->getChildren().empty() && "Node is not a leaf node.");
+ assert(Node && "Removing node that isn't in dominator tree.");
+ assert(Node->getChildren().empty() && "Node is not a leaf node.");
// Remove node from immediate dominator's children list.
DomTreeNodeBase<NodeT> *IDom = Node->getIDom();
@@ -697,12 +702,11 @@ public:
static char ID; // Pass ID, replacement for typeid
DominatorTreeBase<BasicBlock>* DT;
- DominatorTree() : FunctionPass(&ID) {
+ DominatorTree() : FunctionPass(ID) {
DT = new DominatorTreeBase<BasicBlock>(false);
}
~DominatorTree() {
- DT->releaseMemory();
delete DT;
}
@@ -886,7 +890,7 @@ protected:
const bool IsPostDominators;
public:
- DominanceFrontierBase(void *ID, bool isPostDom)
+ DominanceFrontierBase(char &ID, bool isPostDom)
: FunctionPass(ID), IsPostDominators(isPostDom) {}
/// getRoots - Return the root blocks of the current CFG. This may include
@@ -952,7 +956,7 @@ public:
return true;
}
- if(!tmpSet.empty())
+ if (!tmpSet.empty())
// There are nodes that are in DS2 but not in DS1.
return true;
@@ -991,6 +995,9 @@ public:
/// print - Convert to human readable form
///
virtual void print(raw_ostream &OS, const Module* = 0) const;
+
+ /// dump - Dump the dominance frontier to dbgs().
+ void dump() const;
};
@@ -1002,7 +1009,7 @@ class DominanceFrontier : public DominanceFrontierBase {
public:
static char ID; // Pass ID, replacement for typeid
DominanceFrontier() :
- DominanceFrontierBase(&ID, false) {}
+ DominanceFrontierBase(ID, false) {}
BasicBlock *getRoot() const {
assert(Roots.size() == 1 && "Should always have entry node!");
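
With the change above, findNearestCommonDominator no longer special-cases the entry block for post-dominator trees, while forward-dominance callers keep the same contract (a null result means no common dominator). A hedged wrapper sketch; the helper name is illustrative:

    #include "llvm/Analysis/Dominators.h"

    llvm::BasicBlock *nearestCommonDom(llvm::DominatorTree &DT,
                                       llvm::BasicBlock *A,
                                       llvm::BasicBlock *B) {
      return DT.findNearestCommonDominator(A, B);   // may be null
    }
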
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/FindUsedTypes.h b/libclamav/c++/llvm/include/llvm/Analysis/FindUsedTypes.h
index 1337385..8a78eb6 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/FindUsedTypes.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/FindUsedTypes.h
@@ -26,7 +26,7 @@ class FindUsedTypes : public ModulePass {
std::set<const Type *> UsedTypes;
public:
static char ID; // Pass identification, replacement for typeid
- FindUsedTypes() : ModulePass(&ID) {}
+ FindUsedTypes() : ModulePass(ID) {}
/// getTypes - After the pass has been run, return the set containing all of
/// the types used in the module.
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/IVUsers.h b/libclamav/c++/llvm/include/llvm/Analysis/IVUsers.h
index dc616ca..578e6ab 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/IVUsers.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/IVUsers.h
@@ -16,6 +16,7 @@
#define LLVM_ANALYSIS_IVUSERS_H
#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Support/ValueHandle.h"
namespace llvm {
@@ -26,17 +27,17 @@ class Value;
class IVUsers;
class ScalarEvolution;
class SCEV;
+class IVUsers;
/// IVStrideUse - Keep track of one use of a strided induction variable.
/// The Expr member keeps track of the expression, User is the actual user
/// instruction of the operand, and 'OperandValToReplace' is the operand of
/// the User that is the use.
class IVStrideUse : public CallbackVH, public ilist_node<IVStrideUse> {
+ friend class IVUsers;
public:
- IVStrideUse(IVUsers *P, const SCEV *S, const SCEV *Off,
- Instruction* U, Value *O)
- : CallbackVH(U), Parent(P), Stride(S), Offset(Off),
- OperandValToReplace(O), IsUseOfPostIncrementedValue(false) {
+ IVStrideUse(IVUsers *P, Instruction* U, Value *O)
+ : CallbackVH(U), Parent(P), OperandValToReplace(O) {
}
/// getUser - Return the user instruction for this use.
@@ -49,28 +50,6 @@ public:
setValPtr(NewUser);
}
- /// getParent - Return a pointer to the IVUsers that owns
- /// this IVStrideUse.
- IVUsers *getParent() const { return Parent; }
-
- /// getStride - Return the expression for the stride for the use.
- const SCEV *getStride() const { return Stride; }
-
- /// setStride - Assign a new stride to this use.
- void setStride(const SCEV *Val) {
- Stride = Val;
- }
-
- /// getOffset - Return the offset to add to a theoretical induction
- /// variable that starts at zero and counts up by the stride to compute
- /// the value for the use. This always has the same type as the stride.
- const SCEV *getOffset() const { return Offset; }
-
- /// setOffset - Assign a new offset to this use.
- void setOffset(const SCEV *Val) {
- Offset = Val;
- }
-
/// getOperandValToReplace - Return the Value of the operand in the user
/// instruction that this IVStrideUse is representing.
Value *getOperandValToReplace() const {
@@ -83,37 +62,27 @@ public:
OperandValToReplace = Op;
}
- /// isUseOfPostIncrementedValue - True if this should use the
- /// post-incremented version of this IV, not the preincremented version.
- /// This can only be set in special cases, such as the terminating setcc
- /// instruction for a loop or uses dominated by the loop.
- bool isUseOfPostIncrementedValue() const {
- return IsUseOfPostIncrementedValue;
+ /// getPostIncLoops - Return the set of loops for which the expression has
+ /// been adjusted to use post-inc mode.
+ const PostIncLoopSet &getPostIncLoops() const {
+ return PostIncLoops;
}
- /// setIsUseOfPostIncrmentedValue - set the flag that indicates whether
- /// this is a post-increment use.
- void setIsUseOfPostIncrementedValue(bool Val) {
- IsUseOfPostIncrementedValue = Val;
- }
+ /// transformToPostInc - Transform the expression to post-inc form for the
+ /// given loop.
+ void transformToPostInc(const Loop *L);
private:
/// Parent - a pointer to the IVUsers that owns this IVStrideUse.
IVUsers *Parent;
- /// Stride - The stride for this use.
- const SCEV *Stride;
-
- /// Offset - The offset to add to the base induction expression.
- const SCEV *Offset;
-
/// OperandValToReplace - The Value of the operand in the user instruction
/// that this IVStrideUse is representing.
WeakVH OperandValToReplace;
- /// IsUseOfPostIncrementedValue - True if this should use the
- /// post-incremented version of this IV, not the preincremented version.
- bool IsUseOfPostIncrementedValue;
+ /// PostIncLoops - The set of loops for which Expr has been adjusted to
+ /// use post-inc mode. This corresponds with SCEVExpander's post-inc concept.
+ PostIncLoopSet PostIncLoops;
/// Deleted - Implementation of CallbackVH virtual function to
/// receive notification when the User is deleted.
@@ -174,17 +143,16 @@ public:
/// return true. Otherwise, return false.
bool AddUsersIfInteresting(Instruction *I);
- IVStrideUse &AddUser(const SCEV *Stride, const SCEV *Offset,
- Instruction *User, Value *Operand);
+ IVStrideUse &AddUser(Instruction *User, Value *Operand);
/// getReplacementExpr - Return a SCEV expression which computes the
/// value of the OperandValToReplace of the given IVStrideUse.
- const SCEV *getReplacementExpr(const IVStrideUse &U) const;
+ const SCEV *getReplacementExpr(const IVStrideUse &IU) const;
+
+ /// getExpr - Return the expression for the use.
+ const SCEV *getExpr(const IVStrideUse &IU) const;
- /// getCanonicalExpr - Return a SCEV expression which computes the
- /// value of the SCEV of the given IVStrideUse, ignoring the
- /// isUseOfPostIncrementedValue flag.
- const SCEV *getCanonicalExpr(const IVStrideUse &U) const;
+ const SCEV *getStride(const IVStrideUse &IU, const Loop *L) const;
typedef ilist<IVStrideUse>::iterator iterator;
typedef ilist<IVStrideUse>::const_iterator const_iterator;
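
A sketch of the reworked interface above: the per-use stride/offset accessors are gone, so a client registers the use, asks IVUsers for its expression, and then shifts it into post-increment form for a particular loop. The helper name is illustrative; the member calls come from this hunk:

    #include "llvm/Analysis/IVUsers.h"
    #include "llvm/Instructions.h"

    void registerPostIncUse(llvm::IVUsers &IU, llvm::Instruction *UserInst,
                            llvm::Value *Operand, const llvm::Loop *L) {
      llvm::IVStrideUse &Use = IU.AddUser(UserInst, Operand);
      const llvm::SCEV *Expr = IU.getExpr(Use);   // expression for this use
      (void)Expr;
      Use.transformToPostInc(L);                  // record post-inc mode for L
    }
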
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/InlineCost.h b/libclamav/c++/llvm/include/llvm/Analysis/InlineCost.h
index 84acd7d..462bddd 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/InlineCost.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/InlineCost.h
@@ -1,4 +1,4 @@
-//===- InlineCost.cpp - Cost analysis for inliner ---------------*- C++ -*-===//
+//===- InlineCost.h - Cost analysis for inliner -----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,8 +16,10 @@
#include <cassert>
#include <climits>
-#include <map>
#include <vector>
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ValueMap.h"
+#include "llvm/Analysis/CodeMetrics.h"
namespace llvm {
@@ -28,43 +30,6 @@ namespace llvm {
template<class PtrType, unsigned SmallSize>
class SmallPtrSet;
- // CodeMetrics - Calculate size and a few similar metrics for a set of
- // basic blocks.
- struct CodeMetrics {
- /// NeverInline - True if this callee should never be inlined into a
- /// caller.
- bool NeverInline;
-
- /// usesDynamicAlloca - True if this function calls alloca (in the C sense).
- bool usesDynamicAlloca;
-
- /// NumInsts, NumBlocks - Keep track of how large each function is, which
- /// is used to estimate the code size cost of inlining it.
- unsigned NumInsts, NumBlocks;
-
- /// NumCalls - Keep track of the number of calls to 'big' functions.
- unsigned NumCalls;
-
- /// NumVectorInsts - Keep track of how many instructions produce vector
- /// values. The inliner is being more aggressive with inlining vector
- /// kernels.
- unsigned NumVectorInsts;
-
- /// NumRets - Keep track of how many Ret instructions the block contains.
- unsigned NumRets;
-
- CodeMetrics() : NeverInline(false), usesDynamicAlloca(false), NumInsts(0),
- NumBlocks(0), NumCalls(0), NumVectorInsts(0), NumRets(0) {}
-
- /// analyzeBasicBlock - Add information about the specified basic block
- /// to the current structure.
- void analyzeBasicBlock(const BasicBlock *BB);
-
- /// analyzeFunction - Add information about the specified function
- /// to the current structure.
- void analyzeFunction(Function *F);
- };
-
namespace InlineConstants {
// Various magic constants used to adjust heuristics.
const int InstrCost = 5;
@@ -159,9 +124,15 @@ namespace llvm {
/// analyzeFunction - Add information about the specified function
/// to the current structure.
void analyzeFunction(Function *F);
+
+ /// NeverInline - Returns true if the function should never be
+ /// inlined into any caller.
+ bool NeverInline();
};
- std::map<const Function *, FunctionInfo> CachedFunctionInfo;
+ // The Function* for a function can be changed (by ArgumentPromotion);
+ // the ValueMap will update itself when this happens.
+ ValueMap<const Function *, FunctionInfo> CachedFunctionInfo;
public:
@@ -170,6 +141,14 @@ namespace llvm {
///
InlineCost getInlineCost(CallSite CS,
SmallPtrSet<const Function *, 16> &NeverInline);
+ /// getInlineCost - The heuristic used to determine if we should inline
+ /// the function call or not. The callee is explicitly specified, to allow
+ /// you to calculate the cost of inlining a function via a pointer. The
+ /// result assumes that the inlined version will always be used. You should
+ /// weight it yourself in cases where this callee will not always be called.
+ InlineCost getInlineCost(CallSite CS,
+ Function *Callee,
+ SmallPtrSet<const Function *, 16> &NeverInline);
/// getInlineFudgeFactor - Return a > 1.0 factor if the inliner should use a
/// higher threshold to determine if the function call should be inlined.
@@ -179,7 +158,19 @@ namespace llvm {
void resetCachedCostInfo(Function* Caller) {
CachedFunctionInfo[Caller] = FunctionInfo();
}
+
+ /// growCachedCostInfo - update the cached cost info for Caller after Callee
+ /// has been inlined. If Callee is NULL it means a dead call has been
+ /// eliminated.
+ void growCachedCostInfo(Function* Caller, Function* Callee);
+
+ /// clear - empty the cache of inline costs
+ void clear();
};
+
+ /// callIsSmall - If a call is likely to lower to a single target instruction,
+ /// or is otherwise deemed small, return true.
+ bool callIsSmall(const Function *Callee);
}
#endif
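
A sketch using only declarations visible in this hunk: callIsSmall() as a cheap pre-filter and growCachedCostInfo() as the post-inlining cache update. The enclosing analyzer class is not shown here, so it is left as a template parameter rather than named:

    #include "llvm/Analysis/InlineCost.h"

    // True if the call to Callee likely lowers to a single target instruction.
    bool cheapToLower(const llvm::Function *Callee) {
      return llvm::callIsSmall(Callee);
    }

    // After Callee has been inlined into Caller (or a dead call removed, with a
    // null Callee), refresh Caller's cached cost information.
    template <class Analyzer>
    void afterInline(Analyzer &ICA, llvm::Function *Caller, llvm::Function *Callee) {
      ICA.growCachedCostInfo(Caller, Callee);
    }
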
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/InstructionSimplify.h b/libclamav/c++/llvm/include/llvm/Analysis/InstructionSimplify.h
index 13314e6..f47e740 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/InstructionSimplify.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/InstructionSimplify.h
@@ -46,6 +46,10 @@ namespace llvm {
Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const TargetData *TD = 0);
+ /// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
+ /// the result. If not, this returns null.
+ Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
+ const TargetData *TD = 0);
/// SimplifyGEPInst - Given operands for an GetElementPtrInst, see if we can
/// fold the result. If not, this returns null.
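
A sketch for the new SimplifySelectInst() entry point above: try to fold a select instruction and fall back to the original value when nothing simplifies. The wrapper name is illustrative:

    #include "llvm/Analysis/InstructionSimplify.h"
    #include "llvm/Instructions.h"

    llvm::Value *foldSelect(llvm::SelectInst *SI, const llvm::TargetData *TD) {
      if (llvm::Value *V = llvm::SimplifySelectInst(SI->getCondition(),
                                                    SI->getTrueValue(),
                                                    SI->getFalseValue(), TD))
        return V;       // folded to an existing value
      return SI;        // nothing simplified
    }
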
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/IntervalIterator.h b/libclamav/c++/llvm/include/llvm/Analysis/IntervalIterator.h
index d842840..82b3294 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/IntervalIterator.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/IntervalIterator.h
@@ -36,9 +36,9 @@
#include "llvm/Analysis/IntervalPartition.h"
#include "llvm/Function.h"
#include "llvm/Support/CFG.h"
-#include <stack>
-#include <set>
#include <algorithm>
+#include <set>
+#include <vector>
namespace llvm {
@@ -88,7 +88,7 @@ inline void addNodeToInterval(Interval *Int, Interval *I) {
template<class NodeTy, class OrigContainer_t, class GT = GraphTraits<NodeTy*>,
class IGT = GraphTraits<Inverse<NodeTy*> > >
class IntervalIterator {
- std::stack<std::pair<Interval*, typename Interval::succ_iterator> > IntStack;
+ std::vector<std::pair<Interval*, typename Interval::succ_iterator> > IntStack;
std::set<BasicBlock*> Visited;
OrigContainer_t *OrigContainer;
bool IOwnMem; // If True, delete intervals when done with them
@@ -116,15 +116,15 @@ public:
if (IOwnMem)
while (!IntStack.empty()) {
delete operator*();
- IntStack.pop();
+ IntStack.pop_back();
}
}
inline bool operator==(const _Self& x) const { return IntStack == x.IntStack;}
inline bool operator!=(const _Self& x) const { return !operator==(x); }
- inline const Interval *operator*() const { return IntStack.top().first; }
- inline Interval *operator*() { return IntStack.top().first; }
+ inline const Interval *operator*() const { return IntStack.back().first; }
+ inline Interval *operator*() { return IntStack.back().first; }
inline const Interval *operator->() const { return operator*(); }
inline Interval *operator->() { return operator*(); }
@@ -133,8 +133,8 @@ public:
do {
// All of the intervals on the stack have been visited. Try visiting
// their successors now.
- Interval::succ_iterator &SuccIt = IntStack.top().second,
- EndIt = succ_end(IntStack.top().first);
+ Interval::succ_iterator &SuccIt = IntStack.back().second,
+ EndIt = succ_end(IntStack.back().first);
while (SuccIt != EndIt) { // Loop over all interval succs
bool Done = ProcessInterval(getSourceGraphNode(OrigContainer, *SuccIt));
++SuccIt; // Increment iterator
@@ -142,10 +142,10 @@ public:
}
// Free interval memory... if necessary
- if (IOwnMem) delete IntStack.top().first;
+ if (IOwnMem) delete IntStack.back().first;
// We ran out of successors for this interval... pop off the stack
- IntStack.pop();
+ IntStack.pop_back();
} while (!IntStack.empty());
return *this;
@@ -175,7 +175,7 @@ private:
E = GT::child_end(Node); I != E; ++I)
ProcessNode(Int, getSourceGraphNode(OrigContainer, *I));
- IntStack.push(std::make_pair(Int, succ_begin(Int)));
+ IntStack.push_back(std::make_pair(Int, succ_begin(Int)));
return true;
}
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/IntervalPartition.h b/libclamav/c++/llvm/include/llvm/Analysis/IntervalPartition.h
index c1214e7..75a5cdf 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/IntervalPartition.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/IntervalPartition.h
@@ -48,7 +48,7 @@ class IntervalPartition : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
- IntervalPartition() : FunctionPass(&ID), RootInterval(0) {}
+ IntervalPartition() : FunctionPass(ID), RootInterval(0) {}
// run - Calculate the interval partition for this function
virtual bool runOnFunction(Function &F);
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/LazyValueInfo.h b/libclamav/c++/llvm/include/llvm/Analysis/LazyValueInfo.h
index 566788d..b2a3afb 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/LazyValueInfo.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/LazyValueInfo.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_ANALYSIS_LIVEVALUES_H
-#define LLVM_ANALYSIS_LIVEVALUES_H
+#ifndef LLVM_ANALYSIS_LAZYVALUEINFO_H
+#define LLVM_ANALYSIS_LAZYVALUEINFO_H
#include "llvm/Pass.h"
@@ -31,7 +31,7 @@ class LazyValueInfo : public FunctionPass {
void operator=(const LazyValueInfo&); // DO NOT IMPLEMENT.
public:
static char ID;
- LazyValueInfo() : FunctionPass(&ID), PImpl(0) {}
+ LazyValueInfo() : FunctionPass(ID), PImpl(0) {}
~LazyValueInfo() { assert(PImpl == 0 && "releaseMemory not called"); }
/// Tristate - This is used to return true/false/dunno results.
@@ -57,6 +57,12 @@ public:
/// constant on the specified edge. Return null if not.
Constant *getConstantOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB);
+ /// threadEdge - Inform the analysis cache that we have threaded an edge from
+ /// PredBB to OldSucc to be from PredBB to NewSucc instead.
+ void threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc, BasicBlock *NewSucc);
+
+ /// eraseBlock - Inform the analysis cache that we have erased a block.
+ void eraseBlock(BasicBlock *BB);
// Implementation boilerplate.
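
A sketch of the two cache-maintenance hooks added above, as a jump-threading style client would use them; the helper name is illustrative:

    #include "llvm/Analysis/LazyValueInfo.h"
    #include "llvm/BasicBlock.h"

    void updateAfterThreading(llvm::LazyValueInfo &LVI, llvm::BasicBlock *Pred,
                              llvm::BasicBlock *OldSucc, llvm::BasicBlock *NewSucc,
                              llvm::BasicBlock *Dead) {
      LVI.threadEdge(Pred, OldSucc, NewSucc);  // Pred->OldSucc now targets NewSucc
      if (Dead)
        LVI.eraseBlock(Dead);                  // drop cached facts for a dead block
    }
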
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/LibCallAliasAnalysis.h b/libclamav/c++/llvm/include/llvm/Analysis/LibCallAliasAnalysis.h
index 01f108d..c9adf3f 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/LibCallAliasAnalysis.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/LibCallAliasAnalysis.h
@@ -28,18 +28,20 @@ namespace llvm {
LibCallInfo *LCI;
explicit LibCallAliasAnalysis(LibCallInfo *LC = 0)
- : FunctionPass(&ID), LCI(LC) {
+ : FunctionPass(ID), LCI(LC) {
}
- explicit LibCallAliasAnalysis(const void *ID, LibCallInfo *LC)
+ explicit LibCallAliasAnalysis(char &ID, LibCallInfo *LC)
: FunctionPass(ID), LCI(LC) {
}
~LibCallAliasAnalysis();
- ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
+ ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size);
- ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
+ ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) {
// TODO: Could compare two direct calls against each other if we cared to.
- return AliasAnalysis::getModRefInfo(CS1,CS2);
+ return AliasAnalysis::getModRefInfo(CS1, CS2);
}
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
@@ -49,9 +51,20 @@ namespace llvm {
return false;
}
+ /// getAdjustedAnalysisPointer - This method is used when a pass implements
+ /// an analysis interface through multiple inheritance. If needed, it
+ /// should override this to adjust the this pointer as needed for the
+ /// specified pass info.
+ virtual void *getAdjustedAnalysisPointer(const void *PI) {
+ if (PI == &AliasAnalysis::ID)
+ return (AliasAnalysis*)this;
+ return this;
+ }
+
private:
ModRefResult AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
- CallSite CS, Value *P, unsigned Size);
+ ImmutableCallSite CS,
+ const Value *P, unsigned Size);
};
} // End of llvm namespace
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/LibCallSemantics.h b/libclamav/c++/llvm/include/llvm/Analysis/LibCallSemantics.h
index 74e8401..31d7cc5 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/LibCallSemantics.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/LibCallSemantics.h
@@ -47,7 +47,8 @@ namespace llvm {
enum LocResult {
Yes, No, Unknown
};
- LocResult (*isLocation)(CallSite CS, const Value *Ptr, unsigned Size);
+ LocResult (*isLocation)(ImmutableCallSite CS,
+ const Value *Ptr, unsigned Size);
};
/// LibCallFunctionInfo - Each record in the array of FunctionInfo structs
@@ -142,7 +143,7 @@ namespace llvm {
/// getFunctionInfo - Return the LibCallFunctionInfo object corresponding to
/// the specified function if we have it. If not, return null.
- const LibCallFunctionInfo *getFunctionInfo(Function *F) const;
+ const LibCallFunctionInfo *getFunctionInfo(const Function *F) const;
//===------------------------------------------------------------------===//
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/Lint.h b/libclamav/c++/llvm/include/llvm/Analysis/Lint.h
new file mode 100644
index 0000000..eb65d22
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/Analysis/Lint.h
@@ -0,0 +1,51 @@
+//===-- llvm/Analysis/Lint.h - LLVM IR Lint ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines lint interfaces that can be used for some sanity checking
+// of input to the system, and for checking that transformations
+// haven't done something bad. In contrast to the Verifier, the Lint checker
+// checks for undefined behavior or constructions with likely unintended
+// behavior.
+//
+// To see what specifically is checked, look at Lint.cpp
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LINT_H
+#define LLVM_ANALYSIS_LINT_H
+
+#include <string>
+
+namespace llvm {
+
+class FunctionPass;
+class Module;
+class Function;
+
+/// @brief Create a lint pass.
+///
+/// Check a module or function.
+FunctionPass *createLintPass();
+
+/// @brief Check a module.
+///
+/// This should only be used for debugging, because it plays games with
+/// PassManagers and stuff.
+void lintModule(
+ const Module &M ///< The module to be checked
+);
+
+// lintFunction - Check a function.
+void lintFunction(
+ const Function &F ///< The function to be checked
+);
+
+} // End llvm namespace
+
+#endif
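A short usage sketch for the interfaces declared above, assuming a host tool that wants to sanity-check IR it has just built; checkGeneratedIR() and the point at which it runs are assumptions, the three entry points are the ones from this header:

    #include "llvm/Analysis/Lint.h"
    #include "llvm/Module.h"
    #include "llvm/PassManager.h"

    using namespace llvm;

    // Hypothetical helper run after IR generation and before JIT compilation.
    static void checkGeneratedIR(Module &M, Function &HotFunction) {
      // Whole-module convenience entry point (debugging only).
      lintModule(M);

      // Check a single function of interest.
      lintFunction(HotFunction);

      // Or schedule the checker like any other FunctionPass.
      FunctionPassManager FPM(&M);
      FPM.add(createLintPass());
      FPM.doInitialization();
      FPM.run(HotFunction);
      FPM.doFinalization();
    }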
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/Loads.h b/libclamav/c++/llvm/include/llvm/Analysis/Loads.h
new file mode 100644
index 0000000..1574262
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/Analysis/Loads.h
@@ -0,0 +1,51 @@
+//===- Loads.h - Local load analysis --------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares simple local analyses for load instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOADS_H
+#define LLVM_ANALYSIS_LOADS_H
+
+#include "llvm/BasicBlock.h"
+
+namespace llvm {
+
+class AliasAnalysis;
+class TargetData;
+
+/// isSafeToLoadUnconditionally - Return true if we know that executing a load
+/// from this value cannot trap. If it is not obviously safe to load from the
+/// specified pointer, we do a quick local scan of the basic block containing
+/// ScanFrom, to determine if the address is already accessed.
+bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
+ unsigned Align, const TargetData *TD = 0);
+
+/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at
+/// the instruction before ScanFrom) checking to see if we have the value at
+/// the memory address *Ptr locally available within a small number of
+/// instructions. If the value is available, return it.
+///
+/// If not, return the iterator for the last validated instruction that the
+/// value would be live through. If we scanned the entire block and didn't
+/// find something that invalidates *Ptr or provides it, ScanFrom would be
+/// left at begin() and this returns null.
+///
+/// MaxInstsToScan specifies the maximum instructions to scan in the block.
+/// If it is set to 0, it will scan the whole block. You can also optionally
+/// specify an alias analysis implementation, which makes this more precise.
+Value *FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
+ BasicBlock::iterator &ScanFrom,
+ unsigned MaxInstsToScan = 6,
+ AliasAnalysis *AA = 0);
+
+}
+
+#endif
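A sketch of how a simplification pass might use the two entry points declared above; tryForwardLoad() and its policy are assumptions, the signatures are the ones from this header:

    #include "llvm/Analysis/Loads.h"
    #include "llvm/Instructions.h"

    using namespace llvm;

    // Hypothetical helper: return a value that can replace the load LI, or
    // null if nothing suitable is found nearby.
    static Value *tryForwardLoad(LoadInst *LI, AliasAnalysis *AA) {
      BasicBlock *BB = LI->getParent();
      BasicBlock::iterator ScanFrom = LI;

      // Scan backwards (at most the default 6 instructions) for an earlier
      // load or store of the same address.
      return FindAvailableLoadedValue(LI->getPointerOperand(), BB, ScanFrom,
                                      6, AA);
    }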
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/LoopDependenceAnalysis.h b/libclamav/c++/llvm/include/llvm/Analysis/LoopDependenceAnalysis.h
index a1a5637..94fd990 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/LoopDependenceAnalysis.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/LoopDependenceAnalysis.h
@@ -91,7 +91,7 @@ class LoopDependenceAnalysis : public LoopPass {
public:
static char ID; // Class identification, replacement for typeinfo
- LoopDependenceAnalysis() : LoopPass(&ID) {}
+ LoopDependenceAnalysis() : LoopPass(ID) {}
/// isDependencePair - Check whether two values can possibly give rise to
/// a data dependence: that is the case if both are instructions accessing
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/LoopInfo.h b/libclamav/c++/llvm/include/llvm/Analysis/LoopInfo.h
index f792a7f..462620f 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/LoopInfo.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/LoopInfo.h
@@ -35,6 +35,7 @@
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/raw_ostream.h"
@@ -229,13 +230,16 @@ public:
return 0;
}
+ /// Edge type.
+ typedef std::pair<BlockT*, BlockT*> Edge;
+
/// getExitEdges - Return all pairs of (_inside_block_,_outside_block_).
- typedef std::pair<const BlockT*,const BlockT*> Edge;
- void getExitEdges(SmallVectorImpl<Edge> &ExitEdges) const {
+ template <typename EdgeT>
+ void getExitEdges(SmallVectorImpl<EdgeT> &ExitEdges) const {
// Sort the blocks vector so that we can use binary search to do quick
// lookups.
SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
- std::sort(LoopBBs.begin(), LoopBBs.end());
+ array_pod_sort(LoopBBs.begin(), LoopBBs.end());
typedef GraphTraits<BlockT*> BlockTraits;
for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
@@ -244,7 +248,7 @@ public:
I != E; ++I)
if (!std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I))
// Not in current loop? It must be an exit block.
- ExitEdges.push_back(std::make_pair(*BI, *I));
+ ExitEdges.push_back(EdgeT(*BI, *I));
}
/// getLoopPreheader - If there is a preheader for this loop, return it. A
@@ -256,6 +260,27 @@ public:
///
BlockT *getLoopPreheader() const {
// Keep track of nodes outside the loop branching to the header...
+ BlockT *Out = getLoopPredecessor();
+ if (!Out) return 0;
+
+ // Make sure there is only one exit out of the preheader.
+ typedef GraphTraits<BlockT*> BlockTraits;
+ typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
+ ++SI;
+ if (SI != BlockTraits::child_end(Out))
+ return 0; // Multiple exits from the block, must not be a preheader.
+
+ // The predecessor has exactly one successor, so it is a preheader.
+ return Out;
+ }
+
+ /// getLoopPredecessor - If the given loop's header has exactly one unique
+ /// predecessor outside the loop, return it. Otherwise return null.
+ /// This is less strict than the loop "preheader" concept, which requires
+ /// the predecessor to have exactly one successor.
+ ///
+ BlockT *getLoopPredecessor() const {
+ // Keep track of nodes outside the loop branching to the header...
BlockT *Out = 0;
// Loop over the predecessors of the header node...
@@ -264,22 +289,17 @@ public:
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(Header),
- PE = InvBlockTraits::child_end(Header); PI != PE; ++PI)
- if (!contains(*PI)) { // If the block is not in the loop...
- if (Out && Out != *PI)
+ PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
+ typename InvBlockTraits::NodeType *N = *PI;
+ if (!contains(N)) { // If the block is not in the loop...
+ if (Out && Out != N)
return 0; // Multiple predecessors outside the loop
- Out = *PI;
+ Out = N;
}
+ }
// Make sure there is only one exit out of the preheader.
assert(Out && "Header of loop has no predecessors from outside loop?");
- typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
- ++SI;
- if (SI != BlockTraits::child_end(Out))
- return 0; // Multiple exits from the block, must not be a preheader.
-
- // If there is exactly one preheader, return it. If there was zero, then
- // Out is still null.
return Out;
}
@@ -293,11 +313,13 @@ public:
typename InvBlockTraits::ChildIteratorType PE =
InvBlockTraits::child_end(Header);
BlockT *Latch = 0;
- for (; PI != PE; ++PI)
- if (contains(*PI)) {
+ for (; PI != PE; ++PI) {
+ typename InvBlockTraits::NodeType *N = *PI;
+ if (contains(N)) {
if (Latch) return 0;
- Latch = *PI;
+ Latch = N;
}
+ }
return Latch;
}
@@ -409,10 +431,11 @@ public:
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(BB), PE = InvBlockTraits::child_end(BB);
PI != PE; ++PI) {
- if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), *PI))
+ typename InvBlockTraits::NodeType *N = *PI;
+ if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), N))
HasInsideLoopPreds = true;
else
- OutsideLoopPreds.push_back(*PI);
+ OutsideLoopPreds.push_back(N);
}
if (BB == getHeader()) {
@@ -486,6 +509,12 @@ protected:
}
};
+template<class BlockT, class LoopT>
+raw_ostream& operator<<(raw_ostream &OS, const LoopBase<BlockT, LoopT> &Loop) {
+ Loop.print(OS);
+ return OS;
+}
+
class Loop : public LoopBase<BasicBlock, Loop> {
public:
Loop() {}
@@ -533,12 +562,6 @@ public:
///
PHINode *getCanonicalInductionVariable() const;
- /// getCanonicalInductionVariableIncrement - Return the LLVM value that holds
- /// the canonical induction variable value for the "next" iteration of the
- /// loop. This always succeeds if getCanonicalInductionVariable succeeds.
- ///
- Instruction *getCanonicalInductionVariableIncrement() const;
-
/// getTripCount - Return a loop-invariant LLVM value indicating the number of
/// times the loop will be executed. Note that this means that the backedge
/// of the loop executes N-1 times. If the trip-count cannot be determined,
@@ -571,7 +594,7 @@ public:
unsigned getSmallConstantTripMultiple() const;
/// isLCSSAForm - Return true if the Loop is in LCSSA form
- bool isLCSSAForm() const;
+ bool isLCSSAForm(DominatorTree &DT) const;
/// isLoopSimplifyForm - Return true if the Loop is in the form that
/// the LoopSimplify form transforms loops to, which is sometimes called
@@ -743,9 +766,11 @@ public:
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType I =
InvBlockTraits::child_begin(BB), E = InvBlockTraits::child_end(BB);
- I != E; ++I)
- if (DT.dominates(BB, *I)) // If BB dominates its predecessor...
- TodoStack.push_back(*I);
+ I != E; ++I) {
+ typename InvBlockTraits::NodeType *N = *I;
+ if (DT.dominates(BB, N)) // If BB dominates its predecessor...
+ TodoStack.push_back(N);
+ }
if (TodoStack.empty()) return 0; // No backedges to this block...
@@ -915,7 +940,7 @@ class LoopInfo : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
- LoopInfo() : FunctionPass(&ID) {}
+ LoopInfo() : FunctionPass(ID) {}
LoopInfoBase<BasicBlock, Loop>& getBase() { return LI; }
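The LoopInfo changes above make getExitEdges() a template over the edge type and split the weaker getLoopPredecessor() query out of getLoopPreheader(). A hedged sketch of a caller written against the updated interface; inspectLoop() is illustrative only:

    #include "llvm/Analysis/LoopInfo.h"
    #include "llvm/ADT/SmallVector.h"

    using namespace llvm;

    // Hypothetical analysis fragment.
    static void inspectLoop(const Loop *L) {
      // Collect (inside block, outside block) pairs via the new Loop::Edge
      // typedef; any pair-like EdgeT would do.
      SmallVector<Loop::Edge, 8> ExitEdges;
      L->getExitEdges(ExitEdges);

      for (unsigned i = 0, e = ExitEdges.size(); i != e; ++i) {
        BasicBlock *Inside = ExitEdges[i].first;
        BasicBlock *Outside = ExitEdges[i].second;
        (void)Inside; (void)Outside;
      }

      // Weaker than getLoopPreheader(): the predecessor may have several
      // successors, but it is the unique out-of-loop block reaching the header.
      if (BasicBlock *Pred = L->getLoopPredecessor())
        (void)Pred;
    }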
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/LoopPass.h b/libclamav/c++/llvm/include/llvm/Analysis/LoopPass.h
index 10ff103..1603d2e 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/LoopPass.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/LoopPass.h
@@ -19,6 +19,7 @@
#include "llvm/Pass.h"
#include "llvm/PassManagers.h"
#include "llvm/Function.h"
+#include <deque>
namespace llvm {
@@ -28,8 +29,11 @@ class PMStack;
class LoopPass : public Pass {
public:
- explicit LoopPass(intptr_t pid) : Pass(PT_Loop, pid) {}
- explicit LoopPass(void *pid) : Pass(PT_Loop, pid) {}
+ explicit LoopPass(char &pid) : Pass(PT_Loop, pid) {}
+
+ /// getPrinterPass - Get a pass to print the function corresponding
+ /// to a Loop.
+ Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const;
// runOnLoop - This method should be implemented by the subclass to perform
// whatever action is necessary for the specified Loop.
@@ -54,7 +58,7 @@ public:
/// Assign pass manager to manage this pass
virtual void assignPassManager(PMStack &PMS,
- PassManagerType PMT = PMT_LoopPassManager);
+ PassManagerType PMT);
/// Return what kind of Pass Manager can manage this pass.
virtual PassManagerType getPotentialPassManagerType() const {
@@ -100,10 +104,10 @@ public:
/// Print passes managed by this manager
void dumpPassStructure(unsigned Offset);
- Pass *getContainedPass(unsigned N) {
+ LoopPass *getContainedPass(unsigned N) {
assert(N < PassVector.size() && "Pass number out of range!");
- Pass *FP = static_cast<Pass *>(PassVector[N]);
- return FP;
+ LoopPass *LP = static_cast<LoopPass *>(PassVector[N]);
+ return LP;
}
virtual PassManagerType getPassManagerType() const {
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/MemoryBuiltins.h b/libclamav/c++/llvm/include/llvm/Analysis/MemoryBuiltins.h
index a7f42c9..a4f9162 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -72,8 +72,8 @@ Value *getMallocArraySize(CallInst *CI, const TargetData *TD,
// free Call Utility Functions.
//
-/// isFreeCall - Returns true if the value is a call to the builtin free()
-bool isFreeCall(const Value *I);
+/// isFreeCall - Returns non-null if the value is a call to the builtin free()
+const CallInst *isFreeCall(const Value *I);
} // End llvm namespace
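The isFreeCall() change above turns a yes/no answer into the call instruction itself, so callers can look at the freed pointer. A sketch of an updated caller, assuming the 2.8-era CallInst::getArgOperand() accessor; handleFree() is illustrative only:

    #include "llvm/Analysis/MemoryBuiltins.h"
    #include "llvm/Instructions.h"

    using namespace llvm;

    // Hypothetical caller: returns true if I frees memory.
    static bool handleFree(const Instruction *I) {
      if (const CallInst *FreeCall = isFreeCall(I)) {
        // The freed pointer is the single argument of free().
        const Value *FreedPtr = FreeCall->getArgOperand(0);
        (void)FreedPtr;   // e.g. invalidate cached facts about this pointer
        return true;
      }
      return false;
    }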
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/Passes.h b/libclamav/c++/llvm/include/llvm/Analysis/Passes.h
index 1a5cbb2..37425eb 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/Passes.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/Passes.h
@@ -81,11 +81,18 @@ namespace llvm {
//===--------------------------------------------------------------------===//
//
+ // createTypeBasedAliasAnalysisPass - This pass implements metadata-based
+ // type-based alias analysis.
+ //
+ ImmutablePass *createTypeBasedAliasAnalysisPass();
+
+ //===--------------------------------------------------------------------===//
+ //
// createProfileLoaderPass - This pass loads information from a profile dump
// file.
//
ModulePass *createProfileLoaderPass();
- extern const PassInfo *ProfileLoaderPassID;
+ extern char &ProfileLoaderPassID;
//===--------------------------------------------------------------------===//
//
@@ -99,7 +106,7 @@ namespace llvm {
// instead of loading it from a previous run.
//
FunctionPass *createProfileEstimatorPass();
- extern const PassInfo *ProfileEstimatorPassID;
+ extern char &ProfileEstimatorPassID;
//===--------------------------------------------------------------------===//
//
@@ -153,6 +160,16 @@ namespace llvm {
// print debug info intrinsics in human readable form
FunctionPass *createDbgInfoPrinterPass();
+
+ //===--------------------------------------------------------------------===//
+ //
+ // createRegionInfoPass - This pass finds all single entry single exit regions
+ // in a function and builds the region hierarchy.
+ //
+ FunctionPass *createRegionInfoPass();
+
+ // Print module-level debug info metadata in human-readable form.
+ ModulePass *createModuleDebugInfoPrinterPass();
}
#endif
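A sketch wiring the newly exported factory functions into a pass manager; the pipeline and its ordering are assumptions, only the factories come from this header:

    #include "llvm/Analysis/Passes.h"
    #include "llvm/PassManager.h"

    using namespace llvm;

    // Hypothetical debugging/analysis pipeline.
    static void addNewAnalysisPasses(PassManager &PM) {
      PM.add(createTypeBasedAliasAnalysisPass());   // metadata-based TBAA
      PM.add(createRegionInfoPass());               // SESE region hierarchy
      PM.add(createModuleDebugInfoPrinterPass());   // dump module debug info
    }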
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/PointerTracking.h b/libclamav/c++/llvm/include/llvm/Analysis/PointerTracking.h
index 4d282b1..6b49e18 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/PointerTracking.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/PointerTracking.h
@@ -27,7 +27,7 @@
#ifndef LLVM_ANALYSIS_POINTERTRACKING_H
#define LLVM_ANALYSIS_POINTERTRACKING_H
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Instructions.h"
#include "llvm/Pass.h"
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/PostDominators.h b/libclamav/c++/llvm/include/llvm/Analysis/PostDominators.h
index 5552017..46ce820 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/PostDominators.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/PostDominators.h
@@ -25,7 +25,7 @@ struct PostDominatorTree : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
DominatorTreeBase<BasicBlock>* DT;
- PostDominatorTree() : FunctionPass(&ID) {
+ PostDominatorTree() : FunctionPass(ID) {
DT = new DominatorTreeBase<BasicBlock>(true);
}
@@ -106,7 +106,7 @@ template <> struct GraphTraits<PostDominatorTree*>
struct PostDominanceFrontier : public DominanceFrontierBase {
static char ID;
PostDominanceFrontier()
- : DominanceFrontierBase(&ID, true) {}
+ : DominanceFrontierBase(ID, true) {}
virtual bool runOnFunction(Function &) {
Frontiers.clear();
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/RegionInfo.h b/libclamav/c++/llvm/include/llvm/Analysis/RegionInfo.h
new file mode 100644
index 0000000..7a2670f
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/Analysis/RegionInfo.h
@@ -0,0 +1,630 @@
+//===- RegionInfo.h - SESE region analysis ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Calculate a program structure tree built out of single entry single exit
+// regions.
+// The basic ideas are taken from "The Program Structure Tree - Richard Johnson,
+// David Pearson, Keshav Pingali - 1994", however enriched with ideas from "The
+// Refined Process Structure Tree - Jussi Vanhatalo, Hagen Voelzer, Jana
+// Koehler - 2009".
+// The algorithm to calculate these data structures, however, is completely
+// different, as it takes advantage of information already available in the
+// (Post)dominance tree and dominance frontier passes. This leads to a simpler
+// and, in practice, hopefully better performing algorithm. The algorithms
+// described in the papers above both run in time linear in the graph size,
+// O(V+E), whereas this one does not, because the dominance frontier
+// information itself is not linear; in practice, however, the runtime appears
+// to be on the order of a dominance tree calculation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_REGION_INFO_H
+#define LLVM_ANALYSIS_REGION_INFO_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/PostDominators.h"
+#include "llvm/Support/Allocator.h"
+
+namespace llvm {
+
+class Region;
+class RegionInfo;
+class raw_ostream;
+class Loop;
+class LoopInfo;
+
+/// @brief Marker class to iterate over the elements of a Region in flat mode.
+///
+/// Passing this marker as the graph type selects Flat mode iteration; leaving
+/// it out selects the normal, hierarchical iteration. During a Flat mode
+/// iteration all subregions are entered and the iteration returns every
+/// BasicBlock. If Flat mode is not selected, a subregion is returned as a
+/// single RegionNode and is not entered.
+template <class GraphType>
+class FlatIt {};
+
+/// @brief A RegionNode represents a subregion or a BasicBlock that is part of a
+/// Region.
+class RegionNode {
+ // DO NOT IMPLEMENT
+ RegionNode(const RegionNode &);
+ // DO NOT IMPLEMENT
+ const RegionNode &operator=(const RegionNode &);
+
+ /// This is the entry basic block that starts this region node. If this is a
+ /// BasicBlock RegionNode, then entry is just the basic block that this
+ /// RegionNode represents. Otherwise it is the entry of this (Sub)RegionNode.
+ ///
+ /// In the BBtoRegionNode map of the parent of this node, BB will always map
+ /// to this node no matter which kind of node this one is.
+ ///
+ /// The node can hold either a Region or a BasicBlock.
+ /// One bit records whether this RegionNode is a subregion or a BasicBlock
+ /// RegionNode.
+ PointerIntPair<BasicBlock*, 1, bool> entry;
+
+protected:
+ /// @brief The parent Region of this RegionNode.
+ /// @see getParent()
+ Region* parent;
+
+public:
+ /// @brief Create a RegionNode.
+ ///
+ /// @param Parent The parent of this RegionNode.
+ /// @param Entry The entry BasicBlock of the RegionNode. If this
+ /// RegionNode represents a BasicBlock, this is the
+ /// BasicBlock itself. If it represents a subregion, this
+ /// is the entry BasicBlock of the subregion.
+ /// @param isSubRegion If this RegionNode represents a SubRegion.
+ inline RegionNode(Region* Parent, BasicBlock* Entry, bool isSubRegion = 0)
+ : entry(Entry, isSubRegion), parent(Parent) {}
+
+ /// @brief Get the parent Region of this RegionNode.
+ ///
+ /// The parent Region is the Region this RegionNode belongs to. If, for
+ /// example, a BasicBlock is an element of two Regions, there exist two
+ /// RegionNodes for this BasicBlock, each with getParent() pointing to the
+ /// Region that particular RegionNode belongs to.
+ ///
+ /// @return Get the parent Region of this RegionNode.
+ inline Region* getParent() const { return parent; }
+
+ /// @brief Get the entry BasicBlock of this RegionNode.
+ ///
+ /// If this RegionNode represents a BasicBlock this is just the BasicBlock
+ /// itself; otherwise we return the entry BasicBlock of the subregion.
+ ///
+ /// @return The entry BasicBlock of this RegionNode.
+ inline BasicBlock* getEntry() const { return entry.getPointer(); }
+
+ /// @brief Get the content of this RegionNode.
+ ///
+ /// This can be either a BasicBlock or a subregion. Before calling getNodeAs()
+ /// check the type of the content with the isSubRegion() function call.
+ ///
+ /// @return The content of this RegionNode.
+ template<class T>
+ inline T* getNodeAs() const;
+
+ /// @brief Is this RegionNode a subregion?
+ ///
+ /// @return True if it contains a subregion. False if it contains a
+ /// BasicBlock.
+ inline bool isSubRegion() const {
+ return entry.getInt();
+ }
+};
+
+/// Print a RegionNode.
+inline raw_ostream &operator<<(raw_ostream &OS, const RegionNode &Node);
+
+template<>
+inline BasicBlock* RegionNode::getNodeAs<BasicBlock>() const {
+ assert(!isSubRegion() && "This is not a BasicBlock RegionNode!");
+ return getEntry();
+}
+
+template<>
+inline Region* RegionNode::getNodeAs<Region>() const {
+ assert(isSubRegion() && "This is not a subregion RegionNode!");
+ return reinterpret_cast<Region*>(const_cast<RegionNode*>(this));
+}
+
+//===----------------------------------------------------------------------===//
+/// @brief A single entry single exit Region.
+///
+/// A Region is a connected subgraph of a control flow graph that has exactly
+/// two connections to the remaining graph. It can be used to analyze or
+/// optimize parts of the control flow graph.
+///
+/// A <em> simple Region </em> is connected to the remaining graph by just two
+/// edges. One edge entering the Region and another one leaving the Region.
+///
+/// An <em> extended Region </em> (or just Region) is a subgraph that can be
+/// transformed into a simple Region. The transformation is done by adding
+/// BasicBlocks that merge several entry or exit edges so that after the merge
+/// just one entry and one exit edge exists.
+///
+/// The \e Entry of a Region is the first BasicBlock that is passed after
+/// entering the Region. It is an element of the Region. The entry BasicBlock
+/// dominates all BasicBlocks in the Region.
+///
+/// The \e Exit of a Region is the first BasicBlock that is passed after
+/// leaving the Region. It is not an element of the Region. The exit BasicBlock
+/// postdominates all BasicBlocks in the Region.
+///
+/// A <em> canonical Region </em> cannot be constructed by combining smaller
+/// Regions.
+///
+/// Region A is the \e parent of Region B, if B is completely contained in A.
+///
+/// Two canonical Regions either do not intersect at all or one is
+/// the parent of the other.
+///
+/// The <em> Program Structure Tree</em> is a graph (V, E) where V is the set of
+/// Regions in the control flow graph and E is the \e parent relation of these
+/// Regions.
+///
+/// Example:
+///
+/// \verbatim
+/// A simple control flow graph, that contains two regions.
+///
+/// 1
+/// / |
+/// 2 |
+/// / \ 3
+/// 4 5 |
+/// | | |
+/// 6 7 8
+/// \ | /
+/// \ |/ Region A: 1 -> 9 {1,2,3,4,5,6,7,8}
+/// 9 Region B: 2 -> 9 {2,4,5,6,7}
+/// \endverbatim
+///
+/// You can obtain more examples by either calling
+///
+/// <tt> "opt -regions -analyze anyprogram.ll" </tt>
+/// or
+/// <tt> "opt -view-regions-only anyprogram.ll" </tt>
+///
+/// on any LLVM file you are interested in.
+///
+/// The first call returns a textual representation of the program structure
+/// tree, the second one creates a graphical representation using graphviz.
+class Region : public RegionNode {
+ friend class RegionInfo;
+ // DO NOT IMPLEMENT
+ Region(const Region &);
+ // DO NOT IMPLEMENT
+ const Region &operator=(const Region &);
+
+ // Information necessary to manage this Region.
+ RegionInfo* RI;
+ DominatorTree *DT;
+
+ // The exit BasicBlock of this region.
+ // (The entry BasicBlock is part of RegionNode)
+ BasicBlock *exit;
+
+ typedef std::vector<Region*> RegionSet;
+
+ // The subregions of this region.
+ RegionSet children;
+
+ typedef std::map<BasicBlock*, RegionNode*> BBNodeMapT;
+
+ // Save the BasicBlock RegionNodes that are element of this Region.
+ mutable BBNodeMapT BBNodeMap;
+
+ /// verifyBBInRegion - Check if a BB is in this Region. This check also works
+ /// if the region is incorrectly built. (EXPENSIVE!)
+ void verifyBBInRegion(BasicBlock* BB) const;
+
+ /// verifyWalk - Walk over all the BBs of the region starting from BB and
+ /// verify that all reachable basic blocks are elements of the region.
+ /// (EXPENSIVE!)
+ void verifyWalk(BasicBlock* BB, std::set<BasicBlock*>* visitedBB) const;
+
+ /// verifyRegionNest - Verify if the region and its children are valid
+ /// regions (EXPENSIVE!)
+ void verifyRegionNest() const;
+
+public:
+ /// @brief Create a new region.
+ ///
+ /// @param Entry The entry basic block of the region.
+ /// @param Exit The exit basic block of the region.
+ /// @param RI The region info object that is managing this region.
+ /// @param DT The dominator tree of the current function.
+ /// @param Parent The surrounding region or NULL if this is a top level
+ /// region.
+ Region(BasicBlock *Entry, BasicBlock *Exit, RegionInfo* RI,
+ DominatorTree *DT, Region *Parent = 0);
+
+ /// Delete the Region and all its subregions.
+ ~Region();
+
+ /// @brief Get the entry BasicBlock of the Region.
+ /// @return The entry BasicBlock of the region.
+ BasicBlock *getEntry() const { return RegionNode::getEntry(); }
+
+ /// @brief Get the exit BasicBlock of the Region.
+ /// @return The exit BasicBlock of the Region, NULL if this is the TopLevel
+ /// Region.
+ BasicBlock *getExit() const { return exit; }
+
+ /// @brief Get the parent of the Region.
+ /// @return The parent of the Region or NULL if this is a top level
+ /// Region.
+ Region *getParent() const { return RegionNode::getParent(); }
+
+ /// @brief Get the RegionNode representing the current Region.
+ /// @return The RegionNode representing the current Region.
+ RegionNode* getNode() const {
+ return const_cast<RegionNode*>(reinterpret_cast<const RegionNode*>(this));
+ }
+
+ /// @brief Get the nesting level of this Region.
+ ///
+ /// A toplevel Region has depth 0.
+ ///
+ /// @return The depth of the region.
+ unsigned getDepth() const;
+
+ /// @brief Is this a simple region?
+ ///
+ /// A region is simple if it has exactly one exit and one entry edge.
+ ///
+ /// @return True if the Region is simple.
+ bool isSimple() const;
+
+ /// @brief Returns the name of the Region.
+ /// @return The Name of the Region.
+ std::string getNameStr() const;
+
+ /// @brief Return the RegionInfo object, that belongs to this Region.
+ RegionInfo *getRegionInfo() const {
+ return RI;
+ }
+
+ /// @brief Print the region.
+ ///
+ /// @param OS The output stream the Region is printed to.
+ /// @param printTree Print also the tree of subregions.
+ /// @param level The indentation level used for printing.
+ void print(raw_ostream& OS, bool printTree = true, unsigned level = 0) const;
+
+ /// @brief Print the region to stderr.
+ void dump() const;
+
+ /// @brief Check if the region contains a BasicBlock.
+ ///
+ /// @param BB The BasicBlock that might be contained in this Region.
+ /// @return True if the block is contained in the region otherwise false.
+ bool contains(const BasicBlock *BB) const;
+
+ /// @brief Check if the region contains another region.
+ ///
+ /// @param SubRegion The region that might be contained in this Region.
+ /// @return True if SubRegion is contained in the region otherwise false.
+ bool contains(const Region *SubRegion) const {
+ // Toplevel Region.
+ if (!getExit())
+ return true;
+
+ return contains(SubRegion->getEntry())
+ && (contains(SubRegion->getExit()) || SubRegion->getExit() == getExit());
+ }
+
+ /// @brief Check if the region contains an Instruction.
+ ///
+ /// @param Inst The Instruction that might be contained in this region.
+ /// @return True if the Instruction is contained in the region otherwise false.
+ bool contains(const Instruction *Inst) const {
+ return contains(Inst->getParent());
+ }
+
+ /// @brief Check if the region contains a loop.
+ ///
+ /// @param L The loop that might be contained in this region.
+ /// @return True if the loop is contained in the region otherwise false.
+ /// In case a NULL pointer is passed to this function the result
+ /// is false, except for the region that describes the whole function.
+ /// In that case true is returned.
+ bool contains(const Loop *L) const;
+
+ /// @brief Get the outermost loop in the region that contains a loop.
+ ///
+ /// For a Loop L, find the outermost loop OuterL that is a parent loop of L
+ /// and is itself contained in the region.
+ ///
+ /// @param L The loop at which the lookup is started.
+ /// @return The outermost loop in the region, NULL if such a loop does not
+ /// exist or if the region describes the whole function.
+ Loop *outermostLoopInRegion(Loop *L) const;
+
+ /// @brief Get the outermost loop in the region that contains a basic block.
+ ///
+ /// For a basic block BB, find the outermost loop L that contains BB and is
+ /// itself contained in the region.
+ ///
+ /// @param LI A pointer to a LoopInfo analysis.
+ /// @param BB The basic block surrounded by the loop.
+ /// @return The outermost loop in the region, NULL if such a loop does not
+ /// exist or if the region describes the whole function.
+ Loop *outermostLoopInRegion(LoopInfo *LI, BasicBlock* BB) const;
+
+ /// @brief Get the subregion that starts at a BasicBlock
+ ///
+ /// @param BB The BasicBlock at which the subregion should start.
+ /// @return The Subregion if available, otherwise NULL.
+ Region* getSubRegionNode(BasicBlock *BB) const;
+
+ /// @brief Get the RegionNode for a BasicBlock
+ ///
+ /// @param BB The BasicBlock at which the RegionNode should start.
+ /// @return If available, the RegionNode that represents the subregion
+ /// starting at BB. If no subregion starts at BB, the RegionNode
+ /// representing BB.
+ RegionNode* getNode(BasicBlock *BB) const;
+
+ /// @brief Get the BasicBlock RegionNode for a BasicBlock
+ ///
+ /// @param BB The BasicBlock for which the RegionNode is requested.
+ /// @return The RegionNode representing the BB.
+ RegionNode* getBBNode(BasicBlock *BB) const;
+
+ /// @brief Add a new subregion to this Region.
+ ///
+ /// @param SubRegion The new subregion that will be added.
+ void addSubRegion(Region *SubRegion);
+
+ /// @brief Remove a subregion from this Region.
+ ///
+ /// The subregion is not deleted, as it will probably be inserted into another
+ /// region.
+ /// @param SubRegion The SubRegion that will be removed.
+ Region *removeSubRegion(Region *SubRegion);
+
+ /// @brief Move all direct child nodes of this Region to another Region.
+ ///
+ /// @param To The Region the child nodes will be transferred to.
+ void transferChildrenTo(Region *To);
+
+ /// @brief Verify if the region is a correct region.
+ ///
+ /// Check if this is a correctly built Region. This is an expensive check, as
+ /// the complete CFG of the Region will be walked.
+ void verifyRegion() const;
+
+ /// @brief Clear the cache for BB RegionNodes.
+ ///
+ /// After calling this function the BasicBlock RegionNodes will be stored at
+ /// different memory locations. RegionNodes obtained before this function is
+ /// called are therefore not comparable to RegionNodes obtained afterwards.
+ void clearNodeCache();
+
+ /// @name Subregion Iterators
+ ///
+ /// These iterators iterate over all subregions of this Region.
+ //@{
+ typedef RegionSet::iterator iterator;
+ typedef RegionSet::const_iterator const_iterator;
+
+ iterator begin() { return children.begin(); }
+ iterator end() { return children.end(); }
+
+ const_iterator begin() const { return children.begin(); }
+ const_iterator end() const { return children.end(); }
+ //@}
+
+ /// @name BasicBlock Iterators
+ ///
+ /// These iterators iterate over all BasicBlock RegionNodes that are
+ /// contained in this Region. The iterator also iterates over BasicBlocks
+ /// that are elements of a subregion of this Region. It is therefore called a
+ /// flat iterator.
+ //@{
+ typedef df_iterator<RegionNode*, SmallPtrSet<RegionNode*, 8>, false,
+ GraphTraits<FlatIt<RegionNode*> > > block_iterator;
+
+ typedef df_iterator<const RegionNode*, SmallPtrSet<const RegionNode*, 8>,
+ false, GraphTraits<FlatIt<const RegionNode*> > >
+ const_block_iterator;
+
+ block_iterator block_begin();
+ block_iterator block_end();
+
+ const_block_iterator block_begin() const;
+ const_block_iterator block_end() const;
+ //@}
+
+ /// @name Element Iterators
+ ///
+ /// These iterators iterate over all BasicBlock and subregion RegionNodes that
+ /// are direct children of this Region. They do not iterate over any
+ /// RegionNodes that are also elements of a subregion of this Region.
+ //@{
+ typedef df_iterator<RegionNode*, SmallPtrSet<RegionNode*, 8>, false,
+ GraphTraits<RegionNode*> > element_iterator;
+
+ typedef df_iterator<const RegionNode*, SmallPtrSet<const RegionNode*, 8>,
+ false, GraphTraits<const RegionNode*> >
+ const_element_iterator;
+
+ element_iterator element_begin();
+ element_iterator element_end();
+
+ const_element_iterator element_begin() const;
+ const_element_iterator element_end() const;
+ //@}
+};
+
+//===----------------------------------------------------------------------===//
+/// @brief Analysis that detects all canonical Regions.
+///
+/// The RegionInfo pass detects all canonical regions in a function. The Regions
+/// are connected using the parent relation. This builds a Program Structure
+/// Tree.
+class RegionInfo : public FunctionPass {
+ typedef DenseMap<BasicBlock*,BasicBlock*> BBtoBBMap;
+ typedef DenseMap<BasicBlock*, Region*> BBtoRegionMap;
+ typedef SmallPtrSet<Region*, 4> RegionSet;
+
+ // DO NOT IMPLEMENT
+ RegionInfo(const RegionInfo &);
+ // DO NOT IMPLEMENT
+ const RegionInfo &operator=(const RegionInfo &);
+
+ DominatorTree *DT;
+ PostDominatorTree *PDT;
+ DominanceFrontier *DF;
+
+ /// The top level region.
+ Region *TopLevelRegion;
+
+ /// Map every BB to the smallest region that contains BB.
+ BBtoRegionMap BBtoRegion;
+
+ // isCommonDomFrontier - Returns true if BB is in the dominance frontier of
+ // entry, because it was inherited from exit. In the other case there is an
+ // edge going from entry to BB without passing exit.
+ bool isCommonDomFrontier(BasicBlock* BB, BasicBlock* entry,
+ BasicBlock* exit) const;
+
+ // isRegion - Check if entry and exit surround a valid region, based on
+ // dominance tree and dominance frontier.
+ bool isRegion(BasicBlock* entry, BasicBlock* exit) const;
+
+ // insertShortCut - Saves a shortcut pointing from entry to exit.
+ // This function may extend this shortcut if possible.
+ void insertShortCut(BasicBlock* entry, BasicBlock* exit,
+ BBtoBBMap* ShortCut) const;
+
+ // getNextPostDom - Returns the next BB that postdominates N, while skipping
+ // all post dominators that cannot finish a canonical region.
+ DomTreeNode *getNextPostDom(DomTreeNode* N, BBtoBBMap *ShortCut) const;
+
+ // isTrivialRegion - A region is trivial if it contains only one BB.
+ bool isTrivialRegion(BasicBlock *entry, BasicBlock *exit) const;
+
+ // createRegion - Creates a single entry single exit region.
+ Region *createRegion(BasicBlock *entry, BasicBlock *exit);
+
+ // findRegionsWithEntry - Detect all regions starting with bb 'entry'.
+ void findRegionsWithEntry(BasicBlock *entry, BBtoBBMap *ShortCut);
+
+ // scanForRegions - Detects regions in F.
+ void scanForRegions(Function &F, BBtoBBMap *ShortCut);
+
+ // getTopMostParent - Get the top most parent with the same entry block.
+ Region *getTopMostParent(Region *region);
+
+ // buildRegionsTree - Build the region hierarchy after all regions have been detected.
+ void buildRegionsTree(DomTreeNode *N, Region *region);
+
+ // Calculate - Detect all regions in the function and build the region tree.
+ void Calculate(Function& F);
+
+ void releaseMemory();
+
+ // updateStatistics - Update statistics about created regions.
+ void updateStatistics(Region *R);
+
+ // isSimple - Check if a region is a simple region with exactly one entry
+ // edge and exactly one exit edge.
+ bool isSimple(Region* R) const;
+
+public:
+ static char ID;
+ explicit RegionInfo();
+
+ ~RegionInfo();
+
+ /// @name FunctionPass interface
+ //@{
+ virtual bool runOnFunction(Function &F);
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ virtual void print(raw_ostream &OS, const Module *) const;
+ virtual void verifyAnalysis() const;
+ //@}
+
+ /// @brief Get the smallest region that contains a BasicBlock.
+ ///
+ /// @param BB The basic block.
+ /// @return The smallest region that contains BB, or NULL if there is no
+ /// region containing BB.
+ Region *getRegionFor(BasicBlock *BB) const;
+
+ /// @brief A shortcut for getRegionFor().
+ ///
+ /// @param BB The basic block.
+ /// @return The smallest region that contains BB, or NULL if there is no
+ /// region containing BB.
+ Region *operator[](BasicBlock *BB) const;
+
+ /// @brief Return the exit of the maximal refined region that starts at a
+ /// BasicBlock.
+ ///
+ /// @param BB The BasicBlock at which the refined region starts.
+ BasicBlock *getMaxRegionExit(BasicBlock *BB) const;
+
+ /// @brief Find the smallest region that contains two regions.
+ ///
+ /// @param A The first region.
+ /// @param B The second region.
+ /// @return The smallest region containing A and B.
+ Region *getCommonRegion(Region* A, Region *B) const;
+
+ /// @brief Find the smallest region that contains two basic blocks.
+ ///
+ /// @param A The first basic block.
+ /// @param B The second basic block.
+ /// @return The smallest region that contains A and B.
+ Region* getCommonRegion(BasicBlock* A, BasicBlock *B) const {
+ return getCommonRegion(getRegionFor(A), getRegionFor(B));
+ }
+
+ /// @brief Find the smallest region that contains a set of regions.
+ ///
+ /// @param Regions A vector of regions.
+ /// @return The smallest region that contains all regions in Regions.
+ Region* getCommonRegion(SmallVectorImpl<Region*> &Regions) const;
+
+ /// @brief Find the smallest region that contains a set of basic blocks.
+ ///
+ /// @param BBs A vector of basic blocks.
+ /// @return The smallest region that contains all basic blocks in BBs.
+ Region* getCommonRegion(SmallVectorImpl<BasicBlock*> &BBs) const;
+
+ Region *getTopLevelRegion() const {
+ return TopLevelRegion;
+ }
+
+ /// @brief Clear the Node Cache for all Regions.
+ ///
+ /// @see Region::clearNodeCache()
+ void clearNodeCache() {
+ if (TopLevelRegion)
+ TopLevelRegion->clearNodeCache();
+ }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const RegionNode &Node) {
+ if (Node.isSubRegion())
+ return OS << Node.getNodeAs<Region>()->getNameStr();
+ else
+ return OS << Node.getNodeAs<BasicBlock>()->getNameStr();
+}
+} // End llvm namespace
+#endif
+
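To illustrate the interface declared above, here is a hedged sketch of a client pass that requires RegionInfo and dumps the Program Structure Tree; the RegionTreeDump pass is an assumption and not part of this commit:

    #include "llvm/Analysis/RegionInfo.h"
    #include "llvm/Pass.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    namespace {
      // Hypothetical client of the RegionInfo analysis.
      struct RegionTreeDump : public FunctionPass {
        static char ID;
        RegionTreeDump() : FunctionPass(ID) {}

        void printTree(const Region *R, unsigned Depth) {
          errs().indent(2 * Depth) << R->getNameStr() << "\n";
          for (Region::const_iterator I = R->begin(), E = R->end(); I != E; ++I)
            printTree(*I, Depth + 1);
        }

        virtual bool runOnFunction(Function &F) {
          RegionInfo &RI = getAnalysis<RegionInfo>();
          printTree(RI.getTopLevelRegion(), 0);   // region covering all of F
          return false;
        }

        virtual void getAnalysisUsage(AnalysisUsage &AU) const {
          AU.addRequired<RegionInfo>();
          AU.setPreservesAll();
        }
      };
    }

    char RegionTreeDump::ID = 0;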
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/RegionIterator.h b/libclamav/c++/llvm/include/llvm/Analysis/RegionIterator.h
new file mode 100644
index 0000000..ced5b52
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/Analysis/RegionIterator.h
@@ -0,0 +1,342 @@
+//===- RegionIterator.h - Iterators to iterate over Regions -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file defines the iterators to iterate over the elements of a Region.
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_ANALYSIS_REGION_ITERATOR_H
+#define LLVM_ANALYSIS_REGION_ITERATOR_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+//===----------------------------------------------------------------------===//
+/// @brief Hierarchical RegionNode successor iterator.
+///
+/// This iterator iterates over all successors of a RegionNode.
+///
+/// For a BasicBlock RegionNode it skips all BasicBlocks that are not part of
+/// the parent Region. Furthermore for BasicBlocks that start a subregion, a
+/// RegionNode representing the subregion is returned.
+///
+/// For a subregion RegionNode there is just one successor. The RegionNode
+/// representing the exit of the subregion.
+template<class NodeType>
+class RNSuccIterator : public std::iterator<std::forward_iterator_tag,
+ NodeType, ptrdiff_t>
+{
+ typedef std::iterator<std::forward_iterator_tag, NodeType, ptrdiff_t> super;
+ // The iterator works in two modes, bb mode or region mode.
+ enum ItMode{
+ // In BB mode it returns all successors of this BasicBlock as its
+ // successors.
+ ItBB,
+ // In region mode there is only one successor: the RegionNode mapping
+ // to the exit block of the region.
+ ItRgBegin, // At the beginning of the regionnode successor.
+ ItRgEnd // At the end of the regionnode successor.
+ };
+
+ // Use two bits to represent the iterator mode.
+ PointerIntPair<NodeType*, 2, enum ItMode> Node;
+
+ // The block successor iterator.
+ succ_iterator BItor;
+
+ // advanceRegionSucc - A region node has only one successor. It reaches end
+ // once we advance it.
+ void advanceRegionSucc() {
+ assert(Node.getInt() == ItRgBegin && "Cannot advance region successor!");
+ Node.setInt(ItRgEnd);
+ }
+
+ NodeType* getNode() const{ return Node.getPointer(); }
+
+ // isRegionMode - Is the current iterator in region mode?
+ bool isRegionMode() const { return Node.getInt() != ItBB; }
+
+ // Get the immediate successor. This function may return a Basic Block
+ // RegionNode or a subregion RegionNode.
+ RegionNode* getISucc(BasicBlock* BB) const {
+ RegionNode *succ;
+ succ = getNode()->getParent()->getNode(BB);
+ assert(succ && "BB not in Region or entered subregion!");
+ return succ;
+ }
+
+ // getRegionSucc - Return the successor basic block of a SubRegion RegionNode.
+ inline BasicBlock* getRegionSucc() const {
+ assert(Node.getInt() == ItRgBegin && "Cannot get the region successor!");
+ return getNode()->template getNodeAs<Region>()->getExit();
+ }
+
+ // isExit - Is this the exit BB of the Region?
+ inline bool isExit(BasicBlock* BB) const {
+ return getNode()->getParent()->getExit() == BB;
+ }
+public:
+ typedef RNSuccIterator<NodeType> Self;
+
+ typedef typename super::pointer pointer;
+
+ /// @brief Create begin iterator of a RegionNode.
+ inline RNSuccIterator(NodeType* node)
+ : Node(node, node->isSubRegion() ? ItRgBegin : ItBB),
+ BItor(succ_begin(node->getEntry())) {
+
+
+ // Skip the exit block
+ if (!isRegionMode())
+ while (succ_end(node->getEntry()) != BItor && isExit(*BItor))
+ ++BItor;
+
+ if (isRegionMode() && isExit(getRegionSucc()))
+ advanceRegionSucc();
+ }
+
+ /// @brief Create an end iterator.
+ inline RNSuccIterator(NodeType* node, bool)
+ : Node(node, node->isSubRegion() ? ItRgEnd : ItBB),
+ BItor(succ_end(node->getEntry())) {}
+
+ inline bool operator==(const Self& x) const {
+ assert(isRegionMode() == x.isRegionMode() && "Broken iterator!");
+ if (isRegionMode())
+ return Node.getInt() == x.Node.getInt();
+ else
+ return BItor == x.BItor;
+ }
+
+ inline bool operator!=(const Self& x) const { return !operator==(x); }
+
+ inline pointer operator*() const {
+ BasicBlock* BB = isRegionMode() ? getRegionSucc() : *BItor;
+ assert(!isExit(BB) && "Iterator out of range!");
+ return getISucc(BB);
+ }
+
+ inline Self& operator++() {
+ if(isRegionMode()) {
+ // The Region only has 1 successor.
+ advanceRegionSucc();
+ } else {
+ // Skip the exit.
+ do
+ ++BItor;
+ while (BItor != succ_end(getNode()->getEntry())
+ && isExit(*BItor));
+ }
+ return *this;
+ }
+
+ inline Self operator++(int) {
+ Self tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ inline const Self &operator=(const Self &I) {
+ if (this != &I) {
+ assert(getNode()->getParent() == I.getNode()->getParent()
+ && "Cannot assign iterators of two different regions!");
+ Node = I.Node;
+ BItor = I.BItor;
+ }
+ return *this;
+ }
+};
+
+
+//===----------------------------------------------------------------------===//
+/// @brief Flat RegionNode iterator.
+///
+/// The Flat Region iterator will iterate over all BasicBlock RegionNodes that
+/// are contained in the Region and its subregions. This is close to a virtual
+/// control flow graph of the Region.
+template<class NodeType>
+class RNSuccIterator<FlatIt<NodeType> >
+ : public std::iterator<std::forward_iterator_tag, NodeType, ptrdiff_t>
+{
+ typedef std::iterator<std::forward_iterator_tag, NodeType, ptrdiff_t> super;
+ NodeType* Node;
+ succ_iterator Itor;
+
+public:
+ typedef RNSuccIterator<FlatIt<NodeType> > Self;
+ typedef typename super::pointer pointer;
+
+ /// @brief Create the iterator from a RegionNode.
+ ///
+ /// Note that the incoming node must be a bb node, otherwise it will trigger
+ /// an assertion when we try to get a BasicBlock.
+ inline RNSuccIterator(NodeType* node) : Node(node),
+ Itor(succ_begin(node->getEntry())) {
+ assert(!Node->isSubRegion()
+ && "Subregion node not allowed in flat iterating mode!");
+ assert(Node->getParent() && "A BB node must have a parent!");
+
+ // Skip the exit block of the iterating region.
+ while (succ_end(Node->getEntry()) != Itor
+ && Node->getParent()->getExit() == *Itor)
+ ++Itor;
+ }
+ /// @brief Create an end iterator
+ inline RNSuccIterator(NodeType* node, bool) : Node(node),
+ Itor(succ_end(node->getEntry())) {
+ assert(!Node->isSubRegion()
+ && "Subregion node not allowed in flat iterating mode!");
+ }
+
+ inline bool operator==(const Self& x) const {
+ assert(Node->getParent() == x.Node->getParent()
+ && "Cannot compare iterators of different regions!");
+
+ return Itor == x.Itor && Node == x.Node;
+ }
+
+ inline bool operator!=(const Self& x) const { return !operator==(x); }
+
+ inline pointer operator*() const {
+ BasicBlock* BB = *Itor;
+
+ // Get the iterating region.
+ Region* Parent = Node->getParent();
+
+ // The only case in which a successor reaches out of the region is when it
+ // reaches the exit of the region.
+ assert(Parent->getExit() != BB && "iterator out of range!");
+
+ return Parent->getBBNode(BB);
+ }
+
+ inline Self& operator++() {
+ // Skip the exit block of the iterating region.
+ do
+ ++Itor;
+ while (Itor != succ_end(Node->getEntry())
+ && Node->getParent()->getExit() == *Itor);
+
+ return *this;
+ }
+
+ inline Self operator++(int) {
+ Self tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ inline const Self &operator=(const Self &I) {
+ if (this != &I) {
+ assert(Node->getParent() == I.Node->getParent()
+ && "Cannot assign iterators to two different regions!");
+ Node = I.Node;
+ Itor = I.Itor;
+ }
+ return *this;
+ }
+};
+
+template<class NodeType>
+inline RNSuccIterator<NodeType> succ_begin(NodeType* Node) {
+ return RNSuccIterator<NodeType>(Node);
+}
+
+template<class NodeType>
+inline RNSuccIterator<NodeType> succ_end(NodeType* Node) {
+ return RNSuccIterator<NodeType>(Node, true);
+}
+
+//===--------------------------------------------------------------------===//
+// RegionNode GraphTraits specialization so the bbs in the region can be
+// iterated by generic graph iterators.
+//
+// NodeT can either be region node or const region node, otherwise child_begin
+// and child_end fail.
+
+#define RegionNodeGraphTraits(NodeT) \
+ template<> struct GraphTraits<NodeT*> { \
+ typedef NodeT NodeType; \
+ typedef RNSuccIterator<NodeType> ChildIteratorType; \
+ static NodeType *getEntryNode(NodeType* N) { return N; } \
+ static inline ChildIteratorType child_begin(NodeType *N) { \
+ return RNSuccIterator<NodeType>(N); \
+ } \
+ static inline ChildIteratorType child_end(NodeType *N) { \
+ return RNSuccIterator<NodeType>(N, true); \
+ } \
+}; \
+template<> struct GraphTraits<FlatIt<NodeT*> > { \
+ typedef NodeT NodeType; \
+ typedef RNSuccIterator<FlatIt<NodeT> > ChildIteratorType; \
+ static NodeType *getEntryNode(NodeType* N) { return N; } \
+ static inline ChildIteratorType child_begin(NodeType *N) { \
+ return RNSuccIterator<FlatIt<NodeType> >(N); \
+ } \
+ static inline ChildIteratorType child_end(NodeType *N) { \
+ return RNSuccIterator<FlatIt<NodeType> >(N, true); \
+ } \
+}
+
+#define RegionGraphTraits(RegionT, NodeT) \
+template<> struct GraphTraits<RegionT*> \
+ : public GraphTraits<NodeT*> { \
+ typedef df_iterator<NodeType*> nodes_iterator; \
+ static NodeType *getEntryNode(RegionT* R) { \
+ return R->getNode(R->getEntry()); \
+ } \
+ static nodes_iterator nodes_begin(RegionT* R) { \
+ return nodes_iterator::begin(getEntryNode(R)); \
+ } \
+ static nodes_iterator nodes_end(RegionT* R) { \
+ return nodes_iterator::end(getEntryNode(R)); \
+ } \
+}; \
+template<> struct GraphTraits<FlatIt<RegionT*> > \
+ : public GraphTraits<FlatIt<NodeT*> > { \
+ typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false, \
+ GraphTraits<FlatIt<NodeType*> > > nodes_iterator; \
+ static NodeType *getEntryNode(RegionT* R) { \
+ return R->getBBNode(R->getEntry()); \
+ } \
+ static nodes_iterator nodes_begin(RegionT* R) { \
+ return nodes_iterator::begin(getEntryNode(R)); \
+ } \
+ static nodes_iterator nodes_end(RegionT* R) { \
+ return nodes_iterator::end(getEntryNode(R)); \
+ } \
+}
+
+RegionNodeGraphTraits(RegionNode);
+RegionNodeGraphTraits(const RegionNode);
+
+RegionGraphTraits(Region, RegionNode);
+RegionGraphTraits(const Region, const RegionNode);
+
+template <> struct GraphTraits<RegionInfo*>
+ : public GraphTraits<FlatIt<RegionNode*> > {
+ typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false,
+ GraphTraits<FlatIt<NodeType*> > > nodes_iterator;
+
+ static NodeType *getEntryNode(RegionInfo *RI) {
+ return GraphTraits<FlatIt<Region*> >::getEntryNode(RI->getTopLevelRegion());
+ }
+ static nodes_iterator nodes_begin(RegionInfo* RI) {
+ return nodes_iterator::begin(getEntryNode(RI));
+ }
+ static nodes_iterator nodes_end(RegionInfo *RI) {
+ return nodes_iterator::end(getEntryNode(RI));
+ }
+};
+
+} // End namespace llvm
+
+#endif
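The GraphTraits specializations above let the generic graph algorithms walk a Region directly. The following sketch runs a depth-first walk over one Region with df_iterator, which is an assumed but typical use of these traits:

    #include "llvm/Analysis/RegionIterator.h"
    #include "llvm/ADT/DepthFirstIterator.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    // Hypothetical helper: visit the nodes of one Region in depth-first
    // order, treating contained subregions as single nodes.
    static void dumpRegionNodes(Region *R) {
      for (df_iterator<Region*> I = df_begin(R), E = df_end(R); I != E; ++I) {
        RegionNode *N = *I;
        if (N->isSubRegion())
          errs() << "subregion: " << N->getNodeAs<Region>()->getNameStr() << "\n";
        else
          errs() << "block: " << N->getNodeAs<BasicBlock>()->getName() << "\n";
      }
    }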
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/RegionPrinter.h b/libclamav/c++/llvm/include/llvm/Analysis/RegionPrinter.h
new file mode 100644
index 0000000..758748a
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/Analysis/RegionPrinter.h
@@ -0,0 +1,26 @@
+//===-- RegionPrinter.h - Region printer external interface -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines external functions that can be called to explicitly
+// instantiate the region printer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_REGIONPRINTER_H
+#define LLVM_ANALYSIS_REGIONPRINTER_H
+
+namespace llvm {
+ class FunctionPass;
+ FunctionPass *createRegionViewerPass();
+ FunctionPass *createRegionOnlyViewerPass();
+ FunctionPass *createRegionPrinterPass();
+ FunctionPass *createRegionOnlyPrinterPass();
+} // End llvm namespace
+
+#endif
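These factories plug the region analysis into the usual printer and viewer machinery. A small sketch of a debugging hook that schedules them; the hook itself and the GraphvizAvailable switch are assumptions:

    #include "llvm/Analysis/RegionPrinter.h"
    #include "llvm/PassManager.h"

    using namespace llvm;

    // Hypothetical debugging hook.
    static void addRegionVisualization(PassManager &PM, bool GraphvizAvailable) {
      if (GraphvizAvailable)
        PM.add(createRegionOnlyViewerPass());   // display the region graph
      else
        PM.add(createRegionPrinterPass());      // print the region graph
    }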
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/ScalarEvolution.h b/libclamav/c++/llvm/include/llvm/Analysis/ScalarEvolution.h
index 96d29ba..1fa94e9 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -44,12 +44,21 @@ namespace llvm {
class Loop;
class LoopInfo;
class Operator;
+ class SCEVUnknown;
+ class SCEV;
+ template<> struct FoldingSetTrait<SCEV>;
/// SCEV - This class represents an analyzed expression in the program. These
/// are opaque objects that the client is not allowed to do much with
/// directly.
///
- class SCEV : public FastFoldingSetNode {
+ class SCEV : public FoldingSetNode {
+ friend struct FoldingSetTrait<SCEV>;
+
+ /// FastID - A reference to an Interned FoldingSetNodeID for this node.
+ /// The ScalarEvolution's BumpPtrAllocator holds the data.
+ FoldingSetNodeIDRef FastID;
+
// The SCEV baseclass this node corresponds to
const unsigned short SCEVType;
@@ -64,8 +73,8 @@ namespace llvm {
protected:
virtual ~SCEV();
public:
- explicit SCEV(const FoldingSetNodeID &ID, unsigned SCEVTy) :
- FastFoldingSetNode(ID), SCEVType(SCEVTy), SubclassData(0) {}
+ explicit SCEV(const FoldingSetNodeIDRef ID, unsigned SCEVTy) :
+ FastID(ID), SCEVType(SCEVTy), SubclassData(0) {}
unsigned getSCEVType() const { return SCEVType; }
@@ -118,6 +127,21 @@ namespace llvm {
void dump() const;
};
+ // Specialize FoldingSetTrait for SCEV to avoid needing to compute
+ // temporary FoldingSetNodeID values.
+ template<> struct FoldingSetTrait<SCEV> : DefaultFoldingSetTrait<SCEV> {
+ static void Profile(const SCEV &X, FoldingSetNodeID& ID) {
+ ID = X.FastID;
+ }
+ static bool Equals(const SCEV &X, const FoldingSetNodeID &ID,
+ FoldingSetNodeID &TempID) {
+ return ID == X.FastID;
+ }
+ static unsigned ComputeHash(const SCEV &X, FoldingSetNodeID &TempID) {
+ return X.FastID.ComputeHash();
+ }
+ };
+
inline raw_ostream &operator<<(raw_ostream &OS, const SCEV &S) {
S.print(OS);
return OS;
@@ -168,6 +192,7 @@ namespace llvm {
friend class SCEVCallbackVH;
friend class SCEVExpander;
+ friend class SCEVUnknown;
/// F - The function we are analyzing.
///
@@ -189,9 +214,14 @@ namespace llvm {
/// counts and things.
SCEVCouldNotCompute CouldNotCompute;
- /// Scalars - This is a cache of the scalars we have analyzed so far.
+ /// ValueExprMapType - The typedef for ValueExprMap.
+ ///
+ typedef DenseMap<SCEVCallbackVH, const SCEV *, DenseMapInfo<Value *> >
+ ValueExprMapType;
+
+ /// ValueExprMap - This is a cache of the values we have analyzed so far.
///
- std::map<SCEVCallbackVH, const SCEV *> Scalars;
+ ValueExprMapType ValueExprMap;
/// BackedgeTakenInfo - Information about the backedge-taken count
/// of a loop. This currently includes an exact count and a maximum count.
@@ -256,7 +286,7 @@ namespace llvm {
/// ForgetSymbolicValue - This looks up computed SCEV values for all
/// instructions that depend on the given instruction and removes them from
- /// the Scalars map if they reference SymName. This is used during PHI
+ /// the ValueExprMap if they reference SymName. This is used during PHI
/// resolution.
void ForgetSymbolicName(Instruction *I, const SCEV *SymName);
@@ -336,20 +366,18 @@ namespace llvm {
BackedgeTakenInfo HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
const Loop *L, bool isSigned);
- /// getLoopPredecessor - If the given loop's header has exactly one unique
- /// predecessor outside the loop, return it. Otherwise return null.
- BasicBlock *getLoopPredecessor(const Loop *L);
-
/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
/// (which may not be an immediate predecessor) which has exactly one
/// successor from which BB is reachable, or null if no such block is
/// found.
- BasicBlock* getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB);
+ std::pair<BasicBlock *, BasicBlock *>
+ getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB);
- /// isImpliedCond - Test whether the condition described by Pred, LHS,
- /// and RHS is true whenever the given Cond value evaluates to true.
- bool isImpliedCond(Value *Cond, ICmpInst::Predicate Pred,
+ /// isImpliedCond - Test whether the condition described by Pred, LHS, and
+ /// RHS is true whenever the given FoundCondValue value evaluates to true.
+ bool isImpliedCond(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS,
+ Value *FoundCondValue,
bool Inverse);
/// isImpliedCondOperands - Test whether the condition described by Pred,
@@ -373,6 +401,13 @@ namespace llvm {
Constant *getConstantEvolutionLoopExitValue(PHINode *PN, const APInt& BEs,
const Loop *L);
+ /// isKnownPredicateWithRanges - Test if the given expression is known to
+ /// satisfy the condition described by Pred and the known constant ranges
+ /// of LHS and RHS.
+ ///
+ bool isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS);
+
public:
static char ID; // Pass identification, replacement for typeid
ScalarEvolution();
@@ -515,10 +550,6 @@ namespace llvm {
/// widening.
const SCEV *getTruncateOrNoop(const SCEV *V, const Type *Ty);
- /// getIntegerSCEV - Given a SCEVable type, create a constant for the
- /// specified signed integer value and return a SCEV for the constant.
- const SCEV *getIntegerSCEV(int64_t Val, const Type *Ty);
-
/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umax operation
/// with them.
@@ -547,11 +578,11 @@ namespace llvm {
/// getSCEVAtScope(getSCEV(V), L).
const SCEV *getSCEVAtScope(Value *V, const Loop *L);
- /// isLoopGuardedByCond - Test whether entry to the loop is protected by
- /// a conditional between LHS and RHS. This is used to help avoid max
+ /// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
+ /// by a conditional between LHS and RHS. This is used to help avoid max
/// expressions in loop trip counts, and to eliminate casts.
- bool isLoopGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS);
+ bool isLoopEntryGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS);
/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS. This is used to
@@ -629,12 +660,21 @@ namespace llvm {
///
bool isKnownNonZero(const SCEV *S);
- /// isKnownNonZero - Test if the given expression is known to satisfy
+ /// isKnownPredicate - Test if the given expression is known to satisfy
/// the condition described by Pred, LHS, and RHS.
///
bool isKnownPredicate(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS);
+ /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
+ /// predicate Pred. Return true iff any changes were made. If the
+ /// operands are provably equal or inequal, LHS and RHS are set to
+ /// the same value and Pred is set to either ICMP_EQ or ICMP_NE.
+ ///
+ bool SimplifyICmpOperands(ICmpInst::Predicate &Pred,
+ const SCEV *&LHS,
+ const SCEV *&RHS);
+
virtual bool runOnFunction(Function &F);
virtual void releaseMemory();
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
@@ -643,6 +683,11 @@ namespace llvm {
private:
FoldingSet<SCEV> UniqueSCEVs;
BumpPtrAllocator SCEVAllocator;
+
+ /// FirstUnknown - The head of a linked list of all SCEVUnknown
+ /// values that have been allocated. This is used by releaseMemory
+ /// to locate them all and call their destructors.
+ SCEVUnknown *FirstUnknown;
};
}
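
A side note on the FoldingSetTrait<SCEV> specialization added above: it lets the folding set answer Profile/Equals/ComputeHash from the node's interned FoldingSetNodeIDRef instead of rebuilding a temporary FoldingSetNodeID. A minimal sketch of the same pattern for a hypothetical MyExpr node (illustrative only, not part of the patch):

#include "llvm/ADT/FoldingSet.h"

namespace llvm {
class MyExpr;
template<> struct FoldingSetTrait<MyExpr>;

// A hypothetical interned node; its FoldingSetNodeIDRef is owned elsewhere
// (e.g. by a BumpPtrAllocator), as ScalarEvolution does for SCEV.
class MyExpr : public FoldingSetNode {
  friend struct FoldingSetTrait<MyExpr>;
  FoldingSetNodeIDRef FastID;
public:
  explicit MyExpr(FoldingSetNodeIDRef ID) : FastID(ID) {}
};

// Answer the folding-set queries from the cached ID.
template<> struct FoldingSetTrait<MyExpr> : DefaultFoldingSetTrait<MyExpr> {
  static void Profile(const MyExpr &X, FoldingSetNodeID &ID) { ID = X.FastID; }
  static bool Equals(const MyExpr &X, const FoldingSetNodeID &ID,
                     FoldingSetNodeID &TempID) { return ID == X.FastID; }
  static unsigned ComputeHash(const MyExpr &X, FoldingSetNodeID &TempID) {
    return X.FastID.ComputeHash();
  }
};
} // End llvm namespace
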
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h b/libclamav/c++/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
index 26dc0c4..4b02f82 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -15,8 +15,10 @@
#define LLVM_ANALYSIS_SCALAREVOLUTION_EXPANDER_H
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/TargetFolder.h"
+#include "llvm/Support/ValueHandle.h"
#include <set>
namespace llvm {
@@ -30,14 +32,15 @@ namespace llvm {
ScalarEvolution &SE;
std::map<std::pair<const SCEV *, Instruction *>, AssertingVH<Value> >
InsertedExpressions;
- std::set<Value*> InsertedValues;
+ std::set<AssertingVH<Value> > InsertedValues;
+ std::set<AssertingVH<Value> > InsertedPostIncValues;
- /// PostIncLoop - When non-null, expanded addrecs referring to the given
- /// loop expanded in post-inc mode. For example, expanding {1,+,1}<L> in
- /// post-inc mode returns the add instruction that adds one to the phi
- /// for {0,+,1}<L>, as opposed to a new phi starting at 1. This is only
- /// supported in non-canonical mode.
- const Loop *PostIncLoop;
+ /// PostIncLoops - Addrecs referring to any of the given loops are expanded
+ /// in post-inc mode. For example, expanding {1,+,1}<L> in post-inc mode
+ /// returns the add instruction that adds one to the phi for {0,+,1}<L>,
+ /// as opposed to a new phi starting at 1. This is only supported in
+ /// non-canonical mode.
+ PostIncLoopSet PostIncLoops;
/// IVIncInsertPos - When this is non-null, addrecs expanded in the
/// loop it indicates should be inserted with increments at
@@ -62,29 +65,29 @@ namespace llvm {
public:
/// SCEVExpander - Construct a SCEVExpander in "canonical" mode.
explicit SCEVExpander(ScalarEvolution &se)
- : SE(se), PostIncLoop(0), IVIncInsertLoop(0), CanonicalMode(true),
+ : SE(se), IVIncInsertLoop(0), CanonicalMode(true),
Builder(se.getContext(), TargetFolder(se.TD)) {}
/// clear - Erase the contents of the InsertedExpressions map so that users
/// trying to expand the same expression into multiple BasicBlocks or
/// different places within the same BasicBlock can do so.
- void clear() { InsertedExpressions.clear(); }
+ void clear() {
+ InsertedExpressions.clear();
+ InsertedValues.clear();
+ InsertedPostIncValues.clear();
+ }
/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
- Value *getOrInsertCanonicalInductionVariable(const Loop *L, const Type *Ty);
+ PHINode *getOrInsertCanonicalInductionVariable(const Loop *L,
+ const Type *Ty);
/// expandCodeFor - Insert code to directly compute the specified SCEV
/// expression into the program. The inserted code is inserted into the
/// specified block.
- Value *expandCodeFor(const SCEV *SH, const Type *Ty, Instruction *I) {
- BasicBlock::iterator IP = I;
- while (isInsertedInstruction(IP)) ++IP;
- Builder.SetInsertPoint(IP->getParent(), IP);
- return expandCodeFor(SH, Ty);
- }
+ Value *expandCodeFor(const SCEV *SH, const Type *Ty, Instruction *I);
/// setIVIncInsertPos - Set the current IV increment loop and position.
void setIVIncInsertPos(const Loop *L, Instruction *Pos) {
@@ -94,14 +97,22 @@ namespace llvm {
IVIncInsertPos = Pos;
}
- /// setPostInc - If L is non-null, enable post-inc expansion for addrecs
- /// referring to the given loop. If L is null, disable post-inc expansion
- /// completely. Post-inc expansion is only supported in non-canonical
+ /// setPostInc - Enable post-inc expansion for addrecs referring to the
+ /// given loops. Post-inc expansion is only supported in non-canonical
/// mode.
- void setPostInc(const Loop *L) {
+ void setPostInc(const PostIncLoopSet &L) {
assert(!CanonicalMode &&
"Post-inc expansion is not supported in CanonicalMode");
- PostIncLoop = L;
+ PostIncLoops = L;
+ }
+
+ /// clearPostInc - Disable all post-inc expansion.
+ void clearPostInc() {
+ PostIncLoops.clear();
+
+ // When we change the post-inc loop set, cached expansions may no
+ // longer be valid.
+ InsertedPostIncValues.clear();
}
/// disableCanonicalMode - Disable the behavior of expanding expressions in
@@ -109,6 +120,13 @@ namespace llvm {
/// is useful for late optimization passes.
void disableCanonicalMode() { CanonicalMode = false; }
+ /// clearInsertPoint - Clear the current insertion point. This is useful
+ /// if the instruction that had been serving as the insertion point may
+ /// have been deleted.
+ void clearInsertPoint() {
+ Builder.ClearInsertionPoint();
+ }
+
private:
LLVMContext &getContext() const { return SE.getContext(); }
@@ -116,6 +134,14 @@ namespace llvm {
/// of work to avoid inserting an obviously redundant operation.
Value *InsertBinop(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS);
+ /// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
+ /// reusing an existing cast if a suitable one exists, moving an existing
+ /// cast if a suitable one exists but isn't in the right place, or
+ /// creating a new one.
+ Value *ReuseOrCreateCast(Value *V, const Type *Ty,
+ Instruction::CastOps Op,
+ BasicBlock::iterator IP);
+
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to
/// share the casts.
@@ -139,7 +165,7 @@ namespace llvm {
/// inserted by the code rewriter. If so, the client should not modify the
/// instruction.
bool isInsertedInstruction(Instruction *I) const {
- return InsertedValues.count(I);
+ return InsertedValues.count(I) || InsertedPostIncValues.count(I);
}
Value *visitConstant(const SCEVConstant *S) {
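
With PostIncLoop replaced by the PostIncLoops set above, a client drives post-inc expansion roughly as follows. This is a hedged sketch; the wrapper function and its arguments are assumptions, only the SCEVExpander calls come from the header:

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"

using namespace llvm;

// Expand Expr, with addrecs referring to loop L expanded in post-inc form.
static Value *expandPostInc(ScalarEvolution &SE, const SCEV *Expr,
                            const Loop *L, const Type *Ty, Instruction *IP) {
  SCEVExpander Exp(SE);
  Exp.disableCanonicalMode();   // post-inc expansion needs non-canonical mode
  PostIncLoopSet Loops;
  Loops.insert(L);
  Exp.setPostInc(Loops);        // expand addrecs of L in post-inc mode
  Value *V = Exp.expandCodeFor(Expr, Ty, IP);
  Exp.clearPostInc();           // cached post-inc expansions are now stale
  return V;
}
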
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h b/libclamav/c++/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
index 0ab3b3f..4213a28 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -37,7 +37,7 @@ namespace llvm {
friend class ScalarEvolution;
ConstantInt *V;
- SCEVConstant(const FoldingSetNodeID &ID, ConstantInt *v) :
+ SCEVConstant(const FoldingSetNodeIDRef ID, ConstantInt *v) :
SCEV(ID, scConstant), V(v) {}
public:
ConstantInt *getValue() const { return V; }
@@ -81,7 +81,7 @@ namespace llvm {
const SCEV *Op;
const Type *Ty;
- SCEVCastExpr(const FoldingSetNodeID &ID,
+ SCEVCastExpr(const FoldingSetNodeIDRef ID,
unsigned SCEVTy, const SCEV *op, const Type *ty);
public:
@@ -120,7 +120,7 @@ namespace llvm {
class SCEVTruncateExpr : public SCEVCastExpr {
friend class ScalarEvolution;
- SCEVTruncateExpr(const FoldingSetNodeID &ID,
+ SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty);
public:
@@ -140,7 +140,7 @@ namespace llvm {
class SCEVZeroExtendExpr : public SCEVCastExpr {
friend class ScalarEvolution;
- SCEVZeroExtendExpr(const FoldingSetNodeID &ID,
+ SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty);
public:
@@ -160,7 +160,7 @@ namespace llvm {
class SCEVSignExtendExpr : public SCEVCastExpr {
friend class ScalarEvolution;
- SCEVSignExtendExpr(const FoldingSetNodeID &ID,
+ SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty);
public:
@@ -180,53 +180,36 @@ namespace llvm {
///
class SCEVNAryExpr : public SCEV {
protected:
- SmallVector<const SCEV *, 8> Operands;
+ // Since SCEVs are immutable, ScalarEvolution allocates operand
+ // arrays with its SCEVAllocator, so this class just needs a simple
+ // pointer rather than a more elaborate vector-like data structure.
+ // This also avoids the need for a non-trivial destructor.
+ const SCEV *const *Operands;
+ size_t NumOperands;
- SCEVNAryExpr(const FoldingSetNodeID &ID,
- enum SCEVTypes T, const SmallVectorImpl<const SCEV *> &ops)
- : SCEV(ID, T), Operands(ops.begin(), ops.end()) {}
+ SCEVNAryExpr(const FoldingSetNodeIDRef ID,
+ enum SCEVTypes T, const SCEV *const *O, size_t N)
+ : SCEV(ID, T), Operands(O), NumOperands(N) {}
public:
- unsigned getNumOperands() const { return (unsigned)Operands.size(); }
+ size_t getNumOperands() const { return NumOperands; }
const SCEV *getOperand(unsigned i) const {
- assert(i < Operands.size() && "Operand index out of range!");
+ assert(i < NumOperands && "Operand index out of range!");
return Operands[i];
}
- const SmallVectorImpl<const SCEV *> &getOperands() const {
- return Operands;
- }
- typedef SmallVectorImpl<const SCEV *>::const_iterator op_iterator;
- op_iterator op_begin() const { return Operands.begin(); }
- op_iterator op_end() const { return Operands.end(); }
+ typedef const SCEV *const *op_iterator;
+ op_iterator op_begin() const { return Operands; }
+ op_iterator op_end() const { return Operands + NumOperands; }
- virtual bool isLoopInvariant(const Loop *L) const {
- for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (!getOperand(i)->isLoopInvariant(L)) return false;
- return true;
- }
+ virtual bool isLoopInvariant(const Loop *L) const;
// hasComputableLoopEvolution - N-ary expressions have computable loop
// evolutions iff they have at least one operand that varies with the loop,
// but that all varying operands are computable.
- virtual bool hasComputableLoopEvolution(const Loop *L) const {
- bool HasVarying = false;
- for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (!getOperand(i)->isLoopInvariant(L)) {
- if (getOperand(i)->hasComputableLoopEvolution(L))
- HasVarying = true;
- else
- return false;
- }
- return HasVarying;
- }
+ virtual bool hasComputableLoopEvolution(const Loop *L) const;
- virtual bool hasOperand(const SCEV *O) const {
- for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (O == getOperand(i) || getOperand(i)->hasOperand(O))
- return true;
- return false;
- }
+ virtual bool hasOperand(const SCEV *O) const;
bool dominates(BasicBlock *BB, DominatorTree *DT) const;
@@ -260,10 +243,9 @@ namespace llvm {
///
class SCEVCommutativeExpr : public SCEVNAryExpr {
protected:
- SCEVCommutativeExpr(const FoldingSetNodeID &ID,
- enum SCEVTypes T,
- const SmallVectorImpl<const SCEV *> &ops)
- : SCEVNAryExpr(ID, T, ops) {}
+ SCEVCommutativeExpr(const FoldingSetNodeIDRef ID,
+ enum SCEVTypes T, const SCEV *const *O, size_t N)
+ : SCEVNAryExpr(ID, T, O, N) {}
public:
virtual const char *getOperationStr() const = 0;
@@ -287,9 +269,9 @@ namespace llvm {
class SCEVAddExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
- SCEVAddExpr(const FoldingSetNodeID &ID,
- const SmallVectorImpl<const SCEV *> &ops)
- : SCEVCommutativeExpr(ID, scAddExpr, ops) {
+ SCEVAddExpr(const FoldingSetNodeIDRef ID,
+ const SCEV *const *O, size_t N)
+ : SCEVCommutativeExpr(ID, scAddExpr, O, N) {
}
public:
@@ -315,9 +297,9 @@ namespace llvm {
class SCEVMulExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
- SCEVMulExpr(const FoldingSetNodeID &ID,
- const SmallVectorImpl<const SCEV *> &ops)
- : SCEVCommutativeExpr(ID, scMulExpr, ops) {
+ SCEVMulExpr(const FoldingSetNodeIDRef ID,
+ const SCEV *const *O, size_t N)
+ : SCEVCommutativeExpr(ID, scMulExpr, O, N) {
}
public:
@@ -339,7 +321,7 @@ namespace llvm {
const SCEV *LHS;
const SCEV *RHS;
- SCEVUDivExpr(const FoldingSetNodeID &ID, const SCEV *lhs, const SCEV *rhs)
+ SCEVUDivExpr(const FoldingSetNodeIDRef ID, const SCEV *lhs, const SCEV *rhs)
: SCEV(ID, scUDivExpr), LHS(lhs), RHS(rhs) {}
public:
@@ -389,10 +371,10 @@ namespace llvm {
const Loop *L;
- SCEVAddRecExpr(const FoldingSetNodeID &ID,
- const SmallVectorImpl<const SCEV *> &ops, const Loop *l)
- : SCEVNAryExpr(ID, scAddRecExpr, ops), L(l) {
- for (size_t i = 0, e = Operands.size(); i != e; ++i)
+ SCEVAddRecExpr(const FoldingSetNodeIDRef ID,
+ const SCEV *const *O, size_t N, const Loop *l)
+ : SCEVNAryExpr(ID, scAddRecExpr, O, N), L(l) {
+ for (size_t i = 0, e = NumOperands; i != e; ++i)
assert(Operands[i]->isLoopInvariant(l) &&
"Operands of AddRec must be loop-invariant!");
}
@@ -471,9 +453,9 @@ namespace llvm {
class SCEVSMaxExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
- SCEVSMaxExpr(const FoldingSetNodeID &ID,
- const SmallVectorImpl<const SCEV *> &ops)
- : SCEVCommutativeExpr(ID, scSMaxExpr, ops) {
+ SCEVSMaxExpr(const FoldingSetNodeIDRef ID,
+ const SCEV *const *O, size_t N)
+ : SCEVCommutativeExpr(ID, scSMaxExpr, O, N) {
// Max never overflows.
setHasNoUnsignedWrap(true);
setHasNoSignedWrap(true);
@@ -496,9 +478,9 @@ namespace llvm {
class SCEVUMaxExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
- SCEVUMaxExpr(const FoldingSetNodeID &ID,
- const SmallVectorImpl<const SCEV *> &ops)
- : SCEVCommutativeExpr(ID, scUMaxExpr, ops) {
+ SCEVUMaxExpr(const FoldingSetNodeIDRef ID,
+ const SCEV *const *O, size_t N)
+ : SCEVCommutativeExpr(ID, scUMaxExpr, O, N) {
// Max never overflows.
setHasNoUnsignedWrap(true);
setHasNoSignedWrap(true);
@@ -519,15 +501,28 @@ namespace llvm {
/// value, and only represent it as its LLVM Value. This is the "bottom"
/// value for the analysis.
///
- class SCEVUnknown : public SCEV {
+ class SCEVUnknown : public SCEV, private CallbackVH {
friend class ScalarEvolution;
- Value *V;
- SCEVUnknown(const FoldingSetNodeID &ID, Value *v) :
- SCEV(ID, scUnknown), V(v) {}
+ // Implement CallbackVH.
+ virtual void deleted();
+ virtual void allUsesReplacedWith(Value *New);
+
+ /// SE - The parent ScalarEvolution value. This is used to update
+ /// the parent's maps when the value associated with a SCEVUnknown
+ /// is deleted or RAUW'd.
+ ScalarEvolution *SE;
+
+ /// Next - The next pointer in the linked list of all
+ /// SCEVUnknown instances owned by a ScalarEvolution.
+ SCEVUnknown *Next;
+
+ SCEVUnknown(const FoldingSetNodeIDRef ID, Value *V,
+ ScalarEvolution *se, SCEVUnknown *next) :
+ SCEV(ID, scUnknown), CallbackVH(V), SE(se), Next(next) {}
public:
- Value *getValue() const { return V; }
+ Value *getValue() const { return getValPtr(); }
/// isSizeOf, isAlignOf, isOffsetOf - Test whether this is a special
/// constant representing a type size, alignment, or field offset in
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h b/libclamav/c++/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h
new file mode 100644
index 0000000..342e593
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h
@@ -0,0 +1,78 @@
+//===- llvm/Analysis/ScalarEvolutionNormalization.h - See below -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines utilities for working with "normalized" ScalarEvolution
+// expressions.
+//
+// The following example illustrates post-increment uses and how normalized
+// expressions help.
+//
+// for (i=0; i!=n; ++i) {
+// ...
+// }
+// use(i);
+//
+// While the expression for most uses of i inside the loop is {0,+,1}<%L>, the
+// expression for the use of i outside the loop is {1,+,1}<%L>, since i is
+// incremented at the end of the loop body. This is inconvenient, since it
+// suggests that we need two different induction variables, one that starts
+// at 0 and one that starts at 1. We'd prefer to be able to think of these as
+// the same induction variable, with uses inside the loop using the
+// "pre-incremented" value, and uses after the loop using the
+// "post-incremented" value.
+//
+// Expressions for post-incremented uses are represented as an expression
+// paired with a set of loops for which the expression is in "post-increment"
+// mode (there may be multiple loops).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SCALAREVOLUTION_NORMALIZATION_H
+#define LLVM_ANALYSIS_SCALAREVOLUTION_NORMALIZATION_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+
+namespace llvm {
+
+class Instruction;
+class DominatorTree;
+class Loop;
+class ScalarEvolution;
+class SCEV;
+class Value;
+
+/// TransformKind - Different types of transformations that
+/// TransformForPostIncUse can do.
+enum TransformKind {
+ /// Normalize - Normalize according to the given loops.
+ Normalize,
+ /// NormalizeAutodetect - Detect post-inc opportunities on new expressions,
+ /// update the given loop set, and normalize.
+ NormalizeAutodetect,
+ /// Denormalize - Perform the inverse transform on the expression with the
+ /// given loop set.
+ Denormalize
+};
+
+/// PostIncLoopSet - A set of loops.
+typedef SmallPtrSet<const Loop *, 2> PostIncLoopSet;
+
+/// TransformForPostIncUse - Transform the given expression according to the
+/// given transformation kind.
+const SCEV *TransformForPostIncUse(TransformKind Kind,
+ const SCEV *S,
+ Instruction *User,
+ Value *OperandValToReplace,
+ PostIncLoopSet &Loops,
+ ScalarEvolution &SE,
+ DominatorTree &DT);
+
+}
+
+#endif
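
A rough usage sketch for the interface declared above; the wrapper function and variable names are assumptions, not code from the patch:

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Analysis/Dominators.h"

using namespace llvm;

// Normalize a user's expression into post-inc form, then undo it.
static const SCEV *normalizeUse(const SCEV *S, Instruction *User,
                                Value *OperandToReplace,
                                ScalarEvolution &SE, DominatorTree &DT) {
  PostIncLoopSet Loops;          // filled in by NormalizeAutodetect
  const SCEV *N = TransformForPostIncUse(NormalizeAutodetect, S, User,
                                         OperandToReplace, Loops, SE, DT);
  // ... work with the normalized expression N ...
  // Denormalize with the same loop set to recover the original form.
  return TransformForPostIncUse(Denormalize, N, User, OperandToReplace,
                                Loops, SE, DT);
}
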
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/ValueTracking.h b/libclamav/c++/llvm/include/llvm/Analysis/ValueTracking.h
index 0791b7b..7b6026f 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/ValueTracking.h
@@ -77,27 +77,8 @@ namespace llvm {
///
bool CannotBeNegativeZero(const Value *V, unsigned Depth = 0);
- /// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose
- /// it into a base pointer with a constant offset and a number of scaled
- /// symbolic offsets.
- ///
- /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
- /// in the VarIndices vector) are Value*'s that are known to be scaled by the
- /// specified amount, but which may have other unrepresented high bits. As
- /// such, the gep cannot necessarily be reconstructed from its decomposed
- /// form.
- ///
- /// When TargetData is around, this function is capable of analyzing
- /// everything that Value::getUnderlyingObject() can look through. When not,
- /// it just looks through pointer casts.
- ///
- const Value *DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
- SmallVectorImpl<std::pair<const Value*, int64_t> > &VarIndices,
- const TargetData *TD);
-
-
- /// FindScalarValue - Given an aggregrate and an sequence of indices, see if
+ /// FindInsertedValue - Given an aggregate and a sequence of indices, see if
/// the scalar value indexed is already around as a register, for example if
/// it were inserted directly into the aggregate.
///
@@ -122,7 +103,8 @@ namespace llvm {
/// StopAtNul is set to true (the default), the returned string is truncated
/// by a nul character in the global. If StopAtNul is false, the nul
/// character is included in the result string.
- bool GetConstantStringInfo(Value *V, std::string &Str, uint64_t Offset = 0,
+ bool GetConstantStringInfo(const Value *V, std::string &Str,
+ uint64_t Offset = 0,
bool StopAtNul = true);
/// GetStringLength - If we can compute the length of the string pointed to by
diff --git a/libclamav/c++/llvm/include/llvm/Analysis/Verifier.h b/libclamav/c++/llvm/include/llvm/Analysis/Verifier.h
index a6b2a6d..ce8aeef 100644
--- a/libclamav/c++/llvm/include/llvm/Analysis/Verifier.h
+++ b/libclamav/c++/llvm/include/llvm/Analysis/Verifier.h
@@ -1,4 +1,4 @@
-//===-- llvm/Analysis/Verifier.h - Module Verifier --------------*- C++ -*-===//
+//===-- llvm/Analysis/Verifier.h - LLVM IR Verifier -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/libclamav/c++/llvm/include/llvm/Assembly/AsmAnnotationWriter.h b/libclamav/c++/llvm/include/llvm/Assembly/AsmAnnotationWriter.h
deleted file mode 100644
index 6d75720..0000000
--- a/libclamav/c++/llvm/include/llvm/Assembly/AsmAnnotationWriter.h
+++ /dev/null
@@ -1,58 +0,0 @@
-//===-- AsmAnnotationWriter.h - Itf for annotation .ll files - --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Clients of the assembly writer can use this interface to add their own
-// special-purpose annotations to LLVM assembly language printouts. Note that
-// the assembly parser won't be able to parse these, in general, so
-// implementations are advised to print stuff as LLVM comments.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ASSEMBLY_ASMANNOTATIONWRITER_H
-#define LLVM_ASSEMBLY_ASMANNOTATIONWRITER_H
-
-namespace llvm {
-
-class Function;
-class BasicBlock;
-class Instruction;
-class raw_ostream;
-class formatted_raw_ostream;
-
-class AssemblyAnnotationWriter {
-public:
-
- virtual ~AssemblyAnnotationWriter();
-
- /// emitFunctionAnnot - This may be implemented to emit a string right before
- /// the start of a function.
- virtual void emitFunctionAnnot(const Function *F, raw_ostream &OS) {}
-
- /// emitBasicBlockStartAnnot - This may be implemented to emit a string right
- /// after the basic block label, but before the first instruction in the block.
- virtual void emitBasicBlockStartAnnot(const BasicBlock *BB, raw_ostream &OS){
- }
-
- /// emitBasicBlockEndAnnot - This may be implemented to emit a string right
- /// after the basic block.
- virtual void emitBasicBlockEndAnnot(const BasicBlock *BB, raw_ostream &OS){
- }
-
- /// emitInstructionAnnot - This may be implemented to emit a string right
- /// before an instruction is emitted.
- virtual void emitInstructionAnnot(const Instruction *I, raw_ostream &OS) {}
-
- /// printInfoComment - This may be implemented to emit a comment to the
- /// right of an instruction or global value.
- virtual void printInfoComment(const Value &V, formatted_raw_ostream &OS) {}
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/libclamav/c++/llvm/include/llvm/Assembly/AssemblyAnnotationWriter.h b/libclamav/c++/llvm/include/llvm/Assembly/AssemblyAnnotationWriter.h
new file mode 100644
index 0000000..3a65f97
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/Assembly/AssemblyAnnotationWriter.h
@@ -0,0 +1,63 @@
+//===-- AssemblyAnnotationWriter.h - Annotation .ll files -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Clients of the assembly writer can use this interface to add their own
+// special-purpose annotations to LLVM assembly language printouts. Note that
+// the assembly parser won't be able to parse these, in general, so
+// implementations are advised to print stuff as LLVM comments.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ASSEMBLY_ASMANNOTATIONWRITER_H
+#define LLVM_ASSEMBLY_ASMANNOTATIONWRITER_H
+
+namespace llvm {
+
+class Function;
+class BasicBlock;
+class Instruction;
+class raw_ostream;
+class formatted_raw_ostream;
+
+class AssemblyAnnotationWriter {
+public:
+
+ virtual ~AssemblyAnnotationWriter();
+
+ /// emitFunctionAnnot - This may be implemented to emit a string right before
+ /// the start of a function.
+ virtual void emitFunctionAnnot(const Function *F,
+ formatted_raw_ostream &OS) {}
+
+ /// emitBasicBlockStartAnnot - This may be implemented to emit a string right
+ /// after the basic block label, but before the first instruction in the
+ /// block.
+ virtual void emitBasicBlockStartAnnot(const BasicBlock *BB,
+ formatted_raw_ostream &OS) {
+ }
+
+ /// emitBasicBlockEndAnnot - This may be implemented to emit a string right
+ /// after the basic block.
+ virtual void emitBasicBlockEndAnnot(const BasicBlock *BB,
+ formatted_raw_ostream &OS) {
+ }
+
+ /// emitInstructionAnnot - This may be implemented to emit a string right
+ /// before an instruction is emitted.
+ virtual void emitInstructionAnnot(const Instruction *I,
+ formatted_raw_ostream &OS) {}
+
+ /// printInfoComment - This may be implemented to emit a comment to the
+ /// right of an instruction or global value.
+ virtual void printInfoComment(const Value &V, formatted_raw_ostream &OS) {}
+};
+
+} // End llvm namespace
+
+#endif
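
For illustration, a minimal annotation writer against the new formatted_raw_ostream signatures; CommentWriter and dumpAnnotated are hypothetical, and Module::print(raw_ostream&, AssemblyAnnotationWriter*) is assumed as the usual entry point:

#include "llvm/Assembly/AssemblyAnnotationWriter.h"
#include "llvm/Module.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"

namespace {
// Prints a comment line above every instruction in the .ll output.
class CommentWriter : public llvm::AssemblyAnnotationWriter {
public:
  virtual void emitInstructionAnnot(const llvm::Instruction *,
                                    llvm::formatted_raw_ostream &OS) {
    OS << "; annotated by CommentWriter\n";
  }
};
}

// Dump a module with annotations (M is assumed to be a populated Module).
static void dumpAnnotated(const llvm::Module &M) {
  CommentWriter W;
  M.print(llvm::outs(), &W);
}
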
diff --git a/libclamav/c++/llvm/include/llvm/Assembly/PrintModulePass.h b/libclamav/c++/llvm/include/llvm/Assembly/PrintModulePass.h
index fb4f6a7..239fbcc 100644
--- a/libclamav/c++/llvm/include/llvm/Assembly/PrintModulePass.h
+++ b/libclamav/c++/llvm/include/llvm/Assembly/PrintModulePass.h
@@ -27,7 +27,9 @@ namespace llvm {
/// createPrintModulePass - Create and return a pass that writes the
/// module to the specified raw_ostream.
- ModulePass *createPrintModulePass(raw_ostream *OS, bool DeleteStream=false);
+ ModulePass *createPrintModulePass(raw_ostream *OS,
+ bool DeleteStream=false,
+ const std::string &Banner = "");
/// createPrintFunctionPass - Create and return a pass that prints
/// functions to the specified raw_ostream as they are processed.
diff --git a/libclamav/c++/llvm/include/llvm/AutoUpgrade.h b/libclamav/c++/llvm/include/llvm/AutoUpgrade.h
index 0a81c80..5ce20b6 100644
--- a/libclamav/c++/llvm/include/llvm/AutoUpgrade.h
+++ b/libclamav/c++/llvm/include/llvm/AutoUpgrade.h
@@ -16,6 +16,7 @@
namespace llvm {
class Module;
+ class GlobalVariable;
class Function;
class CallInst;
@@ -35,6 +36,10 @@ namespace llvm {
/// so that it can update all calls to the old function.
void UpgradeCallsToIntrinsic(Function* F);
+ /// This checks for global variables which should be upgraded. It returns true
+ /// if it requires upgrading.
+ bool UpgradeGlobalVariable(GlobalVariable *GV);
+
/// This function checks debug info intrinsics. If an intrinsic is invalid
/// then this function simply removes the intrinsic.
void CheckDebugInfoIntrinsics(Module *M);
diff --git a/libclamav/c++/llvm/include/llvm/BasicBlock.h b/libclamav/c++/llvm/include/llvm/BasicBlock.h
index e358f91..bf5874f 100644
--- a/libclamav/c++/llvm/include/llvm/BasicBlock.h
+++ b/libclamav/c++/llvm/include/llvm/BasicBlock.h
@@ -131,6 +131,12 @@ public:
const Instruction* getFirstNonPHI() const {
return const_cast<BasicBlock*>(this)->getFirstNonPHI();
}
+
+ // Same as above, but also skip debug intrinsics.
+ Instruction* getFirstNonPHIOrDbg();
+ const Instruction* getFirstNonPHIOrDbg() const {
+ return const_cast<BasicBlock*>(this)->getFirstNonPHIOrDbg();
+ }
/// removeFromParent - This method unlinks 'this' from the containing
/// function, but does not delete it.
diff --git a/libclamav/c++/llvm/include/llvm/Bitcode/Archive.h b/libclamav/c++/llvm/include/llvm/Bitcode/Archive.h
index 67f2a4a..934e764 100644
--- a/libclamav/c++/llvm/include/llvm/Bitcode/Archive.h
+++ b/libclamav/c++/llvm/include/llvm/Bitcode/Archive.h
@@ -107,7 +107,7 @@ class ArchiveMember : public ilist_node<ArchiveMember> {
/// into memory, the return value will be null.
/// @returns a pointer to the member's data.
/// @brief Get the data content of the archive member
- const void* getData() const { return data; }
+ const char* getData() const { return data; }
/// This method determines if the member is a regular compressed file.
/// @returns true iff the archive member is a compressed regular file.
@@ -172,7 +172,7 @@ class ArchiveMember : public ilist_node<ArchiveMember> {
sys::PathWithStatus path; ///< Path of file containing the member
sys::FileStatus info; ///< Status info (size,mode,date)
unsigned flags; ///< Flags about the archive member
- const void* data; ///< Data for the member
+ const char* data; ///< Data for the member
/// @}
/// @name Constructors
@@ -297,7 +297,7 @@ class Archive {
/// its symbol table without reading in any of the archive's members. This
/// reduces both I/O and cpu time in opening the archive if it is to be used
/// solely for symbol lookup (e.g. during linking). The \p Filename must
- /// exist and be an archive file or an exception will be thrown. This form
+ /// exist and be an archive file or an error will be returned. This form
/// of opening the archive is intended for read-only operations that need to
/// locate members via the symbol table for link editing. Since the archive
/// members are not read by this method, the archive will appear empty upon
@@ -306,8 +306,7 @@ class Archive {
/// if this form of opening the archive is used that only the symbol table
/// lookup methods (getSymbolTable, findModuleDefiningSymbol, and
/// findModulesDefiningSymbols) be used.
- /// @throws std::string if an error occurs opening the file
- /// @returns an Archive* that represents the archive file.
+ /// @returns an Archive* that represents the archive file, or null on error.
/// @brief Open an existing archive and load its symbols.
static Archive* OpenAndLoadSymbols(
const sys::Path& Filename, ///< Name of the archive file to open
@@ -319,7 +318,6 @@ class Archive {
/// closes files. It does nothing with the archive file on disk. If you
/// haven't used the writeToDisk method by the time the destructor is
/// called, all changes to the archive will be lost.
- /// @throws std::string if an error occurs
/// @brief Destruct in-memory archive
~Archive();
diff --git a/libclamav/c++/llvm/include/llvm/Bitcode/BitstreamWriter.h b/libclamav/c++/llvm/include/llvm/Bitcode/BitstreamWriter.h
index 31d513c..bfb3a4e 100644
--- a/libclamav/c++/llvm/include/llvm/Bitcode/BitstreamWriter.h
+++ b/libclamav/c++/llvm/include/llvm/Bitcode/BitstreamWriter.h
@@ -88,7 +88,7 @@ public:
//===--------------------------------------------------------------------===//
void Emit(uint32_t Val, unsigned NumBits) {
- assert(NumBits <= 32 && "Invalid value size!");
+ assert(NumBits && NumBits <= 32 && "Invalid value size!");
assert((Val & ~(~0U >> (32-NumBits))) == 0 && "High bits set!");
CurValue |= Val << CurBit;
if (CurBit + NumBits < 32) {
@@ -277,10 +277,12 @@ private:
switch (Op.getEncoding()) {
default: assert(0 && "Unknown encoding!");
case BitCodeAbbrevOp::Fixed:
- Emit((unsigned)V, (unsigned)Op.getEncodingData());
+ if (Op.getEncodingData())
+ Emit((unsigned)V, (unsigned)Op.getEncodingData());
break;
case BitCodeAbbrevOp::VBR:
- EmitVBR64(V, (unsigned)Op.getEncodingData());
+ if (Op.getEncodingData())
+ EmitVBR64(V, (unsigned)Op.getEncodingData());
break;
case BitCodeAbbrevOp::Char6:
Emit(BitCodeAbbrevOp::EncodeChar6((char)V), 6);
diff --git a/libclamav/c++/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/libclamav/c++/llvm/include/llvm/Bitcode/LLVMBitCodes.h
index a980df8..4f9b783 100644
--- a/libclamav/c++/llvm/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/libclamav/c++/llvm/include/llvm/Bitcode/LLVMBitCodes.h
@@ -94,8 +94,7 @@ namespace bitc {
TYPE_CODE_FP128 = 14, // LONG DOUBLE (112 bit mantissa)
TYPE_CODE_PPC_FP128= 15, // PPC LONG DOUBLE (2 doubles)
- TYPE_CODE_METADATA = 16, // METADATA
- TYPE_CODE_UNION = 17 // UNION: [eltty x N]
+ TYPE_CODE_METADATA = 16 // METADATA
};
// The type symbol table only has one code (TST_ENTRY_CODE).
@@ -111,12 +110,20 @@ namespace bitc {
enum MetadataCodes {
METADATA_STRING = 1, // MDSTRING: [values]
- METADATA_NODE = 2, // MDNODE: [n x (type num, value num)]
- METADATA_FN_NODE = 3, // FN_MDNODE: [n x (type num, value num)]
+ // FIXME: Remove NODE in favor of NODE2 in LLVM 3.0
+ METADATA_NODE = 2, // NODE with potentially invalid metadata
+ // FIXME: Remove FN_NODE in favor of FN_NODE2 in LLVM 3.0
+ METADATA_FN_NODE = 3, // FN_NODE with potentially invalid metadata
METADATA_NAME = 4, // STRING: [values]
- METADATA_NAMED_NODE = 5, // NAMEDMDNODE: [n x mdnodes]
+ // FIXME: Remove NAMED_NODE in favor of NAMED_NODE2 in LLVM 3.0
+ METADATA_NAMED_NODE = 5, // NAMED_NODE with potentially invalid metadata
METADATA_KIND = 6, // [n x [id, name]]
- METADATA_ATTACHMENT = 7 // [m x [value, [n x [id, mdnode]]]
+ // FIXME: Remove ATTACHMENT in favor of ATTACHMENT2 in LLVM 3.0
+ METADATA_ATTACHMENT = 7, // ATTACHMENT with potentially invalid metadata
+ METADATA_NODE2 = 8, // NODE2: [n x (type num, value num)]
+ METADATA_FN_NODE2 = 9, // FN_NODE2: [n x (type num, value num)]
+ METADATA_NAMED_NODE2 = 10, // NAMED_NODE2: [n x mdnodes]
+ METADATA_ATTACHMENT2 = 11 // [m x [value, [n x [id, mdnode]]]
};
// The constants block (CONSTANTS_BLOCK_ID) describes emission for each
// constant and maintains an implicit current type value.
@@ -224,7 +231,8 @@ namespace bitc {
FUNC_CODE_INST_LOAD = 20, // LOAD: [opty, op, align, vol]
// FIXME: Remove STORE in favor of STORE2 in LLVM 3.0
FUNC_CODE_INST_STORE = 21, // STORE: [valty,val,ptr, align, vol]
- FUNC_CODE_INST_CALL = 22, // CALL: [attr, fnty, fnid, args...]
+ // FIXME: Remove CALL in favor of CALL2 in LLVM 3.0
+ FUNC_CODE_INST_CALL = 22, // CALL with potentially invalid metadata
FUNC_CODE_INST_VAARG = 23, // VAARG: [valistty, valist, instty]
// This store code encodes the pointer type, rather than the value type
// this is so information only available in the pointer type (e.g. address
@@ -240,7 +248,15 @@ namespace bitc {
// new select on i1 or [N x i1]
FUNC_CODE_INST_VSELECT = 29, // VSELECT: [ty,opval,opval,predty,pred]
FUNC_CODE_INST_INBOUNDS_GEP= 30, // INBOUNDS_GEP: [n x operands]
- FUNC_CODE_INST_INDIRECTBR = 31 // INDIRECTBR: [opty, op0, op1, ...]
+ FUNC_CODE_INST_INDIRECTBR = 31, // INDIRECTBR: [opty, op0, op1, ...]
+
+ // FIXME: Remove DEBUG_LOC in favor of DEBUG_LOC2 in LLVM 3.0
+ FUNC_CODE_DEBUG_LOC = 32, // DEBUG_LOC with potentially invalid metadata
+ FUNC_CODE_DEBUG_LOC_AGAIN = 33, // DEBUG_LOC_AGAIN
+
+ FUNC_CODE_INST_CALL2 = 34, // CALL2: [attr, fnty, fnid, args...]
+
+ FUNC_CODE_DEBUG_LOC2 = 35 // DEBUG_LOC2: [Line,Col,ScopeVal, IAVal]
};
} // End bitc namespace
} // End llvm namespace
diff --git a/libclamav/c++/llvm/include/llvm/Bitcode/ReaderWriter.h b/libclamav/c++/llvm/include/llvm/Bitcode/ReaderWriter.h
index 45eb801..a186964 100644
--- a/libclamav/c++/llvm/include/llvm/Bitcode/ReaderWriter.h
+++ b/libclamav/c++/llvm/include/llvm/Bitcode/ReaderWriter.h
@@ -40,7 +40,8 @@ namespace llvm {
std::string *ErrMsg = 0);
/// WriteBitcodeToFile - Write the specified module to the specified
- /// raw output stream.
+ /// raw output stream. For streams where it matters, the given stream
+ /// should be in "binary" mode.
void WriteBitcodeToFile(const Module *M, raw_ostream &Out);
/// WriteBitcodeToStream - Write the specified module to the specified
diff --git a/libclamav/c++/llvm/include/llvm/CallGraphSCCPass.h b/libclamav/c++/llvm/include/llvm/CallGraphSCCPass.h
index feab763..7154aa3 100644
--- a/libclamav/c++/llvm/include/llvm/CallGraphSCCPass.h
+++ b/libclamav/c++/llvm/include/llvm/CallGraphSCCPass.h
@@ -29,11 +29,15 @@ namespace llvm {
class CallGraphNode;
class CallGraph;
class PMStack;
+class CallGraphSCC;
+
+class CallGraphSCCPass : public Pass {
+public:
+ explicit CallGraphSCCPass(char &pid) : Pass(PT_CallGraphSCC, pid) {}
-struct CallGraphSCCPass : public Pass {
-
- explicit CallGraphSCCPass(intptr_t pid) : Pass(PT_CallGraphSCC, pid) {}
- explicit CallGraphSCCPass(void *pid) : Pass(PT_CallGraphSCC, pid) {}
+ /// createPrinterPass - Get a pass that prints the Module
+ /// corresponding to a CallGraph.
+ Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const;
/// doInitialization - This method is called before the SCC's of the program
/// has been processed, allowing the pass to do initialization as necessary.
@@ -49,7 +53,7 @@ struct CallGraphSCCPass : public Pass {
/// SCC passes that add or delete functions to the SCC are required to update
/// the SCC list, otherwise stale pointers may be dereferenced.
///
- virtual bool runOnSCC(std::vector<CallGraphNode *> &SCC) = 0;
+ virtual bool runOnSCC(CallGraphSCC &SCC) = 0;
/// doFinalization - This method is called after the SCC's of the program has
/// been processed, allowing the pass to do final cleanup as necessary.
@@ -59,7 +63,7 @@ struct CallGraphSCCPass : public Pass {
/// Assign pass manager to manager this pass
virtual void assignPassManager(PMStack &PMS,
- PassManagerType PMT = PMT_CallGraphPassManager);
+ PassManagerType PMT);
/// Return what kind of Pass Manager can manage this pass.
virtual PassManagerType getPotentialPassManagerType() const {
@@ -72,6 +76,29 @@ struct CallGraphSCCPass : public Pass {
virtual void getAnalysisUsage(AnalysisUsage &Info) const;
};
+/// CallGraphSCC - This is a single SCC that a CallGraphSCCPass is run on.
+class CallGraphSCC {
+ void *Context; // The CGPassManager object that is vending this.
+ std::vector<CallGraphNode*> Nodes;
+public:
+ CallGraphSCC(void *context) : Context(context) {}
+
+ void initialize(CallGraphNode*const*I, CallGraphNode*const*E) {
+ Nodes.assign(I, E);
+ }
+
+ bool isSingular() const { return Nodes.size() == 1; }
+ unsigned size() const { return Nodes.size(); }
+
+ /// ReplaceNode - This informs the SCC and the pass manager that the specified
+ /// Old node has been deleted, and New is to be used in its place.
+ void ReplaceNode(CallGraphNode *Old, CallGraphNode *New);
+
+ typedef std::vector<CallGraphNode*>::const_iterator iterator;
+ iterator begin() const { return Nodes.begin(); }
+ iterator end() const { return Nodes.end(); }
+};
+
} // End llvm namespace
#endif
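
A hedged sketch of a pass written against the new CallGraphSCC interface above (the pass itself is hypothetical):

#include "llvm/CallGraphSCCPass.h"
#include "llvm/Analysis/CallGraph.h"

using namespace llvm;

namespace {
// Counts the nodes in each SCC that wrap a real function; modifies nothing.
struct CountSCCNodes : public CallGraphSCCPass {
  static char ID;
  CountSCCNodes() : CallGraphSCCPass(ID) {}

  virtual bool runOnSCC(CallGraphSCC &SCC) {
    unsigned N = 0;
    for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
      if ((*I)->getFunction())
        ++N;
    (void)N;
    return false;               // the call graph was not changed
  }
};
}
char CountSCCNodes::ID = 0;
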
diff --git a/libclamav/c++/llvm/include/llvm/CallingConv.h b/libclamav/c++/llvm/include/llvm/CallingConv.h
index 624390d..b0481b9 100644
--- a/libclamav/c++/llvm/include/llvm/CallingConv.h
+++ b/libclamav/c++/llvm/include/llvm/CallingConv.h
@@ -74,7 +74,12 @@ namespace CallingConv {
ARM_AAPCS_VFP = 68,
/// MSP430_INTR - Calling convention used for MSP430 interrupt routines.
- MSP430_INTR = 69
+ MSP430_INTR = 69,
+
+ /// X86_ThisCall - Similar to X86_StdCall. Passes first argument in ECX,
+ /// others via stack. Callee is responsible for stack cleaning. MSVC uses
+ /// this by default for methods in its ABI.
+ X86_ThisCall = 70
};
} // End CallingConv namespace
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/Analysis.h b/libclamav/c++/llvm/include/llvm/CodeGen/Analysis.h
new file mode 100644
index 0000000..f33a9db
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/Analysis.h
@@ -0,0 +1,80 @@
+//===- CodeGen/Analysis.h - CodeGen LLVM IR Analysis Utilities --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares several CodeGen-specific LLVM IR analysis utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ANALYSIS_H
+#define LLVM_CODEGEN_ANALYSIS_H
+
+#include "llvm/Instructions.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/Support/CallSite.h"
+
+namespace llvm {
+
+class TargetLowering;
+class GlobalVariable;
+
+/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
+/// of insertvalue or extractvalue indices that identify a member, return
+/// the linearized index of the start of the member.
+///
+unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
+ const unsigned *Indices,
+ const unsigned *IndicesEnd,
+ unsigned CurIndex = 0);
+
+/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
+/// EVTs that represent all the individual underlying
+/// non-aggregate types that comprise it.
+///
+/// If Offsets is non-null, it points to a vector to be filled in
+/// with the in-memory offsets of each of the individual values.
+///
+void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
+ SmallVectorImpl<EVT> &ValueVTs,
+ SmallVectorImpl<uint64_t> *Offsets = 0,
+ uint64_t StartingOffset = 0);
+
+/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
+GlobalVariable *ExtractTypeInfo(Value *V);
+
+/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
+/// processed uses a memory 'm' constraint.
+bool hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
+ const TargetLowering &TLI);
+
+/// getFCmpCondCode - Return the ISD condition code corresponding to
+/// the given LLVM IR floating-point condition code. This includes
+/// consideration of global floating-point math flags.
+///
+ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred);
+
+/// getICmpCondCode - Return the ISD condition code corresponding to
+/// the given LLVM IR integer condition code.
+///
+ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);
+
+/// Test if the given instruction is in a position to be optimized
+/// with a tail-call. This roughly means that it's in a block with
+/// a return and there's nothing that needs to be scheduled
+/// between it and the return.
+///
+/// This function only tests target-independent requirements.
+bool isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
+ const TargetLowering &TLI);
+
+} // End llvm namespace
+
+#endif
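
A tiny sketch of the condition-code helpers declared above; the wrapper is hypothetical and the ICMP_SGT to SETGT mapping is the standard one:

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Instructions.h"

// Map an LLVM IR integer predicate onto its ISD condition code.
static llvm::ISD::CondCode signedGreaterThan() {
  return llvm::getICmpCondCode(llvm::ICmpInst::ICMP_SGT); // ISD::SETGT
}
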
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/AsmPrinter.h b/libclamav/c++/llvm/include/llvm/CodeGen/AsmPrinter.h
index 8ade1bd..b018603 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -18,7 +18,6 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/DebugLoc.h"
-#include "llvm/Target/TargetMachine.h"
namespace llvm {
class BlockAddress;
@@ -35,6 +34,7 @@ namespace llvm {
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
+ class MachineLocation;
class MachineLoopInfo;
class MachineLoop;
class MachineConstantPool;
@@ -42,78 +42,51 @@ namespace llvm {
class MachineConstantPoolValue;
class MachineJumpTableInfo;
class MachineModuleInfo;
+ class MachineMove;
+ class MCAsmInfo;
class MCInst;
class MCContext;
class MCSection;
class MCStreamer;
class MCSymbol;
- class MDNode;
- class DwarfWriter;
+ class DwarfDebug;
+ class DwarfException;
class Mangler;
- class MCAsmInfo;
class TargetLoweringObjectFile;
+ class TargetData;
+ class TargetMachine;
class Twine;
class Type;
- class formatted_raw_ostream;
/// AsmPrinter - This class is intended to be used as a driving class for all
/// asm writers.
class AsmPrinter : public MachineFunctionPass {
- static char ID;
-
- // GCMetadataPrinters - The garbage collection metadata printer table.
- typedef DenseMap<GCStrategy*,GCMetadataPrinter*> gcp_map_type;
- typedef gcp_map_type::iterator gcp_iterator;
- gcp_map_type GCMetadataPrinters;
-
- /// If VerboseAsm is set, a pointer to the loop info for this
- /// function.
- ///
- MachineLoopInfo *LI;
-
- public:
- /// MMI - If available, this is a pointer to the current MachineModuleInfo.
- MachineModuleInfo *MMI;
-
- protected:
- /// DW - If available, this is a pointer to the current dwarf writer.
- DwarfWriter *DW;
-
public:
-
- /// Output stream on which we're printing assembly code.
- ///
- formatted_raw_ostream &O;
-
/// Target machine description.
///
TargetMachine &TM;
-
- /// getObjFileLowering - Return information about object file lowering.
- TargetLoweringObjectFile &getObjFileLowering() const;
-
+
/// Target Asm Printer information.
///
const MCAsmInfo *MAI;
- /// Target Register Information.
- ///
- const TargetRegisterInfo *TRI;
-
/// OutContext - This is the context for the output file that we are
/// streaming. This owns all of the global MC-related objects for the
/// generated translation unit.
MCContext &OutContext;
-
+
/// OutStreamer - This is the MCStreamer object for the file we are
/// generating. This contains the transient state for the current
/// translation unit that we are generating (such as the current section
/// etc).
MCStreamer &OutStreamer;
-
+
/// The current machine function.
const MachineFunction *MF;
+ /// MMI - This is a pointer to the current MachineModuleInfo.
+ MachineModuleInfo *MMI;
+
/// Name-mangler for global names.
///
Mangler *Mang;
@@ -122,29 +95,30 @@ namespace llvm {
/// beginning of each call to runOnMachineFunction().
///
MCSymbol *CurrentFnSym;
-
- /// getCurrentSection() - Return the current section we are emitting to.
- const MCSection *getCurrentSection() const;
-
+
+ private:
+ // GCMetadataPrinters - The garbage collection metadata printer table.
+ void *GCMetadataPrinters; // Really a DenseMap.
/// VerboseAsm - Emit comments in assembly output if this is true.
///
bool VerboseAsm;
+ static char ID;
- /// Private state for PrintSpecial()
- // Assign a unique ID to this machine instruction.
- mutable const MachineInstr *LastMI;
- mutable const Function *LastFn;
- mutable unsigned Counter;
-
- // Private state for processDebugLoc()
- mutable const MDNode *PrevDLT;
+ /// If VerboseAsm is set, a pointer to the loop info for this
+ /// function.
+ MachineLoopInfo *LI;
+
+ /// DD - If the target supports dwarf debug info, this pointer is non-null.
+ DwarfDebug *DD;
+
+ /// DE - If the target supports dwarf exception info, this pointer is
+ /// non-null.
+ DwarfException *DE;
protected:
- explicit AsmPrinter(formatted_raw_ostream &o, TargetMachine &TM,
- MCContext &Ctx, MCStreamer &Streamer,
- const MCAsmInfo *T);
-
+ explicit AsmPrinter(TargetMachine &TM, MCStreamer &Streamer);
+
public:
virtual ~AsmPrinter();
@@ -155,52 +129,34 @@ namespace llvm {
/// getFunctionNumber - Return a unique ID for the current function.
///
unsigned getFunctionNumber() const;
-
- protected:
+
+ /// getObjFileLowering - Return information about object file lowering.
+ const TargetLoweringObjectFile &getObjFileLowering() const;
+
+ /// getTargetData - Return information about data layout.
+ const TargetData &getTargetData() const;
+
+ /// getCurrentSection() - Return the current section we are emitting to.
+ const MCSection *getCurrentSection() const;
+
+
+ //===------------------------------------------------------------------===//
+ // MachineFunctionPass Implementation.
+ //===------------------------------------------------------------------===//
+
/// getAnalysisUsage - Record analysis usage.
- ///
+ ///
void getAnalysisUsage(AnalysisUsage &AU) const;
-
+
/// doInitialization - Set up the AsmPrinter when we are working on a new
/// module. If your pass overrides this, it must make sure to explicitly
/// call this implementation.
bool doInitialization(Module &M);
- /// EmitStartOfAsmFile - This virtual method can be overridden by targets
- /// that want to emit something at the start of their file.
- virtual void EmitStartOfAsmFile(Module &) {}
-
- /// EmitEndOfAsmFile - This virtual method can be overridden by targets that
- /// want to emit something at the end of their file.
- virtual void EmitEndOfAsmFile(Module &) {}
-
/// doFinalization - Shut down the asmprinter. If you override this in your
/// pass, you must make sure to call it explicitly.
bool doFinalization(Module &M);
-
- /// PrintSpecial - Print information related to the specified machine instr
- /// that is independent of the operand, and may be independent of the instr
- /// itself. This can be useful for portably encoding the comment character
- /// or other bits of target-specific knowledge into the asmstrings. The
- /// syntax used is ${:comment}. Targets can override this to add support
- /// for their own strange codes.
- virtual void PrintSpecial(const MachineInstr *MI, const char *Code) const;
- /// PrintAsmOperand - Print the specified operand of MI, an INLINEASM
- /// instruction, using the specified assembler variant. Targets should
- /// override this to format as appropriate. This method can return true if
- /// the operand is erroneous.
- virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode);
-
- /// PrintAsmMemoryOperand - Print the specified operand of MI, an INLINEASM
- /// instruction, using the specified assembler variant as an address.
- /// Targets should override this to format as appropriate. This method can
- /// return true if the operand is erroneous.
- virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant,
- const char *ExtraCode);
-
/// runOnMachineFunction - Emit the specified function out to the
/// OutStreamer.
virtual bool runOnMachineFunction(MachineFunction &MF) {
@@ -208,107 +164,112 @@ namespace llvm {
EmitFunctionHeader();
EmitFunctionBody();
return false;
- }
-
+ }
+
+ //===------------------------------------------------------------------===//
+ // Coarse grained IR lowering routines.
+ //===------------------------------------------------------------------===//
+
/// SetupMachineFunction - This should be called when a new MachineFunction
/// is being processed from runOnMachineFunction.
void SetupMachineFunction(MachineFunction &MF);
-
+
/// EmitFunctionHeader - This method emits the header for the current
/// function.
void EmitFunctionHeader();
-
+
/// EmitFunctionBody - This method emits the body and trailer for a
/// function.
void EmitFunctionBody();
- /// EmitInstruction - Targets should implement this to emit instructions.
- virtual void EmitInstruction(const MachineInstr *) {
- assert(0 && "EmitInstruction not implemented");
- }
-
- /// EmitFunctionBodyStart - Targets can override this to emit stuff before
- /// the first basic block in the function.
- virtual void EmitFunctionBodyStart() {}
-
- /// EmitFunctionBodyEnd - Targets can override this to emit stuff after
- /// the last basic block in the function.
- virtual void EmitFunctionBodyEnd() {}
-
/// EmitConstantPool - Print to the current output stream assembly
/// representations of the constants in the constant pool MCP. This is
/// used to print out constants which have been "spilled to memory" by
/// the code generator.
///
virtual void EmitConstantPool();
-
- /// EmitJumpTableInfo - Print assembly representations of the jump tables
- /// used by the current function to the current output stream.
+
+ /// EmitJumpTableInfo - Print assembly representations of the jump tables
+ /// used by the current function to the current output stream.
///
void EmitJumpTableInfo();
-
+
/// EmitGlobalVariable - Emit the specified global variable to the .s file.
virtual void EmitGlobalVariable(const GlobalVariable *GV);
-
+
/// EmitSpecialLLVMGlobal - Check to see if the specified global is a
/// special global used by LLVM. If so, emit it and return true, otherwise
/// do nothing and return false.
bool EmitSpecialLLVMGlobal(const GlobalVariable *GV);
- public:
- //===------------------------------------------------------------------===//
- // Emission and print routines
- //
-
- /// EmitInt8 - Emit a byte directive and value.
+ /// EmitAlignment - Emit an alignment directive to the specified power of
+ /// two boundary. For example, if you pass in 3 here, you will get an 8
+ /// byte alignment. If a global value is specified, and if that global has
+ /// an explicit alignment requested, it will override the alignment request
+ /// if required for correctness.
///
- void EmitInt8(int Value) const;
+ void EmitAlignment(unsigned NumBits, const GlobalValue *GV = 0) const;
- /// EmitInt16 - Emit a short directive and value.
- ///
- void EmitInt16(int Value) const;
+ /// EmitBasicBlockStart - This method prints the label for the specified
+ /// MachineBasicBlock, an alignment (if present) and a comment describing
+ /// it if appropriate.
+ void EmitBasicBlockStart(const MachineBasicBlock *MBB) const;
- /// EmitInt32 - Emit a long directive and value.
- ///
- void EmitInt32(int Value) const;
+ /// EmitGlobalConstant - Print a general LLVM constant to the .s file.
+ void EmitGlobalConstant(const Constant *CV, unsigned AddrSpace = 0);
- /// EmitInt64 - Emit a long long directive and value.
- ///
- void EmitInt64(uint64_t Value) const;
//===------------------------------------------------------------------===//
+ // Overridable Hooks
+ //===------------------------------------------------------------------===//
- /// EmitAlignment - Emit an alignment directive to the specified power of
- /// two boundary. For example, if you pass in 3 here, you will get an 8
- /// byte alignment. If a global value is specified, and if that global has
- /// an explicit alignment requested, it will unconditionally override the
- /// alignment request. However, if ForcedAlignBits is specified, this value
- /// has final say: the ultimate alignment will be the max of ForcedAlignBits
- /// and the alignment computed with NumBits and the global. If UseFillExpr
- /// is true, it also emits an optional second value FillValue which the
- /// assembler uses to fill gaps to match alignment for text sections if the
- /// has specified a non-zero fill value.
- ///
- /// The algorithm is:
- /// Align = NumBits;
- /// if (GV && GV->hasalignment) Align = GV->getalignment();
- /// Align = std::max(Align, ForcedAlignBits);
- ///
- void EmitAlignment(unsigned NumBits, const GlobalValue *GV = 0,
- unsigned ForcedAlignBits = 0,
- bool UseFillExpr = true) const;
+ // Targets can, or in the case of EmitInstruction, must implement these to
+ // customize output.
+
+ /// EmitStartOfAsmFile - This virtual method can be overridden by targets
+ /// that want to emit something at the start of their file.
+ virtual void EmitStartOfAsmFile(Module &) {}
+
+ /// EmitEndOfAsmFile - This virtual method can be overridden by targets that
+ /// want to emit something at the end of their file.
+ virtual void EmitEndOfAsmFile(Module &) {}
+
+ /// EmitFunctionBodyStart - Targets can override this to emit stuff before
+ /// the first basic block in the function.
+ virtual void EmitFunctionBodyStart() {}
+
+ /// EmitFunctionBodyEnd - Targets can override this to emit stuff after
+ /// the last basic block in the function.
+ virtual void EmitFunctionBodyEnd() {}
+
+ /// EmitInstruction - Targets should implement this to emit instructions.
+ virtual void EmitInstruction(const MachineInstr *) {
+ assert(0 && "EmitInstruction not implemented");
+ }
+
+ virtual void EmitFunctionEntryLabel();
+
+ virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);
- /// printLabel - This method prints a local label used by debug and
- /// exception handling tables.
- void printLabel(unsigned Id) const;
+ /// isBlockOnlyReachableByFallthrough - Return true if the basic block has
+ /// exactly one predecessor and the control transfer mechanism between
+ /// the predecessor and this block is a fall-through.
+ virtual bool
+ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const;
+
+ //===------------------------------------------------------------------===//
+ // Symbol Lowering Routines.
+ //===------------------------------------------------------------------===//
+ public:
+
+ /// GetTempSymbol - Return the MCSymbol corresponding to the assembler
+ /// temporary label with the specified stem and unique ID.
+ MCSymbol *GetTempSymbol(StringRef Name, unsigned ID) const;
- /// printDeclare - This method prints a local variable declaration used by
- /// debug tables.
- void printDeclare(const MachineInstr *MI) const;
+ /// GetTempSymbol - Return an assembler temporary label with the specified
+ /// stem.
+ MCSymbol *GetTempSymbol(StringRef Name) const;
- /// GetGlobalValueSymbol - Return the MCSymbol for the specified global
- /// value.
- virtual MCSymbol *GetGlobalValueSymbol(const GlobalValue *GV) const;
/// GetSymbolWithGlobalValueBase - Return the MCSymbol for a symbol with
/// global value name as its base, with the specified suffix, and where the
@@ -316,11 +277,11 @@ namespace llvm {
MCSymbol *GetSymbolWithGlobalValueBase(const GlobalValue *GV,
StringRef Suffix,
bool ForcePrivate = true) const;
-
+
/// GetExternalSymbolSymbol - Return the MCSymbol for the specified
/// ExternalSymbol.
MCSymbol *GetExternalSymbolSymbol(StringRef Sym) const;
-
+
/// GetCPISymbol - Return the symbol for the specified constant pool entry.
MCSymbol *GetCPISymbol(unsigned CPID) const;
@@ -334,58 +295,159 @@ namespace llvm {
/// GetBlockAddressSymbol - Return the MCSymbol used to satisfy BlockAddress
/// uses of the specified basic block.
MCSymbol *GetBlockAddressSymbol(const BlockAddress *BA) const;
- MCSymbol *GetBlockAddressSymbol(const Function *F,
- const BasicBlock *BB) const;
-
- /// EmitBasicBlockStart - This method prints the label for the specified
- /// MachineBasicBlock, an alignment (if present) and a comment describing
- /// it if appropriate.
- void EmitBasicBlockStart(const MachineBasicBlock *MBB) const;
-
-
- // Data emission.
-
- /// EmitGlobalConstant - Print a general LLVM constant to the .s file.
- void EmitGlobalConstant(const Constant* CV, unsigned AddrSpace = 0);
-
- protected:
- virtual void EmitFunctionEntryLabel();
-
- virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);
+ MCSymbol *GetBlockAddressSymbol(const BasicBlock *BB) const;
+ //===------------------------------------------------------------------===//
+ // Emission Helper Routines.
+ //===------------------------------------------------------------------===//
+ public:
/// printOffset - This is just convenient handler for printing offsets.
- void printOffset(int64_t Offset) const;
+ void printOffset(int64_t Offset, raw_ostream &OS) const;
- /// isBlockOnlyReachableByFallthough - Return true if the basic block has
- /// exactly one predecessor and the control transfer mechanism between
- /// the predecessor and this block is a fall-through.
- virtual bool isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const;
+ /// EmitInt8 - Emit a byte directive and value.
+ ///
+ void EmitInt8(int Value) const;
+
+ /// EmitInt16 - Emit a short directive and value.
+ ///
+ void EmitInt16(int Value) const;
+
+ /// EmitInt32 - Emit a long directive and value.
+ ///
+ void EmitInt32(int Value) const;
+
+ /// EmitLabelDifference - Emit something like ".long Hi-Lo" where the size
+ /// in bytes of the directive is specified by Size and Hi/Lo specify the
+ /// labels. This implicitly uses .set if it is available.
+ void EmitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo,
+ unsigned Size) const;
+
+ /// EmitLabelOffsetDifference - Emit something like ".long Hi+Offset-Lo"
+ /// where the size in bytes of the directive is specified by Size and Hi/Lo
+ /// specify the labels. This implicitly uses .set if it is available.
+ void EmitLabelOffsetDifference(const MCSymbol *Hi, uint64_t Offset,
+ const MCSymbol *Lo, unsigned Size) const;
+
+ /// EmitLabelPlusOffset - Emit something like ".long Label+Offset"
+ /// where the size in bytes of the directive is specified by Size and Label
+ /// specifies the label. This implicitly uses .set if it is available.
+ void EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
+ unsigned Size) const;
+
+ //===------------------------------------------------------------------===//
+ // Dwarf Emission Helper Routines
+ //===------------------------------------------------------------------===//
+
+ /// EmitSLEB128 - emit the specified signed leb128 value.
+ void EmitSLEB128(int Value, const char *Desc = 0) const;
+
+ /// EmitULEB128 - emit the specified unsigned leb128 value.
+ void EmitULEB128(unsigned Value, const char *Desc = 0,
+ unsigned PadTo = 0) const;
+
+ /// EmitCFAByte - Emit a .byte 42 directive for a DW_CFA_xxx value.
+ void EmitCFAByte(unsigned Val) const;
+
+ /// EmitEncodingByte - Emit a .byte 42 directive that corresponds to an
+ /// encoding. If verbose assembly output is enabled, we output comments
+ /// describing the encoding. Desc is a string saying what the encoding is
+ /// specifying (e.g. "LSDA").
+ void EmitEncodingByte(unsigned Val, const char *Desc = 0) const;
+
+ /// GetSizeOfEncodedValue - Return the size of the encoding in bytes.
+ unsigned GetSizeOfEncodedValue(unsigned Encoding) const;
+
+ /// EmitReference - Emit a reference to a label with a specified encoding.
+ ///
+ void EmitReference(const MCSymbol *Sym, unsigned Encoding) const;
+ void EmitReference(const GlobalValue *GV, unsigned Encoding) const;
+
+ /// EmitSectionOffset - Emit the 4-byte offset of Label from the start of
+ /// its section. This can be done with a special directive if the target
+ /// supports it (e.g. cygwin) or by emitting it as an offset from a label at
+ /// the start of the section.
+ ///
+ /// SectionLabel is a temporary label emitted at the start of the section
+ /// that Label lives in.
+ void EmitSectionOffset(const MCSymbol *Label,
+ const MCSymbol *SectionLabel) const;
+
+ /// getDebugValueLocation - Get location information encoded by DBG_VALUE
+ /// operands.
+ virtual MachineLocation getDebugValueLocation(const MachineInstr *MI) const;
+
+ /// getISAEncoding - Get the value for DW_AT_APPLE_isa. Zero if no isa
+ /// encoding specified.
+ virtual unsigned getISAEncoding() { return 0; }
+
+ //===------------------------------------------------------------------===//
+ // Dwarf Lowering Routines
+ //===------------------------------------------------------------------===//
+
+ /// EmitFrameMoves - Emit frame instructions to describe the layout of the
+ /// frame.
+ void EmitFrameMoves(const std::vector<MachineMove> &Moves,
+ MCSymbol *BaseLabel, bool isEH) const;
+
+
+ //===------------------------------------------------------------------===//
+ // Inline Asm Support
+ //===------------------------------------------------------------------===//
+ public:
+ // These are hooks that targets can override to implement inline asm
+ // support. These should probably be moved out of AsmPrinter someday.
+
+ /// PrintSpecial - Print information related to the specified machine instr
+ /// that is independent of the operand, and may be independent of the instr
+ /// itself. This can be useful for portably encoding the comment character
+ /// or other bits of target-specific knowledge into the asmstrings. The
+ /// syntax used is ${:comment}. Targets can override this to add support
+ /// for their own strange codes.
+ virtual void PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
+ const char *Code) const;
+
+ /// PrintAsmOperand - Print the specified operand of MI, an INLINEASM
+ /// instruction, using the specified assembler variant. Targets should
+ /// override this to format as appropriate. This method can return true if
+ /// the operand is erroneous.
+ virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS);
+
+ /// PrintAsmMemoryOperand - Print the specified operand of MI, an INLINEASM
+ /// instruction, using the specified assembler variant as an address.
+ /// Targets should override this to format as appropriate. This method can
+ /// return true if the operand is erroneous.
+ virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant,
+ const char *ExtraCode,
+ raw_ostream &OS);
private:
+ /// Private state for PrintSpecial()
+ // Assign a unique ID to this machine instruction.
+ mutable const MachineInstr *LastMI;
+ mutable unsigned LastFn;
+ mutable unsigned Counter;
+ mutable unsigned SetCounter;
- /// processDebugLoc - Processes the debug information of each machine
- /// instruction's DebugLoc.
- void processDebugLoc(const MachineInstr *MI, bool BeforePrintingInsn);
-
- void printLabelInst(const MachineInstr *MI) const;
+ /// EmitInlineAsm - Emit a blob of inline asm to the output streamer.
+ void EmitInlineAsm(StringRef Str, unsigned LocCookie) const;
- /// printInlineAsm - This method formats and prints the specified machine
+ /// EmitInlineAsm - This method formats and emits the specified machine
/// instruction that is an inline asm.
- void printInlineAsm(const MachineInstr *MI) const;
+ void EmitInlineAsm(const MachineInstr *MI) const;
- /// printImplicitDef - This method prints the specified machine instruction
- /// that is an implicit def.
- void printImplicitDef(const MachineInstr *MI) const;
-
- /// printKill - This method prints the specified kill machine instruction.
- void printKill(const MachineInstr *MI) const;
+ //===------------------------------------------------------------------===//
+ // Internal Implementation Details
+ //===------------------------------------------------------------------===//
 /// EmitVisibility - This emits visibility information about the symbol, if
 /// this is supported by the target.
void EmitVisibility(MCSymbol *Sym, unsigned Visibility) const;
-
+
void EmitLinkage(unsigned Linkage, MCSymbol *GVSym) const;
-
+
void EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB,
unsigned uid) const;
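For orientation only, not part of the patch: a minimal sketch of how the emission helpers declared in the reworked AsmPrinter header can be used. The helper name, the symbols and the record layout are made-up assumptions; only the EmitAlignment/EmitInt*/EmitLabelDifference signatures come from the header above.

#include "llvm/CodeGen/AsmPrinter.h"
using namespace llvm;

// Hypothetical helper: emit a tiny 4-byte-aligned descriptor record.
static void EmitFooRecord(AsmPrinter &AP, const MCSymbol *Begin,
                          const MCSymbol *End) {
  AP.EmitAlignment(2);                    // 2^2 = 4-byte boundary
  AP.EmitInt8(1);                         // version byte
  AP.EmitInt16(0);                        // flags
  AP.EmitLabelDifference(End, Begin, 4);  // ".long End-Begin"
}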
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/CalcSpillWeights.h b/libclamav/c++/llvm/include/llvm/CodeGen/CalcSpillWeights.h
index 2fc03bd..240734f 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/CalcSpillWeights.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/CalcSpillWeights.h
@@ -12,10 +12,35 @@
#define LLVM_CODEGEN_CALCSPILLWEIGHTS_H
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/ADT/DenseMap.h"
namespace llvm {
class LiveInterval;
+ class LiveIntervals;
+ class MachineLoopInfo;
+
+ /// VirtRegAuxInfo - Calculate auxiliary information for a virtual
+ /// register such as its spill weight and allocation hint.
+ class VirtRegAuxInfo {
+ MachineFunction &mf_;
+ LiveIntervals &lis_;
+ const MachineLoopInfo &loops_;
+ DenseMap<unsigned, float> hint_;
+ public:
+ VirtRegAuxInfo(MachineFunction &mf, LiveIntervals &lis,
+ const MachineLoopInfo &loops) :
+ mf_(mf), lis_(lis), loops_(loops) {}
+
+ /// CalculateRegClass - recompute the register class for reg from its uses.
+ /// Since the register class can affect the allocation hint, this function
+ /// should be called before CalculateWeightAndHint if both are called.
+ void CalculateRegClass(unsigned reg);
+
+ /// CalculateWeightAndHint - (re)compute li's spill weight and allocation
+ /// hint.
+ void CalculateWeightAndHint(LiveInterval &li);
+ };
/// CalculateSpillWeights - Compute spill weights for all virtual register
/// live intervals.
@@ -23,11 +48,11 @@ namespace llvm {
public:
static char ID;
- CalculateSpillWeights() : MachineFunctionPass(&ID) {}
+ CalculateSpillWeights() : MachineFunctionPass(ID) {}
virtual void getAnalysisUsage(AnalysisUsage &au) const;
- virtual bool runOnMachineFunction(MachineFunction &fn);
+ virtual bool runOnMachineFunction(MachineFunction &fn);
private:
/// Returns true if the given live interval is zero length.
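A usage sketch for the new VirtRegAuxInfo helper, not part of the patch; the surrounding analyses are assumed to exist already, and the call order follows the comment in the header (CalculateRegClass before CalculateWeightAndHint).

#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
using namespace llvm;

// Hypothetical helper: recompute class, weight and hint for one virtual register.
static void recomputeWeight(MachineFunction &MF, LiveIntervals &LIS,
                            const MachineLoopInfo &Loops,
                            LiveInterval &LI, unsigned Reg) {
  VirtRegAuxInfo VRAI(MF, LIS, Loops);
  VRAI.CalculateRegClass(Reg);        // may change the class/hint, so do it first
  VRAI.CalculateWeightAndHint(LI);    // then recompute the spill weight and hint
}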
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/CallingConvLower.h b/libclamav/c++/llvm/include/llvm/CodeGen/CallingConvLower.h
index 45a2757..6fb8436 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/CallingConvLower.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/CallingConvLower.h
@@ -17,14 +17,13 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/Target/TargetCallingConv.h"
#include "llvm/CallingConv.h"
namespace llvm {
class TargetRegisterInfo;
class TargetMachine;
class CCState;
- class SDNode;
/// CCValAssign - Represent assignment of one arg/retval to a location.
class CCValAssign {
@@ -35,6 +34,9 @@ public:
ZExt, // The value is zero extended in the location.
AExt, // The value is extended with undefined upper bits.
BCvt, // The value is bit-converted in the location.
+ VExt, // The value is vector-widened in the location.
+ // FIXME: Not implemented yet. Code that uses AExt to mean
+ // vector-widen should be fixed to use VExt instead.
Indirect // The location contains pointer to the value.
// TODO: a subset of the value is in the location.
};
@@ -186,8 +188,7 @@ public:
/// CheckReturn - Analyze the return values of a function, returning
/// true if the return can be performed without sret-demotion, and
/// false otherwise.
- bool CheckReturn(const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
+ bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
CCAssignFn Fn);
/// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
@@ -274,6 +275,12 @@ public:
return Result;
}
+ /// Version of AllocateStack with extra register to be shadowed.
+ unsigned AllocateStack(unsigned Size, unsigned Align, unsigned ShadowReg) {
+ MarkAllocated(ShadowReg);
+ return AllocateStack(Size, Align);
+ }
+
// HandleByVal - Allocate a stack slot large enough to pass an argument by
// value. The size and alignment information of the argument is encoded in its
// parameter attribute.
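A short sketch of the new AllocateStack overload in use, not part of the patch; the slot size, alignment and the idea of a fixed shadow register are assumptions for illustration.

#include "llvm/CodeGen/CallingConvLower.h"
using namespace llvm;

// Hypothetical custom-CC helper: give a value a stack slot and mark the
// register the ABI shadows for it as allocated in one step.
static unsigned assignWithShadow(CCState &State, unsigned ShadowReg) {
  return State.AllocateStack(8, 8, ShadowReg);  // 8-byte slot, 8-byte aligned
}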
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/DwarfWriter.h b/libclamav/c++/llvm/include/llvm/CodeGen/DwarfWriter.h
deleted file mode 100644
index d59e22a..0000000
--- a/libclamav/c++/llvm/include/llvm/CodeGen/DwarfWriter.h
+++ /dev/null
@@ -1,103 +0,0 @@
-//===-- llvm/CodeGen/DwarfWriter.h - Dwarf Framework ------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains support for writing Dwarf debug and exception info into
-// asm files. For Details on the Dwarf 3 specfication see DWARF Debugging
-// Information Format V.3 reference manual http://dwarf.freestandards.org ,
-//
-// The role of the Dwarf Writer class is to extract information from the
-// MachineModuleInfo object, organize it in Dwarf form and then emit it into asm
-// the current asm file using data and high level Dwarf directives.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_DWARFWRITER_H
-#define LLVM_CODEGEN_DWARFWRITER_H
-
-#include "llvm/Pass.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
-
-class AsmPrinter;
-class DwarfDebug;
-class DwarfException;
-class MachineModuleInfo;
-class MachineFunction;
-class MachineInstr;
-class Value;
-class Module;
-class MDNode;
-class MCAsmInfo;
-class raw_ostream;
-class Instruction;
-class DICompileUnit;
-class DISubprogram;
-class DIVariable;
-
-//===----------------------------------------------------------------------===//
-// DwarfWriter - Emits Dwarf debug and exception handling directives.
-//
-
-class DwarfWriter : public ImmutablePass {
-private:
- /// DD - Provides the DwarfWriter debug implementation.
- ///
- DwarfDebug *DD;
-
- /// DE - Provides the DwarfWriter exception implementation.
- ///
- DwarfException *DE;
-
-public:
- static char ID; // Pass identification, replacement for typeid
-
- DwarfWriter();
- virtual ~DwarfWriter();
-
- //===--------------------------------------------------------------------===//
- // Main entry points.
- //
-
- /// BeginModule - Emit all Dwarf sections that should come prior to the
- /// content.
- void BeginModule(Module *M, MachineModuleInfo *MMI, raw_ostream &OS,
- AsmPrinter *A, const MCAsmInfo *T);
-
- /// EndModule - Emit all Dwarf sections that should come after the content.
- ///
- void EndModule();
-
- /// BeginFunction - Gather pre-function debug information. Assumes being
- /// emitted immediately after the function entry point.
- void BeginFunction(const MachineFunction *MF);
-
- /// EndFunction - Gather and emit post-function debug information.
- ///
- void EndFunction(const MachineFunction *MF);
-
- /// RecordSourceLine - Register a source line with debug info. Returns a
- /// unique label ID used to generate a label and provide correspondence to
- /// the source line list.
- unsigned RecordSourceLine(unsigned Line, unsigned Col, MDNode *Scope);
-
- /// getRecordSourceLineCount - Count source lines.
- unsigned getRecordSourceLineCount();
-
- /// ShouldEmitDwarfDebug - Returns true if Dwarf debugging declarations should
- /// be emitted.
- bool ShouldEmitDwarfDebug() const;
-
- void BeginScope(const MachineInstr *MI, unsigned Label);
- void EndScope(const MachineInstr *MI);
-};
-
-} // end llvm namespace
-
-#endif
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/ELFRelocation.h b/libclamav/c++/llvm/include/llvm/CodeGen/ELFRelocation.h
deleted file mode 100644
index e58b8df..0000000
--- a/libclamav/c++/llvm/include/llvm/CodeGen/ELFRelocation.h
+++ /dev/null
@@ -1,51 +0,0 @@
-//=== ELFRelocation.h - ELF Relocation Info ---------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the ELFRelocation class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_ELF_RELOCATION_H
-#define LLVM_CODEGEN_ELF_RELOCATION_H
-
-#include "llvm/System/DataTypes.h"
-
-namespace llvm {
-
- /// ELFRelocation - This class contains all the information necessary to
- /// to generate any 32-bit or 64-bit ELF relocation entry.
- class ELFRelocation {
- uint64_t r_offset; // offset in the section of the object this applies to
- uint32_t r_symidx; // symbol table index of the symbol to use
- uint32_t r_type; // machine specific relocation type
- int64_t r_add; // explicit relocation addend
- bool r_rela; // if true then the addend is part of the entry
- // otherwise the addend is at the location specified
- // by r_offset
- public:
-
- uint64_t getInfo(bool is64Bit = false) const {
- if (is64Bit)
- return ((uint64_t)r_symidx << 32) + ((uint64_t)r_type & 0xFFFFFFFFL);
- else
- return (r_symidx << 8) + (r_type & 0xFFL);
- }
-
- uint64_t getOffset() const { return r_offset; }
- uint64_t getAddress() const { return r_add; }
-
- ELFRelocation(uint64_t off, uint32_t sym, uint32_t type,
- bool rela = true, int64_t addend = 0) :
- r_offset(off), r_symidx(sym), r_type(type),
- r_add(addend), r_rela(rela) {}
- };
-
-} // end llvm namespace
-
-#endif // LLVM_CODEGEN_ELF_RELOCATION_H
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/FastISel.h b/libclamav/c++/llvm/include/llvm/CodeGen/FastISel.h
index 9d0f0d9..79b1554 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/FastISel.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/FastISel.h
@@ -15,43 +15,38 @@
#define LLVM_CODEGEN_FASTISEL_H
#include "llvm/ADT/DenseMap.h"
+#ifndef NDEBUG
#include "llvm/ADT/SmallSet.h"
-#include "llvm/CodeGen/SelectionDAGNodes.h"
+#endif
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
namespace llvm {
class AllocaInst;
class ConstantFP;
+class FunctionLoweringInfo;
class Instruction;
class MachineBasicBlock;
class MachineConstantPool;
class MachineFunction;
+class MachineInstr;
class MachineFrameInfo;
-class MachineModuleInfo;
-class DwarfWriter;
class MachineRegisterInfo;
class TargetData;
class TargetInstrInfo;
class TargetLowering;
class TargetMachine;
class TargetRegisterClass;
+class TargetRegisterInfo;
/// FastISel - This is a fast-path instruction selection class that
/// generates poor code and doesn't support illegal types or non-trivial
/// lowering, but runs quickly.
class FastISel {
protected:
- MachineBasicBlock *MBB;
DenseMap<const Value *, unsigned> LocalValueMap;
- DenseMap<const Value *, unsigned> &ValueMap;
- DenseMap<const BasicBlock *, MachineBasicBlock *> &MBBMap;
- DenseMap<const AllocaInst *, int> &StaticAllocaMap;
-#ifndef NDEBUG
- SmallSet<Instruction*, 8> &CatchInfoLost;
-#endif
- MachineFunction &MF;
- MachineModuleInfo *MMI;
- DwarfWriter *DW;
+ FunctionLoweringInfo &FuncInfo;
MachineRegisterInfo &MRI;
MachineFrameInfo &MFI;
MachineConstantPool &MCP;
@@ -60,27 +55,22 @@ protected:
const TargetData &TD;
const TargetInstrInfo &TII;
const TargetLowering &TLI;
+ const TargetRegisterInfo &TRI;
+ MachineInstr *LastLocalValue;
public:
- /// startNewBlock - Set the current block to which generated machine
- /// instructions will be appended, and clear the local CSE map.
- ///
- void startNewBlock(MachineBasicBlock *mbb) {
- setCurrentBlock(mbb);
- LocalValueMap.clear();
- }
+ /// getLastLocalValue - Return the position of the last instruction
+ /// emitted for materializing constants for use in the current block.
+ MachineInstr *getLastLocalValue() { return LastLocalValue; }
- /// setCurrentBlock - Set the current block to which generated machine
- /// instructions will be appended.
- ///
- void setCurrentBlock(MachineBasicBlock *mbb) {
- MBB = mbb;
- }
+ /// setLastLocalValue - Update the position of the last instruction
+ /// emitted for materializing constants for use in the current block.
+ void setLastLocalValue(MachineInstr *I) { LastLocalValue = I; }
- /// setCurDebugLoc - Set the current debug location information, which is used
- /// when creating a machine instruction.
+ /// startNewBlock - Set the current block to which generated machine
+ /// instructions will be appended, and clear the local CSE map.
///
- void setCurDebugLoc(DebugLoc dl) { DL = dl; }
+ void startNewBlock();
/// getCurDebugLoc() - Return current debug location information.
DebugLoc getCurDebugLoc() const { return DL; }
@@ -89,42 +79,49 @@ public:
/// LLVM IR instruction, and append generated machine instructions to
/// the current block. Return true if selection was successful.
///
- bool SelectInstruction(Instruction *I);
+ bool SelectInstruction(const Instruction *I);
/// SelectOperator - Do "fast" instruction selection for the given
/// LLVM IR operator (Instruction or ConstantExpr), and append
/// generated machine instructions to the current block. Return true
/// if selection was successful.
///
- bool SelectOperator(User *I, unsigned Opcode);
+ bool SelectOperator(const User *I, unsigned Opcode);
/// getRegForValue - Create a virtual register and arrange for it to
/// be assigned the value for the given LLVM value.
- unsigned getRegForValue(Value *V);
+ unsigned getRegForValue(const Value *V);
/// lookUpRegForValue - Look up the value to see if its value is already
/// cached in a register. It may be defined by instructions across blocks or
/// defined locally.
- unsigned lookUpRegForValue(Value *V);
+ unsigned lookUpRegForValue(const Value *V);
/// getRegForGEPIndex - This is a wrapper around getRegForValue that also
/// takes care of truncating or sign-extending the given getelementptr
/// index value.
- unsigned getRegForGEPIndex(Value *V);
+ std::pair<unsigned, bool> getRegForGEPIndex(const Value *V);
+
+ /// recomputeInsertPt - Reset InsertPt to prepare for inserting instructions
+ /// into the current block.
+ void recomputeInsertPt();
+
+ struct SavePoint {
+ MachineBasicBlock::iterator InsertPt;
+ DebugLoc DL;
+ };
+
+ /// enterLocalValueArea - Prepare InsertPt to begin inserting instructions
+ /// into the local value area and return the old insert position.
+ SavePoint enterLocalValueArea();
+
+ /// leaveLocalValueArea - Reset InsertPt to the given old insert position.
+ void leaveLocalValueArea(SavePoint Old);
virtual ~FastISel();
protected:
- FastISel(MachineFunction &mf,
- MachineModuleInfo *mmi,
- DwarfWriter *dw,
- DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
- DenseMap<const AllocaInst *, int> &am
-#ifndef NDEBUG
- , SmallSet<Instruction*, 8> &cil
-#endif
- );
+ explicit FastISel(FunctionLoweringInfo &funcInfo);
/// TargetSelectInstruction - This method is called by target-independent
/// code when the normal FastISel process fails to select an instruction.
@@ -132,7 +129,7 @@ protected:
/// fit into FastISel's framework. It returns true if it was successful.
///
virtual bool
- TargetSelectInstruction(Instruction *I) = 0;
+ TargetSelectInstruction(const Instruction *I) = 0;
/// FastEmit_r - This method is called by target-independent code
/// to request that an instruction with the given type and opcode
@@ -147,7 +144,8 @@ protected:
///
virtual unsigned FastEmit_r(MVT VT,
MVT RetVT,
- unsigned Opcode, unsigned Op0);
+ unsigned Opcode,
+ unsigned Op0, bool Op0IsKill);
/// FastEmit_rr - This method is called by target-independent code
/// to request that an instruction with the given type, opcode, and
@@ -156,7 +154,8 @@ protected:
virtual unsigned FastEmit_rr(MVT VT,
MVT RetVT,
unsigned Opcode,
- unsigned Op0, unsigned Op1);
+ unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill);
/// FastEmit_ri - This method is called by target-independent code
/// to request that an instruction with the given type, opcode, and
@@ -165,7 +164,8 @@ protected:
virtual unsigned FastEmit_ri(MVT VT,
MVT RetVT,
unsigned Opcode,
- unsigned Op0, uint64_t Imm);
+ unsigned Op0, bool Op0IsKill,
+ uint64_t Imm);
/// FastEmit_rf - This method is called by target-independent code
/// to request that an instruction with the given type, opcode, and
@@ -174,7 +174,8 @@ protected:
virtual unsigned FastEmit_rf(MVT VT,
MVT RetVT,
unsigned Opcode,
- unsigned Op0, ConstantFP *FPImm);
+ unsigned Op0, bool Op0IsKill,
+ const ConstantFP *FPImm);
/// FastEmit_rri - This method is called by target-independent code
/// to request that an instruction with the given type, opcode, and
@@ -183,7 +184,9 @@ protected:
virtual unsigned FastEmit_rri(MVT VT,
MVT RetVT,
unsigned Opcode,
- unsigned Op0, unsigned Op1, uint64_t Imm);
+ unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill,
+ uint64_t Imm);
/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
@@ -191,8 +194,8 @@ protected:
/// FastEmit_rr instead.
unsigned FastEmit_ri_(MVT VT,
unsigned Opcode,
- unsigned Op0, uint64_t Imm,
- MVT ImmType);
+ unsigned Op0, bool Op0IsKill,
+ uint64_t Imm, MVT ImmType);
/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
/// to emit an instruction with an immediate operand using FastEmit_rf.
@@ -200,8 +203,8 @@ protected:
/// FastEmit_rr instead.
unsigned FastEmit_rf_(MVT VT,
unsigned Opcode,
- unsigned Op0, ConstantFP *FPImm,
- MVT ImmType);
+ unsigned Op0, bool Op0IsKill,
+ const ConstantFP *FPImm, MVT ImmType);
/// FastEmit_i - This method is called by target-independent code
/// to request that an instruction with the given type, opcode, and
@@ -217,7 +220,7 @@ protected:
virtual unsigned FastEmit_f(MVT VT,
MVT RetVT,
unsigned Opcode,
- ConstantFP *FPImm);
+ const ConstantFP *FPImm);
/// FastEmitInst_ - Emit a MachineInstr with no operands and a
/// result register in the given register class.
@@ -230,35 +233,40 @@ protected:
///
unsigned FastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
- unsigned Op0);
+ unsigned Op0, bool Op0IsKill);
/// FastEmitInst_rr - Emit a MachineInstr with two register operands
/// and a result register in the given register class.
///
unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
- unsigned Op0, unsigned Op1);
+ unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill);
/// FastEmitInst_ri - Emit a MachineInstr with two register operands
/// and a result register in the given register class.
///
unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
- unsigned Op0, uint64_t Imm);
+ unsigned Op0, bool Op0IsKill,
+ uint64_t Imm);
/// FastEmitInst_rf - Emit a MachineInstr with two register operands
/// and a result register in the given register class.
///
unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
- unsigned Op0, ConstantFP *FPImm);
+ unsigned Op0, bool Op0IsKill,
+ const ConstantFP *FPImm);
/// FastEmitInst_rri - Emit a MachineInstr with two register operands,
/// an immediate, and a result register in the given register class.
///
unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
- unsigned Op0, unsigned Op1, uint64_t Imm);
+ unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill,
+ uint64_t Imm);
/// FastEmitInst_i - Emit a MachineInstr with a single immediate
/// operand, and a result register in the given register class.
@@ -269,46 +277,63 @@ protected:
/// FastEmitInst_extractsubreg - Emit a MachineInstr for an extract_subreg
/// from a specified index of a superregister to a specified type.
unsigned FastEmitInst_extractsubreg(MVT RetVT,
- unsigned Op0, uint32_t Idx);
+ unsigned Op0, bool Op0IsKill,
+ uint32_t Idx);
/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastEmitZExtFromI1(MVT VT,
- unsigned Op);
+ unsigned Op0, bool Op0IsKill);
/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
- void FastEmitBranch(MachineBasicBlock *MBB);
+ void FastEmitBranch(MachineBasicBlock *MBB, DebugLoc DL);
- unsigned UpdateValueMap(Value* I, unsigned Reg);
+ unsigned UpdateValueMap(const Value* I, unsigned Reg);
unsigned createResultReg(const TargetRegisterClass *RC);
/// TargetMaterializeConstant - Emit a constant in a register using
/// target-specific logic, such as constant pool loads.
- virtual unsigned TargetMaterializeConstant(Constant* C) {
+ virtual unsigned TargetMaterializeConstant(const Constant* C) {
return 0;
}
/// TargetMaterializeAlloca - Emit an alloca address in a register using
/// target-specific logic.
- virtual unsigned TargetMaterializeAlloca(AllocaInst* C) {
+ virtual unsigned TargetMaterializeAlloca(const AllocaInst* C) {
return 0;
}
private:
- bool SelectBinaryOp(User *I, unsigned ISDOpcode);
+ bool SelectBinaryOp(const User *I, unsigned ISDOpcode);
- bool SelectFNeg(User *I);
+ bool SelectFNeg(const User *I);
- bool SelectGetElementPtr(User *I);
+ bool SelectGetElementPtr(const User *I);
- bool SelectCall(User *I);
+ bool SelectCall(const User *I);
- bool SelectBitCast(User *I);
+ bool SelectBitCast(const User *I);
- bool SelectCast(User *I, unsigned Opcode);
+ bool SelectCast(const User *I, unsigned Opcode);
+
+ /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
+ /// Emit code to ensure constants are copied into registers when needed.
+ /// Remember the virtual registers that need to be added to the Machine PHI
+ /// nodes as input. We cannot just directly add them, because expansion
+ /// might result in multiple MBB's for one BB. As such, the start of the
+ /// BB might correspond to a different MBB than the end.
+ bool HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);
+
+ /// materializeRegForValue - Helper for getRegForValue. This function is
+ /// called when the value isn't already available in a register and must
+ /// be materialized with new instructions.
+ unsigned materializeRegForValue(const Value *V, MVT VT);
+
+ /// hasTrivialKill - Test whether the given value has exactly one use.
+ bool hasTrivialKill(const Value *V) const;
};
}
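To make the constructor change concrete, a sketch (not part of the patch) of the minimum a target-specific fast instruction selector now has to provide; the class name and the trivial body are assumptions.

#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
using namespace llvm;

// Hypothetical target selector wired to the new single-argument constructor.
class FooFastISel : public FastISel {
public:
  explicit FooFastISel(FunctionLoweringInfo &FI) : FastISel(FI) {}

  // Called when the target-independent fast path gives up on an instruction.
  virtual bool TargetSelectInstruction(const Instruction *I) {
    return false;  // handle nothing; let SelectionDAG take over
  }
};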
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h b/libclamav/c++/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
new file mode 100644
index 0000000..f17fe5a
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -0,0 +1,164 @@
+//===-- FunctionLoweringInfo.h - Lower functions from LLVM IR to CodeGen --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This implements routines for translating functions from LLVM IR into
+// Machine IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
+#define LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
+
+#include "llvm/InlineAsm.h"
+#include "llvm/Instructions.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#ifndef NDEBUG
+#include "llvm/ADT/SmallSet.h"
+#endif
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/Support/CallSite.h"
+#include <vector>
+
+namespace llvm {
+
+class AllocaInst;
+class BasicBlock;
+class CallInst;
+class Function;
+class GlobalVariable;
+class Instruction;
+class MachineInstr;
+class MachineBasicBlock;
+class MachineFunction;
+class MachineModuleInfo;
+class MachineRegisterInfo;
+class TargetLowering;
+class Value;
+
+//===--------------------------------------------------------------------===//
+/// FunctionLoweringInfo - This contains information that is global to a
+/// function that is used when lowering a region of the function.
+///
+class FunctionLoweringInfo {
+public:
+ const TargetLowering &TLI;
+ const Function *Fn;
+ MachineFunction *MF;
+ MachineRegisterInfo *RegInfo;
+
+ /// CanLowerReturn - true iff the function's return value can be lowered to
+ /// registers.
+ bool CanLowerReturn;
+
+ /// DemoteRegister - if CanLowerReturn is false, DemoteRegister is a vreg
+ /// allocated to hold a pointer to the hidden sret parameter.
+ unsigned DemoteRegister;
+
+ /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
+ DenseMap<const BasicBlock*, MachineBasicBlock *> MBBMap;
+
+ /// ValueMap - Since we emit code for the function a basic block at a time,
+ /// we must remember which virtual registers hold the values for
+ /// cross-basic-block values.
+ DenseMap<const Value*, unsigned> ValueMap;
+
+ /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
+ /// the entry block. This allows the allocas to be efficiently referenced
+ /// anywhere in the function.
+ DenseMap<const AllocaInst*, int> StaticAllocaMap;
+
+ /// ByValArgFrameIndexMap - Keep track of frame indices for byval arguments.
+ DenseMap<const Argument*, int> ByValArgFrameIndexMap;
+
+ /// ArgDbgValues - A list of DBG_VALUE instructions created during isel for
+ /// function arguments that are inserted after scheduling is completed.
+ SmallVector<MachineInstr*, 8> ArgDbgValues;
+
+ /// RegFixups - Registers which need to be replaced after isel is done.
+ DenseMap<unsigned, unsigned> RegFixups;
+
+ /// MBB - The current block.
+ MachineBasicBlock *MBB;
+
+ /// InsertPt - The current insert position inside the current block.
+ MachineBasicBlock::iterator InsertPt;
+
+#ifndef NDEBUG
+ SmallSet<const Instruction *, 8> CatchInfoLost;
+ SmallSet<const Instruction *, 8> CatchInfoFound;
+#endif
+
+ struct LiveOutInfo {
+ unsigned NumSignBits;
+ APInt KnownOne, KnownZero;
+ LiveOutInfo() : NumSignBits(0), KnownOne(1, 0), KnownZero(1, 0) {}
+ };
+
+ /// LiveOutRegInfo - Information about live out vregs, indexed by their
+ /// register number offset by 'FirstVirtualRegister'.
+ std::vector<LiveOutInfo> LiveOutRegInfo;
+
+ /// PHINodesToUpdate - A list of phi instructions whose operand list will
+ /// be updated after processing the current basic block.
+ /// TODO: This isn't per-function state, it's per-basic-block state. But
+ /// there's no other convenient place for it to live right now.
+ std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
+
+ explicit FunctionLoweringInfo(const TargetLowering &TLI);
+
+ /// set - Initialize this FunctionLoweringInfo with the given Function
+ /// and its associated MachineFunction.
+ ///
+ void set(const Function &Fn, MachineFunction &MF);
+
+ /// clear - Clear out all the function-specific state. This returns this
+ /// FunctionLoweringInfo to an empty state, ready to be used for a
+ /// different function.
+ void clear();
+
+ /// isExportedInst - Return true if the specified value is an instruction
+ /// exported from its block.
+ bool isExportedInst(const Value *V) {
+ return ValueMap.count(V);
+ }
+
+ unsigned CreateReg(EVT VT);
+
+ unsigned CreateRegs(const Type *Ty);
+
+ unsigned InitializeRegForValue(const Value *V) {
+ unsigned &R = ValueMap[V];
+ assert(R == 0 && "Already initialized this value register!");
+ return R = CreateRegs(V->getType());
+ }
+
+ /// setByValArgumentFrameIndex - Record frame index for the byval
+ /// argument.
+ void setByValArgumentFrameIndex(const Argument *A, int FI);
+
+ /// getByValArgumentFrameIndex - Get frame index for the byval argument.
+ int getByValArgumentFrameIndex(const Argument *A);
+};
+
+/// AddCatchInfo - Extract the personality and type infos from an eh.selector
+/// call, and add them to the specified machine basic block.
+void AddCatchInfo(const CallInst &I,
+ MachineModuleInfo *MMI, MachineBasicBlock *MBB);
+
+/// CopyCatchInfo - Copy catch information from DestBB to SrcBB.
+void CopyCatchInfo(const BasicBlock *SrcBB, const BasicBlock *DestBB,
+ MachineModuleInfo *MMI, FunctionLoweringInfo &FLI);
+
+} // end namespace llvm
+
+#endif
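A sketch of the lifecycle the new FunctionLoweringInfo class is designed for, not part of the patch; the driver function is hypothetical and error handling is omitted.

#include "llvm/CodeGen/FunctionLoweringInfo.h"
using namespace llvm;

// Hypothetical driver: build per-function lowering state, select, tear down.
static void lowerOneFunction(const TargetLowering &TLI, const Function &F,
                             MachineFunction &MF) {
  FunctionLoweringInfo FuncInfo(TLI);
  FuncInfo.set(F, MF);   // fills MBBMap, ValueMap, StaticAllocaMap, ...
  // ... instruction selection (e.g. a FastISel built from FuncInfo) runs here
  FuncInfo.clear();      // reset, ready for the next function
}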
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/GCMetadata.h b/libclamav/c++/llvm/include/llvm/CodeGen/GCMetadata.h
index 04fd8be..b401068 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/GCMetadata.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/GCMetadata.h
@@ -1,4 +1,4 @@
-//===-- GCMetadata.h - Garbage collector metadata -------------------------===//
+//===-- GCMetadata.h - Garbage collector metadata ---------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,7 +14,7 @@
//
// The GCFunctionInfo class logs the data necessary to build a type accurate
// stack map. The code generator outputs:
-//
+//
// - Safe points as specified by the GCStrategy's NeededSafePoints.
// - Stack offsets for GC roots, as specified by calls to llvm.gcroot
//
@@ -38,16 +38,14 @@
#include "llvm/ADT/StringMap.h"
namespace llvm {
-
class AsmPrinter;
class GCStrategy;
class Constant;
- class MCAsmInfo;
-
-
+ class MCSymbol;
+
namespace GC {
/// PointKind - The type of a collector-safe point.
- ///
+ ///
enum PointKind {
Loop, //< Instr is a loop (backwards branch).
Return, //< Instr is a return instruction.
@@ -55,138 +53,138 @@ namespace llvm {
PostCall //< Instr is the return address of a call.
};
}
-
+
/// GCPoint - Metadata for a collector-safe point in machine code.
- ///
+ ///
struct GCPoint {
GC::PointKind Kind; //< The kind of the safe point.
- unsigned Num; //< Usually a label.
-
- GCPoint(GC::PointKind K, unsigned N) : Kind(K), Num(N) {}
+ MCSymbol *Label; //< A label.
+
+ GCPoint(GC::PointKind K, MCSymbol *L) : Kind(K), Label(L) {}
};
-
+
/// GCRoot - Metadata for a pointer to an object managed by the garbage
/// collector.
struct GCRoot {
int Num; //< Usually a frame index.
int StackOffset; //< Offset from the stack pointer.
- Constant *Metadata; //< Metadata straight from the call to llvm.gcroot.
-
- GCRoot(int N, Constant *MD) : Num(N), StackOffset(-1), Metadata(MD) {}
+ const Constant *Metadata;//< Metadata straight from the call to llvm.gcroot.
+
+ GCRoot(int N, const Constant *MD) : Num(N), StackOffset(-1), Metadata(MD) {}
};
-
-
+
+
/// GCFunctionInfo - Garbage collection metadata for a single function.
- ///
+ ///
class GCFunctionInfo {
public:
typedef std::vector<GCPoint>::iterator iterator;
typedef std::vector<GCRoot>::iterator roots_iterator;
typedef std::vector<GCRoot>::const_iterator live_iterator;
-
+
private:
const Function &F;
GCStrategy &S;
uint64_t FrameSize;
std::vector<GCRoot> Roots;
std::vector<GCPoint> SafePoints;
-
+
// FIXME: Liveness. A 2D BitVector, perhaps?
- //
+ //
// BitVector Liveness;
- //
+ //
// bool islive(int point, int root) =
// Liveness[point * SafePoints.size() + root]
- //
+ //
// The bit vector is the more compact representation where >3.2% of roots
// are live per safe point (1.5% on 64-bit hosts).
-
+
public:
GCFunctionInfo(const Function &F, GCStrategy &S);
~GCFunctionInfo();
-
+
/// getFunction - Return the function to which this metadata applies.
- ///
+ ///
const Function &getFunction() const { return F; }
-
+
/// getStrategy - Return the GC strategy for the function.
- ///
+ ///
GCStrategy &getStrategy() { return S; }
-
+
/// addStackRoot - Registers a root that lives on the stack. Num is the
/// stack object ID for the alloca (if the code generator is
// using MachineFrameInfo).
- void addStackRoot(int Num, Constant *Metadata) {
+ void addStackRoot(int Num, const Constant *Metadata) {
Roots.push_back(GCRoot(Num, Metadata));
}
-
+
/// addSafePoint - Notes the existence of a safe point. Num is the ID of the
- /// label just prior to the safe point (if the code generator is using
+ /// label just prior to the safe point (if the code generator is using
/// MachineModuleInfo).
- void addSafePoint(GC::PointKind Kind, unsigned Num) {
- SafePoints.push_back(GCPoint(Kind, Num));
+ void addSafePoint(GC::PointKind Kind, MCSymbol *Label) {
+ SafePoints.push_back(GCPoint(Kind, Label));
}
-
+
/// getFrameSize/setFrameSize - Records the function's frame size.
- ///
+ ///
uint64_t getFrameSize() const { return FrameSize; }
void setFrameSize(uint64_t S) { FrameSize = S; }
-
+
/// begin/end - Iterators for safe points.
- ///
+ ///
iterator begin() { return SafePoints.begin(); }
iterator end() { return SafePoints.end(); }
size_t size() const { return SafePoints.size(); }
-
+
/// roots_begin/roots_end - Iterators for all roots in the function.
- ///
+ ///
roots_iterator roots_begin() { return Roots.begin(); }
roots_iterator roots_end () { return Roots.end(); }
size_t roots_size() const { return Roots.size(); }
-
+
/// live_begin/live_end - Iterators for live roots at a given safe point.
- ///
+ ///
live_iterator live_begin(const iterator &p) { return roots_begin(); }
live_iterator live_end (const iterator &p) { return roots_end(); }
size_t live_size(const iterator &p) const { return roots_size(); }
};
-
-
+
+
/// GCModuleInfo - Garbage collection metadata for a whole module.
- ///
+ ///
class GCModuleInfo : public ImmutablePass {
typedef StringMap<GCStrategy*> strategy_map_type;
typedef std::vector<GCStrategy*> list_type;
typedef DenseMap<const Function*,GCFunctionInfo*> finfo_map_type;
-
+
strategy_map_type StrategyMap;
list_type StrategyList;
finfo_map_type FInfoMap;
-
+
GCStrategy *getOrCreateStrategy(const Module *M, const std::string &Name);
-
+
public:
typedef list_type::const_iterator iterator;
-
+
static char ID;
-
+
GCModuleInfo();
~GCModuleInfo();
-
+
/// clear - Resets the pass. The metadata deleter pass calls this.
- ///
+ ///
void clear();
-
+
/// begin/end - Iterators for used strategies.
- ///
+ ///
iterator begin() const { return StrategyList.begin(); }
iterator end() const { return StrategyList.end(); }
-
+
/// get - Look up function metadata.
- ///
+ ///
GCFunctionInfo &getFunctionInfo(const Function &F);
};
-
+
}
#endif
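For orientation, a sketch (not part of the patch) of reading the per-function GC metadata back out now that safe points carry MCSymbol labels; the counting function is hypothetical.

#include "llvm/CodeGen/GCMetadata.h"
using namespace llvm;

// Hypothetical query: count the post-call safe points recorded for F.
static unsigned countPostCallSafePoints(GCModuleInfo &GMI, const Function &F) {
  GCFunctionInfo &FI = GMI.getFunctionInfo(F);
  unsigned N = 0;
  for (GCFunctionInfo::iterator I = FI.begin(), E = FI.end(); I != E; ++I)
    if (I->Kind == GC::PostCall)   // I->Label is the MCSymbol at this point
      ++N;
  return N;
}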
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/GCMetadataPrinter.h b/libclamav/c++/llvm/include/llvm/CodeGen/GCMetadataPrinter.h
index ff1a205..17a2653 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/GCMetadataPrinter.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/GCMetadataPrinter.h
@@ -25,52 +25,49 @@
#include "llvm/Support/Registry.h"
namespace llvm {
-
+
class GCMetadataPrinter;
- class raw_ostream;
-
+
/// GCMetadataPrinterRegistry - The GC assembly printer registry uses all the
/// defaults from Registry.
typedef Registry<GCMetadataPrinter> GCMetadataPrinterRegistry;
-
+
/// GCMetadataPrinter - Emits GC metadata as assembly code.
- ///
+ ///
class GCMetadataPrinter {
public:
typedef GCStrategy::list_type list_type;
typedef GCStrategy::iterator iterator;
-
+
private:
GCStrategy *S;
-
+
friend class AsmPrinter;
-
+
protected:
// May only be subclassed.
GCMetadataPrinter();
-
+
// Do not implement.
GCMetadataPrinter(const GCMetadataPrinter &);
GCMetadataPrinter &operator=(const GCMetadataPrinter &);
-
+
public:
GCStrategy &getStrategy() { return *S; }
const Module &getModule() const { return S->getModule(); }
-
+
/// begin/end - Iterate over the collected function metadata.
iterator begin() { return S->begin(); }
iterator end() { return S->end(); }
-
+
/// beginAssembly/finishAssembly - Emit module metadata as assembly code.
- virtual void beginAssembly(raw_ostream &OS, AsmPrinter &AP,
- const MCAsmInfo &MAI);
-
- virtual void finishAssembly(raw_ostream &OS, AsmPrinter &AP,
- const MCAsmInfo &MAI);
-
+ virtual void beginAssembly(AsmPrinter &AP);
+
+ virtual void finishAssembly(AsmPrinter &AP);
+
virtual ~GCMetadataPrinter();
};
-
+
}
#endif
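And a sketch (not part of the patch) of a metadata printer adapted to the new single-argument hooks; the class and what it chooses to emit are assumptions.

#include "llvm/CodeGen/GCMetadataPrinter.h"
#include "llvm/CodeGen/AsmPrinter.h"
using namespace llvm;

// Hypothetical printer: emit one .long per function with its safe point count.
class FooGCPrinter : public GCMetadataPrinter {
public:
  virtual void finishAssembly(AsmPrinter &AP) {
    for (iterator I = begin(), E = end(); I != E; ++I) {
      GCFunctionInfo &FI = **I;                  // iterate over GCFunctionInfo*
      AP.EmitInt32(static_cast<int>(FI.size())); // number of safe points
    }
  }
};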
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/ISDOpcodes.h b/libclamav/c++/llvm/include/llvm/CodeGen/ISDOpcodes.h
new file mode 100644
index 0000000..2e23f4e
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -0,0 +1,775 @@
+//===-- llvm/CodeGen/ISDOpcodes.h - CodeGen opcodes -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares codegen opcodes and related utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ISDOPCODES_H
+#define LLVM_CODEGEN_ISDOPCODES_H
+
+namespace llvm {
+
+/// ISD namespace - This namespace contains an enum which represents all of the
+/// SelectionDAG node types and value types.
+///
+namespace ISD {
+
+ //===--------------------------------------------------------------------===//
+ /// ISD::NodeType enum - This enum defines the target-independent operators
+ /// for a SelectionDAG.
+ ///
+ /// Targets may also define target-dependent operator codes for SDNodes. For
+ /// example, on x86, these are the enum values in the X86ISD namespace.
+ /// Targets should aim to use target-independent operators to model their
+ /// instruction sets as much as possible, and only use target-dependent
+ /// operators when they have special requirements.
+ ///
+ /// Finally, during and after selection proper, SDNodes may use special
+ /// operator codes that correspond directly with MachineInstr opcodes. These
+ /// are used to represent selected instructions. See the isMachineOpcode()
+ /// and getMachineOpcode() member functions of SDNode.
+ ///
+ enum NodeType {
+ // DELETED_NODE - This is an illegal value that is used to catch
+ // errors. This opcode is not a legal opcode for any node.
+ DELETED_NODE,
+
+ // EntryToken - This is the marker used to indicate the start of the region.
+ EntryToken,
+
+ // TokenFactor - This node takes multiple tokens as input and produces a
+ // single token result. This is used to represent the fact that the operand
+ // operators are independent of each other.
+ TokenFactor,
+
+ // AssertSext, AssertZext - These nodes record if a register contains a
+ // value that has already been zero or sign extended from a narrower type.
+ // These nodes take two operands. The first is the node that has already
+ // been extended, and the second is a value type node indicating the width
+ // of the extension
+ AssertSext, AssertZext,
+
+ // Various leaf nodes.
+ BasicBlock, VALUETYPE, CONDCODE, Register,
+ Constant, ConstantFP,
+ GlobalAddress, GlobalTLSAddress, FrameIndex,
+ JumpTable, ConstantPool, ExternalSymbol, BlockAddress,
+
+ // The address of the GOT
+ GLOBAL_OFFSET_TABLE,
+
+ // FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
+ // llvm.returnaddress on the DAG. These nodes take one operand, the index
+ // of the frame or return address to return. An index of zero corresponds
+ // to the current function's frame or return address, an index of one to the
+ // parent's frame or return address, and so on.
+ FRAMEADDR, RETURNADDR,
+
+ // FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
+ // first (possible) on-stack argument. This is needed for correct stack
+ // adjustment during unwind.
+ FRAME_TO_ARGS_OFFSET,
+
+ // RESULT, OUTCHAIN = EXCEPTIONADDR(INCHAIN) - This node represents the
+ // address of the exception block on entry to a landing pad block.
+ EXCEPTIONADDR,
+
+ // RESULT, OUTCHAIN = LSDAADDR(INCHAIN) - This node represents the
+ // address of the Language Specific Data Area for the enclosing function.
+ LSDAADDR,
+
+ // RESULT, OUTCHAIN = EHSELECTION(INCHAIN, EXCEPTION) - This node represents
+ // the selection index of the exception thrown.
+ EHSELECTION,
+
+ // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
+ // 'eh_return' gcc dwarf builtin, which is used to return from
+ // exception. The general meaning is: adjust stack by OFFSET and pass
+ // execution to HANDLER. Many platform-related details also :)
+ EH_RETURN,
+
+ // OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
+ // This corresponds to the eh.sjlj.setjmp intrinsic.
+ // It takes an input chain and a pointer to the jump buffer as inputs
+ // and returns an outchain.
+ EH_SJLJ_SETJMP,
+
+ // OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
+ // This corresponds to the eh.sjlj.longjmp intrinsic.
+ // It takes an input chain and a pointer to the jump buffer as inputs
+ // and returns an outchain.
+ EH_SJLJ_LONGJMP,
+
+ // TargetConstant* - Like Constant*, but the DAG does not do any folding,
+ // simplification, or lowering of the constant. They are used for constants
+ // which are known to fit in the immediate fields of their users, or for
+ // carrying magic numbers which are not values which need to be materialized
+ // in registers.
+ TargetConstant,
+ TargetConstantFP,
+
+ // TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
+ // anything else with this node, and this is valid in the target-specific
+ // dag, turning into a GlobalAddress operand.
+ TargetGlobalAddress,
+ TargetGlobalTLSAddress,
+ TargetFrameIndex,
+ TargetJumpTable,
+ TargetConstantPool,
+ TargetExternalSymbol,
+ TargetBlockAddress,
+
+ /// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
+ /// This node represents a target intrinsic function with no side effects.
+ /// The first operand is the ID number of the intrinsic from the
+ /// llvm::Intrinsic namespace. The operands to the intrinsic follow. The
+ /// node returns the result of the intrinsic.
+ INTRINSIC_WO_CHAIN,
+
+ /// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
+ /// This node represents a target intrinsic function with side effects that
+ /// returns a result. The first operand is a chain pointer. The second is
+ /// the ID number of the intrinsic from the llvm::Intrinsic namespace. The
+ /// operands to the intrinsic follow. The node has two results, the result
+ /// of the intrinsic and an output chain.
+ INTRINSIC_W_CHAIN,
+
+ /// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...)
+ /// This node represents a target intrinsic function with side effects that
+ /// does not return a result. The first operand is a chain pointer. The
+ /// second is the ID number of the intrinsic from the llvm::Intrinsic
+ /// namespace. The operands to the intrinsic follow.
+ INTRINSIC_VOID,
+
+ // CopyToReg - This node has three operands: a chain, a register number to
+ // set to this value, and a value.
+ CopyToReg,
+
+ // CopyFromReg - This node indicates that the input value is a virtual or
+ // physical register that is defined outside of the scope of this
+ // SelectionDAG. The register is available from the RegisterSDNode object.
+ CopyFromReg,
+
+ // UNDEF - An undefined node
+ UNDEF,
+
+ // EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
+ // a Constant, which is required to be operand #1) half of the integer or
+ // float value specified as operand #0. This is only for use before
+ // legalization, for values that will be broken into multiple registers.
+ EXTRACT_ELEMENT,
+
+ // BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways. Given
+ // two values of the same integer value type, this produces a value twice as
+ // big. Like EXTRACT_ELEMENT, this can only be used before legalization.
+ BUILD_PAIR,
+
+ // MERGE_VALUES - This node takes multiple discrete operands and returns
+ // them all as its individual results. This node has exactly the same
+ // number of inputs and outputs. This node is useful for some pieces of the
+ // code generator that want to think about a single node with multiple
+ // results, not multiple nodes.
+ MERGE_VALUES,
+
+ // Simple integer binary arithmetic operators.
+ ADD, SUB, MUL, SDIV, UDIV, SREM, UREM,
+
+ // SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
+ // a signed/unsigned value of type i[2*N], and return the full value as
+ // two results, each of type iN.
+ SMUL_LOHI, UMUL_LOHI,
+
+ // SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
+ // remainder result.
+ SDIVREM, UDIVREM,
+
+ // CARRY_FALSE - This node is used when folding other nodes,
+ // like ADDC/SUBC, which indicate the carry result is always false.
+ CARRY_FALSE,
+
+ // Carry-setting nodes for multiple precision addition and subtraction.
+ // These nodes take two operands of the same value type, and produce two
+ // results. The first result is the normal add or sub result, the second
+ // result is the carry flag result.
+ ADDC, SUBC,
+
+ // Carry-using nodes for multiple precision addition and subtraction. These
+ // nodes take three operands: The first two are the normal lhs and rhs to
+ // the add or sub, and the third is the input carry flag. These nodes
+ // produce two results; the normal result of the add or sub, and the output
+ // carry flag. These nodes both read and write a carry flag to allow them
+ // to be chained together for add and sub of arbitrarily large
+ // values.
+ ADDE, SUBE,
+
+ // RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
+ // These nodes take two operands: the normal LHS and RHS to the add. They
+ // produce two results: the normal result of the add, and a boolean that
+ // indicates if an overflow occurred (*not* a flag, because it may be stored
+ // to memory, etc.). If the type of the boolean is not i1 then the high
+ // bits conform to getBooleanContents.
+ // These nodes are generated from the llvm.[su]add.with.overflow intrinsics.
+ SADDO, UADDO,
+
+ // Same for subtraction
+ SSUBO, USUBO,
+
+ // Same for multiplication
+ SMULO, UMULO,
+
+ // Simple binary floating point operators.
+ FADD, FSUB, FMUL, FDIV, FREM,
+
+ // FCOPYSIGN(X, Y) - Return the value of X with the sign of Y. NOTE: This
+ // DAG node does not require that X and Y have the same type, just that they
+ // are both floating point. X and the result must have the same type.
+ // FCOPYSIGN(f32, f64) is allowed.
+ FCOPYSIGN,
+
+ // INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
+ // value as an integer 0/1 value.
+ FGETSIGN,
+
+ /// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the
+ /// specified, possibly variable, elements. The number of elements is
+ /// required to be a power of two. The types of the operands must all be
+ /// the same and must match the vector element type, except that integer
+ /// types are allowed to be larger than the element type, in which case
+ /// the operands are implicitly truncated.
+ BUILD_VECTOR,
+
+ /// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element
+ /// at IDX replaced with VAL. If the type of VAL is larger than the vector
+ /// element type then VAL is truncated before replacement.
+ INSERT_VECTOR_ELT,
+
+ /// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR
+ /// identified by the (potentially variable) element number IDX. If the
+ /// return type is an integer type larger than the element type of the
+ /// vector, the result is extended to the width of the return type.
+ EXTRACT_VECTOR_ELT,
+
+ /// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of
+ /// vector type with the same length and element type, this produces a
+ /// concatenated vector result value, with length equal to the sum of the
+ /// lengths of the input vectors.
+ CONCAT_VECTORS,
+
+ /// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (a
+ /// vector value) starting with the (potentially variable) element number
+ /// IDX, which must be a multiple of the result vector length.
+ EXTRACT_SUBVECTOR,
+
+ /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
+ /// VEC1/VEC2. A VECTOR_SHUFFLE node also contains an array of constant int
+ /// values that indicate which value (or undef) each result element will
+ /// get. These constant ints are accessible through the
+ /// ShuffleVectorSDNode class. This is quite similar to the Altivec
+ /// 'vperm' instruction, except that the indices must be constants and are
+ /// in terms of the element size of VEC1/VEC2, not in terms of bytes.
+ VECTOR_SHUFFLE,
+
+ /// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a
+ /// scalar value into element 0 of the resultant vector type. The top
+ /// elements 1 to N-1 of the N-element vector are undefined. The type
+ /// of the operand must match the vector element type, except when they
+ /// are integer types. In this case the operand is allowed to be wider
+ /// than the vector element type, and is implicitly truncated to it.
+ SCALAR_TO_VECTOR,
+
+ // MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing
+ // an unsigned/signed value of type i[2*N], then return the top part.
+ MULHU, MULHS,
+
+ // Bitwise operators - logical and, logical or, logical xor, shift left,
+ // shift right algebraic (shift in sign bits), shift right logical (shift in
+ // zeroes), rotate left, rotate right, and byteswap.
+ AND, OR, XOR, SHL, SRA, SRL, ROTL, ROTR, BSWAP,
+
+ // Counting operators
+ CTTZ, CTLZ, CTPOP,
+
+ // Select(COND, TRUEVAL, FALSEVAL). If the type of the boolean COND is not
+ // i1 then the high bits must conform to getBooleanContents.
+ SELECT,
+
+ // Select with condition operator - This selects between a true value and
+ // a false value (ops #2 and #3) based on the boolean result of comparing
+ // the lhs and rhs (ops #0 and #1) of a conditional expression with the
+ // condition code in op #4, a CondCodeSDNode.
+ SELECT_CC,
+
+ // SetCC operator - This evaluates to a true value iff the condition is
+ // true. If the result value type is not i1 then the high bits conform
+ // to getBooleanContents. The operands to this are the left and right
+ // operands to compare (ops #0, and #1) and the condition code to compare
+ // them with (op #2) as a CondCodeSDNode.
+ SETCC,
+
+ // RESULT = VSETCC(LHS, RHS, COND) operator - This evaluates to a vector of
+ // integer elements with all bits of the result elements set to true if the
+ // comparison is true or all cleared if the comparison is false. The
+ // operands to this are the left and right operands to compare (LHS/RHS) and
+ // the condition code to compare them with (COND) as a CondCodeSDNode.
+ VSETCC,
+
+ // SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
+ // integer shift operations, just like ADD/SUB_PARTS. The operation
+ // ordering is:
+ // [Lo,Hi] = op [LoLHS,HiLHS], Amt
+ SHL_PARTS, SRA_PARTS, SRL_PARTS,
+
+ // Conversion operators. These are all single input single output
+ // operations. For all of these, the result type must be strictly
+ // wider or narrower (depending on the operation) than the source
+ // type.
+
+ // SIGN_EXTEND - Used for integer types, replicating the sign bit
+ // into new bits.
+ SIGN_EXTEND,
+
+ // ZERO_EXTEND - Used for integer types, zeroing the new bits.
+ ZERO_EXTEND,
+
+ // ANY_EXTEND - Used for integer types. The high bits are undefined.
+ ANY_EXTEND,
+
+ // TRUNCATE - Completely drop the high bits.
+ TRUNCATE,
+
+ // [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
+ // depends on the first letter) to floating point.
+ SINT_TO_FP,
+ UINT_TO_FP,
+
+ // SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
+ // sign extend a small value in a large integer register (e.g. sign
+ // extending the low 8 bits of a 32-bit register to fill the top 24 bits
+ // with the 7th bit). The size of the smaller type is indicated by the
+ // second operand (operand #1), a ValueType node.
+ SIGN_EXTEND_INREG,
+
+ /// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
+ /// integer.
+ FP_TO_SINT,
+ FP_TO_UINT,
+
+ /// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type
+ /// down to the precision of the destination VT. TRUNC is a flag, which is
+ /// always an integer that is zero or one. If TRUNC is 0, this is a
+ /// normal rounding; if it is 1, this FP_ROUND is known not to change the
+ /// value of Y.
+ ///
+ /// The TRUNC = 1 case is used in cases where we know that the value will
+ /// not be modified by the node, because Y is not using any of the extra
+ /// precision of the source type. This allows certain transformations like
+ /// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for
+ /// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
+ FP_ROUND,
+
+ // FLT_ROUNDS_ - Returns current rounding mode:
+ // -1 Undefined
+ // 0 Round to 0
+ // 1 Round to nearest
+ // 2 Round to +inf
+ // 3 Round to -inf
+ FLT_ROUNDS_,
+
+ /// X = FP_ROUND_INREG(Y, VT) - This operator takes an FP register, and
+ /// rounds it to a floating point value. It then promotes it and returns it
+ /// in a register of the same size. This operation effectively just
+ /// discards excess precision. The type to round down to is specified by
+ /// the VT operand, a VTSDNode.
+ FP_ROUND_INREG,
+
+ /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
+ FP_EXTEND,
+
+ // BIT_CONVERT - This operator converts between integer, vector and FP
+ // values, as if the value was stored to memory with one type and loaded
+ // from the same address with the other type (or equivalently for vector
+ // format conversions, etc). The source and result are required to have
+ // the same bit size (e.g. f32 <-> i32). This can also be used for
+ // int-to-int or fp-to-fp conversions, but that is a noop, deleted by
+ // getNode().
+ BIT_CONVERT,
+
+ // CONVERT_RNDSAT - This operator is used to support various conversions
+ // between various types (float, signed, unsigned and vectors of those
+ // types) with rounding and saturation. NOTE: Avoid using this operator as
+ // most targets don't support it and the operator might be removed in the
+ // future. It takes the following arguments:
+ // 0) value
+ // 1) dest type (type to convert to)
+ // 2) src type (type to convert from)
+ // 3) rounding imm
+ // 4) saturation imm
+ // 5) ISD::CvtCode indicating the type of conversion to do
+ CONVERT_RNDSAT,
+
+ // FP16_TO_FP32, FP32_TO_FP16 - These operators are used to perform
+ // promotions and truncation for half-precision (16 bit) floating point
+ // numbers. We need special nodes since FP16 is a storage-only type with
+ // special semantics of operations.
+ FP16_TO_FP32, FP32_TO_FP16,
+
+ // FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
+ // FLOG, FLOG2, FLOG10, FEXP, FEXP2,
+ // FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR - Perform various unary floating
+ // point operations. These are inspired by libm.
+ FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
+ FLOG, FLOG2, FLOG10, FEXP, FEXP2,
+ FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR,
+
+ // LOAD and STORE have token chains as their first operand, then the same
+ // operands as an LLVM load/store instruction, then an offset node that
+ // is added / subtracted from the base pointer to form the address (for
+ // indexed memory ops).
+ LOAD, STORE,
+
+ // DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
+ // to a specified boundary. This node always has two return values: a new
+ // stack pointer value and a chain. The first operand is the token chain,
+ // the second is the number of bytes to allocate, and the third is the
+ // alignment boundary. The size is guaranteed to be a multiple of the stack
+ // alignment, and the alignment is guaranteed to be bigger than the stack
+ // alignment (if required) or 0 to get standard stack alignment.
+ DYNAMIC_STACKALLOC,
+
+ // Control flow instructions. These all have token chains.
+
+ // BR - Unconditional branch. The first operand is the chain
+ // operand, the second is the MBB to branch to.
+ BR,
+
+ // BRIND - Indirect branch. The first operand is the chain, the second
+ // is the value to branch to, which must be of the same type as the target's
+ // pointer type.
+ BRIND,
+
+ // BR_JT - Jumptable branch. The first operand is the chain, the second
+ // is the jumptable index, the last one is the jumptable entry index.
+ BR_JT,
+
+ // BRCOND - Conditional branch. The first operand is the chain, the
+ // second is the condition, the third is the block to branch to if the
+ // condition is true. If the type of the condition is not i1, then the
+ // high bits must conform to getBooleanContents.
+ BRCOND,
+
+ // BR_CC - Conditional branch. The behavior is like that of SELECT_CC, in
+ // that the condition is represented as condition code, and two nodes to
+ // compare, rather than as a combined SetCC node. The operands in order are
+ // chain, cc, lhs, rhs, block to branch to if condition is true.
+ BR_CC,
+
+ // INLINEASM - Represents an inline asm block. This node always has two
+ // return values: a chain and a flag result. The inputs are as follows:
+ // Operand #0 : Input chain.
+ // Operand #1 : a ExternalSymbolSDNode with a pointer to the asm string.
+ // Operand #2 : a MDNodeSDNode with the !srcloc metadata.
+ // After this, it is followed by a list of operands with this format:
+ // ConstantSDNode: Flags that encode whether it is a mem or not, the
+ // number of operands that follow, etc. See InlineAsm.h.
+ // ... however many operands ...
+ // Operand #last: Optional, an incoming flag.
+ //
+ // The variable width operands are required to represent target addressing
+ // modes as a single "operand", even though they may have multiple
+ // SDOperands.
+ INLINEASM,
+
+ // EH_LABEL - Represents a label in mid basic block used to track
+ // locations needed for debug and exception handling tables. These nodes
+ // take a chain as input and return a chain.
+ EH_LABEL,
+
+ // STACKSAVE - STACKSAVE has one operand, an input chain. It produces a
+ // value, the same type as the pointer type for the system, and an output
+ // chain.
+ STACKSAVE,
+
+ // STACKRESTORE has two operands, an input chain and a pointer to restore to;
+ // it returns an output chain.
+ STACKRESTORE,
+
+ // CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of
+ // a call sequence, and carry arbitrary information that the target might want
+ // to know. The first operand is a chain, the rest are specified by the
+ // target and not touched by the DAG optimizers.
+ // CALLSEQ_START..CALLSEQ_END pairs may not be nested.
+ CALLSEQ_START, // Beginning of a call sequence
+ CALLSEQ_END, // End of a call sequence
+
+ // VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
+ // and the alignment. It returns a pair of values: the vaarg value and a
+ // new chain.
+ VAARG,
+
+ // VACOPY - VACOPY has five operands: an input chain, a destination pointer,
+ // a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
+ // source.
+ VACOPY,
+
+ // VAEND, VASTART - VAEND and VASTART have three operands: an input chain, a
+ // pointer, and a SRCVALUE.
+ VAEND, VASTART,
+
+ // SRCVALUE - This is a node type that holds a Value* that is used to
+ // make reference to a value in the LLVM IR.
+ SRCVALUE,
+
+ // MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
+ // reference metadata in the IR.
+ MDNODE_SDNODE,
+
+ // PCMARKER - This corresponds to the pcmarker intrinsic.
+ PCMARKER,
+
+ // READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
+ // Its only operand is a chain; a value and a chain are produced. The
+ // value is the contents of the architecture-specific cycle counter-like
+ // register (or other high accuracy, low latency clock source).
+ READCYCLECOUNTER,
+
+ // HANDLENODE node - Used as a handle for various purposes.
+ HANDLENODE,
+
+ // TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
+ // It takes as input a token chain, the pointer to the trampoline,
+ // the pointer to the nested function, the pointer to pass for the
+ // 'nest' parameter, a SRCVALUE for the trampoline and another for
+ // the nested function (allowing targets to access the original
+ // Function*). It produces the result of the intrinsic and a token
+ // chain as output.
+ TRAMPOLINE,
+
+ // TRAP - Trapping instruction
+ TRAP,
+
+ // PREFETCH - This corresponds to a prefetch intrinsic. It takes a chain as
+ // its first operand. The other operands are the address to prefetch,
+ // read / write specifier, and locality specifier.
+ PREFETCH,
+
+ // OUTCHAIN = MEMBARRIER(INCHAIN, load-load, load-store, store-load,
+ // store-store, device)
+ // This corresponds to the memory.barrier intrinsic.
+ // It takes an input chain, 4 operands to specify the type of barrier, an
+ // operand specifying if the barrier applies to device and uncached memory,
+ // and produces an output chain.
+ MEMBARRIER,
+
+ // Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
+ // This corresponds to the atomic.lcs intrinsic.
+ // cmp is compared to *ptr, and if equal, swap is stored in *ptr.
+ // The return is always the original value in *ptr.
+ ATOMIC_CMP_SWAP,
+
+ // Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
+ // This corresponds to the atomic.swap intrinsic.
+ // amt is stored to *ptr atomically.
+ // The return is always the original value in *ptr.
+ ATOMIC_SWAP,
+
+ // Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
+ // This corresponds to the atomic.load.[OpName] intrinsic.
+ // op(*ptr, amt) is stored to *ptr atomically.
+ // The return is always the original value in *ptr.
+ ATOMIC_LOAD_ADD,
+ ATOMIC_LOAD_SUB,
+ ATOMIC_LOAD_AND,
+ ATOMIC_LOAD_OR,
+ ATOMIC_LOAD_XOR,
+ ATOMIC_LOAD_NAND,
+ ATOMIC_LOAD_MIN,
+ ATOMIC_LOAD_MAX,
+ ATOMIC_LOAD_UMIN,
+ ATOMIC_LOAD_UMAX,
+
+ /// BUILTIN_OP_END - This must be the last enum value in this list.
+ /// The target-specific pre-isel opcode values start here.
+ BUILTIN_OP_END
+ };
+
+ /// FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations
+ /// which do not reference a specific memory location should be less than
+ /// this value. Those that do must not be less than this value, and can
+ /// be used with SelectionDAG::getMemIntrinsicNode.
+ static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END+150;
+
+ //===--------------------------------------------------------------------===//
+ /// MemIndexedMode enum - This enum defines the load / store indexed
+ /// addressing modes.
+ ///
+ /// UNINDEXED "Normal" load / store. The effective address is already
+ /// computed and is available in the base pointer. The offset
+ /// operand is always undefined. In addition to producing a
+ /// chain, an unindexed load produces one value (result of the
+ /// load); an unindexed store does not produce a value.
+ ///
+ /// PRE_INC Similar to the unindexed mode where the effective address is
+ /// PRE_DEC the value of the base pointer plus / minus the offset.
+ /// It considers the computation as being folded into the load /
+ /// store operation (i.e. the load / store does the address
+ /// computation as well as performing the memory transaction).
+ /// The base operand is always undefined. In addition to
+ /// producing a chain, pre-indexed load produces two values
+ /// (result of the load and the result of the address
+ /// computation); a pre-indexed store produces one value (result
+ /// of the address computation).
+ ///
+ /// POST_INC The effective address is the value of the base pointer. The
+ /// POST_DEC value of the offset operand is then added to / subtracted
+ /// from the base after memory transaction. In addition to
+ /// producing a chain, post-indexed load produces two values
+ /// (the result of the load and the result of the base +/- offset
+ /// computation); a post-indexed store produces one value (the
+ /// result of the base +/- offset computation).
+ enum MemIndexedMode {
+ UNINDEXED = 0,
+ PRE_INC,
+ PRE_DEC,
+ POST_INC,
+ POST_DEC,
+ LAST_INDEXED_MODE
+ };
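As a rough sketch of the pre- and post-indexed modes described above (illustrative only; the helper names below are invented and the code is not part of the patch), a pre-indexed load accesses base plus/minus offset and also yields that updated address, while a post-indexed load accesses the old base and yields base plus/minus offset:

#include <cstddef>
#include <cstdint>
#include <utility>

// Each helper returns {loaded value, result of the address computation},
// mirroring the two results documented for pre-/post-indexed loads.
static std::pair<uint32_t, const uint32_t*>
preIndexedLoad(const uint32_t *base, std::ptrdiff_t offset) {
  const uint32_t *ea = base + offset;   // address computation folded into the load
  return std::make_pair(*ea, ea);       // value at base+offset, plus the new base
}

static std::pair<uint32_t, const uint32_t*>
postIndexedLoad(const uint32_t *base, std::ptrdiff_t offset) {
  uint32_t value = *base;                      // effective address is the old base
  return std::make_pair(value, base + offset); // base is advanced after the access
}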
+
+ //===--------------------------------------------------------------------===//
+ /// LoadExtType enum - This enum defines the three variants of LOADEXT
+ /// (load with extension).
+ ///
+ /// SEXTLOAD loads the integer operand and sign extends it to a larger
+ /// integer result type.
+ /// ZEXTLOAD loads the integer operand and zero extends it to a larger
+ /// integer result type.
+ /// EXTLOAD is used for two things: floating point extending loads and
+ /// integer extending loads [the top bits are undefined].
+ enum LoadExtType {
+ NON_EXTLOAD = 0,
+ EXTLOAD,
+ SEXTLOAD,
+ ZEXTLOAD,
+ LAST_LOADEXT_TYPE
+ };
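To make the SEXTLOAD / ZEXTLOAD distinction concrete, here is a small standalone sketch (illustrative only; the function names are invented) of widening an 8-bit memory value to 32 bits. For EXTLOAD the upper bits would simply be unspecified.

#include <cstdint>
#include <cstring>

// SEXTLOAD: load an i8 and replicate its sign bit into the upper 24 bits.
static int32_t sextLoad8(const void *p) {
  int8_t v;
  std::memcpy(&v, p, sizeof v);
  return static_cast<int32_t>(v);
}

// ZEXTLOAD: load an i8 and fill the upper 24 bits with zeroes.
static int32_t zextLoad8(const void *p) {
  uint8_t v;
  std::memcpy(&v, p, sizeof v);
  return static_cast<int32_t>(v);
}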
+
+ //===--------------------------------------------------------------------===//
+ /// ISD::CondCode enum - These are ordered carefully to make the bitfields
+ /// below work out, when considering SETFALSE (something that never exists
+ /// dynamically) as 0. "U" -> Unsigned (for integer operands) or Unordered
+ /// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal
+ /// to. If the "N" column is 1, the result of the comparison is undefined if
+ /// the input is a NAN.
+ ///
+ /// All of these (except for the 'always folded ops') should be handled for
+ /// floating point. For integer, only the SETEQ,SETNE,SETLT,SETLE,SETGT,
+ /// SETGE,SETULT,SETULE,SETUGT, and SETUGE opcodes are used.
+ ///
+ /// Note that these are laid out in a specific order to allow bit-twiddling
+ /// to transform conditions.
+ enum CondCode {
+ // Opcode N U L G E Intuitive operation
+ SETFALSE, // 0 0 0 0 Always false (always folded)
+ SETOEQ, // 0 0 0 1 True if ordered and equal
+ SETOGT, // 0 0 1 0 True if ordered and greater than
+ SETOGE, // 0 0 1 1 True if ordered and greater than or equal
+ SETOLT, // 0 1 0 0 True if ordered and less than
+ SETOLE, // 0 1 0 1 True if ordered and less than or equal
+ SETONE, // 0 1 1 0 True if ordered and operands are unequal
+ SETO, // 0 1 1 1 True if ordered (no nans)
+ SETUO, // 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
+ SETUEQ, // 1 0 0 1 True if unordered or equal
+ SETUGT, // 1 0 1 0 True if unordered or greater than
+ SETUGE, // 1 0 1 1 True if unordered, greater than, or equal
+ SETULT, // 1 1 0 0 True if unordered or less than
+ SETULE, // 1 1 0 1 True if unordered, less than, or equal
+ SETUNE, // 1 1 1 0 True if unordered or not equal
+ SETTRUE, // 1 1 1 1 Always true (always folded)
+ // Don't care operations: undefined if the input is a nan.
+ SETFALSE2, // 1 X 0 0 0 Always false (always folded)
+ SETEQ, // 1 X 0 0 1 True if equal
+ SETGT, // 1 X 0 1 0 True if greater than
+ SETGE, // 1 X 0 1 1 True if greater than or equal
+ SETLT, // 1 X 1 0 0 True if less than
+ SETLE, // 1 X 1 0 1 True if less than or equal
+ SETNE, // 1 X 1 1 0 True if not equal
+ SETTRUE2, // 1 X 1 1 1 Always true (always folded)
+
+ SETCC_INVALID // Marker value.
+ };
+
+ /// isSignedIntSetCC - Return true if this is a setcc instruction that
+ /// performs a signed comparison when used with integer operands.
+ inline bool isSignedIntSetCC(CondCode Code) {
+ return Code == SETGT || Code == SETGE || Code == SETLT || Code == SETLE;
+ }
+
+ /// isUnsignedIntSetCC - Return true if this is a setcc instruction that
+ /// performs an unsigned comparison when used with integer operands.
+ inline bool isUnsignedIntSetCC(CondCode Code) {
+ return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
+ }
+
+ /// isTrueWhenEqual - Return true if the specified condition returns true if
+ /// the two operands to the condition are equal. Note that if one of the two
+ /// operands is a NaN, this value is meaningless.
+ inline bool isTrueWhenEqual(CondCode Cond) {
+ return ((int)Cond & 1) != 0;
+ }
+
+ /// getUnorderedFlavor - This function returns 0 if the condition is always
+ /// false if an operand is a NaN, 1 if the condition is always true if the
+ /// operand is a NaN, and 2 if the condition is undefined if the operand is a
+ /// NaN.
+ inline unsigned getUnorderedFlavor(CondCode Cond) {
+ return ((int)Cond >> 3) & 3;
+ }
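Reading these helpers against the table above (bit 0 = E, bit 1 = G, bit 2 = L, bit 3 = U), a few spot checks follow directly from the enum ordering. This is an illustrative sketch only; the function name is invented and it assumes the llvm::ISD declarations above are in scope.

#include <cassert>

void checkCondCodeLayout() {
  using namespace llvm::ISD;
  assert(isSignedIntSetCC(SETLT) && !isSignedIntSetCC(SETULT));
  assert(isUnsignedIntSetCC(SETULT) && !isUnsignedIntSetCC(SETLT));
  assert(isTrueWhenEqual(SETULE));          // the E bit (bit 0) is set
  assert(getUnorderedFlavor(SETOLT) == 0);  // ordered: false if an operand is NaN
  assert(getUnorderedFlavor(SETUGT) == 1);  // unordered: true if an operand is NaN
  assert(getUnorderedFlavor(SETGT) == 2);   // don't care: undefined on NaN
}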
+
+ /// getSetCCInverse - Return the operation corresponding to !(X op Y), where
+ /// 'op' is a valid SetCC operation.
+ CondCode getSetCCInverse(CondCode Operation, bool isInteger);
+
+ /// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
+ /// when given the operation for (X op Y).
+ CondCode getSetCCSwappedOperands(CondCode Operation);
+
+ /// getSetCCOrOperation - Return the result of a logical OR between different
+ /// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This
+ /// function returns SETCC_INVALID if it is not possible to represent the
+ /// resultant comparison.
+ CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, bool isInteger);
+
+ /// getSetCCAndOperation - Return the result of a logical AND between
+ /// different comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
+ /// function returns SETCC_INVALID if it is not possible to represent the
+ /// resultant comparison.
+ CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, bool isInteger);
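The expected behaviour of these four helpers follows from the documented semantics; for example (an illustrative sketch based on the doc comments rather than the implementations, with an invented function name, and assuming the llvm::ISD declarations are in scope):

#include <cassert>

void checkSetCCAlgebra() {
  using namespace llvm::ISD;
  assert(getSetCCInverse(SETLT, /*isInteger=*/true) == SETGE);           // !(X < Y) is X >= Y
  assert(getSetCCSwappedOperands(SETLT) == SETGT);                       // (Y op X) form of X < Y
  assert(getSetCCOrOperation(SETLT, SETEQ, /*isInteger=*/true) == SETLE);  // (X < Y) | (X == Y) is X <= Y
  assert(getSetCCAndOperation(SETLE, SETGE, /*isInteger=*/true) == SETEQ); // (X <= Y) & (X >= Y) is X == Y
}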
+
+ //===--------------------------------------------------------------------===//
+ /// CvtCode enum - This enum defines the various conversions CONVERT_RNDSAT
+ /// supports.
+ enum CvtCode {
+ CVT_FF, // Float from Float
+ CVT_FS, // Float from Signed
+ CVT_FU, // Float from Unsigned
+ CVT_SF, // Signed from Float
+ CVT_UF, // Unsigned from Float
+ CVT_SS, // Signed from Signed
+ CVT_SU, // Signed from Unsigned
+ CVT_US, // Unsigned from Signed
+ CVT_UU, // Unsigned from Unsigned
+ CVT_INVALID // Marker - Invalid opcode
+ };
+
+} // end llvm::ISD namespace
+
+} // end llvm namespace
+
+#endif
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/JITCodeEmitter.h b/libclamav/c++/llvm/include/llvm/CodeGen/JITCodeEmitter.h
index 0a1d4f4..eb373fb 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/JITCodeEmitter.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/JITCodeEmitter.h
@@ -21,6 +21,7 @@
#include "llvm/System/DataTypes.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/CodeGen/MachineCodeEmitter.h"
+#include "llvm/ADT/DenseMap.h"
using namespace std;
@@ -35,7 +36,7 @@ class MachineRelocation;
class Value;
class GlobalValue;
class Function;
-
+
/// JITCodeEmitter - This class defines two sorts of methods: those for
/// emitting the actual bytes of machine code, and those for emitting auxiliary
/// structures, such as jump tables, relocations, etc.
@@ -173,13 +174,20 @@ public:
/// emitULEB128Bytes - This callback is invoked when a ULEB128 needs to be
/// written to the output stream.
- void emitULEB128Bytes(uint64_t Value) {
+ void emitULEB128Bytes(uint64_t Value, unsigned PadTo = 0) {
do {
uint8_t Byte = Value & 0x7f;
Value >>= 7;
- if (Value) Byte |= 0x80;
+ if (Value || PadTo != 0) Byte |= 0x80;
emitByte(Byte);
} while (Value);
+
+ if (PadTo) {
+ do {
+ uint8_t Byte = (PadTo > 1) ? 0x80 : 0x0;
+ emitByte(Byte);
+ } while (--PadTo);
+ }
}
/// emitSLEB128Bytes - This callback is invoked when a SLEB128 needs to be
@@ -242,7 +250,7 @@ public:
/// emitLabel - Emits a label
- virtual void emitLabel(uint64_t LabelID) = 0;
+ virtual void emitLabel(MCSymbol *Label) = 0;
/// allocateSpace - Allocate a block of space in the current output buffer,
/// returning null (and setting conditions to indicate buffer overflow) on
@@ -316,14 +324,18 @@ public:
///
virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const= 0;
- /// getLabelAddress - Return the address of the specified LabelID, only usable
- /// after the LabelID has been emitted.
+ /// getLabelAddress - Return the address of the specified Label, only usable
+ /// after the Label has been emitted.
///
- virtual uintptr_t getLabelAddress(uint64_t LabelID) const = 0;
+ virtual uintptr_t getLabelAddress(MCSymbol *Label) const = 0;
/// Specifies the MachineModuleInfo object. This is used for exception handling
/// purposes.
virtual void setModuleInfo(MachineModuleInfo* Info) = 0;
+
+ /// getLabelLocations - Return the label locations map of the label IDs to
+ /// their address.
+ virtual DenseMap<MCSymbol*, uintptr_t> *getLabelLocations() { return 0; }
};
} // End llvm namespace
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h b/libclamav/c++/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h
index 7ac0418..13cebea 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h
@@ -17,7 +17,6 @@
#define LATENCY_PRIORITY_QUEUE_H
#include "llvm/CodeGen/ScheduleDAG.h"
-#include "llvm/ADT/PriorityQueue.h"
namespace llvm {
class LatencyPriorityQueue;
@@ -41,10 +40,11 @@ namespace llvm {
std::vector<unsigned> NumNodesSolelyBlocking;
/// Queue - The queue.
- PriorityQueue<SUnit*, std::vector<SUnit*>, latency_sort> Queue;
+ std::vector<SUnit*> Queue;
+ latency_sort Picker;
-public:
- LatencyPriorityQueue() : Queue(latency_sort(this)) {
+ public:
+ LatencyPriorityQueue() : Picker(this) {
}
void initNodes(std::vector<SUnit> &sunits) {
@@ -73,31 +73,13 @@ public:
return NumNodesSolelyBlocking[NodeNum];
}
- unsigned size() const { return Queue.size(); }
-
bool empty() const { return Queue.empty(); }
- virtual void push(SUnit *U) {
- push_impl(U);
- }
- void push_impl(SUnit *U);
-
- void push_all(const std::vector<SUnit *> &Nodes) {
- for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
- push_impl(Nodes[i]);
- }
+ virtual void push(SUnit *U);
- SUnit *pop() {
- if (empty()) return NULL;
- SUnit *V = Queue.top();
- Queue.pop();
- return V;
- }
+ virtual SUnit *pop();
- void remove(SUnit *SU) {
- assert(!Queue.empty() && "Not in queue!");
- Queue.erase_one(SU);
- }
+ virtual void remove(SUnit *SU);
// ScheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor. If so, that
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h b/libclamav/c++/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
index 27947e8..cd8293d 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
@@ -33,7 +33,7 @@ namespace {
(void) llvm::createDeadMachineInstructionElimPass();
- (void) llvm::createLocalRegisterAllocator();
+ (void) llvm::createFastRegisterAllocator();
(void) llvm::createLinearScanRegisterAllocator();
(void) llvm::createPBQPRegisterAllocator();
@@ -45,6 +45,7 @@ namespace {
(void) llvm::createBURRListDAGScheduler(NULL, llvm::CodeGenOpt::Default);
(void) llvm::createTDRRListDAGScheduler(NULL, llvm::CodeGenOpt::Default);
(void) llvm::createSourceListDAGScheduler(NULL,llvm::CodeGenOpt::Default);
+ (void) llvm::createHybridListDAGScheduler(NULL,llvm::CodeGenOpt::Default);
(void) llvm::createTDListDAGScheduler(NULL, llvm::CodeGenOpt::Default);
(void) llvm::createFastDAGScheduler(NULL, llvm::CodeGenOpt::Default);
(void) llvm::createDefaultScheduler(NULL, llvm::CodeGenOpt::Default);
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/LiveInterval.h b/libclamav/c++/llvm/include/llvm/CodeGen/LiveInterval.h
index 1f198e6..29e689a 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/LiveInterval.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/LiveInterval.h
@@ -39,7 +39,7 @@ namespace llvm {
/// This class holds information about a machine level value, including
/// definition and use points.
///
- /// Care must be taken in interpreting the def index of the value. The
+ /// Care must be taken in interpreting the def index of the value. The
/// following rules apply:
///
/// If the isDefAccurate() method returns false then def does not contain the
@@ -53,7 +53,7 @@ namespace llvm {
class VNInfo {
private:
enum {
- HAS_PHI_KILL = 1,
+ HAS_PHI_KILL = 1,
REDEF_BY_EC = 1 << 1,
IS_PHI_DEF = 1 << 2,
IS_UNUSED = 1 << 3,
@@ -67,22 +67,14 @@ namespace llvm {
} cr;
public:
-
- typedef SmallVector<SlotIndex, 4> KillSet;
+ typedef BumpPtrAllocator Allocator;
/// The ID number of this value.
unsigned id;
-
+
/// The index of the defining instruction (if isDefAccurate() returns true).
SlotIndex def;
- KillSet kills;
-
- /*
- VNInfo(LiveIntervals &li_)
- : defflags(IS_UNUSED), id(~1U) { cr.copy = 0; }
- */
-
/// VNInfo constructor.
/// d is presumed to point to the actual defining instr. If it doesn't
/// setIsDefAccurate(false) should be called after construction.
@@ -91,7 +83,7 @@ namespace llvm {
/// VNInfo constructor, copies values from orig, except for the value number.
VNInfo(unsigned i, const VNInfo &orig)
- : flags(orig.flags), cr(orig.cr), id(i), def(orig.def), kills(orig.kills)
+ : flags(orig.flags), cr(orig.cr), id(i), def(orig.def)
{ }
/// Copy from the parameter into this VNInfo.
@@ -99,7 +91,6 @@ namespace llvm {
flags = src.flags;
cr = src.cr;
def = src.def;
- kills = src.kills;
}
/// Used for copying value number info.
@@ -114,10 +105,10 @@ namespace llvm {
/// This method should not be called on stack intervals as it may lead to
/// undefined behavior.
void setCopy(MachineInstr *c) { cr.copy = c; }
-
+
/// For a stack interval, returns the reg which this stack interval was
/// defined from.
- /// For a register interval the behaviour of this method is undefined.
+ /// For a register interval the behaviour of this method is undefined.
unsigned getReg() const { return cr.reg; }
/// For a stack interval, set the defining register.
/// This method should not be called on register intervals as it may lead
@@ -144,7 +135,7 @@ namespace llvm {
else
flags &= ~REDEF_BY_EC;
}
-
+
/// Returns true if this value is defined by a PHI instruction (or was,
/// PHI instructions may have been eliminated).
bool isPHIDef() const { return flags & IS_PHI_DEF; }
@@ -172,49 +163,9 @@ namespace llvm {
void setIsDefAccurate(bool defAccurate) {
if (defAccurate)
flags |= IS_DEF_ACCURATE;
- else
+ else
flags &= ~IS_DEF_ACCURATE;
}
-
- /// Returns true if the given index is a kill of this value.
- bool isKill(SlotIndex k) const {
- KillSet::const_iterator
- i = std::lower_bound(kills.begin(), kills.end(), k);
- return (i != kills.end() && *i == k);
- }
-
- /// addKill - Add a kill instruction index to the specified value
- /// number.
- void addKill(SlotIndex k) {
- if (kills.empty()) {
- kills.push_back(k);
- } else {
- KillSet::iterator
- i = std::lower_bound(kills.begin(), kills.end(), k);
- kills.insert(i, k);
- }
- }
-
- /// Remove the specified kill index from this value's kills list.
- /// Returns true if the value was present, otherwise returns false.
- bool removeKill(SlotIndex k) {
- KillSet::iterator i = std::lower_bound(kills.begin(), kills.end(), k);
- if (i != kills.end() && *i == k) {
- kills.erase(i);
- return true;
- }
- return false;
- }
-
- /// Remove all kills in the range [s, e).
- void removeKills(SlotIndex s, SlotIndex e) {
- KillSet::iterator
- si = std::lower_bound(kills.begin(), kills.end(), s),
- se = std::upper_bound(kills.begin(), kills.end(), e);
-
- kills.erase(si, se);
- }
-
};
/// LiveRange structure - This represents a simple register range in the
@@ -238,7 +189,7 @@ namespace llvm {
}
/// containsRange - Return true if the given range, [S, E), is covered by
- /// this range.
+ /// this range.
bool containsRange(SlotIndex S, SlotIndex E) const {
assert((S < E) && "Backwards interval?");
return (start <= S && S < end) && (start < E && E <= end);
@@ -258,6 +209,8 @@ namespace llvm {
LiveRange(); // DO NOT IMPLEMENT
};
+ template <> struct isPodLike<LiveRange> { static const bool value = true; };
+
raw_ostream& operator<<(raw_ostream& os, const LiveRange &LR);
@@ -283,7 +236,7 @@ namespace llvm {
float weight; // weight of this interval
Ranges ranges; // the ranges in which this register is live
VNInfoList valnos; // value#'s
-
+
struct InstrSlots {
enum {
LOAD = 0,
@@ -328,7 +281,7 @@ namespace llvm {
while (I->end <= Pos) ++I;
return I;
}
-
+
void clear() {
valnos.clear();
ranges.clear();
@@ -352,7 +305,7 @@ namespace llvm {
bool containsOneValue() const { return valnos.size() == 1; }
unsigned getNumValNums() const { return (unsigned)valnos.size(); }
-
+
/// getValNumInfo - Returns pointer to the specified val#.
///
inline VNInfo *getValNumInfo(unsigned ValNo) {
@@ -365,11 +318,9 @@ namespace llvm {
/// getNextValue - Create a new value number and return it. MIIdx specifies
/// the instruction that defines the value number.
VNInfo *getNextValue(SlotIndex def, MachineInstr *CopyMI,
- bool isDefAccurate, BumpPtrAllocator &VNInfoAllocator){
+ bool isDefAccurate, VNInfo::Allocator &VNInfoAllocator) {
VNInfo *VNI =
- static_cast<VNInfo*>(VNInfoAllocator.Allocate((unsigned)sizeof(VNInfo),
- alignof<VNInfo>()));
- new (VNI) VNInfo((unsigned)valnos.size(), def, CopyMI);
+ new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), def, CopyMI);
VNI->setIsDefAccurate(isDefAccurate);
valnos.push_back(VNI);
return VNI;
@@ -378,26 +329,17 @@ namespace llvm {
/// Create a copy of the given value. The new value will be identical except
/// for the Value number.
VNInfo *createValueCopy(const VNInfo *orig,
- BumpPtrAllocator &VNInfoAllocator) {
+ VNInfo::Allocator &VNInfoAllocator) {
VNInfo *VNI =
- static_cast<VNInfo*>(VNInfoAllocator.Allocate((unsigned)sizeof(VNInfo),
- alignof<VNInfo>()));
-
- new (VNI) VNInfo((unsigned)valnos.size(), *orig);
+ new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), *orig);
valnos.push_back(VNI);
return VNI;
}
- /// addKills - Add a number of kills into the VNInfo kill vector. If this
- /// interval is live at a kill point, then the kill is not added.
- void addKills(VNInfo *VNI, const VNInfo::KillSet &kills) {
- for (unsigned i = 0, e = static_cast<unsigned>(kills.size());
- i != e; ++i) {
- if (!liveBeforeAndAt(kills[i])) {
- VNI->addKill(kills[i]);
- }
- }
- }
+ /// RenumberValues - Renumber all values in order of appearance and remove
+ /// unused values.
+ /// Recalculate phi-kill flags in case any phi-def values were removed.
+ void RenumberValues(LiveIntervals &lis);
/// isOnlyLROfValNo - Return true if the specified live range is the only
/// one defined by the its val#.
@@ -409,7 +351,7 @@ namespace llvm {
}
return true;
}
-
+
/// MergeValueNumberInto - This method is called when two value numbers
/// are found to be equivalent. This eliminates V1, replacing all
/// LiveRanges with the V1 value number with the V2 value number. This can
@@ -422,14 +364,14 @@ namespace llvm {
/// VNInfoAllocator since it will create a new val#.
void MergeInClobberRanges(LiveIntervals &li_,
const LiveInterval &Clobbers,
- BumpPtrAllocator &VNInfoAllocator);
+ VNInfo::Allocator &VNInfoAllocator);
/// MergeInClobberRange - Same as MergeInClobberRanges except it merge in a
/// single LiveRange only.
void MergeInClobberRange(LiveIntervals &li_,
SlotIndex Start,
SlotIndex End,
- BumpPtrAllocator &VNInfoAllocator);
+ VNInfo::Allocator &VNInfoAllocator);
/// MergeValueInAsValue - Merge all of the live ranges of a specific val#
/// in RHS into this live interval as the specified value number.
@@ -449,8 +391,8 @@ namespace llvm {
/// Copy - Copy the specified live interval. This copies all the fields
/// except for the register of the interval.
void Copy(const LiveInterval &RHS, MachineRegisterInfo *MRI,
- BumpPtrAllocator &VNInfoAllocator);
-
+ VNInfo::Allocator &VNInfoAllocator);
+
bool empty() const { return ranges.empty(); }
/// beginIndex - Return the lowest numbered slot covered by interval.
@@ -477,6 +419,17 @@ namespace llvm {
// range. If it does, then check if the previous live range ends at index-1.
bool liveBeforeAndAt(SlotIndex index) const;
+ /// killedAt - Return true if a live range ends at index. Note that the kill
+ /// point is not contained in the half-open live range. It is usually the
+ /// getDefIndex() slot following its last use.
+ bool killedAt(SlotIndex index) const;
+
+ /// killedInRange - Return true if the interval has kills in [Start,End).
+ /// Note that the kill point is considered the end of a live range, so it is
+ /// not contained in the live range. If a live range ends at End, it won't
+ /// be counted as a kill by this method.
+ bool killedInRange(SlotIndex Start, SlotIndex End) const;
+
/// getLiveRangeContaining - Return the live range that contains the
/// specified index, or null if there is none.
const LiveRange *getLiveRangeContaining(SlotIndex Idx) const {
@@ -491,6 +444,12 @@ namespace llvm {
return I == end() ? 0 : &*I;
}
+ /// getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
+ VNInfo *getVNInfoAt(SlotIndex Idx) const {
+ const_iterator I = FindLiveRangeContaining(Idx);
+ return I == end() ? 0 : I->valno;
+ }
+
/// FindLiveRangeContaining - Return an iterator to the live range that
/// contains the specified index, or end() if there is none.
const_iterator FindLiveRangeContaining(SlotIndex Idx) const;
@@ -500,17 +459,19 @@ namespace llvm {
iterator FindLiveRangeContaining(SlotIndex Idx);
/// findDefinedVNInfo - Find the by the specified
- /// index (register interval) or defined
+ /// index (register interval) or defined
VNInfo *findDefinedVNInfoForRegInt(SlotIndex Idx) const;
/// findDefinedVNInfo - Find the VNInfo that's defined by the specified
/// register (stack inteval only).
VNInfo *findDefinedVNInfoForStackInt(unsigned Reg) const;
-
+
/// overlaps - Return true if the intersection of the two live intervals is
/// not empty.
bool overlaps(const LiveInterval& other) const {
+ if (other.empty())
+ return false;
return overlapsFrom(other, other.begin());
}
@@ -556,14 +517,19 @@ namespace llvm {
/// Also remove the value# from value# list.
void removeValNo(VNInfo *ValNo);
- /// scaleNumbering - Renumber VNI and ranges to provide gaps for new
- /// instructions.
- void scaleNumbering(unsigned factor);
-
/// getSize - Returns the sum of sizes of all the LiveRange's.
///
unsigned getSize() const;
+ /// Returns true if the live interval is zero length, i.e. no live ranges
+ /// span instructions. It doesn't pay to spill such an interval.
+ bool isZeroLength() const {
+ for (const_iterator i = begin(), e = end(); i != e; ++i)
+ if (i->end.getPrevIndex() > i->start)
+ return false;
+ return true;
+ }
+
/// isSpillable - Can this interval be spilled?
bool isSpillable() const {
return weight != HUGE_VALF;
@@ -593,6 +559,7 @@ namespace llvm {
Ranges::iterator addRangeFrom(LiveRange LR, Ranges::iterator From);
void extendIntervalEndTo(Ranges::iterator I, SlotIndex NewEnd);
Ranges::iterator extendIntervalStartTo(Ranges::iterator I, SlotIndex NewStr);
+ void markValNoForDeletion(VNInfo *V);
LiveInterval& operator=(const LiveInterval& rhs); // DO NOT IMPLEMENT
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h b/libclamav/c++/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
index e8856ac..2918c3c 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -42,7 +42,7 @@ namespace llvm {
class TargetInstrInfo;
class TargetRegisterClass;
class VirtRegMap;
-
+
class LiveIntervals : public MachineFunctionPass {
MachineFunction* mf_;
MachineRegisterInfo* mri_;
@@ -55,7 +55,7 @@ namespace llvm {
/// Special pool allocator for VNInfo's (LiveInterval val#).
///
- BumpPtrAllocator VNInfoAllocator;
+ VNInfo::Allocator VNInfoAllocator;
typedef DenseMap<unsigned, LiveInterval*> Reg2IntervalMap;
Reg2IntervalMap r2iMap_;
@@ -68,7 +68,7 @@ namespace llvm {
public:
static char ID; // Pass identification, replacement for typeid
- LiveIntervals() : MachineFunctionPass(&ID) {}
+ LiveIntervals() : MachineFunctionPass(ID) {}
// Calculate the spill weight to assign to a single instruction.
static float getSpillWeight(bool isDef, bool isUse, unsigned loopDepth);
@@ -105,13 +105,25 @@ namespace llvm {
return r2iMap_.count(reg);
}
+ /// isAllocatable - is the physical register reg allocatable in the current
+ /// function?
+ bool isAllocatable(unsigned reg) const {
+ return allocatableRegs_.test(reg);
+ }
+
/// getScaledIntervalSize - get the size of an interval in "units,"
/// where every function is composed of one thousand units. This
/// measure scales properly with empty index slots in the function.
double getScaledIntervalSize(LiveInterval& I) {
return (1000.0 * I.getSize()) / indexes_->getIndexesLength();
}
-
+
+ /// getFuncInstructionCount - Return the number of instructions in the
+ /// current function.
+ unsigned getFuncInstructionCount() {
+ return indexes_->getFunctionSize();
+ }
+
/// getApproximateInstructionCount - computes an estimate of the number
/// of instructions in a given LiveInterval.
unsigned getApproximateInstructionCount(LiveInterval& I) {
@@ -127,11 +139,10 @@ namespace llvm {
bool conflictsWithPhysReg(const LiveInterval &li, VirtRegMap &vrm,
unsigned reg);
- /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegRef except
- /// it can check use as well.
- bool conflictsWithPhysRegRef(LiveInterval &li, unsigned Reg,
- bool CheckUse,
- SmallPtrSet<MachineInstr*,32> &JoinedCopies);
+ /// conflictsWithAliasRef - Similar to conflictsWithPhysRegRef except
+ /// it checks for alias uses and defs.
+ bool conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
+ SmallPtrSet<MachineInstr*,32> &JoinedCopies);
// Interval creation
LiveInterval &getOrCreateInterval(unsigned reg) {
@@ -144,7 +155,7 @@ namespace llvm {
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval *dupInterval(LiveInterval *li);
-
+
/// addLiveRangeToEndOfBlock - Given a register and an instruction,
/// adds a live range from that instruction to the end of its MBB.
LiveRange addLiveRangeToEndOfBlock(unsigned reg,
@@ -176,7 +187,7 @@ namespace llvm {
SlotIndex getInstructionIndex(const MachineInstr *instr) const {
return indexes_->getInstructionIndex(instr);
}
-
+
/// Returns the instruction associated with the given index.
MachineInstr* getInstructionFromIndex(SlotIndex index) const {
return indexes_->getInstructionFromIndex(index);
@@ -185,12 +196,32 @@ namespace llvm {
/// Return the first index in the given basic block.
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
return indexes_->getMBBStartIdx(mbb);
- }
+ }
/// Return the last index in the given basic block.
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
return indexes_->getMBBEndIdx(mbb);
- }
+ }
+
+ bool isLiveInToMBB(const LiveInterval &li,
+ const MachineBasicBlock *mbb) const {
+ return li.liveAt(getMBBStartIdx(mbb));
+ }
+
+ LiveRange* findEnteringRange(LiveInterval &li,
+ const MachineBasicBlock *mbb) {
+ return li.getLiveRangeContaining(getMBBStartIdx(mbb));
+ }
+
+ bool isLiveOutOfMBB(const LiveInterval &li,
+ const MachineBasicBlock *mbb) const {
+ return li.liveAt(getMBBEndIdx(mbb).getPrevSlot());
+ }
+
+ LiveRange* findExitingRange(LiveInterval &li,
+ const MachineBasicBlock *mbb) {
+ return li.getLiveRangeContaining(getMBBEndIdx(mbb).getPrevSlot());
+ }
MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
return indexes_->getMBBFromIndex(index);
@@ -212,6 +243,10 @@ namespace llvm {
indexes_->replaceMachineInstrInMaps(MI, NewMI);
}
+ void InsertMBBInMaps(MachineBasicBlock *MBB) {
+ indexes_->insertMBBInMaps(MBB);
+ }
+
bool findLiveInMBBs(SlotIndex Start, SlotIndex End,
SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
return indexes_->findLiveInMBBs(Start, End, MBBs);
@@ -221,11 +256,7 @@ namespace llvm {
indexes_->renumberIndexes();
}
- BumpPtrAllocator& getVNInfoAllocator() { return VNInfoAllocator; }
-
- /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
- /// copy field and returns the source register that defines it.
- unsigned getVNInfoSourceReg(const VNInfo *VNI) const;
+ VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
virtual void releaseMemory();
@@ -243,12 +274,6 @@ namespace llvm {
addIntervalsForSpills(const LiveInterval& i,
SmallVectorImpl<LiveInterval*> &SpillIs,
const MachineLoopInfo *loopInfo, VirtRegMap& vrm);
-
- /// addIntervalsForSpillsFast - Quickly create new intervals for spilled
- /// defs / uses without remat or splitting.
- std::vector<LiveInterval*>
- addIntervalsForSpillsFast(const LiveInterval &li,
- const MachineLoopInfo *loopInfo, VirtRegMap &vrm);
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
@@ -277,15 +302,11 @@ namespace llvm {
unsigned getNumConflictsWithPhysReg(const LiveInterval &li,
unsigned PhysReg) const;
- /// processImplicitDefs - Process IMPLICIT_DEF instructions. Add isUndef
- /// marker to implicit_def defs and their uses.
- void processImplicitDefs();
-
/// intervalIsInOneMBB - Returns true if the specified interval is entirely
/// within a single basic block.
bool intervalIsInOneMBB(const LiveInterval &li) const;
- private:
+ private:
/// computeIntervals - Compute live intervals.
void computeIntervals();
@@ -297,6 +318,12 @@ namespace llvm {
SlotIndex MIIdx,
MachineOperand& MO, unsigned MOIdx);
+ /// isPartialRedef - Return true if the specified def at the specific index
+ /// is partially re-defining the specified live interval. A common case of
+ /// this is a definition of the sub-register.
+ bool isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
+ LiveInterval &interval);
+
/// handleVirtualRegisterDef - update intervals for a virtual
/// register def
void handleVirtualRegisterDef(MachineBasicBlock *MBB,
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/LiveStackAnalysis.h b/libclamav/c++/llvm/include/llvm/CodeGen/LiveStackAnalysis.h
index e01d1ae..ad984db 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/LiveStackAnalysis.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/LiveStackAnalysis.h
@@ -27,7 +27,7 @@ namespace llvm {
class LiveStacks : public MachineFunctionPass {
/// Special pool allocator for VNInfo's (LiveInterval val#).
///
- BumpPtrAllocator VNInfoAllocator;
+ VNInfo::Allocator VNInfoAllocator;
/// S2IMap - Stack slot indices to live interval mapping.
///
@@ -39,7 +39,7 @@ namespace llvm {
public:
static char ID; // Pass identification, replacement for typeid
- LiveStacks() : MachineFunctionPass(&ID) {}
+ LiveStacks() : MachineFunctionPass(ID) {}
typedef SS2IntervalMap::iterator iterator;
typedef SS2IntervalMap::const_iterator const_iterator;
@@ -91,7 +91,7 @@ namespace llvm {
return I->second;
}
- BumpPtrAllocator& getVNInfoAllocator() { return VNInfoAllocator; }
+ VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
virtual void releaseMemory();
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/LiveVariables.h b/libclamav/c++/llvm/include/llvm/CodeGen/LiveVariables.h
index fc5ea6f..c8182e0 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/LiveVariables.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/LiveVariables.h
@@ -46,7 +46,7 @@ class TargetRegisterInfo;
class LiveVariables : public MachineFunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
- LiveVariables() : MachineFunctionPass(&ID) {}
+ LiveVariables() : MachineFunctionPass(ID) {}
/// VarInfo - This represents the regions where a virtual register is live in
/// the program. We represent this with three different pieces of
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index d92650b..3cfc47a 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -19,9 +19,9 @@
namespace llvm {
+class Pass;
class BasicBlock;
class MachineFunction;
-class MCContext;
class MCSymbol;
class StringRef;
class raw_ostream;
@@ -202,12 +202,9 @@ public:
// Iteration support for live in sets. These sets are kept in sorted
// order by their register number.
- typedef std::vector<unsigned>::iterator livein_iterator;
- typedef std::vector<unsigned>::const_iterator const_livein_iterator;
- livein_iterator livein_begin() { return LiveIns.begin(); }
- const_livein_iterator livein_begin() const { return LiveIns.begin(); }
- livein_iterator livein_end() { return LiveIns.end(); }
- const_livein_iterator livein_end() const { return LiveIns.end(); }
+ typedef std::vector<unsigned>::const_iterator livein_iterator;
+ livein_iterator livein_begin() const { return LiveIns.begin(); }
+ livein_iterator livein_end() const { return LiveIns.end(); }
bool livein_empty() const { return LiveIns.empty(); }
/// getAlignment - Return alignment of the basic block.
@@ -262,6 +259,11 @@ public:
/// machine basic block (i.e., copies all the successors fromMBB and
/// remove all the successors from fromMBB).
void transferSuccessors(MachineBasicBlock *fromMBB);
+
+ /// transferSuccessorsAndUpdatePHIs - Transfers all the successors, as
+ /// in transferSuccessors, and updates PHI operands in the successor blocks
+ /// which refer to fromMBB to refer to this.
+ void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB);
/// isSuccessor - Return true if the specified MBB is a successor of this
/// block.
@@ -280,11 +282,26 @@ public:
/// branch to do so (e.g., a table jump). True is a conservative answer.
bool canFallThrough();
+ /// Returns a pointer to the first instruction in this block that is not a
+ /// PHINode instruction. When adding instructions to the beginning of the
+ /// basic block, they should be added before the returned value, not before
+ /// the first instruction, which might be a PHI.
+ /// Returns end() if there's no non-PHI instruction.
+ iterator getFirstNonPHI();
+
/// getFirstTerminator - returns an iterator to the first terminator
/// instruction of this basic block. If a terminator does not exist,
/// it returns end()
iterator getFirstTerminator();
+ /// SplitCriticalEdge - Split the critical edge from this block to the
+ /// given successor block, and return the newly created block, or null
+ /// if splitting is not possible.
+ ///
+ /// This function updates LiveVariables, MachineDominatorTree, and
+ /// MachineLoopInfo, as applicable.
+ MachineBasicBlock *SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P);
+
void pop_front() { Insts.pop_front(); }
void pop_back() { Insts.pop_back(); }
void push_back(MachineInstr *MI) { Insts.push_back(MI); }
@@ -352,7 +369,7 @@ public:
/// getSymbol - Return the MCSymbol for this basic block.
///
- MCSymbol *getSymbol(MCContext &Ctx) const;
+ MCSymbol *getSymbol() const;
private: // Methods used to maintain doubly linked list of blocks...
friend struct ilist_traits<MachineBasicBlock>;
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineCodeEmitter.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineCodeEmitter.h
index 48b4082..7abb49a 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineCodeEmitter.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineCodeEmitter.h
@@ -31,6 +31,7 @@ class MachineRelocation;
class Value;
class GlobalValue;
class Function;
+class MCSymbol;
/// MachineCodeEmitter - This class defines two sorts of methods: those for
/// emitting the actual bytes of machine code, and those for emitting auxiliary
@@ -247,7 +248,7 @@ public:
virtual void processDebugLoc(DebugLoc DL, bool BeforePrintintInsn) {}
/// emitLabel - Emits a label
- virtual void emitLabel(uint64_t LabelID) = 0;
+ virtual void emitLabel(MCSymbol *Label) = 0;
/// allocateSpace - Allocate a block of space in the current output buffer,
/// returning null (and setting conditions to indicate buffer overflow) on
@@ -316,10 +317,10 @@ public:
///
virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const= 0;
- /// getLabelAddress - Return the address of the specified LabelID, only usable
+ /// getLabelAddress - Return the address of the specified Label, only usable
/// after the LabelID has been emitted.
///
- virtual uintptr_t getLabelAddress(uint64_t LabelID) const = 0;
+ virtual uintptr_t getLabelAddress(MCSymbol *Label) const = 0;
/// Specifies the MachineModuleInfo object. This is used for exception handling
/// purposes.
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineConstantPool.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineConstantPool.h
index e6698a5..498f815 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineConstantPool.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineConstantPool.h
@@ -74,7 +74,7 @@ class MachineConstantPoolEntry {
public:
/// The constant itself.
union {
- Constant *ConstVal;
+ const Constant *ConstVal;
MachineConstantPoolValue *MachineCPVal;
} Val;
@@ -82,7 +82,7 @@ public:
/// a MachineConstantPoolValue.
unsigned Alignment;
- MachineConstantPoolEntry(Constant *V, unsigned A)
+ MachineConstantPoolEntry(const Constant *V, unsigned A)
: Alignment(A) {
Val.ConstVal = V;
}
@@ -143,7 +143,7 @@ public:
/// getConstantPoolIndex - Create a new entry in the constant pool or return
/// an existing one. User must specify the minimum required alignment for
/// the object.
- unsigned getConstantPoolIndex(Constant *C, unsigned Alignment);
+ unsigned getConstantPoolIndex(const Constant *C, unsigned Alignment);
unsigned getConstantPoolIndex(MachineConstantPoolValue *V,unsigned Alignment);
/// isEmpty - Return true if this constant pool contains no constants.
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineDominators.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineDominators.h
index 086528a..48695d5 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineDominators.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineDominators.h
@@ -156,8 +156,13 @@ public:
inline void splitBlock(MachineBasicBlock* NewBB) {
DT->splitBlock(NewBB);
}
-
-
+
+ /// isReachableFromEntry - Return true if A is dominated by the entry
+ /// block of the function containing it.
+ bool isReachableFromEntry(MachineBasicBlock *A) {
+ return DT->isReachableFromEntry(A);
+ }
+
virtual void releaseMemory();
virtual void print(raw_ostream &OS, const Module*) const;
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineFrameInfo.h
index 043e97f..dca65ef 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -14,12 +14,10 @@
#ifndef LLVM_CODEGEN_MACHINEFRAMEINFO_H
#define LLVM_CODEGEN_MACHINEFRAMEINFO_H
-#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
+//#include "llvm/ADT/IndexedMap.h"
#include "llvm/System/DataTypes.h"
#include <cassert>
-#include <limits>
#include <vector>
namespace llvm {
@@ -27,30 +25,23 @@ class raw_ostream;
class TargetData;
class TargetRegisterClass;
class Type;
-class MachineModuleInfo;
class MachineFunction;
class MachineBasicBlock;
class TargetFrameInfo;
+class BitVector;
/// The CalleeSavedInfo class tracks the information needed to locate where a
-/// callee saved register in the current frame.
+/// callee saved register is in the current frame.
class CalleeSavedInfo {
-
-private:
unsigned Reg;
- const TargetRegisterClass *RegClass;
int FrameIdx;
-
+
public:
- CalleeSavedInfo(unsigned R, const TargetRegisterClass *RC, int FI = 0)
- : Reg(R)
- , RegClass(RC)
- , FrameIdx(FI)
- {}
-
+ explicit CalleeSavedInfo(unsigned R, int FI = 0)
+ : Reg(R), FrameIdx(FI) {}
+
// Accessors.
unsigned getReg() const { return Reg; }
- const TargetRegisterClass *getRegClass() const { return RegClass; }
int getFrameIdx() const { return FrameIdx; }
void setFrameIdx(int FI) { FrameIdx = FI; }
};
@@ -91,7 +82,7 @@ class MachineFrameInfo {
// SPOffset - The offset of this object from the stack pointer on entry to
// the function. This field has no meaning for a variable sized element.
int64_t SPOffset;
-
+
// The size of this object on the stack. 0 means a variable sized object,
// ~0ULL means a dead object.
uint64_t Size;
@@ -104,14 +95,23 @@ class MachineFrameInfo {
// default, fixed objects are immutable unless marked otherwise.
bool isImmutable;
- // isSpillSlot - If true, the stack object is used as spill slot. It
+ // isSpillSlot - If true the stack object is used as spill slot. It
// cannot alias any other memory objects.
bool isSpillSlot;
+ // MayNeedSP - If true the stack object triggered the creation of the stack
+ // protector. We should allocate this object right after the stack
+ // protector.
+ bool MayNeedSP;
+
+ // PreAllocated - If true, the object was mapped into the local frame
+ // block and doesn't need additional handling for allocation beyond that.
+ bool PreAllocated;
+
StackObject(uint64_t Sz, unsigned Al, int64_t SP, bool IM,
- bool isSS)
+ bool isSS, bool NSP)
: SPOffset(SP), Size(Sz), Alignment(Al), isImmutable(IM),
- isSpillSlot(isSS) {}
+ isSpillSlot(isSS), MayNeedSP(NSP), PreAllocated(false) {}
};
/// Objects - The list of stack objects allocated...
@@ -133,13 +133,17 @@ class MachineFrameInfo {
/// to builtin \@llvm.frameaddress.
bool FrameAddressTaken;
+ /// ReturnAddressTaken - This boolean keeps track of whether there is a call
+ /// to builtin \@llvm.returnaddress.
+ bool ReturnAddressTaken;
+
/// StackSize - The prolog/epilog code inserter calculates the final stack
/// offsets for all of the fixed size objects, updating the Objects list
/// above. It then updates StackSize to contain the number of bytes that need
/// to be allocated on entry to the function.
///
uint64_t StackSize;
-
+
/// OffsetAdjustment - The amount that a frame offset needs to be adjusted to
/// have the actual offset from the stack/frame pointer. The exact usage of
/// this is target-dependent, but it is typically used to adjust between
@@ -150,18 +154,22 @@ class MachineFrameInfo {
/// TargetRegisterInfo::getFrameIndexOffset); when generating code, the
/// corresponding adjustments are performed directly.
int OffsetAdjustment;
-
- /// MaxAlignment - The prolog/epilog code inserter may process objects
+
+ /// MaxAlignment - The prolog/epilog code inserter may process objects
/// that require greater alignment than the default alignment the target
- /// provides. To handle this, MaxAlignment is set to the maximum alignment
+ /// provides. To handle this, MaxAlignment is set to the maximum alignment
/// needed by the objects on the current frame. If this is greater than the
/// native alignment maintained by the compiler, dynamic alignment code will
/// be needed.
///
unsigned MaxAlignment;
- /// HasCalls - Set to true if this function has any function calls. This is
- /// only valid during and after prolog/epilog code insertion.
+ /// AdjustsStack - Set to true if this function adjusts the stack -- e.g.,
+ /// when calling another function. This is only valid during and after
+ /// prolog/epilog code insertion.
+ bool AdjustsStack;
+
+ /// HasCalls - Set to true if this function has any function calls.
bool HasCalls;
/// StackProtectorIdx - The frame index for the stack protector.
@@ -174,7 +182,7 @@ class MachineFrameInfo {
/// insertion.
///
unsigned MaxCallFrameSize;
-
+
/// CSInfo - The prolog/epilog code inserter fills in this vector with each
/// callee saved register saved in the frame. Beyond its use by the prolog/
/// epilog code inserter, this data is used for debug info and exception
@@ -188,27 +196,40 @@ class MachineFrameInfo {
/// spill slots.
SmallVector<bool, 8> SpillObjects;
- /// MMI - This field is set (via setMachineModuleInfo) by a module info
- /// consumer (ex. DwarfWriter) to indicate that frame layout information
- /// should be acquired. Typically, it's the responsibility of the target's
- /// TargetRegisterInfo prologue/epilogue emitting code to inform
- /// MachineModuleInfo of frame layouts.
- MachineModuleInfo *MMI;
-
/// TargetFrameInfo - Target information about frame layout.
///
const TargetFrameInfo &TFI;
+ /// LocalFrameObjects - References to frame indices which are mapped
+ /// into the local frame allocation block. <FrameIdx, LocalOffset>
+ SmallVector<std::pair<int, int64_t>, 32> LocalFrameObjects;
+
+ /// LocalFrameSize - Size of the pre-allocated local frame block.
+ int64_t LocalFrameSize;
+
+ /// Required alignment of the local object blob, which is the strictest
+ /// alignment of any object in it.
+ unsigned LocalFrameMaxAlign;
+
+ /// Whether the local object blob needs to be allocated together. If not,
+ /// PEI should ignore the isPreAllocated flags on the stack objects and
+ /// just allocate them normally.
+ bool UseLocalStackAllocationBlock;
+
public:
- explicit MachineFrameInfo(const TargetFrameInfo &tfi) : TFI(tfi) {
+ explicit MachineFrameInfo(const TargetFrameInfo &tfi) : TFI(tfi) {
StackSize = NumFixedObjects = OffsetAdjustment = MaxAlignment = 0;
HasVarSizedObjects = false;
FrameAddressTaken = false;
+ ReturnAddressTaken = false;
+ AdjustsStack = false;
HasCalls = false;
StackProtectorIdx = -1;
MaxCallFrameSize = 0;
CSIValid = false;
- MMI = 0;
+ LocalFrameSize = 0;
+ LocalFrameMaxAlign = 0;
+ UseLocalStackAllocationBlock = false;
}
/// hasStackObjects - Return true if there are any stack objects in this
@@ -234,6 +255,12 @@ public:
bool isFrameAddressTaken() const { return FrameAddressTaken; }
void setFrameAddressIsTaken(bool T) { FrameAddressTaken = T; }
+ /// isReturnAddressTaken - This method may be called any time after
+ /// instruction selection is complete to determine if there is a call to
+ /// \@llvm.returnaddress in this function.
+ bool isReturnAddressTaken() const { return ReturnAddressTaken; }
+ void setReturnAddressIsTaken(bool s) { ReturnAddressTaken = s; }
+
/// getObjectIndexBegin - Return the minimum frame object index.
///
int getObjectIndexBegin() const { return -NumFixedObjects; }
@@ -242,13 +269,64 @@ public:
///
int getObjectIndexEnd() const { return (int)Objects.size()-NumFixedObjects; }
- /// getNumFixedObjects() - Return the number of fixed objects.
+ /// getNumFixedObjects - Return the number of fixed objects.
unsigned getNumFixedObjects() const { return NumFixedObjects; }
- /// getNumObjects() - Return the number of objects.
+ /// getNumObjects - Return the number of objects.
///
unsigned getNumObjects() const { return Objects.size(); }
+ /// mapLocalFrameObject - Map a frame index into the local object block
+ void mapLocalFrameObject(int ObjectIndex, int64_t Offset) {
+ LocalFrameObjects.push_back(std::pair<int, int64_t>(ObjectIndex, Offset));
+ Objects[ObjectIndex + NumFixedObjects].PreAllocated = true;
+ }
+
+ /// getLocalFrameObjectMap - Get the local offset mapping for an object
+ std::pair<int, int64_t> getLocalFrameObjectMap(int i) {
+ assert (i >= 0 && (unsigned)i < LocalFrameObjects.size() &&
+ "Invalid local object reference!");
+ return LocalFrameObjects[i];
+ }
+
+ /// getLocalFrameObjectCount - Return the number of objects allocated into
+ /// the local object block.
+ int64_t getLocalFrameObjectCount() { return LocalFrameObjects.size(); }
+
+ /// setLocalFrameSize - Set the size of the local object blob.
+ void setLocalFrameSize(int64_t sz) { LocalFrameSize = sz; }
+
+ /// getLocalFrameSize - Get the size of the local object blob.
+ int64_t getLocalFrameSize() const { return LocalFrameSize; }
+
+ /// setLocalFrameMaxAlign - Required alignment of the local object blob,
+ /// which is the strictest alignment of any object in it.
+ void setLocalFrameMaxAlign(unsigned Align) { LocalFrameMaxAlign = Align; }
+
+ /// getLocalFrameMaxAlign - Return the required alignment of the local
+ /// object blob.
+ unsigned getLocalFrameMaxAlign() const { return LocalFrameMaxAlign; }
+
+ /// getUseLocalStackAllocationBlock - Get whether the local allocation blob
+ /// should be allocated together or let PEI allocate the locals in it
+ /// directly.
+ bool getUseLocalStackAllocationBlock() {return UseLocalStackAllocationBlock;}
+
+ /// setUseLocalStackAllocationBlock - Set whether the local allocation blob
+ /// should be allocated together or let PEI allocate the locals in it
+ /// directly.
+ void setUseLocalStackAllocationBlock(bool v) {
+ UseLocalStackAllocationBlock = v;
+ }
+
+ /// isObjectPreAllocated - Return true if the object was pre-allocated into
+ /// the local block.
+ bool isObjectPreAllocated(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].PreAllocated;
+ }
+
/// getObjectSize - Return the size of the specified object.
///
int64_t getObjectSize(int ObjectIdx) const {
@@ -279,6 +357,14 @@ public:
MaxAlignment = std::max(MaxAlignment, Align);
}
+ /// MayNeedStackProtector - Returns true if the object may need stack
+ /// protectors.
+ bool MayNeedStackProtector(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].MayNeedSP;
+ }
+
/// getObjectOffset - Return the assigned stack offset of the specified object
/// from the incoming stack pointer.
///
@@ -310,28 +396,32 @@ public:
/// setStackSize - Set the size of the stack...
///
void setStackSize(uint64_t Size) { StackSize = Size; }
-
+
/// getOffsetAdjustment - Return the correction for frame offsets.
///
int getOffsetAdjustment() const { return OffsetAdjustment; }
-
+
/// setOffsetAdjustment - Set the correction for frame offsets.
///
void setOffsetAdjustment(int Adj) { OffsetAdjustment = Adj; }
- /// getMaxAlignment - Return the alignment in bytes that this function must be
- /// aligned to, which is greater than the default stack alignment provided by
+ /// getMaxAlignment - Return the alignment in bytes that this function must be
+ /// aligned to, which is greater than the default stack alignment provided by
/// the target.
///
unsigned getMaxAlignment() const { return MaxAlignment; }
-
+
/// setMaxAlignment - Set the preferred alignment.
///
void setMaxAlignment(unsigned Align) { MaxAlignment = Align; }
- /// hasCalls - Return true if the current function has no function calls.
- /// This is only valid during or after prolog/epilog code emission.
- ///
+ /// AdjustsStack - Return true if this function adjusts the stack -- e.g.,
+ /// when calling another function. This is only valid during and after
+ /// prolog/epilog code insertion.
+ bool adjustsStack() const { return AdjustsStack; }
+ void setAdjustsStack(bool V) { AdjustsStack = V; }
+
+ /// hasCalls - Return true if the current function has any function calls.
bool hasCalls() const { return HasCalls; }
void setHasCalls(bool V) { HasCalls = V; }
@@ -348,10 +438,9 @@ public:
/// efficiency. By default, fixed objects are immutable. This returns an
/// index with a negative value.
///
- int CreateFixedObject(uint64_t Size, int64_t SPOffset,
- bool Immutable, bool isSS);
-
-
+ int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool Immutable);
+
+
/// isFixedObjectIndex - Returns true if the specified index corresponds to a
/// fixed stack object.
bool isFixedObjectIndex(int ObjectIdx) const {
@@ -382,25 +471,26 @@ public:
return Objects[ObjectIdx+NumFixedObjects].Size == ~0ULL;
}
- /// CreateStackObject - Create a new statically sized stack object,
- /// returning a nonnegative identifier to represent it.
+ /// CreateStackObject - Create a new statically sized stack object, returning
+ /// a nonnegative identifier to represent it.
///
- int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS) {
+ int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS,
+ bool MayNeedSP = false) {
assert(Size != 0 && "Cannot allocate zero size stack objects!");
- Objects.push_back(StackObject(Size, Alignment, 0, false, isSS));
- int Index = (int)Objects.size()-NumFixedObjects-1;
+ Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, MayNeedSP));
+ int Index = (int)Objects.size() - NumFixedObjects - 1;
assert(Index >= 0 && "Bad frame index!");
MaxAlignment = std::max(MaxAlignment, Alignment);
return Index;
}
- /// CreateSpillStackObject - Create a new statically sized stack
- /// object that represents a spill slot, returning a nonnegative
- /// identifier to represent it.
+ /// CreateSpillStackObject - Create a new statically sized stack object that
+ /// represents a spill slot, returning a nonnegative identifier to represent
+ /// it.
///
int CreateSpillStackObject(uint64_t Size, unsigned Alignment) {
- CreateStackObject(Size, Alignment, true);
- int Index = (int)Objects.size()-NumFixedObjects-1;
+ CreateStackObject(Size, Alignment, true, false);
+ int Index = (int)Objects.size() - NumFixedObjects - 1;
MaxAlignment = std::max(MaxAlignment, Alignment);
return Index;
}
@@ -417,9 +507,10 @@ public:
/// variable sized object is created, whether or not the index returned is
/// actually used.
///
- int CreateVariableSizedObject() {
+ int CreateVariableSizedObject(unsigned Alignment) {
HasVarSizedObjects = true;
- Objects.push_back(StackObject(0, 1, 0, false, false));
+ Objects.push_back(StackObject(0, Alignment, 0, false, false, true));
+ MaxAlignment = std::max(MaxAlignment, Alignment);
return (int)Objects.size()-NumFixedObjects-1;
}
@@ -431,7 +522,7 @@ public:
/// setCalleeSavedInfo - Used by prolog/epilog inserter to set the function's
/// callee saved information.
- void setCalleeSavedInfo(const std::vector<CalleeSavedInfo> &CSI) {
+ void setCalleeSavedInfo(const std::vector<CalleeSavedInfo> &CSI) {
CSInfo = CSI;
}
@@ -451,16 +542,8 @@ public:
/// method always returns an empty set.
BitVector getPristineRegs(const MachineBasicBlock *MBB) const;
- /// getMachineModuleInfo - Used by a prologue/epilogue
- /// emitter (TargetRegisterInfo) to provide frame layout information.
- MachineModuleInfo *getMachineModuleInfo() const { return MMI; }
-
- /// setMachineModuleInfo - Used by a meta info consumer (DwarfWriter) to
- /// indicate that frame layout information should be gathered.
- void setMachineModuleInfo(MachineModuleInfo *mmi) { MMI = mmi; }
-
/// print - Used by the MachineFunction printer to print information about
- /// stack objects. Implemented in MachineFunction.cpp
+ /// stack objects. Implemented in MachineFunction.cpp
///
void print(const MachineFunction &MF, raw_ostream &OS) const;
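
The MachineFrameInfo hunks introduce a pre-allocated local frame block: objects can be created with an optional MayNeedSP flag, mapped to fixed offsets inside the blob, and queried later via isObjectPreAllocated. A minimal sketch assuming the caller chooses the offsets; all sizes, alignments and offsets below are illustration values:

    #include "llvm/CodeGen/MachineFrameInfo.h"
    #include <cassert>
    using namespace llvm;

    void preallocateTwoLocals(MachineFrameInfo *MFI) {
      int A = MFI->CreateStackObject(8, 8, /*isSS=*/false, /*MayNeedSP=*/false);
      int B = MFI->CreateStackObject(16, 16, /*isSS=*/false, /*MayNeedSP=*/true);
      MFI->mapLocalFrameObject(A, 0);       // place A at offset 0 of the blob
      MFI->mapLocalFrameObject(B, 16);      // place B right after it
      MFI->setLocalFrameSize(32);           // total blob size
      MFI->setLocalFrameMaxAlign(16);       // strictest alignment in the blob
      MFI->setUseLocalStackAllocationBlock(true);
      assert(MFI->isObjectPreAllocated(A) && MFI->isObjectPreAllocated(B));
    }
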
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineFunction.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineFunction.h
index 3c5b466..5bb453d 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -26,13 +26,14 @@
namespace llvm {
-class DILocation;
class Value;
class Function;
class MachineRegisterInfo;
class MachineFrameInfo;
class MachineConstantPool;
class MachineJumpTableInfo;
+class MachineModuleInfo;
+class MCContext;
class Pass;
class TargetMachine;
class TargetRegisterClass;
@@ -69,9 +70,11 @@ struct MachineFunctionInfo {
};
class MachineFunction {
- Function *Fn;
+ const Function *Fn;
const TargetMachine &Target;
-
+ MCContext &Ctx;
+ MachineModuleInfo &MMI;
+
// RegInfo - Information about each register in use in the function.
MachineRegisterInfo *RegInfo;
@@ -106,31 +109,32 @@ class MachineFunction {
typedef ilist<MachineBasicBlock> BasicBlockListType;
BasicBlockListType BasicBlocks;
- // Default debug location. Used to print out the debug label at the beginning
- // of a function.
- DebugLoc DefaultDebugLoc;
-
- // Tracks debug locations.
- DebugLocTracker DebugLocInfo;
-
/// FunctionNumber - This provides a unique ID for each function emitted in
/// this translation unit.
///
unsigned FunctionNumber;
- // The alignment of the function.
+ /// Alignment - The alignment of the function.
unsigned Alignment;
- MachineFunction(const MachineFunction &); // intentionally unimplemented
- void operator=(const MachineFunction&); // intentionally unimplemented
+ /// CallsSetJmp - True if the function calls setjmp or sigsetjmp. This is used
+ /// to limit optimizations which cannot reason about the control flow of
+ /// setjmp.
+ bool CallsSetJmp;
+ MachineFunction(const MachineFunction &); // DO NOT IMPLEMENT
+ void operator=(const MachineFunction&); // DO NOT IMPLEMENT
public:
- MachineFunction(Function *Fn, const TargetMachine &TM, unsigned FunctionNum);
+ MachineFunction(const Function *Fn, const TargetMachine &TM,
+ unsigned FunctionNum, MachineModuleInfo &MMI);
~MachineFunction();
+ MachineModuleInfo &getMMI() const { return MMI; }
+ MCContext &getContext() const { return Ctx; }
+
/// getFunction - Return the LLVM function that this machine code represents
///
- Function *getFunction() const { return Fn; }
+ const Function *getFunction() const { return Fn; }
/// getFunctionNumber - Return a unique ID for the current function.
///
@@ -182,6 +186,17 @@ public:
void EnsureAlignment(unsigned A) {
if (Alignment < A) Alignment = A;
}
+
+ /// callsSetJmp - Returns true if the function calls setjmp or sigsetjmp.
+ bool callsSetJmp() const {
+ return CallsSetJmp;
+ }
+
+ /// setCallsSetJmp - Set a flag that indicates if there's a call to setjmp or
+ /// sigsetjmp.
+ void setCallsSetJmp(bool B) {
+ CallsSetJmp = B;
+ }
/// getInfo - Keep track of various per-function pieces of information for
/// backends that would like to do so.
@@ -251,7 +266,7 @@ public:
/// verify - Run the current MachineFunction through the machine code
/// verifier, useful for debugger use.
- void verify(Pass *p=NULL, bool allowDoubleDefs=false) const;
+ void verify(Pass *p=NULL) const;
// Provide accessors for the MachineBasicBlock list...
typedef BasicBlockListType::iterator iterator;
@@ -391,25 +406,6 @@ public:
/// normal 'L' label is returned.
MCSymbol *getJTISymbol(unsigned JTI, MCContext &Ctx,
bool isLinkerPrivate = false) const;
-
-
- //===--------------------------------------------------------------------===//
- // Debug location.
- //
-
- /// getDILocation - Get the DILocation for a given DebugLoc object.
- DILocation getDILocation(DebugLoc DL) const;
-
- /// getDefaultDebugLoc - Get the default debug location for the machine
- /// function.
- DebugLoc getDefaultDebugLoc() const { return DefaultDebugLoc; }
-
- /// setDefaultDebugLoc - Get the default debug location for the machine
- /// function.
- void setDefaultDebugLoc(DebugLoc DL) { DefaultDebugLoc = DL; }
-
- /// getDebugLocInfo - Get the debug info location tracker.
- DebugLocTracker &getDebugLocInfo() { return DebugLocInfo; }
};
//===--------------------------------------------------------------------===//
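
MachineFunction now carries a reference to MachineModuleInfo and an MCContext, operates on a const Function, and records whether setjmp/sigsetjmp is called. A minimal sketch of the new accessors; the inspection helper is illustrative:

    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/CodeGen/MachineModuleInfo.h"
    using namespace llvm;

    bool usesSetJmp(const MachineFunction &MF) {
      const Function *F = MF.getFunction();  // now a const Function*
      MachineModuleInfo &MMI = MF.getMMI();  // module info travels with MF
      MCContext &Ctx = MF.getContext();      // shared MC context
      (void)F; (void)MMI; (void)Ctx;
      return MF.callsSetJmp();               // limits CFG-sensitive optimizations
    }
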
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
index ee2c6dd..75dbaab 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
@@ -39,7 +39,7 @@ public:
CodeGenOpt::Level getOptLevel() const { return OptLevel; }
private:
- virtual bool doInitialization(Module &) { NextFnNum = 1; return false; }
+ virtual bool doInitialization(Module &M);
virtual bool runOnFunction(Function &F);
virtual void releaseMemory();
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineFunctionPass.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineFunctionPass.h
index bac1103..b7bf0a3 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineFunctionPass.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineFunctionPass.h
@@ -31,8 +31,7 @@ class MachineFunction;
/// override runOnMachineFunction.
class MachineFunctionPass : public FunctionPass {
protected:
- explicit MachineFunctionPass(intptr_t ID) : FunctionPass(ID) {}
- explicit MachineFunctionPass(void *ID) : FunctionPass(ID) {}
+ explicit MachineFunctionPass(char &ID) : FunctionPass(ID) {}
/// runOnMachineFunction - This method must be overloaded to perform the
/// desired machine code transformation or analysis.
@@ -48,7 +47,11 @@ protected:
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
private:
- bool runOnFunction(Function &F);
+ /// createPrinterPass - Get a machine function printer pass.
+ virtual Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const;
+
+ virtual bool runOnFunction(Function &F);
};
} // End llvm namespace
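
MachineFunctionPass now registers with a char &ID rather than an address-of-typeid pointer (the same pattern MachineLoopInfo adopts further down). A minimal skeleton with a hypothetical pass name:

    #include "llvm/CodeGen/MachineFunctionPass.h"
    using namespace llvm;

    namespace {
      class SketchPass : public MachineFunctionPass {
      public:
        static char ID;                     // pass identification
        SketchPass() : MachineFunctionPass(ID) {}
        virtual bool runOnMachineFunction(MachineFunction &MF) {
          return false;                     // this sketch changes nothing
        }
      };
      char SketchPass::ID = 0;
    }
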
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineInstr.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineInstr.h
index d84f882..f843196 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -16,17 +16,19 @@
#ifndef LLVM_CODEGEN_MACHINEINSTR_H
#define LLVM_CODEGEN_MACHINEINSTR_H
-#include "llvm/ADT/ilist.h"
-#include "llvm/ADT/ilist_node.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/Target/TargetInstrDesc.h"
#include "llvm/Target/TargetOpcodes.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/DebugLoc.h"
#include <vector>
namespace llvm {
+template <typename T> class SmallVectorImpl;
class AliasAnalysis;
class TargetInstrDesc;
class TargetInstrInfo;
@@ -90,15 +92,14 @@ private:
// over time, the non-DebugLoc versions should be phased out and eventually
// removed.
- /// MachineInstr ctor - This constructor create a MachineInstr and add the
- /// implicit operands. It reserves space for number of operands specified by
- /// TargetInstrDesc. The version with a DebugLoc should be preferred.
+ /// MachineInstr ctor - This constructor creates a MachineInstr and adds the
+ /// implicit operands. It reserves space for the number of operands specified
+ /// by the TargetInstrDesc. The version with a DebugLoc should be preferred.
explicit MachineInstr(const TargetInstrDesc &TID, bool NoImp = false);
/// MachineInstr ctor - Work exactly the same as the ctor above, except that
/// the MachineInstr is created and added to the end of the specified basic
/// block. The version with a DebugLoc should be preferred.
- ///
MachineInstr(MachineBasicBlock *MBB, const TargetInstrDesc &TID);
/// MachineInstr ctor - This constructor create a MachineInstr and add the
@@ -110,7 +111,6 @@ private:
/// MachineInstr ctor - Work exactly the same as the ctor above, except that
/// the MachineInstr is created and added to the end of the specified basic
/// block.
- ///
MachineInstr(MachineBasicBlock *MBB, const DebugLoc dl,
const TargetInstrDesc &TID);
@@ -201,12 +201,14 @@ public:
/// isLabel - Returns true if the MachineInstr represents a label.
///
bool isLabel() const {
- return getOpcode() == TargetOpcode::DBG_LABEL ||
+ return getOpcode() == TargetOpcode::PROLOG_LABEL ||
getOpcode() == TargetOpcode::EH_LABEL ||
getOpcode() == TargetOpcode::GC_LABEL;
}
- bool isDebugLabel() const { return getOpcode() == TargetOpcode::DBG_LABEL; }
+ bool isPrologLabel() const {
+ return getOpcode() == TargetOpcode::PROLOG_LABEL;
+ }
bool isEHLabel() const { return getOpcode() == TargetOpcode::EH_LABEL; }
bool isGCLabel() const { return getOpcode() == TargetOpcode::GC_LABEL; }
bool isDebugValue() const { return getOpcode() == TargetOpcode::DBG_VALUE; }
@@ -215,23 +217,54 @@ public:
bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; }
- bool isExtractSubreg() const {
- return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
- }
bool isInsertSubreg() const {
return getOpcode() == TargetOpcode::INSERT_SUBREG;
}
bool isSubregToReg() const {
return getOpcode() == TargetOpcode::SUBREG_TO_REG;
}
-
+ bool isRegSequence() const {
+ return getOpcode() == TargetOpcode::REG_SEQUENCE;
+ }
+ bool isCopy() const {
+ return getOpcode() == TargetOpcode::COPY;
+ }
+
+ /// isCopyLike - Return true if the instruction behaves like a copy.
+ /// This does not include native copy instructions.
+ bool isCopyLike() const {
+ return isCopy() || isSubregToReg();
+ }
+
+ /// isIdentityCopy - Return true if the instruction is an identity copy.
+ bool isIdentityCopy() const {
+ return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
+ getOperand(0).getSubReg() == getOperand(1).getSubReg();
+ }
+
/// readsRegister - Return true if the MachineInstr reads the specified
/// register. If TargetRegisterInfo is passed, then it also checks if there
/// is a read of a super-register.
+ /// This does not count partial redefines of virtual registers as reads:
+ /// %reg1024:6 = OP.
bool readsRegister(unsigned Reg, const TargetRegisterInfo *TRI = NULL) const {
return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
}
+ /// readsVirtualRegister - Return true if the MachineInstr reads the specified
+ /// virtual register. Take into account that a partial define is a
+ /// read-modify-write operation.
+ bool readsVirtualRegister(unsigned Reg) const {
+ return readsWritesVirtualRegister(Reg).first;
+ }
+
+ /// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
+ /// indicating if this instruction reads or writes Reg. This also considers
+ /// partial defines.
+ /// If Ops is not null, all operand indices for Reg are added.
+ std::pair<bool,bool> readsWritesVirtualRegister(unsigned Reg,
+ SmallVectorImpl<unsigned> *Ops = 0) const;
+
/// killsRegister - Return true if the MachineInstr kills the specified
/// register. If TargetRegisterInfo is passed, then it also checks if there is
/// a kill of a super-register.
@@ -239,12 +272,19 @@ public:
return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
}
- /// modifiesRegister - Return true if the MachineInstr modifies the
+ /// definesRegister - Return true if the MachineInstr fully defines the
/// specified register. If TargetRegisterInfo is passed, then it also checks
/// if there is a def of a super-register.
- bool modifiesRegister(unsigned Reg,
- const TargetRegisterInfo *TRI = NULL) const {
- return findRegisterDefOperandIdx(Reg, false, TRI) != -1;
+ /// NOTE: It's ignoring subreg indices on virtual registers.
+ bool definesRegister(unsigned Reg, const TargetRegisterInfo *TRI=NULL) const {
+ return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
+ }
+
+ /// modifiesRegister - Return true if the MachineInstr modifies (fully define
+ /// or partially define) the specified register.
+ /// NOTE: It's ignoring subreg indices on virtual registers.
+ bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const {
+ return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1;
}
/// registerDefIsDead - Returns true if the register is dead in this machine
@@ -252,7 +292,7 @@ public:
/// if there is a dead def of a super-register.
bool registerDefIsDead(unsigned Reg,
const TargetRegisterInfo *TRI = NULL) const {
- return findRegisterDefOperandIdx(Reg, true, TRI) != -1;
+ return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
}
/// findRegisterUseOperandIdx() - Returns the operand index that is a use of
@@ -271,16 +311,18 @@ public:
/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
- /// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
- /// also checks if there is a def of a super-register.
- int findRegisterDefOperandIdx(unsigned Reg, bool isDead = false,
+ /// that are not dead are skipped. If Overlap is true, then it also looks for
+ /// defs that merely overlap the specified register. If TargetRegisterInfo is
+ /// non-null, then it also checks if there is a def of a super-register.
+ int findRegisterDefOperandIdx(unsigned Reg,
+ bool isDead = false, bool Overlap = false,
const TargetRegisterInfo *TRI = NULL) const;
/// findRegisterDefOperand - Wrapper for findRegisterDefOperandIdx, it returns
/// a pointer to the MachineOperand rather than an index.
MachineOperand *findRegisterDefOperand(unsigned Reg, bool isDead = false,
const TargetRegisterInfo *TRI = NULL) {
- int Idx = findRegisterDefOperandIdx(Reg, isDead, TRI);
+ int Idx = findRegisterDefOperandIdx(Reg, isDead, false, TRI);
return (Idx == -1) ? NULL : &getOperand(Idx);
}
@@ -300,6 +342,10 @@ public:
/// reference if DefOpIdx is not null.
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx = 0) const;
+ /// clearKillInfo - Clears kill flags on all operands.
+ ///
+ void clearKillInfo();
+
/// copyKillDeadInfo - Copies kill / dead operand properties from MI.
///
void copyKillDeadInfo(const MachineInstr *MI);
@@ -307,6 +353,11 @@ public:
/// copyPredicates - Copies predicate operand(s) from MI.
void copyPredicates(const MachineInstr *MI);
+ /// substituteRegister - Replace all occurrences of FromReg with ToReg:SubIdx,
+ /// properly composing subreg indices where necessary.
+ void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx,
+ const TargetRegisterInfo &RegInfo);
+
/// addRegisterKilled - We have determined MI kills a register. Look for the
/// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
/// add an implicit operand if it's not found. Returns true if the operand
@@ -325,7 +376,12 @@ public:
/// addRegisterDefined - We have determined MI defines a register. Make sure
/// there is an operand defining Reg.
void addRegisterDefined(unsigned IncomingReg,
- const TargetRegisterInfo *RegInfo);
+ const TargetRegisterInfo *RegInfo = 0);
+
+ /// setPhysRegsDeadExcept - Mark every physreg used by this instruction as dead
+ /// except those in the UsedRegs list.
+ void setPhysRegsDeadExcept(const SmallVectorImpl<unsigned> &UsedRegs,
+ const TargetRegisterInfo &TRI);
/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
@@ -356,6 +412,10 @@ public:
/// return 0.
unsigned isConstantValuePHI() const;
+ /// allDefsAreDead - Return true if all the defs of this instruction are dead.
+ ///
+ bool allDefsAreDead() const;
+
//
// Debugging support
//
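
The MachineInstr hunks split the old modifiesRegister query into a full-definition variant and an any-definition variant, and add a combined read/write probe for virtual registers. A minimal sketch; Reg and TRI are assumed to come from the surrounding pass:

    #include "llvm/CodeGen/MachineInstr.h"
    using namespace llvm;

    void classifyAccess(const MachineInstr &MI, unsigned Reg,
                        const TargetRegisterInfo *TRI) {
      std::pair<bool, bool> RW = MI.readsWritesVirtualRegister(Reg);
      bool Reads   = RW.first;                      // partial defs count as reads
      bool Writes  = RW.second;
      bool FullDef = MI.definesRegister(Reg, TRI);  // fully defines Reg
      bool AnyDef  = MI.modifiesRegister(Reg, TRI); // full or partial def
      (void)Reads; (void)Writes; (void)FullDef; (void)AnyDef;
    }
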
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineInstrBuilder.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
index 47f7cf7..37ac24c 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -104,7 +104,7 @@ public:
return *this;
}
- const MachineInstrBuilder &addGlobalAddress(GlobalValue *GV,
+ const MachineInstrBuilder &addGlobalAddress(const GlobalValue *GV,
int64_t Offset = 0,
unsigned char TargetFlags = 0) const {
MI->addOperand(MachineOperand::CreateGA(GV, Offset, TargetFlags));
@@ -131,6 +131,11 @@ public:
MI->addOperand(MachineOperand::CreateMetadata(MD));
return *this;
}
+
+ const MachineInstrBuilder &addSym(MCSymbol *Sym) const {
+ MI->addOperand(MachineOperand::CreateMCSymbol(Sym));
+ return *this;
+ }
};
/// BuildMI - Builder interface. Specify how to create the initial instruction
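
addSym lets a builder attach the new MCSymbol operand kind directly. A minimal sketch that appends an EH_LABEL carrying a symbol; the TargetInstrInfo::get call, the BuildMI overload, and the insertion point are assumptions of the sketch rather than part of the patch:

    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/Target/TargetInstrInfo.h"
    using namespace llvm;

    void emitEHLabel(MachineBasicBlock *MBB, const TargetInstrInfo *TII,
                     DebugLoc DL, MCSymbol *Sym) {
      // Append "EH_LABEL <Sym>" at the end of the block.
      BuildMI(MBB, DL, TII->get(TargetOpcode::EH_LABEL)).addSym(Sym);
    }
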
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
index b92ed7b..6264349 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
@@ -61,7 +61,11 @@ public:
/// .set L4_5_set_123, LBB123 - LJTI1_2
/// .word L4_5_set_123
EK_LabelDifference32,
-
+
+ /// EK_Inline - Jump table entries are emitted inline at their point of
+ /// use. It is the responsibility of the target to emit the entries.
+ EK_Inline,
+
/// EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the
/// TargetLowering::LowerCustomJumpTableEntry hook.
EK_Custom32
@@ -70,7 +74,7 @@ private:
JTEntryKind EntryKind;
std::vector<MachineJumpTableEntry> JumpTables;
public:
- MachineJumpTableInfo(JTEntryKind Kind): EntryKind(Kind) {}
+ explicit MachineJumpTableInfo(JTEntryKind Kind): EntryKind(Kind) {}
JTEntryKind getEntryKind() const { return EntryKind; }
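
The jump-table constructor becomes explicit and a new EK_Inline kind is added for targets that emit entries at the point of use. A minimal sketch:

    #include "llvm/CodeGen/MachineJumpTableInfo.h"
    using namespace llvm;

    void makeInlineJTI() {
      // Explicit construction is now required; no implicit JTEntryKind conversion.
      MachineJumpTableInfo JTI(MachineJumpTableInfo::EK_Inline);
      (void)JTI;  // EK_Inline entries are emitted by the target itself
    }
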
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineLocation.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineLocation.h
index 2db4e55..a1fcb9f 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineLocation.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineLocation.h
@@ -22,13 +22,13 @@
#define LLVM_CODEGEN_MACHINELOCATION_H
namespace llvm {
-
+ class MCSymbol;
+
class MachineLocation {
private:
bool IsRegister; // True if location is a register.
unsigned Register; // gcc/gdb register number.
int Offset; // Displacement if not register.
-
public:
enum {
// The target register number for an abstract frame pointer. The value is
@@ -36,20 +36,11 @@ public:
VirtualFP = ~0U
};
MachineLocation()
- : IsRegister(false)
- , Register(0)
- , Offset(0)
- {}
+ : IsRegister(false), Register(0), Offset(0) {}
explicit MachineLocation(unsigned R)
- : IsRegister(true)
- , Register(R)
- , Offset(0)
- {}
+ : IsRegister(true), Register(R), Offset(0) {}
MachineLocation(unsigned R, int O)
- : IsRegister(false)
- , Register(R)
- , Offset(O)
- {}
+ : IsRegister(false), Register(R), Offset(O) {}
// Accessors
bool isReg() const { return IsRegister; }
@@ -74,29 +65,25 @@ public:
#endif
};
+/// MachineMove - This class represents the save or restore of a callee saved
+/// register that exception or debug info needs to know about.
class MachineMove {
private:
- unsigned LabelID; // Label ID number for post-instruction
- // address when result of move takes
- // effect.
- MachineLocation Destination; // Move to location.
- MachineLocation Source; // Move from location.
+ /// Label - Symbol for post-instruction address when result of move takes
+ /// effect.
+ MCSymbol *Label;
+ // Move to & from location.
+ MachineLocation Destination, Source;
public:
- MachineMove()
- : LabelID(0)
- , Destination()
- , Source()
- {}
+ MachineMove() : Label(0) {}
- MachineMove(unsigned ID, MachineLocation &D, MachineLocation &S)
- : LabelID(ID)
- , Destination(D)
- , Source(S)
- {}
+ MachineMove(MCSymbol *label, const MachineLocation &D,
+ const MachineLocation &S)
+ : Label(label), Destination(D), Source(S) {}
// Accessors
- unsigned getLabelID() const { return LabelID; }
+ MCSymbol *getLabel() const { return Label; }
const MachineLocation &getDestination() const { return Destination; }
const MachineLocation &getSource() const { return Source; }
};
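
MachineMove now records an MCSymbol for the post-instruction address instead of a numeric label ID. A minimal sketch of recording one frame move; the use of VirtualFP and the -8 offset are illustration values:

    #include "llvm/CodeGen/MachineLocation.h"
    #include <vector>
    using namespace llvm;

    void recordMove(std::vector<MachineMove> &Moves, MCSymbol *Label) {
      MachineLocation Dst(MachineLocation::VirtualFP);      // register location
      MachineLocation Src(MachineLocation::VirtualFP, -8);  // memory at FP-8
      Moves.push_back(MachineMove(Label, Dst, Src));        // symbol-keyed move
    }
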
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineLoopInfo.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineLoopInfo.h
index 8459a8d..9760eba 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineLoopInfo.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineLoopInfo.h
@@ -64,12 +64,12 @@ class MachineLoopInfo : public MachineFunctionPass {
void operator=(const MachineLoopInfo &); // do not implement
MachineLoopInfo(const MachineLoopInfo &); // do not implement
- LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }
-
public:
static char ID; // Pass identification, replacement for typeid
- MachineLoopInfo() : MachineFunctionPass(&ID) {}
+ MachineLoopInfo() : MachineFunctionPass(ID) {}
+
+ LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }
/// iterator/begin/end - The interface to the top-level loops in the current
/// function.
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineModuleInfo.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineModuleInfo.h
index 8eeac9f..0e719c8 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineModuleInfo.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineModuleInfo.h
@@ -31,48 +31,44 @@
#ifndef LLVM_CODEGEN_MACHINEMODULEINFO_H
#define LLVM_CODEGEN_MACHINEMODULEINFO_H
+#include "llvm/Pass.h"
+#include "llvm/GlobalValue.h"
+#include "llvm/Metadata.h"
+#include "llvm/CodeGen/MachineLocation.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/DebugLoc.h"
+#include "llvm/Support/ValueHandle.h"
#include "llvm/System/DataTypes.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/UniqueVector.h"
+#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/StringMap.h"
-#include "llvm/CodeGen/MachineLocation.h"
-#include "llvm/GlobalValue.h"
-#include "llvm/Pass.h"
-#include "llvm/Metadata.h"
-#include "llvm/Support/ValueHandle.h"
+#include "llvm/ADT/SmallVector.h"
namespace llvm {
//===----------------------------------------------------------------------===//
// Forward declarations.
class Constant;
-class MCSymbol;
-class MDNode;
class GlobalVariable;
+class MDNode;
class MachineBasicBlock;
class MachineFunction;
class Module;
class PointerType;
class StructType;
-
/// MachineModuleInfoImpl - This class can be derived from and used by targets
/// to hold private target-specific information for each Module. Objects of
this type are accessed/created with MMI::getInfo and destroyed when the
/// MachineModuleInfo is destroyed.
class MachineModuleInfoImpl {
public:
+ typedef PointerIntPair<MCSymbol*, 1, bool> StubValueTy;
virtual ~MachineModuleInfoImpl();
-
- typedef std::vector<std::pair<MCSymbol*, MCSymbol*> >
- SymbolListTy;
+ typedef std::vector<std::pair<MCSymbol*, StubValueTy> > SymbolListTy;
protected:
- static SymbolListTy
- GetSortedStubs(const DenseMap<MCSymbol*, MCSymbol*> &Map);
+ static SymbolListTy GetSortedStubs(const DenseMap<MCSymbol*, StubValueTy>&);
};
@@ -82,37 +78,36 @@ protected:
/// the current function.
///
struct LandingPadInfo {
- MachineBasicBlock *LandingPadBlock; // Landing pad block.
- SmallVector<unsigned, 1> BeginLabels; // Labels prior to invoke.
- SmallVector<unsigned, 1> EndLabels; // Labels after invoke.
- unsigned LandingPadLabel; // Label at beginning of landing pad.
- Function *Personality; // Personality function.
- std::vector<int> TypeIds; // List of type ids (filters negative)
+ MachineBasicBlock *LandingPadBlock; // Landing pad block.
+ SmallVector<MCSymbol*, 1> BeginLabels; // Labels prior to invoke.
+ SmallVector<MCSymbol*, 1> EndLabels; // Labels after invoke.
+ MCSymbol *LandingPadLabel; // Label at beginning of landing pad.
+ const Function *Personality; // Personality function.
+ std::vector<int> TypeIds; // List of type ids (filters negative)
explicit LandingPadInfo(MachineBasicBlock *MBB)
- : LandingPadBlock(MBB)
- , LandingPadLabel(0)
- , Personality(NULL)
- {}
+ : LandingPadBlock(MBB), LandingPadLabel(0), Personality(0) {}
};
+class MMIAddrLabelMap;
+
//===----------------------------------------------------------------------===//
/// MachineModuleInfo - This class contains meta information specific to a
/// module. Queries can be made by different debugging and exception handling
/// schemes and reformatted for specific use.
///
class MachineModuleInfo : public ImmutablePass {
+ /// Context - This is the MCContext used for the entire code generator.
+ MCContext Context;
+
+ /// TheModule - This is the LLVM Module being worked on.
+ const Module *TheModule;
+
/// ObjFileMMI - This is the object-file-format-specific implementation of
/// MachineModuleInfoImpl, which lets targets accumulate whatever info they
/// want.
MachineModuleInfoImpl *ObjFileMMI;
- // LabelIDList - One entry per assigned label. Normally the entry is equal to
- // the list index(+1). If the entry is zero then the label has been deleted.
- // Any other value indicates the label has been deleted by is mapped to
- // another label.
- std::vector<unsigned> LabelIDList;
-
// FrameMoves - List of moves done by a function's prolog. Used to construct
// frame maps by debug and exception handling consumers.
std::vector<MachineMove> FrameMoves;
@@ -123,14 +118,14 @@ class MachineModuleInfo : public ImmutablePass {
// Map of invoke call site index values to associated begin EH_LABEL for
// the current function.
- DenseMap<unsigned, unsigned> CallSiteMap;
+ DenseMap<MCSymbol*, unsigned> CallSiteMap;
// The current call site index being processed, if any. 0 if none.
unsigned CurCallSite;
// TypeInfos - List of C++ TypeInfo used in the current function.
//
- std::vector<GlobalVariable *> TypeInfos;
+ std::vector<const GlobalVariable *> TypeInfos;
// FilterIds - List of typeids encoding filters used in the current function.
//
@@ -143,13 +138,18 @@ class MachineModuleInfo : public ImmutablePass {
// Personalities - Vector of all personality functions ever seen. Used to emit
// common EH frames.
- std::vector<Function *> Personalities;
+ std::vector<const Function *> Personalities;
/// UsedFunctions - The functions in the @llvm.used list in a more easily
/// searchable format. This does not include the functions in
/// llvm.compiler.used.
SmallPtrSet<const Function *, 32> UsedFunctions;
+
+ /// AddrLabelSymbols - This map keeps track of which symbol is being used for
+ /// the specified basic block's address of label.
+ MMIAddrLabelMap *AddrLabelSymbols;
+
bool CallsEHReturn;
bool CallsUnwindInit;
@@ -160,12 +160,13 @@ class MachineModuleInfo : public ImmutablePass {
public:
static char ID; // Pass identification, replacement for typeid
- typedef std::pair<unsigned, TrackingVH<MDNode> > UnsignedAndMDNodePair;
- typedef SmallVector< std::pair<TrackingVH<MDNode>, UnsignedAndMDNodePair>, 4>
+ typedef std::pair<unsigned, DebugLoc> UnsignedDebugLocPair;
+ typedef SmallVector<std::pair<TrackingVH<MDNode>, UnsignedDebugLocPair>, 4>
VariableDbgInfoMapTy;
VariableDbgInfoMapTy VariableDbgInfo;
- MachineModuleInfo();
+ MachineModuleInfo(); // DUMMY CONSTRUCTOR, DO NOT CALL.
+ MachineModuleInfo(const MCAsmInfo &MAI); // Real constructor.
~MachineModuleInfo();
bool doInitialization();
@@ -174,7 +175,13 @@ public:
/// EndFunction - Discard function meta information.
///
void EndFunction();
+
+ const MCContext &getContext() const { return Context; }
+ MCContext &getContext() { return Context; }
+ void setModule(const Module *M) { TheModule = M; }
+ const Module *getModule() const { return TheModule; }
+
/// getInfo - Keep track of various per-function pieces of information for
/// backends that would like to do so.
///
@@ -192,7 +199,7 @@ public:
/// AnalyzeModule - Scan the module for global debug information.
///
- void AnalyzeModule(Module &M);
+ void AnalyzeModule(const Module &M);
/// hasDebugInfo - Returns true if valid debug info is present.
///
@@ -205,44 +212,30 @@ public:
bool callsUnwindInit() const { return CallsUnwindInit; }
void setCallsUnwindInit(bool b) { CallsUnwindInit = b; }
- /// NextLabelID - Return the next unique label id.
- ///
- unsigned NextLabelID() {
- unsigned ID = (unsigned)LabelIDList.size() + 1;
- LabelIDList.push_back(ID);
- return ID;
- }
-
- /// InvalidateLabel - Inhibit use of the specified label # from
- /// MachineModuleInfo, for example because the code was deleted.
- void InvalidateLabel(unsigned LabelID) {
- // Remap to zero to indicate deletion.
- RemapLabel(LabelID, 0);
- }
-
- /// RemapLabel - Indicate that a label has been merged into another.
- ///
- void RemapLabel(unsigned OldLabelID, unsigned NewLabelID) {
- assert(0 < OldLabelID && OldLabelID <= LabelIDList.size() &&
- "Old label ID out of range.");
- assert(NewLabelID <= LabelIDList.size() &&
- "New label ID out of range.");
- LabelIDList[OldLabelID - 1] = NewLabelID;
- }
-
- /// MappedLabel - Find out the label's final ID. Zero indicates deletion.
- /// ID != Mapped ID indicates that the label was folded into another label.
- unsigned MappedLabel(unsigned LabelID) const {
- assert(LabelID <= LabelIDList.size() && "Debug label ID out of range.");
- return LabelID ? LabelIDList[LabelID - 1] : 0;
- }
-
/// getFrameMoves - Returns a reference to a list of moves done in the current
/// function's prologue. Used to construct frame maps for debug and exception
/// handling consumers.
std::vector<MachineMove> &getFrameMoves() { return FrameMoves; }
- //===-EH-----------------------------------------------------------------===//
+ /// getAddrLabelSymbol - Return the symbol to be used for the specified basic
+ /// block when its address is taken. This cannot be its normal LBB label
+ /// because the block may be accessed outside its containing function.
+ MCSymbol *getAddrLabelSymbol(const BasicBlock *BB);
+
+ /// getAddrLabelSymbolToEmit - Return the symbol to be used for the specified
+ /// basic block when its address is taken. If other blocks were RAUW'd to
+ /// this one, we may have to emit them as well, return the whole set.
+ std::vector<MCSymbol*> getAddrLabelSymbolToEmit(const BasicBlock *BB);
+
+ /// takeDeletedSymbolsForFunction - If the specified function has had any
+ /// references to address-taken blocks generated, but the block got deleted,
+ /// return the symbol now so we can emit it. This prevents emitting a
+ /// reference to a symbol that has no definition.
+ void takeDeletedSymbolsForFunction(const Function *F,
+ std::vector<MCSymbol*> &Result);
+
+
+ //===- EH ---------------------------------------------------------------===//
/// getOrCreateLandingPadInfo - Find or create an LandingPadInfo for the
/// specified MachineBasicBlock.
@@ -250,23 +243,24 @@ public:
/// addInvoke - Provide the begin and end labels of an invoke style call and
/// associate it with a try landing pad block.
- void addInvoke(MachineBasicBlock *LandingPad, unsigned BeginLabel,
- unsigned EndLabel);
+ void addInvoke(MachineBasicBlock *LandingPad,
+ MCSymbol *BeginLabel, MCSymbol *EndLabel);
/// addLandingPad - Add a new landing pad. Returns the label ID for the
/// landing pad entry.
- unsigned addLandingPad(MachineBasicBlock *LandingPad);
+ MCSymbol *addLandingPad(MachineBasicBlock *LandingPad);
/// addPersonality - Provide the personality function for the exception
/// information.
- void addPersonality(MachineBasicBlock *LandingPad, Function *Personality);
+ void addPersonality(MachineBasicBlock *LandingPad,
+ const Function *Personality);
/// getPersonalityIndex - Get index of the current personality function inside
/// Personalities array
unsigned getPersonalityIndex() const;
/// getPersonalities - Return array of personality functions ever seen.
- const std::vector<Function *>& getPersonalities() const {
+ const std::vector<const Function *>& getPersonalities() const {
return Personalities;
}
@@ -280,12 +274,12 @@ public:
/// addCatchTypeInfo - Provide the catch typeinfo for a landing pad.
///
void addCatchTypeInfo(MachineBasicBlock *LandingPad,
- std::vector<GlobalVariable *> &TyInfo);
+ std::vector<const GlobalVariable *> &TyInfo);
/// addFilterTypeInfo - Provide the filter typeinfo for a landing pad.
///
void addFilterTypeInfo(MachineBasicBlock *LandingPad,
- std::vector<GlobalVariable *> &TyInfo);
+ std::vector<const GlobalVariable *> &TyInfo);
/// addCleanup - Add a cleanup action for a landing pad.
///
@@ -293,7 +287,7 @@ public:
/// getTypeIDFor - Return the type id for the specified typeinfo. This is
/// function wide.
- unsigned getTypeIDFor(GlobalVariable *TI);
+ unsigned getTypeIDFor(const GlobalVariable *TI);
/// getFilterIDFor - Return the id of the filter encoded by TyIds. This is
/// function wide.
@@ -301,7 +295,7 @@ public:
/// TidyLandingPads - Remap landing pad labels and remove any deleted landing
/// pads.
- void TidyLandingPads();
+ void TidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap = 0);
/// getLandingPads - Return a reference to the landing pad info for the
/// current function.
@@ -310,12 +304,12 @@ public:
}
/// setCallSiteBeginLabel - Map the begin label for a call site
- void setCallSiteBeginLabel(unsigned BeginLabel, unsigned Site) {
+ void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site) {
CallSiteMap[BeginLabel] = Site;
}
/// getCallSiteBeginLabel - Get the call site number for a begin label
- unsigned getCallSiteBeginLabel(unsigned BeginLabel) {
+ unsigned getCallSiteBeginLabel(MCSymbol *BeginLabel) {
assert(CallSiteMap.count(BeginLabel) &&
"Missing call site number for EH_LABEL!");
return CallSiteMap[BeginLabel];
@@ -330,7 +324,7 @@ public:
/// getTypeInfos - Return a reference to the C++ typeinfo for the current
/// function.
- const std::vector<GlobalVariable *> &getTypeInfos() const {
+ const std::vector<const GlobalVariable *> &getTypeInfos() const {
return TypeInfos;
}
@@ -342,15 +336,15 @@ public:
/// getPersonality - Return a personality function if available. The presence
/// of one is required to emit exception handling info.
- Function *getPersonality() const;
+ const Function *getPersonality() const;
- /// setVariableDbgInfo - Collect information used to emit debugging information
- /// of a variable.
- void setVariableDbgInfo(MDNode *N, unsigned Slot, MDNode *Scope) {
- VariableDbgInfo.push_back(std::make_pair(N, std::make_pair(Slot, Scope)));
+ /// setVariableDbgInfo - Collect information used to emit debugging
+ /// information of a variable.
+ void setVariableDbgInfo(MDNode *N, unsigned Slot, DebugLoc Loc) {
+ VariableDbgInfo.push_back(std::make_pair(N, std::make_pair(Slot, Loc)));
}
- VariableDbgInfoMapTy &getVariableDbgInfo() { return VariableDbgInfo; }
+ VariableDbgInfoMapTy &getVariableDbgInfo() { return VariableDbgInfo; }
}; // End class MachineModuleInfo
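
MachineModuleInfo's exception-handling bookkeeping is now keyed by MCSymbol: addLandingPad returns a symbol, addInvoke takes symbol begin/end labels, and call sites map a begin symbol to a site index. A minimal sketch; the label arguments are assumed to come from the instruction emitter:

    #include "llvm/CodeGen/MachineModuleInfo.h"
    using namespace llvm;

    void noteInvoke(MachineModuleInfo &MMI, MachineBasicBlock *LandingPad,
                    MCSymbol *BeginLabel, MCSymbol *EndLabel, unsigned Site) {
      MCSymbol *PadLabel = MMI.addLandingPad(LandingPad); // was an unsigned ID
      MMI.addInvoke(LandingPad, BeginLabel, EndLabel);
      MMI.setCallSiteBeginLabel(BeginLabel, Site);
      (void)PadLabel;
    }
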
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h
index 89b8207..9401ffd 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h
@@ -25,32 +25,34 @@ namespace llvm {
class MachineModuleInfoMachO : public MachineModuleInfoImpl {
/// FnStubs - Darwin '$stub' stubs. The key is something like "Lfoo$stub",
/// the value is something like "_foo".
- DenseMap<MCSymbol*, MCSymbol*> FnStubs;
+ DenseMap<MCSymbol*, StubValueTy> FnStubs;
/// GVStubs - Darwin '$non_lazy_ptr' stubs. The key is something like
- /// "Lfoo$non_lazy_ptr", the value is something like "_foo".
- DenseMap<MCSymbol*, MCSymbol*> GVStubs;
+ /// "Lfoo$non_lazy_ptr", the value is something like "_foo". The extra bit
+ /// is true if this GV is external.
+ DenseMap<MCSymbol*, StubValueTy> GVStubs;
/// HiddenGVStubs - Darwin '$non_lazy_ptr' stubs. The key is something like
/// "Lfoo$non_lazy_ptr", the value is something like "_foo". Unlike GVStubs
- /// these are for things with hidden visibility.
- DenseMap<MCSymbol*, MCSymbol*> HiddenGVStubs;
+ /// these are for things with hidden visibility. The extra bit is true if
+ /// this GV is external.
+ DenseMap<MCSymbol*, StubValueTy> HiddenGVStubs;
virtual void Anchor(); // Out of line virtual method.
public:
MachineModuleInfoMachO(const MachineModuleInfo &) {}
- MCSymbol *&getFnStubEntry(MCSymbol *Sym) {
+ StubValueTy &getFnStubEntry(MCSymbol *Sym) {
assert(Sym && "Key cannot be null");
return FnStubs[Sym];
}
- MCSymbol *&getGVStubEntry(MCSymbol *Sym) {
+ StubValueTy &getGVStubEntry(MCSymbol *Sym) {
assert(Sym && "Key cannot be null");
return GVStubs[Sym];
}
- MCSymbol *&getHiddenGVStubEntry(MCSymbol *Sym) {
+ StubValueTy &getHiddenGVStubEntry(MCSymbol *Sym) {
assert(Sym && "Key cannot be null");
return HiddenGVStubs[Sym];
}
@@ -72,13 +74,13 @@ namespace llvm {
class MachineModuleInfoELF : public MachineModuleInfoImpl {
/// GVStubs - These stubs are used to materialize global addresses in PIC
/// mode.
- DenseMap<MCSymbol*, MCSymbol*> GVStubs;
+ DenseMap<MCSymbol*, StubValueTy> GVStubs;
virtual void Anchor(); // Out of line virtual method.
public:
MachineModuleInfoELF(const MachineModuleInfo &) {}
- MCSymbol *&getGVStubEntry(MCSymbol *Sym) {
+ StubValueTy &getGVStubEntry(MCSymbol *Sym) {
assert(Sym && "Key cannot be null");
return GVStubs[Sym];
}
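
Stub maps now store a StubValueTy, a PointerIntPair coupling the target symbol with an "is external" bit. A minimal sketch of filling one GV stub entry; the symbols are assumed to exist already:

    #include "llvm/CodeGen/MachineModuleInfoImpls.h"
    using namespace llvm;

    void addGVStub(MachineModuleInfoMachO &MachOMMI, MCSymbol *StubSym,
                   MCSymbol *Target, bool IsExternal) {
      MachineModuleInfoImpl::StubValueTy &Entry = MachOMMI.getGVStubEntry(StubSym);
      if (Entry.getPointer() == 0)            // not filled in yet
        Entry = MachineModuleInfoImpl::StubValueTy(Target, IsExternal);
    }
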
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineOperand.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineOperand.h
index 0978057..afa2c29 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineOperand.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineOperand.h
@@ -27,7 +27,9 @@ class MachineInstr;
class MachineRegisterInfo;
class MDNode;
class TargetMachine;
+class TargetRegisterInfo;
class raw_ostream;
+class MCSymbol;
/// MachineOperand class - Representation of each machine instruction operand.
///
@@ -44,7 +46,8 @@ public:
MO_ExternalSymbol, ///< Name of external global symbol
MO_GlobalAddress, ///< Address of a global value
MO_BlockAddress, ///< Address of a basic block
- MO_Metadata ///< Metadata reference (for debug info)
+ MO_Metadata, ///< Metadata reference (for debug info)
+ MO_MCSymbol ///< MCSymbol reference (for debug/eh info)
};
private:
@@ -101,6 +104,7 @@ private:
const ConstantFP *CFP; // For MO_FPImmediate.
int64_t ImmVal; // For MO_Immediate.
const MDNode *MD; // For MO_Metadata.
+ MCSymbol *Sym; // For MO_MCSymbol
struct { // For MO_Register.
unsigned RegNo;
@@ -114,8 +118,8 @@ private:
union {
int Index; // For MO_*Index - The index itself.
const char *SymbolName; // For MO_ExternalSymbol.
- GlobalValue *GV; // For MO_GlobalAddress.
- BlockAddress *BA; // For MO_BlockAddress.
+ const GlobalValue *GV; // For MO_GlobalAddress.
+ const BlockAddress *BA; // For MO_BlockAddress.
} Val;
int64_t Offset; // An offset from the object.
} OffsetedInfo;
@@ -167,6 +171,7 @@ public:
bool isBlockAddress() const { return OpKind == MO_BlockAddress; }
/// isMetadata - Tests if this is a MO_Metadata operand.
bool isMetadata() const { return OpKind == MO_Metadata; }
+ bool isMCSymbol() const { return OpKind == MO_MCSymbol; }
//===--------------------------------------------------------------------===//
// Accessors for Register Operands
@@ -242,7 +247,20 @@ public:
assert(isReg() && "Wrong MachineOperand accessor");
SubReg = (unsigned char)subReg;
}
-
+
+ /// substVirtReg - Substitute the current register with the virtual
+ /// subregister Reg:SubReg. Take any existing SubReg index into account,
+ /// using TargetRegisterInfo to compose the subreg indices if necessary.
+ /// Reg must be a virtual register, SubIdx can be 0.
+ ///
+ void substVirtReg(unsigned Reg, unsigned SubIdx, const TargetRegisterInfo&);
+
+ /// substPhysReg - Substitute the current register with the physical register
+ /// Reg, taking any existing SubReg into account. For instance,
+ /// substPhysReg(%EAX) will change %reg1024:sub_8bit to %AL.
+ ///
+ void substPhysReg(unsigned Reg, const TargetRegisterInfo&);
+
void setIsUse(bool Val = true) {
assert(isReg() && "Wrong MachineOperand accessor");
assert((Val || !isDebug()) && "Marking a debug operation as def");
@@ -281,6 +299,11 @@ public:
IsEarlyClobber = Val;
}
+ void setIsDebug(bool Val = true) {
+ assert(isReg() && IsDef && "Wrong MachineOperand accessor");
+ IsDebug = Val;
+ }
+
//===--------------------------------------------------------------------===//
// Accessors for various operand types.
//===--------------------------------------------------------------------===//
@@ -306,15 +329,20 @@ public:
return Contents.OffsetedInfo.Val.Index;
}
- GlobalValue *getGlobal() const {
+ const GlobalValue *getGlobal() const {
assert(isGlobal() && "Wrong MachineOperand accessor");
return Contents.OffsetedInfo.Val.GV;
}
- BlockAddress *getBlockAddress() const {
+ const BlockAddress *getBlockAddress() const {
assert(isBlockAddress() && "Wrong MachineOperand accessor");
return Contents.OffsetedInfo.Val.BA;
}
+
+ MCSymbol *getMCSymbol() const {
+ assert(isMCSymbol() && "Wrong MachineOperand accessor");
+ return Contents.Sym;
+ }
/// getOffset - Return the offset from the symbol in this operand. This always
/// returns 0 for ExternalSymbol operands.
@@ -443,7 +471,7 @@ public:
Op.setTargetFlags(TargetFlags);
return Op;
}
- static MachineOperand CreateGA(GlobalValue *GV, int64_t Offset,
+ static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset,
unsigned char TargetFlags = 0) {
MachineOperand Op(MachineOperand::MO_GlobalAddress);
Op.Contents.OffsetedInfo.Val.GV = GV;
@@ -459,7 +487,7 @@ public:
Op.setTargetFlags(TargetFlags);
return Op;
}
- static MachineOperand CreateBA(BlockAddress *BA,
+ static MachineOperand CreateBA(const BlockAddress *BA,
unsigned char TargetFlags = 0) {
MachineOperand Op(MachineOperand::MO_BlockAddress);
Op.Contents.OffsetedInfo.Val.BA = BA;
@@ -473,6 +501,12 @@ public:
return Op;
}
+ static MachineOperand CreateMCSymbol(MCSymbol *Sym) {
+ MachineOperand Op(MachineOperand::MO_MCSymbol);
+ Op.Contents.Sym = Sym;
+ return Op;
+ }
+
friend class MachineInstr;
friend class MachineRegisterInfo;
private:
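
For orientation, a minimal sketch of how the new MO_MCSymbol operand kind above
might be used; the helper name and the caller-supplied MI/Label are assumptions
for illustration, not code from this patch:

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/MC/MCSymbol.h"

// Attach a label reference to an existing instruction via the new operand kind.
static void addLabelOperand(llvm::MachineInstr *MI, llvm::MCSymbol *Label) {
  MI->addOperand(llvm::MachineOperand::CreateMCSymbol(Label));
  // Consumers can later test MO.isMCSymbol() and read MO.getMCSymbol().
}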
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineRegisterInfo.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
index f2e5e10..066c91b 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -35,7 +35,7 @@ class MachineRegisterInfo {
/// RegClassVRegMap - This vector acts as a map from TargetRegisterClass to
/// virtual registers. For each target register class, it keeps a list of
/// virtual registers belonging to the class.
- std::vector<std::vector<unsigned> > RegClass2VRegMap;
+ std::vector<unsigned> *RegClass2VRegMap;
/// RegAllocHints - This vector records register allocation hints for virtual
/// registers. For each virtual register, it keeps a register and hint type
@@ -93,6 +93,20 @@ public:
/// specified register (it may be live-in).
bool reg_empty(unsigned RegNo) const { return reg_begin(RegNo) == reg_end(); }
+ /// reg_nodbg_iterator/reg_nodbg_begin/reg_nodbg_end - Walk all defs and uses
+ /// of the specified register, skipping those marked as Debug.
+ typedef defusechain_iterator<true,true,true> reg_nodbg_iterator;
+ reg_nodbg_iterator reg_nodbg_begin(unsigned RegNo) const {
+ return reg_nodbg_iterator(getRegUseDefListHead(RegNo));
+ }
+ static reg_nodbg_iterator reg_nodbg_end() { return reg_nodbg_iterator(0); }
+
+ /// reg_nodbg_empty - Return true if the only instructions using or defining
+ /// Reg are Debug instructions.
+ bool reg_nodbg_empty(unsigned RegNo) const {
+ return reg_nodbg_begin(RegNo) == reg_nodbg_end();
+ }
+
/// def_iterator/def_begin/def_end - Walk all defs of the specified register.
typedef defusechain_iterator<false,true,false> def_iterator;
def_iterator def_begin(unsigned RegNo) const {
@@ -162,6 +176,12 @@ public:
/// register or null if none is found. This assumes that the code is in SSA
/// form, so there should only be one definition.
MachineInstr *getVRegDef(unsigned Reg) const;
+
+ /// clearKillFlags - Iterate over all the uses of the given register and
+ /// clear the kill flag from the MachineOperand. This function is used by
+ /// optimization passes which extend register lifetimes and need only
+ /// preserve conservative kill flag information.
+ void clearKillFlags(unsigned Reg) const;
#ifndef NDEBUG
void dumpUses(unsigned RegNo) const;
@@ -196,7 +216,8 @@ public:
/// getRegClassVirtRegs - Return the list of virtual registers of the given
/// target register class.
- std::vector<unsigned> &getRegClassVirtRegs(const TargetRegisterClass *RC) {
+ const std::vector<unsigned> &
+ getRegClassVirtRegs(const TargetRegisterClass *RC) const {
return RegClass2VRegMap[RC->getID()];
}
@@ -229,11 +250,18 @@ public:
/// setPhysRegUsed - Mark the specified register used in this function.
/// This should only be called during and after register allocation.
void setPhysRegUsed(unsigned Reg) { UsedPhysRegs[Reg] = true; }
-
+
+ /// addPhysRegsUsed - Mark the specified registers used in this function.
+ /// This should only be called during and after register allocation.
+ void addPhysRegsUsed(const BitVector &Regs) { UsedPhysRegs |= Regs; }
+
/// setPhysRegUnused - Mark the specified register unused in this function.
/// This should only be called during and after register allocation.
void setPhysRegUnused(unsigned Reg) { UsedPhysRegs[Reg] = false; }
-
+
+ /// closePhysRegsUsed - Expand UsedPhysRegs to its transitive closure over
+ /// subregisters. That means that if R is used, so are all subregisters.
+ void closePhysRegsUsed(const TargetRegisterInfo&);
//===--------------------------------------------------------------------===//
// LiveIn/LiveOut Management
@@ -258,18 +286,22 @@ public:
liveout_iterator liveout_end() const { return LiveOuts.end(); }
bool liveout_empty() const { return LiveOuts.empty(); }
- bool isLiveIn(unsigned Reg) const {
- for (livein_iterator I = livein_begin(), E = livein_end(); I != E; ++I)
- if (I->first == Reg || I->second == Reg)
- return true;
- return false;
- }
- bool isLiveOut(unsigned Reg) const {
- for (liveout_iterator I = liveout_begin(), E = liveout_end(); I != E; ++I)
- if (*I == Reg)
- return true;
- return false;
- }
+ bool isLiveIn(unsigned Reg) const;
+ bool isLiveOut(unsigned Reg) const;
+
+ /// getLiveInPhysReg - If VReg is a live-in virtual register, return the
+ /// corresponding live-in physical register.
+ unsigned getLiveInPhysReg(unsigned VReg) const;
+
+ /// getLiveInVirtReg - If PReg is a live-in physical register, return the
+ /// corresponding live-in virtual register.
+ unsigned getLiveInVirtReg(unsigned PReg) const;
+
+ /// EmitLiveInCopies - Emit copies to initialize livein virtual registers
+ /// into the given entry block.
+ void EmitLiveInCopies(MachineBasicBlock *EntryMBB,
+ const TargetRegisterInfo &TRI,
+ const TargetInstrInfo &TII);
private:
void HandleVRegListReallocation();
@@ -331,7 +363,18 @@ public:
defusechain_iterator operator++(int) { // Postincrement
defusechain_iterator tmp = *this; ++*this; return tmp;
}
-
+
+ /// skipInstruction - move forward until reaching a different instruction.
+ /// Return the skipped instruction that is no longer pointed to, or NULL if
+ /// already pointing to end().
+ MachineInstr *skipInstruction() {
+ if (!Op) return 0;
+ MachineInstr *MI = Op->getParent();
+ do ++*this;
+ while (Op && Op->getParent() == MI);
+ return MI;
+ }
+
MachineOperand &getOperand() const {
assert(Op && "Cannot dereference end iterator!");
return *Op;
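
A rough usage sketch for the reg_nodbg_* accessors and skipInstruction() added
above; countUsers and its caller-supplied MRI/Reg are illustrative assumptions,
not part of the patch:

#include "llvm/CodeGen/MachineRegisterInfo.h"

// Visit each instruction touching Reg exactly once (even if it has several
// operands on Reg) and report whether only debug users remain.
static unsigned countUsers(llvm::MachineRegisterInfo &MRI, unsigned Reg,
                           bool &DebugOnly) {
  DebugOnly = MRI.reg_nodbg_empty(Reg);      // true when only DBG_VALUEs use Reg
  unsigned N = 0;
  for (llvm::MachineRegisterInfo::reg_iterator I = MRI.reg_begin(Reg);
       I != llvm::MachineRegisterInfo::reg_end(); )
    if (I.skipInstruction())                 // steps over the whole instruction
      ++N;
  return N;
}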
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/MachineSSAUpdater.h b/libclamav/c++/llvm/include/llvm/CodeGen/MachineSSAUpdater.h
index ab663fe..cbb45a7 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/MachineSSAUpdater.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/MachineSSAUpdater.h
@@ -23,23 +23,22 @@ namespace llvm {
class TargetInstrInfo;
class TargetRegisterClass;
template<typename T> class SmallVectorImpl;
+ template<typename T> class SSAUpdaterTraits;
+ class BumpPtrAllocator;
/// MachineSSAUpdater - This class updates SSA form for a set of virtual
/// registers defined in multiple blocks. This is used when code duplication
/// or another unstructured transformation wants to rewrite a set of uses of one
/// vreg with uses of a set of vregs.
class MachineSSAUpdater {
+ friend class SSAUpdaterTraits<MachineSSAUpdater>;
+
+private:
/// AvailableVals - This keeps track of which value to use on a per-block
/// basis. When we insert PHI nodes, we keep track of them here.
//typedef DenseMap<MachineBasicBlock*, unsigned > AvailableValsTy;
void *AV;
- /// IncomingPredInfo - We use this as scratch space when doing our recursive
- /// walk. This should only be used in GetValueInBlockInternal, normally it
- /// should be empty.
- //std::vector<std::pair<MachineBasicBlock*, unsigned > > IncomingPredInfo;
- void *IPI;
-
/// VR - Current virtual register whose uses are being updated.
unsigned VR;
@@ -106,6 +105,7 @@ public:
private:
void ReplaceRegWith(unsigned OldReg, unsigned NewReg);
unsigned GetValueAtEndOfBlockInternal(MachineBasicBlock *BB);
+
void operator=(const MachineSSAUpdater&); // DO NOT IMPLEMENT
MachineSSAUpdater(const MachineSSAUpdater&); // DO NOT IMPLEMENT
};
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/ObjectCodeEmitter.h b/libclamav/c++/llvm/include/llvm/CodeGen/ObjectCodeEmitter.h
index 170c0c8..d46628c 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/ObjectCodeEmitter.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/ObjectCodeEmitter.h
@@ -137,13 +137,6 @@ public:
/// emitted.
virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const;
- /// emitLabel - Emits a label
- virtual void emitLabel(uint64_t LabelID) = 0;
-
- /// getLabelAddress - Return the address of the specified LabelID, only usable
- /// after the LabelID has been emitted.
- virtual uintptr_t getLabelAddress(uint64_t LabelID) const = 0;
-
/// emitJumpTables - Emit all the jump tables for a given jump table info
/// record to the appropriate section.
virtual void emitJumpTables(MachineJumpTableInfo *MJTI) = 0;
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/Passes.h b/libclamav/c++/llvm/include/llvm/CodeGen/Passes.h
index 911be22..4762a39 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/Passes.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/Passes.h
@@ -21,6 +21,7 @@
namespace llvm {
class FunctionPass;
+ class MachineFunctionPass;
class PassInfo;
class TargetLowering;
class RegisterCoalescer;
@@ -29,69 +30,70 @@ namespace llvm {
/// createUnreachableBlockEliminationPass - The LLVM code generator does not
/// work well with unreachable basic blocks (what live ranges make sense for a
/// block that cannot be reached?). As such, a code generator should either
- /// not instruction select unreachable blocks, or it can run this pass as it's
+ /// not instruction select unreachable blocks, or run this pass as its
/// last LLVM modifying pass to clean up blocks that are not reachable from
/// the entry block.
FunctionPass *createUnreachableBlockEliminationPass();
/// MachineFunctionPrinter pass - This pass prints out the machine function to
- /// the given stream, as a debugging tool.
- FunctionPass *createMachineFunctionPrinterPass(raw_ostream &OS,
- const std::string &Banner ="");
+ /// the given stream as a debugging tool.
+ MachineFunctionPass *
+ createMachineFunctionPrinterPass(raw_ostream &OS,
+ const std::string &Banner ="");
/// MachineLoopInfo pass - This pass is a loop analysis pass.
- ///
- extern const PassInfo *const MachineLoopInfoID;
+ ///
+ extern char &MachineLoopInfoID;
/// MachineDominators pass - This pass is a machine dominators analysis pass.
- ///
- extern const PassInfo *const MachineDominatorsID;
+ ///
+ extern char &MachineDominatorsID;
/// PHIElimination pass - This pass eliminates machine instruction PHI nodes
/// by inserting copy instructions. This destroys SSA information, but is the
/// desired input for some register allocators. This pass is "required" by
/// these register allocators like this: AU.addRequiredID(PHIEliminationID);
///
- extern const PassInfo *const PHIEliminationID;
-
+ extern char &PHIEliminationID;
+
/// StrongPHIElimination pass - This pass eliminates machine instruction PHI
/// nodes by inserting copy instructions. This destroys SSA information, but
/// is the desired input for some register allocators. This pass is
/// "required" by these register allocator like this:
/// AU.addRequiredID(PHIEliminationID);
/// This pass is still in development
- extern const PassInfo *const StrongPHIEliminationID;
+ extern char &StrongPHIEliminationID;
- extern const PassInfo *const PreAllocSplittingID;
+ extern char &PreAllocSplittingID;
/// SimpleRegisterCoalescing pass. Aggressively coalesces every register
/// copy it can.
///
- extern const PassInfo *const SimpleRegisterCoalescingID;
+ extern char &SimpleRegisterCoalescingID;
/// TwoAddressInstruction pass - This pass reduces two-address instructions to
/// use two operands. This destroys SSA information but it is desired by
/// register allocators.
- extern const PassInfo *const TwoAddressInstructionPassID;
+ extern char &TwoAddressInstructionPassID;
/// UnreachableMachineBlockElimination pass - This pass removes unreachable
/// machine basic blocks.
- extern const PassInfo *const UnreachableMachineBlockElimID;
+ extern char &UnreachableMachineBlockElimID;
/// DeadMachineInstructionElim pass - This pass removes dead machine
/// instructions.
///
FunctionPass *createDeadMachineInstructionElimPass();
- /// Creates a register allocator as the user specified on the command line.
+ /// Creates a register allocator as the user specified on the command line, or
+ /// picks one that matches OptLevel.
///
- FunctionPass *createRegisterAllocator();
+ FunctionPass *createRegisterAllocator(CodeGenOpt::Level OptLevel);
- /// LocalRegisterAllocation Pass - This pass register allocates the input code
- /// a basic block at a time, yielding code better than the simple register
- /// allocator, but not as good as a global allocator.
+ /// FastRegisterAllocation Pass - This pass register allocates as fast as
+ /// possible. It is best suited for debug code where live ranges are short.
///
- FunctionPass *createLocalRegisterAllocator();
+ FunctionPass *createFastRegisterAllocator();
/// LinearScanRegisterAllocation Pass - This pass implements the linear scan
/// register allocation algorithm, a global register allocator.
@@ -112,7 +114,7 @@ namespace llvm {
/// and eliminates abstract frame references.
///
FunctionPass *createPrologEpilogCodeInserter();
-
+
/// LowerSubregs Pass - This pass lowers subregs to register-register copies
/// which yields suboptimal, but correct code if the register allocator
/// cannot coalesce all subreg operations during allocation.
@@ -140,43 +142,39 @@ namespace llvm {
/// headers to target specific alignment boundary.
FunctionPass *createCodePlacementOptPass();
- /// getRegisterAllocator - This creates an instance of the register allocator
- /// for the Sparc.
- FunctionPass *getRegisterAllocator(TargetMachine &T);
-
/// IntrinsicLowering Pass - Performs target-independent LLVM IR
/// transformations for highly portable strategies.
FunctionPass *createGCLoweringPass();
-
+
/// MachineCodeAnalysis Pass - Target-independent pass to mark safe points in
/// machine code. Must be added very late during code generation, just prior
/// to output, and importantly after all CFG transformations (such as branch
/// folding).
FunctionPass *createGCMachineCodeAnalysisPass();
-
+
/// Deleter Pass - Releases GC metadata.
- ///
+ ///
FunctionPass *createGCInfoDeleter();
-
+
/// Creates a pass to print GC metadata.
- ///
+ ///
FunctionPass *createGCInfoPrinter(raw_ostream &OS);
-
+
/// createMachineCSEPass - This pass performs global CSE on machine
/// instructions.
FunctionPass *createMachineCSEPass();
/// createMachineLICMPass - This pass performs LICM on machine instructions.
- ///
- FunctionPass *createMachineLICMPass();
+ ///
+ FunctionPass *createMachineLICMPass(bool PreRegAlloc = true);
/// createMachineSinkingPass - This pass performs sinking on machine
/// instructions.
FunctionPass *createMachineSinkingPass();
- /// createOptimizeExtsPass - This pass performs sign / zero extension
- /// optimization by increasing uses of extended values.
- FunctionPass *createOptimizeExtsPass();
+ /// createPeepholeOptimizerPass - This pass performs peephole optimizations -
+ /// like extension and comparison eliminations.
+ FunctionPass *createPeepholeOptimizerPass();
/// createOptimizePHIsPass - This pass optimizes machine instruction PHIs
/// to take advantage of opportunities created during DAG legalization.
@@ -190,19 +188,23 @@ namespace llvm {
/// createMachineVerifierPass - This pass verifies generated machine code
/// instructions for correctness.
- ///
- /// @param allowDoubleDefs ignore double definitions of
- /// registers. Useful before LiveVariables has run.
- FunctionPass *createMachineVerifierPass(bool allowDoubleDefs);
+ FunctionPass *createMachineVerifierPass();
/// createDwarfEHPass - This pass mulches exception handling code into a form
/// adapted to code generation. Required if using dwarf exception handling.
- FunctionPass *createDwarfEHPass(const TargetLowering *tli, bool fast);
+ FunctionPass *createDwarfEHPass(const TargetMachine *tm);
/// createSjLjEHPass - This pass adapts exception handling code to use
/// the GCC-style builtin setjmp/longjmp (sjlj) to handling EH control flow.
FunctionPass *createSjLjEHPass(const TargetLowering *tli);
+ /// createLocalStackSlotAllocationPass - This pass assigns local frame
+ /// indices to stack slots relative to one another and allocates
+ /// base registers to access them when it is estimated by the target to
+ /// be out of range of normal frame pointer or stack pointer index
+ /// addressing.
+ FunctionPass *createLocalStackSlotAllocationPass();
+
} // End llvm namespace
#endif
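
Since the pass handles above change from PassInfo pointers to plain char& IDs,
a backend pass now states its prerequisites as sketched below; ExamplePass is
hypothetical and only illustrates the addRequiredID() pattern already mentioned
in the comments:

#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/Passes.h"

namespace {
struct ExamplePass : public llvm::MachineFunctionPass {
  static char ID;
  ExamplePass() : llvm::MachineFunctionPass(ID) {}
  virtual void getAnalysisUsage(llvm::AnalysisUsage &AU) const {
    AU.addRequiredID(llvm::PHIEliminationID);            // char& after this change
    AU.addRequiredID(llvm::TwoAddressInstructionPassID);
    llvm::MachineFunctionPass::getAnalysisUsage(AU);
  }
  virtual bool runOnMachineFunction(llvm::MachineFunction &) { return false; }
};
char ExamplePass::ID = 0;
}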
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/PostRAHazardRecognizer.h b/libclamav/c++/llvm/include/llvm/CodeGen/PostRAHazardRecognizer.h
new file mode 100644
index 0000000..24d73cb
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/PostRAHazardRecognizer.h
@@ -0,0 +1,94 @@
+//=- llvm/CodeGen/PostRAHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PostRAHazardRecognizer class, which
+// implements hazard-avoidance heuristics for scheduling, based on the
+// scheduling itineraries specified for the target.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_EXACTHAZARDRECOGNIZER_H
+#define LLVM_CODEGEN_EXACTHAZARDRECOGNIZER_H
+
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/System/DataTypes.h"
+
+#include <cassert>
+#include <cstring>
+#include <string>
+
+namespace llvm {
+
+class InstrItineraryData;
+class SUnit;
+
+class PostRAHazardRecognizer : public ScheduleHazardRecognizer {
+ // ScoreBoard to track function unit usage. ScoreBoard[0] is a
+ // mask of the FUs in use in the cycle currently being
+ // scheduled. ScoreBoard[1] is a mask for the next cycle. The
+ // ScoreBoard is used as a circular buffer with the current cycle
+ // indicated by Head.
+ class ScoreBoard {
+ unsigned *Data;
+
+ // The maximum number of cycles monitored by the Scoreboard. This
+ // value is determined based on the target itineraries to ensure
+ // that all hazards can be tracked.
+ size_t Depth;
+ // Indices into the Scoreboard that represent the current cycle.
+ size_t Head;
+ public:
+ ScoreBoard():Data(NULL), Depth(0), Head(0) { }
+ ~ScoreBoard() {
+ delete[] Data;
+ }
+
+ size_t getDepth() const { return Depth; }
+ unsigned& operator[](size_t idx) const {
+ assert(Depth && "ScoreBoard was not initialized properly!");
+
+ return Data[(Head + idx) % Depth];
+ }
+
+ void reset(size_t d = 1) {
+ if (Data == NULL) {
+ Depth = d;
+ Data = new unsigned[Depth];
+ }
+
+ memset(Data, 0, Depth * sizeof(Data[0]));
+ Head = 0;
+ }
+
+ void advance() {
+ Head = (Head + 1) % Depth;
+ }
+
+ // Print the scoreboard.
+ void dump() const;
+ };
+
+ // Itinerary data for the target.
+ const InstrItineraryData &ItinData;
+
+ ScoreBoard ReservedScoreboard;
+ ScoreBoard RequiredScoreboard;
+
+public:
+ PostRAHazardRecognizer(const InstrItineraryData &ItinData);
+
+ virtual HazardType getHazardType(SUnit *SU);
+ virtual void Reset();
+ virtual void EmitInstruction(SUnit *SU);
+ virtual void AdvanceCycle();
+};
+
+}
+
+#endif
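
To make the circular-buffer bookkeeping of the ScoreBoard above concrete, here
is a small standalone analogue (plain C++, independent of LLVM); it only mirrors
the idea that slot 0 is always the current cycle and advance() rotates the
window instead of shifting data:

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <cstring>

class MiniScoreBoard {
  unsigned *Data;
  size_t Depth, Head;
public:
  MiniScoreBoard() : Data(0), Depth(0), Head(0) {}
  ~MiniScoreBoard() { delete[] Data; }
  void reset(size_t d) {
    if (!Data) { Depth = d; Data = new unsigned[Depth]; }
    std::memset(Data, 0, Depth * sizeof(Data[0]));
    Head = 0;
  }
  unsigned &operator[](size_t idx) {           // idx is measured from "now"
    assert(Depth && "not initialized");
    return Data[(Head + idx) % Depth];
  }
  void advance() { Head = (Head + 1) % Depth; }
};

int main() {
  MiniScoreBoard SB;
  SB.reset(4);
  SB[1] |= 0x2;                                // reserve FU #1 one cycle ahead
  SB.advance();                                // move to that cycle
  std::printf("FU mask this cycle: 0x%x\n", SB[0]);  // prints 0x2
  return 0;
}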
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h b/libclamav/c++/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h
index cec867f..1d743c1 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h
@@ -12,6 +12,7 @@
#define LLVM_CODEGEN_PROCESSIMPLICITDEFS_H
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/ADT/SmallSet.h"
namespace llvm {
@@ -24,12 +25,13 @@ namespace llvm {
private:
bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
- unsigned OpIdx, const TargetInstrInfo *tii_);
+ unsigned OpIdx, const TargetInstrInfo *tii_,
+ SmallSet<unsigned, 8> &ImpDefRegs);
public:
static char ID;
- ProcessImplicitDefs() : MachineFunctionPass(&ID) {}
+ ProcessImplicitDefs() : MachineFunctionPass(ID) {}
virtual void getAnalysisUsage(AnalysisUsage &au) const;
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/RegisterCoalescer.h b/libclamav/c++/llvm/include/llvm/CodeGen/RegisterCoalescer.h
index 1490aa0..7644433 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/RegisterCoalescer.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/RegisterCoalescer.h
@@ -25,6 +25,9 @@ namespace llvm {
class RegallocQuery;
class AnalysisUsage;
class MachineInstr;
+ class TargetRegisterInfo;
+ class TargetRegisterClass;
+ class TargetInstrInfo;
/// An abstract interface for register coalescers. Coalescers must
/// implement this interface to be part of the coalescer analysis
@@ -141,6 +144,93 @@ namespace llvm {
return true;
}
};
+
+
+ /// CoalescerPair - A helper class for register coalescers. When deciding if
+ /// two registers can be coalesced, CoalescerPair can determine if a copy
+ /// instruction would become an identity copy after coalescing.
+ class CoalescerPair {
+ const TargetInstrInfo &tii_;
+ const TargetRegisterInfo &tri_;
+
+ /// dstReg_ - The register that will be left after coalescing. It can be a
+ /// virtual or physical register.
+ unsigned dstReg_;
+
+ /// srcReg_ - the virtual register that will be coalesced into dstReg.
+ unsigned srcReg_;
+
+ /// subIdx_ - The subregister index of srcReg_ in dstReg_. It is possible to
+ /// coalesce srcReg_ into a subreg of the larger dstReg_ when dstReg_ is a
+ /// virtual register.
+ unsigned subIdx_;
+
+ /// partial_ - True when the original copy was a partial subregister copy.
+ bool partial_;
+
+ /// crossClass_ - True when both regs are virtual, and newRC is constrained.
+ bool crossClass_;
+
+ /// flipped_ - True when DstReg and SrcReg are reversed from the original copy
+ /// instruction.
+ bool flipped_;
+
+ /// newRC_ - The register class of the coalesced register, or NULL if dstReg_
+ /// is a physreg.
+ const TargetRegisterClass *newRC_;
+
+ /// compose - Compose subreg indices a and b, either may be 0.
+ unsigned compose(unsigned, unsigned) const;
+
+ /// isMoveInstr - Return true if MI is a move or subreg instruction.
+ bool isMoveInstr(const MachineInstr *MI, unsigned &Src, unsigned &Dst,
+ unsigned &SrcSub, unsigned &DstSub) const;
+
+ public:
+ CoalescerPair(const TargetInstrInfo &tii, const TargetRegisterInfo &tri)
+ : tii_(tii), tri_(tri), dstReg_(0), srcReg_(0), subIdx_(0),
+ partial_(false), crossClass_(false), flipped_(false), newRC_(0) {}
+
+ /// setRegisters - set registers to match the copy instruction MI. Return
+ /// false if MI is not a coalescable copy instruction.
+ bool setRegisters(const MachineInstr*);
+
+ /// flip - Swap srcReg_ and dstReg_. Return false if swapping is impossible
+ /// because dstReg_ is a physical register, or subIdx_ is set.
+ bool flip();
+
+ /// isCoalescable - Return true if MI is a copy instruction that will become
+ /// an identity copy after coalescing.
+ bool isCoalescable(const MachineInstr*) const;
+
+ /// isPhys - Return true if DstReg is a physical register.
+ bool isPhys() const { return !newRC_; }
+
+ /// isPartial - Return true if the original copy instruction did not copy the
+ /// full register, but was a subreg operation.
+ bool isPartial() const { return partial_; }
+
+ /// isCrossClass - Return true if DstReg is virtual and NewRC is a smaller register class than DstReg's.
+ bool isCrossClass() const { return crossClass_; }
+
+ /// isFlipped - Return true when getSrcReg is the register being defined by
+ /// the original copy instruction.
+ bool isFlipped() const { return flipped_; }
+
+ /// getDstReg - Return the register (virtual or physical) that will remain
+ /// after coalescing.
+ unsigned getDstReg() const { return dstReg_; }
+
+ /// getSrcReg - Return the virtual register that will be coalesced away.
+ unsigned getSrcReg() const { return srcReg_; }
+
+ /// getSubIdx - Return the subregister index in DstReg that SrcReg will be
+ /// coalesced into, or 0.
+ unsigned getSubIdx() const { return subIdx_; }
+
+ /// getNewRC - Return the register class of the coalesced register.
+ const TargetRegisterClass *getNewRC() const { return newRC_; }
+ };
}
// Because of the way .a files work, we must force the SimpleRC
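
A hedged sketch of how a client would drive the new CoalescerPair helper;
wouldBecomeIdentity and the caller-provided TII/TRI/instructions are
assumptions for illustration only:

#include "llvm/CodeGen/RegisterCoalescer.h"

// After coalescing the registers copied by CopyMI, would OtherMI turn into an
// identity copy (and thus become removable)?
static bool wouldBecomeIdentity(const llvm::TargetInstrInfo &TII,
                                const llvm::TargetRegisterInfo &TRI,
                                const llvm::MachineInstr *CopyMI,
                                const llvm::MachineInstr *OtherMI) {
  llvm::CoalescerPair CP(TII, TRI);
  if (!CP.setRegisters(CopyMI))     // CopyMI is not a coalescable copy at all
    return false;
  return CP.isCoalescable(OtherMI);
}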
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/RegisterScavenging.h b/libclamav/c++/llvm/include/llvm/CodeGen/RegisterScavenging.h
index 84b726d..246831c 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/RegisterScavenging.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/RegisterScavenging.h
@@ -98,6 +98,10 @@ public:
/// getRegsUsed - return all registers currently in use in used.
void getRegsUsed(BitVector &used, bool includeReserved);
+ /// getRegsAvailable - Return all available registers in the register class
+ /// in Mask.
+ void getRegsAvailable(const TargetRegisterClass *RC, BitVector &Mask);
+
/// FindUnusedReg - Find a unused register of the specified register class.
/// Return 0 if none is found.
unsigned FindUnusedReg(const TargetRegisterClass *RegClass) const;
@@ -147,7 +151,12 @@ private:
/// Add Reg and its aliases to BV.
void addRegWithAliases(BitVector &BV, unsigned Reg);
- unsigned findSurvivorReg(MachineBasicBlock::iterator MI,
+ /// findSurvivorReg - Return the candidate register that is unused for the
+ /// longest after StartMI. UseMI is set to the instruction where the search
+ /// stopped.
+ ///
+ /// No more than InstrLimit instructions are inspected.
+ unsigned findSurvivorReg(MachineBasicBlock::iterator StartMI,
BitVector &Candidates,
unsigned InstrLimit,
MachineBasicBlock::iterator &UseMI);
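
For the new getRegsAvailable() hook above, a small sketch of one way a pass
might pick a scratch register; the helper, the pre-sized BitVector and the
choice of find_first() are illustrative assumptions:

#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetRegisterInfo.h"

// Return the lowest-numbered register of RC that the scavenger considers
// available right now, or 0 if none is free.
static unsigned pickScratchReg(llvm::RegScavenger &RS,
                               const llvm::TargetRegisterInfo &TRI,
                               const llvm::TargetRegisterClass *RC) {
  llvm::BitVector Avail(TRI.getNumRegs());   // one bit per physical register
  RS.getRegsAvailable(RC, Avail);
  int First = Avail.find_first();            // -1 when nothing in RC is free
  return First < 0 ? 0 : unsigned(First);
}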
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/RuntimeLibcalls.h b/libclamav/c++/llvm/include/llvm/CodeGen/RuntimeLibcalls.h
index c404ab6..a51e82a 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/RuntimeLibcalls.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/RuntimeLibcalls.h
@@ -150,9 +150,15 @@ namespace RTLIB {
FLOOR_F64,
FLOOR_F80,
FLOOR_PPCF128,
+ COPYSIGN_F32,
+ COPYSIGN_F64,
+ COPYSIGN_F80,
+ COPYSIGN_PPCF128,
// CONVERSION
FPEXT_F32_F64,
+ FPEXT_F16_F32,
+ FPROUND_F32_F16,
FPROUND_F64_F32,
FPROUND_F80_F32,
FPROUND_PPCF128_F32,
@@ -163,6 +169,8 @@ namespace RTLIB {
FPTOSINT_F32_I32,
FPTOSINT_F32_I64,
FPTOSINT_F32_I128,
+ FPTOSINT_F64_I8,
+ FPTOSINT_F64_I16,
FPTOSINT_F64_I32,
FPTOSINT_F64_I64,
FPTOSINT_F64_I128,
@@ -177,6 +185,8 @@ namespace RTLIB {
FPTOUINT_F32_I32,
FPTOUINT_F32_I64,
FPTOUINT_F32_I128,
+ FPTOUINT_F64_I8,
+ FPTOUINT_F64_I16,
FPTOUINT_F64_I32,
FPTOUINT_F64_I64,
FPTOUINT_F64_I128,
@@ -237,6 +247,40 @@ namespace RTLIB {
// EXCEPTION HANDLING
UNWIND_RESUME,
+ // Family ATOMICs
+ SYNC_VAL_COMPARE_AND_SWAP_1,
+ SYNC_VAL_COMPARE_AND_SWAP_2,
+ SYNC_VAL_COMPARE_AND_SWAP_4,
+ SYNC_VAL_COMPARE_AND_SWAP_8,
+ SYNC_LOCK_TEST_AND_SET_1,
+ SYNC_LOCK_TEST_AND_SET_2,
+ SYNC_LOCK_TEST_AND_SET_4,
+ SYNC_LOCK_TEST_AND_SET_8,
+ SYNC_FETCH_AND_ADD_1,
+ SYNC_FETCH_AND_ADD_2,
+ SYNC_FETCH_AND_ADD_4,
+ SYNC_FETCH_AND_ADD_8,
+ SYNC_FETCH_AND_SUB_1,
+ SYNC_FETCH_AND_SUB_2,
+ SYNC_FETCH_AND_SUB_4,
+ SYNC_FETCH_AND_SUB_8,
+ SYNC_FETCH_AND_AND_1,
+ SYNC_FETCH_AND_AND_2,
+ SYNC_FETCH_AND_AND_4,
+ SYNC_FETCH_AND_AND_8,
+ SYNC_FETCH_AND_OR_1,
+ SYNC_FETCH_AND_OR_2,
+ SYNC_FETCH_AND_OR_4,
+ SYNC_FETCH_AND_OR_8,
+ SYNC_FETCH_AND_XOR_1,
+ SYNC_FETCH_AND_XOR_2,
+ SYNC_FETCH_AND_XOR_4,
+ SYNC_FETCH_AND_XOR_8,
+ SYNC_FETCH_AND_NAND_1,
+ SYNC_FETCH_AND_NAND_2,
+ SYNC_FETCH_AND_NAND_4,
+ SYNC_FETCH_AND_NAND_8,
+
UNKNOWN_LIBCALL
};
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/ScheduleDAG.h b/libclamav/c++/llvm/include/llvm/CodeGen/ScheduleDAG.h
index 955965b..076268b 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/ScheduleDAG.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/ScheduleDAG.h
@@ -16,6 +16,7 @@
#define LLVM_CODEGEN_SCHEDULEDAG_H
#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/Target/TargetMachine.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/GraphTraits.h"
@@ -27,7 +28,6 @@ namespace llvm {
class SUnit;
class MachineConstantPool;
class MachineFunction;
- class MachineModuleInfo;
class MachineRegisterInfo;
class MachineInstr;
class TargetRegisterInfo;
@@ -35,7 +35,6 @@ namespace llvm {
class SDNode;
class TargetInstrInfo;
class TargetInstrDesc;
- class TargetLowering;
class TargetMachine;
class TargetRegisterClass;
template<class Graph> class GraphWriter;
@@ -240,7 +239,7 @@ namespace llvm {
typedef SmallVector<SDep, 4>::iterator succ_iterator;
typedef SmallVector<SDep, 4>::const_iterator const_pred_iterator;
typedef SmallVector<SDep, 4>::const_iterator const_succ_iterator;
-
+
unsigned NodeNum; // Entry # of node in the node vector.
unsigned NodeQueueId; // Queue id of node.
unsigned short Latency; // Node latency.
@@ -257,6 +256,9 @@ namespace llvm {
bool isScheduled : 1; // True once scheduled.
bool isScheduleHigh : 1; // True if preferable to schedule high.
bool isCloned : 1; // True if this node has been cloned.
+ Sched::Preference SchedulingPref; // Scheduling preference.
+
+ SmallVector<MachineInstr*, 4> DbgInstrList; // dbg_values referencing this.
private:
bool isDepthCurrent : 1; // True if Depth is current.
bool isHeightCurrent : 1; // True if Height is current.
@@ -269,35 +271,38 @@ namespace llvm {
/// SUnit - Construct an SUnit for pre-regalloc scheduling to represent
/// an SDNode and any nodes flagged to it.
SUnit(SDNode *node, unsigned nodenum)
- : Node(node), Instr(0), OrigNode(0), NodeNum(nodenum), NodeQueueId(0),
- Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0), NumSuccsLeft(0),
- isTwoAddress(false), isCommutable(false), hasPhysRegDefs(false),
- hasPhysRegClobbers(false),
+ : Node(node), Instr(0), OrigNode(0), NodeNum(nodenum),
+ NodeQueueId(0), Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
+ NumSuccsLeft(0), isTwoAddress(false), isCommutable(false),
+ hasPhysRegDefs(false), hasPhysRegClobbers(false),
isPending(false), isAvailable(false), isScheduled(false),
isScheduleHigh(false), isCloned(false),
+ SchedulingPref(Sched::None),
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
CopyDstRC(NULL), CopySrcRC(NULL) {}
/// SUnit - Construct an SUnit for post-regalloc scheduling to represent
/// a MachineInstr.
SUnit(MachineInstr *instr, unsigned nodenum)
- : Node(0), Instr(instr), OrigNode(0), NodeNum(nodenum), NodeQueueId(0),
- Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0), NumSuccsLeft(0),
- isTwoAddress(false), isCommutable(false), hasPhysRegDefs(false),
- hasPhysRegClobbers(false),
+ : Node(0), Instr(instr), OrigNode(0), NodeNum(nodenum),
+ NodeQueueId(0), Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
+ NumSuccsLeft(0), isTwoAddress(false), isCommutable(false),
+ hasPhysRegDefs(false), hasPhysRegClobbers(false),
isPending(false), isAvailable(false), isScheduled(false),
isScheduleHigh(false), isCloned(false),
+ SchedulingPref(Sched::None),
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
CopyDstRC(NULL), CopySrcRC(NULL) {}
/// SUnit - Construct a placeholder SUnit.
SUnit()
- : Node(0), Instr(0), OrigNode(0), NodeNum(~0u), NodeQueueId(0),
- Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0), NumSuccsLeft(0),
- isTwoAddress(false), isCommutable(false), hasPhysRegDefs(false),
- hasPhysRegClobbers(false),
+ : Node(0), Instr(0), OrigNode(0), NodeNum(~0u),
+ NodeQueueId(0), Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
+ NumSuccsLeft(0), isTwoAddress(false), isCommutable(false),
+ hasPhysRegDefs(false), hasPhysRegClobbers(false),
isPending(false), isAvailable(false), isScheduled(false),
isScheduleHigh(false), isCloned(false),
+ SchedulingPref(Sched::None),
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
CopyDstRC(NULL), CopySrcRC(NULL) {}
@@ -390,7 +395,7 @@ namespace llvm {
return true;
return false;
}
-
+
void dump(const ScheduleDAG *G) const;
void dumpAll(const ScheduleDAG *G) const;
void print(raw_ostream &O, const ScheduleDAG *G) const;
@@ -409,7 +414,9 @@ namespace llvm {
/// implementation to decide.
///
class SchedulingPriorityQueue {
+ unsigned CurCycle;
public:
+ SchedulingPriorityQueue() : CurCycle(0) {}
virtual ~SchedulingPriorityQueue() {}
virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
@@ -417,11 +424,15 @@ namespace llvm {
virtual void updateNode(const SUnit *SU) = 0;
virtual void releaseState() = 0;
- virtual unsigned size() const = 0;
virtual bool empty() const = 0;
virtual void push(SUnit *U) = 0;
- virtual void push_all(const std::vector<SUnit *> &Nodes) = 0;
+ void push_all(const std::vector<SUnit *> &Nodes) {
+ for (std::vector<SUnit *>::const_iterator I = Nodes.begin(),
+ E = Nodes.end(); I != E; ++I)
+ push(*I);
+ }
+
virtual SUnit *pop() = 0;
virtual void remove(SUnit *SU) = 0;
@@ -433,6 +444,14 @@ namespace llvm {
virtual void ScheduledNode(SUnit *) {}
virtual void UnscheduledNode(SUnit *) {}
+
+ void setCurCycle(unsigned Cycle) {
+ CurCycle = Cycle;
+ }
+
+ unsigned getCurCycle() const {
+ return CurCycle;
+ }
};
class ScheduleDAG {
@@ -442,10 +461,8 @@ namespace llvm {
const TargetMachine &TM; // Target processor
const TargetInstrInfo *TII; // Target instruction information
const TargetRegisterInfo *TRI; // Target processor register info
- const TargetLowering *TLI; // Target lowering info
MachineFunction &MF; // Machine function
MachineRegisterInfo &MRI; // Virtual/real register map
- MachineConstantPool *ConstPool; // Target constant pool
std::vector<SUnit*> Sequence; // The schedule. Null SUnit*'s
// represent noop instructions.
std::vector<SUnit> SUnits; // The scheduling units.
@@ -464,8 +481,7 @@ namespace llvm {
/// EmitSchedule - Insert MachineInstrs into the MachineBasicBlock
/// according to the order specified in Sequence.
///
- virtual MachineBasicBlock*
- EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*>*) = 0;
+ virtual MachineBasicBlock *EmitSchedule() = 0;
void dumpSchedule() const;
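
Since push_all() is now a plain base-class helper and the queue tracks the
current cycle itself, a scheduler's release step reduces to the sketch below
(releaseCycle and its arguments are assumed, not taken from the patch):

#include "llvm/CodeGen/ScheduleDAG.h"
#include <vector>

static void releaseCycle(llvm::SchedulingPriorityQueue &Queue,
                         const std::vector<llvm::SUnit*> &NowAvailable,
                         unsigned NextCycle) {
  Queue.setCurCycle(NextCycle);   // priority heuristics may read getCurCycle()
  Queue.push_all(NowAvailable);   // non-virtual: forwards to push() per SUnit
}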
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/SchedulerRegistry.h b/libclamav/c++/llvm/include/llvm/CodeGen/SchedulerRegistry.h
index cf3274f..96573dd 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/SchedulerRegistry.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/SchedulerRegistry.h
@@ -73,11 +73,24 @@ ScheduleDAGSDNodes *createBURRListDAGScheduler(SelectionDAGISel *IS,
ScheduleDAGSDNodes *createTDRRListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel);
-/// createBURRListDAGScheduler - This creates a bottom up register usage
-/// reduction list scheduler that schedules in source code order when possible.
+/// createSourceListDAGScheduler - This creates a bottom up list scheduler that
+/// schedules nodes in source code order when possible.
ScheduleDAGSDNodes *createSourceListDAGScheduler(SelectionDAGISel *IS,
CodeGenOpt::Level OptLevel);
+/// createHybridListDAGScheduler - This creates a bottom up register pressure
+/// aware list scheduler that makes use of latency information to avoid stalls
+/// for long latency instructions in low register pressure mode. In high
+/// register pressure mode it schedules to reduce register pressure.
+ScheduleDAGSDNodes *createHybridListDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level);
+
+/// createILPListDAGScheduler - This creates a bottom up register pressure
+/// aware list scheduler that tries to increase instruction level parallelism
+/// in low register pressure mode. In high register pressure mode it schedules
+/// to reduce register pressure.
+ScheduleDAGSDNodes *createILPListDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level);
/// createTDListDAGScheduler - This creates a top-down list scheduler with
/// a hazard recognizer.
ScheduleDAGSDNodes *createTDListDAGScheduler(SelectionDAGISel *IS,
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/SelectionDAG.h b/libclamav/c++/llvm/include/llvm/CodeGen/SelectionDAG.h
index ad01e89..7723fa0 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -29,13 +29,13 @@
namespace llvm {
class AliasAnalysis;
-class DwarfWriter;
-class FunctionLoweringInfo;
class MachineConstantPoolValue;
class MachineFunction;
-class MachineModuleInfo;
+class MDNode;
class SDNodeOrdering;
+class SDDbgValue;
class TargetLowering;
+class TargetSelectionDAGInfo;
template<> struct ilist_traits<SDNode> : public ilist_default_traits<SDNode> {
private:
@@ -57,6 +57,56 @@ private:
static void createNode(const SDNode &);
};
+/// SDDbgInfo - Keeps track of dbg_value information through SDISel. We do
+/// not build SDNodes for these so as not to perturb the generated code;
+/// instead the info is kept off to the side in this structure. Each SDNode may
+/// have one or more associated dbg_value entries. This information is kept in
+/// DbgValMap.
+/// Byval parameters are handled separately because they don't use alloca's,
+/// which busts the normal mechanism. There is good reason for handling all
+/// parameters separately: they may not have code generated for them, they
+/// should always go at the beginning of the function regardless of other code
+/// motion, and debug info for them is potentially useful even if the parameter
+/// is unused. Right now only byval parameters are handled separately.
+class SDDbgInfo {
+ SmallVector<SDDbgValue*, 32> DbgValues;
+ SmallVector<SDDbgValue*, 32> ByvalParmDbgValues;
+ DenseMap<const SDNode*, SmallVector<SDDbgValue*, 2> > DbgValMap;
+
+ void operator=(const SDDbgInfo&); // Do not implement.
+ SDDbgInfo(const SDDbgInfo&); // Do not implement.
+public:
+ SDDbgInfo() {}
+
+ void add(SDDbgValue *V, const SDNode *Node, bool isParameter) {
+ if (isParameter) {
+ ByvalParmDbgValues.push_back(V);
+ } else DbgValues.push_back(V);
+ if (Node)
+ DbgValMap[Node].push_back(V);
+ }
+
+ void clear() {
+ DbgValMap.clear();
+ DbgValues.clear();
+ ByvalParmDbgValues.clear();
+ }
+
+ bool empty() const {
+ return DbgValues.empty() && ByvalParmDbgValues.empty();
+ }
+
+ SmallVector<SDDbgValue*,2> &getSDDbgValues(const SDNode *Node) {
+ return DbgValMap[Node];
+ }
+
+ typedef SmallVector<SDDbgValue*,32>::iterator DbgIterator;
+ DbgIterator DbgBegin() { return DbgValues.begin(); }
+ DbgIterator DbgEnd() { return DbgValues.end(); }
+ DbgIterator ByvalParmDbgBegin() { return ByvalParmDbgValues.begin(); }
+ DbgIterator ByvalParmDbgEnd() { return ByvalParmDbgValues.end(); }
+};
+
enum CombineLevel {
Unrestricted, // Combine may create illegal operations and illegal types.
NoIllegalTypes, // Combine may create illegal operations but no illegal types.
@@ -79,12 +129,11 @@ void checkForCycles(const SelectionDAG *DAG);
/// linear form.
///
class SelectionDAG {
- TargetLowering &TLI;
+ const TargetMachine &TM;
+ const TargetLowering &TLI;
+ const TargetSelectionDAGInfo &TSI;
MachineFunction *MF;
- FunctionLoweringInfo &FLI;
- MachineModuleInfo *MMI;
- DwarfWriter *DW;
- LLVMContext* Context;
+ LLVMContext *Context;
/// EntryNode - The starting token.
SDNode EntryNode;
@@ -119,6 +168,9 @@ class SelectionDAG {
/// the ordering of the original LLVM instructions.
SDNodeOrdering *Ordering;
+ /// DbgInfo - Tracks dbg_value information through SDISel.
+ SDDbgInfo *DbgInfo;
+
/// VerifyNode - Sanity check the given node. Aborts if it is invalid.
void VerifyNode(SDNode *N);
@@ -133,13 +185,13 @@ class SelectionDAG {
SelectionDAG(const SelectionDAG&); // Do not implement.
public:
- SelectionDAG(TargetLowering &tli, FunctionLoweringInfo &fli);
+ explicit SelectionDAG(const TargetMachine &TM);
~SelectionDAG();
/// init - Prepare this SelectionDAG to process code in the given
/// MachineFunction.
///
- void init(MachineFunction &mf, MachineModuleInfo *mmi, DwarfWriter *dw);
+ void init(MachineFunction &mf);
/// clear - Clear state and free memory necessary to make this
/// SelectionDAG ready to process a new block.
@@ -147,11 +199,9 @@ public:
void clear();
MachineFunction &getMachineFunction() const { return *MF; }
- const TargetMachine &getTarget() const;
- TargetLowering &getTargetLoweringInfo() const { return TLI; }
- FunctionLoweringInfo &getFunctionLoweringInfo() const { return FLI; }
- MachineModuleInfo *getMachineModuleInfo() const { return MMI; }
- DwarfWriter *getDwarfWriter() const { return DW; }
+ const TargetMachine &getTarget() const { return TM; }
+ const TargetLowering &getTargetLoweringInfo() const { return TLI; }
+ const TargetSelectionDAGInfo &getSelectionDAGInfo() const { return TSI; }
LLVMContext *getContext() const {return Context; }
/// viewGraph - Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
@@ -284,6 +334,8 @@ public:
SDValue getTargetConstant(const ConstantInt &Val, EVT VT) {
return getConstant(Val, VT, true);
}
+ // The forms below that take a double should only be used for simple
+ // constants that can be exactly represented in VT. No checks are made.
SDValue getConstantFP(double Val, EVT VT, bool isTarget = false);
SDValue getConstantFP(const APFloat& Val, EVT VT, bool isTarget = false);
SDValue getConstantFP(const ConstantFP &CF, EVT VT, bool isTarget = false);
@@ -296,13 +348,13 @@ public:
SDValue getTargetConstantFP(const ConstantFP &Val, EVT VT) {
return getConstantFP(Val, VT, true);
}
- SDValue getGlobalAddress(const GlobalValue *GV, EVT VT,
+ SDValue getGlobalAddress(const GlobalValue *GV, DebugLoc DL, EVT VT,
int64_t offset = 0, bool isTargetGA = false,
unsigned char TargetFlags = 0);
- SDValue getTargetGlobalAddress(const GlobalValue *GV, EVT VT,
+ SDValue getTargetGlobalAddress(const GlobalValue *GV, DebugLoc DL, EVT VT,
int64_t offset = 0,
unsigned char TargetFlags = 0) {
- return getGlobalAddress(GV, VT, offset, true, TargetFlags);
+ return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags);
}
SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false);
SDValue getTargetFrameIndex(int FI, EVT VT) {
@@ -313,10 +365,10 @@ public:
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags = 0) {
return getJumpTable(JTI, VT, true, TargetFlags);
}
- SDValue getConstantPool(Constant *C, EVT VT,
+ SDValue getConstantPool(const Constant *C, EVT VT,
unsigned Align = 0, int Offs = 0, bool isT=false,
unsigned char TargetFlags = 0);
- SDValue getTargetConstantPool(Constant *C, EVT VT,
+ SDValue getTargetConstantPool(const Constant *C, EVT VT,
unsigned Align = 0, int Offset = 0,
unsigned char TargetFlags = 0) {
return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
@@ -339,9 +391,8 @@ public:
unsigned char TargetFlags = 0);
SDValue getValueType(EVT);
SDValue getRegister(unsigned Reg, EVT VT);
- SDValue getLabel(unsigned Opcode, DebugLoc dl, SDValue Root,
- unsigned LabelID);
- SDValue getBlockAddress(BlockAddress *BA, EVT VT,
+ SDValue getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label);
+ SDValue getBlockAddress(const BlockAddress *BA, EVT VT,
bool isTarget = false, unsigned char TargetFlags = 0);
SDValue getCopyToReg(SDValue Chain, DebugLoc dl, unsigned Reg, SDValue N) {
@@ -419,8 +470,7 @@ public:
SDValue getCALLSEQ_START(SDValue Chain, SDValue Op) {
SDVTList VTs = getVTList(MVT::Other, MVT::Flag);
SDValue Ops[] = { Chain, Op };
- return getNode(ISD::CALLSEQ_START, DebugLoc::getUnknownLoc(),
- VTs, Ops, 2);
+ return getNode(ISD::CALLSEQ_START, DebugLoc(), VTs, Ops, 2);
}
/// getCALLSEQ_END - Return a new CALLSEQ_END node, which always must have a
@@ -434,20 +484,19 @@ public:
Ops.push_back(Op1);
Ops.push_back(Op2);
Ops.push_back(InFlag);
- return getNode(ISD::CALLSEQ_END, DebugLoc::getUnknownLoc(), NodeTys,
- &Ops[0],
+ return getNode(ISD::CALLSEQ_END, DebugLoc(), NodeTys, &Ops[0],
(unsigned)Ops.size() - (InFlag.getNode() == 0 ? 1 : 0));
}
/// getUNDEF - Return an UNDEF node. UNDEF does not have a useful DebugLoc.
SDValue getUNDEF(EVT VT) {
- return getNode(ISD::UNDEF, DebugLoc::getUnknownLoc(), VT);
+ return getNode(ISD::UNDEF, DebugLoc(), VT);
}
/// getGLOBAL_OFFSET_TABLE - Return a GLOBAL_OFFSET_TABLE node. This does
/// not have a useful DebugLoc.
SDValue getGLOBAL_OFFSET_TABLE(EVT VT) {
- return getNode(ISD::GLOBAL_OFFSET_TABLE, DebugLoc::getUnknownLoc(), VT);
+ return getNode(ISD::GLOBAL_OFFSET_TABLE, DebugLoc(), VT);
}
/// getNode - Gets or creates the specified node.
@@ -492,17 +541,17 @@ public:
SDValue getStackArgumentTokenFactor(SDValue Chain);
SDValue getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align, bool AlwaysInline,
+ SDValue Size, unsigned Align, bool isVol, bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff);
SDValue getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
+ SDValue Size, unsigned Align, bool isVol,
const Value *DstSV, uint64_t DstOSVff,
const Value *SrcSV, uint64_t SrcSVOff);
SDValue getMemset(SDValue Chain, DebugLoc dl, SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
+ SDValue Size, unsigned Align, bool isVol,
const Value *DstSV, uint64_t DstSVOff);
/// getSetCC - Helper function to make it easier to build SetCC's if you just
@@ -533,7 +582,7 @@ public:
/// getVAArg - VAArg produces a result and token chain, and takes a pointer
/// and a source value as input.
SDValue getVAArg(EVT VT, DebugLoc dl, SDValue Chain, SDValue Ptr,
- SDValue SV);
+ SDValue SV, unsigned Align);
/// getAtomic - Gets a node for an atomic op, produces result and chain and
/// takes 3 operands
@@ -583,18 +632,20 @@ public:
SDValue getLoad(EVT VT, DebugLoc dl, SDValue Chain, SDValue Ptr,
const Value *SV, int SVOffset, bool isVolatile,
bool isNonTemporal, unsigned Alignment);
- SDValue getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, EVT VT,
+ SDValue getExtLoad(ISD::LoadExtType ExtType, EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr, const Value *SV,
int SVOffset, EVT MemVT, bool isVolatile,
bool isNonTemporal, unsigned Alignment);
SDValue getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
- SDValue Offset, ISD::MemIndexedMode AM);
- SDValue getLoad(ISD::MemIndexedMode AM, DebugLoc dl, ISD::LoadExtType ExtType,
- EVT VT, SDValue Chain, SDValue Ptr, SDValue Offset,
+ SDValue Offset, ISD::MemIndexedMode AM);
+ SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
+ EVT VT, DebugLoc dl,
+ SDValue Chain, SDValue Ptr, SDValue Offset,
const Value *SV, int SVOffset, EVT MemVT,
bool isVolatile, bool isNonTemporal, unsigned Alignment);
- SDValue getLoad(ISD::MemIndexedMode AM, DebugLoc dl, ISD::LoadExtType ExtType,
- EVT VT, SDValue Chain, SDValue Ptr, SDValue Offset,
+ SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
+ EVT VT, DebugLoc dl,
+ SDValue Chain, SDValue Ptr, SDValue Offset,
EVT MemVT, MachineMemOperand *MMO);
/// getStore - Helper function to build ISD::STORE nodes.
@@ -616,6 +667,9 @@ public:
/// getSrcValue - Construct a node to track a Value* through the backend.
SDValue getSrcValue(const Value *v);
+ /// getMDNode - Return an MDNodeSDNode which holds an MDNode.
+ SDValue getMDNode(const MDNode *MD);
+
/// getShiftAmountOperand - Return the specified value casted to
/// the target's desired shift amount type.
SDValue getShiftAmountOperand(SDValue Op);
@@ -626,15 +680,15 @@ public:
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
- SDValue UpdateNodeOperands(SDValue N, SDValue Op);
- SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2);
- SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op);
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2);
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3);
- SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4);
- SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4, SDValue Op5);
- SDValue UpdateNodeOperands(SDValue N,
+ SDNode *UpdateNodeOperands(SDNode *N,
const SDValue *Ops, unsigned NumOps);
/// SelectNodeTo - These are used for target selectors to *mutate* the
@@ -726,6 +780,15 @@ public:
SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTs,
const SDValue *Ops, unsigned NumOps);
+ /// getDbgValue - Creates a SDDbgValue node.
+ ///
+ SDDbgValue *getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off,
+ DebugLoc DL, unsigned O);
+ SDDbgValue *getDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off,
+ DebugLoc DL, unsigned O);
+ SDDbgValue *getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
+ DebugLoc DL, unsigned O);
+
/// DAGUpdateListener - Clients of various APIs that cause global effects on
/// the DAG can optionally implement this interface. This allows the clients
/// to handle the various sorts of updates that happen.
@@ -828,6 +891,28 @@ public:
/// GetOrdering - Get the order for the SDNode.
unsigned GetOrdering(const SDNode *SD) const;
+ /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
+ /// value is produced by SD.
+ void AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter);
+
+ /// GetDbgValues - Get the debug values which reference the given SDNode.
+ SmallVector<SDDbgValue*,2> &GetDbgValues(const SDNode* SD) {
+ return DbgInfo->getSDDbgValues(SD);
+ }
+
+ /// hasDebugValues - Return true if there are any SDDbgValue nodes associated
+ /// with this SelectionDAG.
+ bool hasDebugValues() const { return !DbgInfo->empty(); }
+
+ SDDbgInfo::DbgIterator DbgBegin() { return DbgInfo->DbgBegin(); }
+ SDDbgInfo::DbgIterator DbgEnd() { return DbgInfo->DbgEnd(); }
+ SDDbgInfo::DbgIterator ByvalParmDbgBegin() {
+ return DbgInfo->ByvalParmDbgBegin();
+ }
+ SDDbgInfo::DbgIterator ByvalParmDbgEnd() {
+ return DbgInfo->ByvalParmDbgEnd();
+ }
+
void dump() const;
/// CreateStackTemporary - Create a stack temporary, suitable for holding the
@@ -892,10 +977,6 @@ public:
/// been verified as a debug information descriptor.
bool isVerifiedDebugInfoDesc(SDValue Op) const;
- /// getShuffleScalarElt - Returns the scalar element that will make up the ith
- /// element of the result of the vector shuffle.
- SDValue getShuffleScalarElt(const ShuffleVectorSDNode *N, unsigned Idx);
-
/// UnrollVectorOp - Utility function used by legalize and lowering to
/// "unroll" a vector operation by splitting out the scalars and operating
/// on each element individually. If the ResNE is 0, fully unroll the vector
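
The new SDDbgInfo plumbing is driven through getDbgValue()/AddDbgValue(); the
sketch below shows the intended flow, with attachDebugValue and its arguments
assumed to come from the builder (illustration only):

#include "llvm/CodeGen/SelectionDAG.h"

// Record that the value produced by N corresponds to the source variable
// described by the MDNode, without adding any node to the DAG itself.
static void attachDebugValue(llvm::SelectionDAG &DAG, llvm::MDNode *Variable,
                             llvm::SDValue N, llvm::DebugLoc DL,
                             unsigned Order) {
  llvm::SDDbgValue *DV =
      DAG.getDbgValue(Variable, N.getNode(), N.getResNo(), /*Off=*/0, DL, Order);
  DAG.AddDbgValue(DV, N.getNode(), /*isParameter=*/false);
}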
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/SelectionDAGISel.h b/libclamav/c++/llvm/include/llvm/CodeGen/SelectionDAGISel.h
index d9c1374..01d05dd 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/SelectionDAGISel.h
@@ -17,7 +17,6 @@
#include "llvm/BasicBlock.h"
#include "llvm/Pass.h"
-#include "llvm/Constant.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -29,8 +28,6 @@ namespace llvm {
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
- class MachineModuleInfo;
- class DwarfWriter;
class TargetLowering;
class TargetInstrInfo;
class FunctionLoweringInfo;
@@ -43,31 +40,28 @@ namespace llvm {
class SelectionDAGISel : public MachineFunctionPass {
public:
const TargetMachine &TM;
- TargetLowering &TLI;
+ const TargetLowering &TLI;
FunctionLoweringInfo *FuncInfo;
MachineFunction *MF;
MachineRegisterInfo *RegInfo;
SelectionDAG *CurDAG;
SelectionDAGBuilder *SDB;
- MachineBasicBlock *BB;
AliasAnalysis *AA;
GCFunctionInfo *GFI;
CodeGenOpt::Level OptLevel;
static char ID;
- explicit SelectionDAGISel(TargetMachine &tm,
+ explicit SelectionDAGISel(const TargetMachine &tm,
CodeGenOpt::Level OL = CodeGenOpt::Default);
virtual ~SelectionDAGISel();
- TargetLowering &getTargetLowering() { return TLI; }
+ const TargetLowering &getTargetLowering() { return TLI; }
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
virtual bool runOnMachineFunction(MachineFunction &MF);
- unsigned MakeReg(EVT VT);
-
- virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {}
+ virtual void EmitFunctionEntryCode() {}
/// PreprocessISelDAG - This hook allows targets to hack on the graph before
/// instruction selection starts.
@@ -97,8 +91,11 @@ public:
/// IsLegalToFold - Returns true if the specific operand node N of
/// U can be folded during instruction selection that starts at Root.
- bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
- bool IgnoreChains = false) const;
+ /// FIXME: This is a static member function because the PIC16 target
+ /// uses it during lowering.
+ static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
+ CodeGenOpt::Level OptLevel,
+ bool IgnoreChains = false);
/// CreateTargetHazardRecognizer - Return a newly allocated hazard recognizer
/// to use for this target when scheduling the DAG.
@@ -136,6 +133,8 @@ public:
OPC_EmitRegister,
OPC_EmitConvertToTarget,
OPC_EmitMergeInputChains,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitMergeInputChains1_1,
OPC_EmitCopyToReg,
OPC_EmitNodeXForm,
OPC_EmitNode,
@@ -274,7 +273,6 @@ private:
// Calls to these functions are generated by tblgen.
SDNode *Select_INLINEASM(SDNode *N);
SDNode *Select_UNDEF(SDNode *N);
- SDNode *Select_EH_LABEL(SDNode *N);
void CannotYetSelect(SDNode *N);
private:
@@ -282,26 +280,18 @@ private:
SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTs,
const SDValue *Ops, unsigned NumOps, unsigned EmitNodeInfo);
- void SelectAllBasicBlocks(Function &Fn, MachineFunction &MF,
- MachineModuleInfo *MMI,
- DwarfWriter *DW,
- const TargetInstrInfo &TII);
+ void PrepareEHLandingPad();
+ void SelectAllBasicBlocks(const Function &Fn);
void FinishBasicBlock();
- void SelectBasicBlock(BasicBlock *LLVMBB,
- BasicBlock::iterator Begin,
- BasicBlock::iterator End,
+ void SelectBasicBlock(BasicBlock::const_iterator Begin,
+ BasicBlock::const_iterator End,
bool &HadTailCall);
void CodeGenAndEmitDAG();
- void LowerArguments(BasicBlock *BB);
+ void LowerArguments(const BasicBlock *BB);
- void ShrinkDemandedOps();
void ComputeLiveOutVRegInfo();
- void HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB);
-
- bool HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB, FastISel *F);
-
/// Create the scheduler. If a specific scheduler was specified
/// via the SchedulerRegistry, use it, otherwise select the
/// one preferred by the target.
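
Because IsLegalToFold is now a static member (see the FIXME above), lowering
code without a SelectionDAGISel instance can query it directly; the wrapper
below is a hypothetical example of such a call site:

#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"

static bool canFoldOperand(llvm::SDValue N, llvm::SDNode *U, llvm::SDNode *Root,
                           llvm::CodeGenOpt::Level OptLevel) {
  return llvm::SelectionDAGISel::IsLegalToFold(N, U, Root, OptLevel);
}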
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/libclamav/c++/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 21a0b98..4cf6f36 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -25,6 +25,7 @@
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/Support/MathExtras.h"
@@ -40,6 +41,7 @@ class MachineBasicBlock;
class MachineConstantPoolValue;
class SDNode;
class Value;
+class MCSymbol;
template <typename T> struct DenseMapInfo;
template <typename T> struct simplify_type;
template <typename T> struct ilist_traits;
@@ -55,562 +57,7 @@ struct SDVTList {
unsigned int NumVTs;
};
-/// ISD namespace - This namespace contains an enum which represents all of the
-/// SelectionDAG node types and value types.
-///
namespace ISD {
-
- //===--------------------------------------------------------------------===//
- /// ISD::NodeType enum - This enum defines the target-independent operators
- /// for a SelectionDAG.
- ///
- /// Targets may also define target-dependent operator codes for SDNodes. For
- /// example, on x86, these are the enum values in the X86ISD namespace.
- /// Targets should aim to use target-independent operators to model their
- /// instruction sets as much as possible, and only use target-dependent
- /// operators when they have special requirements.
- ///
- /// Finally, during and after selection proper, SNodes may use special
- /// operator codes that correspond directly with MachineInstr opcodes. These
- /// are used to represent selected instructions. See the isMachineOpcode()
- /// and getMachineOpcode() member functions of SDNode.
- ///
- enum NodeType {
- // DELETED_NODE - This is an illegal value that is used to catch
- // errors. This opcode is not a legal opcode for any node.
- DELETED_NODE,
-
- // EntryToken - This is the marker used to indicate the start of the region.
- EntryToken,
-
- // TokenFactor - This node takes multiple tokens as input and produces a
- // single token result. This is used to represent the fact that the operand
- // operators are independent of each other.
- TokenFactor,
-
- // AssertSext, AssertZext - These nodes record if a register contains a
- // value that has already been zero or sign extended from a narrower type.
- // These nodes take two operands. The first is the node that has already
- // been extended, and the second is a value type node indicating the width
- // of the extension
- AssertSext, AssertZext,
-
- // Various leaf nodes.
- BasicBlock, VALUETYPE, CONDCODE, Register,
- Constant, ConstantFP,
- GlobalAddress, GlobalTLSAddress, FrameIndex,
- JumpTable, ConstantPool, ExternalSymbol, BlockAddress,
-
- // The address of the GOT
- GLOBAL_OFFSET_TABLE,
-
- // FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
- // llvm.returnaddress on the DAG. These nodes take one operand, the index
- // of the frame or return address to return. An index of zero corresponds
- // to the current function's frame or return address, an index of one to the
- // parent's frame or return address, and so on.
- FRAMEADDR, RETURNADDR,
-
- // FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
- // first (possible) on-stack argument. This is needed for correct stack
- // adjustment during unwind.
- FRAME_TO_ARGS_OFFSET,
-
- // RESULT, OUTCHAIN = EXCEPTIONADDR(INCHAIN) - This node represents the
- // address of the exception block on entry to an landing pad block.
- EXCEPTIONADDR,
-
- // RESULT, OUTCHAIN = LSDAADDR(INCHAIN) - This node represents the
- // address of the Language Specific Data Area for the enclosing function.
- LSDAADDR,
-
- // RESULT, OUTCHAIN = EHSELECTION(INCHAIN, EXCEPTION) - This node represents
- // the selection index of the exception thrown.
- EHSELECTION,
-
- // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
- // 'eh_return' gcc dwarf builtin, which is used to return from
- // exception. The general meaning is: adjust stack by OFFSET and pass
- // execution to HANDLER. Many platform-related details also :)
- EH_RETURN,
-
- // TargetConstant* - Like Constant*, but the DAG does not do any folding or
- // simplification of the constant.
- TargetConstant,
- TargetConstantFP,
-
- // TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
- // anything else with this node, and this is valid in the target-specific
- // dag, turning into a GlobalAddress operand.
- TargetGlobalAddress,
- TargetGlobalTLSAddress,
- TargetFrameIndex,
- TargetJumpTable,
- TargetConstantPool,
- TargetExternalSymbol,
- TargetBlockAddress,
-
- /// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
- /// This node represents a target intrinsic function with no side effects.
- /// The first operand is the ID number of the intrinsic from the
- /// llvm::Intrinsic namespace. The operands to the intrinsic follow. The
- /// node has returns the result of the intrinsic.
- INTRINSIC_WO_CHAIN,
-
- /// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
- /// This node represents a target intrinsic function with side effects that
- /// returns a result. The first operand is a chain pointer. The second is
- /// the ID number of the intrinsic from the llvm::Intrinsic namespace. The
- /// operands to the intrinsic follow. The node has two results, the result
- /// of the intrinsic and an output chain.
- INTRINSIC_W_CHAIN,
-
- /// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...)
- /// This node represents a target intrinsic function with side effects that
- /// does not return a result. The first operand is a chain pointer. The
- /// second is the ID number of the intrinsic from the llvm::Intrinsic
- /// namespace. The operands to the intrinsic follow.
- INTRINSIC_VOID,
-
- // CopyToReg - This node has three operands: a chain, a register number to
- // set to this value, and a value.
- CopyToReg,
-
- // CopyFromReg - This node indicates that the input value is a virtual or
- // physical register that is defined outside of the scope of this
- // SelectionDAG. The register is available from the RegisterSDNode object.
- CopyFromReg,
-
- // UNDEF - An undefined node
- UNDEF,
-
- // EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
- // a Constant, which is required to be operand #1) half of the integer or
- // float value specified as operand #0. This is only for use before
- // legalization, for values that will be broken into multiple registers.
- EXTRACT_ELEMENT,
-
- // BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways. Given
- // two values of the same integer value type, this produces a value twice as
- // big. Like EXTRACT_ELEMENT, this can only be used before legalization.
- BUILD_PAIR,
-
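// [Illustrative sketch only, not part of the diff or of LLVM's API: what the
// EXTRACT_ELEMENT / BUILD_PAIR comments above describe, for an i64 handled as
// two i32 halves before legalization.]
#include <cstdint>
static uint32_t extract_element(uint64_t v, unsigned idx) {
  // operand #1 selects the half: 0 = lower, 1 = upper
  return idx == 0 ? (uint32_t)v : (uint32_t)(v >> 32);
}
static uint64_t build_pair(uint32_t lo, uint32_t hi) {
  // opposite direction: two iN values produce one value twice as big
  return ((uint64_t)hi << 32) | lo;
}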
- // MERGE_VALUES - This node takes multiple discrete operands and returns
- // them all as its individual results. This nodes has exactly the same
- // number of inputs and outputs. This node is useful for some pieces of the
- // code generator that want to think about a single node with multiple
- // results, not multiple nodes.
- MERGE_VALUES,
-
- // Simple integer binary arithmetic operators.
- ADD, SUB, MUL, SDIV, UDIV, SREM, UREM,
-
- // SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
- // a signed/unsigned value of type i[2*N], and return the full value as
- // two results, each of type iN.
- SMUL_LOHI, UMUL_LOHI,
-
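// [Illustrative sketch only: the SMUL_LOHI semantics described above, for i32
// operands producing the full i64 product as two i32 results.]
#include <cstdint>
static void smul_lohi_i32(int32_t a, int32_t b, int32_t &lo, int32_t &hi) {
  int64_t full = (int64_t)a * (int64_t)b;   // signed i[2*N] product
  lo = (int32_t)full;                       // low half result
  hi = (int32_t)(full >> 32);               // high half result
}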
- // SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
- // remainder result.
- SDIVREM, UDIVREM,
-
- // CARRY_FALSE - This node is used when folding other nodes,
- // like ADDC/SUBC, which indicate the carry result is always false.
- CARRY_FALSE,
-
- // Carry-setting nodes for multiple precision addition and subtraction.
- // These nodes take two operands of the same value type, and produce two
- // results. The first result is the normal add or sub result, the second
- // result is the carry flag result.
- ADDC, SUBC,
-
- // Carry-using nodes for multiple precision addition and subtraction. These
- // nodes take three operands: The first two are the normal lhs and rhs to
- // the add or sub, and the third is the input carry flag. These nodes
- // produce two results; the normal result of the add or sub, and the output
- // carry flag. These nodes both read and write a carry flag to allow them
- // to them to be chained together for add and sub of arbitrarily large
- // values.
- ADDE, SUBE,
-
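// [Illustrative sketch only: the ADDC/ADDE chaining described above, adding
// two 64-bit values held as 32-bit halves.]
#include <cstdint>
static void add_i64_as_halves(uint32_t alo, uint32_t ahi,
                              uint32_t blo, uint32_t bhi,
                              uint32_t &lo, uint32_t &hi) {
  uint64_t s = (uint64_t)alo + blo;      // ADDC: low add, produces a carry
  lo = (uint32_t)s;
  uint32_t carry = (uint32_t)(s >> 32);  // the carry-flag second result
  hi = ahi + bhi + carry;                // ADDE: high add consumes the carry
}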
- // RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
- // These nodes take two operands: the normal LHS and RHS to the add. They
- // produce two results: the normal result of the add, and a boolean that
- // indicates if an overflow occured (*not* a flag, because it may be stored
- // to memory, etc.). If the type of the boolean is not i1 then the high
- // bits conform to getBooleanContents.
- // These nodes are generated from the llvm.[su]add.with.overflow intrinsics.
- SADDO, UADDO,
-
- // Same for subtraction
- SSUBO, USUBO,
-
- // Same for multiplication
- SMULO, UMULO,
-
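// [Illustrative sketch only: the [SU]ADDO semantics above -- the second
// result is an overflow boolean that can be stored, not a CPU flag.]
#include <cstdint>
static bool saddo_i32(int32_t a, int32_t b, int32_t &sum) {
  int64_t wide = (int64_t)a + (int64_t)b;
  sum = (int32_t)wide;
  return wide < INT32_MIN || wide > INT32_MAX;  // true iff signed overflow
}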
- // Simple binary floating point operators.
- FADD, FSUB, FMUL, FDIV, FREM,
-
- // FCOPYSIGN(X, Y) - Return the value of X with the sign of Y. NOTE: This
- // DAG node does not require that X and Y have the same type, just that they
- // are both floating point. X and the result must have the same type.
- // FCOPYSIGN(f32, f64) is allowed.
- FCOPYSIGN,
-
- // INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
- // value as an integer 0/1 value.
- FGETSIGN,
-
- /// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the
- /// specified, possibly variable, elements. The number of elements is
- /// required to be a power of two. The types of the operands must all be
- /// the same and must match the vector element type, except that integer
- /// types are allowed to be larger than the element type, in which case
- /// the operands are implicitly truncated.
- BUILD_VECTOR,
-
- /// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element
- /// at IDX replaced with VAL. If the type of VAL is larger than the vector
- /// element type then VAL is truncated before replacement.
- INSERT_VECTOR_ELT,
-
- /// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR
- /// identified by the (potentially variable) element number IDX. If the
- /// return type is an integer type larger than the element type of the
- /// vector, the result is extended to the width of the return type.
- EXTRACT_VECTOR_ELT,
-
- /// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of
- /// vector type with the same length and element type, this produces a
- /// concatenated vector result value, with length equal to the sum of the
- /// lengths of the input vectors.
- CONCAT_VECTORS,
-
- /// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (an
- /// vector value) starting with the (potentially variable) element number
- /// IDX, which must be a multiple of the result vector length.
- EXTRACT_SUBVECTOR,
-
- /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
- /// VEC1/VEC2. A VECTOR_SHUFFLE node also contains an array of constant int
- /// values that indicate which value (or undef) each result element will
- /// get. These constant ints are accessible through the
- /// ShuffleVectorSDNode class. This is quite similar to the Altivec
- /// 'vperm' instruction, except that the indices must be constants and are
- /// in terms of the element size of VEC1/VEC2, not in terms of bytes.
- VECTOR_SHUFFLE,
-
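// [Illustrative sketch only: the VECTOR_SHUFFLE mask semantics above, with
// constant indices spanning both 4-element inputs; undef entries not modelled.]
static void vector_shuffle4(const int v1[4], const int v2[4],
                            const int mask[4], int out[4]) {
  for (int i = 0; i != 4; ++i)
    out[i] = mask[i] < 4 ? v1[mask[i]] : v2[mask[i] - 4];
}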
- /// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a
- /// scalar value into element 0 of the resultant vector type. The top
- /// elements 1 to N-1 of the N-element vector are undefined. The type
- /// of the operand must match the vector element type, except when they
- /// are integer types. In this case the operand is allowed to be wider
- /// than the vector element type, and is implicitly truncated to it.
- SCALAR_TO_VECTOR,
-
- // MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing
- // an unsigned/signed value of type i[2*N], then return the top part.
- MULHU, MULHS,
-
- // Bitwise operators - logical and, logical or, logical xor, shift left,
- // shift right algebraic (shift in sign bits), shift right logical (shift in
- // zeroes), rotate left, rotate right, and byteswap.
- AND, OR, XOR, SHL, SRA, SRL, ROTL, ROTR, BSWAP,
-
- // Counting operators
- CTTZ, CTLZ, CTPOP,
-
- // Select(COND, TRUEVAL, FALSEVAL). If the type of the boolean COND is not
- // i1 then the high bits must conform to getBooleanContents.
- SELECT,
-
- // Select with condition operator - This selects between a true value and
- // a false value (ops #2 and #3) based on the boolean result of comparing
- // the lhs and rhs (ops #0 and #1) of a conditional expression with the
- // condition code in op #4, a CondCodeSDNode.
- SELECT_CC,
-
- // SetCC operator - This evaluates to a true value iff the condition is
- // true. If the result value type is not i1 then the high bits conform
- // to getBooleanContents. The operands to this are the left and right
- // operands to compare (ops #0, and #1) and the condition code to compare
- // them with (op #2) as a CondCodeSDNode.
- SETCC,
-
- // RESULT = VSETCC(LHS, RHS, COND) operator - This evaluates to a vector of
- // integer elements with all bits of the result elements set to true if the
- // comparison is true or all cleared if the comparison is false. The
- // operands to this are the left and right operands to compare (LHS/RHS) and
- // the condition code to compare them with (COND) as a CondCodeSDNode.
- VSETCC,
-
- // SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
- // integer shift operations, just like ADD/SUB_PARTS. The operation
- // ordering is:
- // [Lo,Hi] = op [LoLHS,HiLHS], Amt
- SHL_PARTS, SRA_PARTS, SRL_PARTS,
-
- // Conversion operators. These are all single input single output
- // operations. For all of these, the result type must be strictly
- // wider or narrower (depending on the operation) than the source
- // type.
-
- // SIGN_EXTEND - Used for integer types, replicating the sign bit
- // into new bits.
- SIGN_EXTEND,
-
- // ZERO_EXTEND - Used for integer types, zeroing the new bits.
- ZERO_EXTEND,
-
- // ANY_EXTEND - Used for integer types. The high bits are undefined.
- ANY_EXTEND,
-
- // TRUNCATE - Completely drop the high bits.
- TRUNCATE,
-
- // [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
- // depends on the first letter) to floating point.
- SINT_TO_FP,
- UINT_TO_FP,
-
- // SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
- // sign extend a small value in a large integer register (e.g. sign
- // extending the low 8 bits of a 32-bit register to fill the top 24 bits
- // with the 7th bit). The size of the smaller type is indicated by the 1th
- // operand, a ValueType node.
- SIGN_EXTEND_INREG,
-
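// [Illustrative sketch only: the SHL/SRA pair that SIGN_EXTEND_INREG performs,
// here sign-extending the low 8 bits of an i32 in place.]
#include <cstdint>
static int32_t sign_extend_inreg_i8(int32_t v) {
  return (int32_t)((uint32_t)v << 24) >> 24;  // shift left, arithmetic shift right
}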
- /// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
- /// integer.
- FP_TO_SINT,
- FP_TO_UINT,
-
- /// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type
- /// down to the precision of the destination VT. TRUNC is a flag, which is
- /// always an integer that is zero or one. If TRUNC is 0, this is a
- /// normal rounding, if it is 1, this FP_ROUND is known to not change the
- /// value of Y.
- ///
- /// The TRUNC = 1 case is used in cases where we know that the value will
- /// not be modified by the node, because Y is not using any of the extra
- /// precision of source type. This allows certain transformations like
- /// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for
- /// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
- FP_ROUND,
-
- // FLT_ROUNDS_ - Returns current rounding mode:
- // -1 Undefined
- // 0 Round to 0
- // 1 Round to nearest
- // 2 Round to +inf
- // 3 Round to -inf
- FLT_ROUNDS_,
-
- /// X = FP_ROUND_INREG(Y, VT) - This operator takes an FP register, and
- /// rounds it to a floating point value. It then promotes it and returns it
- /// in a register of the same size. This operation effectively just
- /// discards excess precision. The type to round down to is specified by
- /// the VT operand, a VTSDNode.
- FP_ROUND_INREG,
-
- /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
- FP_EXTEND,
-
- // BIT_CONVERT - This operator converts between integer, vector and FP
- // values, as if the value was stored to memory with one type and loaded
- // from the same address with the other type (or equivalently for vector
- // format conversions, etc). The source and result are required to have
- // the same bit size (e.g. f32 <-> i32). This can also be used for
- // int-to-int or fp-to-fp conversions, but that is a noop, deleted by
- // getNode().
- BIT_CONVERT,
-
- // CONVERT_RNDSAT - This operator is used to support various conversions
- // between various types (float, signed, unsigned and vectors of those
- // types) with rounding and saturation. NOTE: Avoid using this operator as
- // most target don't support it and the operator might be removed in the
- // future. It takes the following arguments:
- // 0) value
- // 1) dest type (type to convert to)
- // 2) src type (type to convert from)
- // 3) rounding imm
- // 4) saturation imm
- // 5) ISD::CvtCode indicating the type of conversion to do
- CONVERT_RNDSAT,
-
- // FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
- // FLOG, FLOG2, FLOG10, FEXP, FEXP2,
- // FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR - Perform various unary floating
- // point operations. These are inspired by libm.
- FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
- FLOG, FLOG2, FLOG10, FEXP, FEXP2,
- FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR,
-
- // LOAD and STORE have token chains as their first operand, then the same
- // operands as an LLVM load/store instruction, then an offset node that
- // is added / subtracted from the base pointer to form the address (for
- // indexed memory ops).
- LOAD, STORE,
-
- // DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
- // to a specified boundary. This node always has two return values: a new
- // stack pointer value and a chain. The first operand is the token chain,
- // the second is the number of bytes to allocate, and the third is the
- // alignment boundary. The size is guaranteed to be a multiple of the stack
- // alignment, and the alignment is guaranteed to be bigger than the stack
- // alignment (if required) or 0 to get standard stack alignment.
- DYNAMIC_STACKALLOC,
-
- // Control flow instructions. These all have token chains.
-
- // BR - Unconditional branch. The first operand is the chain
- // operand, the second is the MBB to branch to.
- BR,
-
- // BRIND - Indirect branch. The first operand is the chain, the second
- // is the value to branch to, which must be of the same type as the target's
- // pointer type.
- BRIND,
-
- // BR_JT - Jumptable branch. The first operand is the chain, the second
- // is the jumptable index, the last one is the jumptable entry index.
- BR_JT,
-
- // BRCOND - Conditional branch. The first operand is the chain, the
- // second is the condition, the third is the block to branch to if the
- // condition is true. If the type of the condition is not i1, then the
- // high bits must conform to getBooleanContents.
- BRCOND,
-
- // BR_CC - Conditional branch. The behavior is like that of SELECT_CC, in
- // that the condition is represented as condition code, and two nodes to
- // compare, rather than as a combined SetCC node. The operands in order are
- // chain, cc, lhs, rhs, block to branch to if condition is true.
- BR_CC,
-
- // INLINEASM - Represents an inline asm block. This node always has two
- // return values: a chain and a flag result. The inputs are as follows:
- // Operand #0 : Input chain.
- // Operand #1 : a ExternalSymbolSDNode with a pointer to the asm string.
- // Operand #2n+2: A RegisterNode.
- // Operand #2n+3: A TargetConstant, indicating if the reg is a use/def
- // Operand #last: Optional, an incoming flag.
- INLINEASM,
-
- // EH_LABEL - Represents a label in mid basic block used to track
- // locations needed for debug and exception handling tables. These nodes
- // take a chain as input and return a chain.
- EH_LABEL,
-
- // STACKSAVE - STACKSAVE has one operand, an input chain. It produces a
- // value, the same type as the pointer type for the system, and an output
- // chain.
- STACKSAVE,
-
- // STACKRESTORE has two operands, an input chain and a pointer to restore to
- // it returns an output chain.
- STACKRESTORE,
-
- // CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of
- // a call sequence, and carry arbitrary information that target might want
- // to know. The first operand is a chain, the rest are specified by the
- // target and not touched by the DAG optimizers.
- // CALLSEQ_START..CALLSEQ_END pairs may not be nested.
- CALLSEQ_START, // Beginning of a call sequence
- CALLSEQ_END, // End of a call sequence
-
- // VAARG - VAARG has three operands: an input chain, a pointer, and a
- // SRCVALUE. It returns a pair of values: the vaarg value and a new chain.
- VAARG,
-
- // VACOPY - VACOPY has five operands: an input chain, a destination pointer,
- // a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
- // source.
- VACOPY,
-
- // VAEND, VASTART - VAEND and VASTART have three operands: an input chain, a
- // pointer, and a SRCVALUE.
- VAEND, VASTART,
-
- // SRCVALUE - This is a node type that holds a Value* that is used to
- // make reference to a value in the LLVM IR.
- SRCVALUE,
-
- // PCMARKER - This corresponds to the pcmarker intrinsic.
- PCMARKER,
-
- // READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
- // The only operand is a chain and a value and a chain are produced. The
- // value is the contents of the architecture specific cycle counter like
- // register (or other high accuracy low latency clock source)
- READCYCLECOUNTER,
-
- // HANDLENODE node - Used as a handle for various purposes.
- HANDLENODE,
-
- // TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
- // It takes as input a token chain, the pointer to the trampoline,
- // the pointer to the nested function, the pointer to pass for the
- // 'nest' parameter, a SRCVALUE for the trampoline and another for
- // the nested function (allowing targets to access the original
- // Function*). It produces the result of the intrinsic and a token
- // chain as output.
- TRAMPOLINE,
-
- // TRAP - Trapping instruction
- TRAP,
-
- // PREFETCH - This corresponds to a prefetch intrinsic. It takes chains are
- // their first operand. The other operands are the address to prefetch,
- // read / write specifier, and locality specifier.
- PREFETCH,
-
- // OUTCHAIN = MEMBARRIER(INCHAIN, load-load, load-store, store-load,
- // store-store, device)
- // This corresponds to the memory.barrier intrinsic.
- // it takes an input chain, 4 operands to specify the type of barrier, an
- // operand specifying if the barrier applies to device and uncached memory
- // and produces an output chain.
- MEMBARRIER,
-
- // Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
- // this corresponds to the atomic.lcs intrinsic.
- // cmp is compared to *ptr, and if equal, swap is stored in *ptr.
- // the return is always the original value in *ptr
- ATOMIC_CMP_SWAP,
-
- // Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
- // this corresponds to the atomic.swap intrinsic.
- // amt is stored to *ptr atomically.
- // the return is always the original value in *ptr
- ATOMIC_SWAP,
-
- // Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
- // this corresponds to the atomic.load.[OpName] intrinsic.
- // op(*ptr, amt) is stored to *ptr atomically.
- // the return is always the original value in *ptr
- ATOMIC_LOAD_ADD,
- ATOMIC_LOAD_SUB,
- ATOMIC_LOAD_AND,
- ATOMIC_LOAD_OR,
- ATOMIC_LOAD_XOR,
- ATOMIC_LOAD_NAND,
- ATOMIC_LOAD_MIN,
- ATOMIC_LOAD_MAX,
- ATOMIC_LOAD_UMIN,
- ATOMIC_LOAD_UMAX,
-
- /// BUILTIN_OP_END - This must be the last enum value in this list.
- /// The target-specific pre-isel opcode values start here.
- BUILTIN_OP_END
- };
-
- /// FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations
- /// which do not reference a specific memory location should be less than
- /// this value. Those that do must not be less than this value, and can
- /// be used with SelectionDAG::getMemIntrinsicNode.
- static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END+80;
-
/// Node predicates
/// isBuildVectorAllOnes - Return true if the specified node is a
@@ -625,174 +72,7 @@ namespace ISD {
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool isScalarToVector(const SDNode *N);
-
- //===--------------------------------------------------------------------===//
- /// MemIndexedMode enum - This enum defines the load / store indexed
- /// addressing modes.
- ///
- /// UNINDEXED "Normal" load / store. The effective address is already
- /// computed and is available in the base pointer. The offset
- /// operand is always undefined. In addition to producing a
- /// chain, an unindexed load produces one value (result of the
- /// load); an unindexed store does not produce a value.
- ///
- /// PRE_INC Similar to the unindexed mode where the effective address is
- /// PRE_DEC the value of the base pointer add / subtract the offset.
- /// It considers the computation as being folded into the load /
- /// store operation (i.e. the load / store does the address
- /// computation as well as performing the memory transaction).
- /// The base operand is always undefined. In addition to
- /// producing a chain, pre-indexed load produces two values
- /// (result of the load and the result of the address
- /// computation); a pre-indexed store produces one value (result
- /// of the address computation).
- ///
- /// POST_INC The effective address is the value of the base pointer. The
- /// POST_DEC value of the offset operand is then added to / subtracted
- /// from the base after memory transaction. In addition to
- /// producing a chain, post-indexed load produces two values
- /// (the result of the load and the result of the base +/- offset
- /// computation); a post-indexed store produces one value (the
- /// the result of the base +/- offset computation).
- ///
- enum MemIndexedMode {
- UNINDEXED = 0,
- PRE_INC,
- PRE_DEC,
- POST_INC,
- POST_DEC,
- LAST_INDEXED_MODE
- };
-
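// [Illustrative sketch only: the pre- and post-indexed forms described above,
// written out in plain C++ terms for an int load.]
static int pre_inc_load(int *&base, int offset) {
  base += offset;   // address computation folded into the load (2nd result)
  return *base;     // loaded value (1st result)
}
static int post_inc_load(int *&base, int offset) {
  int v = *base;    // load uses the unmodified base
  base += offset;   // new base produced as the 2nd result
  return v;
}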
- //===--------------------------------------------------------------------===//
- /// LoadExtType enum - This enum defines the three variants of LOADEXT
- /// (load with extension).
- ///
- /// SEXTLOAD loads the integer operand and sign extends it to a larger
- /// integer result type.
- /// ZEXTLOAD loads the integer operand and zero extends it to a larger
- /// integer result type.
- /// EXTLOAD is used for three things: floating point extending loads,
- /// integer extending loads [the top bits are undefined], and vector
- /// extending loads [load into low elt].
- ///
- enum LoadExtType {
- NON_EXTLOAD = 0,
- EXTLOAD,
- SEXTLOAD,
- ZEXTLOAD,
- LAST_LOADEXT_TYPE
- };
-
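// [Illustrative sketch only: SEXTLOAD vs ZEXTLOAD of an i8 into an i32 result.]
#include <cstdint>
static int32_t  sextload_i8(const int8_t  *p) { return *p; }  // sign extended
static uint32_t zextload_i8(const uint8_t *p) { return *p; }  // zero extended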
- //===--------------------------------------------------------------------===//
- /// ISD::CondCode enum - These are ordered carefully to make the bitfields
- /// below work out, when considering SETFALSE (something that never exists
- /// dynamically) as 0. "U" -> Unsigned (for integer operands) or Unordered
- /// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal
- /// to. If the "N" column is 1, the result of the comparison is undefined if
- /// the input is a NAN.
- ///
- /// All of these (except for the 'always folded ops') should be handled for
- /// floating point. For integer, only the SETEQ,SETNE,SETLT,SETLE,SETGT,
- /// SETGE,SETULT,SETULE,SETUGT, and SETUGE opcodes are used.
- ///
- /// Note that these are laid out in a specific order to allow bit-twiddling
- /// to transform conditions.
- enum CondCode {
- // Opcode N U L G E Intuitive operation
- SETFALSE, // 0 0 0 0 Always false (always folded)
- SETOEQ, // 0 0 0 1 True if ordered and equal
- SETOGT, // 0 0 1 0 True if ordered and greater than
- SETOGE, // 0 0 1 1 True if ordered and greater than or equal
- SETOLT, // 0 1 0 0 True if ordered and less than
- SETOLE, // 0 1 0 1 True if ordered and less than or equal
- SETONE, // 0 1 1 0 True if ordered and operands are unequal
- SETO, // 0 1 1 1 True if ordered (no nans)
- SETUO, // 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
- SETUEQ, // 1 0 0 1 True if unordered or equal
- SETUGT, // 1 0 1 0 True if unordered or greater than
- SETUGE, // 1 0 1 1 True if unordered, greater than, or equal
- SETULT, // 1 1 0 0 True if unordered or less than
- SETULE, // 1 1 0 1 True if unordered, less than, or equal
- SETUNE, // 1 1 1 0 True if unordered or not equal
- SETTRUE, // 1 1 1 1 Always true (always folded)
- // Don't care operations: undefined if the input is a nan.
- SETFALSE2, // 1 X 0 0 0 Always false (always folded)
- SETEQ, // 1 X 0 0 1 True if equal
- SETGT, // 1 X 0 1 0 True if greater than
- SETGE, // 1 X 0 1 1 True if greater than or equal
- SETLT, // 1 X 1 0 0 True if less than
- SETLE, // 1 X 1 0 1 True if less than or equal
- SETNE, // 1 X 1 1 0 True if not equal
- SETTRUE2, // 1 X 1 1 1 Always true (always folded)
-
- SETCC_INVALID // Marker value.
- };
-
- /// isSignedIntSetCC - Return true if this is a setcc instruction that
- /// performs a signed comparison when used with integer operands.
- inline bool isSignedIntSetCC(CondCode Code) {
- return Code == SETGT || Code == SETGE || Code == SETLT || Code == SETLE;
- }
-
- /// isUnsignedIntSetCC - Return true if this is a setcc instruction that
- /// performs an unsigned comparison when used with integer operands.
- inline bool isUnsignedIntSetCC(CondCode Code) {
- return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
- }
-
- /// isTrueWhenEqual - Return true if the specified condition returns true if
- /// the two operands to the condition are equal. Note that if one of the two
- /// operands is a NaN, this value is meaningless.
- inline bool isTrueWhenEqual(CondCode Cond) {
- return ((int)Cond & 1) != 0;
- }
-
- /// getUnorderedFlavor - This function returns 0 if the condition is always
- /// false if an operand is a NaN, 1 if the condition is always true if the
- /// operand is a NaN, and 2 if the condition is undefined if the operand is a
- /// NaN.
- inline unsigned getUnorderedFlavor(CondCode Cond) {
- return ((int)Cond >> 3) & 3;
- }
-
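// [Illustrative sketch only: decoding the N/U/L/G/E bit layout tabulated
// above, with E as bit 0, G bit 1, L bit 2, U bit 3, N bit 4.]
static bool cc_true_when_equal(unsigned cc)      { return (cc & 1) != 0; }  // E bit, cf. isTrueWhenEqual
static unsigned cc_unordered_flavor(unsigned cc) { return (cc >> 3) & 3; }  // cf. getUnorderedFlavor
// Example: SETOGE == 0b0011 -> true when the operands are equal, and flavor 0
// (always false if an operand is a NaN).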
- /// getSetCCInverse - Return the operation corresponding to !(X op Y), where
- /// 'op' is a valid SetCC operation.
- CondCode getSetCCInverse(CondCode Operation, bool isInteger);
-
- /// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
- /// when given the operation for (X op Y).
- CondCode getSetCCSwappedOperands(CondCode Operation);
-
- /// getSetCCOrOperation - Return the result of a logical OR between different
- /// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This
- /// function returns SETCC_INVALID if it is not possible to represent the
- /// resultant comparison.
- CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, bool isInteger);
-
- /// getSetCCAndOperation - Return the result of a logical AND between
- /// different comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
- /// function returns SETCC_INVALID if it is not possible to represent the
- /// resultant comparison.
- CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, bool isInteger);
-
- //===--------------------------------------------------------------------===//
- /// CvtCode enum - This enum defines the various converts CONVERT_RNDSAT
- /// supports.
- enum CvtCode {
- CVT_FF, // Float from Float
- CVT_FS, // Float from Signed
- CVT_FU, // Float from Unsigned
- CVT_SF, // Signed from Float
- CVT_UF, // Unsigned from Float
- CVT_SS, // Signed from Signed
- CVT_SU, // Signed from Unsigned
- CVT_US, // Unsigned from Signed
- CVT_UU, // Unsigned from Unsigned
- CVT_INVALID // Marker - Invalid opcode
- };
-} // end llvm::ISD namespace
-
+} // end llvm:ISD namespace
//===----------------------------------------------------------------------===//
/// SDValue - Unlike LLVM values, Selection DAG nodes may return multiple
@@ -1269,6 +549,15 @@ public:
return FoundNode;
}
+ /// getFlaggedUser - If this node has a flag value with a user, return
+ /// the user (there is at most one). Otherwise return NULL.
+ SDNode *getFlaggedUser() const {
+ for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
+ if (UI.getUse().get().getValueType() == MVT::Flag)
+ return *UI;
+ return 0;
+ }
+
/// getNumValues - Return the number of values defined/returned by this
/// operator.
///
@@ -1383,9 +672,9 @@ protected:
/// This constructor adds no operands itself; operands can be
/// set later with InitOperands.
SDNode(unsigned Opc, const DebugLoc dl, SDVTList VTs)
- : NodeType(Opc), OperandsNeedDelete(false), SubclassData(0),
- NodeId(-1), OperandList(0), ValueList(VTs.VTs), UseList(NULL),
- NumOperands(0), NumValues(VTs.NumVTs),
+ : NodeType(Opc), OperandsNeedDelete(false), HasDebugValue(false),
+ SubclassData(0), NodeId(-1), OperandList(0), ValueList(VTs.VTs),
+ UseList(NULL), NumOperands(0), NumValues(VTs.NumVTs),
debugLoc(dl) {}
/// InitOperands - Initialize the operands list of this with 1 operand.
@@ -1557,13 +846,12 @@ class HandleSDNode : public SDNode {
public:
// FIXME: Remove the "noinline" attribute once <rdar://problem/5852746> is
// fixed.
-#ifdef __GNUC__
+#if __GNUC__==4 && __GNUC_MINOR__==2 && defined(__APPLE__) && !defined(__llvm__)
explicit __attribute__((__noinline__)) HandleSDNode(SDValue X)
#else
explicit HandleSDNode(SDValue X)
#endif
- : SDNode(ISD::HANDLENODE, DebugLoc::getUnknownLoc(),
- getSDVTList(MVT::Other)) {
+ : SDNode(ISD::HANDLENODE, DebugLoc(), getSDVTList(MVT::Other)) {
InitOperands(&Op, X);
}
~HandleSDNode();
@@ -1794,7 +1082,7 @@ class ConstantSDNode : public SDNode {
friend class SelectionDAG;
ConstantSDNode(bool isTarget, const ConstantInt *val, EVT VT)
: SDNode(isTarget ? ISD::TargetConstant : ISD::Constant,
- DebugLoc::getUnknownLoc(), getSDVTList(VT)), Value(val) {
+ DebugLoc(), getSDVTList(VT)), Value(val) {
}
public:
@@ -1803,6 +1091,7 @@ public:
uint64_t getZExtValue() const { return Value->getZExtValue(); }
int64_t getSExtValue() const { return Value->getSExtValue(); }
+ bool isOne() const { return Value->isOne(); }
bool isNullValue() const { return Value->isNullValue(); }
bool isAllOnesValue() const { return Value->isAllOnesValue(); }
@@ -1818,7 +1107,7 @@ class ConstantFPSDNode : public SDNode {
friend class SelectionDAG;
ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
: SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP,
- DebugLoc::getUnknownLoc(), getSDVTList(VT)), Value(val) {
+ DebugLoc(), getSDVTList(VT)), Value(val) {
}
public:
@@ -1851,7 +1140,7 @@ public:
}
bool isExactlyValue(const APFloat& V) const;
- bool isValueValidForType(EVT VT, const APFloat& Val);
+ static bool isValueValidForType(EVT VT, const APFloat& Val);
static bool classof(const ConstantFPSDNode *) { return true; }
static bool classof(const SDNode *N) {
@@ -1861,15 +1150,15 @@ public:
};
class GlobalAddressSDNode : public SDNode {
- GlobalValue *TheGlobal;
+ const GlobalValue *TheGlobal;
int64_t Offset;
unsigned char TargetFlags;
friend class SelectionDAG;
- GlobalAddressSDNode(unsigned Opc, const GlobalValue *GA, EVT VT,
+ GlobalAddressSDNode(unsigned Opc, DebugLoc DL, const GlobalValue *GA, EVT VT,
int64_t o, unsigned char TargetFlags);
public:
- GlobalValue *getGlobal() const { return TheGlobal; }
+ const GlobalValue *getGlobal() const { return TheGlobal; }
int64_t getOffset() const { return Offset; }
unsigned char getTargetFlags() const { return TargetFlags; }
// Return the address space this GlobalAddress belongs to.
@@ -1889,7 +1178,7 @@ class FrameIndexSDNode : public SDNode {
friend class SelectionDAG;
FrameIndexSDNode(int fi, EVT VT, bool isTarg)
: SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
- DebugLoc::getUnknownLoc(), getSDVTList(VT)), FI(fi) {
+ DebugLoc(), getSDVTList(VT)), FI(fi) {
}
public:
@@ -1908,7 +1197,7 @@ class JumpTableSDNode : public SDNode {
friend class SelectionDAG;
JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned char TF)
: SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
- DebugLoc::getUnknownLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
+ DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
}
public:
@@ -1924,17 +1213,17 @@ public:
class ConstantPoolSDNode : public SDNode {
union {
- Constant *ConstVal;
+ const Constant *ConstVal;
MachineConstantPoolValue *MachineCPVal;
} Val;
int Offset; // It's a MachineConstantPoolValue if top bit is set.
unsigned Alignment; // Minimum alignment requirement of CP (not log2 value).
unsigned char TargetFlags;
friend class SelectionDAG;
- ConstantPoolSDNode(bool isTarget, Constant *c, EVT VT, int o, unsigned Align,
- unsigned char TF)
+ ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
+ unsigned Align, unsigned char TF)
: SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool,
- DebugLoc::getUnknownLoc(),
+ DebugLoc(),
getSDVTList(VT)), Offset(o), Alignment(Align), TargetFlags(TF) {
assert((int)Offset >= 0 && "Offset is too large");
Val.ConstVal = c;
@@ -1942,7 +1231,7 @@ class ConstantPoolSDNode : public SDNode {
ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v,
EVT VT, int o, unsigned Align, unsigned char TF)
: SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool,
- DebugLoc::getUnknownLoc(),
+ DebugLoc(),
getSDVTList(VT)), Offset(o), Alignment(Align), TargetFlags(TF) {
assert((int)Offset >= 0 && "Offset is too large");
Val.MachineCPVal = v;
@@ -1955,7 +1244,7 @@ public:
return (int)Offset < 0;
}
- Constant *getConstVal() const {
+ const Constant *getConstVal() const {
assert(!isMachineConstantPoolEntry() && "Wrong constantpool type");
return Val.ConstVal;
}
@@ -1990,8 +1279,7 @@ class BasicBlockSDNode : public SDNode {
/// blocks out of order when they're jumped to, which makes it a bit
/// harder. Let's see if we need it first.
explicit BasicBlockSDNode(MachineBasicBlock *mbb)
- : SDNode(ISD::BasicBlock, DebugLoc::getUnknownLoc(),
- getSDVTList(MVT::Other)), MBB(mbb) {
+ : SDNode(ISD::BasicBlock, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb) {
}
public:
@@ -2037,8 +1325,7 @@ class SrcValueSDNode : public SDNode {
friend class SelectionDAG;
/// Create a SrcValue for a general value.
explicit SrcValueSDNode(const Value *v)
- : SDNode(ISD::SRCVALUE, DebugLoc::getUnknownLoc(),
- getSDVTList(MVT::Other)), V(v) {}
+ : SDNode(ISD::SRCVALUE, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
public:
/// getValue - return the contained Value.
@@ -2049,14 +1336,28 @@ public:
return N->getOpcode() == ISD::SRCVALUE;
}
};
+
+class MDNodeSDNode : public SDNode {
+ const MDNode *MD;
+ friend class SelectionDAG;
+ explicit MDNodeSDNode(const MDNode *md)
+ : SDNode(ISD::MDNODE_SDNODE, DebugLoc(), getSDVTList(MVT::Other)), MD(md) {}
+public:
+
+ const MDNode *getMD() const { return MD; }
+
+ static bool classof(const MDNodeSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::MDNODE_SDNODE;
+ }
+};
class RegisterSDNode : public SDNode {
unsigned Reg;
friend class SelectionDAG;
RegisterSDNode(unsigned reg, EVT VT)
- : SDNode(ISD::Register, DebugLoc::getUnknownLoc(),
- getSDVTList(VT)), Reg(reg) {
+ : SDNode(ISD::Register, DebugLoc(), getSDVTList(VT)), Reg(reg) {
}
public:
@@ -2069,16 +1370,16 @@ public:
};
class BlockAddressSDNode : public SDNode {
- BlockAddress *BA;
+ const BlockAddress *BA;
unsigned char TargetFlags;
friend class SelectionDAG;
- BlockAddressSDNode(unsigned NodeTy, EVT VT, BlockAddress *ba,
+ BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
unsigned char Flags)
- : SDNode(NodeTy, DebugLoc::getUnknownLoc(), getSDVTList(VT)),
+ : SDNode(NodeTy, DebugLoc(), getSDVTList(VT)),
BA(ba), TargetFlags(Flags) {
}
public:
- BlockAddress *getBlockAddress() const { return BA; }
+ const BlockAddress *getBlockAddress() const { return BA; }
unsigned char getTargetFlags() const { return TargetFlags; }
static bool classof(const BlockAddressSDNode *) { return true; }
@@ -2088,18 +1389,18 @@ public:
}
};
-class LabelSDNode : public SDNode {
+class EHLabelSDNode : public SDNode {
SDUse Chain;
- unsigned LabelID;
+ MCSymbol *Label;
friend class SelectionDAG;
- LabelSDNode(unsigned NodeTy, DebugLoc dl, SDValue ch, unsigned id)
- : SDNode(NodeTy, dl, getSDVTList(MVT::Other)), LabelID(id) {
+ EHLabelSDNode(DebugLoc dl, SDValue ch, MCSymbol *L)
+ : SDNode(ISD::EH_LABEL, dl, getSDVTList(MVT::Other)), Label(L) {
InitOperands(&Chain, ch);
}
public:
- unsigned getLabelID() const { return LabelID; }
+ MCSymbol *getLabel() const { return Label; }
- static bool classof(const LabelSDNode *) { return true; }
+ static bool classof(const EHLabelSDNode *) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::EH_LABEL;
}
@@ -2112,8 +1413,7 @@ class ExternalSymbolSDNode : public SDNode {
friend class SelectionDAG;
ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned char TF, EVT VT)
: SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol,
- DebugLoc::getUnknownLoc(),
- getSDVTList(VT)), Symbol(Sym), TargetFlags(TF) {
+ DebugLoc(), getSDVTList(VT)), Symbol(Sym), TargetFlags(TF) {
}
public:
@@ -2131,8 +1431,8 @@ class CondCodeSDNode : public SDNode {
ISD::CondCode Condition;
friend class SelectionDAG;
explicit CondCodeSDNode(ISD::CondCode Cond)
- : SDNode(ISD::CONDCODE, DebugLoc::getUnknownLoc(),
- getSDVTList(MVT::Other)), Condition(Cond) {
+ : SDNode(ISD::CONDCODE, DebugLoc(), getSDVTList(MVT::Other)),
+ Condition(Cond) {
}
public:
@@ -2164,133 +1464,14 @@ public:
}
};
-namespace ISD {
- struct ArgFlagsTy {
- private:
- static const uint64_t NoFlagSet = 0ULL;
- static const uint64_t ZExt = 1ULL<<0; ///< Zero extended
- static const uint64_t ZExtOffs = 0;
- static const uint64_t SExt = 1ULL<<1; ///< Sign extended
- static const uint64_t SExtOffs = 1;
- static const uint64_t InReg = 1ULL<<2; ///< Passed in register
- static const uint64_t InRegOffs = 2;
- static const uint64_t SRet = 1ULL<<3; ///< Hidden struct-ret ptr
- static const uint64_t SRetOffs = 3;
- static const uint64_t ByVal = 1ULL<<4; ///< Struct passed by value
- static const uint64_t ByValOffs = 4;
- static const uint64_t Nest = 1ULL<<5; ///< Nested fn static chain
- static const uint64_t NestOffs = 5;
- static const uint64_t ByValAlign = 0xFULL << 6; //< Struct alignment
- static const uint64_t ByValAlignOffs = 6;
- static const uint64_t Split = 1ULL << 10;
- static const uint64_t SplitOffs = 10;
- static const uint64_t OrigAlign = 0x1FULL<<27;
- static const uint64_t OrigAlignOffs = 27;
- static const uint64_t ByValSize = 0xffffffffULL << 32; //< Struct size
- static const uint64_t ByValSizeOffs = 32;
-
- static const uint64_t One = 1ULL; //< 1 of this type, for shifts
-
- uint64_t Flags;
- public:
- ArgFlagsTy() : Flags(0) { }
-
- bool isZExt() const { return Flags & ZExt; }
- void setZExt() { Flags |= One << ZExtOffs; }
-
- bool isSExt() const { return Flags & SExt; }
- void setSExt() { Flags |= One << SExtOffs; }
-
- bool isInReg() const { return Flags & InReg; }
- void setInReg() { Flags |= One << InRegOffs; }
-
- bool isSRet() const { return Flags & SRet; }
- void setSRet() { Flags |= One << SRetOffs; }
-
- bool isByVal() const { return Flags & ByVal; }
- void setByVal() { Flags |= One << ByValOffs; }
-
- bool isNest() const { return Flags & Nest; }
- void setNest() { Flags |= One << NestOffs; }
-
- unsigned getByValAlign() const {
- return (unsigned)
- ((One << ((Flags & ByValAlign) >> ByValAlignOffs)) / 2);
- }
- void setByValAlign(unsigned A) {
- Flags = (Flags & ~ByValAlign) |
- (uint64_t(Log2_32(A) + 1) << ByValAlignOffs);
- }
-
- bool isSplit() const { return Flags & Split; }
- void setSplit() { Flags |= One << SplitOffs; }
-
- unsigned getOrigAlign() const {
- return (unsigned)
- ((One << ((Flags & OrigAlign) >> OrigAlignOffs)) / 2);
- }
- void setOrigAlign(unsigned A) {
- Flags = (Flags & ~OrigAlign) |
- (uint64_t(Log2_32(A) + 1) << OrigAlignOffs);
- }
-
- unsigned getByValSize() const {
- return (unsigned)((Flags & ByValSize) >> ByValSizeOffs);
- }
- void setByValSize(unsigned S) {
- Flags = (Flags & ~ByValSize) | (uint64_t(S) << ByValSizeOffs);
- }
-
- /// getArgFlagsString - Returns the flags as a string, eg: "zext align:4".
- std::string getArgFlagsString();
-
- /// getRawBits - Represent the flags as a bunch of bits.
- uint64_t getRawBits() const { return Flags; }
- };
-
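// [Illustrative sketch only: the ByValAlign/OrigAlign encoding used above --
// the bitfield stores log2(align)+1, and 0 means no alignment recorded.]
static unsigned log2_u32(unsigned v)     { unsigned r = 0; while (v >>= 1) ++r; return r; }
static unsigned encode_align(unsigned a) { return log2_u32(a) + 1; }  // value placed in the field
static unsigned decode_align(unsigned f) { return (1u << f) / 2; }    // cf. getByValAlign/getOrigAlign
// decode_align(encode_align(16)) == 16, and decode_align(0) == 0.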
- /// InputArg - This struct carries flags and type information about a
- /// single incoming (formal) argument or incoming (from the perspective
- /// of the caller) return value virtual register.
- ///
- struct InputArg {
- ArgFlagsTy Flags;
- EVT VT;
- bool Used;
-
- InputArg() : VT(MVT::Other), Used(false) {}
- InputArg(ISD::ArgFlagsTy flags, EVT vt, bool used)
- : Flags(flags), VT(vt), Used(used) {
- assert(VT.isSimple() &&
- "InputArg value type must be Simple!");
- }
- };
-
- /// OutputArg - This struct carries flags and a value for a
- /// single outgoing (actual) argument or outgoing (from the perspective
- /// of the caller) return value virtual register.
- ///
- struct OutputArg {
- ArgFlagsTy Flags;
- SDValue Val;
- bool IsFixed;
-
- OutputArg() : IsFixed(false) {}
- OutputArg(ISD::ArgFlagsTy flags, SDValue val, bool isfixed)
- : Flags(flags), Val(val), IsFixed(isfixed) {
- assert(Val.getValueType().isSimple() &&
- "OutputArg value type must be Simple!");
- }
- };
-}
-
/// VTSDNode - This class is used to represent EVT's, which are used
/// to parameterize some operations.
class VTSDNode : public SDNode {
EVT ValueType;
friend class SelectionDAG;
explicit VTSDNode(EVT VT)
- : SDNode(ISD::VALUETYPE, DebugLoc::getUnknownLoc(),
- getSDVTList(MVT::Other)), ValueType(VT) {
+ : SDNode(ISD::VALUETYPE, DebugLoc(), getSDVTList(MVT::Other)),
+ ValueType(VT) {
}
public:
@@ -2586,7 +1767,6 @@ namespace ISD {
}
}
-
} // end llvm namespace
#endif
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/SlotIndexes.h b/libclamav/c++/llvm/include/llvm/CodeGen/SlotIndexes.h
index dd4caba..88044c7 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/SlotIndexes.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/SlotIndexes.h
@@ -22,13 +22,13 @@
#ifndef LLVM_CODEGEN_SLOTINDEXES_H
#define LLVM_CODEGEN_SLOTINDEXES_H
-#include "llvm/ADT/PointerIntPair.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/Allocator.h"
-#include "llvm/Support/ErrorHandling.h"
namespace llvm {
@@ -37,8 +37,6 @@ namespace llvm {
/// SlotIndex & SlotIndexes classes for the public interface to this
/// information.
class IndexListEntry {
- private:
-
static const unsigned EMPTY_KEY_INDEX = ~0U & ~3U,
TOMBSTONE_KEY_INDEX = ~0U & ~7U;
@@ -66,10 +64,9 @@ namespace llvm {
public:
IndexListEntry(MachineInstr *mi, unsigned index) : mi(mi), index(index) {
- if (index == EMPTY_KEY_INDEX || index == TOMBSTONE_KEY_INDEX) {
- llvm_report_error("Attempt to create invalid index. "
- "Available indexes may have been exhausted?.");
- }
+ assert(index != EMPTY_KEY_INDEX && index != TOMBSTONE_KEY_INDEX &&
+ "Attempt to create invalid index. "
+ "Available indexes may have been exhausted?.");
}
bool isValid() const {
@@ -131,7 +128,8 @@ namespace llvm {
friend class SlotIndexes;
friend struct DenseMapInfo<SlotIndex>;
- private:
+ enum Slot { LOAD, USE, DEF, STORE, NUM };
+
static const unsigned PHI_BIT = 1 << 2;
PointerIntPair<IndexListEntry*, 3, unsigned> lie;
@@ -149,6 +147,11 @@ namespace llvm {
return entry().getIndex() | getSlot();
}
+ /// Returns the slot for this SlotIndex.
+ Slot getSlot() const {
+ return static_cast<Slot>(lie.getInt() & ~PHI_BIT);
+ }
+
static inline unsigned getHashValue(const SlotIndex &v) {
IndexListEntry *ptrVal = &v.entry();
return (unsigned((intptr_t)ptrVal) >> 4) ^
@@ -156,11 +159,6 @@ namespace llvm {
}
public:
-
- // FIXME: Ugh. This is public because LiveIntervalAnalysis is still using it
- // for some spill weight stuff. Fix that, then make this private.
- enum Slot { LOAD, USE, DEF, STORE, NUM };
-
static inline SlotIndex getEmptyKey() {
return SlotIndex(IndexListEntry::getEmptyKeyEntry(), 0);
}
@@ -238,16 +236,31 @@ namespace llvm {
return other.getIndex() - getIndex();
}
- /// Returns the slot for this SlotIndex.
- Slot getSlot() const {
- return static_cast<Slot>(lie.getInt() & ~PHI_BIT);
- }
-
/// Returns the state of the PHI bit.
bool isPHI() const {
return lie.getInt() & PHI_BIT;
}
+ /// isLoad - Return true if this is a LOAD slot.
+ bool isLoad() const {
+ return getSlot() == LOAD;
+ }
+
+ /// isDef - Return true if this is a DEF slot.
+ bool isDef() const {
+ return getSlot() == DEF;
+ }
+
+ /// isUse - Return true if this is a USE slot.
+ bool isUse() const {
+ return getSlot() == USE;
+ }
+
+ /// isStore - Return true if this is a STORE slot.
+ bool isStore() const {
+ return getSlot() == STORE;
+ }
+
/// Returns the base index for associated with this index. The base index
/// is the one associated with the LOAD slot for the instruction pointed to
/// by this index.
@@ -478,7 +491,7 @@ namespace llvm {
public:
static char ID;
- SlotIndexes() : MachineFunctionPass(&ID), indexListHead(0) {}
+ SlotIndexes() : MachineFunctionPass(ID), indexListHead(0) {}
virtual void getAnalysisUsage(AnalysisUsage &au) const;
virtual void releaseMemory();
@@ -497,6 +510,11 @@ namespace llvm {
return SlotIndex(front(), 0);
}
+ /// Returns the base index of the last slot in this analysis.
+ SlotIndex getLastIndex() {
+ return SlotIndex(back(), 0);
+ }
+
/// Returns the invalid index marker for this analysis.
SlotIndex getInvalidIndex() {
return getZeroIndex();
@@ -667,15 +685,20 @@ namespace llvm {
MachineBasicBlock::iterator miItr(mi);
bool needRenumber = false;
IndexListEntry *newEntry;
-
+ // Get previous index, considering that not all instructions are indexed.
IndexListEntry *prevEntry;
- if (miItr == mbb->begin()) {
+ for (;;) {
// If mi is at the mbb beginning, get the prev index from the mbb.
- prevEntry = &mbbRangeItr->second.first.entry();
- } else {
- // Otherwise get it from the previous instr.
- MachineBasicBlock::iterator pItr(prior(miItr));
- prevEntry = &getInstructionIndex(pItr).entry();
+ if (miItr == mbb->begin()) {
+ prevEntry = &mbbRangeItr->second.first.entry();
+ break;
+ }
+ // Otherwise rewind until we find a mapped instruction.
+ Mi2IndexMap::const_iterator itr = mi2iMap.find(--miItr);
+ if (itr != mi2iMap.end()) {
+ prevEntry = &itr->second.entry();
+ break;
+ }
}
// Get next entry from previous entry.
@@ -761,6 +784,47 @@ namespace llvm {
mi2iMap.insert(std::make_pair(newMI, replaceBaseIndex));
}
+ /// Add the given MachineBasicBlock into the maps.
+ void insertMBBInMaps(MachineBasicBlock *mbb) {
+ MachineFunction::iterator nextMBB =
+ llvm::next(MachineFunction::iterator(mbb));
+ IndexListEntry *startEntry = createEntry(0, 0);
+ IndexListEntry *terminatorEntry = createEntry(0, 0);
+ IndexListEntry *nextEntry = 0;
+
+ if (nextMBB == mbb->getParent()->end()) {
+ nextEntry = getTail();
+ } else {
+ nextEntry = &getMBBStartIdx(nextMBB).entry();
+ }
+
+ insert(nextEntry, startEntry);
+ insert(nextEntry, terminatorEntry);
+
+ SlotIndex startIdx(startEntry, SlotIndex::LOAD);
+ SlotIndex terminatorIdx(terminatorEntry, SlotIndex::PHI_BIT);
+ SlotIndex endIdx(nextEntry, SlotIndex::LOAD);
+
+ terminatorGaps.insert(
+ std::make_pair(mbb, terminatorIdx));
+
+ mbb2IdxMap.insert(
+ std::make_pair(mbb, std::make_pair(startIdx, endIdx)));
+
+ idx2MBBMap.push_back(IdxMBBPair(startIdx, mbb));
+
+ if (MachineFunction::iterator(mbb) != mbb->getParent()->begin()) {
+ // Have to update the end index of the previous block.
+ MachineBasicBlock *priorMBB =
+ llvm::prior(MachineFunction::iterator(mbb));
+ mbb2IdxMap[priorMBB].second = startIdx;
+ }
+
+ renumberIndexes();
+ std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
+
+ }
+
};
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h b/libclamav/c++/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
index 3d99fa7..d8f0373 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
@@ -33,7 +33,6 @@ namespace llvm {
class TargetLoweringObjectFileELF : public TargetLoweringObjectFile {
- mutable void *UniquingMap;
protected:
/// TLSDataSection - Section directive for Thread Local data.
///
@@ -52,14 +51,9 @@ protected:
const MCSection *MergeableConst4Section;
const MCSection *MergeableConst8Section;
const MCSection *MergeableConst16Section;
-
-protected:
- const MCSection *getELFSection(StringRef Section, unsigned Type,
- unsigned Flags, SectionKind Kind,
- bool IsExplicit = false) const;
public:
- TargetLoweringObjectFileELF() : UniquingMap(0) {}
- ~TargetLoweringObjectFileELF();
+ TargetLoweringObjectFileELF() {}
+ ~TargetLoweringObjectFileELF() {}
virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
@@ -78,24 +72,39 @@ public:
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler *Mang, const TargetMachine &TM) const;
- /// getSymbolForDwarfGlobalReference - Return an MCExpr to use for a reference
+ /// getExprForDwarfGlobalReference - Return an MCExpr to use for a reference
/// to the specified global variable from exception handling information.
///
virtual const MCExpr *
- getSymbolForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding) const;
+ getExprForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
+ MachineModuleInfo *MMI, unsigned Encoding,
+ MCStreamer &Streamer) const;
};
class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile {
- mutable void *UniquingMap;
+ /// TLSDataSection - Section for thread local data.
+ ///
+ const MCSection *TLSDataSection; // Defaults to ".tdata".
+ /// TLSBSSSection - Section for thread local uninitialized data.
+ ///
+ const MCSection *TLSBSSSection; // Defaults to ".tbss".
+
+ /// TLSTLVSection - Section for thread local structure infomation.
+ /// Contains the source code name of the variable, visibility and a pointer
+ /// to the initial value (.tdata or .tbss).
+ const MCSection *TLSTLVSection; // Defaults to ".tlv".
+
+ /// TLSThreadInitSection - Section for thread local data initialization
+ /// functions.
+ const MCSection *TLSThreadInitSection; // Defaults to ".thread_init_func".
+
const MCSection *CStringSection;
const MCSection *UStringSection;
const MCSection *TextCoalSection;
const MCSection *ConstTextCoalSection;
- const MCSection *ConstDataCoalSection;
const MCSection *ConstDataSection;
const MCSection *DataCoalSection;
const MCSection *DataCommonSection;
@@ -107,8 +116,8 @@ class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile {
const MCSection *LazySymbolPointerSection;
const MCSection *NonLazySymbolPointerSection;
public:
- TargetLoweringObjectFileMachO() : UniquingMap(0) {}
- ~TargetLoweringObjectFileMachO();
+ TargetLoweringObjectFileMachO() {}
+ ~TargetLoweringObjectFileMachO() {}
virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
@@ -128,20 +137,6 @@ public:
virtual bool shouldEmitUsedDirectiveFor(const GlobalValue *GV,
Mangler *) const;
- /// getMachOSection - Return the MCSection for the specified mach-o section.
- /// This requires the operands to be valid.
- const MCSectionMachO *getMachOSection(StringRef Segment,
- StringRef Section,
- unsigned TypeAndAttributes,
- SectionKind K) const {
- return getMachOSection(Segment, Section, TypeAndAttributes, 0, K);
- }
- const MCSectionMachO *getMachOSection(StringRef Segment,
- StringRef Section,
- unsigned TypeAndAttributes,
- unsigned Reserved2,
- SectionKind K) const;
-
/// getTextCoalSection - Return the "__TEXT,__textcoal_nt" section we put weak
/// text symbols into.
const MCSection *getTextCoalSection() const {
@@ -166,11 +161,12 @@ public:
return NonLazySymbolPointerSection;
}
- /// getSymbolForDwarfGlobalReference - The mach-o version of this method
+ /// getExprForDwarfGlobalReference - The mach-o version of this method
/// defaults to returning a stub reference.
virtual const MCExpr *
- getSymbolForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding) const;
+ getExprForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
+ MachineModuleInfo *MMI, unsigned Encoding,
+ MCStreamer &Streamer) const;
virtual unsigned getPersonalityEncoding() const;
virtual unsigned getLSDAEncoding() const;
@@ -181,13 +177,15 @@ public:
class TargetLoweringObjectFileCOFF : public TargetLoweringObjectFile {
- mutable void *UniquingMap;
+ const MCSection *DrectveSection;
public:
- TargetLoweringObjectFileCOFF() : UniquingMap(0) {}
- ~TargetLoweringObjectFileCOFF();
+ TargetLoweringObjectFileCOFF() {}
+ ~TargetLoweringObjectFileCOFF() {}
virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
+ virtual const MCSection *getDrectveSection() const { return DrectveSection; }
+
virtual const MCSection *
getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler *Mang, const TargetMachine &TM) const;
@@ -195,11 +193,6 @@ public:
virtual const MCSection *
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler *Mang, const TargetMachine &TM) const;
-
- /// getCOFFSection - Return the MCSection for the specified COFF section.
- /// FIXME: Switch this to a semantic view eventually.
- const MCSection *getCOFFSection(StringRef Name, bool isDirective,
- SectionKind K) const;
};
} // end namespace llvm
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/ValueTypes.h b/libclamav/c++/llvm/include/llvm/CodeGen/ValueTypes.h
index a7aafc0..51f324c 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/ValueTypes.h
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/ValueTypes.h
@@ -63,21 +63,22 @@ namespace llvm {
v1i64 = 24, // 1 x i64
v2i64 = 25, // 2 x i64
v4i64 = 26, // 4 x i64
+ v8i64 = 27, // 8 x i64
- v2f32 = 27, // 2 x f32
- v4f32 = 28, // 4 x f32
- v8f32 = 29, // 8 x f32
- v2f64 = 30, // 2 x f64
- v4f64 = 31, // 4 x f64
+ v2f32 = 28, // 2 x f32
+ v4f32 = 29, // 4 x f32
+ v8f32 = 30, // 8 x f32
+ v2f64 = 31, // 2 x f64
+ v4f64 = 32, // 4 x f64
FIRST_VECTOR_VALUETYPE = v2i8,
LAST_VECTOR_VALUETYPE = v4f64,
- Flag = 32, // This glues nodes together during pre-RA sched
+ Flag = 33, // This glues nodes together during pre-RA sched
- isVoid = 33, // This has no value
+ isVoid = 34, // This has no value
- LAST_VALUETYPE = 34, // This always remains at the end of the list.
+ LAST_VALUETYPE = 35, // This always remains at the end of the list.
// This is the current maximum for LAST_VALUETYPE.
// EVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors
@@ -140,7 +141,7 @@ namespace llvm {
bool isInteger() const {
return ((SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
SimpleTy <= MVT::LAST_INTEGER_VALUETYPE) ||
- (SimpleTy >= MVT::v2i8 && SimpleTy <= MVT::v4i64));
+ (SimpleTy >= MVT::v2i8 && SimpleTy <= MVT::v8i64));
}
/// isVector - Return true if this is a vector value type.
@@ -158,14 +159,12 @@ namespace llvm {
/// getPow2VectorType - Widens the length of the given vector EVT up to
/// the nearest power of 2 and returns that type.
MVT getPow2VectorType() const {
- if (!isPow2VectorType()) {
- unsigned NElts = getVectorNumElements();
- unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts);
- return MVT::getVectorVT(getVectorElementType(), Pow2NElts);
- }
- else {
+ if (isPow2VectorType())
return *this;
- }
+
+ unsigned NElts = getVectorNumElements();
+ unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts);
+ return MVT::getVectorVT(getVectorElementType(), Pow2NElts);
}
/// getScalarType - If this is a vector type, return the element type,
@@ -192,7 +191,8 @@ namespace llvm {
case v8i32: return i32;
case v1i64:
case v2i64:
- case v4i64: return i64;
+ case v4i64:
+ case v8i64: return i64;
case v2f32:
case v4f32:
case v8f32: return f32;
@@ -211,6 +211,7 @@ namespace llvm {
case v8i8 :
case v8i16:
case v8i32:
+ case v8i64:
case v8f32: return 8;
case v4i8:
case v4i16:
@@ -269,6 +270,7 @@ namespace llvm {
case v4i64:
case v8f32:
case v4f64: return 256;
+ case v8i64: return 512;
}
}
@@ -332,6 +334,7 @@ namespace llvm {
if (NumElements == 1) return MVT::v1i64;
if (NumElements == 2) return MVT::v2i64;
if (NumElements == 4) return MVT::v4i64;
+ if (NumElements == 8) return MVT::v8i64;
break;
case MVT::f32:
if (NumElements == 2) return MVT::v2f32;
@@ -345,17 +348,6 @@ namespace llvm {
}
return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
}
-
- static MVT getIntVectorWithNumElements(unsigned NumElts) {
- switch (NumElts) {
- default: return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
- case 1: return MVT::v1i64;
- case 2: return MVT::v2i32;
- case 4: return MVT::v4i16;
- case 8: return MVT::v8i8;
- case 16: return MVT::v16i8;
- }
- }
};
struct EVT { // EVT = Extended Value Type
@@ -369,22 +361,16 @@ namespace llvm {
EVT(MVT::SimpleValueType SVT) : V(SVT), LLVMTy(0) { }
EVT(MVT S) : V(S), LLVMTy(0) {}
- bool operator==(const EVT VT) const {
- if (V.SimpleTy == VT.V.SimpleTy) {
- if (V.SimpleTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
- return LLVMTy == VT.LLVMTy;
+ bool operator==(EVT VT) const {
+ return !(*this != VT);
+ }
+ bool operator!=(EVT VT) const {
+ if (V.SimpleTy != VT.V.SimpleTy)
return true;
- }
+ if (V.SimpleTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
+ return LLVMTy != VT.LLVMTy;
return false;
}
- bool operator!=(const EVT VT) const {
- if (V.SimpleTy == VT.V.SimpleTy) {
- if (V.SimpleTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
- return LLVMTy != VT.LLVMTy;
- return false;
- }
- return true;
- }
/// getFloatingPointVT - Returns the EVT that represents a floating point
/// type with the given number of bits. There are two floating point types
@@ -397,30 +383,32 @@ namespace llvm {
/// number of bits.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth) {
MVT M = MVT::getIntegerVT(BitWidth);
- if (M.SimpleTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
- return getExtendedIntegerVT(Context, BitWidth);
- else
+ if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
return M;
+ return getExtendedIntegerVT(Context, BitWidth);
}
/// getVectorVT - Returns the EVT that represents a vector NumElements in
/// length, where each element is of type VT.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements) {
MVT M = MVT::getVectorVT(VT.V, NumElements);
- if (M.SimpleTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
- return getExtendedVectorVT(Context, VT, NumElements);
- else
+ if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
return M;
+ return getExtendedVectorVT(Context, VT, NumElements);
}
/// getIntVectorWithNumElements - Return any integer vector type that has
/// the specified number of elements.
static EVT getIntVectorWithNumElements(LLVMContext &C, unsigned NumElts) {
- MVT M = MVT::getIntVectorWithNumElements(NumElts);
- if (M.SimpleTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
- return getVectorVT(C, MVT::i8, NumElts);
- else
- return M;
+ switch (NumElts) {
+ default: return getVectorVT(C, MVT::i8, NumElts);
+ case 1: return MVT::v1i64;
+ case 2: return MVT::v2i32;
+ case 4: return MVT::v4i16;
+ case 8: return MVT::v8i8;
+ case 16: return MVT::v16i8;
+ }
+ return MVT::INVALID_SIMPLE_VALUE_TYPE;
}
/// isSimple - Test if the given EVT is simple (as opposed to being
@@ -452,26 +440,32 @@ namespace llvm {
/// is64BitVector - Return true if this is a 64-bit vector type.
bool is64BitVector() const {
- return isSimple() ?
- (V==MVT::v8i8 || V==MVT::v4i16 || V==MVT::v2i32 ||
- V==MVT::v1i64 || V==MVT::v2f32) :
- isExtended64BitVector();
+ if (!isSimple())
+ return isExtended64BitVector();
+
+ return (V == MVT::v8i8 || V==MVT::v4i16 || V==MVT::v2i32 ||
+ V == MVT::v1i64 || V==MVT::v2f32);
}
/// is128BitVector - Return true if this is a 128-bit vector type.
bool is128BitVector() const {
- return isSimple() ?
- (V==MVT::v16i8 || V==MVT::v8i16 || V==MVT::v4i32 ||
- V==MVT::v2i64 || V==MVT::v4f32 || V==MVT::v2f64) :
- isExtended128BitVector();
+ if (!isSimple())
+ return isExtended128BitVector();
+ return (V==MVT::v16i8 || V==MVT::v8i16 || V==MVT::v4i32 ||
+ V==MVT::v2i64 || V==MVT::v4f32 || V==MVT::v2f64);
}
/// is256BitVector - Return true if this is a 256-bit vector type.
inline bool is256BitVector() const {
- return isSimple() ?
- (V==MVT::v8f32 || V==MVT::v4f64 || V==MVT::v32i8 ||
- V==MVT::v16i16 || V==MVT::v8i32 || V==MVT::v4i64) :
- isExtended256BitVector();
+ if (!isSimple())
+ return isExtended256BitVector();
+ return (V == MVT::v8f32 || V == MVT::v4f64 || V == MVT::v32i8 ||
+ V == MVT::v16i16 || V == MVT::v8i32 || V == MVT::v4i64);
+ }
+
+ /// is512BitVector - Return true if this is a 512-bit vector type.
+ inline bool is512BitVector() const {
+ return isSimple() ? (V == MVT::v8i64) : isExtended512BitVector();
}
/// isOverloaded - Return true if this is an overloaded type for TableGen.
@@ -540,8 +534,7 @@ namespace llvm {
assert(isVector() && "Invalid vector type!");
if (isSimple())
return V.getVectorElementType();
- else
- return getExtendedVectorElementType();
+ return getExtendedVectorElementType();
}
/// getVectorNumElements - Given a vector type, return the number of
@@ -550,16 +543,14 @@ namespace llvm {
assert(isVector() && "Invalid vector type!");
if (isSimple())
return V.getVectorNumElements();
- else
- return getExtendedVectorNumElements();
+ return getExtendedVectorNumElements();
}
/// getSizeInBits - Return the size of the specified value type in bits.
unsigned getSizeInBits() const {
if (isSimple())
return V.getSizeInBits();
- else
- return getExtendedSizeInBits();
+ return getExtendedSizeInBits();
}
/// getStoreSize - Return the number of bytes overwritten by a store
@@ -582,8 +573,7 @@ namespace llvm {
unsigned BitWidth = getSizeInBits();
if (BitWidth <= 8)
return EVT(MVT::i8);
- else
- return getIntegerVT(Context, 1 << Log2_32_Ceil(BitWidth));
+ return getIntegerVT(Context, 1 << Log2_32_Ceil(BitWidth));
}
/// getHalfSizedIntegerVT - Finds the smallest simple value type that is
@@ -594,12 +584,10 @@ namespace llvm {
assert(isInteger() && !isVector() && "Invalid integer type!");
unsigned EVTSize = getSizeInBits();
for (unsigned IntVT = MVT::FIRST_INTEGER_VALUETYPE;
- IntVT <= MVT::LAST_INTEGER_VALUETYPE;
- ++IntVT) {
+ IntVT <= MVT::LAST_INTEGER_VALUETYPE; ++IntVT) {
EVT HalfVT = EVT((MVT::SimpleValueType)IntVT);
- if(HalfVT.getSizeInBits() * 2 >= EVTSize) {
+ if (HalfVT.getSizeInBits() * 2 >= EVTSize)
return HalfVT;
- }
}
return getIntegerVT(Context, (EVTSize + 1) / 2);
}
@@ -668,6 +656,7 @@ namespace llvm {
bool isExtended64BitVector() const;
bool isExtended128BitVector() const;
bool isExtended256BitVector() const;
+ bool isExtended512BitVector() const;
EVT getExtendedVectorElementType() const;
unsigned getExtendedVectorNumElements() const;
unsigned getExtendedSizeInBits() const;
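
For illustration, a minimal sketch (assuming llvm/CodeGen/ValueTypes.h and namespace llvm) of how the new 512-bit simple type shows up through the queries above:

static bool DemoV8i64() {
  MVT Wide = MVT::getVectorVT(MVT::i64, 8); // now resolves to MVT::v8i64
  EVT VT = Wide;
  // Both hold for v8i64 after this change: 8 x 64 bits = 512 bits.
  return VT.is512BitVector() && VT.getSizeInBits() == 512;
}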
diff --git a/libclamav/c++/llvm/include/llvm/CodeGen/ValueTypes.td b/libclamav/c++/llvm/include/llvm/CodeGen/ValueTypes.td
index c8bb789..8151c0b 100644
--- a/libclamav/c++/llvm/include/llvm/CodeGen/ValueTypes.td
+++ b/libclamav/c++/llvm/include/llvm/CodeGen/ValueTypes.td
@@ -47,15 +47,16 @@ def v8i32 : ValueType<256, 23>; // 8 x i32 vector value
def v1i64 : ValueType<64 , 24>; // 1 x i64 vector value
def v2i64 : ValueType<128, 25>; // 2 x i64 vector value
def v4i64 : ValueType<256, 26>; // 4 x i64 vector value
+def v8i64 : ValueType<512, 27>; // 8 x i64 vector value
-def v2f32 : ValueType<64, 27>; // 2 x f32 vector value
-def v4f32 : ValueType<128, 28>; // 4 x f32 vector value
-def v8f32 : ValueType<256, 29>; // 8 x f32 vector value
-def v2f64 : ValueType<128, 30>; // 2 x f64 vector value
-def v4f64 : ValueType<256, 31>; // 4 x f64 vector value
+def v2f32 : ValueType<64, 28>; // 2 x f32 vector value
+def v4f32 : ValueType<128, 29>; // 4 x f32 vector value
+def v8f32 : ValueType<256, 30>; // 8 x f32 vector value
+def v2f64 : ValueType<128, 31>; // 2 x f64 vector value
+def v4f64 : ValueType<256, 32>; // 4 x f64 vector value
-def FlagVT : ValueType<0 , 32>; // Pre-RA sched glue
-def isVoid : ValueType<0 , 33>; // Produces no value
+def FlagVT : ValueType<0 , 33>; // Pre-RA sched glue
+def isVoid : ValueType<0 , 34>; // Produces no value
def MetadataVT: ValueType<0, 250>; // Metadata
diff --git a/libclamav/c++/llvm/include/llvm/CompilerDriver/Action.h b/libclamav/c++/llvm/include/llvm/CompilerDriver/Action.h
index 7014139..f2b7965 100644
--- a/libclamav/c++/llvm/include/llvm/CompilerDriver/Action.h
+++ b/libclamav/c++/llvm/include/llvm/CompilerDriver/Action.h
@@ -34,12 +34,16 @@ namespace llvmc {
std::string OutFile_;
public:
- Action (const std::string& C, const StrVector& A,
- bool S, const std::string& O)
- : Command_(C), Args_(A), StopCompilation_(S), OutFile_(O)
- {}
-
- /// Execute - Executes the represented action.
+ void Construct (const std::string& C, const StrVector& A,
+ bool S, const std::string& O) {
+ Command_ = C;
+ Args_ = A;
+ StopCompilation_ = S;
+ OutFile_ = O;
+ }
+ bool IsConstructed () { return (Command_.size() != 0);}
+
+ /// Execute - Executes the command. Returns -1 on error.
int Execute () const;
bool StopCompilation () const { return StopCompilation_; }
const std::string& OutFile() { return OutFile_; }
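
For illustration, a hedged sketch of the new two-phase Action protocol (the tool name, argument vector and output file below are placeholder values): the old one-shot constructor is replaced by Construct(), IsConstructed() tells callers whether it ran, and Execute() signals failure with -1.

static int RunOneAction(const llvmc::StrVector &Args) {
  llvmc::Action A;                      // default-constructed, not yet usable
  A.Construct("gcc", Args, /*StopCompilation=*/false, "a.out");
  if (!A.IsConstructed())
    return 1;                           // nothing to run
  return A.Execute() == -1 ? 1 : 0;     // Execute() returns -1 on error
}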
diff --git a/libclamav/c++/llvm/include/llvm/CompilerDriver/AutoGenerated.h b/libclamav/c++/llvm/include/llvm/CompilerDriver/AutoGenerated.h
new file mode 100644
index 0000000..7b926c6
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/CompilerDriver/AutoGenerated.h
@@ -0,0 +1,40 @@
+//===--- AutoGenerated.h - The LLVM Compiler Driver -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open
+// Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface to the autogenerated driver code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_INCLUDE_COMPILER_DRIVER_AUTOGENERATED_H
+#define LLVM_INCLUDE_COMPILER_DRIVER_AUTOGENERATED_H
+
+namespace llvmc {
+ class LanguageMap;
+ class CompilationGraph;
+
+ namespace autogenerated {
+
+ int PreprocessOptions();
+ int PopulateLanguageMap(LanguageMap& langMap);
+ int PopulateCompilationGraph(CompilationGraph& graph);
+
+ inline int RunInitialization (LanguageMap& M, CompilationGraph& G) {
+ if (int ret = PreprocessOptions())
+ return ret;
+ if (int ret = PopulateLanguageMap(M))
+ return ret;
+ if (int ret = PopulateCompilationGraph(G))
+ return ret;
+
+ return 0;
+ }
+ }
+}
+
+#endif // LLVM_INCLUDE_COMPILER_DRIVER_AUTOGENERATED_H
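
For illustration, a sketch of how a driver entry point could use these hooks (RunDriver is a hypothetical wrapper; error codes are simply propagated):

static int RunDriver() {
  llvmc::LanguageMap langMap;
  llvmc::CompilationGraph graph;
  if (int ret = llvmc::autogenerated::RunInitialization(langMap, graph))
    return ret;                  // option preprocessing or population failed
  // ... hand langMap/graph to the build step ...
  return 0;
}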
diff --git a/libclamav/c++/llvm/include/llvm/CompilerDriver/BuiltinOptions.h b/libclamav/c++/llvm/include/llvm/CompilerDriver/BuiltinOptions.h
index 0c1bbe2..7b9c15c 100644
--- a/libclamav/c++/llvm/include/llvm/CompilerDriver/BuiltinOptions.h
+++ b/libclamav/c++/llvm/include/llvm/CompilerDriver/BuiltinOptions.h
@@ -18,6 +18,8 @@
#include <string>
+namespace llvmc {
+
namespace SaveTempsEnum { enum Values { Cwd, Obj, Unset }; }
extern llvm::cl::list<std::string> InputFilenames;
@@ -32,4 +34,6 @@ extern llvm::cl::opt<bool> ViewGraph;
extern llvm::cl::opt<bool> WriteGraph;
extern llvm::cl::opt<SaveTempsEnum::Values> SaveTemps;
+} // End namespace llvmc.
+
#endif // LLVM_INCLUDE_COMPILER_DRIVER_BUILTIN_OPTIONS_H
diff --git a/libclamav/c++/llvm/include/llvm/CompilerDriver/Common.td b/libclamav/c++/llvm/include/llvm/CompilerDriver/Common.td
index 31a627d..84e8783 100644
--- a/libclamav/c++/llvm/include/llvm/CompilerDriver/Common.td
+++ b/libclamav/c++/llvm/include/llvm/CompilerDriver/Common.td
@@ -32,6 +32,7 @@ def actions;
def alias_option;
def switch_option;
+def switch_list_option;
def parameter_option;
def parameter_list_option;
def prefix_option;
@@ -39,7 +40,6 @@ def prefix_list_option;
// Possible option properties.
-def extern;
def help;
def hidden;
def init;
@@ -93,17 +93,8 @@ def error;
def set_option;
def unset_option;
-// Increase/decrease the edge weight.
+// Increase the edge weight.
def inc_weight;
-def dec_weight;
-
-// Empty DAG marker.
-def empty_dag_marker;
-
-// Used to specify plugin priority.
-class PluginPriority<int p> {
- int priority = p;
-}
// Option list - a single place to specify options.
class OptionList<list<dag> l> {
@@ -117,31 +108,17 @@ class OptionPreprocessor<dag d> {
// Map from suffixes to language names
-class LangToSuffixes<string str, list<string> lst> {
- string lang = str;
- list<string> suffixes = lst;
-}
+def lang_to_suffixes;
-class LanguageMap<list<LangToSuffixes> lst> {
- list<LangToSuffixes> map = lst;
+class LanguageMap<list<dag> l> {
+ list<dag> map = l;
}
// Compilation graph
-class EdgeBase<string t1, string t2, dag d> {
- string a = t1;
- string b = t2;
- dag weight = d;
-}
-
-class Edge<string t1, string t2> : EdgeBase<t1, t2, (empty_dag_marker)>;
-
-// Edge and SimpleEdge are synonyms.
-class SimpleEdge<string t1, string t2> : EdgeBase<t1, t2, (empty_dag_marker)>;
-
-// Optionally enabled edge.
-class OptionalEdge<string t1, string t2, dag props> : EdgeBase<t1, t2, props>;
+def edge;
+def optional_edge;
-class CompilationGraph<list<EdgeBase> lst> {
- list<EdgeBase> edges = lst;
+class CompilationGraph<list<dag> l> {
+ list<dag> edges = l;
}
diff --git a/libclamav/c++/llvm/include/llvm/CompilerDriver/CompilationGraph.h b/libclamav/c++/llvm/include/llvm/CompilerDriver/CompilationGraph.h
index ba6ff47..619c904 100644
--- a/libclamav/c++/llvm/include/llvm/CompilerDriver/CompilationGraph.h
+++ b/libclamav/c++/llvm/include/llvm/CompilerDriver/CompilationGraph.h
@@ -36,7 +36,7 @@ namespace llvmc {
public:
/// GetLanguage - Find the language name corresponding to a given file.
- const std::string& GetLanguage(const llvm::sys::Path&) const;
+ const std::string* GetLanguage(const llvm::sys::Path&) const;
};
/// Edge - Represents an edge of the compilation graph.
@@ -46,7 +46,7 @@ namespace llvmc {
virtual ~Edge() {}
const std::string& ToolName() const { return ToolName_; }
- virtual unsigned Weight(const InputLanguagesSet& InLangs) const = 0;
+ virtual int Weight(const InputLanguagesSet& InLangs) const = 0;
private:
std::string ToolName_;
};
@@ -55,7 +55,7 @@ namespace llvmc {
class SimpleEdge : public Edge {
public:
SimpleEdge(const std::string& T) : Edge(T) {}
- unsigned Weight(const InputLanguagesSet&) const { return 1; }
+ int Weight(const InputLanguagesSet&) const { return 1; }
};
/// Node - A node (vertex) of the compilation graph.
@@ -132,32 +132,32 @@ namespace llvmc {
void insertNode(Tool* T);
/// insertEdge - Insert a new edge into the graph. Takes ownership
- /// of the Edge object.
- void insertEdge(const std::string& A, Edge* E);
+ /// of the Edge object. Returns non-zero value on error.
+ int insertEdge(const std::string& A, Edge* E);
- /// Build - Build target(s) from the input file set. Command-line
- /// options are passed implicitly as global variables.
+ /// Build - Build target(s) from the input file set. Command-line options
+ /// are passed implicitly as global variables. Returns non-zero value on
+ /// error (usually the failed program's exit code).
int Build(llvm::sys::Path const& TempDir, const LanguageMap& LangMap);
- /// Check - Check the compilation graph for common errors like
- /// cycles, input/output language mismatch and multiple default
- /// edges. Prints error messages and in case it finds any errors.
+ /// Check - Check the compilation graph for common errors like cycles,
+ /// input/output language mismatch and multiple default edges. Prints error
+ /// messages and returns a non-zero value in case it finds any errors.
int Check();
- /// getNode - Return a reference to the node correponding to the
- /// given tool name. Throws std::runtime_error.
- Node& getNode(const std::string& ToolName);
- const Node& getNode(const std::string& ToolName) const;
+ /// getNode - Return a reference to the node corresponding to the given tool
+ /// name. Returns 0 on error.
+ Node* getNode(const std::string& ToolName);
+ const Node* getNode(const std::string& ToolName) const;
- /// viewGraph - This function is meant for use from the debugger.
- /// You can just say 'call G->viewGraph()' and a ghostview window
- /// should pop up from the program, displaying the compilation
- /// graph. This depends on there being a 'dot' and 'gv' program
- /// in your path.
+ /// viewGraph - This function is meant for use from the debugger. You can
+ /// just say 'call G->viewGraph()' and a ghostview window should pop up from
+ /// the program, displaying the compilation graph. This depends on there
+ /// being a 'dot' and 'gv' program in your path.
void viewGraph();
/// writeGraph - Write Graphviz .dot source file to the current directory.
- void writeGraph(const std::string& OutputFilename);
+ int writeGraph(const std::string& OutputFilename);
// GraphTraits support.
friend NodesIterator GraphBegin(CompilationGraph*);
@@ -167,16 +167,15 @@ namespace llvmc {
// Helper functions.
/// getToolsVector - Return a reference to the list of tool names
- /// corresponding to the given language name. Throws
- /// std::runtime_error.
- const tools_vector_type& getToolsVector(const std::string& LangName) const;
+ /// corresponding to the given language name. Returns 0 on error.
+ const tools_vector_type* getToolsVector(const std::string& LangName) const;
- /// PassThroughGraph - Pass the input file through the toolchain
- /// starting at StartNode.
- void PassThroughGraph (const llvm::sys::Path& In, const Node* StartNode,
- const InputLanguagesSet& InLangs,
- const llvm::sys::Path& TempDir,
- const LanguageMap& LangMap) const;
+ /// PassThroughGraph - Pass the input file through the toolchain starting at
+ /// StartNode.
+ int PassThroughGraph (const llvm::sys::Path& In, const Node* StartNode,
+ const InputLanguagesSet& InLangs,
+ const llvm::sys::Path& TempDir,
+ const LanguageMap& LangMap) const;
/// FindToolChain - Find head of the toolchain corresponding to
/// the given file.
@@ -185,26 +184,32 @@ namespace llvmc {
InputLanguagesSet& InLangs,
const LanguageMap& LangMap) const;
- /// BuildInitial - Traverse the initial parts of the toolchains.
- void BuildInitial(InputLanguagesSet& InLangs,
- const llvm::sys::Path& TempDir,
- const LanguageMap& LangMap);
+ /// BuildInitial - Traverse the initial parts of the toolchains. Returns
+ /// non-zero value on error.
+ int BuildInitial(InputLanguagesSet& InLangs,
+ const llvm::sys::Path& TempDir,
+ const LanguageMap& LangMap);
- /// TopologicalSort - Sort the nodes in topological order.
- void TopologicalSort(std::vector<const Node*>& Out);
- /// TopologicalSortFilterJoinNodes - Call TopologicalSort and
- /// filter the resulting list to include only Join nodes.
- void TopologicalSortFilterJoinNodes(std::vector<const Node*>& Out);
+ /// TopologicalSort - Sort the nodes in topological order. Returns non-zero
+ /// value on error.
+ int TopologicalSort(std::vector<const Node*>& Out);
+ /// TopologicalSortFilterJoinNodes - Call TopologicalSort and filter the
+ /// resulting list to include only Join nodes. Returns non-zero value on
+ /// error.
+ int TopologicalSortFilterJoinNodes(std::vector<const Node*>& Out);
// Functions used to implement Check().
- /// CheckLanguageNames - Check that output/input language names
- /// match for all nodes.
+ /// CheckLanguageNames - Check that output/input language names match for
+ /// all nodes. Returns non-zero value on error (number of errors
+ /// encountered).
int CheckLanguageNames() const;
- /// CheckMultipleDefaultEdges - check that there are no multiple
- /// default default edges.
+ /// CheckMultipleDefaultEdges - check that there are no multiple default
+ /// edges. Returns non-zero value on error (number of errors
+ /// encountered).
int CheckMultipleDefaultEdges() const;
- /// CheckCycles - Check that there are no cycles in the graph.
+ /// CheckCycles - Check that there are no cycles in the graph. Returns
+ /// non-zero value on error (number of errors encountered).
int CheckCycles();
};
@@ -270,7 +275,7 @@ namespace llvmc {
}
inline pointer operator*() const {
- return &OwningGraph->getNode((*EdgeIter)->ToolName());
+ return OwningGraph->getNode((*EdgeIter)->ToolName());
}
inline pointer operator->() const {
return this->operator*();
@@ -301,7 +306,7 @@ namespace llvm {
typedef llvmc::NodeChildIterator ChildIteratorType;
static NodeType* getEntryNode(GraphType* G) {
- return &G->getNode("root");
+ return G->getNode("root");
}
static ChildIteratorType child_begin(NodeType* N) {
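
For illustration, a sketch of the pointer/error-code style that replaces the throwing accessors above (UseGraph is a hypothetical helper):

static int UseGraph(llvmc::CompilationGraph &G) {
  llvmc::Node *Root = G.getNode("root");
  if (!Root)
    return 1;                    // lookup failures are reported, not thrown
  return G.Check();              // non-zero when the graph has problems
}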
diff --git a/libclamav/c++/llvm/include/llvm/CompilerDriver/Error.h b/libclamav/c++/llvm/include/llvm/CompilerDriver/Error.h
index fa678cf..013094e 100644
--- a/libclamav/c++/llvm/include/llvm/CompilerDriver/Error.h
+++ b/libclamav/c++/llvm/include/llvm/CompilerDriver/Error.h
@@ -7,28 +7,22 @@
//
//===----------------------------------------------------------------------===//
//
-// Exception classes for llvmc.
+// Error handling.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_INCLUDE_COMPILER_DRIVER_ERROR_H
#define LLVM_INCLUDE_COMPILER_DRIVER_ERROR_H
-#include <stdexcept>
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
namespace llvmc {
- /// error_code - This gets thrown during the compilation process if a tool
- /// invocation returns a non-zero exit code.
- class error_code: public std::runtime_error {
- int Code_;
- public:
- error_code (int c)
- : std::runtime_error("Tool returned error code"), Code_(c)
- {}
-
- int code() const { return Code_; }
- };
+ inline void PrintError(llvm::StringRef Err) {
+ extern const char* ProgramName;
+ llvm::errs() << ProgramName << ": " << Err << '\n';
+ }
}
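
For illustration, a sketch of reporting failures now that the error_code exception class is gone (ReportToolFailure is a hypothetical helper; the driver is expected to define llvmc::ProgramName, which is only declared extern here):

static int ReportToolFailure(int ExitCode) {
  if (ExitCode != 0)
    llvmc::PrintError("Tool returned error code");  // printed, not thrown
  return ExitCode;                                  // caller propagates it
}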
diff --git a/libclamav/c++/llvm/include/llvm/CompilerDriver/ForceLinkage.h b/libclamav/c++/llvm/include/llvm/CompilerDriver/ForceLinkage.h
deleted file mode 100644
index 830c04e..0000000
--- a/libclamav/c++/llvm/include/llvm/CompilerDriver/ForceLinkage.h
+++ /dev/null
@@ -1,122 +0,0 @@
-//===--- ForceLinkage.h - The LLVM Compiler Driver --------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open
-// Source License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// A bit of preprocessor magic to force references to static libraries. Needed
-// because plugin initialization is done via static variables.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_INCLUDE_COMPILER_DRIVER_FORCE_LINKAGE_H
-#define LLVM_INCLUDE_COMPILER_DRIVER_FORCE_LINKAGE_H
-
-#include "llvm/CompilerDriver/ForceLinkageMacros.h"
-
-namespace llvmc {
-
-// Declare all ForceLinkage$(PluginName) functions.
-
-#ifdef LLVMC_BUILTIN_PLUGIN_1
- LLVMC_FORCE_LINKAGE_DECL(LLVMC_BUILTIN_PLUGIN_1);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_2
- LLVMC_FORCE_LINKAGE_DECL(LLVMC_BUILTIN_PLUGIN_2);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_3
- LLVMC_FORCE_LINKAGE_DECL(LLVMC_BUILTIN_PLUGIN_3);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_4
- LLVMC_FORCE_LINKAGE_DECL(LLVMC_BUILTIN_PLUGIN_4);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_5
- LLVMC_FORCE_LINKAGE_DECL(LLVMC_BUILTIN_PLUGIN_5);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_6
- LLVMC_FORCE_LINKAGE_DECL(LLVMC_BUILTIN_PLUGIN_6);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_7
- LLVMC_FORCE_LINKAGE_DECL(LLVMC_BUILTIN_PLUGIN_7);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_8
- LLVMC_FORCE_LINKAGE_DECL(LLVMC_BUILTIN_PLUGIN_8);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_9
- LLVMC_FORCE_LINKAGE_DECL(LLVMC_BUILTIN_PLUGIN_9);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_10
- LLVMC_FORCE_LINKAGE_DECL(LLVMC_BUILTIN_PLUGIN_10);
-#endif
-
-namespace force_linkage {
-
- struct LinkageForcer {
-
- LinkageForcer() {
-
-// Call all ForceLinkage$(PluginName) functions.
-#ifdef LLVMC_BUILTIN_PLUGIN_1
- LLVMC_FORCE_LINKAGE_CALL(LLVMC_BUILTIN_PLUGIN_1);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_2
- LLVMC_FORCE_LINKAGE_CALL(LLVMC_BUILTIN_PLUGIN_2);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_3
- LLVMC_FORCE_LINKAGE_CALL(LLVMC_BUILTIN_PLUGIN_3);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_4
- LLVMC_FORCE_LINKAGE_CALL(LLVMC_BUILTIN_PLUGIN_4);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_5
- LLVMC_FORCE_LINKAGE_CALL(LLVMC_BUILTIN_PLUGIN_5);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_6
- LLVMC_FORCE_LINKAGE_CALL(LLVMC_BUILTIN_PLUGIN_6);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_7
- LLVMC_FORCE_LINKAGE_CALL(LLVMC_BUILTIN_PLUGIN_7);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_8
- LLVMC_FORCE_LINKAGE_CALL(LLVMC_BUILTIN_PLUGIN_8);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_9
- LLVMC_FORCE_LINKAGE_CALL(LLVMC_BUILTIN_PLUGIN_9);
-#endif
-
-#ifdef LLVMC_BUILTIN_PLUGIN_10
- LLVMC_FORCE_LINKAGE_CALL(LLVMC_BUILTIN_PLUGIN_10);
-#endif
-
- }
- };
-} // End namespace force_linkage.
-
-// The only externally used bit.
-void ForceLinkage() {
- force_linkage::LinkageForcer dummy;
-}
-
-} // End namespace llvmc.
-
-#endif // LLVM_INCLUDE_COMPILER_DRIVER_FORCE_LINKAGE_H
diff --git a/libclamav/c++/llvm/include/llvm/CompilerDriver/ForceLinkageMacros.h b/libclamav/c++/llvm/include/llvm/CompilerDriver/ForceLinkageMacros.h
deleted file mode 100644
index 8862b00..0000000
--- a/libclamav/c++/llvm/include/llvm/CompilerDriver/ForceLinkageMacros.h
+++ /dev/null
@@ -1,29 +0,0 @@
-//===--- ForceLinkageMacros.h - The LLVM Compiler Driver --------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open
-// Source License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Preprocessor magic that forces references to static libraries - common
-// macros used by both driver and plugins.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_INCLUDE_COMPILER_DRIVER_FORCE_LINKAGE_MACROS_H
-#define LLVM_INCLUDE_COMPILER_DRIVER_FORCE_LINKAGE_MACROS_H
-
-#define LLVMC_FORCE_LINKAGE_PREFIX(PluginName) ForceLinkage ## PluginName
-
-#define LLVMC_FORCE_LINKAGE_FUN(PluginName) \
- LLVMC_FORCE_LINKAGE_PREFIX(PluginName)
-
-#define LLVMC_FORCE_LINKAGE_DECL(PluginName) \
- void LLVMC_FORCE_LINKAGE_FUN(PluginName) ()
-
-#define LLVMC_FORCE_LINKAGE_CALL(PluginName) \
- LLVMC_FORCE_LINKAGE_FUN(PluginName) ()
-
-#endif // LLVM_INCLUDE_COMPILER_DRIVER_FORCE_LINKAGE_MACROS_H
diff --git a/libclamav/c++/llvm/include/llvm/CompilerDriver/Main.h b/libclamav/c++/llvm/include/llvm/CompilerDriver/Main.h
new file mode 100644
index 0000000..d136a5d
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/CompilerDriver/Main.h
@@ -0,0 +1,21 @@
+//===--- Main.h - The LLVM Compiler Driver ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open
+// Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Entry point for the driver executable.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_INCLUDE_COMPILER_DRIVER_MAIN_H
+#define LLVM_INCLUDE_COMPILER_DRIVER_MAIN_H
+
+namespace llvmc {
+ int Main(int argc, char** argv);
+}
+
+#endif // LLVM_INCLUDE_COMPILER_DRIVER_MAIN_H
diff --git a/libclamav/c++/llvm/include/llvm/CompilerDriver/Main.inc b/libclamav/c++/llvm/include/llvm/CompilerDriver/Main.inc
index 71bb8cb..4164043 100644
--- a/libclamav/c++/llvm/include/llvm/CompilerDriver/Main.inc
+++ b/libclamav/c++/llvm/include/llvm/CompilerDriver/Main.inc
@@ -7,26 +7,16 @@
//
//===----------------------------------------------------------------------===//
//
-// This tool provides a single point of access to the LLVM
-// compilation tools. It has many options. To discover the options
-// supported please refer to the tools' manual page or run the tool
-// with the -help option.
-//
-// This file provides the default entry point for the driver executable.
+// Default main() for the driver executable.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_INCLUDE_COMPILER_DRIVER_MAIN_INC
#define LLVM_INCLUDE_COMPILER_DRIVER_MAIN_INC
-#include "llvm/CompilerDriver/ForceLinkage.h"
-
-namespace llvmc {
- int Main(int argc, char** argv);
-}
+#include "llvm/CompilerDriver/Main.h"
int main(int argc, char** argv) {
- llvmc::ForceLinkage();
return llvmc::Main(argc, argv);
}
diff --git a/libclamav/c++/llvm/include/llvm/CompilerDriver/Plugin.h b/libclamav/c++/llvm/include/llvm/CompilerDriver/Plugin.h
deleted file mode 100644
index e9a2048..0000000
--- a/libclamav/c++/llvm/include/llvm/CompilerDriver/Plugin.h
+++ /dev/null
@@ -1,81 +0,0 @@
-//===--- Plugin.h - The LLVM Compiler Driver --------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open
-// Source License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Plugin support for llvmc.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_INCLUDE_COMPILER_DRIVER_PLUGIN_H
-#define LLVM_INCLUDE_COMPILER_DRIVER_PLUGIN_H
-
-#include "llvm/Support/Registry.h"
-
-namespace llvmc {
-
- class LanguageMap;
- class CompilationGraph;
-
- /// BasePlugin - An abstract base class for all LLVMC plugins.
- struct BasePlugin {
-
- /// Priority - Plugin priority, useful for handling dependencies
- /// between plugins. Plugins with lower priorities are loaded
- /// first.
- virtual int Priority() const { return 0; }
-
- /// PreprocessOptions - The auto-generated function that performs various
- /// consistency checks on options (like ensuring that -O2 and -O3 are not
- /// used together).
- virtual void PreprocessOptions() const = 0;
-
- /// PopulateLanguageMap - The auto-generated function that fills in
- /// the language map (map from file extensions to language names).
- virtual void PopulateLanguageMap(LanguageMap&) const = 0;
-
- /// PopulateCompilationGraph - The auto-generated function that
- /// populates the compilation graph with nodes and edges.
- virtual void PopulateCompilationGraph(CompilationGraph&) const = 0;
-
- /// Needed to avoid a compiler warning.
- virtual ~BasePlugin() {}
- };
-
- typedef llvm::Registry<BasePlugin> PluginRegistry;
-
- template <class P>
- struct RegisterPlugin
- : public PluginRegistry::Add<P> {
- typedef PluginRegistry::Add<P> Base;
-
- RegisterPlugin(const char* Name = "Nameless",
- const char* Desc = "Auto-generated plugin")
- : Base(Name, Desc) {}
- };
-
-
- /// PluginLoader - Helper class used by the main program for
- /// lifetime management.
- struct PluginLoader {
- PluginLoader();
- ~PluginLoader();
-
- /// RunInitialization - Calls PreprocessOptions, PopulateLanguageMap and
- /// PopulateCompilationGraph methods of all plugins. This populates the
- /// global language map and the compilation graph.
- void RunInitialization(LanguageMap& langMap, CompilationGraph& graph) const;
-
- private:
- // noncopyable
- PluginLoader(const PluginLoader& other);
- const PluginLoader& operator=(const PluginLoader& other);
- };
-
-}
-
-#endif // LLVM_INCLUDE_COMPILER_DRIVER_PLUGIN_H
diff --git a/libclamav/c++/llvm/include/llvm/CompilerDriver/Tool.h b/libclamav/c++/llvm/include/llvm/CompilerDriver/Tool.h
index 85d1690..45ef50d 100644
--- a/libclamav/c++/llvm/include/llvm/CompilerDriver/Tool.h
+++ b/libclamav/c++/llvm/include/llvm/CompilerDriver/Tool.h
@@ -38,17 +38,23 @@ namespace llvmc {
virtual ~Tool() {}
- virtual Action GenerateAction (const PathVector& inFiles,
- bool HasChildren,
- const llvm::sys::Path& TempDir,
- const InputLanguagesSet& InLangs,
- const LanguageMap& LangMap) const = 0;
-
- virtual Action GenerateAction (const llvm::sys::Path& inFile,
- bool HasChildren,
- const llvm::sys::Path& TempDir,
- const InputLanguagesSet& InLangs,
- const LanguageMap& LangMap) const = 0;
+ /// GenerateAction - Generate an Action given particular command-line
+ /// options. Returns non-zero value on error.
+ virtual int GenerateAction (Action& Out,
+ const PathVector& inFiles,
+ const bool HasChildren,
+ const llvm::sys::Path& TempDir,
+ const InputLanguagesSet& InLangs,
+ const LanguageMap& LangMap) const = 0;
+
+ /// GenerateAction - Generate an Action given particular command-line
+ /// options. Returns non-zero value on error.
+ virtual int GenerateAction (Action& Out,
+ const llvm::sys::Path& inFile,
+ const bool HasChildren,
+ const llvm::sys::Path& TempDir,
+ const InputLanguagesSet& InLangs,
+ const LanguageMap& LangMap) const = 0;
virtual const char* Name() const = 0;
virtual const char** InputLanguages() const = 0;
@@ -74,11 +80,13 @@ namespace llvmc {
void ClearJoinList() { JoinList_.clear(); }
bool JoinListEmpty() const { return JoinList_.empty(); }
- Action GenerateAction(bool HasChildren,
- const llvm::sys::Path& TempDir,
- const InputLanguagesSet& InLangs,
- const LanguageMap& LangMap) const {
- return GenerateAction(JoinList_, HasChildren, TempDir, InLangs, LangMap);
+ int GenerateAction(Action& Out,
+ const bool HasChildren,
+ const llvm::sys::Path& TempDir,
+ const InputLanguagesSet& InLangs,
+ const LanguageMap& LangMap) const {
+ return GenerateAction(Out, JoinList_, HasChildren, TempDir, InLangs,
+ LangMap);
}
// We shouldn't shadow base class's version of GenerateAction.
using Tool::GenerateAction;
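
For illustration, a sketch of a caller adapted to the new out-parameter style (RunThroughTool is a hypothetical helper): GenerateAction() now fills in an Action and returns non-zero on error, so command construction and command execution report failures separately.

static int RunThroughTool(const llvmc::Tool &T, const llvm::sys::Path &In,
                          const llvm::sys::Path &TempDir,
                          const llvmc::InputLanguagesSet &InLangs,
                          const llvmc::LanguageMap &LangMap) {
  llvmc::Action A;
  if (int ret = T.GenerateAction(A, In, /*HasChildren=*/false,
                                 TempDir, InLangs, LangMap))
    return ret;                  // could not build the command line
  return A.Execute();            // -1 on failure, per Action.h above
}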
diff --git a/libclamav/c++/llvm/include/llvm/Config/.gitignore b/libclamav/c++/llvm/include/llvm/Config/.gitignore
new file mode 100644
index 0000000..8d64155
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/Config/.gitignore
@@ -0,0 +1 @@
+llvm-config.h
diff --git a/libclamav/c++/llvm/include/llvm/Config/config.h.cmake b/libclamav/c++/llvm/include/llvm/Config/config.h.cmake
index 1f48ae9..e8feabf 100644
--- a/libclamav/c++/llvm/include/llvm/Config/config.h.cmake
+++ b/libclamav/c++/llvm/include/llvm/Config/config.h.cmake
@@ -3,6 +3,9 @@
** Created by Kevin from config.h.in **
***************************************/
+#ifndef CONFIG_H
+#define CONFIG_H
+
/* Define if dlopen(0) will open the symbols of the program */
#undef CAN_DLOPEN_SELF
@@ -312,6 +315,9 @@
/* Define to 1 if you have the `roundf' function. */
#undef HAVE_ROUNDF
+/* Define to 1 if you have the `round' function. */
+#cmakedefine HAVE_ROUND ${HAVE_ROUND}
+
/* Define to 1 if you have the `sbrk' function. */
#cmakedefine HAVE_SBRK ${HAVE_SBRK}
@@ -452,6 +458,9 @@
/* Define to 1 if the system has the type `u_int64_t'. */
#undef HAVE_U_INT64_T
+/* Define to 1 if you have the <valgrind/valgrind.h> header file. */
+#cmakedefine HAVE_VALGRIND_VALGRIND_H ${HAVE_VALGRIND_VALGRIND_H}
+
/* Define to 1 if you have the <windows.h> header file. */
#cmakedefine HAVE_WINDOWS_H ${HAVE_WINDOWS_H}
@@ -519,7 +528,7 @@
#cmakedefine LLVM_PATH_TWOPI "${LLVM_PATH_TWOPI}"
/* Installation prefix directory */
-#undef LLVM_PREFIX
+#cmakedefine LLVM_PREFIX "${LLVM_PREFIX}"
/* Define if the OS needs help to load dependent libraries for dlopen(). */
#cmakedefine LTDL_DLOPEN_DEPLIBS ${LTDL_DLOPEN_DEPLIBS}
@@ -617,5 +626,16 @@
/* Define to a function implementing strdup */
#cmakedefine strdup ${strdup}
-/* Native LLVM architecture */
-#cmakedefine LLVM_NATIVE_ARCH ${LLVM_NATIVE_ARCH}Target
+/* LLVM architecture name for the native architecture, if available */
+#cmakedefine LLVM_NATIVE_ARCH ${LLVM_NATIVE_ARCH}
+
+/* LLVM name for the native Target init function, if available */
+#cmakedefine LLVM_NATIVE_TARGET LLVMInitialize${LLVM_NATIVE_ARCH}Target
+
+/* LLVM name for the native TargetInfo init function, if available */
+#cmakedefine LLVM_NATIVE_TARGETINFO LLVMInitialize${LLVM_NATIVE_ARCH}TargetInfo
+
+/* LLVM name for the native AsmPrinter init function, if available */
+#cmakedefine LLVM_NATIVE_ASMPRINTER LLVMInitialize${LLVM_NATIVE_ARCH}AsmPrinter
+
+#endif
diff --git a/libclamav/c++/llvm/include/llvm/Config/config.h.in b/libclamav/c++/llvm/include/llvm/Config/config.h.in
index 3016da8..a4cebb3 100644
--- a/libclamav/c++/llvm/include/llvm/Config/config.h.in
+++ b/libclamav/c++/llvm/include/llvm/Config/config.h.in
@@ -1,5 +1,8 @@
/* include/llvm/Config/config.h.in. Generated from autoconf/configure.ac by autoheader. */
+#ifndef CONFIG_H
+#define CONFIG_H
+
/* Define if building universal (internal helper macro) */
#undef AC_APPLE_UNIVERSAL_BUILD
@@ -27,6 +30,9 @@
/* Define if threads enabled */
#undef ENABLE_THREADS
+/* Define if timestamp information (e.g., __DATE__) is allowed */
+#undef ENABLE_TIMESTAMPS
+
/* Define to 1 if you have the `argz_append' function. */
#undef HAVE_ARGZ_APPEND
@@ -63,6 +69,9 @@
/* Define to 1 if you have the `closedir' function. */
#undef HAVE_CLOSEDIR
+/* Define to 1 if you have the <CrashReporterClient.h> header file. */
+#undef HAVE_CRASHREPORTERCLIENT_H
+
/* Define to 1 if you have the <ctype.h> header file. */
#undef HAVE_CTYPE_H
@@ -267,6 +276,9 @@
/* Define to 1 if you have the `opendir' function. */
#undef HAVE_OPENDIR
+/* Define to 1 if you have the `posix_spawn' function. */
+#undef HAVE_POSIX_SPAWN
+
/* Define to 1 if you have the `powf' function. */
#undef HAVE_POWF
@@ -443,12 +455,18 @@
/* Define to 1 if the system has the type `u_int64_t'. */
#undef HAVE_U_INT64_T
+/* Define to 1 if you have the <valgrind/valgrind.h> header file. */
+#undef HAVE_VALGRIND_VALGRIND_H
+
/* Define to 1 if you have the <windows.h> header file. */
#undef HAVE_WINDOWS_H
/* Define to 1 if you have the `__dso_handle' function. */
#undef HAVE___DSO_HANDLE
+/* Linker version detected at compile time. */
+#undef HOST_LINK_VERSION
+
/* Installation directory for binary executables */
#undef LLVM_BINDIR
@@ -485,6 +503,15 @@
/* LLVM architecture name for the native architecture, if available */
#undef LLVM_NATIVE_ARCH
+/* LLVM name for the native AsmPrinter init function, if available */
+#undef LLVM_NATIVE_ASMPRINTER
+
+/* LLVM name for the native Target init function, if available */
+#undef LLVM_NATIVE_TARGET
+
+/* LLVM name for the native TargetInfo init function, if available */
+#undef LLVM_NATIVE_TARGETINFO
+
/* Define if this is Unixish platform */
#undef LLVM_ON_UNIX
@@ -604,3 +631,5 @@
/* Define to `unsigned int' if <sys/types.h> does not define. */
#undef size_t
+
+#endif
diff --git a/libclamav/c++/llvm/include/llvm/Config/llvm-config.h.cmake b/libclamav/c++/llvm/include/llvm/Config/llvm-config.h.cmake
new file mode 100644
index 0000000..8469bcc
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/Config/llvm-config.h.cmake
@@ -0,0 +1,97 @@
+/*===-- llvm/config/llvm-config.h - llvm configure variable -------*- C -*-===*/
+/* */
+/* The LLVM Compiler Infrastructure */
+/* */
+/* This file is distributed under the University of Illinois Open Source */
+/* License. See LICENSE.TXT for details. */
+/* */
+/*===----------------------------------------------------------------------===*/
+
+/* This file enumerates all of the llvm variables from configure so that
+ they can be in exported headers and won't override package specific
+ directives. This is a C file so we can include it in the llvm-c headers. */
+
+/* To avoid multiple inclusions of these variables when we include the exported
+ headers and config.h, conditionally include these. */
+/* TODO: This is a bit of a hack. */
+#ifndef CONFIG_H
+
+/* Installation directory for binary executables */
+#cmakedefine LLVM_BINDIR "${LLVM_BINDIR}"
+
+/* Time at which LLVM was configured */
+#cmakedefine LLVM_CONFIGTIME "${LLVM_CONFIGTIME}"
+
+/* Installation directory for data files */
+#cmakedefine LLVM_DATADIR "${LLVM_DATADIR}"
+
+/* Installation directory for documentation */
+#cmakedefine LLVM_DOCSDIR "${LLVM_DOCSDIR}"
+
+/* Installation directory for config files */
+#cmakedefine LLVM_ETCDIR "${LLVM_ETCDIR}"
+
+/* Host triple we were built on */
+#cmakedefine LLVM_HOSTTRIPLE "${LLVM_HOSTTRIPLE}"
+
+/* Installation directory for include files */
+#cmakedefine LLVM_INCLUDEDIR "${LLVM_INCLUDEDIR}"
+
+/* Installation directory for .info files */
+#cmakedefine LLVM_INFODIR "${LLVM_INFODIR}"
+
+/* Installation directory for libraries */
+#cmakedefine LLVM_LIBDIR "${LLVM_LIBDIR}"
+
+/* Installation directory for man pages */
+#cmakedefine LLVM_MANDIR "${LLVM_MANDIR}"
+
+/* Build multithreading support into LLVM */
+#cmakedefine LLVM_MULTITHREADED
+
+/* LLVM architecture name for the native architecture, if available */
+#cmakedefine LLVM_NATIVE_ARCH ${LLVM_NATIVE_ARCH}
+
+/* LLVM name for the native Target init function, if available */
+#cmakedefine LLVM_NATIVE_TARGET LLVMInitialize${LLVM_NATIVE_ARCH}Target
+
+/* LLVM name for the native TargetInfo init function, if available */
+#cmakedefine LLVM_NATIVE_TARGETINFO LLVMInitialize${LLVM_NATIVE_ARCH}TargetInfo
+
+/* LLVM name for the native AsmPrinter init function, if available */
+#cmakedefine LLVM_NATIVE_ASMPRINTER LLVMInitialize${LLVM_NATIVE_ARCH}AsmPrinter
+
+/* Define if this is Unixish platform */
+#cmakedefine LLVM_ON_UNIX
+
+/* Define if this is Win32ish platform */
+#cmakedefine LLVM_ON_WIN32
+
+/* Define to path to circo program if found or 'echo circo' otherwise */
+#cmakedefine LLVM_PATH_CIRCO "${LLVM_PATH_CIRCO}"
+
+/* Define to path to dot program if found or 'echo dot' otherwise */
+#cmakedefine LLVM_PATH_DOT "${LLVM_PATH_DOT}"
+
+/* Define to path to dotty program if found or 'echo dotty' otherwise */
+#cmakedefine LLVM_PATH_DOTTY "${LLVM_PATH_DOTTY}"
+
+/* Define to path to fdp program if found or 'echo fdp' otherwise */
+#cmakedefine LLVM_PATH_FDP "${LLVM_PATH_FDP}"
+
+/* Define to path to Graphviz program if found or 'echo Graphviz' otherwise */
+#cmakedefine LLVM_PATH_GRAPHVIZ "${LLVM_PATH_GRAPHVIZ}"
+
+/* Define to path to gv program if found or 'echo gv' otherwise */
+#cmakedefine LLVM_PATH_GV "${LLVM_PATH_GV}"
+
+/* Define to path to neato program if found or 'echo neato' otherwise */
+#cmakedefine LLVM_PATH_NEATO "${LLVM_PATH_NEATO}"
+
+/* Define to path to twopi program if found or 'echo twopi' otherwise */
+#cmakedefine LLVM_PATH_TWOPI "${LLVM_PATH_TWOPI}"
+
+/* Installation prefix directory */
+#cmakedefine LLVM_PREFIX "${LLVM_PREFIX}"
+
+#endif
diff --git a/libclamav/c++/llvm/include/llvm/Config/llvm-config.h.in b/libclamav/c++/llvm/include/llvm/Config/llvm-config.h.in
new file mode 100644
index 0000000..e7a04ee
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/Config/llvm-config.h.in
@@ -0,0 +1,97 @@
+/*===-- llvm/config/llvm-config.h - llvm configure variable -------*- C -*-===*/
+/* */
+/* The LLVM Compiler Infrastructure */
+/* */
+/* This file is distributed under the University of Illinois Open Source */
+/* License. See LICENSE.TXT for details. */
+/* */
+/*===----------------------------------------------------------------------===*/
+
+/* This file enumerates all of the llvm variables from configure so that
+ they can be in exported headers and won't override package specific
+ directives. This is a C file so we can include it in the llvm-c headers. */
+
+/* To avoid multiple inclusions of these variables when we include the exported
+ headers and config.h, conditionally include these. */
+/* TODO: This is a bit of a hack. */
+#ifndef CONFIG_H
+
+/* Installation directory for binary executables */
+#undef LLVM_BINDIR
+
+/* Time at which LLVM was configured */
+#undef LLVM_CONFIGTIME
+
+/* Installation directory for data files */
+#undef LLVM_DATADIR
+
+/* Installation directory for documentation */
+#undef LLVM_DOCSDIR
+
+/* Installation directory for config files */
+#undef LLVM_ETCDIR
+
+/* Host triple we were built on */
+#undef LLVM_HOSTTRIPLE
+
+/* Installation directory for include files */
+#undef LLVM_INCLUDEDIR
+
+/* Installation directory for .info files */
+#undef LLVM_INFODIR
+
+/* Installation directory for libraries */
+#undef LLVM_LIBDIR
+
+/* Installation directory for man pages */
+#undef LLVM_MANDIR
+
+/* Build multithreading support into LLVM */
+#undef LLVM_MULTITHREADED
+
+/* LLVM architecture name for the native architecture, if available */
+#undef LLVM_NATIVE_ARCH
+
+/* LLVM name for the native Target init function, if available */
+#undef LLVM_NATIVE_TARGET
+
+/* LLVM name for the native TargetInfo init function, if available */
+#undef LLVM_NATIVE_TARGETINFO
+
+/* LLVM name for the native AsmPrinter init function, if available */
+#undef LLVM_NATIVE_ASMPRINTER
+
+/* Define if this is Unixish platform */
+#undef LLVM_ON_UNIX
+
+/* Define if this is Win32ish platform */
+#undef LLVM_ON_WIN32
+
+/* Define to path to circo program if found or 'echo circo' otherwise */
+#undef LLVM_PATH_CIRCO
+
+/* Define to path to dot program if found or 'echo dot' otherwise */
+#undef LLVM_PATH_DOT
+
+/* Define to path to dotty program if found or 'echo dotty' otherwise */
+#undef LLVM_PATH_DOTTY
+
+/* Define to path to fdp program if found or 'echo fdp' otherwise */
+#undef LLVM_PATH_FDP
+
+/* Define to path to Graphviz program if found or 'echo Graphviz' otherwise */
+#undef LLVM_PATH_GRAPHVIZ
+
+/* Define to path to gv program if found or 'echo gv' otherwise */
+#undef LLVM_PATH_GV
+
+/* Define to path to neato program if found or 'echo neato' otherwise */
+#undef LLVM_PATH_NEATO
+
+/* Define to path to twopi program if found or 'echo twopi' otherwise */
+#undef LLVM_PATH_TWOPI
+
+/* Installation prefix directory */
+#undef LLVM_PREFIX
+
+#endif
diff --git a/libclamav/c++/llvm/include/llvm/Constants.h b/libclamav/c++/llvm/include/llvm/Constants.h
index 1cebb20..a7deae0 100644
--- a/libclamav/c++/llvm/include/llvm/Constants.h
+++ b/libclamav/c++/llvm/include/llvm/Constants.h
@@ -33,7 +33,6 @@ namespace llvm {
class ArrayType;
class IntegerType;
class StructType;
-class UnionType;
class PointerType;
class VectorType;
@@ -459,49 +458,6 @@ struct OperandTraits<ConstantStruct> : public VariadicOperandTraits<> {
DEFINE_TRANSPARENT_CASTED_OPERAND_ACCESSORS(ConstantStruct, Constant)
-//===----------------------------------------------------------------------===//
-// ConstantUnion - Constant Union Declarations
-//
-class ConstantUnion : public Constant {
- friend struct ConstantCreator<ConstantUnion, UnionType, Constant*>;
- ConstantUnion(const ConstantUnion &); // DO NOT IMPLEMENT
-protected:
- ConstantUnion(const UnionType *T, Constant* Val);
-public:
- // ConstantUnion accessors
- static Constant *get(const UnionType *T, Constant* V);
-
- /// Transparently provide more efficient getOperand methods.
- DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
-
- /// getType() specialization - Reduce amount of casting...
- ///
- inline const UnionType *getType() const {
- return reinterpret_cast<const UnionType*>(Value::getType());
- }
-
- /// isNullValue - Return true if this is the value that would be returned by
- /// getNullValue. This always returns false because zero structs are always
- /// created as ConstantAggregateZero objects.
- virtual bool isNullValue() const {
- return false;
- }
-
- virtual void destroyConstant();
- virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U);
-
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const ConstantUnion *) { return true; }
- static bool classof(const Value *V) {
- return V->getValueID() == ConstantUnionVal;
- }
-};
-
-template <>
-struct OperandTraits<ConstantUnion> : public FixedNumOperandTraits<1> {
-};
-
-DEFINE_TRANSPARENT_CASTED_OPERAND_ACCESSORS(ConstantUnion, Constant)
//===----------------------------------------------------------------------===//
/// ConstantVector - Constant Vector Declarations
@@ -691,9 +647,6 @@ public:
// ConstantExpr class, because they will attempt to fold the constant
// expression into something simpler if possible.
- /// Cast constant expr
- ///
-
/// getAlignOf constant expr - computes the alignment of a type in a target
/// independent way (Note: the return type is an i64).
static Constant *getAlignOf(const Type* Ty);
@@ -926,7 +879,11 @@ DEFINE_TRANSPARENT_CASTED_OPERAND_ACCESSORS(ConstantExpr, Constant)
/// UndefValue - 'undef' values are things that do not have specified contents.
/// These are used for a variety of purposes, including global variable
/// initializers and operands to instructions. 'undef' values can occur with
-/// any type.
+/// any first-class type.
+///
+/// Undef values aren't exactly constants; if they have multiple uses, they
+/// can appear to have different bit patterns at each use. See
+/// LangRef.html#undefvalues for details.
///
class UndefValue : public Constant {
friend struct ConstantCreator<UndefValue, Type, char>;
@@ -957,6 +914,7 @@ public:
return V->getValueID() == UndefValueVal;
}
};
+
} // End llvm namespace
#endif
diff --git a/libclamav/c++/llvm/include/llvm/DerivedTypes.h b/libclamav/c++/llvm/include/llvm/DerivedTypes.h
index 912bb6d..9b6b19f 100644
--- a/libclamav/c++/llvm/include/llvm/DerivedTypes.h
+++ b/libclamav/c++/llvm/include/llvm/DerivedTypes.h
@@ -27,7 +27,6 @@ template<class ValType, class TypeClass> class TypeMap;
class FunctionValType;
class ArrayValType;
class StructValType;
-class UnionValType;
class PointerValType;
class VectorValType;
class IntegerValType;
@@ -52,10 +51,6 @@ protected:
///
void dropAllTypeUses();
- /// unlockedRefineAbstractTypeTo - Internal version of refineAbstractTypeTo
- /// that performs no locking. Only used for internal recursion.
- void unlockedRefineAbstractTypeTo(const Type *NewType);
-
public:
//===--------------------------------------------------------------------===//
@@ -230,8 +225,7 @@ public:
return T->getTypeID() == ArrayTyID ||
T->getTypeID() == StructTyID ||
T->getTypeID() == PointerTyID ||
- T->getTypeID() == VectorTyID ||
- T->getTypeID() == UnionTyID;
+ T->getTypeID() == VectorTyID;
}
};
@@ -302,64 +296,6 @@ public:
bool isPacked() const { return (0 != getSubclassData()) ? true : false; }
};
-
-/// UnionType - Class to represent union types. A union type is similar to
-/// a structure, except that all member fields begin at offset 0.
-///
-class UnionType : public CompositeType {
- friend class TypeMap<UnionValType, UnionType>;
- UnionType(const UnionType &); // Do not implement
- const UnionType &operator=(const UnionType &); // Do not implement
- UnionType(LLVMContext &C, const Type* const* Types, unsigned NumTypes);
-public:
- /// UnionType::get - This static method is the primary way to create a
- /// UnionType.
- static UnionType *get(const Type* const* Types, unsigned NumTypes);
-
- /// UnionType::get - This static method is a convenience method for
- /// creating union types by specifying the elements as arguments.
- static UnionType *get(const Type *type, ...) END_WITH_NULL;
-
- /// isValidElementType - Return true if the specified type is valid as a
- /// element type.
- static bool isValidElementType(const Type *ElemTy);
-
- /// Given an element type, return the member index of that type, or -1
- /// if there is no such member type.
- int getElementTypeIndex(const Type *ElemTy) const;
-
- // Iterator access to the elements
- typedef Type::subtype_iterator element_iterator;
- element_iterator element_begin() const { return ContainedTys; }
- element_iterator element_end() const { return &ContainedTys[NumContainedTys];}
-
- // Random access to the elements
- unsigned getNumElements() const { return NumContainedTys; }
- const Type *getElementType(unsigned N) const {
- assert(N < NumContainedTys && "Element number out of range!");
- return ContainedTys[N];
- }
-
- /// getTypeAtIndex - Given an index value into the type, return the type of
- /// the element. For a union type, this must be a constant value...
- ///
- virtual const Type *getTypeAtIndex(const Value *V) const;
- virtual const Type *getTypeAtIndex(unsigned Idx) const;
- virtual bool indexValid(const Value *V) const;
- virtual bool indexValid(unsigned Idx) const;
-
- // Implement the AbstractTypeUser interface.
- virtual void refineAbstractType(const DerivedType *OldTy, const Type *NewTy);
- virtual void typeBecameConcrete(const DerivedType *AbsTy);
-
- // Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const UnionType *) { return true; }
- static inline bool classof(const Type *T) {
- return T->getTypeID() == UnionTyID;
- }
-};
-
-
/// SequentialType - This is the superclass of the array, pointer and vector
/// type classes. All of these represent "arrays" in memory. The array type
/// represents a specifically sized array, pointer types are unsized/unknown
diff --git a/libclamav/c++/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h b/libclamav/c++/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
index c3f1902..3287b39 100644
--- a/libclamav/c++/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
+++ b/libclamav/c++/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
@@ -174,8 +174,8 @@ public:
CodeGenOpt::Level OptLevel =
CodeGenOpt::Default,
bool GVsWithCode = true,
- CodeModel::Model CMM =
- CodeModel::Default);
+ CodeModel::Model CMM =
+ CodeModel::Default);
/// addModule - Add a Module to the list of modules that we can JIT from.
/// Note that this takes ownership of the Module: when the ExecutionEngine is
diff --git a/libclamav/c++/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h b/libclamav/c++/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h
index fd51920..e015930 100644
--- a/libclamav/c++/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h
+++ b/libclamav/c++/llvm/include/llvm/ExecutionEngine/JITMemoryManager.h
@@ -29,10 +29,9 @@ namespace llvm {
class JITMemoryManager {
protected:
bool HasGOT;
- bool SizeRequired;
public:
- JITMemoryManager() : HasGOT(false), SizeRequired(false) {}
+ JITMemoryManager() : HasGOT(false) {}
virtual ~JITMemoryManager();
/// CreateDefaultMemManager - This is used to create the default
@@ -71,12 +70,6 @@ public:
/// return a pointer to its base.
virtual uint8_t *getGOTBase() const = 0;
- /// NeedsExactSize - If the memory manager requires to know the size of the
- /// objects to be emitted
- bool NeedsExactSize() const {
- return SizeRequired;
- }
-
//===--------------------------------------------------------------------===//
// Main Allocation Functions
//===--------------------------------------------------------------------===//
diff --git a/libclamav/c++/llvm/include/llvm/Function.h b/libclamav/c++/llvm/include/llvm/Function.h
index 3882233..2b19fa5 100644
--- a/libclamav/c++/llvm/include/llvm/Function.h
+++ b/libclamav/c++/llvm/include/llvm/Function.h
@@ -409,8 +409,11 @@ public:
void dropAllReferences();
/// hasAddressTaken - returns true if there are any uses of this function
- /// other than direct calls or invokes to it.
- bool hasAddressTaken() const;
+ /// other than direct calls or invokes to it. Optionally passes back the
+ /// offending user for diagnostic purposes.
+ ///
+ bool hasAddressTaken(const User** = 0) const;
+
private:
// Shadow Value::setValueSubclassData with a private forwarding method so that
// subclasses cannot accidentally use it.
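
For illustration, a sketch of the new diagnostic out-parameter (FindAddressTaker is a hypothetical helper, assuming namespace llvm): when a non-null pointer is passed, it is set to the offending user if the address is taken.

static const User *FindAddressTaker(const Function *F) {
  const User *Offender = 0;
  if (F->hasAddressTaken(&Offender))
    return Offender;             // a use that is not a direct call or invoke
  return 0;                      // the address is never taken
}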
diff --git a/libclamav/c++/llvm/include/llvm/GlobalValue.h b/libclamav/c++/llvm/include/llvm/GlobalValue.h
index 658967d..62e84f8 100644
--- a/libclamav/c++/llvm/include/llvm/GlobalValue.h
+++ b/libclamav/c++/llvm/include/llvm/GlobalValue.h
@@ -40,6 +40,9 @@ public:
InternalLinkage, ///< Rename collisions when linking (static functions).
PrivateLinkage, ///< Like Internal, but omit from symbol table.
LinkerPrivateLinkage, ///< Like Private, but linker removes.
+ LinkerPrivateWeakLinkage, ///< Like LinkerPrivate, but weak.
+ LinkerPrivateWeakDefAutoLinkage, ///< Like LinkerPrivateWeak, but possibly
+ /// hidden.
DLLImportLinkage, ///< Function to be imported from DLL
DLLExportLinkage, ///< Function to be accessible from DLL.
ExternalWeakLinkage,///< ExternalWeak linkage description.
@@ -73,11 +76,10 @@ public:
removeDeadConstantUsers(); // remove any dead constants using this.
}
- unsigned getAlignment() const { return Alignment; }
- void setAlignment(unsigned Align) {
- assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
- Alignment = Align;
+ unsigned getAlignment() const {
+ return (1u << Alignment) >> 1;
}
+ void setAlignment(unsigned Align);
VisibilityTypes getVisibility() const { return VisibilityTypes(Visibility); }
bool hasDefaultVisibility() const { return Visibility == DefaultVisibility; }
@@ -132,11 +134,18 @@ public:
return Linkage == PrivateLinkage;
}
static bool isLinkerPrivateLinkage(LinkageTypes Linkage) {
- return Linkage==LinkerPrivateLinkage;
+ return Linkage == LinkerPrivateLinkage;
+ }
+ static bool isLinkerPrivateWeakLinkage(LinkageTypes Linkage) {
+ return Linkage == LinkerPrivateWeakLinkage;
+ }
+ static bool isLinkerPrivateWeakDefAutoLinkage(LinkageTypes Linkage) {
+ return Linkage == LinkerPrivateWeakDefAutoLinkage;
}
static bool isLocalLinkage(LinkageTypes Linkage) {
return isInternalLinkage(Linkage) || isPrivateLinkage(Linkage) ||
- isLinkerPrivateLinkage(Linkage);
+ isLinkerPrivateLinkage(Linkage) || isLinkerPrivateWeakLinkage(Linkage) ||
+ isLinkerPrivateWeakDefAutoLinkage(Linkage);
}
static bool isDLLImportLinkage(LinkageTypes Linkage) {
return Linkage == DLLImportLinkage;
@@ -155,22 +164,26 @@ public:
/// by something non-equivalent at link time. For example, if a function has
/// weak linkage then the code defining it may be replaced by different code.
static bool mayBeOverridden(LinkageTypes Linkage) {
- return (Linkage == WeakAnyLinkage ||
- Linkage == LinkOnceAnyLinkage ||
- Linkage == CommonLinkage ||
- Linkage == ExternalWeakLinkage);
+ return Linkage == WeakAnyLinkage ||
+ Linkage == LinkOnceAnyLinkage ||
+ Linkage == CommonLinkage ||
+ Linkage == ExternalWeakLinkage ||
+ Linkage == LinkerPrivateWeakLinkage ||
+ Linkage == LinkerPrivateWeakDefAutoLinkage;
}
/// isWeakForLinker - Whether the definition of this global may be replaced at
/// link time.
static bool isWeakForLinker(LinkageTypes Linkage) {
- return (Linkage == AvailableExternallyLinkage ||
- Linkage == WeakAnyLinkage ||
- Linkage == WeakODRLinkage ||
- Linkage == LinkOnceAnyLinkage ||
- Linkage == LinkOnceODRLinkage ||
- Linkage == CommonLinkage ||
- Linkage == ExternalWeakLinkage);
+ return Linkage == AvailableExternallyLinkage ||
+ Linkage == WeakAnyLinkage ||
+ Linkage == WeakODRLinkage ||
+ Linkage == LinkOnceAnyLinkage ||
+ Linkage == LinkOnceODRLinkage ||
+ Linkage == CommonLinkage ||
+ Linkage == ExternalWeakLinkage ||
+ Linkage == LinkerPrivateWeakLinkage ||
+ Linkage == LinkerPrivateWeakDefAutoLinkage;
}
bool hasExternalLinkage() const { return isExternalLinkage(Linkage); }
@@ -187,6 +200,12 @@ public:
bool hasInternalLinkage() const { return isInternalLinkage(Linkage); }
bool hasPrivateLinkage() const { return isPrivateLinkage(Linkage); }
bool hasLinkerPrivateLinkage() const { return isLinkerPrivateLinkage(Linkage); }
+ bool hasLinkerPrivateWeakLinkage() const {
+ return isLinkerPrivateWeakLinkage(Linkage);
+ }
+ bool hasLinkerPrivateWeakDefAutoLinkage() const {
+ return isLinkerPrivateWeakDefAutoLinkage(Linkage);
+ }
bool hasLocalLinkage() const { return isLocalLinkage(Linkage); }
bool hasDLLImportLinkage() const { return isDLLImportLinkage(Linkage); }
bool hasDLLExportLinkage() const { return isDLLExportLinkage(Linkage); }
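
Note on the getAlignment() change above: the alignment is now kept as a log2-encoded bitfield. Assuming the out-of-line setAlignment() stores Log2(Align)+1, which is what the getter shown above implies, the round trip looks like this (GV is a hypothetical GlobalVariable*):

    GV->setAlignment(16);              // bitfield holds log2(16)+1 == 5
    unsigned A = GV->getAlignment();   // (1u << 5) >> 1 == 16
    // an unset alignment (bitfield == 0) reads back as (1u << 0) >> 1 == 0
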
diff --git a/libclamav/c++/llvm/include/llvm/InlineAsm.h b/libclamav/c++/llvm/include/llvm/InlineAsm.h
index 2ac0fca..105b1bc 100644
--- a/libclamav/c++/llvm/include/llvm/InlineAsm.h
+++ b/libclamav/c++/llvm/include/llvm/InlineAsm.h
@@ -146,6 +146,50 @@ public:
return V->getValueID() == Value::InlineAsmVal;
}
+
+ // These are helper methods for dealing with flags in the INLINEASM SDNode
+ // in the backend.
+
+ enum {
+ Op_InputChain = 0,
+ Op_AsmString = 1,
+ Op_MDNode = 2,
+ Op_IsAlignStack = 3,
+ Op_FirstOperand = 4,
+
+ Kind_RegUse = 1,
+ Kind_RegDef = 2,
+ Kind_Imm = 3,
+ Kind_Mem = 4,
+ Kind_RegDefEarlyClobber = 6,
+
+ Flag_MatchingOperand = 0x80000000
+ };
+
+ static unsigned getFlagWord(unsigned Kind, unsigned NumOps) {
+ assert(((NumOps << 3) & ~0xffff) == 0 && "Too many inline asm operands!");
+ return Kind | (NumOps << 3);
+ }
+
+ /// getFlagWordForMatchingOp - Augment an existing flag word returned by
+ /// getFlagWord with information indicating that this input operand is tied
+ /// to a previous output operand.
+ static unsigned getFlagWordForMatchingOp(unsigned InputFlag,
+ unsigned MatchedOperandNo) {
+ return InputFlag | Flag_MatchingOperand | (MatchedOperandNo << 16);
+ }
+
+ static unsigned getKind(unsigned Flags) {
+ return Flags & 7;
+ }
+
+ static bool isRegDefKind(unsigned Flag){ return getKind(Flag) == Kind_RegDef;}
+ static bool isImmKind(unsigned Flag) { return getKind(Flag) == Kind_Imm; }
+ static bool isMemKind(unsigned Flag) { return getKind(Flag) == Kind_Mem; }
+ static bool isRegDefEarlyClobberKind(unsigned Flag) {
+ return getKind(Flag) == Kind_RegDefEarlyClobber;
+ }
+
/// getNumOperandRegisters - Extract the number of registers field from the
/// inline asm operand flag.
static unsigned getNumOperandRegisters(unsigned Flag) {
@@ -155,9 +199,9 @@ public:
/// isUseOperandTiedToDef - Return true if the flag of the inline asm
/// operand indicates it is an use operand that's matched to a def operand.
static bool isUseOperandTiedToDef(unsigned Flag, unsigned &Idx) {
- if ((Flag & 0x80000000) == 0)
+ if ((Flag & Flag_MatchingOperand) == 0)
return false;
- Idx = (Flag & ~0x80000000) >> 16;
+ Idx = (Flag & ~Flag_MatchingOperand) >> 16;
return true;
}
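
Note on the new flag-word helpers above: the operand kind sits in the low 3 bits, the register count in bits 3..15, and a matched-operand index in bits 16..30 behind Flag_MatchingOperand. A small worked example using only the functions declared above:

    unsigned Def = InlineAsm::getFlagWord(InlineAsm::Kind_RegDef, 2); // 2 | (2<<3) == 18
    assert(InlineAsm::isRegDefKind(Def));
    assert(InlineAsm::getNumOperandRegisters(Def) == 2);

    unsigned Use = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, 2);
    Use = InlineAsm::getFlagWordForMatchingOp(Use, 0);   // tie to output #0
    unsigned Idx;
    assert(InlineAsm::isUseOperandTiedToDef(Use, Idx) && Idx == 0);
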
diff --git a/libclamav/c++/llvm/include/llvm/InstrTypes.h b/libclamav/c++/llvm/include/llvm/InstrTypes.h
index 49cdd6a..6715416 100644
--- a/libclamav/c++/llvm/include/llvm/InstrTypes.h
+++ b/libclamav/c++/llvm/include/llvm/InstrTypes.h
@@ -612,7 +612,7 @@ public:
/// A lossless cast is one that does not alter the basic value. It implies
/// a no-op cast but is more stringent, preventing things like int->float,
- /// long->double, int->ptr, or vector->anything.
+ /// long->double, or int->ptr.
/// @returns true iff the cast is lossless.
/// @brief Determine if this is a lossless cast.
bool isLosslessCast() const;
@@ -625,6 +625,14 @@ public:
/// platform. Generally, the result of TargetData::getIntPtrType() should be
/// passed in. If that's not available, use Type::Int64Ty, which will make
/// the isNoopCast call conservative.
+ /// @brief Determine if the described cast is a no-op cast.
+ static bool isNoopCast(
+ Instruction::CastOps Opcode, ///< Opcode of cast
+ const Type *SrcTy, ///< SrcTy of cast
+ const Type *DstTy, ///< DstTy of cast
+ const Type *IntPtrTy ///< Integer type corresponding to Ptr types, or null
+ );
+
/// @brief Determine if this cast is a no-op cast.
bool isNoopCast(
const Type *IntPtrTy ///< Integer type corresponding to pointer
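
Note on the new static isNoopCast overload above: it lets a prospective cast be classified before any instruction is created. A sketch, assuming a TargetData pointer TD, an LLVMContext Ctx, and the types SrcTy/DstTy are available (all hypothetical here):

    bool NoOp = CastInst::isNoopCast(Instruction::PtrToInt, SrcTy, DstTy,
                                     TD->getIntPtrType(Ctx));
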
diff --git a/libclamav/c++/llvm/include/llvm/Instruction.h b/libclamav/c++/llvm/include/llvm/Instruction.h
index cf9dc44..88f5ce1 100644
--- a/libclamav/c++/llvm/include/llvm/Instruction.h
+++ b/libclamav/c++/llvm/include/llvm/Instruction.h
@@ -17,6 +17,7 @@
#include "llvm/User.h"
#include "llvm/ADT/ilist_node.h"
+#include "llvm/Support/DebugLoc.h"
namespace llvm {
@@ -31,6 +32,7 @@ class Instruction : public User, public ilist_node<Instruction> {
Instruction(const Instruction &); // Do not implement
BasicBlock *Parent;
+ DebugLoc DbgLoc; // 'dbg' Metadata cache.
enum {
/// HasMetadataBit - This is a bit stored in the SubClassData field which
@@ -123,7 +125,13 @@ public:
/// hasMetadata() - Return true if this instruction has any metadata attached
/// to it.
bool hasMetadata() const {
- return (getSubclassDataFromValue() & HasMetadataBit) != 0;
+ return !DbgLoc.isUnknown() || hasMetadataHashEntry();
+ }
+
+ /// hasMetadataOtherThanDebugLoc - Return true if this instruction has
+ /// metadata attached to it other than a debug location.
+ bool hasMetadataOtherThanDebugLoc() const {
+ return hasMetadataHashEntry();
}
/// getMetadata - Get the metadata of given kind attached to this Instruction.
@@ -148,18 +156,40 @@ public:
getAllMetadataImpl(MDs);
}
+ /// getAllMetadataOtherThanDebugLoc - This does the same thing as
+ /// getAllMetadata, except that it filters out the debug location.
+ void getAllMetadataOtherThanDebugLoc(SmallVectorImpl<std::pair<unsigned,
+ MDNode*> > &MDs) const {
+ if (hasMetadataOtherThanDebugLoc())
+ getAllMetadataOtherThanDebugLocImpl(MDs);
+ }
+
/// setMetadata - Set the metadata of the specified kind to the specified
/// node. This updates/replaces metadata if already present, or removes it if
/// Node is null.
void setMetadata(unsigned KindID, MDNode *Node);
void setMetadata(const char *Kind, MDNode *Node);
+ /// setDebugLoc - Set the debug location information for this instruction.
+ void setDebugLoc(const DebugLoc &Loc) { DbgLoc = Loc; }
+
+ /// getDebugLoc - Return the debug location for this node as a DebugLoc.
+ const DebugLoc &getDebugLoc() const { return DbgLoc; }
+
private:
+ /// hasMetadataHashEntry - Return true if we have an entry in the on-the-side
+ /// metadata hash.
+ bool hasMetadataHashEntry() const {
+ return (getSubclassDataFromValue() & HasMetadataBit) != 0;
+ }
+
// These are all implemented in Metadata.cpp.
MDNode *getMetadataImpl(unsigned KindID) const;
MDNode *getMetadataImpl(const char *Kind) const;
void getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned,MDNode*> > &)const;
- void removeAllMetadata();
+ void getAllMetadataOtherThanDebugLocImpl(SmallVectorImpl<std::pair<unsigned,
+ MDNode*> > &) const;
+ void clearMetadataHashEntries();
public:
//===--------------------------------------------------------------------===//
// Predicates and helper methods.
@@ -315,7 +345,7 @@ private:
return Value::getSubclassDataFromValue();
}
- void setHasMetadata(bool V) {
+ void setHasMetadataHashEntry(bool V) {
setValueSubclassData((getSubclassDataFromValue() & ~HasMetadataBit) |
(V ? HasMetadataBit : 0));
}
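
Note on the Instruction changes above: debug locations now live in the dedicated DbgLoc slot rather than in the metadata hash, so copying or testing them is cheap. An illustrative fragment (OldI and NewI are hypothetical Instruction pointers):

    NewI->setDebugLoc(OldI->getDebugLoc());           // preserve the !dbg location
    bool HasLoc = !NewI->getDebugLoc().isUnknown();   // true once a location is set
    bool HasOtherMD = NewI->hasMetadataOtherThanDebugLoc();
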
diff --git a/libclamav/c++/llvm/include/llvm/Instructions.h b/libclamav/c++/llvm/include/llvm/Instructions.h
index f07291c..bd1e889 100644
--- a/libclamav/c++/llvm/include/llvm/Instructions.h
+++ b/libclamav/c++/llvm/include/llvm/Instructions.h
@@ -235,6 +235,9 @@ public:
void setAlignment(unsigned Align);
+ Value *getValueOperand() { return getOperand(0); }
+ const Value *getValueOperand() const { return getOperand(0); }
+
Value *getPointerOperand() { return getOperand(1); }
const Value *getPointerOperand() const { return getOperand(1); }
static unsigned getPointerOperandIndex() { return 1U; }
@@ -883,14 +886,14 @@ public:
InputIterator ArgBegin, InputIterator ArgEnd,
const Twine &NameStr = "",
Instruction *InsertBefore = 0) {
- return new((unsigned)(ArgEnd - ArgBegin + 1))
+ return new(unsigned(ArgEnd - ArgBegin + 1))
CallInst(Func, ArgBegin, ArgEnd, NameStr, InsertBefore);
}
template<typename InputIterator>
static CallInst *Create(Value *Func,
InputIterator ArgBegin, InputIterator ArgEnd,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
- return new((unsigned)(ArgEnd - ArgBegin + 1))
+ return new(unsigned(ArgEnd - ArgBegin + 1))
CallInst(Func, ArgBegin, ArgEnd, NameStr, InsertAtEnd);
}
static CallInst *Create(Value *F, Value *Actual,
@@ -919,6 +922,7 @@ public:
static Instruction *CreateMalloc(Instruction *InsertBefore,
const Type *IntPtrTy, const Type *AllocTy,
Value *AllocSize, Value *ArraySize = 0,
+ Function* MallocF = 0,
const Twine &Name = "");
static Instruction *CreateMalloc(BasicBlock *InsertAtEnd,
const Type *IntPtrTy, const Type *AllocTy,
@@ -926,7 +930,7 @@ public:
Function* MallocF = 0,
const Twine &Name = "");
/// CreateFree - Generate the IR for a call to the builtin free function.
- static void CreateFree(Value* Source, Instruction *InsertBefore);
+ static Instruction* CreateFree(Value* Source, Instruction *InsertBefore);
static Instruction* CreateFree(Value* Source, BasicBlock *InsertAtEnd);
~CallInst();
@@ -940,6 +944,15 @@ public:
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+ /// getNumArgOperands - Return the number of call arguments.
+ ///
+ unsigned getNumArgOperands() const { return getNumOperands() - 1; }
+
+ /// getArgOperand/setArgOperand - Return/set the i-th call argument.
+ ///
+ Value *getArgOperand(unsigned i) const { return getOperand(i); }
+ void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
+
/// getCallingConv/setCallingConv - Get or set the calling convention of this
/// function call.
CallingConv::ID getCallingConv() const {
@@ -971,6 +984,13 @@ public:
unsigned getParamAlignment(unsigned i) const {
return AttributeList.getParamAlignment(i);
}
+
+ /// @brief Return true if the call should not be inlined.
+ bool isNoInline() const { return paramHasAttr(~0, Attribute::NoInline); }
+ void setIsNoInline(bool Value = true) {
+ if (Value) addAttribute(~0, Attribute::NoInline);
+ else removeAttribute(~0, Attribute::NoInline);
+ }
/// @brief Determine if the call does not access memory.
bool doesNotAccessMemory() const {
@@ -991,18 +1011,14 @@ public:
}
/// @brief Determine if the call cannot return.
- bool doesNotReturn() const {
- return paramHasAttr(~0, Attribute::NoReturn);
- }
+ bool doesNotReturn() const { return paramHasAttr(~0, Attribute::NoReturn); }
void setDoesNotReturn(bool DoesNotReturn = true) {
if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn);
else removeAttribute(~0, Attribute::NoReturn);
}
/// @brief Determine if the call cannot unwind.
- bool doesNotThrow() const {
- return paramHasAttr(~0, Attribute::NoUnwind);
- }
+ bool doesNotThrow() const { return paramHasAttr(~0, Attribute::NoUnwind); }
void setDoesNotThrow(bool DoesNotThrow = true) {
if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind);
else removeAttribute(~0, Attribute::NoUnwind);
@@ -1024,17 +1040,22 @@ public:
/// indirect function invocation.
///
Function *getCalledFunction() const {
- return dyn_cast<Function>(Op<0>());
+ return dyn_cast<Function>(Op<-1>());
}
/// getCalledValue - Get a pointer to the function that is invoked by this
/// instruction.
- const Value *getCalledValue() const { return Op<0>(); }
- Value *getCalledValue() { return Op<0>(); }
+ const Value *getCalledValue() const { return Op<-1>(); }
+ Value *getCalledValue() { return Op<-1>(); }
/// setCalledFunction - Set the function called.
void setCalledFunction(Value* Fn) {
- Op<0>() = Fn;
+ Op<-1>() = Fn;
+ }
+
+ /// isInlineAsm - Check if this call is an inline asm statement.
+ bool isInlineAsm() const {
+ return isa<InlineAsm>(Op<-1>());
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -1064,7 +1085,7 @@ CallInst::CallInst(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd,
->getElementType())->getReturnType(),
Instruction::Call,
OperandTraits<CallInst>::op_end(this) - (ArgEnd - ArgBegin + 1),
- (unsigned)(ArgEnd - ArgBegin + 1), InsertAtEnd) {
+ unsigned(ArgEnd - ArgBegin + 1), InsertAtEnd) {
init(Func, ArgBegin, ArgEnd, NameStr,
typename std::iterator_traits<InputIterator>::iterator_category());
}
@@ -1076,11 +1097,15 @@ CallInst::CallInst(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd,
->getElementType())->getReturnType(),
Instruction::Call,
OperandTraits<CallInst>::op_end(this) - (ArgEnd - ArgBegin + 1),
- (unsigned)(ArgEnd - ArgBegin + 1), InsertBefore) {
+ unsigned(ArgEnd - ArgBegin + 1), InsertBefore) {
init(Func, ArgBegin, ArgEnd, NameStr,
typename std::iterator_traits<InputIterator>::iterator_category());
}
+
+// Note: if you get compile errors about private methods then
+// please update your code to use the high-level operand
+// interfaces. See line 943 above.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallInst, Value)
//===----------------------------------------------------------------------===//
@@ -1984,7 +2009,7 @@ public:
};
template <>
-struct OperandTraits<ReturnInst> : public OptionalOperandTraits<> {
+struct OperandTraits<ReturnInst> : public VariadicOperandTraits<> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
@@ -2425,6 +2450,15 @@ public:
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+ /// getNumArgOperands - Return the number of invoke arguments.
+ ///
+ unsigned getNumArgOperands() const { return getNumOperands() - 3; }
+
+ /// getArgOperand/setArgOperand - Return/set the i-th invoke argument.
+ ///
+ Value *getArgOperand(unsigned i) const { return getOperand(i); }
+ void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
+
/// getCallingConv/setCallingConv - Get or set the calling convention of this
/// function call.
CallingConv::ID getCallingConv() const {
@@ -2456,6 +2490,13 @@ public:
return AttributeList.getParamAlignment(i);
}
+ /// @brief Return true if the call should not be inlined.
+ bool isNoInline() const { return paramHasAttr(~0, Attribute::NoInline); }
+ void setIsNoInline(bool Value = true) {
+ if (Value) addAttribute(~0, Attribute::NoInline);
+ else removeAttribute(~0, Attribute::NoInline);
+ }
+
/// @brief Determine if the call does not access memory.
bool doesNotAccessMemory() const {
return paramHasAttr(~0, Attribute::ReadNone);
@@ -2475,18 +2516,14 @@ public:
}
/// @brief Determine if the call cannot return.
- bool doesNotReturn() const {
- return paramHasAttr(~0, Attribute::NoReturn);
- }
+ bool doesNotReturn() const { return paramHasAttr(~0, Attribute::NoReturn); }
void setDoesNotReturn(bool DoesNotReturn = true) {
if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn);
else removeAttribute(~0, Attribute::NoReturn);
}
/// @brief Determine if the call cannot unwind.
- bool doesNotThrow() const {
- return paramHasAttr(~0, Attribute::NoUnwind);
- }
+ bool doesNotThrow() const { return paramHasAttr(~0, Attribute::NoUnwind); }
void setDoesNotThrow(bool DoesNotThrow = true) {
if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind);
else removeAttribute(~0, Attribute::NoUnwind);
@@ -2508,27 +2545,31 @@ public:
/// indirect function invocation.
///
Function *getCalledFunction() const {
- return dyn_cast<Function>(getOperand(0));
+ return dyn_cast<Function>(Op<-3>());
}
/// getCalledValue - Get a pointer to the function that is invoked by this
/// instruction
- const Value *getCalledValue() const { return getOperand(0); }
- Value *getCalledValue() { return getOperand(0); }
+ const Value *getCalledValue() const { return Op<-3>(); }
+ Value *getCalledValue() { return Op<-3>(); }
+
+ /// setCalledFunction - Set the function called.
+ void setCalledFunction(Value* Fn) {
+ Op<-3>() = Fn;
+ }
// get*Dest - Return the destination basic blocks...
BasicBlock *getNormalDest() const {
- return cast<BasicBlock>(getOperand(1));
+ return cast<BasicBlock>(Op<-2>());
}
BasicBlock *getUnwindDest() const {
- return cast<BasicBlock>(getOperand(2));
+ return cast<BasicBlock>(Op<-1>());
}
void setNormalDest(BasicBlock *B) {
- setOperand(1, (Value*)B);
+ Op<-2>() = reinterpret_cast<Value*>(B);
}
-
void setUnwindDest(BasicBlock *B) {
- setOperand(2, (Value*)B);
+ Op<-1>() = reinterpret_cast<Value*>(B);
}
BasicBlock *getSuccessor(unsigned i) const {
@@ -2538,7 +2579,7 @@ public:
void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
assert(idx < 2 && "Successor # out of range for invoke!");
- setOperand(idx+1, (Value*)NewSucc);
+ *(&Op<-2>() + idx) = reinterpret_cast<Value*>(NewSucc);
}
unsigned getNumSuccessors() const { return 2; }
@@ -2551,6 +2592,7 @@ public:
static inline bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
+
private:
virtual BasicBlock *getSuccessorV(unsigned idx) const;
virtual unsigned getNumSuccessorsV() const;
@@ -2687,7 +2729,7 @@ public:
TruncInst(
Value *S, ///< The value to be truncated
const Type *Ty, ///< The (smaller) type to truncate to
- const Twine &NameStr = "", ///< A name for the new instruction
+ const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2695,7 +2737,7 @@ public:
TruncInst(
Value *S, ///< The value to be truncated
const Type *Ty, ///< The (smaller) type to truncate to
- const Twine &NameStr, ///< A name for the new instruction
+ const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2724,7 +2766,7 @@ public:
ZExtInst(
Value *S, ///< The value to be zero extended
const Type *Ty, ///< The type to zero extend to
- const Twine &NameStr = "", ///< A name for the new instruction
+ const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2732,7 +2774,7 @@ public:
ZExtInst(
Value *S, ///< The value to be zero extended
const Type *Ty, ///< The type to zero extend to
- const Twine &NameStr, ///< A name for the new instruction
+ const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2761,7 +2803,7 @@ public:
SExtInst(
Value *S, ///< The value to be sign extended
const Type *Ty, ///< The type to sign extend to
- const Twine &NameStr = "", ///< A name for the new instruction
+ const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2769,7 +2811,7 @@ public:
SExtInst(
Value *S, ///< The value to be sign extended
const Type *Ty, ///< The type to sign extend to
- const Twine &NameStr, ///< A name for the new instruction
+ const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2798,7 +2840,7 @@ public:
FPTruncInst(
Value *S, ///< The value to be truncated
const Type *Ty, ///< The type to truncate to
- const Twine &NameStr = "", ///< A name for the new instruction
+ const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2806,7 +2848,7 @@ public:
FPTruncInst(
Value *S, ///< The value to be truncated
const Type *Ty, ///< The type to truncate to
- const Twine &NameStr, ///< A name for the new instruction
+ const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2835,7 +2877,7 @@ public:
FPExtInst(
Value *S, ///< The value to be extended
const Type *Ty, ///< The type to extend to
- const Twine &NameStr = "", ///< A name for the new instruction
+ const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2843,7 +2885,7 @@ public:
FPExtInst(
Value *S, ///< The value to be extended
const Type *Ty, ///< The type to extend to
- const Twine &NameStr, ///< A name for the new instruction
+ const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2872,7 +2914,7 @@ public:
UIToFPInst(
Value *S, ///< The value to be converted
const Type *Ty, ///< The type to convert to
- const Twine &NameStr = "", ///< A name for the new instruction
+ const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2880,7 +2922,7 @@ public:
UIToFPInst(
Value *S, ///< The value to be converted
const Type *Ty, ///< The type to convert to
- const Twine &NameStr, ///< A name for the new instruction
+ const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2909,7 +2951,7 @@ public:
SIToFPInst(
Value *S, ///< The value to be converted
const Type *Ty, ///< The type to convert to
- const Twine &NameStr = "", ///< A name for the new instruction
+ const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2917,7 +2959,7 @@ public:
SIToFPInst(
Value *S, ///< The value to be converted
const Type *Ty, ///< The type to convert to
- const Twine &NameStr, ///< A name for the new instruction
+ const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -2946,7 +2988,7 @@ public:
FPToUIInst(
Value *S, ///< The value to be converted
const Type *Ty, ///< The type to convert to
- const Twine &NameStr = "", ///< A name for the new instruction
+ const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2954,7 +2996,7 @@ public:
FPToUIInst(
Value *S, ///< The value to be converted
const Type *Ty, ///< The type to convert to
- const Twine &NameStr, ///< A name for the new instruction
+ const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< Where to insert the new instruction
);
@@ -2983,7 +3025,7 @@ public:
FPToSIInst(
Value *S, ///< The value to be converted
const Type *Ty, ///< The type to convert to
- const Twine &NameStr = "", ///< A name for the new instruction
+ const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -2991,7 +3033,7 @@ public:
FPToSIInst(
Value *S, ///< The value to be converted
const Type *Ty, ///< The type to convert to
- const Twine &NameStr, ///< A name for the new instruction
+ const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -3016,7 +3058,7 @@ public:
IntToPtrInst(
Value *S, ///< The value to be converted
const Type *Ty, ///< The type to convert to
- const Twine &NameStr = "", ///< A name for the new instruction
+ const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -3024,7 +3066,7 @@ public:
IntToPtrInst(
Value *S, ///< The value to be converted
const Type *Ty, ///< The type to convert to
- const Twine &NameStr, ///< A name for the new instruction
+ const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -3056,7 +3098,7 @@ public:
PtrToIntInst(
Value *S, ///< The value to be converted
const Type *Ty, ///< The type to convert to
- const Twine &NameStr = "", ///< A name for the new instruction
+ const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -3064,7 +3106,7 @@ public:
PtrToIntInst(
Value *S, ///< The value to be converted
const Type *Ty, ///< The type to convert to
- const Twine &NameStr, ///< A name for the new instruction
+ const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
@@ -3093,7 +3135,7 @@ public:
BitCastInst(
Value *S, ///< The value to be casted
const Type *Ty, ///< The type to casted to
- const Twine &NameStr = "", ///< A name for the new instruction
+ const Twine &NameStr = "", ///< A name for the new instruction
Instruction *InsertBefore = 0 ///< Where to insert the new instruction
);
@@ -3101,7 +3143,7 @@ public:
BitCastInst(
Value *S, ///< The value to be casted
const Type *Ty, ///< The type to casted to
- const Twine &NameStr, ///< A name for the new instruction
+ const Twine &NameStr, ///< A name for the new instruction
BasicBlock *InsertAtEnd ///< The block to insert the instruction into
);
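
Note on the Instructions.h changes above: with the callee moved to the last operand (Op<-1> for CallInst, Op<-3> for InvokeInst), the new getNumArgOperands()/getArgOperand() accessors are the supported way to walk call arguments. An illustrative loop (CI and the visit helper are hypothetical):

    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
      visit(CI->getArgOperand(i));                // arguments only, callee excluded
    Function *Callee = CI->getCalledFunction();   // still null for indirect calls
    bool IsAsm = CI->isInlineAsm();               // callee is an InlineAsm value
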
diff --git a/libclamav/c++/llvm/include/llvm/IntrinsicInst.h b/libclamav/c++/llvm/include/llvm/IntrinsicInst.h
index d86b33e..a17fa9c 100644
--- a/libclamav/c++/llvm/include/llvm/IntrinsicInst.h
+++ b/libclamav/c++/llvm/include/llvm/IntrinsicInst.h
@@ -43,7 +43,7 @@ namespace llvm {
Intrinsic::ID getIntrinsicID() const {
return (Intrinsic::ID)getCalledFunction()->getIntrinsicID();
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const IntrinsicInst *) { return true; }
static inline bool classof(const CallInst *I) {
@@ -74,7 +74,7 @@ namespace llvm {
static inline bool classof(const Value *V) {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
-
+
static Value *StripCast(Value *C);
};
@@ -83,7 +83,7 @@ namespace llvm {
class DbgDeclareInst : public DbgInfoIntrinsic {
public:
Value *getAddress() const;
- MDNode *getVariable() const { return cast<MDNode>(getOperand(2)); }
+ MDNode *getVariable() const { return cast<MDNode>(getArgOperand(1)); }
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const DbgDeclareInst *) { return true; }
@@ -103,10 +103,9 @@ namespace llvm {
Value *getValue();
uint64_t getOffset() const {
return cast<ConstantInt>(
- const_cast<Value*>(getOperand(2)))->getZExtValue();
+ const_cast<Value*>(getArgOperand(1)))->getZExtValue();
}
- const MDNode *getVariable() const { return cast<MDNode>(getOperand(3)); }
- MDNode *getVariable() { return cast<MDNode>(getOperand(3)); }
+ MDNode *getVariable() const { return cast<MDNode>(getArgOperand(2)); }
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const DbgValueInst *) { return true; }
@@ -122,17 +121,24 @@ namespace llvm {
///
class MemIntrinsic : public IntrinsicInst {
public:
- Value *getRawDest() const { return const_cast<Value*>(getOperand(1)); }
+ Value *getRawDest() const { return const_cast<Value*>(getArgOperand(0)); }
- Value *getLength() const { return const_cast<Value*>(getOperand(3)); }
+ Value *getLength() const { return const_cast<Value*>(getArgOperand(2)); }
ConstantInt *getAlignmentCst() const {
- return cast<ConstantInt>(const_cast<Value*>(getOperand(4)));
+ return cast<ConstantInt>(const_cast<Value*>(getArgOperand(3)));
}
-
+
unsigned getAlignment() const {
return getAlignmentCst()->getZExtValue();
}
+ ConstantInt *getVolatileCst() const {
+ return cast<ConstantInt>(const_cast<Value*>(getArgOperand(4)));
+ }
+ bool isVolatile() const {
+ return !getVolatileCst()->isZero();
+ }
+
/// getDest - This is just like getRawDest, but it strips off any cast
/// instructions that feed it, giving the original input. The returned
/// value is guaranteed to be a pointer.
@@ -143,23 +149,27 @@ namespace llvm {
void setDest(Value *Ptr) {
assert(getRawDest()->getType() == Ptr->getType() &&
"setDest called with pointer of wrong type!");
- setOperand(1, Ptr);
+ setArgOperand(0, Ptr);
}
void setLength(Value *L) {
assert(getLength()->getType() == L->getType() &&
"setLength called with value of wrong type!");
- setOperand(3, L);
+ setArgOperand(2, L);
}
-
+
void setAlignment(Constant* A) {
- setOperand(4, A);
+ setArgOperand(3, A);
+ }
+
+ void setVolatile(Constant* V) {
+ setArgOperand(4, V);
}
-
+
const Type *getAlignmentType() const {
- return getOperand(4)->getType();
+ return getArgOperand(3)->getType();
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const MemIntrinsic *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
@@ -182,14 +192,14 @@ namespace llvm {
public:
/// get* - Return the arguments to the instruction.
///
- Value *getValue() const { return const_cast<Value*>(getOperand(2)); }
-
+ Value *getValue() const { return const_cast<Value*>(getArgOperand(1)); }
+
void setValue(Value *Val) {
assert(getValue()->getType() == Val->getType() &&
- "setSource called with pointer of wrong type!");
- setOperand(2, Val);
+ "setValue called with value of wrong type!");
+ setArgOperand(1, Val);
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const MemSetInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
@@ -199,26 +209,26 @@ namespace llvm {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
};
-
+
/// MemTransferInst - This class wraps the llvm.memcpy/memmove intrinsics.
///
class MemTransferInst : public MemIntrinsic {
public:
/// get* - Return the arguments to the instruction.
///
- Value *getRawSource() const { return const_cast<Value*>(getOperand(2)); }
-
+ Value *getRawSource() const { return const_cast<Value*>(getArgOperand(1)); }
+
/// getSource - This is just like getRawSource, but it strips off any cast
/// instructions that feed it, giving the original input. The returned
/// value is guaranteed to be a pointer.
Value *getSource() const { return getRawSource()->stripPointerCasts(); }
-
+
void setSource(Value *Ptr) {
assert(getRawSource()->getType() == Ptr->getType() &&
"setSource called with pointer of wrong type!");
- setOperand(2, Ptr);
+ setArgOperand(1, Ptr);
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const MemTransferInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
@@ -229,8 +239,8 @@ namespace llvm {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
};
-
-
+
+
/// MemCpyInst - This class wraps the llvm.memcpy intrinsic.
///
class MemCpyInst : public MemTransferInst {
@@ -259,6 +269,20 @@ namespace llvm {
}
};
+ /// EHExceptionInst - This represents the llvm.eh.exception instruction.
+ ///
+ class EHExceptionInst : public IntrinsicInst {
+ public:
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const EHExceptionInst *) { return true; }
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::eh_exception;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ };
+
/// EHSelectorInst - This represents the llvm.eh.selector instruction.
///
class EHSelectorInst : public IntrinsicInst {
@@ -272,7 +296,7 @@ namespace llvm {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
};
-
+
/// MemoryUseIntrinsic - This is the common base class for the memory use
/// marker intrinsics.
///
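
Note on the IntrinsicInst changes above: the memory intrinsics now expose their operands via the argument-based accessors and carry an is-volatile operand. A sketch of typical use (I and tryToSimplify are hypothetical):

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I)) {
      Value *Dst = MTI->getDest();      // getArgOperand(0), casts stripped
      Value *Src = MTI->getSource();    // getArgOperand(1), casts stripped
      Value *Len = MTI->getLength();    // getArgOperand(2)
      if (!MTI->isVolatile())
        tryToSimplify(Dst, Src, Len);   // hypothetical helper
    }
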
diff --git a/libclamav/c++/llvm/include/llvm/Intrinsics.gen b/libclamav/c++/llvm/include/llvm/Intrinsics.gen
index 57eee07..fc595b0 100644
--- a/libclamav/c++/llvm/include/llvm/Intrinsics.gen
+++ b/libclamav/c++/llvm/include/llvm/Intrinsics.gen
@@ -6,16 +6,17 @@
//
//===----------------------------------------------------------------------===//
+// VisualStudio defines setjmp as _setjmp
+#if defined(_MSC_VER) && defined(setjmp)
+#define setjmp_undefined_for_visual_studio
+#undef setjmp
+#endif
+
// Enum values for Intrinsics.h
#ifdef GET_INTRINSIC_ENUM_VALUES
alpha_umulh, // llvm.alpha.umulh
annotation, // llvm.annotation
- arm_neon_vabals, // llvm.arm.neon.vabals
- arm_neon_vabalu, // llvm.arm.neon.vabalu
- arm_neon_vabas, // llvm.arm.neon.vabas
- arm_neon_vabau, // llvm.arm.neon.vabau
- arm_neon_vabdls, // llvm.arm.neon.vabdls
- arm_neon_vabdlu, // llvm.arm.neon.vabdlu
+ arm_get_fpscr, // llvm.arm.get.fpscr
arm_neon_vabds, // llvm.arm.neon.vabds
arm_neon_vabdu, // llvm.arm.neon.vabdu
arm_neon_vabs, // llvm.arm.neon.vabs
@@ -24,10 +25,6 @@
arm_neon_vacgtd, // llvm.arm.neon.vacgtd
arm_neon_vacgtq, // llvm.arm.neon.vacgtq
arm_neon_vaddhn, // llvm.arm.neon.vaddhn
- arm_neon_vaddls, // llvm.arm.neon.vaddls
- arm_neon_vaddlu, // llvm.arm.neon.vaddlu
- arm_neon_vaddws, // llvm.arm.neon.vaddws
- arm_neon_vaddwu, // llvm.arm.neon.vaddwu
arm_neon_vcls, // llvm.arm.neon.vcls
arm_neon_vclz, // llvm.arm.neon.vclz
arm_neon_vcnt, // llvm.arm.neon.vcnt
@@ -50,16 +47,7 @@
arm_neon_vmaxu, // llvm.arm.neon.vmaxu
arm_neon_vmins, // llvm.arm.neon.vmins
arm_neon_vminu, // llvm.arm.neon.vminu
- arm_neon_vmlals, // llvm.arm.neon.vmlals
- arm_neon_vmlalu, // llvm.arm.neon.vmlalu
- arm_neon_vmlsls, // llvm.arm.neon.vmlsls
- arm_neon_vmlslu, // llvm.arm.neon.vmlslu
- arm_neon_vmovls, // llvm.arm.neon.vmovls
- arm_neon_vmovlu, // llvm.arm.neon.vmovlu
- arm_neon_vmovn, // llvm.arm.neon.vmovn
arm_neon_vmullp, // llvm.arm.neon.vmullp
- arm_neon_vmulls, // llvm.arm.neon.vmulls
- arm_neon_vmullu, // llvm.arm.neon.vmullu
arm_neon_vmulp, // llvm.arm.neon.vmulp
arm_neon_vpadals, // llvm.arm.neon.vpadals
arm_neon_vpadalu, // llvm.arm.neon.vpadalu
@@ -120,10 +108,6 @@
arm_neon_vst4, // llvm.arm.neon.vst4
arm_neon_vst4lane, // llvm.arm.neon.vst4lane
arm_neon_vsubhn, // llvm.arm.neon.vsubhn
- arm_neon_vsubls, // llvm.arm.neon.vsubls
- arm_neon_vsublu, // llvm.arm.neon.vsublu
- arm_neon_vsubws, // llvm.arm.neon.vsubws
- arm_neon_vsubwu, // llvm.arm.neon.vsubwu
arm_neon_vtbl1, // llvm.arm.neon.vtbl1
arm_neon_vtbl2, // llvm.arm.neon.vtbl2
arm_neon_vtbl3, // llvm.arm.neon.vtbl3
@@ -132,7 +116,14 @@
arm_neon_vtbx2, // llvm.arm.neon.vtbx2
arm_neon_vtbx3, // llvm.arm.neon.vtbx3
arm_neon_vtbx4, // llvm.arm.neon.vtbx4
+ arm_qadd, // llvm.arm.qadd
+ arm_qsub, // llvm.arm.qsub
+ arm_set_fpscr, // llvm.arm.set.fpscr
+ arm_ssat, // llvm.arm.ssat
arm_thread_pointer, // llvm.arm.thread.pointer
+ arm_usat, // llvm.arm.usat
+ arm_vcvtr, // llvm.arm.vcvtr
+ arm_vcvtru, // llvm.arm.vcvtru
atomic_cmp_swap, // llvm.atomic.cmp.swap
atomic_load_add, // llvm.atomic.load.add
atomic_load_and, // llvm.atomic.load.and
@@ -146,6 +137,8 @@
atomic_load_xor, // llvm.atomic.load.xor
atomic_swap, // llvm.atomic.swap
bswap, // llvm.bswap
+ convert_from_fp16, // llvm.convert.from.fp16
+ convert_to_fp16, // llvm.convert.to.fp16
convertff, // llvm.convertff
convertfsi, // llvm.convertfsi
convertfui, // llvm.convertfui
@@ -457,6 +450,105 @@
vaend, // llvm.va_end
var_annotation, // llvm.var.annotation
vastart, // llvm.va_start
+ x86_aesni_aesdec, // llvm.x86.aesni.aesdec
+ x86_aesni_aesdeclast, // llvm.x86.aesni.aesdeclast
+ x86_aesni_aesenc, // llvm.x86.aesni.aesenc
+ x86_aesni_aesenclast, // llvm.x86.aesni.aesenclast
+ x86_aesni_aesimc, // llvm.x86.aesni.aesimc
+ x86_aesni_aeskeygenassist, // llvm.x86.aesni.aeskeygenassist
+ x86_avx_addsub_pd_256, // llvm.x86.avx.addsub.pd.256
+ x86_avx_addsub_ps_256, // llvm.x86.avx.addsub.ps.256
+ x86_avx_blend_pd_256, // llvm.x86.avx.blend.pd.256
+ x86_avx_blend_ps_256, // llvm.x86.avx.blend.ps.256
+ x86_avx_blendv_pd_256, // llvm.x86.avx.blendv.pd.256
+ x86_avx_blendv_ps_256, // llvm.x86.avx.blendv.ps.256
+ x86_avx_cmp_pd_256, // llvm.x86.avx.cmp.pd.256
+ x86_avx_cmp_ps_256, // llvm.x86.avx.cmp.ps.256
+ x86_avx_cvt_pd2_ps_256, // llvm.x86.avx.cvt.pd2.ps.256
+ x86_avx_cvt_pd2dq_256, // llvm.x86.avx.cvt.pd2dq.256
+ x86_avx_cvt_ps2_pd_256, // llvm.x86.avx.cvt.ps2.pd.256
+ x86_avx_cvt_ps2dq_256, // llvm.x86.avx.cvt.ps2dq.256
+ x86_avx_cvtdq2_pd_256, // llvm.x86.avx.cvtdq2.pd.256
+ x86_avx_cvtdq2_ps_256, // llvm.x86.avx.cvtdq2.ps.256
+ x86_avx_cvtt_pd2dq_256, // llvm.x86.avx.cvtt.pd2dq.256
+ x86_avx_cvtt_ps2dq_256, // llvm.x86.avx.cvtt.ps2dq.256
+ x86_avx_dp_ps_256, // llvm.x86.avx.dp.ps.256
+ x86_avx_hadd_pd_256, // llvm.x86.avx.hadd.pd.256
+ x86_avx_hadd_ps_256, // llvm.x86.avx.hadd.ps.256
+ x86_avx_hsub_pd_256, // llvm.x86.avx.hsub.pd.256
+ x86_avx_hsub_ps_256, // llvm.x86.avx.hsub.ps.256
+ x86_avx_ldu_dq_256, // llvm.x86.avx.ldu.dq.256
+ x86_avx_loadu_dq_256, // llvm.x86.avx.loadu.dq.256
+ x86_avx_loadu_pd_256, // llvm.x86.avx.loadu.pd.256
+ x86_avx_loadu_ps_256, // llvm.x86.avx.loadu.ps.256
+ x86_avx_maskload_pd, // llvm.x86.avx.maskload.pd
+ x86_avx_maskload_pd_256, // llvm.x86.avx.maskload.pd.256
+ x86_avx_maskload_ps, // llvm.x86.avx.maskload.ps
+ x86_avx_maskload_ps_256, // llvm.x86.avx.maskload.ps.256
+ x86_avx_maskstore_pd, // llvm.x86.avx.maskstore.pd
+ x86_avx_maskstore_pd_256, // llvm.x86.avx.maskstore.pd.256
+ x86_avx_maskstore_ps, // llvm.x86.avx.maskstore.ps
+ x86_avx_maskstore_ps_256, // llvm.x86.avx.maskstore.ps.256
+ x86_avx_max_pd_256, // llvm.x86.avx.max.pd.256
+ x86_avx_max_ps_256, // llvm.x86.avx.max.ps.256
+ x86_avx_min_pd_256, // llvm.x86.avx.min.pd.256
+ x86_avx_min_ps_256, // llvm.x86.avx.min.ps.256
+ x86_avx_movmsk_pd_256, // llvm.x86.avx.movmsk.pd.256
+ x86_avx_movmsk_ps_256, // llvm.x86.avx.movmsk.ps.256
+ x86_avx_movnt_dq_256, // llvm.x86.avx.movnt.dq.256
+ x86_avx_movnt_pd_256, // llvm.x86.avx.movnt.pd.256
+ x86_avx_movnt_ps_256, // llvm.x86.avx.movnt.ps.256
+ x86_avx_ptestc_256, // llvm.x86.avx.ptestc.256
+ x86_avx_ptestnzc_256, // llvm.x86.avx.ptestnzc.256
+ x86_avx_ptestz_256, // llvm.x86.avx.ptestz.256
+ x86_avx_rcp_ps_256, // llvm.x86.avx.rcp.ps.256
+ x86_avx_round_pd_256, // llvm.x86.avx.round.pd.256
+ x86_avx_round_ps_256, // llvm.x86.avx.round.ps.256
+ x86_avx_rsqrt_ps_256, // llvm.x86.avx.rsqrt.ps.256
+ x86_avx_sqrt_pd_256, // llvm.x86.avx.sqrt.pd.256
+ x86_avx_sqrt_ps_256, // llvm.x86.avx.sqrt.ps.256
+ x86_avx_storeu_dq_256, // llvm.x86.avx.storeu.dq.256
+ x86_avx_storeu_pd_256, // llvm.x86.avx.storeu.pd.256
+ x86_avx_storeu_ps_256, // llvm.x86.avx.storeu.ps.256
+ x86_avx_vbroadcast_sd_256, // llvm.x86.avx.vbroadcast.sd.256
+ x86_avx_vbroadcastf128_pd_256, // llvm.x86.avx.vbroadcastf128.pd.256
+ x86_avx_vbroadcastf128_ps_256, // llvm.x86.avx.vbroadcastf128.ps.256
+ x86_avx_vbroadcastss, // llvm.x86.avx.vbroadcastss
+ x86_avx_vbroadcastss_256, // llvm.x86.avx.vbroadcastss.256
+ x86_avx_vextractf128_pd_256, // llvm.x86.avx.vextractf128.pd.256
+ x86_avx_vextractf128_ps_256, // llvm.x86.avx.vextractf128.ps.256
+ x86_avx_vextractf128_si_256, // llvm.x86.avx.vextractf128.si.256
+ x86_avx_vinsertf128_pd_256, // llvm.x86.avx.vinsertf128.pd.256
+ x86_avx_vinsertf128_ps_256, // llvm.x86.avx.vinsertf128.ps.256
+ x86_avx_vinsertf128_si_256, // llvm.x86.avx.vinsertf128.si.256
+ x86_avx_vperm2f128_pd_256, // llvm.x86.avx.vperm2f128.pd.256
+ x86_avx_vperm2f128_ps_256, // llvm.x86.avx.vperm2f128.ps.256
+ x86_avx_vperm2f128_si_256, // llvm.x86.avx.vperm2f128.si.256
+ x86_avx_vpermil_pd, // llvm.x86.avx.vpermil.pd
+ x86_avx_vpermil_pd_256, // llvm.x86.avx.vpermil.pd.256
+ x86_avx_vpermil_ps, // llvm.x86.avx.vpermil.ps
+ x86_avx_vpermil_ps_256, // llvm.x86.avx.vpermil.ps.256
+ x86_avx_vpermilvar_pd, // llvm.x86.avx.vpermilvar.pd
+ x86_avx_vpermilvar_pd_256, // llvm.x86.avx.vpermilvar.pd.256
+ x86_avx_vpermilvar_ps, // llvm.x86.avx.vpermilvar.ps
+ x86_avx_vpermilvar_ps_256, // llvm.x86.avx.vpermilvar.ps.256
+ x86_avx_vtestc_pd, // llvm.x86.avx.vtestc.pd
+ x86_avx_vtestc_pd_256, // llvm.x86.avx.vtestc.pd.256
+ x86_avx_vtestc_ps, // llvm.x86.avx.vtestc.ps
+ x86_avx_vtestc_ps_256, // llvm.x86.avx.vtestc.ps.256
+ x86_avx_vtestnzc_pd, // llvm.x86.avx.vtestnzc.pd
+ x86_avx_vtestnzc_pd_256, // llvm.x86.avx.vtestnzc.pd.256
+ x86_avx_vtestnzc_ps, // llvm.x86.avx.vtestnzc.ps
+ x86_avx_vtestnzc_ps_256, // llvm.x86.avx.vtestnzc.ps.256
+ x86_avx_vtestz_pd, // llvm.x86.avx.vtestz.pd
+ x86_avx_vtestz_pd_256, // llvm.x86.avx.vtestz.pd.256
+ x86_avx_vtestz_ps, // llvm.x86.avx.vtestz.ps
+ x86_avx_vtestz_ps_256, // llvm.x86.avx.vtestz.ps.256
+ x86_avx_vzeroall, // llvm.x86.avx.vzeroall
+ x86_avx_vzeroupper, // llvm.x86.avx.vzeroupper
+ x86_int, // llvm.x86.int
+ x86_mmx_cvtsi32_si64, // llvm.x86.mmx.cvtsi32.si64
+ x86_mmx_cvtsi64_si32, // llvm.x86.mmx.cvtsi64.si32
x86_mmx_emms, // llvm.x86.mmx.emms
x86_mmx_femms, // llvm.x86.mmx.femms
x86_mmx_maskmovq, // llvm.x86.mmx.maskmovq
@@ -464,10 +556,16 @@
x86_mmx_packssdw, // llvm.x86.mmx.packssdw
x86_mmx_packsswb, // llvm.x86.mmx.packsswb
x86_mmx_packuswb, // llvm.x86.mmx.packuswb
+ x86_mmx_padd_b, // llvm.x86.mmx.padd.b
+ x86_mmx_padd_d, // llvm.x86.mmx.padd.d
+ x86_mmx_padd_q, // llvm.x86.mmx.padd.q
+ x86_mmx_padd_w, // llvm.x86.mmx.padd.w
x86_mmx_padds_b, // llvm.x86.mmx.padds.b
x86_mmx_padds_w, // llvm.x86.mmx.padds.w
x86_mmx_paddus_b, // llvm.x86.mmx.paddus.b
x86_mmx_paddus_w, // llvm.x86.mmx.paddus.w
+ x86_mmx_pand, // llvm.x86.mmx.pand
+ x86_mmx_pandn, // llvm.x86.mmx.pandn
x86_mmx_pavg_b, // llvm.x86.mmx.pavg.b
x86_mmx_pavg_w, // llvm.x86.mmx.pavg.w
x86_mmx_pcmpeq_b, // llvm.x86.mmx.pcmpeq.b
@@ -476,6 +574,8 @@
x86_mmx_pcmpgt_b, // llvm.x86.mmx.pcmpgt.b
x86_mmx_pcmpgt_d, // llvm.x86.mmx.pcmpgt.d
x86_mmx_pcmpgt_w, // llvm.x86.mmx.pcmpgt.w
+ x86_mmx_pextr_w, // llvm.x86.mmx.pextr.w
+ x86_mmx_pinsr_w, // llvm.x86.mmx.pinsr.w
x86_mmx_pmadd_wd, // llvm.x86.mmx.pmadd.wd
x86_mmx_pmaxs_w, // llvm.x86.mmx.pmaxs.w
x86_mmx_pmaxu_b, // llvm.x86.mmx.pmaxu.b
@@ -484,7 +584,9 @@
x86_mmx_pmovmskb, // llvm.x86.mmx.pmovmskb
x86_mmx_pmulh_w, // llvm.x86.mmx.pmulh.w
x86_mmx_pmulhu_w, // llvm.x86.mmx.pmulhu.w
+ x86_mmx_pmull_w, // llvm.x86.mmx.pmull.w
x86_mmx_pmulu_dq, // llvm.x86.mmx.pmulu.dq
+ x86_mmx_por, // llvm.x86.mmx.por
x86_mmx_psad_bw, // llvm.x86.mmx.psad.bw
x86_mmx_psll_d, // llvm.x86.mmx.psll.d
x86_mmx_psll_q, // llvm.x86.mmx.psll.q
@@ -502,10 +604,25 @@
x86_mmx_psrli_d, // llvm.x86.mmx.psrli.d
x86_mmx_psrli_q, // llvm.x86.mmx.psrli.q
x86_mmx_psrli_w, // llvm.x86.mmx.psrli.w
+ x86_mmx_psub_b, // llvm.x86.mmx.psub.b
+ x86_mmx_psub_d, // llvm.x86.mmx.psub.d
+ x86_mmx_psub_q, // llvm.x86.mmx.psub.q
+ x86_mmx_psub_w, // llvm.x86.mmx.psub.w
x86_mmx_psubs_b, // llvm.x86.mmx.psubs.b
x86_mmx_psubs_w, // llvm.x86.mmx.psubs.w
x86_mmx_psubus_b, // llvm.x86.mmx.psubus.b
x86_mmx_psubus_w, // llvm.x86.mmx.psubus.w
+ x86_mmx_punpckhbw, // llvm.x86.mmx.punpckhbw
+ x86_mmx_punpckhdq, // llvm.x86.mmx.punpckhdq
+ x86_mmx_punpckhwd, // llvm.x86.mmx.punpckhwd
+ x86_mmx_punpcklbw, // llvm.x86.mmx.punpcklbw
+ x86_mmx_punpckldq, // llvm.x86.mmx.punpckldq
+ x86_mmx_punpcklwd, // llvm.x86.mmx.punpcklwd
+ x86_mmx_pxor, // llvm.x86.mmx.pxor
+ x86_mmx_vec_ext_d, // llvm.x86.mmx.vec.ext.d
+ x86_mmx_vec_init_b, // llvm.x86.mmx.vec.init.b
+ x86_mmx_vec_init_d, // llvm.x86.mmx.vec.init.d
+ x86_mmx_vec_init_w, // llvm.x86.mmx.vec.init.w
x86_sse2_add_sd, // llvm.x86.sse2.add.sd
x86_sse2_clflush, // llvm.x86.sse2.clflush
x86_sse2_cmp_pd, // llvm.x86.sse2.cmp.pd
@@ -656,7 +773,6 @@
x86_sse41_pmovzxwd, // llvm.x86.sse41.pmovzxwd
x86_sse41_pmovzxwq, // llvm.x86.sse41.pmovzxwq
x86_sse41_pmuldq, // llvm.x86.sse41.pmuldq
- x86_sse41_pmulld, // llvm.x86.sse41.pmulld
x86_sse41_ptestc, // llvm.x86.sse41.ptestc
x86_sse41_ptestnzc, // llvm.x86.sse41.ptestnzc
x86_sse41_ptestz, // llvm.x86.sse41.ptestz
@@ -666,8 +782,9 @@
x86_sse41_round_ss, // llvm.x86.sse41.round.ss
x86_sse42_crc32_16, // llvm.x86.sse42.crc32.16
x86_sse42_crc32_32, // llvm.x86.sse42.crc32.32
- x86_sse42_crc32_64, // llvm.x86.sse42.crc32.64
x86_sse42_crc32_8, // llvm.x86.sse42.crc32.8
+ x86_sse42_crc64_64, // llvm.x86.sse42.crc64.64
+ x86_sse42_crc64_8, // llvm.x86.sse42.crc64.8
x86_sse42_pcmpestri128, // llvm.x86.sse42.pcmpestri128
x86_sse42_pcmpestria128, // llvm.x86.sse42.pcmpestria128
x86_sse42_pcmpestric128, // llvm.x86.sse42.pcmpestric128
@@ -736,8 +853,6 @@
x86_ssse3_pabs_d_128, // llvm.x86.ssse3.pabs.d.128
x86_ssse3_pabs_w, // llvm.x86.ssse3.pabs.w
x86_ssse3_pabs_w_128, // llvm.x86.ssse3.pabs.w.128
- x86_ssse3_palign_r, // llvm.x86.ssse3.palign.r
- x86_ssse3_palign_r_128, // llvm.x86.ssse3.palign.r.128
x86_ssse3_phadd_d, // llvm.x86.ssse3.phadd.d
x86_ssse3_phadd_d_128, // llvm.x86.ssse3.phadd.d.128
x86_ssse3_phadd_sw, // llvm.x86.ssse3.phadd.sw
@@ -756,6 +871,7 @@
x86_ssse3_pmul_hr_sw_128, // llvm.x86.ssse3.pmul.hr.sw.128
x86_ssse3_pshuf_b, // llvm.x86.ssse3.pshuf.b
x86_ssse3_pshuf_b_128, // llvm.x86.ssse3.pshuf.b.128
+ x86_ssse3_pshuf_w, // llvm.x86.ssse3.pshuf.w
x86_ssse3_psign_b, // llvm.x86.ssse3.psign.b
x86_ssse3_psign_b_128, // llvm.x86.ssse3.psign.b.128
x86_ssse3_psign_d, // llvm.x86.ssse3.psign.d
@@ -771,12 +887,7 @@
// Note that entry #0 is the invalid intrinsic!
"llvm.alpha.umulh",
"llvm.annotation",
- "llvm.arm.neon.vabals",
- "llvm.arm.neon.vabalu",
- "llvm.arm.neon.vabas",
- "llvm.arm.neon.vabau",
- "llvm.arm.neon.vabdls",
- "llvm.arm.neon.vabdlu",
+ "llvm.arm.get.fpscr",
"llvm.arm.neon.vabds",
"llvm.arm.neon.vabdu",
"llvm.arm.neon.vabs",
@@ -785,10 +896,6 @@
"llvm.arm.neon.vacgtd",
"llvm.arm.neon.vacgtq",
"llvm.arm.neon.vaddhn",
- "llvm.arm.neon.vaddls",
- "llvm.arm.neon.vaddlu",
- "llvm.arm.neon.vaddws",
- "llvm.arm.neon.vaddwu",
"llvm.arm.neon.vcls",
"llvm.arm.neon.vclz",
"llvm.arm.neon.vcnt",
@@ -811,16 +918,7 @@
"llvm.arm.neon.vmaxu",
"llvm.arm.neon.vmins",
"llvm.arm.neon.vminu",
- "llvm.arm.neon.vmlals",
- "llvm.arm.neon.vmlalu",
- "llvm.arm.neon.vmlsls",
- "llvm.arm.neon.vmlslu",
- "llvm.arm.neon.vmovls",
- "llvm.arm.neon.vmovlu",
- "llvm.arm.neon.vmovn",
"llvm.arm.neon.vmullp",
- "llvm.arm.neon.vmulls",
- "llvm.arm.neon.vmullu",
"llvm.arm.neon.vmulp",
"llvm.arm.neon.vpadals",
"llvm.arm.neon.vpadalu",
@@ -881,10 +979,6 @@
"llvm.arm.neon.vst4",
"llvm.arm.neon.vst4lane",
"llvm.arm.neon.vsubhn",
- "llvm.arm.neon.vsubls",
- "llvm.arm.neon.vsublu",
- "llvm.arm.neon.vsubws",
- "llvm.arm.neon.vsubwu",
"llvm.arm.neon.vtbl1",
"llvm.arm.neon.vtbl2",
"llvm.arm.neon.vtbl3",
@@ -893,7 +987,14 @@
"llvm.arm.neon.vtbx2",
"llvm.arm.neon.vtbx3",
"llvm.arm.neon.vtbx4",
+ "llvm.arm.qadd",
+ "llvm.arm.qsub",
+ "llvm.arm.set.fpscr",
+ "llvm.arm.ssat",
"llvm.arm.thread.pointer",
+ "llvm.arm.usat",
+ "llvm.arm.vcvtr",
+ "llvm.arm.vcvtru",
"llvm.atomic.cmp.swap",
"llvm.atomic.load.add",
"llvm.atomic.load.and",
@@ -907,6 +1008,8 @@
"llvm.atomic.load.xor",
"llvm.atomic.swap",
"llvm.bswap",
+ "llvm.convert.from.fp16",
+ "llvm.convert.to.fp16",
"llvm.convertff",
"llvm.convertfsi",
"llvm.convertfui",
@@ -1218,6 +1321,105 @@
"llvm.va_end",
"llvm.var.annotation",
"llvm.va_start",
+ "llvm.x86.aesni.aesdec",
+ "llvm.x86.aesni.aesdeclast",
+ "llvm.x86.aesni.aesenc",
+ "llvm.x86.aesni.aesenclast",
+ "llvm.x86.aesni.aesimc",
+ "llvm.x86.aesni.aeskeygenassist",
+ "llvm.x86.avx.addsub.pd.256",
+ "llvm.x86.avx.addsub.ps.256",
+ "llvm.x86.avx.blend.pd.256",
+ "llvm.x86.avx.blend.ps.256",
+ "llvm.x86.avx.blendv.pd.256",
+ "llvm.x86.avx.blendv.ps.256",
+ "llvm.x86.avx.cmp.pd.256",
+ "llvm.x86.avx.cmp.ps.256",
+ "llvm.x86.avx.cvt.pd2.ps.256",
+ "llvm.x86.avx.cvt.pd2dq.256",
+ "llvm.x86.avx.cvt.ps2.pd.256",
+ "llvm.x86.avx.cvt.ps2dq.256",
+ "llvm.x86.avx.cvtdq2.pd.256",
+ "llvm.x86.avx.cvtdq2.ps.256",
+ "llvm.x86.avx.cvtt.pd2dq.256",
+ "llvm.x86.avx.cvtt.ps2dq.256",
+ "llvm.x86.avx.dp.ps.256",
+ "llvm.x86.avx.hadd.pd.256",
+ "llvm.x86.avx.hadd.ps.256",
+ "llvm.x86.avx.hsub.pd.256",
+ "llvm.x86.avx.hsub.ps.256",
+ "llvm.x86.avx.ldu.dq.256",
+ "llvm.x86.avx.loadu.dq.256",
+ "llvm.x86.avx.loadu.pd.256",
+ "llvm.x86.avx.loadu.ps.256",
+ "llvm.x86.avx.maskload.pd",
+ "llvm.x86.avx.maskload.pd.256",
+ "llvm.x86.avx.maskload.ps",
+ "llvm.x86.avx.maskload.ps.256",
+ "llvm.x86.avx.maskstore.pd",
+ "llvm.x86.avx.maskstore.pd.256",
+ "llvm.x86.avx.maskstore.ps",
+ "llvm.x86.avx.maskstore.ps.256",
+ "llvm.x86.avx.max.pd.256",
+ "llvm.x86.avx.max.ps.256",
+ "llvm.x86.avx.min.pd.256",
+ "llvm.x86.avx.min.ps.256",
+ "llvm.x86.avx.movmsk.pd.256",
+ "llvm.x86.avx.movmsk.ps.256",
+ "llvm.x86.avx.movnt.dq.256",
+ "llvm.x86.avx.movnt.pd.256",
+ "llvm.x86.avx.movnt.ps.256",
+ "llvm.x86.avx.ptestc.256",
+ "llvm.x86.avx.ptestnzc.256",
+ "llvm.x86.avx.ptestz.256",
+ "llvm.x86.avx.rcp.ps.256",
+ "llvm.x86.avx.round.pd.256",
+ "llvm.x86.avx.round.ps.256",
+ "llvm.x86.avx.rsqrt.ps.256",
+ "llvm.x86.avx.sqrt.pd.256",
+ "llvm.x86.avx.sqrt.ps.256",
+ "llvm.x86.avx.storeu.dq.256",
+ "llvm.x86.avx.storeu.pd.256",
+ "llvm.x86.avx.storeu.ps.256",
+ "llvm.x86.avx.vbroadcast.sd.256",
+ "llvm.x86.avx.vbroadcastf128.pd.256",
+ "llvm.x86.avx.vbroadcastf128.ps.256",
+ "llvm.x86.avx.vbroadcastss",
+ "llvm.x86.avx.vbroadcastss.256",
+ "llvm.x86.avx.vextractf128.pd.256",
+ "llvm.x86.avx.vextractf128.ps.256",
+ "llvm.x86.avx.vextractf128.si.256",
+ "llvm.x86.avx.vinsertf128.pd.256",
+ "llvm.x86.avx.vinsertf128.ps.256",
+ "llvm.x86.avx.vinsertf128.si.256",
+ "llvm.x86.avx.vperm2f128.pd.256",
+ "llvm.x86.avx.vperm2f128.ps.256",
+ "llvm.x86.avx.vperm2f128.si.256",
+ "llvm.x86.avx.vpermil.pd",
+ "llvm.x86.avx.vpermil.pd.256",
+ "llvm.x86.avx.vpermil.ps",
+ "llvm.x86.avx.vpermil.ps.256",
+ "llvm.x86.avx.vpermilvar.pd",
+ "llvm.x86.avx.vpermilvar.pd.256",
+ "llvm.x86.avx.vpermilvar.ps",
+ "llvm.x86.avx.vpermilvar.ps.256",
+ "llvm.x86.avx.vtestc.pd",
+ "llvm.x86.avx.vtestc.pd.256",
+ "llvm.x86.avx.vtestc.ps",
+ "llvm.x86.avx.vtestc.ps.256",
+ "llvm.x86.avx.vtestnzc.pd",
+ "llvm.x86.avx.vtestnzc.pd.256",
+ "llvm.x86.avx.vtestnzc.ps",
+ "llvm.x86.avx.vtestnzc.ps.256",
+ "llvm.x86.avx.vtestz.pd",
+ "llvm.x86.avx.vtestz.pd.256",
+ "llvm.x86.avx.vtestz.ps",
+ "llvm.x86.avx.vtestz.ps.256",
+ "llvm.x86.avx.vzeroall",
+ "llvm.x86.avx.vzeroupper",
+ "llvm.x86.int",
+ "llvm.x86.mmx.cvtsi32.si64",
+ "llvm.x86.mmx.cvtsi64.si32",
"llvm.x86.mmx.emms",
"llvm.x86.mmx.femms",
"llvm.x86.mmx.maskmovq",
@@ -1225,10 +1427,16 @@
"llvm.x86.mmx.packssdw",
"llvm.x86.mmx.packsswb",
"llvm.x86.mmx.packuswb",
+ "llvm.x86.mmx.padd.b",
+ "llvm.x86.mmx.padd.d",
+ "llvm.x86.mmx.padd.q",
+ "llvm.x86.mmx.padd.w",
"llvm.x86.mmx.padds.b",
"llvm.x86.mmx.padds.w",
"llvm.x86.mmx.paddus.b",
"llvm.x86.mmx.paddus.w",
+ "llvm.x86.mmx.pand",
+ "llvm.x86.mmx.pandn",
"llvm.x86.mmx.pavg.b",
"llvm.x86.mmx.pavg.w",
"llvm.x86.mmx.pcmpeq.b",
@@ -1237,6 +1445,8 @@
"llvm.x86.mmx.pcmpgt.b",
"llvm.x86.mmx.pcmpgt.d",
"llvm.x86.mmx.pcmpgt.w",
+ "llvm.x86.mmx.pextr.w",
+ "llvm.x86.mmx.pinsr.w",
"llvm.x86.mmx.pmadd.wd",
"llvm.x86.mmx.pmaxs.w",
"llvm.x86.mmx.pmaxu.b",
@@ -1245,7 +1455,9 @@
"llvm.x86.mmx.pmovmskb",
"llvm.x86.mmx.pmulh.w",
"llvm.x86.mmx.pmulhu.w",
+ "llvm.x86.mmx.pmull.w",
"llvm.x86.mmx.pmulu.dq",
+ "llvm.x86.mmx.por",
"llvm.x86.mmx.psad.bw",
"llvm.x86.mmx.psll.d",
"llvm.x86.mmx.psll.q",
@@ -1263,10 +1475,25 @@
"llvm.x86.mmx.psrli.d",
"llvm.x86.mmx.psrli.q",
"llvm.x86.mmx.psrli.w",
+ "llvm.x86.mmx.psub.b",
+ "llvm.x86.mmx.psub.d",
+ "llvm.x86.mmx.psub.q",
+ "llvm.x86.mmx.psub.w",
"llvm.x86.mmx.psubs.b",
"llvm.x86.mmx.psubs.w",
"llvm.x86.mmx.psubus.b",
"llvm.x86.mmx.psubus.w",
+ "llvm.x86.mmx.punpckhbw",
+ "llvm.x86.mmx.punpckhdq",
+ "llvm.x86.mmx.punpckhwd",
+ "llvm.x86.mmx.punpcklbw",
+ "llvm.x86.mmx.punpckldq",
+ "llvm.x86.mmx.punpcklwd",
+ "llvm.x86.mmx.pxor",
+ "llvm.x86.mmx.vec.ext.d",
+ "llvm.x86.mmx.vec.init.b",
+ "llvm.x86.mmx.vec.init.d",
+ "llvm.x86.mmx.vec.init.w",
"llvm.x86.sse2.add.sd",
"llvm.x86.sse2.clflush",
"llvm.x86.sse2.cmp.pd",
@@ -1417,7 +1644,6 @@
"llvm.x86.sse41.pmovzxwd",
"llvm.x86.sse41.pmovzxwq",
"llvm.x86.sse41.pmuldq",
- "llvm.x86.sse41.pmulld",
"llvm.x86.sse41.ptestc",
"llvm.x86.sse41.ptestnzc",
"llvm.x86.sse41.ptestz",
@@ -1427,8 +1653,9 @@
"llvm.x86.sse41.round.ss",
"llvm.x86.sse42.crc32.16",
"llvm.x86.sse42.crc32.32",
- "llvm.x86.sse42.crc32.64",
"llvm.x86.sse42.crc32.8",
+ "llvm.x86.sse42.crc64.64",
+ "llvm.x86.sse42.crc64.8",
"llvm.x86.sse42.pcmpestri128",
"llvm.x86.sse42.pcmpestria128",
"llvm.x86.sse42.pcmpestric128",
@@ -1497,8 +1724,6 @@
"llvm.x86.ssse3.pabs.d.128",
"llvm.x86.ssse3.pabs.w",
"llvm.x86.ssse3.pabs.w.128",
- "llvm.x86.ssse3.palign.r",
- "llvm.x86.ssse3.palign.r.128",
"llvm.x86.ssse3.phadd.d",
"llvm.x86.ssse3.phadd.d.128",
"llvm.x86.ssse3.phadd.sw",
@@ -1517,6 +1742,7 @@
"llvm.x86.ssse3.pmul.hr.sw.128",
"llvm.x86.ssse3.pshuf.b",
"llvm.x86.ssse3.pshuf.b.128",
+ "llvm.x86.ssse3.pshuf.w",
"llvm.x86.ssse3.psign.b",
"llvm.x86.ssse3.psign.b.128",
"llvm.x86.ssse3.psign.d",
@@ -1532,12 +1758,7 @@
// Note that entry #0 is the invalid intrinsic!
false,
true,
- true,
- true,
- true,
- true,
- true,
- true,
+ false,
true,
true,
true,
@@ -1629,23 +1850,6 @@
true,
true,
true,
- true,
- true,
- true,
- true,
- true,
- true,
- true,
- true,
- true,
- true,
- true,
- true,
- true,
- true,
- true,
- true,
- true,
false,
false,
false,
@@ -1655,6 +1859,12 @@
false,
false,
false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ true,
true,
true,
true,
@@ -1669,6 +1879,9 @@
true,
true,
true,
+ false,
+ false,
+ true,
true,
true,
true,
@@ -2286,6 +2499,129 @@
false,
false,
false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
#endif
// Function name -> enum value recognizer code.
@@ -2296,12 +2632,7 @@
case 'a':
if (Len == 16 && !memcmp(Name, "llvm.alpha.umulh", 16)) return Intrinsic::alpha_umulh;
if (Len > 15 && !memcmp(Name, "llvm.annotation.", 16)) return Intrinsic::annotation;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vabals.", 21)) return Intrinsic::arm_neon_vabals;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vabalu.", 21)) return Intrinsic::arm_neon_vabalu;
- if (Len > 19 && !memcmp(Name, "llvm.arm.neon.vabas.", 20)) return Intrinsic::arm_neon_vabas;
- if (Len > 19 && !memcmp(Name, "llvm.arm.neon.vabau.", 20)) return Intrinsic::arm_neon_vabau;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vabdls.", 21)) return Intrinsic::arm_neon_vabdls;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vabdlu.", 21)) return Intrinsic::arm_neon_vabdlu;
+ if (Len == 18 && !memcmp(Name, "llvm.arm.get.fpscr", 18)) return Intrinsic::arm_get_fpscr;
if (Len > 19 && !memcmp(Name, "llvm.arm.neon.vabds.", 20)) return Intrinsic::arm_neon_vabds;
if (Len > 19 && !memcmp(Name, "llvm.arm.neon.vabdu.", 20)) return Intrinsic::arm_neon_vabdu;
if (Len > 18 && !memcmp(Name, "llvm.arm.neon.vabs.", 19)) return Intrinsic::arm_neon_vabs;
@@ -2310,10 +2641,6 @@
if (Len == 20 && !memcmp(Name, "llvm.arm.neon.vacgtd", 20)) return Intrinsic::arm_neon_vacgtd;
if (Len == 20 && !memcmp(Name, "llvm.arm.neon.vacgtq", 20)) return Intrinsic::arm_neon_vacgtq;
if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vaddhn.", 21)) return Intrinsic::arm_neon_vaddhn;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vaddls.", 21)) return Intrinsic::arm_neon_vaddls;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vaddlu.", 21)) return Intrinsic::arm_neon_vaddlu;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vaddws.", 21)) return Intrinsic::arm_neon_vaddws;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vaddwu.", 21)) return Intrinsic::arm_neon_vaddwu;
if (Len > 18 && !memcmp(Name, "llvm.arm.neon.vcls.", 19)) return Intrinsic::arm_neon_vcls;
if (Len > 18 && !memcmp(Name, "llvm.arm.neon.vclz.", 19)) return Intrinsic::arm_neon_vclz;
if (Len > 18 && !memcmp(Name, "llvm.arm.neon.vcnt.", 19)) return Intrinsic::arm_neon_vcnt;
@@ -2336,16 +2663,7 @@
if (Len > 19 && !memcmp(Name, "llvm.arm.neon.vmaxu.", 20)) return Intrinsic::arm_neon_vmaxu;
if (Len > 19 && !memcmp(Name, "llvm.arm.neon.vmins.", 20)) return Intrinsic::arm_neon_vmins;
if (Len > 19 && !memcmp(Name, "llvm.arm.neon.vminu.", 20)) return Intrinsic::arm_neon_vminu;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vmlals.", 21)) return Intrinsic::arm_neon_vmlals;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vmlalu.", 21)) return Intrinsic::arm_neon_vmlalu;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vmlsls.", 21)) return Intrinsic::arm_neon_vmlsls;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vmlslu.", 21)) return Intrinsic::arm_neon_vmlslu;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vmovls.", 21)) return Intrinsic::arm_neon_vmovls;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vmovlu.", 21)) return Intrinsic::arm_neon_vmovlu;
- if (Len > 19 && !memcmp(Name, "llvm.arm.neon.vmovn.", 20)) return Intrinsic::arm_neon_vmovn;
if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vmullp.", 21)) return Intrinsic::arm_neon_vmullp;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vmulls.", 21)) return Intrinsic::arm_neon_vmulls;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vmullu.", 21)) return Intrinsic::arm_neon_vmullu;
if (Len > 19 && !memcmp(Name, "llvm.arm.neon.vmulp.", 20)) return Intrinsic::arm_neon_vmulp;
if (Len > 21 && !memcmp(Name, "llvm.arm.neon.vpadals.", 22)) return Intrinsic::arm_neon_vpadals;
if (Len > 21 && !memcmp(Name, "llvm.arm.neon.vpadalu.", 22)) return Intrinsic::arm_neon_vpadalu;
@@ -2406,10 +2724,6 @@
if (Len > 18 && !memcmp(Name, "llvm.arm.neon.vst4.", 19)) return Intrinsic::arm_neon_vst4;
if (Len > 22 && !memcmp(Name, "llvm.arm.neon.vst4lane.", 23)) return Intrinsic::arm_neon_vst4lane;
if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vsubhn.", 21)) return Intrinsic::arm_neon_vsubhn;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vsubls.", 21)) return Intrinsic::arm_neon_vsubls;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vsublu.", 21)) return Intrinsic::arm_neon_vsublu;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vsubws.", 21)) return Intrinsic::arm_neon_vsubws;
- if (Len > 20 && !memcmp(Name, "llvm.arm.neon.vsubwu.", 21)) return Intrinsic::arm_neon_vsubwu;
if (Len == 19 && !memcmp(Name, "llvm.arm.neon.vtbl1", 19)) return Intrinsic::arm_neon_vtbl1;
if (Len == 19 && !memcmp(Name, "llvm.arm.neon.vtbl2", 19)) return Intrinsic::arm_neon_vtbl2;
if (Len == 19 && !memcmp(Name, "llvm.arm.neon.vtbl3", 19)) return Intrinsic::arm_neon_vtbl3;
@@ -2418,7 +2732,14 @@
if (Len == 19 && !memcmp(Name, "llvm.arm.neon.vtbx2", 19)) return Intrinsic::arm_neon_vtbx2;
if (Len == 19 && !memcmp(Name, "llvm.arm.neon.vtbx3", 19)) return Intrinsic::arm_neon_vtbx3;
if (Len == 19 && !memcmp(Name, "llvm.arm.neon.vtbx4", 19)) return Intrinsic::arm_neon_vtbx4;
+ if (Len == 13 && !memcmp(Name, "llvm.arm.qadd", 13)) return Intrinsic::arm_qadd;
+ if (Len == 13 && !memcmp(Name, "llvm.arm.qsub", 13)) return Intrinsic::arm_qsub;
+ if (Len == 18 && !memcmp(Name, "llvm.arm.set.fpscr", 18)) return Intrinsic::arm_set_fpscr;
+ if (Len == 13 && !memcmp(Name, "llvm.arm.ssat", 13)) return Intrinsic::arm_ssat;
if (Len == 23 && !memcmp(Name, "llvm.arm.thread.pointer", 23)) return Intrinsic::arm_thread_pointer;
+ if (Len == 13 && !memcmp(Name, "llvm.arm.usat", 13)) return Intrinsic::arm_usat;
+ if (Len > 14 && !memcmp(Name, "llvm.arm.vcvtr.", 15)) return Intrinsic::arm_vcvtr;
+ if (Len > 15 && !memcmp(Name, "llvm.arm.vcvtru.", 16)) return Intrinsic::arm_vcvtru;
if (Len > 20 && !memcmp(Name, "llvm.atomic.cmp.swap.", 21)) return Intrinsic::atomic_cmp_swap;
if (Len > 20 && !memcmp(Name, "llvm.atomic.load.add.", 21)) return Intrinsic::atomic_load_add;
if (Len > 20 && !memcmp(Name, "llvm.atomic.load.and.", 21)) return Intrinsic::atomic_load_and;
@@ -2436,6 +2757,8 @@
if (Len > 10 && !memcmp(Name, "llvm.bswap.", 11)) return Intrinsic::bswap;
break;
case 'c':
+ if (Len == 22 && !memcmp(Name, "llvm.convert.from.fp16", 22)) return Intrinsic::convert_from_fp16;
+ if (Len == 20 && !memcmp(Name, "llvm.convert.to.fp16", 20)) return Intrinsic::convert_to_fp16;
if (Len > 14 && !memcmp(Name, "llvm.convertff.", 15)) return Intrinsic::convertff;
if (Len > 15 && !memcmp(Name, "llvm.convertfsi.", 16)) return Intrinsic::convertfsi;
if (Len > 15 && !memcmp(Name, "llvm.convertfui.", 16)) return Intrinsic::convertfui;
@@ -2777,6 +3100,105 @@
if (Len == 19 && !memcmp(Name, "llvm.var.annotation", 19)) return Intrinsic::var_annotation;
break;
case 'x':
+ if (Len == 21 && !memcmp(Name, "llvm.x86.aesni.aesdec", 21)) return Intrinsic::x86_aesni_aesdec;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.aesni.aesdeclast", 25)) return Intrinsic::x86_aesni_aesdeclast;
+ if (Len == 21 && !memcmp(Name, "llvm.x86.aesni.aesenc", 21)) return Intrinsic::x86_aesni_aesenc;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.aesni.aesenclast", 25)) return Intrinsic::x86_aesni_aesenclast;
+ if (Len == 21 && !memcmp(Name, "llvm.x86.aesni.aesimc", 21)) return Intrinsic::x86_aesni_aesimc;
+ if (Len == 30 && !memcmp(Name, "llvm.x86.aesni.aeskeygenassist", 30)) return Intrinsic::x86_aesni_aeskeygenassist;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.addsub.pd.256", 26)) return Intrinsic::x86_avx_addsub_pd_256;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.addsub.ps.256", 26)) return Intrinsic::x86_avx_addsub_ps_256;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.avx.blend.pd.256", 25)) return Intrinsic::x86_avx_blend_pd_256;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.avx.blend.ps.256", 25)) return Intrinsic::x86_avx_blend_ps_256;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.blendv.pd.256", 26)) return Intrinsic::x86_avx_blendv_pd_256;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.blendv.ps.256", 26)) return Intrinsic::x86_avx_blendv_ps_256;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.avx.cmp.pd.256", 23)) return Intrinsic::x86_avx_cmp_pd_256;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.avx.cmp.ps.256", 23)) return Intrinsic::x86_avx_cmp_ps_256;
+ if (Len == 27 && !memcmp(Name, "llvm.x86.avx.cvt.pd2.ps.256", 27)) return Intrinsic::x86_avx_cvt_pd2_ps_256;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.cvt.pd2dq.256", 26)) return Intrinsic::x86_avx_cvt_pd2dq_256;
+ if (Len == 27 && !memcmp(Name, "llvm.x86.avx.cvt.ps2.pd.256", 27)) return Intrinsic::x86_avx_cvt_ps2_pd_256;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.cvt.ps2dq.256", 26)) return Intrinsic::x86_avx_cvt_ps2dq_256;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.cvtdq2.pd.256", 26)) return Intrinsic::x86_avx_cvtdq2_pd_256;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.cvtdq2.ps.256", 26)) return Intrinsic::x86_avx_cvtdq2_ps_256;
+ if (Len == 27 && !memcmp(Name, "llvm.x86.avx.cvtt.pd2dq.256", 27)) return Intrinsic::x86_avx_cvtt_pd2dq_256;
+ if (Len == 27 && !memcmp(Name, "llvm.x86.avx.cvtt.ps2dq.256", 27)) return Intrinsic::x86_avx_cvtt_ps2dq_256;
+ if (Len == 22 && !memcmp(Name, "llvm.x86.avx.dp.ps.256", 22)) return Intrinsic::x86_avx_dp_ps_256;
+ if (Len == 24 && !memcmp(Name, "llvm.x86.avx.hadd.pd.256", 24)) return Intrinsic::x86_avx_hadd_pd_256;
+ if (Len == 24 && !memcmp(Name, "llvm.x86.avx.hadd.ps.256", 24)) return Intrinsic::x86_avx_hadd_ps_256;
+ if (Len == 24 && !memcmp(Name, "llvm.x86.avx.hsub.pd.256", 24)) return Intrinsic::x86_avx_hsub_pd_256;
+ if (Len == 24 && !memcmp(Name, "llvm.x86.avx.hsub.ps.256", 24)) return Intrinsic::x86_avx_hsub_ps_256;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.avx.ldu.dq.256", 23)) return Intrinsic::x86_avx_ldu_dq_256;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.avx.loadu.dq.256", 25)) return Intrinsic::x86_avx_loadu_dq_256;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.avx.loadu.pd.256", 25)) return Intrinsic::x86_avx_loadu_pd_256;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.avx.loadu.ps.256", 25)) return Intrinsic::x86_avx_loadu_ps_256;
+ if (Len == 24 && !memcmp(Name, "llvm.x86.avx.maskload.pd", 24)) return Intrinsic::x86_avx_maskload_pd;
+ if (Len == 28 && !memcmp(Name, "llvm.x86.avx.maskload.pd.256", 28)) return Intrinsic::x86_avx_maskload_pd_256;
+ if (Len == 24 && !memcmp(Name, "llvm.x86.avx.maskload.ps", 24)) return Intrinsic::x86_avx_maskload_ps;
+ if (Len == 28 && !memcmp(Name, "llvm.x86.avx.maskload.ps.256", 28)) return Intrinsic::x86_avx_maskload_ps_256;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.avx.maskstore.pd", 25)) return Intrinsic::x86_avx_maskstore_pd;
+ if (Len == 29 && !memcmp(Name, "llvm.x86.avx.maskstore.pd.256", 29)) return Intrinsic::x86_avx_maskstore_pd_256;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.avx.maskstore.ps", 25)) return Intrinsic::x86_avx_maskstore_ps;
+ if (Len == 29 && !memcmp(Name, "llvm.x86.avx.maskstore.ps.256", 29)) return Intrinsic::x86_avx_maskstore_ps_256;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.avx.max.pd.256", 23)) return Intrinsic::x86_avx_max_pd_256;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.avx.max.ps.256", 23)) return Intrinsic::x86_avx_max_ps_256;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.avx.min.pd.256", 23)) return Intrinsic::x86_avx_min_pd_256;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.avx.min.ps.256", 23)) return Intrinsic::x86_avx_min_ps_256;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.movmsk.pd.256", 26)) return Intrinsic::x86_avx_movmsk_pd_256;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.movmsk.ps.256", 26)) return Intrinsic::x86_avx_movmsk_ps_256;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.avx.movnt.dq.256", 25)) return Intrinsic::x86_avx_movnt_dq_256;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.avx.movnt.pd.256", 25)) return Intrinsic::x86_avx_movnt_pd_256;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.avx.movnt.ps.256", 25)) return Intrinsic::x86_avx_movnt_ps_256;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.avx.ptestc.256", 23)) return Intrinsic::x86_avx_ptestc_256;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.avx.ptestnzc.256", 25)) return Intrinsic::x86_avx_ptestnzc_256;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.avx.ptestz.256", 23)) return Intrinsic::x86_avx_ptestz_256;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.avx.rcp.ps.256", 23)) return Intrinsic::x86_avx_rcp_ps_256;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.avx.round.pd.256", 25)) return Intrinsic::x86_avx_round_pd_256;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.avx.round.ps.256", 25)) return Intrinsic::x86_avx_round_ps_256;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.avx.rsqrt.ps.256", 25)) return Intrinsic::x86_avx_rsqrt_ps_256;
+ if (Len == 24 && !memcmp(Name, "llvm.x86.avx.sqrt.pd.256", 24)) return Intrinsic::x86_avx_sqrt_pd_256;
+ if (Len == 24 && !memcmp(Name, "llvm.x86.avx.sqrt.ps.256", 24)) return Intrinsic::x86_avx_sqrt_ps_256;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.storeu.dq.256", 26)) return Intrinsic::x86_avx_storeu_dq_256;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.storeu.pd.256", 26)) return Intrinsic::x86_avx_storeu_pd_256;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.storeu.ps.256", 26)) return Intrinsic::x86_avx_storeu_ps_256;
+ if (Len == 30 && !memcmp(Name, "llvm.x86.avx.vbroadcast.sd.256", 30)) return Intrinsic::x86_avx_vbroadcast_sd_256;
+ if (Len == 34 && !memcmp(Name, "llvm.x86.avx.vbroadcastf128.pd.256", 34)) return Intrinsic::x86_avx_vbroadcastf128_pd_256;
+ if (Len == 34 && !memcmp(Name, "llvm.x86.avx.vbroadcastf128.ps.256", 34)) return Intrinsic::x86_avx_vbroadcastf128_ps_256;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.avx.vbroadcastss", 25)) return Intrinsic::x86_avx_vbroadcastss;
+ if (Len == 29 && !memcmp(Name, "llvm.x86.avx.vbroadcastss.256", 29)) return Intrinsic::x86_avx_vbroadcastss_256;
+ if (Len == 32 && !memcmp(Name, "llvm.x86.avx.vextractf128.pd.256", 32)) return Intrinsic::x86_avx_vextractf128_pd_256;
+ if (Len == 32 && !memcmp(Name, "llvm.x86.avx.vextractf128.ps.256", 32)) return Intrinsic::x86_avx_vextractf128_ps_256;
+ if (Len == 32 && !memcmp(Name, "llvm.x86.avx.vextractf128.si.256", 32)) return Intrinsic::x86_avx_vextractf128_si_256;
+ if (Len == 31 && !memcmp(Name, "llvm.x86.avx.vinsertf128.pd.256", 31)) return Intrinsic::x86_avx_vinsertf128_pd_256;
+ if (Len == 31 && !memcmp(Name, "llvm.x86.avx.vinsertf128.ps.256", 31)) return Intrinsic::x86_avx_vinsertf128_ps_256;
+ if (Len == 31 && !memcmp(Name, "llvm.x86.avx.vinsertf128.si.256", 31)) return Intrinsic::x86_avx_vinsertf128_si_256;
+ if (Len == 30 && !memcmp(Name, "llvm.x86.avx.vperm2f128.pd.256", 30)) return Intrinsic::x86_avx_vperm2f128_pd_256;
+ if (Len == 30 && !memcmp(Name, "llvm.x86.avx.vperm2f128.ps.256", 30)) return Intrinsic::x86_avx_vperm2f128_ps_256;
+ if (Len == 30 && !memcmp(Name, "llvm.x86.avx.vperm2f128.si.256", 30)) return Intrinsic::x86_avx_vperm2f128_si_256;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.avx.vpermil.pd", 23)) return Intrinsic::x86_avx_vpermil_pd;
+ if (Len == 27 && !memcmp(Name, "llvm.x86.avx.vpermil.pd.256", 27)) return Intrinsic::x86_avx_vpermil_pd_256;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.avx.vpermil.ps", 23)) return Intrinsic::x86_avx_vpermil_ps;
+ if (Len == 27 && !memcmp(Name, "llvm.x86.avx.vpermil.ps.256", 27)) return Intrinsic::x86_avx_vpermil_ps_256;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.vpermilvar.pd", 26)) return Intrinsic::x86_avx_vpermilvar_pd;
+ if (Len == 30 && !memcmp(Name, "llvm.x86.avx.vpermilvar.pd.256", 30)) return Intrinsic::x86_avx_vpermilvar_pd_256;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.vpermilvar.ps", 26)) return Intrinsic::x86_avx_vpermilvar_ps;
+ if (Len == 30 && !memcmp(Name, "llvm.x86.avx.vpermilvar.ps.256", 30)) return Intrinsic::x86_avx_vpermilvar_ps_256;
+ if (Len == 22 && !memcmp(Name, "llvm.x86.avx.vtestc.pd", 22)) return Intrinsic::x86_avx_vtestc_pd;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.vtestc.pd.256", 26)) return Intrinsic::x86_avx_vtestc_pd_256;
+ if (Len == 22 && !memcmp(Name, "llvm.x86.avx.vtestc.ps", 22)) return Intrinsic::x86_avx_vtestc_ps;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.vtestc.ps.256", 26)) return Intrinsic::x86_avx_vtestc_ps_256;
+ if (Len == 24 && !memcmp(Name, "llvm.x86.avx.vtestnzc.pd", 24)) return Intrinsic::x86_avx_vtestnzc_pd;
+ if (Len == 28 && !memcmp(Name, "llvm.x86.avx.vtestnzc.pd.256", 28)) return Intrinsic::x86_avx_vtestnzc_pd_256;
+ if (Len == 24 && !memcmp(Name, "llvm.x86.avx.vtestnzc.ps", 24)) return Intrinsic::x86_avx_vtestnzc_ps;
+ if (Len == 28 && !memcmp(Name, "llvm.x86.avx.vtestnzc.ps.256", 28)) return Intrinsic::x86_avx_vtestnzc_ps_256;
+ if (Len == 22 && !memcmp(Name, "llvm.x86.avx.vtestz.pd", 22)) return Intrinsic::x86_avx_vtestz_pd;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.vtestz.pd.256", 26)) return Intrinsic::x86_avx_vtestz_pd_256;
+ if (Len == 22 && !memcmp(Name, "llvm.x86.avx.vtestz.ps", 22)) return Intrinsic::x86_avx_vtestz_ps;
+ if (Len == 26 && !memcmp(Name, "llvm.x86.avx.vtestz.ps.256", 26)) return Intrinsic::x86_avx_vtestz_ps_256;
+ if (Len == 21 && !memcmp(Name, "llvm.x86.avx.vzeroall", 21)) return Intrinsic::x86_avx_vzeroall;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.avx.vzeroupper", 23)) return Intrinsic::x86_avx_vzeroupper;
+ if (Len == 12 && !memcmp(Name, "llvm.x86.int", 12)) return Intrinsic::x86_int;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.mmx.cvtsi32.si64", 25)) return Intrinsic::x86_mmx_cvtsi32_si64;
+ if (Len == 25 && !memcmp(Name, "llvm.x86.mmx.cvtsi64.si32", 25)) return Intrinsic::x86_mmx_cvtsi64_si32;
if (Len == 17 && !memcmp(Name, "llvm.x86.mmx.emms", 17)) return Intrinsic::x86_mmx_emms;
if (Len == 18 && !memcmp(Name, "llvm.x86.mmx.femms", 18)) return Intrinsic::x86_mmx_femms;
if (Len == 21 && !memcmp(Name, "llvm.x86.mmx.maskmovq", 21)) return Intrinsic::x86_mmx_maskmovq;
@@ -2784,10 +3206,16 @@
if (Len == 21 && !memcmp(Name, "llvm.x86.mmx.packssdw", 21)) return Intrinsic::x86_mmx_packssdw;
if (Len == 21 && !memcmp(Name, "llvm.x86.mmx.packsswb", 21)) return Intrinsic::x86_mmx_packsswb;
if (Len == 21 && !memcmp(Name, "llvm.x86.mmx.packuswb", 21)) return Intrinsic::x86_mmx_packuswb;
+ if (Len == 19 && !memcmp(Name, "llvm.x86.mmx.padd.b", 19)) return Intrinsic::x86_mmx_padd_b;
+ if (Len == 19 && !memcmp(Name, "llvm.x86.mmx.padd.d", 19)) return Intrinsic::x86_mmx_padd_d;
+ if (Len == 19 && !memcmp(Name, "llvm.x86.mmx.padd.q", 19)) return Intrinsic::x86_mmx_padd_q;
+ if (Len == 19 && !memcmp(Name, "llvm.x86.mmx.padd.w", 19)) return Intrinsic::x86_mmx_padd_w;
if (Len == 20 && !memcmp(Name, "llvm.x86.mmx.padds.b", 20)) return Intrinsic::x86_mmx_padds_b;
if (Len == 20 && !memcmp(Name, "llvm.x86.mmx.padds.w", 20)) return Intrinsic::x86_mmx_padds_w;
if (Len == 21 && !memcmp(Name, "llvm.x86.mmx.paddus.b", 21)) return Intrinsic::x86_mmx_paddus_b;
if (Len == 21 && !memcmp(Name, "llvm.x86.mmx.paddus.w", 21)) return Intrinsic::x86_mmx_paddus_w;
+ if (Len == 17 && !memcmp(Name, "llvm.x86.mmx.pand", 17)) return Intrinsic::x86_mmx_pand;
+ if (Len == 18 && !memcmp(Name, "llvm.x86.mmx.pandn", 18)) return Intrinsic::x86_mmx_pandn;
if (Len == 19 && !memcmp(Name, "llvm.x86.mmx.pavg.b", 19)) return Intrinsic::x86_mmx_pavg_b;
if (Len == 19 && !memcmp(Name, "llvm.x86.mmx.pavg.w", 19)) return Intrinsic::x86_mmx_pavg_w;
if (Len == 21 && !memcmp(Name, "llvm.x86.mmx.pcmpeq.b", 21)) return Intrinsic::x86_mmx_pcmpeq_b;
@@ -2796,6 +3224,8 @@
if (Len == 21 && !memcmp(Name, "llvm.x86.mmx.pcmpgt.b", 21)) return Intrinsic::x86_mmx_pcmpgt_b;
if (Len == 21 && !memcmp(Name, "llvm.x86.mmx.pcmpgt.d", 21)) return Intrinsic::x86_mmx_pcmpgt_d;
if (Len == 21 && !memcmp(Name, "llvm.x86.mmx.pcmpgt.w", 21)) return Intrinsic::x86_mmx_pcmpgt_w;
+ if (Len == 20 && !memcmp(Name, "llvm.x86.mmx.pextr.w", 20)) return Intrinsic::x86_mmx_pextr_w;
+ if (Len == 20 && !memcmp(Name, "llvm.x86.mmx.pinsr.w", 20)) return Intrinsic::x86_mmx_pinsr_w;
if (Len == 21 && !memcmp(Name, "llvm.x86.mmx.pmadd.wd", 21)) return Intrinsic::x86_mmx_pmadd_wd;
if (Len == 20 && !memcmp(Name, "llvm.x86.mmx.pmaxs.w", 20)) return Intrinsic::x86_mmx_pmaxs_w;
if (Len == 20 && !memcmp(Name, "llvm.x86.mmx.pmaxu.b", 20)) return Intrinsic::x86_mmx_pmaxu_b;
@@ -2804,7 +3234,9 @@
if (Len == 21 && !memcmp(Name, "llvm.x86.mmx.pmovmskb", 21)) return Intrinsic::x86_mmx_pmovmskb;
if (Len == 20 && !memcmp(Name, "llvm.x86.mmx.pmulh.w", 20)) return Intrinsic::x86_mmx_pmulh_w;
if (Len == 21 && !memcmp(Name, "llvm.x86.mmx.pmulhu.w", 21)) return Intrinsic::x86_mmx_pmulhu_w;
+ if (Len == 20 && !memcmp(Name, "llvm.x86.mmx.pmull.w", 20)) return Intrinsic::x86_mmx_pmull_w;
if (Len == 21 && !memcmp(Name, "llvm.x86.mmx.pmulu.dq", 21)) return Intrinsic::x86_mmx_pmulu_dq;
+ if (Len == 16 && !memcmp(Name, "llvm.x86.mmx.por", 16)) return Intrinsic::x86_mmx_por;
if (Len == 20 && !memcmp(Name, "llvm.x86.mmx.psad.bw", 20)) return Intrinsic::x86_mmx_psad_bw;
if (Len == 19 && !memcmp(Name, "llvm.x86.mmx.psll.d", 19)) return Intrinsic::x86_mmx_psll_d;
if (Len == 19 && !memcmp(Name, "llvm.x86.mmx.psll.q", 19)) return Intrinsic::x86_mmx_psll_q;
@@ -2822,10 +3254,25 @@
if (Len == 20 && !memcmp(Name, "llvm.x86.mmx.psrli.d", 20)) return Intrinsic::x86_mmx_psrli_d;
if (Len == 20 && !memcmp(Name, "llvm.x86.mmx.psrli.q", 20)) return Intrinsic::x86_mmx_psrli_q;
if (Len == 20 && !memcmp(Name, "llvm.x86.mmx.psrli.w", 20)) return Intrinsic::x86_mmx_psrli_w;
+ if (Len == 19 && !memcmp(Name, "llvm.x86.mmx.psub.b", 19)) return Intrinsic::x86_mmx_psub_b;
+ if (Len == 19 && !memcmp(Name, "llvm.x86.mmx.psub.d", 19)) return Intrinsic::x86_mmx_psub_d;
+ if (Len == 19 && !memcmp(Name, "llvm.x86.mmx.psub.q", 19)) return Intrinsic::x86_mmx_psub_q;
+ if (Len == 19 && !memcmp(Name, "llvm.x86.mmx.psub.w", 19)) return Intrinsic::x86_mmx_psub_w;
if (Len == 20 && !memcmp(Name, "llvm.x86.mmx.psubs.b", 20)) return Intrinsic::x86_mmx_psubs_b;
if (Len == 20 && !memcmp(Name, "llvm.x86.mmx.psubs.w", 20)) return Intrinsic::x86_mmx_psubs_w;
if (Len == 21 && !memcmp(Name, "llvm.x86.mmx.psubus.b", 21)) return Intrinsic::x86_mmx_psubus_b;
if (Len == 21 && !memcmp(Name, "llvm.x86.mmx.psubus.w", 21)) return Intrinsic::x86_mmx_psubus_w;
+ if (Len == 22 && !memcmp(Name, "llvm.x86.mmx.punpckhbw", 22)) return Intrinsic::x86_mmx_punpckhbw;
+ if (Len == 22 && !memcmp(Name, "llvm.x86.mmx.punpckhdq", 22)) return Intrinsic::x86_mmx_punpckhdq;
+ if (Len == 22 && !memcmp(Name, "llvm.x86.mmx.punpckhwd", 22)) return Intrinsic::x86_mmx_punpckhwd;
+ if (Len == 22 && !memcmp(Name, "llvm.x86.mmx.punpcklbw", 22)) return Intrinsic::x86_mmx_punpcklbw;
+ if (Len == 22 && !memcmp(Name, "llvm.x86.mmx.punpckldq", 22)) return Intrinsic::x86_mmx_punpckldq;
+ if (Len == 22 && !memcmp(Name, "llvm.x86.mmx.punpcklwd", 22)) return Intrinsic::x86_mmx_punpcklwd;
+ if (Len == 17 && !memcmp(Name, "llvm.x86.mmx.pxor", 17)) return Intrinsic::x86_mmx_pxor;
+ if (Len == 22 && !memcmp(Name, "llvm.x86.mmx.vec.ext.d", 22)) return Intrinsic::x86_mmx_vec_ext_d;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.mmx.vec.init.b", 23)) return Intrinsic::x86_mmx_vec_init_b;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.mmx.vec.init.d", 23)) return Intrinsic::x86_mmx_vec_init_d;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.mmx.vec.init.w", 23)) return Intrinsic::x86_mmx_vec_init_w;
if (Len == 19 && !memcmp(Name, "llvm.x86.sse.add.ss", 19)) return Intrinsic::x86_sse_add_ss;
if (Len == 19 && !memcmp(Name, "llvm.x86.sse.cmp.ps", 19)) return Intrinsic::x86_sse_cmp_ps;
if (Len == 19 && !memcmp(Name, "llvm.x86.sse.cmp.ss", 19)) return Intrinsic::x86_sse_cmp_ss;
@@ -3023,7 +3470,6 @@
if (Len == 23 && !memcmp(Name, "llvm.x86.sse41.pmovzxwd", 23)) return Intrinsic::x86_sse41_pmovzxwd;
if (Len == 23 && !memcmp(Name, "llvm.x86.sse41.pmovzxwq", 23)) return Intrinsic::x86_sse41_pmovzxwq;
if (Len == 21 && !memcmp(Name, "llvm.x86.sse41.pmuldq", 21)) return Intrinsic::x86_sse41_pmuldq;
- if (Len == 21 && !memcmp(Name, "llvm.x86.sse41.pmulld", 21)) return Intrinsic::x86_sse41_pmulld;
if (Len == 21 && !memcmp(Name, "llvm.x86.sse41.ptestc", 21)) return Intrinsic::x86_sse41_ptestc;
if (Len == 23 && !memcmp(Name, "llvm.x86.sse41.ptestnzc", 23)) return Intrinsic::x86_sse41_ptestnzc;
if (Len == 21 && !memcmp(Name, "llvm.x86.sse41.ptestz", 21)) return Intrinsic::x86_sse41_ptestz;
@@ -3033,8 +3479,9 @@
if (Len == 23 && !memcmp(Name, "llvm.x86.sse41.round.ss", 23)) return Intrinsic::x86_sse41_round_ss;
if (Len == 23 && !memcmp(Name, "llvm.x86.sse42.crc32.16", 23)) return Intrinsic::x86_sse42_crc32_16;
if (Len == 23 && !memcmp(Name, "llvm.x86.sse42.crc32.32", 23)) return Intrinsic::x86_sse42_crc32_32;
- if (Len == 23 && !memcmp(Name, "llvm.x86.sse42.crc32.64", 23)) return Intrinsic::x86_sse42_crc32_64;
if (Len == 22 && !memcmp(Name, "llvm.x86.sse42.crc32.8", 22)) return Intrinsic::x86_sse42_crc32_8;
+ if (Len == 23 && !memcmp(Name, "llvm.x86.sse42.crc64.64", 23)) return Intrinsic::x86_sse42_crc64_64;
+ if (Len == 22 && !memcmp(Name, "llvm.x86.sse42.crc64.8", 22)) return Intrinsic::x86_sse42_crc64_8;
if (Len == 27 && !memcmp(Name, "llvm.x86.sse42.pcmpestri128", 27)) return Intrinsic::x86_sse42_pcmpestri128;
if (Len == 28 && !memcmp(Name, "llvm.x86.sse42.pcmpestria128", 28)) return Intrinsic::x86_sse42_pcmpestria128;
if (Len == 28 && !memcmp(Name, "llvm.x86.sse42.pcmpestric128", 28)) return Intrinsic::x86_sse42_pcmpestric128;
@@ -3056,8 +3503,6 @@
if (Len == 25 && !memcmp(Name, "llvm.x86.ssse3.pabs.d.128", 25)) return Intrinsic::x86_ssse3_pabs_d_128;
if (Len == 21 && !memcmp(Name, "llvm.x86.ssse3.pabs.w", 21)) return Intrinsic::x86_ssse3_pabs_w;
if (Len == 25 && !memcmp(Name, "llvm.x86.ssse3.pabs.w.128", 25)) return Intrinsic::x86_ssse3_pabs_w_128;
- if (Len == 23 && !memcmp(Name, "llvm.x86.ssse3.palign.r", 23)) return Intrinsic::x86_ssse3_palign_r;
- if (Len == 27 && !memcmp(Name, "llvm.x86.ssse3.palign.r.128", 27)) return Intrinsic::x86_ssse3_palign_r_128;
if (Len == 22 && !memcmp(Name, "llvm.x86.ssse3.phadd.d", 22)) return Intrinsic::x86_ssse3_phadd_d;
if (Len == 26 && !memcmp(Name, "llvm.x86.ssse3.phadd.d.128", 26)) return Intrinsic::x86_ssse3_phadd_d_128;
if (Len == 23 && !memcmp(Name, "llvm.x86.ssse3.phadd.sw", 23)) return Intrinsic::x86_ssse3_phadd_sw;
@@ -3076,6 +3521,7 @@
if (Len == 29 && !memcmp(Name, "llvm.x86.ssse3.pmul.hr.sw.128", 29)) return Intrinsic::x86_ssse3_pmul_hr_sw_128;
if (Len == 22 && !memcmp(Name, "llvm.x86.ssse3.pshuf.b", 22)) return Intrinsic::x86_ssse3_pshuf_b;
if (Len == 26 && !memcmp(Name, "llvm.x86.ssse3.pshuf.b.128", 26)) return Intrinsic::x86_ssse3_pshuf_b_128;
+ if (Len == 22 && !memcmp(Name, "llvm.x86.ssse3.pshuf.w", 22)) return Intrinsic::x86_ssse3_pshuf_w;
if (Len == 22 && !memcmp(Name, "llvm.x86.ssse3.psign.b", 22)) return Intrinsic::x86_ssse3_psign_b;
if (Len == 26 && !memcmp(Name, "llvm.x86.ssse3.psign.b.128", 26)) return Intrinsic::x86_ssse3_psign_b_128;
if (Len == 22 && !memcmp(Name, "llvm.x86.ssse3.psign.d", 22)) return Intrinsic::x86_ssse3_psign_d;
@@ -3091,6 +3537,190 @@
#ifdef GET_INTRINSIC_VERIFIER
switch (ID) {
default: assert(0 && "Invalid intrinsic!");
+ case Intrinsic::eh_unwind_init: // llvm.eh.unwind.init
+ case Intrinsic::ppc_altivec_dssall: // llvm.ppc.altivec.dssall
+ case Intrinsic::ppc_sync: // llvm.ppc.sync
+ case Intrinsic::trap: // llvm.trap
+ case Intrinsic::x86_avx_vzeroall: // llvm.x86.avx.vzeroall
+ case Intrinsic::x86_avx_vzeroupper: // llvm.x86.avx.vzeroupper
+ case Intrinsic::x86_mmx_emms: // llvm.x86.mmx.emms
+ case Intrinsic::x86_mmx_femms: // llvm.x86.mmx.femms
+ case Intrinsic::x86_sse2_lfence: // llvm.x86.sse2.lfence
+ case Intrinsic::x86_sse2_mfence: // llvm.x86.sse2.mfence
+ case Intrinsic::x86_sse_sfence: // llvm.x86.sse.sfence
+ VerifyIntrinsicPrototype(ID, IF, 0, 0);
+ break;
+ case Intrinsic::memcpy: // llvm.memcpy
+ case Intrinsic::memmove: // llvm.memmove
+ VerifyIntrinsicPrototype(ID, IF, 0, 5, MVT::iPTRAny, MVT::iPTRAny, MVT::iAny, MVT::i32, MVT::i1);
+ break;
+ case Intrinsic::memset: // llvm.memset
+ VerifyIntrinsicPrototype(ID, IF, 0, 5, MVT::iPTRAny, MVT::i8, MVT::iAny, MVT::i32, MVT::i1);
+ break;
+ case Intrinsic::invariant_end: // llvm.invariant.end
+ VerifyIntrinsicPrototype(ID, IF, 0, 3, MVT::iPTR, MVT::i64, MVT::iPTR);
+ break;
+ case Intrinsic::memory_barrier: // llvm.memory.barrier
+ VerifyIntrinsicPrototype(ID, IF, 0, 5, MVT::i1, MVT::i1, MVT::i1, MVT::i1, MVT::i1);
+ break;
+ case Intrinsic::arm_set_fpscr: // llvm.arm.set.fpscr
+ case Intrinsic::eh_sjlj_callsite: // llvm.eh.sjlj.callsite
+ case Intrinsic::pcmarker: // llvm.pcmarker
+ case Intrinsic::ppc_altivec_dss: // llvm.ppc.altivec.dss
+ VerifyIntrinsicPrototype(ID, IF, 0, 1, MVT::i32);
+ break;
+ case Intrinsic::x86_sse3_mwait: // llvm.x86.sse3.mwait
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::i32, MVT::i32);
+ break;
+ case Intrinsic::eh_return_i32: // llvm.eh.return.i32
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::i32, MVT::iPTR);
+ break;
+ case Intrinsic::eh_return_i64: // llvm.eh.return.i64
+ case Intrinsic::lifetime_end: // llvm.lifetime.end
+ case Intrinsic::lifetime_start: // llvm.lifetime.start
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::i64, MVT::iPTR);
+ break;
+ case Intrinsic::x86_int: // llvm.x86.int
+ VerifyIntrinsicPrototype(ID, IF, 0, 1, MVT::i8);
+ break;
+ case Intrinsic::dbg_value: // llvm.dbg.value
+ VerifyIntrinsicPrototype(ID, IF, 0, 3, MVT::Metadata, MVT::i64, MVT::Metadata);
+ break;
+ case Intrinsic::dbg_declare: // llvm.dbg.declare
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::Metadata, MVT::Metadata);
+ break;
+ case Intrinsic::eh_sjlj_longjmp: // llvm.eh.sjlj.longjmp
+ case Intrinsic::ppc_dcba: // llvm.ppc.dcba
+ case Intrinsic::ppc_dcbf: // llvm.ppc.dcbf
+ case Intrinsic::ppc_dcbi: // llvm.ppc.dcbi
+ case Intrinsic::ppc_dcbst: // llvm.ppc.dcbst
+ case Intrinsic::ppc_dcbt: // llvm.ppc.dcbt
+ case Intrinsic::ppc_dcbtst: // llvm.ppc.dcbtst
+ case Intrinsic::ppc_dcbz: // llvm.ppc.dcbz
+ case Intrinsic::ppc_dcbzl: // llvm.ppc.dcbzl
+ case Intrinsic::stackrestore: // llvm.stackrestore
+ case Intrinsic::vaend: // llvm.va_end
+ case Intrinsic::vastart: // llvm.va_start
+ case Intrinsic::x86_sse2_clflush: // llvm.x86.sse2.clflush
+ case Intrinsic::x86_sse_ldmxcsr: // llvm.x86.sse.ldmxcsr
+ case Intrinsic::x86_sse_stmxcsr: // llvm.x86.sse.stmxcsr
+ VerifyIntrinsicPrototype(ID, IF, 0, 1, MVT::iPTR);
+ break;
+ case Intrinsic::arm_neon_vst2: // llvm.arm.neon.vst2
+ VerifyIntrinsicPrototype(ID, IF, 0, 4, MVT::iPTR, MVT::vAny, ~1, MVT::i32);
+ break;
+ case Intrinsic::arm_neon_vst3: // llvm.arm.neon.vst3
+ VerifyIntrinsicPrototype(ID, IF, 0, 5, MVT::iPTR, MVT::vAny, ~1, ~1, MVT::i32);
+ break;
+ case Intrinsic::arm_neon_vst4: // llvm.arm.neon.vst4
+ VerifyIntrinsicPrototype(ID, IF, 0, 6, MVT::iPTR, MVT::vAny, ~1, ~1, ~1, MVT::i32);
+ break;
+ case Intrinsic::arm_neon_vst2lane: // llvm.arm.neon.vst2lane
+ VerifyIntrinsicPrototype(ID, IF, 0, 5, MVT::iPTR, MVT::vAny, ~1, MVT::i32, MVT::i32);
+ break;
+ case Intrinsic::arm_neon_vst3lane: // llvm.arm.neon.vst3lane
+ VerifyIntrinsicPrototype(ID, IF, 0, 6, MVT::iPTR, MVT::vAny, ~1, ~1, MVT::i32, MVT::i32);
+ break;
+ case Intrinsic::arm_neon_vst4lane: // llvm.arm.neon.vst4lane
+ VerifyIntrinsicPrototype(ID, IF, 0, 7, MVT::iPTR, MVT::vAny, ~1, ~1, ~1, MVT::i32, MVT::i32);
+ break;
+ case Intrinsic::arm_neon_vst1: // llvm.arm.neon.vst1
+ VerifyIntrinsicPrototype(ID, IF, 0, 3, MVT::iPTR, MVT::vAny, MVT::i32);
+ break;
+ case Intrinsic::longjmp: // llvm.longjmp
+ case Intrinsic::siglongjmp: // llvm.siglongjmp
+ case Intrinsic::x86_sse2_movnt_i: // llvm.x86.sse2.movnt.i
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::iPTR, MVT::i32);
+ break;
+ case Intrinsic::ppc_altivec_dst: // llvm.ppc.altivec.dst
+ case Intrinsic::ppc_altivec_dstst: // llvm.ppc.altivec.dstst
+ case Intrinsic::ppc_altivec_dststt: // llvm.ppc.altivec.dststt
+ case Intrinsic::ppc_altivec_dstt: // llvm.ppc.altivec.dstt
+ case Intrinsic::prefetch: // llvm.prefetch
+ case Intrinsic::x86_sse3_monitor: // llvm.x86.sse3.monitor
+ VerifyIntrinsicPrototype(ID, IF, 0, 3, MVT::iPTR, MVT::i32, MVT::i32);
+ break;
+ case Intrinsic::vacopy: // llvm.va_copy
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::iPTR, MVT::iPTR);
+ break;
+ case Intrinsic::var_annotation: // llvm.var.annotation
+ VerifyIntrinsicPrototype(ID, IF, 0, 4, MVT::iPTR, MVT::iPTR, MVT::iPTR, MVT::i32);
+ break;
+ case Intrinsic::gcwrite: // llvm.gcwrite
+ VerifyIntrinsicPrototype(ID, IF, 0, 3, MVT::iPTR, MVT::iPTR, MVT::iPTR);
+ break;
+ case Intrinsic::stackprotector: // llvm.stackprotector
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::iPTR, MVT::iPTR);
+ break;
+ case Intrinsic::x86_sse2_storeu_dq: // llvm.x86.sse2.storeu.dq
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::iPTR, MVT::v16i8);
+ break;
+ case Intrinsic::x86_mmx_movnt_dq: // llvm.x86.mmx.movnt.dq
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::iPTR, MVT::v1i64);
+ break;
+ case Intrinsic::x86_sse2_movnt_pd: // llvm.x86.sse2.movnt.pd
+ case Intrinsic::x86_sse2_storeu_pd: // llvm.x86.sse2.storeu.pd
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::iPTR, MVT::v2f64);
+ break;
+ case Intrinsic::x86_avx_maskstore_pd: // llvm.x86.avx.maskstore.pd
+ VerifyIntrinsicPrototype(ID, IF, 0, 3, MVT::iPTR, MVT::v2f64, MVT::v2f64);
+ break;
+ case Intrinsic::x86_sse2_movnt_dq: // llvm.x86.sse2.movnt.dq
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::iPTR, MVT::v2i64);
+ break;
+ case Intrinsic::x86_avx_storeu_dq_256: // llvm.x86.avx.storeu.dq.256
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::iPTR, MVT::v32i8);
+ break;
+ case Intrinsic::x86_sse_movnt_ps: // llvm.x86.sse.movnt.ps
+ case Intrinsic::x86_sse_storeu_ps: // llvm.x86.sse.storeu.ps
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::iPTR, MVT::v4f32);
+ break;
+ case Intrinsic::x86_avx_maskstore_ps: // llvm.x86.avx.maskstore.ps
+ VerifyIntrinsicPrototype(ID, IF, 0, 3, MVT::iPTR, MVT::v4f32, MVT::v4f32);
+ break;
+ case Intrinsic::x86_avx_movnt_pd_256: // llvm.x86.avx.movnt.pd.256
+ case Intrinsic::x86_avx_storeu_pd_256: // llvm.x86.avx.storeu.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::iPTR, MVT::v4f64);
+ break;
+ case Intrinsic::x86_avx_maskstore_pd_256: // llvm.x86.avx.maskstore.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 0, 3, MVT::iPTR, MVT::v4f64, MVT::v4f64);
+ break;
+ case Intrinsic::x86_sse2_storel_dq: // llvm.x86.sse2.storel.dq
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::iPTR, MVT::v4i32);
+ break;
+ case Intrinsic::x86_avx_movnt_dq_256: // llvm.x86.avx.movnt.dq.256
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::iPTR, MVT::v4i64);
+ break;
+ case Intrinsic::x86_avx_movnt_ps_256: // llvm.x86.avx.movnt.ps.256
+ case Intrinsic::x86_avx_storeu_ps_256: // llvm.x86.avx.storeu.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::iPTR, MVT::v8f32);
+ break;
+ case Intrinsic::x86_avx_maskstore_ps_256: // llvm.x86.avx.maskstore.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 0, 3, MVT::iPTR, MVT::v8f32, MVT::v8f32);
+ break;
+ case Intrinsic::gcroot: // llvm.gcroot
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::iPTR, MVT::iPTR);
+ break;
+ case Intrinsic::ppc_altivec_stvebx: // llvm.ppc.altivec.stvebx
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::v16i8, MVT::iPTR);
+ break;
+ case Intrinsic::x86_sse2_maskmov_dqu: // llvm.x86.sse2.maskmov.dqu
+ VerifyIntrinsicPrototype(ID, IF, 0, 3, MVT::v16i8, MVT::v16i8, MVT::iPTR);
+ break;
+ case Intrinsic::ppc_altivec_mtvscr: // llvm.ppc.altivec.mtvscr
+ VerifyIntrinsicPrototype(ID, IF, 0, 1, MVT::v4i32);
+ break;
+ case Intrinsic::ppc_altivec_stvewx: // llvm.ppc.altivec.stvewx
+ case Intrinsic::ppc_altivec_stvx: // llvm.ppc.altivec.stvx
+ case Intrinsic::ppc_altivec_stvxl: // llvm.ppc.altivec.stvxl
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::v4i32, MVT::iPTR);
+ break;
+ case Intrinsic::ppc_altivec_stvehx: // llvm.ppc.altivec.stvehx
+ VerifyIntrinsicPrototype(ID, IF, 0, 2, MVT::v8i16, MVT::iPTR);
+ break;
+ case Intrinsic::x86_mmx_maskmovq: // llvm.x86.mmx.maskmovq
+ VerifyIntrinsicPrototype(ID, IF, 0, 3, MVT::v8i8, MVT::v8i8, MVT::iPTR);
+ break;
case Intrinsic::ptr_annotation: // llvm.ptr.annotation
VerifyIntrinsicPrototype(ID, IF, 1, 4, MVT::iPTRAny, ~0, MVT::iPTR, MVT::iPTR, MVT::i32);
break;
@@ -3221,23 +3851,6 @@
case Intrinsic::umul_with_overflow: // llvm.umul.with.overflow
VerifyIntrinsicPrototype(ID, IF, 2, 2, MVT::iAny, MVT::i1, ~0, ~0);
break;
- case Intrinsic::arm_neon_vaddws: // llvm.arm.neon.vaddws
- case Intrinsic::arm_neon_vaddwu: // llvm.arm.neon.vaddwu
- case Intrinsic::arm_neon_vsubws: // llvm.arm.neon.vsubws
- case Intrinsic::arm_neon_vsubwu: // llvm.arm.neon.vsubwu
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::vAny, ~0, ~(TruncatedElementVectorType | 0));
- break;
- case Intrinsic::arm_neon_vabas: // llvm.arm.neon.vabas
- case Intrinsic::arm_neon_vabau: // llvm.arm.neon.vabau
- case Intrinsic::arm_neon_vshiftins: // llvm.arm.neon.vshiftins
- VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::vAny, ~0, ~0, ~0);
- break;
- case Intrinsic::arm_neon_vabals: // llvm.arm.neon.vabals
- case Intrinsic::arm_neon_vabalu: // llvm.arm.neon.vabalu
- case Intrinsic::arm_neon_vmlals: // llvm.arm.neon.vmlals
- case Intrinsic::arm_neon_vmlalu: // llvm.arm.neon.vmlalu
- case Intrinsic::arm_neon_vmlsls: // llvm.arm.neon.vmlsls
- case Intrinsic::arm_neon_vmlslu: // llvm.arm.neon.vmlslu
case Intrinsic::arm_neon_vqdmlal: // llvm.arm.neon.vqdmlal
case Intrinsic::arm_neon_vqdmlsl: // llvm.arm.neon.vqdmlsl
VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::vAny, ~0, ~(TruncatedElementVectorType | 0), ~(TruncatedElementVectorType | 0));
@@ -3258,16 +3871,11 @@
case Intrinsic::arm_neon_vrsqrte: // llvm.arm.neon.vrsqrte
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::vAny, ~0);
break;
- case Intrinsic::arm_neon_vmovn: // llvm.arm.neon.vmovn
case Intrinsic::arm_neon_vqmovns: // llvm.arm.neon.vqmovns
case Intrinsic::arm_neon_vqmovnsu: // llvm.arm.neon.vqmovnsu
case Intrinsic::arm_neon_vqmovnu: // llvm.arm.neon.vqmovnu
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::vAny, ~(ExtendedElementVectorType | 0));
break;
- case Intrinsic::arm_neon_vmovls: // llvm.arm.neon.vmovls
- case Intrinsic::arm_neon_vmovlu: // llvm.arm.neon.vmovlu
- VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::vAny, ~(TruncatedElementVectorType | 0));
- break;
case Intrinsic::arm_neon_vabds: // llvm.arm.neon.vabds
case Intrinsic::arm_neon_vabdu: // llvm.arm.neon.vabdu
case Intrinsic::arm_neon_vhadds: // llvm.arm.neon.vhadds
@@ -3319,48 +3927,54 @@
case Intrinsic::arm_neon_vsubhn: // llvm.arm.neon.vsubhn
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::vAny, ~(ExtendedElementVectorType | 0), ~(ExtendedElementVectorType | 0));
break;
- case Intrinsic::arm_neon_vabdls: // llvm.arm.neon.vabdls
- case Intrinsic::arm_neon_vabdlu: // llvm.arm.neon.vabdlu
- case Intrinsic::arm_neon_vaddls: // llvm.arm.neon.vaddls
- case Intrinsic::arm_neon_vaddlu: // llvm.arm.neon.vaddlu
case Intrinsic::arm_neon_vmullp: // llvm.arm.neon.vmullp
- case Intrinsic::arm_neon_vmulls: // llvm.arm.neon.vmulls
- case Intrinsic::arm_neon_vmullu: // llvm.arm.neon.vmullu
case Intrinsic::arm_neon_vqdmull: // llvm.arm.neon.vqdmull
case Intrinsic::arm_neon_vshiftls: // llvm.arm.neon.vshiftls
case Intrinsic::arm_neon_vshiftlu: // llvm.arm.neon.vshiftlu
- case Intrinsic::arm_neon_vsubls: // llvm.arm.neon.vsubls
- case Intrinsic::arm_neon_vsublu: // llvm.arm.neon.vsublu
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::vAny, ~(TruncatedElementVectorType | 0), ~(TruncatedElementVectorType | 0));
break;
+ case Intrinsic::arm_neon_vshiftins: // llvm.arm.neon.vshiftins
+ VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::vAny, ~0, ~0, ~0);
+ break;
case Intrinsic::arm_neon_vpaddls: // llvm.arm.neon.vpaddls
case Intrinsic::arm_neon_vpaddlu: // llvm.arm.neon.vpaddlu
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::vAny, MVT::vAny);
break;
case Intrinsic::arm_neon_vld1: // llvm.arm.neon.vld1
- VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::vAny, MVT::iPTR);
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::vAny, MVT::iPTR, MVT::i32);
break;
case Intrinsic::arm_neon_vld2: // llvm.arm.neon.vld2
- VerifyIntrinsicPrototype(ID, IF, 2, 1, MVT::vAny, ~0, MVT::iPTR);
+ VerifyIntrinsicPrototype(ID, IF, 2, 2, MVT::vAny, ~0, MVT::iPTR, MVT::i32);
break;
case Intrinsic::arm_neon_vld3: // llvm.arm.neon.vld3
- VerifyIntrinsicPrototype(ID, IF, 3, 1, MVT::vAny, ~0, ~0, MVT::iPTR);
+ VerifyIntrinsicPrototype(ID, IF, 3, 2, MVT::vAny, ~0, ~0, MVT::iPTR, MVT::i32);
break;
case Intrinsic::arm_neon_vld4: // llvm.arm.neon.vld4
- VerifyIntrinsicPrototype(ID, IF, 4, 1, MVT::vAny, ~0, ~0, ~0, MVT::iPTR);
+ VerifyIntrinsicPrototype(ID, IF, 4, 2, MVT::vAny, ~0, ~0, ~0, MVT::iPTR, MVT::i32);
break;
case Intrinsic::arm_neon_vld2lane: // llvm.arm.neon.vld2lane
- VerifyIntrinsicPrototype(ID, IF, 2, 4, MVT::vAny, ~0, MVT::iPTR, ~0, ~0, MVT::i32);
+ VerifyIntrinsicPrototype(ID, IF, 2, 5, MVT::vAny, ~0, MVT::iPTR, ~0, ~0, MVT::i32, MVT::i32);
break;
case Intrinsic::arm_neon_vld3lane: // llvm.arm.neon.vld3lane
- VerifyIntrinsicPrototype(ID, IF, 3, 5, MVT::vAny, ~0, ~0, MVT::iPTR, ~0, ~0, ~0, MVT::i32);
+ VerifyIntrinsicPrototype(ID, IF, 3, 6, MVT::vAny, ~0, ~0, MVT::iPTR, ~0, ~0, ~0, MVT::i32, MVT::i32);
break;
case Intrinsic::arm_neon_vld4lane: // llvm.arm.neon.vld4lane
- VerifyIntrinsicPrototype(ID, IF, 4, 6, MVT::vAny, ~0, ~0, ~0, MVT::iPTR, ~0, ~0, ~0, ~0, MVT::i32);
+ VerifyIntrinsicPrototype(ID, IF, 4, 7, MVT::vAny, ~0, ~0, ~0, MVT::iPTR, ~0, ~0, ~0, ~0, MVT::i32, MVT::i32);
break;
case Intrinsic::invariant_start: // llvm.invariant.start
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::iPTR, MVT::i64, MVT::iPTR);
break;
+ case Intrinsic::arm_vcvtr: // llvm.arm.vcvtr
+ case Intrinsic::arm_vcvtru: // llvm.arm.vcvtru
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::f32, MVT::fAny);
+ break;
+ case Intrinsic::convert_from_fp16: // llvm.convert.from.fp16
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::f32, MVT::i16);
+ break;
+ case Intrinsic::convert_to_fp16: // llvm.convert.to.fp16
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::i16, MVT::f32);
+ break;
+ case Intrinsic::arm_get_fpscr: // llvm.arm.get.fpscr
case Intrinsic::flt_rounds: // llvm.flt.rounds
case Intrinsic::xcore_getid: // llvm.xcore.getid
VerifyIntrinsicPrototype(ID, IF, 1, 0, MVT::i32);
@@ -3371,6 +3985,10 @@
case Intrinsic::x86_sse42_crc32_16: // llvm.x86.sse42.crc32.16
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::i32, MVT::i32, MVT::i16);
break;
+ case Intrinsic::arm_qadd: // llvm.arm.qadd
+ case Intrinsic::arm_qsub: // llvm.arm.qsub
+ case Intrinsic::arm_ssat: // llvm.arm.ssat
+ case Intrinsic::arm_usat: // llvm.arm.usat
case Intrinsic::x86_sse42_crc32_32: // llvm.x86.sse42.crc32.32
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::i32, MVT::i32, MVT::i32);
break;
@@ -3431,11 +4049,20 @@
case Intrinsic::x86_sse42_pcmpistriz128: // llvm.x86.sse42.pcmpistriz128
VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::i32, MVT::v16i8, MVT::v16i8, MVT::i8);
break;
+ case Intrinsic::x86_mmx_cvtsi64_si32: // llvm.x86.mmx.cvtsi64.si32
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::i32, MVT::v1i64);
+ break;
+ case Intrinsic::x86_mmx_pextr_w: // llvm.x86.mmx.pextr.w
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::i32, MVT::v1i64, MVT::i32);
+ break;
case Intrinsic::x86_sse2_cvtsd2si: // llvm.x86.sse2.cvtsd2si
case Intrinsic::x86_sse2_cvttsd2si: // llvm.x86.sse2.cvttsd2si
case Intrinsic::x86_sse2_movmsk_pd: // llvm.x86.sse2.movmsk.pd
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::i32, MVT::v2f64);
break;
+ case Intrinsic::x86_avx_vtestc_pd: // llvm.x86.avx.vtestc.pd
+ case Intrinsic::x86_avx_vtestnzc_pd: // llvm.x86.avx.vtestnzc.pd
+ case Intrinsic::x86_avx_vtestz_pd: // llvm.x86.avx.vtestz.pd
case Intrinsic::x86_sse2_comieq_sd: // llvm.x86.sse2.comieq.sd
case Intrinsic::x86_sse2_comige_sd: // llvm.x86.sse2.comige.sd
case Intrinsic::x86_sse2_comigt_sd: // llvm.x86.sse2.comigt.sd
@@ -3458,6 +4085,9 @@
case Intrinsic::x86_sse41_extractps: // llvm.x86.sse41.extractps
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::i32, MVT::v4f32, MVT::i32);
break;
+ case Intrinsic::x86_avx_vtestc_ps: // llvm.x86.avx.vtestc.ps
+ case Intrinsic::x86_avx_vtestnzc_ps: // llvm.x86.avx.vtestnzc.ps
+ case Intrinsic::x86_avx_vtestz_ps: // llvm.x86.avx.vtestz.ps
case Intrinsic::x86_sse41_ptestc: // llvm.x86.sse41.ptestc
case Intrinsic::x86_sse41_ptestnzc: // llvm.x86.sse41.ptestnzc
case Intrinsic::x86_sse41_ptestz: // llvm.x86.sse41.ptestz
@@ -3475,9 +4105,30 @@
case Intrinsic::x86_sse_ucomineq_ss: // llvm.x86.sse.ucomineq.ss
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::i32, MVT::v4f32, MVT::v4f32);
break;
+ case Intrinsic::x86_avx_movmsk_pd_256: // llvm.x86.avx.movmsk.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::i32, MVT::v4f64);
+ break;
+ case Intrinsic::x86_avx_vtestc_pd_256: // llvm.x86.avx.vtestc.pd.256
+ case Intrinsic::x86_avx_vtestnzc_pd_256: // llvm.x86.avx.vtestnzc.pd.256
+ case Intrinsic::x86_avx_vtestz_pd_256: // llvm.x86.avx.vtestz.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::i32, MVT::v4f64, MVT::v4f64);
+ break;
case Intrinsic::x86_sse41_pextrd: // llvm.x86.sse41.pextrd
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::i32, MVT::v4i32, MVT::i32);
break;
+ case Intrinsic::x86_avx_ptestc_256: // llvm.x86.avx.ptestc.256
+ case Intrinsic::x86_avx_ptestnzc_256: // llvm.x86.avx.ptestnzc.256
+ case Intrinsic::x86_avx_ptestz_256: // llvm.x86.avx.ptestz.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::i32, MVT::v4i64, MVT::v4i64);
+ break;
+ case Intrinsic::x86_avx_movmsk_ps_256: // llvm.x86.avx.movmsk.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::i32, MVT::v8f32);
+ break;
+ case Intrinsic::x86_avx_vtestc_ps_256: // llvm.x86.avx.vtestc.ps.256
+ case Intrinsic::x86_avx_vtestnzc_ps_256: // llvm.x86.avx.vtestnzc.ps.256
+ case Intrinsic::x86_avx_vtestz_ps_256: // llvm.x86.avx.vtestz.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::i32, MVT::v8f32, MVT::v8f32);
+ break;
case Intrinsic::x86_mmx_pmovmskb: // llvm.x86.mmx.pmovmskb
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::i32, MVT::v8i8);
break;
@@ -3485,9 +4136,12 @@
VerifyIntrinsicPrototype(ID, IF, 1, 0, MVT::i64);
break;
case Intrinsic::alpha_umulh: // llvm.alpha.umulh
- case Intrinsic::x86_sse42_crc32_64: // llvm.x86.sse42.crc32.64
+ case Intrinsic::x86_sse42_crc64_64: // llvm.x86.sse42.crc64.64
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::i64, MVT::i64, MVT::i64);
break;
+ case Intrinsic::x86_sse42_crc64_8: // llvm.x86.sse42.crc64.8
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::i64, MVT::i64, MVT::i8);
+ break;
case Intrinsic::x86_sse2_cvtsd2si64: // llvm.x86.sse2.cvtsd2si64
case Intrinsic::x86_sse2_cvttsd2si64: // llvm.x86.sse2.cvttsd2si64
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::i64, MVT::v2f64);
@@ -3595,20 +4249,32 @@
case Intrinsic::x86_sse2_packuswb_128: // llvm.x86.sse2.packuswb.128
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v16i8, MVT::v8i16, MVT::v8i16);
break;
+ case Intrinsic::x86_mmx_cvtsi32_si64: // llvm.x86.mmx.cvtsi32.si64
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v1i64, MVT::i32);
+ break;
case Intrinsic::x86_mmx_pslli_q: // llvm.x86.mmx.pslli.q
case Intrinsic::x86_mmx_psrli_q: // llvm.x86.mmx.psrli.q
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v1i64, MVT::v1i64, MVT::i32);
break;
+ case Intrinsic::x86_mmx_pinsr_w: // llvm.x86.mmx.pinsr.w
+ VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v1i64, MVT::v1i64, MVT::i32, MVT::i32);
+ break;
+ case Intrinsic::x86_mmx_padd_q: // llvm.x86.mmx.padd.q
+ case Intrinsic::x86_mmx_pand: // llvm.x86.mmx.pand
+ case Intrinsic::x86_mmx_pandn: // llvm.x86.mmx.pandn
+ case Intrinsic::x86_mmx_por: // llvm.x86.mmx.por
case Intrinsic::x86_mmx_psll_q: // llvm.x86.mmx.psll.q
case Intrinsic::x86_mmx_psrl_q: // llvm.x86.mmx.psrl.q
+ case Intrinsic::x86_mmx_psub_q: // llvm.x86.mmx.psub.q
+ case Intrinsic::x86_mmx_pxor: // llvm.x86.mmx.pxor
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v1i64, MVT::v1i64, MVT::v1i64);
break;
- case Intrinsic::x86_ssse3_palign_r: // llvm.x86.ssse3.palign.r
- VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v1i64, MVT::v1i64, MVT::v1i64, MVT::i8);
- break;
case Intrinsic::x86_sse2_loadu_pd: // llvm.x86.sse2.loadu.pd
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v2f64, MVT::iPTR);
break;
+ case Intrinsic::x86_avx_maskload_pd: // llvm.x86.avx.maskload.pd
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v2f64, MVT::iPTR, MVT::v2f64);
+ break;
case Intrinsic::x86_sse2_sqrt_pd: // llvm.x86.sse2.sqrt.pd
case Intrinsic::x86_sse2_sqrt_sd: // llvm.x86.sse2.sqrt.sd
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v2f64, MVT::v2f64);
@@ -3620,6 +4286,9 @@
case Intrinsic::x86_sse2_cvtsi642sd: // llvm.x86.sse2.cvtsi642sd
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v2f64, MVT::v2f64, MVT::i64);
break;
+ case Intrinsic::x86_avx_vpermil_pd: // llvm.x86.avx.vpermil.pd
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v2f64, MVT::v2f64, MVT::i8);
+ break;
case Intrinsic::spu_si_dfa: // llvm.spu.si.dfa
case Intrinsic::spu_si_dfm: // llvm.spu.si.dfm
case Intrinsic::spu_si_dfma: // llvm.spu.si.dfma
@@ -3652,6 +4321,9 @@
case Intrinsic::x86_sse41_blendvpd: // llvm.x86.sse41.blendvpd
VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v2f64, MVT::v2f64, MVT::v2f64, MVT::v2f64);
break;
+ case Intrinsic::x86_avx_vpermilvar_pd: // llvm.x86.avx.vpermilvar.pd
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v2f64, MVT::v2f64, MVT::v2i64);
+ break;
case Intrinsic::x86_sse2_cvtss2sd: // llvm.x86.sse2.cvtss2sd
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v2f64, MVT::v2f64, MVT::v4f32);
break;
@@ -3661,9 +4333,15 @@
case Intrinsic::x86_sse2_cvtps2pd: // llvm.x86.sse2.cvtps2pd
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v2f64, MVT::v4f32);
break;
+ case Intrinsic::x86_avx_vextractf128_pd_256: // llvm.x86.avx.vextractf128.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v2f64, MVT::v4f64, MVT::i8);
+ break;
case Intrinsic::x86_sse2_cvtdq2pd: // llvm.x86.sse2.cvtdq2pd
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v2f64, MVT::v4i32);
break;
+ case Intrinsic::x86_mmx_vec_init_d: // llvm.x86.mmx.vec.init.d
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v2i32, MVT::i32, MVT::i32);
+ break;
case Intrinsic::arm_neon_vacged: // llvm.arm.neon.vacged
case Intrinsic::arm_neon_vacgtd: // llvm.arm.neon.vacgtd
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v2i32, MVT::v2f32, MVT::v2f32);
@@ -3678,6 +4356,7 @@
case Intrinsic::x86_mmx_pslli_d: // llvm.x86.mmx.pslli.d
case Intrinsic::x86_mmx_psrai_d: // llvm.x86.mmx.psrai.d
case Intrinsic::x86_mmx_psrli_d: // llvm.x86.mmx.psrli.d
+ case Intrinsic::x86_mmx_vec_ext_d: // llvm.x86.mmx.vec.ext.d
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v2i32, MVT::v2i32, MVT::i32);
break;
case Intrinsic::x86_mmx_psll_d: // llvm.x86.mmx.psll.d
@@ -3685,9 +4364,13 @@
case Intrinsic::x86_mmx_psrl_d: // llvm.x86.mmx.psrl.d
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v2i32, MVT::v2i32, MVT::v1i64);
break;
+ case Intrinsic::x86_mmx_padd_d: // llvm.x86.mmx.padd.d
case Intrinsic::x86_mmx_pcmpeq_d: // llvm.x86.mmx.pcmpeq.d
case Intrinsic::x86_mmx_pcmpgt_d: // llvm.x86.mmx.pcmpgt.d
case Intrinsic::x86_mmx_pmulu_dq: // llvm.x86.mmx.pmulu.dq
+ case Intrinsic::x86_mmx_psub_d: // llvm.x86.mmx.psub.d
+ case Intrinsic::x86_mmx_punpckhdq: // llvm.x86.mmx.punpckhdq
+ case Intrinsic::x86_mmx_punpckldq: // llvm.x86.mmx.punpckldq
case Intrinsic::x86_ssse3_phadd_d: // llvm.x86.ssse3.phadd.d
case Intrinsic::x86_ssse3_phsub_d: // llvm.x86.ssse3.phsub.d
case Intrinsic::x86_ssse3_psign_d: // llvm.x86.ssse3.psign.d
@@ -3710,6 +4393,9 @@
case Intrinsic::x86_sse2_psad_bw: // llvm.x86.sse2.psad.bw
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v2i64, MVT::v16i8, MVT::v16i8);
break;
+ case Intrinsic::x86_aesni_aesimc: // llvm.x86.aesni.aesimc
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v2i64, MVT::v2i64);
+ break;
case Intrinsic::x86_sse2_psll_dq: // llvm.x86.sse2.psll.dq
case Intrinsic::x86_sse2_psll_dq_bs: // llvm.x86.sse2.psll.dq.bs
case Intrinsic::x86_sse2_pslli_q: // llvm.x86.sse2.pslli.q
@@ -3718,15 +4404,19 @@
case Intrinsic::x86_sse2_psrli_q: // llvm.x86.sse2.psrli.q
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v2i64, MVT::v2i64, MVT::i32);
break;
+ case Intrinsic::x86_aesni_aeskeygenassist: // llvm.x86.aesni.aeskeygenassist
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v2i64, MVT::v2i64, MVT::i8);
+ break;
+ case Intrinsic::x86_aesni_aesdec: // llvm.x86.aesni.aesdec
+ case Intrinsic::x86_aesni_aesdeclast: // llvm.x86.aesni.aesdeclast
+ case Intrinsic::x86_aesni_aesenc: // llvm.x86.aesni.aesenc
+ case Intrinsic::x86_aesni_aesenclast: // llvm.x86.aesni.aesenclast
case Intrinsic::x86_sse2_psll_q: // llvm.x86.sse2.psll.q
case Intrinsic::x86_sse2_psrl_q: // llvm.x86.sse2.psrl.q
case Intrinsic::x86_sse41_pcmpeqq: // llvm.x86.sse41.pcmpeqq
case Intrinsic::x86_sse42_pcmpgtq: // llvm.x86.sse42.pcmpgtq
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v2i64, MVT::v2i64, MVT::v2i64);
break;
- case Intrinsic::x86_ssse3_palign_r_128: // llvm.x86.ssse3.palign.r.128
- VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::i8);
- break;
case Intrinsic::x86_sse41_pmovsxdq: // llvm.x86.sse41.pmovsxdq
case Intrinsic::x86_sse41_pmovzxdq: // llvm.x86.sse41.pmovzxdq
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v2i64, MVT::v4i32);
@@ -3739,9 +4429,17 @@
case Intrinsic::x86_sse41_pmovzxwq: // llvm.x86.sse41.pmovzxwq
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v2i64, MVT::v8i16);
break;
+ case Intrinsic::x86_avx_ldu_dq_256: // llvm.x86.avx.ldu.dq.256
+ case Intrinsic::x86_avx_loadu_dq_256: // llvm.x86.avx.loadu.dq.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v32i8, MVT::iPTR);
+ break;
+ case Intrinsic::x86_avx_vbroadcastss: // llvm.x86.avx.vbroadcastss
case Intrinsic::x86_sse_loadu_ps: // llvm.x86.sse.loadu.ps
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v4f32, MVT::iPTR);
break;
+ case Intrinsic::x86_avx_maskload_ps: // llvm.x86.avx.maskload.ps
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4f32, MVT::iPTR, MVT::v4f32);
+ break;
case Intrinsic::x86_sse2_cvtpd2ps: // llvm.x86.sse2.cvtpd2ps
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v4f32, MVT::v2f64);
break;
@@ -3768,6 +4466,9 @@
case Intrinsic::x86_sse_cvtsi642ss: // llvm.x86.sse.cvtsi642ss
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4f32, MVT::v4f32, MVT::i64);
break;
+ case Intrinsic::x86_avx_vpermil_ps: // llvm.x86.avx.vpermil.ps
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4f32, MVT::v4f32, MVT::i8);
+ break;
case Intrinsic::x86_sse2_cvtsd2ss: // llvm.x86.sse2.cvtsd2ss
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4f32, MVT::v4f32, MVT::v2f64);
break;
@@ -3814,6 +4515,12 @@
case Intrinsic::x86_sse41_blendvps: // llvm.x86.sse41.blendvps
VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v4f32, MVT::v4f32, MVT::v4f32, MVT::v4f32);
break;
+ case Intrinsic::x86_avx_vpermilvar_ps: // llvm.x86.avx.vpermilvar.ps
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4f32, MVT::v4f32, MVT::v4i32);
+ break;
+ case Intrinsic::x86_avx_cvt_pd2_ps_256: // llvm.x86.avx.cvt.pd2.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v4f32, MVT::v4f64);
+ break;
case Intrinsic::x86_sse2_cvtdq2ps: // llvm.x86.sse2.cvtdq2ps
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v4f32, MVT::v4i32);
break;
@@ -3821,6 +4528,58 @@
case Intrinsic::ppc_altivec_vcfux: // llvm.ppc.altivec.vcfux
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4f32, MVT::v4i32, MVT::i32);
break;
+ case Intrinsic::x86_avx_vextractf128_ps_256: // llvm.x86.avx.vextractf128.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4f32, MVT::v8f32, MVT::i8);
+ break;
+ case Intrinsic::x86_avx_loadu_pd_256: // llvm.x86.avx.loadu.pd.256
+ case Intrinsic::x86_avx_vbroadcast_sd_256: // llvm.x86.avx.vbroadcast.sd.256
+ case Intrinsic::x86_avx_vbroadcastf128_pd_256: // llvm.x86.avx.vbroadcastf128.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v4f64, MVT::iPTR);
+ break;
+ case Intrinsic::x86_avx_maskload_pd_256: // llvm.x86.avx.maskload.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4f64, MVT::iPTR, MVT::v4f64);
+ break;
+ case Intrinsic::x86_avx_cvt_ps2_pd_256: // llvm.x86.avx.cvt.ps2.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v4f64, MVT::v4f32);
+ break;
+ case Intrinsic::x86_avx_sqrt_pd_256: // llvm.x86.avx.sqrt.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v4f64, MVT::v4f64);
+ break;
+ case Intrinsic::x86_avx_round_pd_256: // llvm.x86.avx.round.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4f64, MVT::v4f64, MVT::i32);
+ break;
+ case Intrinsic::x86_avx_vpermil_pd_256: // llvm.x86.avx.vpermil.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4f64, MVT::v4f64, MVT::i8);
+ break;
+ case Intrinsic::x86_avx_vinsertf128_pd_256: // llvm.x86.avx.vinsertf128.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v4f64, MVT::v4f64, MVT::v2f64, MVT::i8);
+ break;
+ case Intrinsic::x86_avx_addsub_pd_256: // llvm.x86.avx.addsub.pd.256
+ case Intrinsic::x86_avx_hadd_pd_256: // llvm.x86.avx.hadd.pd.256
+ case Intrinsic::x86_avx_hsub_pd_256: // llvm.x86.avx.hsub.pd.256
+ case Intrinsic::x86_avx_max_pd_256: // llvm.x86.avx.max.pd.256
+ case Intrinsic::x86_avx_min_pd_256: // llvm.x86.avx.min.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4f64, MVT::v4f64, MVT::v4f64);
+ break;
+ case Intrinsic::x86_avx_blend_pd_256: // llvm.x86.avx.blend.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v4f64, MVT::v4f64, MVT::v4f64, MVT::i32);
+ break;
+ case Intrinsic::x86_avx_cmp_pd_256: // llvm.x86.avx.cmp.pd.256
+ case Intrinsic::x86_avx_vperm2f128_pd_256: // llvm.x86.avx.vperm2f128.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v4f64, MVT::v4f64, MVT::v4f64, MVT::i8);
+ break;
+ case Intrinsic::x86_avx_blendv_pd_256: // llvm.x86.avx.blendv.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v4f64, MVT::v4f64, MVT::v4f64, MVT::v4f64);
+ break;
+ case Intrinsic::x86_avx_vpermilvar_pd_256: // llvm.x86.avx.vpermilvar.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4f64, MVT::v4f64, MVT::v4i64);
+ break;
+ case Intrinsic::x86_avx_cvtdq2_pd_256: // llvm.x86.avx.cvtdq2.pd.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v4f64, MVT::v4i32);
+ break;
+ case Intrinsic::x86_mmx_vec_init_w: // llvm.x86.mmx.vec.init.w
+ VerifyIntrinsicPrototype(ID, IF, 1, 4, MVT::v4i16, MVT::i16, MVT::i16, MVT::i16, MVT::i16);
+ break;
case Intrinsic::x86_mmx_packssdw: // llvm.x86.mmx.packssdw
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4i16, MVT::v2i32, MVT::v2i32);
break;
@@ -3830,6 +4589,7 @@
case Intrinsic::x86_mmx_pslli_w: // llvm.x86.mmx.pslli.w
case Intrinsic::x86_mmx_psrai_w: // llvm.x86.mmx.psrai.w
case Intrinsic::x86_mmx_psrli_w: // llvm.x86.mmx.psrli.w
+ case Intrinsic::x86_ssse3_pshuf_w: // llvm.x86.ssse3.pshuf.w
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4i16, MVT::v4i16, MVT::i32);
break;
case Intrinsic::x86_mmx_psll_w: // llvm.x86.mmx.psll.w
@@ -3837,6 +4597,7 @@
case Intrinsic::x86_mmx_psrl_w: // llvm.x86.mmx.psrl.w
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4i16, MVT::v4i16, MVT::v1i64);
break;
+ case Intrinsic::x86_mmx_padd_w: // llvm.x86.mmx.padd.w
case Intrinsic::x86_mmx_padds_w: // llvm.x86.mmx.padds.w
case Intrinsic::x86_mmx_paddus_w: // llvm.x86.mmx.paddus.w
case Intrinsic::x86_mmx_pavg_w: // llvm.x86.mmx.pavg.w
@@ -3846,8 +4607,12 @@
case Intrinsic::x86_mmx_pmins_w: // llvm.x86.mmx.pmins.w
case Intrinsic::x86_mmx_pmulh_w: // llvm.x86.mmx.pmulh.w
case Intrinsic::x86_mmx_pmulhu_w: // llvm.x86.mmx.pmulhu.w
+ case Intrinsic::x86_mmx_pmull_w: // llvm.x86.mmx.pmull.w
+ case Intrinsic::x86_mmx_psub_w: // llvm.x86.mmx.psub.w
case Intrinsic::x86_mmx_psubs_w: // llvm.x86.mmx.psubs.w
case Intrinsic::x86_mmx_psubus_w: // llvm.x86.mmx.psubus.w
+ case Intrinsic::x86_mmx_punpckhwd: // llvm.x86.mmx.punpckhwd
+ case Intrinsic::x86_mmx_punpcklwd: // llvm.x86.mmx.punpcklwd
case Intrinsic::x86_ssse3_phadd_sw: // llvm.x86.ssse3.phadd.sw
case Intrinsic::x86_ssse3_phadd_w: // llvm.x86.ssse3.phadd.w
case Intrinsic::x86_ssse3_phsub_sw: // llvm.x86.ssse3.phsub.sw
@@ -3897,6 +4662,10 @@
case Intrinsic::ppc_altivec_vcmpgtfp: // llvm.ppc.altivec.vcmpgtfp
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4i32, MVT::v4f32, MVT::v4f32);
break;
+ case Intrinsic::x86_avx_cvt_pd2dq_256: // llvm.x86.avx.cvt.pd2dq.256
+ case Intrinsic::x86_avx_cvtt_pd2dq_256: // llvm.x86.avx.cvtt.pd2dq.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v4i32, MVT::v4f64);
+ break;
case Intrinsic::x86_ssse3_pabs_d_128: // llvm.x86.ssse3.pabs.d.128
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v4i32, MVT::v4i32);
break;
@@ -3970,7 +4739,6 @@
case Intrinsic::x86_sse41_pmaxud: // llvm.x86.sse41.pmaxud
case Intrinsic::x86_sse41_pminsd: // llvm.x86.sse41.pminsd
case Intrinsic::x86_sse41_pminud: // llvm.x86.sse41.pminud
- case Intrinsic::x86_sse41_pmulld: // llvm.x86.sse41.pmulld
case Intrinsic::x86_ssse3_phadd_d_128: // llvm.x86.ssse3.phadd.d.128
case Intrinsic::x86_ssse3_phadd_sw_128: // llvm.x86.ssse3.phadd.sw.128
case Intrinsic::x86_ssse3_phsub_d_128: // llvm.x86.ssse3.phsub.d.128
@@ -4024,6 +4792,55 @@
case Intrinsic::spu_si_mpya: // llvm.spu.si.mpya
VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v4i32, MVT::v8i16, MVT::v8i16, MVT::v8i16);
break;
+ case Intrinsic::x86_avx_vextractf128_si_256: // llvm.x86.avx.vextractf128.si.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v4i32, MVT::v8i32, MVT::i8);
+ break;
+ case Intrinsic::x86_avx_loadu_ps_256: // llvm.x86.avx.loadu.ps.256
+ case Intrinsic::x86_avx_vbroadcastf128_ps_256: // llvm.x86.avx.vbroadcastf128.ps.256
+ case Intrinsic::x86_avx_vbroadcastss_256: // llvm.x86.avx.vbroadcastss.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v8f32, MVT::iPTR);
+ break;
+ case Intrinsic::x86_avx_maskload_ps_256: // llvm.x86.avx.maskload.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v8f32, MVT::iPTR, MVT::v8f32);
+ break;
+ case Intrinsic::x86_avx_rcp_ps_256: // llvm.x86.avx.rcp.ps.256
+ case Intrinsic::x86_avx_rsqrt_ps_256: // llvm.x86.avx.rsqrt.ps.256
+ case Intrinsic::x86_avx_sqrt_ps_256: // llvm.x86.avx.sqrt.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v8f32, MVT::v8f32);
+ break;
+ case Intrinsic::x86_avx_round_ps_256: // llvm.x86.avx.round.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v8f32, MVT::v8f32, MVT::i32);
+ break;
+ case Intrinsic::x86_avx_vpermil_ps_256: // llvm.x86.avx.vpermil.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v8f32, MVT::v8f32, MVT::i8);
+ break;
+ case Intrinsic::x86_avx_vinsertf128_ps_256: // llvm.x86.avx.vinsertf128.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v8f32, MVT::v8f32, MVT::v4f32, MVT::i8);
+ break;
+ case Intrinsic::x86_avx_addsub_ps_256: // llvm.x86.avx.addsub.ps.256
+ case Intrinsic::x86_avx_hadd_ps_256: // llvm.x86.avx.hadd.ps.256
+ case Intrinsic::x86_avx_hsub_ps_256: // llvm.x86.avx.hsub.ps.256
+ case Intrinsic::x86_avx_max_ps_256: // llvm.x86.avx.max.ps.256
+ case Intrinsic::x86_avx_min_ps_256: // llvm.x86.avx.min.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v8f32, MVT::v8f32, MVT::v8f32);
+ break;
+ case Intrinsic::x86_avx_blend_ps_256: // llvm.x86.avx.blend.ps.256
+ case Intrinsic::x86_avx_dp_ps_256: // llvm.x86.avx.dp.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v8f32, MVT::v8f32, MVT::v8f32, MVT::i32);
+ break;
+ case Intrinsic::x86_avx_cmp_ps_256: // llvm.x86.avx.cmp.ps.256
+ case Intrinsic::x86_avx_vperm2f128_ps_256: // llvm.x86.avx.vperm2f128.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v8f32, MVT::v8f32, MVT::v8f32, MVT::i8);
+ break;
+ case Intrinsic::x86_avx_blendv_ps_256: // llvm.x86.avx.blendv.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v8f32, MVT::v8f32, MVT::v8f32, MVT::v8f32);
+ break;
+ case Intrinsic::x86_avx_vpermilvar_ps_256: // llvm.x86.avx.vpermilvar.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v8f32, MVT::v8f32, MVT::v8i32);
+ break;
+ case Intrinsic::x86_avx_cvtdq2_ps_256: // llvm.x86.avx.cvtdq2.ps.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v8f32, MVT::v8i32);
+ break;
case Intrinsic::ppc_altivec_mfvscr: // llvm.ppc.altivec.mfvscr
VerifyIntrinsicPrototype(ID, IF, 1, 0, MVT::v8i16);
break;
@@ -4125,6 +4942,19 @@
case Intrinsic::ppc_altivec_vmladduhm: // llvm.ppc.altivec.vmladduhm
VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v8i16, MVT::v8i16, MVT::v8i16, MVT::v8i16);
break;
+ case Intrinsic::x86_avx_cvt_ps2dq_256: // llvm.x86.avx.cvt.ps2dq.256
+ case Intrinsic::x86_avx_cvtt_ps2dq_256: // llvm.x86.avx.cvtt.ps2dq.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v8i32, MVT::v8f32);
+ break;
+ case Intrinsic::x86_avx_vinsertf128_si_256: // llvm.x86.avx.vinsertf128.si.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v8i32, MVT::v8i32, MVT::v4i32, MVT::i8);
+ break;
+ case Intrinsic::x86_avx_vperm2f128_si_256: // llvm.x86.avx.vperm2f128.si.256
+ VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::v8i32, MVT::v8i32, MVT::v8i32, MVT::i8);
+ break;
+ case Intrinsic::x86_mmx_vec_init_b: // llvm.x86.mmx.vec.init.b
+ VerifyIntrinsicPrototype(ID, IF, 1, 8, MVT::v8i8, MVT::i8, MVT::i8, MVT::i8, MVT::i8, MVT::i8, MVT::i8, MVT::i8, MVT::i8);
+ break;
case Intrinsic::x86_mmx_packsswb: // llvm.x86.mmx.packsswb
case Intrinsic::x86_mmx_packuswb: // llvm.x86.mmx.packuswb
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v8i8, MVT::v4i16, MVT::v4i16);
@@ -4133,6 +4963,7 @@
VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::v8i8, MVT::v8i8);
break;
case Intrinsic::arm_neon_vtbl1: // llvm.arm.neon.vtbl1
+ case Intrinsic::x86_mmx_padd_b: // llvm.x86.mmx.padd.b
case Intrinsic::x86_mmx_padds_b: // llvm.x86.mmx.padds.b
case Intrinsic::x86_mmx_paddus_b: // llvm.x86.mmx.paddus.b
case Intrinsic::x86_mmx_pavg_b: // llvm.x86.mmx.pavg.b
@@ -4140,8 +4971,11 @@
case Intrinsic::x86_mmx_pcmpgt_b: // llvm.x86.mmx.pcmpgt.b
case Intrinsic::x86_mmx_pmaxu_b: // llvm.x86.mmx.pmaxu.b
case Intrinsic::x86_mmx_pminu_b: // llvm.x86.mmx.pminu.b
+ case Intrinsic::x86_mmx_psub_b: // llvm.x86.mmx.psub.b
case Intrinsic::x86_mmx_psubs_b: // llvm.x86.mmx.psubs.b
case Intrinsic::x86_mmx_psubus_b: // llvm.x86.mmx.psubus.b
+ case Intrinsic::x86_mmx_punpckhbw: // llvm.x86.mmx.punpckhbw
+ case Intrinsic::x86_mmx_punpcklbw: // llvm.x86.mmx.punpcklbw
case Intrinsic::x86_ssse3_pshuf_b: // llvm.x86.ssse3.pshuf.b
case Intrinsic::x86_ssse3_psign_b: // llvm.x86.ssse3.psign.b
VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::v8i8, MVT::v8i8, MVT::v8i8);
@@ -4161,44 +4995,95 @@
case Intrinsic::arm_neon_vtbx4: // llvm.arm.neon.vtbx4
VerifyIntrinsicPrototype(ID, IF, 1, 6, MVT::v8i8, MVT::v8i8, MVT::v8i8, MVT::v8i8, MVT::v8i8, MVT::v8i8, MVT::v8i8);
break;
+ }
+#endif
+
+// Code for generating Intrinsic function declarations.
+#ifdef GET_INTRINSIC_GENERATOR
+ switch (id) {
+ default: assert(0 && "Invalid intrinsic!");
case Intrinsic::eh_unwind_init: // llvm.eh.unwind.init
case Intrinsic::ppc_altivec_dssall: // llvm.ppc.altivec.dssall
case Intrinsic::ppc_sync: // llvm.ppc.sync
case Intrinsic::trap: // llvm.trap
+ case Intrinsic::x86_avx_vzeroall: // llvm.x86.avx.vzeroall
+ case Intrinsic::x86_avx_vzeroupper: // llvm.x86.avx.vzeroupper
case Intrinsic::x86_mmx_emms: // llvm.x86.mmx.emms
case Intrinsic::x86_mmx_femms: // llvm.x86.mmx.femms
case Intrinsic::x86_sse2_lfence: // llvm.x86.sse2.lfence
case Intrinsic::x86_sse2_mfence: // llvm.x86.sse2.mfence
case Intrinsic::x86_sse_sfence: // llvm.x86.sse.sfence
- VerifyIntrinsicPrototype(ID, IF, 1, 0, MVT::isVoid);
+ ResultTy = Type::getVoidTy(Context);
+ break;
+ case Intrinsic::memcpy: // llvm.memcpy
+ case Intrinsic::memmove: // llvm.memmove
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back((0 < numTys) ? Tys[0] : PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back((1 < numTys) ? Tys[1] : PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(Tys[2]);
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ ArgTys.push_back(IntegerType::get(Context, 1));
+ break;
+ case Intrinsic::memset: // llvm.memset
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back((0 < numTys) ? Tys[0] : PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ ArgTys.push_back(Tys[1]);
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ ArgTys.push_back(IntegerType::get(Context, 1));
break;
case Intrinsic::invariant_end: // llvm.invariant.end
- VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::isVoid, MVT::iPTR, MVT::i64, MVT::iPTR);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(StructType::get(Context)));
+ ArgTys.push_back(IntegerType::get(Context, 64));
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
break;
case Intrinsic::memory_barrier: // llvm.memory.barrier
- VerifyIntrinsicPrototype(ID, IF, 1, 5, MVT::isVoid, MVT::i1, MVT::i1, MVT::i1, MVT::i1, MVT::i1);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(IntegerType::get(Context, 1));
+ ArgTys.push_back(IntegerType::get(Context, 1));
+ ArgTys.push_back(IntegerType::get(Context, 1));
+ ArgTys.push_back(IntegerType::get(Context, 1));
+ ArgTys.push_back(IntegerType::get(Context, 1));
break;
+ case Intrinsic::arm_set_fpscr: // llvm.arm.set.fpscr
case Intrinsic::eh_sjlj_callsite: // llvm.eh.sjlj.callsite
case Intrinsic::pcmarker: // llvm.pcmarker
case Intrinsic::ppc_altivec_dss: // llvm.ppc.altivec.dss
- VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::isVoid, MVT::i32);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::x86_sse3_mwait: // llvm.x86.sse3.mwait
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::i32, MVT::i32);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::eh_return_i32: // llvm.eh.return.i32
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::i32, MVT::iPTR);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
break;
case Intrinsic::eh_return_i64: // llvm.eh.return.i64
case Intrinsic::lifetime_end: // llvm.lifetime.end
case Intrinsic::lifetime_start: // llvm.lifetime.start
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::i64, MVT::iPTR);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(IntegerType::get(Context, 64));
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ break;
+ case Intrinsic::x86_int: // llvm.x86.int
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(IntegerType::get(Context, 8));
break;
case Intrinsic::dbg_value: // llvm.dbg.value
- VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::isVoid, MVT::Metadata, MVT::i64, MVT::Metadata);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(Type::getMetadataTy(Context));
+ ArgTys.push_back(IntegerType::get(Context, 64));
+ ArgTys.push_back(Type::getMetadataTy(Context));
break;
case Intrinsic::dbg_declare: // llvm.dbg.declare
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::Metadata, MVT::Metadata);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(Type::getMetadataTy(Context));
+ ArgTys.push_back(Type::getMetadataTy(Context));
break;
case Intrinsic::eh_sjlj_longjmp: // llvm.eh.sjlj.longjmp
case Intrinsic::ppc_dcba: // llvm.ppc.dcba
@@ -4215,33 +5100,72 @@
case Intrinsic::x86_sse2_clflush: // llvm.x86.sse2.clflush
case Intrinsic::x86_sse_ldmxcsr: // llvm.x86.sse.ldmxcsr
case Intrinsic::x86_sse_stmxcsr: // llvm.x86.sse.stmxcsr
- VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::isVoid, MVT::iPTR);
- break;
- case Intrinsic::arm_neon_vst1: // llvm.arm.neon.vst1
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::iPTR, MVT::vAny);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
break;
case Intrinsic::arm_neon_vst2: // llvm.arm.neon.vst2
- VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::isVoid, MVT::iPTR, MVT::vAny, ~2);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::arm_neon_vst3: // llvm.arm.neon.vst3
- VerifyIntrinsicPrototype(ID, IF, 1, 4, MVT::isVoid, MVT::iPTR, MVT::vAny, ~2, ~2);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::arm_neon_vst4: // llvm.arm.neon.vst4
- VerifyIntrinsicPrototype(ID, IF, 1, 5, MVT::isVoid, MVT::iPTR, MVT::vAny, ~2, ~2, ~2);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::arm_neon_vst2lane: // llvm.arm.neon.vst2lane
- VerifyIntrinsicPrototype(ID, IF, 1, 4, MVT::isVoid, MVT::iPTR, MVT::vAny, ~2, MVT::i32);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::arm_neon_vst3lane: // llvm.arm.neon.vst3lane
- VerifyIntrinsicPrototype(ID, IF, 1, 5, MVT::isVoid, MVT::iPTR, MVT::vAny, ~2, ~2, MVT::i32);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::arm_neon_vst4lane: // llvm.arm.neon.vst4lane
- VerifyIntrinsicPrototype(ID, IF, 1, 6, MVT::isVoid, MVT::iPTR, MVT::vAny, ~2, ~2, ~2, MVT::i32);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ break;
+ case Intrinsic::arm_neon_vst1: // llvm.arm.neon.vst1
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::longjmp: // llvm.longjmp
case Intrinsic::siglongjmp: // llvm.siglongjmp
case Intrinsic::x86_sse2_movnt_i: // llvm.x86.sse2.movnt.i
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::iPTR, MVT::i32);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::ppc_altivec_dst: // llvm.ppc.altivec.dst
case Intrinsic::ppc_altivec_dstst: // llvm.ppc.altivec.dstst
@@ -4249,77 +5173,150 @@
case Intrinsic::ppc_altivec_dstt: // llvm.ppc.altivec.dstt
case Intrinsic::prefetch: // llvm.prefetch
case Intrinsic::x86_sse3_monitor: // llvm.x86.sse3.monitor
- VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::isVoid, MVT::iPTR, MVT::i32, MVT::i32);
- break;
- case Intrinsic::memset: // llvm.memset
- VerifyIntrinsicPrototype(ID, IF, 1, 4, MVT::isVoid, MVT::iPTR, MVT::i8, MVT::iAny, MVT::i32);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::vacopy: // llvm.va_copy
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::iPTR, MVT::iPTR);
- break;
- case Intrinsic::memcpy: // llvm.memcpy
- case Intrinsic::memmove: // llvm.memmove
- VerifyIntrinsicPrototype(ID, IF, 1, 4, MVT::isVoid, MVT::iPTR, MVT::iPTR, MVT::iAny, MVT::i32);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
break;
case Intrinsic::var_annotation: // llvm.var.annotation
- VerifyIntrinsicPrototype(ID, IF, 1, 4, MVT::isVoid, MVT::iPTR, MVT::iPTR, MVT::iPTR, MVT::i32);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::gcwrite: // llvm.gcwrite
- VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::isVoid, MVT::iPTR, MVT::iPTR, MVT::iPTR);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(PointerType::getUnqual(PointerType::getUnqual(IntegerType::get(Context, 8))));
break;
case Intrinsic::stackprotector: // llvm.stackprotector
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::iPTR, MVT::iPTR);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(PointerType::getUnqual(PointerType::getUnqual(IntegerType::get(Context, 8))));
break;
case Intrinsic::x86_sse2_storeu_dq: // llvm.x86.sse2.storeu.dq
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::iPTR, MVT::v16i8);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 16));
break;
case Intrinsic::x86_mmx_movnt_dq: // llvm.x86.mmx.movnt.dq
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::iPTR, MVT::v1i64);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 1));
break;
case Intrinsic::x86_sse2_movnt_pd: // llvm.x86.sse2.movnt.pd
case Intrinsic::x86_sse2_storeu_pd: // llvm.x86.sse2.storeu.pd
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::iPTR, MVT::v2f64);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
+ break;
+ case Intrinsic::x86_avx_maskstore_pd: // llvm.x86.avx.maskstore.pd
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
break;
case Intrinsic::x86_sse2_movnt_dq: // llvm.x86.sse2.movnt.dq
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::iPTR, MVT::v2i64);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 2));
+ break;
+ case Intrinsic::x86_avx_storeu_dq_256: // llvm.x86.avx.storeu.dq.256
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 32));
break;
case Intrinsic::x86_sse_movnt_ps: // llvm.x86.sse.movnt.ps
case Intrinsic::x86_sse_storeu_ps: // llvm.x86.sse.storeu.ps
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::iPTR, MVT::v4f32);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
+ break;
+ case Intrinsic::x86_avx_maskstore_ps: // llvm.x86.avx.maskstore.ps
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
+ break;
+ case Intrinsic::x86_avx_movnt_pd_256: // llvm.x86.avx.movnt.pd.256
+ case Intrinsic::x86_avx_storeu_pd_256: // llvm.x86.avx.storeu.pd.256
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ break;
+ case Intrinsic::x86_avx_maskstore_pd_256: // llvm.x86.avx.maskstore.pd.256
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
break;
case Intrinsic::x86_sse2_storel_dq: // llvm.x86.sse2.storel.dq
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::iPTR, MVT::v4i32);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 4));
+ break;
+ case Intrinsic::x86_avx_movnt_dq_256: // llvm.x86.avx.movnt.dq.256
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 4));
+ break;
+ case Intrinsic::x86_avx_movnt_ps_256: // llvm.x86.avx.movnt.ps.256
+ case Intrinsic::x86_avx_storeu_ps_256: // llvm.x86.avx.storeu.ps.256
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ break;
+ case Intrinsic::x86_avx_maskstore_ps_256: // llvm.x86.avx.maskstore.ps.256
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
break;
case Intrinsic::gcroot: // llvm.gcroot
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::iPTR, MVT::iPTR);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(PointerType::getUnqual(PointerType::getUnqual(IntegerType::get(Context, 8))));
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
break;
case Intrinsic::ppc_altivec_stvebx: // llvm.ppc.altivec.stvebx
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::v16i8, MVT::iPTR);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 16));
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
break;
case Intrinsic::x86_sse2_maskmov_dqu: // llvm.x86.sse2.maskmov.dqu
- VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::isVoid, MVT::v16i8, MVT::v16i8, MVT::iPTR);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 16));
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 16));
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
break;
case Intrinsic::ppc_altivec_mtvscr: // llvm.ppc.altivec.mtvscr
- VerifyIntrinsicPrototype(ID, IF, 1, 1, MVT::isVoid, MVT::v4i32);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 4));
break;
case Intrinsic::ppc_altivec_stvewx: // llvm.ppc.altivec.stvewx
case Intrinsic::ppc_altivec_stvx: // llvm.ppc.altivec.stvx
case Intrinsic::ppc_altivec_stvxl: // llvm.ppc.altivec.stvxl
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::v4i32, MVT::iPTR);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 4));
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
break;
case Intrinsic::ppc_altivec_stvehx: // llvm.ppc.altivec.stvehx
- VerifyIntrinsicPrototype(ID, IF, 1, 2, MVT::isVoid, MVT::v8i16, MVT::iPTR);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 16), 8));
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
break;
case Intrinsic::x86_mmx_maskmovq: // llvm.x86.mmx.maskmovq
- VerifyIntrinsicPrototype(ID, IF, 1, 3, MVT::isVoid, MVT::v8i8, MVT::v8i8, MVT::iPTR);
+ ResultTy = Type::getVoidTy(Context);
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 8));
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 8));
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
break;
- }
-#endif
-
-// Code for generating Intrinsic function declarations.
-#ifdef GET_INTRINSIC_GENERATOR
- switch (id) {
- default: assert(0 && "Invalid intrinsic!");
case Intrinsic::ptr_annotation: // llvm.ptr.annotation
ResultTy = (0 < numTys) ? Tys[0] : PointerType::getUnqual(Tys[0]);
ArgTys.push_back(Tys[0]);
@@ -4529,28 +5526,6 @@
ArgTys.push_back(Tys[0]);
ArgTys.push_back(Tys[0]);
break;
- case Intrinsic::arm_neon_vaddws: // llvm.arm.neon.vaddws
- case Intrinsic::arm_neon_vaddwu: // llvm.arm.neon.vaddwu
- case Intrinsic::arm_neon_vsubws: // llvm.arm.neon.vsubws
- case Intrinsic::arm_neon_vsubwu: // llvm.arm.neon.vsubwu
- ResultTy = Tys[0];
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(VectorType::getTruncatedElementVectorType(dyn_cast<VectorType>(Tys[0])));
- break;
- case Intrinsic::arm_neon_vabas: // llvm.arm.neon.vabas
- case Intrinsic::arm_neon_vabau: // llvm.arm.neon.vabau
- case Intrinsic::arm_neon_vshiftins: // llvm.arm.neon.vshiftins
- ResultTy = Tys[0];
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(Tys[0]);
- break;
- case Intrinsic::arm_neon_vabals: // llvm.arm.neon.vabals
- case Intrinsic::arm_neon_vabalu: // llvm.arm.neon.vabalu
- case Intrinsic::arm_neon_vmlals: // llvm.arm.neon.vmlals
- case Intrinsic::arm_neon_vmlalu: // llvm.arm.neon.vmlalu
- case Intrinsic::arm_neon_vmlsls: // llvm.arm.neon.vmlsls
- case Intrinsic::arm_neon_vmlslu: // llvm.arm.neon.vmlslu
case Intrinsic::arm_neon_vqdmlal: // llvm.arm.neon.vqdmlal
case Intrinsic::arm_neon_vqdmlsl: // llvm.arm.neon.vqdmlsl
ResultTy = Tys[0];
@@ -4579,18 +5554,12 @@
ResultTy = Tys[0];
ArgTys.push_back(Tys[0]);
break;
- case Intrinsic::arm_neon_vmovn: // llvm.arm.neon.vmovn
case Intrinsic::arm_neon_vqmovns: // llvm.arm.neon.vqmovns
case Intrinsic::arm_neon_vqmovnsu: // llvm.arm.neon.vqmovnsu
case Intrinsic::arm_neon_vqmovnu: // llvm.arm.neon.vqmovnu
ResultTy = Tys[0];
ArgTys.push_back(VectorType::getExtendedElementVectorType(dyn_cast<VectorType>(Tys[0])));
break;
- case Intrinsic::arm_neon_vmovls: // llvm.arm.neon.vmovls
- case Intrinsic::arm_neon_vmovlu: // llvm.arm.neon.vmovlu
- ResultTy = Tys[0];
- ArgTys.push_back(VectorType::getTruncatedElementVectorType(dyn_cast<VectorType>(Tys[0])));
- break;
case Intrinsic::arm_neon_vabds: // llvm.arm.neon.vabds
case Intrinsic::arm_neon_vabdu: // llvm.arm.neon.vabdu
case Intrinsic::arm_neon_vhadds: // llvm.arm.neon.vhadds
@@ -4646,22 +5615,20 @@
ArgTys.push_back(VectorType::getExtendedElementVectorType(dyn_cast<VectorType>(Tys[0])));
ArgTys.push_back(VectorType::getExtendedElementVectorType(dyn_cast<VectorType>(Tys[0])));
break;
- case Intrinsic::arm_neon_vabdls: // llvm.arm.neon.vabdls
- case Intrinsic::arm_neon_vabdlu: // llvm.arm.neon.vabdlu
- case Intrinsic::arm_neon_vaddls: // llvm.arm.neon.vaddls
- case Intrinsic::arm_neon_vaddlu: // llvm.arm.neon.vaddlu
case Intrinsic::arm_neon_vmullp: // llvm.arm.neon.vmullp
- case Intrinsic::arm_neon_vmulls: // llvm.arm.neon.vmulls
- case Intrinsic::arm_neon_vmullu: // llvm.arm.neon.vmullu
case Intrinsic::arm_neon_vqdmull: // llvm.arm.neon.vqdmull
case Intrinsic::arm_neon_vshiftls: // llvm.arm.neon.vshiftls
case Intrinsic::arm_neon_vshiftlu: // llvm.arm.neon.vshiftlu
- case Intrinsic::arm_neon_vsubls: // llvm.arm.neon.vsubls
- case Intrinsic::arm_neon_vsublu: // llvm.arm.neon.vsublu
ResultTy = Tys[0];
ArgTys.push_back(VectorType::getTruncatedElementVectorType(dyn_cast<VectorType>(Tys[0])));
ArgTys.push_back(VectorType::getTruncatedElementVectorType(dyn_cast<VectorType>(Tys[0])));
break;
+ case Intrinsic::arm_neon_vshiftins: // llvm.arm.neon.vshiftins
+ ResultTy = Tys[0];
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(Tys[0]);
+ ArgTys.push_back(Tys[0]);
+ break;
case Intrinsic::arm_neon_vpaddls: // llvm.arm.neon.vpaddls
case Intrinsic::arm_neon_vpaddlu: // llvm.arm.neon.vpaddlu
ResultTy = Tys[0];
@@ -4670,18 +5637,22 @@
case Intrinsic::arm_neon_vld1: // llvm.arm.neon.vld1
ResultTy = Tys[0];
ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::arm_neon_vld2: // llvm.arm.neon.vld2
ResultTy = StructType::get(Context, Tys[0], Tys[0], NULL);
ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::arm_neon_vld3: // llvm.arm.neon.vld3
ResultTy = StructType::get(Context, Tys[0], Tys[0], Tys[0], NULL);
ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::arm_neon_vld4: // llvm.arm.neon.vld4
ResultTy = StructType::get(Context, Tys[0], Tys[0], Tys[0], Tys[0], NULL);
ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::arm_neon_vld2lane: // llvm.arm.neon.vld2lane
ResultTy = StructType::get(Context, Tys[0], Tys[0], NULL);
@@ -4689,6 +5660,7 @@
ArgTys.push_back(Tys[0]);
ArgTys.push_back(Tys[0]);
ArgTys.push_back(IntegerType::get(Context, 32));
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::arm_neon_vld3lane: // llvm.arm.neon.vld3lane
ResultTy = StructType::get(Context, Tys[0], Tys[0], Tys[0], NULL);
@@ -4697,6 +5669,7 @@
ArgTys.push_back(Tys[0]);
ArgTys.push_back(Tys[0]);
ArgTys.push_back(IntegerType::get(Context, 32));
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::arm_neon_vld4lane: // llvm.arm.neon.vld4lane
ResultTy = StructType::get(Context, Tys[0], Tys[0], Tys[0], Tys[0], NULL);
@@ -4706,12 +5679,27 @@
ArgTys.push_back(Tys[0]);
ArgTys.push_back(Tys[0]);
ArgTys.push_back(IntegerType::get(Context, 32));
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
case Intrinsic::invariant_start: // llvm.invariant.start
ResultTy = PointerType::getUnqual(StructType::get(Context));
ArgTys.push_back(IntegerType::get(Context, 64));
ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
break;
+ case Intrinsic::arm_vcvtr: // llvm.arm.vcvtr
+ case Intrinsic::arm_vcvtru: // llvm.arm.vcvtru
+ ResultTy = Type::getFloatTy(Context);
+ ArgTys.push_back(Tys[0]);
+ break;
+ case Intrinsic::convert_from_fp16: // llvm.convert.from.fp16
+ ResultTy = Type::getFloatTy(Context);
+ ArgTys.push_back(IntegerType::get(Context, 16));
+ break;
+ case Intrinsic::convert_to_fp16: // llvm.convert.to.fp16
+ ResultTy = IntegerType::get(Context, 16);
+ ArgTys.push_back(Type::getFloatTy(Context));
+ break;
+ case Intrinsic::arm_get_fpscr: // llvm.arm.get.fpscr
case Intrinsic::flt_rounds: // llvm.flt.rounds
case Intrinsic::xcore_getid: // llvm.xcore.getid
ResultTy = IntegerType::get(Context, 32);
@@ -4725,6 +5713,10 @@
ArgTys.push_back(IntegerType::get(Context, 32));
ArgTys.push_back(IntegerType::get(Context, 16));
break;
+ case Intrinsic::arm_qadd: // llvm.arm.qadd
+ case Intrinsic::arm_qsub: // llvm.arm.qsub
+ case Intrinsic::arm_ssat: // llvm.arm.ssat
+ case Intrinsic::arm_usat: // llvm.arm.usat
case Intrinsic::x86_sse42_crc32_32: // llvm.x86.sse42.crc32.32
ResultTy = IntegerType::get(Context, 32);
ArgTys.push_back(IntegerType::get(Context, 32));
@@ -4818,12 +5810,24 @@
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 16));
ArgTys.push_back(IntegerType::get(Context, 8));
break;
+ case Intrinsic::x86_mmx_cvtsi64_si32: // llvm.x86.mmx.cvtsi64.si32
+ ResultTy = IntegerType::get(Context, 32);
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 1));
+ break;
+ case Intrinsic::x86_mmx_pextr_w: // llvm.x86.mmx.pextr.w
+ ResultTy = IntegerType::get(Context, 32);
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 1));
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ break;
case Intrinsic::x86_sse2_cvtsd2si: // llvm.x86.sse2.cvtsd2si
case Intrinsic::x86_sse2_cvttsd2si: // llvm.x86.sse2.cvttsd2si
case Intrinsic::x86_sse2_movmsk_pd: // llvm.x86.sse2.movmsk.pd
ResultTy = IntegerType::get(Context, 32);
ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
break;
+ case Intrinsic::x86_avx_vtestc_pd: // llvm.x86.avx.vtestc.pd
+ case Intrinsic::x86_avx_vtestnzc_pd: // llvm.x86.avx.vtestnzc.pd
+ case Intrinsic::x86_avx_vtestz_pd: // llvm.x86.avx.vtestz.pd
case Intrinsic::x86_sse2_comieq_sd: // llvm.x86.sse2.comieq.sd
case Intrinsic::x86_sse2_comige_sd: // llvm.x86.sse2.comige.sd
case Intrinsic::x86_sse2_comigt_sd: // llvm.x86.sse2.comigt.sd
@@ -4851,6 +5855,9 @@
ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
ArgTys.push_back(IntegerType::get(Context, 32));
break;
+ case Intrinsic::x86_avx_vtestc_ps: // llvm.x86.avx.vtestc.ps
+ case Intrinsic::x86_avx_vtestnzc_ps: // llvm.x86.avx.vtestnzc.ps
+ case Intrinsic::x86_avx_vtestz_ps: // llvm.x86.avx.vtestz.ps
case Intrinsic::x86_sse41_ptestc: // llvm.x86.sse41.ptestc
case Intrinsic::x86_sse41_ptestnzc: // llvm.x86.sse41.ptestnzc
case Intrinsic::x86_sse41_ptestz: // llvm.x86.sse41.ptestz
@@ -4870,11 +5877,40 @@
ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
break;
+ case Intrinsic::x86_avx_movmsk_pd_256: // llvm.x86.avx.movmsk.pd.256
+ ResultTy = IntegerType::get(Context, 32);
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ break;
+ case Intrinsic::x86_avx_vtestc_pd_256: // llvm.x86.avx.vtestc.pd.256
+ case Intrinsic::x86_avx_vtestnzc_pd_256: // llvm.x86.avx.vtestnzc.pd.256
+ case Intrinsic::x86_avx_vtestz_pd_256: // llvm.x86.avx.vtestz.pd.256
+ ResultTy = IntegerType::get(Context, 32);
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ break;
case Intrinsic::x86_sse41_pextrd: // llvm.x86.sse41.pextrd
ResultTy = IntegerType::get(Context, 32);
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 4));
ArgTys.push_back(IntegerType::get(Context, 32));
break;
+ case Intrinsic::x86_avx_ptestc_256: // llvm.x86.avx.ptestc.256
+ case Intrinsic::x86_avx_ptestnzc_256: // llvm.x86.avx.ptestnzc.256
+ case Intrinsic::x86_avx_ptestz_256: // llvm.x86.avx.ptestz.256
+ ResultTy = IntegerType::get(Context, 32);
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 4));
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 4));
+ break;
+ case Intrinsic::x86_avx_movmsk_ps_256: // llvm.x86.avx.movmsk.ps.256
+ ResultTy = IntegerType::get(Context, 32);
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ break;
+ case Intrinsic::x86_avx_vtestc_ps_256: // llvm.x86.avx.vtestc.ps.256
+ case Intrinsic::x86_avx_vtestnzc_ps_256: // llvm.x86.avx.vtestnzc.ps.256
+ case Intrinsic::x86_avx_vtestz_ps_256: // llvm.x86.avx.vtestz.ps.256
+ ResultTy = IntegerType::get(Context, 32);
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ break;
case Intrinsic::x86_mmx_pmovmskb: // llvm.x86.mmx.pmovmskb
ResultTy = IntegerType::get(Context, 32);
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 8));
@@ -4883,11 +5919,16 @@
ResultTy = IntegerType::get(Context, 64);
break;
case Intrinsic::alpha_umulh: // llvm.alpha.umulh
- case Intrinsic::x86_sse42_crc32_64: // llvm.x86.sse42.crc32.64
+ case Intrinsic::x86_sse42_crc64_64: // llvm.x86.sse42.crc64.64
ResultTy = IntegerType::get(Context, 64);
ArgTys.push_back(IntegerType::get(Context, 64));
ArgTys.push_back(IntegerType::get(Context, 64));
break;
+ case Intrinsic::x86_sse42_crc64_8: // llvm.x86.sse42.crc64.8
+ ResultTy = IntegerType::get(Context, 64);
+ ArgTys.push_back(IntegerType::get(Context, 64));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ break;
case Intrinsic::x86_sse2_cvtsd2si64: // llvm.x86.sse2.cvtsd2si64
case Intrinsic::x86_sse2_cvttsd2si64: // llvm.x86.sse2.cvttsd2si64
ResultTy = IntegerType::get(Context, 64);
@@ -5031,28 +6072,43 @@
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 16), 8));
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 16), 8));
break;
+ case Intrinsic::x86_mmx_cvtsi32_si64: // llvm.x86.mmx.cvtsi32.si64
+ ResultTy = VectorType::get(IntegerType::get(Context, 64), 1);
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ break;
case Intrinsic::x86_mmx_pslli_q: // llvm.x86.mmx.pslli.q
case Intrinsic::x86_mmx_psrli_q: // llvm.x86.mmx.psrli.q
ResultTy = VectorType::get(IntegerType::get(Context, 64), 1);
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 1));
ArgTys.push_back(IntegerType::get(Context, 32));
break;
- case Intrinsic::x86_mmx_psll_q: // llvm.x86.mmx.psll.q
- case Intrinsic::x86_mmx_psrl_q: // llvm.x86.mmx.psrl.q
+ case Intrinsic::x86_mmx_pinsr_w: // llvm.x86.mmx.pinsr.w
ResultTy = VectorType::get(IntegerType::get(Context, 64), 1);
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 1));
- ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 1));
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ ArgTys.push_back(IntegerType::get(Context, 32));
break;
- case Intrinsic::x86_ssse3_palign_r: // llvm.x86.ssse3.palign.r
+ case Intrinsic::x86_mmx_padd_q: // llvm.x86.mmx.padd.q
+ case Intrinsic::x86_mmx_pand: // llvm.x86.mmx.pand
+ case Intrinsic::x86_mmx_pandn: // llvm.x86.mmx.pandn
+ case Intrinsic::x86_mmx_por: // llvm.x86.mmx.por
+ case Intrinsic::x86_mmx_psll_q: // llvm.x86.mmx.psll.q
+ case Intrinsic::x86_mmx_psrl_q: // llvm.x86.mmx.psrl.q
+ case Intrinsic::x86_mmx_psub_q: // llvm.x86.mmx.psub.q
+ case Intrinsic::x86_mmx_pxor: // llvm.x86.mmx.pxor
ResultTy = VectorType::get(IntegerType::get(Context, 64), 1);
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 1));
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 1));
- ArgTys.push_back(IntegerType::get(Context, 8));
break;
case Intrinsic::x86_sse2_loadu_pd: // llvm.x86.sse2.loadu.pd
ResultTy = VectorType::get(Type::getDoubleTy(Context), 2);
ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
break;
+ case Intrinsic::x86_avx_maskload_pd: // llvm.x86.avx.maskload.pd
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 2);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
+ break;
case Intrinsic::x86_sse2_sqrt_pd: // llvm.x86.sse2.sqrt.pd
case Intrinsic::x86_sse2_sqrt_sd: // llvm.x86.sse2.sqrt.sd
ResultTy = VectorType::get(Type::getDoubleTy(Context), 2);
@@ -5069,6 +6125,11 @@
ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
ArgTys.push_back(IntegerType::get(Context, 64));
break;
+ case Intrinsic::x86_avx_vpermil_pd: // llvm.x86.avx.vpermil.pd
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 2);
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ break;
case Intrinsic::spu_si_dfa: // llvm.spu.si.dfa
case Intrinsic::spu_si_dfm: // llvm.spu.si.dfm
case Intrinsic::spu_si_dfma: // llvm.spu.si.dfma
@@ -5112,6 +6173,11 @@
ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
break;
+ case Intrinsic::x86_avx_vpermilvar_pd: // llvm.x86.avx.vpermilvar.pd
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 2);
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 2));
+ break;
case Intrinsic::x86_sse2_cvtss2sd: // llvm.x86.sse2.cvtss2sd
ResultTy = VectorType::get(Type::getDoubleTy(Context), 2);
ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
@@ -5125,10 +6191,20 @@
ResultTy = VectorType::get(Type::getDoubleTy(Context), 2);
ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
break;
+ case Intrinsic::x86_avx_vextractf128_pd_256: // llvm.x86.avx.vextractf128.pd.256
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 2);
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ break;
case Intrinsic::x86_sse2_cvtdq2pd: // llvm.x86.sse2.cvtdq2pd
ResultTy = VectorType::get(Type::getDoubleTy(Context), 2);
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 4));
break;
+ case Intrinsic::x86_mmx_vec_init_d: // llvm.x86.mmx.vec.init.d
+ ResultTy = VectorType::get(IntegerType::get(Context, 32), 2);
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ break;
case Intrinsic::arm_neon_vacged: // llvm.arm.neon.vacged
case Intrinsic::arm_neon_vacgtd: // llvm.arm.neon.vacgtd
ResultTy = VectorType::get(IntegerType::get(Context, 32), 2);
@@ -5147,6 +6223,7 @@
case Intrinsic::x86_mmx_pslli_d: // llvm.x86.mmx.pslli.d
case Intrinsic::x86_mmx_psrai_d: // llvm.x86.mmx.psrai.d
case Intrinsic::x86_mmx_psrli_d: // llvm.x86.mmx.psrli.d
+ case Intrinsic::x86_mmx_vec_ext_d: // llvm.x86.mmx.vec.ext.d
ResultTy = VectorType::get(IntegerType::get(Context, 32), 2);
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 2));
ArgTys.push_back(IntegerType::get(Context, 32));
@@ -5158,9 +6235,13 @@
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 2));
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 1));
break;
+ case Intrinsic::x86_mmx_padd_d: // llvm.x86.mmx.padd.d
case Intrinsic::x86_mmx_pcmpeq_d: // llvm.x86.mmx.pcmpeq.d
case Intrinsic::x86_mmx_pcmpgt_d: // llvm.x86.mmx.pcmpgt.d
case Intrinsic::x86_mmx_pmulu_dq: // llvm.x86.mmx.pmulu.dq
+ case Intrinsic::x86_mmx_psub_d: // llvm.x86.mmx.psub.d
+ case Intrinsic::x86_mmx_punpckhdq: // llvm.x86.mmx.punpckhdq
+ case Intrinsic::x86_mmx_punpckldq: // llvm.x86.mmx.punpckldq
case Intrinsic::x86_ssse3_phadd_d: // llvm.x86.ssse3.phadd.d
case Intrinsic::x86_ssse3_phsub_d: // llvm.x86.ssse3.phsub.d
case Intrinsic::x86_ssse3_psign_d: // llvm.x86.ssse3.psign.d
@@ -5192,6 +6273,10 @@
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 16));
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 16));
break;
+ case Intrinsic::x86_aesni_aesimc: // llvm.x86.aesni.aesimc
+ ResultTy = VectorType::get(IntegerType::get(Context, 64), 2);
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 2));
+ break;
case Intrinsic::x86_sse2_psll_dq: // llvm.x86.sse2.psll.dq
case Intrinsic::x86_sse2_psll_dq_bs: // llvm.x86.sse2.psll.dq.bs
case Intrinsic::x86_sse2_pslli_q: // llvm.x86.sse2.pslli.q
@@ -5202,6 +6287,15 @@
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 2));
ArgTys.push_back(IntegerType::get(Context, 32));
break;
+ case Intrinsic::x86_aesni_aeskeygenassist: // llvm.x86.aesni.aeskeygenassist
+ ResultTy = VectorType::get(IntegerType::get(Context, 64), 2);
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 2));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ break;
+ case Intrinsic::x86_aesni_aesdec: // llvm.x86.aesni.aesdec
+ case Intrinsic::x86_aesni_aesdeclast: // llvm.x86.aesni.aesdeclast
+ case Intrinsic::x86_aesni_aesenc: // llvm.x86.aesni.aesenc
+ case Intrinsic::x86_aesni_aesenclast: // llvm.x86.aesni.aesenclast
case Intrinsic::x86_sse2_psll_q: // llvm.x86.sse2.psll.q
case Intrinsic::x86_sse2_psrl_q: // llvm.x86.sse2.psrl.q
case Intrinsic::x86_sse41_pcmpeqq: // llvm.x86.sse41.pcmpeqq
@@ -5210,12 +6304,6 @@
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 2));
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 2));
break;
- case Intrinsic::x86_ssse3_palign_r_128: // llvm.x86.ssse3.palign.r.128
- ResultTy = VectorType::get(IntegerType::get(Context, 64), 2);
- ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 2));
- ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 2));
- ArgTys.push_back(IntegerType::get(Context, 8));
- break;
case Intrinsic::x86_sse41_pmovsxdq: // llvm.x86.sse41.pmovsxdq
case Intrinsic::x86_sse41_pmovzxdq: // llvm.x86.sse41.pmovzxdq
ResultTy = VectorType::get(IntegerType::get(Context, 64), 2);
@@ -5232,10 +6320,21 @@
ResultTy = VectorType::get(IntegerType::get(Context, 64), 2);
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 16), 8));
break;
+ case Intrinsic::x86_avx_ldu_dq_256: // llvm.x86.avx.ldu.dq.256
+ case Intrinsic::x86_avx_loadu_dq_256: // llvm.x86.avx.loadu.dq.256
+ ResultTy = VectorType::get(IntegerType::get(Context, 8), 32);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ break;
+ case Intrinsic::x86_avx_vbroadcastss: // llvm.x86.avx.vbroadcastss
case Intrinsic::x86_sse_loadu_ps: // llvm.x86.sse.loadu.ps
ResultTy = VectorType::get(Type::getFloatTy(Context), 4);
ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
break;
+ case Intrinsic::x86_avx_maskload_ps: // llvm.x86.avx.maskload.ps
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 4);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
+ break;
case Intrinsic::x86_sse2_cvtpd2ps: // llvm.x86.sse2.cvtpd2ps
ResultTy = VectorType::get(Type::getFloatTy(Context), 4);
ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
@@ -5268,6 +6367,11 @@
ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
ArgTys.push_back(IntegerType::get(Context, 64));
break;
+ case Intrinsic::x86_avx_vpermil_ps: // llvm.x86.avx.vpermil.ps
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 4);
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ break;
case Intrinsic::x86_sse2_cvtsd2ss: // llvm.x86.sse2.cvtsd2ss
ResultTy = VectorType::get(Type::getFloatTy(Context), 4);
ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
@@ -5329,6 +6433,15 @@
ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
break;
+ case Intrinsic::x86_avx_vpermilvar_ps: // llvm.x86.avx.vpermilvar.ps
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 4);
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 4));
+ break;
+ case Intrinsic::x86_avx_cvt_pd2_ps_256: // llvm.x86.avx.cvt.pd2.ps.256
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 4);
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ break;
case Intrinsic::x86_sse2_cvtdq2ps: // llvm.x86.sse2.cvtdq2ps
ResultTy = VectorType::get(Type::getFloatTy(Context), 4);
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 4));
@@ -5339,6 +6452,90 @@
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 4));
ArgTys.push_back(IntegerType::get(Context, 32));
break;
+ case Intrinsic::x86_avx_vextractf128_ps_256: // llvm.x86.avx.vextractf128.ps.256
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 4);
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ break;
+ case Intrinsic::x86_avx_loadu_pd_256: // llvm.x86.avx.loadu.pd.256
+ case Intrinsic::x86_avx_vbroadcast_sd_256: // llvm.x86.avx.vbroadcast.sd.256
+ case Intrinsic::x86_avx_vbroadcastf128_pd_256: // llvm.x86.avx.vbroadcastf128.pd.256
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 4);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ break;
+ case Intrinsic::x86_avx_maskload_pd_256: // llvm.x86.avx.maskload.pd.256
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 4);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ break;
+ case Intrinsic::x86_avx_cvt_ps2_pd_256: // llvm.x86.avx.cvt.ps2.pd.256
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 4);
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
+ break;
+ case Intrinsic::x86_avx_sqrt_pd_256: // llvm.x86.avx.sqrt.pd.256
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 4);
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ break;
+ case Intrinsic::x86_avx_round_pd_256: // llvm.x86.avx.round.pd.256
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 4);
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ break;
+ case Intrinsic::x86_avx_vpermil_pd_256: // llvm.x86.avx.vpermil.pd.256
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 4);
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ break;
+ case Intrinsic::x86_avx_vinsertf128_pd_256: // llvm.x86.avx.vinsertf128.pd.256
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 4);
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ break;
+ case Intrinsic::x86_avx_addsub_pd_256: // llvm.x86.avx.addsub.pd.256
+ case Intrinsic::x86_avx_hadd_pd_256: // llvm.x86.avx.hadd.pd.256
+ case Intrinsic::x86_avx_hsub_pd_256: // llvm.x86.avx.hsub.pd.256
+ case Intrinsic::x86_avx_max_pd_256: // llvm.x86.avx.max.pd.256
+ case Intrinsic::x86_avx_min_pd_256: // llvm.x86.avx.min.pd.256
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 4);
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ break;
+ case Intrinsic::x86_avx_blend_pd_256: // llvm.x86.avx.blend.pd.256
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 4);
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ break;
+ case Intrinsic::x86_avx_cmp_pd_256: // llvm.x86.avx.cmp.pd.256
+ case Intrinsic::x86_avx_vperm2f128_pd_256: // llvm.x86.avx.vperm2f128.pd.256
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 4);
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ break;
+ case Intrinsic::x86_avx_blendv_pd_256: // llvm.x86.avx.blendv.pd.256
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 4);
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ break;
+ case Intrinsic::x86_avx_vpermilvar_pd_256: // llvm.x86.avx.vpermilvar.pd.256
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 4);
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 4));
+ break;
+ case Intrinsic::x86_avx_cvtdq2_pd_256: // llvm.x86.avx.cvtdq2.pd.256
+ ResultTy = VectorType::get(Type::getDoubleTy(Context), 4);
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 4));
+ break;
+ case Intrinsic::x86_mmx_vec_init_w: // llvm.x86.mmx.vec.init.w
+ ResultTy = VectorType::get(IntegerType::get(Context, 16), 4);
+ ArgTys.push_back(IntegerType::get(Context, 16));
+ ArgTys.push_back(IntegerType::get(Context, 16));
+ ArgTys.push_back(IntegerType::get(Context, 16));
+ ArgTys.push_back(IntegerType::get(Context, 16));
+ break;
case Intrinsic::x86_mmx_packssdw: // llvm.x86.mmx.packssdw
ResultTy = VectorType::get(IntegerType::get(Context, 16), 4);
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 2));
@@ -5351,6 +6548,7 @@
case Intrinsic::x86_mmx_pslli_w: // llvm.x86.mmx.pslli.w
case Intrinsic::x86_mmx_psrai_w: // llvm.x86.mmx.psrai.w
case Intrinsic::x86_mmx_psrli_w: // llvm.x86.mmx.psrli.w
+ case Intrinsic::x86_ssse3_pshuf_w: // llvm.x86.ssse3.pshuf.w
ResultTy = VectorType::get(IntegerType::get(Context, 16), 4);
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 16), 4));
ArgTys.push_back(IntegerType::get(Context, 32));
@@ -5362,6 +6560,7 @@
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 16), 4));
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 1));
break;
+ case Intrinsic::x86_mmx_padd_w: // llvm.x86.mmx.padd.w
case Intrinsic::x86_mmx_padds_w: // llvm.x86.mmx.padds.w
case Intrinsic::x86_mmx_paddus_w: // llvm.x86.mmx.paddus.w
case Intrinsic::x86_mmx_pavg_w: // llvm.x86.mmx.pavg.w
@@ -5371,8 +6570,12 @@
case Intrinsic::x86_mmx_pmins_w: // llvm.x86.mmx.pmins.w
case Intrinsic::x86_mmx_pmulh_w: // llvm.x86.mmx.pmulh.w
case Intrinsic::x86_mmx_pmulhu_w: // llvm.x86.mmx.pmulhu.w
+ case Intrinsic::x86_mmx_pmull_w: // llvm.x86.mmx.pmull.w
+ case Intrinsic::x86_mmx_psub_w: // llvm.x86.mmx.psub.w
case Intrinsic::x86_mmx_psubs_w: // llvm.x86.mmx.psubs.w
case Intrinsic::x86_mmx_psubus_w: // llvm.x86.mmx.psubus.w
+ case Intrinsic::x86_mmx_punpckhwd: // llvm.x86.mmx.punpckhwd
+ case Intrinsic::x86_mmx_punpcklwd: // llvm.x86.mmx.punpcklwd
case Intrinsic::x86_ssse3_phadd_sw: // llvm.x86.ssse3.phadd.sw
case Intrinsic::x86_ssse3_phadd_w: // llvm.x86.ssse3.phadd.w
case Intrinsic::x86_ssse3_phsub_sw: // llvm.x86.ssse3.phsub.sw
@@ -5439,6 +6642,11 @@
ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
break;
+ case Intrinsic::x86_avx_cvt_pd2dq_256: // llvm.x86.avx.cvt.pd2dq.256
+ case Intrinsic::x86_avx_cvtt_pd2dq_256: // llvm.x86.avx.cvtt.pd2dq.256
+ ResultTy = VectorType::get(IntegerType::get(Context, 32), 4);
+ ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 4));
+ break;
case Intrinsic::x86_ssse3_pabs_d_128: // llvm.x86.ssse3.pabs.d.128
ResultTy = VectorType::get(IntegerType::get(Context, 32), 4);
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 4));
@@ -5519,7 +6727,6 @@
case Intrinsic::x86_sse41_pmaxud: // llvm.x86.sse41.pmaxud
case Intrinsic::x86_sse41_pminsd: // llvm.x86.sse41.pminsd
case Intrinsic::x86_sse41_pminud: // llvm.x86.sse41.pminud
- case Intrinsic::x86_sse41_pmulld: // llvm.x86.sse41.pmulld
case Intrinsic::x86_ssse3_phadd_d_128: // llvm.x86.ssse3.phadd.d.128
case Intrinsic::x86_ssse3_phadd_sw_128: // llvm.x86.ssse3.phadd.sw.128
case Intrinsic::x86_ssse3_phsub_d_128: // llvm.x86.ssse3.phsub.d.128
@@ -5596,6 +6803,82 @@
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 16), 8));
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 16), 8));
break;
+ case Intrinsic::x86_avx_vextractf128_si_256: // llvm.x86.avx.vextractf128.si.256
+ ResultTy = VectorType::get(IntegerType::get(Context, 32), 4);
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 8));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ break;
+ case Intrinsic::x86_avx_loadu_ps_256: // llvm.x86.avx.loadu.ps.256
+ case Intrinsic::x86_avx_vbroadcastf128_ps_256: // llvm.x86.avx.vbroadcastf128.ps.256
+ case Intrinsic::x86_avx_vbroadcastss_256: // llvm.x86.avx.vbroadcastss.256
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 8);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ break;
+ case Intrinsic::x86_avx_maskload_ps_256: // llvm.x86.avx.maskload.ps.256
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 8);
+ ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ break;
+ case Intrinsic::x86_avx_rcp_ps_256: // llvm.x86.avx.rcp.ps.256
+ case Intrinsic::x86_avx_rsqrt_ps_256: // llvm.x86.avx.rsqrt.ps.256
+ case Intrinsic::x86_avx_sqrt_ps_256: // llvm.x86.avx.sqrt.ps.256
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 8);
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ break;
+ case Intrinsic::x86_avx_round_ps_256: // llvm.x86.avx.round.ps.256
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 8);
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ break;
+ case Intrinsic::x86_avx_vpermil_ps_256: // llvm.x86.avx.vpermil.ps.256
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 8);
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ break;
+ case Intrinsic::x86_avx_vinsertf128_ps_256: // llvm.x86.avx.vinsertf128.ps.256
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 8);
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ break;
+ case Intrinsic::x86_avx_addsub_ps_256: // llvm.x86.avx.addsub.ps.256
+ case Intrinsic::x86_avx_hadd_ps_256: // llvm.x86.avx.hadd.ps.256
+ case Intrinsic::x86_avx_hsub_ps_256: // llvm.x86.avx.hsub.ps.256
+ case Intrinsic::x86_avx_max_ps_256: // llvm.x86.avx.max.ps.256
+ case Intrinsic::x86_avx_min_ps_256: // llvm.x86.avx.min.ps.256
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 8);
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ break;
+ case Intrinsic::x86_avx_blend_ps_256: // llvm.x86.avx.blend.ps.256
+ case Intrinsic::x86_avx_dp_ps_256: // llvm.x86.avx.dp.ps.256
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 8);
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ ArgTys.push_back(IntegerType::get(Context, 32));
+ break;
+ case Intrinsic::x86_avx_cmp_ps_256: // llvm.x86.avx.cmp.ps.256
+ case Intrinsic::x86_avx_vperm2f128_ps_256: // llvm.x86.avx.vperm2f128.ps.256
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 8);
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ break;
+ case Intrinsic::x86_avx_blendv_ps_256: // llvm.x86.avx.blendv.ps.256
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 8);
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ break;
+ case Intrinsic::x86_avx_vpermilvar_ps_256: // llvm.x86.avx.vpermilvar.ps.256
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 8);
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 8));
+ break;
+ case Intrinsic::x86_avx_cvtdq2_ps_256: // llvm.x86.avx.cvtdq2.ps.256
+ ResultTy = VectorType::get(Type::getFloatTy(Context), 8);
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 8));
+ break;
case Intrinsic::ppc_altivec_mfvscr: // llvm.ppc.altivec.mfvscr
ResultTy = VectorType::get(IntegerType::get(Context, 16), 8);
break;
@@ -5716,6 +6999,34 @@
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 16), 8));
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 16), 8));
break;
+ case Intrinsic::x86_avx_cvt_ps2dq_256: // llvm.x86.avx.cvt.ps2dq.256
+ case Intrinsic::x86_avx_cvtt_ps2dq_256: // llvm.x86.avx.cvtt.ps2dq.256
+ ResultTy = VectorType::get(IntegerType::get(Context, 32), 8);
+ ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 8));
+ break;
+ case Intrinsic::x86_avx_vinsertf128_si_256: // llvm.x86.avx.vinsertf128.si.256
+ ResultTy = VectorType::get(IntegerType::get(Context, 32), 8);
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 8));
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 4));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ break;
+ case Intrinsic::x86_avx_vperm2f128_si_256: // llvm.x86.avx.vperm2f128.si.256
+ ResultTy = VectorType::get(IntegerType::get(Context, 32), 8);
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 8));
+ ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 8));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ break;
+ case Intrinsic::x86_mmx_vec_init_b: // llvm.x86.mmx.vec.init.b
+ ResultTy = VectorType::get(IntegerType::get(Context, 8), 8);
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ ArgTys.push_back(IntegerType::get(Context, 8));
+ break;
case Intrinsic::x86_mmx_packsswb: // llvm.x86.mmx.packsswb
case Intrinsic::x86_mmx_packuswb: // llvm.x86.mmx.packuswb
ResultTy = VectorType::get(IntegerType::get(Context, 8), 8);
@@ -5727,6 +7038,7 @@
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 8));
break;
case Intrinsic::arm_neon_vtbl1: // llvm.arm.neon.vtbl1
+ case Intrinsic::x86_mmx_padd_b: // llvm.x86.mmx.padd.b
case Intrinsic::x86_mmx_padds_b: // llvm.x86.mmx.padds.b
case Intrinsic::x86_mmx_paddus_b: // llvm.x86.mmx.paddus.b
case Intrinsic::x86_mmx_pavg_b: // llvm.x86.mmx.pavg.b
@@ -5734,8 +7046,11 @@
case Intrinsic::x86_mmx_pcmpgt_b: // llvm.x86.mmx.pcmpgt.b
case Intrinsic::x86_mmx_pmaxu_b: // llvm.x86.mmx.pmaxu.b
case Intrinsic::x86_mmx_pminu_b: // llvm.x86.mmx.pminu.b
+ case Intrinsic::x86_mmx_psub_b: // llvm.x86.mmx.psub.b
case Intrinsic::x86_mmx_psubs_b: // llvm.x86.mmx.psubs.b
case Intrinsic::x86_mmx_psubus_b: // llvm.x86.mmx.psubus.b
+ case Intrinsic::x86_mmx_punpckhbw: // llvm.x86.mmx.punpckhbw
+ case Intrinsic::x86_mmx_punpcklbw: // llvm.x86.mmx.punpcklbw
case Intrinsic::x86_ssse3_pshuf_b: // llvm.x86.ssse3.pshuf.b
case Intrinsic::x86_ssse3_psign_b: // llvm.x86.ssse3.psign.b
ResultTy = VectorType::get(IntegerType::get(Context, 8), 8);
@@ -5775,259 +7090,6 @@
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 8));
ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 8));
break;
- case Intrinsic::eh_unwind_init: // llvm.eh.unwind.init
- case Intrinsic::ppc_altivec_dssall: // llvm.ppc.altivec.dssall
- case Intrinsic::ppc_sync: // llvm.ppc.sync
- case Intrinsic::trap: // llvm.trap
- case Intrinsic::x86_mmx_emms: // llvm.x86.mmx.emms
- case Intrinsic::x86_mmx_femms: // llvm.x86.mmx.femms
- case Intrinsic::x86_sse2_lfence: // llvm.x86.sse2.lfence
- case Intrinsic::x86_sse2_mfence: // llvm.x86.sse2.mfence
- case Intrinsic::x86_sse_sfence: // llvm.x86.sse.sfence
- ResultTy = Type::getVoidTy(Context);
- break;
- case Intrinsic::invariant_end: // llvm.invariant.end
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(StructType::get(Context)));
- ArgTys.push_back(IntegerType::get(Context, 64));
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- break;
- case Intrinsic::memory_barrier: // llvm.memory.barrier
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(IntegerType::get(Context, 1));
- ArgTys.push_back(IntegerType::get(Context, 1));
- ArgTys.push_back(IntegerType::get(Context, 1));
- ArgTys.push_back(IntegerType::get(Context, 1));
- ArgTys.push_back(IntegerType::get(Context, 1));
- break;
- case Intrinsic::eh_sjlj_callsite: // llvm.eh.sjlj.callsite
- case Intrinsic::pcmarker: // llvm.pcmarker
- case Intrinsic::ppc_altivec_dss: // llvm.ppc.altivec.dss
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(IntegerType::get(Context, 32));
- break;
- case Intrinsic::x86_sse3_mwait: // llvm.x86.sse3.mwait
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(IntegerType::get(Context, 32));
- ArgTys.push_back(IntegerType::get(Context, 32));
- break;
- case Intrinsic::eh_return_i32: // llvm.eh.return.i32
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(IntegerType::get(Context, 32));
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- break;
- case Intrinsic::eh_return_i64: // llvm.eh.return.i64
- case Intrinsic::lifetime_end: // llvm.lifetime.end
- case Intrinsic::lifetime_start: // llvm.lifetime.start
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(IntegerType::get(Context, 64));
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- break;
- case Intrinsic::dbg_value: // llvm.dbg.value
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(Type::getMetadataTy(Context));
- ArgTys.push_back(IntegerType::get(Context, 64));
- ArgTys.push_back(Type::getMetadataTy(Context));
- break;
- case Intrinsic::dbg_declare: // llvm.dbg.declare
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(Type::getMetadataTy(Context));
- ArgTys.push_back(Type::getMetadataTy(Context));
- break;
- case Intrinsic::eh_sjlj_longjmp: // llvm.eh.sjlj.longjmp
- case Intrinsic::ppc_dcba: // llvm.ppc.dcba
- case Intrinsic::ppc_dcbf: // llvm.ppc.dcbf
- case Intrinsic::ppc_dcbi: // llvm.ppc.dcbi
- case Intrinsic::ppc_dcbst: // llvm.ppc.dcbst
- case Intrinsic::ppc_dcbt: // llvm.ppc.dcbt
- case Intrinsic::ppc_dcbtst: // llvm.ppc.dcbtst
- case Intrinsic::ppc_dcbz: // llvm.ppc.dcbz
- case Intrinsic::ppc_dcbzl: // llvm.ppc.dcbzl
- case Intrinsic::stackrestore: // llvm.stackrestore
- case Intrinsic::vaend: // llvm.va_end
- case Intrinsic::vastart: // llvm.va_start
- case Intrinsic::x86_sse2_clflush: // llvm.x86.sse2.clflush
- case Intrinsic::x86_sse_ldmxcsr: // llvm.x86.sse.ldmxcsr
- case Intrinsic::x86_sse_stmxcsr: // llvm.x86.sse.stmxcsr
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- break;
- case Intrinsic::arm_neon_vst1: // llvm.arm.neon.vst1
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(Tys[0]);
- break;
- case Intrinsic::arm_neon_vst2: // llvm.arm.neon.vst2
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(Tys[0]);
- break;
- case Intrinsic::arm_neon_vst3: // llvm.arm.neon.vst3
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(Tys[0]);
- break;
- case Intrinsic::arm_neon_vst4: // llvm.arm.neon.vst4
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(Tys[0]);
- break;
- case Intrinsic::arm_neon_vst2lane: // llvm.arm.neon.vst2lane
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(IntegerType::get(Context, 32));
- break;
- case Intrinsic::arm_neon_vst3lane: // llvm.arm.neon.vst3lane
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(IntegerType::get(Context, 32));
- break;
- case Intrinsic::arm_neon_vst4lane: // llvm.arm.neon.vst4lane
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(IntegerType::get(Context, 32));
- break;
- case Intrinsic::longjmp: // llvm.longjmp
- case Intrinsic::siglongjmp: // llvm.siglongjmp
- case Intrinsic::x86_sse2_movnt_i: // llvm.x86.sse2.movnt.i
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(IntegerType::get(Context, 32));
- break;
- case Intrinsic::ppc_altivec_dst: // llvm.ppc.altivec.dst
- case Intrinsic::ppc_altivec_dstst: // llvm.ppc.altivec.dstst
- case Intrinsic::ppc_altivec_dststt: // llvm.ppc.altivec.dststt
- case Intrinsic::ppc_altivec_dstt: // llvm.ppc.altivec.dstt
- case Intrinsic::prefetch: // llvm.prefetch
- case Intrinsic::x86_sse3_monitor: // llvm.x86.sse3.monitor
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(IntegerType::get(Context, 32));
- ArgTys.push_back(IntegerType::get(Context, 32));
- break;
- case Intrinsic::memset: // llvm.memset
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(IntegerType::get(Context, 8));
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(IntegerType::get(Context, 32));
- break;
- case Intrinsic::vacopy: // llvm.va_copy
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- break;
- case Intrinsic::memcpy: // llvm.memcpy
- case Intrinsic::memmove: // llvm.memmove
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(Tys[0]);
- ArgTys.push_back(IntegerType::get(Context, 32));
- break;
- case Intrinsic::var_annotation: // llvm.var.annotation
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(IntegerType::get(Context, 32));
- break;
- case Intrinsic::gcwrite: // llvm.gcwrite
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(PointerType::getUnqual(PointerType::getUnqual(IntegerType::get(Context, 8))));
- break;
- case Intrinsic::stackprotector: // llvm.stackprotector
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(PointerType::getUnqual(PointerType::getUnqual(IntegerType::get(Context, 8))));
- break;
- case Intrinsic::x86_sse2_storeu_dq: // llvm.x86.sse2.storeu.dq
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 16));
- break;
- case Intrinsic::x86_mmx_movnt_dq: // llvm.x86.mmx.movnt.dq
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 1));
- break;
- case Intrinsic::x86_sse2_movnt_pd: // llvm.x86.sse2.movnt.pd
- case Intrinsic::x86_sse2_storeu_pd: // llvm.x86.sse2.storeu.pd
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(VectorType::get(Type::getDoubleTy(Context), 2));
- break;
- case Intrinsic::x86_sse2_movnt_dq: // llvm.x86.sse2.movnt.dq
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(VectorType::get(IntegerType::get(Context, 64), 2));
- break;
- case Intrinsic::x86_sse_movnt_ps: // llvm.x86.sse.movnt.ps
- case Intrinsic::x86_sse_storeu_ps: // llvm.x86.sse.storeu.ps
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(VectorType::get(Type::getFloatTy(Context), 4));
- break;
- case Intrinsic::x86_sse2_storel_dq: // llvm.x86.sse2.storel.dq
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 4));
- break;
- case Intrinsic::gcroot: // llvm.gcroot
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(PointerType::getUnqual(PointerType::getUnqual(IntegerType::get(Context, 8))));
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- break;
- case Intrinsic::ppc_altivec_stvebx: // llvm.ppc.altivec.stvebx
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 16));
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- break;
- case Intrinsic::x86_sse2_maskmov_dqu: // llvm.x86.sse2.maskmov.dqu
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 16));
- ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 16));
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- break;
- case Intrinsic::ppc_altivec_mtvscr: // llvm.ppc.altivec.mtvscr
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 4));
- break;
- case Intrinsic::ppc_altivec_stvewx: // llvm.ppc.altivec.stvewx
- case Intrinsic::ppc_altivec_stvx: // llvm.ppc.altivec.stvx
- case Intrinsic::ppc_altivec_stvxl: // llvm.ppc.altivec.stvxl
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(VectorType::get(IntegerType::get(Context, 32), 4));
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- break;
- case Intrinsic::ppc_altivec_stvehx: // llvm.ppc.altivec.stvehx
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(VectorType::get(IntegerType::get(Context, 16), 8));
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- break;
- case Intrinsic::x86_mmx_maskmovq: // llvm.x86.mmx.maskmovq
- ResultTy = Type::getVoidTy(Context);
- ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 8));
- ArgTys.push_back(VectorType::get(IntegerType::get(Context, 8), 8));
- ArgTys.push_back(PointerType::getUnqual(IntegerType::get(Context, 8)));
- break;
}
#endif
@@ -6038,12 +7100,7 @@ AttrListPtr Intrinsic::getAttributes(ID id) { // No intrinsic can throw excepti
switch (id) {
default: break;
case Intrinsic::alpha_umulh:
- case Intrinsic::arm_neon_vabals:
- case Intrinsic::arm_neon_vabalu:
- case Intrinsic::arm_neon_vabas:
- case Intrinsic::arm_neon_vabau:
- case Intrinsic::arm_neon_vabdls:
- case Intrinsic::arm_neon_vabdlu:
+ case Intrinsic::arm_get_fpscr:
case Intrinsic::arm_neon_vabds:
case Intrinsic::arm_neon_vabdu:
case Intrinsic::arm_neon_vabs:
@@ -6052,10 +7109,6 @@ AttrListPtr Intrinsic::getAttributes(ID id) { // No intrinsic can throw excepti
case Intrinsic::arm_neon_vacgtd:
case Intrinsic::arm_neon_vacgtq:
case Intrinsic::arm_neon_vaddhn:
- case Intrinsic::arm_neon_vaddls:
- case Intrinsic::arm_neon_vaddlu:
- case Intrinsic::arm_neon_vaddws:
- case Intrinsic::arm_neon_vaddwu:
case Intrinsic::arm_neon_vcls:
case Intrinsic::arm_neon_vclz:
case Intrinsic::arm_neon_vcnt:
@@ -6071,16 +7124,7 @@ AttrListPtr Intrinsic::getAttributes(ID id) { // No intrinsic can throw excepti
case Intrinsic::arm_neon_vmaxu:
case Intrinsic::arm_neon_vmins:
case Intrinsic::arm_neon_vminu:
- case Intrinsic::arm_neon_vmlals:
- case Intrinsic::arm_neon_vmlalu:
- case Intrinsic::arm_neon_vmlsls:
- case Intrinsic::arm_neon_vmlslu:
- case Intrinsic::arm_neon_vmovls:
- case Intrinsic::arm_neon_vmovlu:
- case Intrinsic::arm_neon_vmovn:
case Intrinsic::arm_neon_vmullp:
- case Intrinsic::arm_neon_vmulls:
- case Intrinsic::arm_neon_vmullu:
case Intrinsic::arm_neon_vmulp:
case Intrinsic::arm_neon_vpadals:
case Intrinsic::arm_neon_vpadalu:
@@ -6134,10 +7178,6 @@ AttrListPtr Intrinsic::getAttributes(ID id) { // No intrinsic can throw excepti
case Intrinsic::arm_neon_vshifts:
case Intrinsic::arm_neon_vshiftu:
case Intrinsic::arm_neon_vsubhn:
- case Intrinsic::arm_neon_vsubls:
- case Intrinsic::arm_neon_vsublu:
- case Intrinsic::arm_neon_vsubws:
- case Intrinsic::arm_neon_vsubwu:
case Intrinsic::arm_neon_vtbl1:
case Intrinsic::arm_neon_vtbl2:
case Intrinsic::arm_neon_vtbl3:
@@ -6146,17 +7186,23 @@ AttrListPtr Intrinsic::getAttributes(ID id) { // No intrinsic can throw excepti
case Intrinsic::arm_neon_vtbx2:
case Intrinsic::arm_neon_vtbx3:
case Intrinsic::arm_neon_vtbx4:
+ case Intrinsic::arm_qadd:
+ case Intrinsic::arm_qsub:
+ case Intrinsic::arm_ssat:
case Intrinsic::arm_thread_pointer:
+ case Intrinsic::arm_usat:
+ case Intrinsic::arm_vcvtr:
+ case Intrinsic::arm_vcvtru:
case Intrinsic::bswap:
+ case Intrinsic::convert_from_fp16:
+ case Intrinsic::convert_to_fp16:
case Intrinsic::ctlz:
case Intrinsic::ctpop:
case Intrinsic::cttz:
case Intrinsic::dbg_declare:
case Intrinsic::dbg_value:
case Intrinsic::eh_sjlj_callsite:
- case Intrinsic::eh_sjlj_longjmp:
case Intrinsic::eh_sjlj_lsda:
- case Intrinsic::eh_sjlj_setjmp:
case Intrinsic::frameaddress:
case Intrinsic::ppc_altivec_lvsl:
case Intrinsic::ppc_altivec_lvsr:
@@ -6376,13 +7422,92 @@ AttrListPtr Intrinsic::getAttributes(ID id) { // No intrinsic can throw excepti
case Intrinsic::uadd_with_overflow:
case Intrinsic::umul_with_overflow:
case Intrinsic::usub_with_overflow:
+ case Intrinsic::x86_aesni_aesdec:
+ case Intrinsic::x86_aesni_aesdeclast:
+ case Intrinsic::x86_aesni_aesenc:
+ case Intrinsic::x86_aesni_aesenclast:
+ case Intrinsic::x86_aesni_aesimc:
+ case Intrinsic::x86_aesni_aeskeygenassist:
+ case Intrinsic::x86_avx_addsub_pd_256:
+ case Intrinsic::x86_avx_addsub_ps_256:
+ case Intrinsic::x86_avx_blend_pd_256:
+ case Intrinsic::x86_avx_blend_ps_256:
+ case Intrinsic::x86_avx_blendv_pd_256:
+ case Intrinsic::x86_avx_blendv_ps_256:
+ case Intrinsic::x86_avx_cmp_pd_256:
+ case Intrinsic::x86_avx_cmp_ps_256:
+ case Intrinsic::x86_avx_cvt_pd2_ps_256:
+ case Intrinsic::x86_avx_cvt_pd2dq_256:
+ case Intrinsic::x86_avx_cvt_ps2_pd_256:
+ case Intrinsic::x86_avx_cvt_ps2dq_256:
+ case Intrinsic::x86_avx_cvtdq2_pd_256:
+ case Intrinsic::x86_avx_cvtdq2_ps_256:
+ case Intrinsic::x86_avx_cvtt_pd2dq_256:
+ case Intrinsic::x86_avx_cvtt_ps2dq_256:
+ case Intrinsic::x86_avx_dp_ps_256:
+ case Intrinsic::x86_avx_hadd_pd_256:
+ case Intrinsic::x86_avx_hadd_ps_256:
+ case Intrinsic::x86_avx_hsub_pd_256:
+ case Intrinsic::x86_avx_hsub_ps_256:
+ case Intrinsic::x86_avx_max_pd_256:
+ case Intrinsic::x86_avx_max_ps_256:
+ case Intrinsic::x86_avx_min_pd_256:
+ case Intrinsic::x86_avx_min_ps_256:
+ case Intrinsic::x86_avx_movmsk_pd_256:
+ case Intrinsic::x86_avx_movmsk_ps_256:
+ case Intrinsic::x86_avx_ptestc_256:
+ case Intrinsic::x86_avx_ptestnzc_256:
+ case Intrinsic::x86_avx_ptestz_256:
+ case Intrinsic::x86_avx_rcp_ps_256:
+ case Intrinsic::x86_avx_round_pd_256:
+ case Intrinsic::x86_avx_round_ps_256:
+ case Intrinsic::x86_avx_rsqrt_ps_256:
+ case Intrinsic::x86_avx_sqrt_pd_256:
+ case Intrinsic::x86_avx_sqrt_ps_256:
+ case Intrinsic::x86_avx_vextractf128_pd_256:
+ case Intrinsic::x86_avx_vextractf128_ps_256:
+ case Intrinsic::x86_avx_vextractf128_si_256:
+ case Intrinsic::x86_avx_vinsertf128_pd_256:
+ case Intrinsic::x86_avx_vinsertf128_ps_256:
+ case Intrinsic::x86_avx_vinsertf128_si_256:
+ case Intrinsic::x86_avx_vperm2f128_pd_256:
+ case Intrinsic::x86_avx_vperm2f128_ps_256:
+ case Intrinsic::x86_avx_vperm2f128_si_256:
+ case Intrinsic::x86_avx_vpermil_pd:
+ case Intrinsic::x86_avx_vpermil_pd_256:
+ case Intrinsic::x86_avx_vpermil_ps:
+ case Intrinsic::x86_avx_vpermil_ps_256:
+ case Intrinsic::x86_avx_vpermilvar_pd:
+ case Intrinsic::x86_avx_vpermilvar_pd_256:
+ case Intrinsic::x86_avx_vpermilvar_ps:
+ case Intrinsic::x86_avx_vpermilvar_ps_256:
+ case Intrinsic::x86_avx_vtestc_pd:
+ case Intrinsic::x86_avx_vtestc_pd_256:
+ case Intrinsic::x86_avx_vtestc_ps:
+ case Intrinsic::x86_avx_vtestc_ps_256:
+ case Intrinsic::x86_avx_vtestnzc_pd:
+ case Intrinsic::x86_avx_vtestnzc_pd_256:
+ case Intrinsic::x86_avx_vtestnzc_ps:
+ case Intrinsic::x86_avx_vtestnzc_ps_256:
+ case Intrinsic::x86_avx_vtestz_pd:
+ case Intrinsic::x86_avx_vtestz_pd_256:
+ case Intrinsic::x86_avx_vtestz_ps:
+ case Intrinsic::x86_avx_vtestz_ps_256:
+ case Intrinsic::x86_mmx_cvtsi32_si64:
+ case Intrinsic::x86_mmx_cvtsi64_si32:
case Intrinsic::x86_mmx_packssdw:
case Intrinsic::x86_mmx_packsswb:
case Intrinsic::x86_mmx_packuswb:
+ case Intrinsic::x86_mmx_padd_b:
+ case Intrinsic::x86_mmx_padd_d:
+ case Intrinsic::x86_mmx_padd_q:
+ case Intrinsic::x86_mmx_padd_w:
case Intrinsic::x86_mmx_padds_b:
case Intrinsic::x86_mmx_padds_w:
case Intrinsic::x86_mmx_paddus_b:
case Intrinsic::x86_mmx_paddus_w:
+ case Intrinsic::x86_mmx_pand:
+ case Intrinsic::x86_mmx_pandn:
case Intrinsic::x86_mmx_pavg_b:
case Intrinsic::x86_mmx_pavg_w:
case Intrinsic::x86_mmx_pcmpeq_b:
@@ -6391,6 +7516,8 @@ AttrListPtr Intrinsic::getAttributes(ID id) { // No intrinsic can throw excepti
case Intrinsic::x86_mmx_pcmpgt_b:
case Intrinsic::x86_mmx_pcmpgt_d:
case Intrinsic::x86_mmx_pcmpgt_w:
+ case Intrinsic::x86_mmx_pextr_w:
+ case Intrinsic::x86_mmx_pinsr_w:
case Intrinsic::x86_mmx_pmadd_wd:
case Intrinsic::x86_mmx_pmaxs_w:
case Intrinsic::x86_mmx_pmaxu_b:
@@ -6399,7 +7526,9 @@ AttrListPtr Intrinsic::getAttributes(ID id) { // No intrinsic can throw excepti
case Intrinsic::x86_mmx_pmovmskb:
case Intrinsic::x86_mmx_pmulh_w:
case Intrinsic::x86_mmx_pmulhu_w:
+ case Intrinsic::x86_mmx_pmull_w:
case Intrinsic::x86_mmx_pmulu_dq:
+ case Intrinsic::x86_mmx_por:
case Intrinsic::x86_mmx_psad_bw:
case Intrinsic::x86_mmx_psll_d:
case Intrinsic::x86_mmx_psll_q:
@@ -6417,10 +7546,25 @@ AttrListPtr Intrinsic::getAttributes(ID id) { // No intrinsic can throw excepti
case Intrinsic::x86_mmx_psrli_d:
case Intrinsic::x86_mmx_psrli_q:
case Intrinsic::x86_mmx_psrli_w:
+ case Intrinsic::x86_mmx_psub_b:
+ case Intrinsic::x86_mmx_psub_d:
+ case Intrinsic::x86_mmx_psub_q:
+ case Intrinsic::x86_mmx_psub_w:
case Intrinsic::x86_mmx_psubs_b:
case Intrinsic::x86_mmx_psubs_w:
case Intrinsic::x86_mmx_psubus_b:
case Intrinsic::x86_mmx_psubus_w:
+ case Intrinsic::x86_mmx_punpckhbw:
+ case Intrinsic::x86_mmx_punpckhdq:
+ case Intrinsic::x86_mmx_punpckhwd:
+ case Intrinsic::x86_mmx_punpcklbw:
+ case Intrinsic::x86_mmx_punpckldq:
+ case Intrinsic::x86_mmx_punpcklwd:
+ case Intrinsic::x86_mmx_pxor:
+ case Intrinsic::x86_mmx_vec_ext_d:
+ case Intrinsic::x86_mmx_vec_init_b:
+ case Intrinsic::x86_mmx_vec_init_d:
+ case Intrinsic::x86_mmx_vec_init_w:
case Intrinsic::x86_sse2_add_sd:
case Intrinsic::x86_sse2_cmp_pd:
case Intrinsic::x86_sse2_cmp_sd:
@@ -6555,7 +7699,6 @@ AttrListPtr Intrinsic::getAttributes(ID id) { // No intrinsic can throw excepti
case Intrinsic::x86_sse41_pmovzxwd:
case Intrinsic::x86_sse41_pmovzxwq:
case Intrinsic::x86_sse41_pmuldq:
- case Intrinsic::x86_sse41_pmulld:
case Intrinsic::x86_sse41_ptestc:
case Intrinsic::x86_sse41_ptestnzc:
case Intrinsic::x86_sse41_ptestz:
@@ -6565,8 +7708,9 @@ AttrListPtr Intrinsic::getAttributes(ID id) { // No intrinsic can throw excepti
case Intrinsic::x86_sse41_round_ss:
case Intrinsic::x86_sse42_crc32_16:
case Intrinsic::x86_sse42_crc32_32:
- case Intrinsic::x86_sse42_crc32_64:
case Intrinsic::x86_sse42_crc32_8:
+ case Intrinsic::x86_sse42_crc64_64:
+ case Intrinsic::x86_sse42_crc64_8:
case Intrinsic::x86_sse42_pcmpestri128:
case Intrinsic::x86_sse42_pcmpestria128:
case Intrinsic::x86_sse42_pcmpestric128:
@@ -6629,8 +7773,6 @@ AttrListPtr Intrinsic::getAttributes(ID id) { // No intrinsic can throw excepti
case Intrinsic::x86_ssse3_pabs_d_128:
case Intrinsic::x86_ssse3_pabs_w:
case Intrinsic::x86_ssse3_pabs_w_128:
- case Intrinsic::x86_ssse3_palign_r:
- case Intrinsic::x86_ssse3_palign_r_128:
case Intrinsic::x86_ssse3_phadd_d:
case Intrinsic::x86_ssse3_phadd_d_128:
case Intrinsic::x86_ssse3_phadd_sw:
@@ -6649,6 +7791,7 @@ AttrListPtr Intrinsic::getAttributes(ID id) { // No intrinsic can throw excepti
case Intrinsic::x86_ssse3_pmul_hr_sw_128:
case Intrinsic::x86_ssse3_pshuf_b:
case Intrinsic::x86_ssse3_pshuf_b_128:
+ case Intrinsic::x86_ssse3_pshuf_w:
case Intrinsic::x86_ssse3_psign_b:
case Intrinsic::x86_ssse3_psign_b_128:
case Intrinsic::x86_ssse3_psign_d:
@@ -6686,6 +7829,19 @@ AttrListPtr Intrinsic::getAttributes(ID id) { // No intrinsic can throw excepti
case Intrinsic::ppc_altivec_mfvscr:
case Intrinsic::sin:
case Intrinsic::sqrt:
+ case Intrinsic::x86_avx_ldu_dq_256:
+ case Intrinsic::x86_avx_loadu_dq_256:
+ case Intrinsic::x86_avx_loadu_pd_256:
+ case Intrinsic::x86_avx_loadu_ps_256:
+ case Intrinsic::x86_avx_maskload_pd:
+ case Intrinsic::x86_avx_maskload_pd_256:
+ case Intrinsic::x86_avx_maskload_ps:
+ case Intrinsic::x86_avx_maskload_ps_256:
+ case Intrinsic::x86_avx_vbroadcast_sd_256:
+ case Intrinsic::x86_avx_vbroadcastf128_pd_256:
+ case Intrinsic::x86_avx_vbroadcastf128_ps_256:
+ case Intrinsic::x86_avx_vbroadcastss:
+ case Intrinsic::x86_avx_vbroadcastss_256:
case Intrinsic::x86_sse2_loadu_dq:
case Intrinsic::x86_sse2_loadu_pd:
case Intrinsic::x86_sse3_ldu_dq:
@@ -6798,17 +7954,7 @@ default:
return UnknownModRefBehavior;
case Intrinsic::alpha_umulh:
return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vabals:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vabalu:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vabas:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vabau:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vabdls:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vabdlu:
+case Intrinsic::arm_get_fpscr:
return DoesNotAccessMemory;
case Intrinsic::arm_neon_vabds:
return DoesNotAccessMemory;
@@ -6826,14 +7972,6 @@ case Intrinsic::arm_neon_vacgtq:
return DoesNotAccessMemory;
case Intrinsic::arm_neon_vaddhn:
return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vaddls:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vaddlu:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vaddws:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vaddwu:
- return DoesNotAccessMemory;
case Intrinsic::arm_neon_vcls:
return DoesNotAccessMemory;
case Intrinsic::arm_neon_vclz:
@@ -6878,26 +8016,8 @@ case Intrinsic::arm_neon_vmins:
return DoesNotAccessMemory;
case Intrinsic::arm_neon_vminu:
return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vmlals:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vmlalu:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vmlsls:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vmlslu:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vmovls:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vmovlu:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vmovn:
- return DoesNotAccessMemory;
case Intrinsic::arm_neon_vmullp:
return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vmulls:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vmullu:
- return DoesNotAccessMemory;
case Intrinsic::arm_neon_vmulp:
return DoesNotAccessMemory;
case Intrinsic::arm_neon_vpadals:
@@ -7018,14 +8138,6 @@ case Intrinsic::arm_neon_vst4lane:
return AccessesArguments;
case Intrinsic::arm_neon_vsubhn:
return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vsubls:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vsublu:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vsubws:
- return DoesNotAccessMemory;
-case Intrinsic::arm_neon_vsubwu:
- return DoesNotAccessMemory;
case Intrinsic::arm_neon_vtbl1:
return DoesNotAccessMemory;
case Intrinsic::arm_neon_vtbl2:
@@ -7042,8 +8154,20 @@ case Intrinsic::arm_neon_vtbx3:
return DoesNotAccessMemory;
case Intrinsic::arm_neon_vtbx4:
return DoesNotAccessMemory;
+case Intrinsic::arm_qadd:
+ return DoesNotAccessMemory;
+case Intrinsic::arm_qsub:
+ return DoesNotAccessMemory;
+case Intrinsic::arm_ssat:
+ return DoesNotAccessMemory;
case Intrinsic::arm_thread_pointer:
return DoesNotAccessMemory;
+case Intrinsic::arm_usat:
+ return DoesNotAccessMemory;
+case Intrinsic::arm_vcvtr:
+ return DoesNotAccessMemory;
+case Intrinsic::arm_vcvtru:
+ return DoesNotAccessMemory;
case Intrinsic::atomic_cmp_swap:
return AccessesArguments;
case Intrinsic::atomic_load_add:
@@ -7070,6 +8194,10 @@ case Intrinsic::atomic_swap:
return AccessesArguments;
case Intrinsic::bswap:
return DoesNotAccessMemory;
+case Intrinsic::convert_from_fp16:
+ return DoesNotAccessMemory;
+case Intrinsic::convert_to_fp16:
+ return DoesNotAccessMemory;
case Intrinsic::cos:
return OnlyReadsMemory;
case Intrinsic::ctlz:
@@ -7086,12 +8214,8 @@ case Intrinsic::eh_exception:
return OnlyReadsMemory;
case Intrinsic::eh_sjlj_callsite:
return DoesNotAccessMemory;
-case Intrinsic::eh_sjlj_longjmp:
- return DoesNotAccessMemory;
case Intrinsic::eh_sjlj_lsda:
return DoesNotAccessMemory;
-case Intrinsic::eh_sjlj_setjmp:
- return DoesNotAccessMemory;
case Intrinsic::exp:
return OnlyReadsMemory;
case Intrinsic::exp2:
@@ -7584,12 +8708,192 @@ case Intrinsic::umul_with_overflow:
return DoesNotAccessMemory;
case Intrinsic::usub_with_overflow:
return DoesNotAccessMemory;
+case Intrinsic::x86_aesni_aesdec:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_aesni_aesdeclast:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_aesni_aesenc:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_aesni_aesenclast:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_aesni_aesimc:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_aesni_aeskeygenassist:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_addsub_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_addsub_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_blend_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_blend_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_blendv_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_blendv_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_cmp_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_cmp_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_cvt_pd2_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_cvt_pd2dq_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_cvt_ps2_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_cvt_ps2dq_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_cvtdq2_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_cvtdq2_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_cvtt_pd2dq_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_cvtt_ps2dq_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_dp_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_hadd_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_hadd_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_hsub_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_hsub_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_ldu_dq_256:
+ return OnlyReadsMemory;
+case Intrinsic::x86_avx_loadu_dq_256:
+ return OnlyReadsMemory;
+case Intrinsic::x86_avx_loadu_pd_256:
+ return OnlyReadsMemory;
+case Intrinsic::x86_avx_loadu_ps_256:
+ return OnlyReadsMemory;
+case Intrinsic::x86_avx_maskload_pd:
+ return OnlyReadsMemory;
+case Intrinsic::x86_avx_maskload_pd_256:
+ return OnlyReadsMemory;
+case Intrinsic::x86_avx_maskload_ps:
+ return OnlyReadsMemory;
+case Intrinsic::x86_avx_maskload_ps_256:
+ return OnlyReadsMemory;
+case Intrinsic::x86_avx_max_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_max_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_min_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_min_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_movmsk_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_movmsk_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_ptestc_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_ptestnzc_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_ptestz_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_rcp_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_round_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_round_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_rsqrt_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_sqrt_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_sqrt_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vbroadcast_sd_256:
+ return OnlyReadsMemory;
+case Intrinsic::x86_avx_vbroadcastf128_pd_256:
+ return OnlyReadsMemory;
+case Intrinsic::x86_avx_vbroadcastf128_ps_256:
+ return OnlyReadsMemory;
+case Intrinsic::x86_avx_vbroadcastss:
+ return OnlyReadsMemory;
+case Intrinsic::x86_avx_vbroadcastss_256:
+ return OnlyReadsMemory;
+case Intrinsic::x86_avx_vextractf128_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vextractf128_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vextractf128_si_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vinsertf128_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vinsertf128_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vinsertf128_si_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vperm2f128_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vperm2f128_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vperm2f128_si_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vpermil_pd:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vpermil_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vpermil_ps:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vpermil_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vpermilvar_pd:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vpermilvar_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vpermilvar_ps:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vpermilvar_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vtestc_pd:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vtestc_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vtestc_ps:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vtestc_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vtestnzc_pd:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vtestnzc_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vtestnzc_ps:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vtestnzc_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vtestz_pd:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vtestz_pd_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vtestz_ps:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_avx_vtestz_ps_256:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_cvtsi32_si64:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_cvtsi64_si32:
+ return DoesNotAccessMemory;
case Intrinsic::x86_mmx_packssdw:
return DoesNotAccessMemory;
case Intrinsic::x86_mmx_packsswb:
return DoesNotAccessMemory;
case Intrinsic::x86_mmx_packuswb:
return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_padd_b:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_padd_d:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_padd_q:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_padd_w:
+ return DoesNotAccessMemory;
case Intrinsic::x86_mmx_padds_b:
return DoesNotAccessMemory;
case Intrinsic::x86_mmx_padds_w:
@@ -7598,6 +8902,10 @@ case Intrinsic::x86_mmx_paddus_b:
return DoesNotAccessMemory;
case Intrinsic::x86_mmx_paddus_w:
return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_pand:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_pandn:
+ return DoesNotAccessMemory;
case Intrinsic::x86_mmx_pavg_b:
return DoesNotAccessMemory;
case Intrinsic::x86_mmx_pavg_w:
@@ -7614,6 +8922,10 @@ case Intrinsic::x86_mmx_pcmpgt_d:
return DoesNotAccessMemory;
case Intrinsic::x86_mmx_pcmpgt_w:
return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_pextr_w:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_pinsr_w:
+ return DoesNotAccessMemory;
case Intrinsic::x86_mmx_pmadd_wd:
return DoesNotAccessMemory;
case Intrinsic::x86_mmx_pmaxs_w:
@@ -7630,8 +8942,12 @@ case Intrinsic::x86_mmx_pmulh_w:
return DoesNotAccessMemory;
case Intrinsic::x86_mmx_pmulhu_w:
return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_pmull_w:
+ return DoesNotAccessMemory;
case Intrinsic::x86_mmx_pmulu_dq:
return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_por:
+ return DoesNotAccessMemory;
case Intrinsic::x86_mmx_psad_bw:
return DoesNotAccessMemory;
case Intrinsic::x86_mmx_psll_d:
@@ -7666,6 +8982,14 @@ case Intrinsic::x86_mmx_psrli_q:
return DoesNotAccessMemory;
case Intrinsic::x86_mmx_psrli_w:
return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_psub_b:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_psub_d:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_psub_q:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_psub_w:
+ return DoesNotAccessMemory;
case Intrinsic::x86_mmx_psubs_b:
return DoesNotAccessMemory;
case Intrinsic::x86_mmx_psubs_w:
@@ -7674,6 +8998,28 @@ case Intrinsic::x86_mmx_psubus_b:
return DoesNotAccessMemory;
case Intrinsic::x86_mmx_psubus_w:
return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_punpckhbw:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_punpckhdq:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_punpckhwd:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_punpcklbw:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_punpckldq:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_punpcklwd:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_pxor:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_vec_ext_d:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_vec_init_b:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_vec_init_d:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_mmx_vec_init_w:
+ return DoesNotAccessMemory;
case Intrinsic::x86_sse2_add_sd:
return DoesNotAccessMemory;
case Intrinsic::x86_sse2_cmp_pd:
@@ -7950,8 +9296,6 @@ case Intrinsic::x86_sse41_pmovzxwq:
return DoesNotAccessMemory;
case Intrinsic::x86_sse41_pmuldq:
return DoesNotAccessMemory;
-case Intrinsic::x86_sse41_pmulld:
- return DoesNotAccessMemory;
case Intrinsic::x86_sse41_ptestc:
return DoesNotAccessMemory;
case Intrinsic::x86_sse41_ptestnzc:
@@ -7970,10 +9314,12 @@ case Intrinsic::x86_sse42_crc32_16:
return DoesNotAccessMemory;
case Intrinsic::x86_sse42_crc32_32:
return DoesNotAccessMemory;
-case Intrinsic::x86_sse42_crc32_64:
- return DoesNotAccessMemory;
case Intrinsic::x86_sse42_crc32_8:
return DoesNotAccessMemory;
+case Intrinsic::x86_sse42_crc64_64:
+ return DoesNotAccessMemory;
+case Intrinsic::x86_sse42_crc64_8:
+ return DoesNotAccessMemory;
case Intrinsic::x86_sse42_pcmpestri128:
return DoesNotAccessMemory;
case Intrinsic::x86_sse42_pcmpestria128:
@@ -8100,10 +9446,6 @@ case Intrinsic::x86_ssse3_pabs_w:
return DoesNotAccessMemory;
case Intrinsic::x86_ssse3_pabs_w_128:
return DoesNotAccessMemory;
-case Intrinsic::x86_ssse3_palign_r:
- return DoesNotAccessMemory;
-case Intrinsic::x86_ssse3_palign_r_128:
- return DoesNotAccessMemory;
case Intrinsic::x86_ssse3_phadd_d:
return DoesNotAccessMemory;
case Intrinsic::x86_ssse3_phadd_d_128:
@@ -8140,6 +9482,8 @@ case Intrinsic::x86_ssse3_pshuf_b:
return DoesNotAccessMemory;
case Intrinsic::x86_ssse3_pshuf_b_128:
return DoesNotAccessMemory;
+case Intrinsic::x86_ssse3_pshuf_w:
+ return DoesNotAccessMemory;
case Intrinsic::x86_ssse3_psign_b:
return DoesNotAccessMemory;
case Intrinsic::x86_ssse3_psign_b_128:
@@ -8164,7 +9508,13 @@ case Intrinsic::xcore_getid:
switch (F->getIntrinsicID()) {
default: BuiltinName = ""; break;
case Intrinsic::alpha_umulh: BuiltinName = "__builtin_alpha_umulh"; break;
+ case Intrinsic::arm_get_fpscr: BuiltinName = "__builtin_arm_get_fpscr"; break;
+ case Intrinsic::arm_qadd: BuiltinName = "__builtin_arm_qadd"; break;
+ case Intrinsic::arm_qsub: BuiltinName = "__builtin_arm_qsub"; break;
+ case Intrinsic::arm_set_fpscr: BuiltinName = "__builtin_arm_set_fpscr"; break;
+ case Intrinsic::arm_ssat: BuiltinName = "__builtin_arm_ssat"; break;
case Intrinsic::arm_thread_pointer: BuiltinName = "__builtin_thread_pointer"; break;
+ case Intrinsic::arm_usat: BuiltinName = "__builtin_arm_usat"; break;
case Intrinsic::atomic_cmp_swap: BuiltinName = "__sync_val_compare_and_swap"; break;
case Intrinsic::atomic_load_add: BuiltinName = "__sync_fetch_and_add"; break;
case Intrinsic::atomic_load_and: BuiltinName = "__sync_fetch_and_and"; break;
@@ -8177,6 +9527,8 @@ case Intrinsic::xcore_getid:
case Intrinsic::atomic_load_umin: BuiltinName = "__sync_fetch_and_umin"; break;
case Intrinsic::atomic_load_xor: BuiltinName = "__sync_fetch_and_xor"; break;
case Intrinsic::atomic_swap: BuiltinName = "__sync_lock_test_and_set"; break;
+ case Intrinsic::convert_from_fp16: BuiltinName = "__gnu_h2f_ieee"; break;
+ case Intrinsic::convert_to_fp16: BuiltinName = "__gnu_f2h_ieee"; break;
case Intrinsic::eh_unwind_init: BuiltinName = "__builtin_unwind_init"; break;
case Intrinsic::flt_rounds: BuiltinName = "__builtin_flt_rounds"; break;
case Intrinsic::init_trampoline: BuiltinName = "__builtin_init_trampoline"; break;
@@ -8402,6 +9754,102 @@ case Intrinsic::xcore_getid:
case Intrinsic::stackrestore: BuiltinName = "__builtin_stack_restore"; break;
case Intrinsic::stacksave: BuiltinName = "__builtin_stack_save"; break;
case Intrinsic::trap: BuiltinName = "__builtin_trap"; break;
+ case Intrinsic::x86_aesni_aesdec: BuiltinName = "__builtin_ia32_aesdec128"; break;
+ case Intrinsic::x86_aesni_aesdeclast: BuiltinName = "__builtin_ia32_aesdeclast128"; break;
+ case Intrinsic::x86_aesni_aesenc: BuiltinName = "__builtin_ia32_aesenc128"; break;
+ case Intrinsic::x86_aesni_aesenclast: BuiltinName = "__builtin_ia32_aesenclast128"; break;
+ case Intrinsic::x86_aesni_aesimc: BuiltinName = "__builtin_ia32_aesimc128"; break;
+ case Intrinsic::x86_aesni_aeskeygenassist: BuiltinName = "__builtin_ia32_aeskeygenassist128"; break;
+ case Intrinsic::x86_avx_addsub_pd_256: BuiltinName = "__builtin_ia32_addsubpd256"; break;
+ case Intrinsic::x86_avx_addsub_ps_256: BuiltinName = "__builtin_ia32_addsubps256"; break;
+ case Intrinsic::x86_avx_blend_pd_256: BuiltinName = "__builtin_ia32_blendpd256"; break;
+ case Intrinsic::x86_avx_blend_ps_256: BuiltinName = "__builtin_ia32_blendps256"; break;
+ case Intrinsic::x86_avx_blendv_pd_256: BuiltinName = "__builtin_ia32_blendvpd256"; break;
+ case Intrinsic::x86_avx_blendv_ps_256: BuiltinName = "__builtin_ia32_blendvps256"; break;
+ case Intrinsic::x86_avx_cmp_pd_256: BuiltinName = "__builtin_ia32_cmppd256"; break;
+ case Intrinsic::x86_avx_cmp_ps_256: BuiltinName = "__builtin_ia32_cmpps256"; break;
+ case Intrinsic::x86_avx_cvt_pd2_ps_256: BuiltinName = "__builtin_ia32_cvtpd2ps256"; break;
+ case Intrinsic::x86_avx_cvt_pd2dq_256: BuiltinName = "__builtin_ia32_cvtpd2dq256"; break;
+ case Intrinsic::x86_avx_cvt_ps2_pd_256: BuiltinName = "__builtin_ia32_cvtps2pd256"; break;
+ case Intrinsic::x86_avx_cvt_ps2dq_256: BuiltinName = "__builtin_ia32_cvtps2dq256"; break;
+ case Intrinsic::x86_avx_cvtdq2_pd_256: BuiltinName = "__builtin_ia32_cvtdq2pd256"; break;
+ case Intrinsic::x86_avx_cvtdq2_ps_256: BuiltinName = "__builtin_ia32_cvtdq2ps256"; break;
+ case Intrinsic::x86_avx_cvtt_pd2dq_256: BuiltinName = "__builtin_ia32_cvttpd2dq256"; break;
+ case Intrinsic::x86_avx_cvtt_ps2dq_256: BuiltinName = "__builtin_ia32_cvttps2dq256"; break;
+ case Intrinsic::x86_avx_dp_ps_256: BuiltinName = "__builtin_ia32_dpps256"; break;
+ case Intrinsic::x86_avx_hadd_pd_256: BuiltinName = "__builtin_ia32_haddpd256"; break;
+ case Intrinsic::x86_avx_hadd_ps_256: BuiltinName = "__builtin_ia32_haddps256"; break;
+ case Intrinsic::x86_avx_hsub_pd_256: BuiltinName = "__builtin_ia32_hsubpd256"; break;
+ case Intrinsic::x86_avx_hsub_ps_256: BuiltinName = "__builtin_ia32_hsubps256"; break;
+ case Intrinsic::x86_avx_ldu_dq_256: BuiltinName = "__builtin_ia32_lddqu256"; break;
+ case Intrinsic::x86_avx_loadu_dq_256: BuiltinName = "__builtin_ia32_loaddqu256"; break;
+ case Intrinsic::x86_avx_loadu_pd_256: BuiltinName = "__builtin_ia32_loadupd256"; break;
+ case Intrinsic::x86_avx_loadu_ps_256: BuiltinName = "__builtin_ia32_loadups256"; break;
+ case Intrinsic::x86_avx_maskload_pd: BuiltinName = "__builtin_ia32_maskloadpd"; break;
+ case Intrinsic::x86_avx_maskload_pd_256: BuiltinName = "__builtin_ia32_maskloadpd256"; break;
+ case Intrinsic::x86_avx_maskload_ps: BuiltinName = "__builtin_ia32_maskloadps"; break;
+ case Intrinsic::x86_avx_maskload_ps_256: BuiltinName = "__builtin_ia32_maskloadps256"; break;
+ case Intrinsic::x86_avx_maskstore_pd: BuiltinName = "__builtin_ia32_maskstorepd"; break;
+ case Intrinsic::x86_avx_maskstore_pd_256: BuiltinName = "__builtin_ia32_maskstorepd256"; break;
+ case Intrinsic::x86_avx_maskstore_ps: BuiltinName = "__builtin_ia32_maskstoreps"; break;
+ case Intrinsic::x86_avx_maskstore_ps_256: BuiltinName = "__builtin_ia32_maskstoreps256"; break;
+ case Intrinsic::x86_avx_max_pd_256: BuiltinName = "__builtin_ia32_maxpd256"; break;
+ case Intrinsic::x86_avx_max_ps_256: BuiltinName = "__builtin_ia32_maxps256"; break;
+ case Intrinsic::x86_avx_min_pd_256: BuiltinName = "__builtin_ia32_minpd256"; break;
+ case Intrinsic::x86_avx_min_ps_256: BuiltinName = "__builtin_ia32_minps256"; break;
+ case Intrinsic::x86_avx_movmsk_pd_256: BuiltinName = "__builtin_ia32_movmskpd256"; break;
+ case Intrinsic::x86_avx_movmsk_ps_256: BuiltinName = "__builtin_ia32_movmskps256"; break;
+ case Intrinsic::x86_avx_movnt_dq_256: BuiltinName = "__builtin_ia32_movntdq256"; break;
+ case Intrinsic::x86_avx_movnt_pd_256: BuiltinName = "__builtin_ia32_movntpd256"; break;
+ case Intrinsic::x86_avx_movnt_ps_256: BuiltinName = "__builtin_ia32_movntps256"; break;
+ case Intrinsic::x86_avx_ptestc_256: BuiltinName = "__builtin_ia32_ptestc256"; break;
+ case Intrinsic::x86_avx_ptestnzc_256: BuiltinName = "__builtin_ia32_ptestnzc256"; break;
+ case Intrinsic::x86_avx_ptestz_256: BuiltinName = "__builtin_ia32_ptestz256"; break;
+ case Intrinsic::x86_avx_rcp_ps_256: BuiltinName = "__builtin_ia32_rcpps256"; break;
+ case Intrinsic::x86_avx_round_pd_256: BuiltinName = "__builtin_ia32_roundpd256"; break;
+ case Intrinsic::x86_avx_round_ps_256: BuiltinName = "__builtin_ia32_roundps256"; break;
+ case Intrinsic::x86_avx_rsqrt_ps_256: BuiltinName = "__builtin_ia32_rsqrtps256"; break;
+ case Intrinsic::x86_avx_sqrt_pd_256: BuiltinName = "__builtin_ia32_sqrtpd256"; break;
+ case Intrinsic::x86_avx_sqrt_ps_256: BuiltinName = "__builtin_ia32_sqrtps256"; break;
+ case Intrinsic::x86_avx_storeu_dq_256: BuiltinName = "__builtin_ia32_storedqu256"; break;
+ case Intrinsic::x86_avx_storeu_pd_256: BuiltinName = "__builtin_ia32_storeupd256"; break;
+ case Intrinsic::x86_avx_storeu_ps_256: BuiltinName = "__builtin_ia32_storeups256"; break;
+ case Intrinsic::x86_avx_vbroadcast_sd_256: BuiltinName = "__builtin_ia32_vbroadcastsd256"; break;
+ case Intrinsic::x86_avx_vbroadcastf128_pd_256: BuiltinName = "__builtin_ia32_vbroadcastf128_pd256"; break;
+ case Intrinsic::x86_avx_vbroadcastf128_ps_256: BuiltinName = "__builtin_ia32_vbroadcastf128_ps256"; break;
+ case Intrinsic::x86_avx_vbroadcastss: BuiltinName = "__builtin_ia32_vbroadcastss"; break;
+ case Intrinsic::x86_avx_vbroadcastss_256: BuiltinName = "__builtin_ia32_vbroadcastss256"; break;
+ case Intrinsic::x86_avx_vextractf128_pd_256: BuiltinName = "__builtin_ia32_vextractf128_pd256"; break;
+ case Intrinsic::x86_avx_vextractf128_ps_256: BuiltinName = "__builtin_ia32_vextractf128_ps256"; break;
+ case Intrinsic::x86_avx_vextractf128_si_256: BuiltinName = "__builtin_ia32_vextractf128_si256"; break;
+ case Intrinsic::x86_avx_vinsertf128_pd_256: BuiltinName = "__builtin_ia32_vinsertf128_pd256"; break;
+ case Intrinsic::x86_avx_vinsertf128_ps_256: BuiltinName = "__builtin_ia32_vinsertf128_ps256"; break;
+ case Intrinsic::x86_avx_vinsertf128_si_256: BuiltinName = "__builtin_ia32_vinsertf128_si256"; break;
+ case Intrinsic::x86_avx_vperm2f128_pd_256: BuiltinName = "__builtin_ia32_vperm2f128_pd256"; break;
+ case Intrinsic::x86_avx_vperm2f128_ps_256: BuiltinName = "__builtin_ia32_vperm2f128_ps256"; break;
+ case Intrinsic::x86_avx_vperm2f128_si_256: BuiltinName = "__builtin_ia32_vperm2f128_si256"; break;
+ case Intrinsic::x86_avx_vpermil_pd: BuiltinName = "__builtin_ia32_vpermilpd"; break;
+ case Intrinsic::x86_avx_vpermil_pd_256: BuiltinName = "__builtin_ia32_vpermilpd256"; break;
+ case Intrinsic::x86_avx_vpermil_ps: BuiltinName = "__builtin_ia32_vpermilps"; break;
+ case Intrinsic::x86_avx_vpermil_ps_256: BuiltinName = "__builtin_ia32_vpermilps256"; break;
+ case Intrinsic::x86_avx_vpermilvar_pd: BuiltinName = "__builtin_ia32_vpermilvarpd"; break;
+ case Intrinsic::x86_avx_vpermilvar_pd_256: BuiltinName = "__builtin_ia32_vpermilvarpd256"; break;
+ case Intrinsic::x86_avx_vpermilvar_ps: BuiltinName = "__builtin_ia32_vpermilvarps"; break;
+ case Intrinsic::x86_avx_vpermilvar_ps_256: BuiltinName = "__builtin_ia32_vpermilvarps256"; break;
+ case Intrinsic::x86_avx_vtestc_pd: BuiltinName = "__builtin_ia32_vtestcpd"; break;
+ case Intrinsic::x86_avx_vtestc_pd_256: BuiltinName = "__builtin_ia32_vtestcpd256"; break;
+ case Intrinsic::x86_avx_vtestc_ps: BuiltinName = "__builtin_ia32_vtestcps"; break;
+ case Intrinsic::x86_avx_vtestc_ps_256: BuiltinName = "__builtin_ia32_vtestcps256"; break;
+ case Intrinsic::x86_avx_vtestnzc_pd: BuiltinName = "__builtin_ia32_vtestnzcpd"; break;
+ case Intrinsic::x86_avx_vtestnzc_pd_256: BuiltinName = "__builtin_ia32_vtestnzcpd256"; break;
+ case Intrinsic::x86_avx_vtestnzc_ps: BuiltinName = "__builtin_ia32_vtestnzcps"; break;
+ case Intrinsic::x86_avx_vtestnzc_ps_256: BuiltinName = "__builtin_ia32_vtestnzcps256"; break;
+ case Intrinsic::x86_avx_vtestz_pd: BuiltinName = "__builtin_ia32_vtestzpd"; break;
+ case Intrinsic::x86_avx_vtestz_pd_256: BuiltinName = "__builtin_ia32_vtestzpd256"; break;
+ case Intrinsic::x86_avx_vtestz_ps: BuiltinName = "__builtin_ia32_vtestzps"; break;
+ case Intrinsic::x86_avx_vtestz_ps_256: BuiltinName = "__builtin_ia32_vtestzps256"; break;
+ case Intrinsic::x86_avx_vzeroall: BuiltinName = "__builtin_ia32_vzeroall"; break;
+ case Intrinsic::x86_avx_vzeroupper: BuiltinName = "__builtin_ia32_vzeroupper"; break;
case Intrinsic::x86_mmx_emms: BuiltinName = "__builtin_ia32_emms"; break;
case Intrinsic::x86_mmx_femms: BuiltinName = "__builtin_ia32_femms"; break;
case Intrinsic::x86_mmx_maskmovq: BuiltinName = "__builtin_ia32_maskmovq"; break;
@@ -8409,10 +9857,16 @@ case Intrinsic::xcore_getid:
case Intrinsic::x86_mmx_packssdw: BuiltinName = "__builtin_ia32_packssdw"; break;
case Intrinsic::x86_mmx_packsswb: BuiltinName = "__builtin_ia32_packsswb"; break;
case Intrinsic::x86_mmx_packuswb: BuiltinName = "__builtin_ia32_packuswb"; break;
+ case Intrinsic::x86_mmx_padd_b: BuiltinName = "__builtin_ia32_paddb"; break;
+ case Intrinsic::x86_mmx_padd_d: BuiltinName = "__builtin_ia32_paddd"; break;
+ case Intrinsic::x86_mmx_padd_q: BuiltinName = "__builtin_ia32_paddq"; break;
+ case Intrinsic::x86_mmx_padd_w: BuiltinName = "__builtin_ia32_paddw"; break;
case Intrinsic::x86_mmx_padds_b: BuiltinName = "__builtin_ia32_paddsb"; break;
case Intrinsic::x86_mmx_padds_w: BuiltinName = "__builtin_ia32_paddsw"; break;
case Intrinsic::x86_mmx_paddus_b: BuiltinName = "__builtin_ia32_paddusb"; break;
case Intrinsic::x86_mmx_paddus_w: BuiltinName = "__builtin_ia32_paddusw"; break;
+ case Intrinsic::x86_mmx_pand: BuiltinName = "__builtin_ia32_pand"; break;
+ case Intrinsic::x86_mmx_pandn: BuiltinName = "__builtin_ia32_pandn"; break;
case Intrinsic::x86_mmx_pavg_b: BuiltinName = "__builtin_ia32_pavgb"; break;
case Intrinsic::x86_mmx_pavg_w: BuiltinName = "__builtin_ia32_pavgw"; break;
case Intrinsic::x86_mmx_pcmpeq_b: BuiltinName = "__builtin_ia32_pcmpeqb"; break;
@@ -8429,7 +9883,9 @@ case Intrinsic::xcore_getid:
case Intrinsic::x86_mmx_pmovmskb: BuiltinName = "__builtin_ia32_pmovmskb"; break;
case Intrinsic::x86_mmx_pmulh_w: BuiltinName = "__builtin_ia32_pmulhw"; break;
case Intrinsic::x86_mmx_pmulhu_w: BuiltinName = "__builtin_ia32_pmulhuw"; break;
+ case Intrinsic::x86_mmx_pmull_w: BuiltinName = "__builtin_ia32_pmullw"; break;
case Intrinsic::x86_mmx_pmulu_dq: BuiltinName = "__builtin_ia32_pmuludq"; break;
+ case Intrinsic::x86_mmx_por: BuiltinName = "__builtin_ia32_por"; break;
case Intrinsic::x86_mmx_psad_bw: BuiltinName = "__builtin_ia32_psadbw"; break;
case Intrinsic::x86_mmx_psll_d: BuiltinName = "__builtin_ia32_pslld"; break;
case Intrinsic::x86_mmx_psll_q: BuiltinName = "__builtin_ia32_psllq"; break;
@@ -8447,10 +9903,25 @@ case Intrinsic::xcore_getid:
case Intrinsic::x86_mmx_psrli_d: BuiltinName = "__builtin_ia32_psrldi"; break;
case Intrinsic::x86_mmx_psrli_q: BuiltinName = "__builtin_ia32_psrlqi"; break;
case Intrinsic::x86_mmx_psrli_w: BuiltinName = "__builtin_ia32_psrlwi"; break;
+ case Intrinsic::x86_mmx_psub_b: BuiltinName = "__builtin_ia32_psubb"; break;
+ case Intrinsic::x86_mmx_psub_d: BuiltinName = "__builtin_ia32_psubd"; break;
+ case Intrinsic::x86_mmx_psub_q: BuiltinName = "__builtin_ia32_psubq"; break;
+ case Intrinsic::x86_mmx_psub_w: BuiltinName = "__builtin_ia32_psubw"; break;
case Intrinsic::x86_mmx_psubs_b: BuiltinName = "__builtin_ia32_psubsb"; break;
case Intrinsic::x86_mmx_psubs_w: BuiltinName = "__builtin_ia32_psubsw"; break;
case Intrinsic::x86_mmx_psubus_b: BuiltinName = "__builtin_ia32_psubusb"; break;
case Intrinsic::x86_mmx_psubus_w: BuiltinName = "__builtin_ia32_psubusw"; break;
+ case Intrinsic::x86_mmx_punpckhbw: BuiltinName = "__builtin_ia32_punpckhbw"; break;
+ case Intrinsic::x86_mmx_punpckhdq: BuiltinName = "__builtin_ia32_punpckhdq"; break;
+ case Intrinsic::x86_mmx_punpckhwd: BuiltinName = "__builtin_ia32_punpckhwd"; break;
+ case Intrinsic::x86_mmx_punpcklbw: BuiltinName = "__builtin_ia32_punpcklbw"; break;
+ case Intrinsic::x86_mmx_punpckldq: BuiltinName = "__builtin_ia32_punpckldq"; break;
+ case Intrinsic::x86_mmx_punpcklwd: BuiltinName = "__builtin_ia32_punpcklwd"; break;
+ case Intrinsic::x86_mmx_pxor: BuiltinName = "__builtin_ia32_pxor"; break;
+ case Intrinsic::x86_mmx_vec_ext_d: BuiltinName = "__builtin_ia32_vec_ext_v2si"; break;
+ case Intrinsic::x86_mmx_vec_init_b: BuiltinName = "__builtin_ia32_vec_init_v8qi"; break;
+ case Intrinsic::x86_mmx_vec_init_d: BuiltinName = "__builtin_ia32_vec_init_v2si"; break;
+ case Intrinsic::x86_mmx_vec_init_w: BuiltinName = "__builtin_ia32_vec_init_v4hi"; break;
case Intrinsic::x86_sse2_add_sd: BuiltinName = "__builtin_ia32_addsd"; break;
case Intrinsic::x86_sse2_clflush: BuiltinName = "__builtin_ia32_clflush"; break;
case Intrinsic::x86_sse2_comieq_sd: BuiltinName = "__builtin_ia32_comisdeq"; break;
@@ -8596,7 +10067,6 @@ case Intrinsic::xcore_getid:
case Intrinsic::x86_sse41_pmovzxwd: BuiltinName = "__builtin_ia32_pmovzxwd128"; break;
case Intrinsic::x86_sse41_pmovzxwq: BuiltinName = "__builtin_ia32_pmovzxwq128"; break;
case Intrinsic::x86_sse41_pmuldq: BuiltinName = "__builtin_ia32_pmuldq128"; break;
- case Intrinsic::x86_sse41_pmulld: BuiltinName = "__builtin_ia32_pmulld128"; break;
case Intrinsic::x86_sse41_ptestc: BuiltinName = "__builtin_ia32_ptestc128"; break;
case Intrinsic::x86_sse41_ptestnzc: BuiltinName = "__builtin_ia32_ptestnzc128"; break;
case Intrinsic::x86_sse41_ptestz: BuiltinName = "__builtin_ia32_ptestz128"; break;
@@ -8606,8 +10076,8 @@ case Intrinsic::xcore_getid:
case Intrinsic::x86_sse41_round_ss: BuiltinName = "__builtin_ia32_roundss"; break;
case Intrinsic::x86_sse42_crc32_16: BuiltinName = "__builtin_ia32_crc32hi"; break;
case Intrinsic::x86_sse42_crc32_32: BuiltinName = "__builtin_ia32_crc32si"; break;
- case Intrinsic::x86_sse42_crc32_64: BuiltinName = "__builtin_ia32_crc32di"; break;
case Intrinsic::x86_sse42_crc32_8: BuiltinName = "__builtin_ia32_crc32qi"; break;
+ case Intrinsic::x86_sse42_crc64_64: BuiltinName = "__builtin_ia32_crc32di"; break;
case Intrinsic::x86_sse42_pcmpestri128: BuiltinName = "__builtin_ia32_pcmpestri128"; break;
case Intrinsic::x86_sse42_pcmpestria128: BuiltinName = "__builtin_ia32_pcmpestria128"; break;
case Intrinsic::x86_sse42_pcmpestric128: BuiltinName = "__builtin_ia32_pcmpestric128"; break;
@@ -8690,6 +10160,7 @@ case Intrinsic::xcore_getid:
case Intrinsic::x86_ssse3_pmul_hr_sw_128: BuiltinName = "__builtin_ia32_pmulhrsw128"; break;
case Intrinsic::x86_ssse3_pshuf_b: BuiltinName = "__builtin_ia32_pshufb"; break;
case Intrinsic::x86_ssse3_pshuf_b_128: BuiltinName = "__builtin_ia32_pshufb128"; break;
+ case Intrinsic::x86_ssse3_pshuf_w: BuiltinName = "__builtin_ia32_pshufw"; break;
case Intrinsic::x86_ssse3_psign_b: BuiltinName = "__builtin_ia32_psignb"; break;
case Intrinsic::x86_ssse3_psign_b_128: BuiltinName = "__builtin_ia32_psignb128"; break;
case Intrinsic::x86_ssse3_psign_d: BuiltinName = "__builtin_ia32_psignd"; break;
@@ -8710,8 +10181,28 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
switch (strlen(BuiltinName)) {
default: break;
case 14:
- if (!memcmp(BuiltinName, "__builtin_trap", 14))
- IntrinsicID = Intrinsic::trap;
+ if (!memcmp(BuiltinName, "__", 2)) {
+ switch (BuiltinName[2]) { // "__"
+ case 'b':
+ if (!memcmp(BuiltinName+3, "uiltin_trap", 11))
+ IntrinsicID = Intrinsic::trap;
+ break;
+ case 'g':
+ if (!memcmp(BuiltinName+3, "nu_", 3)) {
+ switch (BuiltinName[6]) { // "__gnu_"
+ case 'f':
+ if (!memcmp(BuiltinName+7, "2h_ieee", 7))
+ IntrinsicID = Intrinsic::convert_to_fp16;
+ break;
+ case 'h':
+ if (!memcmp(BuiltinName+7, "2f_ieee", 7))
+ IntrinsicID = Intrinsic::convert_from_fp16;
+ break;
+ }
+ }
+ break;
+ }
+ }
break;
case 19:
if (!memcmp(BuiltinName, "__sync_fetch_and_or", 19))
@@ -8853,6 +10344,46 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
if (!strcmp(TargetPrefix, "arm")) {
switch (strlen(BuiltinName)) {
default: break;
+ case 18:
+ if (!memcmp(BuiltinName, "__builtin_arm_", 14)) {
+ switch (BuiltinName[14]) { // "__builtin_arm_"
+ case 'q':
+ switch (BuiltinName[15]) { // "__builtin_arm_q"
+ case 'a':
+ if (!memcmp(BuiltinName+16, "dd", 2))
+ IntrinsicID = Intrinsic::arm_qadd;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+16, "ub", 2))
+ IntrinsicID = Intrinsic::arm_qsub;
+ break;
+ }
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+15, "sat", 3))
+ IntrinsicID = Intrinsic::arm_ssat;
+ break;
+ case 'u':
+ if (!memcmp(BuiltinName+15, "sat", 3))
+ IntrinsicID = Intrinsic::arm_usat;
+ break;
+ }
+ }
+ break;
+ case 23:
+ if (!memcmp(BuiltinName, "__builtin_arm_", 14)) {
+ switch (BuiltinName[14]) { // "__builtin_arm_"
+ case 'g':
+ if (!memcmp(BuiltinName+15, "et_fpscr", 8))
+ IntrinsicID = Intrinsic::arm_get_fpscr;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+15, "et_fpscr", 8))
+ IntrinsicID = Intrinsic::arm_set_fpscr;
+ break;
+ }
+ }
+ break;
case 24:
if (!memcmp(BuiltinName, "__builtin_thread_pointer", 24))
IntrinsicID = Intrinsic::arm_thread_pointer;
@@ -10240,6 +11771,10 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
if (!strcmp(TargetPrefix, "x86")) {
switch (strlen(BuiltinName)) {
default: break;
+ case 18:
+ if (!memcmp(BuiltinName, "__builtin_ia32_por", 18))
+ IntrinsicID = Intrinsic::x86_mmx_por;
+ break;
case 19:
if (!memcmp(BuiltinName, "__builtin_ia32_", 15)) {
switch (BuiltinName[15]) { // "__builtin_ia32_"
@@ -10259,6 +11794,18 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
if (!memcmp(BuiltinName+16, "mms", 3))
IntrinsicID = Intrinsic::x86_mmx_emms;
break;
+ case 'p':
+ switch (BuiltinName[16]) { // "__builtin_ia32_p"
+ case 'a':
+ if (!memcmp(BuiltinName+17, "nd", 2))
+ IntrinsicID = Intrinsic::x86_mmx_pand;
+ break;
+ case 'x':
+ if (!memcmp(BuiltinName+17, "or", 2))
+ IntrinsicID = Intrinsic::x86_mmx_pxor;
+ break;
+ }
+ break;
}
}
break;
@@ -10388,6 +11935,28 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
}
}
break;
+ case 'd':
+ if (!memcmp(BuiltinName+18, "d", 1)) {
+ switch (BuiltinName[19]) { // "__builtin_ia32_padd"
+ case 'b':
+ IntrinsicID = Intrinsic::x86_mmx_padd_b;
+ break;
+ case 'd':
+ IntrinsicID = Intrinsic::x86_mmx_padd_d;
+ break;
+ case 'q':
+ IntrinsicID = Intrinsic::x86_mmx_padd_q;
+ break;
+ case 'w':
+ IntrinsicID = Intrinsic::x86_mmx_padd_w;
+ break;
+ }
+ }
+ break;
+ case 'n':
+ if (!memcmp(BuiltinName+18, "dn", 2))
+ IntrinsicID = Intrinsic::x86_mmx_pandn;
+ break;
case 'v':
if (!memcmp(BuiltinName+18, "g", 1)) {
switch (BuiltinName[19]) { // "__builtin_ia32_pavg"
@@ -10446,6 +12015,24 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
break;
}
break;
+ case 'u':
+ if (!memcmp(BuiltinName+18, "b", 1)) {
+ switch (BuiltinName[19]) { // "__builtin_ia32_psub"
+ case 'b':
+ IntrinsicID = Intrinsic::x86_mmx_psub_b;
+ break;
+ case 'd':
+ IntrinsicID = Intrinsic::x86_mmx_psub_d;
+ break;
+ case 'q':
+ IntrinsicID = Intrinsic::x86_mmx_psub_q;
+ break;
+ case 'w':
+ IntrinsicID = Intrinsic::x86_mmx_psub_w;
+ break;
+ }
+ }
+ break;
}
break;
}
@@ -10637,8 +12224,18 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
}
break;
case 'u':
- if (!memcmp(BuiltinName+18, "lhw", 3))
- IntrinsicID = Intrinsic::x86_mmx_pmulh_w;
+ if (!memcmp(BuiltinName+18, "l", 1)) {
+ switch (BuiltinName[19]) { // "__builtin_ia32_pmul"
+ case 'h':
+ if (!memcmp(BuiltinName+20, "w", 1))
+ IntrinsicID = Intrinsic::x86_mmx_pmulh_w;
+ break;
+ case 'l':
+ if (!memcmp(BuiltinName+20, "w", 1))
+ IntrinsicID = Intrinsic::x86_mmx_pmull_w;
+ break;
+ }
+ }
break;
}
break;
@@ -10649,8 +12246,16 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
IntrinsicID = Intrinsic::x86_mmx_psad_bw;
break;
case 'h':
- if (!memcmp(BuiltinName+18, "ufb", 3))
- IntrinsicID = Intrinsic::x86_ssse3_pshuf_b;
+ if (!memcmp(BuiltinName+18, "uf", 2)) {
+ switch (BuiltinName[20]) { // "__builtin_ia32_pshuf"
+ case 'b':
+ IntrinsicID = Intrinsic::x86_ssse3_pshuf_b;
+ break;
+ case 'w':
+ IntrinsicID = Intrinsic::x86_ssse3_pshuf_w;
+ break;
+ }
+ }
break;
case 'i':
if (!memcmp(BuiltinName+18, "gn", 2)) {
@@ -10800,7 +12405,7 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
switch (BuiltinName[20]) { // "__builtin_ia32_crc32"
case 'd':
if (!memcmp(BuiltinName+21, "i", 1))
- IntrinsicID = Intrinsic::x86_sse42_crc32_64;
+ IntrinsicID = Intrinsic::x86_sse42_crc64_64;
break;
case 'h':
if (!memcmp(BuiltinName+21, "i", 1))
@@ -10819,6 +12424,10 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
break;
}
break;
+ case 'd':
+ if (!memcmp(BuiltinName+16, "pps256", 6))
+ IntrinsicID = Intrinsic::x86_avx_dp_ps_256;
+ break;
case 'l':
if (!memcmp(BuiltinName+16, "oad", 3)) {
switch (BuiltinName[19]) { // "__builtin_ia32_load"
@@ -11081,6 +12690,20 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
break;
case 'c':
switch (BuiltinName[16]) { // "__builtin_ia32_c"
+ case 'm':
+ if (!memcmp(BuiltinName+17, "pp", 2)) {
+ switch (BuiltinName[19]) { // "__builtin_ia32_cmpp"
+ case 'd':
+ if (!memcmp(BuiltinName+20, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_cmp_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+20, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_cmp_ps_256;
+ break;
+ }
+ }
+ break;
case 'o':
if (!memcmp(BuiltinName+17, "misd", 4)) {
switch (BuiltinName[21]) { // "__builtin_ia32_comisd"
@@ -11227,11 +12850,47 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
break;
}
break;
+ case 'l':
+ if (!memcmp(BuiltinName+16, "ddqu256", 7))
+ IntrinsicID = Intrinsic::x86_avx_ldu_dq_256;
+ break;
case 'm':
switch (BuiltinName[16]) { // "__builtin_ia32_m"
case 'a':
- if (!memcmp(BuiltinName+17, "skmovq", 6))
- IntrinsicID = Intrinsic::x86_mmx_maskmovq;
+ switch (BuiltinName[17]) { // "__builtin_ia32_ma"
+ case 's':
+ if (!memcmp(BuiltinName+18, "kmovq", 5))
+ IntrinsicID = Intrinsic::x86_mmx_maskmovq;
+ break;
+ case 'x':
+ if (!memcmp(BuiltinName+18, "p", 1)) {
+ switch (BuiltinName[19]) { // "__builtin_ia32_maxp"
+ case 'd':
+ if (!memcmp(BuiltinName+20, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_max_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+20, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_max_ps_256;
+ break;
+ }
+ }
+ break;
+ }
+ break;
+ case 'i':
+ if (!memcmp(BuiltinName+17, "np", 2)) {
+ switch (BuiltinName[19]) { // "__builtin_ia32_minp"
+ case 'd':
+ if (!memcmp(BuiltinName+20, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_min_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+20, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_min_ps_256;
+ break;
+ }
+ }
break;
case 'o':
if (!memcmp(BuiltinName+17, "v", 1)) {
@@ -11387,6 +13046,10 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
break;
}
break;
+ case 'r':
+ if (!memcmp(BuiltinName+16, "cpps256", 7))
+ IntrinsicID = Intrinsic::x86_avx_rcp_ps_256;
+ break;
case 's':
if (!memcmp(BuiltinName+16, "tore", 4)) {
switch (BuiltinName[20]) { // "__builtin_ia32_store"
@@ -11413,12 +13076,68 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
if (!memcmp(BuiltinName+16, "comineq", 7))
IntrinsicID = Intrinsic::x86_sse_ucomineq_ss;
break;
+ case 'v':
+ switch (BuiltinName[16]) { // "__builtin_ia32_v"
+ case 't':
+ if (!memcmp(BuiltinName+17, "est", 3)) {
+ switch (BuiltinName[20]) { // "__builtin_ia32_vtest"
+ case 'c':
+ if (!memcmp(BuiltinName+21, "p", 1)) {
+ switch (BuiltinName[22]) { // "__builtin_ia32_vtestcp"
+ case 'd':
+ IntrinsicID = Intrinsic::x86_avx_vtestc_pd;
+ break;
+ case 's':
+ IntrinsicID = Intrinsic::x86_avx_vtestc_ps;
+ break;
+ }
+ }
+ break;
+ case 'z':
+ if (!memcmp(BuiltinName+21, "p", 1)) {
+ switch (BuiltinName[22]) { // "__builtin_ia32_vtestzp"
+ case 'd':
+ IntrinsicID = Intrinsic::x86_avx_vtestz_pd;
+ break;
+ case 's':
+ IntrinsicID = Intrinsic::x86_avx_vtestz_ps;
+ break;
+ }
+ }
+ break;
+ }
+ }
+ break;
+ case 'z':
+ if (!memcmp(BuiltinName+17, "eroall", 6))
+ IntrinsicID = Intrinsic::x86_avx_vzeroall;
+ break;
+ }
+ break;
}
}
break;
case 24:
if (!memcmp(BuiltinName, "__builtin_ia32_", 15)) {
switch (BuiltinName[15]) { // "__builtin_ia32_"
+ case 'a':
+ if (!memcmp(BuiltinName+16, "es", 2)) {
+ switch (BuiltinName[18]) { // "__builtin_ia32_aes"
+ case 'd':
+ if (!memcmp(BuiltinName+19, "ec128", 5))
+ IntrinsicID = Intrinsic::x86_aesni_aesdec;
+ break;
+ case 'e':
+ if (!memcmp(BuiltinName+19, "nc128", 5))
+ IntrinsicID = Intrinsic::x86_aesni_aesenc;
+ break;
+ case 'i':
+ if (!memcmp(BuiltinName+19, "mc128", 5))
+ IntrinsicID = Intrinsic::x86_aesni_aesimc;
+ break;
+ }
+ }
+ break;
case 'c':
switch (BuiltinName[16]) { // "__builtin_ia32_c"
case 'o':
@@ -11477,6 +13196,38 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
break;
}
break;
+ case 'h':
+ switch (BuiltinName[16]) { // "__builtin_ia32_h"
+ case 'a':
+ if (!memcmp(BuiltinName+17, "ddp", 3)) {
+ switch (BuiltinName[20]) { // "__builtin_ia32_haddp"
+ case 'd':
+ if (!memcmp(BuiltinName+21, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_hadd_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+21, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_hadd_ps_256;
+ break;
+ }
+ }
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+17, "ubp", 3)) {
+ switch (BuiltinName[20]) { // "__builtin_ia32_hsubp"
+ case 'd':
+ if (!memcmp(BuiltinName+21, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_hsub_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+21, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_hsub_ps_256;
+ break;
+ }
+ }
+ break;
+ }
+ break;
case 'p':
switch (BuiltinName[16]) { // "__builtin_ia32_p"
case 'a':
@@ -11620,10 +13371,6 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
if (!memcmp(BuiltinName+20, "w128", 4))
IntrinsicID = Intrinsic::x86_sse2_pmulh_w;
break;
- case 'l':
- if (!memcmp(BuiltinName+20, "d128", 4))
- IntrinsicID = Intrinsic::x86_sse41_pmulld;
- break;
}
}
break;
@@ -11727,16 +13474,84 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
if (!memcmp(BuiltinName+17, "est", 3)) {
switch (BuiltinName[20]) { // "__builtin_ia32_ptest"
case 'c':
- if (!memcmp(BuiltinName+21, "128", 3))
- IntrinsicID = Intrinsic::x86_sse41_ptestc;
+ switch (BuiltinName[21]) { // "__builtin_ia32_ptestc"
+ case '1':
+ if (!memcmp(BuiltinName+22, "28", 2))
+ IntrinsicID = Intrinsic::x86_sse41_ptestc;
+ break;
+ case '2':
+ if (!memcmp(BuiltinName+22, "56", 2))
+ IntrinsicID = Intrinsic::x86_avx_ptestc_256;
+ break;
+ }
break;
case 'z':
- if (!memcmp(BuiltinName+21, "128", 3))
- IntrinsicID = Intrinsic::x86_sse41_ptestz;
+ switch (BuiltinName[21]) { // "__builtin_ia32_ptestz"
+ case '1':
+ if (!memcmp(BuiltinName+22, "28", 2))
+ IntrinsicID = Intrinsic::x86_sse41_ptestz;
+ break;
+ case '2':
+ if (!memcmp(BuiltinName+22, "56", 2))
+ IntrinsicID = Intrinsic::x86_avx_ptestz_256;
+ break;
+ }
break;
}
}
break;
+ case 'u':
+ if (!memcmp(BuiltinName+17, "npck", 4)) {
+ switch (BuiltinName[21]) { // "__builtin_ia32_punpck"
+ case 'h':
+ switch (BuiltinName[22]) { // "__builtin_ia32_punpckh"
+ case 'b':
+ if (!memcmp(BuiltinName+23, "w", 1))
+ IntrinsicID = Intrinsic::x86_mmx_punpckhbw;
+ break;
+ case 'd':
+ if (!memcmp(BuiltinName+23, "q", 1))
+ IntrinsicID = Intrinsic::x86_mmx_punpckhdq;
+ break;
+ case 'w':
+ if (!memcmp(BuiltinName+23, "d", 1))
+ IntrinsicID = Intrinsic::x86_mmx_punpckhwd;
+ break;
+ }
+ break;
+ case 'l':
+ switch (BuiltinName[22]) { // "__builtin_ia32_punpckl"
+ case 'b':
+ if (!memcmp(BuiltinName+23, "w", 1))
+ IntrinsicID = Intrinsic::x86_mmx_punpcklbw;
+ break;
+ case 'd':
+ if (!memcmp(BuiltinName+23, "q", 1))
+ IntrinsicID = Intrinsic::x86_mmx_punpckldq;
+ break;
+ case 'w':
+ if (!memcmp(BuiltinName+23, "d", 1))
+ IntrinsicID = Intrinsic::x86_mmx_punpcklwd;
+ break;
+ }
+ break;
+ }
+ }
+ break;
+ }
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+16, "qrtp", 4)) {
+ switch (BuiltinName[20]) { // "__builtin_ia32_sqrtp"
+ case 'd':
+ if (!memcmp(BuiltinName+21, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_sqrt_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+21, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_sqrt_ps_256;
+ break;
+ }
}
break;
case 'u':
@@ -11769,12 +13584,38 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
}
}
break;
+ case 'v':
+ if (!memcmp(BuiltinName+16, "permilp", 7)) {
+ switch (BuiltinName[23]) { // "__builtin_ia32_vpermilp"
+ case 'd':
+ IntrinsicID = Intrinsic::x86_avx_vpermil_pd;
+ break;
+ case 's':
+ IntrinsicID = Intrinsic::x86_avx_vpermil_ps;
+ break;
+ }
+ }
+ break;
}
}
break;
case 25:
if (!memcmp(BuiltinName, "__builtin_ia32_", 15)) {
switch (BuiltinName[15]) { // "__builtin_ia32_"
+ case 'b':
+ if (!memcmp(BuiltinName+16, "lendp", 5)) {
+ switch (BuiltinName[21]) { // "__builtin_ia32_blendp"
+ case 'd':
+ if (!memcmp(BuiltinName+22, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_blend_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+22, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_blend_ps_256;
+ break;
+ }
+ }
+ break;
case 'c':
if (!memcmp(BuiltinName+16, "vts", 3)) {
switch (BuiltinName[19]) { // "__builtin_ia32_cvts"
@@ -11801,11 +13642,75 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
}
}
break;
+ case 'l':
+ if (!memcmp(BuiltinName+16, "oad", 3)) {
+ switch (BuiltinName[19]) { // "__builtin_ia32_load"
+ case 'd':
+ if (!memcmp(BuiltinName+20, "qu256", 5))
+ IntrinsicID = Intrinsic::x86_avx_loadu_dq_256;
+ break;
+ case 'u':
+ if (!memcmp(BuiltinName+20, "p", 1)) {
+ switch (BuiltinName[21]) { // "__builtin_ia32_loadup"
+ case 'd':
+ if (!memcmp(BuiltinName+22, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_loadu_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+22, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_loadu_ps_256;
+ break;
+ }
+ }
+ break;
+ }
+ }
+ break;
case 'm':
switch (BuiltinName[16]) { // "__builtin_ia32_m"
case 'a':
- if (!memcmp(BuiltinName+17, "skmovdqu", 8))
- IntrinsicID = Intrinsic::x86_sse2_maskmov_dqu;
+ if (!memcmp(BuiltinName+17, "sk", 2)) {
+ switch (BuiltinName[19]) { // "__builtin_ia32_mask"
+ case 'l':
+ if (!memcmp(BuiltinName+20, "oadp", 4)) {
+ switch (BuiltinName[24]) { // "__builtin_ia32_maskloadp"
+ case 'd':
+ IntrinsicID = Intrinsic::x86_avx_maskload_pd;
+ break;
+ case 's':
+ IntrinsicID = Intrinsic::x86_avx_maskload_ps;
+ break;
+ }
+ }
+ break;
+ case 'm':
+ if (!memcmp(BuiltinName+20, "ovdqu", 5))
+ IntrinsicID = Intrinsic::x86_sse2_maskmov_dqu;
+ break;
+ }
+ }
+ break;
+ case 'o':
+ if (!memcmp(BuiltinName+17, "vnt", 3)) {
+ switch (BuiltinName[20]) { // "__builtin_ia32_movnt"
+ case 'd':
+ if (!memcmp(BuiltinName+21, "q256", 4))
+ IntrinsicID = Intrinsic::x86_avx_movnt_dq_256;
+ break;
+ case 'p':
+ switch (BuiltinName[21]) { // "__builtin_ia32_movntp"
+ case 'd':
+ if (!memcmp(BuiltinName+22, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_movnt_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+22, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_movnt_ps_256;
+ break;
+ }
+ break;
+ }
+ }
break;
case 'p':
if (!memcmp(BuiltinName+17, "sadbw128", 8))
@@ -11937,6 +13842,28 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
break;
}
break;
+ case 'r':
+ switch (BuiltinName[16]) { // "__builtin_ia32_r"
+ case 'o':
+ if (!memcmp(BuiltinName+17, "undp", 4)) {
+ switch (BuiltinName[21]) { // "__builtin_ia32_roundp"
+ case 'd':
+ if (!memcmp(BuiltinName+22, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_round_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+22, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_round_ps_256;
+ break;
+ }
+ }
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+17, "qrtps256", 8))
+ IntrinsicID = Intrinsic::x86_avx_rsqrt_ps_256;
+ break;
+ }
+ break;
case 's':
if (!memcmp(BuiltinName+16, "torelv4si", 9))
IntrinsicID = Intrinsic::x86_sse2_storel_dq;
@@ -11945,22 +13872,122 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
if (!memcmp(BuiltinName+16, "comisdneq", 9))
IntrinsicID = Intrinsic::x86_sse2_ucomineq_sd;
break;
+ case 'v':
+ switch (BuiltinName[16]) { // "__builtin_ia32_v"
+ case 't':
+ if (!memcmp(BuiltinName+17, "estnzcp", 7)) {
+ switch (BuiltinName[24]) { // "__builtin_ia32_vtestnzcp"
+ case 'd':
+ IntrinsicID = Intrinsic::x86_avx_vtestnzc_pd;
+ break;
+ case 's':
+ IntrinsicID = Intrinsic::x86_avx_vtestnzc_ps;
+ break;
+ }
+ }
+ break;
+ case 'z':
+ if (!memcmp(BuiltinName+17, "eroupper", 8))
+ IntrinsicID = Intrinsic::x86_avx_vzeroupper;
+ break;
+ }
+ break;
}
}
break;
case 26:
if (!memcmp(BuiltinName, "__builtin_ia32_", 15)) {
switch (BuiltinName[15]) { // "__builtin_ia32_"
- case 'c':
- if (!memcmp(BuiltinName+16, "vtts", 4)) {
- switch (BuiltinName[20]) { // "__builtin_ia32_cvtts"
+ case 'a':
+ if (!memcmp(BuiltinName+16, "ddsubp", 6)) {
+ switch (BuiltinName[22]) { // "__builtin_ia32_addsubp"
+ case 'd':
+ if (!memcmp(BuiltinName+23, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_addsub_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+23, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_addsub_ps_256;
+ break;
+ }
+ }
+ break;
+ case 'b':
+ if (!memcmp(BuiltinName+16, "lendvp", 6)) {
+ switch (BuiltinName[22]) { // "__builtin_ia32_blendvp"
case 'd':
- if (!memcmp(BuiltinName+21, "2si64", 5))
- IntrinsicID = Intrinsic::x86_sse2_cvttsd2si64;
+ if (!memcmp(BuiltinName+23, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_blendv_pd_256;
break;
case 's':
- if (!memcmp(BuiltinName+21, "2si64", 5))
- IntrinsicID = Intrinsic::x86_sse_cvttss2si64;
+ if (!memcmp(BuiltinName+23, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_blendv_ps_256;
+ break;
+ }
+ }
+ break;
+ case 'c':
+ if (!memcmp(BuiltinName+16, "vt", 2)) {
+ switch (BuiltinName[18]) { // "__builtin_ia32_cvt"
+ case 'd':
+ if (!memcmp(BuiltinName+19, "q2p", 3)) {
+ switch (BuiltinName[22]) { // "__builtin_ia32_cvtdq2p"
+ case 'd':
+ if (!memcmp(BuiltinName+23, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_cvtdq2_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+23, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_cvtdq2_ps_256;
+ break;
+ }
+ }
+ break;
+ case 'p':
+ switch (BuiltinName[19]) { // "__builtin_ia32_cvtp"
+ case 'd':
+ if (!memcmp(BuiltinName+20, "2", 1)) {
+ switch (BuiltinName[21]) { // "__builtin_ia32_cvtpd2"
+ case 'd':
+ if (!memcmp(BuiltinName+22, "q256", 4))
+ IntrinsicID = Intrinsic::x86_avx_cvt_pd2dq_256;
+ break;
+ case 'p':
+ if (!memcmp(BuiltinName+22, "s256", 4))
+ IntrinsicID = Intrinsic::x86_avx_cvt_pd2_ps_256;
+ break;
+ }
+ }
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+20, "2", 1)) {
+ switch (BuiltinName[21]) { // "__builtin_ia32_cvtps2"
+ case 'd':
+ if (!memcmp(BuiltinName+22, "q256", 4))
+ IntrinsicID = Intrinsic::x86_avx_cvt_ps2dq_256;
+ break;
+ case 'p':
+ if (!memcmp(BuiltinName+22, "d256", 4))
+ IntrinsicID = Intrinsic::x86_avx_cvt_ps2_pd_256;
+ break;
+ }
+ }
+ break;
+ }
+ break;
+ case 't':
+ if (!memcmp(BuiltinName+19, "s", 1)) {
+ switch (BuiltinName[20]) { // "__builtin_ia32_cvtts"
+ case 'd':
+ if (!memcmp(BuiltinName+21, "2si64", 5))
+ IntrinsicID = Intrinsic::x86_sse2_cvttsd2si64;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+21, "2si64", 5))
+ IntrinsicID = Intrinsic::x86_sse_cvttss2si64;
+ break;
+ }
+ }
break;
}
}
@@ -11969,6 +13996,36 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
if (!memcmp(BuiltinName+16, "nsertps128", 10))
IntrinsicID = Intrinsic::x86_sse41_insertps;
break;
+ case 'm':
+ switch (BuiltinName[16]) { // "__builtin_ia32_m"
+ case 'a':
+ if (!memcmp(BuiltinName+17, "skstorep", 8)) {
+ switch (BuiltinName[25]) { // "__builtin_ia32_maskstorep"
+ case 'd':
+ IntrinsicID = Intrinsic::x86_avx_maskstore_pd;
+ break;
+ case 's':
+ IntrinsicID = Intrinsic::x86_avx_maskstore_ps;
+ break;
+ }
+ }
+ break;
+ case 'o':
+ if (!memcmp(BuiltinName+17, "vmskp", 5)) {
+ switch (BuiltinName[22]) { // "__builtin_ia32_movmskp"
+ case 'd':
+ if (!memcmp(BuiltinName+23, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_movmsk_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+23, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_movmsk_ps_256;
+ break;
+ }
+ }
+ break;
+ }
+ break;
case 'p':
switch (BuiltinName[16]) { // "__builtin_ia32_p"
case 'a':
@@ -12104,17 +14161,99 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
}
break;
case 't':
- if (!memcmp(BuiltinName+17, "estnzc128", 9))
- IntrinsicID = Intrinsic::x86_sse41_ptestnzc;
+ if (!memcmp(BuiltinName+17, "estnzc", 6)) {
+ switch (BuiltinName[23]) { // "__builtin_ia32_ptestnzc"
+ case '1':
+ if (!memcmp(BuiltinName+24, "28", 2))
+ IntrinsicID = Intrinsic::x86_sse41_ptestnzc;
+ break;
+ case '2':
+ if (!memcmp(BuiltinName+24, "56", 2))
+ IntrinsicID = Intrinsic::x86_avx_ptestnzc_256;
+ break;
+ }
+ }
break;
}
break;
+ case 's':
+ if (!memcmp(BuiltinName+16, "tore", 4)) {
+ switch (BuiltinName[20]) { // "__builtin_ia32_store"
+ case 'd':
+ if (!memcmp(BuiltinName+21, "qu256", 5))
+ IntrinsicID = Intrinsic::x86_avx_storeu_dq_256;
+ break;
+ case 'u':
+ if (!memcmp(BuiltinName+21, "p", 1)) {
+ switch (BuiltinName[22]) { // "__builtin_ia32_storeup"
+ case 'd':
+ if (!memcmp(BuiltinName+23, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_storeu_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+23, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_storeu_ps_256;
+ break;
+ }
+ }
+ break;
+ }
+ }
+ break;
+ case 'v':
+ if (!memcmp(BuiltinName+16, "test", 4)) {
+ switch (BuiltinName[20]) { // "__builtin_ia32_vtest"
+ case 'c':
+ if (!memcmp(BuiltinName+21, "p", 1)) {
+ switch (BuiltinName[22]) { // "__builtin_ia32_vtestcp"
+ case 'd':
+ if (!memcmp(BuiltinName+23, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vtestc_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+23, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vtestc_ps_256;
+ break;
+ }
+ }
+ break;
+ case 'z':
+ if (!memcmp(BuiltinName+21, "p", 1)) {
+ switch (BuiltinName[22]) { // "__builtin_ia32_vtestzp"
+ case 'd':
+ if (!memcmp(BuiltinName+23, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vtestz_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+23, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vtestz_ps_256;
+ break;
+ }
+ }
+ break;
+ }
+ }
+ break;
}
}
break;
case 27:
if (!memcmp(BuiltinName, "__builtin_ia32_", 15)) {
switch (BuiltinName[15]) { // "__builtin_ia32_"
+ case 'c':
+ if (!memcmp(BuiltinName+16, "vttp", 4)) {
+ switch (BuiltinName[20]) { // "__builtin_ia32_cvttp"
+ case 'd':
+ if (!memcmp(BuiltinName+21, "2dq256", 6))
+ IntrinsicID = Intrinsic::x86_avx_cvtt_pd2dq_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+21, "2dq256", 6))
+ IntrinsicID = Intrinsic::x86_avx_cvtt_ps2dq_256;
+ break;
+ }
+ }
+ break;
case 'e':
if (!memcmp(BuiltinName+16, "xtractps128", 11))
IntrinsicID = Intrinsic::x86_sse41_extractps;
@@ -12161,87 +14300,341 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
break;
}
break;
- }
- }
- break;
- case 28:
- if (!memcmp(BuiltinName, "__builtin_ia32_p", 16)) {
- switch (BuiltinName[16]) { // "__builtin_ia32_p"
- case 'c':
- if (!memcmp(BuiltinName+17, "mp", 2)) {
- switch (BuiltinName[19]) { // "__builtin_ia32_pcmp"
- case 'e':
- if (!memcmp(BuiltinName+20, "stri", 4)) {
- switch (BuiltinName[24]) { // "__builtin_ia32_pcmpestri"
- case 'a':
- if (!memcmp(BuiltinName+25, "128", 3))
- IntrinsicID = Intrinsic::x86_sse42_pcmpestria128;
- break;
- case 'c':
- if (!memcmp(BuiltinName+25, "128", 3))
- IntrinsicID = Intrinsic::x86_sse42_pcmpestric128;
- break;
- case 'o':
- if (!memcmp(BuiltinName+25, "128", 3))
- IntrinsicID = Intrinsic::x86_sse42_pcmpestrio128;
+ case 'v':
+ switch (BuiltinName[16]) { // "__builtin_ia32_v"
+ case 'b':
+ if (!memcmp(BuiltinName+17, "roadcastss", 10))
+ IntrinsicID = Intrinsic::x86_avx_vbroadcastss;
+ break;
+ case 'e':
+ if (!memcmp(BuiltinName+17, "c_ext_v2si", 10))
+ IntrinsicID = Intrinsic::x86_mmx_vec_ext_d;
+ break;
+ case 'p':
+ if (!memcmp(BuiltinName+17, "ermil", 5)) {
+ switch (BuiltinName[22]) { // "__builtin_ia32_vpermil"
+ case 'p':
+ switch (BuiltinName[23]) { // "__builtin_ia32_vpermilp"
+ case 'd':
+ if (!memcmp(BuiltinName+24, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vpermil_pd_256;
break;
case 's':
- if (!memcmp(BuiltinName+25, "128", 3))
- IntrinsicID = Intrinsic::x86_sse42_pcmpestris128;
- break;
- case 'z':
- if (!memcmp(BuiltinName+25, "128", 3))
- IntrinsicID = Intrinsic::x86_sse42_pcmpestriz128;
+ if (!memcmp(BuiltinName+24, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vpermil_ps_256;
break;
}
+ break;
+ case 'v':
+ if (!memcmp(BuiltinName+23, "arp", 3)) {
+ switch (BuiltinName[26]) { // "__builtin_ia32_vpermilvarp"
+ case 'd':
+ IntrinsicID = Intrinsic::x86_avx_vpermilvar_pd;
+ break;
+ case 's':
+ IntrinsicID = Intrinsic::x86_avx_vpermilvar_ps;
+ break;
+ }
+ }
+ break;
}
+ }
+ break;
+ }
+ break;
+ }
+ }
+ break;
+ case 28:
+ if (!memcmp(BuiltinName, "__builtin_ia32_", 15)) {
+ switch (BuiltinName[15]) { // "__builtin_ia32_"
+ case 'a':
+ if (!memcmp(BuiltinName+16, "es", 2)) {
+ switch (BuiltinName[18]) { // "__builtin_ia32_aes"
+ case 'd':
+ if (!memcmp(BuiltinName+19, "eclast128", 9))
+ IntrinsicID = Intrinsic::x86_aesni_aesdeclast;
break;
- case 'i':
- if (!memcmp(BuiltinName+20, "stri", 4)) {
- switch (BuiltinName[24]) { // "__builtin_ia32_pcmpistri"
- case 'a':
- if (!memcmp(BuiltinName+25, "128", 3))
- IntrinsicID = Intrinsic::x86_sse42_pcmpistria128;
- break;
- case 'c':
- if (!memcmp(BuiltinName+25, "128", 3))
- IntrinsicID = Intrinsic::x86_sse42_pcmpistric128;
- break;
- case 'o':
- if (!memcmp(BuiltinName+25, "128", 3))
- IntrinsicID = Intrinsic::x86_sse42_pcmpistrio128;
- break;
- case 's':
- if (!memcmp(BuiltinName+25, "128", 3))
- IntrinsicID = Intrinsic::x86_sse42_pcmpistris128;
- break;
- case 'z':
- if (!memcmp(BuiltinName+25, "128", 3))
- IntrinsicID = Intrinsic::x86_sse42_pcmpistriz128;
- break;
+ case 'e':
+ if (!memcmp(BuiltinName+19, "nclast128", 9))
+ IntrinsicID = Intrinsic::x86_aesni_aesenclast;
+ break;
+ }
+ }
+ break;
+ case 'm':
+ if (!memcmp(BuiltinName+16, "askloadp", 8)) {
+ switch (BuiltinName[24]) { // "__builtin_ia32_maskloadp"
+ case 'd':
+ if (!memcmp(BuiltinName+25, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_maskload_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+25, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_maskload_ps_256;
+ break;
+ }
+ }
+ break;
+ case 'p':
+ switch (BuiltinName[16]) { // "__builtin_ia32_p"
+ case 'c':
+ if (!memcmp(BuiltinName+17, "mp", 2)) {
+ switch (BuiltinName[19]) { // "__builtin_ia32_pcmp"
+ case 'e':
+ if (!memcmp(BuiltinName+20, "stri", 4)) {
+ switch (BuiltinName[24]) { // "__builtin_ia32_pcmpestri"
+ case 'a':
+ if (!memcmp(BuiltinName+25, "128", 3))
+ IntrinsicID = Intrinsic::x86_sse42_pcmpestria128;
+ break;
+ case 'c':
+ if (!memcmp(BuiltinName+25, "128", 3))
+ IntrinsicID = Intrinsic::x86_sse42_pcmpestric128;
+ break;
+ case 'o':
+ if (!memcmp(BuiltinName+25, "128", 3))
+ IntrinsicID = Intrinsic::x86_sse42_pcmpestrio128;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+25, "128", 3))
+ IntrinsicID = Intrinsic::x86_sse42_pcmpestris128;
+ break;
+ case 'z':
+ if (!memcmp(BuiltinName+25, "128", 3))
+ IntrinsicID = Intrinsic::x86_sse42_pcmpestriz128;
+ break;
+ }
}
+ break;
+ case 'i':
+ if (!memcmp(BuiltinName+20, "stri", 4)) {
+ switch (BuiltinName[24]) { // "__builtin_ia32_pcmpistri"
+ case 'a':
+ if (!memcmp(BuiltinName+25, "128", 3))
+ IntrinsicID = Intrinsic::x86_sse42_pcmpistria128;
+ break;
+ case 'c':
+ if (!memcmp(BuiltinName+25, "128", 3))
+ IntrinsicID = Intrinsic::x86_sse42_pcmpistric128;
+ break;
+ case 'o':
+ if (!memcmp(BuiltinName+25, "128", 3))
+ IntrinsicID = Intrinsic::x86_sse42_pcmpistrio128;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+25, "128", 3))
+ IntrinsicID = Intrinsic::x86_sse42_pcmpistris128;
+ break;
+ case 'z':
+ if (!memcmp(BuiltinName+25, "128", 3))
+ IntrinsicID = Intrinsic::x86_sse42_pcmpistriz128;
+ break;
+ }
+ }
+ break;
+ }
+ }
+ break;
+ case 'h':
+ if (!memcmp(BuiltinName+17, "minposuw128", 11))
+ IntrinsicID = Intrinsic::x86_sse41_phminposuw;
+ break;
+ }
+ break;
+ case 'v':
+ switch (BuiltinName[16]) { // "__builtin_ia32_v"
+ case 'e':
+ if (!memcmp(BuiltinName+17, "c_init_v", 8)) {
+ switch (BuiltinName[25]) { // "__builtin_ia32_vec_init_v"
+ case '2':
+ if (!memcmp(BuiltinName+26, "si", 2))
+ IntrinsicID = Intrinsic::x86_mmx_vec_init_d;
+ break;
+ case '4':
+ if (!memcmp(BuiltinName+26, "hi", 2))
+ IntrinsicID = Intrinsic::x86_mmx_vec_init_w;
+ break;
+ case '8':
+ if (!memcmp(BuiltinName+26, "qi", 2))
+ IntrinsicID = Intrinsic::x86_mmx_vec_init_b;
+ break;
+ }
+ }
+ break;
+ case 't':
+ if (!memcmp(BuiltinName+17, "estnzcp", 7)) {
+ switch (BuiltinName[24]) { // "__builtin_ia32_vtestnzcp"
+ case 'd':
+ if (!memcmp(BuiltinName+25, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vtestnzc_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+25, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vtestnzc_ps_256;
+ break;
}
+ }
+ break;
+ }
+ break;
+ }
+ }
+ break;
+ case 29:
+ if (!memcmp(BuiltinName, "__builtin_ia32_maskstorep", 25)) {
+ switch (BuiltinName[25]) { // "__builtin_ia32_maskstorep"
+ case 'd':
+ if (!memcmp(BuiltinName+26, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_maskstore_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+26, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_maskstore_ps_256;
+ break;
+ }
+ }
+ break;
+ case 30:
+ if (!memcmp(BuiltinName, "__builtin_ia32_v", 16)) {
+ switch (BuiltinName[16]) { // "__builtin_ia32_v"
+ case 'b':
+ if (!memcmp(BuiltinName+17, "roadcasts", 9)) {
+ switch (BuiltinName[26]) { // "__builtin_ia32_vbroadcasts"
+ case 'd':
+ if (!memcmp(BuiltinName+27, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vbroadcast_sd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+27, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vbroadcastss_256;
break;
}
}
break;
- case 'h':
- if (!memcmp(BuiltinName+17, "minposuw128", 11))
- IntrinsicID = Intrinsic::x86_sse41_phminposuw;
+ case 'p':
+ if (!memcmp(BuiltinName+17, "ermilvarp", 9)) {
+ switch (BuiltinName[26]) { // "__builtin_ia32_vpermilvarp"
+ case 'd':
+ if (!memcmp(BuiltinName+27, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vpermilvar_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+27, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vpermilvar_ps_256;
+ break;
+ }
+ }
+ break;
+ }
+ }
+ break;
+ case 31:
+ if (!memcmp(BuiltinName, "__builtin_ia32_vperm2f128_", 26)) {
+ switch (BuiltinName[26]) { // "__builtin_ia32_vperm2f128_"
+ case 'p':
+ switch (BuiltinName[27]) { // "__builtin_ia32_vperm2f128_p"
+ case 'd':
+ if (!memcmp(BuiltinName+28, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vperm2f128_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+28, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vperm2f128_ps_256;
+ break;
+ }
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+27, "i256", 4))
+ IntrinsicID = Intrinsic::x86_avx_vperm2f128_si_256;
+ break;
+ }
+ }
+ break;
+ case 32:
+ if (!memcmp(BuiltinName, "__builtin_ia32_vinsertf128_", 27)) {
+ switch (BuiltinName[27]) { // "__builtin_ia32_vinsertf128_"
+ case 'p':
+ switch (BuiltinName[28]) { // "__builtin_ia32_vinsertf128_p"
+ case 'd':
+ if (!memcmp(BuiltinName+29, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vinsertf128_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+29, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vinsertf128_ps_256;
+ break;
+ }
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+28, "i256", 4))
+ IntrinsicID = Intrinsic::x86_avx_vinsertf128_si_256;
+ break;
+ }
+ }
+ break;
+ case 33:
+ if (!memcmp(BuiltinName, "__builtin_ia32_", 15)) {
+ switch (BuiltinName[15]) { // "__builtin_ia32_"
+ case 'a':
+ if (!memcmp(BuiltinName+16, "eskeygenassist128", 17))
+ IntrinsicID = Intrinsic::x86_aesni_aeskeygenassist;
+ break;
+ case 'v':
+ if (!memcmp(BuiltinName+16, "extractf128_", 12)) {
+ switch (BuiltinName[28]) { // "__builtin_ia32_vextractf128_"
+ case 'p':
+ switch (BuiltinName[29]) { // "__builtin_ia32_vextractf128_p"
+ case 'd':
+ if (!memcmp(BuiltinName+30, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vextractf128_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+30, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vextractf128_ps_256;
+ break;
+ }
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+29, "i256", 4))
+ IntrinsicID = Intrinsic::x86_avx_vextractf128_si_256;
+ break;
+ }
+ }
break;
}
}
break;
case 35:
- if (!memcmp(BuiltinName, "__builtin_ia32_ps", 17)) {
- switch (BuiltinName[17]) { // "__builtin_ia32_ps"
- case 'l':
- if (!memcmp(BuiltinName+18, "ldqi128_byteshift", 17))
- IntrinsicID = Intrinsic::x86_sse2_psll_dq_bs;
+ if (!memcmp(BuiltinName, "__builtin_ia32_", 15)) {
+ switch (BuiltinName[15]) { // "__builtin_ia32_"
+ case 'p':
+ if (!memcmp(BuiltinName+16, "s", 1)) {
+ switch (BuiltinName[17]) { // "__builtin_ia32_ps"
+ case 'l':
+ if (!memcmp(BuiltinName+18, "ldqi128_byteshift", 17))
+ IntrinsicID = Intrinsic::x86_sse2_psll_dq_bs;
+ break;
+ case 'r':
+ if (!memcmp(BuiltinName+18, "ldqi128_byteshift", 17))
+ IntrinsicID = Intrinsic::x86_sse2_psrl_dq_bs;
+ break;
+ }
+ }
break;
- case 'r':
- if (!memcmp(BuiltinName+18, "ldqi128_byteshift", 17))
- IntrinsicID = Intrinsic::x86_sse2_psrl_dq_bs;
+ case 'v':
+ if (!memcmp(BuiltinName+16, "broadcastf128_p", 15)) {
+ switch (BuiltinName[31]) { // "__builtin_ia32_vbroadcastf128_p"
+ case 'd':
+ if (!memcmp(BuiltinName+32, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vbroadcastf128_pd_256;
+ break;
+ case 's':
+ if (!memcmp(BuiltinName+32, "256", 3))
+ IntrinsicID = Intrinsic::x86_avx_vbroadcastf128_ps_256;
+ break;
+ }
+ }
break;
}
}
@@ -12252,3 +14645,8 @@ Intrinsic::ID Intrinsic::getIntrinsicForGCCBuiltin(const char *TargetPrefix, con
}
#endif
+#if defined(_MSC_VER) && defined(setjmp_undefined_for_visual_studio)
+// let's return it to _setjmp state
+#define setjmp _setjmp
+#endif
+
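The generated lookup in the hunks above resolves a GCC builtin name to an LLVM intrinsic ID by first switching on strlen(BuiltinName) and only then narrowing with single-character switches and memcmp on the remaining tail, which is how the new AVX, AES-NI and MMX entries slot into the existing buckets. The minimal hand-written C++ sketch below shows the same length-then-prefix dispatch technique; the enum values and the lookup_builtin name are illustrative only and are not part of the patch or of LLVM's API.

#include <cstring>

enum class BuiltinID { None, MmxPand, MmxPxor, AvxVzeroall };

// Resolve a GCC builtin name the same way the generated table does:
// bucket by length first, then compare one character and one short tail
// at a time.
BuiltinID lookup_builtin(const char *Name) {
  BuiltinID ID = BuiltinID::None;
  switch (std::strlen(Name)) {
  default: break;
  case 19:
    if (!std::memcmp(Name, "__builtin_ia32_", 15)) {
      switch (Name[15]) {            // "__builtin_ia32_"
      case 'p':
        switch (Name[16]) {          // "__builtin_ia32_p"
        case 'a':
          if (!std::memcmp(Name + 17, "nd", 2))
            ID = BuiltinID::MmxPand;
          break;
        case 'x':
          if (!std::memcmp(Name + 17, "or", 2))
            ID = BuiltinID::MmxPxor;
          break;
        }
        break;
      }
    }
    break;
  case 23:
    if (!std::memcmp(Name, "__builtin_ia32_vzeroall", 23))
      ID = BuiltinID::AvxVzeroall;
    break;
  }
  return ID;
}

A call such as lookup_builtin("__builtin_ia32_pxor") is filtered by the length-19 bucket before any byte comparison happens, which keeps the generated function cheap even with the many AVX names added here.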
diff --git a/libclamav/c++/llvm/include/llvm/Intrinsics.td b/libclamav/c++/llvm/include/llvm/Intrinsics.td
index 3a0da9c..fb4f750 100644
--- a/libclamav/c++/llvm/include/llvm/Intrinsics.td
+++ b/libclamav/c++/llvm/include/llvm/Intrinsics.td
@@ -19,10 +19,11 @@ include "llvm/CodeGen/ValueTypes.td"
class IntrinsicProperty;
-// Intr*Mem - Memory properties. An intrinsic is allowed to have exactly one of
+// Intr*Mem - Memory properties. An intrinsic is allowed to have at most one of
// these properties set. They are listed from the most aggressive (best to use
// if correct) to the least aggressive. If no property is set, the worst case
-// is assumed (IntrWriteMem).
+// is assumed (it may read and write any memory it can get access to and it may
+// have other side effects).
// IntrNoMem - The intrinsic does not access memory or have any other side
// effects. It may be CSE'd deleted if dead, etc.
@@ -37,15 +38,11 @@ def IntrReadArgMem : IntrinsicProperty;
// deleted if dead.
def IntrReadMem : IntrinsicProperty;
-// IntrWriteArgMem - This intrinsic reads and writes only from memory that one
-// of its arguments points to, but may access an unspecified amount. The reads
-// and writes may be volatile, but except for this it has no other side effects.
-def IntrWriteArgMem : IntrinsicProperty;
-
-// IntrWriteMem - This intrinsic may read or modify unspecified memory or has
-// other side effects. It cannot be modified by the optimizer. This is the
-// default if the intrinsic has no other Intr*Mem property.
-def IntrWriteMem : IntrinsicProperty;
+// IntrReadWriteArgMem - This intrinsic reads and writes only from memory that
+// one of its arguments points to, but may access an unspecified amount. The
+// reads and writes may be volatile, but except for this it has no other side
+// effects.
+def IntrReadWriteArgMem : IntrinsicProperty;
// Commutative - This intrinsic is commutative: X op Y == Y op X.
def Commutative : IntrinsicProperty;
@@ -117,7 +114,7 @@ def llvm_v4i8_ty : LLVMType<v4i8>; // 4 x i8
def llvm_v8i8_ty : LLVMType<v8i8>; // 8 x i8
def llvm_v16i8_ty : LLVMType<v16i8>; // 16 x i8
def llvm_v32i8_ty : LLVMType<v32i8>; // 32 x i8
-def llvm_v2i16_ty : LLVMType<v2i16>; // 4 x i16
+def llvm_v2i16_ty : LLVMType<v2i16>; // 2 x i16
def llvm_v4i16_ty : LLVMType<v4i16>; // 4 x i16
def llvm_v8i16_ty : LLVMType<v8i16>; // 8 x i16
def llvm_v16i16_ty : LLVMType<v16i16>; // 16 x i16
@@ -176,21 +173,21 @@ class GCCBuiltin<string name> {
//===--------------- Variable Argument Handling Intrinsics ----------------===//
//
-def int_vastart : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [], "llvm.va_start">;
-def int_vacopy : Intrinsic<[llvm_void_ty], [llvm_ptr_ty, llvm_ptr_ty], [],
+def int_vastart : Intrinsic<[], [llvm_ptr_ty], [], "llvm.va_start">;
+def int_vacopy : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], [],
"llvm.va_copy">;
-def int_vaend : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [], "llvm.va_end">;
+def int_vaend : Intrinsic<[], [llvm_ptr_ty], [], "llvm.va_end">;
//===------------------- Garbage Collection Intrinsics --------------------===//
//
-def int_gcroot : Intrinsic<[llvm_void_ty],
+def int_gcroot : Intrinsic<[],
[llvm_ptrptr_ty, llvm_ptr_ty]>;
def int_gcread : Intrinsic<[llvm_ptr_ty],
[llvm_ptr_ty, llvm_ptrptr_ty],
[IntrReadArgMem]>;
-def int_gcwrite : Intrinsic<[llvm_void_ty],
+def int_gcwrite : Intrinsic<[],
[llvm_ptr_ty, llvm_ptr_ty, llvm_ptrptr_ty],
- [IntrWriteArgMem, NoCapture<1>, NoCapture<2>]>;
+ [IntrReadWriteArgMem, NoCapture<1>, NoCapture<2>]>;
//===--------------------- Code Generator Intrinsics ----------------------===//
//
@@ -201,40 +198,38 @@ def int_frameaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>;
// model their dependencies on allocas.
def int_stacksave : Intrinsic<[llvm_ptr_ty]>,
GCCBuiltin<"__builtin_stack_save">;
-def int_stackrestore : Intrinsic<[llvm_void_ty], [llvm_ptr_ty]>,
+def int_stackrestore : Intrinsic<[], [llvm_ptr_ty]>,
GCCBuiltin<"__builtin_stack_restore">;
-// IntrWriteArgMem is more pessimistic than strictly necessary for prefetch,
+// IntrReadWriteArgMem is more pessimistic than strictly necessary for prefetch,
// however it does conveniently prevent the prefetch from being reordered
// with respect to nearby accesses to the same memory.
-def int_prefetch : Intrinsic<[llvm_void_ty],
+def int_prefetch : Intrinsic<[],
[llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrWriteArgMem, NoCapture<0>]>;
-def int_pcmarker : Intrinsic<[llvm_void_ty], [llvm_i32_ty]>;
+ [IntrReadWriteArgMem, NoCapture<0>]>;
+def int_pcmarker : Intrinsic<[], [llvm_i32_ty]>;
def int_readcyclecounter : Intrinsic<[llvm_i64_ty]>;
// Stack Protector Intrinsic - The stackprotector intrinsic writes the stack
// guard to the correct place on the stack frame.
-def int_stackprotector : Intrinsic<[llvm_void_ty],
- [llvm_ptr_ty, llvm_ptrptr_ty],
- [IntrWriteMem]>;
+def int_stackprotector : Intrinsic<[], [llvm_ptr_ty, llvm_ptrptr_ty], []>;
//===------------------- Standard C Library Intrinsics --------------------===//
//
-def int_memcpy : Intrinsic<[llvm_void_ty],
- [llvm_ptr_ty, llvm_ptr_ty, llvm_anyint_ty,
- llvm_i32_ty],
- [IntrWriteArgMem, NoCapture<0>, NoCapture<1>]>;
-def int_memmove : Intrinsic<[llvm_void_ty],
- [llvm_ptr_ty, llvm_ptr_ty, llvm_anyint_ty,
- llvm_i32_ty],
- [IntrWriteArgMem, NoCapture<0>, NoCapture<1>]>;
-def int_memset : Intrinsic<[llvm_void_ty],
- [llvm_ptr_ty, llvm_i8_ty, llvm_anyint_ty,
- llvm_i32_ty],
- [IntrWriteArgMem, NoCapture<0>]>;
+def int_memcpy : Intrinsic<[],
+ [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
+ llvm_i32_ty, llvm_i1_ty],
+ [IntrReadWriteArgMem, NoCapture<0>, NoCapture<1>]>;
+def int_memmove : Intrinsic<[],
+ [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
+ llvm_i32_ty, llvm_i1_ty],
+ [IntrReadWriteArgMem, NoCapture<0>, NoCapture<1>]>;
+def int_memset : Intrinsic<[],
+ [llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty,
+ llvm_i32_ty, llvm_i1_ty],
+ [IntrReadWriteArgMem, NoCapture<0>]>;
// These functions do not actually read memory, but they are sensitive to the
// rounding mode. This needs to be modelled separately; in the meantime
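The hunk above changes llvm.memcpy, llvm.memmove and llvm.memset to take arbitrary pointer types (llvm_anyptr_ty) plus a trailing i1 argument marking the operation as volatile, and retags them with the new IntrReadWriteArgMem property. Below is a rough C++ model of the new memcpy argument list (dst, src, len, align, isvolatile); the name model_llvm_memcpy is hypothetical, the alignment hint is accepted but unused, and the volatile case is approximated with volatile-qualified pointers rather than any claim about how LLVM actually lowers it.

#include <cstddef>
#include <cstdint>

// Conceptual model of the new llvm.memcpy argument list
// (dst, src, len, align, isvolatile). The byte loop stands in for
// whatever lowering the backend really chooses.
void model_llvm_memcpy(void *dst, const void *src, std::size_t len,
                       std::uint32_t /*align*/, bool is_volatile) {
  if (is_volatile) {
    // Approximate "volatile": every byte access happens as written.
    volatile unsigned char *d = static_cast<volatile unsigned char *>(dst);
    const volatile unsigned char *s =
        static_cast<const volatile unsigned char *>(src);
    for (std::size_t i = 0; i < len; ++i)
      d[i] = s[i];
  } else {
    unsigned char *d = static_cast<unsigned char *>(dst);
    const unsigned char *s = static_cast<const unsigned char *>(src);
    for (std::size_t i = 0; i < len; ++i)
      d[i] = s[i];
  }
}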
@@ -255,9 +250,9 @@ let Properties = [IntrReadMem] in {
// NOTE: these are internal interfaces.
def int_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
-def int_longjmp : Intrinsic<[llvm_void_ty], [llvm_ptr_ty, llvm_i32_ty]>;
+def int_longjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty]>;
def int_sigsetjmp : Intrinsic<[llvm_i32_ty] , [llvm_ptr_ty, llvm_i32_ty]>;
-def int_siglongjmp : Intrinsic<[llvm_void_ty], [llvm_ptr_ty, llvm_i32_ty]>;
+def int_siglongjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty]>;
// Internal interface for object size checking
def int_objectsize : Intrinsic<[llvm_anyint_ty], [llvm_ptr_ty, llvm_i1_ty],
@@ -282,9 +277,9 @@ let Properties = [IntrNoMem] in {
// optimizers can change them aggressively. Special handling needed in a few
// places.
let Properties = [IntrNoMem] in {
- def int_dbg_declare : Intrinsic<[llvm_void_ty],
+ def int_dbg_declare : Intrinsic<[],
[llvm_metadata_ty, llvm_metadata_ty]>;
- def int_dbg_value : Intrinsic<[llvm_void_ty],
+ def int_dbg_value : Intrinsic<[],
[llvm_metadata_ty, llvm_i64_ty,
llvm_metadata_ty]>;
}
@@ -297,24 +292,24 @@ def int_eh_selector : Intrinsic<[llvm_i32_ty],
def int_eh_typeid_for : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
-def int_eh_return_i32 : Intrinsic<[llvm_void_ty], [llvm_i32_ty, llvm_ptr_ty]>;
-def int_eh_return_i64 : Intrinsic<[llvm_void_ty], [llvm_i64_ty, llvm_ptr_ty]>;
+def int_eh_return_i32 : Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty]>;
+def int_eh_return_i64 : Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty]>;
-def int_eh_unwind_init: Intrinsic<[llvm_void_ty]>,
+def int_eh_unwind_init: Intrinsic<[]>,
GCCBuiltin<"__builtin_unwind_init">;
def int_eh_dwarf_cfa : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty]>;
let Properties = [IntrNoMem] in {
- def int_eh_sjlj_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
- def int_eh_sjlj_longjmp : Intrinsic<[llvm_void_ty], [llvm_ptr_ty]>;
def int_eh_sjlj_lsda : Intrinsic<[llvm_ptr_ty]>;
- def int_eh_sjlj_callsite: Intrinsic<[llvm_void_ty], [llvm_i32_ty]>;
+ def int_eh_sjlj_callsite: Intrinsic<[], [llvm_i32_ty]>;
}
+def int_eh_sjlj_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
+def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty]>;
//===---------------- Generic Variable Attribute Intrinsics----------------===//
//
-def int_var_annotation : Intrinsic<[llvm_void_ty],
+def int_var_annotation : Intrinsic<[],
[llvm_ptr_ty, llvm_ptr_ty,
llvm_ptr_ty, llvm_i32_ty],
[], "llvm.var.annotation">;
@@ -331,7 +326,7 @@ def int_annotation : Intrinsic<[llvm_anyint_ty],
//
def int_init_trampoline : Intrinsic<[llvm_ptr_ty],
[llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
- [IntrWriteArgMem]>,
+ [IntrReadWriteArgMem]>,
GCCBuiltin<"__builtin_init_trampoline">;
//===------------------------ Overflow Intrinsics -------------------------===//
@@ -361,7 +356,7 @@ def int_umul_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
//===------------------------- Atomic Intrinsics --------------------------===//
//
-def int_memory_barrier : Intrinsic<[llvm_void_ty],
+def int_memory_barrier : Intrinsic<[],
[llvm_i1_ty, llvm_i1_ty,
llvm_i1_ty, llvm_i1_ty, llvm_i1_ty], []>,
GCCBuiltin<"__builtin_llvm_memory_barrier">;
@@ -369,87 +364,95 @@ def int_memory_barrier : Intrinsic<[llvm_void_ty],
def int_atomic_cmp_swap : Intrinsic<[llvm_anyint_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrWriteArgMem, NoCapture<0>]>,
+ [IntrReadWriteArgMem, NoCapture<0>]>,
GCCBuiltin<"__sync_val_compare_and_swap">;
def int_atomic_load_add : Intrinsic<[llvm_anyint_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
- [IntrWriteArgMem, NoCapture<0>]>,
+ [IntrReadWriteArgMem, NoCapture<0>]>,
GCCBuiltin<"__sync_fetch_and_add">;
def int_atomic_swap : Intrinsic<[llvm_anyint_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
- [IntrWriteArgMem, NoCapture<0>]>,
+ [IntrReadWriteArgMem, NoCapture<0>]>,
GCCBuiltin<"__sync_lock_test_and_set">;
def int_atomic_load_sub : Intrinsic<[llvm_anyint_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
- [IntrWriteArgMem, NoCapture<0>]>,
+ [IntrReadWriteArgMem, NoCapture<0>]>,
GCCBuiltin<"__sync_fetch_and_sub">;
def int_atomic_load_and : Intrinsic<[llvm_anyint_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
- [IntrWriteArgMem, NoCapture<0>]>,
+ [IntrReadWriteArgMem, NoCapture<0>]>,
GCCBuiltin<"__sync_fetch_and_and">;
def int_atomic_load_or : Intrinsic<[llvm_anyint_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
- [IntrWriteArgMem, NoCapture<0>]>,
+ [IntrReadWriteArgMem, NoCapture<0>]>,
GCCBuiltin<"__sync_fetch_and_or">;
def int_atomic_load_xor : Intrinsic<[llvm_anyint_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
- [IntrWriteArgMem, NoCapture<0>]>,
+ [IntrReadWriteArgMem, NoCapture<0>]>,
GCCBuiltin<"__sync_fetch_and_xor">;
def int_atomic_load_nand : Intrinsic<[llvm_anyint_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
- [IntrWriteArgMem, NoCapture<0>]>,
+ [IntrReadWriteArgMem, NoCapture<0>]>,
GCCBuiltin<"__sync_fetch_and_nand">;
def int_atomic_load_min : Intrinsic<[llvm_anyint_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
- [IntrWriteArgMem, NoCapture<0>]>,
+ [IntrReadWriteArgMem, NoCapture<0>]>,
GCCBuiltin<"__sync_fetch_and_min">;
def int_atomic_load_max : Intrinsic<[llvm_anyint_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
- [IntrWriteArgMem, NoCapture<0>]>,
+ [IntrReadWriteArgMem, NoCapture<0>]>,
GCCBuiltin<"__sync_fetch_and_max">;
def int_atomic_load_umin : Intrinsic<[llvm_anyint_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
- [IntrWriteArgMem, NoCapture<0>]>,
+ [IntrReadWriteArgMem, NoCapture<0>]>,
GCCBuiltin<"__sync_fetch_and_umin">;
def int_atomic_load_umax : Intrinsic<[llvm_anyint_ty],
[LLVMAnyPointerType<LLVMMatchType<0>>,
LLVMMatchType<0>],
- [IntrWriteArgMem, NoCapture<0>]>,
+ [IntrReadWriteArgMem, NoCapture<0>]>,
GCCBuiltin<"__sync_fetch_and_umax">;
//===------------------------- Memory Use Markers -------------------------===//
//
-def int_lifetime_start : Intrinsic<[llvm_void_ty],
+def int_lifetime_start : Intrinsic<[],
[llvm_i64_ty, llvm_ptr_ty],
- [IntrWriteArgMem, NoCapture<1>]>;
-def int_lifetime_end : Intrinsic<[llvm_void_ty],
+ [IntrReadWriteArgMem, NoCapture<1>]>;
+def int_lifetime_end : Intrinsic<[],
[llvm_i64_ty, llvm_ptr_ty],
- [IntrWriteArgMem, NoCapture<1>]>;
+ [IntrReadWriteArgMem, NoCapture<1>]>;
def int_invariant_start : Intrinsic<[llvm_descriptor_ty],
[llvm_i64_ty, llvm_ptr_ty],
[IntrReadArgMem, NoCapture<1>]>;
-def int_invariant_end : Intrinsic<[llvm_void_ty],
+def int_invariant_end : Intrinsic<[],
[llvm_descriptor_ty, llvm_i64_ty,
llvm_ptr_ty],
- [IntrWriteArgMem, NoCapture<2>]>;
+ [IntrReadWriteArgMem, NoCapture<2>]>;
//===-------------------------- Other Intrinsics --------------------------===//
//
def int_flt_rounds : Intrinsic<[llvm_i32_ty]>,
GCCBuiltin<"__builtin_flt_rounds">;
-def int_trap : Intrinsic<[llvm_void_ty]>,
+def int_trap : Intrinsic<[]>,
GCCBuiltin<"__builtin_trap">;
+// Intrinsics to support half precision floating point format
+let Properties = [IntrNoMem] in {
+def int_convert_to_fp16 : Intrinsic<[llvm_i16_ty], [llvm_float_ty]>,
+ GCCBuiltin<"__gnu_f2h_ieee">;
+def int_convert_from_fp16 : Intrinsic<[llvm_float_ty], [llvm_i16_ty]>,
+ GCCBuiltin<"__gnu_h2f_ieee">;
+}
+
// These convert intrinsics are to support various conversions between
// various types with rounding and saturation. NOTE: avoid using these
// intrinsics as they might be removed sometime in the future and
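The new int_convert_to_fp16 and int_convert_from_fp16 intrinsics added above are tied to GCC's __gnu_f2h_ieee and __gnu_h2f_ieee helpers and convert between IEEE single precision and the 16-bit half format. The C++ sketch below only illustrates the bit manipulation involved: it truncates the mantissa instead of rounding to nearest even and flushes half denormals to zero, so it shows roughly what the intrinsics compute, not how libgcc or an LLVM backend implements them; the function names are made up.

#include <cstdint>
#include <cstring>

// Single precision -> half precision, truncating the extra mantissa bits.
std::uint16_t f32_to_f16_trunc(float f) {
  std::uint32_t x;
  std::memcpy(&x, &f, sizeof x);                 // reinterpret the float bits
  std::uint32_t sign   = (x >> 16) & 0x8000u;
  std::uint32_t biased = (x >> 23) & 0xFFu;      // 8-bit exponent field
  std::uint32_t mant   = x & 0x7FFFFFu;

  if (biased == 0xFFu)                           // Inf or NaN
    return static_cast<std::uint16_t>(sign | 0x7C00u | (mant ? 0x200u : 0u));

  std::int32_t exp = static_cast<std::int32_t>(biased) - 127 + 15;
  if (exp >= 0x1F)                               // too large: +-Inf
    return static_cast<std::uint16_t>(sign | 0x7C00u);
  if (exp <= 0)                                  // too small: flush to +-0
    return static_cast<std::uint16_t>(sign);
  return static_cast<std::uint16_t>(
      sign | (static_cast<std::uint32_t>(exp) << 10) | (mant >> 13));
}

// Half precision -> single precision (exact for normal numbers and Inf/NaN;
// half denormals are flushed to zero in this simplified sketch).
float f16_to_f32(std::uint16_t h) {
  std::uint32_t sign = static_cast<std::uint32_t>(h & 0x8000u) << 16;
  std::uint32_t exp  = (h >> 10) & 0x1Fu;
  std::uint32_t mant = h & 0x3FFu;
  std::uint32_t out;

  if (exp == 0x1Fu)                              // Inf or NaN
    out = sign | 0x7F800000u | (mant << 13);
  else if (exp == 0u)                            // zero or denormal: flushed
    out = sign;
  else                                           // normal number: exact
    out = sign | ((exp - 15u + 127u) << 23) | (mant << 13);

  float f;
  std::memcpy(&f, &out, sizeof f);
  return f;
}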
diff --git a/libclamav/c++/llvm/include/llvm/IntrinsicsARM.td b/libclamav/c++/llvm/include/llvm/IntrinsicsARM.td
index c408a2f..6c04771 100644
--- a/libclamav/c++/llvm/include/llvm/IntrinsicsARM.td
+++ b/libclamav/c++/llvm/include/llvm/IntrinsicsARM.td
@@ -21,6 +21,35 @@ let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
}
//===----------------------------------------------------------------------===//
+// Saturating Arithmetic
+
+let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
+ def int_arm_qadd : GCCBuiltin<"__builtin_arm_qadd">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+ def int_arm_qsub : GCCBuiltin<"__builtin_arm_qsub">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_arm_ssat : GCCBuiltin<"__builtin_arm_ssat">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_arm_usat : GCCBuiltin<"__builtin_arm_usat">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// VFP
+
+let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
+ def int_arm_get_fpscr : GCCBuiltin<"__builtin_arm_get_fpscr">,
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
+ def int_arm_set_fpscr : GCCBuiltin<"__builtin_arm_set_fpscr">,
+ Intrinsic<[], [llvm_i32_ty], []>;
+ def int_arm_vcvtr : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty],
+ [IntrNoMem]>;
+ def int_arm_vcvtru : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty],
+ [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON)
let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
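The saturating-arithmetic block added in this hunk models ARM's QADD and QSUB instructions: 32-bit signed addition and subtraction that clamp to INT32_MIN/INT32_MAX instead of wrapping (qadd is additionally marked Commutative), while ssat and usat saturate a value into a narrower signed or unsigned range. A short C++ sketch of the qadd/qsub semantics follows; the function names are illustrative, and the sticky Q flag the hardware sets on saturation is not modelled.

#include <cstdint>
#include <limits>

// Saturating 32-bit signed add, as modelled by llvm.arm.qadd (ARM QADD).
// The backend lowers the intrinsic to the instruction, not to code like this.
std::int32_t sat_add32(std::int32_t a, std::int32_t b) {
  std::int64_t r = static_cast<std::int64_t>(a) + b;
  if (r > std::numeric_limits<std::int32_t>::max())
    return std::numeric_limits<std::int32_t>::max();
  if (r < std::numeric_limits<std::int32_t>::min())
    return std::numeric_limits<std::int32_t>::min();
  return static_cast<std::int32_t>(r);
}

// Saturating 32-bit signed subtract, as modelled by llvm.arm.qsub (ARM QSUB).
// e.g. sat_add32(INT32_MAX, 1) == INT32_MAX, sat_sub32(INT32_MIN, 1) == INT32_MIN.
std::int32_t sat_sub32(std::int32_t a, std::int32_t b) {
  std::int64_t r = static_cast<std::int64_t>(a) - b;
  if (r > std::numeric_limits<std::int32_t>::max())
    return std::numeric_limits<std::int32_t>::max();
  if (r < std::numeric_limits<std::int32_t>::min())
    return std::numeric_limits<std::int32_t>::min();
  return static_cast<std::int32_t>(r);
}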
@@ -31,9 +60,6 @@ let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
class Neon_1Arg_Narrow_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
[LLVMExtendedElementVectorType<0>], [IntrNoMem]>;
- class Neon_1Arg_Long_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMTruncatedElementVectorType<0>], [IntrNoMem]>;
class Neon_2Arg_Intrinsic
: Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem]>;
@@ -47,10 +73,6 @@ let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
[LLVMTruncatedElementVectorType<0>,
LLVMTruncatedElementVectorType<0>],
[IntrNoMem]>;
- class Neon_2Arg_Wide_Intrinsic
- : Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMTruncatedElementVectorType<0>],
- [IntrNoMem]>;
class Neon_3Arg_Intrinsic
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
@@ -102,25 +124,13 @@ let Properties = [IntrNoMem, Commutative] in {
def int_arm_neon_vqaddu : Neon_2Arg_Intrinsic;
def int_arm_neon_vaddhn : Neon_2Arg_Narrow_Intrinsic;
def int_arm_neon_vraddhn : Neon_2Arg_Narrow_Intrinsic;
- def int_arm_neon_vaddls : Neon_2Arg_Long_Intrinsic;
- def int_arm_neon_vaddlu : Neon_2Arg_Long_Intrinsic;
- def int_arm_neon_vaddws : Neon_2Arg_Wide_Intrinsic;
- def int_arm_neon_vaddwu : Neon_2Arg_Wide_Intrinsic;
// Vector Multiply.
def int_arm_neon_vmulp : Neon_2Arg_Intrinsic;
def int_arm_neon_vqdmulh : Neon_2Arg_Intrinsic;
def int_arm_neon_vqrdmulh : Neon_2Arg_Intrinsic;
- def int_arm_neon_vmulls : Neon_2Arg_Long_Intrinsic;
- def int_arm_neon_vmullu : Neon_2Arg_Long_Intrinsic;
def int_arm_neon_vmullp : Neon_2Arg_Long_Intrinsic;
def int_arm_neon_vqdmull : Neon_2Arg_Long_Intrinsic;
-
- // Vector Multiply and Accumulate/Subtract.
- def int_arm_neon_vmlals : Neon_3Arg_Long_Intrinsic;
- def int_arm_neon_vmlalu : Neon_3Arg_Long_Intrinsic;
- def int_arm_neon_vmlsls : Neon_3Arg_Long_Intrinsic;
- def int_arm_neon_vmlslu : Neon_3Arg_Long_Intrinsic;
def int_arm_neon_vqdmlal : Neon_3Arg_Long_Intrinsic;
def int_arm_neon_vqdmlsl : Neon_3Arg_Long_Intrinsic;
@@ -146,10 +156,6 @@ def int_arm_neon_vqsubs : Neon_2Arg_Intrinsic;
def int_arm_neon_vqsubu : Neon_2Arg_Intrinsic;
def int_arm_neon_vsubhn : Neon_2Arg_Narrow_Intrinsic;
def int_arm_neon_vrsubhn : Neon_2Arg_Narrow_Intrinsic;
-def int_arm_neon_vsubls : Neon_2Arg_Long_Intrinsic;
-def int_arm_neon_vsublu : Neon_2Arg_Long_Intrinsic;
-def int_arm_neon_vsubws : Neon_2Arg_Wide_Intrinsic;
-def int_arm_neon_vsubwu : Neon_2Arg_Wide_Intrinsic;
// Vector Absolute Compare.
let TargetPrefix = "arm" in {
@@ -170,14 +176,6 @@ let TargetPrefix = "arm" in {
// Vector Absolute Differences.
def int_arm_neon_vabds : Neon_2Arg_Intrinsic;
def int_arm_neon_vabdu : Neon_2Arg_Intrinsic;
-def int_arm_neon_vabdls : Neon_2Arg_Long_Intrinsic;
-def int_arm_neon_vabdlu : Neon_2Arg_Long_Intrinsic;
-
-// Vector Absolute Difference and Accumulate.
-def int_arm_neon_vabas : Neon_3Arg_Intrinsic;
-def int_arm_neon_vabau : Neon_3Arg_Intrinsic;
-def int_arm_neon_vabals : Neon_3Arg_Long_Intrinsic;
-def int_arm_neon_vabalu : Neon_3Arg_Long_Intrinsic;
// Vector Pairwise Add.
def int_arm_neon_vpadd : Neon_2Arg_Intrinsic;
@@ -288,13 +286,10 @@ def int_arm_neon_vcvtfp2fxu : Neon_CvtFPToFx_Intrinsic;
def int_arm_neon_vcvtfxs2fp : Neon_CvtFxToFP_Intrinsic;
def int_arm_neon_vcvtfxu2fp : Neon_CvtFxToFP_Intrinsic;
-// Narrowing and Lengthening Vector Moves.
-def int_arm_neon_vmovn : Neon_1Arg_Narrow_Intrinsic;
+// Narrowing Saturating Vector Moves.
def int_arm_neon_vqmovns : Neon_1Arg_Narrow_Intrinsic;
def int_arm_neon_vqmovnu : Neon_1Arg_Narrow_Intrinsic;
def int_arm_neon_vqmovnsu : Neon_1Arg_Narrow_Intrinsic;
-def int_arm_neon_vmovls : Neon_1Arg_Long_Intrinsic;
-def int_arm_neon_vmovlu : Neon_1Arg_Long_Intrinsic;
// Vector Table Lookup.
// The first 1-4 arguments are the table.
@@ -315,62 +310,76 @@ def int_arm_neon_vtbx4 : Neon_Tbl6Arg_Intrinsic;
let TargetPrefix = "arm" in {
// De-interleaving vector loads from N-element structures.
+ // Source operands are the address and alignment.
def int_arm_neon_vld1 : Intrinsic<[llvm_anyvector_ty],
- [llvm_ptr_ty], [IntrReadArgMem]>;
+ [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
def int_arm_neon_vld2 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
- [llvm_ptr_ty], [IntrReadArgMem]>;
+ [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
def int_arm_neon_vld3 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
LLVMMatchType<0>],
- [llvm_ptr_ty], [IntrReadArgMem]>;
+ [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
def int_arm_neon_vld4 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
LLVMMatchType<0>, LLVMMatchType<0>],
- [llvm_ptr_ty], [IntrReadArgMem]>;
+ [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
// Vector load N-element structure to one lane.
+ // Source operands are: the address, the N input vectors (since only one
+ // lane is assigned), the lane number, and the alignment.
def int_arm_neon_vld2lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
[llvm_ptr_ty, LLVMMatchType<0>,
- LLVMMatchType<0>, llvm_i32_ty],
- [IntrReadArgMem]>;
+ LLVMMatchType<0>, llvm_i32_ty,
+ llvm_i32_ty], [IntrReadArgMem]>;
def int_arm_neon_vld3lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
LLVMMatchType<0>],
[llvm_ptr_ty, LLVMMatchType<0>,
LLVMMatchType<0>, LLVMMatchType<0>,
- llvm_i32_ty], [IntrReadArgMem]>;
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
def int_arm_neon_vld4lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
LLVMMatchType<0>, LLVMMatchType<0>],
[llvm_ptr_ty, LLVMMatchType<0>,
LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMMatchType<0>, llvm_i32_ty],
- [IntrReadArgMem]>;
+ LLVMMatchType<0>, llvm_i32_ty,
+ llvm_i32_ty], [IntrReadArgMem]>;
// Interleaving vector stores from N-element structures.
- def int_arm_neon_vst1 : Intrinsic<[llvm_void_ty],
- [llvm_ptr_ty, llvm_anyvector_ty],
- [IntrWriteArgMem]>;
- def int_arm_neon_vst2 : Intrinsic<[llvm_void_ty],
+ // Source operands are: the address, the N vectors, and the alignment.
+ def int_arm_neon_vst1 : Intrinsic<[],
[llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>], [IntrWriteArgMem]>;
- def int_arm_neon_vst3 : Intrinsic<[llvm_void_ty],
+ llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_arm_neon_vst2 : Intrinsic<[],
[llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrWriteArgMem]>;
- def int_arm_neon_vst4 : Intrinsic<[llvm_void_ty],
+ LLVMMatchType<0>, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+ def int_arm_neon_vst3 : Intrinsic<[],
+ [llvm_ptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_arm_neon_vst4 : Intrinsic<[],
[llvm_ptr_ty, llvm_anyvector_ty,
LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMMatchType<0>], [IntrWriteArgMem]>;
+ LLVMMatchType<0>, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
// Vector store N-element structure from one lane.
- def int_arm_neon_vst2lane : Intrinsic<[llvm_void_ty],
+ // Source operands are: the address, the N vectors, the lane number, and
+ // the alignment.
+ def int_arm_neon_vst2lane : Intrinsic<[],
[llvm_ptr_ty, llvm_anyvector_ty,
- LLVMMatchType<0>, llvm_i32_ty],
- [IntrWriteArgMem]>;
- def int_arm_neon_vst3lane : Intrinsic<[llvm_void_ty],
+ LLVMMatchType<0>, llvm_i32_ty,
+ llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_arm_neon_vst3lane : Intrinsic<[],
[llvm_ptr_ty, llvm_anyvector_ty,
LLVMMatchType<0>, LLVMMatchType<0>,
- llvm_i32_ty], [IntrWriteArgMem]>;
- def int_arm_neon_vst4lane : Intrinsic<[llvm_void_ty],
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+ def int_arm_neon_vst4lane : Intrinsic<[],
[llvm_ptr_ty, llvm_anyvector_ty,
LLVMMatchType<0>, LLVMMatchType<0>,
- LLVMMatchType<0>, llvm_i32_ty],
- [IntrWriteArgMem]>;
+ LLVMMatchType<0>, llvm_i32_ty,
+ llvm_i32_ty], [IntrReadWriteArgMem]>;
}
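From the user's side the vld/vst signature change above (the extra i32 alignment operand) is invisible: source code keeps using the <arm_neon.h> entry points, and the front end supplies the alignment when it lowers them. A small sketch, hedged in that how a given front end lowers these wrappers may vary; ARM/NEON targets only:

  #if defined(__ARM_NEON__)
  #include <arm_neon.h>

  // Load four 32-bit lanes, double them, store them back.
  void double_quad(const uint32_t *src, uint32_t *dst) {
    uint32x4_t v = vld1q_u32(src);   // lowered to llvm.arm.neon.vld1 (+ alignment)
    v = vaddq_u32(v, v);
    vst1q_u32(dst, v);               // lowered to llvm.arm.neon.vst1 (+ alignment)
  }
  #endif
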
diff --git a/libclamav/c++/llvm/include/llvm/IntrinsicsPowerPC.td b/libclamav/c++/llvm/include/llvm/IntrinsicsPowerPC.td
index ffb870d..da85bfb 100644
--- a/libclamav/c++/llvm/include/llvm/IntrinsicsPowerPC.td
+++ b/libclamav/c++/llvm/include/llvm/IntrinsicsPowerPC.td
@@ -18,17 +18,17 @@
// Non-altivec intrinsics.
let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
// dcba/dcbf/dcbi/dcbst/dcbt/dcbz/dcbzl(PPC970) instructions.
- def int_ppc_dcba : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
- def int_ppc_dcbf : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
- def int_ppc_dcbi : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
- def int_ppc_dcbst : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
- def int_ppc_dcbt : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
- def int_ppc_dcbtst: Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
- def int_ppc_dcbz : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
- def int_ppc_dcbzl : Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
+ def int_ppc_dcba : Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_ppc_dcbf : Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_ppc_dcbi : Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_ppc_dcbst : Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_ppc_dcbt : Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_ppc_dcbtst: Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_ppc_dcbz : Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_ppc_dcbzl : Intrinsic<[], [llvm_ptr_ty], []>;
// sync instruction
- def int_ppc_sync : Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>;
+ def int_ppc_sync : Intrinsic<[], [], []>;
}
@@ -86,31 +86,31 @@ class PowerPC_Vec_WWW_Intrinsic<string GCCIntSuffix>
let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
// Data Stream Control.
def int_ppc_altivec_dss : GCCBuiltin<"__builtin_altivec_dss">,
- Intrinsic<[llvm_void_ty], [llvm_i32_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_i32_ty], []>;
def int_ppc_altivec_dssall : GCCBuiltin<"__builtin_altivec_dssall">,
- Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>;
+ Intrinsic<[], [], []>;
def int_ppc_altivec_dst : GCCBuiltin<"__builtin_altivec_dst">,
- Intrinsic<[llvm_void_ty],
+ Intrinsic<[],
[llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrWriteMem]>;
+ []>;
def int_ppc_altivec_dstt : GCCBuiltin<"__builtin_altivec_dstt">,
- Intrinsic<[llvm_void_ty],
+ Intrinsic<[],
[llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrWriteMem]>;
+ []>;
def int_ppc_altivec_dstst : GCCBuiltin<"__builtin_altivec_dstst">,
- Intrinsic<[llvm_void_ty],
+ Intrinsic<[],
[llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrWriteMem]>;
+ []>;
def int_ppc_altivec_dststt : GCCBuiltin<"__builtin_altivec_dststt">,
- Intrinsic<[llvm_void_ty],
+ Intrinsic<[],
[llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrWriteMem]>;
+ []>;
// VSCR access.
def int_ppc_altivec_mfvscr : GCCBuiltin<"__builtin_altivec_mfvscr">,
Intrinsic<[llvm_v8i16_ty], [], [IntrReadMem]>;
def int_ppc_altivec_mtvscr : GCCBuiltin<"__builtin_altivec_mtvscr">,
- Intrinsic<[llvm_void_ty], [llvm_v4i32_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_v4i32_ty], []>;
// Loads. These don't map directly to GCC builtins because they represent the
@@ -129,20 +129,15 @@ let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
// Stores. These don't map directly to GCC builtins because they represent the
// source address with a single pointer.
def int_ppc_altivec_stvx :
- Intrinsic<[llvm_void_ty], [llvm_v4i32_ty, llvm_ptr_ty],
- [IntrWriteMem]>;
+ Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty], []>;
def int_ppc_altivec_stvxl :
- Intrinsic<[llvm_void_ty], [llvm_v4i32_ty, llvm_ptr_ty],
- [IntrWriteMem]>;
+ Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty], []>;
def int_ppc_altivec_stvebx :
- Intrinsic<[llvm_void_ty], [llvm_v16i8_ty, llvm_ptr_ty],
- [IntrWriteMem]>;
+ Intrinsic<[], [llvm_v16i8_ty, llvm_ptr_ty], []>;
def int_ppc_altivec_stvehx :
- Intrinsic<[llvm_void_ty], [llvm_v8i16_ty, llvm_ptr_ty],
- [IntrWriteMem]>;
+ Intrinsic<[], [llvm_v8i16_ty, llvm_ptr_ty], []>;
def int_ppc_altivec_stvewx :
- Intrinsic<[llvm_void_ty], [llvm_v4i32_ty, llvm_ptr_ty],
- [IntrWriteMem]>;
+ Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty], []>;
// Comparisons setting a vector.
def int_ppc_altivec_vcmpbfp : GCCBuiltin<"__builtin_altivec_vcmpbfp">,
diff --git a/libclamav/c++/llvm/include/llvm/IntrinsicsX86.td b/libclamav/c++/llvm/include/llvm/IntrinsicsX86.td
index 50ee358..06ea3ae 100644
--- a/libclamav/c++/llvm/include/llvm/IntrinsicsX86.td
+++ b/libclamav/c++/llvm/include/llvm/IntrinsicsX86.td
@@ -11,6 +11,11 @@
//
//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// Interrupt traps
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_int : Intrinsic<[], [llvm_i8_ty]>;
+}
//===----------------------------------------------------------------------===//
// SSE1
@@ -142,25 +147,25 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// SIMD store ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse_storeu_ps : GCCBuiltin<"__builtin_ia32_storeups">,
- Intrinsic<[llvm_void_ty], [llvm_ptr_ty,
- llvm_v4f32_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v4f32_ty], []>;
}
// Cacheability support ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse_movnt_ps : GCCBuiltin<"__builtin_ia32_movntps">,
- Intrinsic<[llvm_void_ty], [llvm_ptr_ty,
- llvm_v4f32_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v4f32_ty], []>;
def int_x86_sse_sfence : GCCBuiltin<"__builtin_ia32_sfence">,
- Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>;
+ Intrinsic<[], [], []>;
}
// Control register.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse_stmxcsr :
- Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_ptr_ty], []>;
def int_x86_sse_ldmxcsr :
- Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_ptr_ty], []>;
}
// Misc.
@@ -458,27 +463,27 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// SIMD store ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse2_storeu_pd : GCCBuiltin<"__builtin_ia32_storeupd">,
- Intrinsic<[llvm_void_ty], [llvm_ptr_ty,
- llvm_v2f64_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v2f64_ty], []>;
def int_x86_sse2_storeu_dq : GCCBuiltin<"__builtin_ia32_storedqu">,
- Intrinsic<[llvm_void_ty], [llvm_ptr_ty,
- llvm_v16i8_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v16i8_ty], []>;
def int_x86_sse2_storel_dq : GCCBuiltin<"__builtin_ia32_storelv4si">,
- Intrinsic<[llvm_void_ty], [llvm_ptr_ty,
- llvm_v4i32_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v4i32_ty], []>;
}
// Cacheability support ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse2_movnt_dq : GCCBuiltin<"__builtin_ia32_movntdq">,
- Intrinsic<[llvm_void_ty], [llvm_ptr_ty,
- llvm_v2i64_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v2i64_ty], []>;
def int_x86_sse2_movnt_pd : GCCBuiltin<"__builtin_ia32_movntpd">,
- Intrinsic<[llvm_void_ty], [llvm_ptr_ty,
- llvm_v2f64_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v2f64_ty], []>;
def int_x86_sse2_movnt_i : GCCBuiltin<"__builtin_ia32_movnti">,
- Intrinsic<[llvm_void_ty], [llvm_ptr_ty,
- llvm_i32_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_i32_ty], []>;
}
// Misc.
@@ -497,14 +502,14 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse2_pmovmskb_128 : GCCBuiltin<"__builtin_ia32_pmovmskb128">,
Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
def int_x86_sse2_maskmov_dqu : GCCBuiltin<"__builtin_ia32_maskmovdqu">,
- Intrinsic<[llvm_void_ty], [llvm_v16i8_ty,
- llvm_v16i8_ty, llvm_ptr_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_ptr_ty], []>;
def int_x86_sse2_clflush : GCCBuiltin<"__builtin_ia32_clflush">,
- Intrinsic<[llvm_void_ty], [llvm_ptr_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_ptr_ty], []>;
def int_x86_sse2_lfence : GCCBuiltin<"__builtin_ia32_lfence">,
- Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>;
+ Intrinsic<[], [], []>;
def int_x86_sse2_mfence : GCCBuiltin<"__builtin_ia32_mfence">,
- Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>;
+ Intrinsic<[], [], []>;
}
//===----------------------------------------------------------------------===//
@@ -545,11 +550,11 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Thread synchronization ops.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse3_monitor : GCCBuiltin<"__builtin_ia32_monitor">,
- Intrinsic<[llvm_void_ty], [llvm_ptr_ty,
- llvm_i32_ty, llvm_i32_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], []>;
def int_x86_sse3_mwait : GCCBuiltin<"__builtin_ia32_mwait">,
- Intrinsic<[llvm_void_ty], [llvm_i32_ty,
- llvm_i32_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_i32_ty,
+ llvm_i32_ty], []>;
}
//===----------------------------------------------------------------------===//
@@ -625,6 +630,9 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_ssse3_pshuf_b_128 : GCCBuiltin<"__builtin_ia32_pshufb128">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_ssse3_pshuf_w : GCCBuiltin<"__builtin_ia32_pshufw">,
+ Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
}
// Sign ops
@@ -669,16 +677,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
}
-// Align ops
-let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
- def int_x86_ssse3_palign_r :
- Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty,
- llvm_v1i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_ssse3_palign_r_128 :
- Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
-}
-
//===----------------------------------------------------------------------===//
// SSE4.1
@@ -779,6 +777,29 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
[IntrNoMem, Commutative]>;
}
+// Advanced Encryption Standard (AES) Instructions
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_aesni_aesimc : GCCBuiltin<"__builtin_ia32_aesimc128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_aesni_aesenc : GCCBuiltin<"__builtin_ia32_aesenc128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_aesni_aesenclast : GCCBuiltin<"__builtin_ia32_aesenclast128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_aesni_aesdec : GCCBuiltin<"__builtin_ia32_aesdec128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_aesni_aesdeclast : GCCBuiltin<"__builtin_ia32_aesdeclast128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_aesni_aeskeygenassist :
+ GCCBuiltin<"__builtin_ia32_aeskeygenassist128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+}
+
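The usual front door for the __builtin_ia32_aes* builtins listed above is <wmmintrin.h>; a short sketch of chaining two rounds, assuming the compiler and CPU have AES-NI enabled (e.g. -maes):

  #include <wmmintrin.h>

  // One middle round followed by the final round (which omits MixColumns).
  __m128i encrypt_two_rounds(__m128i block, __m128i rk1, __m128i rk2) {
    block = _mm_aesenc_si128(block, rk1);      // __builtin_ia32_aesenc128
    return _mm_aesenclast_si128(block, rk2);   // __builtin_ia32_aesenclast128
  }
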
// Vector pack
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse41_packusdw : GCCBuiltin<"__builtin_ia32_packusdw128">,
@@ -791,9 +812,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse41_pmuldq : GCCBuiltin<"__builtin_ia32_pmuldq128">,
Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
[IntrNoMem, Commutative]>;
- def int_x86_sse41_pmulld : GCCBuiltin<"__builtin_ia32_pmulld128">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
- [IntrNoMem, Commutative]>;
}
// Vector extract
@@ -892,7 +910,10 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse42_crc32_32 : GCCBuiltin<"__builtin_ia32_crc32si">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
- def int_x86_sse42_crc32_64 : GCCBuiltin<"__builtin_ia32_crc32di">,
+ def int_x86_sse42_crc64_8 :
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_crc64_64 : GCCBuiltin<"__builtin_ia32_crc32di">,
Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
[IntrNoMem]>;
}
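For reference, the crc32si/crc32di builtins touched above are normally reached through the SSE4.2 helpers in <nmmintrin.h>; a brief sketch (requires -msse4.2, and the 64-bit form is only available when targeting x86-64):

  #include <nmmintrin.h>

  // Fold one 32-bit and one 64-bit chunk into a running CRC-32C value.
  unsigned int crc_step32(unsigned int crc, unsigned int word) {
    return _mm_crc32_u32(crc, word);                 // __builtin_ia32_crc32si
  }

  unsigned long long crc_step64(unsigned long long crc, unsigned long long word) {
    return _mm_crc32_u64(crc, word);                 // __builtin_ia32_crc32di
  }
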
@@ -965,19 +986,360 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
}
//===----------------------------------------------------------------------===//
+// AVX
+
+// Arithmetic ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_addsub_pd_256 : GCCBuiltin<"__builtin_ia32_addsubpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_addsub_ps_256 : GCCBuiltin<"__builtin_ia32_addsubps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty], [IntrNoMem]>;
+ def int_x86_avx_max_pd_256 : GCCBuiltin<"__builtin_ia32_maxpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_max_ps_256 : GCCBuiltin<"__builtin_ia32_maxps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty], [IntrNoMem]>;
+ def int_x86_avx_min_pd_256 : GCCBuiltin<"__builtin_ia32_minpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_min_ps_256 : GCCBuiltin<"__builtin_ia32_minps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty], [IntrNoMem]>;
+
+ def int_x86_avx_sqrt_pd_256 : GCCBuiltin<"__builtin_ia32_sqrtpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_sqrt_ps_256 : GCCBuiltin<"__builtin_ia32_sqrtps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+
+ def int_x86_avx_rsqrt_ps_256 : GCCBuiltin<"__builtin_ia32_rsqrtps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+
+ def int_x86_avx_rcp_ps_256 : GCCBuiltin<"__builtin_ia32_rcpps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+
+ def int_x86_avx_round_pd_256 : GCCBuiltin<"__builtin_ia32_roundpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx_round_ps_256 : GCCBuiltin<"__builtin_ia32_roundps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+}
+
+// Horizontal ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_hadd_pd_256 : GCCBuiltin<"__builtin_ia32_haddpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_hsub_ps_256 : GCCBuiltin<"__builtin_ia32_hsubps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty], [IntrNoMem]>;
+ def int_x86_avx_hsub_pd_256 : GCCBuiltin<"__builtin_ia32_hsubpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_hadd_ps_256 : GCCBuiltin<"__builtin_ia32_haddps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty], [IntrNoMem]>;
+}
+
+// Vector permutation
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_vpermilvar_pd : GCCBuiltin<"__builtin_ia32_vpermilvarpd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_v2i64_ty], [IntrNoMem]>;
+ def int_x86_avx_vpermilvar_ps : GCCBuiltin<"__builtin_ia32_vpermilvarps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx_vpermilvar_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vpermilvarpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4i64_ty], [IntrNoMem]>;
+ def int_x86_avx_vpermilvar_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vpermilvarps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx_vperm2f128_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vperm2f128_pd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx_vperm2f128_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vperm2f128_ps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx_vperm2f128_si_256 :
+ GCCBuiltin<"__builtin_ia32_vperm2f128_si256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx_vpermil_pd : GCCBuiltin<"__builtin_ia32_vpermilpd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx_vpermil_ps : GCCBuiltin<"__builtin_ia32_vpermilps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx_vpermil_pd_256 : GCCBuiltin<"__builtin_ia32_vpermilpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx_vpermil_ps_256 : GCCBuiltin<"__builtin_ia32_vpermilps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+}
+
+// Vector blend
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_blend_pd_256 : GCCBuiltin<"__builtin_ia32_blendpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx_blend_ps_256 : GCCBuiltin<"__builtin_ia32_blendps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx_blendv_pd_256 : GCCBuiltin<"__builtin_ia32_blendvpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_blendv_ps_256 : GCCBuiltin<"__builtin_ia32_blendvps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_v8f32_ty], [IntrNoMem]>;
+}
+
+// Vector dot product
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_dp_ps_256 : GCCBuiltin<"__builtin_ia32_dpps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i32_ty], [IntrNoMem]>;
+}
+
+// Vector compare
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_cmp_pd_256 : GCCBuiltin<"__builtin_ia32_cmppd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx_cmp_ps_256 : GCCBuiltin<"__builtin_ia32_cmpps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+}
+
+// Vector extract and insert
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_vextractf128_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vextractf128_pd256">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx_vextractf128_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vextractf128_ps256">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx_vextractf128_si_256 :
+ GCCBuiltin<"__builtin_ia32_vextractf128_si256">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx_vinsertf128_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vinsertf128_pd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx_vinsertf128_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vinsertf128_ps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx_vinsertf128_si_256 :
+ GCCBuiltin<"__builtin_ia32_vinsertf128_si256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+}
+
+// Vector convert
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_cvtdq2_pd_256 : GCCBuiltin<"__builtin_ia32_cvtdq2pd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_avx_cvtdq2_ps_256 : GCCBuiltin<"__builtin_ia32_cvtdq2ps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8i32_ty], [IntrNoMem]>;
+ def int_x86_avx_cvt_pd2_ps_256 : GCCBuiltin<"__builtin_ia32_cvtpd2ps256">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_cvt_ps2dq_256 : GCCBuiltin<"__builtin_ia32_cvtps2dq256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+ def int_x86_avx_cvt_ps2_pd_256 : GCCBuiltin<"__builtin_ia32_cvtps2pd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_avx_cvtt_pd2dq_256 : GCCBuiltin<"__builtin_ia32_cvttpd2dq256">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_cvt_pd2dq_256 : GCCBuiltin<"__builtin_ia32_cvtpd2dq256">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_cvtt_ps2dq_256 : GCCBuiltin<"__builtin_ia32_cvttps2dq256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+}
+
+// Vector bit test
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_vtestz_pd : GCCBuiltin<"__builtin_ia32_vtestzpd">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestc_pd : GCCBuiltin<"__builtin_ia32_vtestcpd">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestnzc_pd : GCCBuiltin<"__builtin_ia32_vtestnzcpd">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestz_ps : GCCBuiltin<"__builtin_ia32_vtestzps">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestc_ps : GCCBuiltin<"__builtin_ia32_vtestcps">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestnzc_ps : GCCBuiltin<"__builtin_ia32_vtestnzcps">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestz_pd_256 : GCCBuiltin<"__builtin_ia32_vtestzpd256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestc_pd_256 : GCCBuiltin<"__builtin_ia32_vtestcpd256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestnzc_pd_256 : GCCBuiltin<"__builtin_ia32_vtestnzcpd256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestz_ps_256 : GCCBuiltin<"__builtin_ia32_vtestzps256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestc_ps_256 : GCCBuiltin<"__builtin_ia32_vtestcps256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestnzc_ps_256 : GCCBuiltin<"__builtin_ia32_vtestnzcps256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty], [IntrNoMem]>;
+ def int_x86_avx_ptestz_256 : GCCBuiltin<"__builtin_ia32_ptestz256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i64_ty,
+ llvm_v4i64_ty], [IntrNoMem]>;
+ def int_x86_avx_ptestc_256 : GCCBuiltin<"__builtin_ia32_ptestc256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i64_ty,
+ llvm_v4i64_ty], [IntrNoMem]>;
+ def int_x86_avx_ptestnzc_256 : GCCBuiltin<"__builtin_ia32_ptestnzc256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i64_ty,
+ llvm_v4i64_ty], [IntrNoMem]>;
+}
+
+// Vector extract sign mask
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_movmsk_pd_256 : GCCBuiltin<"__builtin_ia32_movmskpd256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_movmsk_ps_256 : GCCBuiltin<"__builtin_ia32_movmskps256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+}
+
+// Vector zero
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_vzeroall : GCCBuiltin<"__builtin_ia32_vzeroall">,
+ Intrinsic<[], [], []>;
+ def int_x86_avx_vzeroupper : GCCBuiltin<"__builtin_ia32_vzeroupper">,
+ Intrinsic<[], [], []>;
+}
+
+// Vector load with broadcast
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_vbroadcastss :
+ GCCBuiltin<"__builtin_ia32_vbroadcastss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ def int_x86_avx_vbroadcast_sd_256 :
+ GCCBuiltin<"__builtin_ia32_vbroadcastsd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ def int_x86_avx_vbroadcastss_256 :
+ GCCBuiltin<"__builtin_ia32_vbroadcastss256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ def int_x86_avx_vbroadcastf128_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vbroadcastf128_pd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ def int_x86_avx_vbroadcastf128_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vbroadcastf128_ps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadMem]>;
+}
+
+// SIMD load ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_loadu_pd_256 : GCCBuiltin<"__builtin_ia32_loadupd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ def int_x86_avx_loadu_ps_256 : GCCBuiltin<"__builtin_ia32_loadups256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ def int_x86_avx_loadu_dq_256 : GCCBuiltin<"__builtin_ia32_loaddqu256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_ptr_ty], [IntrReadMem]>;
+ def int_x86_avx_ldu_dq_256 : GCCBuiltin<"__builtin_ia32_lddqu256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_ptr_ty], [IntrReadMem]>;
+}
+
+// SIMD store ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_storeu_pd_256 : GCCBuiltin<"__builtin_ia32_storeupd256">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4f64_ty], []>;
+ def int_x86_avx_storeu_ps_256 : GCCBuiltin<"__builtin_ia32_storeups256">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8f32_ty], []>;
+ def int_x86_avx_storeu_dq_256 : GCCBuiltin<"__builtin_ia32_storedqu256">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v32i8_ty], []>;
+}
+
+// Cacheability support ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_movnt_dq_256 : GCCBuiltin<"__builtin_ia32_movntdq256">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4i64_ty], []>;
+ def int_x86_avx_movnt_pd_256 : GCCBuiltin<"__builtin_ia32_movntpd256">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4f64_ty], []>;
+ def int_x86_avx_movnt_ps_256 : GCCBuiltin<"__builtin_ia32_movntps256">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8f32_ty], []>;
+}
+
+// Conditional load ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_maskload_pd : GCCBuiltin<"__builtin_ia32_maskloadpd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty, llvm_v2f64_ty], [IntrReadMem]>;
+ def int_x86_avx_maskload_ps : GCCBuiltin<"__builtin_ia32_maskloadps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty, llvm_v4f32_ty], [IntrReadMem]>;
+ def int_x86_avx_maskload_pd_256 : GCCBuiltin<"__builtin_ia32_maskloadpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty, llvm_v4f64_ty], [IntrReadMem]>;
+ def int_x86_avx_maskload_ps_256 : GCCBuiltin<"__builtin_ia32_maskloadps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8f32_ty], [IntrReadMem]>;
+}
+
+// Conditional store ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_maskstore_pd : GCCBuiltin<"__builtin_ia32_maskstorepd">,
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v2f64_ty, llvm_v2f64_ty], []>;
+ def int_x86_avx_maskstore_ps : GCCBuiltin<"__builtin_ia32_maskstoreps">,
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v4f32_ty, llvm_v4f32_ty], []>;
+ def int_x86_avx_maskstore_pd_256 :
+ GCCBuiltin<"__builtin_ia32_maskstorepd256">,
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v4f64_ty, llvm_v4f64_ty], []>;
+ def int_x86_avx_maskstore_ps_256 :
+ GCCBuiltin<"__builtin_ia32_maskstoreps256">,
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v8f32_ty, llvm_v8f32_ty], []>;
+}
+
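Most of the new AVX definitions above line up one-to-one with <immintrin.h> wrappers; as a small, hedged example, an unaligned load / horizontal add / unaligned store sequence built only from builtins named in this block (requires -mavx):

  #include <immintrin.h>

  void hadd_unaligned(const double *a, const double *b, double *out) {
    __m256d va = _mm256_loadu_pd(a);       // __builtin_ia32_loadupd256
    __m256d vb = _mm256_loadu_pd(b);
    __m256d h  = _mm256_hadd_pd(va, vb);   // __builtin_ia32_haddpd256
    _mm256_storeu_pd(out, h);              // __builtin_ia32_storeupd256
  }
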
+//===----------------------------------------------------------------------===//
// MMX
// Empty MMX state op.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_mmx_emms : GCCBuiltin<"__builtin_ia32_emms">,
- Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>;
+ Intrinsic<[], [], []>;
def int_x86_mmx_femms : GCCBuiltin<"__builtin_ia32_femms">,
- Intrinsic<[llvm_void_ty], [], [IntrWriteMem]>;
+ Intrinsic<[], [], []>;
}
// Integer arithmetic ops.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Addition
+ def int_x86_mmx_padd_b : GCCBuiltin<"__builtin_ia32_paddb">,
+ Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, llvm_v8i8_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_padd_w : GCCBuiltin<"__builtin_ia32_paddw">,
+ Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_padd_d : GCCBuiltin<"__builtin_ia32_paddd">,
+ Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_padd_q : GCCBuiltin<"__builtin_ia32_paddq">,
+ Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty],
+ [IntrNoMem]>;
+
def int_x86_mmx_padds_b : GCCBuiltin<"__builtin_ia32_paddsb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty,
llvm_v8i8_ty], [IntrNoMem, Commutative]>;
@@ -993,6 +1355,19 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
llvm_v4i16_ty], [IntrNoMem, Commutative]>;
// Subtraction
+ def int_x86_mmx_psub_b : GCCBuiltin<"__builtin_ia32_psubb">,
+ Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, llvm_v8i8_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_psub_w : GCCBuiltin<"__builtin_ia32_psubw">,
+ Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_psub_d : GCCBuiltin<"__builtin_ia32_psubd">,
+ Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_psub_q : GCCBuiltin<"__builtin_ia32_psubq">,
+ Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty],
+ [IntrNoMem]>;
+
def int_x86_mmx_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty,
llvm_v8i8_ty], [IntrNoMem]>;
@@ -1011,6 +1386,9 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_mmx_pmulh_w : GCCBuiltin<"__builtin_ia32_pmulhw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
llvm_v4i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_mmx_pmull_w : GCCBuiltin<"__builtin_ia32_pmullw">,
+ Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
+ llvm_v4i16_ty], [IntrNoMem, Commutative]>;
def int_x86_mmx_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw">,
Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty,
llvm_v4i16_ty], [IntrNoMem, Commutative]>;
@@ -1021,6 +1399,20 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
Intrinsic<[llvm_v2i32_ty], [llvm_v4i16_ty,
llvm_v4i16_ty], [IntrNoMem, Commutative]>;
+ // Bitwise operations
+ def int_x86_mmx_pand : GCCBuiltin<"__builtin_ia32_pand">,
+ Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_pandn : GCCBuiltin<"__builtin_ia32_pandn">,
+ Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_por : GCCBuiltin<"__builtin_ia32_por">,
+ Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_pxor : GCCBuiltin<"__builtin_ia32_pxor">,
+ Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty],
+ [IntrNoMem]>;
+
// Averages
def int_x86_mmx_pavg_b : GCCBuiltin<"__builtin_ia32_pavgb">,
Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty,
@@ -1122,6 +1514,28 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
llvm_v4i16_ty], [IntrNoMem]>;
}
+// Unpacking ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_mmx_punpckhbw : GCCBuiltin<"__builtin_ia32_punpckhbw">,
+ Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, llvm_v8i8_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_punpckhwd : GCCBuiltin<"__builtin_ia32_punpckhwd">,
+ Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_punpckhdq : GCCBuiltin<"__builtin_ia32_punpckhdq">,
+ Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_punpcklbw : GCCBuiltin<"__builtin_ia32_punpcklbw">,
+ Intrinsic<[llvm_v8i8_ty], [llvm_v8i8_ty, llvm_v8i8_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_punpcklwd : GCCBuiltin<"__builtin_ia32_punpcklwd">,
+ Intrinsic<[llvm_v4i16_ty], [llvm_v4i16_ty, llvm_v4i16_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_punpckldq : GCCBuiltin<"__builtin_ia32_punpckldq">,
+ Intrinsic<[llvm_v2i32_ty], [llvm_v2i32_ty, llvm_v2i32_ty],
+ [IntrNoMem]>;
+}
+
// Integer comparison ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_mmx_pcmpeq_b : GCCBuiltin<"__builtin_ia32_pcmpeqb">,
@@ -1148,14 +1562,47 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Misc.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_mmx_maskmovq : GCCBuiltin<"__builtin_ia32_maskmovq">,
- Intrinsic<[llvm_void_ty],
- [llvm_v8i8_ty, llvm_v8i8_ty, llvm_ptr_ty],
- [IntrWriteMem]>;
+ Intrinsic<[], [llvm_v8i8_ty, llvm_v8i8_ty, llvm_ptr_ty], []>;
def int_x86_mmx_pmovmskb : GCCBuiltin<"__builtin_ia32_pmovmskb">,
Intrinsic<[llvm_i32_ty], [llvm_v8i8_ty], [IntrNoMem]>;
def int_x86_mmx_movnt_dq : GCCBuiltin<"__builtin_ia32_movntq">,
- Intrinsic<[llvm_void_ty], [llvm_ptr_ty,
- llvm_v1i64_ty], [IntrWriteMem]>;
+ Intrinsic<[], [llvm_ptr_ty, llvm_v1i64_ty], []>;
+
+// def int_x86_mmx_palignr_b : GCCBuiltin<"__builtin_ia32_palignr">,
+// Intrinsic<[llvm_v1i64_ty], [llvm_1i64_ty,
+// llvm_v1i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_mmx_pextr_w :
+ Intrinsic<[llvm_i32_ty], [llvm_v1i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_mmx_pinsr_w :
+ Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_mmx_cvtsi32_si64 :
+ Intrinsic<[llvm_v1i64_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_mmx_cvtsi64_si32 :
+ Intrinsic<[llvm_i32_ty], [llvm_v1i64_ty], [IntrNoMem]>;
+
+ def int_x86_mmx_vec_init_b : GCCBuiltin<"__builtin_ia32_vec_init_v8qi">,
+ Intrinsic<[llvm_v8i8_ty],
+ [llvm_i8_ty, llvm_i8_ty, llvm_i8_ty, llvm_i8_ty,
+ llvm_i8_ty, llvm_i8_ty, llvm_i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_vec_init_w : GCCBuiltin<"__builtin_ia32_vec_init_v4hi">,
+ Intrinsic<[llvm_v4i16_ty],
+ [llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_vec_init_d : GCCBuiltin<"__builtin_ia32_vec_init_v2si">,
+ Intrinsic<[llvm_v2i32_ty],
+ [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_mmx_vec_ext_d : GCCBuiltin<"__builtin_ia32_vec_ext_v2si">,
+ Intrinsic<[llvm_v2i32_ty],
+ [llvm_v2i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
}
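The MMX additions earlier in this file (padd*/psub*, the bitwise ops, vec_init/vec_ext and friends) correspond to the classic <mmintrin.h> interface; a short sketch with the obligatory _mm_empty() before returning to x87 code. The builtin names in the comments are the GCC-style mappings from the definitions above; a given compiler may lower the wrappers differently:

  #include <mmintrin.h>

  int sum_low_lanes(int a0, int a1, int b0, int b1) {
    __m64 a = _mm_set_pi32(a1, a0);        // __builtin_ia32_vec_init_v2si
    __m64 b = _mm_set_pi32(b1, b0);
    __m64 s = _mm_add_pi32(a, b);          // __builtin_ia32_paddd
    int lo = _mm_cvtsi64_si32(s);          // take the low 32 bits
    _mm_empty();                           // __builtin_ia32_emms
    return lo;
  }
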
diff --git a/libclamav/c++/llvm/include/llvm/LLVMContext.h b/libclamav/c++/llvm/include/llvm/LLVMContext.h
index 6d36d5e..7cb6579 100644
--- a/libclamav/c++/llvm/include/llvm/LLVMContext.h
+++ b/libclamav/c++/llvm/include/llvm/LLVMContext.h
@@ -19,6 +19,7 @@ namespace llvm {
class LLVMContextImpl;
class StringRef;
+class Instruction;
template <typename T> class SmallVectorImpl;
/// This is an important class for using LLVM in a threaded context. It
@@ -36,14 +37,46 @@ public:
LLVMContext();
~LLVMContext();
+ // Pinned metadata names, which always have the same value. This is a
+ // compile-time performance optimization, not a correctness optimization.
+ enum {
+ MD_dbg = 0 // "dbg"
+ };
+
/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
/// This ID is uniqued across modules in the current LLVMContext.
unsigned getMDKindID(StringRef Name) const;
/// getMDKindNames - Populate client supplied SmallVector with the name for
- /// custom metadata IDs registered in this LLVMContext. ID #0 is not used,
- /// so it is filled in as an empty string.
+ /// custom metadata IDs registered in this LLVMContext.
void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;
+
+ /// setInlineAsmDiagnosticHandler - This method sets a handler that is invoked
+ /// when problems with inline asm are detected by the backend. The first
+ /// argument is a function pointer (of type SourceMgr::DiagHandlerTy) and the
+ /// second is a context pointer that gets passed into the DiagHandler.
+ ///
+ /// LLVMContext doesn't take ownership or interpret either of these
+ /// pointers.
+ void setInlineAsmDiagnosticHandler(void *DiagHandler, void *DiagContext = 0);
+
+ /// getInlineAsmDiagnosticHandler - Return the diagnostic handler set by
+ /// setInlineAsmDiagnosticHandler.
+ void *getInlineAsmDiagnosticHandler() const;
+
+ /// getInlineAsmDiagnosticContext - Return the diagnostic context set by
+ /// setInlineAsmDiagnosticHandler.
+ void *getInlineAsmDiagnosticContext() const;
+
+
+ /// emitError - Emit an error message to the currently installed error handler
+ /// with optional location information. This function returns, so code should
+ /// be prepared to drop the erroneous construct on the floor and "not crash".
+ /// The generated code need not be correct. The error message will be
+ /// implicitly prefixed with "error: " and should not end with a ".".
+ void emitError(unsigned LocCookie, StringRef ErrorStr);
+ void emitError(const Instruction *I, StringRef ErrorStr);
+ void emitError(StringRef ErrorStr);
};
/// getGlobalContext - Returns a global context. This is for LLVM clients that
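To make the intent of the new emitError() overloads concrete, a hedged sketch of how backend code might report a recoverable problem tied to an instruction (the handler wiring and the exact include set depend on the client):

  #include "llvm/ADT/StringRef.h"
  #include "llvm/LLVMContext.h"

  void rejectInstruction(llvm::LLVMContext &Ctx, const llvm::Instruction *I) {
    // The message is routed to the installed handler (or printed), gains an
    // implicit "error: " prefix, and control returns so the caller can drop
    // the offending construct instead of aborting.
    Ctx.emitError(I, "construct not supported by this backend");
  }
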
diff --git a/libclamav/c++/llvm/include/llvm/LinkAllPasses.h b/libclamav/c++/llvm/include/llvm/LinkAllPasses.h
index ae53851..35dab62 100644
--- a/libclamav/c++/llvm/include/llvm/LinkAllPasses.h
+++ b/libclamav/c++/llvm/include/llvm/LinkAllPasses.h
@@ -22,7 +22,9 @@
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/PointerTracking.h"
#include "llvm/Analysis/PostDominators.h"
+#include "llvm/Analysis/RegionPrinter.h"
#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/Lint.h"
#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Function.h"
@@ -51,6 +53,7 @@ namespace {
(void) llvm::createBasicAliasAnalysisPass();
(void) llvm::createLibCallAliasAnalysisPass(0);
(void) llvm::createScalarEvolutionAliasAnalysisPass();
+ (void) llvm::createTypeBasedAliasAnalysisPass();
(void) llvm::createBlockPlacementPass();
(void) llvm::createBreakCriticalEdgesPass();
(void) llvm::createCFGSimplificationPass();
@@ -105,6 +108,11 @@ namespace {
(void) llvm::createPostDomOnlyViewerPass();
(void) llvm::createPostDomViewerPass();
(void) llvm::createReassociatePass();
+ (void) llvm::createRegionInfoPass();
+ (void) llvm::createRegionOnlyPrinterPass();
+ (void) llvm::createRegionOnlyViewerPass();
+ (void) llvm::createRegionPrinterPass();
+ (void) llvm::createRegionViewerPass();
(void) llvm::createSCCPPass();
(void) llvm::createScalarReplAggregatesPass();
(void) llvm::createSimplifyLibCallsPass();
@@ -112,6 +120,7 @@ namespace {
(void) llvm::createSingleLoopExtractorPass();
(void) llvm::createStripSymbolsPass();
(void) llvm::createStripNonDebugSymbolsPass();
+ (void) llvm::createStripDeadDebugInfoPass();
(void) llvm::createStripDeadPrototypesPass();
(void) llvm::createTailCallEliminationPass();
(void) llvm::createTailDuplicationPass();
@@ -131,12 +140,13 @@ namespace {
(void) llvm::createPrintModulePass(0);
(void) llvm::createPrintFunctionPass("", 0);
(void) llvm::createDbgInfoPrinterPass();
+ (void) llvm::createModuleDebugInfoPrinterPass();
(void) llvm::createPartialInliningPass();
- (void) llvm::createSSIPass();
- (void) llvm::createSSIEverythingPass();
(void) llvm::createGEPSplitterPass();
- (void) llvm::createSCCVNPass();
- (void) llvm::createABCDPass();
+ (void) llvm::createLintPass();
+ (void) llvm::createSinkingPass();
+ (void) llvm::createLowerAtomicPass();
+ (void) llvm::createCorrelatedValuePropagationPass();
(void)new llvm::IntervalPartition();
(void)new llvm::FindUsedTypes();
diff --git a/libclamav/c++/llvm/include/llvm/LinkAllVMCore.h b/libclamav/c++/llvm/include/llvm/LinkAllVMCore.h
index 6cf2c4b..6959cb6 100644
--- a/libclamav/c++/llvm/include/llvm/LinkAllVMCore.h
+++ b/libclamav/c++/llvm/include/llvm/LinkAllVMCore.h
@@ -33,7 +33,6 @@
#include "llvm/System/TimeValue.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/SlowOperationInformer.h"
#include <cstdlib>
namespace {
diff --git a/libclamav/c++/llvm/include/llvm/Linker.h b/libclamav/c++/llvm/include/llvm/Linker.h
index cc7bf88..b402a60 100644
--- a/libclamav/c++/llvm/include/llvm/Linker.h
+++ b/libclamav/c++/llvm/include/llvm/Linker.h
@@ -158,7 +158,6 @@ class Linker {
/// @returns true if an error occurred, false otherwise
/// @see LinkItemKind
/// @see getLastError
- /// @throws nothing
bool LinkInItems (
const ItemList& Items, ///< Set of libraries/files to link in
ItemList& NativeItems ///< Output list of native files/libs
diff --git a/libclamav/c++/llvm/include/llvm/MC/EDInstInfo.h b/libclamav/c++/llvm/include/llvm/MC/EDInstInfo.h
new file mode 100644
index 0000000..dded255
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/MC/EDInstInfo.h
@@ -0,0 +1,29 @@
+//===-- llvm/MC/EDInstInfo.h - EDis instruction info ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef EDINSTINFO_H
+#define EDINSTINFO_H
+
+#include "llvm/System/DataTypes.h"
+
+namespace llvm {
+
+#define EDIS_MAX_OPERANDS 13
+#define EDIS_MAX_SYNTAXES 2
+
+struct EDInstInfo {
+ uint8_t instructionType;
+ uint8_t numOperands;
+ uint8_t operandTypes[EDIS_MAX_OPERANDS];
+ uint8_t operandFlags[EDIS_MAX_OPERANDS];
+ const char operandOrders[EDIS_MAX_SYNTAXES][EDIS_MAX_OPERANDS];
+};
+
+} // namespace llvm
+
+#endif
diff --git a/libclamav/c++/llvm/include/llvm/MC/ELFObjectWriter.h b/libclamav/c++/llvm/include/llvm/MC/ELFObjectWriter.h
new file mode 100644
index 0000000..3b9951f
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/MC/ELFObjectWriter.h
@@ -0,0 +1,46 @@
+//===-- llvm/MC/ELFObjectWriter.h - ELF File Writer ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_ELFOBJECTWRITER_H
+#define LLVM_MC_ELFOBJECTWRITER_H
+
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+
+namespace llvm {
+class MCAsmFixup;
+class MCAssembler;
+class MCFragment;
+class MCValue;
+class raw_ostream;
+
+class ELFObjectWriter : public MCObjectWriter {
+ void *Impl;
+
+public:
+ ELFObjectWriter(raw_ostream &OS, bool Is64Bit, bool IsLittleEndian = true,
+ bool HasRelocationAddend = true);
+
+ virtual ~ELFObjectWriter();
+
+ virtual void ExecutePostLayoutBinding(MCAssembler &Asm);
+
+ virtual void RecordRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue);
+
+ virtual void WriteObject(const MCAssembler &Asm, const MCAsmLayout &Layout);
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCAsmInfo.h b/libclamav/c++/llvm/include/llvm/MC/MCAsmInfo.h
index 3effea4..43952e0 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCAsmInfo.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCAsmInfo.h
@@ -41,6 +41,10 @@ namespace llvm {
/// the macho-specific .zerofill directive for emitting BSS Symbols.
bool HasMachoZeroFillDirective; // Default is false.
+ /// HasMachoTBSSDirective - True if this is a MachO target that supports
+ /// the macho-specific .tbss directive for emitting thread local BSS Symbols
+ bool HasMachoTBSSDirective; // Default is false.
+
/// HasStaticCtorDtorReferenceInStaticMode - True if the compiler should
/// emit a ".reference .constructors_used" or ".reference .destructors_used"
/// directive after the a static ctor/dtor list. This directive is only
@@ -97,7 +101,11 @@ namespace llvm {
/// AllowNameToStartWithDigit - This is true if the assembler allows symbol
/// names to start with a digit (e.g., "0x0021"). This defaults to false.
bool AllowNameToStartWithDigit;
-
+
+ /// AllowPeriodsInName - This is true if the assembler allows periods in
+ /// symbol names. This defaults to true.
+ bool AllowPeriodsInName;
+
//===--- Data Emission Directives -------------------------------------===//
/// ZeroDirective - this should be set to the directive used to get some
@@ -145,6 +153,11 @@ namespace llvm {
/// which doesn't support the '.bss' directive only.
bool UsesELFSectionDirectiveForBSS; // Defaults to false.
+ /// HasMicrosoftFastStdCallMangling - True if this target uses microsoft
+ /// style mangling for functions with X86_StdCall/X86_FastCall calling
+ /// convention.
+ bool HasMicrosoftFastStdCallMangling; // Defaults to false.
+
//===--- Alignment Information ----------------------------------------===//
/// AlignDirective - The directive used to emit round up to an alignment
@@ -218,14 +231,6 @@ namespace llvm {
//===--- Dwarf Emission Directives -----------------------------------===//
- /// AbsoluteDebugSectionOffsets - True if we should emit abolute section
- /// offsets for debug information.
- bool AbsoluteDebugSectionOffsets; // Defaults to false.
-
- /// AbsoluteEHSectionOffsets - True if we should emit abolute section
- /// offsets for EH information. Defaults to false.
- bool AbsoluteEHSectionOffsets;
-
/// HasLEB128 - True if target asm supports leb128 directives.
bool HasLEB128; // Defaults to false.
@@ -247,21 +252,17 @@ namespace llvm {
/// encode inline subroutine information.
bool DwarfUsesInlineInfoSection; // Defaults to false.
- /// Is_EHSymbolPrivate - If set, the "_foo.eh" is made private so that it
- /// doesn't show up in the symbol table of the object file.
- bool Is_EHSymbolPrivate; // Defaults to true.
-
- /// GlobalEHDirective - This is the directive used to make exception frame
- /// tables globally visible.
- const char *GlobalEHDirective; // Defaults to NULL.
-
- /// SupportsWeakEmptyEHFrame - True if target assembler and linker will
- /// handle a weak_definition of constant 0 for an omitted EH frame.
- bool SupportsWeakOmittedEHFrame; // Defaults to true.
-
/// DwarfSectionOffsetDirective - Special section offset directive.
const char* DwarfSectionOffsetDirective; // Defaults to NULL
+ /// DwarfUsesAbsoluteLabelForStmtList - True if DW_AT_stmt_list needs
+ /// absolute label instead of offset.
+ bool DwarfUsesAbsoluteLabelForStmtList; // Defaults to true;
+
+ // DwarfUsesLabelOffsetForRanges - True if Dwarf2 output can
+ // use EmitLabelOffsetDifference.
+ bool DwarfUsesLabelOffsetForRanges;
+
//===--- CBE Asm Translation Table -----------------------------------===//
const char *const *AsmTransCBE; // Defaults to empty
@@ -295,7 +296,7 @@ namespace llvm {
/// getNonexecutableStackSection - Targets can implement this method to
/// specify a section to switch to if the translation unit doesn't have any
/// trampolines that require an executable stack.
- virtual MCSection *getNonexecutableStackSection(MCContext &Ctx) const {
+ virtual const MCSection *getNonexecutableStackSection(MCContext &Ctx) const{
return 0;
}
@@ -307,9 +308,14 @@ namespace llvm {
return UsesELFSectionDirectiveForBSS;
}
+ bool hasMicrosoftFastStdCallMangling() const {
+ return HasMicrosoftFastStdCallMangling;
+ }
+
// Accessors.
//
bool hasMachoZeroFillDirective() const { return HasMachoZeroFillDirective; }
+ bool hasMachoTBSSDirective() const { return HasMachoTBSSDirective; }
bool hasStaticCtorDtorReferenceInStaticMode() const {
return HasStaticCtorDtorReferenceInStaticMode;
}
@@ -352,6 +358,9 @@ namespace llvm {
bool doesAllowNameToStartWithDigit() const {
return AllowNameToStartWithDigit;
}
+ bool doesAllowPeriodsInName() const {
+ return AllowPeriodsInName;
+ }
const char *getZeroDirective() const {
return ZeroDirective;
}
@@ -392,12 +401,6 @@ namespace llvm {
MCSymbolAttr getProtectedVisibilityAttr() const {
return ProtectedVisibilityAttr;
}
- bool isAbsoluteDebugSectionOffsets() const {
- return AbsoluteDebugSectionOffsets;
- }
- bool isAbsoluteEHSectionOffsets() const {
- return AbsoluteEHSectionOffsets;
- }
bool hasLEB128() const {
return HasLEB128;
}
@@ -419,18 +422,15 @@ namespace llvm {
bool doesDwarfUsesInlineInfoSection() const {
return DwarfUsesInlineInfoSection;
}
- bool is_EHSymbolPrivate() const {
- return Is_EHSymbolPrivate;
- }
- const char *getGlobalEHDirective() const {
- return GlobalEHDirective;
- }
- bool getSupportsWeakOmittedEHFrame() const {
- return SupportsWeakOmittedEHFrame;
- }
const char *getDwarfSectionOffsetDirective() const {
return DwarfSectionOffsetDirective;
}
+ bool doesDwarfUsesAbsoluteLabelForStmtList() const {
+ return DwarfUsesAbsoluteLabelForStmtList;
+ }
+ bool doesDwarfUsesLabelOffsetForRanges() const {
+ return DwarfUsesLabelOffsetForRanges;
+ }
const char *const *getAsmCBE() const {
return AsmTransCBE;
}
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCAsmLayout.h b/libclamav/c++/llvm/include/llvm/MC/MCAsmLayout.h
new file mode 100644
index 0000000..b9565ba
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/MC/MCAsmLayout.h
@@ -0,0 +1,135 @@
+//===- MCAsmLayout.h - Assembly Layout Object -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMLAYOUT_H
+#define LLVM_MC_MCASMLAYOUT_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+class MCAssembler;
+class MCFragment;
+class MCSectionData;
+class MCSymbolData;
+
+/// Encapsulates the layout of an assembly file at a particular point in time.
+///
+/// Assembly may require computing multiple layouts for a particular assembly
+/// file as part of the relaxation process. This class encapsulates the layout
+/// at a single point in time in such a way that it is always possible to
+/// efficiently compute the exact addresses of any symbol in the assembly file,
+/// even during the relaxation process.
+class MCAsmLayout {
+public:
+ typedef llvm::SmallVectorImpl<MCSectionData*>::const_iterator const_iterator;
+ typedef llvm::SmallVectorImpl<MCSectionData*>::iterator iterator;
+
+private:
+ MCAssembler &Assembler;
+
+ /// List of sections in layout order.
+ llvm::SmallVector<MCSectionData*, 16> SectionOrder;
+
+ /// The last fragment which was laid out, or 0 if nothing has been laid
+ /// out. Fragments are always laid out in order, so all fragments with a
+ /// lower ordinal will be up to date.
+ mutable MCFragment *LastValidFragment;
+
+ /// \brief Make sure that the layout for the given fragment is valid, lazily
+ /// computing it if necessary.
+ void EnsureValid(const MCFragment *F) const;
+
+ bool isSectionUpToDate(const MCSectionData *SD) const;
+ bool isFragmentUpToDate(const MCFragment *F) const;
+
+public:
+ MCAsmLayout(MCAssembler &_Assembler);
+
+ /// Get the assembler object this is a layout for.
+ MCAssembler &getAssembler() const { return Assembler; }
+
+ /// \brief Update the layout because a fragment has been resized. The
+ /// fragment's size should have already been updated; the \arg SlideAmount is
+ /// the delta from the old size.
+ void UpdateForSlide(MCFragment *F, int SlideAmount);
+
+ /// \brief Update the layout because a fragment has been replaced.
+ void FragmentReplaced(MCFragment *Src, MCFragment *Dst);
+
+ /// \brief Perform a full layout.
+ void LayoutFile();
+
+ /// \brief Perform layout for a single fragment, assuming that the previous
+ /// fragment has already been laid out correctly, and the parent section has
+ /// been initialized.
+ void LayoutFragment(MCFragment *Fragment);
+
+ /// \brief Performs initial layout for a single section, assuming that the
+ /// previous section (including its fragments) has already been laid out
+ /// correctly.
+ void LayoutSection(MCSectionData *SD);
+
+ /// @name Section Access (in layout order)
+ /// @{
+
+ llvm::SmallVectorImpl<MCSectionData*> &getSectionOrder() {
+ return SectionOrder;
+ }
+ const llvm::SmallVectorImpl<MCSectionData*> &getSectionOrder() const {
+ return SectionOrder;
+ }
+
+ /// @}
+ /// @name Fragment Layout Data
+ /// @{
+
+ /// \brief Get the effective size of the given fragment, as computed in the
+ /// current layout.
+ uint64_t getFragmentEffectiveSize(const MCFragment *F) const;
+
+ /// \brief Get the offset of the given fragment inside its containing section.
+ uint64_t getFragmentOffset(const MCFragment *F) const;
+
+ /// @}
+ /// @name Section Layout Data
+ /// @{
+
+ /// \brief Get the computed address of the given section.
+ uint64_t getSectionAddress(const MCSectionData *SD) const;
+
+ /// @}
+ /// @name Utility Functions
+ /// @{
+
+ /// \brief Get the address of the given fragment, as computed in the current
+ /// layout.
+ uint64_t getFragmentAddress(const MCFragment *F) const;
+
+ /// \brief Get the address space size of the given section, as it affects
+ /// layout. This may differ from the size reported by \see getSectionSize() by
+ /// not including section tail padding.
+ uint64_t getSectionAddressSize(const MCSectionData *SD) const;
+
+ /// \brief Get the data size of the given section, as emitted to the object
+ /// file. This may include additional padding, or be 0 for virtual sections.
+ uint64_t getSectionFileSize(const MCSectionData *SD) const;
+
+ /// \brief Get the logical data size of the given section.
+ uint64_t getSectionSize(const MCSectionData *SD) const;
+
+ /// \brief Get the address of the given symbol, as computed in the current
+ /// layout.
+ uint64_t getSymbolAddress(const MCSymbolData *SD) const;
+
+ /// @}
+};
+
+} // end namespace llvm
+
+#endif
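
The MCAsmLayout interface above is query-only; the assembler itself drives relaxation. As a hedged sketch of how a client would read back layout results (names are illustrative and not part of the patch; it assumes the assembler has already produced a valid layout):

    #include "llvm/MC/MCAsmLayout.h"
    using namespace llvm;

    // Read back layout results for a fragment and a symbol (sketch only).
    static void reportLayout(const MCAsmLayout &Layout, const MCFragment *F,
                             const MCSymbolData *SD) {
      uint64_t Off      = Layout.getFragmentOffset(F);   // offset inside its section
      uint64_t FragAddr = Layout.getFragmentAddress(F);  // section address + offset
      uint64_t SymAddr  = Layout.getSymbolAddress(SD);   // defining fragment + symbol offset
      (void)Off; (void)FragAddr; (void)SymAddr;
    }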
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCAssembler.h b/libclamav/c++/llvm/include/llvm/MC/MCAssembler.h
index 882929f..d193b98 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCAssembler.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCAssembler.h
@@ -10,58 +10,46 @@
#ifndef LLVM_MC_MCASSEMBLER_H
#define LLVM_MC_MCASSEMBLER_H
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/Support/Casting.h"
#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCInst.h"
#include "llvm/System/DataTypes.h"
#include <vector> // FIXME: Shouldn't be needed.
namespace llvm {
class raw_ostream;
+class MCAsmLayout;
class MCAssembler;
+class MCBinaryExpr;
class MCContext;
+class MCCodeEmitter;
class MCExpr;
class MCFragment;
+class MCObjectWriter;
class MCSection;
class MCSectionData;
class MCSymbol;
-
-/// MCAsmFixup - Represent a fixed size region of bytes inside some fragment
-/// which needs to be rewritten. This region will either be rewritten by the
-/// assembler or cause a relocation entry to be generated.
-struct MCAsmFixup {
- /// Offset - The offset inside the fragment which needs to be rewritten.
- uint64_t Offset;
-
- /// Value - The expression to eventually write into the fragment.
- const MCExpr *Value;
-
- /// Kind - The fixup kind.
- MCFixupKind Kind;
-
- /// FixedValue - The value to replace the fix up by.
- //
- // FIXME: This should not be here.
- uint64_t FixedValue;
-
-public:
- MCAsmFixup(uint64_t _Offset, const MCExpr &_Value, MCFixupKind _Kind)
- : Offset(_Offset), Value(&_Value), Kind(_Kind), FixedValue(0) {}
-};
+class MCSymbolData;
+class MCValue;
+class TargetAsmBackend;
class MCFragment : public ilist_node<MCFragment> {
+ friend class MCAsmLayout;
+
MCFragment(const MCFragment&); // DO NOT IMPLEMENT
void operator=(const MCFragment&); // DO NOT IMPLEMENT
public:
enum FragmentType {
- FT_Data,
FT_Align,
+ FT_Data,
FT_Fill,
- FT_Org,
- FT_ZeroFill
+ FT_Inst,
+ FT_Org
};
private:
@@ -70,6 +58,11 @@ private:
/// Parent - The data for the section this fragment is in.
MCSectionData *Parent;
+ /// Atom - The atom this fragment is in, as represented by its defining
+ /// symbol. Atoms are only used by backends which set
+ /// \see MCAsmBackend::hasReliableSymbolDifference().
+ MCSymbolData *Atom;
+
/// @name Assembler Backend Data
/// @{
//
@@ -79,8 +72,13 @@ private:
/// initialized.
uint64_t Offset;
- /// FileSize - The file size of this section. This is ~0 until initialized.
- uint64_t FileSize;
+ /// EffectiveSize - The computed size of this fragment. This is ~0 until
+ /// initialized.
+ uint64_t EffectiveSize;
+
+ /// LayoutOrder - The global layout order of this fragment. This is the index
+ /// across all fragments in the file, not just within the section.
+ unsigned LayoutOrder;
/// @}
@@ -97,50 +95,26 @@ public:
MCSectionData *getParent() const { return Parent; }
void setParent(MCSectionData *Value) { Parent = Value; }
- // FIXME: This should be abstract, fix sentinel.
- virtual uint64_t getMaxFileSize() const {
- assert(0 && "Invalid getMaxFileSize call!");
- return 0;
- }
-
- /// @name Assembler Backend Support
- /// @{
- //
- // FIXME: This could all be kept private to the assembler implementation.
-
- uint64_t getAddress() const;
-
- uint64_t getFileSize() const {
- assert(FileSize != ~UINT64_C(0) && "File size not set!");
- return FileSize;
- }
- void setFileSize(uint64_t Value) {
- assert(Value <= getMaxFileSize() && "Invalid file size!");
- FileSize = Value;
- }
-
- uint64_t getOffset() const {
- assert(Offset != ~UINT64_C(0) && "File offset not set!");
- return Offset;
- }
- void setOffset(uint64_t Value) { Offset = Value; }
+ MCSymbolData *getAtom() const { return Atom; }
+ void setAtom(MCSymbolData *Value) { Atom = Value; }
- /// @}
+ unsigned getLayoutOrder() const { return LayoutOrder; }
+ void setLayoutOrder(unsigned Value) { LayoutOrder = Value; }
static bool classof(const MCFragment *O) { return true; }
- virtual void dump();
+ void dump();
};
class MCDataFragment : public MCFragment {
SmallString<32> Contents;
/// Fixups - The list of fixups in this fragment.
- std::vector<MCAsmFixup> Fixups;
+ std::vector<MCFixup> Fixups;
public:
- typedef std::vector<MCAsmFixup>::const_iterator const_fixup_iterator;
- typedef std::vector<MCAsmFixup>::iterator fixup_iterator;
+ typedef std::vector<MCFixup>::const_iterator const_fixup_iterator;
+ typedef std::vector<MCFixup>::iterator fixup_iterator;
public:
MCDataFragment(MCSectionData *SD = 0) : MCFragment(FT_Data, SD) {}
@@ -148,20 +122,22 @@ public:
/// @name Accessors
/// @{
- uint64_t getMaxFileSize() const {
- return Contents.size();
- }
-
SmallString<32> &getContents() { return Contents; }
const SmallString<32> &getContents() const { return Contents; }
/// @}
-
/// @name Fixup Access
/// @{
- std::vector<MCAsmFixup> &getFixups() { return Fixups; }
- const std::vector<MCAsmFixup> &getFixups() const { return Fixups; }
+ void addFixup(MCFixup Fixup) {
+ // Enforce invariant that fixups are in offset order.
+ assert((Fixups.empty() || Fixup.getOffset() > Fixups.back().getOffset()) &&
+ "Fixups must be added in order!");
+ Fixups.push_back(Fixup);
+ }
+
+ std::vector<MCFixup> &getFixups() { return Fixups; }
+ const std::vector<MCFixup> &getFixups() const { return Fixups; }
fixup_iterator fixup_begin() { return Fixups.begin(); }
const_fixup_iterator fixup_begin() const { return Fixups.begin(); }
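
The new addFixup() hook enforces that fixups arrive in increasing offset order, so later passes can walk data and fixups in lock-step. A small illustration (hypothetical helper, not part of the patch) of appending a 4-byte patched word to a data fragment:

    #include "llvm/MC/MCAssembler.h"
    #include "llvm/MC/MCFixup.h"
    using namespace llvm;

    // Reserve four bytes at the end of the fragment and attach a fixup for them.
    static void addFixedUpWord(MCDataFragment &DF, const MCExpr *Expr) {
      uint32_t Offset = DF.getContents().size();   // fixup applies at the current end
      for (unsigned i = 0; i != 4; ++i)
        DF.getContents().push_back(0);             // placeholder bytes, patched later
      DF.addFixup(MCFixup::Create(Offset, Expr, FK_Data_4));
    }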
@@ -177,8 +153,66 @@ public:
return F->getKind() == MCFragment::FT_Data;
}
static bool classof(const MCDataFragment *) { return true; }
+};
+
+// FIXME: This current incarnation of MCInstFragment doesn't make much sense, as
+// it is almost entirely a duplicate of MCDataFragment. If we decide to stick
+// with this approach (as opposed to making MCInstFragment a very lightweight
+// object with just the MCInst and a code size), then we should just change
+// MCDataFragment to have an optional MCInst at its end.
+class MCInstFragment : public MCFragment {
+ /// Inst - The instruction this is a fragment for.
+ MCInst Inst;
+
+ /// Code - Binary data for the currently encoded instruction.
+ SmallString<8> Code;
+
+ /// Fixups - The list of fixups in this fragment.
+ SmallVector<MCFixup, 1> Fixups;
+
+public:
+ typedef SmallVectorImpl<MCFixup>::const_iterator const_fixup_iterator;
+ typedef SmallVectorImpl<MCFixup>::iterator fixup_iterator;
+
+public:
+ MCInstFragment(MCInst _Inst, MCSectionData *SD = 0)
+ : MCFragment(FT_Inst, SD), Inst(_Inst) {
+ }
- virtual void dump();
+ /// @name Accessors
+ /// @{
+
+ SmallVectorImpl<char> &getCode() { return Code; }
+ const SmallVectorImpl<char> &getCode() const { return Code; }
+
+ unsigned getInstSize() const { return Code.size(); }
+
+ MCInst &getInst() { return Inst; }
+ const MCInst &getInst() const { return Inst; }
+
+ void setInst(MCInst Value) { Inst = Value; }
+
+ /// @}
+ /// @name Fixup Access
+ /// @{
+
+ SmallVectorImpl<MCFixup> &getFixups() { return Fixups; }
+ const SmallVectorImpl<MCFixup> &getFixups() const { return Fixups; }
+
+ fixup_iterator fixup_begin() { return Fixups.begin(); }
+ const_fixup_iterator fixup_begin() const { return Fixups.begin(); }
+
+ fixup_iterator fixup_end() {return Fixups.end();}
+ const_fixup_iterator fixup_end() const {return Fixups.end();}
+
+ size_t fixup_size() const { return Fixups.size(); }
+
+ /// @}
+
+ static bool classof(const MCFragment *F) {
+ return F->getKind() == MCFragment::FT_Inst;
+ }
+ static bool classof(const MCInstFragment *) { return true; }
};
class MCAlignFragment : public MCFragment {
@@ -195,24 +229,28 @@ class MCAlignFragment : public MCFragment {
/// cannot be satisfied in this width then this fragment is ignored.
unsigned MaxBytesToEmit;
- /// EmitNops - true when aligning code and optimal nops to be used for filling
- bool EmitNops;
+ /// EmitNops - Flag to indicate that (optimal) NOPs should be emitted instead
+ /// of using the provided value. The exact interpretation of this flag is
+ /// target dependent.
+ bool EmitNops : 1;
+
+ /// OnlyAlignAddress - Flag to indicate that this align is only used to adjust
+ /// the address space size of a section and that it should not be included as
+ /// part of the section size. This flag can only be used on the last fragment
+ /// in a section.
+ bool OnlyAlignAddress : 1;
public:
MCAlignFragment(unsigned _Alignment, int64_t _Value, unsigned _ValueSize,
- unsigned _MaxBytesToEmit, bool _EmitNops,
- MCSectionData *SD = 0)
+ unsigned _MaxBytesToEmit, MCSectionData *SD = 0)
: MCFragment(FT_Align, SD), Alignment(_Alignment),
Value(_Value),ValueSize(_ValueSize),
- MaxBytesToEmit(_MaxBytesToEmit), EmitNops(_EmitNops) {}
+ MaxBytesToEmit(_MaxBytesToEmit), EmitNops(false),
+ OnlyAlignAddress(false) {}
/// @name Accessors
/// @{
- uint64_t getMaxFileSize() const {
- return std::max(Alignment - 1, MaxBytesToEmit);
- }
-
unsigned getAlignment() const { return Alignment; }
int64_t getValue() const { return Value; }
@@ -221,7 +259,11 @@ public:
unsigned getMaxBytesToEmit() const { return MaxBytesToEmit; }
- unsigned getEmitNops() const { return EmitNops; }
+ bool hasEmitNops() const { return EmitNops; }
+ void setEmitNops(bool Value) { EmitNops = Value; }
+
+ bool hasOnlyAlignAddress() const { return OnlyAlignAddress; }
+ void setOnlyAlignAddress(bool Value) { OnlyAlignAddress = Value; }
/// @}
@@ -229,38 +271,36 @@ public:
return F->getKind() == MCFragment::FT_Align;
}
static bool classof(const MCAlignFragment *) { return true; }
-
- virtual void dump();
};
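
With EmitNops and OnlyAlignAddress turned into plain setters, the streamer configures an align fragment after constructing it. A hedged sketch of aligning code to 16 bytes with target-chosen NOPs (SD stands in for the current MCSectionData):

    // Pad to a 16-byte boundary; the target backend picks the NOP encoding.
    MCAlignFragment *AF =
      new MCAlignFragment(/*Alignment=*/16, /*Value=*/0, /*ValueSize=*/1,
                          /*MaxBytesToEmit=*/15, &SD);  // skip if >15 bytes would be needed
    AF->setEmitNops(true);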
class MCFillFragment : public MCFragment {
/// Value - Value to use for filling bytes.
int64_t Value;
- /// ValueSize - The size (in bytes) of \arg Value to use when filling.
+ /// ValueSize - The size (in bytes) of \arg Value to use when filling, or 0 if
+ /// this is a virtual fill fragment.
unsigned ValueSize;
- /// Count - The number of copies of \arg Value to insert.
- uint64_t Count;
+ /// Size - The number of bytes to insert.
+ uint64_t Size;
public:
- MCFillFragment(int64_t _Value, unsigned _ValueSize, uint64_t _Count,
+ MCFillFragment(int64_t _Value, unsigned _ValueSize, uint64_t _Size,
MCSectionData *SD = 0)
: MCFragment(FT_Fill, SD),
- Value(_Value), ValueSize(_ValueSize), Count(_Count) {}
+ Value(_Value), ValueSize(_ValueSize), Size(_Size) {
+ assert((!ValueSize || (Size % ValueSize) == 0) &&
+ "Fill size must be a multiple of the value size!");
+ }
/// @name Accessors
/// @{
- uint64_t getMaxFileSize() const {
- return ValueSize * Count;
- }
-
int64_t getValue() const { return Value; }
unsigned getValueSize() const { return ValueSize; }
- uint64_t getCount() const { return Count; }
+ uint64_t getSize() const { return Size; }
/// @}
@@ -268,8 +308,6 @@ public:
return F->getKind() == MCFragment::FT_Fill;
}
static bool classof(const MCFillFragment *) { return true; }
-
- virtual void dump();
};
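
The fill fragment now asserts that the total size is a multiple of the value size (ValueSize == 0 marks a virtual fill). For example, emitting 64 zero bytes one byte at a time (SD again stands in for the current section data):

    // 64 bytes of zeros; Size (64) is a multiple of ValueSize (1), so the
    // constructor's invariant holds. Ownership passes to the section's fragment list.
    new MCFillFragment(/*Value=*/0, /*ValueSize=*/1, /*Size=*/64, &SD);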
class MCOrgFragment : public MCFragment {
@@ -287,11 +325,6 @@ public:
/// @name Accessors
/// @{
- uint64_t getMaxFileSize() const {
- // FIXME: This doesn't make much sense.
- return ~UINT64_C(0);
- }
-
const MCExpr &getOffset() const { return *Offset; }
uint8_t getValue() const { return Value; }
@@ -302,50 +335,14 @@ public:
return F->getKind() == MCFragment::FT_Org;
}
static bool classof(const MCOrgFragment *) { return true; }
-
- virtual void dump();
-};
-
-/// MCZeroFillFragment - Represent data which has a fixed size and alignment,
-/// but requires no physical space in the object file.
-class MCZeroFillFragment : public MCFragment {
- /// Size - The size of this fragment.
- uint64_t Size;
-
- /// Alignment - The alignment for this fragment.
- unsigned Alignment;
-
-public:
- MCZeroFillFragment(uint64_t _Size, unsigned _Alignment, MCSectionData *SD = 0)
- : MCFragment(FT_ZeroFill, SD),
- Size(_Size), Alignment(_Alignment) {}
-
- /// @name Accessors
- /// @{
-
- uint64_t getMaxFileSize() const {
- // FIXME: This also doesn't make much sense, this method is misnamed.
- return ~UINT64_C(0);
- }
-
- uint64_t getSize() const { return Size; }
-
- unsigned getAlignment() const { return Alignment; }
-
- /// @}
-
- static bool classof(const MCFragment *F) {
- return F->getKind() == MCFragment::FT_ZeroFill;
- }
- static bool classof(const MCZeroFillFragment *) { return true; }
-
- virtual void dump();
};
// FIXME: Should this be a separate class, or just merged into MCSection? Since
// we anticipate the fast path being through an MCAssembler, the only reason to
// keep it out is for API abstraction.
class MCSectionData : public ilist_node<MCSectionData> {
+ friend class MCAsmLayout;
+
MCSectionData(const MCSectionData&); // DO NOT IMPLEMENT
void operator=(const MCSectionData&); // DO NOT IMPLEMENT
@@ -359,9 +356,15 @@ public:
typedef FragmentListType::reverse_iterator reverse_iterator;
private:
- iplist<MCFragment> Fragments;
+ FragmentListType Fragments;
const MCSection *Section;
+ /// Ordinal - The section index in the assemblers section list.
+ unsigned Ordinal;
+
+ /// LayoutOrder - The index of this section in the layout order.
+ unsigned LayoutOrder;
+
/// Alignment - The maximum alignment seen in this section.
unsigned Alignment;
@@ -374,13 +377,6 @@ private:
/// initialized.
uint64_t Address;
- /// Size - The content size of this section. This is ~0 until initialized.
- uint64_t Size;
-
- /// FileSize - The size of this section in the object file. This is ~0 until
- /// initialized.
- uint64_t FileSize;
-
/// HasInstructions - Whether this section has had instructions emitted into
/// it.
unsigned HasInstructions : 1;
@@ -397,6 +393,15 @@ public:
unsigned getAlignment() const { return Alignment; }
void setAlignment(unsigned Value) { Alignment = Value; }
+ bool hasInstructions() const { return HasInstructions; }
+ void setHasInstructions(bool Value) { HasInstructions = Value; }
+
+ unsigned getOrdinal() const { return Ordinal; }
+ void setOrdinal(unsigned Value) { Ordinal = Value; }
+
+ unsigned getLayoutOrder() const { return LayoutOrder; }
+ void setLayoutOrder(unsigned Value) { LayoutOrder = Value; }
+
/// @name Fragment Access
/// @{
@@ -419,36 +424,9 @@ public:
bool empty() const { return Fragments.empty(); }
- /// @}
- /// @name Assembler Backend Support
- /// @{
- //
- // FIXME: This could all be kept private to the assembler implementation.
-
- uint64_t getAddress() const {
- assert(Address != ~UINT64_C(0) && "Address not set!");
- return Address;
- }
- void setAddress(uint64_t Value) { Address = Value; }
-
- uint64_t getSize() const {
- assert(Size != ~UINT64_C(0) && "File size not set!");
- return Size;
- }
- void setSize(uint64_t Value) { Size = Value; }
-
- uint64_t getFileSize() const {
- assert(FileSize != ~UINT64_C(0) && "File size not set!");
- return FileSize;
- }
- void setFileSize(uint64_t Value) { FileSize = Value; }
-
- bool hasInstructions() const { return HasInstructions; }
- void setHasInstructions(bool Value) { HasInstructions = Value; }
+ void dump();
/// @}
-
- void dump();
};
// FIXME: Same concerns as with SectionData.
@@ -476,6 +454,10 @@ public:
// common symbol can never get a definition.
uint64_t CommonSize;
+ /// SymbolSize - An expression describing how to calculate the size of
+ /// a symbol. If a symbol has no size this field will be NULL.
+ const MCExpr *SymbolSize;
+
/// CommonAlign - The alignment of the symbol, if it is 'common'.
//
// FIXME: Pack this in with other fields?
@@ -533,6 +515,15 @@ public:
return CommonSize;
}
+ void setSize(const MCExpr *SS) {
+ SymbolSize = SS;
+ }
+
+ const MCExpr *getSize() const {
+ return SymbolSize;
+ }
+
+
/// getCommonAlignment - Return the alignment of a 'common' symbol.
unsigned getCommonAlignment() const {
assert(isCommon() && "Not a 'common' symbol!");
@@ -545,6 +536,11 @@ public:
/// setFlags - Set the (implementation defined) symbol flags.
void setFlags(uint32_t Value) { Flags = Value; }
+ /// modifyFlags - Modify the flags via a mask
+ void modifyFlags(uint32_t Value, uint32_t Mask) {
+ Flags = (Flags & ~Mask) | Value;
+ }
+
/// getIndex - Get the (implementation defined) index.
uint64_t getIndex() const { return Index; }
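
The new setSize() and modifyFlags() members let object writers record a symbol's size expression and update one flag field without clobbering neighbouring bits. A hedged sketch (Start, End and Ctx are assumed to exist; SF_NoDeadStrip comes from MCMachOSymbolFlags.h, added later in this patch):

    // Size of the symbol is the distance between two labels.
    SD.setSize(MCBinaryExpr::CreateSub(MCSymbolRefExpr::Create(End, Ctx),
                                       MCSymbolRefExpr::Create(Start, Ctx), Ctx));
    // Set one Mach-O 'desc' bit, leaving the rest of the flags untouched.
    SD.modifyFlags(SF_NoDeadStrip, SF_NoDeadStrip);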
@@ -563,6 +559,8 @@ struct IndirectSymbolData {
};
class MCAssembler {
+ friend class MCAsmLayout;
+
public:
typedef iplist<MCSectionData> SectionDataListType;
typedef iplist<MCSymbolData> SymbolDataListType;
@@ -573,6 +571,8 @@ public:
typedef SymbolDataListType::const_iterator const_symbol_iterator;
typedef SymbolDataListType::iterator symbol_iterator;
+ typedef std::vector<IndirectSymbolData>::const_iterator
+ const_indirect_symbol_iterator;
typedef std::vector<IndirectSymbolData>::iterator indirect_symbol_iterator;
private:
@@ -581,21 +581,90 @@ private:
MCContext &Context;
+ TargetAsmBackend &Backend;
+
+ MCCodeEmitter &Emitter;
+
raw_ostream &OS;
iplist<MCSectionData> Sections;
iplist<MCSymbolData> Symbols;
+ /// The map of sections to their associated assembler backend data.
+ //
+ // FIXME: Avoid this indirection?
+ DenseMap<const MCSection*, MCSectionData*> SectionMap;
+
+ /// The map of symbols to their associated assembler backend data.
+ //
+ // FIXME: Avoid this indirection?
+ DenseMap<const MCSymbol*, MCSymbolData*> SymbolMap;
+
std::vector<IndirectSymbolData> IndirectSymbols;
+ unsigned RelaxAll : 1;
unsigned SubsectionsViaSymbols : 1;
private:
- /// LayoutSection - Assign offsets and sizes to the fragments in the section
- /// \arg SD, and update the section size. The section file offset should
- /// already have been computed.
- void LayoutSection(MCSectionData &SD);
+ /// Evaluate a fixup to a relocatable expression and the value which should be
+ /// placed into the fixup.
+ ///
+ /// \param Layout The layout to use for evaluation.
+ /// \param Fixup The fixup to evaluate.
+ /// \param DF The fragment the fixup is inside.
+ /// \param Target [out] On return, the relocatable expression the fixup
+ /// evaluates to.
+ /// \param Value [out] On return, the value of the fixup as currently laid
+ /// out.
+ /// \return Whether the fixup value was fully resolved. This is true if the
+ /// \arg Value result is fixed, otherwise the value may change due to
+ /// relocation.
+ bool EvaluateFixup(const MCAsmLayout &Layout,
+ const MCFixup &Fixup, const MCFragment *DF,
+ MCValue &Target, uint64_t &Value) const;
+
+ /// Check whether a fixup can be satisfied, or whether it needs to be relaxed
+ /// (increased in size, in order to hold its value correctly).
+ bool FixupNeedsRelaxation(const MCFixup &Fixup, const MCFragment *DF,
+ const MCAsmLayout &Layout) const;
+
+ /// Check whether the given fragment needs relaxation.
+ bool FragmentNeedsRelaxation(const MCInstFragment *IF,
+ const MCAsmLayout &Layout) const;
+
+ /// Compute the effective fragment size assuming it is laid out at the given
+ /// \arg SectionAddress and \arg FragmentOffset.
+ uint64_t ComputeFragmentSize(MCAsmLayout &Layout, const MCFragment &F,
+ uint64_t SectionAddress,
+ uint64_t FragmentOffset) const;
+
+ /// LayoutOnce - Perform one layout iteration and return true if any offsets
+ /// were adjusted.
+ bool LayoutOnce(MCAsmLayout &Layout);
+
+ /// FinishLayout - Finalize a layout, including fragment lowering.
+ void FinishLayout(MCAsmLayout &Layout);
+
+public:
+ /// Find the symbol which defines the atom containing the given symbol, or
+ /// null if there is no such symbol.
+ const MCSymbolData *getAtom(const MCAsmLayout &Layout,
+ const MCSymbolData *Symbol) const;
+
+ /// Check whether a particular symbol is visible to the linker and is required
+ /// in the symbol table, or whether it can be discarded by the assembler. This
+ /// also affects whether the assembler treats the label as potentially
+ /// defining a separate atom.
+ bool isSymbolLinkerVisible(const MCSymbol &SD) const;
+
+ /// Emit the section contents using the given object writer.
+ //
+ // FIXME: Should MCAssembler always have a reference to the object writer?
+ void WriteSectionData(const MCSectionData *Section, const MCAsmLayout &Layout,
+ MCObjectWriter *OW) const;
+
+ void AddSectionToTheEnd(MCSectionData &SD, MCAsmLayout &Layout);
public:
/// Construct a new assembler instance.
@@ -606,13 +675,20 @@ public:
// concrete and require clients to pass in a target like object. The other
// option is to make this abstract, and have targets provide concrete
// implementations as we do with AsmParser.
- MCAssembler(MCContext &_Context, raw_ostream &OS);
+ MCAssembler(MCContext &_Context, TargetAsmBackend &_Backend,
+ MCCodeEmitter &_Emitter, raw_ostream &OS);
~MCAssembler();
MCContext &getContext() const { return Context; }
+ TargetAsmBackend &getBackend() const { return Backend; }
+
+ MCCodeEmitter &getEmitter() const { return Emitter; }
+
/// Finish - Do final processing and write the object to the output stream.
- void Finish();
+ /// \arg Writer is used as a custom object writer (as the MCJIT does);
+ /// if not specified, one is automatically created from the backend.
+ void Finish(MCObjectWriter *Writer = 0);
// FIXME: This does not belong here.
bool getSubsectionsViaSymbols() const {
@@ -622,6 +698,9 @@ public:
SubsectionsViaSymbols = Value;
}
+ bool getRelaxAll() const { return RelaxAll; }
+ void setRelaxAll(bool Value) { RelaxAll = Value; }
+
/// @name Section List Access
/// @{
@@ -665,14 +744,58 @@ public:
indirect_symbol_iterator indirect_symbol_begin() {
return IndirectSymbols.begin();
}
+ const_indirect_symbol_iterator indirect_symbol_begin() const {
+ return IndirectSymbols.begin();
+ }
indirect_symbol_iterator indirect_symbol_end() {
return IndirectSymbols.end();
}
+ const_indirect_symbol_iterator indirect_symbol_end() const {
+ return IndirectSymbols.end();
+ }
size_t indirect_symbol_size() const { return IndirectSymbols.size(); }
/// @}
+ /// @name Backend Data Access
+ /// @{
+
+ MCSectionData &getSectionData(const MCSection &Section) const {
+ MCSectionData *Entry = SectionMap.lookup(&Section);
+ assert(Entry && "Missing section data!");
+ return *Entry;
+ }
+
+ MCSectionData &getOrCreateSectionData(const MCSection &Section,
+ bool *Created = 0) {
+ MCSectionData *&Entry = SectionMap[&Section];
+
+ if (Created) *Created = !Entry;
+ if (!Entry)
+ Entry = new MCSectionData(Section, this);
+
+ return *Entry;
+ }
+
+ MCSymbolData &getSymbolData(const MCSymbol &Symbol) const {
+ MCSymbolData *Entry = SymbolMap.lookup(&Symbol);
+ assert(Entry && "Missing symbol data!");
+ return *Entry;
+ }
+
+ MCSymbolData &getOrCreateSymbolData(const MCSymbol &Symbol,
+ bool *Created = 0) {
+ MCSymbolData *&Entry = SymbolMap[&Symbol];
+
+ if (Created) *Created = !Entry;
+ if (!Entry)
+ Entry = new MCSymbolData(Symbol, 0, 0, this);
+
+ return *Entry;
+ }
+
+ /// @}
void dump();
};
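
Section and symbol data are now looked up through DenseMaps and created lazily on first use. A minimal sketch of the intended calling pattern (Section and Symbol are assumed to be an MCSection and an MCSymbol owned by the context; Asm is the MCAssembler):

    bool Created;
    MCSectionData &SD = Asm.getOrCreateSectionData(*Section, &Created);
    if (Created)
      SD.setAlignment(4);        // first time this section is seen by the assembler

    MCSymbolData &SymD = Asm.getOrCreateSymbolData(*Symbol);
    SymD.setFlags(0);            // implementation-defined, target-specific flags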
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCCodeEmitter.h b/libclamav/c++/llvm/include/llvm/MC/MCCodeEmitter.h
index fe1aff4..010a2e5 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCCodeEmitter.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCCodeEmitter.h
@@ -22,6 +22,12 @@ template<typename T> class SmallVectorImpl;
/// MCFixupKindInfo - Target independent information on a fixup kind.
struct MCFixupKindInfo {
+ enum FixupKindFlags {
+ /// Is this fixup kind PC-relative? This is used by the assembler backend to
+ /// evaluate fixup values in a target independent manner when possible.
+ FKF_IsPCRel = (1 << 0)
+ };
+
/// A target specific name for the fixup kind. The names will be unique for
/// distinct kinds on any given target.
const char *Name;
@@ -36,6 +42,9 @@ struct MCFixupKindInfo {
/// The number of bits written by this fixup. The bits are assumed to be
/// contiguous.
unsigned TargetSize;
+
+ /// Flags describing additional information on this fixup kind.
+ unsigned Flags;
};
/// MCCodeEmitter - Generic instruction encoding interface.
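
Target backends describe each fixup kind with one MCFixupKindInfo record; the new Flags field lets the generic assembler resolve PC-relative fixups itself when possible. A hypothetical table fragment (the fixup names below are illustrative, not real target kinds):

    static const MCFixupKindInfo Infos[] = {
      // Name                      TargetOffset  TargetSize  Flags
      { "fixup_example_abs_32",    0,            32,         0 },
      { "fixup_example_pcrel_32",  0,            32,         MCFixupKindInfo::FKF_IsPCRel },
    };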
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCContext.h b/libclamav/c++/llvm/include/llvm/MC/MCContext.h
index 74415e2..d22868c 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCContext.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCContext.h
@@ -10,16 +10,26 @@
#ifndef LLVM_MC_MCCONTEXT_H
#define LLVM_MC_MCCONTEXT_H
+#include "llvm/MC/SectionKind.h"
+#include "llvm/MC/MCDwarf.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/raw_ostream.h"
+#include <vector> // FIXME: Shouldn't be needed.
namespace llvm {
+ class MCAsmInfo;
class MCExpr;
class MCSection;
class MCSymbol;
+ class MCLabel;
+ class MCDwarfFile;
+ class MCDwarfLoc;
+ class MCLineSection;
class StringRef;
class Twine;
+ class MCSectionMachO;
/// MCContext - Context object for machine code objects. This class owns all
/// of the sections that it creates.
@@ -28,28 +38,75 @@ namespace llvm {
MCContext(const MCContext&); // DO NOT IMPLEMENT
MCContext &operator=(const MCContext&); // DO NOT IMPLEMENT
- /// Sections - Bindings of names to allocated sections.
- StringMap<MCSection*> Sections;
+ /// The MCAsmInfo for this target.
+ const MCAsmInfo &MAI;
/// Symbols - Bindings of names to symbols.
StringMap<MCSymbol*> Symbols;
+ /// NextUniqueID - The next ID to dole out to an unnamed assembler temporary
+ /// symbol.
+ unsigned NextUniqueID;
+
+ /// Instances of directional local labels.
+ DenseMap<unsigned, MCLabel *> Instances;
+ /// NextInstance() creates the next instance of the directional local label
+ /// for the LocalLabelVal and adds it to the map if needed.
+ unsigned NextInstance(int64_t LocalLabelVal);
+ /// GetInstance() gets the current instance of the directional local label
+ /// for the LocalLabelVal and adds it to the map if needed.
+ unsigned GetInstance(int64_t LocalLabelVal);
+
+ /// The file name of the log file from the environment variable
+ /// AS_SECURE_LOG_FILE, which must be set before the .secure_log_unique
+ /// directive is used or it is an error.
+ char *SecureLogFile;
+ /// The stream that gets written to for the .secure_log_unique directive.
+ raw_ostream *SecureLog;
+ /// Boolean toggled when .secure_log_unique / .secure_log_reset is seen to
+ /// catch errors if .secure_log_unique appears twice without
+ /// .secure_log_reset appearing between them.
+ bool SecureLogUsed;
+
+ /// The dwarf file and directory tables from the dwarf .file directive.
+ std::vector<MCDwarfFile *> MCDwarfFiles;
+ std::vector<StringRef> MCDwarfDirs;
+
+ /// The current dwarf line information from the last dwarf .loc directive.
+ MCDwarfLoc CurrentDwarfLoc;
+ bool DwarfLocSeen;
+
+ /// The dwarf line information, per section, for the machine instructions
+ /// assembled after .loc directives have been seen.
+ DenseMap<const MCSection *, MCLineSection *> MCLineSections;
+
/// Allocator - Allocator object used for creating machine code objects.
///
/// We use a bump pointer allocator to avoid the need to track all allocated
/// objects.
BumpPtrAllocator Allocator;
+
+ void *MachOUniquingMap, *ELFUniquingMap, *COFFUniquingMap;
public:
- MCContext();
+ explicit MCContext(const MCAsmInfo &MAI);
~MCContext();
+
+ const MCAsmInfo &getAsmInfo() const { return MAI; }
/// @name Symbol Management
/// @{
+
+ /// CreateTempSymbol - Create and return a new assembler temporary symbol
+ /// with a unique but unspecified name.
+ MCSymbol *CreateTempSymbol();
- /// CreateSymbol - Create a new symbol with the specified @p Name.
- ///
- /// @param Name - The symbol name, which must be unique across all symbols.
- MCSymbol *CreateSymbol(StringRef Name);
+ /// CreateDirectionalLocalSymbol - Create the definition of a directional
+ /// local symbol for a numbered label (used for "1:" definitions).
+ MCSymbol *CreateDirectionalLocalSymbol(int64_t LocalLabelVal);
+
+ /// GetDirectionalLocalSymbol - Create and return a directional local
+ /// symbol for numbered label (used for "1b" or 1f" references).
+ MCSymbol *GetDirectionalLocalSymbol(int64_t LocalLabelVal, int bORf);
/// GetOrCreateSymbol - Lookup the symbol inside with the specified
/// @p Name. If it exists, return it. If not, create a forward
@@ -59,18 +116,90 @@ namespace llvm {
MCSymbol *GetOrCreateSymbol(StringRef Name);
MCSymbol *GetOrCreateSymbol(const Twine &Name);
- /// CreateTemporarySymbol - Create a new temporary symbol with the specified
- /// @p Name.
- ///
- /// @param Name - The symbol name, for debugging purposes only, temporary
- /// symbols do not surive assembly. If non-empty the name must be unique
- /// across all symbols.
- MCSymbol *CreateTemporarySymbol(StringRef Name = "");
-
/// LookupSymbol - Get the symbol for \p Name, or null.
MCSymbol *LookupSymbol(StringRef Name) const;
/// @}
+
+ /// @name Section Management
+ /// @{
+
+ /// getMachOSection - Return the MCSection for the specified mach-o section.
+ /// This requires the operands to be valid.
+ const MCSectionMachO *getMachOSection(StringRef Segment,
+ StringRef Section,
+ unsigned TypeAndAttributes,
+ unsigned Reserved2,
+ SectionKind K);
+ const MCSectionMachO *getMachOSection(StringRef Segment,
+ StringRef Section,
+ unsigned TypeAndAttributes,
+ SectionKind K) {
+ return getMachOSection(Segment, Section, TypeAndAttributes, 0, K);
+ }
+
+ const MCSection *getELFSection(StringRef Section, unsigned Type,
+ unsigned Flags, SectionKind Kind,
+ bool IsExplicit = false,
+ unsigned EntrySize = 0);
+
+ const MCSection *getCOFFSection(StringRef Section, unsigned Characteristics,
+ int Selection, SectionKind Kind);
+
+ const MCSection *getCOFFSection(StringRef Section, unsigned Characteristics,
+ SectionKind Kind) {
+ return getCOFFSection (Section, Characteristics, 0, Kind);
+ }
+
+
+ /// @}
+
+ /// @name Dwarf Management
+ /// @{
+
+ /// GetDwarfFile - creates an entry in the dwarf file and directory tables.
+ unsigned GetDwarfFile(StringRef FileName, unsigned FileNumber);
+
+ bool ValidateDwarfFileNumber(unsigned FileNumber);
+
+ const std::vector<MCDwarfFile *> &getMCDwarfFiles() {
+ return MCDwarfFiles;
+ }
+ const std::vector<StringRef> &getMCDwarfDirs() {
+ return MCDwarfDirs;
+ }
+ DenseMap<const MCSection *, MCLineSection *> &getMCLineSections() {
+ return MCLineSections;
+ }
+
+ /// setCurrentDwarfLoc - saves the information from the currently parsed
+ /// dwarf .loc directive and sets DwarfLocSeen. When the next instruction
+ /// is assembled, an entry in the line number table with this information and
+ /// the address of the instruction will be created.
+ void setCurrentDwarfLoc(unsigned FileNum, unsigned Line, unsigned Column,
+ unsigned Flags, unsigned Isa) {
+ CurrentDwarfLoc.setFileNum(FileNum);
+ CurrentDwarfLoc.setLine(Line);
+ CurrentDwarfLoc.setColumn(Column);
+ CurrentDwarfLoc.setFlags(Flags);
+ CurrentDwarfLoc.setIsa(Isa);
+ DwarfLocSeen = true;
+ }
+ void clearDwarfLocSeen() { DwarfLocSeen = false; }
+
+ bool getDwarfLocSeen() { return DwarfLocSeen; }
+ const MCDwarfLoc &getCurrentDwarfLoc() { return CurrentDwarfLoc; }
+
+ /// @}
+
+ char *getSecureLogFile() { return SecureLogFile; }
+ raw_ostream *getSecureLog() { return SecureLog; }
+ bool getSecureLogUsed() { return SecureLogUsed; }
+ void setSecureLog(raw_ostream *Value) {
+ SecureLog = Value;
+ }
+ void setSecureLogUsed(bool Value) {
+ SecureLogUsed = Value;
+ }
void *Allocate(unsigned Size, unsigned Align = 8) {
return Allocator.Allocate(Size, Align);
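
MCContext now owns the AsmInfo reference, the section uniquing maps and the dwarf file/loc state, so most front ends only ever talk to it. A hedged sketch of the new entry points (demo() and the file name are made up; MAI is a target MCAsmInfo obtained elsewhere):

    #include "llvm/MC/MCContext.h"
    #include "llvm/MC/MCSymbol.h"
    using namespace llvm;

    static void demo(const MCAsmInfo &MAI) {
      MCContext Ctx(MAI);                                // the context now takes the AsmInfo
      MCSymbol *Tmp  = Ctx.CreateTempSymbol();           // unnamed assembler temporary
      MCSymbol *Main = Ctx.GetOrCreateSymbol(StringRef("main"));
      unsigned FileNo = Ctx.GetDwarfFile("demo.c", 1);   // as from a ".file 1" directive
      Ctx.setCurrentDwarfLoc(FileNo, /*Line=*/1, /*Column=*/0,
                             DWARF2_FLAG_IS_STMT, /*Isa=*/0);
      (void)Tmp; (void)Main;
    }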
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCDirectives.h b/libclamav/c++/llvm/include/llvm/MC/MCDirectives.h
index 1f7364d..223b09e 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCDirectives.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCDirectives.h
@@ -38,7 +38,8 @@ enum MCSymbolAttr {
MCSA_Reference, ///< .reference (MachO)
MCSA_Weak, ///< .weak
MCSA_WeakDefinition, ///< .weak_definition (MachO)
- MCSA_WeakReference ///< .weak_reference (MachO)
+ MCSA_WeakReference, ///< .weak_reference (MachO)
+ MCSA_WeakDefAutoPrivate ///< .weak_def_can_be_hidden (MachO)
};
enum MCAssemblerFlag {
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCDisassembler.h b/libclamav/c++/llvm/include/llvm/MC/MCDisassembler.h
index ffa0e41..dfb8ed5 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCDisassembler.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCDisassembler.h
@@ -16,6 +16,8 @@ namespace llvm {
class MCInst;
class MemoryObject;
class raw_ostream;
+
+struct EDInstInfo;
/// MCDisassembler - Superclass for all disassemblers. Consumes a memory region
/// and provides an array of assembly instructions.
@@ -43,7 +45,15 @@ public:
const MemoryObject ®ion,
uint64_t address,
raw_ostream &vStream) const = 0;
-};
+
+ /// getEDInfo - Returns the enhanced instruction information corresponding to
+ /// the disassembler.
+ ///
+ /// @return - An array of instruction information, with one entry for
+ /// each MCInst opcode this disassembler returns.
+ /// NULL if there is no info for this target.
+ virtual EDInstInfo *getEDInfo() const { return (EDInstInfo*)0; }
+};
} // namespace llvm
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCDwarf.h b/libclamav/c++/llvm/include/llvm/MC/MCDwarf.h
new file mode 100644
index 0000000..dac875c
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/MC/MCDwarf.h
@@ -0,0 +1,156 @@
+//===- MCDwarf.h - Machine Code Dwarf support -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MCDwarfFile to support the dwarf
+// .file directive.
+// TODO: add the support needed for the .loc directive.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCDWARF_H
+#define LLVM_MC_MCDWARF_H
+
+#include "llvm/ADT/StringRef.h"
+#include <vector>
+
+namespace llvm {
+ class MCContext;
+ class MCSection;
+ class MCSymbol;
+ class raw_ostream;
+
+ /// MCDwarfFile - Instances of this class represent the name of the dwarf
+ /// .file directive and its associated dwarf file number in the MC file,
+ /// and MCDwarfFiles are created and unique'd by the MCContext class, where
+ /// the file number for each is its index into the vector of DwarfFiles (note
+ /// index 0 is not used and not a valid dwarf file number).
+ class MCDwarfFile {
+ // Name - the base name of the file without its directory path.
+ // The StringRef references memory allocated in the MCContext.
+ StringRef Name;
+
+ // DirIndex - the index into the list of directory names for this file name.
+ unsigned DirIndex;
+
+ private: // MCContext creates and uniques these.
+ friend class MCContext;
+ MCDwarfFile(StringRef name, unsigned dirIndex)
+ : Name(name), DirIndex(dirIndex) {}
+
+ MCDwarfFile(const MCDwarfFile&); // DO NOT IMPLEMENT
+ void operator=(const MCDwarfFile&); // DO NOT IMPLEMENT
+ public:
+ /// getName - Get the base name of this MCDwarfFile.
+ StringRef getName() const { return Name; }
+
+ /// getDirIndex - Get the dirIndex of this MCDwarfFile.
+ unsigned getDirIndex() const { return DirIndex; }
+
+
+ /// print - Print the value to the stream \arg OS.
+ void print(raw_ostream &OS) const;
+
+ /// dump - Print the value to stderr.
+ void dump() const;
+ };
+
+ inline raw_ostream &operator<<(raw_ostream &OS, const MCDwarfFile &DwarfFile){
+ DwarfFile.print(OS);
+ return OS;
+ }
+
+ /// MCDwarfLoc - Instances of this class represent the information from a
+ /// dwarf .loc directive.
+ class MCDwarfLoc {
+ // FileNum - the file number.
+ unsigned FileNum;
+ // Line - the line number.
+ unsigned Line;
+ // Column - the column position.
+ unsigned Column;
+ // Flags (see #define's below)
+ unsigned Flags;
+ // Isa
+ unsigned Isa;
+
+#define DWARF2_FLAG_IS_STMT (1 << 0)
+#define DWARF2_FLAG_BASIC_BLOCK (1 << 1)
+#define DWARF2_FLAG_PROLOGUE_END (1 << 2)
+#define DWARF2_FLAG_EPILOGUE_BEGIN (1 << 3)
+
+ private: // MCContext manages these
+ friend class MCContext;
+ friend class MCLineEntry;
+ MCDwarfLoc(unsigned fileNum, unsigned line, unsigned column, unsigned flags,
+ unsigned isa)
+ : FileNum(fileNum), Line(line), Column(column), Flags(flags), Isa(isa) {}
+
+ // Allow the default copy constructor and assignment operator to be used
+ // for an MCDwarfLoc object.
+
+ public:
+ /// setFileNum - Set the FileNum of this MCDwarfLoc.
+ void setFileNum(unsigned fileNum) { FileNum = fileNum; }
+
+ /// setLine - Set the Line of this MCDwarfLoc.
+ void setLine(unsigned line) { Line = line; }
+
+ /// setColumn - Set the Column of this MCDwarfLoc.
+ void setColumn(unsigned column) { Column = column; }
+
+ /// setFlags - Set the Flags of this MCDwarfLoc.
+ void setFlags(unsigned flags) { Flags = flags; }
+
+ /// setIsa - Set the Isa of this MCDwarfLoc.
+ void setIsa(unsigned isa) { Isa = isa; }
+ };
+
+ /// MCLineEntry - Instances of this class represent the line information for
+ /// the dwarf line table entries. Each entry is created after a machine
+ /// instruction is assembled and uses an address from a temporary label
+ /// created at the current address in the current section and the info from
+ /// the last .loc directive seen, as stored in the context.
+ class MCLineEntry : public MCDwarfLoc {
+ MCSymbol *Label;
+
+ private:
+ // Allow the default copy constructor and assignment operator to be used
+ // for an MCLineEntry object.
+
+ public:
+ // Constructor to create an MCLineEntry given a symbol and the dwarf loc.
+ MCLineEntry(MCSymbol *label, const MCDwarfLoc loc) : MCDwarfLoc(loc),
+ Label(label) {}
+ };
+
+ /// MCLineSection - Instances of this class represent the line information
+ /// for a section where machine instructions have been assembled after seeing
+ /// .loc directives. This is the information used to build the dwarf line
+ /// table for a section.
+ class MCLineSection {
+ std::vector<MCLineEntry> MCLineEntries;
+
+ private:
+ MCLineSection(const MCLineSection&); // DO NOT IMPLEMENT
+ void operator=(const MCLineSection&); // DO NOT IMPLEMENT
+
+ public:
+ // Constructor to create an MCLineSection with an empty MCLineEntries
+ // vector.
+ MCLineSection() {}
+
+ // addLineEntry - adds an entry to this MCLineSection's line entries
+ void addLineEntry(const MCLineEntry &LineEntry) {
+ MCLineEntries.push_back(LineEntry);
+ }
+ };
+
+} // end namespace llvm
+
+#endif
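
Putting the pieces together: after an instruction is assembled, the streamer snapshots the pending .loc into an MCLineEntry keyed by a fresh temporary label. A rough sketch (Ctx is the MCContext, CurSection the section being emitted into; error handling omitted):

    if (Ctx.getDwarfLocSeen()) {
      MCSymbol *Label = Ctx.CreateTempSymbol();      // emitted at the instruction's address
      MCLineEntry Entry(Label, Ctx.getCurrentDwarfLoc());

      MCLineSection *&LS = Ctx.getMCLineSections()[CurSection];
      if (!LS)
        LS = new MCLineSection();                    // first .loc-tracked insn in this section
      LS->addLineEntry(Entry);
    }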
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCELFSymbolFlags.h b/libclamav/c++/llvm/include/llvm/MC/MCELFSymbolFlags.h
new file mode 100644
index 0000000..eb7978b
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/MC/MCELFSymbolFlags.h
@@ -0,0 +1,54 @@
+//===- MCELFSymbolFlags.h - ELF Symbol Flags ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SymbolFlags used for the ELF target.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCELFSYMBOLFLAGS_H
+#define LLVM_MC_MCELFSYMBOLFLAGS_H
+
+#include "llvm/Support/ELF.h"
+
+// Because all the symbol flags need to be stored in the MCSymbolData
+// 'flags' variable we need to provide shift constants per flag type.
+
+namespace llvm {
+ enum {
+ ELF_STT_Shift = 0, // Shift value for STT_* flags.
+ ELF_STB_Shift = 4, // Shift value for STB_* flags.
+ ELF_STV_Shift = 8 // Shift value for STV_* flags.
+ };
+
+ enum SymbolFlags {
+ ELF_STB_Local = (ELF::STB_LOCAL << ELF_STB_Shift),
+ ELF_STB_Global = (ELF::STB_GLOBAL << ELF_STB_Shift),
+ ELF_STB_Weak = (ELF::STB_WEAK << ELF_STB_Shift),
+ ELF_STB_Loproc = (ELF::STB_LOPROC << ELF_STB_Shift),
+ ELF_STB_Hiproc = (ELF::STB_HIPROC << ELF_STB_Shift),
+
+ ELF_STT_Notype = (ELF::STT_NOTYPE << ELF_STT_Shift),
+ ELF_STT_Object = (ELF::STT_OBJECT << ELF_STT_Shift),
+ ELF_STT_Func = (ELF::STT_FUNC << ELF_STT_Shift),
+ ELF_STT_Section = (ELF::STT_SECTION << ELF_STT_Shift),
+ ELF_STT_File = (ELF::STT_FILE << ELF_STT_Shift),
+ ELF_STT_Common = (ELF::STT_COMMON << ELF_STT_Shift),
+ ELF_STT_Tls = (ELF::STT_TLS << ELF_STT_Shift),
+ ELF_STT_Loproc = (ELF::STT_LOPROC << ELF_STT_Shift),
+ ELF_STT_Hiproc = (ELF::STT_HIPROC << ELF_STT_Shift),
+
+ ELF_STV_Default = (ELF::STV_DEFAULT << ELF_STV_Shift),
+ ELF_STV_Internal = (ELF::STV_INTERNAL << ELF_STV_Shift),
+ ELF_STV_Hidden = (ELF::STV_HIDDEN << ELF_STV_Shift),
+ ELF_STV_Protected = (ELF::STV_PROTECTED << ELF_STV_Shift)
+ };
+
+} // end namespace llvm
+
+#endif
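
Because binding, type and visibility all share the MCSymbolData flag word, the shift constants above keep the three groups in separate bit ranges. A sketch of composing and later adjusting them (SD is an MCSymbolData; the 0xf field mask is illustrative):

    // A global function with default visibility.
    SD.setFlags(ELF_STB_Global | ELF_STT_Func | ELF_STV_Default);

    // Later, downgrade the binding to weak without touching type or visibility.
    SD.modifyFlags(ELF_STB_Weak, 0xf << ELF_STB_Shift);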
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCExpr.h b/libclamav/c++/llvm/include/llvm/MC/MCExpr.h
index fce7602..1f9b8f2 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCExpr.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCExpr.h
@@ -15,6 +15,7 @@
namespace llvm {
class MCAsmInfo;
+class MCAsmLayout;
class MCContext;
class MCSymbol;
class MCValue;
@@ -62,21 +63,25 @@ public:
/// EvaluateAsAbsolute - Try to evaluate the expression to an absolute value.
///
/// @param Res - The absolute value, if evaluation succeeds.
+ /// @param Layout - The assembler layout object to use for evaluating symbol
+ /// values. If not given, then only non-symbolic expressions will be
+ /// evaluated.
/// @result - True on success.
- bool EvaluateAsAbsolute(int64_t &Res) const;
+ bool EvaluateAsAbsolute(int64_t &Res, const MCAsmLayout *Layout = 0) const;
/// EvaluateAsRelocatable - Try to evaluate the expression to a relocatable
/// value, i.e. an expression of the fixed form (a - b + constant).
///
/// @param Res - The relocatable value, if evaluation succeeds.
+ /// @param Layout - The assembler layout object to use for evaluating values.
/// @result - True on success.
- bool EvaluateAsRelocatable(MCValue &Res) const;
+ bool EvaluateAsRelocatable(MCValue &Res, const MCAsmLayout *Layout = 0) const;
/// @}
static bool classof(const MCExpr *) { return true; }
};
-
+
inline raw_ostream &operator<<(raw_ostream &OS, const MCExpr &E) {
E.print(OS);
return OS;
@@ -116,24 +121,64 @@ public:
/// assembler variable (defined constant), or constitute an implicit definition
/// of the symbol as external.
class MCSymbolRefExpr : public MCExpr {
+public:
+ enum VariantKind {
+ VK_None,
+ VK_Invalid,
+
+ VK_GOT,
+ VK_GOTOFF,
+ VK_GOTPCREL,
+ VK_GOTTPOFF,
+ VK_INDNTPOFF,
+ VK_NTPOFF,
+ VK_PLT,
+ VK_TLSGD,
+ VK_TPOFF,
+ VK_ARM_HI16, // The R_ARM_MOVT_ABS relocation (:upper16: in the asm file)
+ VK_ARM_LO16, // The R_ARM_MOVW_ABS_NC relocation (:lower16: in the asm file)
+ VK_TLVP // Mach-O thread local variable relocation
+ };
+
+private:
+ /// The symbol being referenced.
const MCSymbol *Symbol;
- explicit MCSymbolRefExpr(const MCSymbol *_Symbol)
- : MCExpr(MCExpr::SymbolRef), Symbol(_Symbol) {}
+ /// The symbol reference modifier.
+ const VariantKind Kind;
+
+ explicit MCSymbolRefExpr(const MCSymbol *_Symbol, VariantKind _Kind)
+ : MCExpr(MCExpr::SymbolRef), Symbol(_Symbol), Kind(_Kind) {}
public:
/// @name Construction
/// @{
- static const MCSymbolRefExpr *Create(const MCSymbol *Symbol, MCContext &Ctx);
- static const MCSymbolRefExpr *Create(StringRef Name, MCContext &Ctx);
+ static const MCSymbolRefExpr *Create(const MCSymbol *Symbol, MCContext &Ctx) {
+ return MCSymbolRefExpr::Create(Symbol, VK_None, Ctx);
+ }
+ static const MCSymbolRefExpr *Create(const MCSymbol *Symbol, VariantKind Kind,
+ MCContext &Ctx);
+ static const MCSymbolRefExpr *Create(StringRef Name, VariantKind Kind,
+ MCContext &Ctx);
+
/// @}
/// @name Accessors
/// @{
const MCSymbol &getSymbol() const { return *Symbol; }
+ VariantKind getKind() const { return Kind; }
+
+ /// @}
+ /// @name Static Utility Functions
+ /// @{
+
+ static StringRef getVariantKindName(VariantKind Kind);
+
+ static VariantKind getVariantKindForName(StringRef Name);
+
/// @}
static bool classof(const MCExpr *E) {
@@ -342,11 +387,12 @@ protected:
MCTargetExpr() : MCExpr(Target) {}
virtual ~MCTargetExpr() {}
public:
-
+
virtual void PrintImpl(raw_ostream &OS) const = 0;
- virtual bool EvaluateAsRelocatableImpl(MCValue &Res) const = 0;
+ virtual bool EvaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout) const = 0;
+
-
static bool classof(const MCExpr *E) {
return E->getKind() == MCExpr::Target;
}
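
Symbol references can now carry a modifier (e.g. @GOT, @PLT, :lower16:) directly in the expression tree instead of encoding it in the symbol name. A hedged sketch of building and evaluating such an expression (Sym and Ctx are assumed to exist):

    const MCExpr *Ref  = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOT, Ctx);
    const MCExpr *Expr = MCBinaryExpr::CreateAdd(Ref, MCConstantExpr::Create(8, Ctx), Ctx);

    MCValue Val;
    if (Expr->EvaluateAsRelocatable(Val /*, Layout = 0 */)) {
      // Val now holds the relocatable (SymA - SymB + constant) form;
      // the VK_GOT modifier survives into relocation processing.
    }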
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCFixup.h b/libclamav/c++/llvm/include/llvm/MC/MCFixup.h
index cd0dd19..eed4c34 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCFixup.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCFixup.h
@@ -10,26 +10,12 @@
#ifndef LLVM_MC_MCFIXUP_H
#define LLVM_MC_MCFIXUP_H
+#include "llvm/System/DataTypes.h"
#include <cassert>
namespace llvm {
class MCExpr;
-// Private constants, do not use.
-//
-// This is currently laid out so that the MCFixup fields can be efficiently
-// accessed, while keeping the offset field large enough that the assembler
-// backend can reasonably use the MCFixup representation for an entire fragment
-// (splitting any overly large fragments).
-//
-// The division of bits between the kind and the opindex can be tweaked if we
-// end up needing more bits for target dependent kinds.
-enum {
- MCFIXUP_NUM_GENERIC_KINDS = 128,
- MCFIXUP_NUM_KIND_BITS = 16,
- MCFIXUP_NUM_OFFSET_BITS = (32 - MCFIXUP_NUM_KIND_BITS)
-};
-
/// MCFixupKind - Extensible enumeration to represent the type of a fixup.
enum MCFixupKind {
FK_Data_1 = 0, ///< A one-byte fixup.
@@ -37,12 +23,14 @@ enum MCFixupKind {
FK_Data_4, ///< A four-byte fixup.
FK_Data_8, ///< A eight-byte fixup.
- FirstTargetFixupKind = MCFIXUP_NUM_GENERIC_KINDS,
+ FirstTargetFixupKind = 128,
- MaxTargetFixupKind = (1 << MCFIXUP_NUM_KIND_BITS)
+ // Limit range of target fixups, in case we want to pack more efficiently
+ // later.
+ MaxTargetFixupKind = (1 << 8)
};
-/// MCFixup - Encode information on a single operation to perform on an byte
+/// MCFixup - Encode information on a single operation to perform on a byte
/// sequence (e.g., an encoded instruction) which requires assemble- or run-
/// time patching.
///
@@ -57,36 +45,33 @@ enum MCFixupKind {
/// fixups become relocations in the object file (or errors, if the fixup cannot
/// be encoded on the target).
class MCFixup {
- static const unsigned MaxOffset = 1 << MCFIXUP_NUM_KIND_BITS;
-
/// The value to put into the fixup location. The exact interpretation of the
- /// expression is target dependent, usually it will one of the operands to an
- /// instruction or an assembler directive.
+ /// expression is target dependent, usually it will be one of the operands to
+ /// an instruction or an assembler directive.
const MCExpr *Value;
/// The byte index of start of the relocation inside the encoded instruction.
- unsigned Offset : MCFIXUP_NUM_OFFSET_BITS;
+ uint32_t Offset;
/// The target dependent kind of fixup item this is. The kind is used to
/// determine how the operand value should be encoded into the instruction.
- unsigned Kind : MCFIXUP_NUM_KIND_BITS;
+ unsigned Kind;
public:
- static MCFixup Create(unsigned Offset, const MCExpr *Value,
+ static MCFixup Create(uint32_t Offset, const MCExpr *Value,
MCFixupKind Kind) {
+ assert(unsigned(Kind) < MaxTargetFixupKind && "Kind out of range!");
MCFixup FI;
FI.Value = Value;
FI.Offset = Offset;
FI.Kind = unsigned(Kind);
-
- assert(Offset == FI.getOffset() && "Offset out of range!");
- assert(Kind == FI.getKind() && "Kind out of range!");
return FI;
}
MCFixupKind getKind() const { return MCFixupKind(Kind); }
- unsigned getOffset() const { return Offset; }
+ uint32_t getOffset() const { return Offset; }
+ void setOffset(uint32_t Value) { Offset = Value; }
const MCExpr *getValue() const { return Value; }
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCInst.h b/libclamav/c++/llvm/include/llvm/MC/MCInst.h
index 29b38dd..dc630fe 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCInst.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCInst.h
@@ -17,11 +17,13 @@
#define LLVM_MC_MCINST_H
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/System/DataTypes.h"
namespace llvm {
class raw_ostream;
class MCAsmInfo;
+class MCInstPrinter;
class MCExpr;
/// MCOperand - Instances of this class represent operands of the MCInst class.
@@ -125,6 +127,13 @@ public:
void print(raw_ostream &OS, const MCAsmInfo *MAI) const;
void dump() const;
+
+ /// \brief Dump the MCInst as prettily as possible using the additional MC
+ /// structures, if given. Operators are separated by the \arg Separator
+ /// string.
+ void dump_pretty(raw_ostream &OS, const MCAsmInfo *MAI = 0,
+ const MCInstPrinter *Printer = 0,
+ StringRef Separator = " ") const;
};
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCInstPrinter.h b/libclamav/c++/llvm/include/llvm/MC/MCInstPrinter.h
index d2ddc5b..4839a83 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCInstPrinter.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCInstPrinter.h
@@ -20,26 +20,23 @@ class StringRef;
/// that converts an MCInst to valid target assembly syntax.
class MCInstPrinter {
protected:
- /// O - The main stream to emit instruction text to.
- raw_ostream &O;
-
/// CommentStream - a stream that comments can be emitted to if desired.
/// Each comment must end with a newline. This will be null if verbose
/// assembly emission is disabled.
raw_ostream *CommentStream;
const MCAsmInfo &MAI;
public:
- MCInstPrinter(raw_ostream &o, const MCAsmInfo &mai)
- : O(o), CommentStream(0), MAI(mai) {}
+ MCInstPrinter(const MCAsmInfo &mai)
+ : CommentStream(0), MAI(mai) {}
virtual ~MCInstPrinter();
/// setCommentStream - Specify a stream to emit comments to.
void setCommentStream(raw_ostream &OS) { CommentStream = &OS; }
- /// printInst - Print the specified MCInst to the current raw_ostream.
+ /// printInst - Print the specified MCInst to the specified raw_ostream.
///
- virtual void printInst(const MCInst *MI) = 0;
+ virtual void printInst(const MCInst *MI, raw_ostream &OS) = 0;
/// getOpcodeName - Return the name of the specified opcode enum (e.g.
/// "MOV32ri") or empty if we can't resolve it.
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCLabel.h b/libclamav/c++/llvm/include/llvm/MC/MCLabel.h
new file mode 100644
index 0000000..727520d
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/MC/MCLabel.h
@@ -0,0 +1,56 @@
+//===- MCLabel.h - Machine Code Directional Local Labels --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MCLabel class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCLABEL_H
+#define LLVM_MC_MCLABEL_H
+
+namespace llvm {
+ class MCContext;
+ class raw_ostream;
+
+ /// MCLabel - Instances of this class represent a label name in the MC file,
+ /// and MCLabels are created and unique'd by the MCContext class. MCLabels
+ /// should only be constructed for valid instances in the object file.
+ class MCLabel {
+ // Instance - the instance number of this Directional Local Label
+ unsigned Instance;
+
+ private: // MCContext creates and uniques these.
+ friend class MCContext;
+ MCLabel(unsigned instance)
+ : Instance(instance) {}
+
+ MCLabel(const MCLabel&); // DO NOT IMPLEMENT
+ void operator=(const MCLabel&); // DO NOT IMPLEMENT
+ public:
+ /// getInstance - Get the current instance of this Directional Local Label.
+ unsigned getInstance() const { return Instance; }
+
+ /// incInstance - Increment the current instance of this Directional Local
+ /// Label.
+ unsigned incInstance() { return ++Instance; }
+
+ /// print - Print the value to the stream \arg OS.
+ void print(raw_ostream &OS) const;
+
+ /// dump - Print the value to stderr.
+ void dump() const;
+ };
+
+ inline raw_ostream &operator<<(raw_ostream &OS, const MCLabel &Label) {
+ Label.print(OS);
+ return OS;
+ }
+} // end namespace llvm
+
+#endif
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCMachOSymbolFlags.h b/libclamav/c++/llvm/include/llvm/MC/MCMachOSymbolFlags.h
new file mode 100644
index 0000000..c938c81
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/MC/MCMachOSymbolFlags.h
@@ -0,0 +1,44 @@
+//===- MCMachOSymbolFlags.h - MachO Symbol Flags ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SymbolFlags used for the MachO target.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCMACHOSYMBOLFLAGS_H
+#define LLVM_MC_MCMACHOSYMBOLFLAGS_H
+
+// These flags are mostly used in MCMachOStreamer.cpp but also needed in
+// MachObjectWriter.cpp to test for Weak Definitions of symbols to emit
+// the correct relocation information.
+
+namespace llvm {
+ /// SymbolFlags - We store the value for the 'desc' symbol field in the lowest
+ /// 16 bits of the implementation defined flags.
+ enum SymbolFlags { // See <mach-o/nlist.h>.
+ SF_DescFlagsMask = 0xFFFF,
+
+ // Reference type flags.
+ SF_ReferenceTypeMask = 0x0007,
+ SF_ReferenceTypeUndefinedNonLazy = 0x0000,
+ SF_ReferenceTypeUndefinedLazy = 0x0001,
+ SF_ReferenceTypeDefined = 0x0002,
+ SF_ReferenceTypePrivateDefined = 0x0003,
+ SF_ReferenceTypePrivateUndefinedNonLazy = 0x0004,
+ SF_ReferenceTypePrivateUndefinedLazy = 0x0005,
+
+ // Other 'desc' flags.
+ SF_NoDeadStrip = 0x0020,
+ SF_WeakReference = 0x0040,
+ SF_WeakDefinition = 0x0080
+ };
+
+} // end namespace llvm
+
+#endif
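
A quick illustration of how the masks above combine (standalone sketch, not from the commit): the low 16 bits of a symbol's flags hold the Mach-O 'desc' field, and the reference type occupies its lowest three bits.

// symflags_demo.cpp -- standalone sketch of the bit layout declared above.
#include <cstdint>
#include <cstdio>

enum {
  SF_DescFlagsMask     = 0xFFFF,
  SF_ReferenceTypeMask = 0x0007,
  SF_WeakReference     = 0x0040,
  SF_WeakDefinition    = 0x0080
};

int main() {
  // Hypothetical flag word: weak definition + reference type 0x2 (defined).
  uint32_t Flags = SF_WeakDefinition | 0x0002;

  uint16_t Desc    = Flags & SF_DescFlagsMask;       // value for n_desc
  uint16_t RefType = Desc  & SF_ReferenceTypeMask;   // reference type bits

  std::printf("n_desc = 0x%04x, reference type = %u, weak def = %s\n",
              (unsigned)Desc, (unsigned)RefType,
              (Desc & SF_WeakDefinition) ? "yes" : "no");
  return 0;
}
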
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCObjectStreamer.h b/libclamav/c++/llvm/include/llvm/MC/MCObjectStreamer.h
new file mode 100644
index 0000000..ea6d9c1
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/MC/MCObjectStreamer.h
@@ -0,0 +1,67 @@
+//===- MCObjectStreamer.h - MCStreamer Object File Interface ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCOBJECTSTREAMER_H
+#define LLVM_MC_MCOBJECTSTREAMER_H
+
+#include "llvm/MC/MCStreamer.h"
+
+namespace llvm {
+class MCAssembler;
+class MCCodeEmitter;
+class MCSectionData;
+class MCExpr;
+class MCFragment;
+class MCDataFragment;
+class TargetAsmBackend;
+class raw_ostream;
+
+/// \brief Streaming object file generation interface.
+///
+/// This class provides an implementation of the MCStreamer interface which is
+/// suitable for use with the assembler backend. Specific object file formats
+/// are expected to subclass this interface to implement directives specific
+/// to that file format or custom semantics expected by the object writer
+/// implementation.
+class MCObjectStreamer : public MCStreamer {
+ MCAssembler *Assembler;
+ MCSectionData *CurSectionData;
+
+protected:
+ MCObjectStreamer(MCContext &Context, TargetAsmBackend &TAB,
+ raw_ostream &_OS, MCCodeEmitter *_Emitter);
+ ~MCObjectStreamer();
+
+ MCSectionData *getCurrentSectionData() const {
+ return CurSectionData;
+ }
+
+ MCFragment *getCurrentFragment() const;
+
+ /// Get a data fragment to write into, creating a new one if the current
+ /// fragment is not a data fragment.
+ MCDataFragment *getOrCreateDataFragment() const;
+
+ const MCExpr *AddValueSymbols(const MCExpr *Value);
+
+public:
+ MCAssembler &getAssembler() { return *Assembler; }
+
+ /// @name MCStreamer Interface
+ /// @{
+
+ virtual void SwitchSection(const MCSection *Section);
+ virtual void Finish();
+
+ /// @}
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCObjectWriter.h b/libclamav/c++/llvm/include/llvm/MC/MCObjectWriter.h
new file mode 100644
index 0000000..f1c1cb8
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/MC/MCObjectWriter.h
@@ -0,0 +1,169 @@
+//===-- llvm/MC/MCObjectWriter.h - Object File Writer Interface -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCOBJECTWRITER_H
+#define LLVM_MC_MCOBJECTWRITER_H
+
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/DataTypes.h"
+#include <cassert>
+
+namespace llvm {
+class MCAsmLayout;
+class MCAssembler;
+class MCFixup;
+class MCFragment;
+class MCValue;
+class raw_ostream;
+
+/// MCObjectWriter - Defines the object file and target independent interfaces
+/// used by the assembler backend to write native file format object files.
+///
+/// The object writer contains a few callbacks used by the assembler to allow
+/// the object writer to modify the assembler data structures at appropriate
+/// points. Once assembly is complete, the object writer is given the
+/// MCAssembler instance, which contains all the symbol and section data which
+/// should be emitted as part of WriteObject().
+///
+/// The object writer also contains a number of helper methods for writing
+/// binary data to the output stream.
+class MCObjectWriter {
+ MCObjectWriter(const MCObjectWriter &); // DO NOT IMPLEMENT
+ void operator=(const MCObjectWriter &); // DO NOT IMPLEMENT
+
+protected:
+ raw_ostream &OS;
+
+ unsigned IsLittleEndian : 1;
+
+protected: // Can only create subclasses.
+ MCObjectWriter(raw_ostream &_OS, bool _IsLittleEndian)
+ : OS(_OS), IsLittleEndian(_IsLittleEndian) {}
+
+public:
+ virtual ~MCObjectWriter();
+
+ bool isLittleEndian() const { return IsLittleEndian; }
+
+ raw_ostream &getStream() { return OS; }
+
+ /// @name High-Level API
+ /// @{
+
+ /// Perform any late binding of symbols (for example, to assign symbol indices
+ /// for use when generating relocations).
+ ///
+ /// This routine is called by the assembler after layout and relaxation is
+ /// complete.
+ virtual void ExecutePostLayoutBinding(MCAssembler &Asm) = 0;
+
+ /// Record a relocation entry.
+ ///
+ /// This routine is called by the assembler after layout and relaxation, and
+ /// post layout binding. The implementation is responsible for storing
+ /// information about the relocation so that it can be emitted during
+ /// WriteObject().
+ virtual void RecordRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) = 0;
+
+ /// Write the object file.
+ ///
+ /// This routine is called by the assembler after layout and relaxation is
+ /// complete, fixups have been evaluated and applied, and relocations
+ /// generated.
+ virtual void WriteObject(const MCAssembler &Asm,
+ const MCAsmLayout &Layout) = 0;
+
+ /// @}
+ /// @name Binary Output
+ /// @{
+
+ void Write8(uint8_t Value) {
+ OS << char(Value);
+ }
+
+ void WriteLE16(uint16_t Value) {
+ Write8(uint8_t(Value >> 0));
+ Write8(uint8_t(Value >> 8));
+ }
+
+ void WriteLE32(uint32_t Value) {
+ WriteLE16(uint16_t(Value >> 0));
+ WriteLE16(uint16_t(Value >> 16));
+ }
+
+ void WriteLE64(uint64_t Value) {
+ WriteLE32(uint32_t(Value >> 0));
+ WriteLE32(uint32_t(Value >> 32));
+ }
+
+ void WriteBE16(uint16_t Value) {
+ Write8(uint8_t(Value >> 8));
+ Write8(uint8_t(Value >> 0));
+ }
+
+ void WriteBE32(uint32_t Value) {
+ WriteBE16(uint16_t(Value >> 16));
+ WriteBE16(uint16_t(Value >> 0));
+ }
+
+ void WriteBE64(uint64_t Value) {
+ WriteBE32(uint32_t(Value >> 32));
+ WriteBE32(uint32_t(Value >> 0));
+ }
+
+ void Write16(uint16_t Value) {
+ if (IsLittleEndian)
+ WriteLE16(Value);
+ else
+ WriteBE16(Value);
+ }
+
+ void Write32(uint32_t Value) {
+ if (IsLittleEndian)
+ WriteLE32(Value);
+ else
+ WriteBE32(Value);
+ }
+
+ void Write64(uint64_t Value) {
+ if (IsLittleEndian)
+ WriteLE64(Value);
+ else
+ WriteBE64(Value);
+ }
+
+ void WriteZeros(unsigned N) {
+ const char Zeros[16] = { 0 };
+
+ for (unsigned i = 0, e = N / 16; i != e; ++i)
+ OS << StringRef(Zeros, 16);
+
+ OS << StringRef(Zeros, N % 16);
+ }
+
+ void WriteBytes(StringRef Str, unsigned ZeroFillSize = 0) {
+ assert((ZeroFillSize == 0 || Str.size () <= ZeroFillSize) &&
+ "data size greater than fill size, unexpected large write will occur");
+ OS << Str;
+ if (ZeroFillSize)
+ WriteZeros(ZeroFillSize - Str.size());
+ }
+
+ /// @}
+};
+
+MCObjectWriter *createWinCOFFObjectWriter(raw_ostream &OS, bool is64Bit);
+
+} // End llvm namespace
+
+#endif
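
The WriteLE*/WriteBE* helpers above reduce every multi-byte write to byte writes in the chosen order. A standalone re-implementation of the same scheme, for illustration only (buffers into a std::string instead of a raw_ostream):

// endian_demo.cpp -- standalone sketch of the WriteLE32/WriteBE32 scheme.
#include <cstdint>
#include <cstdio>
#include <string>

static void Write8(std::string &Out, uint8_t V)  { Out.push_back(char(V)); }

static void WriteLE32(std::string &Out, uint32_t V) {
  Write8(Out, uint8_t(V >>  0));
  Write8(Out, uint8_t(V >>  8));
  Write8(Out, uint8_t(V >> 16));
  Write8(Out, uint8_t(V >> 24));
}

static void WriteBE32(std::string &Out, uint32_t V) {
  Write8(Out, uint8_t(V >> 24));
  Write8(Out, uint8_t(V >> 16));
  Write8(Out, uint8_t(V >>  8));
  Write8(Out, uint8_t(V >>  0));
}

int main() {
  std::string LE, BE;
  WriteLE32(LE, 0x11223344);
  WriteBE32(BE, 0x11223344);
  std::printf("LE: %02x %02x %02x %02x\n",    // 44 33 22 11
              (unsigned char)LE[0], (unsigned char)LE[1],
              (unsigned char)LE[2], (unsigned char)LE[3]);
  std::printf("BE: %02x %02x %02x %02x\n",    // 11 22 33 44
              (unsigned char)BE[0], (unsigned char)BE[1],
              (unsigned char)BE[2], (unsigned char)BE[3]);
  return 0;
}
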
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCParser/AsmLexer.h b/libclamav/c++/llvm/include/llvm/MC/MCParser/AsmLexer.h
index cf6eefb..2187889 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCParser/AsmLexer.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCParser/AsmLexer.h
@@ -33,8 +33,6 @@ class AsmLexer : public MCAsmLexer {
const char *CurPtr;
const MemoryBuffer *CurBuf;
- const char *TokStart;
-
void operator=(const AsmLexer&); // DO NOT IMPLEMENT
AsmLexer(const AsmLexer&); // DO NOT IMPLEMENT
@@ -48,9 +46,7 @@ public:
void setBuffer(const MemoryBuffer *buf, const char *ptr = NULL);
- SMLoc getLoc() const;
-
- StringRef LexUntilEndOfStatement();
+ virtual StringRef LexUntilEndOfStatement();
bool isAtStartOfComment(char Char);
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCParser/AsmParser.h b/libclamav/c++/llvm/include/llvm/MC/MCParser/AsmParser.h
deleted file mode 100644
index 829604c..0000000
--- a/libclamav/c++/llvm/include/llvm/MC/MCParser/AsmParser.h
+++ /dev/null
@@ -1,178 +0,0 @@
-//===- AsmParser.h - Parser for Assembly Files ------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class declares the parser for assembly files.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ASMPARSER_H
-#define ASMPARSER_H
-
-#include <vector>
-#include "llvm/MC/MCParser/AsmLexer.h"
-#include "llvm/MC/MCParser/AsmCond.h"
-#include "llvm/MC/MCParser/MCAsmParser.h"
-#include "llvm/MC/MCSectionMachO.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/ADT/StringMap.h"
-
-namespace llvm {
-class AsmCond;
-class AsmToken;
-class MCContext;
-class MCExpr;
-class MCInst;
-class MCStreamer;
-class MCAsmInfo;
-class MCValue;
-class SourceMgr;
-class TargetAsmParser;
-class Twine;
-
-class AsmParser : public MCAsmParser {
-private:
- AsmLexer Lexer;
- MCContext &Ctx;
- MCStreamer &Out;
- SourceMgr &SrcMgr;
- TargetAsmParser *TargetParser;
-
- /// This is the current buffer index we're lexing from as managed by the
- /// SourceMgr object.
- int CurBuffer;
-
- AsmCond TheCondState;
- std::vector<AsmCond> TheCondStack;
-
- // FIXME: Figure out where this should leave, the code is a copy of that which
- // is also used by TargetLoweringObjectFile.
- mutable void *SectionUniquingMap;
-
- /// DirectiveMap - This is a table handlers for directives. Each handler is
- /// invoked after the directive identifier is read and is responsible for
- /// parsing and validating the rest of the directive. The handler is passed
- /// in the directive name and the location of the directive keyword.
- StringMap<bool(AsmParser::*)(StringRef, SMLoc)> DirectiveMap;
-public:
- AsmParser(SourceMgr &SM, MCContext &Ctx, MCStreamer &Out,
- const MCAsmInfo &MAI);
- ~AsmParser();
-
- bool Run();
-
-
- void AddDirectiveHandler(StringRef Directive,
- bool (AsmParser::*Handler)(StringRef, SMLoc)) {
- DirectiveMap[Directive] = Handler;
- }
-public:
- TargetAsmParser &getTargetParser() const { return *TargetParser; }
- void setTargetParser(TargetAsmParser &P) { TargetParser = &P; }
-
- /// @name MCAsmParser Interface
- /// {
-
- virtual MCAsmLexer &getLexer() { return Lexer; }
- virtual MCContext &getContext() { return Ctx; }
- virtual MCStreamer &getStreamer() { return Out; }
-
- virtual void Warning(SMLoc L, const Twine &Meg);
- virtual bool Error(SMLoc L, const Twine &Msg);
-
- const AsmToken &Lex();
-
- bool ParseExpression(const MCExpr *&Res);
- virtual bool ParseExpression(const MCExpr *&Res, SMLoc &EndLoc);
- virtual bool ParseParenExpression(const MCExpr *&Res, SMLoc &EndLoc);
- virtual bool ParseAbsoluteExpression(int64_t &Res);
-
- /// }
-
-private:
- MCSymbol *CreateSymbol(StringRef Name);
-
- // FIXME: See comment on SectionUniquingMap.
- const MCSection *getMachOSection(const StringRef &Segment,
- const StringRef &Section,
- unsigned TypeAndAttributes,
- unsigned Reserved2,
- SectionKind Kind) const;
-
- bool ParseStatement();
-
- bool TokError(const char *Msg);
-
- void PrintMessage(SMLoc Loc, const std::string &Msg, const char *Type) const;
-
- /// EnterIncludeFile - Enter the specified file. This returns true on failure.
- bool EnterIncludeFile(const std::string &Filename);
-
- bool ParseConditionalAssemblyDirectives(StringRef Directive,
- SMLoc DirectiveLoc);
- void EatToEndOfStatement();
-
- bool ParseAssignment(const StringRef &Name);
-
- bool ParsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc);
- bool ParseBinOpRHS(unsigned Precedence, const MCExpr *&Res, SMLoc &EndLoc);
- bool ParseParenExpr(const MCExpr *&Res, SMLoc &EndLoc);
-
- /// ParseIdentifier - Parse an identifier or string (as a quoted identifier)
- /// and set \arg Res to the identifier contents.
- bool ParseIdentifier(StringRef &Res);
-
- // Directive Parsing.
- bool ParseDirectiveDarwinSection(); // Darwin specific ".section".
- bool ParseDirectiveSectionSwitch(const char *Segment, const char *Section,
- unsigned TAA = 0, unsigned ImplicitAlign = 0,
- unsigned StubSize = 0);
- bool ParseDirectiveAscii(bool ZeroTerminated); // ".ascii", ".asciiz"
- bool ParseDirectiveValue(unsigned Size); // ".byte", ".long", ...
- bool ParseDirectiveFill(); // ".fill"
- bool ParseDirectiveSpace(); // ".space"
- bool ParseDirectiveSet(); // ".set"
- bool ParseDirectiveOrg(); // ".org"
- // ".align{,32}", ".p2align{,w,l}"
- bool ParseDirectiveAlign(bool IsPow2, unsigned ValueSize);
-
- /// ParseDirectiveSymbolAttribute - Parse a directive like ".globl" which
- /// accepts a single symbol (which should be a label or an external).
- bool ParseDirectiveSymbolAttribute(MCSymbolAttr Attr);
- bool ParseDirectiveDarwinSymbolDesc(); // Darwin specific ".desc"
- bool ParseDirectiveDarwinLsym(); // Darwin specific ".lsym"
-
- bool ParseDirectiveComm(bool IsLocal); // ".comm" and ".lcomm"
- bool ParseDirectiveDarwinZerofill(); // Darwin specific ".zerofill"
-
- // Darwin specific ".subsections_via_symbols"
- bool ParseDirectiveDarwinSubsectionsViaSymbols();
- // Darwin specific .dump and .load
- bool ParseDirectiveDarwinDumpOrLoad(SMLoc IDLoc, bool IsDump);
-
- bool ParseDirectiveAbort(); // ".abort"
- bool ParseDirectiveInclude(); // ".include"
-
- bool ParseDirectiveIf(SMLoc DirectiveLoc); // ".if"
- bool ParseDirectiveElseIf(SMLoc DirectiveLoc); // ".elseif"
- bool ParseDirectiveElse(SMLoc DirectiveLoc); // ".else"
- bool ParseDirectiveEndIf(SMLoc DirectiveLoc); // .endif
-
- bool ParseDirectiveFile(StringRef, SMLoc DirectiveLoc); // ".file"
- bool ParseDirectiveLine(StringRef, SMLoc DirectiveLoc); // ".line"
- bool ParseDirectiveLoc(StringRef, SMLoc DirectiveLoc); // ".loc"
-
- /// ParseEscapedString - Parse the current token as a string which may include
- /// escaped characters and return the string contents.
- bool ParseEscapedString(std::string &Data);
-};
-
-} // end namespace llvm
-
-#endif
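
The deleted header's DirectiveMap (a table of handlers keyed by directive name, storing member-function pointers) is the pattern that the new extension mechanism below generalizes. A standalone sketch of that dispatch-table idea, for illustration only; all names are hypothetical.

// directive_dispatch.cpp -- standalone sketch of a DirectiveMap-style table.
#include <cstdio>
#include <map>
#include <string>

class MiniParser {
  // Directive name -> member function that parses the rest of the directive.
  std::map<std::string, bool (MiniParser::*)(const std::string &)> DirectiveMap;

  bool ParseAscii(const std::string &Rest) {
    std::printf(".ascii payload: %s\n", Rest.c_str());
    return false;                       // false == success, as in the parser
  }
  bool ParseAlign(const std::string &Rest) {
    std::printf(".align payload: %s\n", Rest.c_str());
    return false;
  }

public:
  MiniParser() {
    DirectiveMap[".ascii"] = &MiniParser::ParseAscii;
    DirectiveMap[".align"] = &MiniParser::ParseAlign;
  }

  bool ParseDirective(const std::string &Name, const std::string &Rest) {
    std::map<std::string, bool (MiniParser::*)(const std::string &)>::iterator
        It = DirectiveMap.find(Name);
    if (It == DirectiveMap.end())
      return true;                      // unknown directive -> error
    return (this->*It->second)(Rest);
  }
};

int main() {
  MiniParser P;
  P.ParseDirective(".ascii", "\"hello\"");
  P.ParseDirective(".align", "4");
  return 0;
}
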
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCParser/MCAsmLexer.h b/libclamav/c++/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
index 043c363..d690e81 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
@@ -42,12 +42,12 @@ public:
Plus, Minus, Tilde,
Slash, // '/'
LParen, RParen, LBrac, RBrac, LCurly, RCurly,
- Star, Comma, Dollar, Equal, EqualEqual,
+ Star, Dot, Comma, Dollar, Equal, EqualEqual,
Pipe, PipePipe, Caret,
Amp, AmpAmp, Exclaim, ExclaimEqual, Percent, Hash,
Less, LessEqual, LessLess, LessGreater,
- Greater, GreaterEqual, GreaterGreater
+ Greater, GreaterEqual, GreaterGreater, At
};
TokenKind Kind;
@@ -121,6 +121,8 @@ class MCAsmLexer {
MCAsmLexer(const MCAsmLexer &); // DO NOT IMPLEMENT
void operator=(const MCAsmLexer &); // DO NOT IMPLEMENT
protected: // Can only create subclasses.
+ const char *TokStart;
+
MCAsmLexer();
virtual AsmToken LexToken() = 0;
@@ -141,6 +143,11 @@ public:
return CurTok = LexToken();
}
+ virtual StringRef LexUntilEndOfStatement() = 0;
+
+ /// getLoc - Get the current source location.
+ SMLoc getLoc() const;
+
/// getTok - Get the current (last) lexed token.
const AsmToken &getTok() {
return CurTok;
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCParser/MCAsmParser.h b/libclamav/c++/llvm/include/llvm/MC/MCParser/MCAsmParser.h
index 843c692..b37d46c 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCParser/MCAsmParser.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCParser/MCAsmParser.h
@@ -14,37 +14,66 @@
namespace llvm {
class AsmToken;
+class MCAsmInfo;
class MCAsmLexer;
+class MCAsmParserExtension;
class MCContext;
class MCExpr;
class MCStreamer;
-class MCValue;
class SMLoc;
+class SourceMgr;
+class StringRef;
+class Target;
+class TargetAsmParser;
class Twine;
/// MCAsmParser - Generic assembler parser interface, for use by target specific
/// assembly parsers.
class MCAsmParser {
+public:
+ typedef bool (*DirectiveHandler)(MCAsmParserExtension*, StringRef, SMLoc);
+
+private:
MCAsmParser(const MCAsmParser &); // DO NOT IMPLEMENT
void operator=(const MCAsmParser &); // DO NOT IMPLEMENT
+
+ TargetAsmParser *TargetParser;
+
+ unsigned ShowParsedOperands : 1;
+
protected: // Can only create subclasses.
MCAsmParser();
-
+
public:
virtual ~MCAsmParser();
+ virtual void AddDirectiveHandler(MCAsmParserExtension *Object,
+ StringRef Directive,
+ DirectiveHandler Handler) = 0;
+
+ virtual SourceMgr &getSourceManager() = 0;
+
virtual MCAsmLexer &getLexer() = 0;
virtual MCContext &getContext() = 0;
- /// getSteamer - Return the output streamer for the assembler.
+ /// getStreamer - Return the output streamer for the assembler.
virtual MCStreamer &getStreamer() = 0;
+ TargetAsmParser &getTargetParser() const { return *TargetParser; }
+ void setTargetParser(TargetAsmParser &P);
+
+ bool getShowParsedOperands() const { return ShowParsedOperands; }
+ void setShowParsedOperands(bool Value) { ShowParsedOperands = Value; }
+
+ /// Run - Run the parser on the input source buffer.
+ virtual bool Run(bool NoInitialTextSection, bool NoFinalize = false) = 0;
+
/// Warning - Emit a warning at the location \arg L, with the message \arg
/// Msg.
virtual void Warning(SMLoc L, const Twine &Msg) = 0;
- /// Warning - Emit an error at the location \arg L, with the message \arg
+ /// Error - Emit an error at the location \arg L, with the message \arg
/// Msg.
///
/// \return The return value is always true, as an idiomatic convenience to
@@ -54,10 +83,22 @@ public:
/// Lex - Get the next AsmToken in the stream, possibly handling file
/// inclusion first.
virtual const AsmToken &Lex() = 0;
-
+
/// getTok - Get the current AsmToken from the stream.
const AsmToken &getTok();
-
+
+ /// \brief Report an error at the current lexer location.
+ bool TokError(const Twine &Msg);
+
+ /// ParseIdentifier - Parse an identifier or string (as a quoted identifier)
+ /// and set \arg Res to the identifier contents.
+ virtual bool ParseIdentifier(StringRef &Res) = 0;
+
+ /// \brief Parse up to the end of statement and return the contents from the
+ /// current token until the end of the statement; the current token on exit
+ /// will be either the EndOfStatement or EOF.
+ virtual StringRef ParseStringToEndOfStatement() = 0;
+
/// ParseExpression - Parse an arbitrary expression.
///
/// @param Res - The value of the expression. The result is undefined
@@ -65,7 +106,7 @@ public:
/// @result - False on success.
virtual bool ParseExpression(const MCExpr *&Res, SMLoc &EndLoc) = 0;
bool ParseExpression(const MCExpr *&Res);
-
+
/// ParseParenExpression - Parse an arbitrary expression, assuming that an
/// initial '(' has already been consumed.
///
@@ -83,6 +124,10 @@ public:
virtual bool ParseAbsoluteExpression(int64_t &Res) = 0;
};
+/// \brief Create an MCAsmParser instance.
+MCAsmParser *createMCAsmParser(const Target &, SourceMgr &, MCContext &,
+ MCStreamer &, const MCAsmInfo &);
+
} // End llvm namespace
#endif
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCParser/MCAsmParserExtension.h b/libclamav/c++/llvm/include/llvm/MC/MCParser/MCAsmParserExtension.h
new file mode 100644
index 0000000..95184cd
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/MC/MCParser/MCAsmParserExtension.h
@@ -0,0 +1,76 @@
+//===-- llvm/MC/MCAsmParserExtension.h - Asm Parser Hooks -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMPARSEREXTENSION_H
+#define LLVM_MC_MCASMPARSEREXTENSION_H
+
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/SMLoc.h"
+
+namespace llvm {
+class Twine;
+
+/// \brief Generic interface for extending the MCAsmParser,
+/// which is implemented by target and object file assembly parser
+/// implementations.
+class MCAsmParserExtension {
+ MCAsmParserExtension(const MCAsmParserExtension &); // DO NOT IMPLEMENT
+ void operator=(const MCAsmParserExtension &); // DO NOT IMPLEMENT
+
+ MCAsmParser *Parser;
+
+protected:
+ MCAsmParserExtension();
+
+ // Helper template for implementing static dispatch functions.
+ template<typename T, bool (T::*Handler)(StringRef, SMLoc)>
+ static bool HandleDirective(MCAsmParserExtension *Target,
+ StringRef Directive,
+ SMLoc DirectiveLoc) {
+ T *Obj = static_cast<T*>(Target);
+ return (Obj->*Handler)(Directive, DirectiveLoc);
+ }
+
+public:
+ virtual ~MCAsmParserExtension();
+
+ /// \brief Initialize the extension for parsing using the given \arg
+ /// Parser. The extension should use the AsmParser interfaces to register its
+ /// parsing routines.
+ virtual void Initialize(MCAsmParser &Parser);
+
+ /// @name MCAsmParser Proxy Interfaces
+ /// @{
+
+ MCContext &getContext() { return getParser().getContext(); }
+ MCAsmLexer &getLexer() { return getParser().getLexer(); }
+ MCAsmParser &getParser() { return *Parser; }
+ SourceMgr &getSourceManager() { return getParser().getSourceManager(); }
+ MCStreamer &getStreamer() { return getParser().getStreamer(); }
+ void Warning(SMLoc L, const Twine &Msg) {
+ return getParser().Warning(L, Msg);
+ }
+ bool Error(SMLoc L, const Twine &Msg) {
+ return getParser().Error(L, Msg);
+ }
+ bool TokError(const Twine &Msg) {
+ return getParser().TokError(Msg);
+ }
+
+ const AsmToken &Lex() { return getParser().Lex(); }
+
+ const AsmToken &getTok() { return getParser().getTok(); }
+
+ /// @}
+};
+
+} // End llvm namespace
+
+#endif
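
The HandleDirective template above is a trampoline: it turns a pointer-to-member handler of a concrete extension into a plain function pointer that the parser's directive table can store. A standalone sketch of the same static-dispatch trick, for illustration only:

// static_dispatch.cpp -- standalone sketch of the HandleDirective trampoline.
#include <cstdio>
#include <string>

struct ExtensionBase {
  virtual ~ExtensionBase() {}
};

// Uniform free-function signature that a dispatch table can store.
typedef bool (*Handler)(ExtensionBase *Self, const std::string &Directive);

// Template trampoline: recovers the concrete type and calls the member.
template <typename T, bool (T::*Member)(const std::string &)>
static bool Trampoline(ExtensionBase *Self, const std::string &Directive) {
  return (static_cast<T *>(Self)->*Member)(Directive);
}

struct DarwinExt : ExtensionBase {
  bool ParseZerofill(const std::string &Directive) {
    std::printf("handling %s in DarwinExt\n", Directive.c_str());
    return false;
  }
};

int main() {
  DarwinExt Ext;
  // The table only ever sees the uniform (object, name) -> bool signature.
  Handler H = &Trampoline<DarwinExt, &DarwinExt::ParseZerofill>;
  H(&Ext, ".zerofill");
  return 0;
}
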
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h b/libclamav/c++/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
index 7c2f5be..99fa5ad 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
@@ -12,6 +12,7 @@
namespace llvm {
class SMLoc;
+class raw_ostream;
/// MCParsedAsmOperand - This abstract class represents a source-level assembly
/// instruction operand. It should be subclassed by target-specific code. This
@@ -23,9 +24,12 @@ public:
virtual ~MCParsedAsmOperand() {}
/// getStartLoc - Get the location of the first token of this operand.
- virtual SMLoc getStartLoc() const;
+ virtual SMLoc getStartLoc() const = 0;
/// getEndLoc - Get the location of the last token of this operand.
- virtual SMLoc getEndLoc() const;
+ virtual SMLoc getEndLoc() const = 0;
+
+ /// dump - Print a debug representation of the operand to the given stream.
+ virtual void dump(raw_ostream &OS) const = 0;
};
} // end namespace llvm.
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCSection.h b/libclamav/c++/llvm/include/llvm/MC/MCSection.h
index ceb6d27..5c99735 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCSection.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCSection.h
@@ -17,55 +17,53 @@
#include <string>
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/SectionKind.h"
+#include "llvm/Support/Casting.h"
namespace llvm {
class MCContext;
class MCAsmInfo;
class raw_ostream;
-
+
/// MCSection - Instances of this class represent a uniqued identifier for a
/// section in the current translation unit. The MCContext class uniques and
/// creates these.
class MCSection {
+ public:
+ enum SectionVariant {
+ SV_COFF = 0,
+ SV_ELF,
+ SV_MachO,
+ SV_PIC16
+ };
+
+ private:
MCSection(const MCSection&); // DO NOT IMPLEMENT
void operator=(const MCSection&); // DO NOT IMPLEMENT
protected:
- MCSection(SectionKind K) : Kind(K) {}
+ MCSection(SectionVariant V, SectionKind K) : Variant(V), Kind(K) {}
+ SectionVariant Variant;
SectionKind Kind;
public:
virtual ~MCSection();
SectionKind getKind() const { return Kind; }
-
+
+ SectionVariant getVariant() const { return Variant; }
+
virtual void PrintSwitchToSection(const MCAsmInfo &MAI,
raw_ostream &OS) const = 0;
- };
- class MCSectionCOFF : public MCSection {
- std::string Name;
-
- /// IsDirective - This is true if the section name is a directive, not
- /// something that should be printed with ".section".
- ///
- /// FIXME: This is a hack. Switch to a semantic view of the section instead
- /// of a syntactic one.
- bool IsDirective;
-
- MCSectionCOFF(StringRef name, bool isDirective, SectionKind K)
- : MCSection(K), Name(name), IsDirective(isDirective) {
+ /// isBaseAddressKnownZero - Return true if we know that this section will
+ /// get a base address of zero. In cases where we know that this is true we
+ /// can emit section offsets as direct references to avoid a subtraction
+ /// from the base of the section, saving a relocation.
+ virtual bool isBaseAddressKnownZero() const {
+ return false;
}
- public:
-
- static MCSectionCOFF *Create(StringRef Name, bool IsDirective,
- SectionKind K, MCContext &Ctx);
- const std::string &getName() const { return Name; }
- bool isDirective() const { return IsDirective; }
-
- virtual void PrintSwitchToSection(const MCAsmInfo &MAI,
- raw_ostream &OS) const;
+ static bool classof(const MCSection *) { return true; }
};
-
+
} // end namespace llvm
#endif
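
The new SectionVariant tag plus the classof() hooks is the usual LLVM isa<>/dyn_cast<> recipe: each subclass claims the base pointers whose variant matches. A standalone sketch of the same tag-based downcast, for illustration only (the real code uses llvm/Support/Casting.h):

// variant_cast.cpp -- standalone sketch of variant-tag based downcasting.
#include <cstdio>

struct Section {
  enum Variant { V_COFF, V_ELF, V_MachO };
  explicit Section(Variant V) : Var(V) {}
  virtual ~Section() {}
  Variant getVariant() const { return Var; }
private:
  Variant Var;
};

struct ELFSection : Section {
  ELFSection() : Section(V_ELF) {}
  // classof: the predicate isa<>/dyn_cast<> would consult.
  static bool classof(const Section *S) { return S->getVariant() == V_ELF; }
};

// Hand-rolled stand-in for dyn_cast<ELFSection>(S).
static ELFSection *dynCastELF(Section *S) {
  return ELFSection::classof(S) ? static_cast<ELFSection *>(S) : 0;
}

int main() {
  ELFSection E;
  Section *S = &E;
  std::printf("is ELF? %s\n", dynCastELF(S) ? "yes" : "no");
  return 0;
}
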
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCSectionCOFF.h b/libclamav/c++/llvm/include/llvm/MC/MCSectionCOFF.h
new file mode 100644
index 0000000..f828e10
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/MC/MCSectionCOFF.h
@@ -0,0 +1,67 @@
+//===- MCSectionCOFF.h - COFF Machine Code Sections -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MCSectionCOFF class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSECTIONCOFF_H
+#define LLVM_MC_MCSECTIONCOFF_H
+
+#include "llvm/MC/MCSection.h"
+
+#include "llvm/Support/COFF.h"
+
+namespace llvm {
+
+/// MCSectionCOFF - This represents a section on Windows
+ class MCSectionCOFF : public MCSection {
+ // The memory for this string is stored in the same MCContext as *this.
+ StringRef SectionName;
+
+ /// Characteristics - This is the Characteristics field of a section,
+ // drawn from the enums below.
+ unsigned Characteristics;
+
+ /// Selection - This is the Selection field for the section symbol, if
+ /// it is a COMDAT section (Characteristics & IMAGE_SCN_LNK_COMDAT) != 0
+ int Selection;
+
+ private:
+ friend class MCContext;
+ MCSectionCOFF(StringRef Section, unsigned Characteristics,
+ int Selection, SectionKind K)
+ : MCSection(SV_COFF, K), SectionName(Section),
+ Characteristics(Characteristics), Selection (Selection) {
+ assert ((Characteristics & 0x00F00000) == 0 &&
+ "alignment must not be set upon section creation");
+ }
+ ~MCSectionCOFF();
+
+ public:
+ /// ShouldOmitSectionDirective - Decides whether a '.section' directive
+ /// should be printed before the section name
+ bool ShouldOmitSectionDirective(StringRef Name, const MCAsmInfo &MAI) const;
+
+ StringRef getSectionName() const { return SectionName; }
+ unsigned getCharacteristics() const { return Characteristics; }
+ int getSelection () const { return Selection; }
+
+ virtual void PrintSwitchToSection(const MCAsmInfo &MAI,
+ raw_ostream &OS) const;
+
+ static bool classof(const MCSection *S) {
+ return S->getVariant() == SV_COFF;
+ }
+ static bool classof(const MCSectionCOFF *) { return true; }
+ };
+
+} // end namespace llvm
+
+#endif
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCSectionELF.h b/libclamav/c++/llvm/include/llvm/MC/MCSectionELF.h
index 2dccf5c..5de0bf5 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCSectionELF.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCSectionELF.h
@@ -21,7 +21,9 @@ namespace llvm {
/// MCSectionELF - This represents a section on linux, lots of unix variants
/// and some bare metal systems.
class MCSectionELF : public MCSection {
- std::string SectionName;
+ /// SectionName - This is the name of the section. The referenced memory is
+ /// owned by TargetLoweringObjectFileELF's ELFUniqueMap.
+ StringRef SectionName;
/// Type - This is the sh_type field of a section, drawn from the enums below.
unsigned Type;
@@ -33,17 +35,20 @@ class MCSectionELF : public MCSection {
/// IsExplicit - Indicates that this section comes from globals with an
/// explicit section specified.
bool IsExplicit;
+
+ /// EntrySize - The size of each entry in this section. This size only
+ /// makes sense for sections that contain fixed-sized entries. If a
+ /// section does not contain fixed-sized entries 'EntrySize' will be 0.
+ unsigned EntrySize;
-protected:
+private:
+ friend class MCContext;
MCSectionELF(StringRef Section, unsigned type, unsigned flags,
- SectionKind K, bool isExplicit)
- : MCSection(K), SectionName(Section.str()), Type(type), Flags(flags),
- IsExplicit(isExplicit) {}
+ SectionKind K, bool isExplicit, unsigned entrySize)
+ : MCSection(SV_ELF, K), SectionName(Section), Type(type), Flags(flags),
+ IsExplicit(isExplicit), EntrySize(entrySize) {}
+ ~MCSectionELF();
public:
-
- static MCSectionELF *Create(StringRef Section, unsigned Type,
- unsigned Flags, SectionKind K, bool isExplicit,
- MCContext &Ctx);
/// ShouldOmitSectionDirective - Decides whether a '.section' directive
/// should be printed before the section name
@@ -151,38 +156,39 @@ public:
// This section holds Thread-Local Storage.
SHF_TLS = 0x400U,
+
- /// FIRST_TARGET_DEP_FLAG - This is the first flag that subclasses are
- /// allowed to specify.
- FIRST_TARGET_DEP_FLAG = 0x800U,
-
- /// TARGET_INDEP_SHF - This is the bitmask for all the target independent
- /// section flags. Targets can define their own target flags above these.
- /// If they do that, they should implement their own MCSectionELF subclasses
- /// and implement the virtual method hooks below to handle printing needs.
- TARGET_INDEP_SHF = FIRST_TARGET_DEP_FLAG-1U
+ // Start of target-specific flags.
+
+ /// XCORE_SHF_CP_SECTION - All sections with the "c" flag are grouped
+ /// together by the linker to form the constant pool and the cp register is
+ /// set to the start of the constant pool by the boot code.
+ XCORE_SHF_CP_SECTION = 0x800U,
+
+ /// XCORE_SHF_DP_SECTION - All sections with the "d" flag are grouped
+ /// together by the linker to form the data section and the dp register is
+ /// set to the start of the section by the boot code.
+ XCORE_SHF_DP_SECTION = 0x1000U
};
- StringRef getSectionName() const {
- return StringRef(SectionName);
- }
-
+ StringRef getSectionName() const { return SectionName; }
unsigned getType() const { return Type; }
unsigned getFlags() const { return Flags; }
+ unsigned getEntrySize() const { return EntrySize; }
- virtual void PrintSwitchToSection(const MCAsmInfo &MAI,
- raw_ostream &OS) const;
-
+ void PrintSwitchToSection(const MCAsmInfo &MAI,
+ raw_ostream &OS) const;
- /// PrintTargetSpecificSectionFlags - Targets that define their own
- /// MCSectionELF subclasses with target specific section flags should
- /// implement this method if they end up adding letters to the attributes
- /// list.
- virtual void PrintTargetSpecificSectionFlags(const MCAsmInfo &MAI,
- raw_ostream &OS) const {
+ /// isBaseAddressKnownZero - We know that non-allocatable sections (like
+ /// debug info) have a base of zero.
+ virtual bool isBaseAddressKnownZero() const {
+ return (getFlags() & SHF_ALLOC) == 0;
}
-
-
+
+ static bool classof(const MCSection *S) {
+ return S->getVariant() == SV_ELF;
+ }
+ static bool classof(const MCSectionELF *) { return true; }
};
} // end namespace llvm
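
isBaseAddressKnownZero() above keys off SHF_ALLOC: non-allocatable sections such as debug info are assumed to start at address zero, so section offsets can be emitted directly instead of as a subtraction plus relocation. A minimal sketch of that flag test, for illustration only:

// shf_alloc_demo.cpp -- standalone sketch of the SHF_ALLOC test above.
#include <cstdio>

enum {
  SHF_WRITE = 0x1U,
  SHF_ALLOC = 0x2U   // section occupies memory at run time
};

static bool isBaseAddressKnownZero(unsigned Flags) {
  // Non-allocatable sections (e.g. .debug_info) get a base address of zero.
  return (Flags & SHF_ALLOC) == 0;
}

int main() {
  std::printf(".debug_info (flags 0x0): %d\n", isBaseAddressKnownZero(0x0));
  std::printf(".data (SHF_ALLOC|SHF_WRITE): %d\n",
              isBaseAddressKnownZero(SHF_ALLOC | SHF_WRITE));
  return 0;
}
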
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCSectionMachO.h b/libclamav/c++/llvm/include/llvm/MC/MCSectionMachO.h
index 6156819..2d9d133 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCSectionMachO.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCSectionMachO.h
@@ -34,30 +34,10 @@ class MCSectionMachO : public MCSection {
unsigned Reserved2;
MCSectionMachO(StringRef Segment, StringRef Section,
- unsigned TAA, unsigned reserved2, SectionKind K)
- : MCSection(K), TypeAndAttributes(TAA), Reserved2(reserved2) {
- assert(Segment.size() <= 16 && Section.size() <= 16 &&
- "Segment or section string too long");
- for (unsigned i = 0; i != 16; ++i) {
- if (i < Segment.size())
- SegmentName[i] = Segment[i];
- else
- SegmentName[i] = 0;
-
- if (i < Section.size())
- SectionName[i] = Section[i];
- else
- SectionName[i] = 0;
- }
- }
+ unsigned TAA, unsigned reserved2, SectionKind K);
+ friend class MCContext;
public:
- static MCSectionMachO *Create(StringRef Segment,
- StringRef Section,
- unsigned TypeAndAttributes,
- unsigned Reserved2,
- SectionKind K, MCContext &Ctx);
-
/// These are the section type and attributes fields. A MachO section can
/// have only one Type, but can have any of the attributes specified.
enum {
@@ -107,8 +87,20 @@ public:
/// S_LAZY_DYLIB_SYMBOL_POINTERS - Section with lazy symbol pointers to
/// lazy loaded dylibs.
S_LAZY_DYLIB_SYMBOL_POINTERS = 0x10U,
+ /// S_THREAD_LOCAL_REGULAR - Section with ....
+ S_THREAD_LOCAL_REGULAR = 0x11U,
+ /// S_THREAD_LOCAL_ZEROFILL - Thread local zerofill section.
+ S_THREAD_LOCAL_ZEROFILL = 0x12U,
+ /// S_THREAD_LOCAL_VARIABLES - Section with thread local variable structure
+ /// data.
+ S_THREAD_LOCAL_VARIABLES = 0x13U,
+ /// S_THREAD_LOCAL_VARIABLE_POINTERS - Section with ....
+ S_THREAD_LOCAL_VARIABLE_POINTERS = 0x14U,
+ /// S_THREAD_LOCAL_INIT_FUNCTION_POINTERS - Section with thread local
+ /// variable initialization pointers to functions.
+ S_THREAD_LOCAL_INIT_FUNCTION_POINTERS = 0x15U,
- LAST_KNOWN_SECTION_TYPE = S_LAZY_DYLIB_SYMBOL_POINTERS,
+ LAST_KNOWN_SECTION_TYPE = S_THREAD_LOCAL_INIT_FUNCTION_POINTERS,
// Valid section attributes.
@@ -151,10 +143,15 @@ public:
return StringRef(SectionName, 16);
return StringRef(SectionName);
}
-
+
unsigned getTypeAndAttributes() const { return TypeAndAttributes; }
unsigned getStubSize() const { return Reserved2; }
-
+
+ unsigned getType() const { return TypeAndAttributes & SECTION_TYPE; }
+ bool hasAttribute(unsigned Value) const {
+ return (TypeAndAttributes & Value) != 0;
+ }
+
/// ParseSectionSpecifier - Parse the section specifier indicated by "Spec".
/// This is a string that can appear after a .section directive in a mach-o
/// flavored .s file. If successful, this fills in the specified Out
@@ -165,9 +162,14 @@ public:
StringRef &Section, // Out.
unsigned &TAA, // Out.
unsigned &StubSize); // Out.
-
+
virtual void PrintSwitchToSection(const MCAsmInfo &MAI,
raw_ostream &OS) const;
+
+ static bool classof(const MCSection *S) {
+ return S->getVariant() == SV_MachO;
+ }
+ static bool classof(const MCSectionMachO *) { return true; }
};
} // end namespace llvm
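
The new getType()/hasAttribute() accessors above decode the single TypeAndAttributes word: the low byte holds the section type and the remaining bits are attribute flags. A standalone sketch, for illustration only; the mask values follow <mach-o/loader.h>.

// machosec_demo.cpp -- standalone sketch of the TypeAndAttributes packing.
#include <cstdint>
#include <cstdio>

enum {
  SECTION_TYPE             = 0x000000FFU,  // low byte: section type
  S_ZEROFILL               = 0x1U,
  S_ATTR_PURE_INSTRUCTIONS = 0x80000000U   // attribute: section holds only code
};

int main() {
  uint32_t TAA = S_ATTR_PURE_INSTRUCTIONS | 0x0U;  // regular section, pure code

  unsigned Type = TAA & SECTION_TYPE;                      // cf. getType()
  bool PureCode = (TAA & S_ATTR_PURE_INSTRUCTIONS) != 0;   // cf. hasAttribute()

  std::printf("type = %u, pure instructions = %s\n",
              Type, PureCode ? "yes" : "no");
  return 0;
}
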
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCStreamer.h b/libclamav/c++/llvm/include/llvm/MC/MCStreamer.h
index 696d024..1ce1b0e 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCStreamer.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCStreamer.h
@@ -27,6 +27,7 @@ namespace llvm {
class MCSection;
class MCSymbol;
class StringRef;
+ class TargetAsmBackend;
class Twine;
class raw_ostream;
class formatted_raw_ostream;
@@ -53,6 +54,10 @@ namespace llvm {
/// kept up to date by SwitchSection.
const MCSection *CurSection;
+ /// PrevSection - This is the previous section code is being emitted to, it is
+ /// kept up to date by SwitchSection.
+ const MCSection *PrevSection;
+
public:
virtual ~MCStreamer();
@@ -61,9 +66,13 @@ namespace llvm {
/// @name Assembly File Formatting.
/// @{
- /// isVerboseAsm - Return true if this streamer supports verbose assembly at
- /// all.
+ /// isVerboseAsm - Return true if this streamer supports verbose assembly
+ /// and if it is enabled.
virtual bool isVerboseAsm() const { return false; }
+
+ /// hasRawTextSupport - Return true if this asm streamer supports emitting
+ /// unformatted text to the .s file with EmitRawText.
+ virtual bool hasRawTextSupport() const { return false; }
/// AddComment - Add a comment that can be emitted to the generated .s
/// file if applicable as a QoI issue to make the output of the compiler
@@ -87,10 +96,14 @@ namespace llvm {
/// @name Symbol & Section Management
/// @{
- /// getCurrentSection - Return the current seciton that the streamer is
+ /// getCurrentSection - Return the current section that the streamer is
/// emitting code to.
const MCSection *getCurrentSection() const { return CurSection; }
+ /// getPreviousSection - Return the previous section that the streamer is
+ /// emitting code to.
+ const MCSection *getPreviousSection() const { return PrevSection; }
+
/// SwitchSection - Set the current section where code is being emitted to
/// @p Section. This is required to update CurSection.
///
@@ -133,7 +146,24 @@ namespace llvm {
/// @param DescValue - The value to set into the n_desc field.
virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) = 0;
-
+ /// BeginCOFFSymbolDef - Start emitting COFF symbol definition
+ ///
+ /// @param Symbol - The symbol to have its External & Type fields set.
+ virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol) = 0;
+
+ /// EmitCOFFSymbolStorageClass - Emit the storage class of the symbol.
+ ///
+ /// @param StorageClass - The storage class the symbol should have.
+ virtual void EmitCOFFSymbolStorageClass(int StorageClass) = 0;
+
+ /// EmitCOFFSymbolType - Emit the type of the symbol.
+ ///
+ /// @param Type - A COFF type identifier (see COFF::SymbolType in X86COFF.h)
+ virtual void EmitCOFFSymbolType(int Type) = 0;
+
+ /// EndCOFFSymbolDef - Marks the end of the symbol definition.
+ virtual void EndCOFFSymbolDef() = 0;
+
/// EmitELFSize - Emit an ELF .size directive.
///
/// This corresponds to an assembler statement such as:
@@ -156,7 +186,7 @@ namespace llvm {
/// @param Size - The size of the common symbol.
virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size) = 0;
- /// EmitZerofill - Emit a the zerofill section and an option symbol.
+ /// EmitZerofill - Emit the zerofill section and an optional symbol.
///
/// @param Section - The zerofill section to create and or to put the symbol
/// @param Symbol - The zerofill symbol to emit, if non-NULL.
@@ -166,6 +196,15 @@ namespace llvm {
virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
unsigned Size = 0,unsigned ByteAlignment = 0) = 0;
+ /// EmitTBSSSymbol - Emit a thread local bss (.tbss) symbol.
+ ///
+ /// @param Section - The thread local common section.
+ /// @param Symbol - The thread local common symbol to emit.
+ /// @param Size - The size of the symbol.
+ /// @param ByteAlignment - The alignment of the thread local common symbol
+ /// if non-zero. This must be a power of 2 on some targets.
+ virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment = 0) = 0;
/// @}
/// @name Generating Data
/// @{
@@ -186,11 +225,17 @@ namespace llvm {
/// @param Size - The size of the integer (in bytes) to emit. This must
/// match a native machine width.
virtual void EmitValue(const MCExpr *Value, unsigned Size,
- unsigned AddrSpace) = 0;
+ unsigned AddrSpace = 0) = 0;
/// EmitIntValue - Special case of EmitValue that avoids the client having
/// to pass in a MCExpr for constant integers.
- virtual void EmitIntValue(uint64_t Value, unsigned Size,unsigned AddrSpace);
+ virtual void EmitIntValue(uint64_t Value, unsigned Size,
+ unsigned AddrSpace = 0);
+
+ /// EmitSymbolValue - Special case of EmitValue that avoids the client
+ /// having to pass in a MCExpr for MCSymbols.
+ virtual void EmitSymbolValue(const MCSymbol *Sym, unsigned Size,
+ unsigned AddrSpace);
/// EmitGPRel32Value - Emit the expression @p Value into the output as a
/// gprel32 (32-bit GP relative) value.
@@ -272,7 +317,13 @@ namespace llvm {
/// section.
virtual void EmitInstruction(const MCInst &Inst) = 0;
- /// Finish - Finish emission of machine code and flush any output.
+ /// EmitRawText - If this file is backed by an assembly streamer, this dumps
+ /// the specified string in the output .s file. This capability is
+ /// indicated by the hasRawTextSupport() predicate. By default this aborts.
+ virtual void EmitRawText(StringRef String);
+ void EmitRawText(const Twine &String);
+
+ /// Finish - Finish emission of machine code.
virtual void Finish() = 0;
};
@@ -285,32 +336,48 @@ namespace llvm {
/// assembler.
///
/// \param InstPrint - If given, the instruction printer to use. If not given
- /// the MCInst representation will be printed.
+ /// the MCInst representation will be printed. This method takes ownership of
+ /// InstPrint.
///
/// \param CE - If given, a code emitter to use to show the instruction
- /// encoding inline with the assembly.
+ /// encoding inline with the assembly. This method takes ownership of \arg CE.
///
/// \param ShowInst - Whether to show the MCInst representation inline with
/// the assembly.
MCStreamer *createAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS,
- const MCAsmInfo &MAI, bool isLittleEndian,
- bool isVerboseAsm,
+ bool isLittleEndian, bool isVerboseAsm,
MCInstPrinter *InstPrint = 0,
MCCodeEmitter *CE = 0,
bool ShowInst = false);
- // FIXME: These two may end up getting rolled into a single
- // createObjectStreamer interface, which implements the assembler backend, and
- // is parameterized on an output object file writer.
-
- /// createMachOStream - Create a machine code streamer which will generative
+ /// createMachOStreamer - Create a machine code streamer which will generate
/// Mach-O format object files.
- MCStreamer *createMachOStreamer(MCContext &Ctx, raw_ostream &OS,
- MCCodeEmitter *CE);
+ ///
+ /// Takes ownership of \arg TAB and \arg CE.
+ MCStreamer *createMachOStreamer(MCContext &Ctx, TargetAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *CE,
+ bool RelaxAll = false);
+
+ /// createWinCOFFStreamer - Create a machine code streamer which will
+ /// generate Microsoft COFF format object files.
+ ///
+ /// Takes ownership of \arg TAB and \arg CE.
+ MCStreamer *createWinCOFFStreamer(MCContext &Ctx,
+ TargetAsmBackend &TAB,
+ MCCodeEmitter &CE, raw_ostream &OS,
+ bool RelaxAll = false);
- /// createELFStreamer - Create a machine code streamer which will generative
+ /// createELFStreamer - Create a machine code streamer which will generate
/// ELF format object files.
- MCStreamer *createELFStreamer(MCContext &Ctx, raw_ostream &OS);
+ MCStreamer *createELFStreamer(MCContext &Ctx, TargetAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *CE,
+ bool RelaxAll = false);
+
+ /// createLoggingStreamer - Create a machine code streamer which just logs the
+ /// API calls and then dispatches to another streamer.
+ ///
+ /// The new streamer takes ownership of the \arg Child.
+ MCStreamer *createLoggingStreamer(MCStreamer *Child, raw_ostream &OS);
} // end namespace llvm
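
hasRawTextSupport()/EmitRawText() give callers a capability query: when the output is a textual .s streamer the raw string can be passed through, otherwise the caller must emit something structured. A standalone sketch of that pattern, for illustration only, with a hypothetical cut-down interface:

// rawtext_demo.cpp -- standalone sketch of the capability-query pattern above.
#include <cstdio>
#include <string>

struct MiniStreamer {
  virtual ~MiniStreamer() {}
  virtual bool hasRawTextSupport() const { return false; }
  virtual void EmitRawText(const std::string &S) { (void)S; /* real default aborts */ }
  virtual void EmitIntValue(unsigned long long V, unsigned Size) = 0;
};

struct AsmStreamer : MiniStreamer {
  virtual bool hasRawTextSupport() const { return true; }
  virtual void EmitRawText(const std::string &S) { std::printf("%s\n", S.c_str()); }
  virtual void EmitIntValue(unsigned long long V, unsigned Size) {
    std::printf("\t.value %llu (size %u)\n", V, Size);
  }
};

// Caller-side pattern: only hand raw text to streamers that can take it.
static void emitMarker(MiniStreamer &S) {
  if (S.hasRawTextSupport())
    S.EmitRawText("\t# hand-written marker");
  else
    S.EmitIntValue(0, 4);   // structured fallback for object streamers
}

int main() {
  AsmStreamer S;
  emitMarker(S);
  return 0;
}
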
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCSymbol.h b/libclamav/c++/llvm/include/llvm/MC/MCSymbol.h
index d5c4d95..1b432c2 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCSymbol.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCSymbol.h
@@ -14,9 +14,7 @@
#ifndef LLVM_MC_MCSYMBOL_H
#define LLVM_MC_MCSYMBOL_H
-#include <string>
#include "llvm/ADT/StringRef.h"
-#include "llvm/System/DataTypes.h"
namespace llvm {
class MCExpr;
@@ -30,16 +28,16 @@ namespace llvm {
///
/// If the symbol is defined/emitted into the current translation unit, the
/// Section member is set to indicate what section it lives in. Otherwise, if
- /// it is a reference to an external entity, it has a null section.
- ///
+ /// it is a reference to an external entity, it has a null section.
class MCSymbol {
// Special sentinal value for the absolute pseudo section.
//
// FIXME: Use a PointerInt wrapper for this?
static const MCSection *AbsolutePseudoSection;
- /// Name - The name of the symbol.
- std::string Name;
+ /// Name - The name of the symbol. The referred-to string data is actually
+ /// held by the StringMap that lives in MCContext.
+ StringRef Name;
/// Section - The section the symbol is defined in. This is null for
/// undefined symbols, and the special AbsolutePseudoSection value for
@@ -53,25 +51,32 @@ namespace llvm {
/// typically does not survive in the .o file's symbol table. Usually
/// "Lfoo" or ".foo".
unsigned IsTemporary : 1;
-
+
+ /// IsUsedInExpr - True if this symbol has been used in an expression and
+ /// cannot be redefined.
+ unsigned IsUsedInExpr : 1;
+
private: // MCContext creates and uniques these.
friend class MCContext;
- MCSymbol(StringRef _Name, bool _IsTemporary)
- : Name(_Name), Section(0), Value(0), IsTemporary(_IsTemporary) {}
+ MCSymbol(StringRef name, bool isTemporary)
+ : Name(name), Section(0), Value(0),
+ IsTemporary(isTemporary), IsUsedInExpr(false) {}
MCSymbol(const MCSymbol&); // DO NOT IMPLEMENT
void operator=(const MCSymbol&); // DO NOT IMPLEMENT
public:
/// getName - Get the symbol name.
- const std::string &getName() const { return Name; }
+ StringRef getName() const { return Name; }
- /// @name Symbol Type
+ /// @name Accessors
/// @{
/// isTemporary - Check if this is an assembler temporary symbol.
- bool isTemporary() const {
- return IsTemporary;
- }
+ bool isTemporary() const { return IsTemporary; }
+
+ /// isUsedInExpr - Check whether this symbol has been used in an expression.
+ bool isUsedInExpr() const { return IsUsedInExpr; }
+ void setUsedInExpr(bool Value) { IsUsedInExpr = Value; }
/// @}
/// @name Associated Sections
@@ -84,6 +89,12 @@ namespace llvm {
return Section != 0;
}
+ /// isInSection - Check if this symbol is defined in some section (i.e., it
+ /// is defined but not absolute).
+ bool isInSection() const {
+ return isDefined() && !isAbsolute();
+ }
+
/// isUndefined - Check if this symbol undefined (i.e., implicitly defined).
bool isUndefined() const {
return !isDefined();
@@ -97,7 +108,7 @@ namespace llvm {
/// getSection - Get the section associated with a defined, non-absolute
/// symbol.
const MCSection &getSection() const {
- assert(!isUndefined() && !isAbsolute() && "Invalid accessor!");
+ assert(isInSection() && "Invalid accessor!");
return *Section;
}
@@ -121,14 +132,14 @@ namespace llvm {
return Value != 0;
}
- /// getValue() - Get the value for variable symbols, or null if the symbol
- /// is not a variable.
- const MCExpr *getValue() const { return Value; }
-
- void setValue(const MCExpr *Value) {
- this->Value = Value;
+ /// getValue() - Get the value for variable symbols.
+ const MCExpr *getVariableValue() const {
+ assert(isVariable() && "Invalid accessor!");
+ return Value;
}
+ void setVariableValue(const MCExpr *Value);
+
/// @}
/// print - Print the value to the stream \arg OS.
diff --git a/libclamav/c++/llvm/include/llvm/MC/MCValue.h b/libclamav/c++/llvm/include/llvm/MC/MCValue.h
index 8aa73f3..11b6c2a 100644
--- a/libclamav/c++/llvm/include/llvm/MC/MCValue.h
+++ b/libclamav/c++/llvm/include/llvm/MC/MCValue.h
@@ -19,8 +19,9 @@
#include <cassert>
namespace llvm {
-class MCSymbol;
class MCAsmInfo;
+class MCSymbol;
+class MCSymbolRefExpr;
class raw_ostream;
/// MCValue - This represents an "assembler immediate". In its most general
@@ -34,13 +35,13 @@ class raw_ostream;
/// Note that this class must remain a simple POD value class, because we need
/// it to live in unions etc.
class MCValue {
- const MCSymbol *SymA, *SymB;
+ const MCSymbolRefExpr *SymA, *SymB;
int64_t Cst;
public:
int64_t getConstant() const { return Cst; }
- const MCSymbol *getSymA() const { return SymA; }
- const MCSymbol *getSymB() const { return SymB; }
+ const MCSymbolRefExpr *getSymA() const { return SymA; }
+ const MCSymbolRefExpr *getSymB() const { return SymB; }
/// isAbsolute - Is this an absolute (as opposed to relocatable) value.
bool isAbsolute() const { return !SymA && !SymB; }
@@ -57,11 +58,11 @@ public:
/// print - Print the value to the stream \arg OS.
void print(raw_ostream &OS, const MCAsmInfo *MAI) const;
-
+
/// dump - Print the value to stderr.
void dump() const;
- static MCValue get(const MCSymbol *SymA, const MCSymbol *SymB = 0,
+ static MCValue get(const MCSymbolRefExpr *SymA, const MCSymbolRefExpr *SymB=0,
int64_t Val = 0) {
MCValue R;
assert((!SymB || SymA) && "Invalid relocatable MCValue!");
@@ -70,7 +71,7 @@ public:
R.SymB = SymB;
return R;
}
-
+
static MCValue get(int64_t Val) {
MCValue R;
R.Cst = Val;
@@ -78,7 +79,7 @@ public:
R.SymB = 0;
return R;
}
-
+
};
} // end namespace llvm
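
MCValue models the assembler immediate "SymA - SymB + Constant"; the change above just records full MCSymbolRefExpr operands (so reference modifiers survive) instead of bare symbols. A standalone sketch of evaluating such an immediate once both symbols have addresses, for illustration only:

// mcvalue_demo.cpp -- standalone sketch of the "SymA - SymB + Cst" form.
#include <cstdint>
#include <cstdio>

struct Imm {
  const int64_t *SymA;   // address of symbol A, or NULL
  const int64_t *SymB;   // address of symbol B (subtracted), or NULL
  int64_t Cst;           // constant addend

  bool isAbsolute() const { return !SymA && !SymB; }

  int64_t evaluate() const {
    int64_t R = Cst;
    if (SymA) R += *SymA;
    if (SymB) R -= *SymB;
    return R;
  }
};

int main() {
  int64_t AddrFoo = 0x1000, AddrBar = 0x0F00;
  Imm Diff = { &AddrFoo, &AddrBar, 8 };    // foo - bar + 8
  std::printf("absolute? %d, value = %lld\n",
              Diff.isAbsolute(), (long long)Diff.evaluate());
  return 0;
}
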
diff --git a/libclamav/c++/llvm/include/llvm/MC/MachObjectWriter.h b/libclamav/c++/llvm/include/llvm/MC/MachObjectWriter.h
new file mode 100644
index 0000000..9b1ff1d
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/MC/MachObjectWriter.h
@@ -0,0 +1,44 @@
+//===-- llvm/MC/MachObjectWriter.h - Mach-O File Writer ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MACHOBJECTWRITER_H
+#define LLVM_MC_MACHOBJECTWRITER_H
+
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+
+namespace llvm {
+class MCAssembler;
+class MCFragment;
+class MCFixup;
+class MCValue;
+class raw_ostream;
+
+class MachObjectWriter : public MCObjectWriter {
+ void *Impl;
+
+public:
+ MachObjectWriter(raw_ostream &OS, bool Is64Bit, bool IsLittleEndian = true);
+ virtual ~MachObjectWriter();
+
+ virtual void ExecutePostLayoutBinding(MCAssembler &Asm);
+
+ virtual void RecordRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue);
+
+ virtual void WriteObject(const MCAssembler &Asm, const MCAsmLayout &Layout);
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/libclamav/c++/llvm/include/llvm/MC/SectionKind.h b/libclamav/c++/llvm/include/llvm/MC/SectionKind.h
index c9557f2..85a91c6 100644
--- a/libclamav/c++/llvm/include/llvm/MC/SectionKind.h
+++ b/libclamav/c++/llvm/include/llvm/MC/SectionKind.h
@@ -29,10 +29,10 @@ class SectionKind {
enum Kind {
/// Metadata - Debug info sections or other metadata.
Metadata,
-
+
/// Text - Text section, used for functions and other executable code.
Text,
-
+
/// ReadOnly - Data that is never written to at program runtime by the
/// program or the dynamic linker. Things in the top-level readonly
/// SectionKind are not mergeable.
@@ -45,7 +45,7 @@ class SectionKind {
/// Mergeable1ByteCString - 1 byte mergable, null terminated, string.
Mergeable1ByteCString,
-
+
/// Mergeable2ByteCString - 2 byte mergable, null terminated, string.
Mergeable2ByteCString,
@@ -56,11 +56,11 @@ class SectionKind {
/// constants together. For example, this can be used to unique
/// constant pool entries etc.
MergeableConst,
-
+
/// MergeableConst4 - This is a section used by 4-byte constants,
/// for example, floats.
MergeableConst4,
-
+
/// MergeableConst8 - This is a section used by 8-byte constants,
/// for example, doubles.
MergeableConst8,
@@ -68,33 +68,33 @@ class SectionKind {
/// MergeableConst16 - This is a section used by 16-byte constants,
/// for example, vectors.
MergeableConst16,
-
+
/// Writeable - This is the base of all segments that need to be written
/// to during program runtime.
-
+
/// ThreadLocal - This is the base of all TLS segments. All TLS
/// objects must be writeable, otherwise there is no reason for them to
/// be thread local!
-
+
/// ThreadBSS - Zero-initialized TLS data objects.
ThreadBSS,
-
+
/// ThreadData - Initialized TLS data objects.
ThreadData,
-
+
/// GlobalWriteableData - Writeable data that is global (not thread
/// local).
-
+
/// BSS - Zero initialized writeable data.
BSS,
-
+
/// BSSLocal - This is BSS (zero initialized and writable) data
/// which has local linkage.
BSSLocal,
-
+
/// BSSExtern - This is BSS data with normal external linkage.
BSSExtern,
-
+
/// Common - Data with common linkage. These represent tentative
/// definitions, which always have a zero initializer and are never
/// marked 'constant'.
@@ -123,20 +123,20 @@ class SectionKind {
/// mark the pages these globals end up on as read-only after it is
/// done with its relocation phase.
ReadOnlyWithRel,
-
+
/// ReadOnlyWithRelLocal - This is data that is readonly by the
/// program, but must be writeable so that the dynamic linker
/// can perform relocations in it. This is used when we know
/// that all the relocations are to globals in this final
/// linked image.
ReadOnlyWithRelLocal
-
+
} K : 8;
public:
-
+
bool isMetadata() const { return K == Metadata; }
bool isText() const { return K == Text; }
-
+
bool isReadOnly() const {
return K == ReadOnly || isMergeableCString() ||
isMergeableConst();
@@ -149,7 +149,7 @@ public:
bool isMergeable1ByteCString() const { return K == Mergeable1ByteCString; }
bool isMergeable2ByteCString() const { return K == Mergeable2ByteCString; }
bool isMergeable4ByteCString() const { return K == Mergeable4ByteCString; }
-
+
bool isMergeableConst() const {
return K == MergeableConst || K == MergeableConst4 ||
K == MergeableConst8 || K == MergeableConst16;
@@ -157,38 +157,38 @@ public:
bool isMergeableConst4() const { return K == MergeableConst4; }
bool isMergeableConst8() const { return K == MergeableConst8; }
bool isMergeableConst16() const { return K == MergeableConst16; }
-
+
bool isWriteable() const {
return isThreadLocal() || isGlobalWriteableData();
}
-
+
bool isThreadLocal() const {
return K == ThreadData || K == ThreadBSS;
}
-
- bool isThreadBSS() const { return K == ThreadBSS; }
- bool isThreadData() const { return K == ThreadData; }
+
+ bool isThreadBSS() const { return K == ThreadBSS; }
+ bool isThreadData() const { return K == ThreadData; }
bool isGlobalWriteableData() const {
return isBSS() || isCommon() || isDataRel() || isReadOnlyWithRel();
}
-
+
bool isBSS() const { return K == BSS || K == BSSLocal || K == BSSExtern; }
bool isBSSLocal() const { return K == BSSLocal; }
bool isBSSExtern() const { return K == BSSExtern; }
-
+
bool isCommon() const { return K == Common; }
-
+
bool isDataRel() const {
return K == DataRel || K == DataRelLocal || K == DataNoRel;
}
-
+
bool isDataRelLocal() const {
return K == DataRelLocal || K == DataNoRel;
}
bool isDataNoRel() const { return K == DataNoRel; }
-
+
bool isReadOnlyWithRel() const {
return K == ReadOnlyWithRel || K == ReadOnlyWithRelLocal;
}
@@ -196,14 +196,14 @@ public:
bool isReadOnlyWithRelLocal() const {
return K == ReadOnlyWithRelLocal;
}
-private:
+private:
static SectionKind get(Kind K) {
SectionKind Res;
Res.K = K;
return Res;
}
public:
-
+
static SectionKind getMetadata() { return get(Metadata); }
static SectionKind getText() { return get(Text); }
static SectionKind getReadOnly() { return get(ReadOnly); }
@@ -234,7 +234,7 @@ public:
return get(ReadOnlyWithRelLocal);
}
};
-
+
} // end namespace llvm
#endif
diff --git a/libclamav/c++/llvm/include/llvm/Metadata.h b/libclamav/c++/llvm/include/llvm/Metadata.h
index cecb7da..f5a80a3 100644
--- a/libclamav/c++/llvm/include/llvm/Metadata.h
+++ b/libclamav/c++/llvm/include/llvm/Metadata.h
@@ -38,13 +38,14 @@ class MDString : public Value {
MDString(const MDString &); // DO NOT IMPLEMENT
StringRef Str;
-protected:
explicit MDString(LLVMContext &C, StringRef S);
public:
static MDString *get(LLVMContext &Context, StringRef Str);
- static MDString *get(LLVMContext &Context, const char *Str);
-
+ static MDString *get(LLVMContext &Context, const char *Str) {
+ return get(Context, Str ? StringRef(Str) : StringRef());
+ }
+
StringRef getString() const { return Str; }
unsigned getLength() const { return (unsigned)Str.size(); }
@@ -89,7 +90,7 @@ class MDNode : public Value, public FoldingSetNode {
FunctionLocalBit = 1 << 0,
/// NotUniquedBit - This is set on MDNodes that are not uniqued because they
- /// have a null perand.
+ /// have a null operand.
NotUniquedBit = 1 << 1,
/// DestroyFlag - This bit is set by destroy() so the destructor can assert
@@ -109,9 +110,8 @@ class MDNode : public Value, public FoldingSetNode {
void replaceOperand(MDNodeOperand *Op, Value *NewVal);
~MDNode();
-protected:
- explicit MDNode(LLVMContext &C, Value *const *Vals, unsigned NumVals,
- bool isFunctionLocal);
+ MDNode(LLVMContext &C, Value *const *Vals, unsigned NumVals,
+ bool isFunctionLocal);
static MDNode *getMDNode(LLVMContext &C, Value *const *Vals, unsigned NumVals,
FunctionLocalness FL, bool Insert = true);
@@ -126,6 +126,16 @@ public:
static MDNode *getIfExists(LLVMContext &Context, Value *const *Vals,
unsigned NumVals);
+
+ /// getTemporary - Return a temporary MDNode, for use in constructing
+ /// cyclic MDNode structures. A temporary MDNode is not uniqued,
+ /// may be RAUW'd, and must be manually deleted with deleteTemporary.
+ static MDNode *getTemporary(LLVMContext &Context, Value *const *Vals,
+ unsigned NumVals);
+
+ /// deleteTemporary - Deallocate a node created by getTemporary. The
+ /// node must not have any users.
+ static void deleteTemporary(MDNode *N);
/// getOperand - Return specified operand.
Value *getOperand(unsigned i) const;
@@ -147,9 +157,6 @@ public:
// critical code because it recursively visits all the MDNode's operands.
const Function *getFunction() const;
- // destroy - Delete this node. Only when there are no uses.
- void destroy();
-
/// Profile - calculate a unique identifier for this MDNode to collapse
/// duplicates
void Profile(FoldingSetNodeID &ID) const;
@@ -160,6 +167,9 @@ public:
return V->getValueID() == MDNodeVal;
}
private:
+ // destroy - Delete this node. Only when there are no uses.
+ void destroy();
+
bool isNotUniqued() const {
return (getSubclassDataFromValue() & NotUniquedBit) != 0;
}
@@ -173,31 +183,25 @@ private:
};
//===----------------------------------------------------------------------===//
-/// NamedMDNode - a tuple of MDNodes.
-/// NamedMDNode is always named. All NamedMDNode operand has a type of metadata.
-class NamedMDNode : public Value, public ilist_node<NamedMDNode> {
+/// NamedMDNode - a tuple of MDNodes. Despite its name, a NamedMDNode isn't
+/// itself an MDNode. NamedMDNodes belong to modules, have names, and contain
+/// lists of MDNodes.
+class NamedMDNode : public ilist_node<NamedMDNode> {
friend class SymbolTableListTraits<NamedMDNode, Module>;
friend struct ilist_traits<NamedMDNode>;
friend class LLVMContextImpl;
+ friend class Module;
NamedMDNode(const NamedMDNode &); // DO NOT IMPLEMENT
std::string Name;
Module *Parent;
- void *Operands; // SmallVector<WeakVH<MDNode>, 4>
+ void *Operands; // SmallVector<TrackingVH<MDNode>, 4>
void setParent(Module *M) { Parent = M; }
-protected:
- explicit NamedMDNode(LLVMContext &C, const Twine &N, MDNode*const *Vals,
- unsigned NumVals, Module *M = 0);
-public:
- static NamedMDNode *Create(LLVMContext &C, const Twine &N,
- MDNode *const *MDs,
- unsigned NumMDs, Module *M = 0) {
- return new NamedMDNode(C, N, MDs, NumMDs, M);
- }
- static NamedMDNode *Create(const NamedMDNode *NMD, Module *M = 0);
+ explicit NamedMDNode(const Twine &N);
+public:
/// eraseFromParent - Drop all references and remove the node from parent
/// module.
void eraseFromParent();
@@ -221,17 +225,11 @@ public:
/// addOperand - Add metadata operand.
void addOperand(MDNode *M);
- /// setName - Set the name of this named metadata.
- void setName(const Twine &NewName);
-
/// getName - Return a constant reference to this named metadata's name.
StringRef getName() const;
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static inline bool classof(const NamedMDNode *) { return true; }
- static bool classof(const Value *V) {
- return V->getValueID() == NamedMDNodeVal;
- }
+ /// print - Implement operator<< on NamedMDNode.
+ void print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW = 0) const;
};
} // end llvm namespace
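
The getTemporary()/deleteTemporary() pair added above exists to break chicken-and-egg problems when building cyclic metadata. A minimal sketch of that pattern, assuming the usual MDNode::get() factory; makeSelfRefMD is an invented helper name:

#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
using namespace llvm;

static MDNode *makeSelfRefMD(LLVMContext &Ctx) {
  // Placeholder node: not uniqued, so it may safely be RAUW'd later.
  MDNode *Temp = MDNode::getTemporary(Ctx, 0, 0);
  Value *Ops[2] = { Temp, MDString::get(Ctx, "cycle") };
  MDNode *Real = MDNode::get(Ctx, Ops, 2);
  // Close the cycle, then drop the placeholder (it must have no users left).
  Temp->replaceAllUsesWith(Real);
  MDNode::deleteTemporary(Temp);
  return Real;
}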
diff --git a/libclamav/c++/llvm/include/llvm/Module.h b/libclamav/c++/llvm/include/llvm/Module.h
index 901fada..b7880ca 100644
--- a/libclamav/c++/llvm/include/llvm/Module.h
+++ b/libclamav/c++/llvm/include/llvm/Module.h
@@ -28,7 +28,6 @@ namespace llvm {
class FunctionType;
class GVMaterializer;
class LLVMContext;
-class MDSymbolTable;
template<> struct ilist_traits<Function>
: public SymbolTableListTraits<Function, Module> {
@@ -61,7 +60,7 @@ template<> struct ilist_traits<GlobalAlias>
};
template<> struct ilist_traits<NamedMDNode>
- : public SymbolTableListTraits<NamedMDNode, Module> {
+ : public ilist_default_traits<NamedMDNode> {
// createSentinel is used to get hold of a node that marks the end of
// the list...
NamedMDNode *createSentinel() const {
@@ -72,8 +71,8 @@ template<> struct ilist_traits<NamedMDNode>
NamedMDNode *provideInitialHead() const { return createSentinel(); }
NamedMDNode *ensureHead(NamedMDNode*) const { return createSentinel(); }
static void noteHead(NamedMDNode*, NamedMDNode*) {}
- void addNodeToList(NamedMDNode *N);
- void removeNodeFromList(NamedMDNode *N);
+ void addNodeToList(NamedMDNode *) {}
+ void removeNodeFromList(NamedMDNode *) {}
private:
mutable ilist_node<NamedMDNode> Sentinel;
};
@@ -100,7 +99,7 @@ public:
/// The type for the list of aliases.
typedef iplist<GlobalAlias> AliasListType;
/// The type for the list of named metadata.
- typedef iplist<NamedMDNode> NamedMDListType;
+ typedef ilist<NamedMDNode> NamedMDListType;
/// The type for the list of dependent libraries.
typedef std::vector<std::string> LibraryListType;
@@ -151,7 +150,7 @@ private:
std::string ModuleID; ///< Human readable identifier for the module
std::string TargetTriple; ///< Platform target triple Module compiled on
std::string DataLayout; ///< Target data description
- MDSymbolTable *NamedMDSymTab; ///< NamedMDNode names.
+ void *NamedMDSymTab; ///< NamedMDNode names.
friend class Constant;
@@ -197,11 +196,11 @@ public:
/// Get any module-scope inline assembly blocks.
/// @returns a string containing the module-scope inline assembly blocks.
const std::string &getModuleInlineAsm() const { return GlobalScopeAsm; }
-
+
/// @}
/// @name Module Level Mutators
/// @{
-
+
/// Set the module identifier.
void setModuleIdentifier(StringRef ID) { ModuleID = ID; }
@@ -235,12 +234,11 @@ public:
/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
/// This ID is uniqued across modules in the current LLVMContext.
unsigned getMDKindID(StringRef Name) const;
-
+
/// getMDKindNames - Populate client supplied SmallVector with the name for
- /// custom metadata IDs registered in this LLVMContext. ID #0 is not used,
- /// so it is filled in as an empty string.
+ /// custom metadata IDs registered in this LLVMContext.
void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;
-
+
/// @}
/// @name Function Accessors
/// @{
@@ -277,7 +275,7 @@ public:
Constant *getOrInsertTargetIntrinsic(StringRef Name,
const FunctionType *Ty,
AttrListPtr AttributeList);
-
+
/// getFunction - Look up the specified function in the module symbol table.
/// If it does not exist, return null.
Function *getFunction(StringRef Name) const;
@@ -321,17 +319,21 @@ public:
/// @}
/// @name Named Metadata Accessors
/// @{
-
+
/// getNamedMetadata - Return the first NamedMDNode in the module with the
- /// specified name. This method returns null if a NamedMDNode with the
+ /// specified name. This method returns null if a NamedMDNode with the
/// specified name is not found.
- NamedMDNode *getNamedMetadata(StringRef Name) const;
+ NamedMDNode *getNamedMetadata(const Twine &Name) const;
- /// getOrInsertNamedMetadata - Return the first named MDNode in the module
- /// with the specified name. This method returns a new NamedMDNode if a
+ /// getOrInsertNamedMetadata - Return the first named MDNode in the module
+ /// with the specified name. This method returns a new NamedMDNode if a
/// NamedMDNode with the specified name is not found.
NamedMDNode *getOrInsertNamedMetadata(StringRef Name);
+ /// eraseNamedMetadata - Remove the given NamedMDNode from this module
+ /// and delete it.
+ void eraseNamedMetadata(NamedMDNode *NMD);
+
/// @}
/// @name Type Accessors
/// @{
@@ -418,13 +420,6 @@ public:
static iplist<GlobalAlias> Module::*getSublistAccess(GlobalAlias*) {
return &Module::AliasList;
}
- /// Get the Module's list of named metadata (constant).
- const NamedMDListType &getNamedMDList() const { return NamedMDList; }
- /// Get the Module's list of named metadata.
- NamedMDListType &getNamedMDList() { return NamedMDList; }
- static iplist<NamedMDNode> Module::*getSublistAccess(NamedMDNode *) {
- return &Module::NamedMDList;
- }
/// Get the symbol table of global variable and function identifiers
const ValueSymbolTable &getValueSymbolTable() const { return *ValSymTab; }
/// Get the Module's symbol table of global variable and function identifiers.
@@ -433,10 +428,6 @@ public:
const TypeSymbolTable &getTypeSymbolTable() const { return *TypeSymTab; }
/// Get the Module's symbol table of types
TypeSymbolTable &getTypeSymbolTable() { return *TypeSymTab; }
- /// Get the symbol table of named metadata
- const MDSymbolTable &getMDSymbolTable() const { return *NamedMDSymTab; }
- /// Get the Module's symbol table of named metadata
- MDSymbolTable &getMDSymbolTable() { return *NamedMDSymTab; }
/// @}
/// @name Global Variable Iteration
@@ -515,15 +506,16 @@ public:
const_named_metadata_iterator named_metadata_begin() const {
return NamedMDList.begin();
}
-
+
/// Get an iterator to the last named metadata.
named_metadata_iterator named_metadata_end() { return NamedMDList.end(); }
/// Get a constant iterator to the last named metadata.
const_named_metadata_iterator named_metadata_end() const {
return NamedMDList.end();
}
-
- /// Determine how many NamedMDNodes are in the Module's list of named metadata.
+
+ /// Determine how many NamedMDNodes are in the Module's list of named
+ /// metadata.
size_t named_metadata_size() const { return NamedMDList.size(); }
/// Determine if the list of named metadata is empty.
bool named_metadata_empty() const { return NamedMDList.empty(); }
@@ -535,7 +527,7 @@ public:
/// Print the module to an output stream with AssemblyAnnotationWriter.
void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW) const;
-
+
/// Dump the module to stderr (for debugging).
void dump() const;
/// This function causes all the subinstructions to "let go" of all references
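
For context, the reworked named-metadata accessors above are typically driven as in the sketch below; the "example.annotations" key is made up, and getOrInsertNamedMetadata()/eraseNamedMetadata() are the entry points shown in the hunk:

#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/Module.h"
using namespace llvm;

static void tagModule(Module &M) {
  LLVMContext &Ctx = M.getContext();
  Value *Ops[1] = { MDString::get(Ctx, "built-by-example") };
  NamedMDNode *NMD = M.getOrInsertNamedMetadata("example.annotations");
  NMD->addOperand(MDNode::get(Ctx, Ops, 1));
  // The list can be removed via the module, or via Old->eraseFromParent():
  if (NamedMDNode *Old = M.getNamedMetadata("example.annotations"))
    M.eraseNamedMetadata(Old);
}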
diff --git a/libclamav/c++/llvm/include/llvm/OperandTraits.h b/libclamav/c++/llvm/include/llvm/OperandTraits.h
index 7c879c8..b614ccb 100644
--- a/libclamav/c++/llvm/include/llvm/OperandTraits.h
+++ b/libclamav/c++/llvm/include/llvm/OperandTraits.h
@@ -20,7 +20,7 @@
namespace llvm {
//===----------------------------------------------------------------------===//
-// FixedNumOperands Trait Class
+// FixedNumOperand Trait Class
//===----------------------------------------------------------------------===//
/// FixedNumOperandTraits - determine the allocation regime of the Use array
@@ -51,9 +51,12 @@ struct FixedNumOperandTraits {
};
//===----------------------------------------------------------------------===//
-// OptionalOperands Trait Class
+// OptionalOperand Trait Class
//===----------------------------------------------------------------------===//
+/// OptionalOperandTraits - when the number of operands may change at runtime.
+/// Naturally it may only decrease, because the allocations may not change.
+
template <unsigned ARITY = 1>
struct OptionalOperandTraits : public FixedNumOperandTraits<ARITY> {
static unsigned operands(const User *U) {
diff --git a/libclamav/c++/llvm/include/llvm/Pass.h b/libclamav/c++/llvm/include/llvm/Pass.h
index 8fc3a53..f4c6eed 100644
--- a/libclamav/c++/llvm/include/llvm/Pass.h
+++ b/libclamav/c++/llvm/include/llvm/Pass.h
@@ -29,10 +29,7 @@
#ifndef LLVM_PASS_H
#define LLVM_PASS_H
-#include "llvm/System/DataTypes.h"
-#include <cassert>
-#include <utility>
-#include <vector>
+#include <string>
namespace llvm {
@@ -49,7 +46,7 @@ class raw_ostream;
class StringRef;
// AnalysisID - Use the PassInfo to identify a pass...
-typedef const PassInfo* AnalysisID;
+typedef const void* AnalysisID;
/// Different types of internal pass managers. External pass managers
/// (PassManager and FunctionPassManager) are not represented here.
@@ -81,19 +78,13 @@ enum PassKind {
///
class Pass {
AnalysisResolver *Resolver; // Used to resolve analysis
- intptr_t PassID;
+ const void *PassID;
PassKind Kind;
void operator=(const Pass&); // DO NOT IMPLEMENT
Pass(const Pass &); // DO NOT IMPLEMENT
public:
- explicit Pass(PassKind K, intptr_t pid) : Resolver(0), PassID(pid), Kind(K) {
- assert(pid && "pid cannot be 0");
- }
- explicit Pass(PassKind K, const void *pid)
- : Resolver(0), PassID((intptr_t)pid), Kind(K) {
- assert(pid && "pid cannot be 0");
- }
+ explicit Pass(PassKind K, char &pid);
virtual ~Pass();
@@ -105,10 +96,10 @@ public:
///
virtual const char *getPassName() const;
- /// getPassInfo - Return the PassInfo data structure that corresponds to this
- /// pass... If the pass has not been registered, this will return null.
- ///
- const PassInfo *getPassInfo() const;
+ /// getPassID - Return the PassID number that corresponds to this pass.
+ virtual AnalysisID getPassID() const {
+ return PassID;
+ }
/// print - Print out the internal state of the pass. This is called by
/// Analyze to print out the contents of an analysis. Otherwise it is not
@@ -120,10 +111,15 @@ public:
virtual void print(raw_ostream &O, const Module *M) const;
void dump() const; // dump - Print to stderr.
+ /// createPrinterPass - Get a Pass appropriate to print the IR this
+ /// pass operates on (Module, Function or MachineFunction).
+ virtual Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const = 0;
+
/// Each pass is responsible for assigning a pass manager to itself.
/// PMS is the stack of available pass manager.
virtual void assignPassManager(PMStack &,
- PassManagerType = PMT_Unknown) {}
+ PassManagerType) {}
/// Check if available pass managers are suitable for this pass or not.
virtual void preparePassManager(PMStack &);
@@ -131,13 +127,8 @@ public:
virtual PassManagerType getPotentialPassManagerType() const;
// Access AnalysisResolver
- inline void setResolver(AnalysisResolver *AR) {
- assert(!Resolver && "Resolver is already set");
- Resolver = AR;
- }
- inline AnalysisResolver *getResolver() {
- return Resolver;
- }
+ void setResolver(AnalysisResolver *AR);
+ AnalysisResolver *getResolver() const { return Resolver; }
/// getAnalysisUsage - This function should be overriden by passes that need
/// analysis information to do their job. If a pass specifies that it uses a
@@ -163,11 +154,9 @@ public:
/// an analysis interface through multiple inheritance. If needed, it should
/// override this to adjust the this pointer as needed for the specified pass
/// info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *) {
- return this;
- }
- virtual ImmutablePass *getAsImmutablePass() { return 0; }
- virtual PMDataManager *getAsPMDataManager() { return 0; }
+ virtual void *getAdjustedAnalysisPointer(AnalysisID ID);
+ virtual ImmutablePass *getAsImmutablePass();
+ virtual PMDataManager *getAsPMDataManager();
/// verifyAnalysis() - This member can be implemented by an analysis pass to
/// check state of analysis information.
@@ -176,14 +165,9 @@ public:
// dumpPassStructure - Implement the -debug-passes=PassStructure option
virtual void dumpPassStructure(unsigned Offset = 0);
- template<typename AnalysisClass>
- static const PassInfo *getClassPassInfo() {
- return lookupPassInfo(intptr_t(&AnalysisClass::ID));
- }
-
// lookupPassInfo - Return the pass info object for the specified pass class,
// or null if it is not known.
- static const PassInfo *lookupPassInfo(intptr_t TI);
+ static const PassInfo *lookupPassInfo(const void *TI);
// lookupPassInfo - Return the pass info object for the pass with the given
// argument string, or null if it is not known.
@@ -206,7 +190,7 @@ public:
/// don't have the class name available (use getAnalysisIfAvailable if you
/// do), but it can tell you if you need to preserve the pass at least.
///
- bool mustPreserveAnalysisID(const PassInfo *AnalysisID) const;
+ bool mustPreserveAnalysisID(char &AID) const;
/// getAnalysis<AnalysisType>() - This function is used by subclasses to get
/// to the analysis information that they claim to use by overriding the
@@ -219,10 +203,10 @@ public:
AnalysisType &getAnalysis(Function &F); // Defined in PassAnalysisSupport.h
template<typename AnalysisType>
- AnalysisType &getAnalysisID(const PassInfo *PI) const;
+ AnalysisType &getAnalysisID(AnalysisID PI) const;
template<typename AnalysisType>
- AnalysisType &getAnalysisID(const PassInfo *PI, Function &F);
+ AnalysisType &getAnalysisID(AnalysisID PI, Function &F);
};
@@ -233,18 +217,20 @@ public:
///
class ModulePass : public Pass {
public:
+ /// createPrinterPass - Get a module printer pass.
+ Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const;
+
/// runOnModule - Virtual method overridden by subclasses to process the module
/// being operated on.
virtual bool runOnModule(Module &M) = 0;
virtual void assignPassManager(PMStack &PMS,
- PassManagerType T = PMT_ModulePassManager);
+ PassManagerType T);
/// Return what kind of Pass Manager can manage this pass.
virtual PassManagerType getPotentialPassManagerType() const;
- explicit ModulePass(intptr_t pid) : Pass(PT_Module, pid) {}
- explicit ModulePass(const void *pid) : Pass(PT_Module, pid) {}
+ explicit ModulePass(char &pid) : Pass(PT_Module, pid) {}
// Force out-of-line virtual method.
virtual ~ModulePass();
};
@@ -271,8 +257,7 @@ public:
///
bool runOnModule(Module &) { return false; }
- explicit ImmutablePass(intptr_t pid) : ModulePass(pid) {}
- explicit ImmutablePass(const void *pid)
+ explicit ImmutablePass(char &pid)
: ModulePass(pid) {}
// Force out-of-line virtual method.
@@ -290,8 +275,10 @@ public:
///
class FunctionPass : public Pass {
public:
- explicit FunctionPass(intptr_t pid) : Pass(PT_Function, pid) {}
- explicit FunctionPass(const void *pid) : Pass(PT_Function, pid) {}
+ explicit FunctionPass(char &pid) : Pass(PT_Function, pid) {}
+
+ /// createPrinterPass - Get a function printer pass.
+ Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const;
/// doInitialization - Virtual method overridden by subclasses to do
/// any necessary per-module initialization.
@@ -308,19 +295,8 @@ public:
///
virtual bool doFinalization(Module &);
- /// runOnModule - On a module, we run this pass by initializing,
- /// ronOnFunction'ing once for every function in the module, then by
- /// finalizing.
- ///
- virtual bool runOnModule(Module &M);
-
- /// run - On a function, we simply initialize, run the function, then
- /// finalize.
- ///
- bool run(Function &F);
-
virtual void assignPassManager(PMStack &PMS,
- PassManagerType T = PMT_FunctionPassManager);
+ PassManagerType T);
/// Return what kind of Pass Manager can manage this pass.
virtual PassManagerType getPotentialPassManagerType() const;
@@ -340,8 +316,10 @@ public:
///
class BasicBlockPass : public Pass {
public:
- explicit BasicBlockPass(intptr_t pid) : Pass(PT_BasicBlock, pid) {}
- explicit BasicBlockPass(const void *pid) : Pass(PT_BasicBlock, pid) {}
+ explicit BasicBlockPass(char &pid) : Pass(PT_BasicBlock, pid) {}
+
+ /// createPrinterPass - Get a function printer pass.
+ Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const;
/// doInitialization - Virtual method overridden by subclasses to do
/// any necessary per-module initialization.
@@ -368,14 +346,8 @@ public:
///
virtual bool doFinalization(Module &);
-
- // To run this pass on a function, we simply call runOnBasicBlock once for
- // each function.
- //
- bool runOnFunction(Function &F);
-
virtual void assignPassManager(PMStack &PMS,
- PassManagerType T = PMT_BasicBlockPassManager);
+ PassManagerType T);
/// Return what kind of Pass Manager can manage this pass.
virtual PassManagerType getPotentialPassManagerType() const;
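
The switch above from intptr_t/const void* constructors to a 'char &pid' parameter changes how passes are declared. A hedged sketch of the resulting boilerplate; CountBlocks is an invented pass:

#include "llvm/Function.h"
#include "llvm/Pass.h"
using namespace llvm;

namespace {
  struct CountBlocks : public FunctionPass {
    static char ID;                     // the address of ID identifies the pass
    CountBlocks() : FunctionPass(ID) {} // previously FunctionPass(&ID)
    virtual bool runOnFunction(Function &F) {
      return false;                     // analysis only; the IR is untouched
    }
  };
}
char CountBlocks::ID = 0;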
diff --git a/libclamav/c++/llvm/include/llvm/PassAnalysisSupport.h b/libclamav/c++/llvm/include/llvm/PassAnalysisSupport.h
index d59be3c..a3342d5 100644
--- a/libclamav/c++/llvm/include/llvm/PassAnalysisSupport.h
+++ b/libclamav/c++/llvm/include/llvm/PassAnalysisSupport.h
@@ -19,7 +19,6 @@
#ifndef LLVM_PASS_ANALYSIS_SUPPORT_H
#define LLVM_PASS_ANALYSIS_SUPPORT_H
-#include "llvm/Pass.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <vector>
@@ -49,43 +48,37 @@ public:
// addRequired - Add the specified ID to the required set of the usage info
// for a pass.
//
- AnalysisUsage &addRequiredID(AnalysisID ID) {
- assert(ID && "Pass class not registered!");
- Required.push_back(ID);
- return *this;
- }
+ AnalysisUsage &addRequiredID(const void *ID);
+ AnalysisUsage &addRequiredID(char &ID);
template<class PassClass>
AnalysisUsage &addRequired() {
- return addRequiredID(Pass::getClassPassInfo<PassClass>());
+ return addRequiredID(PassClass::ID);
}
- AnalysisUsage &addRequiredTransitiveID(AnalysisID ID) {
- assert(ID && "Pass class not registered!");
- Required.push_back(ID);
- RequiredTransitive.push_back(ID);
- return *this;
- }
+ AnalysisUsage &addRequiredTransitiveID(char &ID);
template<class PassClass>
AnalysisUsage &addRequiredTransitive() {
- AnalysisID ID = Pass::getClassPassInfo<PassClass>();
- return addRequiredTransitiveID(ID);
+ return addRequiredTransitiveID(PassClass::ID);
}
// addPreserved - Add the specified ID to the set of analyses preserved by
// this pass
//
- AnalysisUsage &addPreservedID(AnalysisID ID) {
+ AnalysisUsage &addPreservedID(const void *ID) {
Preserved.push_back(ID);
return *this;
}
+ AnalysisUsage &addPreservedID(char &ID) {
+ Preserved.push_back(&ID);
+ return *this;
+ }
// addPreserved - Add the specified Pass class to the set of analyses
// preserved by this pass.
//
template<class PassClass>
AnalysisUsage &addPreserved() {
- assert(Pass::getClassPassInfo<PassClass>() && "Pass class not registered!");
- Preserved.push_back(Pass::getClassPassInfo<PassClass>());
+ Preserved.push_back(&PassClass::ID);
return *this;
}
@@ -94,12 +87,7 @@ public:
// This can be useful when a pass is trivially preserved, but may not be
// linked in. Be careful about spelling!
//
- AnalysisUsage &addPreserved(StringRef Arg) {
- const PassInfo *PI = Pass::lookupPassInfo(Arg);
- // If the pass exists, preserve it. Otherwise silently do nothing.
- if (PI) Preserved.push_back(PI);
- return *this;
- }
+ AnalysisUsage &addPreserved(StringRef Arg);
// setPreservesAll - Set by analyses that do not transform their input at all
void setPreservesAll() { PreservesAll = true; }
@@ -139,7 +127,7 @@ public:
inline PMDataManager &getPMDataManager() { return PM; }
// Find pass that is implementing PI.
- Pass *findImplPass(const PassInfo *PI) {
+ Pass *findImplPass(AnalysisID PI) {
Pass *ResultPass = 0;
for (unsigned i = 0; i < AnalysisImpls.size() ; ++i) {
if (AnalysisImpls[i].first == PI) {
@@ -151,10 +139,10 @@ public:
}
// Find pass that is implementing PI. Initialize pass for Function F.
- Pass *findImplPass(Pass *P, const PassInfo *PI, Function &F);
+ Pass *findImplPass(Pass *P, AnalysisID PI, Function &F);
- void addAnalysisImplsPair(const PassInfo *PI, Pass *P) {
- std::pair<const PassInfo*, Pass*> pir = std::make_pair(PI,P);
+ void addAnalysisImplsPair(AnalysisID PI, Pass *P) {
+ std::pair<AnalysisID, Pass*> pir = std::make_pair(PI,P);
AnalysisImpls.push_back(pir);
}
@@ -167,11 +155,11 @@ public:
// getAnalysisIfAvailable - Return analysis result or null if it doesn't exist
Pass *getAnalysisIfAvailable(AnalysisID ID, bool Direction) const;
+private:
// AnalysisImpls - This keeps track of which passes implements the interfaces
// that are required by the current pass (to implement getAnalysis()).
- std::vector<std::pair<const PassInfo*, Pass*> > AnalysisImpls;
+ std::vector<std::pair<AnalysisID, Pass*> > AnalysisImpls;
-private:
// PassManager that is used to resolve analysis info
PMDataManager &PM;
};
@@ -188,8 +176,7 @@ template<typename AnalysisType>
AnalysisType *Pass::getAnalysisIfAvailable() const {
assert(Resolver && "Pass not resident in a PassManager object!");
- const PassInfo *PI = getClassPassInfo<AnalysisType>();
- if (PI == 0) return 0;
+ const void *PI = &AnalysisType::ID;
Pass *ResultPass = Resolver->getAnalysisIfAvailable(PI, true);
if (ResultPass == 0) return 0;
@@ -208,11 +195,11 @@ AnalysisType *Pass::getAnalysisIfAvailable() const {
template<typename AnalysisType>
AnalysisType &Pass::getAnalysis() const {
assert(Resolver && "Pass has not been inserted into a PassManager object!");
- return getAnalysisID<AnalysisType>(getClassPassInfo<AnalysisType>());
+ return getAnalysisID<AnalysisType>(&AnalysisType::ID);
}
template<typename AnalysisType>
-AnalysisType &Pass::getAnalysisID(const PassInfo *PI) const {
+AnalysisType &Pass::getAnalysisID(AnalysisID PI) const {
assert(PI && "getAnalysis for unregistered pass!");
assert(Resolver&&"Pass has not been inserted into a PassManager object!");
// PI *must* appear in AnalysisImpls. Because the number of passes used
@@ -238,11 +225,11 @@ template<typename AnalysisType>
AnalysisType &Pass::getAnalysis(Function &F) {
assert(Resolver &&"Pass has not been inserted into a PassManager object!");
- return getAnalysisID<AnalysisType>(getClassPassInfo<AnalysisType>(), F);
+ return getAnalysisID<AnalysisType>(&AnalysisType::ID, F);
}
template<typename AnalysisType>
-AnalysisType &Pass::getAnalysisID(const PassInfo *PI, Function &F) {
+AnalysisType &Pass::getAnalysisID(AnalysisID PI, Function &F) {
assert(PI && "getAnalysis for unregistered pass!");
assert(Resolver && "Pass has not been inserted into a PassManager object!");
// PI *must* appear in AnalysisImpls. Because the number of passes used
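
With PassInfo removed from the hot path, requirements are now recorded as ID addresses. The usual pattern looks roughly like this; DominatorTree merely stands in for any analysis that exposes a static char ID:

#include "llvm/Analysis/Dominators.h"
#include "llvm/Pass.h"
using namespace llvm;

namespace {
  struct UsesDomTree : public FunctionPass {
    static char ID;
    UsesDomTree() : FunctionPass(ID) {}
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();  // pushes &DominatorTree::ID
      AU.setPreservesAll();
    }
    virtual bool runOnFunction(Function &F) {
      DominatorTree &DT = getAnalysis<DominatorTree>();
      (void)DT;                         // consult the tree here
      return false;
    }
  };
}
char UsesDomTree::ID = 0;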
diff --git a/libclamav/c++/llvm/include/llvm/PassManager.h b/libclamav/c++/llvm/include/llvm/PassManager.h
index 4d91163..c8b5dca 100644
--- a/libclamav/c++/llvm/include/llvm/PassManager.h
+++ b/libclamav/c++/llvm/include/llvm/PassManager.h
@@ -22,7 +22,6 @@
namespace llvm {
class Pass;
-class ModulePass;
class Module;
class PassManagerImpl;
@@ -60,6 +59,9 @@ public:
bool run(Module &M);
private:
+ /// addImpl - Add a pass to the queue of passes to run, without
+ /// checking whether to add a printer pass.
+ void addImpl(Pass *P);
/// PassManagerImpl_New is the actual class. PassManager is just the
/// wrapper to publish the simple pass manager interface
@@ -96,6 +98,10 @@ public:
bool doFinalization();
private:
+ /// addImpl - Add a pass to the queue of passes to run, without
+ /// checking whether to add a printer pass.
+ void addImpl(Pass *P);
+
FunctionPassManagerImpl *FPM;
Module *M;
};
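
The addImpl() split above only affects how add() schedules printer passes; client code keeps the familiar shape. Sketch only, with createVerifierPass (from llvm/Analysis/Verifier.h) used purely as an example pass:

#include "llvm/Analysis/Verifier.h"
#include "llvm/Module.h"
#include "llvm/PassManager.h"
using namespace llvm;

static void runPipeline(Module &M) {
  PassManager PM;
  PM.add(createVerifierPass());  // ownership of the pass moves to the manager
  PM.run(M);                     // returns true if the module was modified
}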
diff --git a/libclamav/c++/llvm/include/llvm/PassManagers.h b/libclamav/c++/llvm/include/llvm/PassManagers.h
index d5685c6..17f4a05 100644
--- a/libclamav/c++/llvm/include/llvm/PassManagers.h
+++ b/libclamav/c++/llvm/include/llvm/PassManagers.h
@@ -14,11 +14,11 @@
#ifndef LLVM_PASSMANAGERS_H
#define LLVM_PASSMANAGERS_H
-#include "llvm/PassManager.h"
+#include "llvm/Pass.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/DenseMap.h"
-#include <deque>
+#include <vector>
#include <map>
//===----------------------------------------------------------------------===//
@@ -96,14 +96,8 @@ namespace llvm {
class StringRef;
class Value;
class Timer;
+ class PMDataManager;
-/// FunctionPassManager and PassManager, two top level managers, serve
-/// as the public interface of pass manager infrastructure.
-enum TopLevelManagerType {
- TLM_Function, // FunctionPassManager
- TLM_Pass // PassManager
-};
-
// enums for debugging strings
enum PassDebuggingString {
EXECUTION_MSG, // "Executing Pass '"
@@ -138,30 +132,28 @@ public:
//===----------------------------------------------------------------------===//
// PMStack
//
-/// PMStack
+/// PMStack - This class implements a stack data structure of PMDataManager
+/// pointers.
+///
/// Top level pass managers (see PassManager.cpp) maintain active Pass Managers
/// using PMStack. Each Pass implements assignPassManager() to connect itself
/// with appropriate manager. assignPassManager() walks PMStack to find
/// suitable manager.
-///
-/// PMStack is just a wrapper around standard deque that overrides pop() and
-/// push() methods.
class PMStack {
public:
- typedef std::deque<PMDataManager *>::reverse_iterator iterator;
- iterator begin() { return S.rbegin(); }
- iterator end() { return S.rend(); }
-
- void handleLastUserOverflow();
+ typedef std::vector<PMDataManager *>::const_reverse_iterator iterator;
+ iterator begin() const { return S.rbegin(); }
+ iterator end() const { return S.rend(); }
void pop();
- inline PMDataManager *top() { return S.back(); }
+ PMDataManager *top() const { return S.back(); }
void push(PMDataManager *PM);
- inline bool empty() { return S.empty(); }
+ bool empty() const { return S.empty(); }
+
+ void dump() const;
- void dump();
private:
- std::deque<PMDataManager *> S;
+ std::vector<PMDataManager *> S;
};
@@ -171,21 +163,26 @@ private:
/// PMTopLevelManager manages LastUser info and collects common APIs used by
/// top level pass managers.
class PMTopLevelManager {
-public:
+protected:
+ explicit PMTopLevelManager(PMDataManager *PMDM);
virtual unsigned getNumContainedManagers() const {
return (unsigned)PassManagers.size();
}
- /// Schedule pass P for execution. Make sure that passes required by
- /// P are run before P is run. Update analysis info maintained by
- /// the manager. Remove dead passes. This is a recursive function.
- void schedulePass(Pass *P);
+ void initializeAllAnalysisInfo();
+private:
/// This is implemented by top level pass manager and used by
/// schedulePass() to add analysis info passes that are not available.
virtual void addTopLevelPass(Pass *P) = 0;
+public:
+ /// Schedule pass P for execution. Make sure that passes required by
+ /// P are run before P is run. Update analysis info maintained by
+ /// the manager. Remove dead passes. This is a recursive function.
+ void schedulePass(Pass *P);
+
/// Set pass P as the last user of the given analysis passes.
void setLastUser(SmallVector<Pass *, 12> &AnalysisPasses, Pass *P);
@@ -200,7 +197,6 @@ public:
/// Find analysis usage information for the pass P.
AnalysisUsage *findAnalysisUsage(Pass *P);
- explicit PMTopLevelManager(enum TopLevelManagerType t);
virtual ~PMTopLevelManager();
/// Add immutable pass and initialize it.
@@ -227,8 +223,6 @@ public:
void dumpPasses() const;
void dumpArguments() const;
- void initializeAllAnalysisInfo();
-
// Active Pass Managers
PMStack activeStack;
@@ -302,10 +296,7 @@ public:
/// through getAnalysis interface.
virtual void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass);
- virtual Pass * getOnTheFlyPass(Pass *P, const PassInfo *PI, Function &F) {
- assert (0 && "Unable to find on the fly pass");
- return NULL;
- }
+ virtual Pass *getOnTheFlyPass(Pass *P, AnalysisID PI, Function &F);
/// Initialize available analysis information.
void initializeAnalysisInfo() {
@@ -413,12 +404,11 @@ private:
/// It batches all function passes and basic block pass managers together and
/// sequences them to process one function at a time before processing the next
/// function.
-
class FPPassManager : public ModulePass, public PMDataManager {
public:
static char ID;
explicit FPPassManager(int Depth)
- : ModulePass(&ID), PMDataManager(Depth) { }
+ : ModulePass(ID), PMDataManager(Depth) { }
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the module, and if so, return true.
@@ -462,8 +452,7 @@ public:
}
};
-extern Timer *StartPassTimer(Pass *);
-extern void StopPassTimer(Pass *, Timer *);
+Timer *getPassTimer(Pass *);
}
diff --git a/libclamav/c++/llvm/include/llvm/PassRegistry.h b/libclamav/c++/llvm/include/llvm/PassRegistry.h
new file mode 100644
index 0000000..5907139
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/PassRegistry.h
@@ -0,0 +1,71 @@
+//===- llvm/PassRegistry.h - Pass Information Registry ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines PassRegistry, a class that is used in the initialization
+// and registration of passes. At initialization, passes are registered with
+// the PassRegistry, which is later provided to the PassManager for dependency
+// resolution and similar tasks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PASSREGISTRY_H
+#define LLVM_PASSREGISTRY_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/System/DataTypes.h"
+#include "llvm/System/Mutex.h"
+#include <map>
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+class PassInfo;
+struct PassRegistrationListener;
+
+class PassRegistry {
+ /// Guards the contents of this class.
+ mutable sys::SmartMutex<true> Lock;
+
+ /// PassInfoMap - Keep track of the PassInfo object for each registered pass.
+ typedef std::map<const void*, const PassInfo*> MapType;
+ MapType PassInfoMap;
+
+ typedef StringMap<const PassInfo*> StringMapType;
+ StringMapType PassInfoStringMap;
+
+ /// AnalysisGroupInfo - Keep track of information for each analysis group.
+ struct AnalysisGroupInfo {
+ std::set<const PassInfo *> Implementations;
+ };
+ std::map<const PassInfo*, AnalysisGroupInfo> AnalysisGroupInfoMap;
+
+ std::vector<PassRegistrationListener*> Listeners;
+
+public:
+ static PassRegistry *getPassRegistry();
+
+ const PassInfo *getPassInfo(const void *TI) const;
+ const PassInfo *getPassInfo(StringRef Arg) const;
+
+ void registerPass(const PassInfo &PI);
+ void unregisterPass(const PassInfo &PI);
+
+ /// Analysis Group Mechanisms.
+ void registerAnalysisGroup(const void *InterfaceID, const void *PassID,
+ PassInfo& Registeree, bool isDefault);
+
+ void enumerateWith(PassRegistrationListener *L);
+ void addRegistrationListener(PassRegistrationListener* L);
+ void removeRegistrationListener(PassRegistrationListener *L);
+};
+
+}
+
+#endif
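
A rough usage sketch for the new registry; the "verify" argument string is only an example and resolves only if the corresponding pass is linked in, and getPassName()/getPassArgument() are the standard PassInfo accessors:

#include "llvm/PassRegistry.h"
#include "llvm/PassSupport.h"
#include <cstdio>
using namespace llvm;

static void dumpOnePass() {
  PassRegistry *Registry = PassRegistry::getPassRegistry();
  if (const PassInfo *PI = Registry->getPassInfo(StringRef("verify")))
    std::printf("%s: %s\n", PI->getPassArgument(), PI->getPassName());
}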
diff --git a/libclamav/c++/llvm/include/llvm/PassSupport.h b/libclamav/c++/llvm/include/llvm/PassSupport.h
index b229989..0f559d6 100644
--- a/libclamav/c++/llvm/include/llvm/PassSupport.h
+++ b/libclamav/c++/llvm/include/llvm/PassSupport.h
@@ -22,11 +22,10 @@
#define LLVM_PASS_SUPPORT_H
#include "Pass.h"
+#include "llvm/PassRegistry.h"
namespace llvm {
-class TargetMachine;
-
//===---------------------------------------------------------------------------
/// PassInfo class - An instance of this class exists for every pass known by
/// the system, and can be obtained from a live Pass by calling its
@@ -40,7 +39,7 @@ public:
private:
const char *const PassName; // Nice name for Pass
const char *const PassArgument; // Command Line argument to run this pass
- const intptr_t PassID;
+ const void *PassID;
const bool IsCFGOnlyPass; // Pass only looks at the CFG.
const bool IsAnalysis; // True if an analysis pass.
const bool IsAnalysisGroup; // True if an analysis group.
@@ -51,18 +50,17 @@ private:
public:
/// PassInfo ctor - Do not call this directly, this should only be invoked
/// through RegisterPass.
- PassInfo(const char *name, const char *arg, intptr_t pi,
- NormalCtor_t normal = 0,
- bool isCFGOnly = false, bool is_analysis = false)
+ PassInfo(const char *name, const char *arg, const void *pi,
+ NormalCtor_t normal, bool isCFGOnly, bool is_analysis)
: PassName(name), PassArgument(arg), PassID(pi),
IsCFGOnlyPass(isCFGOnly),
IsAnalysis(is_analysis), IsAnalysisGroup(false), NormalCtor(normal) {
- registerPass();
+ PassRegistry::getPassRegistry()->registerPass(*this);
}
/// PassInfo ctor - Do not call this directly, this should only be invoked
/// through RegisterPass. This version is for use by analysis groups; it
/// does not auto-register the pass.
- PassInfo(const char *name, intptr_t pi)
+ PassInfo(const char *name, const void *pi)
: PassName(name), PassArgument(""), PassID(pi),
IsCFGOnlyPass(false),
IsAnalysis(false), IsAnalysisGroup(true), NormalCtor(0) {
@@ -80,11 +78,11 @@ public:
/// getTypeInfo - Return the id object for the pass...
/// TODO : Rename
- intptr_t getTypeInfo() const { return PassID; }
+ const void *getTypeInfo() const { return PassID; }
/// Return true if this PassID implements the specified ID pointer.
- bool isPassID(void *IDPtr) const {
- return PassID == (intptr_t)IDPtr;
+ bool isPassID(const void *IDPtr) const {
+ return PassID == IDPtr;
}
/// isAnalysisGroup - Return true if this is an analysis group, not a normal
@@ -109,13 +107,7 @@ public:
}
/// createPass() - Use this method to create an instance of this pass.
- Pass *createPass() const {
- assert((!isAnalysisGroup() || NormalCtor) &&
- "No default implementation found for analysis group!");
- assert(NormalCtor &&
- "Cannot call createPass on PassInfo without default ctor!");
- return NormalCtor();
- }
+ Pass *createPass() const;
/// addInterfaceImplemented - This method is called when this pass is
/// registered as a member of an analysis group with the RegisterAnalysisGroup
@@ -132,15 +124,13 @@ public:
return ItfImpl;
}
-protected:
- void registerPass();
- void unregisterPass();
-
private:
void operator=(const PassInfo &); // do not implement
PassInfo(const PassInfo &); // do not implement
};
+#define INITIALIZE_PASS(passName, arg, name, cfg, analysis) \
+ static RegisterPass<passName> passName ## _info(arg, name, cfg, analysis)
template<typename PassName>
Pass *callDefaultCtor() { return new PassName(); }
@@ -168,9 +158,10 @@ struct RegisterPass : public PassInfo {
// Register Pass using default constructor...
RegisterPass(const char *PassArg, const char *Name, bool CFGOnly = false,
bool is_analysis = false)
- : PassInfo(Name, PassArg, intptr_t(&passName::ID),
+ : PassInfo(Name, PassArg, &passName::ID,
PassInfo::NormalCtor_t(callDefaultCtor<passName>),
CFGOnly, is_analysis) {
+
}
};
@@ -197,8 +188,8 @@ struct RegisterPass : public PassInfo {
class RegisterAGBase : public PassInfo {
protected:
RegisterAGBase(const char *Name,
- intptr_t InterfaceID,
- intptr_t PassID = 0,
+ const void *InterfaceID,
+ const void *PassID = 0,
bool isDefault = false);
};
@@ -206,16 +197,18 @@ template<typename Interface, bool Default = false>
struct RegisterAnalysisGroup : public RegisterAGBase {
explicit RegisterAnalysisGroup(PassInfo &RPB)
: RegisterAGBase(RPB.getPassName(),
- intptr_t(&Interface::ID), RPB.getTypeInfo(),
+ &Interface::ID, RPB.getTypeInfo(),
Default) {
}
explicit RegisterAnalysisGroup(const char *Name)
- : RegisterAGBase(Name, intptr_t(&Interface::ID)) {
+ : RegisterAGBase(Name, &Interface::ID) {
}
};
-
+#define INITIALIZE_AG_PASS(passName, agName, arg, name, cfg, analysis, def) \
+ static RegisterPass<passName> passName ## _info(arg, name, cfg, analysis); \
+ static RegisterAnalysisGroup<agName, def> passName ## _ag(passName ## _info)
//===---------------------------------------------------------------------------
/// PassRegistrationListener class - This class is meant to be derived from by
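
The INITIALIZE_PASS macro introduced above is only sugar over RegisterPass<>. Given a pass class such as the invented CountBlocks sketch earlier, the macro form and its expansion are equivalent:

#include "llvm/PassSupport.h"
using namespace llvm;

// Expands to: static RegisterPass<CountBlocks>
//               CountBlocks_info("count-blocks", "Count basic blocks", false, false);
INITIALIZE_PASS(CountBlocks, "count-blocks", "Count basic blocks", false, false);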
diff --git a/libclamav/c++/llvm/include/llvm/Support/Allocator.h b/libclamav/c++/llvm/include/llvm/Support/Allocator.h
index a8e4d2d..4a7251f 100644
--- a/libclamav/c++/llvm/include/llvm/Support/Allocator.h
+++ b/libclamav/c++/llvm/include/llvm/Support/Allocator.h
@@ -15,9 +15,12 @@
#define LLVM_SUPPORT_ALLOCATOR_H
#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/System/DataTypes.h"
+#include <algorithm>
#include <cassert>
#include <cstdlib>
+#include <cstddef>
namespace llvm {
@@ -130,8 +133,8 @@ class BumpPtrAllocator {
static MallocSlabAllocator DefaultSlabAllocator;
+ template<typename T> friend class SpecificBumpPtrAllocator;
public:
- typedef void (*DTorFunction)(void*);
BumpPtrAllocator(size_t size = 4096, size_t threshold = 4096,
SlabAllocator &allocator = DefaultSlabAllocator);
~BumpPtrAllocator();
@@ -140,11 +143,6 @@ public:
/// to the beginning of it, freeing all memory allocated so far.
void Reset();
- /// Reset - like Reset(), but call DTorFunction for each allocated
- /// object. This assumes that all objects allocated with this allocator
- /// had the same size and alignment specified here.
- void Reset(size_t Size, size_t Alignment, DTorFunction DTor);
-
/// Allocate - Allocate space at the specified alignment.
///
void *Allocate(size_t Size, size_t Alignment);
@@ -179,6 +177,65 @@ public:
void PrintStats() const;
};
+/// SpecificBumpPtrAllocator - Same as BumpPtrAllocator but allows only
+/// elements of one type to be allocated. This allows calling the destructor
+/// in DestroyAll() and when the allocator is destroyed.
+template <typename T>
+class SpecificBumpPtrAllocator {
+ BumpPtrAllocator Allocator;
+public:
+ SpecificBumpPtrAllocator(size_t size = 4096, size_t threshold = 4096,
+ SlabAllocator &allocator = BumpPtrAllocator::DefaultSlabAllocator)
+ : Allocator(size, threshold, allocator) {}
+
+ ~SpecificBumpPtrAllocator() {
+ DestroyAll();
+ }
+
+ /// Call the destructor of each allocated object and deallocate all but the
+ /// current slab and reset the current pointer to the beginning of it, freeing
+ /// all memory allocated so far.
+ void DestroyAll() {
+ MemSlab *Slab = Allocator.CurSlab;
+ while (Slab) {
+ char *End = Slab == Allocator.CurSlab ? Allocator.CurPtr :
+ (char *)Slab + Slab->Size;
+ for (char *Ptr = (char*)(Slab+1); Ptr < End; Ptr += sizeof(T)) {
+ Ptr = Allocator.AlignPtr(Ptr, alignof<T>());
+ if (Ptr + sizeof(T) <= End)
+ reinterpret_cast<T*>(Ptr)->~T();
+ }
+ Slab = Slab->NextPtr;
+ }
+ Allocator.Reset();
+ }
+
+ /// Allocate space for a specific count of elements.
+ T *Allocate(size_t num = 1) {
+ return Allocator.Allocate<T>(num);
+ }
+};
+
} // end namespace llvm
+inline void *operator new(size_t Size, llvm::BumpPtrAllocator &Allocator) {
+ struct S {
+ char c;
+#ifdef __GNUC__
+ char x __attribute__((aligned));
+#else
+ union {
+ double D;
+ long double LD;
+ long long L;
+ void *P;
+ } x;
+#endif
+ };
+ return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
+ offsetof(S, x)));
+}
+
+inline void operator delete(void *, llvm::BumpPtrAllocator &) {}
+
#endif // LLVM_SUPPORT_ALLOCATOR_H
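
SpecificBumpPtrAllocator above differs from the plain BumpPtrAllocator in exactly one way: destructors run. A small sketch, with std::string used only as a convenient non-trivial type:

#include "llvm/Support/Allocator.h"
#include <new>
#include <string>
using namespace llvm;

static void allocatorDemo() {
  SpecificBumpPtrAllocator<std::string> Pool;
  for (unsigned i = 0; i != 100; ++i)
    new (Pool.Allocate()) std::string("node");  // placement-new into the pool
  // No per-object delete: DestroyAll()/~SpecificBumpPtrAllocator walk the
  // slabs and invoke each ~std::string().
}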
diff --git a/libclamav/c++/llvm/include/llvm/Support/CFG.h b/libclamav/c++/llvm/include/llvm/Support/CFG.h
index 3875f0b..9ba71fc 100644
--- a/libclamav/c++/llvm/include/llvm/Support/CFG.h
+++ b/libclamav/c++/llvm/include/llvm/Support/CFG.h
@@ -25,57 +25,58 @@ namespace llvm {
// BasicBlock pred_iterator definition
//===----------------------------------------------------------------------===//
-template <class _Ptr, class _USE_iterator> // Predecessor Iterator
+template <class Ptr, class USE_iterator> // Predecessor Iterator
class PredIterator : public std::iterator<std::forward_iterator_tag,
- _Ptr, ptrdiff_t> {
- typedef std::iterator<std::forward_iterator_tag, _Ptr, ptrdiff_t> super;
- _USE_iterator It;
-public:
- typedef PredIterator<_Ptr,_USE_iterator> _Self;
- typedef typename super::pointer pointer;
+ Ptr, ptrdiff_t> {
+ typedef std::iterator<std::forward_iterator_tag, Ptr, ptrdiff_t> super;
+ typedef PredIterator<Ptr, USE_iterator> Self;
+ USE_iterator It;
inline void advancePastNonTerminators() {
- // Loop to ignore non terminator uses (for example PHI nodes)...
+ // Loop to ignore non terminator uses (for example PHI nodes).
while (!It.atEnd() && !isa<TerminatorInst>(*It))
++It;
}
- inline PredIterator(_Ptr *bb) : It(bb->use_begin()) {
+public:
+ typedef typename super::pointer pointer;
+
+ explicit inline PredIterator(Ptr *bb) : It(bb->use_begin()) {
advancePastNonTerminators();
}
- inline PredIterator(_Ptr *bb, bool) : It(bb->use_end()) {}
+ inline PredIterator(Ptr *bb, bool) : It(bb->use_end()) {}
- inline bool operator==(const _Self& x) const { return It == x.It; }
- inline bool operator!=(const _Self& x) const { return !operator==(x); }
+ inline bool operator==(const Self& x) const { return It == x.It; }
+ inline bool operator!=(const Self& x) const { return !operator==(x); }
inline pointer operator*() const {
assert(!It.atEnd() && "pred_iterator out of range!");
return cast<TerminatorInst>(*It)->getParent();
}
- inline pointer *operator->() const { return &(operator*()); }
+ inline pointer *operator->() const { return &operator*(); }
- inline _Self& operator++() { // Preincrement
+ inline Self& operator++() { // Preincrement
assert(!It.atEnd() && "pred_iterator out of range!");
++It; advancePastNonTerminators();
return *this;
}
- inline _Self operator++(int) { // Postincrement
- _Self tmp = *this; ++*this; return tmp;
+ inline Self operator++(int) { // Postincrement
+ Self tmp = *this; ++*this; return tmp;
}
};
typedef PredIterator<BasicBlock, Value::use_iterator> pred_iterator;
typedef PredIterator<const BasicBlock,
- Value::use_const_iterator> pred_const_iterator;
+ Value::const_use_iterator> const_pred_iterator;
inline pred_iterator pred_begin(BasicBlock *BB) { return pred_iterator(BB); }
-inline pred_const_iterator pred_begin(const BasicBlock *BB) {
- return pred_const_iterator(BB);
+inline const_pred_iterator pred_begin(const BasicBlock *BB) {
+ return const_pred_iterator(BB);
}
inline pred_iterator pred_end(BasicBlock *BB) { return pred_iterator(BB, true);}
-inline pred_const_iterator pred_end(const BasicBlock *BB) {
- return pred_const_iterator(BB, true);
+inline const_pred_iterator pred_end(const BasicBlock *BB) {
+ return const_pred_iterator(BB, true);
}
@@ -90,12 +91,17 @@ class SuccIterator : public std::iterator<std::bidirectional_iterator_tag,
const Term_ Term;
unsigned idx;
typedef std::iterator<std::bidirectional_iterator_tag, BB_, ptrdiff_t> super;
+ typedef SuccIterator<Term_, BB_> Self;
+
+ inline bool index_is_valid(int idx) {
+ return idx >= 0 && (unsigned) idx < Term->getNumSuccessors();
+ }
+
public:
- typedef SuccIterator<Term_, BB_> _Self;
typedef typename super::pointer pointer;
// TODO: This can be random access iterator, only operator[] missing.
- inline SuccIterator(Term_ T) : Term(T), idx(0) { // begin iterator
+ explicit inline SuccIterator(Term_ T) : Term(T), idx(0) {// begin iterator
assert(T && "getTerminator returned null!");
}
inline SuccIterator(Term_ T, bool) // end iterator
@@ -103,78 +109,74 @@ public:
assert(T && "getTerminator returned null!");
}
- inline const _Self &operator=(const _Self &I) {
+ inline const Self &operator=(const Self &I) {
assert(Term == I.Term &&"Cannot assign iterators to two different blocks!");
idx = I.idx;
return *this;
}
- inline bool index_is_valid (int idx) {
- return idx >= 0 && (unsigned) idx < Term->getNumSuccessors();
- }
-
/// getSuccessorIndex - This is used to interface between code that wants to
/// operate on terminator instructions directly.
unsigned getSuccessorIndex() const { return idx; }
- inline bool operator==(const _Self& x) const { return idx == x.idx; }
- inline bool operator!=(const _Self& x) const { return !operator==(x); }
+ inline bool operator==(const Self& x) const { return idx == x.idx; }
+ inline bool operator!=(const Self& x) const { return !operator==(x); }
inline pointer operator*() const { return Term->getSuccessor(idx); }
inline pointer operator->() const { return operator*(); }
- inline _Self& operator++() { ++idx; return *this; } // Preincrement
+ inline Self& operator++() { ++idx; return *this; } // Preincrement
- inline _Self operator++(int) { // Postincrement
- _Self tmp = *this; ++*this; return tmp;
+ inline Self operator++(int) { // Postincrement
+ Self tmp = *this; ++*this; return tmp;
}
- inline _Self& operator--() { --idx; return *this; } // Predecrement
- inline _Self operator--(int) { // Postdecrement
- _Self tmp = *this; --*this; return tmp;
+ inline Self& operator--() { --idx; return *this; } // Predecrement
+ inline Self operator--(int) { // Postdecrement
+ Self tmp = *this; --*this; return tmp;
}
- inline bool operator<(const _Self& x) const {
+ inline bool operator<(const Self& x) const {
assert(Term == x.Term && "Cannot compare iterators of different blocks!");
return idx < x.idx;
}
- inline bool operator<=(const _Self& x) const {
+ inline bool operator<=(const Self& x) const {
assert(Term == x.Term && "Cannot compare iterators of different blocks!");
return idx <= x.idx;
}
- inline bool operator>=(const _Self& x) const {
+ inline bool operator>=(const Self& x) const {
assert(Term == x.Term && "Cannot compare iterators of different blocks!");
return idx >= x.idx;
}
- inline bool operator>(const _Self& x) const {
+ inline bool operator>(const Self& x) const {
assert(Term == x.Term && "Cannot compare iterators of different blocks!");
return idx > x.idx;
}
- inline _Self& operator+=(int Right) {
+ inline Self& operator+=(int Right) {
unsigned new_idx = idx + Right;
assert(index_is_valid(new_idx) && "Iterator index out of bound");
idx = new_idx;
return *this;
}
- inline _Self operator+(int Right) {
- _Self tmp = *this;
+ inline Self operator+(int Right) {
+ Self tmp = *this;
tmp += Right;
return tmp;
}
- inline _Self& operator-=(int Right) {
+ inline Self& operator-=(int Right) {
return operator+=(-Right);
}
- inline _Self operator-(int Right) {
+ inline Self operator-(int Right) {
return operator+(-Right);
}
- inline int operator-(const _Self& x) {
+ inline int operator-(const Self& x) {
assert(Term == x.Term && "Cannot work on iterators of different blocks!");
int distance = idx - x.idx;
return distance;
@@ -185,14 +187,14 @@ public:
// be modified are not available.
//
// inline pointer operator[](int offset) {
- // _Self tmp = *this;
+ // Self tmp = *this;
// tmp += offset;
// return tmp.operator*();
// }
/// Get the source BB of this iterator.
inline BB_ *getSource() {
- return Term->getParent();
+ return Term->getParent();
}
};
@@ -268,7 +270,7 @@ template <> struct GraphTraits<Inverse<BasicBlock*> > {
template <> struct GraphTraits<Inverse<const BasicBlock*> > {
typedef const BasicBlock NodeType;
- typedef pred_const_iterator ChildIteratorType;
+ typedef const_pred_iterator ChildIteratorType;
static NodeType *getEntryNode(Inverse<const BasicBlock*> G) {
return G.Graph;
}
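
The pred_const_iterator to const_pred_iterator rename shows up in client loops like the following; countPreds is an invented helper:

#include "llvm/BasicBlock.h"
#include "llvm/Support/CFG.h"
using namespace llvm;

static unsigned countPreds(const BasicBlock *BB) {
  unsigned N = 0;
  for (const_pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
    ++N;  // *PI is a predecessor BasicBlock
  return N;
}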
diff --git a/libclamav/c++/llvm/include/llvm/Support/COFF.h b/libclamav/c++/llvm/include/llvm/Support/COFF.h
new file mode 100644
index 0000000..78254ae
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/Support/COFF.h
@@ -0,0 +1,298 @@
+//===-- llvm/Support/COFF.h -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains definitions used in Windows COFF files.
+//
+// Structures and enums defined within this file were created using
+// information from Microsoft's publicly available PE/COFF format document:
+//
+// Microsoft Portable Executable and Common Object File Format Specification
+// Revision 8.1 - February 15, 2008
+//
+// As of 5/2/2010, hosted by Microsoft at:
+// http://www.microsoft.com/whdc/system/platform/firmware/pecoff.mspx
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_WIN_COFF_H
+#define LLVM_SUPPORT_WIN_COFF_H
+
+#include "llvm/System/DataTypes.h"
+#include <cstring>
+
+namespace llvm {
+namespace COFF {
+
+ // Sizes in bytes of various things in the COFF format.
+ enum {
+ HeaderSize = 20,
+ NameSize = 8,
+ SymbolSize = 18,
+ SectionSize = 40,
+ RelocationSize = 10
+ };
+
+ struct header {
+ uint16_t Machine;
+ uint16_t NumberOfSections;
+ uint32_t TimeDateStamp;
+ uint32_t PointerToSymbolTable;
+ uint32_t NumberOfSymbols;
+ uint16_t SizeOfOptionalHeader;
+ uint16_t Characteristics;
+ };
+
+ enum MachineTypes {
+ IMAGE_FILE_MACHINE_I386 = 0x14C,
+ IMAGE_FILE_MACHINE_AMD64 = 0x8664
+ };
+
+ struct symbol {
+ char Name[NameSize];
+ uint32_t Value;
+ uint16_t Type;
+ uint8_t StorageClass;
+ uint16_t SectionNumber;
+ uint8_t NumberOfAuxSymbols;
+ };
+
+ enum SymbolFlags {
+ SF_TypeMask = 0x0000FFFF,
+ SF_TypeShift = 0,
+
+ SF_ClassMask = 0x00FF0000,
+ SF_ClassShift = 16,
+
+ SF_WeakReference = 0x01000000
+ };
+
+ enum SymbolSectionNumber {
+ IMAGE_SYM_DEBUG = -2,
+ IMAGE_SYM_ABSOLUTE = -1,
+ IMAGE_SYM_UNDEFINED = 0
+ };
+
+ /// Storage class tells where and what the symbol represents
+ enum SymbolStorageClass {
+ IMAGE_SYM_CLASS_END_OF_FUNCTION = -1, ///< Physical end of function
+ IMAGE_SYM_CLASS_NULL = 0, ///< No symbol
+ IMAGE_SYM_CLASS_AUTOMATIC = 1, ///< Stack variable
+ IMAGE_SYM_CLASS_EXTERNAL = 2, ///< External symbol
+ IMAGE_SYM_CLASS_STATIC = 3, ///< Static
+ IMAGE_SYM_CLASS_REGISTER = 4, ///< Register variable
+ IMAGE_SYM_CLASS_EXTERNAL_DEF = 5, ///< External definition
+ IMAGE_SYM_CLASS_LABEL = 6, ///< Label
+ IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7, ///< Undefined label
+ IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8, ///< Member of structure
+ IMAGE_SYM_CLASS_ARGUMENT = 9, ///< Function argument
+ IMAGE_SYM_CLASS_STRUCT_TAG = 10, ///< Structure tag
+ IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11, ///< Member of union
+ IMAGE_SYM_CLASS_UNION_TAG = 12, ///< Union tag
+ IMAGE_SYM_CLASS_TYPE_DEFINITION = 13, ///< Type definition
+ IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14, ///< Undefined static
+ IMAGE_SYM_CLASS_ENUM_TAG = 15, ///< Enumeration tag
+ IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16, ///< Member of enumeration
+ IMAGE_SYM_CLASS_REGISTER_PARAM = 17, ///< Register parameter
+ IMAGE_SYM_CLASS_BIT_FIELD = 18, ///< Bit field
+ /// ".bb" or ".eb" - beginning or end of block
+ IMAGE_SYM_CLASS_BLOCK = 100,
+ /// ".bf" or ".ef" - beginning or end of function
+ IMAGE_SYM_CLASS_FUNCTION = 101,
+ IMAGE_SYM_CLASS_END_OF_STRUCT = 102, ///< End of structure
+ IMAGE_SYM_CLASS_FILE = 103, ///< File name
+ /// Line number, reformatted as symbol
+ IMAGE_SYM_CLASS_SECTION = 104,
+ IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105, ///< Duplicate tag
+ /// External symbol in dmert public lib
+ IMAGE_SYM_CLASS_CLR_TOKEN = 107
+ };
+
+ enum SymbolBaseType {
+ IMAGE_SYM_TYPE_NULL = 0, ///< No type information or unknown base type.
+ IMAGE_SYM_TYPE_VOID = 1, ///< Used with void pointers and functions.
+ IMAGE_SYM_TYPE_CHAR = 2, ///< A character (signed byte).
+ IMAGE_SYM_TYPE_SHORT = 3, ///< A 2-byte signed integer.
+ IMAGE_SYM_TYPE_INT = 4, ///< A natural integer type on the target.
+ IMAGE_SYM_TYPE_LONG = 5, ///< A 4-byte signed integer.
+ IMAGE_SYM_TYPE_FLOAT = 6, ///< A 4-byte floating-point number.
+ IMAGE_SYM_TYPE_DOUBLE = 7, ///< An 8-byte floating-point number.
+ IMAGE_SYM_TYPE_STRUCT = 8, ///< A structure.
+ IMAGE_SYM_TYPE_UNION = 9, ///< A union.
+ IMAGE_SYM_TYPE_ENUM = 10, ///< An enumerated type.
+ IMAGE_SYM_TYPE_MOE = 11, ///< A member of enumeration (a specific value).
+ IMAGE_SYM_TYPE_BYTE = 12, ///< A byte; unsigned 1-byte integer.
+ IMAGE_SYM_TYPE_WORD = 13, ///< A word; unsigned 2-byte integer.
+ IMAGE_SYM_TYPE_UINT = 14, ///< An unsigned integer of natural size.
+ IMAGE_SYM_TYPE_DWORD = 15 ///< An unsigned 4-byte integer.
+ };
+
+ enum SymbolComplexType {
+ IMAGE_SYM_DTYPE_NULL = 0, ///< No complex type; simple scalar variable.
+ IMAGE_SYM_DTYPE_POINTER = 1, ///< A pointer to base type.
+ IMAGE_SYM_DTYPE_FUNCTION = 2, ///< A function that returns a base type.
+ IMAGE_SYM_DTYPE_ARRAY = 3, ///< An array of base type.
+
+ /// Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT))
+ SCT_COMPLEX_TYPE_SHIFT = 8
+ };
+
+ struct section {
+ char Name[NameSize];
+ uint32_t VirtualSize;
+ uint32_t VirtualAddress;
+ uint32_t SizeOfRawData;
+ uint32_t PointerToRawData;
+ uint32_t PointerToRelocations;
+ uint32_t PointerToLineNumbers;
+ uint16_t NumberOfRelocations;
+ uint16_t NumberOfLineNumbers;
+ uint32_t Characteristics;
+ };
+
+ enum SectionCharacteristics {
+ IMAGE_SCN_TYPE_NO_PAD = 0x00000008,
+ IMAGE_SCN_CNT_CODE = 0x00000020,
+ IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040,
+ IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080,
+ IMAGE_SCN_LNK_OTHER = 0x00000100,
+ IMAGE_SCN_LNK_INFO = 0x00000200,
+ IMAGE_SCN_LNK_REMOVE = 0x00000800,
+ IMAGE_SCN_LNK_COMDAT = 0x00001000,
+ IMAGE_SCN_GPREL = 0x00008000,
+ IMAGE_SCN_MEM_PURGEABLE = 0x00020000,
+ IMAGE_SCN_MEM_16BIT = 0x00020000,
+ IMAGE_SCN_MEM_LOCKED = 0x00040000,
+ IMAGE_SCN_MEM_PRELOAD = 0x00080000,
+ IMAGE_SCN_ALIGN_1BYTES = 0x00100000,
+ IMAGE_SCN_ALIGN_2BYTES = 0x00200000,
+ IMAGE_SCN_ALIGN_4BYTES = 0x00300000,
+ IMAGE_SCN_ALIGN_8BYTES = 0x00400000,
+ IMAGE_SCN_ALIGN_16BYTES = 0x00500000,
+ IMAGE_SCN_ALIGN_32BYTES = 0x00600000,
+ IMAGE_SCN_ALIGN_64BYTES = 0x00700000,
+ IMAGE_SCN_ALIGN_128BYTES = 0x00800000,
+ IMAGE_SCN_ALIGN_256BYTES = 0x00900000,
+ IMAGE_SCN_ALIGN_512BYTES = 0x00A00000,
+ IMAGE_SCN_ALIGN_1024BYTES = 0x00B00000,
+ IMAGE_SCN_ALIGN_2048BYTES = 0x00C00000,
+ IMAGE_SCN_ALIGN_4096BYTES = 0x00D00000,
+ IMAGE_SCN_ALIGN_8192BYTES = 0x00E00000,
+ IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000,
+ IMAGE_SCN_MEM_DISCARDABLE = 0x02000000,
+ IMAGE_SCN_MEM_NOT_CACHED = 0x04000000,
+ IMAGE_SCN_MEM_NOT_PAGED = 0x08000000,
+ IMAGE_SCN_MEM_SHARED = 0x10000000,
+ IMAGE_SCN_MEM_EXECUTE = 0x20000000,
+ IMAGE_SCN_MEM_READ = 0x40000000,
+ IMAGE_SCN_MEM_WRITE = 0x80000000
+ };
+
+ struct relocation {
+ uint32_t VirtualAddress;
+ uint32_t SymbolTableIndex;
+ uint16_t Type;
+ };
+
+ enum RelocationTypeX86 {
+ IMAGE_REL_I386_ABSOLUTE = 0x0000,
+ IMAGE_REL_I386_DIR16 = 0x0001,
+ IMAGE_REL_I386_REL16 = 0x0002,
+ IMAGE_REL_I386_DIR32 = 0x0006,
+ IMAGE_REL_I386_DIR32NB = 0x0007,
+ IMAGE_REL_I386_SEG12 = 0x0009,
+ IMAGE_REL_I386_SECTION = 0x000A,
+ IMAGE_REL_I386_SECREL = 0x000B,
+ IMAGE_REL_I386_TOKEN = 0x000C,
+ IMAGE_REL_I386_SECREL7 = 0x000D,
+ IMAGE_REL_I386_REL32 = 0x0014,
+
+ IMAGE_REL_AMD64_ABSOLUTE = 0x0000,
+ IMAGE_REL_AMD64_ADDR64 = 0x0001,
+ IMAGE_REL_AMD64_ADDR32 = 0x0002,
+ IMAGE_REL_AMD64_ADDR32NB = 0x0003,
+ IMAGE_REL_AMD64_REL32 = 0x0004,
+ IMAGE_REL_AMD64_REL32_1 = 0x0005,
+ IMAGE_REL_AMD64_REL32_2 = 0x0006,
+ IMAGE_REL_AMD64_REL32_3 = 0x0007,
+ IMAGE_REL_AMD64_REL32_4 = 0x0008,
+ IMAGE_REL_AMD64_REL32_5 = 0x0009,
+ IMAGE_REL_AMD64_SECTION = 0x000A,
+ IMAGE_REL_AMD64_SECREL = 0x000B,
+ IMAGE_REL_AMD64_SECREL7 = 0x000C,
+ IMAGE_REL_AMD64_TOKEN = 0x000D,
+ IMAGE_REL_AMD64_SREL32 = 0x000E,
+ IMAGE_REL_AMD64_PAIR = 0x000F,
+ IMAGE_REL_AMD64_SSPAN32 = 0x0010
+ };
+
+ enum COMDATType {
+ IMAGE_COMDAT_SELECT_NODUPLICATES = 1,
+ IMAGE_COMDAT_SELECT_ANY,
+ IMAGE_COMDAT_SELECT_SAME_SIZE,
+ IMAGE_COMDAT_SELECT_EXACT_MATCH,
+ IMAGE_COMDAT_SELECT_ASSOCIATIVE,
+ IMAGE_COMDAT_SELECT_LARGEST
+ };
+
+ // Auxiliary Symbol Formats
+ struct AuxiliaryFunctionDefinition {
+ uint32_t TagIndex;
+ uint32_t TotalSize;
+ uint32_t PointerToLinenumber;
+ uint32_t PointerToNextFunction;
+ uint8_t unused[2];
+ };
+
+ struct AuxiliarybfAndefSymbol {
+ uint8_t unused1[4];
+ uint16_t Linenumber;
+ uint8_t unused2[6];
+ uint32_t PointerToNextFunction;
+ uint8_t unused3[2];
+ };
+
+ struct AuxiliaryWeakExternal {
+ uint32_t TagIndex;
+ uint32_t Characteristics;
+ uint8_t unused[10];
+ };
+
+ /// These are not documented in the spec, but are located in WinNT.h.
+ enum WeakExternalCharacteristics {
+ IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY = 1,
+ IMAGE_WEAK_EXTERN_SEARCH_LIBRARY = 2,
+ IMAGE_WEAK_EXTERN_SEARCH_ALIAS = 3
+ };
+
+ struct AuxiliaryFile {
+ uint8_t FileName[18];
+ };
+
+ struct AuxiliarySectionDefinition {
+ uint32_t Length;
+ uint16_t NumberOfRelocations;
+ uint16_t NumberOfLinenumbers;
+ uint32_t CheckSum;
+ uint16_t Number;
+ uint8_t Selection;
+ uint8_t unused[3];
+ };
+
+ union Auxiliary {
+ AuxiliaryFunctionDefinition FunctionDefinition;
+ AuxiliarybfAndefSymbol bfAndefSymbol;
+ AuxiliaryWeakExternal WeakExternal;
+ AuxiliaryFile File;
+ AuxiliarySectionDefinition SectionDefinition;
+ };
+
+} // End namespace llvm.
+} // End namespace COFF.
+
+#endif
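
The SymbolBaseType/SymbolComplexType pair above is packed into a single 16-bit Type field as (base + (derived << SCT_COMPLEX_TYPE_SHIFT)). For illustration, a minimal standalone sketch of how that encoding can be decoded; decodeSymbolType and the sample value are made up for this example and are not part of the header:

#include <cstdint>
#include <cstdio>

// Hypothetical helper: split a COFF symbol Type field into its base and
// complex parts, following the layout documented next to
// SCT_COMPLEX_TYPE_SHIFT (base + (derived << 8)).
static void decodeSymbolType(uint16_t Type) {
  unsigned Base    = Type & 0xFF; // SymbolBaseType, e.g. IMAGE_SYM_TYPE_INT
  unsigned Derived = Type >> 8;   // SymbolComplexType, e.g. IMAGE_SYM_DTYPE_FUNCTION
  std::printf("base=%u derived=%u\n", Base, Derived);
}

int main() {
  // IMAGE_SYM_TYPE_INT (4) combined with IMAGE_SYM_DTYPE_FUNCTION (2):
  // a function returning an int.
  decodeSymbolType(4 + (2u << 8));
  return 0;
}
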
diff --git a/libclamav/c++/llvm/include/llvm/Support/CallSite.h b/libclamav/c++/llvm/include/llvm/Support/CallSite.h
index 285b558..9b6a409 100644
--- a/libclamav/c++/llvm/include/llvm/Support/CallSite.h
+++ b/libclamav/c++/llvm/include/llvm/Support/CallSite.h
@@ -8,15 +8,18 @@
//===----------------------------------------------------------------------===//
//
// This file defines the CallSite class, which is a handy wrapper for code that
-// wants to treat Call and Invoke instructions in a generic way.
+// wants to treat Call and Invoke instructions in a generic way. When in non-
+// mutation context (e.g. an analysis) ImmutableCallSite should be used.
+// Finally, when some degree of customization is necessary between these two
+// extremes, CallSiteBase<> can be supplied with fine-tuned parameters.
//
-// NOTE: This class is supposed to have "value semantics". So it should be
-// passed by value, not by reference; it should not be "new"ed or "delete"d. It
-// is efficiently copyable, assignable and constructable, with cost equivalent
-// to copying a pointer (notice that it has only a single data member).
-// The internal representation carries a flag which indicates which of the two
-// variants is enclosed. This allows for cheaper checks when various accessors
-// of CallSite are employed.
+// NOTE: These classes are supposed to have "value semantics". So they should be
+// passed by value, not by reference; they should not be "new"ed or "delete"d.
+// They are efficiently copyable, assignable and constructable, with cost
+// equivalent to copying a pointer (notice that they have only a single data
+// member). The internal representation carries a flag which indicates which of
+// the two variants is enclosed. This allows for cheaper checks when various
+// accessors of CallSite are employed.
//
//===----------------------------------------------------------------------===//
@@ -27,75 +30,49 @@
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/BasicBlock.h"
#include "llvm/CallingConv.h"
-#include "llvm/Instruction.h"
+#include "llvm/Instructions.h"
namespace llvm {
class CallInst;
class InvokeInst;
-class CallSite {
- PointerIntPair<Instruction*, 1, bool> I;
+template <typename FunTy = const Function,
+ typename ValTy = const Value,
+ typename UserTy = const User,
+ typename InstrTy = const Instruction,
+ typename CallTy = const CallInst,
+ typename InvokeTy = const InvokeInst,
+ typename IterTy = User::const_op_iterator>
+class CallSiteBase {
+protected:
+ PointerIntPair<InstrTy*, 1, bool> I;
public:
- CallSite() : I(0, false) {}
- CallSite(CallInst *CI) : I(reinterpret_cast<Instruction*>(CI), true) {}
- CallSite(InvokeInst *II) : I(reinterpret_cast<Instruction*>(II), false) {}
- CallSite(Instruction *C);
-
- bool operator==(const CallSite &CS) const { return I == CS.I; }
- bool operator!=(const CallSite &CS) const { return I != CS.I; }
+ CallSiteBase() : I(0, false) {}
+ CallSiteBase(CallTy *CI) : I(CI, true) { assert(CI); }
+ CallSiteBase(InvokeTy *II) : I(II, false) { assert(II); }
+ CallSiteBase(ValTy *II) { *this = get(II); }
+ CallSiteBase(InstrTy *II) {
+ assert(II && "Null instruction given?");
+ *this = get(II);
+ assert(I.getPointer() && "Not a call?");
+ }
- /// CallSite::get - This static method is sort of like a constructor. It will
- /// create an appropriate call site for a Call or Invoke instruction, but it
- /// can also create a null initialized CallSite object for something which is
- /// NOT a call site.
+ /// CallSiteBase::get - This static method is sort of like a constructor. It
+ /// will create an appropriate call site for a Call or Invoke instruction, but
+ /// it can also create a null initialized CallSiteBase object for something
+ /// which is NOT a call site.
///
- static CallSite get(Value *V) {
- if (Instruction *I = dyn_cast<Instruction>(V)) {
- if (I->getOpcode() == Instruction::Call)
- return CallSite(reinterpret_cast<CallInst*>(I));
- else if (I->getOpcode() == Instruction::Invoke)
- return CallSite(reinterpret_cast<InvokeInst*>(I));
+ static CallSiteBase get(ValTy *V) {
+ if (InstrTy *II = dyn_cast<InstrTy>(V)) {
+ if (II->getOpcode() == Instruction::Call)
+ return CallSiteBase(static_cast<CallTy*>(II));
+ else if (II->getOpcode() == Instruction::Invoke)
+ return CallSiteBase(static_cast<InvokeTy*>(II));
}
- return CallSite();
+ return CallSiteBase();
}
- /// getCallingConv/setCallingConv - get or set the calling convention of the
- /// call.
- CallingConv::ID getCallingConv() const;
- void setCallingConv(CallingConv::ID CC);
-
- /// getAttributes/setAttributes - get or set the parameter attributes of
- /// the call.
- const AttrListPtr &getAttributes() const;
- void setAttributes(const AttrListPtr &PAL);
-
- /// paramHasAttr - whether the call or the callee has the given attribute.
- bool paramHasAttr(uint16_t i, Attributes attr) const;
-
- /// @brief Extract the alignment for a call or parameter (0=unknown).
- uint16_t getParamAlignment(uint16_t i) const;
-
- /// @brief Determine if the call does not access memory.
- bool doesNotAccessMemory() const;
- void setDoesNotAccessMemory(bool doesNotAccessMemory = true);
-
- /// @brief Determine if the call does not access or only reads memory.
- bool onlyReadsMemory() const;
- void setOnlyReadsMemory(bool onlyReadsMemory = true);
-
- /// @brief Determine if the call cannot return.
- bool doesNotReturn() const;
- void setDoesNotReturn(bool doesNotReturn = true);
-
- /// @brief Determine if the call cannot unwind.
- bool doesNotThrow() const;
- void setDoesNotThrow(bool doesNotThrow = true);
-
- /// getType - Return the type of the instruction that generated this call site
- ///
- const Type *getType() const { return getInstruction()->getType(); }
-
/// isCall - true if a CallInst is enclosed.
/// Note that !isCall() does not mean it is an InvokeInst enclosed,
/// it also could signify a NULL Instruction pointer.
@@ -105,94 +82,235 @@ public:
///
bool isInvoke() const { return getInstruction() && !I.getInt(); }
- /// getInstruction - Return the instruction this call site corresponds to
- ///
- Instruction *getInstruction() const { return I.getPointer(); }
-
- /// getCaller - Return the caller function for this call site
- ///
- Function *getCaller() const { return getInstruction()
- ->getParent()->getParent(); }
+ InstrTy *getInstruction() const { return I.getPointer(); }
+ InstrTy *operator->() const { return I.getPointer(); }
+ operator bool() const { return I.getPointer(); }
/// getCalledValue - Return the pointer to function that is being called...
///
- Value *getCalledValue() const {
+ ValTy *getCalledValue() const {
assert(getInstruction() && "Not a call or invoke instruction!");
- return getInstruction()->getOperand(0);
+ return *getCallee();
}
/// getCalledFunction - Return the function being called if this is a direct
/// call, otherwise return null (if it's an indirect call).
///
- Function *getCalledFunction() const {
- return dyn_cast<Function>(getCalledValue());
+ FunTy *getCalledFunction() const {
+ return dyn_cast<FunTy>(getCalledValue());
}
/// setCalledFunction - Set the callee to the specified value...
///
void setCalledFunction(Value *V) {
assert(getInstruction() && "Not a call or invoke instruction!");
- getInstruction()->setOperand(0, V);
+ *getCallee() = V;
+ }
+
+ /// isCallee - Determine whether the passed iterator points to the
+ /// callee operand's Use.
+ ///
+ bool isCallee(value_use_iterator<UserTy> UI) const {
+ return getCallee() == &UI.getUse();
}
- Value *getArgument(unsigned ArgNo) const {
+ ValTy *getArgument(unsigned ArgNo) const {
assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!");
- return *(arg_begin()+ArgNo);
+ return *(arg_begin() + ArgNo);
}
void setArgument(unsigned ArgNo, Value* newVal) {
assert(getInstruction() && "Not a call or invoke instruction!");
assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!");
- getInstruction()->setOperand(getArgumentOffset() + ArgNo, newVal);
+ getInstruction()->setOperand(ArgNo, newVal);
}
- /// Given an operand number, returns the argument that corresponds to it.
- /// OperandNo must be a valid operand number that actually corresponds to an
- /// argument.
- unsigned getArgumentNo(unsigned OperandNo) const {
- assert(OperandNo >= getArgumentOffset() && "Operand number passed was not "
- "a valid argument");
- return OperandNo - getArgumentOffset();
+ /// Given a value use iterator, returns the argument that corresponds to it.
+ /// Iterator must actually correspond to an argument.
+ unsigned getArgumentNo(value_use_iterator<UserTy> I) const {
+ assert(getInstruction() && "Not a call or invoke instruction!");
+ assert(arg_begin() <= &I.getUse() && &I.getUse() < arg_end()
+ && "Argument # out of range!");
+ return &I.getUse() - arg_begin();
}
- /// hasArgument - Returns true if this CallSite passes the given Value* as an
- /// argument to the called function.
- bool hasArgument(const Value *Arg) const;
-
/// arg_iterator - The type of iterator to use when looping over actual
/// arguments at this call site...
- typedef User::op_iterator arg_iterator;
+ typedef IterTy arg_iterator;
/// arg_begin/arg_end - Return iterators corresponding to the actual argument
/// list for a call site.
- arg_iterator arg_begin() const {
+ IterTy arg_begin() const {
assert(getInstruction() && "Not a call or invoke instruction!");
// Skip non-arguments
- return getInstruction()->op_begin() + getArgumentOffset();
+ return (*this)->op_begin();
}
- arg_iterator arg_end() const { return getInstruction()->op_end(); }
+ IterTy arg_end() const { return (*this)->op_end() - getArgumentEndOffset(); }
bool arg_empty() const { return arg_end() == arg_begin(); }
unsigned arg_size() const { return unsigned(arg_end() - arg_begin()); }
+
+ /// getType - Return the type of the instruction that generated this call site
+ ///
+ const Type *getType() const { return (*this)->getType(); }
- bool operator<(const CallSite &CS) const {
- return getInstruction() < CS.getInstruction();
+ /// getCaller - Return the caller function for this call site
+ ///
+ FunTy *getCaller() const { return (*this)->getParent()->getParent(); }
+
+#define CALLSITE_DELEGATE_GETTER(METHOD) \
+ InstrTy *II = getInstruction(); \
+ return isCall() \
+ ? cast<CallInst>(II)->METHOD \
+ : cast<InvokeInst>(II)->METHOD
+
+#define CALLSITE_DELEGATE_SETTER(METHOD) \
+ InstrTy *II = getInstruction(); \
+ if (isCall()) \
+ cast<CallInst>(II)->METHOD; \
+ else \
+ cast<InvokeInst>(II)->METHOD
+
+ /// getCallingConv/setCallingConv - get or set the calling convention of the
+ /// call.
+ CallingConv::ID getCallingConv() const {
+ CALLSITE_DELEGATE_GETTER(getCallingConv());
+ }
+ void setCallingConv(CallingConv::ID CC) {
+ CALLSITE_DELEGATE_SETTER(setCallingConv(CC));
}
- bool isCallee(Value::use_iterator UI) const {
- return getInstruction()->op_begin() == &UI.getUse();
+ /// getAttributes/setAttributes - get or set the parameter attributes of
+ /// the call.
+ const AttrListPtr &getAttributes() const {
+ CALLSITE_DELEGATE_GETTER(getAttributes());
+ }
+ void setAttributes(const AttrListPtr &PAL) {
+ CALLSITE_DELEGATE_SETTER(setAttributes(PAL));
+ }
+
+ /// paramHasAttr - whether the call or the callee has the given attribute.
+ bool paramHasAttr(uint16_t i, Attributes attr) const {
+ CALLSITE_DELEGATE_GETTER(paramHasAttr(i, attr));
+ }
+
+ /// @brief Extract the alignment for a call or parameter (0=unknown).
+ uint16_t getParamAlignment(uint16_t i) const {
+ CALLSITE_DELEGATE_GETTER(getParamAlignment(i));
+ }
+
+ /// @brief Return true if the call should not be inlined.
+ bool isNoInline() const {
+ CALLSITE_DELEGATE_GETTER(isNoInline());
+ }
+ void setIsNoInline(bool Value = true) {
+ CALLSITE_DELEGATE_SETTER(setIsNoInline(Value));
+ }
+
+ /// @brief Determine if the call does not access memory.
+ bool doesNotAccessMemory() const {
+ CALLSITE_DELEGATE_GETTER(doesNotAccessMemory());
+ }
+ void setDoesNotAccessMemory(bool doesNotAccessMemory = true) {
+ CALLSITE_DELEGATE_SETTER(setDoesNotAccessMemory(doesNotAccessMemory));
+ }
+
+ /// @brief Determine if the call does not access or only reads memory.
+ bool onlyReadsMemory() const {
+ CALLSITE_DELEGATE_GETTER(onlyReadsMemory());
+ }
+ void setOnlyReadsMemory(bool onlyReadsMemory = true) {
+ CALLSITE_DELEGATE_SETTER(setOnlyReadsMemory(onlyReadsMemory));
+ }
+
+ /// @brief Determine if the call cannot return.
+ bool doesNotReturn() const {
+ CALLSITE_DELEGATE_GETTER(doesNotReturn());
+ }
+ void setDoesNotReturn(bool doesNotReturn = true) {
+ CALLSITE_DELEGATE_SETTER(setDoesNotReturn(doesNotReturn));
+ }
+
+ /// @brief Determine if the call cannot unwind.
+ bool doesNotThrow() const {
+ CALLSITE_DELEGATE_GETTER(doesNotThrow());
+ }
+ void setDoesNotThrow(bool doesNotThrow = true) {
+ CALLSITE_DELEGATE_SETTER(setDoesNotThrow(doesNotThrow));
+ }
+
+#undef CALLSITE_DELEGATE_GETTER
+#undef CALLSITE_DELEGATE_SETTER
+
+ /// hasArgument - Returns true if this CallSite passes the given Value* as an
+ /// argument to the called function.
+ bool hasArgument(const Value *Arg) const {
+ for (arg_iterator AI = this->arg_begin(), E = this->arg_end(); AI != E;
+ ++AI)
+ if (AI->get() == Arg)
+ return true;
+ return false;
}
private:
- /// Returns the operand number of the first argument
- unsigned getArgumentOffset() const {
+ unsigned getArgumentEndOffset() const {
if (isCall())
- return 1; // Skip Function
+ return 1; // Skip Callee
else
- return 3; // Skip Function, BB, BB
+ return 3; // Skip BB, BB, Callee
+ }
+
+ IterTy getCallee() const {
+ if (isCall()) // Skip Callee
+ return cast<CallInst>(getInstruction())->op_end() - 1;
+ else // Skip BB, BB, Callee
+ return cast<InvokeInst>(getInstruction())->op_end() - 3;
}
};
+class CallSite : public CallSiteBase<Function, Value, User, Instruction,
+ CallInst, InvokeInst, User::op_iterator> {
+ typedef CallSiteBase<Function, Value, User, Instruction,
+ CallInst, InvokeInst, User::op_iterator> Base;
+public:
+ CallSite() {}
+ CallSite(Base B) : Base(B) {}
+ CallSite(Value* V) : Base(V) {}
+ CallSite(CallInst *CI) : Base(CI) {}
+ CallSite(InvokeInst *II) : Base(II) {}
+ CallSite(Instruction *II) : Base(II) {}
+
+ bool operator==(const CallSite &CS) const { return I == CS.I; }
+ bool operator!=(const CallSite &CS) const { return I != CS.I; }
+
+ /// CallSite::get - This static method is sort of like a constructor. It will
+ /// create an appropriate call site for a Call or Invoke instruction, but it
+ /// can also create a null initialized CallSite object for something which is
+ /// NOT a call site.
+ ///
+ static CallSite get(Value *V) {
+ return Base::get(V);
+ }
+
+ bool operator<(const CallSite &CS) const {
+ return getInstruction() < CS.getInstruction();
+ }
+
+private:
+ User::op_iterator getCallee() const;
+};
+
+/// ImmutableCallSite - establish a view to a call site for examination
+class ImmutableCallSite : public CallSiteBase<> {
+ typedef CallSiteBase<> Base;
+public:
+ ImmutableCallSite(const Value* V) : Base(V) {}
+ ImmutableCallSite(const CallInst *CI) : Base(CI) {}
+ ImmutableCallSite(const InvokeInst *II) : Base(II) {}
+ ImmutableCallSite(const Instruction *II) : Base(II) {}
+ ImmutableCallSite(CallSite CS) : Base(CS.getInstruction()) {}
+};
+
} // End llvm namespace
#endif
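
As a hedged illustration of the CallSiteBase/ImmutableCallSite split introduced above, a short sketch of read-only use from an analysis; countDirectCallArgs is a hypothetical helper, not an LLVM API:

#include "llvm/Support/CallSite.h"
using namespace llvm;

// Return the number of actual arguments of a direct call; 0 for indirect
// calls and for values that are not call sites at all.
static unsigned countDirectCallArgs(const Value *V) {
  ImmutableCallSite CS(V);            // null call site if V is not a Call/Invoke
  if (!CS || !CS.getCalledFunction()) // not a call site, or an indirect call
    return 0;
  return CS.arg_size();
}
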
diff --git a/libclamav/c++/llvm/include/llvm/Support/Casting.h b/libclamav/c++/llvm/include/llvm/Support/Casting.h
index 17bcb59..c589171 100644
--- a/libclamav/c++/llvm/include/llvm/Support/Casting.h
+++ b/libclamav/c++/llvm/include/llvm/Support/Casting.h
@@ -50,9 +50,11 @@ template<typename From> struct simplify_type<const From> {
// if (isa<Type*>(myVal)) { ... }
//
template <typename To, typename From>
-inline bool isa_impl(const From &Val) {
- return To::classof(&Val);
-}
+struct isa_impl {
+ static inline bool doit(const From &Val) {
+ return To::classof(&Val);
+ }
+};
template<typename To, typename From, typename SimpleType>
struct isa_impl_wrap {
@@ -68,7 +70,7 @@ template<typename To, typename FromTy>
struct isa_impl_wrap<To, const FromTy, const FromTy> {
// When From == SimpleType, we are as simple as we are going to get.
static bool doit(const FromTy &Val) {
- return isa_impl<To,FromTy>(Val);
+ return isa_impl<To,FromTy>::doit(Val);
}
};
@@ -234,71 +236,6 @@ inline typename cast_retty<X, Y>::ret_type dyn_cast_or_null(const Y &Val) {
return (Val && isa<X>(Val)) ? cast<X, Y>(Val) : 0;
}
-
-#ifdef DEBUG_CAST_OPERATORS
-#include "llvm/Support/raw_ostream.h"
-
-struct bar {
- bar() {}
-private:
- bar(const bar &);
-};
-struct foo {
- void ext() const;
- /* static bool classof(const bar *X) {
- cerr << "Classof: " << X << "\n";
- return true;
- }*/
-};
-
-template <> inline bool isa_impl<foo,bar>(const bar &Val) {
- dbgs() << "Classof: " << &Val << "\n";
- return true;
-}
-
-
-bar *fub();
-void test(bar &B1, const bar *B2) {
- // test various configurations of const
- const bar &B3 = B1;
- const bar *const B4 = B2;
-
- // test isa
- if (!isa<foo>(B1)) return;
- if (!isa<foo>(B2)) return;
- if (!isa<foo>(B3)) return;
- if (!isa<foo>(B4)) return;
-
- // test cast
- foo &F1 = cast<foo>(B1);
- const foo *F3 = cast<foo>(B2);
- const foo *F4 = cast<foo>(B2);
- const foo &F8 = cast<foo>(B3);
- const foo *F9 = cast<foo>(B4);
- foo *F10 = cast<foo>(fub());
-
- // test cast_or_null
- const foo *F11 = cast_or_null<foo>(B2);
- const foo *F12 = cast_or_null<foo>(B2);
- const foo *F13 = cast_or_null<foo>(B4);
- const foo *F14 = cast_or_null<foo>(fub()); // Shouldn't print.
-
- // These lines are errors...
- //foo *F20 = cast<foo>(B2); // Yields const foo*
- //foo &F21 = cast<foo>(B3); // Yields const foo&
- //foo *F22 = cast<foo>(B4); // Yields const foo*
- //foo &F23 = cast_or_null<foo>(B1);
- //const foo &F24 = cast_or_null<foo>(B3);
-}
-
-bar *fub() { return 0; }
-void main() {
- bar B;
- test(B, &B);
-}
-
-#endif
-
} // End llvm namespace
#endif
diff --git a/libclamav/c++/llvm/include/llvm/Support/CommandLine.h b/libclamav/c++/llvm/include/llvm/Support/CommandLine.h
index 61c3256..9ae3d6a 100644
--- a/libclamav/c++/llvm/include/llvm/Support/CommandLine.h
+++ b/libclamav/c++/llvm/include/llvm/Support/CommandLine.h
@@ -31,7 +31,7 @@
#include <vector>
namespace llvm {
-
+
/// cl Namespace - This namespace contains all of the command line option
/// processing machinery. It is intentionally a short name to make qualified
/// usage concise.
@@ -443,16 +443,23 @@ protected:
template <class DataType>
class parser : public generic_parser_base {
protected:
- SmallVector<std::pair<const char *,
- std::pair<DataType, const char *> >, 8> Values;
+ class OptionInfo {
+ public:
+ OptionInfo(const char *name, DataType v, const char *helpStr) :
+ Name(name), V(v), HelpStr(helpStr) {}
+ const char *Name;
+ DataType V;
+ const char *HelpStr;
+ };
+ SmallVector<OptionInfo, 8> Values;
public:
typedef DataType parser_data_type;
// Implement virtual functions needed by generic_parser_base
unsigned getNumOptions() const { return unsigned(Values.size()); }
- const char *getOption(unsigned N) const { return Values[N].first; }
+ const char *getOption(unsigned N) const { return Values[N].Name; }
const char *getDescription(unsigned N) const {
- return Values[N].second.second;
+ return Values[N].HelpStr;
}
// parse - Return true on error.
@@ -465,8 +472,8 @@ public:
for (unsigned i = 0, e = static_cast<unsigned>(Values.size());
i != e; ++i)
- if (Values[i].first == ArgVal) {
- V = Values[i].second.first;
+ if (Values[i].Name == ArgVal) {
+ V = Values[i].V;
return false;
}
@@ -478,8 +485,8 @@ public:
template <class DT>
void addLiteralOption(const char *Name, const DT &V, const char *HelpStr) {
assert(findOption(Name) == Values.size() && "Option already exists!");
- Values.push_back(std::make_pair(Name,
- std::make_pair(static_cast<DataType>(V),HelpStr)));
+ OptionInfo X(Name, static_cast<DataType>(V), HelpStr);
+ Values.push_back(X);
MarkOptionsChanged();
}
@@ -781,7 +788,7 @@ public:
DataType &getValue() { check(); return *Location; }
const DataType &getValue() const { check(); return *Location; }
-
+
operator DataType() const { return this->getValue(); }
};
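
Each literal value registered with a cl::opt ends up as one OptionInfo record ({Name, V, HelpStr}) in the parser's Values vector shown above. A rough usage sketch, assuming the cl::values/clEnumValN machinery of this LLVM version; the option name and enum are made up:

#include "llvm/Support/CommandLine.h"
namespace cl = llvm::cl;

enum OptLevel { Debug, Release };

// Hypothetical option: every clEnumValN entry becomes one OptionInfo record.
static cl::opt<OptLevel> OptimizationLevel(
    "opt-level", cl::desc("Choose optimization level:"),
    cl::values(clEnumValN(Debug,   "g",  "No optimizations"),
               clEnumValN(Release, "O2", "Enable default optimizations"),
               clEnumValEnd));

int main(int argc, char **argv) {
  cl::ParseCommandLineOptions(argc, argv);
  return OptimizationLevel == Release ? 0 : 1;
}
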
diff --git a/libclamav/c++/llvm/include/llvm/Support/Compiler.h b/libclamav/c++/llvm/include/llvm/Support/Compiler.h
index 881a0fe..14b36f8 100644
--- a/libclamav/c++/llvm/include/llvm/Support/Compiler.h
+++ b/libclamav/c++/llvm/include/llvm/Support/Compiler.h
@@ -15,12 +15,24 @@
#ifndef LLVM_SUPPORT_COMPILER_H
#define LLVM_SUPPORT_COMPILER_H
-// The VISIBILITY_HIDDEN macro, used for marking classes with the GCC-specific
-// visibility("hidden") attribute.
-#if (__GNUC__ >= 4) && !defined(__MINGW32__) && !defined(__CYGWIN__)
-#define VISIBILITY_HIDDEN __attribute__ ((visibility("hidden")))
+/// LLVM_LIBRARY_VISIBILITY - If a class marked with this attribute is linked
+/// into a shared library, then the class should be private to the library and
+/// not accessible from outside it. Can also be used to mark variables and
+/// functions, making them private to any shared library they are linked into.
+
+/// LLVM_GLOBAL_VISIBILITY - If a class marked with this attribute is linked
+/// into a shared library, then the class will be accessible from outside
+/// the library. Can also be used to mark variables and functions, making them
+/// accessible from outside any shared library they are linked into.
+#if defined(__MINGW32__) || defined(__CYGWIN__)
+#define LLVM_LIBRARY_VISIBILITY
+#define LLVM_GLOBAL_VISIBILITY __declspec(dllexport)
+#elif (__GNUC__ >= 4)
+#define LLVM_LIBRARY_VISIBILITY __attribute__ ((visibility("hidden")))
+#define LLVM_GLOBAL_VISIBILITY __attribute__ ((visibility("default")))
#else
-#define VISIBILITY_HIDDEN
+#define LLVM_LIBRARY_VISIBILITY
+#define LLVM_GLOBAL_VISIBILITY
#endif
#if (__GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
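
A brief sketch of how the two new visibility macros are meant to be applied; InternalCache and plugin_init are made-up names, not part of the header:

#include "llvm/Support/Compiler.h"

// Internal helper: hidden from clients when the enclosing shared library is
// built with GCC 4+ (per LLVM_LIBRARY_VISIBILITY above).
class LLVM_LIBRARY_VISIBILITY InternalCache {
public:
  void clear() {} // body elided; illustration only
};

// Exported entry point: visible to clients of the shared library.
extern "C" LLVM_GLOBAL_VISIBILITY int plugin_init(void) { return 0; }
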
diff --git a/libclamav/c++/llvm/include/llvm/Support/ConstantRange.h b/libclamav/c++/llvm/include/llvm/Support/ConstantRange.h
index 6342c6f..29086b2 100644
--- a/libclamav/c++/llvm/include/llvm/Support/ConstantRange.h
+++ b/libclamav/c++/llvm/include/llvm/Support/ConstantRange.h
@@ -41,8 +41,6 @@ namespace llvm {
///
class ConstantRange {
APInt Lower, Upper;
- static ConstantRange intersect1Wrapped(const ConstantRange &LHS,
- const ConstantRange &RHS);
public:
/// Initialize a full (the default) or empty set for the specified bit width.
@@ -196,39 +194,45 @@ public:
ConstantRange sextOrTrunc(uint32_t BitWidth) const;
/// add - Return a new range representing the possible values resulting
- /// from an addition of a value in this range and a value in Other.
+ /// from an addition of a value in this range and a value in \p Other.
ConstantRange add(const ConstantRange &Other) const;
+ /// sub - Return a new range representing the possible values resulting
+ /// from a subtraction of a value in this range and a value in \p Other.
+ ConstantRange sub(const ConstantRange &Other) const;
+
/// multiply - Return a new range representing the possible values resulting
- /// from a multiplication of a value in this range and a value in Other.
+ /// from a multiplication of a value in this range and a value in \p Other.
/// TODO: This isn't fully implemented yet.
ConstantRange multiply(const ConstantRange &Other) const;
/// smax - Return a new range representing the possible values resulting
- /// from a signed maximum of a value in this range and a value in Other.
+ /// from a signed maximum of a value in this range and a value in \p Other.
ConstantRange smax(const ConstantRange &Other) const;
/// umax - Return a new range representing the possible values resulting
- /// from an unsigned maximum of a value in this range and a value in Other.
+ /// from an unsigned maximum of a value in this range and a value in \p Other.
ConstantRange umax(const ConstantRange &Other) const;
/// udiv - Return a new range representing the possible values resulting
- /// from an unsigned division of a value in this range and a value in Other.
- /// TODO: This isn't fully implemented yet.
+ /// from an unsigned division of a value in this range and a value in
+ /// \p Other.
ConstantRange udiv(const ConstantRange &Other) const;
/// shl - Return a new range representing the possible values resulting
- /// from a left shift of a value in this range by the Amount value.
- ConstantRange shl(const ConstantRange &Amount) const;
-
- /// ashr - Return a new range representing the possible values resulting from
- /// an arithmetic right shift of a value in this range by the Amount value.
- ConstantRange ashr(const ConstantRange &Amount) const;
+ /// from a left shift of a value in this range by a value in \p Other.
+ /// TODO: This isn't fully implemented yet.
+ ConstantRange shl(const ConstantRange &Other) const;
- /// shr - Return a new range representing the possible values resulting
- /// from a logical right shift of a value in this range by the Amount value.
- ConstantRange lshr(const ConstantRange &Amount) const;
+ /// lshr - Return a new range representing the possible values resulting
+ /// from a logical right shift of a value in this range and a value in
+ /// \p Other.
+ ConstantRange lshr(const ConstantRange &Other) const;
+ /// inverse - Return a new range that is the logical not of the current set.
+ ///
+ ConstantRange inverse() const;
+
/// print - Print out the bounds to a stream...
///
void print(raw_ostream &OS) const;
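
A quick sketch of the interval arithmetic these methods provide, assuming the ConstantRange(APInt, APInt) constructor; sumRange is illustrative only:

#include "llvm/ADT/APInt.h"
#include "llvm/Support/ConstantRange.h"
using namespace llvm;

// [4, 10) + [1, 3) over i8: the result conservatively contains every
// possible sum, here [5, 12).
static ConstantRange sumRange() {
  ConstantRange A(APInt(8, 4), APInt(8, 10));
  ConstantRange B(APInt(8, 1), APInt(8, 3));
  return A.add(B);
}
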
diff --git a/libclamav/c++/llvm/include/llvm/Support/CrashRecoveryContext.h b/libclamav/c++/llvm/include/llvm/Support/CrashRecoveryContext.h
new file mode 100644
index 0000000..d66609f
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/Support/CrashRecoveryContext.h
@@ -0,0 +1,84 @@
+//===--- CrashRecoveryContext.h - Crash Recovery ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CRASHRECOVERYCONTEXT_H
+#define LLVM_SUPPORT_CRASHRECOVERYCONTEXT_H
+
+#include <string>
+
+namespace llvm {
+class StringRef;
+
+/// \brief Crash recovery helper object.
+///
+/// This class implements support for running operations in a safe context so
+/// that crashes (memory errors, stack overflow, assertion violations) can be
+/// detected and control restored to the crashing thread. Crash detection is
+/// purely "best effort", the exact set of failures which can be recovered from
+/// is platform dependent.
+///
+/// Clients make use of this code by first calling
+/// CrashRecoveryContext::Enable(), and then executing unsafe operations via a
+/// CrashRecoveryContext object. For example:
+///
+/// void actual_work(void *);
+///
+/// void foo() {
+/// CrashRecoveryContext CRC;
+///
+/// if (!CRC.RunSafely(actual_work, 0)) {
+/// ... a crash was detected, report error to user ...
+/// }
+///
+/// ... no crash was detected ...
+/// }
+///
+/// Crash recovery contexts may not be nested.
+class CrashRecoveryContext {
+ void *Impl;
+
+public:
+ CrashRecoveryContext() : Impl(0) {}
+ ~CrashRecoveryContext();
+
+ /// \brief Enable crash recovery.
+ static void Enable();
+
+ /// \brief Disable crash recovery.
+ static void Disable();
+
+ /// \brief Return the active context, if the code is currently executing in a
+ /// thread which is in a protected context.
+ static CrashRecoveryContext *GetCurrent();
+
+ /// \brief Execute the provided callback function (with the given arguments) in
+ /// a protected context.
+ ///
+ /// \return True if the function completed successfully, and false if the
+ /// function crashed (or HandleCrash was called explicitly). Clients should
+ /// make as few assumptions as possible about the program state when
+ /// RunSafely has returned false. Clients can use getBacktrace() to retrieve
+ /// the backtrace of the crash on failures.
+ bool RunSafely(void (*Fn)(void*), void *UserData);
+
+ /// \brief Explicitly trigger a crash recovery in the current process, and
+ /// return failure from RunSafely(). This function does not return.
+ void HandleCrash();
+
+ /// \brief Return a string containing the backtrace where the crash was
+ /// detected; or empty if the backtrace wasn't recovered.
+ ///
+ /// This function is only valid when a crash has been detected (i.e.,
+ /// RunSafely() has returned false).
+ const std::string &getBacktrace() const;
+};
+
+}
+
+#endif
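
A sketch of the pattern described in the header comment, extended with the new getBacktrace() accessor; riskyWork and runGuarded are made-up names:

#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void riskyWork(void *Opaque) {
  (void)Opaque;
  // ... anything that might crash ...
}

static bool runGuarded() {
  CrashRecoveryContext::Enable();
  CrashRecoveryContext CRC;
  if (!CRC.RunSafely(riskyWork, 0)) {
    // getBacktrace() is only meaningful after RunSafely() returned false.
    errs() << "crashed:\n" << CRC.getBacktrace() << "\n";
    return false;
  }
  return true;
}
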
diff --git a/libclamav/c++/llvm/include/llvm/Support/DOTGraphTraits.h b/libclamav/c++/llvm/include/llvm/Support/DOTGraphTraits.h
index 54ced15..796c74a 100644
--- a/libclamav/c++/llvm/include/llvm/Support/DOTGraphTraits.h
+++ b/libclamav/c++/llvm/include/llvm/Support/DOTGraphTraits.h
@@ -36,7 +36,7 @@ protected:
}
public:
- DefaultDOTGraphTraits (bool simple=false) : IsSimple (simple) {}
+ explicit DefaultDOTGraphTraits(bool simple=false) : IsSimple (simple) {}
/// getGraphName - Return the label for the graph as a whole. Printed at the
/// top of the graph.
@@ -59,6 +59,12 @@ public:
return false;
}
+ /// isNodeHidden - If the function returns true, the given node is not
+ /// displayed in the graph.
+ static bool isNodeHidden(const void *Node) {
+ return false;
+ }
+
/// getNodeLabel - Given a node and a pointer to the top level graph, return
/// the label to print in the node.
template<typename GraphType>
diff --git a/libclamav/c++/llvm/include/llvm/Support/DataFlow.h b/libclamav/c++/llvm/include/llvm/Support/DataFlow.h
index 8f79ead..355c402 100644
--- a/libclamav/c++/llvm/include/llvm/Support/DataFlow.h
+++ b/libclamav/c++/llvm/include/llvm/Support/DataFlow.h
@@ -25,7 +25,7 @@ namespace llvm {
template <> struct GraphTraits<const Value*> {
typedef const Value NodeType;
- typedef Value::use_const_iterator ChildIteratorType;
+ typedef Value::const_use_iterator ChildIteratorType;
static NodeType *getEntryNode(const Value *G) {
return G;
diff --git a/libclamav/c++/llvm/include/llvm/Support/DebugLoc.h b/libclamav/c++/llvm/include/llvm/Support/DebugLoc.h
index 32631fc..ccc3446 100644
--- a/libclamav/c++/llvm/include/llvm/Support/DebugLoc.h
+++ b/libclamav/c++/llvm/include/llvm/Support/DebugLoc.h
@@ -1,4 +1,4 @@
-//===---- llvm/DebugLoc.h - Debug Location Information ----------*- C++ -*-===//
+//===---- llvm/Support/DebugLoc.h - Debug Location Information --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,50 +12,69 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_DEBUGLOC_H
-#define LLVM_DEBUGLOC_H
-
-#include "llvm/ADT/DenseMap.h"
-#include <vector>
+#ifndef LLVM_SUPPORT_DEBUGLOC_H
+#define LLVM_SUPPORT_DEBUGLOC_H
namespace llvm {
class MDNode;
-
- /// DebugLoc - Debug location id. This is carried by SDNode and MachineInstr
- /// to index into a vector of unique debug location tuples.
+ class LLVMContext;
+
+ /// DebugLoc - Debug location id. This is carried by Instruction, SDNode,
+ /// and MachineInstr to compactly encode file/line/scope information for an
+ /// operation.
class DebugLoc {
- unsigned Idx;
-
+ /// LineCol - This 32-bit value encodes the line and column number for the
+ /// location, encoded as 24 bits for line and 8 bits for col. A value of 0
+ /// for either means unknown.
+ unsigned LineCol;
+
+ /// ScopeIdx - This is an opaque ID# for Scope/InlinedAt information,
+ /// decoded by LLVMContext. 0 is unknown.
+ int ScopeIdx;
public:
- DebugLoc() : Idx(~0U) {} // Defaults to invalid.
-
- static DebugLoc getUnknownLoc() { DebugLoc L; L.Idx = ~0U; return L; }
- static DebugLoc get(unsigned idx) { DebugLoc L; L.Idx = idx; return L; }
-
- unsigned getIndex() const { return Idx; }
-
- /// isUnknown - Return true if there is no debug info for the SDNode /
- /// MachineInstr.
- bool isUnknown() const { return Idx == ~0U; }
-
- bool operator==(const DebugLoc &DL) const { return Idx == DL.Idx; }
+ DebugLoc() : LineCol(0), ScopeIdx(0) {} // Defaults to unknown.
+
+ /// get - Get a new DebugLoc that corresponds to the specified line/col
+ /// scope/inline location.
+ static DebugLoc get(unsigned Line, unsigned Col,
+ MDNode *Scope, MDNode *InlinedAt = 0);
+
+ /// getFromDILocation - Translate the DILocation quad into a DebugLoc.
+ static DebugLoc getFromDILocation(MDNode *N);
+
+ /// isUnknown - Return true if this is an unknown location.
+ bool isUnknown() const { return ScopeIdx == 0; }
+
+ unsigned getLine() const {
+ return (LineCol << 8) >> 8; // Mask out column.
+ }
+
+ unsigned getCol() const {
+ return LineCol >> 24;
+ }
+
+ /// getScope - This returns the scope pointer for this DebugLoc, or null if
+ /// invalid.
+ MDNode *getScope(const LLVMContext &Ctx) const;
+
+ /// getInlinedAt - This returns the InlinedAt pointer for this DebugLoc, or
+ /// null if invalid or not present.
+ MDNode *getInlinedAt(const LLVMContext &Ctx) const;
+
+ /// getScopeAndInlinedAt - Return both the Scope and the InlinedAt values.
+ void getScopeAndInlinedAt(MDNode *&Scope, MDNode *&IA,
+ const LLVMContext &Ctx) const;
+
+
+ /// getAsMDNode - This method converts the compressed DebugLoc node into a
+ /// DILocation compatible MDNode.
+ MDNode *getAsMDNode(const LLVMContext &Ctx) const;
+
+ bool operator==(const DebugLoc &DL) const {
+ return LineCol == DL.LineCol && ScopeIdx == DL.ScopeIdx;
+ }
bool operator!=(const DebugLoc &DL) const { return !(*this == DL); }
};
-
- /// DebugLocTracker - This class tracks debug location information.
- ///
- struct DebugLocTracker {
- /// DebugLocations - A vector of unique DebugLocTuple's.
- ///
- std::vector<MDNode *> DebugLocations;
-
- /// DebugIdMap - This maps DebugLocTuple's to indices into the
- /// DebugLocations vector.
- DenseMap<MDNode *, unsigned> DebugIdMap;
-
- DebugLocTracker() {}
- };
-
} // end namespace llvm
#endif /* LLVM_DEBUGLOC_H */
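
The new DebugLoc packs line and column into one 32-bit word: the low 24 bits hold the line, the high 8 bits the column, and 0 means unknown. A standalone sketch of that packing (packLineCol is hypothetical, not part of the class):

#include <cassert>
#include <cstdint>

static uint32_t packLineCol(unsigned Line, unsigned Col) {
  return (Line & 0x00FFFFFFu) | ((Col & 0xFFu) << 24);
}

int main() {
  uint32_t LC = packLineCol(1234, 7);
  assert(((LC << 8) >> 8) == 1234); // same masking as DebugLoc::getLine()
  assert((LC >> 24) == 7);          // same shift as DebugLoc::getCol()
  return 0;
}
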
diff --git a/libclamav/c++/llvm/include/llvm/Support/Dwarf.h b/libclamav/c++/llvm/include/llvm/Support/Dwarf.h
index 5f591d4..3ca8d96 100644
--- a/libclamav/c++/llvm/include/llvm/Support/Dwarf.h
+++ b/libclamav/c++/llvm/include/llvm/Support/Dwarf.h
@@ -22,7 +22,8 @@ namespace llvm {
// Debug info constants.
enum {
- LLVMDebugVersion = (7 << 16), // Current version of debug information.
+ LLVMDebugVersion = (8 << 16), // Current version of debug information.
+ LLVMDebugVersion7 = (7 << 16), // Constant for version 7.
LLVMDebugVersion6 = (6 << 16), // Constant for version 6.
LLVMDebugVersion5 = (5 << 16), // Constant for version 5.
LLVMDebugVersion4 = (4 << 16), // Constant for version 4.
@@ -229,6 +230,7 @@ enum dwarf_constants {
DW_AT_APPLE_block = 0x3fe4,
DW_AT_APPLE_major_runtime_vers = 0x3fe5,
DW_AT_APPLE_runtime_class = 0x3fe6,
+ DW_AT_APPLE_omit_frame_ptr = 0x3fe7,
// Attribute form encodings
DW_FORM_addr = 0x01,
@@ -298,12 +300,99 @@ enum dwarf_constants {
DW_OP_ne = 0x2e,
DW_OP_lit0 = 0x30,
DW_OP_lit1 = 0x31,
+ DW_OP_lit2 = 0x32,
+ DW_OP_lit3 = 0x33,
+ DW_OP_lit4 = 0x34,
+ DW_OP_lit5 = 0x35,
+ DW_OP_lit6 = 0x36,
+ DW_OP_lit7 = 0x37,
+ DW_OP_lit8 = 0x38,
+ DW_OP_lit9 = 0x39,
+ DW_OP_lit10 = 0x3a,
+ DW_OP_lit11 = 0x3b,
+ DW_OP_lit12 = 0x3c,
+ DW_OP_lit13 = 0x3d,
+ DW_OP_lit14 = 0x3e,
+ DW_OP_lit15 = 0x3f,
+ DW_OP_lit16 = 0x40,
+ DW_OP_lit17 = 0x41,
+ DW_OP_lit18 = 0x42,
+ DW_OP_lit19 = 0x43,
+ DW_OP_lit20 = 0x44,
+ DW_OP_lit21 = 0x45,
+ DW_OP_lit22 = 0x46,
+ DW_OP_lit23 = 0x47,
+ DW_OP_lit24 = 0x48,
+ DW_OP_lit25 = 0x49,
+ DW_OP_lit26 = 0x4a,
+ DW_OP_lit27 = 0x4b,
+ DW_OP_lit28 = 0x4c,
+ DW_OP_lit29 = 0x4d,
+ DW_OP_lit30 = 0x4e,
DW_OP_lit31 = 0x4f,
DW_OP_reg0 = 0x50,
DW_OP_reg1 = 0x51,
+ DW_OP_reg2 = 0x52,
+ DW_OP_reg3 = 0x53,
+ DW_OP_reg4 = 0x54,
+ DW_OP_reg5 = 0x55,
+ DW_OP_reg6 = 0x56,
+ DW_OP_reg7 = 0x57,
+ DW_OP_reg8 = 0x58,
+ DW_OP_reg9 = 0x59,
+ DW_OP_reg10 = 0x5a,
+ DW_OP_reg11 = 0x5b,
+ DW_OP_reg12 = 0x5c,
+ DW_OP_reg13 = 0x5d,
+ DW_OP_reg14 = 0x5e,
+ DW_OP_reg15 = 0x5f,
+ DW_OP_reg16 = 0x60,
+ DW_OP_reg17 = 0x61,
+ DW_OP_reg18 = 0x62,
+ DW_OP_reg19 = 0x63,
+ DW_OP_reg20 = 0x64,
+ DW_OP_reg21 = 0x65,
+ DW_OP_reg22 = 0x66,
+ DW_OP_reg23 = 0x67,
+ DW_OP_reg24 = 0x68,
+ DW_OP_reg25 = 0x69,
+ DW_OP_reg26 = 0x6a,
+ DW_OP_reg27 = 0x6b,
+ DW_OP_reg28 = 0x6c,
+ DW_OP_reg29 = 0x6d,
+ DW_OP_reg30 = 0x6e,
DW_OP_reg31 = 0x6f,
DW_OP_breg0 = 0x70,
DW_OP_breg1 = 0x71,
+ DW_OP_breg2 = 0x72,
+ DW_OP_breg3 = 0x73,
+ DW_OP_breg4 = 0x74,
+ DW_OP_breg5 = 0x75,
+ DW_OP_breg6 = 0x76,
+ DW_OP_breg7 = 0x77,
+ DW_OP_breg8 = 0x78,
+ DW_OP_breg9 = 0x79,
+ DW_OP_breg10 = 0x7a,
+ DW_OP_breg11 = 0x7b,
+ DW_OP_breg12 = 0x7c,
+ DW_OP_breg13 = 0x7d,
+ DW_OP_breg14 = 0x7e,
+ DW_OP_breg15 = 0x7f,
+ DW_OP_breg16 = 0x80,
+ DW_OP_breg17 = 0x81,
+ DW_OP_breg18 = 0x82,
+ DW_OP_breg19 = 0x83,
+ DW_OP_breg20 = 0x84,
+ DW_OP_breg21 = 0x85,
+ DW_OP_breg22 = 0x86,
+ DW_OP_breg23 = 0x87,
+ DW_OP_breg24 = 0x88,
+ DW_OP_breg25 = 0x89,
+ DW_OP_breg26 = 0x8a,
+ DW_OP_breg27 = 0x8b,
+ DW_OP_breg28 = 0x8c,
+ DW_OP_breg29 = 0x8d,
+ DW_OP_breg30 = 0x8e,
DW_OP_breg31 = 0x8f,
DW_OP_regx = 0x90,
DW_OP_fbreg = 0x91,
diff --git a/libclamav/c++/llvm/include/llvm/Support/ELF.h b/libclamav/c++/llvm/include/llvm/Support/ELF.h
index e747c7a..83478b7 100644
--- a/libclamav/c++/llvm/include/llvm/Support/ELF.h
+++ b/libclamav/c++/llvm/include/llvm/Support/ELF.h
@@ -10,11 +10,10 @@
// This header contains common, non-processor-specific data structures and
// constants for the ELF file format.
//
-// The details of the ELF32 bits in this file are largely based on
-// the Tool Interface Standard (TIS) Executable and Linking Format
-// (ELF) Specification Version 1.2, May 1995. The ELF64 stuff is not
-// standardized, as far as I can tell. It was largely based on information
-// I found in OpenBSD header files.
+// The details of the ELF32 bits in this file are largely based on the Tool
+// Interface Standard (TIS) Executable and Linking Format (ELF) Specification
+// Version 1.2, May 1995. The ELF64 stuff is based on ELF-64 Object File Format
+// Version 1.5, Draft 2, May 1998 as well as OpenBSD header files.
//
//===----------------------------------------------------------------------===//
@@ -47,8 +46,23 @@ typedef uint16_t Elf64_Quarter;
// Object file magic string.
static const char ElfMagic[] = { 0x7f, 'E', 'L', 'F', '\0' };
+// e_ident size and indices.
+enum {
+ EI_MAG0 = 0, // File identification index.
+ EI_MAG1 = 1, // File identification index.
+ EI_MAG2 = 2, // File identification index.
+ EI_MAG3 = 3, // File identification index.
+ EI_CLASS = 4, // File class.
+ EI_DATA = 5, // Data encoding.
+ EI_VERSION = 6, // File version.
+ EI_OSABI = 7, // OS/ABI identification.
+ EI_ABIVERSION = 8, // ABI version.
+ EI_PAD = 9, // Start of padding bytes.
+ EI_NIDENT = 16 // Number of bytes in e_ident.
+};
+
struct Elf32_Ehdr {
- unsigned char e_ident[16]; // ELF Identification bytes
+ unsigned char e_ident[EI_NIDENT]; // ELF Identification bytes
Elf32_Half e_type; // Type of file (see ET_* below)
Elf32_Half e_machine; // Required architecture for this file (see EM_*)
Elf32_Word e_version; // Must be equal to 1
@@ -62,17 +76,17 @@ struct Elf32_Ehdr {
Elf32_Half e_shentsize; // Size of an entry in the section header table
Elf32_Half e_shnum; // Number of entries in the section header table
Elf32_Half e_shstrndx; // Sect hdr table index of sect name string table
- bool checkMagic () const {
- return (memcmp (e_ident, ElfMagic, strlen (ElfMagic))) == 0;
+ bool checkMagic() const {
+ return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
}
- unsigned char getFileClass () const { return e_ident[4]; }
- unsigned char getDataEncoding () { return e_ident[5]; }
+ unsigned char getFileClass() const { return e_ident[EI_CLASS]; }
+ unsigned char getDataEncoding() const { return e_ident[EI_DATA]; }
};
// 64-bit ELF header. Fields are the same as for ELF32, but with different
// types (see above).
struct Elf64_Ehdr {
- unsigned char e_ident[16];
+ unsigned char e_ident[EI_NIDENT];
Elf64_Quarter e_type;
Elf64_Quarter e_machine;
Elf64_Half e_version;
@@ -86,6 +100,11 @@ struct Elf64_Ehdr {
Elf64_Quarter e_shentsize;
Elf64_Quarter e_shnum;
Elf64_Quarter e_shstrndx;
+ bool checkMagic() const {
+ return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
+ }
+ unsigned char getFileClass() const { return e_ident[EI_CLASS]; }
+ unsigned char getDataEncoding() const { return e_ident[EI_DATA]; }
};
// File types
@@ -99,6 +118,12 @@ enum {
ET_HIPROC = 0xffff // Processor-specific
};
+// Versioning
+enum {
+ EV_NONE = 0,
+ EV_CURRENT = 1
+};
+
// Machine architectures
enum {
EM_NONE = 0, // No machine
@@ -111,6 +136,7 @@ enum {
EM_860 = 7, // Intel 80860
EM_MIPS = 8, // MIPS R3000
EM_PPC = 20, // PowerPC
+ EM_PPC64 = 21, // PowerPC64
EM_ARM = 40, // ARM
EM_ALPHA = 41, // DEC Alpha
EM_SPARCV9 = 43, // SPARC V9
@@ -125,10 +151,92 @@ enum {
// Object file byte orderings.
enum {
+ ELFDATANONE = 0, // Invalid data encoding.
ELFDATA2LSB = 1, // Little-endian object file
ELFDATA2MSB = 2 // Big-endian object file
};
+// OS ABI identification.
+enum {
+ ELFOSABI_NONE = 0, // UNIX System V ABI
+ ELFOSABI_HPUX = 1, // HP-UX operating system
+ ELFOSABI_NETBSD = 2, // NetBSD
+ ELFOSABI_LINUX = 3, // GNU/Linux
+ ELFOSABI_HURD = 4, // GNU/Hurd
+ ELFOSABI_SOLARIS = 6, // Solaris
+ ELFOSABI_AIX = 7, // AIX
+ ELFOSABI_IRIX = 8, // IRIX
+ ELFOSABI_FREEBSD = 9, // FreeBSD
+ ELFOSABI_TRU64 = 10, // TRU64 UNIX
+ ELFOSABI_MODESTO = 11, // Novell Modesto
+ ELFOSABI_OPENBSD = 12, // OpenBSD
+ ELFOSABI_OPENVMS = 13, // OpenVMS
+ ELFOSABI_NSK = 14, // Hewlett-Packard Non-Stop Kernel
+ ELFOSABI_AROS = 15, // AROS
+ ELFOSABI_FENIXOS = 16, // FenixOS
+ ELFOSABI_C6000_ELFABI = 64, // Bare-metal TMS320C6000
+ ELFOSABI_C6000_LINUX = 65, // Linux TMS320C6000
+ ELFOSABI_ARM = 97, // ARM
+ ELFOSABI_STANDALONE = 255 // Standalone (embedded) application
+};
+
+// X86_64 relocations.
+enum {
+ R_X86_64_NONE = 0,
+ R_X86_64_64 = 1,
+ R_X86_64_PC32 = 2,
+ R_X86_64_GOT32 = 3,
+ R_X86_64_PLT32 = 4,
+ R_X86_64_COPY = 5,
+ R_X86_64_GLOB_DAT = 6,
+ R_X86_64_JUMP_SLOT = 7,
+ R_X86_64_RELATIVE = 8,
+ R_X86_64_GOTPCREL = 9,
+ R_X86_64_32 = 10,
+ R_X86_64_32S = 11,
+ R_X86_64_16 = 12,
+ R_X86_64_PC16 = 13,
+ R_X86_64_8 = 14,
+ R_X86_64_PC8 = 15,
+ R_X86_64_DTPMOD64 = 16,
+ R_X86_64_DTPOFF64 = 17,
+ R_X86_64_TPOFF64 = 18,
+ R_X86_64_TLSGD = 19,
+ R_X86_64_TLSLD = 20,
+ R_X86_64_DTPOFF32 = 21,
+ R_X86_64_GOTTPOFF = 22,
+ R_X86_64_TPOFF32 = 23,
+ R_X86_64_PC64 = 24,
+ R_X86_64_GOTOFF64 = 25,
+ R_X86_64_GOTPC32 = 26,
+ R_X86_64_SIZE32 = 32,
+ R_X86_64_SIZE64 = 33,
+ R_X86_64_GOTPC32_TLSDESC = 34,
+ R_X86_64_TLSDESC_CALL = 35,
+ R_X86_64_TLSDESC = 36
+};
+
+// i386 relocations.
+// TODO: this is just a subset
+enum {
+ R_386_NONE = 0,
+ R_386_32 = 1,
+ R_386_PC32 = 2,
+ R_386_GOT32 = 3,
+ R_386_PLT32 = 4,
+ R_386_COPY = 5,
+ R_386_GLOB_DAT = 6,
+ R_386_JUMP_SLOT = 7,
+ R_386_RELATIVE = 8,
+ R_386_GOTOFF = 9,
+ R_386_GOTPC = 10,
+ R_386_32PLT = 11,
+ R_386_16 = 20,
+ R_386_PC16 = 21,
+ R_386_8 = 22,
+ R_386_PC8 = 23
+};
+
// Section header.
struct Elf32_Shdr {
Elf32_Word sh_name; // Section name (index into string table)
@@ -170,22 +278,29 @@ enum {
// Section types.
enum {
- SHT_NULL = 0, // No associated section (inactive entry).
- SHT_PROGBITS = 1, // Program-defined contents.
- SHT_SYMTAB = 2, // Symbol table.
- SHT_STRTAB = 3, // String table.
- SHT_RELA = 4, // Relocation entries; explicit addends.
- SHT_HASH = 5, // Symbol hash table.
- SHT_DYNAMIC = 6, // Information for dynamic linking.
- SHT_NOTE = 7, // Information about the file.
- SHT_NOBITS = 8, // Data occupies no space in the file.
- SHT_REL = 9, // Relocation entries; no explicit addends.
- SHT_SHLIB = 10, // Reserved.
- SHT_DYNSYM = 11, // Symbol table.
- SHT_LOPROC = 0x70000000, // Lowest processor architecture-specific type.
- SHT_HIPROC = 0x7fffffff, // Highest processor architecture-specific type.
- SHT_LOUSER = 0x80000000, // Lowest type reserved for applications.
- SHT_HIUSER = 0xffffffff // Highest type reserved for applications.
+ SHT_NULL = 0, // No associated section (inactive entry).
+ SHT_PROGBITS = 1, // Program-defined contents.
+ SHT_SYMTAB = 2, // Symbol table.
+ SHT_STRTAB = 3, // String table.
+ SHT_RELA = 4, // Relocation entries; explicit addends.
+ SHT_HASH = 5, // Symbol hash table.
+ SHT_DYNAMIC = 6, // Information for dynamic linking.
+ SHT_NOTE = 7, // Information about the file.
+ SHT_NOBITS = 8, // Data occupies no space in the file.
+ SHT_REL = 9, // Relocation entries; no explicit addends.
+ SHT_SHLIB = 10, // Reserved.
+ SHT_DYNSYM = 11, // Symbol table.
+ SHT_INIT_ARRAY = 14, // Pointers to initialisation functions.
+ SHT_FINI_ARRAY = 15, // Pointers to termination functions.
+ SHT_PREINIT_ARRAY = 16, // Pointers to pre-init functions.
+ SHT_GROUP = 17, // Section group.
+ SHT_SYMTAB_SHNDX = 18, // Indices for SHN_XINDEX entries.
+ SHT_LOOS = 0x60000000, // Lowest operating system-specific type.
+ SHT_HIOS = 0x6fffffff, // Highest operating system-specific type.
+ SHT_LOPROC = 0x70000000, // Lowest processor architecture-specific type.
+ SHT_HIPROC = 0x7fffffff, // Highest processor architecture-specific type.
+ SHT_LOUSER = 0x80000000, // Lowest type reserved for applications.
+ SHT_HIUSER = 0xffffffff // Highest type reserved for applications.
};
// Section flags.
@@ -196,7 +311,7 @@ enum {
SHF_MASKPROC = 0xf0000000 // Bits indicating processor-specific flags.
};
-// Symbol table entries.
+// Symbol table entries for ELF32.
struct Elf32_Sym {
Elf32_Word st_name; // Symbol name (index into string table)
Elf32_Addr st_value; // Value or address associated with the symbol
@@ -207,15 +322,41 @@ struct Elf32_Sym {
// These accessors and mutators correspond to the ELF32_ST_BIND,
// ELF32_ST_TYPE, and ELF32_ST_INFO macros defined in the ELF specification:
- unsigned char getBinding () const { return st_info >> 4; }
- unsigned char getType () const { return st_info & 0x0f; }
- void setBinding (unsigned char b) { setBindingAndType (b, getType ()); }
- void setType (unsigned char t) { setBindingAndType (getBinding (), t); }
- void setBindingAndType (unsigned char b, unsigned char t) {
+ unsigned char getBinding() const { return st_info >> 4; }
+ unsigned char getType() const { return st_info & 0x0f; }
+ void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
+ void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
+ void setBindingAndType(unsigned char b, unsigned char t) {
+ st_info = (b << 4) + (t & 0x0f);
+ }
+};
+
+// Symbol table entries for ELF64.
+struct Elf64_Sym {
+ Elf64_Word st_name; // Symbol name (index into string table)
+ unsigned char st_info; // Symbol's type and binding attributes
+ unsigned char st_other; // Must be zero; reserved
+ Elf64_Half st_shndx; // Which section (header table index) it's defined in
+ Elf64_Addr st_value; // Value or address associated with the symbol
+ Elf64_Xword st_size; // Size of the symbol
+
+ // These accessors and mutators are identical to those defined for ELF32
+ // symbol table entries.
+ unsigned char getBinding() const { return st_info >> 4; }
+ unsigned char getType() const { return st_info & 0x0f; }
+ void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
+ void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
+ void setBindingAndType(unsigned char b, unsigned char t) {
st_info = (b << 4) + (t & 0x0f);
}
};
+// The size (in bytes) of symbol table entries.
+enum {
+ SYMENTRY_SIZE32 = 16, // 32-bit symbol entry size
+ SYMENTRY_SIZE64 = 24 // 64-bit symbol entry size.
+};
+
// Symbol bindings.
enum {
STB_LOCAL = 0, // Local symbol, not visible outside obj file containing def
@@ -232,10 +373,19 @@ enum {
STT_FUNC = 2, // Symbol is executable code (function, etc.)
STT_SECTION = 3, // Symbol refers to a section
STT_FILE = 4, // Local, absolute symbol that refers to a file
+ STT_COMMON = 5, // An uninitialised common block
+ STT_TLS = 6, // Thread local data object
STT_LOPROC = 13, // Lowest processor-specific symbol type
STT_HIPROC = 15 // Highest processor-specific symbol type
};
+enum {
+ STV_DEFAULT = 0, // Visibility is specified by binding type
+ STV_INTERNAL = 1, // Defined by processor supplements
+ STV_HIDDEN = 2, // Not visible to other components
+ STV_PROTECTED = 3 // Visible in other components but not preemptable
+};
+
// Relocation entry, without explicit addend.
struct Elf32_Rel {
Elf32_Addr r_offset; // Location (file byte offset, or program virtual addr)
@@ -243,13 +393,13 @@ struct Elf32_Rel {
// These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
// and ELF32_R_INFO macros defined in the ELF specification:
- Elf32_Word getSymbol () const { return (r_info >> 8); }
- unsigned char getType () const { return (unsigned char) (r_info & 0x0ff); }
- void setSymbol (Elf32_Word s) { setSymbolAndType (s, getType ()); }
- void setType (unsigned char t) { setSymbolAndType (getSymbol(), t); }
- void setSymbolAndType (Elf32_Word s, unsigned char t) {
+ Elf32_Word getSymbol() const { return (r_info >> 8); }
+ unsigned char getType() const { return (unsigned char) (r_info & 0x0ff); }
+ void setSymbol(Elf32_Word s) { setSymbolAndType(s, getType()); }
+ void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(Elf32_Word s, unsigned char t) {
r_info = (s << 8) + t;
- };
+ }
};
// Relocation entry with explicit addend.
@@ -260,16 +410,53 @@ struct Elf32_Rela {
// These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
// and ELF32_R_INFO macros defined in the ELF specification:
- Elf32_Word getSymbol () const { return (r_info >> 8); }
- unsigned char getType () const { return (unsigned char) (r_info & 0x0ff); }
- void setSymbol (Elf32_Word s) { setSymbolAndType (s, getType ()); }
- void setType (unsigned char t) { setSymbolAndType (getSymbol(), t); }
- void setSymbolAndType (Elf32_Word s, unsigned char t) {
+ Elf32_Word getSymbol() const { return (r_info >> 8); }
+ unsigned char getType() const { return (unsigned char) (r_info & 0x0ff); }
+ void setSymbol(Elf32_Word s) { setSymbolAndType(s, getType()); }
+ void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(Elf32_Word s, unsigned char t) {
r_info = (s << 8) + t;
- };
+ }
+};
+
+// Relocation entry, without explicit addend.
+struct Elf64_Rel {
+ Elf64_Addr r_offset; // Location (file byte offset, or program virtual addr).
+ Elf64_Xword r_info; // Symbol table index and type of relocation to apply.
+
+ // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
+ // and ELF64_R_INFO macros defined in the ELF specification:
+ Elf64_Xword getSymbol() const { return (r_info >> 32); }
+ unsigned char getType() const {
+ return (unsigned char) (r_info & 0xffffffffL);
+ }
+ void setSymbol(Elf32_Word s) { setSymbolAndType(s, getType()); }
+ void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(Elf64_Xword s, unsigned char t) {
+ r_info = (s << 32) + (t&0xffffffffL);
+ }
};
-// Program header.
+// Relocation entry with explicit addend.
+struct Elf64_Rela {
+ Elf64_Addr r_offset; // Location (file byte offset, or program virtual addr).
+ Elf64_Xword r_info; // Symbol table index and type of relocation to apply.
+ Elf64_Sxword r_addend; // Compute value for relocatable field by adding this.
+
+ // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
+ // and ELF64_R_INFO macros defined in the ELF specification:
+ Elf64_Xword getSymbol() const { return (r_info >> 32); }
+ unsigned char getType() const {
+ return (unsigned char) (r_info & 0xffffffffL);
+ }
+ void setSymbol(Elf64_Xword s) { setSymbolAndType(s, getType()); }
+ void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(Elf64_Xword s, unsigned char t) {
+ r_info = (s << 32) + (t&0xffffffffL);
+ }
+};
+
+// Program header for ELF32.
struct Elf32_Phdr {
Elf32_Word p_type; // Type of segment
Elf32_Off p_offset; // File offset where segment is located, in bytes
@@ -281,6 +468,18 @@ struct Elf32_Phdr {
Elf32_Word p_align; // Segment alignment constraint
};
+// Program header for ELF64.
+struct Elf64_Phdr {
+ Elf64_Word p_type; // Type of segment
+ Elf64_Word p_flags; // Segment flags
+ Elf64_Off p_offset; // File offset where segment is located, in bytes
+ Elf64_Addr p_vaddr; // Virtual address of beginning of segment
+ Elf64_Addr p_paddr; // Physical address of beginning of segment (OS-specific)
+ Elf64_Xword p_filesz; // Num. of bytes in file image of segment (may be zero)
+ Elf64_Xword p_memsz; // Num. of bytes in mem image of segment (may be zero)
+ Elf64_Xword p_align; // Segment alignment constraint
+};
+
// Segment types.
enum {
PT_NULL = 0, // Unused segment.
@@ -302,6 +501,65 @@ enum {
PF_MASKPROC = 0xf0000000 // Unspecified
};
+// Dynamic table entry for ELF32.
+struct Elf32_Dyn
+{
+ Elf32_Sword d_tag; // Type of dynamic table entry.
+ union
+ {
+ Elf32_Word d_val; // Integer value of entry.
+ Elf32_Addr d_ptr; // Pointer value of entry.
+ } d_un;
+};
+
+// Dynamic table entry for ELF64.
+struct Elf64_Dyn
+{
+ Elf64_Sxword d_tag; // Type of dynamic table entry.
+ union
+ {
+ Elf64_Xword d_val; // Integer value of entry.
+ Elf64_Addr d_ptr; // Pointer value of entry.
+ } d_un;
+};
+
+// Dynamic table entry tags.
+enum {
+ DT_NULL = 0, // Marks end of dynamic array.
+ DT_NEEDED = 1, // String table offset of needed library.
+ DT_PLTRELSZ = 2, // Size of relocation entries in PLT.
+ DT_PLTGOT = 3, // Address associated with linkage table.
+ DT_HASH = 4, // Address of symbolic hash table.
+ DT_STRTAB = 5, // Address of dynamic string table.
+ DT_SYMTAB = 6, // Address of dynamic symbol table.
+ DT_RELA = 7, // Address of relocation table (Rela entries).
+ DT_RELASZ = 8, // Size of Rela relocation table.
+ DT_RELAENT = 9, // Size of a Rela relocation entry.
+ DT_STRSZ = 10, // Total size of the string table.
+ DT_SYMENT = 11, // Size of a symbol table entry.
+ DT_INIT = 12, // Address of initialization function.
+ DT_FINI = 13, // Address of termination function.
+ DT_SONAME = 14, // String table offset of a shared object's name.
+ DT_RPATH = 15, // String table offset of library search path.
+ DT_SYMBOLIC = 16, // Changes symbol resolution algorithm.
+ DT_REL = 17, // Address of relocation table (Rel entries).
+ DT_RELSZ = 18, // Size of Rel relocation table.
+ DT_RELENT = 19, // Size of a Rel relocation entry.
+ DT_PLTREL = 20, // Type of relocation entry used for linking.
+ DT_DEBUG = 21, // Reserved for debugger.
+ DT_TEXTREL = 22, // Relocations exist for non-writable segments.
+ DT_JMPREL = 23, // Address of relocations associated with PLT.
+ DT_BIND_NOW = 24, // Process all relocations before execution.
+ DT_INIT_ARRAY = 25, // Pointer to array of initialization functions.
+ DT_FINI_ARRAY = 26, // Pointer to array of termination functions.
+ DT_INIT_ARRAYSZ = 27, // Size of DT_INIT_ARRAY.
+ DT_FINI_ARRAYSZ = 28, // Size of DT_FINI_ARRAY.
+ DT_LOOS = 0x60000000, // Start of environment specific tags.
+ DT_HIOS = 0x6FFFFFFF, // End of environment specific tags.
+ DT_LOPROC = 0x70000000, // Start of processor specific tags.
+ DT_HIPROC = 0x7FFFFFFF // End of processor specific tags.
+};
+
} // end namespace ELF
} // end namespace llvm
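For readers unfamiliar with the new accessors, a minimal sketch (illustrative only, values made up) of how Elf64_Rela packs the symbol index and relocation type into r_info:

#include "llvm/Support/ELF.h"
#include <cassert>

static void demoRelaAccessors() {
  llvm::ELF::Elf64_Rela Rel;
  Rel.r_offset = 0x1000;
  Rel.r_addend = -8;
  Rel.setSymbolAndType(42, 7);   // symbol index in the upper 32 bits, type in the lower
  assert(Rel.getSymbol() == 42);
  assert(Rel.getType() == 7);
  Rel.setType(1);                // keeps the symbol, replaces only the type
}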
diff --git a/libclamav/c++/llvm/include/llvm/Support/ErrorHandling.h b/libclamav/c++/llvm/include/llvm/Support/ErrorHandling.h
index 4d24ada..9854657 100644
--- a/libclamav/c++/llvm/include/llvm/Support/ErrorHandling.h
+++ b/libclamav/c++/llvm/include/llvm/Support/ErrorHandling.h
@@ -1,4 +1,4 @@
-//===- llvm/Support/ErrorHandling.h - Callbacks for errors ------*- C++ -*-===//
+//===- llvm/Support/ErrorHandling.h - Fatal error handling ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines an API used to indicate error conditions.
-// Callbacks can be registered for these errors through this API.
+// This file defines an API used to indicate fatal error conditions. Non-fatal
+// errors (most of them) should be handled through LLVMContext.
//
//===----------------------------------------------------------------------===//
@@ -22,10 +22,10 @@ namespace llvm {
class Twine;
/// An error handler callback.
- typedef void (*llvm_error_handler_t)(void *user_data,
- const std::string& reason);
+ typedef void (*fatal_error_handler_t)(void *user_data,
+ const std::string& reason);
- /// llvm_instal_error_handler - Installs a new error handler to be used
+ /// install_fatal_error_handler - Installs a new error handler to be used
/// whenever a serious (non-recoverable) error is encountered by LLVM.
///
/// If you are using llvm_start_multithreaded, you should register the handler
@@ -44,13 +44,25 @@ namespace llvm {
///
/// \param user_data - An argument which will be passed to the install error
/// handler.
- void llvm_install_error_handler(llvm_error_handler_t handler,
- void *user_data = 0);
+ void install_fatal_error_handler(fatal_error_handler_t handler,
+ void *user_data = 0);
/// Restores default error handling behaviour.
/// This must not be called between llvm_start_multithreaded() and
/// llvm_stop_multithreaded().
- void llvm_remove_error_handler();
+ void remove_fatal_error_handler();
+
+ /// ScopedFatalErrorHandler - This is a simple helper class which just
+ /// calls install_fatal_error_handler in its constructor and
+ /// remove_fatal_error_handler in its destructor.
+ struct ScopedFatalErrorHandler {
+ explicit ScopedFatalErrorHandler(fatal_error_handler_t handler,
+ void *user_data = 0) {
+ install_fatal_error_handler(handler, user_data);
+ }
+
+ ~ScopedFatalErrorHandler() { remove_fatal_error_handler(); }
+ };
/// Reports a serious error, calling any installed error handler. These
/// functions are intended to be used for error conditions which are outside
@@ -60,9 +72,9 @@ namespace llvm {
/// standard error, followed by a newline.
/// After the error handler is called this function will call exit(1), it
/// does not return.
- NORETURN void llvm_report_error(const char *reason);
- NORETURN void llvm_report_error(const std::string &reason);
- NORETURN void llvm_report_error(const Twine &reason);
+ NORETURN void report_fatal_error(const char *reason);
+ NORETURN void report_fatal_error(const std::string &reason);
+ NORETURN void report_fatal_error(const Twine &reason);
/// This function calls abort(), and prints the optional message to stderr.
/// Use the llvm_unreachable macro (that adds location info), instead of
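A short sketch of the renamed fatal-error API (handler and user data are made up for illustration; this is not code from the commit):

#include "llvm/Support/ErrorHandling.h"
#include <cstdio>

static void myFatalHandler(void *user_data, const std::string &reason) {
  std::fprintf(stderr, "[%s] LLVM fatal error: %s\n",
               static_cast<const char *>(user_data), reason.c_str());
}

static void demoFatalHandler() {
  // Installed in the constructor, removed again in the destructor.
  llvm::ScopedFatalErrorHandler Guard(myFatalHandler, (void *)"demo");
  // On an unrecoverable condition one would call:
  //   llvm::report_fatal_error("something went badly wrong");  // never returns
}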
diff --git a/libclamav/c++/llvm/include/llvm/Support/FileUtilities.h b/libclamav/c++/llvm/include/llvm/Support/FileUtilities.h
index cc8f953..d0dd4a7 100644
--- a/libclamav/c++/llvm/include/llvm/Support/FileUtilities.h
+++ b/libclamav/c++/llvm/include/llvm/Support/FileUtilities.h
@@ -40,6 +40,8 @@ namespace llvm {
sys::Path Filename;
bool DeleteIt;
public:
+ FileRemover() : DeleteIt(false) {}
+
explicit FileRemover(const sys::Path &filename, bool deleteIt = true)
: Filename(filename), DeleteIt(deleteIt) {}
@@ -50,6 +52,17 @@ namespace llvm {
}
}
+ /// setFile - Give ownership of the file to the FileRemover so it will
+ /// be removed when the object is destroyed. If the FileRemover already
+ /// had ownership of a file, remove it first.
+ void setFile(const sys::Path &filename, bool deleteIt = true) {
+ if (DeleteIt)
+ Filename.eraseFromDisk();
+
+ Filename = filename;
+ DeleteIt = deleteIt;
+ }
+
/// releaseFile - Take ownership of the file away from the FileRemover so it
/// will not be removed when the object is destroyed.
void releaseFile() { DeleteIt = false; }
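The new default constructor plus setFile() allow deferring the decision of which file to own; a sketch (file name made up):

#include "llvm/Support/FileUtilities.h"
#include "llvm/System/Path.h"

static void writeOutput(bool Succeeded) {
  llvm::FileRemover Remover;              // owns nothing yet
  llvm::sys::Path Tmp("output.tmp");
  Remover.setFile(Tmp);                   // now owns Tmp
  // ... write to Tmp ...
  if (Succeeded)
    Remover.releaseFile();                // keep the file on success
}                                         // otherwise it is erased here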
diff --git a/libclamav/c++/llvm/include/llvm/Support/GraphWriter.h b/libclamav/c++/llvm/include/llvm/Support/GraphWriter.h
index 28fa92f..287c5ba 100644
--- a/libclamav/c++/llvm/include/llvm/Support/GraphWriter.h
+++ b/libclamav/c++/llvm/include/llvm/Support/GraphWriter.h
@@ -89,7 +89,7 @@ class GraphWriter {
public:
GraphWriter(raw_ostream &o, const GraphType &g, bool SN) : O(o), G(g) {
- DTraits = DOTTraits(SN);
+ DTraits = DOTTraits(SN);
}
void writeHeader(const std::string &Name) {
@@ -122,7 +122,20 @@ public:
// Loop over the graph, printing it out...
for (node_iterator I = GTraits::nodes_begin(G), E = GTraits::nodes_end(G);
I != E; ++I)
- writeNode(*I);
+ if (!isNodeHidden(*I))
+ writeNode(*I);
+ }
+
+ bool isNodeHidden(NodeType &Node) {
+ return isNodeHidden(&Node);
+ }
+
+ bool isNodeHidden(NodeType *const *Node) {
+ return isNodeHidden(*Node);
+ }
+
+ bool isNodeHidden(NodeType *Node) {
+ return DTraits.isNodeHidden(Node);
}
void writeNode(NodeType& Node) {
@@ -174,7 +187,8 @@ public:
unsigned i = 0, e = DTraits.numEdgeDestLabels(Node);
for (; i != e && i != 64; ++i) {
if (i) O << "|";
- O << "<d" << i << ">" << DTraits.getEdgeDestLabel(Node, i);
+ O << "<d" << i << ">"
+ << DOT::EscapeString(DTraits.getEdgeDestLabel(Node, i));
}
if (i != e)
@@ -188,9 +202,11 @@ public:
child_iterator EI = GTraits::child_begin(Node);
child_iterator EE = GTraits::child_end(Node);
for (unsigned i = 0; EI != EE && i != 64; ++EI, ++i)
- writeEdge(Node, i, EI);
+ if (!DTraits.isNodeHidden(*EI))
+ writeEdge(Node, i, EI);
for (; EI != EE; ++EI)
- writeEdge(Node, 64, EI);
+ if (!DTraits.isNodeHidden(*EI))
+ writeEdge(Node, 64, EI);
}
void writeEdge(NodeType *Node, unsigned edgeidx, child_iterator EI) {
@@ -230,7 +246,7 @@ public:
for (unsigned i = 0; i != NumEdgeSources; ++i) {
if (i) O << "|";
O << "<s" << i << ">";
- if (EdgeSourceLabels) O << (*EdgeSourceLabels)[i];
+ if (EdgeSourceLabels) O << DOT::EscapeString((*EdgeSourceLabels)[i]);
}
O << "}}";
}
@@ -255,6 +271,12 @@ public:
O << "[" << Attrs << "]";
O << ";\n";
}
+
+ /// getOStream - Get the raw output stream into the graph file. Useful to
+ /// write fancy things using addCustomGraphFeatures().
+ raw_ostream &getOStream() {
+ return O;
+ }
};
template<typename GraphType>
@@ -300,7 +322,7 @@ sys::Path WriteGraph(const GraphType &G, const std::string &Name,
raw_fd_ostream O(Filename.c_str(), ErrorInfo);
if (ErrorInfo.empty()) {
- WriteGraph(O, G, ShortNames, Name, Title);
+ llvm::WriteGraph(O, G, ShortNames, Name, Title);
errs() << " done. \n";
} else {
errs() << "error opening file '" << Filename.str() << "' for writing!\n";
@@ -317,7 +339,7 @@ template<typename GraphType>
void ViewGraph(const GraphType &G, const std::string &Name,
bool ShortNames = false, const std::string &Title = "",
GraphProgram::Name Program = GraphProgram::DOT) {
- sys::Path Filename = WriteGraph(G, Name, ShortNames, Title);
+ sys::Path Filename = llvm::WriteGraph(G, Name, ShortNames, Title);
if (Filename.isEmpty())
return;
diff --git a/libclamav/c++/llvm/include/llvm/Support/IRBuilder.h b/libclamav/c++/llvm/include/llvm/Support/IRBuilder.h
index 1f4e598..c827cce 100644
--- a/libclamav/c++/llvm/include/llvm/Support/IRBuilder.h
+++ b/libclamav/c++/llvm/include/llvm/Support/IRBuilder.h
@@ -40,8 +40,7 @@ protected:
/// IRBuilderBase - Common base class shared among various IRBuilders.
class IRBuilderBase {
- unsigned DbgMDKind;
- MDNode *CurDbgLocation;
+ DebugLoc CurDbgLocation;
protected:
BasicBlock *BB;
BasicBlock::iterator InsertPt;
@@ -49,7 +48,7 @@ protected:
public:
IRBuilderBase(LLVMContext &context)
- : DbgMDKind(0), CurDbgLocation(0), Context(context) {
+ : Context(context) {
ClearInsertionPoint();
}
@@ -65,6 +64,7 @@ public:
BasicBlock *GetInsertBlock() const { return BB; }
BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
+ LLVMContext &getContext() const { return Context; }
/// SetInsertPoint - This specifies that created instructions should be
/// appended to the end of the specified block.
@@ -82,12 +82,62 @@ public:
/// SetCurrentDebugLocation - Set location information used by debugging
/// information.
- void SetCurrentDebugLocation(MDNode *L);
- MDNode *getCurrentDebugLocation() const { return CurDbgLocation; }
+ void SetCurrentDebugLocation(const DebugLoc &L) {
+ CurDbgLocation = L;
+ }
+
+ /// getCurrentDebugLocation - Get location information used by debugging
+ /// information.
+ const DebugLoc &getCurrentDebugLocation() const { return CurDbgLocation; }
/// SetInstDebugLocation - If this builder has a current debug location, set
/// it on the specified instruction.
- void SetInstDebugLocation(Instruction *I) const;
+ void SetInstDebugLocation(Instruction *I) const {
+ if (!CurDbgLocation.isUnknown())
+ I->setDebugLoc(CurDbgLocation);
+ }
+
+ /// InsertPoint - A saved insertion point.
+ class InsertPoint {
+ BasicBlock *Block;
+ BasicBlock::iterator Point;
+
+ public:
+ /// Creates a new insertion point which doesn't point to anything.
+ InsertPoint() : Block(0) {}
+
+ /// Creates a new insertion point at the given location.
+ InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
+ : Block(InsertBlock), Point(InsertPoint) {}
+
+ /// isSet - Returns true if this insert point is set.
+ bool isSet() const { return (Block != 0); }
+
+ llvm::BasicBlock *getBlock() const { return Block; }
+ llvm::BasicBlock::iterator getPoint() const { return Point; }
+ };
+
+ /// saveIP - Returns the current insert point.
+ InsertPoint saveIP() const {
+ return InsertPoint(GetInsertBlock(), GetInsertPoint());
+ }
+
+ /// saveAndClearIP - Returns the current insert point, clearing it
+ /// in the process.
+ InsertPoint saveAndClearIP() {
+ InsertPoint IP(GetInsertBlock(), GetInsertPoint());
+ ClearInsertionPoint();
+ return IP;
+ }
+
+ /// restoreIP - Sets the current insert point to a previously-saved
+ /// location.
+ void restoreIP(InsertPoint IP) {
+ if (IP.isSet())
+ SetInsertPoint(IP.getBlock(), IP.getPoint());
+ else
+ ClearInsertionPoint();
+ }
//===--------------------------------------------------------------------===//
// Miscellaneous creation methods.
@@ -98,33 +148,68 @@ public:
/// specified. If Name is specified, it is the name of the global variable
/// created.
Value *CreateGlobalString(const char *Str = "", const Twine &Name = "");
+
+ /// getInt1 - Get a constant value representing either true or false.
+ ConstantInt *getInt1(bool V) {
+ return ConstantInt::get(getInt1Ty(), V);
+ }
+
+ /// getTrue - Get the constant value for i1 true.
+ ConstantInt *getTrue() {
+ return ConstantInt::getTrue(Context);
+ }
+
+ /// getFalse - Get the constant value for i1 false.
+ ConstantInt *getFalse() {
+ return ConstantInt::getFalse(Context);
+ }
+
+ /// getInt8 - Get a constant 8-bit value.
+ ConstantInt *getInt8(uint8_t C) {
+ return ConstantInt::get(getInt8Ty(), C);
+ }
+
+ /// getInt16 - Get a constant 16-bit value.
+ ConstantInt *getInt16(uint16_t C) {
+ return ConstantInt::get(getInt16Ty(), C);
+ }
+
+ /// getInt32 - Get a constant 32-bit value.
+ ConstantInt *getInt32(uint32_t C) {
+ return ConstantInt::get(getInt32Ty(), C);
+ }
+
+ /// getInt64 - Get a constant 64-bit value.
+ ConstantInt *getInt64(uint64_t C) {
+ return ConstantInt::get(getInt64Ty(), C);
+ }
//===--------------------------------------------------------------------===//
// Type creation methods
//===--------------------------------------------------------------------===//
/// getInt1Ty - Fetch the type representing a single bit
- const Type *getInt1Ty() {
+ const IntegerType *getInt1Ty() {
return Type::getInt1Ty(Context);
}
/// getInt8Ty - Fetch the type representing an 8-bit integer.
- const Type *getInt8Ty() {
+ const IntegerType *getInt8Ty() {
return Type::getInt8Ty(Context);
}
/// getInt16Ty - Fetch the type representing a 16-bit integer.
- const Type *getInt16Ty() {
+ const IntegerType *getInt16Ty() {
return Type::getInt16Ty(Context);
}
/// getInt32Ty - Fetch the type representing a 32-bit integer.
- const Type *getInt32Ty() {
+ const IntegerType *getInt32Ty() {
return Type::getInt32Ty(Context);
}
/// getInt64Ty - Fetch the type representing a 64-bit integer.
- const Type *getInt64Ty() {
+ const IntegerType *getInt64Ty() {
return Type::getInt64Ty(Context);
}
@@ -143,7 +228,7 @@ public:
return Type::getVoidTy(Context);
}
- const Type *getInt8PtrTy() {
+ const PointerType *getInt8PtrTy() {
return Type::getInt8PtrTy(Context);
}
@@ -208,7 +293,7 @@ public:
template<typename InstTy>
InstTy *Insert(InstTy *I, const Twine &Name = "") const {
this->InsertHelper(I, Name, BB, InsertPt);
- if (getCurrentDebugLocation() != 0)
+ if (!getCurrentDebugLocation().isUnknown())
this->SetInstDebugLocation(I);
return I;
}
@@ -424,12 +509,19 @@ public:
return Folder.CreateFRem(LC, RC);
return Insert(BinaryOperator::CreateFRem(LHS, RHS), Name);
}
+
Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "") {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Folder.CreateShl(LC, RC);
return Insert(BinaryOperator::CreateShl(LHS, RHS), Name);
}
+ Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "") {
+ Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ return Folder.CreateShl(LC, RHSC);
+ return Insert(BinaryOperator::CreateShl(LHS, RHSC), Name);
+ }
Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "") {
Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
if (Constant *LC = dyn_cast<Constant>(LHS))
@@ -443,23 +535,35 @@ public:
return Folder.CreateLShr(LC, RC);
return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
}
+ Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
+ Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ return Folder.CreateLShr(LC, RHSC);
+ return Insert(BinaryOperator::CreateLShr(LHS, RHSC), Name);
+ }
Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
if (Constant *LC = dyn_cast<Constant>(LHS))
return Folder.CreateLShr(LC, RHSC);
return Insert(BinaryOperator::CreateLShr(LHS, RHSC), Name);
}
-
+
Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "") {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Folder.CreateAShr(LC, RC);
return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
}
+ Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
+ Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ return Folder.CreateAShr(LC, RHSC);
+ return Insert(BinaryOperator::CreateAShr(LHS, RHSC), Name);
+ }
Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
if (Constant *LC = dyn_cast<Constant>(LHS))
- return Folder.CreateSShr(LC, RHSC);
+ return Folder.CreateAShr(LC, RHSC);
return Insert(BinaryOperator::CreateAShr(LHS, RHSC), Name);
}
@@ -472,6 +576,19 @@ public:
}
return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
}
+ Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
+ Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ return Folder.CreateAnd(LC, RHSC);
+ return Insert(BinaryOperator::CreateAnd(LHS, RHSC), Name);
+ }
+ Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
+ Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ return Folder.CreateAnd(LC, RHSC);
+ return Insert(BinaryOperator::CreateAnd(LHS, RHSC), Name);
+ }
+
Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
if (Constant *RC = dyn_cast<Constant>(RHS)) {
if (RC->isNullValue())
@@ -481,12 +598,37 @@ public:
}
return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
}
+ Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
+ Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ return Folder.CreateOr(LC, RHSC);
+ return Insert(BinaryOperator::CreateOr(LHS, RHSC), Name);
+ }
+ Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
+ Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ return Folder.CreateOr(LC, RHSC);
+ return Insert(BinaryOperator::CreateOr(LHS, RHSC), Name);
+ }
+
Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Folder.CreateXor(LC, RC);
return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
}
+ Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
+ Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ return Folder.CreateXor(LC, RHSC);
+ return Insert(BinaryOperator::CreateXor(LHS, RHSC), Name);
+ }
+ Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
+ Constant *RHSC = ConstantInt::get(LHS->getType(), RHS);
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ return Folder.CreateXor(LC, RHSC);
+ return Insert(BinaryOperator::CreateXor(LHS, RHSC), Name);
+ }
Value *CreateBinOp(Instruction::BinaryOps Opc,
Value *LHS, Value *RHS, const Twine &Name = "") {
@@ -559,8 +701,8 @@ public:
return Insert(GetElementPtrInst::Create(Ptr, IdxBegin, IdxEnd), Name);
}
template<typename InputIterator>
- Value *CreateInBoundsGEP(Value *Ptr, InputIterator IdxBegin, InputIterator IdxEnd,
- const Twine &Name = "") {
+ Value *CreateInBoundsGEP(Value *Ptr, InputIterator IdxBegin,
+ InputIterator IdxEnd, const Twine &Name = "") {
if (Constant *PC = dyn_cast<Constant>(Ptr)) {
// Every index must be constant.
InputIterator i;
@@ -909,6 +1051,11 @@ public:
Value *Args[] = { Arg1, Arg2, Arg3, Arg4 };
return Insert(CallInst::Create(Callee, Args, Args+4), Name);
}
+ CallInst *CreateCall5(Value *Callee, Value *Arg1, Value *Arg2, Value *Arg3,
+ Value *Arg4, Value *Arg5, const Twine &Name = "") {
+ Value *Args[] = { Arg1, Arg2, Arg3, Arg4, Arg5 };
+ return Insert(CallInst::Create(Callee, Args, Args+5), Name);
+ }
template<typename InputIterator>
CallInst *CreateCall(Value *Callee, InputIterator ArgBegin,
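A sketch of the new IRBuilder conveniences (constant getters, the uint64_t/APInt logical-op overloads, and saved insertion points); the functions are illustrative and assume i32 operands:

#include "llvm/Support/IRBuilder.h"
using namespace llvm;

static Value *maskLowByte(IRBuilder<> &B, Value *V) {   // V assumed to be i32
  Value *Masked = B.CreateAnd(V, 0xff, "lowbyte");      // new uint64_t overload
  return B.CreateAdd(Masked, B.getInt32(1));            // new constant getter
}

static void emitInOtherBlock(IRBuilder<> &B, BasicBlock *Other, Value *V) {
  IRBuilderBase::InsertPoint IP = B.saveIP();           // remember where we were
  B.SetInsertPoint(Other);
  B.CreateStore(V, B.CreateAlloca(B.getInt32Ty()));
  B.restoreIP(IP);                                      // and return there
}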
diff --git a/libclamav/c++/llvm/include/llvm/Support/IRReader.h b/libclamav/c++/llvm/include/llvm/Support/IRReader.h
index 66314e0..a44da52 100644
--- a/libclamav/c++/llvm/include/llvm/Support/IRReader.h
+++ b/libclamav/c++/llvm/include/llvm/Support/IRReader.h
@@ -38,7 +38,7 @@ namespace llvm {
std::string ErrMsg;
Module *M = getLazyBitcodeModule(Buffer, Context, &ErrMsg);
if (M == 0) {
- Err = SMDiagnostic(Buffer->getBufferIdentifier(), -1, -1, ErrMsg, "");
+ Err = SMDiagnostic(Buffer->getBufferIdentifier(), ErrMsg);
// ParseBitcodeFile does not take ownership of the Buffer in the
// case of an error.
delete Buffer;
@@ -59,8 +59,8 @@ namespace llvm {
std::string ErrMsg;
MemoryBuffer *F = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), &ErrMsg);
if (F == 0) {
- Err = SMDiagnostic(Filename, -1, -1,
- "Could not open input file '" + Filename + "'", "");
+ Err = SMDiagnostic(Filename,
+ "Could not open input file: " + ErrMsg);
return 0;
}
@@ -78,10 +78,10 @@ namespace llvm {
(const unsigned char *)Buffer->getBufferEnd())) {
std::string ErrMsg;
Module *M = ParseBitcodeFile(Buffer, Context, &ErrMsg);
+ if (M == 0)
+ Err = SMDiagnostic(Buffer->getBufferIdentifier(), ErrMsg);
// ParseBitcodeFile does not take ownership of the Buffer.
delete Buffer;
- if (M == 0)
- Err = SMDiagnostic(Buffer->getBufferIdentifier(), -1, -1, ErrMsg, "");
return M;
}
@@ -97,8 +97,8 @@ namespace llvm {
std::string ErrMsg;
MemoryBuffer *F = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), &ErrMsg);
if (F == 0) {
- Err = SMDiagnostic(Filename, -1, -1,
- "Could not open input file '" + Filename + "'", "");
+ Err = SMDiagnostic(Filename,
+ "Could not open input file: " + ErrMsg);
return 0;
}
diff --git a/libclamav/c++/llvm/include/llvm/Support/MachO.h b/libclamav/c++/llvm/include/llvm/Support/MachO.h
index e6fccfc..4c13177 100644
--- a/libclamav/c++/llvm/include/llvm/Support/MachO.h
+++ b/libclamav/c++/llvm/include/llvm/Support/MachO.h
@@ -14,11 +14,649 @@
#ifndef LLVM_SUPPORT_MACHO_H
#define LLVM_SUPPORT_MACHO_H
+#include "llvm/System/DataTypes.h"
+
// NOTE: The enums in this file are intentionally named to be different from those
// in the headers in /usr/include/mach (on darwin systems) to avoid conflicts
// with those macros.
namespace llvm {
namespace MachO {
+ // Enums from <mach-o/loader.h>
+ enum {
+ // Constants for the "magic" field in llvm::MachO::mach_header and
+ // llvm::MachO::mach_header_64
+ HeaderMagic32 = 0xFEEDFACEu, // MH_MAGIC
+ HeaderMagic32Swapped = 0xCEFAEDFEu, // MH_CIGAM
+ HeaderMagic64 = 0xFEEDFACFu, // MH_MAGIC_64
+ HeaderMagic64Swapped = 0xCFFAEDFEu, // MH_CIGAM_64
+ UniversalMagic = 0xCAFEBABEu, // FAT_MAGIC
+ UniversalMagicSwapped = 0xBEBAFECAu, // FAT_CIGAM
+
+ // Constants for the "filetype" field in llvm::MachO::mach_header and
+ // llvm::MachO::mach_header_64
+ HeaderFileTypeObject = 0x1u, // MH_OBJECT
+ HeaderFileTypeExecutable = 0x2u, // MH_EXECUTE
+ HeaderFileTypeFixedVMShlib = 0x3u, // MH_FVMLIB
+ HeaderFileTypeCore = 0x4u, // MH_CORE
+ HeaderFileTypePreloadedExecutable = 0x5u, // MH_PRELOAD
+ HeaderFileTypeDynamicShlib = 0x6u, // MH_DYLIB
+ HeaderFileTypeDynamicLinkEditor = 0x7u, // MH_DYLINKER
+ HeaderFileTypeBundle = 0x8u, // MH_BUNDLE
+ HeaderFileTypeDynamicShlibStub = 0x9u, // MH_DYLIB_STUB
+ HeaderFileTypeDSYM = 0xAu, // MH_DSYM
+ HeaderFileTypeKextBundle = 0xBu, // MH_KEXT_BUNDLE
+
+ // Constant bits for the "flags" field in llvm::MachO::mach_header and
+ // llvm::MachO::mach_header_64
+ HeaderFlagBitNoUndefinedSymbols = 0x00000001u, // MH_NOUNDEFS
+ HeaderFlagBitIsIncrementalLinkObject= 0x00000002u, // MH_INCRLINK
+ HeaderFlagBitIsDynamicLinkObject = 0x00000004u, // MH_DYLDLINK
+ HeaderFlagBitBindAtLoad = 0x00000008u, // MH_BINDATLOAD
+ HeaderFlagBitPrebound = 0x00000010u, // MH_PREBOUND
+ HeaderFlagBitSplitSegments = 0x00000020u, // MH_SPLIT_SEGS
+ HeaderFlagBitLazyInit = 0x00000040u, // MH_LAZY_INIT
+ HeaderFlagBitTwoLevelNamespace = 0x00000080u, // MH_TWOLEVEL
+ HeaderFlagBitForceFlatNamespace = 0x00000100u, // MH_FORCE_FLAT
+ HeaderFlagBitNoMultipleDefintions = 0x00000200u, // MH_NOMULTIDEFS
+ HeaderFlagBitNoFixPrebinding = 0x00000400u, // MH_NOFIXPREBINDING
+ HeaderFlagBitPrebindable = 0x00000800u, // MH_PREBINDABLE
+ HeaderFlagBitAllModulesBound = 0x00001000u, // MH_ALLMODSBOUND
+ HeaderFlagBitSubsectionsViaSymbols = 0x00002000u, // MH_SUBSECTIONS_VIA_SYMBOLS
+ HeaderFlagBitCanonical = 0x00004000u, // MH_CANONICAL
+ HeaderFlagBitWeakDefines = 0x00008000u, // MH_WEAK_DEFINES
+ HeaderFlagBitBindsToWeak = 0x00010000u, // MH_BINDS_TO_WEAK
+ HeaderFlagBitAllowStackExecution = 0x00020000u, // MH_ALLOW_STACK_EXECUTION
+ HeaderFlagBitRootSafe = 0x00040000u, // MH_ROOT_SAFE
+ HeaderFlagBitSetUIDSafe = 0x00080000u, // MH_SETUID_SAFE
+ HeaderFlagBitNoReexportedDylibs = 0x00100000u, // MH_NO_REEXPORTED_DYLIBS
+ HeaderFlagBitPIE = 0x00200000u, // MH_PIE
+ HeaderFlagBitDeadStrippableDylib = 0x00400000u, // MH_DEAD_STRIPPABLE_DYLIB
+
+ // Constants for the "cmd" field in llvm::MachO::load_command
+ LoadCommandDynamicLinkerRequired = 0x80000000u, // LC_REQ_DYLD
+ LoadCommandSegment32 = 0x00000001u, // LC_SEGMENT
+ LoadCommandSymtab = 0x00000002u, // LC_SYMTAB
+ LoadCommandSymSeg = 0x00000003u, // LC_SYMSEG
+ LoadCommandThread = 0x00000004u, // LC_THREAD
+ LoadCommandUnixThread = 0x00000005u, // LC_UNIXTHREAD
+ LoadCommandFixedVMShlibLoad = 0x00000006u, // LC_LOADFVMLIB
+ LoadCommandFixedVMShlibIdent = 0x00000007u, // LC_IDFVMLIB
+ LoadCommandIdent = 0x00000008u, // LC_IDENT
+ LoadCommandFixedVMFileInclusion = 0x00000009u, // LC_FVMFILE
+ LoadCommandPrePage = 0x0000000Au, // LC_PREPAGE
+ LoadCommandDynamicSymtabInfo = 0x0000000Bu, // LC_DYSYMTAB
+ LoadCommandDylibLoad = 0x0000000Cu, // LC_LOAD_DYLIB
+ LoadCommandDylibIdent = 0x0000000Du, // LC_ID_DYLIB
+ LoadCommandDynamicLinkerLoad = 0x0000000Eu, // LC_LOAD_DYLINKER
+ LoadCommandDynamicLinkerIdent = 0x0000000Fu, // LC_ID_DYLINKER
+ LoadCommandDylibPrebound = 0x00000010u, // LC_PREBOUND_DYLIB
+ LoadCommandRoutines32 = 0x00000011u, // LC_ROUTINES
+ LoadCommandSubFramework = 0x00000012u, // LC_SUB_FRAMEWORK
+ LoadCommandSubUmbrella = 0x00000013u, // LC_SUB_UMBRELLA
+ LoadCommandSubClient = 0x00000014u, // LC_SUB_CLIENT
+ LoadCommandSubLibrary = 0x00000015u, // LC_SUB_LIBRARY
+ LoadCommandTwoLevelHints = 0x00000016u, // LC_TWOLEVEL_HINTS
+ LoadCommandPreBindChecksum = 0x00000017u, // LC_PREBIND_CKSUM
+ LoadCommandDylibLoadWeak = 0x80000018u, // LC_LOAD_WEAK_DYLIB
+ LoadCommandSegment64 = 0x00000019u, // LC_SEGMENT_64
+ LoadCommandRoutines64 = 0x0000001Au, // LC_ROUTINES_64
+ LoadCommandUUID = 0x0000001Bu, // LC_UUID
+ LoadCommandRunpath = 0x8000001Cu, // LC_RPATH
+ LoadCommandCodeSignature = 0x0000001Du, // LC_CODE_SIGNATURE
+ LoadCommandSegmentSplitInfo = 0x0000001Eu, // LC_SEGMENT_SPLIT_INFO
+ LoadCommandDylibReexport = 0x8000001Fu, // LC_REEXPORT_DYLIB
+ LoadCommandDylibLazyLoad = 0x00000020u, // LC_LAZY_LOAD_DYLIB
+ LoadCommandEncryptionInfo = 0x00000021u, // LC_ENCRYPTION_INFO
+ LoadCommandDynamicLinkerInfo = 0x00000022u, // LC_DYLD_INFO
+ LoadCommandDynamicLinkerInfoOnly = 0x80000022u, // LC_DYLD_INFO_ONLY
+ LoadCommandDylibLoadUpward = 0x80000023u, // LC_LOAD_UPWARD_DYLIB
+
+ // Constant bits for the "flags" field in llvm::MachO::segment_command
+ SegmentCommandFlagBitHighVM = 0x1u, // SG_HIGHVM
+ SegmentCommandFlagBitFixedVMLibrary = 0x2u, // SG_FVMLIB
+ SegmentCommandFlagBitNoRelocations = 0x4u, // SG_NORELOC
+ SegmentCommandFlagBitProtectedVersion1 = 0x8u, // SG_PROTECTED_VERSION_1
+
+
+ // Constant masks for the "flags" field in llvm::MachO::section and
+ // llvm::MachO::section_64
+ SectionFlagMaskSectionType = 0x000000ffu, // SECTION_TYPE
+ SectionFlagMaskAllAttributes = 0xffffff00u, // SECTION_ATTRIBUTES
+ SectionFlagMaskUserAttributes = 0xff000000u, // SECTION_ATTRIBUTES_USR
+ SectionFlagMaskSystemAttributes = 0x00ffff00u, // SECTION_ATTRIBUTES_SYS
+
+ // Constant masks for the "flags[7:0]" field in llvm::MachO::section and
+ // llvm::MachO::section_64 (mask "flags" with SECTION_TYPE)
+ SectionTypeRegular = 0x00u, // S_REGULAR
+ SectionTypeZeroFill = 0x01u, // S_ZEROFILL
+ SectionTypeCStringLiterals = 0x02u, // S_CSTRING_LITERALS
+ SectionType4ByteLiterals = 0x03u, // S_4BYTE_LITERALS
+ SectionType8ByteLiterals = 0x04u, // S_8BYTE_LITERALS
+ SectionTypeLiteralPointers = 0x05u, // S_LITERAL_POINTERS
+ SectionTypeNonLazySymbolPointers = 0x06u, // S_NON_LAZY_SYMBOL_POINTERS
+ SectionTypeLazySymbolPointers = 0x07u, // S_LAZY_SYMBOL_POINTERS
+ SectionTypeSymbolStubs = 0x08u, // S_SYMBOL_STUBS
+ SectionTypeModuleInitFunctionPointers = 0x09u, // S_MOD_INIT_FUNC_POINTERS
+ SectionTypeModuleTermFunctionPointers = 0x0au, // S_MOD_TERM_FUNC_POINTERS
+ SectionTypeCoalesced = 0x0bu, // S_COALESCED
+ SectionTypeZeroFillLarge = 0x0cu, // S_GB_ZEROFILL
+ SectionTypeInterposing = 0x0du, // S_INTERPOSING
+ SectionType16ByteLiterals = 0x0eu, // S_16BYTE_LITERALS
+ SectionTypeDTraceObjectFormat = 0x0fu, // S_DTRACE_DOF
+ SectionTypeLazyDylibSymbolPointers = 0x10u, // S_LAZY_DYLIB_SYMBOL_POINTERS
+
+ // Constant masks for the "flags[31:24]" field in llvm::MachO::section and
+ // llvm::MachO::section_64 (mask "flags" with SECTION_ATTRIBUTES_USR)
+ SectionAttrUserPureInstructions = 0x80000000u, // S_ATTR_PURE_INSTRUCTIONS
+ SectionAttrUserNoTableOfContents = 0x40000000u, // S_ATTR_NO_TOC
+ SectionAttrUserCanStripStaticSymbols = 0x20000000u, // S_ATTR_STRIP_STATIC_SYMS
+ SectionAttrUserNoDeadStrip = 0x10000000u, // S_ATTR_NO_DEAD_STRIP
+ SectionAttrUserLiveSupport = 0x08000000u, // S_ATTR_LIVE_SUPPORT
+ SectionAttrUserSelfModifyingCode = 0x04000000u, // S_ATTR_SELF_MODIFYING_CODE
+ SectionAttrUserDebug = 0x02000000u, // S_ATTR_DEBUG
+
+ // Constant masks for the "flags[23:8]" field in llvm::MachO::section and
+ // llvm::MachO::section_64 (mask "flags" with SECTION_ATTRIBUTES_SYS)
+ SectionAttrSytemSomeInstructions = 0x00000400u, // S_ATTR_SOME_INSTRUCTIONS
+ SectionAttrSytemHasExternalRelocations= 0x00000200u, // S_ATTR_EXT_RELOC
+ SectionAttrSytemHasLocalRelocations = 0x00000100u, // S_ATTR_LOC_RELOC
+
+ IndirectSymbolLocal = 0x80000000u, // INDIRECT_SYMBOL_LOCAL
+ IndirectSymbolAbsolute = 0x40000000u, // INDIRECT_SYMBOL_ABS
+
+ RebaseTypePointer = 1u, // REBASE_TYPE_POINTER
+ RebaseTypeTextAbsolute32 = 2u, // REBASE_TYPE_TEXT_ABSOLUTE32
+ RebaseTypeTextPCRelative32 = 3u, // REBASE_TYPE_TEXT_PCREL32
+
+ RebaseOpcodeMask = 0xF0u, // REBASE_OPCODE_MASK
+ RebaseImmediateMask = 0x0Fu, // REBASE_IMMEDIATE_MASK
+ RebaseOpcodeDone = 0x00u, // REBASE_OPCODE_DONE
+ RebaseOpcodeSetTypeImmediate = 0x10u, // REBASE_OPCODE_SET_TYPE_IMM
+ RebaseOpcodeSetSegmentAndOffsetULEB = 0x20u, // REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB
+ RebaseOpcodeAddAddressULEB = 0x30u, // REBASE_OPCODE_ADD_ADDR_ULEB
+ RebaseOpcodeAddAddressImmediateScaled = 0x40u, // REBASE_OPCODE_ADD_ADDR_IMM_SCALED
+ RebaseOpcodeDoRebaseImmediateTimes = 0x50u, // REBASE_OPCODE_DO_REBASE_IMM_TIMES
+ RebaseOpcodeDoRebaseULEBTimes = 0x60u, // REBASE_OPCODE_DO_REBASE_ULEB_TIMES
+ RebaseOpcodeDoRebaseAddAddressULEB = 0x70u, // REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB
+ RebaseOpcodeDoRebaseULEBTimesSkippingULEB = 0x80u, // REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB
+
+
+ BindTypePointer = 1u, // BIND_TYPE_POINTER
+ BindTypeTextAbsolute32 = 2u, // BIND_TYPE_TEXT_ABSOLUTE32
+ BindTypeTextPCRelative32 = 3u, // BIND_TYPE_TEXT_PCREL32
+
+ BindSpecialDylibSelf = 0u, // BIND_SPECIAL_DYLIB_SELF
+ BindSpecialDylibMainExecutable = -1u, // BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE
+ BindSpecialDylibFlatLookup = -2u, // BIND_SPECIAL_DYLIB_FLAT_LOOKUP
+
+ BindSymbolFlagsWeakImport = 0x1u, // BIND_SYMBOL_FLAGS_WEAK_IMPORT
+ BindSymbolFlagsNonWeakDefinition = 0x8u, // BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION
+
+ BindOpcodeMask = 0xF0u, // BIND_OPCODE_MASK
+ BindImmediateMask = 0x0Fu, // BIND_IMMEDIATE_MASK
+ BindOpcodeDone = 0x00u, // BIND_OPCODE_DONE
+ BindOpcodeSetDylibOrdinalImmediate = 0x10u, // BIND_OPCODE_SET_DYLIB_ORDINAL_IMM
+ BindOpcodeSetDylibOrdinalULEB = 0x20u, // BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB
+ BindOpcodeSetDylibSpecialImmediate = 0x30u, // BIND_OPCODE_SET_DYLIB_SPECIAL_IMM
+ BindOpcodeSetSymbolTrailingFlagsImmediate = 0x40u, // BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM
+ BindOpcodeSetTypeImmediate = 0x50u, // BIND_OPCODE_SET_TYPE_IMM
+ BindOpcodeSetAppendSLEB = 0x60u, // BIND_OPCODE_SET_ADDEND_SLEB
+ BindOpcodeSetSegmentAndOffsetULEB = 0x70u, // BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB
+ BindOpcodeAddAddressULEB = 0x80u, // BIND_OPCODE_ADD_ADDR_ULEB
+ BindOpcodeDoBind = 0x90u, // BIND_OPCODE_DO_BIND
+ BindOpcodeDoBindAddAddressULEB = 0xA0u, // BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB
+ BindOpcodeDoBindAddAddressImmediateScaled = 0xB0u, // BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED
+ BindOpcodeDoBindULEBTimesSkippingULEB = 0xC0u, // BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB
+
+ ExportSymbolFlagsKindMask = 0x03u, // EXPORT_SYMBOL_FLAGS_KIND_MASK
+ ExportSymbolFlagsKindRegular = 0x00u, // EXPORT_SYMBOL_FLAGS_KIND_REGULAR
+ ExportSymbolFlagsKindThreadLocal = 0x01u, // EXPORT_SYMBOL_FLAGS_KIND_THREAD_LOCAL
+ ExportSymbolFlagsWeakDefinition = 0x04u, // EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION
+ ExportSymbolFlagsIndirectDefinition = 0x08u, // EXPORT_SYMBOL_FLAGS_INDIRECT_DEFINITION
+ ExportSymbolFlagsHasSpecializations = 0x10u, // EXPORT_SYMBOL_FLAGS_HAS_SPECIALIZATIONS
+
+
+ // Constant masks for the "n_type" field in llvm::MachO::nlist and
+ // llvm::MachO::nlist_64
+ NlistMaskStab = 0xe0, // N_STAB
+ NlistMaskPrivateExternal = 0x10, // N_PEXT
+ NlistMaskType = 0x0e, // N_TYPE
+ NlistMaskExternal = 0x01, // N_EXT
+
+ // Constants for the "n_type & N_TYPE" llvm::MachO::nlist and
+ // llvm::MachO::nlist_64
+ NListTypeUndefined = 0x0u, // N_UNDF
+ NListTypeAbsolute = 0x2u, // N_ABS
+ NListTypeSection = 0xeu, // N_SECT
+ NListTypePreboundUndefined = 0xcu, // N_PBUD
+ NListTypeIndirect = 0xau, // N_INDR
+
+ // Constant masks for the "n_sect" field in llvm::MachO::nlist and
+ // llvm::MachO::nlist_64
+ NListSectionNoSection = 0u, // NO_SECT
+ NListSectionMaxSection = 0xffu, // MAX_SECT
+
+ // Constant values for the "n_type" field in llvm::MachO::nlist and
+ // llvm::MachO::nlist_64 when "(n_type & NlistMaskStab) != 0"
+ StabGlobalSymbol = 0x20u, // N_GSYM
+ StabFunctionName = 0x22u, // N_FNAME
+ StabFunction = 0x24u, // N_FUN
+ StabStaticSymbol = 0x26u, // N_STSYM
+ StabLocalCommon = 0x28u, // N_LCSYM
+ StabBeginSymbol = 0x2Eu, // N_BNSYM
+ StabSourceFileOptions = 0x3Cu, // N_OPT
+ StabRegisterSymbol = 0x40u, // N_RSYM
+ StabSourceLine = 0x44u, // N_SLINE
+ StabEndSymbol = 0x4Eu, // N_ENSYM
+ StabStructureType = 0x60u, // N_SSYM
+ StabSourceFileName = 0x64u, // N_SO
+ StabObjectFileName = 0x66u, // N_OSO
+ StabLocalSymbol = 0x80u, // N_LSYM
+ StabBeginIncludeFileName = 0x82u, // N_BINCL
+ StabIncludeFileName = 0x84u, // N_SOL
+ StabCompilerParameters = 0x86u, // N_PARAMS
+ StabCompilerVersion = 0x88u, // N_VERSION
+ StabCompilerOptLevel = 0x8Au, // N_OLEVEL
+ StabParameter = 0xA0u, // N_PSYM
+ StabEndIncludeFile = 0xA2u, // N_EINCL
+ StabAlternateEntry = 0xA4u, // N_ENTRY
+ StabLeftBracket = 0xC0u, // N_LBRAC
+ StabDeletedIncludeFile = 0xC2u, // N_EXCL
+ StabRightBracket = 0xE0u, // N_RBRAC
+ StabBeginCommon = 0xE2u, // N_BCOMM
+ StabEndCommon = 0xE4u, // N_ECOMM
+ StabEndCommonLocal = 0xE8u, // N_ECOML
+ StabLength = 0xFEu // N_LENG
+
+ };
+
+ // Structs from <mach-o/loader.h>
+
+ struct mach_header {
+ uint32_t magic;
+ uint32_t cputype;
+ uint32_t cpusubtype;
+ uint32_t filetype;
+ uint32_t ncmds;
+ uint32_t sizeofcmds;
+ uint32_t flags;
+ };
+
+ struct mach_header_64 {
+ uint32_t magic;
+ uint32_t cputype;
+ uint32_t cpusubtype;
+ uint32_t filetype;
+ uint32_t ncmds;
+ uint32_t sizeofcmds;
+ uint32_t flags;
+ uint32_t reserved;
+ };
+
+ struct load_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ };
+
+ struct segment_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ char segname[16];
+ uint32_t vmaddr;
+ uint32_t vmsize;
+ uint32_t fileoff;
+ uint32_t filesize;
+ uint32_t maxprot;
+ uint32_t initprot;
+ uint32_t nsects;
+ uint32_t flags;
+ };
+
+ struct segment_command_64 {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ char segname[16];
+ uint64_t vmaddr;
+ uint64_t vmsize;
+ uint64_t fileoff;
+ uint64_t filesize;
+ uint32_t maxprot;
+ uint32_t initprot;
+ uint32_t nsects;
+ uint32_t flags;
+ };
+
+ struct section {
+ char sectname[16];
+ char segname[16];
+ uint32_t addr;
+ uint32_t size;
+ uint32_t offset;
+ uint32_t align;
+ uint32_t reloff;
+ uint32_t nreloc;
+ uint32_t flags;
+ uint32_t reserved1;
+ uint32_t reserved2;
+ };
+
+ struct section_64 {
+ char sectname[16];
+ char segname[16];
+ uint64_t addr;
+ uint64_t size;
+ uint32_t offset;
+ uint32_t align;
+ uint32_t reloff;
+ uint32_t nreloc;
+ uint32_t flags;
+ uint32_t reserved1;
+ uint32_t reserved2;
+ uint32_t reserved3;
+ };
+
+ struct fvmlib {
+ uint32_t name;
+ uint32_t minor_version;
+ uint32_t header_addr;
+ };
+
+ struct fvmlib_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ struct fvmlib fvmlib;
+ };
+
+ struct dylib {
+ uint32_t name;
+ uint32_t timestamp;
+ uint32_t current_version;
+ uint32_t compatibility_version;
+ };
+
+ struct dylib_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ struct dylib dylib;
+ };
+
+ struct sub_framework_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t umbrella;
+ };
+
+ struct sub_client_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t client;
+ };
+
+ struct sub_umbrella_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t sub_umbrella;
+ };
+
+ struct sub_library_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t sub_library;
+ };
+
+ struct prebound_dylib_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t name;
+ uint32_t nmodules;
+ uint32_t linked_modules;
+ };
+
+ struct dylinker_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t name;
+ };
+
+ struct thread_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ };
+
+ struct routines_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t init_address;
+ uint32_t init_module;
+ uint32_t reserved1;
+ uint32_t reserved2;
+ uint32_t reserved3;
+ uint32_t reserved4;
+ uint32_t reserved5;
+ uint32_t reserved6;
+ };
+
+ struct routines_command_64 {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint64_t init_address;
+ uint64_t init_module;
+ uint64_t reserved1;
+ uint64_t reserved2;
+ uint64_t reserved3;
+ uint64_t reserved4;
+ uint64_t reserved5;
+ uint64_t reserved6;
+ };
+
+ struct symtab_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t symoff;
+ uint32_t nsyms;
+ uint32_t stroff;
+ uint32_t strsize;
+ };
+
+ struct dysymtab_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t ilocalsym;
+ uint32_t nlocalsym;
+ uint32_t iextdefsym;
+ uint32_t nextdefsym;
+ uint32_t iundefsym;
+ uint32_t nundefsym;
+ uint32_t tocoff;
+ uint32_t ntoc;
+ uint32_t modtaboff;
+ uint32_t nmodtab;
+ uint32_t extrefsymoff;
+ uint32_t nextrefsyms;
+ uint32_t indirectsymoff;
+ uint32_t nindirectsyms;
+ uint32_t extreloff;
+ uint32_t nextrel;
+ uint32_t locreloff;
+ uint32_t nlocrel;
+ };
+
+ struct dylib_table_of_contents {
+ uint32_t symbol_index;
+ uint32_t module_index;
+ };
+
+ struct dylib_module {
+ uint32_t module_name;
+ uint32_t iextdefsym;
+ uint32_t nextdefsym;
+ uint32_t irefsym;
+ uint32_t nrefsym;
+ uint32_t ilocalsym;
+ uint32_t nlocalsym;
+ uint32_t iextrel;
+ uint32_t nextrel;
+ uint32_t iinit_iterm;
+ uint32_t ninit_nterm;
+ uint32_t objc_module_info_addr;
+ uint32_t objc_module_info_size;
+ };
+
+ struct dylib_module_64 {
+ uint32_t module_name;
+ uint32_t iextdefsym;
+ uint32_t nextdefsym;
+ uint32_t irefsym;
+ uint32_t nrefsym;
+ uint32_t ilocalsym;
+ uint32_t nlocalsym;
+ uint32_t iextrel;
+ uint32_t nextrel;
+ uint32_t iinit_iterm;
+ uint32_t ninit_nterm;
+ uint32_t objc_module_info_size;
+ uint64_t objc_module_info_addr;
+ };
+
+ struct dylib_reference {
+ uint32_t isym:24,
+ flags:8;
+ };
+
+
+ struct twolevel_hints_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t offset;
+ uint32_t nhints;
+ };
+
+ struct twolevel_hint {
+ uint32_t isub_image:8,
+ itoc:24;
+ };
+
+ struct prebind_cksum_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t cksum;
+ };
+
+ struct uuid_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint8_t uuid[16];
+ };
+
+ struct rpath_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t path;
+ };
+
+ struct linkedit_data_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t dataoff;
+ uint32_t datasize;
+ };
+
+ struct encryption_info_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t cryptoff;
+ uint32_t cryptsize;
+ uint32_t cryptid;
+ };
+
+ struct dyld_info_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t rebase_off;
+ uint32_t rebase_size;
+ uint32_t bind_off;
+ uint32_t bind_size;
+ uint32_t weak_bind_off;
+ uint32_t weak_bind_size;
+ uint32_t lazy_bind_off;
+ uint32_t lazy_bind_size;
+ uint32_t export_off;
+ uint32_t export_size;
+ };
+
+ struct symseg_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t offset;
+ uint32_t size;
+ };
+
+ struct ident_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ };
+
+ struct fvmfile_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t name;
+ uint32_t header_addr;
+ };
+
+
+ // Structs from <mach-o/fat.h>
+ struct fat_header {
+ uint32_t magic;
+ uint32_t nfat_arch;
+ };
+
+ struct fat_arch {
+ uint32_t cputype;
+ uint32_t cpusubtype;
+ uint32_t offset;
+ uint32_t size;
+ uint32_t align;
+ };
+
+ // Structs from <mach-o/nlist.h>
+ struct nlist {
+ uint32_t n_strx;
+ uint8_t n_type;
+ uint8_t n_sect;
+ int16_t n_desc;
+ uint32_t n_value;
+ };
+
+ struct nlist_64 {
+ uint32_t n_strx;
+ uint8_t n_type;
+ uint8_t n_sect;
+ uint16_t n_desc;
+ uint64_t n_value;
+ };
+
+ // Get/Set functions from <mach-o/nlist.h>
+
+ static inline uint16_t GET_LIBRARY_ORDINAL(uint16_t n_desc)
+ {
+ return (((n_desc) >> 8u) & 0xffu);
+ }
+
+ static inline void SET_LIBRARY_ORDINAL(uint16_t &n_desc, uint8_t ordinal)
+ {
+ n_desc = (((n_desc) & 0x00ff) | (((ordinal) & 0xff) << 8));
+ }
+
+ static inline uint8_t GET_COMM_ALIGN (uint16_t n_desc)
+ {
+ return (n_desc >> 8u) & 0x0fu;
+ }
+
+ static inline void SET_COMM_ALIGN (uint16_t &n_desc, uint8_t align)
+ {
+ n_desc = ((n_desc & 0xf0ffu) | ((align & 0x0fu) << 8u));
+ }
+
// Enums from <mach/machine.h>
enum {
// Capability bits used in the definition of cpu_type.
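A minimal sketch using the new constants to distinguish 32- and 64-bit Mach-O images (buffer handling is illustrative; no byte-swapping of foreign-endian files):

#include "llvm/Support/MachO.h"
#include <cstring>

// Returns 8 for a 64-bit image, 4 for a 32-bit one, 0 otherwise.
static unsigned machoWordSize(const char *Buf, size_t Len) {
  if (Len < sizeof(llvm::MachO::mach_header))
    return 0;
  uint32_t Magic;
  std::memcpy(&Magic, Buf, sizeof(Magic));
  if (Magic == llvm::MachO::HeaderMagic64) return 8;
  if (Magic == llvm::MachO::HeaderMagic32) return 4;
  return 0;
}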
diff --git a/libclamav/c++/llvm/include/llvm/Support/MathExtras.h b/libclamav/c++/llvm/include/llvm/Support/MathExtras.h
index fa12416..982813f 100644
--- a/libclamav/c++/llvm/include/llvm/Support/MathExtras.h
+++ b/libclamav/c++/llvm/include/llvm/Support/MathExtras.h
@@ -32,35 +32,43 @@ inline uint32_t Lo_32(uint64_t Value) {
return static_cast<uint32_t>(Value);
}
-/// is?Type - these functions produce optimal testing for integer data types.
-inline bool isInt8 (int64_t Value) {
- return static_cast<int8_t>(Value) == Value;
-}
-inline bool isUInt8 (int64_t Value) {
- return static_cast<uint8_t>(Value) == Value;
-}
-inline bool isInt16 (int64_t Value) {
- return static_cast<int16_t>(Value) == Value;
-}
-inline bool isUInt16(int64_t Value) {
- return static_cast<uint16_t>(Value) == Value;
-}
-inline bool isInt32 (int64_t Value) {
- return static_cast<int32_t>(Value) == Value;
-}
-inline bool isUInt32(int64_t Value) {
- return static_cast<uint32_t>(Value) == Value;
-}
-
+/// isInt - Checks if an integer fits into the given bit width.
template<unsigned N>
inline bool isInt(int64_t x) {
return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
}
+// Template specializations to get better code for common cases.
+template<>
+inline bool isInt<8>(int64_t x) {
+ return static_cast<int8_t>(x) == x;
+}
+template<>
+inline bool isInt<16>(int64_t x) {
+ return static_cast<int16_t>(x) == x;
+}
+template<>
+inline bool isInt<32>(int64_t x) {
+ return static_cast<int32_t>(x) == x;
+}
+/// isUInt - Checks if an unsigned integer fits into the given bit width.
template<unsigned N>
-inline bool isUint(uint64_t x) {
+inline bool isUInt(uint64_t x) {
return N >= 64 || x < (UINT64_C(1)<<N);
}
+// Template specializations to get better code for common cases.
+template<>
+inline bool isUInt<8>(uint64_t x) {
+ return static_cast<uint8_t>(x) == x;
+}
+template<>
+inline bool isUInt<16>(uint64_t x) {
+ return static_cast<uint16_t>(x) == x;
+}
+template<>
+inline bool isUInt<32>(uint64_t x) {
+ return static_cast<uint32_t>(x) == x;
+}
/// isMask_32 - This function returns true if the argument is a sequence of ones
/// starting at the least significant bit with the remainder zero (32 bit
@@ -118,7 +126,8 @@ inline uint16_t ByteSwap_16(uint16_t Value) {
/// ByteSwap_32 - This function returns a byte-swapped representation of the
/// 32-bit argument, Value.
inline uint32_t ByteSwap_32(uint32_t Value) {
-#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) && !defined(__ICC)
+#if defined(__llvm__) || \
+ (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) && !defined(__ICC)
return __builtin_bswap32(Value);
#elif defined(_MSC_VER) && !defined(_DEBUG)
return _byteswap_ulong(Value);
@@ -134,7 +143,8 @@ inline uint32_t ByteSwap_32(uint32_t Value) {
/// ByteSwap_64 - This function returns a byte-swapped representation of the
/// 64-bit argument, Value.
inline uint64_t ByteSwap_64(uint64_t Value) {
-#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) && !defined(__ICC)
+#if defined(__llvm__) || \
+ (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) && !defined(__ICC)
return __builtin_bswap64(Value);
#elif defined(_MSC_VER) && !defined(_DEBUG)
return _byteswap_uint64(Value);
@@ -449,6 +459,18 @@ inline int64_t abs64(int64_t x) {
return (x < 0) ? -x : x;
}
+/// SignExtend32 - Sign extend B-bit number x to 32-bit int.
+/// Usage int32_t r = SignExtend32<5>(x);
+template <unsigned B> inline int32_t SignExtend32(uint32_t x) {
+ return int32_t(x << (32 - B)) >> (32 - B);
+}
+
+/// SignExtend64 - Sign extend B-bit number x to 64-bit int.
+/// Usage int64_t r = SignExtend64<5>(x);
+template <unsigned B> inline int64_t SignExtend64(uint64_t x) {
+ return int64_t(x << (64 - B)) >> (64 - B);
+}
+
} // End llvm namespace
#endif
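The renamed templates and the new sign-extension helpers in use (values arbitrary):

#include "llvm/Support/MathExtras.h"
#include <cassert>

static void demoMathExtras() {
  assert(llvm::isInt<8>(-128) && !llvm::isInt<8>(128));
  assert(llvm::isUInt<16>(65535) && !llvm::isUInt<16>(65536));
  // 0x1f is -1 when read as a 5-bit two's-complement field.
  assert(llvm::SignExtend32<5>(0x1f) == -1);
  assert(llvm::SignExtend64<5>(0x10) == -16);
}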
diff --git a/libclamav/c++/llvm/include/llvm/Support/MemoryBuffer.h b/libclamav/c++/llvm/include/llvm/Support/MemoryBuffer.h
index 65c7167..8a41aa5 100644
--- a/libclamav/c++/llvm/include/llvm/Support/MemoryBuffer.h
+++ b/libclamav/c++/llvm/include/llvm/Support/MemoryBuffer.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/System/DataTypes.h"
#include <string>
+#include <sys/stat.h>
namespace llvm {
@@ -25,17 +26,20 @@ namespace llvm {
/// into a memory buffer. In addition to basic access to the characters in the
/// file, this interface guarantees you can read one character past the end of
/// the file, and that this character will read as '\0'.
+///
+/// The '\0' guarantee is needed to support an optimization -- it's intended to
+/// be more efficient for clients which are reading all the data to stop
+/// reading when they encounter a '\0' than to continually check the file
+/// position to see if it has reached the end of the file.
class MemoryBuffer {
const char *BufferStart; // Start of the buffer.
const char *BufferEnd; // End of the buffer.
- /// MustDeleteBuffer - True if we allocated this buffer. If so, the
- /// destructor must know the delete[] it.
- bool MustDeleteBuffer;
+ MemoryBuffer(const MemoryBuffer &); // DO NOT IMPLEMENT
+ MemoryBuffer &operator=(const MemoryBuffer &); // DO NOT IMPLEMENT
protected:
- MemoryBuffer() : MustDeleteBuffer(false) {}
+ MemoryBuffer() {}
void init(const char *BufStart, const char *BufEnd);
- void initCopyOf(const char *BufStart, const char *BufEnd);
public:
virtual ~MemoryBuffer();
@@ -59,25 +63,29 @@ public:
/// it has the specified size.
static MemoryBuffer *getFile(StringRef Filename,
std::string *ErrStr = 0,
- int64_t FileSize = -1);
+ int64_t FileSize = -1,
+ struct stat *FileInfo = 0);
+ static MemoryBuffer *getFile(const char *Filename,
+ std::string *ErrStr = 0,
+ int64_t FileSize = -1,
+ struct stat *FileInfo = 0);
/// getMemBuffer - Open the specified memory range as a MemoryBuffer. Note
/// that EndPtr[0] must be a null byte and be accessible!
- static MemoryBuffer *getMemBuffer(const char *StartPtr, const char *EndPtr,
- const char *BufferName = "");
+ static MemoryBuffer *getMemBuffer(StringRef InputData,
+ StringRef BufferName = "");
/// getMemBufferCopy - Open the specified memory range as a MemoryBuffer,
/// copying the contents and taking ownership of it. This has no requirements
/// on EndPtr[0].
- static MemoryBuffer *getMemBufferCopy(const char *StartPtr,const char *EndPtr,
- const char *BufferName = "");
+ static MemoryBuffer *getMemBufferCopy(StringRef InputData,
+ StringRef BufferName = "");
/// getNewMemBuffer - Allocate a new MemoryBuffer of the specified size that
/// is completely initialized to zeros. Note that the caller should
/// initialize the memory allocated by this method. The memory is owned by
/// the MemoryBuffer object.
- static MemoryBuffer *getNewMemBuffer(size_t Size,
- const char *BufferName = "");
+ static MemoryBuffer *getNewMemBuffer(size_t Size, StringRef BufferName = "");
/// getNewUninitMemBuffer - Allocate a new MemoryBuffer of the specified size
/// that is not initialized. Note that the caller should initialize the
@@ -87,7 +95,8 @@ public:
StringRef BufferName = "");
/// getSTDIN - Read all of stdin into a file buffer, and return it.
- static MemoryBuffer *getSTDIN();
+ /// If an error occurs, this returns null and fills in *ErrStr with a reason.
+ static MemoryBuffer *getSTDIN(std::string *ErrStr = 0);
/// getFileOrSTDIN - Open the specified file as a MemoryBuffer, or open stdin
@@ -95,7 +104,12 @@ public:
/// in *ErrStr with a reason.
static MemoryBuffer *getFileOrSTDIN(StringRef Filename,
std::string *ErrStr = 0,
- int64_t FileSize = -1);
+ int64_t FileSize = -1,
+ struct stat *FileInfo = 0);
+ static MemoryBuffer *getFileOrSTDIN(const char *Filename,
+ std::string *ErrStr = 0,
+ int64_t FileSize = -1,
+ struct stat *FileInfo = 0);
};
} // end namespace llvm
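The StringRef-based replacements for the old (StartPtr, EndPtr) entry points, sketched with made-up data:

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/MemoryBuffer.h"

static void demoMemBuffer() {
  llvm::StringRef Data("sample data");   // literal, so the byte past the end is '\0'
  // References the caller's memory; Data must outlive the buffer.
  llvm::MemoryBuffer *Ref = llvm::MemoryBuffer::getMemBuffer(Data, "refbuf");
  // Owns a NUL-terminated copy, so the source may go away.
  llvm::MemoryBuffer *Copy = llvm::MemoryBuffer::getMemBufferCopy(Data, "copybuf");
  delete Copy;
  delete Ref;
}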
diff --git a/libclamav/c++/llvm/include/llvm/Support/PassNameParser.h b/libclamav/c++/llvm/include/llvm/Support/PassNameParser.h
index cdca978..a24a6f0 100644
--- a/libclamav/c++/llvm/include/llvm/Support/PassNameParser.h
+++ b/libclamav/c++/llvm/include/llvm/Support/PassNameParser.h
@@ -23,11 +23,11 @@
#ifndef LLVM_SUPPORT_PASS_NAME_PARSER_H
#define LLVM_SUPPORT_PASS_NAME_PARSER_H
+#include "llvm/Pass.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Pass.h"
-#include <algorithm>
#include <cstring>
namespace llvm {
@@ -42,8 +42,7 @@ class PassNameParser : public PassRegistrationListener,
public:
PassNameParser() : Opt(0) {}
virtual ~PassNameParser();
-
-
+
void initialize(cl::Option &O) {
Opt = &O;
cl::parser<const PassInfo*>::initialize(O);
@@ -77,20 +76,21 @@ public:
}
virtual void passEnumerate(const PassInfo *P) { passRegistered(P); }
- // ValLessThan - Provide a sorting comparator for Values elements...
- typedef std::pair<const char*,
- std::pair<const PassInfo*, const char*> > ValType;
- static bool ValLessThan(const ValType &VT1, const ValType &VT2) {
- return std::string(VT1.first) < std::string(VT2.first);
- }
-
// printOptionInfo - Print out information about this option. Override the
// default implementation to sort the table before we print...
virtual void printOptionInfo(const cl::Option &O, size_t GlobalWidth) const {
PassNameParser *PNP = const_cast<PassNameParser*>(this);
- std::sort(PNP->Values.begin(), PNP->Values.end(), ValLessThan);
+ array_pod_sort(PNP->Values.begin(), PNP->Values.end(), ValLessThan);
cl::parser<const PassInfo*>::printOptionInfo(O, GlobalWidth);
}
+
+private:
+ // ValLessThan - Provide a sorting comparator for Values elements...
+ static int ValLessThan(const void *VT1, const void *VT2) {
+ typedef PassNameParser::OptionInfo ValType;
+ return std::strcmp(static_cast<const ValType *>(VT1)->Name,
+ static_cast<const ValType *>(VT2)->Name);
+ }
};
///===----------------------------------------------------------------------===//
diff --git a/libclamav/c++/llvm/include/llvm/Support/PatternMatch.h b/libclamav/c++/llvm/include/llvm/Support/PatternMatch.h
index f02bc34..bee6768 100644
--- a/libclamav/c++/llvm/include/llvm/Support/PatternMatch.h
+++ b/libclamav/c++/llvm/include/llvm/Support/PatternMatch.h
@@ -453,6 +453,13 @@ struct CastClass_match {
}
};
+/// m_BitCast
+template<typename OpTy>
+inline CastClass_match<OpTy, Instruction::BitCast>
+m_BitCast(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::BitCast>(Op);
+}
+
/// m_PtrToInt
template<typename OpTy>
inline CastClass_match<OpTy, Instruction::PtrToInt>
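The new m_BitCast matcher composes with the existing ones; a small sketch (the helper is illustrative):

#include "llvm/Support/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// True if V is 'bitcast X to ...'; the source value is returned through Src.
static bool stripSingleBitCast(Value *V, Value *&Src) {
  return match(V, m_BitCast(m_Value(Src)));
}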
diff --git a/libclamav/c++/llvm/include/llvm/Support/PrettyStackTrace.h b/libclamav/c++/llvm/include/llvm/Support/PrettyStackTrace.h
index 0db84e1..6dbce39 100644
--- a/libclamav/c++/llvm/include/llvm/Support/PrettyStackTrace.h
+++ b/libclamav/c++/llvm/include/llvm/Support/PrettyStackTrace.h
@@ -24,10 +24,10 @@ namespace llvm {
/// handlers which conflict with the ones installed by this module.
/// Defaults to false.
extern bool DisablePrettyStackTrace;
-
+
/// PrettyStackTraceEntry - This class is used to represent a frame of the
/// "pretty" stack trace that is dumped when a program crashes. You can define
- /// subclasses of this and declare them on the program stack: when they are
+ /// subclasses of this and declare them on the program stack: when they are
/// constructed and destructed, they will add their symbolic frames to a
/// virtual stack trace. This gets dumped out if the program crashes.
class PrettyStackTraceEntry {
@@ -37,14 +37,14 @@ namespace llvm {
public:
PrettyStackTraceEntry();
virtual ~PrettyStackTraceEntry();
-
+
/// print - Emit information about this stack frame to OS.
virtual void print(raw_ostream &OS) const = 0;
-
+
/// getNextEntry - Return the next entry in the list of frames.
const PrettyStackTraceEntry *getNextEntry() const { return NextEntry; }
};
-
+
/// PrettyStackTraceString - This object prints a specified string (which
/// should not contain newlines) to the stream as the stack trace when a crash
/// occurs.
@@ -54,7 +54,7 @@ namespace llvm {
PrettyStackTraceString(const char *str) : Str(str) {}
virtual void print(raw_ostream &OS) const;
};
-
+
/// PrettyStackTraceProgram - This object prints a specified program arguments
/// to the stream as the stack trace when a crash occurs.
class PrettyStackTraceProgram : public PrettyStackTraceEntry {
@@ -65,7 +65,7 @@ namespace llvm {
: ArgC(argc), ArgV(argv) {}
virtual void print(raw_ostream &OS) const;
};
-
+
} // end namespace llvm
#endif
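For context, the usual way these entries are used from a tool's main() (nothing here is new in this commit):

#include "llvm/Support/PrettyStackTrace.h"

int main(int argc, char **argv) {
  // Printed as one frame of the "pretty" stack trace if the program crashes.
  llvm::PrettyStackTraceProgram X(argc, argv);
  // ... tool body ...
  return 0;
}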
diff --git a/libclamav/c++/llvm/include/llvm/Support/RecyclingAllocator.h b/libclamav/c++/llvm/include/llvm/Support/RecyclingAllocator.h
index 609193f..34ab874 100644
--- a/libclamav/c++/llvm/include/llvm/Support/RecyclingAllocator.h
+++ b/libclamav/c++/llvm/include/llvm/Support/RecyclingAllocator.h
@@ -56,4 +56,18 @@ public:
}
+template<class AllocatorType, class T, size_t Size, size_t Align>
+inline void *operator new(size_t,
+ llvm::RecyclingAllocator<AllocatorType,
+ T, Size, Align> &Allocator) {
+ return Allocator.Allocate();
+}
+
+template<class AllocatorType, class T, size_t Size, size_t Align>
+inline void operator delete(void *E,
+ llvm::RecyclingAllocator<AllocatorType,
+ T, Size, Align> &A) {
+ A.Deallocate(E);
+}
+
#endif
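What the new placement operator new/delete enable, sketched with a made-up node type:

#include "llvm/Support/Allocator.h"
#include "llvm/Support/RecyclingAllocator.h"

struct Node { int Key; Node *Next; };

static void demoRecycler() {
  llvm::RecyclingAllocator<llvm::BumpPtrAllocator, Node> Alloc;
  Node *N = new (Alloc) Node();   // routes through Alloc.Allocate()
  Alloc.Deallocate(N);            // back onto the free list for reuse
}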
diff --git a/libclamav/c++/llvm/include/llvm/Support/Regex.h b/libclamav/c++/llvm/include/llvm/Support/Regex.h
index 591af00..b46a668 100644
--- a/libclamav/c++/llvm/include/llvm/Support/Regex.h
+++ b/libclamav/c++/llvm/include/llvm/Support/Regex.h
@@ -11,6 +11,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_SUPPORT_REGEX_H
+#define LLVM_SUPPORT_REGEX_H
+
#include <string>
struct llvm_regex;
@@ -18,7 +21,7 @@ struct llvm_regex;
namespace llvm {
class StringRef;
template<typename T> class SmallVectorImpl;
-
+
class Regex {
public:
enum {
@@ -26,9 +29,9 @@ namespace llvm {
/// Compile for matching that ignores upper/lower case distinctions.
IgnoreCase=1,
/// Compile for newline-sensitive matching. With this flag '[^' bracket
- /// expressions and '.' never match newline. A ^ anchor matches the
- /// null string after any newline in the string in addition to its normal
- /// function, and the $ anchor matches the null string before any
+ /// expressions and '.' never match newline. A ^ anchor matches the
+ /// null string after any newline in the string in addition to its normal
+ /// function, and the $ anchor matches the null string before any
/// newline in the string in addition to its normal function.
Newline=2
};
@@ -36,7 +39,7 @@ namespace llvm {
/// Compiles the given POSIX Extended Regular Expression \arg Regex.
/// This implementation supports regexes and matching strings with embedded
/// NUL characters.
- Regex(const StringRef &Regex, unsigned Flags = NoFlags);
+ Regex(StringRef Regex, unsigned Flags = NoFlags);
~Regex();
/// isValid - returns the error encountered during regex compilation, or
@@ -47,7 +50,7 @@ namespace llvm {
/// matches it contains. The number filled in by match will include this
/// many entries plus one for the whole regex (as element 0).
unsigned getNumMatches() const;
-
+
/// matches - Match the regex against a given \arg String.
///
/// \param Matches - If given, on a successful match this will be filled in
@@ -55,7 +58,7 @@ namespace llvm {
/// the first group is always the entire pattern.
///
/// This returns true on a successful match.
- bool match(const StringRef &String, SmallVectorImpl<StringRef> *Matches=0);
+ bool match(StringRef String, SmallVectorImpl<StringRef> *Matches = 0);
/// sub - Return the result of replacing the first match of the regex in
/// \arg String with the \arg Repl string. Backreferences like "\0" in the
@@ -74,3 +77,5 @@ namespace llvm {
int error;
};
}
+
+#endif // LLVM_SUPPORT_REGEX_H
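For reference, a minimal usage sketch of the StringRef-based interface above; the pattern and input are arbitrary examples:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/Regex.h"

    bool parsePair() {
      llvm::Regex Pat("^([a-z]+)=([0-9]+)$");
      llvm::SmallVector<llvm::StringRef, 3> Groups;
      if (!Pat.match("answer=42", &Groups))
        return false;
      // Groups[0] is the whole match, Groups[1] is "answer", Groups[2] is "42".
      return true;
    }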
diff --git a/libclamav/c++/llvm/include/llvm/Support/Registry.h b/libclamav/c++/llvm/include/llvm/Support/Registry.h
index 4db8882..d0375be 100644
--- a/libclamav/c++/llvm/include/llvm/Support/Registry.h
+++ b/libclamav/c++/llvm/include/llvm/Support/Registry.h
@@ -203,6 +203,8 @@ namespace llvm {
};
+ // Since these are defined in a header file, plugins must be sure to export
+ // these symbols.
template <typename T, typename U>
typename Registry<T,U>::node *Registry<T,U>::Head;
diff --git a/libclamav/c++/llvm/include/llvm/Support/SlowOperationInformer.h b/libclamav/c++/llvm/include/llvm/Support/SlowOperationInformer.h
deleted file mode 100644
index 524049c..0000000
--- a/libclamav/c++/llvm/include/llvm/Support/SlowOperationInformer.h
+++ /dev/null
@@ -1,65 +0,0 @@
-//===- llvm/Support/SlowOperationInformer.h - Keep user informed *- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines a simple object which can be used to let the user know what
-// is going on when a slow operation is happening, and gives them the ability to
-// cancel it. Potentially slow operations can stack allocate one of these
-// objects, and periodically call the "progress" method to update the progress
-// bar. If the operation takes more than 1 second to complete, the progress bar
-// is automatically shown and updated. As such, the slow operation should not
-// print stuff to the screen, and should not be confused if an extra line
-// appears on the screen (ie, the cursor should be at the start of the line).
-//
-// If the user presses CTRL-C during the operation, the next invocation of the
-// progress method return true indicating that the operation was cancelled.
-//
-// Because SlowOperationInformers fiddle around with signals, they cannot be
-// nested, and interact poorly with threads. The SIGALRM handler is set back to
-// SIGDFL, but the SIGINT signal handler is restored when the
-// SlowOperationInformer is destroyed.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_SUPPORT_SLOW_OPERATION_INFORMER_H
-#define LLVM_SUPPORT_SLOW_OPERATION_INFORMER_H
-
-#include <string>
-#include <cassert>
-#include "llvm/System/DataTypes.h"
-
-namespace llvm {
- class SlowOperationInformer {
- std::string OperationName;
- unsigned LastPrintAmount;
-
- SlowOperationInformer(const SlowOperationInformer&); // DO NOT IMPLEMENT
- void operator=(const SlowOperationInformer&); // DO NOT IMPLEMENT
- public:
- SlowOperationInformer(const std::string &Name);
- ~SlowOperationInformer();
-
- /// progress - Clients should periodically call this method when they can
- /// handle cancellation. The Amount variable should indicate how far
- /// along the operation is, given in 1/10ths of a percent (in other words,
- /// Amount should range from 0 to 1000). If the user cancels the operation,
- /// this returns true, false otherwise.
- bool progress(unsigned Amount);
-
- /// progress - Same as the method above, but this performs the division for
- /// you, and helps you avoid overflow if you are dealing with largish
- /// numbers.
- bool progress(unsigned Current, unsigned Maximum) {
- assert(Maximum != 0 &&
- "Shouldn't be doing work if there is nothing to do!");
- return progress(Current*uint64_t(1000UL)/Maximum);
- }
- };
-} // end namespace llvm
-
-#endif /* SLOW_OPERATION_INFORMER_H */
diff --git a/libclamav/c++/llvm/include/llvm/Support/SourceMgr.h b/libclamav/c++/llvm/include/llvm/Support/SourceMgr.h
index fd56b16..270ab2b 100644
--- a/libclamav/c++/llvm/include/llvm/Support/SourceMgr.h
+++ b/libclamav/c++/llvm/include/llvm/Support/SourceMgr.h
@@ -31,6 +31,13 @@ namespace llvm {
/// SourceMgr - This owns the files read by a parser, handles include stacks,
/// and handles diagnostic wrangling.
class SourceMgr {
+public:
+ /// DiagHandlerTy - Clients that want to handle their own diagnostics in a
+ /// custom way can register a function pointer+context as a diagnostic
+ /// handler. It gets called each time PrintMessage is invoked.
+ typedef void (*DiagHandlerTy)(const SMDiagnostic&, void *Context,
+ unsigned LocCookie);
+private:
struct SrcBuffer {
/// Buffer - The memory buffer for the file.
MemoryBuffer *Buffer;
@@ -51,16 +58,29 @@ class SourceMgr {
/// is really private to SourceMgr.cpp.
mutable void *LineNoCache;
+ DiagHandlerTy DiagHandler;
+ void *DiagContext;
+ unsigned DiagLocCookie;
+
SourceMgr(const SourceMgr&); // DO NOT IMPLEMENT
void operator=(const SourceMgr&); // DO NOT IMPLEMENT
public:
- SourceMgr() : LineNoCache(0) {}
+ SourceMgr() : LineNoCache(0), DiagHandler(0), DiagContext(0) {}
~SourceMgr();
void setIncludeDirs(const std::vector<std::string> &Dirs) {
IncludeDirectories = Dirs;
}
+ /// setDiagHandler - Specify a diagnostic handler to be invoked every time
+ /// PrintMessage is called. Ctx and Cookie are passed into the handler when
+ /// it is invoked.
+ void setDiagHandler(DiagHandlerTy DH, void *Ctx = 0, unsigned Cookie = 0) {
+ DiagHandler = DH;
+ DiagContext = Ctx;
+ DiagLocCookie = Cookie;
+ }
+
const SrcBuffer &getBufferInfo(unsigned i) const {
assert(i < Buffers.size() && "Invalid Buffer ID!");
return Buffers[i];
@@ -76,6 +96,8 @@ public:
return Buffers[i].IncludeLoc;
}
+ /// AddNewSourceBuffer - Add a new source buffer to this source manager. This
+ /// takes ownership of the memory buffer.
unsigned AddNewSourceBuffer(MemoryBuffer *F, SMLoc IncludeLoc) {
SrcBuffer NB;
NB.Buffer = F;
@@ -126,19 +148,39 @@ private:
/// SMDiagnostic - Instances of this class encapsulate one diagnostic report,
/// allowing printing to a raw_ostream as a caret diagnostic.
class SMDiagnostic {
+ const SourceMgr *SM;
+ SMLoc Loc;
std::string Filename;
int LineNo, ColumnNo;
std::string Message, LineContents;
unsigned ShowLine : 1;
public:
- SMDiagnostic() : LineNo(0), ColumnNo(0), ShowLine(0) {}
- SMDiagnostic(const std::string &FN, int Line, int Col,
+ // Null diagnostic.
+ SMDiagnostic() : SM(0), LineNo(0), ColumnNo(0), ShowLine(0) {}
+ // Diagnostic with no location (e.g. file not found, command line arg error).
+ SMDiagnostic(const std::string &filename, const std::string &Msg,
+ bool showline = true)
+ : SM(0), Filename(filename), LineNo(-1), ColumnNo(-1),
+ Message(Msg), ShowLine(showline) {}
+
+ // Diagnostic with a location.
+ SMDiagnostic(const SourceMgr &sm, SMLoc L, const std::string &FN,
+ int Line, int Col,
const std::string &Msg, const std::string &LineStr,
bool showline = true)
- : Filename(FN), LineNo(Line), ColumnNo(Col), Message(Msg),
+ : SM(&sm), Loc(L), Filename(FN), LineNo(Line), ColumnNo(Col), Message(Msg),
LineContents(LineStr), ShowLine(showline) {}
+ const SourceMgr *getSourceMgr() const { return SM; }
+ SMLoc getLoc() const { return Loc; }
+ const std::string &getFilename() { return Filename; }
+ int getLineNo() const { return LineNo; }
+ int getColumnNo() const { return ColumnNo; }
+ const std::string &getMessage() const { return Message; }
+ const std::string &getLineContents() const { return LineContents; }
+ bool getShowLine() const { return ShowLine; }
+
void Print(const char *ProgName, raw_ostream &S) const;
};
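For reference, a minimal sketch of registering the new diagnostic callback; the handler body is illustrative only:

    #include "llvm/Support/SourceMgr.h"
    #include "llvm/Support/raw_ostream.h"

    static void handleDiag(const llvm::SMDiagnostic &D, void *Ctx, unsigned LocCookie) {
      // Route parser diagnostics wherever the client wants instead of the default output.
      D.Print("myparser", llvm::errs());
    }

    void installHandler(llvm::SourceMgr &SM) {
      SM.setDiagHandler(handleDiag, /*Ctx=*/0, /*Cookie=*/0);
    }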
diff --git a/libclamav/c++/llvm/include/llvm/Support/StandardPasses.h b/libclamav/c++/llvm/include/llvm/Support/StandardPasses.h
index f233c18..bb3bddd 100644
--- a/libclamav/c++/llvm/include/llvm/Support/StandardPasses.h
+++ b/libclamav/c++/llvm/include/llvm/Support/StandardPasses.h
@@ -20,6 +20,7 @@
#define LLVM_SUPPORT_STANDARDPASSES_H
#include "llvm/PassManager.h"
+#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Transforms/Scalar.h"
@@ -31,7 +32,7 @@ namespace llvm {
///
/// \arg OptimizationLevel - The optimization level, corresponding to -O0,
/// -O1, etc.
- static inline void createStandardFunctionPasses(FunctionPassManager *PM,
+ static inline void createStandardFunctionPasses(PassManagerBase *PM,
unsigned OptimizationLevel);
/// createStandardModulePasses - Add the standard list of module passes to the
@@ -46,7 +47,7 @@ namespace llvm {
/// \arg HaveExceptions - Whether the module may have code using exceptions.
/// \arg InliningPass - The inlining pass to use, if any, or null. This will
/// always be added, even at -O0.
- static inline void createStandardModulePasses(PassManager *PM,
+ static inline void createStandardModulePasses(PassManagerBase *PM,
unsigned OptimizationLevel,
bool OptimizeSize,
bool UnitAtATime,
@@ -61,14 +62,14 @@ namespace llvm {
/// Internalize - Run the internalize pass.
/// RunInliner - Use a function inlining pass.
/// VerifyEach - Run the verifier after each pass.
- static inline void createStandardLTOPasses(PassManager *PM,
+ static inline void createStandardLTOPasses(PassManagerBase *PM,
bool Internalize,
bool RunInliner,
bool VerifyEach);
// Implementations
- static inline void createStandardFunctionPasses(FunctionPassManager *PM,
+ static inline void createStandardFunctionPasses(PassManagerBase *PM,
unsigned OptimizationLevel) {
if (OptimizationLevel > 0) {
PM->add(createCFGSimplificationPass());
@@ -82,7 +83,7 @@ namespace llvm {
/// createStandardModulePasses - Add the standard module passes. This is
/// expected to be run after the standard function passes.
- static inline void createStandardModulePasses(PassManager *PM,
+ static inline void createStandardModulePasses(PassManagerBase *PM,
unsigned OptimizationLevel,
bool OptimizeSize,
bool UnitAtATime,
@@ -116,7 +117,6 @@ namespace llvm {
PM->add(createArgumentPromotionPass()); // Scalarize uninlined fn args
// Start of function pass.
-
PM->add(createScalarReplAggregatesPass()); // Break up aggregate allocas
if (SimplifyLibCalls)
PM->add(createSimplifyLibCallsPass()); // Library Call Optimizations
@@ -146,6 +146,7 @@ namespace llvm {
// opened up by them.
PM->add(createInstructionCombiningPass());
PM->add(createJumpThreadingPass()); // Thread jumps
+ PM->add(createCorrelatedValuePropagationPass());
PM->add(createDeadStoreEliminationPass()); // Delete dead stores
PM->add(createAggressiveDCEPass()); // Delete dead instructions
PM->add(createCFGSimplificationPass()); // Merge & remove BBs
@@ -164,14 +165,14 @@ namespace llvm {
}
}
- static inline void addOnePass(PassManager *PM, Pass *P, bool AndVerify) {
+ static inline void addOnePass(PassManagerBase *PM, Pass *P, bool AndVerify) {
PM->add(P);
if (AndVerify)
PM->add(createVerifierPass());
}
- static inline void createStandardLTOPasses(PassManager *PM,
+ static inline void createStandardLTOPasses(PassManagerBase *PM,
bool Internalize,
bool RunInliner,
bool VerifyEach) {
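Since the helpers now accept any PassManagerBase, either a PassManager or a FunctionPassManager can be handed in. A minimal sketch, assuming the full LLVM 2.8 parameter list (only the leading parameters are visible in the hunk above):

    #include "llvm/PassManager.h"
    #include "llvm/Support/StandardPasses.h"
    #include "llvm/Transforms/IPO.h"

    void optimize(llvm::Module &M) {
      llvm::PassManager PM;
      llvm::createStandardModulePasses(&PM, /*OptimizationLevel=*/2,
                                       /*OptimizeSize=*/false, /*UnitAtATime=*/true,
                                       /*UnrollLoops=*/true, /*SimplifyLibCalls=*/true,
                                       /*HaveExceptions=*/false,
                                       llvm::createFunctionInliningPass());
      PM.run(M);
    }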
diff --git a/libclamav/c++/llvm/include/llvm/Support/StringPool.h b/libclamav/c++/llvm/include/llvm/Support/StringPool.h
index 82e46d4..de05e0b 100644
--- a/libclamav/c++/llvm/include/llvm/Support/StringPool.h
+++ b/libclamav/c++/llvm/include/llvm/Support/StringPool.h
@@ -64,7 +64,7 @@ namespace llvm {
/// intern - Adds a string to the pool and returns a reference-counted
/// pointer to it. No additional memory is allocated if the string already
/// exists in the pool.
- PooledStringPtr intern(const StringRef &Str);
+ PooledStringPtr intern(StringRef Str);
/// empty - Checks whether the pool is empty. Returns true if so.
///
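For reference, a minimal sketch of interning through the by-value StringRef signature above:

    #include "llvm/Support/StringPool.h"

    void example() {
      llvm::StringPool Pool;
      llvm::PooledStringPtr A = Pool.intern("alpha");
      llvm::PooledStringPtr B = Pool.intern("alpha");  // reuses the existing entry
      // The pooled string is freed once the last PooledStringPtr referencing it dies.
    }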
diff --git a/libclamav/c++/llvm/include/llvm/Support/SystemUtils.h b/libclamav/c++/llvm/include/llvm/Support/SystemUtils.h
index b3d83fc..3c182c1 100644
--- a/libclamav/c++/llvm/include/llvm/Support/SystemUtils.h
+++ b/libclamav/c++/llvm/include/llvm/Support/SystemUtils.h
@@ -21,10 +21,9 @@ namespace llvm {
class raw_ostream;
namespace sys { class Path; }
-/// Determine if the raw_ostream provided is connected to the outs() and
-/// displayed or not (to a console window). If so, generate a warning message
-/// advising against display of bitcode and return true. Otherwise just return
-/// false
+/// Determine if the raw_ostream provided is connected to a terminal. If so,
+/// generate a warning message to errs() advising against display of bitcode
+/// and return true. Otherwise just return false.
/// @brief Check for output written to a console
bool CheckBitcodeOutputToConsole(
raw_ostream &stream_to_check, ///< The stream to be checked
diff --git a/libclamav/c++/llvm/include/llvm/Support/Timer.h b/libclamav/c++/llvm/include/llvm/Support/Timer.h
index 8a0f55d..f959136 100644
--- a/libclamav/c++/llvm/include/llvm/Support/Timer.h
+++ b/libclamav/c++/llvm/include/llvm/Support/Timer.h
@@ -16,16 +16,63 @@
#define LLVM_SUPPORT_TIMER_H
#include "llvm/System/DataTypes.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/ADT/StringRef.h"
+#include <cassert>
#include <string>
#include <vector>
-#include <cassert>
+#include <utility>
namespace llvm {
+class Timer;
class TimerGroup;
class raw_ostream;
+class TimeRecord {
+ double WallTime; // Wall clock time elapsed in seconds
+ double UserTime; // User time elapsed
+ double SystemTime; // System time elapsed
+ ssize_t MemUsed; // Memory allocated (in bytes)
+public:
+ TimeRecord() : WallTime(0), UserTime(0), SystemTime(0), MemUsed(0) {}
+
+ /// getCurrentTime - Get the current time and memory usage. If Start is true
+ /// we get the memory usage before the time, otherwise we get time before
+ /// memory usage. This matters if the time to get the memory usage is
+ /// significant and shouldn't be counted as part of a duration.
+ static TimeRecord getCurrentTime(bool Start = true);
+
+ double getProcessTime() const { return UserTime+SystemTime; }
+ double getUserTime() const { return UserTime; }
+ double getSystemTime() const { return SystemTime; }
+ double getWallTime() const { return WallTime; }
+ ssize_t getMemUsed() const { return MemUsed; }
+
+
+ // operator< - Allow sorting.
+ bool operator<(const TimeRecord &T) const {
+ // Sort by Wall Time elapsed, as it is the only thing really accurate
+ return WallTime < T.WallTime;
+ }
+
+ void operator+=(const TimeRecord &RHS) {
+ WallTime += RHS.WallTime;
+ UserTime += RHS.UserTime;
+ SystemTime += RHS.SystemTime;
+ MemUsed += RHS.MemUsed;
+ }
+ void operator-=(const TimeRecord &RHS) {
+ WallTime -= RHS.WallTime;
+ UserTime -= RHS.UserTime;
+ SystemTime -= RHS.SystemTime;
+ MemUsed -= RHS.MemUsed;
+ }
+
+ /// print - Print the current timer to standard error, and reset the "Started"
+ /// flag.
+ void print(const TimeRecord &Total, raw_ostream &OS) const;
+};
+
/// Timer - This class is used to track the amount of time spent between
/// invocations of its startTimer()/stopTimer() methods. Given appropriate OS
/// support it can also keep track of the RSS of the program at various points.
@@ -35,65 +82,32 @@ class raw_ostream;
/// if they are never started.
///
class Timer {
- double Elapsed; // Wall clock time elapsed in seconds
- double UserTime; // User time elapsed
- double SystemTime; // System time elapsed
- ssize_t MemUsed; // Memory allocated (in bytes)
- size_t PeakMem; // Peak memory used
- size_t PeakMemBase; // Temporary for peak calculation...
- std::string Name; // The name of this time variable
+ TimeRecord Time;
+ std::string Name; // The name of this time variable.
bool Started; // Has this time variable ever been started?
TimerGroup *TG; // The TimerGroup this Timer is in.
- mutable sys::SmartMutex<true> Lock; // Mutex for the contents of this Timer.
+
+ Timer **Prev, *Next; // Doubly linked list of timers in the group.
public:
- explicit Timer(const std::string &N);
- Timer(const std::string &N, TimerGroup &tg);
- Timer(const Timer &T);
- ~Timer();
-
- double getProcessTime() const { return UserTime+SystemTime; }
- double getWallTime() const { return Elapsed; }
- ssize_t getMemUsed() const { return MemUsed; }
- size_t getPeakMem() const { return PeakMem; }
- std::string getName() const { return Name; }
-
+ explicit Timer(StringRef N) : TG(0) { init(N); }
+ Timer(StringRef N, TimerGroup &tg) : TG(0) { init(N, tg); }
+ Timer(const Timer &RHS) : TG(0) {
+ assert(RHS.TG == 0 && "Can only copy uninitialized timers");
+ }
const Timer &operator=(const Timer &T) {
- if (&T < this) {
- T.Lock.acquire();
- Lock.acquire();
- } else {
- Lock.acquire();
- T.Lock.acquire();
- }
-
- Elapsed = T.Elapsed;
- UserTime = T.UserTime;
- SystemTime = T.SystemTime;
- MemUsed = T.MemUsed;
- PeakMem = T.PeakMem;
- PeakMemBase = T.PeakMemBase;
- Name = T.Name;
- Started = T.Started;
- assert(TG == T.TG && "Can only assign timers in the same TimerGroup!");
-
- if (&T < this) {
- T.Lock.release();
- Lock.release();
- } else {
- Lock.release();
- T.Lock.release();
- }
-
+ assert(TG == 0 && T.TG == 0 && "Can only assign uninit timers");
return *this;
}
+ ~Timer();
- // operator< - Allow sorting...
- bool operator<(const Timer &T) const {
- // Sort by Wall Time elapsed, as it is the only thing really accurate
- return Elapsed < T.Elapsed;
- }
- bool operator>(const Timer &T) const { return T.operator<(*this); }
-
+ // Create an uninitialized timer, client must use 'init'.
+ explicit Timer() : TG(0) {}
+ void init(StringRef N);
+ void init(StringRef N, TimerGroup &tg);
+
+ const std::string &getName() const { return Name; }
+ bool isInitialized() const { return TG != 0; }
+
/// startTimer - Start the timer running. Time between calls to
/// startTimer/stopTimer is counted by the Timer class. Note that these calls
/// must be correctly paired.
@@ -104,25 +118,8 @@ public:
///
void stopTimer();
- /// addPeakMemoryMeasurement - This method should be called whenever memory
- /// usage needs to be checked. It adds a peak memory measurement to the
- /// currently active timers, which will be printed when the timer group prints
- ///
- static void addPeakMemoryMeasurement();
-
- /// print - Print the current timer to standard error, and reset the "Started"
- /// flag.
- void print(const Timer &Total, raw_ostream &OS);
-
private:
friend class TimerGroup;
-
- // Copy ctor, initialize with no TG member.
- Timer(bool, const Timer &T);
-
- /// sum - Add the time accumulated in the specified timer into this timer.
- ///
- void sum(const Timer &T);
};
@@ -139,12 +136,10 @@ public:
T->startTimer();
}
explicit TimeRegion(Timer *t) : T(t) {
- if (T)
- T->startTimer();
+ if (T) T->startTimer();
}
~TimeRegion() {
- if (T)
- T->stopTimer();
+ if (T) T->stopTimer();
}
};
@@ -155,9 +150,10 @@ public:
/// is primarily used for debugging and for hunting performance problems.
///
struct NamedRegionTimer : public TimeRegion {
- explicit NamedRegionTimer(const std::string &Name);
- explicit NamedRegionTimer(const std::string &Name,
- const std::string &GroupName);
+ explicit NamedRegionTimer(StringRef Name,
+ bool Enabled = true);
+ explicit NamedRegionTimer(StringRef Name, StringRef GroupName,
+ bool Enabled = true);
};
@@ -168,20 +164,29 @@ struct NamedRegionTimer : public TimeRegion {
///
class TimerGroup {
std::string Name;
- unsigned NumTimers;
- std::vector<Timer> TimersToPrint;
+ Timer *FirstTimer; // First timer in the group.
+ std::vector<std::pair<TimeRecord, std::string> > TimersToPrint;
+
+ TimerGroup **Prev, *Next; // Doubly linked list of TimerGroup's.
+ TimerGroup(const TimerGroup &TG); // DO NOT IMPLEMENT
+ void operator=(const TimerGroup &TG); // DO NOT IMPLEMENT
public:
- explicit TimerGroup(const std::string &name) : Name(name), NumTimers(0) {}
- ~TimerGroup() {
- assert(NumTimers == 0 &&
- "TimerGroup destroyed before all contained timers!");
- }
+ explicit TimerGroup(StringRef name);
+ ~TimerGroup();
+
+ void setName(StringRef name) { Name.assign(name.begin(), name.end()); }
+ /// print - Print any started timers in this group and zero them.
+ void print(raw_ostream &OS);
+
+ /// printAll - This static method prints all timers and clears them all out.
+ static void printAll(raw_ostream &OS);
+
private:
friend class Timer;
- void addTimer();
- void removeTimer();
- void addTimerToPrint(const Timer &T);
+ void addTimer(Timer &T);
+ void removeTimer(Timer &T);
+ void PrintQueuedTimers(raw_ostream &OS);
};
} // End llvm namespace
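For reference, a minimal sketch of the reworked Timer/TimerGroup interface; the group and timer names are illustrative:

    #include "llvm/Support/Timer.h"
    #include "llvm/Support/raw_ostream.h"

    void timedWork() {
      llvm::TimerGroup Group("frontend");
      llvm::Timer Parse("parsing", Group);
      {
        llvm::TimeRegion R(Parse);   // startTimer()/stopTimer() via RAII
        // ... the work being measured ...
      }
      Group.print(llvm::errs());     // print and zero any started timers in the group
    }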
diff --git a/libclamav/c++/llvm/include/llvm/Support/TypeBuilder.h b/libclamav/c++/llvm/include/llvm/Support/TypeBuilder.h
index 270ac52..81c2747 100644
--- a/libclamav/c++/llvm/include/llvm/Support/TypeBuilder.h
+++ b/libclamav/c++/llvm/include/llvm/Support/TypeBuilder.h
@@ -73,7 +73,7 @@ namespace llvm {
///
/// TypeBuilder cannot handle recursive types or types you only know at runtime.
/// If you try to give it a recursive type, it will deadlock, infinitely
-/// recurse, or throw a recursive_init exception.
+/// recurse, or do something similarly undesirable.
template<typename T, bool cross_compilable> class TypeBuilder {};
// Types for use with cross-compilable TypeBuilders. These correspond
diff --git a/libclamav/c++/llvm/include/llvm/Support/ValueHandle.h b/libclamav/c++/llvm/include/llvm/Support/ValueHandle.h
index 82c3cae..c0cdc35 100644
--- a/libclamav/c++/llvm/include/llvm/Support/ValueHandle.h
+++ b/libclamav/c++/llvm/include/llvm/Support/ValueHandle.h
@@ -284,8 +284,7 @@ class TrackingVH : public ValueHandleBase {
Value *VP = ValueHandleBase::getValPtr();
// Null is always ok.
- if (!VP)
- return;
+ if (!VP) return;
// Check that this value is valid (i.e., it hasn't been deleted). We
// explicitly delay this check until access to avoid requiring clients to be
@@ -302,7 +301,7 @@ class TrackingVH : public ValueHandleBase {
ValueTy *getValPtr() const {
CheckValidity();
- return static_cast<ValueTy*>(ValueHandleBase::getValPtr());
+ return (ValueTy*)ValueHandleBase::getValPtr();
}
void setValPtr(ValueTy *P) {
CheckValidity();
@@ -316,7 +315,7 @@ class TrackingVH : public ValueHandleBase {
public:
TrackingVH() : ValueHandleBase(Tracking) {}
- TrackingVH(ValueTy *P) : ValueHandleBase(Tracking, P) {}
+ TrackingVH(ValueTy *P) : ValueHandleBase(Tracking, GetAsValue(P)) {}
TrackingVH(const TrackingVH &RHS) : ValueHandleBase(Tracking, RHS) {}
operator ValueTy*() const {
diff --git a/libclamav/c++/llvm/include/llvm/Support/raw_ostream.h b/libclamav/c++/llvm/include/llvm/Support/raw_ostream.h
index 0f227cc..39bdbd8 100644
--- a/libclamav/c++/llvm/include/llvm/Support/raw_ostream.h
+++ b/libclamav/c++/llvm/include/llvm/Support/raw_ostream.h
@@ -58,10 +58,6 @@ private:
ExternalBuffer
} BufferMode;
- /// Error This flag is true if an error of any kind has been detected.
- ///
- bool Error;
-
public:
// color order matches ANSI escape sequence, don't change
enum Colors {
@@ -77,7 +73,7 @@ public:
};
explicit raw_ostream(bool unbuffered=false)
- : BufferMode(unbuffered ? Unbuffered : InternalBuffer), Error(false) {
+ : BufferMode(unbuffered ? Unbuffered : InternalBuffer) {
// Start out ready to flush.
OutBufStart = OutBufEnd = OutBufCur = 0;
}
@@ -87,20 +83,6 @@ public:
/// tell - Return the current offset with the file.
uint64_t tell() const { return current_pos() + GetNumBytesInBuffer(); }
- /// has_error - Return the value of the flag in this raw_ostream indicating
- /// whether an output error has been encountered.
- bool has_error() const {
- return Error;
- }
-
- /// clear_error - Set the flag read by has_error() to false. If the error
- /// flag is set at the time when this raw_ostream's destructor is called,
- /// llvm_report_error is called to report the error. Use clear_error()
- /// after handling the error to avoid this behavior.
- void clear_error() {
- Error = false;
- }
-
//===--------------------------------------------------------------------===//
// Configuration Interface
//===--------------------------------------------------------------------===//
@@ -233,8 +215,8 @@ public:
/// @param bold bold/brighter text, default false
/// @param bg if true change the background, default: change foreground
/// @returns itself so it can be used within << invocations
- virtual raw_ostream &changeColor(enum Colors, bool = false,
- bool = false) { return *this; }
+ virtual raw_ostream &changeColor(enum Colors, bool = false, bool = false) {
+ return *this; }
/// Resets the colors to terminal defaults. Call this when you are done
/// outputting colored text, or before program exit.
@@ -284,10 +266,6 @@ protected:
/// underlying output mechanism.
virtual size_t preferred_buffer_size() const;
- /// error_detected - Set the flag indicating that an output error has
- /// been encountered.
- void error_detected() { Error = true; }
-
/// getBufferStart - Return the beginning of the current stream buffer, or 0
/// if the stream is unbuffered.
const char *getBufferStart() const { return OutBufStart; }
@@ -318,6 +296,11 @@ private:
class raw_fd_ostream : public raw_ostream {
int FD;
bool ShouldClose;
+
+ /// Error This flag is true if an error of any kind has been detected.
+ ///
+ bool Error;
+
uint64_t pos;
/// write_impl - See raw_ostream::write_impl.
@@ -330,6 +313,10 @@ class raw_fd_ostream : public raw_ostream {
/// preferred_buffer_size - Determine an efficient buffer size.
virtual size_t preferred_buffer_size() const;
+ /// error_detected - Set the flag indicating that an output error has
+ /// been encountered.
+ void error_detected() { Error = true; }
+
public:
enum {
@@ -352,8 +339,11 @@ public:
/// be immediately destroyed; the string will be empty if no error occurred.
/// This allows optional flags to control how the file will be opened.
///
- /// \param Filename - The file to open. If this is "-" then the
- /// stream will use stdout instead.
+ /// As a special case, if Filename is "-", then the stream will use
+ /// STDOUT_FILENO instead of opening a file. Note that it will still consider
+ /// itself to own the file descriptor. In particular, it will close the
+ /// file descriptor when it is done (this is necessary to detect
+ /// output errors).
raw_fd_ostream(const char *Filename, std::string &ErrorInfo,
unsigned Flags = 0);
@@ -361,15 +351,17 @@ public:
/// ShouldClose is true, this closes the file when the stream is destroyed.
raw_fd_ostream(int fd, bool shouldClose,
bool unbuffered=false) : raw_ostream(unbuffered), FD(fd),
- ShouldClose(shouldClose) {}
+ ShouldClose(shouldClose),
+ Error(false) {}
~raw_fd_ostream();
/// close - Manually flush the stream and close the file.
+ /// Note that this does not call fsync.
void close();
/// seek - Flushes the stream and repositions the underlying file descriptor
- /// positition to the offset specified from the beginning of the file.
+ /// position to the offset specified from the beginning of the file.
uint64_t seek(uint64_t off);
virtual raw_ostream &changeColor(enum Colors colors, bool bold=false,
@@ -377,24 +369,27 @@ public:
virtual raw_ostream &resetColor();
virtual bool is_displayed() const;
-};
-/// raw_stdout_ostream - This is a stream that always prints to stdout.
-///
-class raw_stdout_ostream : public raw_fd_ostream {
- // An out of line virtual method to provide a home for the class vtable.
- virtual void handle();
-public:
- raw_stdout_ostream();
-};
+ /// has_error - Return the value of the flag in this raw_fd_ostream indicating
+ /// whether an output error has been encountered.
+ /// This doesn't implicitly flush any pending output. Also, it doesn't
+ /// guarantee to detect all errors unless the stream has been closed.
+ bool has_error() const {
+ return Error;
+ }
-/// raw_stderr_ostream - This is a stream that always prints to stderr.
-///
-class raw_stderr_ostream : public raw_fd_ostream {
- // An out of line virtual method to provide a home for the class vtable.
- virtual void handle();
-public:
- raw_stderr_ostream();
+ /// clear_error - Set the flag read by has_error() to false. If the error
+ /// flag is set at the time when this raw_ostream's destructor is called,
+ /// report_fatal_error is called to report the error. Use clear_error()
+ /// after handling the error to avoid this behavior.
+ ///
+ /// "Errors should never pass silently.
+ /// Unless explicitly silenced."
+ /// - from The Zen of Python, by Tim Peters
+ ///
+ void clear_error() {
+ Error = false;
+ }
};
/// outs() - This returns a reference to a raw_ostream for standard output.
@@ -460,7 +455,7 @@ public:
/// outside of the raw_svector_ostream's control. It is only safe to do this
/// if the raw_svector_ostream has previously been flushed.
void resync();
-
+
/// str - Flushes the stream contents to the target vector and return a
/// StringRef for the vector contents.
StringRef str();
@@ -480,6 +475,45 @@ public:
~raw_null_ostream();
};
+/// tool_output_file - This class contains a raw_fd_ostream and adds a
+/// few extra features commonly needed for compiler-like tool output files:
+/// - The file is automatically deleted if the process is killed.
+/// - The file is automatically deleted when the tool_output_file
+/// object is destroyed unless the client calls keep().
+class tool_output_file {
+ /// Installer - This class is declared before the raw_fd_ostream so that
+ /// it is constructed before the raw_fd_ostream is constructed and
+ /// destructed after the raw_fd_ostream is destructed. It installs
+ /// cleanups in its constructor and uninstalls them in its destructor.
+ class CleanupInstaller {
+ /// Filename - The name of the file.
+ std::string Filename;
+ public:
+ /// Keep - The flag which indicates whether we should not delete the file.
+ bool Keep;
+
+ explicit CleanupInstaller(const char *filename);
+ ~CleanupInstaller();
+ } Installer;
+
+ /// OS - The contained stream. This is intentionally declared after
+ /// Installer.
+ raw_fd_ostream OS;
+
+public:
+ /// tool_output_file - This constructor's arguments are passed to
+ /// to raw_fd_ostream's constructor.
+ tool_output_file(const char *filename, std::string &ErrorInfo,
+ unsigned Flags = 0);
+
+ /// os - Return the contained raw_fd_ostream.
+ raw_fd_ostream &os() { return OS; }
+
+ /// keep - Indicate that the tool's job wrt this output file has been
+ /// successful and the file should not be deleted.
+ void keep() { Installer.Keep = true; }
+};
+
} // end llvm namespace
#endif
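For reference, a minimal sketch of the new tool_output_file helper together with the relocated has_error()/clear_error() flags; the output filename is arbitrary:

    #include "llvm/Support/raw_ostream.h"
    #include <string>

    bool writeOutput() {
      std::string Err;
      llvm::tool_output_file Out("result.txt", Err);
      if (!Err.empty())
        return false;                 // could not open the file
      Out.os() << "hello\n";
      Out.os().close();
      if (Out.os().has_error()) {
        Out.os().clear_error();       // handled here; avoid report_fatal_error in the dtor
        return false;                 // the partial file is deleted automatically
      }
      Out.keep();                     // success: keep the file on destruction
      return true;
    }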
diff --git a/libclamav/c++/llvm/include/llvm/SymbolTableListTraits.h b/libclamav/c++/llvm/include/llvm/SymbolTableListTraits.h
index 39953e1..91a4eb9 100644
--- a/libclamav/c++/llvm/include/llvm/SymbolTableListTraits.h
+++ b/libclamav/c++/llvm/include/llvm/SymbolTableListTraits.h
@@ -47,9 +47,8 @@ public:
/// of instructions, it returns the BasicBlock that owns them.
ItemParentClass *getListOwner() {
typedef iplist<ValueSubClass> ItemParentClass::*Sublist;
- Sublist Sub(ItemParentClass::
- getSublistAccess(static_cast<ValueSubClass*>(0)));
- size_t Offset(size_t(&((ItemParentClass*)0->*Sub)));
+ size_t Offset(size_t(&((ItemParentClass*)0->*ItemParentClass::
+ getSublistAccess(static_cast<ValueSubClass*>(0)))));
iplist<ValueSubClass>* Anchor(static_cast<iplist<ValueSubClass>*>(this));
return reinterpret_cast<ItemParentClass*>(reinterpret_cast<char*>(Anchor)-
Offset);
diff --git a/libclamav/c++/llvm/include/llvm/System/DataTypes.h.cmake b/libclamav/c++/llvm/include/llvm/System/DataTypes.h.cmake
index d9ca273..9efe75a 100644
--- a/libclamav/c++/llvm/include/llvm/System/DataTypes.h.cmake
+++ b/libclamav/c++/llvm/include/llvm/System/DataTypes.h.cmake
@@ -109,41 +109,59 @@ typedef unsigned short uint16_t;
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef signed int ssize_t;
-#define INT8_MAX 127
-#define INT8_MIN -128
-#define UINT8_MAX 255
-#define INT16_MAX 32767
-#define INT16_MIN -32768
-#define UINT16_MAX 65535
-#define INT32_MAX 2147483647
-#define INT32_MIN -2147483648
-#define UINT32_MAX 4294967295U
+#ifndef INT8_MAX
+# define INT8_MAX 127
+#endif
+#ifndef INT8_MIN
+# define INT8_MIN -128
+#endif
+#ifndef UINT8_MAX
+# define UINT8_MAX 255
+#endif
+#ifndef INT16_MAX
+# define INT16_MAX 32767
+#endif
+#ifndef INT16_MIN
+# define INT16_MIN -32768
+#endif
+#ifndef UINT16_MAX
+# define UINT16_MAX 65535
+#endif
+#ifndef INT32_MAX
+# define INT32_MAX 2147483647
+#endif
+#ifndef INT32_MIN
+# define INT32_MIN -2147483648
+#endif
+#ifndef UINT32_MAX
+# define UINT32_MAX 4294967295U
+#endif
/* Certain compatibility updates to VC++ introduce the `cstdint'
* header, which defines the INT*_C macros. On default installs they
* are absent. */
#ifndef INT8_C
-# define INT8_C(C) C
+# define INT8_C(C) C##i8
#endif
#ifndef UINT8_C
-# define UINT8_C(C) C
+# define UINT8_C(C) C##ui8
#endif
#ifndef INT16_C
-# define INT16_C(C) C
+# define INT16_C(C) C##i16
#endif
#ifndef UINT16_C
-# define UINT16_C(C) C
+# define UINT16_C(C) C##ui16
#endif
#ifndef INT32_C
-# define INT32_C(C) C
+# define INT32_C(C) C##i32
#endif
#ifndef UINT32_C
-# define UINT32_C(C) C ## U
+# define UINT32_C(C) C##ui32
#endif
#ifndef INT64_C
-# define INT64_C(C) ((int64_t) C ## LL)
+# define INT64_C(C) C##i64
#endif
#ifndef UINT64_C
-# define UINT64_C(C) ((uint64_t) C ## ULL)
+# define UINT64_C(C) C##ui64
#endif
#endif /* _MSC_VER */
diff --git a/libclamav/c++/llvm/include/llvm/System/DataTypes.h.in b/libclamav/c++/llvm/include/llvm/System/DataTypes.h.in
index 1f8ce79..6537f30 100644
--- a/libclamav/c++/llvm/include/llvm/System/DataTypes.h.in
+++ b/libclamav/c++/llvm/include/llvm/System/DataTypes.h.in
@@ -15,7 +15,7 @@
|* [u]int(32|64)_t : typedefs for signed and unsigned 32/64 bit system types*|
|* [U]INT(8|16|32|64)_(MIN|MAX) : Constants for the min and max values. *|
|* *|
-|* No library is required when using these functinons. *|
+|* No library is required when using these functions. *|
|* *|
|*===----------------------------------------------------------------------===*/
diff --git a/libclamav/c++/llvm/include/llvm/System/Memory.h b/libclamav/c++/llvm/include/llvm/System/Memory.h
index 01bcab1..2dd36e8 100644
--- a/libclamav/c++/llvm/include/llvm/System/Memory.h
+++ b/libclamav/c++/llvm/include/llvm/System/Memory.h
@@ -63,7 +63,6 @@ namespace sys {
///
/// On success, this returns false, otherwise it returns true and fills
/// in *ErrMsg.
- /// @throws std::string if an error occurred.
/// @brief Release Read/Write/Execute memory.
static bool ReleaseRWX(MemoryBlock &block, std::string *ErrMsg = 0);
diff --git a/libclamav/c++/llvm/include/llvm/System/Path.h b/libclamav/c++/llvm/include/llvm/System/Path.h
index d4af478..23b18d4 100644
--- a/libclamav/c++/llvm/include/llvm/System/Path.h
+++ b/libclamav/c++/llvm/include/llvm/System/Path.h
@@ -164,6 +164,7 @@ namespace sys {
/// GetMainExecutable - Return the path to the main executable, given the
/// value of argv[0] from program startup and the address of main itself.
+ /// In extremis, this function may fail and return an empty path.
static Path GetMainExecutable(const char *argv0, void *MainAddr);
/// This is one of the very few ways in which a path can be constructed
@@ -292,14 +293,6 @@ namespace sys {
/// @name Disk Accessors
/// @{
public:
- /// This function determines if the path name in this object references
- /// the root (top level directory) of the file system. The details of what
- /// is considered the "root" may vary from system to system so this method
- /// will do the necessary checking.
- /// @returns true iff the path name references the root directory.
- /// @brief Determines if the path references the root directory.
- bool isRootDirectory() const;
-
/// This function determines if the path name is absolute, as opposed to
/// relative.
/// @brief Determine if the path is absolute.
@@ -344,9 +337,9 @@ namespace sys {
/// native Dynamic Library (shared library, shared object) by looking at
/// the file's magic number. The Path object must reference a file, not a
/// directory.
- /// @return strue if the file starts with the magid number for a native
+ /// @returns true if the file starts with the magic number for a native
/// shared library.
- /// @brief Determine if the path reference a dynamic library.
+ /// @brief Determine if the path references a dynamic library.
bool isDynamicLibrary() const;
/// This function determines if the path name references an existing file
diff --git a/libclamav/c++/llvm/include/llvm/System/Process.h b/libclamav/c++/llvm/include/llvm/System/Process.h
index 010499a..41bcd69 100644
--- a/libclamav/c++/llvm/include/llvm/System/Process.h
+++ b/libclamav/c++/llvm/include/llvm/System/Process.h
@@ -30,7 +30,6 @@ namespace sys {
/// This static function will return the operating system's virtual memory
/// page size.
/// @returns The number of bytes in a virtual memory page.
- /// @throws nothing
/// @brief Get the virtual memory page size
static unsigned GetPageSize();
@@ -38,7 +37,6 @@ namespace sys {
/// by the process. This only counts the memory allocated via the malloc,
/// calloc and realloc functions and includes any "free" holes in the
/// allocated space.
- /// @throws nothing
/// @brief Return process memory usage.
static size_t GetMallocUsage();
diff --git a/libclamav/c++/llvm/include/llvm/System/Program.h b/libclamav/c++/llvm/include/llvm/System/Program.h
index 69ce478..7017305 100644
--- a/libclamav/c++/llvm/include/llvm/System/Program.h
+++ b/libclamav/c++/llvm/include/llvm/System/Program.h
@@ -116,7 +116,6 @@ namespace sys {
/// locations to search (e.g. the PATH on Unix).
/// @returns A Path object initialized to the path of the program or a
/// Path object that is empty (invalid) if the program could not be found.
- /// @throws nothing
/// @brief Construct a Program by finding it by name.
static Path FindProgramByName(const std::string& name);
@@ -129,7 +128,6 @@ namespace sys {
/// A convenience function equivalent to Program prg; prg.Execute(..);
/// prg.Wait(..);
- /// @throws nothing
/// @see Execute, Wait
static int ExecuteAndWait(const Path& path,
const char** args,
@@ -140,7 +138,6 @@ namespace sys {
std::string* ErrMsg = 0);
/// A convenience function equivalent to Program prg; prg.Execute(..);
- /// @throws nothing
/// @see Execute
static void ExecuteNoWait(const Path& path,
const char** args,
diff --git a/libclamav/c++/llvm/include/llvm/System/Signals.h b/libclamav/c++/llvm/include/llvm/System/Signals.h
index 2b9d8ca..7f1c87c 100644
--- a/libclamav/c++/llvm/include/llvm/System/Signals.h
+++ b/libclamav/c++/llvm/include/llvm/System/Signals.h
@@ -20,11 +20,19 @@
namespace llvm {
namespace sys {
+ /// This function runs all the registered interrupt handlers, including the
+ /// removal of files registered by RemoveFileOnSignal.
+ void RunInterruptHandlers();
+
/// This function registers signal handlers to ensure that if a signal gets
/// delivered that the named file is removed.
/// @brief Remove a file if a fatal signal occurs.
bool RemoveFileOnSignal(const Path &Filename, std::string* ErrMsg = 0);
+ /// This function removes a file from the list of files to be removed on
+ /// signal delivery.
+ void DontRemoveFileOnSignal(const Path &Filename);
+
/// When an error signal (such as SIBABRT or SIGSEGV) is delivered to the
/// process, print a stack trace and then exit.
/// @brief Print a stack trace if a fatal signal occurs.
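For reference, a minimal sketch pairing the existing RemoveFileOnSignal() with the new DontRemoveFileOnSignal(); the temporary path is illustrative:

    #include "llvm/System/Path.h"
    #include "llvm/System/Signals.h"

    void writeTempSafely() {
      llvm::sys::Path Tmp("output.tmp");
      llvm::sys::RemoveFileOnSignal(Tmp);      // clean up if a fatal signal arrives mid-write
      // ... produce the file ...
      llvm::sys::DontRemoveFileOnSignal(Tmp);  // done: stop tracking it
    }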
diff --git a/libclamav/c++/llvm/include/llvm/System/ThreadLocal.h b/libclamav/c++/llvm/include/llvm/System/ThreadLocal.h
index 39b1e64..e6edd79 100644
--- a/libclamav/c++/llvm/include/llvm/System/ThreadLocal.h
+++ b/libclamav/c++/llvm/include/llvm/System/ThreadLocal.h
@@ -19,6 +19,8 @@
namespace llvm {
namespace sys {
+ // ThreadLocalImpl - Common base class of all ThreadLocal instantiations.
+ // YOU SHOULD NEVER USE THIS DIRECTLY.
class ThreadLocalImpl {
void* data;
public:
@@ -26,14 +28,25 @@ namespace llvm {
virtual ~ThreadLocalImpl();
void setInstance(const void* d);
const void* getInstance();
+ void removeInstance();
};
+ /// ThreadLocal - A class used to abstract thread-local storage. It holds,
+ /// for each thread, a pointer to a single object of type T.
template<class T>
class ThreadLocal : public ThreadLocalImpl {
public:
ThreadLocal() : ThreadLocalImpl() { }
+
+ /// get - Fetches a pointer to the object associated with the current
+ /// thread. If no object has yet been associated, it returns NULL;
T* get() { return static_cast<T*>(getInstance()); }
+
+ // set - Associates a pointer to an object with the current thread.
void set(T* d) { setInstance(d); }
+
+ // erase - Removes the pointer associated with the current thread.
+ void erase() { removeInstance(); }
};
}
}
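For reference, a minimal sketch of the ThreadLocal wrapper including the new erase(); PerThreadState is a hypothetical client type:

    #include "llvm/System/ThreadLocal.h"

    struct PerThreadState { int Depth; };

    static llvm::sys::ThreadLocal<PerThreadState> CurrentState;

    void enter(PerThreadState *S) { CurrentState.set(S); }       // affects this thread only
    PerThreadState *current()     { return CurrentState.get(); } // NULL if never set here
    void leave()                  { CurrentState.erase(); }      // drop the association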
diff --git a/libclamav/c++/llvm/include/llvm/System/Valgrind.h b/libclamav/c++/llvm/include/llvm/System/Valgrind.h
new file mode 100644
index 0000000..5ec79c3
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/System/Valgrind.h
@@ -0,0 +1,32 @@
+//===- llvm/System/Valgrind.h - Communication with Valgrind -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Methods for communicating with a valgrind instance this program is running
+// under. These are all no-ops unless LLVM was configured on a system with the
+// valgrind headers installed and valgrind is controlling this process.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SYSTEM_VALGRIND_H
+#define LLVM_SYSTEM_VALGRIND_H
+
+#include <stddef.h>
+
+namespace llvm {
+namespace sys {
+ // True if Valgrind is controlling this process.
+ bool RunningOnValgrind();
+
+ // Discard valgrind's translation of code in the range [Addr .. Addr + Len).
+ // Otherwise valgrind may continue to execute the old version of the code.
+ void ValgrindDiscardTranslations(const void *Addr, size_t Len);
+}
+}
+
+#endif
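For reference, a minimal sketch of how JIT-style code patching would use the new header; Addr and Len describe the rewritten range and are illustrative:

    #include "llvm/System/Valgrind.h"

    void notifyCodeRewritten(const void *Addr, size_t Len) {
      // Both calls are no-ops unless the process is actually running under valgrind.
      if (llvm::sys::RunningOnValgrind())
        llvm::sys::ValgrindDiscardTranslations(Addr, Len);
    }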
diff --git a/libclamav/c++/llvm/include/llvm/Target/Mangler.h b/libclamav/c++/llvm/include/llvm/Target/Mangler.h
index 45cbf9d..a9f3576 100644
--- a/libclamav/c++/llvm/include/llvm/Target/Mangler.h
+++ b/libclamav/c++/llvm/include/llvm/Target/Mangler.h
@@ -1,4 +1,4 @@
-//===-- llvm/Target/Mangler.h - Self-contained name mangler ----*- C++ -*-===//
+//===-- llvm/Target/Mangler.h - Self-contained name mangler -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -23,7 +23,9 @@ class Twine;
class Value;
class GlobalValue;
template <typename T> class SmallVectorImpl;
-class MCAsmInfo;
+class MCContext;
+class MCSymbol;
+class TargetData;
class Mangler {
public:
@@ -34,7 +36,8 @@ public:
};
private:
- const MCAsmInfo &MAI;
+ MCContext &Context;
+ const TargetData &TD;
/// AnonGlobalIDs - We need to give global values the same name every time
/// they are mangled. This keeps track of the number we give to anonymous
@@ -47,10 +50,14 @@ private:
unsigned NextAnonGlobalID;
public:
- // Mangler ctor - if a prefix is specified, it will be prepended onto all
- // symbols.
- Mangler(const MCAsmInfo &mai) : MAI(mai), NextAnonGlobalID(1) {}
+ Mangler(MCContext &context, const TargetData &td)
+ : Context(context), TD(td), NextAnonGlobalID(1) {}
+ /// getSymbol - Return the MCSymbol for the specified global value. This
+ /// symbol is the main label that is the address of the global.
+ MCSymbol *getSymbol(const GlobalValue *GV);
+
+
/// getNameWithPrefix - Fill OutName with the name of the appropriate prefix
/// and the specified global variable's name. If the global variable doesn't
/// have a name, this fills in a unique name for the global.
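For reference, a minimal sketch of the new context-based constructor and getSymbol(); the MCContext, TargetData and GlobalValue arguments stand in for objects the caller already has:

    #include "llvm/Target/Mangler.h"

    llvm::MCSymbol *symbolFor(llvm::MCContext &Ctx, const llvm::TargetData &TD,
                              const llvm::GlobalValue *GV) {
      llvm::Mangler Mang(Ctx, TD);     // replaces the old MCAsmInfo-based constructor
      return Mang.getSymbol(GV);       // the main label holding the global's address
    }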
diff --git a/libclamav/c++/llvm/include/llvm/Target/SubtargetFeature.h b/libclamav/c++/llvm/include/llvm/Target/SubtargetFeature.h
index 38a3cc2..4546871 100644
--- a/libclamav/c++/llvm/include/llvm/Target/SubtargetFeature.h
+++ b/libclamav/c++/llvm/include/llvm/Target/SubtargetFeature.h
@@ -108,9 +108,10 @@ public:
// Dump feature info.
void dump() const;
- /// Retrieve a formatted string of the default features for
- /// the specified target triple.
- static std::string getDefaultSubtargetFeatures(const Triple &Triple);
+ /// Retrieve a formatted string of the default features for the specified
+ /// target triple.
+ void getDefaultSubtargetFeatures(const std::string &CPU,
+ const Triple& Triple);
};
} // End namespace llvm
diff --git a/libclamav/c++/llvm/include/llvm/Target/Target.td b/libclamav/c++/llvm/include/llvm/Target/Target.td
index 0cffffb..b141a77 100644
--- a/libclamav/c++/llvm/include/llvm/Target/Target.td
+++ b/libclamav/c++/llvm/include/llvm/Target/Target.td
@@ -21,6 +21,11 @@ include "llvm/Intrinsics.td"
class RegisterClass; // Forward def
+// SubRegIndex - Use instances of SubRegIndex to identify subregisters.
+class SubRegIndex {
+ string Namespace = "";
+}
+
// Register - You should define one instance of this class for each register
// in the target machine. String n will become the "name" of the register.
class Register<string n> {
@@ -49,6 +54,23 @@ class Register<string n> {
// not [AX, AH, AL].
list<Register> SubRegs = [];
+ // SubRegIndices - For each register in SubRegs, specify the SubRegIndex used
+ // to address it. Sub-sub-register indices are automatically inherited from
+ // SubRegs.
+ list<SubRegIndex> SubRegIndices = [];
+
+ // CompositeIndices - Specify subreg indices that don't correspond directly to
+ // a register in SubRegs and are not inherited. The following formats are
+ // supported:
+ //
+ // (a) Identity - Reg:a == Reg
+ // (a b) Alias - Reg:a == Reg:b
+ // (a b,c) Composite - Reg:a == (Reg:b):c
+ //
+ // This can be used to disambiguate a sub-sub-register that exists in more
+ // than one subregister and other weird stuff.
+ list<dag> CompositeIndices = [];
+
// DwarfNumbers - Numbers used internally by gcc/gdb to identify the register.
// These values can be determined by locating the <target>.h file in the
// directory llvmgcc/gcc/config/<target>/ and looking for REGISTER_NAMES. The
@@ -68,17 +90,6 @@ class RegisterWithSubRegs<string n, list<Register> subregs> : Register<n> {
let SubRegs = subregs;
}
-// SubRegSet - This can be used to define a specific mapping of registers to
-// indices, for use as named subregs of a particular physical register. Each
-// register in 'subregs' becomes an addressable subregister at index 'n' of the
-// corresponding register in 'regs'.
-class SubRegSet<int n, list<Register> regs, list<Register> subregs> {
- int index = n;
-
- list<Register> From = regs;
- list<Register> To = subregs;
-}
-
// RegisterClass - Now that all of the registers are defined, and aliases
// between registers are defined, specify which registers belong to which
// register classes. This also defines the default allocation order of
@@ -117,9 +128,9 @@ class RegisterClass<string namespace, list<ValueType> regTypes, int alignment,
//
list<Register> MemberList = regList;
- // SubClassList - Specify which register classes correspond to subregisters
- // of this class. The order should be by subregister set index.
- list<RegisterClass> SubRegClassList = [];
+ // SubRegClasses - Specify the register class of subregisters as a list of
+ // dags: (RegClass SubRegIndex, SubRegindex, ...)
+ list<dag> SubRegClasses = [];
// MethodProtos/MethodBodies - These members can be used to insert arbitrary
// code into a generated register class. The normal usage of this is to
@@ -132,8 +143,8 @@ class RegisterClass<string namespace, list<ValueType> regTypes, int alignment,
//===----------------------------------------------------------------------===//
// DwarfRegNum - This class provides a mapping of the llvm register enumeration
// to the register numbering used by gcc and gdb. These values are used by a
-// debug information writer (ex. DwarfWriter) to describe where values may be
-// located during execution.
+// debug information writer to describe where values may be located during
+// execution.
class DwarfRegNum<list<int> Numbers> {
// DwarfNumbers - Numbers used internally by gcc/gdb to identify the register.
// These values can be determined by locating the <target>.h file in the
@@ -187,12 +198,12 @@ class Instruction {
bit isReturn = 0; // Is this instruction a return instruction?
bit isBranch = 0; // Is this instruction a branch instruction?
bit isIndirectBranch = 0; // Is this instruction an indirect branch?
+ bit isCompare = 0; // Is this instruction a comparison instruction?
bit isBarrier = 0; // Can control flow fall through this instruction?
bit isCall = 0; // Is this instruction a call instruction?
bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand?
bit mayLoad = 0; // Is it possible for this inst to read memory?
bit mayStore = 0; // Is it possible for this inst to write memory?
- bit isTwoAddress = 0; // Is this a two address instruction?
bit isConvertibleToThreeAddress = 0; // Can this 2-addr instruction promote?
bit isCommutable = 0; // Is this 3 operand instruction commutable?
bit isTerminator = 0; // Is this part of the terminator for a basic block?
@@ -221,13 +232,19 @@ class Instruction {
// purposes.
bit isCodeGenOnly = 0;
+ // Is this instruction a pseudo instruction for use by the assembler parser.
+ bit isAsmParserOnly = 0;
+
InstrItinClass Itinerary = NoItinerary;// Execution steps used for scheduling.
string Constraints = ""; // OperandConstraint, e.g. $src = $dst.
-
+
/// DisableEncoding - List of operand names (e.g. "$op1,$op2") that should not
/// be encoded into the output machineinstr.
string DisableEncoding = "";
+
+ /// Target-specific flags. This becomes the TSFlags field in TargetInstrDesc.
+ bits<64> TSFlags = 0;
}
/// Predicates - These are extra conditionals which are turned into instruction
@@ -293,8 +310,8 @@ class AsmOperandClass {
/// The name to use for this class, which should be usable as an enum value.
string Name = ?;
- /// The super class of this operand.
- AsmOperandClass SuperClass = ?;
+ /// The super classes of this operand.
+ list<AsmOperandClass> SuperClasses = [];
/// The name of the method on the target specific operand to call to test
/// whether the operand is an instance of this class. If not set, this will
@@ -328,10 +345,10 @@ class Operand<ValueType ty> {
// in. Match classes are used to define the order in which instructions are
// matched, to ensure that which instruction gets matched is deterministic.
//
- // The target specific parser must be able to classify an parsed operand
- // into a unique class, which does not partially overlap with any other
- // classes. It can match a subset of some other class, in which case
- // ParserMatchSuperClass should be set to the name of that class.
+ // The target specific parser must be able to classify a parsed operand into
+ // a unique class, which does not partially overlap with any other classes. It
+ // can match a subset of some other class, in which case the AsmOperandClass
+ // should declare the other operand as one of its super classes.
AsmOperandClass ParserMatchClass = ImmAsmOperand;
}
@@ -372,13 +389,6 @@ class OptionalDefOperand<ValueType ty, dag OpTypes, dag defaultops>
// which are global to the target machine.
//
class InstrInfo {
- // If the target wants to associate some target-specific information with each
- // instruction, it should provide these two lists to indicate how to assemble
- // the target specific information into the 32 bits available.
- //
- list<string> TSFlagsFields = [];
- list<int> TSFlagsShifts = [];
-
// Target can specify its instructions in either big or little-endian formats.
// For instance, while both Sparc and PowerPC are big-endian platforms, the
// Sparc manual specifies its instructions in the format [31..0] (big), while
@@ -387,103 +397,107 @@ class InstrInfo {
}
// Standard Pseudo Instructions.
-let isCodeGenOnly = 1 in {
+// This list must match TargetOpcodes.h and CodeGenTarget.cpp.
+// Only these instructions are allowed in the TargetOpcode namespace.
+let isCodeGenOnly = 1, Namespace = "TargetOpcode" in {
def PHI : Instruction {
- let OutOperandList = (ops);
- let InOperandList = (ops variable_ops);
+ let OutOperandList = (outs);
+ let InOperandList = (ins variable_ops);
let AsmString = "PHINODE";
- let Namespace = "TargetOpcode";
}
def INLINEASM : Instruction {
- let OutOperandList = (ops);
- let InOperandList = (ops variable_ops);
+ let OutOperandList = (outs);
+ let InOperandList = (ins variable_ops);
let AsmString = "";
- let Namespace = "TargetOpcode";
}
-def DBG_LABEL : Instruction {
- let OutOperandList = (ops);
- let InOperandList = (ops i32imm:$id);
+def PROLOG_LABEL : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$id);
let AsmString = "";
- let Namespace = "TargetOpcode";
let hasCtrlDep = 1;
let isNotDuplicable = 1;
}
def EH_LABEL : Instruction {
- let OutOperandList = (ops);
- let InOperandList = (ops i32imm:$id);
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$id);
let AsmString = "";
- let Namespace = "TargetOpcode";
let hasCtrlDep = 1;
let isNotDuplicable = 1;
}
def GC_LABEL : Instruction {
- let OutOperandList = (ops);
- let InOperandList = (ops i32imm:$id);
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$id);
let AsmString = "";
- let Namespace = "TargetOpcode";
let hasCtrlDep = 1;
let isNotDuplicable = 1;
}
def KILL : Instruction {
- let OutOperandList = (ops);
- let InOperandList = (ops variable_ops);
+ let OutOperandList = (outs);
+ let InOperandList = (ins variable_ops);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
}
def EXTRACT_SUBREG : Instruction {
- let OutOperandList = (ops unknown:$dst);
- let InOperandList = (ops unknown:$supersrc, i32imm:$subidx);
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins unknown:$supersrc, i32imm:$subidx);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
}
def INSERT_SUBREG : Instruction {
- let OutOperandList = (ops unknown:$dst);
- let InOperandList = (ops unknown:$supersrc, unknown:$subsrc, i32imm:$subidx);
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins unknown:$supersrc, unknown:$subsrc, i32imm:$subidx);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
let Constraints = "$supersrc = $dst";
}
def IMPLICIT_DEF : Instruction {
- let OutOperandList = (ops unknown:$dst);
- let InOperandList = (ops);
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
let isReMaterializable = 1;
let isAsCheapAsAMove = 1;
}
def SUBREG_TO_REG : Instruction {
- let OutOperandList = (ops unknown:$dst);
- let InOperandList = (ops unknown:$implsrc, unknown:$subsrc, i32imm:$subidx);
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins unknown:$implsrc, unknown:$subsrc, i32imm:$subidx);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
}
def COPY_TO_REGCLASS : Instruction {
- let OutOperandList = (ops unknown:$dst);
- let InOperandList = (ops unknown:$src, i32imm:$regclass);
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins unknown:$src, i32imm:$regclass);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
let isAsCheapAsAMove = 1;
}
def DBG_VALUE : Instruction {
- let OutOperandList = (ops);
- let InOperandList = (ops variable_ops);
+ let OutOperandList = (outs);
+ let InOperandList = (ins variable_ops);
let AsmString = "DBG_VALUE";
- let Namespace = "TargetOpcode";
+ let isAsCheapAsAMove = 1;
+}
+def REG_SEQUENCE : Instruction {
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins variable_ops);
+ let AsmString = "";
+ let neverHasSideEffects = 1;
+ let isAsCheapAsAMove = 1;
+}
+def COPY : Instruction {
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins unknown:$src);
+ let AsmString = "";
+ let neverHasSideEffects = 1;
let isAsCheapAsAMove = 1;
}
}
//===----------------------------------------------------------------------===//
-// AsmParser - This class can be implemented by targets that wish to implement
+// AsmParser - This class can be implemented by targets that wish to implement
// .s file parsing.
//
-// Subtargets can have multiple different assembly parsers (e.g. AT&T vs Intel
+// Subtargets can have multiple different assembly parsers (e.g. AT&T vs Intel
// syntax on X86 for example).
//
class AsmParser {
@@ -491,9 +505,14 @@ class AsmParser {
// class. Generated AsmParser classes are always prefixed with the target
// name.
string AsmParserClassName = "AsmParser";
-
+
+ // AsmParserInstCleanup - If non-empty, this is the name of a custom function on the
+ // AsmParser class to call on every matched instruction. This can be used to
+ // perform target specific instruction post-processing.
+ string AsmParserInstCleanup = "";
+
// Variant - AsmParsers can be of multiple different variants. Variants are
- // used to support targets that need to parser multiple formats for the
+ // used to support targets that need to parse multiple formats for the
// assembly language.
int Variant = 0;
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetAsmBackend.h b/libclamav/c++/llvm/include/llvm/Target/TargetAsmBackend.h
index dfdabdb..979595a 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetAsmBackend.h
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetAsmBackend.h
@@ -10,8 +10,18 @@
#ifndef LLVM_TARGET_TARGETASMBACKEND_H
#define LLVM_TARGET_TARGETASMBACKEND_H
+#include "llvm/System/DataTypes.h"
+
namespace llvm {
+class MCDataFragment;
+class MCFixup;
+class MCInst;
+class MCObjectWriter;
+class MCSection;
+template<typename T>
+class SmallVectorImpl;
class Target;
+class raw_ostream;
/// TargetAsmBackend - Generic interface to target specific assembler backends.
class TargetAsmBackend {
@@ -23,11 +33,100 @@ protected: // Can only create subclasses.
/// TheTarget - The Target that this machine was created for.
const Target &TheTarget;
+ unsigned HasAbsolutizedSet : 1;
+ unsigned HasReliableSymbolDifference : 1;
+ unsigned HasScatteredSymbols : 1;
+
public:
virtual ~TargetAsmBackend();
const Target &getTarget() const { return TheTarget; }
+ /// createObjectWriter - Create a new MCObjectWriter instance for use by the
+ /// assembler backend to emit the final object file.
+ virtual MCObjectWriter *createObjectWriter(raw_ostream &OS) const = 0;
+
+ /// hasAbsolutizedSet - Check whether this target "absolutizes"
+ /// assignments. That is, given code like:
+ /// a:
+ /// ...
+ /// b:
+ /// tmp = a - b
+ /// .long tmp
+ /// will the value of 'tmp' be a relocatable expression, or the assembly-time
+ /// value of a - b. This distinction is only relevant for platforms that
+ /// support scattered symbols, since in the absence of scattered symbols (a -
+ /// b) cannot change after assembly.
+ bool hasAbsolutizedSet() const { return HasAbsolutizedSet; }
+
+ /// hasReliableSymbolDifference - Check whether this target implements
+ /// accurate relocations for differences between symbols. If not, differences
+ /// between symbols will always be relocatable expressions and any references
+ /// to temporary symbols will be assumed to be in the same atom, unless they
+ /// reside in a different section.
+ ///
+ /// This should always be true (since it results in fewer relocations with no
+ /// loss of functionality), but is currently supported as a way to maintain
+ /// exact object compatibility with Darwin 'as' (on non-x86_64). It should
+ /// eventually be eliminated. See also \see hasAbsolutizedSet.
+ bool hasReliableSymbolDifference() const {
+ return HasReliableSymbolDifference;
+ }
+
+ /// hasScatteredSymbols - Check whether this target supports scattered
+ /// symbols. If so, the assembler should assume that atoms can be scattered by
+ /// the linker. In particular, this means that the offsets between symbols
+ /// which are in distinct atoms are not known at link time, and the assembler
+ /// must generate fixups and relocations appropriately.
+ ///
+ /// Note that the assembler currently does not reason about atoms, instead it
+ /// assumes all temporary symbols reside in the "current atom".
+ bool hasScatteredSymbols() const { return HasScatteredSymbols; }
+
+ /// doesSectionRequireSymbols - Check whether the given section requires that
+ /// all symbols (even temporaries) have symbol table entries.
+ virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
+ return false;
+ }
+
+ /// isSectionAtomizable - Check whether the given section can be split into
+ /// atoms.
+ ///
+ /// \see MCAssembler::isSymbolLinkerVisible().
+ virtual bool isSectionAtomizable(const MCSection &Section) const {
+ return true;
+ }
+
+ /// isVirtualSection - Check whether the given section is "virtual", that is
+ /// has no actual object file contents.
+ virtual bool isVirtualSection(const MCSection &Section) const = 0;
+
+ /// ApplyFixup - Apply the \arg Value for the given \arg Fixup into the provided
+ /// data fragment, at the offset specified by the fixup and following the
+ /// fixup kind as appropriate.
+ virtual void ApplyFixup(const MCFixup &Fixup, MCDataFragment &Fragment,
+ uint64_t Value) const = 0;
+
+ /// MayNeedRelaxation - Check whether the given instruction may need
+ /// relaxation.
+ ///
+ /// \param Inst - The instruction to test.
+ virtual bool MayNeedRelaxation(const MCInst &Inst) const = 0;
+
+ /// RelaxInstruction - Relax the instruction in the given fragment to the next
+ /// wider instruction.
+ ///
+ /// \param Inst - The instruction to relax, which may be the same as the
+ /// output.
+ /// \param Res [output] - On return, the relaxed instruction.
+ virtual void RelaxInstruction(const MCInst &Inst, MCInst &Res) const = 0;
+
+ /// WriteNopData - Write an (optimal) nop sequence of Count bytes to the given
+ /// output. If the target cannot generate such a sequence, it should return an
+ /// error.
+ ///
+ /// \return - True on success.
+ virtual bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const = 0;
};
} // End llvm namespace
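The new MayNeedRelaxation/RelaxInstruction hooks describe the assembler's relaxation loop: an instruction whose fixup may not fit (a short branch, say) is rewritten to a wider form and layout is repeated until everything fits. Below is a minimal standalone sketch of that loop, not LLVM's implementation; the 8-bit short-branch limit and the Insn type are invented for illustration.

  #include <cstdint>
  #include <iostream>
  #include <vector>

  // Toy instruction: a branch with a displacement and an encoded size in bytes.
  struct Insn {
    int64_t Disp;   // branch displacement
    unsigned Size;  // 2 = short form, 5 = long (relaxed) form
  };

  static bool mayNeedRelaxation(const Insn &I) { return I.Size == 2; }

  static void relaxInstruction(const Insn &In, Insn &Out) {
    Out = In;
    Out.Size = 5;   // rewrite the short branch as the wider long form
  }

  // Keep relaxing until every remaining short branch displacement fits in a
  // signed byte. A real assembler would recompute fragment offsets (and hence
  // displacements) after each pass, which is why the loop must iterate.
  static void layout(std::vector<Insn> &Insns) {
    bool Changed;
    do {
      Changed = false;
      for (size_t i = 0; i != Insns.size(); ++i) {
        Insn &I = Insns[i];
        if (mayNeedRelaxation(I) && (I.Disp < -128 || I.Disp > 127)) {
          Insn Wide;
          relaxInstruction(I, Wide);
          I = Wide;
          Changed = true;
        }
      }
    } while (Changed);
  }

  int main() {
    std::vector<Insn> Code;
    Insn Near = {100, 2};   // fits in 8 bits, stays short
    Insn Far = {4000, 2};   // does not fit, gets relaxed
    Code.push_back(Near);
    Code.push_back(Far);
    layout(Code);
    for (size_t i = 0; i != Code.size(); ++i)
      std::cout << "disp=" << Code[i].Disp << " size=" << Code[i].Size << "\n";
    return 0;
  }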
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetAsmParser.h b/libclamav/c++/llvm/include/llvm/Target/TargetAsmParser.h
index 85315c1..5830d1f 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetAsmParser.h
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetAsmParser.h
@@ -10,6 +10,8 @@
#ifndef LLVM_TARGET_TARGETPARSER_H
#define LLVM_TARGET_TARGETPARSER_H
+#include "llvm/MC/MCParser/MCAsmParserExtension.h"
+
namespace llvm {
class MCInst;
class StringRef;
@@ -20,20 +22,26 @@ class MCParsedAsmOperand;
template <typename T> class SmallVectorImpl;
/// TargetAsmParser - Generic interface to target specific assembly parsers.
-class TargetAsmParser {
+class TargetAsmParser : public MCAsmParserExtension {
TargetAsmParser(const TargetAsmParser &); // DO NOT IMPLEMENT
void operator=(const TargetAsmParser &); // DO NOT IMPLEMENT
protected: // Can only create subclasses.
TargetAsmParser(const Target &);
- /// TheTarget - The Target that this machine was created for.
+ /// The Target that this machine was created for.
const Target &TheTarget;
+ /// The current set of available features.
+ unsigned AvailableFeatures;
+
public:
virtual ~TargetAsmParser();
const Target &getTarget() const { return TheTarget; }
+ unsigned getAvailableFeatures() const { return AvailableFeatures; }
+ void setAvailableFeatures(unsigned Value) { AvailableFeatures = Value; }
+
/// ParseInstruction - Parse one assembly instruction.
///
/// The parser is positioned following the instruction name. The target
@@ -47,7 +55,7 @@ public:
/// \param Operands [out] - The list of parsed operands, this returns
/// ownership of them to the caller.
/// \return True on failure.
- virtual bool ParseInstruction(const StringRef &Name, SMLoc NameLoc,
+ virtual bool ParseInstruction(StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) = 0;
/// ParseDirective - Parse a target specific assembler directive
@@ -65,8 +73,12 @@ public:
/// MatchInstruction - Recognize a series of operands of a parsed instruction
/// as an actual MCInst. This returns false and fills in Inst on success and
/// returns true on failure to match.
+ ///
+ /// On failure, the target parser is responsible for emitting a diagnostic
+ /// explaining the match failure.
virtual bool
- MatchInstruction(const SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ MatchInstruction(SMLoc IDLoc,
+ const SmallVectorImpl<MCParsedAsmOperand*> &Operands,
MCInst &Inst) = 0;
};
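ParseInstruction and MatchInstruction split assembly handling into two phases: the parser produces a list of operands (returning true on failure), and the matcher turns them into an instruction and, per the new comment, emits its own diagnostic when the match fails. A toy standalone sketch of that contract follows; the types and the single-entry mnemonic table are invented, only the true-on-failure convention mirrors the header above.

  #include <iostream>
  #include <sstream>
  #include <string>
  #include <vector>

  // Toy stand-ins for MCParsedAsmOperand / MCInst.
  struct Operand { std::string Text; };
  struct Inst    { std::string Mnemonic; std::vector<Operand> Ops; };

  // Phase 1: split the operand text; return true on failure (LLVM convention).
  static bool parseInstruction(const std::string &Name, const std::string &Rest,
                               std::vector<Operand> &Operands) {
    std::stringstream SS(Rest);
    std::string Tok;
    while (std::getline(SS, Tok, ',')) {
      Operand O;
      O.Text = Tok;
      Operands.push_back(O);
    }
    return false;             // parsed something; a real parser would validate
  }

  // Phase 2: match the parsed operands against the instruction table.
  // On failure the matcher is responsible for the diagnostic.
  static bool matchInstruction(const std::string &Name,
                               const std::vector<Operand> &Operands,
                               Inst &Out) {
    if (Name == "add" && Operands.size() == 2) {
      Out.Mnemonic = Name;
      Out.Ops = Operands;
      return false;           // success
    }
    std::cerr << "error: invalid operands for '" << Name << "'\n";
    return true;              // failure
  }

  int main() {
    std::vector<Operand> Ops;
    Inst I;
    if (!parseInstruction("add", " r1, r2", Ops) &&
        !matchInstruction("add", Ops, I))
      std::cout << "matched " << I.Mnemonic << " with " << I.Ops.size()
                << " operands\n";
    return 0;
  }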
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetCallingConv.h b/libclamav/c++/llvm/include/llvm/Target/TargetCallingConv.h
new file mode 100644
index 0000000..f368a2e
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetCallingConv.h
@@ -0,0 +1,142 @@
+//===-- llvm/Target/TargetCallingConv.h - Calling Convention ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines types for working with calling-convention information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETCALLINGCONV_H
+#define LLVM_TARGET_TARGETCALLINGCONV_H
+
+namespace llvm {
+
+namespace ISD {
+ struct ArgFlagsTy {
+ private:
+ static const uint64_t NoFlagSet = 0ULL;
+ static const uint64_t ZExt = 1ULL<<0; ///< Zero extended
+ static const uint64_t ZExtOffs = 0;
+ static const uint64_t SExt = 1ULL<<1; ///< Sign extended
+ static const uint64_t SExtOffs = 1;
+ static const uint64_t InReg = 1ULL<<2; ///< Passed in register
+ static const uint64_t InRegOffs = 2;
+ static const uint64_t SRet = 1ULL<<3; ///< Hidden struct-ret ptr
+ static const uint64_t SRetOffs = 3;
+ static const uint64_t ByVal = 1ULL<<4; ///< Struct passed by value
+ static const uint64_t ByValOffs = 4;
+ static const uint64_t Nest = 1ULL<<5; ///< Nested fn static chain
+ static const uint64_t NestOffs = 5;
+ static const uint64_t ByValAlign = 0xFULL << 6; //< Struct alignment
+ static const uint64_t ByValAlignOffs = 6;
+ static const uint64_t Split = 1ULL << 10;
+ static const uint64_t SplitOffs = 10;
+ static const uint64_t OrigAlign = 0x1FULL<<27;
+ static const uint64_t OrigAlignOffs = 27;
+ static const uint64_t ByValSize = 0xffffffffULL << 32; //< Struct size
+ static const uint64_t ByValSizeOffs = 32;
+
+ static const uint64_t One = 1ULL; //< 1 of this type, for shifts
+
+ uint64_t Flags;
+ public:
+ ArgFlagsTy() : Flags(0) { }
+
+ bool isZExt() const { return Flags & ZExt; }
+ void setZExt() { Flags |= One << ZExtOffs; }
+
+ bool isSExt() const { return Flags & SExt; }
+ void setSExt() { Flags |= One << SExtOffs; }
+
+ bool isInReg() const { return Flags & InReg; }
+ void setInReg() { Flags |= One << InRegOffs; }
+
+ bool isSRet() const { return Flags & SRet; }
+ void setSRet() { Flags |= One << SRetOffs; }
+
+ bool isByVal() const { return Flags & ByVal; }
+ void setByVal() { Flags |= One << ByValOffs; }
+
+ bool isNest() const { return Flags & Nest; }
+ void setNest() { Flags |= One << NestOffs; }
+
+ unsigned getByValAlign() const {
+ return (unsigned)
+ ((One << ((Flags & ByValAlign) >> ByValAlignOffs)) / 2);
+ }
+ void setByValAlign(unsigned A) {
+ Flags = (Flags & ~ByValAlign) |
+ (uint64_t(Log2_32(A) + 1) << ByValAlignOffs);
+ }
+
+ bool isSplit() const { return Flags & Split; }
+ void setSplit() { Flags |= One << SplitOffs; }
+
+ unsigned getOrigAlign() const {
+ return (unsigned)
+ ((One << ((Flags & OrigAlign) >> OrigAlignOffs)) / 2);
+ }
+ void setOrigAlign(unsigned A) {
+ Flags = (Flags & ~OrigAlign) |
+ (uint64_t(Log2_32(A) + 1) << OrigAlignOffs);
+ }
+
+ unsigned getByValSize() const {
+ return (unsigned)((Flags & ByValSize) >> ByValSizeOffs);
+ }
+ void setByValSize(unsigned S) {
+ Flags = (Flags & ~ByValSize) | (uint64_t(S) << ByValSizeOffs);
+ }
+
+ /// getArgFlagsString - Returns the flags as a string, eg: "zext align:4".
+ std::string getArgFlagsString();
+
+ /// getRawBits - Represent the flags as a bunch of bits.
+ uint64_t getRawBits() const { return Flags; }
+ };
+
+ /// InputArg - This struct carries flags and type information about a
+ /// single incoming (formal) argument or incoming (from the perspective
+ /// of the caller) return value virtual register.
+ ///
+ struct InputArg {
+ ArgFlagsTy Flags;
+ EVT VT;
+ bool Used;
+
+ InputArg() : VT(MVT::Other), Used(false) {}
+ InputArg(ArgFlagsTy flags, EVT vt, bool used)
+ : Flags(flags), VT(vt), Used(used) {
+ assert(VT.isSimple() &&
+ "InputArg value type must be Simple!");
+ }
+ };
+
+ /// OutputArg - This struct carries flags and a value for a
+ /// single outgoing (actual) argument or outgoing (from the perspective
+ /// of the caller) return value virtual register.
+ ///
+ struct OutputArg {
+ ArgFlagsTy Flags;
+ EVT VT;
+
+ /// IsFixed - Is this a "fixed" value, ie not passed through a vararg "...".
+ bool IsFixed;
+
+ OutputArg() : IsFixed(false) {}
+ OutputArg(ArgFlagsTy flags, EVT vt, bool isfixed)
+ : Flags(flags), VT(vt), IsFixed(isfixed) {
+ assert(VT.isSimple() &&
+ "OutputArg value type must be Simple!");
+ }
+ };
+}
+
+} // end llvm namespace
+
+#endif
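ArgFlagsTy packs every per-argument flag into a single uint64_t; alignments are stored as Log2(Align)+1 in a narrow field and decoded as (1 << field) / 2, so a zero field means "no alignment recorded" and any power-of-two alignment round-trips exactly. A standalone sketch of just that encoding, assuming the same ByValAlign field layout shown above:

  #include <cassert>
  #include <cstdint>
  #include <iostream>

  // Log2 of a power of two (stand-in for LLVM's Log2_32).
  static unsigned log2u(unsigned V) {
    unsigned L = 0;
    while (V >>= 1) ++L;
    return L;
  }

  // Minimal reimplementation of the ByValAlign encoding from ArgFlagsTy:
  // a 4-bit field holding Log2(Align)+1, decoded as (1 << field) / 2.
  static const uint64_t ByValAlign     = 0xFULL << 6;
  static const uint64_t ByValAlignOffs = 6;

  static uint64_t setByValAlign(uint64_t Flags, unsigned A) {
    return (Flags & ~ByValAlign) |
           (uint64_t(log2u(A) + 1) << ByValAlignOffs);
  }

  static unsigned getByValAlign(uint64_t Flags) {
    return (unsigned)((1ULL << ((Flags & ByValAlign) >> ByValAlignOffs)) / 2);
  }

  int main() {
    uint64_t Flags = 0;
    assert(getByValAlign(Flags) == 0);    // 0 means "no alignment recorded"
    Flags = setByValAlign(Flags, 16);
    assert(getByValAlign(Flags) == 16);   // powers of two round-trip exactly
    std::cout << "byval align = " << getByValAlign(Flags) << "\n";
    return 0;
  }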
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetCallingConv.td b/libclamav/c++/llvm/include/llvm/Target/TargetCallingConv.td
index ceaeb0b..6da3ba1 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetCallingConv.td
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetCallingConv.td
@@ -42,7 +42,7 @@ class CCIf<string predicate, CCAction A> : CCPredicateAction<A> {
class CCIfByVal<CCAction A> : CCIf<"ArgFlags.isByVal()", A> {
}
-/// CCIfCC - Match of the current calling convention is 'CC'.
+/// CCIfCC - Match if the current calling convention is 'CC'.
class CCIfCC<string CC, CCAction A>
: CCIf<!strconcat("State.getCallingConv() == ", CC), A> {}
@@ -89,6 +89,13 @@ class CCAssignToStack<int size, int align> : CCAction {
int Align = align;
}
+/// CCAssignToStackWithShadow - Same as CCAssignToStack, but with a register
+/// to be shadowed.
+class CCAssignToStackWithShadow<int size, int align, Register reg> :
+ CCAssignToStack<size, align> {
+ Register ShadowReg = reg;
+}
+
/// CCPassByVal - This action always matches: it assigns the value to a stack
/// slot to implement ByVal aggregate parameter passing. Size and alignment
/// specify the minimum size and alignment for the stack slot.
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetData.h b/libclamav/c++/llvm/include/llvm/Target/TargetData.h
index cc88dae..b89cbe0 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetData.h
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetData.h
@@ -50,13 +50,13 @@ enum AlignTypeEnum {
/// padding and make the structure slightly more cache friendly.
struct TargetAlignElem {
AlignTypeEnum AlignType : 8; //< Alignment type (AlignTypeEnum)
- unsigned char ABIAlign; //< ABI alignment for this type/bitw
- unsigned char PrefAlign; //< Pref. alignment for this type/bitw
+ unsigned ABIAlign; //< ABI alignment for this type/bitw
+ unsigned PrefAlign; //< Pref. alignment for this type/bitw
uint32_t TypeBitWidth; //< Type bit width
/// Initializer
- static TargetAlignElem get(AlignTypeEnum align_type, unsigned char abi_align,
- unsigned char pref_align, uint32_t bit_width);
+ static TargetAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
+ unsigned pref_align, uint32_t bit_width);
/// Equality predicate
bool operator==(const TargetAlignElem &rhs) const;
};
@@ -64,9 +64,9 @@ struct TargetAlignElem {
class TargetData : public ImmutablePass {
private:
bool LittleEndian; ///< Defaults to false
- unsigned char PointerMemSize; ///< Pointer size in bytes
- unsigned char PointerABIAlign; ///< Pointer ABI alignment
- unsigned char PointerPrefAlign; ///< Pointer preferred alignment
+ unsigned PointerMemSize; ///< Pointer size in bytes
+ unsigned PointerABIAlign; ///< Pointer ABI alignment
+ unsigned PointerPrefAlign; ///< Pointer preferred alignment
SmallVector<unsigned char, 8> LegalIntWidths; ///< Legal Integers.
@@ -86,12 +86,12 @@ private:
mutable void *LayoutMap;
//! Set/initialize target alignments
- void setAlignment(AlignTypeEnum align_type, unsigned char abi_align,
- unsigned char pref_align, uint32_t bit_width);
+ void setAlignment(AlignTypeEnum align_type, unsigned abi_align,
+ unsigned pref_align, uint32_t bit_width);
unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
bool ABIAlign, const Type *Ty) const;
//! Internal helper method that returns requested alignment for type.
- unsigned char getAlignment(const Type *Ty, bool abi_or_pref) const;
+ unsigned getAlignment(const Type *Ty, bool abi_or_pref) const;
/// Valid alignment predicate.
///
@@ -110,7 +110,7 @@ public:
/// Constructs a TargetData from a specification string. See init().
explicit TargetData(StringRef TargetDescription)
- : ImmutablePass(&ID) {
+ : ImmutablePass(ID) {
init(TargetDescription);
}
@@ -118,7 +118,7 @@ public:
explicit TargetData(const Module *M);
TargetData(const TargetData &TD) :
- ImmutablePass(&ID),
+ ImmutablePass(ID),
LittleEndian(TD.isLittleEndian()),
PointerMemSize(TD.PointerMemSize),
PointerABIAlign(TD.PointerABIAlign),
@@ -161,13 +161,13 @@ public:
}
/// Target pointer alignment
- unsigned char getPointerABIAlignment() const { return PointerABIAlign; }
+ unsigned getPointerABIAlignment() const { return PointerABIAlign; }
/// Return target's alignment for stack-based pointers
- unsigned char getPointerPrefAlignment() const { return PointerPrefAlign; }
+ unsigned getPointerPrefAlignment() const { return PointerPrefAlign; }
/// Target pointer size
- unsigned char getPointerSize() const { return PointerMemSize; }
+ unsigned getPointerSize() const { return PointerMemSize; }
/// Target pointer size, in bits
- unsigned char getPointerSizeInBits() const { return 8*PointerMemSize; }
+ unsigned getPointerSizeInBits() const { return 8*PointerMemSize; }
/// Size examples:
///
@@ -223,26 +223,26 @@ public:
/// getABITypeAlignment - Return the minimum ABI-required alignment for the
/// specified type.
- unsigned char getABITypeAlignment(const Type *Ty) const;
+ unsigned getABITypeAlignment(const Type *Ty) const;
/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
/// an integer type of the specified bitwidth.
- unsigned char getABIIntegerTypeAlignment(unsigned BitWidth) const;
+ unsigned getABIIntegerTypeAlignment(unsigned BitWidth) const;
/// getCallFrameTypeAlignment - Return the minimum ABI-required alignment
/// for the specified type when it is part of a call frame.
- unsigned char getCallFrameTypeAlignment(const Type *Ty) const;
+ unsigned getCallFrameTypeAlignment(const Type *Ty) const;
/// getPrefTypeAlignment - Return the preferred stack/global alignment for
/// the specified type. This is always at least as good as the ABI alignment.
- unsigned char getPrefTypeAlignment(const Type *Ty) const;
+ unsigned getPrefTypeAlignment(const Type *Ty) const;
/// getPreferredTypeAlignmentShift - Return the preferred alignment for the
/// specified type, returned as log2 of the value (a shift amount).
///
- unsigned char getPreferredTypeAlignmentShift(const Type *Ty) const;
+ unsigned getPreferredTypeAlignmentShift(const Type *Ty) const;
/// getIntPtrType - Return an unsigned integer type that is the same size or
/// greater to the host pointer size.
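The alignment and pointer-size fields above are widened from unsigned char to unsigned, presumably because an 8-bit field silently truncates alignments of 256 bytes or more. A trivial standalone illustration of the truncation being avoided (the value 256 is just an example):

  #include <iostream>

  int main() {
    // A 256-byte alignment does not fit in 8 bits...
    unsigned char NarrowAlign = static_cast<unsigned char>(256);  // wraps to 0
    unsigned WideAlign = 256;                                     // preserved

    std::cout << "unsigned char: " << static_cast<unsigned>(NarrowAlign)
              << ", unsigned: " << WideAlign << "\n";
    return 0;
  }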
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetInstrDesc.h b/libclamav/c++/llvm/include/llvm/Target/TargetInstrDesc.h
index 9efb683..a127aed 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetInstrDesc.h
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetInstrDesc.h
@@ -15,6 +15,8 @@
#ifndef LLVM_TARGET_TARGETINSTRDESC_H
#define LLVM_TARGET_TARGETINSTRDESC_H
+#include "llvm/System/DataTypes.h"
+
namespace llvm {
class TargetRegisterClass;
@@ -53,7 +55,7 @@ public:
///
/// NOTE: This member should be considered to be private, all access should go
/// through "getRegClass(TRI)" below.
- unsigned short RegClass;
+ short RegClass;
/// Flags - These are flags from the TOI::OperandFlags enum.
unsigned short Flags;
@@ -103,6 +105,7 @@ namespace TID {
IndirectBranch,
Predicable,
NotDuplicable,
+ Compare,
DelaySlot,
FoldableAsLoad,
MayLoad,
@@ -131,7 +134,7 @@ public:
unsigned short SchedClass; // enum identifying instr sched class
const char * Name; // Name of the instruction record in td file
unsigned Flags; // Flags identifying machine instr class
- unsigned TSFlags; // Target Specific Flag values
+ uint64_t TSFlags; // Target Specific Flag values
const unsigned *ImplicitUses; // Registers implicitly read by this instr
const unsigned *ImplicitDefs; // Registers implicitly defined by this instr
const TargetRegisterClass **RCBarriers; // Reg classes completely "clobbered"
@@ -149,6 +152,12 @@ public:
return -1;
}
+ /// getRegClass - Returns the register class constraint for OpNum, or NULL.
+ const TargetRegisterClass *getRegClass(unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {
+ return OpNum < NumOperands ? OpInfo[OpNum].getRegClass(TRI) : 0;
+ }
+
/// getOpcode - Return the opcode number for this descriptor.
unsigned getOpcode() const {
return Opcode;
@@ -204,6 +213,16 @@ public:
return ImplicitUses;
}
+ /// getNumImplicitUses - Return the number of implicit uses this instruction
+ /// has.
+ unsigned getNumImplicitUses() const {
+ if (ImplicitUses == 0) return 0;
+ unsigned i = 0;
+ for (; ImplicitUses[i]; ++i) /*empty*/;
+ return i;
+ }
+
+
/// getImplicitDefs - Return a list of registers that are potentially
/// written by any instance of this machine instruction. For example, on X86,
/// many instructions implicitly set the flags register. In this case, they
@@ -218,6 +237,15 @@ public:
return ImplicitDefs;
}
+ /// getNumImplicitDefs - Return the number of implicit defs this instruction
+ /// has.
+ unsigned getNumImplicitDefs() const {
+ if (ImplicitDefs == 0) return 0;
+ unsigned i = 0;
+ for (; ImplicitDefs[i]; ++i) /*empty*/;
+ return i;
+ }
+
/// hasImplicitUseOfPhysReg - Return true if this instruction implicitly
/// uses the specified physical register.
bool hasImplicitUseOfPhysReg(unsigned Reg) const {
@@ -294,7 +322,7 @@ public:
bool isIndirectBranch() const {
return Flags & (1 << TID::IndirectBranch);
}
-
+
/// isConditionalBranch - Return true if this is a branch which may fall
/// through to the next instruction or may transfer control flow to some other
/// block. The TargetInstrInfo::AnalyzeBranch method can be used to get more
@@ -319,6 +347,11 @@ public:
return Flags & (1 << TID::Predicable);
}
+ /// isCompare - Return true if this instruction is a comparison.
+ bool isCompare() const {
+ return Flags & (1 << TID::Compare);
+ }
+
/// isNotDuplicable - Return true if this instruction cannot be safely
/// duplicated. For example, if the instruction has a unique labels attached
/// to it, duplicating it would cause multiple definition errors.
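The new getNumImplicitUses/getNumImplicitDefs helpers walk a zero-terminated register list, and predicates such as the new isCompare() test a single bit in Flags. A standalone sketch of both patterns; the flag values and register numbers are invented:

  #include <iostream>

  // Bit positions, in the style of the TID::* flags (values are illustrative).
  enum Flag { Branch = 0, Compare = 1, MayLoad = 2 };

  struct ToyInstrDesc {
    unsigned Flags;               // one bit per Flag
    const unsigned *ImplicitUses; // zero-terminated list of register numbers

    bool isCompare() const { return Flags & (1u << Compare); }

    // Count entries in the zero-terminated implicit-use list.
    unsigned getNumImplicitUses() const {
      if (ImplicitUses == 0) return 0;
      unsigned i = 0;
      for (; ImplicitUses[i]; ++i) /*empty*/;
      return i;
    }
  };

  int main() {
    static const unsigned Uses[] = {17, 23, 0};   // two registers + terminator
    ToyInstrDesc Cmp = {1u << Compare, Uses};
    std::cout << "isCompare=" << Cmp.isCompare()
              << " implicit uses=" << Cmp.getNumImplicitUses() << "\n";
    return 0;
  }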
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetInstrInfo.h b/libclamav/c++/llvm/include/llvm/Target/TargetInstrInfo.h
index 4b26beb..520c41b 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetInstrInfo.h
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetInstrInfo.h
@@ -20,10 +20,14 @@
namespace llvm {
class CalleeSavedInfo;
+class InstrItineraryData;
class LiveVariables;
class MCAsmInfo;
class MachineMemOperand;
+class MDNode;
+class MCInst;
class SDNode;
+class ScheduleHazardRecognizer;
class SelectionDAG;
class TargetRegisterClass;
class TargetRegisterInfo;
@@ -88,15 +92,6 @@ private:
AliasAnalysis *AA) const;
public:
- /// isMoveInstr - Return true if the instruction is a register to register
- /// move and return the source and dest operands and their sub-register
- /// indices by reference.
- virtual bool isMoveInstr(const MachineInstr& MI,
- unsigned& SrcReg, unsigned& DstReg,
- unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
- return false;
- }
-
/// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
/// extension instruction. That is, it's like a copy where it's legal for the
/// source to overlap the destination. e.g. X86::MOVSX64rr32. If this returns
@@ -109,26 +104,6 @@ public:
return false;
}
- /// isIdentityCopy - Return true if the instruction is a copy (or
- /// extract_subreg, insert_subreg, subreg_to_reg) where the source and
- /// destination registers are the same.
- bool isIdentityCopy(const MachineInstr &MI) const {
- unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- if (isMoveInstr(MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
- SrcReg == DstReg)
- return true;
-
- if (MI.getOpcode() == TargetOpcode::EXTRACT_SUBREG &&
- MI.getOperand(0).getReg() == MI.getOperand(1).getReg())
- return true;
-
- if ((MI.getOpcode() == TargetOpcode::INSERT_SUBREG ||
- MI.getOpcode() == TargetOpcode::SUBREG_TO_REG) &&
- MI.getOperand(0).getReg() == MI.getOperand(2).getReg())
- return true;
- return false;
- }
-
/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
@@ -182,7 +157,7 @@ public:
/// store to a stack slot, return true along with the FrameIndex of
/// the loaded stack slot and the machine mem operand containing the
/// reference. If not, return false. Unlike isStoreToStackSlot,
- /// this returns true for any instructions that loads from the
+ /// this returns true for any instruction that stores to the
/// stack. This is just a hint, as some cases may be missed.
virtual bool hasStoreToStackSlot(const MachineInstr *MI,
const MachineMemOperand *&MMO,
@@ -192,11 +167,22 @@ public:
/// reMaterialize - Re-issue the specified 'original' instruction at the
/// specific location targeting a new destination register.
+ /// The register in Orig->getOperand(0).getReg() will be substituted by
+ /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
+ /// SubIdx.
virtual void reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const = 0;
+ const TargetRegisterInfo &TRI) const = 0;
+
+ /// scheduleTwoAddrSource - Schedule the copy / re-mat of the source of the
+ /// two-address instruction inserted by the two-address pass.
+ virtual void scheduleTwoAddrSource(MachineInstr *SrcMI,
+ MachineInstr *UseMI,
+ const TargetRegisterInfo &TRI) const {
+ // Do nothing.
+ }
/// duplicate - Create a duplicate of the Orig instruction in MF. This is like
/// MachineFunction::CloneMachineInstr(), but the target may update operands
@@ -222,23 +208,19 @@ public:
return 0;
}
- /// commuteInstruction - If a target has any instructions that are commutable,
- /// but require converting to a different instruction or making non-trivial
- /// changes to commute them, this method can overloaded to do this. The
- /// default implementation of this method simply swaps the first two operands
- /// of MI and returns it.
- ///
- /// If a target wants to make more aggressive changes, they can construct and
- /// return a new machine instruction. If an instruction cannot commute, it
- /// can also return null.
- ///
- /// If NewMI is true, then a new machine instruction must be created.
- ///
+ /// commuteInstruction - If a target has any instructions that are
+ /// commutable but require converting to different instructions or making
+ /// non-trivial changes to commute them, this method can be overloaded to do
+ /// that. The default implementation simply swaps the commutable operands.
+ /// If NewMI is false, MI is modified in place and returned; otherwise, a
+ /// new machine instruction is created and returned. Do not call this
+ /// method for a non-commutable instruction, but there may be some cases
+ /// where this method fails and returns null.
virtual MachineInstr *commuteInstruction(MachineInstr *MI,
bool NewMI = false) const = 0;
/// findCommutedOpIndices - If specified MI is commutable, return the two
- /// operand indices that would swap value. Return true if the instruction
+ /// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const = 0;
@@ -300,24 +282,60 @@ public:
/// branch to analyze. At least this much must be implemented, else tail
/// merging needs to be disabled.
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
assert(0 && "Target didn't implement TargetInstrInfo::InsertBranch!");
return 0;
}
+
+ /// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
+ /// after it, replacing it with an unconditional branch to NewDest. This is
+ /// used by the tail merging pass.
+ virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
+ MachineBasicBlock *NewDest) const = 0;
+
+ /// isLegalToSplitMBBAt - Return true if it's legal to split the given basic
+ /// block at the specified instruction (i.e. instruction would be the start
+ /// of a new basic block).
+ virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) const {
+ return true;
+ }
+
+ /// isProfitableToIfCvt - Return true if it's profitable to if-convert the
+ /// first "NumInstrs" instructions of the specified basic block.
+ virtual
+ bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs) const {
+ return false;
+ }
- /// copyRegToReg - Emit instructions to copy between a pair of registers. It
- /// returns false if the target does not how to copy between the specified
- /// registers.
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC) const {
- assert(0 && "Target didn't implement TargetInstrInfo::copyRegToReg!");
+ /// isProfitableToIfCvt - Second variant of isProfitableToIfCvt, this one
+ /// checks for the case where the two basic blocks from the true and false
+ /// paths of an if-then-else (diamond) are predicated on mutually exclusive
+ /// predicates.
+ virtual bool
+ isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTInstrs,
+ MachineBasicBlock &FMBB, unsigned NumFInstrs) const {
+ return false;
+ }
+
+ /// isProfitableToDupForIfCvt - Return true if it's profitable for
+ /// the if-converter to duplicate a specific number of instructions in the
+ /// specified MBB to enable if-conversion.
+ virtual bool
+ isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumInstrs) const {
return false;
}
+ /// copyPhysReg - Emit instructions to copy a pair of physical registers.
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ assert(0 && "Target didn't implement TargetInstrInfo::copyPhysReg!");
+ }
+
/// storeRegToStackSlot - Store the specified register of the given register
/// class to the specified stack frame index. The store instruction is to be
/// added to the given machine basic block before the specified machine
@@ -326,8 +344,9 @@ public:
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC) const {
- assert(0 && "Target didn't implement TargetInstrInfo::storeRegToStackSlot!");
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ assert(0 && "Target didn't implement TargetInstrInfo::storeRegToStackSlot!");
}
/// loadRegFromStackSlot - Load the specified register of the given register
@@ -337,8 +356,9 @@ public:
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC) const {
- assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromStackSlot!");
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromStackSlot!");
}
/// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
@@ -347,7 +367,8 @@ public:
/// storeRegToStackSlot(). Returns false otherwise.
virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI) const {
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
return false;
}
@@ -357,26 +378,41 @@ public:
/// Returns false otherwise.
virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI) const {
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
return false;
}
+ /// emitFrameIndexDebugValue - Emit a target-dependent form of
+ /// DBG_VALUE encoding the address of a frame index. Addresses would
+ /// normally be lowered the same way as other addresses on the target,
+ /// e.g. in load instructions. For targets that do not support this
+ /// the debug info is simply lost.
+ /// If you add this for a target you should handle this DBG_VALUE in the
+ /// target-specific AsmPrinter code as well; you will probably get invalid
+ /// assembly output if you don't.
+ virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
+ int FrameIx,
+ uint64_t Offset,
+ const MDNode *MDPtr,
+ DebugLoc dl) const {
+ return 0;
+ }
+
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
- /// operand folded, otherwise NULL is returned. The client is responsible for
- /// removing the old instruction and adding the new one in the instruction
- /// stream.
- MachineInstr* foldMemoryOperand(MachineFunction &MF,
- MachineInstr* MI,
+ /// operand folded, otherwise NULL is returned.
+ /// The new instruction is inserted before MI, and the client is responsible
+ /// for removing the old instruction.
+ MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
- MachineInstr* foldMemoryOperand(MachineFunction &MF,
- MachineInstr* MI,
+ MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const;
@@ -396,7 +432,7 @@ protected:
/// take care of adding a MachineMemOperand to the newly created instruction.
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
@@ -406,9 +442,7 @@ public:
/// folding is possible.
virtual
bool canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const {
- return false;
- }
+ const SmallVectorImpl<unsigned> &Ops) const =0;
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instruction. If this is
@@ -442,7 +476,7 @@ public:
/// only differences between the two addresses are the offset. It also returns
/// the offsets by reference.
virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
- int64_t &Offset1, int64_t &Offset2) const {
+ int64_t &Offset1, int64_t &Offset2) const {
return false;
}
@@ -473,6 +507,13 @@ public:
virtual void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
+
+ /// getNoopForMachoTarget - Return the noop instruction to use for a noop.
+ virtual void getNoopForMachoTarget(MCInst &NopInst) const {
+ // Default to just using 'nop' string.
+ }
+
+
/// isPredicated - Returns true if the instruction is already predicated.
///
virtual bool isPredicated(const MachineInstr *MI) const {
@@ -518,22 +559,38 @@ public:
return true;
}
- /// GetInstSize - Returns the size of the specified Instruction.
- ///
- virtual unsigned GetInstSizeInBytes(const MachineInstr *MI) const {
- assert(0 && "Target didn't implement TargetInstrInfo::GetInstSize!");
- return 0;
- }
+ /// isSchedulingBoundary - Test if the given instruction should be
+ /// considered a scheduling boundary. This primarily includes labels and
+ /// terminators.
+ virtual bool isSchedulingBoundary(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const = 0;
- /// GetFunctionSizeInBytes - Returns the size of the specified
- /// MachineFunction.
- ///
- virtual unsigned GetFunctionSizeInBytes(const MachineFunction &MF) const = 0;
-
/// Measure the specified inline asm to determine an approximation of its
/// length.
virtual unsigned getInlineAsmLength(const char *Str,
const MCAsmInfo &MAI) const;
+
+ /// CreateTargetHazardRecognizer - Allocate and return a hazard recognizer
+ /// to use for this target when scheduling the machine instructions after
+ /// register allocation.
+ virtual ScheduleHazardRecognizer*
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData&) const = 0;
+
+ /// AnalyzeCompare - For a comparison instruction, return the source register
+ /// in SrcReg and the value it compares against in CmpValue. Return true if
+ /// the comparison instruction can be analyzed.
+ virtual bool AnalyzeCompare(const MachineInstr *MI,
+ unsigned &SrcReg, int &CmpValue) const {
+ return false;
+ }
+
+ /// ConvertToSetZeroFlag - Convert the instruction to set the zero flag so
+ /// that we can remove a "comparison with zero".
+ virtual bool ConvertToSetZeroFlag(MachineInstr *Instr,
+ MachineInstr *CmpInstr) const {
+ return false;
+ }
};
/// TargetInstrInfoImpl - This is the default implementation of
@@ -545,22 +602,31 @@ protected:
TargetInstrInfoImpl(const TargetInstrDesc *desc, unsigned NumOpcodes)
: TargetInstrInfo(desc, NumOpcodes) {}
public:
+ virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
+ MachineBasicBlock *NewDest) const;
virtual MachineInstr *commuteInstruction(MachineInstr *MI,
bool NewMI = false) const;
virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const;
+ virtual bool canFoldMemoryOperand(const MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops) const;
virtual bool PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const;
virtual void reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubReg,
const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo &TRI) const;
virtual MachineInstr *duplicate(MachineInstr *Orig,
MachineFunction &MF) const;
virtual bool produceSameValue(const MachineInstr *MI0,
const MachineInstr *MI1) const;
- virtual unsigned GetFunctionSizeInBytes(const MachineFunction &MF) const;
+ virtual bool isSchedulingBoundary(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const;
+
+ virtual ScheduleHazardRecognizer *
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData&) const;
};
} // End llvm namespace
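The new AnalyzeCompare/ConvertToSetZeroFlag hooks support a peephole that drops an explicit compare-with-zero when the defining instruction can set the zero flag itself (ARM SUBS-style). A toy standalone model of that idea, not LLVM's pass; the textual opcodes and the MI struct are invented:

  #include <iostream>
  #include <string>
  #include <vector>

  struct MI {
    std::string Opcode;   // e.g. "SUB", "SUBS", "CMPri"
    unsigned DefReg;      // register defined (0 if none)
    unsigned SrcReg;      // register compared, for CMPri
    int Imm;              // immediate operand, for CMPri
  };

  // Like AnalyzeCompare: recognize a comparison and report what it compares.
  static bool analyzeCompare(const MI &I, unsigned &SrcReg, int &CmpValue) {
    if (I.Opcode != "CMPri") return false;
    SrcReg = I.SrcReg;
    CmpValue = I.Imm;
    return true;
  }

  // Like ConvertToSetZeroFlag: make the defining instruction set flags itself.
  static bool convertToSetZeroFlag(MI &Def) {
    if (Def.Opcode != "SUB") return false;
    Def.Opcode = "SUBS";            // flag-setting form
    return true;
  }

  int main() {
    std::vector<MI> Block;
    MI Sub = {"SUB", 5, 0, 0};      // defines r5
    MI Cmp = {"CMPri", 0, 5, 0};    // compares r5 with 0
    Block.push_back(Sub);
    Block.push_back(Cmp);

    unsigned SrcReg; int CmpValue;
    if (analyzeCompare(Block[1], SrcReg, CmpValue) && CmpValue == 0 &&
        Block[0].DefReg == SrcReg && convertToSetZeroFlag(Block[0]))
      Block.pop_back();             // the explicit compare-with-zero is redundant

    for (size_t i = 0; i != Block.size(); ++i)
      std::cout << Block[i].Opcode << "\n";
    return 0;
  }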
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetInstrItineraries.h b/libclamav/c++/llvm/include/llvm/Target/TargetInstrItineraries.h
index 420fa94..39648c2 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetInstrItineraries.h
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetInstrItineraries.h
@@ -47,10 +47,24 @@ namespace llvm {
/// indicate that the instruction requires multiple stages at the
/// same time.
///
+/// An FU reservation can be of two different kinds:
+/// - FUs which the instruction actually requires
+/// - FUs which the instruction just reserves. A reserved unit is not available
+/// for execution of other instructions; however, several instructions can
+/// reserve the same unit several times.
+/// These two kinds of unit reservation are used to model instruction domain
+/// change stalls, FUs sharing the same resource (e.g. the same register file), etc.
+
struct InstrStage {
+ enum ReservationKinds {
+ Required = 0,
+ Reserved = 1
+ };
+
unsigned Cycles_; ///< Length of stage in machine cycles
unsigned Units_; ///< Choice of functional units
- int NextCycles_; ///< Number of machine cycles to next stage
+ int NextCycles_; ///< Number of machine cycles to next stage
+ ReservationKinds Kind_; ///< Kind of the FU reservation
/// getCycles - returns the number of cycles the stage is occupied
unsigned getCycles() const {
@@ -62,6 +76,10 @@ struct InstrStage {
return Units_;
}
+ ReservationKinds getReservationKind() const {
+ return Kind_;
+ }
+
/// getNextCycles - returns the number of cycles from the start of
/// this stage to the start of the next stage in the itinerary
unsigned getNextCycles() const {
@@ -88,7 +106,8 @@ struct InstrItinerary {
/// Instruction itinerary Data - Itinerary data supplied by a subtarget to be
/// used by a target.
///
-struct InstrItineraryData {
+class InstrItineraryData {
+public:
const InstrStage *Stages; ///< Array of stages selected
const unsigned *OperandCycles; ///< Array of operand cycles selected
const InstrItinerary *Itineratries; ///< Array of itineraries selected
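The Required/Reserved distinction lets an itinerary block a functional unit without exclusively consuming it: a Required stage needs a unit nobody else holds, while several instructions may hold the same Reserved unit at once. A simplified standalone reading of that rule (the unit masks and the exact sharing policy are assumptions for illustration):

  #include <iostream>

  enum ReservationKind { Required = 0, Reserved = 1 };

  struct Stage {
    unsigned Units;          // bitmask of candidate functional units
    ReservationKind Kind;
  };

  // Can this stage issue given the units already claimed this cycle?
  // Required stages need a unit free of everyone; Reserved stages may overlap
  // other reservations, conflicting only with units held as Required.
  static bool canIssue(const Stage &S, unsigned RequiredInUse,
                       unsigned ReservedInUse) {
    if (S.Kind == Required)
      return (S.Units & ~(RequiredInUse | ReservedInUse)) != 0;
    return (S.Units & ~RequiredInUse) != 0;
  }

  int main() {
    unsigned RequiredInUse = 0x0;
    unsigned ReservedInUse = 0x1;            // unit 0 is reserved this cycle
    Stage NeedsUnit0    = {0x1, Required};   // cannot issue: unit 0 not free
    Stage ReservesUnit0 = {0x1, Reserved};   // can issue: reservations overlap
    std::cout << "required issue ok: "
              << canIssue(NeedsUnit0, RequiredInUse, ReservedInUse) << "\n"
              << "reserved issue ok: "
              << canIssue(ReservesUnit0, RequiredInUse, ReservedInUse) << "\n";
    return 0;
  }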
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetLowering.h b/libclamav/c++/llvm/include/llvm/Target/TargetLowering.h
index 5bc1c0e..29de994 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetLowering.h
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetLowering.h
@@ -24,6 +24,7 @@
#include "llvm/CallingConv.h"
#include "llvm/InlineAsm.h"
+#include "llvm/Attributes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/ADT/APFloat.h"
@@ -32,6 +33,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/DebugLoc.h"
+#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
#include <climits>
#include <map>
@@ -42,22 +44,20 @@ namespace llvm {
class CallInst;
class Function;
class FastISel;
+ class FunctionLoweringInfo;
class MachineBasicBlock;
class MachineFunction;
class MachineFrameInfo;
class MachineInstr;
class MachineJumpTableInfo;
- class MachineModuleInfo;
class MCContext;
class MCExpr;
- class DwarfWriter;
class SDNode;
class SDValue;
class SelectionDAG;
class TargetData;
class TargetMachine;
class TargetRegisterClass;
- class TargetSubtarget;
class TargetLoweringObjectFile;
class Value;
@@ -100,18 +100,14 @@ public:
ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
};
- enum SchedPreference {
- SchedulingForLatency, // Scheduling for shortest total latency.
- SchedulingForRegPressure // Scheduling for lowest register pressure.
- };
-
/// NOTE: The constructor takes ownership of TLOF.
- explicit TargetLowering(TargetMachine &TM, TargetLoweringObjectFile *TLOF);
+ explicit TargetLowering(const TargetMachine &TM,
+ const TargetLoweringObjectFile *TLOF);
virtual ~TargetLowering();
- TargetMachine &getTargetMachine() const { return TM; }
+ const TargetMachine &getTargetMachine() const { return TM; }
const TargetData *getTargetData() const { return TD; }
- TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }
+ const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }
bool isBigEndian() const { return !IsLittleEndian; }
bool isLittleEndian() const { return IsLittleEndian; }
@@ -121,7 +117,7 @@ public:
/// isSelectExpensive - Return true if the select operation is expensive for
/// this target.
bool isSelectExpensive() const { return SelectIsExpensive; }
-
+
/// isIntDivCheap() - Return true if integer divide is usually cheaper than
/// a sequence of several shifts, adds, and multiplies for this target.
bool isIntDivCheap() const { return IntDivIsCheap; }
@@ -138,10 +134,10 @@ public:
virtual
MVT::SimpleValueType getSetCCResultType(EVT VT) const;
- /// getCmpLibcallReturnType - Return the ValueType for comparison
+ /// getCmpLibcallReturnType - Return the ValueType for comparison
/// libcalls. Comparison libcalls include floating point comparison calls,
/// and Ordered/Unordered check calls on floating point numbers.
- virtual
+ virtual
MVT::SimpleValueType getCmpLibcallReturnType() const;
/// getBooleanContents - For targets without i1 registers, this gives the
@@ -152,19 +148,52 @@ public:
BooleanContent getBooleanContents() const { return BooleanContents;}
/// getSchedulingPreference - Return target scheduling preference.
- SchedPreference getSchedulingPreference() const {
+ Sched::Preference getSchedulingPreference() const {
return SchedPreferenceInfo;
}
+ /// getSchedulingPreference - Some schedulers, e.g. hybrid, can switch to
+ /// different scheduling heuristics for different nodes. This function returns
+ /// the preference (or none) for the given node.
+ virtual Sched::Preference getSchedulingPreference(SDNode *N) const {
+ return Sched::None;
+ }
+
/// getRegClassFor - Return the register class that should be used for the
- /// specified value type. This may only be called on legal types.
- TargetRegisterClass *getRegClassFor(EVT VT) const {
+ /// specified value type.
+ virtual TargetRegisterClass *getRegClassFor(EVT VT) const {
assert(VT.isSimple() && "getRegClassFor called on illegal type!");
TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy];
assert(RC && "This value type is not natively supported!");
return RC;
}
+ /// getRepRegClassFor - Return the 'representative' register class for the
+ /// specified value type. The 'representative' register class is the largest
+ /// legal super-reg register class for the register class of the value type.
+ /// For example, on i386 the rep register class for i8, i16, and i32 is GR32;
+ /// while the rep register class is GR64 on x86_64.
+ virtual const TargetRegisterClass *getRepRegClassFor(EVT VT) const {
+ assert(VT.isSimple() && "getRepRegClassFor called on illegal type!");
+ const TargetRegisterClass *RC = RepRegClassForVT[VT.getSimpleVT().SimpleTy];
+ return RC;
+ }
+
+ /// getRepRegClassCostFor - Return the cost of the 'representative' register
+ /// class for the specified value type.
+ virtual uint8_t getRepRegClassCostFor(EVT VT) const {
+ assert(VT.isSimple() && "getRepRegClassCostFor called on illegal type!");
+ return RepRegClassCostForVT[VT.getSimpleVT().SimpleTy];
+ }
+
+ /// getRegPressureLimit - Return the register pressure "high water mark" for
+ /// the specific register class. The scheduler is in high register pressure
+ /// mode (for the specific register class) if it goes over the limit.
+ virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
+ MachineFunction &MF) const {
+ return 0;
+ }
+
/// isTypeLegal - Return true if the target has native support for the
/// specified value type. This means that it has a register that directly
/// holds it without promotions or expansions.
@@ -174,46 +203,70 @@ public:
return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0;
}
+ /// isTypeSynthesizable - Return true if it's OK for the compiler to create
+ /// new operations of this type. All Legal types are synthesizable except
+ /// MMX vector types on X86. Non-Legal types are not synthesizable.
+ bool isTypeSynthesizable(EVT VT) const {
+ return isTypeLegal(VT) && Synthesizable[VT.getSimpleVT().SimpleTy];
+ }
+
class ValueTypeActionImpl {
- /// ValueTypeActions - This is a bitvector that contains two bits for each
- /// value type, where the two bits correspond to the LegalizeAction enum.
- /// This can be queried with "getTypeAction(VT)".
- /// dimension by (MVT::MAX_ALLOWED_VALUETYPE/32) * 2
- uint32_t ValueTypeActions[(MVT::MAX_ALLOWED_VALUETYPE/32)*2];
+ /// ValueTypeActions - For each value type, keep a LegalizeAction enum
+ /// that indicates how instruction selection should deal with the type.
+ uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];
+
+ LegalizeAction getExtendedTypeAction(EVT VT) const {
+ // Handle non-vector integers.
+ if (!VT.isVector()) {
+ assert(VT.isInteger() && "Unsupported extended type!");
+ unsigned BitSize = VT.getSizeInBits();
+ // First promote to a power-of-two size, then expand if necessary.
+ if (BitSize < 8 || !isPowerOf2_32(BitSize))
+ return Promote;
+ return Expand;
+ }
+
+ // If this is a type smaller than a legal vector type, promote to that
+ // type, e.g. <2 x float> -> <4 x float>.
+ if (VT.getVectorElementType().isSimple() &&
+ VT.getVectorNumElements() != 1) {
+ MVT EltType = VT.getVectorElementType().getSimpleVT();
+ unsigned NumElts = VT.getVectorNumElements();
+ while (1) {
+ // Round up to the nearest power of 2.
+ NumElts = (unsigned)NextPowerOf2(NumElts);
+
+ MVT LargerVector = MVT::getVectorVT(EltType, NumElts);
+ if (LargerVector == MVT()) break;
+
+ // If the larger type is legal, promote to it.
+ if (getTypeAction(LargerVector) == Legal) return Promote;
+ }
+ }
+
+ return VT.isPow2VectorType() ? Expand : Promote;
+ }
public:
ValueTypeActionImpl() {
- ValueTypeActions[0] = ValueTypeActions[1] = 0;
- ValueTypeActions[2] = ValueTypeActions[3] = 0;
+ std::fill(ValueTypeActions, array_endof(ValueTypeActions), 0);
}
- ValueTypeActionImpl(const ValueTypeActionImpl &RHS) {
- ValueTypeActions[0] = RHS.ValueTypeActions[0];
- ValueTypeActions[1] = RHS.ValueTypeActions[1];
- ValueTypeActions[2] = RHS.ValueTypeActions[2];
- ValueTypeActions[3] = RHS.ValueTypeActions[3];
+
+ LegalizeAction getTypeAction(EVT VT) const {
+ if (!VT.isExtended())
+ return getTypeAction(VT.getSimpleVT());
+ return getExtendedTypeAction(VT);
}
- LegalizeAction getTypeAction(LLVMContext &Context, EVT VT) const {
- if (VT.isExtended()) {
- if (VT.isVector()) {
- return VT.isPow2VectorType() ? Expand : Promote;
- }
- if (VT.isInteger())
- // First promote to a power-of-two size, then expand if necessary.
- return VT == VT.getRoundIntegerType(Context) ? Expand : Promote;
- assert(0 && "Unsupported extended type!");
- return Legal;
- }
- unsigned I = VT.getSimpleVT().SimpleTy;
- assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
- return (LegalizeAction)((ValueTypeActions[I>>4] >> ((2*I) & 31)) & 3);
+ LegalizeAction getTypeAction(MVT VT) const {
+ return (LegalizeAction)ValueTypeActions[VT.SimpleTy];
}
+
void setTypeAction(EVT VT, LegalizeAction Action) {
unsigned I = VT.getSimpleVT().SimpleTy;
- assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
- ValueTypeActions[I>>4] |= Action << ((I*2) & 31);
+ ValueTypeActions[I] = Action;
}
};
-
+
const ValueTypeActionImpl &getValueTypeActions() const {
return ValueTypeActions;
}
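getExtendedTypeAction encodes the legalization policy for non-simple types: odd-sized or sub-byte integers are promoted to a power-of-two width first and expanded only afterwards, and small vectors are promoted to the next power-of-two element count when that larger vector type is legal. A standalone sketch of the integer half of that decision; the Legal case and the LegalBits parameter are additions for illustration and are not part of the code above:

  #include <iostream>

  enum Action { Legal, Promote, Expand };

  static bool isPowerOf2(unsigned V) { return V && !(V & (V - 1)); }

  // Decide what to do with an integer of BitSize bits when the widest legal
  // integer register is LegalBits wide, following the same shape as
  // getExtendedTypeAction: promote to a power-of-two width first, then expand.
  static Action integerTypeAction(unsigned BitSize, unsigned LegalBits) {
    if (BitSize <= LegalBits && isPowerOf2(BitSize) && BitSize >= 8)
      return Legal;
    if (BitSize < 8 || !isPowerOf2(BitSize))
      return Promote;      // e.g. i1 -> i8, i24 -> i32
    return Expand;         // e.g. i128 on a 64-bit target -> two i64 halves
  }

  int main() {
    const char *Names[] = {"Legal", "Promote", "Expand"};
    unsigned Sizes[] = {1, 24, 32, 128};
    for (unsigned i = 0; i != 4; ++i)
      std::cout << "i" << Sizes[i] << " -> "
                << Names[integerTypeAction(Sizes[i], 64)] << "\n";
    return 0;
  }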
@@ -222,10 +275,13 @@ public:
/// it is already legal (return 'Legal') or we need to promote it to a larger
/// type (return 'Promote'), or we need to expand it into multiple registers
/// of smaller integer type (return 'Expand'). 'Custom' is not an option.
- LegalizeAction getTypeAction(LLVMContext &Context, EVT VT) const {
- return ValueTypeActions.getTypeAction(Context, VT);
+ LegalizeAction getTypeAction(EVT VT) const {
+ return ValueTypeActions.getTypeAction(VT);
}
-
+ LegalizeAction getTypeAction(MVT VT) const {
+ return ValueTypeActions.getTypeAction(VT);
+ }
+
/// getTypeToTransformTo - For types supported by the target, this is an
/// identity function. For types that must be promoted to larger types, this
/// returns the larger type to promote to. For integer types that are larger
@@ -234,10 +290,10 @@ public:
/// returns the integer type to transform to.
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
if (VT.isSimple()) {
- assert((unsigned)VT.getSimpleVT().SimpleTy <
+ assert((unsigned)VT.getSimpleVT().SimpleTy <
array_lengthof(TransformToType));
EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy];
- assert(getTypeAction(Context, NVT) != Promote &&
+ assert(getTypeAction(NVT) != Promote &&
"Promote may not follow Expand or Promote");
return NVT;
}
@@ -252,17 +308,16 @@ public:
EltVT : EVT::getVectorVT(Context, EltVT, NumElts / 2);
}
// Promote to a power of two size, avoiding multi-step promotion.
- return getTypeAction(Context, NVT) == Promote ?
+ return getTypeAction(NVT) == Promote ?
getTypeToTransformTo(Context, NVT) : NVT;
} else if (VT.isInteger()) {
EVT NVT = VT.getRoundIntegerType(Context);
- if (NVT == VT)
- // Size is a power of two - expand to half the size.
+ if (NVT == VT) // Size is a power of two - expand to half the size.
return EVT::getIntegerVT(Context, VT.getSizeInBits() / 2);
- else
- // Promote to a power of two size, avoiding multi-step promotion.
- return getTypeAction(Context, NVT) == Promote ?
- getTypeToTransformTo(Context, NVT) : NVT;
+
+ // Promote to a power of two size, avoiding multi-step promotion.
+ return getTypeAction(NVT) == Promote ?
+ getTypeToTransformTo(Context, NVT) : NVT;
}
assert(0 && "Unsupported extended type!");
return MVT(MVT::Other); // Not reached
@@ -275,7 +330,7 @@ public:
EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
assert(!VT.isVector());
while (true) {
- switch (getTypeAction(Context, VT)) {
+ switch (getTypeAction(VT)) {
case Legal:
return VT;
case Expand:
@@ -307,36 +362,29 @@ public:
/// intrinsic will need to map to a MemIntrinsicNode (touches memory). If
/// this is the case, it returns true and store the intrinsic
/// information into the IntrinsicInfo that was passed to the function.
- typedef struct IntrinsicInfo {
+ struct IntrinsicInfo {
unsigned opc; // target opcode
EVT memVT; // memory VT
const Value* ptrVal; // value representing memory location
- int offset; // offset off of ptrVal
+ int offset; // offset off of ptrVal
unsigned align; // alignment
bool vol; // is volatile?
bool readMem; // reads memory?
bool writeMem; // writes memory?
- } IntrinisicInfo;
+ };
- virtual bool getTgtMemIntrinsic(IntrinsicInfo& Info,
- CallInst &I, unsigned Intrinsic) {
+ virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
+ const CallInst &I, unsigned Intrinsic) const {
return false;
}
- /// getWidenVectorType: given a vector type, returns the type to widen to
- /// (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
- /// If there is no vector type that we want to widen to, returns MVT::Other
- /// When and were to widen is target dependent based on the cost of
- /// scalarizing vs using the wider vector type.
- virtual EVT getWidenVectorType(EVT VT) const;
-
/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will materialize
/// the FP immediate as a load from a constant pool.
virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const {
return false;
}
-
+
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
@@ -366,13 +414,9 @@ public:
/// for it.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
if (VT.isExtended()) return Expand;
- assert(Op < array_lengthof(OpActions[0]) &&
- (unsigned)VT.getSimpleVT().SimpleTy < sizeof(OpActions[0][0])*8 &&
- "Table isn't big enough!");
+ assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
- unsigned J = I & 31;
- I = I >> 5;
- return (LegalizeAction)((OpActions[I][Op] >> (J*2) ) & 3);
+ return (LegalizeAction)OpActions[I][Op];
}
/// isOperationLegalOrCustom - Return true if the specified operation is
@@ -395,35 +439,31 @@ public:
/// either it is legal, needs to be promoted to a larger size, needs to be
/// expanded to some other code sequence, or the target has a custom expander
/// for it.
- LegalizeAction getLoadExtAction(unsigned LType, EVT VT) const {
- assert(LType < array_lengthof(LoadExtActions) &&
- (unsigned)VT.getSimpleVT().SimpleTy < sizeof(LoadExtActions[0])*4 &&
+ LegalizeAction getLoadExtAction(unsigned ExtType, EVT VT) const {
+ assert(ExtType < ISD::LAST_LOADEXT_TYPE &&
+ (unsigned)VT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
- return (LegalizeAction)((LoadExtActions[LType] >>
- (2*VT.getSimpleVT().SimpleTy)) & 3);
+ return (LegalizeAction)LoadExtActions[VT.getSimpleVT().SimpleTy][ExtType];
}
/// isLoadExtLegal - Return true if the specified load with extension is legal
/// on this target.
- bool isLoadExtLegal(unsigned LType, EVT VT) const {
+ bool isLoadExtLegal(unsigned ExtType, EVT VT) const {
return VT.isSimple() &&
- (getLoadExtAction(LType, VT) == Legal ||
- getLoadExtAction(LType, VT) == Custom);
+ (getLoadExtAction(ExtType, VT) == Legal ||
+ getLoadExtAction(ExtType, VT) == Custom);
}
/// getTruncStoreAction - Return how this store with truncation should be
/// treated: either it is legal, needs to be promoted to a larger size, needs
/// to be expanded to some other code sequence, or the target has a custom
/// expander for it.
- LegalizeAction getTruncStoreAction(EVT ValVT,
- EVT MemVT) const {
- assert((unsigned)ValVT.getSimpleVT().SimpleTy <
- array_lengthof(TruncStoreActions) &&
- (unsigned)MemVT.getSimpleVT().SimpleTy <
- sizeof(TruncStoreActions[0])*4 &&
+ LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
+ assert((unsigned)ValVT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
+ (unsigned)MemVT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
- return (LegalizeAction)((TruncStoreActions[ValVT.getSimpleVT().SimpleTy] >>
- (2*MemVT.getSimpleVT().SimpleTy)) & 3);
+ return (LegalizeAction)TruncStoreActions[ValVT.getSimpleVT().SimpleTy]
+ [MemVT.getSimpleVT().SimpleTy];
}
/// isTruncStoreLegal - Return true if the specified store with truncation is
@@ -440,11 +480,11 @@ public:
/// for it.
LegalizeAction
getIndexedLoadAction(unsigned IdxMode, EVT VT) const {
- assert( IdxMode < array_lengthof(IndexedModeActions[0][0]) &&
+ assert( IdxMode < ISD::LAST_INDEXED_MODE &&
((unsigned)VT.getSimpleVT().SimpleTy) < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
- return (LegalizeAction)((IndexedModeActions[
- (unsigned)VT.getSimpleVT().SimpleTy][0][IdxMode]));
+ unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
+ return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
}
/// isIndexedLoadLegal - Return true if the specified indexed load is legal
@@ -461,12 +501,12 @@ public:
/// for it.
LegalizeAction
getIndexedStoreAction(unsigned IdxMode, EVT VT) const {
- assert(IdxMode < array_lengthof(IndexedModeActions[0][1]) &&
- (unsigned)VT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
+ assert( IdxMode < ISD::LAST_INDEXED_MODE &&
+ ((unsigned)VT.getSimpleVT().SimpleTy) < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
- return (LegalizeAction)((IndexedModeActions[
- (unsigned)VT.getSimpleVT().SimpleTy][1][IdxMode]));
- }
+ unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
+ return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
+ }
/// isIndexedStoreLegal - Return true if the specified indexed load is legal
/// on this target.
@@ -476,29 +516,6 @@ public:
getIndexedStoreAction(IdxMode, VT) == Custom);
}
- /// getConvertAction - Return how the conversion should be treated:
- /// either it is legal, needs to be promoted to a larger size, needs to be
- /// expanded to some other code sequence, or the target has a custom expander
- /// for it.
- LegalizeAction
- getConvertAction(EVT FromVT, EVT ToVT) const {
- assert((unsigned)FromVT.getSimpleVT().SimpleTy <
- array_lengthof(ConvertActions) &&
- (unsigned)ToVT.getSimpleVT().SimpleTy <
- sizeof(ConvertActions[0])*4 &&
- "Table isn't big enough!");
- return (LegalizeAction)((ConvertActions[FromVT.getSimpleVT().SimpleTy] >>
- (2*ToVT.getSimpleVT().SimpleTy)) & 3);
- }
-
- /// isConvertLegal - Return true if the specified conversion is legal
- /// on this target.
- bool isConvertLegal(EVT FromVT, EVT ToVT) const {
- return isTypeLegal(FromVT) && isTypeLegal(ToVT) &&
- (getConvertAction(FromVT, ToVT) == Legal ||
- getConvertAction(FromVT, ToVT) == Custom);
- }
-
/// getCondCodeAction - Return how the condition code should be treated:
/// either it is legal, needs to be expanded to some other code sequence,
/// or the target has a custom expander for it.
@@ -535,7 +552,7 @@ public:
assert((VT.isInteger() || VT.isFloatingPoint()) &&
"Cannot autopromote this type, add it with AddPromotedToType.");
-
+
EVT NVT = VT;
do {
NVT = (MVT::SimpleValueType)(NVT.getSimpleVT().SimpleTy+1);
@@ -552,21 +569,21 @@ public:
/// counterpart (e.g. structs), otherwise it will assert.
EVT getValueType(const Type *Ty, bool AllowUnknown = false) const {
EVT VT = EVT::getEVT(Ty, AllowUnknown);
- return VT == MVT:: iPTR ? PointerTy : VT;
+ return VT == MVT::iPTR ? PointerTy : VT;
}
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
virtual unsigned getByValTypeAlignment(const Type *Ty) const;
-
+
/// getRegisterType - Return the type of registers that this ValueType will
/// eventually require.
EVT getRegisterType(MVT VT) const {
assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
return RegisterTypeForVT[VT.SimpleTy];
}
-
+
/// getRegisterType - Return the type of registers that this ValueType will
/// eventually require.
EVT getRegisterType(LLVMContext &Context, EVT VT) const {
@@ -649,7 +666,7 @@ public:
/// of the specified type. This is used, for example, in situations where an
/// array copy/move/set is converted to a sequence of store operations. Its
/// use helps to ensure that such replacements don't generate code that causes
- /// an alignment error (trap) on the target machine.
+ /// an alignment error (trap) on the target machine.
/// @brief Determine if the target supports unaligned memory accesses.
virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
return false;
@@ -663,15 +680,24 @@ public:
}
/// getOptimalMemOpType - Returns the target specific optimal type for load
- /// and store operations as a result of memset, memcpy, and memmove lowering.
- /// It returns EVT::Other if SelectionDAG should be responsible for
- /// determining it.
- virtual EVT getOptimalMemOpType(uint64_t Size, unsigned Align,
- bool isSrcConst, bool isSrcStr,
- SelectionDAG &DAG) const {
+ /// and store operations as a result of memset, memcpy, and memmove
+ /// lowering. If DstAlign is zero, the destination alignment can satisfy
+ /// any constraint. Similarly, if SrcAlign is zero, there is no need to
+ /// check it against an alignment requirement,
+ /// probably because the source does not need to be loaded. If
+ /// 'NonScalarIntSafe' is true, that means it's safe to return a
+ /// non-scalar-integer type, e.g. empty string source, constant, or loaded
+ /// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
+ /// constant so it does not need to be loaded.
+ /// It returns EVT::Other if the type should be determined using generic
+ /// target-independent logic.
+ virtual EVT getOptimalMemOpType(uint64_t Size,
+ unsigned DstAlign, unsigned SrcAlign,
+ bool NonScalarIntSafe, bool MemcpyStrSrc,
+ MachineFunction &MF) const {
return MVT::Other;
}
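
By way of example, a hypothetical backend might override the widened hook like the sketch below; MyTargetLowering and the chosen return types are assumptions, not an existing target.

    // Sketch only; class declaration and includes omitted for brevity.
    EVT MyTargetLowering::getOptimalMemOpType(uint64_t Size,
                                              unsigned DstAlign, unsigned SrcAlign,
                                              bool NonScalarIntSafe,
                                              bool MemcpyStrSrc,
                                              MachineFunction &MF) const {
      // DstAlign == 0 means any destination alignment is acceptable.
      if (NonScalarIntSafe && Size >= 16 && (DstAlign == 0 || DstAlign >= 16))
        return MVT::v4i32;   // pretend the target has fast 128-bit stores
      if (Size >= 8 && (DstAlign == 0 || DstAlign >= 8))
        return MVT::i64;
      return MVT::Other;     // let the generic logic pick the type
    }
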
-
+
/// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
/// to implement llvm.setjmp.
bool usesUnderscoreSetJmp() const {
@@ -717,17 +743,10 @@ public:
return JumpBufAlignment;
}
- /// getIfCvtBlockLimit - returns the target specific if-conversion block size
- /// limit. Any block whose size is greater should not be predicated.
- unsigned getIfCvtBlockSizeLimit() const {
- return IfCvtBlockSizeLimit;
- }
-
- /// getIfCvtDupBlockLimit - returns the target specific size limit for a
- /// block to be considered for duplication. Any block whose size is greater
- /// should not be duplicated to facilitate its predication.
- unsigned getIfCvtDupBlockSizeLimit() const {
- return IfCvtDupBlockSizeLimit;
+ /// getMinStackArgumentAlignment - return the minimum stack alignment of an
+ /// argument.
+ unsigned getMinStackArgumentAlignment() const {
+ return MinStackArgumentAlignment;
}
/// getPrefLoopAlignment - return the preferred loop alignment.
@@ -735,7 +754,14 @@ public:
unsigned getPrefLoopAlignment() const {
return PrefLoopAlignment;
}
-
+
+ /// getShouldFoldAtomicFences - return whether the combiner should fold
+ /// fence MEMBARRIER instructions into the atomic intrinsic instructions.
+ ///
+ bool getShouldFoldAtomicFences() const {
+ return ShouldFoldAtomicFences;
+ }
+
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
@@ -745,7 +771,7 @@ public:
SelectionDAG &DAG) const {
return false;
}
-
+
/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
@@ -755,12 +781,12 @@ public:
SelectionDAG &DAG) const {
return false;
}
-
+
/// getJumpTableEncoding - Return the entry encoding for a jump table in the
/// current function. The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
virtual unsigned getJumpTableEncoding() const;
-
+
virtual const MCExpr *
LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB, unsigned uid,
@@ -768,7 +794,7 @@ public:
assert(0 && "Need to implement this hook if target has custom JTIs");
return 0;
}
-
+
/// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
/// jumptable.
virtual SDValue getPICJumpTableRelocBase(SDValue Table,
@@ -780,7 +806,7 @@ public:
virtual const MCExpr *
getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
unsigned JTI, MCContext &Ctx) const;
-
+
/// isOffsetFoldingLegal - Return true if folding a constant offset
/// with the given GlobalAddress is legal. It is frequently not legal in
/// PIC relocation models.
@@ -789,29 +815,48 @@ public:
/// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *) const = 0;
+ /// getStackCookieLocation - Return true if the target stores stack
+ /// protector cookies at a fixed offset in some non-standard address
+ /// space. If so, it populates AddressSpace and Offset with the
+ /// appropriate values.
+ virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const {
+ return false;
+ }
+
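
As a hedged aside, the kind of override a target with a TLS-resident guard value might provide looks like the sketch below; the class name, address space and offset are illustrative assumptions.

    // Sketch only: report a segment-relative stack cookie location.
    bool MyTargetLowering::getStackCookieLocation(unsigned &AddressSpace,
                                                  unsigned &Offset) const {
      AddressSpace = 257;   // illustrative: a segment-style address space
      Offset = 0x28;        // illustrative: cookie offset inside the TCB
      return true;          // codegen can now load the cookie directly
    }
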
+ /// getMaximalGlobalOffset - Returns the maximal possible offset which can be
+ /// used for loads / stores from the global.
+ virtual unsigned getMaximalGlobalOffset() const {
+ return 0;
+ }
+
//===--------------------------------------------------------------------===//
// TargetLowering Optimization Methods
//
-
+
/// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two
/// SDValues for returning information from TargetLowering to its clients
- /// that want to combine
+ /// that want to combine.
struct TargetLoweringOpt {
SelectionDAG &DAG;
- bool ShrinkOps;
+ bool LegalTys;
+ bool LegalOps;
SDValue Old;
SDValue New;
- explicit TargetLoweringOpt(SelectionDAG &InDAG, bool Shrink = false) :
- DAG(InDAG), ShrinkOps(Shrink) {}
-
- bool CombineTo(SDValue O, SDValue N) {
- Old = O;
- New = N;
+ explicit TargetLoweringOpt(SelectionDAG &InDAG,
+ bool LT, bool LO) :
+ DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
+
+ bool LegalTypes() const { return LegalTys; }
+ bool LegalOperations() const { return LegalOps; }
+
+ bool CombineTo(SDValue O, SDValue N) {
+ Old = O;
+ New = N;
return true;
}
-
- /// ShrinkDemandedConstant - Check to see if the specified operand of the
+
+ /// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer. If so, check to see if
/// there are any bits set in the constant that are not demanded. If so,
/// shrink the constant and return true.
@@ -824,25 +869,25 @@ public:
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
DebugLoc dl);
};
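
For illustration only (not from this commit): target combine code typically drives the reworked struct as in the sketch below, passing the two legality phases explicitly instead of the old single ShrinkOps flag. MyTargetLowering::PerformSomeCombine is a hypothetical example; SimplifyDemandedBits and DAGCombinerInfo are the members declared in this class.

    // Sketch only.
    SDValue MyTargetLowering::PerformSomeCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
      SelectionDAG &DAG = DCI.DAG;
      TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                            !DCI.isBeforeLegalizeOps());
      APInt KnownZero, KnownOne;
      APInt Demanded = APInt::getAllOnesValue(N->getValueSizeInBits(0));
      if (SimplifyDemandedBits(SDValue(N, 0), Demanded,
                               KnownZero, KnownOne, TLO)) {
        DCI.CommitTargetLoweringOpt(TLO);   // commit the Old -> New replacement
        return SDValue(N, 0);
      }
      return SDValue();
    }
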
-
+
/// SimplifyDemandedBits - Look at Op. At this point, we know that only the
/// DemandedMask bits of the result of Op are ever used downstream. If we can
/// use this information to simplify Op, create a new simplified DAG node and
- /// return true, returning the original and new nodes in Old and New.
- /// Otherwise, analyze the expression and return a mask of KnownOne and
- /// KnownZero bits for the expression (used to simplify the caller).
- /// The KnownZero/One bits may only be accurate for those bits in the
+ /// return true, returning the original and new nodes in Old and New.
+ /// Otherwise, analyze the expression and return a mask of KnownOne and
+ /// KnownZero bits for the expression (used to simplify the caller).
+ /// The KnownZero/One bits may only be accurate for those bits in the
/// DemandedMask.
- bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
+ bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
APInt &KnownZero, APInt &KnownOne,
TargetLoweringOpt &TLO, unsigned Depth = 0) const;
-
+
/// computeMaskedBitsForTargetNode - Determine which of the bits specified in
- /// Mask are known to be either zero or one and return them in the
+ /// Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
const APInt &Mask,
- APInt &KnownZero,
+ APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth = 0) const;
@@ -852,7 +897,7 @@ public:
/// DAG Combiner.
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
unsigned Depth = 0) const;
-
+
struct DAGCombinerInfo {
void *DC; // The DAG Combiner object.
bool BeforeLegalize;
@@ -860,15 +905,15 @@ public:
bool CalledByLegalizer;
public:
SelectionDAG &DAG;
-
+
DAGCombinerInfo(SelectionDAG &dag, bool bl, bool blo, bool cl, void *dc)
: DC(dc), BeforeLegalize(bl), BeforeLegalizeOps(blo),
CalledByLegalizer(cl), DAG(dag) {}
-
+
bool isBeforeLegalize() const { return BeforeLegalize; }
bool isBeforeLegalizeOps() const { return BeforeLegalizeOps; }
bool isCalledByLegalizer() const { return CalledByLegalizer; }
-
+
void AddToWorklist(SDNode *N);
SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
bool AddTo = true);
@@ -878,7 +923,7 @@ public:
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
};
- /// SimplifySetCC - Try to simplify a setcc built with the specified operands
+ /// SimplifySetCC - Try to simplify a setcc built with the specified operands
/// and cc. If it is unable to simplify it, return a null SDValue.
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond, bool foldBooleans,
@@ -887,7 +932,7 @@ public:
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
virtual bool
- isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) const;
+ isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
/// PerformDAGCombine - This method will be invoked for all target nodes and
/// for any target-independent nodes that the target has registered with
@@ -903,7 +948,23 @@ public:
/// more complex transformations.
///
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
-
+
+ /// isTypeDesirableForOp - Return true if the target has native support for
+ /// the specified value type and it is 'desirable' to use the type for the
+ /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
+ /// instruction encodings are longer and some i16 instructions are slow.
+ virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const {
+ // By default, assume all legal types are desirable.
+ return isTypeLegal(VT);
+ }
+
+ /// IsDesirableToPromoteOp - This method queries the target whether it is
+ /// beneficial for dag combiner to promote the specified node. If true, it
+ /// should return the desired promotion type by reference.
+ virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
+ return false;
+ }
+
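
For illustration, an x86-flavoured override might steer the combiner away from i16 as sketched below; MyTargetLowering and the exact opcode list are assumptions.

    // Sketch only: i16 is Legal but usually not desirable.
    bool MyTargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
      if (!isTypeLegal(VT))
        return false;
      if (VT != MVT::i16)
        return true;
      switch (Opc) {
      default:
        return true;
      case ISD::LOAD:
      case ISD::SHL:
      case ISD::SRL:
      case ISD::MUL:
        return false;   // longer encodings / partial-register penalties
      }
    }
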
//===--------------------------------------------------------------------===//
// TargetLowering Configuration Methods - These methods should be invoked by
// the derived class constructor to configure this object for the target.
@@ -919,7 +980,7 @@ protected:
void setBooleanContents(BooleanContent Ty) { BooleanContents = Ty; }
/// setSchedulingPreference - Specify the target scheduling preference.
- void setSchedulingPreference(SchedPreference Pref) {
+ void setSchedulingPreference(Sched::Preference Pref) {
SchedPreferenceInfo = Pref;
}
@@ -943,7 +1004,7 @@ protected:
void setStackPointerRegisterToSaveRestore(unsigned R) {
StackPointerRegisterToSaveRestore = R;
}
-
+
/// setExceptionPointerRegister - If set to a physical register, this sets
/// the register that receives the exception address on entry to a landing
/// pad.
@@ -966,21 +1027,28 @@ protected:
/// expensive, and if possible, should be replaced by an alternate sequence
/// of instructions not containing an integer divide.
void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
-
+
/// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
/// it.
void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }
-
+
/// addRegisterClass - Add the specified register class as an available
/// regclass for the specified value type. This indicates the selector can
/// handle values of that class natively.
- void addRegisterClass(EVT VT, TargetRegisterClass *RC) {
+ void addRegisterClass(EVT VT, TargetRegisterClass *RC,
+ bool isSynthesizable = true) {
assert((unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
AvailableRegClasses.push_back(std::make_pair(VT, RC));
RegClassForVT[VT.getSimpleVT().SimpleTy] = RC;
+ Synthesizable[VT.getSimpleVT().SimpleTy] = isSynthesizable;
}
+ /// findRepresentativeClass - Return the largest legal super-reg register class
+ /// of the register class for the specified type and its associated "cost".
+ virtual std::pair<const TargetRegisterClass*, uint8_t>
+ findRepresentativeClass(EVT VT) const;
+
/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void computeRegisterProperties();
@@ -989,68 +1057,58 @@ protected:
/// with the specified type and indicate what to do about it.
void setOperationAction(unsigned Op, MVT VT,
LegalizeAction Action) {
- unsigned I = (unsigned)VT.SimpleTy;
- unsigned J = I & 31;
- I = I >> 5;
- OpActions[I][Op] &= ~(uint64_t(3UL) << (J*2));
- OpActions[I][Op] |= (uint64_t)Action << (J*2);
+ assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
+ OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
}
-
+
/// setLoadExtAction - Indicate that the specified load with extension does
- /// not work with the with specified type and indicate what to do about it.
+ /// not work with the specified type and indicate what to do about it.
void setLoadExtAction(unsigned ExtType, MVT VT,
- LegalizeAction Action) {
- assert((unsigned)VT.SimpleTy*2 < 63 &&
- ExtType < array_lengthof(LoadExtActions) &&
+ LegalizeAction Action) {
+ assert(ExtType < ISD::LAST_LOADEXT_TYPE &&
+ (unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
- LoadExtActions[ExtType] &= ~(uint64_t(3UL) << VT.SimpleTy*2);
- LoadExtActions[ExtType] |= (uint64_t)Action << VT.SimpleTy*2;
+ LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
}
-
+
/// setTruncStoreAction - Indicate that the specified truncating store does
- /// not work with the with specified type and indicate what to do about it.
+ /// not work with the specified type and indicate what to do about it.
void setTruncStoreAction(MVT ValVT, MVT MemVT,
LegalizeAction Action) {
- assert((unsigned)ValVT.SimpleTy < array_lengthof(TruncStoreActions) &&
- (unsigned)MemVT.SimpleTy*2 < 63 &&
+ assert((unsigned)ValVT.SimpleTy < MVT::LAST_VALUETYPE &&
+ (unsigned)MemVT.SimpleTy < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
- TruncStoreActions[ValVT.SimpleTy] &= ~(uint64_t(3UL) << MemVT.SimpleTy*2);
- TruncStoreActions[ValVT.SimpleTy] |= (uint64_t)Action << MemVT.SimpleTy*2;
+ TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
}
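
For illustration only (not from this commit): a target constructor fills the now byte-sized tables through the protected setters above, roughly as in the sketch below; configureLegalizeActions on a hypothetical MyTargetLowering is an assumption.

    // Sketch only: typically called from the target's constructor.
    void MyTargetLowering::configureLegalizeActions() {
      setOperationAction(ISD::SDIV, MVT::i64, Expand);
      setOperationAction(ISD::UDIV, MVT::i64, Expand);
      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i32, Custom);
      setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);    // no sext i8 loads
      setTruncStoreAction(MVT::f64, MVT::f32, Expand);     // expand f64->f32 stores
    }
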
/// setIndexedLoadAction - Indicate that the specified indexed load does or
- /// does not work with the with specified type and indicate what to do abort
+ /// does not work with the specified type and indicate what to do about
/// it. NOTE: All indexed mode loads are initialized to Expand in
/// TargetLowering.cpp
void setIndexedLoadAction(unsigned IdxMode, MVT VT,
LegalizeAction Action) {
assert((unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
- IdxMode < array_lengthof(IndexedModeActions[0][0]) &&
+ IdxMode < ISD::LAST_INDEXED_MODE &&
+ (unsigned)Action < 0xf &&
"Table isn't big enough!");
- IndexedModeActions[(unsigned)VT.SimpleTy][0][IdxMode] = (uint8_t)Action;
+ // Load actions are kept in the upper half.
+ IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
+ IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
}
-
+
/// setIndexedStoreAction - Indicate that the specified indexed store does or
- /// does not work with the with specified type and indicate what to do about
+ /// does not work with the specified type and indicate what to do about
/// it. NOTE: All indexed mode stores are initialized to Expand in
/// TargetLowering.cpp
void setIndexedStoreAction(unsigned IdxMode, MVT VT,
LegalizeAction Action) {
assert((unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
- IdxMode < array_lengthof(IndexedModeActions[0][1] ) &&
- "Table isn't big enough!");
- IndexedModeActions[(unsigned)VT.SimpleTy][1][IdxMode] = (uint8_t)Action;
- }
-
- /// setConvertAction - Indicate that the specified conversion does or does
- /// not work with the with specified type and indicate what to do about it.
- void setConvertAction(MVT FromVT, MVT ToVT,
- LegalizeAction Action) {
- assert((unsigned)FromVT.SimpleTy < array_lengthof(ConvertActions) &&
- (unsigned)ToVT.SimpleTy < MVT::LAST_VALUETYPE &&
+ IdxMode < ISD::LAST_INDEXED_MODE &&
+ (unsigned)Action < 0xf &&
"Table isn't big enough!");
- ConvertActions[FromVT.SimpleTy] &= ~(uint64_t(3UL) << ToVT.SimpleTy*2);
- ConvertActions[FromVT.SimpleTy] |= (uint64_t)Action << ToVT.SimpleTy*2;
+ // Store actions are kept in the lower half.
+ IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
+ IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
}
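
As a hedged aside, the packing arithmetic these two setters and their getters agree on can be written standalone as below; the helper name is an assumption.

    // Sketch only: one byte per (value type, addressing mode) entry,
    // load action in the high nibble, store action in the low nibble.
    #include <stdint.h>
    #include "llvm/Target/TargetLowering.h"

    static uint8_t packIndexedActions(llvm::TargetLowering::LegalizeAction Load,
                                      llvm::TargetLowering::LegalizeAction Store) {
      return (uint8_t)((Load << 4) | Store);
    }
    // Unpacking mirrors getIndexedLoadAction / getIndexedStoreAction:
    //   load = (entry & 0xf0) >> 4,  store = entry & 0x0f
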
/// setCondCodeAction - Indicate that the specified condition code is or isn't
@@ -1079,7 +1137,7 @@ protected:
assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
}
-
+
/// setJumpBufSize - Set the target's required jmp_buf buffer size (in
/// bytes); default is 200
void setJumpBufSize(unsigned Size) {
@@ -1092,32 +1150,25 @@ protected:
JumpBufAlignment = Align;
}
- /// setIfCvtBlockSizeLimit - Set the target's if-conversion block size
- /// limit (in number of instructions); default is 2.
- void setIfCvtBlockSizeLimit(unsigned Limit) {
- IfCvtBlockSizeLimit = Limit;
- }
-
- /// setIfCvtDupBlockSizeLimit - Set the target's block size limit (in number
- /// of instructions) to be considered for code duplication during
- /// if-conversion; default is 2.
- void setIfCvtDupBlockSizeLimit(unsigned Limit) {
- IfCvtDupBlockSizeLimit = Limit;
- }
-
/// setPrefLoopAlignment - Set the target's preferred loop alignment. Default
/// alignment is zero, it means the target does not care about loop alignment.
void setPrefLoopAlignment(unsigned Align) {
PrefLoopAlignment = Align;
}
-
-public:
- virtual const TargetSubtarget *getSubtarget() {
- assert(0 && "Not Implemented");
- return NULL; // this is here to silence compiler errors
+ /// setMinStackArgumentAlignment - Set the minimum stack alignment of an
+ /// argument.
+ void setMinStackArgumentAlignment(unsigned Align) {
+ MinStackArgumentAlignment = Align;
+ }
+
+ /// setShouldFoldAtomicFences - Set if the target's implementation of the
+ /// atomic operation intrinsics includes locking. Default is false.
+ void setShouldFoldAtomicFences(bool fold) {
+ ShouldFoldAtomicFences = fold;
}
+public:
//===--------------------------------------------------------------------===//
// Lowering methods - These methods must be implemented by targets so that
// the SelectionDAGLowering code knows how to lower these.
@@ -1134,7 +1185,7 @@ public:
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) {
+ SmallVectorImpl<SDValue> &InVals) const {
assert(0 && "Not Implemented");
return SDValue(); // this is here to silence compiler errors
}
@@ -1164,7 +1215,7 @@ public:
bool isVarArg, bool isInreg, unsigned NumFixedArgs,
CallingConv::ID CallConv, bool isTailCall,
bool isReturnValueUsed, SDValue Callee, ArgListTy &Args,
- SelectionDAG &DAG, DebugLoc dl);
+ SelectionDAG &DAG, DebugLoc dl) const;
/// LowerCall - This hook must be implemented to lower calls into the
/// specified DAG. The outgoing arguments to the call are described
@@ -1176,9 +1227,10 @@ public:
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) {
+ SmallVectorImpl<SDValue> &InVals) const {
assert(0 && "Not Implemented");
return SDValue(); // this is here to silence compiler errors
}
@@ -1188,13 +1240,13 @@ public:
/// registers. If false is returned, an sret-demotion is performed.
///
virtual bool CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
- SelectionDAG &DAG)
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const
{
// Return true by default to get preexisting behavior.
return true;
}
+
/// LowerReturn - This hook must be implemented to lower outgoing
/// return values, described by the Outs array, into the specified
/// DAG. The implementation should return the resulting token chain
@@ -1203,66 +1255,12 @@ public:
virtual SDValue
LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
- DebugLoc dl, SelectionDAG &DAG) {
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc dl, SelectionDAG &DAG) const {
assert(0 && "Not Implemented");
return SDValue(); // this is here to silence compiler errors
}
- /// EmitTargetCodeForMemcpy - Emit target-specific code that performs a
- /// memcpy. This can be used by targets to provide code sequences for cases
- /// that don't fit the target's parameters for simple loads/stores and can be
- /// more efficient than using a library call. This function can return a null
- /// SDValue if the target declines to use custom code and a different
- /// lowering strategy should be used.
- ///
- /// If AlwaysInline is true, the size is constant and the target should not
- /// emit any calls and is strongly encouraged to attempt to emit inline code
- /// even if it is beyond the usual threshold because this intrinsic is being
- /// expanded in a place where calls are not feasible (e.g. within the prologue
- /// for another call). If the target chooses to decline an AlwaysInline
- /// request here, legalize will resort to using simple loads and stores.
- virtual SDValue
- EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
- SDValue Chain,
- SDValue Op1, SDValue Op2,
- SDValue Op3, unsigned Align,
- bool AlwaysInline,
- const Value *DstSV, uint64_t DstOff,
- const Value *SrcSV, uint64_t SrcOff) {
- return SDValue();
- }
-
- /// EmitTargetCodeForMemmove - Emit target-specific code that performs a
- /// memmove. This can be used by targets to provide code sequences for cases
- /// that don't fit the target's parameters for simple loads/stores and can be
- /// more efficient than using a library call. This function can return a null
- /// SDValue if the target declines to use custom code and a different
- /// lowering strategy should be used.
- virtual SDValue
- EmitTargetCodeForMemmove(SelectionDAG &DAG, DebugLoc dl,
- SDValue Chain,
- SDValue Op1, SDValue Op2,
- SDValue Op3, unsigned Align,
- const Value *DstSV, uint64_t DstOff,
- const Value *SrcSV, uint64_t SrcOff) {
- return SDValue();
- }
-
- /// EmitTargetCodeForMemset - Emit target-specific code that performs a
- /// memset. This can be used by targets to provide code sequences for cases
- /// that don't fit the target's parameters for simple stores and can be more
- /// efficient than using a library call. This function can return a null
- /// SDValue if the target declines to use custom code and a different
- /// lowering strategy should be used.
- virtual SDValue
- EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
- SDValue Chain,
- SDValue Op1, SDValue Op2,
- SDValue Op3, unsigned Align,
- const Value *DstSV, uint64_t DstOff) {
- return SDValue();
- }
-
/// LowerOperationWrapper - This callback is invoked by the type legalizer
/// to legalize nodes with an illegal operand type but legal result types.
/// It replaces the LowerOperation callback in the type Legalizer.
@@ -1277,14 +1275,14 @@ public:
/// The default implementation calls LowerOperation.
virtual void LowerOperationWrapper(SDNode *N,
SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG);
+ SelectionDAG &DAG) const;
- /// LowerOperation - This callback is invoked for operations that are
+ /// LowerOperation - This callback is invoked for operations that are
/// unsupported by the target, which are registered to use 'custom' lowering,
/// and whose defined values are all legal.
/// If the target has no operations that require custom lowering, it need not
/// implement this. The default implementation of this aborts.
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);
+ virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
/// ReplaceNodeResults - This callback is invoked when a node result type is
/// illegal for the target, and the operation was registered to use 'custom'
@@ -1296,7 +1294,7 @@ public:
/// If the target has no operations that require custom lowering, it need not
/// implement this. The default implementation aborts.
virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
assert(0 && "ReplaceNodeResults not implemented for this target!");
}
@@ -1306,23 +1304,14 @@ public:
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
- virtual FastISel *
- createFastISel(MachineFunction &,
- MachineModuleInfo *, DwarfWriter *,
- DenseMap<const Value *, unsigned> &,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &,
- DenseMap<const AllocaInst *, int> &
-#ifndef NDEBUG
- , SmallSet<Instruction*, 8> &CatchInfoLost
-#endif
- ) {
+ virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const {
return 0;
}
//===--------------------------------------------------------------------===//
// Inline Asm Support hooks
//
-
+
/// ExpandInlineAsm - This hook allows the target to expand an inline asm
/// call to be explicit llvm code if it wants to. This is useful for
/// turning simple inline asms into LLVM intrinsics, which gives the
@@ -1330,7 +1319,7 @@ public:
virtual bool ExpandInlineAsm(CallInst *CI) const {
return false;
}
-
+
enum ConstraintType {
C_Register, // Constraint represents specific register(s).
C_RegisterClass, // Constraint represents any of register(s) in class.
@@ -1338,7 +1327,7 @@ public:
C_Other, // Something else.
C_Unknown // Unsupported constraint.
};
-
+
/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
@@ -1350,25 +1339,25 @@ public:
/// ConstraintType - Information about the constraint code, e.g. Register,
/// RegisterClass, Memory, Other, Unknown.
TargetLowering::ConstraintType ConstraintType;
-
+
/// CallOperandVal - If this is the result output operand or a
/// clobber, this is null, otherwise it is the incoming operand to the
/// CallInst. This gets modified as the asm is processed.
Value *CallOperandVal;
-
+
/// ConstraintVT - The ValueType for the operand value.
EVT ConstraintVT;
-
+
/// isMatchingInputConstraint - Return true if this is an input operand that
/// is a matching constraint like "4".
bool isMatchingInputConstraint() const;
-
+
/// getMatchedOperand - If this is an input matching constraint, this method
/// returns the output operand it matches.
unsigned getMatchedOperand() const;
-
+
AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
- : InlineAsm::ConstraintInfo(info),
+ : InlineAsm::ConstraintInfo(info),
ConstraintType(TargetLowering::C_Unknown),
CallOperandVal(0), ConstraintVT(MVT::Other) {
}
@@ -1378,21 +1367,19 @@ public:
/// type to use for the specific AsmOperandInfo, setting
/// OpInfo.ConstraintCode and OpInfo.ConstraintType. If the actual operand
/// being passed in is available, it can be passed in as Op, otherwise an
- /// empty SDValue can be passed. If hasMemory is true it means one of the asm
- /// constraint of the inline asm instruction being processed is 'm'.
+ /// empty SDValue can be passed.
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
SDValue Op,
- bool hasMemory,
SelectionDAG *DAG = 0) const;
-
+
/// getConstraintType - Given a constraint, return the type of constraint it
/// is for this target.
virtual ConstraintType getConstraintType(const std::string &Constraint) const;
-
+
/// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
/// return a list of registers that can be used to satisfy the constraint.
/// This should only be used for C_RegisterClass constraints.
- virtual std::vector<unsigned>
+ virtual std::vector<unsigned>
getRegClassForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
@@ -1406,41 +1393,34 @@ public:
///
/// This should only be used for C_Register constraints. On error,
/// this returns a register number of 0 and a null register class pointer.
- virtual std::pair<unsigned, const TargetRegisterClass*>
+ virtual std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
-
+
/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand. This returns null if there is no replacement to
/// make.
virtual const char *LowerXConstraint(EVT ConstraintVT) const;
-
+
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
- /// vector. If it is invalid, don't add anything to Ops. If hasMemory is true
- /// it means one of the asm constraint of the inline asm instruction being
- /// processed is 'm'.
+ /// vector. If it is invalid, don't add anything to Ops.
virtual void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter,
- bool hasMemory,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const;
-
+
//===--------------------------------------------------------------------===//
// Instruction Emitting Hooks
//
-
+
// EmitInstrWithCustomInserter - This method should be implemented by targets
// that mark instructions with the 'usesCustomInserter' flag. These
// instructions are special in various ways, which require special support to
// insert. The specified MachineInstr is created but not inserted into any
// basic blocks, and this method is called to expand it into a sequence of
// instructions, potentially also creating new basic blocks and control flow.
- // When new basic blocks are inserted and the edges from MBB to its successors
- // are modified, the method should insert pairs of <OldSucc, NewSucc> into the
- // DenseMap.
- virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB,
- DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const;
+ virtual MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
//===--------------------------------------------------------------------===//
// Addressing mode description hooks (used by LSR etc).
@@ -1461,7 +1441,7 @@ public:
int64_t Scale;
AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
};
-
+
/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
/// The type may be VoidTy, in which case only return true if the addressing
@@ -1514,9 +1494,9 @@ public:
//===--------------------------------------------------------------------===//
// Div utility functions
//
- SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG,
+ SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG,
std::vector<SDNode*>* Created) const;
- SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG,
+ SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG,
std::vector<SDNode*>* Created) const;
@@ -1553,7 +1533,7 @@ public:
void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
LibcallCallingConvs[Call] = CC;
}
-
+
/// getLibcallCallingConv - Get the CallingConv that should be used for the
/// specified libcall.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
@@ -1561,9 +1541,9 @@ public:
}
private:
- TargetMachine &TM;
+ const TargetMachine &TM;
const TargetData *TD;
- TargetLoweringObjectFile &TLOF;
+ const TargetLoweringObjectFile &TLOF;
/// PointerTy - The type to use for pointers, usually i32 or i64.
///
@@ -1582,12 +1562,12 @@ private:
/// a real cost model is in place. If we ever optimize for size, this will be
/// set to true unconditionally.
bool IntDivIsCheap;
-
+
/// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
/// it.
bool Pow2DivIsCheap;
-
+
/// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
/// llvm.setjmp. Defaults to false.
bool UseUnderscoreSetJmp;
@@ -1606,27 +1586,29 @@ private:
/// SchedPreferenceInfo - The target scheduling preference: shortest possible
/// total cycles or lowest register usage.
- SchedPreference SchedPreferenceInfo;
-
+ Sched::Preference SchedPreferenceInfo;
+
/// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
unsigned JumpBufSize;
-
+
/// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
/// buffers
unsigned JumpBufAlignment;
- /// IfCvtBlockSizeLimit - The maximum allowed size for a block to be
- /// if-converted.
- unsigned IfCvtBlockSizeLimit;
-
- /// IfCvtDupBlockSizeLimit - The maximum allowed size for a block to be
- /// duplicated during if-conversion.
- unsigned IfCvtDupBlockSizeLimit;
+ /// MinStackArgumentAlignment - The minimum alignment that any argument
+ /// on the stack needs to have.
+ ///
+ unsigned MinStackArgumentAlignment;
/// PrefLoopAlignment - The preferred loop alignment.
///
unsigned PrefLoopAlignment;
+ /// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions should
+ /// be folded into the enclosed atomic intrinsic instruction by the
+ /// combiner.
+ bool ShouldFoldAtomicFences;
+
/// StackPointerRegisterToSaveRestore - If set to a physical register, this
/// specifies the register that llvm.savestack/llvm.restorestack should save
/// and restore.
@@ -1648,6 +1630,24 @@ private:
unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
EVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
+ /// RepRegClassForVT - This indicates the "representative" register class to
+ /// use for each ValueType the target supports natively. This information is
+ /// used by the scheduler to track register pressure. By default, the
+ /// representative register class is the largest legal super-reg register
+ /// class of the register class of the specified type. e.g. On x86, i8, i16,
+ /// and i32's representative class would be GR32.
+ const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
+
+ /// RepRegClassCostForVT - This indicates the "cost" of the "representative"
+ /// register class for each ValueType. The cost is used by the scheduler to
+ /// approximate register pressure.
+ uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
+
+ /// Synthesizable indicates whether it is OK for the compiler to create new
+ /// operations using this type. All Legal types are Synthesizable except
+ /// MMX types on X86. Non-Legal types are not Synthesizable.
+ bool Synthesizable[MVT::LAST_VALUETYPE];
+
/// TransformToType - For any value types we are promoting or expanding, this
/// contains the value type that we are changing to. For Expanded types, this
/// contains one step of the expand (e.g. i64 -> i32), even if there are
@@ -1660,33 +1660,24 @@ private:
/// Most operations are Legal (aka, supported natively by the target), but
/// operations that are not should be described. Note that operations on
/// non-legal value types are not described here.
- /// This array is accessed using VT.getSimpleVT(), so it is subject to
- /// the MVT::MAX_ALLOWED_VALUETYPE * 2 bits.
- uint64_t OpActions[MVT::MAX_ALLOWED_VALUETYPE/(sizeof(uint64_t)*4)][ISD::BUILTIN_OP_END];
-
- /// LoadExtActions - For each load of load extension type and each value type,
+ uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
+
+ /// LoadExtActions - For each load extension type and each value type,
/// keep a LegalizeAction that indicates how instruction selection should deal
- /// with the load.
- uint64_t LoadExtActions[ISD::LAST_LOADEXT_TYPE];
-
- /// TruncStoreActions - For each truncating store, keep a LegalizeAction that
- /// indicates how instruction selection should deal with the store.
- uint64_t TruncStoreActions[MVT::LAST_VALUETYPE];
+ /// with a load of a specific value type and extension type.
+ uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE];
+
+ /// TruncStoreActions - For each value type pair keep a LegalizeAction that
+ /// indicates whether a truncating store of a specific value type and
+ /// truncating type is legal.
+ uint8_t TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
/// IndexedModeActions - For each indexed mode and each value type,
/// keep a pair of LegalizeAction that indicates how instruction
- /// selection should deal with the load / store. The first
- /// dimension is now the value_type for the reference. The second
- /// dimension is the load [0] vs. store[1]. The third dimension
- /// represents the various modes for load store.
- uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][2][ISD::LAST_INDEXED_MODE];
-
- /// ConvertActions - For each conversion from source type to destination type,
- /// keep a LegalizeAction that indicates how instruction selection should
- /// deal with the conversion.
- /// Currently, this is used only for floating->floating conversions
- /// (FP_EXTEND and FP_ROUND).
- uint64_t ConvertActions[MVT::LAST_VALUETYPE];
+ /// selection should deal with the load / store. The first dimension is the
+ /// value_type for the reference. The second dimension represents the various
+ /// modes for load store.
+ uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
/// CondCodeActions - For each condition code (ISD::CondCode) keep a
/// LegalizeAction that indicates how instruction selection should
@@ -1702,7 +1693,7 @@ private:
/// which sets a bit in this array.
unsigned char
TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
-
+
/// PromoteToType - For operations that must be promoted to a specific type,
/// this holds the destination type. This map should be sparse, so don't hold
/// it as an array.
@@ -1762,7 +1753,25 @@ protected:
/// This field specifies whether the target can benefit from code placement
/// optimization.
bool benefitFromCodePlacementOpt;
+
+private:
+ /// isLegalRC - Return true if the value types that can be represented by the
+ /// specified register class are all legal.
+ bool isLegalRC(const TargetRegisterClass *RC) const;
+
+ /// hasLegalSuperRegRegClasses - Return true if the specified register class
+ /// has one or more super-reg register classes that are legal.
+ bool hasLegalSuperRegRegClasses(const TargetRegisterClass *RC) const;
};
+
+/// GetReturnInfo - Given an LLVM IR type and return type attributes,
+/// compute the return value EVTs and flags, and optionally also
+/// the offsets, if the return value is being lowered to memory.
+void GetReturnInfo(const Type* ReturnType, Attributes attr,
+ SmallVectorImpl<ISD::OutputArg> &Outs,
+ const TargetLowering &TLI,
+ SmallVectorImpl<uint64_t> *Offsets = 0);
+
} // end llvm namespace
#endif
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetLoweringObjectFile.h b/libclamav/c++/llvm/include/llvm/Target/TargetLoweringObjectFile.h
index 42d88a0..819709f 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetLoweringObjectFile.h
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetLoweringObjectFile.h
@@ -22,11 +22,12 @@ namespace llvm {
class MachineModuleInfo;
class Mangler;
class MCAsmInfo;
+ class MCContext;
class MCExpr;
class MCSection;
class MCSectionMachO;
class MCSymbol;
- class MCContext;
+ class MCStreamer;
class GlobalValue;
class TargetMachine;
@@ -87,11 +88,27 @@ protected:
const MCSection *DwarfRangesSection;
const MCSection *DwarfMacroInfoSection;
+ // Extra TLS Variable Data section. If the target needs to put additional
+ // information for a TLS variable, it'll go here.
+ const MCSection *TLSExtraDataSection;
+
+ /// SupportsWeakOmittedEHFrame - True if the target object file supports a
+ /// weak_definition of constant 0 for an omitted EH frame.
+ bool SupportsWeakOmittedEHFrame;
+
+ /// IsFunctionEHSymbolGlobal - This flag is set to true if the ".eh" symbol
+ /// for a function should be marked .globl.
+ bool IsFunctionEHSymbolGlobal;
+
+ /// IsFunctionEHFrameSymbolPrivate - This flag is set to true if the
+ /// "EH_frame" symbol for EH information should be an assembler temporary (aka
+ /// private linkage, aka an L or .L label) or false if it should be a normal
+ /// non-.globl label. This defaults to true.
+ bool IsFunctionEHFrameSymbolPrivate;
public:
MCContext &getContext() const { return *Ctx; }
-
virtual ~TargetLoweringObjectFile();
/// Initialize - this method must be called before any actual lowering is
@@ -101,6 +118,15 @@ public:
Ctx = &ctx;
}
+ bool isFunctionEHSymbolGlobal() const {
+ return IsFunctionEHSymbolGlobal;
+ }
+ bool isFunctionEHFrameSymbolPrivate() const {
+ return IsFunctionEHFrameSymbolPrivate;
+ }
+ bool getSupportsWeakOmittedEHFrame() const {
+ return SupportsWeakOmittedEHFrame;
+ }
const MCSection *getTextSection() const { return TextSection; }
const MCSection *getDataSection() const { return DataSection; }
@@ -125,6 +151,9 @@ public:
const MCSection *getDwarfMacroInfoSection() const {
return DwarfMacroInfoSection;
}
+ const MCSection *getTLSExtraDataSection() const {
+ return TLSExtraDataSection;
+ }
/// shouldEmitUsedDirectiveFor - This hook allows targets to selectively
/// decide not to emit the UsedDirective for some symbols in llvm.used.
@@ -176,17 +205,20 @@ public:
return 0;
}
- /// getSymbolForDwarfGlobalReference - Return an MCExpr to use for a reference
+ /// getExprForDwarfGlobalReference - Return an MCExpr to use for a reference
/// to the specified global variable from exception handling information.
///
virtual const MCExpr *
- getSymbolForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding) const;
-
- virtual const MCExpr *
- getSymbolForDwarfReference(const MCSymbol *Sym, MachineModuleInfo *MMI,
- unsigned Encoding) const;
+ getExprForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
+ MachineModuleInfo *MMI, unsigned Encoding,
+ MCStreamer &Streamer) const;
+ ///
+ const MCExpr *
+ getExprForDwarfReference(const MCSymbol *Sym, Mangler *Mang,
+ MachineModuleInfo *MMI, unsigned Encoding,
+ MCStreamer &Streamer) const;
+
virtual unsigned getPersonalityEncoding() const;
virtual unsigned getLSDAEncoding() const;
virtual unsigned getFDEEncoding() const;
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetMachine.h b/libclamav/c++/llvm/include/llvm/Target/TargetMachine.h
index a7062ac..42e99e0 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetMachine.h
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetMachine.h
@@ -28,8 +28,10 @@ class TargetInstrInfo;
class TargetIntrinsicInfo;
class TargetJITInfo;
class TargetLowering;
+class TargetSelectionDAGInfo;
class TargetFrameInfo;
class JITCodeEmitter;
+class MCContext;
class TargetRegisterInfo;
class PassManagerBase;
class PassManager;
@@ -68,6 +70,16 @@ namespace CodeGenOpt {
};
}
+namespace Sched {
+ enum Preference {
+ None, // No preference
+ Latency, // Scheduling for shortest total latency.
+ RegPressure, // Scheduling for lowest register pressure.
+ Hybrid, // Scheduling for both latency and register pressure.
+ ILP // Scheduling for ILP in low register pressure mode.
+ };
+}
+
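
For illustration, a backend picks one of these values via the renamed TargetLowering::setSchedulingPreference hook, roughly as sketched below; the class and the policy choice are assumptions.

    // Sketch only: called from a hypothetical target's TargetLowering ctor.
    void MyTargetLowering::pickSchedulingPolicy(bool HasManyRegisters) {
      // Hybrid balances latency and pressure; RegPressure favours fewer spills.
      setSchedulingPreference(HasManyRegisters ? Sched::Hybrid
                                               : Sched::RegPressure);
    }
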
//===----------------------------------------------------------------------===//
///
/// TargetMachine - Primary interface to the complete machine description for
@@ -90,7 +102,9 @@ protected: // Can only create subclasses.
/// AsmInfo - Contains target specific asm information.
///
const MCAsmInfo *AsmInfo;
-
+
+ unsigned MCRelaxAll : 1;
+
public:
virtual ~TargetMachine();
@@ -104,7 +118,8 @@ public:
//
virtual const TargetInstrInfo *getInstrInfo() const { return 0; }
virtual const TargetFrameInfo *getFrameInfo() const { return 0; }
- virtual TargetLowering *getTargetLowering() const { return 0; }
+ virtual const TargetLowering *getTargetLowering() const { return 0; }
+ virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const{ return 0; }
virtual const TargetData *getTargetData() const { return 0; }
/// getMCAsmInfo - Return target specific asm information.
@@ -146,6 +161,14 @@ public:
///
virtual const TargetELFWriterInfo *getELFWriterInfo() const { return 0; }
+ /// hasMCRelaxAll - Check whether all machine code instructions should be
+ /// relaxed.
+ bool hasMCRelaxAll() const { return MCRelaxAll; }
+
+ /// setMCRelaxAll - Set whether all machine code instructions should be
+ /// relaxed.
+ void setMCRelaxAll(bool Value) { MCRelaxAll = Value; }
+
/// getRelocationModel - Returns the code generation relocation model. The
/// choices are static, PIC, and dynamic-no-pic, and target default.
static Reloc::Model getRelocationModel();
@@ -170,6 +193,21 @@ public:
/// is false.
static void setAsmVerbosityDefault(bool);
+ /// getDataSections - Return true if data objects should be emitted into their
+ /// own section, corresponding to -fdata-sections.
+ static bool getDataSections();
+
+ /// getFunctionSections - Return true if functions should be emitted into
+ /// their own section, corresponding to -ffunction-sections.
+ static bool getFunctionSections();
+
+ /// setDataSections - Set whether data are emitted into separate sections.
+ static void setDataSections(bool);
+
+ /// setFunctionSections - Set whether functions are emitted into separate
+ /// sections.
+ static void setFunctionSections(bool);
+
/// CodeGenFileType - These enums are meant to be passed into
/// addPassesToEmitFile to indicate what type of file to emit, and returned by
/// it to indicate what type of file could actually be made.
@@ -191,7 +229,7 @@ public:
formatted_raw_ostream &,
CodeGenFileType,
CodeGenOpt::Level,
- bool DisableVerify = true) {
+ bool = true) {
return true;
}
@@ -204,18 +242,19 @@ public:
virtual bool addPassesToEmitMachineCode(PassManagerBase &,
JITCodeEmitter &,
CodeGenOpt::Level,
- bool DisableVerify = true) {
+ bool = true) {
return true;
}
- /// addPassesToEmitWholeFile - This method can be implemented by targets that
- /// require having the entire module at once. This is not recommended, do not
- /// use this.
- virtual bool WantsWholeFile() const { return false; }
- virtual bool addPassesToEmitWholeFile(PassManager &, formatted_raw_ostream &,
- CodeGenFileType,
- CodeGenOpt::Level,
- bool DisableVerify = true) {
+ /// addPassesToEmitMC - Add passes to the specified pass manager to get
+ /// machine code emitted with the MCJIT. This method returns true if machine
+ /// code is not supported. It fills the MCContext Ctx pointer which can be
+ /// used to build a custom MCStreamer.
+ ///
+ virtual bool addPassesToEmitMC(PassManagerBase &,
+ MCContext *&,
+ CodeGenOpt::Level,
+ bool = true) {
return true;
}
};
@@ -224,16 +263,18 @@ public:
/// implemented with the LLVM target-independent code generator.
///
class LLVMTargetMachine : public TargetMachine {
+ std::string TargetTriple;
+
protected: // Can only create subclasses.
LLVMTargetMachine(const Target &T, const std::string &TargetTriple);
+private:
/// addCommonCodeGenPasses - Add standard LLVM codegen passes used for
/// both emitting to assembly files or machine code output.
///
bool addCommonCodeGenPasses(PassManagerBase &, CodeGenOpt::Level,
- bool DisableVerify);
+ bool DisableVerify, MCContext *&OutCtx);
-private:
virtual void setCodeModelForJIT();
virtual void setCodeModelForStatic();
@@ -259,12 +300,27 @@ public:
JITCodeEmitter &MCE,
CodeGenOpt::Level,
bool DisableVerify = true);
+
+ /// addPassesToEmitMC - Add passes to the specified pass manager to get
+ /// machine code emitted with the MCJIT. This method returns true if machine
+ /// code is not supported. It fills the MCContext Ctx pointer which can be
+ /// used to build a custom MCStreamer.
+ ///
+ virtual bool addPassesToEmitMC(PassManagerBase &PM,
+ MCContext *&Ctx,
+ CodeGenOpt::Level OptLevel,
+ bool DisableVerify = true);
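
For illustration only (not from this commit), driving the new MC emission entry point might look like the sketch below; module setup and error reporting are elided, and the helper name is an assumption.

    // Sketch only.
    #include "llvm/Module.h"
    #include "llvm/PassManager.h"
    #include "llvm/Target/TargetMachine.h"
    using namespace llvm;

    static bool emitWithMC(TargetMachine &TM, Module &M) {
      PassManager PM;
      MCContext *Ctx = 0;
      if (TM.addPassesToEmitMC(PM, Ctx, CodeGenOpt::Default))
        return false;       // target does not support MC-based emission
      PM.run(M);            // Ctx can now be used to build an MCStreamer
      return true;
    }
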
/// Target-Independent Code Generator Pass Configuration Options.
-
- /// addInstSelector - This method should add any "last minute" LLVM->LLVM
- /// passes, then install an instruction selector pass, which converts from
- /// LLVM code to machine instructions.
+
+ /// addPreISel - This method should add any "last minute" LLVM->LLVM
+ /// passes, which are run just before the instruction selector.
+ virtual bool addPreISel(PassManagerBase &, CodeGenOpt::Level) {
+ return true;
+ }
+
+ /// addInstSelector - This method should install an instruction selector pass,
+ /// which converts from LLVM code to machine instructions.
virtual bool addInstSelector(PassManagerBase &, CodeGenOpt::Level) {
return true;
}
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetOpcodes.h b/libclamav/c++/llvm/include/llvm/Target/TargetOpcodes.h
index 48665b7..01fba66 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetOpcodes.h
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetOpcodes.h
@@ -15,56 +15,70 @@
#define LLVM_TARGET_TARGETOPCODES_H
namespace llvm {
-
+
/// Invariant opcodes: All instruction sets have these as their low opcodes.
+///
+/// Every instruction defined here must also appear in Target.td and the order
+/// must be the same as in CodeGenTarget.cpp.
+///
namespace TargetOpcode {
- enum {
+ enum {
PHI = 0,
INLINEASM = 1,
- DBG_LABEL = 2,
+ PROLOG_LABEL = 2,
EH_LABEL = 3,
GC_LABEL = 4,
-
+
/// KILL - This instruction is a noop that is used only to adjust the
/// liveness of registers. This can be useful when dealing with
/// sub-registers.
KILL = 5,
-
+
/// EXTRACT_SUBREG - This instruction takes two operands: a register
/// that has subregisters, and a subregister index. It returns the
/// extracted subregister value. This is commonly used to implement
/// truncation operations on target architectures which support it.
EXTRACT_SUBREG = 6,
-
- /// INSERT_SUBREG - This instruction takes three operands: a register
- /// that has subregisters, a register providing an insert value, and a
- /// subregister index. It returns the value of the first register with
- /// the value of the second register inserted. The first register is
- /// often defined by an IMPLICIT_DEF, as is commonly used to implement
+
+ /// INSERT_SUBREG - This instruction takes three operands: a register that
+ /// has subregisters, a register providing an insert value, and a
+ /// subregister index. It returns the value of the first register with the
+ /// value of the second register inserted. The first register is often
+ /// defined by an IMPLICIT_DEF, because it is commonly used to implement
/// anyext operations on target architectures which support it.
INSERT_SUBREG = 7,
-
+
/// IMPLICIT_DEF - This is the MachineInstr-level equivalent of undef.
IMPLICIT_DEF = 8,
-
- /// SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except
- /// that the first operand is an immediate integer constant. This constant
- /// is often zero, as is commonly used to implement zext operations on
- /// target architectures which support it, such as with x86-64 (with
- /// zext from i32 to i64 via implicit zero-extension).
+
+ /// SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except that
+ /// the first operand is an immediate integer constant. This constant is
+ /// often zero, because it is commonly used to assert that the instruction
+ /// defining the register implicitly clears the high bits.
SUBREG_TO_REG = 9,
-
+
/// COPY_TO_REGCLASS - This instruction is a placeholder for a plain
/// register-to-register copy into a specific register class. This is only
/// used between instruction selection and MachineInstr creation, before
/// virtual registers have been created for all the instructions, and it's
/// only needed in cases where the register classes implied by the
- /// instructions are insufficient. The actual MachineInstrs to perform
- /// the copy are emitted with the TargetInstrInfo::copyRegToReg hook.
+ /// instructions are insufficient. It is emitted as a COPY MachineInstr.
COPY_TO_REGCLASS = 10,
-
+
/// DBG_VALUE - a mapping of the llvm.dbg.value intrinsic
- DBG_VALUE = 11
+ DBG_VALUE = 11,
+
+ /// REG_SEQUENCE - This variadic instruction is used to form a register that
+ /// represents a consecutive sequence of sub-registers. It's used as a register
+ /// coalescing / allocation aid and must be eliminated before code emission.
+ /// e.g. v1027 = REG_SEQUENCE v1024, 3, v1025, 4, v1026, 5
+ /// After register coalescing references of v1024 should be replace with
+ /// v1027:3, v1025 with v1027:4, etc.
+ REG_SEQUENCE = 12,
+
+ /// COPY - Target-independent register copy. This instruction can also be
+ /// used to copy between subregisters of virtual registers.
+ COPY = 13
};
} // end namespace TargetOpcode
} // end namespace llvm
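The hunk above adds two new target-independent opcodes, REG_SEQUENCE and COPY, and renames DBG_LABEL to PROLOG_LABEL. As a minimal illustrative sketch (not part of the patch; countCopyLikeInstrs, the includes, and the iteration pattern are assumptions based on the usual MachineBasicBlock API), target-independent code can now match these opcodes directly:

  #include "llvm/CodeGen/MachineBasicBlock.h"
  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/Target/TargetOpcodes.h"

  // Count copy-like instructions in a block by opcode; both opcodes are
  // target-independent after this change.
  static unsigned countCopyLikeInstrs(const llvm::MachineBasicBlock &MBB) {
    unsigned N = 0;
    for (llvm::MachineBasicBlock::const_iterator I = MBB.begin(),
         E = MBB.end(); I != E; ++I)
      if (I->getOpcode() == llvm::TargetOpcode::COPY ||
          I->getOpcode() == llvm::TargetOpcode::REG_SEQUENCE)
        ++N;
    return N;
  }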
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetOptions.h b/libclamav/c++/llvm/include/llvm/Target/TargetOptions.h
index b63c2bf..97ceffd 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetOptions.h
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetOptions.h
@@ -16,6 +16,8 @@
#define LLVM_TARGET_TARGETOPTIONS_H
namespace llvm {
+ class MachineFunction;
+
// Possible float ABI settings. Used with FloatABIType in TargetOptions.h.
namespace FloatABI {
enum ABIType {
@@ -35,6 +37,16 @@ namespace llvm {
/// elimination optimization, this option should disable it.
extern bool NoFramePointerElim;
+ /// NoFramePointerElimNonLeaf - This flag is enabled when the
+ /// -disable-non-leaf-fp-elim is specified on the command line. If the target
+ /// supports the frame pointer elimination optimization, this option should
+ /// disable it for non-leaf functions.
+ extern bool NoFramePointerElimNonLeaf;
+
+ /// DisableFramePointerElim - This returns true if frame pointer elimination
+ /// optimization should be disabled for the given machine function.
+ extern bool DisableFramePointerElim(const MachineFunction &MF);
+
/// LessPreciseFPMAD - This flag is enabled when the
/// -enable-fp-mad is specified on the command line. When this flag is off
/// (the default), the code generator is not allowed to generate mad
@@ -56,16 +68,21 @@ namespace llvm {
/// this flag is off (the default), the code generator is not allowed to
/// produce results that are "less precise" than IEEE allows. This includes
/// use of X86 instructions like FSIN and FCOS instead of libcalls.
- /// UnsafeFPMath implies FiniteOnlyFPMath and LessPreciseFPMAD.
+ /// UnsafeFPMath implies LessPreciseFPMAD.
extern bool UnsafeFPMath;
- /// FiniteOnlyFPMath - This returns true when the -enable-finite-only-fp-math
- /// option is specified on the command line. If this returns false (default),
- /// the code generator is not allowed to assume that FP arithmetic arguments
- /// and results are never NaNs or +-Infs.
- extern bool FiniteOnlyFPMathOption;
- extern bool FiniteOnlyFPMath();
-
+ /// NoInfsFPMath - This flag is enabled when the
+ /// -enable-no-infs-fp-math flag is specified on the command line. When
+ /// this flag is off (the default), the code generator is not allowed to
+ /// assume the FP arithmetic arguments and results are never +-Infs.
+ extern bool NoInfsFPMath;
+
+ /// NoNaNsFPMath - This flag is enabled when the
+ /// -enable-no-nans-fp-math flag is specified on the command line. When
+ /// this flag is off (the default), the code generator is not allowed to
+ /// assume the FP arithmetic arguments and results are never NaNs.
+ extern bool NoNaNsFPMath;
+
/// HonorSignDependentRoundingFPMath - This returns true when the
/// -enable-sign-dependent-rounding-fp-math is specified. If this returns
/// false (the default), the code generator is allowed to assume that the
@@ -95,13 +112,9 @@ namespace llvm {
/// crt*.o compiling).
extern bool NoZerosInBSS;
- /// DwarfExceptionHandling - This flag indicates that Dwarf exception
- /// information should be emitted.
- extern bool DwarfExceptionHandling;
-
- /// SjLjExceptionHandling - This flag indicates that SJLJ exception
- /// information should be emitted.
- extern bool SjLjExceptionHandling;
+ /// JITExceptionHandling - This flag indicates that the JIT should emit
+ /// exception handling information.
+ extern bool JITExceptionHandling;
/// JITEmitDebugInfo - This flag indicates that the JIT should try to emit
/// debug information and notify a debugger about it.
@@ -127,8 +140,8 @@ namespace llvm {
/// StackAlignment - Override default stack alignment for target.
extern unsigned StackAlignment;
- /// RealignStack - This flag indicates, whether stack should be automatically
- /// realigned, if needed.
+ /// RealignStack - This flag indicates whether the stack should be
+ /// automatically realigned, if needed.
extern bool RealignStack;
/// DisableJumpTables - This flag indicates jump tables should not be
@@ -144,11 +157,6 @@ namespace llvm {
 /// with earlier copy coalescing.
extern bool StrongPHIElim;
- /// DisableScheduling - This flag disables instruction scheduling. In
- /// particular, it assigns an ordering to the SDNodes, which the scheduler
- /// uses instead of its normal heuristics to perform scheduling.
- extern bool DisableScheduling;
-
} // End llvm namespace
#endif
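The TargetOptions change above replaces the old exception-handling flags with JITExceptionHandling and adds NoFramePointerElimNonLeaf together with a per-function query, DisableFramePointerElim(MF). A minimal sketch of how a target hook might consult it (keepFramePointer is a hypothetical helper, not code from this patch):

  #include "llvm/Target/TargetOptions.h"
  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/CodeGen/MachineFrameInfo.h"

  // Keep the frame pointer when elimination is disabled for this function
  // (covering both -disable-fp-elim and -disable-non-leaf-fp-elim) or when
  // the frame itself requires it.
  static bool keepFramePointer(const llvm::MachineFunction &MF) {
    return llvm::DisableFramePointerElim(MF) ||
           MF.getFrameInfo()->hasVarSizedObjects();
  }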
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetRegisterInfo.h b/libclamav/c++/llvm/include/llvm/Target/TargetRegisterInfo.h
index 212cc93..81dec3e 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetRegisterInfo.h
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetRegisterInfo.h
@@ -28,6 +28,7 @@ class BitVector;
class MachineFunction;
class MachineMove;
class RegScavenger;
+template<class T> class SmallVectorImpl;
/// TargetRegisterDesc - This record contains all of the information known about
/// a particular register. The AliasSet field (if not null) contains a pointer
@@ -109,11 +110,16 @@ public:
}
/// contains - Return true if the specified register is included in this
- /// register class.
+ /// register class. This does not include virtual registers.
bool contains(unsigned Reg) const {
return RegSet.count(Reg);
}
+ /// contains - Return true if both registers are in this class.
+ bool contains(unsigned Reg1, unsigned Reg2) const {
+ return contains(Reg1) && contains(Reg2);
+ }
+
/// hasType - return true if this TargetRegisterClass has the ValueType vt.
///
bool hasType(EVT vt) const {
@@ -151,9 +157,6 @@ public:
/// index SubIdx, or NULL if no such class exists.
const TargetRegisterClass* getSubRegisterRegClass(unsigned SubIdx) const {
assert(SubIdx>0 && "Invalid subregister index");
- for (unsigned s = 0; s != SubIdx-1; ++s)
- if (!SubRegClasses[s])
- return NULL;
return SubRegClasses[SubIdx-1];
}
@@ -262,14 +265,13 @@ class TargetRegisterInfo {
protected:
const unsigned* SubregHash;
const unsigned SubregHashSize;
- const unsigned* SuperregHash;
- const unsigned SuperregHashSize;
const unsigned* AliasesHash;
const unsigned AliasesHashSize;
public:
typedef const TargetRegisterClass * const * regclass_iterator;
private:
const TargetRegisterDesc *Desc; // Pointer to the descriptor array
+ const char *const *SubRegIndexNames; // Names of subreg indexes.
unsigned NumRegs; // Number of entries in the array
regclass_iterator RegClassBegin, RegClassEnd; // List of regclasses
@@ -280,12 +282,11 @@ protected:
TargetRegisterInfo(const TargetRegisterDesc *D, unsigned NR,
regclass_iterator RegClassBegin,
regclass_iterator RegClassEnd,
+ const char *const *subregindexnames,
int CallFrameSetupOpcode = -1,
int CallFrameDestroyOpcode = -1,
const unsigned* subregs = 0,
const unsigned subregsize = 0,
- const unsigned* superregs = 0,
- const unsigned superregsize = 0,
const unsigned* aliases = 0,
const unsigned aliasessize = 0);
virtual ~TargetRegisterInfo();
@@ -300,7 +301,7 @@ public:
/// considered to be a 'virtual' register, which is part of the SSA
/// namespace. This must be the same for all targets, which means that each
/// target is limited to this fixed number of registers.
- FirstVirtualRegister = 1024
+ FirstVirtualRegister = 16384
};
/// isPhysicalRegister - Return true if the specified register number is in
@@ -317,11 +318,11 @@ public:
return Reg >= FirstVirtualRegister;
}
- /// getPhysicalRegisterRegClass - Returns the Register Class of a physical
- /// register of the given type. If type is EVT::Other, then just return any
- /// register class the register belongs to.
- virtual const TargetRegisterClass *
- getPhysicalRegisterRegClass(unsigned Reg, EVT VT = MVT::Other) const;
+ /// getMinimalPhysRegClass - Returns the Register Class of a physical
+ /// register of the given type, picking the most specific sub-register class
+ /// of the right type that contains this physreg.
+ const TargetRegisterClass *
+ getMinimalPhysRegClass(unsigned Reg, EVT VT = MVT::Other) const;
/// getAllocatableSet - Returns a bitset indexed by register number
/// indicating if a register is allocatable or not. If a register class is
@@ -380,6 +381,13 @@ public:
return NumRegs;
}
+ /// getSubRegIndexName - Return the human-readable symbolic target-specific
+ /// name for the specified SubRegIndex.
+ const char *getSubRegIndexName(unsigned SubIdx) const {
+ assert(SubIdx && "This is not a subregister index");
+ return SubRegIndexNames[SubIdx-1];
+ }
+
/// regsOverlap - Returns true if the two registers are equal or alias each
/// other. The registers may be virtual register.
bool regsOverlap(unsigned regA, unsigned regB) const {
@@ -425,19 +433,7 @@ public:
/// isSuperRegister - Returns true if regB is a super-register of regA.
///
bool isSuperRegister(unsigned regA, unsigned regB) const {
- // SuperregHash is a simple quadratically probed hash table.
- size_t index = (regA + regB * 37) & (SuperregHashSize-1);
- unsigned ProbeAmt = 2;
- while (SuperregHash[index*2] != 0 &&
- SuperregHash[index*2+1] != 0) {
- if (SuperregHash[index*2] == regA && SuperregHash[index*2+1] == regB)
- return true;
-
- index = (index + ProbeAmt) & (SuperregHashSize-1);
- ProbeAmt += 2;
- }
-
- return false;
+ return isSubRegister(regB, regA);
}
/// getCalleeSavedRegs - Return a null-terminated list of all of the
@@ -447,11 +443,6 @@ public:
virtual const unsigned* getCalleeSavedRegs(const MachineFunction *MF = 0)
const = 0;
- /// getCalleeSavedRegClasses - Return a null-terminated list of the preferred
- /// register classes to spill each callee saved register with. The order and
- /// length of this list match the getCalleeSaveRegs() list.
- virtual const TargetRegisterClass* const *getCalleeSavedRegClasses(
- const MachineFunction *MF) const =0;
/// getReservedRegs - Returns a bitset indexed by physical register number
/// indicating if a register is a special register that has particular uses
@@ -465,7 +456,7 @@ public:
virtual unsigned getSubReg(unsigned RegNo, unsigned Index) const = 0;
/// getSubRegIndex - For a given register pair, return the sub-register index
- /// if the are second register is a sub-register of the first. Return zero
+ /// if the second register is a sub-register of the first. Return zero
/// otherwise.
virtual unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const = 0;
@@ -479,6 +470,18 @@ public:
return 0;
}
+ /// canCombineSubRegIndices - Given a register class and a list of
+ /// subregister indices, return true if it's possible to combine the
+ /// subregister indices into one that corresponds to a larger
+ /// subregister. Return the new subregister index by reference. Note the
+ /// new index may be zero if the given subregisters can be combined to
+ /// form the whole register.
+ virtual bool canCombineSubRegIndices(const TargetRegisterClass *RC,
+ SmallVectorImpl<unsigned> &SubIndices,
+ unsigned &NewSubIdx) const {
+ return 0;
+ }
+
/// getMatchingSuperRegClass - Return a subclass of the specified register
/// class A so that each register in it has a sub-register of the
/// specified sub-register index which is in the specified register class B.
@@ -488,6 +491,23 @@ public:
return 0;
}
+ /// composeSubRegIndices - Return the subregister index you get from composing
+ /// two subregister indices.
+ ///
+ /// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
+ /// returns c. Note that composeSubRegIndices does not tell you about illegal
+ /// compositions. If R does not have a subreg a, or R:a does not have a subreg
+ /// b, composeSubRegIndices doesn't tell you.
+ ///
+ /// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
+ /// ssub_0:S0 - ssub_3:S3 subregs.
+ /// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
+ ///
+ virtual unsigned composeSubRegIndices(unsigned a, unsigned b) const {
+ // This default implementation is correct for most targets.
+ return b;
+ }
+
//===--------------------------------------------------------------------===//
// Register Class Information
//
@@ -504,8 +524,8 @@ public:
/// getRegClass - Returns the register class associated with the enumeration
/// value. See class TargetOperandInfo.
const TargetRegisterClass *getRegClass(unsigned i) const {
- assert(i <= getNumRegClasses() && "Register Class ID out of range");
- return i ? RegClassBegin[i - 1] : NULL;
+ assert(i < getNumRegClasses() && "Register Class ID out of range");
+ return RegClassBegin[i];
}
/// getPointerRegClass - Returns a TargetRegisterClass used for pointer
@@ -573,6 +593,13 @@ public:
return false;
}
+ /// requiresVirtualBaseRegisters - Returns true if the target wants the
+ /// LocalStackAllocation pass to be run and virtual base registers
+ /// used for more efficient stack access.
+ virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
+ return false;
+ }
+
/// hasFP - Return true if the specified function should have a dedicated
/// frame pointer register. For most targets this is true only if the function
/// has variable sized allocas or if frame pointer elimination is disabled.
@@ -583,18 +610,18 @@ public:
/// immediately on entry to the current function. This eliminates the need for
/// add/sub sp brackets around call sites. Returns true if the call frame is
/// included as part of the stack frame.
- virtual bool hasReservedCallFrame(MachineFunction &MF) const {
+ virtual bool hasReservedCallFrame(const MachineFunction &MF) const {
return !hasFP(MF);
}
/// canSimplifyCallFramePseudos - When possible, it's best to simplify the
/// call frame pseudo ops before doing frame index elimination. This is
/// possible only when frame index references between the pseudos won't
- /// need adjusted for the call frame adjustments. Normally, that's true
+ /// need adjusting for the call frame adjustments. Normally, that's true
/// if the function has a reserved call frame or a frame pointer. Some
/// targets (Thumb2, for example) may have more complicated criteria,
/// however, and can override this behavior.
- virtual bool canSimplifyCallFramePseudos(MachineFunction &MF) const {
+ virtual bool canSimplifyCallFramePseudos(const MachineFunction &MF) const {
return hasReservedCallFrame(MF) || hasFP(MF);
}
@@ -604,7 +631,7 @@ public:
/// reserved as its spill slot. This tells PEI not to create a new stack frame
/// object for the given register. It should be called only after
/// processFunctionBeforeCalleeSavedScan().
- virtual bool hasReservedSpillSlot(MachineFunction &MF, unsigned Reg,
+ virtual bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg,
int &FrameIdx) const {
return false;
}
@@ -616,6 +643,44 @@ public:
return false;
}
+ /// getFrameIndexInstrOffset - Get the offset from the referenced frame
+ /// index in the instruction, if there is one.
+ virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
+ int Idx) const {
+ return 0;
+ }
+
+ /// needsFrameBaseReg - Returns true if the instruction's frame index
+ /// reference would be better served by a base register other than FP
+ /// or SP. Used by LocalStackFrameAllocation to determine which frame index
+ /// references it should create new base registers for.
+ virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
+ return false;
+ }
+
+ /// materializeFrameBaseRegister - Insert defining instruction(s) for
+ /// BaseReg to be a pointer to FrameIdx before insertion point I.
+ virtual void materializeFrameBaseRegister(MachineBasicBlock::iterator I,
+ unsigned BaseReg, int FrameIdx,
+ int64_t Offset) const {
+ assert(0 && "materializeFrameBaseRegister does not exist on this target");
+ }
+
+ /// resolveFrameIndex - Resolve a frame index operand of an instruction
+ /// to reference the indicated base register plus offset instead.
+ virtual void resolveFrameIndex(MachineBasicBlock::iterator I,
+ unsigned BaseReg, int64_t Offset) const {
+ assert(0 && "resolveFrameIndex does not exist on this target");
+ }
+
+ /// isFrameOffsetLegal - Determine whether a given offset immediate is
+ /// encodable to resolve a frame index.
+ virtual bool isFrameOffsetLegal(const MachineInstr *MI,
+ int64_t Offset) const {
+ assert(0 && "isFrameOffsetLegal does not exist on this target");
+ return false; // Must return a value in order to compile with VS 2005
+ }
+
/// getCallFrameSetup/DestroyOpcode - These methods return the opcode of the
/// frame setup/destroy instructions if they exist (-1 otherwise). Some
/// targets use pseudo instructions in order to abstract away the difference
@@ -651,7 +716,7 @@ public:
}
/// processFunctionBeforeFrameFinalized - This method is called immediately
- /// before the specified functions frame layout (MF.getFrameInfo()) is
+ /// before the specified function's frame layout (MF.getFrameInfo()) is
/// finalized. Once the frame is finalized, MO_FrameIndex operands are
/// replaced with direct constants. This method is optional.
///
@@ -678,13 +743,8 @@ public:
/// specified instruction, as long as it keeps the iterator pointing at the
/// finished product. SPAdj is the SP adjustment due to call frame setup
/// instruction.
- ///
- /// When -enable-frame-index-scavenging is enabled, the virtual register
- /// allocated for this frame index is returned and its value is stored in
- /// *Value.
- virtual unsigned eliminateFrameIndex(MachineBasicBlock::iterator MI,
- int SPAdj, int *Value = NULL,
- RegScavenger *RS=NULL) const = 0;
+ virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
+ int SPAdj, RegScavenger *RS=NULL) const = 0;
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
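Among the TargetRegisterInfo changes above, TargetRegisterClass gains a two-register contains() overload and isSuperRegister() is now expressed through isSubRegister(). A small usage sketch (isPhysRegPairInClass is a hypothetical helper; the register class would come from the surrounding pass):

  #include "llvm/Target/TargetRegisterInfo.h"

  // True when both registers are physical and belong to the given class,
  // using the new two-argument contains() overload.
  static bool isPhysRegPairInClass(const llvm::TargetRegisterClass *RC,
                                   unsigned RegA, unsigned RegB) {
    return llvm::TargetRegisterInfo::isPhysicalRegister(RegA) &&
           llvm::TargetRegisterInfo::isPhysicalRegister(RegB) &&
           RC->contains(RegA, RegB);
  }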
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetRegistry.h b/libclamav/c++/llvm/include/llvm/Target/TargetRegistry.h
index a409b62..2817b0c 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetRegistry.h
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetRegistry.h
@@ -38,7 +38,6 @@ namespace llvm {
class TargetAsmLexer;
class TargetAsmParser;
class TargetMachine;
- class formatted_raw_ostream;
class raw_ostream;
/// Target - Wrapper for Target specific information.
@@ -55,29 +54,33 @@ namespace llvm {
typedef unsigned (*TripleMatchQualityFnTy)(const std::string &TT);
- typedef const MCAsmInfo *(*AsmInfoCtorFnTy)(const Target &T,
+ typedef MCAsmInfo *(*AsmInfoCtorFnTy)(const Target &T,
StringRef TT);
typedef TargetMachine *(*TargetMachineCtorTy)(const Target &T,
const std::string &TT,
const std::string &Features);
- typedef AsmPrinter *(*AsmPrinterCtorTy)(formatted_raw_ostream &OS,
- TargetMachine &TM,
- MCContext &Ctx,
- MCStreamer &Streamer,
- const MCAsmInfo *MAI);
+ typedef AsmPrinter *(*AsmPrinterCtorTy)(TargetMachine &TM,
+ MCStreamer &Streamer);
typedef TargetAsmBackend *(*AsmBackendCtorTy)(const Target &T,
- MCAssembler &A);
+ const std::string &TT);
typedef TargetAsmLexer *(*AsmLexerCtorTy)(const Target &T,
const MCAsmInfo &MAI);
- typedef TargetAsmParser *(*AsmParserCtorTy)(const Target &T,MCAsmParser &P);
- typedef const MCDisassembler *(*MCDisassemblerCtorTy)(const Target &T);
+ typedef TargetAsmParser *(*AsmParserCtorTy)(const Target &T,MCAsmParser &P,
+ TargetMachine &TM);
+ typedef MCDisassembler *(*MCDisassemblerCtorTy)(const Target &T);
typedef MCInstPrinter *(*MCInstPrinterCtorTy)(const Target &T,
unsigned SyntaxVariant,
- const MCAsmInfo &MAI,
- raw_ostream &O);
+ const MCAsmInfo &MAI);
typedef MCCodeEmitter *(*CodeEmitterCtorTy)(const Target &T,
TargetMachine &TM,
MCContext &Ctx);
+ typedef MCStreamer *(*ObjectStreamerCtorTy)(const Target &T,
+ const std::string &TT,
+ MCContext &Ctx,
+ TargetAsmBackend &TAB,
+ raw_ostream &_OS,
+ MCCodeEmitter *_Emitter,
+ bool RelaxAll);
private:
/// Next - The next registered target in the linked list, maintained by the
@@ -131,6 +134,10 @@ namespace llvm {
/// if registered.
CodeEmitterCtorTy CodeEmitterCtorFn;
+ /// ObjectStreamerCtorFn - Construction function for this target's
+ /// ObjectStreamer, if registered.
+ ObjectStreamerCtorTy ObjectStreamerCtorFn;
+
public:
/// @name Target Information
/// @{
@@ -175,6 +182,9 @@ namespace llvm {
/// hasCodeEmitter - Check if this target supports instruction encoding.
bool hasCodeEmitter() const { return CodeEmitterCtorFn != 0; }
+ /// hasObjectStreamer - Check if this target supports streaming to files.
+ bool hasObjectStreamer() const { return ObjectStreamerCtorFn != 0; }
+
/// @}
/// @name Feature Constructors
/// @{
@@ -186,7 +196,7 @@ namespace llvm {
/// feature set; it should always be provided. Generally this should be
/// either the target triple from the module, or the target triple of the
/// host if that does not exist.
- const MCAsmInfo *createAsmInfo(StringRef Triple) const {
+ MCAsmInfo *createAsmInfo(StringRef Triple) const {
if (!AsmInfoCtorFn)
return 0;
return AsmInfoCtorFn(*this, Triple);
@@ -208,11 +218,12 @@ namespace llvm {
/// createAsmBackend - Create a target specific assembly parser.
///
+ /// \arg Triple - The target triple string.
/// \arg Backend - The target independent assembler object.
- TargetAsmBackend *createAsmBackend(MCAssembler &Backend) const {
+ TargetAsmBackend *createAsmBackend(const std::string &Triple) const {
if (!AsmBackendCtorFn)
return 0;
- return AsmBackendCtorFn(*this, Backend);
+ return AsmBackendCtorFn(*this, Triple);
}
/// createAsmLexer - Create a target specific assembly lexer.
@@ -227,34 +238,32 @@ namespace llvm {
///
/// \arg Parser - The target independent parser implementation to use for
/// parsing and lexing.
- TargetAsmParser *createAsmParser(MCAsmParser &Parser) const {
+ TargetAsmParser *createAsmParser(MCAsmParser &Parser,
+ TargetMachine &TM) const {
if (!AsmParserCtorFn)
return 0;
- return AsmParserCtorFn(*this, Parser);
+ return AsmParserCtorFn(*this, Parser, TM);
}
/// createAsmPrinter - Create a target specific assembly printer pass. This
- /// takes ownership of the MCContext and MCStreamer objects but not the MAI.
- AsmPrinter *createAsmPrinter(formatted_raw_ostream &OS, TargetMachine &TM,
- MCContext &Ctx, MCStreamer &Streamer,
- const MCAsmInfo *MAI) const {
+ /// takes ownership of the MCStreamer object.
+ AsmPrinter *createAsmPrinter(TargetMachine &TM, MCStreamer &Streamer) const{
if (!AsmPrinterCtorFn)
return 0;
- return AsmPrinterCtorFn(OS, TM, Ctx, Streamer, MAI);
+ return AsmPrinterCtorFn(TM, Streamer);
}
- const MCDisassembler *createMCDisassembler() const {
+ MCDisassembler *createMCDisassembler() const {
if (!MCDisassemblerCtorFn)
return 0;
return MCDisassemblerCtorFn(*this);
}
MCInstPrinter *createMCInstPrinter(unsigned SyntaxVariant,
- const MCAsmInfo &MAI,
- raw_ostream &O) const {
+ const MCAsmInfo &MAI) const {
if (!MCInstPrinterCtorFn)
return 0;
- return MCInstPrinterCtorFn(*this, SyntaxVariant, MAI, O);
+ return MCInstPrinterCtorFn(*this, SyntaxVariant, MAI);
}
@@ -265,6 +274,24 @@ namespace llvm {
return CodeEmitterCtorFn(*this, TM, Ctx);
}
+ /// createObjectStreamer - Create a target specific MCStreamer.
+ ///
+ /// \arg TT - The target triple.
+ /// \arg Ctx - The target context.
+ /// \arg TAB - The target assembler backend object. Takes ownership.
+ /// \arg _OS - The stream object.
+ /// \arg _Emitter - The target independent assembler object. Takes ownership.
+ /// \arg RelaxAll - Relax all fixups?
+ MCStreamer *createObjectStreamer(const std::string &TT, MCContext &Ctx,
+ TargetAsmBackend &TAB,
+ raw_ostream &_OS,
+ MCCodeEmitter *_Emitter,
+ bool RelaxAll) const {
+ if (!ObjectStreamerCtorFn)
+ return 0;
+ return ObjectStreamerCtorFn(*this, TT, Ctx, TAB, _OS, _Emitter, RelaxAll);
+ }
+
/// @}
};
@@ -486,6 +513,20 @@ namespace llvm {
T.CodeEmitterCtorFn = Fn;
}
+ /// RegisterObjectStreamer - Register an MCStreamer implementation
+ /// for the given target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct an MCStreamer for the target.
+ static void RegisterObjectStreamer(Target &T, Target::ObjectStreamerCtorTy Fn) {
+ if (!T.ObjectStreamerCtorFn)
+ T.ObjectStreamerCtorFn = Fn;
+ }
+
/// @}
};
@@ -531,7 +572,7 @@ namespace llvm {
TargetRegistry::RegisterAsmInfo(T, &Allocator);
}
private:
- static const MCAsmInfo *Allocator(const Target &T, StringRef TT) {
+ static MCAsmInfo *Allocator(const Target &T, StringRef TT) {
return new MCAsmInfoImpl(T, TT);
}
@@ -587,8 +628,9 @@ namespace llvm {
}
private:
- static TargetAsmBackend *Allocator(const Target &T, MCAssembler &Backend) {
- return new AsmBackendImpl(T, Backend);
+ static TargetAsmBackend *Allocator(const Target &T,
+ const std::string &Triple) {
+ return new AsmBackendImpl(T, Triple);
}
};
@@ -627,8 +669,9 @@ namespace llvm {
}
private:
- static TargetAsmParser *Allocator(const Target &T, MCAsmParser &P) {
- return new AsmParserImpl(T, P);
+ static TargetAsmParser *Allocator(const Target &T, MCAsmParser &P,
+ TargetMachine &TM) {
+ return new AsmParserImpl(T, P, TM);
}
};
@@ -647,10 +690,8 @@ namespace llvm {
}
private:
- static AsmPrinter *Allocator(formatted_raw_ostream &OS, TargetMachine &TM,
- MCContext &Ctx, MCStreamer &Streamer,
- const MCAsmInfo *MAI) {
- return new AsmPrinterImpl(OS, TM, Ctx, Streamer, MAI);
+ static AsmPrinter *Allocator(TargetMachine &TM, MCStreamer &Streamer) {
+ return new AsmPrinterImpl(TM, Streamer);
}
};
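Several TargetRegistry construction hooks above change signature; in particular createAsmInfo() now returns a mutable MCAsmInfo that the caller owns. A minimal lookup sketch (asmInfoForTriple is a hypothetical helper; error handling is reduced to a null check):

  #include "llvm/Target/TargetRegistry.h"
  #include <string>

  // Look up a target by triple and construct its MCAsmInfo.
  static llvm::MCAsmInfo *asmInfoForTriple(const std::string &TripleStr) {
    std::string Err;
    const llvm::Target *T =
        llvm::TargetRegistry::lookupTarget(TripleStr, Err);
    return T ? T->createAsmInfo(TripleStr) : 0;
  }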
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetSchedule.td b/libclamav/c++/llvm/include/llvm/Target/TargetSchedule.td
index dcc0992..96c8367 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetSchedule.td
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetSchedule.td
@@ -22,6 +22,13 @@
//
class FuncUnit;
+class ReservationKind<bits<1> val> {
+ int Value = val;
+}
+
+def Required : ReservationKind<0>;
+def Reserved : ReservationKind<1>;
+
//===----------------------------------------------------------------------===//
// Instruction stage - These values represent a non-pipelined step in
// the execution of an instruction. Cycles represents the number of
@@ -36,10 +43,14 @@ class FuncUnit;
// InstrStage<1, [FU_x, FU_y]> - TimeInc defaults to Cycles
// InstrStage<1, [FU_x, FU_y], 0> - TimeInc explicit
//
-class InstrStage<int cycles, list<FuncUnit> units, int timeinc = -1> {
+
+class InstrStage<int cycles, list<FuncUnit> units,
+ int timeinc = -1,
+ ReservationKind kind = Required> {
int Cycles = cycles; // length of stage in machine cycles
list<FuncUnit> Units = units; // choice of functional units
int TimeInc = timeinc; // cycles till start of next stage
+ int Kind = kind.Value; // kind of FU reservation
}
//===----------------------------------------------------------------------===//
@@ -73,11 +84,12 @@ class InstrItinData<InstrItinClass Class, list<InstrStage> stages,
// Processor itineraries - These values represent the set of all itinerary
// classes for a given chip set.
//
-class ProcessorItineraries<list<InstrItinData> iid> {
+class ProcessorItineraries<list<FuncUnit> fu, list<InstrItinData> iid> {
+ list<FuncUnit> FU = fu;
list<InstrItinData> IID = iid;
}
// NoItineraries - A marker that can be used by processors without schedule
// info.
-def NoItineraries : ProcessorItineraries<[]>;
+def NoItineraries : ProcessorItineraries<[], []>;
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetSelect.h b/libclamav/c++/llvm/include/llvm/Target/TargetSelect.h
index 951e7fa..1891f87 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetSelect.h
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetSelect.h
@@ -16,7 +16,7 @@
#ifndef LLVM_TARGET_TARGETSELECT_H
#define LLVM_TARGET_TARGETSELECT_H
-#include "llvm/Config/config.h"
+#include "llvm/Config/llvm-config.h"
extern "C" {
// Declare all of the target-initialization functions that are available.
@@ -100,15 +100,22 @@ namespace llvm {
/// It is legal for a client to make multiple calls to this function.
inline bool InitializeNativeTarget() {
// If we have a native target, initialize it to ensure it is linked in.
-#ifdef LLVM_NATIVE_ARCH
-#define DoInit2(TARG) \
- LLVMInitialize ## TARG ## Info (); \
- LLVMInitialize ## TARG ()
-#define DoInit(T) DoInit2(T)
- DoInit(LLVM_NATIVE_ARCH);
+#ifdef LLVM_NATIVE_TARGET
+ LLVM_NATIVE_TARGETINFO();
+ LLVM_NATIVE_TARGET();
+ return false;
+#else
+ return true;
+#endif
+ }
+
+ /// InitializeNativeTargetAsmPrinter - The main program should call
+ /// this function to initialize the native target asm printer.
+ inline bool InitializeNativeTargetAsmPrinter() {
+ // If we have a native target, initialize the corresponding asm printer.
+#ifdef LLVM_NATIVE_ASMPRINTER
+ LLVM_NATIVE_ASMPRINTER();
return false;
-#undef DoInit
-#undef DoInit2
#else
return true;
#endif
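TargetSelect.h above replaces the DoInit macros with LLVM_NATIVE_TARGET/LLVM_NATIVE_TARGETINFO and adds InitializeNativeTargetAsmPrinter(). A minimal JIT start-up sketch using the two entry points (the main body is illustrative only):

  #include "llvm/Target/TargetSelect.h"

  int main() {
    // Both calls return true when no native target is compiled in.
    if (llvm::InitializeNativeTarget() ||
        llvm::InitializeNativeTargetAsmPrinter())
      return 1;
    // ... build a Module and create the ExecutionEngine/JIT here ...
    return 0;
  }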
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetSelectionDAG.td b/libclamav/c++/llvm/include/llvm/Target/TargetSelectionDAG.td
index 4365d33..58ccfba 100644
--- a/libclamav/c++/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -92,6 +92,10 @@ def SDTIntBinOp : SDTypeProfile<1, 2, [ // add, and, or, xor, udiv, etc.
def SDTIntShiftOp : SDTypeProfile<1, 2, [ // shl, sra, srl
SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2>
]>;
+def SDTIntBinHiLoOp : SDTypeProfile<2, 2, [ // mulhi, mullo, sdivrem, udivrem
+ SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>,SDTCisInt<0>
+]>;
+
def SDTFPBinOp : SDTypeProfile<1, 2, [ // fadd, fmul, etc.
SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisFP<0>
]>;
@@ -219,6 +223,7 @@ def SDNPMayStore : SDNodeProperty; // May write to memory, sets 'mayStore'.
def SDNPMayLoad : SDNodeProperty; // May read memory, sets 'mayLoad'.
def SDNPSideEffect : SDNodeProperty; // Sets 'HasUnmodelledSideEffects'.
def SDNPMemOperand : SDNodeProperty; // Touches memory, has assoc MemOperand
+def SDNPVariadic : SDNodeProperty; // Node has variable arguments.
//===----------------------------------------------------------------------===//
// Selection DAG Node definitions.
@@ -234,7 +239,6 @@ class SDNode<string opcode, SDTypeProfile typeprof,
// Special TableGen-recognized dag nodes
def set;
def implicit;
-def parallel;
def node;
def srcvalue;
@@ -281,10 +285,14 @@ def mul : SDNode<"ISD::MUL" , SDTIntBinOp,
[SDNPCommutative, SDNPAssociative]>;
def mulhs : SDNode<"ISD::MULHS" , SDTIntBinOp, [SDNPCommutative]>;
def mulhu : SDNode<"ISD::MULHU" , SDTIntBinOp, [SDNPCommutative]>;
+def smullohi : SDNode<"ISD::SMUL_LOHI" , SDTIntBinHiLoOp, [SDNPCommutative]>;
+def umullohi : SDNode<"ISD::UMUL_LOHI" , SDTIntBinHiLoOp, [SDNPCommutative]>;
def sdiv : SDNode<"ISD::SDIV" , SDTIntBinOp>;
def udiv : SDNode<"ISD::UDIV" , SDTIntBinOp>;
def srem : SDNode<"ISD::SREM" , SDTIntBinOp>;
def urem : SDNode<"ISD::UREM" , SDTIntBinOp>;
+def sdivrem : SDNode<"ISD::SDIVREM" , SDTIntBinHiLoOp>;
+def udivrem : SDNode<"ISD::UDIVREM" , SDTIntBinHiLoOp>;
def srl : SDNode<"ISD::SRL" , SDTIntShiftOp>;
def sra : SDNode<"ISD::SRA" , SDTIntShiftOp>;
def shl : SDNode<"ISD::SHL" , SDTIntShiftOp>;
@@ -345,6 +353,8 @@ def sint_to_fp : SDNode<"ISD::SINT_TO_FP" , SDTIntToFPOp>;
def uint_to_fp : SDNode<"ISD::UINT_TO_FP" , SDTIntToFPOp>;
def fp_to_sint : SDNode<"ISD::FP_TO_SINT" , SDTFPToIntOp>;
def fp_to_uint : SDNode<"ISD::FP_TO_UINT" , SDTFPToIntOp>;
+def f16_to_f32 : SDNode<"ISD::FP16_TO_FP32", SDTIntToFPOp>;
+def f32_to_f16 : SDNode<"ISD::FP32_TO_FP16", SDTFPToIntOp>;
def setcc : SDNode<"ISD::SETCC" , SDTSetCC>;
def select : SDNode<"ISD::SELECT" , SDTSelect>;
@@ -482,22 +492,15 @@ def vtFP : PatLeaf<(vt), [{ return N->getVT().isFloatingPoint(); }]>;
def immAllOnesV: PatLeaf<(build_vector), [{
return ISD::isBuildVectorAllOnes(N);
}]>;
-def immAllOnesV_bc: PatLeaf<(bitconvert), [{
- return ISD::isBuildVectorAllOnes(N);
-}]>;
def immAllZerosV: PatLeaf<(build_vector), [{
return ISD::isBuildVectorAllZeros(N);
}]>;
-def immAllZerosV_bc: PatLeaf<(bitconvert), [{
- return ISD::isBuildVectorAllZeros(N);
-}]>;
// Other helper fragments.
def not : PatFrag<(ops node:$in), (xor node:$in, -1)>;
def vnot : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV)>;
-def vnot_conv : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV_bc)>;
def ineg : PatFrag<(ops node:$in), (sub 0, node:$in)>;
// load fragments.
diff --git a/libclamav/c++/llvm/include/llvm/Target/TargetSelectionDAGInfo.h b/libclamav/c++/llvm/include/llvm/Target/TargetSelectionDAGInfo.h
new file mode 100644
index 0000000..2be1834
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/Target/TargetSelectionDAGInfo.h
@@ -0,0 +1,101 @@
+//==-- llvm/Target/TargetSelectionDAGInfo.h - SelectionDAG Info --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the TargetSelectionDAGInfo class, which targets can
+// subclass to parameterize the SelectionDAG lowering and instruction
+// selection process.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETSELECTIONDAGINFO_H
+#define LLVM_TARGET_TARGETSELECTIONDAGINFO_H
+
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+
+namespace llvm {
+
+class TargetData;
+class TargetMachine;
+
+//===----------------------------------------------------------------------===//
+/// TargetSelectionDAGInfo - Targets can subclass this to parameterize the
+/// SelectionDAG lowering and instruction selection process.
+///
+class TargetSelectionDAGInfo {
+ TargetSelectionDAGInfo(const TargetSelectionDAGInfo &); // DO NOT IMPLEMENT
+ void operator=(const TargetSelectionDAGInfo &); // DO NOT IMPLEMENT
+
+ const TargetData *TD;
+
+protected:
+ const TargetData *getTargetData() const { return TD; }
+
+public:
+ explicit TargetSelectionDAGInfo(const TargetMachine &TM);
+ virtual ~TargetSelectionDAGInfo();
+
+ /// EmitTargetCodeForMemcpy - Emit target-specific code that performs a
+ /// memcpy. This can be used by targets to provide code sequences for cases
+ /// that don't fit the target's parameters for simple loads/stores and can be
+ /// more efficient than using a library call. This function can return a null
+ /// SDValue if the target declines to use custom code and a different
+ /// lowering strategy should be used.
+ ///
+ /// If AlwaysInline is true, the size is constant and the target should not
+ /// emit any calls and is strongly encouraged to attempt to emit inline code
+ /// even if it is beyond the usual threshold because this intrinsic is being
+ /// expanded in a place where calls are not feasible (e.g. within the prologue
+ /// for another call). If the target chooses to decline an AlwaysInline
+ /// request here, legalize will resort to using simple loads and stores.
+ virtual SDValue
+ EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
+ SDValue Chain,
+ SDValue Op1, SDValue Op2,
+ SDValue Op3, unsigned Align, bool isVolatile,
+ bool AlwaysInline,
+ const Value *DstSV, uint64_t DstOff,
+ const Value *SrcSV, uint64_t SrcOff) const {
+ return SDValue();
+ }
+
+ /// EmitTargetCodeForMemmove - Emit target-specific code that performs a
+ /// memmove. This can be used by targets to provide code sequences for cases
+ /// that don't fit the target's parameters for simple loads/stores and can be
+ /// more efficient than using a library call. This function can return a null
+ /// SDValue if the target declines to use custom code and a different
+ /// lowering strategy should be used.
+ virtual SDValue
+ EmitTargetCodeForMemmove(SelectionDAG &DAG, DebugLoc dl,
+ SDValue Chain,
+ SDValue Op1, SDValue Op2,
+ SDValue Op3, unsigned Align, bool isVolatile,
+ const Value *DstSV, uint64_t DstOff,
+ const Value *SrcSV, uint64_t SrcOff) const {
+ return SDValue();
+ }
+
+ /// EmitTargetCodeForMemset - Emit target-specific code that performs a
+ /// memset. This can be used by targets to provide code sequences for cases
+ /// that don't fit the target's parameters for simple stores and can be more
+ /// efficient than using a library call. This function can return a null
+ /// SDValue if the target declines to use custom code and a different
+ /// lowering strategy should be used.
+ virtual SDValue
+ EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
+ SDValue Chain,
+ SDValue Op1, SDValue Op2,
+ SDValue Op3, unsigned Align, bool isVolatile,
+ const Value *DstSV, uint64_t DstOff) const {
+ return SDValue();
+ }
+};
+
+} // end llvm namespace
+
+#endif
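The new TargetSelectionDAGInfo hooks above let a target plug in custom memcpy/memmove/memset lowering. A minimal subclass sketch, assuming a hypothetical MyTargetSelectionDAGInfo (a real target would only override the hooks for which it has a profitable sequence):

  #include "llvm/Target/TargetSelectionDAGInfo.h"

  class MyTargetSelectionDAGInfo : public llvm::TargetSelectionDAGInfo {
  public:
    explicit MyTargetSelectionDAGInfo(const llvm::TargetMachine &TM)
      : llvm::TargetSelectionDAGInfo(TM) {}

    virtual llvm::SDValue
    EmitTargetCodeForMemset(llvm::SelectionDAG &DAG, llvm::DebugLoc dl,
                            llvm::SDValue Chain, llvm::SDValue Op1,
                            llvm::SDValue Op2, llvm::SDValue Op3,
                            unsigned Align, bool isVolatile,
                            const llvm::Value *DstSV, uint64_t DstOff) const {
      // Returning a null SDValue declines custom lowering; legalize then
      // falls back to simple stores or a library call.
      return llvm::SDValue();
    }
  };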
diff --git a/libclamav/c++/llvm/include/llvm/Transforms/IPO.h b/libclamav/c++/llvm/include/llvm/Transforms/IPO.h
index 5e17904..0de1003 100644
--- a/libclamav/c++/llvm/include/llvm/Transforms/IPO.h
+++ b/libclamav/c++/llvm/include/llvm/Transforms/IPO.h
@@ -45,6 +45,11 @@ ModulePass *createStripNonDebugSymbolsPass();
ModulePass *createStripDebugDeclarePass();
//===----------------------------------------------------------------------===//
+//
+// This pass removes unused symbols' debug info.
+ModulePass *createStripDeadDebugInfoPass();
+
+//===----------------------------------------------------------------------===//
/// createLowerSetJmpPass - This function lowers the setjmp/longjmp intrinsics
/// to invoke/unwind instructions. This should really be part of the C/C++
/// front-end, but it's so much easier to write transformations in LLVM proper.
@@ -88,8 +93,7 @@ ModulePass *createGlobalDCEPass();
/// possible, except for the global values specified.
///
ModulePass *createGVExtractionPass(std::vector<GlobalValue*>& GVs, bool
- deleteFn = false,
- bool relinkCallees = false);
+ deleteFn = false);
//===----------------------------------------------------------------------===//
/// createFunctionInliningPass - Return a new pass object that uses a heuristic
@@ -176,7 +180,7 @@ Pass *createSingleLoopExtractorPass();
/// createBlockExtractorPass - This pass extracts all blocks (except those
/// specified in the argument list) from the functions in the module.
///
-ModulePass *createBlockExtractorPass(const std::vector<BasicBlock*> &BTNE);
+ModulePass *createBlockExtractorPass();
/// createStripDeadPrototypesPass - This pass removes any function declarations
/// (prototypes) that are not used.
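IPO.h above gains createStripDeadDebugInfoPass() and simplifies a couple of pass constructors. A minimal driver sketch combining it with an existing IPO pass (runIPOCleanup is a hypothetical helper; M is a Module built elsewhere):

  #include "llvm/PassManager.h"
  #include "llvm/Module.h"
  #include "llvm/Transforms/IPO.h"

  void runIPOCleanup(llvm::Module &M) {
    llvm::PassManager PM;
    PM.add(llvm::createStripDeadDebugInfoPass()); // new in this change
    PM.add(llvm::createGlobalDCEPass());
    PM.run(M);
  }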
diff --git a/libclamav/c++/llvm/include/llvm/Transforms/IPO/InlinerPass.h b/libclamav/c++/llvm/include/llvm/Transforms/IPO/InlinerPass.h
index 30ece0e..3ac4c59 100644
--- a/libclamav/c++/llvm/include/llvm/Transforms/IPO/InlinerPass.h
+++ b/libclamav/c++/llvm/include/llvm/Transforms/IPO/InlinerPass.h
@@ -30,8 +30,8 @@ namespace llvm {
/// perform the inlining operations that do not depend on the policy.
///
struct Inliner : public CallGraphSCCPass {
- explicit Inliner(void *ID);
- explicit Inliner(void *ID, int Threshold);
+ explicit Inliner(char &ID);
+ explicit Inliner(char &ID, int Threshold);
/// getAnalysisUsage - For this class, we declare that we require and preserve
/// the call graph. If the derived class implements this method, it should
@@ -40,7 +40,7 @@ struct Inliner : public CallGraphSCCPass {
// Main run interface method, this implements the interface required by the
// Pass class.
- virtual bool runOnSCC(std::vector<CallGraphNode *> &SCC);
+ virtual bool runOnSCC(CallGraphSCC &SCC);
// doFinalization - Remove now-dead linkonce functions at the end of
// processing to avoid breaking the SCC traversal.
@@ -75,6 +75,10 @@ struct Inliner : public CallGraphSCCPass {
///
virtual void resetCachedCostInfo(Function* Caller) = 0;
+ /// growCachedCostInfo - update the cached cost info for Caller after Callee
+ /// has been inlined.
+ virtual void growCachedCostInfo(Function *Caller, Function *Callee) = 0;
+
/// removeDeadFunctions - Remove dead functions that are not included in
/// DNR (Do Not Remove) list.
bool removeDeadFunctions(CallGraph &CG,
diff --git a/libclamav/c++/llvm/include/llvm/Transforms/Scalar.h b/libclamav/c++/llvm/include/llvm/Transforms/Scalar.h
index 6893bad..0c35d7e 100644
--- a/libclamav/c++/llvm/include/llvm/Transforms/Scalar.h
+++ b/libclamav/c++/llvm/include/llvm/Transforms/Scalar.h
@@ -149,7 +149,6 @@ Pass *createLoopIndexSplitPass();
// ret i32 %Y
//
FunctionPass *createPromoteMemoryToRegisterPass();
-extern const PassInfo *const PromoteMemoryToRegisterID;
//===----------------------------------------------------------------------===//
//
@@ -158,7 +157,7 @@ extern const PassInfo *const PromoteMemoryToRegisterID;
// hacking easier.
//
FunctionPass *createDemoteRegisterToMemoryPass();
-extern const PassInfo *const DemoteRegisterToMemoryID;
+extern char &DemoteRegisterToMemoryID;
//===----------------------------------------------------------------------===//
//
@@ -202,7 +201,7 @@ FunctionPass *createCFGSimplificationPass();
// (set, immediate dominators, tree, and frontier) information.
//
FunctionPass *createBreakCriticalEdgesPass();
-extern const PassInfo *const BreakCriticalEdgesID;
+extern char &BreakCriticalEdgesID;
//===----------------------------------------------------------------------===//
//
@@ -213,7 +212,7 @@ extern const PassInfo *const BreakCriticalEdgesID;
// AU.addRequiredID(LoopSimplifyID);
//
Pass *createLoopSimplifyPass();
-extern const PassInfo *const LoopSimplifyID;
+extern char &LoopSimplifyID;
//===----------------------------------------------------------------------===//
//
@@ -228,7 +227,7 @@ FunctionPass *createTailCallEliminationPass();
// chained binary branch instructions.
//
FunctionPass *createLowerSwitchPass();
-extern const PassInfo *const LowerSwitchID;
+extern char &LowerSwitchID;
//===----------------------------------------------------------------------===//
//
@@ -241,7 +240,9 @@ extern const PassInfo *const LowerSwitchID;
// lowering pass.
//
FunctionPass *createLowerInvokePass(const TargetLowering *TLI = 0);
-extern const PassInfo *const LowerInvokePassID;
+FunctionPass *createLowerInvokePass(const TargetLowering *TLI,
+ bool useExpensiveEHSupport);
+extern char &LowerInvokePassID;
//===----------------------------------------------------------------------===//
//
@@ -256,7 +257,7 @@ FunctionPass *createBlockPlacementPass();
// optimizations.
//
Pass *createLCSSAPass();
-extern const PassInfo *const LCSSAID;
+extern char &LCSSAID;
//===----------------------------------------------------------------------===//
//
@@ -302,39 +303,31 @@ FunctionPass *createCodeGenPreparePass(const TargetLowering *TLI = 0);
// InstructionNamer - Give any unnamed non-void instructions "tmp" names.
//
FunctionPass *createInstructionNamerPass();
-extern const PassInfo *const InstructionNamerID;
+extern char &InstructionNamerID;
//===----------------------------------------------------------------------===//
//
-// SSI - This pass converts instructions to Static Single Information form
-// on demand.
-//
-FunctionPass *createSSIPass();
-
-//===----------------------------------------------------------------------===//
-//
-// SSI - This pass converts every non-void instuction to Static Single
-// Information form.
+// GEPSplitter - Split complex GEPs into simple ones
//
-FunctionPass *createSSIEverythingPass();
+FunctionPass *createGEPSplitterPass();
//===----------------------------------------------------------------------===//
//
-// GEPSplitter - Split complex GEPs into simple ones
+// Sink - Code Sinking
//
-FunctionPass *createGEPSplitterPass();
+FunctionPass *createSinkingPass();
//===----------------------------------------------------------------------===//
//
-// SCCVN - Aggressively eliminate redundant scalar values
+// LowerAtomic - Lower atomic intrinsics to non-atomic form
//
-FunctionPass *createSCCVNPass();
+Pass *createLowerAtomicPass();
//===----------------------------------------------------------------------===//
//
-// ABCD - Elimination of Array Bounds Checks on Demand
+// ValuePropagation - Propagate CFG-derived value information
//
-FunctionPass *createABCDPass();
+Pass *createCorrelatedValuePropagationPass();
} // End llvm namespace
diff --git a/libclamav/c++/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h b/libclamav/c++/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
index 5279e96..0f54450 100644
--- a/libclamav/c++/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/libclamav/c++/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -66,24 +66,6 @@ void ReplaceInstWithInst(BasicBlock::InstListType &BIL,
//
void ReplaceInstWithInst(Instruction *From, Instruction *To);
-/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at the
-/// instruction before ScanFrom) checking to see if we have the value at the
-/// memory address *Ptr locally available within a small number of instructions.
-/// If the value is available, return it.
-///
-/// If not, return the iterator for the last validated instruction that the
-/// value would be live through. If we scanned the entire block and didn't find
-/// something that invalidates *Ptr or provides it, ScanFrom would be left at
-/// begin() and this returns null. ScanFrom could also be left
-///
-/// MaxInstsToScan specifies the maximum instructions to scan in the block. If
-/// it is set to 0, it will scan the whole block. You can also optionally
-/// specify an alias analysis implementation, which makes this more precise.
-Value *FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
- BasicBlock::iterator &ScanFrom,
- unsigned MaxInstsToScan = 6,
- AliasAnalysis *AA = 0);
-
/// FindFunctionBackedges - Analyze the specified function to find all of the
/// loop backedges in the function and return them. This is a relatively cheap
/// (compared to computing dominators and loop info) analysis.
diff --git a/libclamav/c++/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h b/libclamav/c++/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
index 03716a8..c75c142 100644
--- a/libclamav/c++/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
+++ b/libclamav/c++/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -34,20 +34,35 @@ namespace llvm {
/// and the return value has 'i8*' type.
Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const TargetData *TD);
+ /// EmitStrNCmp - Emit a call to the strncmp function to the builder.
+ Value *EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
+ const TargetData *TD);
+
/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments.
Value *EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
- const TargetData *TD);
+ const TargetData *TD, StringRef Name = "strcpy");
+
+ /// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the
+ /// specified pointer arguments and length.
+ Value *EmitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
+ const TargetData *TD, StringRef Name = "strncpy");
/// EmitMemCpy - Emit a call to the memcpy function to the builder. This
/// always expects that the size has type 'intptr_t' and Dst/Src are pointers.
- Value *EmitMemCpy(Value *Dst, Value *Src, Value *Len,
- unsigned Align, IRBuilder<> &B, const TargetData *TD);
+ Value *EmitMemCpy(Value *Dst, Value *Src, Value *Len, unsigned Align,
+ bool isVolatile, IRBuilder<> &B, const TargetData *TD);
+
+ /// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder.
+ /// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
+ /// are pointers.
+ Value *EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
+ IRBuilder<> &B, const TargetData *TD);
/// EmitMemMove - Emit a call to the memmove function to the builder. This
/// always expects that the size has type 'intptr_t' and Dst/Src are pointers.
- Value *EmitMemMove(Value *Dst, Value *Src, Value *Len,
- unsigned Align, IRBuilder<> &B, const TargetData *TD);
+ Value *EmitMemMove(Value *Dst, Value *Src, Value *Len, unsigned Align,
+ bool isVolatile, IRBuilder<> &B, const TargetData *TD);
/// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
/// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
@@ -59,8 +74,8 @@ namespace llvm {
const TargetData *TD);
/// EmitMemSet - Emit a call to the memset function
- Value *EmitMemSet(Value *Dst, Value *Val, Value *Len, IRBuilder<> &B,
- const TargetData *TD);
+ Value *EmitMemSet(Value *Dst, Value *Val, Value *Len, bool isVolatile,
+ IRBuilder<> &B, const TargetData *TD);
/// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name'
 /// (e.g. 'floor'). This function is known to take a single operand of type matching
@@ -91,6 +106,19 @@ namespace llvm {
/// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
void EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
const TargetData *TD);
+
+ /// SimplifyFortifiedLibCalls - Helper class for folding checked library
+ /// calls (e.g. __strcpy_chk) into their unchecked counterparts.
+ class SimplifyFortifiedLibCalls {
+ protected:
+ CallInst *CI;
+ virtual void replaceCall(Value *With) = 0;
+ virtual bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp,
+ bool isString) const = 0;
+ public:
+ virtual ~SimplifyFortifiedLibCalls();
+ bool fold(CallInst *CI, const TargetData *TD);
+ };
}
#endif
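BuildLibCalls.h above also introduces SimplifyFortifiedLibCalls for folding checked __*_chk calls into their plain forms. A skeletal subclass, for illustration only (NoopFortifiedSimplifier is hypothetical; a real client such as instcombine supplies genuine replaceCall/isFoldable logic):

  #include "llvm/Instructions.h"
  #include "llvm/Transforms/Utils/BuildLibCalls.h"

  class NoopFortifiedSimplifier : public llvm::SimplifyFortifiedLibCalls {
  protected:
    virtual void replaceCall(llvm::Value *With) {
      CI->replaceAllUsesWith(With);
      CI->eraseFromParent();
    }
    virtual bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp,
                            bool isString) const {
      return false; // conservatively decline to fold in this sketch
    }
  };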
diff --git a/libclamav/c++/llvm/include/llvm/Transforms/Utils/Cloning.h b/libclamav/c++/llvm/include/llvm/Transforms/Utils/Cloning.h
index 5f494fb..62bf92a 100644
--- a/libclamav/c++/llvm/include/llvm/Transforms/Utils/Cloning.h
+++ b/libclamav/c++/llvm/include/llvm/Transforms/Utils/Cloning.h
@@ -18,8 +18,10 @@
#ifndef LLVM_TRANSFORMS_UTILS_CLONING_H
#define LLVM_TRANSFORMS_UTILS_CLONING_H
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ValueMap.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Support/ValueHandle.h"
namespace llvm {
@@ -40,12 +42,11 @@ class TargetData;
class Loop;
class LoopInfo;
class AllocaInst;
-template <typename T> class SmallVectorImpl;
/// CloneModule - Return an exact copy of the specified module
///
Module *CloneModule(const Module *M);
-Module *CloneModule(const Module *M, DenseMap<const Value*, Value*> &ValueMap);
+Module *CloneModule(const Module *M, ValueMap<const Value*, Value*> &VMap);
/// ClonedCodeInfo - This struct can be used to capture information about code
/// being cloned, while it is being cloned.
@@ -88,7 +89,7 @@ struct ClonedCodeInfo {
/// incoming edges.
///
/// The correlation between instructions in the source and result basic blocks
-/// is recorded in the ValueMap map.
+/// is recorded in the VMap map.
///
/// If you have a particular suffix you'd like to use to add to any cloned
/// names, specify it as the optional third parameter.
@@ -101,44 +102,52 @@ struct ClonedCodeInfo {
/// parameter.
///
BasicBlock *CloneBasicBlock(const BasicBlock *BB,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueMap<const Value*, Value*> &VMap,
const Twine &NameSuffix = "", Function *F = 0,
ClonedCodeInfo *CodeInfo = 0);
/// CloneLoop - Clone Loop. Clone dominator info for loop insiders. Populate
-/// ValueMap using old blocks to new blocks mapping.
+/// VMap using old blocks to new blocks mapping.
Loop *CloneLoop(Loop *L, LPPassManager *LPM, LoopInfo *LI,
- DenseMap<const Value *, Value *> &ValueMap, Pass *P);
+ ValueMap<const Value *, Value *> &VMap, Pass *P);
/// CloneFunction - Return a copy of the specified function, but without
/// embedding the function into another module. Also, any references specified
-/// in the ValueMap are changed to refer to their mapped value instead of the
-/// original one. If any of the arguments to the function are in the ValueMap,
-/// the arguments are deleted from the resultant function. The ValueMap is
+/// in the VMap are changed to refer to their mapped value instead of the
+/// original one. If any of the arguments to the function are in the VMap,
+/// the arguments are deleted from the resultant function. The VMap is
/// updated to include mappings from all of the instructions and basicblocks in
/// the function from their old to new values. The final argument captures
/// information about the cloned code if non-null.
///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings.
+///
Function *CloneFunction(const Function *F,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueMap<const Value*, Value*> &VMap,
+ bool ModuleLevelChanges,
ClonedCodeInfo *CodeInfo = 0);
-/// CloneFunction - Version of the function that doesn't need the ValueMap.
+/// CloneFunction - Version of the function that doesn't need the VMap.
///
inline Function *CloneFunction(const Function *F, ClonedCodeInfo *CodeInfo = 0){
- DenseMap<const Value*, Value*> ValueMap;
- return CloneFunction(F, ValueMap, CodeInfo);
+ ValueMap<const Value*, Value*> VMap;
+ return CloneFunction(F, VMap, CodeInfo);
}
/// Clone OldFunc into NewFunc, transforming the old arguments into references
-/// to ArgMap values. Note that if NewFunc already has basic blocks, the ones
+/// to VMap values. Note that if NewFunc already has basic blocks, the ones
/// cloned into it will be added to the end of the function. This function
/// fills in a list of return instructions, and can optionally append the
/// specified suffix to all values cloned.
///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings.
+///
void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueMap<const Value*, Value*> &VMap,
+ bool ModuleLevelChanges,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = 0);
@@ -150,14 +159,46 @@ void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
/// constant arguments cause a significant amount of code in the callee to be
/// dead. Since this doesn't produce an exactly copy of the input, it can't be
/// used for things like CloneFunction or CloneModule.
+///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings.
+///
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueMap<const Value*, Value*> &VMap,
+ bool ModuleLevelChanges,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = 0,
const TargetData *TD = 0,
Instruction *TheCall = 0);
+
+/// InlineFunctionInfo - This class captures the data input to the
+/// InlineFunction call, and records the auxiliary results produced by it.
+class InlineFunctionInfo {
+public:
+ explicit InlineFunctionInfo(CallGraph *cg = 0, const TargetData *td = 0)
+ : CG(cg), TD(td) {}
+
+ /// CG - If non-null, InlineFunction will update the callgraph to reflect the
+ /// changes it makes.
+ CallGraph *CG;
+ const TargetData *TD;
+
+ /// StaticAllocas - InlineFunction fills this in with all static allocas that
+ /// get copied into the caller.
+ SmallVector<AllocaInst*, 4> StaticAllocas;
+
+ /// InlinedCalls - InlineFunction fills this in with callsites that were
+ /// inlined from the callee. This is only filled in if CG is non-null.
+ SmallVector<WeakVH, 8> InlinedCalls;
+
+ void reset() {
+ StaticAllocas.clear();
+ InlinedCalls.clear();
+ }
+};
+
/// InlineFunction - This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs
@@ -168,18 +209,9 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
 /// exists in the instruction stream. Similarly, this will inline a recursive
/// function by one level.
///
-/// If a non-null callgraph pointer is provided, these functions update the
-/// CallGraph to represent the program after inlining.
-///
-/// If StaticAllocas is non-null, InlineFunction populates it with all of the
-/// static allocas that it inlines into the caller.
-///
-bool InlineFunction(CallInst *C, CallGraph *CG = 0, const TargetData *TD = 0,
- SmallVectorImpl<AllocaInst*> *StaticAllocas = 0);
-bool InlineFunction(InvokeInst *II, CallGraph *CG = 0, const TargetData *TD = 0,
- SmallVectorImpl<AllocaInst*> *StaticAllocas = 0);
-bool InlineFunction(CallSite CS, CallGraph *CG = 0, const TargetData *TD = 0,
- SmallVectorImpl<AllocaInst*> *StaticAllocas = 0);
+bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI);
+bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI);
+bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI);
} // End llvm namespace
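The inliner interface above now funnels its inputs and outputs through InlineFunctionInfo. A minimal call-site sketch (tryInline is a hypothetical wrapper; CS, CG and TD come from the caller, and CG/TD may be null):

  #include "llvm/Support/CallSite.h"
  #include "llvm/Transforms/Utils/Cloning.h"

  bool tryInline(llvm::CallSite CS, llvm::CallGraph *CG,
                 const llvm::TargetData *TD) {
    llvm::InlineFunctionInfo IFI(CG, TD);
    if (!llvm::InlineFunction(CS, IFI))
      return false;
    // On success, IFI.StaticAllocas lists the allocas copied into the
    // caller and IFI.InlinedCalls the call sites inlined from the callee
    // (the latter only when CG is non-null).
    return true;
  }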
diff --git a/libclamav/c++/llvm/include/llvm/Transforms/Utils/Local.h b/libclamav/c++/llvm/include/llvm/Transforms/Utils/Local.h
index bb6fd56..caae27f 100644
--- a/libclamav/c++/llvm/include/llvm/Transforms/Utils/Local.h
+++ b/libclamav/c++/llvm/include/llvm/Transforms/Utils/Local.h
@@ -31,17 +31,6 @@ class TargetData;
template<typename T> class SmallVectorImpl;
//===----------------------------------------------------------------------===//
-// Local analysis.
-//
-
-/// isSafeToLoadUnconditionally - Return true if we know that executing a load
-/// from this value cannot trap. If it is not obviously safe to load from the
-/// specified pointer, we do a quick local scan of the basic block containing
-/// ScanFrom, to determine if the address is already accessed.
-bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
- unsigned Align, const TargetData *TD = 0);
-
-//===----------------------------------------------------------------------===//
// Local constant propagation.
//
@@ -129,8 +118,6 @@ bool EliminateDuplicatePHINodes(BasicBlock *BB);
/// of the CFG. It returns true if a modification was made, possibly deleting
/// the basic block that was pointed to.
///
-/// WARNING: The entry node of a method may not be simplified.
-///
bool SimplifyCFG(BasicBlock *BB, const TargetData *TD = 0);
/// FoldBranchToCommonDest - If this basic block is ONLY a setcc and a branch,
diff --git a/libclamav/c++/llvm/include/llvm/Transforms/Utils/SSAUpdater.h b/libclamav/c++/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
index 927e156..e50a6b1 100644
--- a/libclamav/c++/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
+++ b/libclamav/c++/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
@@ -19,34 +19,33 @@ namespace llvm {
class BasicBlock;
class Use;
class PHINode;
- template<typename T>
- class SmallVectorImpl;
+ template<typename T> class SmallVectorImpl;
+ template<typename T> class SSAUpdaterTraits;
+ class BumpPtrAllocator;
/// SSAUpdater - This class updates SSA form for a set of values defined in
/// multiple blocks. This is used when code duplication or another unstructured
/// transformation wants to rewrite a set of uses of one value with uses of a
/// set of values.
class SSAUpdater {
+ friend class SSAUpdaterTraits<SSAUpdater>;
+
+private:
/// AvailableVals - This keeps track of which value to use on a per-block
- /// basis. When we insert PHI nodes, we keep track of them here. We use
- /// TrackingVH's for the value of the map because we RAUW PHI nodes when we
- /// eliminate them, and want the TrackingVH's to track this.
- //typedef DenseMap<BasicBlock*, TrackingVH<Value> > AvailableValsTy;
+ /// basis. When we insert PHI nodes, we keep track of them here.
+ //typedef DenseMap<BasicBlock*, Value*> AvailableValsTy;
void *AV;
- /// PrototypeValue is an arbitrary representative value, which we derive names
- /// and a type for PHI nodes.
- Value *PrototypeValue;
+ /// ProtoType holds the type of the values being rewritten.
+ const Type *ProtoType;
- /// IncomingPredInfo - We use this as scratch space when doing our recursive
- /// walk. This should only be used in GetValueInBlockInternal, normally it
- /// should be empty.
- //std::vector<std::pair<BasicBlock*, TrackingVH<Value> > > IncomingPredInfo;
- void *IPI;
+ // PHI nodes are given a name based on ProtoName.
+ std::string ProtoName;
/// InsertedPHIs - If this is non-null, the SSAUpdater adds all PHI nodes that
/// it creates to the vector.
SmallVectorImpl<PHINode*> *InsertedPHIs;
+
public:
/// SSAUpdater constructor. If InsertedPHIs is specified, it will be filled
/// in with all PHI Nodes created by rewriting.
@@ -54,8 +53,8 @@ public:
~SSAUpdater();
/// Initialize - Reset this object to get ready for a new set of SSA
- /// updates. ProtoValue is the value used to name PHI nodes.
- void Initialize(Value *ProtoValue);
+ /// updates with type 'Ty'. PHI nodes get a name based on 'Name'.
+ void Initialize(const Type *Ty, StringRef Name);
/// AddAvailableValue - Indicate that a rewritten value is available at the
/// end of the specified block with the specified value.
@@ -97,8 +96,15 @@ public:
/// for the use's block will be considered to be below it.
void RewriteUse(Use &U);
+ /// RewriteUseAfterInsertions - Rewrite a use, just like RewriteUse. However,
+ /// this version of the method can rewrite uses in the same block as a
+ /// definition, because it assumes that all uses of a value are below any
+ /// inserted values.
+ void RewriteUseAfterInsertions(Use &U);
+
private:
Value *GetValueAtEndOfBlockInternal(BasicBlock *BB);
+
void operator=(const SSAUpdater&); // DO NOT IMPLEMENT
SSAUpdater(const SSAUpdater&); // DO NOT IMPLEMENT
};
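
A hedged sketch of the reworked interface: Initialize now takes the value type and a name hint instead of a prototype value. OrigInst, NewCopies and UsesToRewrite are placeholders for whatever the client transformation collected.

#include "llvm/Instruction.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"

static void rewriteDuplicatedValue(
    llvm::Instruction *OrigInst,
    llvm::SmallVectorImpl<llvm::Instruction*> &NewCopies,
    llvm::SmallVectorImpl<llvm::Use*> &UsesToRewrite) {
  llvm::SmallVector<llvm::PHINode*, 8> NewPHIs;
  llvm::SSAUpdater SSA(&NewPHIs);
  SSA.Initialize(OrigInst->getType(), OrigInst->getName());
  // Register every block that now holds a definition of the value...
  SSA.AddAvailableValue(OrigInst->getParent(), OrigInst);
  for (unsigned i = 0, e = NewCopies.size(); i != e; ++i)
    SSA.AddAvailableValue(NewCopies[i]->getParent(), NewCopies[i]);
  // ...then rewrite each recorded use; PHI nodes are inserted on demand.
  for (unsigned i = 0, e = UsesToRewrite.size(); i != e; ++i)
    SSA.RewriteUse(*UsesToRewrite[i]);
}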
diff --git a/libclamav/c++/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h b/libclamav/c++/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
new file mode 100644
index 0000000..5a03d22
--- /dev/null
+++ b/libclamav/c++/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
@@ -0,0 +1,469 @@
+//===-- SSAUpdaterImpl.h - SSA Updater Implementation -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a template that implements the core algorithm for the
+// SSAUpdater and MachineSSAUpdater.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
+#define LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
+
+namespace llvm {
+
+template<typename T> class SSAUpdaterTraits;
+
+template<typename UpdaterT>
+class SSAUpdaterImpl {
+private:
+ UpdaterT *Updater;
+
+ typedef SSAUpdaterTraits<UpdaterT> Traits;
+ typedef typename Traits::BlkT BlkT;
+ typedef typename Traits::ValT ValT;
+ typedef typename Traits::PhiT PhiT;
+
+ /// BBInfo - Per-basic block information used internally by SSAUpdaterImpl.
+ /// The predecessors of each block are cached here since pred_iterator is
+ /// slow and we need to iterate over the blocks at least a few times.
+ class BBInfo {
+ public:
+ BlkT *BB; // Back-pointer to the corresponding block.
+ ValT AvailableVal; // Value to use in this block.
+ BBInfo *DefBB; // Block that defines the available value.
+ int BlkNum; // Postorder number.
+ BBInfo *IDom; // Immediate dominator.
+ unsigned NumPreds; // Number of predecessor blocks.
+ BBInfo **Preds; // Array[NumPreds] of predecessor blocks.
+ PhiT *PHITag; // Marker for existing PHIs that match.
+
+ BBInfo(BlkT *ThisBB, ValT V)
+ : BB(ThisBB), AvailableVal(V), DefBB(V ? this : 0), BlkNum(0), IDom(0),
+ NumPreds(0), Preds(0), PHITag(0) { }
+ };
+
+ typedef DenseMap<BlkT*, ValT> AvailableValsTy;
+ AvailableValsTy *AvailableVals;
+
+ SmallVectorImpl<PhiT*> *InsertedPHIs;
+
+ typedef SmallVectorImpl<BBInfo*> BlockListTy;
+ typedef DenseMap<BlkT*, BBInfo*> BBMapTy;
+ BBMapTy BBMap;
+ BumpPtrAllocator Allocator;
+
+public:
+ explicit SSAUpdaterImpl(UpdaterT *U, AvailableValsTy *A,
+ SmallVectorImpl<PhiT*> *Ins) :
+ Updater(U), AvailableVals(A), InsertedPHIs(Ins) { }
+
+ /// GetValue - Check to see if AvailableVals has an entry for the specified
+ /// BB and if so, return it. If not, construct SSA form by first
+ /// calculating the required placement of PHIs and then inserting new PHIs
+ /// where needed.
+ ValT GetValue(BlkT *BB) {
+ SmallVector<BBInfo*, 100> BlockList;
+ BBInfo *PseudoEntry = BuildBlockList(BB, &BlockList);
+
+ // Special case: bail out if BB is unreachable.
+ if (BlockList.size() == 0) {
+ ValT V = Traits::GetUndefVal(BB, Updater);
+ (*AvailableVals)[BB] = V;
+ return V;
+ }
+
+ FindDominators(&BlockList, PseudoEntry);
+ FindPHIPlacement(&BlockList);
+ FindAvailableVals(&BlockList);
+
+ return BBMap[BB]->DefBB->AvailableVal;
+ }
+
+ /// BuildBlockList - Starting from the specified basic block, traverse back
+ /// through its predecessors until reaching blocks with known values.
+ /// Create BBInfo structures for the blocks and append them to the block
+ /// list.
+ BBInfo *BuildBlockList(BlkT *BB, BlockListTy *BlockList) {
+ SmallVector<BBInfo*, 10> RootList;
+ SmallVector<BBInfo*, 64> WorkList;
+
+ BBInfo *Info = new (Allocator) BBInfo(BB, 0);
+ BBMap[BB] = Info;
+ WorkList.push_back(Info);
+
+ // Search backward from BB, creating BBInfos along the way and stopping
+ // when reaching blocks that define the value. Record those defining
+ // blocks on the RootList.
+ SmallVector<BlkT*, 10> Preds;
+ while (!WorkList.empty()) {
+ Info = WorkList.pop_back_val();
+ Preds.clear();
+ Traits::FindPredecessorBlocks(Info->BB, &Preds);
+ Info->NumPreds = Preds.size();
+ if (Info->NumPreds == 0)
+ Info->Preds = 0;
+ else
+ Info->Preds = static_cast<BBInfo**>
+ (Allocator.Allocate(Info->NumPreds * sizeof(BBInfo*),
+ AlignOf<BBInfo*>::Alignment));
+
+ for (unsigned p = 0; p != Info->NumPreds; ++p) {
+ BlkT *Pred = Preds[p];
+ // Check if BBMap already has a BBInfo for the predecessor block.
+ typename BBMapTy::value_type &BBMapBucket =
+ BBMap.FindAndConstruct(Pred);
+ if (BBMapBucket.second) {
+ Info->Preds[p] = BBMapBucket.second;
+ continue;
+ }
+
+ // Create a new BBInfo for the predecessor.
+ ValT PredVal = AvailableVals->lookup(Pred);
+ BBInfo *PredInfo = new (Allocator) BBInfo(Pred, PredVal);
+ BBMapBucket.second = PredInfo;
+ Info->Preds[p] = PredInfo;
+
+ if (PredInfo->AvailableVal) {
+ RootList.push_back(PredInfo);
+ continue;
+ }
+ WorkList.push_back(PredInfo);
+ }
+ }
+
+ // Now that we know what blocks are backwards-reachable from the starting
+ // block, do a forward depth-first traversal to assign postorder numbers
+ // to those blocks.
+ BBInfo *PseudoEntry = new (Allocator) BBInfo(0, 0);
+ unsigned BlkNum = 1;
+
+ // Initialize the worklist with the roots from the backward traversal.
+ while (!RootList.empty()) {
+ Info = RootList.pop_back_val();
+ Info->IDom = PseudoEntry;
+ Info->BlkNum = -1;
+ WorkList.push_back(Info);
+ }
+
+ while (!WorkList.empty()) {
+ Info = WorkList.back();
+
+ if (Info->BlkNum == -2) {
+ // All the successors have been handled; assign the postorder number.
+ Info->BlkNum = BlkNum++;
+ // If not a root, put it on the BlockList.
+ if (!Info->AvailableVal)
+ BlockList->push_back(Info);
+ WorkList.pop_back();
+ continue;
+ }
+
+ // Leave this entry on the worklist, but set its BlkNum to mark that its
+ // successors have been put on the worklist. When it returns to the top
+ // of the list, after handling its successors, it will be assigned a
+ // number.
+ Info->BlkNum = -2;
+
+ // Add unvisited successors to the work list.
+ for (typename Traits::BlkSucc_iterator SI =
+ Traits::BlkSucc_begin(Info->BB),
+ E = Traits::BlkSucc_end(Info->BB); SI != E; ++SI) {
+ BBInfo *SuccInfo = BBMap[*SI];
+ if (!SuccInfo || SuccInfo->BlkNum)
+ continue;
+ SuccInfo->BlkNum = -1;
+ WorkList.push_back(SuccInfo);
+ }
+ }
+ PseudoEntry->BlkNum = BlkNum;
+ return PseudoEntry;
+ }
+
+ /// IntersectDominators - This is the dataflow lattice "meet" operation for
+ /// finding dominators. Given two basic blocks, it walks up the dominator
+ /// tree until it finds a common dominator of both. It uses the postorder
+ /// number of the blocks to determine how to do that.
+ BBInfo *IntersectDominators(BBInfo *Blk1, BBInfo *Blk2) {
+ while (Blk1 != Blk2) {
+ while (Blk1->BlkNum < Blk2->BlkNum) {
+ Blk1 = Blk1->IDom;
+ if (!Blk1)
+ return Blk2;
+ }
+ while (Blk2->BlkNum < Blk1->BlkNum) {
+ Blk2 = Blk2->IDom;
+ if (!Blk2)
+ return Blk1;
+ }
+ }
+ return Blk1;
+ }
+
+ /// FindDominators - Calculate the dominator tree for the subset of the CFG
+ /// corresponding to the basic blocks on the BlockList. This uses the
+ /// algorithm from: "A Simple, Fast Dominance Algorithm" by Cooper, Harvey
+ /// and Kennedy, published in Software--Practice and Experience, 2001,
+ /// 4:1-10. Because the CFG subset does not include any edges leading into
+ /// blocks that define the value, the results are not the usual dominator
+ /// tree. The CFG subset has a single pseudo-entry node with edges to a set
+ /// of root nodes for blocks that define the value. The dominators for this
+ /// subset CFG are not the standard dominators but they are adequate for
+ /// placing PHIs within the subset CFG.
+ void FindDominators(BlockListTy *BlockList, BBInfo *PseudoEntry) {
+ bool Changed;
+ do {
+ Changed = false;
+ // Iterate over the list in reverse order, i.e., forward on CFG edges.
+ for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+ E = BlockList->rend(); I != E; ++I) {
+ BBInfo *Info = *I;
+ BBInfo *NewIDom = 0;
+
+ // Iterate through the block's predecessors.
+ for (unsigned p = 0; p != Info->NumPreds; ++p) {
+ BBInfo *Pred = Info->Preds[p];
+
+ // Treat an unreachable predecessor as a definition with 'undef'.
+ if (Pred->BlkNum == 0) {
+ Pred->AvailableVal = Traits::GetUndefVal(Pred->BB, Updater);
+ (*AvailableVals)[Pred->BB] = Pred->AvailableVal;
+ Pred->DefBB = Pred;
+ Pred->BlkNum = PseudoEntry->BlkNum;
+ PseudoEntry->BlkNum++;
+ }
+
+ if (!NewIDom)
+ NewIDom = Pred;
+ else
+ NewIDom = IntersectDominators(NewIDom, Pred);
+ }
+
+ // Check if the IDom value has changed.
+ if (NewIDom && NewIDom != Info->IDom) {
+ Info->IDom = NewIDom;
+ Changed = true;
+ }
+ }
+ } while (Changed);
+ }
+
+ /// IsDefInDomFrontier - Search up the dominator tree from Pred to IDom for
+ /// any blocks containing definitions of the value. If one is found, then
+ /// the successor of Pred is in the dominance frontier for the definition,
+ /// and this function returns true.
+ bool IsDefInDomFrontier(const BBInfo *Pred, const BBInfo *IDom) {
+ for (; Pred != IDom; Pred = Pred->IDom) {
+ if (Pred->DefBB == Pred)
+ return true;
+ }
+ return false;
+ }
+
+ /// FindPHIPlacement - PHIs are needed in the iterated dominance frontiers
+ /// of the known definitions. Iteratively add PHIs in the dom frontiers
+ /// until nothing changes. Along the way, keep track of the nearest
+ /// dominating definitions for non-PHI blocks.
+ void FindPHIPlacement(BlockListTy *BlockList) {
+ bool Changed;
+ do {
+ Changed = false;
+ // Iterate over the list in reverse order, i.e., forward on CFG edges.
+ for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+ E = BlockList->rend(); I != E; ++I) {
+ BBInfo *Info = *I;
+
+ // If this block already needs a PHI, there is nothing to do here.
+ if (Info->DefBB == Info)
+ continue;
+
+ // Default to use the same def as the immediate dominator.
+ BBInfo *NewDefBB = Info->IDom->DefBB;
+ for (unsigned p = 0; p != Info->NumPreds; ++p) {
+ if (IsDefInDomFrontier(Info->Preds[p], Info->IDom)) {
+ // Need a PHI here.
+ NewDefBB = Info;
+ break;
+ }
+ }
+
+ // Check if anything changed.
+ if (NewDefBB != Info->DefBB) {
+ Info->DefBB = NewDefBB;
+ Changed = true;
+ }
+ }
+ } while (Changed);
+ }
+
+ /// FindAvailableVal - If this block requires a PHI, first check if an
+ /// existing PHI matches the PHI placement and reaching definitions computed
+ /// earlier, and if not, create a new PHI. Visit all the block's
+ /// predecessors to calculate the available value for each one and fill in
+ /// the incoming values for a new PHI.
+ void FindAvailableVals(BlockListTy *BlockList) {
+ // Go through the worklist in forward order (i.e., backward through the CFG)
+ // and check if existing PHIs can be used. If not, create empty PHIs where
+ // they are needed.
+ for (typename BlockListTy::iterator I = BlockList->begin(),
+ E = BlockList->end(); I != E; ++I) {
+ BBInfo *Info = *I;
+ // Check if there needs to be a PHI in BB.
+ if (Info->DefBB != Info)
+ continue;
+
+ // Look for an existing PHI.
+ FindExistingPHI(Info->BB, BlockList);
+ if (Info->AvailableVal)
+ continue;
+
+ ValT PHI = Traits::CreateEmptyPHI(Info->BB, Info->NumPreds, Updater);
+ Info->AvailableVal = PHI;
+ (*AvailableVals)[Info->BB] = PHI;
+ }
+
+ // Now go back through the worklist in reverse order to fill in the
+ // arguments for any new PHIs added in the forward traversal.
+ for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+ E = BlockList->rend(); I != E; ++I) {
+ BBInfo *Info = *I;
+
+ if (Info->DefBB != Info) {
+ // Record the available value at join nodes to speed up subsequent
+ // uses of this SSAUpdater for the same value.
+ if (Info->NumPreds > 1)
+ (*AvailableVals)[Info->BB] = Info->DefBB->AvailableVal;
+ continue;
+ }
+
+ // Check if this block contains a newly added PHI.
+ PhiT *PHI = Traits::ValueIsNewPHI(Info->AvailableVal, Updater);
+ if (!PHI)
+ continue;
+
+ // Iterate through the block's predecessors.
+ for (unsigned p = 0; p != Info->NumPreds; ++p) {
+ BBInfo *PredInfo = Info->Preds[p];
+ BlkT *Pred = PredInfo->BB;
+ // Skip to the nearest preceding definition.
+ if (PredInfo->DefBB != PredInfo)
+ PredInfo = PredInfo->DefBB;
+ Traits::AddPHIOperand(PHI, PredInfo->AvailableVal, Pred);
+ }
+
+ DEBUG(dbgs() << " Inserted PHI: " << *PHI << "\n");
+
+ // If the client wants to know about all new instructions, tell it.
+ if (InsertedPHIs) InsertedPHIs->push_back(PHI);
+ }
+ }
+
+ /// FindExistingPHI - Look through the PHI nodes in a block to see if any of
+ /// them match what is needed.
+ void FindExistingPHI(BlkT *BB, BlockListTy *BlockList) {
+ for (typename BlkT::iterator BBI = BB->begin(), BBE = BB->end();
+ BBI != BBE; ++BBI) {
+ PhiT *SomePHI = Traits::InstrIsPHI(BBI);
+ if (!SomePHI)
+ break;
+ if (CheckIfPHIMatches(SomePHI)) {
+ RecordMatchingPHI(SomePHI);
+ break;
+ }
+ // Match failed: clear all the PHITag values.
+ for (typename BlockListTy::iterator I = BlockList->begin(),
+ E = BlockList->end(); I != E; ++I)
+ (*I)->PHITag = 0;
+ }
+ }
+
+ /// CheckIfPHIMatches - Check if a PHI node matches the placement and values
+ /// in the BBMap.
+ bool CheckIfPHIMatches(PhiT *PHI) {
+ SmallVector<PhiT*, 20> WorkList;
+ WorkList.push_back(PHI);
+
+ // Mark that the block containing this PHI has been visited.
+ BBMap[PHI->getParent()]->PHITag = PHI;
+
+ while (!WorkList.empty()) {
+ PHI = WorkList.pop_back_val();
+
+ // Iterate through the PHI's incoming values.
+ for (typename Traits::PHI_iterator I = Traits::PHI_begin(PHI),
+ E = Traits::PHI_end(PHI); I != E; ++I) {
+ ValT IncomingVal = I.getIncomingValue();
+ BBInfo *PredInfo = BBMap[I.getIncomingBlock()];
+ // Skip to the nearest preceding definition.
+ if (PredInfo->DefBB != PredInfo)
+ PredInfo = PredInfo->DefBB;
+
+ // Check if it matches the expected value.
+ if (PredInfo->AvailableVal) {
+ if (IncomingVal == PredInfo->AvailableVal)
+ continue;
+ return false;
+ }
+
+ // Check if the value is a PHI in the correct block.
+ PhiT *IncomingPHIVal = Traits::ValueIsPHI(IncomingVal, Updater);
+ if (!IncomingPHIVal || IncomingPHIVal->getParent() != PredInfo->BB)
+ return false;
+
+ // If this block has already been visited, check if this PHI matches.
+ if (PredInfo->PHITag) {
+ if (IncomingPHIVal == PredInfo->PHITag)
+ continue;
+ return false;
+ }
+ PredInfo->PHITag = IncomingPHIVal;
+
+ WorkList.push_back(IncomingPHIVal);
+ }
+ }
+ return true;
+ }
+
+ /// RecordMatchingPHI - For a PHI node that matches, record it and its input
+ /// PHIs in both the BBMap and the AvailableVals mapping.
+ void RecordMatchingPHI(PhiT *PHI) {
+ SmallVector<PhiT*, 20> WorkList;
+ WorkList.push_back(PHI);
+
+ // Record this PHI.
+ BlkT *BB = PHI->getParent();
+ ValT PHIVal = Traits::GetPHIValue(PHI);
+ (*AvailableVals)[BB] = PHIVal;
+ BBMap[BB]->AvailableVal = PHIVal;
+
+ while (!WorkList.empty()) {
+ PHI = WorkList.pop_back_val();
+
+ // Iterate through the PHI's incoming values.
+ for (typename Traits::PHI_iterator I = Traits::PHI_begin(PHI),
+ E = Traits::PHI_end(PHI); I != E; ++I) {
+ ValT IncomingVal = I.getIncomingValue();
+ PhiT *IncomingPHI = Traits::ValueIsPHI(IncomingVal, Updater);
+ if (!IncomingPHI) continue;
+ BB = IncomingPHI->getParent();
+ BBInfo *Info = BBMap[BB];
+ if (!Info || Info->AvailableVal)
+ continue;
+
+ // Record the PHI and add it to the worklist.
+ (*AvailableVals)[BB] = IncomingVal;
+ Info->AvailableVal = IncomingVal;
+ WorkList.push_back(IncomingPHI);
+ }
+ }
+ }
+};
+
+} // End llvm namespace
+
+#endif
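
The dominator computation above relies only on the postorder numbers and IDom links cached in BBInfo; IntersectDominators is the two-finger "meet" from the Cooper/Harvey/Kennedy paper cited in FindDominators. A standalone toy version, independent of LLVM and purely illustrative, shows the walk on three nodes:

#include <cassert>

struct Node { int BlkNum; Node *IDom; };  // postorder number + immediate dominator

// Walk the lower-numbered side up its IDom chain until both sides meet.
static Node *intersect(Node *A, Node *B) {
  while (A != B) {
    while (A->BlkNum < B->BlkNum) A = A->IDom;
    while (B->BlkNum < A->BlkNum) B = B->IDom;
  }
  return A;
}

int main() {
  Node entry = { 3, 0 };          // highest postorder number, no IDom
  Node left  = { 2, &entry };
  Node right = { 1, &entry };
  assert(intersect(&left, &right) == &entry);  // common dominator is entry
  return 0;
}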
diff --git a/libclamav/c++/llvm/include/llvm/Transforms/Utils/SSI.h b/libclamav/c++/llvm/include/llvm/Transforms/Utils/SSI.h
deleted file mode 100644
index 198fc82..0000000
--- a/libclamav/c++/llvm/include/llvm/Transforms/Utils/SSI.h
+++ /dev/null
@@ -1,93 +0,0 @@
-//===------------------- SSI.h - Creates SSI Representation -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass converts a list of variables to the Static Single Information
-// form. This is a program representation described by Scott Ananian in his
-// Master Thesis: "The Static Single Information Form (1999)".
-// We are building an on-demand representation, that is, we do not convert
-// every single variable in the target function to SSI form. Rather, we receive
-// a list of target variables that must be converted. We also do not
-// completely convert a target variable to the SSI format. Instead, we only
-// change the variable in the points where new information can be attached
-// to its live range, that is, at branch points.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TRANSFORMS_UTILS_SSI_H
-#define LLVM_TRANSFORMS_UTILS_SSI_H
-
-#include "llvm/InstrTypes.h"
-#include "llvm/Pass.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallVector.h"
-
-namespace llvm {
-
- class DominatorTree;
- class PHINode;
- class Instruction;
- class CmpInst;
-
- class SSI : public FunctionPass {
- public:
- static char ID; // Pass identification, replacement for typeid.
- SSI() :
- FunctionPass(&ID) {
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const;
-
- bool runOnFunction(Function&);
-
- void createSSI(SmallVectorImpl<Instruction *> &value);
-
- private:
- // Variables always live
- DominatorTree *DT_;
-
- // Stores variables created by SSI
- SmallPtrSet<Instruction *, 16> created;
-
- // Phis created by SSI
- DenseMap<PHINode *, Instruction*> phis;
-
- // Sigmas created by SSI
- DenseMap<PHINode *, Instruction*> sigmas;
-
- // Phi nodes that have a phi as operand and has to be fixed
- SmallPtrSet<PHINode *, 1> phisToFix;
-
- // List of definition points for every variable
- DenseMap<Instruction*, SmallVector<BasicBlock*, 4> > defsites;
-
- // Basic Block of the original definition of each variable
- DenseMap<Instruction*, BasicBlock*> value_original;
-
- // Stack of last seen definition of a variable
- DenseMap<Instruction*, SmallVector<Instruction *, 1> > value_stack;
-
- void insertSigmaFunctions(SmallPtrSet<Instruction*, 4> &value);
- void insertSigma(TerminatorInst *TI, Instruction *I);
- void insertPhiFunctions(SmallPtrSet<Instruction*, 4> &value);
- void renameInit(SmallPtrSet<Instruction*, 4> &value);
- void rename(BasicBlock *BB);
-
- void substituteUse(Instruction *I);
- bool dominateAny(BasicBlock *BB, Instruction *value);
- void fixPhis();
-
- Instruction* getPositionPhi(PHINode *PN);
- Instruction* getPositionSigma(PHINode *PN);
-
- void init(SmallVectorImpl<Instruction *> &value);
- void clean();
- };
-} // end namespace
-#endif
diff --git a/libclamav/c++/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h b/libclamav/c++/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
index c2d0993..a5060e6 100644
--- a/libclamav/c++/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
+++ b/libclamav/c++/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
@@ -26,7 +26,7 @@ struct UnifyFunctionExitNodes : public FunctionPass {
BasicBlock *ReturnBlock, *UnwindBlock, *UnreachableBlock;
public:
static char ID; // Pass identification, replacement for typeid
- UnifyFunctionExitNodes() : FunctionPass(&ID),
+ UnifyFunctionExitNodes() : FunctionPass(ID),
ReturnBlock(0), UnwindBlock(0) {}
// We can preserve non-critical-edgeness when we unify function exit nodes
diff --git a/libclamav/c++/llvm/include/llvm/Transforms/Utils/ValueMapper.h b/libclamav/c++/llvm/include/llvm/Transforms/Utils/ValueMapper.h
index ed33413..5274112 100644
--- a/libclamav/c++/llvm/include/llvm/Transforms/Utils/ValueMapper.h
+++ b/libclamav/c++/llvm/include/llvm/Transforms/Utils/ValueMapper.h
@@ -1,4 +1,4 @@
-//===- ValueMapper.h - Interface shared by lib/Transforms/Utils -*- C++ -*-===//
+//===- ValueMapper.h - Remapping for constants and metadata -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,18 +12,20 @@
//
//===----------------------------------------------------------------------===//
-#ifndef VALUEMAPPER_H
-#define VALUEMAPPER_H
+#ifndef LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
+#define LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ValueMap.h"
namespace llvm {
class Value;
class Instruction;
- typedef DenseMap<const Value *, Value *> ValueMapTy;
+ typedef ValueMap<const Value *, Value *> ValueToValueMapTy;
- Value *MapValue(const Value *V, ValueMapTy &VM);
- void RemapInstruction(Instruction *I, ValueMapTy &VM);
+ Value *MapValue(const Value *V, ValueToValueMapTy &VM,
+ bool ModuleLevelChanges);
+ void RemapInstruction(Instruction *I, ValueToValueMapTy &VM,
+ bool ModuleLevelChanges);
} // End llvm namespace
#endif
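
With the switch from DenseMap to ValueMap and the added ModuleLevelChanges flag, a typical cloning client would now look roughly like this sketch (not taken from the patch):

#include "llvm/Instruction.h"
#include "llvm/Transforms/Utils/ValueMapper.h"

// Clone one instruction and rewrite its operands through VMap.
static llvm::Instruction *cloneAndRemap(const llvm::Instruction *I,
                                        llvm::ValueToValueMapTy &VMap) {
  llvm::Instruction *NewI = I->clone();
  VMap[I] = NewI;   // later clones can resolve references to I via the map
  // Globals map to themselves here, so ModuleLevelChanges is false.
  llvm::RemapInstruction(NewI, VMap, /*ModuleLevelChanges=*/false);
  return NewI;
}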
diff --git a/libclamav/c++/llvm/include/llvm/Type.h b/libclamav/c++/llvm/include/llvm/Type.h
index d09913a..f7d6fd5 100644
--- a/libclamav/c++/llvm/include/llvm/Type.h
+++ b/libclamav/c++/llvm/include/llvm/Type.h
@@ -82,11 +82,10 @@ public:
IntegerTyID, ///< 8: Arbitrary bit width integers
FunctionTyID, ///< 9: Functions
StructTyID, ///< 10: Structures
- UnionTyID, ///< 11: Unions
- ArrayTyID, ///< 12: Arrays
- PointerTyID, ///< 13: Pointers
- OpaqueTyID, ///< 14: Opaque: type with unknown structure
- VectorTyID, ///< 15: SIMD 'packed' format, or other vector type
+ ArrayTyID, ///< 11: Arrays
+ PointerTyID, ///< 12: Pointers
+ OpaqueTyID, ///< 13: Opaque: type with unknown structure
+ VectorTyID, ///< 14: SIMD 'packed' format, or other vector type
NumTypeIDs, // Must remain as last defined ID
LastPrimitiveTyID = MetadataTyID,
@@ -243,10 +242,6 @@ public:
///
bool isStructTy() const { return ID == StructTyID; }
- /// isUnionTy - True if this is an instance of UnionType.
- ///
- bool isUnionTy() const { return ID == UnionTyID; }
-
/// isArrayTy - True if this is an instance of ArrayType.
///
bool isArrayTy() const { return ID == ArrayTyID; }
@@ -306,7 +301,7 @@ public:
/// does not include vector types.
///
inline bool isAggregateType() const {
- return ID == StructTyID || ID == ArrayTyID || ID == UnionTyID;
+ return ID == StructTyID || ID == ArrayTyID;
}
/// isSized - Return true if it makes sense to take the size of this type. To
@@ -319,8 +314,7 @@ public:
return true;
// If it is not something that can have a size (e.g. a function or label),
// it doesn't have a size.
- if (ID != StructTyID && ID != ArrayTyID && ID != VectorTyID &&
- ID != UnionTyID)
+ if (ID != StructTyID && ID != ArrayTyID && ID != VectorTyID)
return false;
// If it is something that can have a size and it's concrete, it definitely
// has a size, otherwise we have to try harder to decide.
@@ -406,6 +400,7 @@ public:
static const Type *getX86_FP80Ty(LLVMContext &C);
static const Type *getFP128Ty(LLVMContext &C);
static const Type *getPPC_FP128Ty(LLVMContext &C);
+ static const IntegerType *getIntNTy(LLVMContext &C, unsigned N);
static const IntegerType *getInt1Ty(LLVMContext &C);
static const IntegerType *getInt8Ty(LLVMContext &C);
static const IntegerType *getInt16Ty(LLVMContext &C);
@@ -421,6 +416,8 @@ public:
static const PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0);
static const PointerType *getFP128PtrTy(LLVMContext &C, unsigned AS = 0);
static const PointerType *getPPC_FP128PtrTy(LLVMContext &C, unsigned AS = 0);
+ static const PointerType *getIntNPtrTy(LLVMContext &C, unsigned N,
+ unsigned AS = 0);
static const PointerType *getInt1PtrTy(LLVMContext &C, unsigned AS = 0);
static const PointerType *getInt8PtrTy(LLVMContext &C, unsigned AS = 0);
static const PointerType *getInt16PtrTy(LLVMContext &C, unsigned AS = 0);
@@ -501,19 +498,19 @@ inline void PATypeHandle::removeUser() {
/// reference to the type.
///
inline Type* PATypeHolder::get() const {
+ if (Ty == 0) return 0;
const Type *NewTy = Ty->getForwardedType();
if (!NewTy) return const_cast<Type*>(Ty);
return *const_cast<PATypeHolder*>(this) = NewTy;
}
inline void PATypeHolder::addRef() {
- assert(Ty && "Type Holder has a null type!");
- if (Ty->isAbstract())
+ if (Ty && Ty->isAbstract())
Ty->addRef();
}
inline void PATypeHolder::dropRef() {
- if (Ty->isAbstract())
+ if (Ty && Ty->isAbstract())
Ty->dropRef();
}
@@ -548,9 +545,11 @@ template <> struct GraphTraits<const Type*> {
}
};
-template <> inline bool isa_impl<PointerType, Type>(const Type &Ty) {
- return Ty.getTypeID() == Type::PointerTyID;
-}
+template <> struct isa_impl<PointerType, Type> {
+ static inline bool doit(const Type &Ty) {
+ return Ty.getTypeID() == Type::PointerTyID;
+ }
+};
raw_ostream &operator<<(raw_ostream &OS, const Type &T);
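
The new getIntNTy/getIntNPtrTy declarations above are thin conveniences over IntegerType::get and PointerType::get; an illustrative use:

#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"

static const llvm::IntegerType *makeInt24(llvm::LLVMContext &Ctx) {
  return llvm::Type::getIntNTy(Ctx, 24);     // same as IntegerType::get(Ctx, 24)
}
static const llvm::PointerType *makeInt24Ptr(llvm::LLVMContext &Ctx) {
  return llvm::Type::getIntNPtrTy(Ctx, 24);  // address space 0 by default
}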
diff --git a/libclamav/c++/llvm/include/llvm/Use.h b/libclamav/c++/llvm/include/llvm/Use.h
index 970f69b..e1ebc6a 100644
--- a/libclamav/c++/llvm/include/llvm/Use.h
+++ b/libclamav/c++/llvm/include/llvm/Use.h
@@ -27,6 +27,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/ADT/PointerIntPair.h"
+#include <cstddef>
#include <iterator>
namespace llvm {
@@ -209,30 +210,6 @@ public:
unsigned getOperandNo() const;
};
-
-template<> struct simplify_type<value_use_iterator<User> > {
- typedef User* SimpleType;
-
- static SimpleType getSimplifiedValue(const value_use_iterator<User> &Val) {
- return *Val;
- }
-};
-
-template<> struct simplify_type<const value_use_iterator<User> >
- : public simplify_type<value_use_iterator<User> > {};
-
-template<> struct simplify_type<value_use_iterator<const User> > {
- typedef const User* SimpleType;
-
- static SimpleType getSimplifiedValue(const
- value_use_iterator<const User> &Val) {
- return *Val;
- }
-};
-
-template<> struct simplify_type<const value_use_iterator<const User> >
- : public simplify_type<value_use_iterator<const User> > {};
-
} // End llvm namespace
#endif
diff --git a/libclamav/c++/llvm/include/llvm/Value.h b/libclamav/c++/llvm/include/llvm/Value.h
index d06cbc0..8740f35 100644
--- a/libclamav/c++/llvm/include/llvm/Value.h
+++ b/libclamav/c++/llvm/include/llvm/Value.h
@@ -93,8 +93,8 @@ protected:
/// printing behavior.
virtual void printCustom(raw_ostream &O) const;
-public:
Value(const Type *Ty, unsigned scid);
+public:
virtual ~Value();
/// dump - Support for debugging, callable in GDB: V->dump()
@@ -157,13 +157,13 @@ public:
// Methods for handling the chain of uses of this Value.
//
typedef value_use_iterator<User> use_iterator;
- typedef value_use_iterator<const User> use_const_iterator;
+ typedef value_use_iterator<const User> const_use_iterator;
bool use_empty() const { return UseList == 0; }
use_iterator use_begin() { return use_iterator(UseList); }
- use_const_iterator use_begin() const { return use_const_iterator(UseList); }
+ const_use_iterator use_begin() const { return const_use_iterator(UseList); }
use_iterator use_end() { return use_iterator(0); }
- use_const_iterator use_end() const { return use_const_iterator(0); }
+ const_use_iterator use_end() const { return const_use_iterator(0); }
User *use_back() { return *use_begin(); }
const User *use_back() const { return *use_begin(); }
@@ -172,7 +172,7 @@ public:
/// traversing the whole use list.
///
bool hasOneUse() const {
- use_const_iterator I = use_begin(), E = use_end();
+ const_use_iterator I = use_begin(), E = use_end();
if (I == E) return false;
return ++I == E;
}
@@ -210,17 +210,15 @@ public:
UndefValueVal, // This is an instance of UndefValue
BlockAddressVal, // This is an instance of BlockAddress
ConstantExprVal, // This is an instance of ConstantExpr
- ConstantAggregateZeroVal, // This is an instance of ConstantAggregateNull
+ ConstantAggregateZeroVal, // This is an instance of ConstantAggregateZero
ConstantIntVal, // This is an instance of ConstantInt
ConstantFPVal, // This is an instance of ConstantFP
ConstantArrayVal, // This is an instance of ConstantArray
ConstantStructVal, // This is an instance of ConstantStruct
- ConstantUnionVal, // This is an instance of ConstantUnion
ConstantVectorVal, // This is an instance of ConstantVector
ConstantPointerNullVal, // This is an instance of ConstantPointerNull
MDNodeVal, // This is an instance of MDNode
MDStringVal, // This is an instance of MDString
- NamedMDNodeVal, // This is an instance of NamedMDNode
InlineAsmVal, // This is an instance of InlineAsm
PseudoSourceValueVal, // This is an instance of PseudoSourceValue
FixedStackPseudoSourceValueVal, // This is an instance of
@@ -266,6 +264,10 @@ public:
SubclassOptionalData &= V->SubclassOptionalData;
}
+ /// hasValueHandle - Return true if there is a value handle associated with
+ /// this value.
+ bool hasValueHandle() const { return HasValueHandle; }
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Value *) {
return true; // Values are always values.
@@ -304,6 +306,10 @@ public:
return const_cast<Value*>(this)->DoPHITranslation(CurBB, PredBB);
}
+ /// MaximumAlignment - This is the greatest alignment value supported by
+ /// load, store, and alloca instructions, and global values.
+ static const unsigned MaximumAlignment = 1u << 29;
+
protected:
unsigned short getSubclassDataFromValue() const { return SubclassData; }
void setValueSubclassData(unsigned short D) { SubclassData = D; }
@@ -324,39 +330,67 @@ void Use::set(Value *V) {
// isa - Provide some specializations of isa so that we don't have to include
// the subtype header files to test to see if the value is a subclass...
//
-template <> inline bool isa_impl<Constant, Value>(const Value &Val) {
- return Val.getValueID() >= Value::ConstantFirstVal &&
- Val.getValueID() <= Value::ConstantLastVal;
-}
-template <> inline bool isa_impl<Argument, Value>(const Value &Val) {
- return Val.getValueID() == Value::ArgumentVal;
-}
-template <> inline bool isa_impl<InlineAsm, Value>(const Value &Val) {
- return Val.getValueID() == Value::InlineAsmVal;
-}
-template <> inline bool isa_impl<Instruction, Value>(const Value &Val) {
- return Val.getValueID() >= Value::InstructionVal;
-}
-template <> inline bool isa_impl<BasicBlock, Value>(const Value &Val) {
- return Val.getValueID() == Value::BasicBlockVal;
-}
-template <> inline bool isa_impl<Function, Value>(const Value &Val) {
- return Val.getValueID() == Value::FunctionVal;
-}
-template <> inline bool isa_impl<GlobalVariable, Value>(const Value &Val) {
- return Val.getValueID() == Value::GlobalVariableVal;
-}
-template <> inline bool isa_impl<GlobalAlias, Value>(const Value &Val) {
- return Val.getValueID() == Value::GlobalAliasVal;
-}
-template <> inline bool isa_impl<GlobalValue, Value>(const Value &Val) {
- return isa<GlobalVariable>(Val) || isa<Function>(Val) ||
- isa<GlobalAlias>(Val);
-}
-template <> inline bool isa_impl<MDNode, Value>(const Value &Val) {
- return Val.getValueID() == Value::MDNodeVal;
-}
-
+template <> struct isa_impl<Constant, Value> {
+ static inline bool doit(const Value &Val) {
+ return Val.getValueID() >= Value::ConstantFirstVal &&
+ Val.getValueID() <= Value::ConstantLastVal;
+ }
+};
+
+template <> struct isa_impl<Argument, Value> {
+ static inline bool doit(const Value &Val) {
+ return Val.getValueID() == Value::ArgumentVal;
+ }
+};
+
+template <> struct isa_impl<InlineAsm, Value> {
+ static inline bool doit(const Value &Val) {
+ return Val.getValueID() == Value::InlineAsmVal;
+ }
+};
+
+template <> struct isa_impl<Instruction, Value> {
+ static inline bool doit(const Value &Val) {
+ return Val.getValueID() >= Value::InstructionVal;
+ }
+};
+
+template <> struct isa_impl<BasicBlock, Value> {
+ static inline bool doit(const Value &Val) {
+ return Val.getValueID() == Value::BasicBlockVal;
+ }
+};
+
+template <> struct isa_impl<Function, Value> {
+ static inline bool doit(const Value &Val) {
+ return Val.getValueID() == Value::FunctionVal;
+ }
+};
+
+template <> struct isa_impl<GlobalVariable, Value> {
+ static inline bool doit(const Value &Val) {
+ return Val.getValueID() == Value::GlobalVariableVal;
+ }
+};
+
+template <> struct isa_impl<GlobalAlias, Value> {
+ static inline bool doit(const Value &Val) {
+ return Val.getValueID() == Value::GlobalAliasVal;
+ }
+};
+
+template <> struct isa_impl<GlobalValue, Value> {
+ static inline bool doit(const Value &Val) {
+ return isa<GlobalVariable>(Val) || isa<Function>(Val) ||
+ isa<GlobalAlias>(Val);
+ }
+};
+
+template <> struct isa_impl<MDNode, Value> {
+ static inline bool doit(const Value &Val) {
+ return Val.getValueID() == Value::MDNodeVal;
+ }
+};
// Value* is only 4-byte aligned.
template<>
diff --git a/libclamav/c++/llvm/include/llvm/ValueSymbolTable.h b/libclamav/c++/llvm/include/llvm/ValueSymbolTable.h
index 7497dae..35fc97b 100644
--- a/libclamav/c++/llvm/include/llvm/ValueSymbolTable.h
+++ b/libclamav/c++/llvm/include/llvm/ValueSymbolTable.h
@@ -128,94 +128,6 @@ private:
/// @}
};
-/// This class provides a symbol table of name/NamedMDNode pairs. It is
-/// essentially a StringMap wrapper.
-
-class MDSymbolTable {
- friend class SymbolTableListTraits<NamedMDNode, Module>;
-/// @name Types
-/// @{
-private:
- /// @brief A mapping of names to metadata
- typedef StringMap<NamedMDNode*> MDMap;
-
-public:
- /// @brief An iterator over a ValueMap.
- typedef MDMap::iterator iterator;
-
- /// @brief A const_iterator over a ValueMap.
- typedef MDMap::const_iterator const_iterator;
-
-/// @}
-/// @name Constructors
-/// @{
-public:
-
- MDSymbolTable(const MDNode &); // DO NOT IMPLEMENT
- void operator=(const MDSymbolTable &); // DO NOT IMPLEMENT
- MDSymbolTable() : mmap(0) {}
- ~MDSymbolTable();
-
-/// @}
-/// @name Accessors
-/// @{
-public:
-
- /// This method finds the value with the given \p Name in the
- /// the symbol table.
- /// @returns the NamedMDNode associated with the \p Name
- /// @brief Lookup a named Value.
- NamedMDNode *lookup(StringRef Name) const { return mmap.lookup(Name); }
-
- /// @returns true iff the symbol table is empty
- /// @brief Determine if the symbol table is empty
- inline bool empty() const { return mmap.empty(); }
-
- /// @brief The number of name/type pairs is returned.
- inline unsigned size() const { return unsigned(mmap.size()); }
-
-/// @}
-/// @name Iteration
-/// @{
-public:
- /// @brief Get an iterator that from the beginning of the symbol table.
- inline iterator begin() { return mmap.begin(); }
-
- /// @brief Get a const_iterator that from the beginning of the symbol table.
- inline const_iterator begin() const { return mmap.begin(); }
-
- /// @brief Get an iterator to the end of the symbol table.
- inline iterator end() { return mmap.end(); }
-
- /// @brief Get a const_iterator to the end of the symbol table.
- inline const_iterator end() const { return mmap.end(); }
-
-/// @}
-/// @name Mutators
-/// @{
-public:
- /// insert - The method inserts a new entry into the stringmap. This will
- /// replace existing entry, if any.
- void insert(StringRef Name, NamedMDNode *Node) {
- StringMapEntry<NamedMDNode *> &Entry =
- mmap.GetOrCreateValue(Name, Node);
- if (Entry.getValue() != Node) {
- mmap.remove(&Entry);
- (void) mmap.GetOrCreateValue(Name, Node);
- }
- }
-
- /// This method removes a NamedMDNode from the symbol table.
- void remove(StringRef Name) { mmap.erase(Name); }
-
-/// @}
-/// @name Internal Data
-/// @{
-private:
- MDMap mmap; ///< The map that holds the symbol table.
-/// @}
-};
-
} // End llvm namespace
#endif
diff --git a/libclamav/c++/llvm/lib/Analysis/AliasAnalysis.cpp b/libclamav/c++/llvm/lib/Analysis/AliasAnalysis.cpp
index 371dcaf..1f2528f 100644
--- a/libclamav/c++/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -65,10 +65,127 @@ void AliasAnalysis::copyValue(Value *From, Value *To) {
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(CallSite CS1, CallSite CS2) {
- // FIXME: we can do better.
+AliasAnalysis::getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size) {
+ // Don't assert AA because BasicAA calls us in order to make use of the
+ // logic here.
+
+ ModRefBehavior MRB = getModRefBehavior(CS);
+ if (MRB == DoesNotAccessMemory)
+ return NoModRef;
+
+ ModRefResult Mask = ModRef;
+ if (MRB == OnlyReadsMemory)
+ Mask = Ref;
+ else if (MRB == AliasAnalysis::AccessesArguments) {
+ bool doesAlias = false;
+ for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
+ AI != AE; ++AI)
+ if (!isNoAlias(*AI, ~0U, P, Size)) {
+ doesAlias = true;
+ break;
+ }
+
+ if (!doesAlias)
+ return NoModRef;
+ }
+
+ // If P points to a constant memory location, the call definitely could not
+ // modify the memory location.
+ if ((Mask & Mod) && pointsToConstantMemory(P))
+ Mask = ModRefResult(Mask & ~Mod);
+
+ // If this is BasicAA, don't forward.
+ if (!AA) return Mask;
+
+ // Otherwise, fall back to the next AA in the chain. But we can merge
+ // in any mask we've managed to compute.
+ return ModRefResult(AA->getModRefInfo(CS, P, Size) & Mask);
+}
+
+AliasAnalysis::ModRefResult
+AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
+ // Don't assert AA because BasicAA calls us in order to make use of the
+ // logic here.
+
+ // If CS1 or CS2 are readnone, they don't interact.
+ ModRefBehavior CS1B = getModRefBehavior(CS1);
+ if (CS1B == DoesNotAccessMemory) return NoModRef;
+
+ ModRefBehavior CS2B = getModRefBehavior(CS2);
+ if (CS2B == DoesNotAccessMemory) return NoModRef;
+
+ // If they both only read from memory, there is no dependence.
+ if (CS1B == OnlyReadsMemory && CS2B == OnlyReadsMemory)
+ return NoModRef;
+
+ AliasAnalysis::ModRefResult Mask = ModRef;
+
+ // If CS1 only reads memory, the only dependence on CS2 can be
+ // from CS1 reading memory written by CS2.
+ if (CS1B == OnlyReadsMemory)
+ Mask = ModRefResult(Mask & Ref);
+
+ // If CS2 only access memory through arguments, accumulate the mod/ref
+ // information from CS1's references to the memory referenced by
+ // CS2's arguments.
+ if (CS2B == AccessesArguments) {
+ AliasAnalysis::ModRefResult R = NoModRef;
+ for (ImmutableCallSite::arg_iterator
+ I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
+ R = ModRefResult((R | getModRefInfo(CS1, *I, UnknownSize)) & Mask);
+ if (R == Mask)
+ break;
+ }
+ return R;
+ }
+
+ // If CS1 only accesses memory through arguments, check if CS2 references
+ // any of the memory referenced by CS1's arguments. If not, return NoModRef.
+ if (CS1B == AccessesArguments) {
+ AliasAnalysis::ModRefResult R = NoModRef;
+ for (ImmutableCallSite::arg_iterator
+ I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I)
+ if (getModRefInfo(CS2, *I, UnknownSize) != NoModRef) {
+ R = Mask;
+ break;
+ }
+ if (R == NoModRef)
+ return R;
+ }
+
+ // If this is BasicAA, don't forward.
+ if (!AA) return Mask;
+
+ // Otherwise, fall back to the next AA in the chain. But we can merge
+ // in any mask we've managed to compute.
+ return ModRefResult(AA->getModRefInfo(CS1, CS2) & Mask);
+}
+
+AliasAnalysis::ModRefBehavior
+AliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
+ // Don't assert AA because BasicAA calls us in order to make use of the
+ // logic here.
+
+ ModRefBehavior Min = UnknownModRefBehavior;
+
+ // Call back into the alias analysis with the other form of getModRefBehavior
+ // to see if it can give a better response.
+ if (const Function *F = CS.getCalledFunction())
+ Min = getModRefBehavior(F);
+
+ // If this is BasicAA, don't forward.
+ if (!AA) return Min;
+
+ // Otherwise, fall back to the next AA in the chain. But we can merge
+ // in any result we've managed to compute.
+ return std::min(AA->getModRefBehavior(CS), Min);
+}
+
+AliasAnalysis::ModRefBehavior
+AliasAnalysis::getModRefBehavior(const Function *F) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- return AA->getModRefInfo(CS1, CS2);
+ return AA->getModRefBehavior(F);
}
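
From the client side, the net effect is that mod/ref queries now take ImmutableCallSite and const pointers. A sketch of a pass asking whether a call might write through a pointer (names are illustrative):

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Support/CallSite.h"

static bool callMayWriteTo(llvm::AliasAnalysis &AA,
                           const llvm::Instruction *Call,
                           const llvm::Value *Ptr, unsigned Size) {
  llvm::ImmutableCallSite CS(Call);
  // Mod is set when the call might modify the memory Ptr points to.
  return (AA.getModRefInfo(CS, Ptr, Size) & llvm::AliasAnalysis::Mod) != 0;
}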
@@ -77,87 +194,63 @@ AliasAnalysis::getModRefInfo(CallSite CS1, CallSite CS2) {
//===----------------------------------------------------------------------===//
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(LoadInst *L, Value *P, unsigned Size) {
- return alias(L->getOperand(0), getTypeStoreSize(L->getType()),
- P, Size) ? Ref : NoModRef;
+AliasAnalysis::getModRefInfo(const LoadInst *L, const Value *P, unsigned Size) {
+ // Be conservative in the face of volatile.
+ if (L->isVolatile())
+ return ModRef;
+
+ // If the load address doesn't alias the given address, it doesn't read
+ // or write the specified memory.
+ if (!alias(L->getOperand(0), getTypeStoreSize(L->getType()), P, Size))
+ return NoModRef;
+
+ // Otherwise, a load just reads.
+ return Ref;
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(StoreInst *S, Value *P, unsigned Size) {
- // If the stored address cannot alias the pointer in question, then the
- // pointer cannot be modified by the store.
+AliasAnalysis::getModRefInfo(const StoreInst *S, const Value *P, unsigned Size) {
+ // Be conservative in the face of volatile.
+ if (S->isVolatile())
+ return ModRef;
+
+ // If the store address cannot alias the pointer in question, then the
+ // specified memory cannot be modified by the store.
if (!alias(S->getOperand(1),
getTypeStoreSize(S->getOperand(0)->getType()), P, Size))
return NoModRef;
// If the pointer is a pointer to constant memory, then it could not have been
// modified by this store.
- return pointsToConstantMemory(P) ? NoModRef : Mod;
-}
-
-AliasAnalysis::ModRefBehavior
-AliasAnalysis::getModRefBehavior(CallSite CS,
- std::vector<PointerAccessInfo> *Info) {
- if (CS.doesNotAccessMemory())
- // Can't do better than this.
- return DoesNotAccessMemory;
- ModRefBehavior MRB = getModRefBehavior(CS.getCalledFunction(), Info);
- if (MRB != DoesNotAccessMemory && CS.onlyReadsMemory())
- return OnlyReadsMemory;
- return MRB;
-}
-
-AliasAnalysis::ModRefBehavior
-AliasAnalysis::getModRefBehavior(Function *F,
- std::vector<PointerAccessInfo> *Info) {
- if (F) {
- if (F->doesNotAccessMemory())
- // Can't do better than this.
- return DoesNotAccessMemory;
- if (F->onlyReadsMemory())
- return OnlyReadsMemory;
- if (unsigned id = F->getIntrinsicID())
- return getModRefBehavior(id);
- }
- return UnknownModRefBehavior;
-}
+ if (pointsToConstantMemory(P))
+ return NoModRef;
-AliasAnalysis::ModRefBehavior AliasAnalysis::getModRefBehavior(unsigned iid) {
-#define GET_INTRINSIC_MODREF_BEHAVIOR
-#include "llvm/Intrinsics.gen"
-#undef GET_INTRINSIC_MODREF_BEHAVIOR
+ // Otherwise, a store just writes.
+ return Mod;
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
- ModRefBehavior MRB = getModRefBehavior(CS);
- if (MRB == DoesNotAccessMemory)
+AliasAnalysis::getModRefInfo(const VAArgInst *V, const Value *P, unsigned Size) {
+ // If the va_arg address cannot alias the pointer in question, then the
+ // specified memory cannot be accessed by the va_arg.
+ if (!alias(V->getOperand(0), UnknownSize, P, Size))
return NoModRef;
-
- ModRefResult Mask = ModRef;
- if (MRB == OnlyReadsMemory)
- Mask = Ref;
- else if (MRB == AliasAnalysis::AccessesArguments) {
- bool doesAlias = false;
- for (CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
- AI != AE; ++AI)
- if (!isNoAlias(*AI, ~0U, P, Size)) {
- doesAlias = true;
- break;
- }
- if (!doesAlias)
- return NoModRef;
- }
+ // If the pointer is a pointer to constant memory, then it could not have been
+ // modified by this va_arg.
+ if (pointsToConstantMemory(P))
+ return NoModRef;
- if (!AA) return Mask;
+ // Otherwise, a va_arg reads and writes.
+ return ModRef;
+}
- // If P points to a constant memory location, the call definitely could not
- // modify the memory location.
- if ((Mask & Mod) && AA->pointsToConstantMemory(P))
- Mask = ModRefResult(Mask & ~Mod);
- return ModRefResult(Mask & AA->getModRefInfo(CS, P, Size));
+AliasAnalysis::ModRefBehavior
+AliasAnalysis::getIntrinsicModRefBehavior(unsigned iid) {
+#define GET_INTRINSIC_MODREF_BEHAVIOR
+#include "llvm/Intrinsics.gen"
+#undef GET_INTRINSIC_MODREF_BEHAVIOR
}
// AliasAnalysis destructor: DO NOT move this to the header file for
@@ -206,12 +299,12 @@ bool AliasAnalysis::canInstructionRangeModify(const Instruction &I1,
const Value *Ptr, unsigned Size) {
assert(I1.getParent() == I2.getParent() &&
"Instructions not in same basic block!");
- BasicBlock::iterator I = const_cast<Instruction*>(&I1);
- BasicBlock::iterator E = const_cast<Instruction*>(&I2);
+ BasicBlock::const_iterator I = &I1;
+ BasicBlock::const_iterator E = &I2;
++E; // Convert from inclusive to exclusive range.
for (; I != E; ++I) // Check every instruction in range
- if (getModRefInfo(I, const_cast<Value*>(Ptr), Size) & Mod)
+ if (getModRefInfo(I, Ptr, Size) & Mod)
return true;
return false;
}
@@ -220,7 +313,7 @@ bool AliasAnalysis::canInstructionRangeModify(const Instruction &I1,
/// function.
bool llvm::isNoAliasCall(const Value *V) {
if (isa<CallInst>(V) || isa<InvokeInst>(V))
- return CallSite(const_cast<Instruction*>(cast<Instruction>(V)))
+ return ImmutableCallSite(cast<Instruction>(V))
.paramHasAttr(0, Attribute::NoAlias);
return false;
}
@@ -233,10 +326,12 @@ bool llvm::isNoAliasCall(const Value *V) {
/// NoAlias returns
///
bool llvm::isIdentifiedObject(const Value *V) {
- if (isa<AllocaInst>(V) || isNoAliasCall(V))
+ if (isa<AllocaInst>(V))
return true;
if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
return true;
+ if (isNoAliasCall(V))
+ return true;
if (const Argument *A = dyn_cast<Argument>(V))
return A->hasNoAliasAttr() || A->hasByValAttr();
return false;
diff --git a/libclamav/c++/llvm/lib/Analysis/AliasAnalysisCounter.cpp b/libclamav/c++/llvm/lib/Analysis/AliasAnalysisCounter.cpp
index 1053955..b178041 100644
--- a/libclamav/c++/llvm/lib/Analysis/AliasAnalysisCounter.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/AliasAnalysisCounter.cpp
@@ -34,7 +34,7 @@ namespace {
Module *M;
public:
static char ID; // Class identification, replacement for typeinfo
- AliasAnalysisCounter() : ModulePass(&ID) {
+ AliasAnalysisCounter() : ModulePass(ID) {
No = May = Must = 0;
NoMR = JustRef = JustMod = MR = 0;
}
@@ -87,8 +87,8 @@ namespace {
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&AliasAnalysis::ID))
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
@@ -103,17 +103,18 @@ namespace {
AliasResult alias(const Value *V1, unsigned V1Size,
const Value *V2, unsigned V2Size);
- ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
- ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
+ ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size);
+ ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) {
return AliasAnalysis::getModRefInfo(CS1,CS2);
}
};
}
char AliasAnalysisCounter::ID = 0;
-static RegisterPass<AliasAnalysisCounter>
-X("count-aa", "Count Alias Analysis Query Responses", false, true);
-static RegisterAnalysisGroup<AliasAnalysis> Y(X);
+INITIALIZE_AG_PASS(AliasAnalysisCounter, AliasAnalysis, "count-aa",
+ "Count Alias Analysis Query Responses", false, true, false);
ModulePass *llvm::createAliasAnalysisCounterPass() {
return new AliasAnalysisCounter();
@@ -146,7 +147,8 @@ AliasAnalysisCounter::alias(const Value *V1, unsigned V1Size,
}
AliasAnalysis::ModRefResult
-AliasAnalysisCounter::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
+AliasAnalysisCounter::getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size) {
ModRefResult R = getAnalysis<AliasAnalysis>().getModRefInfo(CS, P, Size);
const char *MRString;
diff --git a/libclamav/c++/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp b/libclamav/c++/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
index 308b9e3..ce363cb 100644
--- a/libclamav/c++/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -25,7 +25,6 @@
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/CommandLine.h"
@@ -51,7 +50,7 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
- AAEval() : FunctionPass(&ID) {}
+ AAEval() : FunctionPass(ID) {}
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>();
@@ -75,8 +74,8 @@ namespace {
}
char AAEval::ID = 0;
-static RegisterPass<AAEval>
-X("aa-eval", "Exhaustive Alias Analysis Precision Evaluator", false, true);
+INITIALIZE_PASS(AAEval, "aa-eval",
+ "Exhaustive Alias Analysis Precision Evaluator", false, true);
FunctionPass *llvm::createAAEvalPass() { return new AAEval(); }
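
The registration churn repeated through these files (FunctionPass(&ID) becoming FunctionPass(ID), static RegisterPass<> objects becoming INITIALIZE_PASS or INITIALIZE_AG_PASS) follows one pattern; a minimal no-op pass written against the new convention might look like this sketch, assuming the LLVM 2.8 pass headers:

#include "llvm/Pass.h"
using namespace llvm;

namespace {
  struct DummyPass : public FunctionPass {
    static char ID;                    // pass identification
    DummyPass() : FunctionPass(ID) {}  // ID is now passed by reference
    virtual bool runOnFunction(Function &) { return false; }
  };
}

char DummyPass::ID = 0;
INITIALIZE_PASS(DummyPass, "dummy-pass", "Illustrative no-op pass", false, false);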
@@ -108,6 +107,20 @@ PrintModRefResults(const char *Msg, bool P, Instruction *I, Value *Ptr,
}
}
+static inline void
+PrintModRefResults(const char *Msg, bool P, CallSite CSA, CallSite CSB,
+ Module *M) {
+ if (P) {
+ errs() << " " << Msg << ": " << *CSA.getInstruction()
+ << " <-> " << *CSB.getInstruction() << '\n';
+ }
+}
+
+static inline bool isInterestingPointer(Value *V) {
+ return V->getType()->isPointerTy()
+ && !isa<ConstantPointerNull>(V);
+}
+
bool AAEval::runOnFunction(Function &F) {
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
@@ -115,23 +128,31 @@ bool AAEval::runOnFunction(Function &F) {
SetVector<CallSite> CallSites;
for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
- if (I->getType()->isPointerTy()) // Add all pointer arguments
+ if (I->getType()->isPointerTy()) // Add all pointer arguments.
Pointers.insert(I);
for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
- if (I->getType()->isPointerTy()) // Add all pointer instructions
+ if (I->getType()->isPointerTy()) // Add all pointer instructions.
Pointers.insert(&*I);
Instruction &Inst = *I;
- User::op_iterator OI = Inst.op_begin();
- CallSite CS = CallSite::get(&Inst);
- if (CS.getInstruction() &&
- isa<Function>(CS.getCalledValue()))
- ++OI; // Skip actual functions for direct function calls.
- for (; OI != Inst.op_end(); ++OI)
- if ((*OI)->getType()->isPointerTy() && !isa<ConstantPointerNull>(*OI))
- Pointers.insert(*OI);
-
- if (CS.getInstruction()) CallSites.insert(CS);
+ if (CallSite CS = cast<Value>(&Inst)) {
+ Value *Callee = CS.getCalledValue();
+ // Skip actual functions for direct function calls.
+ if (!isa<Function>(Callee) && isInterestingPointer(Callee))
+ Pointers.insert(Callee);
+ // Consider formals.
+ for (CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
+ AI != AE; ++AI)
+ if (isInterestingPointer(*AI))
+ Pointers.insert(*AI);
+ CallSites.insert(CS);
+ } else {
+ // Consider all operands.
+ for (Instruction::op_iterator OI = Inst.op_begin(), OE = Inst.op_end();
+ OI != OE; ++OI)
+ if (isInterestingPointer(*OI))
+ Pointers.insert(*OI);
+ }
}
if (PrintNoAlias || PrintMayAlias || PrintMustAlias ||
@@ -183,13 +204,13 @@ bool AAEval::runOnFunction(Function &F) {
PrintModRefResults("NoModRef", PrintNoModRef, I, *V, F.getParent());
++NoModRef; break;
case AliasAnalysis::Mod:
- PrintModRefResults(" Mod", PrintMod, I, *V, F.getParent());
+ PrintModRefResults("Just Mod", PrintMod, I, *V, F.getParent());
++Mod; break;
case AliasAnalysis::Ref:
- PrintModRefResults(" Ref", PrintRef, I, *V, F.getParent());
+ PrintModRefResults("Just Ref", PrintRef, I, *V, F.getParent());
++Ref; break;
case AliasAnalysis::ModRef:
- PrintModRefResults(" ModRef", PrintModRef, I, *V, F.getParent());
+ PrintModRefResults("Both ModRef", PrintModRef, I, *V, F.getParent());
++ModRef; break;
default:
errs() << "Unknown alias query result!\n";
@@ -197,6 +218,29 @@ bool AAEval::runOnFunction(Function &F) {
}
}
+ // Mod/ref alias analysis: compare all pairs of calls
+ for (SetVector<CallSite>::iterator C = CallSites.begin(),
+ Ce = CallSites.end(); C != Ce; ++C) {
+ for (SetVector<CallSite>::iterator D = CallSites.begin(); D != Ce; ++D) {
+ if (D == C)
+ continue;
+ switch (AA.getModRefInfo(*C, *D)) {
+ case AliasAnalysis::NoModRef:
+ PrintModRefResults("NoModRef", PrintNoModRef, *C, *D, F.getParent());
+ ++NoModRef; break;
+ case AliasAnalysis::Mod:
+ PrintModRefResults("Just Mod", PrintMod, *C, *D, F.getParent());
+ ++Mod; break;
+ case AliasAnalysis::Ref:
+ PrintModRefResults("Just Ref", PrintRef, *C, *D, F.getParent());
+ ++Ref; break;
+ case AliasAnalysis::ModRef:
+ PrintModRefResults("Both ModRef", PrintModRef, *C, *D, F.getParent());
+ ++ModRef; break;
+ }
+ }
+ }
+
return false;
}
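The new call-site/call-site loop above asks AA.getModRefInfo for every ordered pair of distinct call sites and tallies the four possible answers. A minimal standalone sketch of that counting pattern, with a made-up ModRefResult enum and query oracle rather than the LLVM API:

#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-ins for AliasAnalysis::ModRefResult and getModRefInfo().
enum ModRefResult { NoModRef, Ref, Mod, ModRef };

static ModRefResult queryModRef(int A, int B) {
  // Placeholder oracle: pretend call sites with the same parity interfere.
  return (A % 2 == B % 2) ? ModRef : NoModRef;
}

int main() {
  std::vector<int> CallSites = {1, 2, 3, 4};
  unsigned Counts[4] = {0, 0, 0, 0};

  // Every ordered pair of distinct call sites, as in the evaluator loop.
  for (std::size_t C = 0; C != CallSites.size(); ++C)
    for (std::size_t D = 0; D != CallSites.size(); ++D) {
      if (D == C) continue;
      ++Counts[queryModRef(CallSites[C], CallSites[D])];
    }

  std::printf("NoModRef=%u Just Ref=%u Just Mod=%u Both ModRef=%u\n",
              Counts[NoModRef], Counts[Ref], Counts[Mod], Counts[ModRef]);
  return 0;
}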
diff --git a/libclamav/c++/llvm/lib/Analysis/AliasDebugger.cpp b/libclamav/c++/llvm/lib/Analysis/AliasDebugger.cpp
index 88c2875..b9fe646 100644
--- a/libclamav/c++/llvm/lib/Analysis/AliasDebugger.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/AliasDebugger.cpp
@@ -39,14 +39,18 @@ namespace {
public:
static char ID; // Class identification, replacement for typeinfo
- AliasDebugger() : ModulePass(&ID) {}
+ AliasDebugger() : ModulePass(ID) {}
bool runOnModule(Module &M) {
InitializeAliasAnalysis(this); // set up super class
for(Module::global_iterator I = M.global_begin(),
- E = M.global_end(); I != E; ++I)
+ E = M.global_end(); I != E; ++I) {
Vals.insert(&*I);
+ for (User::const_op_iterator OI = I->op_begin(),
+ OE = I->op_end(); OI != OE; ++OI)
+ Vals.insert(*OI);
+ }
for(Module::iterator I = M.begin(),
E = M.end(); I != E; ++I){
@@ -58,8 +62,12 @@ namespace {
for (Function::const_iterator FI = I->begin(), FE = I->end();
FI != FE; ++FI)
for (BasicBlock::const_iterator BI = FI->begin(), BE = FI->end();
- BI != BE; ++BI)
+ BI != BE; ++BI) {
Vals.insert(&*BI);
+ for (User::const_op_iterator OI = BI->op_begin(),
+ OE = BI->op_end(); OI != OE; ++OI)
+ Vals.insert(*OI);
+ }
}
}
@@ -75,8 +83,8 @@ namespace {
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&AliasAnalysis::ID))
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
@@ -91,12 +99,14 @@ namespace {
return AliasAnalysis::alias(V1, V1Size, V2, V2Size);
}
- ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size) {
+ ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size) {
assert(Vals.find(P) != Vals.end() && "Never seen value in AA before");
return AliasAnalysis::getModRefInfo(CS, P, Size);
}
- ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
+ ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) {
return AliasAnalysis::getModRefInfo(CS1,CS2);
}
@@ -118,9 +128,8 @@ namespace {
}
char AliasDebugger::ID = 0;
-static RegisterPass<AliasDebugger>
-X("debug-aa", "AA use debugger", false, true);
-static RegisterAnalysisGroup<AliasAnalysis> Y(X);
+INITIALIZE_AG_PASS(AliasDebugger, AliasAnalysis, "debug-aa",
+ "AA use debugger", false, true, false);
Pass *llvm::createAliasDebugger() { return new AliasDebugger(); }
diff --git a/libclamav/c++/llvm/lib/Analysis/AliasSetTracker.cpp b/libclamav/c++/llvm/lib/Analysis/AliasSetTracker.cpp
index 02aff50..e74543b 100644
--- a/libclamav/c++/llvm/lib/Analysis/AliasSetTracker.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/AliasSetTracker.cpp
@@ -22,7 +22,6 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstIterator.h"
-#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -35,6 +34,7 @@ void AliasSet::mergeSetIn(AliasSet &AS, AliasSetTracker &AST) {
// Update the alias and access types of this set...
AccessTy |= AS.AccessTy;
AliasTy |= AS.AliasTy;
+ Volatile |= AS.Volatile;
if (AliasTy == MustAlias) {
// Check that these two merged sets really are must aliases. Since both
@@ -111,11 +111,11 @@ void AliasSet::addPointer(AliasSetTracker &AST, PointerRec &Entry,
*PtrListEnd = &Entry;
PtrListEnd = Entry.setPrevInList(PtrListEnd);
assert(*PtrListEnd == 0 && "End of list is not null?");
- addRef(); // Entry points to alias set...
+ addRef(); // Entry points to alias set.
}
void AliasSet::addCallSite(CallSite CS, AliasAnalysis &AA) {
- CallSites.push_back(CS);
+ CallSites.push_back(CS.getInstruction());
AliasAnalysis::ModRefBehavior Behavior = AA.getModRefBehavior(CS);
if (Behavior == AliasAnalysis::DoesNotAccessMemory)
@@ -140,7 +140,7 @@ bool AliasSet::aliasesPointer(const Value *Ptr, unsigned Size,
assert(CallSites.empty() && "Illegal must alias set!");
// If this is a set of MustAliases, only check to see if the pointer aliases
- // SOME value in the set...
+ // SOME value in the set.
PointerRec *SomePtr = getSomePointer();
assert(SomePtr && "Empty must-alias set??");
return AA.alias(SomePtr->getValue(), SomePtr->getSize(), Ptr, Size);
@@ -155,8 +155,7 @@ bool AliasSet::aliasesPointer(const Value *Ptr, unsigned Size,
// Check the call sites list and invoke list...
if (!CallSites.empty()) {
for (unsigned i = 0, e = CallSites.size(); i != e; ++i)
- if (AA.getModRefInfo(CallSites[i], const_cast<Value*>(Ptr), Size)
- != AliasAnalysis::NoModRef)
+ if (AA.getModRefInfo(CallSites[i], Ptr, Size) != AliasAnalysis::NoModRef)
return true;
}
@@ -167,10 +166,11 @@ bool AliasSet::aliasesCallSite(CallSite CS, AliasAnalysis &AA) const {
if (AA.doesNotAccessMemory(CS))
return false;
- for (unsigned i = 0, e = CallSites.size(); i != e; ++i)
- if (AA.getModRefInfo(CallSites[i], CS) != AliasAnalysis::NoModRef ||
- AA.getModRefInfo(CS, CallSites[i]) != AliasAnalysis::NoModRef)
+ for (unsigned i = 0, e = CallSites.size(); i != e; ++i) {
+ if (AA.getModRefInfo(getCallSite(i), CS) != AliasAnalysis::NoModRef ||
+ AA.getModRefInfo(CS, getCallSite(i)) != AliasAnalysis::NoModRef)
return true;
+ }
for (iterator I = begin(), E = end(); I != E; ++I)
if (AA.getModRefInfo(CS, I.getPointer(), I.getSize()) !=
@@ -200,14 +200,15 @@ void AliasSetTracker::clear() {
AliasSet *AliasSetTracker::findAliasSetForPointer(const Value *Ptr,
unsigned Size) {
AliasSet *FoundSet = 0;
- for (iterator I = begin(), E = end(); I != E; ++I)
- if (!I->Forward && I->aliasesPointer(Ptr, Size, AA)) {
- if (FoundSet == 0) { // If this is the first alias set ptr can go into.
- FoundSet = I; // Remember it.
- } else { // Otherwise, we must merge the sets.
- FoundSet->mergeSetIn(*I, *this); // Merge in contents.
- }
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ if (I->Forward || !I->aliasesPointer(Ptr, Size, AA)) continue;
+
+ if (FoundSet == 0) { // If this is the first alias set ptr can go into.
+ FoundSet = I; // Remember it.
+ } else { // Otherwise, we must merge the sets.
+ FoundSet->mergeSetIn(*I, *this); // Merge in contents.
}
+ }
return FoundSet;
}
@@ -226,15 +227,15 @@ bool AliasSetTracker::containsPointer(Value *Ptr, unsigned Size) const {
AliasSet *AliasSetTracker::findAliasSetForCallSite(CallSite CS) {
AliasSet *FoundSet = 0;
- for (iterator I = begin(), E = end(); I != E; ++I)
- if (!I->Forward && I->aliasesCallSite(CS, AA)) {
- if (FoundSet == 0) { // If this is the first alias set ptr can go into.
- FoundSet = I; // Remember it.
- } else if (!I->Forward) { // Otherwise, we must merge the sets.
- FoundSet->mergeSetIn(*I, *this); // Merge in contents.
- }
- }
-
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ if (I->Forward || !I->aliasesCallSite(CS, AA))
+ continue;
+
+ if (FoundSet == 0) // If this is the first alias set ptr can go into.
+ FoundSet = I; // Remember it.
+ else if (!I->Forward) // Otherwise, we must merge the sets.
+ FoundSet->mergeSetIn(*I, *this); // Merge in contents.
+ }
return FoundSet;
}
@@ -247,22 +248,24 @@ AliasSet &AliasSetTracker::getAliasSetForPointer(Value *Pointer, unsigned Size,
bool *New) {
AliasSet::PointerRec &Entry = getEntryFor(Pointer);
- // Check to see if the pointer is already known...
+ // Check to see if the pointer is already known.
if (Entry.hasAliasSet()) {
Entry.updateSize(Size);
// Return the set!
return *Entry.getAliasSet(*this)->getForwardedTarget(*this);
- } else if (AliasSet *AS = findAliasSetForPointer(Pointer, Size)) {
- // Add it to the alias set it aliases...
+ }
+
+ if (AliasSet *AS = findAliasSetForPointer(Pointer, Size)) {
+ // Add it to the alias set it aliases.
AS->addPointer(*this, Entry, Size);
return *AS;
- } else {
- if (New) *New = true;
- // Otherwise create a new alias set to hold the loaded pointer...
- AliasSets.push_back(new AliasSet());
- AliasSets.back().addPointer(*this, Entry, Size);
- return AliasSets.back();
}
+
+ if (New) *New = true;
+ // Otherwise create a new alias set to hold the loaded pointer.
+ AliasSets.push_back(new AliasSet());
+ AliasSets.back().addPointer(*this, Entry, Size);
+ return AliasSets.back();
}
bool AliasSetTracker::add(Value *Ptr, unsigned Size) {
@@ -305,28 +308,27 @@ bool AliasSetTracker::add(CallSite CS) {
return true; // doesn't alias anything
AliasSet *AS = findAliasSetForCallSite(CS);
- if (!AS) {
- AliasSets.push_back(new AliasSet());
- AS = &AliasSets.back();
- AS->addCallSite(CS, AA);
- return true;
- } else {
+ if (AS) {
AS->addCallSite(CS, AA);
return false;
}
+ AliasSets.push_back(new AliasSet());
+ AS = &AliasSets.back();
+ AS->addCallSite(CS, AA);
+ return true;
}
bool AliasSetTracker::add(Instruction *I) {
- // Dispatch to one of the other add methods...
+ // Dispatch to one of the other add methods.
if (LoadInst *LI = dyn_cast<LoadInst>(I))
return add(LI);
- else if (StoreInst *SI = dyn_cast<StoreInst>(I))
+ if (StoreInst *SI = dyn_cast<StoreInst>(I))
return add(SI);
- else if (CallInst *CI = dyn_cast<CallInst>(I))
+ if (CallInst *CI = dyn_cast<CallInst>(I))
return add(CI);
- else if (InvokeInst *II = dyn_cast<InvokeInst>(I))
+ if (InvokeInst *II = dyn_cast<InvokeInst>(I))
return add(II);
- else if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
+ if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
return add(VAAI);
return true;
}
@@ -343,23 +345,23 @@ void AliasSetTracker::add(const AliasSetTracker &AST) {
// Loop over all of the alias sets in AST, adding the pointers contained
// therein into the current alias sets. This can cause alias sets to be
// merged together in the current AST.
- for (const_iterator I = AST.begin(), E = AST.end(); I != E; ++I)
- if (!I->Forward) { // Ignore forwarding alias sets
- AliasSet &AS = const_cast<AliasSet&>(*I);
-
- // If there are any call sites in the alias set, add them to this AST.
- for (unsigned i = 0, e = AS.CallSites.size(); i != e; ++i)
- add(AS.CallSites[i]);
-
- // Loop over all of the pointers in this alias set...
- AliasSet::iterator I = AS.begin(), E = AS.end();
- bool X;
- for (; I != E; ++I) {
- AliasSet &NewAS = addPointer(I.getPointer(), I.getSize(),
- (AliasSet::AccessType)AS.AccessTy, X);
- if (AS.isVolatile()) NewAS.setVolatile();
- }
+ for (const_iterator I = AST.begin(), E = AST.end(); I != E; ++I) {
+ if (I->Forward) continue; // Ignore forwarding alias sets
+
+ AliasSet &AS = const_cast<AliasSet&>(*I);
+
+ // If there are any call sites in the alias set, add them to this AST.
+ for (unsigned i = 0, e = AS.CallSites.size(); i != e; ++i)
+ add(AS.CallSites[i]);
+
+ // Loop over all of the pointers in this alias set.
+ bool X;
+ for (AliasSet::iterator ASI = AS.begin(), E = AS.end(); ASI != E; ++ASI) {
+ AliasSet &NewAS = addPointer(ASI.getPointer(), ASI.getSize(),
+ (AliasSet::AccessType)AS.AccessTy, X);
+ if (AS.isVolatile()) NewAS.setVolatile();
}
+ }
}
/// remove - Remove the specified (potentially non-empty) alias set from the
@@ -435,11 +437,11 @@ bool AliasSetTracker::remove(Instruction *I) {
// Dispatch to one of the other remove methods...
if (LoadInst *LI = dyn_cast<LoadInst>(I))
return remove(LI);
- else if (StoreInst *SI = dyn_cast<StoreInst>(I))
+ if (StoreInst *SI = dyn_cast<StoreInst>(I))
return remove(SI);
- else if (CallInst *CI = dyn_cast<CallInst>(I))
+ if (CallInst *CI = dyn_cast<CallInst>(I))
return remove(CI);
- else if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
+ if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
return remove(VAAI);
return true;
}
@@ -455,12 +457,17 @@ void AliasSetTracker::deleteValue(Value *PtrVal) {
AA.deleteValue(PtrVal);
// If this is a call instruction, remove the callsite from the appropriate
- // AliasSet.
- CallSite CS = CallSite::get(PtrVal);
- if (CS.getInstruction())
- if (!AA.doesNotAccessMemory(CS))
- if (AliasSet *AS = findAliasSetForCallSite(CS))
- AS->removeCallSite(CS);
+ // AliasSet (if present).
+ if (CallSite CS = PtrVal) {
+ if (!AA.doesNotAccessMemory(CS)) {
+ // Scan all the alias sets to see if this call site is contained.
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ if (I->Forward) continue;
+
+ I->removeCallSite(CS);
+ }
+ }
+ }
// First, look up the PointerRec for this pointer.
PointerMapType::iterator I = PointerMap.find(PtrVal);
@@ -510,7 +517,7 @@ void AliasSetTracker::copyValue(Value *From, Value *To) {
//===----------------------------------------------------------------------===//
void AliasSet::print(raw_ostream &OS) const {
- OS << " AliasSet[" << format("0x%p", (void*)this) << "," << RefCount << "] ";
+ OS << " AliasSet[" << (void*)this << ", " << RefCount << "] ";
OS << (AliasTy == MustAlias ? "must" : "may") << " alias, ";
switch (AccessTy) {
case NoModRef: OS << "No access "; break;
@@ -536,7 +543,7 @@ void AliasSet::print(raw_ostream &OS) const {
OS << "\n " << CallSites.size() << " Call Sites: ";
for (unsigned i = 0, e = CallSites.size(); i != e; ++i) {
if (i) OS << ", ";
- WriteAsOperand(OS, CallSites[i].getCalledValue());
+ WriteAsOperand(OS, CallSites[i]);
}
}
OS << "\n";
@@ -580,7 +587,7 @@ namespace {
AliasSetTracker *Tracker;
public:
static char ID; // Pass identification, replacement for typeid
- AliasSetPrinter() : FunctionPass(&ID) {}
+ AliasSetPrinter() : FunctionPass(ID) {}
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
@@ -600,5 +607,5 @@ namespace {
}
char AliasSetPrinter::ID = 0;
-static RegisterPass<AliasSetPrinter>
-X("print-alias-sets", "Alias Set Printer", false, true);
+INITIALIZE_PASS(AliasSetPrinter, "print-alias-sets",
+ "Alias Set Printer", false, true);
diff --git a/libclamav/c++/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/libclamav/c++/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 31a649d..113c72b 100644
--- a/libclamav/c++/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -18,6 +18,7 @@
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
+#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
@@ -30,6 +31,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/GetElementPtrTypeIterator.h"
#include <algorithm>
using namespace llvm;
@@ -78,6 +80,20 @@ static bool isNonEscapingLocalObject(const Value *V) {
return false;
}
+/// isEscapeSource - Return true if the pointer is one which would have
+/// been considered an escape by isNonEscapingLocalObject.
+static bool isEscapeSource(const Value *V) {
+ if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
+ return true;
+
+ // The load case works because isNonEscapingLocalObject considers all
+ // stores to be escapes (it passes true for the StoreCaptures argument
+ // to PointerMayBeCaptured).
+ if (isa<LoadInst>(V))
+ return true;
+
+ return false;
+}
/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
@@ -94,7 +110,7 @@ static bool isObjectSmallerThan(const Value *V, unsigned Size,
} else if (const CallInst* CI = extractMallocCall(V)) {
if (!isArrayMalloc(V, &TD))
// The size is the argument to the malloc call.
- if (const ConstantInt* C = dyn_cast<ConstantInt>(CI->getOperand(1)))
+ if (const ConstantInt* C = dyn_cast<ConstantInt>(CI->getArgOperand(0)))
return (C->getZExtValue() < Size);
return false;
} else if (const Argument *A = dyn_cast<Argument>(V)) {
@@ -123,8 +139,8 @@ namespace {
///
struct NoAA : public ImmutablePass, public AliasAnalysis {
static char ID; // Class identification, replacement for typeinfo
- NoAA() : ImmutablePass(&ID) {}
- explicit NoAA(void *PID) : ImmutablePass(PID) { }
+ NoAA() : ImmutablePass(ID) {}
+ explicit NoAA(char &PID) : ImmutablePass(PID) { }
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
}
@@ -138,16 +154,20 @@ namespace {
return MayAlias;
}
- virtual void getArgumentAccesses(Function *F, CallSite CS,
- std::vector<PointerAccessInfo> &Info) {
- llvm_unreachable("This method may not be called on this function!");
+ virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
+ return UnknownModRefBehavior;
+ }
+ virtual ModRefBehavior getModRefBehavior(const Function *F) {
+ return UnknownModRefBehavior;
}
virtual bool pointsToConstantMemory(const Value *P) { return false; }
- virtual ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size) {
+ virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size) {
return ModRef;
}
- virtual ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
+ virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) {
return ModRef;
}
@@ -155,11 +175,11 @@ namespace {
virtual void copyValue(Value *From, Value *To) {}
/// getAdjustedAnalysisPointer - This method is used when a pass implements
- /// an analysis interface through multiple inheritance. If needed, it should
- /// override this to adjust the this pointer as needed for the specified pass
- /// info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&AliasAnalysis::ID))
+ /// an analysis interface through multiple inheritance. If needed, it
+ /// should override this to adjust the this pointer as needed for the
+ /// specified pass info.
+ virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ if (ID == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
@@ -168,53 +188,354 @@ namespace {
// Register this pass...
char NoAA::ID = 0;
-static RegisterPass<NoAA>
-U("no-aa", "No Alias Analysis (always returns 'may' alias)", true, true);
-
-// Declare that we implement the AliasAnalysis interface
-static RegisterAnalysisGroup<AliasAnalysis> V(U);
+INITIALIZE_AG_PASS(NoAA, AliasAnalysis, "no-aa",
+ "No Alias Analysis (always returns 'may' alias)",
+ true, true, false);
ImmutablePass *llvm::createNoAAPass() { return new NoAA(); }
//===----------------------------------------------------------------------===//
-// BasicAA Pass
+// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//
namespace {
+ enum ExtensionKind {
+ EK_NotExtended,
+ EK_SignExt,
+ EK_ZeroExt
+ };
+
+ struct VariableGEPIndex {
+ const Value *V;
+ ExtensionKind Extension;
+ int64_t Scale;
+ };
+}
+
+
+/// GetLinearExpression - Analyze the specified value as a linear expression:
+/// "A*V + B", where A and B are constant integers. Return the scale and offset
+/// values as APInts and return V as a Value*, and return whether we looked
+/// through any sign or zero extends. The incoming Value is known to have
+/// IntegerType and it may already be sign or zero extended.
+///
+/// Note that this looks through extends, so the high bits may not be
+/// represented in the result.
+static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
+ ExtensionKind &Extension,
+ const TargetData &TD, unsigned Depth) {
+ assert(V->getType()->isIntegerTy() && "Not an integer value");
+
+ // Limit our recursion depth.
+ if (Depth == 6) {
+ Scale = 1;
+ Offset = 0;
+ return V;
+ }
+
+ if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
+ if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
+ switch (BOp->getOpcode()) {
+ default: break;
+ case Instruction::Or:
+ // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
+ // analyze it.
+ if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), &TD))
+ break;
+ // FALL THROUGH.
+ case Instruction::Add:
+ V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
+ TD, Depth+1);
+ Offset += RHSC->getValue();
+ return V;
+ case Instruction::Mul:
+ V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
+ TD, Depth+1);
+ Offset *= RHSC->getValue();
+ Scale *= RHSC->getValue();
+ return V;
+ case Instruction::Shl:
+ V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
+ TD, Depth+1);
+ Offset <<= RHSC->getValue().getLimitedValue();
+ Scale <<= RHSC->getValue().getLimitedValue();
+ return V;
+ }
+ }
+ }
+
+ // Since GEP indices are sign extended anyway, we don't care about the high
+ // bits of a sign or zero extended value - just scales and offsets. The
+ // extensions have to be consistent though.
+ if ((isa<SExtInst>(V) && Extension != EK_ZeroExt) ||
+ (isa<ZExtInst>(V) && Extension != EK_SignExt)) {
+ Value *CastOp = cast<CastInst>(V)->getOperand(0);
+ unsigned OldWidth = Scale.getBitWidth();
+ unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
+ Scale.trunc(SmallWidth);
+ Offset.trunc(SmallWidth);
+ Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;
+
+ Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension,
+ TD, Depth+1);
+ Scale.zext(OldWidth);
+ Offset.zext(OldWidth);
+
+ return Result;
+ }
+
+ Scale = 1;
+ Offset = 0;
+ return V;
+}
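For intuition, GetLinearExpression reduces an index expression such as ((x << 2) + 12) to Scale = 4, Offset = 12 with V = x. A rough standalone model of that recursion over a toy expression tree follows; the Expr type is invented for the example, plain int64_t replaces APInt, and the sign/zero-extension handling is omitted:

#include <cassert>
#include <cstdint>
#include <memory>

// Toy expression: either the single variable V, or (Sub op C) for a constant C.
struct Expr {
  enum Kind { Var, Add, Mul, Shl } K;
  int64_t C;
  std::shared_ptr<Expr> Sub;
};

// Fill Scale/Offset so that the expression equals Scale*V + Offset.
static void linearize(const Expr &E, int64_t &Scale, int64_t &Offset) {
  switch (E.K) {
  case Expr::Var: Scale = 1; Offset = 0; return;
  case Expr::Add: linearize(*E.Sub, Scale, Offset); Offset += E.C; return;
  case Expr::Mul: linearize(*E.Sub, Scale, Offset); Scale *= E.C; Offset *= E.C; return;
  case Expr::Shl: linearize(*E.Sub, Scale, Offset); Scale <<= E.C; Offset <<= E.C; return;
  }
}

int main() {
  // (x << 2) + 12
  std::shared_ptr<Expr> X(new Expr{Expr::Var, 0, nullptr});
  std::shared_ptr<Expr> Shl(new Expr{Expr::Shl, 2, X});
  Expr Root{Expr::Add, 12, Shl};

  int64_t Scale = 0, Offset = 0;
  linearize(Root, Scale, Offset);
  assert(Scale == 4 && Offset == 12);
  return 0;
}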
+
+/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
+/// into a base pointer with a constant offset and a number of scaled symbolic
+/// offsets.
+///
+/// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
+/// the VarIndices vector) are Value*'s that are known to be scaled by the
+/// specified amount, but which may have other unrepresented high bits. As such,
+/// the gep cannot necessarily be reconstructed from its decomposed form.
+///
+/// When TargetData is around, this function is capable of analyzing everything
+/// that Value::getUnderlyingObject() can look through. When not, it just looks
+/// through pointer casts.
+///
+static const Value *
+DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
+ SmallVectorImpl<VariableGEPIndex> &VarIndices,
+ const TargetData *TD) {
+ // Limit recursion depth to limit compile time in crazy cases.
+ unsigned MaxLookup = 6;
+
+ BaseOffs = 0;
+ do {
+ // See if this is a bitcast or GEP.
+ const Operator *Op = dyn_cast<Operator>(V);
+ if (Op == 0) {
+ // The only non-operator case we can handle are GlobalAliases.
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ if (!GA->mayBeOverridden()) {
+ V = GA->getAliasee();
+ continue;
+ }
+ }
+ return V;
+ }
+
+ if (Op->getOpcode() == Instruction::BitCast) {
+ V = Op->getOperand(0);
+ continue;
+ }
+
+ const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
+ if (GEPOp == 0)
+ return V;
+
+ // Don't attempt to analyze GEPs over unsized objects.
+ if (!cast<PointerType>(GEPOp->getOperand(0)->getType())
+ ->getElementType()->isSized())
+ return V;
+
+ // If we are lacking TargetData information, we can't compute the offsets of
+ // elements computed by GEPs. However, we can handle bitcast equivalent
+ // GEPs.
+ if (TD == 0) {
+ if (!GEPOp->hasAllZeroIndices())
+ return V;
+ V = GEPOp->getOperand(0);
+ continue;
+ }
+
+ // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
+ gep_type_iterator GTI = gep_type_begin(GEPOp);
+ for (User::const_op_iterator I = GEPOp->op_begin()+1,
+ E = GEPOp->op_end(); I != E; ++I) {
+ Value *Index = *I;
+ // Compute the (potentially symbolic) offset in bytes for this index.
+ if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
+ // For a struct, add the member offset.
+ unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
+ if (FieldNo == 0) continue;
+
+ BaseOffs += TD->getStructLayout(STy)->getElementOffset(FieldNo);
+ continue;
+ }
+
+ // For an array/pointer, add the element offset, explicitly scaled.
+ if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
+ if (CIdx->isZero()) continue;
+ BaseOffs += TD->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
+ continue;
+ }
+
+ uint64_t Scale = TD->getTypeAllocSize(*GTI);
+ ExtensionKind Extension = EK_NotExtended;
+
+ // If the integer type is smaller than the pointer size, it is implicitly
+ // sign extended to pointer size.
+ unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
+ if (TD->getPointerSizeInBits() > Width)
+ Extension = EK_SignExt;
+
+ // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
+ APInt IndexScale(Width, 0), IndexOffset(Width, 0);
+ Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
+ *TD, 0);
+
+ // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
+ // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
+ BaseOffs += IndexOffset.getZExtValue()*Scale;
+ Scale *= IndexScale.getZExtValue();
+
+
+ // If we already had an occurrence of this index variable, merge this
+ // scale into it. For example, we want to handle:
+ // A[x][x] -> x*16 + x*4 -> x*20
+ // This also ensures that 'x' only appears in the index list once.
+ for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
+ if (VarIndices[i].V == Index &&
+ VarIndices[i].Extension == Extension) {
+ Scale += VarIndices[i].Scale;
+ VarIndices.erase(VarIndices.begin()+i);
+ break;
+ }
+ }
+
+ // Make sure that we have a scale that makes sense for this target's
+ // pointer size.
+ if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
+ Scale <<= ShiftBits;
+ Scale >>= ShiftBits;
+ }
+
+ if (Scale) {
+ VariableGEPIndex Entry = {Index, Extension, Scale};
+ VarIndices.push_back(Entry);
+ }
+ }
+
+ // Analyze the base pointer next.
+ V = GEPOp->getOperand(0);
+ } while (--MaxLookup);
+
+ // If the chain of expressions is too deep, just return early.
+ return V;
+}
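As a worked example of the decomposition (assuming a 64-bit target and 4-byte i32), a GEP computing &A[0][%i][4] into [10 x [10 x i32]]* %A yields BaseOffs = 16 plus a single variable index scaled by 40, the size of one [10 x i32] row. A small sketch of that bookkeeping with invented stand-in structs, not the types defined above:

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical mirror of the decomposed form: Base + BaseOffs + sum(Scale_i * V_i).
struct VarIndex { const char *V; int64_t Scale; };
struct DecomposedGEP { int64_t BaseOffs; std::vector<VarIndex> VarIndices; };

int main() {
  // getelementptr [10 x [10 x i32]]* %A, i64 0, i64 %i, i64 4
  DecomposedGEP D;
  D.BaseOffs = 0;
  D.BaseOffs += 0 * 400;                 // leading constant 0 over the 400-byte array
  D.VarIndices.push_back({"%i", 40});    // %i scaled by sizeof([10 x i32]) == 40
  D.BaseOffs += 4 * 4;                   // constant index 4 scaled by sizeof(i32)

  assert(D.BaseOffs == 16 && D.VarIndices.size() == 1 && D.VarIndices[0].Scale == 40);
  return 0;
}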
+
+/// GetIndexDifference - Dest and Src are the variable indices from two
+/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
+/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
+/// difference between the two pointers.
+static void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
+ const SmallVectorImpl<VariableGEPIndex> &Src) {
+ if (Src.empty()) return;
+
+ for (unsigned i = 0, e = Src.size(); i != e; ++i) {
+ const Value *V = Src[i].V;
+ ExtensionKind Extension = Src[i].Extension;
+ int64_t Scale = Src[i].Scale;
+
+ // Find V in Dest. This is N^2, but pointer indices almost never have more
+ // than a few variable indexes.
+ for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
+ if (Dest[j].V != V || Dest[j].Extension != Extension) continue;
+
+ // If we found it, subtract off Scale V's from the entry in Dest. If it
+ // goes to zero, remove the entry.
+ if (Dest[j].Scale != Scale)
+ Dest[j].Scale -= Scale;
+ else
+ Dest.erase(Dest.begin()+j);
+ Scale = 0;
+ break;
+ }
+
+ // If we didn't consume this entry, add it to the end of the Dest list.
+ if (Scale) {
+ VariableGEPIndex Entry = { V, Extension, -Scale };
+ Dest.push_back(Entry);
+ }
+ }
+}
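To see what GetIndexDifference computes: with Dest = {x*20} and Src = {x*4, y*8}, the result is Dest = {x*16, y*-8}, i.e. the variable part of GEP1 minus GEP2. A self-contained sketch over (name, scale) pairs, ignoring the extension kinds for simplicity:

#include <cassert>
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

typedef std::pair<std::string, long long> Term;   // (variable, scale)

// Dest -= Src: merge terms on the same variable, drop entries that cancel.
static void indexDifference(std::vector<Term> &Dest, const std::vector<Term> &Src) {
  for (std::size_t i = 0; i != Src.size(); ++i) {
    long long Scale = Src[i].second;
    for (std::size_t j = 0; j != Dest.size(); ++j) {
      if (Dest[j].first != Src[i].first) continue;
      if (Dest[j].second != Scale)
        Dest[j].second -= Scale;      // subtract off Scale V's
      else
        Dest.erase(Dest.begin() + j); // exact cancellation: remove the entry
      Scale = 0;
      break;
    }
    if (Scale)
      Dest.push_back(Term(Src[i].first, -Scale));
  }
}

int main() {
  std::vector<Term> Dest(1, Term("x", 20));
  std::vector<Term> Src;
  Src.push_back(Term("x", 4));
  Src.push_back(Term("y", 8));
  indexDifference(Dest, Src);
  assert(Dest.size() == 2 && Dest[0].second == 16 && Dest[1].second == -8);
  return 0;
}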
+
+//===----------------------------------------------------------------------===//
+// BasicAliasAnalysis Pass
+//===----------------------------------------------------------------------===//
+
+#ifndef NDEBUG
+static const Function *getParent(const Value *V) {
+ if (const Instruction *inst = dyn_cast<Instruction>(V))
+ return inst->getParent()->getParent();
+
+ if (const Argument *arg = dyn_cast<Argument>(V))
+ return arg->getParent();
+
+ return NULL;
+}
+
+static bool notDifferentParent(const Value *O1, const Value *O2) {
+
+ const Function *F1 = getParent(O1);
+ const Function *F2 = getParent(O2);
+
+ return !F1 || !F2 || F1 == F2;
+}
+#endif
+
+namespace {
/// BasicAliasAnalysis - This is the default alias analysis implementation.
/// Because it doesn't chain to a previous alias analysis (like -no-aa), it
/// derives from the NoAA class.
struct BasicAliasAnalysis : public NoAA {
static char ID; // Class identification, replacement for typeinfo
- BasicAliasAnalysis() : NoAA(&ID) {}
- AliasResult alias(const Value *V1, unsigned V1Size,
- const Value *V2, unsigned V2Size) {
- assert(VisitedPHIs.empty() && "VisitedPHIs must be cleared after use!");
+ BasicAliasAnalysis() : NoAA(ID) {}
+
+ virtual AliasResult alias(const Value *V1, unsigned V1Size,
+ const Value *V2, unsigned V2Size) {
+ assert(Visited.empty() && "Visited must be cleared after use!");
+ assert(notDifferentParent(V1, V2) &&
+ "BasicAliasAnalysis doesn't support interprocedural queries.");
AliasResult Alias = aliasCheck(V1, V1Size, V2, V2Size);
- VisitedPHIs.clear();
+ Visited.clear();
return Alias;
}
- ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
- ModRefResult getModRefInfo(CallSite CS1, CallSite CS2);
+ virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size);
+
+ virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) {
+ // The AliasAnalysis base class has some smarts, let's use them.
+ return AliasAnalysis::getModRefInfo(CS1, CS2);
+ }
/// pointsToConstantMemory - Chase pointers until we find a (constant
/// global) or not.
- bool pointsToConstantMemory(const Value *P);
+ virtual bool pointsToConstantMemory(const Value *P);
+
+ /// getModRefBehavior - Return the behavior when calling the given
+ /// call site.
+ virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+
+ /// getModRefBehavior - Return the behavior when calling the given function.
+ /// For use when the call site is not known.
+ virtual ModRefBehavior getModRefBehavior(const Function *F);
/// getAdjustedAnalysisPointer - This method is used when a pass implements
- /// an analysis interface through multiple inheritance. If needed, it should
- /// override this to adjust the this pointer as needed for the specified pass
- /// info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&AliasAnalysis::ID))
+ /// an analysis interface through multiple inheritance. If needed, it
+ /// should override this to adjust the this pointer as needed for the
+ /// specified pass info.
+ virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ if (ID == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
private:
- // VisitedPHIs - Track PHI nodes visited by a aliasCheck() call.
- SmallPtrSet<const Value*, 16> VisitedPHIs;
+ // Visited - Track instructions visited by aliasPHI(), aliasSelect(), and aliasGEP().
+ SmallPtrSet<const Value*, 16> Visited;
// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
// instruction against another.
@@ -238,11 +559,9 @@ namespace {
// Register this pass...
char BasicAliasAnalysis::ID = 0;
-static RegisterPass<BasicAliasAnalysis>
-X("basicaa", "Basic Alias Analysis (default AA impl)", false, true);
-
-// Declare that we implement the AliasAnalysis interface
-static RegisterAnalysisGroup<AliasAnalysis, true> Y(X);
+INITIALIZE_AG_PASS(BasicAliasAnalysis, AliasAnalysis, "basicaa",
+ "Basic Alias Analysis (default AA impl)",
+ false, true, true);
ImmutablePass *llvm::createBasicAliasAnalysisPass() {
return new BasicAliasAnalysis();
@@ -258,16 +577,53 @@ bool BasicAliasAnalysis::pointsToConstantMemory(const Value *P) {
// global to be marked constant in some modules and non-constant in others.
// GV may even be a declaration, not a definition.
return GV->isConstant();
- return false;
+
+ return NoAA::pointsToConstantMemory(P);
+}
+
+/// getModRefBehavior - Return the behavior when calling the given call site.
+AliasAnalysis::ModRefBehavior
+BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
+ if (CS.doesNotAccessMemory())
+ // Can't do better than this.
+ return DoesNotAccessMemory;
+
+ ModRefBehavior Min = UnknownModRefBehavior;
+
+ // If the callsite knows it only reads memory, don't return worse
+ // than that.
+ if (CS.onlyReadsMemory())
+ Min = OnlyReadsMemory;
+
+ // The AliasAnalysis base class has some smarts, let's use them.
+ return std::min(AliasAnalysis::getModRefBehavior(CS), Min);
}
+/// getModRefBehavior - Return the behavior when calling the given function.
+/// For use when the call site is not known.
+AliasAnalysis::ModRefBehavior
+BasicAliasAnalysis::getModRefBehavior(const Function *F) {
+ if (F->doesNotAccessMemory())
+ // Can't do better than this.
+ return DoesNotAccessMemory;
+ if (F->onlyReadsMemory())
+ return OnlyReadsMemory;
+ if (unsigned id = F->getIntrinsicID())
+ return getIntrinsicModRefBehavior(id);
+
+ return NoAA::getModRefBehavior(F);
+}
/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
/// simple "address taken" analysis on local objects.
AliasAnalysis::ModRefResult
-BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
+BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size) {
+ assert(notDifferentParent(CS.getInstruction(), P) &&
+ "AliasAnalysis query involving multiple functions!");
+
const Value *Object = P->getUnderlyingObject();
// If this is a tail call and P points to a stack location, we know that
@@ -276,7 +632,7 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
// the current function not to the current function, and a tail callee
// may reference them.
if (isa<AllocaInst>(Object))
- if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
+ if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
if (CI->isTailCall())
return NoModRef;
@@ -287,7 +643,7 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
isNonEscapingLocalObject(Object)) {
bool PassedAsArg = false;
unsigned ArgNo = 0;
- for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
+ for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
CI != CE; ++CI, ++ArgNo) {
// Only look at the no-capture pointer arguments.
if (!(*CI)->getType()->isPointerTy() ||
@@ -298,7 +654,7 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
// is impossible to alias the pointer we're checking. If not, we have to
// assume that the call could touch the pointer, even though it doesn't
// escape.
- if (!isNoAlias(cast<Value>(CI), ~0U, P, ~0U)) {
+ if (!isNoAlias(cast<Value>(CI), UnknownSize, P, UnknownSize)) {
PassedAsArg = true;
break;
}
@@ -309,127 +665,76 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
}
// Finally, handle specific knowledge of intrinsics.
- IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
- if (II == 0)
- return AliasAnalysis::getModRefInfo(CS, P, Size);
-
- switch (II->getIntrinsicID()) {
- default: break;
- case Intrinsic::memcpy:
- case Intrinsic::memmove: {
- unsigned Len = ~0U;
- if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getOperand(3)))
- Len = LenCI->getZExtValue();
- Value *Dest = II->getOperand(1);
- Value *Src = II->getOperand(2);
- if (isNoAlias(Dest, Len, P, Size)) {
- if (isNoAlias(Src, Len, P, Size))
- return NoModRef;
- return Ref;
+ const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
+ if (II != 0)
+ switch (II->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove: {
+ unsigned Len = UnknownSize;
+ if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
+ Len = LenCI->getZExtValue();
+ Value *Dest = II->getArgOperand(0);
+ Value *Src = II->getArgOperand(1);
+ if (isNoAlias(Dest, Len, P, Size)) {
+ if (isNoAlias(Src, Len, P, Size))
+ return NoModRef;
+ return Ref;
+ }
+ break;
}
- break;
- }
- case Intrinsic::memset:
- // Since memset is 'accesses arguments' only, the AliasAnalysis base class
- // will handle it for the variable length case.
- if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getOperand(3))) {
- unsigned Len = LenCI->getZExtValue();
- Value *Dest = II->getOperand(1);
- if (isNoAlias(Dest, Len, P, Size))
+ case Intrinsic::memset:
+ // Since memset is 'accesses arguments' only, the AliasAnalysis base class
+ // will handle it for the variable length case.
+ if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
+ unsigned Len = LenCI->getZExtValue();
+ Value *Dest = II->getArgOperand(0);
+ if (isNoAlias(Dest, Len, P, Size))
+ return NoModRef;
+ }
+ break;
+ case Intrinsic::atomic_cmp_swap:
+ case Intrinsic::atomic_swap:
+ case Intrinsic::atomic_load_add:
+ case Intrinsic::atomic_load_sub:
+ case Intrinsic::atomic_load_and:
+ case Intrinsic::atomic_load_nand:
+ case Intrinsic::atomic_load_or:
+ case Intrinsic::atomic_load_xor:
+ case Intrinsic::atomic_load_max:
+ case Intrinsic::atomic_load_min:
+ case Intrinsic::atomic_load_umax:
+ case Intrinsic::atomic_load_umin:
+ if (TD) {
+ Value *Op1 = II->getArgOperand(0);
+ unsigned Op1Size = TD->getTypeStoreSize(Op1->getType());
+ if (isNoAlias(Op1, Op1Size, P, Size))
+ return NoModRef;
+ }
+ break;
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::invariant_start: {
+ unsigned PtrSize =
+ cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
+ if (isNoAlias(II->getArgOperand(1), PtrSize, P, Size))
return NoModRef;
+ break;
}
- break;
- case Intrinsic::atomic_cmp_swap:
- case Intrinsic::atomic_swap:
- case Intrinsic::atomic_load_add:
- case Intrinsic::atomic_load_sub:
- case Intrinsic::atomic_load_and:
- case Intrinsic::atomic_load_nand:
- case Intrinsic::atomic_load_or:
- case Intrinsic::atomic_load_xor:
- case Intrinsic::atomic_load_max:
- case Intrinsic::atomic_load_min:
- case Intrinsic::atomic_load_umax:
- case Intrinsic::atomic_load_umin:
- if (TD) {
- Value *Op1 = II->getOperand(1);
- unsigned Op1Size = TD->getTypeStoreSize(Op1->getType());
- if (isNoAlias(Op1, Op1Size, P, Size))
+ case Intrinsic::invariant_end: {
+ unsigned PtrSize =
+ cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
+ if (isNoAlias(II->getArgOperand(2), PtrSize, P, Size))
return NoModRef;
+ break;
+ }
}
- break;
- case Intrinsic::lifetime_start:
- case Intrinsic::lifetime_end:
- case Intrinsic::invariant_start: {
- unsigned PtrSize = cast<ConstantInt>(II->getOperand(1))->getZExtValue();
- if (isNoAlias(II->getOperand(2), PtrSize, P, Size))
- return NoModRef;
- break;
- }
- case Intrinsic::invariant_end: {
- unsigned PtrSize = cast<ConstantInt>(II->getOperand(2))->getZExtValue();
- if (isNoAlias(II->getOperand(3), PtrSize, P, Size))
- return NoModRef;
- break;
- }
- }
// The AliasAnalysis base class has some smarts, let's use them.
return AliasAnalysis::getModRefInfo(CS, P, Size);
}
-AliasAnalysis::ModRefResult
-BasicAliasAnalysis::getModRefInfo(CallSite CS1, CallSite CS2) {
- // If CS1 or CS2 are readnone, they don't interact.
- ModRefBehavior CS1B = AliasAnalysis::getModRefBehavior(CS1);
- if (CS1B == DoesNotAccessMemory) return NoModRef;
-
- ModRefBehavior CS2B = AliasAnalysis::getModRefBehavior(CS2);
- if (CS2B == DoesNotAccessMemory) return NoModRef;
-
- // If they both only read from memory, just return ref.
- if (CS1B == OnlyReadsMemory && CS2B == OnlyReadsMemory)
- return Ref;
-
- // Otherwise, fall back to NoAA (mod+ref).
- return NoAA::getModRefInfo(CS1, CS2);
-}
-
-/// GetIndiceDifference - Dest and Src are the variable indices from two
-/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
-/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
-/// difference between the two pointers.
-static void GetIndiceDifference(
- SmallVectorImpl<std::pair<const Value*, int64_t> > &Dest,
- const SmallVectorImpl<std::pair<const Value*, int64_t> > &Src) {
- if (Src.empty()) return;
-
- for (unsigned i = 0, e = Src.size(); i != e; ++i) {
- const Value *V = Src[i].first;
- int64_t Scale = Src[i].second;
-
- // Find V in Dest. This is N^2, but pointer indices almost never have more
- // than a few variable indexes.
- for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
- if (Dest[j].first != V) continue;
-
- // If we found it, subtract off Scale V's from the entry in Dest. If it
- // goes to zero, remove the entry.
- if (Dest[j].second != Scale)
- Dest[j].second -= Scale;
- else
- Dest.erase(Dest.begin()+j);
- Scale = 0;
- break;
- }
-
- // If we didn't consume this entry, add it to the end of the Dest list.
- if (Scale)
- Dest.push_back(std::make_pair(V, -Scale));
- }
-}
-
/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
/// anything about V2. UnderlyingV1 is GEP1->getUnderlyingObject(),
@@ -440,14 +745,22 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
const Value *V2, unsigned V2Size,
const Value *UnderlyingV1,
const Value *UnderlyingV2) {
+ // If this GEP has been visited before, we're on a use-def cycle.
+ // Such cycles are only valid when PHI nodes are involved or in unreachable
+ // code. The visitPHI function catches cycles containing PHIs, but there
+ // could still be a cycle without PHIs in unreachable code.
+ if (!Visited.insert(GEP1))
+ return MayAlias;
+
int64_t GEP1BaseOffset;
- SmallVector<std::pair<const Value*, int64_t>, 4> GEP1VariableIndices;
+ SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;
// If we have two gep instructions with must-alias'ing base pointers, figure
// out if the indexes to the GEP tell us anything about the derived pointer.
if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
// Do the base pointers alias?
- AliasResult BaseAlias = aliasCheck(UnderlyingV1, ~0U, UnderlyingV2, ~0U);
+ AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize,
+ UnderlyingV2, UnknownSize);
// If we get a No or May, then return it immediately, no amount of analysis
// will improve this situation.
@@ -460,7 +773,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
int64_t GEP2BaseOffset;
- SmallVector<std::pair<const Value*, int64_t>, 4> GEP2VariableIndices;
+ SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
const Value *GEP2BasePtr =
DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
@@ -476,7 +789,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
// Subtract the GEP2 pointer from the GEP1 pointer to find out their
// symbolic difference.
GEP1BaseOffset -= GEP2BaseOffset;
- GetIndiceDifference(GEP1VariableIndices, GEP2VariableIndices);
+ GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);
} else {
// Check to see if these two pointers are related by the getelementptr
@@ -484,10 +797,10 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
// pointer, we know they cannot alias.
// If both accesses are unknown size, we can't do anything useful here.
- if (V1Size == ~0U && V2Size == ~0U)
+ if (V1Size == UnknownSize && V2Size == UnknownSize)
return MayAlias;
- AliasResult R = aliasCheck(UnderlyingV1, ~0U, V2, V2Size);
+ AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, V2, V2Size);
if (R != MustAlias)
// If V2 may alias GEP base pointer, conservatively returns MayAlias.
// If V2 is known not to alias GEP base pointer, then the two values
@@ -531,8 +844,8 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
// provides an offset of 4 bytes (assuming a <= 4 byte access).
for (unsigned i = 0, e = GEP1VariableIndices.size();
i != e && GEP1BaseOffset;++i)
- if (int64_t RemovedOffset = GEP1BaseOffset/GEP1VariableIndices[i].second)
- GEP1BaseOffset -= RemovedOffset*GEP1VariableIndices[i].second;
+ if (int64_t RemovedOffset = GEP1BaseOffset/GEP1VariableIndices[i].Scale)
+ GEP1BaseOffset -= RemovedOffset*GEP1VariableIndices[i].Scale;
// If our known offset is bigger than the access size, we know we don't have
// an alias.
@@ -550,6 +863,13 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasSelect(const SelectInst *SI, unsigned SISize,
const Value *V2, unsigned V2Size) {
+ // If this select has been visited before, we're on a use-def cycle.
+ // Such cycles are only valid when PHI nodes are involved or in unreachable
+ // code. The visitPHI function catches cycles containing PHIs, but there
+ // could still be a cycle without PHIs in unreachable code.
+ if (!Visited.insert(SI))
+ return MayAlias;
+
// If the values are Selects with the same condition, we can do a more precise
// check: just check for aliases between the values on corresponding arms.
if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
@@ -570,11 +890,17 @@ BasicAliasAnalysis::aliasSelect(const SelectInst *SI, unsigned SISize,
// If both arms of the Select node NoAlias or MustAlias V2, then returns
// NoAlias / MustAlias. Otherwise, returns MayAlias.
AliasResult Alias =
- aliasCheck(SI->getTrueValue(), SISize, V2, V2Size);
+ aliasCheck(V2, V2Size, SI->getTrueValue(), SISize);
if (Alias == MayAlias)
return MayAlias;
+
+ // If V2 is visited, the recursive case will have been caught in the
+ // above aliasCheck call, so these subsequent calls to aliasCheck
+ // don't need to assume that V2 is being visited recursively.
+ Visited.erase(V2);
+
AliasResult ThisAlias =
- aliasCheck(SI->getFalseValue(), SISize, V2, V2Size);
+ aliasCheck(V2, V2Size, SI->getFalseValue(), SISize);
if (ThisAlias != Alias)
return MayAlias;
return Alias;
@@ -586,7 +912,7 @@ AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasPHI(const PHINode *PN, unsigned PNSize,
const Value *V2, unsigned V2Size) {
// The PHI node has already been visited, avoid recursion any further.
- if (!VisitedPHIs.insert(PN))
+ if (!Visited.insert(PN))
return MayAlias;
// If the values are PHIs in the same block, we can do a more precise
@@ -636,10 +962,10 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, unsigned PNSize,
for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
Value *V = V1Srcs[i];
- // If V2 is a PHI, the recursive case will have been caught in the
+ // If V2 is visited, the recursive case will have been caught in the
// above aliasCheck call, so these subsequent calls to aliasCheck
// don't need to assume that V2 is being visited recursively.
- VisitedPHIs.erase(V2);
+ Visited.erase(V2);
AliasResult ThisAlias = aliasCheck(V2, V2Size, V, PNSize);
if (ThisAlias != Alias || ThisAlias == MayAlias)
@@ -655,6 +981,11 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, unsigned PNSize,
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,
const Value *V2, unsigned V2Size) {
+ // If either of the memory references is empty, it doesn't matter what the
+ // pointer values are.
+ if (V1Size == 0 || V2Size == 0)
+ return NoAlias;
+
// Strip off any casts if they exist.
V1 = V1->stripPointerCasts();
V2 = V2->stripPointerCasts();
@@ -688,40 +1019,39 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,
(isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
return NoAlias;
- // Arguments can't alias with local allocations or noalias calls.
- if ((isa<Argument>(O1) && (isa<AllocaInst>(O2) || isNoAliasCall(O2))) ||
- (isa<Argument>(O2) && (isa<AllocaInst>(O1) || isNoAliasCall(O1))))
+ // Arguments can't alias with local allocations or noalias calls
+ // in the same function.
+ if (((isa<Argument>(O1) && (isa<AllocaInst>(O2) || isNoAliasCall(O2))) ||
+ (isa<Argument>(O2) && (isa<AllocaInst>(O1) || isNoAliasCall(O1)))))
return NoAlias;
// Most objects can't alias null.
- if ((isa<ConstantPointerNull>(V2) && isKnownNonNull(O1)) ||
- (isa<ConstantPointerNull>(V1) && isKnownNonNull(O2)))
+ if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
+ (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
return NoAlias;
- }
+ // If one pointer is the result of a call/invoke or load and the other is a
+ // non-escaping local object within the same function, then we know the
+ // object couldn't escape to a point where the call could return it.
+ //
+ // Note that if the pointers are in different functions, there are a
+ // variety of complications. A call with a nocapture argument may still
+ // temporarily store the nocapture argument's value in a temporary memory
+ // location if that memory location doesn't escape. Or it may pass a
+ // nocapture value to other functions as long as they don't capture it.
+ if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
+ return NoAlias;
+ if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
+ return NoAlias;
+ }
+
// If the size of one access is larger than the entire object on the other
// side, then we know such behavior is undefined and can assume no alias.
if (TD)
- if ((V1Size != ~0U && isObjectSmallerThan(O2, V1Size, *TD)) ||
- (V2Size != ~0U && isObjectSmallerThan(O1, V2Size, *TD)))
+ if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD)) ||
+ (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD)))
return NoAlias;
- // If one pointer is the result of a call/invoke or load and the other is a
- // non-escaping local object, then we know the object couldn't escape to a
- // point where the call could return it. The load case works because
- // isNonEscapingLocalObject considers all stores to be escapes (it
- // passes true for the StoreCaptures argument to PointerMayBeCaptured).
- if (O1 != O2) {
- if ((isa<CallInst>(O1) || isa<InvokeInst>(O1) || isa<LoadInst>(O1) ||
- isa<Argument>(O1)) &&
- isNonEscapingLocalObject(O2))
- return NoAlias;
- if ((isa<CallInst>(O2) || isa<InvokeInst>(O2) || isa<LoadInst>(O2) ||
- isa<Argument>(O2)) &&
- isNonEscapingLocalObject(O1))
- return NoAlias;
- }
-
// FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
// GEP can't simplify, we don't even look at the PHI cases.
if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
@@ -746,7 +1076,7 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,
if (const SelectInst *S1 = dyn_cast<SelectInst>(V1))
return aliasSelect(S1, V1Size, V2, V2Size);
- return MayAlias;
+ return NoAA::alias(V1, V1Size, V2, V2Size);
}
// Make sure that anything that uses AliasAnalysis pulls in this file.
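The aliasGEP, aliasSelect and aliasPHI changes above share one guard: a Visited set whose insert() reports whether the value was already present, so revisiting a value along a use-def cycle conservatively returns MayAlias. A minimal model of that guard, using std::set in place of SmallPtrSet:

#include <cassert>
#include <set>

enum AliasResult { NoAlias, MayAlias, MustAlias };

static std::set<int> Visited;

static AliasResult aliasCheck(int V) {
  // std::set::insert returns {iterator, inserted}; a false second means the
  // value was seen before, i.e. we are going around a cycle: give up.
  if (!Visited.insert(V).second)
    return MayAlias;
  return NoAlias;                     // placeholder for the real per-kind analysis
}

int main() {
  assert(aliasCheck(7) == NoAlias);
  assert(aliasCheck(7) == MayAlias);  // second visit hits the cycle guard
  Visited.clear();                    // mirrors clearing Visited after each query
  return 0;
}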
diff --git a/libclamav/c++/llvm/lib/Analysis/CFGPrinter.cpp b/libclamav/c++/llvm/lib/Analysis/CFGPrinter.cpp
index e06704b..617a362 100644
--- a/libclamav/c++/llvm/lib/Analysis/CFGPrinter.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/CFGPrinter.cpp
@@ -25,7 +25,7 @@ using namespace llvm;
namespace {
struct CFGViewer : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- CFGViewer() : FunctionPass(&ID) {}
+ CFGViewer() : FunctionPass(ID) {}
virtual bool runOnFunction(Function &F) {
F.viewCFG();
@@ -41,13 +41,12 @@ namespace {
}
char CFGViewer::ID = 0;
-static RegisterPass<CFGViewer>
-V0("view-cfg", "View CFG of function", false, true);
+INITIALIZE_PASS(CFGViewer, "view-cfg", "View CFG of function", false, true);
namespace {
struct CFGOnlyViewer : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- CFGOnlyViewer() : FunctionPass(&ID) {}
+ CFGOnlyViewer() : FunctionPass(ID) {}
virtual bool runOnFunction(Function &F) {
F.viewCFGOnly();
@@ -63,15 +62,14 @@ namespace {
}
char CFGOnlyViewer::ID = 0;
-static RegisterPass<CFGOnlyViewer>
-V1("view-cfg-only",
- "View CFG of function (with no function bodies)", false, true);
+INITIALIZE_PASS(CFGOnlyViewer, "view-cfg-only",
+ "View CFG of function (with no function bodies)", false, true);
namespace {
struct CFGPrinter : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- CFGPrinter() : FunctionPass(&ID) {}
- explicit CFGPrinter(void *pid) : FunctionPass(pid) {}
+ CFGPrinter() : FunctionPass(ID) {}
+ explicit CFGPrinter(char &pid) : FunctionPass(pid) {}
virtual bool runOnFunction(Function &F) {
std::string Filename = "cfg." + F.getNameStr() + ".dot";
@@ -97,14 +95,14 @@ namespace {
}
char CFGPrinter::ID = 0;
-static RegisterPass<CFGPrinter>
-P1("dot-cfg", "Print CFG of function to 'dot' file", false, true);
+INITIALIZE_PASS(CFGPrinter, "dot-cfg", "Print CFG of function to 'dot' file",
+ false, true);
namespace {
struct CFGOnlyPrinter : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- CFGOnlyPrinter() : FunctionPass(&ID) {}
- explicit CFGOnlyPrinter(void *pid) : FunctionPass(pid) {}
+ CFGOnlyPrinter() : FunctionPass(ID) {}
+ explicit CFGOnlyPrinter(char &pid) : FunctionPass(pid) {}
virtual bool runOnFunction(Function &F) {
std::string Filename = "cfg." + F.getNameStr() + ".dot";
errs() << "Writing '" << Filename << "'...";
@@ -128,9 +126,9 @@ namespace {
}
char CFGOnlyPrinter::ID = 0;
-static RegisterPass<CFGOnlyPrinter>
-P2("dot-cfg-only",
- "Print CFG of function to 'dot' file (with no function bodies)", false, true);
+INITIALIZE_PASS(CFGOnlyPrinter, "dot-cfg-only",
+ "Print CFG of function to 'dot' file (with no function bodies)",
+ false, true);
/// viewCFG - This function is meant for use from the debugger. You can just
/// say 'call F->viewCFG()' and a ghostview window should pop up from the
diff --git a/libclamav/c++/llvm/lib/Analysis/CMakeLists.txt b/libclamav/c++/llvm/lib/Analysis/CMakeLists.txt
index 17c9b86..6a2ab68 100644
--- a/libclamav/c++/llvm/lib/Analysis/CMakeLists.txt
+++ b/libclamav/c++/llvm/lib/Analysis/CMakeLists.txt
@@ -21,12 +21,15 @@ add_llvm_library(LLVMAnalysis
LazyValueInfo.cpp
LibCallAliasAnalysis.cpp
LibCallSemantics.cpp
+ Lint.cpp
LiveValues.cpp
+ Loads.cpp
LoopDependenceAnalysis.cpp
LoopInfo.cpp
LoopPass.cpp
MemoryBuiltins.cpp
MemoryDependenceAnalysis.cpp
+ ModuleDebugInfoPrinter.cpp
PHITransAddr.cpp
PointerTracking.cpp
PostDominators.cpp
@@ -35,11 +38,15 @@ add_llvm_library(LLVMAnalysis
ProfileInfoLoader.cpp
ProfileInfoLoaderPass.cpp
ProfileVerifierPass.cpp
+ RegionInfo.cpp
+ RegionPrinter.cpp
ScalarEvolution.cpp
ScalarEvolutionAliasAnalysis.cpp
ScalarEvolutionExpander.cpp
+ ScalarEvolutionNormalization.cpp
SparsePropagation.cpp
Trace.cpp
+ TypeBasedAliasAnalysis.cpp
ValueTracking.cpp
)
diff --git a/libclamav/c++/llvm/lib/Analysis/CaptureTracking.cpp b/libclamav/c++/llvm/lib/Analysis/CaptureTracking.cpp
index 8767c18..90eae20 100644
--- a/libclamav/c++/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/CaptureTracking.cpp
@@ -49,7 +49,7 @@ bool llvm::PointerMayBeCaptured(const Value *V,
SmallSet<Use*, Threshold> Visited;
int Count = 0;
- for (Value::use_const_iterator UI = V->use_begin(), UE = V->use_end();
+ for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
UI != UE; ++UI) {
// If there are lots of uses, conservatively say that the value
// is captured to avoid taking too much compile time.
@@ -69,7 +69,7 @@ bool llvm::PointerMayBeCaptured(const Value *V,
switch (I->getOpcode()) {
case Instruction::Call:
case Instruction::Invoke: {
- CallSite CS = CallSite::get(I);
+ CallSite CS(I);
// Not captured if the callee is readonly, doesn't return a copy through
// its return value and doesn't unwind (a readonly function can leak bits
// by throwing an exception or not depending on the input value).
diff --git a/libclamav/c++/llvm/lib/Analysis/ConstantFolding.cpp b/libclamav/c++/llvm/lib/Analysis/ConstantFolding.cpp
index 114db2d..0bf7967 100644
--- a/libclamav/c++/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/ConstantFolding.cpp
@@ -208,7 +208,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
i != e; ++i, ++GTI) {
ConstantInt *CI = dyn_cast<ConstantInt>(*i);
if (!CI) return false; // Index isn't a simple constant?
- if (CI->getZExtValue() == 0) continue; // Not adding anything.
+ if (CI->isZero()) continue; // Not adding anything.
if (const StructType *ST = dyn_cast<StructType>(*GTI)) {
// N = N + Offset
@@ -401,7 +401,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
APInt ResultVal = APInt(IntType->getBitWidth(), RawBytes[BytesLoaded-1]);
for (unsigned i = 1; i != BytesLoaded; ++i) {
ResultVal <<= 8;
- ResultVal |= APInt(IntType->getBitWidth(), RawBytes[BytesLoaded-1-i]);
+ ResultVal |= RawBytes[BytesLoaded-1-i];
}
return ConstantInt::get(IntType->getContext(), ResultVal);
@@ -436,8 +436,10 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
unsigned StrLen = Str.length();
const Type *Ty = cast<PointerType>(CE->getType())->getElementType();
unsigned NumBits = Ty->getPrimitiveSizeInBits();
- // Replace LI with immediate integer store.
- if ((NumBits >> 3) == StrLen + 1) {
+ // Replace load with immediate integer if the result is an integer or fp
+ // value.
+ if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
+ (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
APInt StrVal(NumBits, 0);
APInt SingleChar(NumBits, 0);
if (TD->isLittleEndian()) {
@@ -454,7 +456,11 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
SingleChar = 0;
StrVal = (StrVal << 8) | SingleChar;
}
- return ConstantInt::get(CE->getContext(), StrVal);
+
+ Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
+ if (Ty->isFloatingPointTy())
+ Res = ConstantExpr::getBitCast(Res, Ty);
+ return Res;
}
}
@@ -564,21 +570,6 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
unsigned BitWidth =
TD->getTypeSizeInBits(TD->getIntPtrType(Ptr->getContext()));
- APInt BasePtr(BitWidth, 0);
- bool BaseIsInt = true;
- if (!Ptr->isNullValue()) {
- // If this is a inttoptr from a constant int, we can fold this as the base,
- // otherwise we can't.
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
- if (CE->getOpcode() == Instruction::IntToPtr)
- if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0))) {
- BasePtr = Base->getValue();
- BasePtr.zextOrTrunc(BitWidth);
- }
-
- if (BasePtr == 0)
- BaseIsInt = false;
- }
// If this is a constant expr gep that is effectively computing an
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
@@ -589,9 +580,40 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
APInt Offset = APInt(BitWidth,
TD->getIndexedOffset(Ptr->getType(),
(Value**)Ops+1, NumOps-1));
+ Ptr = cast<Constant>(Ptr->stripPointerCasts());
+
+ // If this is a GEP of a GEP, fold it all into a single GEP.
+ while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
+ SmallVector<Value *, 4> NestedOps(GEP->op_begin()+1, GEP->op_end());
+
+ // Do not try to incorporate the sub-GEP if some index is not a number.
+ bool AllConstantInt = true;
+ for (unsigned i = 0, e = NestedOps.size(); i != e; ++i)
+ if (!isa<ConstantInt>(NestedOps[i])) {
+ AllConstantInt = false;
+ break;
+ }
+ if (!AllConstantInt)
+ break;
+
+ Ptr = cast<Constant>(GEP->getOperand(0));
+ Offset += APInt(BitWidth,
+ TD->getIndexedOffset(Ptr->getType(),
+ (Value**)NestedOps.data(),
+ NestedOps.size()));
+ Ptr = cast<Constant>(Ptr->stripPointerCasts());
+ }
+
// If the base value for this address is a literal integer value, fold the
// getelementptr to the resulting integer value casted to the pointer type.
- if (BaseIsInt) {
+ APInt BasePtr(BitWidth, 0);
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
+ if (CE->getOpcode() == Instruction::IntToPtr)
+ if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0))) {
+ BasePtr = Base->getValue();
+ BasePtr.zextOrTrunc(BitWidth);
+ }
+ if (Ptr->isNullValue() || BasePtr != 0) {
Constant *C = ConstantInt::get(Ptr->getContext(), Offset+BasePtr);
return ConstantExpr::getIntToPtr(C, ResultTy);
}
@@ -600,7 +622,6 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
// we eliminate over-indexing of the notional static type array bounds.
// This makes it easy to determine if the getelementptr is "inbounds".
// Also, this helps GlobalOpt do SROA on GlobalVariables.
- Ptr = cast<Constant>(Ptr->stripPointerCasts());
const Type *Ty = Ptr->getType();
SmallVector<Constant*, 32> NewIdxs;
do {
@@ -757,9 +778,9 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
case Instruction::ICmp:
case Instruction::FCmp: assert(0 && "Invalid for compares");
case Instruction::Call:
- if (Function *F = dyn_cast<Function>(Ops[0]))
+ if (Function *F = dyn_cast<Function>(Ops[NumOps - 1]))
if (canConstantFoldCallTo(F))
- return ConstantFoldCall(F, Ops+1, NumOps-1);
+ return ConstantFoldCall(F, Ops, NumOps - 1);
return 0;
case Instruction::PtrToInt:
// If the input is a inttoptr, eliminate the pair. This requires knowing
@@ -979,6 +1000,8 @@ llvm::canConstantFoldCallTo(const Function *F) {
case Intrinsic::usub_with_overflow:
case Intrinsic::sadd_with_overflow:
case Intrinsic::ssub_with_overflow:
+ case Intrinsic::convert_from_fp16:
+ case Intrinsic::convert_to_fp16:
return true;
default:
return false;
@@ -1059,6 +1082,15 @@ llvm::ConstantFoldCall(Function *F,
const Type *Ty = F->getReturnType();
if (NumOperands == 1) {
if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
+ if (Name == "llvm.convert.to.fp16") {
+ APFloat Val(Op->getValueAPF());
+
+ bool lost = false;
+ Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost);
+
+ return ConstantInt::get(F->getContext(), Val.bitcastToAPInt());
+ }
+
if (!Ty->isFloatTy() && !Ty->isDoubleTy())
return 0;
/// Currently APFloat versions of these functions do not exist, so we use
@@ -1143,6 +1175,20 @@ llvm::ConstantFoldCall(Function *F,
return ConstantInt::get(Ty, Op->getValue().countTrailingZeros());
else if (Name.startswith("llvm.ctlz"))
return ConstantInt::get(Ty, Op->getValue().countLeadingZeros());
+ else if (Name == "llvm.convert.from.fp16") {
+ APFloat Val(Op->getValue());
+
+ bool lost = false;
+ APFloat::opStatus status =
+ Val.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);
+
+ // Conversion is always precise.
+ status = status;
+ assert(status == APFloat::opOK && !lost &&
+ "Precision lost during fp16 constfolding");
+
+ return ConstantFP::get(F->getContext(), Val);
+ }
return 0;
}
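
The ConstantFolding.cpp changes above track three LLVM 2.8 developments: the callee of a call is now its last operand (hence Ops[NumOps - 1]), nested constant GEPs are collapsed into a single accumulated offset, and the llvm.convert.to.fp16 / llvm.convert.from.fp16 intrinsics are constant-folded through APFloat. Below is a small self-contained sketch of that half-precision round trip, using only the APFloat calls visible in the hunks; the program itself and its sample value are illustrative, not part of the patch.

// Sketch of the fp16 folding done above, using the same APFloat calls.
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include <cstdio>

using namespace llvm;

int main() {
  // Fold "llvm.convert.to.fp16(1.5f)": narrow to IEEE half, keep the raw bits.
  APFloat Val(1.5f);
  bool lost = false;
  Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost);
  APInt HalfBits = Val.bitcastToAPInt();           // 16-bit pattern
  std::printf("fp16 bits: 0x%04x (lost=%d)\n",
              (unsigned)HalfBits.getZExtValue(), lost ? 1 : 0);

  // Fold "llvm.convert.from.fp16(bits)": widen back to IEEE single.
  APFloat Back(HalfBits);  // semantics inferred from the 16-bit width, as above
  Back.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);
  std::printf("back to float: %f (lost=%d)\n", Back.convertToFloat(),
              lost ? 1 : 0);
  return 0;
}
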
diff --git a/libclamav/c++/llvm/lib/Analysis/DbgInfoPrinter.cpp b/libclamav/c++/llvm/lib/Analysis/DbgInfoPrinter.cpp
index 3532b05..0567750 100644
--- a/libclamav/c++/llvm/lib/Analysis/DbgInfoPrinter.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/DbgInfoPrinter.cpp
@@ -40,7 +40,7 @@ namespace {
void printVariableDeclaration(const Value *V);
public:
static char ID; // Pass identification
- PrintDbgInfo() : FunctionPass(&ID), Out(outs()) {}
+ PrintDbgInfo() : FunctionPass(ID), Out(errs()) {}
virtual bool runOnFunction(Function &F);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
@@ -48,8 +48,8 @@ namespace {
}
};
char PrintDbgInfo::ID = 0;
- static RegisterPass<PrintDbgInfo> X("print-dbginfo",
- "Print debug info in human readable form");
+ INITIALIZE_PASS(PrintDbgInfo, "print-dbginfo",
+ "Print debug info in human readable form", false, false);
}
FunctionPass *llvm::createDbgInfoPrinterPass() { return new PrintDbgInfo(); }
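
The DbgInfoPrinter.cpp hunk switches to the LLVM 2.8 pass-registration style: the pass ID is handed to FunctionPass by reference rather than by address, and the static RegisterPass<> object becomes an INITIALIZE_PASS invocation (output also moves from outs() to errs()). A bare-bones skeleton of that idiom follows; the pass name ExamplePass and the exact include set are assumptions for illustration, not taken from the patch.

// Bare-bones LLVM 2.8 pass skeleton using the registration style above.
// "ExamplePass" and the header choices are illustrative assumptions.
#include "llvm/Pass.h"
#include "llvm/PassSupport.h"   // INITIALIZE_PASS, if not pulled in by Pass.h
#include "llvm/Function.h"

using namespace llvm;

namespace {
  struct ExamplePass : public FunctionPass {
    static char ID;                       // pass identification
    ExamplePass() : FunctionPass(ID) {}   // ID passed by reference, not &ID

    virtual bool runOnFunction(Function &F) {
      return false;                       // analysis only, nothing modified
    }
  };
}

char ExamplePass::ID = 0;
INITIALIZE_PASS(ExamplePass, "example-pass",
                "Example of the 2.8 registration style", false, false);
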
diff --git a/libclamav/c++/llvm/lib/Analysis/DebugInfo.cpp b/libclamav/c++/llvm/lib/Analysis/DebugInfo.cpp
index 5cfe666..5ca89c6 100644
--- a/libclamav/c++/llvm/lib/Analysis/DebugInfo.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/DebugInfo.cpp
@@ -13,7 +13,6 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/Target/TargetMachine.h" // FIXME: LAYERING VIOLATION!
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Intrinsics.h"
@@ -22,9 +21,10 @@
#include "llvm/Module.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
-#include "llvm/Support/DebugLoc.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace llvm::dwarf;
@@ -33,52 +33,22 @@ using namespace llvm::dwarf;
// DIDescriptor
//===----------------------------------------------------------------------===//
-/// ValidDebugInfo - Return true if V represents valid debug info value.
-/// FIXME : Add DIDescriptor.isValid()
-bool DIDescriptor::ValidDebugInfo(MDNode *N, unsigned OptLevel) {
- if (!N)
- return false;
-
- DIDescriptor DI(N);
-
- // Check current version. Allow Version6 for now.
- unsigned Version = DI.getVersion();
- if (Version != LLVMDebugVersion && Version != LLVMDebugVersion6)
- return false;
+DIDescriptor::DIDescriptor(const DIFile F) : DbgNode(F.DbgNode) {
+}
- switch (DI.getTag()) {
- case DW_TAG_variable:
- assert(DIVariable(N).Verify() && "Invalid DebugInfo value");
- break;
- case DW_TAG_compile_unit:
- assert(DICompileUnit(N).Verify() && "Invalid DebugInfo value");
- break;
- case DW_TAG_subprogram:
- assert(DISubprogram(N).Verify() && "Invalid DebugInfo value");
- break;
- case DW_TAG_lexical_block:
- // FIXME: This interfers with the quality of generated code during
- // optimization.
- if (OptLevel != CodeGenOpt::None)
- return false;
- // FALLTHROUGH
- default:
- break;
- }
+DIDescriptor::DIDescriptor(const DISubprogram F) : DbgNode(F.DbgNode) {
+}
- return true;
+DIDescriptor::DIDescriptor(const DILexicalBlock F) : DbgNode(F.DbgNode) {
}
-DIDescriptor::DIDescriptor(MDNode *N, unsigned RequiredTag) {
- DbgNode = N;
+DIDescriptor::DIDescriptor(const DIVariable F) : DbgNode(F.DbgNode) {
+}
- // If this is non-null, check to see if the Tag matches. If not, set to null.
- if (N && getTag() != RequiredTag) {
- DbgNode = 0;
- }
+DIDescriptor::DIDescriptor(const DIType F) : DbgNode(F.DbgNode) {
}
-StringRef
+StringRef
DIDescriptor::getStringField(unsigned Elt) const {
if (DbgNode == 0)
return StringRef();
@@ -105,9 +75,9 @@ DIDescriptor DIDescriptor::getDescriptorField(unsigned Elt) const {
if (DbgNode == 0)
return DIDescriptor();
- if (Elt < DbgNode->getNumOperands() && DbgNode->getOperand(Elt))
- return DIDescriptor(dyn_cast<MDNode>(DbgNode->getOperand(Elt)));
-
+ if (Elt < DbgNode->getNumOperands())
+ return
+ DIDescriptor(dyn_cast_or_null<const MDNode>(DbgNode->getOperand(Elt)));
return DIDescriptor();
}
@@ -120,6 +90,24 @@ GlobalVariable *DIDescriptor::getGlobalVariableField(unsigned Elt) const {
return 0;
}
+Constant *DIDescriptor::getConstantField(unsigned Elt) const {
+ if (DbgNode == 0)
+ return 0;
+
+ if (Elt < DbgNode->getNumOperands())
+ return dyn_cast_or_null<Constant>(DbgNode->getOperand(Elt));
+ return 0;
+}
+
+Function *DIDescriptor::getFunctionField(unsigned Elt) const {
+ if (DbgNode == 0)
+ return 0;
+
+ if (Elt < DbgNode->getNumOperands())
+ return dyn_cast_or_null<Function>(DbgNode->getOperand(Elt));
+ return 0;
+}
+
unsigned DIVariable::getNumAddrElements() const {
return DbgNode->getNumOperands()-6;
}
@@ -132,13 +120,12 @@ unsigned DIVariable::getNumAddrElements() const {
/// isBasicType - Return true if the specified tag is legal for
/// DIBasicType.
bool DIDescriptor::isBasicType() const {
- assert(!isNull() && "Invalid descriptor!");
- return getTag() == dwarf::DW_TAG_base_type;
+ return DbgNode && getTag() == dwarf::DW_TAG_base_type;
}
/// isDerivedType - Return true if the specified tag is legal for DIDerivedType.
bool DIDescriptor::isDerivedType() const {
- assert(!isNull() && "Invalid descriptor!");
+ if (!DbgNode) return false;
switch (getTag()) {
case dwarf::DW_TAG_typedef:
case dwarf::DW_TAG_pointer_type:
@@ -148,6 +135,7 @@ bool DIDescriptor::isDerivedType() const {
case dwarf::DW_TAG_restrict_type:
case dwarf::DW_TAG_member:
case dwarf::DW_TAG_inheritance:
+ case dwarf::DW_TAG_friend:
return true;
default:
// CompositeTypes are currently modelled as DerivedTypes.
@@ -158,7 +146,7 @@ bool DIDescriptor::isDerivedType() const {
/// isCompositeType - Return true if the specified tag is legal for
/// DICompositeType.
bool DIDescriptor::isCompositeType() const {
- assert(!isNull() && "Invalid descriptor!");
+ if (!DbgNode) return false;
switch (getTag()) {
case dwarf::DW_TAG_array_type:
case dwarf::DW_TAG_structure_type:
@@ -175,7 +163,7 @@ bool DIDescriptor::isCompositeType() const {
/// isVariable - Return true if the specified tag is legal for DIVariable.
bool DIDescriptor::isVariable() const {
- assert(!isNull() && "Invalid descriptor!");
+ if (!DbgNode) return false;
switch (getTag()) {
case dwarf::DW_TAG_auto_variable:
case dwarf::DW_TAG_arg_variable:
@@ -194,15 +182,14 @@ bool DIDescriptor::isType() const {
/// isSubprogram - Return true if the specified tag is legal for
/// DISubprogram.
bool DIDescriptor::isSubprogram() const {
- assert(!isNull() && "Invalid descriptor!");
- return getTag() == dwarf::DW_TAG_subprogram;
+ return DbgNode && getTag() == dwarf::DW_TAG_subprogram;
}
/// isGlobalVariable - Return true if the specified tag is legal for
/// DIGlobalVariable.
bool DIDescriptor::isGlobalVariable() const {
- assert(!isNull() && "Invalid descriptor!");
- return getTag() == dwarf::DW_TAG_variable;
+ return DbgNode && (getTag() == dwarf::DW_TAG_variable ||
+ getTag() == dwarf::DW_TAG_constant);
}
/// isGlobal - Return true if the specified tag is legal for DIGlobal.
@@ -213,7 +200,7 @@ bool DIDescriptor::isGlobal() const {
/// isScope - Return true if the specified tag is one of the scope
/// related tag.
bool DIDescriptor::isScope() const {
- assert(!isNull() && "Invalid descriptor!");
+ if (!DbgNode) return false;
switch (getTag()) {
case dwarf::DW_TAG_compile_unit:
case dwarf::DW_TAG_lexical_block:
@@ -228,39 +215,39 @@ bool DIDescriptor::isScope() const {
/// isCompileUnit - Return true if the specified tag is DW_TAG_compile_unit.
bool DIDescriptor::isCompileUnit() const {
- assert(!isNull() && "Invalid descriptor!");
- return getTag() == dwarf::DW_TAG_compile_unit;
+ return DbgNode && getTag() == dwarf::DW_TAG_compile_unit;
+}
+
+/// isFile - Return true if the specified tag is DW_TAG_file_type.
+bool DIDescriptor::isFile() const {
+ return DbgNode && getTag() == dwarf::DW_TAG_file_type;
}
/// isNameSpace - Return true if the specified tag is DW_TAG_namespace.
bool DIDescriptor::isNameSpace() const {
- assert(!isNull() && "Invalid descriptor!");
- return getTag() == dwarf::DW_TAG_namespace;
+ return DbgNode && getTag() == dwarf::DW_TAG_namespace;
}
/// isLexicalBlock - Return true if the specified tag is DW_TAG_lexical_block.
bool DIDescriptor::isLexicalBlock() const {
- assert(!isNull() && "Invalid descriptor!");
- return getTag() == dwarf::DW_TAG_lexical_block;
+ return DbgNode && getTag() == dwarf::DW_TAG_lexical_block;
}
/// isSubrange - Return true if the specified tag is DW_TAG_subrange_type.
bool DIDescriptor::isSubrange() const {
- assert(!isNull() && "Invalid descriptor!");
- return getTag() == dwarf::DW_TAG_subrange_type;
+ return DbgNode && getTag() == dwarf::DW_TAG_subrange_type;
}
/// isEnumerator - Return true if the specified tag is DW_TAG_enumerator.
bool DIDescriptor::isEnumerator() const {
- assert(!isNull() && "Invalid descriptor!");
- return getTag() == dwarf::DW_TAG_enumerator;
+ return DbgNode && getTag() == dwarf::DW_TAG_enumerator;
}
//===----------------------------------------------------------------------===//
// Simple Descriptor Constructors and other Methods
//===----------------------------------------------------------------------===//
-DIType::DIType(MDNode *N) : DIDescriptor(N) {
+DIType::DIType(const MDNode *N) : DIScope(N) {
if (!N) return;
if (!isBasicType() && !isDerivedType() && !isCompositeType()) {
DbgNode = 0;
@@ -268,34 +255,34 @@ DIType::DIType(MDNode *N) : DIDescriptor(N) {
}
unsigned DIArray::getNumElements() const {
- assert(DbgNode && "Invalid DIArray");
+ if (!DbgNode)
+ return 0;
return DbgNode->getNumOperands();
}
/// replaceAllUsesWith - Replace all uses of debug info referenced by
-/// this descriptor. After this completes, the current debug info value
-/// is erased.
-void DIDerivedType::replaceAllUsesWith(DIDescriptor &D) {
- if (isNull())
+/// this descriptor.
+void DIType::replaceAllUsesWith(DIDescriptor &D) {
+ if (!DbgNode)
return;
- assert(!D.isNull() && "Can not replace with null");
-
// Since we use a TrackingVH for the node, its easy for clients to manufacture
// legitimate situations where they want to replaceAllUsesWith() on something
// which, due to uniquing, has merged with the source. We shield clients from
// this detail by allowing a value to be replaced with replaceAllUsesWith()
// itself.
- if (getNode() != D.getNode()) {
- MDNode *Node = DbgNode;
- Node->replaceAllUsesWith(D.getNode());
- Node->destroy();
+ if (DbgNode != D) {
+ MDNode *Node = const_cast<MDNode*>(DbgNode);
+ const MDNode *DN = D;
+ const Value *V = cast_or_null<Value>(DN);
+ Node->replaceAllUsesWith(const_cast<Value*>(V));
+ MDNode::deleteTemporary(Node);
}
}
/// Verify - Verify that a compile unit is well formed.
bool DICompileUnit::Verify() const {
- if (isNull())
+ if (!DbgNode)
return false;
StringRef N = getFilename();
if (N.empty())
@@ -306,36 +293,46 @@ bool DICompileUnit::Verify() const {
/// Verify - Verify that a type descriptor is well formed.
bool DIType::Verify() const {
- if (isNull())
+ if (!DbgNode)
return false;
- if (getContext().isNull())
+ if (!getContext().Verify())
return false;
DICompileUnit CU = getCompileUnit();
- if (!CU.isNull() && !CU.Verify())
+ if (!CU.Verify())
return false;
return true;
}
+/// Verify - Verify that a basic type descriptor is well formed.
+bool DIBasicType::Verify() const {
+ return isBasicType();
+}
+
+/// Verify - Verify that a derived type descriptor is well formed.
+bool DIDerivedType::Verify() const {
+ return isDerivedType();
+}
+
/// Verify - Verify that a composite type descriptor is well formed.
bool DICompositeType::Verify() const {
- if (isNull())
+ if (!DbgNode)
return false;
- if (getContext().isNull())
+ if (!getContext().Verify())
return false;
DICompileUnit CU = getCompileUnit();
- if (!CU.isNull() && !CU.Verify())
+ if (!CU.Verify())
return false;
return true;
}
/// Verify - Verify that a subprogram descriptor is well formed.
bool DISubprogram::Verify() const {
- if (isNull())
+ if (!DbgNode)
return false;
- if (getContext().isNull())
+ if (!getContext().Verify())
return false;
DICompileUnit CU = getCompileUnit();
@@ -343,31 +340,31 @@ bool DISubprogram::Verify() const {
return false;
DICompositeType Ty = getType();
- if (!Ty.isNull() && !Ty.Verify())
+ if (!Ty.Verify())
return false;
return true;
}
/// Verify - Verify that a global variable descriptor is well formed.
bool DIGlobalVariable::Verify() const {
- if (isNull())
+ if (!DbgNode)
return false;
if (getDisplayName().empty())
return false;
- if (getContext().isNull())
+ if (!getContext().Verify())
return false;
DICompileUnit CU = getCompileUnit();
- if (!CU.isNull() && !CU.Verify())
+ if (!CU.Verify())
return false;
DIType Ty = getType();
if (!Ty.Verify())
return false;
- if (!getGlobal())
+ if (!getGlobal() && !getConstant())
return false;
return true;
@@ -375,10 +372,13 @@ bool DIGlobalVariable::Verify() const {
/// Verify - Verify that a variable descriptor is well formed.
bool DIVariable::Verify() const {
- if (isNull())
+ if (!DbgNode)
return false;
- if (getContext().isNull())
+ if (!getContext().Verify())
+ return false;
+
+ if (!getCompileUnit().Verify())
return false;
DIType Ty = getType();
@@ -388,6 +388,25 @@ bool DIVariable::Verify() const {
return true;
}
+/// Verify - Verify that a location descriptor is well formed.
+bool DILocation::Verify() const {
+ if (!DbgNode)
+ return false;
+
+ return DbgNode->getNumOperands() == 4;
+}
+
+/// Verify - Verify that a namespace descriptor is well formed.
+bool DINameSpace::Verify() const {
+ if (!DbgNode)
+ return false;
+ if (getName().empty())
+ return false;
+ if (!getCompileUnit().Verify())
+ return false;
+ return true;
+}
+
/// getOriginalTypeSize - If this type is derived from a base type then
/// return base type size.
uint64_t DIDerivedType::getOriginalTypeSize() const {
@@ -396,23 +415,36 @@ uint64_t DIDerivedType::getOriginalTypeSize() const {
Tag == dwarf::DW_TAG_const_type || Tag == dwarf::DW_TAG_volatile_type ||
Tag == dwarf::DW_TAG_restrict_type) {
DIType BaseType = getTypeDerivedFrom();
- // If this type is not derived from any type then take conservative
+ // If this type is not derived from any type then take conservative
// approach.
- if (BaseType.isNull())
+ if (!BaseType.isValid())
return getSizeInBits();
if (BaseType.isDerivedType())
- return DIDerivedType(BaseType.getNode()).getOriginalTypeSize();
+ return DIDerivedType(BaseType).getOriginalTypeSize();
else
return BaseType.getSizeInBits();
}
-
+
return getSizeInBits();
}
+/// isInlinedFnArgument - Return true if this variable provides debugging
+/// information for an inlined function argument.
+bool DIVariable::isInlinedFnArgument(const Function *CurFn) {
+ assert(CurFn && "Invalid function");
+ if (!getContext().isSubprogram())
+ return false;
+ // This variable is not an inlined function argument if its scope
+ // does not describe current function.
+ return !(DISubprogram(getContext()).describes(CurFn));
+}
+
/// describes - Return true if this subprogram provides debugging
/// information for the function F.
bool DISubprogram::describes(const Function *F) {
assert(F && "Invalid function");
+ if (F == getFunction())
+ return true;
StringRef Name = getLinkageName();
if (Name.empty())
Name = getName();
@@ -421,8 +453,17 @@ bool DISubprogram::describes(const Function *F) {
return false;
}
+unsigned DISubprogram::isOptimized() const {
+ assert (DbgNode && "Invalid subprogram descriptor!");
+ if (DbgNode->getNumOperands() == 16)
+ return getUnsignedField(15);
+ return 0;
+}
+
StringRef DIScope::getFilename() const {
- if (isLexicalBlock())
+ if (!DbgNode)
+ return StringRef();
+ if (isLexicalBlock())
return DILexicalBlock(DbgNode).getFilename();
if (isSubprogram())
return DISubprogram(DbgNode).getFilename();
@@ -430,12 +471,18 @@ StringRef DIScope::getFilename() const {
return DICompileUnit(DbgNode).getFilename();
if (isNameSpace())
return DINameSpace(DbgNode).getFilename();
+ if (isType())
+ return DIType(DbgNode).getFilename();
+ if (isFile())
+ return DIFile(DbgNode).getFilename();
assert(0 && "Invalid DIScope!");
return StringRef();
}
StringRef DIScope::getDirectory() const {
- if (isLexicalBlock())
+ if (!DbgNode)
+ return StringRef();
+ if (isLexicalBlock())
return DILexicalBlock(DbgNode).getDirectory();
if (isSubprogram())
return DISubprogram(DbgNode).getDirectory();
@@ -443,6 +490,10 @@ StringRef DIScope::getDirectory() const {
return DICompileUnit(DbgNode).getDirectory();
if (isNameSpace())
return DINameSpace(DbgNode).getDirectory();
+ if (isType())
+ return DIType(DbgNode).getDirectory();
+ if (isFile())
+ return DIFile(DbgNode).getDirectory();
assert(0 && "Invalid DIScope!");
return StringRef();
}
@@ -452,146 +503,182 @@ StringRef DIScope::getDirectory() const {
//===----------------------------------------------------------------------===//
-/// dump - Print descriptor.
-void DIDescriptor::dump() const {
- dbgs() << "[" << dwarf::TagString(getTag()) << "] ";
- dbgs().write_hex((intptr_t) &*DbgNode) << ']';
+/// print - Print descriptor.
+void DIDescriptor::print(raw_ostream &OS) const {
+ OS << "[" << dwarf::TagString(getTag()) << "] ";
+ OS.write_hex((intptr_t) &*DbgNode) << ']';
}
-/// dump - Print compile unit.
-void DICompileUnit::dump() const {
+/// print - Print compile unit.
+void DICompileUnit::print(raw_ostream &OS) const {
if (getLanguage())
- dbgs() << " [" << dwarf::LanguageString(getLanguage()) << "] ";
+ OS << " [" << dwarf::LanguageString(getLanguage()) << "] ";
- dbgs() << " [" << getDirectory() << "/" << getFilename() << " ]";
+ OS << " [" << getDirectory() << "/" << getFilename() << "]";
}
-/// dump - Print type.
-void DIType::dump() const {
- if (isNull()) return;
+/// print - Print type.
+void DIType::print(raw_ostream &OS) const {
+ if (!DbgNode) return;
StringRef Res = getName();
if (!Res.empty())
- dbgs() << " [" << Res << "] ";
+ OS << " [" << Res << "] ";
unsigned Tag = getTag();
- dbgs() << " [" << dwarf::TagString(Tag) << "] ";
+ OS << " [" << dwarf::TagString(Tag) << "] ";
// TODO : Print context
- getCompileUnit().dump();
- dbgs() << " ["
- << getLineNumber() << ", "
- << getSizeInBits() << ", "
- << getAlignInBits() << ", "
- << getOffsetInBits()
+ getCompileUnit().print(OS);
+ OS << " ["
+ << "line " << getLineNumber() << ", "
+ << getSizeInBits() << " bits, "
+ << getAlignInBits() << " bit alignment, "
+ << getOffsetInBits() << " bit offset"
<< "] ";
if (isPrivate())
- dbgs() << " [private] ";
+ OS << " [private] ";
else if (isProtected())
- dbgs() << " [protected] ";
+ OS << " [protected] ";
if (isForwardDecl())
- dbgs() << " [fwd] ";
+ OS << " [fwd] ";
if (isBasicType())
- DIBasicType(DbgNode).dump();
+ DIBasicType(DbgNode).print(OS);
else if (isDerivedType())
- DIDerivedType(DbgNode).dump();
+ DIDerivedType(DbgNode).print(OS);
else if (isCompositeType())
- DICompositeType(DbgNode).dump();
+ DICompositeType(DbgNode).print(OS);
else {
- dbgs() << "Invalid DIType\n";
+ OS << "Invalid DIType\n";
return;
}
- dbgs() << "\n";
+ OS << "\n";
}
-/// dump - Print basic type.
-void DIBasicType::dump() const {
- dbgs() << " [" << dwarf::AttributeEncodingString(getEncoding()) << "] ";
+/// print - Print basic type.
+void DIBasicType::print(raw_ostream &OS) const {
+ OS << " [" << dwarf::AttributeEncodingString(getEncoding()) << "] ";
}
-/// dump - Print derived type.
-void DIDerivedType::dump() const {
- dbgs() << "\n\t Derived From: "; getTypeDerivedFrom().dump();
+/// print - Print derived type.
+void DIDerivedType::print(raw_ostream &OS) const {
+ OS << "\n\t Derived From: "; getTypeDerivedFrom().print(OS);
}
-/// dump - Print composite type.
-void DICompositeType::dump() const {
+/// print - Print composite type.
+void DICompositeType::print(raw_ostream &OS) const {
DIArray A = getTypeArray();
- if (A.isNull())
- return;
- dbgs() << " [" << A.getNumElements() << " elements]";
+ OS << " [" << A.getNumElements() << " elements]";
}
-/// dump - Print global.
-void DIGlobal::dump() const {
+/// print - Print subprogram.
+void DISubprogram::print(raw_ostream &OS) const {
StringRef Res = getName();
if (!Res.empty())
- dbgs() << " [" << Res << "] ";
+ OS << " [" << Res << "] ";
unsigned Tag = getTag();
- dbgs() << " [" << dwarf::TagString(Tag) << "] ";
+ OS << " [" << dwarf::TagString(Tag) << "] ";
// TODO : Print context
- getCompileUnit().dump();
- dbgs() << " [" << getLineNumber() << "] ";
+ getCompileUnit().print(OS);
+ OS << " [" << getLineNumber() << "] ";
if (isLocalToUnit())
- dbgs() << " [local] ";
+ OS << " [local] ";
if (isDefinition())
- dbgs() << " [def] ";
-
- if (isGlobalVariable())
- DIGlobalVariable(DbgNode).dump();
+ OS << " [def] ";
- dbgs() << "\n";
+ OS << "\n";
}
-/// dump - Print subprogram.
-void DISubprogram::dump() const {
+/// print - Print global variable.
+void DIGlobalVariable::print(raw_ostream &OS) const {
+ OS << " [";
StringRef Res = getName();
if (!Res.empty())
- dbgs() << " [" << Res << "] ";
+ OS << " [" << Res << "] ";
unsigned Tag = getTag();
- dbgs() << " [" << dwarf::TagString(Tag) << "] ";
+ OS << " [" << dwarf::TagString(Tag) << "] ";
// TODO : Print context
- getCompileUnit().dump();
- dbgs() << " [" << getLineNumber() << "] ";
+ getCompileUnit().print(OS);
+ OS << " [" << getLineNumber() << "] ";
if (isLocalToUnit())
- dbgs() << " [local] ";
+ OS << " [local] ";
if (isDefinition())
- dbgs() << " [def] ";
+ OS << " [def] ";
+
+ if (isGlobalVariable())
+ DIGlobalVariable(DbgNode).print(OS);
+ OS << "]\n";
+}
+
+/// print - Print variable.
+void DIVariable::print(raw_ostream &OS) const {
+ StringRef Res = getName();
+ if (!Res.empty())
+ OS << " [" << Res << "] ";
+
+ getCompileUnit().print(OS);
+ OS << " [" << getLineNumber() << "] ";
+ getType().print(OS);
+ OS << "\n";
+
+ // FIXME: Dump complex addresses
+}
+
+/// dump - Print descriptor to dbgs() with a newline.
+void DIDescriptor::dump() const {
+ print(dbgs()); dbgs() << '\n';
+}
+
+/// dump - Print compile unit to dbgs() with a newline.
+void DICompileUnit::dump() const {
+ print(dbgs()); dbgs() << '\n';
+}
+
+/// dump - Print type to dbgs() with a newline.
+void DIType::dump() const {
+ print(dbgs()); dbgs() << '\n';
+}
+
+/// dump - Print basic type to dbgs() with a newline.
+void DIBasicType::dump() const {
+ print(dbgs()); dbgs() << '\n';
+}
+
+/// dump - Print derived type to dbgs() with a newline.
+void DIDerivedType::dump() const {
+ print(dbgs()); dbgs() << '\n';
+}
- dbgs() << "\n";
+/// dump - Print composite type to dbgs() with a newline.
+void DICompositeType::dump() const {
+ print(dbgs()); dbgs() << '\n';
+}
+
+/// dump - Print subprogram to dbgs() with a newline.
+void DISubprogram::dump() const {
+ print(dbgs()); dbgs() << '\n';
}
/// dump - Print global variable.
void DIGlobalVariable::dump() const {
- dbgs() << " [";
- getGlobal()->dump();
- dbgs() << "] ";
+ print(dbgs()); dbgs() << '\n';
}
/// dump - Print variable.
void DIVariable::dump() const {
- StringRef Res = getName();
- if (!Res.empty())
- dbgs() << " [" << Res << "] ";
-
- getCompileUnit().dump();
- dbgs() << " [" << getLineNumber() << "] ";
- getType().dump();
- dbgs() << "\n";
-
- // FIXME: Dump complex addresses
+ print(dbgs()); dbgs() << '\n';
}
//===----------------------------------------------------------------------===//
@@ -620,7 +707,7 @@ DIArray DIFactory::GetOrCreateArray(DIDescriptor *Tys, unsigned NumTys) {
Elts.push_back(llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)));
else
for (unsigned i = 0; i != NumTys; ++i)
- Elts.push_back(Tys[i].getNode());
+ Elts.push_back(Tys[i]);
return DIArray(MDNode::get(VMContext,Elts.data(), Elts.size()));
}
@@ -665,6 +752,20 @@ DICompileUnit DIFactory::CreateCompileUnit(unsigned LangID,
return DICompileUnit(MDNode::get(VMContext, &Elts[0], 10));
}
+/// CreateFile - Create a new descriptor for the specified file.
+DIFile DIFactory::CreateFile(StringRef Filename,
+ StringRef Directory,
+ DICompileUnit CU) {
+ Value *Elts[] = {
+ GetTagConstant(dwarf::DW_TAG_file_type),
+ MDString::get(VMContext, Filename),
+ MDString::get(VMContext, Directory),
+ CU
+ };
+
+ return DIFile(MDNode::get(VMContext, &Elts[0], 4));
+}
+
/// CreateEnumerator - Create a single enumerator value.
DIEnumerator DIFactory::CreateEnumerator(StringRef Name, uint64_t Val){
Value *Elts[] = {
@@ -679,7 +780,7 @@ DIEnumerator DIFactory::CreateEnumerator(StringRef Name, uint64_t Val){
/// CreateBasicType - Create a basic type like int, float, etc.
DIBasicType DIFactory::CreateBasicType(DIDescriptor Context,
StringRef Name,
- DICompileUnit CompileUnit,
+ DIFile F,
unsigned LineNumber,
uint64_t SizeInBits,
uint64_t AlignInBits,
@@ -687,9 +788,9 @@ DIBasicType DIFactory::CreateBasicType(DIDescriptor Context,
unsigned Encoding) {
Value *Elts[] = {
GetTagConstant(dwarf::DW_TAG_base_type),
- Context.getNode(),
+ Context,
MDString::get(VMContext, Name),
- CompileUnit.getNode(),
+ F,
ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
@@ -704,7 +805,7 @@ DIBasicType DIFactory::CreateBasicType(DIDescriptor Context,
/// CreateBasicType - Create a basic type like int, float, etc.
DIBasicType DIFactory::CreateBasicTypeEx(DIDescriptor Context,
StringRef Name,
- DICompileUnit CompileUnit,
+ DIFile F,
unsigned LineNumber,
Constant *SizeInBits,
Constant *AlignInBits,
@@ -712,9 +813,9 @@ DIBasicType DIFactory::CreateBasicTypeEx(DIDescriptor Context,
unsigned Encoding) {
Value *Elts[] = {
GetTagConstant(dwarf::DW_TAG_base_type),
- Context.getNode(),
+ Context,
MDString::get(VMContext, Name),
- CompileUnit.getNode(),
+ F,
ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
SizeInBits,
AlignInBits,
@@ -731,7 +832,7 @@ DIType DIFactory::CreateArtificialType(DIType Ty) {
return Ty;
SmallVector<Value *, 9> Elts;
- MDNode *N = Ty.getNode();
+ MDNode *N = Ty;
assert (N && "Unexpected input DIType!");
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
if (Value *V = N->getOperand(i))
@@ -754,7 +855,7 @@ DIType DIFactory::CreateArtificialType(DIType Ty) {
DIDerivedType DIFactory::CreateDerivedType(unsigned Tag,
DIDescriptor Context,
StringRef Name,
- DICompileUnit CompileUnit,
+ DIFile F,
unsigned LineNumber,
uint64_t SizeInBits,
uint64_t AlignInBits,
@@ -763,15 +864,15 @@ DIDerivedType DIFactory::CreateDerivedType(unsigned Tag,
DIType DerivedFrom) {
Value *Elts[] = {
GetTagConstant(Tag),
- Context.getNode(),
+ Context,
MDString::get(VMContext, Name),
- CompileUnit.getNode(),
+ F,
ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
ConstantInt::get(Type::getInt64Ty(VMContext), OffsetInBits),
ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- DerivedFrom.getNode(),
+ DerivedFrom,
};
return DIDerivedType(MDNode::get(VMContext, &Elts[0], 10));
}
@@ -782,7 +883,7 @@ DIDerivedType DIFactory::CreateDerivedType(unsigned Tag,
DIDerivedType DIFactory::CreateDerivedTypeEx(unsigned Tag,
DIDescriptor Context,
StringRef Name,
- DICompileUnit CompileUnit,
+ DIFile F,
unsigned LineNumber,
Constant *SizeInBits,
Constant *AlignInBits,
@@ -791,15 +892,15 @@ DIDerivedType DIFactory::CreateDerivedTypeEx(unsigned Tag,
DIType DerivedFrom) {
Value *Elts[] = {
GetTagConstant(Tag),
- Context.getNode(),
+ Context,
MDString::get(VMContext, Name),
- CompileUnit.getNode(),
+ F,
ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
SizeInBits,
AlignInBits,
OffsetInBits,
ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- DerivedFrom.getNode(),
+ DerivedFrom,
};
return DIDerivedType(MDNode::get(VMContext, &Elts[0], 10));
}
@@ -809,7 +910,7 @@ DIDerivedType DIFactory::CreateDerivedTypeEx(unsigned Tag,
DICompositeType DIFactory::CreateCompositeType(unsigned Tag,
DIDescriptor Context,
StringRef Name,
- DICompileUnit CompileUnit,
+ DIFile F,
unsigned LineNumber,
uint64_t SizeInBits,
uint64_t AlignInBits,
@@ -822,20 +923,39 @@ DICompositeType DIFactory::CreateCompositeType(unsigned Tag,
Value *Elts[] = {
GetTagConstant(Tag),
- Context.getNode(),
+ Context,
MDString::get(VMContext, Name),
- CompileUnit.getNode(),
+ F,
ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits),
ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits),
ConstantInt::get(Type::getInt64Ty(VMContext), OffsetInBits),
ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- DerivedFrom.getNode(),
- Elements.getNode(),
+ DerivedFrom,
+ Elements,
ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang),
ContainingType
};
- return DICompositeType(MDNode::get(VMContext, &Elts[0], 13));
+
+ MDNode *Node = MDNode::get(VMContext, &Elts[0], 13);
+ // Create a named metadata so that we do not lose this enum info.
+ if (Tag == dwarf::DW_TAG_enumeration_type) {
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.enum");
+ NMD->addOperand(Node);
+ }
+ return DICompositeType(Node);
+}
+
+
+/// CreateTemporaryType - Create a temporary forward-declared type.
+DIType DIFactory::CreateTemporaryType() {
+ // Give the temporary MDNode a tag. It doesn't matter what tag we
+ // use here as long as DIType accepts it.
+ Value *Elts[] = {
+ GetTagConstant(DW_TAG_base_type)
+ };
+ MDNode *Node = MDNode::getTemporary(VMContext, Elts, array_lengthof(Elts));
+ return DIType(Node);
}
@@ -843,7 +963,7 @@ DICompositeType DIFactory::CreateCompositeType(unsigned Tag,
DICompositeType DIFactory::CreateCompositeTypeEx(unsigned Tag,
DIDescriptor Context,
StringRef Name,
- DICompileUnit CompileUnit,
+ DIFile F,
unsigned LineNumber,
Constant *SizeInBits,
Constant *AlignInBits,
@@ -851,23 +971,30 @@ DICompositeType DIFactory::CreateCompositeTypeEx(unsigned Tag,
unsigned Flags,
DIType DerivedFrom,
DIArray Elements,
- unsigned RuntimeLang) {
-
+ unsigned RuntimeLang,
+ MDNode *ContainingType) {
Value *Elts[] = {
GetTagConstant(Tag),
- Context.getNode(),
+ Context,
MDString::get(VMContext, Name),
- CompileUnit.getNode(),
+ F,
ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
SizeInBits,
AlignInBits,
OffsetInBits,
ConstantInt::get(Type::getInt32Ty(VMContext), Flags),
- DerivedFrom.getNode(),
- Elements.getNode(),
- ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang)
+ DerivedFrom,
+ Elements,
+ ConstantInt::get(Type::getInt32Ty(VMContext), RuntimeLang),
+ ContainingType
};
- return DICompositeType(MDNode::get(VMContext, &Elts[0], 12));
+ MDNode *Node = MDNode::get(VMContext, &Elts[0], 13);
+ // Create a named metadata so that we do not lose this enum info.
+ if (Tag == dwarf::DW_TAG_enumeration_type) {
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.enum");
+ NMD->addOperand(Node);
+ }
+ return DICompositeType(Node);
}
@@ -878,41 +1005,50 @@ DISubprogram DIFactory::CreateSubprogram(DIDescriptor Context,
StringRef Name,
StringRef DisplayName,
StringRef LinkageName,
- DICompileUnit CompileUnit,
+ DIFile F,
unsigned LineNo, DIType Ty,
bool isLocalToUnit,
bool isDefinition,
unsigned VK, unsigned VIndex,
DIType ContainingType,
- bool isArtificial) {
+ bool isArtificial,
+ bool isOptimized,
+ Function *Fn) {
Value *Elts[] = {
GetTagConstant(dwarf::DW_TAG_subprogram),
llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
- Context.getNode(),
+ Context,
MDString::get(VMContext, Name),
MDString::get(VMContext, DisplayName),
MDString::get(VMContext, LinkageName),
- CompileUnit.getNode(),
+ F,
ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
- Ty.getNode(),
+ Ty,
ConstantInt::get(Type::getInt1Ty(VMContext), isLocalToUnit),
ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition),
ConstantInt::get(Type::getInt32Ty(VMContext), (unsigned)VK),
ConstantInt::get(Type::getInt32Ty(VMContext), VIndex),
- ContainingType.getNode(),
- ConstantInt::get(Type::getInt1Ty(VMContext), isArtificial)
+ ContainingType,
+ ConstantInt::get(Type::getInt1Ty(VMContext), isArtificial),
+ ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized),
+ Fn
};
- return DISubprogram(MDNode::get(VMContext, &Elts[0], 15));
+ MDNode *Node = MDNode::get(VMContext, &Elts[0], 17);
+
+ // Create a named metadata so that we do not lose this mdnode.
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.sp");
+ NMD->addOperand(Node);
+ return DISubprogram(Node);
}
/// CreateSubprogramDefinition - Create new subprogram descriptor for the
-/// given declaration.
-DISubprogram DIFactory::CreateSubprogramDefinition(DISubprogram &SPDeclaration) {
+/// given declaration.
+DISubprogram DIFactory::CreateSubprogramDefinition(DISubprogram &SPDeclaration){
if (SPDeclaration.isDefinition())
- return DISubprogram(SPDeclaration.getNode());
+ return DISubprogram(SPDeclaration);
- MDNode *DeclNode = SPDeclaration.getNode();
+ MDNode *DeclNode = SPDeclaration;
Value *Elts[] = {
GetTagConstant(dwarf::DW_TAG_subprogram),
llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
@@ -928,9 +1064,16 @@ DISubprogram DIFactory::CreateSubprogramDefinition(DISubprogram &SPDeclaration)
DeclNode->getOperand(11), // Virtuality
DeclNode->getOperand(12), // VIndex
DeclNode->getOperand(13), // Containting Type
- DeclNode->getOperand(14) // isArtificial
+ DeclNode->getOperand(14), // isArtificial
+ DeclNode->getOperand(15), // isOptimized
+ SPDeclaration.getFunction()
};
- return DISubprogram(MDNode::get(VMContext, &Elts[0], 15));
+ MDNode *Node =MDNode::get(VMContext, &Elts[0], 16);
+
+ // Create a named metadata so that we do not lose this mdnode.
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.sp");
+ NMD->addOperand(Node);
+ return DISubprogram(Node);
}
/// CreateGlobalVariable - Create a new descriptor for the specified global.
@@ -938,19 +1081,19 @@ DIGlobalVariable
DIFactory::CreateGlobalVariable(DIDescriptor Context, StringRef Name,
StringRef DisplayName,
StringRef LinkageName,
- DICompileUnit CompileUnit,
+ DIFile F,
unsigned LineNo, DIType Ty,bool isLocalToUnit,
bool isDefinition, llvm::GlobalVariable *Val) {
Value *Elts[] = {
GetTagConstant(dwarf::DW_TAG_variable),
llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
- Context.getNode(),
+ Context,
MDString::get(VMContext, Name),
MDString::get(VMContext, DisplayName),
MDString::get(VMContext, LinkageName),
- CompileUnit.getNode(),
+ F,
ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
- Ty.getNode(),
+ Ty,
ConstantInt::get(Type::getInt1Ty(VMContext), isLocalToUnit),
ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition),
Val
@@ -966,21 +1109,72 @@ DIFactory::CreateGlobalVariable(DIDescriptor Context, StringRef Name,
return DIGlobalVariable(Node);
}
+/// CreateGlobalVariable - Create a new descriptor for the specified constant.
+DIGlobalVariable
+DIFactory::CreateGlobalVariable(DIDescriptor Context, StringRef Name,
+ StringRef DisplayName,
+ StringRef LinkageName,
+ DIFile F,
+ unsigned LineNo, DIType Ty,bool isLocalToUnit,
+ bool isDefinition, llvm::Constant *Val) {
+ Value *Elts[] = {
+ GetTagConstant(dwarf::DW_TAG_variable),
+ llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)),
+ Context,
+ MDString::get(VMContext, Name),
+ MDString::get(VMContext, DisplayName),
+ MDString::get(VMContext, LinkageName),
+ F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
+ Ty,
+ ConstantInt::get(Type::getInt1Ty(VMContext), isLocalToUnit),
+ ConstantInt::get(Type::getInt1Ty(VMContext), isDefinition),
+ Val
+ };
+
+ Value *const *Vs = &Elts[0];
+ MDNode *Node = MDNode::get(VMContext,Vs, 12);
+
+ // Create a named metadata so that we do not lose this mdnode.
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.gv");
+ NMD->addOperand(Node);
+
+ return DIGlobalVariable(Node);
+}
/// CreateVariable - Create a new descriptor for the specified variable.
DIVariable DIFactory::CreateVariable(unsigned Tag, DIDescriptor Context,
StringRef Name,
- DICompileUnit CompileUnit, unsigned LineNo,
- DIType Ty) {
+ DIFile F,
+ unsigned LineNo,
+ DIType Ty, bool AlwaysPreserve) {
Value *Elts[] = {
GetTagConstant(Tag),
- Context.getNode(),
+ Context,
MDString::get(VMContext, Name),
- CompileUnit.getNode(),
+ F,
ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
- Ty.getNode(),
+ Ty,
};
- return DIVariable(MDNode::get(VMContext, &Elts[0], 6));
+ MDNode *Node = MDNode::get(VMContext, &Elts[0], 6);
+ if (AlwaysPreserve) {
+ // The optimizer may remove local variables. If there is an interest
+ // to preserve variable info in such situation then stash it in a
+ // named mdnode.
+ DISubprogram Fn(getDISubprogram(Context));
+ StringRef FName = "fn";
+ if (Fn.getFunction())
+ FName = Fn.getFunction()->getName();
+ char One = '\1';
+ if (FName.startswith(StringRef(&One, 1)))
+ FName = FName.substr(1);
+
+ SmallString<32> Out;
+ NamedMDNode *FnLocals =
+ M.getOrInsertNamedMetadata(Twine("llvm.dbg.lv.", FName).toStringRef(Out));
+ FnLocals->addOperand(Node);
+ }
+ return DIVariable(Node);
}
@@ -988,17 +1182,17 @@ DIVariable DIFactory::CreateVariable(unsigned Tag, DIDescriptor Context,
/// which has a complex address expression for its address.
DIVariable DIFactory::CreateComplexVariable(unsigned Tag, DIDescriptor Context,
const std::string &Name,
- DICompileUnit CompileUnit,
+ DIFile F,
unsigned LineNo,
- DIType Ty,
+ DIType Ty,
SmallVector<Value *, 9> &addr) {
SmallVector<Value *, 9> Elts;
Elts.push_back(GetTagConstant(Tag));
- Elts.push_back(Context.getNode());
+ Elts.push_back(Context);
Elts.push_back(MDString::get(VMContext, Name));
- Elts.push_back(CompileUnit.getNode());
+ Elts.push_back(F);
Elts.push_back(ConstantInt::get(Type::getInt32Ty(VMContext), LineNo));
- Elts.push_back(Ty.getNode());
+ Elts.push_back(Ty);
Elts.insert(Elts.end(), addr.begin(), addr.end());
return DIVariable(MDNode::get(VMContext, &Elts[0], 6+addr.size()));
@@ -1008,26 +1202,31 @@ DIVariable DIFactory::CreateComplexVariable(unsigned Tag, DIDescriptor Context,
/// CreateBlock - This creates a descriptor for a lexical block with the
/// specified parent VMContext.
DILexicalBlock DIFactory::CreateLexicalBlock(DIDescriptor Context,
- unsigned LineNo, unsigned Col) {
+ DIFile F, unsigned LineNo,
+ unsigned Col) {
+ // Defeat MDNode uniquing for lexical blocks.
+ static unsigned int unique_id = 0;
Value *Elts[] = {
GetTagConstant(dwarf::DW_TAG_lexical_block),
- Context.getNode(),
+ Context,
ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
- ConstantInt::get(Type::getInt32Ty(VMContext), Col)
+ ConstantInt::get(Type::getInt32Ty(VMContext), Col),
+ F,
+ ConstantInt::get(Type::getInt32Ty(VMContext), unique_id++)
};
- return DILexicalBlock(MDNode::get(VMContext, &Elts[0], 4));
+ return DILexicalBlock(MDNode::get(VMContext, &Elts[0], 6));
}
/// CreateNameSpace - This creates new descriptor for a namespace
/// with the specified parent context.
DINameSpace DIFactory::CreateNameSpace(DIDescriptor Context, StringRef Name,
- DICompileUnit CompileUnit,
+ DIFile F,
unsigned LineNo) {
Value *Elts[] = {
GetTagConstant(dwarf::DW_TAG_namespace),
- Context.getNode(),
+ Context,
MDString::get(VMContext, Name),
- CompileUnit.getNode(),
+ F,
ConstantInt::get(Type::getInt32Ty(VMContext), LineNo)
};
return DINameSpace(MDNode::get(VMContext, &Elts[0], 5));
@@ -1039,20 +1238,8 @@ DILocation DIFactory::CreateLocation(unsigned LineNo, unsigned ColumnNo,
Value *Elts[] = {
ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
ConstantInt::get(Type::getInt32Ty(VMContext), ColumnNo),
- S.getNode(),
- OrigLoc.getNode(),
- };
- return DILocation(MDNode::get(VMContext, &Elts[0], 4));
-}
-
-/// CreateLocation - Creates a debug info location.
-DILocation DIFactory::CreateLocation(unsigned LineNo, unsigned ColumnNo,
- DIScope S, MDNode *OrigLoc) {
- Value *Elts[] = {
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
- ConstantInt::get(Type::getInt32Ty(VMContext), ColumnNo),
- S.getNode(),
- OrigLoc
+ S,
+ OrigLoc,
};
return DILocation(MDNode::get(VMContext, &Elts[0], 4));
}
@@ -1065,12 +1252,12 @@ DILocation DIFactory::CreateLocation(unsigned LineNo, unsigned ColumnNo,
Instruction *DIFactory::InsertDeclare(Value *Storage, DIVariable D,
Instruction *InsertBefore) {
assert(Storage && "no storage passed to dbg.declare");
- assert(D.getNode() && "empty DIVariable passed to dbg.declare");
+ assert(D.Verify() && "empty DIVariable passed to dbg.declare");
if (!DeclareFn)
DeclareFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare);
Value *Args[] = { MDNode::get(Storage->getContext(), &Storage, 1),
- D.getNode() };
+ D };
return CallInst::Create(DeclareFn, Args, Args+2, "", InsertBefore);
}
@@ -1078,16 +1265,16 @@ Instruction *DIFactory::InsertDeclare(Value *Storage, DIVariable D,
Instruction *DIFactory::InsertDeclare(Value *Storage, DIVariable D,
BasicBlock *InsertAtEnd) {
assert(Storage && "no storage passed to dbg.declare");
- assert(D.getNode() && "empty DIVariable passed to dbg.declare");
+ assert(D.Verify() && "invalid DIVariable passed to dbg.declare");
if (!DeclareFn)
DeclareFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_declare);
Value *Args[] = { MDNode::get(Storage->getContext(), &Storage, 1),
- D.getNode() };
+ D };
// If this block already has a terminator then insert this intrinsic
// before the terminator.
- if (TerminatorInst *T = InsertAtEnd->getTerminator())
+ if (TerminatorInst *T = InsertAtEnd->getTerminator())
return CallInst::Create(DeclareFn, Args, Args+2, "", T);
else
return CallInst::Create(DeclareFn, Args, Args+2, "", InsertAtEnd);}
@@ -1097,13 +1284,13 @@ Instruction *DIFactory::InsertDbgValueIntrinsic(Value *V, uint64_t Offset,
DIVariable D,
Instruction *InsertBefore) {
assert(V && "no value passed to dbg.value");
- assert(D.getNode() && "empty DIVariable passed to dbg.value");
+ assert(D.Verify() && "invalid DIVariable passed to dbg.value");
if (!ValueFn)
ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value);
Value *Args[] = { MDNode::get(V->getContext(), &V, 1),
ConstantInt::get(Type::getInt64Ty(V->getContext()), Offset),
- D.getNode() };
+ D };
return CallInst::Create(ValueFn, Args, Args+3, "", InsertBefore);
}
@@ -1112,13 +1299,13 @@ Instruction *DIFactory::InsertDbgValueIntrinsic(Value *V, uint64_t Offset,
DIVariable D,
BasicBlock *InsertAtEnd) {
assert(V && "no value passed to dbg.value");
- assert(D.getNode() && "empty DIVariable passed to dbg.value");
+ assert(D.Verify() && "invalid DIVariable passed to dbg.value");
if (!ValueFn)
ValueFn = Intrinsic::getDeclaration(&M, Intrinsic::dbg_value);
- Value *Args[] = { MDNode::get(V->getContext(), &V, 1),
+ Value *Args[] = { MDNode::get(V->getContext(), &V, 1),
ConstantInt::get(Type::getInt64Ty(V->getContext()), Offset),
- D.getNode() };
+ D };
return CallInst::Create(ValueFn, Args, Args+3, "", InsertAtEnd);
}
@@ -1128,42 +1315,56 @@ Instruction *DIFactory::InsertDbgValueIntrinsic(Value *V, uint64_t Offset,
/// processModule - Process entire module and collect debug info.
void DebugInfoFinder::processModule(Module &M) {
- unsigned MDDbgKind = M.getMDKindID("dbg");
-
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
for (Function::iterator FI = (*I).begin(), FE = (*I).end(); FI != FE; ++FI)
for (BasicBlock::iterator BI = (*FI).begin(), BE = (*FI).end(); BI != BE;
++BI) {
if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(BI))
processDeclare(DDI);
- else if (MDNode *L = BI->getMetadata(MDDbgKind))
- processLocation(DILocation(L));
- }
- NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.gv");
- if (!NMD)
- return;
+ DebugLoc Loc = BI->getDebugLoc();
+ if (Loc.isUnknown())
+ continue;
- for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
- DIGlobalVariable DIG(cast<MDNode>(NMD->getOperand(i)));
- if (addGlobalVariable(DIG)) {
- addCompileUnit(DIG.getCompileUnit());
- processType(DIG.getType());
+ LLVMContext &Ctx = BI->getContext();
+ DIDescriptor Scope(Loc.getScope(Ctx));
+
+ if (Scope.isCompileUnit())
+ addCompileUnit(DICompileUnit(Scope));
+ else if (Scope.isSubprogram())
+ processSubprogram(DISubprogram(Scope));
+ else if (Scope.isLexicalBlock())
+ processLexicalBlock(DILexicalBlock(Scope));
+
+ if (MDNode *IA = Loc.getInlinedAt(Ctx))
+ processLocation(DILocation(IA));
+ }
+
+ if (NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.gv")) {
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
+ DIGlobalVariable DIG(cast<MDNode>(NMD->getOperand(i)));
+ if (addGlobalVariable(DIG)) {
+ addCompileUnit(DIG.getCompileUnit());
+ processType(DIG.getType());
+ }
}
}
+
+ if (NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.sp"))
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
+ processSubprogram(DISubprogram(NMD->getOperand(i)));
}
/// processLocation - Process DILocation.
void DebugInfoFinder::processLocation(DILocation Loc) {
- if (Loc.isNull()) return;
- DIScope S(Loc.getScope().getNode());
- if (S.isNull()) return;
+ if (!Loc.Verify()) return;
+ DIDescriptor S(Loc.getScope());
if (S.isCompileUnit())
- addCompileUnit(DICompileUnit(S.getNode()));
+ addCompileUnit(DICompileUnit(S));
else if (S.isSubprogram())
- processSubprogram(DISubprogram(S.getNode()));
+ processSubprogram(DISubprogram(S));
else if (S.isLexicalBlock())
- processLexicalBlock(DILexicalBlock(S.getNode()));
+ processLexicalBlock(DILexicalBlock(S));
processLocation(Loc.getOrigLocation());
}
@@ -1174,40 +1375,33 @@ void DebugInfoFinder::processType(DIType DT) {
addCompileUnit(DT.getCompileUnit());
if (DT.isCompositeType()) {
- DICompositeType DCT(DT.getNode());
+ DICompositeType DCT(DT);
processType(DCT.getTypeDerivedFrom());
DIArray DA = DCT.getTypeArray();
- if (!DA.isNull())
- for (unsigned i = 0, e = DA.getNumElements(); i != e; ++i) {
- DIDescriptor D = DA.getElement(i);
- DIType TyE = DIType(D.getNode());
- if (!TyE.isNull())
- processType(TyE);
- else
- processSubprogram(DISubprogram(D.getNode()));
- }
+ for (unsigned i = 0, e = DA.getNumElements(); i != e; ++i) {
+ DIDescriptor D = DA.getElement(i);
+ if (D.isType())
+ processType(DIType(D));
+ else if (D.isSubprogram())
+ processSubprogram(DISubprogram(D));
+ }
} else if (DT.isDerivedType()) {
- DIDerivedType DDT(DT.getNode());
- if (!DDT.isNull())
- processType(DDT.getTypeDerivedFrom());
+ DIDerivedType DDT(DT);
+ processType(DDT.getTypeDerivedFrom());
}
}
/// processLexicalBlock
void DebugInfoFinder::processLexicalBlock(DILexicalBlock LB) {
- if (LB.isNull())
- return;
DIScope Context = LB.getContext();
if (Context.isLexicalBlock())
- return processLexicalBlock(DILexicalBlock(Context.getNode()));
+ return processLexicalBlock(DILexicalBlock(Context));
else
- return processSubprogram(DISubprogram(Context.getNode()));
+ return processSubprogram(DISubprogram(Context));
}
/// processSubprogram - Process DISubprogram.
void DebugInfoFinder::processSubprogram(DISubprogram SP) {
- if (SP.isNull())
- return;
if (!addSubprogram(SP))
return;
addCompileUnit(SP.getCompileUnit());
@@ -1216,62 +1410,65 @@ void DebugInfoFinder::processSubprogram(DISubprogram SP) {
/// processDeclare - Process DbgDeclareInst.
void DebugInfoFinder::processDeclare(DbgDeclareInst *DDI) {
- DIVariable DV(cast<MDNode>(DDI->getVariable()));
- if (DV.isNull())
+ MDNode *N = dyn_cast<MDNode>(DDI->getVariable());
+ if (!N) return;
+
+ DIDescriptor DV(N);
+ if (!DV.isVariable())
return;
- if (!NodesSeen.insert(DV.getNode()))
+ if (!NodesSeen.insert(DV))
return;
- addCompileUnit(DV.getCompileUnit());
- processType(DV.getType());
+ addCompileUnit(DIVariable(N).getCompileUnit());
+ processType(DIVariable(N).getType());
}
/// addType - Add type into Tys.
bool DebugInfoFinder::addType(DIType DT) {
- if (DT.isNull())
+ if (!DT.isValid())
return false;
- if (!NodesSeen.insert(DT.getNode()))
+ if (!NodesSeen.insert(DT))
return false;
- TYs.push_back(DT.getNode());
+ TYs.push_back(DT);
return true;
}
/// addCompileUnit - Add compile unit into CUs.
bool DebugInfoFinder::addCompileUnit(DICompileUnit CU) {
- if (CU.isNull())
+ if (!CU.Verify())
return false;
- if (!NodesSeen.insert(CU.getNode()))
+ if (!NodesSeen.insert(CU))
return false;
- CUs.push_back(CU.getNode());
+ CUs.push_back(CU);
return true;
}
/// addGlobalVariable - Add global variable into GVs.
bool DebugInfoFinder::addGlobalVariable(DIGlobalVariable DIG) {
- if (DIG.isNull())
+ if (!DIDescriptor(DIG).isGlobalVariable())
return false;
- if (!NodesSeen.insert(DIG.getNode()))
+ if (!NodesSeen.insert(DIG))
return false;
- GVs.push_back(DIG.getNode());
+ GVs.push_back(DIG);
return true;
}
// addSubprogram - Add subprgoram into SPs.
bool DebugInfoFinder::addSubprogram(DISubprogram SP) {
- if (SP.isNull())
+ if (!DIDescriptor(SP).isSubprogram())
return false;
- if (!NodesSeen.insert(SP.getNode()))
+ if (!NodesSeen.insert(SP))
return false;
- SPs.push_back(SP.getNode());
+ SPs.push_back(SP);
return true;
}
@@ -1283,11 +1480,11 @@ static Value *findDbgGlobalDeclare(GlobalVariable *V) {
return 0;
for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
- DIGlobalVariable DIG(cast_or_null<MDNode>(NMD->getOperand(i)));
- if (DIG.isNull())
+ DIDescriptor DIG(cast<MDNode>(NMD->getOperand(i)));
+ if (!DIG.isGlobalVariable())
continue;
- if (DIG.getGlobal() == V)
- return DIG.getNode();
+ if (DIGlobalVariable(DIG).getGlobal() == V)
+ return DIG;
}
return 0;
}
@@ -1296,16 +1493,16 @@ static Value *findDbgGlobalDeclare(GlobalVariable *V) {
/// It looks through pointer casts too.
static const DbgDeclareInst *findDbgDeclare(const Value *V) {
V = V->stripPointerCasts();
-
+
if (!isa<Instruction>(V) && !isa<Argument>(V))
return 0;
-
+
const Function *F = NULL;
if (const Instruction *I = dyn_cast<Instruction>(V))
F = I->getParent()->getParent();
else if (const Argument *A = dyn_cast<Argument>(V))
F = A->getParent();
-
+
for (Function::const_iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
for (BasicBlock::const_iterator BI = (*FI).begin(), BE = (*FI).end();
BI != BE; ++BI)
@@ -1358,51 +1555,25 @@ bool llvm::getLocationInfo(const Value *V, std::string &DisplayName,
return true;
}
-/// ExtractDebugLocation - Extract debug location information
-/// from DILocation.
-DebugLoc llvm::ExtractDebugLocation(DILocation &Loc,
- DebugLocTracker &DebugLocInfo) {
- DenseMap<MDNode *, unsigned>::iterator II
- = DebugLocInfo.DebugIdMap.find(Loc.getNode());
- if (II != DebugLocInfo.DebugIdMap.end())
- return DebugLoc::get(II->second);
-
- // Add a new location entry.
- unsigned Id = DebugLocInfo.DebugLocations.size();
- DebugLocInfo.DebugLocations.push_back(Loc.getNode());
- DebugLocInfo.DebugIdMap[Loc.getNode()] = Id;
-
- return DebugLoc::get(Id);
-}
-
/// getDISubprogram - Find subprogram that is enclosing this scope.
-DISubprogram llvm::getDISubprogram(MDNode *Scope) {
+DISubprogram llvm::getDISubprogram(const MDNode *Scope) {
DIDescriptor D(Scope);
- if (D.isNull())
- return DISubprogram();
-
- if (D.isCompileUnit())
- return DISubprogram();
-
if (D.isSubprogram())
return DISubprogram(Scope);
-
+
if (D.isLexicalBlock())
- return getDISubprogram(DILexicalBlock(Scope).getContext().getNode());
-
+ return getDISubprogram(DILexicalBlock(Scope).getContext());
+
return DISubprogram();
}
/// getDICompositeType - Find underlying composite type.
DICompositeType llvm::getDICompositeType(DIType T) {
- if (T.isNull())
- return DICompositeType();
-
if (T.isCompositeType())
- return DICompositeType(T.getNode());
-
+ return DICompositeType(T);
+
if (T.isDerivedType())
- return getDICompositeType(DIDerivedType(T.getNode()).getTypeDerivedFrom());
-
+ return getDICompositeType(DIDerivedType(T).getTypeDerivedFrom());
+
return DICompositeType();
}
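
The DebugInfo.cpp rewrite follows the 2.8 debug-info API: the DI* wrappers convert implicitly to MDNode* (so the .getNode() calls disappear), isNull() checks become Verify()/validity tests, instruction locations are read via getDebugLoc() rather than the "dbg" metadata kind, and every descriptor gains a print(raw_ostream&) method with dump() reduced to a thin wrapper around it. That last pattern is generic C++; a minimal sketch is below, where the Widget class is invented purely for illustration and only the print/dump split mirrors the patch.

// Sketch of the print(raw_ostream&) / dump() split used above.
// "Widget" is an invented example type, not part of LLVM or ClamAV.
#include "llvm/Support/Debug.h"        // dbgs()
#include "llvm/Support/raw_ostream.h"  // raw_ostream, errs()

using namespace llvm;

class Widget {
  unsigned Line;
public:
  explicit Widget(unsigned L) : Line(L) {}

  // print - Write a description to an arbitrary stream.
  void print(raw_ostream &OS) const {
    OS << "[widget at line " << Line << "]";
  }

  // dump - Convenience wrapper: print to the debug stream with a newline.
  void dump() const {
    print(dbgs());
    dbgs() << '\n';
  }
};

// Example use: Widget(42).print(errs()); or, when debugging, Widget(42).dump();
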
diff --git a/libclamav/c++/llvm/lib/Analysis/DomPrinter.cpp b/libclamav/c++/llvm/lib/Analysis/DomPrinter.cpp
index 3af687a..9f34094 100644
--- a/libclamav/c++/llvm/lib/Analysis/DomPrinter.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/DomPrinter.cpp
@@ -43,10 +43,10 @@ struct DOTGraphTraits<DomTreeNode*> : public DefaultDOTGraphTraits {
if (isSimple())
return DOTGraphTraits<const Function*>
- ::getSimpleNodeLabel(BB, BB->getParent());
+ ::getSimpleNodeLabel(BB, BB->getParent());
else
return DOTGraphTraits<const Function*>
- ::getCompleteNodeLabel(BB, BB->getParent());
+ ::getCompleteNodeLabel(BB, BB->getParent());
}
};
@@ -83,127 +83,103 @@ struct DOTGraphTraits<PostDominatorTree*>
}
namespace {
-template <class Analysis, bool OnlyBBS>
-struct GenericGraphViewer : public FunctionPass {
- std::string Name;
-
- GenericGraphViewer(std::string GraphName, const void *ID) : FunctionPass(ID) {
- Name = GraphName;
- }
-
- virtual bool runOnFunction(Function &F) {
- Analysis *Graph;
- std::string Title, GraphName;
- Graph = &getAnalysis<Analysis>();
- GraphName = DOTGraphTraits<Analysis*>::getGraphName(Graph);
- Title = GraphName + " for '" + F.getNameStr() + "' function";
- ViewGraph(Graph, Name, OnlyBBS, Title);
-
- return false;
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- AU.addRequired<Analysis>();
- }
-};
-
struct DomViewer
: public DOTGraphTraitsViewer<DominatorTree, false> {
static char ID;
- DomViewer() : DOTGraphTraitsViewer<DominatorTree, false>("dom", &ID){}
+ DomViewer() : DOTGraphTraitsViewer<DominatorTree, false>("dom", ID){}
};
struct DomOnlyViewer
: public DOTGraphTraitsViewer<DominatorTree, true> {
static char ID;
- DomOnlyViewer() : DOTGraphTraitsViewer<DominatorTree, true>("domonly", &ID){}
+ DomOnlyViewer() : DOTGraphTraitsViewer<DominatorTree, true>("domonly", ID){}
};
struct PostDomViewer
: public DOTGraphTraitsViewer<PostDominatorTree, false> {
static char ID;
PostDomViewer() :
- DOTGraphTraitsViewer<PostDominatorTree, false>("postdom", &ID){}
+ DOTGraphTraitsViewer<PostDominatorTree, false>("postdom", ID){}
};
struct PostDomOnlyViewer
: public DOTGraphTraitsViewer<PostDominatorTree, true> {
static char ID;
PostDomOnlyViewer() :
- DOTGraphTraitsViewer<PostDominatorTree, true>("postdomonly", &ID){}
+ DOTGraphTraitsViewer<PostDominatorTree, true>("postdomonly", ID){}
};
} // end anonymous namespace
char DomViewer::ID = 0;
-RegisterPass<DomViewer> A("view-dom",
- "View dominance tree of function");
+INITIALIZE_PASS(DomViewer, "view-dom",
+ "View dominance tree of function", false, false);
char DomOnlyViewer::ID = 0;
-RegisterPass<DomOnlyViewer> B("view-dom-only",
- "View dominance tree of function "
- "(with no function bodies)");
+INITIALIZE_PASS(DomOnlyViewer, "view-dom-only",
+ "View dominance tree of function (with no function bodies)",
+ false, false);
char PostDomViewer::ID = 0;
-RegisterPass<PostDomViewer> C("view-postdom",
- "View postdominance tree of function");
+INITIALIZE_PASS(PostDomViewer, "view-postdom",
+ "View postdominance tree of function", false, false);
char PostDomOnlyViewer::ID = 0;
-RegisterPass<PostDomOnlyViewer> D("view-postdom-only",
- "View postdominance tree of function "
- "(with no function bodies)");
+INITIALIZE_PASS(PostDomOnlyViewer, "view-postdom-only",
+ "View postdominance tree of function "
+ "(with no function bodies)",
+ false, false);
namespace {
struct DomPrinter
: public DOTGraphTraitsPrinter<DominatorTree, false> {
static char ID;
- DomPrinter() : DOTGraphTraitsPrinter<DominatorTree, false>("dom", &ID) {}
+ DomPrinter() : DOTGraphTraitsPrinter<DominatorTree, false>("dom", ID) {}
};
struct DomOnlyPrinter
: public DOTGraphTraitsPrinter<DominatorTree, true> {
static char ID;
- DomOnlyPrinter() : DOTGraphTraitsPrinter<DominatorTree, true>("domonly", &ID) {}
+ DomOnlyPrinter() : DOTGraphTraitsPrinter<DominatorTree, true>("domonly", ID) {}
};
struct PostDomPrinter
: public DOTGraphTraitsPrinter<PostDominatorTree, false> {
static char ID;
PostDomPrinter() :
- DOTGraphTraitsPrinter<PostDominatorTree, false>("postdom", &ID) {}
+ DOTGraphTraitsPrinter<PostDominatorTree, false>("postdom", ID) {}
};
struct PostDomOnlyPrinter
: public DOTGraphTraitsPrinter<PostDominatorTree, true> {
static char ID;
PostDomOnlyPrinter() :
- DOTGraphTraitsPrinter<PostDominatorTree, true>("postdomonly", &ID) {}
+ DOTGraphTraitsPrinter<PostDominatorTree, true>("postdomonly", ID) {}
};
} // end anonymous namespace
char DomPrinter::ID = 0;
-RegisterPass<DomPrinter> E("dot-dom",
- "Print dominance tree of function "
- "to 'dot' file");
+INITIALIZE_PASS(DomPrinter, "dot-dom",
+ "Print dominance tree of function to 'dot' file",
+ false, false);
char DomOnlyPrinter::ID = 0;
-RegisterPass<DomOnlyPrinter> F("dot-dom-only",
- "Print dominance tree of function "
- "to 'dot' file "
- "(with no function bodies)");
+INITIALIZE_PASS(DomOnlyPrinter, "dot-dom-only",
+ "Print dominance tree of function to 'dot' file "
+ "(with no function bodies)",
+ false, false);
char PostDomPrinter::ID = 0;
-RegisterPass<PostDomPrinter> G("dot-postdom",
- "Print postdominance tree of function "
- "to 'dot' file");
+INITIALIZE_PASS(PostDomPrinter, "dot-postdom",
+ "Print postdominance tree of function to 'dot' file",
+ false, false);
char PostDomOnlyPrinter::ID = 0;
-RegisterPass<PostDomOnlyPrinter> H("dot-postdom-only",
- "Print postdominance tree of function "
- "to 'dot' file "
- "(with no function bodies)");
+INITIALIZE_PASS(PostDomOnlyPrinter, "dot-postdom-only",
+ "Print postdominance tree of function to 'dot' file "
+ "(with no function bodies)",
+ false, false);
// Create methods available outside of this file, to use them
// "include/llvm/LinkAllPasses.h". Otherwise the pass would be deleted by
diff --git a/libclamav/c++/llvm/lib/Analysis/IPA/CallGraph.cpp b/libclamav/c++/llvm/lib/Analysis/IPA/CallGraph.cpp
index 8c43aa1..b363528 100644
--- a/libclamav/c++/llvm/lib/Analysis/IPA/CallGraph.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/IPA/CallGraph.cpp
@@ -42,7 +42,7 @@ class BasicCallGraph : public ModulePass, public CallGraph {
public:
static char ID; // Class identification, replacement for typeinfo
- BasicCallGraph() : ModulePass(&ID), Root(0),
+ BasicCallGraph() : ModulePass(ID), Root(0),
ExternalCallingNode(0), CallsExternalNode(0) {}
// runOnModule - Compute the call graph for the specified module.
@@ -86,8 +86,8 @@ public:
/// an analysis interface through multiple inheritance. If needed, it should
/// override this to adjust the this pointer as needed for the specified pass
/// info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&CallGraph::ID))
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &CallGraph::ID)
return (CallGraph*)this;
return this;
}
@@ -126,13 +126,15 @@ private:
}
// Loop over all of the users of the function, looking for non-call uses.
- for (Value::use_iterator I = F->use_begin(), E = F->use_end(); I != E; ++I)
- if ((!isa<CallInst>(I) && !isa<InvokeInst>(I))
- || !CallSite(cast<Instruction>(I)).isCallee(I)) {
+ for (Value::use_iterator I = F->use_begin(), E = F->use_end(); I != E; ++I){
+ User *U = *I;
+ if ((!isa<CallInst>(U) && !isa<InvokeInst>(U))
+ || !CallSite(cast<Instruction>(U)).isCallee(I)) {
// Not a call, or being used as a parameter rather than as the callee.
ExternalCallingNode->addCalledFunction(CallSite(), Node);
break;
}
+ }
// If this function is not defined in this translation unit, it could call
// anything.
@@ -143,8 +145,8 @@ private:
for (Function::iterator BB = F->begin(), BBE = F->end(); BB != BBE; ++BB)
for (BasicBlock::iterator II = BB->begin(), IE = BB->end();
II != IE; ++II) {
- CallSite CS = CallSite::get(II);
- if (CS.getInstruction() && !isa<DbgInfoIntrinsic>(II)) {
+ CallSite CS(cast<Value>(II));
+ if (CS && !isa<DbgInfoIntrinsic>(II)) {
const Function *Callee = CS.getCalledFunction();
if (Callee)
Node->addCalledFunction(CS, getOrInsertFunction(Callee));
@@ -158,8 +160,11 @@ private:
// destroy - Release memory for the call graph
virtual void destroy() {
/// CallsExternalNode is not in the function map, delete it explicitly.
- delete CallsExternalNode;
- CallsExternalNode = 0;
+ if (CallsExternalNode) {
+ CallsExternalNode->allReferencesDropped();
+ delete CallsExternalNode;
+ CallsExternalNode = 0;
+ }
CallGraph::destroy();
}
};
@@ -167,9 +172,8 @@ private:
} //End anonymous namespace
static RegisterAnalysisGroup<CallGraph> X("Call Graph");
-static RegisterPass<BasicCallGraph>
-Y("basiccg", "Basic CallGraph Construction", false, true);
-static RegisterAnalysisGroup<CallGraph, true> Z(Y);
+INITIALIZE_AG_PASS(BasicCallGraph, CallGraph, "basiccg",
+ "Basic CallGraph Construction", false, true, true);
char CallGraph::ID = 0;
char BasicCallGraph::ID = 0;
@@ -181,6 +185,14 @@ void CallGraph::initialize(Module &M) {
void CallGraph::destroy() {
if (FunctionMap.empty()) return;
+ // Reset all node's use counts to zero before deleting them to prevent an
+ // assertion from firing.
+#ifndef NDEBUG
+ for (FunctionMapTy::iterator I = FunctionMap.begin(), E = FunctionMap.end();
+ I != E; ++I)
+ I->second->allReferencesDropped();
+#endif
+
for (FunctionMapTy::iterator I = FunctionMap.begin(), E = FunctionMap.end();
I != E; ++I)
delete I->second;
@@ -233,14 +245,16 @@ void CallGraphNode::print(raw_ostream &OS) const {
else
OS << "Call graph node <<null function>>";
- OS << "<<0x" << this << ">> #uses=" << getNumReferences() << '\n';
+ OS << "<<" << this << ">> #uses=" << getNumReferences() << '\n';
- for (const_iterator I = begin(), E = end(); I != E; ++I)
+ for (const_iterator I = begin(), E = end(); I != E; ++I) {
+ OS << " CS<" << I->first << "> calls ";
if (Function *FI = I->second->getFunction())
- OS << " Calls function '" << FI->getName() <<"'\n";
- else
- OS << " Calls external node\n";
- OS << "\n";
+ OS << "function '" << FI->getName() <<"'\n";
+ else
+ OS << "external node\n";
+ }
+ OS << '\n';
}
void CallGraphNode::dump() const { print(dbgs()); }
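
The new destroy() logic above calls allReferencesDropped() on every node before deleting it, so reference-count assertions cannot fire while a cyclic call graph is torn down. The following standalone sketch shows the same teardown order on a toy graph; Node and dropAllEdges() are placeholders, not the LLVM CallGraph classes.

    // Standalone sketch (not the LLVM CallGraph classes): why destroy() above
    // drops all references before deleting nodes. Only the teardown order,
    // mirroring allReferencesDropped(), is the point here.
    #include <cassert>
    #include <memory>
    #include <vector>

    struct Node {
        int refs = 0;                       // how many edges point at this node
        std::vector<Node *> callees;

        void addEdge(Node *to) { callees.push_back(to); ++to->refs; }

        // Equivalent of allReferencesDropped(): forget outgoing edges and
        // decrement the targets' counts so nothing is left dangling.
        void dropAllEdges() {
            for (Node *c : callees)
                --c->refs;
            callees.clear();
        }

        ~Node() { assert(refs == 0 && "deleting a node something still points at"); }
    };

    int main() {
        std::vector<std::unique_ptr<Node>> graph;
        graph.emplace_back(new Node);       // graph[0]
        graph.emplace_back(new Node);       // graph[1]
        graph[0]->addEdge(graph[1].get());
        graph[1]->addEdge(graph[0].get());  // cycle: deletion order alone can't help

        // Teardown: first break every edge, then free the nodes.
        for (auto &n : graph)
            n->dropAllEdges();
        graph.clear();                      // destructors now see refs == 0
        return 0;
    }
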
diff --git a/libclamav/c++/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp b/libclamav/c++/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
index 0e333d1..b7a27cb 100644
--- a/libclamav/c++/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/IPA/CallGraphSCCPass.cpp
@@ -17,15 +17,23 @@
#define DEBUG_TYPE "cgscc-passmgr"
#include "llvm/CallGraphSCCPass.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/Function.h"
+#include "llvm/PassManagers.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/ADT/SCCIterator.h"
-#include "llvm/PassManagers.h"
-#include "llvm/Function.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
-#include "llvm/IntrinsicInst.h"
+#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+static cl::opt<unsigned>
+MaxIterations("max-cg-scc-iterations", cl::ReallyHidden, cl::init(4));
+
+STATISTIC(MaxSCCIterations, "Maximum CGSCCPassMgr iterations on one SCC");
+
//===----------------------------------------------------------------------===//
// CGPassManager
//
@@ -37,7 +45,7 @@ class CGPassManager : public ModulePass, public PMDataManager {
public:
static char ID;
explicit CGPassManager(int Depth)
- : ModulePass(&ID), PMDataManager(Depth) { }
+ : ModulePass(ID), PMDataManager(Depth) { }
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the module, and if so, return true.
@@ -80,9 +88,13 @@ public:
}
private:
- bool RunPassOnSCC(Pass *P, std::vector<CallGraphNode*> &CurSCC,
- CallGraph &CG, bool &CallGraphUpToDate);
- void RefreshCallGraph(std::vector<CallGraphNode*> &CurSCC, CallGraph &CG,
+ bool RunAllPassesOnSCC(CallGraphSCC &CurSCC, CallGraph &CG,
+ bool &DevirtualizedCall);
+
+ bool RunPassOnSCC(Pass *P, CallGraphSCC &CurSCC,
+ CallGraph &CG, bool &CallGraphUpToDate,
+ bool &DevirtualizedCall);
+ bool RefreshCallGraph(CallGraphSCC &CurSCC, CallGraph &CG,
bool IsCheckingMode);
};
@@ -90,21 +102,24 @@ private:
char CGPassManager::ID = 0;
-bool CGPassManager::RunPassOnSCC(Pass *P, std::vector<CallGraphNode*> &CurSCC,
- CallGraph &CG, bool &CallGraphUpToDate) {
+
+bool CGPassManager::RunPassOnSCC(Pass *P, CallGraphSCC &CurSCC,
+ CallGraph &CG, bool &CallGraphUpToDate,
+ bool &DevirtualizedCall) {
bool Changed = false;
PMDataManager *PM = P->getAsPMDataManager();
if (PM == 0) {
CallGraphSCCPass *CGSP = (CallGraphSCCPass*)P;
if (!CallGraphUpToDate) {
- RefreshCallGraph(CurSCC, CG, false);
+ DevirtualizedCall |= RefreshCallGraph(CurSCC, CG, false);
CallGraphUpToDate = true;
}
- Timer *T = StartPassTimer(CGSP);
- Changed = CGSP->runOnSCC(CurSCC);
- StopPassTimer(CGSP, T);
+ {
+ TimeRegion PassTimer(getPassTimer(CGSP));
+ Changed = CGSP->runOnSCC(CurSCC);
+ }
// After the CGSCCPass is done, when assertions are enabled, use
// RefreshCallGraph to verify that the callgraph was correctly updated.
@@ -122,12 +137,12 @@ bool CGPassManager::RunPassOnSCC(Pass *P, std::vector<CallGraphNode*> &CurSCC,
FPPassManager *FPP = (FPPassManager*)P;
// Run pass P on all functions in the current SCC.
- for (unsigned i = 0, e = CurSCC.size(); i != e; ++i) {
- if (Function *F = CurSCC[i]->getFunction()) {
+ for (CallGraphSCC::iterator I = CurSCC.begin(), E = CurSCC.end();
+ I != E; ++I) {
+ if (Function *F = (*I)->getFunction()) {
dumpPassInfo(P, EXECUTION_MSG, ON_FUNCTION_MSG, F->getName());
- Timer *T = StartPassTimer(FPP);
+ TimeRegion PassTimer(getPassTimer(FPP));
Changed |= FPP->runOnFunction(*F);
- StopPassTimer(FPP, T);
}
}
@@ -147,26 +162,39 @@ bool CGPassManager::RunPassOnSCC(Pass *P, std::vector<CallGraphNode*> &CurSCC,
/// FunctionPasses have potentially munged the callgraph, and can be used after
/// CallGraphSCC passes to verify that they correctly updated the callgraph.
///
-void CGPassManager::RefreshCallGraph(std::vector<CallGraphNode*> &CurSCC,
+/// This function returns true if it devirtualized an existing function call,
+/// meaning it turned an indirect call into a direct call. This happens when
+/// a function pass like GVN optimizes away stuff feeding the indirect call.
+/// This never happens in checking mode.
+///
+bool CGPassManager::RefreshCallGraph(CallGraphSCC &CurSCC,
CallGraph &CG, bool CheckingMode) {
DenseMap<Value*, CallGraphNode*> CallSites;
DEBUG(dbgs() << "CGSCCPASSMGR: Refreshing SCC with " << CurSCC.size()
<< " nodes:\n";
- for (unsigned i = 0, e = CurSCC.size(); i != e; ++i)
- CurSCC[i]->dump();
+ for (CallGraphSCC::iterator I = CurSCC.begin(), E = CurSCC.end();
+ I != E; ++I)
+ (*I)->dump();
);
bool MadeChange = false;
+ bool DevirtualizedCall = false;
// Scan all functions in the SCC.
- for (unsigned sccidx = 0, e = CurSCC.size(); sccidx != e; ++sccidx) {
- CallGraphNode *CGN = CurSCC[sccidx];
+ unsigned FunctionNo = 0;
+ for (CallGraphSCC::iterator SCCIdx = CurSCC.begin(), E = CurSCC.end();
+ SCCIdx != E; ++SCCIdx, ++FunctionNo) {
+ CallGraphNode *CGN = *SCCIdx;
Function *F = CGN->getFunction();
if (F == 0 || F->isDeclaration()) continue;
// Walk the function body looking for call sites. Sync up the call sites in
// CGN with those actually in the function.
+
+ // Keep track of the number of direct and indirect calls that were
+ // invalidated and removed.
+ unsigned NumDirectRemoved = 0, NumIndirectRemoved = 0;
// Get the set of call sites currently in the function.
for (CallGraphNode::iterator I = CGN->begin(), E = CGN->end(); I != E; ) {
@@ -181,10 +209,16 @@ void CGPassManager::RefreshCallGraph(std::vector<CallGraphNode*> &CurSCC,
// If the call edge is not from a call or invoke, then the function
// pass RAUW'd a call with another value. This can happen when
// constant folding happens of well known functions etc.
- CallSite::get(I->first).getInstruction() == 0) {
+ !CallSite(I->first)) {
assert(!CheckingMode &&
"CallGraphSCCPass did not update the CallGraph correctly!");
+ // If this was an indirect call site, count it.
+ if (I->second->getFunction() == 0)
+ ++NumIndirectRemoved;
+ else
+ ++NumDirectRemoved;
+
// Just remove the edge from the set of callees, keep track of whether
// I points to the last element of the vector.
bool WasLast = I + 1 == E;
@@ -206,10 +240,13 @@ void CGPassManager::RefreshCallGraph(std::vector<CallGraphNode*> &CurSCC,
}
// Loop over all of the instructions in the function, getting the callsites.
+ // Keep track of the number of direct/indirect calls added.
+ unsigned NumDirectAdded = 0, NumIndirectAdded = 0;
+
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
- CallSite CS = CallSite::get(I);
- if (!CS.getInstruction() || isa<DbgInfoIntrinsic>(I)) continue;
+ CallSite CS(cast<Value>(I));
+ if (!CS || isa<DbgInfoIntrinsic>(I)) continue;
// If this call site already existed in the callgraph, just verify it
// matches up to expectations and remove it from CallSites.
@@ -240,19 +277,21 @@ void CGPassManager::RefreshCallGraph(std::vector<CallGraphNode*> &CurSCC,
// If not, we either went from a direct call to indirect, indirect to
// direct, or direct to different direct.
CallGraphNode *CalleeNode;
- if (Function *Callee = CS.getCalledFunction())
+ if (Function *Callee = CS.getCalledFunction()) {
CalleeNode = CG.getOrInsertFunction(Callee);
- else
+ // Keep track of whether we turned an indirect call into a direct
+ // one.
+ if (ExistingNode->getFunction() == 0) {
+ DevirtualizedCall = true;
+ DEBUG(dbgs() << " CGSCCPASSMGR: Devirtualized call to '"
+ << Callee->getName() << "'\n");
+ }
+ } else {
CalleeNode = CG.getCallsExternalNode();
+ }
// Update the edge target in CGN.
- for (CallGraphNode::iterator I = CGN->begin(); ; ++I) {
- assert(I != CGN->end() && "Didn't find call entry");
- if (I->first == CS.getInstruction()) {
- I->second = CalleeNode;
- break;
- }
- }
+ CGN->replaceCallEdge(CS, CS, CalleeNode);
MadeChange = true;
continue;
}
@@ -260,19 +299,34 @@ void CGPassManager::RefreshCallGraph(std::vector<CallGraphNode*> &CurSCC,
assert(!CheckingMode &&
"CallGraphSCCPass did not update the CallGraph correctly!");
- // If the call site didn't exist in the CGN yet, add it. We assume that
- // newly introduced call sites won't be indirect. This could be fixed
- // in the future.
+ // If the call site didn't exist in the CGN yet, add it.
CallGraphNode *CalleeNode;
- if (Function *Callee = CS.getCalledFunction())
+ if (Function *Callee = CS.getCalledFunction()) {
CalleeNode = CG.getOrInsertFunction(Callee);
- else
+ ++NumDirectAdded;
+ } else {
CalleeNode = CG.getCallsExternalNode();
+ ++NumIndirectAdded;
+ }
CGN->addCalledFunction(CS, CalleeNode);
MadeChange = true;
}
+ // We scanned the old callgraph node, removing invalidated call sites and
+ // then added back newly found call sites. One thing that can happen is
+ // that an old indirect call site was deleted and replaced with a new direct
+ // call. In this case, we have devirtualized a call, and CGSCCPM would like
+ // to iteratively optimize the new code. Unfortunately, we don't really
+ // have a great way to detect when this happens. As an approximation, we
+ // just look at whether the number of indirect calls is reduced and the
+ // number of direct calls is increased. There are tons of ways to fool this
+ // (e.g. DCE'ing an indirect call and duplicating an unrelated block with a
+ // direct call) but this is close enough.
+ if (NumIndirectRemoved > NumIndirectAdded &&
+ NumDirectRemoved < NumDirectAdded)
+ DevirtualizedCall = true;
+
// After scanning this function, if we still have entries in callsites, then
// they are dangling pointers. WeakVH should save us for this, so abort if
// this happens.
@@ -280,18 +334,85 @@ void CGPassManager::RefreshCallGraph(std::vector<CallGraphNode*> &CurSCC,
// Periodically do an explicit clear to remove tombstones when processing
// large scc's.
- if ((sccidx & 15) == 0)
+ if ((FunctionNo & 15) == 15)
CallSites.clear();
}
DEBUG(if (MadeChange) {
dbgs() << "CGSCCPASSMGR: Refreshed SCC is now:\n";
- for (unsigned i = 0, e = CurSCC.size(); i != e; ++i)
- CurSCC[i]->dump();
+ for (CallGraphSCC::iterator I = CurSCC.begin(), E = CurSCC.end();
+ I != E; ++I)
+ (*I)->dump();
+ if (DevirtualizedCall)
+ dbgs() << "CGSCCPASSMGR: Refresh devirtualized a call!\n";
+
} else {
dbgs() << "CGSCCPASSMGR: SCC Refresh didn't change call graph.\n";
}
);
+
+ return DevirtualizedCall;
+}
+
+/// RunAllPassesOnSCC - Execute the body of the entire pass manager on the
+/// specified SCC. This keeps track of whether a function pass devirtualizes
+/// any calls and returns it in DevirtualizedCall.
+bool CGPassManager::RunAllPassesOnSCC(CallGraphSCC &CurSCC, CallGraph &CG,
+ bool &DevirtualizedCall) {
+ bool Changed = false;
+
+ // CallGraphUpToDate - Keep track of whether the callgraph is known to be
+ // up-to-date or not. The CGSSC pass manager runs two types of passes:
+ // CallGraphSCC Passes and other random function passes. Because other
+ // random function passes are not CallGraph aware, they may clobber the
+ // call graph by introducing new calls or deleting other ones. This flag
+ // is set to false when we run a function pass so that we know to clean up
+ // the callgraph when we need to run a CGSCCPass again.
+ bool CallGraphUpToDate = true;
+
+ // Run all passes on current SCC.
+ for (unsigned PassNo = 0, e = getNumContainedPasses();
+ PassNo != e; ++PassNo) {
+ Pass *P = getContainedPass(PassNo);
+
+ // If we're in -debug-pass=Executions mode, construct the SCC node list,
+ // otherwise avoid constructing this string as it is expensive.
+ if (isPassDebuggingExecutionsOrMore()) {
+ std::string Functions;
+ #ifndef NDEBUG
+ raw_string_ostream OS(Functions);
+ for (CallGraphSCC::iterator I = CurSCC.begin(), E = CurSCC.end();
+ I != E; ++I) {
+ if (I != CurSCC.begin()) OS << ", ";
+ (*I)->print(OS);
+ }
+ OS.flush();
+ #endif
+ dumpPassInfo(P, EXECUTION_MSG, ON_CG_MSG, Functions);
+ }
+ dumpRequiredSet(P);
+
+ initializeAnalysisImpl(P);
+
+ // Actually run this pass on the current SCC.
+ Changed |= RunPassOnSCC(P, CurSCC, CG,
+ CallGraphUpToDate, DevirtualizedCall);
+
+ if (Changed)
+ dumpPassInfo(P, MODIFICATION_MSG, ON_CG_MSG, "");
+ dumpPreservedSet(P);
+
+ verifyPreservedAnalysis(P);
+ removeNotPreservedAnalysis(P);
+ recordAvailableAnalysis(P);
+ removeDeadPasses(P, "", ON_CG_MSG);
+ }
+
+ // If the callgraph was left out of date (because the last pass run was a
+ // functionpass), refresh it before we move on to the next SCC.
+ if (!CallGraphUpToDate)
+ DevirtualizedCall |= RefreshCallGraph(CurSCC, CG, false);
+ return Changed;
}
/// run - Execute all of the passes scheduled for execution. Keep track of
@@ -299,72 +420,53 @@ void CGPassManager::RefreshCallGraph(std::vector<CallGraphNode*> &CurSCC,
bool CGPassManager::runOnModule(Module &M) {
CallGraph &CG = getAnalysis<CallGraph>();
bool Changed = doInitialization(CG);
-
- std::vector<CallGraphNode*> CurSCC;
// Walk the callgraph in bottom-up SCC order.
- for (scc_iterator<CallGraph*> CGI = scc_begin(&CG), E = scc_end(&CG);
- CGI != E;) {
+ scc_iterator<CallGraph*> CGI = scc_begin(&CG);
+
+ CallGraphSCC CurSCC(&CGI);
+ while (!CGI.isAtEnd()) {
// Copy the current SCC and increment past it so that the pass can hack
// on the SCC if it wants to without invalidating our iterator.
- CurSCC = *CGI;
+ std::vector<CallGraphNode*> &NodeVec = *CGI;
+ CurSCC.initialize(&NodeVec[0], &NodeVec[0]+NodeVec.size());
++CGI;
+ // At the top level, we run all the passes in this pass manager on the
+ // functions in this SCC. However, we support iterative compilation in the
+ // case where a function pass devirtualizes a call to a function. For
+ // example, it is very common for a function pass (often GVN or instcombine)
+ // to eliminate the addressing that feeds into a call. With that improved
+ // information, we would like the call to be an inline candidate, infer
+ // mod-ref information etc.
+ //
+ // Because of this, we allow iteration up to a specified iteration count.
+ // This only happens in the case of a devirtualized call, so we only burn
+ // compile time in the case that we're making progress. We also have a hard
+ // iteration count limit in case there is crazy code.
+ unsigned Iteration = 0;
+ bool DevirtualizedCall = false;
+ do {
+ DEBUG(if (Iteration)
+ dbgs() << " SCCPASSMGR: Re-visiting SCC, iteration #"
+ << Iteration << '\n');
+ DevirtualizedCall = false;
+ Changed |= RunAllPassesOnSCC(CurSCC, CG, DevirtualizedCall);
+ } while (Iteration++ < MaxIterations && DevirtualizedCall);
- // CallGraphUpToDate - Keep track of whether the callgraph is known to be
- // up-to-date or not. The CGSSC pass manager runs two types of passes:
- // CallGraphSCC Passes and other random function passes. Because other
- // random function passes are not CallGraph aware, they may clobber the
- // call graph by introducing new calls or deleting other ones. This flag
- // is set to false when we run a function pass so that we know to clean up
- // the callgraph when we need to run a CGSCCPass again.
- bool CallGraphUpToDate = true;
+ if (DevirtualizedCall)
+ DEBUG(dbgs() << " CGSCCPASSMGR: Stopped iteration after " << Iteration
+ << " times, due to -max-cg-scc-iterations\n");
- // Run all passes on current SCC.
- for (unsigned PassNo = 0, e = getNumContainedPasses();
- PassNo != e; ++PassNo) {
- Pass *P = getContainedPass(PassNo);
-
- // If we're in -debug-pass=Executions mode, construct the SCC node list,
- // otherwise avoid constructing this string as it is expensive.
- if (isPassDebuggingExecutionsOrMore()) {
- std::string Functions;
-#ifndef NDEBUG
- raw_string_ostream OS(Functions);
- for (unsigned i = 0, e = CurSCC.size(); i != e; ++i) {
- if (i) OS << ", ";
- CurSCC[i]->print(OS);
- }
- OS.flush();
-#endif
- dumpPassInfo(P, EXECUTION_MSG, ON_CG_MSG, Functions);
- }
- dumpRequiredSet(P);
-
- initializeAnalysisImpl(P);
-
- // Actually run this pass on the current SCC.
- Changed |= RunPassOnSCC(P, CurSCC, CG, CallGraphUpToDate);
-
- if (Changed)
- dumpPassInfo(P, MODIFICATION_MSG, ON_CG_MSG, "");
- dumpPreservedSet(P);
-
- verifyPreservedAnalysis(P);
- removeNotPreservedAnalysis(P);
- recordAvailableAnalysis(P);
- removeDeadPasses(P, "", ON_CG_MSG);
- }
+ if (Iteration > MaxSCCIterations)
+ MaxSCCIterations = Iteration;
- // If the callgraph was left out of date (because the last pass run was a
- // functionpass), refresh it before we move on to the next SCC.
- if (!CallGraphUpToDate)
- RefreshCallGraph(CurSCC, CG, false);
}
Changed |= doFinalization(CG);
return Changed;
}
+
/// Initialize CG
bool CGPassManager::doInitialization(CallGraph &CG) {
bool Changed = false;
@@ -395,6 +497,32 @@ bool CGPassManager::doFinalization(CallGraph &CG) {
return Changed;
}
+//===----------------------------------------------------------------------===//
+// CallGraphSCC Implementation
+//===----------------------------------------------------------------------===//
+
+/// ReplaceNode - This informs the SCC and the pass manager that the specified
+/// Old node has been deleted, and New is to be used in its place.
+void CallGraphSCC::ReplaceNode(CallGraphNode *Old, CallGraphNode *New) {
+ assert(Old != New && "Should not replace node with self");
+ for (unsigned i = 0; ; ++i) {
+ assert(i != Nodes.size() && "Node not in SCC");
+ if (Nodes[i] != Old) continue;
+ Nodes[i] = New;
+ break;
+ }
+
+ // Update the active scc_iterator so that it doesn't contain dangling
+ // pointers to the old CallGraphNode.
+ scc_iterator<CallGraph*> *CGI = (scc_iterator<CallGraph*>*)Context;
+ CGI->ReplaceNode(Old, New);
+}
+
+
+//===----------------------------------------------------------------------===//
+// CallGraphSCCPass Implementation
+//===----------------------------------------------------------------------===//
+
/// Assign pass manager to manage this pass.
void CallGraphSCCPass::assignPassManager(PMStack &PMS,
PassManagerType PreferredType) {
@@ -439,3 +567,43 @@ void CallGraphSCCPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<CallGraph>();
AU.addPreserved<CallGraph>();
}
+
+
+//===----------------------------------------------------------------------===//
+// PrintCallGraphPass Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// PrintCallGraphPass - Print a Module corresponding to a call graph.
+ ///
+ class PrintCallGraphPass : public CallGraphSCCPass {
+ std::string Banner;
+ raw_ostream &Out; // raw_ostream to print on.
+
+ public:
+ static char ID;
+ PrintCallGraphPass() : CallGraphSCCPass(ID), Out(dbgs()) {}
+ PrintCallGraphPass(const std::string &B, raw_ostream &o)
+ : CallGraphSCCPass(ID), Banner(B), Out(o) {}
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ }
+
+ bool runOnSCC(CallGraphSCC &SCC) {
+ Out << Banner;
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
+ (*I)->getFunction()->print(Out);
+ return false;
+ }
+ };
+
+} // end anonymous namespace.
+
+char PrintCallGraphPass::ID = 0;
+
+Pass *CallGraphSCCPass::createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const {
+ return new PrintCallGraphPass(Banner, O);
+}
+
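
The reworked runOnModule() above re-runs the whole pass pipeline on an SCC for as long as a round devirtualized a call, with -max-cg-scc-iterations as a hard cap. A minimal standalone sketch of that capped re-iteration loop, using a toy runPasses() as a stand-in for RunAllPassesOnSCC():

    // Standalone sketch (not CGPassManager itself): the capped re-iteration
    // loop from runOnModule() above. runPasses() reports whether this round
    // made progress worth another iteration (a "devirtualized call").
    #include <cstdio>

    struct SCCState {
        int indirectCalls = 3;              // toy model of remaining work
    };

    // Returns true if this round turned an indirect call into a direct one,
    // mirroring the DevirtualizedCall out-parameter.
    static bool runPasses(SCCState &scc) {
        if (scc.indirectCalls == 0)
            return false;
        --scc.indirectCalls;
        return true;
    }

    int main() {
        const unsigned MaxIterations = 4;   // hard cap, like -max-cg-scc-iterations
        SCCState scc;

        unsigned Iteration = 0;
        bool devirtualized;
        do {
            if (Iteration)
                std::printf("re-visiting SCC, iteration #%u\n", Iteration);
            devirtualized = runPasses(scc);
        } while (Iteration++ < MaxIterations && devirtualized);

        std::printf("stopped after %u iteration(s)\n", Iteration);
        return 0;
    }
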
diff --git a/libclamav/c++/llvm/lib/Analysis/IPA/FindUsedTypes.cpp b/libclamav/c++/llvm/lib/Analysis/IPA/FindUsedTypes.cpp
index c4fb0b9..8eed9d6 100644
--- a/libclamav/c++/llvm/lib/Analysis/IPA/FindUsedTypes.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/IPA/FindUsedTypes.cpp
@@ -23,8 +23,8 @@
using namespace llvm;
char FindUsedTypes::ID = 0;
-static RegisterPass<FindUsedTypes>
-X("print-used-types", "Find Used Types", false, true);
+INITIALIZE_PASS(FindUsedTypes, "print-used-types",
+ "Find Used Types", false, true);
// IncorporateType - Incorporate one type and all of its subtypes into the
// collection of used types.
diff --git a/libclamav/c++/llvm/lib/Analysis/IPA/GlobalsModRef.cpp b/libclamav/c++/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
index 7b43089..6759b0a 100644
--- a/libclamav/c++/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
@@ -47,14 +47,15 @@ namespace {
/// GlobalInfo - Maintain mod/ref info for all of the globals without
/// addresses taken that are read or written (transitively) by this
/// function.
- std::map<GlobalValue*, unsigned> GlobalInfo;
+ std::map<const GlobalValue*, unsigned> GlobalInfo;
/// MayReadAnyGlobal - May read global variables, but it is not known which.
bool MayReadAnyGlobal;
- unsigned getInfoForGlobal(GlobalValue *GV) const {
+ unsigned getInfoForGlobal(const GlobalValue *GV) const {
unsigned Effect = MayReadAnyGlobal ? AliasAnalysis::Ref : 0;
- std::map<GlobalValue*, unsigned>::const_iterator I = GlobalInfo.find(GV);
+ std::map<const GlobalValue*, unsigned>::const_iterator I =
+ GlobalInfo.find(GV);
if (I != GlobalInfo.end())
Effect |= I->second;
return Effect;
@@ -71,23 +72,23 @@ namespace {
class GlobalsModRef : public ModulePass, public AliasAnalysis {
/// NonAddressTakenGlobals - The globals that do not have their addresses
/// taken.
- std::set<GlobalValue*> NonAddressTakenGlobals;
+ std::set<const GlobalValue*> NonAddressTakenGlobals;
/// IndirectGlobals - The memory pointed to by this global is known to be
/// 'owned' by the global.
- std::set<GlobalValue*> IndirectGlobals;
+ std::set<const GlobalValue*> IndirectGlobals;
/// AllocsForIndirectGlobals - If an instruction allocates memory for an
/// indirect global, this map indicates which one.
- std::map<Value*, GlobalValue*> AllocsForIndirectGlobals;
+ std::map<const Value*, const GlobalValue*> AllocsForIndirectGlobals;
/// FunctionInfo - For each function, keep track of what globals are
/// modified or read.
- std::map<Function*, FunctionRecord> FunctionInfo;
+ std::map<const Function*, FunctionRecord> FunctionInfo;
public:
static char ID;
- GlobalsModRef() : ModulePass(&ID) {}
+ GlobalsModRef() : ModulePass(ID) {}
bool runOnModule(Module &M) {
InitializeAliasAnalysis(this); // set up super class
@@ -107,39 +108,39 @@ namespace {
//
AliasResult alias(const Value *V1, unsigned V1Size,
const Value *V2, unsigned V2Size);
- ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
- ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
- return AliasAnalysis::getModRefInfo(CS1,CS2);
+ ModRefResult getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size);
+ ModRefResult getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) {
+ return AliasAnalysis::getModRefInfo(CS1, CS2);
}
/// getModRefBehavior - Return the behavior of the specified function if
/// called from the specified call site. The call site may be null in which
/// case the most generic behavior of this function should be returned.
- ModRefBehavior getModRefBehavior(Function *F,
- std::vector<PointerAccessInfo> *Info) {
+ ModRefBehavior getModRefBehavior(const Function *F) {
if (FunctionRecord *FR = getFunctionInfo(F)) {
if (FR->FunctionEffect == 0)
return DoesNotAccessMemory;
else if ((FR->FunctionEffect & Mod) == 0)
return OnlyReadsMemory;
}
- return AliasAnalysis::getModRefBehavior(F, Info);
+ return AliasAnalysis::getModRefBehavior(F);
}
/// getModRefBehavior - Return the behavior of the specified function if
/// called from the specified call site. The call site may be null in which
/// case the most generic behavior of this function should be returned.
- ModRefBehavior getModRefBehavior(CallSite CS,
- std::vector<PointerAccessInfo> *Info) {
- Function* F = CS.getCalledFunction();
- if (!F) return AliasAnalysis::getModRefBehavior(CS, Info);
+ ModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
+ const Function* F = CS.getCalledFunction();
+ if (!F) return AliasAnalysis::getModRefBehavior(CS);
if (FunctionRecord *FR = getFunctionInfo(F)) {
if (FR->FunctionEffect == 0)
return DoesNotAccessMemory;
else if ((FR->FunctionEffect & Mod) == 0)
return OnlyReadsMemory;
}
- return AliasAnalysis::getModRefBehavior(CS, Info);
+ return AliasAnalysis::getModRefBehavior(CS);
}
virtual void deleteValue(Value *V);
@@ -149,8 +150,8 @@ namespace {
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&AliasAnalysis::ID))
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
@@ -158,8 +159,9 @@ namespace {
private:
/// getFunctionInfo - Return the function info for the function, or null if
/// we don't have anything useful to say about it.
- FunctionRecord *getFunctionInfo(Function *F) {
- std::map<Function*, FunctionRecord>::iterator I = FunctionInfo.find(F);
+ FunctionRecord *getFunctionInfo(const Function *F) {
+ std::map<const Function*, FunctionRecord>::iterator I =
+ FunctionInfo.find(F);
if (I != FunctionInfo.end())
return &I->second;
return 0;
@@ -175,9 +177,9 @@ namespace {
}
char GlobalsModRef::ID = 0;
-static RegisterPass<GlobalsModRef>
-X("globalsmodref-aa", "Simple mod/ref analysis for globals", false, true);
-static RegisterAnalysisGroup<AliasAnalysis> Y(X);
+INITIALIZE_AG_PASS(GlobalsModRef, AliasAnalysis,
+ "globalsmodref-aa", "Simple mod/ref analysis for globals",
+ false, true, false);
Pass *llvm::createGlobalsModRefPass() { return new GlobalsModRef(); }
@@ -233,33 +235,34 @@ bool GlobalsModRef::AnalyzeUsesOfPointer(Value *V,
GlobalValue *OkayStoreDest) {
if (!V->getType()->isPointerTy()) return true;
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
- if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+ for (Value::use_iterator UI = V->use_begin(), E=V->use_end(); UI != E; ++UI) {
+ User *U = *UI;
+ if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
Readers.push_back(LI->getParent()->getParent());
- } else if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
if (V == SI->getOperand(1)) {
Writers.push_back(SI->getParent()->getParent());
} else if (SI->getOperand(1) != OkayStoreDest) {
return true; // Storing the pointer
}
- } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
+ } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
if (AnalyzeUsesOfPointer(GEP, Readers, Writers)) return true;
- } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
+ } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
if (AnalyzeUsesOfPointer(BCI, Readers, Writers, OkayStoreDest))
return true;
- } else if (isFreeCall(*UI)) {
- Writers.push_back(cast<Instruction>(*UI)->getParent()->getParent());
- } else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
+ } else if (isFreeCall(U)) {
+ Writers.push_back(cast<Instruction>(U)->getParent()->getParent());
+ } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
// Make sure that this is just the function being called, not that it is
// passing into the function.
- for (unsigned i = 1, e = CI->getNumOperands(); i != e; ++i)
- if (CI->getOperand(i) == V) return true;
- } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
+ for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
+ if (CI->getArgOperand(i) == V) return true;
+ } else if (InvokeInst *II = dyn_cast<InvokeInst>(U)) {
// Make sure that this is just the function being called, not that it is
// passing into the function.
- for (unsigned i = 3, e = II->getNumOperands(); i != e; ++i)
- if (II->getOperand(i) == V) return true;
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(*UI)) {
+ for (unsigned i = 0, e = II->getNumArgOperands(); i != e; ++i)
+ if (II->getArgOperand(i) == V) return true;
+ } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
if (CE->getOpcode() == Instruction::GetElementPtr ||
CE->getOpcode() == Instruction::BitCast) {
if (AnalyzeUsesOfPointer(CE, Readers, Writers))
@@ -267,12 +270,14 @@ bool GlobalsModRef::AnalyzeUsesOfPointer(Value *V,
} else {
return true;
}
- } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(*UI)) {
+ } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(U)) {
if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
return true; // Allow comparison against null.
} else {
return true;
}
+ }
+
return false;
}
@@ -291,7 +296,8 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) {
// Walk the user list of the global. If we find anything other than a direct
// load or store, bail out.
for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){
- if (LoadInst *LI = dyn_cast<LoadInst>(*I)) {
+ User *U = *I;
+ if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
// The pointer loaded from the global can only be used in simple ways:
// we allow addressing of it and loading storing to it. We do *not* allow
// storing the loaded pointer somewhere else or passing to a function.
@@ -299,7 +305,7 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) {
if (AnalyzeUsesOfPointer(LI, ReadersWriters, ReadersWriters))
return false; // Loaded pointer escapes.
// TODO: Could try some IP mod/ref of the loaded pointer.
- } else if (StoreInst *SI = dyn_cast<StoreInst>(*I)) {
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
// Storing the global itself.
if (SI->getOperand(0) == GV) return false;
@@ -405,7 +411,7 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {
FunctionEffect |= CalleeFR->FunctionEffect;
// Incorporate callee's effects on globals into our info.
- for (std::map<GlobalValue*, unsigned>::iterator GI =
+ for (std::map<const GlobalValue*, unsigned>::iterator GI =
CalleeFR->GlobalInfo.begin(), E = CalleeFR->GlobalInfo.end();
GI != E; ++GI)
FR.GlobalInfo[GI->first] |= GI->second;
@@ -473,13 +479,13 @@ AliasAnalysis::AliasResult
GlobalsModRef::alias(const Value *V1, unsigned V1Size,
const Value *V2, unsigned V2Size) {
// Get the base object these pointers point to.
- Value *UV1 = const_cast<Value*>(V1->getUnderlyingObject());
- Value *UV2 = const_cast<Value*>(V2->getUnderlyingObject());
+ const Value *UV1 = V1->getUnderlyingObject();
+ const Value *UV2 = V2->getUnderlyingObject();
// If either of the underlying values is a global, they may be non-addr-taken
// globals, which we can answer queries about.
- GlobalValue *GV1 = dyn_cast<GlobalValue>(UV1);
- GlobalValue *GV2 = dyn_cast<GlobalValue>(UV2);
+ const GlobalValue *GV1 = dyn_cast<GlobalValue>(UV1);
+ const GlobalValue *GV2 = dyn_cast<GlobalValue>(UV2);
if (GV1 || GV2) {
// If the global's address is taken, pretend we don't know it's a pointer to
// the global.
@@ -499,12 +505,12 @@ GlobalsModRef::alias(const Value *V1, unsigned V1Size,
// so, we may be able to handle this. First check to see if the base pointer
// is a direct load from an indirect global.
GV1 = GV2 = 0;
- if (LoadInst *LI = dyn_cast<LoadInst>(UV1))
+ if (const LoadInst *LI = dyn_cast<LoadInst>(UV1))
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getOperand(0)))
if (IndirectGlobals.count(GV))
GV1 = GV;
- if (LoadInst *LI = dyn_cast<LoadInst>(UV2))
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getOperand(0)))
+ if (const LoadInst *LI = dyn_cast<LoadInst>(UV2))
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getOperand(0)))
if (IndirectGlobals.count(GV))
GV2 = GV;
@@ -526,16 +532,17 @@ GlobalsModRef::alias(const Value *V1, unsigned V1Size,
}
AliasAnalysis::ModRefResult
-GlobalsModRef::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
+GlobalsModRef::getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size) {
unsigned Known = ModRef;
// If we are asking for mod/ref info of a direct call with a pointer to a
// global we are tracking, return information if we have it.
- if (GlobalValue *GV = dyn_cast<GlobalValue>(P->getUnderlyingObject()))
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(P->getUnderlyingObject()))
if (GV->hasLocalLinkage())
- if (Function *F = CS.getCalledFunction())
+ if (const Function *F = CS.getCalledFunction())
if (NonAddressTakenGlobals.count(GV))
- if (FunctionRecord *FR = getFunctionInfo(F))
+ if (const FunctionRecord *FR = getFunctionInfo(F))
Known = FR->getInfoForGlobal(GV);
if (Known == NoModRef)
@@ -554,7 +561,7 @@ void GlobalsModRef::deleteValue(Value *V) {
// any AllocRelatedValues for it.
if (IndirectGlobals.erase(GV)) {
// Remove any entries in AllocsForIndirectGlobals for this global.
- for (std::map<Value*, GlobalValue*>::iterator
+ for (std::map<const Value*, const GlobalValue*>::iterator
I = AllocsForIndirectGlobals.begin(),
E = AllocsForIndirectGlobals.end(); I != E; ) {
if (I->second == GV) {
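
The constification in this file leaves the FunctionRecord bookkeeping itself unchanged: per-global Mod/Ref bits kept in a map, OR-ed with a conservative "may read any global" default. A small self-contained sketch of that lookup, using string keys as placeholders for the GlobalValue pointers used above:

    // Standalone sketch (not the GlobalsModRef pass): the bitmask bookkeeping
    // behind getInfoForGlobal() above. Names and key types are placeholders.
    #include <cassert>
    #include <map>
    #include <string>

    enum Effect : unsigned { NoModRef = 0, Ref = 1, Mod = 2, ModRef = Ref | Mod };

    struct FunctionRecord {
        std::map<std::string, unsigned> GlobalInfo;  // global name -> Effect bits
        bool MayReadAnyGlobal = false;

        unsigned getInfoForGlobal(const std::string &gv) const {
            unsigned effect = MayReadAnyGlobal ? Ref : NoModRef;
            auto it = GlobalInfo.find(gv);
            if (it != GlobalInfo.end())
                effect |= it->second;       // merge the per-global knowledge
            return effect;
        }
    };

    int main() {
        FunctionRecord fr;
        fr.GlobalInfo["counter"] = Mod;     // this function writes @counter
        assert(fr.getInfoForGlobal("counter") == Mod);
        assert(fr.getInfoForGlobal("unrelated") == NoModRef);

        fr.MayReadAnyGlobal = true;         // e.g. it calls something opaque
        assert(fr.getInfoForGlobal("counter") == ModRef);
        assert(fr.getInfoForGlobal("unrelated") == Ref);
        return 0;
    }
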
diff --git a/libclamav/c++/llvm/lib/Analysis/IVUsers.cpp b/libclamav/c++/llvm/lib/Analysis/IVUsers.cpp
index 98a436f..cdf667a 100644
--- a/libclamav/c++/llvm/lib/Analysis/IVUsers.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/IVUsers.cpp
@@ -21,7 +21,6 @@
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
-#include "llvm/Assembly/AsmAnnotationWriter.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -29,153 +28,44 @@
using namespace llvm;
char IVUsers::ID = 0;
-static RegisterPass<IVUsers>
-X("iv-users", "Induction Variable Users", false, true);
+INITIALIZE_PASS(IVUsers, "iv-users", "Induction Variable Users", false, true);
Pass *llvm::createIVUsersPass() {
return new IVUsers();
}
-/// CollectSubexprs - Split S into subexpressions which can be pulled out into
-/// separate registers.
-static void CollectSubexprs(const SCEV *S,
- SmallVectorImpl<const SCEV *> &Ops,
- ScalarEvolution &SE) {
- if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
- // Break out add operands.
- for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
- I != E; ++I)
- CollectSubexprs(*I, Ops, SE);
- return;
- } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
- // Split a non-zero base out of an addrec.
- if (!AR->getStart()->isZero()) {
- CollectSubexprs(AR->getStart(), Ops, SE);
- CollectSubexprs(SE.getAddRecExpr(SE.getIntegerSCEV(0, AR->getType()),
- AR->getStepRecurrence(SE),
- AR->getLoop()), Ops, SE);
- return;
- }
+/// isInteresting - Test whether the given expression is "interesting" when
+/// used by the given expression, within the context of analyzing the
+/// given loop.
+static bool isInteresting(const SCEV *S, const Instruction *I, const Loop *L,
+ ScalarEvolution *SE) {
+ // An addrec is interesting if it's affine or if it has an interesting start.
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+ // Keep things simple. Don't touch loop-variant strides.
+ if (AR->getLoop() == L)
+ return AR->isAffine() || !L->contains(I);
+ // Otherwise recurse to see if the start value is interesting, and that
+ // the step value is not interesting, since we don't yet know how to
+ // do effective SCEV expansions for addrecs with interesting steps.
+ return isInteresting(AR->getStart(), I, L, SE) &&
+ !isInteresting(AR->getStepRecurrence(*SE), I, L, SE);
}
- // Otherwise use the value itself.
- Ops.push_back(S);
-}
-
-/// getSCEVStartAndStride - Compute the start and stride of this expression,
-/// returning false if the expression is not a start/stride pair, or true if it
-/// is. The stride must be a loop invariant expression, but the start may be
-/// a mix of loop invariant and loop variant expressions. The start cannot,
-/// however, contain an AddRec from a different loop, unless that loop is an
-/// outer loop of the current loop.
-static bool getSCEVStartAndStride(const SCEV *&SH, Loop *L, Loop *UseLoop,
- const SCEV *&Start, const SCEV *&Stride,
- ScalarEvolution *SE, DominatorTree *DT) {
- const SCEV *TheAddRec = Start; // Initialize to zero.
-
- // If the outer level is an AddExpr, the operands are all start values except
- // for a nested AddRecExpr.
- if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(SH)) {
- for (unsigned i = 0, e = AE->getNumOperands(); i != e; ++i)
- if (const SCEVAddRecExpr *AddRec =
- dyn_cast<SCEVAddRecExpr>(AE->getOperand(i)))
- TheAddRec = SE->getAddExpr(AddRec, TheAddRec);
- else
- Start = SE->getAddExpr(Start, AE->getOperand(i));
- } else if (isa<SCEVAddRecExpr>(SH)) {
- TheAddRec = SH;
- } else {
- return false; // not analyzable.
- }
-
- // Break down TheAddRec into its component parts.
- SmallVector<const SCEV *, 4> Subexprs;
- CollectSubexprs(TheAddRec, Subexprs, *SE);
-
- // Look for an addrec on the current loop among the parts.
- const SCEV *AddRecStride = 0;
- for (SmallVectorImpl<const SCEV *>::iterator I = Subexprs.begin(),
- E = Subexprs.end(); I != E; ++I) {
- const SCEV *S = *I;
- if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
- if (AR->getLoop() == L) {
- *I = AR->getStart();
- AddRecStride = AR->getStepRecurrence(*SE);
- break;
+ // An add is interesting if exactly one of its operands is interesting.
+ if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
+ bool AnyInterestingYet = false;
+ for (SCEVAddExpr::op_iterator OI = Add->op_begin(), OE = Add->op_end();
+ OI != OE; ++OI)
+ if (isInteresting(*OI, I, L, SE)) {
+ if (AnyInterestingYet)
+ return false;
+ AnyInterestingYet = true;
}
- }
- if (!AddRecStride)
- return false;
-
- // Add up everything else into a start value (which may not be
- // loop-invariant).
- const SCEV *AddRecStart = SE->getAddExpr(Subexprs);
-
- // Use getSCEVAtScope to attempt to simplify other loops out of
- // the picture.
- AddRecStart = SE->getSCEVAtScope(AddRecStart, UseLoop);
-
- Start = SE->getAddExpr(Start, AddRecStart);
-
- // If stride is an instruction, make sure it properly dominates the header.
- // Otherwise we could end up with a use before def situation.
- if (!isa<SCEVConstant>(AddRecStride)) {
- BasicBlock *Header = L->getHeader();
- if (!AddRecStride->properlyDominates(Header, DT))
- return false;
-
- DEBUG(dbgs() << "[";
- WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
- dbgs() << "] Variable stride: " << *AddRecStride << "\n");
+ return AnyInterestingYet;
}
- Stride = AddRecStride;
- return true;
-}
-
-/// IVUseShouldUsePostIncValue - We have discovered a "User" of an IV expression
-/// and now we need to decide whether the user should use the preinc or post-inc
-/// value. If this user should use the post-inc version of the IV, return true.
-///
-/// Choosing wrong here can break dominance properties (if we choose to use the
-/// post-inc value when we cannot) or it can end up adding extra live-ranges to
-/// the loop, resulting in reg-reg copies (if we use the pre-inc value when we
-/// should use the post-inc value).
-static bool IVUseShouldUsePostIncValue(Instruction *User, Instruction *IV,
- Loop *L, DominatorTree *DT) {
- // If the user is in the loop, use the preinc value.
- if (L->contains(User)) return false;
-
- BasicBlock *LatchBlock = L->getLoopLatch();
- if (!LatchBlock)
- return false;
-
- // Ok, the user is outside of the loop. If it is dominated by the latch
- // block, use the post-inc value.
- if (DT->dominates(LatchBlock, User->getParent()))
- return true;
-
- // There is one case we have to be careful of: PHI nodes. These little guys
- // can live in blocks that are not dominated by the latch block, but (since
- // their uses occur in the predecessor block, not the block the PHI lives in)
- // should still use the post-inc value. Check for this case now.
- PHINode *PN = dyn_cast<PHINode>(User);
- if (!PN) return false; // not a phi, not dominated by latch block.
-
- // Look at all of the uses of IV by the PHI node. If any use corresponds to
- // a block that is not dominated by the latch block, give up and use the
- // preincremented value.
- unsigned NumUses = 0;
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- if (PN->getIncomingValue(i) == IV) {
- ++NumUses;
- if (!DT->dominates(LatchBlock, PN->getIncomingBlock(i)))
- return false;
- }
-
- // Okay, all uses of IV by PN are in predecessor blocks that really are
- // dominated by the latch block. Use the post-incremented value.
- return true;
+ // Nothing else is interesting here.
+ return false;
}
/// AddUsersIfInteresting - Inspect the specified instruction. If it is a
@@ -194,18 +84,10 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
// Get the symbolic expression for this instruction.
const SCEV *ISE = SE->getSCEV(I);
- if (isa<SCEVCouldNotCompute>(ISE)) return false;
-
- // Get the start and stride for this expression.
- Loop *UseLoop = LI->getLoopFor(I->getParent());
- const SCEV *Start = SE->getIntegerSCEV(0, ISE->getType());
- const SCEV *Stride = Start;
- if (!getSCEVStartAndStride(ISE, L, UseLoop, Start, Stride, SE, DT))
- return false; // Non-reducible symbolic expression, bail out.
-
- // Keep things simple. Don't touch loop-variant strides.
- if (!Stride->isLoopInvariant(L) && L->contains(I))
+ // If we've come to an uninteresting expression, stop the traversal and
+ // call this a user.
+ if (!isInteresting(ISE, I, L, SE))
return false;
SmallPtrSet<Instruction *, 4> UniqueUsers;
@@ -241,32 +123,27 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
}
if (AddUserToIVUsers) {
- // Okay, we found a user that we cannot reduce. Analyze the instruction
- // and decide what to do with it. If we are a use inside of the loop, use
- // the value before incrementation, otherwise use it after incrementation.
- if (IVUseShouldUsePostIncValue(User, I, L, DT)) {
- // The value used will be incremented by the stride more than we are
- // expecting, so subtract this off.
- const SCEV *NewStart = SE->getMinusSCEV(Start, Stride);
- IVUses.push_back(new IVStrideUse(this, Stride, NewStart, User, I));
- IVUses.back().setIsUseOfPostIncrementedValue(true);
- DEBUG(dbgs() << " USING POSTINC SCEV, START=" << *NewStart<< "\n");
- } else {
- IVUses.push_back(new IVStrideUse(this, Stride, Start, User, I));
- }
+ // Okay, we found a user that we cannot reduce.
+ IVUses.push_back(new IVStrideUse(this, User, I));
+ IVStrideUse &NewUse = IVUses.back();
+ // Transform the expression into a normalized form.
+ ISE = TransformForPostIncUse(NormalizeAutodetect,
+ ISE, User, I,
+ NewUse.PostIncLoops,
+ *SE, *DT);
+ DEBUG(dbgs() << " NORMALIZED TO: " << *ISE << '\n');
}
}
return true;
}
-IVStrideUse &IVUsers::AddUser(const SCEV *Stride, const SCEV *Offset,
- Instruction *User, Value *Operand) {
- IVUses.push_back(new IVStrideUse(this, Stride, Offset, User, Operand));
+IVStrideUse &IVUsers::AddUser(Instruction *User, Value *Operand) {
+ IVUses.push_back(new IVStrideUse(this, User, Operand));
return IVUses.back();
}
IVUsers::IVUsers()
- : LoopPass(&ID) {
+ : LoopPass(ID) {
}
void IVUsers::getAnalysisUsage(AnalysisUsage &AU) const {
@@ -287,40 +164,11 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
// them by stride. Start by finding all of the PHI nodes in the header for
// this loop. If they are induction variables, inspect their uses.
for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
- AddUsersIfInteresting(I);
+ (void)AddUsersIfInteresting(I);
return false;
}
-/// getReplacementExpr - Return a SCEV expression which computes the
-/// value of the OperandValToReplace of the given IVStrideUse.
-const SCEV *IVUsers::getReplacementExpr(const IVStrideUse &U) const {
- // Start with zero.
- const SCEV *RetVal = SE->getIntegerSCEV(0, U.getStride()->getType());
- // Create the basic add recurrence.
- RetVal = SE->getAddRecExpr(RetVal, U.getStride(), L);
- // Add the offset in a separate step, because it may be loop-variant.
- RetVal = SE->getAddExpr(RetVal, U.getOffset());
- // For uses of post-incremented values, add an extra stride to compute
- // the actual replacement value.
- if (U.isUseOfPostIncrementedValue())
- RetVal = SE->getAddExpr(RetVal, U.getStride());
- return RetVal;
-}
-
-/// getCanonicalExpr - Return a SCEV expression which computes the
-/// value of the SCEV of the given IVStrideUse, ignoring the
-/// isUseOfPostIncrementedValue flag.
-const SCEV *IVUsers::getCanonicalExpr(const IVStrideUse &U) const {
- // Start with zero.
- const SCEV *RetVal = SE->getIntegerSCEV(0, U.getStride()->getType());
- // Create the basic add recurrence.
- RetVal = SE->getAddRecExpr(RetVal, U.getStride(), L);
- // Add the offset in a separate step, because it may be loop-variant.
- RetVal = SE->getAddExpr(RetVal, U.getOffset());
- return RetVal;
-}
-
void IVUsers::print(raw_ostream &OS, const Module *M) const {
OS << "IV Users for loop ";
WriteAsOperand(OS, L->getHeader(), false);
@@ -330,19 +178,20 @@ void IVUsers::print(raw_ostream &OS, const Module *M) const {
}
OS << ":\n";
- // Use a default AssemblyAnnotationWriter to suppress the default info
- // comments, which aren't relevant here.
- AssemblyAnnotationWriter Annotator;
for (ilist<IVStrideUse>::const_iterator UI = IVUses.begin(),
E = IVUses.end(); UI != E; ++UI) {
OS << " ";
WriteAsOperand(OS, UI->getOperandValToReplace(), false);
- OS << " = "
- << *getReplacementExpr(*UI);
- if (UI->isUseOfPostIncrementedValue())
- OS << " (post-inc)";
+ OS << " = " << *getReplacementExpr(*UI);
+ for (PostIncLoopSet::const_iterator
+ I = UI->PostIncLoops.begin(),
+ E = UI->PostIncLoops.end(); I != E; ++I) {
+ OS << " (post-inc with loop ";
+ WriteAsOperand(OS, (*I)->getHeader(), false);
+ OS << ")";
+ }
OS << " in ";
- UI->getUser()->print(OS, &Annotator);
+ UI->getUser()->print(OS);
OS << '\n';
}
}
@@ -356,6 +205,49 @@ void IVUsers::releaseMemory() {
IVUses.clear();
}
+/// getReplacementExpr - Return a SCEV expression which computes the
+/// value of the OperandValToReplace.
+const SCEV *IVUsers::getReplacementExpr(const IVStrideUse &IU) const {
+ return SE->getSCEV(IU.getOperandValToReplace());
+}
+
+/// getExpr - Return the expression for the use.
+const SCEV *IVUsers::getExpr(const IVStrideUse &IU) const {
+ return
+ TransformForPostIncUse(Normalize, getReplacementExpr(IU),
+ IU.getUser(), IU.getOperandValToReplace(),
+ const_cast<PostIncLoopSet &>(IU.getPostIncLoops()),
+ *SE, *DT);
+}
+
+static const SCEVAddRecExpr *findAddRecForLoop(const SCEV *S, const Loop *L) {
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+ if (AR->getLoop() == L)
+ return AR;
+ return findAddRecForLoop(AR->getStart(), L);
+ }
+
+ if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
+ for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
+ I != E; ++I)
+ if (const SCEVAddRecExpr *AR = findAddRecForLoop(*I, L))
+ return AR;
+ return 0;
+ }
+
+ return 0;
+}
+
+const SCEV *IVUsers::getStride(const IVStrideUse &IU, const Loop *L) const {
+ if (const SCEVAddRecExpr *AR = findAddRecForLoop(getExpr(IU), L))
+ return AR->getStepRecurrence(*SE);
+ return 0;
+}
+
+void IVStrideUse::transformToPostInc(const Loop *L) {
+ PostIncLoops.insert(L);
+}
+
void IVStrideUse::deleted() {
// Remove this user from the list.
Parent->IVUses.erase(this);
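
The new isInteresting() predicate above replaces the old start/stride decomposition; its add-expression rule is simply "interesting iff exactly one operand is interesting". A standalone sketch of that recursion on a toy expression tree (Expr and interestingLeaf are placeholders, not the SCEV classes):

    // Standalone sketch (not the SCEV classes): the "exactly one interesting
    // operand" rule from isInteresting() above, on a toy expression tree.
    #include <cassert>
    #include <vector>

    struct Expr {
        bool interestingLeaf = false;       // stands in for an affine addrec
        std::vector<const Expr *> operands; // non-empty => this is an "add"
    };

    static bool isInteresting(const Expr *e) {
        if (e->operands.empty())
            return e->interestingLeaf;
        // An add is interesting if exactly one of its operands is interesting.
        bool anyInterestingYet = false;
        for (const Expr *op : e->operands) {
            if (!isInteresting(op))
                continue;
            if (anyInterestingYet)
                return false;               // two interesting operands: give up
            anyInterestingYet = true;
        }
        return anyInterestingYet;
    }

    int main() {
        Expr addrec;   addrec.interestingLeaf = true;    // {0,+,1}<loop>
        Expr base;                                       // loop-invariant base
        Expr otherRec; otherRec.interestingLeaf = true;  // a second addrec

        Expr good; good.operands = {&addrec, &base};     // base + addrec
        Expr bad;  bad.operands  = {&addrec, &otherRec}; // two addrecs mixed

        assert(isInteresting(&good));
        assert(!isInteresting(&bad));
        return 0;
    }
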
diff --git a/libclamav/c++/llvm/lib/Analysis/InlineCost.cpp b/libclamav/c++/llvm/lib/Analysis/InlineCost.cpp
index ca50a17..3e550f3 100644
--- a/libclamav/c++/llvm/lib/Analysis/InlineCost.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/InlineCost.cpp
@@ -22,30 +22,31 @@ using namespace llvm;
// instructions will be constant folded if the specified value is constant.
//
unsigned InlineCostAnalyzer::FunctionInfo::
- CountCodeReductionForConstant(Value *V) {
+CountCodeReductionForConstant(Value *V) {
unsigned Reduction = 0;
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
- if (isa<BranchInst>(*UI) || isa<SwitchInst>(*UI)) {
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
+ User *U = *UI;
+ if (isa<BranchInst>(U) || isa<SwitchInst>(U)) {
// We will be able to eliminate all but one of the successors.
- const TerminatorInst &TI = cast<TerminatorInst>(**UI);
+ const TerminatorInst &TI = cast<TerminatorInst>(*U);
const unsigned NumSucc = TI.getNumSuccessors();
unsigned Instrs = 0;
for (unsigned I = 0; I != NumSucc; ++I)
- Instrs += TI.getSuccessor(I)->size();
+ Instrs += Metrics.NumBBInsts[TI.getSuccessor(I)];
// We don't know which blocks will be eliminated, so use the average size.
Reduction += InlineConstants::InstrCost*Instrs*(NumSucc-1)/NumSucc;
- } else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
+ } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
// Turning an indirect call into a direct call is a BIG win
if (CI->getCalledValue() == V)
Reduction += InlineConstants::IndirectCallBonus;
- } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
+ } else if (InvokeInst *II = dyn_cast<InvokeInst>(U)) {
// Turning an indirect call into a direct call is a BIG win
if (II->getCalledValue() == V)
Reduction += InlineConstants::IndirectCallBonus;
} else {
// Figure out if this instruction will be removed due to simple constant
// propagation.
- Instruction &Inst = cast<Instruction>(**UI);
+ Instruction &Inst = cast<Instruction>(*U);
// We can't constant propagate instructions which have effects or
// read memory.
@@ -74,7 +75,7 @@ unsigned InlineCostAnalyzer::FunctionInfo::
Reduction += CountCodeReductionForConstant(&Inst);
}
}
-
+ }
return Reduction;
}
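To make the branch estimate above concrete: Metrics.NumBBInsts now supplies the per-successor sizes, and with the "each instruction counts as 5" convention used further down in this file, a two-way branch whose successors hold 12 and 8 instructions predicts a saving of 5 * 20 * (2 - 1) / 2 = 50, i.e. on average one of the two successors disappears once the condition becomes a constant. A tiny check of that arithmetic (the block sizes are made up for illustration):

#include <iostream>

int main() {
  unsigned InstrCost = 5;            // "each instruction counts as 5"
  unsigned NumSucc = 2;              // two-way conditional branch
  unsigned Instrs = 12 + 8;          // per-successor sizes from NumBBInsts
  unsigned Reduction = InstrCost * Instrs * (NumSucc - 1) / NumSucc;
  std::cout << Reduction << "\n";    // 50
  return 0;
}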
@@ -107,10 +108,10 @@ unsigned InlineCostAnalyzer::FunctionInfo::
return Reduction;
}
-// callIsSmall - If a call is likely to lower to a single target instruction, or
-// is otherwise deemed small return true.
-// TODO: Perhaps calls like memcpy, strcpy, etc?
-static bool callIsSmall(const Function *F) {
+/// callIsSmall - If a call is likely to lower to a single target instruction,
+/// or is otherwise deemed small, return true.
+/// TODO: Perhaps calls like memcpy, strcpy, etc?
+bool llvm::callIsSmall(const Function *F) {
if (!F) return false;
if (F->hasLocalLinkage()) return false;
@@ -120,7 +121,7 @@ static bool callIsSmall(const Function *F) {
StringRef Name = F->getName();
// These will all likely lower to a single selection DAG node.
- if (Name == "copysign" || Name == "copysignf" ||
+ if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
Name == "sin" || Name == "sinf" || Name == "sinl" ||
Name == "cos" || Name == "cosf" || Name == "cosl" ||
@@ -142,7 +143,7 @@ static bool callIsSmall(const Function *F) {
/// from the specified block.
void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
++NumBlocks;
-
+ unsigned NumInstsBeforeThisBB = NumInsts;
for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
II != E; ++II) {
if (isa<PHINode>(II)) continue; // PHI nodes don't count.
@@ -151,22 +152,34 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
if (isa<DbgInfoIntrinsic>(II))
continue; // Debug intrinsics don't count as size.
-
- CallSite CS = CallSite::get(const_cast<Instruction*>(&*II));
-
+
+ ImmutableCallSite CS(cast<Instruction>(II));
+
// If this function contains a call to setjmp or _setjmp, never inline
// it. This is a hack because we depend on the user marking their local
// variables as volatile if they are live across a setjmp call, and they
// probably won't do this in callers.
- if (Function *F = CS.getCalledFunction())
+ if (const Function *F = CS.getCalledFunction()) {
if (F->isDeclaration() &&
(F->getName() == "setjmp" || F->getName() == "_setjmp"))
- NeverInline = true;
+ callsSetJmp = true;
+
+      // If this call is to the function itself, then the function is recursive.
+ // Inlining it into other functions is a bad idea, because this is
+ // basically just a form of loop peeling, and our metrics aren't useful
+ // for that case.
+ if (F == BB->getParent())
+ isRecursive = true;
+ }
if (!isa<IntrinsicInst>(II) && !callIsSmall(CS.getCalledFunction())) {
// Each argument to a call takes on average one instruction to set up.
NumInsts += CS.arg_size();
- ++NumCalls;
+
+ // We don't want inline asm to count as a call - that would prevent loop
+ // unrolling. The argument setup cost is still real, though.
+ if (!isa<InlineAsm>(CS.getCalledValue()))
+ ++NumCalls;
}
}
@@ -207,7 +220,10 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
// jump would jump from the inlined copy of the function into the original
// function which is extremely undefined behavior.
if (isa<IndirectBrInst>(BB->getTerminator()))
- NeverInline = true;
+ containsIndirectBr = true;
+
+ // Remember NumInsts for this BB.
+ NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
}
/// analyzeFunction - Fill in the current structure with information gleaned
@@ -231,7 +247,7 @@ void InlineCostAnalyzer::FunctionInfo::analyzeFunction(Function *F) {
// Don't bother calculating argument weights if we are never going to inline
// the function anyway.
- if (Metrics.NeverInline)
+ if (NeverInline())
return;
// Check out all of the arguments to the function, figuring out how much
@@ -242,19 +258,35 @@ void InlineCostAnalyzer::FunctionInfo::analyzeFunction(Function *F) {
CountCodeReductionForAlloca(I)));
}
+/// NeverInline - returns true if the function should never be inlined into
+/// any caller
+bool InlineCostAnalyzer::FunctionInfo::NeverInline()
+{
+ return (Metrics.callsSetJmp || Metrics.isRecursive ||
+ Metrics.containsIndirectBr);
+
+}
// getInlineCost - The heuristic used to determine if we should inline the
// function call or not.
//
InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
- SmallPtrSet<const Function *, 16> &NeverInline) {
+ SmallPtrSet<const Function*, 16> &NeverInline) {
+ return getInlineCost(CS, CS.getCalledFunction(), NeverInline);
+}
+
+InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
+ Function *Callee,
+ SmallPtrSet<const Function*, 16> &NeverInline) {
Instruction *TheCall = CS.getInstruction();
- Function *Callee = CS.getCalledFunction();
Function *Caller = TheCall->getParent()->getParent();
+ bool isDirectCall = CS.getCalledFunction() == Callee;
// Don't inline functions which can be redefined at link-time to mean
- // something else. Don't inline functions marked noinline.
+ // something else. Don't inline functions marked noinline or call sites
+ // marked noinline.
if (Callee->mayBeOverridden() ||
- Callee->hasFnAttr(Attribute::NoInline) || NeverInline.count(Callee))
+ Callee->hasFnAttr(Attribute::NoInline) || NeverInline.count(Callee) ||
+ CS.isNoInline())
return llvm::InlineCost::getNever();
// InlineCost - This value measures how good of an inline candidate this call
@@ -262,11 +294,11 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
// be inlined. This value may go negative.
//
int InlineCost = 0;
-
+
// If there is only one call of the function, and it has internal linkage,
// make it almost guaranteed to be inlined.
//
- if (Callee->hasLocalLinkage() && Callee->hasOneUse())
+ if (Callee->hasLocalLinkage() && Callee->hasOneUse() && isDirectCall)
InlineCost += InlineConstants::LastCallToStaticBonus;
// If this function uses the coldcc calling convention, prefer not to inline
@@ -283,31 +315,36 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
} else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
InlineCost += InlineConstants::NoreturnPenalty;
- // Get information about the callee...
- FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];
+ // Get information about the callee.
+ FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];
// If we haven't calculated this information yet, do so now.
- if (CalleeFI.Metrics.NumBlocks == 0)
- CalleeFI.analyzeFunction(Callee);
+ if (CalleeFI->Metrics.NumBlocks == 0)
+ CalleeFI->analyzeFunction(Callee);
// If we should never inline this, return a huge cost.
- if (CalleeFI.Metrics.NeverInline)
+ if (CalleeFI->NeverInline())
return InlineCost::getNever();
- // FIXME: It would be nice to kill off CalleeFI.NeverInline. Then we
+ // FIXME: It would be nice to kill off CalleeFI->NeverInline. Then we
// could move this up and avoid computing the FunctionInfo for
// things we are going to just return always inline for. This
// requires handling setjmp somewhere else, however.
if (!Callee->isDeclaration() && Callee->hasFnAttr(Attribute::AlwaysInline))
return InlineCost::getAlways();
- if (CalleeFI.Metrics.usesDynamicAlloca) {
- // Get infomation about the caller...
+ if (CalleeFI->Metrics.usesDynamicAlloca) {
+    // Get information about the caller.
FunctionInfo &CallerFI = CachedFunctionInfo[Caller];
// If we haven't calculated this information yet, do so now.
- if (CallerFI.Metrics.NumBlocks == 0)
+ if (CallerFI.Metrics.NumBlocks == 0) {
CallerFI.analyzeFunction(Caller);
+
+ // Recompute the CalleeFI pointer, getting Caller could have invalidated
+ // it.
+ CalleeFI = &CachedFunctionInfo[Callee];
+ }
// Don't inline a callee with dynamic alloca into a caller without them.
// Functions containing dynamic alloca's are inefficient in various ways;
@@ -334,15 +371,15 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
// scalarization), so encourage the inlining of the function.
//
if (isa<AllocaInst>(I)) {
- if (ArgNo < CalleeFI.ArgumentWeights.size())
- InlineCost -= CalleeFI.ArgumentWeights[ArgNo].AllocaWeight;
+ if (ArgNo < CalleeFI->ArgumentWeights.size())
+ InlineCost -= CalleeFI->ArgumentWeights[ArgNo].AllocaWeight;
// If this is a constant being passed into the function, use the argument
// weights calculated for the callee to determine how much will be folded
// away with this information.
} else if (isa<Constant>(I)) {
- if (ArgNo < CalleeFI.ArgumentWeights.size())
- InlineCost -= CalleeFI.ArgumentWeights[ArgNo].ConstantWeight;
+ if (ArgNo < CalleeFI->ArgumentWeights.size())
+ InlineCost -= CalleeFI->ArgumentWeights[ArgNo].ConstantWeight;
}
}
@@ -350,15 +387,10 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
// likely to be inlined, look at factors that make us not want to inline it.
// Calls usually take a long time, so they make the inlining gain smaller.
- InlineCost += CalleeFI.Metrics.NumCalls * InlineConstants::CallPenalty;
+ InlineCost += CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;
- // Don't inline into something too big, which would make it bigger.
- // "size" here is the number of basic blocks, not instructions.
- //
- InlineCost += Caller->size()/15;
-
// Look at the size of the callee. Each instruction counts as 5.
- InlineCost += CalleeFI.Metrics.NumInsts*InlineConstants::InstrCost;
+ InlineCost += CalleeFI->Metrics.NumInsts*InlineConstants::InstrCost;
return llvm::InlineCost::get(InlineCost);
}
@@ -368,7 +400,7 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
float InlineCostAnalyzer::getInlineFudgeFactor(CallSite CS) {
Function *Callee = CS.getCalledFunction();
- // Get information about the callee...
+ // Get information about the callee.
FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];
// If we haven't calculated this information yet, do so now.
@@ -388,3 +420,63 @@ float InlineCostAnalyzer::getInlineFudgeFactor(CallSite CS) {
Factor += 1.5f;
return Factor;
}
+
+/// growCachedCostInfo - update the cached cost info for Caller after Callee has
+/// been inlined.
+void
+InlineCostAnalyzer::growCachedCostInfo(Function *Caller, Function *Callee) {
+ CodeMetrics &CallerMetrics = CachedFunctionInfo[Caller].Metrics;
+
+ // For small functions we prefer to recalculate the cost for better accuracy.
+ if (CallerMetrics.NumBlocks < 10 || CallerMetrics.NumInsts < 1000) {
+ resetCachedCostInfo(Caller);
+ return;
+ }
+
+ // For large functions, we can save a lot of computation time by skipping
+ // recalculations.
+ if (CallerMetrics.NumCalls > 0)
+ --CallerMetrics.NumCalls;
+
+ if (Callee == 0) return;
+
+ CodeMetrics &CalleeMetrics = CachedFunctionInfo[Callee].Metrics;
+
+ // If we don't have metrics for the callee, don't recalculate them just to
+ // update an approximation in the caller. Instead, just recalculate the
+ // caller info from scratch.
+ if (CalleeMetrics.NumBlocks == 0) {
+ resetCachedCostInfo(Caller);
+ return;
+ }
+
+ // Since CalleeMetrics were already calculated, we know that the CallerMetrics
+ // reference isn't invalidated: both were in the DenseMap.
+ CallerMetrics.usesDynamicAlloca |= CalleeMetrics.usesDynamicAlloca;
+
+ // FIXME: If any of these three are true for the callee, the callee was
+ // not inlined into the caller, so I think they're redundant here.
+ CallerMetrics.callsSetJmp |= CalleeMetrics.callsSetJmp;
+ CallerMetrics.isRecursive |= CalleeMetrics.isRecursive;
+ CallerMetrics.containsIndirectBr |= CalleeMetrics.containsIndirectBr;
+
+ CallerMetrics.NumInsts += CalleeMetrics.NumInsts;
+ CallerMetrics.NumBlocks += CalleeMetrics.NumBlocks;
+ CallerMetrics.NumCalls += CalleeMetrics.NumCalls;
+ CallerMetrics.NumVectorInsts += CalleeMetrics.NumVectorInsts;
+ CallerMetrics.NumRets += CalleeMetrics.NumRets;
+
+ // analyzeBasicBlock counts each function argument as an inst.
+ if (CallerMetrics.NumInsts >= Callee->arg_size())
+ CallerMetrics.NumInsts -= Callee->arg_size();
+ else
+ CallerMetrics.NumInsts = 0;
+
+ // We are not updating the argument weights. We have already determined that
+ // Caller is a fairly large function, so we accept the loss of precision.
+}
+
+/// clear - empty the cache of inline costs
+void InlineCostAnalyzer::clear() {
+ CachedFunctionInfo.clear();
+}
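The growCachedCostInfo path above, for callers too large to re-analyze, reduces to: drop the caller's call count by one for the call site that just disappeared, OR in the callee's never-inline and dynamic-alloca flags, sum the size counters, and subtract the callee's argument count that analyzeBasicBlock had counted as instructions. A compact, simplified sketch of that merge (an invented CodeMetricsLite struct stands in for LLVM's CodeMetrics, and the vector/return counters are omitted):

#include <algorithm>
#include <cstdio>

// Hypothetical, simplified mirror of the fields merged above.
struct CodeMetricsLite {
  bool callsSetJmp, isRecursive, containsIndirectBr, usesDynamicAlloca;
  unsigned NumInsts, NumBlocks, NumCalls;
};

// Fold the callee's cached metrics into the caller's after inlining, the
// way growCachedCostInfo updates a large caller without re-analyzing it.
void mergeAfterInline(CodeMetricsLite &Caller, const CodeMetricsLite &Callee,
                      unsigned CalleeArgCount) {
  if (Caller.NumCalls > 0)
    --Caller.NumCalls;                       // the inlined call site is gone

  Caller.usesDynamicAlloca |= Callee.usesDynamicAlloca;
  Caller.callsSetJmp |= Callee.callsSetJmp;
  Caller.isRecursive |= Callee.isRecursive;
  Caller.containsIndirectBr |= Callee.containsIndirectBr;

  Caller.NumInsts += Callee.NumInsts;
  Caller.NumBlocks += Callee.NumBlocks;
  Caller.NumCalls += Callee.NumCalls;

  // analyzeBasicBlock counted each formal argument as one instruction,
  // so remove that over-count, clamping at zero.
  Caller.NumInsts -= std::min(Caller.NumInsts, CalleeArgCount);
}

int main() {
  CodeMetricsLite Caller = { false, false, false, false, 5000, 40, 12 };
  CodeMetricsLite Callee = { false, false, false, true, 120, 6, 3 };
  mergeAfterInline(Caller, Callee, 2);
  std::printf("insts=%u blocks=%u calls=%u dyn-alloca=%d\n",
              Caller.NumInsts, Caller.NumBlocks, Caller.NumCalls,
              (int)Caller.usesDynamicAlloca);
  return 0;
}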
diff --git a/libclamav/c++/llvm/lib/Analysis/InstCount.cpp b/libclamav/c++/llvm/lib/Analysis/InstCount.cpp
index bb2cf53..dcbcac0 100644
--- a/libclamav/c++/llvm/lib/Analysis/InstCount.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/InstCount.cpp
@@ -51,7 +51,7 @@ namespace {
}
public:
static char ID; // Pass identification, replacement for typeid
- InstCount() : FunctionPass(&ID) {}
+ InstCount() : FunctionPass(ID) {}
virtual bool runOnFunction(Function &F);
@@ -64,8 +64,8 @@ namespace {
}
char InstCount::ID = 0;
-static RegisterPass<InstCount>
-X("instcount", "Counts the various types of Instructions", false, true);
+INITIALIZE_PASS(InstCount, "instcount",
+ "Counts the various types of Instructions", false, true);
FunctionPass *llvm::createInstCountPass() { return new InstCount(); }
diff --git a/libclamav/c++/llvm/lib/Analysis/InstructionSimplify.cpp b/libclamav/c++/llvm/lib/Analysis/InstructionSimplify.cpp
index 8288e96..24cd343 100644
--- a/libclamav/c++/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -314,6 +314,35 @@ Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
return 0;
}
+/// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
+/// the result. If not, this returns null.
+Value *llvm::SimplifySelectInst(Value *CondVal, Value *TrueVal, Value *FalseVal,
+ const TargetData *TD) {
+ // select true, X, Y -> X
+ // select false, X, Y -> Y
+ if (ConstantInt *CB = dyn_cast<ConstantInt>(CondVal))
+ return CB->getZExtValue() ? TrueVal : FalseVal;
+
+ // select C, X, X -> X
+ if (TrueVal == FalseVal)
+ return TrueVal;
+
+ if (isa<UndefValue>(TrueVal)) // select C, undef, X -> X
+ return FalseVal;
+ if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
+ return TrueVal;
+ if (isa<UndefValue>(CondVal)) { // select undef, X, Y -> X or Y
+ if (isa<Constant>(TrueVal))
+ return TrueVal;
+ return FalseVal;
+ }
+
+
+
+ return 0;
+}
+
+
/// SimplifyGEPInst - Given operands for an GetElementPtrInst, see if we can
/// fold the result. If not, this returns null.
Value *llvm::SimplifyGEPInst(Value *const *Ops, unsigned NumOps,
@@ -391,6 +420,9 @@ Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD) {
case Instruction::FCmp:
return SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(),
I->getOperand(0), I->getOperand(1), TD);
+ case Instruction::Select:
+ return SimplifySelectInst(I->getOperand(0), I->getOperand(1),
+ I->getOperand(2), TD);
case Instruction::GetElementPtr: {
SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
return SimplifyGEPInst(&Ops[0], Ops.size(), TD);
@@ -408,27 +440,47 @@ void llvm::ReplaceAndSimplifyAllUses(Instruction *From, Value *To,
const TargetData *TD) {
assert(From != To && "ReplaceAndSimplifyAllUses(X,X) is not valid!");
- // FromHandle - This keeps a weakvh on the from value so that we can know if
- // it gets deleted out from under us in a recursive simplification.
+ // FromHandle/ToHandle - This keeps a WeakVH on the from/to values so that
+ // we can know if it gets deleted out from under us or replaced in a
+ // recursive simplification.
WeakVH FromHandle(From);
+ WeakVH ToHandle(To);
while (!From->use_empty()) {
// Update the instruction to use the new value.
- Use &U = From->use_begin().getUse();
- Instruction *User = cast<Instruction>(U.getUser());
- U = To;
+ Use &TheUse = From->use_begin().getUse();
+ Instruction *User = cast<Instruction>(TheUse.getUser());
+ TheUse = To;
+
+ // Check to see if the instruction can be folded due to the operand
+ // replacement. For example changing (or X, Y) into (or X, -1) can replace
+ // the 'or' with -1.
+ Value *SimplifiedVal;
+ {
+ // Sanity check to make sure 'User' doesn't dangle across
+ // SimplifyInstruction.
+ AssertingVH<> UserHandle(User);
- // See if we can simplify it.
- if (Value *V = SimplifyInstruction(User, TD)) {
- // Recursively simplify this.
- ReplaceAndSimplifyAllUses(User, V, TD);
-
- // If the recursive simplification ended up revisiting and deleting 'From'
- // then we're done.
- if (FromHandle == 0)
- return;
+ SimplifiedVal = SimplifyInstruction(User, TD);
+ if (SimplifiedVal == 0) continue;
}
+
+ // Recursively simplify this user to the new value.
+ ReplaceAndSimplifyAllUses(User, SimplifiedVal, TD);
+ From = dyn_cast_or_null<Instruction>((Value*)FromHandle);
+ To = ToHandle;
+
+ assert(ToHandle && "To value deleted by recursive simplification?");
+
+ // If the recursive simplification ended up revisiting and deleting
+ // 'From' then we're done.
+ if (From == 0)
+ return;
}
+
+ // If 'From' has value handles referring to it, do a real RAUW to update them.
+ From->replaceAllUsesWith(To);
+
From->eraseFromParent();
}
diff --git a/libclamav/c++/llvm/lib/Analysis/IntervalPartition.cpp b/libclamav/c++/llvm/lib/Analysis/IntervalPartition.cpp
index 1f17b77..1c9e148 100644
--- a/libclamav/c++/llvm/lib/Analysis/IntervalPartition.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/IntervalPartition.cpp
@@ -16,8 +16,8 @@
using namespace llvm;
char IntervalPartition::ID = 0;
-static RegisterPass<IntervalPartition>
-X("intervals", "Interval Partition Construction", true, true);
+INITIALIZE_PASS(IntervalPartition, "intervals",
+ "Interval Partition Construction", true, true);
//===----------------------------------------------------------------------===//
// IntervalPartition Implementation
@@ -91,7 +91,7 @@ bool IntervalPartition::runOnFunction(Function &F) {
// distinguish it from a copy constructor. Always pass in false for now.
//
IntervalPartition::IntervalPartition(IntervalPartition &IP, bool)
- : FunctionPass(&ID) {
+ : FunctionPass(ID) {
assert(IP.getRootInterval() && "Cannot operate on empty IntervalPartitions!");
// Pass false to intervals_begin because we take ownership of its memory
diff --git a/libclamav/c++/llvm/lib/Analysis/LazyValueInfo.cpp b/libclamav/c++/llvm/lib/Analysis/LazyValueInfo.cpp
index ff9026b..e32dbc4 100644
--- a/libclamav/c++/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -19,16 +19,18 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CFG.h"
+#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ValueHandle.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
char LazyValueInfo::ID = 0;
-static RegisterPass<LazyValueInfo>
-X("lazy-value-info", "Lazy Value Information Analysis", false, true);
+INITIALIZE_PASS(LazyValueInfo, "lazy-value-info",
+ "Lazy Value Information Analysis", false, true);
namespace llvm {
FunctionPass *createLazyValueInfoPass() { return new LazyValueInfo(); }
@@ -50,12 +52,15 @@ class LVILatticeVal {
enum LatticeValueTy {
/// undefined - This LLVM Value has no known value yet.
undefined,
+
/// constant - This LLVM Value has a specific constant value.
constant,
-
/// notconstant - This LLVM value is known to not have the specified value.
notconstant,
+ /// constantrange
+ constantrange,
+
/// overdefined - This instruction is not known to be constant, and we know
/// it has a value.
overdefined
@@ -63,42 +68,62 @@ class LVILatticeVal {
/// Val: This stores the current lattice value along with the Constant* for
/// the constant if this is a 'constant' or 'notconstant' value.
- PointerIntPair<Constant *, 2, LatticeValueTy> Val;
+ LatticeValueTy Tag;
+ Constant *Val;
+ ConstantRange Range;
public:
- LVILatticeVal() : Val(0, undefined) {}
+ LVILatticeVal() : Tag(undefined), Val(0), Range(1, true) {}
static LVILatticeVal get(Constant *C) {
LVILatticeVal Res;
- Res.markConstant(C);
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
+ Res.markConstantRange(ConstantRange(CI->getValue(), CI->getValue()+1));
+ else if (!isa<UndefValue>(C))
+ Res.markConstant(C);
return Res;
}
static LVILatticeVal getNot(Constant *C) {
LVILatticeVal Res;
- Res.markNotConstant(C);
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
+ Res.markConstantRange(ConstantRange(CI->getValue()+1, CI->getValue()));
+ else
+ Res.markNotConstant(C);
+ return Res;
+ }
+ static LVILatticeVal getRange(ConstantRange CR) {
+ LVILatticeVal Res;
+ Res.markConstantRange(CR);
return Res;
}
- bool isUndefined() const { return Val.getInt() == undefined; }
- bool isConstant() const { return Val.getInt() == constant; }
- bool isNotConstant() const { return Val.getInt() == notconstant; }
- bool isOverdefined() const { return Val.getInt() == overdefined; }
+ bool isUndefined() const { return Tag == undefined; }
+ bool isConstant() const { return Tag == constant; }
+ bool isNotConstant() const { return Tag == notconstant; }
+ bool isConstantRange() const { return Tag == constantrange; }
+ bool isOverdefined() const { return Tag == overdefined; }
Constant *getConstant() const {
assert(isConstant() && "Cannot get the constant of a non-constant!");
- return Val.getPointer();
+ return Val;
}
Constant *getNotConstant() const {
assert(isNotConstant() && "Cannot get the constant of a non-notconstant!");
- return Val.getPointer();
+ return Val;
+ }
+
+ ConstantRange getConstantRange() const {
+ assert(isConstantRange() &&
+ "Cannot get the constant-range of a non-constant-range!");
+ return Range;
}
/// markOverdefined - Return true if this is a change in status.
bool markOverdefined() {
if (isOverdefined())
return false;
- Val.setInt(overdefined);
+ Tag = overdefined;
return true;
}
@@ -110,9 +135,9 @@ public:
}
assert(isUndefined());
- Val.setInt(constant);
+ Tag = constant;
assert(V && "Marking constant with NULL");
- Val.setPointer(V);
+ Val = V;
return true;
}
@@ -128,9 +153,29 @@ public:
else
assert(isUndefined());
- Val.setInt(notconstant);
+ Tag = notconstant;
assert(V && "Marking constant with NULL");
- Val.setPointer(V);
+ Val = V;
+ return true;
+ }
+
+ /// markConstantRange - Return true if this is a change in status.
+ bool markConstantRange(const ConstantRange NewR) {
+ if (isConstantRange()) {
+ if (NewR.isEmptySet())
+ return markOverdefined();
+
+      bool changed = Range != NewR;
+ Range = NewR;
+ return changed;
+ }
+
+ assert(isUndefined());
+ if (NewR.isEmptySet())
+ return markOverdefined();
+
+ Tag = constantrange;
+ Range = NewR;
return true;
}
@@ -147,20 +192,39 @@ public:
isa<ConstantExpr>(RHS.getNotConstant()))
return markOverdefined();
return false;
- }
- if (isConstant()) {
+ } else if (isConstant()) {
if (getConstant() == RHS.getNotConstant() ||
isa<ConstantExpr>(RHS.getNotConstant()) ||
isa<ConstantExpr>(getConstant()))
return markOverdefined();
return markNotConstant(RHS.getNotConstant());
+ } else if (isConstantRange()) {
+ return markOverdefined();
}
assert(isUndefined() && "Unexpected lattice");
return markNotConstant(RHS.getNotConstant());
}
+ if (RHS.isConstantRange()) {
+ if (isConstantRange()) {
+ ConstantRange NewR = Range.unionWith(RHS.getConstantRange());
+ if (NewR.isFullSet())
+ return markOverdefined();
+ else
+ return markConstantRange(NewR);
+ } else if (!isUndefined()) {
+ return markOverdefined();
+ }
+
+ assert(isUndefined() && "Unexpected lattice");
+ return markConstantRange(RHS.getConstantRange());
+ }
+
// RHS must be a constant, we must be undef, constant, or notconstant.
+ assert(!isConstantRange() &&
+ "Constant and ConstantRange cannot be merged.");
+
if (isUndefined())
return markConstant(RHS.getConstant());
@@ -191,6 +255,9 @@ raw_ostream &operator<<(raw_ostream &OS, const LVILatticeVal &Val) {
if (Val.isNotConstant())
return OS << "notconstant<" << *Val.getNotConstant() << '>';
+ else if (Val.isConstantRange())
+ return OS << "constantrange<" << Val.getConstantRange().getLower() << ", "
+ << Val.getConstantRange().getUpper() << '>';
return OS << "constant<" << *Val.getConstant() << '>';
}
}
@@ -206,17 +273,41 @@ namespace {
public:
/// BlockCacheEntryTy - This is a computed lattice value at the end of the
/// specified basic block for a Value* that depends on context.
- typedef std::pair<BasicBlock*, LVILatticeVal> BlockCacheEntryTy;
+ typedef std::pair<AssertingVH<BasicBlock>, LVILatticeVal> BlockCacheEntryTy;
/// ValueCacheEntryTy - This is all of the cached block information for
/// exactly one Value*. The entries are sorted by the BasicBlock* of the
/// entries, allowing us to do a lookup with a binary search.
- typedef std::vector<BlockCacheEntryTy> ValueCacheEntryTy;
+ typedef std::map<AssertingVH<BasicBlock>, LVILatticeVal> ValueCacheEntryTy;
private:
+    /// LVIValueHandle - A callback value handle that updates the cache when
+ /// values are erased.
+ struct LVIValueHandle : public CallbackVH {
+ LazyValueInfoCache *Parent;
+
+ LVIValueHandle(Value *V, LazyValueInfoCache *P)
+ : CallbackVH(V), Parent(P) { }
+
+ void deleted();
+ void allUsesReplacedWith(Value* V) {
+ deleted();
+ }
+
+ LVIValueHandle &operator=(Value *V) {
+ return *this = LVIValueHandle(V, Parent);
+ }
+ };
+
/// ValueCache - This is all of the cached information for all values,
/// mapped from Value* to key information.
- DenseMap<Value*, ValueCacheEntryTy> ValueCache;
+ std::map<LVIValueHandle, ValueCacheEntryTy> ValueCache;
+
+ /// OverDefinedCache - This tracks, on a per-block basis, the set of
+ /// values that are over-defined at the end of that block. This is required
+ /// for cache updating.
+ std::set<std::pair<AssertingVH<BasicBlock>, Value*> > OverDefinedCache;
+
public:
/// getValueInBlock - This is the query interface to determine the lattice
@@ -226,29 +317,23 @@ namespace {
/// getValueOnEdge - This is the query interface to determine the lattice
/// value for the specified Value* that is true on the specified edge.
LVILatticeVal getValueOnEdge(Value *V, BasicBlock *FromBB,BasicBlock *ToBB);
- };
-} // end anonymous namespace
-
-namespace {
- struct BlockCacheEntryComparator {
- static int Compare(const void *LHSv, const void *RHSv) {
- const LazyValueInfoCache::BlockCacheEntryTy *LHS =
- static_cast<const LazyValueInfoCache::BlockCacheEntryTy *>(LHSv);
- const LazyValueInfoCache::BlockCacheEntryTy *RHS =
- static_cast<const LazyValueInfoCache::BlockCacheEntryTy *>(RHSv);
- if (LHS->first < RHS->first)
- return -1;
- if (LHS->first > RHS->first)
- return 1;
- return 0;
- }
- bool operator()(const LazyValueInfoCache::BlockCacheEntryTy &LHS,
- const LazyValueInfoCache::BlockCacheEntryTy &RHS) const {
- return LHS.first < RHS.first;
+ /// threadEdge - This is the update interface to inform the cache that an
+ /// edge from PredBB to OldSucc has been threaded to be from PredBB to
+ /// NewSucc.
+ void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);
+
+ /// eraseBlock - This is part of the update interface to inform the cache
+ /// that a block has been deleted.
+ void eraseBlock(BasicBlock *BB);
+
+ /// clear - Empty the cache.
+ void clear() {
+ ValueCache.clear();
+ OverDefinedCache.clear();
}
};
-}
+} // end anonymous namespace
//===----------------------------------------------------------------------===//
// LVIQuery Impl
@@ -267,78 +352,87 @@ namespace {
/// This is the current value being queried for.
Value *Val;
+ /// This is a pointer to the owning cache, for recursive queries.
+ LazyValueInfoCache &Parent;
+
/// This is all of the cached information about this value.
ValueCacheEntryTy &Cache;
+ /// This tracks, for each block, what values are overdefined.
+ std::set<std::pair<AssertingVH<BasicBlock>, Value*> > &OverDefinedCache;
+
/// NewBlocks - This is a mapping of the new BasicBlocks which have been
/// added to cache but that are not in sorted order.
- DenseMap<BasicBlock*, LVILatticeVal> NewBlockInfo;
+ DenseSet<BasicBlock*> NewBlockInfo;
+
public:
- LVIQuery(Value *V, ValueCacheEntryTy &VC) : Val(V), Cache(VC) {
+ LVIQuery(Value *V, LazyValueInfoCache &P,
+ ValueCacheEntryTy &VC,
+ std::set<std::pair<AssertingVH<BasicBlock>, Value*> > &ODC)
+ : Val(V), Parent(P), Cache(VC), OverDefinedCache(ODC) {
}
~LVIQuery() {
// When the query is done, insert the newly discovered facts into the
// cache in sorted order.
if (NewBlockInfo.empty()) return;
-
- // Grow the cache to exactly fit the new data.
- Cache.reserve(Cache.size() + NewBlockInfo.size());
- // If we only have one new entry, insert it instead of doing a full-on
- // sort.
- if (NewBlockInfo.size() == 1) {
- BlockCacheEntryTy Entry = *NewBlockInfo.begin();
- ValueCacheEntryTy::iterator I =
- std::lower_bound(Cache.begin(), Cache.end(), Entry,
- BlockCacheEntryComparator());
- assert((I == Cache.end() || I->first != Entry.first) &&
- "Entry already in map!");
-
- Cache.insert(I, Entry);
- return;
+ for (DenseSet<BasicBlock*>::iterator I = NewBlockInfo.begin(),
+ E = NewBlockInfo.end(); I != E; ++I) {
+ if (Cache[*I].isOverdefined())
+ OverDefinedCache.insert(std::make_pair(*I, Val));
}
-
- // TODO: If we only have two new elements, INSERT them both.
-
- Cache.insert(Cache.end(), NewBlockInfo.begin(), NewBlockInfo.end());
- array_pod_sort(Cache.begin(), Cache.end(),
- BlockCacheEntryComparator::Compare);
-
}
LVILatticeVal getBlockValue(BasicBlock *BB);
LVILatticeVal getEdgeValue(BasicBlock *FromBB, BasicBlock *ToBB);
private:
- LVILatticeVal &getCachedEntryForBlock(BasicBlock *BB);
+ LVILatticeVal getCachedEntryForBlock(BasicBlock *BB);
};
} // end anonymous namespace
-/// getCachedEntryForBlock - See if we already have a value for this block. If
-/// so, return it, otherwise create a new entry in the NewBlockInfo map to use.
-LVILatticeVal &LVIQuery::getCachedEntryForBlock(BasicBlock *BB) {
-
- // Do a binary search to see if we already have an entry for this block in
- // the cache set. If so, find it.
- if (!Cache.empty()) {
- ValueCacheEntryTy::iterator Entry =
- std::lower_bound(Cache.begin(), Cache.end(),
- BlockCacheEntryTy(BB, LVILatticeVal()),
- BlockCacheEntryComparator());
- if (Entry != Cache.end() && Entry->first == BB)
- return Entry->second;
+void LazyValueInfoCache::LVIValueHandle::deleted() {
+ for (std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator
+ I = Parent->OverDefinedCache.begin(),
+ E = Parent->OverDefinedCache.end();
+ I != E; ) {
+ std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator tmp = I;
+ ++I;
+ if (tmp->second == getValPtr())
+ Parent->OverDefinedCache.erase(tmp);
}
- // Otherwise, check to see if it's in NewBlockInfo or create a new entry if
- // not.
- return NewBlockInfo[BB];
+ // This erasure deallocates *this, so it MUST happen after we're done
+ // using any and all members of *this.
+ Parent->ValueCache.erase(*this);
+}
+
+void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
+ for (std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator
+ I = OverDefinedCache.begin(), E = OverDefinedCache.end(); I != E; ) {
+ std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator tmp = I;
+ ++I;
+ if (tmp->first == BB)
+ OverDefinedCache.erase(tmp);
+ }
+
+ for (std::map<LVIValueHandle, ValueCacheEntryTy>::iterator
+ I = ValueCache.begin(), E = ValueCache.end(); I != E; ++I)
+ I->second.erase(BB);
+}
+
+/// getCachedEntryForBlock - See if we already have a value for this block. If
+/// so, return it, otherwise create a new entry in the Cache map to use.
+LVILatticeVal LVIQuery::getCachedEntryForBlock(BasicBlock *BB) {
+ NewBlockInfo.insert(BB);
+ return Cache[BB];
}
LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) {
// See if we already have a value for this block.
- LVILatticeVal &BBLV = getCachedEntryForBlock(BB);
+ LVILatticeVal BBLV = getCachedEntryForBlock(BB);
// If we've already computed this block's value, return it.
if (!BBLV.isUndefined()) {
@@ -350,13 +444,28 @@ LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) {
// lattice value to overdefined, so that cycles will terminate and be
// conservatively correct.
BBLV.markOverdefined();
+ Cache[BB] = BBLV;
- // If V is live into BB, see if our predecessors know anything about it.
Instruction *BBI = dyn_cast<Instruction>(Val);
if (BBI == 0 || BBI->getParent() != BB) {
LVILatticeVal Result; // Start Undefined.
- unsigned NumPreds = 0;
+ // If this is a pointer, and there's a load from that pointer in this BB,
+ // then we know that the pointer can't be NULL.
+ bool NotNull = false;
+ if (Val->getType()->isPointerTy()) {
+ for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();BI != BE;++BI){
+ LoadInst *L = dyn_cast<LoadInst>(BI);
+ if (L && L->getPointerAddressSpace() == 0 &&
+ L->getPointerOperand()->getUnderlyingObject() ==
+ Val->getUnderlyingObject()) {
+ NotNull = true;
+ break;
+ }
+ }
+ }
+
+ unsigned NumPreds = 0;
// Loop over all of our predecessors, merging what we know from them into
// result.
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
@@ -367,11 +476,19 @@ LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) {
if (Result.isOverdefined()) {
DEBUG(dbgs() << " compute BB '" << BB->getName()
<< "' - overdefined because of pred.\n");
+ // If we previously determined that this is a pointer that can't be null
+ // then return that rather than giving up entirely.
+ if (NotNull) {
+ const PointerType *PTy = cast<PointerType>(Val->getType());
+ Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
+ }
+
return Result;
}
++NumPreds;
}
+
// If this is the entry block, we must be asking about an argument. The
// value is overdefined.
if (NumPreds == 0 && BB == &BB->getParent()->front()) {
@@ -382,24 +499,123 @@ LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) {
// Return the merged value, which is more precise than 'overdefined'.
assert(!Result.isOverdefined());
- return getCachedEntryForBlock(BB) = Result;
+ return Cache[BB] = Result;
}
// If this value is defined by an instruction in this block, we have to
// process it here somehow or return overdefined.
if (PHINode *PN = dyn_cast<PHINode>(BBI)) {
- (void)PN;
- // TODO: PHI Translation in preds.
- } else {
+ LVILatticeVal Result; // Start Undefined.
+ // Loop over all of our predecessors, merging what we know from them into
+ // result.
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ Value* PhiVal = PN->getIncomingValueForBlock(*PI);
+ Result.mergeIn(Parent.getValueOnEdge(PhiVal, *PI, BB));
+
+ // If we hit overdefined, exit early. The BlockVals entry is already set
+ // to overdefined.
+ if (Result.isOverdefined()) {
+ DEBUG(dbgs() << " compute BB '" << BB->getName()
+ << "' - overdefined because of pred.\n");
+ return Result;
+ }
+ }
+
+ // Return the merged value, which is more precise than 'overdefined'.
+ assert(!Result.isOverdefined());
+ return Cache[BB] = Result;
}
-
- DEBUG(dbgs() << " compute BB '" << BB->getName()
- << "' - overdefined because inst def found.\n");
+ assert(Cache[BB].isOverdefined() && "Recursive query changed our cache?");
+
+ // We can only analyze the definitions of certain classes of instructions
+ // (integral binops and casts at the moment), so bail if this isn't one.
LVILatticeVal Result;
- Result.markOverdefined();
- return getCachedEntryForBlock(BB) = Result;
+ if ((!isa<BinaryOperator>(BBI) && !isa<CastInst>(BBI)) ||
+ !BBI->getType()->isIntegerTy()) {
+ DEBUG(dbgs() << " compute BB '" << BB->getName()
+ << "' - overdefined because inst def found.\n");
+ Result.markOverdefined();
+ return Result;
+ }
+
+ // FIXME: We're currently limited to binops with a constant RHS. This should
+ // be improved.
+ BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI);
+ if (BO && !isa<ConstantInt>(BO->getOperand(1))) {
+ DEBUG(dbgs() << " compute BB '" << BB->getName()
+ << "' - overdefined because inst def found.\n");
+
+ Result.markOverdefined();
+ return Result;
+ }
+
+ // Figure out the range of the LHS. If that fails, bail.
+ LVILatticeVal LHSVal = Parent.getValueInBlock(BBI->getOperand(0), BB);
+ if (!LHSVal.isConstantRange()) {
+ Result.markOverdefined();
+ return Result;
+ }
+
+ ConstantInt *RHS = 0;
+ ConstantRange LHSRange = LHSVal.getConstantRange();
+ ConstantRange RHSRange(1);
+ const IntegerType *ResultTy = cast<IntegerType>(BBI->getType());
+ if (isa<BinaryOperator>(BBI)) {
+ RHS = dyn_cast<ConstantInt>(BBI->getOperand(1));
+ if (!RHS) {
+ Result.markOverdefined();
+ return Result;
+ }
+
+ RHSRange = ConstantRange(RHS->getValue(), RHS->getValue()+1);
+ }
+
+ // NOTE: We're currently limited by the set of operations that ConstantRange
+  // can evaluate symbolically. Enhancing that set will allow us to analyze
+ // more definitions.
+ switch (BBI->getOpcode()) {
+ case Instruction::Add:
+ Result.markConstantRange(LHSRange.add(RHSRange));
+ break;
+ case Instruction::Sub:
+ Result.markConstantRange(LHSRange.sub(RHSRange));
+ break;
+ case Instruction::Mul:
+ Result.markConstantRange(LHSRange.multiply(RHSRange));
+ break;
+ case Instruction::UDiv:
+ Result.markConstantRange(LHSRange.udiv(RHSRange));
+ break;
+ case Instruction::Shl:
+ Result.markConstantRange(LHSRange.shl(RHSRange));
+ break;
+ case Instruction::LShr:
+ Result.markConstantRange(LHSRange.lshr(RHSRange));
+ break;
+ case Instruction::Trunc:
+ Result.markConstantRange(LHSRange.truncate(ResultTy->getBitWidth()));
+ break;
+ case Instruction::SExt:
+ Result.markConstantRange(LHSRange.signExtend(ResultTy->getBitWidth()));
+ break;
+ case Instruction::ZExt:
+ Result.markConstantRange(LHSRange.zeroExtend(ResultTy->getBitWidth()));
+ break;
+ case Instruction::BitCast:
+ Result.markConstantRange(LHSRange);
+ break;
+
+ // Unhandled instructions are overdefined.
+ default:
+ DEBUG(dbgs() << " compute BB '" << BB->getName()
+ << "' - overdefined because inst def found.\n");
+ Result.markOverdefined();
+ break;
+ }
+
+ return Cache[BB] = Result;
}
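The per-opcode switch above delegates all of the arithmetic to ConstantRange; the lattice only records the resulting interval. A rough standalone illustration of what that propagation buys, using a toy half-open [Lo, Hi) interval over 64-bit values that, unlike the real ConstantRange, ignores wrap-around entirely (the names here are invented):

#include <iostream>

// Toy half-open interval [Lo, Hi) over unsigned 64-bit values. Unlike
// LLVM's ConstantRange it does not model wrap-around.
struct Interval {
  unsigned long long Lo, Hi;         // assumes Lo < Hi (non-empty)
  Interval(unsigned long long Lo, unsigned long long Hi) : Lo(Lo), Hi(Hi) {}
};

// [aLo,aHi) + [bLo,bHi) -> [aLo+bLo, (aHi-1)+(bHi-1)+1), overflow ignored.
Interval add(Interval A, Interval B) {
  return Interval(A.Lo + B.Lo, A.Hi + B.Hi - 1);
}

// Truncation to N bits keeps the interval only if it already fits;
// otherwise the result covers the whole truncated domain ("overdefined").
Interval truncTo(Interval A, unsigned Bits) {
  unsigned long long Limit = 1ULL << Bits;
  if (A.Hi <= Limit)
    return A;
  return Interval(0, Limit);
}

int main() {
  Interval X(10, 20);                // x known to lie in [10, 19]
  Interval C(5, 6);                  // the ConstantInt 5 as a 1-element range
  Interval Sum = add(X, C);          // x + 5 lies in [15, 25)
  Interval T = truncTo(Sum, 8);      // fits in 8 bits, interval preserved
  std::cout << "[" << T.Lo << ", " << T.Hi << ")\n";   // prints [15, 25)
  return 0;
}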
@@ -420,28 +636,57 @@ LVILatticeVal LVIQuery::getEdgeValue(BasicBlock *BBFrom, BasicBlock *BBTo) {
// it is.
if (BI->getCondition() == Val)
return LVILatticeVal::get(ConstantInt::get(
- Type::getInt1Ty(Val->getContext()), isTrueDest));
+ Type::getInt1Ty(Val->getContext()), isTrueDest));
// If the condition of the branch is an equality comparison, we may be
// able to infer the value.
- if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition()))
- if (ICI->isEquality() && ICI->getOperand(0) == Val &&
- isa<Constant>(ICI->getOperand(1))) {
+ ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition());
+ if (ICI && ICI->getOperand(0) == Val &&
+ isa<Constant>(ICI->getOperand(1))) {
+ if (ICI->isEquality()) {
// We know that V has the RHS constant if this is a true SETEQ or
// false SETNE.
if (isTrueDest == (ICI->getPredicate() == ICmpInst::ICMP_EQ))
return LVILatticeVal::get(cast<Constant>(ICI->getOperand(1)));
return LVILatticeVal::getNot(cast<Constant>(ICI->getOperand(1)));
}
+
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
+ // Calculate the range of values that would satisfy the comparison.
+ ConstantRange CmpRange(CI->getValue(), CI->getValue()+1);
+ ConstantRange TrueValues =
+ ConstantRange::makeICmpRegion(ICI->getPredicate(), CmpRange);
+
+ // If we're interested in the false dest, invert the condition.
+ if (!isTrueDest) TrueValues = TrueValues.inverse();
+
+ // Figure out the possible values of the query BEFORE this branch.
+ LVILatticeVal InBlock = getBlockValue(BBFrom);
+ if (!InBlock.isConstantRange())
+ return LVILatticeVal::getRange(TrueValues);
+
+ // Find all potential values that satisfy both the input and output
+ // conditions.
+ ConstantRange PossibleValues =
+ TrueValues.intersectWith(InBlock.getConstantRange());
+
+ return LVILatticeVal::getRange(PossibleValues);
+ }
+ }
}
}
// If the edge was formed by a switch on the value, then we may know exactly
// what it is.
if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
- // If BBTo is the default destination of the switch, we don't know anything.
- // Given a more powerful range analysis we could know stuff.
- if (SI->getCondition() == Val && SI->getDefaultDest() != BBTo) {
+ if (SI->getCondition() == Val) {
+ // We don't know anything in the default case.
+ if (SI->getDefaultDest() == BBTo) {
+ LVILatticeVal Result;
+ Result.markOverdefined();
+ return Result;
+ }
+
// We only know something if there is exactly one value that goes from
// BBFrom to BBTo.
unsigned NumEdges = 0;
@@ -474,7 +719,9 @@ LVILatticeVal LazyValueInfoCache::getValueInBlock(Value *V, BasicBlock *BB) {
DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
<< BB->getName() << "'\n");
- LVILatticeVal Result = LVIQuery(V, ValueCache[V]).getBlockValue(BB);
+ LVILatticeVal Result = LVIQuery(V, *this,
+ ValueCache[LVIValueHandle(V, this)],
+ OverDefinedCache).getBlockValue(BB);
DEBUG(dbgs() << " Result = " << Result << "\n");
return Result;
@@ -488,24 +735,80 @@ getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB) {
DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
<< FromBB->getName() << "' to '" << ToBB->getName() << "'\n");
+
LVILatticeVal Result =
- LVIQuery(V, ValueCache[V]).getEdgeValue(FromBB, ToBB);
+ LVIQuery(V, *this, ValueCache[LVIValueHandle(V, this)],
+ OverDefinedCache).getEdgeValue(FromBB, ToBB);
DEBUG(dbgs() << " Result = " << Result << "\n");
return Result;
}
+void LazyValueInfoCache::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
+ BasicBlock *NewSucc) {
+ // When an edge in the graph has been threaded, values that we could not
+ // determine a value for before (i.e. were marked overdefined) may be possible
+ // to solve now. We do NOT try to proactively update these values. Instead,
+ // we clear their entries from the cache, and allow lazy updating to recompute
+ // them when needed.
+
+  // The updating process is fairly simple: we need to drop cached info
+ // for all values that were marked overdefined in OldSucc, and for those same
+ // values in any successor of OldSucc (except NewSucc) in which they were
+ // also marked overdefined.
+ std::vector<BasicBlock*> worklist;
+ worklist.push_back(OldSucc);
+
+ DenseSet<Value*> ClearSet;
+ for (std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator
+ I = OverDefinedCache.begin(), E = OverDefinedCache.end(); I != E; ++I) {
+ if (I->first == OldSucc)
+ ClearSet.insert(I->second);
+ }
+
+ // Use a worklist to perform a depth-first search of OldSucc's successors.
+ // NOTE: We do not need a visited list since any blocks we have already
+ // visited will have had their overdefined markers cleared already, and we
+ // thus won't loop to their successors.
+ while (!worklist.empty()) {
+ BasicBlock *ToUpdate = worklist.back();
+ worklist.pop_back();
+
+ // Skip blocks only accessible through NewSucc.
+ if (ToUpdate == NewSucc) continue;
+
+ bool changed = false;
+ for (DenseSet<Value*>::iterator I = ClearSet.begin(),E = ClearSet.end();
+ I != E; ++I) {
+ // If a value was marked overdefined in OldSucc, and is here too...
+ std::set<std::pair<AssertingVH<BasicBlock>, Value*> >::iterator OI =
+ OverDefinedCache.find(std::make_pair(ToUpdate, *I));
+ if (OI == OverDefinedCache.end()) continue;
+
+ // Remove it from the caches.
+ ValueCacheEntryTy &Entry = ValueCache[LVIValueHandle(*I, this)];
+ ValueCacheEntryTy::iterator CI = Entry.find(ToUpdate);
+
+ assert(CI != Entry.end() && "Couldn't find entry to update?");
+ Entry.erase(CI);
+ OverDefinedCache.erase(OI);
+
+ // If we removed anything, then we potentially need to update
+ // blocks successors too.
+ changed = true;
+ }
+
+ if (!changed) continue;
+
+ worklist.insert(worklist.end(), succ_begin(ToUpdate), succ_end(ToUpdate));
+ }
+}
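The threadEdge walk above is a plain depth-first worklist: clear the stale over-defined marks in OldSucc, then keep descending into successors only while something was actually removed, skipping NewSucc. A self-contained sketch of that invalidation pattern over a toy CFG of named blocks, tracking a single value instead of the ClearSet of several (all names here are invented):

#include <iostream>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

typedef std::map<std::string, std::vector<std::string> > MiniCFG;
typedef std::set<std::pair<std::string, std::string> > OverDefinedSet;

// Walk OldSucc and its successors, dropping cached "overdefined" marks
// for Val, stopping along any path where nothing was stale.
void invalidateAfterThreading(const MiniCFG &G, OverDefinedSet &OverDefined,
                              const std::string &OldSucc,
                              const std::string &NewSucc,
                              const std::string &Val) {
  std::vector<std::string> Worklist;
  Worklist.push_back(OldSucc);
  while (!Worklist.empty()) {
    std::string BB = Worklist.back();
    Worklist.pop_back();
    if (BB == NewSucc)
      continue;                      // only reachable through the new edge
    if (!OverDefined.erase(std::make_pair(BB, Val)))
      continue;                      // nothing stale here: stop descending
    MiniCFG::const_iterator It = G.find(BB);
    if (It != G.end())               // something changed: visit successors
      Worklist.insert(Worklist.end(), It->second.begin(), It->second.end());
  }
}

int main() {
  MiniCFG G;
  G["old"].push_back("a");  G["old"].push_back("b");
  G["a"].push_back("exit"); G["b"].push_back("exit");
  OverDefinedSet OverDefined;
  OverDefined.insert(std::make_pair(std::string("old"),  std::string("%x")));
  OverDefined.insert(std::make_pair(std::string("a"),    std::string("%x")));
  OverDefined.insert(std::make_pair(std::string("exit"), std::string("%y")));
  invalidateAfterThreading(G, OverDefined, "old", "new", "%x");
  std::cout << OverDefined.size() << "\n";   // 1: only ("exit","%y") remains
  return 0;
}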
+
//===----------------------------------------------------------------------===//
// LazyValueInfo Impl
//===----------------------------------------------------------------------===//
-bool LazyValueInfo::runOnFunction(Function &F) {
- TD = getAnalysisIfAvailable<TargetData>();
- // Fully lazy.
- return false;
-}
-
/// getCache - This lazily constructs the LazyValueInfoCache.
static LazyValueInfoCache &getCache(void *&PImpl) {
if (!PImpl)
@@ -513,6 +816,15 @@ static LazyValueInfoCache &getCache(void *&PImpl) {
return *static_cast<LazyValueInfoCache*>(PImpl);
}
+bool LazyValueInfo::runOnFunction(Function &F) {
+ if (PImpl)
+ getCache(PImpl).clear();
+
+ TD = getAnalysisIfAvailable<TargetData>();
+ // Fully lazy.
+ return false;
+}
+
void LazyValueInfo::releaseMemory() {
// If the cache was allocated, free it.
if (PImpl) {
@@ -526,6 +838,11 @@ Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB) {
if (Result.isConstant())
return Result.getConstant();
+ else if (Result.isConstantRange()) {
+ ConstantRange CR = Result.getConstantRange();
+ if (const APInt *SingleVal = CR.getSingleElement())
+ return ConstantInt::get(V->getContext(), *SingleVal);
+ }
return 0;
}
@@ -537,6 +854,11 @@ Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
if (Result.isConstant())
return Result.getConstant();
+ else if (Result.isConstantRange()) {
+ ConstantRange CR = Result.getConstantRange();
+ if (const APInt *SingleVal = CR.getSingleElement())
+ return ConstantInt::get(V->getContext(), *SingleVal);
+ }
return 0;
}
@@ -557,6 +879,36 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
return Unknown;
}
+ if (Result.isConstantRange()) {
+ ConstantInt *CI = dyn_cast<ConstantInt>(C);
+ if (!CI) return Unknown;
+
+ ConstantRange CR = Result.getConstantRange();
+ if (Pred == ICmpInst::ICMP_EQ) {
+ if (!CR.contains(CI->getValue()))
+ return False;
+
+ if (CR.isSingleElement() && CR.contains(CI->getValue()))
+ return True;
+ } else if (Pred == ICmpInst::ICMP_NE) {
+ if (!CR.contains(CI->getValue()))
+ return True;
+
+ if (CR.isSingleElement() && CR.contains(CI->getValue()))
+ return False;
+ }
+
+ // Handle more complex predicates.
+ ConstantRange RHS(CI->getValue(), CI->getValue()+1);
+ ConstantRange TrueValues = ConstantRange::makeICmpRegion(Pred, RHS);
+ if (CR.intersectWith(TrueValues).isEmptySet())
+ return False;
+ else if (TrueValues.contains(CR))
+ return True;
+
+ return Unknown;
+ }
+
if (Result.isNotConstant()) {
// If this is an equality comparison, we can try to fold it knowing that
// "V != C1".
@@ -579,4 +931,11 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
return Unknown;
}
+void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
+ BasicBlock* NewSucc) {
+ if (PImpl) getCache(PImpl).threadEdge(PredBB, OldSucc, NewSucc);
+}
+void LazyValueInfo::eraseBlock(BasicBlock *BB) {
+ if (PImpl) getCache(PImpl).eraseBlock(BB);
+}
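The range-aware getPredicateOnEdge above boils down to two containment tests: the predicate is definitely true if the value's known range sits entirely inside the set of values satisfying the comparison, definitely false if the two sets are disjoint, and unknown otherwise. A self-contained sketch of that decision for an unsigned "less than" predicate, again with a toy non-wrapping range type (invented names standing in for ConstantRange::makeICmpRegion and intersectWith):

#include <iostream>

// Toy half-open unsigned range [Lo, Hi), no wrap-around modelling.
struct Range {
  unsigned long long Lo, Hi;
  Range(unsigned long long Lo, unsigned long long Hi) : Lo(Lo), Hi(Hi) {}
  bool containsRange(const Range &O) const { return Lo <= O.Lo && O.Hi <= Hi; }
  bool intersects(const Range &O) const { return Lo < O.Hi && O.Lo < Hi; }
};

enum TriBool { No, Yes, Maybe };

// Evaluate "V <u C" given that V is known to lie in Known: definitely true
// if every value in Known satisfies it, definitely false if none does.
TriBool evalULT(const Range &Known, unsigned long long C) {
  Range TrueValues(0, C);            // the values for which V <u C holds
  if (TrueValues.containsRange(Known))
    return Yes;
  if (!TrueValues.intersects(Known))
    return No;
  return Maybe;
}

int main() {
  Range Known(15, 25);               // suppose LVI proved V is in [15, 25)
  std::cout << evalULT(Known, 30) << " "    // 1: always true on this edge
            << evalULT(Known, 10) << " "    // 0: never true on this edge
            << evalULT(Known, 20) << "\n";  // 2: depends on V
  return 0;
}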
diff --git a/libclamav/c++/llvm/lib/Analysis/LibCallAliasAnalysis.cpp b/libclamav/c++/llvm/lib/Analysis/LibCallAliasAnalysis.cpp
index 7419659..7f51202 100644
--- a/libclamav/c++/llvm/lib/Analysis/LibCallAliasAnalysis.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/LibCallAliasAnalysis.cpp
@@ -20,11 +20,8 @@ using namespace llvm;
// Register this pass...
char LibCallAliasAnalysis::ID = 0;
-static RegisterPass<LibCallAliasAnalysis>
-X("libcall-aa", "LibCall Alias Analysis", false, true);
-
-// Declare that we implement the AliasAnalysis interface
-static RegisterAnalysisGroup<AliasAnalysis> Y(X);
+INITIALIZE_AG_PASS(LibCallAliasAnalysis, AliasAnalysis, "libcall-aa",
+ "LibCall Alias Analysis", false, true, false);
FunctionPass *llvm::createLibCallAliasAnalysisPass(LibCallInfo *LCI) {
return new LibCallAliasAnalysis(LCI);
@@ -46,7 +43,7 @@ void LibCallAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
/// vs the specified pointer/size.
AliasAnalysis::ModRefResult
LibCallAliasAnalysis::AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
- CallSite CS, Value *P,
+ ImmutableCallSite CS, const Value *P,
unsigned Size) {
// If we have a function, check to see what kind of mod/ref effects it
// has. Start by including any info globally known about the function.
@@ -120,13 +117,14 @@ LibCallAliasAnalysis::AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
// specified memory object.
//
AliasAnalysis::ModRefResult
-LibCallAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
+LibCallAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
+ const Value *P, unsigned Size) {
ModRefResult MRInfo = ModRef;
// If this is a direct call to a function that LCI knows about, get the
// information about the runtime function.
if (LCI) {
- if (Function *F = CS.getCalledFunction()) {
+ if (const Function *F = CS.getCalledFunction()) {
if (const LibCallFunctionInfo *FI = LCI->getFunctionInfo(F)) {
MRInfo = ModRefResult(MRInfo & AnalyzeLibCallDetails(FI, CS, P, Size));
if (MRInfo == NoModRef) return NoModRef;
diff --git a/libclamav/c++/llvm/lib/Analysis/LibCallSemantics.cpp b/libclamav/c++/llvm/lib/Analysis/LibCallSemantics.cpp
index e0060c3..81b0f46 100644
--- a/libclamav/c++/llvm/lib/Analysis/LibCallSemantics.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/LibCallSemantics.cpp
@@ -40,7 +40,8 @@ const LibCallLocationInfo &LibCallInfo::getLocationInfo(unsigned LocID) const {
/// getFunctionInfo - Return the LibCallFunctionInfo object corresponding to
/// the specified function if we have it. If not, return null.
-const LibCallFunctionInfo *LibCallInfo::getFunctionInfo(Function *F) const {
+const LibCallFunctionInfo *
+LibCallInfo::getFunctionInfo(const Function *F) const {
StringMap<const LibCallFunctionInfo*> *Map = getMap(Impl);
/// If this is the first time we are querying for this info, lazily construct
diff --git a/libclamav/c++/llvm/lib/Analysis/Lint.cpp b/libclamav/c++/llvm/lib/Analysis/Lint.cpp
new file mode 100644
index 0000000..a9d9724
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Analysis/Lint.cpp
@@ -0,0 +1,662 @@
+//===-- Lint.cpp - Check for common errors in LLVM IR ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass statically checks for common and easily-identified constructs
+// which produce undefined or likely unintended behavior in LLVM IR.
+//
+// It is not a guarantee of correctness, in two ways. First, it isn't
+// comprehensive. There are checks which could be done statically which are
+// not yet implemented. Some of these are indicated by TODO comments, but
+// those aren't comprehensive either. Second, many conditions cannot be
+// checked statically. This pass does no dynamic instrumentation, so it
+// can't check for all possible problems.
+//
+// Another limitation is that it assumes all code will be executed. A store
+// through a null pointer in a basic block which is never reached is harmless,
+// but this pass will warn about it anyway. This is the main reason why most
+// of these checks live here instead of in the Verifier pass.
+//
+// Optimization passes may make conditions that this pass checks for more or
+// less obvious. If an optimization pass appears to be introducing a warning,
+// it may be that the optimization pass is merely exposing an existing
+// condition in the code.
+//
+// This code may be run before instcombine. In many cases, instcombine checks
+// for the same kinds of things and turns instructions with undefined behavior
+// into unreachable (or equivalent). Because of this, this pass makes some
+// effort to look through bitcasts and so on.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/Lint.h"
+#include "llvm/Analysis/Loads.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Assembly/Writer.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Pass.h"
+#include "llvm/PassManager.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/Function.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/InstVisitor.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/STLExtras.h"
+using namespace llvm;
+
+namespace {
+ namespace MemRef {
+ static unsigned Read = 1;
+ static unsigned Write = 2;
+ static unsigned Callee = 4;
+ static unsigned Branchee = 8;
+ }
+
+ class Lint : public FunctionPass, public InstVisitor<Lint> {
+ friend class InstVisitor<Lint>;
+
+ void visitFunction(Function &F);
+
+ void visitCallSite(CallSite CS);
+ void visitMemoryReference(Instruction &I, Value *Ptr,
+ unsigned Size, unsigned Align,
+ const Type *Ty, unsigned Flags);
+
+ void visitCallInst(CallInst &I);
+ void visitInvokeInst(InvokeInst &I);
+ void visitReturnInst(ReturnInst &I);
+ void visitLoadInst(LoadInst &I);
+ void visitStoreInst(StoreInst &I);
+ void visitXor(BinaryOperator &I);
+ void visitSub(BinaryOperator &I);
+ void visitLShr(BinaryOperator &I);
+ void visitAShr(BinaryOperator &I);
+ void visitShl(BinaryOperator &I);
+ void visitSDiv(BinaryOperator &I);
+ void visitUDiv(BinaryOperator &I);
+ void visitSRem(BinaryOperator &I);
+ void visitURem(BinaryOperator &I);
+ void visitAllocaInst(AllocaInst &I);
+ void visitVAArgInst(VAArgInst &I);
+ void visitIndirectBrInst(IndirectBrInst &I);
+ void visitExtractElementInst(ExtractElementInst &I);
+ void visitInsertElementInst(InsertElementInst &I);
+ void visitUnreachableInst(UnreachableInst &I);
+
+ Value *findValue(Value *V, bool OffsetOk) const;
+ Value *findValueImpl(Value *V, bool OffsetOk,
+ SmallPtrSet<Value *, 4> &Visited) const;
+
+ public:
+ Module *Mod;
+ AliasAnalysis *AA;
+ DominatorTree *DT;
+ TargetData *TD;
+
+ std::string Messages;
+ raw_string_ostream MessagesStr;
+
+ static char ID; // Pass identification, replacement for typeid
+ Lint() : FunctionPass(ID), MessagesStr(Messages) {}
+
+ virtual bool runOnFunction(Function &F);
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequired<AliasAnalysis>();
+ AU.addRequired<DominatorTree>();
+ }
+ virtual void print(raw_ostream &O, const Module *M) const {}
+
+ void WriteValue(const Value *V) {
+ if (!V) return;
+ if (isa<Instruction>(V)) {
+ MessagesStr << *V << '\n';
+ } else {
+ WriteAsOperand(MessagesStr, V, true, Mod);
+ MessagesStr << '\n';
+ }
+ }
+
+ void WriteType(const Type *T) {
+ if (!T) return;
+ MessagesStr << ' ';
+ WriteTypeSymbolic(MessagesStr, T, Mod);
+ }
+
+ // CheckFailed - A check failed, so print out the condition and the message
+ // that failed. This provides a nice place to put a breakpoint if you want
+ // to see why something is not correct.
+ void CheckFailed(const Twine &Message,
+ const Value *V1 = 0, const Value *V2 = 0,
+ const Value *V3 = 0, const Value *V4 = 0) {
+ MessagesStr << Message.str() << "\n";
+ WriteValue(V1);
+ WriteValue(V2);
+ WriteValue(V3);
+ WriteValue(V4);
+ }
+
+ void CheckFailed(const Twine &Message, const Value *V1,
+ const Type *T2, const Value *V3 = 0) {
+ MessagesStr << Message.str() << "\n";
+ WriteValue(V1);
+ WriteType(T2);
+ WriteValue(V3);
+ }
+
+ void CheckFailed(const Twine &Message, const Type *T1,
+ const Type *T2 = 0, const Type *T3 = 0) {
+ MessagesStr << Message.str() << "\n";
+ WriteType(T1);
+ WriteType(T2);
+ WriteType(T3);
+ }
+ };
+}
+
+char Lint::ID = 0;
+INITIALIZE_PASS(Lint, "lint", "Statically lint-checks LLVM IR", false, true);
+
+// Assert - We know that cond should be true, if not print an error message.
+#define Assert(C, M) \
+ do { if (!(C)) { CheckFailed(M); return; } } while (0)
+#define Assert1(C, M, V1) \
+ do { if (!(C)) { CheckFailed(M, V1); return; } } while (0)
+#define Assert2(C, M, V1, V2) \
+ do { if (!(C)) { CheckFailed(M, V1, V2); return; } } while (0)
+#define Assert3(C, M, V1, V2, V3) \
+ do { if (!(C)) { CheckFailed(M, V1, V2, V3); return; } } while (0)
+#define Assert4(C, M, V1, V2, V3, V4) \
+ do { if (!(C)) { CheckFailed(M, V1, V2, V3, V4); return; } } while (0)
+
+// Lint::runOnFunction - This is the main analysis entry point for a
+// function.
+//
+bool Lint::runOnFunction(Function &F) {
+ Mod = F.getParent();
+ AA = &getAnalysis<AliasAnalysis>();
+ DT = &getAnalysis<DominatorTree>();
+ TD = getAnalysisIfAvailable<TargetData>();
+ visit(F);
+ dbgs() << MessagesStr.str();
+ Messages.clear();
+ return false;
+}
+
+void Lint::visitFunction(Function &F) {
+ // This isn't undefined behavior, it's just a little unusual, and it's a
+ // fairly common mistake to neglect to name a function.
+ Assert1(F.hasName() || F.hasLocalLinkage(),
+ "Unusual: Unnamed function with non-local linkage", &F);
+
+ // TODO: Check for irreducible control flow.
+}
+
+void Lint::visitCallSite(CallSite CS) {
+ Instruction &I = *CS.getInstruction();
+ Value *Callee = CS.getCalledValue();
+
+ visitMemoryReference(I, Callee, ~0u, 0, 0, MemRef::Callee);
+
+ if (Function *F = dyn_cast<Function>(findValue(Callee, /*OffsetOk=*/false))) {
+ Assert1(CS.getCallingConv() == F->getCallingConv(),
+ "Undefined behavior: Caller and callee calling convention differ",
+ &I);
+
+ const FunctionType *FT = F->getFunctionType();
+ unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
+
+ Assert1(FT->isVarArg() ?
+ FT->getNumParams() <= NumActualArgs :
+ FT->getNumParams() == NumActualArgs,
+ "Undefined behavior: Call argument count mismatches callee "
+ "argument count", &I);
+
+ Assert1(FT->getReturnType() == I.getType(),
+ "Undefined behavior: Call return type mismatches "
+ "callee return type", &I);
+
+ // Check argument types (in case the callee was casted) and attributes.
+ // TODO: Verify that caller and callee attributes are compatible.
+ Function::arg_iterator PI = F->arg_begin(), PE = F->arg_end();
+ CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
+ for (; AI != AE; ++AI) {
+ Value *Actual = *AI;
+ if (PI != PE) {
+ Argument *Formal = PI++;
+ Assert1(Formal->getType() == Actual->getType(),
+ "Undefined behavior: Call argument type mismatches "
+ "callee parameter type", &I);
+
+ // Check that noalias arguments don't alias other arguments. The
+ // AliasAnalysis API isn't expressive enough for what we really want
+ // to do. Known partial overlap is not distinguished from the case
+ // where nothing is known.
+ if (Formal->hasNoAliasAttr() && Actual->getType()->isPointerTy())
+ for (CallSite::arg_iterator BI = CS.arg_begin(); BI != AE; ++BI) {
+ Assert1(AI == BI || AA->alias(*AI, *BI) != AliasAnalysis::MustAlias,
+ "Unusual: noalias argument aliases another argument", &I);
+ }
+
+ // Check that an sret argument points to valid memory.
+ if (Formal->hasStructRetAttr() && Actual->getType()->isPointerTy()) {
+ const Type *Ty =
+ cast<PointerType>(Formal->getType())->getElementType();
+ visitMemoryReference(I, Actual, AA->getTypeStoreSize(Ty),
+ TD ? TD->getABITypeAlignment(Ty) : 0,
+ Ty, MemRef::Read | MemRef::Write);
+ }
+ }
+ }
+ }
+
+ if (CS.isCall() && cast<CallInst>(CS.getInstruction())->isTailCall())
+ for (CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
+ AI != AE; ++AI) {
+ Value *Obj = findValue(*AI, /*OffsetOk=*/true);
+ Assert1(!isa<AllocaInst>(Obj),
+ "Undefined behavior: Call with \"tail\" keyword references "
+ "alloca", &I);
+ }
+
+
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I))
+ switch (II->getIntrinsicID()) {
+ default: break;
+
+ // TODO: Check more intrinsics
+
+ case Intrinsic::memcpy: {
+ MemCpyInst *MCI = cast<MemCpyInst>(&I);
+ // TODO: If the size is known, use it.
+ visitMemoryReference(I, MCI->getDest(), ~0u, MCI->getAlignment(), 0,
+ MemRef::Write);
+ visitMemoryReference(I, MCI->getSource(), ~0u, MCI->getAlignment(), 0,
+ MemRef::Read);
+
+ // Check that the memcpy arguments don't overlap. The AliasAnalysis API
+ // isn't expressive enough for what we really want to do. Known partial
+ // overlap is not distinguished from the case where nothing is known.
+ unsigned Size = 0;
+ if (const ConstantInt *Len =
+ dyn_cast<ConstantInt>(findValue(MCI->getLength(),
+ /*OffsetOk=*/false)))
+ if (Len->getValue().isIntN(32))
+ Size = Len->getValue().getZExtValue();
+ Assert1(AA->alias(MCI->getSource(), Size, MCI->getDest(), Size) !=
+ AliasAnalysis::MustAlias,
+ "Undefined behavior: memcpy source and destination overlap", &I);
+ break;
+ }
+ case Intrinsic::memmove: {
+ MemMoveInst *MMI = cast<MemMoveInst>(&I);
+ // TODO: If the size is known, use it.
+ visitMemoryReference(I, MMI->getDest(), ~0u, MMI->getAlignment(), 0,
+ MemRef::Write);
+ visitMemoryReference(I, MMI->getSource(), ~0u, MMI->getAlignment(), 0,
+ MemRef::Read);
+ break;
+ }
+ case Intrinsic::memset: {
+ MemSetInst *MSI = cast<MemSetInst>(&I);
+ // TODO: If the size is known, use it.
+ visitMemoryReference(I, MSI->getDest(), ~0u, MSI->getAlignment(), 0,
+ MemRef::Write);
+ break;
+ }
+
+ case Intrinsic::vastart:
+ Assert1(I.getParent()->getParent()->isVarArg(),
+ "Undefined behavior: va_start called in a non-varargs function",
+ &I);
+
+ visitMemoryReference(I, CS.getArgument(0), ~0u, 0, 0,
+ MemRef::Read | MemRef::Write);
+ break;
+ case Intrinsic::vacopy:
+ visitMemoryReference(I, CS.getArgument(0), ~0u, 0, 0, MemRef::Write);
+ visitMemoryReference(I, CS.getArgument(1), ~0u, 0, 0, MemRef::Read);
+ break;
+ case Intrinsic::vaend:
+ visitMemoryReference(I, CS.getArgument(0), ~0u, 0, 0,
+ MemRef::Read | MemRef::Write);
+ break;
+
+ case Intrinsic::stackrestore:
+ // Stackrestore doesn't read or write memory, but it sets the
+ // stack pointer, which the compiler may read from or write to
+ // at any time, so check it for both readability and writeability.
+ visitMemoryReference(I, CS.getArgument(0), ~0u, 0, 0,
+ MemRef::Read | MemRef::Write);
+ break;
+ }
+}
+
+void Lint::visitCallInst(CallInst &I) {
+ return visitCallSite(&I);
+}
+
+void Lint::visitInvokeInst(InvokeInst &I) {
+ return visitCallSite(&I);
+}
+
+void Lint::visitReturnInst(ReturnInst &I) {
+ Function *F = I.getParent()->getParent();
+ Assert1(!F->doesNotReturn(),
+ "Unusual: Return statement in function with noreturn attribute",
+ &I);
+
+ if (Value *V = I.getReturnValue()) {
+ Value *Obj = findValue(V, /*OffsetOk=*/true);
+ Assert1(!isa<AllocaInst>(Obj),
+ "Unusual: Returning alloca value", &I);
+ }
+}
+
+// TODO: Check that the reference is in bounds.
+// TODO: Check readnone/readonly function attributes.
+void Lint::visitMemoryReference(Instruction &I,
+ Value *Ptr, unsigned Size, unsigned Align,
+ const Type *Ty, unsigned Flags) {
+ // If no memory is being referenced, it doesn't matter if the pointer
+ // is valid.
+ if (Size == 0)
+ return;
+
+ Value *UnderlyingObject = findValue(Ptr, /*OffsetOk=*/true);
+ Assert1(!isa<ConstantPointerNull>(UnderlyingObject),
+ "Undefined behavior: Null pointer dereference", &I);
+ Assert1(!isa<UndefValue>(UnderlyingObject),
+ "Undefined behavior: Undef pointer dereference", &I);
+ Assert1(!isa<ConstantInt>(UnderlyingObject) ||
+ !cast<ConstantInt>(UnderlyingObject)->isAllOnesValue(),
+ "Unusual: All-ones pointer dereference", &I);
+ Assert1(!isa<ConstantInt>(UnderlyingObject) ||
+ !cast<ConstantInt>(UnderlyingObject)->isOne(),
+ "Unusual: Address one pointer dereference", &I);
+
+ if (Flags & MemRef::Write) {
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(UnderlyingObject))
+ Assert1(!GV->isConstant(),
+ "Undefined behavior: Write to read-only memory", &I);
+ Assert1(!isa<Function>(UnderlyingObject) &&
+ !isa<BlockAddress>(UnderlyingObject),
+ "Undefined behavior: Write to text section", &I);
+ }
+ if (Flags & MemRef::Read) {
+ Assert1(!isa<Function>(UnderlyingObject),
+ "Unusual: Load from function body", &I);
+ Assert1(!isa<BlockAddress>(UnderlyingObject),
+ "Undefined behavior: Load from block address", &I);
+ }
+ if (Flags & MemRef::Callee) {
+ Assert1(!isa<BlockAddress>(UnderlyingObject),
+ "Undefined behavior: Call to block address", &I);
+ }
+ if (Flags & MemRef::Branchee) {
+ Assert1(!isa<Constant>(UnderlyingObject) ||
+ isa<BlockAddress>(UnderlyingObject),
+ "Undefined behavior: Branch to non-blockaddress", &I);
+ }
+
+ if (TD) {
+ if (Align == 0 && Ty) Align = TD->getABITypeAlignment(Ty);
+
+ if (Align != 0) {
+ unsigned BitWidth = TD->getTypeSizeInBits(Ptr->getType());
+ APInt Mask = APInt::getAllOnesValue(BitWidth),
+ KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+ ComputeMaskedBits(Ptr, Mask, KnownZero, KnownOne, TD);
+ Assert1(!(KnownOne & APInt::getLowBitsSet(BitWidth, Log2_32(Align))),
+ "Undefined behavior: Memory reference address is misaligned", &I);
+ }
+ }
+}
+
+void Lint::visitLoadInst(LoadInst &I) {
+ visitMemoryReference(I, I.getPointerOperand(),
+ AA->getTypeStoreSize(I.getType()), I.getAlignment(),
+ I.getType(), MemRef::Read);
+}
+
+void Lint::visitStoreInst(StoreInst &I) {
+ visitMemoryReference(I, I.getPointerOperand(),
+ AA->getTypeStoreSize(I.getOperand(0)->getType()),
+ I.getAlignment(),
+ I.getOperand(0)->getType(), MemRef::Write);
+}
+
+void Lint::visitXor(BinaryOperator &I) {
+ Assert1(!isa<UndefValue>(I.getOperand(0)) ||
+ !isa<UndefValue>(I.getOperand(1)),
+ "Undefined result: xor(undef, undef)", &I);
+}
+
+void Lint::visitSub(BinaryOperator &I) {
+ Assert1(!isa<UndefValue>(I.getOperand(0)) ||
+ !isa<UndefValue>(I.getOperand(1)),
+ "Undefined result: sub(undef, undef)", &I);
+}
+
+void Lint::visitLShr(BinaryOperator &I) {
+ if (ConstantInt *CI =
+ dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
+ Assert1(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
+ "Undefined result: Shift count out of range", &I);
+}
+
+void Lint::visitAShr(BinaryOperator &I) {
+ if (ConstantInt *CI =
+ dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
+ Assert1(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
+ "Undefined result: Shift count out of range", &I);
+}
+
+void Lint::visitShl(BinaryOperator &I) {
+ if (ConstantInt *CI =
+ dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
+ Assert1(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
+ "Undefined result: Shift count out of range", &I);
+}
+
+static bool isZero(Value *V, TargetData *TD) {
+ // Assume undef could be zero.
+ if (isa<UndefValue>(V)) return true;
+
+ unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
+ APInt Mask = APInt::getAllOnesValue(BitWidth),
+ KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+ ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD);
+ return KnownZero.isAllOnesValue();
+}
+
+void Lint::visitSDiv(BinaryOperator &I) {
+ Assert1(!isZero(I.getOperand(1), TD),
+ "Undefined behavior: Division by zero", &I);
+}
+
+void Lint::visitUDiv(BinaryOperator &I) {
+ Assert1(!isZero(I.getOperand(1), TD),
+ "Undefined behavior: Division by zero", &I);
+}
+
+void Lint::visitSRem(BinaryOperator &I) {
+ Assert1(!isZero(I.getOperand(1), TD),
+ "Undefined behavior: Division by zero", &I);
+}
+
+void Lint::visitURem(BinaryOperator &I) {
+ Assert1(!isZero(I.getOperand(1), TD),
+ "Undefined behavior: Division by zero", &I);
+}
+
+void Lint::visitAllocaInst(AllocaInst &I) {
+ if (isa<ConstantInt>(I.getArraySize()))
+ // This isn't undefined behavior, it's just an obvious pessimization.
+ Assert1(&I.getParent()->getParent()->getEntryBlock() == I.getParent(),
+ "Pessimization: Static alloca outside of entry block", &I);
+
+ // TODO: Check for an unusual size (MSB set?)
+}
+
+void Lint::visitVAArgInst(VAArgInst &I) {
+ visitMemoryReference(I, I.getOperand(0), ~0u, 0, 0,
+ MemRef::Read | MemRef::Write);
+}
+
+void Lint::visitIndirectBrInst(IndirectBrInst &I) {
+ visitMemoryReference(I, I.getAddress(), ~0u, 0, 0, MemRef::Branchee);
+
+ Assert1(I.getNumDestinations() != 0,
+ "Undefined behavior: indirectbr with no destinations", &I);
+}
+
+void Lint::visitExtractElementInst(ExtractElementInst &I) {
+ if (ConstantInt *CI =
+ dyn_cast<ConstantInt>(findValue(I.getIndexOperand(),
+ /*OffsetOk=*/false)))
+ Assert1(CI->getValue().ult(I.getVectorOperandType()->getNumElements()),
+ "Undefined result: extractelement index out of range", &I);
+}
+
+void Lint::visitInsertElementInst(InsertElementInst &I) {
+ if (ConstantInt *CI =
+ dyn_cast<ConstantInt>(findValue(I.getOperand(2),
+ /*OffsetOk=*/false)))
+ Assert1(CI->getValue().ult(I.getType()->getNumElements()),
+ "Undefined result: insertelement index out of range", &I);
+}
+
+void Lint::visitUnreachableInst(UnreachableInst &I) {
+ // This isn't undefined behavior, it's merely suspicious.
+ Assert1(&I == I.getParent()->begin() ||
+ prior(BasicBlock::iterator(&I))->mayHaveSideEffects(),
+ "Unusual: unreachable immediately preceded by instruction without "
+ "side effects", &I);
+}
+
+/// findValue - Look through bitcasts and simple memory reference patterns
+/// to identify an equivalent, but more informative, value. If OffsetOk
+/// is true, look through getelementptrs with non-zero offsets too.
+///
+/// Most analysis passes don't require this logic, because instcombine
+/// will simplify most of these kinds of things away. But it's a goal of
+/// this Lint pass to be useful even on non-optimized IR.
+Value *Lint::findValue(Value *V, bool OffsetOk) const {
+ SmallPtrSet<Value *, 4> Visited;
+ return findValueImpl(V, OffsetOk, Visited);
+}
+
+/// findValueImpl - Implementation helper for findValue.
+Value *Lint::findValueImpl(Value *V, bool OffsetOk,
+ SmallPtrSet<Value *, 4> &Visited) const {
+ // Detect self-referential values.
+ if (!Visited.insert(V))
+ return UndefValue::get(V->getType());
+
+ // TODO: Look through sext or zext cast, when the result is known to
+ // be interpreted as signed or unsigned, respectively.
+ // TODO: Look through eliminable cast pairs.
+ // TODO: Look through calls with unique return values.
+ // TODO: Look through vector insert/extract/shuffle.
+ V = OffsetOk ? V->getUnderlyingObject() : V->stripPointerCasts();
+ if (LoadInst *L = dyn_cast<LoadInst>(V)) {
+ BasicBlock::iterator BBI = L;
+ BasicBlock *BB = L->getParent();
+ SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
+ for (;;) {
+ if (!VisitedBlocks.insert(BB)) break;
+ if (Value *U = FindAvailableLoadedValue(L->getPointerOperand(),
+ BB, BBI, 6, AA))
+ return findValueImpl(U, OffsetOk, Visited);
+ if (BBI != BB->begin()) break;
+ BB = BB->getUniquePredecessor();
+ if (!BB) break;
+ BBI = BB->end();
+ }
+ } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
+ if (Value *W = PN->hasConstantValue(DT))
+ return findValueImpl(W, OffsetOk, Visited);
+ } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
+ if (CI->isNoopCast(TD ? TD->getIntPtrType(V->getContext()) :
+ Type::getInt64Ty(V->getContext())))
+ return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
+ } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
+ if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
+ Ex->idx_begin(),
+ Ex->idx_end()))
+ if (W != V)
+ return findValueImpl(W, OffsetOk, Visited);
+ } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+ // Same as above, but for ConstantExpr instead of Instruction.
+ if (Instruction::isCast(CE->getOpcode())) {
+ if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
+ CE->getOperand(0)->getType(),
+ CE->getType(),
+ TD ? TD->getIntPtrType(V->getContext()) :
+ Type::getInt64Ty(V->getContext())))
+ return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
+ } else if (CE->getOpcode() == Instruction::ExtractValue) {
+ const SmallVector<unsigned, 4> &Indices = CE->getIndices();
+ if (Value *W = FindInsertedValue(CE->getOperand(0),
+ Indices.begin(),
+ Indices.end()))
+ if (W != V)
+ return findValueImpl(W, OffsetOk, Visited);
+ }
+ }
+
+ // As a last resort, try SimplifyInstruction or constant folding.
+ if (Instruction *Inst = dyn_cast<Instruction>(V)) {
+ if (Value *W = SimplifyInstruction(Inst, TD))
+ if (W != Inst)
+ return findValueImpl(W, OffsetOk, Visited);
+ } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+ if (Value *W = ConstantFoldConstantExpression(CE, TD))
+ if (W != V)
+ return findValueImpl(W, OffsetOk, Visited);
+ }
+
+ return V;
+}
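
findValueImpl above guards its recursion with a Visited set and falls back to undef as soon as a value is revisited. A rough standalone sketch of the same cycle-guarded lookup, in plain C++ with a made-up Node type rather than LLVM values:

#include <unordered_set>

// A toy value graph: each node may forward to a more informative node,
// the way a no-op cast or a single-value PHI does in the pass above.
struct Node {
  int id = 0;
  Node *forward = nullptr;
};

// Follow 'forward' links, but bail out to a sentinel (null here, undef in
// findValueImpl above) as soon as a node is revisited, i.e. the chain is
// self-referential.
Node *findValue(Node *N, std::unordered_set<Node *> &Visited) {
  if (!Visited.insert(N).second)
    return nullptr;                      // cycle detected: give up safely
  if (N->forward)
    return findValue(N->forward, Visited);
  return N;                              // most informative value reached
}

int main() {
  Node A, B;
  A.forward = &B;
  B.forward = &A;                        // deliberately cyclic
  std::unordered_set<Node *> Visited;
  return findValue(&A, Visited) == nullptr ? 0 : 1;
}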
+
+//===----------------------------------------------------------------------===//
+// Implement the public interfaces to this file...
+//===----------------------------------------------------------------------===//
+
+FunctionPass *llvm::createLintPass() {
+ return new Lint();
+}
+
+/// lintFunction - Check a function for errors, printing messages on stderr.
+///
+void llvm::lintFunction(const Function &f) {
+ Function &F = const_cast<Function&>(f);
+ assert(!F.isDeclaration() && "Cannot lint external functions");
+
+ FunctionPassManager FPM(F.getParent());
+ Lint *V = new Lint();
+ FPM.add(V);
+ FPM.run(F);
+}
+
+/// lintModule - Check a module for errors, printing messages on stderr.
+///
+void llvm::lintModule(const Module &M) {
+ PassManager PM;
+ Lint *V = new Lint();
+ PM.add(V);
+ PM.run(const_cast<Module&>(M));
+}
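
For context on the reporting scheme used throughout Lint.cpp above: the Assert* macros never abort, they append to a message buffer via CheckFailed, and runOnFunction dumps that buffer once the whole function has been visited. A minimal standalone analogue of the pattern, using std::ostringstream instead of raw_string_ostream (names here are illustrative only):

#include <iostream>
#include <sstream>
#include <string>

// Checks never abort; they just append to a buffer that is flushed after
// each "function" has been processed, as in the pass above.
struct Reporter {
  std::ostringstream Messages;
  void checkFailed(const std::string &Msg) { Messages << Msg << '\n'; }
};

// Analogue of the Assert1 macro: on failure, record the message and return.
#define LINT_ASSERT(R, C, M)                                         \
  do { if (!(C)) { (R).checkFailed(M); return; } } while (0)

void checkDivisor(Reporter &R, int Divisor) {
  LINT_ASSERT(R, Divisor != 0, "Undefined behavior: Division by zero");
}

int main() {
  Reporter R;
  checkDivisor(R, 0);
  checkDivisor(R, 4);
  std::cout << R.Messages.str();   // dump the accumulated diagnostics
}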
diff --git a/libclamav/c++/llvm/lib/Analysis/LiveValues.cpp b/libclamav/c++/llvm/lib/Analysis/LiveValues.cpp
index 1b91d93..0225f4f 100644
--- a/libclamav/c++/llvm/lib/Analysis/LiveValues.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/LiveValues.cpp
@@ -22,10 +22,10 @@ namespace llvm {
}
char LiveValues::ID = 0;
-static RegisterPass<LiveValues>
-X("live-values", "Value Liveness Analysis", false, true);
+INITIALIZE_PASS(LiveValues, "live-values",
+ "Value Liveness Analysis", false, true);
-LiveValues::LiveValues() : FunctionPass(&ID) {}
+LiveValues::LiveValues() : FunctionPass(ID) {}
void LiveValues::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<DominatorTree>();
@@ -125,7 +125,7 @@ LiveValues::Memo &LiveValues::compute(const Value *V) {
bool LiveOutOfDefBB = false;
// Examine each use of the value.
- for (Value::use_const_iterator I = V->use_begin(), E = V->use_end();
+ for (Value::const_use_iterator I = V->use_begin(), E = V->use_end();
I != E; ++I) {
const User *U = *I;
const BasicBlock *UseBB = cast<Instruction>(U)->getParent();
diff --git a/libclamav/c++/llvm/lib/Analysis/Loads.cpp b/libclamav/c++/llvm/lib/Analysis/Loads.cpp
new file mode 100644
index 0000000..2ba1d86
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Analysis/Loads.cpp
@@ -0,0 +1,235 @@
+//===- Loads.cpp - Local load analysis ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines simple local analyses for load instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/Loads.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/GlobalAlias.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/IntrinsicInst.h"
+using namespace llvm;
+
+/// AreEquivalentAddressValues - Test if A and B will obviously have the same
+/// value. This includes recognizing that %t0 and %t1 will have the same
+/// value in code like this:
+/// %t0 = getelementptr \@a, 0, 3
+/// store i32 0, i32* %t0
+/// %t1 = getelementptr \@a, 0, 3
+/// %t2 = load i32* %t1
+///
+static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
+ // Test if the values are trivially equivalent.
+ if (A == B) return true;
+
+ // Test if the values come from identical arithmetic instructions.
+ // Use isIdenticalToWhenDefined instead of isIdenticalTo because
+ // this function is only used when one address use dominates the
+ // other, which means that they'll always either have the same
+ // value or one of them will have an undefined value.
+ if (isa<BinaryOperator>(A) || isa<CastInst>(A) ||
+ isa<PHINode>(A) || isa<GetElementPtrInst>(A))
+ if (const Instruction *BI = dyn_cast<Instruction>(B))
+ if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
+ return true;
+
+ // Otherwise they may not be equivalent.
+ return false;
+}
+
+/// getUnderlyingObjectWithOffset - Strip off up to MaxLookup GEPs and
+/// bitcasts to get back to the underlying object being addressed, keeping
+/// track of the offset in bytes from the GEPs relative to the result.
+/// This is closely related to Value::getUnderlyingObject but is located
+/// here to avoid making VMCore depend on TargetData.
+static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD,
+ uint64_t &ByteOffset,
+ unsigned MaxLookup = 6) {
+ if (!V->getType()->isPointerTy())
+ return V;
+ for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+ if (!GEP->hasAllConstantIndices())
+ return V;
+ SmallVector<Value*, 8> Indices(GEP->op_begin() + 1, GEP->op_end());
+ ByteOffset += TD->getIndexedOffset(GEP->getPointerOperandType(),
+ &Indices[0], Indices.size());
+ V = GEP->getPointerOperand();
+ } else if (Operator::getOpcode(V) == Instruction::BitCast) {
+ V = cast<Operator>(V)->getOperand(0);
+ } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ if (GA->mayBeOverridden())
+ return V;
+ V = GA->getAliasee();
+ } else {
+ return V;
+ }
+ assert(V->getType()->isPointerTy() && "Unexpected operand type!");
+ }
+ return V;
+}
+
+/// isSafeToLoadUnconditionally - Return true if we know that executing a load
+/// from this value cannot trap. If it is not obviously safe to load from the
+/// specified pointer, we do a quick local scan of the basic block containing
+/// ScanFrom, to determine if the address is already accessed.
+bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
+ unsigned Align, const TargetData *TD) {
+ uint64_t ByteOffset = 0;
+ Value *Base = V;
+ if (TD)
+ Base = getUnderlyingObjectWithOffset(V, TD, ByteOffset);
+
+ const Type *BaseType = 0;
+ unsigned BaseAlign = 0;
+ if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
+ // An alloca is safe to load from as long as it is suitably aligned.
+ BaseType = AI->getAllocatedType();
+ BaseAlign = AI->getAlignment();
+ } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(Base)) {
+ // Global variables are safe to load from but their size cannot be
+ // guaranteed if they are overridden.
+ if (!isa<GlobalAlias>(GV) && !GV->mayBeOverridden()) {
+ BaseType = GV->getType()->getElementType();
+ BaseAlign = GV->getAlignment();
+ }
+ }
+
+ if (BaseType && BaseType->isSized()) {
+ if (TD && BaseAlign == 0)
+ BaseAlign = TD->getPrefTypeAlignment(BaseType);
+
+ if (Align <= BaseAlign) {
+ if (!TD)
+ return true; // Loading directly from an alloca or global is OK.
+
+ // Check if the load is within the bounds of the underlying object.
+ const PointerType *AddrTy = cast<PointerType>(V->getType());
+ uint64_t LoadSize = TD->getTypeStoreSize(AddrTy->getElementType());
+ if (ByteOffset + LoadSize <= TD->getTypeAllocSize(BaseType) &&
+ (Align == 0 || (ByteOffset % Align) == 0))
+ return true;
+ }
+ }
+
+ // Otherwise, be a little bit aggressive by scanning the local block where we
+ // want to check to see if the pointer is already being loaded or stored
+ // from/to. If so, the previous load or store would have already trapped,
+ // so there is no harm doing an extra load (also, CSE will later eliminate
+ // the load entirely).
+ BasicBlock::iterator BBI = ScanFrom, E = ScanFrom->getParent()->begin();
+
+ while (BBI != E) {
+ --BBI;
+
+ // If we see a free or a call which may write to memory (i.e. which might do
+ // a free) the pointer could be marked invalid.
+ if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
+ !isa<DbgInfoIntrinsic>(BBI))
+ return false;
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
+ if (AreEquivalentAddressValues(LI->getOperand(0), V)) return true;
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
+ if (AreEquivalentAddressValues(SI->getOperand(1), V)) return true;
+ }
+ }
+ return false;
+}
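
The in-bounds branch of isSafeToLoadUnconditionally above accepts a load only when the access fits inside the underlying object and the byte offset respects the requested alignment. The same test in isolation, as a minimal sketch over plain integers (no TargetData involved):

#include <cstdint>

// Does a load of LoadSize bytes at ByteOffset stay within an object of
// AllocSize bytes, and does the offset keep the requested alignment?
// Mirrors the bounds check in isSafeToLoadUnconditionally above.
bool fitsAndAligned(uint64_t ByteOffset, uint64_t LoadSize,
                    uint64_t AllocSize, uint64_t Align) {
  if (ByteOffset + LoadSize > AllocSize)
    return false;                        // would read past the object
  return Align == 0 || (ByteOffset % Align) == 0;
}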
+
+/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at the
+/// instruction before ScanFrom) checking to see if we have the value at the
+/// memory address *Ptr locally available within a small number of instructions.
+/// If the value is available, return it.
+///
+/// If not, return the iterator for the last validated instruction that the
+/// value would be live through. If we scanned the entire block and didn't find
+/// something that invalidates *Ptr or provides it, ScanFrom would be left at
+/// begin() and this returns null. ScanFrom could also be left
+///
+/// MaxInstsToScan specifies the maximum instructions to scan in the block. If
+/// it is set to 0, it will scan the whole block. You can also optionally
+/// specify an alias analysis implementation, which makes this more precise.
+Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
+ BasicBlock::iterator &ScanFrom,
+ unsigned MaxInstsToScan,
+ AliasAnalysis *AA) {
+ if (MaxInstsToScan == 0) MaxInstsToScan = ~0U;
+
+ // If we're using alias analysis to disambiguate, get the size of *Ptr.
+ unsigned AccessSize = 0;
+ if (AA) {
+ const Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType();
+ AccessSize = AA->getTypeStoreSize(AccessTy);
+ }
+
+ while (ScanFrom != ScanBB->begin()) {
+ // We must ignore debug info directives when counting (otherwise they
+ // would affect codegen).
+ Instruction *Inst = --ScanFrom;
+ if (isa<DbgInfoIntrinsic>(Inst))
+ continue;
+
+ // Restore ScanFrom to expected value in case next test succeeds
+ ScanFrom++;
+
+ // Don't scan huge blocks.
+ if (MaxInstsToScan-- == 0) return 0;
+
+ --ScanFrom;
+ // If this is a load of Ptr, the loaded value is available.
+ if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
+ if (AreEquivalentAddressValues(LI->getOperand(0), Ptr))
+ return LI;
+
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ // If this is a store through Ptr, the value is available!
+ if (AreEquivalentAddressValues(SI->getOperand(1), Ptr))
+ return SI->getOperand(0);
+
+ // If Ptr is an alloca and this is a store to a different alloca, ignore
+ // the store. This is a trivial form of alias analysis that is important
+ // for reg2mem'd code.
+ if ((isa<AllocaInst>(Ptr) || isa<GlobalVariable>(Ptr)) &&
+ (isa<AllocaInst>(SI->getOperand(1)) ||
+ isa<GlobalVariable>(SI->getOperand(1))))
+ continue;
+
+ // If we have alias analysis and it says the store won't modify the loaded
+ // value, ignore the store.
+ if (AA &&
+ (AA->getModRefInfo(SI, Ptr, AccessSize) & AliasAnalysis::Mod) == 0)
+ continue;
+
+ // Otherwise the store may or may not alias the pointer; bail out.
+ ++ScanFrom;
+ return 0;
+ }
+
+ // If this is some other instruction that may clobber Ptr, bail out.
+ if (Inst->mayWriteToMemory()) {
+ // If alias analysis claims that it really won't modify the load,
+ // ignore it.
+ if (AA &&
+ (AA->getModRefInfo(Inst, Ptr, AccessSize) & AliasAnalysis::Mod) == 0)
+ continue;
+
+ // May modify the pointer, bail out.
+ ++ScanFrom;
+ return 0;
+ }
+ }
+
+ // Got to the start of the block, we didn't find it, but are done for this
+ // block.
+ return 0;
+}
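
FindAvailableLoadedValue above scans the block backwards: a store through the same address yields the stored value, an equivalent earlier load yields that load, and anything that might clobber the location ends the scan. A rough standalone model of that backwards scan over a flat list of memory operations (the Op type is made up for illustration; there is no alias analysis, so every unknown write counts as a clobber):

#include <optional>
#include <vector>

// A toy memory operation: loads and stores to an integer "address",
// plus opaque clobbers that may write anywhere.
struct Op {
  enum Kind { Load, Store, Clobber } K;
  int Addr  = 0;   // which location (ignored for Clobber)
  int Value = 0;   // value stored / value produced by the load
};

// Scan backwards from the end of 'Block' looking for the value at Addr,
// in the spirit of FindAvailableLoadedValue above.
std::optional<int> findAvailableValue(const std::vector<Op> &Block, int Addr) {
  for (auto I = Block.rbegin(), E = Block.rend(); I != E; ++I) {
    if (I->K == Op::Store && I->Addr == Addr)
      return I->Value;               // store through the same address
    if (I->K == Op::Load && I->Addr == Addr)
      return I->Value;               // earlier load of the same address
    if (I->K == Op::Clobber)
      return std::nullopt;           // may modify the location: give up
    // Stores to provably different addresses are simply skipped.
  }
  return std::nullopt;               // reached the top without an answer
}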
diff --git a/libclamav/c++/llvm/lib/Analysis/LoopDependenceAnalysis.cpp b/libclamav/c++/llvm/lib/Analysis/LoopDependenceAnalysis.cpp
index bb4f46d..82c02dc 100644
--- a/libclamav/c++/llvm/lib/Analysis/LoopDependenceAnalysis.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/LoopDependenceAnalysis.cpp
@@ -46,8 +46,8 @@ LoopPass *llvm::createLoopDependenceAnalysisPass() {
return new LoopDependenceAnalysis();
}
-static RegisterPass<LoopDependenceAnalysis>
-R("lda", "Loop Dependence Analysis", false, true);
+INITIALIZE_PASS(LoopDependenceAnalysis, "lda",
+ "Loop Dependence Analysis", false, true);
char LoopDependenceAnalysis::ID = 0;
//===----------------------------------------------------------------------===//
@@ -119,8 +119,7 @@ bool LoopDependenceAnalysis::findOrInsertDependencePair(Value *A,
P = Pairs.FindNodeOrInsertPos(id, insertPos);
if (P) return true;
- P = PairAllocator.Allocate<DependencePair>();
- new (P) DependencePair(id, A, B);
+ P = new (PairAllocator) DependencePair(id, A, B);
Pairs.InsertNode(P, insertPos);
return false;
}
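
The LoopDependenceAnalysis hunk above replaces the explicit Allocate-then-placement-new pair with a single new (PairAllocator) expression, i.e. an operator new overload that takes the allocator. A minimal sketch of that idiom with a deliberately simple bump allocator (the Bump class is invented for illustration and is not LLVM's BumpPtrAllocator; it never frees and does no exhaustion checking):

#include <cstddef>
#include <new>

// A trivially simple bump allocator: hand out chunks from a fixed buffer,
// rounded up to max_align_t alignment.
class Bump {
  alignas(std::max_align_t) unsigned char Buf[4096];
  std::size_t Used = 0;
public:
  void *allocate(std::size_t N) {
    N = (N + alignof(std::max_align_t) - 1) & ~(alignof(std::max_align_t) - 1);
    void *P = Buf + Used;
    Used += N;
    return P;
  }
};

// The overload that makes 'new (A) T(...)' work against the allocator.
void *operator new(std::size_t N, Bump &A) { return A.allocate(N); }
void operator delete(void *, Bump &) {}   // only used if a ctor throws

struct DependencePair { int Id; explicit DependencePair(int I) : Id(I) {} };

int main() {
  Bump A;
  DependencePair *P = new (A) DependencePair(42);  // allocate + construct
  return P->Id == 42 ? 0 : 1;
}

An operator new overload of this shape is what makes the one-line 'new (PairAllocator) DependencePair(...)' spelling in the hunk possible.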
diff --git a/libclamav/c++/llvm/lib/Analysis/LoopInfo.cpp b/libclamav/c++/llvm/lib/Analysis/LoopInfo.cpp
index 453af5a..46219d1 100644
--- a/libclamav/c++/llvm/lib/Analysis/LoopInfo.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/LoopInfo.cpp
@@ -29,17 +29,16 @@ using namespace llvm;
// Always verify loopinfo if expensive checking is enabled.
#ifdef XDEBUG
-bool VerifyLoopInfo = true;
+static bool VerifyLoopInfo = true;
#else
-bool VerifyLoopInfo = false;
+static bool VerifyLoopInfo = false;
#endif
static cl::opt<bool,true>
VerifyLoopInfoX("verify-loop-info", cl::location(VerifyLoopInfo),
cl::desc("Verify loop info (time consuming)"));
char LoopInfo::ID = 0;
-static RegisterPass<LoopInfo>
-X("loops", "Natural Loop Information", true, true);
+INITIALIZE_PASS(LoopInfo, "loops", "Natural Loop Information", true, true);
//===----------------------------------------------------------------------===//
// Loop implementation
@@ -124,14 +123,13 @@ PHINode *Loop::getCanonicalInductionVariable() const {
BasicBlock *H = getHeader();
BasicBlock *Incoming = 0, *Backedge = 0;
- typedef GraphTraits<Inverse<BasicBlock*> > InvBlockTraits;
- InvBlockTraits::ChildIteratorType PI = InvBlockTraits::child_begin(H);
- assert(PI != InvBlockTraits::child_end(H) &&
+ pred_iterator PI = pred_begin(H);
+ assert(PI != pred_end(H) &&
"Loop must have at least one backedge!");
Backedge = *PI++;
- if (PI == InvBlockTraits::child_end(H)) return 0; // dead loop
+ if (PI == pred_end(H)) return 0; // dead loop
Incoming = *PI++;
- if (PI != InvBlockTraits::child_end(H)) return 0; // multiple backedges?
+ if (PI != pred_end(H)) return 0; // multiple backedges?
if (contains(Incoming)) {
if (contains(Backedge))
@@ -157,18 +155,6 @@ PHINode *Loop::getCanonicalInductionVariable() const {
return 0;
}
-/// getCanonicalInductionVariableIncrement - Return the LLVM value that holds
-/// the canonical induction variable value for the "next" iteration of the
-/// loop. This always succeeds if getCanonicalInductionVariable succeeds.
-///
-Instruction *Loop::getCanonicalInductionVariableIncrement() const {
- if (PHINode *PN = getCanonicalInductionVariable()) {
- bool P1InLoop = contains(PN->getIncomingBlock(1));
- return cast<Instruction>(PN->getIncomingValue(P1InLoop));
- }
- return 0;
-}
-
/// getTripCount - Return a loop-invariant LLVM value indicating the number of
/// times the loop will be executed. Note that this means that the backedge
/// of the loop executes N-1 times. If the trip-count cannot be determined,
@@ -180,12 +166,12 @@ Instruction *Loop::getCanonicalInductionVariableIncrement() const {
Value *Loop::getTripCount() const {
// Canonical loops will end with a 'cmp ne I, V', where I is the incremented
// canonical induction variable and V is the trip count of the loop.
- Instruction *Inc = getCanonicalInductionVariableIncrement();
- if (Inc == 0) return 0;
- PHINode *IV = cast<PHINode>(Inc->getOperand(0));
+ PHINode *IV = getCanonicalInductionVariable();
+ if (IV == 0 || IV->getNumIncomingValues() != 2) return 0;
- BasicBlock *BackedgeBlock =
- IV->getIncomingBlock(contains(IV->getIncomingBlock(1)));
+ bool P0InLoop = contains(IV->getIncomingBlock(0));
+ Value *Inc = IV->getIncomingValue(!P0InLoop);
+ BasicBlock *BackedgeBlock = IV->getIncomingBlock(!P0InLoop);
if (BranchInst *BI = dyn_cast<BranchInst>(BackedgeBlock->getTerminator()))
if (BI->isConditional()) {
@@ -263,23 +249,28 @@ unsigned Loop::getSmallConstantTripMultiple() const {
}
/// isLCSSAForm - Return true if the Loop is in LCSSA form
-bool Loop::isLCSSAForm() const {
+bool Loop::isLCSSAForm(DominatorTree &DT) const {
// Sort the blocks vector so that we can use binary search to do quick
// lookups.
- SmallPtrSet<BasicBlock *, 16> LoopBBs(block_begin(), block_end());
+ SmallPtrSet<BasicBlock*, 16> LoopBBs(block_begin(), block_end());
for (block_iterator BI = block_begin(), E = block_end(); BI != E; ++BI) {
BasicBlock *BB = *BI;
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;++I)
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
++UI) {
- BasicBlock *UserBB = cast<Instruction>(*UI)->getParent();
- if (PHINode *P = dyn_cast<PHINode>(*UI))
+ User *U = *UI;
+ BasicBlock *UserBB = cast<Instruction>(U)->getParent();
+ if (PHINode *P = dyn_cast<PHINode>(U))
UserBB = P->getIncomingBlock(UI);
- // Check the current block, as a fast-path. Most values are used in
- // the same block they are defined in.
- if (UserBB != BB && !LoopBBs.count(UserBB))
+ // Check the current block, as a fast-path, before checking whether
+ // the use is anywhere in the loop. Most values are used in the same
+ // block they are defined in. Also, blocks not reachable from the
+ // entry are special; uses in them don't need to go through PHIs.
+ if (UserBB != BB &&
+ !LoopBBs.count(UserBB) &&
+ DT.isReachableFromEntry(UserBB))
return false;
}
}
@@ -336,16 +327,12 @@ Loop::getUniqueExitBlocks(SmallVectorImpl<BasicBlock *> &ExitBlocks) const {
BasicBlock *current = *BI;
switchExitBlocks.clear();
- typedef GraphTraits<BasicBlock *> BlockTraits;
- typedef GraphTraits<Inverse<BasicBlock *> > InvBlockTraits;
- for (BlockTraits::ChildIteratorType I =
- BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
- I != E; ++I) {
+ for (succ_iterator I = succ_begin(*BI), E = succ_end(*BI); I != E; ++I) {
// If the block is inside the loop then it is not an exit block.
if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I))
continue;
- InvBlockTraits::ChildIteratorType PI = InvBlockTraits::child_begin(*I);
+ pred_iterator PI = pred_begin(*I);
BasicBlock *firstPred = *PI;
// If current basic block is this exit block's first predecessor
@@ -358,8 +345,7 @@ Loop::getUniqueExitBlocks(SmallVectorImpl<BasicBlock *> &ExitBlocks) const {
// If a terminator has more then two successors, for example SwitchInst,
// then it is possible that there are multiple edges from current block
// to one exit block.
- if (std::distance(BlockTraits::child_begin(current),
- BlockTraits::child_end(current)) <= 2) {
+ if (std::distance(succ_begin(current), succ_end(current)) <= 2) {
ExitBlocks.push_back(*I);
continue;
}
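
The rewritten getTripCount above leans on the canonical loop shape its comment describes: the backedge is guarded by a 'cmp ne' between the incremented induction variable and the trip count. In source terms the shape looks roughly like this (illustration only, assuming the loop has been rotated):

// A canonical counted loop: 'i' is the canonical induction variable and
// 'n' the trip count. After rotation, the latch ends with a compare of
// the incremented value against n -- the 'cmp ne I, V' the comment above
// refers to.
void zero_fill(int *a, int n) {
  for (int i = 0; i != n; ++i)
    a[i] = 0;
}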
diff --git a/libclamav/c++/llvm/lib/Analysis/LoopPass.cpp b/libclamav/c++/llvm/lib/Analysis/LoopPass.cpp
index 2d613f6..15d4db8 100644
--- a/libclamav/c++/llvm/lib/Analysis/LoopPass.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/LoopPass.cpp
@@ -14,8 +14,44 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Timer.h"
using namespace llvm;
+namespace {
+
+/// PrintLoopPass - Print a Function corresponding to a Loop.
+///
+class PrintLoopPass : public LoopPass {
+private:
+ std::string Banner;
+ raw_ostream &Out; // raw_ostream to print on.
+
+public:
+ static char ID;
+ PrintLoopPass() : LoopPass(ID), Out(dbgs()) {}
+ PrintLoopPass(const std::string &B, raw_ostream &o)
+ : LoopPass(ID), Banner(B), Out(o) {}
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ }
+
+ bool runOnLoop(Loop *L, LPPassManager &) {
+ Out << Banner;
+ for (Loop::block_iterator b = L->block_begin(), be = L->block_end();
+ b != be;
+ ++b) {
+ (*b)->print(Out);
+ }
+ return false;
+ }
+};
+
+char PrintLoopPass::ID = 0;
+}
+
//===----------------------------------------------------------------------===//
// LPPassManager
//
@@ -23,7 +59,7 @@ using namespace llvm;
char LPPassManager::ID = 0;
LPPassManager::LPPassManager(int Depth)
- : FunctionPass(&ID), PMDataManager(Depth) {
+ : FunctionPass(ID), PMDataManager(Depth) {
skipThisLoop = false;
redoThisLoop = false;
LI = NULL;
@@ -147,7 +183,7 @@ void LPPassManager::redoLoop(Loop *L) {
void LPPassManager::cloneBasicBlockSimpleAnalysis(BasicBlock *From,
BasicBlock *To, Loop *L) {
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
- LoopPass *LP = (LoopPass *)getContainedPass(Index);
+ LoopPass *LP = getContainedPass(Index);
LP->cloneBasicBlockAnalysis(From, To, L);
}
}
@@ -162,7 +198,7 @@ void LPPassManager::deleteSimpleAnalysisValue(Value *V, Loop *L) {
}
}
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
- LoopPass *LP = (LoopPass *)getContainedPass(Index);
+ LoopPass *LP = getContainedPass(Index);
LP->deleteAnalysisValue(V, L);
}
}
@@ -204,7 +240,7 @@ bool LPPassManager::runOnFunction(Function &F) {
I != E; ++I) {
Loop *L = *I;
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
- LoopPass *P = (LoopPass*)getContainedPass(Index);
+ LoopPass *P = getContainedPass(Index);
Changed |= P->doInitialization(L, *this);
}
}
@@ -218,25 +254,25 @@ bool LPPassManager::runOnFunction(Function &F) {
// Run all passes on the current Loop.
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
- LoopPass *P = (LoopPass*)getContainedPass(Index);
+ LoopPass *P = getContainedPass(Index);
dumpPassInfo(P, EXECUTION_MSG, ON_LOOP_MSG,
- CurrentLoop->getHeader()->getNameStr());
+ CurrentLoop->getHeader()->getName());
dumpRequiredSet(P);
initializeAnalysisImpl(P);
{
PassManagerPrettyStackEntry X(P, *CurrentLoop->getHeader());
- Timer *T = StartPassTimer(P);
+ TimeRegion PassTimer(getPassTimer(P));
+
Changed |= P->runOnLoop(CurrentLoop, *this);
- StopPassTimer(P, T);
}
if (Changed)
dumpPassInfo(P, MODIFICATION_MSG, ON_LOOP_MSG,
skipThisLoop ? "<deleted>" :
- CurrentLoop->getHeader()->getNameStr());
+ CurrentLoop->getHeader()->getName());
dumpPreservedSet(P);
if (!skipThisLoop) {
@@ -245,9 +281,10 @@ bool LPPassManager::runOnFunction(Function &F) {
// is a function pass and it's really expensive to verify every
// loop in the function every time. That level of checking can be
// enabled with the -verify-loop-info option.
- Timer *T = StartPassTimer(LI);
- CurrentLoop->verifyLoop();
- StopPassTimer(LI, T);
+ {
+ TimeRegion PassTimer(getPassTimer(LI));
+ CurrentLoop->verifyLoop();
+ }
// Then call the regular verifyAnalysis functions.
verifyPreservedAnalysis(P);
@@ -257,7 +294,7 @@ bool LPPassManager::runOnFunction(Function &F) {
recordAvailableAnalysis(P);
removeDeadPasses(P,
skipThisLoop ? "<deleted>" :
- CurrentLoop->getHeader()->getNameStr(),
+ CurrentLoop->getHeader()->getName(),
ON_LOOP_MSG);
if (skipThisLoop)
@@ -283,7 +320,7 @@ bool LPPassManager::runOnFunction(Function &F) {
// Finalization
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
- LoopPass *P = (LoopPass *)getContainedPass(Index);
+ LoopPass *P = getContainedPass(Index);
Changed |= P->doFinalization();
}
@@ -304,6 +341,11 @@ void LPPassManager::dumpPassStructure(unsigned Offset) {
//===----------------------------------------------------------------------===//
// LoopPass
+Pass *LoopPass::createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const {
+ return new PrintLoopPass(Banner, O);
+}
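
The timing hunks above swap the explicit StartPassTimer/StopPassTimer calls for a scoped TimeRegion, so timing stops automatically however the region is left. A standalone sketch of the same RAII idiom using std::chrono (ScopedTimer is invented for illustration and is not LLVM's TimeRegion):

#include <chrono>
#include <iostream>
#include <string>

// RAII timer: starts in the constructor, reports in the destructor, so the
// region is timed no matter how the enclosing scope is exited.
class ScopedTimer {
  std::string Name;
  std::chrono::steady_clock::time_point Start;
public:
  explicit ScopedTimer(std::string N)
      : Name(std::move(N)), Start(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    auto End = std::chrono::steady_clock::now();
    auto Us = std::chrono::duration_cast<std::chrono::microseconds>(End - Start);
    std::cerr << Name << ": " << Us.count() << " us\n";
  }
};

void runPass() {
  ScopedTimer T("loop pass");   // analogous to: TimeRegion PassTimer(...)
  // ... work being timed ...
}

int main() { runPass(); }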
+
// Check if this pass is suitable for the current LPPassManager, if
// available. This pass P is not suitable for a LPPassManager if P
// is not preserving higher level analysis info used by other
diff --git a/libclamav/c++/llvm/lib/Analysis/MemoryBuiltins.cpp b/libclamav/c++/llvm/lib/Analysis/MemoryBuiltins.cpp
index 297b588..1ab18ca 100644
--- a/libclamav/c++/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -101,9 +101,9 @@ static Value *computeArraySize(const CallInst *CI, const TargetData *TD,
if (const StructType *ST = dyn_cast<StructType>(T))
ElementSize = TD->getStructLayout(ST)->getSizeInBytes();
- // If malloc calls' arg can be determined to be a multiple of ElementSize,
+ // If malloc call's arg can be determined to be a multiple of ElementSize,
// return the multiple. Otherwise, return NULL.
- Value *MallocArg = CI->getOperand(1);
+ Value *MallocArg = CI->getArgOperand(0);
Value *Multiple = NULL;
if (ComputeMultiple(MallocArg, ElementSize, Multiple,
LookThroughSExt))
@@ -120,7 +120,7 @@ const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) {
Value *ArraySize = computeArraySize(CI, TD);
if (ArraySize &&
- ArraySize != ConstantInt::get(CI->getOperand(1)->getType(), 1))
+ ArraySize != ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
return CI;
// CI is a non-array malloc or we can't figure out that it is an array malloc.
@@ -139,7 +139,7 @@ const PointerType *llvm::getMallocType(const CallInst *CI) {
unsigned NumOfBitCastUses = 0;
// Determine if CallInst has a bitcast use.
- for (Value::use_const_iterator UI = CI->use_begin(), E = CI->use_end();
+ for (Value::const_use_iterator UI = CI->use_begin(), E = CI->use_end();
UI != E; )
if (const BitCastInst *BCI = dyn_cast<BitCastInst>(*UI++)) {
MallocType = cast<PointerType>(BCI->getDestTy());
@@ -183,25 +183,25 @@ Value *llvm::getMallocArraySize(CallInst *CI, const TargetData *TD,
// free Call Utility Functions.
//
-/// isFreeCall - Returns true if the value is a call to the builtin free()
-bool llvm::isFreeCall(const Value *I) {
+/// isFreeCall - Returns non-null if the value is a call to the builtin free()
+const CallInst *llvm::isFreeCall(const Value *I) {
const CallInst *CI = dyn_cast<CallInst>(I);
if (!CI)
- return false;
+ return 0;
Function *Callee = CI->getCalledFunction();
if (Callee == 0 || !Callee->isDeclaration() || Callee->getName() != "free")
- return false;
+ return 0;
// Check free prototype.
// FIXME: workaround for PR5130, this will be obsolete when a nobuiltin
// attribute will exist.
const FunctionType *FTy = Callee->getFunctionType();
if (!FTy->getReturnType()->isVoidTy())
- return false;
+ return 0;
if (FTy->getNumParams() != 1)
- return false;
+ return 0;
if (FTy->param_begin()->get() != Type::getInt8PtrTy(Callee->getContext()))
- return false;
+ return 0;
- return true;
+ return CI;
}
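
isFreeCall above now returns the matched CallInst instead of a bool, which lets callers bind the call in the condition and use its operands directly (as the MemoryDependenceAnalysis hunks below do). A tiny standalone sketch of that pointer-returning matcher pattern (the Expr/Call types are made up for illustration):

#include <string>

struct Expr { virtual ~Expr() = default; };
struct Call : Expr {
  std::string Callee;
  const Expr *Arg0 = nullptr;
};

// Return the call itself rather than a bool, so the caller gets a handle it
// can keep using -- the same shape as the isFreeCall change above.
const Call *isFreeCall(const Expr *E) {
  const Call *C = dynamic_cast<const Call *>(E);
  if (!C || C->Callee != "free")
    return nullptr;
  return C;
}

void example(const Expr *E) {
  if (const Call *C = isFreeCall(E)) {
    const Expr *Ptr = C->Arg0;   // operand available without a second match
    (void)Ptr;
  }
}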
diff --git a/libclamav/c++/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/libclamav/c++/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 2aa2f17..d18d5ce 100644
--- a/libclamav/c++/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -46,11 +46,11 @@ STATISTIC(NumCacheCompleteNonLocalPtr,
char MemoryDependenceAnalysis::ID = 0;
// Register this pass...
-static RegisterPass<MemoryDependenceAnalysis> X("memdep",
- "Memory Dependence Analysis", false, true);
+INITIALIZE_PASS(MemoryDependenceAnalysis, "memdep",
+ "Memory Dependence Analysis", false, true);
MemoryDependenceAnalysis::MemoryDependenceAnalysis()
-: FunctionPass(&ID), PredCache(0) {
+: FunctionPass(ID), PredCache(0) {
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
}
@@ -116,37 +116,25 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
} else if (VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
Pointer = V->getOperand(0);
PointerSize = AA->getTypeStoreSize(V->getType());
- } else if (isFreeCall(Inst)) {
- Pointer = Inst->getOperand(1);
+ } else if (const CallInst *CI = isFreeCall(Inst)) {
+ Pointer = CI->getArgOperand(0);
// calls to free() erase the entire structure
PointerSize = ~0ULL;
- } else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
+ } else if (CallSite InstCS = cast<Value>(Inst)) {
// Debug intrinsics don't cause dependences.
if (isa<DbgInfoIntrinsic>(Inst)) continue;
- CallSite InstCS = CallSite::get(Inst);
// If these two calls do not interfere, look past it.
switch (AA->getModRefInfo(CS, InstCS)) {
case AliasAnalysis::NoModRef:
- // If the two calls don't interact (e.g. InstCS is readnone) keep
- // scanning.
+ // If the two calls are the same, return InstCS as a Def, so that
+ // CS can be found redundant and eliminated.
+ if (isReadOnlyCall && InstCS.onlyReadsMemory() &&
+ CS.getInstruction()->isIdenticalToWhenDefined(Inst))
+ return MemDepResult::getDef(Inst);
+
+ // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
+ // keep scanning.
continue;
- case AliasAnalysis::Ref:
- // If the two calls read the same memory locations and CS is a readonly
- // function, then we have two cases: 1) the calls may not interfere with
- // each other at all. 2) the calls may produce the same value. In case
- // #1 we want to ignore the values, in case #2, we want to return Inst
- // as a Def dependence. This allows us to CSE in cases like:
- // X = strlen(P);
- // memchr(...);
- // Y = strlen(P); // Y = X
- if (isReadOnlyCall) {
- if (CS.getCalledFunction() != 0 &&
- CS.getCalledFunction() == InstCS.getCalledFunction())
- return MemDepResult::getDef(Inst);
- // Ignore unrelated read/read call dependences.
- continue;
- }
- // FALL THROUGH
default:
return MemDepResult::getClobber(Inst);
}
@@ -196,10 +184,9 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
// FIXME: This only considers queries directly on the invariant-tagged
// pointer, not on query pointers that are indexed off of them. It'd
// be nice to handle that at some point.
- AliasAnalysis::AliasResult R =
- AA->alias(II->getOperand(3), ~0U, MemPtr, ~0U);
+ AliasAnalysis::AliasResult R = AA->alias(II->getArgOperand(2), MemPtr);
if (R == AliasAnalysis::MustAlias) {
- InvariantTag = II->getOperand(1);
+ InvariantTag = II->getArgOperand(0);
continue;
}
@@ -209,8 +196,7 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
// FIXME: This only considers queries directly on the invariant-tagged
// pointer, not on query pointers that are indexed off of them. It'd
// be nice to handle that at some point.
- AliasAnalysis::AliasResult R =
- AA->alias(II->getOperand(2), ~0U, MemPtr, ~0U);
+ AliasAnalysis::AliasResult R = AA->alias(II->getArgOperand(1), MemPtr);
if (R == AliasAnalysis::MustAlias)
return MemDepResult::getDef(II);
}
@@ -365,28 +351,29 @@ MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
MemPtr = LI->getPointerOperand();
MemSize = AA->getTypeStoreSize(LI->getType());
}
- } else if (isFreeCall(QueryInst)) {
- MemPtr = QueryInst->getOperand(1);
+ } else if (const CallInst *CI = isFreeCall(QueryInst)) {
+ MemPtr = CI->getArgOperand(0);
// calls to free() erase the entire structure, not just a field.
MemSize = ~0UL;
} else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
int IntrinsicID = 0; // Intrinsic IDs start at 1.
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
+ IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst);
+ if (II)
IntrinsicID = II->getIntrinsicID();
switch (IntrinsicID) {
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start:
- MemPtr = QueryInst->getOperand(2);
- MemSize = cast<ConstantInt>(QueryInst->getOperand(1))->getZExtValue();
+ MemPtr = II->getArgOperand(1);
+ MemSize = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
break;
case Intrinsic::invariant_end:
- MemPtr = QueryInst->getOperand(3);
- MemSize = cast<ConstantInt>(QueryInst->getOperand(2))->getZExtValue();
+ MemPtr = II->getArgOperand(2);
+ MemSize = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
break;
default:
- CallSite QueryCS = CallSite::get(QueryInst);
+ CallSite QueryCS(QueryInst);
bool isReadOnly = AA->onlyReadsMemory(QueryCS);
LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
QueryParent);
@@ -456,7 +443,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
// Okay, we have a cache entry. If we know it is not dirty, just return it
// with no computation.
if (!CacheP.second) {
- NumCacheNonLocal++;
+ ++NumCacheNonLocal;
return Cache;
}
@@ -478,7 +465,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
DirtyBlocks.push_back(*PI);
- NumUncacheNonLocal++;
+ ++NumUncacheNonLocal;
}
// isReadonlyCall - If this is a read-only call, we can be more aggressive.
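
The rewritten call-site handling above returns a Def when it sees a second, identical read-only call, so the later call can be recognized as redundant; the deleted comment spelled this out with strlen. Roughly, in source terms (illustration only):

#include <cstddef>
#include <cstring>

// Two identical calls to a function that only reads memory, with nothing in
// between that could write to *p, compute the same value -- so the second
// call is a candidate for elimination.
std::size_t example(const char *p) {
  std::size_t x = std::strlen(p);
  // ... code that only reads memory ...
  std::size_t y = std::strlen(p);   // may be replaced with x
  return x + y;
}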
diff --git a/libclamav/c++/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp b/libclamav/c++/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp
new file mode 100644
index 0000000..2cc1c2a
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Analysis/ModuleDebugInfoPrinter.cpp
@@ -0,0 +1,85 @@
+//===-- ModuleDebugInfoPrinter.cpp - Prints module debug info metadata ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass decodes the debug info metadata in a module and prints it in a
+// (sufficiently prepared) human-readable form.
+//
+// For example, run this pass from opt along with the -analyze option, and
+// it'll print to standard output.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Assembly/Writer.h"
+#include "llvm/Pass.h"
+#include "llvm/Function.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/Statistic.h"
+using namespace llvm;
+
+namespace {
+ class ModuleDebugInfoPrinter : public ModulePass {
+ DebugInfoFinder Finder;
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ ModuleDebugInfoPrinter() : ModulePass(ID) {}
+
+ virtual bool runOnModule(Module &M);
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ }
+ virtual void print(raw_ostream &O, const Module *M) const;
+ };
+}
+
+char ModuleDebugInfoPrinter::ID = 0;
+INITIALIZE_PASS(ModuleDebugInfoPrinter, "module-debuginfo",
+ "Decodes module-level debug info", false, true);
+
+ModulePass *llvm::createModuleDebugInfoPrinterPass() {
+ return new ModuleDebugInfoPrinter();
+}
+
+bool ModuleDebugInfoPrinter::runOnModule(Module &M) {
+ Finder.processModule(M);
+ return false;
+}
+
+void ModuleDebugInfoPrinter::print(raw_ostream &O, const Module *M) const {
+ for (DebugInfoFinder::iterator I = Finder.compile_unit_begin(),
+ E = Finder.compile_unit_end(); I != E; ++I) {
+ O << "Compile Unit: ";
+ DICompileUnit(*I).print(O);
+ O << '\n';
+ }
+
+ for (DebugInfoFinder::iterator I = Finder.subprogram_begin(),
+ E = Finder.subprogram_end(); I != E; ++I) {
+ O << "Subprogram: ";
+ DISubprogram(*I).print(O);
+ O << '\n';
+ }
+
+ for (DebugInfoFinder::iterator I = Finder.global_variable_begin(),
+ E = Finder.global_variable_end(); I != E; ++I) {
+ O << "GlobalVariable: ";
+ DIGlobalVariable(*I).print(O);
+ O << '\n';
+ }
+
+ for (DebugInfoFinder::iterator I = Finder.type_begin(),
+ E = Finder.type_end(); I != E; ++I) {
+ O << "Type: ";
+ DIType(*I).print(O);
+ O << '\n';
+ }
+}
diff --git a/libclamav/c++/llvm/lib/Analysis/PointerTracking.cpp b/libclamav/c++/llvm/lib/Analysis/PointerTracking.cpp
index b692246..07f4682 100644
--- a/libclamav/c++/llvm/lib/Analysis/PointerTracking.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/PointerTracking.cpp
@@ -28,7 +28,7 @@
using namespace llvm;
char PointerTracking::ID = 0;
-PointerTracking::PointerTracking() : FunctionPass(&ID) {}
+PointerTracking::PointerTracking() : FunctionPass(ID) {}
bool PointerTracking::runOnFunction(Function &F) {
predCache.clear();
@@ -144,6 +144,55 @@ const SCEV *PointerTracking::computeAllocationCount(Value *P,
return SE->getCouldNotCompute();
}
+Value *PointerTracking::computeAllocationCountValue(Value *P, const Type *&Ty) const
+{
+ Value *V = P->stripPointerCasts();
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
+ Ty = AI->getAllocatedType();
+ // arraySize elements of type Ty.
+ return AI->getArraySize();
+ }
+
+ if (CallInst *CI = extractMallocCall(V)) {
+ Ty = getMallocAllocatedType(CI);
+ if (!Ty)
+ return 0;
+ Value *arraySize = getMallocArraySize(CI, TD);
+ if (!arraySize) {
+ Ty = Type::getInt8Ty(P->getContext());
+ return CI->getArgOperand(0);
+ }
+ // arraySize elements of type Ty.
+ return arraySize;
+ }
+
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
+ if (GV->hasDefinitiveInitializer()) {
+ Constant *C = GV->getInitializer();
+ if (const ArrayType *ATy = dyn_cast<ArrayType>(C->getType())) {
+ Ty = ATy->getElementType();
+ return ConstantInt::get(Type::getInt32Ty(P->getContext()),
+ ATy->getNumElements());
+ }
+ }
+ Ty = cast<PointerType>(GV->getType())->getElementType();
+ return ConstantInt::get(Type::getInt32Ty(P->getContext()), 1);
+ //TODO: implement more tracking for globals
+ }
+
+ if (CallInst *CI = dyn_cast<CallInst>(V)) {
+ CallSite CS(CI);
+ Function *F = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
+ if (F == reallocFunc) {
+ Ty = Type::getInt8Ty(P->getContext());
+ // realloc allocates arg1 bytes.
+ return CS.getArgument(1);
+ }
+ }
+
+ return 0;
+}
+
// Calculates the number of elements of type Ty allocated for P.
const SCEV *PointerTracking::computeAllocationCountForType(Value *P,
const Type *Ty)
@@ -183,17 +232,17 @@ enum SolverResult PointerTracking::isLoopGuardedBy(const Loop *L,
Predicate Pred,
const SCEV *A,
const SCEV *B) const {
- if (SE->isLoopGuardedByCond(L, Pred, A, B))
+ if (SE->isLoopEntryGuardedByCond(L, Pred, A, B))
return AlwaysTrue;
Pred = ICmpInst::getSwappedPredicate(Pred);
- if (SE->isLoopGuardedByCond(L, Pred, B, A))
+ if (SE->isLoopEntryGuardedByCond(L, Pred, B, A))
return AlwaysTrue;
Pred = ICmpInst::getInversePredicate(Pred);
- if (SE->isLoopGuardedByCond(L, Pred, B, A))
+ if (SE->isLoopEntryGuardedByCond(L, Pred, B, A))
return AlwaysFalse;
Pred = ICmpInst::getSwappedPredicate(Pred);
- if (SE->isLoopGuardedByCond(L, Pred, A, B))
+ if (SE->isLoopEntryGuardedByCond(L, Pred, A, B))
return AlwaysTrue;
return Unknown;
}
@@ -263,53 +312,5 @@ void PointerTracking::print(raw_ostream &OS, const Module* M) const {
}
}
-Value *PointerTracking::computeAllocationCountValue(Value *P, const Type *&Ty) const
-{
- Value *V = P->stripPointerCasts();
- if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
- Ty = AI->getAllocatedType();
- // arraySize elements of type Ty.
- return AI->getArraySize();
- }
-
- if (CallInst *CI = extractMallocCall(V)) {
- Ty = getMallocAllocatedType(CI);
- if (!Ty)
- return 0;
- Value *arraySize = getMallocArraySize(CI, TD);
- if (!arraySize) {
- Ty = Type::getInt8Ty(P->getContext());
- return CI->getOperand(1);
- }
- // arraySize elements of type Ty.
- return arraySize;
- }
-
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
- if (GV->hasDefinitiveInitializer()) {
- Constant *C = GV->getInitializer();
- if (const ArrayType *ATy = dyn_cast<ArrayType>(C->getType())) {
- Ty = ATy->getElementType();
- return ConstantInt::get(Type::getInt32Ty(P->getContext()),
- ATy->getNumElements());
- }
- }
- Ty = cast<PointerType>(GV->getType())->getElementType();
- return ConstantInt::get(Type::getInt32Ty(P->getContext()), 1);
- //TODO: implement more tracking for globals
- }
-
- if (CallInst *CI = dyn_cast<CallInst>(V)) {
- CallSite CS(CI);
- Function *F = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
- if (F == reallocFunc) {
- Ty = Type::getInt8Ty(P->getContext());
- // realloc allocates arg1 bytes.
- return CS.getArgument(1);
- }
- }
-
- return 0;
-}
-static RegisterPass<PointerTracking> X("pointertracking",
- "Track pointer bounds", false, true);
+INITIALIZE_PASS(PointerTracking, "pointertracking",
+ "Track pointer bounds", false, true);
diff --git a/libclamav/c++/llvm/lib/Analysis/PostDominators.cpp b/libclamav/c++/llvm/lib/Analysis/PostDominators.cpp
index c38e050..cbe8d18 100644
--- a/libclamav/c++/llvm/lib/Analysis/PostDominators.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/PostDominators.cpp
@@ -28,12 +28,11 @@ using namespace llvm;
char PostDominatorTree::ID = 0;
char PostDominanceFrontier::ID = 0;
-static RegisterPass<PostDominatorTree>
-F("postdomtree", "Post-Dominator Tree Construction", true, true);
+INITIALIZE_PASS(PostDominatorTree, "postdomtree",
+ "Post-Dominator Tree Construction", true, true);
bool PostDominatorTree::runOnFunction(Function &F) {
DT->recalculate(F);
- DEBUG(DT->print(dbgs()));
return false;
}
@@ -54,8 +53,8 @@ FunctionPass* llvm::createPostDomTree() {
// PostDominanceFrontier Implementation
//===----------------------------------------------------------------------===//
-static RegisterPass<PostDominanceFrontier>
-H("postdomfrontier", "Post-Dominance Frontier Construction", true, true);
+INITIALIZE_PASS(PostDominanceFrontier, "postdomfrontier",
+ "Post-Dominance Frontier Construction", true, true);
const DominanceFrontier::DomSetType &
PostDominanceFrontier::calculate(const PostDominatorTree &DT,
@@ -68,10 +67,11 @@ PostDominanceFrontier::calculate(const PostDominatorTree &DT,
if (BB)
for (pred_iterator SI = pred_begin(BB), SE = pred_end(BB);
SI != SE; ++SI) {
+ BasicBlock *P = *SI;
// Does Node immediately dominate this predecessor?
- DomTreeNode *SINode = DT[*SI];
+ DomTreeNode *SINode = DT[P];
if (SINode && SINode->getIDom() != Node)
- S.insert(*SI);
+ S.insert(P);
}
// At this point, S is DFlocal. Now we union in DFup's of our children...
diff --git a/libclamav/c++/llvm/lib/Analysis/ProfileEstimatorPass.cpp b/libclamav/c++/llvm/lib/Analysis/ProfileEstimatorPass.cpp
index bce6b31..ecc0a18 100644
--- a/libclamav/c++/llvm/lib/Analysis/ProfileEstimatorPass.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/ProfileEstimatorPass.cpp
@@ -39,7 +39,7 @@ namespace {
public:
static char ID; // Class identification, replacement for typeinfo
explicit ProfileEstimatorPass(const double execcount = 0)
- : FunctionPass(&ID), ExecCount(execcount) {
+ : FunctionPass(ID), ExecCount(execcount) {
if (execcount == 0) ExecCount = LoopWeight;
}
@@ -59,8 +59,8 @@ namespace {
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&ProfileInfo::ID))
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &ProfileInfo::ID)
return (ProfileInfo*)this;
return this;
}
@@ -72,13 +72,11 @@ namespace {
} // End of anonymous namespace
char ProfileEstimatorPass::ID = 0;
-static RegisterPass<ProfileEstimatorPass>
-X("profile-estimator", "Estimate profiling information", false, true);
-
-static RegisterAnalysisGroup<ProfileInfo> Y(X);
+INITIALIZE_AG_PASS(ProfileEstimatorPass, ProfileInfo, "profile-estimator",
+ "Estimate profiling information", false, true, false);
namespace llvm {
- const PassInfo *ProfileEstimatorPassID = &X;
+ char &ProfileEstimatorPassID = ProfileEstimatorPass::ID;
FunctionPass *createProfileEstimatorPass() {
return new ProfileEstimatorPass();
@@ -398,7 +396,7 @@ bool ProfileEstimatorPass::runOnFunction(Function &F) {
for (Function::const_iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
const BasicBlock *BB = &(*FI);
BlockInformation[&F][BB] = 0;
- pred_const_iterator predi = pred_begin(BB), prede = pred_end(BB);
+ const_pred_iterator predi = pred_begin(BB), prede = pred_end(BB);
if (predi == prede) {
Edge e = getEdge(0,BB);
setEdgeWeight(e,0);
diff --git a/libclamav/c++/llvm/lib/Analysis/ProfileInfo.cpp b/libclamav/c++/llvm/lib/Analysis/ProfileInfo.cpp
index 85531be..fc7f286 100644
--- a/libclamav/c++/llvm/lib/Analysis/ProfileInfo.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/ProfileInfo.cpp
@@ -67,26 +67,28 @@ ProfileInfoT<Function,BasicBlock>::getExecutionCount(const BasicBlock *BB) {
double Count = MissingValue;
- pred_const_iterator PI = pred_begin(BB), PE = pred_end(BB);
+ const_pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
// Are there zero predecessors of this block?
if (PI == PE) {
- Edge e = getEdge(0,BB);
+ Edge e = getEdge(0, BB);
Count = getEdgeWeight(e);
} else {
// Otherwise, if there are predecessors, the execution count of this block is
// the sum of the edge frequencies from the incoming edges.
std::set<const BasicBlock*> ProcessedPreds;
Count = 0;
- for (; PI != PE; ++PI)
- if (ProcessedPreds.insert(*PI).second) {
- double w = getEdgeWeight(getEdge(*PI, BB));
+ for (; PI != PE; ++PI) {
+ const BasicBlock *P = *PI;
+ if (ProcessedPreds.insert(P).second) {
+ double w = getEdgeWeight(getEdge(P, BB));
if (w == MissingValue) {
Count = MissingValue;
break;
}
Count += w;
}
+ }
}
// If the predecessors did not suffice to get block weight, try successors.
@@ -508,7 +510,7 @@ bool ProfileInfoT<Function,BasicBlock>::
// have no value
double incount = 0;
SmallSet<const BasicBlock*,8> pred_visited;
- pred_const_iterator bbi = pred_begin(BB), bbe = pred_end(BB);
+ const_pred_iterator bbi = pred_begin(BB), bbe = pred_end(BB);
if (bbi==bbe) {
Edge e = getEdge(0,BB);
incount += readEdgeOrRemember(e, getEdgeWeight(e) ,edgetocalc,uncalculated);
@@ -577,12 +579,10 @@ static void readEdge(ProfileInfo *PI, ProfileInfo::Edge e, double &calcw, std::s
template<>
bool ProfileInfoT<Function,BasicBlock>::EstimateMissingEdges(const BasicBlock *BB) {
- bool hasNoSuccessors = false;
-
double inWeight = 0;
std::set<Edge> inMissing;
std::set<const BasicBlock*> ProcessedPreds;
- pred_const_iterator bbi = pred_begin(BB), bbe = pred_end(BB);
+ const_pred_iterator bbi = pred_begin(BB), bbe = pred_end(BB);
if (bbi == bbe) {
readEdge(this,getEdge(0,BB),inWeight,inMissing);
}
@@ -596,10 +596,8 @@ bool ProfileInfoT<Function,BasicBlock>::EstimateMissingEdges(const BasicBlock *B
std::set<Edge> outMissing;
std::set<const BasicBlock*> ProcessedSuccs;
succ_const_iterator sbbi = succ_begin(BB), sbbe = succ_end(BB);
- if (sbbi == sbbe) {
+ if (sbbi == sbbe)
readEdge(this,getEdge(BB,0),outWeight,outMissing);
- hasNoSuccessors = true;
- }
for ( ; sbbi != sbbe; ++sbbi ) {
if (ProcessedSuccs.insert(*sbbi).second) {
readEdge(this,getEdge(BB,*sbbi),outWeight,outMissing);
@@ -639,7 +637,7 @@ void ProfileInfoT<Function,BasicBlock>::repair(const Function *F) {
// FI != FE; ++FI) {
// const BasicBlock* BB = &(*FI);
// {
-// pred_const_iterator NBB = pred_begin(BB), End = pred_end(BB);
+// const_pred_iterator NBB = pred_begin(BB), End = pred_end(BB);
// if (NBB == End) {
// setEdgeWeight(getEdge(0,BB),0);
// }
@@ -779,7 +777,7 @@ void ProfileInfoT<Function,BasicBlock>::repair(const Function *F) {
// Calculate incoming flow.
double iw = 0; unsigned inmissing = 0; unsigned incount = 0; unsigned invalid = 0;
std::set<const BasicBlock *> Processed;
- for (pred_const_iterator NBB = pred_begin(BB), End = pred_end(BB);
+ for (const_pred_iterator NBB = pred_begin(BB), End = pred_end(BB);
NBB != End; ++NBB) {
if (Processed.insert(*NBB).second) {
Edge e = getEdge(*NBB, BB);
@@ -869,7 +867,7 @@ void ProfileInfoT<Function,BasicBlock>::repair(const Function *F) {
if (getEdgeWeight(e) == MissingValue) {
double iw = 0;
std::set<const BasicBlock *> Processed;
- for (pred_const_iterator NBB = pred_begin(BB), End = pred_end(BB);
+ for (const_pred_iterator NBB = pred_begin(BB), End = pred_end(BB);
NBB != End; ++NBB) {
if (Processed.insert(*NBB).second) {
Edge e = getEdge(*NBB, BB);
@@ -893,7 +891,7 @@ void ProfileInfoT<Function,BasicBlock>::repair(const Function *F) {
const BasicBlock *Dest;
Path P;
bool BackEdgeFound = false;
- for (pred_const_iterator NBB = pred_begin(BB), End = pred_end(BB);
+ for (const_pred_iterator NBB = pred_begin(BB), End = pred_end(BB);
NBB != End; ++NBB) {
Dest = GetPath(BB, *NBB, P, GetPathToDest | GetPathWithNewEdges);
if (Dest == *NBB) {
@@ -935,7 +933,7 @@ void ProfileInfoT<Function,BasicBlock>::repair(const Function *F) {
// Calculate incoming flow.
double iw = 0;
std::set<const BasicBlock *> Processed;
- for (pred_const_iterator NBB = pred_begin(BB), End = pred_end(BB);
+ for (const_pred_iterator NBB = pred_begin(BB), End = pred_end(BB);
NBB != End; ++NBB) {
if (Processed.insert(*NBB).second) {
Edge e = getEdge(*NBB, BB);
@@ -965,7 +963,7 @@ void ProfileInfoT<Function,BasicBlock>::repair(const Function *F) {
while(FI != FE && !FoundPath) {
const BasicBlock *BB = *FI; ++FI;
- for (pred_const_iterator NBB = pred_begin(BB), End = pred_end(BB);
+ for (const_pred_iterator NBB = pred_begin(BB), End = pred_end(BB);
NBB != End; ++NBB) {
Edge e = getEdge(*NBB,BB);
double w = getEdgeWeight(e);
@@ -1078,14 +1076,14 @@ raw_ostream& operator<<(raw_ostream &O, std::pair<const MachineBasicBlock *, con
namespace {
struct NoProfileInfo : public ImmutablePass, public ProfileInfo {
static char ID; // Class identification, replacement for typeinfo
- NoProfileInfo() : ImmutablePass(&ID) {}
+ NoProfileInfo() : ImmutablePass(ID) {}
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&ProfileInfo::ID))
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &ProfileInfo::ID)
return (ProfileInfo*)this;
return this;
}
@@ -1098,10 +1096,7 @@ namespace {
char NoProfileInfo::ID = 0;
// Register this pass...
-static RegisterPass<NoProfileInfo>
-X("no-profile", "No Profile Information", false, true);
-
-// Declare that we implement the ProfileInfo interface
-static RegisterAnalysisGroup<ProfileInfo, true> Y(X);
+INITIALIZE_AG_PASS(NoProfileInfo, ProfileInfo, "no-profile",
+ "No Profile Information", false, true, true);
ImmutablePass *llvm::createNoProfileInfoPass() { return new NoProfileInfo(); }
diff --git a/libclamav/c++/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp b/libclamav/c++/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp
index ac9ed52..d325b57 100644
--- a/libclamav/c++/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/ProfileInfoLoaderPass.cpp
@@ -45,7 +45,7 @@ namespace {
public:
static char ID; // Class identification, replacement for typeinfo
explicit LoaderPass(const std::string &filename = "")
- : ModulePass(&ID), Filename(filename) {
+ : ModulePass(ID), Filename(filename) {
if (filename.empty()) Filename = ProfileInfoFilename;
}
@@ -67,8 +67,8 @@ namespace {
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&ProfileInfo::ID))
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &ProfileInfo::ID)
return (ProfileInfo*)this;
return this;
}
@@ -79,12 +79,10 @@ namespace {
} // End of anonymous namespace
char LoaderPass::ID = 0;
-static RegisterPass<LoaderPass>
-X("profile-loader", "Load profile information from llvmprof.out", false, true);
+INITIALIZE_AG_PASS(LoaderPass, ProfileInfo, "profile-loader",
+ "Load profile information from llvmprof.out", false, true, false);
-static RegisterAnalysisGroup<ProfileInfo> Y(X);
-
-const PassInfo *llvm::ProfileLoaderPassID = &X;
+char &llvm::ProfileLoaderPassID = LoaderPass::ID;
ModulePass *llvm::createProfileLoaderPass() { return new LoaderPass(); }
@@ -119,7 +117,7 @@ void LoaderPass::recurseBasicBlock(const BasicBlock *BB) {
bbi != bbe; ++bbi) {
recurseBasicBlock(*bbi);
}
- for (pred_const_iterator bbi = pred_begin(BB), bbe = pred_end(BB);
+ for (const_pred_iterator bbi = pred_begin(BB), bbe = pred_end(BB);
bbi != bbe; ++bbi) {
recurseBasicBlock(*bbi);
}
diff --git a/libclamav/c++/llvm/lib/Analysis/ProfileVerifierPass.cpp b/libclamav/c++/llvm/lib/Analysis/ProfileVerifierPass.cpp
index a2ddc8e..3f01b2d 100644
--- a/libclamav/c++/llvm/lib/Analysis/ProfileVerifierPass.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/ProfileVerifierPass.cpp
@@ -59,10 +59,10 @@ namespace llvm {
public:
static char ID; // Class identification, replacement for typeinfo
- explicit ProfileVerifierPassT () : FunctionPass(&ID) {
+ explicit ProfileVerifierPassT () : FunctionPass(ID) {
DisableAssertions = ProfileVerifierDisableAssertions;
}
- explicit ProfileVerifierPassT (bool da) : FunctionPass(&ID),
+ explicit ProfileVerifierPassT (bool da) : FunctionPass(ID),
DisableAssertions(da) {
}
@@ -96,8 +96,8 @@ namespace llvm {
double inWeight = 0;
int inCount = 0;
std::set<const BType*> ProcessedPreds;
- for ( pred_const_iterator bbi = pred_begin(BB), bbe = pred_end(BB);
- bbi != bbe; ++bbi ) {
+ for (const_pred_iterator bbi = pred_begin(BB), bbe = pred_end(BB);
+ bbi != bbe; ++bbi ) {
if (ProcessedPreds.insert(*bbi).second) {
typename ProfileInfoT<FType, BType>::Edge E = PI->getEdge(*bbi,BB);
double EdgeWeight = PI->getEdgeWeight(E);
@@ -242,7 +242,7 @@ namespace llvm {
// Read predecessors.
std::set<const BType*> ProcessedPreds;
- pred_const_iterator bpi = pred_begin(BB), bpe = pred_end(BB);
+ const_pred_iterator bpi = pred_begin(BB), bpe = pred_end(BB);
// If there are none, check for (0,BB) edge.
if (bpi == bpe) {
DI.inWeight += ReadOrAssert(PI->getEdge(0,BB));
@@ -366,8 +366,8 @@ namespace llvm {
char ProfileVerifierPassT<FType, BType>::ID = 0;
}
-static RegisterPass<ProfileVerifierPass>
-X("profile-verifier", "Verify profiling information", false, true);
+INITIALIZE_PASS(ProfileVerifierPass, "profile-verifier",
+ "Verify profiling information", false, true);
namespace llvm {
FunctionPass *createProfileVerifierPass() {
diff --git a/libclamav/c++/llvm/lib/Analysis/README.txt b/libclamav/c++/llvm/lib/Analysis/README.txt
index c401090..0e96e4c 100644
--- a/libclamav/c++/llvm/lib/Analysis/README.txt
+++ b/libclamav/c++/llvm/lib/Analysis/README.txt
@@ -16,3 +16,15 @@ In addition to being much more complicated, it involves i65 arithmetic,
which is very inefficient when expanded into code.
//===---------------------------------------------------------------------===//
+
+In formatValue in test/CodeGen/X86/lsr-delayed-fold.ll,
+
+ScalarEvolution is forming this expression:
+
+((trunc i64 (-1 * %arg5) to i32) + (trunc i64 %arg5 to i32) + (-1 * (trunc i64 undef to i32)))
+
+This could be folded to
+
+(-1 * (trunc i64 undef to i32))
+
+//===---------------------------------------------------------------------===//
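A minimal standalone C++ sketch of why the suggested fold is sound, assuming only that i32 arithmetic is modular (computed mod 2^32); the value u below is a hypothetical stand-in for the undef operand:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t x = 0x123456789abcdef0ULL, u = 0xdeadbeefcafef00dULL; // u stands in for undef
    // (trunc i64 (-1 * x) to i32) + (trunc i64 x to i32) + (-1 * (trunc i64 u to i32))
    uint32_t lhs = uint32_t(0 - x) + uint32_t(x) + 0xFFFFFFFFu * uint32_t(u);
    // -1 * (trunc i64 u to i32): the first two terms cancel mod 2^32
    uint32_t rhs = 0xFFFFFFFFu * uint32_t(u);
    assert(lhs == rhs);
    return 0;
  }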
diff --git a/libclamav/c++/llvm/lib/Analysis/RegionInfo.cpp b/libclamav/c++/llvm/lib/Analysis/RegionInfo.cpp
new file mode 100644
index 0000000..abc057a
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Analysis/RegionInfo.cpp
@@ -0,0 +1,749 @@
+//===- RegionInfo.cpp - SESE region detection analysis --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Detects single entry single exit regions in the control flow graph.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/Analysis/RegionIterator.h"
+
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Analysis/LoopInfo.h"
+
+#define DEBUG_TYPE "region"
+#include "llvm/Support/Debug.h"
+
+#include <set>
+#include <algorithm>
+
+using namespace llvm;
+
+// Always verify if expensive checking is enabled.
+#ifdef XDEBUG
+static bool VerifyRegionInfo = true;
+#else
+static bool VerifyRegionInfo = false;
+#endif
+
+static cl::opt<bool,true>
+VerifyRegionInfoX("verify-region-info", cl::location(VerifyRegionInfo),
+ cl::desc("Verify region info (time consuming)"));
+
+STATISTIC(numRegions, "The # of regions");
+STATISTIC(numSimpleRegions, "The # of simple regions");
+
+//===----------------------------------------------------------------------===//
+/// PrintStyle - Print region in different ways.
+enum PrintStyle { PrintNone, PrintBB, PrintRN };
+
+cl::opt<enum PrintStyle> printStyle("print-region-style", cl::Hidden,
+ cl::desc("style of printing regions"),
+ cl::values(
+ clEnumValN(PrintNone, "none", "print no details"),
+ clEnumValN(PrintBB, "bb", "print regions in detail with block_iterator"),
+ clEnumValN(PrintRN, "rn", "print regions in detail with element_iterator"),
+ clEnumValEnd));
+//===----------------------------------------------------------------------===//
+/// Region Implementation
+Region::Region(BasicBlock *Entry, BasicBlock *Exit, RegionInfo* RInfo,
+ DominatorTree *dt, Region *Parent)
+ : RegionNode(Parent, Entry, 1), RI(RInfo), DT(dt), exit(Exit) {}
+
+Region::~Region() {
+ // Free the cached nodes.
+ for (BBNodeMapT::iterator it = BBNodeMap.begin(),
+ ie = BBNodeMap.end(); it != ie; ++it)
+ delete it->second;
+
+ // Only clean the cache for this Region. Caches of child Regions will be
+ // cleaned when the child Regions are deleted.
+ BBNodeMap.clear();
+
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ delete *I;
+}
+
+bool Region::contains(const BasicBlock *B) const {
+ BasicBlock *BB = const_cast<BasicBlock*>(B);
+
+ assert(DT->getNode(BB) && "BB not part of the dominance tree");
+
+ BasicBlock *entry = getEntry(), *exit = getExit();
+
+ // Toplevel region.
+ if (!exit)
+ return true;
+
+ return (DT->dominates(entry, BB)
+ && !(DT->dominates(exit, BB) && DT->dominates(entry, exit)));
+}
+
+bool Region::contains(const Loop *L) const {
+ // BBs that are not part of any loop are elements of the Loop
+ // described by the NULL pointer. This loop is not part of any region,
+ // except if the region describes the whole function.
+ if (L == 0)
+ return getExit() == 0;
+
+ if (!contains(L->getHeader()))
+ return false;
+
+ SmallVector<BasicBlock *, 8> ExitingBlocks;
+ L->getExitingBlocks(ExitingBlocks);
+
+ for (SmallVectorImpl<BasicBlock*>::iterator BI = ExitingBlocks.begin(),
+ BE = ExitingBlocks.end(); BI != BE; ++BI)
+ if (!contains(*BI))
+ return false;
+
+ return true;
+}
+
+Loop *Region::outermostLoopInRegion(Loop *L) const {
+ if (!contains(L))
+ return 0;
+
+ while (L && contains(L->getParentLoop())) {
+ L = L->getParentLoop();
+ }
+
+ return L;
+}
+
+Loop *Region::outermostLoopInRegion(LoopInfo *LI, BasicBlock* BB) const {
+ assert(LI && BB && "LI and BB cannot be null!");
+ Loop *L = LI->getLoopFor(BB);
+ return outermostLoopInRegion(L);
+}
+
+bool Region::isSimple() const {
+ bool isSimple = true;
+ bool found = false;
+
+ BasicBlock *entry = getEntry(), *exit = getExit();
+
+ // TopLevelRegion
+ if (!exit)
+ return false;
+
+ for (pred_iterator PI = pred_begin(entry), PE = pred_end(entry); PI != PE;
+ ++PI) {
+ BasicBlock *Pred = *PI;
+ if (DT->getNode(Pred) && !contains(Pred)) {
+ if (found) {
+ isSimple = false;
+ break;
+ }
+ found = true;
+ }
+ }
+
+ found = false;
+
+ for (pred_iterator PI = pred_begin(exit), PE = pred_end(exit); PI != PE;
+ ++PI)
+ if (contains(*PI)) {
+ if (found) {
+ isSimple = false;
+ break;
+ }
+ found = true;
+ }
+
+ return isSimple;
+}
+
+std::string Region::getNameStr() const {
+ std::string exitName;
+ std::string entryName;
+
+ if (getEntry()->getName().empty()) {
+ raw_string_ostream OS(entryName);
+
+ WriteAsOperand(OS, getEntry(), false);
+ entryName = OS.str();
+ } else
+ entryName = getEntry()->getNameStr();
+
+ if (getExit()) {
+ if (getExit()->getName().empty()) {
+ raw_string_ostream OS(exitName);
+
+ WriteAsOperand(OS, getExit(), false);
+ exitName = OS.str();
+ } else
+ exitName = getExit()->getNameStr();
+ } else
+ exitName = "<Function Return>";
+
+ return entryName + " => " + exitName;
+}
+
+void Region::verifyBBInRegion(BasicBlock *BB) const {
+ if (!contains(BB))
+ llvm_unreachable("Broken region found!");
+
+ BasicBlock *entry = getEntry(), *exit = getExit();
+
+ for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
+ if (!contains(*SI) && exit != *SI)
+ llvm_unreachable("Broken region found!");
+
+ if (entry != BB)
+ for (pred_iterator SI = pred_begin(BB), SE = pred_end(BB); SI != SE; ++SI)
+ if (!contains(*SI))
+ llvm_unreachable("Broken region found!");
+}
+
+void Region::verifyWalk(BasicBlock *BB, std::set<BasicBlock*> *visited) const {
+ BasicBlock *exit = getExit();
+
+ visited->insert(BB);
+
+ verifyBBInRegion(BB);
+
+ for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
+ if (*SI != exit && visited->find(*SI) == visited->end())
+ verifyWalk(*SI, visited);
+}
+
+void Region::verifyRegion() const {
+ // Only do verification when user wants to, otherwise this expensive
+ // check will be invoked by PassManager.
+ if (!VerifyRegionInfo) return;
+
+ std::set<BasicBlock*> visited;
+ verifyWalk(getEntry(), &visited);
+}
+
+void Region::verifyRegionNest() const {
+ for (Region::const_iterator RI = begin(), RE = end(); RI != RE; ++RI)
+ (*RI)->verifyRegionNest();
+
+ verifyRegion();
+}
+
+Region::block_iterator Region::block_begin() {
+ return GraphTraits<FlatIt<Region*> >::nodes_begin(this);
+}
+
+Region::block_iterator Region::block_end() {
+ return GraphTraits<FlatIt<Region*> >::nodes_end(this);
+}
+
+Region::const_block_iterator Region::block_begin() const {
+ return GraphTraits<FlatIt<const Region*> >::nodes_begin(this);
+}
+
+Region::const_block_iterator Region::block_end() const {
+ return GraphTraits<FlatIt<const Region*> >::nodes_end(this);
+}
+
+Region::element_iterator Region::element_begin() {
+ return GraphTraits<Region*>::nodes_begin(this);
+}
+
+Region::element_iterator Region::element_end() {
+ return GraphTraits<Region*>::nodes_end(this);
+}
+
+Region::const_element_iterator Region::element_begin() const {
+ return GraphTraits<const Region*>::nodes_begin(this);
+}
+
+Region::const_element_iterator Region::element_end() const {
+ return GraphTraits<const Region*>::nodes_end(this);
+}
+
+Region* Region::getSubRegionNode(BasicBlock *BB) const {
+ Region *R = RI->getRegionFor(BB);
+
+ if (!R || R == this)
+ return 0;
+
+ // If we pass the BB out of this region, that means our code is broken.
+ assert(contains(R) && "BB not in current region!");
+
+ while (contains(R->getParent()) && R->getParent() != this)
+ R = R->getParent();
+
+ if (R->getEntry() != BB)
+ return 0;
+
+ return R;
+}
+
+RegionNode* Region::getBBNode(BasicBlock *BB) const {
+ assert(contains(BB) && "Can't get BB node out of this region!");
+
+ BBNodeMapT::const_iterator at = BBNodeMap.find(BB);
+
+ if (at != BBNodeMap.end())
+ return at->second;
+
+ RegionNode *NewNode = new RegionNode(const_cast<Region*>(this), BB);
+ BBNodeMap.insert(std::make_pair(BB, NewNode));
+ return NewNode;
+}
+
+RegionNode* Region::getNode(BasicBlock *BB) const {
+ assert(contains(BB) && "Can't get BB node out of this region!");
+ if (Region* Child = getSubRegionNode(BB))
+ return Child->getNode();
+
+ return getBBNode(BB);
+}
+
+void Region::transferChildrenTo(Region *To) {
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ (*I)->parent = To;
+ To->children.push_back(*I);
+ }
+ children.clear();
+}
+
+void Region::addSubRegion(Region *SubRegion) {
+ assert(SubRegion->parent == 0 && "SubRegion already has a parent!");
+ SubRegion->parent = this;
+ // Set up the region node.
+ assert(std::find(children.begin(), children.end(), SubRegion) == children.end()
+ && "Node already exist!");
+ children.push_back(SubRegion);
+}
+
+
+Region *Region::removeSubRegion(Region *Child) {
+ assert(Child->parent == this && "Child is not a child of this region!");
+ Child->parent = 0;
+ RegionSet::iterator I = std::find(children.begin(), children.end(), Child);
+ assert(I != children.end() && "Region does not exist. Unable to remove.");
+ children.erase(children.begin()+(I-begin()));
+ return Child;
+}
+
+unsigned Region::getDepth() const {
+ unsigned Depth = 0;
+
+ for (Region *R = parent; R != 0; R = R->parent)
+ ++Depth;
+
+ return Depth;
+}
+
+void Region::print(raw_ostream &OS, bool print_tree, unsigned level) const {
+ if (print_tree)
+ OS.indent(level*2) << "[" << level << "] " << getNameStr();
+ else
+ OS.indent(level*2) << getNameStr();
+
+ OS << "\n";
+
+
+ if (printStyle != PrintNone) {
+ OS.indent(level*2) << "{\n";
+ OS.indent(level*2 + 2);
+
+ if (printStyle == PrintBB) {
+ for (const_block_iterator I = block_begin(), E = block_end(); I!=E; ++I)
+ OS << **I << ", "; // TODO: remove the last ","
+ } else if (printStyle == PrintRN) {
+ for (const_element_iterator I = element_begin(), E = element_end(); I!=E; ++I)
+ OS << **I << ", "; // TODO: remove the last ","
+ }
+
+ OS << "\n";
+ }
+
+ if (print_tree)
+ for (const_iterator RI = begin(), RE = end(); RI != RE; ++RI)
+ (*RI)->print(OS, print_tree, level+1);
+
+ if (printStyle != PrintNone)
+ OS.indent(level*2) << "} \n";
+}
+
+void Region::dump() const {
+ print(dbgs(), true, getDepth());
+}
+
+void Region::clearNodeCache() {
+ BBNodeMap.clear();
+ for (Region::iterator RI = begin(), RE = end(); RI != RE; ++RI)
+ (*RI)->clearNodeCache();
+}
+
+//===----------------------------------------------------------------------===//
+// RegionInfo implementation
+//
+
+bool RegionInfo::isCommonDomFrontier(BasicBlock *BB, BasicBlock *entry,
+ BasicBlock *exit) const {
+ for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
+ BasicBlock *P = *PI;
+ if (DT->dominates(entry, P) && !DT->dominates(exit, P))
+ return false;
+ }
+ return true;
+}
+
+bool RegionInfo::isRegion(BasicBlock *entry, BasicBlock *exit) const {
+ assert(entry && exit && "entry and exit must not be null!");
+ typedef DominanceFrontier::DomSetType DST;
+
+ DST *entrySuccs = &DF->find(entry)->second;
+
+ // Exit is the header of a loop that contains the entry. In this case,
+ // the dominance frontier must only contain the exit.
+ if (!DT->dominates(entry, exit)) {
+ for (DST::iterator SI = entrySuccs->begin(), SE = entrySuccs->end();
+ SI != SE; ++SI)
+ if (*SI != exit && *SI != entry)
+ return false;
+
+ return true;
+ }
+
+ DST *exitSuccs = &DF->find(exit)->second;
+
+ // Do not allow edges leaving the region.
+ for (DST::iterator SI = entrySuccs->begin(), SE = entrySuccs->end();
+ SI != SE; ++SI) {
+ if (*SI == exit || *SI == entry)
+ continue;
+ if (exitSuccs->find(*SI) == exitSuccs->end())
+ return false;
+ if (!isCommonDomFrontier(*SI, entry, exit))
+ return false;
+ }
+
+ // Do not allow edges pointing into the region.
+ for (DST::iterator SI = exitSuccs->begin(), SE = exitSuccs->end();
+ SI != SE; ++SI)
+ if (DT->properlyDominates(entry, *SI) && *SI != exit)
+ return false;
+
+
+ return true;
+}
+
+void RegionInfo::insertShortCut(BasicBlock *entry, BasicBlock *exit,
+ BBtoBBMap *ShortCut) const {
+ assert(entry && exit && "entry and exit must not be null!");
+
+ BBtoBBMap::iterator e = ShortCut->find(exit);
+
+ if (e == ShortCut->end())
+ // No further region at exit available.
+ (*ShortCut)[entry] = exit;
+ else {
+ // We found a region e that starts at exit. Therefore (entry, e->second)
+ // is also a region, that is larger than (entry, exit). Insert the
+ // larger one.
+ BasicBlock *BB = e->second;
+ (*ShortCut)[entry] = BB;
+ }
+}
+
+DomTreeNode* RegionInfo::getNextPostDom(DomTreeNode* N,
+ BBtoBBMap *ShortCut) const {
+ BBtoBBMap::iterator e = ShortCut->find(N->getBlock());
+
+ if (e == ShortCut->end())
+ return N->getIDom();
+
+ return PDT->getNode(e->second)->getIDom();
+}
+
+bool RegionInfo::isTrivialRegion(BasicBlock *entry, BasicBlock *exit) const {
+ assert(entry && exit && "entry and exit must not be null!");
+
+ unsigned num_successors = succ_end(entry) - succ_begin(entry);
+
+ if (num_successors <= 1 && exit == *(succ_begin(entry)))
+ return true;
+
+ return false;
+}
+
+void RegionInfo::updateStatistics(Region *R) {
+ ++numRegions;
+
+ // TODO: Slow. Should only be enabled if -stats is used.
+ if (R->isSimple()) ++numSimpleRegions;
+}
+
+Region *RegionInfo::createRegion(BasicBlock *entry, BasicBlock *exit) {
+ assert(entry && exit && "entry and exit must not be null!");
+
+ if (isTrivialRegion(entry, exit))
+ return 0;
+
+ Region *region = new Region(entry, exit, this, DT);
+ BBtoRegion.insert(std::make_pair(entry, region));
+
+ #ifdef XDEBUG
+ region->verifyRegion();
+ #else
+ DEBUG(region->verifyRegion());
+ #endif
+
+ updateStatistics(region);
+ return region;
+}
+
+void RegionInfo::findRegionsWithEntry(BasicBlock *entry, BBtoBBMap *ShortCut) {
+ assert(entry);
+
+ DomTreeNode *N = PDT->getNode(entry);
+
+ if (!N)
+ return;
+
+ Region *lastRegion = 0;
+ BasicBlock *lastExit = entry;
+
+ // As only a BasicBlock that postdominates entry can finish a region, walk the
+ // post dominance tree upwards.
+ while ((N = getNextPostDom(N, ShortCut))) {
+ BasicBlock *exit = N->getBlock();
+
+ if (!exit)
+ break;
+
+ if (isRegion(entry, exit)) {
+ Region *newRegion = createRegion(entry, exit);
+
+ if (lastRegion)
+ newRegion->addSubRegion(lastRegion);
+
+ lastRegion = newRegion;
+ lastExit = exit;
+ }
+
+ // This can never be a region, so stop the search.
+ if (!DT->dominates(entry, exit))
+ break;
+ }
+
+ // Tried to create regions from entry to lastExit. Next time take a
+ // shortcut from entry to lastExit.
+ if (lastExit != entry)
+ insertShortCut(entry, lastExit, ShortCut);
+}
+
+void RegionInfo::scanForRegions(Function &F, BBtoBBMap *ShortCut) {
+ BasicBlock *entry = &(F.getEntryBlock());
+ DomTreeNode *N = DT->getNode(entry);
+
+ // Iterate over the dominance tree in post order to start with the small
+ // regions from the bottom of the dominance tree. If the small regions are
+ // detected first, detection of bigger regions is faster, as we can jump
+ // over the small regions.
+ for (po_iterator<DomTreeNode*> FI = po_begin(N), FE = po_end(N); FI != FE;
+ ++FI) {
+ findRegionsWithEntry(FI->getBlock(), ShortCut);
+ }
+}
+
+Region *RegionInfo::getTopMostParent(Region *region) {
+ while (region->parent)
+ region = region->getParent();
+
+ return region;
+}
+
+void RegionInfo::buildRegionsTree(DomTreeNode *N, Region *region) {
+ BasicBlock *BB = N->getBlock();
+
+ // Passed region exit
+ while (BB == region->getExit())
+ region = region->getParent();
+
+ BBtoRegionMap::iterator it = BBtoRegion.find(BB);
+
+ // This basic block is a start block of a region. It is already in the
+ // BBtoRegion relation. Only the child basic blocks have to be updated.
+ if (it != BBtoRegion.end()) {
+ Region *newRegion = it->second;
+ region->addSubRegion(getTopMostParent(newRegion));
+ region = newRegion;
+ } else {
+ BBtoRegion[BB] = region;
+ }
+
+ for (DomTreeNode::iterator CI = N->begin(), CE = N->end(); CI != CE; ++CI)
+ buildRegionsTree(*CI, region);
+}
+
+void RegionInfo::releaseMemory() {
+ BBtoRegion.clear();
+ if (TopLevelRegion)
+ delete TopLevelRegion;
+ TopLevelRegion = 0;
+}
+
+RegionInfo::RegionInfo() : FunctionPass(ID) {
+ TopLevelRegion = 0;
+}
+
+RegionInfo::~RegionInfo() {
+ releaseMemory();
+}
+
+void RegionInfo::Calculate(Function &F) {
+ // ShortCut is a map in which, for every BB, the exit of the largest region
+ // starting at BB is stored. These regions can be treated as single BBs.
+ // This improves performance on linear CFGs.
+ BBtoBBMap ShortCut;
+
+ scanForRegions(F, &ShortCut);
+ BasicBlock *BB = &F.getEntryBlock();
+ buildRegionsTree(DT->getNode(BB), TopLevelRegion);
+}
+
+bool RegionInfo::runOnFunction(Function &F) {
+ releaseMemory();
+
+ DT = &getAnalysis<DominatorTree>();
+ PDT = &getAnalysis<PostDominatorTree>();
+ DF = &getAnalysis<DominanceFrontier>();
+
+ TopLevelRegion = new Region(&F.getEntryBlock(), 0, this, DT, 0);
+ updateStatistics(TopLevelRegion);
+
+ Calculate(F);
+
+ return false;
+}
+
+void RegionInfo::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequiredTransitive<DominatorTree>();
+ AU.addRequired<PostDominatorTree>();
+ AU.addRequired<DominanceFrontier>();
+}
+
+void RegionInfo::print(raw_ostream &OS, const Module *) const {
+ OS << "Region tree:\n";
+ TopLevelRegion->print(OS, true, 0);
+ OS << "End region tree\n";
+}
+
+void RegionInfo::verifyAnalysis() const {
+ // Only do verification when user wants to, otherwise this expensive check
+ // will be invoked by PMDataManager::verifyPreservedAnalysis when
+ // a region pass (marked PreservedAll) finishes.
+ if (!VerifyRegionInfo) return;
+
+ TopLevelRegion->verifyRegionNest();
+}
+
+// Region pass manager support.
+Region *RegionInfo::getRegionFor(BasicBlock *BB) const {
+ BBtoRegionMap::const_iterator I =
+ BBtoRegion.find(BB);
+ return I != BBtoRegion.end() ? I->second : 0;
+}
+
+Region *RegionInfo::operator[](BasicBlock *BB) const {
+ return getRegionFor(BB);
+}
+
+
+BasicBlock *RegionInfo::getMaxRegionExit(BasicBlock *BB) const {
+ BasicBlock *Exit = NULL;
+
+ while (true) {
+ // Get largest region that starts at BB.
+ Region *R = getRegionFor(BB);
+ while (R && R->getParent() && R->getParent()->getEntry() == BB)
+ R = R->getParent();
+
+ // Get the single exit of BB.
+ if (R && R->getEntry() == BB)
+ Exit = R->getExit();
+ else if (++succ_begin(BB) == succ_end(BB))
+ Exit = *succ_begin(BB);
+ else // No single exit exists.
+ return Exit;
+
+ // Get largest region that starts at Exit.
+ Region *ExitR = getRegionFor(Exit);
+ while (ExitR && ExitR->getParent()
+ && ExitR->getParent()->getEntry() == Exit)
+ ExitR = ExitR->getParent();
+
+ for (pred_iterator PI = pred_begin(Exit), PE = pred_end(Exit); PI != PE;
+ ++PI)
+ if (!R->contains(*PI) && !ExitR->contains(*PI))
+ break;
+
+ // This stops infinite cycles.
+ if (DT->dominates(Exit, BB))
+ break;
+
+ BB = Exit;
+ }
+
+ return Exit;
+}
+
+Region*
+RegionInfo::getCommonRegion(Region *A, Region *B) const {
+ assert (A && B && "One of the Regions is NULL");
+
+ if (A->contains(B)) return A;
+
+ while (!B->contains(A))
+ B = B->getParent();
+
+ return B;
+}
+
+Region*
+RegionInfo::getCommonRegion(SmallVectorImpl<Region*> &Regions) const {
+ Region* ret = Regions.back();
+ Regions.pop_back();
+
+ for (SmallVectorImpl<Region*>::const_iterator I = Regions.begin(),
+ E = Regions.end(); I != E; ++I)
+ ret = getCommonRegion(ret, *I);
+
+ return ret;
+}
+
+Region*
+RegionInfo::getCommonRegion(SmallVectorImpl<BasicBlock*> &BBs) const {
+ Region* ret = getRegionFor(BBs.back());
+ BBs.pop_back();
+
+ for (SmallVectorImpl<BasicBlock*>::const_iterator I = BBs.begin(),
+ E = BBs.end(); I != E; ++I)
+ ret = getCommonRegion(ret, getRegionFor(*I));
+
+ return ret;
+}
+
+char RegionInfo::ID = 0;
+INITIALIZE_PASS(RegionInfo, "regions",
+ "Detect single entry single exit regions", true, true);
+
+// Create methods available outside of this file, to be used from
+// "include/llvm/LinkAllPasses.h". Otherwise the pass would be removed by
+// link-time optimization.
+
+namespace llvm {
+ FunctionPass *createRegionInfoPass() {
+ return new RegionInfo();
+ }
+}
+
diff --git a/libclamav/c++/llvm/lib/Analysis/RegionPrinter.cpp b/libclamav/c++/llvm/lib/Analysis/RegionPrinter.cpp
new file mode 100644
index 0000000..fee5c1b
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Analysis/RegionPrinter.cpp
@@ -0,0 +1,186 @@
+//===- RegionPrinter.cpp - Print regions tree pass ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Print out the region tree of a function using dotty/graphviz.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/Analysis/RegionIterator.h"
+#include "llvm/Analysis/RegionPrinter.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/DOTGraphTraitsPass.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+/// onlySimpleRegions - Show only the simple regions in the RegionViewer.
+static cl::opt<bool>
+onlySimpleRegions("only-simple-regions",
+ cl::desc("Show only simple regions in the graphviz viewer"),
+ cl::Hidden,
+ cl::init(false));
+
+namespace llvm {
+template<>
+struct DOTGraphTraits<RegionNode*> : public DefaultDOTGraphTraits {
+
+ DOTGraphTraits (bool isSimple=false)
+ : DefaultDOTGraphTraits(isSimple) {}
+
+ std::string getNodeLabel(RegionNode *Node, RegionNode *Graph) {
+
+ if (!Node->isSubRegion()) {
+ BasicBlock *BB = Node->getNodeAs<BasicBlock>();
+
+ if (isSimple())
+ return DOTGraphTraits<const Function*>
+ ::getSimpleNodeLabel(BB, BB->getParent());
+ else
+ return DOTGraphTraits<const Function*>
+ ::getCompleteNodeLabel(BB, BB->getParent());
+ }
+
+ return "Not implemented";
+ }
+};
+
+template<>
+struct DOTGraphTraits<RegionInfo*> : public DOTGraphTraits<RegionNode*> {
+
+ DOTGraphTraits (bool isSimple=false)
+ : DOTGraphTraits<RegionNode*>(isSimple) {}
+
+ static std::string getGraphName(RegionInfo *DT) {
+ return "Region Graph";
+ }
+
+ std::string getNodeLabel(RegionNode *Node, RegionInfo *G) {
+ return DOTGraphTraits<RegionNode*>::getNodeLabel(Node,
+ G->getTopLevelRegion());
+ }
+
+ // Print the cluster of the subregions. This groups the single basic blocks
+ // and adds a different background color for each group.
+ static void printRegionCluster(const Region *R, GraphWriter<RegionInfo*> &GW,
+ unsigned depth = 0) {
+ raw_ostream &O = GW.getOStream();
+ O.indent(2 * depth) << "subgraph cluster_" << static_cast<const void*>(R)
+ << " {\n";
+ O.indent(2 * (depth + 1)) << "label = \"\";\n";
+
+ if (!onlySimpleRegions || R->isSimple()) {
+ O.indent(2 * (depth + 1)) << "style = filled;\n";
+ O.indent(2 * (depth + 1)) << "color = "
+ << ((R->getDepth() * 2 % 12) + 1) << "\n";
+
+ } else {
+ O.indent(2 * (depth + 1)) << "style = solid;\n";
+ O.indent(2 * (depth + 1)) << "color = "
+ << ((R->getDepth() * 2 % 12) + 2) << "\n";
+ }
+
+ for (Region::const_iterator RI = R->begin(), RE = R->end(); RI != RE; ++RI)
+ printRegionCluster(*RI, GW, depth + 1);
+
+ RegionInfo *RI = R->getRegionInfo();
+
+ for (Region::const_block_iterator BI = R->block_begin(),
+ BE = R->block_end(); BI != BE; ++BI) {
+ BasicBlock *BB = (*BI)->getNodeAs<BasicBlock>();
+ if (RI->getRegionFor(BB) == R)
+ O.indent(2 * (depth + 1)) << "Node"
+ << static_cast<const void*>(RI->getTopLevelRegion()->getBBNode(BB))
+ << ";\n";
+ }
+
+ O.indent(2 * depth) << "}\n";
+ }
+
+ static void addCustomGraphFeatures(const RegionInfo* RI,
+ GraphWriter<RegionInfo*> &GW) {
+ raw_ostream &O = GW.getOStream();
+ O << "\tcolorscheme = \"paired12\"\n";
+ printRegionCluster(RI->getTopLevelRegion(), GW, 4);
+ }
+};
+} //end namespace llvm
+
+namespace {
+
+struct RegionViewer
+ : public DOTGraphTraitsViewer<RegionInfo, false> {
+ static char ID;
+ RegionViewer() : DOTGraphTraitsViewer<RegionInfo, false>("reg", ID){}
+};
+
+char RegionViewer::ID = 0;
+INITIALIZE_PASS(RegionViewer, "view-regions", "View regions of function",
+ true, true);
+
+struct RegionOnlyViewer
+ : public DOTGraphTraitsViewer<RegionInfo, true> {
+ static char ID;
+ RegionOnlyViewer() : DOTGraphTraitsViewer<RegionInfo, true>("regonly", ID){}
+};
+
+char RegionOnlyViewer::ID = 0;
+INITIALIZE_PASS(RegionOnlyViewer, "view-regions-only",
+ "View regions of function (with no function bodies)",
+ true, true);
+
+struct RegionPrinter
+ : public DOTGraphTraitsPrinter<RegionInfo, false> {
+ static char ID;
+ RegionPrinter() :
+ DOTGraphTraitsPrinter<RegionInfo, false>("reg", ID) {}
+};
+} //end anonymous namespace
+
+char RegionPrinter::ID = 0;
+INITIALIZE_PASS(RegionPrinter, "dot-regions",
+ "Print regions of function to 'dot' file", true, true);
+
+namespace {
+
+struct RegionOnlyPrinter
+ : public DOTGraphTraitsPrinter<RegionInfo, true> {
+ static char ID;
+ RegionOnlyPrinter() :
+ DOTGraphTraitsPrinter<RegionInfo, true>("reg", ID) {}
+};
+
+}
+
+char RegionOnlyPrinter::ID = 0;
+INITIALIZE_PASS(RegionOnlyPrinter, "dot-regions-only",
+ "Print regions of function to 'dot' file "
+ "(with no function bodies)",
+ true, true);
+
+FunctionPass* llvm::createRegionViewerPass() {
+ return new RegionViewer();
+}
+
+FunctionPass* llvm::createRegionOnlyViewerPass() {
+ return new RegionOnlyViewer();
+}
+
+FunctionPass* llvm::createRegionPrinterPass() {
+ return new RegionPrinter();
+}
+
+FunctionPass* llvm::createRegionOnlyPrinterPass() {
+ return new RegionOnlyPrinter();
+}
+
diff --git a/libclamav/c++/llvm/lib/Analysis/ScalarEvolution.cpp b/libclamav/c++/llvm/lib/Analysis/ScalarEvolution.cpp
index b979f33..b892d85 100644
--- a/libclamav/c++/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -103,8 +103,8 @@ MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
"derived loop"),
cl::init(100));
-static RegisterPass<ScalarEvolution>
-R("scalar-evolution", "Scalar Evolution Analysis", false, true);
+INITIALIZE_PASS(ScalarEvolution, "scalar-evolution",
+ "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;
//===----------------------------------------------------------------------===//
@@ -141,7 +141,7 @@ bool SCEV::isAllOnesValue() const {
}
SCEVCouldNotCompute::SCEVCouldNotCompute() :
- SCEV(FoldingSetNodeID(), scCouldNotCompute) {}
+ SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}
bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
@@ -177,8 +177,7 @@ const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
ID.AddPointer(V);
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVConstant>();
- new (S) SCEVConstant(ID, V);
+ SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -189,8 +188,8 @@ const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
- return getConstant(
- ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
+ const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
+ return getConstant(ConstantInt::get(ITy, V, isSigned));
}
const Type *SCEVConstant::getType() const { return V->getType(); }
@@ -199,7 +198,7 @@ void SCEVConstant::print(raw_ostream &OS) const {
WriteAsOperand(OS, V, false);
}
-SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeID &ID,
+SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
unsigned SCEVTy, const SCEV *op, const Type *ty)
: SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
@@ -211,7 +210,7 @@ bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
return Op->properlyDominates(BB, DT);
}
-SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeID &ID,
+SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scTruncate, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
@@ -223,7 +222,7 @@ void SCEVTruncateExpr::print(raw_ostream &OS) const {
OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}
-SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeID &ID,
+SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scZeroExtend, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
@@ -235,7 +234,7 @@ void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}
-SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeID &ID,
+SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scSignExtend, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
@@ -248,30 +247,63 @@ void SCEVSignExtendExpr::print(raw_ostream &OS) const {
}
void SCEVCommutativeExpr::print(raw_ostream &OS) const {
- assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
const char *OpStr = getOperationStr();
- OS << "(" << *Operands[0];
- for (unsigned i = 1, e = Operands.size(); i != e; ++i)
- OS << OpStr << *Operands[i];
+ OS << "(";
+ for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
+ OS << **I;
+ if (llvm::next(I) != E)
+ OS << OpStr;
+ }
OS << ")";
}
bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
- for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
- if (!getOperand(i)->dominates(BB, DT))
+ for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
+ if (!(*I)->dominates(BB, DT))
return false;
- }
return true;
}
bool SCEVNAryExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
- for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
- if (!getOperand(i)->properlyDominates(BB, DT))
+ for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
+ if (!(*I)->properlyDominates(BB, DT))
return false;
- }
return true;
}
+bool SCEVNAryExpr::isLoopInvariant(const Loop *L) const {
+ for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
+ if (!(*I)->isLoopInvariant(L))
+ return false;
+ return true;
+}
+
+// hasComputableLoopEvolution - N-ary expressions have computable loop
+// evolutions iff they have at least one operand that varies with the loop
+// and all varying operands are themselves computable.
+bool SCEVNAryExpr::hasComputableLoopEvolution(const Loop *L) const {
+ bool HasVarying = false;
+ for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
+ const SCEV *S = *I;
+ if (!S->isLoopInvariant(L)) {
+ if (S->hasComputableLoopEvolution(L))
+ HasVarying = true;
+ else
+ return false;
+ }
+ }
+ return HasVarying;
+}
+
+bool SCEVNAryExpr::hasOperand(const SCEV *O) const {
+ for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
+ const SCEV *S = *I;
+ if (O == S || S->hasOperand(O))
+ return true;
+ }
+ return false;
+}
+
bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}
@@ -302,10 +334,14 @@ bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
if (QueryLoop->contains(L))
return false;
+ // This recurrence is invariant w.r.t. QueryLoop if L contains QueryLoop.
+ if (L->contains(QueryLoop))
+ return true;
+
// This recurrence is variant w.r.t. QueryLoop if any of its operands
// are variant.
- for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (!getOperand(i)->isLoopInvariant(QueryLoop))
+ for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
+ if (!(*I)->isLoopInvariant(QueryLoop))
return false;
// Otherwise it's loop-invariant.
@@ -329,19 +365,43 @@ SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
void SCEVAddRecExpr::print(raw_ostream &OS) const {
OS << "{" << *Operands[0];
- for (unsigned i = 1, e = Operands.size(); i != e; ++i)
+ for (unsigned i = 1, e = NumOperands; i != e; ++i)
OS << ",+," << *Operands[i];
OS << "}<";
WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
OS << ">";
}
+void SCEVUnknown::deleted() {
+ // Clear this SCEVUnknown from ValuesAtScopes.
+ SE->ValuesAtScopes.erase(this);
+
+ // Remove this SCEVUnknown from the uniquing map.
+ SE->UniqueSCEVs.RemoveNode(this);
+
+ // Release the value.
+ setValPtr(0);
+}
+
+void SCEVUnknown::allUsesReplacedWith(Value *New) {
+ // Clear this SCEVUnknown from ValuesAtScopes.
+ SE->ValuesAtScopes.erase(this);
+
+ // Remove this SCEVUnknown from the uniquing map.
+ SE->UniqueSCEVs.RemoveNode(this);
+
+ // Update this SCEVUnknown to point to the new value. This is needed
+ // because there may still be outstanding SCEVs that point to
+ // this SCEVUnknown.
+ setValPtr(New);
+}
+
bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
// All non-instruction values are loop invariant. All instructions are loop
// invariant if they are not contained in the specified loop.
// Instructions are never considered invariant in the function body
// (null loop) because they are defined within the "loop".
- if (Instruction *I = dyn_cast<Instruction>(V))
+ if (Instruction *I = dyn_cast<Instruction>(getValue()))
return L && !L->contains(I);
return true;
}
@@ -359,11 +419,11 @@ bool SCEVUnknown::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
}
const Type *SCEVUnknown::getType() const {
- return V->getType();
+ return getValue()->getType();
}
bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
- if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+ if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
if (CE->getOpcode() == Instruction::GetElementPtr &&
@@ -380,7 +440,7 @@ bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
}
bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
- if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+ if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
if (CE->getOpcode() == Instruction::GetElementPtr &&
@@ -405,7 +465,7 @@ bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
}
bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
- if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+ if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
if (CE->getOpcode() == Instruction::GetElementPtr &&
@@ -447,166 +507,183 @@ void SCEVUnknown::print(raw_ostream &OS) const {
}
// Otherwise just print it normally.
- WriteAsOperand(OS, V, false);
+ WriteAsOperand(OS, getValue(), false);
}
//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//
-static bool CompareTypes(const Type *A, const Type *B) {
- if (A->getTypeID() != B->getTypeID())
- return A->getTypeID() < B->getTypeID();
- if (const IntegerType *AI = dyn_cast<IntegerType>(A)) {
- const IntegerType *BI = cast<IntegerType>(B);
- return AI->getBitWidth() < BI->getBitWidth();
- }
- if (const PointerType *AI = dyn_cast<PointerType>(A)) {
- const PointerType *BI = cast<PointerType>(B);
- return CompareTypes(AI->getElementType(), BI->getElementType());
- }
- if (const ArrayType *AI = dyn_cast<ArrayType>(A)) {
- const ArrayType *BI = cast<ArrayType>(B);
- if (AI->getNumElements() != BI->getNumElements())
- return AI->getNumElements() < BI->getNumElements();
- return CompareTypes(AI->getElementType(), BI->getElementType());
- }
- if (const VectorType *AI = dyn_cast<VectorType>(A)) {
- const VectorType *BI = cast<VectorType>(B);
- if (AI->getNumElements() != BI->getNumElements())
- return AI->getNumElements() < BI->getNumElements();
- return CompareTypes(AI->getElementType(), BI->getElementType());
- }
- if (const StructType *AI = dyn_cast<StructType>(A)) {
- const StructType *BI = cast<StructType>(B);
- if (AI->getNumElements() != BI->getNumElements())
- return AI->getNumElements() < BI->getNumElements();
- for (unsigned i = 0, e = AI->getNumElements(); i != e; ++i)
- if (CompareTypes(AI->getElementType(i), BI->getElementType(i)) ||
- CompareTypes(BI->getElementType(i), AI->getElementType(i)))
- return CompareTypes(AI->getElementType(i), BI->getElementType(i));
- }
- return false;
-}
-
namespace {
/// SCEVComplexityCompare - Return true if the complexity of the LHS is less
/// than the complexity of the RHS. This comparator is used to canonicalize
/// expressions.
class SCEVComplexityCompare {
- LoopInfo *LI;
+ const LoopInfo *const LI;
public:
- explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}
+ explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}
+ // Return true if LHS is less than RHS; otherwise (LHS is at least RHS) return false.
bool operator()(const SCEV *LHS, const SCEV *RHS) const {
+ return compare(LHS, RHS) < 0;
+ }
+
+ // Return negative, zero, or positive, if LHS is less than, equal to, or
+ // greater than RHS, respectively. A three-way result allows recursive
+ // comparisons to be more efficient.
+ int compare(const SCEV *LHS, const SCEV *RHS) const {
// Fast-path: SCEVs are uniqued so we can do a quick equality check.
if (LHS == RHS)
- return false;
+ return 0;
// Primarily, sort the SCEVs by their getSCEVType().
- if (LHS->getSCEVType() != RHS->getSCEVType())
- return LHS->getSCEVType() < RHS->getSCEVType();
+ unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
+ if (LType != RType)
+ return (int)LType - (int)RType;
// Aside from the getSCEVType() ordering, the particular ordering
// isn't very important except that it's beneficial to be consistent,
// so that (a + b) and (b + a) don't end up as different expressions.
-
- // Sort SCEVUnknown values with some loose heuristics. TODO: This is
- // not as complete as it could be.
- if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
+ switch (LType) {
+ case scUnknown: {
+ const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
+ // Sort SCEVUnknown values with some loose heuristics. TODO: This is
+ // not as complete as it could be.
+ const Value *LV = LU->getValue(), *RV = RU->getValue();
+
// Order pointer values after integer values. This helps SCEVExpander
// form GEPs.
- if (LU->getType()->isPointerTy() && !RU->getType()->isPointerTy())
- return false;
- if (RU->getType()->isPointerTy() && !LU->getType()->isPointerTy())
- return true;
+ bool LIsPointer = LV->getType()->isPointerTy(),
+ RIsPointer = RV->getType()->isPointerTy();
+ if (LIsPointer != RIsPointer)
+ return (int)LIsPointer - (int)RIsPointer;
// Compare getValueID values.
- if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
- return LU->getValue()->getValueID() < RU->getValue()->getValueID();
+ unsigned LID = LV->getValueID(),
+ RID = RV->getValueID();
+ if (LID != RID)
+ return (int)LID - (int)RID;
// Sort arguments by their position.
- if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
- const Argument *RA = cast<Argument>(RU->getValue());
- return LA->getArgNo() < RA->getArgNo();
+ if (const Argument *LA = dyn_cast<Argument>(LV)) {
+ const Argument *RA = cast<Argument>(RV);
+ unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
+ return (int)LArgNo - (int)RArgNo;
}
- // For instructions, compare their loop depth, and their opcode.
- // This is pretty loose.
- if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
- Instruction *RV = cast<Instruction>(RU->getValue());
+ // For instructions, compare their loop depth, and their operand
+ // count. This is pretty loose.
+ if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
+ const Instruction *RInst = cast<Instruction>(RV);
// Compare loop depths.
- if (LI->getLoopDepth(LV->getParent()) !=
- LI->getLoopDepth(RV->getParent()))
- return LI->getLoopDepth(LV->getParent()) <
- LI->getLoopDepth(RV->getParent());
-
- // Compare opcodes.
- if (LV->getOpcode() != RV->getOpcode())
- return LV->getOpcode() < RV->getOpcode();
+ const BasicBlock *LParent = LInst->getParent(),
+ *RParent = RInst->getParent();
+ if (LParent != RParent) {
+ unsigned LDepth = LI->getLoopDepth(LParent),
+ RDepth = LI->getLoopDepth(RParent);
+ if (LDepth != RDepth)
+ return (int)LDepth - (int)RDepth;
+ }
// Compare the number of operands.
- if (LV->getNumOperands() != RV->getNumOperands())
- return LV->getNumOperands() < RV->getNumOperands();
+ unsigned LNumOps = LInst->getNumOperands(),
+ RNumOps = RInst->getNumOperands();
+ return (int)LNumOps - (int)RNumOps;
}
- return false;
+ return 0;
}
- // Compare constant values.
- if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
+ case scConstant: {
+ const SCEVConstant *LC = cast<SCEVConstant>(LHS);
const SCEVConstant *RC = cast<SCEVConstant>(RHS);
- if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
- return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
- return LC->getValue()->getValue().ult(RC->getValue()->getValue());
+
+ // Compare constant values.
+ const APInt &LA = LC->getValue()->getValue();
+ const APInt &RA = RC->getValue()->getValue();
+ unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
+ if (LBitWidth != RBitWidth)
+ return (int)LBitWidth - (int)RBitWidth;
+ return LA.ult(RA) ? -1 : 1;
}
- // Compare addrec loop depths.
- if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
+ case scAddRecExpr: {
+ const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
- if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
- return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
+
+ // Compare addrec loop depths.
+ const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
+ if (LLoop != RLoop) {
+ unsigned LDepth = LLoop->getLoopDepth(),
+ RDepth = RLoop->getLoopDepth();
+ if (LDepth != RDepth)
+ return (int)LDepth - (int)RDepth;
+ }
+
+ // Addrec complexity grows with operand count.
+ unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
+ if (LNumOps != RNumOps)
+ return (int)LNumOps - (int)RNumOps;
+
+ // Lexicographically compare.
+ for (unsigned i = 0; i != LNumOps; ++i) {
+ long X = compare(LA->getOperand(i), RA->getOperand(i));
+ if (X != 0)
+ return X;
+ }
+
+ return 0;
}
- // Lexicographically compare n-ary expressions.
- if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
+ case scAddExpr:
+ case scMulExpr:
+ case scSMaxExpr:
+ case scUMaxExpr: {
+ const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
- for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
- if (i >= RC->getNumOperands())
- return false;
- if (operator()(LC->getOperand(i), RC->getOperand(i)))
- return true;
- if (operator()(RC->getOperand(i), LC->getOperand(i)))
- return false;
+
+ // Lexicographically compare n-ary expressions.
+ unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
+ for (unsigned i = 0; i != LNumOps; ++i) {
+ if (i >= RNumOps)
+ return 1;
+ long X = compare(LC->getOperand(i), RC->getOperand(i));
+ if (X != 0)
+ return X;
}
- return LC->getNumOperands() < RC->getNumOperands();
+ return (int)LNumOps - (int)RNumOps;
}
- // Lexicographically compare udiv expressions.
- if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
+ case scUDivExpr: {
+ const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
- if (operator()(LC->getLHS(), RC->getLHS()))
- return true;
- if (operator()(RC->getLHS(), LC->getLHS()))
- return false;
- if (operator()(LC->getRHS(), RC->getRHS()))
- return true;
- if (operator()(RC->getRHS(), LC->getRHS()))
- return false;
- return false;
+
+ // Lexicographically compare udiv expressions.
+ long X = compare(LC->getLHS(), RC->getLHS());
+ if (X != 0)
+ return X;
+ return compare(LC->getRHS(), RC->getRHS());
}
- // Compare cast expressions by operand.
- if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
+ case scTruncate:
+ case scZeroExtend:
+ case scSignExtend: {
+ const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
- return operator()(LC->getOperand(), RC->getOperand());
+
+ // Compare cast expressions by operand.
+ return compare(LC->getOperand(), RC->getOperand());
+ }
+
+ default:
+ break;
}
llvm_unreachable("Unknown SCEV kind!");
- return false;
+ return 0;
}
};
}
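
The rewritten comparator above returns a three-way result (negative, zero, positive) instead of the old bool predicate, so one call can distinguish less, equal and greater when comparing loop depths, operand counts and then operands pairwise. As a rough standalone sketch of that pattern, using plain integer sequences instead of SCEV nodes (names and types below are illustrative only, not LLVM API):

#include <vector>
#include <algorithm>
#include <cassert>

// Three-way lexicographic compare of two operand lists, mirroring the
// "compare size first, then operands pairwise" shape of the code above.
static int threeWayCompare(const std::vector<int> &L, const std::vector<int> &R) {
  // Shorter operand lists are considered less complex.
  if (L.size() != R.size())
    return (int)L.size() - (int)R.size();
  // Same length: the first differing element decides.
  for (size_t i = 0; i != L.size(); ++i)
    if (L[i] != R[i])
      return L[i] < R[i] ? -1 : 1;
  return 0; // identical
}

int main() {
  std::vector<int> A = {1, 2, 3}, B = {1, 2, 4};
  assert(threeWayCompare(A, B) < 0);   // A sorts before B
  assert(threeWayCompare(A, A) == 0);  // equal lists compare equal
  // A sorting predicate can be derived from the three-way result.
  auto less = [](const std::vector<int> &X, const std::vector<int> &Y) {
    return threeWayCompare(X, Y) < 0;
  };
  std::vector<std::vector<int>> Ops = {B, A};
  std::sort(Ops.begin(), Ops.end(), less);
  assert(Ops[0] == A);
  return 0;
}
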
@@ -627,8 +704,9 @@ static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
if (Ops.size() == 2) {
// This is the common case, which also happens to be trivially simple.
// Special case it.
- if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
- std::swap(Ops[0], Ops[1]);
+ const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
+ if (SCEVComplexityCompare(LI)(RHS, LHS))
+ std::swap(LHS, RHS);
return;
}
@@ -760,7 +838,7 @@ static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
CalculationBits);
const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
for (unsigned i = 1; i != K; ++i) {
- const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
+ const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
Dividend = SE.getMulExpr(Dividend,
SE.getTruncateOrZeroExtend(S, CalculationTy));
}
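
The loop above accumulates the falling factorial It*(It-1)*...*(It-K+1), which the surrounding function then divides by K! to evaluate the binomial coefficient needed by an addrec's closed form. A rough standalone illustration of that arithmetic with ordinary 64-bit integers (small sample values; the real code works in a widened CalculationBits type and handles overflow differently):

#include <cstdint>
#include <cassert>

// Binomial coefficient C(it, k) via falling factorial divided by k!.
// Assumes k >= 1, as in the caller sketched above.
static uint64_t binomial(uint64_t it, unsigned k) {
  uint64_t dividend = it;          // i = 0 term
  for (unsigned i = 1; i != k; ++i)
    dividend *= it - i;            // multiply in (it - i)
  uint64_t divisor = 1;            // k!
  for (unsigned i = 2; i <= k; ++i)
    divisor *= i;
  return dividend / divisor;
}

int main() {
  assert(binomial(6, 2) == 15);    // 6*5 / 2!
  assert(binomial(10, 3) == 120);  // 10*9*8 / 3!
  return 0;
}
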
@@ -821,7 +899,8 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
// Fold if the operand is constant.
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
return getConstant(
- cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));
+ cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(),
+ getEffectiveSCEVType(Ty))));
// trunc(trunc(x)) --> trunc(x)
if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
@@ -843,11 +922,18 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
return getAddRecExpr(Operands, AddRec->getLoop());
}
- // The cast wasn't folded; create an explicit cast node.
- // Recompute the insert position, as it may have been invalidated.
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVTruncateExpr>();
- new (S) SCEVTruncateExpr(ID, Op, Ty);
+ // As a special case, fold trunc(undef) to undef. We don't want to
+ // know too much about SCEVUnknowns, but this special case is handy
+ // and harmless.
+ if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
+ if (isa<UndefValue>(U->getValue()))
+ return getSCEV(UndefValue::get(Ty));
+
+ // The cast wasn't folded; create an explicit cast node. We can reuse
+ // the existing insert position since if we get here, we won't have
+ // made any changes which would invalidate it.
+ SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
+ Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -861,12 +947,10 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
Ty = getEffectiveSCEVType(Ty);
// Fold if the operand is constant.
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
- const Type *IntTy = getEffectiveSCEVType(Ty);
- Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
- if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
- return getConstant(cast<ConstantInt>(C));
- }
+ if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
+ return getConstant(
+ cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(),
+ getEffectiveSCEVType(Ty))));
// zext(zext(x)) --> zext(x)
if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
@@ -956,7 +1040,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
getUnsignedRange(Step).getUnsignedMax());
if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
- (isLoopGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
+ (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
AR->getPostIncExpr(*this), N)))
// Return the expression with the addrec on the outside.
@@ -966,8 +1050,8 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
} else if (isKnownNegative(Step)) {
const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
getSignedRange(Step).getSignedMin());
- if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) &&
- (isLoopGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) ||
+ if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
+ (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
AR->getPostIncExpr(*this), N)))
// Return the expression with the addrec on the outside.
@@ -981,8 +1065,8 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
// The cast wasn't folded; create an explicit cast node.
// Recompute the insert position, as it may have been invalidated.
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVZeroExtendExpr>();
- new (S) SCEVZeroExtendExpr(ID, Op, Ty);
+ SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
+ Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -996,12 +1080,10 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
Ty = getEffectiveSCEVType(Ty);
// Fold if the operand is constant.
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
- const Type *IntTy = getEffectiveSCEVType(Ty);
- Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
- if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
- return getConstant(cast<ConstantInt>(C));
- }
+ if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
+ return getConstant(
+ cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(),
+ getEffectiveSCEVType(Ty))));
// sext(sext(x)) --> sext(x)
if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
@@ -1091,7 +1173,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
getSignedRange(Step).getSignedMax());
if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
- (isLoopGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
+ (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
AR->getPostIncExpr(*this), N)))
// Return the expression with the addrec on the outside.
@@ -1102,7 +1184,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
getSignedRange(Step).getSignedMin());
if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
- (isLoopGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
+ (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
AR->getPostIncExpr(*this), N)))
// Return the expression with the addrec on the outside.
@@ -1116,8 +1198,8 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
// The cast wasn't folded; create an explicit cast node.
// Recompute the insert position, as it may have been invalidated.
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVSignExtendExpr>();
- new (S) SCEVSignExtendExpr(ID, Op, Ty);
+ SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
+ Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -1165,6 +1247,13 @@ const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
return getAddRecExpr(Ops, AR->getLoop());
}
+ // As a special case, fold anyext(undef) to undef. We don't want to
+ // know too much about SCEVUnknowns, but this special case is handy
+ // and harmless.
+ if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
+ if (isa<UndefValue>(U->getValue()))
+ return getSCEV(UndefValue::get(Ty));
+
// If the expression is obviously signed, use the sext cast value.
if (isa<SCEVSMaxExpr>(Op))
return SExt;
@@ -1202,23 +1291,34 @@ static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
SmallVector<const SCEV *, 8> &NewOps,
APInt &AccumulatedConstant,
- const SmallVectorImpl<const SCEV *> &Ops,
+ const SCEV *const *Ops, size_t NumOperands,
const APInt &Scale,
ScalarEvolution &SE) {
bool Interesting = false;
- // Iterate over the add operands.
- for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ // Iterate over the add operands. They are sorted, with constants first.
+ unsigned i = 0;
+ while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
+ ++i;
+ // Pull a buried constant out to the outside.
+ if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
+ Interesting = true;
+ AccumulatedConstant += Scale * C->getValue()->getValue();
+ }
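
The loop above relies on the operand list being sorted with constants first, so every buried constant can be folded into one accumulated value scaled by Scale. A small sketch of that accumulation over plain integers (simplified; the real code also recurses into scaled inner adds, as shown further down in this hunk):

#include <map>
#include <vector>
#include <cassert>

// Split "scale * (c1 + c2 + x + y + x)" into an accumulated constant plus a
// map from each non-constant operand to its total scale.
static long collectWithScale(const std::vector<long> &constOps,
                             const std::vector<int> &otherOps,
                             long scale,
                             std::map<int, long> &scales) {
  long accumulated = 0;
  for (long c : constOps)          // constants come first in the sorted list
    accumulated += scale * c;      // pull each buried constant outside
  for (int op : otherOps)
    scales[op] += scale;           // ordinary operand: update its scale
  return accumulated;
}

int main() {
  std::map<int, long> scales;
  // 3 * (2 + 5 + x + y + x), with x=101 and y=202 as opaque operand ids.
  long acc = collectWithScale({2, 5}, {101, 202, 101}, 3, scales);
  assert(acc == 21);          // 3*2 + 3*5
  assert(scales[101] == 6);   // x occurs twice, each with scale 3
  assert(scales[202] == 3);
  return 0;
}
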
+
+ // Next comes everything else. We're especially interested in multiplies
+ // here, but they're in the middle, so just visit the rest with one loop.
+ for (; i != NumOperands; ++i) {
const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
APInt NewScale =
Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
// A multiplication of a constant with another add; recurse.
+ const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
Interesting |=
CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
- cast<SCEVAddExpr>(Mul->getOperand(1))
- ->getOperands(),
+ Add->op_begin(), Add->getNumOperands(),
NewScale, SE);
} else {
// A multiplication of a constant with some other value. Update
@@ -1236,11 +1336,6 @@ CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
Interesting = true;
}
}
- } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
- // Pull a buried constant out to the outside.
- if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
- Interesting = true;
- AccumulatedConstant += Scale * C->getValue()->getValue();
} else {
// An ordinary operand. Update the map.
std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
@@ -1274,17 +1369,18 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
assert(!Ops.empty() && "Cannot get empty add!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
+ const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
- assert(getEffectiveSCEVType(Ops[i]->getType()) ==
- getEffectiveSCEVType(Ops[0]->getType()) &&
+ assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVAddExpr operand types don't match!");
#endif
// If HasNSW is true and all the operands are non-negative, infer HasNUW.
if (!HasNUW && HasNSW) {
bool All = true;
- for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (!isKnownNonNegative(Ops[i])) {
+ for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
+ E = Ops.end(); I != E; ++I)
+ if (!isKnownNonNegative(*I)) {
All = false;
break;
}
@@ -1309,30 +1405,37 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
}
// If we are left with a constant zero being added, strip it off.
- if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
+ if (LHSC->getValue()->isZero()) {
Ops.erase(Ops.begin());
--Idx;
}
- }
- if (Ops.size() == 1) return Ops[0];
+ if (Ops.size() == 1) return Ops[0];
+ }
- // Okay, check to see if the same value occurs in the operand list twice. If
- // so, merge them together into an multiply expression. Since we sorted the
- // list, these values are required to be adjacent.
+ // Okay, check to see if the same value occurs in the operand list more than
+ // once. If so, merge them together into a multiply expression. Since we
+ // sorted the list, these values are required to be adjacent.
const Type *Ty = Ops[0]->getType();
- for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
+ bool FoundMatch = false;
+ for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
- // Found a match, merge the two values into a multiply, and add any
- // remaining values to the result.
- const SCEV *Two = getIntegerSCEV(2, Ty);
- const SCEV *Mul = getMulExpr(Ops[i], Two);
- if (Ops.size() == 2)
+ // Scan ahead to count how many equal operands there are.
+ unsigned Count = 2;
+ while (i+Count != e && Ops[i+Count] == Ops[i])
+ ++Count;
+ // Merge the values into a multiply.
+ const SCEV *Scale = getConstant(Ty, Count);
+ const SCEV *Mul = getMulExpr(Scale, Ops[i]);
+ if (Ops.size() == Count)
return Mul;
- Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
- Ops.push_back(Mul);
- return getAddExpr(Ops, HasNUW, HasNSW);
+ Ops[i] = Mul;
+ Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
+ --i; e -= Count - 1;
+ FoundMatch = true;
}
+ if (FoundMatch)
+ return getAddExpr(Ops, HasNUW, HasNSW);
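
The scan above replaces a whole run of identical, adjacent operands with a single multiply in one pass, so x + y + y + y becomes x + 3*y rather than going through repeated pairwise rewrites. A small sketch of the same run-length merge, with plain integers standing in for SCEV operands:

#include <vector>
#include <utility>
#include <cassert>

// Collapse runs of equal adjacent operands into (operand, count) pairs,
// mirroring "X + Y + Y --> X + Y*2" on a sorted operand list.
static std::vector<std::pair<int, unsigned>>
mergeRuns(const std::vector<int> &ops) {
  std::vector<std::pair<int, unsigned>> out;
  for (size_t i = 0; i != ops.size(); ) {
    size_t count = 1;
    while (i + count != ops.size() && ops[i + count] == ops[i])
      ++count;                      // scan ahead over the run
    out.push_back({ops[i], (unsigned)count});
    i += count;
  }
  return out;
}

int main() {
  // x + y + y + y, with x=7 and y=9 as opaque operand ids.
  auto merged = mergeRuns({7, 9, 9, 9});
  assert(merged.size() == 2);
  assert(merged[1] == std::make_pair(9, 3u)); // y is scaled by 3
  return 0;
}
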
// Check for truncates. If all the operands are truncated from the same
// type, see if factoring out the truncate would permit the result to be
@@ -1354,9 +1457,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
}
LargeOps.push_back(T->getOperand());
} else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
- // This could be either sign or zero extension, but sign extension
- // is much more likely to be foldable here.
- LargeOps.push_back(getSignExtendExpr(C, SrcType));
+ LargeOps.push_back(getAnyExtendExpr(C, SrcType));
} else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
SmallVector<const SCEV *, 8> LargeMulOps;
for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
@@ -1369,9 +1470,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
LargeMulOps.push_back(T->getOperand());
} else if (const SCEVConstant *C =
dyn_cast<SCEVConstant>(M->getOperand(j))) {
- // This could be either sign or zero extension, but sign extension
- // is much more likely to be foldable here.
- LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
+ LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
} else {
Ok = false;
break;
@@ -1403,8 +1502,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
// If we have an add, expand the add operands onto the end of the operands
// list.
- Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
Ops.erase(Ops.begin()+Idx);
+ Ops.append(Add->op_begin(), Add->op_end());
DeletedAdd = true;
}
@@ -1427,12 +1526,13 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
SmallVector<const SCEV *, 8> NewOps;
APInt AccumulatedConstant(BitWidth, 0);
if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
- Ops, APInt(BitWidth, 1), *this)) {
+ Ops.data(), Ops.size(),
+ APInt(BitWidth, 1), *this)) {
// Some interesting folding opportunity is present, so it's worthwhile to
// re-generate the operands list. Group the operands by constant scale,
// to avoid multiplying by the same constant scale multiple times.
std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
- for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
+ for (SmallVector<const SCEV *, 8>::const_iterator I = NewOps.begin(),
E = NewOps.end(); I != E; ++I)
MulOpLists[M.find(*I)->second].push_back(*I);
// Re-generate the operands list.
@@ -1445,7 +1545,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
Ops.push_back(getMulExpr(getConstant(I->first),
getAddExpr(I->second)));
if (Ops.empty())
- return getIntegerSCEV(0, Ty);
+ return getConstant(Ty, 0);
if (Ops.size() == 1)
return Ops[0];
return getAddExpr(Ops);
@@ -1459,20 +1559,23 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
+ if (isa<SCEVConstant>(MulOpSCEV))
+ continue;
for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
- if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
+ if (MulOpSCEV == Ops[AddOp]) {
// Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
if (Mul->getNumOperands() != 2) {
// If the multiply has more than two operands, we must get the
// Y*Z term.
- SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
- MulOps.erase(MulOps.begin()+MulOp);
+ SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
+ Mul->op_begin()+MulOp);
+ MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
InnerMul = getMulExpr(MulOps);
}
- const SCEV *One = getIntegerSCEV(1, Ty);
- const SCEV *AddOne = getAddExpr(InnerMul, One);
- const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
+ const SCEV *One = getConstant(Ty, 1);
+ const SCEV *AddOne = getAddExpr(One, InnerMul);
+ const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
if (Ops.size() == 2) return OuterMul;
if (AddOp < Idx) {
Ops.erase(Ops.begin()+AddOp);
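
The fold above rewrites W + X + (X*Y*Z) as W + X*((Y*Z)+1) by factoring the common X out of the plain operand and the multiply. A quick numeric check of the identity (arbitrary sample values, nothing LLVM-specific):

#include <cassert>

int main() {
  long W = 11, X = 5, Y = 3, Z = 7;
  // Original form: W + X + (X * Y * Z)
  long before = W + X + X * Y * Z;
  // Folded form:  W + X * ((Y * Z) + 1)
  long after  = W + X * (Y * Z + 1);
  assert(before == after); // 121 == 121
  return 0;
}
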
@@ -1499,15 +1602,15 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
if (Mul->getNumOperands() != 2) {
SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
- Mul->op_end());
- MulOps.erase(MulOps.begin()+MulOp);
+ Mul->op_begin()+MulOp);
+ MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
InnerMul1 = getMulExpr(MulOps);
}
const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
if (OtherMul->getNumOperands() != 2) {
SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
- OtherMul->op_end());
- MulOps.erase(MulOps.begin()+OMulOp);
+ OtherMul->op_begin()+OMulOp);
+ MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
InnerMul2 = getMulExpr(MulOps);
}
const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
@@ -1534,8 +1637,9 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// they are loop invariant w.r.t. the recurrence.
SmallVector<const SCEV *, 8> LIOps;
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
+ const Loop *AddRecLoop = AddRec->getLoop();
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
+ if (Ops[i]->isLoopInvariant(AddRecLoop)) {
LIOps.push_back(Ops[i]);
Ops.erase(Ops.begin()+i);
--i; --e;
@@ -1550,9 +1654,11 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
AddRec->op_end());
AddRecOps[0] = getAddExpr(LIOps);
- // It's tempting to propagate NUW/NSW flags here, but nuw/nsw addition
- // is not associative so this isn't necessarily safe.
- const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
+ // Build the new addrec. Propagate the NUW and NSW flags if both the
+ // outer add and the inner addrec are guaranteed to have no overflow.
+ const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop,
+ HasNUW && AddRec->hasNoUnsignedWrap(),
+ HasNSW && AddRec->hasNoSignedWrap());
// If all of the other operands were loop invariant, we are done.
if (Ops.size() == 1) return NewRec;
@@ -1570,30 +1676,31 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// there are multiple AddRec's with the same loop induction variable being
// added together. If so, we can fold them.
for (unsigned OtherIdx = Idx+1;
- OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
- if (OtherIdx != Idx) {
- const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
- if (AddRec->getLoop() == OtherAddRec->getLoop()) {
- // Other + {A,+,B} + {C,+,D} --> Other + {A+C,+,B+D}
- SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
- AddRec->op_end());
- for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
- if (i >= NewOps.size()) {
- NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
- OtherAddRec->op_end());
- break;
+ OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+ ++OtherIdx)
+ if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
+ // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
+ SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
+ AddRec->op_end());
+ for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+ ++OtherIdx)
+ if (const SCEVAddRecExpr *OtherAddRec =
+ dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
+ if (OtherAddRec->getLoop() == AddRecLoop) {
+ for (unsigned i = 0, e = OtherAddRec->getNumOperands();
+ i != e; ++i) {
+ if (i >= AddRecOps.size()) {
+ AddRecOps.append(OtherAddRec->op_begin()+i,
+ OtherAddRec->op_end());
+ break;
+ }
+ AddRecOps[i] = getAddExpr(AddRecOps[i],
+ OtherAddRec->getOperand(i));
+ }
+ Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
}
- NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
- }
- const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());
-
- if (Ops.size() == 2) return NewAddRec;
-
- Ops.erase(Ops.begin()+Idx);
- Ops.erase(Ops.begin()+OtherIdx-1);
- Ops.push_back(NewAddRec);
- return getAddExpr(Ops);
- }
+ Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop);
+ return getAddExpr(Ops);
}
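
The loop above merges addrecs over the same loop by adding them coefficient-wise: {A,+,B}<L> + {C,+,D}<L> becomes {A+C,+,B+D}<L>. A tiny numeric check that the coefficient-wise sum evaluates to the same value at every iteration (plain integers, with i standing in for the iteration number):

#include <cassert>

// Value of the affine addrec {Start,+,Step} at iteration i.
static long addrecAt(long start, long step, long i) { return start + step * i; }

int main() {
  long A = 4, B = 3, C = 10, D = -2;
  for (long i = 0; i < 8; ++i) {
    long separate = addrecAt(A, B, i) + addrecAt(C, D, i);
    long merged   = addrecAt(A + C, B + D, i); // {A+C,+,B+D}
    assert(separate == merged);
  }
  return 0;
}
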
// Otherwise couldn't fold anything into this recurrence. Move onto the
@@ -1611,8 +1718,10 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
SCEVAddExpr *S =
static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
- S = SCEVAllocator.Allocate<SCEVAddExpr>();
- new (S) SCEVAddExpr(ID, Ops);
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+ S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
+ O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
}
if (HasNUW) S->setHasNoUnsignedWrap(true);
@@ -1627,17 +1736,18 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
assert(!Ops.empty() && "Cannot get empty mul!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
+ const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
- assert(getEffectiveSCEVType(Ops[i]->getType()) ==
- getEffectiveSCEVType(Ops[0]->getType()) &&
+ assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVMulExpr operand types don't match!");
#endif
// If HasNSW is true and all the operands are non-negative, infer HasNUW.
if (!HasNUW && HasNSW) {
bool All = true;
- for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (!isKnownNonNegative(Ops[i])) {
+ for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
+ E = Ops.end(); I != E; ++I)
+ if (!isKnownNonNegative(*I)) {
All = false;
break;
}
@@ -1695,23 +1805,23 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
return getAddExpr(NewOps);
}
}
+
+ if (Ops.size() == 1)
+ return Ops[0];
}
// Skip over the add expression until we get to a multiply.
while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
++Idx;
- if (Ops.size() == 1)
- return Ops[0];
-
// If there are mul operands inline them all into this expression.
if (Idx < Ops.size()) {
bool DeletedMul = false;
while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
// If we have an mul, expand the mul operands onto the end of the operands
// list.
- Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
Ops.erase(Ops.begin()+Idx);
+ Ops.append(Mul->op_begin(), Mul->op_end());
DeletedMul = true;
}
@@ -1734,8 +1844,9 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// they are loop invariant w.r.t. the recurrence.
SmallVector<const SCEV *, 8> LIOps;
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
+ const Loop *AddRecLoop = AddRec->getLoop();
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
+ if (Ops[i]->isLoopInvariant(AddRecLoop)) {
LIOps.push_back(Ops[i]);
Ops.erase(Ops.begin()+i);
--i; --e;
@@ -1746,23 +1857,15 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
SmallVector<const SCEV *, 4> NewOps;
NewOps.reserve(AddRec->getNumOperands());
- if (LIOps.size() == 1) {
- const SCEV *Scale = LIOps[0];
- for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
- NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
- } else {
- for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
- SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
- MulOps.push_back(AddRec->getOperand(i));
- NewOps.push_back(getMulExpr(MulOps));
- }
- }
+ const SCEV *Scale = getMulExpr(LIOps);
+ for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
+ NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
- // It's tempting to propagate the NSW flag here, but nsw multiplication
- // is not associative so this isn't necessarily safe.
- const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(),
+ // Build the new addrec. Propagate the NUW and NSW flags if both the
+ // outer mul and the inner addrec are guaranteed to have no overflow.
+ const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop,
HasNUW && AddRec->hasNoUnsignedWrap(),
- /*HasNSW=*/false);
+ HasNSW && AddRec->hasNoSignedWrap());
// If all of the other operands were loop invariant, we are done.
if (Ops.size() == 1) return NewRec;
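
Here the loop-invariant factors are multiplied into every addrec coefficient at once: LI * {Start,+,Step} becomes {LI*Start,+,LI*Step}. A quick check of that distribution for an affine recurrence (sample values only):

#include <cassert>

static long addrecAt(long start, long step, long i) { return start + step * i; }

int main() {
  long LI = 6, Start = 2, Step = 5;
  for (long i = 0; i < 8; ++i) {
    long outside = LI * addrecAt(Start, Step, i);      // LI * {Start,+,Step}
    long inside  = addrecAt(LI * Start, LI * Step, i); // {LI*Start,+,LI*Step}
    assert(outside == inside);
  }
  return 0;
}
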
@@ -1780,28 +1883,30 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// there are multiple AddRec's with the same loop induction variable being
// multiplied together. If so, we can fold them.
for (unsigned OtherIdx = Idx+1;
- OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
- if (OtherIdx != Idx) {
- const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
- if (AddRec->getLoop() == OtherAddRec->getLoop()) {
- // F * G --> {A,+,B} * {C,+,D} --> {A*C,+,F*D + G*B + B*D}
- const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
- const SCEV *NewStart = getMulExpr(F->getStart(),
- G->getStart());
- const SCEV *B = F->getStepRecurrence(*this);
- const SCEV *D = G->getStepRecurrence(*this);
- const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
- getMulExpr(G, B),
- getMulExpr(B, D));
- const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
- F->getLoop());
- if (Ops.size() == 2) return NewAddRec;
-
- Ops.erase(Ops.begin()+Idx);
- Ops.erase(Ops.begin()+OtherIdx-1);
- Ops.push_back(NewAddRec);
- return getMulExpr(Ops);
- }
+ OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+ ++OtherIdx)
+ if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
+ // F * G, where F = {A,+,B}<L> and G = {C,+,D}<L> -->
+ // {A*C,+,F*D + G*B + B*D}<L>
+ for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+ ++OtherIdx)
+ if (const SCEVAddRecExpr *OtherAddRec =
+ dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
+ if (OtherAddRec->getLoop() == AddRecLoop) {
+ const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
+ const SCEV *NewStart = getMulExpr(F->getStart(), G->getStart());
+ const SCEV *B = F->getStepRecurrence(*this);
+ const SCEV *D = G->getStepRecurrence(*this);
+ const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
+ getMulExpr(G, B),
+ getMulExpr(B, D));
+ const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
+ F->getLoop());
+ if (Ops.size() == 2) return NewAddRec;
+ Ops[Idx] = AddRec = cast<SCEVAddRecExpr>(NewAddRec);
+ Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
+ }
+ return getMulExpr(Ops);
}
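
The product rule above follows from expanding (F+B)*(G+D) - F*G = F*D + G*B + B*D, so the product of two addrecs over the same loop is again an addrec whose step is expressed in terms of the original addrecs. A small numeric check of that recurrence (sample coefficients only):

#include <cassert>

int main() {
  long A = 3, B = 2, C = 5, D = 4;
  // F = {A,+,B}, G = {C,+,D}; claim: F*G = {A*C,+,F*D + G*B + B*D}.
  long P = A * C;                       // start of the product addrec
  for (long i = 0; i < 8; ++i) {
    long F = A + B * i, G = C + D * i;  // values of the two addrecs at i
    assert(P == F * G);                 // product addrec matches F(i)*G(i)
    P += F * D + G * B + B * D;         // advance by the derived step
  }
  return 0;
}
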
// Otherwise couldn't fold anything into this recurrence. Move onto the
@@ -1819,8 +1924,10 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
SCEVMulExpr *S =
static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
- S = SCEVAllocator.Allocate<SCEVMulExpr>();
- new (S) SCEVMulExpr(ID, Ops);
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+ S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
+ O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
}
if (HasNUW) S->setHasNoUnsignedWrap(true);
@@ -1839,79 +1946,81 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
if (RHSC->getValue()->equalsInt(1))
return LHS; // X udiv 1 --> x
- if (RHSC->isZero())
- return getIntegerSCEV(0, LHS->getType()); // value is undefined
-
- // Determine if the division can be folded into the operands of
- // its operands.
- // TODO: Generalize this to non-constants by using known-bits information.
- const Type *Ty = LHS->getType();
- unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
- unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
- // For non-power-of-two values, effectively round the value up to the
- // nearest power of two.
- if (!RHSC->getValue()->getValue().isPowerOf2())
- ++MaxShiftAmt;
- const IntegerType *ExtTy =
- IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
- // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
- if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
- if (const SCEVConstant *Step =
- dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
- if (!Step->getValue()->getValue()
- .urem(RHSC->getValue()->getValue()) &&
- getZeroExtendExpr(AR, ExtTy) ==
- getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
- getZeroExtendExpr(Step, ExtTy),
- AR->getLoop())) {
- SmallVector<const SCEV *, 4> Operands;
- for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
- Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
- return getAddRecExpr(Operands, AR->getLoop());
- }
- // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
- if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
- SmallVector<const SCEV *, 4> Operands;
- for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
- Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
- if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
- // Find an operand that's safely divisible.
- for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
- const SCEV *Op = M->getOperand(i);
- const SCEV *Div = getUDivExpr(Op, RHSC);
- if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
- const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
- Operands = SmallVector<const SCEV *, 4>(MOperands.begin(),
- MOperands.end());
- Operands[i] = Div;
- return getMulExpr(Operands);
+ // If the denominator is zero, the result of the udiv is undefined. Don't
+ // try to analyze it, because the resolution chosen here may differ from
+ // the resolution chosen in other parts of the compiler.
+ if (!RHSC->getValue()->isZero()) {
+ // Determine if the division can be folded into the operands of
+ // its left-hand operand.
+ // TODO: Generalize this to non-constants by using known-bits information.
+ const Type *Ty = LHS->getType();
+ unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
+ unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
+ // For non-power-of-two values, effectively round the value up to the
+ // nearest power of two.
+ if (!RHSC->getValue()->getValue().isPowerOf2())
+ ++MaxShiftAmt;
+ const IntegerType *ExtTy =
+ IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
+ // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
+ if (const SCEVConstant *Step =
+ dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
+ if (!Step->getValue()->getValue()
+ .urem(RHSC->getValue()->getValue()) &&
+ getZeroExtendExpr(AR, ExtTy) ==
+ getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
+ getZeroExtendExpr(Step, ExtTy),
+ AR->getLoop())) {
+ SmallVector<const SCEV *, 4> Operands;
+ for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
+ Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
+ return getAddRecExpr(Operands, AR->getLoop());
}
+ // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
+ if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
+ SmallVector<const SCEV *, 4> Operands;
+ for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
+ Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
+ if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
+ // Find an operand that's safely divisible.
+ for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
+ const SCEV *Op = M->getOperand(i);
+ const SCEV *Div = getUDivExpr(Op, RHSC);
+ if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
+ Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
+ M->op_end());
+ Operands[i] = Div;
+ return getMulExpr(Operands);
+ }
+ }
+ }
+ // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
+ if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
+ SmallVector<const SCEV *, 4> Operands;
+ for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
+ Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
+ if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
+ Operands.clear();
+ for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
+ const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
+ if (isa<SCEVUDivExpr>(Op) ||
+ getMulExpr(Op, RHS) != A->getOperand(i))
+ break;
+ Operands.push_back(Op);
+ }
+ if (Operands.size() == A->getNumOperands())
+ return getAddExpr(Operands);
}
- }
- // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
- if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
- SmallVector<const SCEV *, 4> Operands;
- for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
- Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
- if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
- Operands.clear();
- for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
- const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
- if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
- break;
- Operands.push_back(Op);
- }
- if (Operands.size() == A->getNumOperands())
- return getAddExpr(Operands);
}
- }
- // Fold if both operands are constant.
- if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
- Constant *LHSCV = LHSC->getValue();
- Constant *RHSCV = RHSC->getValue();
- return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
- RHSCV)));
+ // Fold if both operands are constant.
+ if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
+ Constant *LHSCV = LHSC->getValue();
+ Constant *RHSCV = RHSC->getValue();
+ return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
+ RHSCV)));
+ }
}
}
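
The {X,+,N}/C fold above relies on the step being divisible by the divisor, in which case (X + N*i)/C equals X/C + (N/C)*i at every iteration; the zero-extend comparison additionally rules out wraparound in the fixed bit width. A quick check of the underlying integer identity (exact arithmetic, so no overflow concerns in this sketch):

#include <cassert>

int main() {
  unsigned long X = 37, N = 12, C = 4;   // N is divisible by C
  for (unsigned long i = 0; i < 10; ++i) {
    unsigned long direct = (X + N * i) / C;        // {X,+,N} / C at iteration i
    unsigned long folded = X / C + (N / C) * i;    // {X/C,+,N/C} at iteration i
    assert(direct == folded);
  }
  return 0;
}
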
@@ -1921,8 +2030,8 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
ID.AddPointer(RHS);
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVUDivExpr>();
- new (S) SCEVUDivExpr(ID, LHS, RHS);
+ SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
+ LHS, RHS);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -1937,8 +2046,7 @@ const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
Operands.push_back(Start);
if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
if (StepChrec->getLoop() == L) {
- Operands.insert(Operands.end(), StepChrec->op_begin(),
- StepChrec->op_end());
+ Operands.append(StepChrec->op_begin(), StepChrec->op_end());
return getAddRecExpr(Operands, L);
}
@@ -1954,9 +2062,9 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
bool HasNUW, bool HasNSW) {
if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
+ const Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
for (unsigned i = 1, e = Operands.size(); i != e; ++i)
- assert(getEffectiveSCEVType(Operands[i]->getType()) ==
- getEffectiveSCEVType(Operands[0]->getType()) &&
+ assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
"SCEVAddRecExpr operand types don't match!");
#endif
@@ -1974,8 +2082,9 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
// If HasNSW is true and all the operands are non-negative, infer HasNUW.
if (!HasNUW && HasNSW) {
bool All = true;
- for (unsigned i = 0, e = Operands.size(); i != e; ++i)
- if (!isKnownNonNegative(Operands[i])) {
+ for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
+ E = Operands.end(); I != E; ++I)
+ if (!isKnownNonNegative(*I)) {
All = false;
break;
}
@@ -1985,9 +2094,9 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
// Canonicalize nested AddRecs by nesting them in order of loop depth.
if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
const Loop *NestedLoop = NestedAR->getLoop();
- if (L->contains(NestedLoop->getHeader()) ?
+ if (L->contains(NestedLoop) ?
(L->getLoopDepth() < NestedLoop->getLoopDepth()) :
- (!NestedLoop->contains(L->getHeader()) &&
+ (!NestedLoop->contains(L) &&
DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
NestedAR->op_end());
@@ -2030,8 +2139,10 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
SCEVAddRecExpr *S =
static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
- S = SCEVAllocator.Allocate<SCEVAddRecExpr>();
- new (S) SCEVAddRecExpr(ID, Operands, L);
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
+ std::uninitialized_copy(Operands.begin(), Operands.end(), O);
+ S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
+ O, Operands.size(), L);
UniqueSCEVs.InsertNode(S, IP);
}
if (HasNUW) S->setHasNoUnsignedWrap(true);
@@ -2052,9 +2163,9 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
assert(!Ops.empty() && "Cannot get empty smax!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
+ const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
- assert(getEffectiveSCEVType(Ops[i]->getType()) ==
- getEffectiveSCEVType(Ops[0]->getType()) &&
+ assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVSMaxExpr operand types don't match!");
#endif
@@ -2086,9 +2197,9 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
// maximum-int.
return Ops[0];
}
- }
- if (Ops.size() == 1) return Ops[0];
+ if (Ops.size() == 1) return Ops[0];
+ }
// Find the first SMax
while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
@@ -2099,8 +2210,8 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
if (Idx < Ops.size()) {
bool DeletedSMax = false;
while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
- Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
Ops.erase(Ops.begin()+Idx);
+ Ops.append(SMax->op_begin(), SMax->op_end());
DeletedSMax = true;
}
@@ -2112,7 +2223,13 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
// so, delete one. Since we sorted the list, these values are required to
// be adjacent.
for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
- if (Ops[i] == Ops[i+1]) { // X smax Y smax Y --> X smax Y
+ // X smax Y smax Y --> X smax Y
+ // X smax Y --> X, if X is always greater than or equal to Y
+ if (Ops[i] == Ops[i+1] ||
+ isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
+ Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
+ --i; --e;
+ } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
--i; --e;
}
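
The two rewrites above drop duplicates and any operand that is provably dominated by its neighbour. A small sketch of that pruning over concrete values (here "known" is an ordinary comparison, so the list collapses completely; SCEV only prunes the pairs whose relation isKnownPredicate can actually prove):

#include <vector>
#include <cassert>

// Apply the rewrites to a list of concrete values:
//   X smax Y smax Y --> X smax Y       (drop duplicates)
//   X smax Y        --> X, if X >= Y   (drop dominated operands)
static void pruneSMax(std::vector<long> &ops) {
  for (size_t i = 0; i + 1 < ops.size(); ) {
    if (ops[i] == ops[i + 1] || ops[i] >= ops[i + 1]) {
      ops.erase(ops.begin() + i + 1);   // drop the right operand
    } else if (ops[i] <= ops[i + 1]) {
      ops.erase(ops.begin() + i);       // drop the left operand
    } else {
      ++i;                              // relation unknown: keep both
                                        // (never hit for concrete values)
    }
  }
}

int main() {
  std::vector<long> ops = {7, 7, 3, 9};
  pruneSMax(ops);
  assert(ops.size() == 1 && ops[0] == 9);   // smax(7,7,3,9) == 9
  return 0;
}
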
@@ -2130,8 +2247,10 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
ID.AddPointer(Ops[i]);
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>();
- new (S) SCEVSMaxExpr(ID, Ops);
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+ SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
+ O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -2149,9 +2268,9 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
assert(!Ops.empty() && "Cannot get empty umax!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
+ const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
- assert(getEffectiveSCEVType(Ops[i]->getType()) ==
- getEffectiveSCEVType(Ops[0]->getType()) &&
+ assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVUMaxExpr operand types don't match!");
#endif
@@ -2183,9 +2302,9 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
// maximum-int.
return Ops[0];
}
- }
- if (Ops.size() == 1) return Ops[0];
+ if (Ops.size() == 1) return Ops[0];
+ }
// Find the first UMax
while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
@@ -2196,8 +2315,8 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
if (Idx < Ops.size()) {
bool DeletedUMax = false;
while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
- Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
Ops.erase(Ops.begin()+Idx);
+ Ops.append(UMax->op_begin(), UMax->op_end());
DeletedUMax = true;
}
@@ -2209,7 +2328,13 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
// so, delete one. Since we sorted the list, these values are required to
// be adjacent.
for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
- if (Ops[i] == Ops[i+1]) { // X umax Y umax Y --> X umax Y
+ // X umax Y umax Y --> X umax Y
+ // X umax Y --> X, if X is always greater than or equal to Y
+ if (Ops[i] == Ops[i+1] ||
+ isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
+ Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
+ --i; --e;
+ } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
--i; --e;
}
@@ -2227,8 +2352,10 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
ID.AddPointer(Ops[i]);
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>();
- new (S) SCEVUMaxExpr(ID, Ops);
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+ SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
+ O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -2246,9 +2373,17 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
}
const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
+ // If we have TargetData, we can bypass creating a target-independent
+ // constant expression and then folding it back into a ConstantInt.
+ // This is just a compile-time optimization.
+ if (TD)
+ return getConstant(TD->getIntPtrType(getContext()),
+ TD->getTypeAllocSize(AllocTy));
+
Constant *C = ConstantExpr::getSizeOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- C = ConstantFoldConstantExpression(CE, TD);
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ C = Folded;
const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
@@ -2256,16 +2391,25 @@ const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
Constant *C = ConstantExpr::getAlignOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- C = ConstantFoldConstantExpression(CE, TD);
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ C = Folded;
const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
unsigned FieldNo) {
+ // If we have TargetData, we can bypass creating a target-independent
+ // constant expression and then folding it back into a ConstantInt.
+ // This is just a compile-time optimization.
+ if (TD)
+ return getConstant(TD->getIntPtrType(getContext()),
+ TD->getStructLayout(STy)->getElementOffset(FieldNo));
+
Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- C = ConstantFoldConstantExpression(CE, TD);
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ C = Folded;
const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
@@ -2274,7 +2418,8 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
Constant *FieldNo) {
Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- C = ConstantFoldConstantExpression(CE, TD);
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ C = Folded;
const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
@@ -2289,9 +2434,14 @@ const SCEV *ScalarEvolution::getUnknown(Value *V) {
ID.AddInteger(scUnknown);
ID.AddPointer(V);
void *IP = 0;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVUnknown>();
- new (S) SCEVUnknown(ID, V);
+ if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
+ assert(cast<SCEVUnknown>(S)->getValue() == V &&
+ "Stale SCEVUnknown in uniquing map!");
+ return S;
+ }
+ SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
+ FirstUnknown);
+ FirstUnknown = cast<SCEVUnknown>(S);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -2355,18 +2505,16 @@ const SCEV *ScalarEvolution::getCouldNotCompute() {
const SCEV *ScalarEvolution::getSCEV(Value *V) {
assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
- std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
- if (I != Scalars.end()) return I->second;
+ ValueExprMapType::const_iterator I = ValueExprMap.find(V);
+ if (I != ValueExprMap.end()) return I->second;
const SCEV *S = createSCEV(V);
- Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
- return S;
-}
-/// getIntegerSCEV - Given a SCEVable type, create a constant for the
-/// specified signed integer value and return a SCEV for the constant.
-const SCEV *ScalarEvolution::getIntegerSCEV(int64_t Val, const Type *Ty) {
- const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
- return getConstant(ConstantInt::get(ITy, Val));
+ // The process of creating a SCEV for V may have caused other SCEVs
+ // to have been created, so it's necessary to insert the new entry
+ // from scratch, rather than trying to remember the insert position
+ // above.
+ ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
+ return S;
}
/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
@@ -2399,6 +2547,10 @@ const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
///
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
const SCEV *RHS) {
+ // Fast path: X - X --> 0.
+ if (LHS == RHS)
+ return getConstant(LHS->getType(), 0);
+
// X - Y --> X + -Y
return getAddExpr(LHS, getNegativeSCEV(RHS));
}
@@ -2541,12 +2693,12 @@ PushDefUseChildren(Instruction *I,
// Push the def-use children onto the Worklist stack.
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
UI != UE; ++UI)
- Worklist.push_back(cast<Instruction>(UI));
+ Worklist.push_back(cast<Instruction>(*UI));
}
/// ForgetSymbolicValue - This looks up computed SCEV values for all
/// instructions that depend on the given instruction and removes them from
-/// the Scalars map if they reference SymName. This is used during PHI
+/// the ValueExprMap map if they reference SymName. This is used during PHI
/// resolution.
void
ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
@@ -2559,9 +2711,9 @@ ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
Instruction *I = Worklist.pop_back_val();
if (!Visited.insert(I)) continue;
- std::map<SCEVCallbackVH, const SCEV *>::iterator It =
- Scalars.find(static_cast<Value *>(I));
- if (It != Scalars.end()) {
+ ValueExprMapType::iterator It =
+ ValueExprMap.find(static_cast<Value *>(I));
+ if (It != ValueExprMap.end()) {
// Short-circuit the def-use traversal if the symbolic name
// ceases to appear in expressions.
if (It->second != SymName && !It->second->hasOperand(SymName))
@@ -2578,7 +2730,7 @@ ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
!isa<SCEVUnknown>(It->second) ||
(I != PN && It->second == SymName)) {
ValuesAtScopes.erase(It->second);
- Scalars.erase(It);
+ ValueExprMap.erase(It);
}
}
@@ -2590,23 +2742,37 @@ ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
/// a loop header, making it a potential recurrence, or it doesn't.
///
const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
- if (PN->getNumIncomingValues() == 2) // The loops have been canonicalized.
- if (const Loop *L = LI->getLoopFor(PN->getParent()))
- if (L->getHeader() == PN->getParent()) {
- // If it lives in the loop header, it has two incoming values, one
- // from outside the loop, and one from inside.
- unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
- unsigned BackEdge = IncomingEdge^1;
-
+ if (const Loop *L = LI->getLoopFor(PN->getParent()))
+ if (L->getHeader() == PN->getParent()) {
+ // The loop may have multiple entrances or multiple exits; we can analyze
+ // this phi as an addrec if it has a unique entry value and a unique
+ // backedge value.
+ Value *BEValueV = 0, *StartValueV = 0;
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ Value *V = PN->getIncomingValue(i);
+ if (L->contains(PN->getIncomingBlock(i))) {
+ if (!BEValueV) {
+ BEValueV = V;
+ } else if (BEValueV != V) {
+ BEValueV = 0;
+ break;
+ }
+ } else if (!StartValueV) {
+ StartValueV = V;
+ } else if (StartValueV != V) {
+ StartValueV = 0;
+ break;
+ }
+ }
+ if (BEValueV && StartValueV) {
// While we are analyzing this PHI node, handle its value symbolically.
const SCEV *SymbolicName = getUnknown(PN);
- assert(Scalars.find(PN) == Scalars.end() &&
+ assert(ValueExprMap.find(PN) == ValueExprMap.end() &&
"PHI node already processed?");
- Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
+ ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
// Using this symbolic name for the PHI, analyze the value coming around
// the back-edge.
- Value *BEValueV = PN->getIncomingValue(BackEdge);
const SCEV *BEValue = getSCEV(BEValueV);
// NOTE: If BEValue is loop invariant, we know that the PHI node just
@@ -2650,8 +2816,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
HasNSW = true;
}
- const SCEV *StartVal =
- getSCEV(PN->getIncomingValue(IncomingEdge));
+ const SCEV *StartVal = getSCEV(StartValueV);
const SCEV *PHISCEV =
getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW);
@@ -2665,7 +2830,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// to be symbolic. We now need to go back and purge all of the
// entries for the scalars that use the symbolic expression.
ForgetSymbolicName(PN, SymbolicName);
- Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
+ ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
return PHISCEV;
}
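
The code above classifies a loop-header phi with one unique entry value and one unique backedge value, i.e. phi = phi(Start, phi + Step), as the recurrence {Start,+,Step}. A tiny simulation of why that closed form holds for an affine update (plain integers, a hypothetical trip count):

#include <cassert>

int main() {
  long Start = 5, Step = 3;
  long phi = Start;                        // value on entry to the loop header
  for (long iter = 0; iter < 10; ++iter) {
    // {Start,+,Step} evaluated at this iteration matches the phi's value.
    assert(phi == Start + Step * iter);
    phi += Step;                           // the unique backedge value: phi + Step
  }
  return 0;
}
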
}
@@ -2677,12 +2842,12 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// Because the other in-value of i (0) fits the evolution of BEValue
// i really is an addrec evolution.
if (AddRec->getLoop() == L && AddRec->isAffine()) {
- const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
+ const SCEV *StartVal = getSCEV(StartValueV);
// If StartVal = j.start - j.stride, we can use StartVal as the
// initial step of the addrec evolution.
if (StartVal == getMinusSCEV(AddRec->getOperand(0),
- AddRec->getOperand(1))) {
+ AddRec->getOperand(1))) {
const SCEV *PHISCEV =
getAddRecExpr(StartVal, AddRec->getOperand(1), L);
@@ -2690,14 +2855,13 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// to be symbolic. We now need to go back and purge all of the
// entries for the scalars that use the symbolic expression.
ForgetSymbolicName(PN, SymbolicName);
- Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
+ ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
return PHISCEV;
}
}
}
-
- return SymbolicName;
}
+ }
// If the PHI has a single incoming value, follow that value, unless the
// PHI's incoming blocks are in a different loop, in which case doing so
@@ -2724,15 +2888,19 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
///
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
- bool InBounds = GEP->isInBounds();
+ // Don't blindly transfer the inbounds flag from the GEP instruction to the
+ // Add expression, because the Instruction may be guarded by control flow
+ // and the no-overflow bits may not be valid for the expression in any
+ // context.
+
const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
Value *Base = GEP->getOperand(0);
// Don't attempt to analyze GEPs over unsized objects.
if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
return getUnknown(GEP);
- const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy);
+ const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
gep_type_iterator GTI = gep_type_begin(GEP);
- for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
+ for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()),
E = GEP->op_end();
I != E; ++I) {
Value *Index = *I;
@@ -2740,23 +2908,30 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
// For a struct, add the member offset.
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
- TotalOffset = getAddExpr(TotalOffset,
- getOffsetOfExpr(STy, FieldNo),
- /*HasNUW=*/false, /*HasNSW=*/InBounds);
+ const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
+
+ // Add the field offset to the running total offset.
+ TotalOffset = getAddExpr(TotalOffset, FieldOffset);
} else {
// For an array, add the element offset, explicitly scaled.
- const SCEV *LocalOffset = getSCEV(Index);
+ const SCEV *ElementSize = getSizeOfExpr(*GTI);
+ const SCEV *IndexS = getSCEV(Index);
// Getelementptr indices are signed.
- LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
- // Lower "inbounds" GEPs to NSW arithmetic.
- LocalOffset = getMulExpr(LocalOffset, getSizeOfExpr(*GTI),
- /*HasNUW=*/false, /*HasNSW=*/InBounds);
- TotalOffset = getAddExpr(TotalOffset, LocalOffset,
- /*HasNUW=*/false, /*HasNSW=*/InBounds);
+ IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
+
+ // Multiply the index by the element size to compute the element offset.
+ const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize);
+
+ // Add the element offset to the running total offset.
+ TotalOffset = getAddExpr(TotalOffset, LocalOffset);
}
}
- return getAddExpr(getSCEV(Base), TotalOffset,
- /*HasNUW=*/false, /*HasNSW=*/InBounds);
+
+ // Get the SCEV for the GEP base.
+ const SCEV *BaseS = getSCEV(Base);
+
+ // Add the total offset from all the GEP indices to the base.
+ return getAddExpr(BaseS, TotalOffset);
}
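
The rewritten GEP lowering above just sums a byte offset: each struct index contributes the field's offset and each array index contributes index * sizeof(element), all added to the base pointer. A standalone check of that arithmetic against the compiler's own layout (hypothetical struct, standard C++ only):

#include <cstddef>
#include <cstdint>
#include <cassert>

struct S { int32_t a; int64_t b[4]; };   // hypothetical type for illustration

int main() {
  S s[8] = {};
  // Address of s[2].b[3], i.e. a GEP with indices 2, 1, 3 in LLVM terms.
  char *base = reinterpret_cast<char *>(&s[0]);
  std::size_t offset = 2 * sizeof(S)        // array index, scaled by element size
                     + offsetof(S, b)       // struct field offset
                     + 3 * sizeof(int64_t); // inner array index, scaled
  assert(reinterpret_cast<char *>(&s[2].b[3]) == base + offset);
  return 0;
}
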
/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
@@ -2913,9 +3088,10 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
// initial value.
if (AddRec->hasNoUnsignedWrap())
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
- ConservativeResult =
- ConstantRange(C->getValue()->getValue(),
- APInt(getTypeSizeInBits(C->getType()), 0));
+ if (!C->getValue()->isZero())
+ ConservativeResult =
+ ConservativeResult.intersectWith(
+ ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));
// TODO: non-affine addrec
if (AddRec->isAffine()) {
@@ -2926,14 +3102,26 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
const SCEV *Start = AddRec->getStart();
- const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
+ const SCEV *Step = AddRec->getStepRecurrence(*this);
- // Check for overflow.
- if (!AddRec->hasNoUnsignedWrap())
+ ConstantRange StartRange = getUnsignedRange(Start);
+ ConstantRange StepRange = getSignedRange(Step);
+ ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
+ ConstantRange EndRange =
+ StartRange.add(MaxBECountRange.multiply(StepRange));
+
+ // Check for overflow. This must be done with ConstantRange arithmetic
+ // because we could be called from within the ScalarEvolution overflow
+ // checking code.
+ ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtMaxBECountRange =
+ MaxBECountRange.zextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1);
+ if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
+ ExtEndRange)
return ConservativeResult;
- ConstantRange StartRange = getUnsignedRange(Start);
- ConstantRange EndRange = getUnsignedRange(End);
APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
EndRange.getUnsignedMin());
APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
@@ -3057,14 +3245,26 @@ ScalarEvolution::getSignedRange(const SCEV *S) {
MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
const SCEV *Start = AddRec->getStart();
- const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
+ const SCEV *Step = AddRec->getStepRecurrence(*this);
- // Check for overflow.
- if (!AddRec->hasNoSignedWrap())
+ ConstantRange StartRange = getSignedRange(Start);
+ ConstantRange StepRange = getSignedRange(Step);
+ ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
+ ConstantRange EndRange =
+ StartRange.add(MaxBECountRange.multiply(StepRange));
+
+ // Check for overflow. This must be done with ConstantRange arithmetic
+ // because we could be called from within the ScalarEvolution overflow
+ // checking code.
+ ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtMaxBECountRange =
+ MaxBECountRange.zextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
+ if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
+ ExtEndRange)
return ConservativeResult;
- ConstantRange StartRange = getSignedRange(Start);
- ConstantRange EndRange = getSignedRange(End);
APInt Min = APIntOps::smin(StartRange.getSignedMin(),
EndRange.getSignedMin());
APInt Max = APIntOps::smax(StartRange.getSignedMax(),
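Both range hunks above replace the evaluateAtIteration-based end value with ConstantRange arithmetic: EndRange = StartRange + MaxBECountRange*StepRange is formed from the operand ranges, then the same computation is redone with everything extended to 2*BitWidth+1 bits, and if the two disagree the narrow computation wrapped, so ConservativeResult is kept. A scalar analogue of that check (hypothetical 8-bit values, plain C++, single values rather than ConstantRange sets):

  #include <cstdint>
  #include <cstdio>

  int main() {
    // Hypothetical 8-bit computation of end = start + maxBECount * step.
    const uint8_t Start = 200, Step = 10, MaxBECount = 10;
    uint8_t  Narrow = (uint8_t)(Start + MaxBECount * Step);          // wraps mod 256
    uint32_t Wide   = (uint32_t)Start + (uint32_t)MaxBECount * Step; // exact

    if ((uint32_t)Narrow != Wide)
      std::puts("wrapped: keep the conservative range");             // taken here
    else
      std::printf("end = %u\n", (unsigned)Narrow);
    return 0;
  }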
@@ -3101,16 +3301,21 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
return getUnknown(V);
unsigned Opcode = Instruction::UserOp1;
- if (Instruction *I = dyn_cast<Instruction>(V))
+ if (Instruction *I = dyn_cast<Instruction>(V)) {
Opcode = I->getOpcode();
- else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+
+ // Don't attempt to analyze instructions in blocks that aren't
+ // reachable. Such instructions don't matter, and they aren't required
+ // to obey basic rules for definitions dominating uses which this
+ // analysis depends on.
+ if (!DT->isReachableFromEntry(I->getParent()))
+ return getUnknown(V);
+ } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
Opcode = CE->getOpcode();
else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
return getConstant(CI);
else if (isa<ConstantPointerNull>(V))
- return getIntegerSCEV(0, V->getType());
- else if (isa<UndefValue>(V))
- return getIntegerSCEV(0, V->getType());
+ return getConstant(V->getType(), 0);
else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
else
@@ -3118,18 +3323,42 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
Operator *U = cast<Operator>(V);
switch (Opcode) {
- case Instruction::Add:
- // Don't transfer the NSW and NUW bits from the Add instruction to the
- // Add expression, because the Instruction may be guarded by control
- // flow and the no-overflow bits may not be valid for the expression in
- // any context.
- return getAddExpr(getSCEV(U->getOperand(0)),
- getSCEV(U->getOperand(1)));
- case Instruction::Mul:
- // Don't transfer the NSW and NUW bits from the Mul instruction to the
- // Mul expression, as with Add.
- return getMulExpr(getSCEV(U->getOperand(0)),
- getSCEV(U->getOperand(1)));
+ case Instruction::Add: {
+ // The simple thing to do would be to just call getSCEV on both operands
+ // and call getAddExpr with the result. However if we're looking at a
+ // bunch of things all added together, this can be quite inefficient,
+ // because it leads to N-1 getAddExpr calls for N ultimate operands.
+ // Instead, gather up all the operands and make a single getAddExpr call.
+ // LLVM IR canonical form means we need only traverse the left operands.
+ SmallVector<const SCEV *, 4> AddOps;
+ AddOps.push_back(getSCEV(U->getOperand(1)));
+ for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
+ unsigned Opcode = Op->getValueID() - Value::InstructionVal;
+ if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
+ break;
+ U = cast<Operator>(Op);
+ const SCEV *Op1 = getSCEV(U->getOperand(1));
+ if (Opcode == Instruction::Sub)
+ AddOps.push_back(getNegativeSCEV(Op1));
+ else
+ AddOps.push_back(Op1);
+ }
+ AddOps.push_back(getSCEV(U->getOperand(0)));
+ return getAddExpr(AddOps);
+ }
+ case Instruction::Mul: {
+ // See the Add code above.
+ SmallVector<const SCEV *, 4> MulOps;
+ MulOps.push_back(getSCEV(U->getOperand(1)));
+ for (Value *Op = U->getOperand(0);
+ Op->getValueID() == Instruction::Mul + Value::InstructionVal;
+ Op = U->getOperand(0)) {
+ U = cast<Operator>(Op);
+ MulOps.push_back(getSCEV(U->getOperand(1)));
+ }
+ MulOps.push_back(getSCEV(U->getOperand(0)));
+ return getMulExpr(MulOps);
+ }
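The comment in the Add case is the whole point: for ((a + b) + c) + d, building the SCEV pairwise re-simplifies an intermediate sum N-1 times, whereas walking down the left-hand operands collects {a, b, c, d} and makes one N-ary getAddExpr call (Sub on the spine is folded in by pushing getNegativeSCEV of its right operand). A rough standalone sketch of the left-spine walk, using a hypothetical expression type rather than LLVM IR:

  #include <vector>
  #include <cstdio>

  // Hypothetical expression node: either a leaf value or a binary add.
  struct Expr { bool IsAdd; long Val; Expr *L, *R; };

  // Collect every addend of a left-leaning add chain in one pass,
  // mirroring the loop over U->getOperand(0) in the patch.
  static void collectAddOps(Expr *E, std::vector<long> &Ops) {
    while (E->IsAdd) {
      Ops.push_back(E->R->Val);  // right operand is a leaf in this sketch
      E = E->L;                  // keep walking the left spine
    }
    Ops.push_back(E->Val);       // final left-most leaf
  }

  int main() {
    Expr a{false, 1, nullptr, nullptr}, b{false, 2, nullptr, nullptr};
    Expr c{false, 3, nullptr, nullptr}, d{false, 4, nullptr, nullptr};
    Expr ab{true, 0, &a, &b}, abc{true, 0, &ab, &c}, abcd{true, 0, &abc, &d};
    std::vector<long> Ops;
    collectAddOps(&abcd, Ops);    // {4, 3, 2, 1}
    long Sum = 0;
    for (long V : Ops) Sum += V;  // one N-ary "getAddExpr"
    std::printf("%ld operands, sum %ld\n", (long)Ops.size(), Sum); // 4, 10
    return 0;
  }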
case Instruction::UDiv:
return getUDivExpr(getSCEV(U->getOperand(0)),
getSCEV(U->getOperand(1)));
@@ -3242,8 +3471,16 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// Turn shift left of a constant amount into a multiply.
if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
+
+ // If the shift count is not less than the bitwidth, the result of
+ // the shift is undefined. Don't try to analyze it, because the
+ // resolution chosen here may differ from the resolution chosen in
+ // other parts of the compiler.
+ if (SA->getValue().uge(BitWidth))
+ break;
+
Constant *X = ConstantInt::get(getContext(),
- APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
+ APInt(BitWidth, 1).shl(SA->getZExtValue()));
return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
}
break;
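Two things happen in the Shl hunk: a shift by a constant c is still analyzed as a multiply by 2^c, but a shift count that is not smaller than the bit width is now skipped outright instead of being given a fixed interpretation (the LShr hunk that follows adds the same guard before its divide rewrite). A small sketch of guard plus rewrite, plain C++ on 32-bit values:

  #include <cstdint>
  #include <cstdio>

  // Model of the Shl hunk: x << c is treated as x * (1 << c), but only when
  // c < BitWidth; otherwise the shift is undefined in IR and the analysis
  // simply declines (the "break" in the patch).
  static bool shlAsMul(uint32_t X, uint32_t C, uint64_t &Product) {
    const unsigned BitWidth = 32;
    if (C >= BitWidth)
      return false;                               // undefined shift: don't analyze
    Product = (uint64_t)X * ((uint64_t)1 << C);   // x * 2^c
    return true;
  }

  int main() {
    uint64_t P;
    if (shlAsMul(7, 3, P))
      std::printf("7 << 3 == %llu\n", (unsigned long long)P);   // 56
    if (!shlAsMul(7, 40, P))
      std::puts("shift count >= bitwidth: skipped");
    return 0;
  }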
@@ -3252,8 +3489,16 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// Turn logical shift right of a constant into an unsigned divide.
if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
+
+ // If the shift count is not less than the bitwidth, the result of
+ // the shift is undefined. Don't try to analyze it, because the
+ // resolution chosen here may differ from the resolution chosen in
+ // other parts of the compiler.
+ if (SA->getValue().uge(BitWidth))
+ break;
+
Constant *X = ConstantInt::get(getContext(),
- APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
+ APInt(BitWidth, 1).shl(SA->getZExtValue()));
return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
}
break;
@@ -3261,19 +3506,26 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
case Instruction::AShr:
// For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
- if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
+ if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
if (L->getOpcode() == Instruction::Shl &&
L->getOperand(1) == U->getOperand(1)) {
- unsigned BitWidth = getTypeSizeInBits(U->getType());
+ uint64_t BitWidth = getTypeSizeInBits(U->getType());
+
+ // If the shift count is not less than the bitwidth, the result of
+ // the shift is undefined. Don't try to analyze it, because the
+ // resolution chosen here may differ from the resolution chosen in
+ // other parts of the compiler.
+ if (CI->getValue().uge(BitWidth))
+ break;
+
uint64_t Amt = BitWidth - CI->getZExtValue();
if (Amt == BitWidth)
return getSCEV(L->getOperand(0)); // shift by zero --> noop
- if (Amt > BitWidth)
- return getIntegerSCEV(0, U->getType()); // value is undefined
return
getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
- IntegerType::get(getContext(), Amt)),
- U->getType());
+ IntegerType::get(getContext(),
+ Amt)),
+ U->getType());
}
break;
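The AShr hunk recognizes the usual sign-extend-in-register idiom: (x << n) >>s n keeps the low (BitWidth - n) bits of x and sign-extends them, so it is modeled as sext(trunc(x)) to an (BitWidth - n)-bit type, again guarded so a shift count of BitWidth or more is never analyzed. A concrete 32-bit illustration (plain C++, two's-complement arithmetic shift assumed, as in LLVM IR):

  #include <cstdint>
  #include <cstdio>

  int main() {
    int32_t X = 0x1234FFAB;
    // (x << 24) >>s 24 on 32 bits: arithmetic shift back down.
    int32_t TwoShift  = (int32_t)((uint32_t)X << 24) >> 24;
    // Equivalent sext(trunc(x, i8)) formulation.
    int32_t SextTrunc = (int32_t)(int8_t)(uint8_t)X;
    std::printf("%d %d\n", (int)TwoShift, (int)SextTrunc);   // both -85
    return 0;
  }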
@@ -3316,10 +3568,22 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// fall through
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
- return getSMaxExpr(getSCEV(LHS), getSCEV(RHS));
- else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
- return getSMinExpr(getSCEV(LHS), getSCEV(RHS));
+ // a >s b ? a+x : b+x -> smax(a, b)+x
+ // a >s b ? b+x : a+x -> smin(a, b)+x
+ if (LHS->getType() == U->getType()) {
+ const SCEV *LS = getSCEV(LHS);
+ const SCEV *RS = getSCEV(RHS);
+ const SCEV *LA = getSCEV(U->getOperand(1));
+ const SCEV *RA = getSCEV(U->getOperand(2));
+ const SCEV *LDiff = getMinusSCEV(LA, LS);
+ const SCEV *RDiff = getMinusSCEV(RA, RS);
+ if (LDiff == RDiff)
+ return getAddExpr(getSMaxExpr(LS, RS), LDiff);
+ LDiff = getMinusSCEV(LA, RS);
+ RDiff = getMinusSCEV(RA, LS);
+ if (LDiff == RDiff)
+ return getAddExpr(getSMinExpr(LS, RS), LDiff);
+ }
break;
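Numerically: with a = 3, b = 7, x = 10, the select a >s b ? a+x : b+x yields 7 + 10 = 17, and smax(3, 7) + 10 is likewise 17. The code recognizes the shared x by checking that (a+x) - a and (b+x) - b are the same SCEV (LDiff == RDiff); the second LDiff/RDiff pair catches the swapped-arm form that folds to smin instead.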
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_ULE:
@@ -3327,28 +3591,52 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
// fall through
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE:
- if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
- return getUMaxExpr(getSCEV(LHS), getSCEV(RHS));
- else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
- return getUMinExpr(getSCEV(LHS), getSCEV(RHS));
+ // a >u b ? a+x : b+x -> umax(a, b)+x
+ // a >u b ? b+x : a+x -> umin(a, b)+x
+ if (LHS->getType() == U->getType()) {
+ const SCEV *LS = getSCEV(LHS);
+ const SCEV *RS = getSCEV(RHS);
+ const SCEV *LA = getSCEV(U->getOperand(1));
+ const SCEV *RA = getSCEV(U->getOperand(2));
+ const SCEV *LDiff = getMinusSCEV(LA, LS);
+ const SCEV *RDiff = getMinusSCEV(RA, RS);
+ if (LDiff == RDiff)
+ return getAddExpr(getUMaxExpr(LS, RS), LDiff);
+ LDiff = getMinusSCEV(LA, RS);
+ RDiff = getMinusSCEV(RA, LS);
+ if (LDiff == RDiff)
+ return getAddExpr(getUMinExpr(LS, RS), LDiff);
+ }
break;
case ICmpInst::ICMP_NE:
- // n != 0 ? n : 1 -> umax(n, 1)
- if (LHS == U->getOperand(1) &&
- isa<ConstantInt>(U->getOperand(2)) &&
- cast<ConstantInt>(U->getOperand(2))->isOne() &&
+ // n != 0 ? n+x : 1+x -> umax(n, 1)+x
+ if (LHS->getType() == U->getType() &&
isa<ConstantInt>(RHS) &&
- cast<ConstantInt>(RHS)->isZero())
- return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2)));
+ cast<ConstantInt>(RHS)->isZero()) {
+ const SCEV *One = getConstant(LHS->getType(), 1);
+ const SCEV *LS = getSCEV(LHS);
+ const SCEV *LA = getSCEV(U->getOperand(1));
+ const SCEV *RA = getSCEV(U->getOperand(2));
+ const SCEV *LDiff = getMinusSCEV(LA, LS);
+ const SCEV *RDiff = getMinusSCEV(RA, One);
+ if (LDiff == RDiff)
+ return getAddExpr(getUMaxExpr(One, LS), LDiff);
+ }
break;
case ICmpInst::ICMP_EQ:
- // n == 0 ? 1 : n -> umax(n, 1)
- if (LHS == U->getOperand(2) &&
- isa<ConstantInt>(U->getOperand(1)) &&
- cast<ConstantInt>(U->getOperand(1))->isOne() &&
+ // n == 0 ? 1+x : n+x -> umax(n, 1)+x
+ if (LHS->getType() == U->getType() &&
isa<ConstantInt>(RHS) &&
- cast<ConstantInt>(RHS)->isZero())
- return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1)));
+ cast<ConstantInt>(RHS)->isZero()) {
+ const SCEV *One = getConstant(LHS->getType(), 1);
+ const SCEV *LS = getSCEV(LHS);
+ const SCEV *LA = getSCEV(U->getOperand(1));
+ const SCEV *RA = getSCEV(U->getOperand(2));
+ const SCEV *LDiff = getMinusSCEV(LA, One);
+ const SCEV *RDiff = getMinusSCEV(RA, LS);
+ if (LDiff == RDiff)
+ return getAddExpr(getUMaxExpr(One, LS), LDiff);
+ }
break;
default:
break;
@@ -3444,9 +3732,9 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
Instruction *I = Worklist.pop_back_val();
if (!Visited.insert(I)) continue;
- std::map<SCEVCallbackVH, const SCEV *>::iterator It =
- Scalars.find(static_cast<Value *>(I));
- if (It != Scalars.end()) {
+ ValueExprMapType::iterator It =
+ ValueExprMap.find(static_cast<Value *>(I));
+ if (It != ValueExprMap.end()) {
// SCEVUnknown for a PHI either means that it has an unrecognized
// structure, or it's a PHI that's in the process of being computed
// by createNodeForPHI. In the former case, additional loop trip
@@ -3455,7 +3743,7 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
// own when it gets to that point.
if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) {
ValuesAtScopes.erase(It->second);
- Scalars.erase(It);
+ ValueExprMap.erase(It);
}
if (PHINode *PN = dyn_cast<PHINode>(I))
ConstantEvolutionLoopExitValue.erase(PN);
@@ -3484,11 +3772,10 @@ void ScalarEvolution::forgetLoop(const Loop *L) {
Instruction *I = Worklist.pop_back_val();
if (!Visited.insert(I)) continue;
- std::map<SCEVCallbackVH, const SCEV *>::iterator It =
- Scalars.find(static_cast<Value *>(I));
- if (It != Scalars.end()) {
+ ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
+ if (It != ValueExprMap.end()) {
ValuesAtScopes.erase(It->second);
- Scalars.erase(It);
+ ValueExprMap.erase(It);
if (PHINode *PN = dyn_cast<PHINode>(I))
ConstantEvolutionLoopExitValue.erase(PN);
}
@@ -3513,11 +3800,10 @@ void ScalarEvolution::forgetValue(Value *V) {
I = Worklist.pop_back_val();
if (!Visited.insert(I)) continue;
- std::map<SCEVCallbackVH, const SCEV *>::iterator It =
- Scalars.find(static_cast<Value *>(I));
- if (It != Scalars.end()) {
+ ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
+ if (It != ValueExprMap.end()) {
ValuesAtScopes.erase(It->second);
- Scalars.erase(It);
+ ValueExprMap.erase(It);
if (PHINode *PN = dyn_cast<PHINode>(I))
ConstantEvolutionLoopExitValue.erase(PN);
}
@@ -3661,14 +3947,13 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
else
MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
} else {
- // Both conditions must be true for the loop to exit.
+ // Both conditions must be true at the same time for the loop to exit.
+ // For now, be conservative.
assert(L->contains(FBB) && "Loop block has no successor in loop!");
- if (BTI0.Exact != getCouldNotCompute() &&
- BTI1.Exact != getCouldNotCompute())
- BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
- if (BTI0.Max != getCouldNotCompute() &&
- BTI1.Max != getCouldNotCompute())
- MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
+ if (BTI0.Max == BTI1.Max)
+ MaxBECount = BTI0.Max;
+ if (BTI0.Exact == BTI1.Exact)
+ BECount = BTI0.Exact;
}
return BackedgeTakenInfo(BECount, MaxBECount);
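The "at the same time" wording is the point of this change: BTI0 and BTI1 describe when each condition alone would first permit the exit, and combining them with umax is only sound if a condition stays true once it becomes true. If, say, A holds only on iteration 2 and B only on iteration 5, umax would report a bound of 5 even though A && B never triggers the exit at all, so the conservative code now reuses a sub-count only when the two counts happen to agree.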
@@ -3696,14 +3981,13 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
else
MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
} else {
- // Both conditions must be false for the loop to exit.
+ // Both conditions must be false at the same time for the loop to exit.
+ // For now, be conservative.
assert(L->contains(TBB) && "Loop block has no successor in loop!");
- if (BTI0.Exact != getCouldNotCompute() &&
- BTI1.Exact != getCouldNotCompute())
- BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
- if (BTI0.Max != getCouldNotCompute() &&
- BTI1.Max != getCouldNotCompute())
- MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
+ if (BTI0.Max == BTI1.Max)
+ MaxBECount = BTI0.Max;
+ if (BTI0.Exact == BTI1.Exact)
+ BECount = BTI0.Exact;
}
return BackedgeTakenInfo(BECount, MaxBECount);
@@ -3725,7 +4009,7 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
return getCouldNotCompute();
else
// The backedge is never taken.
- return getIntegerSCEV(0, CI->getType());
+ return getConstant(CI->getType(), 0);
}
// If it's not an integer or pointer comparison then compute it the hard way.
@@ -3772,6 +4056,9 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
Cond = ICmpInst::getSwappedPredicate(Cond);
}
+ // Simplify the operands before analyzing them.
+ (void)SimplifyICmpOperands(Cond, LHS, RHS);
+
// If we have a comparison of a chrec against a constant, try to use value
// ranges to answer this query.
if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
@@ -4000,8 +4287,7 @@ static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
// constant or derived from a PHI node themselves.
PHINode *PHI = 0;
for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
- if (!(isa<Constant>(I->getOperand(Op)) ||
- isa<GlobalValue>(I->getOperand(Op)))) {
+ if (!isa<Constant>(I->getOperand(Op))) {
PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
if (P == 0) return 0; // Not evolving from PHI
if (PHI == 0)
@@ -4022,11 +4308,9 @@ static Constant *EvaluateExpression(Value *V, Constant *PHIVal,
const TargetData *TD) {
if (isa<PHINode>(V)) return PHIVal;
if (Constant *C = dyn_cast<Constant>(V)) return C;
- if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
Instruction *I = cast<Instruction>(V);
- std::vector<Constant*> Operands;
- Operands.resize(I->getNumOperands());
+ std::vector<Constant*> Operands(I->getNumOperands());
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD);
@@ -4048,12 +4332,12 @@ Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
const APInt &BEs,
const Loop *L) {
- std::map<PHINode*, Constant*>::iterator I =
+ std::map<PHINode*, Constant*>::const_iterator I =
ConstantEvolutionLoopExitValue.find(PN);
if (I != ConstantEvolutionLoopExitValue.end())
return I->second;
- if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations)))
+ if (BEs.ugt(MaxBruteForceIterations))
return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it.
Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
@@ -4068,8 +4352,8 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
return RetVal = 0; // Must be a constant.
Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
- PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
- if (PN2 != PN)
+ if (getConstantEvolvingPHI(BEValue, L) != PN &&
+ !isa<Constant>(BEValue))
return RetVal = 0; // Not derived from same PHI.
// Execute the loop symbolically to determine the exit value.
@@ -4104,8 +4388,11 @@ ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
PHINode *PN = getConstantEvolvingPHI(Cond, L);
if (PN == 0) return getCouldNotCompute();
- // Since the loop is canonicalized, the PHI node must have two entries. One
- // entry must be a constant (coming in from outside of the loop), and the
+ // If the loop is canonicalized, the PHI will have exactly two entries.
+ // That's the only form we support here.
+ if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
+
+ // One entry must be a constant (coming in from outside of the loop), and the
// second must be derived from the same PHI.
bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
Constant *StartCST =
@@ -4113,8 +4400,9 @@ ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
if (StartCST == 0) return getCouldNotCompute(); // Must be a constant.
Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
- PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
- if (PN2 != PN) return getCouldNotCompute(); // Not derived from same PHI.
+ if (getConstantEvolvingPHI(BEValue, L) != PN &&
+ !isa<Constant>(BEValue))
+ return getCouldNotCompute(); // Not derived from same PHI.
// Okay, we find a PHI node that defines the trip count of this loop. Execute
// the loop symbolically to determine when the condition gets a value of
@@ -4202,54 +4490,51 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
// the arguments into constants, and if so, try to constant propagate the
// result. This is particularly useful for computing loop exit values.
if (CanConstantFold(I)) {
- std::vector<Constant*> Operands;
- Operands.reserve(I->getNumOperands());
+ SmallVector<Constant *, 4> Operands;
+ bool MadeImprovement = false;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
Value *Op = I->getOperand(i);
if (Constant *C = dyn_cast<Constant>(Op)) {
Operands.push_back(C);
- } else {
- // If any of the operands is non-constant and if they are
- // non-integer and non-pointer, don't even try to analyze them
- // with scev techniques.
- if (!isSCEVable(Op->getType()))
- return V;
-
- const SCEV *OpV = getSCEVAtScope(Op, L);
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
- Constant *C = SC->getValue();
- if (C->getType() != Op->getType())
- C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
- Op->getType(),
- false),
- C, Op->getType());
- Operands.push_back(C);
- } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
- if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
- if (C->getType() != Op->getType())
- C =
- ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
- Op->getType(),
- false),
- C, Op->getType());
- Operands.push_back(C);
- } else
- return V;
- } else {
- return V;
- }
+ continue;
}
+
+ // If any of the operands is non-constant and if they are
+ // non-integer and non-pointer, don't even try to analyze them
+ // with scev techniques.
+ if (!isSCEVable(Op->getType()))
+ return V;
+
+ const SCEV *OrigV = getSCEV(Op);
+ const SCEV *OpV = getSCEVAtScope(OrigV, L);
+ MadeImprovement |= OrigV != OpV;
+
+ Constant *C = 0;
+ if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV))
+ C = SC->getValue();
+ if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV))
+ C = dyn_cast<Constant>(SU->getValue());
+ if (!C) return V;
+ if (C->getType() != Op->getType())
+ C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
+ Op->getType(),
+ false),
+ C, Op->getType());
+ Operands.push_back(C);
}
- Constant *C = 0;
- if (const CmpInst *CI = dyn_cast<CmpInst>(I))
- C = ConstantFoldCompareInstOperands(CI->getPredicate(),
- Operands[0], Operands[1], TD);
- else
- C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
- &Operands[0], Operands.size(), TD);
- if (C)
+ // Check to see if getSCEVAtScope actually made an improvement.
+ if (MadeImprovement) {
+ Constant *C = 0;
+ if (const CmpInst *CI = dyn_cast<CmpInst>(I))
+ C = ConstantFoldCompareInstOperands(CI->getPredicate(),
+ Operands[0], Operands[1], TD);
+ else
+ C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
+ &Operands[0], Operands.size(), TD);
+ if (!C) return V;
return getSCEV(C);
+ }
}
}
@@ -4299,7 +4584,29 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
// If this is a loop recurrence for a loop that does not contain L, then we
// are dealing with the final value computed by the loop.
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
- if (!L || !AddRec->getLoop()->contains(L)) {
+ // First, attempt to evaluate each operand.
+ // Avoid performing the look-up in the common case where the specified
+ // expression has no loop-variant portions.
+ for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
+ const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
+ if (OpAtScope == AddRec->getOperand(i))
+ continue;
+
+ // Okay, at least one of these operands is loop variant but might be
+ // foldable. Build a new instance of the folded commutative expression.
+ SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
+ AddRec->op_begin()+i);
+ NewOps.push_back(OpAtScope);
+ for (++i; i != e; ++i)
+ NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
+
+ AddRec = cast<SCEVAddRecExpr>(getAddRecExpr(NewOps, AddRec->getLoop()));
+ break;
+ }
+
+ // If the scope is outside the addrec's loop, evaluate it by using the
+ // loop exit value of the addrec.
+ if (!AddRec->getLoop()->contains(L)) {
// To evaluate this recurrence, we need to know how many times the AddRec
// loop iterates. Compute this now.
const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
@@ -4308,6 +4615,7 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
// Then, evaluate the AddRec.
return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
}
+
return AddRec;
}
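A worked case for the exit-value path: for the affine addrec {5,+,3} (start 5, step 3) in a loop whose backedge-taken count is a known constant 9, evaluateAtIteration gives 5 + 3*9 = 32, and that is what getSCEVAtScope returns for any scope outside the addrec's loop. The new loop over the operands just makes sure each operand is itself folded at the requested scope before that evaluation happens.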
@@ -4548,7 +4856,7 @@ ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
// already. If so, the backedge will execute zero times.
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
if (!C->getValue()->isNullValue())
- return getIntegerSCEV(0, C->getType());
+ return getConstant(C->getType(), 0);
return getCouldNotCompute(); // Otherwise it will loop infinitely.
}
@@ -4557,41 +4865,26 @@ ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
return getCouldNotCompute();
}
-/// getLoopPredecessor - If the given loop's header has exactly one unique
-/// predecessor outside the loop, return it. Otherwise return null.
-///
-BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
- BasicBlock *Header = L->getHeader();
- BasicBlock *Pred = 0;
- for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
- PI != E; ++PI)
- if (!L->contains(*PI)) {
- if (Pred && Pred != *PI) return 0; // Multiple predecessors.
- Pred = *PI;
- }
- return Pred;
-}
-
/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
/// (which may not be an immediate predecessor) which has exactly one
/// successor from which BB is reachable, or null if no such block is
/// found.
///
-BasicBlock *
+std::pair<BasicBlock *, BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
// If the block has a unique predecessor, then there is no path from the
// predecessor to the block that does not go through the direct edge
// from the predecessor to the block.
if (BasicBlock *Pred = BB->getSinglePredecessor())
- return Pred;
+ return std::make_pair(Pred, BB);
// A loop's header is defined to be a block that dominates the loop.
// If the header has a unique predecessor outside the loop, it must be
// a block that has exactly one successor that can reach the loop.
if (Loop *L = LI->getLoopFor(BB))
- return getLoopPredecessor(L);
+ return std::make_pair(L->getLoopPredecessor(), L->getHeader());
- return 0;
+ return std::pair<BasicBlock *, BasicBlock *>();
}
/// HasSameValue - SCEV structural equivalence is usually sufficient for
@@ -4617,6 +4910,266 @@ static bool HasSameValue(const SCEV *A, const SCEV *B) {
return false;
}
+/// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
+/// predicate Pred. Return true iff any changes were made.
+///
+bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
+ const SCEV *&LHS, const SCEV *&RHS) {
+ bool Changed = false;
+
+ // Canonicalize a constant to the right side.
+ if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
+ // Check for both operands constant.
+ if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
+ if (ConstantExpr::getICmp(Pred,
+ LHSC->getValue(),
+ RHSC->getValue())->isNullValue())
+ goto trivially_false;
+ else
+ goto trivially_true;
+ }
+ // Otherwise swap the operands to put the constant on the right.
+ std::swap(LHS, RHS);
+ Pred = ICmpInst::getSwappedPredicate(Pred);
+ Changed = true;
+ }
+
+ // If we're comparing an addrec with a value which is loop-invariant in the
+ // addrec's loop, put the addrec on the left. Also make a dominance check,
+ // as both operands could be addrecs loop-invariant in each other's loop.
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
+ const Loop *L = AR->getLoop();
+ if (LHS->isLoopInvariant(L) && LHS->properlyDominates(L->getHeader(), DT)) {
+ std::swap(LHS, RHS);
+ Pred = ICmpInst::getSwappedPredicate(Pred);
+ Changed = true;
+ }
+ }
+
+ // If there's a constant operand, canonicalize comparisons with boundary
+ // cases, and canonicalize *-or-equal comparisons to regular comparisons.
+ if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
+ const APInt &RA = RC->getValue()->getValue();
+ switch (Pred) {
+ default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
+ case ICmpInst::ICMP_EQ:
+ case ICmpInst::ICMP_NE:
+ break;
+ case ICmpInst::ICMP_UGE:
+ if ((RA - 1).isMinValue()) {
+ Pred = ICmpInst::ICMP_NE;
+ RHS = getConstant(RA - 1);
+ Changed = true;
+ break;
+ }
+ if (RA.isMaxValue()) {
+ Pred = ICmpInst::ICMP_EQ;
+ Changed = true;
+ break;
+ }
+ if (RA.isMinValue()) goto trivially_true;
+
+ Pred = ICmpInst::ICMP_UGT;
+ RHS = getConstant(RA - 1);
+ Changed = true;
+ break;
+ case ICmpInst::ICMP_ULE:
+ if ((RA + 1).isMaxValue()) {
+ Pred = ICmpInst::ICMP_NE;
+ RHS = getConstant(RA + 1);
+ Changed = true;
+ break;
+ }
+ if (RA.isMinValue()) {
+ Pred = ICmpInst::ICMP_EQ;
+ Changed = true;
+ break;
+ }
+ if (RA.isMaxValue()) goto trivially_true;
+
+ Pred = ICmpInst::ICMP_ULT;
+ RHS = getConstant(RA + 1);
+ Changed = true;
+ break;
+ case ICmpInst::ICMP_SGE:
+ if ((RA - 1).isMinSignedValue()) {
+ Pred = ICmpInst::ICMP_NE;
+ RHS = getConstant(RA - 1);
+ Changed = true;
+ break;
+ }
+ if (RA.isMaxSignedValue()) {
+ Pred = ICmpInst::ICMP_EQ;
+ Changed = true;
+ break;
+ }
+ if (RA.isMinSignedValue()) goto trivially_true;
+
+ Pred = ICmpInst::ICMP_SGT;
+ RHS = getConstant(RA - 1);
+ Changed = true;
+ break;
+ case ICmpInst::ICMP_SLE:
+ if ((RA + 1).isMaxSignedValue()) {
+ Pred = ICmpInst::ICMP_NE;
+ RHS = getConstant(RA + 1);
+ Changed = true;
+ break;
+ }
+ if (RA.isMinSignedValue()) {
+ Pred = ICmpInst::ICMP_EQ;
+ Changed = true;
+ break;
+ }
+ if (RA.isMaxSignedValue()) goto trivially_true;
+
+ Pred = ICmpInst::ICMP_SLT;
+ RHS = getConstant(RA + 1);
+ Changed = true;
+ break;
+ case ICmpInst::ICMP_UGT:
+ if (RA.isMinValue()) {
+ Pred = ICmpInst::ICMP_NE;
+ Changed = true;
+ break;
+ }
+ if ((RA + 1).isMaxValue()) {
+ Pred = ICmpInst::ICMP_EQ;
+ RHS = getConstant(RA + 1);
+ Changed = true;
+ break;
+ }
+ if (RA.isMaxValue()) goto trivially_false;
+ break;
+ case ICmpInst::ICMP_ULT:
+ if (RA.isMaxValue()) {
+ Pred = ICmpInst::ICMP_NE;
+ Changed = true;
+ break;
+ }
+ if ((RA - 1).isMinValue()) {
+ Pred = ICmpInst::ICMP_EQ;
+ RHS = getConstant(RA - 1);
+ Changed = true;
+ break;
+ }
+ if (RA.isMinValue()) goto trivially_false;
+ break;
+ case ICmpInst::ICMP_SGT:
+ if (RA.isMinSignedValue()) {
+ Pred = ICmpInst::ICMP_NE;
+ Changed = true;
+ break;
+ }
+ if ((RA + 1).isMaxSignedValue()) {
+ Pred = ICmpInst::ICMP_EQ;
+ RHS = getConstant(RA + 1);
+ Changed = true;
+ break;
+ }
+ if (RA.isMaxSignedValue()) goto trivially_false;
+ break;
+ case ICmpInst::ICMP_SLT:
+ if (RA.isMaxSignedValue()) {
+ Pred = ICmpInst::ICMP_NE;
+ Changed = true;
+ break;
+ }
+ if ((RA - 1).isMinSignedValue()) {
+ Pred = ICmpInst::ICMP_EQ;
+ RHS = getConstant(RA - 1);
+ Changed = true;
+ break;
+ }
+ if (RA.isMinSignedValue()) goto trivially_false;
+ break;
+ }
+ }
+
+ // Check for obvious equality.
+ if (HasSameValue(LHS, RHS)) {
+ if (ICmpInst::isTrueWhenEqual(Pred))
+ goto trivially_true;
+ if (ICmpInst::isFalseWhenEqual(Pred))
+ goto trivially_false;
+ }
+
+ // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
+ // adding or subtracting 1 from one of the operands.
+ switch (Pred) {
+ case ICmpInst::ICMP_SLE:
+ if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
+ RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
+ /*HasNUW=*/false, /*HasNSW=*/true);
+ Pred = ICmpInst::ICMP_SLT;
+ Changed = true;
+ } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
+ LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
+ /*HasNUW=*/false, /*HasNSW=*/true);
+ Pred = ICmpInst::ICMP_SLT;
+ Changed = true;
+ }
+ break;
+ case ICmpInst::ICMP_SGE:
+ if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
+ RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
+ /*HasNUW=*/false, /*HasNSW=*/true);
+ Pred = ICmpInst::ICMP_SGT;
+ Changed = true;
+ } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
+ LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
+ /*HasNUW=*/false, /*HasNSW=*/true);
+ Pred = ICmpInst::ICMP_SGT;
+ Changed = true;
+ }
+ break;
+ case ICmpInst::ICMP_ULE:
+ if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
+ RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
+ /*HasNUW=*/true, /*HasNSW=*/false);
+ Pred = ICmpInst::ICMP_ULT;
+ Changed = true;
+ } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
+ LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
+ /*HasNUW=*/true, /*HasNSW=*/false);
+ Pred = ICmpInst::ICMP_ULT;
+ Changed = true;
+ }
+ break;
+ case ICmpInst::ICMP_UGE:
+ if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
+ RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
+ /*HasNUW=*/true, /*HasNSW=*/false);
+ Pred = ICmpInst::ICMP_UGT;
+ Changed = true;
+ } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
+ LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
+ /*HasNUW=*/true, /*HasNSW=*/false);
+ Pred = ICmpInst::ICMP_UGT;
+ Changed = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ // TODO: More simplifications are possible here.
+
+ return Changed;
+
+trivially_true:
+ // Return 0 == 0.
+ LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0);
+ Pred = ICmpInst::ICMP_EQ;
+ return true;
+
+trivially_false:
+ // Return 0 != 0.
+ LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0);
+ Pred = ICmpInst::ICMP_NE;
+ return true;
+}
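In effect SimplifyICmpOperands canonicalizes a comparison before any implication reasoning is attempted: constants move to the right (5 <u x becomes x >u 5), boundary constants collapse the predicate (x uge 1 becomes x ne 0, x ule 0 becomes x eq 0), *-or-equal forms become strict by bumping the constant (x ule 7 becomes x ult 8), obvious tautologies and contradictions are rewritten to 0 == 0 and 0 != 0 respectively, and non-constant GE/LE comparisons become GT/LT by adding or subtracting 1 on whichever side the ranges show cannot wrap.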
+
bool ScalarEvolution::isKnownNegative(const SCEV *S) {
return getSignedRange(S).getSignedMax().isNegative();
}
@@ -4639,10 +5192,36 @@ bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS) {
+ // Canonicalize the inputs first.
+ (void)SimplifyICmpOperands(Pred, LHS, RHS);
+
+ // If LHS or RHS is an addrec, check to see if the condition is true in
+ // every iteration of the loop.
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
+ if (isLoopEntryGuardedByCond(
+ AR->getLoop(), Pred, AR->getStart(), RHS) &&
+ isLoopBackedgeGuardedByCond(
+ AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS))
+ return true;
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS))
+ if (isLoopEntryGuardedByCond(
+ AR->getLoop(), Pred, LHS, AR->getStart()) &&
+ isLoopBackedgeGuardedByCond(
+ AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this)))
+ return true;
+
+ // Otherwise see what can be done with known constant ranges.
+ return isKnownPredicateWithRanges(Pred, LHS, RHS);
+}
+bool
+ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS) {
if (HasSameValue(LHS, RHS))
return ICmpInst::isTrueWhenEqual(Pred);
+ // This code is split out from isKnownPredicate because it is called from
+ // within isLoopEntryGuardedByCond.
switch (Pred) {
default:
llvm_unreachable("Unexpected ICmpInst::Predicate value!");
@@ -4735,39 +5314,39 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
LoopContinuePredicate->isUnconditional())
return false;
- return isImpliedCond(LoopContinuePredicate->getCondition(), Pred, LHS, RHS,
+ return isImpliedCond(Pred, LHS, RHS,
+ LoopContinuePredicate->getCondition(),
LoopContinuePredicate->getSuccessor(0) != L->getHeader());
}
-/// isLoopGuardedByCond - Test whether entry to the loop is protected
+/// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
/// by a conditional between LHS and RHS. This is used to help avoid max
/// expressions in loop trip counts, and to eliminate casts.
bool
-ScalarEvolution::isLoopGuardedByCond(const Loop *L,
- ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS) {
+ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
+ ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS) {
// Interpret a null as meaning no loop, where there is obviously no guard
// (interprocedural conditions notwithstanding).
if (!L) return false;
- BasicBlock *Predecessor = getLoopPredecessor(L);
- BasicBlock *PredecessorDest = L->getHeader();
-
// Starting at the loop predecessor, climb up the predecessor chain, as long
// as there are predecessors that can be found that have unique successors
// leading to the original header.
- for (; Predecessor;
- PredecessorDest = Predecessor,
- Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {
+ for (std::pair<BasicBlock *, BasicBlock *>
+ Pair(L->getLoopPredecessor(), L->getHeader());
+ Pair.first;
+ Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
BranchInst *LoopEntryPredicate =
- dyn_cast<BranchInst>(Predecessor->getTerminator());
+ dyn_cast<BranchInst>(Pair.first->getTerminator());
if (!LoopEntryPredicate ||
LoopEntryPredicate->isUnconditional())
continue;
- if (isImpliedCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
- LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
+ if (isImpliedCond(Pred, LHS, RHS,
+ LoopEntryPredicate->getCondition(),
+ LoopEntryPredicate->getSuccessor(0) != Pair.second))
return true;
}
@@ -4776,24 +5355,24 @@ ScalarEvolution::isLoopGuardedByCond(const Loop *L,
/// isImpliedCond - Test whether the condition described by Pred, LHS,
/// and RHS is true whenever the given Cond value evaluates to true.
-bool ScalarEvolution::isImpliedCond(Value *CondValue,
- ICmpInst::Predicate Pred,
+bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS,
+ Value *FoundCondValue,
bool Inverse) {
// Recursively handle And and Or conditions.
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
if (BO->getOpcode() == Instruction::And) {
if (!Inverse)
- return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
- isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
+ return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
+ isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
} else if (BO->getOpcode() == Instruction::Or) {
if (Inverse)
- return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
- isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
+ return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
+ isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
}
}
- ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
+ ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
if (!ICI) return false;
// Bail if the ICmp's operands' types are wider than the needed type
@@ -4831,117 +5410,12 @@ bool ScalarEvolution::isImpliedCond(Value *CondValue,
// Canonicalize the query to match the way instcombine will have
// canonicalized the comparison.
- // First, put a constant operand on the right.
- if (isa<SCEVConstant>(LHS)) {
- std::swap(LHS, RHS);
- Pred = ICmpInst::getSwappedPredicate(Pred);
- }
- // Then, canonicalize comparisons with boundary cases.
- if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
- const APInt &RA = RC->getValue()->getValue();
- switch (Pred) {
- default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
- case ICmpInst::ICMP_EQ:
- case ICmpInst::ICMP_NE:
- break;
- case ICmpInst::ICMP_UGE:
- if ((RA - 1).isMinValue()) {
- Pred = ICmpInst::ICMP_NE;
- RHS = getConstant(RA - 1);
- break;
- }
- if (RA.isMaxValue()) {
- Pred = ICmpInst::ICMP_EQ;
- break;
- }
- if (RA.isMinValue()) return true;
- break;
- case ICmpInst::ICMP_ULE:
- if ((RA + 1).isMaxValue()) {
- Pred = ICmpInst::ICMP_NE;
- RHS = getConstant(RA + 1);
- break;
- }
- if (RA.isMinValue()) {
- Pred = ICmpInst::ICMP_EQ;
- break;
- }
- if (RA.isMaxValue()) return true;
- break;
- case ICmpInst::ICMP_SGE:
- if ((RA - 1).isMinSignedValue()) {
- Pred = ICmpInst::ICMP_NE;
- RHS = getConstant(RA - 1);
- break;
- }
- if (RA.isMaxSignedValue()) {
- Pred = ICmpInst::ICMP_EQ;
- break;
- }
- if (RA.isMinSignedValue()) return true;
- break;
- case ICmpInst::ICMP_SLE:
- if ((RA + 1).isMaxSignedValue()) {
- Pred = ICmpInst::ICMP_NE;
- RHS = getConstant(RA + 1);
- break;
- }
- if (RA.isMinSignedValue()) {
- Pred = ICmpInst::ICMP_EQ;
- break;
- }
- if (RA.isMaxSignedValue()) return true;
- break;
- case ICmpInst::ICMP_UGT:
- if (RA.isMinValue()) {
- Pred = ICmpInst::ICMP_NE;
- break;
- }
- if ((RA + 1).isMaxValue()) {
- Pred = ICmpInst::ICMP_EQ;
- RHS = getConstant(RA + 1);
- break;
- }
- if (RA.isMaxValue()) return false;
- break;
- case ICmpInst::ICMP_ULT:
- if (RA.isMaxValue()) {
- Pred = ICmpInst::ICMP_NE;
- break;
- }
- if ((RA - 1).isMinValue()) {
- Pred = ICmpInst::ICMP_EQ;
- RHS = getConstant(RA - 1);
- break;
- }
- if (RA.isMinValue()) return false;
- break;
- case ICmpInst::ICMP_SGT:
- if (RA.isMinSignedValue()) {
- Pred = ICmpInst::ICMP_NE;
- break;
- }
- if ((RA + 1).isMaxSignedValue()) {
- Pred = ICmpInst::ICMP_EQ;
- RHS = getConstant(RA + 1);
- break;
- }
- if (RA.isMaxSignedValue()) return false;
- break;
- case ICmpInst::ICMP_SLT:
- if (RA.isMaxSignedValue()) {
- Pred = ICmpInst::ICMP_NE;
- break;
- }
- if ((RA - 1).isMinSignedValue()) {
- Pred = ICmpInst::ICMP_EQ;
- RHS = getConstant(RA - 1);
- break;
- }
- if (RA.isMinSignedValue()) return false;
- break;
- }
- }
+ if (SimplifyICmpOperands(Pred, LHS, RHS))
+ if (LHS == RHS)
+ return CmpInst::isTrueWhenEqual(Pred);
+ if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
+ if (FoundLHS == FoundRHS)
+ return CmpInst::isFalseWhenEqual(Pred);
// Check to see if we can make the LHS or RHS match.
if (LHS == FoundRHS || RHS == FoundLHS) {
@@ -5014,26 +5488,26 @@ ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
break;
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
- if (isKnownPredicate(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
- isKnownPredicate(ICmpInst::ICMP_SGE, RHS, FoundRHS))
+ if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
+ isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
return true;
break;
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- if (isKnownPredicate(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
- isKnownPredicate(ICmpInst::ICMP_SLE, RHS, FoundRHS))
+ if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
+ isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
return true;
break;
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_ULE:
- if (isKnownPredicate(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
- isKnownPredicate(ICmpInst::ICMP_UGE, RHS, FoundRHS))
+ if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
+ isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
return true;
break;
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE:
- if (isKnownPredicate(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
- isKnownPredicate(ICmpInst::ICMP_ULE, RHS, FoundRHS))
+ if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
+ isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
return true;
break;
}
@@ -5052,7 +5526,7 @@ const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
"This code doesn't handle negative strides yet!");
const Type *Ty = Start->getType();
- const SCEV *NegOne = getIntegerSCEV(-1, Ty);
+ const SCEV *NegOne = getConstant(Ty, (uint64_t)-1);
const SCEV *Diff = getMinusSCEV(End, Start);
const SCEV *RoundUp = getAddExpr(Step, NegOne);
@@ -5108,7 +5582,7 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
// behavior, so if wrap does occur, the loop could either terminate or
// loop infinitely, but in either case, the loop is guaranteed to
// iterate at least until the iteration where the wrapping occurs.
- const SCEV *One = getIntegerSCEV(1, Step->getType());
+ const SCEV *One = getConstant(Step->getType(), 1);
if (isSigned) {
APInt Max = APInt::getSignedMaxValue(BitWidth);
if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
@@ -5142,10 +5616,10 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
// only know that it will execute (max(m,n)-n)/s times. In both cases,
// the division must round up.
const SCEV *End = RHS;
- if (!isLoopGuardedByCond(L,
- isSigned ? ICmpInst::ICMP_SLT :
- ICmpInst::ICMP_ULT,
- getMinusSCEV(Start, Step), RHS))
+ if (!isLoopEntryGuardedByCond(L,
+ isSigned ? ICmpInst::ICMP_SLT :
+ ICmpInst::ICMP_ULT,
+ getMinusSCEV(Start, Step), RHS))
End = isSigned ? getSMaxExpr(RHS, Start)
: getUMaxExpr(RHS, Start);
@@ -5159,7 +5633,7 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
// This allows the subsequent ceiling division of (N+(step-1))/step to
// compute the correct value.
const SCEV *StepMinusOne = getMinusSCEV(Step,
- getIntegerSCEV(1, Step->getType()));
+ getConstant(Step->getType(), 1));
MaxEnd = isSigned ?
getSMinExpr(MaxEnd,
getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
@@ -5196,7 +5670,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
if (!SC->getValue()->isZero()) {
SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
- Operands[0] = SE.getIntegerSCEV(0, SC->getType());
+ Operands[0] = SE.getConstant(SC->getType(), 0);
const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
if (const SCEVAddRecExpr *ShiftedAddRec =
dyn_cast<SCEVAddRecExpr>(Shifted))
@@ -5220,7 +5694,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
// iteration exits.
unsigned BitWidth = SE.getTypeSizeInBits(getType());
if (!Range.contains(APInt(BitWidth, 0)))
- return SE.getIntegerSCEV(0, getType());
+ return SE.getConstant(getType(), 0);
if (isAffine()) {
// If this is an affine expression then we have this situation:
@@ -5315,20 +5789,19 @@ void ScalarEvolution::SCEVCallbackVH::deleted() {
assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
SE->ConstantEvolutionLoopExitValue.erase(PN);
- SE->Scalars.erase(getValPtr());
+ SE->ValueExprMap.erase(getValPtr());
// this now dangles!
}
-void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
+void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
// Forget all the expressions associated with users of the old value,
// so that future queries will recompute the expressions using the new
// value.
+ Value *Old = getValPtr();
SmallVector<User *, 16> Worklist;
SmallPtrSet<User *, 8> Visited;
- Value *Old = getValPtr();
- bool DeleteOld = false;
for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
UI != UE; ++UI)
Worklist.push_back(*UI);
@@ -5336,27 +5809,22 @@ void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
User *U = Worklist.pop_back_val();
// Deleting the Old value will cause this to dangle. Postpone
// that until everything else is done.
- if (U == Old) {
- DeleteOld = true;
+ if (U == Old)
continue;
- }
if (!Visited.insert(U))
continue;
if (PHINode *PN = dyn_cast<PHINode>(U))
SE->ConstantEvolutionLoopExitValue.erase(PN);
- SE->Scalars.erase(U);
+ SE->ValueExprMap.erase(U);
for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
UI != UE; ++UI)
Worklist.push_back(*UI);
}
- // Delete the Old value if it (indirectly) references itself.
- if (DeleteOld) {
- if (PHINode *PN = dyn_cast<PHINode>(Old))
- SE->ConstantEvolutionLoopExitValue.erase(PN);
- SE->Scalars.erase(Old);
- // this now dangles!
- }
- // this may dangle!
+ // Delete the Old value.
+ if (PHINode *PN = dyn_cast<PHINode>(Old))
+ SE->ConstantEvolutionLoopExitValue.erase(PN);
+ SE->ValueExprMap.erase(Old);
+ // this now dangles!
}
ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
@@ -5367,7 +5835,7 @@ ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
//===----------------------------------------------------------------------===//
ScalarEvolution::ScalarEvolution()
- : FunctionPass(&ID) {
+ : FunctionPass(ID), FirstUnknown(0) {
}
bool ScalarEvolution::runOnFunction(Function &F) {
@@ -5379,7 +5847,13 @@ bool ScalarEvolution::runOnFunction(Function &F) {
}
void ScalarEvolution::releaseMemory() {
- Scalars.clear();
+ // Iterate through all the SCEVUnknown instances and call their
+ // destructors, so that they release their references to their values.
+ for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
+ U->~SCEVUnknown();
+ FirstUnknown = 0;
+
+ ValueExprMap.clear();
BackedgeTakenCounts.clear();
ConstantEvolutionLoopExitValue.clear();
ValuesAtScopes.clear();
@@ -5445,7 +5919,7 @@ void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
WriteAsOperand(OS, F, /*PrintType=*/false);
OS << "\n";
for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
- if (isSCEVable(I->getType())) {
+ if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
OS << *I << '\n';
OS << " --> ";
const SCEV *SV = SE.getSCEV(&*I);
diff --git a/libclamav/c++/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/libclamav/c++/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
index 17b254f..93b2a8b 100644
--- a/libclamav/c++/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
@@ -12,7 +12,7 @@
//
// This differs from traditional loop dependence analysis in that it tests
// for dependencies within a single iteration of a loop, rather than
-// dependences between different iterations.
+// dependencies between different iterations.
//
// ScalarEvolution has a more complete understanding of pointer arithmetic
// than BasicAliasAnalysis' collection of ad-hoc analyses.
@@ -34,14 +34,14 @@ namespace {
public:
static char ID; // Class identification, replacement for typeinfo
- ScalarEvolutionAliasAnalysis() : FunctionPass(&ID), SE(0) {}
+ ScalarEvolutionAliasAnalysis() : FunctionPass(ID), SE(0) {}
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
- if (PI->isPassID(&AliasAnalysis::ID))
+ virtual void *getAdjustedAnalysisPointer(AnalysisID PI) {
+ if (PI == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
@@ -58,11 +58,8 @@ namespace {
// Register this pass...
char ScalarEvolutionAliasAnalysis::ID = 0;
-static RegisterPass<ScalarEvolutionAliasAnalysis>
-X("scev-aa", "ScalarEvolution-based Alias Analysis", false, true);
-
-// Declare that we implement the AliasAnalysis interface
-static RegisterAnalysisGroup<AliasAnalysis> Y(X);
+INITIALIZE_AG_PASS(ScalarEvolutionAliasAnalysis, AliasAnalysis, "scev-aa",
+ "ScalarEvolution-based Alias Analysis", false, true, false);
FunctionPass *llvm::createScalarEvolutionAliasAnalysisPass() {
return new ScalarEvolutionAliasAnalysis();
@@ -106,6 +103,12 @@ ScalarEvolutionAliasAnalysis::GetBaseValue(const SCEV *S) {
AliasAnalysis::AliasResult
ScalarEvolutionAliasAnalysis::alias(const Value *A, unsigned ASize,
const Value *B, unsigned BSize) {
+ // If either of the memory references is empty, it doesn't matter what the
+ // pointer values are. This allows the code below to ignore this special
+ // case.
+ if (ASize == 0 || BSize == 0)
+ return NoAlias;
+
// This is ScalarEvolutionAliasAnalysis. Get the SCEVs!
const SCEV *AS = SE->getSCEV(const_cast<Value *>(A));
const SCEV *BS = SE->getSCEV(const_cast<Value *>(B));
@@ -118,14 +121,32 @@ ScalarEvolutionAliasAnalysis::alias(const Value *A, unsigned ASize,
if (SE->getEffectiveSCEVType(AS->getType()) ==
SE->getEffectiveSCEVType(BS->getType())) {
unsigned BitWidth = SE->getTypeSizeInBits(AS->getType());
- APInt AI(BitWidth, ASize);
+ APInt ASizeInt(BitWidth, ASize);
+ APInt BSizeInt(BitWidth, BSize);
+
+ // Compute the difference between the two pointers.
const SCEV *BA = SE->getMinusSCEV(BS, AS);
- if (AI.ule(SE->getUnsignedRange(BA).getUnsignedMin())) {
- APInt BI(BitWidth, BSize);
- const SCEV *AB = SE->getMinusSCEV(AS, BS);
- if (BI.ule(SE->getUnsignedRange(AB).getUnsignedMin()))
- return NoAlias;
- }
+
+ // Test whether the difference is known to be great enough that memory of
+ // the given sizes don't overlap. This assumes that ASizeInt and BSizeInt
+ // are non-zero, which is special-cased above.
+ if (ASizeInt.ule(SE->getUnsignedRange(BA).getUnsignedMin()) &&
+ (-BSizeInt).uge(SE->getUnsignedRange(BA).getUnsignedMax()))
+ return NoAlias;
+
+ // Folding the subtraction while preserving range information can be tricky
+ // (because of INT_MIN, etc.); if the prior test failed, swap AS and BS
+ // and try again to see if things fold better that way.
+
+ // Compute the difference between the two pointers.
+ const SCEV *AB = SE->getMinusSCEV(AS, BS);
+
+ // Test whether the difference is known to be great enough that memory of
+ // the given sizes don't overlap. This assumes that ASizeInt and BSizeInt
+ // are non-zero, which is special-cased above.
+ if (BSizeInt.ule(SE->getUnsignedRange(AB).getUnsignedMin()) &&
+ (-ASizeInt).uge(SE->getUnsignedRange(AB).getUnsignedMax()))
+ return NoAlias;
}
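Reading the two symmetric tests together: with pointer width n, NoAlias is concluded when the unsigned range of B - A is known to lie within [ASize, 2^n - BSize], i.e. B begins at or beyond the end of A's ASize bytes, or, wrapping around, B's BSize bytes end at or before A begins. For example, with ASize = BSize = 4 and getUnsignedRange(B - A) = [8, 100] on 32-bit pointers, both halves of the first test hold (4 <= 8 and 2^32 - 4 >= 100), so the two accesses cannot overlap.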
// If ScalarEvolution can find an underlying object, form a new query.
@@ -134,8 +155,8 @@ ScalarEvolutionAliasAnalysis::alias(const Value *A, unsigned ASize,
Value *AO = GetBaseValue(AS);
Value *BO = GetBaseValue(BS);
if ((AO && AO != A) || (BO && BO != B))
- if (alias(AO ? AO : A, AO ? ~0u : ASize,
- BO ? BO : B, BO ? ~0u : BSize) == NoAlias)
+ if (alias(AO ? AO : A, AO ? UnknownSize : ASize,
+ BO ? BO : B, BO ? UnknownSize : BSize) == NoAlias)
return NoAlias;
// Forward the query to the next analysis.
diff --git a/libclamav/c++/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/libclamav/c++/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index e27da96..66a06ae 100644
--- a/libclamav/c++/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -21,6 +21,43 @@
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
+/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
+/// reusing an existing cast if a suitable one exists, moving an existing
+/// cast if a suitable one exists but isn't in the right place, or
+/// creating a new one.
+Value *SCEVExpander::ReuseOrCreateCast(Value *V, const Type *Ty,
+ Instruction::CastOps Op,
+ BasicBlock::iterator IP) {
+ // Check to see if there is already a cast!
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
+ UI != E; ++UI) {
+ User *U = *UI;
+ if (U->getType() == Ty)
+ if (CastInst *CI = dyn_cast<CastInst>(U))
+ if (CI->getOpcode() == Op) {
+ // If the cast isn't where we want it, fix it.
+ if (BasicBlock::iterator(CI) != IP) {
+ // Create a new cast, and leave the old cast in place in case
+ // it is being used as an insert point. Clear its operand
+ // so that it doesn't hold anything live.
+ Instruction *NewCI = CastInst::Create(Op, V, Ty, "", IP);
+ NewCI->takeName(CI);
+ CI->replaceAllUsesWith(NewCI);
+ CI->setOperand(0, UndefValue::get(V->getType()));
+ rememberInstruction(NewCI);
+ return NewCI;
+ }
+ rememberInstruction(CI);
+ return CI;
+ }
+ }
+
+ // Create a new cast.
+ Instruction *I = CastInst::Create(Op, V, Ty, V->getName(), IP);
+ rememberInstruction(I);
+ return I;
+}
+
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
@@ -54,71 +91,29 @@ Value *SCEVExpander::InsertNoopCastOfTo(Value *V, const Type *Ty) {
return CE->getOperand(0);
}
+ // Fold a cast of a constant.
if (Constant *C = dyn_cast<Constant>(V))
return ConstantExpr::getCast(Op, C, Ty);
+ // Cast the argument at the beginning of the entry block, after
+ // any bitcasts of other arguments.
if (Argument *A = dyn_cast<Argument>(V)) {
- // Check to see if there is already a cast!
- for (Value::use_iterator UI = A->use_begin(), E = A->use_end();
- UI != E; ++UI)
- if ((*UI)->getType() == Ty)
- if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
- if (CI->getOpcode() == Op) {
- // If the cast isn't the first instruction of the function, move it.
- if (BasicBlock::iterator(CI) !=
- A->getParent()->getEntryBlock().begin()) {
- // Recreate the cast at the beginning of the entry block.
- // The old cast is left in place in case it is being used
- // as an insert point.
- Instruction *NewCI =
- CastInst::Create(Op, V, Ty, "",
- A->getParent()->getEntryBlock().begin());
- NewCI->takeName(CI);
- CI->replaceAllUsesWith(NewCI);
- return NewCI;
- }
- return CI;
- }
-
- Instruction *I = CastInst::Create(Op, V, Ty, V->getName(),
- A->getParent()->getEntryBlock().begin());
- rememberInstruction(I);
- return I;
+ BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
+ while ((isa<BitCastInst>(IP) &&
+ isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
+ cast<BitCastInst>(IP)->getOperand(0) != A) ||
+ isa<DbgInfoIntrinsic>(IP))
+ ++IP;
+ return ReuseOrCreateCast(A, Ty, Op, IP);
}
+ // Cast the instruction immediately after the instruction.
Instruction *I = cast<Instruction>(V);
-
- // Check to see if there is already a cast. If there is, use it.
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
- UI != E; ++UI) {
- if ((*UI)->getType() == Ty)
- if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
- if (CI->getOpcode() == Op) {
- BasicBlock::iterator It = I; ++It;
- if (isa<InvokeInst>(I))
- It = cast<InvokeInst>(I)->getNormalDest()->begin();
- while (isa<PHINode>(It)) ++It;
- if (It != BasicBlock::iterator(CI)) {
- // Recreate the cast after the user.
- // The old cast is left in place in case it is being used
- // as an insert point.
- Instruction *NewCI = CastInst::Create(Op, V, Ty, "", It);
- NewCI->takeName(CI);
- CI->replaceAllUsesWith(NewCI);
- rememberInstruction(NewCI);
- return NewCI;
- }
- rememberInstruction(CI);
- return CI;
- }
- }
BasicBlock::iterator IP = I; ++IP;
if (InvokeInst *II = dyn_cast<InvokeInst>(I))
IP = II->getNormalDest()->begin();
- while (isa<PHINode>(IP)) ++IP;
- Instruction *CI = CastInst::Create(Op, V, Ty, V->getName(), IP);
- rememberInstruction(CI);
- return CI;
+ while (isa<PHINode>(IP) || isa<DbgInfoIntrinsic>(IP)) ++IP;
+ return ReuseOrCreateCast(I, Ty, Op, IP);
}
/// InsertBinop - Insert the specified binary operator, doing a small amount
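
A note on the refactoring above: the user-scan-then-create logic that used to be duplicated for the Argument and Instruction cases now lives in ReuseOrCreateCast. The following is a minimal standalone sketch of that pattern, using plain structs in place of llvm::Value and llvm::CastInst (the names and types below are illustrative only, not the LLVM API):

#include <memory>
#include <string>
#include <vector>

// Toy stand-ins for llvm::Value and a cast instruction.
struct Node {
  std::string type;          // e.g. "i64"
  std::string castOp;        // e.g. "ptrtoint"; empty for non-casts
  Node *operand = nullptr;   // the value being cast, if this is a cast
  std::vector<Node *> users; // values that use this node
};

// Return an existing cast of V with the requested opcode and type if one is
// already among V's users; otherwise create one and record it as a user.
Node *reuseOrCreateCast(Node &v, const std::string &op, const std::string &ty,
                        std::vector<std::unique_ptr<Node>> &owner) {
  for (Node *u : v.users)
    if (u->castOp == op && u->type == ty)
      return u;                       // reuse the suitable existing cast
  auto c = std::make_unique<Node>();
  c->type = ty;
  c->castOp = op;
  c->operand = &v;
  Node *raw = c.get();
  owner.push_back(std::move(c));
  v.users.push_back(raw);             // the new cast becomes a user of V
  return raw;
}
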
@@ -192,7 +187,7 @@ static bool FactorOutConstant(const SCEV *&S,
// x/x == 1.
if (S == Factor) {
- S = SE.getIntegerSCEV(1, S->getType());
+ S = SE.getConstant(S->getType(), 1);
return true;
}
@@ -232,9 +227,7 @@ static bool FactorOutConstant(const SCEV *&S,
const SCEVConstant *FC = cast<SCEVConstant>(Factor);
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
- const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
- SmallVector<const SCEV *, 4> NewMulOps(MOperands.begin(),
- MOperands.end());
+ SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
NewMulOps[0] =
SE.getConstant(C->getValue()->getValue().sdiv(
FC->getValue()->getValue()));
@@ -246,12 +239,10 @@ static bool FactorOutConstant(const SCEV *&S,
// Mul's operands. If so, we can just remove it.
for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
const SCEV *SOp = M->getOperand(i);
- const SCEV *Remainder = SE.getIntegerSCEV(0, SOp->getType());
+ const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
Remainder->isZero()) {
- const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
- SmallVector<const SCEV *, 4> NewMulOps(MOperands.begin(),
- MOperands.end());
+ SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
NewMulOps[i] = SOp;
S = SE.getMulExpr(NewMulOps);
return true;
@@ -263,7 +254,7 @@ static bool FactorOutConstant(const SCEV *&S,
// In an AddRec, check if both start and step are divisible.
if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
const SCEV *Step = A->getStepRecurrence(SE);
- const SCEV *StepRem = SE.getIntegerSCEV(0, Step->getType());
+ const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
return false;
if (!StepRem->isZero())
@@ -293,19 +284,17 @@ static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
// Let ScalarEvolution sort and simplify the non-addrecs list.
const SCEV *Sum = NoAddRecs.empty() ?
- SE.getIntegerSCEV(0, Ty) :
+ SE.getConstant(Ty, 0) :
SE.getAddExpr(NoAddRecs);
// If it returned an add, use the operands. Otherwise it simplified
// the sum into a single value, so just use that.
+ Ops.clear();
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
- Ops = Add->getOperands();
- else {
- Ops.clear();
- if (!Sum->isZero())
- Ops.push_back(Sum);
- }
+ Ops.append(Add->op_begin(), Add->op_end());
+ else if (!Sum->isZero())
+ Ops.push_back(Sum);
// Then append the addrecs.
- Ops.insert(Ops.end(), AddRecs.begin(), AddRecs.end());
+ Ops.append(AddRecs.begin(), AddRecs.end());
}
/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
@@ -322,13 +311,13 @@ static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
const SCEV *Start = A->getStart();
if (Start->isZero()) break;
- const SCEV *Zero = SE.getIntegerSCEV(0, Ty);
+ const SCEV *Zero = SE.getConstant(Ty, 0);
AddRecs.push_back(SE.getAddRecExpr(Zero,
A->getStepRecurrence(SE),
A->getLoop()));
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
Ops[i] = Zero;
- Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
+ Ops.append(Add->op_begin(), Add->op_end());
e += Add->getNumOperands();
} else {
Ops[i] = Start;
@@ -336,7 +325,7 @@ static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
}
if (!AddRecs.empty()) {
// Add the addrecs onto the end of the list.
- Ops.insert(Ops.end(), AddRecs.begin(), AddRecs.end());
+ Ops.append(AddRecs.begin(), AddRecs.end());
// Resort the operand list, moving any constants to the front.
SimplifyAddOperands(Ops, Ty, SE);
}
@@ -398,7 +387,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
SmallVector<const SCEV *, 8> NewOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
const SCEV *Op = Ops[i];
- const SCEV *Remainder = SE.getIntegerSCEV(0, Ty);
+ const SCEV *Remainder = SE.getConstant(Ty, 0);
if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
// Op now has ElSize factored out.
ScaledOps.push_back(Op);
@@ -648,6 +637,8 @@ static const Loop *GetRelevantLoop(const SCEV *S, LoopInfo &LI,
llvm_unreachable("Unexpected SCEV type!");
}
+namespace {
+
/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
DominatorTree &DT;
@@ -656,6 +647,11 @@ public:
bool operator()(std::pair<const Loop *, const SCEV *> LHS,
std::pair<const Loop *, const SCEV *> RHS) const {
+ // Keep pointer operands sorted at the end.
+ if (LHS.second->getType()->isPointerTy() !=
+ RHS.second->getType()->isPointerTy())
+ return LHS.second->getType()->isPointerTy();
+
// Compare loops with PickMostRelevantLoop.
if (LHS.first != RHS.first)
return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;
@@ -674,6 +670,8 @@ public:
}
};
+}
+
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
const Type *Ty = SE.getEffectiveSCEVType(S->getType());
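
The clause added to LoopCompare partitions operands by a boolean property of their type before falling back to the loop-based ordering. The general comparator shape is the usual "compare the flag first, then the tiebreaker" strict weak ordering, shown here on an unrelated toy type rather than on SCEVs (all names below are invented for the sketch):

#include <algorithm>
#include <string>
#include <vector>

struct Item {
  bool flagged;      // the boolean property compared first
  int weight;        // the secondary ordering criterion
  std::string name;
};

// When exactly one of the two items is flagged, the flagged one is ordered
// first; otherwise fall back to the weight comparison.
static bool orderItems(const Item &a, const Item &b) {
  if (a.flagged != b.flagged)
    return a.flagged;
  return a.weight < b.weight;
}

int main() {
  std::vector<Item> v = {{false, 3, "c"}, {true, 9, "p"}, {false, 1, "a"}};
  std::stable_sort(v.begin(), v.end(), orderItems); // "p", then "a", then "c"
  return 0;
}
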
@@ -706,14 +704,23 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
// The running sum expression is a pointer. Try to form a getelementptr
// at this level with that as the base.
SmallVector<const SCEV *, 4> NewOps;
- for (; I != E && I->first == CurLoop; ++I)
- NewOps.push_back(I->second);
+ for (; I != E && I->first == CurLoop; ++I) {
+ // If the operand is a SCEVUnknown wrapping a non-instruction, peek
+ // through it so that more of it can be folded into the GEP.
+ const SCEV *X = I->second;
+ if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
+ if (!isa<Instruction>(U->getValue()))
+ X = SE.getSCEV(U->getValue());
+ NewOps.push_back(X);
+ }
Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
} else if (const PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
// The running sum is an integer, and there's a pointer at this level.
- // Try to form a getelementptr.
+ // Try to form a getelementptr. If the running sum is an instruction,
+ // wrap it in a SCEVUnknown to avoid re-analyzing it.
SmallVector<const SCEV *, 4> NewOps;
- NewOps.push_back(SE.getUnknown(Sum));
+ NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
+ SE.getSCEV(Sum));
for (++I; I != E && I->first == CurLoop; ++I)
NewOps.push_back(I->second);
Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
@@ -803,7 +810,7 @@ static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
Base = A->getStart();
Rest = SE.getAddExpr(Rest,
- SE.getAddRecExpr(SE.getIntegerSCEV(0, A->getType()),
+ SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
A->getStepRecurrence(SE),
A->getLoop()));
}
@@ -972,9 +979,12 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// Determine a normalized form of this expression, which is the expression
// before any post-inc adjustment is made.
const SCEVAddRecExpr *Normalized = S;
- if (L == PostIncLoop) {
- const SCEV *Step = S->getStepRecurrence(SE);
- Normalized = cast<SCEVAddRecExpr>(SE.getMinusSCEV(S, Step));
+ if (PostIncLoops.count(L)) {
+ PostIncLoopSet Loops;
+ Loops.insert(L);
+ Normalized =
+ cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
+ Loops, SE, *SE.DT));
}
// Strip off any non-loop-dominating component from the addrec start.
@@ -982,7 +992,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
const SCEV *PostLoopOffset = 0;
if (!Start->properlyDominates(L->getHeader(), SE.DT)) {
PostLoopOffset = Start;
- Start = SE.getIntegerSCEV(0, Normalized->getType());
+ Start = SE.getConstant(Normalized->getType(), 0);
Normalized =
cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start,
Normalized->getStepRecurrence(SE),
@@ -992,10 +1002,9 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// Strip off any non-loop-dominating component from the addrec step.
const SCEV *Step = Normalized->getStepRecurrence(SE);
const SCEV *PostLoopScale = 0;
- if (!Step->hasComputableLoopEvolution(L) &&
- !Step->dominates(L->getHeader(), SE.DT)) {
+ if (!Step->dominates(L->getHeader(), SE.DT)) {
PostLoopScale = Step;
- Step = SE.getIntegerSCEV(1, Normalized->getType());
+ Step = SE.getConstant(Normalized->getType(), 1);
Normalized =
cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start, Step,
Normalized->getLoop()));
@@ -1008,7 +1017,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// Accommodate post-inc mode, if necessary.
Value *Result;
- if (L != PostIncLoop)
+ if (!PostIncLoops.count(L))
Result = PN;
else {
// In PostInc mode, use the post-incremented value.
@@ -1050,9 +1059,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
// First check for an existing canonical IV in a suitable type.
PHINode *CanonicalIV = 0;
if (PHINode *PN = L->getCanonicalInductionVariable())
- if (SE.isSCEVable(PN->getType()) &&
- SE.getEffectiveSCEVType(PN->getType())->isIntegerTy() &&
- SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
+ if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
CanonicalIV = PN;
// Rewrite an AddRec in terms of the canonical induction variable, if
@@ -1060,16 +1067,16 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
if (CanonicalIV &&
SE.getTypeSizeInBits(CanonicalIV->getType()) >
SE.getTypeSizeInBits(Ty)) {
- const SmallVectorImpl<const SCEV *> &Ops = S->getOperands();
- SmallVector<const SCEV *, 4> NewOps(Ops.size());
- for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- NewOps[i] = SE.getAnyExtendExpr(Ops[i], CanonicalIV->getType());
+ SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
+ for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
+ NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop()));
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
BasicBlock::iterator NewInsertPt =
llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
- while (isa<PHINode>(NewInsertPt)) ++NewInsertPt;
+ while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt))
+ ++NewInsertPt;
V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
NewInsertPt);
restoreInsertPoint(SaveInsertBB, SaveInsertPt);
@@ -1078,9 +1085,8 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
// {X,+,F} --> X + {0,+,F}
if (!S->getStart()->isZero()) {
- const SmallVectorImpl<const SCEV *> &SOperands = S->getOperands();
- SmallVector<const SCEV *, 4> NewOps(SOperands.begin(), SOperands.end());
- NewOps[0] = SE.getIntegerSCEV(0, Ty);
+ SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
+ NewOps[0] = SE.getConstant(Ty, 0);
const SCEV *Rest = SE.getAddRecExpr(NewOps, L);
// Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
@@ -1106,62 +1112,60 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
SE.getUnknown(expand(Rest))));
}
- // {0,+,1} --> Insert a canonical induction variable into the loop!
- if (S->isAffine() &&
- S->getOperand(1) == SE.getIntegerSCEV(1, Ty)) {
- // If there's a canonical IV, just use it.
- if (CanonicalIV) {
- assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
- "IVs with types different from the canonical IV should "
- "already have been handled!");
- return CanonicalIV;
- }
-
+ // If we don't yet have a canonical IV, create one.
+ if (!CanonicalIV) {
// Create and insert the PHI node for the induction variable in the
// specified loop.
BasicBlock *Header = L->getHeader();
- PHINode *PN = PHINode::Create(Ty, "indvar", Header->begin());
- rememberInstruction(PN);
+ CanonicalIV = PHINode::Create(Ty, "indvar", Header->begin());
+ rememberInstruction(CanonicalIV);
Constant *One = ConstantInt::get(Ty, 1);
for (pred_iterator HPI = pred_begin(Header), HPE = pred_end(Header);
- HPI != HPE; ++HPI)
- if (L->contains(*HPI)) {
+ HPI != HPE; ++HPI) {
+ BasicBlock *HP = *HPI;
+ if (L->contains(HP)) {
// Insert a unit add instruction right before the terminator
// corresponding to the back-edge.
- Instruction *Add = BinaryOperator::CreateAdd(PN, One, "indvar.next",
- (*HPI)->getTerminator());
+ Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
+ "indvar.next",
+ HP->getTerminator());
rememberInstruction(Add);
- PN->addIncoming(Add, *HPI);
+ CanonicalIV->addIncoming(Add, HP);
} else {
- PN->addIncoming(Constant::getNullValue(Ty), *HPI);
+ CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
}
+ }
+ }
+
+ // {0,+,1} --> Insert a canonical induction variable into the loop!
+ if (S->isAffine() && S->getOperand(1)->isOne()) {
+ assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
+ "IVs with types different from the canonical IV should "
+ "already have been handled!");
+ return CanonicalIV;
}
// {0,+,F} --> {0,+,1} * F
- // Get the canonical induction variable I for this loop.
- Value *I = CanonicalIV ?
- CanonicalIV :
- getOrInsertCanonicalInductionVariable(L, Ty);
// If this is a simple linear addrec, emit it now as a special case.
if (S->isAffine()) // {0,+,F} --> i*F
return
expand(SE.getTruncateOrNoop(
- SE.getMulExpr(SE.getUnknown(I),
+ SE.getMulExpr(SE.getUnknown(CanonicalIV),
SE.getNoopOrAnyExtend(S->getOperand(1),
- I->getType())),
+ CanonicalIV->getType())),
Ty));
// If this is a chain of recurrences, turn it into a closed form, using the
// folders, then expandCodeFor the closed form. This allows the folders to
// simplify the expression without having to build a bunch of special code
// into this folder.
- const SCEV *IH = SE.getUnknown(I); // Get I as a "symbolic" SCEV.
+ const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV.
// Promote S up to the canonical IV type, if the cast is foldable.
const SCEV *NewS = S;
- const SCEV *Ext = SE.getNoopOrAnyExtend(S, I->getType());
+ const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
if (isa<SCEVAddRecExpr>(Ext))
NewS = Ext;
@@ -1248,6 +1252,15 @@ Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
return LHS;
}
+Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty,
+ Instruction *I) {
+ BasicBlock::iterator IP = I;
+ while (isInsertedInstruction(IP) || isa<DbgInfoIntrinsic>(IP))
+ ++IP;
+ Builder.SetInsertPoint(IP->getParent(), IP);
+ return expandCodeFor(SH, Ty);
+}
+
Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty) {
// Expand the code for this SCEV.
Value *V = expand(SH);
@@ -1267,26 +1280,15 @@ Value *SCEVExpander::expand(const SCEV *S) {
L = L->getParentLoop())
if (S->isLoopInvariant(L)) {
if (!L) break;
- if (BasicBlock *Preheader = L->getLoopPreheader()) {
+ if (BasicBlock *Preheader = L->getLoopPreheader())
InsertPt = Preheader->getTerminator();
- BasicBlock::iterator IP = InsertPt;
- // Back past any debug info instructions. Sometimes we inserted
- // something earlier before debug info but after any real instructions.
- // This should behave the same as if debug info was not present.
- while (IP != Preheader->begin()) {
- --IP;
- if (!isa<DbgInfoIntrinsic>(IP))
- break;
- InsertPt = IP;
- }
- }
} else {
// If the SCEV is computable at this level, insert it into the header
// after the PHIs (and after any other instructions that we've inserted
// there) so that it is guaranteed to dominate any user inside the loop.
- if (L && S->hasComputableLoopEvolution(L) && L != PostIncLoop)
+ if (L && S->hasComputableLoopEvolution(L) && !PostIncLoops.count(L))
InsertPt = L->getHeader()->getFirstNonPHI();
- while (isInsertedInstruction(InsertPt))
+ while (isInsertedInstruction(InsertPt) || isa<DbgInfoIntrinsic>(InsertPt))
InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
break;
}
@@ -1306,7 +1308,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
Value *V = visit(S);
// Remember the expanded value for this SCEV at this location.
- if (!PostIncLoop)
+ if (PostIncLoops.empty())
InsertedExpressions[std::make_pair(S, InsertPt)] = V;
restoreInsertPoint(SaveInsertBB, SaveInsertPt);
@@ -1314,7 +1316,9 @@ Value *SCEVExpander::expand(const SCEV *S) {
}
void SCEVExpander::rememberInstruction(Value *I) {
- if (!PostIncLoop)
+ if (!PostIncLoops.empty())
+ InsertedPostIncValues.insert(I);
+ else
InsertedValues.insert(I);
// If we just claimed an existing instruction and that instruction had
@@ -1322,7 +1326,8 @@ void SCEVExpander::rememberInstruction(Value *I) {
// subsequently inserted code will be dominated.
if (Builder.GetInsertPoint() == I) {
BasicBlock::iterator It = cast<Instruction>(I);
- do { ++It; } while (isInsertedInstruction(It));
+ do { ++It; } while (isInsertedInstruction(It) ||
+ isa<DbgInfoIntrinsic>(It));
Builder.SetInsertPoint(Builder.GetInsertBlock(), It);
}
}
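
Several hunks in this file make the same change: whenever an insertion point is advanced past instructions the expander has already emitted, debug-info intrinsics are now skipped as well, so the presence or absence of debug info no longer shifts where new code lands. The skipping loop itself is just this pattern (toy types below, not the LLVM iterators):

#include <cstddef>
#include <vector>

struct Inst {
  bool inserted; // emitted by the expander earlier
  bool dbgInfo;  // a debug-info intrinsic
};

// Advance the insert point past anything that should not affect placement.
static std::size_t advanceInsertPoint(const std::vector<Inst> &bb,
                                      std::size_t ip) {
  while (ip < bb.size() && (bb[ip].inserted || bb[ip].dbgInfo))
    ++ip;
  return ip;
}
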
@@ -1330,7 +1335,7 @@ void SCEVExpander::rememberInstruction(Value *I) {
void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
// If we acquired more instructions since the old insert point was saved,
// advance past them.
- while (isInsertedInstruction(I)) ++I;
+ while (isInsertedInstruction(I) || isa<DbgInfoIntrinsic>(I)) ++I;
Builder.SetInsertPoint(BB, I);
}
@@ -1339,16 +1344,21 @@ void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
-Value *
+PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
const Type *Ty) {
assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");
- const SCEV *H = SE.getAddRecExpr(SE.getIntegerSCEV(0, Ty),
- SE.getIntegerSCEV(1, Ty), L);
+
+ // Build a SCEV for {0,+,1}<L>.
+ const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
+ SE.getConstant(Ty, 1), L);
+
+ // Emit code for it.
BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
- Value *V = expandCodeFor(H, 0, L->getHeader()->begin());
+ PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
if (SaveInsertBB)
restoreInsertPoint(SaveInsertBB, SaveInsertPt);
+
return V;
}
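
For reference on the {X,+,F} notation used throughout visitAddRecExpr and getOrInsertCanonicalInductionVariable: an add recurrence {S,+,F}<L> takes the value S + i*F on iteration i of loop L, so the canonical IV is simply {0,+,1} and any affine addrec is the canonical IV scaled by its step and offset by its start. A small self-contained check of that identity (plain integers, nothing LLVM-specific):

#include <cassert>
#include <cstdint>

int main() {
  const int64_t Start = 7, Step = 3;   // the addrec {7,+,3}
  int64_t rec = Start;                 // value obtained by iterating the recurrence
  for (int64_t i = 0; i < 100; ++i) {
    // Closed form used by the expander: {S,+,F} == S + {0,+,1} * F.
    assert(rec == Start + i * Step);
    rec += Step;                       // advance to the next iteration's value
  }
  return 0;
}
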
diff --git a/libclamav/c++/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp b/libclamav/c++/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
new file mode 100644
index 0000000..ac36cef
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
@@ -0,0 +1,183 @@
+//===- ScalarEvolutionNormalization.cpp - See below -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements utilities for working with "normalized" expressions.
+// See the comments at the top of ScalarEvolutionNormalization.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ScalarEvolutionNormalization.h"
+using namespace llvm;
+
+/// IVUseShouldUsePostIncValue - We have discovered a "User" of an IV expression
+/// and now we need to decide whether the user should use the preinc or post-inc
+/// value. If this user should use the post-inc version of the IV, return true.
+///
+/// Choosing wrong here can break dominance properties (if we choose to use the
+/// post-inc value when we cannot) or it can end up adding extra live-ranges to
+/// the loop, resulting in reg-reg copies (if we use the pre-inc value when we
+/// should use the post-inc value).
+static bool IVUseShouldUsePostIncValue(Instruction *User, Value *Operand,
+ const Loop *L, DominatorTree *DT) {
+ // If the user is in the loop, use the preinc value.
+ if (L->contains(User)) return false;
+
+ BasicBlock *LatchBlock = L->getLoopLatch();
+ if (!LatchBlock)
+ return false;
+
+ // Ok, the user is outside of the loop. If it is dominated by the latch
+ // block, use the post-inc value.
+ if (DT->dominates(LatchBlock, User->getParent()))
+ return true;
+
+ // There is one case we have to be careful of: PHI nodes. These little guys
+ // can live in blocks that are not dominated by the latch block, but (since
+ // their uses occur in the predecessor block, not the block the PHI lives in)
+ // should still use the post-inc value. Check for this case now.
+ PHINode *PN = dyn_cast<PHINode>(User);
+ if (!PN || !Operand) return false; // not a phi, not dominated by latch block.
+
+ // Look at all of the uses of Operand by the PHI node. If any use corresponds
+ // to a block that is not dominated by the latch block, give up and use the
+ // preincremented value.
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
+ if (PN->getIncomingValue(i) == Operand &&
+ !DT->dominates(LatchBlock, PN->getIncomingBlock(i)))
+ return false;
+
+ // Okay, all uses of Operand by PN are in predecessor blocks that really are
+ // dominated by the latch block. Use the post-incremented value.
+ return true;
+}
+
+const SCEV *llvm::TransformForPostIncUse(TransformKind Kind,
+ const SCEV *S,
+ Instruction *User,
+ Value *OperandValToReplace,
+ PostIncLoopSet &Loops,
+ ScalarEvolution &SE,
+ DominatorTree &DT) {
+ if (isa<SCEVConstant>(S) || isa<SCEVUnknown>(S))
+ return S;
+
+ if (const SCEVCastExpr *X = dyn_cast<SCEVCastExpr>(S)) {
+ const SCEV *O = X->getOperand();
+ const SCEV *N = TransformForPostIncUse(Kind, O, User, OperandValToReplace,
+ Loops, SE, DT);
+ if (O != N)
+ switch (S->getSCEVType()) {
+ case scZeroExtend: return SE.getZeroExtendExpr(N, S->getType());
+ case scSignExtend: return SE.getSignExtendExpr(N, S->getType());
+ case scTruncate: return SE.getTruncateExpr(N, S->getType());
+ default: llvm_unreachable("Unexpected SCEVCastExpr kind!");
+ }
+ return S;
+ }
+
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+ // An addrec. This is the interesting part.
+ SmallVector<const SCEV *, 8> Operands;
+ const Loop *L = AR->getLoop();
+ // The addrec conceptually uses its operands at loop entry.
+ Instruction *LUser = L->getHeader()->begin();
+ // Transform each operand.
+ for (SCEVNAryExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
+ I != E; ++I) {
+ const SCEV *O = *I;
+ const SCEV *N = TransformForPostIncUse(Kind, O, LUser, 0, Loops, SE, DT);
+ Operands.push_back(N);
+ }
+ const SCEV *Result = SE.getAddRecExpr(Operands, L);
+ switch (Kind) {
+ default: llvm_unreachable("Unexpected transform name!");
+ case NormalizeAutodetect:
+ if (IVUseShouldUsePostIncValue(User, OperandValToReplace, L, &DT)) {
+ const SCEV *TransformedStep =
+ TransformForPostIncUse(Kind, AR->getStepRecurrence(SE),
+ User, OperandValToReplace, Loops, SE, DT);
+ Result = SE.getMinusSCEV(Result, TransformedStep);
+ Loops.insert(L);
+ }
+#if 0
+ // This assert is conceptually correct, but ScalarEvolution currently
+ // sometimes fails to canonicalize two equal SCEVs to exactly the same
+ // form. It's possibly a pessimization when this happens, but it isn't a
+ // correctness problem, so disable this assert for now.
+ assert(S == TransformForPostIncUse(Denormalize, Result,
+ User, OperandValToReplace,
+ Loops, SE, DT) &&
+ "SCEV normalization is not invertible!");
+#endif
+ break;
+ case Normalize:
+ if (Loops.count(L)) {
+ const SCEV *TransformedStep =
+ TransformForPostIncUse(Kind, AR->getStepRecurrence(SE),
+ User, OperandValToReplace, Loops, SE, DT);
+ Result = SE.getMinusSCEV(Result, TransformedStep);
+ }
+#if 0
+ // See the comment on the assert above.
+ assert(S == TransformForPostIncUse(Denormalize, Result,
+ User, OperandValToReplace,
+ Loops, SE, DT) &&
+ "SCEV normalization is not invertible!");
+#endif
+ break;
+ case Denormalize:
+ if (Loops.count(L))
+ Result = cast<SCEVAddRecExpr>(Result)->getPostIncExpr(SE);
+ break;
+ }
+ return Result;
+ }
+
+ if (const SCEVNAryExpr *X = dyn_cast<SCEVNAryExpr>(S)) {
+ SmallVector<const SCEV *, 8> Operands;
+ bool Changed = false;
+ // Transform each operand.
+ for (SCEVNAryExpr::op_iterator I = X->op_begin(), E = X->op_end();
+ I != E; ++I) {
+ const SCEV *O = *I;
+ const SCEV *N = TransformForPostIncUse(Kind, O, User, OperandValToReplace,
+ Loops, SE, DT);
+ Changed |= N != O;
+ Operands.push_back(N);
+ }
+ // If any operand actually changed, return a transformed result.
+ if (Changed)
+ switch (S->getSCEVType()) {
+ case scAddExpr: return SE.getAddExpr(Operands);
+ case scMulExpr: return SE.getMulExpr(Operands);
+ case scSMaxExpr: return SE.getSMaxExpr(Operands);
+ case scUMaxExpr: return SE.getUMaxExpr(Operands);
+ default: llvm_unreachable("Unexpected SCEVNAryExpr kind!");
+ }
+ return S;
+ }
+
+ if (const SCEVUDivExpr *X = dyn_cast<SCEVUDivExpr>(S)) {
+ const SCEV *LO = X->getLHS();
+ const SCEV *RO = X->getRHS();
+ const SCEV *LN = TransformForPostIncUse(Kind, LO, User, OperandValToReplace,
+ Loops, SE, DT);
+ const SCEV *RN = TransformForPostIncUse(Kind, RO, User, OperandValToReplace,
+ Loops, SE, DT);
+ if (LO != LN || RO != RN)
+ return SE.getUDivExpr(LN, RN);
+ return S;
+ }
+
+ llvm_unreachable("Unexpected SCEV kind!");
+ return 0;
+}
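
To make the Normalize/Denormalize transform concrete: a post-increment user sees the addrec advanced by one step, so normalizing subtracts the step ({S,+,F} becomes {S-F,+,F} for loops in the post-inc set) and denormalizing via getPostIncExpr adds it back. A small numeric sketch of that round trip (ordinary integers standing in for SCEVs; the function and variable names here are invented):

#include <cassert>
#include <cstdint>

// Value of the addrec {start,+,step} at iteration i.
static int64_t addrecAt(int64_t start, int64_t step, int64_t i) {
  return start + i * step;
}

int main() {
  const int64_t Start = 5, Step = 4;
  for (int64_t i = 0; i < 50; ++i) {
    // The post-incremented IV at iteration i equals the pre-inc IV at i+1.
    int64_t postInc = addrecAt(Start, Step, i + 1);
    // Normalization: subtract one step from the start, keep the same step.
    int64_t normalized = addrecAt(Start - Step, Step, i + 1);
    // Evaluated where the post-inc value lives, the normalized expression
    // recovers the pre-inc value; adding the step back (denormalizing)
    // restores the post-inc value.
    assert(normalized == addrecAt(Start, Step, i));
    assert(normalized + Step == postInc);
  }
  return 0;
}
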
diff --git a/libclamav/c++/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp b/libclamav/c++/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
new file mode 100644
index 0000000..bbfdcec
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -0,0 +1,191 @@
+//===- TypeBasedAliasAnalysis.cpp - Type-Based Alias Analysis -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TypeBasedAliasAnalysis pass, which implements
+// metadata-based TBAA.
+//
+// In LLVM IR, memory does not have types, so LLVM's own type system is not
+// suitable for doing TBAA. Instead, metadata is added to the IR to describe
+// a type system of a higher level language.
+//
+// This pass is language-independent. The type system is encoded in
+// metadata. This allows this pass to support typical C and C++ TBAA, but
+// it can also support custom aliasing behavior for other languages.
+//
+// This is a work-in-progress. It doesn't work yet, and the metadata
+// format isn't stable.
+//
+// TODO: getModRefBehavior. The AliasAnalysis infrastructure will need to
+// be extended.
+// TODO: AA chaining
+// TODO: struct fields
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Module.h"
+#include "llvm/Metadata.h"
+#include "llvm/Pass.h"
+using namespace llvm;
+
+namespace {
+ /// TBAANode - This is a simple wrapper around an MDNode which provides a
+ /// higher-level interface by hiding the details of how alias analysis
+ /// information is encoded in its operands.
+ class TBAANode {
+ const MDNode *Node;
+
+ public:
+ TBAANode() : Node(0) {}
+ explicit TBAANode(MDNode *N) : Node(N) {}
+
+ /// getNode - Get the MDNode for this TBAANode.
+ const MDNode *getNode() const { return Node; }
+
+ /// getParent - Get this TBAANode's Alias DAG parent.
+ TBAANode getParent() const {
+ if (Node->getNumOperands() < 2)
+ return TBAANode();
+ MDNode *P = dyn_cast<MDNode>(Node->getOperand(1));
+ if (!P)
+ return TBAANode();
+ // Ok, this node has a valid parent. Return it.
+ return TBAANode(P);
+ }
+
+ /// TypeIsImmutable - Test if this TBAANode represents a type for objects
+ /// which are not modified (by any means) in the context where this
+ /// AliasAnalysis is relevant.
+ bool TypeIsImmutable() const {
+ if (Node->getNumOperands() < 3)
+ return false;
+ ConstantInt *CI = dyn_cast<ConstantInt>(Node->getOperand(2));
+ if (!CI)
+ return false;
+ // TODO: Think about the encoding.
+ return CI->isOne();
+ }
+ };
+}
+
+namespace {
+ /// TypeBasedAliasAnalysis - This is a simple alias analysis
+ /// implementation that uses TBAA metadata to answer alias queries.
+ class TypeBasedAliasAnalysis : public ImmutablePass,
+ public AliasAnalysis {
+ public:
+ static char ID; // Class identification, replacement for typeinfo
+ TypeBasedAliasAnalysis() : ImmutablePass(ID) {}
+
+ /// getAdjustedAnalysisPointer - This method is used when a pass implements
+ /// an analysis interface through multiple inheritance. If needed, it
+ /// should override this to adjust the this pointer as needed for the
+ /// specified pass info.
+ virtual void *getAdjustedAnalysisPointer(const void *PI) {
+ if (PI == &AliasAnalysis::ID)
+ return (AliasAnalysis*)this;
+ return this;
+ }
+
+ private:
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ virtual AliasResult alias(const Value *V1, unsigned V1Size,
+ const Value *V2, unsigned V2Size);
+ virtual bool pointsToConstantMemory(const Value *P);
+ };
+} // End of anonymous namespace
+
+// Register this pass...
+char TypeBasedAliasAnalysis::ID = 0;
+INITIALIZE_AG_PASS(TypeBasedAliasAnalysis, AliasAnalysis, "tbaa",
+ "Type-Based Alias Analysis", false, true, false);
+
+ImmutablePass *llvm::createTypeBasedAliasAnalysisPass() {
+ return new TypeBasedAliasAnalysis();
+}
+
+void
+TypeBasedAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AliasAnalysis::getAnalysisUsage(AU);
+}
+
+AliasAnalysis::AliasResult
+TypeBasedAliasAnalysis::alias(const Value *A, unsigned ASize,
+ const Value *B, unsigned BSize) {
+ // Currently, metadata can only be attached to Instructions.
+ const Instruction *AI = dyn_cast<Instruction>(A);
+ if (!AI) return MayAlias;
+ const Instruction *BI = dyn_cast<Instruction>(B);
+ if (!BI) return MayAlias;
+
+ // Get the attached MDNodes. If either value lacks a tbaa MDNode, we must
+ // be conservative.
+ MDNode *AM =
+ AI->getMetadata(AI->getParent()->getParent()->getParent()
+ ->getMDKindID("tbaa"));
+ if (!AM) return MayAlias;
+ MDNode *BM =
+ BI->getMetadata(BI->getParent()->getParent()->getParent()
+ ->getMDKindID("tbaa"));
+ if (!BM) return MayAlias;
+
+ // Keep track of the root node for A and B.
+ TBAANode RootA, RootB;
+
+ // Climb the DAG from A to see if we reach B.
+ for (TBAANode T(AM); ; ) {
+ if (T.getNode() == BM)
+ // B is an ancestor of A.
+ return MayAlias;
+
+ RootA = T;
+ T = T.getParent();
+ if (!T.getNode())
+ break;
+ }
+
+ // Climb the DAG from B to see if we reach A.
+ for (TBAANode T(BM); ; ) {
+ if (T.getNode() == AM)
+ // A is an ancestor of B.
+ return MayAlias;
+
+ RootB = T;
+ T = T.getParent();
+ if (!T.getNode())
+ break;
+ }
+
+ // Neither node is an ancestor of the other.
+
+ // If they have the same root, then we've proved there's no alias.
+ if (RootA.getNode() == RootB.getNode())
+ return NoAlias;
+
+ // If they have different roots, they're part of different potentially
+ // unrelated type systems, so we must be conservative.
+ return MayAlias;
+}
+
+bool TypeBasedAliasAnalysis::pointsToConstantMemory(const Value *P) {
+ // Currently, metadata can only be attached to Instructions.
+ const Instruction *I = dyn_cast<Instruction>(P);
+ if (!I) return false;
+
+ MDNode *M =
+ I->getMetadata(I->getParent()->getParent()->getParent()
+ ->getMDKindID("tbaa"));
+ if (!M) return false;
+
+ // If this is an "immutable" type, we can assume the pointer is pointing
+ // to constant memory.
+ return TBAANode(M).TypeIsImmutable();
+}
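
The alias() walk above is easy to model outside of LLVM: every type node points at its parent in the TBAA DAG, a query climbs from each access tag to its root, and only two tags under the same root with neither being an ancestor of the other may be declared NoAlias. A compact standalone model of that decision (simple parent-pointer structs, not the MDNode encoding):

#include <cassert>

struct TypeNode {
  const TypeNode *parent; // null at the root of the type DAG
};

// True if anc is node itself or one of its ancestors.
static bool isAncestorOf(const TypeNode *anc, const TypeNode *node) {
  for (const TypeNode *t = node; t; t = t->parent)
    if (t == anc)
      return true;
  return false;
}

static const TypeNode *rootOf(const TypeNode *node) {
  while (node->parent)
    node = node->parent;
  return node;
}

// MayAlias unless both tags hang off the same root and neither is an
// ancestor of the other -- the same decision the pass above makes.
static bool mayAlias(const TypeNode *a, const TypeNode *b) {
  if (isAncestorOf(a, b) || isAncestorOf(b, a))
    return true;
  return rootOf(a) != rootOf(b);
}

int main() {
  TypeNode root{nullptr}, intTy{&root}, floatTy{&root}, otherRoot{nullptr};
  assert(!mayAlias(&intTy, &floatTy));  // same root, unrelated branches
  assert(mayAlias(&intTy, &root));      // one is an ancestor of the other
  assert(mayAlias(&intTy, &otherRoot)); // different type systems
  return 0;
}
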
diff --git a/libclamav/c++/llvm/lib/Analysis/ValueTracking.cpp b/libclamav/c++/llvm/lib/Analysis/ValueTracking.cpp
index 5ae72f7..181c9b0 100644
--- a/libclamav/c++/llvm/lib/Analysis/ValueTracking.cpp
+++ b/libclamav/c++/llvm/lib/Analysis/ValueTracking.cpp
@@ -880,19 +880,20 @@ bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
}
Value *Mul0 = NULL;
- Value *Mul1 = NULL;
- bool M0 = ComputeMultiple(Op0, Base, Mul0,
- LookThroughSExt, Depth+1);
- bool M1 = ComputeMultiple(Op1, Base, Mul1,
- LookThroughSExt, Depth+1);
-
- if (M0) {
- if (isa<Constant>(Op1) && isa<Constant>(Mul0)) {
- // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
- Multiple = ConstantExpr::getMul(cast<Constant>(Mul0),
- cast<Constant>(Op1));
- return true;
- }
+ if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
+ if (Constant *Op1C = dyn_cast<Constant>(Op1))
+ if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
+ if (Op1C->getType()->getPrimitiveSizeInBits() <
+ MulC->getType()->getPrimitiveSizeInBits())
+ Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
+ if (Op1C->getType()->getPrimitiveSizeInBits() >
+ MulC->getType()->getPrimitiveSizeInBits())
+ MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
+
+ // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
+ Multiple = ConstantExpr::getMul(MulC, Op1C);
+ return true;
+ }
if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
if (Mul0CI->getValue() == 1) {
@@ -902,13 +903,21 @@ bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
}
}
- if (M1) {
- if (isa<Constant>(Op0) && isa<Constant>(Mul1)) {
- // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
- Multiple = ConstantExpr::getMul(cast<Constant>(Mul1),
- cast<Constant>(Op0));
- return true;
- }
+ Value *Mul1 = NULL;
+ if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
+ if (Constant *Op0C = dyn_cast<Constant>(Op0))
+ if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
+ if (Op0C->getType()->getPrimitiveSizeInBits() <
+ MulC->getType()->getPrimitiveSizeInBits())
+ Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
+ if (Op0C->getType()->getPrimitiveSizeInBits() >
+ MulC->getType()->getPrimitiveSizeInBits())
+ MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
+
+ // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
+ Multiple = ConstantExpr::getMul(MulC, Op0C);
+ return true;
+ }
if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
if (Mul1CI->getValue() == 1) {
@@ -953,7 +962,7 @@ bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
// sqrt(-0.0) = -0.0, no other negative results are possible.
if (II->getIntrinsicID() == Intrinsic::sqrt)
- return CannotBeNegativeZero(II->getOperand(1), Depth+1);
+ return CannotBeNegativeZero(II->getArgOperand(0), Depth+1);
if (const CallInst *CI = dyn_cast<CallInst>(I))
if (const Function *F = CI->getCalledFunction()) {
@@ -966,202 +975,13 @@ bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
if (F->getName() == "fabsl") return true;
if (F->getName() == "sqrt" || F->getName() == "sqrtf" ||
F->getName() == "sqrtl")
- return CannotBeNegativeZero(CI->getOperand(1), Depth+1);
+ return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1);
}
}
return false;
}
-
-/// GetLinearExpression - Analyze the specified value as a linear expression:
-/// "A*V + B", where A and B are constant integers. Return the scale and offset
-/// values as APInts and return V as a Value*. The incoming Value is known to
-/// have IntegerType. Note that this looks through extends, so the high bits
-/// may not be represented in the result.
-static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
- const TargetData *TD, unsigned Depth) {
- assert(V->getType()->isIntegerTy() && "Not an integer value");
-
- // Limit our recursion depth.
- if (Depth == 6) {
- Scale = 1;
- Offset = 0;
- return V;
- }
-
- if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
- if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
- switch (BOp->getOpcode()) {
- default: break;
- case Instruction::Or:
- // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
- // analyze it.
- if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), TD))
- break;
- // FALL THROUGH.
- case Instruction::Add:
- V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, TD, Depth+1);
- Offset += RHSC->getValue();
- return V;
- case Instruction::Mul:
- V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, TD, Depth+1);
- Offset *= RHSC->getValue();
- Scale *= RHSC->getValue();
- return V;
- case Instruction::Shl:
- V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, TD, Depth+1);
- Offset <<= RHSC->getValue().getLimitedValue();
- Scale <<= RHSC->getValue().getLimitedValue();
- return V;
- }
- }
- }
-
- // Since clients don't care about the high bits of the value, just scales and
- // offsets, we can look through extensions.
- if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
- Value *CastOp = cast<CastInst>(V)->getOperand(0);
- unsigned OldWidth = Scale.getBitWidth();
- unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
- Scale.trunc(SmallWidth);
- Offset.trunc(SmallWidth);
- Value *Result = GetLinearExpression(CastOp, Scale, Offset, TD, Depth+1);
- Scale.zext(OldWidth);
- Offset.zext(OldWidth);
- return Result;
- }
-
- Scale = 1;
- Offset = 0;
- return V;
-}
-
-/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
-/// into a base pointer with a constant offset and a number of scaled symbolic
-/// offsets.
-///
-/// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
-/// the VarIndices vector) are Value*'s that are known to be scaled by the
-/// specified amount, but which may have other unrepresented high bits. As such,
-/// the gep cannot necessarily be reconstructed from its decomposed form.
-///
-/// When TargetData is around, this function is capable of analyzing everything
-/// that Value::getUnderlyingObject() can look through. When not, it just looks
-/// through pointer casts.
-///
-const Value *llvm::DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
- SmallVectorImpl<std::pair<const Value*, int64_t> > &VarIndices,
- const TargetData *TD) {
- // Limit recursion depth to limit compile time in crazy cases.
- unsigned MaxLookup = 6;
-
- BaseOffs = 0;
- do {
- // See if this is a bitcast or GEP.
- const Operator *Op = dyn_cast<Operator>(V);
- if (Op == 0) {
- // The only non-operator case we can handle are GlobalAliases.
- if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
- if (!GA->mayBeOverridden()) {
- V = GA->getAliasee();
- continue;
- }
- }
- return V;
- }
-
- if (Op->getOpcode() == Instruction::BitCast) {
- V = Op->getOperand(0);
- continue;
- }
-
- const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
- if (GEPOp == 0)
- return V;
-
- // Don't attempt to analyze GEPs over unsized objects.
- if (!cast<PointerType>(GEPOp->getOperand(0)->getType())
- ->getElementType()->isSized())
- return V;
-
- // If we are lacking TargetData information, we can't compute the offets of
- // elements computed by GEPs. However, we can handle bitcast equivalent
- // GEPs.
- if (!TD) {
- if (!GEPOp->hasAllZeroIndices())
- return V;
- V = GEPOp->getOperand(0);
- continue;
- }
-
- // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
- gep_type_iterator GTI = gep_type_begin(GEPOp);
- for (User::const_op_iterator I = GEPOp->op_begin()+1,
- E = GEPOp->op_end(); I != E; ++I) {
- Value *Index = *I;
- // Compute the (potentially symbolic) offset in bytes for this index.
- if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
- // For a struct, add the member offset.
- unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
- if (FieldNo == 0) continue;
-
- BaseOffs += TD->getStructLayout(STy)->getElementOffset(FieldNo);
- continue;
- }
-
- // For an array/pointer, add the element offset, explicitly scaled.
- if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
- if (CIdx->isZero()) continue;
- BaseOffs += TD->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
- continue;
- }
-
- uint64_t Scale = TD->getTypeAllocSize(*GTI);
-
- // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
- unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
- APInt IndexScale(Width, 0), IndexOffset(Width, 0);
- Index = GetLinearExpression(Index, IndexScale, IndexOffset, TD, 0);
-
- // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
- // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
- BaseOffs += IndexOffset.getZExtValue()*Scale;
- Scale *= IndexScale.getZExtValue();
-
-
- // If we already had an occurrance of this index variable, merge this
- // scale into it. For example, we want to handle:
- // A[x][x] -> x*16 + x*4 -> x*20
- // This also ensures that 'x' only appears in the index list once.
- for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
- if (VarIndices[i].first == Index) {
- Scale += VarIndices[i].second;
- VarIndices.erase(VarIndices.begin()+i);
- break;
- }
- }
-
- // Make sure that we have a scale that makes sense for this target's
- // pointer size.
- if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
- Scale <<= ShiftBits;
- Scale >>= ShiftBits;
- }
-
- if (Scale)
- VarIndices.push_back(std::make_pair(Index, Scale));
- }
-
- // Analyze the base pointer next.
- V = GEPOp->getOperand(0);
- } while (--MaxLookup);
-
- // If the chain of expressions is too deep, just return early.
- return V;
-}
-
-
// This is the recursive version of BuildSubAggregate. It takes a few different
// arguments. Idxs is the index within the nested struct From that we are
// looking at now (which is of type IndexedType). IdxSkip is the number of
@@ -1342,22 +1162,23 @@ Value *llvm::FindInsertedValue(Value *V, const unsigned *idx_begin,
/// GetConstantStringInfo - This function computes the length of a
/// null-terminated C string pointed to by V. If successful, it returns true
/// and returns the string in Str. If unsuccessful, it returns false.
-bool llvm::GetConstantStringInfo(Value *V, std::string &Str, uint64_t Offset,
+bool llvm::GetConstantStringInfo(const Value *V, std::string &Str,
+ uint64_t Offset,
bool StopAtNul) {
// If V is NULL then return false;
if (V == NULL) return false;
// Look through bitcast instructions.
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(V))
+ if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
return GetConstantStringInfo(BCI->getOperand(0), Str, Offset, StopAtNul);
// If the value is not a GEP instruction nor a constant expression with a
// GEP instruction, then return false because ConstantArray can't occur
// any other way
- User *GEP = 0;
- if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(V)) {
+ const User *GEP = 0;
+ if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(V)) {
GEP = GEPI;
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+ } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
if (CE->getOpcode() == Instruction::BitCast)
return GetConstantStringInfo(CE->getOperand(0), Str, Offset, StopAtNul);
if (CE->getOpcode() != Instruction::GetElementPtr)
@@ -1378,7 +1199,7 @@ bool llvm::GetConstantStringInfo(Value *V, std::string &Str, uint64_t Offset,
// Check to make sure that the first operand of the GEP is an integer and
// has value 0 so that we are sure we're indexing into the initializer.
- ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
+ const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
if (FirstIdx == 0 || !FirstIdx->isZero())
return false;
@@ -1386,7 +1207,7 @@ bool llvm::GetConstantStringInfo(Value *V, std::string &Str, uint64_t Offset,
// into the array. If this occurs, we can't say anything meaningful about
// the string.
uint64_t StartIdx = 0;
- if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
StartIdx = CI->getZExtValue();
else
return false;
@@ -1397,10 +1218,10 @@ bool llvm::GetConstantStringInfo(Value *V, std::string &Str, uint64_t Offset,
// The GEP instruction, constant or instruction, must reference a global
// variable that is a constant and is initialized. The referenced constant
// initializer is the array that we'll use for optimization.
- GlobalVariable* GV = dyn_cast<GlobalVariable>(V);
+ const GlobalVariable* GV = dyn_cast<GlobalVariable>(V);
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
return false;
- Constant *GlobalInit = GV->getInitializer();
+ const Constant *GlobalInit = GV->getInitializer();
// Handle the ConstantAggregateZero case
if (isa<ConstantAggregateZero>(GlobalInit)) {
@@ -1411,7 +1232,7 @@ bool llvm::GetConstantStringInfo(Value *V, std::string &Str, uint64_t Offset,
}
// Must be a Constant Array
- ConstantArray *Array = dyn_cast<ConstantArray>(GlobalInit);
+ const ConstantArray *Array = dyn_cast<ConstantArray>(GlobalInit);
if (Array == 0 || !Array->getType()->getElementType()->isIntegerTy(8))
return false;
@@ -1425,8 +1246,8 @@ bool llvm::GetConstantStringInfo(Value *V, std::string &Str, uint64_t Offset,
// to in the array.
Str.reserve(NumElts-Offset);
for (unsigned i = Offset; i != NumElts; ++i) {
- Constant *Elt = Array->getOperand(i);
- ConstantInt *CI = dyn_cast<ConstantInt>(Elt);
+ const Constant *Elt = Array->getOperand(i);
+ const ConstantInt *CI = dyn_cast<ConstantInt>(Elt);
if (!CI) // This array isn't suitable, non-int initializer.
return false;
if (StopAtNul && CI->isZero())
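
The rewritten ComputeMultiple branches above no longer assume the two constants share a type: before multiplying, the narrower constant is zero-extended to the width of the wider one. The same guard in miniature, on a toy value/width pair rather than on ConstantExpr (all names below are invented for the sketch):

#include <cassert>
#include <cstdint>

// A toy constant: a value together with its bit width.
struct Const {
  uint64_t value;
  unsigned bits;
};

// Zero-extend c to 'bits' if it is narrower, mirroring ConstantExpr::getZExt.
static Const zextTo(Const c, unsigned bits) {
  return c.bits < bits ? Const{c.value, bits} : c;
}

// Multiply two constants after matching their widths, as the patched code
// does before calling ConstantExpr::getMul.
static Const mulMatched(Const a, Const b) {
  unsigned w = a.bits > b.bits ? a.bits : b.bits;
  a = zextTo(a, w);
  b = zextTo(b, w);
  uint64_t mask = w >= 64 ? ~0ULL : ((1ULL << w) - 1);
  return Const{(a.value * b.value) & mask, w};
}

int main() {
  Const m = mulMatched(Const{10, 8}, Const{300, 16}); // i8 * i16 -> i16
  assert(m.bits == 16 && m.value == 3000);
  return 0;
}
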
diff --git a/libclamav/c++/llvm/lib/AsmParser/CMakeLists.txt b/libclamav/c++/llvm/lib/AsmParser/CMakeLists.txt
deleted file mode 100644
index 985ebe2..0000000
--- a/libclamav/c++/llvm/lib/AsmParser/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-# AsmParser
-add_llvm_library(LLVMAsmParser
- LLLexer.cpp
- LLParser.cpp
- Parser.cpp
- )
diff --git a/libclamav/c++/llvm/lib/AsmParser/LLLexer.cpp b/libclamav/c++/llvm/lib/AsmParser/LLLexer.cpp
deleted file mode 100644
index 46f3cbc..0000000
--- a/libclamav/c++/llvm/lib/AsmParser/LLLexer.cpp
+++ /dev/null
@@ -1,865 +0,0 @@
-//===- LLLexer.cpp - Lexer for .ll Files ----------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Implement the Lexer for .ll files.
-//
-//===----------------------------------------------------------------------===//
-
-#include "LLLexer.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Instruction.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Assembly/Parser.h"
-#include <cstdio>
-#include <cstdlib>
-#include <cstring>
-using namespace llvm;
-
-bool LLLexer::Error(LocTy ErrorLoc, const std::string &Msg) const {
- ErrorInfo = SM.GetMessage(ErrorLoc, Msg, "error");
- return true;
-}
-
-//===----------------------------------------------------------------------===//
-// Helper functions.
-//===----------------------------------------------------------------------===//
-
-// atoull - Convert an ascii string of decimal digits into the unsigned long
-// long representation... this does not have to do input error checking,
-// because we know that the input will be matched by a suitable regex...
-//
-uint64_t LLLexer::atoull(const char *Buffer, const char *End) {
- uint64_t Result = 0;
- for (; Buffer != End; Buffer++) {
- uint64_t OldRes = Result;
- Result *= 10;
- Result += *Buffer-'0';
- if (Result < OldRes) { // Uh, oh, overflow detected!!!
- Error("constant bigger than 64 bits detected!");
- return 0;
- }
- }
- return Result;
-}
-
-uint64_t LLLexer::HexIntToVal(const char *Buffer, const char *End) {
- uint64_t Result = 0;
- for (; Buffer != End; ++Buffer) {
- uint64_t OldRes = Result;
- Result *= 16;
- char C = *Buffer;
- if (C >= '0' && C <= '9')
- Result += C-'0';
- else if (C >= 'A' && C <= 'F')
- Result += C-'A'+10;
- else if (C >= 'a' && C <= 'f')
- Result += C-'a'+10;
-
- if (Result < OldRes) { // Uh, oh, overflow detected!!!
- Error("constant bigger than 64 bits detected!");
- return 0;
- }
- }
- return Result;
-}
-
-void LLLexer::HexToIntPair(const char *Buffer, const char *End,
- uint64_t Pair[2]) {
- Pair[0] = 0;
- for (int i=0; i<16; i++, Buffer++) {
- assert(Buffer != End);
- Pair[0] *= 16;
- char C = *Buffer;
- if (C >= '0' && C <= '9')
- Pair[0] += C-'0';
- else if (C >= 'A' && C <= 'F')
- Pair[0] += C-'A'+10;
- else if (C >= 'a' && C <= 'f')
- Pair[0] += C-'a'+10;
- }
- Pair[1] = 0;
- for (int i=0; i<16 && Buffer != End; i++, Buffer++) {
- Pair[1] *= 16;
- char C = *Buffer;
- if (C >= '0' && C <= '9')
- Pair[1] += C-'0';
- else if (C >= 'A' && C <= 'F')
- Pair[1] += C-'A'+10;
- else if (C >= 'a' && C <= 'f')
- Pair[1] += C-'a'+10;
- }
- if (Buffer != End)
- Error("constant bigger than 128 bits detected!");
-}
-
-/// FP80HexToIntPair - translate an 80 bit FP80 number (20 hexits) into
-/// { low64, high16 } as usual for an APInt.
-void LLLexer::FP80HexToIntPair(const char *Buffer, const char *End,
- uint64_t Pair[2]) {
- Pair[1] = 0;
- for (int i=0; i<4 && Buffer != End; i++, Buffer++) {
- assert(Buffer != End);
- Pair[1] *= 16;
- char C = *Buffer;
- if (C >= '0' && C <= '9')
- Pair[1] += C-'0';
- else if (C >= 'A' && C <= 'F')
- Pair[1] += C-'A'+10;
- else if (C >= 'a' && C <= 'f')
- Pair[1] += C-'a'+10;
- }
- Pair[0] = 0;
- for (int i=0; i<16; i++, Buffer++) {
- Pair[0] *= 16;
- char C = *Buffer;
- if (C >= '0' && C <= '9')
- Pair[0] += C-'0';
- else if (C >= 'A' && C <= 'F')
- Pair[0] += C-'A'+10;
- else if (C >= 'a' && C <= 'f')
- Pair[0] += C-'a'+10;
- }
- if (Buffer != End)
- Error("constant bigger than 128 bits detected!");
-}
-
-// UnEscapeLexed - Run through the specified buffer and change \xx codes to the
-// appropriate character.
-static void UnEscapeLexed(std::string &Str) {
- if (Str.empty()) return;
-
- char *Buffer = &Str[0], *EndBuffer = Buffer+Str.size();
- char *BOut = Buffer;
- for (char *BIn = Buffer; BIn != EndBuffer; ) {
- if (BIn[0] == '\\') {
- if (BIn < EndBuffer-1 && BIn[1] == '\\') {
- *BOut++ = '\\'; // Two \ becomes one
- BIn += 2;
- } else if (BIn < EndBuffer-2 && isxdigit(BIn[1]) && isxdigit(BIn[2])) {
- char Tmp = BIn[3]; BIn[3] = 0; // Terminate string
- *BOut = (char)strtol(BIn+1, 0, 16); // Convert to number
- BIn[3] = Tmp; // Restore character
- BIn += 3; // Skip over handled chars
- ++BOut;
- } else {
- *BOut++ = *BIn++;
- }
- } else {
- *BOut++ = *BIn++;
- }
- }
- Str.resize(BOut-Buffer);
-}
-
-/// isLabelChar - Return true for [-a-zA-Z$._0-9].
-static bool isLabelChar(char C) {
- return isalnum(C) || C == '-' || C == '$' || C == '.' || C == '_';
-}
-
-
-/// isLabelTail - Return true if this pointer points to a valid end of a label.
-static const char *isLabelTail(const char *CurPtr) {
- while (1) {
- if (CurPtr[0] == ':') return CurPtr+1;
- if (!isLabelChar(CurPtr[0])) return 0;
- ++CurPtr;
- }
-}
-
-
-
-//===----------------------------------------------------------------------===//
-// Lexer definition.
-//===----------------------------------------------------------------------===//
-
-LLLexer::LLLexer(MemoryBuffer *StartBuf, SourceMgr &sm, SMDiagnostic &Err,
- LLVMContext &C)
- : CurBuf(StartBuf), ErrorInfo(Err), SM(sm), Context(C), APFloatVal(0.0) {
- CurPtr = CurBuf->getBufferStart();
-}
-
-std::string LLLexer::getFilename() const {
- return CurBuf->getBufferIdentifier();
-}
-
-int LLLexer::getNextChar() {
- char CurChar = *CurPtr++;
- switch (CurChar) {
- default: return (unsigned char)CurChar;
- case 0:
- // A nul character in the stream is either the end of the current buffer or
- // a random nul in the file. Disambiguate that here.
- if (CurPtr-1 != CurBuf->getBufferEnd())
- return 0; // Just whitespace.
-
- // Otherwise, return end of file.
- --CurPtr; // Another call to lex will return EOF again.
- return EOF;
- }
-}
-
-
-lltok::Kind LLLexer::LexToken() {
- TokStart = CurPtr;
-
- int CurChar = getNextChar();
- switch (CurChar) {
- default:
- // Handle letters: [a-zA-Z_]
- if (isalpha(CurChar) || CurChar == '_')
- return LexIdentifier();
-
- return lltok::Error;
- case EOF: return lltok::Eof;
- case 0:
- case ' ':
- case '\t':
- case '\n':
- case '\r':
- // Ignore whitespace.
- return LexToken();
- case '+': return LexPositive();
- case '@': return LexAt();
- case '%': return LexPercent();
- case '"': return LexQuote();
- case '.':
- if (const char *Ptr = isLabelTail(CurPtr)) {
- CurPtr = Ptr;
- StrVal.assign(TokStart, CurPtr-1);
- return lltok::LabelStr;
- }
- if (CurPtr[0] == '.' && CurPtr[1] == '.') {
- CurPtr += 2;
- return lltok::dotdotdot;
- }
- return lltok::Error;
- case '$':
- if (const char *Ptr = isLabelTail(CurPtr)) {
- CurPtr = Ptr;
- StrVal.assign(TokStart, CurPtr-1);
- return lltok::LabelStr;
- }
- return lltok::Error;
- case ';':
- SkipLineComment();
- return LexToken();
- case '!': return LexExclaim();
- case '0': case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9':
- case '-':
- return LexDigitOrNegative();
- case '=': return lltok::equal;
- case '[': return lltok::lsquare;
- case ']': return lltok::rsquare;
- case '{': return lltok::lbrace;
- case '}': return lltok::rbrace;
- case '<': return lltok::less;
- case '>': return lltok::greater;
- case '(': return lltok::lparen;
- case ')': return lltok::rparen;
- case ',': return lltok::comma;
- case '*': return lltok::star;
- case '\\': return lltok::backslash;
- }
-}
-
-void LLLexer::SkipLineComment() {
- while (1) {
- if (CurPtr[0] == '\n' || CurPtr[0] == '\r' || getNextChar() == EOF)
- return;
- }
-}
-
-/// LexAt - Lex all tokens that start with an @ character:
-/// GlobalVar @\"[^\"]*\"
-/// GlobalVar @[-a-zA-Z$._][-a-zA-Z$._0-9]*
-/// GlobalVarID @[0-9]+
-lltok::Kind LLLexer::LexAt() {
- // Handle AtStringConstant: @\"[^\"]*\"
- if (CurPtr[0] == '"') {
- ++CurPtr;
-
- while (1) {
- int CurChar = getNextChar();
-
- if (CurChar == EOF) {
- Error("end of file in global variable name");
- return lltok::Error;
- }
- if (CurChar == '"') {
- StrVal.assign(TokStart+2, CurPtr-1);
- UnEscapeLexed(StrVal);
- return lltok::GlobalVar;
- }
- }
- }
-
- // Handle GlobalVarName: @[-a-zA-Z$._][-a-zA-Z$._0-9]*
- if (isalpha(CurPtr[0]) || CurPtr[0] == '-' || CurPtr[0] == '$' ||
- CurPtr[0] == '.' || CurPtr[0] == '_') {
- ++CurPtr;
- while (isalnum(CurPtr[0]) || CurPtr[0] == '-' || CurPtr[0] == '$' ||
- CurPtr[0] == '.' || CurPtr[0] == '_')
- ++CurPtr;
-
- StrVal.assign(TokStart+1, CurPtr); // Skip @
- return lltok::GlobalVar;
- }
-
- // Handle GlobalVarID: @[0-9]+
- if (isdigit(CurPtr[0])) {
- for (++CurPtr; isdigit(CurPtr[0]); ++CurPtr)
- /*empty*/;
-
- uint64_t Val = atoull(TokStart+1, CurPtr);
- if ((unsigned)Val != Val)
- Error("invalid value number (too large)!");
- UIntVal = unsigned(Val);
- return lltok::GlobalID;
- }
-
- return lltok::Error;
-}
-
-
-/// LexPercent - Lex all tokens that start with a % character:
-/// LocalVar ::= %\"[^\"]*\"
-/// LocalVar ::= %[-a-zA-Z$._][-a-zA-Z$._0-9]*
-/// LocalVarID ::= %[0-9]+
-lltok::Kind LLLexer::LexPercent() {
- // Handle LocalVarName: %\"[^\"]*\"
- if (CurPtr[0] == '"') {
- ++CurPtr;
-
- while (1) {
- int CurChar = getNextChar();
-
- if (CurChar == EOF) {
- Error("end of file in string constant");
- return lltok::Error;
- }
- if (CurChar == '"') {
- StrVal.assign(TokStart+2, CurPtr-1);
- UnEscapeLexed(StrVal);
- return lltok::LocalVar;
- }
- }
- }
-
- // Handle LocalVarName: %[-a-zA-Z$._][-a-zA-Z$._0-9]*
- if (isalpha(CurPtr[0]) || CurPtr[0] == '-' || CurPtr[0] == '$' ||
- CurPtr[0] == '.' || CurPtr[0] == '_') {
- ++CurPtr;
- while (isalnum(CurPtr[0]) || CurPtr[0] == '-' || CurPtr[0] == '$' ||
- CurPtr[0] == '.' || CurPtr[0] == '_')
- ++CurPtr;
-
- StrVal.assign(TokStart+1, CurPtr); // Skip %
- return lltok::LocalVar;
- }
-
- // Handle LocalVarID: %[0-9]+
- if (isdigit(CurPtr[0])) {
- for (++CurPtr; isdigit(CurPtr[0]); ++CurPtr)
- /*empty*/;
-
- uint64_t Val = atoull(TokStart+1, CurPtr);
- if ((unsigned)Val != Val)
- Error("invalid value number (too large)!");
- UIntVal = unsigned(Val);
- return lltok::LocalVarID;
- }
-
- return lltok::Error;
-}
-
-/// LexQuote - Lex all tokens that start with a " character:
-/// QuoteLabel "[^"]+":
-/// StringConstant "[^"]*"
-lltok::Kind LLLexer::LexQuote() {
- while (1) {
- int CurChar = getNextChar();
-
- if (CurChar == EOF) {
- Error("end of file in quoted string");
- return lltok::Error;
- }
-
- if (CurChar != '"') continue;
-
- if (CurPtr[0] != ':') {
- StrVal.assign(TokStart+1, CurPtr-1);
- UnEscapeLexed(StrVal);
- return lltok::StringConstant;
- }
-
- ++CurPtr;
- StrVal.assign(TokStart+1, CurPtr-2);
- UnEscapeLexed(StrVal);
- return lltok::LabelStr;
- }
-}
-
-static bool JustWhitespaceNewLine(const char *&Ptr) {
- const char *ThisPtr = Ptr;
- while (*ThisPtr == ' ' || *ThisPtr == '\t')
- ++ThisPtr;
- if (*ThisPtr == '\n' || *ThisPtr == '\r') {
- Ptr = ThisPtr;
- return true;
- }
- return false;
-}
-
-/// LexExclaim:
-/// !foo
-/// !
-lltok::Kind LLLexer::LexExclaim() {
- // Lex a metadata name as a MetadataVar.
- if (isalpha(CurPtr[0])) {
- ++CurPtr;
- while (isalnum(CurPtr[0]) || CurPtr[0] == '-' || CurPtr[0] == '$' ||
- CurPtr[0] == '.' || CurPtr[0] == '_')
- ++CurPtr;
-
- StrVal.assign(TokStart+1, CurPtr); // Skip !
- return lltok::MetadataVar;
- }
- return lltok::exclaim;
-}
-
-/// LexIdentifier: Handle several related productions:
-/// Label [-a-zA-Z$._0-9]+:
-/// IntegerType i[0-9]+
-/// Keyword sdiv, float, ...
-/// HexIntConstant [us]0x[0-9A-Fa-f]+
-lltok::Kind LLLexer::LexIdentifier() {
- const char *StartChar = CurPtr;
- const char *IntEnd = CurPtr[-1] == 'i' ? 0 : StartChar;
- const char *KeywordEnd = 0;
-
- for (; isLabelChar(*CurPtr); ++CurPtr) {
- // If we decide this is an integer, remember the end of the sequence.
- if (!IntEnd && !isdigit(*CurPtr)) IntEnd = CurPtr;
- if (!KeywordEnd && !isalnum(*CurPtr) && *CurPtr != '_') KeywordEnd = CurPtr;
- }
-
- // If we stopped due to a colon, this really is a label.
- if (*CurPtr == ':') {
- StrVal.assign(StartChar-1, CurPtr++);
- return lltok::LabelStr;
- }
-
- // Otherwise, this wasn't a label. If this was valid as an integer type,
- // return it.
- if (IntEnd == 0) IntEnd = CurPtr;
- if (IntEnd != StartChar) {
- CurPtr = IntEnd;
- uint64_t NumBits = atoull(StartChar, CurPtr);
- if (NumBits < IntegerType::MIN_INT_BITS ||
- NumBits > IntegerType::MAX_INT_BITS) {
- Error("bitwidth for integer type out of range!");
- return lltok::Error;
- }
- TyVal = IntegerType::get(Context, NumBits);
- return lltok::Type;
- }
-
- // Otherwise, this was a letter sequence. See which keyword this is.
- if (KeywordEnd == 0) KeywordEnd = CurPtr;
- CurPtr = KeywordEnd;
- --StartChar;
- unsigned Len = CurPtr-StartChar;
-#define KEYWORD(STR) \
- if (Len == strlen(#STR) && !memcmp(StartChar, #STR, strlen(#STR))) \
- return lltok::kw_##STR;
-
- KEYWORD(begin); KEYWORD(end);
- KEYWORD(true); KEYWORD(false);
- KEYWORD(declare); KEYWORD(define);
- KEYWORD(global); KEYWORD(constant);
-
- KEYWORD(private);
- KEYWORD(linker_private);
- KEYWORD(internal);
- KEYWORD(available_externally);
- KEYWORD(linkonce);
- KEYWORD(linkonce_odr);
- KEYWORD(weak);
- KEYWORD(weak_odr);
- KEYWORD(appending);
- KEYWORD(dllimport);
- KEYWORD(dllexport);
- KEYWORD(common);
- KEYWORD(default);
- KEYWORD(hidden);
- KEYWORD(protected);
- KEYWORD(extern_weak);
- KEYWORD(external);
- KEYWORD(thread_local);
- KEYWORD(zeroinitializer);
- KEYWORD(undef);
- KEYWORD(null);
- KEYWORD(to);
- KEYWORD(tail);
- KEYWORD(target);
- KEYWORD(triple);
- KEYWORD(deplibs);
- KEYWORD(datalayout);
- KEYWORD(volatile);
- KEYWORD(nuw);
- KEYWORD(nsw);
- KEYWORD(exact);
- KEYWORD(inbounds);
- KEYWORD(align);
- KEYWORD(addrspace);
- KEYWORD(section);
- KEYWORD(alias);
- KEYWORD(module);
- KEYWORD(asm);
- KEYWORD(sideeffect);
- KEYWORD(alignstack);
- KEYWORD(gc);
-
- KEYWORD(ccc);
- KEYWORD(fastcc);
- KEYWORD(coldcc);
- KEYWORD(x86_stdcallcc);
- KEYWORD(x86_fastcallcc);
- KEYWORD(arm_apcscc);
- KEYWORD(arm_aapcscc);
- KEYWORD(arm_aapcs_vfpcc);
- KEYWORD(msp430_intrcc);
-
- KEYWORD(cc);
- KEYWORD(c);
-
- KEYWORD(signext);
- KEYWORD(zeroext);
- KEYWORD(inreg);
- KEYWORD(sret);
- KEYWORD(nounwind);
- KEYWORD(noreturn);
- KEYWORD(noalias);
- KEYWORD(nocapture);
- KEYWORD(byval);
- KEYWORD(nest);
- KEYWORD(readnone);
- KEYWORD(readonly);
-
- KEYWORD(inlinehint);
- KEYWORD(noinline);
- KEYWORD(alwaysinline);
- KEYWORD(optsize);
- KEYWORD(ssp);
- KEYWORD(sspreq);
- KEYWORD(noredzone);
- KEYWORD(noimplicitfloat);
- KEYWORD(naked);
-
- KEYWORD(type);
- KEYWORD(opaque);
- KEYWORD(union);
-
- KEYWORD(eq); KEYWORD(ne); KEYWORD(slt); KEYWORD(sgt); KEYWORD(sle);
- KEYWORD(sge); KEYWORD(ult); KEYWORD(ugt); KEYWORD(ule); KEYWORD(uge);
- KEYWORD(oeq); KEYWORD(one); KEYWORD(olt); KEYWORD(ogt); KEYWORD(ole);
- KEYWORD(oge); KEYWORD(ord); KEYWORD(uno); KEYWORD(ueq); KEYWORD(une);
-
- KEYWORD(x);
- KEYWORD(blockaddress);
-#undef KEYWORD
-
- // Keywords for types.
-#define TYPEKEYWORD(STR, LLVMTY) \
- if (Len == strlen(STR) && !memcmp(StartChar, STR, strlen(STR))) { \
- TyVal = LLVMTY; return lltok::Type; }
- TYPEKEYWORD("void", Type::getVoidTy(Context));
- TYPEKEYWORD("float", Type::getFloatTy(Context));
- TYPEKEYWORD("double", Type::getDoubleTy(Context));
- TYPEKEYWORD("x86_fp80", Type::getX86_FP80Ty(Context));
- TYPEKEYWORD("fp128", Type::getFP128Ty(Context));
- TYPEKEYWORD("ppc_fp128", Type::getPPC_FP128Ty(Context));
- TYPEKEYWORD("label", Type::getLabelTy(Context));
- TYPEKEYWORD("metadata", Type::getMetadataTy(Context));
-#undef TYPEKEYWORD
-
- // Handle special forms for autoupgrading. Drop these in LLVM 3.0. This is
- // to avoid conflicting with the sext/zext instructions, below.
- if (Len == 4 && !memcmp(StartChar, "sext", 4)) {
- // Scan CurPtr ahead, seeing if there is just whitespace before the newline.
- if (JustWhitespaceNewLine(CurPtr))
- return lltok::kw_signext;
- } else if (Len == 4 && !memcmp(StartChar, "zext", 4)) {
- // Scan CurPtr ahead, seeing if there is just whitespace before the newline.
- if (JustWhitespaceNewLine(CurPtr))
- return lltok::kw_zeroext;
- } else if (Len == 6 && !memcmp(StartChar, "malloc", 6)) {
- // FIXME: Remove in LLVM 3.0.
- // Autoupgrade malloc instruction.
- return lltok::kw_malloc;
- } else if (Len == 4 && !memcmp(StartChar, "free", 4)) {
- // FIXME: Remove in LLVM 3.0.
- // Autoupgrade free instruction.
- return lltok::kw_free;
- }
-
- // Keywords for instructions.
-#define INSTKEYWORD(STR, Enum) \
- if (Len == strlen(#STR) && !memcmp(StartChar, #STR, strlen(#STR))) { \
- UIntVal = Instruction::Enum; return lltok::kw_##STR; }
-
- INSTKEYWORD(add, Add); INSTKEYWORD(fadd, FAdd);
- INSTKEYWORD(sub, Sub); INSTKEYWORD(fsub, FSub);
- INSTKEYWORD(mul, Mul); INSTKEYWORD(fmul, FMul);
- INSTKEYWORD(udiv, UDiv); INSTKEYWORD(sdiv, SDiv); INSTKEYWORD(fdiv, FDiv);
- INSTKEYWORD(urem, URem); INSTKEYWORD(srem, SRem); INSTKEYWORD(frem, FRem);
- INSTKEYWORD(shl, Shl); INSTKEYWORD(lshr, LShr); INSTKEYWORD(ashr, AShr);
- INSTKEYWORD(and, And); INSTKEYWORD(or, Or); INSTKEYWORD(xor, Xor);
- INSTKEYWORD(icmp, ICmp); INSTKEYWORD(fcmp, FCmp);
-
- INSTKEYWORD(phi, PHI);
- INSTKEYWORD(call, Call);
- INSTKEYWORD(trunc, Trunc);
- INSTKEYWORD(zext, ZExt);
- INSTKEYWORD(sext, SExt);
- INSTKEYWORD(fptrunc, FPTrunc);
- INSTKEYWORD(fpext, FPExt);
- INSTKEYWORD(uitofp, UIToFP);
- INSTKEYWORD(sitofp, SIToFP);
- INSTKEYWORD(fptoui, FPToUI);
- INSTKEYWORD(fptosi, FPToSI);
- INSTKEYWORD(inttoptr, IntToPtr);
- INSTKEYWORD(ptrtoint, PtrToInt);
- INSTKEYWORD(bitcast, BitCast);
- INSTKEYWORD(select, Select);
- INSTKEYWORD(va_arg, VAArg);
- INSTKEYWORD(ret, Ret);
- INSTKEYWORD(br, Br);
- INSTKEYWORD(switch, Switch);
- INSTKEYWORD(indirectbr, IndirectBr);
- INSTKEYWORD(invoke, Invoke);
- INSTKEYWORD(unwind, Unwind);
- INSTKEYWORD(unreachable, Unreachable);
-
- INSTKEYWORD(alloca, Alloca);
- INSTKEYWORD(load, Load);
- INSTKEYWORD(store, Store);
- INSTKEYWORD(getelementptr, GetElementPtr);
-
- INSTKEYWORD(extractelement, ExtractElement);
- INSTKEYWORD(insertelement, InsertElement);
- INSTKEYWORD(shufflevector, ShuffleVector);
- INSTKEYWORD(getresult, ExtractValue);
- INSTKEYWORD(extractvalue, ExtractValue);
- INSTKEYWORD(insertvalue, InsertValue);
-#undef INSTKEYWORD
-
- // Check for [us]0x[0-9A-Fa-f]+, which are hexadecimal constants generated by
- // the CFE to avoid forcing it to deal with 64-bit numbers.
- if ((TokStart[0] == 'u' || TokStart[0] == 's') &&
- TokStart[1] == '0' && TokStart[2] == 'x' && isxdigit(TokStart[3])) {
- int len = CurPtr-TokStart-3;
- uint32_t bits = len * 4;
- APInt Tmp(bits, StringRef(TokStart+3, len), 16);
- uint32_t activeBits = Tmp.getActiveBits();
- if (activeBits > 0 && activeBits < bits)
- Tmp.trunc(activeBits);
- APSIntVal = APSInt(Tmp, TokStart[0] == 'u');
- return lltok::APSInt;
- }
-
- // If this is "cc1234", return this as just "cc".
- if (TokStart[0] == 'c' && TokStart[1] == 'c') {
- CurPtr = TokStart+2;
- return lltok::kw_cc;
- }
-
- // If this starts with "call", return it as CALL. This is to support old
- // broken .ll files. FIXME: remove this with LLVM 3.0.
- if (CurPtr-TokStart > 4 && !memcmp(TokStart, "call", 4)) {
- CurPtr = TokStart+4;
- UIntVal = Instruction::Call;
- return lltok::kw_call;
- }
-
- // Finally, if this isn't known, return an error.
- CurPtr = TokStart+1;
- return lltok::Error;
-}
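
The KEYWORD()/INSTKEYWORD() macros above all reduce to the same check: compare the token length first, then memcmp the bytes, so each keyword becomes one cheap guarded comparison. A compressed sketch of that pattern with a made-up token enum (not the real lltok kinds):

#include <cstddef>
#include <cstring>

enum MiniKind { kw_declare, kw_define, kw_global, kw_unknown };

static MiniKind matchKeyword(const char *start, std::size_t len) {
#define MINI_KEYWORD(STR, KIND)                                             \
  if (len == std::strlen(STR) && std::memcmp(start, STR, len) == 0)         \
    return KIND;
  MINI_KEYWORD("declare", kw_declare)
  MINI_KEYWORD("define",  kw_define)
  MINI_KEYWORD("global",  kw_global)
#undef MINI_KEYWORD
  return kw_unknown;                  // not a keyword; caller tries the next token class
}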
-
-
-/// Lex0x: Handle productions that start with 0x, knowing that it matches and
-/// that this is not a label:
-/// HexFPConstant 0x[0-9A-Fa-f]+
-/// HexFP80Constant 0xK[0-9A-Fa-f]+
-/// HexFP128Constant 0xL[0-9A-Fa-f]+
-/// HexPPC128Constant 0xM[0-9A-Fa-f]+
-lltok::Kind LLLexer::Lex0x() {
- CurPtr = TokStart + 2;
-
- char Kind;
- if (CurPtr[0] >= 'K' && CurPtr[0] <= 'M') {
- Kind = *CurPtr++;
- } else {
- Kind = 'J';
- }
-
- if (!isxdigit(CurPtr[0])) {
- // Bad token, return it as an error.
- CurPtr = TokStart+1;
- return lltok::Error;
- }
-
- while (isxdigit(CurPtr[0]))
- ++CurPtr;
-
- if (Kind == 'J') {
- // HexFPConstant - Floating point constant represented in IEEE format as a
- // hexadecimal number for when exponential notation is not precise enough.
- // Float and double only.
- APFloatVal = APFloat(BitsToDouble(HexIntToVal(TokStart+2, CurPtr)));
- return lltok::APFloat;
- }
-
- uint64_t Pair[2];
- switch (Kind) {
- default: llvm_unreachable("Unknown kind!");
- case 'K':
- // F80HexFPConstant - x87 long double in hexadecimal format (10 bytes)
- FP80HexToIntPair(TokStart+3, CurPtr, Pair);
- APFloatVal = APFloat(APInt(80, 2, Pair));
- return lltok::APFloat;
- case 'L':
- // F128HexFPConstant - IEEE 128-bit in hexadecimal format (16 bytes)
- HexToIntPair(TokStart+3, CurPtr, Pair);
- APFloatVal = APFloat(APInt(128, 2, Pair), true);
- return lltok::APFloat;
- case 'M':
- // PPC128HexFPConstant - PowerPC 128-bit in hexadecimal format (16 bytes)
- HexToIntPair(TokStart+3, CurPtr, Pair);
- APFloatVal = APFloat(APInt(128, 2, Pair));
- return lltok::APFloat;
- }
-}
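
The plain 0x... form handled in the Kind == 'J' branch is not a decimal number: the hex digits are the raw IEEE-754 bit pattern of a double. A rough standalone equivalent of the HexIntToVal plus BitsToDouble step, assuming the caller has already validated that [p, end) contains only hex digits:

#include <cctype>
#include <cstdint>
#include <cstring>

static double hexBitsToDouble(const char *p, const char *end) {
  uint64_t bits = 0;
  for (; p != end; ++p) {
    bits <<= 4;
    unsigned char c = (unsigned char)*p;
    bits |= std::isdigit(c) ? unsigned(c - '0')
                            : unsigned(std::tolower(c) - 'a' + 10);
  }
  double d;
  std::memcpy(&d, &bits, sizeof d);   // reinterpret the bits, as BitsToDouble() does
  return d;
}
// e.g. "3FF0000000000000" decodes to 1.0; the 0xK/0xL/0xM long-double forms
// need APFloat and are not sketched here.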
-
- /// LexDigitOrNegative: Handle several related productions:
-/// Label [-a-zA-Z$._0-9]+:
-/// NInteger -[0-9]+
-/// FPConstant [-+]?[0-9]+[.][0-9]*([eE][-+]?[0-9]+)?
-/// PInteger [0-9]+
-/// HexFPConstant 0x[0-9A-Fa-f]+
-/// HexFP80Constant 0xK[0-9A-Fa-f]+
-/// HexFP128Constant 0xL[0-9A-Fa-f]+
-/// HexPPC128Constant 0xM[0-9A-Fa-f]+
-lltok::Kind LLLexer::LexDigitOrNegative() {
- // If the token does not start with a digit and the character after the '-'
- // is not a digit either, this is probably a label.
- if (!isdigit(TokStart[0]) && !isdigit(CurPtr[0])) {
- // Okay, this is not a number after the -, it's probably a label.
- if (const char *End = isLabelTail(CurPtr)) {
- StrVal.assign(TokStart, End-1);
- CurPtr = End;
- return lltok::LabelStr;
- }
-
- return lltok::Error;
- }
-
- // At this point, it is either a label, int or fp constant.
-
- // Skip digits, we have at least one.
- for (; isdigit(CurPtr[0]); ++CurPtr)
- /*empty*/;
-
- // Check to see if this really is a label after all, e.g. "-1:".
- if (isLabelChar(CurPtr[0]) || CurPtr[0] == ':') {
- if (const char *End = isLabelTail(CurPtr)) {
- StrVal.assign(TokStart, End-1);
- CurPtr = End;
- return lltok::LabelStr;
- }
- }
-
- // If the next character is a '.', then this is a floating-point value;
- // otherwise it is an integer.
- if (CurPtr[0] != '.') {
- if (TokStart[0] == '0' && TokStart[1] == 'x')
- return Lex0x();
- unsigned Len = CurPtr-TokStart;
- uint32_t numBits = ((Len * 64) / 19) + 2;
- APInt Tmp(numBits, StringRef(TokStart, Len), 10);
- if (TokStart[0] == '-') {
- uint32_t minBits = Tmp.getMinSignedBits();
- if (minBits > 0 && minBits < numBits)
- Tmp.trunc(minBits);
- APSIntVal = APSInt(Tmp, false);
- } else {
- uint32_t activeBits = Tmp.getActiveBits();
- if (activeBits > 0 && activeBits < numBits)
- Tmp.trunc(activeBits);
- APSIntVal = APSInt(Tmp, true);
- }
- return lltok::APSInt;
- }
-
- ++CurPtr;
-
- // Skip over [0-9]*([eE][-+]?[0-9]+)?
- while (isdigit(CurPtr[0])) ++CurPtr;
-
- if (CurPtr[0] == 'e' || CurPtr[0] == 'E') {
- if (isdigit(CurPtr[1]) ||
- ((CurPtr[1] == '-' || CurPtr[1] == '+') && isdigit(CurPtr[2]))) {
- CurPtr += 2;
- while (isdigit(CurPtr[0])) ++CurPtr;
- }
- }
-
- APFloatVal = APFloat(atof(TokStart));
- return lltok::APFloat;
-}
-
-/// FPConstant [-+]?[0-9]+[.][0-9]*([eE][-+]?[0-9]+)?
-lltok::Kind LLLexer::LexPositive() {
- // The character after the '+' must be a digit; otherwise this is not a
- // valid constant.
- if (!isdigit(CurPtr[0]))
- return lltok::Error;
-
- // Skip digits.
- for (++CurPtr; isdigit(CurPtr[0]); ++CurPtr)
- /*empty*/;
-
- // At this point, we need a '.'.
- if (CurPtr[0] != '.') {
- CurPtr = TokStart+1;
- return lltok::Error;
- }
-
- ++CurPtr;
-
- // Skip over [0-9]*([eE][-+]?[0-9]+)?
- while (isdigit(CurPtr[0])) ++CurPtr;
-
- if (CurPtr[0] == 'e' || CurPtr[0] == 'E') {
- if (isdigit(CurPtr[1]) ||
- ((CurPtr[1] == '-' || CurPtr[1] == '+') && isdigit(CurPtr[2]))) {
- CurPtr += 2;
- while (isdigit(CurPtr[0])) ++CurPtr;
- }
- }
-
- APFloatVal = APFloat(atof(TokStart));
- return lltok::APFloat;
-}
diff --git a/libclamav/c++/llvm/lib/AsmParser/LLLexer.h b/libclamav/c++/llvm/lib/AsmParser/LLLexer.h
deleted file mode 100644
index 3057992..0000000
--- a/libclamav/c++/llvm/lib/AsmParser/LLLexer.h
+++ /dev/null
@@ -1,90 +0,0 @@
-//===- LLLexer.h - Lexer for LLVM Assembly Files ----------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class represents the Lexer for .ll files.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LIB_ASMPARSER_LLLEXER_H
-#define LIB_ASMPARSER_LLLEXER_H
-
-#include "LLToken.h"
-#include "llvm/ADT/APSInt.h"
-#include "llvm/ADT/APFloat.h"
-#include "llvm/Support/SourceMgr.h"
-#include <string>
-
-namespace llvm {
- class MemoryBuffer;
- class Type;
- class SMDiagnostic;
- class LLVMContext;
-
- class LLLexer {
- const char *CurPtr;
- MemoryBuffer *CurBuf;
- SMDiagnostic &ErrorInfo;
- SourceMgr &SM;
- LLVMContext &Context;
-
- // Information about the current token.
- const char *TokStart;
- lltok::Kind CurKind;
- std::string StrVal;
- unsigned UIntVal;
- const Type *TyVal;
- APFloat APFloatVal;
- APSInt APSIntVal;
-
- std::string TheError;
- public:
- explicit LLLexer(MemoryBuffer *StartBuf, SourceMgr &SM, SMDiagnostic &,
- LLVMContext &C);
- ~LLLexer() {}
-
- lltok::Kind Lex() {
- return CurKind = LexToken();
- }
-
- typedef SMLoc LocTy;
- LocTy getLoc() const { return SMLoc::getFromPointer(TokStart); }
- lltok::Kind getKind() const { return CurKind; }
- const std::string getStrVal() const { return StrVal; }
- const Type *getTyVal() const { return TyVal; }
- unsigned getUIntVal() const { return UIntVal; }
- const APSInt &getAPSIntVal() const { return APSIntVal; }
- const APFloat &getAPFloatVal() const { return APFloatVal; }
-
-
- bool Error(LocTy L, const std::string &Msg) const;
- bool Error(const std::string &Msg) const { return Error(getLoc(), Msg); }
- std::string getFilename() const;
-
- private:
- lltok::Kind LexToken();
-
- int getNextChar();
- void SkipLineComment();
- lltok::Kind LexIdentifier();
- lltok::Kind LexDigitOrNegative();
- lltok::Kind LexPositive();
- lltok::Kind LexAt();
- lltok::Kind LexExclaim();
- lltok::Kind LexPercent();
- lltok::Kind LexQuote();
- lltok::Kind Lex0x();
-
- uint64_t atoull(const char *Buffer, const char *End);
- uint64_t HexIntToVal(const char *Buffer, const char *End);
- void HexToIntPair(const char *Buffer, const char *End, uint64_t Pair[2]);
- void FP80HexToIntPair(const char *Buff, const char *End, uint64_t Pair[2]);
- };
-} // end namespace llvm
-
-#endif
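
For readers following the header above, the key shape is that the lexer owns the current-token state: Lex() both advances and caches the token, so the parser can keep asking getKind()/getStrVal() without re-lexing. A toy version of that contract (whitespace-separated words only, all names invented):

#include <string>

enum MiniTok { tok_eof, tok_word };

class MiniLexer {
  const char *Cur;
  MiniTok CurKind;
  std::string StrVal;
public:
  explicit MiniLexer(const char *Buf) : Cur(Buf), CurKind(tok_eof) {}

  MiniTok Lex() { return CurKind = LexToken(); }   // advance + cache, like LLLexer::Lex()
  MiniTok getKind() const { return CurKind; }
  const std::string &getStrVal() const { return StrVal; }

private:
  MiniTok LexToken() {
    while (*Cur == ' ' || *Cur == '\t' || *Cur == '\n' || *Cur == '\r') ++Cur;
    if (!*Cur) return tok_eof;
    const char *Start = Cur;
    while (*Cur && *Cur != ' ' && *Cur != '\t' && *Cur != '\n' && *Cur != '\r')
      ++Cur;
    StrVal.assign(Start, Cur);
    return tok_word;
  }
};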
diff --git a/libclamav/c++/llvm/lib/AsmParser/LLParser.cpp b/libclamav/c++/llvm/lib/AsmParser/LLParser.cpp
deleted file mode 100644
index 8083a07..0000000
--- a/libclamav/c++/llvm/lib/AsmParser/LLParser.cpp
+++ /dev/null
@@ -1,3936 +0,0 @@
-//===-- LLParser.cpp - Parser Class ---------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the parser class for .ll files.
-//
-//===----------------------------------------------------------------------===//
-
-#include "LLParser.h"
-#include "llvm/AutoUpgrade.h"
-#include "llvm/CallingConv.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/InlineAsm.h"
-#include "llvm/Instructions.h"
-#include "llvm/Module.h"
-#include "llvm/Operator.h"
-#include "llvm/ValueSymbolTable.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-/// Run: module ::= toplevelentity*
-bool LLParser::Run() {
- // Prime the lexer.
- Lex.Lex();
-
- return ParseTopLevelEntities() ||
- ValidateEndOfModule();
-}
-
-/// ValidateEndOfModule - Do final validity and sanity checks at the end of the
-/// module.
-bool LLParser::ValidateEndOfModule() {
- // Update auto-upgraded malloc calls to "malloc".
- // FIXME: Remove in LLVM 3.0.
- if (MallocF) {
- MallocF->setName("malloc");
- // If setName() does not set the name to "malloc", then there is already a
- // declaration of "malloc". In that case, iterate over all calls to MallocF
- // and get them to call the declared "malloc" instead.
- if (MallocF->getName() != "malloc") {
- Constant *RealMallocF = M->getFunction("malloc");
- if (RealMallocF->getType() != MallocF->getType())
- RealMallocF = ConstantExpr::getBitCast(RealMallocF, MallocF->getType());
- MallocF->replaceAllUsesWith(RealMallocF);
- MallocF->eraseFromParent();
- MallocF = NULL;
- }
- }
-
-
- // If there are entries in ForwardRefBlockAddresses at this point, they are
- // references after the function was defined. Resolve those now.
- while (!ForwardRefBlockAddresses.empty()) {
- // Okay, we are referencing an already-parsed function, resolve them now.
- Function *TheFn = 0;
- const ValID &Fn = ForwardRefBlockAddresses.begin()->first;
- if (Fn.Kind == ValID::t_GlobalName)
- TheFn = M->getFunction(Fn.StrVal);
- else if (Fn.UIntVal < NumberedVals.size())
- TheFn = dyn_cast<Function>(NumberedVals[Fn.UIntVal]);
-
- if (TheFn == 0)
- return Error(Fn.Loc, "unknown function referenced by blockaddress");
-
- // Resolve all these references.
- if (ResolveForwardRefBlockAddresses(TheFn,
- ForwardRefBlockAddresses.begin()->second,
- 0))
- return true;
-
- ForwardRefBlockAddresses.erase(ForwardRefBlockAddresses.begin());
- }
-
-
- if (!ForwardRefTypes.empty())
- return Error(ForwardRefTypes.begin()->second.second,
- "use of undefined type named '" +
- ForwardRefTypes.begin()->first + "'");
- if (!ForwardRefTypeIDs.empty())
- return Error(ForwardRefTypeIDs.begin()->second.second,
- "use of undefined type '%" +
- utostr(ForwardRefTypeIDs.begin()->first) + "'");
-
- if (!ForwardRefVals.empty())
- return Error(ForwardRefVals.begin()->second.second,
- "use of undefined value '@" + ForwardRefVals.begin()->first +
- "'");
-
- if (!ForwardRefValIDs.empty())
- return Error(ForwardRefValIDs.begin()->second.second,
- "use of undefined value '@" +
- utostr(ForwardRefValIDs.begin()->first) + "'");
-
- if (!ForwardRefMDNodes.empty())
- return Error(ForwardRefMDNodes.begin()->second.second,
- "use of undefined metadata '!" +
- utostr(ForwardRefMDNodes.begin()->first) + "'");
-
-
- // Look for intrinsic functions and CallInst that need to be upgraded
- for (Module::iterator FI = M->begin(), FE = M->end(); FI != FE; )
- UpgradeCallsToIntrinsic(FI++); // must be post-increment, as we may remove the function
-
- // Check debug info intrinsics.
- CheckDebugInfoIntrinsics(M);
- return false;
-}
-
-bool LLParser::ResolveForwardRefBlockAddresses(Function *TheFn,
- std::vector<std::pair<ValID, GlobalValue*> > &Refs,
- PerFunctionState *PFS) {
- // Loop over all the references, resolving them.
- for (unsigned i = 0, e = Refs.size(); i != e; ++i) {
- BasicBlock *Res;
- if (PFS) {
- if (Refs[i].first.Kind == ValID::t_LocalName)
- Res = PFS->GetBB(Refs[i].first.StrVal, Refs[i].first.Loc);
- else
- Res = PFS->GetBB(Refs[i].first.UIntVal, Refs[i].first.Loc);
- } else if (Refs[i].first.Kind == ValID::t_LocalID) {
- return Error(Refs[i].first.Loc,
- "cannot take address of numeric label after the function is defined");
- } else {
- Res = dyn_cast_or_null<BasicBlock>(
- TheFn->getValueSymbolTable().lookup(Refs[i].first.StrVal));
- }
-
- if (Res == 0)
- return Error(Refs[i].first.Loc,
- "referenced value is not a basic block");
-
- // Get the BlockAddress for this and update references to use it.
- BlockAddress *BA = BlockAddress::get(TheFn, Res);
- Refs[i].second->replaceAllUsesWith(BA);
- Refs[i].second->eraseFromParent();
- }
- return false;
-}
-
-
-//===----------------------------------------------------------------------===//
-// Top-Level Entities
-//===----------------------------------------------------------------------===//
-
-bool LLParser::ParseTopLevelEntities() {
- while (1) {
- switch (Lex.getKind()) {
- default: return TokError("expected top-level entity");
- case lltok::Eof: return false;
- //case lltok::kw_define:
- case lltok::kw_declare: if (ParseDeclare()) return true; break;
- case lltok::kw_define: if (ParseDefine()) return true; break;
- case lltok::kw_module: if (ParseModuleAsm()) return true; break;
- case lltok::kw_target: if (ParseTargetDefinition()) return true; break;
- case lltok::kw_deplibs: if (ParseDepLibs()) return true; break;
- case lltok::kw_type: if (ParseUnnamedType()) return true; break;
- case lltok::LocalVarID: if (ParseUnnamedType()) return true; break;
- case lltok::StringConstant: // FIXME: REMOVE IN LLVM 3.0
- case lltok::LocalVar: if (ParseNamedType()) return true; break;
- case lltok::GlobalID: if (ParseUnnamedGlobal()) return true; break;
- case lltok::GlobalVar: if (ParseNamedGlobal()) return true; break;
- case lltok::exclaim: if (ParseStandaloneMetadata()) return true; break;
- case lltok::MetadataVar: if (ParseNamedMetadata()) return true; break;
-
- // The Global variable production with no name can have many different
- // optional leading prefixes; the production is:
- // GlobalVar ::= OptionalLinkage OptionalVisibility OptionalThreadLocal
- // OptionalAddrSpace ('constant'|'global') ...
- case lltok::kw_private : // OptionalLinkage
- case lltok::kw_linker_private: // OptionalLinkage
- case lltok::kw_internal: // OptionalLinkage
- case lltok::kw_weak: // OptionalLinkage
- case lltok::kw_weak_odr: // OptionalLinkage
- case lltok::kw_linkonce: // OptionalLinkage
- case lltok::kw_linkonce_odr: // OptionalLinkage
- case lltok::kw_appending: // OptionalLinkage
- case lltok::kw_dllexport: // OptionalLinkage
- case lltok::kw_common: // OptionalLinkage
- case lltok::kw_dllimport: // OptionalLinkage
- case lltok::kw_extern_weak: // OptionalLinkage
- case lltok::kw_external: { // OptionalLinkage
- unsigned Linkage, Visibility;
- if (ParseOptionalLinkage(Linkage) ||
- ParseOptionalVisibility(Visibility) ||
- ParseGlobal("", SMLoc(), Linkage, true, Visibility))
- return true;
- break;
- }
- case lltok::kw_default: // OptionalVisibility
- case lltok::kw_hidden: // OptionalVisibility
- case lltok::kw_protected: { // OptionalVisibility
- unsigned Visibility;
- if (ParseOptionalVisibility(Visibility) ||
- ParseGlobal("", SMLoc(), 0, false, Visibility))
- return true;
- break;
- }
-
- case lltok::kw_thread_local: // OptionalThreadLocal
- case lltok::kw_addrspace: // OptionalAddrSpace
- case lltok::kw_constant: // GlobalType
- case lltok::kw_global: // GlobalType
- if (ParseGlobal("", SMLoc(), 0, false, 0)) return true;
- break;
- }
- }
-}
-
-
-/// toplevelentity
-/// ::= 'module' 'asm' STRINGCONSTANT
-bool LLParser::ParseModuleAsm() {
- assert(Lex.getKind() == lltok::kw_module);
- Lex.Lex();
-
- std::string AsmStr;
- if (ParseToken(lltok::kw_asm, "expected 'module asm'") ||
- ParseStringConstant(AsmStr)) return true;
-
- const std::string &AsmSoFar = M->getModuleInlineAsm();
- if (AsmSoFar.empty())
- M->setModuleInlineAsm(AsmStr);
- else
- M->setModuleInlineAsm(AsmSoFar+"\n"+AsmStr);
- return false;
-}
-
-/// toplevelentity
-/// ::= 'target' 'triple' '=' STRINGCONSTANT
-/// ::= 'target' 'datalayout' '=' STRINGCONSTANT
-bool LLParser::ParseTargetDefinition() {
- assert(Lex.getKind() == lltok::kw_target);
- std::string Str;
- switch (Lex.Lex()) {
- default: return TokError("unknown target property");
- case lltok::kw_triple:
- Lex.Lex();
- if (ParseToken(lltok::equal, "expected '=' after target triple") ||
- ParseStringConstant(Str))
- return true;
- M->setTargetTriple(Str);
- return false;
- case lltok::kw_datalayout:
- Lex.Lex();
- if (ParseToken(lltok::equal, "expected '=' after target datalayout") ||
- ParseStringConstant(Str))
- return true;
- M->setDataLayout(Str);
- return false;
- }
-}
-
-/// toplevelentity
-/// ::= 'deplibs' '=' '[' ']'
-/// ::= 'deplibs' '=' '[' STRINGCONSTANT (',' STRINGCONSTANT)* ']'
-bool LLParser::ParseDepLibs() {
- assert(Lex.getKind() == lltok::kw_deplibs);
- Lex.Lex();
- if (ParseToken(lltok::equal, "expected '=' after deplibs") ||
- ParseToken(lltok::lsquare, "expected '=' after deplibs"))
- return true;
-
- if (EatIfPresent(lltok::rsquare))
- return false;
-
- std::string Str;
- if (ParseStringConstant(Str)) return true;
- M->addLibrary(Str);
-
- while (EatIfPresent(lltok::comma)) {
- if (ParseStringConstant(Str)) return true;
- M->addLibrary(Str);
- }
-
- return ParseToken(lltok::rsquare, "expected ']' at end of list");
-}
-
-/// ParseUnnamedType:
-/// ::= 'type' type
-/// ::= LocalVarID '=' 'type' type
-bool LLParser::ParseUnnamedType() {
- unsigned TypeID = NumberedTypes.size();
-
- // Handle the LocalVarID form.
- if (Lex.getKind() == lltok::LocalVarID) {
- if (Lex.getUIntVal() != TypeID)
- return Error(Lex.getLoc(), "type expected to be numbered '%" +
- utostr(TypeID) + "'");
- Lex.Lex(); // eat LocalVarID;
-
- if (ParseToken(lltok::equal, "expected '=' after name"))
- return true;
- }
-
- assert(Lex.getKind() == lltok::kw_type);
- LocTy TypeLoc = Lex.getLoc();
- Lex.Lex(); // eat kw_type
-
- PATypeHolder Ty(Type::getVoidTy(Context));
- if (ParseType(Ty)) return true;
-
- // See if this type was previously referenced.
- std::map<unsigned, std::pair<PATypeHolder, LocTy> >::iterator
- FI = ForwardRefTypeIDs.find(TypeID);
- if (FI != ForwardRefTypeIDs.end()) {
- if (FI->second.first.get() == Ty)
- return Error(TypeLoc, "self referential type is invalid");
-
- cast<DerivedType>(FI->second.first.get())->refineAbstractTypeTo(Ty);
- Ty = FI->second.first.get();
- ForwardRefTypeIDs.erase(FI);
- }
-
- NumberedTypes.push_back(Ty);
-
- return false;
-}
-
-/// toplevelentity
-/// ::= LocalVar '=' 'type' type
-bool LLParser::ParseNamedType() {
- std::string Name = Lex.getStrVal();
- LocTy NameLoc = Lex.getLoc();
- Lex.Lex(); // eat LocalVar.
-
- PATypeHolder Ty(Type::getVoidTy(Context));
-
- if (ParseToken(lltok::equal, "expected '=' after name") ||
- ParseToken(lltok::kw_type, "expected 'type' after name") ||
- ParseType(Ty))
- return true;
-
- // Set the type name, checking for conflicts as we do so.
- bool AlreadyExists = M->addTypeName(Name, Ty);
- if (!AlreadyExists) return false;
-
- // See if this type is a forward reference. We need to eagerly resolve
- // types to allow recursive type redefinitions below.
- std::map<std::string, std::pair<PATypeHolder, LocTy> >::iterator
- FI = ForwardRefTypes.find(Name);
- if (FI != ForwardRefTypes.end()) {
- if (FI->second.first.get() == Ty)
- return Error(NameLoc, "self referential type is invalid");
-
- cast<DerivedType>(FI->second.first.get())->refineAbstractTypeTo(Ty);
- Ty = FI->second.first.get();
- ForwardRefTypes.erase(FI);
- }
-
- // We are inserting a name that is already defined; look up the existing type.
- const Type *Existing = M->getTypeByName(Name);
- assert(Existing && "Conflict but no matching type?!");
-
- // Otherwise, this is an attempt to redefine a type. That's okay if
- // the redefinition is identical to the original.
- // FIXME: REMOVE REDEFINITIONS IN LLVM 3.0
- if (Existing == Ty) return false;
-
- // Any other kind of (non-equivalent) redefinition is an error.
- return Error(NameLoc, "redefinition of type named '" + Name + "' of type '" +
- Ty->getDescription() + "'");
-}
-
-
-/// toplevelentity
-/// ::= 'declare' FunctionHeader
-bool LLParser::ParseDeclare() {
- assert(Lex.getKind() == lltok::kw_declare);
- Lex.Lex();
-
- Function *F;
- return ParseFunctionHeader(F, false);
-}
-
-/// toplevelentity
-/// ::= 'define' FunctionHeader '{' ...
-bool LLParser::ParseDefine() {
- assert(Lex.getKind() == lltok::kw_define);
- Lex.Lex();
-
- Function *F;
- return ParseFunctionHeader(F, true) ||
- ParseFunctionBody(*F);
-}
-
-/// ParseGlobalType
-/// ::= 'constant'
-/// ::= 'global'
-bool LLParser::ParseGlobalType(bool &IsConstant) {
- if (Lex.getKind() == lltok::kw_constant)
- IsConstant = true;
- else if (Lex.getKind() == lltok::kw_global)
- IsConstant = false;
- else {
- IsConstant = false;
- return TokError("expected 'global' or 'constant'");
- }
- Lex.Lex();
- return false;
-}
-
-/// ParseUnnamedGlobal:
-/// OptionalVisibility ALIAS ...
-/// OptionalLinkage OptionalVisibility ... -> global variable
-/// GlobalID '=' OptionalVisibility ALIAS ...
-/// GlobalID '=' OptionalLinkage OptionalVisibility ... -> global variable
-bool LLParser::ParseUnnamedGlobal() {
- unsigned VarID = NumberedVals.size();
- std::string Name;
- LocTy NameLoc = Lex.getLoc();
-
- // Handle the GlobalID form.
- if (Lex.getKind() == lltok::GlobalID) {
- if (Lex.getUIntVal() != VarID)
- return Error(Lex.getLoc(), "variable expected to be numbered '%" +
- utostr(VarID) + "'");
- Lex.Lex(); // eat GlobalID;
-
- if (ParseToken(lltok::equal, "expected '=' after name"))
- return true;
- }
-
- bool HasLinkage;
- unsigned Linkage, Visibility;
- if (ParseOptionalLinkage(Linkage, HasLinkage) ||
- ParseOptionalVisibility(Visibility))
- return true;
-
- if (HasLinkage || Lex.getKind() != lltok::kw_alias)
- return ParseGlobal(Name, NameLoc, Linkage, HasLinkage, Visibility);
- return ParseAlias(Name, NameLoc, Visibility);
-}
-
-/// ParseNamedGlobal:
-/// GlobalVar '=' OptionalVisibility ALIAS ...
-/// GlobalVar '=' OptionalLinkage OptionalVisibility ... -> global variable
-bool LLParser::ParseNamedGlobal() {
- assert(Lex.getKind() == lltok::GlobalVar);
- LocTy NameLoc = Lex.getLoc();
- std::string Name = Lex.getStrVal();
- Lex.Lex();
-
- bool HasLinkage;
- unsigned Linkage, Visibility;
- if (ParseToken(lltok::equal, "expected '=' in global variable") ||
- ParseOptionalLinkage(Linkage, HasLinkage) ||
- ParseOptionalVisibility(Visibility))
- return true;
-
- if (HasLinkage || Lex.getKind() != lltok::kw_alias)
- return ParseGlobal(Name, NameLoc, Linkage, HasLinkage, Visibility);
- return ParseAlias(Name, NameLoc, Visibility);
-}
-
-// MDString:
-// ::= '!' STRINGCONSTANT
-bool LLParser::ParseMDString(MDString *&Result) {
- std::string Str;
- if (ParseStringConstant(Str)) return true;
- Result = MDString::get(Context, Str);
- return false;
-}
-
-// MDNode:
-// ::= '!' MDNodeNumber
-bool LLParser::ParseMDNodeID(MDNode *&Result) {
- // !{ ..., !42, ... }
- unsigned MID = 0;
- if (ParseUInt32(MID)) return true;
-
- // Check existing MDNode.
- if (MID < NumberedMetadata.size() && NumberedMetadata[MID] != 0) {
- Result = NumberedMetadata[MID];
- return false;
- }
-
- // Create MDNode forward reference.
-
- // FIXME: This is not unique enough!
- std::string FwdRefName = "llvm.mdnode.fwdref." + utostr(MID);
- Value *V = MDString::get(Context, FwdRefName);
- MDNode *FwdNode = MDNode::get(Context, &V, 1);
- ForwardRefMDNodes[MID] = std::make_pair(FwdNode, Lex.getLoc());
-
- if (NumberedMetadata.size() <= MID)
- NumberedMetadata.resize(MID+1);
- NumberedMetadata[MID] = FwdNode;
- Result = FwdNode;
- return false;
-}
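
ParseMDNodeID above follows the same forward-reference pattern used for globals and types throughout this parser: hand out a placeholder on first use, remember it, and patch it once the real definition arrives. A stripped-down sketch with invented types (the real code uses MDNode placeholders and replaceAllUsesWith):

#include <map>
#include <string>

struct MiniNode { std::string Payload; };

std::map<unsigned, MiniNode*> Numbered;      // !N -> node seen so far (real or placeholder)
std::map<unsigned, MiniNode*> ForwardRefs;   // placeholders still waiting for a definition

MiniNode *getNode(unsigned ID) {
  if (MiniNode *N = Numbered[ID]) return N;  // already created (possibly just a placeholder)
  MiniNode *Placeholder = new MiniNode();
  Placeholder->Payload = "fwdref";
  Numbered[ID] = Placeholder;
  ForwardRefs[ID] = Placeholder;
  return Placeholder;
}

void defineNode(unsigned ID, const std::string &Payload) {
  std::map<unsigned, MiniNode*>::iterator I = ForwardRefs.find(ID);
  if (I != ForwardRefs.end()) {              // patch the placeholder in place,
    I->second->Payload = Payload;            // standing in for replaceAllUsesWith
    ForwardRefs.erase(I);
    return;
  }
  Numbered[ID] = new MiniNode();             // first mention is the definition itself
  Numbered[ID]->Payload = Payload;
}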
-
-/// ParseNamedMetadata:
-/// !foo = !{ !1, !2 }
-bool LLParser::ParseNamedMetadata() {
- assert(Lex.getKind() == lltok::MetadataVar);
- std::string Name = Lex.getStrVal();
- Lex.Lex();
-
- if (ParseToken(lltok::equal, "expected '=' here") ||
- ParseToken(lltok::exclaim, "Expected '!' here") ||
- ParseToken(lltok::lbrace, "Expected '{' here"))
- return true;
-
- SmallVector<MDNode *, 8> Elts;
- do {
- // Null is a special case since it is typeless.
- if (EatIfPresent(lltok::kw_null)) {
- Elts.push_back(0);
- continue;
- }
-
- if (ParseToken(lltok::exclaim, "Expected '!' here"))
- return true;
-
- MDNode *N = 0;
- if (ParseMDNodeID(N)) return true;
- Elts.push_back(N);
- } while (EatIfPresent(lltok::comma));
-
- if (ParseToken(lltok::rbrace, "expected end of metadata node"))
- return true;
-
- NamedMDNode::Create(Context, Name, Elts.data(), Elts.size(), M);
- return false;
-}
-
-/// ParseStandaloneMetadata:
-/// !42 = !{...}
-bool LLParser::ParseStandaloneMetadata() {
- assert(Lex.getKind() == lltok::exclaim);
- Lex.Lex();
- unsigned MetadataID = 0;
-
- LocTy TyLoc;
- PATypeHolder Ty(Type::getVoidTy(Context));
- SmallVector<Value *, 16> Elts;
- if (ParseUInt32(MetadataID) ||
- ParseToken(lltok::equal, "expected '=' here") ||
- ParseType(Ty, TyLoc) ||
- ParseToken(lltok::exclaim, "Expected '!' here") ||
- ParseToken(lltok::lbrace, "Expected '{' here") ||
- ParseMDNodeVector(Elts, NULL) ||
- ParseToken(lltok::rbrace, "expected end of metadata node"))
- return true;
-
- MDNode *Init = MDNode::get(Context, Elts.data(), Elts.size());
-
- // See if this was forward referenced, if so, handle it.
- std::map<unsigned, std::pair<TrackingVH<MDNode>, LocTy> >::iterator
- FI = ForwardRefMDNodes.find(MetadataID);
- if (FI != ForwardRefMDNodes.end()) {
- FI->second.first->replaceAllUsesWith(Init);
- ForwardRefMDNodes.erase(FI);
-
- assert(NumberedMetadata[MetadataID] == Init && "Tracking VH didn't work");
- } else {
- if (MetadataID >= NumberedMetadata.size())
- NumberedMetadata.resize(MetadataID+1);
-
- if (NumberedMetadata[MetadataID] != 0)
- return TokError("Metadata id is already used");
- NumberedMetadata[MetadataID] = Init;
- }
-
- return false;
-}
-
-/// ParseAlias:
-/// ::= GlobalVar '=' OptionalVisibility 'alias' OptionalLinkage Aliasee
-/// Aliasee
-/// ::= TypeAndValue
-/// ::= 'bitcast' '(' TypeAndValue 'to' Type ')'
-/// ::= 'getelementptr' 'inbounds'? '(' ... ')'
-///
-/// Everything through visibility has already been parsed.
-///
-bool LLParser::ParseAlias(const std::string &Name, LocTy NameLoc,
- unsigned Visibility) {
- assert(Lex.getKind() == lltok::kw_alias);
- Lex.Lex();
- unsigned Linkage;
- LocTy LinkageLoc = Lex.getLoc();
- if (ParseOptionalLinkage(Linkage))
- return true;
-
- if (Linkage != GlobalValue::ExternalLinkage &&
- Linkage != GlobalValue::WeakAnyLinkage &&
- Linkage != GlobalValue::WeakODRLinkage &&
- Linkage != GlobalValue::InternalLinkage &&
- Linkage != GlobalValue::PrivateLinkage &&
- Linkage != GlobalValue::LinkerPrivateLinkage)
- return Error(LinkageLoc, "invalid linkage type for alias");
-
- Constant *Aliasee;
- LocTy AliaseeLoc = Lex.getLoc();
- if (Lex.getKind() != lltok::kw_bitcast &&
- Lex.getKind() != lltok::kw_getelementptr) {
- if (ParseGlobalTypeAndValue(Aliasee)) return true;
- } else {
- // The bitcast dest type is not present, it is implied by the dest type.
- ValID ID;
- if (ParseValID(ID)) return true;
- if (ID.Kind != ValID::t_Constant)
- return Error(AliaseeLoc, "invalid aliasee");
- Aliasee = ID.ConstantVal;
- }
-
- if (!Aliasee->getType()->isPointerTy())
- return Error(AliaseeLoc, "alias must have pointer type");
-
- // Okay, create the alias but do not insert it into the module yet.
- GlobalAlias* GA = new GlobalAlias(Aliasee->getType(),
- (GlobalValue::LinkageTypes)Linkage, Name,
- Aliasee);
- GA->setVisibility((GlobalValue::VisibilityTypes)Visibility);
-
- // See if this value already exists in the symbol table. If so, it is either
- // a redefinition or a definition of a forward reference.
- if (GlobalValue *Val = M->getNamedValue(Name)) {
- // See if this was a redefinition. If so, there is no entry in
- // ForwardRefVals.
- std::map<std::string, std::pair<GlobalValue*, LocTy> >::iterator
- I = ForwardRefVals.find(Name);
- if (I == ForwardRefVals.end())
- return Error(NameLoc, "redefinition of global named '@" + Name + "'");
-
- // Otherwise, this was a definition of forward ref. Verify that types
- // agree.
- if (Val->getType() != GA->getType())
- return Error(NameLoc,
- "forward reference and definition of alias have different types");
-
- // If they agree, just RAUW the old value with the alias and remove the
- // forward ref info.
- Val->replaceAllUsesWith(GA);
- Val->eraseFromParent();
- ForwardRefVals.erase(I);
- }
-
- // Insert into the module, we know its name won't collide now.
- M->getAliasList().push_back(GA);
- assert(GA->getNameStr() == Name && "Should not be a name conflict!");
-
- return false;
-}
-
-/// ParseGlobal
-/// ::= GlobalVar '=' OptionalLinkage OptionalVisibility OptionalThreadLocal
-/// OptionalAddrSpace GlobalType Type Const
-/// ::= OptionalLinkage OptionalVisibility OptionalThreadLocal
-/// OptionalAddrSpace GlobalType Type Const
-///
-/// Everything through visibility has been parsed already.
-///
-bool LLParser::ParseGlobal(const std::string &Name, LocTy NameLoc,
- unsigned Linkage, bool HasLinkage,
- unsigned Visibility) {
- unsigned AddrSpace;
- bool ThreadLocal, IsConstant;
- LocTy TyLoc;
-
- PATypeHolder Ty(Type::getVoidTy(Context));
- if (ParseOptionalToken(lltok::kw_thread_local, ThreadLocal) ||
- ParseOptionalAddrSpace(AddrSpace) ||
- ParseGlobalType(IsConstant) ||
- ParseType(Ty, TyLoc))
- return true;
-
- // If the linkage is specified and is external, then no initializer is
- // present.
- Constant *Init = 0;
- if (!HasLinkage || (Linkage != GlobalValue::DLLImportLinkage &&
- Linkage != GlobalValue::ExternalWeakLinkage &&
- Linkage != GlobalValue::ExternalLinkage)) {
- if (ParseGlobalValue(Ty, Init))
- return true;
- }
-
- if (Ty->isFunctionTy() || Ty->isLabelTy())
- return Error(TyLoc, "invalid type for global variable");
-
- GlobalVariable *GV = 0;
-
- // See if the global was forward referenced, if so, use the global.
- if (!Name.empty()) {
- if (GlobalValue *GVal = M->getNamedValue(Name)) {
- if (!ForwardRefVals.erase(Name) || !isa<GlobalValue>(GVal))
- return Error(NameLoc, "redefinition of global '@" + Name + "'");
- GV = cast<GlobalVariable>(GVal);
- }
- } else {
- std::map<unsigned, std::pair<GlobalValue*, LocTy> >::iterator
- I = ForwardRefValIDs.find(NumberedVals.size());
- if (I != ForwardRefValIDs.end()) {
- GV = cast<GlobalVariable>(I->second.first);
- ForwardRefValIDs.erase(I);
- }
- }
-
- if (GV == 0) {
- GV = new GlobalVariable(*M, Ty, false, GlobalValue::ExternalLinkage, 0,
- Name, 0, false, AddrSpace);
- } else {
- if (GV->getType()->getElementType() != Ty)
- return Error(TyLoc,
- "forward reference and definition of global have different types");
-
- // Move the forward-reference to the correct spot in the module.
- M->getGlobalList().splice(M->global_end(), M->getGlobalList(), GV);
- }
-
- if (Name.empty())
- NumberedVals.push_back(GV);
-
- // Set the parsed properties on the global.
- if (Init)
- GV->setInitializer(Init);
- GV->setConstant(IsConstant);
- GV->setLinkage((GlobalValue::LinkageTypes)Linkage);
- GV->setVisibility((GlobalValue::VisibilityTypes)Visibility);
- GV->setThreadLocal(ThreadLocal);
-
- // Parse attributes on the global.
- while (Lex.getKind() == lltok::comma) {
- Lex.Lex();
-
- if (Lex.getKind() == lltok::kw_section) {
- Lex.Lex();
- GV->setSection(Lex.getStrVal());
- if (ParseToken(lltok::StringConstant, "expected global section string"))
- return true;
- } else if (Lex.getKind() == lltok::kw_align) {
- unsigned Alignment;
- if (ParseOptionalAlignment(Alignment)) return true;
- GV->setAlignment(Alignment);
- } else {
- TokError("unknown global variable property!");
- }
- }
-
- return false;
-}
-
-
-//===----------------------------------------------------------------------===//
-// GlobalValue Reference/Resolution Routines.
-//===----------------------------------------------------------------------===//
-
-/// GetGlobalVal - Get a value with the specified name or ID, creating a
-/// forward reference record if needed. This can return null if the value
-/// exists but does not have the right type.
-GlobalValue *LLParser::GetGlobalVal(const std::string &Name, const Type *Ty,
- LocTy Loc) {
- const PointerType *PTy = dyn_cast<PointerType>(Ty);
- if (PTy == 0) {
- Error(Loc, "global variable reference must have pointer type");
- return 0;
- }
-
- // Look this name up in the normal function symbol table.
- GlobalValue *Val =
- cast_or_null<GlobalValue>(M->getValueSymbolTable().lookup(Name));
-
- // If this is a forward reference for the value, see if we already created a
- // forward ref record.
- if (Val == 0) {
- std::map<std::string, std::pair<GlobalValue*, LocTy> >::iterator
- I = ForwardRefVals.find(Name);
- if (I != ForwardRefVals.end())
- Val = I->second.first;
- }
-
- // If we have the value in the symbol table or fwd-ref table, return it.
- if (Val) {
- if (Val->getType() == Ty) return Val;
- Error(Loc, "'@" + Name + "' defined with type '" +
- Val->getType()->getDescription() + "'");
- return 0;
- }
-
- // Otherwise, create a new forward reference for this value and remember it.
- GlobalValue *FwdVal;
- if (const FunctionType *FT = dyn_cast<FunctionType>(PTy->getElementType())) {
- // Function types can return opaque but functions can't.
- if (FT->getReturnType()->isOpaqueTy()) {
- Error(Loc, "function may not return opaque type");
- return 0;
- }
-
- FwdVal = Function::Create(FT, GlobalValue::ExternalWeakLinkage, Name, M);
- } else {
- FwdVal = new GlobalVariable(*M, PTy->getElementType(), false,
- GlobalValue::ExternalWeakLinkage, 0, Name);
- }
-
- ForwardRefVals[Name] = std::make_pair(FwdVal, Loc);
- return FwdVal;
-}
-
-GlobalValue *LLParser::GetGlobalVal(unsigned ID, const Type *Ty, LocTy Loc) {
- const PointerType *PTy = dyn_cast<PointerType>(Ty);
- if (PTy == 0) {
- Error(Loc, "global variable reference must have pointer type");
- return 0;
- }
-
- GlobalValue *Val = ID < NumberedVals.size() ? NumberedVals[ID] : 0;
-
- // If this is a forward reference for the value, see if we already created a
- // forward ref record.
- if (Val == 0) {
- std::map<unsigned, std::pair<GlobalValue*, LocTy> >::iterator
- I = ForwardRefValIDs.find(ID);
- if (I != ForwardRefValIDs.end())
- Val = I->second.first;
- }
-
- // If we have the value in the symbol table or fwd-ref table, return it.
- if (Val) {
- if (Val->getType() == Ty) return Val;
- Error(Loc, "'@" + utostr(ID) + "' defined with type '" +
- Val->getType()->getDescription() + "'");
- return 0;
- }
-
- // Otherwise, create a new forward reference for this value and remember it.
- GlobalValue *FwdVal;
- if (const FunctionType *FT = dyn_cast<FunctionType>(PTy->getElementType())) {
- // Function types can return opaque but functions can't.
- if (FT->getReturnType()->isOpaqueTy()) {
- Error(Loc, "function may not return opaque type");
- return 0;
- }
- FwdVal = Function::Create(FT, GlobalValue::ExternalWeakLinkage, "", M);
- } else {
- FwdVal = new GlobalVariable(*M, PTy->getElementType(), false,
- GlobalValue::ExternalWeakLinkage, 0, "");
- }
-
- ForwardRefValIDs[ID] = std::make_pair(FwdVal, Loc);
- return FwdVal;
-}
-
-
-//===----------------------------------------------------------------------===//
-// Helper Routines.
-//===----------------------------------------------------------------------===//
-
-/// ParseToken - If the current token has the specified kind, eat it and return
-/// success. Otherwise, emit the specified error and return failure.
-bool LLParser::ParseToken(lltok::Kind T, const char *ErrMsg) {
- if (Lex.getKind() != T)
- return TokError(ErrMsg);
- Lex.Lex();
- return false;
-}
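
Almost every routine in this file follows the convention ParseToken establishes: return true on error, false on success, which is what makes the long "return ParseX() || ParseY() || ..." chains above short-circuit at the first failure. The same convention in isolation, with invented names:

#include <cstdio>

struct MiniCursor { const char *P; };

static bool expectChar(MiniCursor &C, char Want, const char *Msg) {
  if (*C.P != Want) { std::fprintf(stderr, "error: %s\n", Msg); return true; }
  ++C.P;
  return false;                              // false means success, as in ParseToken()
}

static bool parseUnit(MiniCursor &C) {
  // Stops at the first failing step, exactly like the '||' chains in this parser.
  return expectChar(C, '(', "expected '('") ||
         expectChar(C, 'x', "expected 'x'") ||
         expectChar(C, ')', "expected ')'");
}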
-
-/// ParseStringConstant
-/// ::= StringConstant
-bool LLParser::ParseStringConstant(std::string &Result) {
- if (Lex.getKind() != lltok::StringConstant)
- return TokError("expected string constant");
- Result = Lex.getStrVal();
- Lex.Lex();
- return false;
-}
-
-/// ParseUInt32
-/// ::= uint32
-bool LLParser::ParseUInt32(unsigned &Val) {
- if (Lex.getKind() != lltok::APSInt || Lex.getAPSIntVal().isSigned())
- return TokError("expected integer");
- uint64_t Val64 = Lex.getAPSIntVal().getLimitedValue(0xFFFFFFFFULL+1);
- if (Val64 != unsigned(Val64))
- return TokError("expected 32-bit integer (too large)");
- Val = Val64;
- Lex.Lex();
- return false;
-}
-
-
-/// ParseOptionalAddrSpace
-/// := /*empty*/
-/// := 'addrspace' '(' uint32 ')'
-bool LLParser::ParseOptionalAddrSpace(unsigned &AddrSpace) {
- AddrSpace = 0;
- if (!EatIfPresent(lltok::kw_addrspace))
- return false;
- return ParseToken(lltok::lparen, "expected '(' in address space") ||
- ParseUInt32(AddrSpace) ||
- ParseToken(lltok::rparen, "expected ')' in address space");
-}
-
-/// ParseOptionalAttrs - Parse a potentially empty attribute list. AttrKind
-/// indicates what kind of attribute list this is: 0: function arg, 1: result,
-/// 2: function attr.
-/// 3: function arg after value: FIXME: REMOVE IN LLVM 3.0
-bool LLParser::ParseOptionalAttrs(unsigned &Attrs, unsigned AttrKind) {
- Attrs = Attribute::None;
- LocTy AttrLoc = Lex.getLoc();
-
- while (1) {
- switch (Lex.getKind()) {
- case lltok::kw_sext:
- case lltok::kw_zext:
- // Treat these as signext/zeroext if they occur in the argument list after
- // the value, as in "call i8 @foo(i8 10 sext)". If they occur before the
- // value, as in "call i8 @foo(i8 sext (" then it is part of a constant
- // expr.
- // FIXME: REMOVE THIS IN LLVM 3.0
- if (AttrKind == 3) {
- if (Lex.getKind() == lltok::kw_sext)
- Attrs |= Attribute::SExt;
- else
- Attrs |= Attribute::ZExt;
- break;
- }
- // FALL THROUGH.
- default: // End of attributes.
- if (AttrKind != 2 && (Attrs & Attribute::FunctionOnly))
- return Error(AttrLoc, "invalid use of function-only attribute");
-
- if (AttrKind != 0 && AttrKind != 3 && (Attrs & Attribute::ParameterOnly))
- return Error(AttrLoc, "invalid use of parameter-only attribute");
-
- return false;
- case lltok::kw_zeroext: Attrs |= Attribute::ZExt; break;
- case lltok::kw_signext: Attrs |= Attribute::SExt; break;
- case lltok::kw_inreg: Attrs |= Attribute::InReg; break;
- case lltok::kw_sret: Attrs |= Attribute::StructRet; break;
- case lltok::kw_noalias: Attrs |= Attribute::NoAlias; break;
- case lltok::kw_nocapture: Attrs |= Attribute::NoCapture; break;
- case lltok::kw_byval: Attrs |= Attribute::ByVal; break;
- case lltok::kw_nest: Attrs |= Attribute::Nest; break;
-
- case lltok::kw_noreturn: Attrs |= Attribute::NoReturn; break;
- case lltok::kw_nounwind: Attrs |= Attribute::NoUnwind; break;
- case lltok::kw_noinline: Attrs |= Attribute::NoInline; break;
- case lltok::kw_readnone: Attrs |= Attribute::ReadNone; break;
- case lltok::kw_readonly: Attrs |= Attribute::ReadOnly; break;
- case lltok::kw_inlinehint: Attrs |= Attribute::InlineHint; break;
- case lltok::kw_alwaysinline: Attrs |= Attribute::AlwaysInline; break;
- case lltok::kw_optsize: Attrs |= Attribute::OptimizeForSize; break;
- case lltok::kw_ssp: Attrs |= Attribute::StackProtect; break;
- case lltok::kw_sspreq: Attrs |= Attribute::StackProtectReq; break;
- case lltok::kw_noredzone: Attrs |= Attribute::NoRedZone; break;
- case lltok::kw_noimplicitfloat: Attrs |= Attribute::NoImplicitFloat; break;
- case lltok::kw_naked: Attrs |= Attribute::Naked; break;
-
- case lltok::kw_alignstack: {
- unsigned Alignment;
- if (ParseOptionalStackAlignment(Alignment))
- return true;
- Attrs |= Attribute::constructStackAlignmentFromInt(Alignment);
- continue;
- }
-
- case lltok::kw_align: {
- unsigned Alignment;
- if (ParseOptionalAlignment(Alignment))
- return true;
- Attrs |= Attribute::constructAlignmentFromInt(Alignment);
- continue;
- }
-
- }
- Lex.Lex();
- }
-}
-
-/// ParseOptionalLinkage
-/// ::= /*empty*/
-/// ::= 'private'
-/// ::= 'linker_private'
-/// ::= 'internal'
-/// ::= 'weak'
-/// ::= 'weak_odr'
-/// ::= 'linkonce'
-/// ::= 'linkonce_odr'
-/// ::= 'appending'
-/// ::= 'dllexport'
-/// ::= 'common'
-/// ::= 'dllimport'
-/// ::= 'extern_weak'
-/// ::= 'external'
-bool LLParser::ParseOptionalLinkage(unsigned &Res, bool &HasLinkage) {
- HasLinkage = false;
- switch (Lex.getKind()) {
- default: Res=GlobalValue::ExternalLinkage; return false;
- case lltok::kw_private: Res = GlobalValue::PrivateLinkage; break;
- case lltok::kw_linker_private: Res = GlobalValue::LinkerPrivateLinkage; break;
- case lltok::kw_internal: Res = GlobalValue::InternalLinkage; break;
- case lltok::kw_weak: Res = GlobalValue::WeakAnyLinkage; break;
- case lltok::kw_weak_odr: Res = GlobalValue::WeakODRLinkage; break;
- case lltok::kw_linkonce: Res = GlobalValue::LinkOnceAnyLinkage; break;
- case lltok::kw_linkonce_odr: Res = GlobalValue::LinkOnceODRLinkage; break;
- case lltok::kw_available_externally:
- Res = GlobalValue::AvailableExternallyLinkage;
- break;
- case lltok::kw_appending: Res = GlobalValue::AppendingLinkage; break;
- case lltok::kw_dllexport: Res = GlobalValue::DLLExportLinkage; break;
- case lltok::kw_common: Res = GlobalValue::CommonLinkage; break;
- case lltok::kw_dllimport: Res = GlobalValue::DLLImportLinkage; break;
- case lltok::kw_extern_weak: Res = GlobalValue::ExternalWeakLinkage; break;
- case lltok::kw_external: Res = GlobalValue::ExternalLinkage; break;
- }
- Lex.Lex();
- HasLinkage = true;
- return false;
-}
-
-/// ParseOptionalVisibility
-/// ::= /*empty*/
-/// ::= 'default'
-/// ::= 'hidden'
-/// ::= 'protected'
-///
-bool LLParser::ParseOptionalVisibility(unsigned &Res) {
- switch (Lex.getKind()) {
- default: Res = GlobalValue::DefaultVisibility; return false;
- case lltok::kw_default: Res = GlobalValue::DefaultVisibility; break;
- case lltok::kw_hidden: Res = GlobalValue::HiddenVisibility; break;
- case lltok::kw_protected: Res = GlobalValue::ProtectedVisibility; break;
- }
- Lex.Lex();
- return false;
-}
-
-/// ParseOptionalCallingConv
-/// ::= /*empty*/
-/// ::= 'ccc'
-/// ::= 'fastcc'
-/// ::= 'coldcc'
-/// ::= 'x86_stdcallcc'
-/// ::= 'x86_fastcallcc'
-/// ::= 'arm_apcscc'
-/// ::= 'arm_aapcscc'
-/// ::= 'arm_aapcs_vfpcc'
-/// ::= 'msp430_intrcc'
-/// ::= 'cc' UINT
-///
-bool LLParser::ParseOptionalCallingConv(CallingConv::ID &CC) {
- switch (Lex.getKind()) {
- default: CC = CallingConv::C; return false;
- case lltok::kw_ccc: CC = CallingConv::C; break;
- case lltok::kw_fastcc: CC = CallingConv::Fast; break;
- case lltok::kw_coldcc: CC = CallingConv::Cold; break;
- case lltok::kw_x86_stdcallcc: CC = CallingConv::X86_StdCall; break;
- case lltok::kw_x86_fastcallcc: CC = CallingConv::X86_FastCall; break;
- case lltok::kw_arm_apcscc: CC = CallingConv::ARM_APCS; break;
- case lltok::kw_arm_aapcscc: CC = CallingConv::ARM_AAPCS; break;
- case lltok::kw_arm_aapcs_vfpcc:CC = CallingConv::ARM_AAPCS_VFP; break;
- case lltok::kw_msp430_intrcc: CC = CallingConv::MSP430_INTR; break;
- case lltok::kw_cc: {
- unsigned ArbitraryCC;
- Lex.Lex();
- if (ParseUInt32(ArbitraryCC)) {
- return true;
- } else
- CC = static_cast<CallingConv::ID>(ArbitraryCC);
- return false;
- }
- break;
- }
-
- Lex.Lex();
- return false;
-}
-
-/// ParseInstructionMetadata
-/// ::= !dbg !42 (',' !dbg !57)*
-bool LLParser::
-ParseInstructionMetadata(SmallVectorImpl<std::pair<unsigned,
- MDNode *> > &Result){
- do {
- if (Lex.getKind() != lltok::MetadataVar)
- return TokError("expected metadata after comma");
-
- std::string Name = Lex.getStrVal();
- Lex.Lex();
-
- MDNode *Node;
- if (ParseToken(lltok::exclaim, "expected '!' here") ||
- ParseMDNodeID(Node))
- return true;
-
- unsigned MDK = M->getMDKindID(Name.c_str());
- Result.push_back(std::make_pair(MDK, Node));
-
- // If this is the end of the list, we're done.
- } while (EatIfPresent(lltok::comma));
- return false;
-}
-
-/// ParseOptionalAlignment
-/// ::= /* empty */
-/// ::= 'align' 4
-bool LLParser::ParseOptionalAlignment(unsigned &Alignment) {
- Alignment = 0;
- if (!EatIfPresent(lltok::kw_align))
- return false;
- LocTy AlignLoc = Lex.getLoc();
- if (ParseUInt32(Alignment)) return true;
- if (!isPowerOf2_32(Alignment))
- return Error(AlignLoc, "alignment is not a power of two");
- return false;
-}
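
The alignment checks here and in ParseOptionalStackAlignment both boil down to the classic single-bit test; isPowerOf2_32 from LLVM's MathExtras.h is essentially this:

static bool isPow2(unsigned V) {
  return V != 0 && (V & (V - 1)) == 0;   // exactly one bit set
}
// isPow2(4) -> true, isPow2(6) -> false, isPow2(0) -> false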
-
-/// ParseOptionalCommaAlign
-/// ::=
-/// ::= ',' align 4
-///
-/// This returns with AteExtraComma set to true if it ate an excess comma at the
-/// end.
-bool LLParser::ParseOptionalCommaAlign(unsigned &Alignment,
- bool &AteExtraComma) {
- AteExtraComma = false;
- while (EatIfPresent(lltok::comma)) {
- // Metadata at the end is an early exit.
- if (Lex.getKind() == lltok::MetadataVar) {
- AteExtraComma = true;
- return false;
- }
-
- if (Lex.getKind() == lltok::kw_align) {
- if (ParseOptionalAlignment(Alignment)) return true;
- } else
- return true;
- }
-
- return false;
-}
-
-/// ParseOptionalStackAlignment
-/// ::= /* empty */
-/// ::= 'alignstack' '(' 4 ')'
-bool LLParser::ParseOptionalStackAlignment(unsigned &Alignment) {
- Alignment = 0;
- if (!EatIfPresent(lltok::kw_alignstack))
- return false;
- LocTy ParenLoc = Lex.getLoc();
- if (!EatIfPresent(lltok::lparen))
- return Error(ParenLoc, "expected '('");
- LocTy AlignLoc = Lex.getLoc();
- if (ParseUInt32(Alignment)) return true;
- ParenLoc = Lex.getLoc();
- if (!EatIfPresent(lltok::rparen))
- return Error(ParenLoc, "expected ')'");
- if (!isPowerOf2_32(Alignment))
- return Error(AlignLoc, "stack alignment is not a power of two");
- return false;
-}
-
-/// ParseIndexList - This parses the index list for an insert/extractvalue
-/// instruction. This sets AteExtraComma in the case where we eat an extra
-/// comma at the end of the line and find that it is followed by metadata.
-/// Clients that don't allow metadata can call the version of this function that
-/// only takes one argument.
-///
-/// ParseIndexList
-/// ::= (',' uint32)+
-///
-bool LLParser::ParseIndexList(SmallVectorImpl<unsigned> &Indices,
- bool &AteExtraComma) {
- AteExtraComma = false;
-
- if (Lex.getKind() != lltok::comma)
- return TokError("expected ',' as start of index list");
-
- while (EatIfPresent(lltok::comma)) {
- if (Lex.getKind() == lltok::MetadataVar) {
- AteExtraComma = true;
- return false;
- }
- unsigned Idx;
- if (ParseUInt32(Idx)) return true;
- Indices.push_back(Idx);
- }
-
- return false;
-}
-
-//===----------------------------------------------------------------------===//
-// Type Parsing.
-//===----------------------------------------------------------------------===//
-
-/// ParseType - Parse and resolve a full type.
-bool LLParser::ParseType(PATypeHolder &Result, bool AllowVoid) {
- LocTy TypeLoc = Lex.getLoc();
- if (ParseTypeRec(Result)) return true;
-
- // Verify no unresolved uprefs.
- if (!UpRefs.empty())
- return Error(UpRefs.back().Loc, "invalid unresolved type up reference");
-
- if (!AllowVoid && Result.get()->isVoidTy())
- return Error(TypeLoc, "void type only allowed for function results");
-
- return false;
-}
-
-/// HandleUpRefs - Every time we finish a new layer of types, this function is
-/// called. It loops through the UpRefs vector, which is a list of the
-/// currently active types. For each type, if the up-reference is contained in
-/// the newly completed type, we decrement the level count. When the level
-/// count reaches zero, the up-referenced type is the type that is passed in:
-/// thus we can complete the cycle.
-///
-PATypeHolder LLParser::HandleUpRefs(const Type *ty) {
- // If Ty isn't abstract, or if there are no up-references in it, then there is
- // nothing to resolve here.
- if (!ty->isAbstract() || UpRefs.empty()) return ty;
-
- PATypeHolder Ty(ty);
-#if 0
- dbgs() << "Type '" << Ty->getDescription()
- << "' newly formed. Resolving upreferences.\n"
- << UpRefs.size() << " upreferences active!\n";
-#endif
-
- // If we find any resolvable upreferences (i.e., those whose NestingLevel goes
- // to zero), we resolve them all together before we resolve them to Ty. At
- // the end of the loop, if there is anything to resolve to Ty, it will be in
- // this variable.
- OpaqueType *TypeToResolve = 0;
-
- for (unsigned i = 0; i != UpRefs.size(); ++i) {
- // Determine if 'Ty' directly contains this up-reference's 'LastContainedTy'.

- bool ContainsType =
- std::find(Ty->subtype_begin(), Ty->subtype_end(),
- UpRefs[i].LastContainedTy) != Ty->subtype_end();
-
-#if 0
- dbgs() << " UR#" << i << " - TypeContains(" << Ty->getDescription() << ", "
- << UpRefs[i].LastContainedTy->getDescription() << ") = "
- << (ContainsType ? "true" : "false")
- << " level=" << UpRefs[i].NestingLevel << "\n";
-#endif
- if (!ContainsType)
- continue;
-
- // Decrement level of upreference
- unsigned Level = --UpRefs[i].NestingLevel;
- UpRefs[i].LastContainedTy = Ty;
-
- // If the Up-reference has a non-zero level, it shouldn't be resolved yet.
- if (Level != 0)
- continue;
-
-#if 0
- dbgs() << " * Resolving upreference for " << UpRefs[i].UpRefTy << "\n";
-#endif
- if (!TypeToResolve)
- TypeToResolve = UpRefs[i].UpRefTy;
- else
- UpRefs[i].UpRefTy->refineAbstractTypeTo(TypeToResolve);
- UpRefs.erase(UpRefs.begin()+i); // Remove from upreference list.
- --i; // Do not skip the next element.
- }
-
- if (TypeToResolve)
- TypeToResolve->refineAbstractTypeTo(Ty);
-
- return Ty;
-}
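Up-references are the old "\N" type syntax: "{ \2* }", for example, describes a struct containing a pointer to itself, because the "\2" has to climb two layers (out of the pointer, then out of the struct) before it resolves. A toy sketch of the per-layer level bookkeeping the loop above performs (the real parser additionally checks that each up-reference is contained in the newly completed type):

  #include <cstdio>
  #include <vector>

  // Each pending up-reference remembers how many more enclosing type layers
  // must be completed before it can be resolved.
  struct UpRef {
    unsigned Level;   // remaining nesting depth, e.g. 2 for "\2"
    int      Id;      // toy stand-in for the placeholder type
  };

  // Called once per completed layer (pointer, struct, array, ...); returns the
  // ids whose level reached zero, i.e. those that resolve to this layer.
  static std::vector<int> completeLayer(std::vector<UpRef> &Pending) {
    std::vector<int> Resolved;
    for (size_t i = 0; i < Pending.size(); ) {
      if (--Pending[i].Level == 0) {
        Resolved.push_back(Pending[i].Id);
        Pending.erase(Pending.begin() + i);      // do not skip the next element
      } else {
        ++i;
      }
    }
    return Resolved;
  }

  int main() {
    std::vector<UpRef> Pending = {{2, 0}};       // "{ \2* }": one up-ref at level 2
    completeLayer(Pending);                      // pointer completes: level 2 -> 1
    std::vector<int> R = completeLayer(Pending); // struct completes: 1 -> 0, resolves
    std::printf("resolved %zu up-reference(s)\n", R.size());
    return 0;
  }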
-
-
-/// ParseTypeRec - The recursive function used to process the internal
-/// implementation details of types.
-bool LLParser::ParseTypeRec(PATypeHolder &Result) {
- switch (Lex.getKind()) {
- default:
- return TokError("expected type");
- case lltok::Type:
- // TypeRec ::= 'float' | 'void' (etc)
- Result = Lex.getTyVal();
- Lex.Lex();
- break;
- case lltok::kw_opaque:
- // TypeRec ::= 'opaque'
- Result = OpaqueType::get(Context);
- Lex.Lex();
- break;
- case lltok::lbrace:
- // TypeRec ::= '{' ... '}'
- if (ParseStructType(Result, false))
- return true;
- break;
- case lltok::kw_union:
- // TypeRec ::= 'union' '{' ... '}'
- if (ParseUnionType(Result))
- return true;
- break;
- case lltok::lsquare:
- // TypeRec ::= '[' ... ']'
- Lex.Lex(); // eat the lsquare.
- if (ParseArrayVectorType(Result, false))
- return true;
- break;
- case lltok::less: // Either vector or packed struct.
- // TypeRec ::= '<' ... '>'
- Lex.Lex();
- if (Lex.getKind() == lltok::lbrace) {
- if (ParseStructType(Result, true) ||
- ParseToken(lltok::greater, "expected '>' at end of packed struct"))
- return true;
- } else if (ParseArrayVectorType(Result, true))
- return true;
- break;
- case lltok::LocalVar:
- case lltok::StringConstant: // FIXME: REMOVE IN LLVM 3.0
- // TypeRec ::= %foo
- if (const Type *T = M->getTypeByName(Lex.getStrVal())) {
- Result = T;
- } else {
- Result = OpaqueType::get(Context);
- ForwardRefTypes.insert(std::make_pair(Lex.getStrVal(),
- std::make_pair(Result,
- Lex.getLoc())));
- M->addTypeName(Lex.getStrVal(), Result.get());
- }
- Lex.Lex();
- break;
-
- case lltok::LocalVarID:
- // TypeRec ::= %4
- if (Lex.getUIntVal() < NumberedTypes.size())
- Result = NumberedTypes[Lex.getUIntVal()];
- else {
- std::map<unsigned, std::pair<PATypeHolder, LocTy> >::iterator
- I = ForwardRefTypeIDs.find(Lex.getUIntVal());
- if (I != ForwardRefTypeIDs.end())
- Result = I->second.first;
- else {
- Result = OpaqueType::get(Context);
- ForwardRefTypeIDs.insert(std::make_pair(Lex.getUIntVal(),
- std::make_pair(Result,
- Lex.getLoc())));
- }
- }
- Lex.Lex();
- break;
- case lltok::backslash: {
- // TypeRec ::= '\' 4
- Lex.Lex();
- unsigned Val;
- if (ParseUInt32(Val)) return true;
- OpaqueType *OT = OpaqueType::get(Context); // Use temporary placeholder.
- UpRefs.push_back(UpRefRecord(Lex.getLoc(), Val, OT));
- Result = OT;
- break;
- }
- }
-
- // Parse the type suffixes.
- while (1) {
- switch (Lex.getKind()) {
- // End of type.
- default: return false;
-
- // TypeRec ::= TypeRec '*'
- case lltok::star:
- if (Result.get()->isLabelTy())
- return TokError("basic block pointers are invalid");
- if (Result.get()->isVoidTy())
- return TokError("pointers to void are invalid; use i8* instead");
- if (!PointerType::isValidElementType(Result.get()))
- return TokError("pointer to this type is invalid");
- Result = HandleUpRefs(PointerType::getUnqual(Result.get()));
- Lex.Lex();
- break;
-
- // TypeRec ::= TypeRec 'addrspace' '(' uint32 ')' '*'
- case lltok::kw_addrspace: {
- if (Result.get()->isLabelTy())
- return TokError("basic block pointers are invalid");
- if (Result.get()->isVoidTy())
- return TokError("pointers to void are invalid; use i8* instead");
- if (!PointerType::isValidElementType(Result.get()))
- return TokError("pointer to this type is invalid");
- unsigned AddrSpace;
- if (ParseOptionalAddrSpace(AddrSpace) ||
- ParseToken(lltok::star, "expected '*' in address space"))
- return true;
-
- Result = HandleUpRefs(PointerType::get(Result.get(), AddrSpace));
- break;
- }
-
- /// Types '(' ArgTypeListI ')' OptFuncAttrs
- case lltok::lparen:
- if (ParseFunctionType(Result))
- return true;
- break;
- }
- }
-}
-
-/// ParseParameterList
-/// ::= '(' ')'
-/// ::= '(' Arg (',' Arg)* ')'
-/// Arg
-/// ::= Type OptionalAttributes Value OptionalAttributes
-bool LLParser::ParseParameterList(SmallVectorImpl<ParamInfo> &ArgList,
- PerFunctionState &PFS) {
- if (ParseToken(lltok::lparen, "expected '(' in call"))
- return true;
-
- while (Lex.getKind() != lltok::rparen) {
- // If this isn't the first argument, we need a comma.
- if (!ArgList.empty() &&
- ParseToken(lltok::comma, "expected ',' in argument list"))
- return true;
-
- // Parse the argument.
- LocTy ArgLoc;
- PATypeHolder ArgTy(Type::getVoidTy(Context));
- unsigned ArgAttrs1 = Attribute::None;
- unsigned ArgAttrs2 = Attribute::None;
- Value *V;
- if (ParseType(ArgTy, ArgLoc))
- return true;
-
- // Otherwise, handle normal operands.
- if (ParseOptionalAttrs(ArgAttrs1, 0) ||
- ParseValue(ArgTy, V, PFS) ||
- // FIXME: Should not allow attributes after the argument, remove this
- // in LLVM 3.0.
- ParseOptionalAttrs(ArgAttrs2, 3))
- return true;
- ArgList.push_back(ParamInfo(ArgLoc, V, ArgAttrs1|ArgAttrs2));
- }
-
- Lex.Lex(); // Lex the ')'.
- return false;
-}
-
-
-
-/// ParseArgumentList - Parse the argument list for a function type or function
-/// prototype. If 'inType' is true then we are parsing a FunctionType.
-/// ::= '(' ArgTypeListI ')'
-/// ArgTypeListI
-/// ::= /*empty*/
-/// ::= '...'
-/// ::= ArgTypeList ',' '...'
-/// ::= ArgType (',' ArgType)*
-///
-bool LLParser::ParseArgumentList(std::vector<ArgInfo> &ArgList,
- bool &isVarArg, bool inType) {
- isVarArg = false;
- assert(Lex.getKind() == lltok::lparen);
- Lex.Lex(); // eat the (.
-
- if (Lex.getKind() == lltok::rparen) {
- // empty
- } else if (Lex.getKind() == lltok::dotdotdot) {
- isVarArg = true;
- Lex.Lex();
- } else {
- LocTy TypeLoc = Lex.getLoc();
- PATypeHolder ArgTy(Type::getVoidTy(Context));
- unsigned Attrs;
- std::string Name;
-
- // If we're parsing a type, use ParseTypeRec, because we allow recursive
- // types (such as a function returning a pointer to itself). If parsing a
- // function prototype, we require fully resolved types.
- if ((inType ? ParseTypeRec(ArgTy) : ParseType(ArgTy)) ||
- ParseOptionalAttrs(Attrs, 0)) return true;
-
- if (ArgTy->isVoidTy())
- return Error(TypeLoc, "argument can not have void type");
-
- if (Lex.getKind() == lltok::LocalVar ||
- Lex.getKind() == lltok::StringConstant) { // FIXME: REMOVE IN LLVM 3.0
- Name = Lex.getStrVal();
- Lex.Lex();
- }
-
- if (!FunctionType::isValidArgumentType(ArgTy))
- return Error(TypeLoc, "invalid type for function argument");
-
- ArgList.push_back(ArgInfo(TypeLoc, ArgTy, Attrs, Name));
-
- while (EatIfPresent(lltok::comma)) {
- // Handle ... at end of arg list.
- if (EatIfPresent(lltok::dotdotdot)) {
- isVarArg = true;
- break;
- }
-
- // Otherwise must be an argument type.
- TypeLoc = Lex.getLoc();
- if ((inType ? ParseTypeRec(ArgTy) : ParseType(ArgTy)) ||
- ParseOptionalAttrs(Attrs, 0)) return true;
-
- if (ArgTy->isVoidTy())
- return Error(TypeLoc, "argument can not have void type");
-
- if (Lex.getKind() == lltok::LocalVar ||
- Lex.getKind() == lltok::StringConstant) { // FIXME: REMOVE IN LLVM 3.0
- Name = Lex.getStrVal();
- Lex.Lex();
- } else {
- Name = "";
- }
-
- if (!ArgTy->isFirstClassType() && !ArgTy->isOpaqueTy())
- return Error(TypeLoc, "invalid type for function argument");
-
- ArgList.push_back(ArgInfo(TypeLoc, ArgTy, Attrs, Name));
- }
- }
-
- return ParseToken(lltok::rparen, "expected ')' at end of argument list");
-}
-
-/// ParseFunctionType
-/// ::= Type ArgumentList OptionalAttrs
-bool LLParser::ParseFunctionType(PATypeHolder &Result) {
- assert(Lex.getKind() == lltok::lparen);
-
- if (!FunctionType::isValidReturnType(Result))
- return TokError("invalid function return type");
-
- std::vector<ArgInfo> ArgList;
- bool isVarArg;
- unsigned Attrs;
- if (ParseArgumentList(ArgList, isVarArg, true) ||
- // FIXME: Allow, but ignore attributes on function types!
- // FIXME: Remove in LLVM 3.0
- ParseOptionalAttrs(Attrs, 2))
- return true;
-
- // Reject names on the argument lists.
- for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
- if (!ArgList[i].Name.empty())
- return Error(ArgList[i].Loc, "argument name invalid in function type");
- if (ArgList[i].Attrs != 0) {
- // Allow but ignore attributes on function types; this permits
- // auto-upgrade.
- // FIXME: REJECT ATTRIBUTES ON FUNCTION TYPES in LLVM 3.0
- }
- }
-
- std::vector<const Type*> ArgListTy;
- for (unsigned i = 0, e = ArgList.size(); i != e; ++i)
- ArgListTy.push_back(ArgList[i].Type);
-
- Result = HandleUpRefs(FunctionType::get(Result.get(),
- ArgListTy, isVarArg));
- return false;
-}
-
-/// ParseStructType: Handles packed and unpacked types. </> parsed elsewhere.
-/// TypeRec
-/// ::= '{' '}'
-/// ::= '{' TypeRec (',' TypeRec)* '}'
-/// ::= '<' '{' '}' '>'
-/// ::= '<' '{' TypeRec (',' TypeRec)* '}' '>'
-bool LLParser::ParseStructType(PATypeHolder &Result, bool Packed) {
- assert(Lex.getKind() == lltok::lbrace);
- Lex.Lex(); // Consume the '{'
-
- if (EatIfPresent(lltok::rbrace)) {
- Result = StructType::get(Context, Packed);
- return false;
- }
-
- std::vector<PATypeHolder> ParamsList;
- LocTy EltTyLoc = Lex.getLoc();
- if (ParseTypeRec(Result)) return true;
- ParamsList.push_back(Result);
-
- if (Result->isVoidTy())
- return Error(EltTyLoc, "struct element can not have void type");
- if (!StructType::isValidElementType(Result))
- return Error(EltTyLoc, "invalid element type for struct");
-
- while (EatIfPresent(lltok::comma)) {
- EltTyLoc = Lex.getLoc();
- if (ParseTypeRec(Result)) return true;
-
- if (Result->isVoidTy())
- return Error(EltTyLoc, "struct element can not have void type");
- if (!StructType::isValidElementType(Result))
- return Error(EltTyLoc, "invalid element type for struct");
-
- ParamsList.push_back(Result);
- }
-
- if (ParseToken(lltok::rbrace, "expected '}' at end of struct"))
- return true;
-
- std::vector<const Type*> ParamsListTy;
- for (unsigned i = 0, e = ParamsList.size(); i != e; ++i)
- ParamsListTy.push_back(ParamsList[i].get());
- Result = HandleUpRefs(StructType::get(Context, ParamsListTy, Packed));
- return false;
-}
-
-/// ParseUnionType
-/// TypeRec
-/// ::= 'union' '{' TypeRec (',' TypeRec)* '}'
-bool LLParser::ParseUnionType(PATypeHolder &Result) {
- assert(Lex.getKind() == lltok::kw_union);
- Lex.Lex(); // Consume the 'union'
-
- if (ParseToken(lltok::lbrace, "'{' expected after 'union'")) return true;
-
- SmallVector<PATypeHolder, 8> ParamsList;
- do {
- LocTy EltTyLoc = Lex.getLoc();
- if (ParseTypeRec(Result)) return true;
- ParamsList.push_back(Result);
-
- if (Result->isVoidTy())
- return Error(EltTyLoc, "union element can not have void type");
- if (!UnionType::isValidElementType(Result))
- return Error(EltTyLoc, "invalid element type for union");
-
- } while (EatIfPresent(lltok::comma));
-
- if (ParseToken(lltok::rbrace, "expected '}' at end of union"))
- return true;
-
- SmallVector<const Type*, 8> ParamsListTy;
- for (unsigned i = 0, e = ParamsList.size(); i != e; ++i)
- ParamsListTy.push_back(ParamsList[i].get());
- Result = HandleUpRefs(UnionType::get(&ParamsListTy[0], ParamsListTy.size()));
- return false;
-}
-
-/// ParseArrayVectorType - Parse an array or vector type, assuming the first
-/// token has already been consumed.
-/// TypeRec
-/// ::= '[' APSINTVAL 'x' Types ']'
-/// ::= '<' APSINTVAL 'x' Types '>'
-bool LLParser::ParseArrayVectorType(PATypeHolder &Result, bool isVector) {
- if (Lex.getKind() != lltok::APSInt || Lex.getAPSIntVal().isSigned() ||
- Lex.getAPSIntVal().getBitWidth() > 64)
- return TokError("expected number in address space");
-
- LocTy SizeLoc = Lex.getLoc();
- uint64_t Size = Lex.getAPSIntVal().getZExtValue();
- Lex.Lex();
-
- if (ParseToken(lltok::kw_x, "expected 'x' after element count"))
- return true;
-
- LocTy TypeLoc = Lex.getLoc();
- PATypeHolder EltTy(Type::getVoidTy(Context));
- if (ParseTypeRec(EltTy)) return true;
-
- if (EltTy->isVoidTy())
- return Error(TypeLoc, "array and vector element type cannot be void");
-
- if (ParseToken(isVector ? lltok::greater : lltok::rsquare,
- "expected end of sequential type"))
- return true;
-
- if (isVector) {
- if (Size == 0)
- return Error(SizeLoc, "zero element vector is illegal");
- if ((unsigned)Size != Size)
- return Error(SizeLoc, "size too large for vector");
- if (!VectorType::isValidElementType(EltTy))
- return Error(TypeLoc, "vector element type must be fp or integer");
- Result = VectorType::get(EltTy, unsigned(Size));
- } else {
- if (!ArrayType::isValidElementType(EltTy))
- return Error(TypeLoc, "invalid array element type");
- Result = HandleUpRefs(ArrayType::get(EltTy, Size));
- }
- return false;
-}
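The "(unsigned)Size != Size" test rejects element counts that would be silently truncated when the 64-bit count is stored in a 32-bit field. A standalone illustration (assuming the usual 32-bit unsigned):

  #include <cassert>
  #include <cstdint>

  // True if the 64-bit element count survives a round-trip through 'unsigned'.
  static bool fitsInUnsigned(uint64_t Size) {
    return (uint64_t)(unsigned)Size == Size;
  }

  int main() {
    assert(fitsInUnsigned(4));
    assert(fitsInUnsigned(0xFFFFFFFFull));     // largest 32-bit count
    assert(!fitsInUnsigned(0x100000000ull));   // would truncate to 0
    return 0;
  }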
-
-//===----------------------------------------------------------------------===//
-// Function Semantic Analysis.
-//===----------------------------------------------------------------------===//
-
-LLParser::PerFunctionState::PerFunctionState(LLParser &p, Function &f,
- int functionNumber)
- : P(p), F(f), FunctionNumber(functionNumber) {
-
- // Insert unnamed arguments into the NumberedVals list.
- for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
- AI != E; ++AI)
- if (!AI->hasName())
- NumberedVals.push_back(AI);
-}
-
-LLParser::PerFunctionState::~PerFunctionState() {
- // If there were any forward referenced non-basicblock values, delete them.
- for (std::map<std::string, std::pair<Value*, LocTy> >::iterator
- I = ForwardRefVals.begin(), E = ForwardRefVals.end(); I != E; ++I)
- if (!isa<BasicBlock>(I->second.first)) {
- I->second.first->replaceAllUsesWith(
- UndefValue::get(I->second.first->getType()));
- delete I->second.first;
- I->second.first = 0;
- }
-
- for (std::map<unsigned, std::pair<Value*, LocTy> >::iterator
- I = ForwardRefValIDs.begin(), E = ForwardRefValIDs.end(); I != E; ++I)
- if (!isa<BasicBlock>(I->second.first)) {
- I->second.first->replaceAllUsesWith(
- UndefValue::get(I->second.first->getType()));
- delete I->second.first;
- I->second.first = 0;
- }
-}
-
-bool LLParser::PerFunctionState::FinishFunction() {
- // Check to see if someone took the address of labels in this block.
- if (!P.ForwardRefBlockAddresses.empty()) {
- ValID FunctionID;
- if (!F.getName().empty()) {
- FunctionID.Kind = ValID::t_GlobalName;
- FunctionID.StrVal = F.getName();
- } else {
- FunctionID.Kind = ValID::t_GlobalID;
- FunctionID.UIntVal = FunctionNumber;
- }
-
- std::map<ValID, std::vector<std::pair<ValID, GlobalValue*> > >::iterator
- FRBAI = P.ForwardRefBlockAddresses.find(FunctionID);
- if (FRBAI != P.ForwardRefBlockAddresses.end()) {
- // Resolve all these references.
- if (P.ResolveForwardRefBlockAddresses(&F, FRBAI->second, this))
- return true;
-
- P.ForwardRefBlockAddresses.erase(FRBAI);
- }
- }
-
- if (!ForwardRefVals.empty())
- return P.Error(ForwardRefVals.begin()->second.second,
- "use of undefined value '%" + ForwardRefVals.begin()->first +
- "'");
- if (!ForwardRefValIDs.empty())
- return P.Error(ForwardRefValIDs.begin()->second.second,
- "use of undefined value '%" +
- utostr(ForwardRefValIDs.begin()->first) + "'");
- return false;
-}
-
-
-/// GetVal - Get a value with the specified name or ID, creating a
-/// forward reference record if needed. This can return null if the value
-/// exists but does not have the right type.
-Value *LLParser::PerFunctionState::GetVal(const std::string &Name,
- const Type *Ty, LocTy Loc) {
- // Look this name up in the normal function symbol table.
- Value *Val = F.getValueSymbolTable().lookup(Name);
-
- // If this is a forward reference for the value, see if we already created a
- // forward ref record.
- if (Val == 0) {
- std::map<std::string, std::pair<Value*, LocTy> >::iterator
- I = ForwardRefVals.find(Name);
- if (I != ForwardRefVals.end())
- Val = I->second.first;
- }
-
- // If we have the value in the symbol table or fwd-ref table, return it.
- if (Val) {
- if (Val->getType() == Ty) return Val;
- if (Ty->isLabelTy())
- P.Error(Loc, "'%" + Name + "' is not a basic block");
- else
- P.Error(Loc, "'%" + Name + "' defined with type '" +
- Val->getType()->getDescription() + "'");
- return 0;
- }
-
- // Don't make placeholders with invalid type.
- if (!Ty->isFirstClassType() && !Ty->isOpaqueTy() && !Ty->isLabelTy()) {
- P.Error(Loc, "invalid use of a non-first-class type");
- return 0;
- }
-
- // Otherwise, create a new forward reference for this value and remember it.
- Value *FwdVal;
- if (Ty->isLabelTy())
- FwdVal = BasicBlock::Create(F.getContext(), Name, &F);
- else
- FwdVal = new Argument(Ty, Name);
-
- ForwardRefVals[Name] = std::make_pair(FwdVal, Loc);
- return FwdVal;
-}
-
-Value *LLParser::PerFunctionState::GetVal(unsigned ID, const Type *Ty,
- LocTy Loc) {
- // Look this name up in the normal function symbol table.
- Value *Val = ID < NumberedVals.size() ? NumberedVals[ID] : 0;
-
- // If this is a forward reference for the value, see if we already created a
- // forward ref record.
- if (Val == 0) {
- std::map<unsigned, std::pair<Value*, LocTy> >::iterator
- I = ForwardRefValIDs.find(ID);
- if (I != ForwardRefValIDs.end())
- Val = I->second.first;
- }
-
- // If we have the value in the symbol table or fwd-ref table, return it.
- if (Val) {
- if (Val->getType() == Ty) return Val;
- if (Ty->isLabelTy())
- P.Error(Loc, "'%" + utostr(ID) + "' is not a basic block");
- else
- P.Error(Loc, "'%" + utostr(ID) + "' defined with type '" +
- Val->getType()->getDescription() + "'");
- return 0;
- }
-
- if (!Ty->isFirstClassType() && !Ty->isOpaqueTy() && !Ty->isLabelTy()) {
- P.Error(Loc, "invalid use of a non-first-class type");
- return 0;
- }
-
- // Otherwise, create a new forward reference for this value and remember it.
- Value *FwdVal;
- if (Ty->isLabelTy())
- FwdVal = BasicBlock::Create(F.getContext(), "", &F);
- else
- FwdVal = new Argument(Ty);
-
- ForwardRefValIDs[ID] = std::make_pair(FwdVal, Loc);
- return FwdVal;
-}
-
-/// SetInstName - After an instruction is parsed and inserted into its
-/// basic block, this installs its name.
-bool LLParser::PerFunctionState::SetInstName(int NameID,
- const std::string &NameStr,
- LocTy NameLoc, Instruction *Inst) {
- // If this instruction has void type, it cannot have a name or ID specified.
- if (Inst->getType()->isVoidTy()) {
- if (NameID != -1 || !NameStr.empty())
- return P.Error(NameLoc, "instructions returning void cannot have a name");
- return false;
- }
-
- // If this was a numbered instruction, verify that the instruction is the
- // expected value and resolve any forward references.
- if (NameStr.empty()) {
- // If neither a name nor an ID was specified, just use the next ID.
- if (NameID == -1)
- NameID = NumberedVals.size();
-
- if (unsigned(NameID) != NumberedVals.size())
- return P.Error(NameLoc, "instruction expected to be numbered '%" +
- utostr(NumberedVals.size()) + "'");
-
- std::map<unsigned, std::pair<Value*, LocTy> >::iterator FI =
- ForwardRefValIDs.find(NameID);
- if (FI != ForwardRefValIDs.end()) {
- if (FI->second.first->getType() != Inst->getType())
- return P.Error(NameLoc, "instruction forward referenced with type '" +
- FI->second.first->getType()->getDescription() + "'");
- FI->second.first->replaceAllUsesWith(Inst);
- delete FI->second.first;
- ForwardRefValIDs.erase(FI);
- }
-
- NumberedVals.push_back(Inst);
- return false;
- }
-
- // Otherwise, the instruction had a name. Resolve forward refs and set it.
- std::map<std::string, std::pair<Value*, LocTy> >::iterator
- FI = ForwardRefVals.find(NameStr);
- if (FI != ForwardRefVals.end()) {
- if (FI->second.first->getType() != Inst->getType())
- return P.Error(NameLoc, "instruction forward referenced with type '" +
- FI->second.first->getType()->getDescription() + "'");
- FI->second.first->replaceAllUsesWith(Inst);
- delete FI->second.first;
- ForwardRefVals.erase(FI);
- }
-
- // Set the name on the instruction.
- Inst->setName(NameStr);
-
- if (Inst->getNameStr() != NameStr)
- return P.Error(NameLoc, "multiple definition of local value named '" +
- NameStr + "'");
- return false;
-}
-
-/// GetBB - Get a basic block with the specified name or ID, creating a
-/// forward reference record if needed.
-BasicBlock *LLParser::PerFunctionState::GetBB(const std::string &Name,
- LocTy Loc) {
- return cast_or_null<BasicBlock>(GetVal(Name,
- Type::getLabelTy(F.getContext()), Loc));
-}
-
-BasicBlock *LLParser::PerFunctionState::GetBB(unsigned ID, LocTy Loc) {
- return cast_or_null<BasicBlock>(GetVal(ID,
- Type::getLabelTy(F.getContext()), Loc));
-}
-
-/// DefineBB - Define the specified basic block, which is either named or
- /// unnamed. If there is an error, this returns null; otherwise it returns
-/// the block being defined.
-BasicBlock *LLParser::PerFunctionState::DefineBB(const std::string &Name,
- LocTy Loc) {
- BasicBlock *BB;
- if (Name.empty())
- BB = GetBB(NumberedVals.size(), Loc);
- else
- BB = GetBB(Name, Loc);
- if (BB == 0) return 0; // Already diagnosed error.
-
- // Move the block to the end of the function. Forward ref'd blocks are
- // inserted wherever they happen to be referenced.
- F.getBasicBlockList().splice(F.end(), F.getBasicBlockList(), BB);
-
- // Remove the block from forward ref sets.
- if (Name.empty()) {
- ForwardRefValIDs.erase(NumberedVals.size());
- NumberedVals.push_back(BB);
- } else {
- // BB forward references are already in the function symbol table.
- ForwardRefVals.erase(Name);
- }
-
- return BB;
-}
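GetVal, GetBB and DefineBB implement the classic forward-reference scheme: the first use of a not-yet-defined name creates a placeholder and records it, and when the definition finally arrives every use of the placeholder is redirected to the real object (the replaceAllUsesWith calls above). A self-contained sketch of that pattern with plain pointers (hypothetical Node type, no LLVM involved):

  #include <cassert>
  #include <map>
  #include <string>
  #include <vector>

  struct Node {
    std::string Name;
    bool IsPlaceholder;
    std::vector<Node **> Uses;   // every slot that currently points at this node
  };

  static std::map<std::string, Node *> Defined, ForwardRefs;

  // Get a node by name, creating a placeholder on first use of an undefined name.
  static Node *getVal(const std::string &Name, Node **UseSlot) {
    Node *N;
    if (Defined.count(Name))          N = Defined[Name];
    else if (ForwardRefs.count(Name)) N = ForwardRefs[Name];
    else                              N = ForwardRefs[Name] = new Node{Name, true, {}};
    N->Uses.push_back(UseSlot);
    *UseSlot = N;
    return N;
  }

  // Define a name: if a placeholder exists, repoint all of its uses and drop it.
  static Node *define(const std::string &Name) {
    Node *Real = new Node{Name, false, {}};
    std::map<std::string, Node *>::iterator I = ForwardRefs.find(Name);
    if (I != ForwardRefs.end()) {
      for (size_t u = 0; u < I->second->Uses.size(); ++u)
        *I->second->Uses[u] = Real;            // "replaceAllUsesWith"
      delete I->second;
      ForwardRefs.erase(I);
    }
    Defined[Name] = Real;
    return Real;
  }

  int main() {
    Node *Operand = 0;
    getVal("loop", &Operand);                  // use before definition -> placeholder
    assert(Operand->IsPlaceholder);
    define("loop");                            // the definition repoints the use
    assert(!Operand->IsPlaceholder);
    return 0;
  }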
-
-//===----------------------------------------------------------------------===//
-// Constants.
-//===----------------------------------------------------------------------===//
-
-/// ParseValID - Parse an abstract value that doesn't necessarily have a
-/// type implied. For example, if we parse "4" we don't know what integer type
-/// it has. The value will later be combined with its type and checked for
-/// sanity. PFS is used to convert function-local operands of metadata (since
-/// metadata operands are not just parsed here but also converted to values).
-/// PFS can be null when we are not parsing metadata values inside a function.
-bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
- ID.Loc = Lex.getLoc();
- switch (Lex.getKind()) {
- default: return TokError("expected value token");
- case lltok::GlobalID: // @42
- ID.UIntVal = Lex.getUIntVal();
- ID.Kind = ValID::t_GlobalID;
- break;
- case lltok::GlobalVar: // @foo
- ID.StrVal = Lex.getStrVal();
- ID.Kind = ValID::t_GlobalName;
- break;
- case lltok::LocalVarID: // %42
- ID.UIntVal = Lex.getUIntVal();
- ID.Kind = ValID::t_LocalID;
- break;
- case lltok::LocalVar: // %foo
- case lltok::StringConstant: // "foo" - FIXME: REMOVE IN LLVM 3.0
- ID.StrVal = Lex.getStrVal();
- ID.Kind = ValID::t_LocalName;
- break;
- case lltok::exclaim: // !{...} MDNode, !"foo" MDString
- Lex.Lex();
-
- if (EatIfPresent(lltok::lbrace)) {
- SmallVector<Value*, 16> Elts;
- if (ParseMDNodeVector(Elts, PFS) ||
- ParseToken(lltok::rbrace, "expected end of metadata node"))
- return true;
-
- ID.MDNodeVal = MDNode::get(Context, Elts.data(), Elts.size());
- ID.Kind = ValID::t_MDNode;
- return false;
- }
-
- // Standalone metadata reference
- // !{ ..., !42, ... }
- if (Lex.getKind() == lltok::APSInt) {
- if (ParseMDNodeID(ID.MDNodeVal)) return true;
- ID.Kind = ValID::t_MDNode;
- return false;
- }
-
- // MDString:
- // ::= '!' STRINGCONSTANT
- if (ParseMDString(ID.MDStringVal)) return true;
- ID.Kind = ValID::t_MDString;
- return false;
- case lltok::APSInt:
- ID.APSIntVal = Lex.getAPSIntVal();
- ID.Kind = ValID::t_APSInt;
- break;
- case lltok::APFloat:
- ID.APFloatVal = Lex.getAPFloatVal();
- ID.Kind = ValID::t_APFloat;
- break;
- case lltok::kw_true:
- ID.ConstantVal = ConstantInt::getTrue(Context);
- ID.Kind = ValID::t_Constant;
- break;
- case lltok::kw_false:
- ID.ConstantVal = ConstantInt::getFalse(Context);
- ID.Kind = ValID::t_Constant;
- break;
- case lltok::kw_null: ID.Kind = ValID::t_Null; break;
- case lltok::kw_undef: ID.Kind = ValID::t_Undef; break;
- case lltok::kw_zeroinitializer: ID.Kind = ValID::t_Zero; break;
-
- case lltok::lbrace: {
- // ValID ::= '{' ConstVector '}'
- Lex.Lex();
- SmallVector<Constant*, 16> Elts;
- if (ParseGlobalValueVector(Elts) ||
- ParseToken(lltok::rbrace, "expected end of struct constant"))
- return true;
-
- ID.ConstantVal = ConstantStruct::get(Context, Elts.data(),
- Elts.size(), false);
- ID.Kind = ValID::t_Constant;
- return false;
- }
- case lltok::less: {
- // ValID ::= '<' ConstVector '>' --> Vector.
- // ValID ::= '<' '{' ConstVector '}' '>' --> Packed Struct.
- Lex.Lex();
- bool isPackedStruct = EatIfPresent(lltok::lbrace);
-
- SmallVector<Constant*, 16> Elts;
- LocTy FirstEltLoc = Lex.getLoc();
- if (ParseGlobalValueVector(Elts) ||
- (isPackedStruct &&
- ParseToken(lltok::rbrace, "expected end of packed struct")) ||
- ParseToken(lltok::greater, "expected end of constant"))
- return true;
-
- if (isPackedStruct) {
- ID.ConstantVal =
- ConstantStruct::get(Context, Elts.data(), Elts.size(), true);
- ID.Kind = ValID::t_Constant;
- return false;
- }
-
- if (Elts.empty())
- return Error(ID.Loc, "constant vector must not be empty");
-
- if (!Elts[0]->getType()->isIntegerTy() &&
- !Elts[0]->getType()->isFloatingPointTy())
- return Error(FirstEltLoc,
- "vector elements must have integer or floating point type");
-
- // Verify that all the vector elements have the same type.
- for (unsigned i = 1, e = Elts.size(); i != e; ++i)
- if (Elts[i]->getType() != Elts[0]->getType())
- return Error(FirstEltLoc,
- "vector element #" + utostr(i) +
- " is not of type '" + Elts[0]->getType()->getDescription());
-
- ID.ConstantVal = ConstantVector::get(Elts.data(), Elts.size());
- ID.Kind = ValID::t_Constant;
- return false;
- }
- case lltok::lsquare: { // Array Constant
- Lex.Lex();
- SmallVector<Constant*, 16> Elts;
- LocTy FirstEltLoc = Lex.getLoc();
- if (ParseGlobalValueVector(Elts) ||
- ParseToken(lltok::rsquare, "expected end of array constant"))
- return true;
-
- // Handle empty element.
- if (Elts.empty()) {
- // Use undef instead of an array because it's inconvenient to determine
- // the element type at this point, there being no elements to examine.
- ID.Kind = ValID::t_EmptyArray;
- return false;
- }
-
- if (!Elts[0]->getType()->isFirstClassType())
- return Error(FirstEltLoc, "invalid array element type: " +
- Elts[0]->getType()->getDescription());
-
- ArrayType *ATy = ArrayType::get(Elts[0]->getType(), Elts.size());
-
- // Verify all elements are correct type!
- for (unsigned i = 0, e = Elts.size(); i != e; ++i) {
- if (Elts[i]->getType() != Elts[0]->getType())
- return Error(FirstEltLoc,
- "array element #" + utostr(i) +
- " is not of type '" +Elts[0]->getType()->getDescription());
- }
-
- ID.ConstantVal = ConstantArray::get(ATy, Elts.data(), Elts.size());
- ID.Kind = ValID::t_Constant;
- return false;
- }
- case lltok::kw_c: // c "foo"
- Lex.Lex();
- ID.ConstantVal = ConstantArray::get(Context, Lex.getStrVal(), false);
- if (ParseToken(lltok::StringConstant, "expected string")) return true;
- ID.Kind = ValID::t_Constant;
- return false;
-
- case lltok::kw_asm: {
- // ValID ::= 'asm' SideEffect? AlignStack? STRINGCONSTANT ',' STRINGCONSTANT
- bool HasSideEffect, AlignStack;
- Lex.Lex();
- if (ParseOptionalToken(lltok::kw_sideeffect, HasSideEffect) ||
- ParseOptionalToken(lltok::kw_alignstack, AlignStack) ||
- ParseStringConstant(ID.StrVal) ||
- ParseToken(lltok::comma, "expected comma in inline asm expression") ||
- ParseToken(lltok::StringConstant, "expected constraint string"))
- return true;
- ID.StrVal2 = Lex.getStrVal();
- ID.UIntVal = unsigned(HasSideEffect) | (unsigned(AlignStack)<<1);
- ID.Kind = ValID::t_InlineAsm;
- return false;
- }
-
- case lltok::kw_blockaddress: {
- // ValID ::= 'blockaddress' '(' @foo ',' %bar ')'
- Lex.Lex();
-
- ValID Fn, Label;
- LocTy FnLoc, LabelLoc;
-
- if (ParseToken(lltok::lparen, "expected '(' in block address expression") ||
- ParseValID(Fn) ||
- ParseToken(lltok::comma, "expected comma in block address expression")||
- ParseValID(Label) ||
- ParseToken(lltok::rparen, "expected ')' in block address expression"))
- return true;
-
- if (Fn.Kind != ValID::t_GlobalID && Fn.Kind != ValID::t_GlobalName)
- return Error(Fn.Loc, "expected function name in blockaddress");
- if (Label.Kind != ValID::t_LocalID && Label.Kind != ValID::t_LocalName)
- return Error(Label.Loc, "expected basic block name in blockaddress");
-
- // Make a global variable as a placeholder for this reference.
- GlobalVariable *FwdRef = new GlobalVariable(*M, Type::getInt8Ty(Context),
- false, GlobalValue::InternalLinkage,
- 0, "");
- ForwardRefBlockAddresses[Fn].push_back(std::make_pair(Label, FwdRef));
- ID.ConstantVal = FwdRef;
- ID.Kind = ValID::t_Constant;
- return false;
- }
-
- case lltok::kw_trunc:
- case lltok::kw_zext:
- case lltok::kw_sext:
- case lltok::kw_fptrunc:
- case lltok::kw_fpext:
- case lltok::kw_bitcast:
- case lltok::kw_uitofp:
- case lltok::kw_sitofp:
- case lltok::kw_fptoui:
- case lltok::kw_fptosi:
- case lltok::kw_inttoptr:
- case lltok::kw_ptrtoint: {
- unsigned Opc = Lex.getUIntVal();
- PATypeHolder DestTy(Type::getVoidTy(Context));
- Constant *SrcVal;
- Lex.Lex();
- if (ParseToken(lltok::lparen, "expected '(' after constantexpr cast") ||
- ParseGlobalTypeAndValue(SrcVal) ||
- ParseToken(lltok::kw_to, "expected 'to' in constantexpr cast") ||
- ParseType(DestTy) ||
- ParseToken(lltok::rparen, "expected ')' at end of constantexpr cast"))
- return true;
- if (!CastInst::castIsValid((Instruction::CastOps)Opc, SrcVal, DestTy))
- return Error(ID.Loc, "invalid cast opcode for cast from '" +
- SrcVal->getType()->getDescription() + "' to '" +
- DestTy->getDescription() + "'");
- ID.ConstantVal = ConstantExpr::getCast((Instruction::CastOps)Opc,
- SrcVal, DestTy);
- ID.Kind = ValID::t_Constant;
- return false;
- }
- case lltok::kw_extractvalue: {
- Lex.Lex();
- Constant *Val;
- SmallVector<unsigned, 4> Indices;
- if (ParseToken(lltok::lparen, "expected '(' in extractvalue constantexpr")||
- ParseGlobalTypeAndValue(Val) ||
- ParseIndexList(Indices) ||
- ParseToken(lltok::rparen, "expected ')' in extractvalue constantexpr"))
- return true;
-
- if (!Val->getType()->isAggregateType())
- return Error(ID.Loc, "extractvalue operand must be aggregate type");
- if (!ExtractValueInst::getIndexedType(Val->getType(), Indices.begin(),
- Indices.end()))
- return Error(ID.Loc, "invalid indices for extractvalue");
- ID.ConstantVal =
- ConstantExpr::getExtractValue(Val, Indices.data(), Indices.size());
- ID.Kind = ValID::t_Constant;
- return false;
- }
- case lltok::kw_insertvalue: {
- Lex.Lex();
- Constant *Val0, *Val1;
- SmallVector<unsigned, 4> Indices;
- if (ParseToken(lltok::lparen, "expected '(' in insertvalue constantexpr")||
- ParseGlobalTypeAndValue(Val0) ||
- ParseToken(lltok::comma, "expected comma in insertvalue constantexpr")||
- ParseGlobalTypeAndValue(Val1) ||
- ParseIndexList(Indices) ||
- ParseToken(lltok::rparen, "expected ')' in insertvalue constantexpr"))
- return true;
- if (!Val0->getType()->isAggregateType())
- return Error(ID.Loc, "insertvalue operand must be aggregate type");
- if (!ExtractValueInst::getIndexedType(Val0->getType(), Indices.begin(),
- Indices.end()))
- return Error(ID.Loc, "invalid indices for insertvalue");
- ID.ConstantVal = ConstantExpr::getInsertValue(Val0, Val1,
- Indices.data(), Indices.size());
- ID.Kind = ValID::t_Constant;
- return false;
- }
- case lltok::kw_icmp:
- case lltok::kw_fcmp: {
- unsigned PredVal, Opc = Lex.getUIntVal();
- Constant *Val0, *Val1;
- Lex.Lex();
- if (ParseCmpPredicate(PredVal, Opc) ||
- ParseToken(lltok::lparen, "expected '(' in compare constantexpr") ||
- ParseGlobalTypeAndValue(Val0) ||
- ParseToken(lltok::comma, "expected comma in compare constantexpr") ||
- ParseGlobalTypeAndValue(Val1) ||
- ParseToken(lltok::rparen, "expected ')' in compare constantexpr"))
- return true;
-
- if (Val0->getType() != Val1->getType())
- return Error(ID.Loc, "compare operands must have the same type");
-
- CmpInst::Predicate Pred = (CmpInst::Predicate)PredVal;
-
- if (Opc == Instruction::FCmp) {
- if (!Val0->getType()->isFPOrFPVectorTy())
- return Error(ID.Loc, "fcmp requires floating point operands");
- ID.ConstantVal = ConstantExpr::getFCmp(Pred, Val0, Val1);
- } else {
- assert(Opc == Instruction::ICmp && "Unexpected opcode for CmpInst!");
- if (!Val0->getType()->isIntOrIntVectorTy() &&
- !Val0->getType()->isPointerTy())
- return Error(ID.Loc, "icmp requires pointer or integer operands");
- ID.ConstantVal = ConstantExpr::getICmp(Pred, Val0, Val1);
- }
- ID.Kind = ValID::t_Constant;
- return false;
- }
-
- // Binary Operators.
- case lltok::kw_add:
- case lltok::kw_fadd:
- case lltok::kw_sub:
- case lltok::kw_fsub:
- case lltok::kw_mul:
- case lltok::kw_fmul:
- case lltok::kw_udiv:
- case lltok::kw_sdiv:
- case lltok::kw_fdiv:
- case lltok::kw_urem:
- case lltok::kw_srem:
- case lltok::kw_frem: {
- bool NUW = false;
- bool NSW = false;
- bool Exact = false;
- unsigned Opc = Lex.getUIntVal();
- Constant *Val0, *Val1;
- Lex.Lex();
- LocTy ModifierLoc = Lex.getLoc();
- if (Opc == Instruction::Add ||
- Opc == Instruction::Sub ||
- Opc == Instruction::Mul) {
- if (EatIfPresent(lltok::kw_nuw))
- NUW = true;
- if (EatIfPresent(lltok::kw_nsw)) {
- NSW = true;
- if (EatIfPresent(lltok::kw_nuw))
- NUW = true;
- }
- } else if (Opc == Instruction::SDiv) {
- if (EatIfPresent(lltok::kw_exact))
- Exact = true;
- }
- if (ParseToken(lltok::lparen, "expected '(' in binary constantexpr") ||
- ParseGlobalTypeAndValue(Val0) ||
- ParseToken(lltok::comma, "expected comma in binary constantexpr") ||
- ParseGlobalTypeAndValue(Val1) ||
- ParseToken(lltok::rparen, "expected ')' in binary constantexpr"))
- return true;
- if (Val0->getType() != Val1->getType())
- return Error(ID.Loc, "operands of constexpr must have same type");
- if (!Val0->getType()->isIntOrIntVectorTy()) {
- if (NUW)
- return Error(ModifierLoc, "nuw only applies to integer operations");
- if (NSW)
- return Error(ModifierLoc, "nsw only applies to integer operations");
- }
- // API compatibility: Accept either integer or floating-point types with
- // add, sub, and mul.
- if (!Val0->getType()->isIntOrIntVectorTy() &&
- !Val0->getType()->isFPOrFPVectorTy())
- return Error(ID.Loc,"constexpr requires integer, fp, or vector operands");
- unsigned Flags = 0;
- if (NUW) Flags |= OverflowingBinaryOperator::NoUnsignedWrap;
- if (NSW) Flags |= OverflowingBinaryOperator::NoSignedWrap;
- if (Exact) Flags |= SDivOperator::IsExact;
- Constant *C = ConstantExpr::get(Opc, Val0, Val1, Flags);
- ID.ConstantVal = C;
- ID.Kind = ValID::t_Constant;
- return false;
- }
-
- // Logical Operations
- case lltok::kw_shl:
- case lltok::kw_lshr:
- case lltok::kw_ashr:
- case lltok::kw_and:
- case lltok::kw_or:
- case lltok::kw_xor: {
- unsigned Opc = Lex.getUIntVal();
- Constant *Val0, *Val1;
- Lex.Lex();
- if (ParseToken(lltok::lparen, "expected '(' in logical constantexpr") ||
- ParseGlobalTypeAndValue(Val0) ||
- ParseToken(lltok::comma, "expected comma in logical constantexpr") ||
- ParseGlobalTypeAndValue(Val1) ||
- ParseToken(lltok::rparen, "expected ')' in logical constantexpr"))
- return true;
- if (Val0->getType() != Val1->getType())
- return Error(ID.Loc, "operands of constexpr must have same type");
- if (!Val0->getType()->isIntOrIntVectorTy())
- return Error(ID.Loc,
- "constexpr requires integer or integer vector operands");
- ID.ConstantVal = ConstantExpr::get(Opc, Val0, Val1);
- ID.Kind = ValID::t_Constant;
- return false;
- }
-
- case lltok::kw_getelementptr:
- case lltok::kw_shufflevector:
- case lltok::kw_insertelement:
- case lltok::kw_extractelement:
- case lltok::kw_select: {
- unsigned Opc = Lex.getUIntVal();
- SmallVector<Constant*, 16> Elts;
- bool InBounds = false;
- Lex.Lex();
- if (Opc == Instruction::GetElementPtr)
- InBounds = EatIfPresent(lltok::kw_inbounds);
- if (ParseToken(lltok::lparen, "expected '(' in constantexpr") ||
- ParseGlobalValueVector(Elts) ||
- ParseToken(lltok::rparen, "expected ')' in constantexpr"))
- return true;
-
- if (Opc == Instruction::GetElementPtr) {
- if (Elts.size() == 0 || !Elts[0]->getType()->isPointerTy())
- return Error(ID.Loc, "getelementptr requires pointer operand");
-
- if (!GetElementPtrInst::getIndexedType(Elts[0]->getType(),
- (Value**)(Elts.data() + 1),
- Elts.size() - 1))
- return Error(ID.Loc, "invalid indices for getelementptr");
- ID.ConstantVal = InBounds ?
- ConstantExpr::getInBoundsGetElementPtr(Elts[0],
- Elts.data() + 1,
- Elts.size() - 1) :
- ConstantExpr::getGetElementPtr(Elts[0],
- Elts.data() + 1, Elts.size() - 1);
- } else if (Opc == Instruction::Select) {
- if (Elts.size() != 3)
- return Error(ID.Loc, "expected three operands to select");
- if (const char *Reason = SelectInst::areInvalidOperands(Elts[0], Elts[1],
- Elts[2]))
- return Error(ID.Loc, Reason);
- ID.ConstantVal = ConstantExpr::getSelect(Elts[0], Elts[1], Elts[2]);
- } else if (Opc == Instruction::ShuffleVector) {
- if (Elts.size() != 3)
- return Error(ID.Loc, "expected three operands to shufflevector");
- if (!ShuffleVectorInst::isValidOperands(Elts[0], Elts[1], Elts[2]))
- return Error(ID.Loc, "invalid operands to shufflevector");
- ID.ConstantVal =
- ConstantExpr::getShuffleVector(Elts[0], Elts[1],Elts[2]);
- } else if (Opc == Instruction::ExtractElement) {
- if (Elts.size() != 2)
- return Error(ID.Loc, "expected two operands to extractelement");
- if (!ExtractElementInst::isValidOperands(Elts[0], Elts[1]))
- return Error(ID.Loc, "invalid extractelement operands");
- ID.ConstantVal = ConstantExpr::getExtractElement(Elts[0], Elts[1]);
- } else {
- assert(Opc == Instruction::InsertElement && "Unknown opcode");
- if (Elts.size() != 3)
- return Error(ID.Loc, "expected three operands to insertelement");
- if (!InsertElementInst::isValidOperands(Elts[0], Elts[1], Elts[2]))
- return Error(ID.Loc, "invalid insertelement operands");
- ID.ConstantVal =
- ConstantExpr::getInsertElement(Elts[0], Elts[1],Elts[2]);
- }
-
- ID.Kind = ValID::t_Constant;
- return false;
- }
- }
-
- Lex.Lex();
- return false;
-}
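ParseValID deliberately separates what was written from what type it has: a literal such as 4 or null is recorded as a tagged, untyped value and only combined with a type later, in ConvertValIDToValue. A minimal sketch of that two-phase approach (toy tags and strings, not LLVM's ValID):

  #include <cstdint>
  #include <cstdio>
  #include <string>

  // Phase 1: record what the token said, with no type attached yet.
  struct UntypedVal {
    enum Kind { Int, Null, Name } K;
    int64_t     IntVal;
    std::string NameVal;
  };

  // Phase 2: combine the untyped value with the type supplied by the context.
  // Returns true on error, matching the parser's convention.
  static bool materialize(const UntypedVal &V, const std::string &Ty,
                          std::string &Out) {
    switch (V.K) {
    case UntypedVal::Int:
      if (Ty.empty() || Ty[0] != 'i') return true;        // integer literal needs an integer type
      Out = Ty + " " + std::to_string(V.IntVal);
      return false;
    case UntypedVal::Null:
      if (Ty.find('*') == std::string::npos) return true; // null needs a pointer type
      Out = Ty + " null";
      return false;
    case UntypedVal::Name:
      Out = Ty + " %" + V.NameVal;                         // would be resolved via a symbol table
      return false;
    }
    return true;
  }

  int main() {
    UntypedVal V;
    V.K = UntypedVal::Int;
    V.IntVal = 4;
    std::string S;
    if (!materialize(V, "i32", S))
      std::printf("%s\n", S.c_str());                      // prints "i32 4"
    return 0;
  }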
-
-/// ParseGlobalValue - Parse a global value with the specified type.
-bool LLParser::ParseGlobalValue(const Type *Ty, Constant *&C) {
- C = 0;
- ValID ID;
- Value *V = NULL;
- bool Parsed = ParseValID(ID) ||
- ConvertValIDToValue(Ty, ID, V, NULL);
- if (V && !(C = dyn_cast<Constant>(V)))
- return Error(ID.Loc, "global values must be constants");
- return Parsed;
-}
-
-bool LLParser::ParseGlobalTypeAndValue(Constant *&V) {
- PATypeHolder Type(Type::getVoidTy(Context));
- return ParseType(Type) ||
- ParseGlobalValue(Type, V);
-}
-
-/// ParseGlobalValueVector
-/// ::= /*empty*/
-/// ::= TypeAndValue (',' TypeAndValue)*
-bool LLParser::ParseGlobalValueVector(SmallVectorImpl<Constant*> &Elts) {
- // Empty list.
- if (Lex.getKind() == lltok::rbrace ||
- Lex.getKind() == lltok::rsquare ||
- Lex.getKind() == lltok::greater ||
- Lex.getKind() == lltok::rparen)
- return false;
-
- Constant *C;
- if (ParseGlobalTypeAndValue(C)) return true;
- Elts.push_back(C);
-
- while (EatIfPresent(lltok::comma)) {
- if (ParseGlobalTypeAndValue(C)) return true;
- Elts.push_back(C);
- }
-
- return false;
-}
-
-
-//===----------------------------------------------------------------------===//
-// Function Parsing.
-//===----------------------------------------------------------------------===//
-
-bool LLParser::ConvertValIDToValue(const Type *Ty, ValID &ID, Value *&V,
- PerFunctionState *PFS) {
- if (Ty->isFunctionTy())
- return Error(ID.Loc, "functions are not values, refer to them as pointers");
-
- switch (ID.Kind) {
- default: llvm_unreachable("Unknown ValID!");
- case ValID::t_LocalID:
- if (!PFS) return Error(ID.Loc, "invalid use of function-local name");
- V = PFS->GetVal(ID.UIntVal, Ty, ID.Loc);
- return (V == 0);
- case ValID::t_LocalName:
- if (!PFS) return Error(ID.Loc, "invalid use of function-local name");
- V = PFS->GetVal(ID.StrVal, Ty, ID.Loc);
- return (V == 0);
- case ValID::t_InlineAsm: {
- const PointerType *PTy = dyn_cast<PointerType>(Ty);
- const FunctionType *FTy =
- PTy ? dyn_cast<FunctionType>(PTy->getElementType()) : 0;
- if (!FTy || !InlineAsm::Verify(FTy, ID.StrVal2))
- return Error(ID.Loc, "invalid type for inline asm constraint string");
- V = InlineAsm::get(FTy, ID.StrVal, ID.StrVal2, ID.UIntVal&1, ID.UIntVal>>1);
- return false;
- }
- case ValID::t_MDNode:
- if (!Ty->isMetadataTy())
- return Error(ID.Loc, "metadata value must have metadata type");
- V = ID.MDNodeVal;
- return false;
- case ValID::t_MDString:
- if (!Ty->isMetadataTy())
- return Error(ID.Loc, "metadata value must have metadata type");
- V = ID.MDStringVal;
- return false;
- case ValID::t_GlobalName:
- V = GetGlobalVal(ID.StrVal, Ty, ID.Loc);
- return V == 0;
- case ValID::t_GlobalID:
- V = GetGlobalVal(ID.UIntVal, Ty, ID.Loc);
- return V == 0;
- case ValID::t_APSInt:
- if (!Ty->isIntegerTy())
- return Error(ID.Loc, "integer constant must have integer type");
- ID.APSIntVal.extOrTrunc(Ty->getPrimitiveSizeInBits());
- V = ConstantInt::get(Context, ID.APSIntVal);
- return false;
- case ValID::t_APFloat:
- if (!Ty->isFloatingPointTy() ||
- !ConstantFP::isValueValidForType(Ty, ID.APFloatVal))
- return Error(ID.Loc, "floating point constant invalid for type");
-
- // The lexer has no type info, so it builds all float and double FP constants
- // as double. Fix this here. Long double does not need this.
- if (&ID.APFloatVal.getSemantics() == &APFloat::IEEEdouble &&
- Ty->isFloatTy()) {
- bool Ignored;
- ID.APFloatVal.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven,
- &Ignored);
- }
- V = ConstantFP::get(Context, ID.APFloatVal);
-
- if (V->getType() != Ty)
- return Error(ID.Loc, "floating point constant does not have type '" +
- Ty->getDescription() + "'");
-
- return false;
- case ValID::t_Null:
- if (!Ty->isPointerTy())
- return Error(ID.Loc, "null must be a pointer type");
- V = ConstantPointerNull::get(cast<PointerType>(Ty));
- return false;
- case ValID::t_Undef:
- // FIXME: LabelTy should not be a first-class type.
- if ((!Ty->isFirstClassType() || Ty->isLabelTy()) &&
- !Ty->isOpaqueTy())
- return Error(ID.Loc, "invalid type for undef constant");
- V = UndefValue::get(Ty);
- return false;
- case ValID::t_EmptyArray:
- if (!Ty->isArrayTy() || cast<ArrayType>(Ty)->getNumElements() != 0)
- return Error(ID.Loc, "invalid empty array initializer");
- V = UndefValue::get(Ty);
- return false;
- case ValID::t_Zero:
- // FIXME: LabelTy should not be a first-class type.
- if (!Ty->isFirstClassType() || Ty->isLabelTy())
- return Error(ID.Loc, "invalid type for null constant");
- V = Constant::getNullValue(Ty);
- return false;
- case ValID::t_Constant:
- if (ID.ConstantVal->getType() != Ty) {
- // Allow a constant struct with a single member to be converted
- // to a union, if the union has a member which is the same type
- // as the struct member.
- if (const UnionType* utype = dyn_cast<UnionType>(Ty)) {
- return ParseUnionValue(utype, ID, V);
- }
-
- return Error(ID.Loc, "constant expression type mismatch");
- }
-
- V = ID.ConstantVal;
- return false;
- }
-}
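The floating-point branch above exists because the lexer parses every FP literal at double precision; a literal is only acceptable for a float type if narrowing it loses nothing, which for finite values roughly amounts to a round-trip test. A standalone illustration (not the APFloat machinery itself):

  #include <cassert>

  // A double-precision literal is exactly representable as float iff
  // converting it to float and back reproduces the original value.
  static bool fitsInFloat(double D) {
    return (double)(float)D == D;
  }

  int main() {
    assert(fitsInFloat(1.5));     // exact at both precisions
    assert(fitsInFloat(0.0));
    assert(!fitsInFloat(0.1));    // 0.1 rounds differently as float and as double
    return 0;
  }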
-
-bool LLParser::ParseValue(const Type *Ty, Value *&V, PerFunctionState &PFS) {
- V = 0;
- ValID ID;
- return ParseValID(ID, &PFS) ||
- ConvertValIDToValue(Ty, ID, V, &PFS);
-}
-
-bool LLParser::ParseTypeAndValue(Value *&V, PerFunctionState &PFS) {
- PATypeHolder T(Type::getVoidTy(Context));
- return ParseType(T) ||
- ParseValue(T, V, PFS);
-}
-
-bool LLParser::ParseTypeAndBasicBlock(BasicBlock *&BB, LocTy &Loc,
- PerFunctionState &PFS) {
- Value *V;
- Loc = Lex.getLoc();
- if (ParseTypeAndValue(V, PFS)) return true;
- if (!isa<BasicBlock>(V))
- return Error(Loc, "expected a basic block");
- BB = cast<BasicBlock>(V);
- return false;
-}
-
-bool LLParser::ParseUnionValue(const UnionType* utype, ValID &ID, Value *&V) {
- if (const StructType* stype = dyn_cast<StructType>(ID.ConstantVal->getType())) {
- if (stype->getNumContainedTypes() != 1)
- return Error(ID.Loc, "constant expression type mismatch");
- int index = utype->getElementTypeIndex(stype->getContainedType(0));
- if (index < 0)
- return Error(ID.Loc, "initializer type is not a member of the union");
-
- V = ConstantUnion::get(
- utype, cast<Constant>(ID.ConstantVal->getOperand(0)));
- return false;
- }
-
- return Error(ID.Loc, "constant expression type mismatch");
-}
-
-
-/// FunctionHeader
-/// ::= OptionalLinkage OptionalVisibility OptionalCallingConv OptRetAttrs
-/// Type GlobalName '(' ArgList ')' OptFuncAttrs OptSection
-/// OptionalAlign OptGC
-bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
- // Parse the linkage.
- LocTy LinkageLoc = Lex.getLoc();
- unsigned Linkage;
-
- unsigned Visibility, RetAttrs;
- CallingConv::ID CC;
- PATypeHolder RetType(Type::getVoidTy(Context));
- LocTy RetTypeLoc = Lex.getLoc();
- if (ParseOptionalLinkage(Linkage) ||
- ParseOptionalVisibility(Visibility) ||
- ParseOptionalCallingConv(CC) ||
- ParseOptionalAttrs(RetAttrs, 1) ||
- ParseType(RetType, RetTypeLoc, true /*void allowed*/))
- return true;
-
- // Verify that the linkage is ok.
- switch ((GlobalValue::LinkageTypes)Linkage) {
- case GlobalValue::ExternalLinkage:
- break; // always ok.
- case GlobalValue::DLLImportLinkage:
- case GlobalValue::ExternalWeakLinkage:
- if (isDefine)
- return Error(LinkageLoc, "invalid linkage for function definition");
- break;
- case GlobalValue::PrivateLinkage:
- case GlobalValue::LinkerPrivateLinkage:
- case GlobalValue::InternalLinkage:
- case GlobalValue::AvailableExternallyLinkage:
- case GlobalValue::LinkOnceAnyLinkage:
- case GlobalValue::LinkOnceODRLinkage:
- case GlobalValue::WeakAnyLinkage:
- case GlobalValue::WeakODRLinkage:
- case GlobalValue::DLLExportLinkage:
- if (!isDefine)
- return Error(LinkageLoc, "invalid linkage for function declaration");
- break;
- case GlobalValue::AppendingLinkage:
- case GlobalValue::CommonLinkage:
- return Error(LinkageLoc, "invalid function linkage type");
- }
-
- if (!FunctionType::isValidReturnType(RetType) ||
- RetType->isOpaqueTy())
- return Error(RetTypeLoc, "invalid function return type");
-
- LocTy NameLoc = Lex.getLoc();
-
- std::string FunctionName;
- if (Lex.getKind() == lltok::GlobalVar) {
- FunctionName = Lex.getStrVal();
- } else if (Lex.getKind() == lltok::GlobalID) { // @42 is ok.
- unsigned NameID = Lex.getUIntVal();
-
- if (NameID != NumberedVals.size())
- return TokError("function expected to be numbered '%" +
- utostr(NumberedVals.size()) + "'");
- } else {
- return TokError("expected function name");
- }
-
- Lex.Lex();
-
- if (Lex.getKind() != lltok::lparen)
- return TokError("expected '(' in function argument list");
-
- std::vector<ArgInfo> ArgList;
- bool isVarArg;
- unsigned FuncAttrs;
- std::string Section;
- unsigned Alignment;
- std::string GC;
-
- if (ParseArgumentList(ArgList, isVarArg, false) ||
- ParseOptionalAttrs(FuncAttrs, 2) ||
- (EatIfPresent(lltok::kw_section) &&
- ParseStringConstant(Section)) ||
- ParseOptionalAlignment(Alignment) ||
- (EatIfPresent(lltok::kw_gc) &&
- ParseStringConstant(GC)))
- return true;
-
- // If the alignment was parsed as an attribute, move to the alignment field.
- if (FuncAttrs & Attribute::Alignment) {
- Alignment = Attribute::getAlignmentFromAttrs(FuncAttrs);
- FuncAttrs &= ~Attribute::Alignment;
- }
-
- // Okay, if we got here, the function is syntactically valid. Convert types
- // and do semantic checks.
- std::vector<const Type*> ParamTypeList;
- SmallVector<AttributeWithIndex, 8> Attrs;
- // FIXME : In 3.0, stop accepting zext, sext and inreg as optional function
- // attributes.
- unsigned ObsoleteFuncAttrs = Attribute::ZExt|Attribute::SExt|Attribute::InReg;
- if (FuncAttrs & ObsoleteFuncAttrs) {
- RetAttrs |= FuncAttrs & ObsoleteFuncAttrs;
- FuncAttrs &= ~ObsoleteFuncAttrs;
- }
-
- if (RetAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(0, RetAttrs));
-
- for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
- ParamTypeList.push_back(ArgList[i].Type);
- if (ArgList[i].Attrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(i+1, ArgList[i].Attrs));
- }
-
- if (FuncAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(~0, FuncAttrs));
-
- AttrListPtr PAL = AttrListPtr::get(Attrs.begin(), Attrs.end());
-
- if (PAL.paramHasAttr(1, Attribute::StructRet) && !RetType->isVoidTy())
- return Error(RetTypeLoc, "functions with 'sret' argument must return void");
-
- const FunctionType *FT =
- FunctionType::get(RetType, ParamTypeList, isVarArg);
- const PointerType *PFT = PointerType::getUnqual(FT);
-
- Fn = 0;
- if (!FunctionName.empty()) {
- // If this was a definition of a forward reference, remove the definition
- // from the forward reference table and fill in the forward ref.
- std::map<std::string, std::pair<GlobalValue*, LocTy> >::iterator FRVI =
- ForwardRefVals.find(FunctionName);
- if (FRVI != ForwardRefVals.end()) {
- Fn = M->getFunction(FunctionName);
- ForwardRefVals.erase(FRVI);
- } else if ((Fn = M->getFunction(FunctionName))) {
- // If this function already exists in the symbol table, then it is
- // multiply defined. We accept a few cases for old backwards compat.
- // FIXME: Remove this stuff for LLVM 3.0.
- if (Fn->getType() != PFT || Fn->getAttributes() != PAL ||
- (!Fn->isDeclaration() && isDefine)) {
- // If the redefinition has different type or different attributes,
- // reject it. If both have bodies, reject it.
- return Error(NameLoc, "invalid redefinition of function '" +
- FunctionName + "'");
- } else if (Fn->isDeclaration()) {
- // Make sure to strip off any argument names so we can't get conflicts.
- for (Function::arg_iterator AI = Fn->arg_begin(), AE = Fn->arg_end();
- AI != AE; ++AI)
- AI->setName("");
- }
- } else if (M->getNamedValue(FunctionName)) {
- return Error(NameLoc, "redefinition of function '@" + FunctionName + "'");
- }
-
- } else {
- // If this is a definition of a forward referenced function, make sure the
- // types agree.
- std::map<unsigned, std::pair<GlobalValue*, LocTy> >::iterator I
- = ForwardRefValIDs.find(NumberedVals.size());
- if (I != ForwardRefValIDs.end()) {
- Fn = cast<Function>(I->second.first);
- if (Fn->getType() != PFT)
- return Error(NameLoc, "type of definition and forward reference of '@" +
- utostr(NumberedVals.size()) +"' disagree");
- ForwardRefValIDs.erase(I);
- }
- }
-
- if (Fn == 0)
- Fn = Function::Create(FT, GlobalValue::ExternalLinkage, FunctionName, M);
- else // Move the forward-reference to the correct spot in the module.
- M->getFunctionList().splice(M->end(), M->getFunctionList(), Fn);
-
- if (FunctionName.empty())
- NumberedVals.push_back(Fn);
-
- Fn->setLinkage((GlobalValue::LinkageTypes)Linkage);
- Fn->setVisibility((GlobalValue::VisibilityTypes)Visibility);
- Fn->setCallingConv(CC);
- Fn->setAttributes(PAL);
- Fn->setAlignment(Alignment);
- Fn->setSection(Section);
- if (!GC.empty()) Fn->setGC(GC.c_str());
-
- // Add all of the arguments we parsed to the function.
- Function::arg_iterator ArgIt = Fn->arg_begin();
- for (unsigned i = 0, e = ArgList.size(); i != e; ++i, ++ArgIt) {
- // If we run out of arguments in the Function prototype, exit early.
- // FIXME: REMOVE THIS IN LLVM 3.0, this is just for the mismatch case above.
- if (ArgIt == Fn->arg_end()) break;
-
- // If the argument has a name, insert it into the argument symbol table.
- if (ArgList[i].Name.empty()) continue;
-
- // Set the name, if it conflicted, it will be auto-renamed.
- ArgIt->setName(ArgList[i].Name);
-
- if (ArgIt->getNameStr() != ArgList[i].Name)
- return Error(ArgList[i].Loc, "redefinition of argument '%" +
- ArgList[i].Name + "'");
- }
-
- return false;
-}
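Several spots above fold boolean properties into a single attribute word and later mask pieces back out (alignment is pulled out of FuncAttrs, obsolete return attributes are moved over). The underlying bit manipulation is the usual mask-and-clear pattern, sketched here with made-up flag values (the real Attribute constants differ):

  #include <cassert>

  // Hypothetical flag values; the real Attribute constants are different.
  enum {
    ATTR_ZEXT     = 1 << 0,
    ATTR_SEXT     = 1 << 1,
    ATTR_INREG    = 1 << 2,
    ATTR_NOUNWIND = 1 << 5
  };

  int main() {
    unsigned FuncAttrs = ATTR_ZEXT | ATTR_NOUNWIND;  // as parsed
    unsigned RetAttrs  = 0;

    // Move the obsolete attributes over to the return value, as the parser
    // above does for zext/sext/inreg.
    unsigned Obsolete = ATTR_ZEXT | ATTR_SEXT | ATTR_INREG;
    if (FuncAttrs & Obsolete) {
      RetAttrs  |= FuncAttrs & Obsolete;
      FuncAttrs &= ~Obsolete;
    }

    assert(RetAttrs == ATTR_ZEXT);
    assert(FuncAttrs == ATTR_NOUNWIND);
    return 0;
  }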
-
-
-/// ParseFunctionBody
-/// ::= '{' BasicBlock+ '}'
-/// ::= 'begin' BasicBlock+ 'end' // FIXME: remove in LLVM 3.0
-///
-bool LLParser::ParseFunctionBody(Function &Fn) {
- if (Lex.getKind() != lltok::lbrace && Lex.getKind() != lltok::kw_begin)
- return TokError("expected '{' in function body");
- Lex.Lex(); // eat the {.
-
- int FunctionNumber = -1;
- if (!Fn.hasName()) FunctionNumber = NumberedVals.size()-1;
-
- PerFunctionState PFS(*this, Fn, FunctionNumber);
-
- // We need at least one basic block.
- if (Lex.getKind() == lltok::rbrace || Lex.getKind() == lltok::kw_end)
- return TokError("function body requires at least one basic block");
-
- while (Lex.getKind() != lltok::rbrace && Lex.getKind() != lltok::kw_end)
- if (ParseBasicBlock(PFS)) return true;
-
- // Eat the }.
- Lex.Lex();
-
- // Verify function is ok.
- return PFS.FinishFunction();
-}
-
-/// ParseBasicBlock
-/// ::= LabelStr? Instruction*
-bool LLParser::ParseBasicBlock(PerFunctionState &PFS) {
- // If this basic block starts out with a name, remember it.
- std::string Name;
- LocTy NameLoc = Lex.getLoc();
- if (Lex.getKind() == lltok::LabelStr) {
- Name = Lex.getStrVal();
- Lex.Lex();
- }
-
- BasicBlock *BB = PFS.DefineBB(Name, NameLoc);
- if (BB == 0) return true;
-
- std::string NameStr;
-
- // Parse the instructions in this block until we get a terminator.
- Instruction *Inst;
- SmallVector<std::pair<unsigned, MDNode *>, 4> MetadataOnInst;
- do {
- // This instruction may have three possibilities for a name: a) none
- // specified, b) name specified "%foo =", c) number specified: "%4 =".
- LocTy NameLoc = Lex.getLoc();
- int NameID = -1;
- NameStr = "";
-
- if (Lex.getKind() == lltok::LocalVarID) {
- NameID = Lex.getUIntVal();
- Lex.Lex();
- if (ParseToken(lltok::equal, "expected '=' after instruction id"))
- return true;
- } else if (Lex.getKind() == lltok::LocalVar ||
- // FIXME: REMOVE IN LLVM 3.0
- Lex.getKind() == lltok::StringConstant) {
- NameStr = Lex.getStrVal();
- Lex.Lex();
- if (ParseToken(lltok::equal, "expected '=' after instruction name"))
- return true;
- }
-
- switch (ParseInstruction(Inst, BB, PFS)) {
- default: assert(0 && "Unknown ParseInstruction result!");
- case InstError: return true;
- case InstNormal:
- // With a normal result, we check to see if the instruction is followed by
- // a comma and metadata.
- if (EatIfPresent(lltok::comma))
- if (ParseInstructionMetadata(MetadataOnInst))
- return true;
- break;
- case InstExtraComma:
- // If the instruction parser ate an extra comma at the end of it, it
- // *must* be followed by metadata.
- if (ParseInstructionMetadata(MetadataOnInst))
- return true;
- break;
- }
-
- // Set metadata attached with this instruction.
- for (unsigned i = 0, e = MetadataOnInst.size(); i != e; ++i)
- Inst->setMetadata(MetadataOnInst[i].first, MetadataOnInst[i].second);
- MetadataOnInst.clear();
-
- BB->getInstList().push_back(Inst);
-
- // Set the name on the instruction.
- if (PFS.SetInstName(NameID, NameStr, NameLoc, Inst)) return true;
- } while (!isa<TerminatorInst>(Inst));
-
- return false;
-}
-
-//===----------------------------------------------------------------------===//
-// Instruction Parsing.
-//===----------------------------------------------------------------------===//
-
-/// ParseInstruction - Parse one of the many different instructions.
-///
-int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB,
- PerFunctionState &PFS) {
- lltok::Kind Token = Lex.getKind();
- if (Token == lltok::Eof)
- return TokError("found end of file when expecting more instructions");
- LocTy Loc = Lex.getLoc();
- unsigned KeywordVal = Lex.getUIntVal();
- Lex.Lex(); // Eat the keyword.
-
- switch (Token) {
- default: return Error(Loc, "expected instruction opcode");
- // Terminator Instructions.
- case lltok::kw_unwind: Inst = new UnwindInst(Context); return false;
- case lltok::kw_unreachable: Inst = new UnreachableInst(Context); return false;
- case lltok::kw_ret: return ParseRet(Inst, BB, PFS);
- case lltok::kw_br: return ParseBr(Inst, PFS);
- case lltok::kw_switch: return ParseSwitch(Inst, PFS);
- case lltok::kw_indirectbr: return ParseIndirectBr(Inst, PFS);
- case lltok::kw_invoke: return ParseInvoke(Inst, PFS);
- // Binary Operators.
- case lltok::kw_add:
- case lltok::kw_sub:
- case lltok::kw_mul: {
- bool NUW = false;
- bool NSW = false;
- LocTy ModifierLoc = Lex.getLoc();
- if (EatIfPresent(lltok::kw_nuw))
- NUW = true;
- if (EatIfPresent(lltok::kw_nsw)) {
- NSW = true;
- if (EatIfPresent(lltok::kw_nuw))
- NUW = true;
- }
- // API compatibility: Accept either integer or floating-point types.
- bool Result = ParseArithmetic(Inst, PFS, KeywordVal, 0);
- if (!Result) {
- if (!Inst->getType()->isIntOrIntVectorTy()) {
- if (NUW)
- return Error(ModifierLoc, "nuw only applies to integer operations");
- if (NSW)
- return Error(ModifierLoc, "nsw only applies to integer operations");
- }
- if (NUW)
- cast<BinaryOperator>(Inst)->setHasNoUnsignedWrap(true);
- if (NSW)
- cast<BinaryOperator>(Inst)->setHasNoSignedWrap(true);
- }
- return Result;
- }
- case lltok::kw_fadd:
- case lltok::kw_fsub:
- case lltok::kw_fmul: return ParseArithmetic(Inst, PFS, KeywordVal, 2);
-
- case lltok::kw_sdiv: {
- bool Exact = false;
- if (EatIfPresent(lltok::kw_exact))
- Exact = true;
- bool Result = ParseArithmetic(Inst, PFS, KeywordVal, 1);
- if (!Result)
- if (Exact)
- cast<BinaryOperator>(Inst)->setIsExact(true);
- return Result;
- }
-
- case lltok::kw_udiv:
- case lltok::kw_urem:
- case lltok::kw_srem: return ParseArithmetic(Inst, PFS, KeywordVal, 1);
- case lltok::kw_fdiv:
- case lltok::kw_frem: return ParseArithmetic(Inst, PFS, KeywordVal, 2);
- case lltok::kw_shl:
- case lltok::kw_lshr:
- case lltok::kw_ashr:
- case lltok::kw_and:
- case lltok::kw_or:
- case lltok::kw_xor: return ParseLogical(Inst, PFS, KeywordVal);
- case lltok::kw_icmp:
- case lltok::kw_fcmp: return ParseCompare(Inst, PFS, KeywordVal);
- // Casts.
- case lltok::kw_trunc:
- case lltok::kw_zext:
- case lltok::kw_sext:
- case lltok::kw_fptrunc:
- case lltok::kw_fpext:
- case lltok::kw_bitcast:
- case lltok::kw_uitofp:
- case lltok::kw_sitofp:
- case lltok::kw_fptoui:
- case lltok::kw_fptosi:
- case lltok::kw_inttoptr:
- case lltok::kw_ptrtoint: return ParseCast(Inst, PFS, KeywordVal);
- // Other.
- case lltok::kw_select: return ParseSelect(Inst, PFS);
- case lltok::kw_va_arg: return ParseVA_Arg(Inst, PFS);
- case lltok::kw_extractelement: return ParseExtractElement(Inst, PFS);
- case lltok::kw_insertelement: return ParseInsertElement(Inst, PFS);
- case lltok::kw_shufflevector: return ParseShuffleVector(Inst, PFS);
- case lltok::kw_phi: return ParsePHI(Inst, PFS);
- case lltok::kw_call: return ParseCall(Inst, PFS, false);
- case lltok::kw_tail: return ParseCall(Inst, PFS, true);
- // Memory.
- case lltok::kw_alloca: return ParseAlloc(Inst, PFS);
- case lltok::kw_malloc: return ParseAlloc(Inst, PFS, BB, false);
- case lltok::kw_free: return ParseFree(Inst, PFS, BB);
- case lltok::kw_load: return ParseLoad(Inst, PFS, false);
- case lltok::kw_store: return ParseStore(Inst, PFS, false);
- case lltok::kw_volatile:
- if (EatIfPresent(lltok::kw_load))
- return ParseLoad(Inst, PFS, true);
- else if (EatIfPresent(lltok::kw_store))
- return ParseStore(Inst, PFS, true);
- else
- return TokError("expected 'load' or 'store'");
- case lltok::kw_getresult: return ParseGetResult(Inst, PFS);
- case lltok::kw_getelementptr: return ParseGetElementPtr(Inst, PFS);
- case lltok::kw_extractvalue: return ParseExtractValue(Inst, PFS);
- case lltok::kw_insertvalue: return ParseInsertValue(Inst, PFS);
- }
-}
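
For illustration only, a minimal fragment of the kind of input this dispatcher accepts -- a sketch with invented names -- covering a flagged binary operator, a compare, branches, a phi, and both named and implicitly numbered results:

  // Hypothetical .ll input, kept as a C++ string constant for reference only.
  static const char *InstructionForms =
    "define i32 @clamp_add(i32 %a, i32 %b) {\n"
    "entry:\n"
    "  %sum = add nsw i32 %a, %b\n"                     // named result, 'nsw' flag
    "  %0 = icmp sgt i32 %sum, 255\n"                   // implicitly numbered result
    "  br i1 %0, label %sat, label %done\n"             // conditional terminator
    "sat:\n"
    "  br label %done\n"
    "done:\n"
    "  %r = phi i32 [ 255, %sat ], [ %sum, %entry ]\n"  // phi over both predecessors
    "  ret i32 %r\n"
    "}\n";
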
-
-/// ParseCmpPredicate - Parse an integer or fp predicate, based on Kind.
-bool LLParser::ParseCmpPredicate(unsigned &P, unsigned Opc) {
- if (Opc == Instruction::FCmp) {
- switch (Lex.getKind()) {
- default: TokError("expected fcmp predicate (e.g. 'oeq')");
- case lltok::kw_oeq: P = CmpInst::FCMP_OEQ; break;
- case lltok::kw_one: P = CmpInst::FCMP_ONE; break;
- case lltok::kw_olt: P = CmpInst::FCMP_OLT; break;
- case lltok::kw_ogt: P = CmpInst::FCMP_OGT; break;
- case lltok::kw_ole: P = CmpInst::FCMP_OLE; break;
- case lltok::kw_oge: P = CmpInst::FCMP_OGE; break;
- case lltok::kw_ord: P = CmpInst::FCMP_ORD; break;
- case lltok::kw_uno: P = CmpInst::FCMP_UNO; break;
- case lltok::kw_ueq: P = CmpInst::FCMP_UEQ; break;
- case lltok::kw_une: P = CmpInst::FCMP_UNE; break;
- case lltok::kw_ult: P = CmpInst::FCMP_ULT; break;
- case lltok::kw_ugt: P = CmpInst::FCMP_UGT; break;
- case lltok::kw_ule: P = CmpInst::FCMP_ULE; break;
- case lltok::kw_uge: P = CmpInst::FCMP_UGE; break;
- case lltok::kw_true: P = CmpInst::FCMP_TRUE; break;
- case lltok::kw_false: P = CmpInst::FCMP_FALSE; break;
- }
- } else {
- switch (Lex.getKind()) {
- default: TokError("expected icmp predicate (e.g. 'eq')");
- case lltok::kw_eq: P = CmpInst::ICMP_EQ; break;
- case lltok::kw_ne: P = CmpInst::ICMP_NE; break;
- case lltok::kw_slt: P = CmpInst::ICMP_SLT; break;
- case lltok::kw_sgt: P = CmpInst::ICMP_SGT; break;
- case lltok::kw_sle: P = CmpInst::ICMP_SLE; break;
- case lltok::kw_sge: P = CmpInst::ICMP_SGE; break;
- case lltok::kw_ult: P = CmpInst::ICMP_ULT; break;
- case lltok::kw_ugt: P = CmpInst::ICMP_UGT; break;
- case lltok::kw_ule: P = CmpInst::ICMP_ULE; break;
- case lltok::kw_uge: P = CmpInst::ICMP_UGE; break;
- }
- }
- Lex.Lex();
- return false;
-}
-
-//===----------------------------------------------------------------------===//
-// Terminator Instructions.
-//===----------------------------------------------------------------------===//
-
-/// ParseRet - Parse a return instruction.
-/// ::= 'ret' void (',' !dbg, !1)*
-/// ::= 'ret' TypeAndValue (',' !dbg, !1)*
-/// ::= 'ret' TypeAndValue (',' TypeAndValue)+ (',' !dbg, !1)*
-/// [[obsolete: LLVM 3.0]]
-int LLParser::ParseRet(Instruction *&Inst, BasicBlock *BB,
- PerFunctionState &PFS) {
- PATypeHolder Ty(Type::getVoidTy(Context));
- if (ParseType(Ty, true /*void allowed*/)) return true;
-
- if (Ty->isVoidTy()) {
- Inst = ReturnInst::Create(Context);
- return false;
- }
-
- Value *RV;
- if (ParseValue(Ty, RV, PFS)) return true;
-
- bool ExtraComma = false;
- if (EatIfPresent(lltok::comma)) {
- // Parse optional custom metadata, e.g. !dbg
- if (Lex.getKind() == lltok::MetadataVar) {
- ExtraComma = true;
- } else {
- // The normal case is one return value.
- // FIXME: LLVM 3.0 remove MRV support for 'ret i32 1, i32 2', requiring
- // use of 'ret {i32,i32} {i32 1, i32 2}'
- SmallVector<Value*, 8> RVs;
- RVs.push_back(RV);
-
- do {
- // If optional custom metadata (e.g. !dbg) is seen, this is the end of
- // the MRV list.
- if (Lex.getKind() == lltok::MetadataVar)
- break;
- if (ParseTypeAndValue(RV, PFS)) return true;
- RVs.push_back(RV);
- } while (EatIfPresent(lltok::comma));
-
- RV = UndefValue::get(PFS.getFunction().getReturnType());
- for (unsigned i = 0, e = RVs.size(); i != e; ++i) {
- Instruction *I = InsertValueInst::Create(RV, RVs[i], i, "mrv");
- BB->getInstList().push_back(I);
- RV = I;
- }
- }
- }
-
- Inst = ReturnInst::Create(Context, RV);
- return ExtraComma ? InstExtraComma : InstNormal;
-}
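
The FIXME above refers to the obsolete multiple-return-value spelling that this routine still accepts and rewrites into insertvalue instructions. A sketch of the two spellings (function name invented):

  // Hypothetical before/after forms, kept as C++ string constants for reference only.
  static const char *ObsoleteMRVRet =
    "define { i32, i32 } @pair() {\n"
    "  ret i32 1, i32 2\n"                   // old comma-separated form
    "}\n";
  static const char *AggregateRet =
    "define { i32, i32 } @pair() {\n"
    "  ret { i32, i32 } { i32 1, i32 2 }\n"  // single aggregate return value
    "}\n";
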
-
-
-/// ParseBr
-/// ::= 'br' TypeAndValue
-/// ::= 'br' TypeAndValue ',' TypeAndValue ',' TypeAndValue
-bool LLParser::ParseBr(Instruction *&Inst, PerFunctionState &PFS) {
- LocTy Loc, Loc2;
- Value *Op0;
- BasicBlock *Op1, *Op2;
- if (ParseTypeAndValue(Op0, Loc, PFS)) return true;
-
- if (BasicBlock *BB = dyn_cast<BasicBlock>(Op0)) {
- Inst = BranchInst::Create(BB);
- return false;
- }
-
- if (Op0->getType() != Type::getInt1Ty(Context))
- return Error(Loc, "branch condition must have 'i1' type");
-
- if (ParseToken(lltok::comma, "expected ',' after branch condition") ||
- ParseTypeAndBasicBlock(Op1, Loc, PFS) ||
- ParseToken(lltok::comma, "expected ',' after true destination") ||
- ParseTypeAndBasicBlock(Op2, Loc2, PFS))
- return true;
-
- Inst = BranchInst::Create(Op1, Op2, Op0);
- return false;
-}
-
-/// ParseSwitch
-/// Instruction
-/// ::= 'switch' TypeAndValue ',' TypeAndValue '[' JumpTable ']'
-/// JumpTable
-/// ::= (TypeAndValue ',' TypeAndValue)*
-bool LLParser::ParseSwitch(Instruction *&Inst, PerFunctionState &PFS) {
- LocTy CondLoc, BBLoc;
- Value *Cond;
- BasicBlock *DefaultBB;
- if (ParseTypeAndValue(Cond, CondLoc, PFS) ||
- ParseToken(lltok::comma, "expected ',' after switch condition") ||
- ParseTypeAndBasicBlock(DefaultBB, BBLoc, PFS) ||
- ParseToken(lltok::lsquare, "expected '[' with switch table"))
- return true;
-
- if (!Cond->getType()->isIntegerTy())
- return Error(CondLoc, "switch condition must have integer type");
-
- // Parse the jump table pairs.
- SmallPtrSet<Value*, 32> SeenCases;
- SmallVector<std::pair<ConstantInt*, BasicBlock*>, 32> Table;
- while (Lex.getKind() != lltok::rsquare) {
- Value *Constant;
- BasicBlock *DestBB;
-
- if (ParseTypeAndValue(Constant, CondLoc, PFS) ||
- ParseToken(lltok::comma, "expected ',' after case value") ||
- ParseTypeAndBasicBlock(DestBB, PFS))
- return true;
-
- if (!SeenCases.insert(Constant))
- return Error(CondLoc, "duplicate case value in switch");
- if (!isa<ConstantInt>(Constant))
- return Error(CondLoc, "case value is not a constant integer");
-
- Table.push_back(std::make_pair(cast<ConstantInt>(Constant), DestBB));
- }
-
- Lex.Lex(); // Eat the ']'.
-
- SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, Table.size());
- for (unsigned i = 0, e = Table.size(); i != e; ++i)
- SI->addCase(Table[i].first, Table[i].second);
- Inst = SI;
- return false;
-}
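
A sketch of input matching the grammar above -- an integer condition, a default label, and a bracketed jump table of value/label pairs (names invented):

  // Hypothetical .ll fragment, kept as a C++ string constant for reference only.
  static const char *SwitchExample =
    "define void @dispatch(i8 %op) {\n"
    "entry:\n"
    "  switch i8 %op, label %other [ i8 0, label %zero\n"
    "                                i8 1, label %one ]\n"
    "zero:\n"
    "  ret void\n"
    "one:\n"
    "  ret void\n"
    "other:\n"
    "  ret void\n"
    "}\n";
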
-
-/// ParseIndirectBr
-/// Instruction
-/// ::= 'indirectbr' TypeAndValue ',' '[' LabelList ']'
-bool LLParser::ParseIndirectBr(Instruction *&Inst, PerFunctionState &PFS) {
- LocTy AddrLoc;
- Value *Address;
- if (ParseTypeAndValue(Address, AddrLoc, PFS) ||
- ParseToken(lltok::comma, "expected ',' after indirectbr address") ||
- ParseToken(lltok::lsquare, "expected '[' with indirectbr"))
- return true;
-
- if (!Address->getType()->isPointerTy())
- return Error(AddrLoc, "indirectbr address must have pointer type");
-
- // Parse the destination list.
- SmallVector<BasicBlock*, 16> DestList;
-
- if (Lex.getKind() != lltok::rsquare) {
- BasicBlock *DestBB;
- if (ParseTypeAndBasicBlock(DestBB, PFS))
- return true;
- DestList.push_back(DestBB);
-
- while (EatIfPresent(lltok::comma)) {
- if (ParseTypeAndBasicBlock(DestBB, PFS))
- return true;
- DestList.push_back(DestBB);
- }
- }
-
- if (ParseToken(lltok::rsquare, "expected ']' at end of block list"))
- return true;
-
- IndirectBrInst *IBI = IndirectBrInst::Create(Address, DestList.size());
- for (unsigned i = 0, e = DestList.size(); i != e; ++i)
- IBI->addDestination(DestList[i]);
- Inst = IBI;
- return false;
-}
-
-
-/// ParseInvoke
-/// ::= 'invoke' OptionalCallingConv OptionalAttrs Type Value ParamList
-/// OptionalAttrs 'to' TypeAndValue 'unwind' TypeAndValue
-bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
- LocTy CallLoc = Lex.getLoc();
- unsigned RetAttrs, FnAttrs;
- CallingConv::ID CC;
- PATypeHolder RetType(Type::getVoidTy(Context));
- LocTy RetTypeLoc;
- ValID CalleeID;
- SmallVector<ParamInfo, 16> ArgList;
-
- BasicBlock *NormalBB, *UnwindBB;
- if (ParseOptionalCallingConv(CC) ||
- ParseOptionalAttrs(RetAttrs, 1) ||
- ParseType(RetType, RetTypeLoc, true /*void allowed*/) ||
- ParseValID(CalleeID) ||
- ParseParameterList(ArgList, PFS) ||
- ParseOptionalAttrs(FnAttrs, 2) ||
- ParseToken(lltok::kw_to, "expected 'to' in invoke") ||
- ParseTypeAndBasicBlock(NormalBB, PFS) ||
- ParseToken(lltok::kw_unwind, "expected 'unwind' in invoke") ||
- ParseTypeAndBasicBlock(UnwindBB, PFS))
- return true;
-
- // If RetType is a non-function pointer type, then this is the short syntax
- // for the call, which means that RetType is just the return type. Infer the
- // rest of the function argument types from the arguments that are present.
- const PointerType *PFTy = 0;
- const FunctionType *Ty = 0;
- if (!(PFTy = dyn_cast<PointerType>(RetType)) ||
- !(Ty = dyn_cast<FunctionType>(PFTy->getElementType()))) {
- // Pull out the types of all of the arguments...
- std::vector<const Type*> ParamTypes;
- for (unsigned i = 0, e = ArgList.size(); i != e; ++i)
- ParamTypes.push_back(ArgList[i].V->getType());
-
- if (!FunctionType::isValidReturnType(RetType))
- return Error(RetTypeLoc, "Invalid result type for LLVM function");
-
- Ty = FunctionType::get(RetType, ParamTypes, false);
- PFTy = PointerType::getUnqual(Ty);
- }
-
- // Look up the callee.
- Value *Callee;
- if (ConvertValIDToValue(PFTy, CalleeID, Callee, &PFS)) return true;
-
- // FIXME: In LLVM 3.0, stop accepting zext, sext and inreg as optional
- // function attributes.
- unsigned ObsoleteFuncAttrs = Attribute::ZExt|Attribute::SExt|Attribute::InReg;
- if (FnAttrs & ObsoleteFuncAttrs) {
- RetAttrs |= FnAttrs & ObsoleteFuncAttrs;
- FnAttrs &= ~ObsoleteFuncAttrs;
- }
-
- // Set up the Attributes for the function.
- SmallVector<AttributeWithIndex, 8> Attrs;
- if (RetAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(0, RetAttrs));
-
- SmallVector<Value*, 8> Args;
-
- // Loop through FunctionType's arguments and ensure they are specified
- // correctly. Also, gather any parameter attributes.
- FunctionType::param_iterator I = Ty->param_begin();
- FunctionType::param_iterator E = Ty->param_end();
- for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
- const Type *ExpectedTy = 0;
- if (I != E) {
- ExpectedTy = *I++;
- } else if (!Ty->isVarArg()) {
- return Error(ArgList[i].Loc, "too many arguments specified");
- }
-
- if (ExpectedTy && ExpectedTy != ArgList[i].V->getType())
- return Error(ArgList[i].Loc, "argument is not of expected type '" +
- ExpectedTy->getDescription() + "'");
- Args.push_back(ArgList[i].V);
- if (ArgList[i].Attrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(i+1, ArgList[i].Attrs));
- }
-
- if (I != E)
- return Error(CallLoc, "not enough parameters specified for call");
-
- if (FnAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(~0, FnAttrs));
-
- // Finish off the Attributes and check them
- AttrListPtr PAL = AttrListPtr::get(Attrs.begin(), Attrs.end());
-
- InvokeInst *II = InvokeInst::Create(Callee, NormalBB, UnwindBB,
- Args.begin(), Args.end());
- II->setCallingConv(CC);
- II->setAttributes(PAL);
- Inst = II;
- return false;
-}
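
A sketch of an invoke in the short syntax handled above (return type only, callee signature inferred from the arguments), with the pre-3.0 'unwind' terminator in the exception destination; names are invented:

  // Hypothetical .ll fragment, kept as a C++ string constant for reference only.
  static const char *InvokeExample =
    "define i32 @wrap(i32 %x) {\n"
    "entry:\n"
    "  %r = invoke i32 @may_throw(i32 %x)\n"
    "          to label %cont unwind label %handler\n"  // 'to' / 'unwind' destinations
    "cont:\n"
    "  ret i32 %r\n"
    "handler:\n"
    "  unwind\n"                                        // obsolete terminator, still accepted here
    "}\n"
    "declare i32 @may_throw(i32)\n";
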
-
-
-
-//===----------------------------------------------------------------------===//
-// Binary Operators.
-//===----------------------------------------------------------------------===//
-
-/// ParseArithmetic
-/// ::= ArithmeticOps TypeAndValue ',' Value
-///
-/// If OperandType is 0, then any FP or integer operand is allowed. If it is 1,
-/// then any integer operand is allowed; if it is 2, any FP operand is allowed.
-bool LLParser::ParseArithmetic(Instruction *&Inst, PerFunctionState &PFS,
- unsigned Opc, unsigned OperandType) {
- LocTy Loc; Value *LHS, *RHS;
- if (ParseTypeAndValue(LHS, Loc, PFS) ||
- ParseToken(lltok::comma, "expected ',' in arithmetic operation") ||
- ParseValue(LHS->getType(), RHS, PFS))
- return true;
-
- bool Valid;
- switch (OperandType) {
- default: llvm_unreachable("Unknown operand type!");
- case 0: // int or FP.
- Valid = LHS->getType()->isIntOrIntVectorTy() ||
- LHS->getType()->isFPOrFPVectorTy();
- break;
- case 1: Valid = LHS->getType()->isIntOrIntVectorTy(); break;
- case 2: Valid = LHS->getType()->isFPOrFPVectorTy(); break;
- }
-
- if (!Valid)
- return Error(Loc, "invalid operand type for instruction");
-
- Inst = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
- return false;
-}
-
-/// ParseLogical
-/// ::= ArithmeticOps TypeAndValue ',' Value
-bool LLParser::ParseLogical(Instruction *&Inst, PerFunctionState &PFS,
- unsigned Opc) {
- LocTy Loc; Value *LHS, *RHS;
- if (ParseTypeAndValue(LHS, Loc, PFS) ||
- ParseToken(lltok::comma, "expected ',' in logical operation") ||
- ParseValue(LHS->getType(), RHS, PFS))
- return true;
-
- if (!LHS->getType()->isIntOrIntVectorTy())
- return Error(Loc,"instruction requires integer or integer vector operands");
-
- Inst = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
- return false;
-}
-
-
-/// ParseCompare
-/// ::= 'icmp' IPredicates TypeAndValue ',' Value
-/// ::= 'fcmp' FPredicates TypeAndValue ',' Value
-bool LLParser::ParseCompare(Instruction *&Inst, PerFunctionState &PFS,
- unsigned Opc) {
- // Parse the integer/fp comparison predicate.
- LocTy Loc;
- unsigned Pred;
- Value *LHS, *RHS;
- if (ParseCmpPredicate(Pred, Opc) ||
- ParseTypeAndValue(LHS, Loc, PFS) ||
- ParseToken(lltok::comma, "expected ',' after compare value") ||
- ParseValue(LHS->getType(), RHS, PFS))
- return true;
-
- if (Opc == Instruction::FCmp) {
- if (!LHS->getType()->isFPOrFPVectorTy())
- return Error(Loc, "fcmp requires floating point operands");
- Inst = new FCmpInst(CmpInst::Predicate(Pred), LHS, RHS);
- } else {
- assert(Opc == Instruction::ICmp && "Unknown opcode for CmpInst!");
- if (!LHS->getType()->isIntOrIntVectorTy() &&
- !LHS->getType()->isPointerTy())
- return Error(Loc, "icmp requires integer operands");
- Inst = new ICmpInst(CmpInst::Predicate(Pred), LHS, RHS);
- }
- return false;
-}
-
-//===----------------------------------------------------------------------===//
-// Other Instructions.
-//===----------------------------------------------------------------------===//
-
-
-/// ParseCast
-/// ::= CastOpc TypeAndValue 'to' Type
-bool LLParser::ParseCast(Instruction *&Inst, PerFunctionState &PFS,
- unsigned Opc) {
- LocTy Loc; Value *Op;
- PATypeHolder DestTy(Type::getVoidTy(Context));
- if (ParseTypeAndValue(Op, Loc, PFS) ||
- ParseToken(lltok::kw_to, "expected 'to' after cast value") ||
- ParseType(DestTy))
- return true;
-
- if (!CastInst::castIsValid((Instruction::CastOps)Opc, Op, DestTy)) {
- CastInst::castIsValid((Instruction::CastOps)Opc, Op, DestTy);
- return Error(Loc, "invalid cast opcode for cast from '" +
- Op->getType()->getDescription() + "' to '" +
- DestTy->getDescription() + "'");
- }
- Inst = CastInst::Create((Instruction::CastOps)Opc, Op, DestTy);
- return false;
-}
-
-/// ParseSelect
-/// ::= 'select' TypeAndValue ',' TypeAndValue ',' TypeAndValue
-bool LLParser::ParseSelect(Instruction *&Inst, PerFunctionState &PFS) {
- LocTy Loc;
- Value *Op0, *Op1, *Op2;
- if (ParseTypeAndValue(Op0, Loc, PFS) ||
- ParseToken(lltok::comma, "expected ',' after select condition") ||
- ParseTypeAndValue(Op1, PFS) ||
- ParseToken(lltok::comma, "expected ',' after select value") ||
- ParseTypeAndValue(Op2, PFS))
- return true;
-
- if (const char *Reason = SelectInst::areInvalidOperands(Op0, Op1, Op2))
- return Error(Loc, Reason);
-
- Inst = SelectInst::Create(Op0, Op1, Op2);
- return false;
-}
-
-/// ParseVA_Arg
-/// ::= 'va_arg' TypeAndValue ',' Type
-bool LLParser::ParseVA_Arg(Instruction *&Inst, PerFunctionState &PFS) {
- Value *Op;
- PATypeHolder EltTy(Type::getVoidTy(Context));
- LocTy TypeLoc;
- if (ParseTypeAndValue(Op, PFS) ||
- ParseToken(lltok::comma, "expected ',' after vaarg operand") ||
- ParseType(EltTy, TypeLoc))
- return true;
-
- if (!EltTy->isFirstClassType())
- return Error(TypeLoc, "va_arg requires operand with first class type");
-
- Inst = new VAArgInst(Op, EltTy);
- return false;
-}
-
-/// ParseExtractElement
-/// ::= 'extractelement' TypeAndValue ',' TypeAndValue
-bool LLParser::ParseExtractElement(Instruction *&Inst, PerFunctionState &PFS) {
- LocTy Loc;
- Value *Op0, *Op1;
- if (ParseTypeAndValue(Op0, Loc, PFS) ||
- ParseToken(lltok::comma, "expected ',' after extract value") ||
- ParseTypeAndValue(Op1, PFS))
- return true;
-
- if (!ExtractElementInst::isValidOperands(Op0, Op1))
- return Error(Loc, "invalid extractelement operands");
-
- Inst = ExtractElementInst::Create(Op0, Op1);
- return false;
-}
-
-/// ParseInsertElement
-/// ::= 'insertelement' TypeAndValue ',' TypeAndValue ',' TypeAndValue
-bool LLParser::ParseInsertElement(Instruction *&Inst, PerFunctionState &PFS) {
- LocTy Loc;
- Value *Op0, *Op1, *Op2;
- if (ParseTypeAndValue(Op0, Loc, PFS) ||
- ParseToken(lltok::comma, "expected ',' after insertelement value") ||
- ParseTypeAndValue(Op1, PFS) ||
- ParseToken(lltok::comma, "expected ',' after insertelement value") ||
- ParseTypeAndValue(Op2, PFS))
- return true;
-
- if (!InsertElementInst::isValidOperands(Op0, Op1, Op2))
- return Error(Loc, "invalid insertelement operands");
-
- Inst = InsertElementInst::Create(Op0, Op1, Op2);
- return false;
-}
-
-/// ParseShuffleVector
-/// ::= 'shufflevector' TypeAndValue ',' TypeAndValue ',' TypeAndValue
-bool LLParser::ParseShuffleVector(Instruction *&Inst, PerFunctionState &PFS) {
- LocTy Loc;
- Value *Op0, *Op1, *Op2;
- if (ParseTypeAndValue(Op0, Loc, PFS) ||
- ParseToken(lltok::comma, "expected ',' after shuffle mask") ||
- ParseTypeAndValue(Op1, PFS) ||
- ParseToken(lltok::comma, "expected ',' after shuffle value") ||
- ParseTypeAndValue(Op2, PFS))
- return true;
-
- if (!ShuffleVectorInst::isValidOperands(Op0, Op1, Op2))
- return Error(Loc, "invalid extractelement operands");
-
- Inst = new ShuffleVectorInst(Op0, Op1, Op2);
- return false;
-}
-
-/// ParsePHI
-/// ::= 'phi' Type '[' Value ',' Value ']' (',' '[' Value ',' Value ']')*
-int LLParser::ParsePHI(Instruction *&Inst, PerFunctionState &PFS) {
- PATypeHolder Ty(Type::getVoidTy(Context));
- Value *Op0, *Op1;
- LocTy TypeLoc = Lex.getLoc();
-
- if (ParseType(Ty) ||
- ParseToken(lltok::lsquare, "expected '[' in phi value list") ||
- ParseValue(Ty, Op0, PFS) ||
- ParseToken(lltok::comma, "expected ',' after insertelement value") ||
- ParseValue(Type::getLabelTy(Context), Op1, PFS) ||
- ParseToken(lltok::rsquare, "expected ']' in phi value list"))
- return true;
-
- bool AteExtraComma = false;
- SmallVector<std::pair<Value*, BasicBlock*>, 16> PHIVals;
- while (1) {
- PHIVals.push_back(std::make_pair(Op0, cast<BasicBlock>(Op1)));
-
- if (!EatIfPresent(lltok::comma))
- break;
-
- if (Lex.getKind() == lltok::MetadataVar) {
- AteExtraComma = true;
- break;
- }
-
- if (ParseToken(lltok::lsquare, "expected '[' in phi value list") ||
- ParseValue(Ty, Op0, PFS) ||
- ParseToken(lltok::comma, "expected ',' after insertelement value") ||
- ParseValue(Type::getLabelTy(Context), Op1, PFS) ||
- ParseToken(lltok::rsquare, "expected ']' in phi value list"))
- return true;
- }
-
- if (!Ty->isFirstClassType())
- return Error(TypeLoc, "phi node must have first class type");
-
- PHINode *PN = PHINode::Create(Ty);
- PN->reserveOperandSpace(PHIVals.size());
- for (unsigned i = 0, e = PHIVals.size(); i != e; ++i)
- PN->addIncoming(PHIVals[i].first, PHIVals[i].second);
- Inst = PN;
- return AteExtraComma ? InstExtraComma : InstNormal;
-}
-
-/// ParseCall
-/// ::= 'tail'? 'call' OptionalCallingConv OptionalAttrs Type Value
-/// ParameterList OptionalAttrs
-bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
- bool isTail) {
- unsigned RetAttrs, FnAttrs;
- CallingConv::ID CC;
- PATypeHolder RetType(Type::getVoidTy(Context));
- LocTy RetTypeLoc;
- ValID CalleeID;
- SmallVector<ParamInfo, 16> ArgList;
- LocTy CallLoc = Lex.getLoc();
-
- if ((isTail && ParseToken(lltok::kw_call, "expected 'tail call'")) ||
- ParseOptionalCallingConv(CC) ||
- ParseOptionalAttrs(RetAttrs, 1) ||
- ParseType(RetType, RetTypeLoc, true /*void allowed*/) ||
- ParseValID(CalleeID) ||
- ParseParameterList(ArgList, PFS) ||
- ParseOptionalAttrs(FnAttrs, 2))
- return true;
-
- // If RetType is a non-function pointer type, then this is the short syntax
- // for the call, which means that RetType is just the return type. Infer the
- // rest of the function argument types from the arguments that are present.
- const PointerType *PFTy = 0;
- const FunctionType *Ty = 0;
- if (!(PFTy = dyn_cast<PointerType>(RetType)) ||
- !(Ty = dyn_cast<FunctionType>(PFTy->getElementType()))) {
- // Pull out the types of all of the arguments...
- std::vector<const Type*> ParamTypes;
- for (unsigned i = 0, e = ArgList.size(); i != e; ++i)
- ParamTypes.push_back(ArgList[i].V->getType());
-
- if (!FunctionType::isValidReturnType(RetType))
- return Error(RetTypeLoc, "Invalid result type for LLVM function");
-
- Ty = FunctionType::get(RetType, ParamTypes, false);
- PFTy = PointerType::getUnqual(Ty);
- }
-
- // Look up the callee.
- Value *Callee;
- if (ConvertValIDToValue(PFTy, CalleeID, Callee, &PFS)) return true;
-
- // FIXME: In LLVM 3.0, stop accepting zext, sext and inreg as optional
- // function attributes.
- unsigned ObsoleteFuncAttrs = Attribute::ZExt|Attribute::SExt|Attribute::InReg;
- if (FnAttrs & ObsoleteFuncAttrs) {
- RetAttrs |= FnAttrs & ObsoleteFuncAttrs;
- FnAttrs &= ~ObsoleteFuncAttrs;
- }
-
- // Set up the Attributes for the function.
- SmallVector<AttributeWithIndex, 8> Attrs;
- if (RetAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(0, RetAttrs));
-
- SmallVector<Value*, 8> Args;
-
- // Loop through FunctionType's arguments and ensure they are specified
- // correctly. Also, gather any parameter attributes.
- FunctionType::param_iterator I = Ty->param_begin();
- FunctionType::param_iterator E = Ty->param_end();
- for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
- const Type *ExpectedTy = 0;
- if (I != E) {
- ExpectedTy = *I++;
- } else if (!Ty->isVarArg()) {
- return Error(ArgList[i].Loc, "too many arguments specified");
- }
-
- if (ExpectedTy && ExpectedTy != ArgList[i].V->getType())
- return Error(ArgList[i].Loc, "argument is not of expected type '" +
- ExpectedTy->getDescription() + "'");
- Args.push_back(ArgList[i].V);
- if (ArgList[i].Attrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(i+1, ArgList[i].Attrs));
- }
-
- if (I != E)
- return Error(CallLoc, "not enough parameters specified for call");
-
- if (FnAttrs != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(~0, FnAttrs));
-
- // Finish off the Attributes and check them
- AttrListPtr PAL = AttrListPtr::get(Attrs.begin(), Attrs.end());
-
- CallInst *CI = CallInst::Create(Callee, Args.begin(), Args.end());
- CI->setTailCall(isTail);
- CI->setCallingConv(CC);
- CI->setAttributes(PAL);
- Inst = CI;
- return false;
-}
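
The RetType check above distinguishes the two call spellings: the short form, where only the return type is written and the signature is inferred from the arguments, and the explicit pointer-to-function form (needed, for example, for varargs callees). A sketch with an invented callee:

  // Hypothetical .ll fragments, kept as C++ string constants for reference only.
  static const char *ShortCall =
    "  %a = call i32 @length(i8* %p)\n";          // signature inferred: i32 (i8*)
  static const char *ExplicitCall =
    "  %b = call i32 (i8*)* @length(i8* %p)\n";   // callee type spelled out
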
-
-//===----------------------------------------------------------------------===//
-// Memory Instructions.
-//===----------------------------------------------------------------------===//
-
-/// ParseAlloc
-/// ::= 'malloc' Type (',' TypeAndValue)? (',' OptionalInfo)?
-/// ::= 'alloca' Type (',' TypeAndValue)? (',' OptionalInfo)?
-int LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS,
- BasicBlock* BB, bool isAlloca) {
- PATypeHolder Ty(Type::getVoidTy(Context));
- Value *Size = 0;
- LocTy SizeLoc;
- unsigned Alignment = 0;
- if (ParseType(Ty)) return true;
-
- bool AteExtraComma = false;
- if (EatIfPresent(lltok::comma)) {
- if (Lex.getKind() == lltok::kw_align) {
- if (ParseOptionalAlignment(Alignment)) return true;
- } else if (Lex.getKind() == lltok::MetadataVar) {
- AteExtraComma = true;
- } else {
- if (ParseTypeAndValue(Size, SizeLoc, PFS) ||
- ParseOptionalCommaAlign(Alignment, AteExtraComma))
- return true;
- }
- }
-
- if (Size && !Size->getType()->isIntegerTy(32))
- return Error(SizeLoc, "element count must be i32");
-
- if (isAlloca) {
- Inst = new AllocaInst(Ty, Size, Alignment);
- return AteExtraComma ? InstExtraComma : InstNormal;
- }
-
- // Autoupgrade old malloc instruction to malloc call.
- // FIXME: Remove in LLVM 3.0.
- const Type *IntPtrTy = Type::getInt32Ty(Context);
- Constant *AllocSize = ConstantExpr::getSizeOf(Ty);
- AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, IntPtrTy);
- if (!MallocF)
- // Prototype malloc as "void *(int32)".
- // This function is renamed as "malloc" in ValidateEndOfModule().
- MallocF = cast<Function>(
- M->getOrInsertFunction("", Type::getInt8PtrTy(Context), IntPtrTy, NULL));
- Inst = CallInst::CreateMalloc(BB, IntPtrTy, Ty, AllocSize, Size, MallocF);
- return AteExtraComma ? InstExtraComma : InstNormal;
-}
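
A sketch of the autoupgrade performed above: the obsolete 'malloc' instruction becomes a call to the malloc prototype plus a bitcast (the exact sizeof constant expression the upgrade emits is omitted), and new IR should spell the call out directly. Names and sizes are invented:

  // Hypothetical .ll fragments, kept as C++ string constants for reference only.
  static const char *ObsoleteMallocInst =
    "  %p = malloc i32, i32 4\n";                 // old instruction form
  static const char *HandWrittenEquivalent =
    "  %raw = call i8* @malloc(i32 16)\n"         // assumes 'declare i8* @malloc(i32)'
    "  %p = bitcast i8* %raw to i32*\n";
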
-
-/// ParseFree
-/// ::= 'free' TypeAndValue
-bool LLParser::ParseFree(Instruction *&Inst, PerFunctionState &PFS,
- BasicBlock* BB) {
- Value *Val; LocTy Loc;
- if (ParseTypeAndValue(Val, Loc, PFS)) return true;
- if (!Val->getType()->isPointerTy())
- return Error(Loc, "operand to free must be a pointer");
- Inst = CallInst::CreateFree(Val, BB);
- return false;
-}
-
-/// ParseLoad
-/// ::= 'volatile'? 'load' TypeAndValue (',' OptionalInfo)?
-int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
- bool isVolatile) {
- Value *Val; LocTy Loc;
- unsigned Alignment = 0;
- bool AteExtraComma = false;
- if (ParseTypeAndValue(Val, Loc, PFS) ||
- ParseOptionalCommaAlign(Alignment, AteExtraComma))
- return true;
-
- if (!Val->getType()->isPointerTy() ||
- !cast<PointerType>(Val->getType())->getElementType()->isFirstClassType())
- return Error(Loc, "load operand must be a pointer to a first class type");
-
- Inst = new LoadInst(Val, "", isVolatile, Alignment);
- return AteExtraComma ? InstExtraComma : InstNormal;
-}
-
-/// ParseStore
-/// ::= 'volatile'? 'store' TypeAndValue ',' TypeAndValue (',' 'align' i32)?
-int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
- bool isVolatile) {
- Value *Val, *Ptr; LocTy Loc, PtrLoc;
- unsigned Alignment = 0;
- bool AteExtraComma = false;
- if (ParseTypeAndValue(Val, Loc, PFS) ||
- ParseToken(lltok::comma, "expected ',' after store operand") ||
- ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
- ParseOptionalCommaAlign(Alignment, AteExtraComma))
- return true;
-
- if (!Ptr->getType()->isPointerTy())
- return Error(PtrLoc, "store operand must be a pointer");
- if (!Val->getType()->isFirstClassType())
- return Error(Loc, "store operand must be a first class value");
- if (cast<PointerType>(Ptr->getType())->getElementType() != Val->getType())
- return Error(Loc, "stored value and pointer type do not match");
-
- Inst = new StoreInst(Val, Ptr, isVolatile, Alignment);
- return AteExtraComma ? InstExtraComma : InstNormal;
-}
-
-/// ParseGetResult
-/// ::= 'getresult' TypeAndValue ',' i32
-/// FIXME: Remove support for getresult in LLVM 3.0
-bool LLParser::ParseGetResult(Instruction *&Inst, PerFunctionState &PFS) {
- Value *Val; LocTy ValLoc, EltLoc;
- unsigned Element;
- if (ParseTypeAndValue(Val, ValLoc, PFS) ||
- ParseToken(lltok::comma, "expected ',' after getresult operand") ||
- ParseUInt32(Element, EltLoc))
- return true;
-
- if (!Val->getType()->isStructTy() && !Val->getType()->isArrayTy())
- return Error(ValLoc, "getresult inst requires an aggregate operand");
- if (!ExtractValueInst::getIndexedType(Val->getType(), Element))
- return Error(EltLoc, "invalid getresult index for value");
- Inst = ExtractValueInst::Create(Val, Element);
- return false;
-}
-
-/// ParseGetElementPtr
-/// ::= 'getelementptr' 'inbounds'? TypeAndValue (',' TypeAndValue)*
-int LLParser::ParseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS) {
- Value *Ptr, *Val; LocTy Loc, EltLoc;
-
- bool InBounds = EatIfPresent(lltok::kw_inbounds);
-
- if (ParseTypeAndValue(Ptr, Loc, PFS)) return true;
-
- if (!Ptr->getType()->isPointerTy())
- return Error(Loc, "base of getelementptr must be a pointer");
-
- SmallVector<Value*, 16> Indices;
- bool AteExtraComma = false;
- while (EatIfPresent(lltok::comma)) {
- if (Lex.getKind() == lltok::MetadataVar) {
- AteExtraComma = true;
- break;
- }
- if (ParseTypeAndValue(Val, EltLoc, PFS)) return true;
- if (!Val->getType()->isIntegerTy())
- return Error(EltLoc, "getelementptr index must be an integer");
- Indices.push_back(Val);
- }
-
- if (!GetElementPtrInst::getIndexedType(Ptr->getType(),
- Indices.begin(), Indices.end()))
- return Error(Loc, "invalid getelementptr indices");
- Inst = GetElementPtrInst::Create(Ptr, Indices.begin(), Indices.end());
- if (InBounds)
- cast<GetElementPtrInst>(Inst)->setIsInBounds(true);
- return AteExtraComma ? InstExtraComma : InstNormal;
-}
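
A sketch of input for the grammar above: a pointer base, optionally marked 'inbounds', followed by integer indices that getIndexedType validates (struct layout and names invented):

  // Hypothetical .ll fragment, kept as a C++ string constant for reference only.
  static const char *GEPExample =
    "  %c = getelementptr inbounds { i32, [4 x i8] }* %rec, i32 0, i32 1, i32 2\n"; // yields i8*
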
-
-/// ParseExtractValue
-/// ::= 'extractvalue' TypeAndValue (',' uint32)+
-int LLParser::ParseExtractValue(Instruction *&Inst, PerFunctionState &PFS) {
- Value *Val; LocTy Loc;
- SmallVector<unsigned, 4> Indices;
- bool AteExtraComma;
- if (ParseTypeAndValue(Val, Loc, PFS) ||
- ParseIndexList(Indices, AteExtraComma))
- return true;
-
- if (!Val->getType()->isAggregateType())
- return Error(Loc, "extractvalue operand must be aggregate type");
-
- if (!ExtractValueInst::getIndexedType(Val->getType(), Indices.begin(),
- Indices.end()))
- return Error(Loc, "invalid indices for extractvalue");
- Inst = ExtractValueInst::Create(Val, Indices.begin(), Indices.end());
- return AteExtraComma ? InstExtraComma : InstNormal;
-}
-
-/// ParseInsertValue
-/// ::= 'insertvalue' TypeAndValue ',' TypeAndValue (',' uint32)+
-int LLParser::ParseInsertValue(Instruction *&Inst, PerFunctionState &PFS) {
- Value *Val0, *Val1; LocTy Loc0, Loc1;
- SmallVector<unsigned, 4> Indices;
- bool AteExtraComma;
- if (ParseTypeAndValue(Val0, Loc0, PFS) ||
- ParseToken(lltok::comma, "expected comma after insertvalue operand") ||
- ParseTypeAndValue(Val1, Loc1, PFS) ||
- ParseIndexList(Indices, AteExtraComma))
- return true;
-
- if (!Val0->getType()->isAggregateType())
- return Error(Loc0, "insertvalue operand must be aggregate type");
-
- if (!ExtractValueInst::getIndexedType(Val0->getType(), Indices.begin(),
- Indices.end()))
- return Error(Loc0, "invalid indices for insertvalue");
- Inst = InsertValueInst::Create(Val0, Val1, Indices.begin(), Indices.end());
- return AteExtraComma ? InstExtraComma : InstNormal;
-}
-
-//===----------------------------------------------------------------------===//
-// Embedded metadata.
-//===----------------------------------------------------------------------===//
-
-/// ParseMDNodeVector
-/// ::= Element (',' Element)*
-/// Element
-/// ::= 'null' | TypeAndValue
-bool LLParser::ParseMDNodeVector(SmallVectorImpl<Value*> &Elts,
- PerFunctionState *PFS) {
- do {
- // Null is a special case since it is typeless.
- if (EatIfPresent(lltok::kw_null)) {
- Elts.push_back(0);
- continue;
- }
-
- Value *V = 0;
- PATypeHolder Ty(Type::getVoidTy(Context));
- ValID ID;
- if (ParseType(Ty) || ParseValID(ID, PFS) ||
- ConvertValIDToValue(Ty, ID, V, PFS))
- return true;
-
- Elts.push_back(V);
- } while (EatIfPresent(lltok::comma));
-
- return false;
-}
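
A sketch tying this routine to ParseInstructionMetadata above: a trailing ', !dbg !0' attaches metadata to an instruction, and the referenced node is a comma-separated list in which the bare 'null' is the typeless element handled at the top of the loop above. Names and values are invented:

  // Hypothetical .ll fragments, kept as C++ string constants for reference only.
  static const char *InstrMetadataExample =
    "  %v = add i32 %a, %b, !dbg !0\n";                     // extra comma, then metadata
  static const char *MDNodeExample =
    "!0 = metadata !{i32 42, null, metadata !\"note\"}\n";  // 'null' parsed as the typeless element
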
diff --git a/libclamav/c++/llvm/lib/AsmParser/LLParser.h b/libclamav/c++/llvm/lib/AsmParser/LLParser.h
deleted file mode 100644
index 9abe404..0000000
--- a/libclamav/c++/llvm/lib/AsmParser/LLParser.h
+++ /dev/null
@@ -1,360 +0,0 @@
-//===-- LLParser.h - Parser Class -------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the parser class for .ll files.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ASMPARSER_LLPARSER_H
-#define LLVM_ASMPARSER_LLPARSER_H
-
-#include "LLLexer.h"
-#include "llvm/Module.h"
-#include "llvm/Type.h"
-#include "llvm/Support/ValueHandle.h"
-#include <map>
-
-namespace llvm {
- class Module;
- class OpaqueType;
- class Function;
- class Value;
- class BasicBlock;
- class Instruction;
- class Constant;
- class GlobalValue;
- class MDString;
- class MDNode;
- class UnionType;
-
- /// ValID - Represents a reference to a definition of some sort with no type.
- /// There are several cases where we have to parse the value but where the
- /// type can depend on later context. This may either be a numeric reference
- /// or a symbolic (%var) reference. This is just a discriminated union.
- struct ValID {
- enum {
- t_LocalID, t_GlobalID, // ID in UIntVal.
- t_LocalName, t_GlobalName, // Name in StrVal.
- t_APSInt, t_APFloat, // Value in APSIntVal/APFloatVal.
- t_Null, t_Undef, t_Zero, // No value.
- t_EmptyArray, // No value: []
- t_Constant, // Value in ConstantVal.
- t_InlineAsm, // Value in StrVal/StrVal2/UIntVal.
- t_MDNode, // Value in MDNodeVal.
- t_MDString // Value in MDStringVal.
- } Kind;
-
- LLLexer::LocTy Loc;
- unsigned UIntVal;
- std::string StrVal, StrVal2;
- APSInt APSIntVal;
- APFloat APFloatVal;
- Constant *ConstantVal;
- MDNode *MDNodeVal;
- MDString *MDStringVal;
- ValID() : APFloatVal(0.0) {}
-
- bool operator<(const ValID &RHS) const {
- if (Kind == t_LocalID || Kind == t_GlobalID)
- return UIntVal < RHS.UIntVal;
- assert((Kind == t_LocalName || Kind == t_GlobalName) &&
- "Ordering not defined for this ValID kind yet");
- return StrVal < RHS.StrVal;
- }
- };
-
- class LLParser {
- public:
- typedef LLLexer::LocTy LocTy;
- private:
- LLVMContext& Context;
- LLLexer Lex;
- Module *M;
-
- // Type resolution handling data structures.
- std::map<std::string, std::pair<PATypeHolder, LocTy> > ForwardRefTypes;
- std::map<unsigned, std::pair<PATypeHolder, LocTy> > ForwardRefTypeIDs;
- std::vector<PATypeHolder> NumberedTypes;
- std::vector<TrackingVH<MDNode> > NumberedMetadata;
- std::map<unsigned, std::pair<TrackingVH<MDNode>, LocTy> > ForwardRefMDNodes;
- struct UpRefRecord {
- /// Loc - This is the location of the upref.
- LocTy Loc;
-
- /// NestingLevel - The number of nesting levels that need to be popped
- /// before this type is resolved.
- unsigned NestingLevel;
-
- /// LastContainedTy - This is the type at the current binding level for
- /// the type. Every time we reduce the nesting level, this gets updated.
- const Type *LastContainedTy;
-
- /// UpRefTy - This is the actual opaque type that the upreference is
- /// represented with.
- OpaqueType *UpRefTy;
-
- UpRefRecord(LocTy L, unsigned NL, OpaqueType *URTy)
- : Loc(L), NestingLevel(NL), LastContainedTy((Type*)URTy),
- UpRefTy(URTy) {}
- };
- std::vector<UpRefRecord> UpRefs;
-
- // Global Value reference information.
- std::map<std::string, std::pair<GlobalValue*, LocTy> > ForwardRefVals;
- std::map<unsigned, std::pair<GlobalValue*, LocTy> > ForwardRefValIDs;
- std::vector<GlobalValue*> NumberedVals;
-
- // References to blockaddress. The key is the function ValID, the value is
- // a list of references to blocks in that function.
- std::map<ValID, std::vector<std::pair<ValID, GlobalValue*> > >
- ForwardRefBlockAddresses;
-
- Function *MallocF;
- public:
- LLParser(MemoryBuffer *F, SourceMgr &SM, SMDiagnostic &Err, Module *m) :
- Context(m->getContext()), Lex(F, SM, Err, m->getContext()),
- M(m), MallocF(NULL) {}
- bool Run();
-
- LLVMContext& getContext() { return Context; }
-
- private:
-
- bool Error(LocTy L, const std::string &Msg) const {
- return Lex.Error(L, Msg);
- }
- bool TokError(const std::string &Msg) const {
- return Error(Lex.getLoc(), Msg);
- }
-
- /// GetGlobalVal - Get a value with the specified name or ID, creating a
- /// forward reference record if needed. This can return null if the value
- /// exists but does not have the right type.
- GlobalValue *GetGlobalVal(const std::string &N, const Type *Ty, LocTy Loc);
- GlobalValue *GetGlobalVal(unsigned ID, const Type *Ty, LocTy Loc);
-
- // Helper Routines.
- bool ParseToken(lltok::Kind T, const char *ErrMsg);
- bool EatIfPresent(lltok::Kind T) {
- if (Lex.getKind() != T) return false;
- Lex.Lex();
- return true;
- }
- bool ParseOptionalToken(lltok::Kind T, bool &Present) {
- if (Lex.getKind() != T) {
- Present = false;
- } else {
- Lex.Lex();
- Present = true;
- }
- return false;
- }
- bool ParseStringConstant(std::string &Result);
- bool ParseUInt32(unsigned &Val);
- bool ParseUInt32(unsigned &Val, LocTy &Loc) {
- Loc = Lex.getLoc();
- return ParseUInt32(Val);
- }
- bool ParseOptionalAddrSpace(unsigned &AddrSpace);
- bool ParseOptionalAttrs(unsigned &Attrs, unsigned AttrKind);
- bool ParseOptionalLinkage(unsigned &Linkage, bool &HasLinkage);
- bool ParseOptionalLinkage(unsigned &Linkage) {
- bool HasLinkage; return ParseOptionalLinkage(Linkage, HasLinkage);
- }
- bool ParseOptionalVisibility(unsigned &Visibility);
- bool ParseOptionalCallingConv(CallingConv::ID &CC);
- bool ParseOptionalAlignment(unsigned &Alignment);
- bool ParseOptionalStackAlignment(unsigned &Alignment);
- bool ParseInstructionMetadata(SmallVectorImpl<std::pair<unsigned,
- MDNode *> > &);
- bool ParseOptionalCommaAlign(unsigned &Alignment, bool &AteExtraComma);
- bool ParseIndexList(SmallVectorImpl<unsigned> &Indices,bool &AteExtraComma);
- bool ParseIndexList(SmallVectorImpl<unsigned> &Indices) {
- bool AteExtraComma;
- if (ParseIndexList(Indices, AteExtraComma)) return true;
- if (AteExtraComma)
- return TokError("expected index");
- return false;
- }
-
- // Top-Level Entities
- bool ParseTopLevelEntities();
- bool ValidateEndOfModule();
- bool ParseTargetDefinition();
- bool ParseDepLibs();
- bool ParseModuleAsm();
- bool ParseUnnamedType();
- bool ParseNamedType();
- bool ParseDeclare();
- bool ParseDefine();
-
- bool ParseGlobalType(bool &IsConstant);
- bool ParseUnnamedGlobal();
- bool ParseNamedGlobal();
- bool ParseGlobal(const std::string &Name, LocTy Loc, unsigned Linkage,
- bool HasLinkage, unsigned Visibility);
- bool ParseAlias(const std::string &Name, LocTy Loc, unsigned Visibility);
- bool ParseStandaloneMetadata();
- bool ParseNamedMetadata();
- bool ParseMDString(MDString *&Result);
- bool ParseMDNodeID(MDNode *&Result);
-
- // Type Parsing.
- bool ParseType(PATypeHolder &Result, bool AllowVoid = false);
- bool ParseType(PATypeHolder &Result, LocTy &Loc, bool AllowVoid = false) {
- Loc = Lex.getLoc();
- return ParseType(Result, AllowVoid);
- }
- bool ParseTypeRec(PATypeHolder &H);
- bool ParseStructType(PATypeHolder &H, bool Packed);
- bool ParseUnionType(PATypeHolder &H);
- bool ParseArrayVectorType(PATypeHolder &H, bool isVector);
- bool ParseFunctionType(PATypeHolder &Result);
- PATypeHolder HandleUpRefs(const Type *Ty);
-
- // Function Semantic Analysis.
- class PerFunctionState {
- LLParser &P;
- Function &F;
- std::map<std::string, std::pair<Value*, LocTy> > ForwardRefVals;
- std::map<unsigned, std::pair<Value*, LocTy> > ForwardRefValIDs;
- std::vector<Value*> NumberedVals;
-
- /// FunctionNumber - If this is an unnamed function, this is the slot
- /// number of it, otherwise it is -1.
- int FunctionNumber;
- public:
- PerFunctionState(LLParser &p, Function &f, int FunctionNumber);
- ~PerFunctionState();
-
- Function &getFunction() const { return F; }
-
- bool FinishFunction();
-
- /// GetVal - Get a value with the specified name or ID, creating a
- /// forward reference record if needed. This can return null if the value
- /// exists but does not have the right type.
- Value *GetVal(const std::string &Name, const Type *Ty, LocTy Loc);
- Value *GetVal(unsigned ID, const Type *Ty, LocTy Loc);
-
- /// SetInstName - After an instruction is parsed and inserted into its
- /// basic block, this installs its name.
- bool SetInstName(int NameID, const std::string &NameStr, LocTy NameLoc,
- Instruction *Inst);
-
- /// GetBB - Get a basic block with the specified name or ID, creating a
- /// forward reference record if needed. This can return null if the value
- /// is not a BasicBlock.
- BasicBlock *GetBB(const std::string &Name, LocTy Loc);
- BasicBlock *GetBB(unsigned ID, LocTy Loc);
-
- /// DefineBB - Define the specified basic block, which is either named or
- /// unnamed. If there is an error, this returns null; otherwise it returns
- /// the block being defined.
- BasicBlock *DefineBB(const std::string &Name, LocTy Loc);
- };
-
- bool ConvertValIDToValue(const Type *Ty, ValID &ID, Value *&V,
- PerFunctionState *PFS);
-
- bool ParseValue(const Type *Ty, Value *&V, PerFunctionState &PFS);
- bool ParseValue(const Type *Ty, Value *&V, LocTy &Loc,
- PerFunctionState &PFS) {
- Loc = Lex.getLoc();
- return ParseValue(Ty, V, PFS);
- }
-
- bool ParseTypeAndValue(Value *&V, PerFunctionState &PFS);
- bool ParseTypeAndValue(Value *&V, LocTy &Loc, PerFunctionState &PFS) {
- Loc = Lex.getLoc();
- return ParseTypeAndValue(V, PFS);
- }
- bool ParseTypeAndBasicBlock(BasicBlock *&BB, LocTy &Loc,
- PerFunctionState &PFS);
- bool ParseTypeAndBasicBlock(BasicBlock *&BB, PerFunctionState &PFS) {
- LocTy Loc;
- return ParseTypeAndBasicBlock(BB, Loc, PFS);
- }
-
- bool ParseUnionValue(const UnionType* utype, ValID &ID, Value *&V);
-
- struct ParamInfo {
- LocTy Loc;
- Value *V;
- unsigned Attrs;
- ParamInfo(LocTy loc, Value *v, unsigned attrs)
- : Loc(loc), V(v), Attrs(attrs) {}
- };
- bool ParseParameterList(SmallVectorImpl<ParamInfo> &ArgList,
- PerFunctionState &PFS);
-
- // Constant Parsing.
- bool ParseValID(ValID &ID, PerFunctionState *PFS = NULL);
- bool ParseGlobalValue(const Type *Ty, Constant *&V);
- bool ParseGlobalTypeAndValue(Constant *&V);
- bool ParseGlobalValueVector(SmallVectorImpl<Constant*> &Elts);
- bool ParseMDNodeVector(SmallVectorImpl<Value*> &, PerFunctionState *PFS);
-
- // Function Parsing.
- struct ArgInfo {
- LocTy Loc;
- PATypeHolder Type;
- unsigned Attrs;
- std::string Name;
- ArgInfo(LocTy L, PATypeHolder Ty, unsigned Attr, const std::string &N)
- : Loc(L), Type(Ty), Attrs(Attr), Name(N) {}
- };
- bool ParseArgumentList(std::vector<ArgInfo> &ArgList,
- bool &isVarArg, bool inType);
- bool ParseFunctionHeader(Function *&Fn, bool isDefine);
- bool ParseFunctionBody(Function &Fn);
- bool ParseBasicBlock(PerFunctionState &PFS);
-
- // Instruction Parsing. Each instruction parsing routine can return with a
- // normal result, an error result, or return having eaten an extra comma.
- enum InstResult { InstNormal = 0, InstError = 1, InstExtraComma = 2 };
- int ParseInstruction(Instruction *&Inst, BasicBlock *BB,
- PerFunctionState &PFS);
- bool ParseCmpPredicate(unsigned &Pred, unsigned Opc);
-
- int ParseRet(Instruction *&Inst, BasicBlock *BB, PerFunctionState &PFS);
- bool ParseBr(Instruction *&Inst, PerFunctionState &PFS);
- bool ParseSwitch(Instruction *&Inst, PerFunctionState &PFS);
- bool ParseIndirectBr(Instruction *&Inst, PerFunctionState &PFS);
- bool ParseInvoke(Instruction *&Inst, PerFunctionState &PFS);
-
- bool ParseArithmetic(Instruction *&I, PerFunctionState &PFS, unsigned Opc,
- unsigned OperandType);
- bool ParseLogical(Instruction *&I, PerFunctionState &PFS, unsigned Opc);
- bool ParseCompare(Instruction *&I, PerFunctionState &PFS, unsigned Opc);
- bool ParseCast(Instruction *&I, PerFunctionState &PFS, unsigned Opc);
- bool ParseSelect(Instruction *&I, PerFunctionState &PFS);
- bool ParseVA_Arg(Instruction *&I, PerFunctionState &PFS);
- bool ParseExtractElement(Instruction *&I, PerFunctionState &PFS);
- bool ParseInsertElement(Instruction *&I, PerFunctionState &PFS);
- bool ParseShuffleVector(Instruction *&I, PerFunctionState &PFS);
- int ParsePHI(Instruction *&I, PerFunctionState &PFS);
- bool ParseCall(Instruction *&I, PerFunctionState &PFS, bool isTail);
- int ParseAlloc(Instruction *&I, PerFunctionState &PFS,
- BasicBlock *BB = 0, bool isAlloca = true);
- bool ParseFree(Instruction *&I, PerFunctionState &PFS, BasicBlock *BB);
- int ParseLoad(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
- int ParseStore(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
- bool ParseGetResult(Instruction *&I, PerFunctionState &PFS);
- int ParseGetElementPtr(Instruction *&I, PerFunctionState &PFS);
- int ParseExtractValue(Instruction *&I, PerFunctionState &PFS);
- int ParseInsertValue(Instruction *&I, PerFunctionState &PFS);
-
- bool ResolveForwardRefBlockAddresses(Function *TheFn,
- std::vector<std::pair<ValID, GlobalValue*> > &Refs,
- PerFunctionState *PFS);
- };
-} // End llvm namespace
-
-#endif
diff --git a/libclamav/c++/llvm/lib/AsmParser/LLToken.h b/libclamav/c++/llvm/lib/AsmParser/LLToken.h
deleted file mode 100644
index 3ac9169..0000000
--- a/libclamav/c++/llvm/lib/AsmParser/LLToken.h
+++ /dev/null
@@ -1,145 +0,0 @@
-//===- LLToken.h - Token Codes for LLVM Assembly Files ----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the enums for the .ll lexer.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LIBS_ASMPARSER_LLTOKEN_H
-#define LIBS_ASMPARSER_LLTOKEN_H
-
-namespace llvm {
-namespace lltok {
- enum Kind {
- // Markers
- Eof, Error,
-
- // Tokens with no info.
- dotdotdot, // ...
- equal, comma, // = ,
- star, // *
- lsquare, rsquare, // [ ]
- lbrace, rbrace, // { }
- less, greater, // < >
- lparen, rparen, // ( )
- backslash, // \ (not /)
- exclaim, // !
-
- kw_x,
- kw_begin, kw_end,
- kw_true, kw_false,
- kw_declare, kw_define,
- kw_global, kw_constant,
-
- kw_private, kw_linker_private, kw_internal, kw_linkonce, kw_linkonce_odr,
- kw_weak, kw_weak_odr, kw_appending, kw_dllimport, kw_dllexport, kw_common,
- kw_available_externally,
- kw_default, kw_hidden, kw_protected,
- kw_extern_weak,
- kw_external, kw_thread_local,
- kw_zeroinitializer,
- kw_undef, kw_null,
- kw_to,
- kw_tail,
- kw_target,
- kw_triple,
- kw_deplibs,
- kw_datalayout,
- kw_volatile,
- kw_nuw,
- kw_nsw,
- kw_exact,
- kw_inbounds,
- kw_align,
- kw_addrspace,
- kw_section,
- kw_alias,
- kw_module,
- kw_asm,
- kw_sideeffect,
- kw_alignstack,
- kw_gc,
- kw_c,
-
- kw_cc, kw_ccc, kw_fastcc, kw_coldcc,
- kw_x86_stdcallcc, kw_x86_fastcallcc,
- kw_arm_apcscc, kw_arm_aapcscc, kw_arm_aapcs_vfpcc,
- kw_msp430_intrcc,
-
- kw_signext,
- kw_zeroext,
- kw_inreg,
- kw_sret,
- kw_nounwind,
- kw_noreturn,
- kw_noalias,
- kw_nocapture,
- kw_byval,
- kw_nest,
- kw_readnone,
- kw_readonly,
-
- kw_inlinehint,
- kw_noinline,
- kw_alwaysinline,
- kw_optsize,
- kw_ssp,
- kw_sspreq,
- kw_noredzone,
- kw_noimplicitfloat,
- kw_naked,
-
- kw_type,
- kw_opaque,
- kw_union,
-
- kw_eq, kw_ne, kw_slt, kw_sgt, kw_sle, kw_sge, kw_ult, kw_ugt, kw_ule,
- kw_uge, kw_oeq, kw_one, kw_olt, kw_ogt, kw_ole, kw_oge, kw_ord, kw_uno,
- kw_ueq, kw_une,
-
- // Instruction Opcodes (Opcode in UIntVal).
- kw_add, kw_fadd, kw_sub, kw_fsub, kw_mul, kw_fmul,
- kw_udiv, kw_sdiv, kw_fdiv,
- kw_urem, kw_srem, kw_frem, kw_shl, kw_lshr, kw_ashr,
- kw_and, kw_or, kw_xor, kw_icmp, kw_fcmp,
-
- kw_phi, kw_call,
- kw_trunc, kw_zext, kw_sext, kw_fptrunc, kw_fpext, kw_uitofp, kw_sitofp,
- kw_fptoui, kw_fptosi, kw_inttoptr, kw_ptrtoint, kw_bitcast,
- kw_select, kw_va_arg,
-
- kw_ret, kw_br, kw_switch, kw_indirectbr, kw_invoke, kw_unwind,
- kw_unreachable,
-
- kw_malloc, kw_alloca, kw_free, kw_load, kw_store, kw_getelementptr,
-
- kw_extractelement, kw_insertelement, kw_shufflevector, kw_getresult,
- kw_extractvalue, kw_insertvalue, kw_blockaddress,
-
- // Unsigned Valued tokens (UIntVal).
- GlobalID, // @42
- LocalVarID, // %42
-
- // String valued tokens (StrVal).
- LabelStr, // foo:
- GlobalVar, // @foo @"foo"
- LocalVar, // %foo %"foo"
- MetadataVar, // !foo
- StringConstant, // "foo"
-
- // Type valued tokens (TyVal).
- Type,
-
- APFloat, // APFloatVal
- APSInt // APSInt
- };
-} // end namespace lltok
-} // end namespace llvm
-
-#endif
diff --git a/libclamav/c++/llvm/lib/AsmParser/Makefile b/libclamav/c++/llvm/lib/AsmParser/Makefile
deleted file mode 100644
index 995bb0e..0000000
--- a/libclamav/c++/llvm/lib/AsmParser/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-##===- lib/AsmParser/Makefile ------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-LIBRARYNAME := LLVMAsmParser
-BUILD_ARCHIVE = 1
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/lib/AsmParser/Parser.cpp b/libclamav/c++/llvm/lib/AsmParser/Parser.cpp
deleted file mode 100644
index aac4027..0000000
--- a/libclamav/c++/llvm/lib/AsmParser/Parser.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-//===- Parser.cpp - Main dispatch module for the Parser library -----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This library implements the functionality defined in llvm/Assembly/Parser.h
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Assembly/Parser.h"
-#include "LLParser.h"
-#include "llvm/Module.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/raw_ostream.h"
-#include <cstring>
-using namespace llvm;
-
-Module *llvm::ParseAssembly(MemoryBuffer *F,
- Module *M,
- SMDiagnostic &Err,
- LLVMContext &Context) {
- SourceMgr SM;
- SM.AddNewSourceBuffer(F, SMLoc());
-
- // If we are parsing into an existing module, do it.
- if (M)
- return LLParser(F, SM, Err, M).Run() ? 0 : M;
-
- // Otherwise create a new module.
- OwningPtr<Module> M2(new Module(F->getBufferIdentifier(), Context));
- if (LLParser(F, SM, Err, M2.get()).Run())
- return 0;
- return M2.take();
-}
-
-Module *llvm::ParseAssemblyFile(const std::string &Filename, SMDiagnostic &Err,
- LLVMContext &Context) {
- std::string ErrorStr;
- MemoryBuffer *F = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), &ErrorStr);
- if (F == 0) {
- Err = SMDiagnostic("", -1, -1,
- "Could not open input file '" + Filename + "': " +
- ErrorStr, "");
- return 0;
- }
-
- return ParseAssembly(F, 0, Err, Context);
-}
-
-Module *llvm::ParseAssemblyString(const char *AsmString, Module *M,
- SMDiagnostic &Err, LLVMContext &Context) {
- MemoryBuffer *F =
- MemoryBuffer::getMemBuffer(AsmString, AsmString+strlen(AsmString),
- "<string>");
-
- return ParseAssembly(F, M, Err, Context);
-}
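
A minimal sketch of driving these entry points, assuming the 2.7/2.8-era headers this tree ships (SMDiagnostic::Print and Module::print as in that API); the usage string and file handling are invented:

  // sketch-llparse.cpp - hypothetical driver for ParseAssemblyFile (not part of this tree).
  #include "llvm/Assembly/Parser.h"
  #include "llvm/LLVMContext.h"
  #include "llvm/Module.h"
  #include "llvm/Support/SourceMgr.h"
  #include "llvm/Support/raw_ostream.h"

  int main(int argc, char **argv) {
    if (argc < 2) {
      llvm::errs() << "usage: " << argv[0] << " <file.ll>\n";
      return 1;
    }
    llvm::SMDiagnostic Err;
    llvm::Module *M = llvm::ParseAssemblyFile(argv[1], Err,
                                              llvm::getGlobalContext());
    if (!M) {
      Err.Print(argv[0], llvm::errs());  // file:line:col diagnostic with caret line
      return 1;
    }
    M->print(llvm::outs(), 0);           // round-trip the module back to .ll text
    delete M;
    return 0;
  }
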
diff --git a/libclamav/c++/llvm/lib/Bitcode/Makefile b/libclamav/c++/llvm/lib/Bitcode/Makefile
deleted file mode 100644
index 2d6b5ad..0000000
--- a/libclamav/c++/llvm/lib/Bitcode/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-##===- lib/Bitcode/Makefile --------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-PARALLEL_DIRS = Reader Writer
-
-include $(LEVEL)/Makefile.common
-
diff --git a/libclamav/c++/llvm/lib/Bitcode/Reader/BitReader.cpp b/libclamav/c++/llvm/lib/Bitcode/Reader/BitReader.cpp
deleted file mode 100644
index 15844c0..0000000
--- a/libclamav/c++/llvm/lib/Bitcode/Reader/BitReader.cpp
+++ /dev/null
@@ -1,88 +0,0 @@
-//===-- BitReader.cpp -----------------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm-c/BitReader.h"
-#include "llvm/Bitcode/ReaderWriter.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include <string>
-#include <cstring>
-
-using namespace llvm;
-
-/* Builds a module from the bitcode in the specified memory buffer, returning a
- reference to the module via the OutModule parameter. Returns 0 on success.
- Optionally returns a human-readable error message via OutMessage. */
-LLVMBool LLVMParseBitcode(LLVMMemoryBufferRef MemBuf,
- LLVMModuleRef *OutModule, char **OutMessage) {
- return LLVMParseBitcodeInContext(wrap(&getGlobalContext()), MemBuf, OutModule,
- OutMessage);
-}
-
-LLVMBool LLVMParseBitcodeInContext(LLVMContextRef ContextRef,
- LLVMMemoryBufferRef MemBuf,
- LLVMModuleRef *OutModule,
- char **OutMessage) {
- std::string Message;
-
- *OutModule = wrap(ParseBitcodeFile(unwrap(MemBuf), *unwrap(ContextRef),
- &Message));
- if (!*OutModule) {
- if (OutMessage)
- *OutMessage = strdup(Message.c_str());
- return 1;
- }
-
- return 0;
-}
-
-/* Reads a module from the specified memory buffer, returning via the OutM
- parameter a module which performs lazy deserialization. Returns 0 on success.
- Optionally returns a human-readable error message via OutMessage. */
-LLVMBool LLVMGetBitcodeModuleInContext(LLVMContextRef ContextRef,
- LLVMMemoryBufferRef MemBuf,
- LLVMModuleRef *OutM,
- char **OutMessage) {
- std::string Message;
-
- *OutM = wrap(getLazyBitcodeModule(unwrap(MemBuf), *unwrap(ContextRef),
- &Message));
- if (!*OutM) {
- if (OutMessage)
- *OutMessage = strdup(Message.c_str());
- return 1;
- }
-
- return 0;
-
-}
-
-LLVMBool LLVMGetBitcodeModule(LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutM,
- char **OutMessage) {
- return LLVMGetBitcodeModuleInContext(LLVMGetGlobalContext(), MemBuf, OutM,
- OutMessage);
-}
-
-/* Deprecated: Use LLVMGetBitcodeModuleInContext instead. */
-LLVMBool LLVMGetBitcodeModuleProviderInContext(LLVMContextRef ContextRef,
- LLVMMemoryBufferRef MemBuf,
- LLVMModuleProviderRef *OutMP,
- char **OutMessage) {
- return LLVMGetBitcodeModuleInContext(ContextRef, MemBuf,
- reinterpret_cast<LLVMModuleRef*>(OutMP),
- OutMessage);
-}
-
-/* Deprecated: Use LLVMGetBitcodeModule instead. */
-LLVMBool LLVMGetBitcodeModuleProvider(LLVMMemoryBufferRef MemBuf,
- LLVMModuleProviderRef *OutMP,
- char **OutMessage) {
- return LLVMGetBitcodeModuleProviderInContext(LLVMGetGlobalContext(), MemBuf,
- OutMP, OutMessage);
-}
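For orientation, the file removed above implemented the llvm-c bitcode entry points (LLVMParseBitcode and friends). A minimal, hedged usage sketch of that C API follows; it is not part of this tree, and the file name "input.bc" is only an example.

#include <llvm-c/BitReader.h>
#include <llvm-c/Core.h>
#include <cstdio>

int main() {
  LLVMMemoryBufferRef Buf;
  LLVMModuleRef Mod;
  char *Err = 0;

  if (LLVMCreateMemoryBufferWithContentsOfFile("input.bc", &Buf, &Err)) {
    std::fprintf(stderr, "cannot read input.bc: %s\n", Err);
    return 1;
  }
  if (LLVMParseBitcode(Buf, &Mod, &Err)) {   // nonzero return means failure, as above
    std::fprintf(stderr, "bitcode parse error: %s\n", Err);
    return 1;
  }
  LLVMDumpModule(Mod);                       // print the parsed module as IR
  LLVMDisposeModule(Mod);
  return 0;
}

Building it requires linking against an LLVM that provides these C bindings, e.g. the copy embedded under libclamav/c++ or a system installation.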
diff --git a/libclamav/c++/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/libclamav/c++/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
deleted file mode 100644
index a328837..0000000
--- a/libclamav/c++/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ /dev/null
@@ -1,2445 +0,0 @@
-//===- BitcodeReader.cpp - Internal BitcodeReader implementation ----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the BitcodeReader class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Bitcode/ReaderWriter.h"
-#include "BitcodeReader.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/InlineAsm.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Module.h"
-#include "llvm/Operator.h"
-#include "llvm/AutoUpgrade.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/OperandTraits.h"
-using namespace llvm;
-
-void BitcodeReader::FreeState() {
- if (BufferOwned)
- delete Buffer;
- Buffer = 0;
- std::vector<PATypeHolder>().swap(TypeList);
- ValueList.clear();
- MDValueList.clear();
-
- std::vector<AttrListPtr>().swap(MAttributes);
- std::vector<BasicBlock*>().swap(FunctionBBs);
- std::vector<Function*>().swap(FunctionsWithBodies);
- DeferredFunctionInfo.clear();
-}
-
-//===----------------------------------------------------------------------===//
-// Helper functions to implement forward reference resolution, etc.
-//===----------------------------------------------------------------------===//
-
-/// ConvertToString - Convert a string from a record into an std::string, return
-/// true on failure.
-template<typename StrTy>
-static bool ConvertToString(SmallVector<uint64_t, 64> &Record, unsigned Idx,
- StrTy &Result) {
- if (Idx > Record.size())
- return true;
-
- for (unsigned i = Idx, e = Record.size(); i != e; ++i)
- Result += (char)Record[i];
- return false;
-}
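ConvertToString above relies on the bitstream convention that a record stores one character per 64-bit element, so a VST_ENTRY such as [valueid, 'm', 'a', 'i', 'n'] decodes to "main". A small standalone sketch of the same decoding, with illustrative names:

#include <cstdint>
#include <string>
#include <vector>

// Mirrors the reader's convention: returns true on failure, appends to Result.
static bool recordToString(const std::vector<uint64_t> &Record, unsigned Idx,
                           std::string &Result) {
  if (Idx > Record.size())
    return true;
  for (unsigned i = Idx, e = Record.size(); i != e; ++i)
    Result += char(Record[i]);
  return false;
}

int main() {
  std::vector<uint64_t> Record;
  const char *Name = "main";
  Record.push_back(7);                       // valueid
  for (unsigned i = 0; Name[i]; ++i)
    Record.push_back((unsigned char)Name[i]);
  std::string Out;
  recordToString(Record, 1, Out);            // Out == "main"
  return Out == "main" ? 0 : 1;
}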
-
-static GlobalValue::LinkageTypes GetDecodedLinkage(unsigned Val) {
- switch (Val) {
- default: // Map unknown/new linkages to external
- case 0: return GlobalValue::ExternalLinkage;
- case 1: return GlobalValue::WeakAnyLinkage;
- case 2: return GlobalValue::AppendingLinkage;
- case 3: return GlobalValue::InternalLinkage;
- case 4: return GlobalValue::LinkOnceAnyLinkage;
- case 5: return GlobalValue::DLLImportLinkage;
- case 6: return GlobalValue::DLLExportLinkage;
- case 7: return GlobalValue::ExternalWeakLinkage;
- case 8: return GlobalValue::CommonLinkage;
- case 9: return GlobalValue::PrivateLinkage;
- case 10: return GlobalValue::WeakODRLinkage;
- case 11: return GlobalValue::LinkOnceODRLinkage;
- case 12: return GlobalValue::AvailableExternallyLinkage;
- case 13: return GlobalValue::LinkerPrivateLinkage;
- }
-}
-
-static GlobalValue::VisibilityTypes GetDecodedVisibility(unsigned Val) {
- switch (Val) {
- default: // Map unknown visibilities to default.
- case 0: return GlobalValue::DefaultVisibility;
- case 1: return GlobalValue::HiddenVisibility;
- case 2: return GlobalValue::ProtectedVisibility;
- }
-}
-
-static int GetDecodedCastOpcode(unsigned Val) {
- switch (Val) {
- default: return -1;
- case bitc::CAST_TRUNC : return Instruction::Trunc;
- case bitc::CAST_ZEXT : return Instruction::ZExt;
- case bitc::CAST_SEXT : return Instruction::SExt;
- case bitc::CAST_FPTOUI : return Instruction::FPToUI;
- case bitc::CAST_FPTOSI : return Instruction::FPToSI;
- case bitc::CAST_UITOFP : return Instruction::UIToFP;
- case bitc::CAST_SITOFP : return Instruction::SIToFP;
- case bitc::CAST_FPTRUNC : return Instruction::FPTrunc;
- case bitc::CAST_FPEXT : return Instruction::FPExt;
- case bitc::CAST_PTRTOINT: return Instruction::PtrToInt;
- case bitc::CAST_INTTOPTR: return Instruction::IntToPtr;
- case bitc::CAST_BITCAST : return Instruction::BitCast;
- }
-}
-static int GetDecodedBinaryOpcode(unsigned Val, const Type *Ty) {
- switch (Val) {
- default: return -1;
- case bitc::BINOP_ADD:
- return Ty->isFPOrFPVectorTy() ? Instruction::FAdd : Instruction::Add;
- case bitc::BINOP_SUB:
- return Ty->isFPOrFPVectorTy() ? Instruction::FSub : Instruction::Sub;
- case bitc::BINOP_MUL:
- return Ty->isFPOrFPVectorTy() ? Instruction::FMul : Instruction::Mul;
- case bitc::BINOP_UDIV: return Instruction::UDiv;
- case bitc::BINOP_SDIV:
- return Ty->isFPOrFPVectorTy() ? Instruction::FDiv : Instruction::SDiv;
- case bitc::BINOP_UREM: return Instruction::URem;
- case bitc::BINOP_SREM:
- return Ty->isFPOrFPVectorTy() ? Instruction::FRem : Instruction::SRem;
- case bitc::BINOP_SHL: return Instruction::Shl;
- case bitc::BINOP_LSHR: return Instruction::LShr;
- case bitc::BINOP_ASHR: return Instruction::AShr;
- case bitc::BINOP_AND: return Instruction::And;
- case bitc::BINOP_OR: return Instruction::Or;
- case bitc::BINOP_XOR: return Instruction::Xor;
- }
-}
-
-namespace llvm {
-namespace {
- /// @brief A class for maintaining the slot number definition
- /// as a placeholder for the actual definition for forward constants defs.
- class ConstantPlaceHolder : public ConstantExpr {
- ConstantPlaceHolder(); // DO NOT IMPLEMENT
- void operator=(const ConstantPlaceHolder &); // DO NOT IMPLEMENT
- public:
- // allocate space for exactly one operand
- void *operator new(size_t s) {
- return User::operator new(s, 1);
- }
- explicit ConstantPlaceHolder(const Type *Ty, LLVMContext& Context)
- : ConstantExpr(Ty, Instruction::UserOp1, &Op<0>(), 1) {
- Op<0>() = UndefValue::get(Type::getInt32Ty(Context));
- }
-
- /// @brief Methods to support type inquiry through isa, cast, and dyn_cast.
- static inline bool classof(const ConstantPlaceHolder *) { return true; }
- static bool classof(const Value *V) {
- return isa<ConstantExpr>(V) &&
- cast<ConstantExpr>(V)->getOpcode() == Instruction::UserOp1;
- }
-
-
- /// Provide fast operand accessors
- //DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
- };
-}
-
-// FIXME: can we inherit this from ConstantExpr?
-template <>
-struct OperandTraits<ConstantPlaceHolder> : public FixedNumOperandTraits<1> {
-};
-}
-
-
-void BitcodeReaderValueList::AssignValue(Value *V, unsigned Idx) {
- if (Idx == size()) {
- push_back(V);
- return;
- }
-
- if (Idx >= size())
- resize(Idx+1);
-
- WeakVH &OldV = ValuePtrs[Idx];
- if (OldV == 0) {
- OldV = V;
- return;
- }
-
- // Handle constants and non-constants (e.g. instrs) differently for
- // efficiency.
- if (Constant *PHC = dyn_cast<Constant>(&*OldV)) {
- ResolveConstants.push_back(std::make_pair(PHC, Idx));
- OldV = V;
- } else {
- // If there was a forward reference to this value, replace it.
- Value *PrevVal = OldV;
- OldV->replaceAllUsesWith(V);
- delete PrevVal;
- }
-}
-
-
-Constant *BitcodeReaderValueList::getConstantFwdRef(unsigned Idx,
- const Type *Ty) {
- if (Idx >= size())
- resize(Idx + 1);
-
- if (Value *V = ValuePtrs[Idx]) {
- assert(Ty == V->getType() && "Type mismatch in constant table!");
- return cast<Constant>(V);
- }
-
- // Create and return a placeholder, which will later be RAUW'd.
- Constant *C = new ConstantPlaceHolder(Ty, Context);
- ValuePtrs[Idx] = C;
- return C;
-}
-
-Value *BitcodeReaderValueList::getValueFwdRef(unsigned Idx, const Type *Ty) {
- if (Idx >= size())
- resize(Idx + 1);
-
- if (Value *V = ValuePtrs[Idx]) {
- assert((Ty == 0 || Ty == V->getType()) && "Type mismatch in value table!");
- return V;
- }
-
- // No type specified, must be invalid reference.
- if (Ty == 0) return 0;
-
- // Create and return a placeholder, which will later be RAUW'd.
- Value *V = new Argument(Ty);
- ValuePtrs[Idx] = V;
- return V;
-}
-
-/// ResolveConstantForwardRefs - Once all constants are read, this method bulk
-/// resolves any forward references. The idea behind this is that we sometimes
-/// get constants (such as large arrays) which reference *many* forward ref
-/// constants. Replacing each of these causes a lot of thrashing when
-/// building/reuniquing the constant. Instead of doing this, we look at all the
-/// uses and rewrite all the place holders at once for any constant that uses
-/// a placeholder.
-void BitcodeReaderValueList::ResolveConstantForwardRefs() {
- // Sort the values by-pointer so that they are efficient to look up with a
- // binary search.
- std::sort(ResolveConstants.begin(), ResolveConstants.end());
-
- SmallVector<Constant*, 64> NewOps;
-
- while (!ResolveConstants.empty()) {
- Value *RealVal = operator[](ResolveConstants.back().second);
- Constant *Placeholder = ResolveConstants.back().first;
- ResolveConstants.pop_back();
-
- // Loop over all users of the placeholder, updating them to reference the
- // new value. If they reference more than one placeholder, update them all
- // at once.
- while (!Placeholder->use_empty()) {
- Value::use_iterator UI = Placeholder->use_begin();
-
- // If the using object isn't uniqued, just update the operands. This
- // handles instructions and initializers for global variables.
- if (!isa<Constant>(*UI) || isa<GlobalValue>(*UI)) {
- UI.getUse().set(RealVal);
- continue;
- }
-
- // Otherwise, we have a constant that uses the placeholder. Replace that
- // constant with a new constant that has *all* placeholder uses updated.
- Constant *UserC = cast<Constant>(*UI);
- for (User::op_iterator I = UserC->op_begin(), E = UserC->op_end();
- I != E; ++I) {
- Value *NewOp;
- if (!isa<ConstantPlaceHolder>(*I)) {
- // Not a placeholder reference.
- NewOp = *I;
- } else if (*I == Placeholder) {
- // Common case is that it just references this one placeholder.
- NewOp = RealVal;
- } else {
- // Otherwise, look up the placeholder in ResolveConstants.
- ResolveConstantsTy::iterator It =
- std::lower_bound(ResolveConstants.begin(), ResolveConstants.end(),
- std::pair<Constant*, unsigned>(cast<Constant>(*I),
- 0));
- assert(It != ResolveConstants.end() && It->first == *I);
- NewOp = operator[](It->second);
- }
-
- NewOps.push_back(cast<Constant>(NewOp));
- }
-
- // Make the new constant.
- Constant *NewC;
- if (ConstantArray *UserCA = dyn_cast<ConstantArray>(UserC)) {
- NewC = ConstantArray::get(UserCA->getType(), &NewOps[0],
- NewOps.size());
- } else if (ConstantStruct *UserCS = dyn_cast<ConstantStruct>(UserC)) {
- NewC = ConstantStruct::get(Context, &NewOps[0], NewOps.size(),
- UserCS->getType()->isPacked());
- } else if (isa<ConstantVector>(UserC)) {
- NewC = ConstantVector::get(&NewOps[0], NewOps.size());
- } else {
- assert(isa<ConstantExpr>(UserC) && "Must be a ConstantExpr.");
- NewC = cast<ConstantExpr>(UserC)->getWithOperands(&NewOps[0],
- NewOps.size());
- }
-
- UserC->replaceAllUsesWith(NewC);
- UserC->destroyConstant();
- NewOps.clear();
- }
-
- // Update all ValueHandles, they should be the only users at this point.
- Placeholder->replaceAllUsesWith(RealVal);
- delete Placeholder;
- }
-}
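The lookup that ResolveConstantForwardRefs performs on ResolveConstants is a plain sort-once-then-binary-search over (placeholder, slot) pairs. A toy sketch of that pattern, with illustrative names rather than the reader's own types:

#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>

typedef std::pair<const void *, unsigned> PlaceholderSlot;

// Given the list sorted by placeholder pointer, find the value slot that a
// placeholder stands for in O(log n) instead of scanning linearly.
static unsigned slotFor(const std::vector<PlaceholderSlot> &Sorted,
                        const void *Placeholder) {
  std::vector<PlaceholderSlot>::const_iterator It =
      std::lower_bound(Sorted.begin(), Sorted.end(),
                       PlaceholderSlot(Placeholder, 0));
  assert(It != Sorted.end() && It->first == Placeholder);
  return It->second;
}

int main() {
  int A, B;                                   // stand-ins for placeholder constants
  std::vector<PlaceholderSlot> Resolve;
  Resolve.push_back(PlaceholderSlot(&A, 3));
  Resolve.push_back(PlaceholderSlot(&B, 7));
  std::sort(Resolve.begin(), Resolve.end());  // sorted by pointer, as in the reader
  return slotFor(Resolve, &B) == 7 ? 0 : 1;
}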
-
-void BitcodeReaderMDValueList::AssignValue(Value *V, unsigned Idx) {
- if (Idx == size()) {
- push_back(V);
- return;
- }
-
- if (Idx >= size())
- resize(Idx+1);
-
- WeakVH &OldV = MDValuePtrs[Idx];
- if (OldV == 0) {
- OldV = V;
- return;
- }
-
- // If there was a forward reference to this value, replace it.
- Value *PrevVal = OldV;
- OldV->replaceAllUsesWith(V);
- delete PrevVal;
- // Deleting PrevVal resets the entry at Idx in MDValuePtrs to null, so
- // store the new value for Idx explicitly.
- MDValuePtrs[Idx] = V;
-}
-
-Value *BitcodeReaderMDValueList::getValueFwdRef(unsigned Idx) {
- if (Idx >= size())
- resize(Idx + 1);
-
- if (Value *V = MDValuePtrs[Idx]) {
- assert(V->getType()->isMetadataTy() && "Type mismatch in value table!");
- return V;
- }
-
- // Create and return a placeholder, which will later be RAUW'd.
- Value *V = new Argument(Type::getMetadataTy(Context));
- MDValuePtrs[Idx] = V;
- return V;
-}
-
-const Type *BitcodeReader::getTypeByID(unsigned ID, bool isTypeTable) {
- // If the TypeID is in range, return it.
- if (ID < TypeList.size())
- return TypeList[ID].get();
- if (!isTypeTable) return 0;
-
- // The type table allows forward references. Push as many Opaque types as
- // needed to get up to ID.
- while (TypeList.size() <= ID)
- TypeList.push_back(OpaqueType::get(Context));
- return TypeList.back().get();
-}
-
-//===----------------------------------------------------------------------===//
-// Functions for parsing blocks from the bitcode file
-//===----------------------------------------------------------------------===//
-
-bool BitcodeReader::ParseAttributeBlock() {
- if (Stream.EnterSubBlock(bitc::PARAMATTR_BLOCK_ID))
- return Error("Malformed block record");
-
- if (!MAttributes.empty())
- return Error("Multiple PARAMATTR blocks found!");
-
- SmallVector<uint64_t, 64> Record;
-
- SmallVector<AttributeWithIndex, 8> Attrs;
-
- // Read all the records.
- while (1) {
- unsigned Code = Stream.ReadCode();
- if (Code == bitc::END_BLOCK) {
- if (Stream.ReadBlockEnd())
- return Error("Error at end of PARAMATTR block");
- return false;
- }
-
- if (Code == bitc::ENTER_SUBBLOCK) {
- // No known subblocks, always skip them.
- Stream.ReadSubBlockID();
- if (Stream.SkipBlock())
- return Error("Malformed block record");
- continue;
- }
-
- if (Code == bitc::DEFINE_ABBREV) {
- Stream.ReadAbbrevRecord();
- continue;
- }
-
- // Read a record.
- Record.clear();
- switch (Stream.ReadRecord(Code, Record)) {
- default: // Default behavior: ignore.
- break;
- case bitc::PARAMATTR_CODE_ENTRY: { // ENTRY: [paramidx0, attr0, ...]
- if (Record.size() & 1)
- return Error("Invalid ENTRY record");
-
- // FIXME : Remove this autoupgrade code in LLVM 3.0.
- // If Function attributes are using index 0 then transfer them
- // to index ~0. Index 0 is used for return value attributes but used to be
- // used for function attributes.
- Attributes RetAttribute = Attribute::None;
- Attributes FnAttribute = Attribute::None;
- for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
- // FIXME: remove in LLVM 3.0
- // The alignment is stored as a 16-bit raw value from bits 31--16.
- // We shift the bits above 31 down by 11 bits.
-
- unsigned Alignment = (Record[i+1] & (0xffffull << 16)) >> 16;
- if (Alignment && !isPowerOf2_32(Alignment))
- return Error("Alignment is not a power of two.");
-
- Attributes ReconstitutedAttr = Record[i+1] & 0xffff;
- if (Alignment)
- ReconstitutedAttr |= Attribute::constructAlignmentFromInt(Alignment);
- ReconstitutedAttr |= (Record[i+1] & (0xffffull << 32)) >> 11;
- Record[i+1] = ReconstitutedAttr;
-
- if (Record[i] == 0)
- RetAttribute = Record[i+1];
- else if (Record[i] == ~0U)
- FnAttribute = Record[i+1];
- }
-
- unsigned OldRetAttrs = (Attribute::NoUnwind|Attribute::NoReturn|
- Attribute::ReadOnly|Attribute::ReadNone);
-
- if (FnAttribute == Attribute::None && RetAttribute != Attribute::None &&
- (RetAttribute & OldRetAttrs) != 0) {
- if (FnAttribute == Attribute::None) { // add a slot so they get added.
- Record.push_back(~0U);
- Record.push_back(0);
- }
-
- FnAttribute |= RetAttribute & OldRetAttrs;
- RetAttribute &= ~OldRetAttrs;
- }
-
- for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
- if (Record[i] == 0) {
- if (RetAttribute != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(0, RetAttribute));
- } else if (Record[i] == ~0U) {
- if (FnAttribute != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(~0U, FnAttribute));
- } else if (Record[i+1] != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(Record[i], Record[i+1]));
- }
-
- MAttributes.push_back(AttrListPtr::get(Attrs.begin(), Attrs.end()));
- Attrs.clear();
- break;
- }
- }
- }
-}
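The autoupgrade path above unpacks a legacy 64-bit attribute word: bits 0-15 carry attribute flags, bits 16-31 the raw alignment, and bits 32-47 further flags that are shifted down by 11. A self-contained sketch of just that unpacking, with illustrative struct and function names:

#include <cstdint>

struct RawAttrFields {
  uint64_t LowFlags;
  unsigned Alignment;
  uint64_t HighFlags;
};

// Same bit arithmetic as the loop above, isolated for clarity.
static RawAttrFields unpackRawAttr(uint64_t Word) {
  RawAttrFields F;
  F.LowFlags  = Word & 0xffff;
  F.Alignment = unsigned((Word & (0xffffull << 16)) >> 16);
  F.HighFlags = (Word & (0xffffull << 32)) >> 11;
  return F;
}

int main() {
  uint64_t Word = 0x0001ull | (16ull << 16) | (0x1ull << 32);
  RawAttrFields F = unpackRawAttr(Word);
  return (F.LowFlags == 0x1 && F.Alignment == 16 && F.HighFlags == (1ull << 21))
             ? 0 : 1;
}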
-
-
-bool BitcodeReader::ParseTypeTable() {
- if (Stream.EnterSubBlock(bitc::TYPE_BLOCK_ID))
- return Error("Malformed block record");
-
- if (!TypeList.empty())
- return Error("Multiple TYPE_BLOCKs found!");
-
- SmallVector<uint64_t, 64> Record;
- unsigned NumRecords = 0;
-
- // Read all the records for this type table.
- while (1) {
- unsigned Code = Stream.ReadCode();
- if (Code == bitc::END_BLOCK) {
- if (NumRecords != TypeList.size())
- return Error("Invalid type forward reference in TYPE_BLOCK");
- if (Stream.ReadBlockEnd())
- return Error("Error at end of type table block");
- return false;
- }
-
- if (Code == bitc::ENTER_SUBBLOCK) {
- // No known subblocks, always skip them.
- Stream.ReadSubBlockID();
- if (Stream.SkipBlock())
- return Error("Malformed block record");
- continue;
- }
-
- if (Code == bitc::DEFINE_ABBREV) {
- Stream.ReadAbbrevRecord();
- continue;
- }
-
- // Read a record.
- Record.clear();
- const Type *ResultTy = 0;
- switch (Stream.ReadRecord(Code, Record)) {
- default: // Default behavior: unknown type.
- ResultTy = 0;
- break;
- case bitc::TYPE_CODE_NUMENTRY: // TYPE_CODE_NUMENTRY: [numentries]
- // TYPE_CODE_NUMENTRY contains a count of the number of types in the
- // type list. This allows us to reserve space.
- if (Record.size() < 1)
- return Error("Invalid TYPE_CODE_NUMENTRY record");
- TypeList.reserve(Record[0]);
- continue;
- case bitc::TYPE_CODE_VOID: // VOID
- ResultTy = Type::getVoidTy(Context);
- break;
- case bitc::TYPE_CODE_FLOAT: // FLOAT
- ResultTy = Type::getFloatTy(Context);
- break;
- case bitc::TYPE_CODE_DOUBLE: // DOUBLE
- ResultTy = Type::getDoubleTy(Context);
- break;
- case bitc::TYPE_CODE_X86_FP80: // X86_FP80
- ResultTy = Type::getX86_FP80Ty(Context);
- break;
- case bitc::TYPE_CODE_FP128: // FP128
- ResultTy = Type::getFP128Ty(Context);
- break;
- case bitc::TYPE_CODE_PPC_FP128: // PPC_FP128
- ResultTy = Type::getPPC_FP128Ty(Context);
- break;
- case bitc::TYPE_CODE_LABEL: // LABEL
- ResultTy = Type::getLabelTy(Context);
- break;
- case bitc::TYPE_CODE_OPAQUE: // OPAQUE
- ResultTy = 0;
- break;
- case bitc::TYPE_CODE_METADATA: // METADATA
- ResultTy = Type::getMetadataTy(Context);
- break;
- case bitc::TYPE_CODE_INTEGER: // INTEGER: [width]
- if (Record.size() < 1)
- return Error("Invalid Integer type record");
-
- ResultTy = IntegerType::get(Context, Record[0]);
- break;
- case bitc::TYPE_CODE_POINTER: { // POINTER: [pointee type] or
- // [pointee type, address space]
- if (Record.size() < 1)
- return Error("Invalid POINTER type record");
- unsigned AddressSpace = 0;
- if (Record.size() == 2)
- AddressSpace = Record[1];
- ResultTy = PointerType::get(getTypeByID(Record[0], true),
- AddressSpace);
- break;
- }
- case bitc::TYPE_CODE_FUNCTION: {
- // FIXME: attrid is dead, remove it in LLVM 3.0
- // FUNCTION: [vararg, attrid, retty, paramty x N]
- if (Record.size() < 3)
- return Error("Invalid FUNCTION type record");
- std::vector<const Type*> ArgTys;
- for (unsigned i = 3, e = Record.size(); i != e; ++i)
- ArgTys.push_back(getTypeByID(Record[i], true));
-
- ResultTy = FunctionType::get(getTypeByID(Record[2], true), ArgTys,
- Record[0]);
- break;
- }
- case bitc::TYPE_CODE_STRUCT: { // STRUCT: [ispacked, eltty x N]
- if (Record.size() < 1)
- return Error("Invalid STRUCT type record");
- std::vector<const Type*> EltTys;
- for (unsigned i = 1, e = Record.size(); i != e; ++i)
- EltTys.push_back(getTypeByID(Record[i], true));
- ResultTy = StructType::get(Context, EltTys, Record[0]);
- break;
- }
- case bitc::TYPE_CODE_UNION: { // UNION: [eltty x N]
- SmallVector<const Type*, 8> EltTys;
- for (unsigned i = 0, e = Record.size(); i != e; ++i)
- EltTys.push_back(getTypeByID(Record[i], true));
- ResultTy = UnionType::get(&EltTys[0], EltTys.size());
- break;
- }
- case bitc::TYPE_CODE_ARRAY: // ARRAY: [numelts, eltty]
- if (Record.size() < 2)
- return Error("Invalid ARRAY type record");
- ResultTy = ArrayType::get(getTypeByID(Record[1], true), Record[0]);
- break;
- case bitc::TYPE_CODE_VECTOR: // VECTOR: [numelts, eltty]
- if (Record.size() < 2)
- return Error("Invalid VECTOR type record");
- ResultTy = VectorType::get(getTypeByID(Record[1], true), Record[0]);
- break;
- }
-
- if (NumRecords == TypeList.size()) {
- // If this is a new type slot, just append it.
- TypeList.push_back(ResultTy ? ResultTy : OpaqueType::get(Context));
- ++NumRecords;
- } else if (ResultTy == 0) {
- // Otherwise, this was forward referenced, so an opaque type was created,
- // but the result type is actually just an opaque. Leave the one we
- // created previously.
- ++NumRecords;
- } else {
- // Otherwise, this was forward referenced, so an opaque type was created.
- // Resolve the opaque type to the real type now.
- assert(NumRecords < TypeList.size() && "Typelist imbalance");
- const OpaqueType *OldTy = cast<OpaqueType>(TypeList[NumRecords++].get());
-
- // Don't directly push the new type on the Tab. Instead we want to replace
- // the opaque type we previously inserted with the new concrete value. The
- // refinement from the abstract (opaque) type to the new type causes all
- // uses of the abstract type to use the concrete type (NewTy). This will
- // also cause the opaque type to be deleted.
- const_cast<OpaqueType*>(OldTy)->refineAbstractTypeTo(ResultTy);
-
- // This should have replaced the old opaque type with the new type in the
- // value table... or with a preexisting type that was already in the
- // system. Let's just make sure it did.
- assert(TypeList[NumRecords-1].get() != OldTy &&
- "refineAbstractType didn't work!");
- }
- }
-}
-
-
-bool BitcodeReader::ParseTypeSymbolTable() {
- if (Stream.EnterSubBlock(bitc::TYPE_SYMTAB_BLOCK_ID))
- return Error("Malformed block record");
-
- SmallVector<uint64_t, 64> Record;
-
- // Read all the records for this type table.
- std::string TypeName;
- while (1) {
- unsigned Code = Stream.ReadCode();
- if (Code == bitc::END_BLOCK) {
- if (Stream.ReadBlockEnd())
- return Error("Error at end of type symbol table block");
- return false;
- }
-
- if (Code == bitc::ENTER_SUBBLOCK) {
- // No known subblocks, always skip them.
- Stream.ReadSubBlockID();
- if (Stream.SkipBlock())
- return Error("Malformed block record");
- continue;
- }
-
- if (Code == bitc::DEFINE_ABBREV) {
- Stream.ReadAbbrevRecord();
- continue;
- }
-
- // Read a record.
- Record.clear();
- switch (Stream.ReadRecord(Code, Record)) {
- default: // Default behavior: unknown type.
- break;
- case bitc::TST_CODE_ENTRY: // TST_ENTRY: [typeid, namechar x N]
- if (ConvertToString(Record, 1, TypeName))
- return Error("Invalid TST_ENTRY record");
- unsigned TypeID = Record[0];
- if (TypeID >= TypeList.size())
- return Error("Invalid Type ID in TST_ENTRY record");
-
- TheModule->addTypeName(TypeName, TypeList[TypeID].get());
- TypeName.clear();
- break;
- }
- }
-}
-
-bool BitcodeReader::ParseValueSymbolTable() {
- if (Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID))
- return Error("Malformed block record");
-
- SmallVector<uint64_t, 64> Record;
-
- // Read all the records for this value table.
- SmallString<128> ValueName;
- while (1) {
- unsigned Code = Stream.ReadCode();
- if (Code == bitc::END_BLOCK) {
- if (Stream.ReadBlockEnd())
- return Error("Error at end of value symbol table block");
- return false;
- }
- if (Code == bitc::ENTER_SUBBLOCK) {
- // No known subblocks, always skip them.
- Stream.ReadSubBlockID();
- if (Stream.SkipBlock())
- return Error("Malformed block record");
- continue;
- }
-
- if (Code == bitc::DEFINE_ABBREV) {
- Stream.ReadAbbrevRecord();
- continue;
- }
-
- // Read a record.
- Record.clear();
- switch (Stream.ReadRecord(Code, Record)) {
- default: // Default behavior: unknown type.
- break;
- case bitc::VST_CODE_ENTRY: { // VST_ENTRY: [valueid, namechar x N]
- if (ConvertToString(Record, 1, ValueName))
- return Error("Invalid VST_ENTRY record");
- unsigned ValueID = Record[0];
- if (ValueID >= ValueList.size())
- return Error("Invalid Value ID in VST_ENTRY record");
- Value *V = ValueList[ValueID];
-
- V->setName(StringRef(ValueName.data(), ValueName.size()));
- ValueName.clear();
- break;
- }
- case bitc::VST_CODE_BBENTRY: {
- if (ConvertToString(Record, 1, ValueName))
- return Error("Invalid VST_BBENTRY record");
- BasicBlock *BB = getBasicBlock(Record[0]);
- if (BB == 0)
- return Error("Invalid BB ID in VST_BBENTRY record");
-
- BB->setName(StringRef(ValueName.data(), ValueName.size()));
- ValueName.clear();
- break;
- }
- }
- }
-}
-
-bool BitcodeReader::ParseMetadata() {
- unsigned NextMDValueNo = MDValueList.size();
-
- if (Stream.EnterSubBlock(bitc::METADATA_BLOCK_ID))
- return Error("Malformed block record");
-
- SmallVector<uint64_t, 64> Record;
-
- // Read all the records.
- while (1) {
- unsigned Code = Stream.ReadCode();
- if (Code == bitc::END_BLOCK) {
- if (Stream.ReadBlockEnd())
- return Error("Error at end of PARAMATTR block");
- return false;
- }
-
- if (Code == bitc::ENTER_SUBBLOCK) {
- // No known subblocks, always skip them.
- Stream.ReadSubBlockID();
- if (Stream.SkipBlock())
- return Error("Malformed block record");
- continue;
- }
-
- if (Code == bitc::DEFINE_ABBREV) {
- Stream.ReadAbbrevRecord();
- continue;
- }
-
- bool IsFunctionLocal = false;
- // Read a record.
- Record.clear();
- switch (Stream.ReadRecord(Code, Record)) {
- default: // Default behavior: ignore.
- break;
- case bitc::METADATA_NAME: {
- // Read the name of the named metadata.
- unsigned NameLength = Record.size();
- SmallString<8> Name;
- Name.resize(NameLength);
- for (unsigned i = 0; i != NameLength; ++i)
- Name[i] = Record[i];
- Record.clear();
- Code = Stream.ReadCode();
-
- // METADATA_NAME is always followed by METADATA_NAMED_NODE.
- if (Stream.ReadRecord(Code, Record) != bitc::METADATA_NAMED_NODE)
- assert ( 0 && "Invalid Named Metadata record");
-
- // Read named metadata elements.
- unsigned Size = Record.size();
- SmallVector<MDNode *, 8> Elts;
- for (unsigned i = 0; i != Size; ++i) {
- if (Record[i] == ~0U) {
- Elts.push_back(NULL);
- continue;
- }
- MDNode *MD = dyn_cast<MDNode>(MDValueList.getValueFwdRef(Record[i]));
- if (MD == 0)
- return Error("Malformed metadata record");
- Elts.push_back(MD);
- }
- Value *V = NamedMDNode::Create(Context, Name.str(), Elts.data(),
- Elts.size(), TheModule);
- MDValueList.AssignValue(V, NextMDValueNo++);
- break;
- }
- case bitc::METADATA_FN_NODE:
- IsFunctionLocal = true;
- // fall-through
- case bitc::METADATA_NODE: {
- if (Record.empty() || Record.size() % 2 == 1)
- return Error("Invalid METADATA_NODE record");
-
- unsigned Size = Record.size();
- SmallVector<Value*, 8> Elts;
- for (unsigned i = 0; i != Size; i += 2) {
- const Type *Ty = getTypeByID(Record[i], false);
- if (Ty->isMetadataTy())
- Elts.push_back(MDValueList.getValueFwdRef(Record[i+1]));
- else if (!Ty->isVoidTy())
- Elts.push_back(ValueList.getValueFwdRef(Record[i+1], Ty));
- else
- Elts.push_back(NULL);
- }
- Value *V = MDNode::getWhenValsUnresolved(Context, &Elts[0], Elts.size(),
- IsFunctionLocal);
- IsFunctionLocal = false;
- MDValueList.AssignValue(V, NextMDValueNo++);
- break;
- }
- case bitc::METADATA_STRING: {
- unsigned MDStringLength = Record.size();
- SmallString<8> String;
- String.resize(MDStringLength);
- for (unsigned i = 0; i != MDStringLength; ++i)
- String[i] = Record[i];
- Value *V = MDString::get(Context,
- StringRef(String.data(), String.size()));
- MDValueList.AssignValue(V, NextMDValueNo++);
- break;
- }
- case bitc::METADATA_KIND: {
- unsigned RecordLength = Record.size();
- if (Record.empty() || RecordLength < 2)
- return Error("Invalid METADATA_KIND record");
- SmallString<8> Name;
- Name.resize(RecordLength-1);
- unsigned Kind = Record[0];
- (void) Kind;
- for (unsigned i = 1; i != RecordLength; ++i)
- Name[i-1] = Record[i];
-
- unsigned NewKind = TheModule->getMDKindID(Name.str());
- assert(Kind == NewKind &&
- "FIXME: Unable to handle custom metadata mismatch!");(void)NewKind;
- break;
- }
- }
- }
-}
-
-/// DecodeSignRotatedValue - Decode a signed value stored with the sign bit in
-/// the LSB for dense VBR encoding.
-static uint64_t DecodeSignRotatedValue(uint64_t V) {
- if ((V & 1) == 0)
- return V >> 1;
- if (V != 1)
- return -(V >> 1);
- // There is no such thing as -0 with integers. "-0" really means MININT.
- return 1ULL << 63;
-}
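A hedged round-trip sketch of the sign-rotation scheme decoded above; EncodeSignRotatedValue here is an illustrative counterpart written for this example, not code from the tree:

#include <cassert>
#include <cstdint>

// Rotate the sign into the LSB so small negative numbers stay small under VBR.
static uint64_t EncodeSignRotatedValue(int64_t V) {
  uint64_t UV = static_cast<uint64_t>(V);
  if (V >= 0)
    return UV << 1;                  // 0, 1, 2, ...  ->  0, 2, 4, ...
  return ((0 - UV) << 1) | 1;        // -1, -2, ... -> 3, 5, ...; INT64_MIN -> 1
}

// Identical logic to the reader's DecodeSignRotatedValue above.
static uint64_t DecodeSignRotatedValue(uint64_t V) {
  if ((V & 1) == 0) return V >> 1;
  if (V != 1)       return -(V >> 1);
  return 1ULL << 63;                 // "-0" really means INT64_MIN
}

int main() {
  const int64_t MinVal = -0x7fffffffffffffffLL - 1;
  const int64_t Samples[] = { 0, 1, -1, 42, -42, MinVal };
  for (unsigned i = 0; i < sizeof(Samples) / sizeof(Samples[0]); ++i)
    assert((int64_t)DecodeSignRotatedValue(EncodeSignRotatedValue(Samples[i]))
           == Samples[i]);
  return 0;
}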
-
-/// ResolveGlobalAndAliasInits - Resolve all of the initializers for global
-/// values and aliases that we can.
-bool BitcodeReader::ResolveGlobalAndAliasInits() {
- std::vector<std::pair<GlobalVariable*, unsigned> > GlobalInitWorklist;
- std::vector<std::pair<GlobalAlias*, unsigned> > AliasInitWorklist;
-
- GlobalInitWorklist.swap(GlobalInits);
- AliasInitWorklist.swap(AliasInits);
-
- while (!GlobalInitWorklist.empty()) {
- unsigned ValID = GlobalInitWorklist.back().second;
- if (ValID >= ValueList.size()) {
- // Not ready to resolve this yet, it requires something later in the file.
- GlobalInits.push_back(GlobalInitWorklist.back());
- } else {
- if (Constant *C = dyn_cast<Constant>(ValueList[ValID]))
- GlobalInitWorklist.back().first->setInitializer(C);
- else
- return Error("Global variable initializer is not a constant!");
- }
- GlobalInitWorklist.pop_back();
- }
-
- while (!AliasInitWorklist.empty()) {
- unsigned ValID = AliasInitWorklist.back().second;
- if (ValID >= ValueList.size()) {
- AliasInits.push_back(AliasInitWorklist.back());
- } else {
- if (Constant *C = dyn_cast<Constant>(ValueList[ValID]))
- AliasInitWorklist.back().first->setAliasee(C);
- else
- return Error("Alias initializer is not a constant!");
- }
- AliasInitWorklist.pop_back();
- }
- return false;
-}
-
-bool BitcodeReader::ParseConstants() {
- if (Stream.EnterSubBlock(bitc::CONSTANTS_BLOCK_ID))
- return Error("Malformed block record");
-
- SmallVector<uint64_t, 64> Record;
-
- // Read all the records for this value table.
- const Type *CurTy = Type::getInt32Ty(Context);
- unsigned NextCstNo = ValueList.size();
- while (1) {
- unsigned Code = Stream.ReadCode();
- if (Code == bitc::END_BLOCK)
- break;
-
- if (Code == bitc::ENTER_SUBBLOCK) {
- // No known subblocks, always skip them.
- Stream.ReadSubBlockID();
- if (Stream.SkipBlock())
- return Error("Malformed block record");
- continue;
- }
-
- if (Code == bitc::DEFINE_ABBREV) {
- Stream.ReadAbbrevRecord();
- continue;
- }
-
- // Read a record.
- Record.clear();
- Value *V = 0;
- unsigned BitCode = Stream.ReadRecord(Code, Record);
- switch (BitCode) {
- default: // Default behavior: unknown constant
- case bitc::CST_CODE_UNDEF: // UNDEF
- V = UndefValue::get(CurTy);
- break;
- case bitc::CST_CODE_SETTYPE: // SETTYPE: [typeid]
- if (Record.empty())
- return Error("Malformed CST_SETTYPE record");
- if (Record[0] >= TypeList.size())
- return Error("Invalid Type ID in CST_SETTYPE record");
- CurTy = TypeList[Record[0]];
- continue; // Skip the ValueList manipulation.
- case bitc::CST_CODE_NULL: // NULL
- V = Constant::getNullValue(CurTy);
- break;
- case bitc::CST_CODE_INTEGER: // INTEGER: [intval]
- if (!CurTy->isIntegerTy() || Record.empty())
- return Error("Invalid CST_INTEGER record");
- V = ConstantInt::get(CurTy, DecodeSignRotatedValue(Record[0]));
- break;
- case bitc::CST_CODE_WIDE_INTEGER: {// WIDE_INTEGER: [n x intval]
- if (!CurTy->isIntegerTy() || Record.empty())
- return Error("Invalid WIDE_INTEGER record");
-
- unsigned NumWords = Record.size();
- SmallVector<uint64_t, 8> Words;
- Words.resize(NumWords);
- for (unsigned i = 0; i != NumWords; ++i)
- Words[i] = DecodeSignRotatedValue(Record[i]);
- V = ConstantInt::get(Context,
- APInt(cast<IntegerType>(CurTy)->getBitWidth(),
- NumWords, &Words[0]));
- break;
- }
- case bitc::CST_CODE_FLOAT: { // FLOAT: [fpval]
- if (Record.empty())
- return Error("Invalid FLOAT record");
- if (CurTy->isFloatTy())
- V = ConstantFP::get(Context, APFloat(APInt(32, (uint32_t)Record[0])));
- else if (CurTy->isDoubleTy())
- V = ConstantFP::get(Context, APFloat(APInt(64, Record[0])));
- else if (CurTy->isX86_FP80Ty()) {
- // Bits are not stored the same way as a normal i80 APInt, compensate.
- uint64_t Rearrange[2];
- Rearrange[0] = (Record[1] & 0xffffLL) | (Record[0] << 16);
- Rearrange[1] = Record[0] >> 48;
- V = ConstantFP::get(Context, APFloat(APInt(80, 2, Rearrange)));
- } else if (CurTy->isFP128Ty())
- V = ConstantFP::get(Context, APFloat(APInt(128, 2, &Record[0]), true));
- else if (CurTy->isPPC_FP128Ty())
- V = ConstantFP::get(Context, APFloat(APInt(128, 2, &Record[0])));
- else
- V = UndefValue::get(CurTy);
- break;
- }
-
- case bitc::CST_CODE_AGGREGATE: {// AGGREGATE: [n x value number]
- if (Record.empty())
- return Error("Invalid CST_AGGREGATE record");
-
- unsigned Size = Record.size();
- std::vector<Constant*> Elts;
-
- if (const StructType *STy = dyn_cast<StructType>(CurTy)) {
- for (unsigned i = 0; i != Size; ++i)
- Elts.push_back(ValueList.getConstantFwdRef(Record[i],
- STy->getElementType(i)));
- V = ConstantStruct::get(STy, Elts);
- } else if (const ArrayType *ATy = dyn_cast<ArrayType>(CurTy)) {
- const Type *EltTy = ATy->getElementType();
- for (unsigned i = 0; i != Size; ++i)
- Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy));
- V = ConstantArray::get(ATy, Elts);
- } else if (const VectorType *VTy = dyn_cast<VectorType>(CurTy)) {
- const Type *EltTy = VTy->getElementType();
- for (unsigned i = 0; i != Size; ++i)
- Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy));
- V = ConstantVector::get(Elts);
- } else {
- V = UndefValue::get(CurTy);
- }
- break;
- }
- case bitc::CST_CODE_STRING: { // STRING: [values]
- if (Record.empty())
- return Error("Invalid CST_AGGREGATE record");
-
- const ArrayType *ATy = cast<ArrayType>(CurTy);
- const Type *EltTy = ATy->getElementType();
-
- unsigned Size = Record.size();
- std::vector<Constant*> Elts;
- for (unsigned i = 0; i != Size; ++i)
- Elts.push_back(ConstantInt::get(EltTy, Record[i]));
- V = ConstantArray::get(ATy, Elts);
- break;
- }
- case bitc::CST_CODE_CSTRING: { // CSTRING: [values]
- if (Record.empty())
- return Error("Invalid CST_AGGREGATE record");
-
- const ArrayType *ATy = cast<ArrayType>(CurTy);
- const Type *EltTy = ATy->getElementType();
-
- unsigned Size = Record.size();
- std::vector<Constant*> Elts;
- for (unsigned i = 0; i != Size; ++i)
- Elts.push_back(ConstantInt::get(EltTy, Record[i]));
- Elts.push_back(Constant::getNullValue(EltTy));
- V = ConstantArray::get(ATy, Elts);
- break;
- }
- case bitc::CST_CODE_CE_BINOP: { // CE_BINOP: [opcode, opval, opval]
- if (Record.size() < 3) return Error("Invalid CE_BINOP record");
- int Opc = GetDecodedBinaryOpcode(Record[0], CurTy);
- if (Opc < 0) {
- V = UndefValue::get(CurTy); // Unknown binop.
- } else {
- Constant *LHS = ValueList.getConstantFwdRef(Record[1], CurTy);
- Constant *RHS = ValueList.getConstantFwdRef(Record[2], CurTy);
- unsigned Flags = 0;
- if (Record.size() >= 4) {
- if (Opc == Instruction::Add ||
- Opc == Instruction::Sub ||
- Opc == Instruction::Mul) {
- if (Record[3] & (1 << bitc::OBO_NO_SIGNED_WRAP))
- Flags |= OverflowingBinaryOperator::NoSignedWrap;
- if (Record[3] & (1 << bitc::OBO_NO_UNSIGNED_WRAP))
- Flags |= OverflowingBinaryOperator::NoUnsignedWrap;
- } else if (Opc == Instruction::SDiv) {
- if (Record[3] & (1 << bitc::SDIV_EXACT))
- Flags |= SDivOperator::IsExact;
- }
- }
- V = ConstantExpr::get(Opc, LHS, RHS, Flags);
- }
- break;
- }
- case bitc::CST_CODE_CE_CAST: { // CE_CAST: [opcode, opty, opval]
- if (Record.size() < 3) return Error("Invalid CE_CAST record");
- int Opc = GetDecodedCastOpcode(Record[0]);
- if (Opc < 0) {
- V = UndefValue::get(CurTy); // Unknown cast.
- } else {
- const Type *OpTy = getTypeByID(Record[1]);
- if (!OpTy) return Error("Invalid CE_CAST record");
- Constant *Op = ValueList.getConstantFwdRef(Record[2], OpTy);
- V = ConstantExpr::getCast(Opc, Op, CurTy);
- }
- break;
- }
- case bitc::CST_CODE_CE_INBOUNDS_GEP:
- case bitc::CST_CODE_CE_GEP: { // CE_GEP: [n x operands]
- if (Record.size() & 1) return Error("Invalid CE_GEP record");
- SmallVector<Constant*, 16> Elts;
- for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
- const Type *ElTy = getTypeByID(Record[i]);
- if (!ElTy) return Error("Invalid CE_GEP record");
- Elts.push_back(ValueList.getConstantFwdRef(Record[i+1], ElTy));
- }
- if (BitCode == bitc::CST_CODE_CE_INBOUNDS_GEP)
- V = ConstantExpr::getInBoundsGetElementPtr(Elts[0], &Elts[1],
- Elts.size()-1);
- else
- V = ConstantExpr::getGetElementPtr(Elts[0], &Elts[1],
- Elts.size()-1);
- break;
- }
- case bitc::CST_CODE_CE_SELECT: // CE_SELECT: [opval#, opval#, opval#]
- if (Record.size() < 3) return Error("Invalid CE_SELECT record");
- V = ConstantExpr::getSelect(ValueList.getConstantFwdRef(Record[0],
- Type::getInt1Ty(Context)),
- ValueList.getConstantFwdRef(Record[1],CurTy),
- ValueList.getConstantFwdRef(Record[2],CurTy));
- break;
- case bitc::CST_CODE_CE_EXTRACTELT: { // CE_EXTRACTELT: [opty, opval, opval]
- if (Record.size() < 3) return Error("Invalid CE_EXTRACTELT record");
- const VectorType *OpTy =
- dyn_cast_or_null<VectorType>(getTypeByID(Record[0]));
- if (OpTy == 0) return Error("Invalid CE_EXTRACTELT record");
- Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
- Constant *Op1 = ValueList.getConstantFwdRef(Record[2], Type::getInt32Ty(Context));
- V = ConstantExpr::getExtractElement(Op0, Op1);
- break;
- }
- case bitc::CST_CODE_CE_INSERTELT: { // CE_INSERTELT: [opval, opval, opval]
- const VectorType *OpTy = dyn_cast<VectorType>(CurTy);
- if (Record.size() < 3 || OpTy == 0)
- return Error("Invalid CE_INSERTELT record");
- Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy);
- Constant *Op1 = ValueList.getConstantFwdRef(Record[1],
- OpTy->getElementType());
- Constant *Op2 = ValueList.getConstantFwdRef(Record[2], Type::getInt32Ty(Context));
- V = ConstantExpr::getInsertElement(Op0, Op1, Op2);
- break;
- }
- case bitc::CST_CODE_CE_SHUFFLEVEC: { // CE_SHUFFLEVEC: [opval, opval, opval]
- const VectorType *OpTy = dyn_cast<VectorType>(CurTy);
- if (Record.size() < 3 || OpTy == 0)
- return Error("Invalid CE_SHUFFLEVEC record");
- Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy);
- Constant *Op1 = ValueList.getConstantFwdRef(Record[1], OpTy);
- const Type *ShufTy = VectorType::get(Type::getInt32Ty(Context),
- OpTy->getNumElements());
- Constant *Op2 = ValueList.getConstantFwdRef(Record[2], ShufTy);
- V = ConstantExpr::getShuffleVector(Op0, Op1, Op2);
- break;
- }
- case bitc::CST_CODE_CE_SHUFVEC_EX: { // [opty, opval, opval, opval]
- const VectorType *RTy = dyn_cast<VectorType>(CurTy);
- const VectorType *OpTy = dyn_cast<VectorType>(getTypeByID(Record[0]));
- if (Record.size() < 4 || RTy == 0 || OpTy == 0)
- return Error("Invalid CE_SHUFVEC_EX record");
- Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
- Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy);
- const Type *ShufTy = VectorType::get(Type::getInt32Ty(Context),
- RTy->getNumElements());
- Constant *Op2 = ValueList.getConstantFwdRef(Record[3], ShufTy);
- V = ConstantExpr::getShuffleVector(Op0, Op1, Op2);
- break;
- }
- case bitc::CST_CODE_CE_CMP: { // CE_CMP: [opty, opval, opval, pred]
- if (Record.size() < 4) return Error("Invalid CE_CMP record");
- const Type *OpTy = getTypeByID(Record[0]);
- if (OpTy == 0) return Error("Invalid CE_CMP record");
- Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
- Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy);
-
- if (OpTy->isFPOrFPVectorTy())
- V = ConstantExpr::getFCmp(Record[3], Op0, Op1);
- else
- V = ConstantExpr::getICmp(Record[3], Op0, Op1);
- break;
- }
- case bitc::CST_CODE_INLINEASM: {
- if (Record.size() < 2) return Error("Invalid INLINEASM record");
- std::string AsmStr, ConstrStr;
- bool HasSideEffects = Record[0] & 1;
- bool IsAlignStack = Record[0] >> 1;
- unsigned AsmStrSize = Record[1];
- if (2+AsmStrSize >= Record.size())
- return Error("Invalid INLINEASM record");
- unsigned ConstStrSize = Record[2+AsmStrSize];
- if (3+AsmStrSize+ConstStrSize > Record.size())
- return Error("Invalid INLINEASM record");
-
- for (unsigned i = 0; i != AsmStrSize; ++i)
- AsmStr += (char)Record[2+i];
- for (unsigned i = 0; i != ConstStrSize; ++i)
- ConstrStr += (char)Record[3+AsmStrSize+i];
- const PointerType *PTy = cast<PointerType>(CurTy);
- V = InlineAsm::get(cast<FunctionType>(PTy->getElementType()),
- AsmStr, ConstrStr, HasSideEffects, IsAlignStack);
- break;
- }
- case bitc::CST_CODE_BLOCKADDRESS:{
- if (Record.size() < 3) return Error("Invalid CE_BLOCKADDRESS record");
- const Type *FnTy = getTypeByID(Record[0]);
- if (FnTy == 0) return Error("Invalid CE_BLOCKADDRESS record");
- Function *Fn =
- dyn_cast_or_null<Function>(ValueList.getConstantFwdRef(Record[1],FnTy));
- if (Fn == 0) return Error("Invalid CE_BLOCKADDRESS record");
-
- GlobalVariable *FwdRef = new GlobalVariable(*Fn->getParent(),
- Type::getInt8Ty(Context),
- false, GlobalValue::InternalLinkage,
- 0, "");
- BlockAddrFwdRefs[Fn].push_back(std::make_pair(Record[2], FwdRef));
- V = FwdRef;
- break;
- }
- }
-
- ValueList.AssignValue(V, NextCstNo);
- ++NextCstNo;
- }
-
- if (NextCstNo != ValueList.size())
- return Error("Invalid constant reference!");
-
- if (Stream.ReadBlockEnd())
- return Error("Error at end of constants block");
-
- // Once all the constants have been read, go through and resolve forward
- // references.
- ValueList.ResolveConstantForwardRefs();
- return false;
-}
-
-/// RememberAndSkipFunctionBody - When we see the block for a function body,
-/// remember where it is and then skip it. This lets us lazily deserialize the
-/// functions.
-bool BitcodeReader::RememberAndSkipFunctionBody() {
- // Get the function we are talking about.
- if (FunctionsWithBodies.empty())
- return Error("Insufficient function protos");
-
- Function *Fn = FunctionsWithBodies.back();
- FunctionsWithBodies.pop_back();
-
- // Save the current stream state.
- uint64_t CurBit = Stream.GetCurrentBitNo();
- DeferredFunctionInfo[Fn] = CurBit;
-
- // Skip over the function block for now.
- if (Stream.SkipBlock())
- return Error("Malformed block record");
- return false;
-}
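RememberAndSkipFunctionBody implements the usual deferred-parsing pattern: record where each FUNCTION_BLOCK starts, skip it, and come back on demand. A minimal sketch of that idea with illustrative names (a real reader keys on the Function* and seeks the bitstream back to the saved bit position):

#include <cstdint>
#include <map>
#include <string>

static std::map<std::string, uint64_t> DeferredBodies;

static void rememberBody(const std::string &Fn, uint64_t BitPos) {
  DeferredBodies[Fn] = BitPos;        // where the body's block starts
}

static bool materializeBody(const std::string &Fn, uint64_t &BitPosOut) {
  std::map<std::string, uint64_t>::iterator It = DeferredBodies.find(Fn);
  if (It == DeferredBodies.end())
    return false;                     // nothing deferred for this function
  BitPosOut = It->second;             // a real reader seeks back here and parses
  DeferredBodies.erase(It);
  return true;
}

int main() {
  rememberBody("main", 4096);
  uint64_t Pos = 0;
  return (materializeBody("main", Pos) && Pos == 4096) ? 0 : 1;
}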
-
-bool BitcodeReader::ParseModule() {
- if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
- return Error("Malformed block record");
-
- SmallVector<uint64_t, 64> Record;
- std::vector<std::string> SectionTable;
- std::vector<std::string> GCTable;
-
- // Read all the records for this module.
- while (!Stream.AtEndOfStream()) {
- unsigned Code = Stream.ReadCode();
- if (Code == bitc::END_BLOCK) {
- if (Stream.ReadBlockEnd())
- return Error("Error at end of module block");
-
- // Patch the initializers for globals and aliases up.
- ResolveGlobalAndAliasInits();
- if (!GlobalInits.empty() || !AliasInits.empty())
- return Error("Malformed global initializer set");
- if (!FunctionsWithBodies.empty())
- return Error("Too few function bodies found");
-
- // Look for intrinsic functions which need to be upgraded at some point
- for (Module::iterator FI = TheModule->begin(), FE = TheModule->end();
- FI != FE; ++FI) {
- Function* NewFn;
- if (UpgradeIntrinsicFunction(FI, NewFn))
- UpgradedIntrinsics.push_back(std::make_pair(FI, NewFn));
- }
-
- // Force deallocation of memory for these vectors to favor clients that
- // want lazy deserialization.
- std::vector<std::pair<GlobalVariable*, unsigned> >().swap(GlobalInits);
- std::vector<std::pair<GlobalAlias*, unsigned> >().swap(AliasInits);
- std::vector<Function*>().swap(FunctionsWithBodies);
- return false;
- }
-
- if (Code == bitc::ENTER_SUBBLOCK) {
- switch (Stream.ReadSubBlockID()) {
- default: // Skip unknown content.
- if (Stream.SkipBlock())
- return Error("Malformed block record");
- break;
- case bitc::BLOCKINFO_BLOCK_ID:
- if (Stream.ReadBlockInfoBlock())
- return Error("Malformed BlockInfoBlock");
- break;
- case bitc::PARAMATTR_BLOCK_ID:
- if (ParseAttributeBlock())
- return true;
- break;
- case bitc::TYPE_BLOCK_ID:
- if (ParseTypeTable())
- return true;
- break;
- case bitc::TYPE_SYMTAB_BLOCK_ID:
- if (ParseTypeSymbolTable())
- return true;
- break;
- case bitc::VALUE_SYMTAB_BLOCK_ID:
- if (ParseValueSymbolTable())
- return true;
- break;
- case bitc::CONSTANTS_BLOCK_ID:
- if (ParseConstants() || ResolveGlobalAndAliasInits())
- return true;
- break;
- case bitc::METADATA_BLOCK_ID:
- if (ParseMetadata())
- return true;
- break;
- case bitc::FUNCTION_BLOCK_ID:
- // If this is the first function body we've seen, reverse the
- // FunctionsWithBodies list.
- if (!HasReversedFunctionsWithBodies) {
- std::reverse(FunctionsWithBodies.begin(), FunctionsWithBodies.end());
- HasReversedFunctionsWithBodies = true;
- }
-
- if (RememberAndSkipFunctionBody())
- return true;
- break;
- }
- continue;
- }
-
- if (Code == bitc::DEFINE_ABBREV) {
- Stream.ReadAbbrevRecord();
- continue;
- }
-
- // Read a record.
- switch (Stream.ReadRecord(Code, Record)) {
- default: break; // Default behavior, ignore unknown content.
- case bitc::MODULE_CODE_VERSION: // VERSION: [version#]
- if (Record.size() < 1)
- return Error("Malformed MODULE_CODE_VERSION");
- // Only version #0 is supported so far.
- if (Record[0] != 0)
- return Error("Unknown bitstream version!");
- break;
- case bitc::MODULE_CODE_TRIPLE: { // TRIPLE: [strchr x N]
- std::string S;
- if (ConvertToString(Record, 0, S))
- return Error("Invalid MODULE_CODE_TRIPLE record");
- TheModule->setTargetTriple(S);
- break;
- }
- case bitc::MODULE_CODE_DATALAYOUT: { // DATALAYOUT: [strchr x N]
- std::string S;
- if (ConvertToString(Record, 0, S))
- return Error("Invalid MODULE_CODE_DATALAYOUT record");
- TheModule->setDataLayout(S);
- break;
- }
- case bitc::MODULE_CODE_ASM: { // ASM: [strchr x N]
- std::string S;
- if (ConvertToString(Record, 0, S))
- return Error("Invalid MODULE_CODE_ASM record");
- TheModule->setModuleInlineAsm(S);
- break;
- }
- case bitc::MODULE_CODE_DEPLIB: { // DEPLIB: [strchr x N]
- std::string S;
- if (ConvertToString(Record, 0, S))
- return Error("Invalid MODULE_CODE_DEPLIB record");
- TheModule->addLibrary(S);
- break;
- }
- case bitc::MODULE_CODE_SECTIONNAME: { // SECTIONNAME: [strchr x N]
- std::string S;
- if (ConvertToString(Record, 0, S))
- return Error("Invalid MODULE_CODE_SECTIONNAME record");
- SectionTable.push_back(S);
- break;
- }
- case bitc::MODULE_CODE_GCNAME: { // GCNAME: [strchr x N]
- std::string S;
- if (ConvertToString(Record, 0, S))
- return Error("Invalid MODULE_CODE_GCNAME record");
- GCTable.push_back(S);
- break;
- }
- // GLOBALVAR: [pointer type, isconst, initid,
- // linkage, alignment, section, visibility, threadlocal]
- case bitc::MODULE_CODE_GLOBALVAR: {
- if (Record.size() < 6)
- return Error("Invalid MODULE_CODE_GLOBALVAR record");
- const Type *Ty = getTypeByID(Record[0]);
- if (!Ty->isPointerTy())
- return Error("Global not a pointer type!");
- unsigned AddressSpace = cast<PointerType>(Ty)->getAddressSpace();
- Ty = cast<PointerType>(Ty)->getElementType();
-
- bool isConstant = Record[1];
- GlobalValue::LinkageTypes Linkage = GetDecodedLinkage(Record[3]);
- unsigned Alignment = (1 << Record[4]) >> 1;
- std::string Section;
- if (Record[5]) {
- if (Record[5]-1 >= SectionTable.size())
- return Error("Invalid section ID");
- Section = SectionTable[Record[5]-1];
- }
- GlobalValue::VisibilityTypes Visibility = GlobalValue::DefaultVisibility;
- if (Record.size() > 6)
- Visibility = GetDecodedVisibility(Record[6]);
- bool isThreadLocal = false;
- if (Record.size() > 7)
- isThreadLocal = Record[7];
-
- GlobalVariable *NewGV =
- new GlobalVariable(*TheModule, Ty, isConstant, Linkage, 0, "", 0,
- isThreadLocal, AddressSpace);
- NewGV->setAlignment(Alignment);
- if (!Section.empty())
- NewGV->setSection(Section);
- NewGV->setVisibility(Visibility);
- NewGV->setThreadLocal(isThreadLocal);
-
- ValueList.push_back(NewGV);
-
- // Remember which value to use for the global initializer.
- if (unsigned InitID = Record[2])
- GlobalInits.push_back(std::make_pair(NewGV, InitID-1));
- break;
- }
- // FUNCTION: [type, callingconv, isproto, linkage, paramattr,
- // alignment, section, visibility, gc]
- case bitc::MODULE_CODE_FUNCTION: {
- if (Record.size() < 8)
- return Error("Invalid MODULE_CODE_FUNCTION record");
- const Type *Ty = getTypeByID(Record[0]);
- if (!Ty->isPointerTy())
- return Error("Function not a pointer type!");
- const FunctionType *FTy =
- dyn_cast<FunctionType>(cast<PointerType>(Ty)->getElementType());
- if (!FTy)
- return Error("Function not a pointer to function type!");
-
- Function *Func = Function::Create(FTy, GlobalValue::ExternalLinkage,
- "", TheModule);
-
- Func->setCallingConv(static_cast<CallingConv::ID>(Record[1]));
- bool isProto = Record[2];
- Func->setLinkage(GetDecodedLinkage(Record[3]));
- Func->setAttributes(getAttributes(Record[4]));
-
- Func->setAlignment((1 << Record[5]) >> 1);
- if (Record[6]) {
- if (Record[6]-1 >= SectionTable.size())
- return Error("Invalid section ID");
- Func->setSection(SectionTable[Record[6]-1]);
- }
- Func->setVisibility(GetDecodedVisibility(Record[7]));
- if (Record.size() > 8 && Record[8]) {
- if (Record[8]-1 > GCTable.size())
- return Error("Invalid GC ID");
- Func->setGC(GCTable[Record[8]-1].c_str());
- }
- ValueList.push_back(Func);
-
- // If this is a function with a body, remember the prototype we are
- // creating now, so that we can match up the body with them later.
- if (!isProto)
- FunctionsWithBodies.push_back(Func);
- break;
- }
- // ALIAS: [alias type, aliasee val#, linkage]
- // ALIAS: [alias type, aliasee val#, linkage, visibility]
- case bitc::MODULE_CODE_ALIAS: {
- if (Record.size() < 3)
- return Error("Invalid MODULE_ALIAS record");
- const Type *Ty = getTypeByID(Record[0]);
- if (!Ty->isPointerTy())
- return Error("Function not a pointer type!");
-
- GlobalAlias *NewGA = new GlobalAlias(Ty, GetDecodedLinkage(Record[2]),
- "", 0, TheModule);
- // Old bitcode files didn't have visibility field.
- if (Record.size() > 3)
- NewGA->setVisibility(GetDecodedVisibility(Record[3]));
- ValueList.push_back(NewGA);
- AliasInits.push_back(std::make_pair(NewGA, Record[1]));
- break;
- }
- /// MODULE_CODE_PURGEVALS: [numvals]
- case bitc::MODULE_CODE_PURGEVALS:
- // Trim down the value list to the specified size.
- if (Record.size() < 1 || Record[0] > ValueList.size())
- return Error("Invalid MODULE_PURGEVALS record");
- ValueList.shrinkTo(Record[0]);
- break;
- }
- Record.clear();
- }
-
- return Error("Premature end of bitstream");
-}
-
-bool BitcodeReader::ParseBitcodeInto(Module *M) {
- TheModule = 0;
-
- if (Buffer->getBufferSize() & 3)
- return Error("Bitcode stream should be a multiple of 4 bytes in length");
-
- unsigned char *BufPtr = (unsigned char *)Buffer->getBufferStart();
- unsigned char *BufEnd = BufPtr+Buffer->getBufferSize();
-
- // If we have a wrapper header, parse it and ignore the non-bc file contents.
- // The magic number is 0x0B17C0DE stored in little endian.
- if (isBitcodeWrapper(BufPtr, BufEnd))
- if (SkipBitcodeWrapperHeader(BufPtr, BufEnd))
- return Error("Invalid bitcode wrapper header");
-
- StreamFile.init(BufPtr, BufEnd);
- Stream.init(StreamFile);
-
- // Sniff for the signature.
- if (Stream.Read(8) != 'B' ||
- Stream.Read(8) != 'C' ||
- Stream.Read(4) != 0x0 ||
- Stream.Read(4) != 0xC ||
- Stream.Read(4) != 0xE ||
- Stream.Read(4) != 0xD)
- return Error("Invalid bitcode signature");
-
- // We expect a number of well-defined blocks, though we don't necessarily
- // need to understand them all.
- while (!Stream.AtEndOfStream()) {
- unsigned Code = Stream.ReadCode();
-
- if (Code != bitc::ENTER_SUBBLOCK)
- return Error("Invalid record at top-level");
-
- unsigned BlockID = Stream.ReadSubBlockID();
-
- // We only know the MODULE subblock ID.
- switch (BlockID) {
- case bitc::BLOCKINFO_BLOCK_ID:
- if (Stream.ReadBlockInfoBlock())
- return Error("Malformed BlockInfoBlock");
- break;
- case bitc::MODULE_BLOCK_ID:
- // Reject multiple MODULE_BLOCK's in a single bitstream.
- if (TheModule)
- return Error("Multiple MODULE_BLOCKs in same stream");
- TheModule = M;
- if (ParseModule())
- return true;
- break;
- default:
- if (Stream.SkipBlock())
- return Error("Malformed block record");
- break;
- }
- }
-
- return false;
-}
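ParseBitcodeInto sniffs two on-disk forms: a raw stream beginning with the bytes 'B' 'C' 0xC0 0xDE, or a wrapper whose leading 32-bit word is 0x0B17C0DE stored little-endian. A small hedged sketch of that check with an illustrative helper, not the reader's own API:

#include <cstddef>
#include <cstdint>

static bool looksLikeBitcode(const unsigned char *Buf, size_t Len) {
  if (Len < 4)
    return false;
  if (Buf[0] == 'B' && Buf[1] == 'C' && Buf[2] == 0xC0 && Buf[3] == 0xDE)
    return true;                                 // raw bitcode stream
  uint32_t Word = Buf[0] | (Buf[1] << 8) | (Buf[2] << 16) |
                  ((uint32_t)Buf[3] << 24);
  return Word == 0x0B17C0DE;                     // wrapper header magic
}

int main() {
  const unsigned char Raw[]     = { 'B', 'C', 0xC0, 0xDE };
  const unsigned char Wrapped[] = { 0xDE, 0xC0, 0x17, 0x0B };
  return (looksLikeBitcode(Raw, 4) && looksLikeBitcode(Wrapped, 4)) ? 0 : 1;
}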
-
-/// ParseMetadataAttachment - Parse metadata attachments.
-bool BitcodeReader::ParseMetadataAttachment() {
- if (Stream.EnterSubBlock(bitc::METADATA_ATTACHMENT_ID))
- return Error("Malformed block record");
-
- SmallVector<uint64_t, 64> Record;
- while(1) {
- unsigned Code = Stream.ReadCode();
- if (Code == bitc::END_BLOCK) {
- if (Stream.ReadBlockEnd())
- return Error("Error at end of PARAMATTR block");
- break;
- }
- if (Code == bitc::DEFINE_ABBREV) {
- Stream.ReadAbbrevRecord();
- continue;
- }
- // Read a metadata attachment record.
- Record.clear();
- switch (Stream.ReadRecord(Code, Record)) {
- default: // Default behavior: ignore.
- break;
- case bitc::METADATA_ATTACHMENT: {
- unsigned RecordLength = Record.size();
- if (Record.empty() || (RecordLength - 1) % 2 == 1)
- return Error ("Invalid METADATA_ATTACHMENT reader!");
- Instruction *Inst = InstructionList[Record[0]];
- for (unsigned i = 1; i != RecordLength; i = i+2) {
- unsigned Kind = Record[i];
- Value *Node = MDValueList.getValueFwdRef(Record[i+1]);
- Inst->setMetadata(Kind, cast<MDNode>(Node));
- }
- break;
- }
- }
- }
- return false;
-}
-
-/// ParseFunctionBody - Lazily parse the specified function body block.
-bool BitcodeReader::ParseFunctionBody(Function *F) {
- if (Stream.EnterSubBlock(bitc::FUNCTION_BLOCK_ID))
- return Error("Malformed block record");
-
- InstructionList.clear();
- unsigned ModuleValueListSize = ValueList.size();
-
- // Add all the function arguments to the value table.
- for(Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
- ValueList.push_back(I);
-
- unsigned NextValueNo = ValueList.size();
- BasicBlock *CurBB = 0;
- unsigned CurBBNo = 0;
-
- // Read all the records.
- SmallVector<uint64_t, 64> Record;
- while (1) {
- unsigned Code = Stream.ReadCode();
- if (Code == bitc::END_BLOCK) {
- if (Stream.ReadBlockEnd())
- return Error("Error at end of function block");
- break;
- }
-
- if (Code == bitc::ENTER_SUBBLOCK) {
- switch (Stream.ReadSubBlockID()) {
- default: // Skip unknown content.
- if (Stream.SkipBlock())
- return Error("Malformed block record");
- break;
- case bitc::CONSTANTS_BLOCK_ID:
- if (ParseConstants()) return true;
- NextValueNo = ValueList.size();
- break;
- case bitc::VALUE_SYMTAB_BLOCK_ID:
- if (ParseValueSymbolTable()) return true;
- break;
- case bitc::METADATA_ATTACHMENT_ID:
- if (ParseMetadataAttachment()) return true;
- break;
- case bitc::METADATA_BLOCK_ID:
- if (ParseMetadata()) return true;
- break;
- }
- continue;
- }
-
- if (Code == bitc::DEFINE_ABBREV) {
- Stream.ReadAbbrevRecord();
- continue;
- }
-
- // Read a record.
- Record.clear();
- Instruction *I = 0;
- unsigned BitCode = Stream.ReadRecord(Code, Record);
- switch (BitCode) {
- default: // Default behavior: reject
- return Error("Unknown instruction");
- case bitc::FUNC_CODE_DECLAREBLOCKS: // DECLAREBLOCKS: [nblocks]
- if (Record.size() < 1 || Record[0] == 0)
- return Error("Invalid DECLAREBLOCKS record");
- // Create all the basic blocks for the function.
- FunctionBBs.resize(Record[0]);
- for (unsigned i = 0, e = FunctionBBs.size(); i != e; ++i)
- FunctionBBs[i] = BasicBlock::Create(Context, "", F);
- CurBB = FunctionBBs[0];
- continue;
-
- case bitc::FUNC_CODE_INST_BINOP: { // BINOP: [opval, ty, opval, opcode]
- unsigned OpNum = 0;
- Value *LHS, *RHS;
- if (getValueTypePair(Record, OpNum, NextValueNo, LHS) ||
- getValue(Record, OpNum, LHS->getType(), RHS) ||
- OpNum+1 > Record.size())
- return Error("Invalid BINOP record");
-
- int Opc = GetDecodedBinaryOpcode(Record[OpNum++], LHS->getType());
- if (Opc == -1) return Error("Invalid BINOP record");
- I = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
- InstructionList.push_back(I);
- if (OpNum < Record.size()) {
- if (Opc == Instruction::Add ||
- Opc == Instruction::Sub ||
- Opc == Instruction::Mul) {
- if (Record[OpNum] & (1 << bitc::OBO_NO_SIGNED_WRAP))
- cast<BinaryOperator>(I)->setHasNoSignedWrap(true);
- if (Record[OpNum] & (1 << bitc::OBO_NO_UNSIGNED_WRAP))
- cast<BinaryOperator>(I)->setHasNoUnsignedWrap(true);
- } else if (Opc == Instruction::SDiv) {
- if (Record[OpNum] & (1 << bitc::SDIV_EXACT))
- cast<BinaryOperator>(I)->setIsExact(true);
- }
- }
- break;
- }
- case bitc::FUNC_CODE_INST_CAST: { // CAST: [opval, opty, destty, castopc]
- unsigned OpNum = 0;
- Value *Op;
- if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
- OpNum+2 != Record.size())
- return Error("Invalid CAST record");
-
- const Type *ResTy = getTypeByID(Record[OpNum]);
- int Opc = GetDecodedCastOpcode(Record[OpNum+1]);
- if (Opc == -1 || ResTy == 0)
- return Error("Invalid CAST record");
- I = CastInst::Create((Instruction::CastOps)Opc, Op, ResTy);
- InstructionList.push_back(I);
- break;
- }
- case bitc::FUNC_CODE_INST_INBOUNDS_GEP:
- case bitc::FUNC_CODE_INST_GEP: { // GEP: [n x operands]
- unsigned OpNum = 0;
- Value *BasePtr;
- if (getValueTypePair(Record, OpNum, NextValueNo, BasePtr))
- return Error("Invalid GEP record");
-
- SmallVector<Value*, 16> GEPIdx;
- while (OpNum != Record.size()) {
- Value *Op;
- if (getValueTypePair(Record, OpNum, NextValueNo, Op))
- return Error("Invalid GEP record");
- GEPIdx.push_back(Op);
- }
-
- I = GetElementPtrInst::Create(BasePtr, GEPIdx.begin(), GEPIdx.end());
- InstructionList.push_back(I);
- if (BitCode == bitc::FUNC_CODE_INST_INBOUNDS_GEP)
- cast<GetElementPtrInst>(I)->setIsInBounds(true);
- break;
- }
-
- case bitc::FUNC_CODE_INST_EXTRACTVAL: {
- // EXTRACTVAL: [opty, opval, n x indices]
- unsigned OpNum = 0;
- Value *Agg;
- if (getValueTypePair(Record, OpNum, NextValueNo, Agg))
- return Error("Invalid EXTRACTVAL record");
-
- SmallVector<unsigned, 4> EXTRACTVALIdx;
- for (unsigned RecSize = Record.size();
- OpNum != RecSize; ++OpNum) {
- uint64_t Index = Record[OpNum];
- if ((unsigned)Index != Index)
- return Error("Invalid EXTRACTVAL index");
- EXTRACTVALIdx.push_back((unsigned)Index);
- }
-
- I = ExtractValueInst::Create(Agg,
- EXTRACTVALIdx.begin(), EXTRACTVALIdx.end());
- InstructionList.push_back(I);
- break;
- }
-
- case bitc::FUNC_CODE_INST_INSERTVAL: {
- // INSERTVAL: [opty, opval, opty, opval, n x indices]
- unsigned OpNum = 0;
- Value *Agg;
- if (getValueTypePair(Record, OpNum, NextValueNo, Agg))
- return Error("Invalid INSERTVAL record");
- Value *Val;
- if (getValueTypePair(Record, OpNum, NextValueNo, Val))
- return Error("Invalid INSERTVAL record");
-
- SmallVector<unsigned, 4> INSERTVALIdx;
- for (unsigned RecSize = Record.size();
- OpNum != RecSize; ++OpNum) {
- uint64_t Index = Record[OpNum];
- if ((unsigned)Index != Index)
- return Error("Invalid INSERTVAL index");
- INSERTVALIdx.push_back((unsigned)Index);
- }
-
- I = InsertValueInst::Create(Agg, Val,
- INSERTVALIdx.begin(), INSERTVALIdx.end());
- InstructionList.push_back(I);
- break;
- }
-
- case bitc::FUNC_CODE_INST_SELECT: { // SELECT: [opval, ty, opval, opval]
- // obsolete form of select
- // handles select i1 ... in old bitcode
- unsigned OpNum = 0;
- Value *TrueVal, *FalseVal, *Cond;
- if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) ||
- getValue(Record, OpNum, TrueVal->getType(), FalseVal) ||
- getValue(Record, OpNum, Type::getInt1Ty(Context), Cond))
- return Error("Invalid SELECT record");
-
- I = SelectInst::Create(Cond, TrueVal, FalseVal);
- InstructionList.push_back(I);
- break;
- }
-
- case bitc::FUNC_CODE_INST_VSELECT: {// VSELECT: [ty,opval,opval,predty,pred]
- // new form of select
- // handles select i1 or select [N x i1]
- unsigned OpNum = 0;
- Value *TrueVal, *FalseVal, *Cond;
- if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) ||
- getValue(Record, OpNum, TrueVal->getType(), FalseVal) ||
- getValueTypePair(Record, OpNum, NextValueNo, Cond))
- return Error("Invalid SELECT record");
-
- // select condition can be either i1 or [N x i1]
- if (const VectorType* vector_type =
- dyn_cast<const VectorType>(Cond->getType())) {
- // expect <n x i1>
- if (vector_type->getElementType() != Type::getInt1Ty(Context))
- return Error("Invalid SELECT condition type");
- } else {
- // expect i1
- if (Cond->getType() != Type::getInt1Ty(Context))
- return Error("Invalid SELECT condition type");
- }
-
- I = SelectInst::Create(Cond, TrueVal, FalseVal);
- InstructionList.push_back(I);
- break;
- }
-
- case bitc::FUNC_CODE_INST_EXTRACTELT: { // EXTRACTELT: [opty, opval, opval]
- unsigned OpNum = 0;
- Value *Vec, *Idx;
- if (getValueTypePair(Record, OpNum, NextValueNo, Vec) ||
- getValue(Record, OpNum, Type::getInt32Ty(Context), Idx))
- return Error("Invalid EXTRACTELT record");
- I = ExtractElementInst::Create(Vec, Idx);
- InstructionList.push_back(I);
- break;
- }
-
- case bitc::FUNC_CODE_INST_INSERTELT: { // INSERTELT: [ty, opval,opval,opval]
- unsigned OpNum = 0;
- Value *Vec, *Elt, *Idx;
- if (getValueTypePair(Record, OpNum, NextValueNo, Vec) ||
- getValue(Record, OpNum,
- cast<VectorType>(Vec->getType())->getElementType(), Elt) ||
- getValue(Record, OpNum, Type::getInt32Ty(Context), Idx))
- return Error("Invalid INSERTELT record");
- I = InsertElementInst::Create(Vec, Elt, Idx);
- InstructionList.push_back(I);
- break;
- }
-
- case bitc::FUNC_CODE_INST_SHUFFLEVEC: {// SHUFFLEVEC: [opval,ty,opval,opval]
- unsigned OpNum = 0;
- Value *Vec1, *Vec2, *Mask;
- if (getValueTypePair(Record, OpNum, NextValueNo, Vec1) ||
- getValue(Record, OpNum, Vec1->getType(), Vec2))
- return Error("Invalid SHUFFLEVEC record");
-
- if (getValueTypePair(Record, OpNum, NextValueNo, Mask))
- return Error("Invalid SHUFFLEVEC record");
- I = new ShuffleVectorInst(Vec1, Vec2, Mask);
- InstructionList.push_back(I);
- break;
- }
-
- case bitc::FUNC_CODE_INST_CMP: // CMP: [opty, opval, opval, pred]
- // Old form of ICmp/FCmp returning bool
- // Existed to differentiate between icmp/fcmp and vicmp/vfcmp which were
- // both legal on vectors but had different behaviour.
- case bitc::FUNC_CODE_INST_CMP2: { // CMP2: [opty, opval, opval, pred]
- // FCmp/ICmp returning bool or vector of bool
-
- unsigned OpNum = 0;
- Value *LHS, *RHS;
- if (getValueTypePair(Record, OpNum, NextValueNo, LHS) ||
- getValue(Record, OpNum, LHS->getType(), RHS) ||
- OpNum+1 != Record.size())
- return Error("Invalid CMP record");
-
- if (LHS->getType()->isFPOrFPVectorTy())
- I = new FCmpInst((FCmpInst::Predicate)Record[OpNum], LHS, RHS);
- else
- I = new ICmpInst((ICmpInst::Predicate)Record[OpNum], LHS, RHS);
- InstructionList.push_back(I);
- break;
- }
-
- case bitc::FUNC_CODE_INST_GETRESULT: { // GETRESULT: [ty, val, n]
- if (Record.size() != 2)
- return Error("Invalid GETRESULT record");
- unsigned OpNum = 0;
- Value *Op;
- getValueTypePair(Record, OpNum, NextValueNo, Op);
- unsigned Index = Record[1];
- I = ExtractValueInst::Create(Op, Index);
- InstructionList.push_back(I);
- break;
- }
-
- case bitc::FUNC_CODE_INST_RET: // RET: [opty,opval<optional>]
- {
- unsigned Size = Record.size();
- if (Size == 0) {
- I = ReturnInst::Create(Context);
- InstructionList.push_back(I);
- break;
- }
-
- unsigned OpNum = 0;
- SmallVector<Value *,4> Vs;
- do {
- Value *Op = NULL;
- if (getValueTypePair(Record, OpNum, NextValueNo, Op))
- return Error("Invalid RET record");
- Vs.push_back(Op);
- } while(OpNum != Record.size());
-
- const Type *ReturnType = F->getReturnType();
- if (Vs.size() > 1 ||
- (ReturnType->isStructTy() &&
- (Vs.empty() || Vs[0]->getType() != ReturnType))) {
- Value *RV = UndefValue::get(ReturnType);
- for (unsigned i = 0, e = Vs.size(); i != e; ++i) {
- I = InsertValueInst::Create(RV, Vs[i], i, "mrv");
- InstructionList.push_back(I);
- CurBB->getInstList().push_back(I);
- ValueList.AssignValue(I, NextValueNo++);
- RV = I;
- }
- I = ReturnInst::Create(Context, RV);
- InstructionList.push_back(I);
- break;
- }
-
- I = ReturnInst::Create(Context, Vs[0]);
- InstructionList.push_back(I);
- break;
- }
- case bitc::FUNC_CODE_INST_BR: { // BR: [bb#, bb#, opval] or [bb#]
- if (Record.size() != 1 && Record.size() != 3)
- return Error("Invalid BR record");
- BasicBlock *TrueDest = getBasicBlock(Record[0]);
- if (TrueDest == 0)
- return Error("Invalid BR record");
-
- if (Record.size() == 1) {
- I = BranchInst::Create(TrueDest);
- InstructionList.push_back(I);
- }
- else {
- BasicBlock *FalseDest = getBasicBlock(Record[1]);
- Value *Cond = getFnValueByID(Record[2], Type::getInt1Ty(Context));
- if (FalseDest == 0 || Cond == 0)
- return Error("Invalid BR record");
- I = BranchInst::Create(TrueDest, FalseDest, Cond);
- InstructionList.push_back(I);
- }
- break;
- }
- case bitc::FUNC_CODE_INST_SWITCH: { // SWITCH: [opty, op0, op1, ...]
- if (Record.size() < 3 || (Record.size() & 1) == 0)
- return Error("Invalid SWITCH record");
- const Type *OpTy = getTypeByID(Record[0]);
- Value *Cond = getFnValueByID(Record[1], OpTy);
- BasicBlock *Default = getBasicBlock(Record[2]);
- if (OpTy == 0 || Cond == 0 || Default == 0)
- return Error("Invalid SWITCH record");
- unsigned NumCases = (Record.size()-3)/2;
- SwitchInst *SI = SwitchInst::Create(Cond, Default, NumCases);
- InstructionList.push_back(SI);
- for (unsigned i = 0, e = NumCases; i != e; ++i) {
- ConstantInt *CaseVal =
- dyn_cast_or_null<ConstantInt>(getFnValueByID(Record[3+i*2], OpTy));
- BasicBlock *DestBB = getBasicBlock(Record[1+3+i*2]);
- if (CaseVal == 0 || DestBB == 0) {
- delete SI;
- return Error("Invalid SWITCH record!");
- }
- SI->addCase(CaseVal, DestBB);
- }
- I = SI;
- break;
- }
- case bitc::FUNC_CODE_INST_INDIRECTBR: { // INDIRECTBR: [opty, op0, op1, ...]
- if (Record.size() < 2)
- return Error("Invalid INDIRECTBR record");
- const Type *OpTy = getTypeByID(Record[0]);
- Value *Address = getFnValueByID(Record[1], OpTy);
- if (OpTy == 0 || Address == 0)
- return Error("Invalid INDIRECTBR record");
- unsigned NumDests = Record.size()-2;
- IndirectBrInst *IBI = IndirectBrInst::Create(Address, NumDests);
- InstructionList.push_back(IBI);
- for (unsigned i = 0, e = NumDests; i != e; ++i) {
- if (BasicBlock *DestBB = getBasicBlock(Record[2+i])) {
- IBI->addDestination(DestBB);
- } else {
- delete IBI;
- return Error("Invalid INDIRECTBR record!");
- }
- }
- I = IBI;
- break;
- }
-
- case bitc::FUNC_CODE_INST_INVOKE: {
- // INVOKE: [attrs, cc, normBB, unwindBB, fnty, op0,op1,op2, ...]
- if (Record.size() < 4) return Error("Invalid INVOKE record");
- AttrListPtr PAL = getAttributes(Record[0]);
- unsigned CCInfo = Record[1];
- BasicBlock *NormalBB = getBasicBlock(Record[2]);
- BasicBlock *UnwindBB = getBasicBlock(Record[3]);
-
- unsigned OpNum = 4;
- Value *Callee;
- if (getValueTypePair(Record, OpNum, NextValueNo, Callee))
- return Error("Invalid INVOKE record");
-
- const PointerType *CalleeTy = dyn_cast<PointerType>(Callee->getType());
- const FunctionType *FTy = !CalleeTy ? 0 :
- dyn_cast<FunctionType>(CalleeTy->getElementType());
-
- // Check that the right number of fixed parameters are here.
- if (FTy == 0 || NormalBB == 0 || UnwindBB == 0 ||
- Record.size() < OpNum+FTy->getNumParams())
- return Error("Invalid INVOKE record");
-
- SmallVector<Value*, 16> Ops;
- for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) {
- Ops.push_back(getFnValueByID(Record[OpNum], FTy->getParamType(i)));
- if (Ops.back() == 0) return Error("Invalid INVOKE record");
- }
-
- if (!FTy->isVarArg()) {
- if (Record.size() != OpNum)
- return Error("Invalid INVOKE record");
- } else {
- // Read type/value pairs for varargs params.
- while (OpNum != Record.size()) {
- Value *Op;
- if (getValueTypePair(Record, OpNum, NextValueNo, Op))
- return Error("Invalid INVOKE record");
- Ops.push_back(Op);
- }
- }
-
- I = InvokeInst::Create(Callee, NormalBB, UnwindBB,
- Ops.begin(), Ops.end());
- InstructionList.push_back(I);
- cast<InvokeInst>(I)->setCallingConv(
- static_cast<CallingConv::ID>(CCInfo));
- cast<InvokeInst>(I)->setAttributes(PAL);
- break;
- }
- case bitc::FUNC_CODE_INST_UNWIND: // UNWIND
- I = new UnwindInst(Context);
- InstructionList.push_back(I);
- break;
- case bitc::FUNC_CODE_INST_UNREACHABLE: // UNREACHABLE
- I = new UnreachableInst(Context);
- InstructionList.push_back(I);
- break;
- case bitc::FUNC_CODE_INST_PHI: { // PHI: [ty, val0,bb0, ...]
- if (Record.size() < 1 || ((Record.size()-1)&1))
- return Error("Invalid PHI record");
- const Type *Ty = getTypeByID(Record[0]);
- if (!Ty) return Error("Invalid PHI record");
-
- PHINode *PN = PHINode::Create(Ty);
- InstructionList.push_back(PN);
- PN->reserveOperandSpace((Record.size()-1)/2);
-
- for (unsigned i = 0, e = Record.size()-1; i != e; i += 2) {
- Value *V = getFnValueByID(Record[1+i], Ty);
- BasicBlock *BB = getBasicBlock(Record[2+i]);
- if (!V || !BB) return Error("Invalid PHI record");
- PN->addIncoming(V, BB);
- }
- I = PN;
- break;
- }
-
- case bitc::FUNC_CODE_INST_MALLOC: { // MALLOC: [instty, op, align]
- // Autoupgrade malloc instruction to malloc call.
- // FIXME: Remove in LLVM 3.0.
- if (Record.size() < 3)
- return Error("Invalid MALLOC record");
- const PointerType *Ty =
- dyn_cast_or_null<PointerType>(getTypeByID(Record[0]));
- Value *Size = getFnValueByID(Record[1], Type::getInt32Ty(Context));
- if (!Ty || !Size) return Error("Invalid MALLOC record");
- if (!CurBB) return Error("Invalid malloc instruction with no BB");
- const Type *Int32Ty = IntegerType::getInt32Ty(CurBB->getContext());
- Constant *AllocSize = ConstantExpr::getSizeOf(Ty->getElementType());
- AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, Int32Ty);
- I = CallInst::CreateMalloc(CurBB, Int32Ty, Ty->getElementType(),
- AllocSize, Size, NULL);
- InstructionList.push_back(I);
- break;
- }
- case bitc::FUNC_CODE_INST_FREE: { // FREE: [op, opty]
- unsigned OpNum = 0;
- Value *Op;
- if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
- OpNum != Record.size())
- return Error("Invalid FREE record");
- if (!CurBB) return Error("Invalid free instruction with no BB");
- I = CallInst::CreateFree(Op, CurBB);
- InstructionList.push_back(I);
- break;
- }
- case bitc::FUNC_CODE_INST_ALLOCA: { // ALLOCA: [instty, op, align]
- if (Record.size() < 3)
- return Error("Invalid ALLOCA record");
- const PointerType *Ty =
- dyn_cast_or_null<PointerType>(getTypeByID(Record[0]));
- Value *Size = getFnValueByID(Record[1], Type::getInt32Ty(Context));
- unsigned Align = Record[2];
- if (!Ty || !Size) return Error("Invalid ALLOCA record");
- I = new AllocaInst(Ty->getElementType(), Size, (1 << Align) >> 1);
- InstructionList.push_back(I);
- break;
- }
- case bitc::FUNC_CODE_INST_LOAD: { // LOAD: [opty, op, align, vol]
- unsigned OpNum = 0;
- Value *Op;
- if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
- OpNum+2 != Record.size())
- return Error("Invalid LOAD record");
-
- I = new LoadInst(Op, "", Record[OpNum+1], (1 << Record[OpNum]) >> 1);
- InstructionList.push_back(I);
- break;
- }
- case bitc::FUNC_CODE_INST_STORE2: { // STORE2:[ptrty, ptr, val, align, vol]
- unsigned OpNum = 0;
- Value *Val, *Ptr;
- if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
- getValue(Record, OpNum,
- cast<PointerType>(Ptr->getType())->getElementType(), Val) ||
- OpNum+2 != Record.size())
- return Error("Invalid STORE record");
-
- I = new StoreInst(Val, Ptr, Record[OpNum+1], (1 << Record[OpNum]) >> 1);
- InstructionList.push_back(I);
- break;
- }
- case bitc::FUNC_CODE_INST_STORE: { // STORE:[val, valty, ptr, align, vol]
- // FIXME: Legacy form of store instruction. Should be removed in LLVM 3.0.
- unsigned OpNum = 0;
- Value *Val, *Ptr;
- if (getValueTypePair(Record, OpNum, NextValueNo, Val) ||
- getValue(Record, OpNum,
- PointerType::getUnqual(Val->getType()), Ptr)||
- OpNum+2 != Record.size())
- return Error("Invalid STORE record");
-
- I = new StoreInst(Val, Ptr, Record[OpNum+1], (1 << Record[OpNum]) >> 1);
- InstructionList.push_back(I);
- break;
- }
- case bitc::FUNC_CODE_INST_CALL: {
- // CALL: [paramattrs, cc, fnty, fnid, arg0, arg1...]
- if (Record.size() < 3)
- return Error("Invalid CALL record");
-
- AttrListPtr PAL = getAttributes(Record[0]);
- unsigned CCInfo = Record[1];
-
- unsigned OpNum = 2;
- Value *Callee;
- if (getValueTypePair(Record, OpNum, NextValueNo, Callee))
- return Error("Invalid CALL record");
-
- const PointerType *OpTy = dyn_cast<PointerType>(Callee->getType());
- const FunctionType *FTy = 0;
- if (OpTy) FTy = dyn_cast<FunctionType>(OpTy->getElementType());
- if (!FTy || Record.size() < FTy->getNumParams()+OpNum)
- return Error("Invalid CALL record");
-
- SmallVector<Value*, 16> Args;
- // Read the fixed params.
- for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) {
- if (FTy->getParamType(i)->getTypeID()==Type::LabelTyID)
- Args.push_back(getBasicBlock(Record[OpNum]));
- else
- Args.push_back(getFnValueByID(Record[OpNum], FTy->getParamType(i)));
- if (Args.back() == 0) return Error("Invalid CALL record");
- }
-
- // Read type/value pairs for varargs params.
- if (!FTy->isVarArg()) {
- if (OpNum != Record.size())
- return Error("Invalid CALL record");
- } else {
- while (OpNum != Record.size()) {
- Value *Op;
- if (getValueTypePair(Record, OpNum, NextValueNo, Op))
- return Error("Invalid CALL record");
- Args.push_back(Op);
- }
- }
-
- I = CallInst::Create(Callee, Args.begin(), Args.end());
- InstructionList.push_back(I);
- cast<CallInst>(I)->setCallingConv(
- static_cast<CallingConv::ID>(CCInfo>>1));
- cast<CallInst>(I)->setTailCall(CCInfo & 1);
- cast<CallInst>(I)->setAttributes(PAL);
- break;
- }
- case bitc::FUNC_CODE_INST_VAARG: { // VAARG: [valistty, valist, instty]
- if (Record.size() < 3)
- return Error("Invalid VAARG record");
- const Type *OpTy = getTypeByID(Record[0]);
- Value *Op = getFnValueByID(Record[1], OpTy);
- const Type *ResTy = getTypeByID(Record[2]);
- if (!OpTy || !Op || !ResTy)
- return Error("Invalid VAARG record");
- I = new VAArgInst(Op, ResTy);
- InstructionList.push_back(I);
- break;
- }
- }
-
- // Add instruction to end of current BB. If there is no current BB, reject
- // this file.
- if (CurBB == 0) {
- delete I;
- return Error("Invalid instruction with no BB");
- }
- CurBB->getInstList().push_back(I);
-
- // If this was a terminator instruction, move to the next block.
- if (isa<TerminatorInst>(I)) {
- ++CurBBNo;
- CurBB = CurBBNo < FunctionBBs.size() ? FunctionBBs[CurBBNo] : 0;
- }
-
- // Non-void values get registered in the value table for future use.
- if (I && !I->getType()->isVoidTy())
- ValueList.AssignValue(I, NextValueNo++);
- }
-
- // Check the function list for unresolved values.
- if (Argument *A = dyn_cast<Argument>(ValueList.back())) {
- if (A->getParent() == 0) {
- // We found at least one unresolved value. Nuke them all to avoid leaks.
- for (unsigned i = ModuleValueListSize, e = ValueList.size(); i != e; ++i){
- if ((A = dyn_cast<Argument>(ValueList.back())) && A->getParent() == 0) {
- A->replaceAllUsesWith(UndefValue::get(A->getType()));
- delete A;
- }
- }
- return Error("Never resolved value found in function!");
- }
- }
-
- // See if anything took the address of blocks in this function. If so,
- // resolve them now.
- /// BlockAddrFwdRefs - These are blockaddr references to basic blocks. These
- /// are resolved lazily when functions are loaded.
- DenseMap<Function*, std::vector<BlockAddrRefTy> >::iterator BAFRI =
- BlockAddrFwdRefs.find(F);
- if (BAFRI != BlockAddrFwdRefs.end()) {
- std::vector<BlockAddrRefTy> &RefList = BAFRI->second;
- for (unsigned i = 0, e = RefList.size(); i != e; ++i) {
- unsigned BlockIdx = RefList[i].first;
- if (BlockIdx >= FunctionBBs.size())
- return Error("Invalid blockaddress block #");
-
- GlobalVariable *FwdRef = RefList[i].second;
- FwdRef->replaceAllUsesWith(BlockAddress::get(F, FunctionBBs[BlockIdx]));
- FwdRef->eraseFromParent();
- }
-
- BlockAddrFwdRefs.erase(BAFRI);
- }
-
- // Trim the value list down to the size it was before we parsed this function.
- ValueList.shrinkTo(ModuleValueListSize);
- std::vector<BasicBlock*>().swap(FunctionBBs);
-
- return false;
-}
-
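For context on the code being removed: ParseFunctionBody turns each FUNC_CODE_INST_* record into one IR instruction using the ordinary constructors (BasicBlock::Create, BinaryOperator::Create, ReturnInst::Create and so on) and appends it to CurBB. A minimal sketch of building the same shape of IR directly against the LLVM 2.8-era headers bundled under libclamav/c++/llvm; the function name "sum" and the exact include paths are illustrative assumptions, not part of the patch:

  #include "llvm/LLVMContext.h"
  #include "llvm/Module.h"
  #include "llvm/Function.h"
  #include "llvm/BasicBlock.h"
  #include "llvm/Instructions.h"
  #include "llvm/DerivedTypes.h"
  #include <vector>

  // Build "define i32 @sum(i32 %a, i32 %b)" returning a+b, with the same
  // constructors the BINOP and RET cases above call.
  llvm::Function *makeSum(llvm::Module &M) {
    llvm::LLVMContext &Ctx = M.getContext();
    const llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
    std::vector<const llvm::Type*> Params(2, I32);
    llvm::FunctionType *FTy = llvm::FunctionType::get(I32, Params, false);
    llvm::Function *F = llvm::Function::Create(
        FTy, llvm::GlobalValue::ExternalLinkage, "sum", &M);

    llvm::Function::arg_iterator AI = F->arg_begin();
    llvm::Value *A = &*AI; ++AI;
    llvm::Value *B = &*AI;

    llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
    llvm::Value *Add = llvm::BinaryOperator::Create(
        llvm::Instruction::Add, A, B, "add", BB);   // appended to BB
    llvm::ReturnInst::Create(Ctx, Add, BB);         // terminator, like INST_RET
    return F;
  }
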
-//===----------------------------------------------------------------------===//
-// GVMaterializer implementation
-//===----------------------------------------------------------------------===//
-
-
-bool BitcodeReader::isMaterializable(const GlobalValue *GV) const {
- if (const Function *F = dyn_cast<Function>(GV)) {
- return F->isDeclaration() &&
- DeferredFunctionInfo.count(const_cast<Function*>(F));
- }
- return false;
-}
-
-bool BitcodeReader::Materialize(GlobalValue *GV, std::string *ErrInfo) {
- Function *F = dyn_cast<Function>(GV);
- // If it's not a function or is already material, ignore the request.
- if (!F || !F->isMaterializable()) return false;
-
- DenseMap<Function*, uint64_t>::iterator DFII = DeferredFunctionInfo.find(F);
- assert(DFII != DeferredFunctionInfo.end() && "Deferred function not found!");
-
- // Move the bit stream to the saved position of the deferred function body.
- Stream.JumpToBit(DFII->second);
-
- if (ParseFunctionBody(F)) {
- if (ErrInfo) *ErrInfo = ErrorString;
- return true;
- }
-
- // Upgrade any old intrinsic calls in the function.
- for (UpgradedIntrinsicMap::iterator I = UpgradedIntrinsics.begin(),
- E = UpgradedIntrinsics.end(); I != E; ++I) {
- if (I->first != I->second) {
- for (Value::use_iterator UI = I->first->use_begin(),
- UE = I->first->use_end(); UI != UE; ) {
- if (CallInst* CI = dyn_cast<CallInst>(*UI++))
- UpgradeIntrinsicCall(CI, I->second);
- }
- }
- }
-
- return false;
-}
-
-bool BitcodeReader::isDematerializable(const GlobalValue *GV) const {
- const Function *F = dyn_cast<Function>(GV);
- if (!F || F->isDeclaration())
- return false;
- return DeferredFunctionInfo.count(const_cast<Function*>(F));
-}
-
-void BitcodeReader::Dematerialize(GlobalValue *GV) {
- Function *F = dyn_cast<Function>(GV);
- // If this function isn't dematerializable, this is a noop.
- if (!F || !isDematerializable(F))
- return;
-
- assert(DeferredFunctionInfo.count(F) && "No info to read function later?");
-
- // Just forget the function body, we can remat it later.
- F->deleteBody();
-}
-
-
-bool BitcodeReader::MaterializeModule(Module *M, std::string *ErrInfo) {
- assert(M == TheModule &&
- "Can only Materialize the Module this BitcodeReader is attached to.");
- // Iterate over the module, deserializing any functions that are still on
- // disk.
- for (Module::iterator F = TheModule->begin(), E = TheModule->end();
- F != E; ++F)
- if (F->isMaterializable() &&
- Materialize(F, ErrInfo))
- return true;
-
- // Upgrade any intrinsic calls that slipped through (should not happen!) and
- // delete the old functions to clean up. We can't do this unless the entire
- // module is materialized because there could always be another function body
- // with calls to the old function.
- for (std::vector<std::pair<Function*, Function*> >::iterator I =
- UpgradedIntrinsics.begin(), E = UpgradedIntrinsics.end(); I != E; ++I) {
- if (I->first != I->second) {
- for (Value::use_iterator UI = I->first->use_begin(),
- UE = I->first->use_end(); UI != UE; ) {
- if (CallInst* CI = dyn_cast<CallInst>(*UI++))
- UpgradeIntrinsicCall(CI, I->second);
- }
- if (!I->first->use_empty())
- I->first->replaceAllUsesWith(I->second);
- I->first->eraseFromParent();
- }
- }
- std::vector<std::pair<Function*, Function*> >().swap(UpgradedIntrinsics);
-
- // Check debug info intrinsics.
- CheckDebugInfoIntrinsics(TheModule);
-
- return false;
-}
-
-
-//===----------------------------------------------------------------------===//
-// External interface
-//===----------------------------------------------------------------------===//
-
-/// getLazyBitcodeModule - lazy function-at-a-time loading from a file.
-///
-Module *llvm::getLazyBitcodeModule(MemoryBuffer *Buffer,
- LLVMContext& Context,
- std::string *ErrMsg) {
- Module *M = new Module(Buffer->getBufferIdentifier(), Context);
- BitcodeReader *R = new BitcodeReader(Buffer, Context);
- M->setMaterializer(R);
- if (R->ParseBitcodeInto(M)) {
- if (ErrMsg)
- *ErrMsg = R->getErrorString();
-
- delete M; // Also deletes R.
- return 0;
- }
- // Have the BitcodeReader dtor delete 'Buffer'.
- R->setBufferOwned(true);
- return M;
-}
-
-/// ParseBitcodeFile - Read the specified bitcode file, returning the module.
-/// If an error occurs, return null and fill in *ErrMsg if non-null.
-Module *llvm::ParseBitcodeFile(MemoryBuffer *Buffer, LLVMContext& Context,
- std::string *ErrMsg){
- Module *M = getLazyBitcodeModule(Buffer, Context, ErrMsg);
- if (!M) return 0;
-
- // Don't let the BitcodeReader dtor delete 'Buffer', regardless of whether
- // there was an error.
- static_cast<BitcodeReader*>(M->getMaterializer())->setBufferOwned(false);
-
- // Read in the entire module, and destroy the BitcodeReader.
- if (M->MaterializeAllPermanently(ErrMsg)) {
- delete M;
- return NULL;
- }
- return M;
-}
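Before moving on to the header, note how the two entry points just removed were meant to be called: getLazyBitcodeModule hands the MemoryBuffer to the reader (setBufferOwned(true)) and defers function bodies to the materializer, while ParseBitcodeFile materializes everything and leaves the buffer with the caller. A minimal caller sketch; the file name "in.bc" and the MemoryBuffer::getFile signature are assumptions based on the 2.8-era tree:

  #include "llvm/LLVMContext.h"
  #include "llvm/Module.h"
  #include "llvm/Bitcode/ReaderWriter.h"
  #include "llvm/Support/MemoryBuffer.h"
  #include <string>

  // Eager load: the caller keeps ownership of the buffer in all cases.
  llvm::Module *loadEager(llvm::LLVMContext &Ctx, std::string &Err) {
    llvm::MemoryBuffer *Buf = llvm::MemoryBuffer::getFile("in.bc", &Err);
    if (!Buf) return 0;
    llvm::Module *M = llvm::ParseBitcodeFile(Buf, Ctx, &Err);
    delete Buf;                  // safe: bodies were materialized permanently
    return M;                    // 0 on error, Err carries the reader message
  }

  // Lazy load: on success the reader owns the buffer, so do not delete it.
  llvm::Module *loadLazy(llvm::LLVMContext &Ctx, std::string &Err) {
    llvm::MemoryBuffer *Buf = llvm::MemoryBuffer::getFile("in.bc", &Err);
    if (!Buf) return 0;
    llvm::Module *M = llvm::getLazyBitcodeModule(Buf, Ctx, &Err);
    if (!M) { delete Buf; return 0; }           // reader never took ownership
    if (M->MaterializeAllPermanently(&Err)) {   // optional: force all bodies
      delete M;
      return 0;
    }
    return M;
  }
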
diff --git a/libclamav/c++/llvm/lib/Bitcode/Reader/BitcodeReader.h b/libclamav/c++/llvm/lib/Bitcode/Reader/BitcodeReader.h
deleted file mode 100644
index 55c71f7..0000000
--- a/libclamav/c++/llvm/lib/Bitcode/Reader/BitcodeReader.h
+++ /dev/null
@@ -1,267 +0,0 @@
-//===- BitcodeReader.h - Internal BitcodeReader impl ------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This header defines the BitcodeReader class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef BITCODE_READER_H
-#define BITCODE_READER_H
-
-#include "llvm/GVMaterializer.h"
-#include "llvm/Attributes.h"
-#include "llvm/Type.h"
-#include "llvm/OperandTraits.h"
-#include "llvm/Bitcode/BitstreamReader.h"
-#include "llvm/Bitcode/LLVMBitCodes.h"
-#include "llvm/Support/ValueHandle.h"
-#include "llvm/ADT/DenseMap.h"
-#include <vector>
-
-namespace llvm {
- class MemoryBuffer;
- class LLVMContext;
-
-//===----------------------------------------------------------------------===//
-// BitcodeReaderValueList Class
-//===----------------------------------------------------------------------===//
-
-class BitcodeReaderValueList {
- std::vector<WeakVH> ValuePtrs;
-
- /// ResolveConstants - As we resolve forward-referenced constants, we add
- /// information about them to this vector. This allows us to resolve them in
- /// bulk instead of resolving each reference at a time. See the code in
- /// ResolveConstantForwardRefs for more information about this.
- ///
- /// The key of this vector is the placeholder constant, the value is the slot
- /// number that holds the resolved value.
- typedef std::vector<std::pair<Constant*, unsigned> > ResolveConstantsTy;
- ResolveConstantsTy ResolveConstants;
- LLVMContext& Context;
-public:
- BitcodeReaderValueList(LLVMContext& C) : Context(C) {}
- ~BitcodeReaderValueList() {
- assert(ResolveConstants.empty() && "Constants not resolved?");
- }
-
- // vector compatibility methods
- unsigned size() const { return ValuePtrs.size(); }
- void resize(unsigned N) { ValuePtrs.resize(N); }
- void push_back(Value *V) {
- ValuePtrs.push_back(V);
- }
-
- void clear() {
- assert(ResolveConstants.empty() && "Constants not resolved?");
- ValuePtrs.clear();
- }
-
- Value *operator[](unsigned i) const {
- assert(i < ValuePtrs.size());
- return ValuePtrs[i];
- }
-
- Value *back() const { return ValuePtrs.back(); }
- void pop_back() { ValuePtrs.pop_back(); }
- bool empty() const { return ValuePtrs.empty(); }
- void shrinkTo(unsigned N) {
- assert(N <= size() && "Invalid shrinkTo request!");
- ValuePtrs.resize(N);
- }
-
- Constant *getConstantFwdRef(unsigned Idx, const Type *Ty);
- Value *getValueFwdRef(unsigned Idx, const Type *Ty);
-
- void AssignValue(Value *V, unsigned Idx);
-
- /// ResolveConstantForwardRefs - Once all constants are read, this method bulk
- /// resolves any forward references.
- void ResolveConstantForwardRefs();
-};
-
-
-//===----------------------------------------------------------------------===//
-// BitcodeReaderMDValueList Class
-//===----------------------------------------------------------------------===//
-
-class BitcodeReaderMDValueList {
- std::vector<WeakVH> MDValuePtrs;
-
- LLVMContext &Context;
-public:
- BitcodeReaderMDValueList(LLVMContext& C) : Context(C) {}
-
- // vector compatibility methods
- unsigned size() const { return MDValuePtrs.size(); }
- void resize(unsigned N) { MDValuePtrs.resize(N); }
- void push_back(Value *V) { MDValuePtrs.push_back(V); }
- void clear() { MDValuePtrs.clear(); }
- Value *back() const { return MDValuePtrs.back(); }
- void pop_back() { MDValuePtrs.pop_back(); }
- bool empty() const { return MDValuePtrs.empty(); }
-
- Value *operator[](unsigned i) const {
- assert(i < MDValuePtrs.size());
- return MDValuePtrs[i];
- }
-
- void shrinkTo(unsigned N) {
- assert(N <= size() && "Invalid shrinkTo request!");
- MDValuePtrs.resize(N);
- }
-
- Value *getValueFwdRef(unsigned Idx);
- void AssignValue(Value *V, unsigned Idx);
-};
-
-class BitcodeReader : public GVMaterializer {
- LLVMContext &Context;
- Module *TheModule;
- MemoryBuffer *Buffer;
- bool BufferOwned;
- BitstreamReader StreamFile;
- BitstreamCursor Stream;
-
- const char *ErrorString;
-
- std::vector<PATypeHolder> TypeList;
- BitcodeReaderValueList ValueList;
- BitcodeReaderMDValueList MDValueList;
- SmallVector<Instruction *, 64> InstructionList;
-
- std::vector<std::pair<GlobalVariable*, unsigned> > GlobalInits;
- std::vector<std::pair<GlobalAlias*, unsigned> > AliasInits;
-
- /// MAttributes - The set of attributes by index. Index zero in the
- /// file is for null, and is thus not represented here. As such all indices
- /// are off by one.
- std::vector<AttrListPtr> MAttributes;
-
- /// FunctionBBs - While parsing a function body, this is a list of the basic
- /// blocks for the function.
- std::vector<BasicBlock*> FunctionBBs;
-
- // When reading the module header, this list is populated with functions that
- // have bodies later in the file.
- std::vector<Function*> FunctionsWithBodies;
-
- // When intrinsic functions are encountered which require upgrading they are
- // stored here with their replacement function.
- typedef std::vector<std::pair<Function*, Function*> > UpgradedIntrinsicMap;
- UpgradedIntrinsicMap UpgradedIntrinsics;
-
- // After the module header has been read, the FunctionsWithBodies list is
- // reversed. This keeps track of whether we've done this yet.
- bool HasReversedFunctionsWithBodies;
-
- /// DeferredFunctionInfo - When function bodies are initially scanned, this
- /// map contains info about where to find deferred function body in the
- /// stream.
- DenseMap<Function*, uint64_t> DeferredFunctionInfo;
-
- /// BlockAddrFwdRefs - These are blockaddr references to basic blocks. These
- /// are resolved lazily when functions are loaded.
- typedef std::pair<unsigned, GlobalVariable*> BlockAddrRefTy;
- DenseMap<Function*, std::vector<BlockAddrRefTy> > BlockAddrFwdRefs;
-
-public:
- explicit BitcodeReader(MemoryBuffer *buffer, LLVMContext &C)
- : Context(C), TheModule(0), Buffer(buffer), BufferOwned(false),
- ErrorString(0), ValueList(C), MDValueList(C) {
- HasReversedFunctionsWithBodies = false;
- }
- ~BitcodeReader() {
- FreeState();
- }
-
- void FreeState();
-
- /// setBufferOwned - If this is true, the reader will destroy the MemoryBuffer
- /// when the reader is destroyed.
- void setBufferOwned(bool Owned) { BufferOwned = Owned; }
-
- virtual bool isMaterializable(const GlobalValue *GV) const;
- virtual bool isDematerializable(const GlobalValue *GV) const;
- virtual bool Materialize(GlobalValue *GV, std::string *ErrInfo = 0);
- virtual bool MaterializeModule(Module *M, std::string *ErrInfo = 0);
- virtual void Dematerialize(GlobalValue *GV);
-
- bool Error(const char *Str) {
- ErrorString = Str;
- return true;
- }
- const char *getErrorString() const { return ErrorString; }
-
- /// @brief Main interface to parsing a bitcode buffer.
- /// @returns true if an error occurred.
- bool ParseBitcodeInto(Module *M);
-private:
- const Type *getTypeByID(unsigned ID, bool isTypeTable = false);
- Value *getFnValueByID(unsigned ID, const Type *Ty) {
- if (Ty == Type::getMetadataTy(Context))
- return MDValueList.getValueFwdRef(ID);
- else
- return ValueList.getValueFwdRef(ID, Ty);
- }
- BasicBlock *getBasicBlock(unsigned ID) const {
- if (ID >= FunctionBBs.size()) return 0; // Invalid ID
- return FunctionBBs[ID];
- }
- AttrListPtr getAttributes(unsigned i) const {
- if (i-1 < MAttributes.size())
- return MAttributes[i-1];
- return AttrListPtr();
- }
-
- /// getValueTypePair - Read a value/type pair out of the specified record from
- /// slot 'Slot'. Increment Slot past the number of slots used in the record.
- /// Return true on failure.
- bool getValueTypePair(SmallVector<uint64_t, 64> &Record, unsigned &Slot,
- unsigned InstNum, Value *&ResVal) {
- if (Slot == Record.size()) return true;
- unsigned ValNo = (unsigned)Record[Slot++];
- if (ValNo < InstNum) {
- // If this is not a forward reference, just return the value we already
- // have.
- ResVal = getFnValueByID(ValNo, 0);
- return ResVal == 0;
- } else if (Slot == Record.size()) {
- return true;
- }
-
- unsigned TypeNo = (unsigned)Record[Slot++];
- ResVal = getFnValueByID(ValNo, getTypeByID(TypeNo));
- return ResVal == 0;
- }
- bool getValue(SmallVector<uint64_t, 64> &Record, unsigned &Slot,
- const Type *Ty, Value *&ResVal) {
- if (Slot == Record.size()) return true;
- unsigned ValNo = (unsigned)Record[Slot++];
- ResVal = getFnValueByID(ValNo, Ty);
- return ResVal == 0;
- }
-
-
- bool ParseModule();
- bool ParseAttributeBlock();
- bool ParseTypeTable();
- bool ParseTypeSymbolTable();
- bool ParseValueSymbolTable();
- bool ParseConstants();
- bool RememberAndSkipFunctionBody();
- bool ParseFunctionBody(Function *F);
- bool ResolveGlobalAndAliasInits();
- bool ParseMetadata();
- bool ParseMetadataAttachment();
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/libclamav/c++/llvm/lib/Bitcode/Reader/CMakeLists.txt b/libclamav/c++/llvm/lib/Bitcode/Reader/CMakeLists.txt
deleted file mode 100644
index 693d431..0000000
--- a/libclamav/c++/llvm/lib/Bitcode/Reader/CMakeLists.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-add_llvm_library(LLVMBitReader
- BitReader.cpp
- BitcodeReader.cpp
- )
diff --git a/libclamav/c++/llvm/lib/Bitcode/Reader/Makefile b/libclamav/c++/llvm/lib/Bitcode/Reader/Makefile
deleted file mode 100644
index 59af8d5..0000000
--- a/libclamav/c++/llvm/lib/Bitcode/Reader/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Bitcode/Reader/Makefile -------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMBitReader
-BUILD_ARCHIVE = 1
-
-include $(LEVEL)/Makefile.common
-
diff --git a/libclamav/c++/llvm/lib/Bitcode/Writer/BitWriter.cpp b/libclamav/c++/llvm/lib/Bitcode/Writer/BitWriter.cpp
deleted file mode 100644
index 4288422..0000000
--- a/libclamav/c++/llvm/lib/Bitcode/Writer/BitWriter.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-//===-- BitWriter.cpp -----------------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm-c/BitWriter.h"
-#include "llvm/Bitcode/ReaderWriter.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-
-/*===-- Operations on modules ---------------------------------------------===*/
-
-int LLVMWriteBitcodeToFile(LLVMModuleRef M, const char *Path) {
- std::string ErrorInfo;
- raw_fd_ostream OS(Path, ErrorInfo,
- raw_fd_ostream::F_Binary);
-
- if (!ErrorInfo.empty())
- return -1;
-
- WriteBitcodeToFile(unwrap(M), OS);
- return 0;
-}
-
-int LLVMWriteBitcodeToFD(LLVMModuleRef M, int FD, int ShouldClose,
- int Unbuffered) {
- raw_fd_ostream OS(FD, ShouldClose, Unbuffered);
-
- WriteBitcodeToFile(unwrap(M), OS);
- return 0;
-}
-
-int LLVMWriteBitcodeToFileHandle(LLVMModuleRef M, int FileHandle) {
- return LLVMWriteBitcodeToFD(M, FileHandle, true, false);
-}
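The three C wrappers above are thin shims over WriteBitcodeToFile from llvm/Bitcode/ReaderWriter.h. The equivalent through the C++ API, as a sketch (the output name "out.bc" and the helper name are illustrative):

  #include "llvm/Module.h"
  #include "llvm/Bitcode/ReaderWriter.h"
  #include "llvm/Support/raw_ostream.h"
  #include <string>

  // Serialize a module to out.bc the same way LLVMWriteBitcodeToFile does,
  // but without going through the C bindings.
  bool saveModule(const llvm::Module *M) {
    std::string ErrorInfo;
    llvm::raw_fd_ostream OS("out.bc", ErrorInfo,
                            llvm::raw_fd_ostream::F_Binary);
    if (!ErrorInfo.empty())
      return false;                    // could not open the file
    llvm::WriteBitcodeToFile(M, OS);   // declared in Bitcode/ReaderWriter.h
    return true;                       // OS flushes and closes in its dtor
  }
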
diff --git a/libclamav/c++/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/libclamav/c++/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
deleted file mode 100644
index 82e73b5..0000000
--- a/libclamav/c++/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ /dev/null
@@ -1,1657 +0,0 @@
-//===--- Bitcode/Writer/BitcodeWriter.cpp - Bitcode Writer ----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Bitcode writer implementation.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Bitcode/ReaderWriter.h"
-#include "llvm/Bitcode/BitstreamWriter.h"
-#include "llvm/Bitcode/LLVMBitCodes.h"
-#include "ValueEnumerator.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/InlineAsm.h"
-#include "llvm/Instructions.h"
-#include "llvm/Module.h"
-#include "llvm/Operator.h"
-#include "llvm/TypeSymbolTable.h"
-#include "llvm/ValueSymbolTable.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Program.h"
-using namespace llvm;
-
-/// These are manifest constants used by the bitcode writer. They do not need to
-/// be kept in sync with the reader, but need to be consistent within this file.
-enum {
- CurVersion = 0,
-
- // VALUE_SYMTAB_BLOCK abbrev id's.
- VST_ENTRY_8_ABBREV = bitc::FIRST_APPLICATION_ABBREV,
- VST_ENTRY_7_ABBREV,
- VST_ENTRY_6_ABBREV,
- VST_BBENTRY_6_ABBREV,
-
- // CONSTANTS_BLOCK abbrev id's.
- CONSTANTS_SETTYPE_ABBREV = bitc::FIRST_APPLICATION_ABBREV,
- CONSTANTS_INTEGER_ABBREV,
- CONSTANTS_CE_CAST_Abbrev,
- CONSTANTS_NULL_Abbrev,
-
- // FUNCTION_BLOCK abbrev id's.
- FUNCTION_INST_LOAD_ABBREV = bitc::FIRST_APPLICATION_ABBREV,
- FUNCTION_INST_BINOP_ABBREV,
- FUNCTION_INST_BINOP_FLAGS_ABBREV,
- FUNCTION_INST_CAST_ABBREV,
- FUNCTION_INST_RET_VOID_ABBREV,
- FUNCTION_INST_RET_VAL_ABBREV,
- FUNCTION_INST_UNREACHABLE_ABBREV
-};
-
-
-static unsigned GetEncodedCastOpcode(unsigned Opcode) {
- switch (Opcode) {
- default: llvm_unreachable("Unknown cast instruction!");
- case Instruction::Trunc : return bitc::CAST_TRUNC;
- case Instruction::ZExt : return bitc::CAST_ZEXT;
- case Instruction::SExt : return bitc::CAST_SEXT;
- case Instruction::FPToUI : return bitc::CAST_FPTOUI;
- case Instruction::FPToSI : return bitc::CAST_FPTOSI;
- case Instruction::UIToFP : return bitc::CAST_UITOFP;
- case Instruction::SIToFP : return bitc::CAST_SITOFP;
- case Instruction::FPTrunc : return bitc::CAST_FPTRUNC;
- case Instruction::FPExt : return bitc::CAST_FPEXT;
- case Instruction::PtrToInt: return bitc::CAST_PTRTOINT;
- case Instruction::IntToPtr: return bitc::CAST_INTTOPTR;
- case Instruction::BitCast : return bitc::CAST_BITCAST;
- }
-}
-
-static unsigned GetEncodedBinaryOpcode(unsigned Opcode) {
- switch (Opcode) {
- default: llvm_unreachable("Unknown binary instruction!");
- case Instruction::Add:
- case Instruction::FAdd: return bitc::BINOP_ADD;
- case Instruction::Sub:
- case Instruction::FSub: return bitc::BINOP_SUB;
- case Instruction::Mul:
- case Instruction::FMul: return bitc::BINOP_MUL;
- case Instruction::UDiv: return bitc::BINOP_UDIV;
- case Instruction::FDiv:
- case Instruction::SDiv: return bitc::BINOP_SDIV;
- case Instruction::URem: return bitc::BINOP_UREM;
- case Instruction::FRem:
- case Instruction::SRem: return bitc::BINOP_SREM;
- case Instruction::Shl: return bitc::BINOP_SHL;
- case Instruction::LShr: return bitc::BINOP_LSHR;
- case Instruction::AShr: return bitc::BINOP_ASHR;
- case Instruction::And: return bitc::BINOP_AND;
- case Instruction::Or: return bitc::BINOP_OR;
- case Instruction::Xor: return bitc::BINOP_XOR;
- }
-}
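Note that the integer and floating point forms share one bitcode value here (Add and FAdd both map to BINOP_ADD, SDiv and FDiv both map to BINOP_SDIV), so the reader's GetDecodedBinaryOpcode, used by the BINOP case in ParseFunctionBody above, needs the operand type to recover the exact LLVM opcode. A hypothetical helper showing just that branch; it mirrors the reader's logic and is not the real decoder:

  #include "llvm/Instruction.h"
  #include "llvm/Type.h"
  #include "llvm/Bitcode/LLVMBitCodes.h"

  // Hypothetical mirror of one branch of GetDecodedBinaryOpcode: the same
  // BINOP_ADD record becomes FAdd on floating point operands, Add otherwise.
  int decodeAddLike(unsigned BitcodeOpc, const llvm::Type *OperandTy) {
    if (BitcodeOpc != llvm::bitc::BINOP_ADD)
      return -1;                                 // not covered by this sketch
    return OperandTy->isFPOrFPVectorTy() ? llvm::Instruction::FAdd
                                         : llvm::Instruction::Add;
  }
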
-
-
-
-static void WriteStringRecord(unsigned Code, const std::string &Str,
- unsigned AbbrevToUse, BitstreamWriter &Stream) {
- SmallVector<unsigned, 64> Vals;
-
- // Code: [strchar x N]
- for (unsigned i = 0, e = Str.size(); i != e; ++i)
- Vals.push_back(Str[i]);
-
- // Emit the finished record.
- Stream.EmitRecord(Code, Vals, AbbrevToUse);
-}
-
-// Emit information about parameter attributes.
-static void WriteAttributeTable(const ValueEnumerator &VE,
- BitstreamWriter &Stream) {
- const std::vector<AttrListPtr> &Attrs = VE.getAttributes();
- if (Attrs.empty()) return;
-
- Stream.EnterSubblock(bitc::PARAMATTR_BLOCK_ID, 3);
-
- SmallVector<uint64_t, 64> Record;
- for (unsigned i = 0, e = Attrs.size(); i != e; ++i) {
- const AttrListPtr &A = Attrs[i];
- for (unsigned i = 0, e = A.getNumSlots(); i != e; ++i) {
- const AttributeWithIndex &PAWI = A.getSlot(i);
- Record.push_back(PAWI.Index);
-
- // FIXME: remove in LLVM 3.0
- // Store the alignment in the bitcode as a 16-bit raw value instead of a
- // 5-bit log2 encoded value. Shift the bits above the alignment up by
- // 11 bits.
- uint64_t FauxAttr = PAWI.Attrs & 0xffff;
- if (PAWI.Attrs & Attribute::Alignment)
- FauxAttr |= (1ull<<16)<<(((PAWI.Attrs & Attribute::Alignment)-1) >> 16);
- FauxAttr |= (PAWI.Attrs & (0x3FFull << 21)) << 11;
-
- Record.push_back(FauxAttr);
- }
-
- Stream.EmitRecord(bitc::PARAMATTR_CODE_ENTRY, Record);
- Record.clear();
- }
-
- Stream.ExitBlock();
-}
-
-/// WriteTypeTable - Write out the type table for a module.
-static void WriteTypeTable(const ValueEnumerator &VE, BitstreamWriter &Stream) {
- const ValueEnumerator::TypeList &TypeList = VE.getTypes();
-
- Stream.EnterSubblock(bitc::TYPE_BLOCK_ID, 4 /*count from # abbrevs */);
- SmallVector<uint64_t, 64> TypeVals;
-
- // Abbrev for TYPE_CODE_POINTER.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_POINTER));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- Log2_32_Ceil(VE.getTypes().size()+1)));
- Abbv->Add(BitCodeAbbrevOp(0)); // Addrspace = 0
- unsigned PtrAbbrev = Stream.EmitAbbrev(Abbv);
-
- // Abbrev for TYPE_CODE_FUNCTION.
- Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_FUNCTION));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isvararg
- Abbv->Add(BitCodeAbbrevOp(0)); // FIXME: DEAD value, remove in LLVM 3.0
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- Log2_32_Ceil(VE.getTypes().size()+1)));
- unsigned FunctionAbbrev = Stream.EmitAbbrev(Abbv);
-
- // Abbrev for TYPE_CODE_STRUCT.
- Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ispacked
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- Log2_32_Ceil(VE.getTypes().size()+1)));
- unsigned StructAbbrev = Stream.EmitAbbrev(Abbv);
-
- // Abbrev for TYPE_CODE_UNION.
- Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_UNION));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- Log2_32_Ceil(VE.getTypes().size()+1)));
- unsigned UnionAbbrev = Stream.EmitAbbrev(Abbv);
-
- // Abbrev for TYPE_CODE_ARRAY.
- Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_ARRAY));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // size
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- Log2_32_Ceil(VE.getTypes().size()+1)));
- unsigned ArrayAbbrev = Stream.EmitAbbrev(Abbv);
-
- // Emit an entry count so the reader can reserve space.
- TypeVals.push_back(TypeList.size());
- Stream.EmitRecord(bitc::TYPE_CODE_NUMENTRY, TypeVals);
- TypeVals.clear();
-
- // Loop over all of the types, emitting each in turn.
- for (unsigned i = 0, e = TypeList.size(); i != e; ++i) {
- const Type *T = TypeList[i].first;
- int AbbrevToUse = 0;
- unsigned Code = 0;
-
- switch (T->getTypeID()) {
- default: llvm_unreachable("Unknown type!");
- case Type::VoidTyID: Code = bitc::TYPE_CODE_VOID; break;
- case Type::FloatTyID: Code = bitc::TYPE_CODE_FLOAT; break;
- case Type::DoubleTyID: Code = bitc::TYPE_CODE_DOUBLE; break;
- case Type::X86_FP80TyID: Code = bitc::TYPE_CODE_X86_FP80; break;
- case Type::FP128TyID: Code = bitc::TYPE_CODE_FP128; break;
- case Type::PPC_FP128TyID: Code = bitc::TYPE_CODE_PPC_FP128; break;
- case Type::LabelTyID: Code = bitc::TYPE_CODE_LABEL; break;
- case Type::OpaqueTyID: Code = bitc::TYPE_CODE_OPAQUE; break;
- case Type::MetadataTyID: Code = bitc::TYPE_CODE_METADATA; break;
- case Type::IntegerTyID:
- // INTEGER: [width]
- Code = bitc::TYPE_CODE_INTEGER;
- TypeVals.push_back(cast<IntegerType>(T)->getBitWidth());
- break;
- case Type::PointerTyID: {
- const PointerType *PTy = cast<PointerType>(T);
- // POINTER: [pointee type, address space]
- Code = bitc::TYPE_CODE_POINTER;
- TypeVals.push_back(VE.getTypeID(PTy->getElementType()));
- unsigned AddressSpace = PTy->getAddressSpace();
- TypeVals.push_back(AddressSpace);
- if (AddressSpace == 0) AbbrevToUse = PtrAbbrev;
- break;
- }
- case Type::FunctionTyID: {
- const FunctionType *FT = cast<FunctionType>(T);
- // FUNCTION: [isvararg, attrid, retty, paramty x N]
- Code = bitc::TYPE_CODE_FUNCTION;
- TypeVals.push_back(FT->isVarArg());
- TypeVals.push_back(0); // FIXME: DEAD: remove in llvm 3.0
- TypeVals.push_back(VE.getTypeID(FT->getReturnType()));
- for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i)
- TypeVals.push_back(VE.getTypeID(FT->getParamType(i)));
- AbbrevToUse = FunctionAbbrev;
- break;
- }
- case Type::StructTyID: {
- const StructType *ST = cast<StructType>(T);
- // STRUCT: [ispacked, eltty x N]
- Code = bitc::TYPE_CODE_STRUCT;
- TypeVals.push_back(ST->isPacked());
- // Output all of the element types.
- for (StructType::element_iterator I = ST->element_begin(),
- E = ST->element_end(); I != E; ++I)
- TypeVals.push_back(VE.getTypeID(*I));
- AbbrevToUse = StructAbbrev;
- break;
- }
- case Type::UnionTyID: {
- const UnionType *UT = cast<UnionType>(T);
- // UNION: [eltty x N]
- Code = bitc::TYPE_CODE_UNION;
- // Output all of the element types.
- for (UnionType::element_iterator I = UT->element_begin(),
- E = UT->element_end(); I != E; ++I)
- TypeVals.push_back(VE.getTypeID(*I));
- AbbrevToUse = UnionAbbrev;
- break;
- }
- case Type::ArrayTyID: {
- const ArrayType *AT = cast<ArrayType>(T);
- // ARRAY: [numelts, eltty]
- Code = bitc::TYPE_CODE_ARRAY;
- TypeVals.push_back(AT->getNumElements());
- TypeVals.push_back(VE.getTypeID(AT->getElementType()));
- AbbrevToUse = ArrayAbbrev;
- break;
- }
- case Type::VectorTyID: {
- const VectorType *VT = cast<VectorType>(T);
- // VECTOR [numelts, eltty]
- Code = bitc::TYPE_CODE_VECTOR;
- TypeVals.push_back(VT->getNumElements());
- TypeVals.push_back(VE.getTypeID(VT->getElementType()));
- break;
- }
- }
-
- // Emit the finished record.
- Stream.EmitRecord(Code, TypeVals, AbbrevToUse);
- TypeVals.clear();
- }
-
- Stream.ExitBlock();
-}
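The BitCodeAbbrev objects built above are how the writer teaches the bitstream layer a compact, fixed layout for records it emits often; EmitRecord then prepends the record code and encodes the operands through the chosen abbreviation. Stripped down to one abbreviation outside a real module (the block choice, field widths and the i32 example are arbitrary for illustration):

  #include "llvm/Bitcode/BitstreamWriter.h"
  #include "llvm/Bitcode/LLVMBitCodes.h"
  #include "llvm/ADT/SmallVector.h"
  #include <vector>

  // Define an abbreviation for TYPE_CODE_INTEGER ([width]) and emit one
  // record through it; the encoded bits land in Buffer.
  void emitIntegerTypeRecord(std::vector<unsigned char> &Buffer) {
    llvm::BitstreamWriter Stream(Buffer);
    Stream.EnterSubblock(llvm::bitc::TYPE_BLOCK_ID, 3);

    llvm::BitCodeAbbrev *Abbv = new llvm::BitCodeAbbrev();
    Abbv->Add(llvm::BitCodeAbbrevOp(llvm::bitc::TYPE_CODE_INTEGER));
    Abbv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 8)); // width
    unsigned IntAbbrev = Stream.EmitAbbrev(Abbv);  // owned by the stream now

    llvm::SmallVector<unsigned, 2> Vals;
    Vals.push_back(32);                            // an i32 type record
    Stream.EmitRecord(llvm::bitc::TYPE_CODE_INTEGER, Vals, IntAbbrev);

    Stream.ExitBlock();
  }
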
-
-static unsigned getEncodedLinkage(const GlobalValue *GV) {
- switch (GV->getLinkage()) {
- default: llvm_unreachable("Invalid linkage!");
- case GlobalValue::ExternalLinkage: return 0;
- case GlobalValue::WeakAnyLinkage: return 1;
- case GlobalValue::AppendingLinkage: return 2;
- case GlobalValue::InternalLinkage: return 3;
- case GlobalValue::LinkOnceAnyLinkage: return 4;
- case GlobalValue::DLLImportLinkage: return 5;
- case GlobalValue::DLLExportLinkage: return 6;
- case GlobalValue::ExternalWeakLinkage: return 7;
- case GlobalValue::CommonLinkage: return 8;
- case GlobalValue::PrivateLinkage: return 9;
- case GlobalValue::WeakODRLinkage: return 10;
- case GlobalValue::LinkOnceODRLinkage: return 11;
- case GlobalValue::AvailableExternallyLinkage: return 12;
- case GlobalValue::LinkerPrivateLinkage: return 13;
- }
-}
-
-static unsigned getEncodedVisibility(const GlobalValue *GV) {
- switch (GV->getVisibility()) {
- default: llvm_unreachable("Invalid visibility!");
- case GlobalValue::DefaultVisibility: return 0;
- case GlobalValue::HiddenVisibility: return 1;
- case GlobalValue::ProtectedVisibility: return 2;
- }
-}
-
-// Emit top-level description of module, including target triple, inline asm,
-// descriptors for global variables, and function prototype info.
-static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
- BitstreamWriter &Stream) {
- // Emit the list of dependent libraries for the Module.
- for (Module::lib_iterator I = M->lib_begin(), E = M->lib_end(); I != E; ++I)
- WriteStringRecord(bitc::MODULE_CODE_DEPLIB, *I, 0/*TODO*/, Stream);
-
- // Emit various pieces of data attached to a module.
- if (!M->getTargetTriple().empty())
- WriteStringRecord(bitc::MODULE_CODE_TRIPLE, M->getTargetTriple(),
- 0/*TODO*/, Stream);
- if (!M->getDataLayout().empty())
- WriteStringRecord(bitc::MODULE_CODE_DATALAYOUT, M->getDataLayout(),
- 0/*TODO*/, Stream);
- if (!M->getModuleInlineAsm().empty())
- WriteStringRecord(bitc::MODULE_CODE_ASM, M->getModuleInlineAsm(),
- 0/*TODO*/, Stream);
-
- // Emit information about sections and GC, computing how many there are. Also
- // compute the maximum alignment value.
- std::map<std::string, unsigned> SectionMap;
- std::map<std::string, unsigned> GCMap;
- unsigned MaxAlignment = 0;
- unsigned MaxGlobalType = 0;
- for (Module::const_global_iterator GV = M->global_begin(),E = M->global_end();
- GV != E; ++GV) {
- MaxAlignment = std::max(MaxAlignment, GV->getAlignment());
- MaxGlobalType = std::max(MaxGlobalType, VE.getTypeID(GV->getType()));
-
- if (!GV->hasSection()) continue;
- // Give section names unique ID's.
- unsigned &Entry = SectionMap[GV->getSection()];
- if (Entry != 0) continue;
- WriteStringRecord(bitc::MODULE_CODE_SECTIONNAME, GV->getSection(),
- 0/*TODO*/, Stream);
- Entry = SectionMap.size();
- }
- for (Module::const_iterator F = M->begin(), E = M->end(); F != E; ++F) {
- MaxAlignment = std::max(MaxAlignment, F->getAlignment());
- if (F->hasSection()) {
- // Give section names unique ID's.
- unsigned &Entry = SectionMap[F->getSection()];
- if (!Entry) {
- WriteStringRecord(bitc::MODULE_CODE_SECTIONNAME, F->getSection(),
- 0/*TODO*/, Stream);
- Entry = SectionMap.size();
- }
- }
- if (F->hasGC()) {
- // Same for GC names.
- unsigned &Entry = GCMap[F->getGC()];
- if (!Entry) {
- WriteStringRecord(bitc::MODULE_CODE_GCNAME, F->getGC(),
- 0/*TODO*/, Stream);
- Entry = GCMap.size();
- }
- }
- }
-
- // Emit abbrev for globals, now that we know # sections and max alignment.
- unsigned SimpleGVarAbbrev = 0;
- if (!M->global_empty()) {
- // Add an abbrev for common globals with no visibility or thread localness.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_GLOBALVAR));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- Log2_32_Ceil(MaxGlobalType+1)));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Constant.
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Initializer.
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // Linkage.
- if (MaxAlignment == 0) // Alignment.
- Abbv->Add(BitCodeAbbrevOp(0));
- else {
- unsigned MaxEncAlignment = Log2_32(MaxAlignment)+1;
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- Log2_32_Ceil(MaxEncAlignment+1)));
- }
- if (SectionMap.empty()) // Section.
- Abbv->Add(BitCodeAbbrevOp(0));
- else
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- Log2_32_Ceil(SectionMap.size()+1)));
- // Don't bother emitting vis + thread local.
- SimpleGVarAbbrev = Stream.EmitAbbrev(Abbv);
- }
-
- // Emit the global variable information.
- SmallVector<unsigned, 64> Vals;
- for (Module::const_global_iterator GV = M->global_begin(),E = M->global_end();
- GV != E; ++GV) {
- unsigned AbbrevToUse = 0;
-
- // GLOBALVAR: [type, isconst, initid,
- // linkage, alignment, section, visibility, threadlocal]
- Vals.push_back(VE.getTypeID(GV->getType()));
- Vals.push_back(GV->isConstant());
- Vals.push_back(GV->isDeclaration() ? 0 :
- (VE.getValueID(GV->getInitializer()) + 1));
- Vals.push_back(getEncodedLinkage(GV));
- Vals.push_back(Log2_32(GV->getAlignment())+1);
- Vals.push_back(GV->hasSection() ? SectionMap[GV->getSection()] : 0);
- if (GV->isThreadLocal() ||
- GV->getVisibility() != GlobalValue::DefaultVisibility) {
- Vals.push_back(getEncodedVisibility(GV));
- Vals.push_back(GV->isThreadLocal());
- } else {
- AbbrevToUse = SimpleGVarAbbrev;
- }
-
- Stream.EmitRecord(bitc::MODULE_CODE_GLOBALVAR, Vals, AbbrevToUse);
- Vals.clear();
- }
-
- // Emit the function proto information.
- for (Module::const_iterator F = M->begin(), E = M->end(); F != E; ++F) {
- // FUNCTION: [type, callingconv, isproto, paramattr,
- // linkage, alignment, section, visibility, gc]
- Vals.push_back(VE.getTypeID(F->getType()));
- Vals.push_back(F->getCallingConv());
- Vals.push_back(F->isDeclaration());
- Vals.push_back(getEncodedLinkage(F));
- Vals.push_back(VE.getAttributeID(F->getAttributes()));
- Vals.push_back(Log2_32(F->getAlignment())+1);
- Vals.push_back(F->hasSection() ? SectionMap[F->getSection()] : 0);
- Vals.push_back(getEncodedVisibility(F));
- Vals.push_back(F->hasGC() ? GCMap[F->getGC()] : 0);
-
- unsigned AbbrevToUse = 0;
- Stream.EmitRecord(bitc::MODULE_CODE_FUNCTION, Vals, AbbrevToUse);
- Vals.clear();
- }
-
-
- // Emit the alias information.
- for (Module::const_alias_iterator AI = M->alias_begin(), E = M->alias_end();
- AI != E; ++AI) {
- Vals.push_back(VE.getTypeID(AI->getType()));
- Vals.push_back(VE.getValueID(AI->getAliasee()));
- Vals.push_back(getEncodedLinkage(AI));
- Vals.push_back(getEncodedVisibility(AI));
- unsigned AbbrevToUse = 0;
- Stream.EmitRecord(bitc::MODULE_CODE_ALIAS, Vals, AbbrevToUse);
- Vals.clear();
- }
-}
-
-static uint64_t GetOptimizationFlags(const Value *V) {
- uint64_t Flags = 0;
-
- if (const OverflowingBinaryOperator *OBO =
- dyn_cast<OverflowingBinaryOperator>(V)) {
- if (OBO->hasNoSignedWrap())
- Flags |= 1 << bitc::OBO_NO_SIGNED_WRAP;
- if (OBO->hasNoUnsignedWrap())
- Flags |= 1 << bitc::OBO_NO_UNSIGNED_WRAP;
- } else if (const SDivOperator *Div = dyn_cast<SDivOperator>(V)) {
- if (Div->isExact())
- Flags |= 1 << bitc::SDIV_EXACT;
- }
-
- return Flags;
-}
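GetOptimizationFlags is the writer-side half of the wrap and exactness flags: it packs hasNoSignedWrap, hasNoUnsignedWrap and isExact into a small bitfield, and the BINOP case in ParseFunctionBody above unpacks the same bits with setHasNoSignedWrap and friends. A small round-trip sketch using the same bit positions (the helper name is illustrative):

  #include "llvm/Instructions.h"
  #include "llvm/Operator.h"
  #include "llvm/Support/Casting.h"
  #include "llvm/Bitcode/LLVMBitCodes.h"

  // Pack the overflow flags of one value the way GetOptimizationFlags does,
  // then apply them to a BinaryOperator the way the reader's BINOP case does.
  void copyWrapFlags(const llvm::Value *From, llvm::BinaryOperator *To) {
    uint64_t Flags = 0;
    if (const llvm::OverflowingBinaryOperator *OBO =
          llvm::dyn_cast<llvm::OverflowingBinaryOperator>(From)) {
      if (OBO->hasNoSignedWrap())
        Flags |= 1 << llvm::bitc::OBO_NO_SIGNED_WRAP;
      if (OBO->hasNoUnsignedWrap())
        Flags |= 1 << llvm::bitc::OBO_NO_UNSIGNED_WRAP;
    }
    if (Flags & (1 << llvm::bitc::OBO_NO_SIGNED_WRAP))
      To->setHasNoSignedWrap(true);
    if (Flags & (1 << llvm::bitc::OBO_NO_UNSIGNED_WRAP))
      To->setHasNoUnsignedWrap(true);
  }
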
-
-static void WriteMDNode(const MDNode *N,
- const ValueEnumerator &VE,
- BitstreamWriter &Stream,
- SmallVector<uint64_t, 64> &Record) {
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
- if (N->getOperand(i)) {
- Record.push_back(VE.getTypeID(N->getOperand(i)->getType()));
- Record.push_back(VE.getValueID(N->getOperand(i)));
- } else {
- Record.push_back(VE.getTypeID(Type::getVoidTy(N->getContext())));
- Record.push_back(0);
- }
- }
- unsigned MDCode = N->isFunctionLocal() ? bitc::METADATA_FN_NODE :
- bitc::METADATA_NODE;
- Stream.EmitRecord(MDCode, Record, 0);
- Record.clear();
-}
-
-static void WriteModuleMetadata(const ValueEnumerator &VE,
- BitstreamWriter &Stream) {
- const ValueEnumerator::ValueList &Vals = VE.getMDValues();
- bool StartedMetadataBlock = false;
- unsigned MDSAbbrev = 0;
- SmallVector<uint64_t, 64> Record;
- for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
-
- if (const MDNode *N = dyn_cast<MDNode>(Vals[i].first)) {
- if (!N->isFunctionLocal() || !N->getFunction()) {
- if (!StartedMetadataBlock) {
- Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3);
- StartedMetadataBlock = true;
- }
- WriteMDNode(N, VE, Stream, Record);
- }
- } else if (const MDString *MDS = dyn_cast<MDString>(Vals[i].first)) {
- if (!StartedMetadataBlock) {
- Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3);
-
- // Abbrev for METADATA_STRING.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_STRING));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8));
- MDSAbbrev = Stream.EmitAbbrev(Abbv);
- StartedMetadataBlock = true;
- }
-
- // Code: [strchar x N]
- Record.append(MDS->begin(), MDS->end());
-
- // Emit the finished record.
- Stream.EmitRecord(bitc::METADATA_STRING, Record, MDSAbbrev);
- Record.clear();
- } else if (const NamedMDNode *NMD = dyn_cast<NamedMDNode>(Vals[i].first)) {
- if (!StartedMetadataBlock) {
- Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3);
- StartedMetadataBlock = true;
- }
-
- // Write name.
- StringRef Str = NMD->getName();
- for (unsigned i = 0, e = Str.size(); i != e; ++i)
- Record.push_back(Str[i]);
- Stream.EmitRecord(bitc::METADATA_NAME, Record, 0/*TODO*/);
- Record.clear();
-
- // Write named metadata operands.
- for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
- if (NMD->getOperand(i))
- Record.push_back(VE.getValueID(NMD->getOperand(i)));
- else
- Record.push_back(~0U);
- }
- Stream.EmitRecord(bitc::METADATA_NAMED_NODE, Record, 0);
- Record.clear();
- }
- }
-
- if (StartedMetadataBlock)
- Stream.ExitBlock();
-}
-
-static void WriteFunctionLocalMetadata(const Function &F,
- const ValueEnumerator &VE,
- BitstreamWriter &Stream) {
- bool StartedMetadataBlock = false;
- SmallVector<uint64_t, 64> Record;
- const ValueEnumerator::ValueList &Vals = VE.getMDValues();
-
- for (unsigned i = 0, e = Vals.size(); i != e; ++i)
- if (const MDNode *N = dyn_cast<MDNode>(Vals[i].first))
- if (N->isFunctionLocal() && N->getFunction() == &F) {
- if (!StartedMetadataBlock) {
- Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3);
- StartedMetadataBlock = true;
- }
- WriteMDNode(N, VE, Stream, Record);
- }
-
- if (StartedMetadataBlock)
- Stream.ExitBlock();
-}
-
-static void WriteMetadataAttachment(const Function &F,
- const ValueEnumerator &VE,
- BitstreamWriter &Stream) {
- bool StartedMetadataBlock = false;
- SmallVector<uint64_t, 64> Record;
-
- // Write metadata attachments
- // METADATA_ATTACHMENT - [m x [value, [n x [id, mdnode]]]
- SmallVector<std::pair<unsigned, MDNode*>, 4> MDs;
-
- for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
- for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
- I != E; ++I) {
- MDs.clear();
- I->getAllMetadata(MDs);
-
- // If no metadata, ignore instruction.
- if (MDs.empty()) continue;
-
- Record.push_back(VE.getInstructionID(I));
-
- for (unsigned i = 0, e = MDs.size(); i != e; ++i) {
- Record.push_back(MDs[i].first);
- Record.push_back(VE.getValueID(MDs[i].second));
- }
- if (!StartedMetadataBlock) {
- Stream.EnterSubblock(bitc::METADATA_ATTACHMENT_ID, 3);
- StartedMetadataBlock = true;
- }
- Stream.EmitRecord(bitc::METADATA_ATTACHMENT, Record, 0);
- Record.clear();
- }
-
- if (StartedMetadataBlock)
- Stream.ExitBlock();
-}
-
-static void WriteModuleMetadataStore(const Module *M, BitstreamWriter &Stream) {
- SmallVector<uint64_t, 64> Record;
-
- // Write metadata kinds
- // METADATA_KIND - [n x [id, name]]
- SmallVector<StringRef, 4> Names;
- M->getMDKindNames(Names);
-
- assert(Names[0] == "" && "MDKind #0 is invalid");
- if (Names.size() == 1) return;
-
- Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3);
-
- for (unsigned MDKindID = 1, e = Names.size(); MDKindID != e; ++MDKindID) {
- Record.push_back(MDKindID);
- StringRef KName = Names[MDKindID];
- Record.append(KName.begin(), KName.end());
-
- Stream.EmitRecord(bitc::METADATA_KIND, Record, 0);
- Record.clear();
- }
-
- Stream.ExitBlock();
-}
-
-static void WriteConstants(unsigned FirstVal, unsigned LastVal,
- const ValueEnumerator &VE,
- BitstreamWriter &Stream, bool isGlobal) {
- if (FirstVal == LastVal) return;
-
- Stream.EnterSubblock(bitc::CONSTANTS_BLOCK_ID, 4);
-
- unsigned AggregateAbbrev = 0;
- unsigned String8Abbrev = 0;
- unsigned CString7Abbrev = 0;
- unsigned CString6Abbrev = 0;
- // If this is a constant pool for the module, emit module-specific abbrevs.
- if (isGlobal) {
- // Abbrev for CST_CODE_AGGREGATE.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_AGGREGATE));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, Log2_32_Ceil(LastVal+1)));
- AggregateAbbrev = Stream.EmitAbbrev(Abbv);
-
- // Abbrev for CST_CODE_STRING.
- Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_STRING));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8));
- String8Abbrev = Stream.EmitAbbrev(Abbv);
- // Abbrev for CST_CODE_CSTRING.
- Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CSTRING));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7));
- CString7Abbrev = Stream.EmitAbbrev(Abbv);
- // Abbrev for CST_CODE_CSTRING.
- Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CSTRING));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
- CString6Abbrev = Stream.EmitAbbrev(Abbv);
- }
-
- SmallVector<uint64_t, 64> Record;
-
- const ValueEnumerator::ValueList &Vals = VE.getValues();
- const Type *LastTy = 0;
- for (unsigned i = FirstVal; i != LastVal; ++i) {
- const Value *V = Vals[i].first;
- // If we need to switch types, do so now.
- if (V->getType() != LastTy) {
- LastTy = V->getType();
- Record.push_back(VE.getTypeID(LastTy));
- Stream.EmitRecord(bitc::CST_CODE_SETTYPE, Record,
- CONSTANTS_SETTYPE_ABBREV);
- Record.clear();
- }
-
- if (const InlineAsm *IA = dyn_cast<InlineAsm>(V)) {
- Record.push_back(unsigned(IA->hasSideEffects()) |
- unsigned(IA->isAlignStack()) << 1);
-
- // Add the asm string.
- const std::string &AsmStr = IA->getAsmString();
- Record.push_back(AsmStr.size());
- for (unsigned i = 0, e = AsmStr.size(); i != e; ++i)
- Record.push_back(AsmStr[i]);
-
- // Add the constraint string.
- const std::string &ConstraintStr = IA->getConstraintString();
- Record.push_back(ConstraintStr.size());
- for (unsigned i = 0, e = ConstraintStr.size(); i != e; ++i)
- Record.push_back(ConstraintStr[i]);
- Stream.EmitRecord(bitc::CST_CODE_INLINEASM, Record);
- Record.clear();
- continue;
- }
- const Constant *C = cast<Constant>(V);
- unsigned Code = -1U;
- unsigned AbbrevToUse = 0;
- if (C->isNullValue()) {
- Code = bitc::CST_CODE_NULL;
- } else if (isa<UndefValue>(C)) {
- Code = bitc::CST_CODE_UNDEF;
- } else if (const ConstantInt *IV = dyn_cast<ConstantInt>(C)) {
- if (IV->getBitWidth() <= 64) {
- int64_t V = IV->getSExtValue();
- if (V >= 0)
- Record.push_back(V << 1);
- else
- Record.push_back((-V << 1) | 1);
- Code = bitc::CST_CODE_INTEGER;
- AbbrevToUse = CONSTANTS_INTEGER_ABBREV;
- } else { // Wide integers, > 64 bits in size.
- // We have an arbitrary precision integer value to write whose
- // bit width is > 64. However, in canonical unsigned integer
- // format it is likely that the high bits are going to be zero.
- // So, we only write the number of active words.
- unsigned NWords = IV->getValue().getActiveWords();
- const uint64_t *RawWords = IV->getValue().getRawData();
- for (unsigned i = 0; i != NWords; ++i) {
- int64_t V = RawWords[i];
- if (V >= 0)
- Record.push_back(V << 1);
- else
- Record.push_back((-V << 1) | 1);
- }
- Code = bitc::CST_CODE_WIDE_INTEGER;
- }
- } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
- Code = bitc::CST_CODE_FLOAT;
- const Type *Ty = CFP->getType();
- if (Ty->isFloatTy() || Ty->isDoubleTy()) {
- Record.push_back(CFP->getValueAPF().bitcastToAPInt().getZExtValue());
- } else if (Ty->isX86_FP80Ty()) {
- // Keep the APInt in a local ("api") so it is not destroyed prematurely.
- // The bits are not in the same order as a normal i80 APInt; compensate.
- APInt api = CFP->getValueAPF().bitcastToAPInt();
- const uint64_t *p = api.getRawData();
- Record.push_back((p[1] << 48) | (p[0] >> 16));
- Record.push_back(p[0] & 0xffffLL);
- } else if (Ty->isFP128Ty() || Ty->isPPC_FP128Ty()) {
- APInt api = CFP->getValueAPF().bitcastToAPInt();
- const uint64_t *p = api.getRawData();
- Record.push_back(p[0]);
- Record.push_back(p[1]);
- } else {
- assert (0 && "Unknown FP type!");
- }
- } else if (isa<ConstantArray>(C) && cast<ConstantArray>(C)->isString()) {
- const ConstantArray *CA = cast<ConstantArray>(C);
- // Emit constant strings specially.
- unsigned NumOps = CA->getNumOperands();
- // If this is a null-terminated string, use the denser CSTRING encoding.
- if (CA->getOperand(NumOps-1)->isNullValue()) {
- Code = bitc::CST_CODE_CSTRING;
- --NumOps; // Don't encode the null, which isn't allowed by char6.
- } else {
- Code = bitc::CST_CODE_STRING;
- AbbrevToUse = String8Abbrev;
- }
- bool isCStr7 = Code == bitc::CST_CODE_CSTRING;
- bool isCStrChar6 = Code == bitc::CST_CODE_CSTRING;
- for (unsigned i = 0; i != NumOps; ++i) {
- unsigned char V = cast<ConstantInt>(CA->getOperand(i))->getZExtValue();
- Record.push_back(V);
- isCStr7 &= (V & 128) == 0;
- if (isCStrChar6)
- isCStrChar6 = BitCodeAbbrevOp::isChar6(V);
- }
-
- if (isCStrChar6)
- AbbrevToUse = CString6Abbrev;
- else if (isCStr7)
- AbbrevToUse = CString7Abbrev;
- } else if (isa<ConstantArray>(C) || isa<ConstantStruct>(V) ||
- isa<ConstantUnion>(C) || isa<ConstantVector>(V)) {
- Code = bitc::CST_CODE_AGGREGATE;
- for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i)
- Record.push_back(VE.getValueID(C->getOperand(i)));
- AbbrevToUse = AggregateAbbrev;
- } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
- switch (CE->getOpcode()) {
- default:
- if (Instruction::isCast(CE->getOpcode())) {
- Code = bitc::CST_CODE_CE_CAST;
- Record.push_back(GetEncodedCastOpcode(CE->getOpcode()));
- Record.push_back(VE.getTypeID(C->getOperand(0)->getType()));
- Record.push_back(VE.getValueID(C->getOperand(0)));
- AbbrevToUse = CONSTANTS_CE_CAST_Abbrev;
- } else {
- assert(CE->getNumOperands() == 2 && "Unknown constant expr!");
- Code = bitc::CST_CODE_CE_BINOP;
- Record.push_back(GetEncodedBinaryOpcode(CE->getOpcode()));
- Record.push_back(VE.getValueID(C->getOperand(0)));
- Record.push_back(VE.getValueID(C->getOperand(1)));
- uint64_t Flags = GetOptimizationFlags(CE);
- if (Flags != 0)
- Record.push_back(Flags);
- }
- break;
- case Instruction::GetElementPtr:
- Code = bitc::CST_CODE_CE_GEP;
- if (cast<GEPOperator>(C)->isInBounds())
- Code = bitc::CST_CODE_CE_INBOUNDS_GEP;
- for (unsigned i = 0, e = CE->getNumOperands(); i != e; ++i) {
- Record.push_back(VE.getTypeID(C->getOperand(i)->getType()));
- Record.push_back(VE.getValueID(C->getOperand(i)));
- }
- break;
- case Instruction::Select:
- Code = bitc::CST_CODE_CE_SELECT;
- Record.push_back(VE.getValueID(C->getOperand(0)));
- Record.push_back(VE.getValueID(C->getOperand(1)));
- Record.push_back(VE.getValueID(C->getOperand(2)));
- break;
- case Instruction::ExtractElement:
- Code = bitc::CST_CODE_CE_EXTRACTELT;
- Record.push_back(VE.getTypeID(C->getOperand(0)->getType()));
- Record.push_back(VE.getValueID(C->getOperand(0)));
- Record.push_back(VE.getValueID(C->getOperand(1)));
- break;
- case Instruction::InsertElement:
- Code = bitc::CST_CODE_CE_INSERTELT;
- Record.push_back(VE.getValueID(C->getOperand(0)));
- Record.push_back(VE.getValueID(C->getOperand(1)));
- Record.push_back(VE.getValueID(C->getOperand(2)));
- break;
- case Instruction::ShuffleVector:
- // If the return type and argument types are the same, this is a
- // standard shufflevector instruction. If the types are different,
- // then the shuffle is widening or truncating the input vectors, and
- // the argument type must also be encoded.
- if (C->getType() == C->getOperand(0)->getType()) {
- Code = bitc::CST_CODE_CE_SHUFFLEVEC;
- } else {
- Code = bitc::CST_CODE_CE_SHUFVEC_EX;
- Record.push_back(VE.getTypeID(C->getOperand(0)->getType()));
- }
- Record.push_back(VE.getValueID(C->getOperand(0)));
- Record.push_back(VE.getValueID(C->getOperand(1)));
- Record.push_back(VE.getValueID(C->getOperand(2)));
- break;
- case Instruction::ICmp:
- case Instruction::FCmp:
- Code = bitc::CST_CODE_CE_CMP;
- Record.push_back(VE.getTypeID(C->getOperand(0)->getType()));
- Record.push_back(VE.getValueID(C->getOperand(0)));
- Record.push_back(VE.getValueID(C->getOperand(1)));
- Record.push_back(CE->getPredicate());
- break;
- }
- } else if (const BlockAddress *BA = dyn_cast<BlockAddress>(C)) {
- assert(BA->getFunction() == BA->getBasicBlock()->getParent() &&
- "Malformed blockaddress");
- Code = bitc::CST_CODE_BLOCKADDRESS;
- Record.push_back(VE.getTypeID(BA->getFunction()->getType()));
- Record.push_back(VE.getValueID(BA->getFunction()));
- Record.push_back(VE.getGlobalBasicBlockID(BA->getBasicBlock()));
- } else {
- llvm_unreachable("Unknown constant!");
- }
- Stream.EmitRecord(Code, Record, AbbrevToUse);
- Record.clear();
- }
-
- Stream.ExitBlock();
-}
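
The CST_CODE_INTEGER path above folds the sign into the low bit before emitting the value. A minimal sketch of that folding and its inverse, with helper names that are mine rather than LLVM API:

#include <cstdint>

// Sign folding as used for CST_CODE_INTEGER records above: non-negative
// values become V<<1, negative values become (-V<<1)|1.
static uint64_t foldSigned(int64_t V) {
  return V >= 0 ? uint64_t(V) << 1 : (uint64_t(-V) << 1) | 1;
}

// Inverse mapping a reader would apply.
static int64_t unfoldSigned(uint64_t R) {
  return (R & 1) ? -int64_t(R >> 1) : int64_t(R >> 1);
}
// Example: foldSigned(-5) == 11, unfoldSigned(11) == -5.
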
-
-static void WriteModuleConstants(const ValueEnumerator &VE,
- BitstreamWriter &Stream) {
- const ValueEnumerator::ValueList &Vals = VE.getValues();
-
- // Find the first constant to emit, which is the first non-globalvalue value.
- // We know globalvalues have been emitted by WriteModuleInfo.
- for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
- if (!isa<GlobalValue>(Vals[i].first)) {
- WriteConstants(i, Vals.size(), VE, Stream, true);
- return;
- }
- }
-}
-
-/// PushValueAndType - The file has to encode both the value and type id for
-/// many values, because we need to know what type to create for forward
-/// references. However, most operands are not forward references, so this type
-/// field is not needed.
-///
-/// This function adds V's value ID to Vals. If the value ID is higher than the
-/// instruction ID, then it is a forward reference, and it also includes the
-/// type ID.
-static bool PushValueAndType(const Value *V, unsigned InstID,
- SmallVector<unsigned, 64> &Vals,
- ValueEnumerator &VE) {
- unsigned ValID = VE.getValueID(V);
- Vals.push_back(ValID);
- if (ValID >= InstID) {
- Vals.push_back(VE.getTypeID(V->getType()));
- return true;
- }
- return false;
-}
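
In other words, whether an operand record carries an extra type field is implied by comparing the operand's value ID with the current instruction ID. A minimal reader-side sketch of that convention (struct and function names are mine, purely illustrative):

// Operands whose ID is below the current instruction ID are backward
// references and carry no type; forward references are followed by a type ID.
struct OperandRef {
  unsigned ValID;
  bool HasType;    // true only for forward references
  unsigned TypeID; // meaningful only when HasType is true
};

static OperandRef readOperand(const unsigned *&Rec, unsigned InstID) {
  OperandRef R;
  R.ValID = *Rec++;
  R.HasType = (R.ValID >= InstID);
  R.TypeID = R.HasType ? *Rec++ : 0;
  return R;
}
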
-
-/// WriteInstruction - Emit an instruction to the specified stream.
-static void WriteInstruction(const Instruction &I, unsigned InstID,
- ValueEnumerator &VE, BitstreamWriter &Stream,
- SmallVector<unsigned, 64> &Vals) {
- unsigned Code = 0;
- unsigned AbbrevToUse = 0;
- VE.setInstructionID(&I);
- switch (I.getOpcode()) {
- default:
- if (Instruction::isCast(I.getOpcode())) {
- Code = bitc::FUNC_CODE_INST_CAST;
- if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE))
- AbbrevToUse = FUNCTION_INST_CAST_ABBREV;
- Vals.push_back(VE.getTypeID(I.getType()));
- Vals.push_back(GetEncodedCastOpcode(I.getOpcode()));
- } else {
- assert(isa<BinaryOperator>(I) && "Unknown instruction!");
- Code = bitc::FUNC_CODE_INST_BINOP;
- if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE))
- AbbrevToUse = FUNCTION_INST_BINOP_ABBREV;
- Vals.push_back(VE.getValueID(I.getOperand(1)));
- Vals.push_back(GetEncodedBinaryOpcode(I.getOpcode()));
- uint64_t Flags = GetOptimizationFlags(&I);
- if (Flags != 0) {
- if (AbbrevToUse == FUNCTION_INST_BINOP_ABBREV)
- AbbrevToUse = FUNCTION_INST_BINOP_FLAGS_ABBREV;
- Vals.push_back(Flags);
- }
- }
- break;
-
- case Instruction::GetElementPtr:
- Code = bitc::FUNC_CODE_INST_GEP;
- if (cast<GEPOperator>(&I)->isInBounds())
- Code = bitc::FUNC_CODE_INST_INBOUNDS_GEP;
- for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
- PushValueAndType(I.getOperand(i), InstID, Vals, VE);
- break;
- case Instruction::ExtractValue: {
- Code = bitc::FUNC_CODE_INST_EXTRACTVAL;
- PushValueAndType(I.getOperand(0), InstID, Vals, VE);
- const ExtractValueInst *EVI = cast<ExtractValueInst>(&I);
- for (const unsigned *i = EVI->idx_begin(), *e = EVI->idx_end(); i != e; ++i)
- Vals.push_back(*i);
- break;
- }
- case Instruction::InsertValue: {
- Code = bitc::FUNC_CODE_INST_INSERTVAL;
- PushValueAndType(I.getOperand(0), InstID, Vals, VE);
- PushValueAndType(I.getOperand(1), InstID, Vals, VE);
- const InsertValueInst *IVI = cast<InsertValueInst>(&I);
- for (const unsigned *i = IVI->idx_begin(), *e = IVI->idx_end(); i != e; ++i)
- Vals.push_back(*i);
- break;
- }
- case Instruction::Select:
- Code = bitc::FUNC_CODE_INST_VSELECT;
- PushValueAndType(I.getOperand(1), InstID, Vals, VE);
- Vals.push_back(VE.getValueID(I.getOperand(2)));
- PushValueAndType(I.getOperand(0), InstID, Vals, VE);
- break;
- case Instruction::ExtractElement:
- Code = bitc::FUNC_CODE_INST_EXTRACTELT;
- PushValueAndType(I.getOperand(0), InstID, Vals, VE);
- Vals.push_back(VE.getValueID(I.getOperand(1)));
- break;
- case Instruction::InsertElement:
- Code = bitc::FUNC_CODE_INST_INSERTELT;
- PushValueAndType(I.getOperand(0), InstID, Vals, VE);
- Vals.push_back(VE.getValueID(I.getOperand(1)));
- Vals.push_back(VE.getValueID(I.getOperand(2)));
- break;
- case Instruction::ShuffleVector:
- Code = bitc::FUNC_CODE_INST_SHUFFLEVEC;
- PushValueAndType(I.getOperand(0), InstID, Vals, VE);
- Vals.push_back(VE.getValueID(I.getOperand(1)));
- Vals.push_back(VE.getValueID(I.getOperand(2)));
- break;
- case Instruction::ICmp:
- case Instruction::FCmp:
- // compare returning Int1Ty or vector of Int1Ty
- Code = bitc::FUNC_CODE_INST_CMP2;
- PushValueAndType(I.getOperand(0), InstID, Vals, VE);
- Vals.push_back(VE.getValueID(I.getOperand(1)));
- Vals.push_back(cast<CmpInst>(I).getPredicate());
- break;
-
- case Instruction::Ret:
- {
- Code = bitc::FUNC_CODE_INST_RET;
- unsigned NumOperands = I.getNumOperands();
- if (NumOperands == 0)
- AbbrevToUse = FUNCTION_INST_RET_VOID_ABBREV;
- else if (NumOperands == 1) {
- if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE))
- AbbrevToUse = FUNCTION_INST_RET_VAL_ABBREV;
- } else {
- for (unsigned i = 0, e = NumOperands; i != e; ++i)
- PushValueAndType(I.getOperand(i), InstID, Vals, VE);
- }
- }
- break;
- case Instruction::Br:
- {
- Code = bitc::FUNC_CODE_INST_BR;
- BranchInst &II = cast<BranchInst>(I);
- Vals.push_back(VE.getValueID(II.getSuccessor(0)));
- if (II.isConditional()) {
- Vals.push_back(VE.getValueID(II.getSuccessor(1)));
- Vals.push_back(VE.getValueID(II.getCondition()));
- }
- }
- break;
- case Instruction::Switch:
- Code = bitc::FUNC_CODE_INST_SWITCH;
- Vals.push_back(VE.getTypeID(I.getOperand(0)->getType()));
- for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
- Vals.push_back(VE.getValueID(I.getOperand(i)));
- break;
- case Instruction::IndirectBr:
- Code = bitc::FUNC_CODE_INST_INDIRECTBR;
- Vals.push_back(VE.getTypeID(I.getOperand(0)->getType()));
- for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
- Vals.push_back(VE.getValueID(I.getOperand(i)));
- break;
-
- case Instruction::Invoke: {
- const InvokeInst *II = cast<InvokeInst>(&I);
- const Value *Callee(II->getCalledValue());
- const PointerType *PTy = cast<PointerType>(Callee->getType());
- const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
- Code = bitc::FUNC_CODE_INST_INVOKE;
-
- Vals.push_back(VE.getAttributeID(II->getAttributes()));
- Vals.push_back(II->getCallingConv());
- Vals.push_back(VE.getValueID(II->getNormalDest()));
- Vals.push_back(VE.getValueID(II->getUnwindDest()));
- PushValueAndType(Callee, InstID, Vals, VE);
-
- // Emit value #'s for the fixed parameters.
- for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
- Vals.push_back(VE.getValueID(I.getOperand(i+3))); // fixed param.
-
- // Emit type/value pairs for varargs params.
- if (FTy->isVarArg()) {
- for (unsigned i = 3+FTy->getNumParams(), e = I.getNumOperands();
- i != e; ++i)
- PushValueAndType(I.getOperand(i), InstID, Vals, VE); // vararg
- }
- break;
- }
- case Instruction::Unwind:
- Code = bitc::FUNC_CODE_INST_UNWIND;
- break;
- case Instruction::Unreachable:
- Code = bitc::FUNC_CODE_INST_UNREACHABLE;
- AbbrevToUse = FUNCTION_INST_UNREACHABLE_ABBREV;
- break;
-
- case Instruction::PHI:
- Code = bitc::FUNC_CODE_INST_PHI;
- Vals.push_back(VE.getTypeID(I.getType()));
- for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
- Vals.push_back(VE.getValueID(I.getOperand(i)));
- break;
-
- case Instruction::Alloca:
- Code = bitc::FUNC_CODE_INST_ALLOCA;
- Vals.push_back(VE.getTypeID(I.getType()));
- Vals.push_back(VE.getValueID(I.getOperand(0))); // size.
- Vals.push_back(Log2_32(cast<AllocaInst>(I).getAlignment())+1);
- break;
-
- case Instruction::Load:
- Code = bitc::FUNC_CODE_INST_LOAD;
- if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE)) // ptr
- AbbrevToUse = FUNCTION_INST_LOAD_ABBREV;
-
- Vals.push_back(Log2_32(cast<LoadInst>(I).getAlignment())+1);
- Vals.push_back(cast<LoadInst>(I).isVolatile());
- break;
- case Instruction::Store:
- Code = bitc::FUNC_CODE_INST_STORE2;
- PushValueAndType(I.getOperand(1), InstID, Vals, VE); // ptrty + ptr
- Vals.push_back(VE.getValueID(I.getOperand(0))); // val.
- Vals.push_back(Log2_32(cast<StoreInst>(I).getAlignment())+1);
- Vals.push_back(cast<StoreInst>(I).isVolatile());
- break;
- case Instruction::Call: {
- const PointerType *PTy = cast<PointerType>(I.getOperand(0)->getType());
- const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
-
- Code = bitc::FUNC_CODE_INST_CALL;
-
- const CallInst *CI = cast<CallInst>(&I);
- Vals.push_back(VE.getAttributeID(CI->getAttributes()));
- Vals.push_back((CI->getCallingConv() << 1) | unsigned(CI->isTailCall()));
- PushValueAndType(CI->getOperand(0), InstID, Vals, VE); // Callee
-
- // Emit value #'s for the fixed parameters.
- for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
- Vals.push_back(VE.getValueID(I.getOperand(i+1))); // fixed param.
-
- // Emit type/value pairs for varargs params.
- if (FTy->isVarArg()) {
- unsigned NumVarargs = I.getNumOperands()-1-FTy->getNumParams();
- for (unsigned i = I.getNumOperands()-NumVarargs, e = I.getNumOperands();
- i != e; ++i)
- PushValueAndType(I.getOperand(i), InstID, Vals, VE); // varargs
- }
- break;
- }
- case Instruction::VAArg:
- Code = bitc::FUNC_CODE_INST_VAARG;
- Vals.push_back(VE.getTypeID(I.getOperand(0)->getType())); // valistty
- Vals.push_back(VE.getValueID(I.getOperand(0))); // valist.
- Vals.push_back(VE.getTypeID(I.getType())); // restype.
- break;
- }
-
- Stream.EmitRecord(Code, Vals, AbbrevToUse);
- Vals.clear();
-}
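
As a hedged worked example (value IDs invented for illustration): for an instruction like "%z = add nsw i32 %x, %y" whose operands were already emitted, the code above builds the record [FUNC_CODE_INST_BINOP, id(%x), id(%y), encoded(Add), flags(nsw)] and, because the first operand needs no type field and the flags word is non-zero, emits it with FUNCTION_INST_BINOP_FLAGS_ABBREV.
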
-
-// Emit names for globals/functions etc.
-static void WriteValueSymbolTable(const ValueSymbolTable &VST,
- const ValueEnumerator &VE,
- BitstreamWriter &Stream) {
- if (VST.empty()) return;
- Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4);
-
- // FIXME: Set up the abbrev, we know how many values there are!
- // FIXME: We know if the type names can use 7-bit ascii.
- SmallVector<unsigned, 64> NameVals;
-
- for (ValueSymbolTable::const_iterator SI = VST.begin(), SE = VST.end();
- SI != SE; ++SI) {
-
- const ValueName &Name = *SI;
-
- // Figure out the encoding to use for the name.
- bool is7Bit = true;
- bool isChar6 = true;
- for (const char *C = Name.getKeyData(), *E = C+Name.getKeyLength();
- C != E; ++C) {
- if (isChar6)
- isChar6 = BitCodeAbbrevOp::isChar6(*C);
- if ((unsigned char)*C & 128) {
- is7Bit = false;
- break; // don't bother scanning the rest.
- }
- }
-
- unsigned AbbrevToUse = VST_ENTRY_8_ABBREV;
-
- // VST_ENTRY: [valueid, namechar x N]
- // VST_BBENTRY: [bbid, namechar x N]
- unsigned Code;
- if (isa<BasicBlock>(SI->getValue())) {
- Code = bitc::VST_CODE_BBENTRY;
- if (isChar6)
- AbbrevToUse = VST_BBENTRY_6_ABBREV;
- } else {
- Code = bitc::VST_CODE_ENTRY;
- if (isChar6)
- AbbrevToUse = VST_ENTRY_6_ABBREV;
- else if (is7Bit)
- AbbrevToUse = VST_ENTRY_7_ABBREV;
- }
-
- NameVals.push_back(VE.getValueID(SI->getValue()));
- for (const char *P = Name.getKeyData(),
- *E = Name.getKeyData()+Name.getKeyLength(); P != E; ++P)
- NameVals.push_back((unsigned char)*P);
-
- // Emit the finished record.
- Stream.EmitRecord(Code, NameVals, AbbrevToUse);
- NameVals.clear();
- }
- Stream.ExitBlock();
-}
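
The per-name scan above picks the densest of three encodings. A compact sketch of the same classification, assuming the char6 alphabet [a-zA-Z0-9._] used by the bitcode format (enum and function names are mine):

#include <string>

enum NameEncoding { Enc6Bit, Enc7Bit, Enc8Bit };

static NameEncoding classifyName(const std::string &Name) {
  bool Is7Bit = true, IsChar6 = true;
  for (std::string::size_type i = 0; i != Name.size(); ++i) {
    unsigned char C = Name[i];
    IsChar6 &= (C >= 'a' && C <= 'z') || (C >= 'A' && C <= 'Z') ||
               (C >= '0' && C <= '9') || C == '.' || C == '_';
    if (C & 128) {
      Is7Bit = false;
      break; // a high-bit character already rules out char6 as well
    }
  }
  if (IsChar6) return Enc6Bit;
  return Is7Bit ? Enc7Bit : Enc8Bit;
}
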
-
-/// WriteFunction - Emit a function body to the module stream.
-static void WriteFunction(const Function &F, ValueEnumerator &VE,
- BitstreamWriter &Stream) {
- Stream.EnterSubblock(bitc::FUNCTION_BLOCK_ID, 4);
- VE.incorporateFunction(F);
-
- SmallVector<unsigned, 64> Vals;
-
- // Emit the number of basic blocks, so the reader can create them ahead of
- // time.
- Vals.push_back(VE.getBasicBlocks().size());
- Stream.EmitRecord(bitc::FUNC_CODE_DECLAREBLOCKS, Vals);
- Vals.clear();
-
- // If there are function-local constants, emit them now.
- unsigned CstStart, CstEnd;
- VE.getFunctionConstantRange(CstStart, CstEnd);
- WriteConstants(CstStart, CstEnd, VE, Stream, false);
-
- // If there is function-local metadata, emit it now.
- WriteFunctionLocalMetadata(F, VE, Stream);
-
- // Keep a running idea of what the instruction ID is.
- unsigned InstID = CstEnd;
-
- // Finally, emit all the instructions, in order.
- for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
- for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
- I != E; ++I) {
- WriteInstruction(*I, InstID, VE, Stream, Vals);
- if (!I->getType()->isVoidTy())
- ++InstID;
- }
-
- // Emit names for all the instructions etc.
- WriteValueSymbolTable(F.getValueSymbolTable(), VE, Stream);
-
- WriteMetadataAttachment(F, VE, Stream);
- VE.purgeFunction();
- Stream.ExitBlock();
-}
-
-/// WriteTypeSymbolTable - Emit a block for the specified type symtab.
-static void WriteTypeSymbolTable(const TypeSymbolTable &TST,
- const ValueEnumerator &VE,
- BitstreamWriter &Stream) {
- if (TST.empty()) return;
-
- Stream.EnterSubblock(bitc::TYPE_SYMTAB_BLOCK_ID, 3);
-
- // 7-bit fixed width VST_CODE_ENTRY strings.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_ENTRY));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- Log2_32_Ceil(VE.getTypes().size()+1)));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7));
- unsigned V7Abbrev = Stream.EmitAbbrev(Abbv);
-
- SmallVector<unsigned, 64> NameVals;
-
- for (TypeSymbolTable::const_iterator TI = TST.begin(), TE = TST.end();
- TI != TE; ++TI) {
- // TST_ENTRY: [typeid, namechar x N]
- NameVals.push_back(VE.getTypeID(TI->second));
-
- const std::string &Str = TI->first;
- bool is7Bit = true;
- for (unsigned i = 0, e = Str.size(); i != e; ++i) {
- NameVals.push_back((unsigned char)Str[i]);
- if (Str[i] & 128)
- is7Bit = false;
- }
-
- // Emit the finished record.
- Stream.EmitRecord(bitc::VST_CODE_ENTRY, NameVals, is7Bit ? V7Abbrev : 0);
- NameVals.clear();
- }
-
- Stream.ExitBlock();
-}
-
-// Emit blockinfo, which defines the standard abbreviations etc.
-static void WriteBlockInfo(const ValueEnumerator &VE, BitstreamWriter &Stream) {
- // We only want to emit block info records for blocks that have multiple
- // instances: CONSTANTS_BLOCK, FUNCTION_BLOCK and VALUE_SYMTAB_BLOCK. Other
- // blocks can define their abbrevs inline.
- Stream.EnterBlockInfoBlock(2);
-
- { // 8-bit fixed-width VST_ENTRY/VST_BBENTRY strings.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8));
- if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID,
- Abbv) != VST_ENTRY_8_ABBREV)
- llvm_unreachable("Unexpected abbrev ordering!");
- }
-
- { // 7-bit fixed width VST_ENTRY strings.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_ENTRY));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7));
- if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID,
- Abbv) != VST_ENTRY_7_ABBREV)
- llvm_unreachable("Unexpected abbrev ordering!");
- }
- { // 6-bit char6 VST_ENTRY strings.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_ENTRY));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
- if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID,
- Abbv) != VST_ENTRY_6_ABBREV)
- llvm_unreachable("Unexpected abbrev ordering!");
- }
- { // 6-bit char6 VST_BBENTRY strings.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_BBENTRY));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
- if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID,
- Abbv) != VST_BBENTRY_6_ABBREV)
- llvm_unreachable("Unexpected abbrev ordering!");
- }
-
-
-
- { // SETTYPE abbrev for CONSTANTS_BLOCK.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_SETTYPE));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
- Log2_32_Ceil(VE.getTypes().size()+1)));
- if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID,
- Abbv) != CONSTANTS_SETTYPE_ABBREV)
- llvm_unreachable("Unexpected abbrev ordering!");
- }
-
- { // INTEGER abbrev for CONSTANTS_BLOCK.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_INTEGER));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
- if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID,
- Abbv) != CONSTANTS_INTEGER_ABBREV)
- llvm_unreachable("Unexpected abbrev ordering!");
- }
-
- { // CE_CAST abbrev for CONSTANTS_BLOCK.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CE_CAST));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // cast opc
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // typeid
- Log2_32_Ceil(VE.getTypes().size()+1)));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id
-
- if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID,
- Abbv) != CONSTANTS_CE_CAST_Abbrev)
- llvm_unreachable("Unexpected abbrev ordering!");
- }
- { // NULL abbrev for CONSTANTS_BLOCK.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_NULL));
- if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID,
- Abbv) != CONSTANTS_NULL_Abbrev)
- llvm_unreachable("Unexpected abbrev ordering!");
- }
-
- // FIXME: This should only use space for first class types!
-
- { // INST_LOAD abbrev for FUNCTION_BLOCK.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_LOAD));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Ptr
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // Align
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // volatile
- if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID,
- Abbv) != FUNCTION_INST_LOAD_ABBREV)
- llvm_unreachable("Unexpected abbrev ordering!");
- }
- { // INST_BINOP abbrev for FUNCTION_BLOCK.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_BINOP));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LHS
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // RHS
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc
- if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID,
- Abbv) != FUNCTION_INST_BINOP_ABBREV)
- llvm_unreachable("Unexpected abbrev ordering!");
- }
- { // INST_BINOP_FLAGS abbrev for FUNCTION_BLOCK.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_BINOP));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LHS
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // RHS
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7)); // flags
- if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID,
- Abbv) != FUNCTION_INST_BINOP_FLAGS_ABBREV)
- llvm_unreachable("Unexpected abbrev ordering!");
- }
- { // INST_CAST abbrev for FUNCTION_BLOCK.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_CAST));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // OpVal
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty
- Log2_32_Ceil(VE.getTypes().size()+1)));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc
- if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID,
- Abbv) != FUNCTION_INST_CAST_ABBREV)
- llvm_unreachable("Unexpected abbrev ordering!");
- }
-
- { // INST_RET abbrev for FUNCTION_BLOCK.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_RET));
- if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID,
- Abbv) != FUNCTION_INST_RET_VOID_ABBREV)
- llvm_unreachable("Unexpected abbrev ordering!");
- }
- { // INST_RET abbrev for FUNCTION_BLOCK.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_RET));
- Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ValID
- if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID,
- Abbv) != FUNCTION_INST_RET_VAL_ABBREV)
- llvm_unreachable("Unexpected abbrev ordering!");
- }
- { // INST_UNREACHABLE abbrev for FUNCTION_BLOCK.
- BitCodeAbbrev *Abbv = new BitCodeAbbrev();
- Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_UNREACHABLE));
- if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID,
- Abbv) != FUNCTION_INST_UNREACHABLE_ABBREV)
- llvm_unreachable("Unexpected abbrev ordering!");
- }
-
- Stream.ExitBlock();
-}
-
-
-/// WriteModule - Emit the specified module to the bitstream.
-static void WriteModule(const Module *M, BitstreamWriter &Stream) {
- Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3);
-
- // Emit the version number if it is non-zero.
- if (CurVersion) {
- SmallVector<unsigned, 1> Vals;
- Vals.push_back(CurVersion);
- Stream.EmitRecord(bitc::MODULE_CODE_VERSION, Vals);
- }
-
- // Analyze the module, enumerating globals, functions, etc.
- ValueEnumerator VE(M);
-
- // Emit blockinfo, which defines the standard abbreviations etc.
- WriteBlockInfo(VE, Stream);
-
- // Emit information about parameter attributes.
- WriteAttributeTable(VE, Stream);
-
- // Emit information describing all of the types in the module.
- WriteTypeTable(VE, Stream);
-
- // Emit top-level description of module, including target triple, inline asm,
- // descriptors for global variables, and function prototype info.
- WriteModuleInfo(M, VE, Stream);
-
- // Emit constants.
- WriteModuleConstants(VE, Stream);
-
- // Emit metadata.
- WriteModuleMetadata(VE, Stream);
-
- // Emit function bodies.
- for (Module::const_iterator I = M->begin(), E = M->end(); I != E; ++I)
- if (!I->isDeclaration())
- WriteFunction(*I, VE, Stream);
-
- // Emit metadata.
- WriteModuleMetadataStore(M, Stream);
-
- // Emit the type symbol table information.
- WriteTypeSymbolTable(M->getTypeSymbolTable(), VE, Stream);
-
- // Emit names for globals/functions etc.
- WriteValueSymbolTable(M->getValueSymbolTable(), VE, Stream);
-
- Stream.ExitBlock();
-}
-
-/// EmitDarwinBCHeader - If generating a bc file on darwin, we have to emit a
-/// header and trailer to make it compatible with the system archiver. To do
-/// this we emit the following header, and then emit a trailer that pads the
-/// file out to be a multiple of 16 bytes.
-///
-/// struct bc_header {
-/// uint32_t Magic; // 0x0B17C0DE
-/// uint32_t Version; // Version, currently always 0.
-/// uint32_t BitcodeOffset; // Offset to traditional bitcode file.
-/// uint32_t BitcodeSize; // Size of traditional bitcode file.
-/// uint32_t CPUType; // CPU specifier.
-/// ... potentially more later ...
-/// };
-enum {
- DarwinBCSizeFieldOffset = 3*4, // Offset to bitcode_size.
- DarwinBCHeaderSize = 5*4
-};
-
-/// isARMTriplet - Return true if the triplet looks like:
-/// arm-*, thumb-*, armv[0-9]-*, thumbv[0-9]-*, armv5te-*, or armv6t2-*.
-static bool isARMTriplet(const std::string &TT) {
- size_t Pos = 0;
- size_t Size = TT.size();
- if (Size >= 6 &&
- TT[0] == 't' && TT[1] == 'h' && TT[2] == 'u' &&
- TT[3] == 'm' && TT[4] == 'b')
- Pos = 5;
- else if (Size >= 4 && TT[0] == 'a' && TT[1] == 'r' && TT[2] == 'm')
- Pos = 3;
- else
- return false;
-
- if (TT[Pos] == '-')
- return true;
- else if (TT[Pos] == 'v') {
- if (Size >= Pos+4 &&
- TT[Pos+1] == '6' && TT[Pos+2] == 't' && TT[Pos+3] == '2')
- return true;
- else if (Size >= Pos+4 &&
- TT[Pos+1] == '5' && TT[Pos+2] == 't' && TT[Pos+3] == 'e')
- return true;
- } else
- return false;
- while (++Pos < Size && TT[Pos] != '-') {
- if (!isdigit(TT[Pos]))
- return false;
- }
- return true;
-}
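
A few hedged spot-checks of the matcher above (assuming the static function were visible to a small test in the same translation unit):

#include <cassert>
#include <string>

static void checkARMTripletExamples() {
  assert( isARMTriplet("arm-apple-darwin9"));      // bare "arm-"
  assert( isARMTriplet("thumbv7-apple-darwin10")); // "thumbv" + digits
  assert( isARMTriplet("armv5te-none-eabi"));      // explicit v5te case
  assert(!isARMTriplet("armadillo-elf"));          // no '-' or 'v' after "arm"
  assert(!isARMTriplet("x86_64-apple-darwin10"));  // not ARM at all
}
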
-
-static void EmitDarwinBCHeader(BitstreamWriter &Stream,
- const std::string &TT) {
- unsigned CPUType = ~0U;
-
- // Match x86_64-*, i[3-9]86-*, powerpc-*, powerpc64-*, arm-*, thumb-*,
- // armv[0-9]-*, thumbv[0-9]-*, armv5te-*, or armv6t2-*. The CPUType is a magic
- // number from /usr/include/mach/machine.h. It is ok to reproduce the
- // specific constants here because they are implicitly part of the Darwin ABI.
- enum {
- DARWIN_CPU_ARCH_ABI64 = 0x01000000,
- DARWIN_CPU_TYPE_X86 = 7,
- DARWIN_CPU_TYPE_ARM = 12,
- DARWIN_CPU_TYPE_POWERPC = 18
- };
-
- if (TT.find("x86_64-") == 0)
- CPUType = DARWIN_CPU_TYPE_X86 | DARWIN_CPU_ARCH_ABI64;
- else if (TT.size() >= 5 && TT[0] == 'i' && TT[2] == '8' && TT[3] == '6' &&
- TT[4] == '-' && TT[1] - '3' < 6)
- CPUType = DARWIN_CPU_TYPE_X86;
- else if (TT.find("powerpc-") == 0)
- CPUType = DARWIN_CPU_TYPE_POWERPC;
- else if (TT.find("powerpc64-") == 0)
- CPUType = DARWIN_CPU_TYPE_POWERPC | DARWIN_CPU_ARCH_ABI64;
- else if (isARMTriplet(TT))
- CPUType = DARWIN_CPU_TYPE_ARM;
-
- // Traditional Bitcode starts after header.
- unsigned BCOffset = DarwinBCHeaderSize;
-
- Stream.Emit(0x0B17C0DE, 32);
- Stream.Emit(0 , 32); // Version.
- Stream.Emit(BCOffset , 32);
- Stream.Emit(0 , 32); // Filled in later.
- Stream.Emit(CPUType , 32);
-}
-
-/// EmitDarwinBCTrailer - Emit the darwin epilog after the bitcode file and
-/// finalize the header.
-static void EmitDarwinBCTrailer(BitstreamWriter &Stream, unsigned BufferSize) {
- // Update the size field in the header.
- Stream.BackpatchWord(DarwinBCSizeFieldOffset, BufferSize-DarwinBCHeaderSize);
-
- // If the file is not a multiple of 16 bytes, insert dummy padding.
- while (BufferSize & 15) {
- Stream.Emit(0, 8);
- ++BufferSize;
- }
-}
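
The padding loop above rounds the buffer up to the next multiple of 16. The same rule as plain arithmetic (helper name is mine); for example, a 5000-byte buffer grows by 8 zero bytes to 5008:

static unsigned darwinPaddedSize(unsigned BufferSize) {
  return (BufferSize + 15) & ~15u;
}
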
-
-
-/// WriteBitcodeToFile - Write the specified module to the specified output
-/// stream.
-void llvm::WriteBitcodeToFile(const Module *M, raw_ostream &Out) {
- std::vector<unsigned char> Buffer;
- BitstreamWriter Stream(Buffer);
-
- Buffer.reserve(256*1024);
-
- WriteBitcodeToStream( M, Stream );
-
- // If writing to stdout, set binary mode.
- if (&llvm::outs() == &Out)
- sys::Program::ChangeStdoutToBinary();
-
- // Write the generated bitstream to "Out".
- Out.write((char*)&Buffer.front(), Buffer.size());
-
- // Make sure it hits disk now.
- Out.flush();
-}
-
-/// WriteBitcodeToStream - Write the specified module to the specified output
-/// stream.
-void llvm::WriteBitcodeToStream(const Module *M, BitstreamWriter &Stream) {
- // If this is darwin, emit a file header and trailer if needed.
- bool isDarwin = M->getTargetTriple().find("-darwin") != std::string::npos;
- if (isDarwin)
- EmitDarwinBCHeader(Stream, M->getTargetTriple());
-
- // Emit the file header.
- Stream.Emit((unsigned)'B', 8);
- Stream.Emit((unsigned)'C', 8);
- Stream.Emit(0x0, 4);
- Stream.Emit(0xC, 4);
- Stream.Emit(0xE, 4);
- Stream.Emit(0xD, 4);
-
- // Emit the module.
- WriteModule(M, Stream);
-
- if (isDarwin)
- EmitDarwinBCTrailer(Stream, Stream.getBuffer().size());
-}
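
With the bitstream's low-bits-first packing, the six Emit() calls above land in the file as the bytes 'B', 'C', 0xC0, 0xDE. A small hedged helper (mine, not an LLVM API) that recognizes that raw-bitcode magic:

#include <cstddef>
#include <cstring>

static bool hasRawBitcodeMagic(const unsigned char *Buf, std::size_t Size) {
  static const unsigned char Magic[4] = { 'B', 'C', 0xC0, 0xDE };
  return Size >= 4 && std::memcmp(Buf, Magic, 4) == 0;
}
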
diff --git a/libclamav/c++/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp b/libclamav/c++/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp
deleted file mode 100644
index 3a0d3ce..0000000
--- a/libclamav/c++/llvm/lib/Bitcode/Writer/BitcodeWriterPass.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-//===--- Bitcode/Writer/BitcodeWriterPass.cpp - Bitcode Writer ------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// BitcodeWriterPass implementation.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Bitcode/ReaderWriter.h"
-#include "llvm/Pass.h"
-using namespace llvm;
-
-namespace {
- class WriteBitcodePass : public ModulePass {
- raw_ostream &OS; // raw_ostream to print on
- public:
- static char ID; // Pass identification, replacement for typeid
- explicit WriteBitcodePass(raw_ostream &o)
- : ModulePass(&ID), OS(o) {}
-
- const char *getPassName() const { return "Bitcode Writer"; }
-
- bool runOnModule(Module &M) {
- WriteBitcodeToFile(&M, OS);
- return false;
- }
- };
-}
-
-char WriteBitcodePass::ID = 0;
-
-/// createBitcodeWriterPass - Create and return a pass that writes the module
-/// to the specified ostream.
-ModulePass *llvm::createBitcodeWriterPass(raw_ostream &Str) {
- return new WriteBitcodePass(Str);
-}
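
A hedged usage sketch for the pass as it exists in this 2.8-era tree (include paths and the PassManager API are taken from this source; other LLVM versions differ):

#include "llvm/Module.h"
#include "llvm/PassManager.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void emitModuleAsBitcode(Module &M, raw_ostream &OS) {
  PassManager PM;
  PM.add(createBitcodeWriterPass(OS)); // schedules WriteBitcodePass
  PM.run(M);                           // runOnModule() streams the bitcode
  OS.flush();
}
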
diff --git a/libclamav/c++/llvm/lib/Bitcode/Writer/CMakeLists.txt b/libclamav/c++/llvm/lib/Bitcode/Writer/CMakeLists.txt
deleted file mode 100644
index f097b09..0000000
--- a/libclamav/c++/llvm/lib/Bitcode/Writer/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-add_llvm_library(LLVMBitWriter
- BitWriter.cpp
- BitcodeWriter.cpp
- BitcodeWriterPass.cpp
- ValueEnumerator.cpp
- )
diff --git a/libclamav/c++/llvm/lib/Bitcode/Writer/Makefile b/libclamav/c++/llvm/lib/Bitcode/Writer/Makefile
deleted file mode 100644
index 7b0bd72..0000000
--- a/libclamav/c++/llvm/lib/Bitcode/Writer/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Bitcode/Writer/Makefile -------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMBitWriter
-BUILD_ARCHIVE = 1
-
-include $(LEVEL)/Makefile.common
-
diff --git a/libclamav/c++/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp b/libclamav/c++/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
deleted file mode 100644
index aa4c3af..0000000
--- a/libclamav/c++/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
+++ /dev/null
@@ -1,460 +0,0 @@
-//===-- ValueEnumerator.cpp - Number values and types for bitcode writer --===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the ValueEnumerator class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ValueEnumerator.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Module.h"
-#include "llvm/TypeSymbolTable.h"
-#include "llvm/ValueSymbolTable.h"
-#include "llvm/Instructions.h"
-#include <algorithm>
-using namespace llvm;
-
-static bool isSingleValueType(const std::pair<const llvm::Type*,
- unsigned int> &P) {
- return P.first->isSingleValueType();
-}
-
-static bool isIntegerValue(const std::pair<const Value*, unsigned> &V) {
- return V.first->getType()->isIntegerTy();
-}
-
-static bool CompareByFrequency(const std::pair<const llvm::Type*,
- unsigned int> &P1,
- const std::pair<const llvm::Type*,
- unsigned int> &P2) {
- return P1.second > P2.second;
-}
-
-/// ValueEnumerator - Enumerate module-level information.
-ValueEnumerator::ValueEnumerator(const Module *M) {
- // Enumerate the global variables.
- for (Module::const_global_iterator I = M->global_begin(),
- E = M->global_end(); I != E; ++I)
- EnumerateValue(I);
-
- // Enumerate the functions.
- for (Module::const_iterator I = M->begin(), E = M->end(); I != E; ++I) {
- EnumerateValue(I);
- EnumerateAttributes(cast<Function>(I)->getAttributes());
- }
-
- // Enumerate the aliases.
- for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
- I != E; ++I)
- EnumerateValue(I);
-
- // Remember the cutoff between global values and other constants.
- unsigned FirstConstant = Values.size();
-
- // Enumerate the global variable initializers.
- for (Module::const_global_iterator I = M->global_begin(),
- E = M->global_end(); I != E; ++I)
- if (I->hasInitializer())
- EnumerateValue(I->getInitializer());
-
- // Enumerate the aliasees.
- for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
- I != E; ++I)
- EnumerateValue(I->getAliasee());
-
- // Enumerate types used by the type symbol table.
- EnumerateTypeSymbolTable(M->getTypeSymbolTable());
-
- // Insert constants and metadata that are named at module level into the slot
- // pool so that the module symbol table can refer to them...
- EnumerateValueSymbolTable(M->getValueSymbolTable());
- EnumerateMDSymbolTable(M->getMDSymbolTable());
-
- SmallVector<std::pair<unsigned, MDNode*>, 8> MDs;
-
- // Enumerate types used by function bodies and argument lists.
- for (Module::const_iterator F = M->begin(), E = M->end(); F != E; ++F) {
-
- for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
- I != E; ++I)
- EnumerateType(I->getType());
-
- for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
- for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;++I){
- for (User::const_op_iterator OI = I->op_begin(), E = I->op_end();
- OI != E; ++OI) {
- if (MDNode *MD = dyn_cast<MDNode>(*OI))
- if (MD->isFunctionLocal() && MD->getFunction())
- // These will get enumerated during function-incorporation.
- continue;
- EnumerateOperandType(*OI);
- }
- EnumerateType(I->getType());
- if (const CallInst *CI = dyn_cast<CallInst>(I))
- EnumerateAttributes(CI->getAttributes());
- else if (const InvokeInst *II = dyn_cast<InvokeInst>(I))
- EnumerateAttributes(II->getAttributes());
-
- // Enumerate metadata attached with this instruction.
- MDs.clear();
- I->getAllMetadata(MDs);
- for (unsigned i = 0, e = MDs.size(); i != e; ++i)
- EnumerateMetadata(MDs[i].second);
- }
- }
-
- // Optimize constant ordering.
- OptimizeConstants(FirstConstant, Values.size());
-
- // Sort the type table by frequency so that the most commonly used types come
- // early in the table and thus get small IDs that encode in fewer bits.
- std::stable_sort(Types.begin(), Types.end(), CompareByFrequency);
-
- // Partition the Type ID's so that the single-value types occur before the
- // aggregate types. This allows the aggregate types to be dropped from the
- // type table after parsing the global variable initializers.
- std::partition(Types.begin(), Types.end(), isSingleValueType);
-
- // Now that we rearranged the type table, rebuild TypeMap.
- for (unsigned i = 0, e = Types.size(); i != e; ++i)
- TypeMap[Types[i].first] = i+1;
-}
-
-unsigned ValueEnumerator::getInstructionID(const Instruction *Inst) const {
- InstructionMapType::const_iterator I = InstructionMap.find(Inst);
- assert (I != InstructionMap.end() && "Instruction is not mapped!");
- return I->second;
-}
-
-void ValueEnumerator::setInstructionID(const Instruction *I) {
- InstructionMap[I] = InstructionCount++;
-}
-
-unsigned ValueEnumerator::getValueID(const Value *V) const {
- if (isa<MDNode>(V) || isa<MDString>(V)) {
- ValueMapType::const_iterator I = MDValueMap.find(V);
- assert(I != MDValueMap.end() && "Value not in slotcalculator!");
- return I->second-1;
- }
-
- ValueMapType::const_iterator I = ValueMap.find(V);
- assert(I != ValueMap.end() && "Value not in slotcalculator!");
- return I->second-1;
-}
-
-// Optimize constant ordering.
-namespace {
- struct CstSortPredicate {
- ValueEnumerator &VE;
- explicit CstSortPredicate(ValueEnumerator &ve) : VE(ve) {}
- bool operator()(const std::pair<const Value*, unsigned> &LHS,
- const std::pair<const Value*, unsigned> &RHS) {
- // Sort by plane.
- if (LHS.first->getType() != RHS.first->getType())
- return VE.getTypeID(LHS.first->getType()) <
- VE.getTypeID(RHS.first->getType());
- // Then by frequency.
- return LHS.second > RHS.second;
- }
- };
-}
-
-/// OptimizeConstants - Reorder constant pool for denser encoding.
-void ValueEnumerator::OptimizeConstants(unsigned CstStart, unsigned CstEnd) {
- if (CstStart == CstEnd || CstStart+1 == CstEnd) return;
-
- CstSortPredicate P(*this);
- std::stable_sort(Values.begin()+CstStart, Values.begin()+CstEnd, P);
-
- // Ensure that integer constants are at the start of the constant pool. This
- // is important so that GEP structure indices come before gep constant exprs.
- std::partition(Values.begin()+CstStart, Values.begin()+CstEnd,
- isIntegerValue);
-
- // Rebuild the modified portion of ValueMap.
- for (; CstStart != CstEnd; ++CstStart)
- ValueMap[Values[CstStart].first] = CstStart+1;
-}
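
As a hedged illustration: for a pool holding { float 1.0, i32 7, float 2.5, i32 0 }, the sort groups each type plane with its most frequently used constants first, and the partition then places both i32 constants ahead of the two floats (their relative order after std::partition is unspecified), so small integer constants such as GEP structure indices receive low value IDs.
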
-
-
-/// EnumerateTypeSymbolTable - Insert all of the types in the specified symbol
-/// table.
-void ValueEnumerator::EnumerateTypeSymbolTable(const TypeSymbolTable &TST) {
- for (TypeSymbolTable::const_iterator TI = TST.begin(), TE = TST.end();
- TI != TE; ++TI)
- EnumerateType(TI->second);
-}
-
-/// EnumerateValueSymbolTable - Insert all of the values in the specified symbol
-/// table into the values table.
-void ValueEnumerator::EnumerateValueSymbolTable(const ValueSymbolTable &VST) {
- for (ValueSymbolTable::const_iterator VI = VST.begin(), VE = VST.end();
- VI != VE; ++VI)
- EnumerateValue(VI->getValue());
-}
-
-/// EnumerateMDSymbolTable - Insert all of the values in the specified metadata
-/// table.
-void ValueEnumerator::EnumerateMDSymbolTable(const MDSymbolTable &MST) {
- for (MDSymbolTable::const_iterator MI = MST.begin(), ME = MST.end();
- MI != ME; ++MI)
- EnumerateValue(MI->getValue());
-}
-
-void ValueEnumerator::EnumerateNamedMDNode(const NamedMDNode *MD) {
- // Check to see if it's already in!
- unsigned &MDValueID = MDValueMap[MD];
- if (MDValueID) {
- // Increment use count.
- MDValues[MDValueID-1].second++;
- return;
- }
-
- // Enumerate the type of this value.
- EnumerateType(MD->getType());
-
- for (unsigned i = 0, e = MD->getNumOperands(); i != e; ++i)
- if (MDNode *E = MD->getOperand(i))
- EnumerateValue(E);
- MDValues.push_back(std::make_pair(MD, 1U));
- MDValueMap[MD] = Values.size();
-}
-
-void ValueEnumerator::EnumerateMetadata(const Value *MD) {
- assert((isa<MDNode>(MD) || isa<MDString>(MD)) && "Invalid metadata kind");
- // Check to see if it's already in!
- unsigned &MDValueID = MDValueMap[MD];
- if (MDValueID) {
- // Increment use count.
- MDValues[MDValueID-1].second++;
- return;
- }
-
- // Enumerate the type of this value.
- EnumerateType(MD->getType());
-
- if (const MDNode *N = dyn_cast<MDNode>(MD)) {
- MDValues.push_back(std::make_pair(MD, 1U));
- MDValueMap[MD] = MDValues.size();
- MDValueID = MDValues.size();
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
- if (Value *V = N->getOperand(i))
- EnumerateValue(V);
- else
- EnumerateType(Type::getVoidTy(MD->getContext()));
- }
- return;
- }
-
- // Add the value.
- assert(isa<MDString>(MD) && "Unknown metadata kind");
- MDValues.push_back(std::make_pair(MD, 1U));
- MDValueID = MDValues.size();
-}
-
-void ValueEnumerator::EnumerateValue(const Value *V) {
- assert(!V->getType()->isVoidTy() && "Can't insert void values!");
- if (isa<MDNode>(V) || isa<MDString>(V))
- return EnumerateMetadata(V);
- else if (const NamedMDNode *NMD = dyn_cast<NamedMDNode>(V))
- return EnumerateNamedMDNode(NMD);
-
- // Check to see if it's already in!
- unsigned &ValueID = ValueMap[V];
- if (ValueID) {
- // Increment use count.
- Values[ValueID-1].second++;
- return;
- }
-
- // Enumerate the type of this value.
- EnumerateType(V->getType());
-
- if (const Constant *C = dyn_cast<Constant>(V)) {
- if (isa<GlobalValue>(C)) {
- // Initializers for globals are handled explicitly elsewhere.
- } else if (isa<ConstantArray>(C) && cast<ConstantArray>(C)->isString()) {
- // Do not enumerate the initializers for an array of simple characters.
- // The initializers just pollute the value table, and we emit the strings
- // specially.
- } else if (C->getNumOperands()) {
- // If a constant has operands, enumerate them. This makes sure that if a
- // constant has uses (for example an array of const ints), that they are
- // inserted also.
-
- // We prefer to enumerate the operand values before we enumerate the user
- // itself. This makes it more likely that we can avoid forward references
- // in the reader. We know that there can be no cycles in the constants
- // graph that don't go through a global variable.
- for (User::const_op_iterator I = C->op_begin(), E = C->op_end();
- I != E; ++I)
- if (!isa<BasicBlock>(*I)) // Don't enumerate BB operand to BlockAddress.
- EnumerateValue(*I);
-
- // Finally, add the value. Doing this could leave the ValueID reference
- // dangling, so don't reuse it.
- Values.push_back(std::make_pair(V, 1U));
- ValueMap[V] = Values.size();
- return;
- }
- }
-
- // Add the value.
- Values.push_back(std::make_pair(V, 1U));
- ValueID = Values.size();
-}
-
-
-void ValueEnumerator::EnumerateType(const Type *Ty) {
- unsigned &TypeID = TypeMap[Ty];
-
- if (TypeID) {
- // If we've already seen this type, just increase its occurrence count.
- Types[TypeID-1].second++;
- return;
- }
-
- // First time we saw this type, add it.
- Types.push_back(std::make_pair(Ty, 1U));
- TypeID = Types.size();
-
- // Enumerate subtypes.
- for (Type::subtype_iterator I = Ty->subtype_begin(), E = Ty->subtype_end();
- I != E; ++I)
- EnumerateType(*I);
-}
-
-// Enumerate the types for the specified value. If the value is a constant,
-// walk through it, enumerating the types of the constant.
-void ValueEnumerator::EnumerateOperandType(const Value *V) {
- EnumerateType(V->getType());
-
- if (const Constant *C = dyn_cast<Constant>(V)) {
- // If this constant is already enumerated, ignore it, we know its type must
- // be enumerated.
- if (ValueMap.count(V)) return;
-
- // This constant may have operands, make sure to enumerate the types in
- // them.
- for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
- const User *Op = C->getOperand(i);
-
- // Don't enumerate basic blocks here, this happens as operands to
- // blockaddress.
- if (isa<BasicBlock>(Op)) continue;
-
- EnumerateOperandType(cast<Constant>(Op));
- }
-
- if (const MDNode *N = dyn_cast<MDNode>(V)) {
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- if (Value *Elem = N->getOperand(i))
- EnumerateOperandType(Elem);
- }
- } else if (isa<MDString>(V) || isa<MDNode>(V))
- EnumerateValue(V);
-}
-
-void ValueEnumerator::EnumerateAttributes(const AttrListPtr &PAL) {
- if (PAL.isEmpty()) return; // null is always 0.
- // Do a lookup.
- unsigned &Entry = AttributeMap[PAL.getRawPointer()];
- if (Entry == 0) {
- // Never saw this before, add it.
- Attributes.push_back(PAL);
- Entry = Attributes.size();
- }
-}
-
-
-void ValueEnumerator::incorporateFunction(const Function &F) {
- InstructionCount = 0;
- NumModuleValues = Values.size();
-
- // Adding function arguments to the value table.
- for(Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
- I != E; ++I)
- EnumerateValue(I);
-
- FirstFuncConstantID = Values.size();
-
- // Add all function-level constants to the value table.
- for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
- for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E; ++I)
- for (User::const_op_iterator OI = I->op_begin(), E = I->op_end();
- OI != E; ++OI) {
- if ((isa<Constant>(*OI) && !isa<GlobalValue>(*OI)) ||
- isa<InlineAsm>(*OI))
- EnumerateValue(*OI);
- }
- BasicBlocks.push_back(BB);
- ValueMap[BB] = BasicBlocks.size();
- }
-
- // Optimize the constant layout.
- OptimizeConstants(FirstFuncConstantID, Values.size());
-
- // Add the function's parameter attributes so they are available for use in
- // the function's instructions.
- EnumerateAttributes(F.getAttributes());
-
- FirstInstID = Values.size();
-
- SmallVector<MDNode *, 8> FunctionLocalMDs;
- // Add all of the instructions.
- for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
- for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E; ++I) {
- for (User::const_op_iterator OI = I->op_begin(), E = I->op_end();
- OI != E; ++OI) {
- if (MDNode *MD = dyn_cast<MDNode>(*OI))
- if (MD->isFunctionLocal() && MD->getFunction())
- // Enumerate metadata after the instructions they might refer to.
- FunctionLocalMDs.push_back(MD);
- }
- if (!I->getType()->isVoidTy())
- EnumerateValue(I);
- }
- }
-
- // Add all of the function-local metadata.
- for (unsigned i = 0, e = FunctionLocalMDs.size(); i != e; ++i)
- EnumerateOperandType(FunctionLocalMDs[i]);
-}
-
-void ValueEnumerator::purgeFunction() {
- /// Remove purged values from the ValueMap.
- for (unsigned i = NumModuleValues, e = Values.size(); i != e; ++i)
- ValueMap.erase(Values[i].first);
- for (unsigned i = 0, e = BasicBlocks.size(); i != e; ++i)
- ValueMap.erase(BasicBlocks[i]);
-
- Values.resize(NumModuleValues);
- BasicBlocks.clear();
-}
-
-static void IncorporateFunctionInfoGlobalBBIDs(const Function *F,
- DenseMap<const BasicBlock*, unsigned> &IDMap) {
- unsigned Counter = 0;
- for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
- IDMap[BB] = ++Counter;
-}
-
-/// getGlobalBasicBlockID - This returns the function-specific ID for the
-/// specified basic block. This is relatively expensive information, so it
-/// should only be used by rare constructs such as address-of-label.
-unsigned ValueEnumerator::getGlobalBasicBlockID(const BasicBlock *BB) const {
- unsigned &Idx = GlobalBasicBlockIDs[BB];
- if (Idx != 0)
- return Idx-1;
-
- IncorporateFunctionInfoGlobalBBIDs(BB->getParent(), GlobalBasicBlockIDs);
- return getGlobalBasicBlockID(BB);
-}
-
diff --git a/libclamav/c++/llvm/lib/Bitcode/Writer/ValueEnumerator.h b/libclamav/c++/llvm/lib/Bitcode/Writer/ValueEnumerator.h
deleted file mode 100644
index 4f8ebf5..0000000
--- a/libclamav/c++/llvm/lib/Bitcode/Writer/ValueEnumerator.h
+++ /dev/null
@@ -1,144 +0,0 @@
-//===-- Bitcode/Writer/ValueEnumerator.h - Number values --------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class gives values and types Unique ID's.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef VALUE_ENUMERATOR_H
-#define VALUE_ENUMERATOR_H
-
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/Attributes.h"
-#include <vector>
-
-namespace llvm {
-
-class Type;
-class Value;
-class Instruction;
-class BasicBlock;
-class Function;
-class Module;
-class MetadataBase;
-class NamedMDNode;
-class AttrListPtr;
-class TypeSymbolTable;
-class ValueSymbolTable;
-class MDSymbolTable;
-
-class ValueEnumerator {
-public:
- // For each type, we remember its Type* and occurrence frequency.
- typedef std::vector<std::pair<const Type*, unsigned> > TypeList;
-
- // For each value, we remember its Value* and occurrence frequency.
- typedef std::vector<std::pair<const Value*, unsigned> > ValueList;
-private:
- typedef DenseMap<const Type*, unsigned> TypeMapType;
- TypeMapType TypeMap;
- TypeList Types;
-
- typedef DenseMap<const Value*, unsigned> ValueMapType;
- ValueMapType ValueMap;
- ValueList Values;
- ValueList MDValues;
- ValueMapType MDValueMap;
-
- typedef DenseMap<void*, unsigned> AttributeMapType;
- AttributeMapType AttributeMap;
- std::vector<AttrListPtr> Attributes;
-
- /// GlobalBasicBlockIDs - This map memoizes the basic block ID's referenced by
- /// the "getGlobalBasicBlockID" method.
- mutable DenseMap<const BasicBlock*, unsigned> GlobalBasicBlockIDs;
-
- typedef DenseMap<const Instruction*, unsigned> InstructionMapType;
- InstructionMapType InstructionMap;
- unsigned InstructionCount;
-
- /// BasicBlocks - This contains all the basic blocks for the currently
- /// incorporated function. Their reverse mapping is stored in ValueMap.
- std::vector<const BasicBlock*> BasicBlocks;
-
- /// When a function is incorporated, this is the size of the Values list
- /// before incorporation.
- unsigned NumModuleValues;
- unsigned FirstFuncConstantID;
- unsigned FirstInstID;
-
- ValueEnumerator(const ValueEnumerator &); // DO NOT IMPLEMENT
- void operator=(const ValueEnumerator &); // DO NOT IMPLEMENT
-public:
- ValueEnumerator(const Module *M);
-
- unsigned getValueID(const Value *V) const;
-
- unsigned getTypeID(const Type *T) const {
- TypeMapType::const_iterator I = TypeMap.find(T);
- assert(I != TypeMap.end() && "Type not in ValueEnumerator!");
- return I->second-1;
- }
-
- unsigned getInstructionID(const Instruction *I) const;
- void setInstructionID(const Instruction *I);
-
- unsigned getAttributeID(const AttrListPtr &PAL) const {
- if (PAL.isEmpty()) return 0; // Null maps to zero.
- AttributeMapType::const_iterator I = AttributeMap.find(PAL.getRawPointer());
- assert(I != AttributeMap.end() && "Attribute not in ValueEnumerator!");
- return I->second;
- }
-
- /// getFunctionConstantRange - Return the range of values that corresponds to
- /// function-local constants.
- void getFunctionConstantRange(unsigned &Start, unsigned &End) const {
- Start = FirstFuncConstantID;
- End = FirstInstID;
- }
-
- const ValueList &getValues() const { return Values; }
- const ValueList &getMDValues() const { return MDValues; }
- const TypeList &getTypes() const { return Types; }
- const std::vector<const BasicBlock*> &getBasicBlocks() const {
- return BasicBlocks;
- }
- const std::vector<AttrListPtr> &getAttributes() const {
- return Attributes;
- }
-
- /// getGlobalBasicBlockID - This returns the function-specific ID for the
- /// specified basic block. This is relatively expensive information, so it
- /// should only be used by rare constructs such as address-of-label.
- unsigned getGlobalBasicBlockID(const BasicBlock *BB) const;
-
- /// incorporateFunction/purgeFunction - If you'd like to deal with a function,
- /// use these two methods to get its data into the ValueEnumerator!
- ///
- void incorporateFunction(const Function &F);
- void purgeFunction();
-
-private:
- void OptimizeConstants(unsigned CstStart, unsigned CstEnd);
-
- void EnumerateMetadata(const Value *MD);
- void EnumerateNamedMDNode(const NamedMDNode *NMD);
- void EnumerateValue(const Value *V);
- void EnumerateType(const Type *T);
- void EnumerateOperandType(const Value *V);
- void EnumerateAttributes(const AttrListPtr &PAL);
-
- void EnumerateTypeSymbolTable(const TypeSymbolTable &ST);
- void EnumerateValueSymbolTable(const ValueSymbolTable &ST);
- void EnumerateMDSymbolTable(const MDSymbolTable &ST);
-};
-
-} // End llvm namespace
-
-#endif
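The deleted header spells out the intended calling pattern: incorporateFunction() and purgeFunction() bracket the per-function work so that function-local state is dropped again before the next function is processed. A usage-shaped sketch of that contract follows; it assumes it is compiled inside the LLVM tree where these headers live, and it is an illustration, not part of the patch:

#include "ValueEnumerator.h"          // the private Writer header shown above
#include "llvm/Module.h"
using namespace llvm;

static void enumerateModule(const Module &M) {
  ValueEnumerator VE(&M);             // numbers module-level values and types
  for (Module::const_iterator F = M.begin(), E = M.end(); F != E; ++F) {
    VE.incorporateFunction(*F);       // append function-local constants, metadata, instructions
    // ... emit the function body using VE.getValueID() / VE.getTypeID() ...
    VE.purgeFunction();               // truncate back to module-level state
  }
}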
diff --git a/libclamav/c++/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/libclamav/c++/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
index 8840622..5a634d6 100644
--- a/libclamav/c++/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@@ -21,6 +21,7 @@
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -31,17 +32,20 @@ using namespace llvm;
// If DebugDiv > 0 then only break antidep with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("agg-antidep-debugdiv",
- cl::desc("Debug control for aggressive anti-dep breaker"),
- cl::init(0), cl::Hidden);
+ cl::desc("Debug control for aggressive anti-dep breaker"),
+ cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("agg-antidep-debugmod",
- cl::desc("Debug control for aggressive anti-dep breaker"),
- cl::init(0), cl::Hidden);
+ cl::desc("Debug control for aggressive anti-dep breaker"),
+ cl::init(0), cl::Hidden);
AggressiveAntiDepState::AggressiveAntiDepState(const unsigned TargetRegs,
MachineBasicBlock *BB) :
- NumTargetRegs(TargetRegs), GroupNodes(TargetRegs, 0) {
-
+ NumTargetRegs(TargetRegs), GroupNodes(TargetRegs, 0),
+ GroupNodeIndices(TargetRegs, 0),
+ KillIndices(TargetRegs, 0),
+ DefIndices(TargetRegs, 0)
+{
const unsigned BBSize = BB->size();
for (unsigned i = 0; i < NumTargetRegs; ++i) {
// Initialize all registers to be in their own group. Initially we
@@ -53,8 +57,7 @@ AggressiveAntiDepState::AggressiveAntiDepState(const unsigned TargetRegs,
}
}
-unsigned AggressiveAntiDepState::GetGroup(unsigned Reg)
-{
+unsigned AggressiveAntiDepState::GetGroup(unsigned Reg) {
unsigned Node = GroupNodeIndices[Reg];
while (GroupNodes[Node] != Node)
Node = GroupNodes[Node];
@@ -114,6 +117,7 @@ AggressiveAntiDepBreaker(MachineFunction& MFi,
TargetSubtarget::RegClassVector& CriticalPathRCs) :
AntiDepBreaker(), MF(MFi),
MRI(MF.getRegInfo()),
+ TII(MF.getTarget().getInstrInfo()),
TRI(MF.getTarget().getRegisterInfo()),
AllocatableSet(TRI->getAllocatableSet(MF)),
State(NULL) {
@@ -143,8 +147,8 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
State = new AggressiveAntiDepState(TRI->getNumRegs(), BB);
bool IsReturnBlock = (!BB->empty() && BB->back().getDesc().isReturn());
- unsigned *KillIndices = State->GetKillIndices();
- unsigned *DefIndices = State->GetDefIndices();
+ std::vector<unsigned> &KillIndices = State->GetKillIndices();
+ std::vector<unsigned> &DefIndices = State->GetDefIndices();
// Determine the live-out physregs for this block.
if (IsReturnBlock) {
@@ -163,25 +167,27 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
DefIndices[AliasReg] = ~0u;
}
}
- } else {
- // In a non-return block, examine the live-in regs of all successors.
- for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
+ }
+
+ // In a non-return block, examine the live-in regs of all successors.
+ // Note a return block can have successors if the return instruction is
+ // predicated.
+ for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
SE = BB->succ_end(); SI != SE; ++SI)
- for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
+ for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
E = (*SI)->livein_end(); I != E; ++I) {
- unsigned Reg = *I;
- State->UnionGroups(Reg, 0);
- KillIndices[Reg] = BB->size();
- DefIndices[Reg] = ~0u;
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- State->UnionGroups(AliasReg, 0);
- KillIndices[AliasReg] = BB->size();
- DefIndices[AliasReg] = ~0u;
- }
+ unsigned Reg = *I;
+ State->UnionGroups(Reg, 0);
+ KillIndices[Reg] = BB->size();
+ DefIndices[Reg] = ~0u;
+ // Repeat, for all aliases.
+ for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
+ unsigned AliasReg = *Alias;
+ State->UnionGroups(AliasReg, 0);
+ KillIndices[AliasReg] = BB->size();
+ DefIndices[AliasReg] = ~0u;
}
- }
+ }
// Mark live-out callee-saved registers. In a return block this is
// all callee-saved registers. In non-return this is any
@@ -210,7 +216,7 @@ void AggressiveAntiDepBreaker::FinishBlock() {
}
void AggressiveAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count,
- unsigned InsertPosIndex) {
+ unsigned InsertPosIndex) {
assert(Count < InsertPosIndex && "Instruction index out of expected range!");
std::set<unsigned> PassthruRegs;
@@ -222,7 +228,7 @@ void AggressiveAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count,
DEBUG(MI->dump());
DEBUG(dbgs() << "\tRegs:");
- unsigned *DefIndices = State->GetDefIndices();
+ std::vector<unsigned> &DefIndices = State->GetDefIndices();
for (unsigned Reg = 0; Reg != TRI->getNumRegs(); ++Reg) {
// If Reg is current live, then mark that it can't be renamed as
// we don't know the extent of its live-range anymore (now that it
@@ -244,7 +250,7 @@ void AggressiveAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count,
}
bool AggressiveAntiDepBreaker::IsImplicitDefUse(MachineInstr *MI,
- MachineOperand& MO)
+ MachineOperand& MO)
{
if (!MO.isReg() || !MO.isImplicit())
return false;
@@ -281,9 +287,9 @@ void AggressiveAntiDepBreaker::GetPassthruRegs(MachineInstr *MI,
/// AntiDepEdges - Return in Edges the anti- and output- dependencies
/// in SU that we want to consider for breaking.
-static void AntiDepEdges(SUnit *SU, std::vector<SDep*>& Edges) {
+static void AntiDepEdges(const SUnit *SU, std::vector<const SDep*>& Edges) {
SmallSet<unsigned, 4> RegSet;
- for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
+ for (SUnit::const_pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
P != PE; ++P) {
if ((P->getKind() == SDep::Anti) || (P->getKind() == SDep::Output)) {
unsigned Reg = P->getReg();
@@ -297,14 +303,14 @@ static void AntiDepEdges(SUnit *SU, std::vector<SDep*>& Edges) {
/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
-static SUnit *CriticalPathStep(SUnit *SU) {
- SDep *Next = 0;
+static const SUnit *CriticalPathStep(const SUnit *SU) {
+ const SDep *Next = 0;
unsigned NextDepth = 0;
// Find the predecessor edge with the greatest depth.
if (SU != 0) {
- for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
+ for (SUnit::const_pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
P != PE; ++P) {
- SUnit *PredSU = P->getSUnit();
+ const SUnit *PredSU = P->getSUnit();
unsigned PredLatency = P->getLatency();
unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
// In the case of a latency tie, prefer an anti-dependency edge over
@@ -324,8 +330,8 @@ void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
const char *tag,
const char *header,
const char *footer) {
- unsigned *KillIndices = State->GetKillIndices();
- unsigned *DefIndices = State->GetDefIndices();
+ std::vector<unsigned> &KillIndices = State->GetKillIndices();
+ std::vector<unsigned> &DefIndices = State->GetDefIndices();
std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
RegRefs = State->GetRegRefs();
@@ -359,9 +365,8 @@ void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
unsigned Count,
- std::set<unsigned>& PassthruRegs)
-{
- unsigned *DefIndices = State->GetDefIndices();
+ std::set<unsigned>& PassthruRegs) {
+ std::vector<unsigned> &DefIndices = State->GetDefIndices();
std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
RegRefs = State->GetRegRefs();
@@ -391,7 +396,8 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
// If MI's defs have a special allocation requirement, don't allow
// any def registers to be changed. Also assume all registers
// defined in a call must not be changed (ABI).
- if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq()) {
+ if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq() ||
+ TII->isPredicated(MI)) {
DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)");
State->UnionGroups(Reg, 0);
}
@@ -439,11 +445,31 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
}
void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr *MI,
- unsigned Count) {
+ unsigned Count) {
DEBUG(dbgs() << "\tUse Groups:");
std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
RegRefs = State->GetRegRefs();
+ // If MI's uses have special allocation requirement, don't allow
+ // any use registers to be changed. Also assume all registers
+ // used in a call must not be changed (ABI).
+ // FIXME: The issue with predicated instructions is more complex. We are
+ // being conservative here because the kill markers cannot be trusted after
+ // if-conversion:
+ // %R6<def> = LDR %SP, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
+ // ...
+ // STR %R0, %R6<kill>, %reg0, 0, pred:0, pred:%CPSR; mem:ST4[%395]
+ // %R6<def> = LDR %SP, %reg0, 100, pred:0, pred:%CPSR; mem:LD4[FixedStack12]
+ // STR %R0, %R6<kill>, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
+ //
+ // The first R6 kill is not really a kill since it's killed by a predicated
+ // instruction which may not be executed. The second R6 def may or may not
+ // re-define R6 so it's not safe to change it since the last R6 use cannot be
+ // changed.
+ bool Special = MI->getDesc().isCall() ||
+ MI->getDesc().hasExtraSrcRegAllocReq() ||
+ TII->isPredicated(MI);
+
// Scan the register uses for this instruction and update
// live-ranges, groups and RegRefs.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -460,10 +486,7 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr *MI,
// for the register.
HandleLastUse(Reg, Count, "(last-use)");
- // If MI's uses have special allocation requirement, don't allow
- // any use registers to be changed. Also assume all registers
- // used in a call must not be changed (ABI).
- if (MI->getDesc().isCall() || MI->getDesc().hasExtraSrcRegAllocReq()) {
+ if (Special) {
DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)");
State->UnionGroups(Reg, 0);
}
@@ -539,8 +562,8 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
unsigned AntiDepGroupIndex,
RenameOrderType& RenameOrder,
std::map<unsigned, unsigned> &RenameMap) {
- unsigned *KillIndices = State->GetKillIndices();
- unsigned *DefIndices = State->GetDefIndices();
+ std::vector<unsigned> &KillIndices = State->GetKillIndices();
+ std::vector<unsigned> &DefIndices = State->GetDefIndices();
std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
RegRefs = State->GetRegRefs();
@@ -605,8 +628,12 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
// order. If that register is available, and the corresponding
// registers are available for the other group subregisters, then we
// can use those registers to rename.
+
+ // FIXME: Using getMinimalPhysRegClass is very conservative. We should
+ // check every use of the register and find the largest register class
+ // that can be used in all of them.
const TargetRegisterClass *SuperRC =
- TRI->getPhysicalRegisterRegClass(SuperReg, MVT::Other);
+ TRI->getMinimalPhysRegClass(SuperReg, MVT::Other);
const TargetRegisterClass::iterator RB = SuperRC->allocation_order_begin(MF);
const TargetRegisterClass::iterator RE = SuperRC->allocation_order_end(MF);
@@ -627,6 +654,8 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
if (R == RB) R = RE;
--R;
const unsigned NewSuperReg = *R;
+ // Don't consider non-allocatable registers
+ if (!AllocatableSet.test(NewSuperReg)) continue;
// Don't replace a register with itself.
if (NewSuperReg == SuperReg) continue;
@@ -704,12 +733,12 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
/// ScheduleDAG and break them by renaming registers.
///
unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
- std::vector<SUnit>& SUnits,
- MachineBasicBlock::iterator& Begin,
- MachineBasicBlock::iterator& End,
+ const std::vector<SUnit>& SUnits,
+ MachineBasicBlock::iterator Begin,
+ MachineBasicBlock::iterator End,
unsigned InsertPosIndex) {
- unsigned *KillIndices = State->GetKillIndices();
- unsigned *DefIndices = State->GetDefIndices();
+ std::vector<unsigned> &KillIndices = State->GetKillIndices();
+ std::vector<unsigned> &DefIndices = State->GetDefIndices();
std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
RegRefs = State->GetRegRefs();
@@ -721,20 +750,21 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
RenameOrderType RenameOrder;
// ...need a map from MI to SUnit.
- std::map<MachineInstr *, SUnit *> MISUnitMap;
+ std::map<MachineInstr *, const SUnit *> MISUnitMap;
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
- SUnit *SU = &SUnits[i];
- MISUnitMap.insert(std::pair<MachineInstr *, SUnit *>(SU->getInstr(), SU));
+ const SUnit *SU = &SUnits[i];
+ MISUnitMap.insert(std::pair<MachineInstr *, const SUnit *>(SU->getInstr(),
+ SU));
}
// Track progress along the critical path through the SUnit graph as
// we walk the instructions. This is needed for regclasses that only
// break critical-path anti-dependencies.
- SUnit *CriticalPathSU = 0;
+ const SUnit *CriticalPathSU = 0;
MachineInstr *CriticalPathMI = 0;
if (CriticalPathSet.any()) {
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
- SUnit *SU = &SUnits[i];
+ const SUnit *SU = &SUnits[i];
if (!CriticalPathSU ||
((SU->getDepth() + SU->Latency) >
(CriticalPathSU->getDepth() + CriticalPathSU->Latency))) {
@@ -775,8 +805,8 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
// The dependence edges that represent anti- and output-
// dependencies that are candidates for breaking.
- std::vector<SDep*> Edges;
- SUnit *PathSU = MISUnitMap[MI];
+ std::vector<const SDep *> Edges;
+ const SUnit *PathSU = MISUnitMap[MI];
AntiDepEdges(PathSU, Edges);
// If MI is not on the critical path, then we don't rename
@@ -794,7 +824,7 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
if (!MI->isKill()) {
// Attempt to break each anti-dependency...
for (unsigned i = 0, e = Edges.size(); i != e; ++i) {
- SDep *Edge = Edges[i];
+ const SDep *Edge = Edges[i];
SUnit *NextSU = Edge->getSUnit();
if ((Edge->getKind() != SDep::Anti) &&
@@ -838,7 +868,7 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
// Also, if there are dependencies on other SUnits with the
// same register as the anti-dependency, don't attempt to
// break it.
- for (SUnit::pred_iterator P = PathSU->Preds.begin(),
+ for (SUnit::const_pred_iterator P = PathSU->Preds.begin(),
PE = PathSU->Preds.end(); P != PE; ++P) {
if (P->getSUnit() == NextSU ?
(P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
@@ -847,7 +877,7 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
break;
}
}
- for (SUnit::pred_iterator P = PathSU->Preds.begin(),
+ for (SUnit::const_pred_iterator P = PathSU->Preds.begin(),
PE = PathSU->Preds.end(); P != PE; ++P) {
if ((P->getSUnit() == NextSU) && (P->getKind() != SDep::Anti) &&
(P->getKind() != SDep::Output)) {
@@ -905,6 +935,19 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
AggressiveAntiDepState::RegisterReference>::iterator
Q = Range.first, QE = Range.second; Q != QE; ++Q) {
Q->second.Operand->setReg(NewReg);
+ // If the SU for the instruction being updated has debug
+ // information related to the anti-dependency register, make
+ // sure to update that as well.
+ const SUnit *SU = MISUnitMap[Q->second.Operand->getParent()];
+ if (!SU) continue;
+ for (unsigned i = 0, e = SU->DbgInstrList.size() ; i < e ; ++i) {
+ MachineInstr *DI = SU->DbgInstrList[i];
+ assert (DI->getNumOperands()==3 && DI->getOperand(0).isReg() &&
+ DI->getOperand(0).getReg()
+ && "Non register dbg_value attached to SUnit!");
+ if (DI->getOperand(0).getReg() == AntiDepReg)
+ DI->getOperand(0).setReg(NewReg);
+ }
}
// We just went back in time and modified history; the
diff --git a/libclamav/c++/llvm/lib/CodeGen/AggressiveAntiDepBreaker.h b/libclamav/c++/llvm/lib/CodeGen/AggressiveAntiDepBreaker.h
index a62d68c..9d715cc 100644
--- a/libclamav/c++/llvm/lib/CodeGen/AggressiveAntiDepBreaker.h
+++ b/libclamav/c++/llvm/lib/CodeGen/AggressiveAntiDepBreaker.h
@@ -59,27 +59,27 @@ namespace llvm {
/// currently representing the group that the register belongs to.
/// Register 0 is always represented by the 0 group, a group
/// composed of registers that are not eligible for anti-aliasing.
- unsigned GroupNodeIndices[TargetRegisterInfo::FirstVirtualRegister];
+ std::vector<unsigned> GroupNodeIndices;
/// RegRefs - Map registers to all their references within a live range.
std::multimap<unsigned, RegisterReference> RegRefs;
/// KillIndices - The index of the most recent kill (proceding bottom-up),
/// or ~0u if the register is not live.
- unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
+ std::vector<unsigned> KillIndices;
/// DefIndices - The index of the most recent complete def (proceding bottom
/// up), or ~0u if the register is live.
- unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
+ std::vector<unsigned> DefIndices;
public:
AggressiveAntiDepState(const unsigned TargetRegs, MachineBasicBlock *BB);
/// GetKillIndices - Return the kill indices.
- unsigned *GetKillIndices() { return KillIndices; }
+ std::vector<unsigned> &GetKillIndices() { return KillIndices; }
/// GetDefIndices - Return the define indices.
- unsigned *GetDefIndices() { return DefIndices; }
+ std::vector<unsigned> &GetDefIndices() { return DefIndices; }
/// GetRegRefs - Return the RegRefs map.
std::multimap<unsigned, RegisterReference>& GetRegRefs() { return RegRefs; }
@@ -115,6 +115,7 @@ namespace llvm {
class AggressiveAntiDepBreaker : public AntiDepBreaker {
MachineFunction& MF;
MachineRegisterInfo &MRI;
+ const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
/// AllocatableSet - The set of allocatable registers.
@@ -142,9 +143,9 @@ namespace llvm {
/// path
/// of the ScheduleDAG and break them by renaming registers.
///
- unsigned BreakAntiDependencies(std::vector<SUnit>& SUnits,
- MachineBasicBlock::iterator& Begin,
- MachineBasicBlock::iterator& End,
+ unsigned BreakAntiDependencies(const std::vector<SUnit>& SUnits,
+ MachineBasicBlock::iterator Begin,
+ MachineBasicBlock::iterator End,
unsigned InsertPosIndex);
/// Observe - Update liveness information to account for the current
diff --git a/libclamav/c++/llvm/lib/CodeGen/Analysis.cpp b/libclamav/c++/llvm/lib/CodeGen/Analysis.cpp
new file mode 100644
index 0000000..e3dd646
--- /dev/null
+++ b/libclamav/c++/llvm/lib/CodeGen/Analysis.cpp
@@ -0,0 +1,285 @@
+//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities --*- C++ ------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines several CodeGen-specific LLVM IR analysis utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+using namespace llvm;
+
+/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
+/// of insertvalue or extractvalue indices that identify a member, return
+/// the linearized index of the start of the member.
+///
+unsigned llvm::ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
+ const unsigned *Indices,
+ const unsigned *IndicesEnd,
+ unsigned CurIndex) {
+ // Base case: We're done.
+ if (Indices && Indices == IndicesEnd)
+ return CurIndex;
+
+ // Given a struct type, recursively traverse the elements.
+ if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ for (StructType::element_iterator EB = STy->element_begin(),
+ EI = EB,
+ EE = STy->element_end();
+ EI != EE; ++EI) {
+ if (Indices && *Indices == unsigned(EI - EB))
+ return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
+ CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
+ }
+ return CurIndex;
+ }
+ // Given an array type, recursively traverse the elements.
+ else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ const Type *EltTy = ATy->getElementType();
+ for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
+ if (Indices && *Indices == i)
+ return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
+ CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
+ }
+ return CurIndex;
+ }
+ // We haven't found the type we're looking for, so keep searching.
+ return CurIndex + 1;
+}
+
+/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
+/// EVTs that represent all the individual underlying
+/// non-aggregate types that comprise it.
+///
+/// If Offsets is non-null, it points to a vector to be filled in
+/// with the in-memory offsets of each of the individual values.
+///
+void llvm::ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
+ SmallVectorImpl<EVT> &ValueVTs,
+ SmallVectorImpl<uint64_t> *Offsets,
+ uint64_t StartingOffset) {
+ // Given a struct type, recursively traverse the elements.
+ if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
+ for (StructType::element_iterator EB = STy->element_begin(),
+ EI = EB,
+ EE = STy->element_end();
+ EI != EE; ++EI)
+ ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
+ StartingOffset + SL->getElementOffset(EI - EB));
+ return;
+ }
+ // Given an array type, recursively traverse the elements.
+ if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ const Type *EltTy = ATy->getElementType();
+ uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
+ for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
+ ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
+ StartingOffset + i * EltSize);
+ return;
+ }
+ // Interpret void as zero return values.
+ if (Ty->isVoidTy())
+ return;
+ // Base case: we can get an EVT for this LLVM IR type.
+ ValueVTs.push_back(TLI.getValueType(Ty));
+ if (Offsets)
+ Offsets->push_back(StartingOffset);
+}
+
+/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
+GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
+ V = V->stripPointerCasts();
+ GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
+
+ if (GV && GV->getName() == "llvm.eh.catch.all.value") {
+ assert(GV->hasInitializer() &&
+ "The EH catch-all value must have an initializer");
+ Value *Init = GV->getInitializer();
+ GV = dyn_cast<GlobalVariable>(Init);
+ if (!GV) V = cast<ConstantPointerNull>(Init);
+ }
+
+ assert((GV || isa<ConstantPointerNull>(V)) &&
+ "TypeInfo must be a global variable or NULL");
+ return GV;
+}
+
+/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
+/// processed uses a memory 'm' constraint.
+bool
+llvm::hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
+ const TargetLowering &TLI) {
+ for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
+ InlineAsm::ConstraintInfo &CI = CInfos[i];
+ for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
+ TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
+ if (CType == TargetLowering::C_Memory)
+ return true;
+ }
+
+ // Indirect operand accesses access memory.
+ if (CI.isIndirect)
+ return true;
+ }
+
+ return false;
+}
+
+/// getFCmpCondCode - Return the ISD condition code corresponding to
+/// the given LLVM IR floating-point condition code. This includes
+/// consideration of global floating-point math flags.
+///
+ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
+ ISD::CondCode FPC, FOC;
+ switch (Pred) {
+ case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
+ case FCmpInst::FCMP_OEQ: FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
+ case FCmpInst::FCMP_OGT: FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
+ case FCmpInst::FCMP_OGE: FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
+ case FCmpInst::FCMP_OLT: FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
+ case FCmpInst::FCMP_OLE: FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
+ case FCmpInst::FCMP_ONE: FOC = ISD::SETNE; FPC = ISD::SETONE; break;
+ case FCmpInst::FCMP_ORD: FOC = FPC = ISD::SETO; break;
+ case FCmpInst::FCMP_UNO: FOC = FPC = ISD::SETUO; break;
+ case FCmpInst::FCMP_UEQ: FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
+ case FCmpInst::FCMP_UGT: FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
+ case FCmpInst::FCMP_UGE: FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
+ case FCmpInst::FCMP_ULT: FOC = ISD::SETLT; FPC = ISD::SETULT; break;
+ case FCmpInst::FCMP_ULE: FOC = ISD::SETLE; FPC = ISD::SETULE; break;
+ case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
+ case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; break;
+ default:
+ llvm_unreachable("Invalid FCmp predicate opcode!");
+ FOC = FPC = ISD::SETFALSE;
+ break;
+ }
+ if (NoNaNsFPMath)
+ return FOC;
+ else
+ return FPC;
+}
+
+/// getICmpCondCode - Return the ISD condition code corresponding to
+/// the given LLVM IR integer condition code.
+///
+ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
+ switch (Pred) {
+ case ICmpInst::ICMP_EQ: return ISD::SETEQ;
+ case ICmpInst::ICMP_NE: return ISD::SETNE;
+ case ICmpInst::ICMP_SLE: return ISD::SETLE;
+ case ICmpInst::ICMP_ULE: return ISD::SETULE;
+ case ICmpInst::ICMP_SGE: return ISD::SETGE;
+ case ICmpInst::ICMP_UGE: return ISD::SETUGE;
+ case ICmpInst::ICMP_SLT: return ISD::SETLT;
+ case ICmpInst::ICMP_ULT: return ISD::SETULT;
+ case ICmpInst::ICMP_SGT: return ISD::SETGT;
+ case ICmpInst::ICMP_UGT: return ISD::SETUGT;
+ default:
+ llvm_unreachable("Invalid ICmp predicate opcode!");
+ return ISD::SETNE;
+ }
+}
+
+/// Test if the given instruction is in a position to be optimized
+/// with a tail-call. This roughly means that it's in a block with
+/// a return and there's nothing that needs to be scheduled
+/// between it and the return.
+///
+/// This function only tests target-independent requirements.
+bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
+ const TargetLowering &TLI) {
+ const Instruction *I = CS.getInstruction();
+ const BasicBlock *ExitBB = I->getParent();
+ const TerminatorInst *Term = ExitBB->getTerminator();
+ const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
+ const Function *F = ExitBB->getParent();
+
+ // The block must end in a return statement or unreachable.
+ //
+ // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
+ // an unreachable, for now. The way tailcall optimization is currently
+ // implemented means it will add an epilogue followed by a jump. That is
+ // not profitable. Also, if the callee is a special function (e.g.
+ // longjmp on x86), it can end up causing miscompilation that has not
+ // been fully understood.
+ if (!Ret &&
+ (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term))) return false;
+
+ // If I will have a chain, make sure no other instruction that will have a
+ // chain interposes between I and the return.
+ if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
+ !I->isSafeToSpeculativelyExecute())
+ for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
+ --BBI) {
+ if (&*BBI == I)
+ break;
+ // Debug info intrinsics do not get in the way of tail call optimization.
+ if (isa<DbgInfoIntrinsic>(BBI))
+ continue;
+ if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
+ !BBI->isSafeToSpeculativelyExecute())
+ return false;
+ }
+
+ // If the block ends with a void return or unreachable, it doesn't matter
+ // what the call's return type is.
+ if (!Ret || Ret->getNumOperands() == 0) return true;
+
+ // If the return value is undef, it doesn't matter what the call's
+ // return type is.
+ if (isa<UndefValue>(Ret->getOperand(0))) return true;
+
+ // Conservatively require the attributes of the call to match those of
+ // the return. Ignore noalias because it doesn't affect the call sequence.
+ unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
+ if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
+ return false;
+
+ // It's not safe to eliminate the sign / zero extension of the return value.
+ if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
+ return false;
+
+ // Otherwise, make sure the unmodified return value of I is the return value.
+ for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
+ U = dyn_cast<Instruction>(U->getOperand(0))) {
+ if (!U)
+ return false;
+ if (!U->hasOneUse())
+ return false;
+ if (U == I)
+ break;
+ // Check for a truly no-op truncate.
+ if (isa<TruncInst>(U) &&
+ TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
+ continue;
+ // Check for a truly no-op bitcast.
+ if (isa<BitCastInst>(U) &&
+ (U->getOperand(0)->getType() == U->getType() ||
+ (U->getOperand(0)->getType()->isPointerTy() &&
+ U->getType()->isPointerTy())))
+ continue;
+ // Otherwise it's not a true no-op.
+ return false;
+ }
+
+ return true;
+}
+
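To make the new ComputeLinearIndex() above concrete: for an aggregate shaped like { i32, { float, float }, i8 }, the insertvalue/extractvalue index path {1, 1} lands on linear index 2, because the scalar leaves flatten to i32=0, float=1, float=2, i8=3. The toy below mirrors the recursion with a made-up type representation instead of llvm::Type; it is an illustration only, not code from the patch:

#include <cstdio>
#include <vector>

// Toy aggregate type: an empty Elems list means "scalar leaf".
struct Ty {
  std::vector<const Ty*> Elems;
};

// Mirrors the recursion of ComputeLinearIndex above, minus the LLVM type system.
static unsigned Linear(const Ty &T, const unsigned *I, const unsigned *E, unsigned Cur) {
  if (I && I == E)
    return Cur;                        // all indices consumed: Cur is the answer
  if (T.Elems.empty())
    return Cur + 1;                    // a scalar leaf occupies one linear slot
  for (unsigned i = 0; i != T.Elems.size(); ++i) {
    if (I && *I == i)
      return Linear(*T.Elems[i], I + 1, E, Cur);  // descend into the selected member
    Cur = Linear(*T.Elems[i], 0, 0, Cur);         // otherwise just count its leaves
  }
  return Cur;
}

int main() {
  Ty S1, S2, S3, S4;                   // four scalar leaves: i32, float, float, i8
  Ty Inner;                            // { float, float }
  Inner.Elems.push_back(&S2);
  Inner.Elems.push_back(&S3);
  Ty Outer;                            // { i32, { float, float }, i8 }
  Outer.Elems.push_back(&S1);
  Outer.Elems.push_back(&Inner);
  Outer.Elems.push_back(&S4);
  unsigned Idx[] = { 1, 1 };           // e.g. extractvalue %agg, 1, 1
  std::printf("%u\n", Linear(Outer, Idx, Idx + 2, 0));   // prints 2 (the second float)
  return 0;
}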
diff --git a/libclamav/c++/llvm/lib/CodeGen/AntiDepBreaker.h b/libclamav/c++/llvm/lib/CodeGen/AntiDepBreaker.h
index 3ee30c6..086b757 100644
--- a/libclamav/c++/llvm/lib/CodeGen/AntiDepBreaker.h
+++ b/libclamav/c++/llvm/lib/CodeGen/AntiDepBreaker.h
@@ -39,9 +39,9 @@ public:
/// basic-block region and break them by renaming registers. Return
/// the number of anti-dependencies broken.
///
- virtual unsigned BreakAntiDependencies(std::vector<SUnit>& SUnits,
- MachineBasicBlock::iterator& Begin,
- MachineBasicBlock::iterator& End,
+ virtual unsigned BreakAntiDependencies(const std::vector<SUnit>& SUnits,
+ MachineBasicBlock::iterator Begin,
+ MachineBasicBlock::iterator End,
unsigned InsertPosIndex) =0;
/// Observe - Update liveness information to account for the current
diff --git a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
deleted file mode 100644
index bd2b1b6..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ /dev/null
@@ -1,1822 +0,0 @@
-//===-- AsmPrinter.cpp - Common AsmPrinter code ---------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the AsmPrinter class.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "asm-printer"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Constants.h"
-#include "llvm/Module.h"
-#include "llvm/CodeGen/DwarfWriter.h"
-#include "llvm/CodeGen/GCMetadataPrinter.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineJumpTableInfo.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCSection.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/Format.h"
-#include "llvm/Support/FormattedStream.h"
-#include <cerrno>
-using namespace llvm;
-
-STATISTIC(EmittedInsts, "Number of machine instrs printed");
-
-char AsmPrinter::ID = 0;
-AsmPrinter::AsmPrinter(formatted_raw_ostream &o, TargetMachine &tm,
- MCContext &Ctx, MCStreamer &Streamer,
- const MCAsmInfo *T)
- : MachineFunctionPass(&ID), O(o),
- TM(tm), MAI(T), TRI(tm.getRegisterInfo()),
- OutContext(Ctx), OutStreamer(Streamer),
- LastMI(0), LastFn(0), Counter(~0U), PrevDLT(NULL) {
- DW = 0; MMI = 0;
- VerboseAsm = Streamer.isVerboseAsm();
-}
-
-AsmPrinter::~AsmPrinter() {
- for (gcp_iterator I = GCMetadataPrinters.begin(),
- E = GCMetadataPrinters.end(); I != E; ++I)
- delete I->second;
-
- delete &OutStreamer;
- delete &OutContext;
-}
-
-/// getFunctionNumber - Return a unique ID for the current function.
-///
-unsigned AsmPrinter::getFunctionNumber() const {
- return MF->getFunctionNumber();
-}
-
-TargetLoweringObjectFile &AsmPrinter::getObjFileLowering() const {
- return TM.getTargetLowering()->getObjFileLowering();
-}
-
-/// getCurrentSection() - Return the current section we are emitting to.
-const MCSection *AsmPrinter::getCurrentSection() const {
- return OutStreamer.getCurrentSection();
-}
-
-
-void AsmPrinter::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- MachineFunctionPass::getAnalysisUsage(AU);
- AU.addRequired<GCModuleInfo>();
- if (VerboseAsm)
- AU.addRequired<MachineLoopInfo>();
-}
-
-bool AsmPrinter::doInitialization(Module &M) {
- // Initialize TargetLoweringObjectFile.
- const_cast<TargetLoweringObjectFile&>(getObjFileLowering())
- .Initialize(OutContext, TM);
-
- Mang = new Mangler(*MAI);
-
- // Allow the target to emit any magic that it wants at the start of the file.
- EmitStartOfAsmFile(M);
-
- // Very minimal debug info. It is ignored if we emit actual debug info. If we
- // don't, this at least helps the user find where a global came from.
- if (MAI->hasSingleParameterDotFile()) {
- // .file "foo.c"
- OutStreamer.EmitFileDirective(M.getModuleIdentifier());
- }
-
- GCModuleInfo *MI = getAnalysisIfAvailable<GCModuleInfo>();
- assert(MI && "AsmPrinter didn't require GCModuleInfo?");
- for (GCModuleInfo::iterator I = MI->begin(), E = MI->end(); I != E; ++I)
- if (GCMetadataPrinter *MP = GetOrCreateGCPrinter(*I))
- MP->beginAssembly(O, *this, *MAI);
-
- if (!M.getModuleInlineAsm().empty())
- O << MAI->getCommentString() << " Start of file scope inline assembly\n"
- << M.getModuleInlineAsm()
- << '\n' << MAI->getCommentString()
- << " End of file scope inline assembly\n";
-
- MMI = getAnalysisIfAvailable<MachineModuleInfo>();
- if (MMI)
- MMI->AnalyzeModule(M);
- DW = getAnalysisIfAvailable<DwarfWriter>();
- if (DW)
- DW->BeginModule(&M, MMI, O, this, MAI);
-
- return false;
-}
-
-void AsmPrinter::EmitLinkage(unsigned Linkage, MCSymbol *GVSym) const {
- switch ((GlobalValue::LinkageTypes)Linkage) {
- case GlobalValue::CommonLinkage:
- case GlobalValue::LinkOnceAnyLinkage:
- case GlobalValue::LinkOnceODRLinkage:
- case GlobalValue::WeakAnyLinkage:
- case GlobalValue::WeakODRLinkage:
- case GlobalValue::LinkerPrivateLinkage:
- if (MAI->getWeakDefDirective() != 0) {
- // .globl _foo
- OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Global);
- // .weak_definition _foo
- OutStreamer.EmitSymbolAttribute(GVSym, MCSA_WeakDefinition);
- } else if (const char *LinkOnce = MAI->getLinkOnceDirective()) {
- // .globl _foo
- OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Global);
- // FIXME: linkonce should be a section attribute, handled by COFF Section
- // assignment.
- // http://sourceware.org/binutils/docs-2.20/as/Linkonce.html#Linkonce
- // .linkonce discard
- // FIXME: It would be nice to use .linkonce samesize for non-common
- // globals.
- O << LinkOnce;
- } else {
- // .weak _foo
- OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Weak);
- }
- break;
- case GlobalValue::DLLExportLinkage:
- case GlobalValue::AppendingLinkage:
- // FIXME: appending linkage variables should go into a section of
- // their name or something. For now, just emit them as external.
- case GlobalValue::ExternalLinkage:
- // If external or appending, declare as a global symbol.
- // .globl _foo
- OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Global);
- break;
- case GlobalValue::PrivateLinkage:
- case GlobalValue::InternalLinkage:
- break;
- default:
- llvm_unreachable("Unknown linkage type!");
- }
-}
-
-
-/// EmitGlobalVariable - Emit the specified global variable to the .s file.
-void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
- if (!GV->hasInitializer()) // External globals require no code.
- return;
-
- // Check to see if this is a special global used by LLVM, if so, emit it.
- if (EmitSpecialLLVMGlobal(GV))
- return;
-
- MCSymbol *GVSym = GetGlobalValueSymbol(GV);
- EmitVisibility(GVSym, GV->getVisibility());
-
- if (MAI->hasDotTypeDotSizeDirective())
- OutStreamer.EmitSymbolAttribute(GVSym, MCSA_ELF_TypeObject);
-
- SectionKind GVKind = TargetLoweringObjectFile::getKindForGlobal(GV, TM);
-
- const TargetData *TD = TM.getTargetData();
- unsigned Size = TD->getTypeAllocSize(GV->getType()->getElementType());
- unsigned AlignLog = TD->getPreferredAlignmentLog(GV);
-
- // Handle common and BSS local symbols (.lcomm).
- if (GVKind.isCommon() || GVKind.isBSSLocal()) {
- if (Size == 0) Size = 1; // .comm Foo, 0 is undefined, avoid it.
-
- if (VerboseAsm) {
- WriteAsOperand(OutStreamer.GetCommentOS(), GV,
- /*PrintType=*/false, GV->getParent());
- OutStreamer.GetCommentOS() << '\n';
- }
-
- // Handle common symbols.
- if (GVKind.isCommon()) {
- // .comm _foo, 42, 4
- OutStreamer.EmitCommonSymbol(GVSym, Size, 1 << AlignLog);
- return;
- }
-
- // Handle local BSS symbols.
- if (MAI->hasMachoZeroFillDirective()) {
- const MCSection *TheSection =
- getObjFileLowering().SectionForGlobal(GV, GVKind, Mang, TM);
- // .zerofill __DATA, __bss, _foo, 400, 5
- OutStreamer.EmitZerofill(TheSection, GVSym, Size, 1 << AlignLog);
- return;
- }
-
- if (MAI->hasLCOMMDirective()) {
- // .lcomm _foo, 42
- OutStreamer.EmitLocalCommonSymbol(GVSym, Size);
- return;
- }
-
- // .local _foo
- OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Local);
- // .comm _foo, 42, 4
- OutStreamer.EmitCommonSymbol(GVSym, Size, 1 << AlignLog);
- return;
- }
-
- const MCSection *TheSection =
- getObjFileLowering().SectionForGlobal(GV, GVKind, Mang, TM);
-
- // Handle the zerofill directive on darwin, which is a special form of BSS
- // emission.
- if (GVKind.isBSSExtern() && MAI->hasMachoZeroFillDirective()) {
- // .globl _foo
- OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Global);
- // .zerofill __DATA, __common, _foo, 400, 5
- OutStreamer.EmitZerofill(TheSection, GVSym, Size, 1 << AlignLog);
- return;
- }
-
- OutStreamer.SwitchSection(TheSection);
-
- EmitLinkage(GV->getLinkage(), GVSym);
- EmitAlignment(AlignLog, GV);
-
- if (VerboseAsm) {
- WriteAsOperand(OutStreamer.GetCommentOS(), GV,
- /*PrintType=*/false, GV->getParent());
- OutStreamer.GetCommentOS() << '\n';
- }
- OutStreamer.EmitLabel(GVSym);
-
- EmitGlobalConstant(GV->getInitializer());
-
- if (MAI->hasDotTypeDotSizeDirective())
- // .size foo, 42
- OutStreamer.EmitELFSize(GVSym, MCConstantExpr::Create(Size, OutContext));
-
- OutStreamer.AddBlankLine();
-}
-
-/// EmitFunctionHeader - This method emits the header for the current
-/// function.
-void AsmPrinter::EmitFunctionHeader() {
- // Print out constants referenced by the function
- EmitConstantPool();
-
- // Print the 'header' of function.
- const Function *F = MF->getFunction();
-
- OutStreamer.SwitchSection(getObjFileLowering().SectionForGlobal(F, Mang, TM));
- EmitVisibility(CurrentFnSym, F->getVisibility());
-
- EmitLinkage(F->getLinkage(), CurrentFnSym);
- EmitAlignment(MF->getAlignment(), F);
-
- if (MAI->hasDotTypeDotSizeDirective())
- OutStreamer.EmitSymbolAttribute(CurrentFnSym, MCSA_ELF_TypeFunction);
-
- if (VerboseAsm) {
- WriteAsOperand(OutStreamer.GetCommentOS(), F,
- /*PrintType=*/false, F->getParent());
- OutStreamer.GetCommentOS() << '\n';
- }
-
- // Emit the CurrentFnSym. This is a virtual function to allow targets to
- // do their wild and crazy things as required.
- EmitFunctionEntryLabel();
-
- // Add some workaround for linkonce linkage on Cygwin\MinGW.
- if (MAI->getLinkOnceDirective() != 0 &&
- (F->hasLinkOnceLinkage() || F->hasWeakLinkage()))
- // FIXME: What is this?
- O << "Lllvm$workaround$fake$stub$" << *CurrentFnSym << ":\n";
-
- // Emit pre-function debug and/or EH information.
- if (MAI->doesSupportDebugInformation() || MAI->doesSupportExceptionHandling())
- DW->BeginFunction(MF);
-}
-
-/// EmitFunctionEntryLabel - Emit the label that is the entrypoint for the
-/// function. This can be overridden by targets as required to do custom stuff.
-void AsmPrinter::EmitFunctionEntryLabel() {
- OutStreamer.EmitLabel(CurrentFnSym);
-}
-
-
-/// EmitComments - Pretty-print comments for instructions.
-static void EmitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
- const MachineFunction *MF = MI.getParent()->getParent();
- const TargetMachine &TM = MF->getTarget();
-
- if (!MI.getDebugLoc().isUnknown()) {
- DILocation DLT = MF->getDILocation(MI.getDebugLoc());
-
- // Print source line info.
- DIScope Scope = DLT.getScope();
- // Omit the directory, because it's likely to be long and uninteresting.
- if (!Scope.isNull())
- CommentOS << Scope.getFilename();
- else
- CommentOS << "<unknown>";
- CommentOS << ':' << DLT.getLineNumber();
- if (DLT.getColumnNumber() != 0)
- CommentOS << ':' << DLT.getColumnNumber();
- CommentOS << '\n';
- }
-
- // Check for spills and reloads
- int FI;
-
- const MachineFrameInfo *FrameInfo = MF->getFrameInfo();
-
- // We assume a single instruction only has a spill or reload, not
- // both.
- const MachineMemOperand *MMO;
- if (TM.getInstrInfo()->isLoadFromStackSlotPostFE(&MI, FI)) {
- if (FrameInfo->isSpillSlotObjectIndex(FI)) {
- MMO = *MI.memoperands_begin();
- CommentOS << MMO->getSize() << "-byte Reload\n";
- }
- } else if (TM.getInstrInfo()->hasLoadFromStackSlot(&MI, MMO, FI)) {
- if (FrameInfo->isSpillSlotObjectIndex(FI))
- CommentOS << MMO->getSize() << "-byte Folded Reload\n";
- } else if (TM.getInstrInfo()->isStoreToStackSlotPostFE(&MI, FI)) {
- if (FrameInfo->isSpillSlotObjectIndex(FI)) {
- MMO = *MI.memoperands_begin();
- CommentOS << MMO->getSize() << "-byte Spill\n";
- }
- } else if (TM.getInstrInfo()->hasStoreToStackSlot(&MI, MMO, FI)) {
- if (FrameInfo->isSpillSlotObjectIndex(FI))
- CommentOS << MMO->getSize() << "-byte Folded Spill\n";
- }
-
- // Check for spill-induced copies
- unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- if (TM.getInstrInfo()->isMoveInstr(MI, SrcReg, DstReg,
- SrcSubIdx, DstSubIdx)) {
- if (MI.getAsmPrinterFlag(MachineInstr::ReloadReuse))
- CommentOS << " Reload Reuse\n";
- }
-}
-
-
-
-/// EmitFunctionBody - This method emits the body and trailer for a
-/// function.
-void AsmPrinter::EmitFunctionBody() {
- // Emit target-specific gunk before the function body.
- EmitFunctionBodyStart();
-
- // Print out code for the function.
- bool HasAnyRealCode = false;
- for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
- I != E; ++I) {
- // Print a label for the basic block.
- EmitBasicBlockStart(I);
- for (MachineBasicBlock::const_iterator II = I->begin(), IE = I->end();
- II != IE; ++II) {
- // Print the assembly for the instruction.
- if (!II->isLabel())
- HasAnyRealCode = true;
-
- ++EmittedInsts;
-
- // FIXME: Clean up processDebugLoc.
- processDebugLoc(II, true);
-
- if (VerboseAsm)
- EmitComments(*II, OutStreamer.GetCommentOS());
-
- switch (II->getOpcode()) {
- case TargetOpcode::DBG_LABEL:
- case TargetOpcode::EH_LABEL:
- case TargetOpcode::GC_LABEL:
- printLabelInst(II);
- break;
- case TargetOpcode::INLINEASM:
- printInlineAsm(II);
- break;
- case TargetOpcode::IMPLICIT_DEF:
- printImplicitDef(II);
- break;
- case TargetOpcode::KILL:
- printKill(II);
- break;
- default:
- EmitInstruction(II);
- break;
- }
-
- // FIXME: Clean up processDebugLoc.
- processDebugLoc(II, false);
- }
- }
-
- // If the function is empty and the object file uses .subsections_via_symbols,
- // then we need to emit *something* to the function body to prevent the
- // labels from collapsing together. Just emit a 0 byte.
- if (MAI->hasSubsectionsViaSymbols() && !HasAnyRealCode)
- OutStreamer.EmitIntValue(0, 1, 0/*addrspace*/);
-
- // Emit target-specific gunk after the function body.
- EmitFunctionBodyEnd();
-
- if (MAI->hasDotTypeDotSizeDirective())
- O << "\t.size\t" << *CurrentFnSym << ", .-" << *CurrentFnSym << '\n';
-
- // Emit post-function debug information.
- if (MAI->doesSupportDebugInformation() || MAI->doesSupportExceptionHandling())
- DW->EndFunction(MF);
-
- // Print out jump tables referenced by the function.
- EmitJumpTableInfo();
-
- OutStreamer.AddBlankLine();
-}
-
-
-bool AsmPrinter::doFinalization(Module &M) {
- // Emit global variables.
- for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I)
- EmitGlobalVariable(I);
-
- // Emit final debug information.
- if (MAI->doesSupportDebugInformation() || MAI->doesSupportExceptionHandling())
- DW->EndModule();
-
- // If the target wants to know about weak references, print them all.
- if (MAI->getWeakRefDirective()) {
- // FIXME: This is not lazy, it would be nice to only print weak references
- // to stuff that is actually used. Note that doing so would require targets
- // to notice uses in operands (due to constant exprs etc). This should
- // happen with the MC stuff eventually.
-
- // Print out module-level global variables here.
- for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I) {
- if (!I->hasExternalWeakLinkage()) continue;
- OutStreamer.EmitSymbolAttribute(GetGlobalValueSymbol(I),
- MCSA_WeakReference);
- }
-
- for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I) {
- if (!I->hasExternalWeakLinkage()) continue;
- OutStreamer.EmitSymbolAttribute(GetGlobalValueSymbol(I),
- MCSA_WeakReference);
- }
- }
-
- if (MAI->hasSetDirective()) {
- OutStreamer.AddBlankLine();
- for (Module::const_alias_iterator I = M.alias_begin(), E = M.alias_end();
- I != E; ++I) {
- MCSymbol *Name = GetGlobalValueSymbol(I);
-
- const GlobalValue *GV = cast<GlobalValue>(I->getAliasedGlobal());
- MCSymbol *Target = GetGlobalValueSymbol(GV);
-
- if (I->hasExternalLinkage() || !MAI->getWeakRefDirective())
- OutStreamer.EmitSymbolAttribute(Name, MCSA_Global);
- else if (I->hasWeakLinkage())
- OutStreamer.EmitSymbolAttribute(Name, MCSA_WeakReference);
- else
- assert(I->hasLocalLinkage() && "Invalid alias linkage");
-
- EmitVisibility(Name, I->getVisibility());
-
- // Emit the directives as assignments aka .set:
- OutStreamer.EmitAssignment(Name,
- MCSymbolRefExpr::Create(Target, OutContext));
- }
- }
-
- GCModuleInfo *MI = getAnalysisIfAvailable<GCModuleInfo>();
- assert(MI && "AsmPrinter didn't require GCModuleInfo?");
- for (GCModuleInfo::iterator I = MI->end(), E = MI->begin(); I != E; )
- if (GCMetadataPrinter *MP = GetOrCreateGCPrinter(*--I))
- MP->finishAssembly(O, *this, *MAI);
-
- // If we don't have any trampolines, then we don't require stack memory
- // to be executable. Some targets have a directive to declare this.
- Function *InitTrampolineIntrinsic = M.getFunction("llvm.init.trampoline");
- if (!InitTrampolineIntrinsic || InitTrampolineIntrinsic->use_empty())
- if (MCSection *S = MAI->getNonexecutableStackSection(OutContext))
- OutStreamer.SwitchSection(S);
-
- // Allow the target to emit any magic that it wants at the end of the file,
- // after everything else has gone out.
- EmitEndOfAsmFile(M);
-
- delete Mang; Mang = 0;
- DW = 0; MMI = 0;
-
- OutStreamer.Finish();
- return false;
-}
-
-void AsmPrinter::SetupMachineFunction(MachineFunction &MF) {
- this->MF = &MF;
- // Get the function symbol.
- CurrentFnSym = GetGlobalValueSymbol(MF.getFunction());
-
- if (VerboseAsm)
- LI = &getAnalysis<MachineLoopInfo>();
-}
-
-namespace {
- // SectionCPs - Keep track the alignment, constpool entries per Section.
- struct SectionCPs {
- const MCSection *S;
- unsigned Alignment;
- SmallVector<unsigned, 4> CPEs;
- SectionCPs(const MCSection *s, unsigned a) : S(s), Alignment(a) {}
- };
-}
-
-/// EmitConstantPool - Print to the current output stream assembly
-/// representations of the constants in the constant pool MCP. This is
-/// used to print out constants which have been "spilled to memory" by
-/// the code generator.
-///
-void AsmPrinter::EmitConstantPool() {
- const MachineConstantPool *MCP = MF->getConstantPool();
- const std::vector<MachineConstantPoolEntry> &CP = MCP->getConstants();
- if (CP.empty()) return;
-
- // Calculate sections for constant pool entries. We collect entries to go into
- // the same section together to reduce amount of section switch statements.
- SmallVector<SectionCPs, 4> CPSections;
- for (unsigned i = 0, e = CP.size(); i != e; ++i) {
- const MachineConstantPoolEntry &CPE = CP[i];
- unsigned Align = CPE.getAlignment();
-
- SectionKind Kind;
- switch (CPE.getRelocationInfo()) {
- default: llvm_unreachable("Unknown section kind");
- case 2: Kind = SectionKind::getReadOnlyWithRel(); break;
- case 1:
- Kind = SectionKind::getReadOnlyWithRelLocal();
- break;
- case 0:
- switch (TM.getTargetData()->getTypeAllocSize(CPE.getType())) {
- case 4: Kind = SectionKind::getMergeableConst4(); break;
- case 8: Kind = SectionKind::getMergeableConst8(); break;
- case 16: Kind = SectionKind::getMergeableConst16();break;
- default: Kind = SectionKind::getMergeableConst(); break;
- }
- }
-
- const MCSection *S = getObjFileLowering().getSectionForConstant(Kind);
-
- // The number of sections are small, just do a linear search from the
- // last section to the first.
- bool Found = false;
- unsigned SecIdx = CPSections.size();
- while (SecIdx != 0) {
- if (CPSections[--SecIdx].S == S) {
- Found = true;
- break;
- }
- }
- if (!Found) {
- SecIdx = CPSections.size();
- CPSections.push_back(SectionCPs(S, Align));
- }
-
- if (Align > CPSections[SecIdx].Alignment)
- CPSections[SecIdx].Alignment = Align;
- CPSections[SecIdx].CPEs.push_back(i);
- }
-
- // Now print stuff into the calculated sections.
- for (unsigned i = 0, e = CPSections.size(); i != e; ++i) {
- OutStreamer.SwitchSection(CPSections[i].S);
- EmitAlignment(Log2_32(CPSections[i].Alignment));
-
- unsigned Offset = 0;
- for (unsigned j = 0, ee = CPSections[i].CPEs.size(); j != ee; ++j) {
- unsigned CPI = CPSections[i].CPEs[j];
- MachineConstantPoolEntry CPE = CP[CPI];
-
- // Emit inter-object padding for alignment.
- unsigned AlignMask = CPE.getAlignment() - 1;
- unsigned NewOffset = (Offset + AlignMask) & ~AlignMask;
- OutStreamer.EmitFill(NewOffset - Offset, 0/*fillval*/, 0/*addrspace*/);
-
- const Type *Ty = CPE.getType();
- Offset = NewOffset + TM.getTargetData()->getTypeAllocSize(Ty);
-
- // Emit the label with a comment on it.
- if (VerboseAsm) {
- OutStreamer.GetCommentOS() << "constant pool ";
- WriteTypeSymbolic(OutStreamer.GetCommentOS(), CPE.getType(),
- MF->getFunction()->getParent());
- OutStreamer.GetCommentOS() << '\n';
- }
- OutStreamer.EmitLabel(GetCPISymbol(CPI));
-
- if (CPE.isMachineConstantPoolEntry())
- EmitMachineConstantPoolValue(CPE.Val.MachineCPVal);
- else
- EmitGlobalConstant(CPE.Val.ConstVal);
- }
- }
-}
-
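An aside on the padding arithmetic in the EmitConstantPool() body above: (Offset + AlignMask) & ~AlignMask rounds Offset up to the next multiple of the (power-of-two) alignment, and the difference is emitted as fill. Worked on sample numbers, which are purely hypothetical:

#include <cstdio>

int main() {
  unsigned Offset = 5, Align = 8;                          // hypothetical running offset and alignment
  unsigned AlignMask = Align - 1;                          // 7; works because Align is a power of two
  unsigned NewOffset = (Offset + AlignMask) & ~AlignMask;  // (5 + 7) & ~7 = 12 & 0xFFFFFFF8 = 8
  std::printf("pad %u bytes, new offset %u\n",
              NewOffset - Offset, NewOffset);              // pad 3 bytes, new offset 8
  return 0;
}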
-/// EmitJumpTableInfo - Print assembly representations of the jump tables used
-/// by the current function to the current output stream.
-///
-void AsmPrinter::EmitJumpTableInfo() {
- const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
- if (MJTI == 0) return;
- const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
- if (JT.empty()) return;
-
- // Pick the directive to use to print the jump table entries, and switch to
- // the appropriate section.
- const Function *F = MF->getFunction();
- bool JTInDiffSection = false;
- if (// In PIC mode, we need to emit the jump table to the same section as the
- // function body itself, otherwise the label differences won't make sense.
- // FIXME: Need a better predicate for this: what about custom entries?
- MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32 ||
- // We should also do if the section name is NULL or function is declared
- // in discardable section
- // FIXME: this isn't the right predicate, should be based on the MCSection
- // for the function.
- F->isWeakForLinker()) {
- OutStreamer.SwitchSection(getObjFileLowering().SectionForGlobal(F,Mang,TM));
- } else {
- // Otherwise, drop it in the readonly section.
- const MCSection *ReadOnlySection =
- getObjFileLowering().getSectionForConstant(SectionKind::getReadOnly());
- OutStreamer.SwitchSection(ReadOnlySection);
- JTInDiffSection = true;
- }
-
- EmitAlignment(Log2_32(MJTI->getEntryAlignment(*TM.getTargetData())));
-
- for (unsigned JTI = 0, e = JT.size(); JTI != e; ++JTI) {
- const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
-
- // If this jump table was deleted, ignore it.
- if (JTBBs.empty()) continue;
-
- // For the EK_LabelDifference32 entry, if the target supports .set, emit a
- // .set directive for each unique entry. This reduces the number of
- // relocations the assembler will generate for the jump table.
- if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32 &&
- MAI->hasSetDirective()) {
- SmallPtrSet<const MachineBasicBlock*, 16> EmittedSets;
- const TargetLowering *TLI = TM.getTargetLowering();
- const MCExpr *Base = TLI->getPICJumpTableRelocBaseExpr(MF,JTI,OutContext);
- for (unsigned ii = 0, ee = JTBBs.size(); ii != ee; ++ii) {
- const MachineBasicBlock *MBB = JTBBs[ii];
- if (!EmittedSets.insert(MBB)) continue;
-
- // .set LJTSet, LBB32-base
- const MCExpr *LHS =
- MCSymbolRefExpr::Create(MBB->getSymbol(OutContext), OutContext);
- OutStreamer.EmitAssignment(GetJTSetSymbol(JTI, MBB->getNumber()),
- MCBinaryExpr::CreateSub(LHS, Base, OutContext));
- }
- }
-
- // On some targets (e.g. Darwin) we want to emit two consecutive labels
- // before each jump table. The first label is never referenced, but tells
- // the assembler and linker the extents of the jump table object. The
- // second label is actually referenced by the code.
- if (JTInDiffSection && MAI->getLinkerPrivateGlobalPrefix()[0])
- // FIXME: This doesn't have to have any specific name, just any randomly
- // named and numbered 'l' label would work. Simplify GetJTISymbol.
- OutStreamer.EmitLabel(GetJTISymbol(JTI, true));
-
- OutStreamer.EmitLabel(GetJTISymbol(JTI));
-
- for (unsigned ii = 0, ee = JTBBs.size(); ii != ee; ++ii)
- EmitJumpTableEntry(MJTI, JTBBs[ii], JTI);
- }
-}
-
-/// EmitJumpTableEntry - Emit a jump table entry for the specified MBB to the
-/// current stream.
-void AsmPrinter::EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
- const MachineBasicBlock *MBB,
- unsigned UID) const {
- const MCExpr *Value = 0;
- switch (MJTI->getEntryKind()) {
- case MachineJumpTableInfo::EK_Custom32:
- Value = TM.getTargetLowering()->LowerCustomJumpTableEntry(MJTI, MBB, UID,
- OutContext);
- break;
- case MachineJumpTableInfo::EK_BlockAddress:
- // EK_BlockAddress - Each entry is a plain address of a block, e.g.:
- // .word LBB123
- Value = MCSymbolRefExpr::Create(MBB->getSymbol(OutContext), OutContext);
- break;
- case MachineJumpTableInfo::EK_GPRel32BlockAddress: {
- // EK_GPRel32BlockAddress - Each entry is an address of a block, encoded
- // with a relocation as gp-relative, e.g.:
- // .gprel32 LBB123
- MCSymbol *MBBSym = MBB->getSymbol(OutContext);
- OutStreamer.EmitGPRel32Value(MCSymbolRefExpr::Create(MBBSym, OutContext));
- return;
- }
-
- case MachineJumpTableInfo::EK_LabelDifference32: {
- // EK_LabelDifference32 - Each entry is the address of the block minus
- // the address of the jump table. This is used for PIC jump tables where
- // gprel32 is not supported. e.g.:
- // .word LBB123 - LJTI1_2
- // If the .set directive is supported, this is emitted as:
- // .set L4_5_set_123, LBB123 - LJTI1_2
- // .word L4_5_set_123
-
- // If we have emitted set directives for the jump table entries, print
- // them rather than the entries themselves. If we're emitting PIC, then
- // emit the table entries as differences between two text section labels.
- if (MAI->hasSetDirective()) {
- // If we used .set, reference the .set's symbol.
- Value = MCSymbolRefExpr::Create(GetJTSetSymbol(UID, MBB->getNumber()),
- OutContext);
- break;
- }
- // Otherwise, use the difference as the jump table entry.
- Value = MCSymbolRefExpr::Create(MBB->getSymbol(OutContext), OutContext);
- const MCExpr *JTI = MCSymbolRefExpr::Create(GetJTISymbol(UID), OutContext);
- Value = MCBinaryExpr::CreateSub(Value, JTI, OutContext);
- break;
- }
- }
-
- assert(Value && "Unknown entry kind!");
-
- unsigned EntrySize = MJTI->getEntrySize(*TM.getTargetData());
- OutStreamer.EmitValue(Value, EntrySize, /*addrspace*/0);
-}
-
-
-/// EmitSpecialLLVMGlobal - Check to see if the specified global is a
-/// special global used by LLVM. If so, emit it and return true, otherwise
-/// do nothing and return false.
-bool AsmPrinter::EmitSpecialLLVMGlobal(const GlobalVariable *GV) {
- if (GV->getName() == "llvm.used") {
- if (MAI->hasNoDeadStrip()) // No need to emit this at all.
- EmitLLVMUsedList(GV->getInitializer());
- return true;
- }
-
- // Ignore debug and non-emitted data. This handles llvm.compiler.used.
- if (GV->getSection() == "llvm.metadata" ||
- GV->hasAvailableExternallyLinkage())
- return true;
-
- if (!GV->hasAppendingLinkage()) return false;
-
- assert(GV->hasInitializer() && "Not a special LLVM global!");
-
- const TargetData *TD = TM.getTargetData();
- unsigned Align = Log2_32(TD->getPointerPrefAlignment());
- if (GV->getName() == "llvm.global_ctors") {
- OutStreamer.SwitchSection(getObjFileLowering().getStaticCtorSection());
- EmitAlignment(Align, 0);
- EmitXXStructorList(GV->getInitializer());
-
- if (TM.getRelocationModel() == Reloc::Static &&
- MAI->hasStaticCtorDtorReferenceInStaticMode()) {
- StringRef Sym(".constructors_used");
- OutStreamer.EmitSymbolAttribute(OutContext.GetOrCreateSymbol(Sym),
- MCSA_Reference);
- }
- return true;
- }
-
- if (GV->getName() == "llvm.global_dtors") {
- OutStreamer.SwitchSection(getObjFileLowering().getStaticDtorSection());
- EmitAlignment(Align, 0);
- EmitXXStructorList(GV->getInitializer());
-
- if (TM.getRelocationModel() == Reloc::Static &&
- MAI->hasStaticCtorDtorReferenceInStaticMode()) {
- StringRef Sym(".destructors_used");
- OutStreamer.EmitSymbolAttribute(OutContext.GetOrCreateSymbol(Sym),
- MCSA_Reference);
- }
- return true;
- }
-
- return false;
-}
-
-/// EmitLLVMUsedList - For targets that define a MAI::UsedDirective, mark each
-/// global in the specified llvm.used list for which emitUsedDirectiveFor
-/// is true, as being used with this directive.
-void AsmPrinter::EmitLLVMUsedList(Constant *List) {
- // Should be an array of 'i8*'.
- ConstantArray *InitList = dyn_cast<ConstantArray>(List);
- if (InitList == 0) return;
-
- for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
- const GlobalValue *GV =
- dyn_cast<GlobalValue>(InitList->getOperand(i)->stripPointerCasts());
- if (GV && getObjFileLowering().shouldEmitUsedDirectiveFor(GV, Mang))
- OutStreamer.EmitSymbolAttribute(GetGlobalValueSymbol(GV),
- MCSA_NoDeadStrip);
- }
-}
-
-/// EmitXXStructorList - Emit the ctor or dtor list. This just prints out the
-/// function pointers, ignoring the init priority.
-void AsmPrinter::EmitXXStructorList(Constant *List) {
- // Should be an array of '{ int, void ()* }' structs. The first value is the
- // init priority, which we ignore.
- if (!isa<ConstantArray>(List)) return;
- ConstantArray *InitList = cast<ConstantArray>(List);
- for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
- if (ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(i))){
- if (CS->getNumOperands() != 2) return; // Not array of 2-element structs.
-
- if (CS->getOperand(1)->isNullValue())
- return; // Found a null terminator, exit printing.
- // Emit the function pointer.
- EmitGlobalConstant(CS->getOperand(1));
- }
-}
-
-//===--------------------------------------------------------------------===//
-// Emission and print routines
-//
-
-/// EmitInt8 - Emit a byte directive and value.
-///
-void AsmPrinter::EmitInt8(int Value) const {
- OutStreamer.EmitIntValue(Value, 1, 0/*addrspace*/);
-}
-
-/// EmitInt16 - Emit a short directive and value.
-///
-void AsmPrinter::EmitInt16(int Value) const {
- OutStreamer.EmitIntValue(Value, 2, 0/*addrspace*/);
-}
-
-/// EmitInt32 - Emit a long directive and value.
-///
-void AsmPrinter::EmitInt32(int Value) const {
- OutStreamer.EmitIntValue(Value, 4, 0/*addrspace*/);
-}
-
-/// EmitInt64 - Emit a long long directive and value.
-///
-void AsmPrinter::EmitInt64(uint64_t Value) const {
- OutStreamer.EmitIntValue(Value, 8, 0/*addrspace*/);
-}
-
-//===----------------------------------------------------------------------===//
-
-// EmitAlignment - Emit an alignment directive to the specified power of
-// two boundary. For example, if you pass in 3 here, you will get an 8
-// byte alignment. If a global value is specified, and if that global has
-// an explicit alignment requested, it will unconditionally override the
-// alignment request. However, if ForcedAlignBits is specified, this value
-// has final say: the ultimate alignment will be the max of ForcedAlignBits
-// and the alignment computed with NumBits and the global.
-//
-// The algorithm is:
-// Align = NumBits;
-// if (GV && GV->hasalignment) Align = GV->getalignment();
-// Align = std::max(Align, ForcedAlignBits);
-//
-void AsmPrinter::EmitAlignment(unsigned NumBits, const GlobalValue *GV,
- unsigned ForcedAlignBits,
- bool UseFillExpr) const {
- if (GV && GV->getAlignment())
- NumBits = Log2_32(GV->getAlignment());
- NumBits = std::max(NumBits, ForcedAlignBits);
-
- if (NumBits == 0) return; // No need to emit alignment.
-
- if (getCurrentSection()->getKind().isText())
- OutStreamer.EmitCodeAlignment(1 << NumBits);
- else
- OutStreamer.EmitValueToAlignment(1 << NumBits, 0, 1, 0);
-}
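The net effect of the rules spelled out in the comment is: take NumBits, let an explicit global alignment override it, take the max with ForcedAlignBits, and emit to a 1 << NumBits byte boundary. A hedged standalone sketch of just that arithmetic (the sentinel encoding for "no explicit alignment" is an assumption of this sketch, not the AsmPrinter API):

#include <algorithm>
#include <cstdint>
#include <iostream>

// Compute the final alignment boundary in bytes from the three inputs
// described in the comment above. GVAlignLog2 is ~0u when the global has
// no explicit alignment (hypothetical encoding used only in this sketch).
static uint64_t FinalAlignmentBytes(unsigned NumBits, unsigned GVAlignLog2,
                                    unsigned ForcedAlignBits) {
  if (GVAlignLog2 != ~0u)
    NumBits = GVAlignLog2;                      // explicit alignment overrides
  NumBits = std::max(NumBits, ForcedAlignBits);
  return NumBits ? (uint64_t(1) << NumBits) : 1; // 0 bits => nothing to emit
}

int main() {
  std::cout << FinalAlignmentBytes(3, ~0u, 0) << "\n";  // 8-byte boundary
  std::cout << FinalAlignmentBytes(2, 4, 0)   << "\n";  // global forces 16
  std::cout << FinalAlignmentBytes(2, ~0u, 5) << "\n";  // forced bits win: 32
  return 0;
}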
-
-/// LowerConstant - Lower the specified LLVM Constant to an MCExpr.
-///
-static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) {
- MCContext &Ctx = AP.OutContext;
-
- if (CV->isNullValue() || isa<UndefValue>(CV))
- return MCConstantExpr::Create(0, Ctx);
-
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV))
- return MCConstantExpr::Create(CI->getZExtValue(), Ctx);
-
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV))
- return MCSymbolRefExpr::Create(AP.GetGlobalValueSymbol(GV), Ctx);
- if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV))
- return MCSymbolRefExpr::Create(AP.GetBlockAddressSymbol(BA), Ctx);
-
- const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV);
- if (CE == 0) {
- llvm_unreachable("Unknown constant value to lower!");
- return MCConstantExpr::Create(0, Ctx);
- }
-
- switch (CE->getOpcode()) {
- default:
- // If the code isn't optimized, there may be outstanding folding
- // opportunities. Attempt to fold the expression using TargetData as a
- // last resort before giving up.
- if (Constant *C =
- ConstantFoldConstantExpression(CE, AP.TM.getTargetData()))
- if (C != CE)
- return LowerConstant(C, AP);
-#ifndef NDEBUG
- CE->dump();
-#endif
- llvm_unreachable("FIXME: Don't support this constant expr");
- case Instruction::GetElementPtr: {
- const TargetData &TD = *AP.TM.getTargetData();
- // Generate a symbolic expression for the byte address
- const Constant *PtrVal = CE->getOperand(0);
- SmallVector<Value*, 8> IdxVec(CE->op_begin()+1, CE->op_end());
- int64_t Offset = TD.getIndexedOffset(PtrVal->getType(), &IdxVec[0],
- IdxVec.size());
-
- const MCExpr *Base = LowerConstant(CE->getOperand(0), AP);
- if (Offset == 0)
- return Base;
-
- // Truncate/sext the offset to the pointer size.
- if (TD.getPointerSizeInBits() != 64) {
- int SExtAmount = 64-TD.getPointerSizeInBits();
- Offset = (Offset << SExtAmount) >> SExtAmount;
- }
-
- return MCBinaryExpr::CreateAdd(Base, MCConstantExpr::Create(Offset, Ctx),
- Ctx);
- }
-
- case Instruction::Trunc:
- // We emit the value and depend on the assembler to truncate the generated
- // expression properly. This is important for differences between
- // blockaddress labels. Since the two labels are in the same function, it
- // is reasonable to treat their delta as a 32-bit value.
- // FALL THROUGH.
- case Instruction::BitCast:
- return LowerConstant(CE->getOperand(0), AP);
-
- case Instruction::IntToPtr: {
- const TargetData &TD = *AP.TM.getTargetData();
- // Handle casts to pointers by changing them into casts to the appropriate
- // integer type. This promotes constant folding and simplifies this code.
- Constant *Op = CE->getOperand(0);
- Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CV->getContext()),
- false/*ZExt*/);
- return LowerConstant(Op, AP);
- }
-
- case Instruction::PtrToInt: {
- const TargetData &TD = *AP.TM.getTargetData();
- // Support only foldable casts to/from pointers that can be eliminated by
- // changing the pointer to the appropriately sized integer type.
- Constant *Op = CE->getOperand(0);
- const Type *Ty = CE->getType();
-
- const MCExpr *OpExpr = LowerConstant(Op, AP);
-
- // We can emit the pointer value into this slot if the slot is an
- // integer slot equal to the size of the pointer.
- if (TD.getTypeAllocSize(Ty) == TD.getTypeAllocSize(Op->getType()))
- return OpExpr;
-
- // Otherwise the pointer is smaller than the resultant integer, mask off
- // the high bits so we are sure to get a proper truncation if the input is
- // a constant expr.
- unsigned InBits = TD.getTypeAllocSizeInBits(Op->getType());
- const MCExpr *MaskExpr = MCConstantExpr::Create(~0ULL >> (64-InBits), Ctx);
- return MCBinaryExpr::CreateAnd(OpExpr, MaskExpr, Ctx);
- }
-
- // The MC library also has a right-shift operator, but it isn't consistently
- // signed or unsigned between different targets.
- case Instruction::Add:
- case Instruction::Sub:
- case Instruction::Mul:
- case Instruction::SDiv:
- case Instruction::SRem:
- case Instruction::Shl:
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor: {
- const MCExpr *LHS = LowerConstant(CE->getOperand(0), AP);
- const MCExpr *RHS = LowerConstant(CE->getOperand(1), AP);
- switch (CE->getOpcode()) {
- default: llvm_unreachable("Unknown binary operator constant cast expr");
- case Instruction::Add: return MCBinaryExpr::CreateAdd(LHS, RHS, Ctx);
- case Instruction::Sub: return MCBinaryExpr::CreateSub(LHS, RHS, Ctx);
- case Instruction::Mul: return MCBinaryExpr::CreateMul(LHS, RHS, Ctx);
- case Instruction::SDiv: return MCBinaryExpr::CreateDiv(LHS, RHS, Ctx);
- case Instruction::SRem: return MCBinaryExpr::CreateMod(LHS, RHS, Ctx);
- case Instruction::Shl: return MCBinaryExpr::CreateShl(LHS, RHS, Ctx);
- case Instruction::And: return MCBinaryExpr::CreateAnd(LHS, RHS, Ctx);
- case Instruction::Or: return MCBinaryExpr::CreateOr (LHS, RHS, Ctx);
- case Instruction::Xor: return MCBinaryExpr::CreateXor(LHS, RHS, Ctx);
- }
- }
- }
-}
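Two bit tricks above are easy to misread: the shift pair that sign-extends a GEP offset down to the pointer width, and the ~0ULL >> (64-InBits) mask in the PtrToInt case. A standalone sketch of both, using an overflow-safe sign-extension idiom rather than the raw shifts (helper names are illustrative, not the LLVM API):

#include <cassert>
#include <cstdint>

// Sign-extend the low 'Bits' bits of V to a full int64_t; this mirrors the
// intent of the shift pair used for GEP offsets on 32-bit targets above.
static int64_t SignExtend(uint64_t V, unsigned Bits) {
  uint64_t Mask = (Bits < 64) ? ((uint64_t(1) << Bits) - 1) : ~uint64_t(0);
  uint64_t SignBit = uint64_t(1) << (Bits - 1);
  V &= Mask;
  return int64_t((V ^ SignBit) - SignBit);
}

// Mask used by the PtrToInt case to truncate a wider expression down to the
// pointer width, matching ~0ULL >> (64 - InBits).
static uint64_t LowBitMask(unsigned InBits) {
  return ~0ULL >> (64 - InBits);
}

int main() {
  assert(SignExtend(0xFFFFFFFFULL, 32) == -1);         // -1 as a 32-bit value
  assert(SignExtend(0x7FFFFFFFULL, 32) == 0x7FFFFFFF); // positive stays put
  assert(LowBitMask(32) == 0xFFFFFFFFULL);
  return 0;
}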
-
-static void EmitGlobalConstantArray(const ConstantArray *CA, unsigned AddrSpace,
- AsmPrinter &AP) {
- if (AddrSpace != 0 || !CA->isString()) {
- // Not a string. Print the values in successive locations
- for (unsigned i = 0, e = CA->getNumOperands(); i != e; ++i)
- AP.EmitGlobalConstant(CA->getOperand(i), AddrSpace);
- return;
- }
-
- // Otherwise, it can be emitted as .ascii.
- SmallVector<char, 128> TmpVec;
- TmpVec.reserve(CA->getNumOperands());
- for (unsigned i = 0, e = CA->getNumOperands(); i != e; ++i)
- TmpVec.push_back(cast<ConstantInt>(CA->getOperand(i))->getZExtValue());
-
- AP.OutStreamer.EmitBytes(StringRef(TmpVec.data(), TmpVec.size()), AddrSpace);
-}
-
-static void EmitGlobalConstantVector(const ConstantVector *CV,
- unsigned AddrSpace, AsmPrinter &AP) {
- for (unsigned i = 0, e = CV->getType()->getNumElements(); i != e; ++i)
- AP.EmitGlobalConstant(CV->getOperand(i), AddrSpace);
-}
-
-static void EmitGlobalConstantStruct(const ConstantStruct *CS,
- unsigned AddrSpace, AsmPrinter &AP) {
- // Print the fields in successive locations. Pad to align if needed!
- const TargetData *TD = AP.TM.getTargetData();
- unsigned Size = TD->getTypeAllocSize(CS->getType());
- const StructLayout *Layout = TD->getStructLayout(CS->getType());
- uint64_t SizeSoFar = 0;
- for (unsigned i = 0, e = CS->getNumOperands(); i != e; ++i) {
- const Constant *Field = CS->getOperand(i);
-
- // Check if padding is needed and insert one or more 0s.
- uint64_t FieldSize = TD->getTypeAllocSize(Field->getType());
- uint64_t PadSize = ((i == e-1 ? Size : Layout->getElementOffset(i+1))
- - Layout->getElementOffset(i)) - FieldSize;
- SizeSoFar += FieldSize + PadSize;
-
- // Now print the actual field value.
- AP.EmitGlobalConstant(Field, AddrSpace);
-
- // Insert padding - this may include padding to increase the size of the
- // current field up to the ABI size (if the struct is not packed) as well
- // as padding to ensure that the next field starts at the right offset.
- AP.OutStreamer.EmitZeros(PadSize, AddrSpace);
- }
- assert(SizeSoFar == Layout->getSizeInBytes() &&
- "Layout of constant struct may be incorrect!");
-}
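The padding computed per field is simply the distance from this field's offset to the next field's offset (or to the total struct size for the last field), minus the field's allocated size. A small standalone sketch with made-up layout numbers (not the StructLayout API):

#include <cassert>
#include <cstdint>
#include <vector>

// Given field offsets, each field's allocated size, and the total struct
// size, compute the zero padding emitted after each field, as in the loop
// above. Purely illustrative numbers.
static std::vector<uint64_t> FieldPadding(const std::vector<uint64_t> &Offsets,
                                          const std::vector<uint64_t> &Sizes,
                                          uint64_t StructSize) {
  std::vector<uint64_t> Pad(Offsets.size());
  for (size_t i = 0, e = Offsets.size(); i != e; ++i) {
    uint64_t Next = (i == e - 1) ? StructSize : Offsets[i + 1];
    Pad[i] = (Next - Offsets[i]) - Sizes[i];
  }
  return Pad;
}

int main() {
  // struct { char c; int i; }: offsets {0, 4}, sizes {1, 4}, total size 8.
  std::vector<uint64_t> Pad = FieldPadding({0, 4}, {1, 4}, 8);
  assert(Pad[0] == 3 && Pad[1] == 0);
  return 0;
}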
-
-static void EmitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace,
- AsmPrinter &AP) {
- // FP Constants are printed as integer constants to avoid losing
- // precision.
- if (CFP->getType()->isDoubleTy()) {
- if (AP.VerboseAsm) {
- double Val = CFP->getValueAPF().convertToDouble();
- AP.OutStreamer.GetCommentOS() << "double " << Val << '\n';
- }
-
- uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
- AP.OutStreamer.EmitIntValue(Val, 8, AddrSpace);
- return;
- }
-
- if (CFP->getType()->isFloatTy()) {
- if (AP.VerboseAsm) {
- float Val = CFP->getValueAPF().convertToFloat();
- AP.OutStreamer.GetCommentOS() << "float " << Val << '\n';
- }
- uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
- AP.OutStreamer.EmitIntValue(Val, 4, AddrSpace);
- return;
- }
-
- if (CFP->getType()->isX86_FP80Ty()) {
- // All long double variants are printed as hex.
- // The local APInt copy (API) is needed to prevent premature destruction.
- APInt API = CFP->getValueAPF().bitcastToAPInt();
- const uint64_t *p = API.getRawData();
- if (AP.VerboseAsm) {
- // Convert to double so we can print the approximate val as a comment.
- APFloat DoubleVal = CFP->getValueAPF();
- bool ignored;
- DoubleVal.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven,
- &ignored);
- AP.OutStreamer.GetCommentOS() << "x86_fp80 ~= "
- << DoubleVal.convertToDouble() << '\n';
- }
-
- if (AP.TM.getTargetData()->isBigEndian()) {
- AP.OutStreamer.EmitIntValue(p[1], 2, AddrSpace);
- AP.OutStreamer.EmitIntValue(p[0], 8, AddrSpace);
- } else {
- AP.OutStreamer.EmitIntValue(p[0], 8, AddrSpace);
- AP.OutStreamer.EmitIntValue(p[1], 2, AddrSpace);
- }
-
- // Emit the tail padding for the long double.
- const TargetData &TD = *AP.TM.getTargetData();
- AP.OutStreamer.EmitZeros(TD.getTypeAllocSize(CFP->getType()) -
- TD.getTypeStoreSize(CFP->getType()), AddrSpace);
- return;
- }
-
- assert(CFP->getType()->isPPC_FP128Ty() &&
- "Floating point constant type not handled");
- // All long double variants are printed as hex; the local APInt copy (API)
- // is needed to prevent premature destruction.
- APInt API = CFP->getValueAPF().bitcastToAPInt();
- const uint64_t *p = API.getRawData();
- if (AP.TM.getTargetData()->isBigEndian()) {
- AP.OutStreamer.EmitIntValue(p[0], 8, AddrSpace);
- AP.OutStreamer.EmitIntValue(p[1], 8, AddrSpace);
- } else {
- AP.OutStreamer.EmitIntValue(p[1], 8, AddrSpace);
- AP.OutStreamer.EmitIntValue(p[0], 8, AddrSpace);
- }
-}
-
-static void EmitGlobalConstantLargeInt(const ConstantInt *CI,
- unsigned AddrSpace, AsmPrinter &AP) {
- const TargetData *TD = AP.TM.getTargetData();
- unsigned BitWidth = CI->getBitWidth();
- assert((BitWidth & 63) == 0 && "only support multiples of 64-bits");
-
- // We don't expect assemblers to support integer data directives
- // for more than 64 bits, so we emit the data in at most 64-bit
- // quantities at a time.
- const uint64_t *RawData = CI->getValue().getRawData();
- for (unsigned i = 0, e = BitWidth / 64; i != e; ++i) {
- uint64_t Val = TD->isBigEndian() ? RawData[e - i - 1] : RawData[i];
- AP.OutStreamer.EmitIntValue(Val, 8, AddrSpace);
- }
-}
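Wide integers are thus emitted as a sequence of 64-bit words, least-significant word first as APInt stores them, with the word order reversed on big-endian targets. A hedged standalone sketch of that loop, printing .quad directives instead of driving a streamer:

#include <cstdint>
#include <cstdio>
#include <vector>

// Emit a wide integer as 64-bit words, reversing word order on big-endian
// targets, as in the loop above. 'Words' is least-significant word first.
static void EmitWideInt(const std::vector<uint64_t> &Words, bool BigEndian) {
  for (size_t i = 0, e = Words.size(); i != e; ++i) {
    uint64_t Val = BigEndian ? Words[e - i - 1] : Words[i];
    std::printf("  .quad 0x%016llx\n", (unsigned long long)Val);
  }
}

int main() {
  // A 128-bit value 0x00000000000000010000000000000002.
  EmitWideInt({0x2, 0x1}, /*BigEndian=*/false); // low word first
  EmitWideInt({0x2, 0x1}, /*BigEndian=*/true);  // high word first
  return 0;
}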
-
-/// EmitGlobalConstant - Print a general LLVM constant to the .s file.
-void AsmPrinter::EmitGlobalConstant(const Constant *CV, unsigned AddrSpace) {
- if (isa<ConstantAggregateZero>(CV) || isa<UndefValue>(CV)) {
- uint64_t Size = TM.getTargetData()->getTypeAllocSize(CV->getType());
- if (Size == 0) Size = 1; // An empty "_foo:" followed by a section is undef.
- return OutStreamer.EmitZeros(Size, AddrSpace);
- }
-
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
- unsigned Size = TM.getTargetData()->getTypeAllocSize(CV->getType());
- switch (Size) {
- case 1:
- case 2:
- case 4:
- case 8:
- if (VerboseAsm)
- OutStreamer.GetCommentOS() << format("0x%llx\n", CI->getZExtValue());
- OutStreamer.EmitIntValue(CI->getZExtValue(), Size, AddrSpace);
- return;
- default:
- EmitGlobalConstantLargeInt(CI, AddrSpace, *this);
- return;
- }
- }
-
- if (const ConstantArray *CVA = dyn_cast<ConstantArray>(CV))
- return EmitGlobalConstantArray(CVA, AddrSpace, *this);
-
- if (const ConstantStruct *CVS = dyn_cast<ConstantStruct>(CV))
- return EmitGlobalConstantStruct(CVS, AddrSpace, *this);
-
- if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV))
- return EmitGlobalConstantFP(CFP, AddrSpace, *this);
-
- if (const ConstantVector *V = dyn_cast<ConstantVector>(CV))
- return EmitGlobalConstantVector(V, AddrSpace, *this);
-
- if (isa<ConstantPointerNull>(CV)) {
- unsigned Size = TM.getTargetData()->getTypeAllocSize(CV->getType());
- OutStreamer.EmitIntValue(0, Size, AddrSpace);
- return;
- }
-
- // Otherwise, it must be a ConstantExpr. Lower it to an MCExpr, then emit it
- // through the streamer with EmitValue.
- OutStreamer.EmitValue(LowerConstant(CV, *this),
- TM.getTargetData()->getTypeAllocSize(CV->getType()),
- AddrSpace);
-}
-
-void AsmPrinter::EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
- // Target doesn't support this yet!
- llvm_unreachable("Target does not support EmitMachineConstantPoolValue");
-}
-
-/// PrintSpecial - Print information related to the specified machine instr
-/// that is independent of the operand, and may be independent of the instr
-/// itself. This can be useful for portably encoding the comment character
-/// or other bits of target-specific knowledge into the asmstrings. The
-/// syntax used is ${:comment}. Targets can override this to add support
-/// for their own strange codes.
-void AsmPrinter::PrintSpecial(const MachineInstr *MI, const char *Code) const {
- if (!strcmp(Code, "private")) {
- O << MAI->getPrivateGlobalPrefix();
- } else if (!strcmp(Code, "comment")) {
- if (VerboseAsm)
- O << MAI->getCommentString();
- } else if (!strcmp(Code, "uid")) {
- // Comparing the address of MI isn't sufficient, because machineinstrs may
- // be allocated to the same address across functions.
- const Function *ThisF = MI->getParent()->getParent()->getFunction();
-
- // If this is a new LastFn instruction, bump the counter.
- if (LastMI != MI || LastFn != ThisF) {
- ++Counter;
- LastMI = MI;
- LastFn = ThisF;
- }
- O << Counter;
- } else {
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "Unknown special formatter '" << Code
- << "' for machine instr: " << *MI;
- llvm_report_error(Msg.str());
- }
-}
-
-/// processDebugLoc - Processes the debug information of each machine
-/// instruction's DebugLoc.
-void AsmPrinter::processDebugLoc(const MachineInstr *MI,
- bool BeforePrintingInsn) {
- if (!MAI || !DW || !MAI->doesSupportDebugInformation()
- || !DW->ShouldEmitDwarfDebug())
- return;
- DebugLoc DL = MI->getDebugLoc();
- if (DL.isUnknown())
- return;
- DILocation CurDLT = MF->getDILocation(DL);
- if (CurDLT.getScope().isNull())
- return;
-
- if (!BeforePrintingInsn) {
- // After printing instruction
- DW->EndScope(MI);
- } else if (CurDLT.getNode() != PrevDLT) {
- unsigned L = DW->RecordSourceLine(CurDLT.getLineNumber(),
- CurDLT.getColumnNumber(),
- CurDLT.getScope().getNode());
- printLabel(L);
- O << '\n';
- DW->BeginScope(MI, L);
- PrevDLT = CurDLT.getNode();
- }
-}
-
-
-/// printInlineAsm - This method formats and prints the specified machine
-/// instruction that is an inline asm.
-void AsmPrinter::printInlineAsm(const MachineInstr *MI) const {
- unsigned NumOperands = MI->getNumOperands();
-
- // Count the number of register definitions.
- unsigned NumDefs = 0;
- for (; MI->getOperand(NumDefs).isReg() && MI->getOperand(NumDefs).isDef();
- ++NumDefs)
- assert(NumDefs != NumOperands-1 && "No asm string?");
-
- assert(MI->getOperand(NumDefs).isSymbol() && "No asm string?");
-
- // Disassemble the AsmStr, printing out the literal pieces, the operands, etc.
- const char *AsmStr = MI->getOperand(NumDefs).getSymbolName();
-
- O << '\t';
-
- // If this asmstr is empty, just print the #APP/#NOAPP markers.
- // These are useful to see where empty asm's wound up.
- if (AsmStr[0] == 0) {
- O << MAI->getCommentString() << MAI->getInlineAsmStart() << "\n\t";
- O << MAI->getCommentString() << MAI->getInlineAsmEnd() << '\n';
- return;
- }
-
- O << MAI->getCommentString() << MAI->getInlineAsmStart() << "\n\t";
-
- // The variant of the current asmprinter.
- int AsmPrinterVariant = MAI->getAssemblerDialect();
-
- int CurVariant = -1; // The number of the {.|.|.} region we are in.
- const char *LastEmitted = AsmStr; // One past the last character emitted.
-
- while (*LastEmitted) {
- switch (*LastEmitted) {
- default: {
- // Not a special case, emit the string section literally.
- const char *LiteralEnd = LastEmitted+1;
- while (*LiteralEnd && *LiteralEnd != '{' && *LiteralEnd != '|' &&
- *LiteralEnd != '}' && *LiteralEnd != '$' && *LiteralEnd != '\n')
- ++LiteralEnd;
- if (CurVariant == -1 || CurVariant == AsmPrinterVariant)
- O.write(LastEmitted, LiteralEnd-LastEmitted);
- LastEmitted = LiteralEnd;
- break;
- }
- case '\n':
- ++LastEmitted; // Consume newline character.
- O << '\n'; // Indent code with newline.
- break;
- case '$': {
- ++LastEmitted; // Consume '$' character.
- bool Done = true;
-
- // Handle escapes.
- switch (*LastEmitted) {
- default: Done = false; break;
- case '$': // $$ -> $
- if (CurVariant == -1 || CurVariant == AsmPrinterVariant)
- O << '$';
- ++LastEmitted; // Consume second '$' character.
- break;
- case '(': // $( -> same as GCC's { character.
- ++LastEmitted; // Consume '(' character.
- if (CurVariant != -1) {
- llvm_report_error("Nested variants found in inline asm string: '"
- + std::string(AsmStr) + "'");
- }
- CurVariant = 0; // We're in the first variant now.
- break;
- case '|':
- ++LastEmitted; // consume '|' character.
- if (CurVariant == -1)
- O << '|'; // this is gcc's behavior for | outside a variant
- else
- ++CurVariant; // We're in the next variant.
- break;
- case ')': // $) -> same as GCC's } char.
- ++LastEmitted; // consume ')' character.
- if (CurVariant == -1)
- O << '}'; // this is gcc's behavior for } outside a variant
- else
- CurVariant = -1;
- break;
- }
- if (Done) break;
-
- bool HasCurlyBraces = false;
- if (*LastEmitted == '{') { // ${variable}
- ++LastEmitted; // Consume '{' character.
- HasCurlyBraces = true;
- }
-
- // If we have ${:foo}, then this is not a real operand reference, it is a
- // "magic" string reference, just like in .td files. Arrange to call
- // PrintSpecial.
- if (HasCurlyBraces && *LastEmitted == ':') {
- ++LastEmitted;
- const char *StrStart = LastEmitted;
- const char *StrEnd = strchr(StrStart, '}');
- if (StrEnd == 0) {
- llvm_report_error("Unterminated ${:foo} operand in inline asm string: '"
- + std::string(AsmStr) + "'");
- }
-
- std::string Val(StrStart, StrEnd);
- PrintSpecial(MI, Val.c_str());
- LastEmitted = StrEnd+1;
- break;
- }
-
- const char *IDStart = LastEmitted;
- char *IDEnd;
- errno = 0;
- long Val = strtol(IDStart, &IDEnd, 10); // We only accept numbers for IDs.
- if (!isdigit(*IDStart) || (Val == 0 && errno == EINVAL)) {
- llvm_report_error("Bad $ operand number in inline asm string: '"
- + std::string(AsmStr) + "'");
- }
- LastEmitted = IDEnd;
-
- char Modifier[2] = { 0, 0 };
-
- if (HasCurlyBraces) {
- // If we have curly braces, check for a modifier character. This
- // supports syntax like ${0:u}, which corresponds to "%u0" in GCC asm.
- if (*LastEmitted == ':') {
- ++LastEmitted; // Consume ':' character.
- if (*LastEmitted == 0) {
- llvm_report_error("Bad ${:} expression in inline asm string: '"
- + std::string(AsmStr) + "'");
- }
-
- Modifier[0] = *LastEmitted;
- ++LastEmitted; // Consume modifier character.
- }
-
- if (*LastEmitted != '}') {
- llvm_report_error("Bad ${} expression in inline asm string: '"
- + std::string(AsmStr) + "'");
- }
- ++LastEmitted; // Consume '}' character.
- }
-
- if ((unsigned)Val >= NumOperands-1) {
- llvm_report_error("Invalid $ operand number in inline asm string: '"
- + std::string(AsmStr) + "'");
- }
-
- // Okay, we finally have a value number. Ask the target to print this
- // operand!
- if (CurVariant == -1 || CurVariant == AsmPrinterVariant) {
- unsigned OpNo = 1;
-
- bool Error = false;
-
- // Scan to find the machine operand number for the operand.
- for (; Val; --Val) {
- if (OpNo >= MI->getNumOperands()) break;
- unsigned OpFlags = MI->getOperand(OpNo).getImm();
- OpNo += InlineAsm::getNumOperandRegisters(OpFlags) + 1;
- }
-
- if (OpNo >= MI->getNumOperands()) {
- Error = true;
- } else {
- unsigned OpFlags = MI->getOperand(OpNo).getImm();
- ++OpNo; // Skip over the ID number.
-
- if (Modifier[0] == 'l') // labels are target independent
- O << *MI->getOperand(OpNo).getMBB()->getSymbol(OutContext);
- else {
- AsmPrinter *AP = const_cast<AsmPrinter*>(this);
- if ((OpFlags & 7) == 4) {
- Error = AP->PrintAsmMemoryOperand(MI, OpNo, AsmPrinterVariant,
- Modifier[0] ? Modifier : 0);
- } else {
- Error = AP->PrintAsmOperand(MI, OpNo, AsmPrinterVariant,
- Modifier[0] ? Modifier : 0);
- }
- }
- }
- if (Error) {
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "Invalid operand found in inline asm: '" << AsmStr << "'\n";
- MI->print(Msg);
- llvm_report_error(Msg.str());
- }
- }
- break;
- }
- }
- }
- O << "\n\t" << MAI->getCommentString() << MAI->getInlineAsmEnd();
- OutStreamer.AddBlankLine();
-}
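The operand-reference syntax handled above is ${N} with an optional single-character modifier, ${N:m}. A small standalone parser sketch for just that form (a hypothetical helper, not the AsmPrinter code, and deliberately stricter than the full escape handling above):

#include <cassert>
#include <cctype>
#include <string>

// Parse a "${N}" or "${N:m}" operand reference, returning the operand number
// and optional single-character modifier. Hypothetical helper mirroring the
// syntax handled above; returns false on malformed input.
static bool ParseOperandRef(const std::string &S, unsigned &OpNo, char &Mod) {
  if (S.size() < 4 || S[0] != '$' || S[1] != '{' || S[S.size() - 1] != '}')
    return false;
  size_t Pos = 2;
  if (!isdigit((unsigned char)S[Pos])) return false;
  OpNo = 0;
  while (Pos < S.size() && isdigit((unsigned char)S[Pos]))
    OpNo = OpNo * 10 + (S[Pos++] - '0');
  Mod = 0;
  if (S[Pos] == ':') {
    if (Pos + 2 >= S.size()) return false;  // need a modifier char and '}'
    Mod = S[Pos + 1];
    Pos += 2;
  }
  return Pos + 1 == S.size();               // nothing left but the '}'
}

int main() {
  unsigned N; char M;
  assert(ParseOperandRef("${0:u}", N, M) && N == 0 && M == 'u');
  assert(ParseOperandRef("${12}", N, M) && N == 12 && M == 0);
  assert(!ParseOperandRef("${:u}", N, M));
  return 0;
}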
-
-/// printImplicitDef - This method prints the specified machine instruction
-/// that is an implicit def.
-void AsmPrinter::printImplicitDef(const MachineInstr *MI) const {
- if (!VerboseAsm) return;
- O.PadToColumn(MAI->getCommentColumn());
- O << MAI->getCommentString() << " implicit-def: "
- << TRI->getName(MI->getOperand(0).getReg());
- OutStreamer.AddBlankLine();
-}
-
-void AsmPrinter::printKill(const MachineInstr *MI) const {
- if (!VerboseAsm) return;
- O.PadToColumn(MAI->getCommentColumn());
- O << MAI->getCommentString() << " kill:";
- for (unsigned n = 0, e = MI->getNumOperands(); n != e; ++n) {
- const MachineOperand &op = MI->getOperand(n);
- assert(op.isReg() && "KILL instruction must have only register operands");
- O << ' ' << TRI->getName(op.getReg()) << (op.isDef() ? "<def>" : "<kill>");
- }
- OutStreamer.AddBlankLine();
-}
-
- /// printLabelInst - This method prints a local label used by debug and
-/// exception handling tables.
-void AsmPrinter::printLabelInst(const MachineInstr *MI) const {
- printLabel(MI->getOperand(0).getImm());
- OutStreamer.AddBlankLine();
-}
-
-void AsmPrinter::printLabel(unsigned Id) const {
- O << MAI->getPrivateGlobalPrefix() << "label" << Id << ':';
-}
-
-/// PrintAsmOperand - Print the specified operand of MI, an INLINEASM
-/// instruction, using the specified assembler variant. Targets should
-/// override this to format as appropriate.
-bool AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode) {
- // Target doesn't support this yet!
- return true;
-}
-
-bool AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant,
- const char *ExtraCode) {
- // Target doesn't support this yet!
- return true;
-}
-
-MCSymbol *AsmPrinter::GetBlockAddressSymbol(const BlockAddress *BA) const {
- return GetBlockAddressSymbol(BA->getFunction(), BA->getBasicBlock());
-}
-
-MCSymbol *AsmPrinter::GetBlockAddressSymbol(const Function *F,
- const BasicBlock *BB) const {
- assert(BB->hasName() &&
- "Address of anonymous basic block not supported yet!");
-
- // This code must use the function name itself, and not the function number,
- // since it must be possible to generate the label name from within other
- // functions.
- SmallString<60> FnName;
- Mang->getNameWithPrefix(FnName, F, false);
-
- // FIXME: THIS IS BROKEN IF THE LLVM BASIC BLOCK DOESN'T HAVE A NAME!
- SmallString<60> NameResult;
- Mang->getNameWithPrefix(NameResult,
- StringRef("BA") + Twine((unsigned)FnName.size()) +
- "_" + FnName.str() + "_" + BB->getName(),
- Mangler::Private);
-
- return OutContext.GetOrCreateSymbol(NameResult.str());
-}
-
-/// GetCPISymbol - Return the symbol for the specified constant pool entry.
-MCSymbol *AsmPrinter::GetCPISymbol(unsigned CPID) const {
- SmallString<60> Name;
- raw_svector_ostream(Name) << MAI->getPrivateGlobalPrefix() << "CPI"
- << getFunctionNumber() << '_' << CPID;
- return OutContext.GetOrCreateSymbol(Name.str());
-}
-
-/// GetJTISymbol - Return the symbol for the specified jump table entry.
-MCSymbol *AsmPrinter::GetJTISymbol(unsigned JTID, bool isLinkerPrivate) const {
- return MF->getJTISymbol(JTID, OutContext, isLinkerPrivate);
-}
-
- /// GetJTSetSymbol - Return the symbol for the specified jump table .set entry.
-/// FIXME: privatize to AsmPrinter.
-MCSymbol *AsmPrinter::GetJTSetSymbol(unsigned UID, unsigned MBBID) const {
- SmallString<60> Name;
- raw_svector_ostream(Name) << MAI->getPrivateGlobalPrefix()
- << getFunctionNumber() << '_' << UID << "_set_" << MBBID;
- return OutContext.GetOrCreateSymbol(Name.str());
-}
-
-/// GetGlobalValueSymbol - Return the MCSymbol for the specified global
-/// value.
-MCSymbol *AsmPrinter::GetGlobalValueSymbol(const GlobalValue *GV) const {
- SmallString<60> NameStr;
- Mang->getNameWithPrefix(NameStr, GV, false);
- return OutContext.GetOrCreateSymbol(NameStr.str());
-}
-
-/// GetSymbolWithGlobalValueBase - Return the MCSymbol for a symbol with
-/// global value name as its base, with the specified suffix, and where the
-/// symbol is forced to have private linkage if ForcePrivate is true.
-MCSymbol *AsmPrinter::GetSymbolWithGlobalValueBase(const GlobalValue *GV,
- StringRef Suffix,
- bool ForcePrivate) const {
- SmallString<60> NameStr;
- Mang->getNameWithPrefix(NameStr, GV, ForcePrivate);
- NameStr.append(Suffix.begin(), Suffix.end());
- return OutContext.GetOrCreateSymbol(NameStr.str());
-}
-
-/// GetExternalSymbolSymbol - Return the MCSymbol for the specified
-/// ExternalSymbol.
-MCSymbol *AsmPrinter::GetExternalSymbolSymbol(StringRef Sym) const {
- SmallString<60> NameStr;
- Mang->getNameWithPrefix(NameStr, Sym);
- return OutContext.GetOrCreateSymbol(NameStr.str());
-}
-
-
-
-/// PrintParentLoopComment - Print comments about parent loops of this one.
-static void PrintParentLoopComment(raw_ostream &OS, const MachineLoop *Loop,
- unsigned FunctionNumber) {
- if (Loop == 0) return;
- PrintParentLoopComment(OS, Loop->getParentLoop(), FunctionNumber);
- OS.indent(Loop->getLoopDepth()*2)
- << "Parent Loop BB" << FunctionNumber << "_"
- << Loop->getHeader()->getNumber()
- << " Depth=" << Loop->getLoopDepth() << '\n';
-}
-
-
-/// PrintChildLoopComment - Print comments about child loops within
-/// the loop for this basic block, with nesting.
-static void PrintChildLoopComment(raw_ostream &OS, const MachineLoop *Loop,
- unsigned FunctionNumber) {
- // Add child loop information
- for (MachineLoop::iterator CL = Loop->begin(), E = Loop->end();CL != E; ++CL){
- OS.indent((*CL)->getLoopDepth()*2)
- << "Child Loop BB" << FunctionNumber << "_"
- << (*CL)->getHeader()->getNumber() << " Depth " << (*CL)->getLoopDepth()
- << '\n';
- PrintChildLoopComment(OS, *CL, FunctionNumber);
- }
-}
-
-/// PrintBasicBlockLoopComments - Pretty-print comments for basic blocks.
-static void PrintBasicBlockLoopComments(const MachineBasicBlock &MBB,
- const MachineLoopInfo *LI,
- const AsmPrinter &AP) {
- // Add loop depth information
- const MachineLoop *Loop = LI->getLoopFor(&MBB);
- if (Loop == 0) return;
-
- MachineBasicBlock *Header = Loop->getHeader();
- assert(Header && "No header for loop");
-
- // If this block is not a loop header, just print out what is the loop header
- // and return.
- if (Header != &MBB) {
- AP.OutStreamer.AddComment(" in Loop: Header=BB" +
- Twine(AP.getFunctionNumber())+"_" +
- Twine(Loop->getHeader()->getNumber())+
- " Depth="+Twine(Loop->getLoopDepth()));
- return;
- }
-
- // Otherwise, it is a loop header. Print out information about child and
- // parent loops.
- raw_ostream &OS = AP.OutStreamer.GetCommentOS();
-
- PrintParentLoopComment(OS, Loop->getParentLoop(), AP.getFunctionNumber());
-
- OS << "=>";
- OS.indent(Loop->getLoopDepth()*2-2);
-
- OS << "This ";
- if (Loop->empty())
- OS << "Inner ";
- OS << "Loop Header: Depth=" + Twine(Loop->getLoopDepth()) << '\n';
-
- PrintChildLoopComment(OS, Loop, AP.getFunctionNumber());
-}
-
-
-/// EmitBasicBlockStart - This method prints the label for the specified
-/// MachineBasicBlock, an alignment (if present) and a comment describing
-/// it if appropriate.
-void AsmPrinter::EmitBasicBlockStart(const MachineBasicBlock *MBB) const {
- // Emit an alignment directive for this block, if needed.
- if (unsigned Align = MBB->getAlignment())
- EmitAlignment(Log2_32(Align));
-
- // If the block has its address taken, emit a special label to satisfy
- // references to the block. This is done so that we don't need to
- // remember the number of this label, and so that we can make
- // forward references to labels without knowing what their numbers
- // will be.
- if (MBB->hasAddressTaken()) {
- const BasicBlock *BB = MBB->getBasicBlock();
- if (VerboseAsm)
- OutStreamer.AddComment("Address Taken");
- OutStreamer.EmitLabel(GetBlockAddressSymbol(BB->getParent(), BB));
- }
-
- // Print the main label for the block.
- if (MBB->pred_empty() || isBlockOnlyReachableByFallthrough(MBB)) {
- if (VerboseAsm) {
- // NOTE: Want this comment at start of line.
- O << MAI->getCommentString() << " BB#" << MBB->getNumber() << ':';
- if (const BasicBlock *BB = MBB->getBasicBlock())
- if (BB->hasName())
- OutStreamer.AddComment("%" + BB->getName());
-
- PrintBasicBlockLoopComments(*MBB, LI, *this);
- OutStreamer.AddBlankLine();
- }
- } else {
- if (VerboseAsm) {
- if (const BasicBlock *BB = MBB->getBasicBlock())
- if (BB->hasName())
- OutStreamer.AddComment("%" + BB->getName());
- PrintBasicBlockLoopComments(*MBB, LI, *this);
- }
-
- OutStreamer.EmitLabel(MBB->getSymbol(OutContext));
- }
-}
-
-void AsmPrinter::EmitVisibility(MCSymbol *Sym, unsigned Visibility) const {
- MCSymbolAttr Attr = MCSA_Invalid;
-
- switch (Visibility) {
- default: break;
- case GlobalValue::HiddenVisibility:
- Attr = MAI->getHiddenVisibilityAttr();
- break;
- case GlobalValue::ProtectedVisibility:
- Attr = MAI->getProtectedVisibilityAttr();
- break;
- }
-
- if (Attr != MCSA_Invalid)
- OutStreamer.EmitSymbolAttribute(Sym, Attr);
-}
-
-void AsmPrinter::printOffset(int64_t Offset) const {
- if (Offset > 0)
- O << '+' << Offset;
- else if (Offset < 0)
- O << Offset;
-}
-
- /// isBlockOnlyReachableByFallthrough - Return true if the basic block has
-/// exactly one predecessor and the control transfer mechanism between
-/// the predecessor and this block is a fall-through.
-bool AsmPrinter::isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB)
- const {
- // If this is a landing pad, it isn't a fall through. If it has no preds,
- // then nothing falls through to it.
- if (MBB->isLandingPad() || MBB->pred_empty())
- return false;
-
- // If there isn't exactly one predecessor, it can't be a fall through.
- MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(), PI2 = PI;
- ++PI2;
- if (PI2 != MBB->pred_end())
- return false;
-
- // The predecessor has to be immediately before this block.
- const MachineBasicBlock *Pred = *PI;
-
- if (!Pred->isLayoutSuccessor(MBB))
- return false;
-
- // If the block is completely empty, then it definitely does fall through.
- if (Pred->empty())
- return true;
-
- // Otherwise, check the last instruction.
- const MachineInstr &LastInst = Pred->back();
- return !LastInst.getDesc().isBarrier();
-}
-
-
-
-GCMetadataPrinter *AsmPrinter::GetOrCreateGCPrinter(GCStrategy *S) {
- if (!S->usesMetadata())
- return 0;
-
- gcp_iterator GCPI = GCMetadataPrinters.find(S);
- if (GCPI != GCMetadataPrinters.end())
- return GCPI->second;
-
- const char *Name = S->getName().c_str();
-
- for (GCMetadataPrinterRegistry::iterator
- I = GCMetadataPrinterRegistry::begin(),
- E = GCMetadataPrinterRegistry::end(); I != E; ++I)
- if (strcmp(Name, I->getName()) == 0) {
- GCMetadataPrinter *GMP = I->instantiate();
- GMP->S = S;
- GCMetadataPrinters.insert(std::make_pair(S, GMP));
- return GMP;
- }
-
- llvm_report_error("no GCMetadataPrinter registered for GC: " + Twine(Name));
- return 0;
-}
-
diff --git a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/CMakeLists.txt b/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/CMakeLists.txt
deleted file mode 100644
index 066aaab..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/CMakeLists.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-add_llvm_library(LLVMAsmPrinter
- AsmPrinter.cpp
- DIE.cpp
- DwarfDebug.cpp
- DwarfException.cpp
- DwarfLabel.cpp
- DwarfPrinter.cpp
- DwarfWriter.cpp
- OcamlGCPrinter.cpp
- )
diff --git a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DIE.cpp b/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
deleted file mode 100644
index 63360c0..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
+++ /dev/null
@@ -1,432 +0,0 @@
-//===--- lib/CodeGen/DIE.cpp - DWARF Info Entries -------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Data structures for DWARF info entries.
-//
-//===----------------------------------------------------------------------===//
-
-#include "DIE.h"
-#include "DwarfPrinter.h"
-#include "llvm/ADT/Twine.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/Format.h"
-#include "llvm/Support/FormattedStream.h"
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// DIEAbbrevData Implementation
-//===----------------------------------------------------------------------===//
-
-/// Profile - Used to gather unique data for the abbreviation folding set.
-///
-void DIEAbbrevData::Profile(FoldingSetNodeID &ID) const {
- ID.AddInteger(Attribute);
- ID.AddInteger(Form);
-}
-
-//===----------------------------------------------------------------------===//
-// DIEAbbrev Implementation
-//===----------------------------------------------------------------------===//
-
-/// Profile - Used to gather unique data for the abbreviation folding set.
-///
-void DIEAbbrev::Profile(FoldingSetNodeID &ID) const {
- ID.AddInteger(Tag);
- ID.AddInteger(ChildrenFlag);
-
- // For each attribute description.
- for (unsigned i = 0, N = Data.size(); i < N; ++i)
- Data[i].Profile(ID);
-}
-
-/// Emit - Print the abbreviation using the specified asm printer.
-///
-void DIEAbbrev::Emit(const DwarfPrinter *DP) const {
- // Emit its Dwarf tag type.
- // FIXME: Doing work even in non-asm-verbose runs.
- DP->EmitULEB128(Tag, dwarf::TagString(Tag));
-
- // Emit whether it has children DIEs.
- // FIXME: Doing work even in non-asm-verbose runs.
- DP->EmitULEB128(ChildrenFlag, dwarf::ChildrenString(ChildrenFlag));
-
- // For each attribute description.
- for (unsigned i = 0, N = Data.size(); i < N; ++i) {
- const DIEAbbrevData &AttrData = Data[i];
-
- // Emit attribute type.
- // FIXME: Doing work even in non-asm-verbose runs.
- DP->EmitULEB128(AttrData.getAttribute(),
- dwarf::AttributeString(AttrData.getAttribute()));
-
- // Emit form type.
- // FIXME: Doing work even in non-asm-verbose runs.
- DP->EmitULEB128(AttrData.getForm(),
- dwarf::FormEncodingString(AttrData.getForm()));
- }
-
- // Mark end of abbreviation.
- DP->EmitULEB128(0, "EOM(1)");
- DP->EmitULEB128(0, "EOM(2)");
-}
-
-#ifndef NDEBUG
-void DIEAbbrev::print(raw_ostream &O) {
- O << "Abbreviation @"
- << format("0x%lx", (long)(intptr_t)this)
- << " "
- << dwarf::TagString(Tag)
- << " "
- << dwarf::ChildrenString(ChildrenFlag)
- << '\n';
-
- for (unsigned i = 0, N = Data.size(); i < N; ++i) {
- O << " "
- << dwarf::AttributeString(Data[i].getAttribute())
- << " "
- << dwarf::FormEncodingString(Data[i].getForm())
- << '\n';
- }
-}
-void DIEAbbrev::dump() { print(dbgs()); }
-#endif
-
-//===----------------------------------------------------------------------===//
-// DIE Implementation
-//===----------------------------------------------------------------------===//
-
-DIE::~DIE() {
- for (unsigned i = 0, N = Children.size(); i < N; ++i)
- delete Children[i];
-}
-
-/// addSiblingOffset - Add a sibling offset field to the front of the DIE.
-///
-void DIE::addSiblingOffset() {
- DIEInteger *DI = new DIEInteger(0);
- Values.insert(Values.begin(), DI);
- Abbrev.AddFirstAttribute(dwarf::DW_AT_sibling, dwarf::DW_FORM_ref4);
-}
-
-#ifndef NDEBUG
-void DIE::print(raw_ostream &O, unsigned IncIndent) {
- IndentCount += IncIndent;
- const std::string Indent(IndentCount, ' ');
- bool isBlock = Abbrev.getTag() == 0;
-
- if (!isBlock) {
- O << Indent
- << "Die: "
- << format("0x%lx", (long)(intptr_t)this)
- << ", Offset: " << Offset
- << ", Size: " << Size
- << "\n";
-
- O << Indent
- << dwarf::TagString(Abbrev.getTag())
- << " "
- << dwarf::ChildrenString(Abbrev.getChildrenFlag());
- } else {
- O << "Size: " << Size;
- }
- O << "\n";
-
- const SmallVector<DIEAbbrevData, 8> &Data = Abbrev.getData();
-
- IndentCount += 2;
- for (unsigned i = 0, N = Data.size(); i < N; ++i) {
- O << Indent;
-
- if (!isBlock)
- O << dwarf::AttributeString(Data[i].getAttribute());
- else
- O << "Blk[" << i << "]";
-
- O << " "
- << dwarf::FormEncodingString(Data[i].getForm())
- << " ";
- Values[i]->print(O);
- O << "\n";
- }
- IndentCount -= 2;
-
- for (unsigned j = 0, M = Children.size(); j < M; ++j) {
- Children[j]->print(O, 4);
- }
-
- if (!isBlock) O << "\n";
- IndentCount -= IncIndent;
-}
-
-void DIE::dump() {
- print(dbgs());
-}
-#endif
-
-
-#ifndef NDEBUG
-void DIEValue::dump() {
- print(dbgs());
-}
-#endif
-
-//===----------------------------------------------------------------------===//
-// DIEInteger Implementation
-//===----------------------------------------------------------------------===//
-
-/// EmitValue - Emit integer of appropriate size.
-///
-void DIEInteger::EmitValue(DwarfPrinter *D, unsigned Form) const {
- const AsmPrinter *Asm = D->getAsm();
- unsigned Size = ~0U;
- switch (Form) {
- case dwarf::DW_FORM_flag: // Fall thru
- case dwarf::DW_FORM_ref1: // Fall thru
- case dwarf::DW_FORM_data1: Size = 1; break;
- case dwarf::DW_FORM_ref2: // Fall thru
- case dwarf::DW_FORM_data2: Size = 2; break;
- case dwarf::DW_FORM_ref4: // Fall thru
- case dwarf::DW_FORM_data4: Size = 4; break;
- case dwarf::DW_FORM_ref8: // Fall thru
- case dwarf::DW_FORM_data8: Size = 8; break;
- case dwarf::DW_FORM_udata: D->EmitULEB128(Integer); return;
- case dwarf::DW_FORM_sdata: D->EmitSLEB128(Integer, ""); return;
- default: llvm_unreachable("DIE Value form not supported yet");
- }
- Asm->OutStreamer.EmitIntValue(Integer, Size, 0/*addrspace*/);
-}
-
-/// SizeOf - Determine size of integer value in bytes.
-///
-unsigned DIEInteger::SizeOf(const TargetData *TD, unsigned Form) const {
- switch (Form) {
- case dwarf::DW_FORM_flag: // Fall thru
- case dwarf::DW_FORM_ref1: // Fall thru
- case dwarf::DW_FORM_data1: return sizeof(int8_t);
- case dwarf::DW_FORM_ref2: // Fall thru
- case dwarf::DW_FORM_data2: return sizeof(int16_t);
- case dwarf::DW_FORM_ref4: // Fall thru
- case dwarf::DW_FORM_data4: return sizeof(int32_t);
- case dwarf::DW_FORM_ref8: // Fall thru
- case dwarf::DW_FORM_data8: return sizeof(int64_t);
- case dwarf::DW_FORM_udata: return MCAsmInfo::getULEB128Size(Integer);
- case dwarf::DW_FORM_sdata: return MCAsmInfo::getSLEB128Size(Integer);
- default: llvm_unreachable("DIE Value form not supported yet"); break;
- }
- return 0;
-}
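The DW_FORM_udata and DW_FORM_sdata cases delegate to the LEB128 size helpers, whose result depends on the value: 7 payload bits per byte. A sketch of the unsigned variant's size computation (a restatement of the idea, not the MCAsmInfo source):

#include <cassert>
#include <cstdint>

// Number of bytes a value occupies in unsigned LEB128 encoding: 7 bits of
// payload per byte, so keep shifting until the value is exhausted.
static unsigned ULEB128Size(uint64_t Value) {
  unsigned Size = 0;
  do {
    Value >>= 7;
    ++Size;
  } while (Value != 0);
  return Size;
}

int main() {
  assert(ULEB128Size(0) == 1);     // zero still takes one byte
  assert(ULEB128Size(127) == 1);   // fits in 7 bits
  assert(ULEB128Size(128) == 2);   // needs a continuation byte
  assert(ULEB128Size(16384) == 3); // 15 significant bits -> 3 bytes
  return 0;
}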
-
-#ifndef NDEBUG
-void DIEInteger::print(raw_ostream &O) {
- O << "Int: " << (int64_t)Integer
- << format(" 0x%llx", (unsigned long long)Integer);
-}
-#endif
-
-//===----------------------------------------------------------------------===//
-// DIEString Implementation
-//===----------------------------------------------------------------------===//
-
-/// EmitValue - Emit string value.
-///
-void DIEString::EmitValue(DwarfPrinter *D, unsigned Form) const {
- D->getAsm()->OutStreamer.EmitBytes(Str, /*addrspace*/0);
- // Emit nul terminator.
- D->getAsm()->OutStreamer.EmitIntValue(0, 1, /*addrspace*/0);
-}
-
-#ifndef NDEBUG
-void DIEString::print(raw_ostream &O) {
- O << "Str: \"" << Str << "\"";
-}
-#endif
-
-//===----------------------------------------------------------------------===//
-// DIEDwarfLabel Implementation
-//===----------------------------------------------------------------------===//
-
-/// EmitValue - Emit label value.
-///
-void DIEDwarfLabel::EmitValue(DwarfPrinter *D, unsigned Form) const {
- bool IsSmall = Form == dwarf::DW_FORM_data4;
- D->EmitReference(Label, false, IsSmall);
-}
-
-/// SizeOf - Determine size of label value in bytes.
-///
-unsigned DIEDwarfLabel::SizeOf(const TargetData *TD, unsigned Form) const {
- if (Form == dwarf::DW_FORM_data4) return 4;
- return TD->getPointerSize();
-}
-
-#ifndef NDEBUG
-void DIEDwarfLabel::print(raw_ostream &O) {
- O << "Lbl: ";
- Label.print(O);
-}
-#endif
-
-//===----------------------------------------------------------------------===//
-// DIEObjectLabel Implementation
-//===----------------------------------------------------------------------===//
-
-/// EmitValue - Emit label value.
-///
-void DIEObjectLabel::EmitValue(DwarfPrinter *D, unsigned Form) const {
- bool IsSmall = Form == dwarf::DW_FORM_data4;
- D->EmitReference(Sym, false, IsSmall);
-}
-
-/// SizeOf - Determine size of label value in bytes.
-///
-unsigned DIEObjectLabel::SizeOf(const TargetData *TD, unsigned Form) const {
- if (Form == dwarf::DW_FORM_data4) return 4;
- return TD->getPointerSize();
-}
-
-#ifndef NDEBUG
-void DIEObjectLabel::print(raw_ostream &O) {
- O << "Obj: " << Sym->getName();
-}
-#endif
-
-//===----------------------------------------------------------------------===//
-// DIESectionOffset Implementation
-//===----------------------------------------------------------------------===//
-
-/// EmitValue - Emit delta value.
-///
-void DIESectionOffset::EmitValue(DwarfPrinter *D, unsigned Form) const {
- bool IsSmall = Form == dwarf::DW_FORM_data4;
- D->EmitSectionOffset(Label.getTag(), Section.getTag(),
- Label.getNumber(), Section.getNumber(),
- IsSmall, IsEH, UseSet);
- D->getAsm()->O << '\n'; // FIXME: Necessary?
-}
-
-/// SizeOf - Determine size of delta value in bytes.
-///
-unsigned DIESectionOffset::SizeOf(const TargetData *TD, unsigned Form) const {
- if (Form == dwarf::DW_FORM_data4) return 4;
- return TD->getPointerSize();
-}
-
-#ifndef NDEBUG
-void DIESectionOffset::print(raw_ostream &O) {
- O << "Off: ";
- Label.print(O);
- O << "-";
- Section.print(O);
- O << "-" << IsEH << "-" << UseSet;
-}
-#endif
-
-//===----------------------------------------------------------------------===//
-// DIEDelta Implementation
-//===----------------------------------------------------------------------===//
-
-/// EmitValue - Emit delta value.
-///
-void DIEDelta::EmitValue(DwarfPrinter *D, unsigned Form) const {
- bool IsSmall = Form == dwarf::DW_FORM_data4;
- D->EmitDifference(LabelHi, LabelLo, IsSmall);
-}
-
-/// SizeOf - Determine size of delta value in bytes.
-///
-unsigned DIEDelta::SizeOf(const TargetData *TD, unsigned Form) const {
- if (Form == dwarf::DW_FORM_data4) return 4;
- return TD->getPointerSize();
-}
-
-#ifndef NDEBUG
-void DIEDelta::print(raw_ostream &O) {
- O << "Del: ";
- LabelHi.print(O);
- O << "-";
- LabelLo.print(O);
-}
-#endif
-
-//===----------------------------------------------------------------------===//
-// DIEEntry Implementation
-//===----------------------------------------------------------------------===//
-
-/// EmitValue - Emit debug information entry offset.
-///
-void DIEEntry::EmitValue(DwarfPrinter *D, unsigned Form) const {
- D->getAsm()->EmitInt32(Entry->getOffset());
-}
-
-#ifndef NDEBUG
-void DIEEntry::print(raw_ostream &O) {
- O << format("Die: 0x%lx", (long)(intptr_t)Entry);
-}
-#endif
-
-//===----------------------------------------------------------------------===//
-// DIEBlock Implementation
-//===----------------------------------------------------------------------===//
-
-/// ComputeSize - calculate the size of the block.
-///
-unsigned DIEBlock::ComputeSize(const TargetData *TD) {
- if (!Size) {
- const SmallVector<DIEAbbrevData, 8> &AbbrevData = Abbrev.getData();
- for (unsigned i = 0, N = Values.size(); i < N; ++i)
- Size += Values[i]->SizeOf(TD, AbbrevData[i].getForm());
- }
-
- return Size;
-}
-
-/// EmitValue - Emit block data.
-///
-void DIEBlock::EmitValue(DwarfPrinter *D, unsigned Form) const {
- const AsmPrinter *Asm = D->getAsm();
- switch (Form) {
- case dwarf::DW_FORM_block1: Asm->EmitInt8(Size); break;
- case dwarf::DW_FORM_block2: Asm->EmitInt16(Size); break;
- case dwarf::DW_FORM_block4: Asm->EmitInt32(Size); break;
- case dwarf::DW_FORM_block: D->EmitULEB128(Size); break;
- default: llvm_unreachable("Improper form for block"); break;
- }
-
- const SmallVector<DIEAbbrevData, 8> &AbbrevData = Abbrev.getData();
- for (unsigned i = 0, N = Values.size(); i < N; ++i) {
- Asm->O << '\n';
- Values[i]->EmitValue(D, AbbrevData[i].getForm());
- }
-}
-
-/// SizeOf - Determine size of block data in bytes.
-///
-unsigned DIEBlock::SizeOf(const TargetData *TD, unsigned Form) const {
- switch (Form) {
- case dwarf::DW_FORM_block1: return Size + sizeof(int8_t);
- case dwarf::DW_FORM_block2: return Size + sizeof(int16_t);
- case dwarf::DW_FORM_block4: return Size + sizeof(int32_t);
- case dwarf::DW_FORM_block: return Size + MCAsmInfo::getULEB128Size(Size);
- default: llvm_unreachable("Improper form for block"); break;
- }
- return 0;
-}
-
-#ifndef NDEBUG
-void DIEBlock::print(raw_ostream &O) {
- O << "Blk: ";
- DIE::print(O, 5);
-}
-#endif
diff --git a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DIE.h b/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DIE.h
deleted file mode 100644
index af90289..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DIE.h
+++ /dev/null
@@ -1,494 +0,0 @@
-//===--- lib/CodeGen/DIE.h - DWARF Info Entries -----------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Data structures for DWARF info entries.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CODEGEN_ASMPRINTER_DIE_H__
-#define CODEGEN_ASMPRINTER_DIE_H__
-
-#include "DwarfLabel.h"
-#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/Dwarf.h"
-#include <vector>
-
-namespace llvm {
- class AsmPrinter;
- class DwarfPrinter;
- class TargetData;
- class MCSymbol;
-
- //===--------------------------------------------------------------------===//
- /// DIEAbbrevData - Dwarf abbreviation data, describes the one attribute of a
- /// Dwarf abbreviation.
- class DIEAbbrevData {
- /// Attribute - Dwarf attribute code.
- ///
- unsigned Attribute;
-
- /// Form - Dwarf form code.
- ///
- unsigned Form;
- public:
- DIEAbbrevData(unsigned A, unsigned F) : Attribute(A), Form(F) {}
-
- // Accessors.
- unsigned getAttribute() const { return Attribute; }
- unsigned getForm() const { return Form; }
-
- /// Profile - Used to gather unique data for the abbreviation folding set.
- ///
- void Profile(FoldingSetNodeID &ID) const;
- };
-
- //===--------------------------------------------------------------------===//
- /// DIEAbbrev - Dwarf abbreviation, describes the organization of a debug
- /// information object.
- class DIEAbbrev : public FoldingSetNode {
- /// Tag - Dwarf tag code.
- ///
- unsigned Tag;
-
- /// Unique number for node.
- ///
- unsigned Number;
-
- /// ChildrenFlag - Dwarf children flag.
- ///
- unsigned ChildrenFlag;
-
- /// Data - Raw data bytes for abbreviation.
- ///
- SmallVector<DIEAbbrevData, 8> Data;
-
- public:
- DIEAbbrev(unsigned T, unsigned C) : Tag(T), ChildrenFlag(C), Data() {}
- virtual ~DIEAbbrev() {}
-
- // Accessors.
- unsigned getTag() const { return Tag; }
- unsigned getNumber() const { return Number; }
- unsigned getChildrenFlag() const { return ChildrenFlag; }
- const SmallVector<DIEAbbrevData, 8> &getData() const { return Data; }
- void setTag(unsigned T) { Tag = T; }
- void setChildrenFlag(unsigned CF) { ChildrenFlag = CF; }
- void setNumber(unsigned N) { Number = N; }
-
- /// AddAttribute - Adds another set of attribute information to the
- /// abbreviation.
- void AddAttribute(unsigned Attribute, unsigned Form) {
- Data.push_back(DIEAbbrevData(Attribute, Form));
- }
-
- /// AddFirstAttribute - Adds a set of attribute information to the front
- /// of the abbreviation.
- void AddFirstAttribute(unsigned Attribute, unsigned Form) {
- Data.insert(Data.begin(), DIEAbbrevData(Attribute, Form));
- }
-
- /// Profile - Used to gather unique data for the abbreviation folding set.
- ///
- void Profile(FoldingSetNodeID &ID) const;
-
- /// Emit - Print the abbreviation using the specified asm printer.
- ///
- void Emit(const DwarfPrinter *DP) const;
-
-#ifndef NDEBUG
- void print(raw_ostream &O);
- void dump();
-#endif
- };
-
- //===--------------------------------------------------------------------===//
- /// DIE - A structured debug information entry. Has an abbreviation which
- /// describes its organization.
- class CompileUnit;
- class DIEValue;
-
- class DIE {
- protected:
- /// Abbrev - Buffer for constructing abbreviation.
- ///
- DIEAbbrev Abbrev;
-
- /// Offset - Offset in debug info section.
- ///
- unsigned Offset;
-
- /// Size - Size of instance + children.
- ///
- unsigned Size;
-
- /// Children DIEs.
- ///
- std::vector<DIE *> Children;
-
- DIE *Parent;
-
- /// Attributes values.
- ///
- SmallVector<DIEValue*, 32> Values;
-
- // Private data for print()
- mutable unsigned IndentCount;
- public:
- explicit DIE(unsigned Tag)
- : Abbrev(Tag, dwarf::DW_CHILDREN_no), Offset(0),
- Size(0), Parent (0), IndentCount(0) {}
- virtual ~DIE();
-
- // Accessors.
- DIEAbbrev &getAbbrev() { return Abbrev; }
- unsigned getAbbrevNumber() const { return Abbrev.getNumber(); }
- unsigned getTag() const { return Abbrev.getTag(); }
- unsigned getOffset() const { return Offset; }
- unsigned getSize() const { return Size; }
- const std::vector<DIE *> &getChildren() const { return Children; }
- SmallVector<DIEValue*, 32> &getValues() { return Values; }
- DIE *getParent() const { return Parent; }
- void setTag(unsigned Tag) { Abbrev.setTag(Tag); }
- void setOffset(unsigned O) { Offset = O; }
- void setSize(unsigned S) { Size = S; }
- void setParent(DIE *P) { Parent = P; }
-
- /// addValue - Add a value and attributes to a DIE.
- ///
- void addValue(unsigned Attribute, unsigned Form, DIEValue *Value) {
- Abbrev.AddAttribute(Attribute, Form);
- Values.push_back(Value);
- }
-
- /// SiblingOffset - Return the offset of the debug information entry's
- /// sibling.
- unsigned getSiblingOffset() const { return Offset + Size; }
-
- /// addSiblingOffset - Add a sibling offset field to the front of the DIE.
- ///
- void addSiblingOffset();
-
- /// addChild - Add a child to the DIE.
- ///
- void addChild(DIE *Child) {
- if (Child->getParent()) {
- assert (Child->getParent() == this && "Unexpected DIE Parent!");
- return;
- }
- Abbrev.setChildrenFlag(dwarf::DW_CHILDREN_yes);
- Children.push_back(Child);
- Child->setParent(this);
- }
-
-#ifndef NDEBUG
- void print(raw_ostream &O, unsigned IncIndent = 0);
- void dump();
-#endif
- };
-
- //===--------------------------------------------------------------------===//
- /// DIEValue - A debug information entry value.
- ///
- class DIEValue {
- public:
- enum {
- isInteger,
- isString,
- isLabel,
- isAsIsLabel,
- isSectionOffset,
- isDelta,
- isEntry,
- isBlock
- };
- protected:
- /// Type - Type of data stored in the value.
- ///
- unsigned Type;
- public:
- explicit DIEValue(unsigned T) : Type(T) {}
- virtual ~DIEValue() {}
-
- // Accessors
- unsigned getType() const { return Type; }
-
- /// EmitValue - Emit value via the Dwarf writer.
- ///
- virtual void EmitValue(DwarfPrinter *D, unsigned Form) const = 0;
-
- /// SizeOf - Return the size of a value in bytes.
- ///
- virtual unsigned SizeOf(const TargetData *TD, unsigned Form) const = 0;
-
- // Implement isa/cast/dyncast.
- static bool classof(const DIEValue *) { return true; }
-
-#ifndef NDEBUG
- virtual void print(raw_ostream &O) = 0;
- void dump();
-#endif
- };
-
- //===--------------------------------------------------------------------===//
- /// DIEInteger - An integer value DIE.
- ///
- class DIEInteger : public DIEValue {
- uint64_t Integer;
- public:
- explicit DIEInteger(uint64_t I) : DIEValue(isInteger), Integer(I) {}
-
- /// BestForm - Choose the best form for integer.
- ///
- static unsigned BestForm(bool IsSigned, uint64_t Int) {
- if (IsSigned) {
- if ((char)Int == (signed)Int) return dwarf::DW_FORM_data1;
- if ((short)Int == (signed)Int) return dwarf::DW_FORM_data2;
- if ((int)Int == (signed)Int) return dwarf::DW_FORM_data4;
- } else {
- if ((unsigned char)Int == Int) return dwarf::DW_FORM_data1;
- if ((unsigned short)Int == Int) return dwarf::DW_FORM_data2;
- if ((unsigned int)Int == Int) return dwarf::DW_FORM_data4;
- }
- return dwarf::DW_FORM_data8;
- }
-
- /// EmitValue - Emit integer of appropriate size.
- ///
- virtual void EmitValue(DwarfPrinter *D, unsigned Form) const;
-
- /// SizeOf - Determine size of integer value in bytes.
- ///
- virtual unsigned SizeOf(const TargetData *TD, unsigned Form) const;
-
-
- // Implement isa/cast/dyncast.
- static bool classof(const DIEInteger *) { return true; }
- static bool classof(const DIEValue *I) { return I->getType() == isInteger; }
-
-#ifndef NDEBUG
- virtual void print(raw_ostream &O);
-#endif
- };
-
- //===--------------------------------------------------------------------===//
- /// DIEString - A string value DIE. This DIE only keeps a reference to the string.
- ///
- class DIEString : public DIEValue {
- const StringRef Str;
- public:
- explicit DIEString(const StringRef S) : DIEValue(isString), Str(S) {}
-
- /// EmitValue - Emit string value.
- ///
- virtual void EmitValue(DwarfPrinter *D, unsigned Form) const;
-
- /// SizeOf - Determine size of string value in bytes.
- ///
- virtual unsigned SizeOf(const TargetData *, unsigned /*Form*/) const {
- return Str.size() + sizeof(char); // sizeof('\0');
- }
-
- // Implement isa/cast/dyncast.
- static bool classof(const DIEString *) { return true; }
- static bool classof(const DIEValue *S) { return S->getType() == isString; }
-
-#ifndef NDEBUG
- virtual void print(raw_ostream &O);
-#endif
- };
-
- //===--------------------------------------------------------------------===//
- /// DIEDwarfLabel - A Dwarf internal label expression DIE.
- //
- class DIEDwarfLabel : public DIEValue {
- const DWLabel Label;
- public:
- explicit DIEDwarfLabel(const DWLabel &L) : DIEValue(isLabel), Label(L) {}
-
- /// EmitValue - Emit label value.
- ///
- virtual void EmitValue(DwarfPrinter *D, unsigned Form) const;
-
- /// SizeOf - Determine size of label value in bytes.
- ///
- virtual unsigned SizeOf(const TargetData *TD, unsigned Form) const;
-
- // Implement isa/cast/dyncast.
- static bool classof(const DIEDwarfLabel *) { return true; }
- static bool classof(const DIEValue *L) { return L->getType() == isLabel; }
-
-#ifndef NDEBUG
- virtual void print(raw_ostream &O);
-#endif
- };
-
- //===--------------------------------------------------------------------===//
- /// DIEObjectLabel - A label to an object in code or data.
- //
- class DIEObjectLabel : public DIEValue {
- const MCSymbol *Sym;
- public:
- explicit DIEObjectLabel(const MCSymbol *S)
- : DIEValue(isAsIsLabel), Sym(S) {}
-
- /// EmitValue - Emit label value.
- ///
- virtual void EmitValue(DwarfPrinter *D, unsigned Form) const;
-
- /// SizeOf - Determine size of label value in bytes.
- ///
- virtual unsigned SizeOf(const TargetData *TD, unsigned Form) const;
-
- // Implement isa/cast/dyncast.
- static bool classof(const DIEObjectLabel *) { return true; }
- static bool classof(const DIEValue *L) {
- return L->getType() == isAsIsLabel;
- }
-
-#ifndef NDEBUG
- virtual void print(raw_ostream &O);
-#endif
- };
-
- //===--------------------------------------------------------------------===//
- /// DIESectionOffset - A section offset DIE.
- ///
- class DIESectionOffset : public DIEValue {
- const DWLabel Label;
- const DWLabel Section;
- bool IsEH : 1;
- bool UseSet : 1;
- public:
- DIESectionOffset(const DWLabel &Lab, const DWLabel &Sec,
- bool isEH = false, bool useSet = true)
- : DIEValue(isSectionOffset), Label(Lab), Section(Sec),
- IsEH(isEH), UseSet(useSet) {}
-
- /// EmitValue - Emit section offset.
- ///
- virtual void EmitValue(DwarfPrinter *D, unsigned Form) const;
-
- /// SizeOf - Determine size of section offset value in bytes.
- ///
- virtual unsigned SizeOf(const TargetData *TD, unsigned Form) const;
-
- // Implement isa/cast/dyncast.
- static bool classof(const DIESectionOffset *) { return true; }
- static bool classof(const DIEValue *D) {
- return D->getType() == isSectionOffset;
- }
-
-#ifndef NDEBUG
- virtual void print(raw_ostream &O);
-#endif
- };
-
- //===--------------------------------------------------------------------===//
- /// DIEDelta - A simple label difference DIE.
- ///
- class DIEDelta : public DIEValue {
- const DWLabel LabelHi;
- const DWLabel LabelLo;
- public:
- DIEDelta(const DWLabel &Hi, const DWLabel &Lo)
- : DIEValue(isDelta), LabelHi(Hi), LabelLo(Lo) {}
-
- /// EmitValue - Emit delta value.
- ///
- virtual void EmitValue(DwarfPrinter *D, unsigned Form) const;
-
- /// SizeOf - Determine size of delta value in bytes.
- ///
- virtual unsigned SizeOf(const TargetData *TD, unsigned Form) const;
-
- // Implement isa/cast/dyncast.
- static bool classof(const DIEDelta *) { return true; }
- static bool classof(const DIEValue *D) { return D->getType() == isDelta; }
-
-#ifndef NDEBUG
- virtual void print(raw_ostream &O);
-#endif
- };
-
- //===--------------------------------------------------------------------===//
- /// DIEEntry - A pointer to another debug information entry. An instance of
- /// this class can also be used as a proxy for a debug information entry not
- /// yet defined (i.e. types).
- class DIEEntry : public DIEValue {
- DIE *Entry;
- public:
- explicit DIEEntry(DIE *E) : DIEValue(isEntry), Entry(E) {}
-
- DIE *getEntry() const { return Entry; }
- void setEntry(DIE *E) { Entry = E; }
-
- /// EmitValue - Emit debug information entry offset.
- ///
- virtual void EmitValue(DwarfPrinter *D, unsigned Form) const;
-
- /// SizeOf - Determine size of debug information entry in bytes.
- ///
- virtual unsigned SizeOf(const TargetData *TD, unsigned Form) const {
- return sizeof(int32_t);
- }
-
- // Implement isa/cast/dyncast.
- static bool classof(const DIEEntry *) { return true; }
- static bool classof(const DIEValue *E) { return E->getType() == isEntry; }
-
-#ifndef NDEBUG
- virtual void print(raw_ostream &O);
-#endif
- };
-
- //===--------------------------------------------------------------------===//
- /// DIEBlock - A block of values. Primarily used for location expressions.
- //
- class DIEBlock : public DIEValue, public DIE {
- unsigned Size; // Size in bytes excluding size header.
- public:
- DIEBlock()
- : DIEValue(isBlock), DIE(0), Size(0) {}
- virtual ~DIEBlock() {}
-
- /// ComputeSize - calculate the size of the block.
- ///
- unsigned ComputeSize(const TargetData *TD);
-
- /// BestForm - Choose the best form for data.
- ///
- unsigned BestForm() const {
- if ((unsigned char)Size == Size) return dwarf::DW_FORM_block1;
- if ((unsigned short)Size == Size) return dwarf::DW_FORM_block2;
- if ((unsigned int)Size == Size) return dwarf::DW_FORM_block4;
- return dwarf::DW_FORM_block;
- }
-
- /// EmitValue - Emit block data.
- ///
- virtual void EmitValue(DwarfPrinter *D, unsigned Form) const;
-
- /// SizeOf - Determine size of block data in bytes.
- ///
- virtual unsigned SizeOf(const TargetData *TD, unsigned Form) const;
-
- // Implement isa/cast/dyncast.
- static bool classof(const DIEBlock *) { return true; }
- static bool classof(const DIEValue *E) { return E->getType() == isBlock; }
-
-#ifndef NDEBUG
- virtual void print(raw_ostream &O);
-#endif
- };
-
-} // end llvm namespace
-
-#endif
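The two BestForm helpers in the header above (DIEInteger::BestForm and DIEBlock::BestForm) follow the same pattern: pick the narrowest DWARF form that still round-trips the value. A minimal standalone sketch of the unsigned case, with the DW_FORM_* codes written out as their standard numeric values, a local name (bestUnsignedForm) invented for the sketch, and purely hypothetical sample inputs:

    #include <cstdint>
    #include <cstdio>

    // Standard DWARF form codes; the real code uses dwarf::DW_FORM_* from
    // llvm/Support/Dwarf.h instead of spelling out the numbers.
    enum {
      DW_FORM_data1 = 0x0b,
      DW_FORM_data2 = 0x05,
      DW_FORM_data4 = 0x06,
      DW_FORM_data8 = 0x07
    };

    // Mirrors the unsigned branch of DIEInteger::BestForm: choose the
    // narrowest fixed-size form whose truncation round-trips the value.
    static unsigned bestUnsignedForm(uint64_t Int) {
      if ((unsigned char)Int  == Int) return DW_FORM_data1; // fits in 1 byte
      if ((unsigned short)Int == Int) return DW_FORM_data2; // fits in 2 bytes
      if ((unsigned int)Int   == Int) return DW_FORM_data4; // fits in 4 bytes
      return DW_FORM_data8;                                 // needs all 8 bytes
    }

    int main() {
      std::printf("%#x %#x %#x\n",
                  bestUnsignedForm(200),            // 0xb (data1)
                  bestUnsignedForm(70000),          // 0x6 (data4)
                  bestUnsignedForm(5000000000ULL)); // 0x7 (data8)
      return 0;
    }

DIEBlock::BestForm applies the same width test to the block size to choose between DW_FORM_block1/2/4 and the ULEB128-prefixed DW_FORM_block.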
diff --git a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
deleted file mode 100644
index 5ad1e5e..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ /dev/null
@@ -1,3024 +0,0 @@
-//===-- llvm/CodeGen/DwarfDebug.cpp - Dwarf Debug Framework ---------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains support for writing dwarf debug info into asm files.
-//
-//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "dwarfdebug"
-#include "DwarfDebug.h"
-#include "llvm/Module.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/MC/MCSection.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/ValueHandle.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/Support/Timer.h"
-#include "llvm/System/Path.h"
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-
-/// Configuration values for initial hash set sizes (log2).
-///
-static const unsigned InitAbbreviationsSetSize = 9; // log2(512)
-
-namespace llvm {
-
-//===----------------------------------------------------------------------===//
- /// CompileUnit - This dwarf writer support class manages information associated
-/// with a source file.
-class CompileUnit {
- /// ID - File identifier for source.
- ///
- unsigned ID;
-
- /// Die - Compile unit debug information entry.
- ///
- DIE *CUDie;
-
- /// IndexTyDie - An anonymous type for index type.
- DIE *IndexTyDie;
-
- /// GVToDieMap - Tracks the mapping of unit level debug information
- /// variables to debug information entries.
- /// FIXME : Rename GVToDieMap -> NodeToDieMap
- DenseMap<MDNode *, DIE *> GVToDieMap;
-
- /// GVToDIEEntryMap - Tracks the mapping of unit level debug information
- /// descriptors to debug information entries using a DIEEntry proxy.
- /// FIXME : Rename
- DenseMap<MDNode *, DIEEntry *> GVToDIEEntryMap;
-
- /// Globals - A map of globally visible named entities for this unit.
- ///
- StringMap<DIE*> Globals;
-
- /// GlobalTypes - A map of globally visible types for this unit.
- ///
- StringMap<DIE*> GlobalTypes;
-
-public:
- CompileUnit(unsigned I, DIE *D)
- : ID(I), CUDie(D), IndexTyDie(0) {}
- ~CompileUnit() { delete CUDie; delete IndexTyDie; }
-
- // Accessors.
- unsigned getID() const { return ID; }
- DIE* getCUDie() const { return CUDie; }
- const StringMap<DIE*> &getGlobals() const { return Globals; }
- const StringMap<DIE*> &getGlobalTypes() const { return GlobalTypes; }
-
- /// hasContent - Return true if this compile unit has something to write out.
- ///
- bool hasContent() const { return !CUDie->getChildren().empty(); }
-
- /// addGlobal - Add a new global entity to the compile unit.
- ///
- void addGlobal(const std::string &Name, DIE *Die) { Globals[Name] = Die; }
-
- /// addGlobalType - Add a new global type to the compile unit.
- ///
- void addGlobalType(const std::string &Name, DIE *Die) {
- GlobalTypes[Name] = Die;
- }
-
- /// getDIE - Returns the debug information entry map slot for the
- /// specified debug variable.
- DIE *getDIE(MDNode *N) { return GVToDieMap.lookup(N); }
-
- /// insertDIE - Insert DIE into the map.
- void insertDIE(MDNode *N, DIE *D) {
- GVToDieMap.insert(std::make_pair(N, D));
- }
-
- /// getDIEEntry - Returns the debug information entry for the specified
- /// debug variable.
- DIEEntry *getDIEEntry(MDNode *N) {
- DenseMap<MDNode *, DIEEntry *>::iterator I = GVToDIEEntryMap.find(N);
- if (I == GVToDIEEntryMap.end())
- return NULL;
- return I->second;
- }
-
- /// insertDIEEntry - Insert debug information entry into the map.
- void insertDIEEntry(MDNode *N, DIEEntry *E) {
- GVToDIEEntryMap.insert(std::make_pair(N, E));
- }
-
- /// addDie - Adds or interns the DIE to the compile unit.
- ///
- void addDie(DIE *Buffer) {
- this->CUDie->addChild(Buffer);
- }
-
- // getIndexTyDie - Get an anonymous type for index type.
- DIE *getIndexTyDie() {
- return IndexTyDie;
- }
-
- // setIndexTyDie - Set D as anonymous type for index which can be reused
- // later.
- void setIndexTyDie(DIE *D) {
- IndexTyDie = D;
- }
-
-};
-
-//===----------------------------------------------------------------------===//
-/// DbgVariable - This class is used to track local variable information.
-///
-class DbgVariable {
- DIVariable Var; // Variable Descriptor.
- unsigned FrameIndex; // Variable frame index.
- DbgVariable *AbstractVar; // Abstract variable for this variable.
- DIE *TheDIE;
-public:
- DbgVariable(DIVariable V, unsigned I)
- : Var(V), FrameIndex(I), AbstractVar(0), TheDIE(0) {}
-
- // Accessors.
- DIVariable getVariable() const { return Var; }
- unsigned getFrameIndex() const { return FrameIndex; }
- void setAbstractVariable(DbgVariable *V) { AbstractVar = V; }
- DbgVariable *getAbstractVariable() const { return AbstractVar; }
- void setDIE(DIE *D) { TheDIE = D; }
- DIE *getDIE() const { return TheDIE; }
-};
-
-//===----------------------------------------------------------------------===//
-/// DbgScope - This class is used to track scope information.
-///
-class DbgScope {
- DbgScope *Parent; // Parent to this scope.
- DIDescriptor Desc; // Debug info descriptor for scope.
- // Location at which this scope is inlined.
- AssertingVH<MDNode> InlinedAtLocation;
- bool AbstractScope; // Abstract Scope
- unsigned StartLabelID; // Label ID of the beginning of scope.
- unsigned EndLabelID; // Label ID of the end of scope.
- const MachineInstr *LastInsn; // Last instruction of this scope.
- const MachineInstr *FirstInsn; // First instruction of this scope.
- SmallVector<DbgScope *, 4> Scopes; // Scopes defined in scope.
- SmallVector<DbgVariable *, 8> Variables;// Variables declared in scope.
-
- // Private state for dump()
- mutable unsigned IndentLevel;
-public:
- DbgScope(DbgScope *P, DIDescriptor D, MDNode *I = 0)
- : Parent(P), Desc(D), InlinedAtLocation(I), AbstractScope(false),
- StartLabelID(0), EndLabelID(0),
- LastInsn(0), FirstInsn(0), IndentLevel(0) {}
- virtual ~DbgScope();
-
- // Accessors.
- DbgScope *getParent() const { return Parent; }
- void setParent(DbgScope *P) { Parent = P; }
- DIDescriptor getDesc() const { return Desc; }
- MDNode *getInlinedAt() const {
- return InlinedAtLocation;
- }
- MDNode *getScopeNode() const { return Desc.getNode(); }
- unsigned getStartLabelID() const { return StartLabelID; }
- unsigned getEndLabelID() const { return EndLabelID; }
- SmallVector<DbgScope *, 4> &getScopes() { return Scopes; }
- SmallVector<DbgVariable *, 8> &getVariables() { return Variables; }
- void setStartLabelID(unsigned S) { StartLabelID = S; }
- void setEndLabelID(unsigned E) { EndLabelID = E; }
- void setLastInsn(const MachineInstr *MI) { LastInsn = MI; }
- const MachineInstr *getLastInsn() { return LastInsn; }
- void setFirstInsn(const MachineInstr *MI) { FirstInsn = MI; }
- void setAbstractScope() { AbstractScope = true; }
- bool isAbstractScope() const { return AbstractScope; }
- const MachineInstr *getFirstInsn() { return FirstInsn; }
-
- /// addScope - Add a child scope to this scope.
- ///
- void addScope(DbgScope *S) { Scopes.push_back(S); }
-
- /// addVariable - Add a variable to the scope.
- ///
- void addVariable(DbgVariable *V) { Variables.push_back(V); }
-
- void fixInstructionMarkers(DenseMap<const MachineInstr *,
- unsigned> &MIIndexMap) {
- assert (getFirstInsn() && "First instruction is missing!");
-
- // Use the end of last child scope as end of this scope.
- SmallVector<DbgScope *, 4> &Scopes = getScopes();
- const MachineInstr *LastInsn = getFirstInsn();
- unsigned LIndex = 0;
- if (Scopes.empty()) {
- assert (getLastInsn() && "Inner most scope does not have last insn!");
- return;
- }
- for (SmallVector<DbgScope *, 4>::iterator SI = Scopes.begin(),
- SE = Scopes.end(); SI != SE; ++SI) {
- DbgScope *DS = *SI;
- DS->fixInstructionMarkers(MIIndexMap);
- const MachineInstr *DSLastInsn = DS->getLastInsn();
- unsigned DSI = MIIndexMap[DSLastInsn];
- if (DSI > LIndex) {
- LastInsn = DSLastInsn;
- LIndex = DSI;
- }
- }
-
- unsigned CurrentLastInsnIndex = 0;
- if (const MachineInstr *CL = getLastInsn())
- CurrentLastInsnIndex = MIIndexMap[CL];
- unsigned FIndex = MIIndexMap[getFirstInsn()];
-
- // Set LastInsn as the last instruction for this scope only if
- // it follows
- // 1) this scope's first instruction and
- // 2) current last instruction for this scope, if any.
- if (LIndex >= CurrentLastInsnIndex && LIndex >= FIndex)
- setLastInsn(LastInsn);
- }
-
-#ifndef NDEBUG
- void dump() const;
-#endif
-};
-
-#ifndef NDEBUG
-void DbgScope::dump() const {
- raw_ostream &err = dbgs();
- err.indent(IndentLevel);
- MDNode *N = Desc.getNode();
- N->dump();
- err << " [" << StartLabelID << ", " << EndLabelID << "]\n";
- if (AbstractScope)
- err << "Abstract Scope\n";
-
- IndentLevel += 2;
- if (!Scopes.empty())
- err << "Children ...\n";
- for (unsigned i = 0, e = Scopes.size(); i != e; ++i)
- if (Scopes[i] != this)
- Scopes[i]->dump();
-
- IndentLevel -= 2;
-}
-#endif
-
-DbgScope::~DbgScope() {
- for (unsigned i = 0, N = Scopes.size(); i < N; ++i)
- delete Scopes[i];
- for (unsigned j = 0, M = Variables.size(); j < M; ++j)
- delete Variables[j];
-}
-
-} // end llvm namespace
-
-DwarfDebug::DwarfDebug(raw_ostream &OS, AsmPrinter *A, const MCAsmInfo *T)
- : DwarfPrinter(OS, A, T, "dbg"), ModuleCU(0),
- AbbreviationsSet(InitAbbreviationsSetSize), Abbreviations(),
- DIEValues(), StringPool(),
- SectionSourceLines(), didInitial(false), shouldEmit(false),
- CurrentFnDbgScope(0), DebugTimer(0) {
- if (TimePassesIsEnabled)
- DebugTimer = new Timer("Dwarf Debug Writer");
-}
-DwarfDebug::~DwarfDebug() {
- for (unsigned j = 0, M = DIEValues.size(); j < M; ++j)
- delete DIEValues[j];
-
- delete DebugTimer;
-}
-
-/// assignAbbrevNumber - Define a unique number for the abbreviation.
-///
-void DwarfDebug::assignAbbrevNumber(DIEAbbrev &Abbrev) {
- // Profile the node so that we can make it unique.
- FoldingSetNodeID ID;
- Abbrev.Profile(ID);
-
- // Check the set for priors.
- DIEAbbrev *InSet = AbbreviationsSet.GetOrInsertNode(&Abbrev);
-
- // If it's newly added.
- if (InSet == &Abbrev) {
- // Add to abbreviation list.
- Abbreviations.push_back(&Abbrev);
-
- // Assign the vector position + 1 as its number.
- Abbrev.setNumber(Abbreviations.size());
- } else {
- // Assign existing abbreviation number.
- Abbrev.setNumber(InSet->getNumber());
- }
-}
-
-/// createDIEEntry - Creates a new DIEEntry to be a proxy for a debug
-/// information entry.
-DIEEntry *DwarfDebug::createDIEEntry(DIE *Entry) {
- DIEEntry *Value = new DIEEntry(Entry);
- DIEValues.push_back(Value);
- return Value;
-}
-
-/// addUInt - Add an unsigned integer attribute data and value.
-///
-void DwarfDebug::addUInt(DIE *Die, unsigned Attribute,
- unsigned Form, uint64_t Integer) {
- if (!Form) Form = DIEInteger::BestForm(false, Integer);
- DIEValue *Value = new DIEInteger(Integer);
- DIEValues.push_back(Value);
- Die->addValue(Attribute, Form, Value);
-}
-
- /// addSInt - Add a signed integer attribute data and value.
-///
-void DwarfDebug::addSInt(DIE *Die, unsigned Attribute,
- unsigned Form, int64_t Integer) {
- if (!Form) Form = DIEInteger::BestForm(true, Integer);
- DIEValue *Value = new DIEInteger(Integer);
- DIEValues.push_back(Value);
- Die->addValue(Attribute, Form, Value);
-}
-
-/// addString - Add a string attribute data and value. DIEString only
- /// keeps a reference to the string.
-void DwarfDebug::addString(DIE *Die, unsigned Attribute, unsigned Form,
- StringRef String) {
- DIEValue *Value = new DIEString(String);
- DIEValues.push_back(Value);
- Die->addValue(Attribute, Form, Value);
-}
-
-/// addLabel - Add a Dwarf label attribute data and value.
-///
-void DwarfDebug::addLabel(DIE *Die, unsigned Attribute, unsigned Form,
- const DWLabel &Label) {
- DIEValue *Value = new DIEDwarfLabel(Label);
- DIEValues.push_back(Value);
- Die->addValue(Attribute, Form, Value);
-}
-
- /// addObjectLabel - Add a non-Dwarf label attribute data and value.
-///
-void DwarfDebug::addObjectLabel(DIE *Die, unsigned Attribute, unsigned Form,
- const MCSymbol *Sym) {
- DIEValue *Value = new DIEObjectLabel(Sym);
- DIEValues.push_back(Value);
- Die->addValue(Attribute, Form, Value);
-}
-
-/// addSectionOffset - Add a section offset label attribute data and value.
-///
-void DwarfDebug::addSectionOffset(DIE *Die, unsigned Attribute, unsigned Form,
- const DWLabel &Label, const DWLabel &Section,
- bool isEH, bool useSet) {
- DIEValue *Value = new DIESectionOffset(Label, Section, isEH, useSet);
- DIEValues.push_back(Value);
- Die->addValue(Attribute, Form, Value);
-}
-
-/// addDelta - Add a label delta attribute data and value.
-///
-void DwarfDebug::addDelta(DIE *Die, unsigned Attribute, unsigned Form,
- const DWLabel &Hi, const DWLabel &Lo) {
- DIEValue *Value = new DIEDelta(Hi, Lo);
- DIEValues.push_back(Value);
- Die->addValue(Attribute, Form, Value);
-}
-
-/// addBlock - Add block data.
-///
-void DwarfDebug::addBlock(DIE *Die, unsigned Attribute, unsigned Form,
- DIEBlock *Block) {
- Block->ComputeSize(TD);
- DIEValues.push_back(Block);
- Die->addValue(Attribute, Block->BestForm(), Block);
-}
-
-/// addSourceLine - Add location information to specified debug information
-/// entry.
-void DwarfDebug::addSourceLine(DIE *Die, const DIVariable *V) {
- // If there is no compile unit specified, don't add a line #.
- if (V->getCompileUnit().isNull())
- return;
-
- unsigned Line = V->getLineNumber();
- unsigned FileID = findCompileUnit(V->getCompileUnit())->getID();
- assert(FileID && "Invalid file id");
- addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
- addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
-}
-
-/// addSourceLine - Add location information to specified debug information
-/// entry.
-void DwarfDebug::addSourceLine(DIE *Die, const DIGlobal *G) {
- // If there is no compile unit specified, don't add a line #.
- if (G->getCompileUnit().isNull())
- return;
-
- unsigned Line = G->getLineNumber();
- unsigned FileID = findCompileUnit(G->getCompileUnit())->getID();
- assert(FileID && "Invalid file id");
- addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
- addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
-}
-
-/// addSourceLine - Add location information to specified debug information
-/// entry.
-void DwarfDebug::addSourceLine(DIE *Die, const DISubprogram *SP) {
- // If there is no compile unit specified, don't add a line #.
- if (SP->getCompileUnit().isNull())
- return;
- // If the line number is 0, don't add it.
- if (SP->getLineNumber() == 0)
- return;
-
-
- unsigned Line = SP->getLineNumber();
- unsigned FileID = findCompileUnit(SP->getCompileUnit())->getID();
- assert(FileID && "Invalid file id");
- addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
- addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
-}
-
-/// addSourceLine - Add location information to specified debug information
-/// entry.
-void DwarfDebug::addSourceLine(DIE *Die, const DIType *Ty) {
- // If there is no compile unit specified, don't add a line #.
- DICompileUnit CU = Ty->getCompileUnit();
- if (CU.isNull())
- return;
-
- unsigned Line = Ty->getLineNumber();
- unsigned FileID = findCompileUnit(CU)->getID();
- assert(FileID && "Invalid file id");
- addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
- addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
-}
-
-/// addSourceLine - Add location information to specified debug information
-/// entry.
-void DwarfDebug::addSourceLine(DIE *Die, const DINameSpace *NS) {
- // If there is no compile unit specified, don't add a line #.
- if (NS->getCompileUnit().isNull())
- return;
-
- unsigned Line = NS->getLineNumber();
- StringRef FN = NS->getFilename();
- StringRef Dir = NS->getDirectory();
-
- unsigned FileID = GetOrCreateSourceID(Dir, FN);
- assert(FileID && "Invalid file id");
- addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID);
- addUInt(Die, dwarf::DW_AT_decl_line, 0, Line);
-}
-
-/* Byref variables, in Blocks, are declared by the programmer as
- "SomeType VarName;", but the compiler creates a
- __Block_byref_x_VarName struct, and gives the variable VarName
- either the struct, or a pointer to the struct, as its type. This
- is necessary for various behind-the-scenes things the compiler
- needs to do with by-reference variables in blocks.
-
- However, as far as the original *programmer* is concerned, the
- variable should still have type 'SomeType', as originally declared.
-
- The following function dives into the __Block_byref_x_VarName
- struct to find the original type of the variable. This will be
- passed back to the code generating the type for the Debug
- Information Entry for the variable 'VarName'. 'VarName' will then
- have the original type 'SomeType' in its debug information.
-
- The original type 'SomeType' will be the type of the field named
- 'VarName' inside the __Block_byref_x_VarName struct.
-
- NOTE: In order for this to not completely fail on the debugger
- side, the Debug Information Entry for the variable VarName needs to
- have a DW_AT_location that tells the debugger how to unwind through
- the pointers and __Block_byref_x_VarName struct to find the actual
- value of the variable. The function addBlockByrefType does this. */
-
-/// Find the type the programmer originally declared the variable to be
-/// and return that type.
-///
-DIType DwarfDebug::getBlockByrefType(DIType Ty, std::string Name) {
-
- DIType subType = Ty;
- unsigned tag = Ty.getTag();
-
- if (tag == dwarf::DW_TAG_pointer_type) {
- DIDerivedType DTy = DIDerivedType(Ty.getNode());
- subType = DTy.getTypeDerivedFrom();
- }
-
- DICompositeType blockStruct = DICompositeType(subType.getNode());
-
- DIArray Elements = blockStruct.getTypeArray();
-
- if (Elements.isNull())
- return Ty;
-
- for (unsigned i = 0, N = Elements.getNumElements(); i < N; ++i) {
- DIDescriptor Element = Elements.getElement(i);
- DIDerivedType DT = DIDerivedType(Element.getNode());
- if (Name == DT.getName())
- return (DT.getTypeDerivedFrom());
- }
-
- return Ty;
-}
-
-/// addComplexAddress - Start with the address based on the location provided,
-/// and generate the DWARF information necessary to find the actual variable
-/// given the extra address information encoded in the DIVariable, starting from
-/// the starting location. Add the DWARF information to the die.
-///
-void DwarfDebug::addComplexAddress(DbgVariable *&DV, DIE *Die,
- unsigned Attribute,
- const MachineLocation &Location) {
- const DIVariable &VD = DV->getVariable();
- DIType Ty = VD.getType();
-
- // Decode the original location, and use that as the start of the byref
- // variable's location.
- unsigned Reg = RI->getDwarfRegNum(Location.getReg(), false);
- DIEBlock *Block = new DIEBlock();
-
- if (Location.isReg()) {
- if (Reg < 32) {
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_reg0 + Reg);
- } else {
- Reg = Reg - dwarf::DW_OP_reg0;
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_breg0 + Reg);
- addUInt(Block, 0, dwarf::DW_FORM_udata, Reg);
- }
- } else {
- if (Reg < 32)
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_breg0 + Reg);
- else {
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_bregx);
- addUInt(Block, 0, dwarf::DW_FORM_udata, Reg);
- }
-
- addUInt(Block, 0, dwarf::DW_FORM_sdata, Location.getOffset());
- }
-
- for (unsigned i = 0, N = VD.getNumAddrElements(); i < N; ++i) {
- uint64_t Element = VD.getAddrElement(i);
-
- if (Element == DIFactory::OpPlus) {
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst);
- addUInt(Block, 0, dwarf::DW_FORM_udata, VD.getAddrElement(++i));
- } else if (Element == DIFactory::OpDeref) {
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
- } else llvm_unreachable("unknown DIFactory Opcode");
- }
-
- // Now attach the location information to the DIE.
- addBlock(Die, Attribute, 0, Block);
-}
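As a concrete (hypothetical) illustration of the loop above: a DIVariable carrying the complex-address elements { OpPlus, 8, OpDeref } on top of a register-based location would have the following appended after the register/offset prefix already emitted for Location:

    DW_OP_plus_uconst 8    (OpPlus consumes the next element as its literal operand)
    DW_OP_deref            (OpDeref takes no operand)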
-
-/* Byref variables, in Blocks, are declared by the programmer as "SomeType
- VarName;", but the compiler creates a __Block_byref_x_VarName struct, and
- gives the variable VarName either the struct, or a pointer to the struct, as
- its type. This is necessary for various behind-the-scenes things the
- compiler needs to do with by-reference variables in Blocks.
-
- However, as far as the original *programmer* is concerned, the variable
- should still have type 'SomeType', as originally declared.
-
- The function getBlockByrefType dives into the __Block_byref_x_VarName
- struct to find the original type of the variable, which is then assigned to
- the variable's Debug Information Entry as its real type. So far, so good.
- However now the debugger will expect the variable VarName to have the type
- SomeType. So we need the location attribute for the variable to be an
- expression that explains to the debugger how to navigate through the
- pointers and struct to find the actual variable of type SomeType.
-
- The following function does just that. We start by getting
- the "normal" location for the variable. This will be the location
- of either the struct __Block_byref_x_VarName or the pointer to the
- struct __Block_byref_x_VarName.
-
- The struct will look something like:
-
- struct __Block_byref_x_VarName {
- ... <various fields>
- struct __Block_byref_x_VarName *forwarding;
- ... <various other fields>
- SomeType VarName;
- ... <maybe more fields>
- };
-
- If we are given the struct directly (as our starting point) we
- need to tell the debugger to:
-
- 1). Add the offset of the forwarding field.
-
- 2). Follow that pointer to get the real __Block_byref_x_VarName
- struct to use (the real one may have been copied onto the heap).
-
- 3). Add the offset for the field VarName, to find the actual variable.
-
- If we started with a pointer to the struct, then we need to
- dereference that pointer first, before the other steps.
- Translating this into DWARF ops, we will need to append the following
- to the current location description for the variable:
-
- DW_OP_deref -- optional, if we start with a pointer
- DW_OP_plus_uconst <forward_fld_offset>
- DW_OP_deref
- DW_OP_plus_uconst <varName_fld_offset>
-
- That is what this function does. */
-
-/// addBlockByrefAddress - Start with the address based on the location
-/// provided, and generate the DWARF information necessary to find the
-/// actual Block variable (navigating the Block struct) based on the
-/// starting location. Add the DWARF information to the die. For
-/// more information, read large comment just above here.
-///
-void DwarfDebug::addBlockByrefAddress(DbgVariable *&DV, DIE *Die,
- unsigned Attribute,
- const MachineLocation &Location) {
- const DIVariable &VD = DV->getVariable();
- DIType Ty = VD.getType();
- DIType TmpTy = Ty;
- unsigned Tag = Ty.getTag();
- bool isPointer = false;
-
- StringRef varName = VD.getName();
-
- if (Tag == dwarf::DW_TAG_pointer_type) {
- DIDerivedType DTy = DIDerivedType(Ty.getNode());
- TmpTy = DTy.getTypeDerivedFrom();
- isPointer = true;
- }
-
- DICompositeType blockStruct = DICompositeType(TmpTy.getNode());
-
- // Find the __forwarding field and the variable field in the __Block_byref
- // struct.
- DIArray Fields = blockStruct.getTypeArray();
- DIDescriptor varField = DIDescriptor();
- DIDescriptor forwardingField = DIDescriptor();
-
-
- for (unsigned i = 0, N = Fields.getNumElements(); i < N; ++i) {
- DIDescriptor Element = Fields.getElement(i);
- DIDerivedType DT = DIDerivedType(Element.getNode());
- StringRef fieldName = DT.getName();
- if (fieldName == "__forwarding")
- forwardingField = Element;
- else if (fieldName == varName)
- varField = Element;
- }
-
- assert(!varField.isNull() && "Can't find byref variable in Block struct");
- assert(!forwardingField.isNull()
- && "Can't find forwarding field in Block struct");
-
- // Get the offsets for the forwarding field and the variable field.
- unsigned int forwardingFieldOffset =
- DIDerivedType(forwardingField.getNode()).getOffsetInBits() >> 3;
- unsigned int varFieldOffset =
- DIDerivedType(varField.getNode()).getOffsetInBits() >> 3;
-
- // Decode the original location, and use that as the start of the byref
- // variable's location.
- unsigned Reg = RI->getDwarfRegNum(Location.getReg(), false);
- DIEBlock *Block = new DIEBlock();
-
- if (Location.isReg()) {
- if (Reg < 32)
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_reg0 + Reg);
- else {
- Reg = Reg - dwarf::DW_OP_reg0;
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_breg0 + Reg);
- addUInt(Block, 0, dwarf::DW_FORM_udata, Reg);
- }
- } else {
- if (Reg < 32)
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_breg0 + Reg);
- else {
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_bregx);
- addUInt(Block, 0, dwarf::DW_FORM_udata, Reg);
- }
-
- addUInt(Block, 0, dwarf::DW_FORM_sdata, Location.getOffset());
- }
-
- // If we started with a pointer to the __Block_byref... struct, then
- // the first thing we need to do is dereference the pointer (DW_OP_deref).
- if (isPointer)
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
-
- // Next add the offset for the '__forwarding' field:
- // DW_OP_plus_uconst ForwardingFieldOffset. Note there's no point in
- // adding the offset if it's 0.
- if (forwardingFieldOffset > 0) {
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst);
- addUInt(Block, 0, dwarf::DW_FORM_udata, forwardingFieldOffset);
- }
-
- // Now dereference the __forwarding field to get to the real __Block_byref
- // struct: DW_OP_deref.
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
-
- // Now that we've got the real __Block_byref... struct, add the offset
- // for the variable's field to get to the location of the actual variable:
- // DW_OP_plus_uconst varFieldOffset. Again, don't add if it's 0.
- if (varFieldOffset > 0) {
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst);
- addUInt(Block, 0, dwarf::DW_FORM_udata, varFieldOffset);
- }
-
- // Now attach the location information to the DIE.
- addBlock(Die, Attribute, 0, Block);
-}
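To make the sequence described in the comment concrete, assume hypothetical byte offsets of 16 for the __forwarding field and 24 for the VarName field, and a starting location that is a pointer to the __Block_byref_x_VarName struct. The block built above then reads, after the register/offset prefix for Location:

    DW_OP_deref            (we started with a pointer to the struct)
    DW_OP_plus_uconst 16   (advance to the __forwarding field)
    DW_OP_deref            (follow it to the real, possibly heap-copied, struct)
    DW_OP_plus_uconst 24   (advance to the VarName field itself)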
-
-/// addAddress - Add an address attribute to a die based on the location
-/// provided.
-void DwarfDebug::addAddress(DIE *Die, unsigned Attribute,
- const MachineLocation &Location) {
- unsigned Reg = RI->getDwarfRegNum(Location.getReg(), false);
- DIEBlock *Block = new DIEBlock();
-
- if (Location.isReg()) {
- if (Reg < 32) {
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_reg0 + Reg);
- } else {
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_regx);
- addUInt(Block, 0, dwarf::DW_FORM_udata, Reg);
- }
- } else {
- if (Reg < 32) {
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_breg0 + Reg);
- } else {
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_bregx);
- addUInt(Block, 0, dwarf::DW_FORM_udata, Reg);
- }
-
- addUInt(Block, 0, dwarf::DW_FORM_sdata, Location.getOffset());
- }
-
- addBlock(Die, Attribute, 0, Block);
-}
-
-/// addToContextOwner - Add Die into the list of its context owner's children.
-void DwarfDebug::addToContextOwner(DIE *Die, DIDescriptor Context) {
- if (Context.isNull())
- ModuleCU->addDie(Die);
- else if (Context.isType()) {
- DIE *ContextDIE = getOrCreateTypeDIE(DIType(Context.getNode()));
- ContextDIE->addChild(Die);
- } else if (Context.isNameSpace()) {
- DIE *ContextDIE = getOrCreateNameSpace(DINameSpace(Context.getNode()));
- ContextDIE->addChild(Die);
- } else if (DIE *ContextDIE = ModuleCU->getDIE(Context.getNode()))
- ContextDIE->addChild(Die);
- else
- ModuleCU->addDie(Die);
-}
-
-/// getOrCreateTypeDIE - Find existing DIE or create new DIE for the
-/// given DIType.
-DIE *DwarfDebug::getOrCreateTypeDIE(DIType Ty) {
- DIE *TyDIE = ModuleCU->getDIE(Ty.getNode());
- if (TyDIE)
- return TyDIE;
-
- // Create new type.
- TyDIE = new DIE(dwarf::DW_TAG_base_type);
- ModuleCU->insertDIE(Ty.getNode(), TyDIE);
- if (Ty.isBasicType())
- constructTypeDIE(*TyDIE, DIBasicType(Ty.getNode()));
- else if (Ty.isCompositeType())
- constructTypeDIE(*TyDIE, DICompositeType(Ty.getNode()));
- else {
- assert(Ty.isDerivedType() && "Unknown kind of DIType");
- constructTypeDIE(*TyDIE, DIDerivedType(Ty.getNode()));
- }
-
- addToContextOwner(TyDIE, Ty.getContext());
- return TyDIE;
-}
-
-/// addType - Add a new type attribute to the specified entity.
-void DwarfDebug::addType(DIE *Entity, DIType Ty) {
- if (Ty.isNull())
- return;
-
- // Check for pre-existence.
- DIEEntry *Entry = ModuleCU->getDIEEntry(Ty.getNode());
- // If it exists then use the existing value.
- if (Entry) {
- Entity->addValue(dwarf::DW_AT_type, dwarf::DW_FORM_ref4, Entry);
- return;
- }
-
- // Set up proxy.
- Entry = createDIEEntry();
- ModuleCU->insertDIEEntry(Ty.getNode(), Entry);
-
- // Construct type.
- DIE *Buffer = getOrCreateTypeDIE(Ty);
-
- Entry->setEntry(Buffer);
- Entity->addValue(dwarf::DW_AT_type, dwarf::DW_FORM_ref4, Entry);
-}
-
-/// constructTypeDIE - Construct basic type die from DIBasicType.
-void DwarfDebug::constructTypeDIE(DIE &Buffer, DIBasicType BTy) {
- // Get core information.
- StringRef Name = BTy.getName();
- Buffer.setTag(dwarf::DW_TAG_base_type);
- addUInt(&Buffer, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1,
- BTy.getEncoding());
-
- // Add name if not anonymous or intermediate type.
- if (!Name.empty())
- addString(&Buffer, dwarf::DW_AT_name, dwarf::DW_FORM_string, Name);
- uint64_t Size = BTy.getSizeInBits() >> 3;
- addUInt(&Buffer, dwarf::DW_AT_byte_size, 0, Size);
-}
-
-/// constructTypeDIE - Construct derived type die from DIDerivedType.
-void DwarfDebug::constructTypeDIE(DIE &Buffer, DIDerivedType DTy) {
- // Get core information.
- StringRef Name = DTy.getName();
- uint64_t Size = DTy.getSizeInBits() >> 3;
- unsigned Tag = DTy.getTag();
-
- // FIXME - Workaround for templates.
- if (Tag == dwarf::DW_TAG_inheritance) Tag = dwarf::DW_TAG_reference_type;
-
- Buffer.setTag(Tag);
-
- // Map to main type, void will not have a type.
- DIType FromTy = DTy.getTypeDerivedFrom();
- addType(&Buffer, FromTy);
-
- // Add name if not anonymous or intermediate type.
- if (!Name.empty())
- addString(&Buffer, dwarf::DW_AT_name, dwarf::DW_FORM_string, Name);
-
- // Add size if non-zero (derived types might be zero-sized.)
- if (Size)
- addUInt(&Buffer, dwarf::DW_AT_byte_size, 0, Size);
-
- // Add source line info if available and DTy is not a forward declaration.
- if (!DTy.isForwardDecl())
- addSourceLine(&Buffer, &DTy);
-}
-
-/// constructTypeDIE - Construct type DIE from DICompositeType.
-void DwarfDebug::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
- // Get core information.
- StringRef Name = CTy.getName();
-
- uint64_t Size = CTy.getSizeInBits() >> 3;
- unsigned Tag = CTy.getTag();
- Buffer.setTag(Tag);
-
- switch (Tag) {
- case dwarf::DW_TAG_vector_type:
- case dwarf::DW_TAG_array_type:
- constructArrayTypeDIE(Buffer, &CTy);
- break;
- case dwarf::DW_TAG_enumeration_type: {
- DIArray Elements = CTy.getTypeArray();
-
- // Add enumerators to enumeration type.
- for (unsigned i = 0, N = Elements.getNumElements(); i < N; ++i) {
- DIE *ElemDie = NULL;
- DIEnumerator Enum(Elements.getElement(i).getNode());
- if (!Enum.isNull()) {
- ElemDie = constructEnumTypeDIE(&Enum);
- Buffer.addChild(ElemDie);
- }
- }
- }
- break;
- case dwarf::DW_TAG_subroutine_type: {
- // Add return type.
- DIArray Elements = CTy.getTypeArray();
- DIDescriptor RTy = Elements.getElement(0);
- addType(&Buffer, DIType(RTy.getNode()));
-
- // Add prototype flag.
- addUInt(&Buffer, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1);
-
- // Add arguments.
- for (unsigned i = 1, N = Elements.getNumElements(); i < N; ++i) {
- DIE *Arg = new DIE(dwarf::DW_TAG_formal_parameter);
- DIDescriptor Ty = Elements.getElement(i);
- addType(Arg, DIType(Ty.getNode()));
- Buffer.addChild(Arg);
- }
- }
- break;
- case dwarf::DW_TAG_structure_type:
- case dwarf::DW_TAG_union_type:
- case dwarf::DW_TAG_class_type: {
- // Add elements to structure type.
- DIArray Elements = CTy.getTypeArray();
-
- // A forward-declared struct type may not have elements available.
- if (Elements.isNull())
- break;
-
- // Add elements to structure type.
- for (unsigned i = 0, N = Elements.getNumElements(); i < N; ++i) {
- DIDescriptor Element = Elements.getElement(i);
- if (Element.isNull())
- continue;
- DIE *ElemDie = NULL;
- if (Element.getTag() == dwarf::DW_TAG_subprogram)
- ElemDie = createSubprogramDIE(DISubprogram(Element.getNode()));
- else if (Element.getTag() == dwarf::DW_TAG_auto_variable) {
- DIVariable DV(Element.getNode());
- ElemDie = new DIE(dwarf::DW_TAG_variable);
- addString(ElemDie, dwarf::DW_AT_name, dwarf::DW_FORM_string,
- DV.getName());
- addType(ElemDie, DV.getType());
- addUInt(ElemDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
- addUInt(ElemDie, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1);
- addSourceLine(ElemDie, &DV);
- } else
- ElemDie = createMemberDIE(DIDerivedType(Element.getNode()));
- Buffer.addChild(ElemDie);
- }
-
- if (CTy.isAppleBlockExtension())
- addUInt(&Buffer, dwarf::DW_AT_APPLE_block, dwarf::DW_FORM_flag, 1);
-
- unsigned RLang = CTy.getRunTimeLang();
- if (RLang)
- addUInt(&Buffer, dwarf::DW_AT_APPLE_runtime_class,
- dwarf::DW_FORM_data1, RLang);
-
- DICompositeType ContainingType = CTy.getContainingType();
- if (!ContainingType.isNull())
- addDIEEntry(&Buffer, dwarf::DW_AT_containing_type, dwarf::DW_FORM_ref4,
- getOrCreateTypeDIE(DIType(ContainingType.getNode())));
- break;
- }
- default:
- break;
- }
-
- // Add name if not anonymous or intermediate type.
- if (!Name.empty())
- addString(&Buffer, dwarf::DW_AT_name, dwarf::DW_FORM_string, Name);
-
- if (Tag == dwarf::DW_TAG_enumeration_type || Tag == dwarf::DW_TAG_class_type ||
- Tag == dwarf::DW_TAG_structure_type || Tag == dwarf::DW_TAG_union_type) {
- // Add size if non-zero (derived types might be zero-sized.)
- if (Size)
- addUInt(&Buffer, dwarf::DW_AT_byte_size, 0, Size);
- else {
- // Add zero size if it is not a forward declaration.
- if (CTy.isForwardDecl())
- addUInt(&Buffer, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
- else
- addUInt(&Buffer, dwarf::DW_AT_byte_size, 0, 0);
- }
-
- // Add source line info if available.
- if (!CTy.isForwardDecl())
- addSourceLine(&Buffer, &CTy);
- }
-}
-
-/// constructSubrangeDIE - Construct subrange DIE from DISubrange.
-void DwarfDebug::constructSubrangeDIE(DIE &Buffer, DISubrange SR, DIE *IndexTy){
- int64_t L = SR.getLo();
- int64_t H = SR.getHi();
- DIE *DW_Subrange = new DIE(dwarf::DW_TAG_subrange_type);
-
- addDIEEntry(DW_Subrange, dwarf::DW_AT_type, dwarf::DW_FORM_ref4, IndexTy);
- if (L)
- addSInt(DW_Subrange, dwarf::DW_AT_lower_bound, 0, L);
- addSInt(DW_Subrange, dwarf::DW_AT_upper_bound, 0, H);
-
- Buffer.addChild(DW_Subrange);
-}
-
-/// constructArrayTypeDIE - Construct array type DIE from DICompositeType.
-void DwarfDebug::constructArrayTypeDIE(DIE &Buffer,
- DICompositeType *CTy) {
- Buffer.setTag(dwarf::DW_TAG_array_type);
- if (CTy->getTag() == dwarf::DW_TAG_vector_type)
- addUInt(&Buffer, dwarf::DW_AT_GNU_vector, dwarf::DW_FORM_flag, 1);
-
- // Emit derived type.
- addType(&Buffer, CTy->getTypeDerivedFrom());
- DIArray Elements = CTy->getTypeArray();
-
- // Get an anonymous type for index type.
- DIE *IdxTy = ModuleCU->getIndexTyDie();
- if (!IdxTy) {
- // Construct an anonymous type for index type.
- IdxTy = new DIE(dwarf::DW_TAG_base_type);
- addUInt(IdxTy, dwarf::DW_AT_byte_size, 0, sizeof(int32_t));
- addUInt(IdxTy, dwarf::DW_AT_encoding, dwarf::DW_FORM_data1,
- dwarf::DW_ATE_signed);
- ModuleCU->addDie(IdxTy);
- ModuleCU->setIndexTyDie(IdxTy);
- }
-
- // Add subranges to array type.
- for (unsigned i = 0, N = Elements.getNumElements(); i < N; ++i) {
- DIDescriptor Element = Elements.getElement(i);
- if (Element.getTag() == dwarf::DW_TAG_subrange_type)
- constructSubrangeDIE(Buffer, DISubrange(Element.getNode()), IdxTy);
- }
-}
-
-/// constructEnumTypeDIE - Construct enum type DIE from DIEnumerator.
-DIE *DwarfDebug::constructEnumTypeDIE(DIEnumerator *ETy) {
- DIE *Enumerator = new DIE(dwarf::DW_TAG_enumerator);
- StringRef Name = ETy->getName();
- addString(Enumerator, dwarf::DW_AT_name, dwarf::DW_FORM_string, Name);
- int64_t Value = ETy->getEnumValue();
- addSInt(Enumerator, dwarf::DW_AT_const_value, dwarf::DW_FORM_sdata, Value);
- return Enumerator;
-}
-
- /// getRealLinkageName - If the linkage name starts with the special LLVM
- /// prefix that tells the asm printer not to emit the usual symbol prefix
- /// before the symbol name, return the linkage name with that prefix stripped.
-static StringRef getRealLinkageName(StringRef LinkageName) {
- char One = '\1';
- if (LinkageName.startswith(StringRef(&One, 1)))
- return LinkageName.substr(1);
- return LinkageName;
-}
-
-/// createGlobalVariableDIE - Create new DIE using GV.
-DIE *DwarfDebug::createGlobalVariableDIE(const DIGlobalVariable &GV) {
- // If the global variable was optimized out then there is no need to create a debug info
- // entry.
- if (!GV.getGlobal()) return NULL;
- if (GV.getDisplayName().empty()) return NULL;
-
- DIE *GVDie = new DIE(dwarf::DW_TAG_variable);
- addString(GVDie, dwarf::DW_AT_name, dwarf::DW_FORM_string,
- GV.getDisplayName());
-
- StringRef LinkageName = GV.getLinkageName();
- if (!LinkageName.empty())
- addString(GVDie, dwarf::DW_AT_MIPS_linkage_name, dwarf::DW_FORM_string,
- getRealLinkageName(LinkageName));
-
- addType(GVDie, GV.getType());
- if (!GV.isLocalToUnit())
- addUInt(GVDie, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1);
- addSourceLine(GVDie, &GV);
-
- return GVDie;
-}
-
-/// createMemberDIE - Create new member DIE.
-DIE *DwarfDebug::createMemberDIE(const DIDerivedType &DT) {
- DIE *MemberDie = new DIE(DT.getTag());
- StringRef Name = DT.getName();
- if (!Name.empty())
- addString(MemberDie, dwarf::DW_AT_name, dwarf::DW_FORM_string, Name);
-
- addType(MemberDie, DT.getTypeDerivedFrom());
-
- addSourceLine(MemberDie, &DT);
-
- DIEBlock *MemLocationDie = new DIEBlock();
- addUInt(MemLocationDie, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_plus_uconst);
-
- uint64_t Size = DT.getSizeInBits();
- uint64_t FieldSize = DT.getOriginalTypeSize();
-
- if (Size != FieldSize) {
- // Handle bitfield.
- addUInt(MemberDie, dwarf::DW_AT_byte_size, 0, DT.getOriginalTypeSize()>>3);
- addUInt(MemberDie, dwarf::DW_AT_bit_size, 0, DT.getSizeInBits());
-
- uint64_t Offset = DT.getOffsetInBits();
- uint64_t AlignMask = ~(DT.getAlignInBits() - 1);
- uint64_t HiMark = (Offset + FieldSize) & AlignMask;
- uint64_t FieldOffset = (HiMark - FieldSize);
- Offset -= FieldOffset;
-
- // Maybe we need to work from the other end.
- if (TD->isLittleEndian()) Offset = FieldSize - (Offset + Size);
- addUInt(MemberDie, dwarf::DW_AT_bit_offset, 0, Offset);
-
- // Here DW_AT_data_member_location points to the anonymous
- // field that includes this bit field.
- addUInt(MemLocationDie, 0, dwarf::DW_FORM_udata, FieldOffset >> 3);
-
- } else
- // This is not a bitfield.
- addUInt(MemLocationDie, 0, dwarf::DW_FORM_udata, DT.getOffsetInBits() >> 3);
-
- if (DT.getTag() == dwarf::DW_TAG_inheritance
- && DT.isVirtual()) {
-
- // For C++, virtual base classes are not at a fixed offset. Use the following
- // expression to extract the appropriate offset from the vtable.
- // BaseAddr = ObAddr + *((*ObAddr) - Offset)
-
- DIEBlock *VBaseLocationDie = new DIEBlock();
- addUInt(VBaseLocationDie, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_dup);
- addUInt(VBaseLocationDie, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
- addUInt(VBaseLocationDie, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
- addUInt(VBaseLocationDie, 0, dwarf::DW_FORM_udata, DT.getOffsetInBits());
- addUInt(VBaseLocationDie, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_minus);
- addUInt(VBaseLocationDie, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_deref);
- addUInt(VBaseLocationDie, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_plus);
-
- addBlock(MemberDie, dwarf::DW_AT_data_member_location, 0,
- VBaseLocationDie);
- } else
- addBlock(MemberDie, dwarf::DW_AT_data_member_location, 0, MemLocationDie);
-
- if (DT.isProtected())
- addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
- dwarf::DW_ACCESS_protected);
- else if (DT.isPrivate())
- addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
- dwarf::DW_ACCESS_private);
- else if (DT.getTag() == dwarf::DW_TAG_inheritance)
- addUInt(MemberDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_flag,
- dwarf::DW_ACCESS_public);
- if (DT.isVirtual())
- addUInt(MemberDie, dwarf::DW_AT_virtuality, dwarf::DW_FORM_flag,
- dwarf::DW_VIRTUALITY_virtual);
- return MemberDie;
-}
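Working the bitfield branch through with hypothetical numbers: a 3-bit member declared at bit offset 13 inside a 32-bit field (Size = 3, FieldSize = 32, AlignInBits = 32) gives

    AlignMask   = ~31
    HiMark      = (13 + 32) & ~31 = 32
    FieldOffset = 32 - 32         = 0
    Offset      = 13 - 0          = 13
    little-endian flip: 32 - (13 + 3) = 16

so the member DIE gets DW_AT_bit_offset 16, DW_AT_bit_size 3, DW_AT_byte_size 4, and a DW_AT_data_member_location of byte 0 (FieldOffset >> 3).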
-
-/// createSubprogramDIE - Create new DIE using SP.
-DIE *DwarfDebug::createSubprogramDIE(const DISubprogram &SP, bool MakeDecl) {
- DIE *SPDie = ModuleCU->getDIE(SP.getNode());
- if (SPDie)
- return SPDie;
-
- SPDie = new DIE(dwarf::DW_TAG_subprogram);
- // Constructors and operators for anonymous aggregates do not have names.
- if (!SP.getName().empty())
- addString(SPDie, dwarf::DW_AT_name, dwarf::DW_FORM_string, SP.getName());
-
- StringRef LinkageName = SP.getLinkageName();
- if (!LinkageName.empty())
- addString(SPDie, dwarf::DW_AT_MIPS_linkage_name, dwarf::DW_FORM_string,
- getRealLinkageName(LinkageName));
-
- addSourceLine(SPDie, &SP);
-
- // Add prototyped tag, if C or ObjC.
- unsigned Lang = SP.getCompileUnit().getLanguage();
- if (Lang == dwarf::DW_LANG_C99 || Lang == dwarf::DW_LANG_C89 ||
- Lang == dwarf::DW_LANG_ObjC)
- addUInt(SPDie, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1);
-
- // Add Return Type.
- DICompositeType SPTy = SP.getType();
- DIArray Args = SPTy.getTypeArray();
- unsigned SPTag = SPTy.getTag();
-
- if (Args.isNull() || SPTag != dwarf::DW_TAG_subroutine_type)
- addType(SPDie, SPTy);
- else
- addType(SPDie, DIType(Args.getElement(0).getNode()));
-
- unsigned VK = SP.getVirtuality();
- if (VK) {
- addUInt(SPDie, dwarf::DW_AT_virtuality, dwarf::DW_FORM_flag, VK);
- DIEBlock *Block = new DIEBlock();
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
- addUInt(Block, 0, dwarf::DW_FORM_data1, SP.getVirtualIndex());
- addBlock(SPDie, dwarf::DW_AT_vtable_elem_location, 0, Block);
- ContainingTypeMap.insert(std::make_pair(SPDie,
- SP.getContainingType().getNode()));
- }
-
- if (MakeDecl || !SP.isDefinition()) {
- addUInt(SPDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
-
- // Add arguments. Do not add arguments for subprogram definition. They will
- // be handled while processing variables.
- DICompositeType SPTy = SP.getType();
- DIArray Args = SPTy.getTypeArray();
- unsigned SPTag = SPTy.getTag();
-
- if (SPTag == dwarf::DW_TAG_subroutine_type)
- for (unsigned i = 1, N = Args.getNumElements(); i < N; ++i) {
- DIE *Arg = new DIE(dwarf::DW_TAG_formal_parameter);
- DIType ATy = DIType(DIType(Args.getElement(i).getNode()));
- addType(Arg, ATy);
- if (ATy.isArtificial())
- addUInt(Arg, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1);
- SPDie->addChild(Arg);
- }
- }
-
- if (SP.isArtificial())
- addUInt(SPDie, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1);
-
- // DW_TAG_inlined_subroutine may refer to this DIE.
- ModuleCU->insertDIE(SP.getNode(), SPDie);
- return SPDie;
-}
-
-/// findCompileUnit - Get the compile unit for the given descriptor.
-///
-CompileUnit *DwarfDebug::findCompileUnit(DICompileUnit Unit) {
- DenseMap<Value *, CompileUnit *>::const_iterator I =
- CompileUnitMap.find(Unit.getNode());
- if (I == CompileUnitMap.end())
- return constructCompileUnit(Unit.getNode());
- return I->second;
-}
-
- /// getUpdatedDbgScope - Find or create DbgScope associated with the instruction.
-/// Initialize scope and update scope hierarchy.
-DbgScope *DwarfDebug::getUpdatedDbgScope(MDNode *N, const MachineInstr *MI,
- MDNode *InlinedAt) {
- assert (N && "Invalid Scope encoding!");
- assert (MI && "Missing machine instruction!");
- bool GetConcreteScope = (MI && InlinedAt);
-
- DbgScope *NScope = NULL;
-
- if (InlinedAt)
- NScope = DbgScopeMap.lookup(InlinedAt);
- else
- NScope = DbgScopeMap.lookup(N);
- assert (NScope && "Unable to find working scope!");
-
- if (NScope->getFirstInsn())
- return NScope;
-
- DbgScope *Parent = NULL;
- if (GetConcreteScope) {
- DILocation IL(InlinedAt);
- Parent = getUpdatedDbgScope(IL.getScope().getNode(), MI,
- IL.getOrigLocation().getNode());
- assert (Parent && "Unable to find Parent scope!");
- NScope->setParent(Parent);
- Parent->addScope(NScope);
- } else if (DIDescriptor(N).isLexicalBlock()) {
- DILexicalBlock DB(N);
- if (!DB.getContext().isNull()) {
- Parent = getUpdatedDbgScope(DB.getContext().getNode(), MI, InlinedAt);
- NScope->setParent(Parent);
- Parent->addScope(NScope);
- }
- }
-
- NScope->setFirstInsn(MI);
-
- if (!Parent && !InlinedAt) {
- StringRef SPName = DISubprogram(N).getLinkageName();
- if (SPName == MF->getFunction()->getName())
- CurrentFnDbgScope = NScope;
- }
-
- if (GetConcreteScope) {
- ConcreteScopes[InlinedAt] = NScope;
- getOrCreateAbstractScope(N);
- }
-
- return NScope;
-}
-
-DbgScope *DwarfDebug::getOrCreateAbstractScope(MDNode *N) {
- assert (N && "Invalid Scope encoding!");
-
- DbgScope *AScope = AbstractScopes.lookup(N);
- if (AScope)
- return AScope;
-
- DbgScope *Parent = NULL;
-
- DIDescriptor Scope(N);
- if (Scope.isLexicalBlock()) {
- DILexicalBlock DB(N);
- DIDescriptor ParentDesc = DB.getContext();
- if (!ParentDesc.isNull())
- Parent = getOrCreateAbstractScope(ParentDesc.getNode());
- }
-
- AScope = new DbgScope(Parent, DIDescriptor(N), NULL);
-
- if (Parent)
- Parent->addScope(AScope);
- AScope->setAbstractScope();
- AbstractScopes[N] = AScope;
- if (DIDescriptor(N).isSubprogram())
- AbstractScopesList.push_back(AScope);
- return AScope;
-}
-
-/// updateSubprogramScopeDIE - Find DIE for the given subprogram and
-/// attach appropriate DW_AT_low_pc and DW_AT_high_pc attributes.
-/// If there are global variables in this scope then create and insert
-/// DIEs for these variables.
-DIE *DwarfDebug::updateSubprogramScopeDIE(MDNode *SPNode) {
-
- DIE *SPDie = ModuleCU->getDIE(SPNode);
- assert (SPDie && "Unable to find subprogram DIE!");
- DISubprogram SP(SPNode);
- // There is no need to generate a specification DIE for a function
- // defined at compile unit level. If a function is defined inside another
- // function then gdb prefers the definition at the top level but does not
- // expect a specification DIE in the parent function. So avoid creating
- // a specification DIE for a function defined inside a function.
- if (SP.isDefinition() && !SP.getContext().isCompileUnit()
- && !SP.getContext().isSubprogram()) {
- addUInt(SPDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
- // Add arguments.
- DICompositeType SPTy = SP.getType();
- DIArray Args = SPTy.getTypeArray();
- unsigned SPTag = SPTy.getTag();
- if (SPTag == dwarf::DW_TAG_subroutine_type)
- for (unsigned i = 1, N = Args.getNumElements(); i < N; ++i) {
- DIE *Arg = new DIE(dwarf::DW_TAG_formal_parameter);
-        DIType ATy = DIType(Args.getElement(i).getNode());
- addType(Arg, ATy);
- if (ATy.isArtificial())
- addUInt(Arg, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1);
- SPDie->addChild(Arg);
- }
- DIE *SPDeclDie = SPDie;
- SPDie = new DIE(dwarf::DW_TAG_subprogram);
- addDIEEntry(SPDie, dwarf::DW_AT_specification, dwarf::DW_FORM_ref4,
- SPDeclDie);
- ModuleCU->addDie(SPDie);
- }
-
- addLabel(SPDie, dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr,
- DWLabel("func_begin", SubprogramCount));
- addLabel(SPDie, dwarf::DW_AT_high_pc, dwarf::DW_FORM_addr,
- DWLabel("func_end", SubprogramCount));
- MachineLocation Location(RI->getFrameRegister(*MF));
- addAddress(SPDie, dwarf::DW_AT_frame_base, Location);
-
- if (!DISubprogram(SPNode).isLocalToUnit())
- addUInt(SPDie, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1);
-
- return SPDie;
-}
-
-/// constructLexicalScopeDIE - Construct a new DW_TAG_lexical_block
-/// for this scope and attach DW_AT_low_pc/DW_AT_high_pc labels.
-DIE *DwarfDebug::constructLexicalScopeDIE(DbgScope *Scope) {
- unsigned StartID = MMI->MappedLabel(Scope->getStartLabelID());
- unsigned EndID = MMI->MappedLabel(Scope->getEndLabelID());
-
- // Ignore empty scopes.
- if (StartID == EndID && StartID != 0)
- return NULL;
-
- DIE *ScopeDIE = new DIE(dwarf::DW_TAG_lexical_block);
- if (Scope->isAbstractScope())
- return ScopeDIE;
-
- addLabel(ScopeDIE, dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr,
- StartID ?
- DWLabel("label", StartID)
- : DWLabel("func_begin", SubprogramCount));
- addLabel(ScopeDIE, dwarf::DW_AT_high_pc, dwarf::DW_FORM_addr,
- EndID ?
- DWLabel("label", EndID)
- : DWLabel("func_end", SubprogramCount));
-
- return ScopeDIE;
-}
-
-/// constructInlinedScopeDIE - This scope represents the inlined body of
-/// a function. Construct a DIE to represent this concrete inlined copy
-/// of the function.
-DIE *DwarfDebug::constructInlinedScopeDIE(DbgScope *Scope) {
- unsigned StartID = MMI->MappedLabel(Scope->getStartLabelID());
- unsigned EndID = MMI->MappedLabel(Scope->getEndLabelID());
- assert (StartID && "Invalid starting label for an inlined scope!");
- assert (EndID && "Invalid end label for an inlined scope!");
- // Ignore empty scopes.
- if (StartID == EndID && StartID != 0)
- return NULL;
-
- DIScope DS(Scope->getScopeNode());
- if (DS.isNull())
- return NULL;
- DIE *ScopeDIE = new DIE(dwarf::DW_TAG_inlined_subroutine);
-
- DISubprogram InlinedSP = getDISubprogram(DS.getNode());
- DIE *OriginDIE = ModuleCU->getDIE(InlinedSP.getNode());
- assert (OriginDIE && "Unable to find Origin DIE!");
- addDIEEntry(ScopeDIE, dwarf::DW_AT_abstract_origin,
- dwarf::DW_FORM_ref4, OriginDIE);
-
- addLabel(ScopeDIE, dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr,
- DWLabel("label", StartID));
- addLabel(ScopeDIE, dwarf::DW_AT_high_pc, dwarf::DW_FORM_addr,
- DWLabel("label", EndID));
-
- InlinedSubprogramDIEs.insert(OriginDIE);
-
- // Track the start label for this inlined function.
- DenseMap<MDNode *, SmallVector<InlineInfoLabels, 4> >::iterator
- I = InlineInfo.find(InlinedSP.getNode());
-
- if (I == InlineInfo.end()) {
- InlineInfo[InlinedSP.getNode()].push_back(std::make_pair(StartID,
- ScopeDIE));
- InlinedSPNodes.push_back(InlinedSP.getNode());
- } else
- I->second.push_back(std::make_pair(StartID, ScopeDIE));
-
- StringPool.insert(InlinedSP.getName());
- StringPool.insert(getRealLinkageName(InlinedSP.getLinkageName()));
-
- DILocation DL(Scope->getInlinedAt());
- addUInt(ScopeDIE, dwarf::DW_AT_call_file, 0, ModuleCU->getID());
- addUInt(ScopeDIE, dwarf::DW_AT_call_line, 0, DL.getLineNumber());
-
- return ScopeDIE;
-}
-
-
-/// constructVariableDIE - Construct a DIE for the given DbgVariable.
-DIE *DwarfDebug::constructVariableDIE(DbgVariable *DV, DbgScope *Scope) {
- // Get the descriptor.
- const DIVariable &VD = DV->getVariable();
- StringRef Name = VD.getName();
- if (Name.empty())
- return NULL;
-
- // Translate tag to proper Dwarf tag. The result variable is dropped for
- // now.
- unsigned Tag;
- switch (VD.getTag()) {
- case dwarf::DW_TAG_return_variable:
- return NULL;
- case dwarf::DW_TAG_arg_variable:
- Tag = dwarf::DW_TAG_formal_parameter;
- break;
- case dwarf::DW_TAG_auto_variable: // fall thru
- default:
- Tag = dwarf::DW_TAG_variable;
- break;
- }
-
- // Define variable debug information entry.
- DIE *VariableDie = new DIE(Tag);
-
-
- DIE *AbsDIE = NULL;
- if (DbgVariable *AV = DV->getAbstractVariable())
- AbsDIE = AV->getDIE();
-
- if (AbsDIE) {
- DIScope DS(Scope->getScopeNode());
- DISubprogram InlinedSP = getDISubprogram(DS.getNode());
- DIE *OriginSPDIE = ModuleCU->getDIE(InlinedSP.getNode());
- (void) OriginSPDIE;
- assert (OriginSPDIE && "Unable to find Origin DIE for the SP!");
- DIE *AbsDIE = DV->getAbstractVariable()->getDIE();
- assert (AbsDIE && "Unable to find Origin DIE for the Variable!");
- addDIEEntry(VariableDie, dwarf::DW_AT_abstract_origin,
- dwarf::DW_FORM_ref4, AbsDIE);
- }
- else {
- addString(VariableDie, dwarf::DW_AT_name, dwarf::DW_FORM_string, Name);
- addSourceLine(VariableDie, &VD);
-
- // Add variable type.
- // FIXME: isBlockByrefVariable should be reformulated in terms of complex
- // addresses instead.
- if (VD.isBlockByrefVariable())
- addType(VariableDie, getBlockByrefType(VD.getType(), Name));
- else
- addType(VariableDie, VD.getType());
- }
-
- // Add variable address.
- if (!Scope->isAbstractScope()) {
- MachineLocation Location;
- unsigned FrameReg;
- int Offset = RI->getFrameIndexReference(*MF, DV->getFrameIndex(), FrameReg);
- Location.set(FrameReg, Offset);
-
- if (VD.hasComplexAddress())
- addComplexAddress(DV, VariableDie, dwarf::DW_AT_location, Location);
- else if (VD.isBlockByrefVariable())
- addBlockByrefAddress(DV, VariableDie, dwarf::DW_AT_location, Location);
- else
- addAddress(VariableDie, dwarf::DW_AT_location, Location);
- }
-
- if (Tag == dwarf::DW_TAG_formal_parameter && VD.getType().isArtificial())
- addUInt(VariableDie, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1);
- DV->setDIE(VariableDie);
- return VariableDie;
-
-}
-
-void DwarfDebug::addPubTypes(DISubprogram SP) {
- DICompositeType SPTy = SP.getType();
- unsigned SPTag = SPTy.getTag();
- if (SPTag != dwarf::DW_TAG_subroutine_type)
- return;
-
- DIArray Args = SPTy.getTypeArray();
- if (Args.isNull())
- return;
-
- for (unsigned i = 0, e = Args.getNumElements(); i != e; ++i) {
- DIType ATy(Args.getElement(i).getNode());
- if (ATy.isNull())
- continue;
- DICompositeType CATy = getDICompositeType(ATy);
- if (!CATy.isNull() && !CATy.getName().empty()) {
- if (DIEEntry *Entry = ModuleCU->getDIEEntry(CATy.getNode()))
- ModuleCU->addGlobalType(CATy.getName(), Entry->getEntry());
- }
- }
-}
-
-/// constructScopeDIE - Construct a DIE for this scope.
-DIE *DwarfDebug::constructScopeDIE(DbgScope *Scope) {
- if (!Scope)
- return NULL;
- DIScope DS(Scope->getScopeNode());
- if (DS.isNull())
- return NULL;
-
- DIE *ScopeDIE = NULL;
- if (Scope->getInlinedAt())
- ScopeDIE = constructInlinedScopeDIE(Scope);
- else if (DS.isSubprogram()) {
- if (Scope->isAbstractScope())
- ScopeDIE = ModuleCU->getDIE(DS.getNode());
- else
- ScopeDIE = updateSubprogramScopeDIE(DS.getNode());
- }
- else {
- ScopeDIE = constructLexicalScopeDIE(Scope);
- if (!ScopeDIE) return NULL;
- }
-
- // Add variables to scope.
- SmallVector<DbgVariable *, 8> &Variables = Scope->getVariables();
- for (unsigned i = 0, N = Variables.size(); i < N; ++i) {
- DIE *VariableDIE = constructVariableDIE(Variables[i], Scope);
- if (VariableDIE)
- ScopeDIE->addChild(VariableDIE);
- }
-
- // Add nested scopes.
- SmallVector<DbgScope *, 4> &Scopes = Scope->getScopes();
- for (unsigned j = 0, M = Scopes.size(); j < M; ++j) {
- // Define the Scope debug information entry.
- DIE *NestedDIE = constructScopeDIE(Scopes[j]);
- if (NestedDIE)
- ScopeDIE->addChild(NestedDIE);
- }
-
- if (DS.isSubprogram())
- addPubTypes(DISubprogram(DS.getNode()));
-
- return ScopeDIE;
-}
-
-/// GetOrCreateSourceID - Look up the source id with the given directory and
-/// source file names. If none currently exists, create a new id and insert it
-/// in the SourceIds map. This can update DirectoryNames and SourceFileNames
-/// maps as well.
-unsigned DwarfDebug::GetOrCreateSourceID(StringRef DirName, StringRef FileName) {
- unsigned DId;
- StringMap<unsigned>::iterator DI = DirectoryIdMap.find(DirName);
- if (DI != DirectoryIdMap.end()) {
- DId = DI->getValue();
- } else {
- DId = DirectoryNames.size() + 1;
- DirectoryIdMap[DirName] = DId;
- DirectoryNames.push_back(DirName);
- }
-
- unsigned FId;
- StringMap<unsigned>::iterator FI = SourceFileIdMap.find(FileName);
- if (FI != SourceFileIdMap.end()) {
- FId = FI->getValue();
- } else {
- FId = SourceFileNames.size() + 1;
- SourceFileIdMap[FileName] = FId;
- SourceFileNames.push_back(FileName);
- }
-
- DenseMap<std::pair<unsigned, unsigned>, unsigned>::iterator SI =
- SourceIdMap.find(std::make_pair(DId, FId));
- if (SI != SourceIdMap.end())
- return SI->second;
-
- unsigned SrcId = SourceIds.size() + 1; // DW_AT_decl_file cannot be 0.
- SourceIdMap[std::make_pair(DId, FId)] = SrcId;
- SourceIds.push_back(std::make_pair(DId, FId));
-
- return SrcId;
-}
-
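The source-id lookup above interns directory names and file names separately and then interns the (directory id, file id) pair. The following is a minimal standalone sketch of that same two-level scheme, with standard containers standing in for LLVM's StringMap/DenseMap; every name here (SourceIdTable, intern, getOrCreate) is illustrative and not part of the original code.

// Illustrative sketch only: the two-level source-id interning idea, with
// std::map/std::vector standing in for StringMap/DenseMap.
#include <map>
#include <string>
#include <utility>
#include <vector>

struct SourceIdTable {
  std::map<std::string, unsigned> DirIds;                    // directory name -> 1-based id
  std::map<std::string, unsigned> FileIds;                   // file name -> 1-based id
  std::map<std::pair<unsigned, unsigned>, unsigned> SrcIds;  // (dir id, file id) -> source id
  std::vector<std::pair<unsigned, unsigned> > Sources;       // source id - 1 -> (dir id, file id)

  static unsigned intern(std::map<std::string, unsigned> &M, const std::string &S) {
    std::map<std::string, unsigned>::iterator It = M.find(S);
    if (It != M.end())
      return It->second;
    unsigned Id = M.size() + 1;            // ids start at 1; DW_AT_decl_file cannot be 0
    M[S] = Id;
    return Id;
  }

  unsigned getOrCreate(const std::string &Dir, const std::string &File) {
    unsigned DId = intern(DirIds, Dir);
    unsigned FId = intern(FileIds, File);
    std::pair<unsigned, unsigned> Key(DId, FId);
    std::map<std::pair<unsigned, unsigned>, unsigned>::iterator It = SrcIds.find(Key);
    if (It != SrcIds.end())
      return It->second;
    unsigned SrcId = Sources.size() + 1;   // new unique source id
    SrcIds[Key] = SrcId;
    Sources.push_back(Key);                // keep the reverse mapping
    return SrcId;
  }
};
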
-/// getOrCreateNameSpace - Create a DIE for DINameSpace.
-DIE *DwarfDebug::getOrCreateNameSpace(DINameSpace NS) {
- DIE *NDie = ModuleCU->getDIE(NS.getNode());
- if (NDie)
- return NDie;
- NDie = new DIE(dwarf::DW_TAG_namespace);
- ModuleCU->insertDIE(NS.getNode(), NDie);
- if (!NS.getName().empty())
- addString(NDie, dwarf::DW_AT_name, dwarf::DW_FORM_string, NS.getName());
- addSourceLine(NDie, &NS);
- addToContextOwner(NDie, NS.getContext());
- return NDie;
-}
-
-CompileUnit *DwarfDebug::constructCompileUnit(MDNode *N) {
- DICompileUnit DIUnit(N);
- StringRef FN = DIUnit.getFilename();
- StringRef Dir = DIUnit.getDirectory();
- unsigned ID = GetOrCreateSourceID(Dir, FN);
-
- DIE *Die = new DIE(dwarf::DW_TAG_compile_unit);
- addSectionOffset(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4,
- DWLabel("section_line", 0), DWLabel("section_line", 0),
- false);
- addString(Die, dwarf::DW_AT_producer, dwarf::DW_FORM_string,
- DIUnit.getProducer());
- addUInt(Die, dwarf::DW_AT_language, dwarf::DW_FORM_data1,
- DIUnit.getLanguage());
- addString(Die, dwarf::DW_AT_name, dwarf::DW_FORM_string, FN);
-
- if (!Dir.empty())
- addString(Die, dwarf::DW_AT_comp_dir, dwarf::DW_FORM_string, Dir);
- if (DIUnit.isOptimized())
- addUInt(Die, dwarf::DW_AT_APPLE_optimized, dwarf::DW_FORM_flag, 1);
-
- StringRef Flags = DIUnit.getFlags();
- if (!Flags.empty())
- addString(Die, dwarf::DW_AT_APPLE_flags, dwarf::DW_FORM_string, Flags);
-
- unsigned RVer = DIUnit.getRunTimeVersion();
- if (RVer)
- addUInt(Die, dwarf::DW_AT_APPLE_major_runtime_vers,
- dwarf::DW_FORM_data1, RVer);
-
- CompileUnit *Unit = new CompileUnit(ID, Die);
- if (!ModuleCU && DIUnit.isMain()) {
- // Use first compile unit marked as isMain as the compile unit
- // for this module.
- ModuleCU = Unit;
- }
-
- CompileUnitMap[DIUnit.getNode()] = Unit;
- CompileUnits.push_back(Unit);
- return Unit;
-}
-
-void DwarfDebug::constructGlobalVariableDIE(MDNode *N) {
- DIGlobalVariable DI_GV(N);
-
- // If debug information is malformed then ignore it.
- if (DI_GV.Verify() == false)
- return;
-
- // Check for pre-existence.
- if (ModuleCU->getDIE(DI_GV.getNode()))
- return;
-
- DIE *VariableDie = createGlobalVariableDIE(DI_GV);
- if (!VariableDie)
- return;
-
- // Add to map.
- ModuleCU->insertDIE(N, VariableDie);
-
- // Add to context owner.
- DIDescriptor GVContext = DI_GV.getContext();
- // Do not create specification DIE if context is either compile unit
- // or a subprogram.
- if (DI_GV.isDefinition() && !GVContext.isCompileUnit()
- && !GVContext.isSubprogram()) {
- // Create specification DIE.
- DIE *VariableSpecDIE = new DIE(dwarf::DW_TAG_variable);
- addDIEEntry(VariableSpecDIE, dwarf::DW_AT_specification,
- dwarf::DW_FORM_ref4, VariableDie);
- DIEBlock *Block = new DIEBlock();
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_addr);
- addObjectLabel(Block, 0, dwarf::DW_FORM_udata,
- Asm->GetGlobalValueSymbol(DI_GV.getGlobal()));
- addBlock(VariableSpecDIE, dwarf::DW_AT_location, 0, Block);
- addUInt(VariableDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1);
- ModuleCU->addDie(VariableSpecDIE);
- } else {
- DIEBlock *Block = new DIEBlock();
- addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_addr);
- addObjectLabel(Block, 0, dwarf::DW_FORM_udata,
- Asm->GetGlobalValueSymbol(DI_GV.getGlobal()));
- addBlock(VariableDie, dwarf::DW_AT_location, 0, Block);
- }
- addToContextOwner(VariableDie, GVContext);
-
- // Expose as global. FIXME - need to check external flag.
- ModuleCU->addGlobal(DI_GV.getName(), VariableDie);
-
- DIType GTy = DI_GV.getType();
- if (GTy.isCompositeType() && !GTy.getName().empty()) {
- DIEEntry *Entry = ModuleCU->getDIEEntry(GTy.getNode());
- assert (Entry && "Missing global type!");
- ModuleCU->addGlobalType(GTy.getName(), Entry->getEntry());
- }
- return;
-}
-
-void DwarfDebug::constructSubprogramDIE(MDNode *N) {
- DISubprogram SP(N);
-
- // Check for pre-existence.
- if (ModuleCU->getDIE(N))
- return;
-
- if (!SP.isDefinition())
- // This is a method declaration which will be handled while constructing
- // class type.
- return;
-
- DIE *SubprogramDie = createSubprogramDIE(SP);
-
- // Add to map.
- ModuleCU->insertDIE(N, SubprogramDie);
-
- // Add to context owner.
- addToContextOwner(SubprogramDie, SP.getContext());
-
- // Expose as global.
- ModuleCU->addGlobal(SP.getName(), SubprogramDie);
-
- return;
-}
-
-/// beginModule - Emit all Dwarf sections that should come prior to the
-/// content. Create global DIEs and emit initial debug info sections.
-/// This is invoked by the target AsmPrinter.
-void DwarfDebug::beginModule(Module *M, MachineModuleInfo *mmi) {
- this->M = M;
-
- if (TimePassesIsEnabled)
- DebugTimer->startTimer();
-
- if (!MAI->doesSupportDebugInformation())
- return;
-
- DebugInfoFinder DbgFinder;
- DbgFinder.processModule(*M);
-
- // Create all the compile unit DIEs.
- for (DebugInfoFinder::iterator I = DbgFinder.compile_unit_begin(),
- E = DbgFinder.compile_unit_end(); I != E; ++I)
- constructCompileUnit(*I);
-
- if (CompileUnits.empty()) {
- if (TimePassesIsEnabled)
- DebugTimer->stopTimer();
-
- return;
- }
-
- // If the main compile unit for this module is not seen then arbitrarily
- // select the first compile unit.
- if (!ModuleCU)
- ModuleCU = CompileUnits[0];
-
- // Create DIEs for each subprogram.
- for (DebugInfoFinder::iterator I = DbgFinder.subprogram_begin(),
- E = DbgFinder.subprogram_end(); I != E; ++I)
- constructSubprogramDIE(*I);
-
- // Create DIEs for each global variable.
- for (DebugInfoFinder::iterator I = DbgFinder.global_variable_begin(),
- E = DbgFinder.global_variable_end(); I != E; ++I)
- constructGlobalVariableDIE(*I);
-
- MMI = mmi;
- shouldEmit = true;
- MMI->setDebugInfoAvailability(true);
-
- // Prime section data.
- SectionMap.insert(Asm->getObjFileLowering().getTextSection());
-
- // Print out .file directives to specify files for .loc directives. These are
- // printed out early so that they precede any .loc directives.
- if (MAI->hasDotLocAndDotFile()) {
- for (unsigned i = 1, e = getNumSourceIds()+1; i != e; ++i) {
- // Remember source id starts at 1.
- std::pair<unsigned, unsigned> Id = getSourceDirectoryAndFileIds(i);
- // FIXME: don't use sys::path for this! This should not depend on the
- // host.
- sys::Path FullPath(getSourceDirectoryName(Id.first));
- bool AppendOk =
- FullPath.appendComponent(getSourceFileName(Id.second));
- assert(AppendOk && "Could not append filename to directory!");
- AppendOk = false;
- Asm->OutStreamer.EmitDwarfFileDirective(i, FullPath.str());
- }
- }
-
- // Emit initial sections
- emitInitial();
-
- if (TimePassesIsEnabled)
- DebugTimer->stopTimer();
-}
-
-/// endModule - Emit all Dwarf sections that should come after the content.
-///
-void DwarfDebug::endModule() {
- if (!ModuleCU)
- return;
-
- if (TimePassesIsEnabled)
- DebugTimer->startTimer();
-
- // Attach DW_AT_inline attribute with inlined subprogram DIEs.
- for (SmallPtrSet<DIE *, 4>::iterator AI = InlinedSubprogramDIEs.begin(),
- AE = InlinedSubprogramDIEs.end(); AI != AE; ++AI) {
- DIE *ISP = *AI;
- addUInt(ISP, dwarf::DW_AT_inline, 0, dwarf::DW_INL_inlined);
- }
-
- // Insert top level DIEs.
- for (SmallVector<DIE *, 4>::iterator TI = TopLevelDIEsVector.begin(),
- TE = TopLevelDIEsVector.end(); TI != TE; ++TI)
- ModuleCU->getCUDie()->addChild(*TI);
-
- for (DenseMap<DIE *, MDNode *>::iterator CI = ContainingTypeMap.begin(),
- CE = ContainingTypeMap.end(); CI != CE; ++CI) {
- DIE *SPDie = CI->first;
- MDNode *N = dyn_cast_or_null<MDNode>(CI->second);
- if (!N) continue;
- DIE *NDie = ModuleCU->getDIE(N);
- if (!NDie) continue;
- addDIEEntry(SPDie, dwarf::DW_AT_containing_type, dwarf::DW_FORM_ref4, NDie);
- // FIXME - This is not the correct approach.
- // addDIEEntry(NDie, dwarf::DW_AT_containing_type, dwarf::DW_FORM_ref4, NDie);
- }
-
- // Standard sections final addresses.
- Asm->OutStreamer.SwitchSection(Asm->getObjFileLowering().getTextSection());
- EmitLabel("text_end", 0);
- Asm->OutStreamer.SwitchSection(Asm->getObjFileLowering().getDataSection());
- EmitLabel("data_end", 0);
-
- // End text sections.
- for (unsigned i = 1, N = SectionMap.size(); i <= N; ++i) {
- Asm->OutStreamer.SwitchSection(SectionMap[i]);
- EmitLabel("section_end", i);
- }
-
- // Emit common frame information.
- emitCommonDebugFrame();
-
- // Emit function debug frame information
- for (std::vector<FunctionDebugFrameInfo>::iterator I = DebugFrames.begin(),
- E = DebugFrames.end(); I != E; ++I)
- emitFunctionDebugFrame(*I);
-
- // Compute DIE offsets and sizes.
- computeSizeAndOffsets();
-
- // Emit all the DIEs into a debug info section
- emitDebugInfo();
-
- // Corresponding abbreviations into a abbrev section.
- emitAbbreviations();
-
- // Emit source line correspondence into a debug line section.
- emitDebugLines();
-
- // Emit info into a debug pubnames section.
- emitDebugPubNames();
-
- // Emit info into a debug pubtypes section.
- emitDebugPubTypes();
-
- // Emit info into a debug str section.
- emitDebugStr();
-
- // Emit info into a debug loc section.
- emitDebugLoc();
-
- // Emit info into a debug aranges section.
- EmitDebugARanges();
-
- // Emit info into a debug ranges section.
- emitDebugRanges();
-
- // Emit info into a debug macinfo section.
- emitDebugMacInfo();
-
- // Emit inline info.
- emitDebugInlineInfo();
-
- if (TimePassesIsEnabled)
- DebugTimer->stopTimer();
-}
-
-/// findAbstractVariable - Find abstract variable, if any, associated with Var.
-DbgVariable *DwarfDebug::findAbstractVariable(DIVariable &Var,
- unsigned FrameIdx,
- DILocation &ScopeLoc) {
-
- DbgVariable *AbsDbgVariable = AbstractVariables.lookup(Var.getNode());
- if (AbsDbgVariable)
- return AbsDbgVariable;
-
- DbgScope *Scope = AbstractScopes.lookup(ScopeLoc.getScope().getNode());
- if (!Scope)
- return NULL;
-
- AbsDbgVariable = new DbgVariable(Var, FrameIdx);
- Scope->addVariable(AbsDbgVariable);
- AbstractVariables[Var.getNode()] = AbsDbgVariable;
- return AbsDbgVariable;
-}
-
-/// collectVariableInfo - Populate DbgScope entries with variables' info.
-void DwarfDebug::collectVariableInfo() {
- if (!MMI) return;
-
- MachineModuleInfo::VariableDbgInfoMapTy &VMap = MMI->getVariableDbgInfo();
- for (MachineModuleInfo::VariableDbgInfoMapTy::iterator VI = VMap.begin(),
- VE = VMap.end(); VI != VE; ++VI) {
- MDNode *Var = VI->first;
- if (!Var) continue;
- DIVariable DV (Var);
- std::pair< unsigned, MDNode *> VP = VI->second;
- DILocation ScopeLoc(VP.second);
-
- DbgScope *Scope =
- ConcreteScopes.lookup(ScopeLoc.getOrigLocation().getNode());
- if (!Scope)
- Scope = DbgScopeMap.lookup(ScopeLoc.getScope().getNode());
- // If variable scope is not found then skip this variable.
- if (!Scope)
- continue;
-
- DbgVariable *RegVar = new DbgVariable(DV, VP.first);
- Scope->addVariable(RegVar);
- if (DbgVariable *AbsDbgVariable = findAbstractVariable(DV, VP.first,
- ScopeLoc))
- RegVar->setAbstractVariable(AbsDbgVariable);
- }
-}
-
-/// beginScope - Process beginning of a scope starting at Label.
-void DwarfDebug::beginScope(const MachineInstr *MI, unsigned Label) {
- InsnToDbgScopeMapTy::iterator I = DbgScopeBeginMap.find(MI);
- if (I == DbgScopeBeginMap.end())
- return;
- ScopeVector &SD = I->second;
- for (ScopeVector::iterator SDI = SD.begin(), SDE = SD.end();
- SDI != SDE; ++SDI)
- (*SDI)->setStartLabelID(Label);
-}
-
-/// endScope - Process end of a scope.
-void DwarfDebug::endScope(const MachineInstr *MI) {
- InsnToDbgScopeMapTy::iterator I = DbgScopeEndMap.find(MI);
- if (I == DbgScopeEndMap.end())
- return;
-
- unsigned Label = MMI->NextLabelID();
- Asm->printLabel(Label);
- O << '\n';
-
- SmallVector<DbgScope *, 2> &SD = I->second;
- for (SmallVector<DbgScope *, 2>::iterator SDI = SD.begin(), SDE = SD.end();
- SDI != SDE; ++SDI)
- (*SDI)->setEndLabelID(Label);
- return;
-}
-
-/// createDbgScope - Create DbgScope for the scope.
-void DwarfDebug::createDbgScope(MDNode *Scope, MDNode *InlinedAt) {
-
- if (!InlinedAt) {
- DbgScope *WScope = DbgScopeMap.lookup(Scope);
- if (WScope)
- return;
- WScope = new DbgScope(NULL, DIDescriptor(Scope), NULL);
- DbgScopeMap.insert(std::make_pair(Scope, WScope));
- if (DIDescriptor(Scope).isLexicalBlock())
- createDbgScope(DILexicalBlock(Scope).getContext().getNode(), NULL);
- return;
- }
-
- DbgScope *WScope = DbgScopeMap.lookup(InlinedAt);
- if (WScope)
- return;
-
- WScope = new DbgScope(NULL, DIDescriptor(Scope), InlinedAt);
- DbgScopeMap.insert(std::make_pair(InlinedAt, WScope));
- DILocation DL(InlinedAt);
- createDbgScope(DL.getScope().getNode(), DL.getOrigLocation().getNode());
-}
-
-/// extractScopeInformation - Scan machine instructions in this function
-/// and collect DbgScopes. Return true if at least one scope was found.
-bool DwarfDebug::extractScopeInformation() {
- // If scope information was extracted using .dbg intrinsics then there is no
- // need to extract this information by scanning each instruction.
- if (!DbgScopeMap.empty())
- return false;
-
- DenseMap<const MachineInstr *, unsigned> MIIndexMap;
- unsigned MIIndex = 0;
- // Scan each instruction and create scopes. First build working set of scopes.
- for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
- I != E; ++I) {
- for (MachineBasicBlock::const_iterator II = I->begin(), IE = I->end();
- II != IE; ++II) {
- const MachineInstr *MInsn = II;
- MIIndexMap[MInsn] = MIIndex++;
- DebugLoc DL = MInsn->getDebugLoc();
- if (DL.isUnknown()) continue;
- DILocation DLT = MF->getDILocation(DL);
- DIScope DLTScope = DLT.getScope();
- if (DLTScope.isNull()) continue;
- // There is no need to create another DIE for compile unit. For all
- // other scopes, create one DbgScope now. This will be translated
- // into a scope DIE at the end.
- if (DLTScope.isCompileUnit()) continue;
- createDbgScope(DLTScope.getNode(), DLT.getOrigLocation().getNode());
- }
- }
-
-
- // Build scope hierarchy using working set of scopes.
- for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
- I != E; ++I) {
- for (MachineBasicBlock::const_iterator II = I->begin(), IE = I->end();
- II != IE; ++II) {
- const MachineInstr *MInsn = II;
- DebugLoc DL = MInsn->getDebugLoc();
- if (DL.isUnknown()) continue;
- DILocation DLT = MF->getDILocation(DL);
- DIScope DLTScope = DLT.getScope();
- if (DLTScope.isNull()) continue;
- // There is no need to create another DIE for compile unit. For all
- // other scopes, create one DbgScope now. This will be translated
- // into a scope DIE at the end.
- if (DLTScope.isCompileUnit()) continue;
- DbgScope *Scope = getUpdatedDbgScope(DLTScope.getNode(), MInsn,
- DLT.getOrigLocation().getNode());
- Scope->setLastInsn(MInsn);
- }
- }
-
- if (!CurrentFnDbgScope)
- return false;
-
- CurrentFnDbgScope->fixInstructionMarkers(MIIndexMap);
-
- // Each scope has a first and a last instruction that mark the beginning
- // and end of the scope respectively. Create inverse maps that list the
- // scopes starting (and ending) at an instruction. One instruction may start
- // (or end) multiple scopes. Ignore scopes that are not reachable.
- SmallVector<DbgScope *, 4> WorkList;
- WorkList.push_back(CurrentFnDbgScope);
- while (!WorkList.empty()) {
- DbgScope *S = WorkList.back(); WorkList.pop_back();
-
- SmallVector<DbgScope *, 4> &Children = S->getScopes();
- if (!Children.empty())
- for (SmallVector<DbgScope *, 4>::iterator SI = Children.begin(),
- SE = Children.end(); SI != SE; ++SI)
- WorkList.push_back(*SI);
-
- if (S->isAbstractScope())
- continue;
- const MachineInstr *MI = S->getFirstInsn();
- assert (MI && "DbgScope does not have first instruction!");
-
- InsnToDbgScopeMapTy::iterator IDI = DbgScopeBeginMap.find(MI);
- if (IDI != DbgScopeBeginMap.end())
- IDI->second.push_back(S);
- else
- DbgScopeBeginMap[MI].push_back(S);
-
- MI = S->getLastInsn();
- assert (MI && "DbgScope does not have last instruction!");
- IDI = DbgScopeEndMap.find(MI);
- if (IDI != DbgScopeEndMap.end())
- IDI->second.push_back(S);
- else
- DbgScopeEndMap[MI].push_back(S);
- }
-
- return !DbgScopeMap.empty();
-}
-
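The last loop above inverts the scope tree into per-instruction "scopes that begin here" and "scopes that end here" lists. A hypothetical, simplified model of that inversion follows; SimpleScope and buildScopeMaps are invented names, and the real code additionally skips abstract scopes and asserts on missing first/last instructions.

// Hypothetical sketch: invert a scope tree into begin/end maps keyed by
// instruction index. One instruction may start (or end) several scopes.
#include <map>
#include <vector>

struct SimpleScope {
  unsigned FirstInsn;                     // index of the scope's first instruction
  unsigned LastInsn;                      // index of the scope's last instruction
  std::vector<SimpleScope *> Children;    // nested scopes
};

void buildScopeMaps(SimpleScope *Root,
                    std::map<unsigned, std::vector<SimpleScope *> > &BeginMap,
                    std::map<unsigned, std::vector<SimpleScope *> > &EndMap) {
  std::vector<SimpleScope *> WorkList;
  WorkList.push_back(Root);
  while (!WorkList.empty()) {
    SimpleScope *S = WorkList.back();
    WorkList.pop_back();
    for (unsigned i = 0, e = S->Children.size(); i != e; ++i)
      WorkList.push_back(S->Children[i]); // visit the whole subtree
    BeginMap[S->FirstInsn].push_back(S);  // scopes starting at this instruction
    EndMap[S->LastInsn].push_back(S);     // scopes ending at this instruction
  }
}
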
-/// beginFunction - Gather pre-function debug information. Assumes being
-/// emitted immediately after the function entry point.
-void DwarfDebug::beginFunction(const MachineFunction *MF) {
- this->MF = MF;
-
- if (!ShouldEmitDwarfDebug()) return;
-
- if (TimePassesIsEnabled)
- DebugTimer->startTimer();
-
- if (!extractScopeInformation())
- return;
-
- collectVariableInfo();
-
- // Assumes we are in the correct section after the entry point.
- EmitLabel("func_begin", ++SubprogramCount);
-
- // Emit label for the implicitly defined dbg.stoppoint at the start of the
- // function.
- DebugLoc FDL = MF->getDefaultDebugLoc();
- if (!FDL.isUnknown()) {
- DILocation DLT = MF->getDILocation(FDL);
- unsigned LabelID = 0;
- DISubprogram SP = getDISubprogram(DLT.getScope().getNode());
- if (!SP.isNull())
- LabelID = recordSourceLine(SP.getLineNumber(), 0,
- DLT.getScope().getNode());
- else
- LabelID = recordSourceLine(DLT.getLineNumber(),
- DLT.getColumnNumber(),
- DLT.getScope().getNode());
- Asm->printLabel(LabelID);
- O << '\n';
- }
- if (TimePassesIsEnabled)
- DebugTimer->stopTimer();
-}
-
-/// endFunction - Gather and emit post-function debug information.
-///
-void DwarfDebug::endFunction(const MachineFunction *MF) {
- if (!ShouldEmitDwarfDebug()) return;
-
- if (TimePassesIsEnabled)
- DebugTimer->startTimer();
-
- if (DbgScopeMap.empty())
- return;
-
- if (CurrentFnDbgScope) {
- // Define end label for subprogram.
- EmitLabel("func_end", SubprogramCount);
-
- // Get function line info.
- if (!Lines.empty()) {
- // Get section line info.
- unsigned ID = SectionMap.insert(Asm->getCurrentSection());
- if (SectionSourceLines.size() < ID) SectionSourceLines.resize(ID);
- std::vector<SrcLineInfo> &SectionLineInfos = SectionSourceLines[ID-1];
- // Append the function info to section info.
- SectionLineInfos.insert(SectionLineInfos.end(),
- Lines.begin(), Lines.end());
- }
-
- // Construct abstract scopes.
- for (SmallVector<DbgScope *, 4>::iterator AI = AbstractScopesList.begin(),
- AE = AbstractScopesList.end(); AI != AE; ++AI)
- constructScopeDIE(*AI);
-
- constructScopeDIE(CurrentFnDbgScope);
-
- DebugFrames.push_back(FunctionDebugFrameInfo(SubprogramCount,
- MMI->getFrameMoves()));
- }
-
- // Clear debug info
- CurrentFnDbgScope = NULL;
- DbgScopeMap.clear();
- DbgScopeBeginMap.clear();
- DbgScopeEndMap.clear();
- ConcreteScopes.clear();
- AbstractScopesList.clear();
- Lines.clear();
-
- if (TimePassesIsEnabled)
- DebugTimer->stopTimer();
-}
-
-/// recordSourceLine - Records location information and associates it with a
-/// label. Returns a unique label ID used to generate a label and provide
-/// correspondence to the source line list.
-unsigned DwarfDebug::recordSourceLine(unsigned Line, unsigned Col,
- MDNode *S) {
- if (!MMI)
- return 0;
-
- if (TimePassesIsEnabled)
- DebugTimer->startTimer();
-
- StringRef Dir;
- StringRef Fn;
-
- DIDescriptor Scope(S);
- if (Scope.isCompileUnit()) {
- DICompileUnit CU(S);
- Dir = CU.getDirectory();
- Fn = CU.getFilename();
- } else if (Scope.isSubprogram()) {
- DISubprogram SP(S);
- Dir = SP.getDirectory();
- Fn = SP.getFilename();
- } else if (Scope.isLexicalBlock()) {
- DILexicalBlock DB(S);
- Dir = DB.getDirectory();
- Fn = DB.getFilename();
- } else
- assert (0 && "Unexpected scope info");
-
- unsigned Src = GetOrCreateSourceID(Dir, Fn);
- unsigned ID = MMI->NextLabelID();
- Lines.push_back(SrcLineInfo(Line, Col, Src, ID));
-
- if (TimePassesIsEnabled)
- DebugTimer->stopTimer();
-
- return ID;
-}
-
-/// getOrCreateSourceID - Public version of GetOrCreateSourceID. This can be
-/// timed. Look up the source id with the given directory and source file
-/// names. If none currently exists, create a new id and insert it in the
-/// SourceIds map. This can update DirectoryNames and SourceFileNames maps as
-/// well.
-unsigned DwarfDebug::getOrCreateSourceID(const std::string &DirName,
- const std::string &FileName) {
- if (TimePassesIsEnabled)
- DebugTimer->startTimer();
-
- unsigned SrcId = GetOrCreateSourceID(DirName.c_str(), FileName.c_str());
-
- if (TimePassesIsEnabled)
- DebugTimer->stopTimer();
-
- return SrcId;
-}
-
-//===----------------------------------------------------------------------===//
-// Emit Methods
-//===----------------------------------------------------------------------===//
-
-/// computeSizeAndOffset - Compute the size and offset of a DIE.
-///
-unsigned
-DwarfDebug::computeSizeAndOffset(DIE *Die, unsigned Offset, bool Last) {
- // Get the children.
- const std::vector<DIE *> &Children = Die->getChildren();
-
- // If not last sibling and has children then add sibling offset attribute.
- if (!Last && !Children.empty()) Die->addSiblingOffset();
-
- // Record the abbreviation.
- assignAbbrevNumber(Die->getAbbrev());
-
- // Get the abbreviation for this DIE.
- unsigned AbbrevNumber = Die->getAbbrevNumber();
- const DIEAbbrev *Abbrev = Abbreviations[AbbrevNumber - 1];
-
- // Set DIE offset
- Die->setOffset(Offset);
-
- // Start the size with the size of abbreviation code.
- Offset += MCAsmInfo::getULEB128Size(AbbrevNumber);
-
- const SmallVector<DIEValue*, 32> &Values = Die->getValues();
- const SmallVector<DIEAbbrevData, 8> &AbbrevData = Abbrev->getData();
-
- // Size the DIE attribute values.
- for (unsigned i = 0, N = Values.size(); i < N; ++i)
- // Size attribute value.
- Offset += Values[i]->SizeOf(TD, AbbrevData[i].getForm());
-
- // Size the DIE children if any.
- if (!Children.empty()) {
- assert(Abbrev->getChildrenFlag() == dwarf::DW_CHILDREN_yes &&
- "Children flag not set");
-
- for (unsigned j = 0, M = Children.size(); j < M; ++j)
- Offset = computeSizeAndOffset(Children[j], Offset, (j + 1) == M);
-
- // End of children marker.
- Offset += sizeof(int8_t);
- }
-
- Die->setSize(Offset - Die->getOffset());
- return Offset;
-}
-
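As a rough standalone model of the recursive layout pass above: each DIE is assigned its offset first, and its size is the abbreviation code plus its attribute values plus its children (plus an end-of-children marker). The sketch below assumes a one-byte abbreviation code and precomputed value sizes; LayoutNode and layout are illustrative names only.

// Illustrative only: simplified size/offset computation over a DIE-like tree.
#include <vector>

struct LayoutNode {
  unsigned Offset;
  unsigned Size;
  std::vector<unsigned> ValueSizes;        // size of each attribute value, in bytes
  std::vector<LayoutNode *> Children;
  LayoutNode() : Offset(0), Size(0) {}
};

unsigned layout(LayoutNode *N, unsigned Offset) {
  N->Offset = Offset;
  Offset += 1;                             // abbreviation code (assume 1-byte ULEB128)
  for (unsigned i = 0, e = N->ValueSizes.size(); i != e; ++i)
    Offset += N->ValueSizes[i];            // attribute values
  if (!N->Children.empty()) {
    for (unsigned j = 0, m = N->Children.size(); j != m; ++j)
      Offset = layout(N->Children[j], Offset);
    Offset += 1;                           // end-of-children marker
  }
  N->Size = Offset - N->Offset;
  return Offset;
}
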
-/// computeSizeAndOffsets - Compute the size and offset of all the DIEs.
-///
-void DwarfDebug::computeSizeAndOffsets() {
- // Compute size of compile unit header.
- static unsigned Offset =
- sizeof(int32_t) + // Length of Compilation Unit Info
- sizeof(int16_t) + // DWARF version number
- sizeof(int32_t) + // Offset Into Abbrev. Section
- sizeof(int8_t); // Pointer Size (in bytes)
-
- computeSizeAndOffset(ModuleCU->getCUDie(), Offset, true);
- CompileUnitOffsets[ModuleCU] = 0;
-}
-
-/// emitInitial - Emit initial Dwarf declarations. This is necessary for cc
-/// tools to recognize that the object file contains Dwarf information.
-void DwarfDebug::emitInitial() {
- // Check to see if we already emitted initial headers.
- if (didInitial) return;
- didInitial = true;
-
- const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
-
- // Dwarf sections base addresses.
- if (MAI->doesDwarfRequireFrameSection()) {
- Asm->OutStreamer.SwitchSection(TLOF.getDwarfFrameSection());
- EmitLabel("section_debug_frame", 0);
- }
-
- Asm->OutStreamer.SwitchSection(TLOF.getDwarfInfoSection());
- EmitLabel("section_info", 0);
- Asm->OutStreamer.SwitchSection(TLOF.getDwarfAbbrevSection());
- EmitLabel("section_abbrev", 0);
- Asm->OutStreamer.SwitchSection(TLOF.getDwarfARangesSection());
- EmitLabel("section_aranges", 0);
-
- if (const MCSection *LineInfoDirective = TLOF.getDwarfMacroInfoSection()) {
- Asm->OutStreamer.SwitchSection(LineInfoDirective);
- EmitLabel("section_macinfo", 0);
- }
-
- Asm->OutStreamer.SwitchSection(TLOF.getDwarfLineSection());
- EmitLabel("section_line", 0);
- Asm->OutStreamer.SwitchSection(TLOF.getDwarfLocSection());
- EmitLabel("section_loc", 0);
- Asm->OutStreamer.SwitchSection(TLOF.getDwarfPubNamesSection());
- EmitLabel("section_pubnames", 0);
- Asm->OutStreamer.SwitchSection(TLOF.getDwarfPubTypesSection());
- EmitLabel("section_pubtypes", 0);
- Asm->OutStreamer.SwitchSection(TLOF.getDwarfStrSection());
- EmitLabel("section_str", 0);
- Asm->OutStreamer.SwitchSection(TLOF.getDwarfRangesSection());
- EmitLabel("section_ranges", 0);
-
- Asm->OutStreamer.SwitchSection(TLOF.getTextSection());
- EmitLabel("text_begin", 0);
- Asm->OutStreamer.SwitchSection(TLOF.getDataSection());
- EmitLabel("data_begin", 0);
-}
-
-/// emitDIE - Recursively emits a debug information entry.
-///
-void DwarfDebug::emitDIE(DIE *Die) {
- // Get the abbreviation for this DIE.
- unsigned AbbrevNumber = Die->getAbbrevNumber();
- const DIEAbbrev *Abbrev = Abbreviations[AbbrevNumber - 1];
-
- Asm->O << '\n';
-
- // Emit the code (index) for the abbreviation.
- if (Asm->VerboseAsm)
- Asm->OutStreamer.AddComment("Abbrev [" + Twine(AbbrevNumber) + "] 0x" +
- Twine::utohexstr(Die->getOffset()) + ":0x" +
- Twine::utohexstr(Die->getSize()) + " " +
- dwarf::TagString(Abbrev->getTag()));
- EmitULEB128(AbbrevNumber);
-
- SmallVector<DIEValue*, 32> &Values = Die->getValues();
- const SmallVector<DIEAbbrevData, 8> &AbbrevData = Abbrev->getData();
-
- // Emit the DIE attribute values.
- for (unsigned i = 0, N = Values.size(); i < N; ++i) {
- unsigned Attr = AbbrevData[i].getAttribute();
- unsigned Form = AbbrevData[i].getForm();
- assert(Form && "Too many attributes for DIE (check abbreviation)");
-
- if (Asm->VerboseAsm)
- Asm->OutStreamer.AddComment(dwarf::AttributeString(Attr));
-
- switch (Attr) {
- case dwarf::DW_AT_sibling:
- Asm->EmitInt32(Die->getSiblingOffset());
- break;
- case dwarf::DW_AT_abstract_origin: {
- DIEEntry *E = cast<DIEEntry>(Values[i]);
- DIE *Origin = E->getEntry();
- unsigned Addr = Origin->getOffset();
- Asm->EmitInt32(Addr);
- break;
- }
- default:
- // Emit an attribute using the defined form.
- Values[i]->EmitValue(this, Form);
- O << "\n"; // REMOVE This once all EmitValue impls emit their own newline.
- break;
- }
- }
-
- // Emit the DIE children if any.
- if (Abbrev->getChildrenFlag() == dwarf::DW_CHILDREN_yes) {
- const std::vector<DIE *> &Children = Die->getChildren();
-
- for (unsigned j = 0, M = Children.size(); j < M; ++j)
- emitDIE(Children[j]);
-
- Asm->EmitInt8(0); EOL("End Of Children Mark");
- }
-}
-
-/// emitDebugInfo - Emit the debug info section.
-///
-void DwarfDebug::emitDebugInfo() {
- // Start debug info section.
- Asm->OutStreamer.SwitchSection(
- Asm->getObjFileLowering().getDwarfInfoSection());
- DIE *Die = ModuleCU->getCUDie();
-
- // Emit the compile units header.
- EmitLabel("info_begin", ModuleCU->getID());
-
- // Emit size of content not including length itself
- unsigned ContentSize = Die->getSize() +
- sizeof(int16_t) + // DWARF version number
- sizeof(int32_t) + // Offset Into Abbrev. Section
- sizeof(int8_t) + // Pointer Size (in bytes)
- sizeof(int32_t); // FIXME - extra pad for gdb bug.
-
- Asm->EmitInt32(ContentSize); EOL("Length of Compilation Unit Info");
- Asm->EmitInt16(dwarf::DWARF_VERSION); EOL("DWARF version number");
- EmitSectionOffset("abbrev_begin", "section_abbrev", 0, 0, true, false);
- EOL("Offset Into Abbrev. Section");
- Asm->EmitInt8(TD->getPointerSize()); EOL("Address Size (in bytes)");
-
- emitDIE(Die);
- // FIXME - extra padding for gdb bug.
- Asm->EmitInt8(0); EOL("Extra Pad For GDB");
- Asm->EmitInt8(0); EOL("Extra Pad For GDB");
- Asm->EmitInt8(0); EOL("Extra Pad For GDB");
- Asm->EmitInt8(0); EOL("Extra Pad For GDB");
- EmitLabel("info_end", ModuleCU->getID());
- Asm->O << '\n';
-}
-
-/// emitAbbreviations - Emit the abbreviation section.
-///
-void DwarfDebug::emitAbbreviations() const {
- // Check to see if it is worth the effort.
- if (!Abbreviations.empty()) {
- // Start the debug abbrev section.
- Asm->OutStreamer.SwitchSection(
- Asm->getObjFileLowering().getDwarfAbbrevSection());
-
- EmitLabel("abbrev_begin", 0);
-
- // For each abbreviation.
- for (unsigned i = 0, N = Abbreviations.size(); i < N; ++i) {
- // Get abbreviation data
- const DIEAbbrev *Abbrev = Abbreviations[i];
-
- // Emit the abbreviation's code (base-1 index).
- EmitULEB128(Abbrev->getNumber(), "Abbreviation Code");
-
- // Emit the abbreviations data.
- Abbrev->Emit(this);
- Asm->O << '\n';
- }
-
- // Mark end of abbreviations.
- EmitULEB128(0, "EOM(3)");
-
- EmitLabel("abbrev_end", 0);
- Asm->O << '\n';
- }
-}
-
-/// emitEndOfLineMatrix - Emit the last address of the section and the end of
-/// the line matrix.
-///
-void DwarfDebug::emitEndOfLineMatrix(unsigned SectionEnd) {
- // Define last address of section.
- Asm->EmitInt8(0); EOL("Extended Op");
- Asm->EmitInt8(TD->getPointerSize() + 1); EOL("Op size");
- Asm->EmitInt8(dwarf::DW_LNE_set_address); EOL("DW_LNE_set_address");
- EmitReference("section_end", SectionEnd); EOL("Section end label");
-
- // Mark end of matrix.
- Asm->EmitInt8(0); EOL("DW_LNE_end_sequence");
- Asm->EmitInt8(1);
- Asm->EmitInt8(1);
-}
-
-/// emitDebugLines - Emit source line information.
-///
-void DwarfDebug::emitDebugLines() {
- // If the target is using .loc/.file, the assembler will be emitting the
- // .debug_line table automatically.
- if (MAI->hasDotLocAndDotFile())
- return;
-
- // Minimum line delta (the line base for special opcodes), here -10.
- const int MinLineDelta = -(dwarf::DW_LNS_fixed_advance_pc + 1);
- // Maximum line delta, thus ranging from -10..(255-10).
- const int MaxLineDelta = 255 + MinLineDelta;
-
- // Start the dwarf line section.
- Asm->OutStreamer.SwitchSection(
- Asm->getObjFileLowering().getDwarfLineSection());
-
- // Construct the section header.
- EmitDifference("line_end", 0, "line_begin", 0, true);
- EOL("Length of Source Line Info");
- EmitLabel("line_begin", 0);
-
- Asm->EmitInt16(dwarf::DWARF_VERSION); EOL("DWARF version number");
-
- EmitDifference("line_prolog_end", 0, "line_prolog_begin", 0, true);
- EOL("Prolog Length");
- EmitLabel("line_prolog_begin", 0);
-
- Asm->EmitInt8(1); EOL("Minimum Instruction Length");
- Asm->EmitInt8(1); EOL("Default is_stmt_start flag");
- Asm->EmitInt8(MinLineDelta); EOL("Line Base Value (Special Opcodes)");
- Asm->EmitInt8(MaxLineDelta); EOL("Line Range Value (Special Opcodes)");
- Asm->EmitInt8(-MinLineDelta); EOL("Special Opcode Base");
-
- // Line number standard opcode encodings argument count
- Asm->EmitInt8(0); EOL("DW_LNS_copy arg count");
- Asm->EmitInt8(1); EOL("DW_LNS_advance_pc arg count");
- Asm->EmitInt8(1); EOL("DW_LNS_advance_line arg count");
- Asm->EmitInt8(1); EOL("DW_LNS_set_file arg count");
- Asm->EmitInt8(1); EOL("DW_LNS_set_column arg count");
- Asm->EmitInt8(0); EOL("DW_LNS_negate_stmt arg count");
- Asm->EmitInt8(0); EOL("DW_LNS_set_basic_block arg count");
- Asm->EmitInt8(0); EOL("DW_LNS_const_add_pc arg count");
- Asm->EmitInt8(1); EOL("DW_LNS_fixed_advance_pc arg count");
-
- // Emit directories.
- for (unsigned DI = 1, DE = getNumSourceDirectories()+1; DI != DE; ++DI) {
- const std::string &Dir = getSourceDirectoryName(DI);
- if (Asm->VerboseAsm) Asm->OutStreamer.AddComment("Directory");
- Asm->OutStreamer.EmitBytes(StringRef(Dir.c_str(), Dir.size()+1), 0);
- }
-
- Asm->EmitInt8(0); EOL("End of directories");
-
- // Emit files.
- for (unsigned SI = 1, SE = getNumSourceIds()+1; SI != SE; ++SI) {
- // Remember source id starts at 1.
- std::pair<unsigned, unsigned> Id = getSourceDirectoryAndFileIds(SI);
- const std::string &FN = getSourceFileName(Id.second);
- if (Asm->VerboseAsm) Asm->OutStreamer.AddComment("Source");
- Asm->OutStreamer.EmitBytes(StringRef(FN.c_str(), FN.size()+1), 0);
-
- EmitULEB128(Id.first, "Directory #");
- EmitULEB128(0, "Mod date");
- EmitULEB128(0, "File size");
- }
-
- Asm->EmitInt8(0); EOL("End of files");
-
- EmitLabel("line_prolog_end", 0);
-
- // A sequence for each text section.
- unsigned SecSrcLinesSize = SectionSourceLines.size();
-
- for (unsigned j = 0; j < SecSrcLinesSize; ++j) {
- // Isolate current sections line info.
- const std::vector<SrcLineInfo> &LineInfos = SectionSourceLines[j];
-
- /*if (Asm->isVerbose()) {
- const MCSection *S = SectionMap[j + 1];
- O << '\t' << MAI->getCommentString() << " Section"
- << S->getName() << '\n';
- }*/
- Asm->O << '\n';
-
- // Dwarf assumes we start with first line of first source file.
- unsigned Source = 1;
- unsigned Line = 1;
-
- // Construct rows of the address, source, line, column matrix.
- for (unsigned i = 0, N = LineInfos.size(); i < N; ++i) {
- const SrcLineInfo &LineInfo = LineInfos[i];
- unsigned LabelID = MMI->MappedLabel(LineInfo.getLabelID());
- if (!LabelID) continue;
-
- if (LineInfo.getLine() == 0) continue;
-
- if (!Asm->isVerbose())
- Asm->O << '\n';
- else {
- std::pair<unsigned, unsigned> SourceID =
- getSourceDirectoryAndFileIds(LineInfo.getSourceID());
- O << '\t' << MAI->getCommentString() << ' '
- << getSourceDirectoryName(SourceID.first) << '/'
- << getSourceFileName(SourceID.second)
- << ':' << utostr_32(LineInfo.getLine()) << '\n';
- }
-
- // Define the line address.
- Asm->EmitInt8(0); EOL("Extended Op");
- Asm->EmitInt8(TD->getPointerSize() + 1); EOL("Op size");
- Asm->EmitInt8(dwarf::DW_LNE_set_address); EOL("DW_LNE_set_address");
- EmitReference("label", LabelID); EOL("Location label");
-
- // If change of source, then switch to the new source.
- if (Source != LineInfo.getSourceID()) {
- Source = LineInfo.getSourceID();
- Asm->EmitInt8(dwarf::DW_LNS_set_file); EOL("DW_LNS_set_file");
- EmitULEB128(Source, "New Source");
- }
-
- // If change of line.
- if (Line != LineInfo.getLine()) {
- // Determine offset.
- int Offset = LineInfo.getLine() - Line;
- int Delta = Offset - MinLineDelta;
-
- // Update line.
- Line = LineInfo.getLine();
-
- // If delta is small enough and in range...
- if (Delta >= 0 && Delta < (MaxLineDelta - 1)) {
- // ... then use fast opcode.
- Asm->EmitInt8(Delta - MinLineDelta); EOL("Line Delta");
- } else {
- // ... otherwise use long hand.
- Asm->EmitInt8(dwarf::DW_LNS_advance_line);
- EOL("DW_LNS_advance_line");
- EmitSLEB128(Offset, "Line Offset");
- Asm->EmitInt8(dwarf::DW_LNS_copy); EOL("DW_LNS_copy");
- }
- } else {
- // Copy the previous row (different address or source)
- Asm->EmitInt8(dwarf::DW_LNS_copy); EOL("DW_LNS_copy");
- }
- }
-
- emitEndOfLineMatrix(j + 1);
- }
-
- if (SecSrcLinesSize == 0)
- // Because we're emitting a debug_line section, we still need a line
- // table. The linker and friends expect it to exist. If there's nothing to
- // put into it, emit an empty table.
- emitEndOfLineMatrix(1);
-
- EmitLabel("line_end", 0);
- Asm->O << '\n';
-}
-
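A small, self-contained model of the line-delta choice made in the loop above: deltas that fit the special-opcode window are emitted as a single byte, anything else falls back to DW_LNS_advance_line followed by DW_LNS_copy. The constants mirror the MinLineDelta/MaxLineDelta computation; this is only a sketch of the branch, not the emitter itself.

// Sketch only: mirrors the special-opcode vs. long-hand decision above.
#include <cstdio>

int main() {
  const int MinLineDelta = -10;               // -(DW_LNS_fixed_advance_pc + 1)
  const int MaxLineDelta = 255 + MinLineDelta;

  const int LineOffsets[] = { 1, 7, 300, -30 };
  for (unsigned i = 0; i != sizeof(LineOffsets) / sizeof(LineOffsets[0]); ++i) {
    int Offset = LineOffsets[i];
    int Delta = Offset - MinLineDelta;
    if (Delta >= 0 && Delta < (MaxLineDelta - 1))
      std::printf("line %+d: special opcode byte %d\n", Offset, Delta - MinLineDelta);
    else
      std::printf("line %+d: DW_LNS_advance_line %d, then DW_LNS_copy\n", Offset, Offset);
  }
  return 0;
}
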
-/// emitCommonDebugFrame - Emit common frame info into a debug frame section.
-///
-void DwarfDebug::emitCommonDebugFrame() {
- if (!MAI->doesDwarfRequireFrameSection())
- return;
-
- int stackGrowth =
- Asm->TM.getFrameInfo()->getStackGrowthDirection() ==
- TargetFrameInfo::StackGrowsUp ?
- TD->getPointerSize() : -TD->getPointerSize();
-
- // Start the dwarf frame section.
- Asm->OutStreamer.SwitchSection(
- Asm->getObjFileLowering().getDwarfFrameSection());
-
- EmitLabel("debug_frame_common", 0);
- EmitDifference("debug_frame_common_end", 0,
- "debug_frame_common_begin", 0, true);
- EOL("Length of Common Information Entry");
-
- EmitLabel("debug_frame_common_begin", 0);
- Asm->EmitInt32((int)dwarf::DW_CIE_ID);
- EOL("CIE Identifier Tag");
- Asm->EmitInt8(dwarf::DW_CIE_VERSION);
- EOL("CIE Version");
- Asm->OutStreamer.EmitIntValue(0, 1, /*addrspace*/0); // nul terminator.
- EOL("CIE Augmentation");
- EmitULEB128(1, "CIE Code Alignment Factor");
- EmitSLEB128(stackGrowth, "CIE Data Alignment Factor");
- Asm->EmitInt8(RI->getDwarfRegNum(RI->getRARegister(), false));
- EOL("CIE RA Column");
-
- std::vector<MachineMove> Moves;
- RI->getInitialFrameState(Moves);
-
- EmitFrameMoves(NULL, 0, Moves, false);
-
- Asm->EmitAlignment(2, 0, 0, false);
- EmitLabel("debug_frame_common_end", 0);
- Asm->O << '\n';
-}
-
-/// emitFunctionDebugFrame - Emit per function frame info into a debug frame
-/// section.
-void
-DwarfDebug::emitFunctionDebugFrame(const FunctionDebugFrameInfo&DebugFrameInfo){
- if (!MAI->doesDwarfRequireFrameSection())
- return;
-
- // Start the dwarf frame section.
- Asm->OutStreamer.SwitchSection(
- Asm->getObjFileLowering().getDwarfFrameSection());
-
- EmitDifference("debug_frame_end", DebugFrameInfo.Number,
- "debug_frame_begin", DebugFrameInfo.Number, true);
- EOL("Length of Frame Information Entry");
-
- EmitLabel("debug_frame_begin", DebugFrameInfo.Number);
-
- EmitSectionOffset("debug_frame_common", "section_debug_frame",
- 0, 0, true, false);
- EOL("FDE CIE offset");
-
- EmitReference("func_begin", DebugFrameInfo.Number);
- EOL("FDE initial location");
- EmitDifference("func_end", DebugFrameInfo.Number,
- "func_begin", DebugFrameInfo.Number);
- EOL("FDE address range");
-
- EmitFrameMoves("func_begin", DebugFrameInfo.Number, DebugFrameInfo.Moves,
- false);
-
- Asm->EmitAlignment(2, 0, 0, false);
- EmitLabel("debug_frame_end", DebugFrameInfo.Number);
- Asm->O << '\n';
-}
-
-/// emitDebugPubNames - Emit visible names into a debug pubnames section.
-///
-void DwarfDebug::emitDebugPubNames() {
- // Start the dwarf pubnames section.
- Asm->OutStreamer.SwitchSection(
- Asm->getObjFileLowering().getDwarfPubNamesSection());
-
- EmitDifference("pubnames_end", ModuleCU->getID(),
- "pubnames_begin", ModuleCU->getID(), true);
- EOL("Length of Public Names Info");
-
- EmitLabel("pubnames_begin", ModuleCU->getID());
-
- Asm->EmitInt16(dwarf::DWARF_VERSION); EOL("DWARF Version");
-
- EmitSectionOffset("info_begin", "section_info",
- ModuleCU->getID(), 0, true, false);
- EOL("Offset of Compilation Unit Info");
-
- EmitDifference("info_end", ModuleCU->getID(), "info_begin", ModuleCU->getID(),
- true);
- EOL("Compilation Unit Length");
-
- const StringMap<DIE*> &Globals = ModuleCU->getGlobals();
- for (StringMap<DIE*>::const_iterator
- GI = Globals.begin(), GE = Globals.end(); GI != GE; ++GI) {
- const char *Name = GI->getKeyData();
- DIE * Entity = GI->second;
-
- Asm->EmitInt32(Entity->getOffset()); EOL("DIE offset");
-
- if (Asm->VerboseAsm)
- Asm->OutStreamer.AddComment("External Name");
- Asm->OutStreamer.EmitBytes(StringRef(Name, strlen(Name)+1), 0);
- }
-
- Asm->EmitInt32(0); EOL("End Mark");
- EmitLabel("pubnames_end", ModuleCU->getID());
- Asm->O << '\n';
-}
-
-void DwarfDebug::emitDebugPubTypes() {
- // Start the dwarf pubtypes section.
- Asm->OutStreamer.SwitchSection(
- Asm->getObjFileLowering().getDwarfPubTypesSection());
- EmitDifference("pubtypes_end", ModuleCU->getID(),
- "pubtypes_begin", ModuleCU->getID(), true);
- EOL("Length of Public Types Info");
-
- EmitLabel("pubtypes_begin", ModuleCU->getID());
-
- if (Asm->VerboseAsm) Asm->OutStreamer.AddComment("DWARF Version");
- Asm->EmitInt16(dwarf::DWARF_VERSION);
-
- EmitSectionOffset("info_begin", "section_info",
- ModuleCU->getID(), 0, true, false);
- EOL("Offset of Compilation ModuleCU Info");
-
- EmitDifference("info_end", ModuleCU->getID(), "info_begin", ModuleCU->getID(),
- true);
- EOL("Compilation ModuleCU Length");
-
- const StringMap<DIE*> &Globals = ModuleCU->getGlobalTypes();
- for (StringMap<DIE*>::const_iterator
- GI = Globals.begin(), GE = Globals.end(); GI != GE; ++GI) {
- const char *Name = GI->getKeyData();
- DIE * Entity = GI->second;
-
- if (Asm->VerboseAsm) Asm->OutStreamer.AddComment("DIE offset");
- Asm->EmitInt32(Entity->getOffset());
-
- if (Asm->VerboseAsm) Asm->OutStreamer.AddComment("External Name");
- Asm->OutStreamer.EmitBytes(StringRef(Name, GI->getKeyLength()+1), 0);
- }
-
- Asm->EmitInt32(0); EOL("End Mark");
- EmitLabel("pubtypes_end", ModuleCU->getID());
- Asm->O << '\n';
-}
-
-/// emitDebugStr - Emit the string pool into a debug str section.
-///
-void DwarfDebug::emitDebugStr() {
- // Check to see if it is worth the effort.
- if (!StringPool.empty()) {
- // Start the dwarf str section.
- Asm->OutStreamer.SwitchSection(
- Asm->getObjFileLowering().getDwarfStrSection());
-
- // For each string in the string pool.
- for (unsigned StringID = 1, N = StringPool.size();
- StringID <= N; ++StringID) {
- // Emit a label for reference from debug information entries.
- EmitLabel("string", StringID);
-
- // Emit the string itself.
- const std::string &String = StringPool[StringID];
- Asm->OutStreamer.EmitBytes(StringRef(String.c_str(), String.size()+1), 0);
- }
-
- Asm->O << '\n';
- }
-}
-
-/// emitDebugLoc - Emit location lists into a debug loc section.
-///
-void DwarfDebug::emitDebugLoc() {
- // Start the dwarf loc section.
- Asm->OutStreamer.SwitchSection(
- Asm->getObjFileLowering().getDwarfLocSection());
-}
-
-/// EmitDebugARanges - Emit address ranges into a debug aranges section.
-///
-void DwarfDebug::EmitDebugARanges() {
- // Start the dwarf aranges section.
- Asm->OutStreamer.SwitchSection(
- Asm->getObjFileLowering().getDwarfARangesSection());
-
- // FIXME - Mock up
-#if 0
- CompileUnit *Unit = GetBaseCompileUnit();
-
- // Don't include size of length
- Asm->EmitInt32(0x1c); EOL("Length of Address Ranges Info");
-
- Asm->EmitInt16(dwarf::DWARF_VERSION); EOL("Dwarf Version");
-
- EmitReference("info_begin", Unit->getID());
- EOL("Offset of Compilation Unit Info");
-
- Asm->EmitInt8(TD->getPointerSize()); EOL("Size of Address");
-
- Asm->EmitInt8(0); EOL("Size of Segment Descriptor");
-
- Asm->EmitInt16(0); EOL("Pad (1)");
- Asm->EmitInt16(0); EOL("Pad (2)");
-
- // Range 1
- EmitReference("text_begin", 0); EOL("Address");
- EmitDifference("text_end", 0, "text_begin", 0, true); EOL("Length");
-
- Asm->EmitInt32(0); EOL("EOM (1)");
- Asm->EmitInt32(0); EOL("EOM (2)");
-#endif
-}
-
-/// emitDebugRanges - Emit address ranges into a debug ranges section.
-///
-void DwarfDebug::emitDebugRanges() {
- // Start the dwarf ranges section.
- Asm->OutStreamer.SwitchSection(
- Asm->getObjFileLowering().getDwarfRangesSection());
-}
-
-/// emitDebugMacInfo - Emit macro information into a debug macinfo section.
-///
-void DwarfDebug::emitDebugMacInfo() {
- if (const MCSection *LineInfo =
- Asm->getObjFileLowering().getDwarfMacroInfoSection()) {
- // Start the dwarf macinfo section.
- Asm->OutStreamer.SwitchSection(LineInfo);
- }
-}
-
-/// emitDebugInlineInfo - Emit inline info using the following format.
-/// Section Header:
-/// 1. length of section
-/// 2. Dwarf version number
-/// 3. address size.
-///
-/// Entries (one "entry" for each function that was inlined):
-///
-/// 1. offset into __debug_str section for MIPS linkage name, if exists;
-/// otherwise offset into __debug_str for regular function name.
-/// 2. offset into __debug_str section for regular function name.
-/// 3. an unsigned LEB128 number indicating the number of distinct inlining
-/// instances for the function.
-///
-/// The rest of the entry consists of a {die_offset, low_pc} pair for each
-/// inlined instance; the die_offset points to the inlined_subroutine die in the
-/// __debug_info section, and the low_pc is the starting address for the
-/// inlining instance.
-void DwarfDebug::emitDebugInlineInfo() {
- if (!MAI->doesDwarfUsesInlineInfoSection())
- return;
-
- if (!ModuleCU)
- return;
-
- Asm->OutStreamer.SwitchSection(
- Asm->getObjFileLowering().getDwarfDebugInlineSection());
-
- EmitDifference("debug_inlined_end", 1,
- "debug_inlined_begin", 1, true);
- EOL("Length of Debug Inlined Information Entry");
-
- EmitLabel("debug_inlined_begin", 1);
-
- Asm->EmitInt16(dwarf::DWARF_VERSION); EOL("Dwarf Version");
- Asm->EmitInt8(TD->getPointerSize()); EOL("Address Size (in bytes)");
-
- for (SmallVector<MDNode *, 4>::iterator I = InlinedSPNodes.begin(),
- E = InlinedSPNodes.end(); I != E; ++I) {
-
- MDNode *Node = *I;
- DenseMap<MDNode *, SmallVector<InlineInfoLabels, 4> >::iterator II
- = InlineInfo.find(Node);
- SmallVector<InlineInfoLabels, 4> &Labels = II->second;
- DISubprogram SP(Node);
- StringRef LName = SP.getLinkageName();
- StringRef Name = SP.getName();
-
- if (LName.empty()) {
- Asm->OutStreamer.EmitBytes(Name, 0);
- Asm->OutStreamer.EmitIntValue(0, 1, 0); // nul terminator.
- } else
- EmitSectionOffset("string", "section_str",
- StringPool.idFor(getRealLinkageName(LName)), false, true);
-
- EOL("MIPS linkage name");
- EmitSectionOffset("string", "section_str",
- StringPool.idFor(Name), false, true);
- EOL("Function name");
- EmitULEB128(Labels.size(), "Inline count");
-
- for (SmallVector<InlineInfoLabels, 4>::iterator LI = Labels.begin(),
- LE = Labels.end(); LI != LE; ++LI) {
- DIE *SP = LI->second;
- Asm->EmitInt32(SP->getOffset()); EOL("DIE offset");
-
- if (TD->getPointerSize() == sizeof(int32_t))
- O << MAI->getData32bitsDirective();
- else
- O << MAI->getData64bitsDirective();
-
- PrintLabelName("label", LI->first); EOL("low_pc");
- }
- }
-
- EmitLabel("debug_inlined_end", 1);
- Asm->O << '\n';
-}
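For reference, here is an in-memory model of one entry of the debug_inlined section described in the comment above emitDebugInlineInfo; the field order matches what the emitter writes (two string-section offsets, a ULEB128 inline count, then one {die_offset, low_pc} pair per inlined instance). The struct names are hypothetical and are not part of LLVM.

// Hypothetical model of one debug_inlined entry; illustrative only.
#include <cstdint>
#include <vector>

struct InlinedInstance {
  uint32_t DieOffset;   // offset of the DW_TAG_inlined_subroutine DIE in __debug_info
  uint64_t LowPC;       // address where this inlined copy starts (the emitted label)
};

struct DebugInlinedEntry {
  uint32_t MipsLinkageNameOffset;          // __debug_str offset (falls back to the plain name)
  uint32_t FunctionNameOffset;             // __debug_str offset of the regular function name
  std::vector<InlinedInstance> Instances;  // count is emitted as a ULEB128
};
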
diff --git a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h b/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
deleted file mode 100644
index 55baa92..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
+++ /dev/null
@@ -1,566 +0,0 @@
-//===-- llvm/CodeGen/DwarfDebug.h - Dwarf Debug Framework ------*- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains support for writing dwarf debug info into asm files.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CODEGEN_ASMPRINTER_DWARFDEBUG_H__
-#define CODEGEN_ASMPRINTER_DWARFDEBUG_H__
-
-#include "DIE.h"
-#include "DwarfPrinter.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/MachineLocation.h"
-#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/UniqueVector.h"
-#include <string>
-
-namespace llvm {
-
-class CompileUnit;
-class DbgConcreteScope;
-class DbgScope;
-class DbgVariable;
-class MachineFrameInfo;
-class MachineModuleInfo;
-class MCAsmInfo;
-class Timer;
-
-//===----------------------------------------------------------------------===//
-/// SrcLineInfo - This class is used to record source line correspondence.
-///
-class SrcLineInfo {
- unsigned Line; // Source line number.
- unsigned Column; // Source column.
- unsigned SourceID; // Source ID number.
- unsigned LabelID; // Label in code ID number.
-public:
- SrcLineInfo(unsigned L, unsigned C, unsigned S, unsigned I)
- : Line(L), Column(C), SourceID(S), LabelID(I) {}
-
- // Accessors
- unsigned getLine() const { return Line; }
- unsigned getColumn() const { return Column; }
- unsigned getSourceID() const { return SourceID; }
- unsigned getLabelID() const { return LabelID; }
-};
-
-class DwarfDebug : public DwarfPrinter {
- //===--------------------------------------------------------------------===//
- // Attributes used to construct specific Dwarf sections.
- //
-
- /// CompileUnitMap - A map of global variables representing compile units to
- /// compile units.
- DenseMap<Value *, CompileUnit *> CompileUnitMap;
-
- /// CompileUnits - All the compile units in this module.
- ///
- SmallVector<CompileUnit *, 8> CompileUnits;
-
- /// ModuleCU - All DIEs are inserted in ModuleCU.
- CompileUnit *ModuleCU;
-
- /// AbbreviationsSet - Used to uniquely define abbreviations.
- ///
- FoldingSet<DIEAbbrev> AbbreviationsSet;
-
- /// Abbreviations - A list of all the unique abbreviations in use.
- ///
- std::vector<DIEAbbrev *> Abbreviations;
-
- /// DirectoryIdMap - Directory name to directory id map.
- ///
- StringMap<unsigned> DirectoryIdMap;
-
- /// DirectoryNames - A list of directory names.
- SmallVector<std::string, 8> DirectoryNames;
-
- /// SourceFileIdMap - Source file name to source file id map.
- ///
- StringMap<unsigned> SourceFileIdMap;
-
- /// SourceFileNames - A list of source file names.
- SmallVector<std::string, 8> SourceFileNames;
-
- /// SourceIdMap - Source id map, i.e. pair of directory id and source file
- /// id mapped to a unique id.
- DenseMap<std::pair<unsigned, unsigned>, unsigned> SourceIdMap;
-
- /// SourceIds - Reverse map from source id to directory id + file id pair.
- ///
- SmallVector<std::pair<unsigned, unsigned>, 8> SourceIds;
-
- /// Lines - List of source line correspondence.
- std::vector<SrcLineInfo> Lines;
-
- /// DIEValues - A list of all the unique values in use.
- ///
- std::vector<DIEValue *> DIEValues;
-
- /// StringPool - A UniqueVector of strings used by indirect references.
- ///
- UniqueVector<std::string> StringPool;
-
- /// SectionMap - Provides a unique id per text section.
- ///
- UniqueVector<const MCSection*> SectionMap;
-
- /// SectionSourceLines - Tracks line numbers per text section.
- ///
- std::vector<std::vector<SrcLineInfo> > SectionSourceLines;
-
- /// didInitial - Flag to indicate if initial emission has been done.
- ///
- bool didInitial;
-
- /// shouldEmit - Flag to indicate if debug information should be emitted.
- ///
- bool shouldEmit;
-
- // CurrentFnDbgScope - Top level scope for the current function.
- //
- DbgScope *CurrentFnDbgScope;
-
- /// DbgScopeMap - Tracks the scopes in the current function.
- ///
- DenseMap<MDNode *, DbgScope *> DbgScopeMap;
-
- /// ConcreteScopes - Tracks the concrete scopes in the current function.
- /// These scopes are also included in DbgScopeMap.
- DenseMap<MDNode *, DbgScope *> ConcreteScopes;
-
- /// AbstractScopes - Tracks the abstract scopes in a module. These scopes
- /// are not included in DbgScopeMap.
- DenseMap<MDNode *, DbgScope *> AbstractScopes;
- SmallVector<DbgScope *, 4> AbstractScopesList;
-
- /// AbstractVariables - Collection of abstract variables.
- DenseMap<MDNode *, DbgVariable *> AbstractVariables;
-
- /// InlinedSubprogramDIEs - Collection of subprogram DIEs that are marked
- /// (at the end of the module) as DW_AT_inline.
- SmallPtrSet<DIE *, 4> InlinedSubprogramDIEs;
-
- DenseMap<DIE *, MDNode *> ContainingTypeMap;
-
- /// AbstractSubprogramDIEs - Collection of abstract subprogram DIEs.
- SmallPtrSet<DIE *, 4> AbstractSubprogramDIEs;
-
- /// TopLevelDIEs - Collection of top level DIEs.
- SmallPtrSet<DIE *, 4> TopLevelDIEs;
- SmallVector<DIE *, 4> TopLevelDIEsVector;
-
- typedef SmallVector<DbgScope *, 2> ScopeVector;
- typedef DenseMap<const MachineInstr *, ScopeVector>
- InsnToDbgScopeMapTy;
-
- /// DbgScopeBeginMap - Maps an instruction to a list of DbgScopes it starts.
- InsnToDbgScopeMapTy DbgScopeBeginMap;
-
- /// DbgScopeEndMap - Maps an instruction to a list of DbgScopes it ends.
- InsnToDbgScopeMapTy DbgScopeEndMap;
-
- /// InlineInfo - Keep track of inlined functions and their location. This
- /// information is used to populate debug_inlined section.
- typedef std::pair<unsigned, DIE *> InlineInfoLabels;
- DenseMap<MDNode *, SmallVector<InlineInfoLabels, 4> > InlineInfo;
- SmallVector<MDNode *, 4> InlinedSPNodes;
-
- /// CompileUnitOffsets - A map from compile units to their offsets. This is
- /// used when calculating the "origin" of a concrete instance of an inlined
- /// function.
- DenseMap<CompileUnit *, unsigned> CompileUnitOffsets;
-
- /// DebugTimer - Timer for the Dwarf debug writer.
- Timer *DebugTimer;
-
- struct FunctionDebugFrameInfo {
- unsigned Number;
- std::vector<MachineMove> Moves;
-
- FunctionDebugFrameInfo(unsigned Num, const std::vector<MachineMove> &M)
- : Number(Num), Moves(M) {}
- };
-
- std::vector<FunctionDebugFrameInfo> DebugFrames;
-
- /// getSourceDirectoryAndFileIds - Return the directory and file ids that
- /// map to the source id. Source id starts at 1.
- std::pair<unsigned, unsigned>
- getSourceDirectoryAndFileIds(unsigned SId) const {
- return SourceIds[SId-1];
- }
-
- /// getNumSourceDirectories - Return the number of source directories in the
- /// debug info.
- unsigned getNumSourceDirectories() const {
- return DirectoryNames.size();
- }
-
- /// getSourceDirectoryName - Return the name of the directory corresponding
- /// to the id.
- const std::string &getSourceDirectoryName(unsigned Id) const {
- return DirectoryNames[Id - 1];
- }
-
- /// getSourceFileName - Return the name of the source file corresponding
- /// to the id.
- const std::string &getSourceFileName(unsigned Id) const {
- return SourceFileNames[Id - 1];
- }
-
- /// getNumSourceIds - Return the number of unique source ids.
- unsigned getNumSourceIds() const {
- return SourceIds.size();
- }
-
- /// assignAbbrevNumber - Define a unique number for the abbreviation.
- ///
- void assignAbbrevNumber(DIEAbbrev &Abbrev);
-
- /// createDIEEntry - Creates a new DIEEntry to be a proxy for a debug
- /// information entry.
- DIEEntry *createDIEEntry(DIE *Entry = NULL);
-
- /// addUInt - Add an unsigned integer attribute data and value.
- ///
- void addUInt(DIE *Die, unsigned Attribute, unsigned Form, uint64_t Integer);
-
- /// addSInt - Add a signed integer attribute data and value.
- ///
- void addSInt(DIE *Die, unsigned Attribute, unsigned Form, int64_t Integer);
-
- /// addString - Add a string attribute data and value.
- ///
- void addString(DIE *Die, unsigned Attribute, unsigned Form,
- const StringRef Str);
-
- /// addLabel - Add a Dwarf label attribute data and value.
- ///
- void addLabel(DIE *Die, unsigned Attribute, unsigned Form,
- const DWLabel &Label);
-
- /// addObjectLabel - Add a non-Dwarf label attribute data and value.
- ///
- void addObjectLabel(DIE *Die, unsigned Attribute, unsigned Form,
- const MCSymbol *Sym);
-
- /// addSectionOffset - Add a section offset label attribute data and value.
- ///
- void addSectionOffset(DIE *Die, unsigned Attribute, unsigned Form,
- const DWLabel &Label, const DWLabel &Section,
- bool isEH = false, bool useSet = true);
-
- /// addDelta - Add a label delta attribute data and value.
- ///
- void addDelta(DIE *Die, unsigned Attribute, unsigned Form,
- const DWLabel &Hi, const DWLabel &Lo);
-
- /// addDIEEntry - Add a DIE attribute data and value.
- ///
- void addDIEEntry(DIE *Die, unsigned Attribute, unsigned Form, DIE *Entry) {
- Die->addValue(Attribute, Form, createDIEEntry(Entry));
- }
-
- /// addBlock - Add block data.
- ///
- void addBlock(DIE *Die, unsigned Attribute, unsigned Form, DIEBlock *Block);
-
- /// addSourceLine - Add location information to specified debug information
- /// entry.
- void addSourceLine(DIE *Die, const DIVariable *V);
- void addSourceLine(DIE *Die, const DIGlobal *G);
- void addSourceLine(DIE *Die, const DISubprogram *SP);
- void addSourceLine(DIE *Die, const DIType *Ty);
- void addSourceLine(DIE *Die, const DINameSpace *NS);
-
- /// addAddress - Add an address attribute to a die based on the location
- /// provided.
- void addAddress(DIE *Die, unsigned Attribute,
- const MachineLocation &Location);
-
- /// addComplexAddress - Start with the address based on the location provided,
- /// and generate the DWARF information necessary to find the actual variable
- /// (navigating the extra location information encoded in the type) based on
- /// the starting location. Add the DWARF information to the die.
- ///
- void addComplexAddress(DbgVariable *&DV, DIE *Die, unsigned Attribute,
- const MachineLocation &Location);
-
- // FIXME: Should be reformulated in terms of addComplexAddress.
- /// addBlockByrefAddress - Start with the address based on the location
- /// provided, and generate the DWARF information necessary to find the
- /// actual Block variable (navigating the Block struct) based on the
- /// starting location. Add the DWARF information to the die. Obsolete,
- /// please use addComplexAddress instead.
- ///
- void addBlockByrefAddress(DbgVariable *&DV, DIE *Die, unsigned Attribute,
- const MachineLocation &Location);
-
- /// addToContextOwner - Add Die into the list of its context owner's children.
- void addToContextOwner(DIE *Die, DIDescriptor Context);
-
- /// addType - Add a new type attribute to the specified entity.
- void addType(DIE *Entity, DIType Ty);
-
-
- /// getOrCreateNameSpace - Create a DIE for DINameSpace.
- DIE *getOrCreateNameSpace(DINameSpace NS);
-
- /// getOrCreateTypeDIE - Find existing DIE or create new DIE for the
- /// given DIType.
- DIE *getOrCreateTypeDIE(DIType Ty);
-
- void addPubTypes(DISubprogram SP);
-
- /// constructTypeDIE - Construct basic type die from DIBasicType.
- void constructTypeDIE(DIE &Buffer,
- DIBasicType BTy);
-
- /// constructTypeDIE - Construct derived type die from DIDerivedType.
- void constructTypeDIE(DIE &Buffer,
- DIDerivedType DTy);
-
- /// constructTypeDIE - Construct type DIE from DICompositeType.
- void constructTypeDIE(DIE &Buffer,
- DICompositeType CTy);
-
- /// constructSubrangeDIE - Construct subrange DIE from DISubrange.
- void constructSubrangeDIE(DIE &Buffer, DISubrange SR, DIE *IndexTy);
-
- /// constructArrayTypeDIE - Construct array type DIE from DICompositeType.
- void constructArrayTypeDIE(DIE &Buffer,
- DICompositeType *CTy);
-
- /// constructEnumTypeDIE - Construct enum type DIE from DIEnumerator.
- DIE *constructEnumTypeDIE(DIEnumerator *ETy);
-
- /// createGlobalVariableDIE - Create new DIE using GV.
- DIE *createGlobalVariableDIE(const DIGlobalVariable &GV);
-
- /// createMemberDIE - Create new member DIE.
- DIE *createMemberDIE(const DIDerivedType &DT);
-
- /// createSubprogramDIE - Create new DIE using SP.
- DIE *createSubprogramDIE(const DISubprogram &SP, bool MakeDecl = false);
-
- /// findCompileUnit - Get the compile unit for the given descriptor.
- ///
- CompileUnit *findCompileUnit(DICompileUnit Unit);
-
- /// getUpdatedDbgScope - Find or create DbgScope associated with
- /// the instruction. Initialize scope and update scope hierarchy.
- DbgScope *getUpdatedDbgScope(MDNode *N, const MachineInstr *MI, MDNode *InlinedAt);
-
- /// createDbgScope - Create DbgScope for the scope.
- void createDbgScope(MDNode *Scope, MDNode *InlinedAt);
-
- DbgScope *getOrCreateAbstractScope(MDNode *N);
-
- /// findAbstractVariable - Find abstract variable associated with Var.
- DbgVariable *findAbstractVariable(DIVariable &Var, unsigned FrameIdx,
- DILocation &Loc);
-
- /// updateSubprogramScopeDIE - Find DIE for the given subprogram and
- /// attach appropriate DW_AT_low_pc and DW_AT_high_pc attributes.
- /// If there are global variables in this scope then create and insert
- /// DIEs for these variables.
- DIE *updateSubprogramScopeDIE(MDNode *SPNode);
-
- /// constructLexicalScope - Construct new DW_TAG_lexical_block
- /// for this scope and attach DW_AT_low_pc/DW_AT_high_pc labels.
- DIE *constructLexicalScopeDIE(DbgScope *Scope);
-
- /// constructInlinedScopeDIE - This scope represents inlined body of
- /// a function. Construct DIE to represent this concrete inlined copy
- /// of the function.
- DIE *constructInlinedScopeDIE(DbgScope *Scope);
-
- /// constructVariableDIE - Construct a DIE for the given DbgVariable.
- DIE *constructVariableDIE(DbgVariable *DV, DbgScope *S);
-
- /// constructScopeDIE - Construct a DIE for this scope.
- DIE *constructScopeDIE(DbgScope *Scope);
-
- /// emitInitial - Emit initial Dwarf declarations. This is necessary for cc
- /// tools to recognize that the object file contains Dwarf information.
- void emitInitial();
-
- /// emitDIE - Recursively emits a debug information entry.
- ///
- void emitDIE(DIE *Die);
-
- /// computeSizeAndOffset - Compute the size and offset of a DIE.
- ///
- unsigned computeSizeAndOffset(DIE *Die, unsigned Offset, bool Last);
-
- /// computeSizeAndOffsets - Compute the size and offset of all the DIEs.
- ///
- void computeSizeAndOffsets();
-
- /// emitDebugInfo - Emit the debug info section.
- ///
- void emitDebugInfo();
-
- /// emitAbbreviations - Emit the abbreviation section.
- ///
- void emitAbbreviations() const;
-
- /// emitEndOfLineMatrix - Emit the last address of the section and the end of
- /// the line matrix.
- ///
- void emitEndOfLineMatrix(unsigned SectionEnd);
-
- /// emitDebugLines - Emit source line information.
- ///
- void emitDebugLines();
-
- /// emitCommonDebugFrame - Emit common frame info into a debug frame section.
- ///
- void emitCommonDebugFrame();
-
- /// emitFunctionDebugFrame - Emit per function frame info into a debug frame
- /// section.
- void emitFunctionDebugFrame(const FunctionDebugFrameInfo &DebugFrameInfo);
-
- /// emitDebugPubNames - Emit visible names into a debug pubnames section.
- ///
- void emitDebugPubNames();
-
- /// emitDebugPubTypes - Emit visible types into a debug pubtypes section.
- ///
- void emitDebugPubTypes();
-
- /// emitDebugStr - Emit visible names into a debug str section.
- ///
- void emitDebugStr();
-
- /// emitDebugLoc - Emit visible names into a debug loc section.
- ///
- void emitDebugLoc();
-
- /// EmitDebugARanges - Emit visible names into a debug aranges section.
- ///
- void EmitDebugARanges();
-
- /// emitDebugRanges - Emit visible names into a debug ranges section.
- ///
- void emitDebugRanges();
-
- /// emitDebugMacInfo - Emit visible names into a debug macinfo section.
- ///
- void emitDebugMacInfo();
-
- /// emitDebugInlineInfo - Emit inline info using the following format.
- /// Section Header:
- /// 1. length of section
- /// 2. Dwarf version number
- /// 3. address size.
- ///
- /// Entries (one "entry" for each function that was inlined):
- ///
- /// 1. offset into __debug_str section for MIPS linkage name, if it exists;
- /// otherwise offset into __debug_str for regular function name.
- /// 2. offset into __debug_str section for regular function name.
- /// 3. an unsigned LEB128 number indicating the number of distinct inlining
- /// instances for the function.
- ///
- /// The rest of the entry consists of a {die_offset, low_pc} pair for each
- /// inlined instance; the die_offset points to the inlined_subroutine die in
- /// the __debug_info section, and the low_pc is the starting address for the
- /// inlining instance.
- void emitDebugInlineInfo();
-
- /// GetOrCreateSourceID - Look up the source id with the given directory and
- /// source file names. If none currently exists, create a new id and insert it
- /// in the SourceIds map. This can update DirectoryNames and SourceFileNames maps
- /// as well.
- unsigned GetOrCreateSourceID(StringRef DirName, StringRef FileName);
-
- CompileUnit *constructCompileUnit(MDNode *N);
-
- void constructGlobalVariableDIE(MDNode *N);
-
- void constructSubprogramDIE(MDNode *N);
-
- // FIXME: This should go away in favor of complex addresses.
- /// Find the type the programmer originally declared the variable to be
- /// and return that type. Obsolete, use GetComplexAddrType instead.
- ///
- DIType getBlockByrefType(DIType Ty, std::string Name);
-
-public:
- //===--------------------------------------------------------------------===//
- // Main entry points.
- //
- DwarfDebug(raw_ostream &OS, AsmPrinter *A, const MCAsmInfo *T);
- virtual ~DwarfDebug();
-
- /// ShouldEmitDwarfDebug - Returns true if Dwarf debugging declarations should
- /// be emitted.
- bool ShouldEmitDwarfDebug() const { return shouldEmit; }
-
- /// beginModule - Emit all Dwarf sections that should come prior to the
- /// content.
- void beginModule(Module *M, MachineModuleInfo *MMI);
-
- /// endModule - Emit all Dwarf sections that should come after the content.
- ///
- void endModule();
-
- /// beginFunction - Gather pre-function debug information. Assumes it is
- /// being emitted immediately after the function entry point.
- void beginFunction(const MachineFunction *MF);
-
- /// endFunction - Gather and emit post-function debug information.
- ///
- void endFunction(const MachineFunction *MF);
-
- /// recordSourceLine - Records location information and associates it with a
- /// label. Returns a unique label ID used to generate a label and provide
- /// correspondence to the source line list.
- unsigned recordSourceLine(unsigned Line, unsigned Col, MDNode *Scope);
-
- /// getSourceLineCount - Return the number of source lines in the debug
- /// info.
- unsigned getSourceLineCount() const {
- return Lines.size();
- }
-
- /// getOrCreateSourceID - Public version of GetOrCreateSourceID. This can be
- /// timed. Look up the source id with the given directory and source file
- /// names. If none currently exists, create a new id and insert it in the
- /// SourceIds map. This can update DirectoryNames and SourceFileNames maps as
- /// well.
- unsigned getOrCreateSourceID(const std::string &DirName,
- const std::string &FileName);
-
- /// extractScopeInformation - Scan machine instructions in this function
- /// and collect DbgScopes. Return true if at least one scope was found.
- bool extractScopeInformation();
-
- /// collectVariableInfo - Populate DbgScope entries with variables' info.
- void collectVariableInfo();
-
- /// beginScope - Process beginning of a scope starting at Label.
- void beginScope(const MachineInstr *MI, unsigned Label);
-
- /// endScope - Process the end of a scope.
- void endScope(const MachineInstr *MI);
-};
-} // End of namespace llvm
-
-#endif
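
As a side note on the bookkeeping declared above: DirectoryIdMap, SourceFileIdMap, SourceIdMap and SourceIds implement a two-level interning scheme in which directory and file names each receive a small id, and the (directory id, file id) pair is mapped to a unique source id with a reverse table for lookups. A minimal standalone sketch of that scheme, using standard containers in place of LLVM's StringMap/DenseMap (the class and method names here are hypothetical):

#include <map>
#include <string>
#include <utility>
#include <vector>

// Minimal source-id interning sketch; all ids are 1-based, as in the header above.
class SourceIdTable {
  std::map<std::string, unsigned> DirIds, FileIds;
  std::vector<std::string> DirNames, FileNames;
  std::map<std::pair<unsigned, unsigned>, unsigned> PairIds;
  std::vector<std::pair<unsigned, unsigned> > Pairs;  // reverse map: id -> (dir, file)

  static unsigned intern(const std::string &S, std::map<std::string, unsigned> &M,
                         std::vector<std::string> &Names) {
    std::map<std::string, unsigned>::iterator It = M.find(S);
    if (It != M.end()) return It->second;
    Names.push_back(S);
    return M[S] = Names.size();  // new 1-based id
  }

public:
  unsigned getOrCreateSourceID(const std::string &Dir, const std::string &File) {
    unsigned D = intern(Dir, DirIds, DirNames);
    unsigned F = intern(File, FileIds, FileNames);
    std::pair<unsigned, unsigned> Key(D, F);
    std::map<std::pair<unsigned, unsigned>, unsigned>::iterator It = PairIds.find(Key);
    if (It != PairIds.end()) return It->second;
    Pairs.push_back(Key);
    return PairIds[Key] = Pairs.size();  // source ids also start at 1
  }

  std::pair<unsigned, unsigned> getDirectoryAndFileIds(unsigned SId) const {
    return Pairs[SId - 1];  // same convention as getSourceDirectoryAndFileIds above
  }
};
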
diff --git a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp b/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp
deleted file mode 100644
index 2b08ba4..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfException.cpp
+++ /dev/null
@@ -1,985 +0,0 @@
-//===-- CodeGen/AsmPrinter/DwarfException.cpp - Dwarf Exception Impl ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains support for writing DWARF exception info into asm files.
-//
-//===----------------------------------------------------------------------===//
-
-#include "DwarfException.h"
-#include "llvm/Module.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineLocation.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCSection.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Support/Dwarf.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/Support/Timer.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/Twine.h"
-using namespace llvm;
-
-DwarfException::DwarfException(raw_ostream &OS, AsmPrinter *A,
- const MCAsmInfo *T)
- : DwarfPrinter(OS, A, T, "eh"), shouldEmitTable(false), shouldEmitMoves(false),
- shouldEmitTableModule(false), shouldEmitMovesModule(false),
- ExceptionTimer(0) {
- if (TimePassesIsEnabled)
- ExceptionTimer = new Timer("DWARF Exception Writer");
-}
-
-DwarfException::~DwarfException() {
- delete ExceptionTimer;
-}
-
-/// CreateLabelDiff - Emit a label and subtract it from the expression we
-/// already have. This is equivalent to emitting "foo - .", but we have to emit
-/// the label for "." directly.
-const MCExpr *DwarfException::CreateLabelDiff(const MCExpr *ExprRef,
- const char *LabelName,
- unsigned Index) {
- SmallString<64> Name;
- raw_svector_ostream(Name) << MAI->getPrivateGlobalPrefix()
- << LabelName << Asm->getFunctionNumber()
- << "_" << Index;
- MCSymbol *DotSym = Asm->OutContext.GetOrCreateSymbol(Name.str());
- Asm->OutStreamer.EmitLabel(DotSym);
-
- return MCBinaryExpr::CreateSub(ExprRef,
- MCSymbolRefExpr::Create(DotSym,
- Asm->OutContext),
- Asm->OutContext);
-}
-
-/// EmitCIE - Emit a Common Information Entry (CIE). This holds information that
-/// is shared among many Frame Description Entries. There is at least one CIE
-/// in every non-empty .debug_frame section.
-void DwarfException::EmitCIE(const Function *PersonalityFn, unsigned Index) {
- // Size and sign of stack growth.
- int stackGrowth =
- Asm->TM.getFrameInfo()->getStackGrowthDirection() ==
- TargetFrameInfo::StackGrowsUp ?
- TD->getPointerSize() : -TD->getPointerSize();
-
- const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
-
- // Begin eh frame section.
- Asm->OutStreamer.SwitchSection(TLOF.getEHFrameSection());
-
- if (MAI->is_EHSymbolPrivate())
- O << MAI->getPrivateGlobalPrefix();
- O << "EH_frame" << Index << ":\n";
-
- EmitLabel("section_eh_frame", Index);
-
- // Define base labels.
- EmitLabel("eh_frame_common", Index);
-
- // Define the eh frame length.
- EmitDifference("eh_frame_common_end", Index,
- "eh_frame_common_begin", Index, true);
- EOL("Length of Common Information Entry");
-
- // EH frame header.
- EmitLabel("eh_frame_common_begin", Index);
- if (Asm->VerboseAsm) Asm->OutStreamer.AddComment("CIE Identifier Tag");
- Asm->OutStreamer.EmitIntValue(0, 4/*size*/, 0/*addrspace*/);
- if (Asm->VerboseAsm) Asm->OutStreamer.AddComment("DW_CIE_VERSION");
- Asm->OutStreamer.EmitIntValue(dwarf::DW_CIE_VERSION, 1/*size*/, 0/*addr*/);
-
- // The personality presence indicates that language specific information will
- // show up in the eh frame. Find out how we are supposed to lower the
- // personality function reference:
-
- unsigned LSDAEncoding = TLOF.getLSDAEncoding();
- unsigned FDEEncoding = TLOF.getFDEEncoding();
- unsigned PerEncoding = TLOF.getPersonalityEncoding();
-
- char Augmentation[6] = { 0 };
- unsigned AugmentationSize = 0;
- char *APtr = Augmentation + 1;
-
- if (PersonalityFn) {
- // There is a personality function.
- *APtr++ = 'P';
- AugmentationSize += 1 + SizeOfEncodedValue(PerEncoding);
- }
-
- if (UsesLSDA[Index]) {
- // An LSDA pointer is in the FDE augmentation.
- *APtr++ = 'L';
- ++AugmentationSize;
- }
-
- if (FDEEncoding != dwarf::DW_EH_PE_absptr) {
- // A non-default pointer encoding for the FDE.
- *APtr++ = 'R';
- ++AugmentationSize;
- }
-
- if (APtr != Augmentation + 1)
- Augmentation[0] = 'z';
-
- Asm->OutStreamer.EmitBytes(StringRef(Augmentation, strlen(Augmentation)+1),0);
- EOL("CIE Augmentation");
-
- // Round out reader.
- EmitULEB128(1, "CIE Code Alignment Factor");
- EmitSLEB128(stackGrowth, "CIE Data Alignment Factor");
- Asm->EmitInt8(RI->getDwarfRegNum(RI->getRARegister(), true));
- EOL("CIE Return Address Column");
-
- if (Augmentation[0]) {
- EmitULEB128(AugmentationSize, "Augmentation Size");
-
- // If there is a personality, we need to indicate the function's location.
- if (PersonalityFn) {
- EmitEncodingByte(PerEncoding, "Personality");
- EmitReference(PersonalityFn, PerEncoding);
- EOL("Personality");
- }
- if (UsesLSDA[Index])
- EmitEncodingByte(LSDAEncoding, "LSDA");
- if (FDEEncoding != dwarf::DW_EH_PE_absptr)
- EmitEncodingByte(FDEEncoding, "FDE");
- }
-
- // Indicate locations of general callee saved registers in frame.
- std::vector<MachineMove> Moves;
- RI->getInitialFrameState(Moves);
- EmitFrameMoves(NULL, 0, Moves, true);
-
- // On Darwin the linker honors the alignment of eh_frame, which means it must
- // be 8-byte on 64-bit targets to match what gcc does. Otherwise you get
- // holes which confuse readers of eh_frame.
- Asm->EmitAlignment(TD->getPointerSize() == 4 ? 2 : 3, 0, 0, false);
- EmitLabel("eh_frame_common_end", Index);
- Asm->O << '\n';
-}
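
The augmentation handling in EmitCIE above reduces to building a short string whose letters advertise optional CIE/FDE data: 'P' for a personality routine, 'L' for an LSDA pointer in the FDE, 'R' for a non-default FDE pointer encoding, all prefixed with 'z' when any of them is present. A standalone sketch of just that rule (a hypothetical helper, not LLVM code):

#include <string>

// Build a DWARF EH CIE augmentation string, mirroring the 'z'/'P'/'L'/'R'
// logic of EmitCIE above.
std::string buildAugmentation(bool HasPersonality, bool UsesLSDA,
                              bool NonDefaultFDEEncoding) {
  std::string Aug;
  if (HasPersonality) Aug += 'P';          // personality encoding + reference follow
  if (UsesLSDA) Aug += 'L';                // FDEs carry an LSDA pointer
  if (NonDefaultFDEEncoding) Aug += 'R';   // an FDE pointer-encoding byte follows
  if (!Aug.empty()) Aug.insert(Aug.begin(), 'z');  // 'z' announces augmentation data
  return Aug;
}
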
-
-/// EmitFDE - Emit the Frame Description Entry (FDE) for the function.
-void DwarfException::EmitFDE(const FunctionEHFrameInfo &EHFrameInfo) {
- assert(!EHFrameInfo.function->hasAvailableExternallyLinkage() &&
- "Should not emit 'available externally' functions at all");
-
- const Function *TheFunc = EHFrameInfo.function;
- const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
-
- unsigned LSDAEncoding = TLOF.getLSDAEncoding();
- unsigned FDEEncoding = TLOF.getFDEEncoding();
-
- Asm->OutStreamer.SwitchSection(TLOF.getEHFrameSection());
-
- // Externally visible entry into the function's eh frame info. If the
- // corresponding function is static, this should not be externally visible.
- if (!TheFunc->hasLocalLinkage())
- if (const char *GlobalEHDirective = MAI->getGlobalEHDirective())
- O << GlobalEHDirective << *EHFrameInfo.FunctionEHSym << '\n';
-
- // If the corresponding function is a weak definition, this should be too.
- if (TheFunc->isWeakForLinker() && MAI->getWeakDefDirective())
- O << MAI->getWeakDefDirective() << *EHFrameInfo.FunctionEHSym << '\n';
-
- // If the corresponding function is hidden, this should be too.
- if (TheFunc->hasHiddenVisibility())
- if (MCSymbolAttr HiddenAttr = MAI->getHiddenVisibilityAttr())
- Asm->OutStreamer.EmitSymbolAttribute(EHFrameInfo.FunctionEHSym,
- HiddenAttr);
-
- // If there are no calls then you can't unwind. This may mean we can omit the
- // EH Frame, but some environments do not handle weak absolute symbols. If
- // UnwindTablesMandatory is set we cannot do this optimization; the unwind
- // info is to be available for non-EH uses.
- if (!EHFrameInfo.hasCalls && !UnwindTablesMandatory &&
- (!TheFunc->isWeakForLinker() ||
- !MAI->getWeakDefDirective() ||
- MAI->getSupportsWeakOmittedEHFrame())) {
- O << *EHFrameInfo.FunctionEHSym << " = 0\n";
- // This name has no connection to the function, so it might get
- // dead-stripped when the function is not, erroneously. Prohibit
- // dead-stripping unconditionally.
- if (MAI->hasNoDeadStrip())
- Asm->OutStreamer.EmitSymbolAttribute(EHFrameInfo.FunctionEHSym,
- MCSA_NoDeadStrip);
- } else {
- O << *EHFrameInfo.FunctionEHSym << ":\n";
-
- // EH frame header.
- EmitDifference("eh_frame_end", EHFrameInfo.Number,
- "eh_frame_begin", EHFrameInfo.Number,
- true);
- EOL("Length of Frame Information Entry");
-
- EmitLabel("eh_frame_begin", EHFrameInfo.Number);
-
- EmitSectionOffset("eh_frame_begin", "eh_frame_common",
- EHFrameInfo.Number, EHFrameInfo.PersonalityIndex,
- true, true, false);
-
- EOL("FDE CIE offset");
-
- EmitReference("eh_func_begin", EHFrameInfo.Number, FDEEncoding);
- EOL("FDE initial location");
- EmitDifference("eh_func_end", EHFrameInfo.Number,
- "eh_func_begin", EHFrameInfo.Number,
- SizeOfEncodedValue(FDEEncoding) == 4);
- EOL("FDE address range");
-
- // If there is a personality and landing pads then point to the language
- // specific data area in the exception table.
- if (MMI->getPersonalities()[0] != NULL) {
- unsigned Size = SizeOfEncodedValue(LSDAEncoding);
-
- EmitULEB128(Size, "Augmentation size");
- if (EHFrameInfo.hasLandingPads)
- EmitReference("exception", EHFrameInfo.Number, LSDAEncoding);
- else
- Asm->OutStreamer.EmitIntValue(0, Size/*size*/, 0/*addrspace*/);
-
- EOL("Language Specific Data Area");
- } else {
- EmitULEB128(0, "Augmentation size");
- }
-
- // Indicate locations of function specific callee saved registers in frame.
- EmitFrameMoves("eh_func_begin", EHFrameInfo.Number, EHFrameInfo.Moves,
- true);
-
- // On Darwin the linker honors the alignment of eh_frame, which means it
- // must be 8-byte on 64-bit targets to match what gcc does. Otherwise you
- // get holes which confuse readers of eh_frame.
- Asm->EmitAlignment(TD->getPointerSize() == sizeof(int32_t) ? 2 : 3,
- 0, 0, false);
- EmitLabel("eh_frame_end", EHFrameInfo.Number);
-
- // If the function is marked used, this table should be also. We cannot
- // make the mark unconditional in this case, since retaining the table also
- // retains the function in this case, and there is code around that depends
- // on unused functions (calling undefined externals) being dead-stripped to
- // link correctly. Yes, there really is.
- if (MMI->isUsedFunction(EHFrameInfo.function))
- if (MAI->hasNoDeadStrip())
- Asm->OutStreamer.EmitSymbolAttribute(EHFrameInfo.FunctionEHSym,
- MCSA_NoDeadStrip);
- }
- Asm->O << '\n';
-}
-
-/// SharedTypeIds - How many leading type ids two landing pads have in common.
-unsigned DwarfException::SharedTypeIds(const LandingPadInfo *L,
- const LandingPadInfo *R) {
- const std::vector<int> &LIds = L->TypeIds, &RIds = R->TypeIds;
- unsigned LSize = LIds.size(), RSize = RIds.size();
- unsigned MinSize = LSize < RSize ? LSize : RSize;
- unsigned Count = 0;
-
- for (; Count != MinSize; ++Count)
- if (LIds[Count] != RIds[Count])
- return Count;
-
- return Count;
-}
-
-/// PadLT - Order landing pads lexicographically by type id.
-bool DwarfException::PadLT(const LandingPadInfo *L, const LandingPadInfo *R) {
- const std::vector<int> &LIds = L->TypeIds, &RIds = R->TypeIds;
- unsigned LSize = LIds.size(), RSize = RIds.size();
- unsigned MinSize = LSize < RSize ? LSize : RSize;
-
- for (unsigned i = 0; i != MinSize; ++i)
- if (LIds[i] != RIds[i])
- return LIds[i] < RIds[i];
-
- return LSize < RSize;
-}
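
Taken together, PadLT and SharedTypeIds let ComputeActionsTable below fold duplicate actions: sorting landing pads lexicographically by their type-id lists places pads with a common leading prefix next to each other, and the shared-prefix length says how many actions can be reused from the previous pad. A small self-contained illustration over plain vectors (a hypothetical example, not LLVM code):

#include <algorithm>
#include <cstdio>
#include <vector>

// Count how many leading type ids two lists share, as SharedTypeIds does.
static unsigned sharedPrefix(const std::vector<int> &L, const std::vector<int> &R) {
  unsigned N = (unsigned) std::min(L.size(), R.size());
  unsigned I = 0;
  while (I != N && L[I] == R[I]) ++I;
  return I;
}

int main() {
  std::vector<std::vector<int> > Pads;
  int A[] = {1, 2, 3}; Pads.push_back(std::vector<int>(A, A + 3));
  int B[] = {1, 2};    Pads.push_back(std::vector<int>(B, B + 2));
  int C[] = {4};       Pads.push_back(std::vector<int>(C, C + 1));

  // std::vector's operator< is lexicographic with shorter prefixes ordered first,
  // which is exactly the PadLT ordering.
  std::sort(Pads.begin(), Pads.end());

  for (unsigned I = 1; I < Pads.size(); ++I)
    std::printf("pads %u and %u share %u leading type ids\n",
                I - 1, I, sharedPrefix(Pads[I - 1], Pads[I]));
  return 0;
}
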
-
-/// ComputeActionsTable - Compute the actions table and gather the first action
-/// index for each landing pad site.
-unsigned DwarfException::
-ComputeActionsTable(const SmallVectorImpl<const LandingPadInfo*> &LandingPads,
- SmallVectorImpl<ActionEntry> &Actions,
- SmallVectorImpl<unsigned> &FirstActions) {
-
- // The action table follows the call-site table in the LSDA. The individual
- // records are of two types:
- //
- // * Catch clause
- // * Exception specification
- //
- // The two record kinds have the same format, with only small differences.
- // They are distinguished by the "switch value" field: Catch clauses
- // (TypeInfos) have strictly positive switch values, and exception
- // specifications (FilterIds) have strictly negative switch values. Value 0
- // indicates a catch-all clause.
- //
- // Negative type IDs index into FilterIds. Positive type IDs index into
- // TypeInfos. The value written for a positive type ID is just the type ID
- // itself. For a negative type ID, however, the value written is the
- // (negative) byte offset of the corresponding FilterIds entry. The byte
- // offset is usually equal to the type ID (because the FilterIds entries are
- // written using a variable width encoding, which outputs one byte per entry
- // as long as the value written is not too large) but can differ. This kind
- // of complication does not occur for positive type IDs because type infos are
- // output using a fixed width encoding. FilterOffsets[i] holds the byte
- // offset corresponding to FilterIds[i].
-
- const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
- SmallVector<int, 16> FilterOffsets;
- FilterOffsets.reserve(FilterIds.size());
- int Offset = -1;
-
- for (std::vector<unsigned>::const_iterator
- I = FilterIds.begin(), E = FilterIds.end(); I != E; ++I) {
- FilterOffsets.push_back(Offset);
- Offset -= MCAsmInfo::getULEB128Size(*I);
- }
-
- FirstActions.reserve(LandingPads.size());
-
- int FirstAction = 0;
- unsigned SizeActions = 0;
- const LandingPadInfo *PrevLPI = 0;
-
- for (SmallVectorImpl<const LandingPadInfo *>::const_iterator
- I = LandingPads.begin(), E = LandingPads.end(); I != E; ++I) {
- const LandingPadInfo *LPI = *I;
- const std::vector<int> &TypeIds = LPI->TypeIds;
- const unsigned NumShared = PrevLPI ? SharedTypeIds(LPI, PrevLPI) : 0;
- unsigned SizeSiteActions = 0;
-
- if (NumShared < TypeIds.size()) {
- unsigned SizeAction = 0;
- unsigned PrevAction = (unsigned)-1;
-
- if (NumShared) {
- const unsigned SizePrevIds = PrevLPI->TypeIds.size();
- assert(Actions.size());
- PrevAction = Actions.size() - 1;
- SizeAction =
- MCAsmInfo::getSLEB128Size(Actions[PrevAction].NextAction) +
- MCAsmInfo::getSLEB128Size(Actions[PrevAction].ValueForTypeID);
-
- for (unsigned j = NumShared; j != SizePrevIds; ++j) {
- assert(PrevAction != (unsigned)-1 && "PrevAction is invalid!");
- SizeAction -=
- MCAsmInfo::getSLEB128Size(Actions[PrevAction].ValueForTypeID);
- SizeAction += -Actions[PrevAction].NextAction;
- PrevAction = Actions[PrevAction].Previous;
- }
- }
-
- // Compute the actions.
- for (unsigned J = NumShared, M = TypeIds.size(); J != M; ++J) {
- int TypeID = TypeIds[J];
- assert(-1 - TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
- int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
- unsigned SizeTypeID = MCAsmInfo::getSLEB128Size(ValueForTypeID);
-
- int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
- SizeAction = SizeTypeID + MCAsmInfo::getSLEB128Size(NextAction);
- SizeSiteActions += SizeAction;
-
- ActionEntry Action = { ValueForTypeID, NextAction, PrevAction };
- Actions.push_back(Action);
- PrevAction = Actions.size() - 1;
- }
-
- // Record the first action of the landing pad site.
- FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
- } // else identical - re-use previous FirstAction
-
- // Information used when creating the call-site table. The action record
- // field of the call site record is the offset of the first associated
- // action record, relative to the start of the actions table. This value is
- // biased by 1 (1 indicating the start of the actions table), and 0
- // indicates that there are no actions.
- FirstActions.push_back(FirstAction);
-
- // Compute this site's contribution to the size.
- SizeActions += SizeSiteActions;
-
- PrevLPI = LPI;
- }
-
- return SizeActions;
-}
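
All of the byte offsets and action sizes computed above hinge on MCAsmInfo::getULEB128Size/getSLEB128Size, i.e. on how many bytes a value occupies in LEB128 form. A self-contained sketch of the standard DWARF LEB128 size computation (the helper names are mine; the signed variant assumes arithmetic right shift for negative values, as on mainstream compilers):

#include <stdint.h>

// Number of bytes needed to encode V as an unsigned LEB128 value.
unsigned ulebSize(uint64_t V) {
  unsigned Bytes = 0;
  do { V >>= 7; ++Bytes; } while (V != 0);
  return Bytes;
}

// Number of bytes needed to encode V as a signed LEB128 value.
unsigned slebSize(int64_t V) {
  unsigned Bytes = 0;
  bool More = true;
  while (More) {
    uint8_t Byte = V & 0x7f;
    V >>= 7;  // arithmetic shift keeps the sign bits set for negative V
    More = !((V == 0 && !(Byte & 0x40)) || (V == -1 && (Byte & 0x40)));
    ++Bytes;
  }
  return Bytes;
}

Since a small FilterIds entry encodes in a single byte, FilterOffsets usually steps down by one per entry, which is why the comment above notes that the byte offset usually matches the type id.
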
-
-/// CallToNoUnwindFunction - Return `true' if this is a call to a function
-/// marked `nounwind'. Return `false' otherwise.
-bool DwarfException::CallToNoUnwindFunction(const MachineInstr *MI) {
- assert(MI->getDesc().isCall() && "This should be a call instruction!");
-
- bool MarkedNoUnwind = false;
- bool SawFunc = false;
-
- for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
- const MachineOperand &MO = MI->getOperand(I);
-
- if (MO.isGlobal()) {
- if (Function *F = dyn_cast<Function>(MO.getGlobal())) {
- if (SawFunc) {
- // Be conservative. If we have more than one function operand for this
- // call, then we can't make the assumption that it's the callee and
- // not a parameter to the call.
- //
- // FIXME: Determine if there's a way to say that `F' is the callee or
- // parameter.
- MarkedNoUnwind = false;
- break;
- }
-
- MarkedNoUnwind = F->doesNotThrow();
- SawFunc = true;
- }
- }
- }
-
- return MarkedNoUnwind;
-}
-
-/// ComputeCallSiteTable - Compute the call-site table. The entry for an invoke
-/// has a try-range containing the call, a non-zero landing pad, and an
-/// appropriate action. The entry for an ordinary call has a try-range
-/// containing the call and zero for the landing pad and the action. Calls
-/// marked 'nounwind' have no entry and must not be contained in the try-range
-/// of any entry - they form gaps in the table. Entries must be ordered by
-/// try-range address.
-void DwarfException::
-ComputeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
- const RangeMapType &PadMap,
- const SmallVectorImpl<const LandingPadInfo *> &LandingPads,
- const SmallVectorImpl<unsigned> &FirstActions) {
- // The end label of the previous invoke or nounwind try-range.
- unsigned LastLabel = 0;
-
- // Whether there is a potentially throwing instruction (currently this means
- // an ordinary call) between the end of the previous try-range and now.
- bool SawPotentiallyThrowing = false;
-
- // Whether the last CallSite entry was for an invoke.
- bool PreviousIsInvoke = false;
-
- // Visit all instructions in order of address.
- for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
- I != E; ++I) {
- for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
- MI != E; ++MI) {
- if (!MI->isLabel()) {
- if (MI->getDesc().isCall())
- SawPotentiallyThrowing |= !CallToNoUnwindFunction(MI);
-
- continue;
- }
-
- unsigned BeginLabel = MI->getOperand(0).getImm();
- assert(BeginLabel && "Invalid label!");
-
- // End of the previous try-range?
- if (BeginLabel == LastLabel)
- SawPotentiallyThrowing = false;
-
- // Beginning of a new try-range?
- RangeMapType::const_iterator L = PadMap.find(BeginLabel);
- if (L == PadMap.end())
- // Nope, it was just some random label.
- continue;
-
- const PadRange &P = L->second;
- const LandingPadInfo *LandingPad = LandingPads[P.PadIndex];
- assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] &&
- "Inconsistent landing pad map!");
-
- // For Dwarf exception handling (SjLj handling doesn't use this): if some
- // instruction between the previous try-range and this one may throw,
- // create a call-site entry with no landing pad for the region between the
- // try-ranges.
- if (SawPotentiallyThrowing &&
- MAI->getExceptionHandlingType() == ExceptionHandling::Dwarf) {
- CallSiteEntry Site = { LastLabel, BeginLabel, 0, 0 };
- CallSites.push_back(Site);
- PreviousIsInvoke = false;
- }
-
- LastLabel = LandingPad->EndLabels[P.RangeIndex];
- assert(BeginLabel && LastLabel && "Invalid landing pad!");
-
- if (LandingPad->LandingPadLabel) {
- // This try-range is for an invoke.
- CallSiteEntry Site = {
- BeginLabel,
- LastLabel,
- LandingPad->LandingPadLabel,
- FirstActions[P.PadIndex]
- };
-
- // Try to merge with the previous call-site. SJLJ doesn't do this
- if (PreviousIsInvoke &&
- MAI->getExceptionHandlingType() == ExceptionHandling::Dwarf) {
- CallSiteEntry &Prev = CallSites.back();
- if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) {
- // Extend the range of the previous entry.
- Prev.EndLabel = Site.EndLabel;
- continue;
- }
- }
-
- // Otherwise, create a new call-site.
- if (MAI->getExceptionHandlingType() == ExceptionHandling::Dwarf)
- CallSites.push_back(Site);
- else {
- // SjLj EH must maintain the call sites in the order assigned
- // to them by the SjLjPrepare pass.
- unsigned SiteNo = MMI->getCallSiteBeginLabel(BeginLabel);
- if (CallSites.size() < SiteNo)
- CallSites.resize(SiteNo);
- CallSites[SiteNo - 1] = Site;
- }
- PreviousIsInvoke = true;
- } else {
- // Create a gap.
- PreviousIsInvoke = false;
- }
- }
- }
-
- // If some instruction between the previous try-range and the end of the
- // function may throw, create a call-site entry with no landing pad for the
- // region following the try-range.
- if (SawPotentiallyThrowing &&
- MAI->getExceptionHandlingType() == ExceptionHandling::Dwarf) {
- CallSiteEntry Site = { LastLabel, 0, 0, 0 };
- CallSites.push_back(Site);
- }
-}
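
One detail of the table construction above that is easy to miss is the DWARF-only merge step: consecutive invoke entries that share a landing pad and action are collapsed into a single try-range. A small standalone sketch of that rule (the Site struct and function name are illustrative, not LLVM's):

#include <vector>

struct Site { unsigned Begin, End, Pad, Action; };  // illustrative call-site entry

// Merge adjacent invoke entries with the same landing pad and action by
// extending the previous entry's range, as the DWARF path above does.
std::vector<Site> mergeCallSites(const std::vector<Site> &In) {
  std::vector<Site> Out;
  for (unsigned I = 0; I != In.size(); ++I) {
    const Site &S = In[I];
    if (!Out.empty() && S.Pad != 0 &&
        Out.back().Pad == S.Pad && Out.back().Action == S.Action)
      Out.back().End = S.End;  // extend the previous try-range
    else
      Out.push_back(S);
  }
  return Out;
}
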
-
-/// EmitExceptionTable - Emit landing pads and actions.
-///
-/// The general organization of the table is complex, but the basic concepts are
-/// easy. First there is a header which describes the location and organization
-/// of the three components that follow.
-///
-/// 1. The landing pad site information describes the range of code covered by
-/// the try. In our case it's an accumulation of the ranges covered by the
-/// invokes in the try. There is also a reference to the landing pad that
-/// handles the exception once processed. Finally an index into the actions
-/// table.
-/// 2. The action table, in our case, is composed of pairs of type IDs and next
-/// action offset. Starting with the action index from the landing pad
-/// site, each type ID is checked for a match to the current exception. If
-/// it matches then the exception and type id are passed on to the landing
-/// pad. Otherwise the next action is looked up. This chain is terminated
-/// with a next action of zero. If no type id is found then the frame is
-/// unwound and handling continues.
-/// 3. Type ID table contains references to all the C++ typeinfo for all
- /// catches in the function. This table is reverse indexed, base 1.
-void DwarfException::EmitExceptionTable() {
- const std::vector<GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
- const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
- const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
- if (PadInfos.empty()) return;
-
- // Sort the landing pads in order of their type ids. This is used to fold
- // duplicate actions.
- SmallVector<const LandingPadInfo *, 64> LandingPads;
- LandingPads.reserve(PadInfos.size());
-
- for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
- LandingPads.push_back(&PadInfos[i]);
-
- std::sort(LandingPads.begin(), LandingPads.end(), PadLT);
-
- // Compute the actions table and gather the first action index for each
- // landing pad site.
- SmallVector<ActionEntry, 32> Actions;
- SmallVector<unsigned, 64> FirstActions;
- unsigned SizeActions=ComputeActionsTable(LandingPads, Actions, FirstActions);
-
- // Invokes and nounwind calls have entries in PadMap (due to being bracketed
- // by try-range labels when lowered). Ordinary calls do not, so appropriate
- // try-ranges for them need to be deduced when using DWARF exception handling.
- RangeMapType PadMap;
- for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
- const LandingPadInfo *LandingPad = LandingPads[i];
- for (unsigned j = 0, E = LandingPad->BeginLabels.size(); j != E; ++j) {
- unsigned BeginLabel = LandingPad->BeginLabels[j];
- assert(!PadMap.count(BeginLabel) && "Duplicate landing pad labels!");
- PadRange P = { i, j };
- PadMap[BeginLabel] = P;
- }
- }
-
- // Compute the call-site table.
- SmallVector<CallSiteEntry, 64> CallSites;
- ComputeCallSiteTable(CallSites, PadMap, LandingPads, FirstActions);
-
- // Final tallies.
-
- // Call sites.
- const unsigned SiteStartSize = SizeOfEncodedValue(dwarf::DW_EH_PE_udata4);
- const unsigned SiteLengthSize = SizeOfEncodedValue(dwarf::DW_EH_PE_udata4);
- const unsigned LandingPadSize = SizeOfEncodedValue(dwarf::DW_EH_PE_udata4);
- bool IsSJLJ = MAI->getExceptionHandlingType() == ExceptionHandling::SjLj;
- bool HaveTTData = IsSJLJ ? (!TypeInfos.empty() || !FilterIds.empty()) : true;
- unsigned CallSiteTableLength;
-
- if (IsSJLJ)
- CallSiteTableLength = 0;
- else
- CallSiteTableLength = CallSites.size() *
- (SiteStartSize + SiteLengthSize + LandingPadSize);
-
- for (unsigned i = 0, e = CallSites.size(); i < e; ++i) {
- CallSiteTableLength += MCAsmInfo::getULEB128Size(CallSites[i].Action);
- if (IsSJLJ)
- CallSiteTableLength += MCAsmInfo::getULEB128Size(i);
- }
-
- // Type infos.
- const MCSection *LSDASection = Asm->getObjFileLowering().getLSDASection();
- unsigned TTypeEncoding;
- unsigned TypeFormatSize;
-
- if (!HaveTTData) {
- // For SjLj exceptions, if there is no TypeInfo, then we just explicitly say
- // that we're omitting that bit.
- TTypeEncoding = dwarf::DW_EH_PE_omit;
- TypeFormatSize = SizeOfEncodedValue(dwarf::DW_EH_PE_absptr);
- } else {
- // Okay, we have actual filters or typeinfos to emit. As such, we need to
- // pick a type encoding for them. We're about to emit a list of pointers to
- // typeinfo objects at the end of the LSDA. However, unless we're in static
- // mode, this reference will require a relocation by the dynamic linker.
- //
- // Because of this, we have a couple of options:
- //
- // 1) If we are in -static mode, we can always use an absolute reference
- // from the LSDA, because the static linker will resolve it.
- //
- // 2) Otherwise, if the LSDA section is writable, we can output the direct
- // reference to the typeinfo and allow the dynamic linker to relocate
- // it. Since it is in a writable section, the dynamic linker won't
- // have a problem.
- //
- // 3) Finally, if we're in PIC mode and the LSDA section isn't writable,
- // we need to use some form of indirection. For example, on Darwin,
- // we can output a statically-relocatable reference to a dyld stub. The
- // offset to the stub is constant, but the contents are in a section
- // that is updated by the dynamic linker. This is easy enough, but we
- // need to tell the personality function of the unwinder to indirect
- // through the dyld stub.
- //
- // FIXME: When (3) is actually implemented, we'll have to emit the stubs
- // somewhere. This predicate should be moved to a shared location that is
- // in target-independent code.
- //
- TTypeEncoding = Asm->getObjFileLowering().getTTypeEncoding();
- TypeFormatSize = SizeOfEncodedValue(TTypeEncoding);
- }
-
- // Begin the exception table.
- Asm->OutStreamer.SwitchSection(LSDASection);
- Asm->EmitAlignment(2, 0, 0, false);
-
- // Emit the LSDA.
- O << "GCC_except_table" << SubprogramCount << ":\n";
- EmitLabel("exception", SubprogramCount);
-
- if (IsSJLJ) {
- SmallString<16> LSDAName;
- raw_svector_ostream(LSDAName) << MAI->getPrivateGlobalPrefix() <<
- "_LSDA_" << Asm->getFunctionNumber();
- O << LSDAName.str() << ":\n";
- }
-
- // Emit the LSDA header.
- EmitEncodingByte(dwarf::DW_EH_PE_omit, "@LPStart");
- EmitEncodingByte(TTypeEncoding, "@TType");
-
- // The type infos need to be aligned. GCC does this by inserting padding just
- // before the type infos. However, this changes the size of the exception
- // table, so you need to take this into account when you output the exception
- // table size. However, the size is output using a variable length encoding.
- // So by increasing the size by inserting padding, you may increase the number
- // of bytes used for writing the size. If it increases, say by one byte, then
- // you now need to output one less byte of padding to get the type infos
- // aligned. However this decreases the size of the exception table. This
- // changes the value you have to output for the exception table size. Due to
- // the variable length encoding, the number of bytes used for writing the
- // length may decrease. If so, you then have to increase the amount of
- // padding. And so on. If you look carefully at the GCC code you will see that
- // it indeed does this in a loop, going on and on until the values stabilize.
- // We chose another solution: don't output padding inside the table like GCC
- // does, instead output it before the table.
- unsigned SizeTypes = TypeInfos.size() * TypeFormatSize;
- unsigned CallSiteTableLengthSize =
- MCAsmInfo::getULEB128Size(CallSiteTableLength);
- unsigned TTypeBaseOffset =
- sizeof(int8_t) + // Call site format
- CallSiteTableLengthSize + // Call site table length size
- CallSiteTableLength + // Call site table length
- SizeActions + // Actions size
- SizeTypes;
- unsigned TTypeBaseOffsetSize = MCAsmInfo::getULEB128Size(TTypeBaseOffset);
- unsigned TotalSize =
- sizeof(int8_t) + // LPStart format
- sizeof(int8_t) + // TType format
- (HaveTTData ? TTypeBaseOffsetSize : 0) + // TType base offset size
- TTypeBaseOffset; // TType base offset
- unsigned SizeAlign = (4 - TotalSize) & 3;
-
- if (HaveTTData) {
- // Account for any extra padding that will be added to the call site table
- // length.
- EmitULEB128(TTypeBaseOffset, "@TType base offset", SizeAlign);
- SizeAlign = 0;
- }
-
- // SjLj Exception handling
- if (IsSJLJ) {
- EmitEncodingByte(dwarf::DW_EH_PE_udata4, "Call site");
-
- // Add extra padding if it wasn't added to the TType base offset.
- EmitULEB128(CallSiteTableLength, "Call site table length", SizeAlign);
-
- // Emit the landing pad site information.
- unsigned idx = 0;
- for (SmallVectorImpl<CallSiteEntry>::const_iterator
- I = CallSites.begin(), E = CallSites.end(); I != E; ++I, ++idx) {
- const CallSiteEntry &S = *I;
-
- // Offset of the landing pad, counted in 16-byte bundles relative to the
- // @LPStart address.
- EmitULEB128(idx, "Landing pad");
-
- // Offset of the first associated action record, relative to the start of
- // the action table. This value is biased by 1 (1 indicates the start of
- // the action table), and 0 indicates that there are no actions.
- EmitULEB128(S.Action, "Action");
- }
- } else {
- // DWARF Exception handling
- assert(MAI->getExceptionHandlingType() == ExceptionHandling::Dwarf);
-
- // The call-site table is a list of all call sites that may throw an
- // exception (including C++ 'throw' statements) in the procedure
- // fragment. It immediately follows the LSDA header. Each entry indicates,
- // for a given call, the first corresponding action record and corresponding
- // landing pad.
- //
- // The table begins with the number of bytes, stored as an LEB128
- // compressed, unsigned integer. The records immediately follow the record
- // count. They are sorted in increasing call-site address. Each record
- // indicates:
- //
- // * The position of the call-site.
- // * The position of the landing pad.
- // * The first action record for that call site.
- //
- // A missing entry in the call-site table indicates that a call is not
- // supposed to throw.
-
- // Emit the landing pad call site table.
- EmitEncodingByte(dwarf::DW_EH_PE_udata4, "Call site");
-
- // Add extra padding if it wasn't added to the TType base offset.
- EmitULEB128(CallSiteTableLength, "Call site table length", SizeAlign);
-
- for (SmallVectorImpl<CallSiteEntry>::const_iterator
- I = CallSites.begin(), E = CallSites.end(); I != E; ++I) {
- const CallSiteEntry &S = *I;
- const char *BeginTag;
- unsigned BeginNumber;
-
- if (!S.BeginLabel) {
- BeginTag = "eh_func_begin";
- BeginNumber = SubprogramCount;
- } else {
- BeginTag = "label";
- BeginNumber = S.BeginLabel;
- }
-
- // Offset of the call site relative to the previous call site, counted in
- // number of 16-byte bundles. The first call site is counted relative to
- // the start of the procedure fragment.
- EmitSectionOffset(BeginTag, "eh_func_begin", BeginNumber, SubprogramCount,
- true, true);
- EOL("Region start");
-
- if (!S.EndLabel)
- EmitDifference("eh_func_end", SubprogramCount, BeginTag, BeginNumber,
- true);
- else
- EmitDifference("label", S.EndLabel, BeginTag, BeginNumber, true);
-
- EOL("Region length");
-
- // Offset of the landing pad, counted in 16-byte bundles relative to the
- // @LPStart address.
- if (!S.PadLabel) {
- Asm->OutStreamer.AddComment("Landing pad");
- Asm->OutStreamer.EmitIntValue(0, 4/*size*/, 0/*addrspace*/);
- } else {
- EmitSectionOffset("label", "eh_func_begin", S.PadLabel, SubprogramCount,
- true, true);
- EOL("Landing pad");
- }
-
- // Offset of the first associated action record, relative to the start of
- // the action table. This value is biased by 1 (1 indicates the start of
- // the action table), and 0 indicates that there are no actions.
- EmitULEB128(S.Action, "Action");
- }
- }
-
- // Emit the Action Table.
- if (Actions.size() != 0) EOL("-- Action Record Table --");
- for (SmallVectorImpl<ActionEntry>::const_iterator
- I = Actions.begin(), E = Actions.end(); I != E; ++I) {
- const ActionEntry &Action = *I;
- EOL("Action Record:");
-
- // Type Filter
- //
- // Used by the runtime to match the type of the thrown exception to the
- // type of the catch clauses or the types in the exception specification.
- EmitSLEB128(Action.ValueForTypeID, " TypeInfo index");
-
- // Action Record
- //
- // Self-relative signed displacement in bytes of the next action record,
- // or 0 if there is no next action record.
- EmitSLEB128(Action.NextAction, " Next action");
- }
-
- // Emit the Catch TypeInfos.
- if (!TypeInfos.empty()) EOL("-- Catch TypeInfos --");
- for (std::vector<GlobalVariable *>::const_reverse_iterator
- I = TypeInfos.rbegin(), E = TypeInfos.rend(); I != E; ++I) {
- const GlobalVariable *GV = *I;
-
- if (GV) {
- EmitReference(GV, TTypeEncoding);
- EOL("TypeInfo");
- } else {
- PrintRelDirective(TTypeEncoding);
- O << "0x0";
- EOL("");
- }
- }
-
- // Emit the Exception Specifications.
- if (!FilterIds.empty()) EOL("-- Filter IDs --");
- for (std::vector<unsigned>::const_iterator
- I = FilterIds.begin(), E = FilterIds.end(); I < E; ++I) {
- unsigned TypeID = *I;
- EmitULEB128(TypeID, TypeID != 0 ? "Exception specification" : 0);
- }
-
- Asm->EmitAlignment(2, 0, 0, false);
-}
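
The header-size arithmetic above deserves a standalone look. Because the call-site table length and the TType base offset are both ULEB128-encoded, padding emitted inside the table could change their encoded width, which is exactly the feedback loop the long comment describes; the code sidesteps it by computing a fixed pad up front and attaching it to the "@TType base offset" ULEB128 (or, failing that, to the call-site table length). A small sketch of that size computation with made-up input sizes (variable names mirror the code above):

#include <stdio.h>
#include <stdint.h>

static unsigned ulebSize(uint64_t V) {  // bytes taken by V in ULEB128 form
  unsigned N = 0;
  do { V >>= 7; ++N; } while (V != 0);
  return N;
}

int main() {
  // Hypothetical sizes standing in for the values computed above.
  unsigned CallSiteTableLength = 52, SizeActions = 10, SizeTypes = 3 * 4;

  unsigned TTypeBaseOffset = 1 /* call-site format byte */
                             + ulebSize(CallSiteTableLength)  // table length size
                             + CallSiteTableLength + SizeActions + SizeTypes;
  unsigned TotalSize = 1 /* LPStart format */ + 1 /* TType format */
                       + ulebSize(TTypeBaseOffset) + TTypeBaseOffset;
  unsigned SizeAlign = (4 - TotalSize) & 3;  // pad the header up to 4-byte alignment

  printf("TType base offset %u, total size %u, pad %u byte(s)\n",
         TTypeBaseOffset, TotalSize, SizeAlign);
  return 0;
}
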
-
-/// EndModule - Emit all exception information that should come after the
-/// content.
-void DwarfException::EndModule() {
- if (MAI->getExceptionHandlingType() != ExceptionHandling::Dwarf)
- return;
-
- if (!shouldEmitMovesModule && !shouldEmitTableModule)
- return;
-
- if (TimePassesIsEnabled)
- ExceptionTimer->startTimer();
-
- const std::vector<Function *> Personalities = MMI->getPersonalities();
-
- for (unsigned I = 0, E = Personalities.size(); I < E; ++I)
- EmitCIE(Personalities[I], I);
-
- for (std::vector<FunctionEHFrameInfo>::iterator
- I = EHFrames.begin(), E = EHFrames.end(); I != E; ++I)
- EmitFDE(*I);
-
- if (TimePassesIsEnabled)
- ExceptionTimer->stopTimer();
-}
-
-/// BeginFunction - Gather pre-function exception information. Assumes it's
-/// being emitted immediately after the function entry point.
-void DwarfException::BeginFunction(const MachineFunction *MF) {
- if (!MMI || !MAI->doesSupportExceptionHandling()) return;
-
- if (TimePassesIsEnabled)
- ExceptionTimer->startTimer();
-
- this->MF = MF;
- shouldEmitTable = shouldEmitMoves = false;
-
- // Map all labels and get rid of any dead landing pads.
- MMI->TidyLandingPads();
-
- // If any landing pads survive, we need an EH table.
- if (!MMI->getLandingPads().empty())
- shouldEmitTable = true;
-
- // See if we need frame move info.
- if (!MF->getFunction()->doesNotThrow() || UnwindTablesMandatory)
- shouldEmitMoves = true;
-
- if (shouldEmitMoves || shouldEmitTable)
- // Assumes in correct section after the entry point.
- EmitLabel("eh_func_begin", ++SubprogramCount);
-
- shouldEmitTableModule |= shouldEmitTable;
- shouldEmitMovesModule |= shouldEmitMoves;
-
- if (TimePassesIsEnabled)
- ExceptionTimer->stopTimer();
-}
-
-/// EndFunction - Gather and emit post-function exception information.
-///
-void DwarfException::EndFunction() {
- if (!shouldEmitMoves && !shouldEmitTable) return;
-
- if (TimePassesIsEnabled)
- ExceptionTimer->startTimer();
-
- EmitLabel("eh_func_end", SubprogramCount);
- EmitExceptionTable();
-
- MCSymbol *FunctionEHSym =
- Asm->GetSymbolWithGlobalValueBase(MF->getFunction(), ".eh",
- Asm->MAI->is_EHSymbolPrivate());
-
- // Save EH frame information
- EHFrames.push_back(FunctionEHFrameInfo(FunctionEHSym, SubprogramCount,
- MMI->getPersonalityIndex(),
- MF->getFrameInfo()->hasCalls(),
- !MMI->getLandingPads().empty(),
- MMI->getFrameMoves(),
- MF->getFunction()));
-
- // Record if this personality index uses a landing pad.
- UsesLSDA[MMI->getPersonalityIndex()] |= !MMI->getLandingPads().empty();
-
- if (TimePassesIsEnabled)
- ExceptionTimer->stopTimer();
-}
diff --git a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfException.h b/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
deleted file mode 100644
index 3db1a00..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfException.h
+++ /dev/null
@@ -1,205 +0,0 @@
-//===-- DwarfException.h - Dwarf Exception Framework -----------*- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains support for writing dwarf exception info into asm files.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_ASMPRINTER_DWARFEXCEPTION_H
-#define LLVM_CODEGEN_ASMPRINTER_DWARFEXCEPTION_H
-
-#include "DIE.h"
-#include "DwarfPrinter.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/ADT/DenseMap.h"
-#include <string>
-
-namespace llvm {
-
-struct LandingPadInfo;
-class MachineModuleInfo;
-class MCAsmInfo;
-class MCExpr;
-class Timer;
-class raw_ostream;
-
-//===----------------------------------------------------------------------===//
-/// DwarfException - Emits Dwarf exception handling directives.
-///
-class DwarfException : public DwarfPrinter {
- struct FunctionEHFrameInfo {
- MCSymbol *FunctionEHSym; // L_foo.eh
- unsigned Number;
- unsigned PersonalityIndex;
- bool hasCalls;
- bool hasLandingPads;
- std::vector<MachineMove> Moves;
- const Function *function;
-
- FunctionEHFrameInfo(MCSymbol *EHSym, unsigned Num, unsigned P,
- bool hC, bool hL,
- const std::vector<MachineMove> &M,
- const Function *f):
- FunctionEHSym(EHSym), Number(Num), PersonalityIndex(P),
- hasCalls(hC), hasLandingPads(hL), Moves(M), function (f) { }
- };
-
- std::vector<FunctionEHFrameInfo> EHFrames;
-
- /// UsesLSDA - Indicates whether an FDE that uses the CIE at the given index
- /// uses an LSDA. If so, then we need to encode that information in the CIE's
- /// augmentation.
- DenseMap<unsigned, bool> UsesLSDA;
-
- /// shouldEmitTable - Per-function flag to indicate if EH tables should
- /// be emitted.
- bool shouldEmitTable;
-
- /// shouldEmitMoves - Per-function flag to indicate if frame moves info
- /// should be emitted.
- bool shouldEmitMoves;
-
- /// shouldEmitTableModule - Per-module flag to indicate if EH tables
- /// should be emitted.
- bool shouldEmitTableModule;
-
- /// shouldEmitMovesModule - Per-module flag to indicate if frame moves
- /// should be emitted.
- bool shouldEmitMovesModule;
-
- /// ExceptionTimer - Timer for the Dwarf exception writer.
- Timer *ExceptionTimer;
-
- /// EmitCIE - Emit a Common Information Entry (CIE). This holds information
- /// that is shared among many Frame Description Entries. There is at least
- /// one CIE in every non-empty .debug_frame section.
- void EmitCIE(const Function *Personality, unsigned Index);
-
- /// EmitFDE - Emit the Frame Description Entry (FDE) for the function.
- void EmitFDE(const FunctionEHFrameInfo &EHFrameInfo);
-
- /// EmitExceptionTable - Emit landing pads and actions.
- ///
- /// The general organization of the table is complex, but the basic concepts
- /// are easy. First there is a header which describes the location and
- /// organization of the three components that follow.
- /// 1. The landing pad site information describes the range of code covered
- /// by the try. In our case it's an accumulation of the ranges covered
- /// by the invokes in the try. There is also a reference to the landing
- /// pad that handles the exception once processed. Finally an index into
- /// the actions table.
- /// 2. The action table, in our case, is composed of pairs of type ids
- /// and next action offset. Starting with the action index from the
- /// landing pad site, each type Id is checked for a match to the current
- /// exception. If it matches then the exception and type id are passed
- /// on to the landing pad. Otherwise the next action is looked up. This
- /// chain is terminated with a next action of zero. If no type id is
- /// found the frame is unwound and handling continues.
- /// 3. Type id table contains references to all the C++ typeinfo for all
- /// catches in the function. This table is reverse indexed, base 1.
-
- /// SharedTypeIds - How many leading type ids two landing pads have in common.
- static unsigned SharedTypeIds(const LandingPadInfo *L,
- const LandingPadInfo *R);
-
- /// PadLT - Order landing pads lexicographically by type id.
- static bool PadLT(const LandingPadInfo *L, const LandingPadInfo *R);
-
- struct KeyInfo {
- static inline unsigned getEmptyKey() { return -1U; }
- static inline unsigned getTombstoneKey() { return -2U; }
- static unsigned getHashValue(const unsigned &Key) { return Key; }
- static bool isEqual(unsigned LHS, unsigned RHS) { return LHS == RHS; }
- };
-
- /// PadRange - Structure holding a try-range and the associated landing pad.
- struct PadRange {
- // The index of the landing pad.
- unsigned PadIndex;
- // The index of the begin and end labels in the landing pad's label lists.
- unsigned RangeIndex;
- };
-
- typedef DenseMap<unsigned, PadRange, KeyInfo> RangeMapType;
-
- /// ActionEntry - Structure describing an entry in the actions table.
- struct ActionEntry {
- int ValueForTypeID; // The value to write - may not be equal to the type id.
- int NextAction;
- unsigned Previous;
- };
-
- /// CallSiteEntry - Structure describing an entry in the call-site table.
- struct CallSiteEntry {
- // The 'try-range' is BeginLabel .. EndLabel.
- unsigned BeginLabel; // zero indicates the start of the function.
- unsigned EndLabel; // zero indicates the end of the function.
-
- // The landing pad starts at PadLabel.
- unsigned PadLabel; // zero indicates that there is no landing pad.
- unsigned Action;
- };
-
- /// ComputeActionsTable - Compute the actions table and gather the first
- /// action index for each landing pad site.
- unsigned ComputeActionsTable(const SmallVectorImpl<const LandingPadInfo*>&LPs,
- SmallVectorImpl<ActionEntry> &Actions,
- SmallVectorImpl<unsigned> &FirstActions);
-
- /// CallToNoUnwindFunction - Return `true' if this is a call to a function
- /// marked `nounwind'. Return `false' otherwise.
- bool CallToNoUnwindFunction(const MachineInstr *MI);
-
- /// ComputeCallSiteTable - Compute the call-site table. The entry for an
- /// invoke has a try-range containing the call, a non-zero landing pad and an
- /// appropriate action. The entry for an ordinary call has a try-range
- /// containing the call and zero for the landing pad and the action. Calls
- /// marked 'nounwind' have no entry and must not be contained in the try-range
- /// of any entry - they form gaps in the table. Entries must be ordered by
- /// try-range address.
- void ComputeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
- const RangeMapType &PadMap,
- const SmallVectorImpl<const LandingPadInfo *> &LPs,
- const SmallVectorImpl<unsigned> &FirstActions);
- void EmitExceptionTable();
-
- /// CreateLabelDiff - Emit a label and subtract it from the expression we
- /// already have. This is equivalent to emitting "foo - .", but we have to
- /// emit the label for "." directly.
- const MCExpr *CreateLabelDiff(const MCExpr *ExprRef, const char *LabelName,
- unsigned Index);
-public:
- //===--------------------------------------------------------------------===//
- // Main entry points.
- //
- DwarfException(raw_ostream &OS, AsmPrinter *A, const MCAsmInfo *T);
- virtual ~DwarfException();
-
- /// BeginModule - Emit all exception information that should come prior to the
- /// content.
- void BeginModule(Module *m, MachineModuleInfo *mmi) {
- this->M = m;
- this->MMI = mmi;
- }
-
- /// EndModule - Emit all exception information that should come after the
- /// content.
- void EndModule();
-
- /// BeginFunction - Gather pre-function exception information. Assumes being
- /// emitted immediately after the function entry point.
- void BeginFunction(const MachineFunction *MF);
-
- /// EndFunction - Gather and emit post-function exception information.
- void EndFunction();
-};
-
-} // End of namespace llvm
-
-#endif
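
The three-part layout described in the EmitExceptionTable comment above is easiest to see in the action chain (step 2). Below is a minimal, standalone sketch of that walk; ActionRecord, the 1-based indexing and the linear scan are illustrative assumptions, not the packed byte format the emitter actually writes.

    #include <vector>

    // Illustrative action record: a type filter plus a link to the next
    // action. In the real LSDA these are offsets in a packed table.
    struct ActionRecord {
      int TypeFilter;   // 1-based index into the type id table; 0 = cleanup
      int NextAction;   // 1-based index of the next record; 0 ends the chain
    };

    // Walk the chain starting at FirstAction (0 = no action for this call
    // site) and return the filter matching the thrown type, or 0 if none
    // matches and unwinding should continue past this frame.
    static int findMatchingFilter(const std::vector<ActionRecord> &Actions,
                                  int FirstAction, int ThrownTypeFilter) {
      for (int Idx = FirstAction; Idx != 0; Idx = Actions[Idx - 1].NextAction)
        if (Actions[Idx - 1].TypeFilter == ThrownTypeFilter)
          return Actions[Idx - 1].TypeFilter;
      return 0;
    }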
diff --git a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfLabel.cpp b/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfLabel.cpp
deleted file mode 100644
index 6e9293a..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfLabel.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-//===--- lib/CodeGen/DwarfLabel.cpp - Dwarf Label -------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// DWARF Labels
-//
-//===----------------------------------------------------------------------===//
-
-#include "DwarfLabel.h"
-#include "llvm/ADT/FoldingSet.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-/// Profile - Used to gather unique data for the folding set.
-///
-void DWLabel::Profile(FoldingSetNodeID &ID) const {
- ID.AddString(Tag);
- ID.AddInteger(Number);
-}
-
-#ifndef NDEBUG
-void DWLabel::print(raw_ostream &O) const {
- O << "." << Tag;
- if (Number) O << Number;
-}
-#endif
diff --git a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfLabel.h b/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfLabel.h
deleted file mode 100644
index 0c0cc4b..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfLabel.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//===--- lib/CodeGen/DwarfLabel.h - Dwarf Label -----------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// DWARF Labels.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CODEGEN_ASMPRINTER_DWARFLABEL_H__
-#define CODEGEN_ASMPRINTER_DWARFLABEL_H__
-
-namespace llvm {
- class FoldingSetNodeID;
- class raw_ostream;
-
- //===--------------------------------------------------------------------===//
- /// DWLabel - Labels are used to track locations in the assembler file.
- /// Labels appear in the form @verbatim <prefix><Tag><Number> @endverbatim,
- /// where the tag is a category of label (Ex. location) and number is a value
- /// unique in that category.
- class DWLabel {
- /// Tag - Label category tag. Should always be a statically declared C
- /// string.
- ///
- const char *Tag;
-
- /// Number - Value to make label unique.
- ///
- unsigned Number;
- public:
- DWLabel(const char *T, unsigned N) : Tag(T), Number(N) {}
-
- // Accessors.
- const char *getTag() const { return Tag; }
- unsigned getNumber() const { return Number; }
-
- /// Profile - Used to gather unique data for the folding set.
- ///
- void Profile(FoldingSetNodeID &ID) const;
-
-#ifndef NDEBUG
- void print(raw_ostream &O) const;
-#endif
- };
-} // end llvm namespace
-
-#endif
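
To make the <prefix><Tag><Number> scheme concrete, here is a tiny sketch mirroring DwarfPrinter::PrintLabelName/EmitLabel further down in this patch; the "L" private prefix is an assumption (the real one comes from MCAsmInfo::getPrivateGlobalPrefix() and is target dependent).

    #include <cstdio>

    // Mirrors PrintLabelName + EmitLabel: prefix, tag, optional number, colon.
    static void emitLabel(const char *PrivatePrefix, const char *Tag,
                          unsigned Number) {
      if (Number)
        std::printf("%s%s%u:\n", PrivatePrefix, Tag, Number);
      else
        std::printf("%s%s:\n", PrivatePrefix, Tag);
    }

    int main() {
      emitLabel("L", "eh_func_begin", 5);     // -> Leh_func_begin5:
      emitLabel("L", "section_eh_frame", 0);  // -> Lsection_eh_frame:
      return 0;
    }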
diff --git a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfPrinter.cpp b/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfPrinter.cpp
deleted file mode 100644
index 28ff0eb..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfPrinter.cpp
+++ /dev/null
@@ -1,402 +0,0 @@
-//===--- lib/CodeGen/DwarfPrinter.cpp - Dwarf Printer ---------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Emit general DWARF directives.
-//
-//===----------------------------------------------------------------------===//
-
-#include "DwarfPrinter.h"
-#include "llvm/Module.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Support/Dwarf.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/ADT/SmallString.h"
-using namespace llvm;
-
-DwarfPrinter::DwarfPrinter(raw_ostream &OS, AsmPrinter *A, const MCAsmInfo *T,
- const char *flavor)
-: O(OS), Asm(A), MAI(T), TD(Asm->TM.getTargetData()),
- RI(Asm->TM.getRegisterInfo()), M(NULL), MF(NULL), MMI(NULL),
- SubprogramCount(0), Flavor(flavor), SetCounter(1) {}
-
-/// SizeOfEncodedValue - Return the size of the encoding in bytes.
-unsigned DwarfPrinter::SizeOfEncodedValue(unsigned Encoding) const {
- if (Encoding == dwarf::DW_EH_PE_omit)
- return 0;
-
- switch (Encoding & 0x07) {
- case dwarf::DW_EH_PE_absptr:
- return TD->getPointerSize();
- case dwarf::DW_EH_PE_udata2:
- return 2;
- case dwarf::DW_EH_PE_udata4:
- return 4;
- case dwarf::DW_EH_PE_udata8:
- return 8;
- }
-
- assert(0 && "Invalid encoded value.");
- return 0;
-}
-
-void DwarfPrinter::PrintRelDirective(bool Force32Bit, bool isInSection) const {
- if (isInSection && MAI->getDwarfSectionOffsetDirective())
- O << MAI->getDwarfSectionOffsetDirective();
- else if (Force32Bit || TD->getPointerSize() == sizeof(int32_t))
- O << MAI->getData32bitsDirective();
- else
- O << MAI->getData64bitsDirective();
-}
-
-void DwarfPrinter::PrintRelDirective(unsigned Encoding) const {
- unsigned Size = SizeOfEncodedValue(Encoding);
- assert((Size == 4 || Size == 8) && "Do not support other types or rels!");
-
- O << (Size == 4 ?
- MAI->getData32bitsDirective() : MAI->getData64bitsDirective());
-}
-
-/// EOL - Print a newline character to asm stream. If a comment is present
-/// then it will be printed first. Comments should not contain '\n'.
-void DwarfPrinter::EOL(const Twine &Comment) const {
- if (Asm->VerboseAsm && !Comment.isTriviallyEmpty()) {
- Asm->O.PadToColumn(MAI->getCommentColumn());
- Asm->O << Asm->MAI->getCommentString() << ' ' << Comment;
- }
- Asm->O << '\n';
-}
-
-static const char *DecodeDWARFEncoding(unsigned Encoding) {
- switch (Encoding) {
- case dwarf::DW_EH_PE_absptr: return "absptr";
- case dwarf::DW_EH_PE_omit: return "omit";
- case dwarf::DW_EH_PE_pcrel: return "pcrel";
- case dwarf::DW_EH_PE_udata4: return "udata4";
- case dwarf::DW_EH_PE_udata8: return "udata8";
- case dwarf::DW_EH_PE_sdata4: return "sdata4";
- case dwarf::DW_EH_PE_sdata8: return "sdata8";
- case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_udata4: return "pcrel udata4";
- case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4: return "pcrel sdata4";
- case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_udata8: return "pcrel udata8";
- case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata8: return "pcrel sdata8";
- case dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |dwarf::DW_EH_PE_udata4:
- return "indirect pcrel udata4";
- case dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |dwarf::DW_EH_PE_sdata4:
- return "indirect pcrel sdata4";
- case dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |dwarf::DW_EH_PE_udata8:
- return "indirect pcrel udata8";
- case dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |dwarf::DW_EH_PE_sdata8:
- return "indirect pcrel sdata8";
- }
-
- return "<unknown encoding>";
-}
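
A worked example of the encodings decoded above: DW_EH_PE_pcrel (0x10) combined with DW_EH_PE_sdata4 (0x0b) is the byte 0x1b, printed as "pcrel sdata4", and SizeOfEncodedValue masks the low bits to report 4 bytes. The sketch below restates the two constants locally (normally they come from llvm/Support/Dwarf.h).

    #include <cstdint>
    #include <cstdio>

    static const uint8_t DW_EH_PE_sdata4 = 0x0b;  // standard DWARF EH value
    static const uint8_t DW_EH_PE_pcrel  = 0x10;  // standard DWARF EH value

    int main() {
      uint8_t Enc = DW_EH_PE_pcrel | DW_EH_PE_sdata4;  // 0x1b, "pcrel sdata4"
      unsigned Size;
      switch (Enc & 0x07) {  // same low-bit trick as SizeOfEncodedValue above
      case 0x02: Size = 2; break;        // *data2
      case 0x03: Size = 4; break;        // *data4
      case 0x04: Size = 8; break;        // *data8
      default:   Size = sizeof(void *);  // absptr
      }
      std::printf("encoding 0x%02x is %u bytes wide\n", Enc, Size);
      return 0;
    }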
-
-/// EmitEncodingByte - Emit a .byte 42 directive that corresponds to an
-/// encoding. If verbose assembly output is enabled, we output comments
-/// describing the encoding. Desc is an optional string saying what the
-/// encoding is specifying (e.g. "LSDA").
-void DwarfPrinter::EmitEncodingByte(unsigned Val, const char *Desc) {
- if (Asm->VerboseAsm) {
- if (Desc != 0)
- Asm->OutStreamer.AddComment(Twine(Desc)+" Encoding = " +
- Twine(DecodeDWARFEncoding(Val)));
- else
- Asm->OutStreamer.AddComment(Twine("Encoding = ") +
- DecodeDWARFEncoding(Val));
- }
-
- Asm->OutStreamer.EmitIntValue(Val, 1, 0/*addrspace*/);
-}
-
-/// EmitCFAByte - Emit a .byte 42 directive for a DW_CFA_xxx value.
-void DwarfPrinter::EmitCFAByte(unsigned Val) {
- if (Asm->VerboseAsm) {
- if (Val >= dwarf::DW_CFA_offset && Val < dwarf::DW_CFA_offset+64)
- Asm->OutStreamer.AddComment("DW_CFA_offset + Reg (" +
- Twine(Val-dwarf::DW_CFA_offset) + ")");
- else
- Asm->OutStreamer.AddComment(dwarf::CallFrameString(Val));
- }
- Asm->OutStreamer.EmitIntValue(Val, 1, 0/*addrspace*/);
-}
-
-/// EmitSLEB128 - emit the specified signed leb128 value.
-void DwarfPrinter::EmitSLEB128(int Value, const char *Desc) const {
- if (Asm->VerboseAsm && Desc)
- Asm->OutStreamer.AddComment(Desc);
-
- if (MAI->hasLEB128()) {
- O << "\t.sleb128\t" << Value;
- Asm->OutStreamer.AddBlankLine();
- return;
- }
-
- // If we don't have .sleb128, emit as .bytes.
- int Sign = Value >> (8 * sizeof(Value) - 1);
- bool IsMore;
-
- do {
- unsigned char Byte = static_cast<unsigned char>(Value & 0x7f);
- Value >>= 7;
- IsMore = Value != Sign || ((Byte ^ Sign) & 0x40) != 0;
- if (IsMore) Byte |= 0x80;
- Asm->OutStreamer.EmitIntValue(Byte, 1, /*addrspace*/0);
- } while (IsMore);
-}
-
-/// EmitULEB128 - emit the specified unsigned leb128 value.
-void DwarfPrinter::EmitULEB128(unsigned Value, const char *Desc,
- unsigned PadTo) const {
- if (Asm->VerboseAsm && Desc)
- Asm->OutStreamer.AddComment(Desc);
-
- if (MAI->hasLEB128() && PadTo == 0) {
- O << "\t.uleb128\t" << Value;
- Asm->OutStreamer.AddBlankLine();
- return;
- }
-
- // If we don't have .uleb128 or we want to emit padding, emit as .bytes.
- do {
- unsigned char Byte = static_cast<unsigned char>(Value & 0x7f);
- Value >>= 7;
- if (Value || PadTo != 0) Byte |= 0x80;
- Asm->OutStreamer.EmitIntValue(Byte, 1, /*addrspace*/0);
- } while (Value);
-
- if (PadTo) {
- if (PadTo > 1)
- Asm->OutStreamer.EmitFill(PadTo - 1, 0x80/*fillval*/, 0/*addrspace*/);
- Asm->OutStreamer.EmitFill(1, 0/*fillval*/, 0/*addrspace*/);
- }
-}
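
The two emitters above fall back to hand-rolled byte sequences when the assembler has no .sleb128/.uleb128 directives. Here is a self-contained sketch of that byte encoding; the encode* names are made up for the example.

    #include <cstdint>
    #include <vector>

    // ULEB128: 7 payload bits per byte, 0x80 marks "more bytes follow".
    static void encodeULEB128(uint64_t Value, std::vector<uint8_t> &Out) {
      do {
        uint8_t Byte = Value & 0x7f;
        Value >>= 7;
        if (Value)
          Byte |= 0x80;
        Out.push_back(Byte);
      } while (Value);
    }

    // SLEB128: stop once the remaining value and the sign bit of the
    // emitted byte agree (same termination test as the loop above).
    static void encodeSLEB128(int64_t Value, std::vector<uint8_t> &Out) {
      bool More;
      do {
        uint8_t Byte = Value & 0x7f;
        Value >>= 7;  // arithmetic shift on the usual implementations
        More = !((Value == 0 && (Byte & 0x40) == 0) ||
                 (Value == -1 && (Byte & 0x40) != 0));
        if (More)
          Byte |= 0x80;
        Out.push_back(Byte);
      } while (More);
    }

For instance, encodeULEB128(624485, Out) appends 0xE5 0x8E 0x26 and encodeSLEB128(-2, Out) appends the single byte 0x7E.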
-
-
-/// PrintLabelName - Print label name in form used by Dwarf writer.
-///
-void DwarfPrinter::PrintLabelName(const char *Tag, unsigned Number) const {
- O << MAI->getPrivateGlobalPrefix() << Tag;
- if (Number) O << Number;
-}
-void DwarfPrinter::PrintLabelName(const char *Tag, unsigned Number,
- const char *Suffix) const {
- O << MAI->getPrivateGlobalPrefix() << Tag;
- if (Number) O << Number;
- O << Suffix;
-}
-
-/// EmitLabel - Emit location label for internal use by Dwarf.
-///
-void DwarfPrinter::EmitLabel(const char *Tag, unsigned Number) const {
- PrintLabelName(Tag, Number);
- O << ":\n";
-}
-
-/// EmitReference - Emit a reference to a label.
-///
-void DwarfPrinter::EmitReference(const char *Tag, unsigned Number,
- bool IsPCRelative, bool Force32Bit) const {
- PrintRelDirective(Force32Bit);
- PrintLabelName(Tag, Number);
- if (IsPCRelative) O << "-" << MAI->getPCSymbol();
-}
-void DwarfPrinter::EmitReference(const std::string &Name, bool IsPCRelative,
- bool Force32Bit) const {
- PrintRelDirective(Force32Bit);
- O << Name;
- if (IsPCRelative) O << "-" << MAI->getPCSymbol();
-}
-
-void DwarfPrinter::EmitReference(const MCSymbol *Sym, bool IsPCRelative,
- bool Force32Bit) const {
- PrintRelDirective(Force32Bit);
- O << *Sym;
- if (IsPCRelative) O << "-" << MAI->getPCSymbol();
-}
-
-void DwarfPrinter::EmitReference(const char *Tag, unsigned Number,
- unsigned Encoding) const {
- SmallString<64> Name;
- raw_svector_ostream(Name) << MAI->getPrivateGlobalPrefix()
- << Tag << Number;
-
- MCSymbol *Sym = Asm->OutContext.GetOrCreateSymbol(Name.str());
- EmitReference(Sym, Encoding);
-}
-
-void DwarfPrinter::EmitReference(const MCSymbol *Sym, unsigned Encoding) const {
- const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
-
- PrintRelDirective(Encoding);
- O << *TLOF.getSymbolForDwarfReference(Sym, Asm->MMI, Encoding);
-}
-
-void DwarfPrinter::EmitReference(const GlobalValue *GV, unsigned Encoding)const {
- const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
-
- PrintRelDirective(Encoding);
- O << *TLOF.getSymbolForDwarfGlobalReference(GV, Asm->Mang,
- Asm->MMI, Encoding);
-}
-
-/// EmitDifference - Emit the difference between two labels. If this assembler
-/// supports .set, we emit a .set of a temporary and then use it in the .word.
-void DwarfPrinter::EmitDifference(const char *TagHi, unsigned NumberHi,
- const char *TagLo, unsigned NumberLo,
- bool IsSmall) {
- if (MAI->hasSetDirective()) {
- // FIXME: switch to OutStreamer.EmitAssignment.
- O << "\t.set\t";
- PrintLabelName("set", SetCounter, Flavor);
- O << ",";
- PrintLabelName(TagHi, NumberHi);
- O << "-";
- PrintLabelName(TagLo, NumberLo);
- O << "\n";
-
- PrintRelDirective(IsSmall);
- PrintLabelName("set", SetCounter, Flavor);
- ++SetCounter;
- } else {
- PrintRelDirective(IsSmall);
- PrintLabelName(TagHi, NumberHi);
- O << "-";
- PrintLabelName(TagLo, NumberLo);
- }
-}
-
-void DwarfPrinter::EmitSectionOffset(const char* Label, const char* Section,
- unsigned LabelNumber,
- unsigned SectionNumber,
- bool IsSmall, bool isEH,
- bool useSet) {
- bool printAbsolute = false;
- if (isEH)
- printAbsolute = MAI->isAbsoluteEHSectionOffsets();
- else
- printAbsolute = MAI->isAbsoluteDebugSectionOffsets();
-
- if (MAI->hasSetDirective() && useSet) {
- // FIXME: switch to OutStreamer.EmitAssignment.
- O << "\t.set\t";
- PrintLabelName("set", SetCounter, Flavor);
- O << ",";
- PrintLabelName(Label, LabelNumber);
-
- if (!printAbsolute) {
- O << "-";
- PrintLabelName(Section, SectionNumber);
- }
-
- O << "\n";
- PrintRelDirective(IsSmall);
- PrintLabelName("set", SetCounter, Flavor);
- ++SetCounter;
- } else {
- PrintRelDirective(IsSmall, true);
- PrintLabelName(Label, LabelNumber);
-
- if (!printAbsolute) {
- O << "-";
- PrintLabelName(Section, SectionNumber);
- }
- }
-}
-
-/// EmitFrameMoves - Emit frame instructions to describe the layout of the
-/// frame.
-void DwarfPrinter::EmitFrameMoves(const char *BaseLabel, unsigned BaseLabelID,
- const std::vector<MachineMove> &Moves,
- bool isEH) {
- int stackGrowth =
- Asm->TM.getFrameInfo()->getStackGrowthDirection() ==
- TargetFrameInfo::StackGrowsUp ?
- TD->getPointerSize() : -TD->getPointerSize();
- bool IsLocal = BaseLabel && strcmp(BaseLabel, "label") == 0;
-
- for (unsigned i = 0, N = Moves.size(); i < N; ++i) {
- const MachineMove &Move = Moves[i];
- unsigned LabelID = Move.getLabelID();
-
- if (LabelID) {
- LabelID = MMI->MappedLabel(LabelID);
-
- // Throw out move if the label is invalid.
- if (!LabelID) continue;
- }
-
- const MachineLocation &Dst = Move.getDestination();
- const MachineLocation &Src = Move.getSource();
-
- // Advance row if new location.
- if (BaseLabel && LabelID && (BaseLabelID != LabelID || !IsLocal)) {
- EmitCFAByte(dwarf::DW_CFA_advance_loc4);
- EmitDifference("label", LabelID, BaseLabel, BaseLabelID, true);
- Asm->O << '\n';
-
- BaseLabelID = LabelID;
- BaseLabel = "label";
- IsLocal = true;
- }
-
- // If advancing cfa.
- if (Dst.isReg() && Dst.getReg() == MachineLocation::VirtualFP) {
- if (!Src.isReg()) {
- if (Src.getReg() == MachineLocation::VirtualFP) {
- EmitCFAByte(dwarf::DW_CFA_def_cfa_offset);
- } else {
- EmitCFAByte(dwarf::DW_CFA_def_cfa);
- EmitULEB128(RI->getDwarfRegNum(Src.getReg(), isEH), "Register");
- }
-
- int Offset = -Src.getOffset();
- EmitULEB128(Offset, "Offset");
- } else {
- llvm_unreachable("Machine move not supported yet.");
- }
- } else if (Src.isReg() &&
- Src.getReg() == MachineLocation::VirtualFP) {
- if (Dst.isReg()) {
- EmitCFAByte(dwarf::DW_CFA_def_cfa_register);
- EmitULEB128(RI->getDwarfRegNum(Dst.getReg(), isEH), "Register");
- } else {
- llvm_unreachable("Machine move not supported yet.");
- }
- } else {
- unsigned Reg = RI->getDwarfRegNum(Src.getReg(), isEH);
- int Offset = Dst.getOffset() / stackGrowth;
-
- if (Offset < 0) {
- EmitCFAByte(dwarf::DW_CFA_offset_extended_sf);
- EmitULEB128(Reg, "Reg");
- EmitSLEB128(Offset, "Offset");
- } else if (Reg < 64) {
- EmitCFAByte(dwarf::DW_CFA_offset + Reg);
- EmitULEB128(Offset, "Offset");
- } else {
- EmitCFAByte(dwarf::DW_CFA_offset_extended);
- EmitULEB128(Reg, "Reg");
- EmitULEB128(Offset, "Offset");
- }
- }
- }
-}
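
The tail of EmitFrameMoves picks one of three encodings for a register saved at CFA + Offset. The sketch below isolates just that choice; the DW_CFA_* values are the standard DWARF constants, and the operand emission (ULEB128/SLEB128) is only indicated in comments.

    #include <cstdint>
    #include <vector>

    // Standard DWARF call-frame opcodes (values restated here).
    static const uint8_t DW_CFA_offset             = 0x80; // reg in low 6 bits
    static const uint8_t DW_CFA_offset_extended    = 0x05; // ULEB reg, ULEB off
    static const uint8_t DW_CFA_offset_extended_sf = 0x11; // ULEB reg, SLEB off

    // Encode "register Reg is saved at CFA + Offset" the same way the
    // final else-branch of EmitFrameMoves does.
    static void emitSavedRegRule(unsigned Reg, int Offset,
                                 std::vector<uint8_t> &Out) {
      if (Offset < 0) {
        Out.push_back(DW_CFA_offset_extended_sf);
        // ...then ULEB128(Reg) and SLEB128(Offset)
      } else if (Reg < 64) {
        Out.push_back(static_cast<uint8_t>(DW_CFA_offset + Reg));
        // register folded into the opcode byte, then ULEB128(Offset)
      } else {
        Out.push_back(DW_CFA_offset_extended);
        // ...then ULEB128(Reg) and ULEB128(Offset)
      }
    }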
diff --git a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfPrinter.h b/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfPrinter.h
deleted file mode 100644
index bd715f2..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfPrinter.h
+++ /dev/null
@@ -1,177 +0,0 @@
-//===--- lib/CodeGen/DwarfPrinter.h - Dwarf Printer -------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Emit general DWARF directives.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CODEGEN_ASMPRINTER_DWARFPRINTER_H__
-#define CODEGEN_ASMPRINTER_DWARFPRINTER_H__
-
-#include "DwarfLabel.h"
-#include "llvm/CodeGen/MachineLocation.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/FormattedStream.h"
-#include <vector>
-
-namespace llvm {
-class AsmPrinter;
-class MachineFunction;
-class MachineModuleInfo;
-class Module;
-class MCAsmInfo;
-class TargetData;
-class TargetRegisterInfo;
-class GlobalValue;
-class MCSymbol;
-class Twine;
-
-class DwarfPrinter {
-protected:
- ~DwarfPrinter() {}
-
-  //===------------------------------------------------------------------===//
- // Core attributes used by the DWARF printer.
- //
-
- /// O - Stream to .s file.
- raw_ostream &O;
-
- /// Asm - Target of Dwarf emission.
- AsmPrinter *Asm;
-
- /// MAI - Target asm information.
- const MCAsmInfo *MAI;
-
- /// TD - Target data.
- const TargetData *TD;
-
- /// RI - Register Information.
- const TargetRegisterInfo *RI;
-
- /// M - Current module.
- Module *M;
-
- /// MF - Current machine function.
- const MachineFunction *MF;
-
- /// MMI - Collected machine module information.
- MachineModuleInfo *MMI;
-
- /// SubprogramCount - The running count of functions being compiled.
- unsigned SubprogramCount;
-
- /// Flavor - A unique string indicating what dwarf producer this is, used to
- /// unique labels.
- const char * const Flavor;
-
- /// SetCounter - A unique number for each '.set' directive.
- unsigned SetCounter;
-
- DwarfPrinter(raw_ostream &OS, AsmPrinter *A, const MCAsmInfo *T,
- const char *flavor);
-public:
-
- //===------------------------------------------------------------------===//
- // Accessors.
- //
- const AsmPrinter *getAsm() const { return Asm; }
- MachineModuleInfo *getMMI() const { return MMI; }
- const MCAsmInfo *getMCAsmInfo() const { return MAI; }
- const TargetData *getTargetData() const { return TD; }
-
- /// SizeOfEncodedValue - Return the size of the encoding in bytes.
- unsigned SizeOfEncodedValue(unsigned Encoding) const;
-
- void PrintRelDirective(unsigned Encoding) const;
- void PrintRelDirective(bool Force32Bit = false,
- bool isInSection = false) const;
-
- /// EOL - Print a newline character to asm stream. If a comment is present
- /// then it will be printed first. Comments should not contain '\n'.
- void EOL(const Twine &Comment) const;
-
- /// EmitEncodingByte - Emit a .byte 42 directive that corresponds to an
- /// encoding. If verbose assembly output is enabled, we output comments
- /// describing the encoding. Desc is a string saying what the encoding is
- /// specifying (e.g. "LSDA").
- void EmitEncodingByte(unsigned Val, const char *Desc);
-
- /// EmitCFAByte - Emit a .byte 42 directive for a DW_CFA_xxx value.
- void EmitCFAByte(unsigned Val);
-
-
- /// EmitSLEB128 - emit the specified signed leb128 value.
- void EmitSLEB128(int Value, const char *Desc) const;
-
- /// EmitULEB128 - emit the specified unsigned leb128 value.
- void EmitULEB128(unsigned Value, const char *Desc = 0,
- unsigned PadTo = 0) const;
-
-
- /// PrintLabelName - Print label name in form used by Dwarf writer.
- ///
- void PrintLabelName(const DWLabel &Label) const {
- PrintLabelName(Label.getTag(), Label.getNumber());
- }
- void PrintLabelName(const char *Tag, unsigned Number) const;
- void PrintLabelName(const char *Tag, unsigned Number,
- const char *Suffix) const;
-
- /// EmitLabel - Emit location label for internal use by Dwarf.
- ///
- void EmitLabel(const DWLabel &Label) const {
- EmitLabel(Label.getTag(), Label.getNumber());
- }
- void EmitLabel(const char *Tag, unsigned Number) const;
-
- /// EmitReference - Emit a reference to a label.
- ///
- void EmitReference(const DWLabel &Label, bool IsPCRelative = false,
- bool Force32Bit = false) const {
- EmitReference(Label.getTag(), Label.getNumber(),
- IsPCRelative, Force32Bit);
- }
- void EmitReference(const char *Tag, unsigned Number,
- bool IsPCRelative = false,
- bool Force32Bit = false) const;
- void EmitReference(const std::string &Name, bool IsPCRelative = false,
- bool Force32Bit = false) const;
- void EmitReference(const MCSymbol *Sym, bool IsPCRelative = false,
- bool Force32Bit = false) const;
-
- void EmitReference(const char *Tag, unsigned Number, unsigned Encoding) const;
- void EmitReference(const MCSymbol *Sym, unsigned Encoding) const;
- void EmitReference(const GlobalValue *GV, unsigned Encoding) const;
-
- /// EmitDifference - Emit the difference between two labels.
- void EmitDifference(const DWLabel &LabelHi, const DWLabel &LabelLo,
- bool IsSmall = false) {
- EmitDifference(LabelHi.getTag(), LabelHi.getNumber(),
- LabelLo.getTag(), LabelLo.getNumber(),
- IsSmall);
- }
- void EmitDifference(const char *TagHi, unsigned NumberHi,
- const char *TagLo, unsigned NumberLo,
- bool IsSmall = false);
-
- void EmitSectionOffset(const char* Label, const char* Section,
- unsigned LabelNumber, unsigned SectionNumber,
- bool IsSmall = false, bool isEH = false,
- bool useSet = true);
-
- /// EmitFrameMoves - Emit frame instructions to describe the layout of the
- /// frame.
- void EmitFrameMoves(const char *BaseLabel, unsigned BaseLabelID,
- const std::vector<MachineMove> &Moves, bool isEH);
-};
-
-} // end llvm namespace
-
-#endif
diff --git a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfWriter.cpp b/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfWriter.cpp
deleted file mode 100644
index 08e1bbc..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/DwarfWriter.cpp
+++ /dev/null
@@ -1,100 +0,0 @@
-//===-- llvm/CodeGen/DwarfWriter.cpp - Dwarf Framework --------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains support for writing dwarf info into asm files.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/CodeGen/DwarfWriter.h"
-#include "DwarfDebug.h"
-#include "DwarfException.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-
-using namespace llvm;
-
-static RegisterPass<DwarfWriter>
-X("dwarfwriter", "DWARF Information Writer");
-char DwarfWriter::ID = 0;
-
-//===----------------------------------------------------------------------===//
-/// DwarfWriter Implementation
-///
-
-DwarfWriter::DwarfWriter()
- : ImmutablePass(&ID), DD(0), DE(0) {}
-
-DwarfWriter::~DwarfWriter() {
- delete DE;
- delete DD;
-}
-
-/// BeginModule - Emit all Dwarf sections that should come prior to the
-/// content.
-void DwarfWriter::BeginModule(Module *M,
- MachineModuleInfo *MMI,
- raw_ostream &OS, AsmPrinter *A,
- const MCAsmInfo *T) {
- DE = new DwarfException(OS, A, T);
- DD = new DwarfDebug(OS, A, T);
- DE->BeginModule(M, MMI);
- DD->beginModule(M, MMI);
-}
-
-/// EndModule - Emit all Dwarf sections that should come after the content.
-///
-void DwarfWriter::EndModule() {
- DE->EndModule();
- DD->endModule();
- delete DD; DD = 0;
- delete DE; DE = 0;
-}
-
-/// BeginFunction - Gather pre-function debug information. Assumes being
-/// emitted immediately after the function entry point.
-void DwarfWriter::BeginFunction(const MachineFunction *MF) {
- DE->BeginFunction(MF);
- DD->beginFunction(MF);
-}
-
-/// EndFunction - Gather and emit post-function debug information.
-///
-void DwarfWriter::EndFunction(const MachineFunction *MF) {
- DD->endFunction(MF);
- DE->EndFunction();
-
- if (MachineModuleInfo *MMI = DD->getMMI() ? DD->getMMI() : DE->getMMI())
- // Clear function debug information.
- MMI->EndFunction();
-}
-
-/// RecordSourceLine - Records location information and associates it with a
-/// label. Returns a unique label ID used to generate a label and provide
-/// correspondence to the source line list.
-unsigned DwarfWriter::RecordSourceLine(unsigned Line, unsigned Col,
- MDNode *Scope) {
- return DD->recordSourceLine(Line, Col, Scope);
-}
-
-/// getRecordSourceLineCount - Count source lines.
-unsigned DwarfWriter::getRecordSourceLineCount() {
- return DD->getSourceLineCount();
-}
-
-/// ShouldEmitDwarfDebug - Returns true if Dwarf debugging declarations should
-/// be emitted.
-bool DwarfWriter::ShouldEmitDwarfDebug() const {
- return DD && DD->ShouldEmitDwarfDebug();
-}
-
-void DwarfWriter::BeginScope(const MachineInstr *MI, unsigned L) {
- DD->beginScope(MI, L);
-}
-void DwarfWriter::EndScope(const MachineInstr *MI) {
- DD->endScope(MI);
-}
diff --git a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/Makefile b/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/Makefile
deleted file mode 100644
index 60aa6cb..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-##===- lib/CodeGen/AsmPrinter/Makefile ---------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMAsmPrinter
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp b/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
deleted file mode 100644
index 3531ed6..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
+++ /dev/null
@@ -1,160 +0,0 @@
-//===-- OcamlGCPrinter.cpp - Ocaml frametable emitter ---------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements printing the assembly code for an Ocaml frametable.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/CodeGen/GCs.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/GCMetadataPrinter.h"
-#include "llvm/Module.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/FormattedStream.h"
-using namespace llvm;
-
-namespace {
-
- class OcamlGCMetadataPrinter : public GCMetadataPrinter {
- public:
- void beginAssembly(raw_ostream &OS, AsmPrinter &AP,
- const MCAsmInfo &MAI);
-
- void finishAssembly(raw_ostream &OS, AsmPrinter &AP,
- const MCAsmInfo &MAI);
- };
-
-}
-
-static GCMetadataPrinterRegistry::Add<OcamlGCMetadataPrinter>
-Y("ocaml", "ocaml 3.10-compatible collector");
-
-void llvm::linkOcamlGCPrinter() { }
-
-static void EmitCamlGlobal(const Module &M, raw_ostream &OS, AsmPrinter &AP,
- const MCAsmInfo &MAI, const char *Id) {
- const std::string &MId = M.getModuleIdentifier();
-
- std::string Mangled;
- Mangled += MAI.getGlobalPrefix();
- Mangled += "caml";
- size_t Letter = Mangled.size();
- Mangled.append(MId.begin(), std::find(MId.begin(), MId.end(), '.'));
- Mangled += "__";
- Mangled += Id;
-
- // Capitalize the first letter of the module name.
- Mangled[Letter] = toupper(Mangled[Letter]);
-
- if (const char *GlobalDirective = MAI.getGlobalDirective())
- OS << GlobalDirective << Mangled << "\n";
- OS << Mangled << ":\n";
-}
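
EmitCamlGlobal above builds symbols of the form <prefix>caml<Module>__<id>. A standalone sketch of the same mangling follows; the "_" default prefix is an assumption, the real one comes from MAI.getGlobalPrefix().

    #include <algorithm>
    #include <cctype>
    #include <iostream>
    #include <string>

    // Same mangling as EmitCamlGlobal: prefix + "caml" + module basename
    // (up to the first '.') with its first letter capitalized + "__" + Id.
    static std::string camlGlobalName(const std::string &ModuleId,
                                      const std::string &Id,
                                      const std::string &Prefix = "_") {
      std::string Name = Prefix + "caml";
      std::string::size_type Letter = Name.size();
      Name.append(ModuleId.begin(),
                  std::find(ModuleId.begin(), ModuleId.end(), '.'));
      Name += "__";
      Name += Id;
      Name[Letter] = std::toupper(static_cast<unsigned char>(Name[Letter]));
      return Name;
    }

    int main() {
      std::cout << camlGlobalName("parser.ml", "frametable") << "\n";
      return 0;
    }

So a module compiled from parser.ml gets the frametable symbol _camlParser__frametable under that assumed prefix.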
-
-void OcamlGCMetadataPrinter::beginAssembly(raw_ostream &OS, AsmPrinter &AP,
- const MCAsmInfo &MAI) {
- AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getTextSection());
- EmitCamlGlobal(getModule(), OS, AP, MAI, "code_begin");
-
- AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getDataSection());
- EmitCamlGlobal(getModule(), OS, AP, MAI, "data_begin");
-}
-
-/// finishAssembly - Print the frametable. The ocaml frametable format is thus:
-///
-/// extern "C" struct align(sizeof(intptr_t)) {
-/// uint16_t NumDescriptors;
-/// struct align(sizeof(intptr_t)) {
-/// void *ReturnAddress;
-/// uint16_t FrameSize;
-/// uint16_t NumLiveOffsets;
-/// uint16_t LiveOffsets[NumLiveOffsets];
-/// } Descriptors[NumDescriptors];
-/// } caml${module}__frametable;
-///
-/// Note that this precludes programs from stack frames larger than 64K
-/// (FrameSize and LiveOffsets would overflow). FrameTablePrinter will abort if
-/// either condition is detected in a function which uses the GC.
-///
-void OcamlGCMetadataPrinter::finishAssembly(raw_ostream &OS, AsmPrinter &AP,
- const MCAsmInfo &MAI) {
- const char *AddressDirective;
- int AddressAlignLog;
- if (AP.TM.getTargetData()->getPointerSize() == sizeof(int32_t)) {
- AddressDirective = MAI.getData32bitsDirective();
- AddressAlignLog = 2;
- } else {
- AddressDirective = MAI.getData64bitsDirective();
- AddressAlignLog = 3;
- }
-
- AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getTextSection());
- EmitCamlGlobal(getModule(), OS, AP, MAI, "code_end");
-
- AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getDataSection());
- EmitCamlGlobal(getModule(), OS, AP, MAI, "data_end");
-
- OS << AddressDirective << 0 << '\n'; // FIXME: Why does ocaml emit this??
-
- AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getDataSection());
- EmitCamlGlobal(getModule(), OS, AP, MAI, "frametable");
-
- for (iterator I = begin(), IE = end(); I != IE; ++I) {
- GCFunctionInfo &FI = **I;
-
- uint64_t FrameSize = FI.getFrameSize();
- if (FrameSize >= 1<<16) {
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "Function '" << FI.getFunction().getName()
- << "' is too large for the ocaml GC! "
- << "Frame size " << FrameSize << " >= 65536.\n";
- Msg << "(" << uintptr_t(&FI) << ")";
- llvm_report_error(Msg.str()); // Very rude!
- }
-
- OS << "\t" << MAI.getCommentString() << " live roots for "
- << FI.getFunction().getName() << "\n";
-
- for (GCFunctionInfo::iterator J = FI.begin(), JE = FI.end(); J != JE; ++J) {
- size_t LiveCount = FI.live_size(J);
- if (LiveCount >= 1<<16) {
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "Function '" << FI.getFunction().getName()
- << "' is too large for the ocaml GC! "
- << "Live root count " << LiveCount << " >= 65536.";
- llvm_report_error(Msg.str()); // Very rude!
- }
-
- OS << AddressDirective
- << MAI.getPrivateGlobalPrefix() << "label" << J->Num << '\n';
-
- AP.EmitInt16(FrameSize);
-
- AP.EmitInt16(LiveCount);
-
- for (GCFunctionInfo::live_iterator K = FI.live_begin(J),
- KE = FI.live_end(J); K != KE; ++K) {
- assert(K->StackOffset < 1<<16 &&
- "GC root stack offset is outside of fixed stack frame and out "
- "of range for ocaml GC!");
-
- AP.EmitInt32(K->StackOffset);
- }
-
- AP.EmitAlignment(AddressAlignLog);
- }
- }
-}
diff --git a/libclamav/c++/llvm/lib/CodeGen/BranchFolding.cpp b/libclamav/c++/llvm/lib/CodeGen/BranchFolding.cpp
index d41a134..cb81aa3 100644
--- a/libclamav/c++/llvm/lib/CodeGen/BranchFolding.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/BranchFolding.cpp
@@ -65,7 +65,7 @@ namespace {
public:
static char ID;
explicit BranchFolderPass(bool defaultEnableTailMerge)
- : MachineFunctionPass(&ID), BranchFolder(defaultEnableTailMerge) {}
+ : MachineFunctionPass(ID), BranchFolder(defaultEnableTailMerge) {}
virtual bool runOnMachineFunction(MachineFunction &MF);
virtual const char *getPassName() const { return "Control Flow Optimizer"; }
@@ -105,17 +105,6 @@ void BranchFolder::RemoveDeadBlock(MachineBasicBlock *MBB) {
while (!MBB->succ_empty())
MBB->removeSuccessor(MBB->succ_end()-1);
- // If there are any labels in the basic block, unregister them from
- // MachineModuleInfo.
- if (MMI && !MBB->empty()) {
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
- I != E; ++I) {
- if (I->isLabel())
- // The label ID # is always operand #0, an immediate.
- MMI->InvalidateLabel(I->getOperand(0).getImm());
- }
- }
-
// Remove the block.
MF->erase(MBB);
}
@@ -275,28 +264,21 @@ static unsigned HashMachineInstr(const MachineInstr *MI) {
return Hash;
}
-/// HashEndOfMBB - Hash the last few instructions in the MBB. For blocks
-/// with no successors, we hash two instructions, because cross-jumping
-/// only saves code when at least two instructions are removed (since a
-/// branch must be inserted). For blocks with a successor, one of the
-/// two blocks to be tail-merged will end with a branch already, so
-/// it gains to cross-jump even for one instruction.
-static unsigned HashEndOfMBB(const MachineBasicBlock *MBB,
- unsigned minCommonTailLength) {
+/// HashEndOfMBB - Hash the last instruction in the MBB.
+static unsigned HashEndOfMBB(const MachineBasicBlock *MBB) {
MachineBasicBlock::const_iterator I = MBB->end();
if (I == MBB->begin())
return 0; // Empty MBB.
--I;
- unsigned Hash = HashMachineInstr(I);
-
- if (I == MBB->begin() || minCommonTailLength == 1)
- return Hash; // Single instr MBB.
+ // Skip debug info so it will not affect codegen.
+ while (I->isDebugValue()) {
+ if (I==MBB->begin())
+ return 0; // MBB empty except for debug info.
+ --I;
+ }
- --I;
- // Hash in the second-to-last instruction.
- Hash ^= HashMachineInstr(I) << 2;
- return Hash;
+ return HashMachineInstr(I);
}
/// ComputeCommonTailLength - Given two machine basic blocks, compute the number
@@ -312,9 +294,32 @@ static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1,
unsigned TailLen = 0;
while (I1 != MBB1->begin() && I2 != MBB2->begin()) {
--I1; --I2;
- // Don't merge debugging pseudos.
- if (I1->isDebugValue() || I2->isDebugValue() ||
- !I1->isIdenticalTo(I2) ||
+ // Skip debugging pseudos; necessary to avoid changing the code.
+ while (I1->isDebugValue()) {
+ if (I1==MBB1->begin()) {
+ while (I2->isDebugValue()) {
+ if (I2==MBB2->begin())
+ // I1==DBG at begin; I2==DBG at begin
+ return TailLen;
+ --I2;
+ }
+ ++I2;
+ // I1==DBG at begin; I2==non-DBG, or first of DBGs not at begin
+ return TailLen;
+ }
+ --I1;
+ }
+ // I1==first (untested) non-DBG preceding known match
+ while (I2->isDebugValue()) {
+ if (I2==MBB2->begin()) {
+ ++I1;
+ // I1==non-DBG, or first of DBGs not at begin; I2==DBG at begin
+ return TailLen;
+ }
+ --I2;
+ }
+ // I1, I2==first (untested) non-DBGs preceding known match
+ if (!I1->isIdenticalTo(I2) ||
// FIXME: This check is dubious. It's used to get around a problem where
// people incorrectly expect inline asm directives to remain in the same
// relative order. This is untenable because normal compiler
@@ -326,27 +331,37 @@ static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1,
}
++TailLen;
}
+ // Back past possible debugging pseudos at beginning of block. This matters
+ // when one block differs from the other only by whether debugging pseudos
+ // are present at the beginning. (This way, the various checks later for
+ // I1==MBB1->begin() work as expected.)
+ if (I1 == MBB1->begin() && I2 != MBB2->begin()) {
+ --I2;
+ while (I2->isDebugValue()) {
+ if (I2 == MBB2->begin()) {
+ return TailLen;
+ }
+ --I2;
+ }
+ ++I2;
+ }
+ if (I2 == MBB2->begin() && I1 != MBB1->begin()) {
+ --I1;
+ while (I1->isDebugValue()) {
+ if (I1 == MBB1->begin())
+ return TailLen;
+ --I1;
+ }
+ ++I1;
+ }
return TailLen;
}
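
Several hunks in this file add the same backwards scan past DBG_VALUE instructions so that debug pseudo-instructions cannot change hashing or common-tail length, i.e. building with -g must not change codegen. A generic sketch of that pattern, with assumed names:

    // Generic form of the backwards scan; IsDebug abstracts
    // MachineInstr::isDebugValue().
    template <class Iterator, class Pred>
    static bool skipDebugBackwards(Iterator &I, Iterator Begin, Pred IsDebug) {
      // On entry I points at a candidate instruction. Returns false when
      // nothing but debug values remains before Begin; otherwise leaves I
      // on the nearest non-debug instruction at or before its start.
      while (IsDebug(*I)) {
        if (I == Begin)
          return false;
        --I;
      }
      return true;
    }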
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
-/// after it, replacing it with an unconditional branch to NewDest. This
-/// returns true if OldInst's block is modified, false if NewDest is modified.
+/// after it, replacing it with an unconditional branch to NewDest.
void BranchFolder::ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
MachineBasicBlock *NewDest) {
- MachineBasicBlock *OldBB = OldInst->getParent();
-
- // Remove all the old successors of OldBB from the CFG.
- while (!OldBB->succ_empty())
- OldBB->removeSuccessor(OldBB->succ_begin());
-
- // Remove all the dead instructions from the end of OldBB.
- OldBB->erase(OldInst, OldBB->end());
-
- // If OldBB isn't immediately before OldBB, insert a branch to it.
- if (++MachineFunction::iterator(OldBB) != MachineFunction::iterator(NewDest))
- TII->InsertBranch(*OldBB, NewDest, 0, SmallVector<MachineOperand, 0>());
- OldBB->addSuccessor(NewDest);
+ TII->ReplaceTailWithBranchTo(OldInst, NewDest);
++NumTailMerge;
}
@@ -355,6 +370,9 @@ void BranchFolder::ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
/// iterator. This returns the new MBB.
MachineBasicBlock *BranchFolder::SplitMBBAt(MachineBasicBlock &CurMBB,
MachineBasicBlock::iterator BBI1) {
+ if (!TII->isLegalToSplitMBBAt(CurMBB, BBI1))
+ return 0;
+
MachineFunction &MF = *CurMBB.getParent();
// Create the fall-through block.
@@ -415,18 +433,20 @@ static void FixTail(MachineBasicBlock *CurMBB, MachineBasicBlock *SuccBB,
MachineFunction::iterator I = llvm::next(MachineFunction::iterator(CurMBB));
MachineBasicBlock *TBB = 0, *FBB = 0;
SmallVector<MachineOperand, 4> Cond;
+ DebugLoc dl; // FIXME: this is nowhere
if (I != MF->end() &&
!TII->AnalyzeBranch(*CurMBB, TBB, FBB, Cond, true)) {
MachineBasicBlock *NextBB = I;
if (TBB == NextBB && !Cond.empty() && !FBB) {
if (!TII->ReverseBranchCondition(Cond)) {
TII->RemoveBranch(*CurMBB);
- TII->InsertBranch(*CurMBB, SuccBB, NULL, Cond);
+ TII->InsertBranch(*CurMBB, SuccBB, NULL, Cond, dl);
return;
}
}
}
- TII->InsertBranch(*CurMBB, SuccBB, NULL, SmallVector<MachineOperand, 0>());
+ TII->InsertBranch(*CurMBB, SuccBB, NULL,
+ SmallVector<MachineOperand, 0>(), dl);
}
bool
@@ -597,9 +617,10 @@ void BranchFolder::RemoveBlocksWithHash(unsigned CurHash,
/// CreateCommonTailOnlyBlock - None of the blocks to be tail-merged consist
/// only of the common tail. Create a block that does by splitting one.
-unsigned BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
- unsigned maxCommonTailLength) {
- unsigned commonTailIndex = 0;
+bool BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
+ unsigned maxCommonTailLength,
+ unsigned &commonTailIndex) {
+ commonTailIndex = 0;
unsigned TimeEstimate = ~0U;
for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
// Use PredBB if possible; that doesn't require a new branch.
@@ -621,10 +642,17 @@ unsigned BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
SameTails[commonTailIndex].getTailStartPos();
MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
+ // If the common tail includes any debug info we will take it pretty
+ // randomly from one of the inputs. Might be better to remove it?
DEBUG(dbgs() << "\nSplitting BB#" << MBB->getNumber() << ", size "
<< maxCommonTailLength);
MachineBasicBlock *newMBB = SplitMBBAt(*MBB, BBI);
+ if (!newMBB) {
+ DEBUG(dbgs() << "... failed!");
+ return false;
+ }
+
SameTails[commonTailIndex].setBlock(newMBB);
SameTails[commonTailIndex].setTailStartPos(newMBB->begin());
@@ -632,7 +660,7 @@ unsigned BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
if (PredBB == MBB)
PredBB = newMBB;
- return commonTailIndex;
+ return true;
}
// See if any of the blocks in MergePotentials (which all have a common single
@@ -727,7 +755,11 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
!SameTails[commonTailIndex].tailIsWholeBlock())) {
// None of the blocks consist entirely of the common tail.
// Split a block so that one does.
- commonTailIndex = CreateCommonTailOnlyBlock(PredBB, maxCommonTailLength);
+ if (!CreateCommonTailOnlyBlock(PredBB,
+ maxCommonTailLength, commonTailIndex)) {
+ RemoveBlocksWithHash(CurHash, SuccBB, PredBB);
+ continue;
+ }
}
MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
@@ -763,7 +795,7 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
MergePotentials.clear();
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
if (I->succ_empty())
- MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(I, 2U), I));
+ MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(I), I));
}
// See if we can do any tail merging on those.
@@ -844,13 +876,13 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
}
// Remove the unconditional branch at the end, if any.
if (TBB && (Cond.empty() || FBB)) {
+ DebugLoc dl; // FIXME: this is nowhere
TII->RemoveBranch(*PBB);
if (!Cond.empty())
// reinsert conditional branch only, for now
- TII->InsertBranch(*PBB, (TBB == IBB) ? FBB : TBB, 0, NewCond);
+ TII->InsertBranch(*PBB, (TBB == IBB) ? FBB : TBB, 0, NewCond, dl);
}
- MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(PBB, 1U),
- *P));
+ MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(PBB), *P));
}
}
if (MergePotentials.size() >= 2)
@@ -890,6 +922,29 @@ bool BranchFolder::OptimizeBranches(MachineFunction &MF) {
return MadeChange;
}
+// Blocks should be considered empty if they contain only debug info;
+// else the debug info would affect codegen.
+static bool IsEmptyBlock(MachineBasicBlock *MBB) {
+ if (MBB->empty())
+ return true;
+ for (MachineBasicBlock::iterator MBBI = MBB->begin(), MBBE = MBB->end();
+ MBBI!=MBBE; ++MBBI) {
+ if (!MBBI->isDebugValue())
+ return false;
+ }
+ return true;
+}
+
+// Blocks with only debug info and branches should be considered the same
+// as blocks with only branches.
+static bool IsBranchOnlyBlock(MachineBasicBlock *MBB) {
+ MachineBasicBlock::iterator MBBI, MBBE;
+ for (MBBI = MBB->begin(), MBBE = MBB->end(); MBBI!=MBBE; ++MBBI) {
+ if (!MBBI->isDebugValue())
+ break;
+ }
+ return (MBBI->getDesc().isBranch());
+}
/// IsBetterFallthrough - Return true if it would be clearly better to
/// fall-through to MBB1 than to fall through into MBB2. This has to return
@@ -901,15 +956,21 @@ static bool IsBetterFallthrough(MachineBasicBlock *MBB1,
// MBB1 doesn't, we prefer to fall through into MBB1. This allows us to
// optimize branches that branch to either a return block or an assert block
// into a fallthrough to the return.
- if (MBB1->empty() || MBB2->empty()) return false;
+ if (IsEmptyBlock(MBB1) || IsEmptyBlock(MBB2)) return false;
// If there is a clear successor ordering we make sure that one block
// will fall through to the next
if (MBB1->isSuccessor(MBB2)) return true;
if (MBB2->isSuccessor(MBB1)) return false;
- MachineInstr *MBB1I = --MBB1->end();
- MachineInstr *MBB2I = --MBB2->end();
+ // Neither block consists entirely of debug info (per IsEmptyBlock check),
+ // so we needn't test for falling off the beginning here.
+ MachineBasicBlock::iterator MBB1I = --MBB1->end();
+ while (MBB1I->isDebugValue())
+ --MBB1I;
+ MachineBasicBlock::iterator MBB2I = --MBB2->end();
+ while (MBB2I->isDebugValue())
+ --MBB2I;
return MBB2I->getDesc().isCall() && !MBB1I->getDesc().isCall();
}
@@ -918,6 +979,7 @@ static bool IsBetterFallthrough(MachineBasicBlock *MBB1,
bool BranchFolder::OptimizeBlock(MachineBasicBlock *MBB) {
bool MadeChange = false;
MachineFunction &MF = *MBB->getParent();
+ DebugLoc dl; // FIXME: this is nowhere
ReoptimizeBlock:
MachineFunction::iterator FallThrough = MBB;
@@ -927,7 +989,7 @@ ReoptimizeBlock:
// explicitly. Landing pads should not do this since the landing-pad table
// points to this block. Blocks with their addresses taken shouldn't be
// optimized away.
- if (MBB->empty() && !MBB->isLandingPad() && !MBB->hasAddressTaken()) {
+ if (IsEmptyBlock(MBB) && !MBB->isLandingPad() && !MBB->hasAddressTaken()) {
// Dead block? Leave for cleanup later.
if (MBB->pred_empty()) return MadeChange;
@@ -969,7 +1031,7 @@ ReoptimizeBlock:
TII->RemoveBranch(PrevBB);
PriorCond.clear();
if (PriorTBB != MBB)
- TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond);
+ TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@@ -1008,7 +1070,7 @@ ReoptimizeBlock:
// the condition is false, remove the uncond second branch.
if (PriorFBB == MBB) {
TII->RemoveBranch(PrevBB);
- TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond);
+ TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@@ -1021,7 +1083,7 @@ ReoptimizeBlock:
SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
if (!TII->ReverseBranchCondition(NewPriorCond)) {
TII->RemoveBranch(PrevBB);
- TII->InsertBranch(PrevBB, PriorFBB, 0, NewPriorCond);
+ TII->InsertBranch(PrevBB, PriorFBB, 0, NewPriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@@ -1050,22 +1112,6 @@ ReoptimizeBlock:
!IsBetterFallthrough(PriorTBB, MBB))
DoTransform = false;
- // We don't want to do this transformation if we have control flow like:
- // br cond BB2
- // BB1:
- // ..
- // jmp BBX
- // BB2:
- // ..
- // ret
- //
- // In this case, we could actually be moving the return block *into* a
- // loop!
- if (DoTransform && !MBB->succ_empty() &&
- (!PriorTBB->canFallThrough() || PriorTBB->empty()))
- DoTransform = false;
-
-
if (DoTransform) {
// Reverse the branch so we will fall through on the previous true cond.
SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
@@ -1074,7 +1120,7 @@ ReoptimizeBlock:
<< "To make fallthrough to: " << *PriorTBB << "\n");
TII->RemoveBranch(PrevBB);
- TII->InsertBranch(PrevBB, MBB, 0, NewPriorCond);
+ TII->InsertBranch(PrevBB, MBB, 0, NewPriorCond, dl);
// Move this block to the end of the function.
MBB->moveAfter(--MF.end());
@@ -1103,7 +1149,7 @@ ReoptimizeBlock:
SmallVector<MachineOperand, 4> NewCond(CurCond);
if (!TII->ReverseBranchCondition(NewCond)) {
TII->RemoveBranch(*MBB);
- TII->InsertBranch(*MBB, CurFBB, CurTBB, NewCond);
+ TII->InsertBranch(*MBB, CurFBB, CurTBB, NewCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@@ -1113,13 +1159,29 @@ ReoptimizeBlock:
// If this branch is the only thing in its block, see if we can forward
// other blocks across it.
if (CurTBB && CurCond.empty() && CurFBB == 0 &&
- MBB->begin()->getDesc().isBranch() && CurTBB != MBB &&
+ IsBranchOnlyBlock(MBB) && CurTBB != MBB &&
!MBB->hasAddressTaken()) {
// This block may contain just an unconditional branch. Because there can
// be 'non-branch terminators' in the block, try removing the branch and
// then seeing if the block is empty.
TII->RemoveBranch(*MBB);
-
+ // If the only things remaining in the block are debug info, remove these
+ // as well, so this will behave the same as an empty block in non-debug
+ // mode.
+ if (!MBB->empty()) {
+ bool NonDebugInfoFound = false;
+ for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
+ I != E; ++I) {
+ if (!I->isDebugValue()) {
+ NonDebugInfoFound = true;
+ break;
+ }
+ }
+ if (!NonDebugInfoFound)
+ // Make the block empty, losing the debug info (we could probably
+ // improve this in some cases.)
+ MBB->erase(MBB->begin(), MBB->end());
+ }
// If this block is just an unconditional branch to CurTBB, we can
// usually completely eliminate the block. The only case we cannot
// completely eliminate the block is when the block before this one
@@ -1142,7 +1204,7 @@ ReoptimizeBlock:
PriorFBB = MBB;
}
TII->RemoveBranch(PrevBB);
- TII->InsertBranch(PrevBB, PriorTBB, PriorFBB, PriorCond);
+ TII->InsertBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, dl);
}
// Iterate through all the predecessors, revectoring each in-turn.
@@ -1168,7 +1230,7 @@ ReoptimizeBlock:
if (!NewCurUnAnalyzable && NewCurTBB && NewCurTBB == NewCurFBB) {
TII->RemoveBranch(*PMBB);
NewCurCond.clear();
- TII->InsertBranch(*PMBB, NewCurTBB, 0, NewCurCond);
+ TII->InsertBranch(*PMBB, NewCurTBB, 0, NewCurCond, dl);
MadeChange = true;
++NumBranchOpts;
PMBB->CorrectExtraCFGEdges(NewCurTBB, 0, false);
@@ -1188,7 +1250,7 @@ ReoptimizeBlock:
}
// Add the branch back if the block is more than just an uncond branch.
- TII->InsertBranch(*MBB, CurTBB, 0, CurCond);
+ TII->InsertBranch(*MBB, CurTBB, 0, CurCond, dl);
}
}
@@ -1228,7 +1290,7 @@ ReoptimizeBlock:
if (CurFallsThru) {
MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
CurCond.clear();
- TII->InsertBranch(*MBB, NextBB, 0, CurCond);
+ TII->InsertBranch(*MBB, NextBB, 0, CurCond, dl);
}
MBB->moveAfter(PredBB);
MadeChange = true;
diff --git a/libclamav/c++/llvm/lib/CodeGen/BranchFolding.h b/libclamav/c++/llvm/lib/CodeGen/BranchFolding.h
index b087395..15dfa7f 100644
--- a/libclamav/c++/llvm/lib/CodeGen/BranchFolding.h
+++ b/libclamav/c++/llvm/lib/CodeGen/BranchFolding.h
@@ -102,8 +102,9 @@ namespace llvm {
MachineBasicBlock *PredBB);
void RemoveBlocksWithHash(unsigned CurHash, MachineBasicBlock* SuccBB,
MachineBasicBlock* PredBB);
- unsigned CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
- unsigned maxCommonTailLength);
+ bool CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
+ unsigned maxCommonTailLength,
+ unsigned &commonTailIndex);
bool OptimizeBranches(MachineFunction &MF);
bool OptimizeBlock(MachineBasicBlock *MBB);
diff --git a/libclamav/c++/llvm/lib/CodeGen/CMakeLists.txt b/libclamav/c++/llvm/lib/CodeGen/CMakeLists.txt
index d385b86..2ef115d 100644
--- a/libclamav/c++/llvm/lib/CodeGen/CMakeLists.txt
+++ b/libclamav/c++/llvm/lib/CodeGen/CMakeLists.txt
@@ -1,18 +1,20 @@
add_llvm_library(LLVMCodeGen
AggressiveAntiDepBreaker.cpp
+ Analysis.cpp
BranchFolding.cpp
CalcSpillWeights.cpp
+ CallingConvLower.cpp
CodePlacementOpt.cpp
CriticalAntiDepBreaker.cpp
DeadMachineInstructionElim.cpp
DwarfEHPrepare.cpp
ELFCodeEmitter.cpp
ELFWriter.cpp
- ExactHazardRecognizer.cpp
GCMetadata.cpp
GCMetadataPrinter.cpp
GCStrategy.cpp
IfConversion.cpp
+ InlineSpiller.cpp
IntrinsicLowering.cpp
LLVMTargetMachine.cpp
LatencyPriorityQueue.cpp
@@ -20,6 +22,7 @@ add_llvm_library(LLVMCodeGen
LiveIntervalAnalysis.cpp
LiveStackAnalysis.cpp
LiveVariables.cpp
+ LocalStackSlotAllocation.cpp
LowerSubregs.cpp
MachineBasicBlock.cpp
MachineCSE.cpp
@@ -27,6 +30,7 @@ add_llvm_library(LLVMCodeGen
MachineFunction.cpp
MachineFunctionAnalysis.cpp
MachineFunctionPass.cpp
+ MachineFunctionPrinterPass.cpp
MachineInstr.cpp
MachineLICM.cpp
MachineLoopInfo.cpp
@@ -39,20 +43,22 @@ add_llvm_library(LLVMCodeGen
MachineVerifier.cpp
ObjectCodeEmitter.cpp
OcamlGC.cpp
- OptimizeExts.cpp
OptimizePHIs.cpp
PHIElimination.cpp
Passes.cpp
+ PeepholeOptimizer.cpp
+ PostRAHazardRecognizer.cpp
PostRASchedulerList.cpp
PreAllocSplitting.cpp
ProcessImplicitDefs.cpp
PrologEpilogInserter.cpp
PseudoSourceValue.cpp
+ RegAllocFast.cpp
RegAllocLinearScan.cpp
- RegAllocLocal.cpp
RegAllocPBQP.cpp
RegisterCoalescer.cpp
RegisterScavenging.cpp
+ RenderMachineFunction.cpp
ScheduleDAG.cpp
ScheduleDAGEmit.cpp
ScheduleDAGInstrs.cpp
@@ -63,6 +69,8 @@ add_llvm_library(LLVMCodeGen
SjLjEHPrepare.cpp
SlotIndexes.cpp
Spiller.cpp
+ SplitKit.cpp
+ Splitter.cpp
StackProtector.cpp
StackSlotColoring.cpp
StrongPHIElimination.cpp
diff --git a/libclamav/c++/llvm/lib/CodeGen/CalcSpillWeights.cpp b/libclamav/c++/llvm/lib/CodeGen/CalcSpillWeights.cpp
index a328d0e..1b7e08a 100644
--- a/libclamav/c++/llvm/lib/CodeGen/CalcSpillWeights.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/CalcSpillWeights.cpp
@@ -25,8 +25,8 @@
using namespace llvm;
char CalculateSpillWeights::ID = 0;
-static RegisterPass<CalculateSpillWeights> X("calcspillweights",
- "Calculate spill weights");
+INITIALIZE_PASS(CalculateSpillWeights, "calcspillweights",
+ "Calculate spill weights", false, false);
void CalculateSpillWeights::getAnalysisUsage(AnalysisUsage &au) const {
au.addRequired<LiveIntervals>();
@@ -41,108 +41,184 @@ bool CalculateSpillWeights::runOnMachineFunction(MachineFunction &fn) {
<< "********** Function: "
<< fn.getFunction()->getName() << '\n');
- LiveIntervals *lis = &getAnalysis<LiveIntervals>();
- MachineLoopInfo *loopInfo = &getAnalysis<MachineLoopInfo>();
- const TargetInstrInfo *tii = fn.getTarget().getInstrInfo();
- MachineRegisterInfo *mri = &fn.getRegInfo();
-
- SmallSet<unsigned, 4> processed;
- for (MachineFunction::iterator mbbi = fn.begin(), mbbe = fn.end();
- mbbi != mbbe; ++mbbi) {
- MachineBasicBlock* mbb = mbbi;
- SlotIndex mbbEnd = lis->getMBBEndIdx(mbb);
- MachineLoop* loop = loopInfo->getLoopFor(mbb);
- unsigned loopDepth = loop ? loop->getLoopDepth() : 0;
- bool isExiting = loop ? loop->isLoopExiting(mbb) : false;
-
- for (MachineBasicBlock::const_iterator mii = mbb->begin(), mie = mbb->end();
- mii != mie; ++mii) {
- const MachineInstr *mi = mii;
- if (tii->isIdentityCopy(*mi) || mi->isImplicitDef() || mi->isDebugValue())
- continue;
-
- for (unsigned i = 0, e = mi->getNumOperands(); i != e; ++i) {
- const MachineOperand &mopi = mi->getOperand(i);
- if (!mopi.isReg() || mopi.getReg() == 0)
- continue;
- unsigned reg = mopi.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(mopi.getReg()))
- continue;
- // Multiple uses of reg by the same instruction. It should not
- // contribute to spill weight again.
- if (!processed.insert(reg))
- continue;
-
- bool hasDef = mopi.isDef();
- bool hasUse = !hasDef;
- for (unsigned j = i+1; j != e; ++j) {
- const MachineOperand &mopj = mi->getOperand(j);
- if (!mopj.isReg() || mopj.getReg() != reg)
- continue;
- hasDef |= mopj.isDef();
- hasUse |= mopj.isUse();
- if (hasDef && hasUse)
- break;
- }
-
- LiveInterval ®Int = lis->getInterval(reg);
- float weight = lis->getSpillWeight(hasDef, hasUse, loopDepth);
- if (hasDef && isExiting) {
- // Looks like this is a loop count variable update.
- SlotIndex defIdx = lis->getInstructionIndex(mi).getDefIndex();
- const LiveRange *dlr =
- lis->getInterval(reg).getLiveRangeContaining(defIdx);
- if (dlr->end >= mbbEnd)
- weight *= 3.0F;
- }
- regInt.weight += weight;
- }
- processed.clear();
- }
+ LiveIntervals &lis = getAnalysis<LiveIntervals>();
+ VirtRegAuxInfo vrai(fn, lis, getAnalysis<MachineLoopInfo>());
+ for (LiveIntervals::iterator I = lis.begin(), E = lis.end(); I != E; ++I) {
+ LiveInterval &li = *I->second;
+ if (TargetRegisterInfo::isVirtualRegister(li.reg))
+ vrai.CalculateWeightAndHint(li);
+ }
+ return false;
+}
+
+// Return the preferred allocation register for reg, given a COPY instruction.
+static unsigned copyHint(const MachineInstr *mi, unsigned reg,
+ const TargetRegisterInfo &tri,
+ const MachineRegisterInfo &mri) {
+ unsigned sub, hreg, hsub;
+ if (mi->getOperand(0).getReg() == reg) {
+ sub = mi->getOperand(0).getSubReg();
+ hreg = mi->getOperand(1).getReg();
+ hsub = mi->getOperand(1).getSubReg();
+ } else {
+ sub = mi->getOperand(1).getSubReg();
+ hreg = mi->getOperand(0).getReg();
+ hsub = mi->getOperand(0).getSubReg();
}
- for (LiveIntervals::iterator I = lis->begin(), E = lis->end(); I != E; ++I) {
- LiveInterval &li = *I->second;
- if (TargetRegisterInfo::isVirtualRegister(li.reg)) {
- // If the live interval length is essentially zero, i.e. in every live
- // range the use follows def immediately, it doesn't make sense to spill
- // it and hope it will be easier to allocate for this li.
- if (isZeroLengthInterval(&li)) {
- li.weight = HUGE_VALF;
- continue;
- }
-
- bool isLoad = false;
- SmallVector<LiveInterval*, 4> spillIs;
- if (lis->isReMaterializable(li, spillIs, isLoad)) {
- // If all of the definitions of the interval are re-materializable,
- // it is a preferred candidate for spilling. If non of the defs are
- // loads, then it's potentially very cheap to re-materialize.
- // FIXME: this gets much more complicated once we support non-trivial
- // re-materialization.
- if (isLoad)
- li.weight *= 0.9F;
- else
- li.weight *= 0.5F;
- }
-
- // Slightly prefer live interval that has been assigned a preferred reg.
- std::pair<unsigned, unsigned> Hint = mri->getRegAllocationHint(li.reg);
- if (Hint.first || Hint.second)
- li.weight *= 1.01F;
-
- lis->normalizeSpillWeight(li);
+ if (!hreg)
+ return 0;
+
+ if (TargetRegisterInfo::isVirtualRegister(hreg))
+ return sub == hsub ? hreg : 0;
+
+ const TargetRegisterClass *rc = mri.getRegClass(reg);
+
+ // Only allow physreg hints in rc.
+ if (sub == 0)
+ return rc->contains(hreg) ? hreg : 0;
+
+ // reg:sub should match the physreg hreg.
+ return tri.getMatchingSuperReg(hreg, sub, rc);
+}
+
+void VirtRegAuxInfo::CalculateWeightAndHint(LiveInterval &li) {
+ MachineRegisterInfo &mri = mf_.getRegInfo();
+ const TargetRegisterInfo &tri = *mf_.getTarget().getRegisterInfo();
+ MachineBasicBlock *mbb = 0;
+ MachineLoop *loop = 0;
+ unsigned loopDepth = 0;
+ bool isExiting = false;
+ float totalWeight = 0;
+ SmallPtrSet<MachineInstr*, 8> visited;
+
+ // Find the best physreg hint and the best virtreg hint.
+ float bestPhys = 0, bestVirt = 0;
+ unsigned hintPhys = 0, hintVirt = 0;
+
+ // Don't recompute a target specific hint.
+ bool noHint = mri.getRegAllocationHint(li.reg).first != 0;
+
+ for (MachineRegisterInfo::reg_iterator I = mri.reg_begin(li.reg);
+ MachineInstr *mi = I.skipInstruction();) {
+ if (mi->isIdentityCopy() || mi->isImplicitDef() || mi->isDebugValue())
+ continue;
+ if (!visited.insert(mi))
+ continue;
+
+ // Get loop info for mi.
+ if (mi->getParent() != mbb) {
+ mbb = mi->getParent();
+ loop = loops_.getLoopFor(mbb);
+ loopDepth = loop ? loop->getLoopDepth() : 0;
+ isExiting = loop ? loop->isLoopExiting(mbb) : false;
+ }
+
+ // Calculate instr weight.
+ bool reads, writes;
+ tie(reads, writes) = mi->readsWritesVirtualRegister(li.reg);
+ float weight = LiveIntervals::getSpillWeight(writes, reads, loopDepth);
+
+ // Give extra weight to what looks like a loop induction variable update.
+ if (writes && isExiting && lis_.isLiveOutOfMBB(li, mbb))
+ weight *= 3;
+
+ totalWeight += weight;
+
+ // Get allocation hints from copies.
+ if (noHint || !mi->isCopy())
+ continue;
+ unsigned hint = copyHint(mi, li.reg, tri, mri);
+ if (!hint)
+ continue;
+ float hweight = hint_[hint] += weight;
+ if (TargetRegisterInfo::isPhysicalRegister(hint)) {
+ if (hweight > bestPhys && lis_.isAllocatable(hint))
+ bestPhys = hweight, hintPhys = hint;
+ } else {
+ if (hweight > bestVirt)
+ bestVirt = hweight, hintVirt = hint;
}
}
-
- return false;
+
+ hint_.clear();
+
+ // Always prefer the physreg hint.
+ if (unsigned hint = hintPhys ? hintPhys : hintVirt) {
+ mri.setRegAllocationHint(li.reg, 0, hint);
+ // Weakly boost the spill weight of hinted registers.
+ totalWeight *= 1.01F;
+ }
+
+ // Mark li as unspillable if all live ranges are tiny.
+ if (li.isZeroLength()) {
+ li.markNotSpillable();
+ return;
+ }
+
+ // If all of the definitions of the interval are re-materializable,
+ // it is a preferred candidate for spilling. If none of the defs are
+ // loads, then it's potentially very cheap to re-materialize.
+ // FIXME: this gets much more complicated once we support non-trivial
+ // re-materialization.
+ bool isLoad = false;
+ SmallVector<LiveInterval*, 4> spillIs;
+ if (lis_.isReMaterializable(li, spillIs, isLoad)) {
+ if (isLoad)
+ totalWeight *= 0.9F;
+ else
+ totalWeight *= 0.5F;
+ }
+
+ li.weight = totalWeight;
+ lis_.normalizeSpillWeight(li);
}
-/// Returns true if the given live interval is zero length.
-bool CalculateSpillWeights::isZeroLengthInterval(LiveInterval *li) const {
- for (LiveInterval::Ranges::const_iterator
- i = li->ranges.begin(), e = li->ranges.end(); i != e; ++i)
- if (i->end.getPrevIndex() > i->start)
- return false;
- return true;
+void VirtRegAuxInfo::CalculateRegClass(unsigned reg) {
+ MachineRegisterInfo &mri = mf_.getRegInfo();
+ const TargetRegisterInfo *tri = mf_.getTarget().getRegisterInfo();
+ const TargetRegisterClass *orc = mri.getRegClass(reg);
+ SmallPtrSet<const TargetRegisterClass*,8> rcs;
+
+ for (MachineRegisterInfo::reg_nodbg_iterator I = mri.reg_nodbg_begin(reg),
+ E = mri.reg_nodbg_end(); I != E; ++I) {
+ // The targets don't have accurate enough regclass descriptions that we can
+ // handle subregs. We need something similar to
+ // TRI::getMatchingSuperRegClass, but returning a super class instead of a
+ // sub class.
+ if (I.getOperand().getSubReg()) {
+ DEBUG(dbgs() << "Cannot handle subregs: " << I.getOperand() << '\n');
+ return;
+ }
+ if (const TargetRegisterClass *rc =
+ I->getDesc().getRegClass(I.getOperandNo(), tri))
+ rcs.insert(rc);
+ }
+
+ // If we found no regclass constraints, just leave reg as is.
+ // In theory, we could inflate to the largest superclass of reg's existing
+ // class, but that might not be legal for the current cpu setting.
+ // This could happen if reg is only used by COPY instructions, so we may need
+ // to improve on this.
+ if (rcs.empty()) {
+ return;
+ }
+
+ // Compute the intersection of all classes in rcs.
+ // This ought to be independent of iteration order, but if the target register
+ // classes don't form a proper algebra, it is possible to get different
+ // results. The solution is to make sure the intersection of any two register
+ // classes is also a register class or the null set.
+ const TargetRegisterClass *rc = 0;
+ for (SmallPtrSet<const TargetRegisterClass*,8>::iterator I = rcs.begin(),
+ E = rcs.end(); I != E; ++I) {
+ rc = rc ? getCommonSubClass(rc, *I) : *I;
+ assert(rc && "Incompatible regclass constraints found");
+ }
+
+ if (rc == orc)
+ return;
+ DEBUG(dbgs() << "Inflating " << orc->getName() << ":%reg" << reg << " to "
+ << rc->getName() <<".\n");
+ mri.setRegClass(reg, rc);
}
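[Editorial sketch, not part of the patch: a self-contained illustration of the per-use weighting that CalculateWeightAndHint accumulates above. The 10^loopDepth base is an assumption taken from the LLVM 2.8-era LiveIntervals::getSpillWeight; the helper name is hypothetical, and the 3x factor mirrors the loop-exiting-def case in the hunk.]

#include <cmath>

// Sketch only: per-use spill weight. 'reads'/'writes' correspond to the
// readsWritesVirtualRegister result; a loop-exiting def is weighted 3x as a
// likely induction-variable update.
static float approxUseWeight(bool reads, bool writes, unsigned loopDepth,
                             bool loopExitingDef) {
  float w = (float)(reads + writes) * std::pow(10.0f, (float)loopDepth);
  if (writes && loopExitingDef)
    w *= 3.0f;
  return w;
}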
diff --git a/libclamav/c++/llvm/lib/CodeGen/CallingConvLower.cpp b/libclamav/c++/llvm/lib/CodeGen/CallingConvLower.cpp
new file mode 100644
index 0000000..62ad817
--- /dev/null
+++ b/libclamav/c++/llvm/lib/CodeGen/CallingConvLower.cpp
@@ -0,0 +1,177 @@
+//===-- CallingConvLower.cpp - Calling Conventions ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the CCState class, used for lowering and implementing
+// calling conventions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+using namespace llvm;
+
+CCState::CCState(CallingConv::ID CC, bool isVarArg, const TargetMachine &tm,
+ SmallVector<CCValAssign, 16> &locs, LLVMContext &C)
+ : CallingConv(CC), IsVarArg(isVarArg), TM(tm),
+ TRI(*TM.getRegisterInfo()), Locs(locs), Context(C) {
+ // No stack is used.
+ StackOffset = 0;
+
+ UsedRegs.resize((TRI.getNumRegs()+31)/32);
+}
+
+// HandleByVal - Allocate a stack slot large enough to pass an argument by
+// value. The size and alignment information of the argument is encoded in its
+// parameter attribute.
+void CCState::HandleByVal(unsigned ValNo, EVT ValVT,
+ EVT LocVT, CCValAssign::LocInfo LocInfo,
+ int MinSize, int MinAlign,
+ ISD::ArgFlagsTy ArgFlags) {
+ unsigned Align = ArgFlags.getByValAlign();
+ unsigned Size = ArgFlags.getByValSize();
+ if (MinSize > (int)Size)
+ Size = MinSize;
+ if (MinAlign > (int)Align)
+ Align = MinAlign;
+ unsigned Offset = AllocateStack(Size, Align);
+
+ addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+}
+
+/// MarkAllocated - Mark a register and all of its aliases as allocated.
+void CCState::MarkAllocated(unsigned Reg) {
+ UsedRegs[Reg/32] |= 1 << (Reg&31);
+
+ if (const unsigned *RegAliases = TRI.getAliasSet(Reg))
+ for (; (Reg = *RegAliases); ++RegAliases)
+ UsedRegs[Reg/32] |= 1 << (Reg&31);
+}
+
+/// AnalyzeFormalArguments - Analyze an array of argument values,
+/// incorporating info about the formals into this state.
+void
+CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
+ CCAssignFn Fn) {
+ unsigned NumArgs = Ins.size();
+
+ for (unsigned i = 0; i != NumArgs; ++i) {
+ EVT ArgVT = Ins[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
+ if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
+#ifndef NDEBUG
+ dbgs() << "Formal argument #" << i << " has unhandled type "
+ << ArgVT.getEVTString();
+#endif
+ llvm_unreachable(0);
+ }
+ }
+}
+
+/// CheckReturn - Analyze the return values of a function, returning true if
+/// the return can be performed without sret-demotion, and false otherwise.
+bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
+ CCAssignFn Fn) {
+ // Determine which register each value should be copied into.
+ for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
+ EVT VT = Outs[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
+ return false;
+ }
+ return true;
+}
+
+/// AnalyzeReturn - Analyze the returned values of a return,
+/// incorporating info about the result values into this state.
+void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
+ CCAssignFn Fn) {
+ // Determine which register each value should be copied into.
+ for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
+ EVT VT = Outs[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) {
+#ifndef NDEBUG
+ dbgs() << "Return operand #" << i << " has unhandled type "
+ << VT.getEVTString();
+#endif
+ llvm_unreachable(0);
+ }
+ }
+}
+
+/// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
+/// incorporating info about the passed values into this state.
+void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
+ CCAssignFn Fn) {
+ unsigned NumOps = Outs.size();
+ for (unsigned i = 0; i != NumOps; ++i) {
+ EVT ArgVT = Outs[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
+ if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
+#ifndef NDEBUG
+ dbgs() << "Call operand #" << i << " has unhandled type "
+ << ArgVT.getEVTString();
+#endif
+ llvm_unreachable(0);
+ }
+ }
+}
+
+/// AnalyzeCallOperands - Same as above except it takes vectors of types
+/// and argument flags.
+void CCState::AnalyzeCallOperands(SmallVectorImpl<EVT> &ArgVTs,
+ SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
+ CCAssignFn Fn) {
+ unsigned NumOps = ArgVTs.size();
+ for (unsigned i = 0; i != NumOps; ++i) {
+ EVT ArgVT = ArgVTs[i];
+ ISD::ArgFlagsTy ArgFlags = Flags[i];
+ if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
+#ifndef NDEBUG
+ dbgs() << "Call operand #" << i << " has unhandled type "
+ << ArgVT.getEVTString();
+#endif
+ llvm_unreachable(0);
+ }
+ }
+}
+
+/// AnalyzeCallResult - Analyze the return values of a call,
+/// incorporating info about the passed values into this state.
+void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
+ CCAssignFn Fn) {
+ for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
+ EVT VT = Ins[i].VT;
+ ISD::ArgFlagsTy Flags = Ins[i].Flags;
+ if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
+#ifndef NDEBUG
+ dbgs() << "Call result #" << i << " has unhandled type "
+ << VT.getEVTString();
+#endif
+ llvm_unreachable(0);
+ }
+ }
+}
+
+/// AnalyzeCallResult - Same as above except it's specialized for calls which
+/// produce a single value.
+void CCState::AnalyzeCallResult(EVT VT, CCAssignFn Fn) {
+ if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
+#ifndef NDEBUG
+ dbgs() << "Call result has unhandled type "
+ << VT.getEVTString();
+#endif
+ llvm_unreachable(0);
+ }
+}
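[Editorial sketch, not part of the patch: CCState sizes UsedRegs to (TRI.getNumRegs()+31)/32 words and MarkAllocated sets one bit per physical register. A minimal, LLVM-free sketch of that packing, with hypothetical names.]

#include <vector>

// Sketch only: one bit per physical register, packed into 32-bit words,
// mirroring UsedRegs and CCState::MarkAllocated above.
struct UsedRegSet {
  std::vector<unsigned> Words;
  explicit UsedRegSet(unsigned NumRegs) : Words((NumRegs + 31) / 32, 0u) {}
  void mark(unsigned Reg) { Words[Reg / 32] |= 1u << (Reg & 31); }
  bool isAllocated(unsigned Reg) const {
    return (Words[Reg / 32] >> (Reg & 31)) & 1u;
  }
};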
diff --git a/libclamav/c++/llvm/lib/CodeGen/CodePlacementOpt.cpp b/libclamav/c++/llvm/lib/CodeGen/CodePlacementOpt.cpp
index 3ff2a04..91a9536 100644
--- a/libclamav/c++/llvm/lib/CodeGen/CodePlacementOpt.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/CodePlacementOpt.cpp
@@ -36,7 +36,7 @@ namespace {
public:
static char ID;
- CodePlacementOpt() : MachineFunctionPass(&ID) {}
+ CodePlacementOpt() : MachineFunctionPass(ID) {}
virtual bool runOnMachineFunction(MachineFunction &MF);
virtual const char *getPassName() const {
@@ -178,6 +178,8 @@ bool CodePlacementOpt::EliminateUnconditionalJumpsToTop(MachineFunction &MF,
continue;
// Move the block.
+ DEBUG(dbgs() << "CGP: Moving blocks starting at BB#" << Pred->getNumber()
+ << " to top of loop.\n");
Changed = true;
// Move it and all the blocks that can reach it via fallthrough edges
@@ -297,6 +299,8 @@ bool CodePlacementOpt::MoveDiscontiguousLoopBlocks(MachineFunction &MF,
continue;
// Move the block.
+ DEBUG(dbgs() << "CGP: Moving blocks starting at BB#" << BB->getNumber()
+ << " to be contiguous with loop.\n");
Changed = true;
// Process this block and all loop blocks contiguous with it, to keep
diff --git a/libclamav/c++/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp b/libclamav/c++/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
index 7d3de89..335d2d8 100644
--- a/libclamav/c++/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
@@ -18,6 +18,7 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -26,25 +27,26 @@
using namespace llvm;
CriticalAntiDepBreaker::
-CriticalAntiDepBreaker(MachineFunction& MFi) :
+CriticalAntiDepBreaker(MachineFunction& MFi) :
AntiDepBreaker(), MF(MFi),
MRI(MF.getRegInfo()),
+ TII(MF.getTarget().getInstrInfo()),
TRI(MF.getTarget().getRegisterInfo()),
- AllocatableSet(TRI->getAllocatableSet(MF))
-{
-}
+ AllocatableSet(TRI->getAllocatableSet(MF)),
+ Classes(TRI->getNumRegs(), static_cast<const TargetRegisterClass *>(0)),
+ KillIndices(TRI->getNumRegs(), 0),
+ DefIndices(TRI->getNumRegs(), 0) {}
CriticalAntiDepBreaker::~CriticalAntiDepBreaker() {
}
void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
- // Clear out the register class data.
- std::fill(Classes, array_endof(Classes),
- static_cast<const TargetRegisterClass *>(0));
-
- // Initialize the indices to indicate that no registers are live.
const unsigned BBSize = BB->size();
- for (unsigned i = 0; i < TRI->getNumRegs(); ++i) {
+ for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i) {
+ // Clear out the register class data.
+ Classes[i] = static_cast<const TargetRegisterClass *>(0);
+
+ // Initialize the indices to indicate that no registers are live.
KillIndices[i] = ~0u;
DefIndices[i] = BBSize;
}
@@ -63,6 +65,7 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
KillIndices[Reg] = BB->size();
DefIndices[Reg] = ~0u;
+
// Repeat, for all aliases.
for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
unsigned AliasReg = *Alias;
@@ -71,25 +74,28 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
DefIndices[AliasReg] = ~0u;
}
}
- } else {
- // In a non-return block, examine the live-in regs of all successors.
- for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
+ }
+
+ // In a non-return block, examine the live-in regs of all successors.
+ // Note a return block can have successors if the return instruction is
+ // predicated.
+ for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
SE = BB->succ_end(); SI != SE; ++SI)
- for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
+ for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
E = (*SI)->livein_end(); I != E; ++I) {
- unsigned Reg = *I;
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[Reg] = BB->size();
- DefIndices[Reg] = ~0u;
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[AliasReg] = BB->size();
- DefIndices[AliasReg] = ~0u;
- }
+ unsigned Reg = *I;
+ Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ KillIndices[Reg] = BB->size();
+ DefIndices[Reg] = ~0u;
+
+ // Repeat, for all aliases.
+ for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
+ unsigned AliasReg = *Alias;
+ Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ KillIndices[AliasReg] = BB->size();
+ DefIndices[AliasReg] = ~0u;
}
- }
+ }
// Mark live-out callee-saved registers. In a return block this is
// all callee-saved registers. In non-return this is any
@@ -102,6 +108,7 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
KillIndices[Reg] = BB->size();
DefIndices[Reg] = ~0u;
+
// Repeat, for all aliases.
for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
unsigned AliasReg = *Alias;
@@ -130,8 +137,10 @@ void CriticalAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count,
for (unsigned Reg = 0; Reg != TRI->getNumRegs(); ++Reg)
if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
+
// Mark this register to be non-renamable.
Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
+
// Move the def index to the end of the previous region, to reflect
// that the def could theoretically have been scheduled at the end.
DefIndices[Reg] = InsertPosIndex;
@@ -143,13 +152,13 @@ void CriticalAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count,
/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
-static SDep *CriticalPathStep(SUnit *SU) {
- SDep *Next = 0;
+static const SDep *CriticalPathStep(const SUnit *SU) {
+ const SDep *Next = 0;
unsigned NextDepth = 0;
// Find the predecessor edge with the greatest depth.
- for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
+ for (SUnit::const_pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
P != PE; ++P) {
- SUnit *PredSU = P->getSUnit();
+ const SUnit *PredSU = P->getSUnit();
unsigned PredLatency = P->getLatency();
unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
// In the case of a latency tie, prefer an anti-dependency edge over
@@ -164,6 +173,26 @@ static SDep *CriticalPathStep(SUnit *SU) {
}
void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
+ // It's not safe to change register allocation for source operands of
+ // instructions that have special allocation requirements. Also assume all
+ // registers used in a call must not be changed (ABI).
+ // FIXME: The issue with predicated instructions is more complex. We are
+ // being conservative here because the kill markers cannot be trusted after
+ // if-conversion:
+ // %R6<def> = LDR %SP, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
+ // ...
+ // STR %R0, %R6<kill>, %reg0, 0, pred:0, pred:%CPSR; mem:ST4[%395]
+ // %R6<def> = LDR %SP, %reg0, 100, pred:0, pred:%CPSR; mem:LD4[FixedStack12]
+ // STR %R0, %R6<kill>, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
+ //
+ // The first R6 kill is not really a kill since it's killed by a predicated
+ // instruction which may not be executed. The second R6 def may or may not
+ // re-define R6 so it's not safe to change it since the last R6 use cannot be
+ // changed.
+ bool Special = MI->getDesc().isCall() ||
+ MI->getDesc().hasExtraSrcRegAllocReq() ||
+ TII->isPredicated(MI);
+
// Scan the register operands for this instruction and update
// Classes and RegRefs.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -172,7 +201,7 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
const TargetRegisterClass *NewRC = 0;
-
+
if (i < MI->getDesc().getNumOperands())
NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);
@@ -199,9 +228,7 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
RegRefs.insert(std::make_pair(Reg, &MO));
- // It's not safe to change register allocation for source operands of
- // that have special allocation requirements.
- if (MO.isUse() && MI->getDesc().hasExtraSrcRegAllocReq()) {
+ if (MO.isUse() && Special) {
if (KeepRegs.insert(Reg)) {
for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
*Subreg; ++Subreg)
@@ -216,38 +243,43 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
// Update liveness.
// Proceding upwards, registers that are defed but not used in this
// instruction are now dead.
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg()) continue;
- unsigned Reg = MO.getReg();
- if (Reg == 0) continue;
- if (!MO.isDef()) continue;
- // Ignore two-addr defs.
- if (MI->isRegTiedToUseOperand(i)) continue;
-
- DefIndices[Reg] = Count;
- KillIndices[Reg] = ~0u;
- assert(((KillIndices[Reg] == ~0u) !=
- (DefIndices[Reg] == ~0u)) &&
- "Kill and Def maps aren't consistent for Reg!");
- KeepRegs.erase(Reg);
- Classes[Reg] = 0;
- RegRefs.erase(Reg);
- // Repeat, for all subregs.
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg) {
- unsigned SubregReg = *Subreg;
- DefIndices[SubregReg] = Count;
- KillIndices[SubregReg] = ~0u;
- KeepRegs.erase(SubregReg);
- Classes[SubregReg] = 0;
- RegRefs.erase(SubregReg);
- }
- // Conservatively mark super-registers as unusable.
- for (const unsigned *Super = TRI->getSuperRegisters(Reg);
- *Super; ++Super) {
- unsigned SuperReg = *Super;
- Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
+
+ if (!TII->isPredicated(MI)) {
+ // Predicated defs are modeled as read + write, i.e. similar to two
+ // address updates.
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg == 0) continue;
+ if (!MO.isDef()) continue;
+ // Ignore two-addr defs.
+ if (MI->isRegTiedToUseOperand(i)) continue;
+
+ DefIndices[Reg] = Count;
+ KillIndices[Reg] = ~0u;
+ assert(((KillIndices[Reg] == ~0u) !=
+ (DefIndices[Reg] == ~0u)) &&
+ "Kill and Def maps aren't consistent for Reg!");
+ KeepRegs.erase(Reg);
+ Classes[Reg] = 0;
+ RegRefs.erase(Reg);
+ // Repeat, for all subregs.
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg) {
+ unsigned SubregReg = *Subreg;
+ DefIndices[SubregReg] = Count;
+ KillIndices[SubregReg] = ~0u;
+ KeepRegs.erase(SubregReg);
+ Classes[SubregReg] = 0;
+ RegRefs.erase(SubregReg);
+ }
+ // Conservatively mark super-registers as unusable.
+ for (const unsigned *Super = TRI->getSuperRegisters(Reg);
+ *Super; ++Super) {
+ unsigned SuperReg = *Super;
+ Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ }
}
}
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -298,6 +330,8 @@ CriticalAntiDepBreaker::findSuitableFreeRegister(MachineInstr *MI,
for (TargetRegisterClass::iterator R = RC->allocation_order_begin(MF),
RE = RC->allocation_order_end(MF); R != RE; ++R) {
unsigned NewReg = *R;
+ // Don't consider non-allocatable registers
+ if (!AllocatableSet.test(NewReg)) continue;
// Don't replace a register with itself.
if (NewReg == AntiDepReg) continue;
// Don't replace a register with one that was recently used to repair
@@ -326,18 +360,23 @@ CriticalAntiDepBreaker::findSuitableFreeRegister(MachineInstr *MI,
}
unsigned CriticalAntiDepBreaker::
-BreakAntiDependencies(std::vector<SUnit>& SUnits,
- MachineBasicBlock::iterator& Begin,
- MachineBasicBlock::iterator& End,
+BreakAntiDependencies(const std::vector<SUnit>& SUnits,
+ MachineBasicBlock::iterator Begin,
+ MachineBasicBlock::iterator End,
unsigned InsertPosIndex) {
// The code below assumes that there is at least one instruction,
// so just duck out immediately if the block is empty.
if (SUnits.empty()) return 0;
+ // Keep a map of the MachineInstr*'s back to the SUnit representing them.
+ // This is used for updating debug information.
+ DenseMap<MachineInstr*,const SUnit*> MISUnitMap;
+
// Find the node at the bottom of the critical path.
- SUnit *Max = 0;
+ const SUnit *Max = 0;
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
- SUnit *SU = &SUnits[i];
+ const SUnit *SU = &SUnits[i];
+ MISUnitMap[SU->getInstr()] = SU;
if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
Max = SU;
}
@@ -357,7 +396,7 @@ BreakAntiDependencies(std::vector<SUnit>& SUnits,
// Track progress along the critical path through the SUnit graph as we walk
// the instructions.
- SUnit *CriticalPathSU = Max;
+ const SUnit *CriticalPathSU = Max;
MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();
// Consider this pattern:
@@ -401,7 +440,7 @@ BreakAntiDependencies(std::vector<SUnit>& SUnits,
// fix that remaining critical edge too. This is a little more involved,
// because unlike the most recent register, less recent registers should
// still be considered, though only if no other registers are available.
- unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};
+ std::vector<unsigned> LastNewReg(TRI->getNumRegs(), 0);
// Attempt to break anti-dependence edges on the critical path. Walk the
// instructions from the bottom up, tracking information about liveness
@@ -422,15 +461,15 @@ BreakAntiDependencies(std::vector<SUnit>& SUnits,
// breaking anti-dependence edges that aren't going to significantly
// impact the overall schedule. There are a limited number of registers
// and we want to save them for the important edges.
- //
+ //
// TODO: Instructions with multiple defs could have multiple
// anti-dependencies. The current code here only knows how to break one
// edge per instruction. Note that we'd have to be able to break all of
// the anti-dependencies in an instruction in order to be effective.
unsigned AntiDepReg = 0;
if (MI == CriticalPathMI) {
- if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
- SUnit *NextSU = Edge->getSUnit();
+ if (const SDep *Edge = CriticalPathStep(CriticalPathSU)) {
+ const SUnit *NextSU = Edge->getSUnit();
// Only consider anti-dependence edges.
if (Edge->getKind() == SDep::Anti) {
@@ -452,7 +491,7 @@ BreakAntiDependencies(std::vector<SUnit>& SUnits,
// Also, if there are dependencies on other SUnits with the
// same register as the anti-dependency, don't attempt to
// break it.
- for (SUnit::pred_iterator P = CriticalPathSU->Preds.begin(),
+ for (SUnit::const_pred_iterator P = CriticalPathSU->Preds.begin(),
PE = CriticalPathSU->Preds.end(); P != PE; ++P)
if (P->getSUnit() == NextSU ?
(P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
@@ -473,7 +512,11 @@ BreakAntiDependencies(std::vector<SUnit>& SUnits,
PrescanInstruction(MI);
- if (MI->getDesc().hasExtraDefRegAllocReq())
+ // If MI's defs have a special allocation requirement, don't allow
+ // any def registers to be changed. Also assume all registers
+ // defined in a call must not be changed (ABI).
+ if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq() ||
+ TII->isPredicated(MI))
// If this instruction's defs have special allocation requirement, don't
// break this anti-dependency.
AntiDepReg = 0;
@@ -485,7 +528,7 @@ BreakAntiDependencies(std::vector<SUnit>& SUnits,
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
- if (MO.isUse() && AntiDepReg == Reg) {
+ if (MO.isUse() && TRI->regsOverlap(AntiDepReg, Reg)) {
AntiDepReg = 0;
break;
}
@@ -519,8 +562,22 @@ BreakAntiDependencies(std::vector<SUnit>& SUnits,
std::multimap<unsigned, MachineOperand *>::iterator>
Range = RegRefs.equal_range(AntiDepReg);
for (std::multimap<unsigned, MachineOperand *>::iterator
- Q = Range.first, QE = Range.second; Q != QE; ++Q)
+ Q = Range.first, QE = Range.second; Q != QE; ++Q) {
Q->second->setReg(NewReg);
+ // If the SU for the instruction being updated has debug information
+ // related to the anti-dependency register, make sure to update that
+ // as well.
+ const SUnit *SU = MISUnitMap[Q->second->getParent()];
+ if (!SU) continue;
+ for (unsigned i = 0, e = SU->DbgInstrList.size() ; i < e ; ++i) {
+ MachineInstr *DI = SU->DbgInstrList[i];
+ assert (DI->getNumOperands()==3 && DI->getOperand(0).isReg() &&
+ DI->getOperand(0).getReg()
+ && "Non register dbg_value attached to SUnit!");
+ if (DI->getOperand(0).getReg() == AntiDepReg)
+ DI->getOperand(0).setReg(NewReg);
+ }
+ }
// We just went back in time and modified history; the
// liveness information for the anti-depenence reg is now
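[Editorial sketch, not part of the patch: the final hunk above retargets every recorded reference of the anti-dependence register and keeps attached dbg_values in sync. A stripped-down sketch of just the renaming step, using hypothetical stand-in types.]

#include <map>
#include <utility>

// Hypothetical stand-in for MachineOperand in this sketch.
struct Operand { unsigned Reg; void setReg(unsigned R) { Reg = R; } };

// Retarget every recorded reference of AntiDepReg at NewReg, mirroring the
// RegRefs loop in BreakAntiDependencies above.
static void renameReferences(std::multimap<unsigned, Operand*> &RegRefs,
                             unsigned AntiDepReg, unsigned NewReg) {
  typedef std::multimap<unsigned, Operand*>::iterator It;
  std::pair<It, It> Range = RegRefs.equal_range(AntiDepReg);
  for (It Q = Range.first; Q != Range.second; ++Q)
    Q->second->setReg(NewReg);
}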
diff --git a/libclamav/c++/llvm/lib/CodeGen/CriticalAntiDepBreaker.h b/libclamav/c++/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
index 9e8db02..0ed7c35 100644
--- a/libclamav/c++/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
+++ b/libclamav/c++/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
@@ -22,15 +22,18 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
-#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include <map>
namespace llvm {
+class TargetInstrInfo;
+class TargetRegisterInfo;
+
class CriticalAntiDepBreaker : public AntiDepBreaker {
MachineFunction& MF;
MachineRegisterInfo &MRI;
+ const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
/// AllocatableSet - The set of allocatable registers.
@@ -43,19 +46,18 @@ namespace llvm {
/// corresponding value is null. If the register is live but used in
/// multiple register classes, the corresponding value is -1 casted to a
/// pointer.
- const TargetRegisterClass *
- Classes[TargetRegisterInfo::FirstVirtualRegister];
+ std::vector<const TargetRegisterClass*> Classes;
/// RegRegs - Map registers to all their references within a live range.
std::multimap<unsigned, MachineOperand *> RegRefs;
/// KillIndices - The index of the most recent kill (proceding bottom-up),
/// or ~0u if the register is not live.
- unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
+ std::vector<unsigned> KillIndices;
/// DefIndices - The index of the most recent complete def (proceding bottom
/// up), or ~0u if the register is live.
- unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
+ std::vector<unsigned> DefIndices;
/// KeepRegs - A set of registers which are live and cannot be changed to
/// break anti-dependencies.
@@ -72,9 +74,9 @@ namespace llvm {
/// path
/// of the ScheduleDAG and break them by renaming registers.
///
- unsigned BreakAntiDependencies(std::vector<SUnit>& SUnits,
- MachineBasicBlock::iterator& Begin,
- MachineBasicBlock::iterator& End,
+ unsigned BreakAntiDependencies(const std::vector<SUnit>& SUnits,
+ MachineBasicBlock::iterator Begin,
+ MachineBasicBlock::iterator End,
unsigned InsertPosIndex);
/// Observe - Update liveness information to account for the current
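[Editorial sketch, not part of the patch: the header now sizes Classes, KillIndices and DefIndices at runtime from TRI->getNumRegs() instead of the compile-time FirstVirtualRegister bound. A minimal illustration of that change in isolation, with hypothetical names.]

#include <vector>

// Sketch only: per-physreg tables sized from the target at runtime.
struct RegTables {
  std::vector<unsigned> KillIndices; // ~0u => register not currently live
  std::vector<unsigned> DefIndices;
  explicit RegTables(unsigned NumPhysRegs)
      : KillIndices(NumPhysRegs, ~0u), DefIndices(NumPhysRegs, 0u) {}
};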
diff --git a/libclamav/c++/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp b/libclamav/c++/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
index d69c995..318d922 100644
--- a/libclamav/c++/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/DeadMachineInstructionElim.cpp
@@ -36,7 +36,7 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
- DeadMachineInstructionElim() : MachineFunctionPass(&ID) {}
+ DeadMachineInstructionElim() : MachineFunctionPass(ID) {}
private:
bool isDead(const MachineInstr *MI) const;
@@ -44,9 +44,8 @@ namespace {
}
char DeadMachineInstructionElim::ID = 0;
-static RegisterPass<DeadMachineInstructionElim>
-Y("dead-mi-elimination",
- "Remove dead machine instructions");
+INITIALIZE_PASS(DeadMachineInstructionElim, "dead-mi-elimination",
+ "Remove dead machine instructions", false, false);
FunctionPass *llvm::createDeadMachineInstructionElimPass() {
return new DeadMachineInstructionElim();
@@ -81,9 +80,8 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
TRI = MF.getTarget().getRegisterInfo();
TII = MF.getTarget().getInstrInfo();
- // Compute a bitvector to represent all non-allocatable physregs.
- BitVector NonAllocatableRegs = TRI->getAllocatableSet(MF);
- NonAllocatableRegs.flip();
+ // Treat reserved registers as always live.
+ BitVector ReservedRegs = TRI->getReservedRegs(MF);
// Loop over all instructions in all blocks, from bottom to top, so that it's
// more likely that chains of dependent but ultimately dead instructions will
@@ -92,9 +90,8 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
I != E; ++I) {
MachineBasicBlock *MBB = &*I;
- // Start out assuming that all non-allocatable registers are live
- // out of this block.
- LivePhysRegs = NonAllocatableRegs;
+ // Start out assuming that reserved registers are live out of this block.
+ LivePhysRegs = ReservedRegs;
// Also add any explicit live-out physregs for this block.
if (!MBB->empty() && MBB->back().getDesc().isReturn())
@@ -105,6 +102,10 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
LivePhysRegs.set(Reg);
}
+ // FIXME: Add live-ins from successors to LivePhysRegs. Normally, physregs
+ // are not live across blocks, but some targets (x86) can have flags live
+ // out of a block.
+
// Now scan the instructions and delete dead ones, tracking physreg
// liveness as we go.
for (MachineBasicBlock::reverse_iterator MII = MBB->rbegin(),
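[Editorial sketch, not part of the patch: the DeadMachineInstructionElim hunk above seeds per-block liveness from TRI->getReservedRegs(MF) rather than the complement of the allocatable set. A hedged sketch of that seeding, assuming the llvm::BitVector API; ExplicitLiveOuts is a hypothetical stand-in for the return block's declared live-out physregs.]

#include "llvm/ADT/BitVector.h"

// Sketch only: reserved registers are treated as always live, then any
// explicit live-outs of the block are added on top.
static llvm::BitVector seedLiveness(const llvm::BitVector &ReservedRegs,
                                    const llvm::BitVector &ExplicitLiveOuts) {
  llvm::BitVector Live = ReservedRegs;
  Live |= ExplicitLiveOuts;
  return Live;
}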
diff --git a/libclamav/c++/llvm/lib/CodeGen/DwarfEHPrepare.cpp b/libclamav/c++/llvm/lib/CodeGen/DwarfEHPrepare.cpp
index 39fc85e..550fd3e 100644
--- a/libclamav/c++/llvm/lib/CodeGen/DwarfEHPrepare.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/DwarfEHPrepare.cpp
@@ -8,44 +8,52 @@
//===----------------------------------------------------------------------===//
//
// This pass mulches exception handling code into a form adapted to code
-// generation. Required if using dwarf exception handling.
+// generation. Required if using dwarf exception handling.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "dwarfehprepare"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
-#include "llvm/CodeGen/Passes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/PromoteMemToReg.h"
+#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;
STATISTIC(NumLandingPadsSplit, "Number of landing pads split");
STATISTIC(NumUnwindsLowered, "Number of unwind instructions lowered");
STATISTIC(NumExceptionValuesMoved, "Number of eh.exception calls moved");
-STATISTIC(NumStackTempsIntroduced, "Number of stack temporaries introduced");
namespace {
class DwarfEHPrepare : public FunctionPass {
+ const TargetMachine *TM;
const TargetLowering *TLI;
- bool CompileFast;
// The eh.exception intrinsic.
Function *ExceptionValueIntrinsic;
+ // The eh.selector intrinsic.
+ Function *SelectorIntrinsic;
+
+ // _Unwind_Resume_or_Rethrow call.
+ Constant *URoR;
+
+ // The EH language-specific catch-all type.
+ GlobalVariable *EHCatchAllValue;
+
// _Unwind_Resume or the target equivalent.
Constant *RewindFunction;
- // Dominator info is used when turning stack temporaries into registers.
+ // We both use and preserve dominator info.
DominatorTree *DT;
- DominanceFrontier *DF;
// The function we are running on.
Function *F;
@@ -54,42 +62,52 @@ namespace {
typedef SmallPtrSet<BasicBlock*, 8> BBSet;
BBSet LandingPads;
- // Stack temporary used to hold eh.exception values.
- AllocaInst *ExceptionValueVar;
-
bool NormalizeLandingPads();
bool LowerUnwinds();
bool MoveExceptionValueCalls();
- bool FinishStackTemporaries();
- bool PromoteStackTemporaries();
Instruction *CreateExceptionValueCall(BasicBlock *BB);
- Instruction *CreateValueLoad(BasicBlock *BB);
-
- /// CreateReadOfExceptionValue - Return the result of the eh.exception
- /// intrinsic by calling the intrinsic if in a landing pad, or loading
- /// it from the exception value variable otherwise.
- Instruction *CreateReadOfExceptionValue(BasicBlock *BB) {
- return LandingPads.count(BB) ?
- CreateExceptionValueCall(BB) : CreateValueLoad(BB);
- }
+ /// CleanupSelectors - Any remaining eh.selector intrinsic calls which still
+ /// use the "llvm.eh.catch.all.value" call need to convert to using its
+ /// initializer instead.
+ bool CleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels);
+
+ bool HasCatchAllInSelector(IntrinsicInst *);
+
+ /// FindAllCleanupSelectors - Find all eh.selector calls that are clean-ups.
+ void FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels,
+ SmallPtrSet<IntrinsicInst*, 32> &CatchAllSels);
+
+ /// FindAllURoRInvokes - Find all URoR invokes in the function.
+ void FindAllURoRInvokes(SmallPtrSet<InvokeInst*, 32> &URoRInvokes);
+
+ /// HandleURoRInvokes - Handle invokes of "_Unwind_Resume_or_Rethrow"
+ /// calls. The "unwind" part of these invokes jump to a landing pad within
+ /// the current function. This is a candidate to merge the selector
+ /// associated with the URoR invoke with the one from the URoR's landing
+ /// pad.
+ bool HandleURoRInvokes();
+
+ /// FindSelectorAndURoR - Find the eh.selector call and URoR call associated
+ /// with the eh.exception call. This recursively looks past instructions
+ /// which don't change the EH pointer value, like casts or PHI nodes.
+ bool FindSelectorAndURoR(Instruction *Inst, bool &URoRInvoke,
+ SmallPtrSet<IntrinsicInst*, 8> &SelCalls);
+
public:
static char ID; // Pass identification, replacement for typeid.
- DwarfEHPrepare(const TargetLowering *tli, bool fast) :
- FunctionPass(&ID), TLI(tli), CompileFast(fast),
- ExceptionValueIntrinsic(0), RewindFunction(0) {}
+ DwarfEHPrepare(const TargetMachine *tm) :
+ FunctionPass(ID), TM(tm), TLI(TM->getTargetLowering()),
+ ExceptionValueIntrinsic(0), SelectorIntrinsic(0),
+ URoR(0), EHCatchAllValue(0), RewindFunction(0) {}
virtual bool runOnFunction(Function &Fn);
- // getAnalysisUsage - We need dominance frontiers for memory promotion.
+ // getAnalysisUsage - We need the dominator tree for handling URoR.
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- if (!CompileFast)
- AU.addRequired<DominatorTree>();
+ AU.addRequired<DominatorTree>();
AU.addPreserved<DominatorTree>();
- if (!CompileFast)
- AU.addRequired<DominanceFrontier>();
- AU.addPreserved<DominanceFrontier>();
}
const char *getPassName() const {
@@ -101,8 +119,231 @@ namespace {
char DwarfEHPrepare::ID = 0;
-FunctionPass *llvm::createDwarfEHPass(const TargetLowering *tli, bool fast) {
- return new DwarfEHPrepare(tli, fast);
+FunctionPass *llvm::createDwarfEHPass(const TargetMachine *tm) {
+ return new DwarfEHPrepare(tm);
+}
+
+/// HasCatchAllInSelector - Return true if the intrinsic instruction has a
+/// catch-all.
+bool DwarfEHPrepare::HasCatchAllInSelector(IntrinsicInst *II) {
+ if (!EHCatchAllValue) return false;
+
+ unsigned ArgIdx = II->getNumArgOperands() - 1;
+ GlobalVariable *GV = dyn_cast<GlobalVariable>(II->getArgOperand(ArgIdx));
+ return GV == EHCatchAllValue;
+}
+
+/// FindAllCleanupSelectors - Find all eh.selector calls that are clean-ups.
+void DwarfEHPrepare::
+FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels,
+ SmallPtrSet<IntrinsicInst*, 32> &CatchAllSels) {
+ for (Value::use_iterator
+ I = SelectorIntrinsic->use_begin(),
+ E = SelectorIntrinsic->use_end(); I != E; ++I) {
+ IntrinsicInst *II = cast<IntrinsicInst>(*I);
+
+ if (II->getParent()->getParent() != F)
+ continue;
+
+ if (!HasCatchAllInSelector(II))
+ Sels.insert(II);
+ else
+ CatchAllSels.insert(II);
+ }
+}
+
+/// FindAllURoRInvokes - Find all URoR invokes in the function.
+void DwarfEHPrepare::
+FindAllURoRInvokes(SmallPtrSet<InvokeInst*, 32> &URoRInvokes) {
+ for (Value::use_iterator
+ I = URoR->use_begin(),
+ E = URoR->use_end(); I != E; ++I) {
+ if (InvokeInst *II = dyn_cast<InvokeInst>(*I))
+ URoRInvokes.insert(II);
+ }
+}
+
+/// CleanupSelectors - Any remaining eh.selector intrinsic calls which still use
+/// the "llvm.eh.catch.all.value" call need to convert to using its
+/// initializer instead.
+bool DwarfEHPrepare::CleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels) {
+ if (!EHCatchAllValue) return false;
+
+ if (!SelectorIntrinsic) {
+ SelectorIntrinsic =
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_selector);
+ if (!SelectorIntrinsic) return false;
+ }
+
+ bool Changed = false;
+ for (SmallPtrSet<IntrinsicInst*, 32>::iterator
+ I = Sels.begin(), E = Sels.end(); I != E; ++I) {
+ IntrinsicInst *Sel = *I;
+
+ // Index of the "llvm.eh.catch.all.value" variable.
+ unsigned OpIdx = Sel->getNumArgOperands() - 1;
+ GlobalVariable *GV = dyn_cast<GlobalVariable>(Sel->getArgOperand(OpIdx));
+ if (GV != EHCatchAllValue) continue;
+ Sel->setArgOperand(OpIdx, EHCatchAllValue->getInitializer());
+ Changed = true;
+ }
+
+ return Changed;
+}
+
+/// FindSelectorAndURoR - Find the eh.selector call associated with the
+/// eh.exception call. And indicate if there is a URoR "invoke" associated with
+/// the eh.exception call. This recursively looks past instructions which don't
+/// change the EH pointer value, like casts or PHI nodes.
+bool
+DwarfEHPrepare::FindSelectorAndURoR(Instruction *Inst, bool &URoRInvoke,
+ SmallPtrSet<IntrinsicInst*, 8> &SelCalls) {
+ SmallPtrSet<PHINode*, 32> SeenPHIs;
+ bool Changed = false;
+
+ for (Value::use_iterator
+ I = Inst->use_begin(), E = Inst->use_end(); I != E; ++I) {
+ Instruction *II = dyn_cast<Instruction>(*I);
+ if (!II || II->getParent()->getParent() != F) continue;
+
+ if (IntrinsicInst *Sel = dyn_cast<IntrinsicInst>(II)) {
+ if (Sel->getIntrinsicID() == Intrinsic::eh_selector)
+ SelCalls.insert(Sel);
+ } else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(II)) {
+ if (Invoke->getCalledFunction() == URoR)
+ URoRInvoke = true;
+ } else if (CastInst *CI = dyn_cast<CastInst>(II)) {
+ Changed |= FindSelectorAndURoR(CI, URoRInvoke, SelCalls);
+ } else if (PHINode *PN = dyn_cast<PHINode>(II)) {
+ if (SeenPHIs.insert(PN))
+ // Don't process a PHI node more than once.
+ Changed |= FindSelectorAndURoR(PN, URoRInvoke, SelCalls);
+ }
+ }
+
+ return Changed;
+}
+
+/// HandleURoRInvokes - Handle invokes of "_Unwind_Resume_or_Rethrow" calls. The
+/// "unwind" part of these invokes jump to a landing pad within the current
+/// function. This is a candidate to merge the selector associated with the URoR
+/// invoke with the one from the URoR's landing pad.
+bool DwarfEHPrepare::HandleURoRInvokes() {
+ if (!EHCatchAllValue) {
+ EHCatchAllValue =
+ F->getParent()->getNamedGlobal("llvm.eh.catch.all.value");
+ if (!EHCatchAllValue) return false;
+ }
+
+ if (!SelectorIntrinsic) {
+ SelectorIntrinsic =
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_selector);
+ if (!SelectorIntrinsic) return false;
+ }
+
+ SmallPtrSet<IntrinsicInst*, 32> Sels;
+ SmallPtrSet<IntrinsicInst*, 32> CatchAllSels;
+ FindAllCleanupSelectors(Sels, CatchAllSels);
+
+ if (!URoR) {
+ URoR = F->getParent()->getFunction("_Unwind_Resume_or_Rethrow");
+ if (!URoR) return CleanupSelectors(CatchAllSels);
+ }
+
+ SmallPtrSet<InvokeInst*, 32> URoRInvokes;
+ FindAllURoRInvokes(URoRInvokes);
+
+ SmallPtrSet<IntrinsicInst*, 32> SelsToConvert;
+
+ for (SmallPtrSet<IntrinsicInst*, 32>::iterator
+ SI = Sels.begin(), SE = Sels.end(); SI != SE; ++SI) {
+ const BasicBlock *SelBB = (*SI)->getParent();
+ for (SmallPtrSet<InvokeInst*, 32>::iterator
+ UI = URoRInvokes.begin(), UE = URoRInvokes.end(); UI != UE; ++UI) {
+ const BasicBlock *URoRBB = (*UI)->getParent();
+ if (DT->dominates(SelBB, URoRBB)) {
+ SelsToConvert.insert(*SI);
+ break;
+ }
+ }
+ }
+
+ bool Changed = false;
+
+ if (Sels.size() != SelsToConvert.size()) {
+ // If we haven't been able to convert all of the clean-up selectors, then
+ // loop through the slow way to see if they still need to be converted.
+ if (!ExceptionValueIntrinsic) {
+ ExceptionValueIntrinsic =
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_exception);
+ if (!ExceptionValueIntrinsic)
+ return CleanupSelectors(CatchAllSels);
+ }
+
+ for (Value::use_iterator
+ I = ExceptionValueIntrinsic->use_begin(),
+ E = ExceptionValueIntrinsic->use_end(); I != E; ++I) {
+ IntrinsicInst *EHPtr = dyn_cast<IntrinsicInst>(*I);
+ if (!EHPtr || EHPtr->getParent()->getParent() != F) continue;
+
+ bool URoRInvoke = false;
+ SmallPtrSet<IntrinsicInst*, 8> SelCalls;
+ Changed |= FindSelectorAndURoR(EHPtr, URoRInvoke, SelCalls);
+
+ if (URoRInvoke) {
+ // This EH pointer is being used by an invoke of an URoR instruction and
+ // an eh.selector intrinsic call. If the eh.selector is a 'clean-up', we
+ // need to convert it to a 'catch-all'.
+ for (SmallPtrSet<IntrinsicInst*, 8>::iterator
+ SI = SelCalls.begin(), SE = SelCalls.end(); SI != SE; ++SI)
+ if (!HasCatchAllInSelector(*SI))
+ SelsToConvert.insert(*SI);
+ }
+ }
+ }
+
+ if (!SelsToConvert.empty()) {
+ // Convert all clean-up eh.selectors, which are associated with "invokes" of
+ // URoR calls, into catch-all eh.selectors.
+ Changed = true;
+
+ for (SmallPtrSet<IntrinsicInst*, 8>::iterator
+ SI = SelsToConvert.begin(), SE = SelsToConvert.end();
+ SI != SE; ++SI) {
+ IntrinsicInst *II = *SI;
+
+ // Use the exception object pointer and the personality function
+ // from the original selector.
+ CallSite CS(II);
+ IntrinsicInst::op_iterator I = CS.arg_begin();
+ IntrinsicInst::op_iterator E = CS.arg_end();
+ IntrinsicInst::op_iterator B = prior(E);
+
+ // Exclude last argument if it is an integer.
+ if (isa<ConstantInt>(B)) E = B;
+
+ // Add exception object pointer (front).
+ // Add personality function (next).
+ // Add in any filter IDs (rest).
+ SmallVector<Value*, 8> Args(I, E);
+
+ Args.push_back(EHCatchAllValue->getInitializer()); // Catch-all indicator.
+
+ CallInst *NewSelector =
+ CallInst::Create(SelectorIntrinsic, Args.begin(), Args.end(),
+ "eh.sel.catch.all", II);
+
+ NewSelector->setTailCall(II->isTailCall());
+ NewSelector->setAttributes(II->getAttributes());
+ NewSelector->setCallingConv(II->getCallingConv());
+
+ II->replaceAllUsesWith(NewSelector);
+ II->eraseFromParent();
+ }
+ }
+
+ Changed |= CleanupSelectors(CatchAllSels);
+ return Changed;
}
/// NormalizeLandingPads - Normalize and discover landing pads, noting them
@@ -115,7 +356,7 @@ FunctionPass *llvm::createDwarfEHPass(const TargetLowering *tli, bool fast) {
bool DwarfEHPrepare::NormalizeLandingPads() {
bool Changed = false;
- const MCAsmInfo *MAI = TLI->getTargetMachine().getMCAsmInfo();
+ const MCAsmInfo *MAI = TM->getMCAsmInfo();
bool usingSjLjEH = MAI->getExceptionHandlingType() == ExceptionHandling::SjLj;
for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
@@ -212,11 +453,8 @@ bool DwarfEHPrepare::NormalizeLandingPads() {
// Add a fallthrough from NewBB to the original landing pad.
BranchInst::Create(LPad, NewBB);
- // Now update DominatorTree and DominanceFrontier analysis information.
- if (DT)
- DT->splitBlock(NewBB);
- if (DF)
- DF->splitBlock(NewBB);
+ // Now update DominatorTree analysis information.
+ DT->splitBlock(NewBB);
// Remember the newly constructed landing pad. The original landing pad
// LPad is no longer a landing pad now that all unwind edges have been
@@ -266,7 +504,7 @@ bool DwarfEHPrepare::LowerUnwinds() {
// Create the call...
CallInst *CI = CallInst::Create(RewindFunction,
- CreateReadOfExceptionValue(TI->getParent()),
+ CreateExceptionValueCall(TI->getParent()),
"", TI);
CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));
// ...followed by an UnreachableInst.
@@ -282,9 +520,11 @@ bool DwarfEHPrepare::LowerUnwinds() {
}
/// MoveExceptionValueCalls - Ensure that eh.exception is only ever called from
-/// landing pads by replacing calls outside of landing pads with loads from a
-/// stack temporary. Move eh.exception calls inside landing pads to the start
-/// of the landing pad (optional, but may make things simpler for later passes).
+/// landing pads by replacing calls outside of landing pads with direct use of
+/// a register holding the appropriate value; this requires adding calls inside
+/// all landing pads to initialize the register. Also, move eh.exception calls
+/// inside landing pads to the start of the landing pad (optional, but may make
+/// things simpler for later passes).
bool DwarfEHPrepare::MoveExceptionValueCalls() {
// If the eh.exception intrinsic is not declared in the module then there is
// nothing to do. Speed up compilation by checking for this common case.
@@ -294,68 +534,94 @@ bool DwarfEHPrepare::MoveExceptionValueCalls() {
bool Changed = false;
- for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
- for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;)
- if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++))
- if (CI->getIntrinsicID() == Intrinsic::eh_exception) {
- if (!CI->use_empty()) {
- Value *ExceptionValue = CreateReadOfExceptionValue(BB);
- if (CI == ExceptionValue) {
- // The call was at the start of a landing pad - leave it alone.
- assert(LandingPads.count(BB) &&
- "Created eh.exception call outside landing pad!");
- continue;
- }
- CI->replaceAllUsesWith(ExceptionValue);
- }
- CI->eraseFromParent();
- ++NumExceptionValuesMoved;
- Changed = true;
+ // Move calls to eh.exception that are inside a landing pad to the start of
+ // the landing pad.
+ for (BBSet::const_iterator LI = LandingPads.begin(), LE = LandingPads.end();
+ LI != LE; ++LI) {
+ BasicBlock *LP = *LI;
+ for (BasicBlock::iterator II = LP->getFirstNonPHIOrDbg(), IE = LP->end();
+ II != IE;)
+ if (EHExceptionInst *EI = dyn_cast<EHExceptionInst>(II++)) {
+ // Found a call to eh.exception.
+ if (!EI->use_empty()) {
+ // If there is already a call to eh.exception at the start of the
+ // landing pad, then get hold of it; otherwise create such a call.
+ Value *CallAtStart = CreateExceptionValueCall(LP);
+
+ // If the call was at the start of a landing pad then leave it alone.
+ if (EI == CallAtStart)
+ continue;
+ EI->replaceAllUsesWith(CallAtStart);
}
+ EI->eraseFromParent();
+ ++NumExceptionValuesMoved;
+ Changed = true;
+ }
}
- return Changed;
-}
-
-/// FinishStackTemporaries - If we introduced a stack variable to hold the
-/// exception value then initialize it in each landing pad.
-bool DwarfEHPrepare::FinishStackTemporaries() {
- if (!ExceptionValueVar)
- // Nothing to do.
- return false;
+ // Look for calls to eh.exception that are not in a landing pad. If one is
+ // found, then a register that holds the exception value will be created in
+ // each landing pad, and the SSAUpdater will be used to compute the values
+ // returned by eh.exception calls outside of landing pads.
+ SSAUpdater SSA;
+
+ // Remember where we found the eh.exception call, to avoid rescanning earlier
+ // basic blocks which we already know contain no eh.exception calls.
+ bool FoundCallOutsideLandingPad = false;
+ Function::iterator BB = F->begin();
+ for (Function::iterator BE = F->end(); BB != BE; ++BB) {
+ // Skip over landing pads.
+ if (LandingPads.count(BB))
+ continue;
- bool Changed = false;
+ for (BasicBlock::iterator II = BB->getFirstNonPHIOrDbg(), IE = BB->end();
+ II != IE; ++II)
+ if (isa<EHExceptionInst>(II)) {
+ SSA.Initialize(II->getType(), II->getName());
+ FoundCallOutsideLandingPad = true;
+ break;
+ }
- // Make sure that there is a store of the exception value at the start of
- // each landing pad.
- for (BBSet::iterator LI = LandingPads.begin(), LE = LandingPads.end();
- LI != LE; ++LI) {
- Instruction *ExceptionValue = CreateReadOfExceptionValue(*LI);
- Instruction *Store = new StoreInst(ExceptionValue, ExceptionValueVar);
- Store->insertAfter(ExceptionValue);
- Changed = true;
+ if (FoundCallOutsideLandingPad)
+ break;
}
- return Changed;
-}
+ // If all calls to eh.exception are in landing pads then we are done.
+ if (!FoundCallOutsideLandingPad)
+ return Changed;
+
+ // Add a call to eh.exception at the start of each landing pad, and tell the
+ // SSAUpdater that this is the value produced by the landing pad.
+ for (BBSet::iterator LI = LandingPads.begin(), LE = LandingPads.end();
+ LI != LE; ++LI)
+ SSA.AddAvailableValue(*LI, CreateExceptionValueCall(*LI));
+
+ // Now turn all calls to eh.exception that are not in a landing pad into a use
+ // of the appropriate register.
+ for (Function::iterator BE = F->end(); BB != BE; ++BB) {
+ // Skip over landing pads.
+ if (LandingPads.count(BB))
+ continue;
-/// PromoteStackTemporaries - Turn any stack temporaries we introduced into
-/// registers if possible.
-bool DwarfEHPrepare::PromoteStackTemporaries() {
- if (ExceptionValueVar && DT && DF && isAllocaPromotable(ExceptionValueVar)) {
- // Turn the exception temporary into registers and phi nodes if possible.
- std::vector<AllocaInst*> Allocas(1, ExceptionValueVar);
- PromoteMemToReg(Allocas, *DT, *DF);
- return true;
+ for (BasicBlock::iterator II = BB->getFirstNonPHIOrDbg(), IE = BB->end();
+ II != IE;)
+ if (EHExceptionInst *EI = dyn_cast<EHExceptionInst>(II++)) {
+ // Found a call to eh.exception, replace it with the value from any
+ // upstream landing pad(s).
+ EI->replaceAllUsesWith(SSA.GetValueAtEndOfBlock(BB));
+ EI->eraseFromParent();
+ ++NumExceptionValuesMoved;
+ }
}
- return false;
+
+ return true;
}
/// CreateExceptionValueCall - Insert a call to the eh.exception intrinsic at
/// the start of the basic block (unless there already is one, in which case
/// the existing call is returned).
Instruction *DwarfEHPrepare::CreateExceptionValueCall(BasicBlock *BB) {
- Instruction *Start = BB->getFirstNonPHI();
+ Instruction *Start = BB->getFirstNonPHIOrDbg();
// Is this a call to eh.exception?
if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(Start))
if (CI->getIntrinsicID() == Intrinsic::eh_exception)
@@ -371,36 +637,11 @@ Instruction *DwarfEHPrepare::CreateExceptionValueCall(BasicBlock *BB) {
return CallInst::Create(ExceptionValueIntrinsic, "eh.value.call", Start);
}
-/// CreateValueLoad - Insert a load of the exception value stack variable
-/// (creating it if necessary) at the start of the basic block (unless
-/// there already is a load, in which case the existing load is returned).
-Instruction *DwarfEHPrepare::CreateValueLoad(BasicBlock *BB) {
- Instruction *Start = BB->getFirstNonPHI();
- // Is this a load of the exception temporary?
- if (ExceptionValueVar)
- if (LoadInst* LI = dyn_cast<LoadInst>(Start))
- if (LI->getPointerOperand() == ExceptionValueVar)
- // Reuse the existing load.
- return Start;
-
- // Create the temporary if we didn't already.
- if (!ExceptionValueVar) {
- ExceptionValueVar = new AllocaInst(PointerType::getUnqual(
- Type::getInt8Ty(BB->getContext())), "eh.value", F->begin()->begin());
- ++NumStackTempsIntroduced;
- }
-
- // Load the value.
- return new LoadInst(ExceptionValueVar, "eh.value.load", Start);
-}
-
bool DwarfEHPrepare::runOnFunction(Function &Fn) {
bool Changed = false;
// Initialize internal state.
- DT = getAnalysisIfAvailable<DominatorTree>();
- DF = getAnalysisIfAvailable<DominanceFrontier>();
- ExceptionValueVar = 0;
+ DT = &getAnalysis<DominatorTree>();
F = &Fn;
// Ensure that only unwind edges end at landing pads (a landing pad is a
@@ -415,12 +656,7 @@ bool DwarfEHPrepare::runOnFunction(Function &Fn) {
// Move eh.exception calls to landing pads.
Changed |= MoveExceptionValueCalls();
- // Initialize any stack temporaries we introduced.
- Changed |= FinishStackTemporaries();
-
- // Turn any stack temporaries into registers if possible.
- if (!CompileFast)
- Changed |= PromoteStackTemporaries();
+ Changed |= HandleURoRInvokes();
LandingPads.clear();
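The rewritten MoveExceptionValueCalls above drops the old stack-temporary scheme: every landing pad gets an eh.exception call at its start, those calls are registered with an SSAUpdater, and any eh.exception use outside a landing pad is rewritten to read the value the updater computes for its block. A minimal sketch of that pattern, reusing only the SSAUpdater calls visible in the hunk (Initialize, AddAvailableValue, GetValueAtEndOfBlock) and assuming the LLVM 2.8 headers bundled under libclamav/c++/llvm:

    #include "llvm/ADT/DenseMap.h"
    #include "llvm/BasicBlock.h"
    #include "llvm/Instructions.h"
    #include "llvm/Transforms/Utils/SSAUpdater.h"

    using namespace llvm;

    // Sketch: rewrite OldCall (an eh.exception-style call that sits outside any
    // landing pad) to use the value produced at the start of whichever landing
    // pad(s) reach it.  LandingPadValues maps each landing pad to the call that
    // defines the value there.
    static void RewriteOutsideLandingPad(
        Instruction *OldCall,
        const DenseMap<BasicBlock*, Instruction*> &LandingPadValues) {
      SSAUpdater SSA;
      SSA.Initialize(OldCall->getType(), OldCall->getName());

      // Tell the updater which value is live out of each landing pad.
      for (DenseMap<BasicBlock*, Instruction*>::const_iterator
             I = LandingPadValues.begin(), E = LandingPadValues.end(); I != E; ++I)
        SSA.AddAvailableValue(I->first, I->second);

      // Ask for the value reaching the end of OldCall's block; the updater
      // inserts PHI nodes at merge points as needed.
      OldCall->replaceAllUsesWith(SSA.GetValueAtEndOfBlock(OldCall->getParent()));
      OldCall->eraseFromParent();
    }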
diff --git a/libclamav/c++/llvm/lib/CodeGen/ELF.h b/libclamav/c++/llvm/lib/CodeGen/ELF.h
index cb5a8c0..fb884c9 100644
--- a/libclamav/c++/llvm/lib/CodeGen/ELF.h
+++ b/libclamav/c++/llvm/lib/CodeGen/ELF.h
@@ -22,36 +22,12 @@
#include "llvm/CodeGen/BinaryObject.h"
#include "llvm/CodeGen/MachineRelocation.h"
+#include "llvm/Support/ELF.h"
#include "llvm/System/DataTypes.h"
namespace llvm {
class GlobalValue;
- // Identification Indexes
- enum {
- EI_MAG0 = 0,
- EI_MAG1 = 1,
- EI_MAG2 = 2,
- EI_MAG3 = 3
- };
-
- // File types
- enum {
- ET_NONE = 0, // No file type
- ET_REL = 1, // Relocatable file
- ET_EXEC = 2, // Executable file
- ET_DYN = 3, // Shared object file
- ET_CORE = 4, // Core file
- ET_LOPROC = 0xff00, // Beginning of processor-specific codes
- ET_HIPROC = 0xffff // Processor-specific
- };
-
- // Versioning
- enum {
- EV_NONE = 0,
- EV_CURRENT = 1
- };
-
/// ELFSym - This struct contains information about each symbol that is
/// added to logical symbol table for the module. This is eventually
/// turned into a real symbol table in the file.
@@ -108,9 +84,9 @@ namespace llvm {
static ELFSym *getExtSym(const char *Ext) {
ELFSym *Sym = new ELFSym();
Sym->Source.Ext = Ext;
- Sym->setBind(STB_GLOBAL);
- Sym->setType(STT_NOTYPE);
- Sym->setVisibility(STV_DEFAULT);
+ Sym->setBind(ELF::STB_GLOBAL);
+ Sym->setType(ELF::STT_NOTYPE);
+ Sym->setVisibility(ELF::STV_DEFAULT);
Sym->SourceType = isExtSym;
return Sym;
}
@@ -118,9 +94,9 @@ namespace llvm {
// getSectionSym - Returns an ELF symbol to represent an ELF section
static ELFSym *getSectionSym() {
ELFSym *Sym = new ELFSym();
- Sym->setBind(STB_LOCAL);
- Sym->setType(STT_SECTION);
- Sym->setVisibility(STV_DEFAULT);
+ Sym->setBind(ELF::STB_LOCAL);
+ Sym->setType(ELF::STT_SECTION);
+ Sym->setVisibility(ELF::STV_DEFAULT);
Sym->SourceType = isOther;
return Sym;
}
@@ -128,9 +104,9 @@ namespace llvm {
// getFileSym - Returns an ELF symbol to represent the module identifier
static ELFSym *getFileSym() {
ELFSym *Sym = new ELFSym();
- Sym->setBind(STB_LOCAL);
- Sym->setType(STT_FILE);
- Sym->setVisibility(STV_DEFAULT);
+ Sym->setBind(ELF::STB_LOCAL);
+ Sym->setType(ELF::STT_FILE);
+ Sym->setVisibility(ELF::STV_DEFAULT);
Sym->SectionIdx = 0xfff1; // ELFSection::SHN_ABS;
Sym->SourceType = isOther;
return Sym;
@@ -141,8 +117,8 @@ namespace llvm {
ELFSym *Sym = new ELFSym();
Sym->Source.GV = GV;
Sym->setBind(Bind);
- Sym->setType(STT_NOTYPE);
- Sym->setVisibility(STV_DEFAULT);
+ Sym->setType(ELF::STT_NOTYPE);
+ Sym->setVisibility(ELF::STV_DEFAULT);
Sym->SectionIdx = 0; //ELFSection::SHN_UNDEF;
Sym->SourceType = isGV;
return Sym;
@@ -159,35 +135,14 @@ namespace llvm {
// Symbol index into the Symbol table
unsigned SymTabIdx;
- enum {
- STB_LOCAL = 0, // Local sym, not visible outside obj file containing def
- STB_GLOBAL = 1, // Global sym, visible to all object files being combined
- STB_WEAK = 2 // Weak symbol, like global but lower-precedence
- };
-
- enum {
- STT_NOTYPE = 0, // Symbol's type is not specified
- STT_OBJECT = 1, // Symbol is a data object (variable, array, etc.)
- STT_FUNC = 2, // Symbol is executable code (function, etc.)
- STT_SECTION = 3, // Symbol refers to a section
- STT_FILE = 4 // Local, absolute symbol that refers to a file
- };
-
- enum {
- STV_DEFAULT = 0, // Visibility is specified by binding type
- STV_INTERNAL = 1, // Defined by processor supplements
- STV_HIDDEN = 2, // Not visible to other components
- STV_PROTECTED = 3 // Visible in other components but not preemptable
- };
-
ELFSym() : SourceType(isOther), NameIdx(0), Value(0),
- Size(0), Info(0), Other(STV_DEFAULT), SectionIdx(0),
+ Size(0), Info(0), Other(ELF::STV_DEFAULT), SectionIdx(0),
SymTabIdx(0) {}
unsigned getBind() const { return (Info >> 4) & 0xf; }
unsigned getType() const { return Info & 0xf; }
- bool isLocalBind() const { return getBind() == STB_LOCAL; }
- bool isFileType() const { return getType() == STT_FILE; }
+ bool isLocalBind() const { return getBind() == ELF::STB_LOCAL; }
+ bool isFileType() const { return getType() == ELF::STT_FILE; }
void setBind(unsigned X) {
assert(X == (X & 0xF) && "Bind value out of range!");
@@ -222,51 +177,6 @@ namespace llvm {
unsigned Align; // sh_addralign - Alignment of section.
unsigned EntSize; // sh_entsize - Size of entries in the section e
- // Section Header Flags
- enum {
- SHF_WRITE = 1 << 0, // Writable
- SHF_ALLOC = 1 << 1, // Mapped into the process addr space
- SHF_EXECINSTR = 1 << 2, // Executable
- SHF_MERGE = 1 << 4, // Might be merged if equal
- SHF_STRINGS = 1 << 5, // Contains null-terminated strings
- SHF_INFO_LINK = 1 << 6, // 'sh_info' contains SHT index
- SHF_LINK_ORDER = 1 << 7, // Preserve order after combining
- SHF_OS_NONCONFORMING = 1 << 8, // nonstandard OS support required
- SHF_GROUP = 1 << 9, // Section is a member of a group
- SHF_TLS = 1 << 10 // Section holds thread-local data
- };
-
- // Section Types
- enum {
- SHT_NULL = 0, // No associated section (inactive entry).
- SHT_PROGBITS = 1, // Program-defined contents.
- SHT_SYMTAB = 2, // Symbol table.
- SHT_STRTAB = 3, // String table.
- SHT_RELA = 4, // Relocation entries; explicit addends.
- SHT_HASH = 5, // Symbol hash table.
- SHT_DYNAMIC = 6, // Information for dynamic linking.
- SHT_NOTE = 7, // Information about the file.
- SHT_NOBITS = 8, // Data occupies no space in the file.
- SHT_REL = 9, // Relocation entries; no explicit addends.
- SHT_SHLIB = 10, // Reserved.
- SHT_DYNSYM = 11, // Symbol table.
- SHT_LOPROC = 0x70000000, // Lowest processor arch-specific type.
- SHT_HIPROC = 0x7fffffff, // Highest processor arch-specific type.
- SHT_LOUSER = 0x80000000, // Lowest type reserved for applications.
- SHT_HIUSER = 0xffffffff // Highest type reserved for applications.
- };
-
- // Special section indices.
- enum {
- SHN_UNDEF = 0, // Undefined, missing, irrelevant
- SHN_LORESERVE = 0xff00, // Lowest reserved index
- SHN_LOPROC = 0xff00, // Lowest processor-specific index
- SHN_HIPROC = 0xff1f, // Highest processor-specific index
- SHN_ABS = 0xfff1, // Symbol has absolute value; no relocation
- SHN_COMMON = 0xfff2, // FORTRAN COMMON or C external global variables
- SHN_HIRESERVE = 0xffff // Highest reserved index
- };
-
/// SectionIdx - The number of the section in the Section Table.
unsigned short SectionIdx;
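With the local enums removed, ELF.h and the writers below spell out the constants that llvm/Support/ELF.h already provides in the ELF namespace, so the object writer and the spec definitions can no longer drift apart. A tiny standalone check of the namespaced names used in these hunks (only llvm/Support/ELF.h from the bundled 2.8 tree is assumed):

    #include "llvm/Support/ELF.h"
    #include <cstdio>

    int main() {
      // Same constants the patch switches to: ELF::ET_REL, ELF::EV_CURRENT, ...
      std::printf("ET_REL=%u EV_CURRENT=%u STB_GLOBAL=%u SHT_SYMTAB=%u SHN_COMMON=%u\n",
                  unsigned(llvm::ELF::ET_REL), unsigned(llvm::ELF::EV_CURRENT),
                  unsigned(llvm::ELF::STB_GLOBAL), unsigned(llvm::ELF::SHT_SYMTAB),
                  unsigned(llvm::ELF::SHN_COMMON));
      return 0;
    }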
diff --git a/libclamav/c++/llvm/lib/CodeGen/ELFCodeEmitter.cpp b/libclamav/c++/llvm/lib/CodeGen/ELFCodeEmitter.cpp
index 8416d3b..3fb087c 100644
--- a/libclamav/c++/llvm/lib/CodeGen/ELFCodeEmitter.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/ELFCodeEmitter.cpp
@@ -71,7 +71,7 @@ void ELFCodeEmitter::startFunction(MachineFunction &MF) {
bool ELFCodeEmitter::finishFunction(MachineFunction &MF) {
// Add a symbol to represent the function.
const Function *F = MF.getFunction();
- ELFSym *FnSym = ELFSym::getGV(F, EW.getGlobalELFBinding(F), ELFSym::STT_FUNC,
+ ELFSym *FnSym = ELFSym::getGV(F, EW.getGlobalELFBinding(F), ELF::STT_FUNC,
EW.getGlobalELFVisibility(F));
FnSym->SectionIdx = ES->SectionIdx;
FnSym->Size = ES->getCurrentPCOffset()-FnStartOff;
@@ -90,7 +90,7 @@ bool ELFCodeEmitter::finishFunction(MachineFunction &MF) {
for (std::vector<MachineRelocation>::iterator MRI = JTRelocations.begin(),
MRE = JTRelocations.end(); MRI != MRE; ++MRI) {
MachineRelocation &MR = *MRI;
- unsigned MBBOffset = getMachineBasicBlockAddress(MR.getBasicBlock());
+ uintptr_t MBBOffset = getMachineBasicBlockAddress(MR.getBasicBlock());
MR.setResultPointer((void*)MBBOffset);
MR.setConstantVal(ES->SectionIdx);
JTSection.addRelocation(MR);
diff --git a/libclamav/c++/llvm/lib/CodeGen/ELFCodeEmitter.h b/libclamav/c++/llvm/lib/CodeGen/ELFCodeEmitter.h
index b5e9c84..2ec1f6e 100644
--- a/libclamav/c++/llvm/lib/CodeGen/ELFCodeEmitter.h
+++ b/libclamav/c++/llvm/lib/CodeGen/ELFCodeEmitter.h
@@ -57,13 +57,13 @@ namespace llvm {
bool finishFunction(MachineFunction &F);
/// emitLabel - Emits a label
- virtual void emitLabel(uint64_t LabelID) {
+ virtual void emitLabel(MCSymbol *Label) {
assert("emitLabel not implemented");
}
/// getLabelAddress - Return the address of the specified LabelID,
/// only usable after the LabelID has been emitted.
- virtual uintptr_t getLabelAddress(uint64_t Label) const {
+ virtual uintptr_t getLabelAddress(MCSymbol *Label) const {
assert("getLabelAddress not implemented");
return 0;
}
diff --git a/libclamav/c++/llvm/lib/CodeGen/ELFWriter.cpp b/libclamav/c++/llvm/lib/CodeGen/ELFWriter.cpp
index 0979c04..d14728d 100644
--- a/libclamav/c++/llvm/lib/CodeGen/ELFWriter.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/ELFWriter.cpp
@@ -63,8 +63,8 @@ char ELFWriter::ID = 0;
//===----------------------------------------------------------------------===//
ELFWriter::ELFWriter(raw_ostream &o, TargetMachine &tm)
- : MachineFunctionPass(&ID), O(o), TM(tm),
- OutContext(*new MCContext()),
+ : MachineFunctionPass(ID), O(o), TM(tm),
+ OutContext(*new MCContext(*TM.getMCAsmInfo())),
TLOF(TM.getTargetLowering()->getObjFileLowering()),
is64Bit(TM.getTargetData()->getPointerSizeInBits() == 64),
isLittleEndian(TM.getTargetData()->isLittleEndian()),
@@ -109,7 +109,7 @@ bool ELFWriter::doInitialization(Module &M) {
// Initialize TargetLoweringObjectFile.
const_cast<TargetLoweringObjectFile&>(TLOF).Initialize(OutContext, TM);
- Mang = new Mangler(*MAI);
+ Mang = new Mangler(OutContext, *TM.getTargetData());
// ELF Header
// ----------
@@ -129,12 +129,12 @@ bool ELFWriter::doInitialization(Module &M) {
ElfHdr.emitByte(TEW->getEIClass()); // e_ident[EI_CLASS]
ElfHdr.emitByte(TEW->getEIData()); // e_ident[EI_DATA]
- ElfHdr.emitByte(EV_CURRENT); // e_ident[EI_VERSION]
+ ElfHdr.emitByte(ELF::EV_CURRENT); // e_ident[EI_VERSION]
ElfHdr.emitAlignment(16); // e_ident[EI_NIDENT-EI_PAD]
- ElfHdr.emitWord16(ET_REL); // e_type
+ ElfHdr.emitWord16(ELF::ET_REL); // e_type
ElfHdr.emitWord16(TEW->getEMachine()); // e_machine = target
- ElfHdr.emitWord32(EV_CURRENT); // e_version
+ ElfHdr.emitWord32(ELF::EV_CURRENT); // e_version
ElfHdr.emitWord(0); // e_entry, no entry point in .o file
ElfHdr.emitWord(0); // e_phoff, no program header for .o
ELFHdr_e_shoff_Offset = ElfHdr.size();
@@ -208,7 +208,7 @@ ELFSection &ELFWriter::getDtorSection() {
}
// getTextSection - Get the text section for the specified function
-ELFSection &ELFWriter::getTextSection(Function *F) {
+ELFSection &ELFWriter::getTextSection(const Function *F) {
const MCSectionELF *Text =
(const MCSectionELF *)TLOF.SectionForGlobal(F, Mang, TM);
return getSection(Text->getSectionName(), Text->getType(), Text->getFlags());
@@ -252,7 +252,7 @@ ELFSection &ELFWriter::getConstantPoolSection(MachineConstantPoolEntry &CPE) {
// is true if the relocation section contains entries with addends.
ELFSection &ELFWriter::getRelocSection(ELFSection &S) {
unsigned SectionType = TEW->hasRelocationAddend() ?
- ELFSection::SHT_RELA : ELFSection::SHT_REL;
+ ELF::SHT_RELA : ELF::SHT_REL;
std::string SectionName(".rel");
if (TEW->hasRelocationAddend())
@@ -268,11 +268,11 @@ unsigned ELFWriter::getGlobalELFVisibility(const GlobalValue *GV) {
default:
llvm_unreachable("unknown visibility type");
case GlobalValue::DefaultVisibility:
- return ELFSym::STV_DEFAULT;
+ return ELF::STV_DEFAULT;
case GlobalValue::HiddenVisibility:
- return ELFSym::STV_HIDDEN;
+ return ELF::STV_HIDDEN;
case GlobalValue::ProtectedVisibility:
- return ELFSym::STV_PROTECTED;
+ return ELF::STV_PROTECTED;
}
return 0;
}
@@ -280,23 +280,23 @@ unsigned ELFWriter::getGlobalELFVisibility(const GlobalValue *GV) {
// getGlobalELFBinding - Returns the ELF specific binding type
unsigned ELFWriter::getGlobalELFBinding(const GlobalValue *GV) {
if (GV->hasInternalLinkage())
- return ELFSym::STB_LOCAL;
+ return ELF::STB_LOCAL;
if (GV->isWeakForLinker() && !GV->hasCommonLinkage())
- return ELFSym::STB_WEAK;
+ return ELF::STB_WEAK;
- return ELFSym::STB_GLOBAL;
+ return ELF::STB_GLOBAL;
}
// getGlobalELFType - Returns the ELF specific type for a global
unsigned ELFWriter::getGlobalELFType(const GlobalValue *GV) {
if (GV->isDeclaration())
- return ELFSym::STT_NOTYPE;
+ return ELF::STT_NOTYPE;
if (isa<Function>(GV))
- return ELFSym::STT_FUNC;
+ return ELF::STT_FUNC;
- return ELFSym::STT_OBJECT;
+ return ELF::STT_OBJECT;
}
// IsELFUndefSym - True if the global value must be marked as a symbol
@@ -364,7 +364,7 @@ void ELFWriter::EmitGlobal(const GlobalValue *GV) {
GblSym->Size = Size;
if (S->HasCommonSymbols()) { // Symbol must go to a common section
- GblSym->SectionIdx = ELFSection::SHN_COMMON;
+ GblSym->SectionIdx = ELF::SHN_COMMON;
// A new linkonce section is created for each global in the
// common section, the default alignment is 1 and the symbol
@@ -507,7 +507,7 @@ void ELFWriter::EmitGlobalConstant(const Constant *CV, ELFSection &GblS) {
std::string msg;
raw_string_ostream ErrorMsg(msg);
ErrorMsg << "Constant unimp for type: " << *CV->getType();
- llvm_report_error(ErrorMsg.str());
+ report_fatal_error(ErrorMsg.str());
}
// ResolveConstantExpr - Resolve the constant expression until it stop
@@ -572,10 +572,8 @@ CstExprResTy ELFWriter::ResolveConstantExpr(const Constant *CV) {
}
}
- std::string msg(CE->getOpcodeName());
- raw_string_ostream ErrorMsg(msg);
- ErrorMsg << ": Unsupported ConstantExpr type";
- llvm_report_error(ErrorMsg.str());
+ report_fatal_error(CE->getOpcodeName() +
+ StringRef(": Unsupported ConstantExpr type"));
return std::make_pair(CV, 0); // silence warning
}
diff --git a/libclamav/c++/llvm/lib/CodeGen/ELFWriter.h b/libclamav/c++/llvm/lib/CodeGen/ELFWriter.h
index b61b484..b8bac55 100644
--- a/libclamav/c++/llvm/lib/CodeGen/ELFWriter.h
+++ b/libclamav/c++/llvm/lib/CodeGen/ELFWriter.h
@@ -39,6 +39,7 @@ namespace llvm {
class raw_ostream;
class SectionKind;
class MCContext;
+ class TargetMachine;
typedef std::vector<ELFSym*>::iterator ELFSymIter;
typedef std::vector<ELFSection*>::iterator ELFSectionIter;
@@ -160,29 +161,29 @@ namespace llvm {
SN->SectionIdx = NumSections++;
SN->Type = Type;
SN->Flags = Flags;
- SN->Link = ELFSection::SHN_UNDEF;
+ SN->Link = ELF::SHN_UNDEF;
SN->Align = Align;
return *SN;
}
ELFSection &getNonExecStackSection() {
- return getSection(".note.GNU-stack", ELFSection::SHT_PROGBITS, 0, 1);
+ return getSection(".note.GNU-stack", ELF::SHT_PROGBITS, 0, 1);
}
ELFSection &getSymbolTableSection() {
- return getSection(".symtab", ELFSection::SHT_SYMTAB, 0);
+ return getSection(".symtab", ELF::SHT_SYMTAB, 0);
}
ELFSection &getStringTableSection() {
- return getSection(".strtab", ELFSection::SHT_STRTAB, 0, 1);
+ return getSection(".strtab", ELF::SHT_STRTAB, 0, 1);
}
ELFSection &getSectionHeaderStringTableSection() {
- return getSection(".shstrtab", ELFSection::SHT_STRTAB, 0, 1);
+ return getSection(".shstrtab", ELF::SHT_STRTAB, 0, 1);
}
ELFSection &getNullSection() {
- return getSection("", ELFSection::SHT_NULL, 0);
+ return getSection("", ELF::SHT_NULL, 0);
}
ELFSection &getDataSection();
@@ -191,7 +192,7 @@ namespace llvm {
ELFSection &getDtorSection();
ELFSection &getJumpTableSection();
ELFSection &getConstantPoolSection(MachineConstantPoolEntry &CPE);
- ELFSection &getTextSection(Function *F);
+ ELFSection &getTextSection(const Function *F);
ELFSection &getRelocSection(ELFSection &S);
// Helpers for obtaining ELF specific info.
diff --git a/libclamav/c++/llvm/lib/CodeGen/ExactHazardRecognizer.cpp b/libclamav/c++/llvm/lib/CodeGen/ExactHazardRecognizer.cpp
deleted file mode 100644
index 61959bb..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/ExactHazardRecognizer.cpp
+++ /dev/null
@@ -1,161 +0,0 @@
-//===----- ExactHazardRecognizer.cpp - hazard recognizer -------- ---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This implements a hazard recognizer using the instructions itineraries
-// defined for the current target.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "post-RA-sched"
-#include "ExactHazardRecognizer.h"
-#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetInstrItineraries.h"
-
-using namespace llvm;
-
-ExactHazardRecognizer::
-ExactHazardRecognizer(const InstrItineraryData &LItinData) :
- ScheduleHazardRecognizer(), ItinData(LItinData)
-{
- // Determine the maximum depth of any itinerary. This determines the
- // depth of the scoreboard. We always make the scoreboard at least 1
- // cycle deep to avoid dealing with the boundary condition.
- ScoreboardDepth = 1;
- if (!ItinData.isEmpty()) {
- for (unsigned idx = 0; ; ++idx) {
- if (ItinData.isEndMarker(idx))
- break;
-
- const InstrStage *IS = ItinData.beginStage(idx);
- const InstrStage *E = ItinData.endStage(idx);
- unsigned ItinDepth = 0;
- for (; IS != E; ++IS)
- ItinDepth += IS->getCycles();
-
- ScoreboardDepth = std::max(ScoreboardDepth, ItinDepth);
- }
- }
-
- Scoreboard = new unsigned[ScoreboardDepth];
- ScoreboardHead = 0;
-
- DEBUG(dbgs() << "Using exact hazard recognizer: ScoreboardDepth = "
- << ScoreboardDepth << '\n');
-}
-
-ExactHazardRecognizer::~ExactHazardRecognizer() {
- delete [] Scoreboard;
-}
-
-void ExactHazardRecognizer::Reset() {
- memset(Scoreboard, 0, ScoreboardDepth * sizeof(unsigned));
- ScoreboardHead = 0;
-}
-
-unsigned ExactHazardRecognizer::getFutureIndex(unsigned offset) {
- return (ScoreboardHead + offset) % ScoreboardDepth;
-}
-
-void ExactHazardRecognizer::dumpScoreboard() {
- dbgs() << "Scoreboard:\n";
-
- unsigned last = ScoreboardDepth - 1;
- while ((last > 0) && (Scoreboard[getFutureIndex(last)] == 0))
- last--;
-
- for (unsigned i = 0; i <= last; i++) {
- unsigned FUs = Scoreboard[getFutureIndex(i)];
- dbgs() << "\t";
- for (int j = 31; j >= 0; j--)
- dbgs() << ((FUs & (1 << j)) ? '1' : '0');
- dbgs() << '\n';
- }
-}
-
-ExactHazardRecognizer::HazardType ExactHazardRecognizer::getHazardType(SUnit *SU) {
- if (ItinData.isEmpty())
- return NoHazard;
-
- unsigned cycle = 0;
-
- // Use the itinerary for the underlying instruction to check for
- // free FU's in the scoreboard at the appropriate future cycles.
- unsigned idx = SU->getInstr()->getDesc().getSchedClass();
- for (const InstrStage *IS = ItinData.beginStage(idx),
- *E = ItinData.endStage(idx); IS != E; ++IS) {
- // We must find one of the stage's units free for every cycle the
- // stage is occupied. FIXME it would be more accurate to find the
- // same unit free in all the cycles.
- for (unsigned int i = 0; i < IS->getCycles(); ++i) {
- assert(((cycle + i) < ScoreboardDepth) &&
- "Scoreboard depth exceeded!");
-
- unsigned index = getFutureIndex(cycle + i);
- unsigned freeUnits = IS->getUnits() & ~Scoreboard[index];
- if (!freeUnits) {
- DEBUG(dbgs() << "*** Hazard in cycle " << (cycle + i) << ", ");
- DEBUG(dbgs() << "SU(" << SU->NodeNum << "): ");
- DEBUG(SU->getInstr()->dump());
- return Hazard;
- }
- }
-
- // Advance the cycle to the next stage.
- cycle += IS->getNextCycles();
- }
-
- return NoHazard;
-}
-
-void ExactHazardRecognizer::EmitInstruction(SUnit *SU) {
- if (ItinData.isEmpty())
- return;
-
- unsigned cycle = 0;
-
- // Use the itinerary for the underlying instruction to reserve FU's
- // in the scoreboard at the appropriate future cycles.
- unsigned idx = SU->getInstr()->getDesc().getSchedClass();
- for (const InstrStage *IS = ItinData.beginStage(idx),
- *E = ItinData.endStage(idx); IS != E; ++IS) {
- // We must reserve one of the stage's units for every cycle the
- // stage is occupied. FIXME it would be more accurate to reserve
- // the same unit free in all the cycles.
- for (unsigned int i = 0; i < IS->getCycles(); ++i) {
- assert(((cycle + i) < ScoreboardDepth) &&
- "Scoreboard depth exceeded!");
-
- unsigned index = getFutureIndex(cycle + i);
- unsigned freeUnits = IS->getUnits() & ~Scoreboard[index];
-
- // reduce to a single unit
- unsigned freeUnit = 0;
- do {
- freeUnit = freeUnits;
- freeUnits = freeUnit & (freeUnit - 1);
- } while (freeUnits);
-
- assert(freeUnit && "No function unit available!");
- Scoreboard[index] |= freeUnit;
- }
-
- // Advance the cycle to the next stage.
- cycle += IS->getNextCycles();
- }
-
- DEBUG(dumpScoreboard());
-}
-
-void ExactHazardRecognizer::AdvanceCycle() {
- Scoreboard[ScoreboardHead] = 0;
- ScoreboardHead = getFutureIndex(1);
-}
diff --git a/libclamav/c++/llvm/lib/CodeGen/ExactHazardRecognizer.h b/libclamav/c++/llvm/lib/CodeGen/ExactHazardRecognizer.h
deleted file mode 100644
index 71ac979..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/ExactHazardRecognizer.h
+++ /dev/null
@@ -1,61 +0,0 @@
-//=- llvm/CodeGen/ExactHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the ExactHazardRecognizer class, which
-// implements hazard-avoidance heuristics for scheduling, based on the
-// scheduling itineraries specified for the target.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_EXACTHAZARDRECOGNIZER_H
-#define LLVM_CODEGEN_EXACTHAZARDRECOGNIZER_H
-
-#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
-#include "llvm/CodeGen/ScheduleDAG.h"
-#include "llvm/Target/TargetInstrItineraries.h"
-
-namespace llvm {
- class ExactHazardRecognizer : public ScheduleHazardRecognizer {
- // Itinerary data for the target.
- const InstrItineraryData &ItinData;
-
- // Scoreboard to track function unit usage. Scoreboard[0] is a
- // mask of the FUs in use in the cycle currently being
- // schedule. Scoreboard[1] is a mask for the next cycle. The
- // Scoreboard is used as a circular buffer with the current cycle
- // indicated by ScoreboardHead.
- unsigned *Scoreboard;
-
- // The maximum number of cycles monitored by the Scoreboard. This
- // value is determined based on the target itineraries to ensure
- // that all hazards can be tracked.
- unsigned ScoreboardDepth;
-
- // Indices into the Scoreboard that represent the current cycle.
- unsigned ScoreboardHead;
-
- // Return the scoreboard index to use for 'offset' cycles in the
- // future. 'offset' of 0 returns ScoreboardHead.
- unsigned getFutureIndex(unsigned offset);
-
- // Print the scoreboard.
- void dumpScoreboard();
-
- public:
- ExactHazardRecognizer(const InstrItineraryData &ItinData);
- ~ExactHazardRecognizer();
-
- virtual HazardType getHazardType(SUnit *SU);
- virtual void Reset();
- virtual void EmitInstruction(SUnit *SU);
- virtual void AdvanceCycle();
- };
-}
-
-#endif
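ExactHazardRecognizer is deleted here; its scheduling role is covered by the scoreboard-style recognizer kept elsewhere in the 2.8 tree (an assumption based on the surrounding merge, not something this hunk states). The part worth keeping in mind when reading related code is the circular scoreboard: a fixed-depth array indexed modulo its depth, with a head marking the current cycle. That indexing scheme in isolation, with no LLVM types involved:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Minimal stand-in for the deleted scoreboard: Board[head] is a bitmask of
    // function units reserved in the current cycle, Board[(head + k) % depth]
    // the units reserved k cycles in the future.
    class Scoreboard {
      std::vector<unsigned> Board;
      unsigned Head;
    public:
      explicit Scoreboard(unsigned Depth) : Board(std::max(Depth, 1u), 0), Head(0) {}

      unsigned futureIndex(unsigned Offset) const {
        return (Head + Offset) % Board.size();
      }
      bool hasFreeUnit(unsigned Offset, unsigned UnitsMask) const {
        return (UnitsMask & ~Board[futureIndex(Offset)]) != 0;  // any unit still free?
      }
      void reserve(unsigned Offset, unsigned UnitBit) {
        Board[futureIndex(Offset)] |= UnitBit;
      }
      void advanceCycle() {            // retire the current cycle, move the head
        Board[Head] = 0;
        Head = futureIndex(1);
      }
    };

    int main() {
      Scoreboard SB(4);
      SB.reserve(0, 1u << 0);                       // unit 0 busy this cycle
      std::printf("%d\n", SB.hasFreeUnit(0, 0x3));  // unit 1 still free -> 1
      SB.advanceCycle();
      std::printf("%d\n", SB.hasFreeUnit(0, 0x1));  // unit 0 free next cycle -> 1
      return 0;
    }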
diff --git a/libclamav/c++/llvm/lib/CodeGen/GCMetadata.cpp b/libclamav/c++/llvm/lib/CodeGen/GCMetadata.cpp
index 055172b..0f6e882 100644
--- a/libclamav/c++/llvm/lib/CodeGen/GCMetadata.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/GCMetadata.cpp
@@ -17,6 +17,7 @@
#include "llvm/Pass.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Function.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -29,8 +30,8 @@ namespace {
raw_ostream &OS;
public:
- Printer() : FunctionPass(&ID), OS(errs()) {}
- explicit Printer(raw_ostream &OS) : FunctionPass(&ID), OS(OS) {}
+ Printer() : FunctionPass(ID), OS(errs()) {}
+ explicit Printer(raw_ostream &OS) : FunctionPass(ID), OS(OS) {}
const char *getPassName() const;
@@ -54,8 +55,8 @@ namespace {
}
-static RegisterPass<GCModuleInfo>
-X("collector-metadata", "Create Garbage Collector Module Metadata");
+INITIALIZE_PASS(GCModuleInfo, "collector-metadata",
+ "Create Garbage Collector Module Metadata", false, false);
// -----------------------------------------------------------------------------
@@ -69,7 +70,7 @@ GCFunctionInfo::~GCFunctionInfo() {}
char GCModuleInfo::ID = 0;
GCModuleInfo::GCModuleInfo()
- : ImmutablePass(&ID) {}
+ : ImmutablePass(ID) {}
GCModuleInfo::~GCModuleInfo() {
clear();
@@ -150,30 +151,31 @@ static const char *DescKind(GC::PointKind Kind) {
}
bool Printer::runOnFunction(Function &F) {
- if (!F.hasGC()) {
- GCFunctionInfo *FD = &getAnalysis<GCModuleInfo>().getFunctionInfo(F);
+ if (F.hasGC()) return false;
+
+ GCFunctionInfo *FD = &getAnalysis<GCModuleInfo>().getFunctionInfo(F);
+
+ OS << "GC roots for " << FD->getFunction().getNameStr() << ":\n";
+ for (GCFunctionInfo::roots_iterator RI = FD->roots_begin(),
+ RE = FD->roots_end(); RI != RE; ++RI)
+ OS << "\t" << RI->Num << "\t" << RI->StackOffset << "[sp]\n";
+
+ OS << "GC safe points for " << FD->getFunction().getNameStr() << ":\n";
+ for (GCFunctionInfo::iterator PI = FD->begin(),
+ PE = FD->end(); PI != PE; ++PI) {
- OS << "GC roots for " << FD->getFunction().getNameStr() << ":\n";
- for (GCFunctionInfo::roots_iterator RI = FD->roots_begin(),
- RE = FD->roots_end(); RI != RE; ++RI)
- OS << "\t" << RI->Num << "\t" << RI->StackOffset << "[sp]\n";
+ OS << "\t" << PI->Label->getName() << ": "
+ << DescKind(PI->Kind) << ", live = {";
- OS << "GC safe points for " << FD->getFunction().getNameStr() << ":\n";
- for (GCFunctionInfo::iterator PI = FD->begin(),
- PE = FD->end(); PI != PE; ++PI) {
-
- OS << "\tlabel " << PI->Num << ": " << DescKind(PI->Kind) << ", live = {";
-
- for (GCFunctionInfo::live_iterator RI = FD->live_begin(PI),
- RE = FD->live_end(PI);;) {
- OS << " " << RI->Num;
- if (++RI == RE)
- break;
- OS << ",";
- }
-
- OS << " }\n";
+ for (GCFunctionInfo::live_iterator RI = FD->live_begin(PI),
+ RE = FD->live_end(PI);;) {
+ OS << " " << RI->Num;
+ if (++RI == RE)
+ break;
+ OS << ",";
}
+
+ OS << " }\n";
}
return false;
@@ -187,7 +189,7 @@ FunctionPass *llvm::createGCInfoDeleter() {
return new Deleter();
}
-Deleter::Deleter() : FunctionPass(&ID) {}
+Deleter::Deleter() : FunctionPass(ID) {}
const char *Deleter::getPassName() const {
return "Delete Garbage Collector Information";
diff --git a/libclamav/c++/llvm/lib/CodeGen/GCMetadataPrinter.cpp b/libclamav/c++/llvm/lib/CodeGen/GCMetadataPrinter.cpp
index 9cd2925..f80e9ce 100644
--- a/libclamav/c++/llvm/lib/CodeGen/GCMetadataPrinter.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/GCMetadataPrinter.cpp
@@ -12,19 +12,16 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GCMetadataPrinter.h"
-
using namespace llvm;
GCMetadataPrinter::GCMetadataPrinter() { }
GCMetadataPrinter::~GCMetadataPrinter() { }
-void GCMetadataPrinter::beginAssembly(raw_ostream &OS, AsmPrinter &AP,
- const MCAsmInfo &MAI) {
+void GCMetadataPrinter::beginAssembly(AsmPrinter &AP) {
// Default is no action.
}
-void GCMetadataPrinter::finishAssembly(raw_ostream &OS, AsmPrinter &AP,
- const MCAsmInfo &MAI) {
+void GCMetadataPrinter::finishAssembly(AsmPrinter &AP) {
// Default is no action.
}
diff --git a/libclamav/c++/llvm/lib/CodeGen/GCStrategy.cpp b/libclamav/c++/llvm/lib/CodeGen/GCStrategy.cpp
index b5006fd..719fa19 100644
--- a/libclamav/c++/llvm/lib/CodeGen/GCStrategy.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/GCStrategy.cpp
@@ -71,9 +71,9 @@ namespace {
void FindSafePoints(MachineFunction &MF);
void VisitCallPoint(MachineBasicBlock::iterator MI);
- unsigned InsertLabel(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- DebugLoc DL) const;
+ MCSymbol *InsertLabel(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ DebugLoc DL) const;
void FindStackOffsets(MachineFunction &MF);
@@ -130,7 +130,7 @@ FunctionPass *llvm::createGCLoweringPass() {
char LowerIntrinsics::ID = 0;
LowerIntrinsics::LowerIntrinsics()
- : FunctionPass(&ID) {}
+ : FunctionPass(ID) {}
const char *LowerIntrinsics::getPassName() const {
return "Lower Garbage Collection Instructions";
@@ -181,9 +181,10 @@ bool LowerIntrinsics::InsertRootInitializers(Function &F, AllocaInst **Roots,
for (AllocaInst **I = Roots, **E = Roots + Count; I != E; ++I)
if (!InitedRoots.count(*I)) {
- new StoreInst(ConstantPointerNull::get(cast<PointerType>(
- cast<PointerType>((*I)->getType())->getElementType())),
- *I, IP);
+ StoreInst* SI = new StoreInst(ConstantPointerNull::get(cast<PointerType>(
+ cast<PointerType>((*I)->getType())->getElementType())),
+ *I);
+ SI->insertAfter(*I);
MadeChange = true;
}
@@ -259,7 +260,7 @@ bool LowerIntrinsics::PerformDefaultLowering(Function &F, GCStrategy &S) {
bool LowerRd = !S.customReadBarrier();
bool InitRoots = S.initializeRoots();
- SmallVector<AllocaInst*,32> Roots;
+ SmallVector<AllocaInst*, 32> Roots;
bool MadeChange = false;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
@@ -270,7 +271,8 @@ bool LowerIntrinsics::PerformDefaultLowering(Function &F, GCStrategy &S) {
case Intrinsic::gcwrite:
if (LowerWr) {
// Replace a write barrier with a simple store.
- Value *St = new StoreInst(CI->getOperand(1), CI->getOperand(3), CI);
+ Value *St = new StoreInst(CI->getArgOperand(0),
+ CI->getArgOperand(2), CI);
CI->replaceAllUsesWith(St);
CI->eraseFromParent();
}
@@ -278,7 +280,7 @@ bool LowerIntrinsics::PerformDefaultLowering(Function &F, GCStrategy &S) {
case Intrinsic::gcread:
if (LowerRd) {
// Replace a read barrier with a simple load.
- Value *Ld = new LoadInst(CI->getOperand(2), "", CI);
+ Value *Ld = new LoadInst(CI->getArgOperand(1), "", CI);
Ld->takeName(CI);
CI->replaceAllUsesWith(Ld);
CI->eraseFromParent();
@@ -289,7 +291,7 @@ bool LowerIntrinsics::PerformDefaultLowering(Function &F, GCStrategy &S) {
// Initialize the GC root, but do not delete the intrinsic. The
// backend needs the intrinsic to flag the stack slot.
Roots.push_back(cast<AllocaInst>(
- CI->getOperand(1)->stripPointerCasts()));
+ CI->getArgOperand(0)->stripPointerCasts()));
}
break;
default:
@@ -316,7 +318,7 @@ FunctionPass *llvm::createGCMachineCodeAnalysisPass() {
char MachineCodeAnalysis::ID = 0;
MachineCodeAnalysis::MachineCodeAnalysis()
- : MachineFunctionPass(&ID) {}
+ : MachineFunctionPass(ID) {}
const char *MachineCodeAnalysis::getPassName() const {
return "Analyze Machine Code For Garbage Collection";
@@ -329,14 +331,11 @@ void MachineCodeAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<GCModuleInfo>();
}
-unsigned MachineCodeAnalysis::InsertLabel(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- DebugLoc DL) const {
- unsigned Label = MMI->NextLabelID();
-
- BuildMI(MBB, MI, DL,
- TII->get(TargetOpcode::GC_LABEL)).addImm(Label);
-
+MCSymbol *MachineCodeAnalysis::InsertLabel(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ DebugLoc DL) const {
+ MCSymbol *Label = MBB.getParent()->getContext().CreateTempSymbol();
+ BuildMI(MBB, MI, DL, TII->get(TargetOpcode::GC_LABEL)).addSym(Label);
return Label;
}
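Two details in the GCStrategy.cpp hunks above are easy to miss. The gcwrite/gcread lowering switches to CallInst::getArgOperand(), which indexes the call's arguments directly and therefore stays correct regardless of where the callee sits in the underlying operand list, and GC labels become MCSymbols taken from the function's MCContext rather than numeric IDs. A sketch of the lowering idiom for gcwrite, mirroring the calls in the hunk (the value/object/slot argument order is as documented upstream):

    #include "llvm/Instructions.h"

    using namespace llvm;

    // Lower a call to llvm.gcwrite(value, object, slot) into a plain store,
    // addressing the arguments by their position in the argument list.
    static void LowerGCWrite(CallInst *CI) {
      Value *St = new StoreInst(CI->getArgOperand(0),   // value being written
                                CI->getArgOperand(2),   // slot it is written to
                                CI);                    // insert before the call
      CI->replaceAllUsesWith(St);
      CI->eraseFromParent();
    }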
diff --git a/libclamav/c++/llvm/lib/CodeGen/IfConversion.cpp b/libclamav/c++/llvm/lib/CodeGen/IfConversion.cpp
index c61fd17..0ea30d7 100644
--- a/libclamav/c++/llvm/lib/CodeGen/IfConversion.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/IfConversion.cpp
@@ -20,6 +20,7 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -33,20 +34,22 @@ using namespace llvm;
static cl::opt<int> IfCvtFnStart("ifcvt-fn-start", cl::init(-1), cl::Hidden);
static cl::opt<int> IfCvtFnStop("ifcvt-fn-stop", cl::init(-1), cl::Hidden);
static cl::opt<int> IfCvtLimit("ifcvt-limit", cl::init(-1), cl::Hidden);
-static cl::opt<bool> DisableSimple("disable-ifcvt-simple",
+static cl::opt<bool> DisableSimple("disable-ifcvt-simple",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableSimpleF("disable-ifcvt-simple-false",
+static cl::opt<bool> DisableSimpleF("disable-ifcvt-simple-false",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableTriangle("disable-ifcvt-triangle",
+static cl::opt<bool> DisableTriangle("disable-ifcvt-triangle",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableTriangleR("disable-ifcvt-triangle-rev",
+static cl::opt<bool> DisableTriangleR("disable-ifcvt-triangle-rev",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableTriangleF("disable-ifcvt-triangle-false",
+static cl::opt<bool> DisableTriangleF("disable-ifcvt-triangle-false",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableTriangleFR("disable-ifcvt-triangle-false-rev",
+static cl::opt<bool> DisableTriangleFR("disable-ifcvt-triangle-false-rev",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableDiamond("disable-ifcvt-diamond",
+static cl::opt<bool> DisableDiamond("disable-ifcvt-diamond",
cl::init(false), cl::Hidden);
+static cl::opt<bool> IfCvtBranchFold("ifcvt-branch-fold",
+ cl::init(true), cl::Hidden);
STATISTIC(NumSimple, "Number of simple if-conversions performed");
STATISTIC(NumSimpleFalse, "Number of simple (F) if-conversions performed");
@@ -115,7 +118,7 @@ namespace {
BB(0), TrueBB(0), FalseBB(0) {}
};
- /// IfcvtToken - Record information about pending if-conversions to attemp:
+ /// IfcvtToken - Record information about pending if-conversions to attempt:
/// BBI - Corresponding BBInfo.
/// Kind - Type of block. See IfcvtKind.
/// NeedSubsumption - True if the to-be-predicated BB has already been
@@ -146,11 +149,12 @@ namespace {
const TargetLowering *TLI;
const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
bool MadeChange;
int FnNum;
public:
static char ID;
- IfConverter() : MachineFunctionPass(&ID), FnNum(-1) {}
+ IfConverter() : MachineFunctionPass(ID), FnNum(-1) {}
virtual bool runOnMachineFunction(MachineFunction &MF);
virtual const char *getPassName() const { return "If Converter"; }
@@ -167,8 +171,7 @@ namespace {
std::vector<IfcvtToken*> &Tokens);
bool FeasibilityAnalysis(BBInfo &BBI, SmallVectorImpl<MachineOperand> &Cond,
bool isTriangle = false, bool RevBranch = false);
- bool AnalyzeBlocks(MachineFunction &MF,
- std::vector<IfcvtToken*> &Tokens);
+ void AnalyzeBlocks(MachineFunction &MF, std::vector<IfcvtToken*> &Tokens);
void InvalidatePreds(MachineBasicBlock *BB);
void RemoveExtraEdges(BBInfo &BBI);
bool IfConvertSimple(BBInfo &BBI, IfcvtKind Kind);
@@ -177,14 +180,22 @@ namespace {
unsigned NumDups1, unsigned NumDups2);
void PredicateBlock(BBInfo &BBI,
MachineBasicBlock::iterator E,
- SmallVectorImpl<MachineOperand> &Cond);
+ SmallVectorImpl<MachineOperand> &Cond,
+ SmallSet<unsigned, 4> &Redefs);
void CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
SmallVectorImpl<MachineOperand> &Cond,
+ SmallSet<unsigned, 4> &Redefs,
bool IgnoreBr = false);
- void MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI);
+ void MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI, bool AddEdges = true);
- bool MeetIfcvtSizeLimit(unsigned Size) const {
- return Size > 0 && Size <= TLI->getIfCvtBlockSizeLimit();
+ bool MeetIfcvtSizeLimit(MachineBasicBlock &BB, unsigned Size) const {
+ return Size > 0 && TII->isProfitableToIfCvt(BB, Size);
+ }
+
+ bool MeetIfcvtSizeLimit(MachineBasicBlock &TBB, unsigned TSize,
+ MachineBasicBlock &FBB, unsigned FSize) const {
+ return TSize > 0 && FSize > 0 &&
+ TII->isProfitableToIfCvt(TBB, TSize, FBB, FSize);
}
// blockAlwaysFallThrough - Block ends without a terminator.
@@ -219,16 +230,22 @@ namespace {
char IfConverter::ID = 0;
}
-static RegisterPass<IfConverter>
-X("if-converter", "If Converter");
+INITIALIZE_PASS(IfConverter, "if-converter", "If Converter", false, false);
FunctionPass *llvm::createIfConverterPass() { return new IfConverter(); }
bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
TLI = MF.getTarget().getTargetLowering();
TII = MF.getTarget().getInstrInfo();
+ TRI = MF.getTarget().getRegisterInfo();
if (!TII) return false;
+ // Tail merging tends to expose more if-conversion opportunities.
+ BranchFolder BF(true);
+ bool BFChange = BF.OptimizeFunction(MF, TII,
+ MF.getTarget().getRegisterInfo(),
+ getAnalysisIfAvailable<MachineModuleInfo>());
+
DEBUG(dbgs() << "\nIfcvt: function (" << ++FnNum << ") \'"
<< MF.getFunction()->getName() << "\'");
@@ -253,7 +270,8 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
while (IfCvtLimit == -1 || (int)NumIfCvts < IfCvtLimit) {
// Do an initial analysis for each basic block and find all the potential
// candidates to perform if-conversion.
- bool Change = AnalyzeBlocks(MF, Tokens);
+ bool Change = false;
+ AnalyzeBlocks(MF, Tokens);
while (!Tokens.empty()) {
IfcvtToken *Token = Tokens.back();
Tokens.pop_back();
@@ -281,7 +299,8 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
case ICSimpleFalse: {
bool isFalse = Kind == ICSimpleFalse;
if ((isFalse && DisableSimpleF) || (!isFalse && DisableSimple)) break;
- DEBUG(dbgs() << "Ifcvt (Simple" << (Kind == ICSimpleFalse ? " false" :"")
+ DEBUG(dbgs() << "Ifcvt (Simple" << (Kind == ICSimpleFalse ?
+ " false" : "")
<< "): BB#" << BBI.BB->getNumber() << " ("
<< ((Kind == ICSimpleFalse)
? BBI.FalseBB->getNumber()
@@ -289,8 +308,8 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
RetVal = IfConvertSimple(BBI, Kind);
DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
if (RetVal) {
- if (isFalse) NumSimpleFalse++;
- else NumSimple++;
+ if (isFalse) ++NumSimpleFalse;
+ else ++NumSimple;
}
break;
}
@@ -316,11 +335,11 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
if (RetVal) {
if (isFalse) {
- if (isRev) NumTriangleFRev++;
- else NumTriangleFalse++;
+ if (isRev) ++NumTriangleFRev;
+ else ++NumTriangleFalse;
} else {
- if (isRev) NumTriangleRev++;
- else NumTriangle++;
+ if (isRev) ++NumTriangleRev;
+ else ++NumTriangle;
}
}
break;
@@ -332,7 +351,7 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
<< BBI.FalseBB->getNumber() << ") ");
RetVal = IfConvertDiamond(BBI, Kind, NumDups, NumDups2);
DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
- if (RetVal) NumDiamonds++;
+ if (RetVal) ++NumDiamonds;
break;
}
}
@@ -361,13 +380,14 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
Roots.clear();
BBAnalysis.clear();
- if (MadeChange) {
+ if (MadeChange && IfCvtBranchFold) {
BranchFolder BF(false);
BF.OptimizeFunction(MF, TII,
MF.getTarget().getRegisterInfo(),
getAnalysisIfAvailable<MachineModuleInfo>());
}
+ MadeChange |= BFChange;
return MadeChange;
}
@@ -387,9 +407,10 @@ static MachineBasicBlock *findFalseBlock(MachineBasicBlock *BB,
/// ReverseBranchCondition - Reverse the condition of the end of the block
/// branch. Swap block's 'true' and 'false' successors.
bool IfConverter::ReverseBranchCondition(BBInfo &BBI) {
+ DebugLoc dl; // FIXME: this is nowhere
if (!TII->ReverseBranchCondition(BBI.BrCond)) {
TII->RemoveBranch(*BBI.BB);
- TII->InsertBranch(*BBI.BB, BBI.FalseBB, BBI.TrueBB, BBI.BrCond);
+ TII->InsertBranch(*BBI.BB, BBI.FalseBB, BBI.TrueBB, BBI.BrCond, dl);
std::swap(BBI.TrueBB, BBI.FalseBB);
return true;
}
@@ -420,7 +441,7 @@ bool IfConverter::ValidSimple(BBInfo &TrueBBI, unsigned &Dups) const {
if (TrueBBI.BB->pred_size() > 1) {
if (TrueBBI.CannotBeCopied ||
- TrueBBI.NonPredSize > TLI->getIfCvtDupBlockSizeLimit())
+ !TII->isProfitableToDupForIfCvt(*TrueBBI.BB, TrueBBI.NonPredSize))
return false;
Dups = TrueBBI.NonPredSize;
}
@@ -431,7 +452,7 @@ bool IfConverter::ValidSimple(BBInfo &TrueBBI, unsigned &Dups) const {
/// ValidTriangle - Returns true if the 'true' and 'false' blocks (along
/// with their common predecessor) forms a valid triangle shape for ifcvt.
/// If 'FalseBranch' is true, it checks if 'true' block's false branch
-/// branches to the false branch rather than the other way around. It also
+/// branches to the 'false' block rather than the other way around. It also
/// returns the number of instructions that the ifcvt would need to duplicate
/// if performed in 'Dups'.
bool IfConverter::ValidTriangle(BBInfo &TrueBBI, BBInfo &FalseBBI,
@@ -457,7 +478,7 @@ bool IfConverter::ValidTriangle(BBInfo &TrueBBI, BBInfo &FalseBBI,
++Size;
}
}
- if (Size > TLI->getIfCvtDupBlockSizeLimit())
+ if (!TII->isProfitableToDupForIfCvt(*TrueBBI.BB, Size))
return false;
Dups = Size;
}
@@ -514,7 +535,27 @@ bool IfConverter::ValidDiamond(BBInfo &TrueBBI, BBInfo &FalseBBI,
MachineBasicBlock::iterator TI = TrueBBI.BB->begin();
MachineBasicBlock::iterator FI = FalseBBI.BB->begin();
- while (TI != TrueBBI.BB->end() && FI != FalseBBI.BB->end()) {
+ MachineBasicBlock::iterator TIE = TrueBBI.BB->end();
+ MachineBasicBlock::iterator FIE = FalseBBI.BB->end();
+ // Skip dbg_value instructions
+ while (TI != TIE && TI->isDebugValue())
+ ++TI;
+ while (FI != FIE && FI->isDebugValue())
+ ++FI;
+ while (TI != TIE && FI != FIE) {
+ // Skip dbg_value instructions. These do not count.
+ if (TI->isDebugValue()) {
+ while (TI != TIE && TI->isDebugValue())
+ ++TI;
+ if (TI == TIE)
+ break;
+ }
+ if (FI->isDebugValue()) {
+ while (FI != FIE && FI->isDebugValue())
+ ++FI;
+ if (FI == FIE)
+ break;
+ }
if (!TI->isIdenticalTo(FI))
break;
++Dups1;
@@ -524,7 +565,27 @@ bool IfConverter::ValidDiamond(BBInfo &TrueBBI, BBInfo &FalseBBI,
TI = firstNonBranchInst(TrueBBI.BB, TII);
FI = firstNonBranchInst(FalseBBI.BB, TII);
- while (TI != TrueBBI.BB->begin() && FI != FalseBBI.BB->begin()) {
+ MachineBasicBlock::iterator TIB = TrueBBI.BB->begin();
+ MachineBasicBlock::iterator FIB = FalseBBI.BB->begin();
+ // Skip dbg_value instructions at end of the bb's.
+ while (TI != TIB && TI->isDebugValue())
+ --TI;
+ while (FI != FIB && FI->isDebugValue())
+ --FI;
+ while (TI != TIB && FI != FIB) {
+ // Skip dbg_value instructions. These do not count.
+ if (TI->isDebugValue()) {
+ while (TI != TIB && TI->isDebugValue())
+ --TI;
+ if (TI == TIB)
+ break;
+ }
+ if (FI->isDebugValue()) {
+ while (FI != FIB && FI->isDebugValue())
+ --FI;
+ if (FI == FIB)
+ break;
+ }
if (!TI->isIdenticalTo(FI))
break;
++Dups2;
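The ValidDiamond changes above make the shared prefix/suffix counting skip dbg_value instructions, so compiling with debug info no longer changes which diamonds qualify. The counting idea reduced to plain C++, with a boolean flag standing in for MachineInstr::isDebugValue():

    #include <cstdio>
    #include <string>
    #include <vector>

    // Count how many leading entries of A and B are identical, skipping entries
    // flagged as debug-only on either side (they must not affect the count).
    struct Entry { std::string Op; bool IsDebug; };

    static unsigned commonPrefix(const std::vector<Entry> &A,
                                 const std::vector<Entry> &B) {
      unsigned Dups = 0;
      size_t I = 0, J = 0;
      while (I < A.size() && J < B.size()) {
        if (A[I].IsDebug) { ++I; continue; }   // debug-only entries do not count
        if (B[J].IsDebug) { ++J; continue; }
        if (A[I].Op != B[J].Op)
          break;
        ++Dups; ++I; ++J;
      }
      return Dups;
    }

    int main() {
      std::vector<Entry> T = {{"cmp", false}, {"dbg", true}, {"mov", false}, {"add", false}};
      std::vector<Entry> F = {{"cmp", false}, {"mov", false}, {"sub", false}};
      std::printf("shared prefix = %u\n", commonPrefix(T, F));  // prints 2
      return 0;
    }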
@@ -556,7 +617,7 @@ void IfConverter::ScanInstructions(BBInfo &BBI) {
// No false branch. This BB must end with a conditional branch and a
// fallthrough.
if (!BBI.FalseBB)
- BBI.FalseBB = findFalseBlock(BBI.BB, BBI.TrueBB);
+ BBI.FalseBB = findFalseBlock(BBI.BB, BBI.TrueBB);
if (!BBI.FalseBB) {
// Malformed bcc? True and false blocks are the same?
BBI.IsUnpredicable = true;
@@ -569,6 +630,9 @@ void IfConverter::ScanInstructions(BBInfo &BBI) {
BBI.ClobbersPred = false;
for (MachineBasicBlock::iterator I = BBI.BB->begin(), E = BBI.BB->end();
I != E; ++I) {
+ if (I->isDebugValue())
+ continue;
+
const TargetInstrDesc &TID = I->getDesc();
if (TID.isNotDuplicable())
BBI.CannotBeCopied = true;
@@ -702,8 +766,8 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB,
bool FNeedSub = FalseBBI.Predicate.size() > 0;
bool Enqueued = false;
if (CanRevCond && ValidDiamond(TrueBBI, FalseBBI, Dups, Dups2) &&
- MeetIfcvtSizeLimit(TrueBBI.NonPredSize - (Dups + Dups2)) &&
- MeetIfcvtSizeLimit(FalseBBI.NonPredSize - (Dups + Dups2)) &&
+ MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize - (Dups + Dups2),
+ *FalseBBI.BB, FalseBBI.NonPredSize - (Dups + Dups2)) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond) &&
FeasibilityAnalysis(FalseBBI, RevCond)) {
// Diamond:
@@ -720,7 +784,7 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB,
}
if (ValidTriangle(TrueBBI, FalseBBI, false, Dups) &&
- MeetIfcvtSizeLimit(TrueBBI.NonPredSize) &&
+ MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond, true)) {
// Triangle:
// EBB
@@ -732,23 +796,23 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB,
Tokens.push_back(new IfcvtToken(BBI, ICTriangle, TNeedSub, Dups));
Enqueued = true;
}
-
+
if (ValidTriangle(TrueBBI, FalseBBI, true, Dups) &&
- MeetIfcvtSizeLimit(TrueBBI.NonPredSize) &&
+ MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond, true, true)) {
Tokens.push_back(new IfcvtToken(BBI, ICTriangleRev, TNeedSub, Dups));
Enqueued = true;
}
if (ValidSimple(TrueBBI, Dups) &&
- MeetIfcvtSizeLimit(TrueBBI.NonPredSize) &&
+ MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond)) {
// Simple (split, no rejoin):
// EBB
// | \_
// | |
// | TBB---> exit
- // |
+ // |
// FBB
Tokens.push_back(new IfcvtToken(BBI, ICSimple, TNeedSub, Dups));
Enqueued = true;
@@ -757,21 +821,21 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB,
if (CanRevCond) {
// Try the other path...
if (ValidTriangle(FalseBBI, TrueBBI, false, Dups) &&
- MeetIfcvtSizeLimit(FalseBBI.NonPredSize) &&
+ MeetIfcvtSizeLimit(*FalseBBI.BB, FalseBBI.NonPredSize) &&
FeasibilityAnalysis(FalseBBI, RevCond, true)) {
Tokens.push_back(new IfcvtToken(BBI, ICTriangleFalse, FNeedSub, Dups));
Enqueued = true;
}
if (ValidTriangle(FalseBBI, TrueBBI, true, Dups) &&
- MeetIfcvtSizeLimit(FalseBBI.NonPredSize) &&
+ MeetIfcvtSizeLimit(*FalseBBI.BB, FalseBBI.NonPredSize) &&
FeasibilityAnalysis(FalseBBI, RevCond, true, true)) {
Tokens.push_back(new IfcvtToken(BBI, ICTriangleFRev, FNeedSub, Dups));
Enqueued = true;
}
if (ValidSimple(FalseBBI, Dups) &&
- MeetIfcvtSizeLimit(FalseBBI.NonPredSize) &&
+ MeetIfcvtSizeLimit(*FalseBBI.BB, FalseBBI.NonPredSize) &&
FeasibilityAnalysis(FalseBBI, RevCond)) {
Tokens.push_back(new IfcvtToken(BBI, ICSimpleFalse, FNeedSub, Dups));
Enqueued = true;
@@ -785,11 +849,9 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB,
}
/// AnalyzeBlocks - Analyze all blocks and find entries for all if-conversion
-/// candidates. It returns true if any CFG restructuring is done to expose more
-/// if-conversion opportunities.
-bool IfConverter::AnalyzeBlocks(MachineFunction &MF,
+/// candidates.
+void IfConverter::AnalyzeBlocks(MachineFunction &MF,
std::vector<IfcvtToken*> &Tokens) {
- bool Change = false;
std::set<MachineBasicBlock*> Visited;
for (unsigned i = 0, e = Roots.size(); i != e; ++i) {
for (idf_ext_iterator<MachineBasicBlock*> I=idf_ext_begin(Roots[i],Visited),
@@ -801,20 +863,23 @@ bool IfConverter::AnalyzeBlocks(MachineFunction &MF,
// Sort to favor more complex ifcvt scheme.
std::stable_sort(Tokens.begin(), Tokens.end(), IfcvtTokenCmp);
-
- return Change;
}
/// canFallThroughTo - Returns true either if ToBB is the next block after BB or
/// that all the intervening blocks are empty (given BB can fall through to its
/// next block).
static bool canFallThroughTo(MachineBasicBlock *BB, MachineBasicBlock *ToBB) {
- MachineFunction::iterator I = BB;
+ MachineFunction::iterator PI = BB;
+ MachineFunction::iterator I = llvm::next(PI);
MachineFunction::iterator TI = ToBB;
MachineFunction::iterator E = BB->getParent()->end();
- while (++I != TI)
- if (I == E || !I->empty())
+ while (I != TI) {
+ // Check isSuccessor to avoid the case where the next block is empty, but
+ // it's not a successor.
+ if (I == E || !I->empty() || !PI->isSuccessor(I))
return false;
+ PI = I++;
+ }
return true;
}
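canFallThroughTo gains a successor check: an intervening empty block only keeps the fall-through chain alive if the previous block actually reaches it. The same walk modeled in plain C++, with a vector for block layout and a successor set per block:

    #include <cstdio>
    #include <set>
    #include <vector>

    struct Block {
      bool Empty;
      std::set<int> Succs;   // indices of successor blocks
    };

    // True if Blocks[From] can fall through to Blocks[To]: every block in
    // between must be empty *and* be a successor of the block before it.
    static bool canFallThroughTo(const std::vector<Block> &Blocks, int From, int To) {
      int Prev = From;
      for (int I = From + 1; I != To; ++I) {
        if (I >= (int)Blocks.size() || !Blocks[I].Empty || !Blocks[Prev].Succs.count(I))
          return false;
        Prev = I;
      }
      return true;
    }

    int main() {
      // Block 1 is empty but block 0 never reaches it, so no fall-through.
      std::vector<Block> Blocks = {{false, {2}}, {true, {2}}, {false, {}}};
      std::printf("%d\n", canFallThroughTo(Blocks, 0, 2));  // prints 0
      return 0;
    }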
@@ -836,8 +901,9 @@ void IfConverter::InvalidatePreds(MachineBasicBlock *BB) {
///
static void InsertUncondBranch(MachineBasicBlock *BB, MachineBasicBlock *ToBB,
const TargetInstrInfo *TII) {
+ DebugLoc dl; // FIXME: this is nowhere
SmallVector<MachineOperand, 0> NoCond;
- TII->InsertBranch(*BB, ToBB, NULL, NoCond);
+ TII->InsertBranch(*BB, ToBB, NULL, NoCond, dl);
}
/// RemoveExtraEdges - Remove true / false edges if either / both are no longer
@@ -849,6 +915,66 @@ void IfConverter::RemoveExtraEdges(BBInfo &BBI) {
BBI.BB->CorrectExtraCFGEdges(TBB, FBB, !Cond.empty());
}
+/// InitPredRedefs / UpdatePredRedefs - Defs by predicated instructions are
+/// modeled as read + write (sort of like two-address instructions). These
+/// routines track register liveness and add implicit uses to if-converted
+/// instructions to conform to the model.
+static void InitPredRedefs(MachineBasicBlock *BB, SmallSet<unsigned,4> &Redefs,
+ const TargetRegisterInfo *TRI) {
+ for (MachineBasicBlock::livein_iterator I = BB->livein_begin(),
+ E = BB->livein_end(); I != E; ++I) {
+ unsigned Reg = *I;
+ Redefs.insert(Reg);
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg)
+ Redefs.insert(*Subreg);
+ }
+}
+
+static void UpdatePredRedefs(MachineInstr *MI, SmallSet<unsigned,4> &Redefs,
+ const TargetRegisterInfo *TRI,
+ bool AddImpUse = false) {
+ SmallVector<unsigned, 4> Defs;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ if (MO.isDef())
+ Defs.push_back(Reg);
+ else if (MO.isKill()) {
+ Redefs.erase(Reg);
+ for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
+ Redefs.erase(*SR);
+ }
+ }
+ for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
+ unsigned Reg = Defs[i];
+ if (Redefs.count(Reg)) {
+ if (AddImpUse)
+ // Treat predicated update as read + write.
+ MI->addOperand(MachineOperand::CreateReg(Reg, false/*IsDef*/,
+ true/*IsImp*/,false/*IsKill*/));
+ } else {
+ Redefs.insert(Reg);
+ for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
+ Redefs.insert(*SR);
+ }
+ }
+}
+
+static void UpdatePredRedefs(MachineBasicBlock::iterator I,
+ MachineBasicBlock::iterator E,
+ SmallSet<unsigned,4> &Redefs,
+ const TargetRegisterInfo *TRI) {
+ while (I != E) {
+ UpdatePredRedefs(I, Redefs, TRI);
+ ++I;
+ }
+}
+
/// IfConvertSimple - If convert a simple (split, no rejoin) sub-CFG.
///
bool IfConverter::IfConvertSimple(BBInfo &BBI, IfcvtKind Kind) {
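The InitPredRedefs/UpdatePredRedefs helpers added above model a predicated def of an already-live register as a read-modify-write and attach an implicit use, so later passes still see the dependence on the old value. The core of that bookkeeping with plain integers for registers and sub-registers left out:

    #include <cstdio>
    #include <set>
    #include <vector>

    struct Operand { unsigned Reg; bool IsDef; bool IsKill; };
    struct Instr   { std::vector<Operand> Ops; std::vector<unsigned> ImplicitUses; };

    // Update the set of live ("redefinable") registers across one predicated
    // instruction: kills drop registers, defs of live registers become implicit
    // uses (read + write), defs of dead registers simply become live.
    static void updatePredRedefs(Instr &MI, std::set<unsigned> &Redefs) {
      std::vector<unsigned> Defs;
      for (size_t i = 0; i < MI.Ops.size(); ++i) {
        const Operand &MO = MI.Ops[i];
        if (MO.IsDef)        Defs.push_back(MO.Reg);
        else if (MO.IsKill)  Redefs.erase(MO.Reg);
      }
      for (size_t i = 0; i < Defs.size(); ++i) {
        if (Redefs.count(Defs[i]))
          MI.ImplicitUses.push_back(Defs[i]);   // treat the def as read + write
        else
          Redefs.insert(Defs[i]);
      }
    }

    int main() {
      std::set<unsigned> Redefs = {1};                        // r1 live into the block
      Instr MI = {{{1, true, false}, {2, false, true}}, {}};  // def r1, kill r2
      updatePredRedefs(MI, Redefs);
      std::printf("implicit uses: %zu\n", MI.ImplicitUses.size());  // prints 1
      return 0;
    }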
@@ -873,13 +999,19 @@ bool IfConverter::IfConvertSimple(BBInfo &BBI, IfcvtKind Kind) {
if (TII->ReverseBranchCondition(Cond))
assert(false && "Unable to reverse branch condition!");
+ // Initialize liveins to the first BB. These are potentially redefined by
+ // predicated instructions.
+ SmallSet<unsigned, 4> Redefs;
+ InitPredRedefs(CvtBBI->BB, Redefs, TRI);
+ InitPredRedefs(NextBBI->BB, Redefs, TRI);
+
if (CvtBBI->BB->pred_size() > 1) {
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
// Copy instructions in the true block, predicate them, and add them to
// the entry block.
- CopyAndPredicateBlock(BBI, *CvtBBI, Cond);
+ CopyAndPredicateBlock(BBI, *CvtBBI, Cond, Redefs);
} else {
- PredicateBlock(*CvtBBI, CvtBBI->BB->end(), Cond);
+ PredicateBlock(*CvtBBI, CvtBBI->BB->end(), Cond, Redefs);
// Merge converted block into entry block.
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
@@ -922,6 +1054,7 @@ bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) {
BBInfo &FalseBBI = BBAnalysis[BBI.FalseBB->getNumber()];
BBInfo *CvtBBI = &TrueBBI;
BBInfo *NextBBI = &FalseBBI;
+ DebugLoc dl; // FIXME: this is nowhere
SmallVector<MachineOperand, 4> Cond(BBI.BrCond.begin(), BBI.BrCond.end());
if (Kind == ICTriangleFalse || Kind == ICTriangleFRev)
@@ -957,21 +1090,26 @@ bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) {
}
}
+ // Initialize liveins to the first BB. These are potentially redefined by
+ // predicated instructions.
+ SmallSet<unsigned, 4> Redefs;
+ InitPredRedefs(CvtBBI->BB, Redefs, TRI);
+ InitPredRedefs(NextBBI->BB, Redefs, TRI);
+
bool HasEarlyExit = CvtBBI->FalseBB != NULL;
- bool DupBB = CvtBBI->BB->pred_size() > 1;
- if (DupBB) {
+ if (CvtBBI->BB->pred_size() > 1) {
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
// Copy instructions in the true block, predicate them, and add them to
// the entry block.
- CopyAndPredicateBlock(BBI, *CvtBBI, Cond, true);
+ CopyAndPredicateBlock(BBI, *CvtBBI, Cond, Redefs, true);
} else {
// Predicate the 'true' block after removing its branch.
CvtBBI->NonPredSize -= TII->RemoveBranch(*CvtBBI->BB);
- PredicateBlock(*CvtBBI, CvtBBI->BB->end(), Cond);
+ PredicateBlock(*CvtBBI, CvtBBI->BB->end(), Cond, Redefs);
// Now merge the entry of the triangle with the true block.
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
- MergeBlocks(BBI, *CvtBBI);
+ MergeBlocks(BBI, *CvtBBI, false);
}
// If 'true' block has a 'false' successor, add an exit branch to it.
@@ -980,7 +1118,7 @@ bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) {
CvtBBI->BrCond.end());
if (TII->ReverseBranchCondition(RevCond))
assert(false && "Unable to reverse branch condition!");
- TII->InsertBranch(*BBI.BB, CvtBBI->FalseBB, NULL, RevCond);
+ TII->InsertBranch(*BBI.BB, CvtBBI->FalseBB, NULL, RevCond, dl);
BBI.BB->addSuccessor(CvtBBI->FalseBB);
}
@@ -1009,7 +1147,7 @@ bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) {
RemoveExtraEdges(BBI);
// Update block info. BB can be iteratively if-converted.
- if (!IterIfcvt)
+ if (!IterIfcvt)
BBI.IsDone = true;
InvalidatePreds(BBI.BB);
CvtBBI->IsDone = true;
@@ -1044,9 +1182,9 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
return false;
}
- // Merge the 'true' and 'false' blocks by copying the instructions
- // from the 'false' block to the 'true' block. That is, unless the true
- // block would clobber the predicate, in that case, do the opposite.
+ // Put the predicated instructions from the 'true' block before the
+ // instructions from the 'false' block, unless the true block would clobber
+ // the predicate, in which case, do the opposite.
BBInfo *BBI1 = &TrueBBI;
BBInfo *BBI2 = &FalseBBI;
SmallVector<MachineOperand, 4> RevCond(BBI.BrCond.begin(), BBI.BrCond.end());
@@ -1071,39 +1209,72 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
// Remove the conditional branch from entry to the blocks.
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
+ // Initialize liveins to the first BB. These are potentially redefined by
+ // predicated instructions.
+ SmallSet<unsigned, 4> Redefs;
+ InitPredRedefs(BBI1->BB, Redefs, TRI);
+
// Remove the duplicated instructions at the beginnings of both paths.
MachineBasicBlock::iterator DI1 = BBI1->BB->begin();
MachineBasicBlock::iterator DI2 = BBI2->BB->begin();
+ MachineBasicBlock::iterator DIE1 = BBI1->BB->end();
+ MachineBasicBlock::iterator DIE2 = BBI2->BB->end();
+ // Skip dbg_value instructions
+ while (DI1 != DIE1 && DI1->isDebugValue())
+ ++DI1;
+ while (DI2 != DIE2 && DI2->isDebugValue())
+ ++DI2;
BBI1->NonPredSize -= NumDups1;
BBI2->NonPredSize -= NumDups1;
+
+ // Skip past the dups on each side separately since there may be
+ // differing dbg_value entries.
+ for (unsigned i = 0; i < NumDups1; ++DI1) {
+ if (!DI1->isDebugValue())
+ ++i;
+ }
while (NumDups1 != 0) {
- ++DI1;
++DI2;
- --NumDups1;
+ if (!DI2->isDebugValue())
+ --NumDups1;
}
+
+ UpdatePredRedefs(BBI1->BB->begin(), DI1, Redefs, TRI);
BBI.BB->splice(BBI.BB->end(), BBI1->BB, BBI1->BB->begin(), DI1);
BBI2->BB->erase(BBI2->BB->begin(), DI2);
// Predicate the 'true' block after removing its branch.
BBI1->NonPredSize -= TII->RemoveBranch(*BBI1->BB);
DI1 = BBI1->BB->end();
- for (unsigned i = 0; i != NumDups2; ++i)
+ for (unsigned i = 0; i != NumDups2; ) {
+ // NumDups2 only counted non-dbg_value instructions, so this won't
+ // run off the head of the list.
+ assert (DI1 != BBI1->BB->begin());
--DI1;
+ // skip dbg_value instructions
+ if (!DI1->isDebugValue())
+ ++i;
+ }
BBI1->BB->erase(DI1, BBI1->BB->end());
- PredicateBlock(*BBI1, BBI1->BB->end(), *Cond1);
+ PredicateBlock(*BBI1, BBI1->BB->end(), *Cond1, Redefs);
// Predicate the 'false' block.
BBI2->NonPredSize -= TII->RemoveBranch(*BBI2->BB);
DI2 = BBI2->BB->end();
while (NumDups2 != 0) {
+ // NumDups2 only counted non-dbg_value instructions, so this won't
+ // run off the head of the list.
+ assert (DI2 != BBI2->BB->begin());
--DI2;
- --NumDups2;
+ // skip dbg_value instructions
+ if (!DI2->isDebugValue())
+ --NumDups2;
}
- PredicateBlock(*BBI2, DI2, *Cond2);
+ PredicateBlock(*BBI2, DI2, *Cond2, Redefs);
// Merge the true block into the entry of the diamond.
- MergeBlocks(BBI, *BBI1);
- MergeBlocks(BBI, *BBI2);
+ MergeBlocks(BBI, *BBI1, TailBB == 0);
+ MergeBlocks(BBI, *BBI2, TailBB == 0);
// If the if-converted block falls through or unconditionally branches into
// the tail block, and the tail block does not have other predecessors, then
@@ -1111,16 +1282,32 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
// tail, add an unconditional branch to it.
if (TailBB) {
BBInfo TailBBI = BBAnalysis[TailBB->getNumber()];
- if (TailBB->pred_size() == 1 && !TailBBI.HasFallThrough) {
- BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
+ bool CanMergeTail = !TailBBI.HasFallThrough;
+ // There may still be a fall-through edge from BBI1 or BBI2 to TailBB;
+ // check if there are any other predecessors besides those.
+ unsigned NumPreds = TailBB->pred_size();
+ if (NumPreds > 1)
+ CanMergeTail = false;
+ else if (NumPreds == 1 && CanMergeTail) {
+ MachineBasicBlock::pred_iterator PI = TailBB->pred_begin();
+ if (*PI != BBI1->BB && *PI != BBI2->BB)
+ CanMergeTail = false;
+ }
+ if (CanMergeTail) {
MergeBlocks(BBI, TailBBI);
TailBBI.IsDone = true;
} else {
+ BBI.BB->addSuccessor(TailBB);
InsertUncondBranch(BBI.BB, TailBB, TII);
BBI.HasFallThrough = false;
}
}
+ // RemoveExtraEdges won't work if the block has an unanalyzable branch,
+ // which can happen here if TailBB is unanalyzable and is merged, so
+ // explicitly remove BBI1 and BBI2 as successors.
+ BBI.BB->removeSuccessor(BBI1->BB);
+ BBI.BB->removeSuccessor(BBI2->BB);
RemoveExtraEdges(BBI);
// Update block info.
@@ -1135,9 +1322,10 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
/// specified end with the specified condition.
void IfConverter::PredicateBlock(BBInfo &BBI,
MachineBasicBlock::iterator E,
- SmallVectorImpl<MachineOperand> &Cond) {
+ SmallVectorImpl<MachineOperand> &Cond,
+ SmallSet<unsigned, 4> &Redefs) {
for (MachineBasicBlock::iterator I = BBI.BB->begin(); I != E; ++I) {
- if (TII->isPredicated(I))
+ if (I->isDebugValue() || TII->isPredicated(I))
continue;
if (!TII->PredicateInstruction(I, Cond)) {
#ifndef NDEBUG
@@ -1145,6 +1333,10 @@ void IfConverter::PredicateBlock(BBInfo &BBI,
#endif
llvm_unreachable(0);
}
+
+ // If the predicated instruction now redefines a register as the result of
+ // if-conversion, add an implicit kill.
+ UpdatePredRedefs(I, Redefs, TRI, true);
}
std::copy(Cond.begin(), Cond.end(), std::back_inserter(BBI.Predicate));
@@ -1152,48 +1344,55 @@ void IfConverter::PredicateBlock(BBInfo &BBI,
BBI.IsAnalyzed = false;
BBI.NonPredSize = 0;
- NumIfConvBBs++;
+ ++NumIfConvBBs;
}
/// CopyAndPredicateBlock - Copy and predicate instructions from source BB to
/// the destination block. Skip end of block branches if IgnoreBr is true.
void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
SmallVectorImpl<MachineOperand> &Cond,
+ SmallSet<unsigned, 4> &Redefs,
bool IgnoreBr) {
MachineFunction &MF = *ToBBI.BB->getParent();
for (MachineBasicBlock::iterator I = FromBBI.BB->begin(),
E = FromBBI.BB->end(); I != E; ++I) {
const TargetInstrDesc &TID = I->getDesc();
- bool isPredicated = TII->isPredicated(I);
// Do not copy the end of the block branches.
- if (IgnoreBr && !isPredicated && TID.isBranch())
+ if (IgnoreBr && TID.isBranch())
break;
MachineInstr *MI = MF.CloneMachineInstr(I);
ToBBI.BB->insert(ToBBI.BB->end(), MI);
ToBBI.NonPredSize++;
- if (!isPredicated)
+ if (!TII->isPredicated(I) && !MI->isDebugValue()) {
if (!TII->PredicateInstruction(MI, Cond)) {
#ifndef NDEBUG
dbgs() << "Unable to predicate " << *I << "!\n";
#endif
llvm_unreachable(0);
}
+ }
+
+ // If the predicated instruction now redefines a register as the result of
+ // if-conversion, add an implicit kill.
+ UpdatePredRedefs(MI, Redefs, TRI, true);
}
- std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
- FromBBI.BB->succ_end());
- MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
- MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : NULL;
+ if (!IgnoreBr) {
+ std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
+ FromBBI.BB->succ_end());
+ MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
+ MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : NULL;
- for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
- MachineBasicBlock *Succ = Succs[i];
- // Fallthrough edge can't be transferred.
- if (Succ == FallThrough)
- continue;
- ToBBI.BB->addSuccessor(Succ);
+ for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
+ MachineBasicBlock *Succ = Succs[i];
+ // Fallthrough edge can't be transferred.
+ if (Succ == FallThrough)
+ continue;
+ ToBBI.BB->addSuccessor(Succ);
+ }
}
std::copy(FromBBI.Predicate.begin(), FromBBI.Predicate.end(),
@@ -1203,25 +1402,18 @@ void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
ToBBI.ClobbersPred |= FromBBI.ClobbersPred;
ToBBI.IsAnalyzed = false;
- NumDupBBs++;
+ ++NumDupBBs;
}
/// MergeBlocks - Move all instructions from FromBB to the end of ToBB.
-///
-void IfConverter::MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI) {
+/// This will leave FromBB as an empty block, so remove all of its
+/// successor edges except for the fall-through edge. If AddEdges is true,
+/// i.e., when FromBBI's branch is being moved, add those successor edges to
+/// ToBBI.
+void IfConverter::MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI, bool AddEdges) {
ToBBI.BB->splice(ToBBI.BB->end(),
FromBBI.BB, FromBBI.BB->begin(), FromBBI.BB->end());
- // Redirect all branches to FromBB to ToBB.
- std::vector<MachineBasicBlock *> Preds(FromBBI.BB->pred_begin(),
- FromBBI.BB->pred_end());
- for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
- MachineBasicBlock *Pred = Preds[i];
- if (Pred == ToBBI.BB)
- continue;
- Pred->ReplaceUsesOfBlockWith(FromBBI.BB, ToBBI.BB);
- }
-
std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
FromBBI.BB->succ_end());
MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
@@ -1233,7 +1425,8 @@ void IfConverter::MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI) {
if (Succ == FallThrough)
continue;
FromBBI.BB->removeSuccessor(Succ);
- ToBBI.BB->addSuccessor(Succ);
+ if (AddEdges)
+ ToBBI.BB->addSuccessor(Succ);
}
// Now FromBBI always falls through to the next block!
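
The dup-skipping loops in IfConvertDiamond above advance an iterator past a fixed count of real instructions while stepping over interleaved dbg_value markers, so debug info cannot change how many instructions get removed. A minimal standalone sketch of that counting idiom (a std::list with a hypothetical is_debug flag rather than MachineInstr) could be:

    #include <cassert>
    #include <list>

    struct Instr { bool is_debug; };

    // Advance It past Count non-debug instructions; debug markers are walked
    // over but never counted, matching the loops above.
    std::list<Instr>::iterator skipRealInstrs(std::list<Instr>::iterator It,
                                              std::list<Instr>::iterator End,
                                              unsigned Count) {
      for (unsigned Seen = 0; Seen < Count; ++It) {
        assert(It != End && "Count exceeds the number of real instructions");
        if (!It->is_debug)
          ++Seen;
      }
      return It;
    }
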
diff --git a/libclamav/c++/llvm/lib/CodeGen/InlineSpiller.cpp b/libclamav/c++/llvm/lib/CodeGen/InlineSpiller.cpp
new file mode 100644
index 0000000..b965bfd
--- /dev/null
+++ b/libclamav/c++/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -0,0 +1,480 @@
+//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The inline spiller modifies the machine function directly instead of
+// inserting spills and restores in VirtRegMap.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "spiller"
+#include "Spiller.h"
+#include "SplitKit.h"
+#include "VirtRegMap.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+namespace {
+class InlineSpiller : public Spiller {
+ MachineFunctionPass &pass_;
+ MachineFunction &mf_;
+ LiveIntervals &lis_;
+ MachineLoopInfo &loops_;
+ VirtRegMap &vrm_;
+ MachineFrameInfo &mfi_;
+ MachineRegisterInfo &mri_;
+ const TargetInstrInfo &tii_;
+ const TargetRegisterInfo &tri_;
+ const BitVector reserved_;
+
+ SplitAnalysis splitAnalysis_;
+
+ // Variables that are valid during spill(), but used by multiple methods.
+ LiveInterval *li_;
+ SmallVectorImpl<LiveInterval*> *newIntervals_;
+ const TargetRegisterClass *rc_;
+ int stackSlot_;
+ const SmallVectorImpl<LiveInterval*> *spillIs_;
+
+ // Values of the current interval that can potentially remat.
+ SmallPtrSet<VNInfo*, 8> reMattable_;
+
+ // Values in reMattable_ that failed to remat at some point.
+ SmallPtrSet<VNInfo*, 8> usedValues_;
+
+ ~InlineSpiller() {}
+
+public:
+ InlineSpiller(MachineFunctionPass &pass,
+ MachineFunction &mf,
+ VirtRegMap &vrm)
+ : pass_(pass),
+ mf_(mf),
+ lis_(pass.getAnalysis<LiveIntervals>()),
+ loops_(pass.getAnalysis<MachineLoopInfo>()),
+ vrm_(vrm),
+ mfi_(*mf.getFrameInfo()),
+ mri_(mf.getRegInfo()),
+ tii_(*mf.getTarget().getInstrInfo()),
+ tri_(*mf.getTarget().getRegisterInfo()),
+ reserved_(tri_.getReservedRegs(mf_)),
+ splitAnalysis_(mf, lis_, loops_) {}
+
+ void spill(LiveInterval *li,
+ SmallVectorImpl<LiveInterval*> &newIntervals,
+ SmallVectorImpl<LiveInterval*> &spillIs);
+
+private:
+ bool split();
+
+ bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
+ SlotIndex UseIdx);
+ bool reMaterializeFor(MachineBasicBlock::iterator MI);
+ void reMaterializeAll();
+
+ bool coalesceStackAccess(MachineInstr *MI);
+ bool foldMemoryOperand(MachineBasicBlock::iterator MI,
+ const SmallVectorImpl<unsigned> &Ops);
+ void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
+ void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
+};
+}
+
+namespace llvm {
+Spiller *createInlineSpiller(MachineFunctionPass &pass,
+ MachineFunction &mf,
+ VirtRegMap &vrm) {
+ return new InlineSpiller(pass, mf, vrm);
+}
+}
+
+/// split - try splitting the current interval into pieces that may allocate
+/// separately. Return true if successful.
+bool InlineSpiller::split() {
+ splitAnalysis_.analyze(li_);
+
+ if (const MachineLoop *loop = splitAnalysis_.getBestSplitLoop()) {
+ // We can split, but li_ may be left intact with fewer uses.
+ if (SplitEditor(splitAnalysis_, lis_, vrm_, *newIntervals_)
+ .splitAroundLoop(loop))
+ return true;
+ }
+
+ // Try splitting into single block intervals.
+ SplitAnalysis::BlockPtrSet blocks;
+ if (splitAnalysis_.getMultiUseBlocks(blocks)) {
+ if (SplitEditor(splitAnalysis_, lis_, vrm_, *newIntervals_)
+ .splitSingleBlocks(blocks))
+ return true;
+ }
+
+ // Try splitting inside a basic block.
+ if (const MachineBasicBlock *MBB = splitAnalysis_.getBlockForInsideSplit()) {
+ if (SplitEditor(splitAnalysis_, lis_, vrm_, *newIntervals_)
+ .splitInsideBlock(MBB))
+ return true;
+ }
+
+ // We may have been able to split out some uses, but the original interval is
+ // intact, and it should still be spilled.
+ return false;
+}
+
+/// allUsesAvailableAt - Return true if all registers used by OrigMI at
+/// OrigIdx are also available with the same value at UseIdx.
+bool InlineSpiller::allUsesAvailableAt(const MachineInstr *OrigMI,
+ SlotIndex OrigIdx,
+ SlotIndex UseIdx) {
+ OrigIdx = OrigIdx.getUseIndex();
+ UseIdx = UseIdx.getUseIndex();
+ for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = OrigMI->getOperand(i);
+ if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
+ continue;
+ // Reserved registers are OK.
+ if (MO.isUndef() || !lis_.hasInterval(MO.getReg()))
+ continue;
+ // We don't want to move any defs.
+ if (MO.isDef())
+ return false;
+ // We cannot depend on virtual registers in spillIs_. They will be spilled.
+ for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
+ if ((*spillIs_)[si]->reg == MO.getReg())
+ return false;
+
+ LiveInterval &LI = lis_.getInterval(MO.getReg());
+ const VNInfo *OVNI = LI.getVNInfoAt(OrigIdx);
+ if (!OVNI)
+ continue;
+ if (OVNI != LI.getVNInfoAt(UseIdx))
+ return false;
+ }
+ return true;
+}
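
allUsesAvailableAt amounts to: rematerialization is only safe if every register the original instruction reads carries the same value at the original definition point and at the new use point. A simplified (and deliberately stricter) standalone version of that comparison, with value numbers per register modeled as plain maps, might be:

    #include <map>
    #include <vector>

    // ValnoAt maps register -> value number at a program point. Remat is
    // rejected as soon as any read register carries a different (or unknown)
    // value at the two points. The real code above is more permissive about
    // registers without live intervals.
    bool sameValuesAt(const std::map<unsigned, unsigned> &ValnoAtOrig,
                      const std::map<unsigned, unsigned> &ValnoAtUse,
                      const std::vector<unsigned> &ReadRegs) {
      for (std::vector<unsigned>::const_iterator I = ReadRegs.begin(),
           E = ReadRegs.end(); I != E; ++I) {
        std::map<unsigned, unsigned>::const_iterator A = ValnoAtOrig.find(*I);
        std::map<unsigned, unsigned>::const_iterator B = ValnoAtUse.find(*I);
        if (A == ValnoAtOrig.end() || B == ValnoAtUse.end() ||
            A->second != B->second)
          return false;
      }
      return true;
    }
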
+
+/// reMaterializeFor - Attempt to rematerialize li_->reg before MI instead of
+/// reloading it.
+bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
+ SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
+ VNInfo *OrigVNI = li_->getVNInfoAt(UseIdx);
+ if (!OrigVNI) {
+ DEBUG(dbgs() << "\tadding <undef> flags: ");
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
+ MO.setIsUndef();
+ }
+ DEBUG(dbgs() << UseIdx << '\t' << *MI);
+ return true;
+ }
+ if (!reMattable_.count(OrigVNI)) {
+ DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
+ << UseIdx << '\t' << *MI);
+ return false;
+ }
+ MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
+ if (!allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx)) {
+ usedValues_.insert(OrigVNI);
+ DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
+ return false;
+ }
+
+ // If the instruction also writes li_->reg, it had better not require the same
+ // register for uses and defs.
+ bool Reads, Writes;
+ SmallVector<unsigned, 8> Ops;
+ tie(Reads, Writes) = MI->readsWritesVirtualRegister(li_->reg, &Ops);
+ if (Writes) {
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(Ops[i]);
+ if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
+ usedValues_.insert(OrigVNI);
+ DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
+ return false;
+ }
+ }
+ }
+
+ // Allocate a new register for the remat.
+ unsigned NewVReg = mri_.createVirtualRegister(rc_);
+ vrm_.grow();
+ LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
+ NewLI.markNotSpillable();
+ newIntervals_->push_back(&NewLI);
+
+ // Finally we can rematerialize OrigMI before MI.
+ MachineBasicBlock &MBB = *MI->getParent();
+ tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
+ MachineBasicBlock::iterator RematMI = MI;
+ SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
+ DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' << *RematMI);
+
+ // Replace operands
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(Ops[i]);
+ if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg) {
+ MO.setReg(NewVReg);
+ MO.setIsKill();
+ }
+ }
+ DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);
+
+ VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
+ lis_.getVNInfoAllocator());
+ NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
+ DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
+ return true;
+}
+
+/// reMaterializeAll - Try to rematerialize as many uses of li_ as possible,
+/// and trim the live ranges after.
+void InlineSpiller::reMaterializeAll() {
+ // Do a quick scan of the interval values to find if any are remattable.
+ reMattable_.clear();
+ usedValues_.clear();
+ for (LiveInterval::const_vni_iterator I = li_->vni_begin(),
+ E = li_->vni_end(); I != E; ++I) {
+ VNInfo *VNI = *I;
+ if (VNI->isUnused() || !VNI->isDefAccurate())
+ continue;
+ MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
+ if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
+ continue;
+ reMattable_.insert(VNI);
+ }
+
+ // Often, no defs are remattable.
+ if (reMattable_.empty())
+ return;
+
+ // Try to remat before all uses of li_->reg.
+ bool anyRemat = false;
+ for (MachineRegisterInfo::use_nodbg_iterator
+ RI = mri_.use_nodbg_begin(li_->reg);
+ MachineInstr *MI = RI.skipInstruction();)
+ anyRemat |= reMaterializeFor(MI);
+
+ if (!anyRemat)
+ return;
+
+ // Remove any values that were completely rematted.
+ bool anyRemoved = false;
+ for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
+ E = reMattable_.end(); I != E; ++I) {
+ VNInfo *VNI = *I;
+ if (VNI->hasPHIKill() || usedValues_.count(VNI))
+ continue;
+ MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
+ DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
+ lis_.RemoveMachineInstrFromMaps(DefMI);
+ vrm_.RemoveMachineInstrFromMaps(DefMI);
+ DefMI->eraseFromParent();
+ VNI->setIsDefAccurate(false);
+ anyRemoved = true;
+ }
+
+ if (!anyRemoved)
+ return;
+
+ // Removing values may cause debug uses where li_ is not live.
+ for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(li_->reg);
+ MachineInstr *MI = RI.skipInstruction();) {
+ if (!MI->isDebugValue())
+ continue;
+ // Try to preserve the debug value if li_ is live immediately after it.
+ MachineBasicBlock::iterator NextMI = MI;
+ ++NextMI;
+ if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
+ VNInfo *VNI = li_->getVNInfoAt(lis_.getInstructionIndex(NextMI));
+ if (VNI && (VNI->hasPHIKill() || usedValues_.count(VNI)))
+ continue;
+ }
+ DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
+ MI->eraseFromParent();
+ }
+}
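
The use-list walks in reMaterializeAll rely on advancing the iterator before the current instruction may be erased; that is what the skipInstruction()-style loops buy. The same discipline with a plain std::list looks like this:

    #include <list>

    struct Instr { bool dead; };

    // Erase dead instructions while walking the list. The iterator is
    // advanced before the current element may be erased, so erasing never
    // invalidates the position we continue from.
    void eraseDead(std::list<Instr> &L) {
      for (std::list<Instr>::iterator I = L.begin(); I != L.end();) {
        std::list<Instr>::iterator Cur = I++;   // advance first
        if (Cur->dead)
          L.erase(Cur);                         // safe: I no longer refers to Cur
      }
    }
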
+
+/// If MI is a load or store of stackSlot_, it can be removed.
+bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
+ int FI = 0;
+ unsigned reg;
+ if (!(reg = tii_.isLoadFromStackSlot(MI, FI)) &&
+ !(reg = tii_.isStoreToStackSlot(MI, FI)))
+ return false;
+
+ // We have a stack access. Is it the right register and slot?
+ if (reg != li_->reg || FI != stackSlot_)
+ return false;
+
+ DEBUG(dbgs() << "Coalescing stack access: " << *MI);
+ lis_.RemoveMachineInstrFromMaps(MI);
+ MI->eraseFromParent();
+ return true;
+}
+
+/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
+/// Return true on success, and MI will be erased.
+bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
+ const SmallVectorImpl<unsigned> &Ops) {
+ // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
+ // operands.
+ SmallVector<unsigned, 8> FoldOps;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ unsigned Idx = Ops[i];
+ MachineOperand &MO = MI->getOperand(Idx);
+ if (MO.isImplicit())
+ continue;
+ // FIXME: Teach targets to deal with subregs.
+ if (MO.getSubReg())
+ return false;
+ // Tied use operands should not be passed to foldMemoryOperand.
+ if (!MI->isRegTiedToDefOperand(Idx))
+ FoldOps.push_back(Idx);
+ }
+
+ MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
+ if (!FoldMI)
+ return false;
+ lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
+ vrm_.addSpillSlotUse(stackSlot_, FoldMI);
+ MI->eraseFromParent();
+ DEBUG(dbgs() << "\tfolded: " << *FoldMI);
+ return true;
+}
+
+/// insertReload - Insert a reload of NewLI.reg before MI.
+void InlineSpiller::insertReload(LiveInterval &NewLI,
+ MachineBasicBlock::iterator MI) {
+ MachineBasicBlock &MBB = *MI->getParent();
+ SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
+ tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
+ --MI; // Point to load instruction.
+ SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
+ vrm_.addSpillSlotUse(stackSlot_, MI);
+ DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
+ VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true,
+ lis_.getVNInfoAllocator());
+ NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
+}
+
+/// insertSpill - Insert a spill of NewLI.reg after MI.
+void InlineSpiller::insertSpill(LiveInterval &NewLI,
+ MachineBasicBlock::iterator MI) {
+ MachineBasicBlock &MBB = *MI->getParent();
+ SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
+ tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
+ --MI; // Point to store instruction.
+ SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
+ vrm_.addSpillSlotUse(stackSlot_, MI);
+ DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
+ VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
+ lis_.getVNInfoAllocator());
+ NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
+}
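
insertReload and insertSpill both lean on the convention that the insertion helpers place new code before the iterator they are given, hence the ++MI/--MI adjustments to end up pointing at the newly inserted instruction. With std::list and strings standing in for instructions, the same maneuver is:

    #include <list>
    #include <string>

    typedef std::list<std::string> Block;

    // Insert "reload" before Pos; std::list::insert returns an iterator to
    // the new element, which plays the role of the --MI above.
    Block::iterator insertReloadBefore(Block &B, Block::iterator Pos) {
      return B.insert(Pos, "reload");
    }

    // Insert "spill" after Pos by advancing one step first (the ++MI above).
    Block::iterator insertSpillAfter(Block &B, Block::iterator Pos) {
      Block::iterator After = Pos;
      ++After;
      return B.insert(After, "spill");
    }
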
+
+void InlineSpiller::spill(LiveInterval *li,
+ SmallVectorImpl<LiveInterval*> &newIntervals,
+ SmallVectorImpl<LiveInterval*> &spillIs) {
+ DEBUG(dbgs() << "Inline spilling " << *li << "\n");
+ assert(li->isSpillable() && "Attempting to spill already spilled value.");
+ assert(!li->isStackSlot() && "Trying to spill a stack slot.");
+
+ li_ = li;
+ newIntervals_ = &newIntervals;
+ rc_ = mri_.getRegClass(li->reg);
+ spillIs_ = &spillIs;
+
+ if (split())
+ return;
+
+ reMaterializeAll();
+
+ // Remat may handle everything.
+ if (li_->empty())
+ return;
+
+ stackSlot_ = vrm_.getStackSlot(li->reg);
+ if (stackSlot_ == VirtRegMap::NO_STACK_SLOT)
+ stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);
+
+ // Iterate over instructions using register.
+ for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
+ MachineInstr *MI = RI.skipInstruction();) {
+
+ // Debug values are not allowed to affect codegen.
+ if (MI->isDebugValue()) {
+ // Modify DBG_VALUE now that the value is in a spill slot.
+ uint64_t Offset = MI->getOperand(1).getImm();
+ const MDNode *MDPtr = MI->getOperand(2).getMetadata();
+ DebugLoc DL = MI->getDebugLoc();
+ if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
+ Offset, MDPtr, DL)) {
+ DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
+ MachineBasicBlock *MBB = MI->getParent();
+ MBB->insert(MBB->erase(MI), NewDV);
+ } else {
+ DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
+ MI->eraseFromParent();
+ }
+ continue;
+ }
+
+ // Stack slot accesses may coalesce away.
+ if (coalesceStackAccess(MI))
+ continue;
+
+ // Analyze instruction.
+ bool Reads, Writes;
+ SmallVector<unsigned, 8> Ops;
+ tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);
+
+ // Attempt to fold memory ops.
+ if (foldMemoryOperand(MI, Ops))
+ continue;
+
+ // Allocate interval around instruction.
+ // FIXME: Infer regclass from instruction alone.
+ unsigned NewVReg = mri_.createVirtualRegister(rc_);
+ vrm_.grow();
+ LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
+ NewLI.markNotSpillable();
+
+ if (Reads)
+ insertReload(NewLI, MI);
+
+ // Rewrite instruction operands.
+ bool hasLiveDef = false;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(Ops[i]);
+ MO.setReg(NewVReg);
+ if (MO.isUse()) {
+ if (!MI->isRegTiedToDefOperand(Ops[i]))
+ MO.setIsKill();
+ } else {
+ if (!MO.isDead())
+ hasLiveDef = true;
+ }
+ }
+
+ // FIXME: Use a second vreg if instruction has no tied ops.
+ if (Writes && hasLiveDef)
+ insertSpill(NewLI, MI);
+
+ DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
+ newIntervals.push_back(&NewLI);
+ }
+}
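
Taken together, spill() rewrites every remaining use of the spilled register the same way: give the instruction a fresh short-lived register, reload before it if it reads the value, and store after it if it defines a live value. A compact standalone sketch of that per-instruction rewrite (hypothetical Instr struct, not the LLVM classes):

    #include <list>
    #include <string>

    struct Instr { bool reads, writes; std::string text; };

    void rewriteUses(std::list<Instr> &Block) {
      for (std::list<Instr>::iterator I = Block.begin(); I != Block.end(); ++I) {
        if (!I->reads && !I->writes)
          continue;
        // Reload the spilled value into a fresh register before a reader.
        if (I->reads)
          Block.insert(I, Instr{false, false, "reload into fresh reg"});
        // Store a freshly defined value back to the stack slot right after.
        if (I->writes) {
          std::list<Instr>::iterator After = I;
          ++After;
          Block.insert(After, Instr{false, false, "spill fresh reg"});
        }
      }
    }
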
diff --git a/libclamav/c++/llvm/lib/CodeGen/IntrinsicLowering.cpp b/libclamav/c++/llvm/lib/CodeGen/IntrinsicLowering.cpp
index 87ab7ef..3852eba 100644
--- a/libclamav/c++/llvm/lib/CodeGen/IntrinsicLowering.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/IntrinsicLowering.cpp
@@ -16,6 +16,7 @@
#include "llvm/Module.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
@@ -83,6 +84,12 @@ static CallInst *ReplaceCallWith(const char *NewFn, CallInst *CI,
return NewCI;
}
+// VisualStudio defines setjmp as _setjmp
+#if defined(_MSC_VER) && defined(setjmp)
+#define setjmp_undefined_for_visual_studio
+#undef setjmp
+#endif
+
void IntrinsicLowering::AddPrototypes(Module &M) {
LLVMContext &Context = M.getContext();
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
@@ -308,21 +315,22 @@ static Value *LowerCTLZ(LLVMContext &Context, Value *V, Instruction *IP) {
static void ReplaceFPIntrinsicWithCall(CallInst *CI, const char *Fname,
const char *Dname,
const char *LDname) {
- switch (CI->getOperand(1)->getType()->getTypeID()) {
+ CallSite CS(CI);
+ switch (CI->getArgOperand(0)->getType()->getTypeID()) {
default: llvm_unreachable("Invalid type in intrinsic");
case Type::FloatTyID:
- ReplaceCallWith(Fname, CI, CI->op_begin() + 1, CI->op_end(),
+ ReplaceCallWith(Fname, CI, CS.arg_begin(), CS.arg_end(),
Type::getFloatTy(CI->getContext()));
break;
case Type::DoubleTyID:
- ReplaceCallWith(Dname, CI, CI->op_begin() + 1, CI->op_end(),
+ ReplaceCallWith(Dname, CI, CS.arg_begin(), CS.arg_end(),
Type::getDoubleTy(CI->getContext()));
break;
case Type::X86_FP80TyID:
case Type::FP128TyID:
case Type::PPC_FP128TyID:
- ReplaceCallWith(LDname, CI, CI->op_begin() + 1, CI->op_end(),
- CI->getOperand(1)->getType());
+ ReplaceCallWith(LDname, CI, CS.arg_begin(), CS.arg_end(),
+ CI->getArgOperand(0)->getType());
break;
}
}
@@ -331,15 +339,16 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
IRBuilder<> Builder(CI->getParent(), CI);
LLVMContext &Context = CI->getContext();
- Function *Callee = CI->getCalledFunction();
+ const Function *Callee = CI->getCalledFunction();
assert(Callee && "Cannot lower an indirect call!");
+ CallSite CS(CI);
switch (Callee->getIntrinsicID()) {
case Intrinsic::not_intrinsic:
- llvm_report_error("Cannot lower a call to a non-intrinsic function '"+
+ report_fatal_error("Cannot lower a call to a non-intrinsic function '"+
Callee->getName() + "'!");
default:
- llvm_report_error("Code generator does not support intrinsic function '"+
+ report_fatal_error("Code generator does not support intrinsic function '"+
Callee->getName()+"'!");
// The setjmp/longjmp intrinsics should only exist in the code if it was
@@ -347,7 +356,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
// by the lowerinvoke pass. In both cases, the right thing to do is to
// convert the call to an explicit setjmp or longjmp call.
case Intrinsic::setjmp: {
- Value *V = ReplaceCallWith("setjmp", CI, CI->op_begin() + 1, CI->op_end(),
+ Value *V = ReplaceCallWith("setjmp", CI, CS.arg_begin(), CS.arg_end(),
Type::getInt32Ty(Context));
if (!CI->getType()->isVoidTy())
CI->replaceAllUsesWith(V);
@@ -359,32 +368,32 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break;
case Intrinsic::longjmp: {
- ReplaceCallWith("longjmp", CI, CI->op_begin() + 1, CI->op_end(),
+ ReplaceCallWith("longjmp", CI, CS.arg_begin(), CS.arg_end(),
Type::getVoidTy(Context));
break;
}
case Intrinsic::siglongjmp: {
// Insert the call to abort
- ReplaceCallWith("abort", CI, CI->op_end(), CI->op_end(),
+ ReplaceCallWith("abort", CI, CS.arg_end(), CS.arg_end(),
Type::getVoidTy(Context));
break;
}
case Intrinsic::ctpop:
- CI->replaceAllUsesWith(LowerCTPOP(Context, CI->getOperand(1), CI));
+ CI->replaceAllUsesWith(LowerCTPOP(Context, CI->getArgOperand(0), CI));
break;
case Intrinsic::bswap:
- CI->replaceAllUsesWith(LowerBSWAP(Context, CI->getOperand(1), CI));
+ CI->replaceAllUsesWith(LowerBSWAP(Context, CI->getArgOperand(0), CI));
break;
case Intrinsic::ctlz:
- CI->replaceAllUsesWith(LowerCTLZ(Context, CI->getOperand(1), CI));
+ CI->replaceAllUsesWith(LowerCTLZ(Context, CI->getArgOperand(0), CI));
break;
case Intrinsic::cttz: {
// cttz(x) -> ctpop(~X & (X-1))
- Value *Src = CI->getOperand(1);
+ Value *Src = CI->getArgOperand(0);
Value *NotSrc = Builder.CreateNot(Src);
NotSrc->setName(Src->getName() + ".not");
Value *SrcM1 = ConstantInt::get(Src->getType(), 1);
@@ -445,37 +454,38 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
case Intrinsic::memcpy: {
const IntegerType *IntPtr = TD.getIntPtrType(Context);
- Value *Size = Builder.CreateIntCast(CI->getOperand(3), IntPtr,
+ Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
- Ops[0] = CI->getOperand(1);
- Ops[1] = CI->getOperand(2);
+ Ops[0] = CI->getArgOperand(0);
+ Ops[1] = CI->getArgOperand(1);
Ops[2] = Size;
- ReplaceCallWith("memcpy", CI, Ops, Ops+3, CI->getOperand(1)->getType());
+ ReplaceCallWith("memcpy", CI, Ops, Ops+3, CI->getArgOperand(0)->getType());
break;
}
case Intrinsic::memmove: {
const IntegerType *IntPtr = TD.getIntPtrType(Context);
- Value *Size = Builder.CreateIntCast(CI->getOperand(3), IntPtr,
+ Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
- Ops[0] = CI->getOperand(1);
- Ops[1] = CI->getOperand(2);
+ Ops[0] = CI->getArgOperand(0);
+ Ops[1] = CI->getArgOperand(1);
Ops[2] = Size;
- ReplaceCallWith("memmove", CI, Ops, Ops+3, CI->getOperand(1)->getType());
+ ReplaceCallWith("memmove", CI, Ops, Ops+3, CI->getArgOperand(0)->getType());
break;
}
case Intrinsic::memset: {
const IntegerType *IntPtr = TD.getIntPtrType(Context);
- Value *Size = Builder.CreateIntCast(CI->getOperand(3), IntPtr,
+ Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
- Ops[0] = CI->getOperand(1);
+ Ops[0] = CI->getArgOperand(0);
// Extend the amount to i32.
- Ops[1] = Builder.CreateIntCast(CI->getOperand(2), Type::getInt32Ty(Context),
+ Ops[1] = Builder.CreateIntCast(CI->getArgOperand(1),
+ Type::getInt32Ty(Context),
/* isSigned */ false);
Ops[2] = Size;
- ReplaceCallWith("memset", CI, Ops, Ops+3, CI->getOperand(1)->getType());
+ ReplaceCallWith("memset", CI, Ops, Ops+3, CI->getArgOperand(0)->getType());
break;
}
case Intrinsic::sqrt: {
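
The recurring getOperand(N+1) -> getArgOperand(N) changes in this file reflect that a call's raw operand list also carries the callee, so argument indices and operand indices differ by a fixed offset; the CallSite/getArgOperand accessors hide that offset. A toy model of the distinction (offset of one assumed purely for illustration):

    #include <cassert>
    #include <vector>

    struct ToyCall {
      // Operands[0] is the callee in this toy layout; the rest are arguments.
      std::vector<int> Operands;
      int getOperand(unsigned i) const { return Operands.at(i); }
      int getArgOperand(unsigned i) const { return Operands.at(i + 1); }
      unsigned getNumArgOperands() const {
        assert(!Operands.empty());
        return (unsigned)Operands.size() - 1;
      }
    };
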
diff --git a/libclamav/c++/llvm/lib/CodeGen/LLVMTargetMachine.cpp b/libclamav/c++/llvm/lib/CodeGen/LLVMTargetMachine.cpp
index 5e88865..3603802 100644
--- a/libclamav/c++/llvm/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/LLVMTargetMachine.cpp
@@ -13,16 +13,15 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/PassManager.h"
-#include "llvm/Pass.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/GCStrategy.h"
+#include "llvm/CodeGen/Passes.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetRegistry.h"
@@ -51,6 +50,9 @@ static cl::opt<bool> DisableSSC("disable-ssc", cl::Hidden,
cl::desc("Disable Stack Slot Coloring"));
static cl::opt<bool> DisableMachineLICM("disable-machine-licm", cl::Hidden,
cl::desc("Disable Machine LICM"));
+static cl::opt<bool> DisablePostRAMachineLICM("disable-postra-machine-licm",
+ cl::Hidden,
+ cl::desc("Disable Machine LICM"));
static cl::opt<bool> DisableMachineSink("disable-machine-sink", cl::Hidden,
cl::desc("Disable Machine Sinking"));
static cl::opt<bool> DisableLSR("disable-lsr", cl::Hidden,
@@ -63,13 +65,16 @@ static cl::opt<bool> PrintISelInput("print-isel-input", cl::Hidden,
cl::desc("Print LLVM IR input to isel pass"));
static cl::opt<bool> PrintGCInfo("print-gc", cl::Hidden,
cl::desc("Dump garbage collector data"));
+static cl::opt<bool> ShowMCEncoding("show-mc-encoding", cl::Hidden,
+ cl::desc("Show encoding in .s output"));
+static cl::opt<bool> ShowMCInst("show-mc-inst", cl::Hidden,
+ cl::desc("Show instruction structure in .s output"));
+static cl::opt<bool> EnableMCLogging("enable-mc-api-logging", cl::Hidden,
+ cl::desc("Enable MC API logging"));
static cl::opt<bool> VerifyMachineCode("verify-machineinstrs", cl::Hidden,
cl::desc("Verify generated machine code"),
cl::init(getenv("LLVM_VERIFY_MACHINEINSTRS")!=NULL));
-static cl::opt<bool> EnableMachineCSE("enable-machine-cse", cl::Hidden,
- cl::desc("Enable Machine CSE"));
-
static cl::opt<cl::boolOrDefault>
AsmVerbose("asm-verbose", cl::desc("Add comments to directives."),
cl::init(cl::BOU_UNSET));
@@ -80,7 +85,7 @@ static bool getVerboseAsm() {
case cl::BOU_UNSET: return TargetMachine::getAsmVerbosityDefault();
case cl::BOU_TRUE: return true;
case cl::BOU_FALSE: return false;
- }
+ }
}
// Enable or disable FastISel. Both options are needed, because
@@ -97,21 +102,19 @@ static cl::opt<bool> EnableSplitGEPGVN("split-gep-gvn", cl::Hidden,
cl::desc("Split GEPs and run no-load GVN"));
LLVMTargetMachine::LLVMTargetMachine(const Target &T,
- const std::string &TargetTriple)
- : TargetMachine(T) {
+ const std::string &Triple)
+ : TargetMachine(T), TargetTriple(Triple) {
AsmInfo = T.createAsmInfo(TargetTriple);
}
// Set the default code model for the JIT for a generic target.
// FIXME: Is small right here? or .is64Bit() ? Large : Small?
-void
-LLVMTargetMachine::setCodeModelForJIT() {
+void LLVMTargetMachine::setCodeModelForJIT() {
setCodeModel(CodeModel::Small);
}
// Set the default code model for static compilation for a generic target.
-void
-LLVMTargetMachine::setCodeModelForStatic() {
+void LLVMTargetMachine::setCodeModelForStatic() {
setCodeModel(CodeModel::Small);
}
@@ -121,68 +124,64 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
CodeGenOpt::Level OptLevel,
bool DisableVerify) {
// Add common CodeGen passes.
- if (addCommonCodeGenPasses(PM, OptLevel, DisableVerify))
+ MCContext *Context = 0;
+ if (addCommonCodeGenPasses(PM, OptLevel, DisableVerify, Context))
return true;
+ assert(Context != 0 && "Failed to get MCContext");
- OwningPtr<MCContext> Context(new MCContext());
+ const MCAsmInfo &MAI = *getMCAsmInfo();
OwningPtr<MCStreamer> AsmStreamer;
- formatted_raw_ostream *LegacyOutput;
switch (FileType) {
default: return true;
case CGFT_AssemblyFile: {
- const MCAsmInfo &MAI = *getMCAsmInfo();
MCInstPrinter *InstPrinter =
- getTarget().createMCInstPrinter(MAI.getAssemblerDialect(), MAI, Out);
- AsmStreamer.reset(createAsmStreamer(*Context, Out, MAI,
+ getTarget().createMCInstPrinter(MAI.getAssemblerDialect(), MAI);
+
+ // Create a code emitter if asked to show the encoding.
+ MCCodeEmitter *MCE = 0;
+ if (ShowMCEncoding)
+ MCE = getTarget().createCodeEmitter(*this, *Context);
+
+ AsmStreamer.reset(createAsmStreamer(*Context, Out,
getTargetData()->isLittleEndian(),
getVerboseAsm(), InstPrinter,
- /*codeemitter*/0));
- // Set the AsmPrinter's "O" to the output file.
- LegacyOutput = &Out;
+ MCE, ShowMCInst));
break;
}
case CGFT_ObjectFile: {
// Create the code emitter for the target if it exists. If not, .o file
// emission fails.
MCCodeEmitter *MCE = getTarget().createCodeEmitter(*this, *Context);
- if (MCE == 0)
+ TargetAsmBackend *TAB = getTarget().createAsmBackend(TargetTriple);
+ if (MCE == 0 || TAB == 0)
return true;
-
- AsmStreamer.reset(createMachOStreamer(*Context, Out, MCE));
-
- // Any output to the asmprinter's "O" stream is bad and needs to be fixed,
- // force it to come out stderr.
- // FIXME: this is horrible and leaks, eventually remove the raw_ostream from
- // asmprinter.
- LegacyOutput = new formatted_raw_ostream(errs());
+
+ AsmStreamer.reset(getTarget().createObjectStreamer(TargetTriple, *Context,
+ *TAB, Out, MCE,
+ hasMCRelaxAll()));
break;
}
case CGFT_Null:
// The Null output is intended for use for performance analysis and testing,
// not real users.
AsmStreamer.reset(createNullStreamer(*Context));
- // Any output to the asmprinter's "O" stream is bad and needs to be fixed,
- // force it to come out stderr.
- // FIXME: this is horrible and leaks, eventually remove the raw_ostream from
- // asmprinter.
- LegacyOutput = new formatted_raw_ostream(errs());
break;
}
-
- // Create the AsmPrinter, which takes ownership of Context and AsmStreamer
- // if successful.
- FunctionPass *Printer =
- getTarget().createAsmPrinter(*LegacyOutput, *this, *Context, *AsmStreamer,
- getMCAsmInfo());
+
+ if (EnableMCLogging)
+ AsmStreamer.reset(createLoggingStreamer(AsmStreamer.take(), errs()));
+
+ // Create the AsmPrinter, which takes ownership of AsmStreamer if successful.
+ FunctionPass *Printer = getTarget().createAsmPrinter(*this, *AsmStreamer);
if (Printer == 0)
return true;
-
- // If successful, createAsmPrinter took ownership of AsmStreamer and Context.
- Context.take(); AsmStreamer.take();
-
+
+ // If successful, createAsmPrinter took ownership of AsmStreamer.
+ AsmStreamer.take();
+
PM.add(Printer);
-
+
// Make sure the code model is set.
setCodeModelForStatic();
PM.add(createGCInfoDeleter());
@@ -201,9 +200,10 @@ bool LLVMTargetMachine::addPassesToEmitMachineCode(PassManagerBase &PM,
bool DisableVerify) {
// Make sure the code model is set.
setCodeModelForJIT();
-
+
// Add common CodeGen passes.
- if (addCommonCodeGenPasses(PM, OptLevel, DisableVerify))
+ MCContext *Ctx = 0;
+ if (addCommonCodeGenPasses(PM, OptLevel, DisableVerify, Ctx))
return true;
addCodeEmitter(PM, OptLevel, JCE);
@@ -212,20 +212,36 @@ bool LLVMTargetMachine::addPassesToEmitMachineCode(PassManagerBase &PM,
return false; // success!
}
-static void printNoVerify(PassManagerBase &PM,
- const char *Banner) {
+/// addPassesToEmitMC - Add passes to the specified pass manager to get
+/// machine code emitted with the MCJIT. This method returns true if machine
+/// code is not supported. It fills the MCContext Ctx pointer which can be
+/// used to build custom MCStreamer.
+///
+bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM,
+ MCContext *&Ctx,
+ CodeGenOpt::Level OptLevel,
+ bool DisableVerify) {
+ // Add common CodeGen passes.
+ if (addCommonCodeGenPasses(PM, OptLevel, DisableVerify, Ctx))
+ return true;
+ // Make sure the code model is set.
+ setCodeModelForJIT();
+
+ return false; // success!
+}
+
+static void printNoVerify(PassManagerBase &PM, const char *Banner) {
if (PrintMachineCode)
PM.add(createMachineFunctionPrinterPass(dbgs(), Banner));
}
static void printAndVerify(PassManagerBase &PM,
- const char *Banner,
- bool allowDoubleDefs = false) {
+ const char *Banner) {
if (PrintMachineCode)
PM.add(createMachineFunctionPrinterPass(dbgs(), Banner));
if (VerifyMachineCode)
- PM.add(createMachineVerifierPass(allowDoubleDefs));
+ PM.add(createMachineVerifierPass());
}
/// addCommonCodeGenPasses - Add standard LLVM codegen passes used for both
@@ -233,7 +249,8 @@ static void printAndVerify(PassManagerBase &PM,
///
bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
CodeGenOpt::Level OptLevel,
- bool DisableVerify) {
+ bool DisableVerify,
+ MCContext *&OutContext) {
// Standard LLVM-Level Passes.
// Before running any passes, run the verifier to determine if the input
@@ -254,10 +271,14 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
PM.add(createPrintFunctionPass("\n\n*** Code after LSR ***\n", &dbgs()));
}
+ PM.add(createGCLoweringPass());
+
+ // Make sure that no unreachable blocks are instruction selected.
+ PM.add(createUnreachableBlockEliminationPass());
+
// Turn exception handling constructs into something the code generators can
// handle.
- switch (getMCAsmInfo()->getExceptionHandlingType())
- {
+ switch (getMCAsmInfo()->getExceptionHandlingType()) {
case ExceptionHandling::SjLj:
// SjLj piggy-backs on dwarf for this bit. The cleanups done apply to both
// Dwarf EH prepare needs to be run after SjLj prepare. Otherwise,
@@ -266,26 +287,25 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
// pad is shared by multiple invokes and is also a target of a normal
// edge from elsewhere.
PM.add(createSjLjEHPass(getTargetLowering()));
- PM.add(createDwarfEHPass(getTargetLowering(), OptLevel==CodeGenOpt::None));
- break;
+ // FALLTHROUGH
case ExceptionHandling::Dwarf:
- PM.add(createDwarfEHPass(getTargetLowering(), OptLevel==CodeGenOpt::None));
+ PM.add(createDwarfEHPass(this));
break;
case ExceptionHandling::None:
PM.add(createLowerInvokePass(getTargetLowering()));
+
+ // The lower invoke pass may create unreachable code. Remove it.
+ PM.add(createUnreachableBlockEliminationPass());
break;
}
- PM.add(createGCLoweringPass());
-
- // Make sure that no unreachable blocks are instruction selected.
- PM.add(createUnreachableBlockEliminationPass());
-
if (OptLevel != CodeGenOpt::None && !DisableCGP)
PM.add(createCodeGenPreparePass(getTargetLowering()));
PM.add(createStackProtectorPass(getTargetLowering()));
+ addPreISel(PM, OptLevel);
+
if (PrintISelInput)
PM.add(createPrintFunctionPass("\n\n"
"*** Final LLVM Code input to ISel ***\n",
@@ -298,6 +318,12 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
// Standard Lower-Level Passes.
+ // Install a MachineModuleInfo class, which is an immutable pass that holds
+ // all the per-module stuff we're generating, including MCContext.
+ MachineModuleInfo *MMI = new MachineModuleInfo(*getMCAsmInfo());
+ PM.add(MMI);
+ OutContext = &MMI->getContext(); // Return the MCContext specifically by-ref.
+
// Set up a MachineFunction for the rest of CodeGen to work on.
PM.add(new MachineFunctionAnalysis(*this, OptLevel));
@@ -311,53 +337,60 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
return true;
// Print the instruction selected machine code...
- printAndVerify(PM, "After Instruction Selection",
- /* allowDoubleDefs= */ true);
+ printAndVerify(PM, "After Instruction Selection");
// Optimize PHIs before DCE: removing dead PHI cycles may make more
// instructions dead.
if (OptLevel != CodeGenOpt::None)
PM.add(createOptimizePHIsPass());
- // Delete dead machine instructions regardless of optimization level.
- PM.add(createDeadMachineInstructionElimPass());
- printAndVerify(PM, "After codegen DCE pass",
- /* allowDoubleDefs= */ true);
+ // If the target requests it, assign local variables to stack slots relative
+ // to one another and simplify frame index references where possible.
+ PM.add(createLocalStackSlotAllocationPass());
if (OptLevel != CodeGenOpt::None) {
- PM.add(createOptimizeExtsPass());
+ // With optimization, dead code should already be eliminated. However
+ // there is one known exception: lowered code for arguments that are only
+ // used by tail calls, where the tail calls reuse the incoming stack
+ // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
+ PM.add(createDeadMachineInstructionElimPass());
+ printAndVerify(PM, "After codegen DCE pass");
+
+ PM.add(createPeepholeOptimizerPass());
if (!DisableMachineLICM)
PM.add(createMachineLICMPass());
- if (EnableMachineCSE)
- PM.add(createMachineCSEPass());
+ PM.add(createMachineCSEPass());
if (!DisableMachineSink)
PM.add(createMachineSinkingPass());
- printAndVerify(PM, "After MachineLICM and MachineSinking",
- /* allowDoubleDefs= */ true);
+ printAndVerify(PM, "After Machine LICM, CSE and Sinking passes");
}
// Pre-ra tail duplication.
if (OptLevel != CodeGenOpt::None && !DisableEarlyTailDup) {
PM.add(createTailDuplicatePass(true));
- printAndVerify(PM, "After Pre-RegAlloc TailDuplicate",
- /* allowDoubleDefs= */ true);
+ printAndVerify(PM, "After Pre-RegAlloc TailDuplicate");
}
// Run pre-ra passes.
if (addPreRegAlloc(PM, OptLevel))
- printAndVerify(PM, "After PreRegAlloc passes",
- /* allowDoubleDefs= */ true);
+ printAndVerify(PM, "After PreRegAlloc passes");
// Perform register allocation.
- PM.add(createRegisterAllocator());
+ PM.add(createRegisterAllocator(OptLevel));
printAndVerify(PM, "After Register Allocation");
- // Perform stack slot coloring.
- if (OptLevel != CodeGenOpt::None && !DisableSSC) {
+ // Perform stack slot coloring and post-ra machine LICM.
+ if (OptLevel != CodeGenOpt::None) {
// FIXME: Re-enable coloring with register when it's capable of adding
// kill markers.
- PM.add(createStackSlotColoringPass(false));
- printAndVerify(PM, "After StackSlotColoring");
+ if (!DisableSSC)
+ PM.add(createStackSlotColoringPass(false));
+
+ // Run post-ra machine LICM to hoist reloads / remats.
+ if (!DisablePostRAMachineLICM)
+ PM.add(createMachineLICMPass(false));
+
+ printAndVerify(PM, "After StackSlotColoring and postra Machine LICM");
}
// Run post-ra passes.
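
The reworked addPassesToEmitFile follows a simple shape: pick a streamer implementation from the requested file type, optionally wrap it in a logging decorator, then hand ownership to the printer (the OwningPtr::take() calls). A standalone sketch of that shape with std::unique_ptr, where the concrete classes are placeholders rather than the MC API:

    #include <memory>

    struct Streamer { virtual ~Streamer() {} };
    struct AsmStreamer     : Streamer {};
    struct ObjectStreamer  : Streamer {};
    struct NullStreamer    : Streamer {};
    struct LoggingStreamer : Streamer {
      explicit LoggingStreamer(Streamer *Inner) : Inner(Inner) {}
      std::unique_ptr<Streamer> Inner;   // decorated streamer, owned
    };

    enum FileType { Assembly, Object, Null };

    std::unique_ptr<Streamer> createStreamer(FileType FT, bool EnableLogging) {
      std::unique_ptr<Streamer> S;
      switch (FT) {
      case Assembly: S.reset(new AsmStreamer());    break;
      case Object:   S.reset(new ObjectStreamer()); break;
      case Null:     S.reset(new NullStreamer());   break;
      }
      if (EnableLogging)
        S.reset(new LoggingStreamer(S.release()));  // wrap, taking ownership
      return S;
    }
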
diff --git a/libclamav/c++/llvm/lib/CodeGen/LatencyPriorityQueue.cpp b/libclamav/c++/llvm/lib/CodeGen/LatencyPriorityQueue.cpp
index f1bd573..b9527fa 100644
--- a/libclamav/c++/llvm/lib/CodeGen/LatencyPriorityQueue.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/LatencyPriorityQueue.cpp
@@ -68,7 +68,7 @@ SUnit *LatencyPriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
return OnlyAvailablePred;
}
-void LatencyPriorityQueue::push_impl(SUnit *SU) {
+void LatencyPriorityQueue::push(SUnit *SU) {
// Look at all of the successors of this node. Count the number of nodes that
// this node is the sole unscheduled node for.
unsigned NumNodesBlocking = 0;
@@ -79,7 +79,7 @@ void LatencyPriorityQueue::push_impl(SUnit *SU) {
}
NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;
- Queue.push(SU);
+ Queue.push_back(SU);
}
@@ -114,3 +114,25 @@ void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
// NumNodesSolelyBlocking value.
push(OnlyAvailablePred);
}
+
+SUnit *LatencyPriorityQueue::pop() {
+ if (empty()) return NULL;
+ std::vector<SUnit *>::iterator Best = Queue.begin();
+ for (std::vector<SUnit *>::iterator I = llvm::next(Queue.begin()),
+ E = Queue.end(); I != E; ++I)
+ if (Picker(*Best, *I))
+ Best = I;
+ SUnit *V = *Best;
+ if (Best != prior(Queue.end()))
+ std::swap(*Best, Queue.back());
+ Queue.pop_back();
+ return V;
+}
+
+void LatencyPriorityQueue::remove(SUnit *SU) {
+ assert(!Queue.empty() && "Queue is empty!");
+ std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(), SU);
+ if (I != prior(Queue.end()))
+ std::swap(*I, Queue.back());
+ Queue.pop_back();
+}
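
The new pop() and remove() turn the queue into a plain vector with a linear scan for the best element and O(1) removal by swapping the victim with the back. A standalone analogue of that container, correct whenever element order does not matter:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    template <typename T, typename Better>
    struct ScanQueue {
      std::vector<T> Q;
      Better Picker;                  // Picker(a, b) == true means b beats a

      void push(const T &V) { Q.push_back(V); }

      bool pop(T &Out) {
        if (Q.empty()) return false;
        typename std::vector<T>::iterator Best = Q.begin();
        for (typename std::vector<T>::iterator I = Best + 1; I != Q.end(); ++I)
          if (Picker(*Best, *I))
            Best = I;
        Out = *Best;
        if (Best != Q.end() - 1)
          std::swap(*Best, Q.back());  // O(1) removal: swap with the back
        Q.pop_back();
        return true;
      }

      void remove(const T &V) {
        typename std::vector<T>::iterator I = std::find(Q.begin(), Q.end(), V);
        assert(I != Q.end() && "not in queue");
        if (I != Q.end() - 1)
          std::swap(*I, Q.back());
        Q.pop_back();
      }
    };
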
diff --git a/libclamav/c++/llvm/lib/CodeGen/LiveInterval.cpp b/libclamav/c++/llvm/lib/CodeGen/LiveInterval.cpp
index 465b306..59f380a 100644
--- a/libclamav/c++/llvm/lib/CodeGen/LiveInterval.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/LiveInterval.cpp
@@ -68,6 +68,37 @@ bool LiveInterval::liveBeforeAndAt(SlotIndex I) const {
return r->end == I;
}
+/// killedAt - Return true if a live range ends at index. Note that the kill
+/// point is not contained in the half-open live range. It is usually the
+/// getDefIndex() slot following its last use.
+bool LiveInterval::killedAt(SlotIndex I) const {
+ Ranges::const_iterator r = std::lower_bound(ranges.begin(), ranges.end(), I);
+
+ // Now r points to the first interval with start >= I, or ranges.end().
+ if (r == ranges.begin())
+ return false;
+
+ --r;
+ // Now r points to the last interval with end <= I.
+ // r->end is the kill point.
+ return r->end == I;
+}
+
+/// killedInRange - Return true if the interval has kills in [Start,End).
+bool LiveInterval::killedInRange(SlotIndex Start, SlotIndex End) const {
+ Ranges::const_iterator r =
+ std::lower_bound(ranges.begin(), ranges.end(), End);
+
+ // Now r points to the first interval with start >= End, or ranges.end().
+ if (r == ranges.begin())
+ return false;
+
+ --r;
+ // Now r points to the last interval with end <= End.
+ // r->end is the kill point.
+ return r->end >= Start && r->end < End;
+}
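
killedAt and killedInRange (and the simplified overlaps() further down) share one idiom: the ranges are half-open [start, end) and sorted by start, so std::lower_bound finds the first range starting at or after the query, and stepping back one gives the range whose end is the candidate kill point. A standalone model of killedInRange:

    #include <algorithm>
    #include <vector>

    struct Range { unsigned start, end; };   // half-open [start, end)

    static bool startsBefore(const Range &R, unsigned Idx) {
      return R.start < Idx;
    }

    bool killedInRange(const std::vector<Range> &Ranges,
                       unsigned Start, unsigned End) {
      std::vector<Range>::const_iterator I =
          std::lower_bound(Ranges.begin(), Ranges.end(), End, startsBefore);
      if (I == Ranges.begin())
        return false;
      --I;                                   // last range with start < End
      return I->end >= Start && I->end < End;
    }
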
+
// overlaps - Return true if the intersection of the two live intervals is
// not empty.
//
@@ -88,6 +119,7 @@ bool LiveInterval::liveBeforeAndAt(SlotIndex I) const {
//
bool LiveInterval::overlapsFrom(const LiveInterval& other,
const_iterator StartPos) const {
+ assert(!empty() && "empty interval");
const_iterator i = begin();
const_iterator ie = end();
const_iterator j = StartPos;
@@ -130,16 +162,58 @@ bool LiveInterval::overlapsFrom(const LiveInterval& other,
/// by [Start, End).
bool LiveInterval::overlaps(SlotIndex Start, SlotIndex End) const {
assert(Start < End && "Invalid range");
- const_iterator I = begin();
- const_iterator E = end();
- const_iterator si = std::upper_bound(I, E, Start);
- const_iterator ei = std::upper_bound(I, E, End);
- if (si != ei)
- return true;
- if (si == I)
- return false;
- --si;
- return si->contains(Start);
+ const_iterator I = std::lower_bound(begin(), end(), End);
+ return I != begin() && (--I)->end > Start;
+}
+
+
+/// ValNo is dead, remove it. If it is the largest value number, just nuke it
+/// (and any other deleted values neighboring it), otherwise mark it as ~1U so
+/// it can be nuked later.
+void LiveInterval::markValNoForDeletion(VNInfo *ValNo) {
+ if (ValNo->id == getNumValNums()-1) {
+ do {
+ valnos.pop_back();
+ } while (!valnos.empty() && valnos.back()->isUnused());
+ } else {
+ ValNo->setIsUnused(true);
+ }
+}
+
+/// RenumberValues - Renumber all values in order of appearance and delete the
+/// remaining unused values.
+void LiveInterval::RenumberValues(LiveIntervals &lis) {
+ SmallPtrSet<VNInfo*, 8> Seen;
+ bool seenPHIDef = false;
+ valnos.clear();
+ for (const_iterator I = begin(), E = end(); I != E; ++I) {
+ VNInfo *VNI = I->valno;
+ if (!Seen.insert(VNI))
+ continue;
+ assert(!VNI->isUnused() && "Unused valno used by live range");
+ VNI->id = (unsigned)valnos.size();
+ valnos.push_back(VNI);
+ VNI->setHasPHIKill(false);
+ if (VNI->isPHIDef())
+ seenPHIDef = true;
+ }
+
+ // Recompute phi kill flags.
+ if (!seenPHIDef)
+ return;
+ for (const_vni_iterator I = vni_begin(), E = vni_end(); I != E; ++I) {
+ VNInfo *VNI = *I;
+ if (!VNI->isPHIDef())
+ continue;
+ const MachineBasicBlock *PHIBB = lis.getMBBFromIndex(VNI->def);
+ assert(PHIBB && "No basic block for phi-def");
+ for (MachineBasicBlock::const_pred_iterator PI = PHIBB->pred_begin(),
+ PE = PHIBB->pred_end(); PI != PE; ++PI) {
+ VNInfo *KVNI = getVNInfoAt(lis.getMBBEndIdx(*PI).getPrevSlot());
+ if (KVNI)
+ KVNI->setHasPHIKill(true);
+ }
+ }
}
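
The core of RenumberValues is a compaction pass: walk the ranges in order, keep the first appearance of each value, and hand out dense ids in that order, discarding values no range refers to. A stripped-down standalone version of that step:

    #include <set>
    #include <vector>

    struct Val { unsigned id; };
    struct Seg { Val *valno; };

    void renumber(std::vector<Seg> &Segs, std::vector<Val*> &Values) {
      Values.clear();
      std::set<Val*> Seen;
      for (std::vector<Seg>::iterator I = Segs.begin(); I != Segs.end(); ++I) {
        Val *V = I->valno;
        if (!Seen.insert(V).second)
          continue;                          // already numbered
        V->id = (unsigned)Values.size();
        Values.push_back(V);
      }
    }
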
/// extendIntervalEndTo - This method is used when we want to extend the range
@@ -149,10 +223,9 @@ bool LiveInterval::overlaps(SlotIndex Start, SlotIndex End) const {
void LiveInterval::extendIntervalEndTo(Ranges::iterator I, SlotIndex NewEnd) {
assert(I != ranges.end() && "Not a valid interval!");
VNInfo *ValNo = I->valno;
- SlotIndex OldEnd = I->end;
// Search for the first interval that we can't merge with.
- Ranges::iterator MergeTo = next(I);
+ Ranges::iterator MergeTo = llvm::next(I);
for (; MergeTo != ranges.end() && NewEnd >= MergeTo->end; ++MergeTo) {
assert(MergeTo->valno == ValNo && "Cannot merge with differing values!");
}
@@ -161,14 +234,11 @@ void LiveInterval::extendIntervalEndTo(Ranges::iterator I, SlotIndex NewEnd) {
I->end = std::max(NewEnd, prior(MergeTo)->end);
// Erase any dead ranges.
- ranges.erase(next(I), MergeTo);
-
- // Update kill info.
- ValNo->removeKills(OldEnd, I->end.getPrevSlot());
+ ranges.erase(llvm::next(I), MergeTo);
// If the newly formed range now touches the range after it and if they have
// the same value number, merge the two ranges into one range.
- Ranges::iterator Next = next(I);
+ Ranges::iterator Next = llvm::next(I);
if (Next != ranges.end() && Next->start <= I->end && Next->valno == ValNo) {
I->end = Next->end;
ranges.erase(Next);
@@ -207,7 +277,7 @@ LiveInterval::extendIntervalStartTo(Ranges::iterator I, SlotIndex NewStart) {
MergeTo->end = I->end;
}
- ranges.erase(next(MergeTo), next(I));
+ ranges.erase(llvm::next(MergeTo), llvm::next(I));
return MergeTo;
}
@@ -245,9 +315,6 @@ LiveInterval::addRangeFrom(LiveRange LR, iterator From) {
// endpoint as well.
if (End > it->end)
extendIntervalEndTo(it, End);
- else if (End < it->end)
- // Overlapping intervals, there might have been a kill here.
- it->valno->removeKill(End);
return it;
}
} else {
@@ -263,7 +330,7 @@ LiveInterval::addRangeFrom(LiveRange LR, iterator From) {
return ranges.insert(it, LR);
}
-/// isInOneLiveRange - Return true if the range specified is entirely in
+/// isInOneLiveRange - Return true if the range specified is entirely in
/// a single LiveRange of the live interval.
bool LiveInterval::isInOneLiveRange(SlotIndex Start, SlotIndex End) {
Ranges::iterator I = std::upper_bound(ranges.begin(), ranges.end(), Start);
@@ -288,7 +355,6 @@ void LiveInterval::removeRange(SlotIndex Start, SlotIndex End,
VNInfo *ValNo = I->valno;
if (I->start == Start) {
if (I->end == End) {
- ValNo->removeKills(Start, End);
if (RemoveDeadValNo) {
// Check if val# is dead.
bool isDead = true;
@@ -296,18 +362,10 @@ void LiveInterval::removeRange(SlotIndex Start, SlotIndex End,
if (II != I && II->valno == ValNo) {
isDead = false;
break;
- }
- if (isDead) {
- // Now that ValNo is dead, remove it. If it is the largest value
- // number, just nuke it (and any other deleted values neighboring it),
- // otherwise mark it as ~1U so it can be nuked later.
- if (ValNo->id == getNumValNums()-1) {
- do {
- valnos.pop_back();
- } while (!valnos.empty() && valnos.back()->isUnused());
- } else {
- ValNo->setIsUnused(true);
}
+ if (isDead) {
+ // Now that ValNo is dead, remove it.
+ markValNoForDeletion(ValNo);
}
}
@@ -320,7 +378,6 @@ void LiveInterval::removeRange(SlotIndex Start, SlotIndex End,
// Otherwise if the span we are removing is at the end of the LiveRange,
// adjust the other way.
if (I->end == End) {
- ValNo->removeKills(Start, End);
I->end = Start;
return;
}
@@ -330,7 +387,7 @@ void LiveInterval::removeRange(SlotIndex Start, SlotIndex End,
I->end = Start; // Trim the old interval.
// Insert the new one.
- ranges.insert(next(I), LiveRange(End, OldEnd, ValNo));
+ ranges.insert(llvm::next(I), LiveRange(End, OldEnd, ValNo));
}
/// removeValNo - Remove all the ranges defined by the specified value#.
@@ -344,21 +401,13 @@ void LiveInterval::removeValNo(VNInfo *ValNo) {
if (I->valno == ValNo)
ranges.erase(I);
} while (I != E);
- // Now that ValNo is dead, remove it. If it is the largest value
- // number, just nuke it (and any other deleted values neighboring it),
- // otherwise mark it as ~1U so it can be nuked later.
- if (ValNo->id == getNumValNums()-1) {
- do {
- valnos.pop_back();
- } while (!valnos.empty() && valnos.back()->isUnused());
- } else {
- ValNo->setIsUnused(true);
- }
+ // Now that ValNo is dead, remove it.
+ markValNoForDeletion(ValNo);
}
/// getLiveRangeContaining - Return the live range that contains the
/// specified index, or null if there is none.
-LiveInterval::const_iterator
+LiveInterval::const_iterator
LiveInterval::FindLiveRangeContaining(SlotIndex Idx) const {
const_iterator It = std::upper_bound(begin(), end(), Idx);
if (It != ranges.begin()) {
@@ -370,7 +419,7 @@ LiveInterval::FindLiveRangeContaining(SlotIndex Idx) const {
return end();
}
-LiveInterval::iterator
+LiveInterval::iterator
LiveInterval::FindLiveRangeContaining(SlotIndex Idx) {
iterator It = std::upper_bound(begin(), end(), Idx);
if (It != begin()) {
@@ -378,7 +427,7 @@ LiveInterval::FindLiveRangeContaining(SlotIndex Idx) {
if (It->contains(Idx))
return It;
}
-
+
return end();
}
@@ -410,11 +459,11 @@ VNInfo *LiveInterval::findDefinedVNInfoForStackInt(unsigned reg) const {
/// the intervals are not joinable, this aborts.
void LiveInterval::join(LiveInterval &Other,
const int *LHSValNoAssignments,
- const int *RHSValNoAssignments,
+ const int *RHSValNoAssignments,
SmallVector<VNInfo*, 16> &NewVNInfo,
MachineRegisterInfo *MRI) {
// Determine if any of our live range values are mapped. This is uncommon, so
- // we want to avoid the interval scan if not.
+ // we want to avoid the interval scan if not.
bool MustMapCurValNos = false;
unsigned NumVals = getNumValNums();
unsigned NumNewVals = NewVNInfo.size();
@@ -434,7 +483,7 @@ void LiveInterval::join(LiveInterval &Other,
++OutIt;
for (iterator I = OutIt, E = end(); I != E; ++I) {
OutIt->valno = NewVNInfo[LHSValNoAssignments[I->valno->id]];
-
+
// If this live range has the same value # as its immediate predecessor,
// and if they are neighbors, remove one LiveRange. This happens when we
// have [0,3:0)[4,7:1) and map 0/1 onto the same value #.
@@ -445,12 +494,12 @@ void LiveInterval::join(LiveInterval &Other,
OutIt->start = I->start;
OutIt->end = I->end;
}
-
+
// Didn't merge, on to the next one.
++OutIt;
}
}
-
+
// If we merge some live ranges, chop off the end.
ranges.erase(OutIt, end());
}
@@ -468,7 +517,7 @@ void LiveInterval::join(LiveInterval &Other,
if (VNI) {
if (NumValNos >= NumVals)
valnos.push_back(VNI);
- else
+ else
valnos[NumValNos] = VNI;
VNI->id = NumValNos++; // Renumber val#.
}
@@ -487,25 +536,13 @@ void LiveInterval::join(LiveInterval &Other,
}
ComputeJoinedWeight(Other);
-
- // Update regalloc hint if currently there isn't one.
- if (TargetRegisterInfo::isVirtualRegister(reg) &&
- TargetRegisterInfo::isVirtualRegister(Other.reg)) {
- std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(reg);
- if (Hint.first == 0 && Hint.second == 0) {
- std::pair<unsigned, unsigned> OtherHint =
- MRI->getRegAllocationHint(Other.reg);
- if (OtherHint.first || OtherHint.second)
- MRI->setRegAllocationHint(reg, OtherHint.first, OtherHint.second);
- }
- }
}
/// MergeRangesInAsValue - Merge all of the intervals in RHS into this live
/// interval as the specified value number. The LiveRanges in RHS are
/// allowed to overlap with LiveRanges in the current interval, but only if
/// the overlapping LiveRanges have the specified value number.
-void LiveInterval::MergeRangesInAsValue(const LiveInterval &RHS,
+void LiveInterval::MergeRangesInAsValue(const LiveInterval &RHS,
VNInfo *LHSValNo) {
// TODO: Make this more efficient.
iterator InsertPos = begin();
@@ -529,6 +566,7 @@ void LiveInterval::MergeValueInAsValue(
SmallVector<VNInfo*, 4> ReplacedValNos;
iterator IP = begin();
for (const_iterator I = RHS.begin(), E = RHS.end(); I != E; ++I) {
+ assert(I->valno == RHS.getValNumInfo(I->valno->id) && "Bad VNInfo");
if (I->valno != RHSValNo)
continue;
SlotIndex Start = I->start, End = I->end;
@@ -553,7 +591,7 @@ void LiveInterval::MergeValueInAsValue(
// If this trimmed away the whole range, ignore it.
if (Start == End) continue;
}
-
+
// Map the valno in the other live range to the current live range.
IP = addRangeFrom(LiveRange(Start, End, LHSValNo), IP);
}
@@ -568,18 +606,10 @@ void LiveInterval::MergeValueInAsValue(
if (I->valno == V1) {
isDead = false;
break;
- }
- if (isDead) {
- // Now that V1 is dead, remove it. If it is the largest value number,
- // just nuke it (and any other deleted values neighboring it), otherwise
- // mark it as ~1U so it can be nuked later.
- if (V1->id == getNumValNums()-1) {
- do {
- valnos.pop_back();
- } while (!valnos.empty() && valnos.back()->isUnused());
- } else {
- V1->setIsUnused(true);
}
+ if (isDead) {
+ // Now that V1 is dead, remove it.
+ markValNoForDeletion(V1);
}
}
}
@@ -591,9 +621,9 @@ void LiveInterval::MergeValueInAsValue(
/// used with an unknown definition value.
void LiveInterval::MergeInClobberRanges(LiveIntervals &li_,
const LiveInterval &Clobbers,
- BumpPtrAllocator &VNInfoAllocator) {
+ VNInfo::Allocator &VNInfoAllocator) {
if (Clobbers.empty()) return;
-
+
DenseMap<VNInfo*, VNInfo*> ValNoMaps;
VNInfo *UnusedValNo = 0;
iterator IP = begin();
@@ -658,15 +688,15 @@ void LiveInterval::MergeInClobberRanges(LiveIntervals &li_,
void LiveInterval::MergeInClobberRange(LiveIntervals &li_,
SlotIndex Start,
SlotIndex End,
- BumpPtrAllocator &VNInfoAllocator) {
+ VNInfo::Allocator &VNInfoAllocator) {
// Find a value # to use for the clobber ranges. If there is already a value#
// for unknown values, use it.
VNInfo *ClobberValNo =
getNextValue(li_.getInvalidIndex(), 0, false, VNInfoAllocator);
-
+
iterator IP = begin();
IP = std::upper_bound(IP, end(), Start);
-
+
// If the start of this range overlaps with an existing liverange, trim it.
if (IP != begin() && IP[-1].end > Start) {
Start = IP[-1].end;
@@ -679,7 +709,7 @@ void LiveInterval::MergeInClobberRange(LiveIntervals &li_,
// If this trimmed away the whole range, ignore it.
if (Start == End) return;
}
-
+
// Insert the clobber interval.
addRangeFrom(LiveRange(Start, End, ClobberValNo), IP);
}
@@ -706,7 +736,7 @@ VNInfo* LiveInterval::MergeValueNumberInto(VNInfo *V1, VNInfo *V2) {
for (iterator I = begin(); I != end(); ) {
iterator LR = I++;
if (LR->valno != V1) continue; // Not a V1 LiveRange.
-
+
// Okay, we found a V1 live range. If it had a previous, touching, V2 live
// range, extend it.
if (LR != begin()) {
@@ -720,11 +750,11 @@ VNInfo* LiveInterval::MergeValueNumberInto(VNInfo *V1, VNInfo *V2) {
LR = Prev;
}
}
-
+
// Okay, now we have a V1 or V2 live range that is maximally merged forward.
// Ensure that it is a V2 live-range.
LR->valno = V2;
-
+
// If we can merge it into later V2 live ranges, do so now. We ignore any
// following V1 live ranges, as they will be merged in subsequent iterations
// of the loop.
@@ -736,24 +766,16 @@ VNInfo* LiveInterval::MergeValueNumberInto(VNInfo *V1, VNInfo *V2) {
}
}
}
-
- // Now that V1 is dead, remove it. If it is the largest value number, just
- // nuke it (and any other deleted values neighboring it), otherwise mark it as
- // ~1U so it can be nuked later.
- if (V1->id == getNumValNums()-1) {
- do {
- valnos.pop_back();
- } while (valnos.back()->isUnused());
- } else {
- V1->setIsUnused(true);
- }
-
+
+ // Now that V1 is dead, remove it.
+ markValNoForDeletion(V1);
+
return V2;
}
void LiveInterval::Copy(const LiveInterval &RHS,
MachineRegisterInfo *MRI,
- BumpPtrAllocator &VNInfoAllocator) {
+ VNInfo::Allocator &VNInfoAllocator) {
ranges.clear();
valnos.clear();
std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(RHS.reg);
@@ -823,10 +845,12 @@ void LiveInterval::print(raw_ostream &OS, const TargetRegisterInfo *TRI) const {
else {
OS << " = ";
for (LiveInterval::Ranges::const_iterator I = ranges.begin(),
- E = ranges.end(); I != E; ++I)
- OS << *I;
+ E = ranges.end(); I != E; ++I) {
+ OS << *I;
+ assert(I->valno == getValNumInfo(I->valno->id) && "Bad VNInfo");
+ }
}
-
+
// Print value number info.
if (getNumValNums()) {
OS << " ";
@@ -843,21 +867,10 @@ void LiveInterval::print(raw_ostream &OS, const TargetRegisterInfo *TRI) const {
OS << "?";
else
OS << vni->def;
- unsigned ee = vni->kills.size();
- if (ee || vni->hasPHIKill()) {
- OS << "-(";
- for (unsigned j = 0; j != ee; ++j) {
- OS << vni->kills[j];
- if (j != ee-1)
- OS << " ";
- }
- if (vni->hasPHIKill()) {
- if (ee)
- OS << " ";
- OS << "phi";
- }
- OS << ")";
- }
+ if (vni->hasPHIKill())
+ OS << "-phikill";
+ if (vni->hasRedefByEC())
+ OS << "-ec";
}
}
}
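
Note on the LiveInterval.cpp hunks above: the same "nuke or mark unused"
clean-up appears three times in the removed lines and is folded into a single
markValNoForDeletion() call. The helper's body is not part of this file's
diff, so the following is only a sketch reconstructed from the deleted code,
not the authoritative definition:

    // Sketch, pieced together from the removed blocks above.
    void LiveInterval::markValNoForDeletion(VNInfo *ValNo) {
      // If ValNo carries the largest value number, pop it (and any already
      // unused neighbours); otherwise just flag it so it can be pruned later.
      if (ValNo->id == getNumValNums() - 1) {
        do {
          valnos.pop_back();
        } while (!valnos.empty() && valnos.back()->isUnused());
      } else {
        ValNo->setIsUnused(true);
      }
    }
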
diff --git a/libclamav/c++/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp b/libclamav/c++/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
index ba7805b..2726fc3 100644
--- a/libclamav/c++/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -47,33 +47,32 @@
using namespace llvm;
// Hidden options for help debugging.
-static cl::opt<bool> DisableReMat("disable-rematerialization",
+static cl::opt<bool> DisableReMat("disable-rematerialization",
cl::init(false), cl::Hidden);
-static cl::opt<bool> EnableFastSpilling("fast-spill",
- cl::init(false), cl::Hidden);
-
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds , "Number of loads/stores folded into instructions");
STATISTIC(numSplits , "Number of intervals split");
char LiveIntervals::ID = 0;
-static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
+INITIALIZE_PASS(LiveIntervals, "liveintervals",
+ "Live Interval Analysis", false, false);
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<AliasAnalysis>();
AU.addPreserved<AliasAnalysis>();
- AU.addPreserved<LiveVariables>();
AU.addRequired<LiveVariables>();
- AU.addPreservedID(MachineLoopInfoID);
+ AU.addPreserved<LiveVariables>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
AU.addPreservedID(MachineDominatorsID);
-
+
if (!StrongPHIElim) {
AU.addPreservedID(PHIEliminationID);
AU.addRequiredID(PHIEliminationID);
}
-
+
AU.addRequiredID(TwoAddressInstructionPassID);
AU.addPreserved<ProcessImplicitDefs>();
AU.addRequired<ProcessImplicitDefs>();
@@ -82,11 +81,6 @@ void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
}
-static void VNInfoDTor(void* Ptr)
-{
- reinterpret_cast<VNInfo*>(Ptr)->~VNInfo();
-}
-
void LiveIntervals::releaseMemory() {
// Free the live intervals themselves.
for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
@@ -95,8 +89,8 @@ void LiveIntervals::releaseMemory() {
r2iMap_.clear();
- // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
- VNInfoAllocator.Reset((unsigned)sizeof(VNInfo), alignof<VNInfo>(), VNInfoDTor);
+ // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
+ VNInfoAllocator.Reset();
while (!CloneMIs.empty()) {
MachineInstr *MI = CloneMIs.back();
CloneMIs.pop_back();
@@ -146,7 +140,7 @@ void LiveIntervals::printInstrs(raw_ostream &OS) const {
for (MachineBasicBlock::iterator mii = mbbi->begin(),
mie = mbbi->end(); mii != mie; ++mii) {
if (mii->isDebugValue())
- OS << SlotIndex::getEmptyKey() << '\t' << *mii;
+ OS << " \t" << *mii;
else
OS << getInstructionIndex(mii) << '\t' << *mii;
}
@@ -196,9 +190,9 @@ bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
const MachineInstr &MI = *I;
// Allow copies to and from li.reg
- unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
- if (SrcReg == li.reg || DstReg == li.reg)
+ if (MI.isCopy())
+ if (MI.getOperand(0).getReg() == li.reg ||
+ MI.getOperand(1).getReg() == li.reg)
continue;
// Check for operands using reg
@@ -223,10 +217,7 @@ bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
return false;
}
-/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegRef except
-/// it can check use as well.
-bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
- unsigned Reg, bool CheckUse,
+bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
for (LiveInterval::Ranges::const_iterator
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
@@ -244,12 +235,11 @@ bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
MachineOperand& MO = MI->getOperand(i);
if (!MO.isReg())
continue;
- if (MO.isUse() && !CheckUse)
- continue;
unsigned PhysReg = MO.getReg();
- if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
+ if (PhysReg == 0 || PhysReg == Reg ||
+ TargetRegisterInfo::isVirtualRegister(PhysReg))
continue;
- if (tri_->isSubRegister(Reg, PhysReg))
+ if (tri_->regsOverlap(Reg, PhysReg))
return true;
}
}
@@ -267,6 +257,41 @@ static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
}
#endif
+static
+bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
+ unsigned Reg = MI.getOperand(MOIdx).getReg();
+ for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
+ const MachineOperand &MO = MI.getOperand(i);
+ if (!MO.isReg())
+ continue;
+ if (MO.getReg() == Reg && MO.isDef()) {
+ assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
+ MI.getOperand(MOIdx).getSubReg() &&
+ (MO.getSubReg() || MO.isImplicit()));
+ return true;
+ }
+ }
+ return false;
+}
+
+/// isPartialRedef - Return true if the specified def at the specific index is
+/// partially re-defining the specified live interval. A common case of this is
+/// a definition of the sub-register.
+bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
+ LiveInterval &interval) {
+ if (!MO.getSubReg() || MO.isEarlyClobber())
+ return false;
+
+ SlotIndex RedefIndex = MIIdx.getDefIndex();
+ const LiveRange *OldLR =
+ interval.getLiveRangeContaining(RedefIndex.getUseIndex());
+ if (OldLR->valno->isDefAccurate()) {
+ MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
+ return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
+ }
+ return false;
+}
+
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
MachineBasicBlock::iterator mi,
SlotIndex MIIdx,
@@ -290,15 +315,19 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// of inputs.
if (MO.isEarlyClobber())
defIndex = MIIdx.getUseIndex();
- VNInfo *ValNo;
+
+ // Make sure the first definition is not a partial redefinition. Add an
+ // <imp-def> of the full register.
+ if (MO.getSubReg())
+ mi->addRegisterDefined(interval.reg);
+
MachineInstr *CopyMI = NULL;
- unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
- tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
+ if (mi->isCopyLike()) {
CopyMI = mi;
- // Earlyclobbers move back one.
- ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
+ }
+ VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
+ VNInfoAllocator);
assert(ValNo->id == 0 && "First value in interval is not 0?");
// Loop over all of the blocks that the vreg is defined in. There are
@@ -321,7 +350,6 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
LiveRange LR(defIndex, killIdx, ValNo);
interval.addRange(LR);
DEBUG(dbgs() << " +" << LR << "\n");
- ValNo->addKill(killIdx);
return;
}
}
@@ -341,7 +369,6 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// valno in the killing blocks.
assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
DEBUG(dbgs() << " phi-join");
- ValNo->addKill(indexes_->getTerminatorGap(mbb));
ValNo->setHasPHIKill(true);
} else {
// Iterate over all of the blocks that the variable is completely
@@ -372,23 +399,32 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
}
LiveRange LR(Start, killIdx, ValNo);
interval.addRange(LR);
- ValNo->addKill(killIdx);
DEBUG(dbgs() << " +" << LR);
}
} else {
+ if (MultipleDefsBySameMI(*mi, MOIdx))
+ // Multiple defs of the same virtual register by the same instruction.
+ // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
+ // This is likely due to elimination of REG_SEQUENCE instructions. Return
+ // here since there is nothing to do.
+ return;
+
// If this is the second time we see a virtual register definition, it
// must be due to phi elimination or two addr elimination. If this is
// the result of two address elimination, then the vreg is one of the
// def-and-use register operand.
- if (mi->isRegTiedToUseOperand(MOIdx)) {
+
+ // It may also be partial redef like this:
+ // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
+ // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
+ bool PartReDef = isPartialRedef(MIIdx, MO, interval);
+ if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
// If this is a two-address definition, then we have already processed
// the live range. The only problem is that we didn't realize there
// are actually two values in the live interval. Because of this we
// need to take the LiveRegion that defines this register and split it
// into two values.
- assert(interval.containsOneValue());
- SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
SlotIndex RedefIndex = MIIdx.getDefIndex();
if (MO.isEarlyClobber())
RedefIndex = MIIdx.getUseIndex();
@@ -396,15 +432,12 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
const LiveRange *OldLR =
interval.getLiveRangeContaining(RedefIndex.getUseIndex());
VNInfo *OldValNo = OldLR->valno;
+ SlotIndex DefIndex = OldValNo->def.getDefIndex();
- // Delete the initial value, which should be short and continuous,
+ // Delete the previous value, which should be short and continuous,
// because the 2-addr copy must be in the same MBB as the redef.
interval.removeRange(DefIndex, RedefIndex);
- // Two-address vregs should always only be redefined once. This means
- // that at this point, there should be exactly one value number in it.
- assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
-
// The new value number (#1) is defined by the instruction we claimed
// defined value #0.
VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
@@ -415,12 +448,15 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// Value#0 is now defined by the 2-addr instruction.
OldValNo->def = RedefIndex;
OldValNo->setCopy(0);
-
+
+ // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
+ if (PartReDef && mi->isCopyLike())
+ OldValNo->setCopy(&*mi);
+
// Add the new live interval which replaces the range for the input copy.
LiveRange LR(DefIndex, RedefIndex, ValNo);
DEBUG(dbgs() << " replace range with " << LR);
interval.addRange(LR);
- ValNo->addKill(RedefIndex);
// If this redefinition is dead, we need to add a dummy unit live
// range covering the def slot.
@@ -432,8 +468,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
dbgs() << " RESULT: ";
interval.print(dbgs(), tri_);
});
- } else {
- assert(lv_->isPHIJoin(interval.reg) && "Multiply defined register");
+ } else if (lv_->isPHIJoin(interval.reg)) {
// In the case of PHI elimination, each variable definition is only
// live until the end of the block. We've already taken care of the
// rest of the live range.
@@ -444,18 +479,17 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
VNInfo *ValNo;
MachineInstr *CopyMI = NULL;
- unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
- tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
+ if (mi->isCopyLike())
CopyMI = mi;
ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
-
+
SlotIndex killIndex = getMBBEndIdx(mbb);
LiveRange LR(defIndex, killIndex, ValNo);
interval.addRange(LR);
- ValNo->addKill(indexes_->getTerminatorGap(mbb));
ValNo->setHasPHIKill(true);
DEBUG(dbgs() << " phi-join +" << LR);
+ } else {
+ llvm_unreachable("Multiply defined register");
}
}
@@ -509,7 +543,7 @@ void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
end = baseIndex.getDefIndex();
goto exit;
} else {
- int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
+ int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
if (DefIdx != -1) {
if (mi->isRegTiedToUseOperand(DefIdx)) {
// Two-address instruction.
@@ -525,10 +559,10 @@ void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
goto exit;
}
}
-
+
baseIndex = baseIndex.getNextIndex();
}
-
+
// The only case we should have a dead physreg here without a killing or
// instruction where we know it's dead is if it is live-in to the function
// and never used. Another possible case is the implicit use of the
@@ -547,7 +581,6 @@ exit:
ValNo->setHasRedefByEC(true);
LiveRange LR(start, end, ValNo);
interval.addRange(LR);
- LR.valno->addKill(end);
DEBUG(dbgs() << " +" << LR << '\n');
}
@@ -561,9 +594,7 @@ void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
getOrCreateInterval(MO.getReg()));
else if (allocatableRegs_[MO.getReg()]) {
MachineInstr *CopyMI = NULL;
- unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
- tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
+ if (MI->isCopyLike())
CopyMI = MI;
handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
getOrCreateInterval(MO.getReg()), CopyMI);
@@ -571,7 +602,7 @@ void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
// If MI also modifies the sub-register explicitly, avoid processing it
// more than once. Do not pass in TRI here so it checks for exact match.
- if (!MI->modifiesRegister(*AS))
+ if (!MI->definesRegister(*AS))
handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
getOrCreateInterval(*AS), 0);
}
@@ -588,6 +619,16 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
// Look for kills, if it reaches a def before it's killed, then it shouldn't
// be considered a livein.
MachineBasicBlock::iterator mi = MBB->begin();
+ MachineBasicBlock::iterator E = MBB->end();
+ // Skip over DBG_VALUE at the start of the MBB.
+ if (mi != E && mi->isDebugValue()) {
+ while (++mi != E && mi->isDebugValue())
+ ;
+ if (mi == E)
+ // MBB is empty except for DBG_VALUE's.
+ return;
+ }
+
SlotIndex baseIndex = MIIdx;
SlotIndex start = baseIndex;
if (getInstructionFromIndex(baseIndex) == 0)
@@ -596,21 +637,13 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
SlotIndex end = baseIndex;
bool SeenDefUse = false;
- MachineBasicBlock::iterator E = MBB->end();
while (mi != E) {
- if (mi->isDebugValue()) {
- ++mi;
- if (mi != E && !mi->isDebugValue()) {
- baseIndex = indexes_->getNextNonNullIndex(baseIndex);
- }
- continue;
- }
if (mi->killsRegister(interval.reg, tri_)) {
DEBUG(dbgs() << " killed");
end = baseIndex.getDefIndex();
SeenDefUse = true;
break;
- } else if (mi->modifiesRegister(interval.reg, tri_)) {
+ } else if (mi->definesRegister(interval.reg, tri_)) {
// Another instruction redefines the register before it is ever read.
// Then the register is essentially dead at the instruction that defines
// it. Hence its interval is:
@@ -621,10 +654,11 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
break;
}
- ++mi;
- if (mi != E && !mi->isDebugValue()) {
+ while (++mi != E && mi->isDebugValue())
+ // Skip over DBG_VALUE.
+ ;
+ if (mi != E)
baseIndex = indexes_->getNextNonNullIndex(baseIndex);
- }
}
// Live-in register might not be used at all.
@@ -645,7 +679,6 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
LiveRange LR(start, end, vni);
interval.addRange(LR);
- LR.valno->addKill(end);
DEBUG(dbgs() << " +" << LR << '\n');
}
@@ -653,7 +686,7 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
-void LiveIntervals::computeIntervals() {
+void LiveIntervals::computeIntervals() {
DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
<< "********** Function: "
<< ((Value*)mf_->getFunction())->getName() << '\n');
@@ -667,10 +700,11 @@ void LiveIntervals::computeIntervals() {
// Track the index of the current machine instr.
SlotIndex MIIndex = getMBBStartIdx(MBB);
- DEBUG(dbgs() << MBB->getName() << ":\n");
+ DEBUG(dbgs() << "BB#" << MBB->getNumber()
+ << ":\t\t# derived from " << MBB->getName() << "\n");
// Create intervals for live-ins to this BB first.
- for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
+ for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
LE = MBB->livein_end(); LI != LE; ++LI) {
handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
// Multiple live-ins can alias the same register.
@@ -679,11 +713,11 @@ void LiveIntervals::computeIntervals() {
handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
true);
}
-
+
// Skip over empty initial indices.
if (getInstructionFromIndex(MIIndex) == 0)
MIIndex = indexes_->getNextNonNullIndex(MIIndex);
-
+
for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
MI != miEnd; ++MI) {
DEBUG(dbgs() << MIIndex << "\t" << *MI);
@@ -702,7 +736,7 @@ void LiveIntervals::computeIntervals() {
else if (MO.isUndef())
UndefUses.push_back(MO.getReg());
}
-
+
// Move to the next instr slot.
MIIndex = indexes_->getNextNonNullIndex(MIIndex);
}
@@ -730,37 +764,6 @@ LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
return NewLI;
}
-/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
-/// copy field and returns the source register that defines it.
-unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
- if (!VNI->getCopy())
- return 0;
-
- if (VNI->getCopy()->isExtractSubreg()) {
- // If it's extracting out of a physical register, return the sub-register.
- unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
- unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
- if (SrcSubReg == DstSubReg)
- // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
- // reg1034 can still be coalesced to EDX.
- return Reg;
- assert(DstSubReg == 0);
- Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
- }
- return Reg;
- } else if (VNI->getCopy()->isInsertSubreg() ||
- VNI->getCopy()->isSubregToReg())
- return VNI->getCopy()->getOperand(2).getReg();
-
- unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
- return SrcReg;
- llvm_unreachable("Unrecognized copy instruction!");
- return 0;
-}
-
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
@@ -778,7 +781,7 @@ unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
unsigned Reg = MO.getReg();
if (Reg == 0 || Reg == li.reg)
continue;
-
+
if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
!allocatableRegs_[Reg])
continue;
@@ -797,7 +800,7 @@ unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
SlotIndex UseIdx) const {
- SlotIndex Index = getInstructionIndex(MI);
+ SlotIndex Index = getInstructionIndex(MI);
VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
return UI != li.end() && UI->valno == ValNo;
@@ -821,8 +824,9 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
unsigned ImpUse = getReMatImplicitUse(li, MI);
if (ImpUse) {
const LiveInterval &ImpLi = getInterval(ImpUse);
- for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
- re = mri_->use_end(); ri != re; ++ri) {
+ for (MachineRegisterInfo::use_nodbg_iterator
+ ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
+ ri != re; ++ri) {
MachineInstr *UseMI = &*ri;
SlotIndex UseIdx = getInstructionIndex(UseMI);
if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
@@ -901,7 +905,7 @@ static bool FilterFoldedOps(MachineInstr *MI,
}
return false;
}
-
+
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
@@ -933,22 +937,22 @@ bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
if (DefMI && (MRInfo & VirtRegMap::isMod))
return false;
- MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
- : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
+ MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
+ : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
if (fmi) {
// Remember this instruction uses the spill slot.
if (isSS) vrm.addSpillSlotUse(Slot, fmi);
// Attempt to fold the memory reference into the instruction. If
// we can do this, we don't need to insert spill code.
- MachineBasicBlock &MBB = *MI->getParent();
if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
vrm.transferSpillPts(MI, fmi);
vrm.transferRestorePts(MI, fmi);
vrm.transferEmergencySpills(MI, fmi);
ReplaceMachineInstrInMaps(MI, fmi);
- MI = MBB.insert(MBB.erase(MI), fmi);
+ MI->eraseFromParent();
+ MI = fmi;
++numFolds;
return true;
}
@@ -1021,7 +1025,7 @@ void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
- bool TrySplit, SlotIndex index, SlotIndex end,
+ bool TrySplit, SlotIndex index, SlotIndex end,
MachineInstr *MI,
MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
unsigned Slot, int LdSlot,
@@ -1040,7 +1044,6 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
if (!mop.isReg())
continue;
unsigned Reg = mop.getReg();
- unsigned RegI = Reg;
if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
if (Reg != li.reg)
@@ -1054,7 +1057,7 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
// all of its uses are rematerialized, simply delete it.
if (MI == ReMatOrigDefMI && CanDelete) {
DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
- << MI << '\n');
+ << *MI << '\n');
RemoveMachineInstrFromMaps(MI);
vrm.RemoveMachineInstrFromMaps(MI);
MI->eraseFromParent();
@@ -1081,27 +1084,9 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
// keep the src/dst regs pinned.
//
// Keep track of whether we replace a use and/or def so that we can
- // create the spill interval with the appropriate range.
-
- HasUse = mop.isUse();
- HasDef = mop.isDef();
+ // create the spill interval with the appropriate range.
SmallVector<unsigned, 2> Ops;
- Ops.push_back(i);
- for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
- const MachineOperand &MOj = MI->getOperand(j);
- if (!MOj.isReg())
- continue;
- unsigned RegJ = MOj.getReg();
- if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
- continue;
- if (RegJ == RegI) {
- Ops.push_back(j);
- if (!MOj.isUndef()) {
- HasUse |= MOj.isUse();
- HasDef |= MOj.isDef();
- }
- }
- }
+ tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
// Create a new virtual register for the spill interval.
// Create the new register now so we can map the fold instruction
@@ -1161,7 +1146,7 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
if (mopj.isImplicit())
rewriteImplicitOps(li, MI, NewVReg, vrm);
}
-
+
if (CreatedNewVReg) {
if (DefIsReMat) {
vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
@@ -1236,16 +1221,7 @@ bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
const VNInfo *VNI,
MachineBasicBlock *MBB,
SlotIndex Idx) const {
- SlotIndex End = getMBBEndIdx(MBB);
- for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
- if (VNI->kills[j].isPHI())
- continue;
-
- SlotIndex KillIdx = VNI->kills[j];
- if (KillIdx > Idx && KillIdx <= End)
- return true;
- }
- return false;
+ return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
@@ -1254,10 +1230,7 @@ namespace {
struct RewriteInfo {
SlotIndex Index;
MachineInstr *MI;
- bool HasUse;
- bool HasDef;
- RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
- : Index(i), MI(mi), HasUse(u), HasDef(d) {}
+ RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
};
struct RewriteInfoCompare {
@@ -1297,12 +1270,30 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
MachineOperand &O = ri.getOperand();
++ri;
if (MI->isDebugValue()) {
- // Remove debug info for now.
- O.setReg(0U);
+ // Modify DBG_VALUE now that the value is in a spill slot.
+ if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
+ uint64_t Offset = MI->getOperand(1).getImm();
+ const MDNode *MDPtr = MI->getOperand(2).getMetadata();
+ DebugLoc DL = MI->getDebugLoc();
+ int FI = isLoadSS ? LdSlot : (int)Slot;
+ if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
+ Offset, MDPtr, DL)) {
+ DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
+ ReplaceMachineInstrInMaps(MI, NewDV);
+ MachineBasicBlock *MBB = MI->getParent();
+ MBB->insert(MBB->erase(MI), NewDV);
+ continue;
+ }
+ }
+
DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
+ RemoveMachineInstrFromMaps(MI);
+ vrm.RemoveMachineInstrFromMaps(MI);
+ MI->eraseFromParent();
continue;
}
- assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
+ assert(!(O.isImplicit() && O.isUse()) &&
+ "Spilling register that's used as implicit use?");
SlotIndex index = getInstructionIndex(MI);
if (index < start || index >= end)
continue;
@@ -1318,7 +1309,7 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
// easily see a situation where both registers are reloaded before
// the INSERT_SUBREG and both target registers that would overlap.
continue;
- RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
+ RewriteMIs.push_back(RewriteInfo(index, MI));
}
std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
@@ -1328,18 +1319,11 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
RewriteInfo &rwi = RewriteMIs[i];
++i;
SlotIndex index = rwi.Index;
- bool MIHasUse = rwi.HasUse;
- bool MIHasDef = rwi.HasDef;
MachineInstr *MI = rwi.MI;
// If MI def and/or use the same register multiple times, then there
// are multiple entries.
- unsigned NumUses = MIHasUse;
while (i != e && RewriteMIs[i].MI == MI) {
assert(RewriteMIs[i].Index == index);
- bool isUse = RewriteMIs[i].HasUse;
- if (isUse) ++NumUses;
- MIHasUse |= isUse;
- MIHasDef |= RewriteMIs[i].HasDef;
++i;
}
MachineBasicBlock *MBB = MI->getParent();
@@ -1364,7 +1348,8 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
// = use
// It's better to start a new interval to avoid artificially
// extending the new interval.
- if (MIHasDef && !MIHasUse) {
+ if (MI->readsWritesVirtualRegister(li.reg) ==
+ std::make_pair(false,true)) {
MBBVRegsMap.erase(MBB->getNumber());
ThisVReg = 0;
}
@@ -1522,6 +1507,12 @@ LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
MachineOperand &O = ri.getOperand();
MachineInstr *MI = &*ri;
++ri;
+ if (MI->isDebugValue()) {
+ // Remove debug info for now.
+ O.setReg(0U);
+ DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
+ continue;
+ }
if (O.isDef()) {
assert(MI->isImplicitDef() &&
"Register def was not rewritten?");
@@ -1558,7 +1549,7 @@ LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
// overflow a float. This expression behaves like 10^d for small d, but is
// more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
// headroom before overflow.
- float lc = powf(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
+ float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
return (isDef + isUse) * lc;
}
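
The getSpillWeight() hunk above only swaps powf for std::pow; the curve in the
comment is unchanged. As a quick numeric check of that comment (values worked
out by hand, not taken from the patch): lc = (1 + 100/(d+10))^d gives 1 at
depth 0, about 10.1 at depth 1, about 87 at depth 2, and about 6.7e33 at
depth 200, matching the stated float headroom. A throwaway sketch to
reproduce the numbers:

    #include <cmath>
    #include <cstdio>
    int main() {
      const unsigned depths[] = {0, 1, 2, 200};
      for (unsigned i = 0; i != 4; ++i) {
        unsigned d = depths[i];
        // Same loop-depth factor as LiveIntervals::getSpillWeight.
        float lc = std::pow(1 + (100.0f / (d + 10)), (float)d);
        std::printf("loopDepth %u -> %g\n", d, lc);
      }
      return 0;
    }
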
@@ -1570,103 +1561,9 @@ LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
}
std::vector<LiveInterval*> LiveIntervals::
-addIntervalsForSpillsFast(const LiveInterval &li,
- const MachineLoopInfo *loopInfo,
- VirtRegMap &vrm) {
- unsigned slot = vrm.assignVirt2StackSlot(li.reg);
-
- std::vector<LiveInterval*> added;
-
- assert(li.isSpillable() && "attempt to spill already spilled interval!");
-
- DEBUG({
- dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
- li.dump();
- dbgs() << '\n';
- });
-
- const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
-
- MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
- while (RI != mri_->reg_end()) {
- MachineInstr* MI = &*RI;
-
- SmallVector<unsigned, 2> Indices;
- bool HasUse = false;
- bool HasDef = false;
-
- for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
- MachineOperand& mop = MI->getOperand(i);
- if (!mop.isReg() || mop.getReg() != li.reg) continue;
-
- HasUse |= MI->getOperand(i).isUse();
- HasDef |= MI->getOperand(i).isDef();
-
- Indices.push_back(i);
- }
-
- if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
- Indices, true, slot, li.reg)) {
- unsigned NewVReg = mri_->createVirtualRegister(rc);
- vrm.grow();
- vrm.assignVirt2StackSlot(NewVReg, slot);
-
- // create a new register for this spill
- LiveInterval &nI = getOrCreateInterval(NewVReg);
- nI.markNotSpillable();
-
- // Rewrite register operands to use the new vreg.
- for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
- E = Indices.end(); I != E; ++I) {
- MI->getOperand(*I).setReg(NewVReg);
-
- if (MI->getOperand(*I).isUse())
- MI->getOperand(*I).setIsKill(true);
- }
-
- // Fill in the new live interval.
- SlotIndex index = getInstructionIndex(MI);
- if (HasUse) {
- LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
- nI.getNextValue(SlotIndex(), 0, false,
- getVNInfoAllocator()));
- DEBUG(dbgs() << " +" << LR);
- nI.addRange(LR);
- vrm.addRestorePoint(NewVReg, MI);
- }
- if (HasDef) {
- LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
- nI.getNextValue(SlotIndex(), 0, false,
- getVNInfoAllocator()));
- DEBUG(dbgs() << " +" << LR);
- nI.addRange(LR);
- vrm.addSpillPoint(NewVReg, true, MI);
- }
-
- added.push_back(&nI);
-
- DEBUG({
- dbgs() << "\t\t\t\tadded new interval: ";
- nI.dump();
- dbgs() << '\n';
- });
- }
-
-
- RI = mri_->reg_begin(li.reg);
- }
-
- return added;
-}
-
-std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
SmallVectorImpl<LiveInterval*> &SpillIs,
const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
-
- if (EnableFastSpilling)
- return addIntervalsForSpillsFast(li, loopInfo, vrm);
-
assert(li.isSpillable() && "attempt to spill already spilled interval!");
DEBUG({
@@ -1789,7 +1686,7 @@ addIntervalsForSpills(const LiveInterval &li,
if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
Slot = vrm.assignVirt2StackSlot(li.reg);
-
+
// This case only occurs when the prealloc splitter has already assigned
// a stack slot to this vreg.
else
@@ -1846,7 +1743,7 @@ addIntervalsForSpills(const LiveInterval &li,
Ops.push_back(j);
if (MO.isDef())
continue;
- if (isReMat ||
+ if (isReMat ||
(!FoundUse && !alsoFoldARestore(Id, index, VReg,
RestoreMBBs, RestoreIdxes))) {
// MI has two-address uses of the same register. If the use
@@ -1959,7 +1856,6 @@ addIntervalsForSpills(const LiveInterval &li,
for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
LiveInterval *LI = NewLIs[i];
if (!LI->empty()) {
- LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
if (!AddedKill.count(LI)) {
LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
SlotIndex LastUseIdx = LR->end.getBaseIndex();
@@ -1992,7 +1888,7 @@ bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
- // Find the largest super-register that is allocatable.
+ // Find the largest super-register that is allocatable.
unsigned BestReg = Reg;
for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
unsigned SuperReg = *AS;
@@ -2014,6 +1910,8 @@ unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
E = mri_->reg_end(); I != E; ++I) {
MachineOperand &O = I.getOperand();
MachineInstr *MI = O.getParent();
+ if (MI->isDebugValue())
+ continue;
SlotIndex Index = getInstructionIndex(MI);
if (pli.liveAt(Index))
++NumConflicts;
@@ -2054,7 +1952,7 @@ bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
E = mri_->reg_end(); I != E; ++I) {
MachineOperand &O = I.getOperand();
MachineInstr *MI = O.getParent();
- if (SeenMIs.count(MI))
+ if (MI->isDebugValue() || SeenMIs.count(MI))
continue;
SeenMIs.insert(MI);
SlotIndex Index = getInstructionIndex(MI);
@@ -2078,7 +1976,7 @@ bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
<< "constraints:\n";
MI->print(Msg, tm_);
}
- llvm_report_error(Msg.str());
+ report_fatal_error(Msg.str());
}
for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
if (!hasInterval(*AS))
@@ -2100,12 +1998,11 @@ LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
SlotIndex(getInstructionIndex(startInst).getDefIndex()),
startInst, true, getVNInfoAllocator());
VN->setHasPHIKill(true);
- VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
LiveRange LR(
SlotIndex(getInstructionIndex(startInst).getDefIndex()),
getMBBEndIdx(startInst->getParent()), VN);
Interval.addRange(LR);
-
+
return LR;
}
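
The pass-registration change above (static RegisterPass<LiveIntervals>
replaced by INITIALIZE_PASS) recurs in the LiveStacks and LiveVariables diffs
below. For a hypothetical pass the new idiom looks like the sketch below; the
class and string names are made up, and the two trailing booleans mirror
RegisterPass's "only looks at CFG" and "is analysis" flags:

    char MyExamplePass::ID = 0;
    INITIALIZE_PASS(MyExamplePass, "my-example",
                    "My Example Analysis", false, false);
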
diff --git a/libclamav/c++/llvm/lib/CodeGen/LiveStackAnalysis.cpp b/libclamav/c++/llvm/lib/CodeGen/LiveStackAnalysis.cpp
index d2f3775..b5c385f 100644
--- a/libclamav/c++/llvm/lib/CodeGen/LiveStackAnalysis.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/LiveStackAnalysis.cpp
@@ -25,7 +25,8 @@
using namespace llvm;
char LiveStacks::ID = 0;
-static RegisterPass<LiveStacks> X("livestacks", "Live Stack Slot Analysis");
+INITIALIZE_PASS(LiveStacks, "livestacks",
+ "Live Stack Slot Analysis", false, false);
void LiveStacks::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
@@ -35,7 +36,7 @@ void LiveStacks::getAnalysisUsage(AnalysisUsage &AU) const {
}
void LiveStacks::releaseMemory() {
- // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
+ // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
VNInfoAllocator.Reset();
S2IMap.clear();
S2RCMap.clear();
diff --git a/libclamav/c++/llvm/lib/CodeGen/LiveVariables.cpp b/libclamav/c++/llvm/lib/CodeGen/LiveVariables.cpp
index 519990e..375307b 100644
--- a/libclamav/c++/llvm/lib/CodeGen/LiveVariables.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/LiveVariables.cpp
@@ -42,7 +42,8 @@
using namespace llvm;
char LiveVariables::ID = 0;
-static RegisterPass<LiveVariables> X("livevars", "Live Variable Analysis");
+INITIALIZE_PASS(LiveVariables, "livevars",
+ "Live Variable Analysis", false, false);
void LiveVariables::getAnalysisUsage(AnalysisUsage &AU) const {
@@ -286,7 +287,7 @@ MachineInstr *LiveVariables::FindLastRefOrPartRef(unsigned Reg) {
MachineInstr *LastDef = PhysRegDef[Reg];
MachineInstr *LastUse = PhysRegUse[Reg];
if (!LastDef && !LastUse)
- return false;
+ return 0;
MachineInstr *LastRefOrPartRef = LastUse ? LastUse : LastDef;
unsigned LastRefOrPartRefDist = DistanceMap[LastRefOrPartRef];
@@ -482,21 +483,6 @@ void LiveVariables::UpdatePhysRegDefs(MachineInstr *MI,
}
}
-namespace {
- struct RegSorter {
- const TargetRegisterInfo *TRI;
-
- RegSorter(const TargetRegisterInfo *tri) : TRI(tri) { }
- bool operator()(unsigned A, unsigned B) {
- if (TRI->isSubRegister(A, B))
- return true;
- else if (TRI->isSubRegister(B, A))
- return false;
- return A < B;
- }
- };
-}
-
bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
MRI = &mf.getRegInfo();
@@ -531,7 +517,7 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
// Mark live-in registers as live-in.
SmallVector<unsigned, 4> Defs;
- for (MachineBasicBlock::const_livein_iterator II = MBB->livein_begin(),
+ for (MachineBasicBlock::livein_iterator II = MBB->livein_begin(),
EE = MBB->livein_end(); II != EE; ++II) {
assert(TargetRegisterInfo::isPhysicalRegister(*II) &&
"Cannot have a live-in virtual register!");
@@ -556,17 +542,21 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
if (MI->isPHI())
NumOperandsToProcess = 1;
+ // Clear kill and dead markers. LV will recompute them.
SmallVector<unsigned, 4> UseRegs;
SmallVector<unsigned, 4> DefRegs;
for (unsigned i = 0; i != NumOperandsToProcess; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
+ MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || MO.getReg() == 0)
continue;
unsigned MOReg = MO.getReg();
- if (MO.isUse())
+ if (MO.isUse()) {
+ MO.setIsKill(false);
UseRegs.push_back(MOReg);
- if (MO.isDef())
+ } else /*MO.isDef()*/ {
+ MO.setIsDead(false);
DefRegs.push_back(MOReg);
+ }
}
// Process all uses.
@@ -605,7 +595,12 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
// Finally, if the last instruction in the block is a return, make sure to
// mark it as using all of the live-out values in the function.
- if (!MBB->empty() && MBB->back().getDesc().isReturn()) {
+ // Things marked both call and return are tail calls; do not do this for
+ // them. The tail callee need not take the same registers as input
+ // that it produces as output, and there are dependencies for its input
+ // registers elsewhere.
+ if (!MBB->empty() && MBB->back().getDesc().isReturn()
+ && !MBB->back().getDesc().isCall()) {
MachineInstr *Ret = &MBB->back();
for (MachineRegisterInfo::liveout_iterator
diff --git a/libclamav/c++/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp b/libclamav/c++/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
new file mode 100644
index 0000000..7e366f0
--- /dev/null
+++ b/libclamav/c++/llvm/lib/CodeGen/LocalStackSlotAllocation.cpp
@@ -0,0 +1,354 @@
+//===- LocalStackSlotAllocation.cpp - Pre-allocate locals to stack slots --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass assigns local frame indices to stack slots relative to one another
+// and allocates additional base registers to access them when the target
+// estimates they are likely to be out of range of stack pointer and frame
+// pointer relative addressing.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "localstackalloc"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetFrameInfo.h"
+
+using namespace llvm;
+
+STATISTIC(NumAllocations, "Number of frame indices allocated into local block");
+STATISTIC(NumBaseRegisters, "Number of virtual frame base registers allocated");
+STATISTIC(NumReplacements, "Number of frame indices references replaced");
+
+namespace {
+ class FrameRef {
+ MachineBasicBlock::iterator MI; // Instr referencing the frame
+ int64_t LocalOffset; // Local offset of the frame idx referenced
+ public:
+ FrameRef(MachineBasicBlock::iterator I, int64_t Offset) :
+ MI(I), LocalOffset(Offset) {}
+ bool operator<(const FrameRef &RHS) const {
+ return LocalOffset < RHS.LocalOffset;
+ }
+ MachineBasicBlock::iterator getMachineInstr() { return MI; }
+ };
+
+ class LocalStackSlotPass: public MachineFunctionPass {
+ SmallVector<int64_t,16> LocalOffsets;
+
+ void AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx, int64_t &Offset,
+ bool StackGrowsDown, unsigned &MaxAlign);
+ void calculateFrameObjectOffsets(MachineFunction &Fn);
+ bool insertFrameReferenceRegisters(MachineFunction &Fn);
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ explicit LocalStackSlotPass() : MachineFunctionPass(ID) { }
+ bool runOnMachineFunction(MachineFunction &MF);
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+ const char *getPassName() const {
+ return "Local Stack Slot Allocation";
+ }
+
+ private:
+ };
+} // end anonymous namespace
+
+char LocalStackSlotPass::ID = 0;
+
+FunctionPass *llvm::createLocalStackSlotAllocationPass() {
+ return new LocalStackSlotPass();
+}
+
+bool LocalStackSlotPass::runOnMachineFunction(MachineFunction &MF) {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+ unsigned LocalObjectCount = MFI->getObjectIndexEnd();
+
+ // If the target doesn't want/need this pass, or if there are no locals
+ // to consider, early exit.
+ if (!TRI->requiresVirtualBaseRegisters(MF) || LocalObjectCount == 0)
+ return true;
+
+ // Make sure we have enough space to store the local offsets.
+ LocalOffsets.resize(MFI->getObjectIndexEnd());
+
+ // Lay out the local blob.
+ calculateFrameObjectOffsets(MF);
+
+ // Insert virtual base registers to resolve frame index references.
+ bool UsedBaseRegs = insertFrameReferenceRegisters(MF);
+
+ // Tell MFI whether any base registers were allocated. PEI will only
+ // want to use the local block allocations from this pass if there were any.
+ // Otherwise, PEI can do a better job of getting the alignment right
+ // without a hole at the start since it knows the alignment of the stack
+ // at the start of local allocation, and this pass doesn't.
+ MFI->setUseLocalStackAllocationBlock(UsedBaseRegs);
+
+ return true;
+}
+
+/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
+void LocalStackSlotPass::AdjustStackOffset(MachineFrameInfo *MFI,
+ int FrameIdx, int64_t &Offset,
+ bool StackGrowsDown,
+ unsigned &MaxAlign) {
+ // If the stack grows down, add the object size to find the lowest address.
+ if (StackGrowsDown)
+ Offset += MFI->getObjectSize(FrameIdx);
+
+ unsigned Align = MFI->getObjectAlignment(FrameIdx);
+
+ // If the alignment of this object is greater than that of the stack, then
+ // increase the stack alignment to match.
+ MaxAlign = std::max(MaxAlign, Align);
+
+ // Adjust to alignment boundary.
+ Offset = (Offset + Align - 1) / Align * Align;
+
+ int64_t LocalOffset = StackGrowsDown ? -Offset : Offset;
+ DEBUG(dbgs() << "Allocate FI(" << FrameIdx << ") to local offset "
+ << LocalOffset << "\n");
+ // Keep the offset available for base register allocation
+ LocalOffsets[FrameIdx] = LocalOffset;
+ // And tell MFI about it for PEI to use later
+ MFI->mapLocalFrameObject(FrameIdx, LocalOffset);
+
+ if (!StackGrowsDown)
+ Offset += MFI->getObjectSize(FrameIdx);
+
+ ++NumAllocations;
+}
+
+/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
+/// abstract stack objects.
+///
+void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
+ // Loop over all of the stack objects, assigning sequential addresses...
+ MachineFrameInfo *MFI = Fn.getFrameInfo();
+ const TargetFrameInfo &TFI = *Fn.getTarget().getFrameInfo();
+ bool StackGrowsDown =
+ TFI.getStackGrowthDirection() == TargetFrameInfo::StackGrowsDown;
+ int64_t Offset = 0;
+ unsigned MaxAlign = 0;
+
+ // Make sure that the stack protector comes before the local variables on the
+ // stack.
+ SmallSet<int, 16> LargeStackObjs;
+ if (MFI->getStackProtectorIndex() >= 0) {
+ AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), Offset,
+ StackGrowsDown, MaxAlign);
+
+ // Assign large stack objects first.
+ for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
+ if (MFI->isDeadObjectIndex(i))
+ continue;
+ if (MFI->getStackProtectorIndex() == (int)i)
+ continue;
+ if (!MFI->MayNeedStackProtector(i))
+ continue;
+
+ AdjustStackOffset(MFI, i, Offset, StackGrowsDown, MaxAlign);
+ LargeStackObjs.insert(i);
+ }
+ }
+
+ // Then assign frame offsets to stack objects that are not used to spill
+ // callee saved registers.
+ for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
+ if (MFI->isDeadObjectIndex(i))
+ continue;
+ if (MFI->getStackProtectorIndex() == (int)i)
+ continue;
+ if (LargeStackObjs.count(i))
+ continue;
+
+ AdjustStackOffset(MFI, i, Offset, StackGrowsDown, MaxAlign);
+ }
+
+ // Remember how big this blob of stack space is
+ MFI->setLocalFrameSize(Offset);
+ MFI->setLocalFrameMaxAlign(MaxAlign);
+}
+
+static inline bool
+lookupCandidateBaseReg(const SmallVector<std::pair<unsigned, int64_t>, 8> &Regs,
+ std::pair<unsigned, int64_t> &RegOffset,
+ int64_t FrameSizeAdjust,
+ int64_t LocalFrameOffset,
+ const MachineInstr *MI,
+ const TargetRegisterInfo *TRI) {
+ unsigned e = Regs.size();
+ for (unsigned i = 0; i < e; ++i) {
+ RegOffset = Regs[i];
+ // Check if the relative offset from where the base register points
+ // to the target address is in range for the instruction.
+ int64_t Offset = FrameSizeAdjust + LocalFrameOffset - RegOffset.second;
+ if (TRI->isFrameOffsetLegal(MI, Offset))
+ return true;
+ }
+ return false;
+}
+
+bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
+ // Scan the function's instructions looking for frame index references.
+ // For each, ask the target if it wants a virtual base register for it
+ // based on what we can tell it about where the local will end up in the
+ // stack frame. If it wants one, re-use a suitable one we've previously
+ // allocated, or if there isn't one that fits the bill, allocate a new one
+ // and ask the target to create a defining instruction for it.
+ bool UsedBaseReg = false;
+
+ MachineFrameInfo *MFI = Fn.getFrameInfo();
+ const TargetRegisterInfo *TRI = Fn.getTarget().getRegisterInfo();
+ const TargetFrameInfo &TFI = *Fn.getTarget().getFrameInfo();
+ bool StackGrowsDown =
+ TFI.getStackGrowthDirection() == TargetFrameInfo::StackGrowsDown;
+ MachineBasicBlock::iterator InsertionPt = Fn.begin()->begin();
+
+ // Collect all of the instructions in the block that reference
+ // a frame index. Also store the frame index referenced to ease later
+ // lookup. (For any insn that has more than one FI reference, we arbitrarily
+ // choose the first one).
+ SmallVector<FrameRef, 64> FrameReferenceInsns;
+ // A base register definition is a register+offset pair.
+ SmallVector<std::pair<unsigned, int64_t>, 8> BaseRegisters;
+
+
+ for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
+ for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
+ MachineInstr *MI = I;
+ // Debug value instructions can't be out of range, so they don't need
+ // any updates.
+ if (MI->isDebugValue())
+ continue;
+ // For now, allocate the base register(s) within the basic block
+ // where they're used, and don't try to keep them around outside
+ // of that. It may be beneficial to try sharing them more broadly
+ // than that, but the increased register pressure makes that a
+ // tricky thing to balance. Investigate if re-materializing these
+ // becomes an issue.
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ // Consider replacing all frame index operands that reference
+ // an object allocated in the local block.
+ if (MI->getOperand(i).isFI()) {
+ // Don't try this with values not in the local block.
+ if (!MFI->isObjectPreAllocated(MI->getOperand(i).getIndex()))
+ break;
+ FrameReferenceInsns.
+ push_back(FrameRef(MI, LocalOffsets[MI->getOperand(i).getIndex()]));
+ break;
+ }
+ }
+ }
+ }
+ // Sort the frame references by local offset
+ array_pod_sort(FrameReferenceInsns.begin(), FrameReferenceInsns.end());
+
+
+ // Loop through the frame references and allocate for them as necessary.
+ for (int ref = 0, e = FrameReferenceInsns.size(); ref < e ; ++ref) {
+ MachineBasicBlock::iterator I =
+ FrameReferenceInsns[ref].getMachineInstr();
+ MachineInstr *MI = I;
+ for (unsigned idx = 0, e = MI->getNumOperands(); idx != e; ++idx) {
+ // Consider replacing all frame index operands that reference
+ // an object allocated in the local block.
+ if (MI->getOperand(idx).isFI()) {
+ int FrameIdx = MI->getOperand(idx).getIndex();
+
+ assert(MFI->isObjectPreAllocated(FrameIdx) &&
+ "Only pre-allocated locals expected!");
+
+ DEBUG(dbgs() << "Considering: " << *MI);
+ if (TRI->needsFrameBaseReg(MI, LocalOffsets[FrameIdx])) {
+ unsigned BaseReg = 0;
+ int64_t Offset = 0;
+ int64_t FrameSizeAdjust =
+ StackGrowsDown ? MFI->getLocalFrameSize() : 0;
+
+ DEBUG(dbgs() << " Replacing FI in: " << *MI);
+
+ // If we have a suitable base register available, use it; otherwise
+ // create a new one. Note that any offset encoded in the
+ // instruction itself will be taken into account by the target,
+ // so we don't have to adjust for it here when reusing a base
+ // register.
+ std::pair<unsigned, int64_t> RegOffset;
+ if (lookupCandidateBaseReg(BaseRegisters, RegOffset,
+ FrameSizeAdjust,
+ LocalOffsets[FrameIdx],
+ MI, TRI)) {
+ DEBUG(dbgs() << " Reusing base register " <<
+ RegOffset.first << "\n");
+ // We found a register to reuse.
+ BaseReg = RegOffset.first;
+ Offset = FrameSizeAdjust + LocalOffsets[FrameIdx] -
+ RegOffset.second;
+ } else {
+ // No previously defined register was in range, so create a
+ // new one.
+ int64_t InstrOffset = TRI->getFrameIndexInstrOffset(MI, idx);
+ const TargetRegisterClass *RC = TRI->getPointerRegClass();
+ BaseReg = Fn.getRegInfo().createVirtualRegister(RC);
+
+ DEBUG(dbgs() << " Materializing base register " << BaseReg <<
+ " at frame local offset " <<
+ LocalOffsets[FrameIdx] + InstrOffset << "\n");
+ // Tell the target to insert the instruction to initialize
+ // the base register.
+ TRI->materializeFrameBaseRegister(InsertionPt, BaseReg,
+ FrameIdx, InstrOffset);
+
+ // The base register already includes any offset specified
+ // by the instruction, so account for that so it doesn't get
+ // applied twice.
+ Offset = -InstrOffset;
+
+ int64_t BaseOffset = FrameSizeAdjust + LocalOffsets[FrameIdx] +
+ InstrOffset;
+ BaseRegisters.push_back(
+ std::pair<unsigned, int64_t>(BaseReg, BaseOffset));
+ ++NumBaseRegisters;
+ UsedBaseReg = true;
+ }
+ assert(BaseReg != 0 && "Unable to allocate virtual base register!");
+
+ // Modify the instruction to use the new base register rather
+ // than the frame index operand.
+ TRI->resolveFrameIndex(I, BaseReg, Offset);
+ DEBUG(dbgs() << "Resolved: " << *MI);
+
+ ++NumReplacements;
+ }
+ }
+ }
+ }
+ return UsedBaseReg;
+}
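
Note on the allocation loop above: a base register is reused whenever the wanted local offset lands within the offset range the target can still encode relative to an already materialized (register, offset) pair; otherwise a new base register is created and remembered for later references. Below is a minimal standalone C++ sketch of that reuse check, with illustrative names and a fixed signed range standing in for the target-specific limits; it is not code from this patch.

#include <cstdint>
#include <utility>
#include <vector>

// One materialized base register: (virtual register number, frame offset it points at).
typedef std::pair<unsigned, int64_t> BaseRegDef;

// Return true and set RegOffset if some already-defined base register can reach
// WantedOffset within the assumed encodable range [MinOff, MaxOff].
static bool findReusableBaseReg(const std::vector<BaseRegDef> &BaseRegs,
                                int64_t WantedOffset,
                                int64_t MinOff, int64_t MaxOff,
                                BaseRegDef &RegOffset) {
  for (size_t i = 0, e = BaseRegs.size(); i != e; ++i) {
    int64_t Delta = WantedOffset - BaseRegs[i].second; // offset left to encode
    if (Delta >= MinOff && Delta <= MaxOff) {
      RegOffset = BaseRegs[i];
      return true;
    }
  }
  return false; // caller materializes a new base register and records it
}

int main() {
  std::vector<BaseRegDef> BaseRegs;
  BaseRegs.push_back(BaseRegDef(1001u, 0));    // %vreg1001 = frame base + 0
  BaseRegs.push_back(BaseRegDef(1002u, 4096)); // %vreg1002 = frame base + 4096

  BaseRegDef Hit;
  // A reference at local offset 4100 reuses %vreg1002 with a residual offset of 4.
  if (findReusableBaseReg(BaseRegs, 4100, -256, 255, Hit))
    return (Hit.first == 1002u) ? 0 : 1;
  return 1;
}
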
diff --git a/libclamav/c++/llvm/lib/CodeGen/LowerSubregs.cpp b/libclamav/c++/llvm/lib/CodeGen/LowerSubregs.cpp
index b4ef648..ad1c537 100644
--- a/libclamav/c++/llvm/lib/CodeGen/LowerSubregs.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/LowerSubregs.cpp
@@ -36,7 +36,7 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
- LowerSubregsInstructionPass() : MachineFunctionPass(&ID) {}
+ LowerSubregsInstructionPass() : MachineFunctionPass(ID) {}
const char *getPassName() const {
return "Subregister lowering instruction pass";
@@ -53,15 +53,12 @@ namespace {
bool runOnMachineFunction(MachineFunction&);
private:
- bool LowerExtract(MachineInstr *MI);
- bool LowerInsert(MachineInstr *MI);
bool LowerSubregToReg(MachineInstr *MI);
+ bool LowerCopy(MachineInstr *MI);
void TransferDeadFlag(MachineInstr *MI, unsigned DstReg,
const TargetRegisterInfo *TRI);
- void TransferKillFlag(MachineInstr *MI, unsigned SrcReg,
- const TargetRegisterInfo *TRI,
- bool AddIfNotFound = false);
+ void TransferImplicitDefs(MachineInstr *MI);
};
char LowerSubregsInstructionPass::ID = 0;
@@ -83,92 +80,36 @@ LowerSubregsInstructionPass::TransferDeadFlag(MachineInstr *MI,
if (MII->addRegisterDead(DstReg, TRI))
break;
assert(MII != MI->getParent()->begin() &&
- "copyRegToReg output doesn't reference destination register!");
+ "copyPhysReg output doesn't reference destination register!");
}
}
-/// TransferKillFlag - MI is a pseudo-instruction with SrcReg killed,
-/// and the lowered replacement instructions immediately precede it.
-/// Mark the replacement instructions with the kill flag.
+/// TransferImplicitDefs - MI is a pseudo-instruction, and the lowered
+/// replacement instructions immediately precede it. Copy any implicit-def
+/// operands from MI to the replacement instruction.
void
-LowerSubregsInstructionPass::TransferKillFlag(MachineInstr *MI,
- unsigned SrcReg,
- const TargetRegisterInfo *TRI,
- bool AddIfNotFound) {
- for (MachineBasicBlock::iterator MII =
- prior(MachineBasicBlock::iterator(MI)); ; --MII) {
- if (MII->addRegisterKilled(SrcReg, TRI, AddIfNotFound))
- break;
- assert(MII != MI->getParent()->begin() &&
- "copyRegToReg output doesn't reference source register!");
+LowerSubregsInstructionPass::TransferImplicitDefs(MachineInstr *MI) {
+ MachineBasicBlock::iterator CopyMI = MI;
+ --CopyMI;
+
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isImplicit() || MO.isUse())
+ continue;
+ CopyMI->addOperand(MachineOperand::CreateReg(MO.getReg(), true, true));
}
}
-bool LowerSubregsInstructionPass::LowerExtract(MachineInstr *MI) {
- MachineBasicBlock *MBB = MI->getParent();
-
- assert(MI->getOperand(0).isReg() && MI->getOperand(0).isDef() &&
- MI->getOperand(1).isReg() && MI->getOperand(1).isUse() &&
- MI->getOperand(2).isImm() && "Malformed extract_subreg");
-
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned SuperReg = MI->getOperand(1).getReg();
- unsigned SubIdx = MI->getOperand(2).getImm();
- unsigned SrcReg = TRI->getSubReg(SuperReg, SubIdx);
-
- assert(TargetRegisterInfo::isPhysicalRegister(SuperReg) &&
- "Extract supperg source must be a physical register");
- assert(TargetRegisterInfo::isPhysicalRegister(DstReg) &&
- "Extract destination must be in a physical register");
- assert(SrcReg && "invalid subregister index for register");
-
- DEBUG(dbgs() << "subreg: CONVERTING: " << *MI);
-
- if (SrcReg == DstReg) {
- // No need to insert an identity copy instruction.
- if (MI->getOperand(1).isKill()) {
- // We must make sure the super-register gets killed. Replace the
- // instruction with KILL.
- MI->setDesc(TII->get(TargetOpcode::KILL));
- MI->RemoveOperand(2); // SubIdx
- DEBUG(dbgs() << "subreg: replace by: " << *MI);
- return true;
- }
-
- DEBUG(dbgs() << "subreg: eliminated!");
- } else {
- // Insert copy
- const TargetRegisterClass *TRCS = TRI->getPhysicalRegisterRegClass(DstReg);
- const TargetRegisterClass *TRCD = TRI->getPhysicalRegisterRegClass(SrcReg);
- bool Emitted = TII->copyRegToReg(*MBB, MI, DstReg, SrcReg, TRCD, TRCS);
- (void)Emitted;
- assert(Emitted && "Subreg and Dst must be of compatible register class");
- // Transfer the kill/dead flags, if needed.
- if (MI->getOperand(0).isDead())
- TransferDeadFlag(MI, DstReg, TRI);
- if (MI->getOperand(1).isKill())
- TransferKillFlag(MI, SuperReg, TRI, true);
- DEBUG({
- MachineBasicBlock::iterator dMI = MI;
- dbgs() << "subreg: " << *(--dMI);
- });
- }
-
- DEBUG(dbgs() << '\n');
- MBB->erase(MI);
- return true;
-}
-
bool LowerSubregsInstructionPass::LowerSubregToReg(MachineInstr *MI) {
MachineBasicBlock *MBB = MI->getParent();
assert((MI->getOperand(0).isReg() && MI->getOperand(0).isDef()) &&
MI->getOperand(1).isImm() &&
(MI->getOperand(2).isReg() && MI->getOperand(2).isUse()) &&
MI->getOperand(3).isImm() && "Invalid subreg_to_reg");
-
+
unsigned DstReg = MI->getOperand(0).getReg();
unsigned InsReg = MI->getOperand(2).getReg();
- unsigned InsSIdx = MI->getOperand(2).getSubReg();
+ assert(!MI->getOperand(2).getSubReg() && "SubIdx on physreg?");
unsigned SubIdx = MI->getOperand(3).getImm();
assert(SubIdx != 0 && "Invalid index for insert_subreg");
@@ -181,26 +122,25 @@ bool LowerSubregsInstructionPass::LowerSubregToReg(MachineInstr *MI) {
DEBUG(dbgs() << "subreg: CONVERTING: " << *MI);
- if (DstSubReg == InsReg && InsSIdx == 0) {
+ if (DstSubReg == InsReg) {
// No need to insert an identity copy instruction.
// Watch out for case like this:
- // %RAX<def> = ...
- // %RAX<def> = SUBREG_TO_REG 0, %EAX:3<kill>, 3
- // The first def is defining RAX, not EAX so the top bits were not
- // zero extended.
+ // %RAX<def> = SUBREG_TO_REG 0, %EAX<kill>, 3
+ // We must leave %RAX live.
+ if (DstReg != InsReg) {
+ MI->setDesc(TII->get(TargetOpcode::KILL));
+ MI->RemoveOperand(3); // SubIdx
+ MI->RemoveOperand(1); // Imm
+ DEBUG(dbgs() << "subreg: replace by: " << *MI);
+ return true;
+ }
DEBUG(dbgs() << "subreg: eliminated!");
} else {
- // Insert sub-register copy
- const TargetRegisterClass *TRC0= TRI->getPhysicalRegisterRegClass(DstSubReg);
- const TargetRegisterClass *TRC1= TRI->getPhysicalRegisterRegClass(InsReg);
- bool Emitted = TII->copyRegToReg(*MBB, MI, DstSubReg, InsReg, TRC0, TRC1);
- (void)Emitted;
- assert(Emitted && "Subreg and Dst must be of compatible register class");
+ TII->copyPhysReg(*MBB, MI, MI->getDebugLoc(), DstSubReg, InsReg,
+ MI->getOperand(2).isKill());
// Transfer the kill/dead flags, if needed.
if (MI->getOperand(0).isDead())
TransferDeadFlag(MI, DstSubReg, TRI);
- if (MI->getOperand(2).isKill())
- TransferKillFlag(MI, InsReg, TRI);
DEBUG({
MachineBasicBlock::iterator dMI = MI;
dbgs() << "subreg: " << *(--dMI);
@@ -212,86 +152,39 @@ bool LowerSubregsInstructionPass::LowerSubregToReg(MachineInstr *MI) {
return true;
}
-bool LowerSubregsInstructionPass::LowerInsert(MachineInstr *MI) {
- MachineBasicBlock *MBB = MI->getParent();
- assert((MI->getOperand(0).isReg() && MI->getOperand(0).isDef()) &&
- (MI->getOperand(1).isReg() && MI->getOperand(1).isUse()) &&
- (MI->getOperand(2).isReg() && MI->getOperand(2).isUse()) &&
- MI->getOperand(3).isImm() && "Invalid insert_subreg");
-
- unsigned DstReg = MI->getOperand(0).getReg();
-#ifndef NDEBUG
- unsigned SrcReg = MI->getOperand(1).getReg();
-#endif
- unsigned InsReg = MI->getOperand(2).getReg();
- unsigned SubIdx = MI->getOperand(3).getImm();
-
- assert(DstReg == SrcReg && "insert_subreg not a two-address instruction?");
- assert(SubIdx != 0 && "Invalid index for insert_subreg");
- unsigned DstSubReg = TRI->getSubReg(DstReg, SubIdx);
- assert(DstSubReg && "invalid subregister index for register");
- assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
- "Insert superreg source must be in a physical register");
- assert(TargetRegisterInfo::isPhysicalRegister(InsReg) &&
- "Inserted value must be in a physical register");
+bool LowerSubregsInstructionPass::LowerCopy(MachineInstr *MI) {
+ MachineOperand &DstMO = MI->getOperand(0);
+ MachineOperand &SrcMO = MI->getOperand(1);
- DEBUG(dbgs() << "subreg: CONVERTING: " << *MI);
-
- if (DstSubReg == InsReg) {
- // No need to insert an identity copy instruction. If the SrcReg was
- // <undef>, we need to make sure it is alive by inserting a KILL
- if (MI->getOperand(1).isUndef() && !MI->getOperand(0).isDead()) {
- MachineInstrBuilder MIB = BuildMI(*MBB, MI, MI->getDebugLoc(),
- TII->get(TargetOpcode::KILL), DstReg);
- if (MI->getOperand(2).isUndef())
- MIB.addReg(InsReg, RegState::Undef);
- else
- MIB.addReg(InsReg, RegState::Kill);
- } else {
- DEBUG(dbgs() << "subreg: eliminated!\n");
- MBB->erase(MI);
+ if (SrcMO.getReg() == DstMO.getReg()) {
+ DEBUG(dbgs() << "identity copy: " << *MI);
+ // No need to insert an identity copy instruction, but replace with a KILL
+ // if liveness is changed.
+ if (DstMO.isDead() || SrcMO.isUndef() || MI->getNumOperands() > 2) {
+ // We must make sure the super-register gets killed. Replace the
+ // instruction with KILL.
+ MI->setDesc(TII->get(TargetOpcode::KILL));
+ DEBUG(dbgs() << "replaced by: " << *MI);
return true;
}
- } else {
- // Insert sub-register copy
- const TargetRegisterClass *TRC0= TRI->getPhysicalRegisterRegClass(DstSubReg);
- const TargetRegisterClass *TRC1= TRI->getPhysicalRegisterRegClass(InsReg);
- if (MI->getOperand(2).isUndef())
- // If the source register being inserted is undef, then this becomes a
- // KILL.
- BuildMI(*MBB, MI, MI->getDebugLoc(),
- TII->get(TargetOpcode::KILL), DstSubReg);
- else {
- bool Emitted = TII->copyRegToReg(*MBB, MI, DstSubReg, InsReg, TRC0, TRC1);
- (void)Emitted;
- assert(Emitted && "Subreg and Dst must be of compatible register class");
- }
- MachineBasicBlock::iterator CopyMI = MI;
- --CopyMI;
-
- // INSERT_SUBREG is a two-address instruction so it implicitly kills SrcReg.
- if (!MI->getOperand(1).isUndef())
- CopyMI->addOperand(MachineOperand::CreateReg(DstReg, false, true, true));
-
- // Transfer the kill/dead flags, if needed.
- if (MI->getOperand(0).isDead()) {
- TransferDeadFlag(MI, DstSubReg, TRI);
- } else {
- // Make sure the full DstReg is live after this replacement.
- CopyMI->addOperand(MachineOperand::CreateReg(DstReg, true, true));
- }
-
- // Make sure the inserted register gets killed
- if (MI->getOperand(2).isKill() && !MI->getOperand(2).isUndef())
- TransferKillFlag(MI, InsReg, TRI);
+ // Vanilla identity copy.
+ MI->eraseFromParent();
+ return true;
}
- DEBUG({
- MachineBasicBlock::iterator dMI = MI;
- dbgs() << "subreg: " << *(--dMI) << "\n";
- });
+ DEBUG(dbgs() << "real copy: " << *MI);
+ TII->copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(),
+ DstMO.getReg(), SrcMO.getReg(), SrcMO.isKill());
- MBB->erase(MI);
+ if (DstMO.isDead())
+ TransferDeadFlag(MI, DstMO.getReg(), TRI);
+ if (MI->getNumOperands() > 2)
+ TransferImplicitDefs(MI);
+ DEBUG({
+ MachineBasicBlock::iterator dMI = MI;
+ dbgs() << "replaced by: " << *(--dMI);
+ });
+ MI->eraseFromParent();
return true;
}
@@ -314,12 +207,13 @@ bool LowerSubregsInstructionPass::runOnMachineFunction(MachineFunction &MF) {
mi != me;) {
MachineBasicBlock::iterator nmi = llvm::next(mi);
MachineInstr *MI = mi;
- if (MI->isExtractSubreg()) {
- MadeChange |= LowerExtract(MI);
- } else if (MI->isInsertSubreg()) {
- MadeChange |= LowerInsert(MI);
- } else if (MI->isSubregToReg()) {
+ assert(!MI->isInsertSubreg() && "INSERT_SUBREG should no longer appear");
+ assert(MI->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
+ "EXTRACT_SUBREG should no longer appear");
+ if (MI->isSubregToReg()) {
MadeChange |= LowerSubregToReg(MI);
+ } else if (MI->isCopy()) {
+ MadeChange |= LowerCopy(MI);
}
mi = nmi;
}
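
The rewritten pass above only has to deal with SUBREG_TO_REG and COPY: an identity copy is simply erased unless it still carries liveness information (dead def, undef source, or extra implicit operands), in which case it becomes a KILL; a real copy is expanded through the target's copyPhysReg hook and the pseudo erased. The following standalone C++ sketch mirrors that decision structure with simplified stand-in data; names are illustrative and it is not code from this patch.

#include <cstdio>

// Minimal stand-in for the operand facts LowerCopy inspects.
struct CopyFacts {
  unsigned DstReg, SrcReg;
  bool DstIsDead, SrcIsUndef;
  unsigned NumOperands; // > 2 means extra implicit operands are attached
};

enum LoweredForm { ERASE_IDENTITY, REPLACE_WITH_KILL, EXPAND_TO_TARGET_COPY };

// Identity copies vanish unless they still change liveness; everything else
// becomes a real target copy followed by erasing the pseudo-instruction.
static LoweredForm classifyCopy(const CopyFacts &C) {
  if (C.SrcReg == C.DstReg) {
    if (C.DstIsDead || C.SrcIsUndef || C.NumOperands > 2)
      return REPLACE_WITH_KILL; // keep the liveness side effects
    return ERASE_IDENTITY;      // plain no-op copy
  }
  return EXPAND_TO_TARGET_COPY; // copyPhysReg-style expansion
}

int main() {
  CopyFacts Plain     = { 5, 5, false, false, 2 };
  CopyFacts KeepsKill = { 5, 5, true,  false, 2 };
  CopyFacts Real      = { 5, 6, false, false, 2 };
  std::printf("%d %d %d\n", classifyCopy(Plain), classifyCopy(KeepsKill),
              classifyCopy(Real)); // prints: 0 1 2
  return 0;
}
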
diff --git a/libclamav/c++/llvm/lib/CodeGen/MachineBasicBlock.cpp b/libclamav/c++/llvm/lib/CodeGen/MachineBasicBlock.cpp
index 64134ce..50f3f67 100644
--- a/libclamav/c++/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -13,7 +13,10 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/BasicBlock.h"
+#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -23,6 +26,7 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/LeakDetector.h"
#include "llvm/Support/raw_ostream.h"
@@ -41,13 +45,13 @@ MachineBasicBlock::~MachineBasicBlock() {
/// getSymbol - Return the MCSymbol for this basic block.
///
-MCSymbol *MachineBasicBlock::getSymbol(MCContext &Ctx) const {
- SmallString<60> Name;
+MCSymbol *MachineBasicBlock::getSymbol() const {
const MachineFunction *MF = getParent();
- raw_svector_ostream(Name)
- << MF->getTarget().getMCAsmInfo()->getPrivateGlobalPrefix() << "BB"
- << MF->getFunctionNumber() << '_' << getNumber();
- return Ctx.GetOrCreateSymbol(Name.str());
+ MCContext &Ctx = MF->getContext();
+ const char *Prefix = Ctx.getAsmInfo().getPrivateGlobalPrefix();
+ return Ctx.GetOrCreateSymbol(Twine(Prefix) + "BB" +
+ Twine(MF->getFunctionNumber()) + "_" +
+ Twine(getNumber()));
}
@@ -135,6 +139,13 @@ void ilist_traits<MachineInstr>::deleteNode(MachineInstr* MI) {
Parent->getParent()->DeleteMachineInstr(MI);
}
+MachineBasicBlock::iterator MachineBasicBlock::getFirstNonPHI() {
+ iterator I = begin();
+ while (I != end() && I->isPHI())
+ ++I;
+ return I;
+}
+
MachineBasicBlock::iterator MachineBasicBlock::getFirstTerminator() {
iterator I = end();
while (I != begin() && (--I)->getDesc().isTerminator())
@@ -190,7 +201,7 @@ void MachineBasicBlock::print(raw_ostream &OS) const {
const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
if (!livein_empty()) {
OS << " Live Ins:";
- for (const_livein_iterator I = livein_begin(),E = livein_end(); I != E; ++I)
+ for (livein_iterator I = livein_begin(),E = livein_end(); I != E; ++I)
OutputReg(OS, *I, TRI);
OS << '\n';
}
@@ -217,13 +228,14 @@ void MachineBasicBlock::print(raw_ostream &OS) const {
}
void MachineBasicBlock::removeLiveIn(unsigned Reg) {
- livein_iterator I = std::find(livein_begin(), livein_end(), Reg);
- assert(I != livein_end() && "Not a live in!");
+ std::vector<unsigned>::iterator I =
+ std::find(LiveIns.begin(), LiveIns.end(), Reg);
+ assert(I != LiveIns.end() && "Not a live in!");
LiveIns.erase(I);
}
bool MachineBasicBlock::isLiveIn(unsigned Reg) const {
- const_livein_iterator I = std::find(livein_begin(), livein_end(), Reg);
+ livein_iterator I = std::find(livein_begin(), livein_end(), Reg);
return I != livein_end();
}
@@ -243,6 +255,7 @@ void MachineBasicBlock::updateTerminator() {
MachineBasicBlock *TBB = 0, *FBB = 0;
SmallVector<MachineOperand, 4> Cond;
+ DebugLoc dl; // FIXME: this is nowhere
bool B = TII->AnalyzeBranch(*this, TBB, FBB, Cond);
(void) B;
assert(!B && "UpdateTerminators requires analyzable predecessors!");
@@ -257,7 +270,7 @@ void MachineBasicBlock::updateTerminator() {
// its layout successor, insert a branch.
TBB = *succ_begin();
if (!isLayoutSuccessor(TBB))
- TII->InsertBranch(*this, TBB, 0, Cond);
+ TII->InsertBranch(*this, TBB, 0, Cond, dl);
}
} else {
if (FBB) {
@@ -268,10 +281,10 @@ void MachineBasicBlock::updateTerminator() {
if (TII->ReverseBranchCondition(Cond))
return;
TII->RemoveBranch(*this);
- TII->InsertBranch(*this, FBB, 0, Cond);
+ TII->InsertBranch(*this, FBB, 0, Cond, dl);
} else if (isLayoutSuccessor(FBB)) {
TII->RemoveBranch(*this);
- TII->InsertBranch(*this, TBB, 0, Cond);
+ TII->InsertBranch(*this, TBB, 0, Cond, dl);
}
} else {
// The block has a fallthrough conditional branch.
@@ -282,14 +295,14 @@ void MachineBasicBlock::updateTerminator() {
if (TII->ReverseBranchCondition(Cond)) {
// We can't reverse the condition, add an unconditional branch.
Cond.clear();
- TII->InsertBranch(*this, MBBA, 0, Cond);
+ TII->InsertBranch(*this, MBBA, 0, Cond, dl);
return;
}
TII->RemoveBranch(*this);
- TII->InsertBranch(*this, MBBA, 0, Cond);
+ TII->InsertBranch(*this, MBBA, 0, Cond, dl);
} else if (!isLayoutSuccessor(MBBA)) {
TII->RemoveBranch(*this);
- TII->InsertBranch(*this, TBB, MBBA, Cond);
+ TII->InsertBranch(*this, TBB, MBBA, Cond, dl);
}
}
}
@@ -329,12 +342,32 @@ void MachineBasicBlock::transferSuccessors(MachineBasicBlock *fromMBB) {
if (this == fromMBB)
return;
- for (MachineBasicBlock::succ_iterator I = fromMBB->succ_begin(),
- E = fromMBB->succ_end(); I != E; ++I)
- addSuccessor(*I);
+ while (!fromMBB->succ_empty()) {
+ MachineBasicBlock *Succ = *fromMBB->succ_begin();
+ addSuccessor(Succ);
+ fromMBB->removeSuccessor(Succ);
+ }
+}
+
+void
+MachineBasicBlock::transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB) {
+ if (this == fromMBB)
+ return;
- while (!fromMBB->succ_empty())
- fromMBB->removeSuccessor(fromMBB->succ_begin());
+ while (!fromMBB->succ_empty()) {
+ MachineBasicBlock *Succ = *fromMBB->succ_begin();
+ addSuccessor(Succ);
+ fromMBB->removeSuccessor(Succ);
+
+ // Fix up any PHI nodes in the successor.
+ for (MachineBasicBlock::iterator MI = Succ->begin(), ME = Succ->end();
+ MI != ME && MI->isPHI(); ++MI)
+ for (unsigned i = 2, e = MI->getNumOperands()+1; i != e; i += 2) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (MO.getMBB() == fromMBB)
+ MO.setMBB(this);
+ }
+ }
}
bool MachineBasicBlock::isSuccessor(const MachineBasicBlock *MBB) const {
@@ -393,6 +426,104 @@ bool MachineBasicBlock::canFallThrough() {
return FBB == 0;
}
+MachineBasicBlock *
+MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
+ MachineFunction *MF = getParent();
+ DebugLoc dl; // FIXME: this is nowhere
+
+ // We may need to update this block's terminator, but we can't do that if AnalyzeBranch
+ // fails. If this uses a jump table, we won't touch it.
+ const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
+ MachineBasicBlock *TBB = 0, *FBB = 0;
+ SmallVector<MachineOperand, 4> Cond;
+ if (TII->AnalyzeBranch(*this, TBB, FBB, Cond))
+ return NULL;
+
+ MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
+ MF->insert(llvm::next(MachineFunction::iterator(this)), NMBB);
+ DEBUG(dbgs() << "Splitting critical edge:"
+ " BB#" << getNumber()
+ << " -- BB#" << NMBB->getNumber()
+ << " -- BB#" << Succ->getNumber() << '\n');
+
+ ReplaceUsesOfBlockWith(Succ, NMBB);
+ updateTerminator();
+
+ // Insert unconditional "jump Succ" instruction in NMBB if necessary.
+ NMBB->addSuccessor(Succ);
+ if (!NMBB->isLayoutSuccessor(Succ)) {
+ Cond.clear();
+ MF->getTarget().getInstrInfo()->InsertBranch(*NMBB, Succ, NULL, Cond, dl);
+ }
+
+ // Fix PHI nodes in Succ so they refer to NMBB instead of this
+ for (MachineBasicBlock::iterator i = Succ->begin(), e = Succ->end();
+ i != e && i->isPHI(); ++i)
+ for (unsigned ni = 1, ne = i->getNumOperands(); ni != ne; ni += 2)
+ if (i->getOperand(ni+1).getMBB() == this)
+ i->getOperand(ni+1).setMBB(NMBB);
+
+ if (LiveVariables *LV =
+ P->getAnalysisIfAvailable<LiveVariables>())
+ LV->addNewBlock(NMBB, this, Succ);
+
+ if (MachineDominatorTree *MDT =
+ P->getAnalysisIfAvailable<MachineDominatorTree>()) {
+ // Update dominator information.
+ MachineDomTreeNode *SucccDTNode = MDT->getNode(Succ);
+
+ bool IsNewIDom = true;
+ for (const_pred_iterator PI = Succ->pred_begin(), E = Succ->pred_end();
+ PI != E; ++PI) {
+ MachineBasicBlock *PredBB = *PI;
+ if (PredBB == NMBB)
+ continue;
+ if (!MDT->dominates(SucccDTNode, MDT->getNode(PredBB))) {
+ IsNewIDom = false;
+ break;
+ }
+ }
+
+ // We know "this" dominates the newly created basic block.
+ MachineDomTreeNode *NewDTNode = MDT->addNewBlock(NMBB, this);
+
+ // If all the other predecessors of "Succ" are dominated by "Succ" itself
+ // then the new block is the new immediate dominator of "Succ". Otherwise,
+ // the new block doesn't dominate anything.
+ if (IsNewIDom)
+ MDT->changeImmediateDominator(SucccDTNode, NewDTNode);
+ }
+
+ if (MachineLoopInfo *MLI = P->getAnalysisIfAvailable<MachineLoopInfo>())
+ if (MachineLoop *TIL = MLI->getLoopFor(this)) {
+ // If one or the other blocks were not in a loop, the new block is not
+ // either, and thus LI doesn't need to be updated.
+ if (MachineLoop *DestLoop = MLI->getLoopFor(Succ)) {
+ if (TIL == DestLoop) {
+ // Both in the same loop, the NMBB joins loop.
+ DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
+ } else if (TIL->contains(DestLoop)) {
+ // Edge from an outer loop to an inner loop. Add to the outer loop.
+ TIL->addBasicBlockToLoop(NMBB, MLI->getBase());
+ } else if (DestLoop->contains(TIL)) {
+ // Edge from an inner loop to an outer loop. Add to the outer loop.
+ DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
+ } else {
+ // Edge from two loops with no containment relation. Because these
+ // are natural loops, we know that the destination block must be the
+ // header of its loop (adding a branch into a loop elsewhere would
+ // create an irreducible loop).
+ assert(DestLoop->getHeader() == Succ &&
+ "Should not create irreducible loops!");
+ if (MachineLoop *P = DestLoop->getParentLoop())
+ P->addBasicBlockToLoop(NMBB, MLI->getBase());
+ }
+ }
+ }
+
+ return NMBB;
+}
+
/// removeFromParent - This method unlinks 'this' from the containing function,
/// and returns it, but does not delete it.
MachineBasicBlock *MachineBasicBlock::removeFromParent() {
@@ -459,54 +590,41 @@ bool MachineBasicBlock::CorrectExtraCFGEdges(MachineBasicBlock *DestA,
// conditional branch followed by an unconditional branch. DestA is the
// 'true' destination and DestB is the 'false' destination.
- bool MadeChange = false;
- bool AddedFallThrough = false;
+ bool Changed = false;
MachineFunction::iterator FallThru =
llvm::next(MachineFunction::iterator(this));
-
- if (isCond) {
- // If this block ends with a conditional branch that falls through to its
- // successor, set DestB as the successor.
- if (DestB == 0 && FallThru != getParent()->end()) {
+
+ if (DestA == 0 && DestB == 0) {
+ // Block falls through to successor.
+ DestA = FallThru;
+ DestB = FallThru;
+ } else if (DestA != 0 && DestB == 0) {
+ if (isCond)
+ // Block ends in conditional jump that falls through to successor.
DestB = FallThru;
- AddedFallThrough = true;
- }
} else {
- // If this is an unconditional branch with no explicit dest, it must just be
- // a fallthrough into DestA.
- if (DestA == 0 && FallThru != getParent()->end()) {
- DestA = FallThru;
- AddedFallThrough = true;
- }
+ assert(DestA && DestB && isCond &&
+ "CFG in a bad state. Cannot correct CFG edges");
}
-
+
+ // Remove superfluous edges. I.e., those which aren't destinations of this
+ // basic block, duplicate edges, or landing pads.
+ SmallPtrSet<const MachineBasicBlock*, 8> SeenMBBs;
MachineBasicBlock::succ_iterator SI = succ_begin();
- MachineBasicBlock *OrigDestA = DestA, *OrigDestB = DestB;
while (SI != succ_end()) {
const MachineBasicBlock *MBB = *SI;
- if (MBB == DestA) {
- DestA = 0;
- ++SI;
- } else if (MBB == DestB) {
- DestB = 0;
- ++SI;
- } else if (MBB->isLandingPad() &&
- MBB != OrigDestA && MBB != OrigDestB) {
- ++SI;
- } else {
- // Otherwise, this is a superfluous edge, remove it.
+ if (!SeenMBBs.insert(MBB) ||
+ (MBB != DestA && MBB != DestB && !MBB->isLandingPad())) {
+ // This is a superfluous edge, remove it.
SI = removeSuccessor(SI);
- MadeChange = true;
+ Changed = true;
+ } else {
+ ++SI;
}
}
- if (!AddedFallThrough)
- assert(DestA == 0 && DestB == 0 && "MachineCFG is missing edges!");
- else if (isCond)
- assert(DestA == 0 && "MachineCFG is missing edges!");
-
- return MadeChange;
+ return Changed;
}
/// findDebugLoc - find the next valid DebugLoc starting at MBBI, skipping
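
Both transferSuccessorsAndUpdatePHIs and SplitCriticalEdge above rely on the same PHI layout: after the defined register, a machine PHI's operands come in (incoming value, predecessor block) pairs, so rerouting an edge just means walking the operands two at a time and rewriting the block operand. A standalone C++ sketch of that fix-up, using a flattened stand-in for the PHI rather than LLVM's MachineInstr (illustrative only, not code from this patch):

#include <cstdio>
#include <vector>

// A machine PHI flattened to (incoming value, predecessor block number) pairs
// after the defined register -- the layout the loops above walk with a stride of 2.
struct PhiNode {
  unsigned DefReg;
  std::vector<std::pair<unsigned, int> > Incoming; // (vreg, pred block number)
};

// Reroute every incoming edge that used to come from OldPred so it now comes
// from NewPred, as done after splitting the critical edge OldPred -> successor.
static void updatePhiPreds(std::vector<PhiNode> &Phis, int OldPred, int NewPred) {
  for (size_t p = 0; p != Phis.size(); ++p)
    for (size_t i = 0; i != Phis[p].Incoming.size(); ++i)
      if (Phis[p].Incoming[i].second == OldPred)
        Phis[p].Incoming[i].second = NewPred;
}

int main() {
  PhiNode Phi;
  Phi.DefReg = 100;
  Phi.Incoming.push_back(std::make_pair(101u, 1)); // value %101 from BB#1
  Phi.Incoming.push_back(std::make_pair(102u, 2)); // value %102 from BB#2
  std::vector<PhiNode> Phis(1, Phi);

  updatePhiPreds(Phis, /*OldPred=*/1, /*NewPred=*/3); // BB#3 is the new split block
  std::printf("pred of first incoming: %d\n", Phis[0].Incoming[0].second); // 3
  return 0;
}
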
diff --git a/libclamav/c++/llvm/lib/CodeGen/MachineCSE.cpp b/libclamav/c++/llvm/lib/CodeGen/MachineCSE.cpp
index b376e3d..272b54d 100644
--- a/libclamav/c++/llvm/lib/CodeGen/MachineCSE.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/MachineCSE.cpp
@@ -20,25 +20,28 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
STATISTIC(NumCoalesces, "Number of copies coalesced");
STATISTIC(NumCSEs, "Number of common subexpression eliminated");
+STATISTIC(NumPhysCSEs, "Number of physreg defining common subexpr eliminated");
namespace {
class MachineCSE : public MachineFunctionPass {
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
- MachineRegisterInfo *MRI;
- MachineDominatorTree *DT;
AliasAnalysis *AA;
+ MachineDominatorTree *DT;
+ MachineRegisterInfo *MRI;
public:
static char ID; // Pass identification
- MachineCSE() : MachineFunctionPass(&ID), CurrVN(0) {}
+ MachineCSE() : MachineFunctionPass(ID), LookAheadLimit(5), CurrVN(0) {}
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -46,28 +49,50 @@ namespace {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
AU.addRequired<AliasAnalysis>();
+ AU.addPreservedID(MachineLoopInfoID);
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
}
+ virtual void releaseMemory() {
+ ScopeMap.clear();
+ Exps.clear();
+ }
+
private:
- unsigned CurrVN;
+ const unsigned LookAheadLimit;
+ typedef ScopedHashTableScope<MachineInstr*, unsigned,
+ MachineInstrExpressionTrait> ScopeType;
+ DenseMap<MachineBasicBlock*, ScopeType*> ScopeMap;
ScopedHashTable<MachineInstr*, unsigned, MachineInstrExpressionTrait> VNT;
SmallVector<MachineInstr*, 64> Exps;
+ unsigned CurrVN;
bool PerformTrivialCoalescing(MachineInstr *MI, MachineBasicBlock *MBB);
bool isPhysDefTriviallyDead(unsigned Reg,
MachineBasicBlock::const_iterator I,
- MachineBasicBlock::const_iterator E);
- bool hasLivePhysRegDefUse(MachineInstr *MI, MachineBasicBlock *MBB);
+ MachineBasicBlock::const_iterator E) const ;
+ bool hasLivePhysRegDefUse(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ unsigned &PhysDef) const;
+ bool PhysRegDefReaches(MachineInstr *CSMI, MachineInstr *MI,
+ unsigned PhysDef) const;
bool isCSECandidate(MachineInstr *MI);
- bool ProcessBlock(MachineDomTreeNode *Node);
+ bool isProfitableToCSE(unsigned CSReg, unsigned Reg,
+ MachineInstr *CSMI, MachineInstr *MI);
+ void EnterScope(MachineBasicBlock *MBB);
+ void ExitScope(MachineBasicBlock *MBB);
+ bool ProcessBlock(MachineBasicBlock *MBB);
+ void ExitScopeIfDone(MachineDomTreeNode *Node,
+ DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
+ DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap);
+ bool PerformCSE(MachineDomTreeNode *Node);
};
} // end anonymous namespace
char MachineCSE::ID = 0;
-static RegisterPass<MachineCSE>
-X("machine-cse", "Machine Common Subexpression Elimination");
+INITIALIZE_PASS(MachineCSE, "machine-cse",
+ "Machine Common Subexpression Elimination", false, false);
FunctionPass *llvm::createMachineCSEPass() { return new MachineCSE(); }
@@ -81,38 +106,53 @@ bool MachineCSE::PerformTrivialCoalescing(MachineInstr *MI,
unsigned Reg = MO.getReg();
if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
- if (!MRI->hasOneUse(Reg))
+ if (!MRI->hasOneNonDBGUse(Reg))
// Only coalesce single use copies. This ensures the copy will be
// deleted.
continue;
MachineInstr *DefMI = MRI->getVRegDef(Reg);
if (DefMI->getParent() != MBB)
continue;
- unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- if (TII->isMoveInstr(*DefMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
- TargetRegisterInfo::isVirtualRegister(SrcReg) &&
- !SrcSubIdx && !DstSubIdx) {
- MO.setReg(SrcReg);
- DefMI->eraseFromParent();
- ++NumCoalesces;
- Changed = true;
- }
+ if (!DefMI->isCopy())
+ continue;
+ unsigned SrcReg = DefMI->getOperand(1).getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
+ continue;
+ if (DefMI->getOperand(0).getSubReg() || DefMI->getOperand(1).getSubReg())
+ continue;
+ const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
+ const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+ const TargetRegisterClass *NewRC = getCommonSubClass(RC, SRC);
+ if (!NewRC)
+ continue;
+ DEBUG(dbgs() << "Coalescing: " << *DefMI);
+ DEBUG(dbgs() << "*** to: " << *MI);
+ MO.setReg(SrcReg);
+ MRI->clearKillFlags(SrcReg);
+ if (NewRC != SRC)
+ MRI->setRegClass(SrcReg, NewRC);
+ DefMI->eraseFromParent();
+ ++NumCoalesces;
+ Changed = true;
}
return Changed;
}
-bool MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
- MachineBasicBlock::const_iterator I,
- MachineBasicBlock::const_iterator E) {
- unsigned LookAheadLeft = 5;
- while (LookAheadLeft--) {
+bool
+MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
+ MachineBasicBlock::const_iterator I,
+ MachineBasicBlock::const_iterator E) const {
+ unsigned LookAheadLeft = LookAheadLimit;
+ while (LookAheadLeft) {
+ // Skip over dbg_value's.
+ while (I != E && I->isDebugValue())
+ ++I;
+
if (I == E)
// Reached end of block, register is obviously dead.
return true;
- if (I->isDebugValue())
- continue;
bool SeenDef = false;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = I->getOperand(i);
@@ -121,6 +161,7 @@ bool MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
if (!TRI->regsOverlap(MO.getReg(), Reg))
continue;
if (MO.isUse())
+ // Found a use!
return false;
SeenDef = true;
}
@@ -128,51 +169,91 @@ bool MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
// See a def of Reg (or an alias) before encountering any use, it's
// trivially dead.
return true;
+
+ --LookAheadLeft;
++I;
}
return false;
}
-bool MachineCSE::hasLivePhysRegDefUse(MachineInstr *MI, MachineBasicBlock *MBB){
- unsigned PhysDef = 0;
+/// hasLivePhysRegDefUse - Return true if the specified instruction reads or
+/// writes physical registers (except for dead defs of physical registers). It
+/// also returns the physical register def by reference if it's the only one
+/// and the instruction does not use any physical register.
+bool MachineCSE::hasLivePhysRegDefUse(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ unsigned &PhysDef) const {
+ PhysDef = 0;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
+ const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- if (MO.isUse())
- // Can't touch anything to read a physical register.
- return true;
- if (MO.isDead())
- // If the def is dead, it's ok.
- continue;
- // Ok, this is a physical register def that's not marked "dead". That's
- // common since this pass is run before livevariables. We can scan
- // forward a few instructions and check if it is obviously dead.
- if (PhysDef)
- // Multiple physical register defs. These are rare, forget about it.
- return true;
- PhysDef = Reg;
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
+ continue;
+ if (MO.isUse()) {
+ // Can't touch anything to read a physical register.
+ PhysDef = 0;
+ return true;
}
+ if (MO.isDead())
+ // If the def is dead, it's ok.
+ continue;
+ // Ok, this is a physical register def that's not marked "dead". That's
+ // common since this pass is run before livevariables. We can scan
+ // forward a few instructions and check if it is obviously dead.
+ if (PhysDef) {
+ // Multiple physical register defs. These are rare, forget about it.
+ PhysDef = 0;
+ return true;
+ }
+ PhysDef = Reg;
}
if (PhysDef) {
- MachineBasicBlock::iterator I = MI; I = llvm::next(I);
+ MachineBasicBlock::const_iterator I = MI; I = llvm::next(I);
if (!isPhysDefTriviallyDead(PhysDef, I, MBB->end()))
return true;
}
return false;
}
+bool MachineCSE::PhysRegDefReaches(MachineInstr *CSMI, MachineInstr *MI,
+ unsigned PhysDef) const {
+ // For now conservatively returns false if the common subexpression is
+ // not in the same basic block as the given instruction.
+ MachineBasicBlock *MBB = MI->getParent();
+ if (CSMI->getParent() != MBB)
+ return false;
+ MachineBasicBlock::const_iterator I = CSMI; I = llvm::next(I);
+ MachineBasicBlock::const_iterator E = MI;
+ unsigned LookAheadLeft = LookAheadLimit;
+ while (LookAheadLeft) {
+ // Skip over dbg_value's.
+ while (I != E && I->isDebugValue())
+ ++I;
+
+ if (I == E)
+ return true;
+ if (I->modifiesRegister(PhysDef, TRI))
+ return false;
+
+ --LookAheadLeft;
+ ++I;
+ }
+
+ return false;
+}
+
bool MachineCSE::isCSECandidate(MachineInstr *MI) {
- // Ignore copies or instructions that read / write physical registers
- // (except for dead defs of physical registers).
- unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- if (TII->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) ||
- MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg())
+ if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
+ MI->isKill() || MI->isInlineAsm() || MI->isDebugValue())
+ return false;
+
+ // Ignore copies.
+ if (MI->isCopyLike())
return false;
// Ignore stuff that we obviously can't move.
@@ -194,12 +275,83 @@ bool MachineCSE::isCSECandidate(MachineInstr *MI) {
return true;
}
-bool MachineCSE::ProcessBlock(MachineDomTreeNode *Node) {
+/// isProfitableToCSE - Return true if it's profitable to eliminate MI with a
+/// common expression that defines Reg.
+bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
+ MachineInstr *CSMI, MachineInstr *MI) {
+ // FIXME: Heuristics that work around the lack of live range splitting.
+
+ // Heuristics #1: Don't CSE "cheap" computations if the def is not local or in
+ // an immediate predecessor. We don't want to increase register pressure and
+ // end up causing other computations to be spilled.
+ if (MI->getDesc().isAsCheapAsAMove()) {
+ MachineBasicBlock *CSBB = CSMI->getParent();
+ MachineBasicBlock *BB = MI->getParent();
+ if (CSBB != BB &&
+ find(CSBB->succ_begin(), CSBB->succ_end(), BB) == CSBB->succ_end())
+ return false;
+ }
+
+ // Heuristics #2: If the expression doesn't use a virtual register and the
+ // only uses of the redundant computation are copies, do not CSE.
+ bool HasVRegUse = false;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isUse() && MO.getReg() &&
+ TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
+ HasVRegUse = true;
+ break;
+ }
+ }
+ if (!HasVRegUse) {
+ bool HasNonCopyUse = false;
+ for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
+ E = MRI->use_nodbg_end(); I != E; ++I) {
+ MachineInstr *Use = &*I;
+ // Ignore copies.
+ if (!Use->isCopyLike()) {
+ HasNonCopyUse = true;
+ break;
+ }
+ }
+ if (!HasNonCopyUse)
+ return false;
+ }
+
+ // Heuristics #3: If the common subexpression is used by PHIs, do not reuse
+ // it unless the defined value is already used in the BB of the new use.
+ bool HasPHI = false;
+ SmallPtrSet<MachineBasicBlock*, 4> CSBBs;
+ for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(CSReg),
+ E = MRI->use_nodbg_end(); I != E; ++I) {
+ MachineInstr *Use = &*I;
+ HasPHI |= Use->isPHI();
+ CSBBs.insert(Use->getParent());
+ }
+
+ if (!HasPHI)
+ return true;
+ return CSBBs.count(MI->getParent());
+}
+
+void MachineCSE::EnterScope(MachineBasicBlock *MBB) {
+ DEBUG(dbgs() << "Entering: " << MBB->getName() << '\n');
+ ScopeType *Scope = new ScopeType(VNT);
+ ScopeMap[MBB] = Scope;
+}
+
+void MachineCSE::ExitScope(MachineBasicBlock *MBB) {
+ DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n');
+ DenseMap<MachineBasicBlock*, ScopeType*>::iterator SI = ScopeMap.find(MBB);
+ assert(SI != ScopeMap.end());
+ ScopeMap.erase(SI);
+ delete SI->second;
+}
+
+bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
bool Changed = false;
- ScopedHashTableScope<MachineInstr*, unsigned,
- MachineInstrExpressionTrait> VNTS(VNT);
- MachineBasicBlock *MBB = Node->getBlock();
+ SmallVector<std::pair<unsigned, unsigned>, 8> CSEPairs;
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E; ) {
MachineInstr *MI = &*I;
++I;
@@ -207,19 +359,37 @@ bool MachineCSE::ProcessBlock(MachineDomTreeNode *Node) {
if (!isCSECandidate(MI))
continue;
+ bool DefPhys = false;
bool FoundCSE = VNT.count(MI);
if (!FoundCSE) {
// Look for trivial copy coalescing opportunities.
- if (PerformTrivialCoalescing(MI, MBB))
+ if (PerformTrivialCoalescing(MI, MBB)) {
+ // After coalescing MI itself may become a copy.
+ if (MI->isCopyLike())
+ continue;
FoundCSE = VNT.count(MI);
+ }
}
// FIXME: commute commutable instructions?
// If the instruction defines a physical register and the value *may* be
// used, then it's not safe to replace it with a common subexpression.
- if (FoundCSE && hasLivePhysRegDefUse(MI, MBB))
+ unsigned PhysDef = 0;
+ if (FoundCSE && hasLivePhysRegDefUse(MI, MBB, PhysDef)) {
FoundCSE = false;
+ // ... Unless the CS is local and it also defines the physical register
+ // which is not clobbered in between.
+ if (PhysDef) {
+ unsigned CSVN = VNT.lookup(MI);
+ MachineInstr *CSMI = Exps[CSVN];
+ if (PhysRegDefReaches(CSMI, MI, PhysDef)) {
+ FoundCSE = true;
+ DefPhys = true;
+ }
+ }
+ }
+
if (!FoundCSE) {
VNT.insert(MI, CurrVN++);
Exps.push_back(MI);
@@ -231,6 +401,9 @@ bool MachineCSE::ProcessBlock(MachineDomTreeNode *Node) {
MachineInstr *CSMI = Exps[CSVN];
DEBUG(dbgs() << "Examining: " << *MI);
DEBUG(dbgs() << "*** Found a common subexpression: " << *CSMI);
+
+ // Check if it's profitable to perform this CSE.
+ bool DoCSE = true;
unsigned NumDefs = MI->getDesc().getNumDefs();
for (unsigned i = 0, e = MI->getNumOperands(); NumDefs && i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
@@ -243,17 +416,91 @@ bool MachineCSE::ProcessBlock(MachineDomTreeNode *Node) {
assert(TargetRegisterInfo::isVirtualRegister(OldReg) &&
TargetRegisterInfo::isVirtualRegister(NewReg) &&
"Do not CSE physical register defs!");
- MRI->replaceRegWith(OldReg, NewReg);
+ if (!isProfitableToCSE(NewReg, OldReg, CSMI, MI)) {
+ DoCSE = false;
+ break;
+ }
+ CSEPairs.push_back(std::make_pair(OldReg, NewReg));
--NumDefs;
}
- MI->eraseFromParent();
- ++NumCSEs;
+
+ // Actually perform the elimination.
+ if (DoCSE) {
+ for (unsigned i = 0, e = CSEPairs.size(); i != e; ++i) {
+ MRI->replaceRegWith(CSEPairs[i].first, CSEPairs[i].second);
+ MRI->clearKillFlags(CSEPairs[i].second);
+ }
+ MI->eraseFromParent();
+ ++NumCSEs;
+ if (DefPhys)
+ ++NumPhysCSEs;
+ } else {
+ DEBUG(dbgs() << "*** Not profitable, avoid CSE!\n");
+ VNT.insert(MI, CurrVN++);
+ Exps.push_back(MI);
+ }
+ CSEPairs.clear();
}
- // Recursively call ProcessBlock with childred.
- const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
- for (unsigned i = 0, e = Children.size(); i != e; ++i)
- Changed |= ProcessBlock(Children[i]);
+ return Changed;
+}
+
+/// ExitScopeIfDone - Destroy scope for the MBB that corresponds to the given
+/// dominator tree node if it's a leaf or all of its children are done. Walk
+/// up the dominator tree to destroy ancestors which are now done.
+void
+MachineCSE::ExitScopeIfDone(MachineDomTreeNode *Node,
+ DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
+ DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap) {
+ if (OpenChildren[Node])
+ return;
+
+ // Pop scope.
+ ExitScope(Node->getBlock());
+
+ // Now traverse upwards to pop ancestors whose offspring are all done.
+ while (MachineDomTreeNode *Parent = ParentMap[Node]) {
+ unsigned Left = --OpenChildren[Parent];
+ if (Left != 0)
+ break;
+ ExitScope(Parent->getBlock());
+ Node = Parent;
+ }
+}
+
+bool MachineCSE::PerformCSE(MachineDomTreeNode *Node) {
+ SmallVector<MachineDomTreeNode*, 32> Scopes;
+ SmallVector<MachineDomTreeNode*, 8> WorkList;
+ DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> ParentMap;
+ DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;
+
+ CurrVN = 0;
+
+ // Perform a DFS walk to determine the order of visit.
+ WorkList.push_back(Node);
+ do {
+ Node = WorkList.pop_back_val();
+ Scopes.push_back(Node);
+ const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
+ unsigned NumChildren = Children.size();
+ OpenChildren[Node] = NumChildren;
+ for (unsigned i = 0; i != NumChildren; ++i) {
+ MachineDomTreeNode *Child = Children[i];
+ ParentMap[Child] = Node;
+ WorkList.push_back(Child);
+ }
+ } while (!WorkList.empty());
+
+ // Now perform CSE.
+ bool Changed = false;
+ for (unsigned i = 0, e = Scopes.size(); i != e; ++i) {
+ MachineDomTreeNode *Node = Scopes[i];
+ MachineBasicBlock *MBB = Node->getBlock();
+ EnterScope(MBB);
+ Changed |= ProcessBlock(MBB);
+ // If it's a leaf node, it's done. Traverse upwards to pop ancestors.
+ ExitScopeIfDone(Node, OpenChildren, ParentMap);
+ }
return Changed;
}
@@ -262,7 +509,7 @@ bool MachineCSE::runOnMachineFunction(MachineFunction &MF) {
TII = MF.getTarget().getInstrInfo();
TRI = MF.getTarget().getRegisterInfo();
MRI = &MF.getRegInfo();
- DT = &getAnalysis<MachineDominatorTree>();
AA = &getAnalysis<AliasAnalysis>();
- return ProcessBlock(DT->getRootNode());
+ DT = &getAnalysis<MachineDominatorTree>();
+ return PerformCSE(DT->getRootNode());
}
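
The new PerformCSE/ExitScopeIfDone pair above turns the old recursive dominator-tree walk into an explicit DFS: each block opens a hash-table scope, a per-node open-children counter records how many dominator-tree children still need processing, and a block's scope is popped only once that counter reaches zero, walking upward while parents run out of open children. A standalone C++ sketch of that bookkeeping on a toy tree (illustrative only; std::map stands in for DenseMap and printing stands in for the scope pop):

#include <cstdio>
#include <map>
#include <vector>

// A toy dominator tree node: just an id and its children.
struct DomNode {
  int Id;
  std::vector<DomNode*> Children;
};

// Pop the scope of Node if all its children are done, then keep popping
// ancestors whose last open child just finished -- the ExitScopeIfDone pattern.
static void exitScopeIfDone(DomNode *Node,
                            std::map<DomNode*, unsigned> &OpenChildren,
                            std::map<DomNode*, DomNode*> &Parent) {
  if (OpenChildren[Node])
    return;
  std::printf("exit scope %d\n", Node->Id);
  while (DomNode *P = Parent[Node]) {
    if (--OpenChildren[P] != 0)
      break;
    std::printf("exit scope %d\n", P->Id);
    Node = P;
  }
}

int main() {
  // Tree: 0 -> {1, 2}, processed in the DFS order 0, 2, 1.
  DomNode N0 = { 0 }, N1 = { 1 }, N2 = { 2 };
  N0.Children.push_back(&N1);
  N0.Children.push_back(&N2);

  std::map<DomNode*, unsigned> OpenChildren;
  std::map<DomNode*, DomNode*> Parent;
  OpenChildren[&N0] = 2; OpenChildren[&N1] = 0; OpenChildren[&N2] = 0;
  Parent[&N0] = 0; Parent[&N1] = &N0; Parent[&N2] = &N0;

  // "Enter scope" happens per block before processing; only the exits are shown.
  exitScopeIfDone(&N0, OpenChildren, Parent); // nothing: two children still open
  exitScopeIfDone(&N2, OpenChildren, Parent); // exit 2
  exitScopeIfDone(&N1, OpenChildren, Parent); // exit 1, then exit 0
  return 0;
}
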
diff --git a/libclamav/c++/llvm/lib/CodeGen/MachineDominators.cpp b/libclamav/c++/llvm/lib/CodeGen/MachineDominators.cpp
index 4088739..3c67478 100644
--- a/libclamav/c++/llvm/lib/CodeGen/MachineDominators.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/MachineDominators.cpp
@@ -24,10 +24,10 @@ TEMPLATE_INSTANTIATION(class DominatorTreeBase<MachineBasicBlock>);
char MachineDominatorTree::ID = 0;
-static RegisterPass<MachineDominatorTree>
-E("machinedomtree", "MachineDominator Tree Construction", true);
+INITIALIZE_PASS(MachineDominatorTree, "machinedomtree",
+ "MachineDominator Tree Construction", true, true);
-const PassInfo *const llvm::MachineDominatorsID = &E;
+char &llvm::MachineDominatorsID = MachineDominatorTree::ID;
void MachineDominatorTree::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
@@ -41,12 +41,11 @@ bool MachineDominatorTree::runOnMachineFunction(MachineFunction &F) {
}
MachineDominatorTree::MachineDominatorTree()
- : MachineFunctionPass(&ID) {
+ : MachineFunctionPass(ID) {
DT = new DominatorTreeBase<MachineBasicBlock>(false);
}
MachineDominatorTree::~MachineDominatorTree() {
- DT->releaseMemory();
delete DT;
}
diff --git a/libclamav/c++/llvm/lib/CodeGen/MachineFunction.cpp b/libclamav/c++/llvm/lib/CodeGen/MachineFunction.cpp
index 33d8400..0171700 100644
--- a/libclamav/c++/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/MachineFunction.cpp
@@ -23,6 +23,7 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -39,40 +40,6 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-namespace {
- struct Printer : public MachineFunctionPass {
- static char ID;
-
- raw_ostream &OS;
- const std::string Banner;
-
- Printer(raw_ostream &os, const std::string &banner)
- : MachineFunctionPass(&ID), OS(os), Banner(banner) {}
-
- const char *getPassName() const { return "MachineFunction Printer"; }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- MachineFunctionPass::getAnalysisUsage(AU);
- }
-
- bool runOnMachineFunction(MachineFunction &MF) {
- OS << "# " << Banner << ":\n";
- MF.print(OS);
- return false;
- }
- };
- char Printer::ID = 0;
-}
-
-/// Returns a newly-created MachineFunction Printer pass. The default banner is
-/// empty.
-///
-FunctionPass *llvm::createMachineFunctionPrinterPass(raw_ostream &OS,
- const std::string &Banner){
- return new Printer(OS, Banner);
-}
-
//===----------------------------------------------------------------------===//
// MachineFunction implementation
//===----------------------------------------------------------------------===//
@@ -84,22 +51,19 @@ void ilist_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
MBB->getParent()->DeleteMachineBasicBlock(MBB);
}
-MachineFunction::MachineFunction(Function *F, const TargetMachine &TM,
- unsigned FunctionNum)
- : Fn(F), Target(TM) {
+MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
+ unsigned FunctionNum, MachineModuleInfo &mmi)
+ : Fn(F), Target(TM), Ctx(mmi.getContext()), MMI(mmi) {
if (TM.getRegisterInfo())
- RegInfo = new (Allocator.Allocate<MachineRegisterInfo>())
- MachineRegisterInfo(*TM.getRegisterInfo());
+ RegInfo = new (Allocator) MachineRegisterInfo(*TM.getRegisterInfo());
else
RegInfo = 0;
MFInfo = 0;
- FrameInfo = new (Allocator.Allocate<MachineFrameInfo>())
- MachineFrameInfo(*TM.getFrameInfo());
+ FrameInfo = new (Allocator) MachineFrameInfo(*TM.getFrameInfo());
if (Fn->hasFnAttr(Attribute::StackAlignment))
FrameInfo->setMaxAlignment(Attribute::getStackAlignmentFromAttrs(
Fn->getAttributes().getFnAttributes()));
- ConstantPool = new (Allocator.Allocate<MachineConstantPool>())
- MachineConstantPool(TM.getTargetData());
+ ConstantPool = new (Allocator) MachineConstantPool(TM.getTargetData());
Alignment = TM.getTargetLowering()->getFunctionAlignment(F);
FunctionNumber = FunctionNum;
JumpTableInfo = 0;
@@ -132,7 +96,7 @@ MachineJumpTableInfo *MachineFunction::
getOrCreateJumpTableInfo(unsigned EntryKind) {
if (JumpTableInfo) return JumpTableInfo;
- JumpTableInfo = new (Allocator.Allocate<MachineJumpTableInfo>())
+ JumpTableInfo = new (Allocator)
MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
return JumpTableInfo;
}
@@ -229,14 +193,13 @@ MachineMemOperand *
MachineFunction::getMachineMemOperand(const Value *v, unsigned f,
int64_t o, uint64_t s,
unsigned base_alignment) {
- return new (Allocator.Allocate<MachineMemOperand>())
- MachineMemOperand(v, f, o, s, base_alignment);
+ return new (Allocator) MachineMemOperand(v, f, o, s, base_alignment);
}
MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
int64_t Offset, uint64_t Size) {
- return new (Allocator.Allocate<MachineMemOperand>())
+ return new (Allocator)
MachineMemOperand(MMO->getValue(), MMO->getFlags(),
int64_t(uint64_t(MMO->getOffset()) +
uint64_t(Offset)),
@@ -415,7 +378,7 @@ void MachineFunction::viewCFG() const
#ifndef NDEBUG
ViewGraph(this, "mf" + getFunction()->getNameStr());
#else
- errs() << "SelectionDAG::viewGraph is only available in debug builds on "
+ errs() << "MachineFunction::viewCFG is only available in debug builds on "
<< "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
@@ -425,7 +388,7 @@ void MachineFunction::viewCFGOnly() const
#ifndef NDEBUG
ViewGraph(this, "mf" + getFunction()->getNameStr(), true);
#else
- errs() << "SelectionDAG::viewGraph is only available in debug builds on "
+ errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
<< "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
@@ -434,21 +397,17 @@ void MachineFunction::viewCFGOnly() const
/// create a corresponding virtual register for it.
unsigned MachineFunction::addLiveIn(unsigned PReg,
const TargetRegisterClass *RC) {
- assert(RC->contains(PReg) && "Not the correct regclass!");
- unsigned VReg = getRegInfo().createVirtualRegister(RC);
- getRegInfo().addLiveIn(PReg, VReg);
+ MachineRegisterInfo &MRI = getRegInfo();
+ unsigned VReg = MRI.getLiveInVirtReg(PReg);
+ if (VReg) {
+ assert(MRI.getRegClass(VReg) == RC && "Register class mismatch!");
+ return VReg;
+ }
+ VReg = MRI.createVirtualRegister(RC);
+ MRI.addLiveIn(PReg, VReg);
return VReg;
}
-/// getDILocation - Get the DILocation for a given DebugLoc object.
-DILocation MachineFunction::getDILocation(DebugLoc DL) const {
- unsigned Idx = DL.getIndex();
- assert(Idx < DebugLocInfo.DebugLocations.size() &&
- "Invalid index into debug locations!");
- return DILocation(DebugLocInfo.DebugLocations[Idx]);
-}
-
-
/// getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
/// normal 'L' label is returned.
@@ -478,10 +437,16 @@ MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
/// index with a negative value.
///
int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
- bool Immutable, bool isSS) {
+ bool Immutable) {
assert(Size != 0 && "Cannot allocate zero size fixed stack objects!");
- Objects.insert(Objects.begin(), StackObject(Size, 1, SPOffset, Immutable,
- isSS));
+ // The alignment of the frame index can be determined from its offset from
+ // the incoming frame position. If the frame object is at offset 32 and
+ // the stack is guaranteed to be 16-byte aligned, then we know that the
+ // object is 16-byte aligned.
+ unsigned StackAlign = TFI.getStackAlignment();
+ unsigned Align = MinAlign(SPOffset, StackAlign);
+ Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
+ /*isSS*/false, false));
return -++NumFixedObjects;
}
@@ -572,6 +537,8 @@ unsigned MachineJumpTableInfo::getEntrySize(const TargetData &TD) const {
case MachineJumpTableInfo::EK_LabelDifference32:
case MachineJumpTableInfo::EK_Custom32:
return 4;
+ case MachineJumpTableInfo::EK_Inline:
+ return 0;
}
assert(0 && "Unknown jump table encoding!");
return ~0;
@@ -589,6 +556,8 @@ unsigned MachineJumpTableInfo::getEntryAlignment(const TargetData &TD) const {
case MachineJumpTableInfo::EK_LabelDifference32:
case MachineJumpTableInfo::EK_Custom32:
return TD.getABIIntegerTypeAlignment(32);
+ case MachineJumpTableInfo::EK_Inline:
+ return 1;
}
assert(0 && "Unknown jump table encoding!");
return ~0;
@@ -672,7 +641,7 @@ MachineConstantPool::~MachineConstantPool() {
/// CanShareConstantPoolEntry - Test whether the given two constants
/// can be allocated the same constant pool entry.
-static bool CanShareConstantPoolEntry(Constant *A, Constant *B,
+static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
const TargetData *TD) {
// Handle the trivial case quickly.
if (A == B) return true;
@@ -687,17 +656,17 @@ static bool CanShareConstantPoolEntry(Constant *A, Constant *B,
// If a floating-point value and an integer value have the same encoding,
// they can share a constant-pool entry.
- if (ConstantFP *AFP = dyn_cast<ConstantFP>(A))
- if (ConstantInt *BI = dyn_cast<ConstantInt>(B))
+ if (const ConstantFP *AFP = dyn_cast<ConstantFP>(A))
+ if (const ConstantInt *BI = dyn_cast<ConstantInt>(B))
return AFP->getValueAPF().bitcastToAPInt() == BI->getValue();
- if (ConstantFP *BFP = dyn_cast<ConstantFP>(B))
- if (ConstantInt *AI = dyn_cast<ConstantInt>(A))
+ if (const ConstantFP *BFP = dyn_cast<ConstantFP>(B))
+ if (const ConstantInt *AI = dyn_cast<ConstantInt>(A))
return BFP->getValueAPF().bitcastToAPInt() == AI->getValue();
// Two vectors can share an entry if each pair of corresponding
// elements could.
- if (ConstantVector *AV = dyn_cast<ConstantVector>(A))
- if (ConstantVector *BV = dyn_cast<ConstantVector>(B)) {
+ if (const ConstantVector *AV = dyn_cast<ConstantVector>(A))
+ if (const ConstantVector *BV = dyn_cast<ConstantVector>(B)) {
if (AV->getType()->getNumElements() != BV->getType()->getNumElements())
return false;
for (unsigned i = 0, e = AV->getType()->getNumElements(); i != e; ++i)
@@ -716,7 +685,7 @@ static bool CanShareConstantPoolEntry(Constant *A, Constant *B,
/// an existing one. User must specify the log2 of the minimum required
/// alignment for the object.
///
-unsigned MachineConstantPool::getConstantPoolIndex(Constant *C,
+unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
unsigned Alignment) {
assert(Alignment && "Alignment must be specified!");
if (Alignment > PoolAlignment) PoolAlignment = Alignment;
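
The CreateFixedObject change above derives a fixed object's alignment from its SP offset and the guaranteed stack alignment: the object can be no better aligned than the largest power of two dividing both. A standalone C++ check of that computation; minAlign here is an illustrative stand-in for the MinAlign helper used in the patch, not the LLVM function itself.

#include <cassert>
#include <cstdint>

// Largest power of two dividing both values (offset 0 is treated as "fully
// aligned", so the stack alignment wins) -- the MinAlign idea used above.
static uint64_t minAlign(uint64_t SPOffset, uint64_t StackAlign) {
  uint64_t Bits = SPOffset | StackAlign;
  return Bits & (~Bits + 1); // isolate the lowest set bit
}

int main() {
  // With a 16-byte aligned stack, an object at offset 32 is 16-byte aligned,
  // one at offset 8 is only 8-byte aligned, one at offset 0 gets the full 16.
  assert(minAlign(32, 16) == 16);
  assert(minAlign(8, 16) == 8);
  assert(minAlign(0, 16) == 16);
  return 0;
}
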
diff --git a/libclamav/c++/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp b/libclamav/c++/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp
index 8d87e3e..4f84b95 100644
--- a/libclamav/c++/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/MachineFunctionAnalysis.cpp
@@ -13,20 +13,21 @@
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
using namespace llvm;
// Register this pass with PassInfo directly to avoid having to define
// a default constructor.
static PassInfo
X("Machine Function Analysis", "machine-function-analysis",
- intptr_t(&MachineFunctionAnalysis::ID), 0,
+ &MachineFunctionAnalysis::ID, 0,
/*CFGOnly=*/false, /*is_analysis=*/true);
char MachineFunctionAnalysis::ID = 0;
MachineFunctionAnalysis::MachineFunctionAnalysis(const TargetMachine &tm,
CodeGenOpt::Level OL) :
- FunctionPass(&ID), TM(tm), OptLevel(OL), MF(0) {
+ FunctionPass(ID), TM(tm), OptLevel(OL), MF(0) {
}
MachineFunctionAnalysis::~MachineFunctionAnalysis() {
@@ -34,9 +35,24 @@ MachineFunctionAnalysis::~MachineFunctionAnalysis() {
assert(!MF && "MachineFunctionAnalysis left initialized!");
}
+void MachineFunctionAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequired<MachineModuleInfo>();
+}
+
+bool MachineFunctionAnalysis::doInitialization(Module &M) {
+ MachineModuleInfo *MMI = getAnalysisIfAvailable<MachineModuleInfo>();
+ assert(MMI && "MMI not around yet??");
+ MMI->setModule(&M);
+ NextFnNum = 0;
+ return false;
+}
+
+
bool MachineFunctionAnalysis::runOnFunction(Function &F) {
assert(!MF && "MachineFunctionAnalysis already initialized!");
- MF = new MachineFunction(&F, TM, NextFnNum++);
+ MF = new MachineFunction(&F, TM, NextFnNum++,
+ getAnalysis<MachineModuleInfo>());
return false;
}
@@ -44,7 +60,3 @@ void MachineFunctionAnalysis::releaseMemory() {
delete MF;
MF = 0;
}
-
-void MachineFunctionAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
-}
diff --git a/libclamav/c++/llvm/lib/CodeGen/MachineFunctionPass.cpp b/libclamav/c++/llvm/lib/CodeGen/MachineFunctionPass.cpp
index 2f8d4c9..e5a4912 100644
--- a/libclamav/c++/llvm/lib/CodeGen/MachineFunctionPass.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/MachineFunctionPass.cpp
@@ -15,8 +15,14 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/Passes.h"
using namespace llvm;
+Pass *MachineFunctionPass::createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const {
+ return createMachineFunctionPrinterPass(O, Banner);
+}
+
bool MachineFunctionPass::runOnFunction(Function &F) {
// Do not codegen any 'available_externally' functions at all, they have
// definitions outside the translation unit.
diff --git a/libclamav/c++/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp b/libclamav/c++/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp
new file mode 100644
index 0000000..2aaa798
--- /dev/null
+++ b/libclamav/c++/llvm/lib/CodeGen/MachineFunctionPrinterPass.cpp
@@ -0,0 +1,60 @@
+//===-- MachineFunctionPrinterPass.cpp ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// MachineFunctionPrinterPass implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+namespace {
+/// MachineFunctionPrinterPass - This is a pass to dump the IR of a
+/// MachineFunction.
+///
+struct MachineFunctionPrinterPass : public MachineFunctionPass {
+ static char ID;
+
+ raw_ostream &OS;
+ const std::string Banner;
+
+ MachineFunctionPrinterPass(raw_ostream &os, const std::string &banner)
+ : MachineFunctionPass(ID), OS(os), Banner(banner) {}
+
+ const char *getPassName() const { return "MachineFunction Printer"; }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) {
+ OS << "# " << Banner << ":\n";
+ MF.print(OS);
+ return false;
+ }
+};
+
+char MachineFunctionPrinterPass::ID = 0;
+}
+
+namespace llvm {
+/// Returns a newly-created MachineFunction Printer pass. The
+/// default banner is empty.
+///
+MachineFunctionPass *createMachineFunctionPrinterPass(raw_ostream &OS,
+ const std::string &Banner){
+ return new MachineFunctionPrinterPass(OS, Banner);
+}
+
+}
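
This printer is what the createPrinterPass() hook added to MachineFunctionPass in the previous file returns, so the pass manager can interleave machine-IR dumps between codegen passes. A small usage sketch, assuming a PassManagerBase named PM supplied by the caller:

    // Illustrative only: dumping machine IR at a chosen point in the pipeline.
    #include "llvm/CodeGen/Passes.h"
    #include "llvm/PassManager.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    static void addMachineIRDump(PassManagerBase &PM) {
      // The banner string is arbitrary; it is printed before each function dump.
      PM.add(createMachineFunctionPrinterPass(errs(), "# After my pass"));
    }
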
diff --git a/libclamav/c++/llvm/lib/CodeGen/MachineInstr.cpp b/libclamav/c++/llvm/lib/CodeGen/MachineInstr.cpp
index e23670d..446e461 100644
--- a/libclamav/c++/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/MachineInstr.cpp
@@ -15,6 +15,7 @@
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/InlineAsm.h"
+#include "llvm/Metadata.h"
#include "llvm/Type.h"
#include "llvm/Value.h"
#include "llvm/Assembly/Writer.h"
@@ -23,6 +24,7 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetInstrDesc.h"
@@ -35,7 +37,6 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/Metadata.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -110,6 +111,26 @@ void MachineOperand::setReg(unsigned Reg) {
Contents.Reg.RegNo = Reg;
}
+void MachineOperand::substVirtReg(unsigned Reg, unsigned SubIdx,
+ const TargetRegisterInfo &TRI) {
+ assert(TargetRegisterInfo::isVirtualRegister(Reg));
+ if (SubIdx && getSubReg())
+ SubIdx = TRI.composeSubRegIndices(SubIdx, getSubReg());
+ setReg(Reg);
+ if (SubIdx)
+ setSubReg(SubIdx);
+}
+
+void MachineOperand::substPhysReg(unsigned Reg, const TargetRegisterInfo &TRI) {
+ assert(TargetRegisterInfo::isPhysicalRegister(Reg));
+ if (getSubReg()) {
+ Reg = TRI.getSubReg(Reg, getSubReg());
+ assert(Reg && "Invalid SubReg for physical register");
+ setSubReg(0);
+ }
+ setReg(Reg);
+}
+
/// ChangeToImmediate - Replace this operand with a new immediate operand of
/// the specified value. If an operand is known to be an immediate already,
/// the setImm method should be used.
@@ -189,6 +210,10 @@ bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const {
getOffset() == Other.getOffset();
case MachineOperand::MO_BlockAddress:
return getBlockAddress() == Other.getBlockAddress();
+ case MachineOperand::MO_MCSymbol:
+ return getMCSymbol() == Other.getMCSymbol();
+ case MachineOperand::MO_Metadata:
+ return getMetadata() == Other.getMetadata();
}
}
@@ -214,8 +239,12 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
OS << "%physreg" << getReg();
}
- if (getSubReg() != 0)
- OS << ':' << getSubReg();
+ if (getSubReg() != 0) {
+ if (TM)
+ OS << ':' << TM->getRegisterInfo()->getSubRegIndexName(getSubReg());
+ else
+ OS << ':' << getSubReg();
+ }
if (isDef() || isKill() || isDead() || isImplicit() || isUndef() ||
isEarlyClobber()) {
@@ -291,6 +320,9 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const {
WriteAsOperand(OS, getMetadata(), /*PrintType=*/false);
OS << '>';
break;
+ case MachineOperand::MO_MCSymbol:
+ OS << "<MCSym=" << *getMCSymbol() << '>';
+ break;
default:
llvm_unreachable("Unrecognized operand type");
}
@@ -389,7 +421,7 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const MachineMemOperand &MMO) {
/// TID NULL and no operands.
MachineInstr::MachineInstr()
: TID(0), NumImplicitOps(0), AsmPrinterFlags(0), MemRefs(0), MemRefsEnd(0),
- Parent(0), debugLoc(DebugLoc::getUnknownLoc()) {
+ Parent(0) {
// Make sure that we get added to a machine basicblock
LeakDetector::addGarbageObject(this);
}
@@ -403,20 +435,14 @@ void MachineInstr::addImplicitDefUseOperands() {
addOperand(MachineOperand::CreateReg(*ImpUses, false, true));
}
-/// MachineInstr ctor - This constructor create a MachineInstr and add the
-/// implicit operands. It reserves space for number of operands specified by
-/// TargetInstrDesc or the numOperands if it is not zero. (for
-/// instructions with variable number of operands).
+/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
+/// implicit operands. It reserves space for the number of operands specified by
+/// the TargetInstrDesc.
MachineInstr::MachineInstr(const TargetInstrDesc &tid, bool NoImp)
: TID(&tid), NumImplicitOps(0), AsmPrinterFlags(0),
- MemRefs(0), MemRefsEnd(0), Parent(0),
- debugLoc(DebugLoc::getUnknownLoc()) {
- if (!NoImp && TID->getImplicitDefs())
- for (const unsigned *ImpDefs = TID->getImplicitDefs(); *ImpDefs; ++ImpDefs)
- NumImplicitOps++;
- if (!NoImp && TID->getImplicitUses())
- for (const unsigned *ImpUses = TID->getImplicitUses(); *ImpUses; ++ImpUses)
- NumImplicitOps++;
+ MemRefs(0), MemRefsEnd(0), Parent(0) {
+ if (!NoImp)
+ NumImplicitOps = TID->getNumImplicitDefs() + TID->getNumImplicitUses();
Operands.reserve(NumImplicitOps + TID->getNumOperands());
if (!NoImp)
addImplicitDefUseOperands();
@@ -429,12 +455,8 @@ MachineInstr::MachineInstr(const TargetInstrDesc &tid, const DebugLoc dl,
bool NoImp)
: TID(&tid), NumImplicitOps(0), AsmPrinterFlags(0), MemRefs(0), MemRefsEnd(0),
Parent(0), debugLoc(dl) {
- if (!NoImp && TID->getImplicitDefs())
- for (const unsigned *ImpDefs = TID->getImplicitDefs(); *ImpDefs; ++ImpDefs)
- NumImplicitOps++;
- if (!NoImp && TID->getImplicitUses())
- for (const unsigned *ImpUses = TID->getImplicitUses(); *ImpUses; ++ImpUses)
- NumImplicitOps++;
+ if (!NoImp)
+ NumImplicitOps = TID->getNumImplicitDefs() + TID->getNumImplicitUses();
Operands.reserve(NumImplicitOps + TID->getNumOperands());
if (!NoImp)
addImplicitDefUseOperands();
@@ -445,18 +467,11 @@ MachineInstr::MachineInstr(const TargetInstrDesc &tid, const DebugLoc dl,
/// MachineInstr ctor - Work exactly the same as the ctor two above, except
/// that the MachineInstr is created and added to the end of the specified
/// basic block.
-///
MachineInstr::MachineInstr(MachineBasicBlock *MBB, const TargetInstrDesc &tid)
: TID(&tid), NumImplicitOps(0), AsmPrinterFlags(0),
- MemRefs(0), MemRefsEnd(0), Parent(0),
- debugLoc(DebugLoc::getUnknownLoc()) {
+ MemRefs(0), MemRefsEnd(0), Parent(0) {
assert(MBB && "Cannot use inserting ctor with null basic block!");
- if (TID->ImplicitDefs)
- for (const unsigned *ImpDefs = TID->getImplicitDefs(); *ImpDefs; ++ImpDefs)
- NumImplicitOps++;
- if (TID->ImplicitUses)
- for (const unsigned *ImpUses = TID->getImplicitUses(); *ImpUses; ++ImpUses)
- NumImplicitOps++;
+ NumImplicitOps = TID->getNumImplicitDefs() + TID->getNumImplicitUses();
Operands.reserve(NumImplicitOps + TID->getNumOperands());
addImplicitDefUseOperands();
// Make sure that we get added to a machine basicblock
@@ -471,12 +486,7 @@ MachineInstr::MachineInstr(MachineBasicBlock *MBB, const DebugLoc dl,
: TID(&tid), NumImplicitOps(0), AsmPrinterFlags(0), MemRefs(0), MemRefsEnd(0),
Parent(0), debugLoc(dl) {
assert(MBB && "Cannot use inserting ctor with null basic block!");
- if (TID->ImplicitDefs)
- for (const unsigned *ImpDefs = TID->getImplicitDefs(); *ImpDefs; ++ImpDefs)
- NumImplicitOps++;
- if (TID->ImplicitUses)
- for (const unsigned *ImpUses = TID->getImplicitUses(); *ImpUses; ++ImpUses)
- NumImplicitOps++;
+ NumImplicitOps = TID->getNumImplicitDefs() + TID->getNumImplicitUses();
Operands.reserve(NumImplicitOps + TID->getNumOperands());
addImplicitDefUseOperands();
// Make sure that we get added to a machine basicblock
@@ -795,25 +805,57 @@ int MachineInstr::findRegisterUseOperandIdx(unsigned Reg, bool isKill,
}
return -1;
}
-
+
+/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
+/// indicating if this instruction reads or writes Reg. This also considers
+/// partial defines.
+std::pair<bool,bool>
+MachineInstr::readsWritesVirtualRegister(unsigned Reg,
+ SmallVectorImpl<unsigned> *Ops) const {
+ bool PartDef = false; // Partial redefine.
+ bool FullDef = false; // Full define.
+ bool Use = false;
+
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = getOperand(i);
+ if (!MO.isReg() || MO.getReg() != Reg)
+ continue;
+ if (Ops)
+ Ops->push_back(i);
+ if (MO.isUse())
+ Use |= !MO.isUndef();
+ else if (MO.getSubReg())
+ PartDef = true;
+ else
+ FullDef = true;
+ }
+ // A partial redefine uses Reg unless there is also a full define.
+ return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
+}
+
/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register.
-int MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead,
- const TargetRegisterInfo *TRI) const {
+int
+MachineInstr::findRegisterDefOperandIdx(unsigned Reg, bool isDead, bool Overlap,
+ const TargetRegisterInfo *TRI) const {
+ bool isPhys = TargetRegisterInfo::isPhysicalRegister(Reg);
for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
if (!MO.isReg() || !MO.isDef())
continue;
unsigned MOReg = MO.getReg();
- if (MOReg == Reg ||
- (TRI &&
- TargetRegisterInfo::isPhysicalRegister(MOReg) &&
- TargetRegisterInfo::isPhysicalRegister(Reg) &&
- TRI->isSubRegister(MOReg, Reg)))
- if (!isDead || MO.isDead())
- return i;
+ bool Found = (MOReg == Reg);
+ if (!Found && TRI && isPhys &&
+ TargetRegisterInfo::isPhysicalRegister(MOReg)) {
+ if (Overlap)
+ Found = TRI->regsOverlap(MOReg, Reg);
+ else
+ Found = TRI->isSubRegister(MOReg, Reg);
+ }
+ if (Found && (!isDead || MO.isDead()))
+ return i;
}
return -1;
}
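
Two small helpers sketching how the additions above are meant to be consumed; the helper names are made up, but the calls follow the signatures introduced in this hunk:

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/Target/TargetRegisterInfo.h"
    using namespace llvm;

    // .second of the returned pair is true if MI partially or fully defines
    // VirtReg; .first is true if it reads it (a partial redefine counts as a
    // read unless a full define is also present).
    static bool definesVirtReg(const MachineInstr *MI, unsigned VirtReg) {
      return MI->readsWritesVirtualRegister(VirtReg, 0).second;
    }

    // With Overlap=true the lookup also matches defs of any register that
    // merely overlaps PhysReg, not just PhysReg and its super-registers.
    static int findOverlappingDefIdx(const MachineInstr *MI, unsigned PhysReg,
                                     const TargetRegisterInfo *TRI) {
      return MI->findRegisterDefOperandIdx(PhysReg, /*isDead=*/false,
                                           /*Overlap=*/true, TRI);
    }
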
@@ -839,14 +881,14 @@ int MachineInstr::findFirstPredOperandIdx() const {
bool MachineInstr::
isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx) const {
if (isInlineAsm()) {
- assert(DefOpIdx >= 2);
+ assert(DefOpIdx >= 3);
const MachineOperand &MO = getOperand(DefOpIdx);
if (!MO.isReg() || !MO.isDef() || MO.getReg() == 0)
return false;
// Determine the actual operand index that corresponds to this index.
unsigned DefNo = 0;
unsigned DefPart = 0;
- for (unsigned i = 1, e = getNumOperands(); i < e; ) {
+ for (unsigned i = 2, e = getNumOperands(); i < e; ) {
const MachineOperand &FMO = getOperand(i);
// After the normal asm operands there may be additional imp-def regs.
if (!FMO.isImm())
@@ -861,7 +903,7 @@ isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx) const {
}
++DefNo;
}
- for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
+ for (unsigned i = 2, e = getNumOperands(); i != e; ++i) {
const MachineOperand &FMO = getOperand(i);
if (!FMO.isImm())
continue;
@@ -904,7 +946,7 @@ isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx) const {
// Find the flag operand corresponding to UseOpIdx
unsigned FlagIdx, NumOps=0;
- for (FlagIdx = 1; FlagIdx < UseOpIdx; FlagIdx += NumOps+1) {
+ for (FlagIdx = 2; FlagIdx < UseOpIdx; FlagIdx += NumOps+1) {
const MachineOperand &UFMO = getOperand(FlagIdx);
// After the normal asm operands there may be additional imp-def regs.
if (!UFMO.isImm())
@@ -922,9 +964,9 @@ isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx) const {
if (!DefOpIdx)
return true;
- unsigned DefIdx = 1;
- // Remember to adjust the index. First operand is asm string, then there
- // is a flag for each.
+ unsigned DefIdx = 2;
+ // Remember to adjust the index. First operand is asm string, second is
+ // the AlignStack bit, then there is a flag for each.
while (DefNo) {
const MachineOperand &FMO = getOperand(DefIdx);
assert(FMO.isImm());
@@ -952,6 +994,16 @@ isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx) const {
return true;
}
+/// clearKillInfo - Clears kill flags on all operands.
+///
+void MachineInstr::clearKillInfo() {
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = getOperand(i);
+ if (MO.isReg() && MO.isUse())
+ MO.setIsKill(false);
+ }
+}
+
/// copyKillDeadInfo - Copies kill / dead operand properties from MI.
///
void MachineInstr::copyKillDeadInfo(const MachineInstr *MI) {
@@ -985,6 +1037,29 @@ void MachineInstr::copyPredicates(const MachineInstr *MI) {
}
}
+void MachineInstr::substituteRegister(unsigned FromReg,
+ unsigned ToReg,
+ unsigned SubIdx,
+ const TargetRegisterInfo &RegInfo) {
+ if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
+ if (SubIdx)
+ ToReg = RegInfo.getSubReg(ToReg, SubIdx);
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = getOperand(i);
+ if (!MO.isReg() || MO.getReg() != FromReg)
+ continue;
+ MO.substPhysReg(ToReg, RegInfo);
+ }
+ } else {
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = getOperand(i);
+ if (!MO.isReg() || MO.getReg() != FromReg)
+ continue;
+ MO.substVirtReg(ToReg, SubIdx, RegInfo);
+ }
+ }
+}
+
/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
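
A sketch of the rewrite loop substituteRegister() is designed for, block-local renaming; renameInBlock and its arguments are hypothetical:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/Target/TargetRegisterInfo.h"
    using namespace llvm;

    // Replace every FromReg operand in MBB with ToReg:SubIdx.  The physical
    // vs. virtual cases are handled inside substituteRegister(), which in
    // turn uses the substVirtReg()/substPhysReg() helpers added earlier.
    static void renameInBlock(MachineBasicBlock &MBB, unsigned FromReg,
                              unsigned ToReg, unsigned SubIdx,
                              const TargetRegisterInfo &TRI) {
      for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
           MII != E; ++MII)
        MII->substituteRegister(FromReg, ToReg, SubIdx, TRI);
    }
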
@@ -1119,19 +1194,60 @@ unsigned MachineInstr::isConstantValuePHI() const {
return Reg;
}
+/// allDefsAreDead - Return true if all the defs of this instruction are dead.
+///
+bool MachineInstr::allDefsAreDead() const {
+ for (unsigned i = 0, e = getNumOperands(); i < e; ++i) {
+ const MachineOperand &MO = getOperand(i);
+ if (!MO.isReg() || MO.isUse())
+ continue;
+ if (!MO.isDead())
+ return false;
+ }
+ return true;
+}
+
void MachineInstr::dump() const {
dbgs() << " " << *this;
}
+static void printDebugLoc(DebugLoc DL, const MachineFunction *MF,
+ raw_ostream &CommentOS) {
+ const LLVMContext &Ctx = MF->getFunction()->getContext();
+ if (!DL.isUnknown()) { // Print source line info.
+ DIScope Scope(DL.getScope(Ctx));
+ // Omit the directory, because it's likely to be long and uninteresting.
+ if (Scope.Verify())
+ CommentOS << Scope.getFilename();
+ else
+ CommentOS << "<unknown>";
+ CommentOS << ':' << DL.getLine();
+ if (DL.getCol() != 0)
+ CommentOS << ':' << DL.getCol();
+ DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(DL.getInlinedAt(Ctx));
+ if (!InlinedAtDL.isUnknown()) {
+ CommentOS << " @[ ";
+ printDebugLoc(InlinedAtDL, MF, CommentOS);
+ CommentOS << " ]";
+ }
+ }
+}
+
void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
// We can be a bit tidier if we know the TargetMachine and/or MachineFunction.
const MachineFunction *MF = 0;
+ const MachineRegisterInfo *MRI = 0;
if (const MachineBasicBlock *MBB = getParent()) {
MF = MBB->getParent();
if (!TM && MF)
TM = &MF->getTarget();
+ if (MF)
+ MRI = &MF->getRegInfo();
}
+ // Save a list of virtual registers.
+ SmallVector<unsigned, 8> VirtRegs;
+
// Print explicitly defined operands on the left of an assignment syntax.
unsigned StartOp = 0, e = getNumOperands();
for (; StartOp < e && getOperand(StartOp).isReg() &&
@@ -1140,6 +1256,9 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
++StartOp) {
if (StartOp != 0) OS << ", ";
getOperand(StartOp).print(OS, TM);
+ unsigned Reg = getOperand(StartOp).getReg();
+ if (Reg && TargetRegisterInfo::isVirtualRegister(Reg))
+ VirtRegs.push_back(Reg);
}
if (StartOp != 0)
@@ -1154,6 +1273,10 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
const MachineOperand &MO = getOperand(i);
+ if (MO.isReg() && MO.getReg() &&
+ TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ VirtRegs.push_back(MO.getReg());
+
// Omit call-clobbered registers which aren't used anywhere. This makes
// call instructions much less noisy on targets where calls clobber lots
// of registers. Don't rely on MO.isDead() because we may be called before
@@ -1188,7 +1311,17 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
if (TOI.isOptionalDef())
OS << "opt:";
}
- MO.print(OS, TM);
+ if (isDebugValue() && MO.isMetadata()) {
+ // Pretty print DBG_VALUE instructions.
+ const MDNode *MD = MO.getMetadata();
+ if (const MDString *MDS = dyn_cast<MDString>(MD->getOperand(2)))
+ OS << "!\"" << MDS->getString() << '\"';
+ else
+ MO.print(OS, TM);
+ } else if (TM && (isInsertSubreg() || isRegSequence()) && MO.isImm()) {
+ OS << TM->getRegisterInfo()->getSubRegIndexName(MO.getImm());
+ } else
+ MO.print(OS, TM);
}
// Briefly indicate whether any call clobbers were omitted.
@@ -1205,27 +1338,33 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
for (mmo_iterator i = memoperands_begin(), e = memoperands_end();
i != e; ++i) {
OS << **i;
- if (next(i) != e)
+ if (llvm::next(i) != e)
OS << " ";
}
}
+ // Print the regclass of any virtual registers encountered.
+ if (MRI && !VirtRegs.empty()) {
+ if (!HaveSemi) OS << ";"; HaveSemi = true;
+ for (unsigned i = 0; i != VirtRegs.size(); ++i) {
+ const TargetRegisterClass *RC = MRI->getRegClass(VirtRegs[i]);
+ OS << " " << RC->getName() << ":%reg" << VirtRegs[i];
+ for (unsigned j = i+1; j != VirtRegs.size();) {
+ if (MRI->getRegClass(VirtRegs[j]) != RC) {
+ ++j;
+ continue;
+ }
+ if (VirtRegs[i] != VirtRegs[j])
+ OS << "," << VirtRegs[j];
+ VirtRegs.erase(VirtRegs.begin()+j);
+ }
+ }
+ }
+
if (!debugLoc.isUnknown() && MF) {
if (!HaveSemi) OS << ";";
-
- // TODO: print InlinedAtLoc information
-
- DILocation DLT = MF->getDILocation(debugLoc);
- DIScope Scope = DLT.getScope();
OS << " dbg:";
- // Omit the directory, since it's usually long and uninteresting.
- if (!Scope.isNull())
- OS << Scope.getFilename();
- else
- OS << "<unknown>";
- OS << ':' << DLT.getLineNumber();
- if (DLT.getColumnNumber() != 0)
- OS << ':' << DLT.getColumnNumber();
+ printDebugLoc(debugLoc, MF, OS);
}
OS << "\n";
@@ -1349,11 +1488,40 @@ bool MachineInstr::addRegisterDead(unsigned IncomingReg,
void MachineInstr::addRegisterDefined(unsigned IncomingReg,
const TargetRegisterInfo *RegInfo) {
- MachineOperand *MO = findRegisterDefOperand(IncomingReg, false, RegInfo);
- if (!MO || MO->getSubReg())
- addOperand(MachineOperand::CreateReg(IncomingReg,
- true /*IsDef*/,
- true /*IsImp*/));
+ if (TargetRegisterInfo::isPhysicalRegister(IncomingReg)) {
+ MachineOperand *MO = findRegisterDefOperand(IncomingReg, false, RegInfo);
+ if (MO)
+ return;
+ } else {
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = getOperand(i);
+ if (MO.isReg() && MO.getReg() == IncomingReg && MO.isDef() &&
+ MO.getSubReg() == 0)
+ return;
+ }
+ }
+ addOperand(MachineOperand::CreateReg(IncomingReg,
+ true /*IsDef*/,
+ true /*IsImp*/));
+}
+
+void MachineInstr::setPhysRegsDeadExcept(const SmallVectorImpl<unsigned> &UsedRegs,
+ const TargetRegisterInfo &TRI) {
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = getOperand(i);
+ if (!MO.isReg() || !MO.isDef()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg == 0) continue;
+ bool Dead = true;
+ for (SmallVectorImpl<unsigned>::const_iterator I = UsedRegs.begin(),
+ E = UsedRegs.end(); I != E; ++I)
+ if (TRI.regsOverlap(*I, Reg)) {
+ Dead = false;
+ break;
+ }
+ // If there are no uses, including partial uses, the def is dead.
+ if (Dead) MO.setIsDead();
+ }
}
unsigned
@@ -1363,30 +1531,33 @@ MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
const MachineOperand &MO = MI->getOperand(i);
uint64_t Key = (uint64_t)MO.getType() << 32;
switch (MO.getType()) {
- default: break;
- case MachineOperand::MO_Register:
- if (MO.isDef() && MO.getReg() &&
- TargetRegisterInfo::isVirtualRegister(MO.getReg()))
- continue; // Skip virtual register defs.
- Key |= MO.getReg();
- break;
- case MachineOperand::MO_Immediate:
- Key |= MO.getImm();
- break;
- case MachineOperand::MO_FrameIndex:
- case MachineOperand::MO_ConstantPoolIndex:
- case MachineOperand::MO_JumpTableIndex:
- Key |= MO.getIndex();
- break;
- case MachineOperand::MO_MachineBasicBlock:
- Key |= DenseMapInfo<void*>::getHashValue(MO.getMBB());
- break;
- case MachineOperand::MO_GlobalAddress:
- Key |= DenseMapInfo<void*>::getHashValue(MO.getGlobal());
- break;
- case MachineOperand::MO_BlockAddress:
- Key |= DenseMapInfo<void*>::getHashValue(MO.getBlockAddress());
- break;
+ default: break;
+ case MachineOperand::MO_Register:
+ if (MO.isDef() && MO.getReg() &&
+ TargetRegisterInfo::isVirtualRegister(MO.getReg()))
+ continue; // Skip virtual register defs.
+ Key |= MO.getReg();
+ break;
+ case MachineOperand::MO_Immediate:
+ Key |= MO.getImm();
+ break;
+ case MachineOperand::MO_FrameIndex:
+ case MachineOperand::MO_ConstantPoolIndex:
+ case MachineOperand::MO_JumpTableIndex:
+ Key |= MO.getIndex();
+ break;
+ case MachineOperand::MO_MachineBasicBlock:
+ Key |= DenseMapInfo<void*>::getHashValue(MO.getMBB());
+ break;
+ case MachineOperand::MO_GlobalAddress:
+ Key |= DenseMapInfo<void*>::getHashValue(MO.getGlobal());
+ break;
+ case MachineOperand::MO_BlockAddress:
+ Key |= DenseMapInfo<void*>::getHashValue(MO.getBlockAddress());
+ break;
+ case MachineOperand::MO_MCSymbol:
+ Key |= DenseMapInfo<void*>::getHashValue(MO.getMCSymbol());
+ break;
}
Key += ~(Key << 32);
Key ^= (Key >> 22);
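
The new setPhysRegsDeadExcept() is aimed at call lowering: once the call is emitted, every physreg def that overlaps none of the still-live registers can be flagged dead. A minimal sketch (pruneCallDefs, CallMI and RetReg are hypothetical):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/Target/TargetRegisterInfo.h"
    using namespace llvm;

    static void pruneCallDefs(MachineInstr *CallMI, unsigned RetReg,
                              const TargetRegisterInfo &TRI) {
      SmallVector<unsigned, 4> UsedRegs;
      if (RetReg)
        UsedRegs.push_back(RetReg);   // registers still read after the call
      // Any def of CallMI that overlaps none of UsedRegs is marked dead.
      CallMI->setPhysRegsDeadExcept(UsedRegs, TRI);
    }
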
diff --git a/libclamav/c++/llvm/lib/CodeGen/MachineLICM.cpp b/libclamav/c++/llvm/lib/CodeGen/MachineLICM.cpp
index 0361694..1a74b74 100644
--- a/libclamav/c++/llvm/lib/CodeGen/MachineLICM.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/MachineLICM.cpp
@@ -22,8 +22,8 @@
#define DEBUG_TYPE "machine-licm"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -33,6 +33,7 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -41,20 +42,23 @@ using namespace llvm;
STATISTIC(NumHoisted, "Number of machine instructions hoisted out of loops");
STATISTIC(NumCSEed, "Number of hoisted machine instructions CSEed");
+STATISTIC(NumPostRAHoisted,
+ "Number of machine instructions hoisted out of loops post regalloc");
namespace {
class MachineLICM : public MachineFunctionPass {
- MachineConstantPool *MCP;
+ bool PreRegAlloc;
+
const TargetMachine *TM;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
- BitVector AllocatableSet;
+ const MachineFrameInfo *MFI;
+ MachineRegisterInfo *RegInfo;
// Various analyses that we use...
AliasAnalysis *AA; // Alias analysis info.
- MachineLoopInfo *LI; // Current MachineLoopInfo
+ MachineLoopInfo *MLI; // Current MachineLoopInfo
MachineDominatorTree *DT; // Machine dominator tree for the cur loop
- MachineRegisterInfo *RegInfo; // Machine register information
// State that is updated as we process loops
bool Changed; // True if a loop is changed.
@@ -62,17 +66,23 @@ namespace {
MachineLoop *CurLoop; // The current loop we are working on.
MachineBasicBlock *CurPreheader; // The preheader for CurLoop.
- // For each opcode, keep a list of potentail CSE instructions.
+ BitVector AllocatableSet;
+
+ // For each opcode, keep a list of potential CSE instructions.
DenseMap<unsigned, std::vector<const MachineInstr*> > CSEMap;
+
public:
static char ID; // Pass identification, replacement for typeid
- MachineLICM() : MachineFunctionPass(&ID) {}
+ MachineLICM() :
+ MachineFunctionPass(ID), PreRegAlloc(true) {}
+
+ explicit MachineLICM(bool PreRA) :
+ MachineFunctionPass(ID), PreRegAlloc(PreRA) {}
virtual bool runOnMachineFunction(MachineFunction &MF);
const char *getPassName() const { return "Machine Instruction LICM"; }
- // FIXME: Loop preheaders?
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<MachineLoopInfo>();
@@ -88,6 +98,39 @@ namespace {
}
private:
+ /// CandidateInfo - Keep track of information about hoisting candidates.
+ struct CandidateInfo {
+ MachineInstr *MI;
+ unsigned Def;
+ int FI;
+ CandidateInfo(MachineInstr *mi, unsigned def, int fi)
+ : MI(mi), Def(def), FI(fi) {}
+ };
+
+ /// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
+ /// invariants out to the preheader.
+ void HoistRegionPostRA();
+
+ /// HoistPostRA - When an instruction is found to only use loop invariant
+ /// operands that is safe to hoist, this instruction is called to do the
+ /// dirty work.
+ void HoistPostRA(MachineInstr *MI, unsigned Def);
+
+ /// ProcessMI - Examine the instruction for a potential LICM candidate. Also
+ /// gather register def and frame object update information.
+ void ProcessMI(MachineInstr *MI, unsigned *PhysRegDefs,
+ SmallSet<int, 32> &StoredFIs,
+ SmallVector<CandidateInfo, 32> &Candidates);
+
+ /// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the
+ /// current loop.
+ void AddToLiveIns(unsigned Reg);
+
+ /// IsLICMCandidate - Returns true if the instruction may be a suitable
+ /// candidate for LICM. e.g. If the instruction is a call, then it's
+ /// obviously not safe to hoist it.
+ bool IsLICMCandidate(MachineInstr &I);
+
/// IsLoopInvariantInst - Returns true if the instruction is loop
/// invariant. I.e., all virtual register operands are defined outside of
/// the loop, physical registers aren't accessed (explicitly or implicitly),
@@ -138,69 +181,292 @@ namespace {
/// current loop preheader that may become duplicates of instructions that
/// are hoisted out of the loop.
void InitCSEMap(MachineBasicBlock *BB);
+
+ /// getCurPreheader - Get the preheader for the current loop, splitting
+ /// a critical edge if needed.
+ MachineBasicBlock *getCurPreheader();
};
} // end anonymous namespace
char MachineLICM::ID = 0;
-static RegisterPass<MachineLICM>
-X("machinelicm", "Machine Loop Invariant Code Motion");
+INITIALIZE_PASS(MachineLICM, "machinelicm",
+ "Machine Loop Invariant Code Motion", false, false);
-FunctionPass *llvm::createMachineLICMPass() { return new MachineLICM(); }
+FunctionPass *llvm::createMachineLICMPass(bool PreRegAlloc) {
+ return new MachineLICM(PreRegAlloc);
+}
-/// LoopIsOuterMostWithPreheader - Test if the given loop is the outer-most
-/// loop that has a preheader.
-static bool LoopIsOuterMostWithPreheader(MachineLoop *CurLoop) {
+/// LoopIsOuterMostWithPredecessor - Test if the given loop is the outer-most
+/// loop that has a unique predecessor.
+static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
+ // Check whether this loop even has a unique predecessor.
+ if (!CurLoop->getLoopPredecessor())
+ return false;
+ // Ok, now check to see if any of its outer loops do.
for (MachineLoop *L = CurLoop->getParentLoop(); L; L = L->getParentLoop())
- if (L->getLoopPreheader())
+ if (L->getLoopPredecessor())
return false;
+ // None of them did, so this is the outermost with a unique predecessor.
return true;
}
-/// Hoist expressions out of the specified loop. Note, alias info for inner loop
-/// is not preserved so it is not a good idea to run LICM multiple times on one
-/// loop.
-///
bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "******** Machine LICM ********\n");
+ if (PreRegAlloc)
+ DEBUG(dbgs() << "******** Pre-regalloc Machine LICM ********\n");
+ else
+ DEBUG(dbgs() << "******** Post-regalloc Machine LICM ********\n");
Changed = FirstInLoop = false;
- MCP = MF.getConstantPool();
TM = &MF.getTarget();
TII = TM->getInstrInfo();
TRI = TM->getRegisterInfo();
+ MFI = MF.getFrameInfo();
RegInfo = &MF.getRegInfo();
AllocatableSet = TRI->getAllocatableSet(MF);
// Get our Loop information...
- LI = &getAnalysis<MachineLoopInfo>();
- DT = &getAnalysis<MachineDominatorTree>();
- AA = &getAnalysis<AliasAnalysis>();
+ MLI = &getAnalysis<MachineLoopInfo>();
+ DT = &getAnalysis<MachineDominatorTree>();
+ AA = &getAnalysis<AliasAnalysis>();
+
+ SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end());
+ while (!Worklist.empty()) {
+ CurLoop = Worklist.pop_back_val();
+ CurPreheader = 0;
+
+ // If this is done before regalloc, only visit outer-most preheader-sporting
+ // loops.
+ if (PreRegAlloc && !LoopIsOuterMostWithPredecessor(CurLoop)) {
+ Worklist.append(CurLoop->begin(), CurLoop->end());
+ continue;
+ }
- for (MachineLoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I) {
- CurLoop = *I;
+ if (!PreRegAlloc)
+ HoistRegionPostRA();
+ else {
+ // CSEMap is initialized for loop header when the first instruction is
+ // being hoisted.
+ MachineDomTreeNode *N = DT->getNode(CurLoop->getHeader());
+ FirstInLoop = true;
+ HoistRegion(N);
+ CSEMap.clear();
+ }
+ }
+
+ return Changed;
+}
- // Only visit outer-most preheader-sporting loops.
- if (!LoopIsOuterMostWithPreheader(CurLoop))
+/// InstructionStoresToFI - Return true if instruction stores to the
+/// specified frame.
+static bool InstructionStoresToFI(const MachineInstr *MI, int FI) {
+ for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
+ oe = MI->memoperands_end(); o != oe; ++o) {
+ if (!(*o)->isStore() || !(*o)->getValue())
continue;
+ if (const FixedStackPseudoSourceValue *Value =
+ dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
+ if (Value->getFrameIndex() == FI)
+ return true;
+ }
+ }
+ return false;
+}
- // Determine the block to which to hoist instructions. If we can't find a
- // suitable loop preheader, we can't do any hoisting.
- //
- // FIXME: We are only hoisting if the basic block coming into this loop
- // has only one successor. This isn't the case in general because we haven't
- // broken critical edges or added preheaders.
- CurPreheader = CurLoop->getLoopPreheader();
- if (!CurPreheader)
+/// ProcessMI - Examine the instruction for a potential LICM candidate. Also
+/// gather register def and frame object update information.
+void MachineLICM::ProcessMI(MachineInstr *MI,
+ unsigned *PhysRegDefs,
+ SmallSet<int, 32> &StoredFIs,
+ SmallVector<CandidateInfo, 32> &Candidates) {
+ bool RuledOut = false;
+ bool HasNonInvariantUse = false;
+ unsigned Def = 0;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (MO.isFI()) {
+ // Remember if the instruction stores to the frame index.
+ int FI = MO.getIndex();
+ if (!StoredFIs.count(FI) &&
+ MFI->isSpillSlotObjectIndex(FI) &&
+ InstructionStoresToFI(MI, FI))
+ StoredFIs.insert(FI);
+ HasNonInvariantUse = true;
+ continue;
+ }
+
+ if (!MO.isReg())
continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
+ "Not expecting virtual register!");
+
+ if (!MO.isDef()) {
+ if (Reg && PhysRegDefs[Reg])
+ // If it's using a non-loop-invariant register, then it's obviously not
+ // safe to hoist.
+ HasNonInvariantUse = true;
+ continue;
+ }
- // CSEMap is initialized for loop header when the first instruction is
- // being hoisted.
- FirstInLoop = true;
- HoistRegion(DT->getNode(CurLoop->getHeader()));
- CSEMap.clear();
+ if (MO.isImplicit()) {
+ ++PhysRegDefs[Reg];
+ for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
+ ++PhysRegDefs[*AS];
+ if (!MO.isDead())
+ // Non-dead implicit def? This cannot be hoisted.
+ RuledOut = true;
+ // No need to check if a dead implicit def is also defined by
+ // another instruction.
+ continue;
+ }
+
+ // FIXME: For now, avoid instructions with multiple defs, unless
+ // it's a dead implicit def.
+ if (Def)
+ RuledOut = true;
+ else
+ Def = Reg;
+
+ // If we have already seen another instruction that defines the same
+ // register, then this is not safe.
+ if (++PhysRegDefs[Reg] > 1)
+ // The register defined by MI is also defined by another instruction in
+ // the loop, so it cannot be a LICM candidate.
+ RuledOut = true;
+ for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
+ if (++PhysRegDefs[*AS] > 1)
+ RuledOut = true;
}
- return Changed;
+ // Only consider reloads for now and remats which do not have register
+ // operands. FIXME: Consider unfold load folding instructions.
+ if (Def && !RuledOut) {
+ int FI = INT_MIN;
+ if ((!HasNonInvariantUse && IsLICMCandidate(*MI)) ||
+ (TII->isLoadFromStackSlot(MI, FI) && MFI->isSpillSlotObjectIndex(FI)))
+ Candidates.push_back(CandidateInfo(MI, Def, FI));
+ }
+}
+
+/// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
+/// invariants out to the preheader.
+void MachineLICM::HoistRegionPostRA() {
+ unsigned NumRegs = TRI->getNumRegs();
+ unsigned *PhysRegDefs = new unsigned[NumRegs];
+ std::fill(PhysRegDefs, PhysRegDefs + NumRegs, 0);
+
+ SmallVector<CandidateInfo, 32> Candidates;
+ SmallSet<int, 32> StoredFIs;
+
+ // Walk the entire region, count number of defs for each register, and
+ // collect potential LICM candidates.
+ const std::vector<MachineBasicBlock*> Blocks = CurLoop->getBlocks();
+ for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
+ MachineBasicBlock *BB = Blocks[i];
+ // Conservatively treat live-in's as an external def.
+ // FIXME: That means a reload that is reused in successor block(s) will not
+ // be LICM'ed.
+ for (MachineBasicBlock::livein_iterator I = BB->livein_begin(),
+ E = BB->livein_end(); I != E; ++I) {
+ unsigned Reg = *I;
+ ++PhysRegDefs[Reg];
+ for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
+ ++PhysRegDefs[*AS];
+ }
+
+ for (MachineBasicBlock::iterator
+ MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
+ MachineInstr *MI = &*MII;
+ ProcessMI(MI, PhysRegDefs, StoredFIs, Candidates);
+ }
+ }
+
+ // Now evaluate whether the potential candidates qualify.
+ // 1. Check if the candidate defined register is defined by another
+ // instruction in the loop.
+ // 2. If the candidate is a load from stack slot (always true for now),
+ // check if the slot is stored anywhere in the loop.
+ for (unsigned i = 0, e = Candidates.size(); i != e; ++i) {
+ if (Candidates[i].FI != INT_MIN &&
+ StoredFIs.count(Candidates[i].FI))
+ continue;
+
+ if (PhysRegDefs[Candidates[i].Def] == 1) {
+ bool Safe = true;
+ MachineInstr *MI = Candidates[i].MI;
+ for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
+ const MachineOperand &MO = MI->getOperand(j);
+ if (!MO.isReg() || MO.isDef() || !MO.getReg())
+ continue;
+ if (PhysRegDefs[MO.getReg()]) {
+ // If it's using a non-loop-invariant register, then it's obviously
+ // not safe to hoist.
+ Safe = false;
+ break;
+ }
+ }
+ if (Safe)
+ HoistPostRA(MI, Candidates[i].Def);
+ }
+ }
+
+ delete[] PhysRegDefs;
+}
+
+/// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the current
+/// loop, and make sure it is not killed by any instructions in the loop.
+void MachineLICM::AddToLiveIns(unsigned Reg) {
+ const std::vector<MachineBasicBlock*> Blocks = CurLoop->getBlocks();
+ for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
+ MachineBasicBlock *BB = Blocks[i];
+ if (!BB->isLiveIn(Reg))
+ BB->addLiveIn(Reg);
+ for (MachineBasicBlock::iterator
+ MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
+ MachineInstr *MI = &*MII;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.getReg() || MO.isDef()) continue;
+ if (MO.getReg() == Reg || TRI->isSuperRegister(Reg, MO.getReg()))
+ MO.setIsKill(false);
+ }
+ }
+ }
+}
+
+/// HoistPostRA - When an instruction is found to only use loop invariant
+/// operands that are safe to hoist, this instruction is called to do the
+/// dirty work.
+void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
+ MachineBasicBlock *Preheader = getCurPreheader();
+ if (!Preheader) return;
+
+ // Now move the instruction to the preheader, inserting it before any
+ // terminator instructions.
+ DEBUG({
+ dbgs() << "Hoisting " << *MI;
+ if (Preheader->getBasicBlock())
+ dbgs() << " to MachineBasicBlock "
+ << Preheader->getName();
+ if (MI->getParent()->getBasicBlock())
+ dbgs() << " from MachineBasicBlock "
+ << MI->getParent()->getName();
+ dbgs() << "\n";
+ });
+
+ // Splice the instruction to the preheader.
+ MachineBasicBlock *MBB = MI->getParent();
+ Preheader->splice(Preheader->getFirstTerminator(), MBB, MI);
+
+ // Add the register to the livein list of all the BBs in the current loop since a
+ // loop invariant must be kept live throughout the whole loop. This is
+ // important to ensure later passes do not scavenge the def register.
+ AddToLiveIns(Def);
+
+ ++NumPostRAHoisted;
+ Changed = true;
}
/// HoistRegion - Walk the specified region of the CFG (defined by all blocks
@@ -222,10 +488,26 @@ void MachineLICM::HoistRegion(MachineDomTreeNode *N) {
MII = NextMII;
}
- const std::vector<MachineDomTreeNode*> &Children = N->getChildren();
+ // Don't hoist things out of a large switch statement. This often causes
+ // code to be hoisted that wasn't going to be executed, and increases
+ // register pressure in a situation where it's likely to matter.
+ if (BB->succ_size() < 25) {
+ const std::vector<MachineDomTreeNode*> &Children = N->getChildren();
+ for (unsigned I = 0, E = Children.size(); I != E; ++I)
+ HoistRegion(Children[I]);
+ }
+}
- for (unsigned I = 0, E = Children.size(); I != E; ++I)
- HoistRegion(Children[I]);
+/// IsLICMCandidate - Returns true if the instruction may be a suitable
+/// candidate for LICM. e.g. If the instruction is a call, then it's obviously
+/// not safe to hoist it.
+bool MachineLICM::IsLICMCandidate(MachineInstr &I) {
+ // Check if it's safe to move the instruction.
+ bool DontMoveAcrossStore = true;
+ if (!I.isSafeToMove(TII, AA, DontMoveAcrossStore))
+ return false;
+
+ return true;
}
/// IsLoopInvariantInst - Returns true if the instruction is loop
@@ -234,24 +516,9 @@ void MachineLICM::HoistRegion(MachineDomTreeNode *N) {
/// effects that aren't captured by the operands or other flags.
///
bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
- const TargetInstrDesc &TID = I.getDesc();
-
- // Ignore stuff that we obviously can't hoist.
- if (TID.mayStore() || TID.isCall() || TID.isTerminator() ||
- TID.hasUnmodeledSideEffects())
+ if (!IsLICMCandidate(I))
return false;
- if (TID.mayLoad()) {
- // Okay, this instruction does a load. As a refinement, we allow the target
- // to decide whether the loaded value is actually a constant. If so, we can
- // actually use it as a load.
- if (!I.isInvariantLoad(AA))
- // FIXME: we should be able to hoist loads with no other side effects if
- // there are no other instructions which can change memory in this loop.
- // This is a trivial form of alias analysis.
- return false;
- }
-
// The instruction is loop invariant if all of its operands are.
for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
const MachineOperand &MO = I.getOperand(i);
@@ -341,9 +608,6 @@ bool MachineLICM::isLoadFromConstantMemory(MachineInstr *MI) {
/// IsProfitableToHoist - Return true if it is potentially profitable to hoist
/// the given loop invariant.
bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
- if (MI.isImplicitDef())
- return false;
-
// FIXME: For now, only hoist re-materilizable instructions. LICM will
// increase register pressure. We want to make sure it doesn't increase
// spilling.
@@ -453,7 +717,9 @@ MachineLICM::LookForDuplicate(const MachineInstr *MI,
bool MachineLICM::EliminateCSE(MachineInstr *MI,
DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI) {
- if (CI == CSEMap.end())
+ // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
+ // the undef property onto uses.
+ if (CI == CSEMap.end() || MI->isImplicitDef())
return false;
if (const MachineInstr *Dup = LookForDuplicate(MI, CI->second)) {
@@ -471,8 +737,10 @@ bool MachineLICM::EliminateCSE(MachineInstr *MI,
"Instructions with different phys regs are not identical!");
if (MO.isReg() && MO.isDef() &&
- !TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
+ !TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
RegInfo->replaceRegWith(MO.getReg(), Dup->getOperand(i).getReg());
+ RegInfo->clearKillFlags(Dup->getOperand(i).getReg());
+ }
}
MI->eraseFromParent();
++NumCSEed;
@@ -485,6 +753,9 @@ bool MachineLICM::EliminateCSE(MachineInstr *MI,
/// that are safe to hoist, this instruction is called to do the dirty work.
///
void MachineLICM::Hoist(MachineInstr *MI) {
+ MachineBasicBlock *Preheader = getCurPreheader();
+ if (!Preheader) return;
+
// First check whether we should hoist this instruction.
if (!IsLoopInvariantInst(*MI) || !IsProfitableToHoist(*MI)) {
// If not, try unfolding a hoistable load.
@@ -496,9 +767,9 @@ void MachineLICM::Hoist(MachineInstr *MI) {
// terminator instructions.
DEBUG({
dbgs() << "Hoisting " << *MI;
- if (CurPreheader->getBasicBlock())
+ if (Preheader->getBasicBlock())
dbgs() << " to MachineBasicBlock "
- << CurPreheader->getName();
+ << Preheader->getName();
if (MI->getParent()->getBasicBlock())
dbgs() << " from MachineBasicBlock "
<< MI->getParent()->getName();
@@ -507,7 +778,10 @@ void MachineLICM::Hoist(MachineInstr *MI) {
// If this is the first instruction being hoisted to the preheader,
// initialize the CSE map with potential common expressions.
- InitCSEMap(CurPreheader);
+ if (FirstInLoop) {
+ InitCSEMap(Preheader);
+ FirstInLoop = false;
+ }
// Look for opportunity to CSE the hoisted instruction.
unsigned Opcode = MI->getOpcode();
@@ -515,7 +789,16 @@ void MachineLICM::Hoist(MachineInstr *MI) {
CI = CSEMap.find(Opcode);
if (!EliminateCSE(MI, CI)) {
// Otherwise, splice the instruction to the preheader.
- CurPreheader->splice(CurPreheader->getFirstTerminator(),MI->getParent(),MI);
+ Preheader->splice(Preheader->getFirstTerminator(),MI->getParent(),MI);
+
+ // Clear the kill flags of any register this instruction defines,
+ // since they may need to be live throughout the entire loop
+ // rather than just live for part of it.
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isDef() && !MO.isDead())
+ RegInfo->clearKillFlags(MO.getReg());
+ }
// Add to the CSE map.
if (CI != CSEMap.end())
@@ -530,3 +813,30 @@ void MachineLICM::Hoist(MachineInstr *MI) {
++NumHoisted;
Changed = true;
}
+
+MachineBasicBlock *MachineLICM::getCurPreheader() {
+ // Determine the block to which to hoist instructions. If we can't find a
+ // suitable loop predecessor, we can't do any hoisting.
+
+ // If we've tried to get a preheader and failed, don't try again.
+ if (CurPreheader == reinterpret_cast<MachineBasicBlock *>(-1))
+ return 0;
+
+ if (!CurPreheader) {
+ CurPreheader = CurLoop->getLoopPreheader();
+ if (!CurPreheader) {
+ MachineBasicBlock *Pred = CurLoop->getLoopPredecessor();
+ if (!Pred) {
+ CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
+ return 0;
+ }
+
+ CurPreheader = Pred->SplitCriticalEdge(CurLoop->getHeader(), this);
+ if (!CurPreheader) {
+ CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
+ return 0;
+ }
+ }
+ }
+ return CurPreheader;
+}
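
With the PreRegAlloc flag, the codegen pipeline can now schedule this pass twice: once on virtual registers and once after register allocation to hoist invariant spill reloads. A sketch of the expected wiring (PM is a hypothetical PassManagerBase; the real scheduling lives in LLVMTargetMachine):

    #include "llvm/CodeGen/Passes.h"
    #include "llvm/PassManager.h"
    using namespace llvm;

    static void addLICMPasses(PassManagerBase &PM) {
      PM.add(createMachineLICMPass(/*PreRegAlloc=*/true));   // hoist on virtual regs
      // ... register allocation and rewriting run in between ...
      PM.add(createMachineLICMPass(/*PreRegAlloc=*/false));  // hoist spill reloads
    }
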
diff --git a/libclamav/c++/llvm/lib/CodeGen/MachineLoopInfo.cpp b/libclamav/c++/llvm/lib/CodeGen/MachineLoopInfo.cpp
index 269538b..bca4b0c 100644
--- a/libclamav/c++/llvm/lib/CodeGen/MachineLoopInfo.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/MachineLoopInfo.cpp
@@ -30,10 +30,10 @@ TEMPLATE_INSTANTIATION(MLIB);
}
char MachineLoopInfo::ID = 0;
-static RegisterPass<MachineLoopInfo>
-X("machine-loops", "Machine Natural Loop Construction", true);
+INITIALIZE_PASS(MachineLoopInfo, "machine-loops",
+ "Machine Natural Loop Construction", true, true);
-const PassInfo *const llvm::MachineLoopInfoID = &X;
+char &llvm::MachineLoopInfoID = MachineLoopInfo::ID;
bool MachineLoopInfo::runOnMachineFunction(MachineFunction &) {
releaseMemory();
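
MachineLoopInfoID is now exported as a char& rather than a PassInfo*, so dependent passes name it by ID. A minimal sketch of such a pass, assuming the char&-based addRequiredID/addPreservedID overloads in the bundled headers (NeedsLoops is hypothetical):

    #include "llvm/CodeGen/MachineFunctionPass.h"
    #include "llvm/CodeGen/Passes.h"
    using namespace llvm;

    namespace {
    struct NeedsLoops : public MachineFunctionPass {
      static char ID;
      NeedsLoops() : MachineFunctionPass(ID) {}
      virtual void getAnalysisUsage(AnalysisUsage &AU) const {
        AU.addRequiredID(MachineLoopInfoID);   // by char& ID, as exported above
        AU.addPreservedID(MachineLoopInfoID);
        MachineFunctionPass::getAnalysisUsage(AU);
      }
      virtual bool runOnMachineFunction(MachineFunction &MF) { return false; }
    };
    char NeedsLoops::ID = 0;
    }
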
diff --git a/libclamav/c++/llvm/lib/CodeGen/MachineModuleInfo.cpp b/libclamav/c++/llvm/lib/CodeGen/MachineModuleInfo.cpp
index 5052af7..b647a4d 100644
--- a/libclamav/c++/llvm/lib/CodeGen/MachineModuleInfo.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/MachineModuleInfo.cpp
@@ -10,6 +10,9 @@
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Module.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -17,50 +20,277 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/GlobalVariable.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/Instructions.h"
-#include "llvm/Module.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
using namespace llvm::dwarf;
// Handle the Pass registration stuff necessary to use TargetData's.
-static RegisterPass<MachineModuleInfo>
-X("machinemoduleinfo", "Module Information");
+INITIALIZE_PASS(MachineModuleInfo, "machinemoduleinfo",
+ "Machine Module Information", false, false);
char MachineModuleInfo::ID = 0;
// Out of line virtual method.
MachineModuleInfoImpl::~MachineModuleInfoImpl() {}
+namespace llvm {
+class MMIAddrLabelMapCallbackPtr : CallbackVH {
+ MMIAddrLabelMap *Map;
+public:
+ MMIAddrLabelMapCallbackPtr() : Map(0) {}
+ MMIAddrLabelMapCallbackPtr(Value *V) : CallbackVH(V), Map(0) {}
+
+ void setPtr(BasicBlock *BB) {
+ ValueHandleBase::operator=(BB);
+ }
+
+ void setMap(MMIAddrLabelMap *map) { Map = map; }
+
+ virtual void deleted();
+ virtual void allUsesReplacedWith(Value *V2);
+};
+
+class MMIAddrLabelMap {
+ MCContext &Context;
+ struct AddrLabelSymEntry {
+ /// Symbols - The symbols for the label. This is a pointer union that is
+ /// either one symbol (the common case) or a list of symbols.
+ PointerUnion<MCSymbol *, std::vector<MCSymbol*>*> Symbols;
+
+ Function *Fn; // The containing function of the BasicBlock.
+ unsigned Index; // The index in BBCallbacks for the BasicBlock.
+ };
+
+ DenseMap<AssertingVH<BasicBlock>, AddrLabelSymEntry> AddrLabelSymbols;
+
+ /// BBCallbacks - Callbacks for the BasicBlocks that we have entries for. We
+ /// use this so we get notified if a block is deleted or RAUWd.
+ std::vector<MMIAddrLabelMapCallbackPtr> BBCallbacks;
+
+ /// DeletedAddrLabelsNeedingEmission - This is a per-function list of symbols
+ /// whose corresponding BasicBlock got deleted. These symbols need to be
+ /// emitted at some point in the file, so AsmPrinter emits them after the
+ /// function body.
+ DenseMap<AssertingVH<Function>, std::vector<MCSymbol*> >
+ DeletedAddrLabelsNeedingEmission;
+public:
+
+ MMIAddrLabelMap(MCContext &context) : Context(context) {}
+ ~MMIAddrLabelMap() {
+ assert(DeletedAddrLabelsNeedingEmission.empty() &&
+ "Some labels for deleted blocks never got emitted");
+
+ // Deallocate any of the 'list of symbols' case.
+ for (DenseMap<AssertingVH<BasicBlock>, AddrLabelSymEntry>::iterator
+ I = AddrLabelSymbols.begin(), E = AddrLabelSymbols.end(); I != E; ++I)
+ if (I->second.Symbols.is<std::vector<MCSymbol*>*>())
+ delete I->second.Symbols.get<std::vector<MCSymbol*>*>();
+ }
+
+ MCSymbol *getAddrLabelSymbol(BasicBlock *BB);
+ std::vector<MCSymbol*> getAddrLabelSymbolToEmit(BasicBlock *BB);
+
+ void takeDeletedSymbolsForFunction(Function *F,
+ std::vector<MCSymbol*> &Result);
+
+ void UpdateForDeletedBlock(BasicBlock *BB);
+ void UpdateForRAUWBlock(BasicBlock *Old, BasicBlock *New);
+};
+}
+
+MCSymbol *MMIAddrLabelMap::getAddrLabelSymbol(BasicBlock *BB) {
+ assert(BB->hasAddressTaken() &&
+ "Shouldn't get label for block without address taken");
+ AddrLabelSymEntry &Entry = AddrLabelSymbols[BB];
+
+ // If we already had an entry for this block, just return it.
+ if (!Entry.Symbols.isNull()) {
+ assert(BB->getParent() == Entry.Fn && "Parent changed");
+ if (Entry.Symbols.is<MCSymbol*>())
+ return Entry.Symbols.get<MCSymbol*>();
+ return (*Entry.Symbols.get<std::vector<MCSymbol*>*>())[0];
+ }
+
+ // Otherwise, this is a new entry, create a new symbol for it and add an
+ // entry to BBCallbacks so we can be notified if the BB is deleted or RAUWd.
+ BBCallbacks.push_back(BB);
+ BBCallbacks.back().setMap(this);
+ Entry.Index = BBCallbacks.size()-1;
+ Entry.Fn = BB->getParent();
+ MCSymbol *Result = Context.CreateTempSymbol();
+ Entry.Symbols = Result;
+ return Result;
+}
+
+std::vector<MCSymbol*>
+MMIAddrLabelMap::getAddrLabelSymbolToEmit(BasicBlock *BB) {
+ assert(BB->hasAddressTaken() &&
+ "Shouldn't get label for block without address taken");
+ AddrLabelSymEntry &Entry = AddrLabelSymbols[BB];
+
+ std::vector<MCSymbol*> Result;
+
+ // If we already had an entry for this block, just return it.
+ if (Entry.Symbols.isNull())
+ Result.push_back(getAddrLabelSymbol(BB));
+ else if (MCSymbol *Sym = Entry.Symbols.dyn_cast<MCSymbol*>())
+ Result.push_back(Sym);
+ else
+ Result = *Entry.Symbols.get<std::vector<MCSymbol*>*>();
+ return Result;
+}
+
+
+/// takeDeletedSymbolsForFunction - If we have any deleted symbols for F, return
+/// them.
+void MMIAddrLabelMap::
+takeDeletedSymbolsForFunction(Function *F, std::vector<MCSymbol*> &Result) {
+ DenseMap<AssertingVH<Function>, std::vector<MCSymbol*> >::iterator I =
+ DeletedAddrLabelsNeedingEmission.find(F);
+
+ // If there are no entries for the function, just return.
+ if (I == DeletedAddrLabelsNeedingEmission.end()) return;
+
+ // Otherwise, take the list.
+ std::swap(Result, I->second);
+ DeletedAddrLabelsNeedingEmission.erase(I);
+}
+
+
+void MMIAddrLabelMap::UpdateForDeletedBlock(BasicBlock *BB) {
+ // If the block got deleted, there is no need for the symbol. If the symbol
+ // was already emitted, we can just forget about it, otherwise we need to
+ // queue it up for later emission when the function is output.
+ AddrLabelSymEntry Entry = AddrLabelSymbols[BB];
+ AddrLabelSymbols.erase(BB);
+ assert(!Entry.Symbols.isNull() && "Didn't have a symbol, why a callback?");
+ BBCallbacks[Entry.Index] = 0; // Clear the callback.
+
+ assert((BB->getParent() == 0 || BB->getParent() == Entry.Fn) &&
+ "Block/parent mismatch");
+
+ // Handle both the single and the multiple symbols cases.
+ if (MCSymbol *Sym = Entry.Symbols.dyn_cast<MCSymbol*>()) {
+ if (Sym->isDefined())
+ return;
+
+ // If the block is not yet defined, we need to emit it at the end of the
+ // function. Add the symbol to the DeletedAddrLabelsNeedingEmission list
+ // for the containing Function. Since the block is being deleted, its
+ // parent may already be removed, we have to get the function from 'Entry'.
+ DeletedAddrLabelsNeedingEmission[Entry.Fn].push_back(Sym);
+ } else {
+ std::vector<MCSymbol*> *Syms = Entry.Symbols.get<std::vector<MCSymbol*>*>();
+
+ for (unsigned i = 0, e = Syms->size(); i != e; ++i) {
+ MCSymbol *Sym = (*Syms)[i];
+ if (Sym->isDefined()) continue; // Ignore already emitted labels.
+
+ // If the block is not yet defined, we need to emit it at the end of the
+ // function. Add the symbol to the DeletedAddrLabelsNeedingEmission list
+ // for the containing Function. Since the block is being deleted, its
+ // parent may already be removed, we have to get the function from
+ // 'Entry'.
+ DeletedAddrLabelsNeedingEmission[Entry.Fn].push_back(Sym);
+ }
+
+ // The entry is deleted, free the memory associated with the symbol list.
+ delete Syms;
+ }
+}
+
+void MMIAddrLabelMap::UpdateForRAUWBlock(BasicBlock *Old, BasicBlock *New) {
+ // Get the entry for the RAUW'd block and remove it from our map.
+ AddrLabelSymEntry OldEntry = AddrLabelSymbols[Old];
+ AddrLabelSymbols.erase(Old);
+ assert(!OldEntry.Symbols.isNull() && "Didn't have a symbol, why a callback?");
+
+ AddrLabelSymEntry &NewEntry = AddrLabelSymbols[New];
+
+ // If New is not address taken, just move our symbol over to it.
+ if (NewEntry.Symbols.isNull()) {
+ BBCallbacks[OldEntry.Index].setPtr(New); // Update the callback.
+ NewEntry = OldEntry; // Set New's entry.
+ return;
+ }
+
+ BBCallbacks[OldEntry.Index] = 0; // Update the callback.
+
+ // Otherwise, we need to add the old symbol to the new block's set. If it is
+ // just a single entry, upgrade it to a symbol list.
+ if (MCSymbol *PrevSym = NewEntry.Symbols.dyn_cast<MCSymbol*>()) {
+ std::vector<MCSymbol*> *SymList = new std::vector<MCSymbol*>();
+ SymList->push_back(PrevSym);
+ NewEntry.Symbols = SymList;
+ }
+
+ std::vector<MCSymbol*> *SymList =
+ NewEntry.Symbols.get<std::vector<MCSymbol*>*>();
+
+ // If the old entry was a single symbol, add it.
+ if (MCSymbol *Sym = OldEntry.Symbols.dyn_cast<MCSymbol*>()) {
+ SymList->push_back(Sym);
+ return;
+ }
+
+ // Otherwise, concatenate the list.
+ std::vector<MCSymbol*> *Syms =OldEntry.Symbols.get<std::vector<MCSymbol*>*>();
+ SymList->insert(SymList->end(), Syms->begin(), Syms->end());
+ delete Syms;
+}
+
+
+void MMIAddrLabelMapCallbackPtr::deleted() {
+ Map->UpdateForDeletedBlock(cast<BasicBlock>(getValPtr()));
+}
+
+void MMIAddrLabelMapCallbackPtr::allUsesReplacedWith(Value *V2) {
+ Map->UpdateForRAUWBlock(cast<BasicBlock>(getValPtr()), cast<BasicBlock>(V2));
+}
+
+
//===----------------------------------------------------------------------===//
-MachineModuleInfo::MachineModuleInfo()
-: ImmutablePass(&ID)
-, ObjFileMMI(0)
-, CurCallSite(0)
-, CallsEHReturn(0)
-, CallsUnwindInit(0)
-, DbgInfoAvailable(false) {
+MachineModuleInfo::MachineModuleInfo(const MCAsmInfo &MAI)
+: ImmutablePass(ID), Context(MAI),
+ ObjFileMMI(0),
+ CurCallSite(0), CallsEHReturn(0), CallsUnwindInit(0), DbgInfoAvailable(false){
// Always emit some info, by default "no personality" info.
Personalities.push_back(NULL);
+ AddrLabelSymbols = 0;
+ TheModule = 0;
+}
+
+MachineModuleInfo::MachineModuleInfo()
+: ImmutablePass(ID), Context(*(MCAsmInfo*)0) {
+ assert(0 && "This MachineModuleInfo constructor should never be called, MMI "
+ "should always be explicitly constructed by LLVMTargetMachine");
+ abort();
}
MachineModuleInfo::~MachineModuleInfo() {
delete ObjFileMMI;
+
+ // FIXME: Why isn't doFinalization being called??
+ //assert(AddrLabelSymbols == 0 && "doFinalization not called");
+ delete AddrLabelSymbols;
+ AddrLabelSymbols = 0;
}
/// doInitialization - Initialize the state for a new module.
///
bool MachineModuleInfo::doInitialization() {
+ assert(AddrLabelSymbols == 0 && "Improperly initialized");
return false;
}
/// doFinalization - Tear down the state after completion of a module.
///
bool MachineModuleInfo::doFinalization() {
+ delete AddrLabelSymbols;
+ AddrLabelSymbols = 0;
return false;
}
@@ -83,23 +313,61 @@ void MachineModuleInfo::EndFunction() {
/// AnalyzeModule - Scan the module for global debug information.
///
-void MachineModuleInfo::AnalyzeModule(Module &M) {
+void MachineModuleInfo::AnalyzeModule(const Module &M) {
// Insert functions in the llvm.used array (but not llvm.compiler.used) into
// UsedFunctions.
- GlobalVariable *GV = M.getGlobalVariable("llvm.used");
+ const GlobalVariable *GV = M.getGlobalVariable("llvm.used");
if (!GV || !GV->hasInitializer()) return;
// Should be an array of 'i8*'.
- ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
+ const ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
if (InitList == 0) return;
for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
- if (Function *F =
+ if (const Function *F =
dyn_cast<Function>(InitList->getOperand(i)->stripPointerCasts()))
UsedFunctions.insert(F);
}
-//===-EH-------------------------------------------------------------------===//
+//===- Address of Block Management ----------------------------------------===//
+
+
+/// getAddrLabelSymbol - Return the symbol to be used for the specified basic
+/// block when its address is taken. This cannot be its normal LBB label
+/// because the block may be accessed outside its containing function.
+MCSymbol *MachineModuleInfo::getAddrLabelSymbol(const BasicBlock *BB) {
+ // Lazily create AddrLabelSymbols.
+ if (AddrLabelSymbols == 0)
+ AddrLabelSymbols = new MMIAddrLabelMap(Context);
+ return AddrLabelSymbols->getAddrLabelSymbol(const_cast<BasicBlock*>(BB));
+}
+
+/// getAddrLabelSymbolToEmit - Return the symbol to be used for the specified
+/// basic block when its address is taken. If other blocks were RAUW'd to
+/// this one, we may have to emit them as well, return the whole set.
+std::vector<MCSymbol*> MachineModuleInfo::
+getAddrLabelSymbolToEmit(const BasicBlock *BB) {
+ // Lazily create AddrLabelSymbols.
+ if (AddrLabelSymbols == 0)
+ AddrLabelSymbols = new MMIAddrLabelMap(Context);
+ return AddrLabelSymbols->getAddrLabelSymbolToEmit(const_cast<BasicBlock*>(BB));
+}
+
+
+/// takeDeletedSymbolsForFunction - If the specified function has had any
+/// references to address-taken blocks generated, but the block got deleted,
+/// return the symbol now so we can emit it. This prevents emitting a
+/// reference to a symbol that has no definition.
+void MachineModuleInfo::
+takeDeletedSymbolsForFunction(const Function *F,
+ std::vector<MCSymbol*> &Result) {
+ // If no blocks have had their addresses taken, we're done.
+ if (AddrLabelSymbols == 0) return;
+ return AddrLabelSymbols->
+ takeDeletedSymbolsForFunction(const_cast<Function*>(F), Result);
+}
+
+//===- EH -----------------------------------------------------------------===//
/// getOrCreateLandingPadInfo - Find or create an LandingPadInfo for the
/// specified MachineBasicBlock.
@@ -119,7 +387,7 @@ LandingPadInfo &MachineModuleInfo::getOrCreateLandingPadInfo
/// addInvoke - Provide the begin and end labels of an invoke style call and
/// associate it with a try landing pad block.
void MachineModuleInfo::addInvoke(MachineBasicBlock *LandingPad,
- unsigned BeginLabel, unsigned EndLabel) {
+ MCSymbol *BeginLabel, MCSymbol *EndLabel) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
LP.BeginLabels.push_back(BeginLabel);
LP.EndLabels.push_back(EndLabel);
@@ -127,8 +395,8 @@ void MachineModuleInfo::addInvoke(MachineBasicBlock *LandingPad,
/// addLandingPad - Provide the label of a try LandingPad block.
///
-unsigned MachineModuleInfo::addLandingPad(MachineBasicBlock *LandingPad) {
- unsigned LandingPadLabel = NextLabelID();
+MCSymbol *MachineModuleInfo::addLandingPad(MachineBasicBlock *LandingPad) {
+ MCSymbol *LandingPadLabel = Context.CreateTempSymbol();
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
LP.LandingPadLabel = LandingPadLabel;
return LandingPadLabel;
@@ -137,7 +405,7 @@ unsigned MachineModuleInfo::addLandingPad(MachineBasicBlock *LandingPad) {
/// addPersonality - Provide the personality function for the exception
/// information.
void MachineModuleInfo::addPersonality(MachineBasicBlock *LandingPad,
- Function *Personality) {
+ const Function *Personality) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
LP.Personality = Personality;
@@ -156,7 +424,7 @@ void MachineModuleInfo::addPersonality(MachineBasicBlock *LandingPad,
/// addCatchTypeInfo - Provide the catch typeinfo for a landing pad.
///
void MachineModuleInfo::addCatchTypeInfo(MachineBasicBlock *LandingPad,
- std::vector<GlobalVariable *> &TyInfo) {
+ std::vector<const GlobalVariable *> &TyInfo) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
for (unsigned N = TyInfo.size(); N; --N)
LP.TypeIds.push_back(getTypeIDFor(TyInfo[N - 1]));
@@ -165,7 +433,7 @@ void MachineModuleInfo::addCatchTypeInfo(MachineBasicBlock *LandingPad,
/// addFilterTypeInfo - Provide the filter typeinfo for a landing pad.
///
void MachineModuleInfo::addFilterTypeInfo(MachineBasicBlock *LandingPad,
- std::vector<GlobalVariable *> &TyInfo) {
+ std::vector<const GlobalVariable *> &TyInfo) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
std::vector<unsigned> IdsInFilter(TyInfo.size());
for (unsigned I = 0, E = TyInfo.size(); I != E; ++I)
@@ -182,10 +450,13 @@ void MachineModuleInfo::addCleanup(MachineBasicBlock *LandingPad) {
/// TidyLandingPads - Remap landing pad labels and remove any deleted landing
/// pads.
-void MachineModuleInfo::TidyLandingPads() {
+void MachineModuleInfo::TidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap) {
for (unsigned i = 0; i != LandingPads.size(); ) {
LandingPadInfo &LandingPad = LandingPads[i];
- LandingPad.LandingPadLabel = MappedLabel(LandingPad.LandingPadLabel);
+ if (LandingPad.LandingPadLabel &&
+ !LandingPad.LandingPadLabel->isDefined() &&
+ (!LPMap || (*LPMap)[LandingPad.LandingPadLabel] == 0))
+ LandingPad.LandingPadLabel = 0;
// Special case: we *should* emit LPs with null LP MBB. This indicates
// "nounwind" case.
@@ -194,19 +465,17 @@ void MachineModuleInfo::TidyLandingPads() {
continue;
}
- for (unsigned j=0; j != LandingPads[i].BeginLabels.size(); ) {
- unsigned BeginLabel = MappedLabel(LandingPad.BeginLabels[j]);
- unsigned EndLabel = MappedLabel(LandingPad.EndLabels[j]);
-
- if (!BeginLabel || !EndLabel) {
- LandingPad.BeginLabels.erase(LandingPad.BeginLabels.begin() + j);
- LandingPad.EndLabels.erase(LandingPad.EndLabels.begin() + j);
- continue;
- }
-
- LandingPad.BeginLabels[j] = BeginLabel;
- LandingPad.EndLabels[j] = EndLabel;
- ++j;
+ for (unsigned j = 0, e = LandingPads[i].BeginLabels.size(); j != e; ++j) {
+ MCSymbol *BeginLabel = LandingPad.BeginLabels[j];
+ MCSymbol *EndLabel = LandingPad.EndLabels[j];
+ if ((BeginLabel->isDefined() ||
+ (LPMap && (*LPMap)[BeginLabel] != 0)) &&
+ (EndLabel->isDefined() ||
+ (LPMap && (*LPMap)[EndLabel] != 0))) continue;
+
+ LandingPad.BeginLabels.erase(LandingPad.BeginLabels.begin() + j);
+ LandingPad.EndLabels.erase(LandingPad.EndLabels.begin() + j);
+ --j, --e;
}
// Remove landing pads with no try-ranges.
@@ -220,14 +489,13 @@ void MachineModuleInfo::TidyLandingPads() {
if (!LandingPad.LandingPadBlock ||
(LandingPad.TypeIds.size() == 1 && !LandingPad.TypeIds[0]))
LandingPad.TypeIds.clear();
-
++i;
}
}
/// getTypeIDFor - Return the type id for the specified typeinfo. This is
/// function wide.
-unsigned MachineModuleInfo::getTypeIDFor(GlobalVariable *TI) {
+unsigned MachineModuleInfo::getTypeIDFor(const GlobalVariable *TI) {
for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
if (TypeInfos[i] == TI) return i + 1;
@@ -267,7 +535,7 @@ try_next:;
}
/// getPersonality - Return the personality function for the current function.
-Function *MachineModuleInfo::getPersonality() const {
+const Function *MachineModuleInfo::getPersonality() const {
// FIXME: Until PR1414 will be fixed, we're using 1 personality function per
// function
return !LandingPads.empty() ? LandingPads[0].Personality : NULL;
@@ -295,3 +563,19 @@ unsigned MachineModuleInfo::getPersonalityIndex() const {
return 0;
}
+namespace {
+ /// VariableDebugSorter - Comparison to sort the VariableDbgInfo map
+ /// by source location, to avoid depending on the arbitrary order that
+ /// instruction selection visits variables in.
+ struct VariableDebugSorter {
+ bool operator()(const MachineModuleInfo::VariableDbgInfoMapTy::value_type &A,
+ const MachineModuleInfo::VariableDbgInfoMapTy::value_type &B)
+ const {
+ if (A.second.second.getLine() != B.second.second.getLine())
+ return A.second.second.getLine() < B.second.second.getLine();
+ if (A.second.second.getCol() != B.second.second.getCol())
+ return A.second.second.getCol() < B.second.second.getCol();
+ return false;
+ }
+ };
+}
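
The getAddrLabelSymbol/getAddrLabelSymbolToEmit additions above allocate the MMIAddrLabelMap only on first use, since most modules never take a block's address. A minimal standalone sketch of that lazy-allocation pattern (toy Symbol/Block/registry types, not the LLVM classes):

#include <map>
#include <memory>
#include <string>

// Toy stand-ins for MCSymbol / BasicBlock; purely illustrative.
struct Symbol { std::string Name; };
struct Block  { int Id; };

class AddrLabelRegistry {
  // Created lazily: most modules never take the address of a block.
  std::unique_ptr<std::map<int, Symbol>> Labels;

public:
  Symbol &getAddrLabel(const Block &BB) {
    if (!Labels)                                   // allocate on first use only
      Labels = std::make_unique<std::map<int, Symbol>>();
    auto It = Labels->find(BB.Id);
    if (It == Labels->end())
      It = Labels->emplace(BB.Id,
                           Symbol{"BB_addr_" + std::to_string(BB.Id)}).first;
    return It->second;
  }
};

int main() {
  AddrLabelRegistry R;
  Block B{7};
  return R.getAddrLabel(B).Name == "BB_addr_7" ? 0 : 1;
}
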
diff --git a/libclamav/c++/llvm/lib/CodeGen/MachineModuleInfoImpls.cpp b/libclamav/c++/llvm/lib/CodeGen/MachineModuleInfoImpls.cpp
index 39d2c75..5ab56c0 100644
--- a/libclamav/c++/llvm/lib/CodeGen/MachineModuleInfoImpls.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/MachineModuleInfoImpls.cpp
@@ -25,10 +25,9 @@ void MachineModuleInfoMachO::Anchor() {}
void MachineModuleInfoELF::Anchor() {}
static int SortSymbolPair(const void *LHS, const void *RHS) {
- const MCSymbol *LHSS =
- ((const std::pair<MCSymbol*, MCSymbol*>*)LHS)->first;
- const MCSymbol *RHSS =
- ((const std::pair<MCSymbol*, MCSymbol*>*)RHS)->first;
+ typedef std::pair<MCSymbol*, MachineModuleInfoImpl::StubValueTy> PairTy;
+ const MCSymbol *LHSS = ((const PairTy *)LHS)->first;
+ const MCSymbol *RHSS = ((const PairTy *)RHS)->first;
return LHSS->getName().compare(RHSS->getName());
}
@@ -36,7 +35,7 @@ static int SortSymbolPair(const void *LHS, const void *RHS) {
/// sorted order.
MachineModuleInfoImpl::SymbolListTy
MachineModuleInfoImpl::GetSortedStubs(const DenseMap<MCSymbol*,
- MCSymbol*> &Map) {
+ MachineModuleInfoImpl::StubValueTy>&Map) {
MachineModuleInfoImpl::SymbolListTy List(Map.begin(), Map.end());
if (!List.empty())
diff --git a/libclamav/c++/llvm/lib/CodeGen/MachineRegisterInfo.cpp b/libclamav/c++/llvm/lib/CodeGen/MachineRegisterInfo.cpp
index d9ab677..5d852f2 100644
--- a/libclamav/c++/llvm/lib/CodeGen/MachineRegisterInfo.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/MachineRegisterInfo.cpp
@@ -12,12 +12,15 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Support/CommandLine.h"
using namespace llvm;
MachineRegisterInfo::MachineRegisterInfo(const TargetRegisterInfo &TRI) {
VRegInfo.reserve(256);
RegAllocHints.reserve(256);
- RegClass2VRegMap.resize(TRI.getNumRegClasses()+1); // RC ID starts at 1.
+ RegClass2VRegMap = new std::vector<unsigned>[TRI.getNumRegClasses()];
UsedPhysRegs.resize(TRI.getNumRegs());
// Create the physreg use/def lists.
@@ -34,6 +37,7 @@ MachineRegisterInfo::~MachineRegisterInfo() {
"PhysRegUseDefLists has entries after all instructions are deleted");
#endif
delete [] PhysRegUseDefLists;
+ delete [] RegClass2VRegMap;
}
/// setRegClass - Set the register class of the specified virtual register.
@@ -49,7 +53,7 @@ MachineRegisterInfo::setRegClass(unsigned Reg, const TargetRegisterClass *RC) {
// Remove from old register class's vregs list. This may be slow but
// fortunately this operation is rarely needed.
std::vector<unsigned> &VRegs = RegClass2VRegMap[OldRC->getID()];
- std::vector<unsigned>::iterator I=std::find(VRegs.begin(), VRegs.end(), VR);
+ std::vector<unsigned>::iterator I = std::find(VRegs.begin(), VRegs.end(), VR);
VRegs.erase(I);
// Add to new register class's vregs list.
@@ -130,6 +134,88 @@ bool MachineRegisterInfo::hasOneNonDBGUse(unsigned RegNo) const {
return ++UI == use_nodbg_end();
}
+/// clearKillFlags - Iterate over all the uses of the given register and
+/// clear the kill flag from the MachineOperand. This function is used by
+/// optimization passes which extend register lifetimes and need only
+/// preserve conservative kill flag information.
+void MachineRegisterInfo::clearKillFlags(unsigned Reg) const {
+ for (use_iterator UI = use_begin(Reg), UE = use_end(); UI != UE; ++UI)
+ UI.getOperand().setIsKill(false);
+}
+
+bool MachineRegisterInfo::isLiveIn(unsigned Reg) const {
+ for (livein_iterator I = livein_begin(), E = livein_end(); I != E; ++I)
+ if (I->first == Reg || I->second == Reg)
+ return true;
+ return false;
+}
+
+bool MachineRegisterInfo::isLiveOut(unsigned Reg) const {
+ for (liveout_iterator I = liveout_begin(), E = liveout_end(); I != E; ++I)
+ if (*I == Reg)
+ return true;
+ return false;
+}
+
+/// getLiveInPhysReg - If VReg is a live-in virtual register, return the
+/// corresponding live-in physical register.
+unsigned MachineRegisterInfo::getLiveInPhysReg(unsigned VReg) const {
+ for (livein_iterator I = livein_begin(), E = livein_end(); I != E; ++I)
+ if (I->second == VReg)
+ return I->first;
+ return 0;
+}
+
+/// getLiveInVirtReg - If PReg is a live-in physical register, return the
+/// corresponding live-in virtual register.
+unsigned MachineRegisterInfo::getLiveInVirtReg(unsigned PReg) const {
+ for (livein_iterator I = livein_begin(), E = livein_end(); I != E; ++I)
+ if (I->first == PReg)
+ return I->second;
+ return 0;
+}
+
+/// EmitLiveInCopies - Emit copies to initialize livein virtual registers
+/// into the given entry block.
+void
+MachineRegisterInfo::EmitLiveInCopies(MachineBasicBlock *EntryMBB,
+ const TargetRegisterInfo &TRI,
+ const TargetInstrInfo &TII) {
+ // Emit the copies into the top of the block.
+ for (unsigned i = 0, e = LiveIns.size(); i != e; ++i)
+ if (LiveIns[i].second) {
+ if (use_empty(LiveIns[i].second)) {
+ // The livein has no uses. Drop it.
+ //
+ // It would be preferable to have isel avoid creating live-in
+ // records for unused arguments in the first place, but it's
+ // complicated by the debug info code for arguments.
+ LiveIns.erase(LiveIns.begin() + i);
+ --i; --e;
+ } else {
+ // Emit a copy.
+ BuildMI(*EntryMBB, EntryMBB->begin(), DebugLoc(),
+ TII.get(TargetOpcode::COPY), LiveIns[i].second)
+ .addReg(LiveIns[i].first);
+
+ // Add the register to the entry block live-in set.
+ EntryMBB->addLiveIn(LiveIns[i].first);
+ }
+ } else {
+ // Add the register to the entry block live-in set.
+ EntryMBB->addLiveIn(LiveIns[i].first);
+ }
+}
+
+void MachineRegisterInfo::closePhysRegsUsed(const TargetRegisterInfo &TRI) {
+ for (int i = UsedPhysRegs.find_first(); i >= 0;
+ i = UsedPhysRegs.find_next(i))
+ for (const unsigned *SS = TRI.getSubRegisters(i);
+ unsigned SubReg = *SS; ++SS)
+ if (SubReg > unsigned(i))
+ UsedPhysRegs.set(SubReg);
+}
+
#ifndef NDEBUG
void MachineRegisterInfo::dumpUses(unsigned Reg) const {
for (use_iterator I = use_begin(Reg), E = use_end(); I != E; ++I)
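
The EmitLiveInCopies hunk above (and TidyLandingPads earlier in this patch) erase from a vector while indexing it, stepping both the index and the cached end back after each erase. A standalone toy of just that idiom (plain std::vector, nothing LLVM-specific):

#include <cassert>
#include <vector>

// In-place filtering with index fixup: after erasing element i, step both i
// and e back so the element that shifted into slot i is not skipped.
int main() {
  std::vector<int> Vals = {1, 0, 2, 0, 3};
  for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
    if (Vals[i] != 0)
      continue;
    Vals.erase(Vals.begin() + i);
    --i; --e;                      // re-examine the slot we just compacted into
  }
  assert(Vals.size() == 3);        // {1, 2, 3}
  return 0;
}
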
diff --git a/libclamav/c++/llvm/lib/CodeGen/MachineSSAUpdater.cpp b/libclamav/c++/llvm/lib/CodeGen/MachineSSAUpdater.cpp
index 2255dc3..84d6df2 100644
--- a/libclamav/c++/llvm/lib/CodeGen/MachineSSAUpdater.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/MachineSSAUpdater.cpp
@@ -21,34 +21,28 @@
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/SSAUpdaterImpl.h"
using namespace llvm;
typedef DenseMap<MachineBasicBlock*, unsigned> AvailableValsTy;
-typedef std::vector<std::pair<MachineBasicBlock*, unsigned> >
- IncomingPredInfoTy;
-
static AvailableValsTy &getAvailableVals(void *AV) {
return *static_cast<AvailableValsTy*>(AV);
}
-static IncomingPredInfoTy &getIncomingPredInfo(void *IPI) {
- return *static_cast<IncomingPredInfoTy*>(IPI);
-}
-
-
MachineSSAUpdater::MachineSSAUpdater(MachineFunction &MF,
SmallVectorImpl<MachineInstr*> *NewPHI)
- : AV(0), IPI(0), InsertedPHIs(NewPHI) {
+ : AV(0), InsertedPHIs(NewPHI) {
TII = MF.getTarget().getInstrInfo();
MRI = &MF.getRegInfo();
}
MachineSSAUpdater::~MachineSSAUpdater() {
delete &getAvailableVals(AV);
- delete &getIncomingPredInfo(IPI);
}
/// Initialize - Reset this object to get ready for a new set of SSA
@@ -59,11 +53,6 @@ void MachineSSAUpdater::Initialize(unsigned V) {
else
getAvailableVals(AV).clear();
- if (IPI == 0)
- IPI = new IncomingPredInfoTy();
- else
- getIncomingPredInfo(IPI).clear();
-
VR = V;
VRC = MRI->getRegClass(VR);
}
@@ -123,11 +112,12 @@ static
MachineInstr *InsertNewDef(unsigned Opcode,
MachineBasicBlock *BB, MachineBasicBlock::iterator I,
const TargetRegisterClass *RC,
- MachineRegisterInfo *MRI, const TargetInstrInfo *TII) {
+ MachineRegisterInfo *MRI,
+ const TargetInstrInfo *TII) {
unsigned NewVR = MRI->createVirtualRegister(RC);
- return BuildMI(*BB, I, DebugLoc::getUnknownLoc(), TII->get(Opcode), NewVR);
+ return BuildMI(*BB, I, DebugLoc(), TII->get(Opcode), NewVR);
}
-
+
/// GetValueInMiddleOfBlock - Construct SSA form, materializing a value that
/// is live in the middle of the specified block.
///
@@ -150,7 +140,7 @@ MachineInstr *InsertNewDef(unsigned Opcode,
unsigned MachineSSAUpdater::GetValueInMiddleOfBlock(MachineBasicBlock *BB) {
// If there is no definition of the renamed variable in this block, just use
// GetValueAtEndOfBlock to do our work.
- if (!getAvailableVals(AV).count(BB))
+ if (!HasValueForBlock(BB))
return GetValueAtEndOfBlockInternal(BB);
// If there are no predecessors, just return undef.
@@ -252,143 +242,131 @@ void MachineSSAUpdater::ReplaceRegWith(unsigned OldReg, unsigned NewReg) {
I->second = NewReg;
}
-/// GetValueAtEndOfBlockInternal - Check to see if AvailableVals has an entry
-/// for the specified BB and if so, return it. If not, construct SSA form by
-/// walking predecessors inserting PHI nodes as needed until we get to a block
-/// where the value is available.
-///
-unsigned MachineSSAUpdater::GetValueAtEndOfBlockInternal(MachineBasicBlock *BB){
- AvailableValsTy &AvailableVals = getAvailableVals(AV);
+/// MachinePHIiter - Iterator for PHI operands. This is used for the
+/// PHI_iterator in the SSAUpdaterImpl template.
+namespace {
+ class MachinePHIiter {
+ private:
+ MachineInstr *PHI;
+ unsigned idx;
+
+ public:
+ explicit MachinePHIiter(MachineInstr *P) // begin iterator
+ : PHI(P), idx(1) {}
+ MachinePHIiter(MachineInstr *P, bool) // end iterator
+ : PHI(P), idx(PHI->getNumOperands()) {}
+
+ MachinePHIiter &operator++() { idx += 2; return *this; }
+ bool operator==(const MachinePHIiter& x) const { return idx == x.idx; }
+ bool operator!=(const MachinePHIiter& x) const { return !operator==(x); }
+ unsigned getIncomingValue() { return PHI->getOperand(idx).getReg(); }
+ MachineBasicBlock *getIncomingBlock() {
+ return PHI->getOperand(idx+1).getMBB();
+ }
+ };
+}
- // Query AvailableVals by doing an insertion of null.
- std::pair<AvailableValsTy::iterator, bool> InsertRes =
- AvailableVals.insert(std::make_pair(BB, 0));
-
- // Handle the case when the insertion fails because we have already seen BB.
- if (!InsertRes.second) {
- // If the insertion failed, there are two cases. The first case is that the
- // value is already available for the specified block. If we get this, just
- // return the value.
- if (InsertRes.first->second != 0)
- return InsertRes.first->second;
-
- // Otherwise, if the value we find is null, then this is the value is not
- // known but it is being computed elsewhere in our recursion. This means
- // that we have a cycle. Handle this by inserting a PHI node and returning
- // it. When we get back to the first instance of the recursion we will fill
- // in the PHI node.
- MachineBasicBlock::iterator Loc = BB->empty() ? BB->end() : BB->front();
- MachineInstr *NewPHI = InsertNewDef(TargetOpcode::PHI, BB, Loc,
- VRC, MRI,TII);
- unsigned NewVR = NewPHI->getOperand(0).getReg();
- InsertRes.first->second = NewVR;
- return NewVR;
+/// SSAUpdaterTraits<MachineSSAUpdater> - Traits for the SSAUpdaterImpl
+/// template, specialized for MachineSSAUpdater.
+namespace llvm {
+template<>
+class SSAUpdaterTraits<MachineSSAUpdater> {
+public:
+ typedef MachineBasicBlock BlkT;
+ typedef unsigned ValT;
+ typedef MachineInstr PhiT;
+
+ typedef MachineBasicBlock::succ_iterator BlkSucc_iterator;
+ static BlkSucc_iterator BlkSucc_begin(BlkT *BB) { return BB->succ_begin(); }
+ static BlkSucc_iterator BlkSucc_end(BlkT *BB) { return BB->succ_end(); }
+
+ typedef MachinePHIiter PHI_iterator;
+ static inline PHI_iterator PHI_begin(PhiT *PHI) { return PHI_iterator(PHI); }
+ static inline PHI_iterator PHI_end(PhiT *PHI) {
+ return PHI_iterator(PHI, true);
}
- // If there are no predecessors, then we must have found an unreachable block
- // just return 'undef'. Since there are no predecessors, InsertRes must not
- // be invalidated.
- if (BB->pred_empty()) {
+ /// FindPredecessorBlocks - Put the predecessors of BB into the Preds
+ /// vector.
+ static void FindPredecessorBlocks(MachineBasicBlock *BB,
+ SmallVectorImpl<MachineBasicBlock*> *Preds){
+ for (MachineBasicBlock::pred_iterator PI = BB->pred_begin(),
+ E = BB->pred_end(); PI != E; ++PI)
+ Preds->push_back(*PI);
+ }
+
+ /// GetUndefVal - Create an IMPLICIT_DEF instruction with a new register.
+ /// Add it into the specified block and return the register.
+ static unsigned GetUndefVal(MachineBasicBlock *BB,
+ MachineSSAUpdater *Updater) {
// Insert an implicit_def to represent an undef value.
MachineInstr *NewDef = InsertNewDef(TargetOpcode::IMPLICIT_DEF,
BB, BB->getFirstTerminator(),
- VRC, MRI, TII);
- return InsertRes.first->second = NewDef->getOperand(0).getReg();
+ Updater->VRC, Updater->MRI,
+ Updater->TII);
+ return NewDef->getOperand(0).getReg();
}
- // Okay, the value isn't in the map and we just inserted a null in the entry
- // to indicate that we're processing the block. Since we have no idea what
- // value is in this block, we have to recurse through our predecessors.
- //
- // While we're walking our predecessors, we keep track of them in a vector,
- // then insert a PHI node in the end if we actually need one. We could use a
- // smallvector here, but that would take a lot of stack space for every level
- // of the recursion, just use IncomingPredInfo as an explicit stack.
- IncomingPredInfoTy &IncomingPredInfo = getIncomingPredInfo(IPI);
- unsigned FirstPredInfoEntry = IncomingPredInfo.size();
-
- // As we're walking the predecessors, keep track of whether they are all
- // producing the same value. If so, this value will capture it, if not, it
- // will get reset to null. We distinguish the no-predecessor case explicitly
- // below.
- unsigned SingularValue = 0;
- bool isFirstPred = true;
- for (MachineBasicBlock::pred_iterator PI = BB->pred_begin(),
- E = BB->pred_end(); PI != E; ++PI) {
- MachineBasicBlock *PredBB = *PI;
- unsigned PredVal = GetValueAtEndOfBlockInternal(PredBB);
- IncomingPredInfo.push_back(std::make_pair(PredBB, PredVal));
-
- // Compute SingularValue.
- if (isFirstPred) {
- SingularValue = PredVal;
- isFirstPred = false;
- } else if (PredVal != SingularValue)
- SingularValue = 0;
+ /// CreateEmptyPHI - Create a PHI instruction that defines a new register.
+ /// Add it into the specified block and return the register.
+ static unsigned CreateEmptyPHI(MachineBasicBlock *BB, unsigned NumPreds,
+ MachineSSAUpdater *Updater) {
+ MachineBasicBlock::iterator Loc = BB->empty() ? BB->end() : BB->front();
+ MachineInstr *PHI = InsertNewDef(TargetOpcode::PHI, BB, Loc,
+ Updater->VRC, Updater->MRI,
+ Updater->TII);
+ return PHI->getOperand(0).getReg();
}
- /// Look up BB's entry in AvailableVals. 'InsertRes' may be invalidated. If
- /// this block is involved in a loop, a no-entry PHI node will have been
- /// inserted as InsertedVal. Otherwise, we'll still have the null we inserted
- /// above.
- unsigned &InsertedVal = AvailableVals[BB];
-
- // If all the predecessor values are the same then we don't need to insert a
- // PHI. This is the simple and common case.
- if (SingularValue) {
- // If a PHI node got inserted, replace it with the singlar value and delete
- // it.
- if (InsertedVal) {
- MachineInstr *OldVal = MRI->getVRegDef(InsertedVal);
- // Be careful about dead loops. These RAUW's also update InsertedVal.
- assert(InsertedVal != SingularValue && "Dead loop?");
- ReplaceRegWith(InsertedVal, SingularValue);
- OldVal->eraseFromParent();
- }
-
- InsertedVal = SingularValue;
-
- // Drop the entries we added in IncomingPredInfo to restore the stack.
- IncomingPredInfo.erase(IncomingPredInfo.begin()+FirstPredInfoEntry,
- IncomingPredInfo.end());
- return InsertedVal;
+ /// AddPHIOperand - Add the specified value as an operand of the PHI for
+ /// the specified predecessor block.
+ static void AddPHIOperand(MachineInstr *PHI, unsigned Val,
+ MachineBasicBlock *Pred) {
+ PHI->addOperand(MachineOperand::CreateReg(Val, false));
+ PHI->addOperand(MachineOperand::CreateMBB(Pred));
}
+ /// InstrIsPHI - Check if an instruction is a PHI.
+ ///
+ static MachineInstr *InstrIsPHI(MachineInstr *I) {
+ if (I && I->isPHI())
+ return I;
+ return 0;
+ }
- // Otherwise, we do need a PHI: insert one now if we don't already have one.
- MachineInstr *InsertedPHI;
- if (InsertedVal == 0) {
- MachineBasicBlock::iterator Loc = BB->empty() ? BB->end() : BB->front();
- InsertedPHI = InsertNewDef(TargetOpcode::PHI, BB, Loc,
- VRC, MRI, TII);
- InsertedVal = InsertedPHI->getOperand(0).getReg();
- } else {
- InsertedPHI = MRI->getVRegDef(InsertedVal);
+ /// ValueIsPHI - Check if the instruction that defines the specified register
+ /// is a PHI instruction.
+ static MachineInstr *ValueIsPHI(unsigned Val, MachineSSAUpdater *Updater) {
+ return InstrIsPHI(Updater->MRI->getVRegDef(Val));
}
- // Fill in all the predecessors of the PHI.
- MachineInstrBuilder MIB(InsertedPHI);
- for (IncomingPredInfoTy::iterator I =
- IncomingPredInfo.begin()+FirstPredInfoEntry,
- E = IncomingPredInfo.end(); I != E; ++I)
- MIB.addReg(I->second).addMBB(I->first);
+ /// ValueIsNewPHI - Like ValueIsPHI but also check if the PHI has no source
+ /// operands, i.e., it was just added.
+ static MachineInstr *ValueIsNewPHI(unsigned Val, MachineSSAUpdater *Updater) {
+ MachineInstr *PHI = ValueIsPHI(Val, Updater);
+ if (PHI && PHI->getNumOperands() <= 1)
+ return PHI;
+ return 0;
+ }
- // Drop the entries we added in IncomingPredInfo to restore the stack.
- IncomingPredInfo.erase(IncomingPredInfo.begin()+FirstPredInfoEntry,
- IncomingPredInfo.end());
+ /// GetPHIValue - For the specified PHI instruction, return the register
+ /// that it defines.
+ static unsigned GetPHIValue(MachineInstr *PHI) {
+ return PHI->getOperand(0).getReg();
+ }
+};
- // See if the PHI node can be merged to a single value. This can happen in
- // loop cases when we get a PHI of itself and one other value.
- if (unsigned ConstVal = InsertedPHI->isConstantValuePHI()) {
- MRI->replaceRegWith(InsertedVal, ConstVal);
- InsertedPHI->eraseFromParent();
- InsertedVal = ConstVal;
- } else {
- DEBUG(dbgs() << " Inserted PHI: " << *InsertedPHI << "\n");
+} // End llvm namespace
- // If the client wants to know about all new instructions, tell it.
- if (InsertedPHIs) InsertedPHIs->push_back(InsertedPHI);
- }
+/// GetValueAtEndOfBlockInternal - Check to see if AvailableVals has an entry
+/// for the specified BB and if so, return it. If not, construct SSA form by
+/// first calculating the required placement of PHIs and then inserting new
+/// PHIs where needed.
+unsigned MachineSSAUpdater::GetValueAtEndOfBlockInternal(MachineBasicBlock *BB){
+ AvailableValsTy &AvailableVals = getAvailableVals(AV);
+ if (unsigned V = AvailableVals[BB])
+ return V;
- return InsertedVal;
+ SSAUpdaterImpl<MachineSSAUpdater> Impl(this, &AvailableVals, InsertedPHIs);
+ return Impl.GetValue(BB);
}
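
The rewrite above retires the hand-rolled recursive walk in GetValueAtEndOfBlockInternal and instead drives the shared SSAUpdaterImpl template through an SSAUpdaterTraits<MachineSSAUpdater> specialization. A rough standalone sketch of that traits idiom (toy node type and a made-up sumOfSuccessors algorithm, not the LLVM template):

#include <iostream>
#include <vector>

// Generic algorithm parameterized by a traits class; the primary template is
// left undefined and each client type provides a specialization.
template <typename T> struct GraphTraits;

template <typename T>
int sumOfSuccessors(const T &Node) {
  int Sum = 0;
  for (auto It = GraphTraits<T>::succ_begin(Node),
            E  = GraphTraits<T>::succ_end(Node); It != E; ++It)
    Sum += GraphTraits<T>::value(*It);
  return Sum;
}

// A concrete node type and its traits specialization.
struct ToyNode { int Value; std::vector<const ToyNode*> Succs; };

template <> struct GraphTraits<ToyNode> {
  using iterator = std::vector<const ToyNode*>::const_iterator;
  static iterator succ_begin(const ToyNode &N) { return N.Succs.begin(); }
  static iterator succ_end(const ToyNode &N)   { return N.Succs.end(); }
  static int value(const ToyNode *N)           { return N->Value; }
};

int main() {
  ToyNode A{1, {}}, B{2, {}}, Root{0, {&A, &B}};
  std::cout << sumOfSuccessors(Root) << "\n";   // prints 3
}
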
diff --git a/libclamav/c++/llvm/lib/CodeGen/MachineSink.cpp b/libclamav/c++/llvm/lib/CodeGen/MachineSink.cpp
index e47ba7c..c8f8faf 100644
--- a/libclamav/c++/llvm/lib/CodeGen/MachineSink.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/MachineSink.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This pass moves instructions into successor blocks, when possible, so that
+// This pass moves instructions into successor blocks when possible, so that
// they aren't executed on paths where their results aren't needed.
//
// This pass is not intended to be a replacement or a complete alternative
@@ -20,16 +20,27 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-STATISTIC(NumSunk, "Number of machine instructions sunk");
+static cl::opt<bool>
+SplitEdges("machine-sink-split",
+ cl::desc("Split critical edges during machine sinking"),
+ cl::init(false), cl::Hidden);
+static cl::opt<unsigned>
+SplitLimit("split-limit",
+ cl::init(~0u), cl::Hidden);
+
+STATISTIC(NumSunk, "Number of machine instructions sunk");
+STATISTIC(NumSplit, "Number of critical edges split");
namespace {
class MachineSinking : public MachineFunctionPass {
@@ -37,88 +48,105 @@ namespace {
const TargetRegisterInfo *TRI;
MachineRegisterInfo *RegInfo; // Machine register information
MachineDominatorTree *DT; // Machine dominator tree
+ MachineLoopInfo *LI;
AliasAnalysis *AA;
BitVector AllocatableSet; // Which physregs are allocatable?
public:
static char ID; // Pass identification
- MachineSinking() : MachineFunctionPass(&ID) {}
-
+ MachineSinking() : MachineFunctionPass(ID) {}
+
virtual bool runOnMachineFunction(MachineFunction &MF);
-
+
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
AU.addRequired<AliasAnalysis>();
AU.addRequired<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
AU.addPreserved<MachineDominatorTree>();
+ AU.addPreserved<MachineLoopInfo>();
}
private:
bool ProcessBlock(MachineBasicBlock &MBB);
+ MachineBasicBlock *SplitCriticalEdge(MachineBasicBlock *From,
+ MachineBasicBlock *To);
bool SinkInstruction(MachineInstr *MI, bool &SawStore);
- bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB) const;
+ bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB,
+ MachineBasicBlock *DefMBB, bool &LocalUse) const;
};
} // end anonymous namespace
-
+
char MachineSinking::ID = 0;
-static RegisterPass<MachineSinking>
-X("machine-sink", "Machine code sinking");
+INITIALIZE_PASS(MachineSinking, "machine-sink",
+ "Machine code sinking", false, false);
FunctionPass *llvm::createMachineSinkingPass() { return new MachineSinking(); }
/// AllUsesDominatedByBlock - Return true if all uses of the specified register
-/// occur in blocks dominated by the specified block.
-bool MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
- MachineBasicBlock *MBB) const {
+/// occur in blocks dominated by the specified block. If any use is in the
+/// definition block, then return false since it is never legal to move def
+/// after uses.
+bool MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
+ MachineBasicBlock *MBB,
+ MachineBasicBlock *DefMBB,
+ bool &LocalUse) const {
assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
"Only makes sense for vregs");
// Ignoring debug uses is necessary so debug info doesn't affect the code.
// This may leave a referencing dbg_value in the original block, before
// the definition of the vreg. Dwarf generator handles this although the
// user might not get the right info at runtime.
- for (MachineRegisterInfo::use_nodbg_iterator I =
- RegInfo->use_nodbg_begin(Reg),
- E = RegInfo->use_nodbg_end(); I != E; ++I) {
+ for (MachineRegisterInfo::use_nodbg_iterator
+ I = RegInfo->use_nodbg_begin(Reg), E = RegInfo->use_nodbg_end();
+ I != E; ++I) {
// Determine the block of the use.
MachineInstr *UseInst = &*I;
MachineBasicBlock *UseBlock = UseInst->getParent();
+
if (UseInst->isPHI()) {
// PHI nodes use the operand in the predecessor block, not the block with
// the PHI.
UseBlock = UseInst->getOperand(I.getOperandNo()+1).getMBB();
+ } else if (UseBlock == DefMBB) {
+ LocalUse = true;
+ return false;
}
+
// Check that it dominates.
if (!DT->dominates(MBB, UseBlock))
return false;
}
+
return true;
}
bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "******** Machine Sinking ********\n");
-
+
const TargetMachine &TM = MF.getTarget();
TII = TM.getInstrInfo();
TRI = TM.getRegisterInfo();
RegInfo = &MF.getRegInfo();
DT = &getAnalysis<MachineDominatorTree>();
+ LI = &getAnalysis<MachineLoopInfo>();
AA = &getAnalysis<AliasAnalysis>();
AllocatableSet = TRI->getAllocatableSet(MF);
bool EverMadeChange = false;
-
+
while (1) {
bool MadeChange = false;
// Process all basic blocks.
- for (MachineFunction::iterator I = MF.begin(), E = MF.end();
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end();
I != E; ++I)
MadeChange |= ProcessBlock(*I);
-
+
// If this iteration over the code changed anything, keep iterating.
if (!MadeChange) break;
EverMadeChange = true;
- }
+ }
return EverMadeChange;
}
@@ -126,6 +154,11 @@ bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
// Can't sink anything out of a block that has less than two successors.
if (MBB.succ_size() <= 1 || MBB.empty()) return false;
+ // Don't bother sinking code out of unreachable blocks. In addition to being
+ // unprofitable, it can also lead to infinite looping, because in an
+ // unreachable loop there may be nowhere to stop.
+ if (!DT->isReachableFromEntry(&MBB)) return false;
+
bool MadeChange = false;
// Walk the basic block bottom-up. Remember if we saw a store.
@@ -134,7 +167,7 @@ bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
bool ProcessedBegin, SawStore = false;
do {
MachineInstr *MI = I; // The instruction to sink.
-
+
// Predecrement I (if it's not begin) so that it isn't invalidated by
// sinking.
ProcessedBegin = I == MBB.begin();
@@ -146,20 +179,80 @@ bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
if (SinkInstruction(MI, SawStore))
++NumSunk, MadeChange = true;
-
+
// If we just processed the first instruction in the block, we're done.
} while (!ProcessedBegin);
-
+
return MadeChange;
}
+MachineBasicBlock *MachineSinking::SplitCriticalEdge(MachineBasicBlock *FromBB,
+ MachineBasicBlock *ToBB) {
+ // Avoid breaking back edge. From == To means backedge for single BB loop.
+ if (!SplitEdges || NumSplit == SplitLimit || FromBB == ToBB)
+ return 0;
+
+ // Check for more "complex" loops.
+ if (LI->getLoopFor(FromBB) != LI->getLoopFor(ToBB) ||
+ !LI->isLoopHeader(ToBB)) {
+ // It's not always legal to break critical edges and sink the computation
+ // to the edge.
+ //
+ // BB#1:
+ // v1024
+ // Beq BB#3
+ // <fallthrough>
+ // BB#2:
+ // ... no uses of v1024
+ // <fallthrough>
+ // BB#3:
+ // ...
+ // = v1024
+ //
+ // If BB#1 -> BB#3 edge is broken and computation of v1024 is inserted:
+ //
+ // BB#1:
+ // ...
+ // Bne BB#2
+ // BB#4:
+ // v1024 =
+ // B BB#3
+ // BB#2:
+ // ... no uses of v1024
+ // <fallthrough>
+ // BB#3:
+ // ...
+ // = v1024
+ //
+ // This is incorrect since v1024 is not computed along the BB#1->BB#2->BB#3
+ // flow. We need to ensure that the new basic block the computation is
+ // sunk into dominates all the uses.
+ // It's only legal to break critical edge and sink the computation to the
+ // new block if all the predecessors of "To", except for "From", are
+ // not dominated by "From". Given SSA property, this means these
+ // predecessors are dominated by "To".
+ for (MachineBasicBlock::pred_iterator PI = ToBB->pred_begin(),
+ E = ToBB->pred_end(); PI != E; ++PI) {
+ if (*PI == FromBB)
+ continue;
+ if (!DT->dominates(ToBB, *PI))
+ return 0;
+ }
+
+ // FIXME: Determine if it's cost effective to break this edge.
+ return FromBB->SplitCriticalEdge(ToBB, this);
+ }
+
+ return 0;
+}
+
/// SinkInstruction - Determine whether it is safe to sink the specified machine
/// instruction out of its current block into a successor.
bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// Check if it's safe to move the instruction.
if (!MI->isSafeToMove(TII, AA, SawStore))
return false;
-
+
// FIXME: This should include support for sinking instructions within the
// block they are currently in to shorten the live ranges. We often get
// instructions sunk into the top of a large block, but it would be better to
@@ -167,22 +260,22 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// be careful not to *increase* register pressure though, e.g. sinking
// "x = y + z" down if it kills y and z would increase the live ranges of y
// and z and only shrink the live range of x.
-
+
// Loop over all the operands of the specified instruction. If there is
// anything we can't handle, bail out.
MachineBasicBlock *ParentBlock = MI->getParent();
-
+
// SuccToSinkTo - This is the successor to sink this instruction to, once we
// decide.
MachineBasicBlock *SuccToSinkTo = 0;
-
+
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue; // Ignore non-register operands.
-
+
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
-
+
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (MO.isUse()) {
// If the physreg has no defs anywhere, it's just an ambient register
@@ -190,13 +283,16 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// it could get allocated to something with a def during allocation.
if (!RegInfo->def_empty(Reg))
return false;
+
if (AllocatableSet.test(Reg))
return false;
+
// Check for a def among the register's aliases too.
for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
unsigned AliasReg = *Alias;
if (!RegInfo->def_empty(AliasReg))
return false;
+
if (AllocatableSet.test(AliasReg))
return false;
}
@@ -211,44 +307,52 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// If it's not safe to move defs of the register class, then abort.
if (!TII->isSafeToMoveRegClassDefs(RegInfo->getRegClass(Reg)))
return false;
-
+
// FIXME: This picks a successor to sink into based on having one
// successor that dominates all the uses. However, there are cases where
// sinking can happen but where the sink point isn't a successor. For
// example:
+ //
// x = computation
// if () {} else {}
// use x
- // the instruction could be sunk over the whole diamond for the
+ //
+ // the instruction could be sunk over the whole diamond for the
// if/then/else (or loop, etc), allowing it to be sunk into other blocks
// after that.
-
+
// Virtual register defs can only be sunk if all their uses are in blocks
// dominated by one of the successors.
if (SuccToSinkTo) {
// If a previous operand picked a block to sink to, then this operand
// must be sinkable to the same block.
- if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo))
+ bool LocalUse = false;
+ if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, ParentBlock, LocalUse))
return false;
+
continue;
}
-
+
// Otherwise, we should look at all the successors and decide which one
// we should sink to.
for (MachineBasicBlock::succ_iterator SI = ParentBlock->succ_begin(),
E = ParentBlock->succ_end(); SI != E; ++SI) {
- if (AllUsesDominatedByBlock(Reg, *SI)) {
+ bool LocalUse = false;
+ if (AllUsesDominatedByBlock(Reg, *SI, ParentBlock, LocalUse)) {
SuccToSinkTo = *SI;
break;
}
+ if (LocalUse)
+ // Def is used locally, it's never safe to move this def.
+ return false;
}
-
+
// If we couldn't find a block to sink to, ignore this instruction.
if (SuccToSinkTo == 0)
return false;
}
}
-
+
// If there are no outputs, it must have side-effects.
if (SuccToSinkTo == 0)
return false;
@@ -257,31 +361,85 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// landing pad is implicitly defined.
if (SuccToSinkTo->isLandingPad())
return false;
-
+
// It is not possible to sink an instruction into its own block. This can
// happen with loops.
if (MI->getParent() == SuccToSinkTo)
return false;
-
- DEBUG(dbgs() << "Sink instr " << *MI);
- DEBUG(dbgs() << "to block " << *SuccToSinkTo);
-
+
+ // If the instruction to move defines a dead physical register which is live
+ // when leaving the basic block, don't move it because it could turn into a
+ // "zombie" define of that preg. E.g., EFLAGS. (<rdar://problem/8030636>)
+ for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
+ const MachineOperand &MO = MI->getOperand(I);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ if (SuccToSinkTo->isLiveIn(Reg))
+ return false;
+ }
+
+ DEBUG(dbgs() << "Sink instr " << *MI << "\tinto block " << *SuccToSinkTo);
+
// If the block has multiple predecessors, this would introduce computation on
// a path where it doesn't already exist. We could split the critical edge,
// but for now we just punt.
// FIXME: Split critical edges if not backedges.
if (SuccToSinkTo->pred_size() > 1) {
- DEBUG(dbgs() << " *** PUNTING: Critical edge found\n");
- return false;
+ // We cannot sink a load across a critical edge - there may be stores in
+ // other code paths.
+ bool TryBreak = false;
+ bool store = true;
+ if (!MI->isSafeToMove(TII, AA, store)) {
+ DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
+ TryBreak = true;
+ }
+
+ // We don't want to sink across a critical edge if we don't dominate the
+ // successor. We could be introducing calculations to new code paths.
+ if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
+ DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
+ TryBreak = true;
+ }
+
+ // Don't sink instructions into a loop.
+ if (!TryBreak && LI->isLoopHeader(SuccToSinkTo)) {
+ DEBUG(dbgs() << " *** NOTE: Loop header found\n");
+ TryBreak = true;
+ }
+
+ // Otherwise we are OK with sinking along a critical edge.
+ if (!TryBreak)
+ DEBUG(dbgs() << "Sinking along critical edge.\n");
+ else {
+ MachineBasicBlock *NewSucc = SplitCriticalEdge(ParentBlock, SuccToSinkTo);
+ if (!NewSucc) {
+ DEBUG(dbgs() <<
+ " *** PUNTING: Not legal or profitable to break critical edge\n");
+ return false;
+ } else {
+ DEBUG(dbgs() << " *** Splitting critical edge:"
+ " BB#" << ParentBlock->getNumber()
+ << " -- BB#" << NewSucc->getNumber()
+ << " -- BB#" << SuccToSinkTo->getNumber() << '\n');
+ SuccToSinkTo = NewSucc;
+ ++NumSplit;
+ }
+ }
}
-
- // Determine where to insert into. Skip phi nodes.
+
+ // Determine where to insert into. Skip phi nodes.
MachineBasicBlock::iterator InsertPos = SuccToSinkTo->begin();
while (InsertPos != SuccToSinkTo->end() && InsertPos->isPHI())
++InsertPos;
-
+
// Move the instruction.
SuccToSinkTo->splice(InsertPos, ParentBlock, MI,
++MachineBasicBlock::iterator(MI));
+
+ // Conservatively, clear any kill flags, since it's possible that they are no
+ // longer correct.
+ MI->clearKillInfo();
+
return true;
}
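
The SplitCriticalEdge logic added above only breaks an edge From->To when every predecessor of To other than From is dominated by To, so a computation sunk into the new block still dominates all its uses. A small standalone sketch of just that legality check (toy blocks and a caller-supplied dominates predicate, not the LLVM pass):

#include <functional>
#include <vector>

struct ToyBlock { int Id; std::vector<const ToyBlock*> Preds; };

// Mirrors the check in MachineSinking::SplitCriticalEdge: splitting From->To
// and sinking a def into the new block is only considered when every other
// predecessor of To is dominated by To.
static bool canSplitAndSink(const ToyBlock *From, const ToyBlock *To,
                            const std::function<bool(const ToyBlock *,
                                                     const ToyBlock *)> &dominates) {
  for (const ToyBlock *P : To->Preds) {
    if (P == From)
      continue;
    if (!dominates(To, P))
      return false;   // a def in the split block would not dominate uses reached via P
  }
  return true;
}

int main() {
  // Toy diamond with an extra Entry->Join edge; Join's other predecessors are
  // not dominated by Join, so the split is rejected.
  ToyBlock Entry{0, {}}, Left{1, {&Entry}}, Right{2, {&Entry}},
           Join{3, {&Entry, &Left, &Right}};
  auto dominates = [](const ToyBlock *A, const ToyBlock *B) {
    return A == B || A->Id == 0;   // only Entry dominates every block here
  };
  return canSplitAndSink(&Entry, &Join, dominates) ? 1 : 0;   // expect 0 (not legal)
}
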
diff --git a/libclamav/c++/llvm/lib/CodeGen/MachineVerifier.cpp b/libclamav/c++/llvm/lib/CodeGen/MachineVerifier.cpp
index 434a1e8..1e88562 100644
--- a/libclamav/c++/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1,4 +1,4 @@
-//===-- MachineVerifier.cpp - Machine Code Verifier -------------*- C++ -*-===//
+//===-- MachineVerifier.cpp - Machine Code Verifier -----------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -24,6 +24,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Function.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -44,19 +45,14 @@ using namespace llvm;
namespace {
struct MachineVerifier {
- MachineVerifier(Pass *pass, bool allowDoubleDefs) :
+ MachineVerifier(Pass *pass) :
PASS(pass),
- allowVirtDoubleDefs(allowDoubleDefs),
- allowPhysDoubleDefs(allowDoubleDefs),
OutFileName(getenv("LLVM_VERIFY_MACHINEINSTRS"))
{}
bool runOnMachineFunction(MachineFunction &MF);
Pass *const PASS;
- const bool allowVirtDoubleDefs;
- const bool allowPhysDoubleDefs;
-
const char *const OutFileName;
raw_ostream *OS;
const MachineFunction *MF;
@@ -91,10 +87,6 @@ namespace {
// defined. Map value is the user.
RegMap vregsLiveIn;
- // Vregs that must be dead in because they are defined without being
- // killed first. Map value is the defining instruction.
- RegMap vregsDeadIn;
-
// Regs killed in MBB. They may be defined again, and will then be in both
// regsKilled and regsLiveOut.
RegSet regsKilled;
@@ -175,6 +167,7 @@ namespace {
// Analysis information if available
LiveVariables *LiveVars;
+ const LiveIntervals *LiveInts;
void visitMachineFunctionBefore();
void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
@@ -195,15 +188,14 @@ namespace {
void calcRegsRequired();
void verifyLiveVariables();
+ void verifyLiveIntervals();
};
struct MachineVerifierPass : public MachineFunctionPass {
static char ID; // Pass ID, replacement for typeid
- bool AllowDoubleDefs;
- explicit MachineVerifierPass(bool allowDoubleDefs = false)
- : MachineFunctionPass(&ID),
- AllowDoubleDefs(allowDoubleDefs) {}
+ MachineVerifierPass()
+ : MachineFunctionPass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
@@ -211,7 +203,7 @@ namespace {
}
bool runOnMachineFunction(MachineFunction &MF) {
- MF.verify(this, AllowDoubleDefs);
+ MF.verify(this);
return false;
}
};
@@ -219,17 +211,15 @@ namespace {
}
char MachineVerifierPass::ID = 0;
-static RegisterPass<MachineVerifierPass>
-MachineVer("machineverifier", "Verify generated machine code");
-static const PassInfo *const MachineVerifyID = &MachineVer;
+INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
+ "Verify generated machine code", false, false);
-FunctionPass *llvm::createMachineVerifierPass(bool allowPhysDoubleDefs) {
- return new MachineVerifierPass(allowPhysDoubleDefs);
+FunctionPass *llvm::createMachineVerifierPass() {
+ return new MachineVerifierPass();
}
-void MachineFunction::verify(Pass *p, bool allowDoubleDefs) const {
- MachineVerifier(p, allowDoubleDefs)
- .runOnMachineFunction(const_cast<MachineFunction&>(*this));
+void MachineFunction::verify(Pass *p) const {
+ MachineVerifier(p).runOnMachineFunction(const_cast<MachineFunction&>(*this));
}
bool MachineVerifier::runOnMachineFunction(MachineFunction &MF) {
@@ -255,10 +245,13 @@ bool MachineVerifier::runOnMachineFunction(MachineFunction &MF) {
TRI = TM->getRegisterInfo();
MRI = &MF.getRegInfo();
+ LiveVars = NULL;
+ LiveInts = NULL;
if (PASS) {
- LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
- } else {
- LiveVars = NULL;
+ LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
+ // We don't want to verify LiveVariables if LiveIntervals is available.
+ if (!LiveInts)
+ LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
}
visitMachineFunctionBefore();
@@ -279,7 +272,7 @@ bool MachineVerifier::runOnMachineFunction(MachineFunction &MF) {
if (OutFile)
delete OutFile;
else if (foundErrors)
- llvm_report_error("Found "+Twine(foundErrors)+" machine code errors.");
+ report_fatal_error("Found "+Twine(foundErrors)+" machine code errors.");
// Clean up.
regsLive.clear();
@@ -351,8 +344,8 @@ void MachineVerifier::visitMachineFunctionBefore() {
}
// Does iterator point to a and b as the first two elements?
-bool matchPair(MachineBasicBlock::const_succ_iterator i,
- const MachineBasicBlock *a, const MachineBasicBlock *b) {
+static bool matchPair(MachineBasicBlock::const_succ_iterator i,
+ const MachineBasicBlock *a, const MachineBasicBlock *b) {
if (*i == a)
return *++i == b;
if (*i == b)
@@ -390,7 +383,8 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
report("MBB exits via unconditional fall-through but its successor "
"differs from its CFG successor!", MBB);
}
- if (!MBB->empty() && MBB->back().getDesc().isBarrier()) {
+ if (!MBB->empty() && MBB->back().getDesc().isBarrier() &&
+ !TII->isPredicated(&MBB->back())) {
report("MBB exits via unconditional fall-through but ends with a "
"barrier instruction!", MBB);
}
@@ -470,7 +464,7 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
}
regsLive.clear();
- for (MachineBasicBlock::const_livein_iterator I = MBB->livein_begin(),
+ for (MachineBasicBlock::livein_iterator I = MBB->livein_begin(),
E = MBB->livein_end(); I != E; ++I) {
if (!TargetRegisterInfo::isPhysicalRegister(*I)) {
report("MBB live-in list contains non-physical register", MBB);
@@ -511,6 +505,20 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
if ((*I)->isStore() && !TI.mayStore())
report("Missing mayStore flag", MI);
}
+
+ // Debug values must not have a slot index.
+ // Other instructions must have one.
+ if (LiveInts) {
+ bool mapped = !LiveInts->isNotInMIMap(MI);
+ if (MI->isDebugValue()) {
+ if (mapped)
+ report("Debug instruction has a slot index", MI);
+ } else {
+ if (!mapped)
+ report("Missing slot index", MI);
+ }
+ }
+
}
void
@@ -552,28 +560,47 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
regsLiveInButUnused.erase(Reg);
bool isKill = false;
- if (MO->isKill()) {
- isKill = true;
- // Tied operands on two-address instuctions MUST NOT have a <kill> flag.
- if (MI->isRegTiedToDefOperand(MONum))
+ unsigned defIdx;
+ if (MI->isRegTiedToDefOperand(MONum, &defIdx)) {
+ // A two-addr use counts as a kill if use and def are the same.
+ unsigned DefReg = MI->getOperand(defIdx).getReg();
+ if (Reg == DefReg) {
+ isKill = true;
+ // And in that case an explicit kill flag is not allowed.
+ if (MO->isKill())
report("Illegal kill flag on two-address instruction operand",
MO, MONum);
- } else {
- // TwoAddress instr modifying a reg is treated as kill+def.
- unsigned defIdx;
- if (MI->isRegTiedToDefOperand(MONum, &defIdx) &&
- MI->getOperand(defIdx).getReg() == Reg)
- isKill = true;
- }
- if (isKill) {
+ } else if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ report("Two-address instruction operands must be identical",
+ MO, MONum);
+ }
+ } else
+ isKill = MO->isKill();
+
+ if (isKill)
addRegWithSubRegs(regsKilled, Reg);
- // Check that LiveVars knows this kill
- if (LiveVars && TargetRegisterInfo::isVirtualRegister(Reg)) {
- LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
- if (std::find(VI.Kills.begin(),
- VI.Kills.end(), MI) == VI.Kills.end())
- report("Kill missing from LiveVariables", MO, MONum);
+ // Check that LiveVars knows this kill.
+ if (LiveVars && TargetRegisterInfo::isVirtualRegister(Reg) &&
+ MO->isKill()) {
+ LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
+ if (std::find(VI.Kills.begin(),
+ VI.Kills.end(), MI) == VI.Kills.end())
+ report("Kill missing from LiveVariables", MO, MONum);
+ }
+
+ // Check LiveInts liveness and kill.
+ if (LiveInts && !LiveInts->isNotInMIMap(MI)) {
+ SlotIndex UseIdx = LiveInts->getInstructionIndex(MI).getUseIndex();
+ if (LiveInts->hasInterval(Reg)) {
+ const LiveInterval &LI = LiveInts->getInterval(Reg);
+ if (!LI.liveAt(UseIdx)) {
+ report("No live range at use", MO, MONum);
+ *OS << UseIdx << " is not live in " << LI << '\n';
+ }
+ // TODO: Verify isKill == LI.killedAt.
+ } else if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ report("Virtual register has no Live interval", MO, MONum);
}
}
@@ -602,6 +629,28 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
addRegWithSubRegs(regsDead, Reg);
else
addRegWithSubRegs(regsDefined, Reg);
+
+ // Check LiveInts for a live range, but only for virtual registers.
+ if (LiveInts && TargetRegisterInfo::isVirtualRegister(Reg) &&
+ !LiveInts->isNotInMIMap(MI)) {
+ SlotIndex DefIdx = LiveInts->getInstructionIndex(MI).getDefIndex();
+ if (LiveInts->hasInterval(Reg)) {
+ const LiveInterval &LI = LiveInts->getInterval(Reg);
+ if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx)) {
+ assert(LR->valno && "NULL valno is not allowed");
+ if (LR->valno->def != DefIdx) {
+ report("Inconsistent valno->def", MO, MONum);
+ *OS << "Valno " << LR->valno->id << " is not defined at "
+ << DefIdx << " in " << LI << '\n';
+ }
+ } else {
+ report("No live range at def", MO, MONum);
+ *OS << DefIdx << " is not live in " << LI << '\n';
+ }
+ } else {
+ report("Virtual register has no Live interval", MO, MONum);
+ }
+ }
}
// Check register classes.
@@ -631,11 +680,14 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
// Virtual register.
const TargetRegisterClass *RC = MRI->getRegClass(Reg);
if (SubIdx) {
- if (RC->subregclasses_begin()+SubIdx >= RC->subregclasses_end()) {
+ const TargetRegisterClass *SRC = RC->getSubRegisterRegClass(SubIdx);
+ if (!SRC) {
report("Invalid subregister index for virtual register", MO, MONum);
+ *OS << "Register class " << RC->getName()
+ << " does not support subreg index " << SubIdx << "\n";
return;
}
- RC = *(RC->subregclasses_begin()+SubIdx);
+ RC = SRC;
}
if (const TargetRegisterClass *DRC = TOI.getRegClass(TRI)) {
if (RC != DRC && !RC->hasSuperClass(DRC)) {
@@ -662,40 +714,9 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
void MachineVerifier::visitMachineInstrAfter(const MachineInstr *MI) {
BBInfo &MInfo = MBBInfoMap[MI->getParent()];
set_union(MInfo.regsKilled, regsKilled);
- set_subtract(regsLive, regsKilled);
- regsKilled.clear();
-
- // Verify that both <def> and <def,dead> operands refer to dead registers.
- RegVector defs(regsDefined);
- defs.append(regsDead.begin(), regsDead.end());
-
- for (RegVector::const_iterator I = defs.begin(), E = defs.end();
- I != E; ++I) {
- if (regsLive.count(*I)) {
- if (TargetRegisterInfo::isPhysicalRegister(*I)) {
- if (!allowPhysDoubleDefs && !isReserved(*I) &&
- !regsLiveInButUnused.count(*I)) {
- report("Redefining a live physical register", MI);
- *OS << "Register " << TRI->getName(*I)
- << " was defined but already live.\n";
- }
- } else {
- if (!allowVirtDoubleDefs) {
- report("Redefining a live virtual register", MI);
- *OS << "Virtual register %reg" << *I
- << " was defined but already live.\n";
- }
- }
- } else if (TargetRegisterInfo::isVirtualRegister(*I) &&
- !MInfo.regsKilled.count(*I)) {
- // Virtual register defined without being killed first must be dead on
- // entry.
- MInfo.vregsDeadIn.insert(std::make_pair(*I, MI));
- }
- }
-
- set_subtract(regsLive, regsDead); regsDead.clear();
- set_union(regsLive, regsDefined); regsDefined.clear();
+ set_subtract(regsLive, regsKilled); regsKilled.clear();
+ set_subtract(regsLive, regsDead); regsDead.clear();
+ set_union(regsLive, regsDefined); regsDefined.clear();
}
void
@@ -820,35 +841,15 @@ void MachineVerifier::visitMachineFunctionAfter() {
continue;
checkPHIOps(MFI);
-
- // Verify dead-in virtual registers.
- if (!allowVirtDoubleDefs) {
- for (MachineBasicBlock::const_pred_iterator PrI = MFI->pred_begin(),
- PrE = MFI->pred_end(); PrI != PrE; ++PrI) {
- BBInfo &PrInfo = MBBInfoMap[*PrI];
- if (!PrInfo.reachable)
- continue;
-
- for (RegMap::iterator I = MInfo.vregsDeadIn.begin(),
- E = MInfo.vregsDeadIn.end(); I != E; ++I) {
- // DeadIn register must be in neither regsLiveOut or vregsPassed of
- // any predecessor.
- if (PrInfo.isLiveOut(I->first)) {
- report("Live-in virtual register redefined", I->second);
- *OS << "Register %reg" << I->first
- << " was live-out from predecessor MBB #"
- << (*PrI)->getNumber() << ".\n";
- }
- }
- }
- }
}
- // Now check LiveVariables info if available
- if (LiveVars) {
+ // Now check liveness info if available
+ if (LiveVars || LiveInts)
calcRegsRequired();
+ if (LiveVars)
verifyLiveVariables();
- }
+ if (LiveInts)
+ verifyLiveIntervals();
}
void MachineVerifier::verifyLiveVariables() {
@@ -878,4 +879,55 @@ void MachineVerifier::verifyLiveVariables() {
}
}
+void MachineVerifier::verifyLiveIntervals() {
+ assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
+ for (LiveIntervals::const_iterator LVI = LiveInts->begin(),
+ LVE = LiveInts->end(); LVI != LVE; ++LVI) {
+ const LiveInterval &LI = *LVI->second;
+ assert(LVI->first == LI.reg && "Invalid reg to interval mapping");
+
+ for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
+ I!=E; ++I) {
+ VNInfo *VNI = *I;
+ const LiveRange *DefLR = LI.getLiveRangeContaining(VNI->def);
+
+ if (!DefLR) {
+ if (!VNI->isUnused()) {
+ report("Valno not live at def and not marked unused", MF);
+ *OS << "Valno #" << VNI->id << " in " << LI << '\n';
+ }
+ continue;
+ }
+
+ if (VNI->isUnused())
+ continue;
+
+ if (DefLR->valno != VNI) {
+ report("Live range at def has different valno", MF);
+ DefLR->print(*OS);
+ *OS << " should use valno #" << VNI->id << " in " << LI << '\n';
+ }
+
+ }
+
+ for (LiveInterval::const_iterator I = LI.begin(), E = LI.end(); I!=E; ++I) {
+ const LiveRange &LR = *I;
+ assert(LR.valno && "Live range has no valno");
+
+ if (LR.valno->id >= LI.getNumValNums() ||
+ LR.valno != LI.getValNumInfo(LR.valno->id)) {
+ report("Foreign valno in live range", MF);
+ LR.print(*OS);
+ *OS << " has a valno not in " << LI << '\n';
+ }
+
+ if (LR.valno->isUnused()) {
+ report("Live range valno is marked unused", MF);
+ LR.print(*OS);
+ *OS << " in " << LI << '\n';
+ }
+
+ }
+ }
+}
diff --git a/libclamav/c++/llvm/lib/CodeGen/OptimizeExts.cpp b/libclamav/c++/llvm/lib/CodeGen/OptimizeExts.cpp
deleted file mode 100644
index acb6869..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/OptimizeExts.cpp
+++ /dev/null
@@ -1,197 +0,0 @@
-//===-- OptimizeExts.cpp - Optimize sign / zero extension instrs -----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass performs optimization of sign / zero extension instructions. It
-// may be extended to handle other instructions of similar property.
-//
-// On some targets, some instructions, e.g. X86 sign / zero extension, may
-// leave the source value in the lower part of the result. This pass will
-// replace (some) uses of the pre-extension value with uses of the sub-register
-// of the results.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "ext-opt"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/Statistic.h"
-using namespace llvm;
-
-static cl::opt<bool> Aggressive("aggressive-ext-opt", cl::Hidden,
- cl::desc("Aggressive extension optimization"));
-
-STATISTIC(NumReuse, "Number of extension results reused");
-
-namespace {
- class OptimizeExts : public MachineFunctionPass {
- const TargetMachine *TM;
- const TargetInstrInfo *TII;
- MachineRegisterInfo *MRI;
- MachineDominatorTree *DT; // Machine dominator tree
-
- public:
- static char ID; // Pass identification
- OptimizeExts() : MachineFunctionPass(&ID) {}
-
- virtual bool runOnMachineFunction(MachineFunction &MF);
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- MachineFunctionPass::getAnalysisUsage(AU);
- if (Aggressive) {
- AU.addRequired<MachineDominatorTree>();
- AU.addPreserved<MachineDominatorTree>();
- }
- }
-
- private:
- bool OptimizeInstr(MachineInstr *MI, MachineBasicBlock *MBB,
- SmallPtrSet<MachineInstr*, 8> &LocalMIs);
- };
-}
-
-char OptimizeExts::ID = 0;
-static RegisterPass<OptimizeExts>
-X("opt-exts", "Optimize sign / zero extensions");
-
-FunctionPass *llvm::createOptimizeExtsPass() { return new OptimizeExts(); }
-
-/// OptimizeInstr - If instruction is a copy-like instruction, i.e. it reads
-/// a single register and writes a single register and it does not modify
-/// the source, and if the source value is preserved as a sub-register of
-/// the result, then replace all reachable uses of the source with the subreg
-/// of the result.
-bool OptimizeExts::OptimizeInstr(MachineInstr *MI, MachineBasicBlock *MBB,
- SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
- bool Changed = false;
- LocalMIs.insert(MI);
-
- unsigned SrcReg, DstReg, SubIdx;
- if (TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx)) {
- if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
- TargetRegisterInfo::isPhysicalRegister(SrcReg))
- return false;
-
- MachineRegisterInfo::use_iterator UI = MRI->use_begin(SrcReg);
- if (++UI == MRI->use_end())
- // No other uses.
- return false;
-
- // Ok, the source has other uses. See if we can replace the other uses
- // with use of the result of the extension.
- SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
- UI = MRI->use_begin(DstReg);
- for (MachineRegisterInfo::use_iterator UE = MRI->use_end(); UI != UE;
- ++UI)
- ReachedBBs.insert(UI->getParent());
-
- bool ExtendLife = true;
- // Uses that are in the same BB of uses of the result of the instruction.
- SmallVector<MachineOperand*, 8> Uses;
- // Uses that the result of the instruction can reach.
- SmallVector<MachineOperand*, 8> ExtendedUses;
-
- UI = MRI->use_begin(SrcReg);
- for (MachineRegisterInfo::use_iterator UE = MRI->use_end(); UI != UE;
- ++UI) {
- MachineOperand &UseMO = UI.getOperand();
- MachineInstr *UseMI = &*UI;
- if (UseMI == MI)
- continue;
- if (UseMI->isPHI()) {
- ExtendLife = false;
- continue;
- }
-
- MachineBasicBlock *UseMBB = UseMI->getParent();
- if (UseMBB == MBB) {
- // Local uses that come after the extension.
- if (!LocalMIs.count(UseMI))
- Uses.push_back(&UseMO);
- } else if (ReachedBBs.count(UseMBB))
- // Non-local uses where the result of extension is used. Always
- // replace these unless it's a PHI.
- Uses.push_back(&UseMO);
- else if (Aggressive && DT->dominates(MBB, UseMBB))
- // We may want to extend live range of the extension result in order
- // to replace these uses.
- ExtendedUses.push_back(&UseMO);
- else {
- // Both will be live out of the def MBB anyway. Don't extend live
- // range of the extension result.
- ExtendLife = false;
- break;
- }
- }
-
- if (ExtendLife && !ExtendedUses.empty())
- // Ok, we'll extend the liveness of the extension result.
- std::copy(ExtendedUses.begin(), ExtendedUses.end(),
- std::back_inserter(Uses));
-
- // Now replace all uses.
- if (!Uses.empty()) {
- SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;
- // Look for PHI uses of the extended result, we don't want to extend the
- // liveness of a PHI input. It breaks all kinds of assumptions down
- // stream. A PHI use is expected to be the kill of its source values.
- UI = MRI->use_begin(DstReg);
- for (MachineRegisterInfo::use_iterator UE = MRI->use_end(); UI != UE;
- ++UI)
- if (UI->isPHI())
- PHIBBs.insert(UI->getParent());
-
- const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
- for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
- MachineOperand *UseMO = Uses[i];
- MachineInstr *UseMI = UseMO->getParent();
- MachineBasicBlock *UseMBB = UseMI->getParent();
- if (PHIBBs.count(UseMBB))
- continue;
- unsigned NewVR = MRI->createVirtualRegister(RC);
- BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
- TII->get(TargetOpcode::EXTRACT_SUBREG), NewVR)
- .addReg(DstReg).addImm(SubIdx);
- UseMO->setReg(NewVR);
- ++NumReuse;
- Changed = true;
- }
- }
- }
-
- return Changed;
-}
-
-bool OptimizeExts::runOnMachineFunction(MachineFunction &MF) {
- TM = &MF.getTarget();
- TII = TM->getInstrInfo();
- MRI = &MF.getRegInfo();
- DT = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;
-
- bool Changed = false;
-
- SmallPtrSet<MachineInstr*, 8> LocalMIs;
- for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
- MachineBasicBlock *MBB = &*I;
- LocalMIs.clear();
- for (MachineBasicBlock::iterator MII = I->begin(), ME = I->end(); MII != ME;
- ++MII) {
- MachineInstr *MI = &*MII;
- Changed |= OptimizeInstr(MI, MBB, LocalMIs);
- }
- }
-
- return Changed;
-}
diff --git a/libclamav/c++/llvm/lib/CodeGen/OptimizePHIs.cpp b/libclamav/c++/llvm/lib/CodeGen/OptimizePHIs.cpp
index 2717d4d..edb4eea 100644
--- a/libclamav/c++/llvm/lib/CodeGen/OptimizePHIs.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/OptimizePHIs.cpp
@@ -33,7 +33,7 @@ namespace {
public:
static char ID; // Pass identification
- OptimizePHIs() : MachineFunctionPass(&ID) {}
+ OptimizePHIs() : MachineFunctionPass(ID) {}
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -54,8 +54,8 @@ namespace {
}
char OptimizePHIs::ID = 0;
-static RegisterPass<OptimizePHIs>
-X("opt-phis", "Optimize machine instruction PHIs");
+INITIALIZE_PASS(OptimizePHIs, "opt-phis",
+ "Optimize machine instruction PHIs", false, false);
FunctionPass *llvm::createOptimizePHIsPass() { return new OptimizePHIs(); }
@@ -101,12 +101,11 @@ bool OptimizePHIs::IsSingleValuePHICycle(MachineInstr *MI,
MachineInstr *SrcMI = MRI->getVRegDef(SrcReg);
// Skip over register-to-register moves.
- unsigned MvSrcReg, MvDstReg, SrcSubIdx, DstSubIdx;
- if (SrcMI &&
- TII->isMoveInstr(*SrcMI, MvSrcReg, MvDstReg, SrcSubIdx, DstSubIdx) &&
- SrcSubIdx == 0 && DstSubIdx == 0 &&
- TargetRegisterInfo::isVirtualRegister(MvSrcReg))
- SrcMI = MRI->getVRegDef(MvSrcReg);
+ if (SrcMI && SrcMI->isCopy() &&
+ !SrcMI->getOperand(0).getSubReg() &&
+ !SrcMI->getOperand(1).getSubReg() &&
+ TargetRegisterInfo::isVirtualRegister(SrcMI->getOperand(1).getReg()))
+ SrcMI = MRI->getVRegDef(SrcMI->getOperand(1).getReg());
if (!SrcMI)
return false;
diff --git a/libclamav/c++/llvm/lib/CodeGen/PBQP/HeuristicBase.h b/libclamav/c++/llvm/lib/CodeGen/PBQP/HeuristicBase.h
index 3bb24e1..791c227 100644
--- a/libclamav/c++/llvm/lib/CodeGen/PBQP/HeuristicBase.h
+++ b/libclamav/c++/llvm/lib/CodeGen/PBQP/HeuristicBase.h
@@ -173,9 +173,13 @@ namespace PBQP {
bool finished = false;
while (!finished) {
- if (!optimalReduce())
- if (!impl().heuristicReduce())
+ if (!optimalReduce()) {
+ if (impl().heuristicReduce()) {
+ getSolver().recordRN();
+ } else {
finished = true;
+ }
+ }
}
}
diff --git a/libclamav/c++/llvm/lib/CodeGen/PBQP/HeuristicSolver.h b/libclamav/c++/llvm/lib/CodeGen/PBQP/HeuristicSolver.h
index bd18b52..35514f9 100644
--- a/libclamav/c++/llvm/lib/CodeGen/PBQP/HeuristicSolver.h
+++ b/libclamav/c++/llvm/lib/CodeGen/PBQP/HeuristicSolver.h
@@ -226,6 +226,8 @@ namespace PBQP {
// Nothing to do. Just push the node onto the reduction stack.
pushToStack(nItr);
+
+ s.recordR0();
}
/// \brief Apply rule R1.
@@ -274,6 +276,7 @@ namespace PBQP {
assert(nd.getSolverDegree() == 0 &&
"Degree 1 with edge removed should be 0.");
pushToStack(xnItr);
+ s.recordR1();
}
/// \brief Apply rule R2.
@@ -378,8 +381,14 @@ namespace PBQP {
removeSolverEdge(zxeItr);
pushToStack(xnItr);
+ s.recordR2();
}
+ /// \brief Record an application of the RN rule.
+ ///
+ /// For use by the HeuristicBase.
+ void recordRN() { s.recordRN(); }
+
private:
NodeData& getSolverNodeData(Graph::NodeItr nItr) {
@@ -406,7 +415,7 @@ namespace PBQP {
// Create node data objects.
for (Graph::NodeItr nItr = g.nodesBegin(), nEnd = g.nodesEnd();
- nItr != nEnd; ++nItr) {
+ nItr != nEnd; ++nItr) {
nodeDataList.push_back(NodeData());
g.setNodeData(nItr, &nodeDataList.back());
}
diff --git a/libclamav/c++/llvm/lib/CodeGen/PBQP/Heuristics/Briggs.h b/libclamav/c++/llvm/lib/CodeGen/PBQP/Heuristics/Briggs.h
index 30d34d9..18eaf7c 100644
--- a/libclamav/c++/llvm/lib/CodeGen/PBQP/Heuristics/Briggs.h
+++ b/libclamav/c++/llvm/lib/CodeGen/PBQP/Heuristics/Briggs.h
@@ -18,7 +18,6 @@
#ifndef LLVM_CODEGEN_PBQP_HEURISTICS_BRIGGS_H
#define LLVM_CODEGEN_PBQP_HEURISTICS_BRIGGS_H
-#include "llvm/Support/Compiler.h"
#include "../HeuristicSolver.h"
#include "../HeuristicBase.h"
@@ -53,9 +52,7 @@ namespace PBQP {
bool operator()(Graph::NodeItr n1Itr, Graph::NodeItr n2Itr) const {
if (s->getSolverDegree(n1Itr) > s->getSolverDegree(n2Itr))
return true;
- if (s->getSolverDegree(n1Itr) < s->getSolverDegree(n2Itr))
- return false;
- return (&*n1Itr < &*n2Itr);
+ return false;
}
private:
HeuristicSolverImpl<Briggs> *s;
@@ -70,9 +67,7 @@ namespace PBQP {
cost2 = g->getNodeCosts(n2Itr)[0] / s->getSolverDegree(n2Itr);
if (cost1 < cost2)
return true;
- if (cost1 > cost2)
- return false;
- return (&*n1Itr < &*n2Itr);
+ return false;
}
private:
@@ -267,8 +262,8 @@ namespace PBQP {
if (!nd.isHeuristic)
return;
- EdgeData &ed ATTRIBUTE_UNUSED = getHeuristicEdgeData(eItr);
-
+ EdgeData &ed = getHeuristicEdgeData(eItr);
+ (void)ed;
assert(ed.isUpToDate && "Edge data is not up to date.");
// Update node.
diff --git a/libclamav/c++/llvm/lib/CodeGen/PBQP/Solution.h b/libclamav/c++/llvm/lib/CodeGen/PBQP/Solution.h
index 294b537..047fd04 100644
--- a/libclamav/c++/llvm/lib/CodeGen/PBQP/Solution.h
+++ b/libclamav/c++/llvm/lib/CodeGen/PBQP/Solution.h
@@ -26,15 +26,46 @@ namespace PBQP {
/// To get the selection for each node in the problem use the getSelection method.
class Solution {
private:
+
typedef std::map<Graph::NodeItr, unsigned, NodeItrComparator> SelectionsMap;
SelectionsMap selections;
+ unsigned r0Reductions, r1Reductions, r2Reductions, rNReductions;
+
public:
+
+    /// \brief Construct an empty solution with all reduction counters zeroed
+    /// so the accessors below never return indeterminate values.
+    Solution()
+      : r0Reductions(0), r1Reductions(0), r2Reductions(0), rNReductions(0) {}
/// \brief Number of nodes for which selections have been made.
/// @return Number of nodes for which selections have been made.
unsigned numNodes() const { return selections.size(); }
+ /// \brief Records a reduction via the R0 rule. Should be called from the
+ /// solver only.
+ void recordR0() { ++r0Reductions; }
+
+ /// \brief Returns the number of R0 reductions applied to solve the problem.
+ unsigned numR0Reductions() const { return r0Reductions; }
+
+ /// \brief Records a reduction via the R1 rule. Should be called from the
+ /// solver only.
+ void recordR1() { ++r1Reductions; }
+
+ /// \brief Returns the number of R1 reductions applied to solve the problem.
+ unsigned numR1Reductions() const { return r1Reductions; }
+
+ /// \brief Records a reduction via the R2 rule. Should be called from the
+ /// solver only.
+ void recordR2() { ++r2Reductions; }
+
+ /// \brief Returns the number of R2 reductions applied to solve the problem.
+ unsigned numR2Reductions() const { return r2Reductions; }
+
+ /// \brief Records a reduction via the RN rule. Should be called from the
+ /// solver only.
+ void recordRN() { ++rNReductions; }
+
+ /// \brief Returns the number of RN reductions applied to solve the problem.
+ unsigned numRNReductions() const { return rNReductions; }
+
/// \brief Set the selection for a given node.
/// @param nItr Node iterator.
/// @param selection Selection for nItr.
diff --git a/libclamav/c++/llvm/lib/CodeGen/PHIElimination.cpp b/libclamav/c++/llvm/lib/CodeGen/PHIElimination.cpp
index 8bbe0a7..d4df4c5 100644
--- a/libclamav/c++/llvm/lib/CodeGen/PHIElimination.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/PHIElimination.cpp
@@ -20,6 +20,7 @@
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Function.h"
@@ -27,7 +28,6 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
@@ -35,57 +35,58 @@
using namespace llvm;
STATISTIC(NumAtomic, "Number of atomic phis lowered");
-STATISTIC(NumSplits, "Number of critical edges split on demand");
STATISTIC(NumReused, "Number of reused lowered phis");
char PHIElimination::ID = 0;
-static RegisterPass<PHIElimination>
-X("phi-node-elimination", "Eliminate PHI nodes for register allocation");
+INITIALIZE_PASS(PHIElimination, "phi-node-elimination",
+ "Eliminate PHI nodes for register allocation", false, false);
-const PassInfo *const llvm::PHIEliminationID = &X;
+char &llvm::PHIEliminationID = PHIElimination::ID;
void llvm::PHIElimination::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<LiveVariables>();
AU.addPreserved<MachineDominatorTree>();
- // rdar://7401784 This would be nice:
- // AU.addPreservedID(MachineLoopInfoID);
+ AU.addPreserved<MachineLoopInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
-bool llvm::PHIElimination::runOnMachineFunction(MachineFunction &Fn) {
- MRI = &Fn.getRegInfo();
+bool llvm::PHIElimination::runOnMachineFunction(MachineFunction &MF) {
+ MRI = &MF.getRegInfo();
bool Changed = false;
// Split critical edges to help the coalescer
- if (LiveVariables *LV = getAnalysisIfAvailable<LiveVariables>())
- for (MachineFunction::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
- Changed |= SplitPHIEdges(Fn, *I, *LV);
+ if (LiveVariables *LV = getAnalysisIfAvailable<LiveVariables>()) {
+ MachineLoopInfo *MLI = getAnalysisIfAvailable<MachineLoopInfo>();
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
+ Changed |= SplitPHIEdges(MF, *I, *LV, MLI);
+ }
// Populate VRegPHIUseCount
- analyzePHINodes(Fn);
+ analyzePHINodes(MF);
// Eliminate PHI instructions by inserting copies into predecessor blocks.
- for (MachineFunction::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
- Changed |= EliminatePHINodes(Fn, *I);
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
+ Changed |= EliminatePHINodes(MF, *I);
// Remove dead IMPLICIT_DEF instructions.
for (SmallPtrSet<MachineInstr*, 4>::iterator I = ImpDefs.begin(),
E = ImpDefs.end(); I != E; ++I) {
MachineInstr *DefMI = *I;
unsigned DefReg = DefMI->getOperand(0).getReg();
- if (MRI->use_empty(DefReg))
+ if (MRI->use_nodbg_empty(DefReg))
DefMI->eraseFromParent();
}
// Clean up the lowered PHI instructions.
for (LoweredPHIMap::iterator I = LoweredPHIs.begin(), E = LoweredPHIs.end();
I != E; ++I)
- Fn.DeleteMachineInstr(I->first);
+ MF.DeleteMachineInstr(I->first);
LoweredPHIs.clear();
ImpDefs.clear();
VRegPHIUseCount.clear();
+
return Changed;
}
@@ -180,11 +181,11 @@ void llvm::PHIElimination::LowerAtomicPHINode(
unsigned NumSrcs = (MPhi->getNumOperands() - 1) / 2;
unsigned DestReg = MPhi->getOperand(0).getReg();
+ assert(MPhi->getOperand(0).getSubReg() == 0 && "Can't handle sub-reg PHIs");
bool isDead = MPhi->getOperand(0).isDead();
// Create a new register for the incoming PHI arguments.
MachineFunction &MF = *MBB.getParent();
- const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(DestReg);
unsigned IncomingReg = 0;
bool reusedIncoming = false; // Is IncomingReg reused from an earlier PHI?
@@ -208,9 +209,12 @@ void llvm::PHIElimination::LowerAtomicPHINode(
++NumReused;
DEBUG(dbgs() << "Reusing %reg" << IncomingReg << " for " << *MPhi);
} else {
+ const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(DestReg);
entry = IncomingReg = MF.getRegInfo().createVirtualRegister(RC);
}
- TII->copyRegToReg(MBB, AfterPHIsIt, DestReg, IncomingReg, RC, RC);
+ BuildMI(MBB, AfterPHIsIt, MPhi->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), DestReg)
+ .addReg(IncomingReg);
}
// Update live variable information if there is any.
@@ -264,6 +268,8 @@ void llvm::PHIElimination::LowerAtomicPHINode(
SmallPtrSet<MachineBasicBlock*, 8> MBBsInsertedInto;
for (int i = NumSrcs - 1; i >= 0; --i) {
unsigned SrcReg = MPhi->getOperand(i*2+1).getReg();
+ unsigned SrcSubReg = MPhi->getOperand(i*2+1).getSubReg();
+
assert(TargetRegisterInfo::isVirtualRegister(SrcReg) &&
"Machine PHI Operands must all be virtual registers!");
@@ -292,7 +298,8 @@ void llvm::PHIElimination::LowerAtomicPHINode(
// Insert the copy.
if (!reusedIncoming && IncomingReg)
- TII->copyRegToReg(opBlock, InsertPos, IncomingReg, SrcReg, RC, RC);
+ BuildMI(opBlock, InsertPos, MPhi->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), IncomingReg).addReg(SrcReg, 0, SrcSubReg);
// Now update live variable information if we have it. Otherwise we're done
if (!LV) continue;
@@ -364,8 +371,8 @@ void llvm::PHIElimination::LowerAtomicPHINode(
/// used in a PHI node. We map that to the BB the vreg is coming from. This is
/// used later to determine when the vreg is killed in the BB.
///
-void llvm::PHIElimination::analyzePHINodes(const MachineFunction& Fn) {
- for (MachineFunction::const_iterator I = Fn.begin(), E = Fn.end();
+void llvm::PHIElimination::analyzePHINodes(const MachineFunction& MF) {
+ for (MachineFunction::const_iterator I = MF.begin(), E = MF.end();
I != E; ++I)
for (MachineBasicBlock::const_iterator BBI = I->begin(), BBE = I->end();
BBI != BBE && BBI->isPHI(); ++BBI)
@@ -376,10 +383,12 @@ void llvm::PHIElimination::analyzePHINodes(const MachineFunction& Fn) {
bool llvm::PHIElimination::SplitPHIEdges(MachineFunction &MF,
MachineBasicBlock &MBB,
- LiveVariables &LV) {
+ LiveVariables &LV,
+ MachineLoopInfo *MLI) {
if (MBB.empty() || !MBB.front().isPHI() || MBB.isLandingPad())
return false; // Quick exit for basic blocks without PHIs.
+ bool Changed = false;
for (MachineBasicBlock::const_iterator BBI = MBB.begin(), BBE = MBB.end();
BBI != BBE && BBI->isPHI(); ++BBI) {
for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2) {
@@ -388,58 +397,16 @@ bool llvm::PHIElimination::SplitPHIEdges(MachineFunction &MF,
// We break edges when registers are live out from the predecessor block
// (not considering PHI nodes). If the register is live in to this block
// anyway, we would gain nothing from splitting.
- if (!LV.isLiveIn(Reg, MBB) && LV.isLiveOut(Reg, *PreMBB))
- SplitCriticalEdge(PreMBB, &MBB);
+ // Avoid splitting backedges of loops. It would introduce small
+ // out-of-line blocks into the loop which is very bad for code placement.
+ if (PreMBB != &MBB &&
+ !LV.isLiveIn(Reg, MBB) && LV.isLiveOut(Reg, *PreMBB)) {
+ if (!MLI ||
+ !(MLI->getLoopFor(PreMBB) == MLI->getLoopFor(&MBB) &&
+ MLI->isLoopHeader(&MBB)))
+ Changed |= PreMBB->SplitCriticalEdge(&MBB, this) != 0;
+ }
}
}
return true;
}
-
-MachineBasicBlock *PHIElimination::SplitCriticalEdge(MachineBasicBlock *A,
- MachineBasicBlock *B) {
- assert(A && B && "Missing MBB end point");
-
- MachineFunction *MF = A->getParent();
-
- // We may need to update A's terminator, but we can't do that if AnalyzeBranch
- // fails. If A uses a jump table, we won't touch it.
- const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
- MachineBasicBlock *TBB = 0, *FBB = 0;
- SmallVector<MachineOperand, 4> Cond;
- if (TII->AnalyzeBranch(*A, TBB, FBB, Cond))
- return NULL;
-
- ++NumSplits;
-
- MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
- MF->insert(llvm::next(MachineFunction::iterator(A)), NMBB);
- DEBUG(dbgs() << "PHIElimination splitting critical edge:"
- " BB#" << A->getNumber()
- << " -- BB#" << NMBB->getNumber()
- << " -- BB#" << B->getNumber() << '\n');
-
- A->ReplaceUsesOfBlockWith(B, NMBB);
- A->updateTerminator();
-
- // Insert unconditional "jump B" instruction in NMBB if necessary.
- NMBB->addSuccessor(B);
- if (!NMBB->isLayoutSuccessor(B)) {
- Cond.clear();
- MF->getTarget().getInstrInfo()->InsertBranch(*NMBB, B, NULL, Cond);
- }
-
- // Fix PHI nodes in B so they refer to NMBB instead of A
- for (MachineBasicBlock::iterator i = B->begin(), e = B->end();
- i != e && i->isPHI(); ++i)
- for (unsigned ni = 1, ne = i->getNumOperands(); ni != ne; ni += 2)
- if (i->getOperand(ni+1).getMBB() == A)
- i->getOperand(ni+1).setMBB(NMBB);
-
- if (LiveVariables *LV=getAnalysisIfAvailable<LiveVariables>())
- LV->addNewBlock(NMBB, A, B);
-
- if (MachineDominatorTree *MDT=getAnalysisIfAvailable<MachineDominatorTree>())
- MDT->addNewBlock(NMBB, A);
-
- return NMBB;
-}
diff --git a/libclamav/c++/llvm/lib/CodeGen/PHIElimination.h b/libclamav/c++/llvm/lib/CodeGen/PHIElimination.h
index 7dedf03..45a9718 100644
--- a/libclamav/c++/llvm/lib/CodeGen/PHIElimination.h
+++ b/libclamav/c++/llvm/lib/CodeGen/PHIElimination.h
@@ -13,19 +13,21 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
namespace llvm {
class LiveVariables;
+ class MachineRegisterInfo;
+ class MachineLoopInfo;
/// Lower PHI instructions to copies.
class PHIElimination : public MachineFunctionPass {
- MachineRegisterInfo *MRI; // Machine register information
+ MachineRegisterInfo *MRI; // Machine register information
public:
static char ID; // Pass identification, replacement for typeid
- PHIElimination() : MachineFunctionPass(&ID) {}
+ PHIElimination() : MachineFunctionPass(ID) {}
virtual bool runOnMachineFunction(MachineFunction &Fn);
@@ -49,7 +51,7 @@ namespace llvm {
/// Split critical edges where necessary for good coalescer performance.
bool SplitPHIEdges(MachineFunction &MF, MachineBasicBlock &MBB,
- LiveVariables &LV);
+ LiveVariables &LV, MachineLoopInfo *MLI);
/// SplitCriticalEdge - Split a critical edge from A to B by
/// inserting a new MBB. Update branches in A and PHI instructions
diff --git a/libclamav/c++/llvm/lib/CodeGen/Passes.cpp b/libclamav/c++/llvm/lib/CodeGen/Passes.cpp
index 5ea2941..3489db2 100644
--- a/libclamav/c++/llvm/lib/CodeGen/Passes.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/Passes.cpp
@@ -24,6 +24,11 @@ using namespace llvm;
//===---------------------------------------------------------------------===//
MachinePassRegistry RegisterRegAlloc::Registry;
+static FunctionPass *createDefaultRegisterAllocator() { return 0; }
+static RegisterRegAlloc
+defaultRegAlloc("default",
+ "pick register allocator based on -O option",
+ createDefaultRegisterAllocator);
//===---------------------------------------------------------------------===//
///
@@ -33,8 +38,8 @@ MachinePassRegistry RegisterRegAlloc::Registry;
static cl::opt<RegisterRegAlloc::FunctionPassCtor, false,
RegisterPassParser<RegisterRegAlloc> >
RegAlloc("regalloc",
- cl::init(&createLinearScanRegisterAllocator),
- cl::desc("Register allocator to use (default=linearscan)"));
+ cl::init(&createDefaultRegisterAllocator),
+ cl::desc("Register allocator to use"));
//===---------------------------------------------------------------------===//
@@ -42,13 +47,22 @@ RegAlloc("regalloc",
/// createRegisterAllocator - choose the appropriate register allocator.
///
//===---------------------------------------------------------------------===//
-FunctionPass *llvm::createRegisterAllocator() {
+FunctionPass *llvm::createRegisterAllocator(CodeGenOpt::Level OptLevel) {
RegisterRegAlloc::FunctionPassCtor Ctor = RegisterRegAlloc::getDefault();
-
+
if (!Ctor) {
Ctor = RegAlloc;
RegisterRegAlloc::setDefault(RegAlloc);
}
-
- return Ctor();
+
+ if (Ctor != createDefaultRegisterAllocator)
+ return Ctor();
+
+ // When the 'default' allocator is requested, pick one based on OptLevel.
+ switch (OptLevel) {
+ case CodeGenOpt::None:
+ return createFastRegisterAllocator();
+ default:
+ return createLinearScanRegisterAllocator();
+ }
}
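
The Passes.cpp change registers a do-nothing "default" factory and resolves it from the optimization level only when no explicit allocator was requested. A small self-contained sketch of that sentinel-factory pattern; Pass, PassCtor and createAllocator are invented placeholders here, not the real MachinePassRegistry API:

    #include <cstdio>

    struct Pass { const char *Name; };

    static Pass *createFast()    { static Pass P = { "fast" };       return &P; }
    static Pass *createLinear()  { static Pass P = { "linearscan" }; return &P; }
    static Pass *createDefault() { return 0; }   // sentinel; never builds a pass itself

    typedef Pass *(*PassCtor)();

    static Pass *createAllocator(PassCtor Chosen, int OptLevel) {
      if (Chosen != createDefault)
        return Chosen();                         // an explicit choice wins
      return OptLevel == 0 ? createFast() : createLinear();
    }

    int main() {
      std::printf("%s\n", createAllocator(createDefault, 0)->Name);  // prints "fast"
      std::printf("%s\n", createAllocator(createLinear, 0)->Name);   // prints "linearscan"
      return 0;
    }
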
diff --git a/libclamav/c++/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/libclamav/c++/llvm/lib/CodeGen/PeepholeOptimizer.cpp
new file mode 100644
index 0000000..17cee46
--- /dev/null
+++ b/libclamav/c++/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -0,0 +1,287 @@
+//===-- PeepholeOptimizer.cpp - Peephole Optimizations --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Perform peephole optimizations on the machine code:
+//
+// - Optimize Extensions
+//
+// Optimization of sign / zero extension instructions. It may be extended to
+// handle other instructions with similar properties.
+//
+// On some targets, some instructions, e.g. X86 sign / zero extension, may
+// leave the source value in the lower part of the result. This optimization
+// will replace some uses of the pre-extension value with uses of the
+// sub-register of the results.
+//
+// - Optimize Comparisons
+//
+// Optimization of comparison instructions. For instance, in this code:
+//
+// sub r1, 1
+// cmp r1, 0
+// bz L1
+//
+// If the "sub" instruction already sets (or could be modified to set) the
+// same flag that the "cmp" instruction sets and that "bz" uses, then we can
+// eliminate the "cmp" instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "peephole-opt"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
+using namespace llvm;
+
+// Optimize Extensions
+static cl::opt<bool>
+Aggressive("aggressive-ext-opt", cl::Hidden,
+ cl::desc("Aggressive extension optimization"));
+
+STATISTIC(NumReuse, "Number of extension results reused");
+STATISTIC(NumEliminated, "Number of compares eliminated");
+
+namespace {
+ class PeepholeOptimizer : public MachineFunctionPass {
+ const TargetMachine *TM;
+ const TargetInstrInfo *TII;
+ MachineRegisterInfo *MRI;
+ MachineDominatorTree *DT; // Machine dominator tree
+
+ public:
+ static char ID; // Pass identification
+ PeepholeOptimizer() : MachineFunctionPass(ID) {}
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ if (Aggressive) {
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ }
+ }
+
+ private:
+ bool OptimizeCmpInstr(MachineInstr *MI, MachineBasicBlock *MBB);
+ bool OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
+ SmallPtrSet<MachineInstr*, 8> &LocalMIs);
+ };
+}
+
+char PeepholeOptimizer::ID = 0;
+INITIALIZE_PASS(PeepholeOptimizer, "peephole-opts",
+ "Peephole Optimizations", false, false);
+
+FunctionPass *llvm::createPeepholeOptimizerPass() {
+ return new PeepholeOptimizer();
+}
+
+/// OptimizeExtInstr - If instruction is a copy-like instruction, i.e. it reads
+/// a single register and writes a single register and it does not modify the
+/// source, and if the source value is preserved as a sub-register of the
+/// result, then replace all reachable uses of the source with the subreg of the
+/// result.
+///
+/// Do not generate an EXTRACT that is used only in a debug use, as this changes
+/// the code. Since this code does not currently share EXTRACTs, just ignore all
+/// debug uses.
+bool PeepholeOptimizer::
+OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
+ SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
+ LocalMIs.insert(MI);
+
+ unsigned SrcReg, DstReg, SubIdx;
+ if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
+ return false;
+
+ if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
+ TargetRegisterInfo::isPhysicalRegister(SrcReg))
+ return false;
+
+ MachineRegisterInfo::use_nodbg_iterator UI = MRI->use_nodbg_begin(SrcReg);
+ if (++UI == MRI->use_nodbg_end())
+ // No other uses.
+ return false;
+
+ // The source has other uses. See if we can replace the other uses with use of
+ // the result of the extension.
+ SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
+ UI = MRI->use_nodbg_begin(DstReg);
+ for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
+ UI != UE; ++UI)
+ ReachedBBs.insert(UI->getParent());
+
+ // Uses that are in the same BB as uses of the result of the instruction.
+ SmallVector<MachineOperand*, 8> Uses;
+
+ // Uses that the result of the instruction can reach.
+ SmallVector<MachineOperand*, 8> ExtendedUses;
+
+ bool ExtendLife = true;
+ UI = MRI->use_nodbg_begin(SrcReg);
+ for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end();
+ UI != UE; ++UI) {
+ MachineOperand &UseMO = UI.getOperand();
+ MachineInstr *UseMI = &*UI;
+ if (UseMI == MI)
+ continue;
+
+ if (UseMI->isPHI()) {
+ ExtendLife = false;
+ continue;
+ }
+
+ // It's an error to translate this:
+ //
+ // %reg1025 = <sext> %reg1024
+ // ...
+ // %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
+ //
+ // into this:
+ //
+ // %reg1025 = <sext> %reg1024
+ // ...
+ // %reg1027 = COPY %reg1025:4
+ // %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
+ //
+ // The problem here is that SUBREG_TO_REG is there to assert that an
+ // implicit zext occurs. It doesn't insert a zext instruction. If we allow
+ // the COPY here, it will give us the value after the <sext>, not the
+ // original value of %reg1024 before <sext>.
+ if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
+ continue;
+
+ MachineBasicBlock *UseMBB = UseMI->getParent();
+ if (UseMBB == MBB) {
+ // Local uses that come after the extension.
+ if (!LocalMIs.count(UseMI))
+ Uses.push_back(&UseMO);
+ } else if (ReachedBBs.count(UseMBB)) {
+ // Non-local uses where the result of the extension is used. Always
+ // replace these unless it's a PHI.
+ Uses.push_back(&UseMO);
+ } else if (Aggressive && DT->dominates(MBB, UseMBB)) {
+ // We may want to extend the live range of the extension result in order
+ // to replace these uses.
+ ExtendedUses.push_back(&UseMO);
+ } else {
+ // Both will be live out of the def MBB anyway. Don't extend live range of
+ // the extension result.
+ ExtendLife = false;
+ break;
+ }
+ }
+
+ if (ExtendLife && !ExtendedUses.empty())
+ // Extend the liveness of the extension result.
+ std::copy(ExtendedUses.begin(), ExtendedUses.end(),
+ std::back_inserter(Uses));
+
+ // Now replace all uses.
+ bool Changed = false;
+ if (!Uses.empty()) {
+ SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;
+
+ // Look for PHI uses of the extended result, we don't want to extend the
+ // liveness of a PHI input. It breaks all kinds of assumptions downstream.
+ // A PHI use is expected to be the kill of its source values.
+ UI = MRI->use_nodbg_begin(DstReg);
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UE = MRI->use_nodbg_end(); UI != UE; ++UI)
+ if (UI->isPHI())
+ PHIBBs.insert(UI->getParent());
+
+ const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
+ for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
+ MachineOperand *UseMO = Uses[i];
+ MachineInstr *UseMI = UseMO->getParent();
+ MachineBasicBlock *UseMBB = UseMI->getParent();
+ if (PHIBBs.count(UseMBB))
+ continue;
+
+ unsigned NewVR = MRI->createVirtualRegister(RC);
+ BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), NewVR)
+ .addReg(DstReg, 0, SubIdx);
+
+ UseMO->setReg(NewVR);
+ ++NumReuse;
+ Changed = true;
+ }
+ }
+
+ return Changed;
+}
+
+/// OptimizeCmpInstr - If the instruction is a compare and the previous
+/// instruction it's comparing against already sets (or could be modified to
+/// set) the same flag as the compare, then we can remove the comparison and use
+/// the flag from the previous instruction.
+bool PeepholeOptimizer::OptimizeCmpInstr(MachineInstr *MI,
+ MachineBasicBlock *MBB) {
+ // If this instruction is a comparison against zero and isn't comparing a
+ // physical register, we can try to optimize it.
+ unsigned SrcReg;
+ int CmpValue;
+ if (!TII->AnalyzeCompare(MI, SrcReg, CmpValue) ||
+ TargetRegisterInfo::isPhysicalRegister(SrcReg) || CmpValue != 0)
+ return false;
+
+ MachineRegisterInfo::def_iterator DI = MRI->def_begin(SrcReg);
+ if (llvm::next(DI) != MRI->def_end())
+ // Only support one definition.
+ return false;
+
+ // Attempt to convert the defining instruction to set the "zero" flag.
+ if (TII->ConvertToSetZeroFlag(&*DI, MI)) {
+ ++NumEliminated;
+ return true;
+ }
+
+ return false;
+}
+
+bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
+ TM = &MF.getTarget();
+ TII = TM->getInstrInfo();
+ MRI = &MF.getRegInfo();
+ DT = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;
+
+ bool Changed = false;
+
+ SmallPtrSet<MachineInstr*, 8> LocalMIs;
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
+ MachineBasicBlock *MBB = &*I;
+ LocalMIs.clear();
+
+ for (MachineBasicBlock::iterator
+ MII = I->begin(), ME = I->end(); MII != ME; ) {
+ MachineInstr *MI = &*MII;
+
+ if (MI->getDesc().isCompare() &&
+ !MI->getDesc().hasUnmodeledSideEffects()) {
+ ++MII; // The iterator may become invalid if the compare is deleted.
+ Changed |= OptimizeCmpInstr(MI, MBB);
+ } else {
+ Changed |= OptimizeExtInstr(MI, MBB, LocalMIs);
+ ++MII;
+ }
+ }
+ }
+
+ return Changed;
+}
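
The extension optimization moved into this new pass relies on one architectural property spelled out in the header comment: after a widening sign or zero extension, the original narrow value is still available as the low sub-register of the wide result, so later readers of the narrow value can be rewritten to read that low part instead. A tiny stand-alone C++ illustration of the property (not part of the patch):

    #include <cassert>
    #include <stdint.h>

    int main() {
      int16_t narrow = -42;
      int32_t wide   = narrow;                      // the "sext" result keeps the value
      int16_t low    = static_cast<int16_t>(wide);  // reading the low sub-register
      assert(low == narrow);                        // so uses of narrow can read low instead
      return 0;
    }
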
diff --git a/libclamav/c++/llvm/lib/CodeGen/PostRAHazardRecognizer.cpp b/libclamav/c++/llvm/lib/CodeGen/PostRAHazardRecognizer.cpp
new file mode 100644
index 0000000..cbde2b0
--- /dev/null
+++ b/libclamav/c++/llvm/lib/CodeGen/PostRAHazardRecognizer.cpp
@@ -0,0 +1,180 @@
+//===----- PostRAHazardRecognizer.cpp - hazard recognizer ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This implements a hazard recognizer using the instruction itineraries
+// defined for the current target.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "post-RA-sched"
+#include "llvm/CodeGen/PostRAHazardRecognizer.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrItineraries.h"
+
+using namespace llvm;
+
+PostRAHazardRecognizer::
+PostRAHazardRecognizer(const InstrItineraryData &LItinData) :
+ ScheduleHazardRecognizer(), ItinData(LItinData) {
+ // Determine the maximum depth of any itinerary. This determines the
+ // depth of the scoreboard. We always make the scoreboard at least 1
+ // cycle deep to avoid dealing with the boundary condition.
+ unsigned ScoreboardDepth = 1;
+ if (!ItinData.isEmpty()) {
+ for (unsigned idx = 0; ; ++idx) {
+ if (ItinData.isEndMarker(idx))
+ break;
+
+ const InstrStage *IS = ItinData.beginStage(idx);
+ const InstrStage *E = ItinData.endStage(idx);
+ unsigned ItinDepth = 0;
+ for (; IS != E; ++IS)
+ ItinDepth += IS->getCycles();
+
+ ScoreboardDepth = std::max(ScoreboardDepth, ItinDepth);
+ }
+ }
+
+ ReservedScoreboard.reset(ScoreboardDepth);
+ RequiredScoreboard.reset(ScoreboardDepth);
+
+ DEBUG(dbgs() << "Using post-ra hazard recognizer: ScoreboardDepth = "
+ << ScoreboardDepth << '\n');
+}
+
+void PostRAHazardRecognizer::Reset() {
+ RequiredScoreboard.reset();
+ ReservedScoreboard.reset();
+}
+
+void PostRAHazardRecognizer::ScoreBoard::dump() const {
+ dbgs() << "Scoreboard:\n";
+
+ unsigned last = Depth - 1;
+ while ((last > 0) && ((*this)[last] == 0))
+ last--;
+
+ for (unsigned i = 0; i <= last; i++) {
+ unsigned FUs = (*this)[i];
+ dbgs() << "\t";
+ for (int j = 31; j >= 0; j--)
+ dbgs() << ((FUs & (1 << j)) ? '1' : '0');
+ dbgs() << '\n';
+ }
+}
+
+ScheduleHazardRecognizer::HazardType
+PostRAHazardRecognizer::getHazardType(SUnit *SU) {
+ if (ItinData.isEmpty())
+ return NoHazard;
+
+ unsigned cycle = 0;
+
+ // Use the itinerary for the underlying instruction to check for
+ // free FU's in the scoreboard at the appropriate future cycles.
+ unsigned idx = SU->getInstr()->getDesc().getSchedClass();
+ for (const InstrStage *IS = ItinData.beginStage(idx),
+ *E = ItinData.endStage(idx); IS != E; ++IS) {
+ // We must find one of the stage's units free for every cycle the
+ // stage is occupied. FIXME it would be more accurate to find the
+ // same unit free in all the cycles.
+ for (unsigned int i = 0; i < IS->getCycles(); ++i) {
+ assert(((cycle + i) < RequiredScoreboard.getDepth()) &&
+ "Scoreboard depth exceeded!");
+
+ unsigned freeUnits = IS->getUnits();
+ switch (IS->getReservationKind()) {
+ default:
+ assert(0 && "Invalid FU reservation");
+ case InstrStage::Required:
+ // Required FUs conflict with both reserved and required ones
+ freeUnits &= ~ReservedScoreboard[cycle + i];
+ // FALLTHROUGH
+ case InstrStage::Reserved:
+ // Reserved FUs can conflict only with required ones.
+ freeUnits &= ~RequiredScoreboard[cycle + i];
+ break;
+ }
+
+ if (!freeUnits) {
+ DEBUG(dbgs() << "*** Hazard in cycle " << (cycle + i) << ", ");
+ DEBUG(dbgs() << "SU(" << SU->NodeNum << "): ");
+ DEBUG(SU->getInstr()->dump());
+ return Hazard;
+ }
+ }
+
+ // Advance the cycle to the next stage.
+ cycle += IS->getNextCycles();
+ }
+
+ return NoHazard;
+}
+
+void PostRAHazardRecognizer::EmitInstruction(SUnit *SU) {
+ if (ItinData.isEmpty())
+ return;
+
+ unsigned cycle = 0;
+
+ // Use the itinerary for the underlying instruction to reserve FU's
+ // in the scoreboard at the appropriate future cycles.
+ unsigned idx = SU->getInstr()->getDesc().getSchedClass();
+ for (const InstrStage *IS = ItinData.beginStage(idx),
+ *E = ItinData.endStage(idx); IS != E; ++IS) {
+ // We must reserve one of the stage's units for every cycle the
+ // stage is occupied. FIXME it would be more accurate to reserve
+ // the same unit in all the cycles.
+ for (unsigned int i = 0; i < IS->getCycles(); ++i) {
+ assert(((cycle + i) < RequiredScoreboard.getDepth()) &&
+ "Scoreboard depth exceeded!");
+
+ unsigned freeUnits = IS->getUnits();
+ switch (IS->getReservationKind()) {
+ default:
+ assert(0 && "Invalid FU reservation");
+ case InstrStage::Required:
+ // Required FUs conflict with both reserved and required ones
+ freeUnits &= ~ReservedScoreboard[cycle + i];
+ // FALLTHROUGH
+ case InstrStage::Reserved:
+ // Reserved FUs can conflict only with required ones.
+ freeUnits &= ~RequiredScoreboard[cycle + i];
+ break;
+ }
+
+ // reduce to a single unit
+ unsigned freeUnit = 0;
+ do {
+ freeUnit = freeUnits;
+ freeUnits = freeUnit & (freeUnit - 1);
+ } while (freeUnits);
+
+ assert(freeUnit && "No function unit available!");
+ if (IS->getReservationKind() == InstrStage::Required)
+ RequiredScoreboard[cycle + i] |= freeUnit;
+ else
+ ReservedScoreboard[cycle + i] |= freeUnit;
+ }
+
+ // Advance the cycle to the next stage.
+ cycle += IS->getNextCycles();
+ }
+
+ DEBUG(ReservedScoreboard.dump());
+ DEBUG(RequiredScoreboard.dump());
+}
+
+void PostRAHazardRecognizer::AdvanceCycle() {
+ ReservedScoreboard[0] = 0; ReservedScoreboard.advance();
+ RequiredScoreboard[0] = 0; RequiredScoreboard.advance();
+}
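
The do/while loop in EmitInstruction above whittles the freeUnits mask down to a single function unit by repeatedly clearing the lowest set bit. A short stand-alone sketch of that idiom, assuming a non-zero input mask as the surrounding assert guarantees:

    #include <cassert>

    static unsigned pickOneUnit(unsigned freeUnits) {
      unsigned freeUnit = 0;
      do {
        freeUnit = freeUnits;
        freeUnits = freeUnit & (freeUnit - 1);   // clear the lowest set bit
      } while (freeUnits);
      return freeUnit;                           // the highest set bit of the input
    }

    int main() {
      assert(pickOneUnit(0x6u) == 0x4u);   // units {1,2} -> unit 2
      assert(pickOneUnit(0x1u) == 0x1u);   // a single unit maps to itself
      return 0;
    }
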
diff --git a/libclamav/c++/llvm/lib/CodeGen/PostRASchedulerList.cpp b/libclamav/c++/llvm/lib/CodeGen/PostRASchedulerList.cpp
index 424181c..f0bd6d1 100644
--- a/libclamav/c++/llvm/lib/CodeGen/PostRASchedulerList.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/PostRASchedulerList.cpp
@@ -22,8 +22,6 @@
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
-#include "ExactHazardRecognizer.h"
-#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
@@ -46,7 +44,6 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
-#include <map>
#include <set>
using namespace llvm;
@@ -66,10 +63,6 @@ EnableAntiDepBreaking("break-anti-dependencies",
cl::desc("Break post-RA scheduling anti-dependencies: "
"\"critical\", \"all\", or \"none\""),
cl::init("none"), cl::Hidden);
-static cl::opt<bool>
-EnablePostRAHazardAvoidance("avoid-hazards",
- cl::desc("Enable exact hazard avoidance"),
- cl::init(true), cl::Hidden);
// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
@@ -86,12 +79,13 @@ AntiDepBreaker::~AntiDepBreaker() { }
namespace {
class PostRAScheduler : public MachineFunctionPass {
AliasAnalysis *AA;
+ const TargetInstrInfo *TII;
CodeGenOpt::Level OptLevel;
public:
static char ID;
PostRAScheduler(CodeGenOpt::Level ol) :
- MachineFunctionPass(&ID), OptLevel(ol) {}
+ MachineFunctionPass(ID), OptLevel(ol) {}
void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
@@ -115,7 +109,7 @@ namespace {
/// AvailableQueue - The priority queue to use for the available SUnits.
///
LatencyPriorityQueue AvailableQueue;
-
+
/// PendingQueue - This contains all of the instructions whose operands have
/// been issued, but their results are not ready yet (due to the latency of
/// the operation). Once the operands becomes available, the instruction is
@@ -136,7 +130,7 @@ namespace {
/// KillIndices - The index of the most recent kill (proceeding bottom-up),
/// or ~0u if the register is not live.
- unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
+ std::vector<unsigned> KillIndices;
public:
SchedulePostRATDList(MachineFunction &MF,
@@ -146,7 +140,8 @@ namespace {
AntiDepBreaker *ADB,
AliasAnalysis *aa)
: ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
- HazardRec(HR), AntiDepBreak(ADB), AA(aa) {}
+ HazardRec(HR), AntiDepBreak(ADB), AA(aa),
+ KillIndices(TRI->getNumRegs()) {}
~SchedulePostRATDList() {
}
@@ -159,7 +154,7 @@ namespace {
/// Schedule - Schedule the instruction range using list scheduling.
///
void Schedule();
-
+
/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
@@ -180,7 +175,7 @@ namespace {
void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
void ListScheduleTopDown();
void StartBlockForKills(MachineBasicBlock *BB);
-
+
// ToggleKillFlag - Toggle a register operand kill flag. Other
// adjustments may be made to the instruction if necessary. Return
// true if the operand has been deleted, false if not.
@@ -188,30 +183,9 @@ namespace {
};
}
-/// isSchedulingBoundary - Test if the given instruction should be
-/// considered a scheduling boundary. This primarily includes labels
-/// and terminators.
-///
-static bool isSchedulingBoundary(const MachineInstr *MI,
- const MachineFunction &MF) {
- // Terminators and labels can't be scheduled around.
- if (MI->getDesc().isTerminator() || MI->isLabel())
- return true;
-
- // Don't attempt to schedule around any instruction that modifies
- // a stack-oriented pointer, as it's unlikely to be profitable. This
- // saves compile time, because it doesn't require every single
- // stack slot reference to depend on the instruction that does the
- // modification.
- const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
- if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
- return true;
-
- return false;
-}
-
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
AA = &getAnalysis<AliasAnalysis>();
+ TII = Fn.getTarget().getInstrInfo();
// Check for explicit enable/disable of post-ra scheduling.
TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
@@ -228,23 +202,24 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
// Check for antidep breaking override...
if (EnableAntiDepBreaking.getPosition() > 0) {
- AntiDepMode = (EnableAntiDepBreaking == "all") ? TargetSubtarget::ANTIDEP_ALL :
- (EnableAntiDepBreaking == "critical") ? TargetSubtarget::ANTIDEP_CRITICAL :
- TargetSubtarget::ANTIDEP_NONE;
+ AntiDepMode = (EnableAntiDepBreaking == "all") ?
+ TargetSubtarget::ANTIDEP_ALL :
+ (EnableAntiDepBreaking == "critical")
+ ? TargetSubtarget::ANTIDEP_CRITICAL : TargetSubtarget::ANTIDEP_NONE;
}
DEBUG(dbgs() << "PostRAScheduler\n");
const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
- const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
- ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
- (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
- (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();
- AntiDepBreaker *ADB =
+ const TargetMachine &TM = Fn.getTarget();
+ const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
+ ScheduleHazardRecognizer *HR =
+ TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins);
+ AntiDepBreaker *ADB =
((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
(AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn, CriticalPathRCs) :
- ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
+ ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
(AntiDepBreaker *)new CriticalAntiDepBreaker(Fn) : NULL));
SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, ADB, AA);
@@ -271,10 +246,10 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
MachineBasicBlock::iterator Current = MBB->end();
unsigned Count = MBB->size(), CurrentCount = Count;
for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
- MachineInstr *MI = prior(I);
- if (isSchedulingBoundary(MI, Fn)) {
+ MachineInstr *MI = llvm::prior(I);
+ if (TII->isSchedulingBoundary(MI, MBB, Fn)) {
Scheduler.Run(MBB, I, Current, CurrentCount);
- Scheduler.EmitSchedule(0);
+ Scheduler.EmitSchedule();
Current = MI;
CurrentCount = Count - 1;
Scheduler.Observe(MI, CurrentCount);
@@ -286,7 +261,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
assert((MBB->begin() == Current || CurrentCount != 0) &&
"Instruction count mismatch!");
Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
- Scheduler.EmitSchedule(0);
+ Scheduler.EmitSchedule();
// Clean up register live-range state.
Scheduler.FinishBlock();
@@ -300,7 +275,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
return true;
}
-
+
/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
@@ -321,10 +296,10 @@ void SchedulePostRATDList::Schedule() {
BuildSchedGraph(AA);
if (AntiDepBreak != NULL) {
- unsigned Broken =
+ unsigned Broken =
AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
InsertPosIndex);
-
+
if (Broken != 0) {
// We made changes. Update the dependency graph.
// Theoretically we could update the graph in place:
@@ -337,7 +312,7 @@ void SchedulePostRATDList::Schedule() {
EntrySU = SUnit();
ExitSU = SUnit();
BuildSchedGraph(AA);
-
+
NumFixedAnti += Broken;
}
}
@@ -415,7 +390,7 @@ bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
MO.setIsKill(true);
return false;
}
-
+
// If MO itself is live, clear the kill flag...
if (KillIndices[MO.getReg()] != ~0u) {
MO.setIsKill(false);
@@ -454,7 +429,7 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
BitVector ReservedRegs = TRI->getReservedRegs(MF);
StartBlockForKills(MBB);
-
+
// Examine block from end to start...
unsigned Count = MBB->size();
for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
@@ -474,9 +449,9 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
if (!MO.isDef()) continue;
// Ignore two-addr defs.
if (MI->isRegTiedToUseOperand(i)) continue;
-
+
KillIndices[Reg] = ~0u;
-
+
// Repeat for all subregs.
for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
*Subreg; ++Subreg) {
@@ -511,17 +486,17 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
if (kill)
kill = (KillIndices[Reg] == ~0u);
}
-
+
if (MO.isKill() != kill) {
DEBUG(dbgs() << "Fixing " << MO << " in ");
// Warning: ToggleKillFlag may invalidate MO.
ToggleKillFlag(MI, MO);
DEBUG(MI->dump());
}
-
+
killedRegs.insert(Reg);
}
-
+
// Mark any used register (that is not using undef) and subregs as
// now live...
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -531,7 +506,7 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
if ((Reg == 0) || ReservedRegs.test(Reg)) continue;
KillIndices[Reg] = Count;
-
+
for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
*Subreg; ++Subreg) {
KillIndices[*Subreg] = Count;
@@ -563,7 +538,7 @@ void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
// available. This is the max of the start time of all predecessors plus
// their latencies.
SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
-
+
// If all the node's predecessors are scheduled, this node is ready
// to be scheduled. Ignore the special ExitSU node.
if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
@@ -584,9 +559,9 @@ void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
-
+
Sequence.push_back(SU);
- assert(CurCycle >= SU->getDepth() &&
+ assert(CurCycle >= SU->getDepth() &&
"Node scheduled above its depth!");
SU->setDepthToAtLeast(CurCycle);
@@ -599,7 +574,7 @@ void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
unsigned CurCycle = 0;
-
+
// We're scheduling top-down but we're visiting the regions in
// bottom-up order, so we don't know the hazards at the start of a
// region. So assume no hazards (this should usually be ok as most
@@ -680,15 +655,6 @@ void SchedulePostRATDList::ListScheduleTopDown() {
ScheduleNodeTopDown(FoundSUnit, CurCycle);
HazardRec->EmitInstruction(FoundSUnit);
CycleHasInsts = true;
-
- // If we are using the target-specific hazards, then don't
- // advance the cycle time just because we schedule a node. If
- // the target allows it we can schedule multiple nodes in the
- // same cycle.
- if (!EnablePostRAHazardAvoidance) {
- if (FoundSUnit->Latency) // Don't increment CurCycle for pseudo-ops!
- ++CurCycle;
- }
} else {
if (CycleHasInsts) {
DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
diff --git a/libclamav/c++/llvm/lib/CodeGen/PreAllocSplitting.cpp b/libclamav/c++/llvm/lib/CodeGen/PreAllocSplitting.cpp
index 70e91aa..cd9d83e 100644
--- a/libclamav/c++/llvm/lib/CodeGen/PreAllocSplitting.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/PreAllocSplitting.cpp
@@ -92,7 +92,7 @@ namespace {
public:
static char ID;
PreAllocSplitting()
- : MachineFunctionPass(&ID) {}
+ : MachineFunctionPass(ID) {}
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -203,10 +203,11 @@ namespace {
char PreAllocSplitting::ID = 0;
-static RegisterPass<PreAllocSplitting>
-X("pre-alloc-splitting", "Pre-Register Allocation Live Interval Splitting");
+INITIALIZE_PASS(PreAllocSplitting, "pre-alloc-splitting",
+ "Pre-Register Allocation Live Interval Splitting",
+ false, false);
-const PassInfo *const llvm::PreAllocSplittingID = &X;
+char &llvm::PreAllocSplittingID = PreAllocSplitting::ID;
/// findSpillPoint - Find a gap as far away from the given MI that's suitable
/// for spilling the current live interval. The index must be before any
@@ -512,9 +513,6 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
LI->addRange(LiveRange(UseIndex, EndIndex, RetVNI));
// FIXME: Need to set kills properly for inter-block stuff.
- if (RetVNI->isKill(UseIndex)) RetVNI->removeKill(UseIndex);
- if (IsIntraBlock)
- RetVNI->addKill(EndIndex);
} else if (ContainsDefs && ContainsUses) {
SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];
SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];
@@ -556,12 +554,6 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
NewVNs, LiveOut, Phis, false, true);
LI->addRange(LiveRange(StartIndex, EndIndex, RetVNI));
-
- if (foundUse && RetVNI->isKill(StartIndex))
- RetVNI->removeKill(StartIndex);
- if (IsIntraBlock) {
- RetVNI->addKill(EndIndex);
- }
}
// Memoize results so we don't have to recompute them.
@@ -636,9 +628,6 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us
for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I =
IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) {
I->second->setHasPHIKill(true);
- SlotIndex KillIndex(LIs->getMBBEndIdx(I->first), true);
- if (!I->second->isKill(KillIndex))
- I->second->addKill(KillIndex);
}
}
@@ -648,8 +637,6 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us
} else
EndIndex = LIs->getMBBEndIdx(MBB);
LI->addRange(LiveRange(StartIndex, EndIndex, RetVNI));
- if (IsIntraBlock)
- RetVNI->addKill(EndIndex);
// Memoize results so we don't have to recompute them.
if (!IsIntraBlock)
@@ -665,7 +652,7 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us
/// ReconstructLiveInterval - Recompute a live interval from scratch.
void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
- BumpPtrAllocator& Alloc = LIs->getVNInfoAllocator();
+ VNInfo::Allocator& Alloc = LIs->getVNInfoAllocator();
// Clear the old ranges and valnos;
LI->clear();
@@ -690,11 +677,9 @@ void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
VNInfo* NewVN = LI->getNextValue(DefIdx, 0, true, Alloc);
// If the def is a move, set the copy field.
- unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- if (TII->isMoveInstr(*DI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
- if (DstReg == LI->reg)
- NewVN->setCopy(&*DI);
-
+ if (DI->isCopyLike() && DI->getOperand(0).getReg() == LI->reg)
+ NewVN->setCopy(&*DI);
+
NewVNs[&*DI] = NewVN;
}
@@ -725,25 +710,6 @@ void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
VNInfo* DeadVN = NewVNs[&*DI];
LI->addRange(LiveRange(DefIdx, DefIdx.getNextSlot(), DeadVN));
- DeadVN->addKill(DefIdx);
- }
-
- // Update kill markers.
- for (LiveInterval::vni_iterator VI = LI->vni_begin(), VE = LI->vni_end();
- VI != VE; ++VI) {
- VNInfo* VNI = *VI;
- for (unsigned i = 0, e = VNI->kills.size(); i != e; ++i) {
- SlotIndex KillIdx = VNI->kills[i];
- if (KillIdx.isPHI())
- continue;
- MachineInstr *KillMI = LIs->getInstructionFromIndex(KillIdx);
- if (KillMI) {
- MachineOperand *KillMO = KillMI->findRegisterUseOperand(CurrLI->reg);
- if (KillMO)
- // It could be a dead def.
- KillMO->setIsKill();
- }
- }
}
}
@@ -773,19 +739,14 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) {
VNsToCopy.push_back(OldVN);
// Locate two-address redefinitions
- for (VNInfo::KillSet::iterator KI = OldVN->kills.begin(),
- KE = OldVN->kills.end(); KI != KE; ++KI) {
- assert(!KI->isPHI() &&
- "VN previously reported having no PHI kills.");
- MachineInstr* MI = LIs->getInstructionFromIndex(*KI);
- unsigned DefIdx = MI->findRegisterDefOperandIdx(CurrLI->reg);
- if (DefIdx == ~0U) continue;
- if (MI->isRegTiedToUseOperand(DefIdx)) {
- VNInfo* NextVN =
- CurrLI->findDefinedVNInfoForRegInt(KI->getDefIndex());
- if (NextVN == OldVN) continue;
+ for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(CurrLI->reg),
+ DE = MRI->def_end(); DI != DE; ++DI) {
+ if (!DI->isRegTiedToUseOperand(DI.getOperandNo())) continue;
+ SlotIndex DefIdx = LIs->getInstructionIndex(&*DI).getDefIndex();
+ VNInfo* NextVN = CurrLI->findDefinedVNInfoForRegInt(DefIdx);
+ if (std::find(VNsToCopy.begin(), VNsToCopy.end(), NextVN) !=
+ VNsToCopy.end())
Stack.push_back(NextVN);
- }
}
}
@@ -836,7 +797,7 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) {
if (IntervalSSMap.count(CurrLI->reg))
IntervalSSMap[NewVReg] = IntervalSSMap[CurrLI->reg];
- NumRenumbers++;
+ ++NumRenumbers;
}
bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
@@ -854,7 +815,7 @@ bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
if (KillPt == DefMI->getParent()->end())
return false;
- TII->reMaterialize(MBB, RestorePt, VReg, 0, DefMI, TRI);
+ TII->reMaterialize(MBB, RestorePt, VReg, 0, DefMI, *TRI);
SlotIndex RematIdx = LIs->InsertMachineInstrInMaps(prior(RestorePt));
ReconstructLiveInterval(CurrLI);
@@ -882,7 +843,7 @@ MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg,
!RefsInMBB.count(FoldPt))
--FoldPt;
- int OpIdx = FoldPt->findRegisterDefOperandIdx(vreg, false);
+ int OpIdx = FoldPt->findRegisterDefOperandIdx(vreg);
if (OpIdx == -1)
return 0;
@@ -899,12 +860,11 @@ MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg,
SS = MFI->CreateSpillStackObject(RC->getSize(), RC->getAlignment());
}
- MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
- FoldPt, Ops, SS);
+ MachineInstr* FMI = TII->foldMemoryOperand(FoldPt, Ops, SS);
if (FMI) {
LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
- FMI = MBB->insert(MBB->erase(FoldPt), FMI);
+ FoldPt->eraseFromParent();
++NumFolds;
IntervalSSMap[vreg] = SS;
@@ -980,12 +940,11 @@ MachineInstr* PreAllocSplitting::FoldRestore(unsigned vreg,
if (!TII->canFoldMemoryOperand(FoldPt, Ops))
return 0;
- MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
- FoldPt, Ops, SS);
+ MachineInstr* FMI = TII->foldMemoryOperand(FoldPt, Ops, SS);
if (FMI) {
LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
- FMI = MBB->insert(MBB->erase(FoldPt), FMI);
+ FoldPt->eraseFromParent();
++NumRestoreFolds;
}
@@ -1061,7 +1020,8 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
// Add spill.
SS = CreateSpillStackSlot(CurrLI->reg, RC);
- TII->storeRegToStackSlot(*BarrierMBB, SpillPt, CurrLI->reg, true, SS, RC);
+ TII->storeRegToStackSlot(*BarrierMBB, SpillPt, CurrLI->reg, true, SS, RC,
+ TRI);
SpillMI = prior(SpillPt);
SpillIndex = LIs->InsertMachineInstrInMaps(SpillMI);
}
@@ -1097,7 +1057,8 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
}
// Add spill.
SS = CreateSpillStackSlot(CurrLI->reg, RC);
- TII->storeRegToStackSlot(*DefMBB, SpillPt, CurrLI->reg, false, SS, RC);
+ TII->storeRegToStackSlot(*DefMBB, SpillPt, CurrLI->reg, false, SS, RC,
+ TRI);
SpillMI = prior(SpillPt);
SpillIndex = LIs->InsertMachineInstrInMaps(SpillMI);
}
@@ -1116,7 +1077,7 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
RestoreIndex = LIs->getInstructionIndex(RestorePt);
FoldedRestore = true;
} else {
- TII->loadRegFromStackSlot(*BarrierMBB, RestorePt, CurrLI->reg, SS, RC);
+ TII->loadRegFromStackSlot(*BarrierMBB, RestorePt, CurrLI->reg, SS, RC, TRI);
MachineInstr *LoadMI = prior(RestorePt);
RestoreIndex = LIs->InsertMachineInstrInMaps(LoadMI);
}
@@ -1152,7 +1113,7 @@ PreAllocSplitting::SplitRegLiveIntervals(const TargetRegisterClass **RCs,
// codegen is not modelling. Ignore these barriers for now.
if (!TII->isSafeToMoveRegClassDefs(*RC))
continue;
- std::vector<unsigned> &VRs = MRI->getRegClassVirtRegs(*RC);
+ const std::vector<unsigned> &VRs = MRI->getRegClassVirtRegs(*RC);
for (unsigned i = 0, e = VRs.size(); i != e; ++i) {
unsigned Reg = VRs[i];
if (!LIs->hasInterval(Reg))
@@ -1190,7 +1151,7 @@ unsigned PreAllocSplitting::getNumberOfNonSpills(
int StoreFrameIndex;
unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
if (StoreVReg != Reg || StoreFrameIndex != FrameIndex)
- NonSpills++;
+ ++NonSpills;
int DefIdx = (*UI)->findRegisterDefOperandIdx(Reg);
if (DefIdx != -1 && (*UI)->isRegTiedToUseOperand(DefIdx))
@@ -1253,7 +1214,7 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
(*LI)->removeValNo(CurrVN);
DefMI->eraseFromParent();
VNUseCount.erase(CurrVN);
- NumDeadSpills++;
+ ++NumDeadSpills;
changed = true;
continue;
}
@@ -1289,9 +1250,7 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
Ops.push_back(OpIdx);
if (!TII->canFoldMemoryOperand(use, Ops)) continue;
- MachineInstr* NewMI =
- TII->foldMemoryOperand(*use->getParent()->getParent(),
- use, Ops, FrameIndex);
+ MachineInstr* NewMI = TII->foldMemoryOperand(use, Ops, FrameIndex);
if (!NewMI) continue;
@@ -1301,10 +1260,9 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
(*LI)->removeValNo(CurrVN);
DefMI->eraseFromParent();
- MachineBasicBlock* MBB = use->getParent();
- NewMI = MBB->insert(MBB->erase(use), NewMI);
+ use->eraseFromParent();
VNUseCount[CurrVN].erase(use);
-
+
// Remove deleted instructions. Note that we need to remove them from
// the VNInfo->use map as well, just to be safe.
for (SmallPtrSet<MachineInstr*, 4>::iterator II =
@@ -1326,7 +1284,7 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
if (VI->second.erase(use))
VI->second.insert(NewMI);
- NumDeadSpills++;
+ ++NumDeadSpills;
changed = true;
continue;
}
@@ -1348,7 +1306,7 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
LIs->RemoveMachineInstrFromMaps(DefMI);
(*LI)->removeValNo(CurrVN);
DefMI->eraseFromParent();
- NumDeadSpills++;
+ ++NumDeadSpills;
changed = true;
}
}
diff --git a/libclamav/c++/llvm/lib/CodeGen/ProcessImplicitDefs.cpp b/libclamav/c++/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
index d7179b3..b8831db 100644
--- a/libclamav/c++/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
@@ -26,8 +26,8 @@
using namespace llvm;
char ProcessImplicitDefs::ID = 0;
-static RegisterPass<ProcessImplicitDefs> X("processimpdefs",
- "Process Implicit Definitions.");
+INITIALIZE_PASS(ProcessImplicitDefs, "processimpdefs",
+ "Process Implicit Definitions.", false, false);
void ProcessImplicitDefs::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
@@ -41,18 +41,34 @@ void ProcessImplicitDefs::getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
}
-bool ProcessImplicitDefs::CanTurnIntoImplicitDef(MachineInstr *MI,
- unsigned Reg, unsigned OpIdx,
- const TargetInstrInfo *tii_) {
- unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
- Reg == SrcReg)
- return true;
+bool
+ProcessImplicitDefs::CanTurnIntoImplicitDef(MachineInstr *MI,
+ unsigned Reg, unsigned OpIdx,
+ const TargetInstrInfo *tii_,
+ SmallSet<unsigned, 8> &ImpDefRegs) {
+ switch(OpIdx) {
+ case 1:
+ return MI->isCopy() && (MI->getOperand(0).getSubReg() == 0 ||
+ ImpDefRegs.count(MI->getOperand(0).getReg()));
+ case 2:
+ return MI->isSubregToReg() && (MI->getOperand(0).getSubReg() == 0 ||
+ ImpDefRegs.count(MI->getOperand(0).getReg()));
+ default: return false;
+ }
+}
- if (OpIdx == 2 && MI->isSubregToReg())
- return true;
- if (OpIdx == 1 && MI->isExtractSubreg())
- return true;
+static bool isUndefCopy(MachineInstr *MI, unsigned Reg,
+ const TargetInstrInfo *tii_,
+ SmallSet<unsigned, 8> &ImpDefRegs) {
+ if (MI->isCopy()) {
+ MachineOperand &MO0 = MI->getOperand(0);
+ MachineOperand &MO1 = MI->getOperand(1);
+ if (MO1.getReg() != Reg)
+ return false;
+ if (!MO0.getSubReg() || ImpDefRegs.count(MO0.getReg()))
+ return true;
+ return false;
+ }
return false;
}
@@ -89,6 +105,8 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
MachineInstr *MI = &*I;
++I;
if (MI->isImplicitDef()) {
+ if (MI->getOperand(0).getSubReg())
+ continue;
unsigned Reg = MI->getOperand(0).getReg();
ImpDefRegs.insert(Reg);
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
@@ -99,11 +117,10 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
continue;
}
- if (MI->isInsertSubreg()) {
- MachineOperand &MO = MI->getOperand(2);
- if (ImpDefRegs.count(MO.getReg())) {
- // %reg1032<def> = INSERT_SUBREG %reg1032, undef, 2
- // This is an identity copy, eliminate it now.
+ // Eliminate %reg1032:sub<def> = COPY undef.
+ if (MI->isCopy() && MI->getOperand(0).getSubReg()) {
+ MachineOperand &MO = MI->getOperand(1);
+ if (MO.isUndef() || ImpDefRegs.count(MO.getReg())) {
if (MO.isKill()) {
LiveVariables::VarInfo& vi = lv_->getVarInfo(MO.getReg());
vi.removeKill(MI);
@@ -117,7 +134,7 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
bool ChangedToImpDef = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand& MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isUse() || MO.isUndef())
+ if (!MO.isReg() || (MO.isDef() && !MO.getSubReg()) || MO.isUndef())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
@@ -125,7 +142,7 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
if (!ImpDefRegs.count(Reg))
continue;
// Use is a copy, just turn it into an implicit_def.
- if (CanTurnIntoImplicitDef(MI, Reg, i, tii_)) {
+ if (CanTurnIntoImplicitDef(MI, Reg, i, tii_, ImpDefRegs)) {
bool isKill = MO.isKill();
MI->setDesc(tii_->get(TargetOpcode::IMPLICIT_DEF));
for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
@@ -142,6 +159,12 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
Changed = true;
MO.setIsUndef();
+ // This is a partial register redef of an implicit def.
+ // Make sure the whole register is defined by the instruction.
+ if (MO.isDef()) {
+ MI->addRegisterDefined(Reg);
+ continue;
+ }
if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
// Make sure other uses of
for (unsigned j = i+1; j != e; ++j) {
@@ -216,9 +239,7 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
MachineInstr *RMI = RUses[i];
// Turn a copy use into an implicit_def.
- unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
- Reg == SrcReg) {
+ if (isUndefCopy(RMI, Reg, tii_, ImpDefRegs)) {
RMI->setDesc(tii_->get(TargetOpcode::IMPLICIT_DEF));
bool isKill = false;
diff --git a/libclamav/c++/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/libclamav/c++/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index 138e711..e2802c1 100644
--- a/libclamav/c++/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -19,12 +19,12 @@
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "pei"
#include "PrologEpilogInserter.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetMachine.h"
@@ -33,7 +33,10 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <climits>
@@ -41,8 +44,11 @@ using namespace llvm;
char PEI::ID = 0;
-static RegisterPass<PEI>
-X("prologepilog", "Prologue/Epilogue Insertion");
+INITIALIZE_PASS(PEI, "prologepilog",
+ "Prologue/Epilogue Insertion", false, false);
+
+STATISTIC(NumVirtualFrameRegs, "Number of virtual frame regs encountered");
+STATISTIC(NumScavengedRegs, "Number of frame index regs scavenged");
/// createPrologEpilogCodeInserter - This function returns a pass that inserts
/// prolog and epilog code, and eliminates abstract frame references.
@@ -58,13 +64,9 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
RS = TRI->requiresRegisterScavenging(Fn) ? new RegScavenger() : NULL;
FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(Fn);
- // Get MachineModuleInfo so that we can track the construction of the
- // frame.
- if (MachineModuleInfo *MMI = getAnalysisIfAvailable<MachineModuleInfo>())
- Fn.getFrameInfo()->setMachineModuleInfo(MMI);
-
- // Calculate the MaxCallFrameSize and HasCalls variables for the function's
- // frame information. Also eliminates call frame pseudo instructions.
+ // Calculate the MaxCallFrameSize and AdjustsStack variables for the
+ // function's frame information. Also eliminates call frame pseudo
+ // instructions.
calculateCallsInformation(Fn);
// Allow the target machine to make some adjustments to the function
@@ -76,10 +78,10 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
calculateCalleeSavedRegisters(Fn);
// Determine placement of CSR spill/restore code:
- // - with shrink wrapping, place spills and restores to tightly
+ // - With shrink wrapping, place spills and restores to tightly
// enclose regions in the Machine CFG of the function where
- // they are used. Without shrink wrapping
- // - default (no shrink wrapping), place all spills in the
+ // they are used.
+ // - Without shrink wrapping (default), place all spills in the
// entry block, all restores in return blocks.
placeCSRSpillsAndRestores(Fn);
@@ -96,8 +98,8 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
// Add prolog and epilog code to the function. This function is required
// to align the stack frame as necessary for any stack variables or
- // called functions. Because of this, calculateCalleeSavedRegisters
- // must be called before this function in order to set the HasCalls
+ // called functions. Because of this, calculateCalleeSavedRegisters()
+ // must be called before this function in order to set the AdjustsStack
// and MaxCallFrameSize variables.
if (!F->hasFnAttr(Attribute::Naked))
insertPrologEpilogCode(Fn);
@@ -131,15 +133,15 @@ void PEI::getAnalysisUsage(AnalysisUsage &AU) const {
}
#endif
-/// calculateCallsInformation - Calculate the MaxCallFrameSize and HasCalls
+/// calculateCallsInformation - Calculate the MaxCallFrameSize and AdjustsStack
/// variables for the function's frame information and eliminate call frame
/// pseudo instructions.
void PEI::calculateCallsInformation(MachineFunction &Fn) {
const TargetRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo();
- MachineFrameInfo *FFI = Fn.getFrameInfo();
+ MachineFrameInfo *MFI = Fn.getFrameInfo();
unsigned MaxCallFrameSize = 0;
- bool HasCalls = FFI->hasCalls();
+ bool AdjustsStack = MFI->adjustsStack();
// Get the function call frame set-up and tear-down instruction opcode
int FrameSetupOpcode = RegInfo->getCallFrameSetupOpcode();
@@ -159,16 +161,16 @@ void PEI::calculateCallsInformation(MachineFunction &Fn) {
" instructions should have a single immediate argument!");
unsigned Size = I->getOperand(0).getImm();
if (Size > MaxCallFrameSize) MaxCallFrameSize = Size;
- HasCalls = true;
+ AdjustsStack = true;
FrameSDOps.push_back(I);
} else if (I->isInlineAsm()) {
- // An InlineAsm might be a call; assume it is to get the stack frame
- // aligned correctly for calls.
- HasCalls = true;
+ // Some inline asm's need a stack frame, as indicated by operand 1.
+ if (I->getOperand(1).getImm())
+ AdjustsStack = true;
}
- FFI->setHasCalls(HasCalls);
- FFI->setMaxCallFrameSize(MaxCallFrameSize);
+ MFI->setAdjustsStack(AdjustsStack);
+ MFI->setMaxCallFrameSize(MaxCallFrameSize);
for (std::vector<MachineBasicBlock::iterator>::iterator
i = FrameSDOps.begin(), e = FrameSDOps.end(); i != e; ++i) {
@@ -189,7 +191,7 @@ void PEI::calculateCallsInformation(MachineFunction &Fn) {
void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
const TargetRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo();
const TargetFrameInfo *TFI = Fn.getTarget().getFrameInfo();
- MachineFrameInfo *FFI = Fn.getFrameInfo();
+ MachineFrameInfo *MFI = Fn.getFrameInfo();
// Get the callee saved register list...
const unsigned *CSRegs = RegInfo->getCalleeSavedRegs(&Fn);
@@ -202,22 +204,21 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
if (CSRegs == 0 || CSRegs[0] == 0)
return;
- // Figure out which *callee saved* registers are modified by the current
- // function, thus needing to be saved and restored in the prolog/epilog.
- const TargetRegisterClass * const *CSRegClasses =
- RegInfo->getCalleeSavedRegClasses(&Fn);
+ // In Naked functions we aren't going to save any registers.
+ if (Fn.getFunction()->hasFnAttr(Attribute::Naked))
+ return;
std::vector<CalleeSavedInfo> CSI;
for (unsigned i = 0; CSRegs[i]; ++i) {
unsigned Reg = CSRegs[i];
if (Fn.getRegInfo().isPhysRegUsed(Reg)) {
// If the reg is modified, save it!
- CSI.push_back(CalleeSavedInfo(Reg, CSRegClasses[i]));
+ CSI.push_back(CalleeSavedInfo(Reg));
} else {
for (const unsigned *AliasSet = RegInfo->getAliasSet(Reg);
*AliasSet; ++AliasSet) { // Check alias registers too.
if (Fn.getRegInfo().isPhysRegUsed(*AliasSet)) {
- CSI.push_back(CalleeSavedInfo(Reg, CSRegClasses[i]));
+ CSI.push_back(CalleeSavedInfo(Reg));
break;
}
}
@@ -236,7 +237,7 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
for (std::vector<CalleeSavedInfo>::iterator
I = CSI.begin(), E = CSI.end(); I != E; ++I) {
unsigned Reg = I->getReg();
- const TargetRegisterClass *RC = I->getRegClass();
+ const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
int FrameIdx;
if (RegInfo->hasReservedSpillSlot(Fn, Reg, FrameIdx)) {
@@ -260,19 +261,18 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
// the TargetRegisterClass if the stack alignment is smaller. Use the
// min.
Align = std::min(Align, StackAlign);
- FrameIdx = FFI->CreateStackObject(RC->getSize(), Align, true);
+ FrameIdx = MFI->CreateStackObject(RC->getSize(), Align, true);
if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
} else {
// Spill it to the stack where we must.
- FrameIdx = FFI->CreateFixedObject(RC->getSize(), FixedSlot->Offset,
- true, false);
+ FrameIdx = MFI->CreateFixedObject(RC->getSize(), FixedSlot->Offset, true);
}
I->setFrameIdx(FrameIdx);
}
- FFI->setCalleeSavedInfo(CSI);
+ MFI->setCalleeSavedInfo(CSI);
}
/// insertCSRSpillsAndRestores - Insert spill and restore code for
@@ -280,30 +280,33 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
///
void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
// Get callee saved register information.
- MachineFrameInfo *FFI = Fn.getFrameInfo();
- const std::vector<CalleeSavedInfo> &CSI = FFI->getCalleeSavedInfo();
+ MachineFrameInfo *MFI = Fn.getFrameInfo();
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- FFI->setCalleeSavedInfoValid(true);
+ MFI->setCalleeSavedInfoValid(true);
// Early exit if no callee saved registers are modified!
if (CSI.empty())
return;
const TargetInstrInfo &TII = *Fn.getTarget().getInstrInfo();
+ const TargetRegisterInfo *TRI = Fn.getTarget().getRegisterInfo();
MachineBasicBlock::iterator I;
if (! ShrinkWrapThisFunction) {
// Spill using target interface.
I = EntryBlock->begin();
- if (!TII.spillCalleeSavedRegisters(*EntryBlock, I, CSI)) {
+ if (!TII.spillCalleeSavedRegisters(*EntryBlock, I, CSI, TRI)) {
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
// Add the callee-saved register as live-in.
// It's killed at the spill.
EntryBlock->addLiveIn(CSI[i].getReg());
// Insert the spill to the stack frame.
- TII.storeRegToStackSlot(*EntryBlock, I, CSI[i].getReg(), true,
- CSI[i].getFrameIdx(), CSI[i].getRegClass());
+ unsigned Reg = CSI[i].getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.storeRegToStackSlot(*EntryBlock, I, Reg, true,
+ CSI[i].getFrameIdx(), RC, TRI);
}
}
@@ -325,11 +328,13 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
// Restore all registers immediately before the return and any
// terminators that precede it.
- if (!TII.restoreCalleeSavedRegisters(*MBB, I, CSI)) {
+ if (!TII.restoreCalleeSavedRegisters(*MBB, I, CSI, TRI)) {
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- TII.loadRegFromStackSlot(*MBB, I, CSI[i].getReg(),
+ unsigned Reg = CSI[i].getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.loadRegFromStackSlot(*MBB, I, Reg,
CSI[i].getFrameIdx(),
- CSI[i].getRegClass());
+ RC, TRI);
assert(I != MBB->begin() &&
"loadRegFromStackSlot didn't insert any code!");
// Insert in reverse order. loadRegFromStackSlot can insert
@@ -373,10 +378,12 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
MBB->addLiveIn(blockCSI[i].getReg());
// Insert the spill to the stack frame.
- TII.storeRegToStackSlot(*MBB, I, blockCSI[i].getReg(),
+ unsigned Reg = blockCSI[i].getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.storeRegToStackSlot(*MBB, I, Reg,
true,
blockCSI[i].getFrameIdx(),
- blockCSI[i].getRegClass());
+ RC, TRI);
}
}
@@ -422,9 +429,11 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
// Restore all registers immediately before the return and any
// terminators that precede it.
for (unsigned i = 0, e = blockCSI.size(); i != e; ++i) {
- TII.loadRegFromStackSlot(*MBB, I, blockCSI[i].getReg(),
+ unsigned Reg = blockCSI[i].getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.loadRegFromStackSlot(*MBB, I, Reg,
blockCSI[i].getFrameIdx(),
- blockCSI[i].getRegClass());
+ RC, TRI);
assert(I != MBB->begin() &&
"loadRegFromStackSlot didn't insert any code!");
// Insert in reverse order. loadRegFromStackSlot can insert
@@ -441,14 +450,14 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
static inline void
-AdjustStackOffset(MachineFrameInfo *FFI, int FrameIdx,
+AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx,
bool StackGrowsDown, int64_t &Offset,
unsigned &MaxAlign) {
// If the stack grows down, add the object size to find the lowest address.
if (StackGrowsDown)
- Offset += FFI->getObjectSize(FrameIdx);
+ Offset += MFI->getObjectSize(FrameIdx);
- unsigned Align = FFI->getObjectAlignment(FrameIdx);
+ unsigned Align = MFI->getObjectAlignment(FrameIdx);
// If the alignment of this object is greater than that of the stack, then
// increase the stack alignment to match.
@@ -458,10 +467,12 @@ AdjustStackOffset(MachineFrameInfo *FFI, int FrameIdx,
Offset = (Offset + Align - 1) / Align * Align;
if (StackGrowsDown) {
- FFI->setObjectOffset(FrameIdx, -Offset); // Set the computed offset
+ DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset << "]\n");
+ MFI->setObjectOffset(FrameIdx, -Offset); // Set the computed offset
} else {
- FFI->setObjectOffset(FrameIdx, Offset);
- Offset += FFI->getObjectSize(FrameIdx);
+ DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << Offset << "]\n");
+ MFI->setObjectOffset(FrameIdx, Offset);
+ Offset += MFI->getObjectSize(FrameIdx);
}
}
@@ -475,7 +486,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
TFI.getStackGrowthDirection() == TargetFrameInfo::StackGrowsDown;
// Loop over all of the stack objects, assigning sequential addresses...
- MachineFrameInfo *FFI = Fn.getFrameInfo();
+ MachineFrameInfo *MFI = Fn.getFrameInfo();
// Start at the beginning of the local area.
// The Offset is the distance from the stack top in the direction
@@ -492,17 +503,17 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// We currently don't support filling in holes in between fixed sized
// objects, so we adjust 'Offset' to point to the end of last fixed sized
// preallocated object.
- for (int i = FFI->getObjectIndexBegin(); i != 0; ++i) {
+ for (int i = MFI->getObjectIndexBegin(); i != 0; ++i) {
int64_t FixedOff;
if (StackGrowsDown) {
// The maximum distance from the stack pointer is at lower address of
// the object -- which is given by offset. For down growing stack
// the offset is negative, so we negate the offset to get the distance.
- FixedOff = -FFI->getObjectOffset(i);
+ FixedOff = -MFI->getObjectOffset(i);
} else {
// The maximum distance from the start pointer is at the upper
// address of the object.
- FixedOff = FFI->getObjectOffset(i) + FFI->getObjectSize(i);
+ FixedOff = MFI->getObjectOffset(i) + MFI->getObjectSize(i);
}
if (FixedOff > Offset) Offset = FixedOff;
}
@@ -511,29 +522,29 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// callee saved registers.
if (StackGrowsDown) {
for (unsigned i = MinCSFrameIndex; i <= MaxCSFrameIndex; ++i) {
- // If stack grows down, we need to add size of find the lowest
+ // If the stack grows down, we need to add the size to find the lowest
// address of the object.
- Offset += FFI->getObjectSize(i);
+ Offset += MFI->getObjectSize(i);
- unsigned Align = FFI->getObjectAlignment(i);
+ unsigned Align = MFI->getObjectAlignment(i);
// Adjust to alignment boundary
Offset = (Offset+Align-1)/Align*Align;
- FFI->setObjectOffset(i, -Offset); // Set the computed offset
+ MFI->setObjectOffset(i, -Offset); // Set the computed offset
}
} else {
int MaxCSFI = MaxCSFrameIndex, MinCSFI = MinCSFrameIndex;
for (int i = MaxCSFI; i >= MinCSFI ; --i) {
- unsigned Align = FFI->getObjectAlignment(i);
+ unsigned Align = MFI->getObjectAlignment(i);
// Adjust to alignment boundary
Offset = (Offset+Align-1)/Align*Align;
- FFI->setObjectOffset(i, Offset);
- Offset += FFI->getObjectSize(i);
+ MFI->setObjectOffset(i, Offset);
+ Offset += MFI->getObjectSize(i);
}
}
- unsigned MaxAlign = FFI->getMaxAlignment();
+ unsigned MaxAlign = MFI->getMaxAlignment();
// Make sure the special register scavenging spill slot is closest to the
// frame pointer if a frame pointer is required.
@@ -541,28 +552,81 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
if (RS && RegInfo->hasFP(Fn) && !RegInfo->needsStackRealignment(Fn)) {
int SFI = RS->getScavengingFrameIndex();
if (SFI >= 0)
- AdjustStackOffset(FFI, SFI, StackGrowsDown, Offset, MaxAlign);
+ AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
+ }
+
+ // FIXME: Once this is working, then enable flag will change to a target
+ // check for whether the frame is large enough to want to use virtual
+ // frame index registers. Functions which don't want/need this optimization
+ // will continue to use the existing code path.
+ if (MFI->getUseLocalStackAllocationBlock()) {
+ unsigned Align = MFI->getLocalFrameMaxAlign();
+
+ // Adjust to alignment boundary.
+ Offset = (Offset + Align - 1) / Align * Align;
+
+ DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
+
+ // Resolve offsets for objects in the local block.
+ for (unsigned i = 0, e = MFI->getLocalFrameObjectCount(); i != e; ++i) {
+ std::pair<int, int64_t> Entry = MFI->getLocalFrameObjectMap(i);
+ int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
+ DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" <<
+ FIOffset << "]\n");
+ MFI->setObjectOffset(Entry.first, FIOffset);
+ }
+ // Allocate the local block
+ Offset += MFI->getLocalFrameSize();
+
+ MaxAlign = std::max(Align, MaxAlign);
}
// Make sure that the stack protector comes before the local variables on the
// stack.
- if (FFI->getStackProtectorIndex() >= 0)
- AdjustStackOffset(FFI, FFI->getStackProtectorIndex(), StackGrowsDown,
+ SmallSet<int, 16> LargeStackObjs;
+ if (MFI->getStackProtectorIndex() >= 0) {
+ AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), StackGrowsDown,
Offset, MaxAlign);
+ // Assign large stack objects first.
+ for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
+ if (MFI->isObjectPreAllocated(i) &&
+ MFI->getUseLocalStackAllocationBlock())
+ continue;
+ if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
+ continue;
+ if (RS && (int)i == RS->getScavengingFrameIndex())
+ continue;
+ if (MFI->isDeadObjectIndex(i))
+ continue;
+ if (MFI->getStackProtectorIndex() == (int)i)
+ continue;
+ if (!MFI->MayNeedStackProtector(i))
+ continue;
+
+ AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
+ LargeStackObjs.insert(i);
+ }
+ }
+
// Then assign frame offsets to stack objects that are not used to spill
// callee saved registers.
- for (unsigned i = 0, e = FFI->getObjectIndexEnd(); i != e; ++i) {
+ for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
+ if (MFI->isObjectPreAllocated(i) &&
+ MFI->getUseLocalStackAllocationBlock())
+ continue;
if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
continue;
if (RS && (int)i == RS->getScavengingFrameIndex())
continue;
- if (FFI->isDeadObjectIndex(i))
+ if (MFI->isDeadObjectIndex(i))
+ continue;
+ if (MFI->getStackProtectorIndex() == (int)i)
continue;
- if (FFI->getStackProtectorIndex() == (int)i)
+ if (LargeStackObjs.count(i))
continue;
- AdjustStackOffset(FFI, i, StackGrowsDown, Offset, MaxAlign);
+ AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
}
// Make sure the special register scavenging spill slot is closest to the
@@ -570,15 +634,15 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
if (RS && (!RegInfo->hasFP(Fn) || RegInfo->needsStackRealignment(Fn))) {
int SFI = RS->getScavengingFrameIndex();
if (SFI >= 0)
- AdjustStackOffset(FFI, SFI, StackGrowsDown, Offset, MaxAlign);
+ AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
}
if (!RegInfo->targetHandlesStackFrameRounding()) {
// If we have reserved argument space for call sites in the function
// immediately on entry to the current function, count it as part of the
// overall stack size.
- if (FFI->hasCalls() && RegInfo->hasReservedCallFrame(Fn))
- Offset += FFI->getMaxCallFrameSize();
+ if (MFI->adjustsStack() && RegInfo->hasReservedCallFrame(Fn))
+ Offset += MFI->getMaxCallFrameSize();
// Round up the size to a multiple of the alignment. If the function has
// any calls or alloca's, align to the target's StackAlignment value to
@@ -586,23 +650,23 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
// otherwise, for leaf functions, align to the TransientStackAlignment
// value.
unsigned StackAlign;
- if (FFI->hasCalls() || FFI->hasVarSizedObjects() ||
- (RegInfo->needsStackRealignment(Fn) && FFI->getObjectIndexEnd() != 0))
+ if (MFI->adjustsStack() || MFI->hasVarSizedObjects() ||
+ (RegInfo->needsStackRealignment(Fn) && MFI->getObjectIndexEnd() != 0))
StackAlign = TFI.getStackAlignment();
else
StackAlign = TFI.getTransientStackAlignment();
- // If the frame pointer is eliminated, all frame offsets will be relative
- // to SP not FP; align to MaxAlign so this works.
+
+ // If the frame pointer is eliminated, all frame offsets will be relative to
+ // SP not FP. Align to MaxAlign so this works.
StackAlign = std::max(StackAlign, MaxAlign);
unsigned AlignMask = StackAlign - 1;
Offset = (Offset + AlignMask) & ~uint64_t(AlignMask);
}
// Update frame info to pretend that this is part of the stack...
- FFI->setStackSize(Offset - LocalAreaOffset);
+ MFI->setStackSize(Offset - LocalAreaOffset);
}
-
/// insertPrologEpilogCode - Scan the function for modified callee saved
/// registers, insert spill code for these callee saved registers, then add
/// prolog and epilog code to the function.
@@ -621,7 +685,6 @@ void PEI::insertPrologEpilogCode(MachineFunction &Fn) {
}
}
-
/// replaceFrameIndices - Replace all MO_FrameIndex operands with physical
/// register references and actual offsets.
///
@@ -639,6 +702,9 @@ void PEI::replaceFrameIndices(MachineFunction &Fn) {
for (MachineFunction::iterator BB = Fn.begin(),
E = Fn.end(); BB != E; ++BB) {
+#ifndef NDEBUG
+ int SPAdjCount = 0; // frame setup / destroy count.
+#endif
int SPAdj = 0; // SP offset due to call frame setup / destroy.
if (RS && !FrameIndexVirtualScavenging) RS->enterBasicBlock(BB);
@@ -646,6 +712,10 @@ void PEI::replaceFrameIndices(MachineFunction &Fn) {
if (I->getOpcode() == FrameSetupOpcode ||
I->getOpcode() == FrameDestroyOpcode) {
+#ifndef NDEBUG
+ // Track whether we see even pairs of them
+ SPAdjCount += I->getOpcode() == FrameSetupOpcode ? 1 : -1;
+#endif
// Remember how much SP has been adjusted to create the call
// frame.
int Size = I->getOperand(0).getImm();
@@ -685,16 +755,8 @@ void PEI::replaceFrameIndices(MachineFunction &Fn) {
// If this instruction has a FrameIndex operand, we need to
// use that target machine register info object to eliminate
// it.
- int Value;
- unsigned VReg =
- TRI.eliminateFrameIndex(MI, SPAdj, &Value,
+ TRI.eliminateFrameIndex(MI, SPAdj,
FrameIndexVirtualScavenging ? NULL : RS);
- if (VReg) {
- assert (FrameIndexVirtualScavenging &&
- "Not scavenging, but virtual returned from "
- "eliminateFrameIndex()!");
- FrameConstantRegMap[VReg] = FrameConstantEntry(Value, SPAdj);
- }
// Reset the iterator if we were at the beginning of the BB.
if (AtBeginning) {
@@ -712,42 +774,16 @@ void PEI::replaceFrameIndices(MachineFunction &Fn) {
if (RS && !FrameIndexVirtualScavenging && MI) RS->forward(MI);
}
- assert(SPAdj == 0 && "Unbalanced call frame setup / destroy pairs?");
+ // If we have evenly matched pairs of frame setup / destroy instructions,
+ // make sure the adjustments come out to zero. If we don't have matched
+ // pairs, we can't be sure the missing bit isn't in another basic block
+ // due to a custom inserter playing tricks, so just asserting SPAdj==0
+ // isn't sufficient. See tMOVCC on Thumb1, for example.
+ assert((SPAdjCount || SPAdj == 0) &&
+ "Unbalanced call frame setup / destroy pairs?");
}
}
-/// findLastUseReg - find the killing use of the specified register within
-/// the instruciton range. Return the operand number of the kill in Operand.
-static MachineBasicBlock::iterator
-findLastUseReg(MachineBasicBlock::iterator I, MachineBasicBlock::iterator ME,
- unsigned Reg) {
- // Scan forward to find the last use of this virtual register
- for (++I; I != ME; ++I) {
- MachineInstr *MI = I;
- bool isDefInsn = false;
- bool isKillInsn = false;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i)
- if (MI->getOperand(i).isReg()) {
- unsigned OpReg = MI->getOperand(i).getReg();
- if (OpReg == 0 || !TargetRegisterInfo::isVirtualRegister(OpReg))
- continue;
- assert (OpReg == Reg
- && "overlapping use of scavenged index register!");
- // If this is the killing use, we have a candidate.
- if (MI->getOperand(i).isKill())
- isKillInsn = true;
- else if (MI->getOperand(i).isDef())
- isDefInsn = true;
- }
- if (isKillInsn && !isDefInsn)
- return I;
- }
- // If we hit the end of the basic block, there was no kill of
- // the virtual register, which is wrong.
- assert (0 && "scavenged index register never killed!");
- return ME;
-}
-
/// scavengeFrameVirtualRegs - Replace all frame index virtual registers
/// with physical registers. Use the register scavenger to find an
/// appropriate register to use.
@@ -757,27 +793,14 @@ void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) {
E = Fn.end(); BB != E; ++BB) {
RS->enterBasicBlock(BB);
- // FIXME: The logic flow in this function is still too convoluted.
- // It needs a cleanup refactoring. Do that in preparation for tracking
- // more than one scratch register value and using ranges to find
- // available scratch registers.
- unsigned CurrentVirtReg = 0;
- unsigned CurrentScratchReg = 0;
- bool havePrevValue = false;
- int PrevValue = 0;
- MachineInstr *PrevLastUseMI = NULL;
- unsigned PrevLastUseOp = 0;
- bool trackingCurrentValue = false;
+ unsigned VirtReg = 0;
+ unsigned ScratchReg = 0;
int SPAdj = 0;
- int Value = 0;
// The instruction stream may change in the loop, so check BB->end()
// directly.
for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
MachineInstr *MI = I;
- bool isDefInsn = false;
- bool isKillInsn = false;
- bool clobbersScratchReg = false;
bool DoIncr = true;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
if (MI->getOperand(i).isReg()) {
@@ -785,122 +808,30 @@ void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) {
unsigned Reg = MO.getReg();
if (Reg == 0)
continue;
- if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
- // If we have a previous scratch reg, check and see if anything
- // here kills whatever value is in there.
- if (Reg == CurrentScratchReg) {
- if (MO.isUse()) {
- // Two-address operands implicitly kill
- if (MO.isKill() || MI->isRegTiedToDefOperand(i))
- clobbersScratchReg = true;
- } else {
- assert (MO.isDef());
- clobbersScratchReg = true;
- }
- }
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
- }
- // If this is a def, remember that this insn defines the value.
- // This lets us properly consider insns which re-use the scratch
- // register, such as r2 = sub r2, #imm, in the middle of the
- // scratch range.
- if (MO.isDef())
- isDefInsn = true;
+
+ ++NumVirtualFrameRegs;
// Have we already allocated a scratch register for this virtual?
- if (Reg != CurrentVirtReg) {
+ if (Reg != VirtReg) {
// When we first encounter a new virtual register, it
// must be a definition.
assert(MI->getOperand(i).isDef() &&
"frame index virtual missing def!");
- // We can't have nested virtual register live ranges because
- // there's only a guarantee of one scavenged register at a time.
- assert (CurrentVirtReg == 0 &&
- "overlapping frame index virtual registers!");
-
- // If the target gave us information about what's in the register,
- // we can use that to re-use scratch regs.
- DenseMap<unsigned, FrameConstantEntry>::iterator Entry =
- FrameConstantRegMap.find(Reg);
- trackingCurrentValue = Entry != FrameConstantRegMap.end();
- if (trackingCurrentValue) {
- SPAdj = (*Entry).second.second;
- Value = (*Entry).second.first;
- } else
- SPAdj = Value = 0;
-
- // If the scratch register from the last allocation is still
- // available, see if the value matches. If it does, just re-use it.
- if (trackingCurrentValue && havePrevValue && PrevValue == Value) {
- // FIXME: This assumes that the instructions in the live range
- // for the virtual register are exclusively for the purpose
- // of populating the value in the register. That's reasonable
- // for these frame index registers, but it's still a very, very
- // strong assumption. rdar://7322732. Better would be to
- // explicitly check each instruction in the range for references
- // to the virtual register. Only delete those insns that
- // touch the virtual register.
-
- // Find the last use of the new virtual register. Remove all
- // instruction between here and there, and update the current
- // instruction to reference the last use insn instead.
- MachineBasicBlock::iterator LastUseMI =
- findLastUseReg(I, BB->end(), Reg);
-
- // Remove all instructions up 'til the last use, since they're
- // just calculating the value we already have.
- BB->erase(I, LastUseMI);
- I = LastUseMI;
-
- // Extend the live range of the scratch register
- PrevLastUseMI->getOperand(PrevLastUseOp).setIsKill(false);
- RS->setUsed(CurrentScratchReg);
- CurrentVirtReg = Reg;
-
- // We deleted the instruction we were scanning the operands of.
- // Jump back to the instruction iterator loop. Don't increment
- // past this instruction since we updated the iterator already.
- DoIncr = false;
- break;
- }
-
// Scavenge a new scratch register
- CurrentVirtReg = Reg;
+ VirtReg = Reg;
const TargetRegisterClass *RC = Fn.getRegInfo().getRegClass(Reg);
- CurrentScratchReg = RS->FindUnusedReg(RC);
- if (CurrentScratchReg == 0)
- // No register is "free". Scavenge a register.
- CurrentScratchReg = RS->scavengeRegister(RC, I, SPAdj);
-
- PrevValue = Value;
+ ScratchReg = RS->scavengeRegister(RC, I, SPAdj);
+ ++NumScavengedRegs;
}
// replace this reference to the virtual register with the
// scratch register.
- assert (CurrentScratchReg && "Missing scratch register!");
- MI->getOperand(i).setReg(CurrentScratchReg);
+ assert (ScratchReg && "Missing scratch register!");
+ MI->getOperand(i).setReg(ScratchReg);
- if (MI->getOperand(i).isKill()) {
- isKillInsn = true;
- PrevLastUseOp = i;
- PrevLastUseMI = MI;
- }
}
}
- // If this is the last use of the scratch, stop tracking it. The
- // last use will be a kill operand in an instruction that does
- // not also define the scratch register.
- if (isKillInsn && !isDefInsn) {
- CurrentVirtReg = 0;
- havePrevValue = trackingCurrentValue;
- }
- // Similarly, notice if instruction clobbered the value in the
- // register we're tracking for possible later reuse. This is noted
- // above, but enforced here since the value is still live while we
- // process the rest of the operands of the instruction.
- if (clobbersScratchReg) {
- havePrevValue = false;
- CurrentScratchReg = 0;
- }
if (DoIncr) {
RS->forward(I);
++I;
diff --git a/libclamav/c++/llvm/lib/CodeGen/PrologEpilogInserter.h b/libclamav/c++/llvm/lib/CodeGen/PrologEpilogInserter.h
index 931f1eb..d575124 100644
--- a/libclamav/c++/llvm/lib/CodeGen/PrologEpilogInserter.h
+++ b/libclamav/c++/llvm/lib/CodeGen/PrologEpilogInserter.h
@@ -36,7 +36,7 @@ namespace llvm {
class PEI : public MachineFunctionPass {
public:
static char ID;
- PEI() : MachineFunctionPass(&ID) {}
+ PEI() : MachineFunctionPass(ID) {}
const char *getPassName() const {
return "Prolog/Epilog Insertion & Frame Finalization";
@@ -99,12 +99,6 @@ namespace llvm {
// TRI->requiresFrameIndexScavenging() for the current function.
bool FrameIndexVirtualScavenging;
- // When using the scavenger post-pass to resolve frame reference
- // materialization registers, maintain a map of the registers to
- // the constant value and SP adjustment associated with it.
- typedef std::pair<int, int> FrameConstantEntry;
- DenseMap<unsigned, FrameConstantEntry> FrameConstantRegMap;
-
#ifndef NDEBUG
// Machine function handle.
MachineFunction* MF;
diff --git a/libclamav/c++/llvm/lib/CodeGen/RegAllocFast.cpp b/libclamav/c++/llvm/lib/CodeGen/RegAllocFast.cpp
new file mode 100644
index 0000000..fc150d5
--- /dev/null
+++ b/libclamav/c++/llvm/lib/CodeGen/RegAllocFast.cpp
@@ -0,0 +1,1048 @@
+//===-- RegAllocFast.cpp - A fast register allocator for debug code -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This register allocator allocates registers to a basic block at a time,
+// attempting to keep values in registers and reusing registers as appropriate.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "regalloc"
+#include "llvm/BasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/STLExtras.h"
+#include <algorithm>
+using namespace llvm;
+
+STATISTIC(NumStores, "Number of stores added");
+STATISTIC(NumLoads , "Number of loads added");
+STATISTIC(NumCopies, "Number of copies coalesced");
+
+static RegisterRegAlloc
+ fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator);
+
+namespace {
+ class RAFast : public MachineFunctionPass {
+ public:
+ static char ID;
+ RAFast() : MachineFunctionPass(ID), StackSlotForVirtReg(-1),
+ isBulkSpilling(false) {}
+ private:
+ const TargetMachine *TM;
+ MachineFunction *MF;
+ MachineRegisterInfo *MRI;
+ const TargetRegisterInfo *TRI;
+ const TargetInstrInfo *TII;
+
+ // Basic block currently being allocated.
+ MachineBasicBlock *MBB;
+
+ // StackSlotForVirtReg - Maps virtual regs to the frame index where these
+ // values are spilled.
+ IndexedMap<int, VirtReg2IndexFunctor> StackSlotForVirtReg;
+
+ // Everything we know about a live virtual register.
+ struct LiveReg {
+ MachineInstr *LastUse; // Last instr to use reg.
+ unsigned PhysReg; // Currently held here.
+ unsigned short LastOpNum; // OpNum on LastUse.
+ bool Dirty; // Register needs spill.
+
+ LiveReg(unsigned p=0) : LastUse(0), PhysReg(p), LastOpNum(0),
+ Dirty(false) {}
+ };
+
+ typedef DenseMap<unsigned, LiveReg> LiveRegMap;
+ typedef LiveRegMap::value_type LiveRegEntry;
+
+ // LiveVirtRegs - This map contains entries for each virtual register
+ // that is currently available in a physical register.
+ LiveRegMap LiveVirtRegs;
+
+ DenseMap<unsigned, MachineInstr *> LiveDbgValueMap;
+
+ // RegState - Track the state of a physical register.
+ enum RegState {
+ // A disabled register is not available for allocation, but an alias may
+ // be in use. A register can only be moved out of the disabled state if
+ // all aliases are disabled.
+ regDisabled,
+
+ // A free register is not currently in use and can be allocated
+ // immediately without checking aliases.
+ regFree,
+
+ // A reserved register has been assigned explicitly (e.g., setting up a
+ // call parameter), and it remains reserved until it is used.
+ regReserved
+
+ // A register state may also be a virtual register number, indicating that
+ // the physical register is currently allocated to a virtual register. In
+ // that case, LiveVirtRegs contains the inverse mapping.
+ };
+
+ // PhysRegState - One of the RegState enums, or a virtreg.
+ std::vector<unsigned> PhysRegState;
+
+ // UsedInInstr - BitVector of physregs that are used in the current
+ // instruction, and so cannot be allocated.
+ BitVector UsedInInstr;
+
+ // Allocatable - vector of allocatable physical registers.
+ BitVector Allocatable;
+
+ // SkippedInstrs - Descriptors of instructions whose clobber list was
+ // ignored because all registers were spilled. It is still necessary to
+ // mark all the clobbered registers as used by the function.
+ SmallPtrSet<const TargetInstrDesc*, 4> SkippedInstrs;
+
+ // isBulkSpilling - This flag is set when LiveRegMap will be cleared
+ // completely after spilling all live registers. LiveRegMap entries should
+ // not be erased.
+ bool isBulkSpilling;
+
+ enum {
+ spillClean = 1,
+ spillDirty = 100,
+ spillImpossible = ~0u
+ };
+ public:
+ virtual const char *getPassName() const {
+ return "Fast Register Allocator";
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequiredID(PHIEliminationID);
+ AU.addRequiredID(TwoAddressInstructionPassID);
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ private:
+ bool runOnMachineFunction(MachineFunction &Fn);
+ void AllocateBasicBlock();
+ void handleThroughOperands(MachineInstr *MI,
+ SmallVectorImpl<unsigned> &VirtDead);
+ int getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC);
+ bool isLastUseOfLocalReg(MachineOperand&);
+
+ void addKillFlag(const LiveReg&);
+ void killVirtReg(LiveRegMap::iterator);
+ void killVirtReg(unsigned VirtReg);
+ void spillVirtReg(MachineBasicBlock::iterator MI, LiveRegMap::iterator);
+ void spillVirtReg(MachineBasicBlock::iterator MI, unsigned VirtReg);
+
+ void usePhysReg(MachineOperand&);
+ void definePhysReg(MachineInstr *MI, unsigned PhysReg, RegState NewState);
+ unsigned calcSpillCost(unsigned PhysReg) const;
+ void assignVirtToPhysReg(LiveRegEntry &LRE, unsigned PhysReg);
+ void allocVirtReg(MachineInstr *MI, LiveRegEntry &LRE, unsigned Hint);
+ LiveRegMap::iterator defineVirtReg(MachineInstr *MI, unsigned OpNum,
+ unsigned VirtReg, unsigned Hint);
+ LiveRegMap::iterator reloadVirtReg(MachineInstr *MI, unsigned OpNum,
+ unsigned VirtReg, unsigned Hint);
+ void spillAll(MachineInstr *MI);
+ bool setPhysReg(MachineInstr *MI, unsigned OpNum, unsigned PhysReg);
+ };
+ char RAFast::ID = 0;
+}
+
+/// getStackSpaceFor - This allocates space for the specified virtual register
+/// to be held on the stack.
+int RAFast::getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC) {
+ // Find the location Reg would belong...
+ int SS = StackSlotForVirtReg[VirtReg];
+ if (SS != -1)
+ return SS; // Already has space allocated?
+
+ // Allocate a new stack object for this spill location...
+ int FrameIdx = MF->getFrameInfo()->CreateSpillStackObject(RC->getSize(),
+ RC->getAlignment());
+
+ // Assign the slot.
+ StackSlotForVirtReg[VirtReg] = FrameIdx;
+ return FrameIdx;
+}
+
+/// isLastUseOfLocalReg - Return true if MO is the only remaining reference to
+/// its virtual register, and it is guaranteed to be a block-local register.
+///
+bool RAFast::isLastUseOfLocalReg(MachineOperand &MO) {
+ // Check for non-debug uses or defs following MO.
+ // This is the most likely way to fail - fast path it.
+ MachineOperand *Next = &MO;
+ while ((Next = Next->getNextOperandForReg()))
+ if (!Next->isDebug())
+ return false;
+
+ // If the register has ever been spilled or reloaded, we conservatively assume
+ // it is a global register used in multiple blocks.
+ if (StackSlotForVirtReg[MO.getReg()] != -1)
+ return false;
+
+ // Check that the use/def chain has exactly one operand - MO.
+ return &MRI->reg_nodbg_begin(MO.getReg()).getOperand() == &MO;
+}
+
+/// addKillFlag - Set kill flags on last use of a virtual register.
+void RAFast::addKillFlag(const LiveReg &LR) {
+ if (!LR.LastUse) return;
+ MachineOperand &MO = LR.LastUse->getOperand(LR.LastOpNum);
+ if (MO.isUse() && !LR.LastUse->isRegTiedToDefOperand(LR.LastOpNum)) {
+ if (MO.getReg() == LR.PhysReg)
+ MO.setIsKill();
+ else
+ LR.LastUse->addRegisterKilled(LR.PhysReg, TRI, true);
+ }
+}
+
+/// killVirtReg - Mark virtreg as no longer available.
+void RAFast::killVirtReg(LiveRegMap::iterator LRI) {
+ addKillFlag(LRI->second);
+ const LiveReg &LR = LRI->second;
+ assert(PhysRegState[LR.PhysReg] == LRI->first && "Broken RegState mapping");
+ PhysRegState[LR.PhysReg] = regFree;
+ // Erase from LiveVirtRegs unless we're spilling in bulk.
+ if (!isBulkSpilling)
+ LiveVirtRegs.erase(LRI);
+}
+
+/// killVirtReg - Mark virtreg as no longer available.
+void RAFast::killVirtReg(unsigned VirtReg) {
+ assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
+ "killVirtReg needs a virtual register");
+ LiveRegMap::iterator LRI = LiveVirtRegs.find(VirtReg);
+ if (LRI != LiveVirtRegs.end())
+ killVirtReg(LRI);
+}
+
+/// spillVirtReg - This method spills the value specified by VirtReg into the
+/// corresponding stack slot if needed.
+void RAFast::spillVirtReg(MachineBasicBlock::iterator MI, unsigned VirtReg) {
+ assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
+ "Spilling a physical register is illegal!");
+ LiveRegMap::iterator LRI = LiveVirtRegs.find(VirtReg);
+ assert(LRI != LiveVirtRegs.end() && "Spilling unmapped virtual register");
+ spillVirtReg(MI, LRI);
+}
+
+/// spillVirtReg - Do the actual work of spilling.
+void RAFast::spillVirtReg(MachineBasicBlock::iterator MI,
+ LiveRegMap::iterator LRI) {
+ LiveReg &LR = LRI->second;
+ assert(PhysRegState[LR.PhysReg] == LRI->first && "Broken RegState mapping");
+
+ if (LR.Dirty) {
+ // If this physreg is used by the instruction, we want to kill it on the
+ // instruction, not on the spill.
+ bool SpillKill = LR.LastUse != MI;
+ LR.Dirty = false;
+ DEBUG(dbgs() << "Spilling %reg" << LRI->first
+ << " in " << TRI->getName(LR.PhysReg));
+ const TargetRegisterClass *RC = MRI->getRegClass(LRI->first);
+ int FI = getStackSpaceFor(LRI->first, RC);
+ DEBUG(dbgs() << " to stack slot #" << FI << "\n");
+ TII->storeRegToStackSlot(*MBB, MI, LR.PhysReg, SpillKill, FI, RC, TRI);
+ ++NumStores; // Update statistics
+
+ // If this register is used by DBG_VALUE then insert new DBG_VALUE to
+ // identify spilled location as the place to find corresponding variable's
+ // value.
+ if (MachineInstr *DBG = LiveDbgValueMap.lookup(LRI->first)) {
+ const MDNode *MDPtr =
+ DBG->getOperand(DBG->getNumOperands()-1).getMetadata();
+ int64_t Offset = 0;
+ if (DBG->getOperand(1).isImm())
+ Offset = DBG->getOperand(1).getImm();
+ DebugLoc DL;
+ if (MI == MBB->end()) {
+ // If MI is at basic block end then use last instruction's location.
+ MachineBasicBlock::iterator EI = MI;
+ DL = (--EI)->getDebugLoc();
+ }
+ else
+ DL = MI->getDebugLoc();
+ if (MachineInstr *NewDV =
+ TII->emitFrameIndexDebugValue(*MF, FI, Offset, MDPtr, DL)) {
+ MachineBasicBlock *MBB = DBG->getParent();
+ MBB->insert(MI, NewDV);
+ DEBUG(dbgs() << "Inserting debug info due to spill:" << "\n" << *NewDV);
+ LiveDbgValueMap[LRI->first] = NewDV;
+ }
+ }
+ if (SpillKill)
+ LR.LastUse = 0; // Don't kill register again
+ }
+ killVirtReg(LRI);
+}
+
+/// spillAll - Spill all dirty virtregs without killing them.
+void RAFast::spillAll(MachineInstr *MI) {
+ if (LiveVirtRegs.empty()) return;
+ isBulkSpilling = true;
+ // The LiveRegMap is keyed by an unsigned (the virtreg number), so the order
+ // of spilling here is deterministic, if arbitrary.
+ for (LiveRegMap::iterator i = LiveVirtRegs.begin(), e = LiveVirtRegs.end();
+ i != e; ++i)
+ spillVirtReg(MI, i);
+ LiveVirtRegs.clear();
+ isBulkSpilling = false;
+}
+
+/// usePhysReg - Handle the direct use of a physical register.
+/// Check that the register is not used by a virtreg.
+/// Kill the physreg, marking it free.
+/// This may add implicit kills to MO->getParent() and invalidate MO.
+void RAFast::usePhysReg(MachineOperand &MO) {
+ unsigned PhysReg = MO.getReg();
+ assert(TargetRegisterInfo::isPhysicalRegister(PhysReg) &&
+ "Bad usePhysReg operand");
+
+ switch (PhysRegState[PhysReg]) {
+ case regDisabled:
+ break;
+ case regReserved:
+ PhysRegState[PhysReg] = regFree;
+ // Fall through
+ case regFree:
+ UsedInInstr.set(PhysReg);
+ MO.setIsKill();
+ return;
+ default:
+ // The physreg was allocated to a virtual register. That means the value we
+ // wanted has been clobbered.
+ llvm_unreachable("Instruction uses an allocated register");
+ }
+
+ // Maybe a superregister is reserved?
+ for (const unsigned *AS = TRI->getAliasSet(PhysReg);
+ unsigned Alias = *AS; ++AS) {
+ switch (PhysRegState[Alias]) {
+ case regDisabled:
+ break;
+ case regReserved:
+ assert(TRI->isSuperRegister(PhysReg, Alias) &&
+ "Instruction is not using a subregister of a reserved register");
+ // Leave the superregister in the working set.
+ PhysRegState[Alias] = regFree;
+ UsedInInstr.set(Alias);
+ MO.getParent()->addRegisterKilled(Alias, TRI, true);
+ return;
+ case regFree:
+ if (TRI->isSuperRegister(PhysReg, Alias)) {
+ // Leave the superregister in the working set.
+ UsedInInstr.set(Alias);
+ MO.getParent()->addRegisterKilled(Alias, TRI, true);
+ return;
+ }
+ // Some other alias was in the working set - clear it.
+ PhysRegState[Alias] = regDisabled;
+ break;
+ default:
+ llvm_unreachable("Instruction uses an alias of an allocated register");
+ }
+ }
+
+ // All aliases are disabled, bring register into working set.
+ PhysRegState[PhysReg] = regFree;
+ UsedInInstr.set(PhysReg);
+ MO.setIsKill();
+}
+
+/// definePhysReg - Mark PhysReg as reserved or free after spilling any
+/// virtregs. This is very similar to defineVirtReg except the physreg is
+/// reserved instead of allocated.
+void RAFast::definePhysReg(MachineInstr *MI, unsigned PhysReg,
+ RegState NewState) {
+ UsedInInstr.set(PhysReg);
+ switch (unsigned VirtReg = PhysRegState[PhysReg]) {
+ case regDisabled:
+ break;
+ default:
+ spillVirtReg(MI, VirtReg);
+ // Fall through.
+ case regFree:
+ case regReserved:
+ PhysRegState[PhysReg] = NewState;
+ return;
+ }
+
+ // This is a disabled register, disable all aliases.
+ PhysRegState[PhysReg] = NewState;
+ for (const unsigned *AS = TRI->getAliasSet(PhysReg);
+ unsigned Alias = *AS; ++AS) {
+ UsedInInstr.set(Alias);
+ switch (unsigned VirtReg = PhysRegState[Alias]) {
+ case regDisabled:
+ break;
+ default:
+ spillVirtReg(MI, VirtReg);
+ // Fall through.
+ case regFree:
+ case regReserved:
+ PhysRegState[Alias] = regDisabled;
+ if (TRI->isSuperRegister(PhysReg, Alias))
+ return;
+ break;
+ }
+ }
+}
+
+
+// calcSpillCost - Return the cost of spilling to clear out PhysReg and its
+// aliases so it is free for allocation.
+// Returns 0 when PhysReg is free or disabled with all aliases disabled - it
+// can be allocated directly.
+// Returns spillImpossible when PhysReg or an alias can't be spilled.
+unsigned RAFast::calcSpillCost(unsigned PhysReg) const {
+ if (UsedInInstr.test(PhysReg))
+ return spillImpossible;
+ switch (unsigned VirtReg = PhysRegState[PhysReg]) {
+ case regDisabled:
+ break;
+ case regFree:
+ return 0;
+ case regReserved:
+ return spillImpossible;
+ default:
+ return LiveVirtRegs.lookup(VirtReg).Dirty ? spillDirty : spillClean;
+ }
+
+ // This is a disabled register, add up the cost of its aliases.
+ unsigned Cost = 0;
+ for (const unsigned *AS = TRI->getAliasSet(PhysReg);
+ unsigned Alias = *AS; ++AS) {
+ if (UsedInInstr.test(Alias))
+ return spillImpossible;
+ switch (unsigned VirtReg = PhysRegState[Alias]) {
+ case regDisabled:
+ break;
+ case regFree:
+ ++Cost;
+ break;
+ case regReserved:
+ return spillImpossible;
+ default:
+ Cost += LiveVirtRegs.lookup(VirtReg).Dirty ? spillDirty : spillClean;
+ break;
+ }
+ }
+ return Cost;
+}
+
+
+/// assignVirtToPhysReg - This method updates local state so that we know
+/// that PhysReg is the proper container for VirtReg now. The physical
+/// register must not be used for anything else when this is called.
+///
+void RAFast::assignVirtToPhysReg(LiveRegEntry &LRE, unsigned PhysReg) {
+ DEBUG(dbgs() << "Assigning %reg" << LRE.first << " to "
+ << TRI->getName(PhysReg) << "\n");
+ PhysRegState[PhysReg] = LRE.first;
+ assert(!LRE.second.PhysReg && "Already assigned a physreg");
+ LRE.second.PhysReg = PhysReg;
+}
+
+/// allocVirtReg - Allocate a physical register for VirtReg.
+void RAFast::allocVirtReg(MachineInstr *MI, LiveRegEntry &LRE, unsigned Hint) {
+ const unsigned VirtReg = LRE.first;
+
+ assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
+ "Can only allocate virtual registers");
+
+ const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
+
+ // Ignore invalid hints.
+ if (Hint && (!TargetRegisterInfo::isPhysicalRegister(Hint) ||
+ !RC->contains(Hint) || !Allocatable.test(Hint)))
+ Hint = 0;
+
+ // Take hint when possible.
+ if (Hint) {
+ switch(calcSpillCost(Hint)) {
+ default:
+ definePhysReg(MI, Hint, regFree);
+ // Fall through.
+ case 0:
+ return assignVirtToPhysReg(LRE, Hint);
+ case spillImpossible:
+ break;
+ }
+ }
+
+ TargetRegisterClass::iterator AOB = RC->allocation_order_begin(*MF);
+ TargetRegisterClass::iterator AOE = RC->allocation_order_end(*MF);
+
+ // First try to find a completely free register.
+ for (TargetRegisterClass::iterator I = AOB; I != AOE; ++I) {
+ unsigned PhysReg = *I;
+ if (PhysRegState[PhysReg] == regFree && !UsedInInstr.test(PhysReg) &&
+ Allocatable.test(PhysReg))
+ return assignVirtToPhysReg(LRE, PhysReg);
+ }
+
+ DEBUG(dbgs() << "Allocating %reg" << VirtReg << " from " << RC->getName()
+ << "\n");
+
+ unsigned BestReg = 0, BestCost = spillImpossible;
+ for (TargetRegisterClass::iterator I = AOB; I != AOE; ++I) {
+ if (!Allocatable.test(*I))
+ continue;
+ unsigned Cost = calcSpillCost(*I);
+ // Cost is 0 when all aliases are already disabled.
+ if (Cost == 0)
+ return assignVirtToPhysReg(LRE, *I);
+ if (Cost < BestCost)
+ BestReg = *I, BestCost = Cost;
+ }
+
+ if (BestReg) {
+ definePhysReg(MI, BestReg, regFree);
+ return assignVirtToPhysReg(LRE, BestReg);
+ }
+
+ // Nothing we can do.
+ std::string msg;
+ raw_string_ostream Msg(msg);
+ Msg << "Ran out of registers during register allocation!";
+ if (MI->isInlineAsm()) {
+ Msg << "\nPlease check your inline asm statement for "
+ << "invalid constraints:\n";
+ MI->print(Msg, TM);
+ }
+ report_fatal_error(Msg.str());
+}
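
The selection order implemented by allocVirtReg — honor a usable hint, otherwise take the first completely free register in allocation order, otherwise evict the cheapest candidate — can be sketched independently of the LLVM types. The cost callback and register numbering below are hypothetical, and the sketch only chooses a register; it does not perform the spill itself:

#include <functional>
#include <limits>
#include <vector>

// Cost(Reg) follows the convention above: 0 = free, max = cannot be used.
unsigned pickPhysReg(unsigned Hint, const std::vector<unsigned> &Order,
                     const std::function<unsigned(unsigned)> &Cost) {
  const unsigned Impossible = std::numeric_limits<unsigned>::max();
  // 1. Take the hint whenever it can be made available at finite cost.
  if (Hint && Cost(Hint) != Impossible)
    return Hint;
  // 2. Otherwise prefer the first completely free register.
  for (unsigned Reg : Order)
    if (Cost(Reg) == 0)
      return Reg;
  // 3. Finally pick the cheapest register to evict, if any.
  unsigned Best = 0, BestCost = Impossible;
  for (unsigned Reg : Order) {
    unsigned C = Cost(Reg);
    if (C < BestCost) { Best = Reg; BestCost = C; }
  }
  return Best; // 0 signals that allocation failed
}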
+
+/// defineVirtReg - Allocate a register for VirtReg and mark it as dirty.
+RAFast::LiveRegMap::iterator
+RAFast::defineVirtReg(MachineInstr *MI, unsigned OpNum,
+ unsigned VirtReg, unsigned Hint) {
+ assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
+ "Not a virtual register");
+ LiveRegMap::iterator LRI;
+ bool New;
+ tie(LRI, New) = LiveVirtRegs.insert(std::make_pair(VirtReg, LiveReg()));
+ LiveReg &LR = LRI->second;
+ if (New) {
+ // If there is no hint, peek at the only use of this register.
+ if ((!Hint || !TargetRegisterInfo::isPhysicalRegister(Hint)) &&
+ MRI->hasOneNonDBGUse(VirtReg)) {
+ const MachineInstr &UseMI = *MRI->use_nodbg_begin(VirtReg);
+ // It's a copy, use the destination register as a hint.
+ if (UseMI.isCopyLike())
+ Hint = UseMI.getOperand(0).getReg();
+ }
+ allocVirtReg(MI, *LRI, Hint);
+ } else if (LR.LastUse) {
+ // Redefining a live register - kill at the last use, unless it is this
+ // instruction defining VirtReg multiple times.
+ if (LR.LastUse != MI || LR.LastUse->getOperand(LR.LastOpNum).isUse())
+ addKillFlag(LR);
+ }
+ assert(LR.PhysReg && "Register not assigned");
+ LR.LastUse = MI;
+ LR.LastOpNum = OpNum;
+ LR.Dirty = true;
+ UsedInInstr.set(LR.PhysReg);
+ return LRI;
+}
+
+/// reloadVirtReg - Make sure VirtReg is available in a physreg and return it.
+RAFast::LiveRegMap::iterator
+RAFast::reloadVirtReg(MachineInstr *MI, unsigned OpNum,
+ unsigned VirtReg, unsigned Hint) {
+ assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
+ "Not a virtual register");
+ LiveRegMap::iterator LRI;
+ bool New;
+ tie(LRI, New) = LiveVirtRegs.insert(std::make_pair(VirtReg, LiveReg()));
+ LiveReg &LR = LRI->second;
+ MachineOperand &MO = MI->getOperand(OpNum);
+ if (New) {
+ allocVirtReg(MI, *LRI, Hint);
+ const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
+ int FrameIndex = getStackSpaceFor(VirtReg, RC);
+ DEBUG(dbgs() << "Reloading %reg" << VirtReg << " into "
+ << TRI->getName(LR.PhysReg) << "\n");
+ TII->loadRegFromStackSlot(*MBB, MI, LR.PhysReg, FrameIndex, RC, TRI);
+ ++NumLoads;
+ } else if (LR.Dirty) {
+ if (isLastUseOfLocalReg(MO)) {
+ DEBUG(dbgs() << "Killing last use: " << MO << "\n");
+ if (MO.isUse())
+ MO.setIsKill();
+ else
+ MO.setIsDead();
+ } else if (MO.isKill()) {
+ DEBUG(dbgs() << "Clearing dubious kill: " << MO << "\n");
+ MO.setIsKill(false);
+ } else if (MO.isDead()) {
+ DEBUG(dbgs() << "Clearing dubious dead: " << MO << "\n");
+ MO.setIsDead(false);
+ }
+ } else if (MO.isKill()) {
+ // We must remove kill flags from uses of reloaded registers because the
+ // register would be killed immediately, and there might be a second use:
+ // %foo = OR %x<kill>, %x
+ // This would cause a second reload of %x into a different register.
+ DEBUG(dbgs() << "Clearing clean kill: " << MO << "\n");
+ MO.setIsKill(false);
+ } else if (MO.isDead()) {
+ DEBUG(dbgs() << "Clearing clean dead: " << MO << "\n");
+ MO.setIsDead(false);
+ }
+ assert(LR.PhysReg && "Register not assigned");
+ LR.LastUse = MI;
+ LR.LastOpNum = OpNum;
+ UsedInInstr.set(LR.PhysReg);
+ return LRI;
+}
+
+// setPhysReg - Change operand OpNum in MI to refer to PhysReg, considering
+// subregs. This may invalidate any operand pointers.
+// Return true if the operand kills its register.
+bool RAFast::setPhysReg(MachineInstr *MI, unsigned OpNum, unsigned PhysReg) {
+ MachineOperand &MO = MI->getOperand(OpNum);
+ if (!MO.getSubReg()) {
+ MO.setReg(PhysReg);
+ return MO.isKill() || MO.isDead();
+ }
+
+ // Handle subregister index.
+ MO.setReg(PhysReg ? TRI->getSubReg(PhysReg, MO.getSubReg()) : 0);
+ MO.setSubReg(0);
+
+ // A kill flag implies killing the full register. Add corresponding super
+ // register kill.
+ if (MO.isKill()) {
+ MI->addRegisterKilled(PhysReg, TRI, true);
+ return true;
+ }
+ return MO.isDead();
+}
+
+// Handle special instruction operands like early clobbers and tied ops when
+// there are additional physreg defines.
+void RAFast::handleThroughOperands(MachineInstr *MI,
+ SmallVectorImpl<unsigned> &VirtDead) {
+ DEBUG(dbgs() << "Scanning for through registers:");
+ SmallSet<unsigned, 8> ThroughRegs;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ if (MO.isEarlyClobber() || MI->isRegTiedToDefOperand(i) ||
+ (MO.getSubReg() && MI->readsVirtualRegister(Reg))) {
+ if (ThroughRegs.insert(Reg))
+ DEBUG(dbgs() << " %reg" << Reg);
+ }
+ }
+
+ // If any physreg defines collide with preallocated through registers,
+ // we must spill and reallocate.
+ DEBUG(dbgs() << "\nChecking for physdef collisions.\n");
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isDef()) continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ UsedInInstr.set(Reg);
+ if (ThroughRegs.count(PhysRegState[Reg]))
+ definePhysReg(MI, Reg, regFree);
+ for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
+ UsedInInstr.set(*AS);
+ if (ThroughRegs.count(PhysRegState[*AS]))
+ definePhysReg(MI, *AS, regFree);
+ }
+ }
+
+ SmallVector<unsigned, 8> PartialDefs;
+ DEBUG(dbgs() << "Allocating tied uses and early clobbers.\n");
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ if (MO.isUse()) {
+ unsigned DefIdx = 0;
+ if (!MI->isRegTiedToDefOperand(i, &DefIdx)) continue;
+ DEBUG(dbgs() << "Operand " << i << "("<< MO << ") is tied to operand "
+ << DefIdx << ".\n");
+ LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, 0);
+ unsigned PhysReg = LRI->second.PhysReg;
+ setPhysReg(MI, i, PhysReg);
+ // Note: we don't update the def operand yet. That would cause the normal
+ // def-scan to attempt spilling.
+ } else if (MO.getSubReg() && MI->readsVirtualRegister(Reg)) {
+ DEBUG(dbgs() << "Partial redefine: " << MO << "\n");
+ // Reload the register, but don't assign to the operand just yet.
+ // That would confuse the later phys-def processing pass.
+ LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, 0);
+ PartialDefs.push_back(LRI->second.PhysReg);
+ } else if (MO.isEarlyClobber()) {
+ // Note: defineVirtReg may invalidate MO.
+ LiveRegMap::iterator LRI = defineVirtReg(MI, i, Reg, 0);
+ unsigned PhysReg = LRI->second.PhysReg;
+ if (setPhysReg(MI, i, PhysReg))
+ VirtDead.push_back(Reg);
+ }
+ }
+
+ // Restore UsedInInstr to a state usable for allocating normal virtual uses.
+ UsedInInstr.reset();
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ UsedInInstr.set(Reg);
+ for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
+ UsedInInstr.set(*AS);
+ }
+
+ // Also mark PartialDefs as used to avoid reallocation.
+ for (unsigned i = 0, e = PartialDefs.size(); i != e; ++i)
+ UsedInInstr.set(PartialDefs[i]);
+}
+
+void RAFast::AllocateBasicBlock() {
+ DEBUG(dbgs() << "\nAllocating " << *MBB);
+
+ PhysRegState.assign(TRI->getNumRegs(), regDisabled);
+ assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");
+
+ MachineBasicBlock::iterator MII = MBB->begin();
+
+ // Add live-in registers as live.
+ for (MachineBasicBlock::livein_iterator I = MBB->livein_begin(),
+ E = MBB->livein_end(); I != E; ++I)
+ if (Allocatable.test(*I))
+ definePhysReg(MII, *I, regReserved);
+
+ SmallVector<unsigned, 8> VirtDead;
+ SmallVector<MachineInstr*, 32> Coalesced;
+
+ // Now sequentially allocate each instruction in the MBB.
+ while (MII != MBB->end()) {
+ MachineInstr *MI = MII++;
+ const TargetInstrDesc &TID = MI->getDesc();
+ DEBUG({
+ dbgs() << "\n>> " << *MI << "Regs:";
+ for (unsigned Reg = 1, E = TRI->getNumRegs(); Reg != E; ++Reg) {
+ if (PhysRegState[Reg] == regDisabled) continue;
+ dbgs() << " " << TRI->getName(Reg);
+ switch(PhysRegState[Reg]) {
+ case regFree:
+ break;
+ case regReserved:
+ dbgs() << "*";
+ break;
+ default:
+ dbgs() << "=%reg" << PhysRegState[Reg];
+ if (LiveVirtRegs[PhysRegState[Reg]].Dirty)
+ dbgs() << "*";
+ assert(LiveVirtRegs[PhysRegState[Reg]].PhysReg == Reg &&
+ "Bad inverse map");
+ break;
+ }
+ }
+ dbgs() << '\n';
+ // Check that LiveVirtRegs is the inverse.
+ for (LiveRegMap::iterator i = LiveVirtRegs.begin(),
+ e = LiveVirtRegs.end(); i != e; ++i) {
+ assert(TargetRegisterInfo::isVirtualRegister(i->first) &&
+ "Bad map key");
+ assert(TargetRegisterInfo::isPhysicalRegister(i->second.PhysReg) &&
+ "Bad map value");
+ assert(PhysRegState[i->second.PhysReg] == i->first &&
+ "Bad inverse map");
+ }
+ });
+
+ // Debug values are not allowed to change codegen in any way.
+ if (MI->isDebugValue()) {
+ bool ScanDbgValue = true;
+ while (ScanDbgValue) {
+ ScanDbgValue = false;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ LiveDbgValueMap[Reg] = MI;
+ LiveRegMap::iterator LRI = LiveVirtRegs.find(Reg);
+ if (LRI != LiveVirtRegs.end())
+ setPhysReg(MI, i, LRI->second.PhysReg);
+ else {
+ int SS = StackSlotForVirtReg[Reg];
+ if (SS == -1)
+ // We can't allocate a physreg for a DebugValue, sorry!
+ MO.setReg(0);
+ else {
+ // Modify DBG_VALUE now that the value is in a spill slot.
+ int64_t Offset = MI->getOperand(1).getImm();
+ const MDNode *MDPtr =
+ MI->getOperand(MI->getNumOperands()-1).getMetadata();
+ DebugLoc DL = MI->getDebugLoc();
+ if (MachineInstr *NewDV =
+ TII->emitFrameIndexDebugValue(*MF, SS, Offset, MDPtr, DL)) {
+ DEBUG(dbgs() << "Modifying debug info due to spill:" <<
+ "\t" << *MI);
+ MachineBasicBlock *MBB = MI->getParent();
+ MBB->insert(MBB->erase(MI), NewDV);
+ // Scan NewDV operands from the beginning.
+ MI = NewDV;
+ ScanDbgValue = true;
+ break;
+ } else
+ // We can't allocate a physreg for a DebugValue; sorry!
+ MO.setReg(0);
+ }
+ }
+ }
+ }
+ // Next instruction.
+ continue;
+ }
+
+ // If this is a copy, we may be able to coalesce.
+ unsigned CopySrc = 0, CopyDst = 0, CopySrcSub = 0, CopyDstSub = 0;
+ if (MI->isCopy()) {
+ CopyDst = MI->getOperand(0).getReg();
+ CopySrc = MI->getOperand(1).getReg();
+ CopyDstSub = MI->getOperand(0).getSubReg();
+ CopySrcSub = MI->getOperand(1).getSubReg();
+ }
+
+ // Track registers used by instruction.
+ UsedInInstr.reset();
+
+ // First scan.
+ // Mark physreg uses and early clobbers as used.
+ // Find the end of the virtreg operands
+ unsigned VirtOpEnd = 0;
+ bool hasTiedOps = false;
+ bool hasEarlyClobbers = false;
+ bool hasPartialRedefs = false;
+ bool hasPhysDefs = false;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg) continue;
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ VirtOpEnd = i+1;
+ if (MO.isUse()) {
+ hasTiedOps = hasTiedOps ||
+ TID.getOperandConstraint(i, TOI::TIED_TO) != -1;
+ } else {
+ if (MO.isEarlyClobber())
+ hasEarlyClobbers = true;
+ if (MO.getSubReg() && MI->readsVirtualRegister(Reg))
+ hasPartialRedefs = true;
+ }
+ continue;
+ }
+ if (!Allocatable.test(Reg)) continue;
+ if (MO.isUse()) {
+ usePhysReg(MO);
+ } else if (MO.isEarlyClobber()) {
+ definePhysReg(MI, Reg, (MO.isImplicit() || MO.isDead()) ?
+ regFree : regReserved);
+ hasEarlyClobbers = true;
+ } else
+ hasPhysDefs = true;
+ }
+
+ // The instruction may have virtual register operands that must be allocated
+ // the same register at use-time and def-time: early clobbers and tied
+ // operands. If there are also physical defs, these registers must avoid
+ // both physical defs and uses, making them more constrained than normal
+ // operands.
+ // Similarly, if there are multiple defs and tied operands, we must make
+ // sure the same register is allocated to uses and defs.
+ // We didn't detect inline asm tied operands above, so just make this extra
+ // pass for all inline asm.
+ if (MI->isInlineAsm() || hasEarlyClobbers || hasPartialRedefs ||
+ (hasTiedOps && (hasPhysDefs || TID.getNumDefs() > 1))) {
+ handleThroughOperands(MI, VirtDead);
+ // Don't attempt coalescing when we have funny stuff going on.
+ CopyDst = 0;
+ // Pretend we have early clobbers so the use operands get marked below.
+ // This is not necessary for the common case of a single tied use.
+ hasEarlyClobbers = true;
+ }
+
+ // Second scan.
+ // Allocate virtreg uses.
+ for (unsigned i = 0; i != VirtOpEnd; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ if (MO.isUse()) {
+ LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, CopyDst);
+ unsigned PhysReg = LRI->second.PhysReg;
+ CopySrc = (CopySrc == Reg || CopySrc == PhysReg) ? PhysReg : 0;
+ if (setPhysReg(MI, i, PhysReg))
+ killVirtReg(LRI);
+ }
+ }
+
+ MRI->addPhysRegsUsed(UsedInInstr);
+
+ // Track registers defined by instruction - early clobbers and tied uses at
+ // this point.
+ UsedInInstr.reset();
+ if (hasEarlyClobbers) {
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ // Look for physreg defs and tied uses.
+ if (!MO.isDef() && !MI->isRegTiedToDefOperand(i)) continue;
+ UsedInInstr.set(Reg);
+ for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
+ UsedInInstr.set(*AS);
+ }
+ }
+
+ unsigned DefOpEnd = MI->getNumOperands();
+ if (TID.isCall()) {
+ // Spill all virtregs before a call. This serves two purposes: 1. If an
+ // exception is thrown, the landing pad is going to expect to find
+ // registers in their spill slots, and 2. we don't have to wade through
+ // all the <imp-def> operands on the call instruction.
+ DefOpEnd = VirtOpEnd;
+ DEBUG(dbgs() << " Spilling remaining registers before call.\n");
+ spillAll(MI);
+
+ // The imp-defs are skipped below, but we still need to mark those
+ // registers as used by the function.
+ SkippedInstrs.insert(&TID);
+ }
+
+ // Third scan.
+ // Allocate defs and collect dead defs.
+ for (unsigned i = 0; i != DefOpEnd; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber())
+ continue;
+ unsigned Reg = MO.getReg();
+
+ if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ if (!Allocatable.test(Reg)) continue;
+ definePhysReg(MI, Reg, (MO.isImplicit() || MO.isDead()) ?
+ regFree : regReserved);
+ continue;
+ }
+ LiveRegMap::iterator LRI = defineVirtReg(MI, i, Reg, CopySrc);
+ unsigned PhysReg = LRI->second.PhysReg;
+ if (setPhysReg(MI, i, PhysReg)) {
+ VirtDead.push_back(Reg);
+ CopyDst = 0; // cancel coalescing;
+ } else
+ CopyDst = (CopyDst == Reg || CopyDst == PhysReg) ? PhysReg : 0;
+ }
+
+ // Kill dead defs after the scan to ensure that multiple defs of the same
+ // register are allocated identically. We didn't need to do this for uses
+ // because we are creating our own kill flags, and they are always at the
+ // last use.
+ for (unsigned i = 0, e = VirtDead.size(); i != e; ++i)
+ killVirtReg(VirtDead[i]);
+ VirtDead.clear();
+
+ MRI->addPhysRegsUsed(UsedInInstr);
+
+ if (CopyDst && CopyDst == CopySrc && CopyDstSub == CopySrcSub) {
+ DEBUG(dbgs() << "-- coalescing: " << *MI);
+ Coalesced.push_back(MI);
+ } else {
+ DEBUG(dbgs() << "<< " << *MI);
+ }
+ }
+
+ // Spill all physical registers holding virtual registers now.
+ DEBUG(dbgs() << "Spilling live registers at end of block.\n");
+ spillAll(MBB->getFirstTerminator());
+
+ // Erase all the coalesced copies. We are delaying it until now because
+ // LiveVirtRegs might refer to the instrs.
+ for (unsigned i = 0, e = Coalesced.size(); i != e; ++i)
+ MBB->erase(Coalesced[i]);
+ NumCopies += Coalesced.size();
+
+ DEBUG(MBB->dump());
+}
+
+/// runOnMachineFunction - Register allocate the whole function
+///
+bool RAFast::runOnMachineFunction(MachineFunction &Fn) {
+ DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n"
+ << "********** Function: "
+ << ((Value*)Fn.getFunction())->getName() << '\n');
+ MF = &Fn;
+ MRI = &MF->getRegInfo();
+ TM = &Fn.getTarget();
+ TRI = TM->getRegisterInfo();
+ TII = TM->getInstrInfo();
+
+ UsedInInstr.resize(TRI->getNumRegs());
+ Allocatable = TRI->getAllocatableSet(*MF);
+
+ // Initialize the virtual->physical register map to have a 'null'
+ // mapping for all virtual registers.
+ unsigned LastVirtReg = MRI->getLastVirtReg();
+ StackSlotForVirtReg.grow(LastVirtReg);
+
+ // Loop over all of the basic blocks, eliminating virtual register references
+ for (MachineFunction::iterator MBBi = Fn.begin(), MBBe = Fn.end();
+ MBBi != MBBe; ++MBBi) {
+ MBB = &*MBBi;
+ AllocateBasicBlock();
+ }
+
+ // Make sure the set of used physregs is closed under subreg operations.
+ MRI->closePhysRegsUsed(*TRI);
+
+ // Add the clobber lists for all the instructions we skipped earlier.
+ for (SmallPtrSet<const TargetInstrDesc*, 4>::const_iterator
+ I = SkippedInstrs.begin(), E = SkippedInstrs.end(); I != E; ++I)
+ if (const unsigned *Defs = (*I)->getImplicitDefs())
+ while (*Defs)
+ MRI->setPhysRegUsed(*Defs++);
+
+ SkippedInstrs.clear();
+ StackSlotForVirtReg.clear();
+ LiveDbgValueMap.clear();
+ return true;
+}
+
+FunctionPass *llvm::createFastRegisterAllocator() {
+ return new RAFast();
+}
diff --git a/libclamav/c++/llvm/lib/CodeGen/RegAllocLinearScan.cpp b/libclamav/c++/llvm/lib/CodeGen/RegAllocLinearScan.cpp
index 5c5a394..5c62354 100644
--- a/libclamav/c++/llvm/lib/CodeGen/RegAllocLinearScan.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/RegAllocLinearScan.cpp
@@ -83,13 +83,14 @@ namespace {
// pressure, it can cause fewer GPRs to be held in the queue.
static cl::opt<unsigned>
NumRecentlyUsedRegs("linearscan-skip-count",
- cl::desc("Number of registers for linearscan to remember to skip."),
+ cl::desc("Number of registers for linearscan to remember"
+ "to skip."),
cl::init(0),
cl::Hidden);
-
+
struct RALinScan : public MachineFunctionPass {
static char ID;
- RALinScan() : MachineFunctionPass(&ID) {
+ RALinScan() : MachineFunctionPass(ID) {
// Initialize the queue to record recently-used registers.
if (NumRecentlyUsedRegs > 0)
RecentRegs.resize(NumRecentlyUsedRegs, 0);
@@ -124,9 +125,10 @@ namespace {
const TargetRegisterInfo* tri_;
const TargetInstrInfo* tii_;
BitVector allocatableRegs_;
+ BitVector reservedRegs_;
LiveIntervals* li_;
LiveStacks* ls_;
- const MachineLoopInfo *loopInfo;
+ MachineLoopInfo *loopInfo;
/// handled_ - Intervals are added to the handled_ set in the order of their
/// start value. This is used for backtracking.
@@ -254,9 +256,9 @@ namespace {
SmallVector<LiveInterval*, 8> &SpillIntervals);
/// attemptTrivialCoalescing - If a simple interval is defined by a copy,
- /// try allocate the definition the same register as the source register
- /// if the register is not defined during live time of the interval. This
- /// eliminate a copy. This is used to coalesce copies which were not
+ /// try to allocate the definition to the same register as the source,
+ /// if the register is not defined during the life time of the interval.
+ /// This eliminates a copy, and is used to coalesce copies which were not
/// coalesced away before allocation either due to dest and src being in
/// different register classes or because the coalescer was overly
/// conservative.
@@ -334,6 +336,17 @@ namespace {
SmallVector<unsigned, 256> &inactiveCounts,
bool SkipDGRegs);
+ /// getFirstNonReservedPhysReg - return the first non-reserved physical
+ /// register in the register class.
+ unsigned getFirstNonReservedPhysReg(const TargetRegisterClass *RC) {
+ TargetRegisterClass::iterator aoe = RC->allocation_order_end(*mf_);
+ TargetRegisterClass::iterator i = RC->allocation_order_begin(*mf_);
+ while (i != aoe && reservedRegs_.test(*i))
+ ++i;
+ assert(i != aoe && "All registers reserved?!");
+ return *i;
+ }
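
The new helper is essentially "first register in allocation order that is not reserved". A standalone sketch of the same scan, with a plain bool vector standing in for the reserved-register BitVector:

#include <cassert>
#include <vector>

unsigned firstNonReserved(const std::vector<unsigned> &AllocOrder,
                          const std::vector<bool> &Reserved) {
  for (unsigned Reg : AllocOrder)
    if (!Reserved[Reg])        // skip registers the target has reserved
      return Reg;
  assert(false && "All registers reserved?!");
  return 0;
}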
+
void ComputeRelatedRegClasses();
template <typename ItTy>
@@ -357,8 +370,8 @@ namespace {
char RALinScan::ID = 0;
}
-static RegisterPass<RALinScan>
-X("linearscan-regalloc", "Linear Scan Register Allocator");
+INITIALIZE_PASS(RALinScan, "linearscan-regalloc",
+ "Linear Scan Register Allocator", false, false);
void RALinScan::ComputeRelatedRegClasses() {
// First pass, add all reg classes to the union, and determine at least one
@@ -370,7 +383,7 @@ void RALinScan::ComputeRelatedRegClasses() {
for (TargetRegisterClass::iterator I = (*RCI)->begin(), E = (*RCI)->end();
I != E; ++I) {
HasAliases = HasAliases || *tri_->getAliasSet(*I) != 0;
-
+
const TargetRegisterClass *&PRC = OneClassForEachPhysReg[*I];
if (PRC) {
// Already processed this register. Just make sure we know that
@@ -381,7 +394,7 @@ void RALinScan::ComputeRelatedRegClasses() {
}
}
}
-
+
// Second pass, now that we know conservatively what register classes each reg
// belongs to, add info about aliases. We don't need to do this for targets
// without register aliases.
@@ -418,19 +431,15 @@ unsigned RALinScan::attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg) {
unsigned CandReg;
{
MachineInstr *CopyMI;
- unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
if (vni->def != SlotIndex() && vni->isDefAccurate() &&
- (CopyMI = li_->getInstructionFromIndex(vni->def)) &&
- tii_->isMoveInstr(*CopyMI, SrcReg, DstReg, SrcSubReg, DstSubReg))
+ (CopyMI = li_->getInstructionFromIndex(vni->def)) && CopyMI->isCopy())
// Defined by a copy, try to extend SrcReg forward
- CandReg = SrcReg;
+ CandReg = CopyMI->getOperand(1).getReg();
else if (TrivCoalesceEnds &&
- (CopyMI =
- li_->getInstructionFromIndex(range.end.getBaseIndex())) &&
- tii_->isMoveInstr(*CopyMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
- cur.reg == SrcReg)
+ (CopyMI = li_->getInstructionFromIndex(range.end.getBaseIndex())) &&
+ CopyMI->isCopy() && cur.reg == CopyMI->getOperand(1).getReg())
// Only used by a copy, try to extend DstReg backwards
- CandReg = DstReg;
+ CandReg = CopyMI->getOperand(0).getReg();
else
return Reg;
}
@@ -467,6 +476,7 @@ bool RALinScan::runOnMachineFunction(MachineFunction &fn) {
tri_ = tm_->getRegisterInfo();
tii_ = tm_->getInstrInfo();
allocatableRegs_ = tri_->getAllocatableSet(fn);
+ reservedRegs_ = tri_->getReservedRegs(fn);
li_ = &getAnalysis<LiveIntervals>();
ls_ = &getAnalysis<LiveStacks>();
loopInfo = &getAnalysis<MachineLoopInfo>();
@@ -485,9 +495,9 @@ bool RALinScan::runOnMachineFunction(MachineFunction &fn) {
vrm_ = &getAnalysis<VirtRegMap>();
if (!rewriter_.get()) rewriter_.reset(createVirtRegRewriter());
-
- spiller_.reset(createSpiller(mf_, li_, loopInfo, vrm_));
-
+
+ spiller_.reset(createSpiller(*this, *mf_, *vrm_));
+
initIntervalSets();
linearScan();
@@ -541,7 +551,7 @@ void RALinScan::linearScan() {
// linear scan algorithm
DEBUG({
dbgs() << "********** LINEAR SCAN **********\n"
- << "********** Function: "
+ << "********** Function: "
<< mf_->getFunction()->getName() << '\n';
printIntervals("fixed", fixed_.begin(), fixed_.end());
});
@@ -763,7 +773,8 @@ FindIntervalInVector(RALinScan::IntervalPtrs &IP, LiveInterval *LI) {
return IP.end();
}
-static void RevertVectorIteratorsTo(RALinScan::IntervalPtrs &V, SlotIndex Point){
+static void RevertVectorIteratorsTo(RALinScan::IntervalPtrs &V,
+ SlotIndex Point){
for (unsigned i = 0, e = V.size(); i != e; ++i) {
RALinScan::IntervalPtr &IP = V[i];
LiveInterval::iterator I = std::upper_bound(IP.first->begin(),
@@ -802,14 +813,14 @@ static void addStackInterval(LiveInterval *cur, LiveStacks *ls_,
static
float getConflictWeight(LiveInterval *cur, unsigned Reg, LiveIntervals *li_,
MachineRegisterInfo *mri_,
- const MachineLoopInfo *loopInfo) {
+ MachineLoopInfo *loopInfo) {
float Conflicts = 0;
for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(Reg),
E = mri_->reg_end(); I != E; ++I) {
MachineInstr *MI = &*I;
if (cur->liveAt(li_->getInstructionIndex(MI))) {
unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
- Conflicts += powf(10.0f, (float)loopDepth);
+ Conflicts += std::pow(10.0f, (float)loopDepth);
}
}
return Conflicts;
@@ -835,7 +846,7 @@ void RALinScan::findIntervalsToSpill(LiveInterval *cur,
dbgs() << tri_->getName(Candidates[i].first) << " ";
dbgs() << "\n";
});
-
+
// Calculate the number of conflicts of each candidate.
for (IntervalPtrs::iterator i = active_.begin(); i != active_.end(); ++i) {
unsigned Reg = i->first->reg;
@@ -953,7 +964,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
if (cur->empty()) {
unsigned physReg = vrm_->getRegAllocPref(cur->reg);
if (!physReg)
- physReg = *RC->allocation_order_begin(*mf_);
+ physReg = getFirstNonReservedPhysReg(RC);
DEBUG(dbgs() << tri_->getName(physReg) << '\n');
// Note the register is not really in use.
vrm_->assignVirt2Phys(cur->reg, physReg);
@@ -976,9 +987,10 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
if ((vni->def != SlotIndex()) && !vni->isUnused() &&
vni->isDefAccurate()) {
MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
- unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (CopyMI &&
- tii_->isMoveInstr(*CopyMI, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
+ if (CopyMI && CopyMI->isCopy()) {
+ unsigned DstSubReg = CopyMI->getOperand(0).getSubReg();
+ unsigned SrcReg = CopyMI->getOperand(1).getReg();
+ unsigned SrcSubReg = CopyMI->getOperand(1).getSubReg();
unsigned Reg = 0;
if (TargetRegisterInfo::isPhysicalRegister(SrcReg))
Reg = SrcReg;
@@ -1004,7 +1016,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
"Can only allocate virtual registers!");
const TargetRegisterClass *RegRC = mri_->getRegClass(Reg);
- // If this is not in a related reg class to the register we're allocating,
+ // If this is not in a related reg class to the register we're allocating,
// don't check it.
if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader &&
cur->overlapsFrom(*i->first, i->second-1)) {
@@ -1013,7 +1025,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
SpillWeightsToAdd.push_back(std::make_pair(Reg, i->first->weight));
}
}
-
+
// Speculatively check to see if we can get a register right now. If not,
// we know we won't be able to by adding more constraints. If so, we can
// check to see if it is valid. Doing an exhaustive search of the fixed_ list
@@ -1028,7 +1040,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
SmallSet<unsigned, 8> RegAliases;
for (const unsigned *AS = tri_->getAliasSet(physReg); *AS; ++AS)
RegAliases.insert(*AS);
-
+
bool ConflictsWithFixed = false;
for (unsigned i = 0, e = fixed_.size(); i != e; ++i) {
IntervalPtr &IP = fixed_[i];
@@ -1048,7 +1060,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
}
}
}
-
+
// Okay, the register picked by our speculative getFreePhysReg call turned
// out to be in use. Actually add all of the conflicting fixed registers to
// regUse_ so we can do an accurate query.
@@ -1060,7 +1072,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
LiveInterval *I = IP.first;
const TargetRegisterClass *RegRC = OneClassForEachPhysReg[I->reg];
- if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader &&
+ if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader &&
I->endIndex() > StartPosition) {
LiveInterval::iterator II = I->advanceTo(IP.second, StartPosition);
IP.second = II;
@@ -1079,11 +1091,11 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
physReg = getFreePhysReg(cur);
}
}
-
+
// Restore the physical register tracker, removing information about the
// future.
restoreRegUses();
-
+
// If we find a free register, we are done: assign this virtual to
// the free physical register and add this interval to the active
// list.
@@ -1098,7 +1110,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
UpgradeRegister(physReg);
if (LiveInterval *NextReloadLI = hasNextReloadInterval(cur)) {
// "Downgrade" physReg to try to keep physReg from being allocated until
- // the next reload from the same SS is allocated.
+ // the next reload from the same SS is allocated.
mri_->setRegAllocationHint(NextReloadLI->reg, 0, physReg);
DowngradeRegister(cur, physReg);
}
@@ -1111,7 +1123,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
for (std::vector<std::pair<unsigned, float> >::iterator
I = SpillWeightsToAdd.begin(), E = SpillWeightsToAdd.end(); I != E; ++I)
updateSpillWeights(SpillWeights, I->first, I->second, RC);
-
+
// for each interval in active, update spill weights.
for (IntervalPtrs::const_iterator i = active_.begin(), e = active_.end();
i != e; ++i) {
@@ -1121,7 +1133,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
reg = vrm_->getPhys(reg);
updateSpillWeights(SpillWeights, reg, i->first->weight, RC);
}
-
+
DEBUG(dbgs() << "\tassigning stack slot at interval "<< *cur << ":\n");
// Find a register to spill.
@@ -1135,17 +1147,22 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
e = RC->allocation_order_end(*mf_); i != e; ++i) {
unsigned reg = *i;
float regWeight = SpillWeights[reg];
- // Skip recently allocated registers.
+ // Don't even consider reserved regs.
+ if (reservedRegs_.test(reg))
+ continue;
+ // Skip recently allocated registers and reserved registers.
if (minWeight > regWeight && !isRecentlyUsed(reg))
Found = true;
RegsWeights.push_back(std::make_pair(reg, regWeight));
}
-
+
// If we didn't find a register that is spillable, try aliases?
if (!Found) {
for (TargetRegisterClass::iterator i = RC->allocation_order_begin(*mf_),
e = RC->allocation_order_end(*mf_); i != e; ++i) {
unsigned reg = *i;
+ if (reservedRegs_.test(reg))
+ continue;
// No need to worry about whether the alias register size < regsize of RC.
// We are going to spill all registers that alias it anyway.
for (const unsigned* as = tri_->getAliasSet(reg); *as; ++as)
@@ -1159,7 +1176,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
minWeight = RegsWeights[0].second;
if (minWeight == HUGE_VALF) {
// All registers must have inf weight. Just grab one!
- minReg = BestPhysReg ? BestPhysReg : *RC->allocation_order_begin(*mf_);
+ minReg = BestPhysReg ? BestPhysReg : getFirstNonReservedPhysReg(RC);
if (cur->weight == HUGE_VALF ||
li_->getApproximateInstructionCount(*cur) == 0) {
// Spill a physical register around defs and uses.
@@ -1177,7 +1194,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
assignRegOrStackSlotAtInterval(cur);
} else {
assert(false && "Ran out of registers during register allocation!");
- llvm_report_error("Ran out of registers during register allocation!");
+ report_fatal_error("Ran out of registers during register allocation!");
}
return;
}
@@ -1204,10 +1221,8 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
// linearscan.
if (cur->weight != HUGE_VALF && cur->weight <= minWeight) {
DEBUG(dbgs() << "\t\t\tspilling(c): " << *cur << '\n');
- SmallVector<LiveInterval*, 8> spillIs;
- std::vector<LiveInterval*> added;
-
- added = spiller_->spill(cur, spillIs);
+ SmallVector<LiveInterval*, 8> spillIs, added;
+ spiller_->spill(cur, added, spillIs);
std::sort(added.begin(), added.end(), LISorter());
addStackInterval(cur, ls_, li_, mri_, *vrm_);
@@ -1269,29 +1284,33 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
// The earliest start of a Spilled interval indicates up to where
// in handled we need to roll back
- assert(!spillIs.empty() && "No spill intervals?");
+ assert(!spillIs.empty() && "No spill intervals?");
SlotIndex earliestStart = spillIs[0]->beginIndex();
-
+
// Spill live intervals of virtual regs mapped to the physical register we
// want to clear (and its aliases). We only spill those that overlap with the
// current interval as the rest do not affect its allocation. We also keep
// track of the earliest start of all spilled live intervals since this will
// mark our rollback point.
- std::vector<LiveInterval*> added;
+ SmallVector<LiveInterval*, 8> added;
while (!spillIs.empty()) {
LiveInterval *sli = spillIs.back();
spillIs.pop_back();
DEBUG(dbgs() << "\t\t\tspilling(a): " << *sli << '\n');
if (sli->beginIndex() < earliestStart)
earliestStart = sli->beginIndex();
-
- std::vector<LiveInterval*> newIs;
- newIs = spiller_->spill(sli, spillIs, &earliestStart);
+ spiller_->spill(sli, added, spillIs);
addStackInterval(sli, ls_, li_, mri_, *vrm_);
- std::copy(newIs.begin(), newIs.end(), std::back_inserter(added));
spilled.insert(sli->reg);
}
+ // Include any added intervals in earliestStart.
+ for (unsigned i = 0, e = added.size(); i != e; ++i) {
+ SlotIndex SI = added[i]->beginIndex();
+ if (SI < earliestStart)
+ earliestStart = SI;
+ }
+
DEBUG(dbgs() << "\t\trolling back to: " << earliestStart << '\n');
// Scan handled in reverse order up to the earliest start of a
@@ -1414,6 +1433,9 @@ unsigned RALinScan::getFreePhysReg(LiveInterval* cur,
// Ignore "downgraded" registers.
if (SkipDGRegs && DowngradedRegs.count(Reg))
continue;
+ // Skip reserved registers.
+ if (reservedRegs_.test(Reg))
+ continue;
// Skip recently allocated registers.
if (isRegAvail(Reg) && !isRecentlyUsed(Reg)) {
FreeReg = Reg;
@@ -1442,6 +1464,9 @@ unsigned RALinScan::getFreePhysReg(LiveInterval* cur,
// Ignore "downgraded" registers.
if (SkipDGRegs && DowngradedRegs.count(Reg))
continue;
+ // Skip reserved registers.
+ if (reservedRegs_.test(Reg))
+ continue;
if (isRegAvail(Reg) && Reg < inactiveCounts.size() &&
FreeRegInactiveCount < inactiveCounts[Reg] && !isRecentlyUsed(Reg)) {
FreeReg = Reg;
@@ -1462,17 +1487,17 @@ unsigned RALinScan::getFreePhysReg(LiveInterval* cur,
unsigned RALinScan::getFreePhysReg(LiveInterval *cur) {
SmallVector<unsigned, 256> inactiveCounts;
unsigned MaxInactiveCount = 0;
-
+
const TargetRegisterClass *RC = mri_->getRegClass(cur->reg);
const TargetRegisterClass *RCLeader = RelatedRegClasses.getLeaderValue(RC);
-
+
for (IntervalPtrs::iterator i = inactive_.begin(), e = inactive_.end();
i != e; ++i) {
unsigned reg = i->first->reg;
assert(TargetRegisterInfo::isVirtualRegister(reg) &&
"Can only allocate virtual registers!");
- // If this is not in a related reg class to the register we're allocating,
+ // If this is not in a related reg class to the register we're allocating,
// don't check it.
const TargetRegisterClass *RegRC = mri_->getRegClass(reg);
if (RelatedRegClasses.getLeaderValue(RegRC) == RCLeader) {
@@ -1489,7 +1514,7 @@ unsigned RALinScan::getFreePhysReg(LiveInterval *cur) {
unsigned Preference = vrm_->getRegAllocPref(cur->reg);
if (Preference) {
DEBUG(dbgs() << "(preferred: " << tri_->getName(Preference) << ") ");
- if (isRegAvail(Preference) &&
+ if (isRegAvail(Preference) &&
RC->contains(Preference))
return Preference;
}
diff --git a/libclamav/c++/llvm/lib/CodeGen/RegAllocLocal.cpp b/libclamav/c++/llvm/lib/CodeGen/RegAllocLocal.cpp
deleted file mode 100644
index 04303cf..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/RegAllocLocal.cpp
+++ /dev/null
@@ -1,1157 +0,0 @@
-//===-- RegAllocLocal.cpp - A BasicBlock generic register allocator -------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This register allocator allocates registers to a basic block at a time,
-// attempting to keep values in registers and reusing registers as appropriate.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "regalloc"
-#include "llvm/BasicBlock.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/RegAllocRegistry.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/IndexedMap.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/STLExtras.h"
-#include <algorithm>
-using namespace llvm;
-
-STATISTIC(NumStores, "Number of stores added");
-STATISTIC(NumLoads , "Number of loads added");
-
-static RegisterRegAlloc
- localRegAlloc("local", "local register allocator",
- createLocalRegisterAllocator);
-
-namespace {
- class RALocal : public MachineFunctionPass {
- public:
- static char ID;
- RALocal() : MachineFunctionPass(&ID), StackSlotForVirtReg(-1) {}
- private:
- const TargetMachine *TM;
- MachineFunction *MF;
- const TargetRegisterInfo *TRI;
- const TargetInstrInfo *TII;
-
- // StackSlotForVirtReg - Maps virtual regs to the frame index where these
- // values are spilled.
- IndexedMap<int, VirtReg2IndexFunctor> StackSlotForVirtReg;
-
- // Virt2PhysRegMap - This map contains entries for each virtual register
- // that is currently available in a physical register.
- IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2PhysRegMap;
-
- unsigned &getVirt2PhysRegMapSlot(unsigned VirtReg) {
- return Virt2PhysRegMap[VirtReg];
- }
-
- // PhysRegsUsed - This array is effectively a map, containing entries for
- // each physical register that currently has a value (ie, it is in
- // Virt2PhysRegMap). The value mapped to is the virtual register
- // corresponding to the physical register (the inverse of the
- // Virt2PhysRegMap), or 0. The value is set to 0 if this register is pinned
- // because it is used by a future instruction, and to -2 if it is not
- // allocatable. If the entry for a physical register is -1, then the
- // physical register is "not in the map".
- //
- std::vector<int> PhysRegsUsed;
-
- // PhysRegsUseOrder - This contains a list of the physical registers that
- // currently have a virtual register value in them. This list provides an
- // ordering of registers, imposing a reallocation order. This list is only
- // used if all registers are allocated and we have to spill one, in which
- // case we spill the least recently used register. Entries at the front of
- // the list are the least recently used registers, entries at the back are
- // the most recently used.
- //
- std::vector<unsigned> PhysRegsUseOrder;
-
- // Virt2LastUseMap - This maps each virtual register to its last use
- // (MachineInstr*, operand index pair).
- IndexedMap<std::pair<MachineInstr*, unsigned>, VirtReg2IndexFunctor>
- Virt2LastUseMap;
-
- std::pair<MachineInstr*,unsigned>& getVirtRegLastUse(unsigned Reg) {
- assert(TargetRegisterInfo::isVirtualRegister(Reg) && "Illegal VirtReg!");
- return Virt2LastUseMap[Reg];
- }
-
- // VirtRegModified - This bitset contains information about which virtual
- // registers need to be spilled back to memory when their registers are
- // scavenged. If a virtual register has simply been rematerialized, there
- // is no reason to spill it to memory when we need the register back.
- //
- BitVector VirtRegModified;
-
- // UsedInMultipleBlocks - Tracks whether a particular register is used in
- // more than one block.
- BitVector UsedInMultipleBlocks;
-
- void markVirtRegModified(unsigned Reg, bool Val = true) {
- assert(TargetRegisterInfo::isVirtualRegister(Reg) && "Illegal VirtReg!");
- Reg -= TargetRegisterInfo::FirstVirtualRegister;
- if (Val)
- VirtRegModified.set(Reg);
- else
- VirtRegModified.reset(Reg);
- }
-
- bool isVirtRegModified(unsigned Reg) const {
- assert(TargetRegisterInfo::isVirtualRegister(Reg) && "Illegal VirtReg!");
- assert(Reg - TargetRegisterInfo::FirstVirtualRegister < VirtRegModified.size()
- && "Illegal virtual register!");
- return VirtRegModified[Reg - TargetRegisterInfo::FirstVirtualRegister];
- }
-
- void AddToPhysRegsUseOrder(unsigned Reg) {
- std::vector<unsigned>::iterator It =
- std::find(PhysRegsUseOrder.begin(), PhysRegsUseOrder.end(), Reg);
- if (It != PhysRegsUseOrder.end())
- PhysRegsUseOrder.erase(It);
- PhysRegsUseOrder.push_back(Reg);
- }
-
- void MarkPhysRegRecentlyUsed(unsigned Reg) {
- if (PhysRegsUseOrder.empty() ||
- PhysRegsUseOrder.back() == Reg) return; // Already most recently used
-
- for (unsigned i = PhysRegsUseOrder.size(); i != 0; --i)
- if (areRegsEqual(Reg, PhysRegsUseOrder[i-1])) {
- unsigned RegMatch = PhysRegsUseOrder[i-1]; // remove from middle
- PhysRegsUseOrder.erase(PhysRegsUseOrder.begin()+i-1);
- // Add it to the end of the list
- PhysRegsUseOrder.push_back(RegMatch);
- if (RegMatch == Reg)
- return; // Found an exact match, exit early
- }
- }
-
- public:
- virtual const char *getPassName() const {
- return "Local Register Allocator";
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- AU.addRequiredID(PHIEliminationID);
- AU.addRequiredID(TwoAddressInstructionPassID);
- MachineFunctionPass::getAnalysisUsage(AU);
- }
-
- private:
- /// runOnMachineFunction - Register allocate the whole function
- bool runOnMachineFunction(MachineFunction &Fn);
-
- /// AllocateBasicBlock - Register allocate the specified basic block.
- void AllocateBasicBlock(MachineBasicBlock &MBB);
-
-
- /// areRegsEqual - This method returns true if the specified registers are
- /// related to each other. To do this, it checks to see if they are equal
- /// or if the first register is in the alias set of the second register.
- ///
- bool areRegsEqual(unsigned R1, unsigned R2) const {
- if (R1 == R2) return true;
- for (const unsigned *AliasSet = TRI->getAliasSet(R2);
- *AliasSet; ++AliasSet) {
- if (*AliasSet == R1) return true;
- }
- return false;
- }
-
- /// getStackSpaceFor - This returns the frame index of the specified virtual
- /// register on the stack, allocating space if necessary.
- int getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC);
-
- /// removePhysReg - This method marks the specified physical register as no
- /// longer being in use.
- ///
- void removePhysReg(unsigned PhysReg);
-
- /// spillVirtReg - This method spills the value specified by PhysReg into
- /// the virtual register slot specified by VirtReg. It then updates the RA
- /// data structures to indicate the fact that PhysReg is now available.
- ///
- void spillVirtReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
- unsigned VirtReg, unsigned PhysReg);
-
- /// spillPhysReg - This method spills the specified physical register into
- /// the virtual register slot associated with it. If OnlyVirtRegs is set to
- /// true, then the request is ignored if the physical register does not
- /// contain a virtual register.
- ///
- void spillPhysReg(MachineBasicBlock &MBB, MachineInstr *I,
- unsigned PhysReg, bool OnlyVirtRegs = false);
-
- /// assignVirtToPhysReg - This method updates local state so that we know
- /// that PhysReg is the proper container for VirtReg now. The physical
- /// register must not be used for anything else when this is called.
- ///
- void assignVirtToPhysReg(unsigned VirtReg, unsigned PhysReg);
-
- /// isPhysRegAvailable - Return true if the specified physical register is
- /// free and available for use. This also includes checking to see if
- /// aliased registers are all free...
- ///
- bool isPhysRegAvailable(unsigned PhysReg) const;
-
- /// getFreeReg - Look to see if there is a free register available in the
- /// specified register class. If not, return 0.
- ///
- unsigned getFreeReg(const TargetRegisterClass *RC);
-
- /// getReg - Find a physical register to hold the specified virtual
- /// register. If all compatible physical registers are used, this method
- /// spills the last used virtual register to the stack, and uses that
- /// register. If NoFree is true, that means the caller knows there isn't
- /// a free register, do not call getFreeReg().
- unsigned getReg(MachineBasicBlock &MBB, MachineInstr *MI,
- unsigned VirtReg, bool NoFree = false);
-
- /// reloadVirtReg - This method transforms the specified virtual
- /// register use to refer to a physical register. This method may do this
- /// in one of several ways: if the register is available in a physical
- /// register already, it uses that physical register. If the value is not
- /// in a physical register, and if there are physical registers available,
- /// it loads it into a register: PhysReg if that is an available physical
- /// register, otherwise any physical register of the right class.
- /// If register pressure is high, and it is possible, it tries to fold the
- /// load of the virtual register into the instruction itself. It avoids
- /// doing this if register pressure is low to improve the chance that
- /// subsequent instructions can use the reloaded value. This method
- /// returns the modified instruction.
- ///
- MachineInstr *reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI,
- unsigned OpNum, SmallSet<unsigned, 4> &RRegs,
- unsigned PhysReg);
-
- /// ComputeLocalLiveness - Computes liveness of registers within a basic
- /// block, setting the killed/dead flags as appropriate.
- void ComputeLocalLiveness(MachineBasicBlock& MBB);
-
- void reloadPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I,
- unsigned PhysReg);
- };
- char RALocal::ID = 0;
-}
-
-/// getStackSpaceFor - This allocates space for the specified virtual register
-/// to be held on the stack.
-int RALocal::getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC) {
- // Find the location Reg would belong...
- int SS = StackSlotForVirtReg[VirtReg];
- if (SS != -1)
- return SS; // Already has space allocated?
-
- // Allocate a new stack object for this spill location...
- int FrameIdx = MF->getFrameInfo()->CreateSpillStackObject(RC->getSize(),
- RC->getAlignment());
-
- // Assign the slot...
- StackSlotForVirtReg[VirtReg] = FrameIdx;
- return FrameIdx;
-}
-
-
-/// removePhysReg - This method marks the specified physical register as no
-/// longer being in use.
-///
-void RALocal::removePhysReg(unsigned PhysReg) {
- PhysRegsUsed[PhysReg] = -1; // PhyReg no longer used
-
- std::vector<unsigned>::iterator It =
- std::find(PhysRegsUseOrder.begin(), PhysRegsUseOrder.end(), PhysReg);
- if (It != PhysRegsUseOrder.end())
- PhysRegsUseOrder.erase(It);
-}
-
-
-/// spillVirtReg - This method spills the value specified by PhysReg into the
-/// virtual register slot specified by VirtReg. It then updates the RA data
-/// structures to indicate the fact that PhysReg is now available.
-///
-void RALocal::spillVirtReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned VirtReg, unsigned PhysReg) {
- assert(VirtReg && "Spilling a physical register is illegal!"
- " Must not have appropriate kill for the register or use exists beyond"
- " the intended one.");
- DEBUG(dbgs() << " Spilling register " << TRI->getName(PhysReg)
- << " containing %reg" << VirtReg);
-
- if (!isVirtRegModified(VirtReg)) {
- DEBUG(dbgs() << " which has not been modified, so no store necessary!");
- std::pair<MachineInstr*, unsigned> &LastUse = getVirtRegLastUse(VirtReg);
- if (LastUse.first)
- LastUse.first->getOperand(LastUse.second).setIsKill();
- } else {
- // Otherwise, there is a virtual register corresponding to this physical
- // register. We only need to spill it into its stack slot if it has been
- // modified.
- const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(VirtReg);
- int FrameIndex = getStackSpaceFor(VirtReg, RC);
- DEBUG(dbgs() << " to stack slot #" << FrameIndex);
- // If the instruction reads the register that's spilled, (e.g. this can
- // happen if it is a move to a physical register), then the spill
- // instruction is not a kill.
- bool isKill = !(I != MBB.end() && I->readsRegister(PhysReg));
- TII->storeRegToStackSlot(MBB, I, PhysReg, isKill, FrameIndex, RC);
- ++NumStores; // Update statistics
- }
-
- getVirt2PhysRegMapSlot(VirtReg) = 0; // VirtReg no longer available
-
- DEBUG(dbgs() << '\n');
- removePhysReg(PhysReg);
-}
-
-
-/// spillPhysReg - This method spills the specified physical register into the
-/// virtual register slot associated with it. If OnlyVirtRegs is set to true,
-/// then the request is ignored if the physical register does not contain a
-/// virtual register.
-///
-void RALocal::spillPhysReg(MachineBasicBlock &MBB, MachineInstr *I,
- unsigned PhysReg, bool OnlyVirtRegs) {
- if (PhysRegsUsed[PhysReg] != -1) { // Only spill it if it's used!
- assert(PhysRegsUsed[PhysReg] != -2 && "Non allocable reg used!");
- if (PhysRegsUsed[PhysReg] || !OnlyVirtRegs)
- spillVirtReg(MBB, I, PhysRegsUsed[PhysReg], PhysReg);
- } else {
- // If the selected register aliases any other registers, we must make
- // sure that one of the aliases isn't alive.
- for (const unsigned *AliasSet = TRI->getAliasSet(PhysReg);
- *AliasSet; ++AliasSet)
- if (PhysRegsUsed[*AliasSet] != -1 && // Spill aliased register.
- PhysRegsUsed[*AliasSet] != -2) // If allocatable.
- if (PhysRegsUsed[*AliasSet])
- spillVirtReg(MBB, I, PhysRegsUsed[*AliasSet], *AliasSet);
- }
-}
-
-
-/// assignVirtToPhysReg - This method updates local state so that we know
-/// that PhysReg is the proper container for VirtReg now. The physical
-/// register must not be used for anything else when this is called.
-///
-void RALocal::assignVirtToPhysReg(unsigned VirtReg, unsigned PhysReg) {
- assert(PhysRegsUsed[PhysReg] == -1 && "Phys reg already assigned!");
- // Update information to note the fact that this register was just used, and
- // it holds VirtReg.
- PhysRegsUsed[PhysReg] = VirtReg;
- getVirt2PhysRegMapSlot(VirtReg) = PhysReg;
- AddToPhysRegsUseOrder(PhysReg); // New use of PhysReg
-}
-
-
-/// isPhysRegAvailable - Return true if the specified physical register is free
-/// and available for use. This also includes checking to see if aliased
-/// registers are all free...
-///
-bool RALocal::isPhysRegAvailable(unsigned PhysReg) const {
- if (PhysRegsUsed[PhysReg] != -1) return false;
-
- // If the selected register aliases any other allocated registers, it is
- // not free!
- for (const unsigned *AliasSet = TRI->getAliasSet(PhysReg);
- *AliasSet; ++AliasSet)
- if (PhysRegsUsed[*AliasSet] >= 0) // Aliased register in use?
- return false; // Can't use this reg then.
- return true;
-}
-
-
-/// getFreeReg - Look to see if there is a free register available in the
-/// specified register class. If not, return 0.
-///
-unsigned RALocal::getFreeReg(const TargetRegisterClass *RC) {
- // Get iterators defining the range of registers that are valid to allocate in
- // this class, which also specifies the preferred allocation order.
- TargetRegisterClass::iterator RI = RC->allocation_order_begin(*MF);
- TargetRegisterClass::iterator RE = RC->allocation_order_end(*MF);
-
- for (; RI != RE; ++RI)
- if (isPhysRegAvailable(*RI)) { // Is reg unused?
- assert(*RI != 0 && "Cannot use register!");
- return *RI; // Found an unused register!
- }
- return 0;
-}
-
-
-/// getReg - Find a physical register to hold the specified virtual
-/// register. If all compatible physical registers are used, this method spills
-/// the last used virtual register to the stack, and uses that register.
-///
-unsigned RALocal::getReg(MachineBasicBlock &MBB, MachineInstr *I,
- unsigned VirtReg, bool NoFree) {
- const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(VirtReg);
-
- // First check to see if we have a free register of the requested type...
- unsigned PhysReg = NoFree ? 0 : getFreeReg(RC);
-
- // If we didn't find an unused register, scavenge one now!
- if (PhysReg == 0) {
- assert(!PhysRegsUseOrder.empty() && "No allocated registers??");
-
- // Loop over all of the preallocated registers from the least recently used
- // to the most recently used. When we find one that is capable of holding
- // our register, use it.
- for (unsigned i = 0; PhysReg == 0; ++i) {
- assert(i != PhysRegsUseOrder.size() &&
- "Couldn't find a register of the appropriate class!");
-
- unsigned R = PhysRegsUseOrder[i];
-
- // We can only use this register if it holds a virtual register (ie, it
- // can be spilled). Do not use it if it is an explicitly allocated
- // physical register!
- assert(PhysRegsUsed[R] != -1 &&
- "PhysReg in PhysRegsUseOrder, but is not allocated?");
- if (PhysRegsUsed[R] && PhysRegsUsed[R] != -2) {
- // If the current register is compatible, use it.
- if (RC->contains(R)) {
- PhysReg = R;
- break;
- } else {
- // If one of the registers aliased to the current register is
- // compatible, use it.
- for (const unsigned *AliasIt = TRI->getAliasSet(R);
- *AliasIt; ++AliasIt) {
- if (RC->contains(*AliasIt) &&
- // If this is pinned down for some reason, don't use it. For
- // example, if CL is pinned, and we run across CH, don't use
- // CH as justification for using scavenging ECX (which will
- // fail).
- PhysRegsUsed[*AliasIt] != 0 &&
-
- // Make sure the register is allocatable. Don't allocate SIL on
- // x86-32.
- PhysRegsUsed[*AliasIt] != -2) {
- PhysReg = *AliasIt; // Take an aliased register
- break;
- }
- }
- }
- }
- }
-
- assert(PhysReg && "Physical register not assigned!?!?");
-
- // At this point PhysRegsUseOrder[i] is the least recently used register of
- // compatible register class. Spill it to memory and reap its remains.
- spillPhysReg(MBB, I, PhysReg);
- }
-
- // Now that we know which register we need to assign this to, do it now!
- assignVirtToPhysReg(VirtReg, PhysReg);
- return PhysReg;
-}
-
-
-/// reloadVirtReg - This method transforms the specified virtual
-/// register use to refer to a physical register. This method may do this in
-/// one of several ways: if the register is available in a physical register
-/// already, it uses that physical register. If the value is not in a physical
-/// register, and if there are physical registers available, it loads it into a
-/// register: PhysReg if that is an available physical register, otherwise any
-/// register. If register pressure is high, and it is possible, it tries to
-/// fold the load of the virtual register into the instruction itself. It
-/// avoids doing this if register pressure is low to improve the chance that
-/// subsequent instructions can use the reloaded value. This method returns
-/// the modified instruction.
-///
-MachineInstr *RALocal::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI,
- unsigned OpNum,
- SmallSet<unsigned, 4> &ReloadedRegs,
- unsigned PhysReg) {
- unsigned VirtReg = MI->getOperand(OpNum).getReg();
-
- // If the virtual register is already available, just update the instruction
- // and return.
- if (unsigned PR = getVirt2PhysRegMapSlot(VirtReg)) {
- MI->getOperand(OpNum).setReg(PR); // Assign the input register
- if (!MI->isDebugValue()) {
- // Do not do these for DBG_VALUE as they can affect codegen.
- MarkPhysRegRecentlyUsed(PR); // Already have this value available!
- getVirtRegLastUse(VirtReg) = std::make_pair(MI, OpNum);
- }
- return MI;
- }
-
- // Otherwise, we need to fold it into the current instruction, or reload it.
- // If we have registers available to hold the value, use them.
- const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(VirtReg);
- // If we already have a PhysReg (this happens when the instruction is a
- // reg-to-reg copy with a PhysReg destination) use that.
- if (!PhysReg || !TargetRegisterInfo::isPhysicalRegister(PhysReg) ||
- !isPhysRegAvailable(PhysReg))
- PhysReg = getFreeReg(RC);
- int FrameIndex = getStackSpaceFor(VirtReg, RC);
-
- if (PhysReg) { // Register is available, allocate it!
- assignVirtToPhysReg(VirtReg, PhysReg);
- } else { // No registers available.
- // Force some poor hapless value out of the register file to
- // make room for the new register, and reload it.
- PhysReg = getReg(MBB, MI, VirtReg, true);
- }
-
- markVirtRegModified(VirtReg, false); // Note that this reg was just reloaded
-
- DEBUG(dbgs() << " Reloading %reg" << VirtReg << " into "
- << TRI->getName(PhysReg) << "\n");
-
- // Add move instruction(s)
- TII->loadRegFromStackSlot(MBB, MI, PhysReg, FrameIndex, RC);
- ++NumLoads; // Update statistics
-
- MF->getRegInfo().setPhysRegUsed(PhysReg);
- MI->getOperand(OpNum).setReg(PhysReg); // Assign the input register
- getVirtRegLastUse(VirtReg) = std::make_pair(MI, OpNum);
-
- if (!ReloadedRegs.insert(PhysReg)) {
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "Ran out of registers during register allocation!";
- if (MI->isInlineAsm()) {
- Msg << "\nPlease check your inline asm statement for invalid "
- << "constraints:\n";
- MI->print(Msg, TM);
- }
- llvm_report_error(Msg.str());
- }
- for (const unsigned *SubRegs = TRI->getSubRegisters(PhysReg);
- *SubRegs; ++SubRegs) {
- if (!ReloadedRegs.insert(*SubRegs)) {
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "Ran out of registers during register allocation!";
- if (MI->isInlineAsm()) {
- Msg << "\nPlease check your inline asm statement for invalid "
- << "constraints:\n";
- MI->print(Msg, TM);
- }
- llvm_report_error(Msg.str());
- }
- }
-
- return MI;
-}
-
-/// isReadModWriteImplicitKill - True if this is an implicit kill for a
-/// read/mod/write register, i.e. update partial register.
-static bool isReadModWriteImplicitKill(MachineInstr *MI, unsigned Reg) {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand& MO = MI->getOperand(i);
- if (MO.isReg() && MO.getReg() == Reg && MO.isImplicit() &&
- MO.isDef() && !MO.isDead())
- return true;
- }
- return false;
-}
-
-/// isReadModWriteImplicitDef - True if this is an implicit def for a
-/// read/mod/write register, i.e. update partial register.
-static bool isReadModWriteImplicitDef(MachineInstr *MI, unsigned Reg) {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand& MO = MI->getOperand(i);
- if (MO.isReg() && MO.getReg() == Reg && MO.isImplicit() &&
- !MO.isDef() && MO.isKill())
- return true;
- }
- return false;
-}
-
- // precedes - Helper function to determine whether MachineInstr A
-// precedes MachineInstr B within the same MBB.
-static bool precedes(MachineBasicBlock::iterator A,
- MachineBasicBlock::iterator B) {
- if (A == B)
- return false;
-
- MachineBasicBlock::iterator I = A->getParent()->begin();
- while (I != A->getParent()->end()) {
- if (I == A)
- return true;
- else if (I == B)
- return false;
-
- ++I;
- }
-
- return false;
-}
-
-/// ComputeLocalLiveness - Computes liveness of registers within a basic
-/// block, setting the killed/dead flags as appropriate.
-void RALocal::ComputeLocalLiveness(MachineBasicBlock& MBB) {
- MachineRegisterInfo& MRI = MBB.getParent()->getRegInfo();
- // Keep track of the most recently seen previous use or def of each reg,
- // so that we can update them with dead/kill markers.
- DenseMap<unsigned, std::pair<MachineInstr*, unsigned> > LastUseDef;
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I) {
- if (I->isDebugValue())
- continue;
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
- MachineOperand& MO = I->getOperand(i);
- // Uses don't trigger any flags, but we need to save
- // them for later. Also, we have to process these
- // _before_ processing the defs, since an instr
- // uses regs before it defs them.
- if (MO.isReg() && MO.getReg() && MO.isUse()) {
- LastUseDef[MO.getReg()] = std::make_pair(I, i);
-
-
- if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) continue;
-
- const unsigned* Aliases = TRI->getAliasSet(MO.getReg());
- if (Aliases) {
- while (*Aliases) {
- DenseMap<unsigned, std::pair<MachineInstr*, unsigned> >::iterator
- alias = LastUseDef.find(*Aliases);
-
- if (alias != LastUseDef.end() && alias->second.first != I)
- LastUseDef[*Aliases] = std::make_pair(I, i);
-
- ++Aliases;
- }
- }
- }
- }
-
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
- MachineOperand& MO = I->getOperand(i);
- // Defs other than 2-addr redefs _do_ trigger flag changes:
- // - A def followed by a def is dead
- // - A use followed by a def is a kill
- if (MO.isReg() && MO.getReg() && MO.isDef()) {
- DenseMap<unsigned, std::pair<MachineInstr*, unsigned> >::iterator
- last = LastUseDef.find(MO.getReg());
- if (last != LastUseDef.end()) {
- // Check if this is a two address instruction. If so, then
- // the def does not kill the use.
- if (last->second.first == I &&
- I->isRegTiedToUseOperand(i))
- continue;
-
- MachineOperand& lastUD =
- last->second.first->getOperand(last->second.second);
- if (lastUD.isDef())
- lastUD.setIsDead(true);
- else
- lastUD.setIsKill(true);
- }
-
- LastUseDef[MO.getReg()] = std::make_pair(I, i);
- }
- }
- }
-
- // Live-out (of the function) registers contain return values of the function,
- // so we need to make sure they are alive at return time.
- if (!MBB.empty() && MBB.back().getDesc().isReturn()) {
- MachineInstr* Ret = &MBB.back();
- for (MachineRegisterInfo::liveout_iterator
- I = MF->getRegInfo().liveout_begin(),
- E = MF->getRegInfo().liveout_end(); I != E; ++I)
- if (!Ret->readsRegister(*I)) {
- Ret->addOperand(MachineOperand::CreateReg(*I, false, true));
- LastUseDef[*I] = std::make_pair(Ret, Ret->getNumOperands()-1);
- }
- }
-
- // Finally, loop over the final use/def of each reg
- // in the block and determine if it is dead.
- for (DenseMap<unsigned, std::pair<MachineInstr*, unsigned> >::iterator
- I = LastUseDef.begin(), E = LastUseDef.end(); I != E; ++I) {
- MachineInstr* MI = I->second.first;
- unsigned idx = I->second.second;
- MachineOperand& MO = MI->getOperand(idx);
-
- bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(MO.getReg());
-
- // A crude approximation of "live-out" calculation
- bool usedOutsideBlock = isPhysReg ? false :
- UsedInMultipleBlocks.test(MO.getReg() -
- TargetRegisterInfo::FirstVirtualRegister);
- if (!isPhysReg && !usedOutsideBlock) {
- // DBG_VALUE complicates this: if the only refs of a register outside
- // this block are DBG_VALUE, we can't keep the reg live just for that,
- // as it will cause the reg to be spilled at the end of this block when
- // it wouldn't have been otherwise. Nullify the DBG_VALUEs when that
- // happens.
- bool UsedByDebugValueOnly = false;
- for (MachineRegisterInfo::reg_iterator UI = MRI.reg_begin(MO.getReg()),
- UE = MRI.reg_end(); UI != UE; ++UI)
- // Two cases:
- // - used in another block
- // - used in the same block before it is defined (loop)
- if (UI->getParent() != &MBB ||
- (MO.isDef() && UI.getOperand().isUse() && precedes(&*UI, MI))) {
- if (UI->isDebugValue()) {
- UsedByDebugValueOnly = true;
- continue;
- }
- // A non-DBG_VALUE use means we can leave DBG_VALUE uses alone.
- UsedInMultipleBlocks.set(MO.getReg() -
- TargetRegisterInfo::FirstVirtualRegister);
- usedOutsideBlock = true;
- UsedByDebugValueOnly = false;
- break;
- }
- if (UsedByDebugValueOnly)
- for (MachineRegisterInfo::reg_iterator UI = MRI.reg_begin(MO.getReg()),
- UE = MRI.reg_end(); UI != UE; ++UI)
- if (UI->isDebugValue() &&
- (UI->getParent() != &MBB ||
- (MO.isDef() && precedes(&*UI, MI))))
- UI.getOperand().setReg(0U);
- }
-
- // Physical registers and those that are not live-out of the block
- // are killed/dead at their last use/def within this block.
- if (isPhysReg || !usedOutsideBlock) {
- if (MO.isUse()) {
- // Don't mark uses that are tied to defs as kills.
- if (!MI->isRegTiedToDefOperand(idx))
- MO.setIsKill(true);
- } else
- MO.setIsDead(true);
- }
- }
-}
-
-void RALocal::AllocateBasicBlock(MachineBasicBlock &MBB) {
- // loop over each instruction
- MachineBasicBlock::iterator MII = MBB.begin();
-
- DEBUG({
- const BasicBlock *LBB = MBB.getBasicBlock();
- if (LBB)
- dbgs() << "\nStarting RegAlloc of BB: " << LBB->getName();
- });
-
- // Add live-in registers as active.
- for (MachineBasicBlock::livein_iterator I = MBB.livein_begin(),
- E = MBB.livein_end(); I != E; ++I) {
- unsigned Reg = *I;
- MF->getRegInfo().setPhysRegUsed(Reg);
- PhysRegsUsed[Reg] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(Reg);
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
- *SubRegs; ++SubRegs) {
- if (PhysRegsUsed[*SubRegs] != -2) {
- AddToPhysRegsUseOrder(*SubRegs);
- PhysRegsUsed[*SubRegs] = 0; // It is free and reserved now
- MF->getRegInfo().setPhysRegUsed(*SubRegs);
- }
- }
- }
-
- ComputeLocalLiveness(MBB);
-
- // Otherwise, sequentially allocate each instruction in the MBB.
- while (MII != MBB.end()) {
- MachineInstr *MI = MII++;
- const TargetInstrDesc &TID = MI->getDesc();
- DEBUG({
- dbgs() << "\nStarting RegAlloc of: " << *MI;
- dbgs() << " Regs have values: ";
- for (unsigned i = 0; i != TRI->getNumRegs(); ++i)
- if (PhysRegsUsed[i] != -1 && PhysRegsUsed[i] != -2)
- dbgs() << "[" << TRI->getName(i)
- << ",%reg" << PhysRegsUsed[i] << "] ";
- dbgs() << '\n';
- });
-
- // Determine whether this is a copy instruction. The cases where the
- // source or destination are phys regs are handled specially.
- unsigned SrcCopyReg, DstCopyReg, SrcCopySubReg, DstCopySubReg;
- unsigned SrcCopyPhysReg = 0U;
- bool isCopy = TII->isMoveInstr(*MI, SrcCopyReg, DstCopyReg,
- SrcCopySubReg, DstCopySubReg);
- if (isCopy && TargetRegisterInfo::isVirtualRegister(SrcCopyReg))
- SrcCopyPhysReg = getVirt2PhysRegMapSlot(SrcCopyReg);
-
- // Loop over the implicit uses, making sure that they are at the head of the
- // use order list, so they don't get reallocated.
- if (TID.ImplicitUses) {
- for (const unsigned *ImplicitUses = TID.ImplicitUses;
- *ImplicitUses; ++ImplicitUses)
- MarkPhysRegRecentlyUsed(*ImplicitUses);
- }
-
- SmallVector<unsigned, 8> Kills;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand& MO = MI->getOperand(i);
- if (MO.isReg() && MO.isKill()) {
- if (!MO.isImplicit())
- Kills.push_back(MO.getReg());
- else if (!isReadModWriteImplicitKill(MI, MO.getReg()))
- // These are extra physical register kills when a sub-register
- // is defined (def of a sub-register is a read/mod/write of the
- // larger registers). Ignore.
- Kills.push_back(MO.getReg());
- }
- }
-
- // If any physical regs are earlyclobber, spill any value they might
- // have in them, then mark them unallocatable.
- // If any virtual regs are earlyclobber, allocate them now (before
- // freeing inputs that are killed).
- if (MI->isInlineAsm()) {
- for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
- MachineOperand& MO = MI->getOperand(i);
- if (MO.isReg() && MO.isDef() && MO.isEarlyClobber() &&
- MO.getReg()) {
- if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
- unsigned DestVirtReg = MO.getReg();
- unsigned DestPhysReg;
-
- // If DestVirtReg already has a value, use it.
- if (!(DestPhysReg = getVirt2PhysRegMapSlot(DestVirtReg)))
- DestPhysReg = getReg(MBB, MI, DestVirtReg);
- MF->getRegInfo().setPhysRegUsed(DestPhysReg);
- markVirtRegModified(DestVirtReg);
- getVirtRegLastUse(DestVirtReg) =
- std::make_pair((MachineInstr*)0, 0);
- DEBUG(dbgs() << " Assigning " << TRI->getName(DestPhysReg)
- << " to %reg" << DestVirtReg << "\n");
- MO.setReg(DestPhysReg); // Assign the earlyclobber register
- } else {
- unsigned Reg = MO.getReg();
- if (PhysRegsUsed[Reg] == -2) continue; // Something like ESP.
- // These are extra physical register defs when a sub-register
- // is defined (def of a sub-register is a read/mod/write of the
- // larger registers). Ignore.
- if (isReadModWriteImplicitDef(MI, MO.getReg())) continue;
-
- MF->getRegInfo().setPhysRegUsed(Reg);
- spillPhysReg(MBB, MI, Reg, true); // Spill any existing value in reg
- PhysRegsUsed[Reg] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(Reg);
-
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
- *SubRegs; ++SubRegs) {
- if (PhysRegsUsed[*SubRegs] != -2) {
- MF->getRegInfo().setPhysRegUsed(*SubRegs);
- PhysRegsUsed[*SubRegs] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(*SubRegs);
- }
- }
- }
- }
- }
- }
-
- // If a DBG_VALUE says something is located in a spilled register,
- // change the DBG_VALUE to be undef, which prevents the register
- // from being reloaded here. Doing that would change the generated
- // code, unless another use immediately follows this instruction.
- if (MI->isDebugValue() &&
- MI->getNumOperands()==3 && MI->getOperand(0).isReg()) {
- unsigned VirtReg = MI->getOperand(0).getReg();
- if (VirtReg && TargetRegisterInfo::isVirtualRegister(VirtReg) &&
- !getVirt2PhysRegMapSlot(VirtReg))
- MI->getOperand(0).setReg(0U);
- }
-
- // Get the used operands into registers. This has the potential to spill
- // incoming values if we are out of registers. Note that we completely
- // ignore physical register uses here. We assume that if an explicit
- // physical register is referenced by the instruction, that it is guaranteed
- // to be live-in, or the input is badly hosed.
- //
- SmallSet<unsigned, 4> ReloadedRegs;
- for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
- MachineOperand& MO = MI->getOperand(i);
- // here we are looking for only used operands (never def&use)
- if (MO.isReg() && !MO.isDef() && MO.getReg() && !MO.isImplicit() &&
- TargetRegisterInfo::isVirtualRegister(MO.getReg()))
- MI = reloadVirtReg(MBB, MI, i, ReloadedRegs,
- isCopy ? DstCopyReg : 0);
- }
-
- // If this instruction is the last user of this register, kill the
- // value, freeing the register being used, so it doesn't need to be
- // spilled to memory.
- //
- for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
- unsigned VirtReg = Kills[i];
- unsigned PhysReg = VirtReg;
- if (TargetRegisterInfo::isVirtualRegister(VirtReg)) {
- // If the virtual register was never materialized into a register, it
- // might not be in the map, but it won't hurt to zero it out anyway.
- unsigned &PhysRegSlot = getVirt2PhysRegMapSlot(VirtReg);
- PhysReg = PhysRegSlot;
- PhysRegSlot = 0;
- } else if (PhysRegsUsed[PhysReg] == -2) {
- // Unallocatable register dead, ignore.
- continue;
- } else {
- assert((!PhysRegsUsed[PhysReg] || PhysRegsUsed[PhysReg] == -1) &&
- "Silently clearing a virtual register?");
- }
-
- if (PhysReg) {
- DEBUG(dbgs() << " Last use of " << TRI->getName(PhysReg)
- << "[%reg" << VirtReg <<"], removing it from live set\n");
- removePhysReg(PhysReg);
- for (const unsigned *SubRegs = TRI->getSubRegisters(PhysReg);
- *SubRegs; ++SubRegs) {
- if (PhysRegsUsed[*SubRegs] != -2) {
- DEBUG(dbgs() << " Last use of "
- << TRI->getName(*SubRegs) << "[%reg" << VirtReg
- <<"], removing it from live set\n");
- removePhysReg(*SubRegs);
- }
- }
- }
- }
-
- // Loop over all of the operands of the instruction, spilling registers that
- // are defined, and marking explicit destinations in the PhysRegsUsed map.
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand& MO = MI->getOperand(i);
- if (MO.isReg() && MO.isDef() && !MO.isImplicit() && MO.getReg() &&
- !MO.isEarlyClobber() &&
- TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
- unsigned Reg = MO.getReg();
- if (PhysRegsUsed[Reg] == -2) continue; // Something like ESP.
- // These are extra physical register defs when a sub-register
- // is defined (def of a sub-register is a read/mod/write of the
- // larger registers). Ignore.
- if (isReadModWriteImplicitDef(MI, MO.getReg())) continue;
-
- MF->getRegInfo().setPhysRegUsed(Reg);
- spillPhysReg(MBB, MI, Reg, true); // Spill any existing value in reg
- PhysRegsUsed[Reg] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(Reg);
-
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
- *SubRegs; ++SubRegs) {
- if (PhysRegsUsed[*SubRegs] != -2) {
- MF->getRegInfo().setPhysRegUsed(*SubRegs);
- PhysRegsUsed[*SubRegs] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(*SubRegs);
- }
- }
- }
- }
-
- // Loop over the implicit defs, spilling them as well.
- if (TID.ImplicitDefs) {
- for (const unsigned *ImplicitDefs = TID.ImplicitDefs;
- *ImplicitDefs; ++ImplicitDefs) {
- unsigned Reg = *ImplicitDefs;
- if (PhysRegsUsed[Reg] != -2) {
- spillPhysReg(MBB, MI, Reg, true);
- AddToPhysRegsUseOrder(Reg);
- PhysRegsUsed[Reg] = 0; // It is free and reserved now
- }
- MF->getRegInfo().setPhysRegUsed(Reg);
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
- *SubRegs; ++SubRegs) {
- if (PhysRegsUsed[*SubRegs] != -2) {
- AddToPhysRegsUseOrder(*SubRegs);
- PhysRegsUsed[*SubRegs] = 0; // It is free and reserved now
- MF->getRegInfo().setPhysRegUsed(*SubRegs);
- }
- }
- }
- }
-
- SmallVector<unsigned, 8> DeadDefs;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand& MO = MI->getOperand(i);
- if (MO.isReg() && MO.isDead())
- DeadDefs.push_back(MO.getReg());
- }
-
- // Okay, we have allocated all of the source operands and spilled any values
- // that would be destroyed by defs of this instruction. Loop over the
- // explicit defs and assign them to a register, spilling incoming values if
- // we need to scavenge a register.
- //
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand& MO = MI->getOperand(i);
- if (MO.isReg() && MO.isDef() && MO.getReg() &&
- !MO.isEarlyClobber() &&
- TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
- unsigned DestVirtReg = MO.getReg();
- unsigned DestPhysReg;
-
- // If DestVirtReg already has a value, use it.
- if (!(DestPhysReg = getVirt2PhysRegMapSlot(DestVirtReg))) {
- // If this is a copy try to reuse the input as the output;
- // that will make the copy go away.
- // If this is a copy, the source reg is a phys reg, and
- // that reg is available, use that phys reg for DestPhysReg.
- // If this is a copy, the source reg is a virtual reg, and
- // the phys reg that was assigned to that virtual reg is now
- // available, use that phys reg for DestPhysReg. (If it's now
- // available that means this was the last use of the source.)
- if (isCopy &&
- TargetRegisterInfo::isPhysicalRegister(SrcCopyReg) &&
- isPhysRegAvailable(SrcCopyReg)) {
- DestPhysReg = SrcCopyReg;
- assignVirtToPhysReg(DestVirtReg, DestPhysReg);
- } else if (isCopy &&
- TargetRegisterInfo::isVirtualRegister(SrcCopyReg) &&
- SrcCopyPhysReg && isPhysRegAvailable(SrcCopyPhysReg) &&
- MF->getRegInfo().getRegClass(DestVirtReg)->
- contains(SrcCopyPhysReg)) {
- DestPhysReg = SrcCopyPhysReg;
- assignVirtToPhysReg(DestVirtReg, DestPhysReg);
- } else
- DestPhysReg = getReg(MBB, MI, DestVirtReg);
- }
- MF->getRegInfo().setPhysRegUsed(DestPhysReg);
- markVirtRegModified(DestVirtReg);
- getVirtRegLastUse(DestVirtReg) = std::make_pair((MachineInstr*)0, 0);
- DEBUG(dbgs() << " Assigning " << TRI->getName(DestPhysReg)
- << " to %reg" << DestVirtReg << "\n");
- MO.setReg(DestPhysReg); // Assign the output register
- }
- }
-
- // If this instruction defines any registers that are immediately dead,
- // kill them now.
- //
- for (unsigned i = 0, e = DeadDefs.size(); i != e; ++i) {
- unsigned VirtReg = DeadDefs[i];
- unsigned PhysReg = VirtReg;
- if (TargetRegisterInfo::isVirtualRegister(VirtReg)) {
- unsigned &PhysRegSlot = getVirt2PhysRegMapSlot(VirtReg);
- PhysReg = PhysRegSlot;
- assert(PhysReg != 0);
- PhysRegSlot = 0;
- } else if (PhysRegsUsed[PhysReg] == -2) {
- // Unallocatable register dead, ignore.
- continue;
- }
-
- if (PhysReg) {
- DEBUG(dbgs() << " Register " << TRI->getName(PhysReg)
- << " [%reg" << VirtReg
- << "] is never used, removing it from live set\n");
- removePhysReg(PhysReg);
- for (const unsigned *AliasSet = TRI->getAliasSet(PhysReg);
- *AliasSet; ++AliasSet) {
- if (PhysRegsUsed[*AliasSet] != -2) {
- DEBUG(dbgs() << " Register " << TRI->getName(*AliasSet)
- << " [%reg" << *AliasSet
- << "] is never used, removing it from live set\n");
- removePhysReg(*AliasSet);
- }
- }
- }
- }
-
- // Finally, if this is a noop copy instruction, zap it. (Except that if
- // the copy is dead, it must be kept to avoid messing up liveness info for
- // the register scavenger. See pr4100.)
- if (TII->isMoveInstr(*MI, SrcCopyReg, DstCopyReg,
- SrcCopySubReg, DstCopySubReg) &&
- SrcCopyReg == DstCopyReg && DeadDefs.empty())
- MBB.erase(MI);
- }
-
- MachineBasicBlock::iterator MI = MBB.getFirstTerminator();
-
- // Spill all physical registers holding virtual registers now.
- for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i)
- if (PhysRegsUsed[i] != -1 && PhysRegsUsed[i] != -2) {
- if (unsigned VirtReg = PhysRegsUsed[i])
- spillVirtReg(MBB, MI, VirtReg, i);
- else
- removePhysReg(i);
- }
-
-#if 0
- // This checking code is very expensive.
- bool AllOk = true;
- for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
- e = MF->getRegInfo().getLastVirtReg(); i <= e; ++i)
- if (unsigned PR = Virt2PhysRegMap[i]) {
- cerr << "Register still mapped: " << i << " -> " << PR << "\n";
- AllOk = false;
- }
- assert(AllOk && "Virtual registers still in phys regs?");
-#endif
-
- // Clear any physical registers which appear live at the end of the basic
- // block but which do not hold any virtual registers, e.g., the stack
- // pointer.
- PhysRegsUseOrder.clear();
-}
-
-/// runOnMachineFunction - Register allocate the whole function
-///
-bool RALocal::runOnMachineFunction(MachineFunction &Fn) {
- DEBUG(dbgs() << "Machine Function\n");
- MF = &Fn;
- TM = &Fn.getTarget();
- TRI = TM->getRegisterInfo();
- TII = TM->getInstrInfo();
-
- PhysRegsUsed.assign(TRI->getNumRegs(), -1);
-
- // At various places we want to efficiently check to see whether a register
- // is allocatable. To handle this, we mark all unallocatable registers as
- // being pinned down, permanently.
- {
- BitVector Allocable = TRI->getAllocatableSet(Fn);
- for (unsigned i = 0, e = Allocable.size(); i != e; ++i)
- if (!Allocable[i])
- PhysRegsUsed[i] = -2; // Mark the reg unallocatable.
- }
-
- // initialize the virtual->physical register map to have a 'null'
- // mapping for all virtual registers
- unsigned LastVirtReg = MF->getRegInfo().getLastVirtReg();
- StackSlotForVirtReg.grow(LastVirtReg);
- Virt2PhysRegMap.grow(LastVirtReg);
- Virt2LastUseMap.grow(LastVirtReg);
- VirtRegModified.resize(LastVirtReg+1-TargetRegisterInfo::FirstVirtualRegister);
- UsedInMultipleBlocks.resize(LastVirtReg+1-TargetRegisterInfo::FirstVirtualRegister);
-
- // Loop over all of the basic blocks, eliminating virtual register references
- for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
- MBB != MBBe; ++MBB)
- AllocateBasicBlock(*MBB);
-
- StackSlotForVirtReg.clear();
- PhysRegsUsed.clear();
- VirtRegModified.clear();
- UsedInMultipleBlocks.clear();
- Virt2PhysRegMap.clear();
- Virt2LastUseMap.clear();
- return true;
-}
-
-FunctionPass *llvm::createLocalRegisterAllocator() {
- return new RALocal();
-}
diff --git a/libclamav/c++/llvm/lib/CodeGen/RegAllocPBQP.cpp b/libclamav/c++/llvm/lib/CodeGen/RegAllocPBQP.cpp
index 81cfd8f..61f337b 100644
--- a/libclamav/c++/llvm/lib/CodeGen/RegAllocPBQP.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/RegAllocPBQP.cpp
@@ -34,6 +34,8 @@
#include "PBQP/HeuristicSolver.h"
#include "PBQP/Graph.h"
#include "PBQP/Heuristics/Briggs.h"
+#include "RenderMachineFunction.h"
+#include "Splitter.h"
#include "VirtRegMap.h"
#include "VirtRegRewriter.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
@@ -65,6 +67,11 @@ pbqpCoalescing("pbqp-coalescing",
cl::desc("Attempt coalescing during PBQP register allocation."),
cl::init(false), cl::Hidden);
+static cl::opt<bool>
+pbqpPreSplitting("pbqp-pre-splitting",
+ cl::desc("Pre-splite before PBQP register allocation."),
+ cl::init(false), cl::Hidden);
+
namespace {
///
@@ -77,7 +84,7 @@ namespace {
static char ID;
/// Construct a PBQP register allocator.
- PBQPRegAlloc() : MachineFunctionPass(&ID) {}
+ PBQPRegAlloc() : MachineFunctionPass(ID) {}
/// Return the pass name.
virtual const char* getPassName() const {
@@ -96,7 +103,10 @@ namespace {
au.addPreserved<LiveStacks>();
au.addRequired<MachineLoopInfo>();
au.addPreserved<MachineLoopInfo>();
+ if (pbqpPreSplitting)
+ au.addRequired<LoopSplitter>();
au.addRequired<VirtRegMap>();
+ au.addRequired<RenderMachineFunction>();
MachineFunctionPass::getAnalysisUsage(au);
}
@@ -104,7 +114,15 @@ namespace {
virtual bool runOnMachineFunction(MachineFunction &MF);
private:
- typedef std::map<const LiveInterval*, unsigned> LI2NodeMap;
+
+ class LIOrdering {
+ public:
+ bool operator()(const LiveInterval *li1, const LiveInterval *li2) const {
+ return li1->reg < li2->reg;
+ }
+ };
+
+ typedef std::map<const LiveInterval*, unsigned, LIOrdering> LI2NodeMap;
typedef std::vector<const LiveInterval*> Node2LIMap;
typedef std::vector<unsigned> AllowedSet;
typedef std::vector<AllowedSet> AllowedSetMap;
@@ -112,7 +130,7 @@ namespace {
typedef std::pair<unsigned, unsigned> RegPair;
typedef std::map<RegPair, PBQP::PBQPNum> CoalesceMap;
- typedef std::set<LiveInterval*> LiveIntervalSet;
+ typedef std::set<LiveInterval*, LIOrdering> LiveIntervalSet;
typedef std::vector<PBQP::Graph::NodeItr> NodeVector;
@@ -122,6 +140,7 @@ namespace {
const TargetInstrInfo *tii;
const MachineLoopInfo *loopInfo;
MachineRegisterInfo *mri;
+ RenderMachineFunction *rmf;
LiveIntervals *lis;
LiveStacks *lss;
@@ -379,12 +398,14 @@ PBQPRegAlloc::CoalesceMap PBQPRegAlloc::findCoalesces() {
iItr != iEnd; ++iItr) {
const MachineInstr *instr = &*iItr;
- unsigned srcReg, dstReg, srcSubReg, dstSubReg;
// If this isn't a copy then continue to the next instruction.
- if (!tii->isMoveInstr(*instr, srcReg, dstReg, srcSubReg, dstSubReg))
+ if (!instr->isCopy())
continue;
+ unsigned srcReg = instr->getOperand(1).getReg();
+ unsigned dstReg = instr->getOperand(0).getReg();
+
// If the registers are already the same our job is nice and easy.
if (dstReg == srcReg)
continue;
@@ -396,28 +417,23 @@ PBQPRegAlloc::CoalesceMap PBQPRegAlloc::findCoalesces() {
if (srcRegIsPhysical && dstRegIsPhysical)
continue;
- // If it's a copy that includes a virtual register but the source and
- // destination classes differ then we can't coalesce, so continue with
- // the next instruction.
- const TargetRegisterClass *srcRegClass = srcRegIsPhysical ?
- tri->getPhysicalRegisterRegClass(srcReg) : mri->getRegClass(srcReg);
-
- const TargetRegisterClass *dstRegClass = dstRegIsPhysical ?
- tri->getPhysicalRegisterRegClass(dstReg) : mri->getRegClass(dstReg);
-
- if (srcRegClass != dstRegClass)
+ // If it's a copy that includes two virtual registers but the source and
+ // destination classes differ then we can't coalesce.
+ if (!srcRegIsPhysical && !dstRegIsPhysical &&
+ mri->getRegClass(srcReg) != mri->getRegClass(dstReg))
continue;
- // We also need any physical regs to be allocable, coalescing with
- // a non-allocable register is invalid.
- if (srcRegIsPhysical) {
+ // If one is physical and one is virtual, check that the physical is
+ // allocatable in the class of the virtual.
+ if (srcRegIsPhysical && !dstRegIsPhysical) {
+ const TargetRegisterClass *dstRegClass = mri->getRegClass(dstReg);
if (std::find(dstRegClass->allocation_order_begin(*mf),
dstRegClass->allocation_order_end(*mf), srcReg) ==
dstRegClass->allocation_order_end(*mf))
continue;
}
-
- if (dstRegIsPhysical) {
+ if (!srcRegIsPhysical && dstRegIsPhysical) {
+ const TargetRegisterClass *srcRegClass = mri->getRegClass(srcReg);
if (std::find(srcRegClass->allocation_order_begin(*mf),
srcRegClass->allocation_order_end(*mf), dstReg) ==
srcRegClass->allocation_order_end(*mf))
@@ -489,7 +505,7 @@ PBQPRegAlloc::CoalesceMap PBQPRegAlloc::findCoalesces() {
// did, but none of their definitions would prevent us from coalescing.
// We're good to go with the coalesce.
- float cBenefit = powf(10.0f, loopInfo->getLoopDepth(mbb)) / 5.0;
+ float cBenefit = std::pow(10.0f, (float)loopInfo->getLoopDepth(mbb)) / 5.0;
coalescesFound[RegPair(srcReg, dstReg)] = cBenefit;
coalescesFound[RegPair(dstReg, srcReg)] = cBenefit;
@@ -572,6 +588,8 @@ PBQP::Graph PBQPRegAlloc::constructPBQPProblem() {
// Resize allowedSets container appropriately.
allowedSets.resize(vregIntervalsToAlloc.size());
+ BitVector ReservedRegs = tri->getReservedRegs(*mf);
+
// Iterate over virtual register intervals to compute allowed sets...
for (unsigned node = 0; node < node2LI.size(); ++node) {
@@ -580,8 +598,12 @@ PBQP::Graph PBQPRegAlloc::constructPBQPProblem() {
const TargetRegisterClass *liRC = mri->getRegClass(li->reg);
// Start by assuming all allocable registers in the class are allowed...
- RegVector liAllowed(liRC->allocation_order_begin(*mf),
- liRC->allocation_order_end(*mf));
+ RegVector liAllowed;
+ TargetRegisterClass::iterator aob = liRC->allocation_order_begin(*mf);
+ TargetRegisterClass::iterator aoe = liRC->allocation_order_end(*mf);
+ for (TargetRegisterClass::iterator it = aob; it != aoe; ++it)
+ if (!ReservedRegs.test(*it))
+ liAllowed.push_back(*it);
// Eliminate the physical registers which overlap with this range, along
// with all their aliases.
@@ -740,9 +762,11 @@ bool PBQPRegAlloc::mapPBQPToRegAlloc(const PBQP::Solution &solution) {
const LiveInterval *spillInterval = node2LI[node];
double oldSpillWeight = spillInterval->weight;
SmallVector<LiveInterval*, 8> spillIs;
+ rmf->rememberUseDefs(spillInterval);
std::vector<LiveInterval*> newSpills =
lis->addIntervalsForSpills(*spillInterval, spillIs, loopInfo, *vrm);
addStackInterval(spillInterval, mri);
+ rmf->rememberSpills(spillInterval, newSpills);
(void) oldSpillWeight;
DEBUG(dbgs() << "VREG " << virtReg << " -> SPILLED (Cost: "
@@ -850,9 +874,11 @@ bool PBQPRegAlloc::runOnMachineFunction(MachineFunction &MF) {
lis = &getAnalysis<LiveIntervals>();
lss = &getAnalysis<LiveStacks>();
loopInfo = &getAnalysis<MachineLoopInfo>();
+ rmf = &getAnalysis<RenderMachineFunction>();
vrm = &getAnalysis<VirtRegMap>();
+
DEBUG(dbgs() << "PBQP Register Allocating for " << mf->getFunction()->getName() << "\n");
// Allocator main loop:
@@ -889,6 +915,8 @@ bool PBQPRegAlloc::runOnMachineFunction(MachineFunction &MF) {
// Finalise allocation, allocate empty ranges.
finalizeAlloc();
+ rmf->renderMachineFunction("After PBQP register allocation.", vrm);
+
vregIntervalsToAlloc.clear();
emptyVRegIntervals.clear();
li2Node.clear();
diff --git a/libclamav/c++/llvm/lib/CodeGen/RegisterCoalescer.cpp b/libclamav/c++/llvm/lib/CodeGen/RegisterCoalescer.cpp
index 1131e3d..02b5539 100644
--- a/libclamav/c++/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -16,6 +16,8 @@
#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Pass.h"
@@ -33,6 +35,159 @@ char RegisterCoalescer::ID = 0;
//
RegisterCoalescer::~RegisterCoalescer() {}
+unsigned CoalescerPair::compose(unsigned a, unsigned b) const {
+ if (!a) return b;
+ if (!b) return a;
+ return tri_.composeSubRegIndices(a, b);
+}
+
+bool CoalescerPair::isMoveInstr(const MachineInstr *MI,
+ unsigned &Src, unsigned &Dst,
+ unsigned &SrcSub, unsigned &DstSub) const {
+ if (MI->isCopy()) {
+ Dst = MI->getOperand(0).getReg();
+ DstSub = MI->getOperand(0).getSubReg();
+ Src = MI->getOperand(1).getReg();
+ SrcSub = MI->getOperand(1).getSubReg();
+ } else if (MI->isSubregToReg()) {
+ Dst = MI->getOperand(0).getReg();
+ DstSub = compose(MI->getOperand(0).getSubReg(), MI->getOperand(3).getImm());
+ Src = MI->getOperand(2).getReg();
+ SrcSub = MI->getOperand(2).getSubReg();
+ } else
+ return false;
+ return true;
+}
+
+bool CoalescerPair::setRegisters(const MachineInstr *MI) {
+ srcReg_ = dstReg_ = subIdx_ = 0;
+ newRC_ = 0;
+ flipped_ = crossClass_ = false;
+
+ unsigned Src, Dst, SrcSub, DstSub;
+ if (!isMoveInstr(MI, Src, Dst, SrcSub, DstSub))
+ return false;
+ partial_ = SrcSub || DstSub;
+
+ // If one register is a physreg, it must be Dst.
+ if (TargetRegisterInfo::isPhysicalRegister(Src)) {
+ if (TargetRegisterInfo::isPhysicalRegister(Dst))
+ return false;
+ std::swap(Src, Dst);
+ std::swap(SrcSub, DstSub);
+ flipped_ = true;
+ }
+
+ const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+
+ if (TargetRegisterInfo::isPhysicalRegister(Dst)) {
+ // Eliminate DstSub on a physreg.
+ if (DstSub) {
+ Dst = tri_.getSubReg(Dst, DstSub);
+ if (!Dst) return false;
+ DstSub = 0;
+ }
+
+ // Eliminate SrcSub by picking a corresponding Dst superregister.
+ if (SrcSub) {
+ Dst = tri_.getMatchingSuperReg(Dst, SrcSub, MRI.getRegClass(Src));
+ if (!Dst) return false;
+ SrcSub = 0;
+ } else if (!MRI.getRegClass(Src)->contains(Dst)) {
+ return false;
+ }
+ } else {
+ // Both registers are virtual.
+
+ // Both registers have subreg indices.
+ if (SrcSub && DstSub) {
+ // For now we only handle the case of identical indices in commensurate
+ // registers: Dreg:ssub_1 + Dreg:ssub_1 -> Dreg
+ // FIXME: Handle Qreg:ssub_3 + Dreg:ssub_1 as QReg:dsub_1 + Dreg.
+ if (SrcSub != DstSub)
+ return false;
+ const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);
+ const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
+ if (!getCommonSubClass(DstRC, SrcRC))
+ return false;
+ SrcSub = DstSub = 0;
+ }
+
+ // There can be no SrcSub.
+ if (SrcSub) {
+ std::swap(Src, Dst);
+ DstSub = SrcSub;
+ SrcSub = 0;
+ assert(!flipped_ && "Unexpected flip");
+ flipped_ = true;
+ }
+
+ // Find the new register class.
+ const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);
+ const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
+ if (DstSub)
+ newRC_ = tri_.getMatchingSuperRegClass(DstRC, SrcRC, DstSub);
+ else
+ newRC_ = getCommonSubClass(DstRC, SrcRC);
+ if (!newRC_)
+ return false;
+ crossClass_ = newRC_ != DstRC || newRC_ != SrcRC;
+ }
+ // Check our invariants
+ assert(TargetRegisterInfo::isVirtualRegister(Src) && "Src must be virtual");
+ assert(!(TargetRegisterInfo::isPhysicalRegister(Dst) && DstSub) &&
+ "Cannot have a physical SubIdx");
+ srcReg_ = Src;
+ dstReg_ = Dst;
+ subIdx_ = DstSub;
+ return true;
+}
+
+bool CoalescerPair::flip() {
+ if (subIdx_ || TargetRegisterInfo::isPhysicalRegister(dstReg_))
+ return false;
+ std::swap(srcReg_, dstReg_);
+ flipped_ = !flipped_;
+ return true;
+}
+
+bool CoalescerPair::isCoalescable(const MachineInstr *MI) const {
+ if (!MI)
+ return false;
+ unsigned Src, Dst, SrcSub, DstSub;
+ if (!isMoveInstr(MI, Src, Dst, SrcSub, DstSub))
+ return false;
+
+ // Find the virtual register that is srcReg_.
+ if (Dst == srcReg_) {
+ std::swap(Src, Dst);
+ std::swap(SrcSub, DstSub);
+ } else if (Src != srcReg_) {
+ return false;
+ }
+
+ // Now check that Dst matches dstReg_.
+ if (TargetRegisterInfo::isPhysicalRegister(dstReg_)) {
+ if (!TargetRegisterInfo::isPhysicalRegister(Dst))
+ return false;
+ assert(!subIdx_ && "Inconsistent CoalescerPair state.");
+ // DstSub could be set for a physreg from INSERT_SUBREG.
+ if (DstSub)
+ Dst = tri_.getSubReg(Dst, DstSub);
+ // Full copy of Src.
+ if (!SrcSub)
+ return dstReg_ == Dst;
+ // This is a partial register copy. Check that the parts match.
+ return tri_.getSubReg(dstReg_, SrcSub) == Dst;
+ } else {
+ // dstReg_ is virtual.
+ if (dstReg_ != Dst)
+ return false;
+ // Registers match, do the subregisters line up?
+ return compose(subIdx_, SrcSub) == DstSub;
+ }
+}
+
// Because of the way .a files work, we must force the SimpleRC
// implementation to be pulled in if the RegisterCoalescer classes are
// pulled in. Otherwise we run the risk of RegisterCoalescer being
diff --git a/libclamav/c++/llvm/lib/CodeGen/RegisterScavenging.cpp b/libclamav/c++/llvm/lib/CodeGen/RegisterScavenging.cpp
index 67bf209..a2580b8 100644
--- a/libclamav/c++/llvm/lib/CodeGen/RegisterScavenging.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/RegisterScavenging.cpp
@@ -21,7 +21,9 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
@@ -64,7 +66,7 @@ void RegScavenger::initRegState() {
return;
// Live-in registers are in use.
- for (MachineBasicBlock::const_livein_iterator I = MBB->livein_begin(),
+ for (MachineBasicBlock::livein_iterator I = MBB->livein_begin(),
E = MBB->livein_end(); I != E; ++I)
setUsed(*I);
@@ -136,8 +138,15 @@ void RegScavenger::forward() {
ScavengeRestore = NULL;
}
+ if (MI->isDebugValue())
+ return;
+
// Find out which registers are early clobbered, killed, defined, and marked
// def-dead in this instruction.
+ // FIXME: The scavenger is not predication aware. If the instruction is
+ // predicated, conservatively assume "kill" markers do not actually kill the
+ // register. Similarly, ignore "dead" markers.
+ bool isPred = TII->isPredicated(MI);
BitVector EarlyClobberRegs(NumPhysRegs);
BitVector KillRegs(NumPhysRegs);
BitVector DefRegs(NumPhysRegs);
@@ -152,11 +161,11 @@ void RegScavenger::forward() {
if (MO.isUse()) {
// Two-address operands implicitly kill.
- if (MO.isKill() || MI->isRegTiedToDefOperand(i))
+ if (!isPred && (MO.isKill() || MI->isRegTiedToDefOperand(i)))
addRegWithSubRegs(KillRegs, Reg);
} else {
assert(MO.isDef());
- if (MO.isDead())
+ if (!isPred && MO.isDead())
addRegWithSubRegs(DeadRegs, Reg);
else
addRegWithSubRegs(DefRegs, Reg);
@@ -219,24 +228,29 @@ void RegScavenger::getRegsUsed(BitVector &used, bool includeReserved) {
used = ~RegsAvailable & ~ReservedRegs;
}
-/// CreateRegClassMask - Set the bits that represent the registers in the
-/// TargetRegisterClass.
-static void CreateRegClassMask(const TargetRegisterClass *RC, BitVector &Mask) {
- for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); I != E;
- ++I)
- Mask.set(*I);
-}
-
unsigned RegScavenger::FindUnusedReg(const TargetRegisterClass *RC) const {
for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
I != E; ++I)
- if (!isAliasUsed(*I))
+ if (!isAliasUsed(*I)) {
+ DEBUG(dbgs() << "Scavenger found unused reg: " << TRI->getName(*I) <<
+ "\n");
return *I;
+ }
return 0;
}
+/// getRegsAvailable - Return all available registers in the register class
+/// in Mask.
+void RegScavenger::getRegsAvailable(const TargetRegisterClass *RC,
+ BitVector &Mask) {
+ for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
+ I != E; ++I)
+ if (!isAliasUsed(*I))
+ Mask.set(*I);
+}
+
/// findSurvivorReg - Return the candidate register that is unused for the
-/// longest after MBBI. UseMI is set to the instruction where the search
+/// longest after StartMI. UseMI is set to the instruction where the search
/// stopped.
///
/// No more than InstrLimit instructions are inspected.
@@ -255,6 +269,10 @@ unsigned RegScavenger::findSurvivorReg(MachineBasicBlock::iterator StartMI,
bool inVirtLiveRange = false;
for (++MI; InstrLimit > 0 && MI != ME; ++MI, --InstrLimit) {
+ if (MI->isDebugValue()) {
+ ++InstrLimit; // Don't count debug instructions
+ continue;
+ }
bool isVirtKillInsn = false;
bool isVirtDefInsn = false;
// Remove any candidates touched by instruction.
@@ -304,11 +322,9 @@ unsigned RegScavenger::findSurvivorReg(MachineBasicBlock::iterator StartMI,
unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
MachineBasicBlock::iterator I,
int SPAdj) {
- // Mask off the registers which are not in the TargetRegisterClass.
- BitVector Candidates(NumPhysRegs, false);
- CreateRegClassMask(RC, Candidates);
- // Do not include reserved registers.
- Candidates ^= ReservedRegs & Candidates;
+ // Consider all allocatable registers in the register class initially
+ BitVector Candidates =
+ TRI->getAllocatableSet(*I->getParent()->getParent(), RC);
// Exclude all the registers being used by the instruction.
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
@@ -318,15 +334,20 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
Candidates.reset(MO.getReg());
}
+ // Try to find a register that's unused if there is one, as then we won't
+ // have to spill.
+ if ((Candidates & RegsAvailable).any())
+ Candidates &= RegsAvailable;
+
// Find the register whose use is furthest away.
MachineBasicBlock::iterator UseMI;
unsigned SReg = findSurvivorReg(I, Candidates, 25, UseMI);
- // If we found an unused register there is no reason to spill it. We have
- // probably found a callee-saved register that has been saved in the
- // prologue, but happens to be unused at this point.
- if (!isAliasUsed(SReg))
+ // If we found an unused register there is no reason to spill it.
+ if (!isAliasUsed(SReg)) {
+ DEBUG(dbgs() << "Scavenged register: " << TRI->getName(SReg) << "\n");
return SReg;
+ }
assert(ScavengedReg == 0 &&
"Scavenger slot is live, unable to scavenge another register!");
@@ -340,14 +361,14 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
// Spill the scavenged register before I.
assert(ScavengingFrameIndex >= 0 &&
"Cannot scavenge register without an emergency spill slot!");
- TII->storeRegToStackSlot(*MBB, I, SReg, true, ScavengingFrameIndex, RC);
+ TII->storeRegToStackSlot(*MBB, I, SReg, true, ScavengingFrameIndex, RC,TRI);
MachineBasicBlock::iterator II = prior(I);
- TRI->eliminateFrameIndex(II, SPAdj, NULL, this);
+ TRI->eliminateFrameIndex(II, SPAdj, this);
// Restore the scavenged register before its use (or first terminator).
- TII->loadRegFromStackSlot(*MBB, UseMI, SReg, ScavengingFrameIndex, RC);
+ TII->loadRegFromStackSlot(*MBB, UseMI, SReg, ScavengingFrameIndex, RC, TRI);
II = prior(UseMI);
- TRI->eliminateFrameIndex(II, SPAdj, NULL, this);
+ TRI->eliminateFrameIndex(II, SPAdj, this);
}
ScavengeRestore = prior(UseMI);
@@ -356,5 +377,8 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
// ScavengedReg = SReg;
ScavengedRC = RC;
+ DEBUG(dbgs() << "Scavenged register (with spill): " << TRI->getName(SReg) <<
+ "\n");
+
return SReg;
}
diff --git a/libclamav/c++/llvm/lib/CodeGen/RenderMachineFunction.cpp b/libclamav/c++/llvm/lib/CodeGen/RenderMachineFunction.cpp
new file mode 100644
index 0000000..93426ee
--- /dev/null
+++ b/libclamav/c++/llvm/lib/CodeGen/RenderMachineFunction.cpp
@@ -0,0 +1,1014 @@
+//===-- llvm/CodeGen/RenderMachineFunction.cpp - MF->HTML ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "rendermf"
+
+#include "RenderMachineFunction.h"
+
+#include "VirtRegMap.h"
+
+#include "llvm/Function.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+
+#include <sstream>
+
+using namespace llvm;
+
+char RenderMachineFunction::ID = 0;
+INITIALIZE_PASS(RenderMachineFunction, "rendermf",
+ "Render machine functions (and related info) to HTML pages",
+ false, false);
+
+static cl::opt<std::string>
+outputFileSuffix("rmf-file-suffix",
+ cl::desc("Appended to function name to get output file name "
+ "(default: \".html\")"),
+ cl::init(".html"), cl::Hidden);
+
+static cl::opt<std::string>
+machineFuncsToRender("rmf-funcs",
+ cl::desc("Coma seperated list of functions to render"
+ ", or \"*\"."),
+ cl::init(""), cl::Hidden);
+
+static cl::opt<std::string>
+pressureClasses("rmf-classes",
+ cl::desc("Register classes to render pressure for."),
+ cl::init(""), cl::Hidden);
+
+static cl::opt<std::string>
+showIntervals("rmf-intervals",
+ cl::desc("Live intervals to show alongside code."),
+ cl::init(""), cl::Hidden);
+
+static cl::opt<bool>
+filterEmpty("rmf-filter-empty-intervals",
+ cl::desc("Don't display empty intervals."),
+ cl::init(true), cl::Hidden);
+
+static cl::opt<bool>
+showEmptyIndexes("rmf-empty-indexes",
+ cl::desc("Render indexes not associated with instructions or "
+ "MBB starts."),
+ cl::init(false), cl::Hidden);
+
+static cl::opt<bool>
+useFancyVerticals("rmf-fancy-verts",
+ cl::desc("Use SVG for vertical text."),
+ cl::init(true), cl::Hidden);
+
+static cl::opt<bool>
+prettyHTML("rmf-pretty-html",
+ cl::desc("Pretty print HTML. For debugging the renderer only.."),
+ cl::init(false), cl::Hidden);
+
+
+namespace llvm {
+
+ bool MFRenderingOptions::renderingOptionsProcessed;
+ std::set<std::string> MFRenderingOptions::mfNamesToRender;
+ bool MFRenderingOptions::renderAllMFs = false;
+
+ std::set<std::string> MFRenderingOptions::classNamesToRender;
+ bool MFRenderingOptions::renderAllClasses = false;
+
+ std::set<std::pair<unsigned, unsigned> >
+ MFRenderingOptions::intervalNumsToRender;
+ unsigned MFRenderingOptions::intervalTypesToRender = ExplicitOnly;
+
+ template <typename OutputItr>
+ void MFRenderingOptions::splitComaSeperatedList(const std::string &s,
+ OutputItr outItr) {
+ std::string::const_iterator curPos = s.begin();
+ std::string::const_iterator nextComa = std::find(curPos, s.end(), ',');
+ while (nextComa != s.end()) {
+ std::string elem;
+ std::copy(curPos, nextComa, std::back_inserter(elem));
+ *outItr = elem;
+ ++outItr;
+ curPos = llvm::next(nextComa);
+ nextComa = std::find(curPos, s.end(), ',');
+ }
+
+ if (curPos != s.end()) {
+ std::string elem;
+ std::copy(curPos, s.end(), std::back_inserter(elem));
+ *outItr = elem;
+ ++outItr;
+ }
+ }
+
+ void MFRenderingOptions::processOptions() {
+ if (!renderingOptionsProcessed) {
+ processFuncNames();
+ processRegClassNames();
+ processIntervalNumbers();
+ renderingOptionsProcessed = true;
+ }
+ }
+
+ void MFRenderingOptions::processFuncNames() {
+ if (machineFuncsToRender == "*") {
+ renderAllMFs = true;
+ } else {
+ splitComaSeperatedList(machineFuncsToRender,
+ std::inserter(mfNamesToRender,
+ mfNamesToRender.begin()));
+ }
+ }
+
+ void MFRenderingOptions::processRegClassNames() {
+ if (pressureClasses == "*") {
+ renderAllClasses = true;
+ } else {
+ splitComaSeperatedList(pressureClasses,
+ std::inserter(classNamesToRender,
+ classNamesToRender.begin()));
+ }
+ }
+
+ void MFRenderingOptions::processIntervalNumbers() {
+ std::set<std::string> intervalRanges;
+ splitComaSeperatedList(showIntervals,
+ std::inserter(intervalRanges,
+ intervalRanges.begin()));
+ std::for_each(intervalRanges.begin(), intervalRanges.end(),
+ processIntervalRange);
+ }
+
+ void MFRenderingOptions::processIntervalRange(
+ const std::string &intervalRangeStr) {
+ if (intervalRangeStr == "*") {
+ intervalTypesToRender |= All;
+ } else if (intervalRangeStr == "virt-nospills*") {
+ intervalTypesToRender |= VirtNoSpills;
+ } else if (intervalRangeStr == "spills*") {
+ intervalTypesToRender |= VirtSpills;
+ } else if (intervalRangeStr == "virt*") {
+ intervalTypesToRender |= AllVirt;
+ } else if (intervalRangeStr == "phys*") {
+ intervalTypesToRender |= AllPhys;
+ } else {
+ std::istringstream iss(intervalRangeStr);
+ unsigned reg1, reg2;
+ if ((iss >> reg1 >> std::ws)) {
+ if (iss.eof()) {
+ intervalNumsToRender.insert(std::make_pair(reg1, reg1 + 1));
+ } else {
+ char c;
+ iss >> c;
+ if (c == '-' && (iss >> reg2)) {
+ intervalNumsToRender.insert(std::make_pair(reg1, reg2 + 1));
+ } else {
+ dbgs() << "Warning: Invalid interval range \""
+ << intervalRangeStr << "\" in -rmf-intervals. Skipping.\n";
+ }
+ }
+ } else {
+ dbgs() << "Warning: Invalid interval number \""
+ << intervalRangeStr << "\" in -rmf-intervals. Skipping.\n";
+ }
+ }
+ }
+
+ void MFRenderingOptions::setup(MachineFunction *mf,
+ const TargetRegisterInfo *tri,
+ LiveIntervals *lis,
+ const RenderMachineFunction *rmf) {
+ this->mf = mf;
+ this->tri = tri;
+ this->lis = lis;
+ this->rmf = rmf;
+
+ clear();
+ }
+
+ void MFRenderingOptions::clear() {
+ regClassesTranslatedToCurrentFunction = false;
+ regClassSet.clear();
+
+ intervalsTranslatedToCurrentFunction = false;
+ intervalSet.clear();
+ }
+
+ void MFRenderingOptions::resetRenderSpecificOptions() {
+ intervalSet.clear();
+ intervalsTranslatedToCurrentFunction = false;
+ }
+
+ bool MFRenderingOptions::shouldRenderCurrentMachineFunction() const {
+ processOptions();
+
+ return (renderAllMFs ||
+ mfNamesToRender.find(mf->getFunction()->getName()) !=
+ mfNamesToRender.end());
+ }
+
+ const MFRenderingOptions::RegClassSet& MFRenderingOptions::regClasses() const{
+ translateRegClassNamesToCurrentFunction();
+ return regClassSet;
+ }
+
+ const MFRenderingOptions::IntervalSet& MFRenderingOptions::intervals() const {
+ translateIntervalNumbersToCurrentFunction();
+ return intervalSet;
+ }
+
+ bool MFRenderingOptions::renderEmptyIndexes() const {
+ return showEmptyIndexes;
+ }
+
+ bool MFRenderingOptions::fancyVerticals() const {
+ return useFancyVerticals;
+ }
+
+ void MFRenderingOptions::translateRegClassNamesToCurrentFunction() const {
+ if (!regClassesTranslatedToCurrentFunction) {
+ processOptions();
+ for (TargetRegisterInfo::regclass_iterator rcItr = tri->regclass_begin(),
+ rcEnd = tri->regclass_end();
+ rcItr != rcEnd; ++rcItr) {
+ const TargetRegisterClass *trc = *rcItr;
+ if (renderAllClasses ||
+ classNamesToRender.find(trc->getName()) !=
+ classNamesToRender.end()) {
+ regClassSet.insert(trc);
+ }
+ }
+ regClassesTranslatedToCurrentFunction = true;
+ }
+ }
+
+ void MFRenderingOptions::translateIntervalNumbersToCurrentFunction() const {
+ if (!intervalsTranslatedToCurrentFunction) {
+ processOptions();
+
+ // If we're not just doing explicit then do a copy over all matching
+ // types.
+ if (intervalTypesToRender != ExplicitOnly) {
+ for (LiveIntervals::iterator liItr = lis->begin(), liEnd = lis->end();
+ liItr != liEnd; ++liItr) {
+ LiveInterval *li = liItr->second;
+
+ if (filterEmpty && li->empty())
+ continue;
+
+ if ((TargetRegisterInfo::isPhysicalRegister(li->reg) &&
+ (intervalTypesToRender & AllPhys))) {
+ intervalSet.insert(li);
+ } else if (TargetRegisterInfo::isVirtualRegister(li->reg)) {
+ if (((intervalTypesToRender & VirtNoSpills) && !rmf->isSpill(li)) ||
+ ((intervalTypesToRender & VirtSpills) && rmf->isSpill(li))) {
+ intervalSet.insert(li);
+ }
+ }
+ }
+ }
+
+ // If we need to process the explicit list...
+ if (intervalTypesToRender != All) {
+ for (std::set<std::pair<unsigned, unsigned> >::const_iterator
+ regRangeItr = intervalNumsToRender.begin(),
+ regRangeEnd = intervalNumsToRender.end();
+ regRangeItr != regRangeEnd; ++regRangeItr) {
+ const std::pair<unsigned, unsigned> &range = *regRangeItr;
+ for (unsigned reg = range.first; reg != range.second; ++reg) {
+ if (lis->hasInterval(reg)) {
+ intervalSet.insert(&lis->getInterval(reg));
+ }
+ }
+ }
+ }
+
+ intervalsTranslatedToCurrentFunction = true;
+ }
+ }
+
+ // ---------- TargetRegisterExtraInfo implementation ----------
+
+ TargetRegisterExtraInfo::TargetRegisterExtraInfo()
+ : mapsPopulated(false) {
+ }
+
+ void TargetRegisterExtraInfo::setup(MachineFunction *mf,
+ MachineRegisterInfo *mri,
+ const TargetRegisterInfo *tri,
+ LiveIntervals *lis) {
+ this->mf = mf;
+ this->mri = mri;
+ this->tri = tri;
+ this->lis = lis;
+ }
+
+ void TargetRegisterExtraInfo::reset() {
+ if (!mapsPopulated) {
+ initWorst();
+ //initBounds();
+ initCapacity();
+ mapsPopulated = true;
+ }
+
+ resetPressureAndLiveStates();
+ }
+
+ void TargetRegisterExtraInfo::clear() {
+ prWorst.clear();
+ vrWorst.clear();
+ capacityMap.clear();
+ pressureMap.clear();
+ //liveStatesMap.clear();
+ mapsPopulated = false;
+ }
+
+ void TargetRegisterExtraInfo::initWorst() {
+ assert(!mapsPopulated && prWorst.empty() && vrWorst.empty() &&
+ "Worst map already initialised?");
+
+ // Start with the physical registers.
+ for (unsigned preg = 1; preg < tri->getNumRegs(); ++preg) {
+ WorstMapLine &pregLine = prWorst[preg];
+
+ for (TargetRegisterInfo::regclass_iterator rcItr = tri->regclass_begin(),
+ rcEnd = tri->regclass_end();
+ rcItr != rcEnd; ++rcItr) {
+ const TargetRegisterClass *trc = *rcItr;
+
+ unsigned numOverlaps = 0;
+ for (TargetRegisterClass::iterator rItr = trc->begin(),
+ rEnd = trc->end();
+ rItr != rEnd; ++rItr) {
+ unsigned trcPReg = *rItr;
+ if (tri->regsOverlap(preg, trcPReg))
+ ++numOverlaps;
+ }
+
+ pregLine[trc] = numOverlaps;
+ }
+ }
+
+ // Now the register classes.
+ for (TargetRegisterInfo::regclass_iterator rc1Itr = tri->regclass_begin(),
+ rcEnd = tri->regclass_end();
+ rc1Itr != rcEnd; ++rc1Itr) {
+ const TargetRegisterClass *trc1 = *rc1Itr;
+ WorstMapLine &classLine = vrWorst[trc1];
+
+ for (TargetRegisterInfo::regclass_iterator rc2Itr = tri->regclass_begin();
+ rc2Itr != rcEnd; ++rc2Itr) {
+ const TargetRegisterClass *trc2 = *rc2Itr;
+
+ unsigned worst = 0;
+
+ for (TargetRegisterClass::iterator trc1Itr = trc1->begin(),
+ trc1End = trc1->end();
+ trc1Itr != trc1End; ++trc1Itr) {
+ unsigned trc1Reg = *trc1Itr;
+ unsigned trc1RegWorst = 0;
+
+ for (TargetRegisterClass::iterator trc2Itr = trc2->begin(),
+ trc2End = trc2->end();
+ trc2Itr != trc2End; ++trc2Itr) {
+ unsigned trc2Reg = *trc2Itr;
+ if (tri->regsOverlap(trc1Reg, trc2Reg))
+ ++trc1RegWorst;
+ }
+ if (trc1RegWorst > worst) {
+ worst = trc1RegWorst;
+ }
+ }
+
+ if (worst != 0) {
+ classLine[trc2] = worst;
+ }
+ }
+ }
+ }
+
+ unsigned TargetRegisterExtraInfo::getWorst(
+ unsigned reg,
+ const TargetRegisterClass *trc) const {
+ const WorstMapLine *wml = 0;
+ if (TargetRegisterInfo::isPhysicalRegister(reg)) {
+ PRWorstMap::const_iterator prwItr = prWorst.find(reg);
+ assert(prwItr != prWorst.end() && "Missing prWorst entry.");
+ wml = &prwItr->second;
+ } else {
+ const TargetRegisterClass *regTRC = mri->getRegClass(reg);
+ VRWorstMap::const_iterator vrwItr = vrWorst.find(regTRC);
+ assert(vrwItr != vrWorst.end() && "Missing vrWorst entry.");
+ wml = &vrwItr->second;
+ }
+
+ WorstMapLine::const_iterator wmlItr = wml->find(trc);
+ if (wmlItr == wml->end())
+ return 0;
+
+ return wmlItr->second;
+ }
+
+ void TargetRegisterExtraInfo::initCapacity() {
+ assert(!mapsPopulated && capacityMap.empty() &&
+ "Capacity map already initialised?");
+
+ for (TargetRegisterInfo::regclass_iterator rcItr = tri->regclass_begin(),
+ rcEnd = tri->regclass_end();
+ rcItr != rcEnd; ++rcItr) {
+ const TargetRegisterClass *trc = *rcItr;
+ unsigned capacity = std::distance(trc->allocation_order_begin(*mf),
+ trc->allocation_order_end(*mf));
+
+ if (capacity != 0)
+ capacityMap[trc] = capacity;
+ }
+ }
+
+ unsigned TargetRegisterExtraInfo::getCapacity(
+ const TargetRegisterClass *trc) const {
+ CapacityMap::const_iterator cmItr = capacityMap.find(trc);
+ assert(cmItr != capacityMap.end() &&
+ "vreg with unallocable register class");
+ return cmItr->second;
+ }
+
+ void TargetRegisterExtraInfo::resetPressureAndLiveStates() {
+ pressureMap.clear();
+ //liveStatesMap.clear();
+
+ // Iterate over all slots.
+
+
+ // Iterate over all live intervals.
+ for (LiveIntervals::iterator liItr = lis->begin(),
+ liEnd = lis->end();
+ liItr != liEnd; ++liItr) {
+ LiveInterval *li = liItr->second;
+
+ const TargetRegisterClass *liTRC;
+
+ if (TargetRegisterInfo::isPhysicalRegister(li->reg))
+ continue;
+
+ liTRC = mri->getRegClass(li->reg);
+
+
+ // For all ranges in the current interval.
+ for (LiveInterval::iterator lrItr = li->begin(),
+ lrEnd = li->end();
+ lrItr != lrEnd; ++lrItr) {
+ LiveRange *lr = &*lrItr;
+
+ // For all slots in the current range.
+ for (SlotIndex i = lr->start; i != lr->end; i = i.getNextSlot()) {
+
+ // Record increased pressure at index for all overlapping classes.
+ for (TargetRegisterInfo::regclass_iterator
+ rcItr = tri->regclass_begin(),
+ rcEnd = tri->regclass_end();
+ rcItr != rcEnd; ++rcItr) {
+ const TargetRegisterClass *trc = *rcItr;
+
+ if (trc->allocation_order_begin(*mf) ==
+ trc->allocation_order_end(*mf))
+ continue;
+
+ unsigned worstAtI = getWorst(li->reg, trc);
+
+ if (worstAtI != 0) {
+ pressureMap[i][trc] += worstAtI;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ unsigned TargetRegisterExtraInfo::getPressureAtSlot(
+ const TargetRegisterClass *trc,
+ SlotIndex i) const {
+ PressureMap::const_iterator pmItr = pressureMap.find(i);
+ if (pmItr == pressureMap.end())
+ return 0;
+ const PressureMapLine &pmLine = pmItr->second;
+ PressureMapLine::const_iterator pmlItr = pmLine.find(trc);
+ if (pmlItr == pmLine.end())
+ return 0;
+ return pmlItr->second;
+ }
+
+ bool TargetRegisterExtraInfo::classOverCapacityAtSlot(
+ const TargetRegisterClass *trc,
+ SlotIndex i) const {
+ return (getPressureAtSlot(trc, i) > getCapacity(trc));
+ }
+
+ // ---------- MachineFunctionRenderer implementation ----------
+
+ void RenderMachineFunction::Spacer::print(raw_ostream &os) const {
+ if (!prettyHTML)
+ return;
+ for (unsigned i = 0; i < ns; ++i) {
+ os << " ";
+ }
+ }
+
+ RenderMachineFunction::Spacer RenderMachineFunction::s(unsigned ns) const {
+ return Spacer(ns);
+ }
+
+ raw_ostream& operator<<(raw_ostream &os, const RenderMachineFunction::Spacer &s) {
+ s.print(os);
+ return os;
+ }
+
+ template <typename Iterator>
+ std::string RenderMachineFunction::escapeChars(Iterator sBegin, Iterator sEnd) const {
+ std::string r;
+
+ for (Iterator sItr = sBegin; sItr != sEnd; ++sItr) {
+ char c = *sItr;
+
+ switch (c) {
+ case '<': r.append("&lt;"); break;
+ case '>': r.append("&gt;"); break;
+ case '&': r.append("&amp;"); break;
+ case ' ': r.append("&nbsp;"); break;
+ case '\"': r.append("&quot;"); break;
+ default: r.push_back(c); break;
+ }
+ }
+
+ return r;
+ }
+
+ RenderMachineFunction::LiveState
+ RenderMachineFunction::getLiveStateAt(const LiveInterval *li,
+ SlotIndex i) const {
+ const MachineInstr *mi = sis->getInstructionFromIndex(i);
+
+ // For uses/defs recorded use/def indexes override current liveness and
+ // instruction operands (Only for the interval which records the indexes).
+ if (i.isUse() || i.isDef()) {
+ UseDefs::const_iterator udItr = useDefs.find(li);
+ if (udItr != useDefs.end()) {
+ const SlotSet &slotSet = udItr->second;
+ if (slotSet.count(i)) {
+ if (i.isUse()) {
+ return Used;
+ }
+ // else
+ return Defined;
+ }
+ }
+ }
+
+ // If the slot is a load/store, or there's no info in the use/def set then
+ // use liveness and instruction operand info.
+ if (li->liveAt(i)) {
+
+ if (mi == 0) {
+ if (vrm == 0 ||
+ (vrm->getStackSlot(li->reg) == VirtRegMap::NO_STACK_SLOT)) {
+ return AliveReg;
+ } else {
+ return AliveStack;
+ }
+ } else {
+ if (i.isDef() && mi->definesRegister(li->reg, tri)) {
+ return Defined;
+ } else if (i.isUse() && mi->readsRegister(li->reg)) {
+ return Used;
+ } else {
+ if (vrm == 0 ||
+ (vrm->getStackSlot(li->reg) == VirtRegMap::NO_STACK_SLOT)) {
+ return AliveReg;
+ } else {
+ return AliveStack;
+ }
+ }
+ }
+ }
+ return Dead;
+ }
+
+ RenderMachineFunction::PressureState
+ RenderMachineFunction::getPressureStateAt(const TargetRegisterClass *trc,
+ SlotIndex i) const {
+ if (trei.getPressureAtSlot(trc, i) == 0) {
+ return Zero;
+ } else if (trei.classOverCapacityAtSlot(trc, i)){
+ return High;
+ }
+ return Low;
+ }
+
+ /// \brief Render a machine instruction.
+ void RenderMachineFunction::renderMachineInstr(raw_ostream &os,
+ const MachineInstr *mi) const {
+ std::string s;
+ raw_string_ostream oss(s);
+ oss << *mi;
+
+ os << escapeChars(oss.str());
+ }
+
+ template <typename T>
+ void RenderMachineFunction::renderVertical(const Spacer &indent,
+ raw_ostream &os,
+ const T &t) const {
+ if (ro.fancyVerticals()) {
+ os << indent << "<object\n"
+ << indent + s(2) << "class=\"obj\"\n"
+ << indent + s(2) << "type=\"image/svg+xml\"\n"
+ << indent + s(2) << "width=\"14px\"\n"
+ << indent + s(2) << "height=\"55px\"\n"
+ << indent + s(2) << "data=\"data:image/svg+xml,\n"
+ << indent + s(4) << "<svg xmlns='http://www.w3.org/2000/svg'>\n"
+ << indent + s(6) << "<text x='-55' y='10' "
+ "font-family='Courier' font-size='12' "
+ "transform='rotate(-90)' "
+ "text-rendering='optimizeSpeed' "
+ "fill='#000'>" << t << "</text>\n"
+ << indent + s(4) << "</svg>\">\n"
+ << indent << "</object>\n";
+ } else {
+ std::ostringstream oss;
+ oss << t;
+ std::string tStr(oss.str());
+
+ os << indent;
+ for (std::string::iterator tStrItr = tStr.begin(), tStrEnd = tStr.end();
+ tStrItr != tStrEnd; ++tStrItr) {
+ os << *tStrItr << "<br/>";
+ }
+ os << "\n";
+ }
+ }
+
+ void RenderMachineFunction::insertCSS(const Spacer &indent,
+ raw_ostream &os) const {
+ os << indent << "<style type=\"text/css\">\n"
+ << indent + s(2) << "body { font-color: black; }\n"
+ << indent + s(2) << "table.code td { font-family: monospace; "
+ "border-width: 0px; border-style: solid; "
+ "border-bottom: 1px solid #dddddd; white-space: nowrap; }\n"
+ << indent + s(2) << "table.code td.p-z { background-color: #000000; }\n"
+ << indent + s(2) << "table.code td.p-l { background-color: #00ff00; }\n"
+ << indent + s(2) << "table.code td.p-h { background-color: #ff0000; }\n"
+ << indent + s(2) << "table.code td.l-n { background-color: #ffffff; }\n"
+ << indent + s(2) << "table.code td.l-d { background-color: #ff0000; }\n"
+ << indent + s(2) << "table.code td.l-u { background-color: #ffff00; }\n"
+ << indent + s(2) << "table.code td.l-r { background-color: #000000; }\n"
+ << indent + s(2) << "table.code td.l-s { background-color: #770000; }\n"
+ << indent + s(2) << "table.code th { border-width: 0px; "
+ "border-style: solid; }\n"
+ << indent << "</style>\n";
+ }
+
+ void RenderMachineFunction::renderFunctionSummary(
+ const Spacer &indent, raw_ostream &os,
+ const char * const renderContextStr) const {
+ os << indent << "<h1>Function: " << mf->getFunction()->getName()
+ << "</h1>\n"
+ << indent << "<h2>Rendering context: " << renderContextStr << "</h2>\n";
+ }
+
+
+ void RenderMachineFunction::renderPressureTableLegend(
+ const Spacer &indent,
+ raw_ostream &os) const {
+ os << indent << "<h2>Rendering Pressure Legend:</h2>\n"
+ << indent << "<table class=\"code\">\n"
+ << indent + s(2) << "<tr>\n"
+ << indent + s(4) << "<th>Pressure</th><th>Description</th>"
+ "<th>Appearance</th>\n"
+ << indent + s(2) << "</tr>\n"
+ << indent + s(2) << "<tr>\n"
+ << indent + s(4) << "<td>No Pressure</td>"
+ "<td>No physical registers of this class requested.</td>"
+ "<td class=\"p-z\"> </td>\n"
+ << indent + s(2) << "</tr>\n"
+ << indent + s(2) << "<tr>\n"
+ << indent + s(4) << "<td>Low Pressure</td>"
+ "<td>Sufficient physical registers to meet demand.</td>"
+ "<td class=\"p-l\"> </td>\n"
+ << indent + s(2) << "</tr>\n"
+ << indent + s(2) << "<tr>\n"
+ << indent + s(4) << "<td>High Pressure</td>"
+ "<td>Potentially insufficient physical registers to meet demand.</td>"
+ "<td class=\"p-h\"> </td>\n"
+ << indent + s(2) << "</tr>\n"
+ << indent << "</table>\n";
+ }
+
+ template <typename CellType>
+ void RenderMachineFunction::renderCellsWithRLE(
+ const Spacer &indent, raw_ostream &os,
+ const std::pair<CellType, unsigned> &rleAccumulator,
+ const std::map<CellType, std::string> &cellTypeStrs) const {
+
+ if (rleAccumulator.second == 0)
+ return;
+
+ typename std::map<CellType, std::string>::const_iterator ctsItr =
+ cellTypeStrs.find(rleAccumulator.first);
+
+ assert(ctsItr != cellTypeStrs.end() && "No string for given cell type.");
+
+ os << indent + s(4) << "<td class=\"" << ctsItr->second << "\"";
+ if (rleAccumulator.second > 1)
+ os << " colspan=" << rleAccumulator.second;
+ os << "></td>\n";
+ }
+
+
+ void RenderMachineFunction::renderCodeTablePlusPI(const Spacer &indent,
+ raw_ostream &os) const {
+
+ std::map<LiveState, std::string> lsStrs;
+ lsStrs[Dead] = "l-n";
+ lsStrs[Defined] = "l-d";
+ lsStrs[Used] = "l-u";
+ lsStrs[AliveReg] = "l-r";
+ lsStrs[AliveStack] = "l-s";
+
+ std::map<PressureState, std::string> psStrs;
+ psStrs[Zero] = "p-z";
+ psStrs[Low] = "p-l";
+ psStrs[High] = "p-h";
+
+ // Open the table...
+
+ os << indent << "<table cellpadding=0 cellspacing=0 class=\"code\">\n"
+ << indent + s(2) << "<tr>\n";
+
+ // Render the header row...
+
+ os << indent + s(4) << "<th>index</th>\n"
+ << indent + s(4) << "<th>instr</th>\n";
+
+ // Render class names if necessary...
+ if (!ro.regClasses().empty()) {
+ for (MFRenderingOptions::RegClassSet::const_iterator
+ rcItr = ro.regClasses().begin(),
+ rcEnd = ro.regClasses().end();
+ rcItr != rcEnd; ++rcItr) {
+ const TargetRegisterClass *trc = *rcItr;
+ os << indent + s(4) << "<th>\n";
+ renderVertical(indent + s(6), os, trc->getName());
+ os << indent + s(4) << "</th>\n";
+ }
+ }
+
+ // FIXME: Is there a nicer way to insert space between columns in HTML?
+ if (!ro.regClasses().empty() && !ro.intervals().empty())
+ os << indent + s(4) << "<th> </th>\n";
+
+ // Render interval numbers if necessary...
+ if (!ro.intervals().empty()) {
+ for (MFRenderingOptions::IntervalSet::const_iterator
+ liItr = ro.intervals().begin(),
+ liEnd = ro.intervals().end();
+ liItr != liEnd; ++liItr) {
+
+ const LiveInterval *li = *liItr;
+ os << indent + s(4) << "<th>\n";
+ renderVertical(indent + s(6), os, li->reg);
+ os << indent + s(4) << "</th>\n";
+ }
+ }
+
+ os << indent + s(2) << "</tr>\n";
+
+ // End header row, start with the data rows...
+
+ MachineInstr *mi = 0;
+
+ // Data rows:
+ for (SlotIndex i = sis->getZeroIndex(); i != sis->getLastIndex();
+ i = i.getNextSlot()) {
+
+ // Render the slot column.
+ os << indent + s(2) << "<tr height=6ex>\n";
+
+ // Render the code column.
+ if (i.isLoad()) {
+ MachineBasicBlock *mbb = sis->getMBBFromIndex(i);
+ mi = sis->getInstructionFromIndex(i);
+
+ if (i == sis->getMBBStartIdx(mbb) || mi != 0 ||
+ ro.renderEmptyIndexes()) {
+ os << indent + s(4) << "<td rowspan=4>" << i << " </td>\n"
+ << indent + s(4) << "<td rowspan=4>\n";
+
+ if (i == sis->getMBBStartIdx(mbb)) {
+ os << indent + s(6) << "BB#" << mbb->getNumber() << ": \n";
+ } else if (mi != 0) {
+ os << indent + s(6) << " ";
+ renderMachineInstr(os, mi);
+ } else {
+ // Empty interval - leave blank.
+ }
+ os << indent + s(4) << "</td>\n";
+ } else {
+ i = i.getStoreIndex(); // <- Will be incremented to the next index.
+ continue;
+ }
+ }
+
+ // Render the class columns.
+ if (!ro.regClasses().empty()) {
+ std::pair<PressureState, unsigned> psRLEAccumulator(Zero, 0);
+ for (MFRenderingOptions::RegClassSet::const_iterator
+ rcItr = ro.regClasses().begin(),
+ rcEnd = ro.regClasses().end();
+ rcItr != rcEnd; ++rcItr) {
+ const TargetRegisterClass *trc = *rcItr;
+ PressureState newPressure = getPressureStateAt(trc, i);
+
+ if (newPressure == psRLEAccumulator.first) {
+ ++psRLEAccumulator.second;
+ } else {
+ renderCellsWithRLE(indent + s(4), os, psRLEAccumulator, psStrs);
+ psRLEAccumulator.first = newPressure;
+ psRLEAccumulator.second = 1;
+ }
+ }
+ renderCellsWithRLE(indent + s(4), os, psRLEAccumulator, psStrs);
+ }
+
+ // FIXME: Is there a nicer way to insert space between columns in HTML?
+ if (!ro.regClasses().empty() && !ro.intervals().empty())
+ os << indent + s(4) << "<td width=2em></td>\n";
+
+ if (!ro.intervals().empty()) {
+ std::pair<LiveState, unsigned> lsRLEAccumulator(Dead, 0);
+ for (MFRenderingOptions::IntervalSet::const_iterator
+ liItr = ro.intervals().begin(),
+ liEnd = ro.intervals().end();
+ liItr != liEnd; ++liItr) {
+ const LiveInterval *li = *liItr;
+ LiveState newLiveness = getLiveStateAt(li, i);
+
+ if (newLiveness == lsRLEAccumulator.first) {
+ ++lsRLEAccumulator.second;
+ } else {
+ renderCellsWithRLE(indent + s(4), os, lsRLEAccumulator, lsStrs);
+ lsRLEAccumulator.first = newLiveness;
+ lsRLEAccumulator.second = 1;
+ }
+ }
+ renderCellsWithRLE(indent + s(4), os, lsRLEAccumulator, lsStrs);
+ }
+ os << indent + s(2) << "</tr>\n";
+ }
+
+ os << indent << "</table>\n";
+
+ if (!ro.regClasses().empty())
+ renderPressureTableLegend(indent, os);
+ }
+
+ void RenderMachineFunction::renderFunctionPage(
+ raw_ostream &os,
+ const char * const renderContextStr) const {
+ os << "<html>\n"
+ << s(2) << "<head>\n"
+ << s(4) << "<title>" << fqn << "</title>\n";
+
+ insertCSS(s(4), os);
+
+ os << s(2) << "<head>\n"
+ << s(2) << "<body >\n";
+
+ renderFunctionSummary(s(4), os, renderContextStr);
+
+ os << s(4) << "<br/><br/><br/>\n";
+
+ //renderLiveIntervalInfoTable(" ", os);
+
+ os << s(4) << "<br/><br/><br/>\n";
+
+ renderCodeTablePlusPI(s(4), os);
+
+ os << s(2) << "</body>\n"
+ << "</html>\n";
+ }
+
+ void RenderMachineFunction::getAnalysisUsage(AnalysisUsage &au) const {
+ au.addRequired<SlotIndexes>();
+ au.addRequired<LiveIntervals>();
+ au.setPreservesAll();
+ MachineFunctionPass::getAnalysisUsage(au);
+ }
+
+ bool RenderMachineFunction::runOnMachineFunction(MachineFunction &fn) {
+
+ mf = &fn;
+ mri = &mf->getRegInfo();
+ tri = mf->getTarget().getRegisterInfo();
+ lis = &getAnalysis<LiveIntervals>();
+ sis = &getAnalysis<SlotIndexes>();
+
+ trei.setup(mf, mri, tri, lis);
+ ro.setup(mf, tri, lis, this);
+ spillIntervals.clear();
+ spillFor.clear();
+ useDefs.clear();
+
+ fqn = mf->getFunction()->getParent()->getModuleIdentifier() + "." +
+ mf->getFunction()->getName().str();
+
+ return false;
+ }
+
+ void RenderMachineFunction::releaseMemory() {
+ trei.clear();
+ ro.clear();
+ spillIntervals.clear();
+ spillFor.clear();
+ useDefs.clear();
+ }
+
+ void RenderMachineFunction::rememberUseDefs(const LiveInterval *li) {
+
+ if (!ro.shouldRenderCurrentMachineFunction())
+ return;
+
+ for (MachineRegisterInfo::reg_iterator rItr = mri->reg_begin(li->reg),
+ rEnd = mri->reg_end();
+ rItr != rEnd; ++rItr) {
+ const MachineInstr *mi = &*rItr;
+ if (mi->readsRegister(li->reg)) {
+ useDefs[li].insert(lis->getInstructionIndex(mi).getUseIndex());
+ }
+ if (mi->definesRegister(li->reg)) {
+ useDefs[li].insert(lis->getInstructionIndex(mi).getDefIndex());
+ }
+ }
+ }
+
+ void RenderMachineFunction::rememberSpills(
+ const LiveInterval *li,
+ const std::vector<LiveInterval*> &spills) {
+
+ if (!ro.shouldRenderCurrentMachineFunction())
+ return;
+
+ for (std::vector<LiveInterval*>::const_iterator siItr = spills.begin(),
+ siEnd = spills.end();
+ siItr != siEnd; ++siItr) {
+ const LiveInterval *spill = *siItr;
+ spillIntervals[li].insert(spill);
+ spillFor[spill] = li;
+ }
+ }
+
+ bool RenderMachineFunction::isSpill(const LiveInterval *li) const {
+ SpillForMap::const_iterator sfItr = spillFor.find(li);
+ if (sfItr == spillFor.end())
+ return false;
+ return true;
+ }
+
+ void RenderMachineFunction::renderMachineFunction(
+ const char *renderContextStr,
+ const VirtRegMap *vrm,
+ const char *renderSuffix) {
+ if (!ro.shouldRenderCurrentMachineFunction())
+ return;
+
+ this->vrm = vrm;
+ trei.reset();
+
+ std::string rpFileName(mf->getFunction()->getName().str() +
+ (renderSuffix ? renderSuffix : "") +
+ outputFileSuffix);
+
+ std::string errMsg;
+ raw_fd_ostream outFile(rpFileName.c_str(), errMsg, raw_fd_ostream::F_Binary);
+
+ renderFunctionPage(outFile, renderContextStr);
+
+ ro.resetRenderSpecificOptions();
+ }
+
+ std::string RenderMachineFunction::escapeChars(const std::string &s) const {
+ return escapeChars(s.begin(), s.end());
+ }
+
+}
diff --git a/libclamav/c++/llvm/lib/CodeGen/RenderMachineFunction.h b/libclamav/c++/llvm/lib/CodeGen/RenderMachineFunction.h
new file mode 100644
index 0000000..8d56a82
--- /dev/null
+++ b/libclamav/c++/llvm/lib/CodeGen/RenderMachineFunction.h
@@ -0,0 +1,336 @@
+//===-- llvm/CodeGen/RenderMachineFunction.h - MF->HTML -*- C++ -*---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_RENDERMACHINEFUNCTION_H
+#define LLVM_CODEGEN_RENDERMACHINEFUNCTION_H
+
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+#include <algorithm>
+#include <map>
+#include <set>
+#include <string>
+
+namespace llvm {
+
+ class LiveInterval;
+ class LiveIntervals;
+ class MachineInstr;
+ class MachineRegisterInfo;
+ class RenderMachineFunction;
+ class TargetRegisterClass;
+ class TargetRegisterInfo;
+ class VirtRegMap;
+ class raw_ostream;
+
+ /// \brief Helper class to process rendering options. Tries to be as lazy as
+ /// possible.
+ class MFRenderingOptions {
+ public:
+
+ struct RegClassComp {
+ bool operator()(const TargetRegisterClass *trc1,
+ const TargetRegisterClass *trc2) const {
+ std::string trc1Name(trc1->getName()), trc2Name(trc2->getName());
+ return std::lexicographical_compare(trc1Name.begin(), trc1Name.end(),
+ trc2Name.begin(), trc2Name.end());
+ }
+ };
+
+ typedef std::set<const TargetRegisterClass*, RegClassComp> RegClassSet;
+
+ struct IntervalComp {
+ bool operator()(const LiveInterval *li1, const LiveInterval *li2) const {
+ return li1->reg < li2->reg;
+ }
+ };
+
+ typedef std::set<const LiveInterval*, IntervalComp> IntervalSet;
+
+ /// Initialise the rendering options.
+ void setup(MachineFunction *mf, const TargetRegisterInfo *tri,
+ LiveIntervals *lis, const RenderMachineFunction *rmf);
+
+ /// Clear translations of options to the current function.
+ void clear();
+
+ /// Reset any options computed for this specific rendering.
+ void resetRenderSpecificOptions();
+
+ /// Should we render the current function.
+ bool shouldRenderCurrentMachineFunction() const;
+
+ /// Return the set of register classes to render pressure for.
+ const RegClassSet& regClasses() const;
+
+ /// Return the set of live intervals to render liveness for.
+ const IntervalSet& intervals() const;
+
+ /// Render indexes which are not associated with instructions / MBB starts.
+ bool renderEmptyIndexes() const;
+
+ /// Return whether or not to render using SVG for fancy vertical text.
+ bool fancyVerticals() const;
+
+ private:
+
+ static bool renderingOptionsProcessed;
+ static std::set<std::string> mfNamesToRender;
+ static bool renderAllMFs;
+
+ static std::set<std::string> classNamesToRender;
+ static bool renderAllClasses;
+
+
+ static std::set<std::pair<unsigned, unsigned> > intervalNumsToRender;
+ typedef enum { ExplicitOnly = 0,
+ AllPhys = 1,
+ VirtNoSpills = 2,
+ VirtSpills = 4,
+ AllVirt = 6,
+ All = 7 }
+ IntervalTypesToRender;
+ static unsigned intervalTypesToRender;
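+
+ // Note: the enum above is a bitmask. AllVirt == (VirtNoSpills | VirtSpills)
+ // and All == (AllPhys | AllVirt), so intervalTypesToRender can be tested
+ // against the individual flags with bitwise AND.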
+
+ template <typename OutputItr>
+ static void splitComaSeperatedList(const std::string &s, OutputItr outItr);
+
+ static void processOptions();
+
+ static void processFuncNames();
+ static void processRegClassNames();
+ static void processIntervalNumbers();
+
+ static void processIntervalRange(const std::string &intervalRangeStr);
+
+ MachineFunction *mf;
+ const TargetRegisterInfo *tri;
+ LiveIntervals *lis;
+ const RenderMachineFunction *rmf;
+
+ mutable bool regClassesTranslatedToCurrentFunction;
+ mutable RegClassSet regClassSet;
+
+ mutable bool intervalsTranslatedToCurrentFunction;
+ mutable IntervalSet intervalSet;
+
+ void translateRegClassNamesToCurrentFunction() const;
+
+ void translateIntervalNumbersToCurrentFunction() const;
+ };
+
+ /// \brief Provide extra information about the physical and virtual registers
+ /// in the function being compiled.
+ class TargetRegisterExtraInfo {
+ public:
+ TargetRegisterExtraInfo();
+
+ /// \brief Set up TargetRegisterExtraInfo with pointers to necessary
+ /// sources of information.
+ void setup(MachineFunction *mf, MachineRegisterInfo *mri,
+ const TargetRegisterInfo *tri, LiveIntervals *lis);
+
+ /// \brief Recompute tables for changed function.
+ void reset();
+
+ /// \brief Free all tables in TargetRegisterExtraInfo.
+ void clear();
+
+ /// \brief Maximum number of registers from trc which alias reg.
+ unsigned getWorst(unsigned reg, const TargetRegisterClass *trc) const;
+
+ /// \brief Returns the number of allocable registers in trc.
+ unsigned getCapacity(const TargetRegisterClass *trc) const;
+
+ /// \brief Return the number of registers of class trc that may be
+ /// needed at slot i.
+ unsigned getPressureAtSlot(const TargetRegisterClass *trc,
+ SlotIndex i) const;
+
+ /// \brief Return true if the number of registers of type trc that may be
+ /// needed at slot i is greater than the capacity of trc.
+ bool classOverCapacityAtSlot(const TargetRegisterClass *trc,
+ SlotIndex i) const;
+
+ private:
+
+ MachineFunction *mf;
+ MachineRegisterInfo *mri;
+ const TargetRegisterInfo *tri;
+ LiveIntervals *lis;
+
+ typedef std::map<const TargetRegisterClass*, unsigned> WorstMapLine;
+ typedef std::map<const TargetRegisterClass*, WorstMapLine> VRWorstMap;
+ VRWorstMap vrWorst;
+
+ typedef std::map<unsigned, WorstMapLine> PRWorstMap;
+ PRWorstMap prWorst;
+
+ typedef std::map<const TargetRegisterClass*, unsigned> CapacityMap;
+ CapacityMap capacityMap;
+
+ typedef std::map<const TargetRegisterClass*, unsigned> PressureMapLine;
+ typedef std::map<SlotIndex, PressureMapLine> PressureMap;
+ PressureMap pressureMap;
+
+ bool mapsPopulated;
+
+ /// \brief Initialise the 'worst' table.
+ void initWorst();
+
+ /// \brief Initialise the 'capacity' table.
+ void initCapacity();
+
+ /// \brief Initialise/Reset the 'pressure' and live states tables.
+ void resetPressureAndLiveStates();
+ };
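+
+ // As computed in RenderMachineFunction.cpp, the pressure recorded for a
+ // slot i and class trc is the sum of getWorst(li->reg, trc) over every
+ // virtual interval li live at i; classOverCapacityAtSlot(trc, i) then
+ // simply compares that sum against getCapacity(trc).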
+
+ /// \brief Render MachineFunction objects and related information to a HTML
+ /// page.
+ class RenderMachineFunction : public MachineFunctionPass {
+ public:
+ static char ID;
+
+ RenderMachineFunction() : MachineFunctionPass(ID) {}
+
+ virtual void getAnalysisUsage(AnalysisUsage &au) const;
+
+ virtual bool runOnMachineFunction(MachineFunction &fn);
+
+ virtual void releaseMemory();
+
+ void rememberUseDefs(const LiveInterval *li);
+
+ void rememberSpills(const LiveInterval *li,
+ const std::vector<LiveInterval*> &spills);
+
+ bool isSpill(const LiveInterval *li) const;
+
+ /// \brief Render this machine function to HTML.
+ ///
+ /// @param renderContextStr This parameter will be included in the top of
+ /// the html file to explain where (in the
+ /// codegen pipeline) this function was rendered
+ /// from. Set it to something like
+ /// "Pre-register-allocation".
+ /// @param vrm If non-null the VRM will be queried to determine
+ /// whether a virtual register was allocated to a
+ /// physical register or spilled.
+ /// @param renderSuffix This string will be appended to the function
+ /// name (before the output file suffix) to enable
+ /// multiple renderings from the same function.
+ void renderMachineFunction(const char *renderContextStr,
+ const VirtRegMap *vrm = 0,
+ const char *renderSuffix = 0);
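+
+ // Illustrative usage sketch (the calling pass and its VirtRegMap *vrm are
+ // assumed for the example, not part of this interface): an allocator that
+ // has addRequired<RenderMachineFunction>() in its getAnalysisUsage could do
+ //
+ //   RenderMachineFunction *rmf = &getAnalysis<RenderMachineFunction>();
+ //   rmf->renderMachineFunction("Pre alloc");
+ //   // ... allocate registers, populating vrm ...
+ //   rmf->renderMachineFunction("Post alloc", vrm, ".post");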
+
+ private:
+ class Spacer;
+ friend raw_ostream& operator<<(raw_ostream &os, const Spacer &s);
+
+ std::string fqn;
+
+ MachineFunction *mf;
+ MachineRegisterInfo *mri;
+ const TargetRegisterInfo *tri;
+ LiveIntervals *lis;
+ SlotIndexes *sis;
+ const VirtRegMap *vrm;
+
+ TargetRegisterExtraInfo trei;
+ MFRenderingOptions ro;
+
+
+
+ // Utilities.
+ typedef enum { Dead, Defined, Used, AliveReg, AliveStack } LiveState;
+ LiveState getLiveStateAt(const LiveInterval *li, SlotIndex i) const;
+
+ typedef enum { Zero, Low, High } PressureState;
+ PressureState getPressureStateAt(const TargetRegisterClass *trc,
+ SlotIndex i) const;
+
+ typedef std::map<const LiveInterval*, std::set<const LiveInterval*> >
+ SpillIntervals;
+ SpillIntervals spillIntervals;
+
+ typedef std::map<const LiveInterval*, const LiveInterval*> SpillForMap;
+ SpillForMap spillFor;
+
+ typedef std::set<SlotIndex> SlotSet;
+ typedef std::map<const LiveInterval*, SlotSet> UseDefs;
+ UseDefs useDefs;
+
+ // ---------- Rendering methods ----------
+
+ /// For inserting spaces when pretty printing.
+ class Spacer {
+ public:
+ explicit Spacer(unsigned numSpaces) : ns(numSpaces) {}
+ Spacer operator+(const Spacer &o) const { return Spacer(ns + o.ns); }
+ void print(raw_ostream &os) const;
+ private:
+ unsigned ns;
+ };
+
+ Spacer s(unsigned ns) const;
+
+ template <typename Iterator>
+ std::string escapeChars(Iterator sBegin, Iterator sEnd) const;
+
+ /// \brief Render a machine instruction.
+ void renderMachineInstr(raw_ostream &os,
+ const MachineInstr *mi) const;
+
+ /// \brief Render vertical text.
+ template <typename T>
+ void renderVertical(const Spacer &indent,
+ raw_ostream &os,
+ const T &t) const;
+
+ /// \brief Insert CSS layout info.
+ void insertCSS(const Spacer &indent,
+ raw_ostream &os) const;
+
+ /// \brief Render a brief summary of the function (including rendering
+ /// context).
+ void renderFunctionSummary(const Spacer &indent,
+ raw_ostream &os,
+ const char * const renderContextStr) const;
+
+ /// \brief Render a legend for the pressure table.
+ void renderPressureTableLegend(const Spacer &indent,
+ raw_ostream &os) const;
+
+ /// \brief Render a consecutive set of HTML cells of the same class using
+ /// the colspan attribute for run-length encoding.
+ template <typename CellType>
+ void renderCellsWithRLE(
+ const Spacer &indent, raw_ostream &os,
+ const std::pair<CellType, unsigned> &rleAccumulator,
+ const std::map<CellType, std::string> &cellTypeStrs) const;
+
+ /// \brief Render code listing, potentially with register pressure
+ /// and live intervals shown alongside.
+ void renderCodeTablePlusPI(const Spacer &indent,
+ raw_ostream &os) const;
+
+ /// \brief Render the HTML page representing the MachineFunction.
+ void renderFunctionPage(raw_ostream &os,
+ const char * const renderContextStr) const;
+
+ std::string escapeChars(const std::string &s) const;
+ };
+}
+
+#endif /* LLVM_CODEGEN_RENDERMACHINEFUNCTION_H */
diff --git a/libclamav/c++/llvm/lib/CodeGen/ScheduleDAG.cpp b/libclamav/c++/llvm/lib/CodeGen/ScheduleDAG.cpp
index 1f3e295..7d39dc4 100644
--- a/libclamav/c++/llvm/lib/CodeGen/ScheduleDAG.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/ScheduleDAG.cpp
@@ -27,9 +27,7 @@ ScheduleDAG::ScheduleDAG(MachineFunction &mf)
: TM(mf.getTarget()),
TII(TM.getInstrInfo()),
TRI(TM.getRegisterInfo()),
- TLI(TM.getTargetLowering()),
MF(mf), MRI(mf.getRegInfo()),
- ConstPool(MF.getConstantPool()),
EntrySU(), ExitSU() {
}
@@ -382,26 +380,26 @@ void ScheduleDAG::VerifySchedule(bool isBottomUp) {
}
#endif
-/// InitDAGTopologicalSorting - create the initial topological
+/// InitDAGTopologicalSorting - create the initial topological
/// ordering from the DAG to be scheduled.
///
-/// The idea of the algorithm is taken from
+/// The idea of the algorithm is taken from
/// "Online algorithms for managing the topological order of
/// a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly
-/// This is the MNR algorithm, which was first introduced by
-/// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
+/// This is the MNR algorithm, which was first introduced by
+/// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
/// "Maintaining a topological order under edge insertions".
///
-/// Short description of the algorithm:
+/// Short description of the algorithm:
///
/// Topological ordering, ord, of a DAG maps each node to a topological
/// index so that for all edges X->Y it is the case that ord(X) < ord(Y).
///
-/// This means that if there is a path from the node X to the node Z,
+/// This means that if there is a path from the node X to the node Z,
/// then ord(X) < ord(Z).
///
/// This property can be used to check for reachability of nodes:
-/// if Z is reachable from X, then an insertion of the edge Z->X would
+/// if Z is reachable from X, then an insertion of the edge Z->X would
/// create a cycle.
///
/// The algorithm first computes a topological ordering for the DAG by
@@ -433,7 +431,7 @@ void ScheduleDAGTopologicalSort::InitDAGTopologicalSorting() {
// Collect leaf nodes.
WorkList.push_back(SU);
}
- }
+ }
int Id = DAGSize;
while (!WorkList.empty()) {
@@ -458,7 +456,7 @@ void ScheduleDAGTopologicalSort::InitDAGTopologicalSorting() {
SUnit *SU = &SUnits[i];
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
- assert(Node2Index[SU->NodeNum] > Node2Index[I->getSUnit()->NodeNum] &&
+ assert(Node2Index[SU->NodeNum] > Node2Index[I->getSUnit()->NodeNum] &&
"Wrong topological sorting");
}
}
@@ -496,7 +494,7 @@ void ScheduleDAGTopologicalSort::RemovePred(SUnit *M, SUnit *N) {
void ScheduleDAGTopologicalSort::DFS(const SUnit *SU, int UpperBound,
bool& HasLoop) {
std::vector<const SUnit*> WorkList;
- WorkList.reserve(SUnits.size());
+ WorkList.reserve(SUnits.size());
WorkList.push_back(SU);
do {
@@ -506,20 +504,20 @@ void ScheduleDAGTopologicalSort::DFS(const SUnit *SU, int UpperBound,
for (int I = SU->Succs.size()-1; I >= 0; --I) {
int s = SU->Succs[I].getSUnit()->NodeNum;
if (Node2Index[s] == UpperBound) {
- HasLoop = true;
+ HasLoop = true;
return;
}
// Visit successors if not already and in affected region.
if (!Visited.test(s) && Node2Index[s] < UpperBound) {
WorkList.push_back(SU->Succs[I].getSUnit());
- }
- }
+ }
+ }
} while (!WorkList.empty());
}
-/// Shift - Renumber the nodes so that the topological ordering is
+/// Shift - Renumber the nodes so that the topological ordering is
/// preserved.
-void ScheduleDAGTopologicalSort::Shift(BitVector& Visited, int LowerBound,
+void ScheduleDAGTopologicalSort::Shift(BitVector& Visited, int LowerBound,
int UpperBound) {
std::vector<int> L;
int shift = 0;
@@ -570,7 +568,7 @@ bool ScheduleDAGTopologicalSort::IsReachable(const SUnit *SU,
// Is Ord(TargetSU) < Ord(SU) ?
if (LowerBound < UpperBound) {
Visited.reset();
- // There may be a path from TargetSU to SU. Check for it.
+ // There may be a path from TargetSU to SU. Check for it.
DFS(TargetSU, UpperBound, HasLoop);
}
return HasLoop;
@@ -582,8 +580,7 @@ void ScheduleDAGTopologicalSort::Allocate(int n, int index) {
Index2Node[index] = n;
}
-ScheduleDAGTopologicalSort::ScheduleDAGTopologicalSort(
- std::vector<SUnit> &sunits)
- : SUnits(sunits) {}
+ScheduleDAGTopologicalSort::
+ScheduleDAGTopologicalSort(std::vector<SUnit> &sunits) : SUnits(sunits) {}
ScheduleHazardRecognizer::~ScheduleHazardRecognizer() {}
diff --git a/libclamav/c++/llvm/lib/CodeGen/ScheduleDAGEmit.cpp b/libclamav/c++/llvm/lib/CodeGen/ScheduleDAGEmit.cpp
index 8e03420..0a2fb37 100644
--- a/libclamav/c++/llvm/lib/CodeGen/ScheduleDAGEmit.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/ScheduleDAGEmit.cpp
@@ -50,10 +50,8 @@ void ScheduleDAG::EmitPhysRegCopy(SUnit *SU,
break;
}
}
- bool Success = TII->copyRegToReg(*BB, InsertPos, Reg, VRI->second,
- SU->CopyDstRC, SU->CopySrcRC);
- (void)Success;
- assert(Success && "copyRegToReg failed!");
+ BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), Reg)
+ .addReg(VRI->second);
} else {
// Copy from physical register.
assert(I->getReg() && "Unknown physical register!");
@@ -61,10 +59,8 @@ void ScheduleDAG::EmitPhysRegCopy(SUnit *SU,
bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase)).second;
isNew = isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
- bool Success = TII->copyRegToReg(*BB, InsertPos, VRBase, I->getReg(),
- SU->CopyDstRC, SU->CopySrcRC);
- (void)Success;
- assert(Success && "copyRegToReg failed!");
+ BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), VRBase)
+ .addReg(I->getReg());
}
break;
}
diff --git a/libclamav/c++/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/libclamav/c++/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
index badf34e..ea93dd5 100644
--- a/libclamav/c++/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -32,8 +32,10 @@ using namespace llvm;
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
const MachineLoopInfo &mli,
const MachineDominatorTree &mdt)
- : ScheduleDAG(mf), MLI(mli), MDT(mdt), LoopRegs(MLI, MDT) {
+ : ScheduleDAG(mf), MLI(mli), MDT(mdt), Defs(TRI->getNumRegs()),
+ Uses(TRI->getNumRegs()), LoopRegs(MLI, MDT) {
MFI = mf.getFrameInfo();
+ DbgValueVec.clear();
}
/// Run - perform scheduling.
@@ -157,6 +159,11 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;
+ // Keep track of dangling debug references to registers.
+ std::vector<std::pair<MachineInstr*, unsigned> >
+ DanglingDebugValue(TRI->getNumRegs(),
+ std::make_pair(static_cast<MachineInstr*>(0), 0));
+
// Check to see if the scheduler cares about latencies.
bool UnitLatencies = ForceUnitLatencies();
@@ -164,10 +171,24 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
+ // Remove any stale debug info; sometimes BuildSchedGraph is called again
+ // without emitting the info from the previous call.
+ DbgValueVec.clear();
+
// Walk the list of instructions, from bottom moving up.
for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
MII != MIE; --MII) {
MachineInstr *MI = prior(MII);
+ // DBG_VALUE does not have SUnit's built, so just remember these for later
+ // reinsertion.
+ if (MI->isDebugValue()) {
+ if (MI->getNumOperands()==3 && MI->getOperand(0).isReg() &&
+ MI->getOperand(0).getReg())
+ DanglingDebugValue[MI->getOperand(0).getReg()] =
+ std::make_pair(MI, DbgValueVec.size());
+ DbgValueVec.push_back(MI);
+ continue;
+ }
const TargetInstrDesc &TID = MI->getDesc();
assert(!TID.isTerminator() && !MI->isLabel() &&
"Cannot schedule terminators or labels!");
@@ -188,6 +209,13 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
if (Reg == 0) continue;
assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
+
+ if (MO.isDef() && DanglingDebugValue[Reg].first!=0) {
+ SU->DbgInstrList.push_back(DanglingDebugValue[Reg].first);
+ DbgValueVec[DanglingDebugValue[Reg].second] = 0;
+ DanglingDebugValue[Reg] = std::make_pair((MachineInstr*)0, 0);
+ }
+
std::vector<SUnit *> &UseList = Uses[Reg];
std::vector<SUnit *> &DefList = Defs[Reg];
// Optionally add output and anti dependencies. For anti
@@ -221,48 +249,47 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
unsigned DataLatency = SU->Latency;
for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
SUnit *UseSU = UseList[i];
- if (UseSU != SU) {
- unsigned LDataLatency = DataLatency;
- // Optionally add in a special extra latency for nodes that
- // feed addresses.
- // TODO: Do this for register aliases too.
- // TODO: Perhaps we should get rid of
- // SpecialAddressLatency and just move this into
- // adjustSchedDependency for the targets that care about
- // it.
- if (SpecialAddressLatency != 0 && !UnitLatencies) {
- MachineInstr *UseMI = UseSU->getInstr();
- const TargetInstrDesc &UseTID = UseMI->getDesc();
- int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
- assert(RegUseIndex >= 0 && "UseMI doesn's use register!");
- if ((UseTID.mayLoad() || UseTID.mayStore()) &&
- (unsigned)RegUseIndex < UseTID.getNumOperands() &&
- UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
- LDataLatency += SpecialAddressLatency;
- }
- // Adjust the dependence latency using operand def/use
- // information (if any), and then allow the target to
- // perform its own adjustments.
- const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
- if (!UnitLatencies) {
- ComputeOperandLatency(SU, UseSU, (SDep &)dep);
- ST.adjustSchedDependency(SU, UseSU, (SDep &)dep);
- }
- UseSU->addPred(dep);
+ if (UseSU == SU)
+ continue;
+ unsigned LDataLatency = DataLatency;
+ // Optionally add in a special extra latency for nodes that
+ // feed addresses.
+ // TODO: Do this for register aliases too.
+ // TODO: Perhaps we should get rid of
+ // SpecialAddressLatency and just move this into
+ // adjustSchedDependency for the targets that care about it.
+ if (SpecialAddressLatency != 0 && !UnitLatencies) {
+ MachineInstr *UseMI = UseSU->getInstr();
+ const TargetInstrDesc &UseTID = UseMI->getDesc();
+ int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
+ assert(RegUseIndex >= 0 && "UseMI doesn's use register!");
+ if ((UseTID.mayLoad() || UseTID.mayStore()) &&
+ (unsigned)RegUseIndex < UseTID.getNumOperands() &&
+ UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
+ LDataLatency += SpecialAddressLatency;
}
+ // Adjust the dependence latency using operand def/use
+ // information (if any), and then allow the target to
+ // perform its own adjustments.
+ const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
+ if (!UnitLatencies) {
+ ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
+ ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
+ }
+ UseSU->addPred(dep);
}
for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
std::vector<SUnit *> &UseList = Uses[*Alias];
for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
SUnit *UseSU = UseList[i];
- if (UseSU != SU) {
- const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
- if (!UnitLatencies) {
- ComputeOperandLatency(SU, UseSU, (SDep &)dep);
- ST.adjustSchedDependency(SU, UseSU, (SDep &)dep);
- }
- UseSU->addPred(dep);
+ if (UseSU == SU)
+ continue;
+ const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
+ if (!UnitLatencies) {
+ ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
+ ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
}
+ UseSU->addPred(dep);
}
}
@@ -501,7 +528,8 @@ void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
MachineInstr *DefMI = Def->getInstr();
int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
if (DefIdx != -1) {
- int DefCycle = InstrItins.getOperandCycle(DefMI->getDesc().getSchedClass(), DefIdx);
+ int DefCycle = InstrItins.getOperandCycle(DefMI->getDesc().getSchedClass(),
+ DefIdx);
if (DefCycle >= 0) {
MachineInstr *UseMI = Use->getInstr();
const unsigned UseClass = UseMI->getDesc().getSchedClass();
@@ -545,8 +573,7 @@ std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
}
// EmitSchedule - Emit the machine code in scheduled order.
-MachineBasicBlock *ScheduleDAGInstrs::
-EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
+MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
// For MachineInstr-based scheduling, we're rescheduling the instructions in
// the block, so start by removing them from the block.
while (Begin != InsertPos) {
@@ -555,6 +582,14 @@ EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
BB->remove(I);
}
+ // First reinsert any remaining debug_values; these are either constants,
+ // or refer to live-in registers. The beginning of the block is the right
+ // place for the latter. The former might reasonably be placed elsewhere
+ // using some kind of ordering algorithm, but right now it doesn't matter.
+ for (int i = DbgValueVec.size()-1; i>=0; --i)
+ if (DbgValueVec[i])
+ BB->insert(InsertPos, DbgValueVec[i]);
+
// Then re-insert them according to the given schedule.
for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
SUnit *SU = Sequence[i];
@@ -565,12 +600,21 @@ EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
}
BB->insert(InsertPos, SU->getInstr());
+ for (unsigned i = 0, e = SU->DbgInstrList.size() ; i < e ; ++i)
+ BB->insert(InsertPos, SU->DbgInstrList[i]);
}
// Update the Begin iterator, as the first instruction in the block
// may have been scheduled later.
- if (!Sequence.empty())
+ if (!DbgValueVec.empty()) {
+ for (int i = DbgValueVec.size()-1; i>=0; --i)
+ if (DbgValueVec[i]!=0) {
+ Begin = DbgValueVec[DbgValueVec.size()-1];
+ break;
+ }
+ } else if (!Sequence.empty())
Begin = Sequence[0]->getInstr();
+ DbgValueVec.clear();
return BB;
}
diff --git a/libclamav/c++/llvm/lib/CodeGen/ScheduleDAGInstrs.h b/libclamav/c++/llvm/lib/CodeGen/ScheduleDAGInstrs.h
index 366c3a8..c8f543f 100644
--- a/libclamav/c++/llvm/lib/CodeGen/ScheduleDAGInstrs.h
+++ b/libclamav/c++/llvm/lib/CodeGen/ScheduleDAGInstrs.h
@@ -20,7 +20,6 @@
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include <map>
@@ -33,7 +32,7 @@ namespace llvm {
/// For example, loop induction variable increments should be
/// scheduled as soon as possible after the variable's last use.
///
- class VISIBILITY_HIDDEN LoopDependencies {
+ class LLVM_LIBRARY_VISIBILITY LoopDependencies {
const MachineLoopInfo &MLI;
const MachineDominatorTree &MDT;
@@ -70,8 +69,10 @@ namespace llvm {
const SmallSet<unsigned, 8> &LoopLiveIns) {
unsigned Count = 0;
for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
- I != E; ++I, ++Count) {
+ I != E; ++I) {
const MachineInstr *MI = I;
+ if (MI->isDebugValue())
+ continue;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isUse())
@@ -80,6 +81,7 @@ namespace llvm {
if (LoopLiveIns.count(MOReg))
Deps.insert(std::make_pair(MOReg, std::make_pair(&MO, Count)));
}
+ ++Count; // Not every iteration due to dbg_value above.
}
const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
@@ -95,7 +97,7 @@ namespace llvm {
/// ScheduleDAGInstrs - A ScheduleDAG subclass for scheduling lists of
/// MachineInstrs.
- class VISIBILITY_HIDDEN ScheduleDAGInstrs : public ScheduleDAG {
+ class LLVM_LIBRARY_VISIBILITY ScheduleDAGInstrs : public ScheduleDAG {
const MachineLoopInfo &MLI;
const MachineDominatorTree &MDT;
const MachineFrameInfo *MFI;
@@ -104,8 +106,12 @@ namespace llvm {
/// are as we iterate upward through the instructions. This is allocated
/// here instead of inside BuildSchedGraph to avoid the need for it to be
/// initialized and destructed for each block.
- std::vector<SUnit *> Defs[TargetRegisterInfo::FirstVirtualRegister];
- std::vector<SUnit *> Uses[TargetRegisterInfo::FirstVirtualRegister];
+ std::vector<std::vector<SUnit *> > Defs;
+ std::vector<std::vector<SUnit *> > Uses;
+
+ /// DbgValueVec - Remember DBG_VALUEs that refer to a particular
+ /// register.
+ std::vector<MachineInstr *>DbgValueVec;
/// PendingLoads - Remember where unknown loads are after the most recent
/// unknown store, as we iterate. As with Defs and Uses, this is here
@@ -167,8 +173,7 @@ namespace llvm {
virtual void ComputeOperandLatency(SUnit *Def, SUnit *Use,
SDep& dep) const;
- virtual MachineBasicBlock*
- EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*>*);
+ virtual MachineBasicBlock *EmitSchedule();
/// StartBlock - Prepare to perform scheduling in the given block.
///
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/CMakeLists.txt b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/CMakeLists.txt
index 80c7d7c..799988a 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/CMakeLists.txt
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/CMakeLists.txt
@@ -1,5 +1,4 @@
add_llvm_library(LLVMSelectionDAG
- CallingConvLower.cpp
DAGCombiner.cpp
FastISel.cpp
FunctionLoweringInfo.cpp
@@ -20,6 +19,7 @@ add_llvm_library(LLVMSelectionDAG
SelectionDAGISel.cpp
SelectionDAGPrinter.cpp
TargetLowering.cpp
+ TargetSelectionDAGInfo.cpp
)
target_link_libraries (LLVMSelectionDAG LLVMAnalysis LLVMAsmPrinter LLVMCodeGen)
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/CallingConvLower.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/CallingConvLower.cpp
deleted file mode 100644
index 4e6c1fc..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/CallingConvLower.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-//===-- CallingConvLower.cpp - Calling Conventions ------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the CCState class, used for lowering and implementing
-// calling conventions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetMachine.h"
-using namespace llvm;
-
-CCState::CCState(CallingConv::ID CC, bool isVarArg, const TargetMachine &tm,
- SmallVector<CCValAssign, 16> &locs, LLVMContext &C)
- : CallingConv(CC), IsVarArg(isVarArg), TM(tm),
- TRI(*TM.getRegisterInfo()), Locs(locs), Context(C) {
- // No stack is used.
- StackOffset = 0;
-
- UsedRegs.resize((TRI.getNumRegs()+31)/32);
-}
-
-// HandleByVal - Allocate a stack slot large enough to pass an argument by
-// value. The size and alignment information of the argument is encoded in its
-// parameter attribute.
-void CCState::HandleByVal(unsigned ValNo, EVT ValVT,
- EVT LocVT, CCValAssign::LocInfo LocInfo,
- int MinSize, int MinAlign,
- ISD::ArgFlagsTy ArgFlags) {
- unsigned Align = ArgFlags.getByValAlign();
- unsigned Size = ArgFlags.getByValSize();
- if (MinSize > (int)Size)
- Size = MinSize;
- if (MinAlign > (int)Align)
- Align = MinAlign;
- unsigned Offset = AllocateStack(Size, Align);
-
- addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
-}
-
-/// MarkAllocated - Mark a register and all of its aliases as allocated.
-void CCState::MarkAllocated(unsigned Reg) {
- UsedRegs[Reg/32] |= 1 << (Reg&31);
-
- if (const unsigned *RegAliases = TRI.getAliasSet(Reg))
- for (; (Reg = *RegAliases); ++RegAliases)
- UsedRegs[Reg/32] |= 1 << (Reg&31);
-}
-
-/// AnalyzeFormalArguments - Analyze an array of argument values,
-/// incorporating info about the formals into this state.
-void
-CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
- CCAssignFn Fn) {
- unsigned NumArgs = Ins.size();
-
- for (unsigned i = 0; i != NumArgs; ++i) {
- EVT ArgVT = Ins[i].VT;
- ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
- if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
-#ifndef NDEBUG
- dbgs() << "Formal argument #" << i << " has unhandled type "
- << ArgVT.getEVTString();
-#endif
- llvm_unreachable(0);
- }
- }
-}
-
-/// CheckReturn - Analyze the return values of a function, returning true if
-/// the return can be performed without sret-demotion, and false otherwise.
-bool CCState::CheckReturn(const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
- CCAssignFn Fn) {
- // Determine which register each value should be copied into.
- for (unsigned i = 0, e = OutTys.size(); i != e; ++i) {
- EVT VT = OutTys[i];
- ISD::ArgFlagsTy ArgFlags = ArgsFlags[i];
- if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
- return false;
- }
- return true;
-}
-
-/// AnalyzeReturn - Analyze the returned values of a return,
-/// incorporating info about the result values into this state.
-void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
- CCAssignFn Fn) {
- // Determine which register each value should be copied into.
- for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
- EVT VT = Outs[i].Val.getValueType();
- ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
- if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) {
-#ifndef NDEBUG
- dbgs() << "Return operand #" << i << " has unhandled type "
- << VT.getEVTString();
-#endif
- llvm_unreachable(0);
- }
- }
-}
-
-
-/// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
-/// incorporating info about the passed values into this state.
-void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
- CCAssignFn Fn) {
- unsigned NumOps = Outs.size();
- for (unsigned i = 0; i != NumOps; ++i) {
- EVT ArgVT = Outs[i].Val.getValueType();
- ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
- if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
-#ifndef NDEBUG
- dbgs() << "Call operand #" << i << " has unhandled type "
- << ArgVT.getEVTString();
-#endif
- llvm_unreachable(0);
- }
- }
-}
-
-/// AnalyzeCallOperands - Same as above except it takes vectors of types
-/// and argument flags.
-void CCState::AnalyzeCallOperands(SmallVectorImpl<EVT> &ArgVTs,
- SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
- CCAssignFn Fn) {
- unsigned NumOps = ArgVTs.size();
- for (unsigned i = 0; i != NumOps; ++i) {
- EVT ArgVT = ArgVTs[i];
- ISD::ArgFlagsTy ArgFlags = Flags[i];
- if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
-#ifndef NDEBUG
- dbgs() << "Call operand #" << i << " has unhandled type "
- << ArgVT.getEVTString();
-#endif
- llvm_unreachable(0);
- }
- }
-}
-
-/// AnalyzeCallResult - Analyze the return values of a call,
-/// incorporating info about the passed values into this state.
-void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
- CCAssignFn Fn) {
- for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
- EVT VT = Ins[i].VT;
- ISD::ArgFlagsTy Flags = Ins[i].Flags;
- if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
-#ifndef NDEBUG
- dbgs() << "Call result #" << i << " has unhandled type "
- << VT.getEVTString();
-#endif
- llvm_unreachable(0);
- }
- }
-}
-
-/// AnalyzeCallResult - Same as above except it's specialized for calls which
-/// produce a single value.
-void CCState::AnalyzeCallResult(EVT VT, CCAssignFn Fn) {
- if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
-#ifndef NDEBUG
- dbgs() << "Call result has unhandled type "
- << VT.getEVTString();
-#endif
- llvm_unreachable(0);
- }
-}
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index fc43305..c9c4d91 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -129,6 +129,14 @@ namespace {
bool CombineToPreIndexedLoadStore(SDNode *N);
bool CombineToPostIndexedLoadStore(SDNode *N);
+ void ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad);
+ SDValue PromoteOperand(SDValue Op, EVT PVT, bool &Replace);
+ SDValue SExtPromoteOperand(SDValue Op, EVT PVT);
+ SDValue ZExtPromoteOperand(SDValue Op, EVT PVT);
+ SDValue PromoteIntBinOp(SDValue Op);
+ SDValue PromoteIntShiftOp(SDValue Op);
+ SDValue PromoteExtend(SDValue Op);
+ bool PromoteLoad(SDValue Op);
/// combine - call the node-specific routine that knows how to fold each
/// particular type of node. If that doesn't do anything, try the
@@ -203,6 +211,7 @@ namespace {
SDValue visitBUILD_VECTOR(SDNode *N);
SDValue visitCONCAT_VECTORS(SDNode *N);
SDValue visitVECTOR_SHUFFLE(SDNode *N);
+ SDValue visitMEMBARRIER(SDNode *N);
SDValue XformToShuffleWithZero(SDNode *N);
SDValue ReassociateOps(unsigned Opc, DebugLoc DL, SDValue LHS, SDValue RHS);
@@ -254,24 +263,28 @@ namespace {
/// looking for a better chain (aliasing node.)
SDValue FindBetterChain(SDNode *N, SDValue Chain);
- /// getShiftAmountTy - Returns a type large enough to hold any valid
- /// shift amount - before type legalization these can be huge.
- EVT getShiftAmountTy() {
- return LegalTypes ? TLI.getShiftAmountTy() : TLI.getPointerTy();
- }
-
-public:
+ public:
DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
- : DAG(D),
- TLI(D.getTargetLoweringInfo()),
- Level(Unrestricted),
- OptLevel(OL),
- LegalOperations(false),
- LegalTypes(false),
- AA(A) {}
+ : DAG(D), TLI(D.getTargetLoweringInfo()), Level(Unrestricted),
+ OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) {}
/// Run - runs the dag combiner on all nodes in the work list
void Run(CombineLevel AtLevel);
+
+ SelectionDAG &getDAG() const { return DAG; }
+
+ /// getShiftAmountTy - Returns a type large enough to hold any valid
+ /// shift amount - before type legalization these can be huge.
+ EVT getShiftAmountTy() {
+ return LegalTypes ? TLI.getShiftAmountTy() : TLI.getPointerTy();
+ }
+
+ /// isTypeLegal - This method returns true if we are running before type
+ /// legalization or if the specified VT is legal.
+ bool isTypeLegal(const EVT &VT) {
+ if (!LegalTypes) return true;
+ return TLI.isTypeLegal(VT);
+ }
};
}
@@ -577,9 +590,8 @@ SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
return SDValue(N, 0);
}
-void
-DAGCombiner::CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &
- TLO) {
+void DAGCombiner::
+CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
// Replace all uses. If any nodes become isomorphic to other nodes and
// are deleted, make sure to remove them from our worklist.
WorkListRemover DeadNodes(*this);
@@ -609,7 +621,7 @@ DAGCombiner::CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &
/// it can be simplified or if things it uses can be simplified by bit
/// propagation. If so, return true.
bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
- TargetLowering::TargetLoweringOpt TLO(DAG);
+ TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
APInt KnownZero, KnownOne;
if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
return false;
@@ -629,6 +641,280 @@ bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
return true;
}
+void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) {
+ DebugLoc dl = Load->getDebugLoc();
+ EVT VT = Load->getValueType(0);
+ SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, VT, SDValue(ExtLoad, 0));
+
+ DEBUG(dbgs() << "\nReplacing.9 ";
+ Load->dump(&DAG);
+ dbgs() << "\nWith: ";
+ Trunc.getNode()->dump(&DAG);
+ dbgs() << '\n');
+ WorkListRemover DeadNodes(*this);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc, &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1),
+ &DeadNodes);
+ removeFromWorkList(Load);
+ DAG.DeleteNode(Load);
+ AddToWorkList(Trunc.getNode());
+}
+
+SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
+ Replace = false;
+ DebugLoc dl = Op.getDebugLoc();
+ if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
+ EVT MemVT = LD->getMemoryVT();
+ ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
+ ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD : ISD::EXTLOAD)
+ : LD->getExtensionType();
+ Replace = true;
+ return DAG.getExtLoad(ExtType, PVT, dl,
+ LD->getChain(), LD->getBasePtr(),
+ LD->getSrcValue(), LD->getSrcValueOffset(),
+ MemVT, LD->isVolatile(),
+ LD->isNonTemporal(), LD->getAlignment());
+ }
+
+ unsigned Opc = Op.getOpcode();
+ switch (Opc) {
+ default: break;
+ case ISD::AssertSext:
+ return DAG.getNode(ISD::AssertSext, dl, PVT,
+ SExtPromoteOperand(Op.getOperand(0), PVT),
+ Op.getOperand(1));
+ case ISD::AssertZext:
+ return DAG.getNode(ISD::AssertZext, dl, PVT,
+ ZExtPromoteOperand(Op.getOperand(0), PVT),
+ Op.getOperand(1));
+ case ISD::Constant: {
+ unsigned ExtOpc =
+ Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
+ return DAG.getNode(ExtOpc, dl, PVT, Op);
+ }
+ }
+
+ if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT))
+ return SDValue();
+ return DAG.getNode(ISD::ANY_EXTEND, dl, PVT, Op);
+}
+
+SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) {
+ if (!TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, PVT))
+ return SDValue();
+ EVT OldVT = Op.getValueType();
+ DebugLoc dl = Op.getDebugLoc();
+ bool Replace = false;
+ SDValue NewOp = PromoteOperand(Op, PVT, Replace);
+ if (NewOp.getNode() == 0)
+ return SDValue();
+ AddToWorkList(NewOp.getNode());
+
+ if (Replace)
+ ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
+ return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, NewOp.getValueType(), NewOp,
+ DAG.getValueType(OldVT));
+}
+
+SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) {
+ EVT OldVT = Op.getValueType();
+ DebugLoc dl = Op.getDebugLoc();
+ bool Replace = false;
+ SDValue NewOp = PromoteOperand(Op, PVT, Replace);
+ if (NewOp.getNode() == 0)
+ return SDValue();
+ AddToWorkList(NewOp.getNode());
+
+ if (Replace)
+ ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode());
+ return DAG.getZeroExtendInReg(NewOp, dl, OldVT);
+}
+
+/// PromoteIntBinOp - Promote the specified integer binary operation if the
+/// target indicates it is beneficial. e.g. On x86, it's usually better to
+/// promote i16 operations to i32 since i16 instructions are longer.
+SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) {
+ if (!LegalOperations)
+ return SDValue();
+
+ EVT VT = Op.getValueType();
+ if (VT.isVector() || !VT.isInteger())
+ return SDValue();
+
+ // If operation type is 'undesirable', e.g. i16 on x86, consider
+ // promoting it.
+ unsigned Opc = Op.getOpcode();
+ if (TLI.isTypeDesirableForOp(Opc, VT))
+ return SDValue();
+
+ EVT PVT = VT;
+ // Consult target whether it is a good idea to promote this operation and
+ // what's the right type to promote it to.
+ if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
+ assert(PVT != VT && "Don't know what type to promote to!");
+
+ bool Replace0 = false;
+ SDValue N0 = Op.getOperand(0);
+ SDValue NN0 = PromoteOperand(N0, PVT, Replace0);
+ if (NN0.getNode() == 0)
+ return SDValue();
+
+ bool Replace1 = false;
+ SDValue N1 = Op.getOperand(1);
+ SDValue NN1;
+ if (N0 == N1)
+ NN1 = NN0;
+ else {
+ NN1 = PromoteOperand(N1, PVT, Replace1);
+ if (NN1.getNode() == 0)
+ return SDValue();
+ }
+
+ AddToWorkList(NN0.getNode());
+ if (NN1.getNode())
+ AddToWorkList(NN1.getNode());
+
+ if (Replace0)
+ ReplaceLoadWithPromotedLoad(N0.getNode(), NN0.getNode());
+ if (Replace1)
+ ReplaceLoadWithPromotedLoad(N1.getNode(), NN1.getNode());
+
+ DEBUG(dbgs() << "\nPromoting ";
+ Op.getNode()->dump(&DAG));
+ DebugLoc dl = Op.getDebugLoc();
+ return DAG.getNode(ISD::TRUNCATE, dl, VT,
+ DAG.getNode(Opc, dl, PVT, NN0, NN1));
+ }
+ return SDValue();
+}
+
+/// PromoteIntShiftOp - Promote the specified integer shift operation if the
+/// target indicates it is beneficial. e.g. On x86, it's usually better to
+/// promote i16 operations to i32 since i16 instructions are longer.
+SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) {
+ if (!LegalOperations)
+ return SDValue();
+
+ EVT VT = Op.getValueType();
+ if (VT.isVector() || !VT.isInteger())
+ return SDValue();
+
+ // If operation type is 'undesirable', e.g. i16 on x86, consider
+ // promoting it.
+ unsigned Opc = Op.getOpcode();
+ if (TLI.isTypeDesirableForOp(Opc, VT))
+ return SDValue();
+
+ EVT PVT = VT;
+ // Consult target whether it is a good idea to promote this operation and
+ // what's the right type to promote it to.
+ if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
+ assert(PVT != VT && "Don't know what type to promote to!");
+
+ bool Replace = false;
+ SDValue N0 = Op.getOperand(0);
+ if (Opc == ISD::SRA)
+ N0 = SExtPromoteOperand(Op.getOperand(0), PVT);
+ else if (Opc == ISD::SRL)
+ N0 = ZExtPromoteOperand(Op.getOperand(0), PVT);
+ else
+ N0 = PromoteOperand(N0, PVT, Replace);
+ if (N0.getNode() == 0)
+ return SDValue();
+
+ AddToWorkList(N0.getNode());
+ if (Replace)
+ ReplaceLoadWithPromotedLoad(Op.getOperand(0).getNode(), N0.getNode());
+
+ DEBUG(dbgs() << "\nPromoting ";
+ Op.getNode()->dump(&DAG));
+ DebugLoc dl = Op.getDebugLoc();
+ return DAG.getNode(ISD::TRUNCATE, dl, VT,
+ DAG.getNode(Opc, dl, PVT, N0, Op.getOperand(1)));
+ }
+ return SDValue();
+}
+
+SDValue DAGCombiner::PromoteExtend(SDValue Op) {
+ if (!LegalOperations)
+ return SDValue();
+
+ EVT VT = Op.getValueType();
+ if (VT.isVector() || !VT.isInteger())
+ return SDValue();
+
+ // If operation type is 'undesirable', e.g. i16 on x86, consider
+ // promoting it.
+ unsigned Opc = Op.getOpcode();
+ if (TLI.isTypeDesirableForOp(Opc, VT))
+ return SDValue();
+
+ EVT PVT = VT;
+ // Consult target whether it is a good idea to promote this operation and
+ // what's the right type to promote it to.
+ if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
+ assert(PVT != VT && "Don't know what type to promote to!");
+ // fold (aext (aext x)) -> (aext x)
+ // fold (aext (zext x)) -> (zext x)
+ // fold (aext (sext x)) -> (sext x)
+ DEBUG(dbgs() << "\nPromoting ";
+ Op.getNode()->dump(&DAG));
+ return DAG.getNode(Op.getOpcode(), Op.getDebugLoc(), VT, Op.getOperand(0));
+ }
+ return SDValue();
+}
+
+bool DAGCombiner::PromoteLoad(SDValue Op) {
+ if (!LegalOperations)
+ return false;
+
+ EVT VT = Op.getValueType();
+ if (VT.isVector() || !VT.isInteger())
+ return false;
+
+ // If operation type is 'undesirable', e.g. i16 on x86, consider
+ // promoting it.
+ unsigned Opc = Op.getOpcode();
+ if (TLI.isTypeDesirableForOp(Opc, VT))
+ return false;
+
+ EVT PVT = VT;
+ // Consult target whether it is a good idea to promote this operation and
+ // what's the right type to promote it to.
+ if (TLI.IsDesirableToPromoteOp(Op, PVT)) {
+ assert(PVT != VT && "Don't know what type to promote to!");
+
+ DebugLoc dl = Op.getDebugLoc();
+ SDNode *N = Op.getNode();
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ EVT MemVT = LD->getMemoryVT();
+ ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
+ ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD : ISD::EXTLOAD)
+ : LD->getExtensionType();
+ SDValue NewLD = DAG.getExtLoad(ExtType, PVT, dl,
+ LD->getChain(), LD->getBasePtr(),
+ LD->getSrcValue(), LD->getSrcValueOffset(),
+ MemVT, LD->isVolatile(),
+ LD->isNonTemporal(), LD->getAlignment());
+ SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, VT, NewLD);
+
+ DEBUG(dbgs() << "\nPromoting ";
+ N->dump(&DAG);
+ dbgs() << "\nTo: ";
+ Result.getNode()->dump(&DAG);
+ dbgs() << '\n');
+ WorkListRemover DeadNodes(*this);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result, &DeadNodes);
+ DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1), &DeadNodes);
+ removeFromWorkList(N);
+ DAG.DeleteNode(N);
+ AddToWorkList(Result.getNode());
+ return true;
+ }
+ return false;
+}
+
+
//===----------------------------------------------------------------------===//
// Main DAG Combiner implementation
//===----------------------------------------------------------------------===//
@@ -732,7 +1018,7 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
}
SDValue DAGCombiner::visit(SDNode *N) {
- switch(N->getOpcode()) {
+ switch (N->getOpcode()) {
default: break;
case ISD::TokenFactor: return visitTokenFactor(N);
case ISD::MERGE_VALUES: return visitMERGE_VALUES(N);
@@ -794,6 +1080,7 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::BUILD_VECTOR: return visitBUILD_VECTOR(N);
case ISD::CONCAT_VECTORS: return visitCONCAT_VECTORS(N);
case ISD::VECTOR_SHUFFLE: return visitVECTOR_SHUFFLE(N);
+ case ISD::MEMBARRIER: return visitMEMBARRIER(N);
}
return SDValue();
}
@@ -817,6 +1104,35 @@ SDValue DAGCombiner::combine(SDNode *N) {
}
}
+ // If nothing happened still, try promoting the operation.
+ if (RV.getNode() == 0) {
+ switch (N->getOpcode()) {
+ default: break;
+ case ISD::ADD:
+ case ISD::SUB:
+ case ISD::MUL:
+ case ISD::AND:
+ case ISD::OR:
+ case ISD::XOR:
+ RV = PromoteIntBinOp(SDValue(N, 0));
+ break;
+ case ISD::SHL:
+ case ISD::SRA:
+ case ISD::SRL:
+ RV = PromoteIntShiftOp(SDValue(N, 0));
+ break;
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND:
+ case ISD::ANY_EXTEND:
+ RV = PromoteExtend(SDValue(N, 0));
+ break;
+ case ISD::LOAD:
+ if (PromoteLoad(SDValue(N, 0)))
+ RV = SDValue(N, 0);
+ break;
+ }
+ }
+
// If N is a commutative binary node, try commuting it to enable more
// sdisel CSE.
if (RV.getNode() == 0 &&
@@ -999,7 +1315,7 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
if (!LegalOperations && TLI.isOffsetFoldingLegal(GA) && N1C &&
GA->getOpcode() == ISD::GlobalAddress)
- return DAG.getGlobalAddress(GA->getGlobal(), VT,
+ return DAG.getGlobalAddress(GA->getGlobal(), N1C->getDebugLoc(), VT,
GA->getOffset() +
(uint64_t)N1C->getSExtValue());
// fold ((c1-A)+c2) -> (c1+c2)-A
@@ -1236,7 +1552,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) {
// fold (sub Sym, c) -> Sym-c
if (N1C && GA->getOpcode() == ISD::GlobalAddress)
- return DAG.getGlobalAddress(GA->getGlobal(), VT,
+ return DAG.getGlobalAddress(GA->getGlobal(), N1C->getDebugLoc(), VT,
GA->getOffset() -
(uint64_t)N1C->getSExtValue());
// fold (sub Sym+c1, Sym+c2) -> c1-c2
@@ -1714,15 +2030,20 @@ SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
// fold (OP (zext x), (zext y)) -> (zext (OP x, y))
// fold (OP (sext x), (sext y)) -> (sext (OP x, y))
// fold (OP (aext x), (aext y)) -> (aext (OP x, y))
- // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y))
+ // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y)) (if trunc isn't free)
//
// do not sink logical op inside of a vector extend, since it may combine
// into a vsetcc.
EVT Op0VT = N0.getOperand(0).getValueType();
if ((N0.getOpcode() == ISD::ZERO_EXTEND ||
- N0.getOpcode() == ISD::ANY_EXTEND ||
N0.getOpcode() == ISD::SIGN_EXTEND ||
- (N0.getOpcode() == ISD::TRUNCATE && TLI.isTypeLegal(Op0VT))) &&
+ // Avoid infinite looping with PromoteIntBinOp.
+ (N0.getOpcode() == ISD::ANY_EXTEND &&
+ (!LegalTypes || TLI.isTypeDesirableForOp(N->getOpcode(), Op0VT))) ||
+ (N0.getOpcode() == ISD::TRUNCATE &&
+ (!TLI.isZExtFree(VT, Op0VT) ||
+ !TLI.isTruncateFree(Op0VT, VT)) &&
+ TLI.isTypeLegal(Op0VT))) &&
!VT.isVector() &&
Op0VT == N1.getOperand(0).getValueType() &&
(!LegalOperations || TLI.isOperationLegal(N->getOpcode(), Op0VT))) {
@@ -1877,7 +2198,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
BitWidth - MemVT.getScalarType().getSizeInBits())) &&
((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) {
- SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N0.getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N0.getDebugLoc(),
LN0->getChain(), LN0->getBasePtr(),
LN0->getSrcValue(),
LN0->getSrcValueOffset(), MemVT,
@@ -1900,7 +2221,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
BitWidth - MemVT.getScalarType().getSizeInBits())) &&
((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) {
- SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N0.getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N0.getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(), MemVT,
@@ -1934,7 +2255,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT;
SDValue NewLoad =
- DAG.getExtLoad(ISD::ZEXTLOAD, LN0->getDebugLoc(), LoadResultTy,
+ DAG.getExtLoad(ISD::ZEXTLOAD, LoadResultTy, LN0->getDebugLoc(),
LN0->getChain(), LN0->getBasePtr(),
LN0->getSrcValue(), LN0->getSrcValueOffset(),
ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
@@ -1970,7 +2291,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT;
SDValue Load =
- DAG.getExtLoad(ISD::ZEXTLOAD, LN0->getDebugLoc(), LoadResultTy,
+ DAG.getExtLoad(ISD::ZEXTLOAD, LoadResultTy, LN0->getDebugLoc(),
LN0->getChain(), NewPtr,
LN0->getSrcValue(), LN0->getSrcValueOffset(),
ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
@@ -2001,7 +2322,8 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
}
// fold (or x, undef) -> -1
- if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) {
+ if (!LegalOperations &&
+ (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)) {
EVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
return DAG.getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
}
@@ -2109,6 +2431,11 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
if (SDNode *Rot = MatchRotate(N0, N1, N->getDebugLoc()))
return SDValue(Rot, 0);
+ // Simplify the operands using demanded-bits information.
+ if (!VT.isVector() &&
+ SimplifyDemandedBits(SDValue(N, 0)))
+ return SDValue(N, 0);
+
return SDValue();
}
@@ -2579,7 +2906,13 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
HiBitsMask);
}
- return N1C ? visitShiftByConstant(N, N1C->getZExtValue()) : SDValue();
+ if (N1C) {
+ SDValue NewSHL = visitShiftByConstant(N, N1C->getZExtValue());
+ if (NewSHL.getNode())
+ return NewSHL;
+ }
+
+ return SDValue();
}
SDValue DAGCombiner::visitSRA(SDNode *N) {
@@ -2693,7 +3026,13 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
if (DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0, N1);
- return N1C ? visitShiftByConstant(N, N1C->getZExtValue()) : SDValue();
+ if (N1C) {
+ SDValue NewSRA = visitShiftByConstant(N, N1C->getZExtValue());
+ if (NewSRA.getNode())
+ return NewSRA;
+ }
+
+ return SDValue();
}
SDValue DAGCombiner::visitSRL(SDNode *N) {
@@ -2731,6 +3070,15 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0),
DAG.getConstant(c1 + c2, N1.getValueType()));
}
+
+ // fold (srl (shl x, c), c) -> (and x, cst2)
+ if (N1C && N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1 &&
+ N0.getValueSizeInBits() <= 64) {
+ uint64_t ShAmt = N1C->getZExtValue()+64-N0.getValueSizeInBits();
+ return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0.getOperand(0),
+ DAG.getConstant(~0ULL >> ShAmt, VT));
+ }
+
// fold (srl (anyextend x), c) -> (anyextend (srl x, c))
if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
@@ -2739,10 +3087,12 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
if (N1C->getZExtValue() >= SmallVT.getSizeInBits())
return DAG.getUNDEF(VT);
- SDValue SmallShift = DAG.getNode(ISD::SRL, N0.getDebugLoc(), SmallVT,
- N0.getOperand(0), N1);
- AddToWorkList(SmallShift.getNode());
- return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, SmallShift);
+ if (!LegalTypes || TLI.isTypeDesirableForOp(ISD::SRL, SmallVT)) {
+ SDValue SmallShift = DAG.getNode(ISD::SRL, N0.getDebugLoc(), SmallVT,
+ N0.getOperand(0), N1);
+ AddToWorkList(SmallShift.getNode());
+ return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, SmallShift);
+ }
}
// fold (srl (sra X, Y), 31) -> (srl X, 31). This srl only looks at the sign
@@ -2819,6 +3169,11 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
return NewSRL;
}
+ // Attempt to convert a srl of a load into a narrower zero-extending load.
+ SDValue NarrowLoad = ReduceLoadWidth(N);
+ if (NarrowLoad.getNode())
+ return NarrowLoad;
+
// Here is a common situation. We want to optimize:
//
// %a = ...
@@ -3092,8 +3447,12 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
// fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
if (NarrowLoad.getNode()) {
- if (NarrowLoad.getNode() != N0.getNode())
+ SDNode* oye = N0.getNode()->getOperand(0).getNode();
+ if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
+ // CombineTo deleted the truncate, if needed, but not what's under it.
+ AddToWorkList(oye);
+ }
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
@@ -3144,7 +3503,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::SIGN_EXTEND, SetCCs, TLI);
if (DoXform) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(),
@@ -3188,7 +3547,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
EVT MemVT = LN0->getMemoryVT();
if ((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::SEXTLOAD, MemVT)) {
- SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(), MemVT,
@@ -3205,24 +3564,40 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
if (N0.getOpcode() == ISD::SETCC) {
// sext(setcc) -> sext_in_reg(vsetcc) for vectors.
- if (VT.isVector() &&
+ // Only do this before legalize for now.
+ if (VT.isVector() && !LegalOperations) {
+ EVT N0VT = N0.getOperand(0).getValueType();
// We know that the # elements of the results is the same as the
// # elements of the compare (and the # elements of the compare result
// for that matter). Check to see that they are the same size. If so,
// we know that the element size of the sext'd result matches the
// element size of the compare operands.
- VT.getSizeInBits() == N0.getOperand(0).getValueType().getSizeInBits() &&
-
- // Only do this before legalize for now.
- !LegalOperations) {
- return DAG.getVSetCC(N->getDebugLoc(), VT, N0.getOperand(0),
- N0.getOperand(1),
- cast<CondCodeSDNode>(N0.getOperand(2))->get());
+ if (VT.getSizeInBits() == N0VT.getSizeInBits())
+ return DAG.getVSetCC(N->getDebugLoc(), VT, N0.getOperand(0),
+ N0.getOperand(1),
+ cast<CondCodeSDNode>(N0.getOperand(2))->get());
+ // If the desired elements are smaller or larger than the source
+ // elements we can use a matching integer vector type and then
+ // truncate/sign extend
+ else {
+ EVT MatchingElementType =
+ EVT::getIntegerVT(*DAG.getContext(),
+ N0VT.getScalarType().getSizeInBits());
+ EVT MatchingVectorType =
+ EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
+ N0VT.getVectorNumElements());
+ SDValue VsetCC =
+ DAG.getVSetCC(N->getDebugLoc(), MatchingVectorType, N0.getOperand(0),
+ N0.getOperand(1),
+ cast<CondCodeSDNode>(N0.getOperand(2))->get());
+ return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT);
+ }
}
-
+
// sext(setcc x, y, cc) -> (select_cc x, y, -1, 0, cc)
+ unsigned ElementWidth = VT.getScalarType().getSizeInBits();
SDValue NegOne =
- DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
+ DAG.getConstant(APInt::getAllOnesValue(ElementWidth), VT);
SDValue SCC =
SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1),
NegOne, DAG.getConstant(0, VT),
@@ -3236,9 +3611,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
N0.getOperand(0), N0.getOperand(1),
cast<CondCodeSDNode>(N0.getOperand(2))->get()),
NegOne, DAG.getConstant(0, VT));
- }
-
-
+ }
// fold (sext x) -> (zext x) if the sign bit is known zero.
if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) &&
@@ -3266,18 +3639,19 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
if (N0.getOpcode() == ISD::TRUNCATE) {
SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
if (NarrowLoad.getNode()) {
- if (NarrowLoad.getNode() != N0.getNode())
+ SDNode* oye = N0.getNode()->getOperand(0).getNode();
+ if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
+ // CombineTo deleted the truncate, if needed, but not what's under it.
+ AddToWorkList(oye);
+ }
return DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, NarrowLoad);
}
}
// fold (zext (truncate x)) -> (and x, mask)
if (N0.getOpcode() == ISD::TRUNCATE &&
- (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT)) &&
- (!TLI.isTruncateFree(N0.getOperand(0).getValueType(),
- N0.getValueType()) ||
- !TLI.isZExtFree(N0.getValueType(), VT))) {
+ (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT))) {
SDValue Op = N0.getOperand(0);
if (Op.getValueType().bitsLT(VT)) {
Op = DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, Op);
@@ -3318,7 +3692,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ZERO_EXTEND, SetCCs, TLI);
if (DoXform) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N->getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N->getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(),
@@ -3362,7 +3736,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
EVT MemVT = LN0->getMemoryVT();
if ((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT)) {
- SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N->getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N->getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(), MemVT,
@@ -3377,8 +3751,48 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
}
}
- // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
if (N0.getOpcode() == ISD::SETCC) {
+ if (!LegalOperations && VT.isVector()) {
+ // zext(setcc) -> (and (vsetcc), (1, 1, ...)) for vectors.
+ // Only do this before legalize for now.
+ EVT N0VT = N0.getOperand(0).getValueType();
+ EVT EltVT = VT.getVectorElementType();
+ SmallVector<SDValue,8> OneOps(VT.getVectorNumElements(),
+ DAG.getConstant(1, EltVT));
+ if (VT.getSizeInBits() == N0VT.getSizeInBits()) {
+ // We know that the # elements of the results is the same as the
+ // # elements of the compare (and the # elements of the compare result
+ // for that matter). Check to see that they are the same size. If so,
+ // we know that the element size of the sext'd result matches the
+ // element size of the compare operands.
+ return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
+ DAG.getVSetCC(N->getDebugLoc(), VT, N0.getOperand(0),
+ N0.getOperand(1),
+ cast<CondCodeSDNode>(N0.getOperand(2))->get()),
+ DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
+ &OneOps[0], OneOps.size()));
+ } else {
+ // If the desired elements are smaller or larger than the source
+ // elements we can use a matching integer vector type and then
+ // truncate/sign extend
+ EVT MatchingElementType =
+ EVT::getIntegerVT(*DAG.getContext(),
+ N0VT.getScalarType().getSizeInBits());
+ EVT MatchingVectorType =
+ EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
+ N0VT.getVectorNumElements());
+ SDValue VsetCC =
+ DAG.getVSetCC(N->getDebugLoc(), MatchingVectorType, N0.getOperand(0),
+ N0.getOperand(1),
+ cast<CondCodeSDNode>(N0.getOperand(2))->get());
+ return DAG.getNode(ISD::AND, N->getDebugLoc(), VT,
+ DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT),
+ DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), VT,
+ &OneOps[0], OneOps.size()));
+ }
+ }
+
+ // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
SDValue SCC =
SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1),
DAG.getConstant(1, VT), DAG.getConstant(0, VT),
@@ -3431,8 +3845,12 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
if (N0.getOpcode() == ISD::TRUNCATE) {
SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
if (NarrowLoad.getNode()) {
- if (NarrowLoad.getNode() != N0.getNode())
+ SDNode* oye = N0.getNode()->getOperand(0).getNode();
+ if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
+ // CombineTo deleted the truncate, if needed, but not what's under it.
+ AddToWorkList(oye);
+ }
return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, NarrowLoad);
}
}
@@ -3476,7 +3894,7 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ANY_EXTEND, SetCCs, TLI);
if (DoXform) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, N->getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, N->getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(),
@@ -3520,8 +3938,9 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
N0.hasOneUse()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
EVT MemVT = LN0->getMemoryVT();
- SDValue ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), N->getDebugLoc(),
- VT, LN0->getChain(), LN0->getBasePtr(),
+ SDValue ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), VT,
+ N->getDebugLoc(),
+ LN0->getChain(), LN0->getBasePtr(),
LN0->getSrcValue(),
LN0->getSrcValueOffset(), MemVT,
LN0->isVolatile(), LN0->isNonTemporal(),
@@ -3534,8 +3953,39 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
- // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
if (N0.getOpcode() == ISD::SETCC) {
+ // aext(setcc) -> sext_in_reg(vsetcc) for vectors.
+ // Only do this before legalize for now.
+ if (VT.isVector() && !LegalOperations) {
+ EVT N0VT = N0.getOperand(0).getValueType();
+ // We know that the # elements of the results is the same as the
+ // # elements of the compare (and the # elements of the compare result
+ // for that matter). Check to see that they are the same size. If so,
+ // we know that the element size of the sext'd result matches the
+ // element size of the compare operands.
+ if (VT.getSizeInBits() == N0VT.getSizeInBits())
+ return DAG.getVSetCC(N->getDebugLoc(), VT, N0.getOperand(0),
+ N0.getOperand(1),
+ cast<CondCodeSDNode>(N0.getOperand(2))->get());
+ // If the desired elements are smaller or larger than the source
+ // elements we can use a matching integer vector type and then
+ // truncate/sign extend
+ else {
+ EVT MatchingElementType =
+ EVT::getIntegerVT(*DAG.getContext(),
+ N0VT.getScalarType().getSizeInBits());
+ EVT MatchingVectorType =
+ EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
+ N0VT.getVectorNumElements());
+ SDValue VsetCC =
+ DAG.getVSetCC(N->getDebugLoc(), MatchingVectorType, N0.getOperand(0),
+ N0.getOperand(1),
+ cast<CondCodeSDNode>(N0.getOperand(2))->get());
+ return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT);
+ }
+ }
+
+ // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
SDValue SCC =
SimplifySelectCC(N->getDebugLoc(), N0.getOperand(0), N0.getOperand(1),
DAG.getConstant(1, VT), DAG.getConstant(0, VT),
@@ -3588,6 +4038,7 @@ SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) {
/// extended, also fold the extension to form a extending load.
SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
unsigned Opc = N->getOpcode();
+
ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
@@ -3604,6 +4055,15 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
if (LegalOperations && !TLI.isLoadExtLegal(ISD::SEXTLOAD, ExtVT))
return SDValue();
+ } else if (Opc == ISD::SRL) {
+ // Another special-case: SRL is basically zero-extending a narrower
+ // value.
+ ExtType = ISD::ZEXTLOAD;
+ N0 = SDValue(N, 0);
+ ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+ if (!N01) return SDValue();
+ ExtVT = EVT::getIntegerVT(*DAG.getContext(),
+ VT.getSizeInBits() - N01->getZExtValue());
}
unsigned EVTBits = ExtVT.getSizeInBits();
@@ -3624,7 +4084,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
// Do not generate loads of non-round integer types since these can
// be expensive (and would be wrong if the type is not byte sized).
if (isa<LoadSDNode>(N0) && N0.hasOneUse() && ExtVT.isRound() &&
- cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits() > EVTBits &&
+ cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits() >= EVTBits &&
// Do not change the width of a volatile load.
!cast<LoadSDNode>(N0)->isVolatile()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
@@ -3649,7 +4109,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
? DAG.getLoad(VT, N0.getDebugLoc(), LN0->getChain(), NewPtr,
LN0->getSrcValue(), LN0->getSrcValueOffset() + PtrOff,
LN0->isVolatile(), LN0->isNonTemporal(), NewAlign)
- : DAG.getExtLoad(ExtType, N0.getDebugLoc(), VT, LN0->getChain(), NewPtr,
+ : DAG.getExtLoad(ExtType, VT, N0.getDebugLoc(), LN0->getChain(), NewPtr,
LN0->getSrcValue(), LN0->getSrcValueOffset() + PtrOff,
ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
NewAlign);
@@ -3694,7 +4154,8 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
// if x is small enough.
if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) {
SDValue N00 = N0.getOperand(0);
- if (N00.getValueType().getScalarType().getSizeInBits() < EVTBits)
+ if (N00.getValueType().getScalarType().getSizeInBits() <= EVTBits &&
+ (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
return DAG.getNode(ISD::SIGN_EXTEND, N->getDebugLoc(), VT, N00, N1);
}
@@ -3735,7 +4196,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(), EVT,
@@ -3752,7 +4213,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(), EVT,
@@ -3779,7 +4240,8 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
if (N0.getOpcode() == ISD::TRUNCATE)
return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0.getOperand(0));
// fold (truncate (ext x)) -> (ext x) or (truncate x) or x
- if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::SIGN_EXTEND||
+ if (N0.getOpcode() == ISD::ZERO_EXTEND ||
+ N0.getOpcode() == ISD::SIGN_EXTEND ||
N0.getOpcode() == ISD::ANY_EXTEND) {
if (N0.getOperand(0).getValueType().bitsLT(VT))
// if the source is smaller than the dest, we still need an extend
@@ -3805,7 +4267,18 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
// fold (truncate (load x)) -> (smaller load x)
// fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
- return ReduceLoadWidth(N);
+ if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) {
+ SDValue Reduced = ReduceLoadWidth(N);
+ if (Reduced.getNode())
+ return Reduced;
+ }
+
+ // Simplify the operands using demanded-bits information.
+ if (!VT.isVector() &&
+ SimplifyDemandedBits(SDValue(N, 0)))
+ return SDValue(N, 0);
+
+ return SDValue();
}
static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
@@ -3949,7 +4422,7 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
VT.isInteger() && !VT.isVector()) {
unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
- if (TLI.isTypeLegal(IntXVT) || !LegalTypes) {
+ if (isTypeLegal(IntXVT)) {
SDValue X = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(),
IntXVT, N0.getOperand(1));
AddToWorkList(X.getNode());
@@ -4016,6 +4489,16 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
// If this is a conversion of N elements of one type to N elements of another
// type, convert each element. This handles FP<->INT cases.
if (SrcBitSize == DstBitSize) {
+ EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
+ BV->getValueType(0).getVectorNumElements());
+
+ // Due to the FP element handling below calling this routine recursively,
+ // we can end up with a scalar-to-vector node here.
+ if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR)
+ return DAG.getNode(ISD::SCALAR_TO_VECTOR, BV->getDebugLoc(), VT,
+ DAG.getNode(ISD::BIT_CONVERT, BV->getDebugLoc(),
+ DstEltVT, BV->getOperand(0)));
+
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
SDValue Op = BV->getOperand(i);
@@ -4027,8 +4510,6 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
DstEltVT, Op));
AddToWorkList(Ops.back().getNode());
}
- EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT,
- BV->getValueType(0).getVectorNumElements());
return DAG.getNode(ISD::BUILD_VECTOR, BV->getDebugLoc(), VT,
&Ops[0], Ops.size());
}
@@ -4075,8 +4556,8 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
if (Op.getOpcode() == ISD::UNDEF) continue;
EltIsUndef = false;
- NewBits |= (APInt(cast<ConstantSDNode>(Op)->getAPIntValue()).
- zextOrTrunc(SrcBitSize).zext(DstBitSize));
+ NewBits |= APInt(cast<ConstantSDNode>(Op)->getAPIntValue()).
+ zextOrTrunc(SrcBitSize).zext(DstBitSize);
}
if (EltIsUndef)
@@ -4464,7 +4945,7 @@ SDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) {
ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
// fold (fp_round_inreg c1fp) -> c1fp
- if (N0CFP && (TLI.isTypeLegal(EVT) || !LegalTypes)) {
+ if (N0CFP && isTypeLegal(EVT)) {
SDValue Round = DAG.getConstantFP(*N0CFP->getConstantFPValue(), EVT);
return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, Round);
}
@@ -4503,7 +4984,7 @@ SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, N->getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, N->getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(),
@@ -4676,7 +5157,7 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
if (Op0.getOpcode() == Op1.getOpcode()) {
// Avoid missing important xor optimizations.
SDValue Tmp = visitXOR(TheXor);
- if (Tmp.getNode()) {
+ if (Tmp.getNode() && Tmp.getNode() != TheXor) {
DEBUG(dbgs() << "\nReplacing.8 ";
TheXor->dump(&DAG);
dbgs() << "\nWith: ";
@@ -4925,10 +5406,6 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {
SDValue Offset;
ISD::MemIndexedMode AM = ISD::UNINDEXED;
if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
- if (Ptr == Offset && Op->getOpcode() == ISD::ADD)
- std::swap(BasePtr, Offset);
- if (Ptr != BasePtr)
- continue;
// Don't create a indexed load / store with zero offset.
if (isa<ConstantSDNode>(Offset) &&
cast<ConstantSDNode>(Offset)->isNullValue())
@@ -5022,18 +5499,6 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
SDValue Chain = LD->getChain();
SDValue Ptr = LD->getBasePtr();
- // Try to infer better alignment information than the load already has.
- if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) {
- if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
- if (Align > LD->getAlignment())
- return DAG.getExtLoad(LD->getExtensionType(), N->getDebugLoc(),
- LD->getValueType(0),
- Chain, Ptr, LD->getSrcValue(),
- LD->getSrcValueOffset(), LD->getMemoryVT(),
- LD->isVolatile(), LD->isNonTemporal(), Align);
- }
- }
-
// If load is not volatile and there are no uses of the loaded value (and
// the updated indexed value in case of indexed loads), change uses of the
// chain value into uses of the chain input (i.e. delete the dead load).
@@ -5099,6 +5564,18 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
}
}
+ // Try to infer better alignment information than the load already has.
+ if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) {
+ if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
+ if (Align > LD->getAlignment())
+ return DAG.getExtLoad(LD->getExtensionType(), LD->getValueType(0),
+ N->getDebugLoc(),
+ Chain, Ptr, LD->getSrcValue(),
+ LD->getSrcValueOffset(), LD->getMemoryVT(),
+ LD->isVolatile(), LD->isNonTemporal(), Align);
+ }
+ }
+
if (CombinerAA) {
// Walk up chain skipping non-aliasing memory nodes.
SDValue BetterChain = FindBetterChain(N, Chain);
@@ -5115,8 +5592,8 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
LD->isVolatile(), LD->isNonTemporal(),
LD->getAlignment());
} else {
- ReplLoad = DAG.getExtLoad(LD->getExtensionType(), LD->getDebugLoc(),
- LD->getValueType(0),
+ ReplLoad = DAG.getExtLoad(LD->getExtensionType(), LD->getValueType(0),
+ LD->getDebugLoc(),
BetterChain, Ptr, LD->getSrcValue(),
LD->getSrcValueOffset(),
LD->getMemoryVT(),
@@ -5145,6 +5622,136 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
return SDValue();
}
+/// CheckForMaskedLoad - Check to see if V is (and load (ptr), imm), where the
+/// load is having specific bytes cleared out. If so, return the byte size
+/// being masked out and the shift amount.
+static std::pair<unsigned, unsigned>
+CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
+ std::pair<unsigned, unsigned> Result(0, 0);
+
+ // Check for the structure we're looking for.
+ if (V->getOpcode() != ISD::AND ||
+ !isa<ConstantSDNode>(V->getOperand(1)) ||
+ !ISD::isNormalLoad(V->getOperand(0).getNode()))
+ return Result;
+
+ // Check the chain and pointer.
+ LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
+ if (LD->getBasePtr() != Ptr) return Result; // Not from same pointer.
+
+ // The store should be chained directly to the load or be an operand of a
+ // tokenfactor.
+ if (LD == Chain.getNode())
+ ; // ok.
+ else if (Chain->getOpcode() != ISD::TokenFactor)
+ return Result; // Fail.
+ else {
+ bool isOk = false;
+ for (unsigned i = 0, e = Chain->getNumOperands(); i != e; ++i)
+ if (Chain->getOperand(i).getNode() == LD) {
+ isOk = true;
+ break;
+ }
+ if (!isOk) return Result;
+ }
+
+ // This only handles simple types.
+ if (V.getValueType() != MVT::i16 &&
+ V.getValueType() != MVT::i32 &&
+ V.getValueType() != MVT::i64)
+ return Result;
+
+ // Check the constant mask. Invert it so that the bits being masked out are
+ // 0 and the bits being kept are 1. Use getSExtValue so that leading bits
+ // follow the sign bit for uniformity.
+ uint64_t NotMask = ~cast<ConstantSDNode>(V->getOperand(1))->getSExtValue();
+ unsigned NotMaskLZ = CountLeadingZeros_64(NotMask);
+ if (NotMaskLZ & 7) return Result; // Must be multiple of a byte.
+ unsigned NotMaskTZ = CountTrailingZeros_64(NotMask);
+ if (NotMaskTZ & 7) return Result; // Must be multiple of a byte.
+ if (NotMaskLZ == 64) return Result; // All zero mask.
+
+ // See if we have a continuous run of bits. If so, we have 0*1+0*
+ if (CountTrailingOnes_64(NotMask >> NotMaskTZ)+NotMaskTZ+NotMaskLZ != 64)
+ return Result;
+
+ // Adjust NotMaskLZ down to be from the actual size of the int instead of i64.
+ if (V.getValueType() != MVT::i64 && NotMaskLZ)
+ NotMaskLZ -= 64-V.getValueSizeInBits();
+
+ unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8;
+ switch (MaskedBytes) {
+ case 1:
+ case 2:
+ case 4: break;
+ default: return Result; // All one mask, or 5-byte mask.
+ }
+
+ // Verify that the first bit starts at a multiple of mask so that the access
+ // is aligned the same as the access width.
+ if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result;
+
+ Result.first = MaskedBytes;
+ Result.second = NotMaskTZ/8;
+ return Result;
+}
+
+
+/// ShrinkLoadReplaceStoreWithStore - Check to see if IVal is something that
+/// provides a value as specified by MaskInfo. If so, replace the specified
+/// store with a narrower store of truncated IVal.
+static SDNode *
+ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
+ SDValue IVal, StoreSDNode *St,
+ DAGCombiner *DC) {
+ unsigned NumBytes = MaskInfo.first;
+ unsigned ByteShift = MaskInfo.second;
+ SelectionDAG &DAG = DC->getDAG();
+
+ // Check to see if IVal is all zeros in the part being masked in by the 'or'
+ // that uses this. If not, this is not a replacement.
+ APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(),
+ ByteShift*8, (ByteShift+NumBytes)*8);
+ if (!DAG.MaskedValueIsZero(IVal, Mask)) return 0;
+
+ // Check that it is legal on the target to do this. It is legal if the new
+ // VT we're shrinking to (i8/i16/i32) is legal or we're still before type
+ // legalization.
+ MVT VT = MVT::getIntegerVT(NumBytes*8);
+ if (!DC->isTypeLegal(VT))
+ return 0;
+
+ // Okay, we can do this! Replace the 'St' store with a store of IVal that is
+ // shifted by ByteShift and truncated down to NumBytes.
+ if (ByteShift)
+ IVal = DAG.getNode(ISD::SRL, IVal->getDebugLoc(), IVal.getValueType(), IVal,
+ DAG.getConstant(ByteShift*8, DC->getShiftAmountTy()));
+
+ // Figure out the offset for the store and the alignment of the access.
+ unsigned StOffset;
+ unsigned NewAlign = St->getAlignment();
+
+ if (DAG.getTargetLoweringInfo().isLittleEndian())
+ StOffset = ByteShift;
+ else
+ StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;
+
+ SDValue Ptr = St->getBasePtr();
+ if (StOffset) {
+ Ptr = DAG.getNode(ISD::ADD, IVal->getDebugLoc(), Ptr.getValueType(),
+ Ptr, DAG.getConstant(StOffset, Ptr.getValueType()));
+ NewAlign = MinAlign(NewAlign, StOffset);
+ }
+
+ // Truncate down to the new size.
+ IVal = DAG.getNode(ISD::TRUNCATE, IVal->getDebugLoc(), VT, IVal);
+
+ ++OpsNarrowed;
+ return DAG.getStore(St->getChain(), St->getDebugLoc(), IVal, Ptr,
+ St->getSrcValue(), St->getSrcValueOffset()+StOffset,
+ false, false, NewAlign).getNode();
+}
+
/// ReduceLoadOpStoreWidth - Look for sequence of load / op / store where op is
/// one of 'or', 'xor', and 'and' of immediates. If 'op' is only touching some
@@ -5164,12 +5771,35 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
return SDValue();
unsigned Opc = Value.getOpcode();
+
+ // If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst
+ // is a byte mask indicating a consecutive number of bytes, check to see if
+ // Y is known to provide just those bytes. If so, we try to replace the
+ // load + replace + store sequence with a single (narrower) store, which makes
+ // the load dead.
+ if (Opc == ISD::OR) {
+ std::pair<unsigned, unsigned> MaskedLoad;
+ MaskedLoad = CheckForMaskedLoad(Value.getOperand(0), Ptr, Chain);
+ if (MaskedLoad.first)
+ if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
+ Value.getOperand(1), ST,this))
+ return SDValue(NewST, 0);
+
+ // Or is commutative, so try swapping X and Y.
+ MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain);
+ if (MaskedLoad.first)
+ if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
+ Value.getOperand(0), ST,this))
+ return SDValue(NewST, 0);
+ }
+
if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
Value.getOperand(1).getOpcode() != ISD::Constant)
return SDValue();
SDValue N0 = Value.getOperand(0);
- if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse()) {
+ if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
+ Chain == SDValue(N0.getNode(), 1)) {
LoadSDNode *LD = cast<LoadSDNode>(N0);
if (LD->getBasePtr() != Ptr)
return SDValue();
@@ -5211,8 +5841,8 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;
unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
- if (NewAlign <
- TLI.getTargetData()->getABITypeAlignment(NewVT.getTypeForEVT(*DAG.getContext())))
+ const Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
+ if (NewAlign < TLI.getTargetData()->getABITypeAlignment(NewVTTy))
return SDValue();
SDValue NewPtr = DAG.getNode(ISD::ADD, LD->getDebugLoc(),
@@ -5250,17 +5880,6 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
SDValue Value = ST->getValue();
SDValue Ptr = ST->getBasePtr();
- // Try to infer better alignment information than the store already has.
- if (OptLevel != CodeGenOpt::None && ST->isUnindexed()) {
- if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
- if (Align > ST->getAlignment())
- return DAG.getTruncStore(Chain, N->getDebugLoc(), Value,
- Ptr, ST->getSrcValue(),
- ST->getSrcValueOffset(), ST->getMemoryVT(),
- ST->isVolatile(), ST->isNonTemporal(), Align);
- }
- }
-
// If this is a store of a bit convert, store the input value if the
// resultant store does not need a higher alignment than the original.
if (Value.getOpcode() == ISD::BIT_CONVERT && !ST->isTruncatingStore() &&
@@ -5293,8 +5912,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
case MVT::ppcf128:
break;
case MVT::f32:
- if (((TLI.isTypeLegal(MVT::i32) || !LegalTypes) && !LegalOperations &&
- !ST->isVolatile()) ||
+ if ((isTypeLegal(MVT::i32) && !LegalOperations && !ST->isVolatile()) ||
TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
bitcastToAPInt().getZExtValue(), MVT::i32);
@@ -5305,7 +5923,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
}
break;
case MVT::f64:
- if (((TLI.isTypeLegal(MVT::i64) || !LegalTypes) && !LegalOperations &&
+ if ((TLI.isTypeLegal(MVT::i64) && !LegalOperations &&
!ST->isVolatile()) ||
TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
@@ -5351,6 +5969,17 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
}
}
+ // Try to infer better alignment information than the store already has.
+ if (OptLevel != CodeGenOpt::None && ST->isUnindexed()) {
+ if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
+ if (Align > ST->getAlignment())
+ return DAG.getTruncStore(Chain, N->getDebugLoc(), Value,
+ Ptr, ST->getSrcValue(),
+ ST->getSrcValueOffset(), ST->getMemoryVT(),
+ ST->isVolatile(), ST->isNonTemporal(), Align);
+ }
+ }
+
if (CombinerAA) {
// Walk up chain skipping non-aliasing memory nodes.
SDValue BetterChain = FindBetterChain(N, Chain);
@@ -5411,7 +6040,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
if (SimplifyDemandedBits(Value,
APInt::getLowBitsSet(
Value.getValueType().getScalarType().getSizeInBits(),
- ST->getMemoryVT().getSizeInBits())))
+ ST->getMemoryVT().getScalarType().getSizeInBits())))
return SDValue(N, 0);
}
@@ -5449,6 +6078,10 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
SDValue InVal = N->getOperand(1);
SDValue EltNo = N->getOperand(2);
+ // If the inserted element is an UNDEF, just use the input vector.
+ if (InVal.getOpcode() == ISD::UNDEF)
+ return InVec;
+
// If the invec is a BUILD_VECTOR and if EltNo is a constant, build a new
// vector with the inserted element.
if (InVec.getOpcode() == ISD::BUILD_VECTOR && isa<ConstantSDNode>(EltNo)) {
@@ -5486,7 +6119,6 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
// Check if the result type doesn't match the inserted element type. A
// SCALAR_TO_VECTOR may truncate the inserted element and the
// EXTRACT_VECTOR_ELT may widen the extracted vector.
- EVT EltVT = InVec.getValueType().getVectorElementType();
SDValue InOp = InVec.getOperand(0);
EVT NVT = N->getValueType(0);
if (InOp.getValueType() != NVT) {
@@ -5551,7 +6183,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
InVec = InVec.getOperand(0);
if (ISD::isNormalLoad(InVec.getNode())) {
LN0 = cast<LoadSDNode>(InVec);
- Elt = (Idx < (int)NumElems) ? Idx : Idx - NumElems;
+ Elt = (Idx < (int)NumElems) ? Idx : Idx - (int)NumElems;
}
}
@@ -5659,7 +6291,7 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
}
// Add count and size info.
- if (!TLI.isTypeLegal(VT) && LegalTypes)
+ if (!isTypeLegal(VT))
return SDValue();
// Return the new VECTOR_SHUFFLE node.
@@ -5686,8 +6318,6 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
}
SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
- return SDValue();
-
EVT VT = N->getValueType(0);
unsigned NumElts = VT.getVectorNumElements();
@@ -5702,7 +6332,6 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
// all scalar elements the same.
if (cast<ShuffleVectorSDNode>(N)->isSplat()) {
SDNode *V = N0.getNode();
-
// If this is a bit convert that changes the element type of the vector but
// not the number of vector elements, look through it. Be careful not to
@@ -5744,6 +6373,59 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
return SDValue();
}
+SDValue DAGCombiner::visitMEMBARRIER(SDNode* N) {
+ if (!TLI.getShouldFoldAtomicFences())
+ return SDValue();
+
+ SDValue atomic = N->getOperand(0);
+ switch (atomic.getOpcode()) {
+ case ISD::ATOMIC_CMP_SWAP:
+ case ISD::ATOMIC_SWAP:
+ case ISD::ATOMIC_LOAD_ADD:
+ case ISD::ATOMIC_LOAD_SUB:
+ case ISD::ATOMIC_LOAD_AND:
+ case ISD::ATOMIC_LOAD_OR:
+ case ISD::ATOMIC_LOAD_XOR:
+ case ISD::ATOMIC_LOAD_NAND:
+ case ISD::ATOMIC_LOAD_MIN:
+ case ISD::ATOMIC_LOAD_MAX:
+ case ISD::ATOMIC_LOAD_UMIN:
+ case ISD::ATOMIC_LOAD_UMAX:
+ break;
+ default:
+ return SDValue();
+ }
+
+ SDValue fence = atomic.getOperand(0);
+ if (fence.getOpcode() != ISD::MEMBARRIER)
+ return SDValue();
+
+ switch (atomic.getOpcode()) {
+ case ISD::ATOMIC_CMP_SWAP:
+ return SDValue(DAG.UpdateNodeOperands(atomic.getNode(),
+ fence.getOperand(0),
+ atomic.getOperand(1), atomic.getOperand(2),
+ atomic.getOperand(3)), atomic.getResNo());
+ case ISD::ATOMIC_SWAP:
+ case ISD::ATOMIC_LOAD_ADD:
+ case ISD::ATOMIC_LOAD_SUB:
+ case ISD::ATOMIC_LOAD_AND:
+ case ISD::ATOMIC_LOAD_OR:
+ case ISD::ATOMIC_LOAD_XOR:
+ case ISD::ATOMIC_LOAD_NAND:
+ case ISD::ATOMIC_LOAD_MIN:
+ case ISD::ATOMIC_LOAD_MAX:
+ case ISD::ATOMIC_LOAD_UMIN:
+ case ISD::ATOMIC_LOAD_UMAX:
+ return SDValue(DAG.UpdateNodeOperands(atomic.getNode(),
+ fence.getOperand(0),
+ atomic.getOperand(1), atomic.getOperand(2)),
+ atomic.getResNo());
+ default:
+ return SDValue();
+ }
+}
+
/// XformToShuffleWithZero - Returns a vector_shuffle if it able to transform
/// an AND to a vector_shuffle with the destination vector and a zero vector.
/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. ==>
@@ -5834,13 +6516,21 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) {
break;
}
- Ops.push_back(DAG.getNode(N->getOpcode(), LHS.getDebugLoc(),
- EltType, LHSOp, RHSOp));
- AddToWorkList(Ops.back().getNode());
- assert((Ops.back().getOpcode() == ISD::UNDEF ||
- Ops.back().getOpcode() == ISD::Constant ||
- Ops.back().getOpcode() == ISD::ConstantFP) &&
- "Scalar binop didn't fold!");
+ // If the vector element type is not legal, the BUILD_VECTOR operands
+ // are promoted and implicitly truncated. Make that explicit here.
+ if (LHSOp.getValueType() != EltType)
+ LHSOp = DAG.getNode(ISD::TRUNCATE, LHS.getDebugLoc(), EltType, LHSOp);
+ if (RHSOp.getValueType() != EltType)
+ RHSOp = DAG.getNode(ISD::TRUNCATE, RHS.getDebugLoc(), EltType, RHSOp);
+
+ SDValue FoldOp = DAG.getNode(N->getOpcode(), LHS.getDebugLoc(), EltType,
+ LHSOp, RHSOp);
+ if (FoldOp.getOpcode() != ISD::UNDEF &&
+ FoldOp.getOpcode() != ISD::Constant &&
+ FoldOp.getOpcode() != ISD::ConstantFP)
+ break;
+ Ops.push_back(FoldOp);
+ AddToWorkList(FoldOp.getNode());
}
if (Ops.size() == LHS.getNumOperands()) {
@@ -5967,8 +6657,8 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
LLD->getAlignment());
} else {
Load = DAG.getExtLoad(LLD->getExtensionType(),
- TheSelect->getDebugLoc(),
TheSelect->getValueType(0),
+ TheSelect->getDebugLoc(),
LLD->getChain(), Addr, 0, 0,
LLD->getMemoryVT(),
LLD->isVolatile(),
@@ -6209,38 +6899,34 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
}
}
- // Check to see if this is an integer abs. select_cc setl[te] X, 0, -X, X ->
+ // Check to see if this is an integer abs.
+ // select_cc setg[te] X, 0, X, -X ->
+ // select_cc setgt X, -1, X, -X ->
+ // select_cc setl[te] X, 0, -X, X ->
+ // select_cc setlt X, 1, -X, X ->
// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
- if (N1C && N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE) &&
- N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1) &&
- N2.getOperand(0) == N1 && N0.getValueType().isInteger()) {
+ if (N1C) {
+ ConstantSDNode *SubC = NULL;
+ if (((N1C->isNullValue() && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
+ (N1C->isAllOnesValue() && CC == ISD::SETGT)) &&
+ N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1))
+ SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0));
+ else if (((N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE)) ||
+ (N1C->isOne() && CC == ISD::SETLT)) &&
+ N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1))
+ SubC = dyn_cast<ConstantSDNode>(N2.getOperand(0));
+
EVT XType = N0.getValueType();
- SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType, N0,
- DAG.getConstant(XType.getSizeInBits()-1,
- getShiftAmountTy()));
- SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(), XType,
- N0, Shift);
- AddToWorkList(Shift.getNode());
- AddToWorkList(Add.getNode());
- return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
- }
- // Check to see if this is an integer abs. select_cc setgt X, -1, X, -X ->
- // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
- if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT &&
- N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1)) {
- if (ConstantSDNode *SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0))) {
- EVT XType = N0.getValueType();
- if (SubC->isNullValue() && XType.isInteger()) {
- SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType,
- N0,
- DAG.getConstant(XType.getSizeInBits()-1,
- getShiftAmountTy()));
- SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(),
- XType, N0, Shift);
- AddToWorkList(Shift.getNode());
- AddToWorkList(Add.getNode());
- return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
- }
+ if (SubC && SubC->isNullValue() && XType.isInteger()) {
+ SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType,
+ N0,
+ DAG.getConstant(XType.getSizeInBits()-1,
+ getShiftAmountTy()));
+ SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(),
+ XType, N0, Shift);
+ AddToWorkList(Shift.getNode());
+ AddToWorkList(Add.getNode());
+ return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
}
}
@@ -6287,7 +6973,7 @@ SDValue DAGCombiner::BuildUDIV(SDNode *N) {
/// FindBaseOffset - Return true if base is a frame index, which is known not
// to alias with anything but itself. Provides base object and offset as results.
static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
- GlobalValue *&GV, void *&CV) {
+ const GlobalValue *&GV, void *&CV) {
// Assume it is a primitive operation.
Base = Ptr; Offset = 0; GV = 0; CV = 0;
@@ -6335,7 +7021,7 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
// Gather base node and offset information.
SDValue Base1, Base2;
int64_t Offset1, Offset2;
- GlobalValue *GV1, *GV2;
+ const GlobalValue *GV1, *GV2;
void *CV1, *CV2;
bool isFrameIndex1 = FindBaseOffset(Ptr1, Base1, Offset1, GV1, CV1);
bool isFrameIndex2 = FindBaseOffset(Ptr2, Base2, Offset2, GV2, CV2);
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index 1d76c7c..a4eed71 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -1,4 +1,4 @@
-///===-- FastISel.cpp - Implementation of the FastISel class --------------===//
+//===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -44,20 +44,60 @@
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/FastISel.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/DwarfWriter.h"
#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
-#include "SelectionDAGBuilder.h"
-#include "FunctionLoweringInfo.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
-unsigned FastISel::getRegForValue(Value *V) {
+/// startNewBlock - Set the current block to which generated machine
+/// instructions will be appended, and clear the local CSE map.
+///
+void FastISel::startNewBlock() {
+ LocalValueMap.clear();
+
+ // Start out as null, meaning no local-value instructions have
+ // been emitted.
+ LastLocalValue = 0;
+
+ // Advance the last local value past any EH_LABEL instructions.
+ MachineBasicBlock::iterator
+ I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
+ while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
+ LastLocalValue = I;
+ ++I;
+ }
+}
+
+bool FastISel::hasTrivialKill(const Value *V) const {
+ // Don't consider constants or arguments to have trivial kills.
+ const Instruction *I = dyn_cast<Instruction>(V);
+ if (!I)
+ return false;
+
+ // No-op casts are trivially coalesced by fast-isel.
+ if (const CastInst *Cast = dyn_cast<CastInst>(I))
+ if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
+ !hasTrivialKill(Cast->getOperand(0)))
+ return false;
+
+ // Only instructions with a single use in the same basic block are considered
+ // to have trivial kills.
+ return I->hasOneUse() &&
+ !(I->getOpcode() == Instruction::BitCast ||
+ I->getOpcode() == Instruction::PtrToInt ||
+ I->getOpcode() == Instruction::IntToPtr) &&
+ cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
+}
+
+unsigned FastISel::getRegForValue(const Value *V) {
EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
// Don't handle non-simple values in FastISel.
if (!RealVT.isSimple())
@@ -79,13 +119,40 @@ unsigned FastISel::getRegForValue(Value *V) {
// cache values defined by Instructions across blocks, and other values
// only locally. This is because Instructions already have the SSA
// def-dominates-use requirement enforced.
- if (ValueMap.count(V))
- return ValueMap[V];
+ DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
+ if (I != FuncInfo.ValueMap.end()) {
+ unsigned Reg = I->second;
+ return Reg;
+ }
unsigned Reg = LocalValueMap[V];
if (Reg != 0)
return Reg;
- if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ // In bottom-up mode, just create the virtual register which will be used
+ // to hold the value. It will be materialized later.
+ if (isa<Instruction>(V) &&
+ (!isa<AllocaInst>(V) ||
+ !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
+ return FuncInfo.InitializeRegForValue(V);
+
+ SavePoint SaveInsertPt = enterLocalValueArea();
+
+ // Materialize the value in a register. Emit any instructions in the
+ // local value area.
+ Reg = materializeRegForValue(V, VT);
+
+ leaveLocalValueArea(SaveInsertPt);
+
+ return Reg;
+}
+
+/// materializeRegForValue - Helper for getRegForValue. This function is
+/// called when the value isn't already available in a register and must
+/// be materialized with new instructions.
+unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
+ unsigned Reg = 0;
+
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getValue().getActiveBits() <= 64)
Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
} else if (isa<AllocaInst>(V)) {
@@ -95,10 +162,12 @@ unsigned FastISel::getRegForValue(Value *V) {
// local-CSE'd with actual integer zeros.
Reg =
getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
- } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
+ } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
+ // Try to emit the constant directly.
Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
if (!Reg) {
+ // Try to emit the constant by using an integer constant with a cast.
const APFloat &Flt = CF->getValueAPF();
EVT IntVT = TLI.getPointerTy();
@@ -113,15 +182,20 @@ unsigned FastISel::getRegForValue(Value *V) {
unsigned IntegerReg =
getRegForValue(ConstantInt::get(V->getContext(), IntVal));
if (IntegerReg != 0)
- Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
+ Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
+ IntegerReg, /*Kill=*/false);
}
}
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
- if (!SelectOperator(CE, CE->getOpcode())) return 0;
- Reg = LocalValueMap[CE];
+ } else if (const Operator *Op = dyn_cast<Operator>(V)) {
+ if (!SelectOperator(Op, Op->getOpcode()))
+ if (!isa<Instruction>(Op) ||
+ !TargetSelectInstruction(cast<Instruction>(Op)))
+ return 0;
+ Reg = lookUpRegForValue(Op);
} else if (isa<UndefValue>(V)) {
Reg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(MBB, DL, TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
}
// If target-independent code couldn't handle the value, give target-specific
@@ -131,18 +205,21 @@ unsigned FastISel::getRegForValue(Value *V) {
// Don't cache constant materializations in the general ValueMap.
// To do so would require tracking what uses they dominate.
- if (Reg != 0)
+ if (Reg != 0) {
LocalValueMap[V] = Reg;
+ LastLocalValue = MRI.getVRegDef(Reg);
+ }
return Reg;
}
-unsigned FastISel::lookUpRegForValue(Value *V) {
+unsigned FastISel::lookUpRegForValue(const Value *V) {
// Look up the value to see if we already have a register for it. We
// cache values defined by Instructions across blocks, and other values
// only locally. This is because Instructions already have the SSA
- // def-dominatess-use requirement enforced.
- if (ValueMap.count(V))
- return ValueMap[V];
+ // def-dominates-use requirement enforced.
+ DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
+ if (I != FuncInfo.ValueMap.end())
+ return I->second;
return LocalValueMap[V];
}
@@ -152,43 +229,86 @@ unsigned FastISel::lookUpRegForValue(Value *V) {
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
-unsigned FastISel::UpdateValueMap(Value* I, unsigned Reg) {
+unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
if (!isa<Instruction>(I)) {
LocalValueMap[I] = Reg;
return Reg;
}
- unsigned &AssignedReg = ValueMap[I];
+ unsigned &AssignedReg = FuncInfo.ValueMap[I];
if (AssignedReg == 0)
+ // Use the new register.
AssignedReg = Reg;
else if (Reg != AssignedReg) {
- const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
- TII.copyRegToReg(*MBB, MBB->end(), AssignedReg,
- Reg, RegClass, RegClass);
+ // Arrange for uses of AssignedReg to be replaced by uses of Reg.
+ FuncInfo.RegFixups[AssignedReg] = Reg;
+
+ AssignedReg = Reg;
}
+
return AssignedReg;
}
-unsigned FastISel::getRegForGEPIndex(Value *Idx) {
+std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
unsigned IdxN = getRegForValue(Idx);
if (IdxN == 0)
// Unhandled operand. Halt "fast" selection and bail.
- return 0;
+ return std::pair<unsigned, bool>(0, false);
+
+ bool IdxNIsKill = hasTrivialKill(Idx);
// If the index is smaller or larger than intptr_t, truncate or extend it.
MVT PtrVT = TLI.getPointerTy();
EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
- if (IdxVT.bitsLT(PtrVT))
- IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
- else if (IdxVT.bitsGT(PtrVT))
- IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
- return IdxN;
+ if (IdxVT.bitsLT(PtrVT)) {
+ IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
+ IdxN, IdxNIsKill);
+ IdxNIsKill = true;
+ }
+ else if (IdxVT.bitsGT(PtrVT)) {
+ IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
+ IdxN, IdxNIsKill);
+ IdxNIsKill = true;
+ }
+ return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
+}
+
+void FastISel::recomputeInsertPt() {
+ if (getLastLocalValue()) {
+ FuncInfo.InsertPt = getLastLocalValue();
+ FuncInfo.MBB = FuncInfo.InsertPt->getParent();
+ ++FuncInfo.InsertPt;
+ } else
+ FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
+
+ // Now skip past any EH_LABELs, which must remain at the beginning.
+ while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
+ FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
+ ++FuncInfo.InsertPt;
+}
+
+FastISel::SavePoint FastISel::enterLocalValueArea() {
+ MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
+ DebugLoc OldDL = DL;
+ recomputeInsertPt();
+ DL = DebugLoc();
+ SavePoint SP = { OldInsertPt, OldDL };
+ return SP;
+}
+
+void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
+ if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
+ LastLocalValue = llvm::prior(FuncInfo.InsertPt);
+
+ // Restore the previous insert position.
+ FuncInfo.InsertPt = OldInsertPt.InsertPt;
+ DL = OldInsertPt.DL;
}
/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
-bool FastISel::SelectBinaryOp(User *I, unsigned ISDOpcode) {
+bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
if (VT == MVT::Other || !VT.isSimple())
// Unhandled type. Halt "fast" selection and bail.
@@ -214,10 +334,13 @@ bool FastISel::SelectBinaryOp(User *I, unsigned ISDOpcode) {
// Unhandled operand. Halt "fast" selection and bail.
return false;
+ bool Op0IsKill = hasTrivialKill(I->getOperand(0));
+
// Check if the second operand is a constant and handle it appropriately.
if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
- ISDOpcode, Op0, CI->getZExtValue());
+ ISDOpcode, Op0, Op0IsKill,
+ CI->getZExtValue());
if (ResultReg != 0) {
// We successfully emitted code for the given LLVM Instruction.
UpdateValueMap(I, ResultReg);
@@ -228,7 +351,7 @@ bool FastISel::SelectBinaryOp(User *I, unsigned ISDOpcode) {
// Check if the second operand is a constant float.
if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
- ISDOpcode, Op0, CF);
+ ISDOpcode, Op0, Op0IsKill, CF);
if (ResultReg != 0) {
// We successfully emitted code for the given LLVM Instruction.
UpdateValueMap(I, ResultReg);
@@ -241,9 +364,13 @@ bool FastISel::SelectBinaryOp(User *I, unsigned ISDOpcode) {
// Unhandled operand. Halt "fast" selection and bail.
return false;
+ bool Op1IsKill = hasTrivialKill(I->getOperand(1));
+
// Now we have both operands in registers. Emit the instruction.
unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
- ISDOpcode, Op0, Op1);
+ ISDOpcode,
+ Op0, Op0IsKill,
+ Op1, Op1IsKill);
if (ResultReg == 0)
// Target-specific code wasn't able to find a machine opcode for
// the given ISD opcode and type. Halt "fast" selection and bail.
@@ -254,17 +381,19 @@ bool FastISel::SelectBinaryOp(User *I, unsigned ISDOpcode) {
return true;
}
-bool FastISel::SelectGetElementPtr(User *I) {
+bool FastISel::SelectGetElementPtr(const User *I) {
unsigned N = getRegForValue(I->getOperand(0));
if (N == 0)
// Unhandled operand. Halt "fast" selection and bail.
return false;
+ bool NIsKill = hasTrivialKill(I->getOperand(0));
+
const Type *Ty = I->getOperand(0)->getType();
MVT VT = TLI.getPointerTy();
- for (GetElementPtrInst::op_iterator OI = I->op_begin()+1, E = I->op_end();
- OI != E; ++OI) {
- Value *Idx = *OI;
+ for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
+ E = I->op_end(); OI != E; ++OI) {
+ const Value *Idx = *OI;
if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
@@ -272,41 +401,46 @@ bool FastISel::SelectGetElementPtr(User *I) {
uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
// FIXME: This can be optimized by combining the add with a
// subsequent one.
- N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
+ N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
if (N == 0)
// Unhandled operand. Halt "fast" selection and bail.
return false;
+ NIsKill = true;
}
Ty = StTy->getElementType(Field);
} else {
Ty = cast<SequentialType>(Ty)->getElementType();
// If this is a constant subscript, handle it quickly.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
- if (CI->getZExtValue() == 0) continue;
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
+ if (CI->isZero()) continue;
uint64_t Offs =
TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
- N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
+ N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
if (N == 0)
// Unhandled operand. Halt "fast" selection and bail.
return false;
+ NIsKill = true;
continue;
}
// N = N + Idx * ElementSize;
uint64_t ElementSize = TD.getTypeAllocSize(Ty);
- unsigned IdxN = getRegForGEPIndex(Idx);
+ std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
+ unsigned IdxN = Pair.first;
+ bool IdxNIsKill = Pair.second;
if (IdxN == 0)
// Unhandled operand. Halt "fast" selection and bail.
return false;
if (ElementSize != 1) {
- IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
+ IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
if (IdxN == 0)
// Unhandled operand. Halt "fast" selection and bail.
return false;
+ IdxNIsKill = true;
}
- N = FastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
+ N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
if (N == 0)
// Unhandled operand. Halt "fast" selection and bail.
return false;
@@ -318,63 +452,63 @@ bool FastISel::SelectGetElementPtr(User *I) {
return true;
}
-bool FastISel::SelectCall(User *I) {
- Function *F = cast<CallInst>(I)->getCalledFunction();
+bool FastISel::SelectCall(const User *I) {
+ const Function *F = cast<CallInst>(I)->getCalledFunction();
if (!F) return false;
+ // Handle selected intrinsic function calls.
unsigned IID = F->getIntrinsicID();
switch (IID) {
default: break;
case Intrinsic::dbg_declare: {
- DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
- if (!DIDescriptor::ValidDebugInfo(DI->getVariable(), CodeGenOpt::None)||!DW
- || !DW->ShouldEmitDwarfDebug())
+ const DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
+ if (!DIVariable(DI->getVariable()).Verify() ||
+ !FuncInfo.MF->getMMI().hasDebugInfo())
return true;
- Value *Address = DI->getAddress();
+ const Value *Address = DI->getAddress();
if (!Address)
return true;
- AllocaInst *AI = dyn_cast<AllocaInst>(Address);
+ if (isa<UndefValue>(Address))
+ return true;
+ const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
// Don't handle byval struct arguments or VLAs, for example.
- if (!AI) break;
- DenseMap<const AllocaInst*, int>::iterator SI =
- StaticAllocaMap.find(AI);
- if (SI == StaticAllocaMap.end()) break; // VLAs.
- int FI = SI->second;
- if (MMI) {
- if (MDNode *Dbg = DI->getMetadata("dbg"))
- MMI->setVariableDbgInfo(DI->getVariable(), FI, Dbg);
- }
- // Building the map above is target independent. Generating DBG_VALUE
- // inline is target dependent; do this now.
- (void)TargetSelectInstruction(cast<Instruction>(I));
+ if (!AI)
+ // Building the map above is target independent. Generating DBG_VALUE
+ // inline is target dependent; do this now.
+ (void)TargetSelectInstruction(cast<Instruction>(I));
return true;
}
case Intrinsic::dbg_value: {
- // This requires target support, but right now X86 is the only Fast target.
- DbgValueInst *DI = cast<DbgValueInst>(I);
+ // This form of DBG_VALUE is target-independent.
+ const DbgValueInst *DI = cast<DbgValueInst>(I);
const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
- Value *V = DI->getValue();
+ const Value *V = DI->getValue();
if (!V) {
// Currently the optimizer can produce this; insert an undef to
// help debugging. Probably the optimizer should not do this.
- BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
- addMetadata(DI->getVariable());
- } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- BuildMI(MBB, DL, II).addImm(CI->getZExtValue()).addImm(DI->getOffset()).
- addMetadata(DI->getVariable());
- } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
- BuildMI(MBB, DL, II).addFPImm(CF).addImm(DI->getOffset()).
- addMetadata(DI->getVariable());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(0U).addImm(DI->getOffset())
+ .addMetadata(DI->getVariable());
+ } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addImm(CI->getZExtValue()).addImm(DI->getOffset())
+ .addMetadata(DI->getVariable());
+ } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addFPImm(CF).addImm(DI->getOffset())
+ .addMetadata(DI->getVariable());
} else if (unsigned Reg = lookUpRegForValue(V)) {
- BuildMI(MBB, DL, II).addReg(Reg, RegState::Debug).addImm(DI->getOffset()).
- addMetadata(DI->getVariable());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
+ .addMetadata(DI->getVariable());
} else {
// We can't yet handle anything else here because it would require
// generating code, thus altering codegen because of debug info.
// Insert an undef so we can see what we dropped.
- BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
- addMetadata(DI->getVariable());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(0U).addImm(DI->getOffset())
+ .addMetadata(DI->getVariable());
}
return true;
}
@@ -383,14 +517,13 @@ bool FastISel::SelectCall(User *I) {
switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
default: break;
case TargetLowering::Expand: {
- assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!");
+ assert(FuncInfo.MBB->isLandingPad() &&
+ "Call to eh.exception not in landing pad!");
unsigned Reg = TLI.getExceptionAddressRegister();
const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
unsigned ResultReg = createResultReg(RC);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- Reg, RC, RC);
- assert(InsertedCopy && "Can't copy address registers!");
- InsertedCopy = InsertedCopy;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(Reg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -402,54 +535,51 @@ bool FastISel::SelectCall(User *I) {
switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
default: break;
case TargetLowering::Expand: {
- if (MMI) {
- if (MBB->isLandingPad())
- AddCatchInfo(*cast<CallInst>(I), MMI, MBB);
- else {
+ if (FuncInfo.MBB->isLandingPad())
+ AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
+ else {
#ifndef NDEBUG
- CatchInfoLost.insert(cast<CallInst>(I));
+ FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
#endif
- // FIXME: Mark exception selector register as live in. Hack for PR1508.
- unsigned Reg = TLI.getExceptionSelectorRegister();
- if (Reg) MBB->addLiveIn(Reg);
- }
-
+ // FIXME: Mark exception selector register as live in. Hack for PR1508.
unsigned Reg = TLI.getExceptionSelectorRegister();
- EVT SrcVT = TLI.getPointerTy();
- const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
- unsigned ResultReg = createResultReg(RC);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, Reg,
- RC, RC);
- assert(InsertedCopy && "Can't copy address registers!");
- InsertedCopy = InsertedCopy;
-
- // Cast the register to the type of the selector.
- if (SrcVT.bitsGT(MVT::i32))
- ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
- ResultReg);
- else if (SrcVT.bitsLT(MVT::i32))
- ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
- ISD::SIGN_EXTEND, ResultReg);
- if (ResultReg == 0)
- // Unhandled operand. Halt "fast" selection and bail.
- return false;
-
- UpdateValueMap(I, ResultReg);
- } else {
- unsigned ResultReg =
- getRegForValue(Constant::getNullValue(I->getType()));
- UpdateValueMap(I, ResultReg);
+ if (Reg) FuncInfo.MBB->addLiveIn(Reg);
}
+
+ unsigned Reg = TLI.getExceptionSelectorRegister();
+ EVT SrcVT = TLI.getPointerTy();
+ const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
+ unsigned ResultReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(Reg);
+
+ bool ResultRegIsKill = hasTrivialKill(I);
+
+ // Cast the register to the type of the selector.
+ if (SrcVT.bitsGT(MVT::i32))
+ ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
+ ResultReg, ResultRegIsKill);
+ else if (SrcVT.bitsLT(MVT::i32))
+ ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
+ ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
+ if (ResultReg == 0)
+ // Unhandled operand. Halt "fast" selection and bail.
+ return false;
+
+ UpdateValueMap(I, ResultReg);
+
return true;
}
}
break;
}
}
+
+ // An arbitrary call. Bail.
return false;
}
-bool FastISel::SelectCast(User *I, unsigned Opcode) {
+bool FastISel::SelectCast(const User *I, unsigned Opcode) {
EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(I->getType());
@@ -479,12 +609,15 @@ bool FastISel::SelectCast(User *I, unsigned Opcode) {
// Unhandled operand. Halt "fast" selection and bail.
return false;
+ bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
+
// If the operand is i1, arrange for the high bits in the register to be zero.
if (SrcVT == MVT::i1) {
SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
- InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg);
+ InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg, InputRegIsKill);
if (!InputReg)
return false;
+ InputRegIsKill = true;
}
// If the result is i1, truncate to the target's type for i1 first.
if (DstVT == MVT::i1)
@@ -493,7 +626,7 @@ bool FastISel::SelectCast(User *I, unsigned Opcode) {
unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
DstVT.getSimpleVT(),
Opcode,
- InputReg);
+ InputReg, InputRegIsKill);
if (!ResultReg)
return false;
@@ -501,7 +634,7 @@ bool FastISel::SelectCast(User *I, unsigned Opcode) {
return true;
}
-bool FastISel::SelectBitCast(User *I) {
+bool FastISel::SelectBitCast(const User *I) {
// If the bitcast doesn't change the type, just use the operand value.
if (I->getType() == I->getOperand(0)->getType()) {
unsigned Reg = getRegForValue(I->getOperand(0));
@@ -525,24 +658,26 @@ bool FastISel::SelectBitCast(User *I) {
if (Op0 == 0)
// Unhandled operand. Halt "fast" selection and bail.
return false;
+
+ bool Op0IsKill = hasTrivialKill(I->getOperand(0));
// First, try to perform the bitcast by inserting a reg-reg copy.
unsigned ResultReg = 0;
if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
- ResultReg = createResultReg(DstClass);
-
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- Op0, DstClass, SrcClass);
- if (!InsertedCopy)
- ResultReg = 0;
+ // Don't attempt a cross-class copy. It will likely fail.
+ if (SrcClass == DstClass) {
+ ResultReg = createResultReg(DstClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(Op0);
+ }
}
// If the reg-reg copy failed, select a BIT_CONVERT opcode.
if (!ResultReg)
ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
- ISD::BIT_CONVERT, Op0);
+ ISD::BIT_CONVERT, Op0, Op0IsKill);
if (!ResultReg)
return false;
@@ -552,15 +687,28 @@ bool FastISel::SelectBitCast(User *I) {
}
bool
-FastISel::SelectInstruction(Instruction *I) {
+FastISel::SelectInstruction(const Instruction *I) {
+ // Just before the terminator instruction, insert instructions to
+ // feed PHI nodes in successor blocks.
+ if (isa<TerminatorInst>(I))
+ if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
+ return false;
+
+ DL = I->getDebugLoc();
+
// First, try doing target-independent selection.
- if (SelectOperator(I, I->getOpcode()))
+ if (SelectOperator(I, I->getOpcode())) {
+ DL = DebugLoc();
return true;
+ }
// Next, try calling the target to attempt to handle the instruction.
- if (TargetSelectInstruction(I))
+ if (TargetSelectInstruction(I)) {
+ DL = DebugLoc();
return true;
+ }
+ DL = DebugLoc();
return false;
}
@@ -568,27 +716,30 @@ FastISel::SelectInstruction(Instruction *I) {
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
-FastISel::FastEmitBranch(MachineBasicBlock *MSucc) {
- if (MBB->isLayoutSuccessor(MSucc)) {
+FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
+ if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
// The unconditional fall-through case, which needs no instructions.
} else {
// The unconditional branch case.
- TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
+ TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
+ SmallVector<MachineOperand, 0>(), DL);
}
- MBB->addSuccessor(MSucc);
+ FuncInfo.MBB->addSuccessor(MSucc);
}
/// SelectFNeg - Emit an FNeg operation.
///
bool
-FastISel::SelectFNeg(User *I) {
+FastISel::SelectFNeg(const User *I) {
unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
if (OpReg == 0) return false;
+ bool OpRegIsKill = hasTrivialKill(I);
+
// If the target has ISD::FNEG, use it.
EVT VT = TLI.getValueType(I->getType());
unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
- ISD::FNEG, OpReg);
+ ISD::FNEG, OpReg, OpRegIsKill);
if (ResultReg != 0) {
UpdateValueMap(I, ResultReg);
return true;
@@ -602,18 +753,19 @@ FastISel::SelectFNeg(User *I) {
return false;
unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
- ISD::BIT_CONVERT, OpReg);
+ ISD::BIT_CONVERT, OpReg, OpRegIsKill);
if (IntReg == 0)
return false;
- unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR, IntReg,
+ unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
+ IntReg, /*Kill=*/true,
UINT64_C(1) << (VT.getSizeInBits()-1),
IntVT.getSimpleVT());
if (IntResultReg == 0)
return false;
ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
- ISD::BIT_CONVERT, IntResultReg);
+ ISD::BIT_CONVERT, IntResultReg, /*Kill=*/true);
if (ResultReg == 0)
return false;
@@ -622,7 +774,7 @@ FastISel::SelectFNeg(User *I) {
}
bool
-FastISel::SelectOperator(User *I, unsigned Opcode) {
+FastISel::SelectOperator(const User *I, unsigned Opcode) {
switch (Opcode) {
case Instruction::Add:
return SelectBinaryOp(I, ISD::ADD);
@@ -668,12 +820,12 @@ FastISel::SelectOperator(User *I, unsigned Opcode) {
return SelectGetElementPtr(I);
case Instruction::Br: {
- BranchInst *BI = cast<BranchInst>(I);
+ const BranchInst *BI = cast<BranchInst>(I);
if (BI->isUnconditional()) {
- BasicBlock *LLVMSucc = BI->getSuccessor(0);
- MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
- FastEmitBranch(MSucc);
+ const BasicBlock *LLVMSucc = BI->getSuccessor(0);
+ MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
+ FastEmitBranch(MSucc, BI->getDebugLoc());
return true;
}
@@ -686,13 +838,9 @@ FastISel::SelectOperator(User *I, unsigned Opcode) {
// Nothing to emit.
return true;
- case Instruction::PHI:
- // PHI nodes are already emitted.
- return true;
-
case Instruction::Alloca:
// FunctionLowering has the static-sized case covered.
- if (StaticAllocaMap.count(cast<AllocaInst>(I)))
+ if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
return true;
// Dynamic-sized alloca is not handled yet.
@@ -729,39 +877,25 @@ FastISel::SelectOperator(User *I, unsigned Opcode) {
return true;
}
+ case Instruction::PHI:
+ llvm_unreachable("FastISel shouldn't visit PHI nodes!");
+
default:
// Unhandled instruction. Halt "fast" selection and bail.
return false;
}
}
-FastISel::FastISel(MachineFunction &mf,
- MachineModuleInfo *mmi,
- DwarfWriter *dw,
- DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
- DenseMap<const AllocaInst *, int> &am
-#ifndef NDEBUG
- , SmallSet<Instruction*, 8> &cil
-#endif
- )
- : MBB(0),
- ValueMap(vm),
- MBBMap(bm),
- StaticAllocaMap(am),
-#ifndef NDEBUG
- CatchInfoLost(cil),
-#endif
- MF(mf),
- MMI(mmi),
- DW(dw),
- MRI(MF.getRegInfo()),
- MFI(*MF.getFrameInfo()),
- MCP(*MF.getConstantPool()),
- TM(MF.getTarget()),
+FastISel::FastISel(FunctionLoweringInfo &funcInfo)
+ : FuncInfo(funcInfo),
+ MRI(FuncInfo.MF->getRegInfo()),
+ MFI(*FuncInfo.MF->getFrameInfo()),
+ MCP(*FuncInfo.MF->getConstantPool()),
+ TM(FuncInfo.MF->getTarget()),
TD(*TM.getTargetData()),
TII(*TM.getInstrInfo()),
- TLI(*TM.getTargetLowering()) {
+ TLI(*TM.getTargetLowering()),
+ TRI(*TM.getRegisterInfo()) {
}
FastISel::~FastISel() {}
@@ -772,13 +906,15 @@ unsigned FastISel::FastEmit_(MVT, MVT,
}
unsigned FastISel::FastEmit_r(MVT, MVT,
- unsigned, unsigned /*Op0*/) {
+ unsigned,
+ unsigned /*Op0*/, bool /*Op0IsKill*/) {
return 0;
}
unsigned FastISel::FastEmit_rr(MVT, MVT,
- unsigned, unsigned /*Op0*/,
- unsigned /*Op0*/) {
+ unsigned,
+ unsigned /*Op0*/, bool /*Op0IsKill*/,
+ unsigned /*Op1*/, bool /*Op1IsKill*/) {
return 0;
}
@@ -787,25 +923,28 @@ unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
}
unsigned FastISel::FastEmit_f(MVT, MVT,
- unsigned, ConstantFP * /*FPImm*/) {
+ unsigned, const ConstantFP * /*FPImm*/) {
return 0;
}
unsigned FastISel::FastEmit_ri(MVT, MVT,
- unsigned, unsigned /*Op0*/,
+ unsigned,
+ unsigned /*Op0*/, bool /*Op0IsKill*/,
uint64_t /*Imm*/) {
return 0;
}
unsigned FastISel::FastEmit_rf(MVT, MVT,
- unsigned, unsigned /*Op0*/,
- ConstantFP * /*FPImm*/) {
+ unsigned,
+ unsigned /*Op0*/, bool /*Op0IsKill*/,
+ const ConstantFP * /*FPImm*/) {
return 0;
}
unsigned FastISel::FastEmit_rri(MVT, MVT,
unsigned,
- unsigned /*Op0*/, unsigned /*Op1*/,
+ unsigned /*Op0*/, bool /*Op0IsKill*/,
+ unsigned /*Op1*/, bool /*Op1IsKill*/,
uint64_t /*Imm*/) {
return 0;
}
@@ -815,16 +954,18 @@ unsigned FastISel::FastEmit_rri(MVT, MVT,
/// If that fails, it materializes the immediate into a register and try
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
- unsigned Op0, uint64_t Imm,
- MVT ImmType) {
+ unsigned Op0, bool Op0IsKill,
+ uint64_t Imm, MVT ImmType) {
// First check if immediate type is legal. If not, we can't use the ri form.
- unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
+ unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
if (ResultReg != 0)
return ResultReg;
unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
if (MaterialReg == 0)
return 0;
- return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
+ return FastEmit_rr(VT, VT, Opcode,
+ Op0, Op0IsKill,
+ MaterialReg, /*Kill=*/true);
}
/// FastEmit_rf_ - This method is a wrapper of FastEmit_ri. It first tries
@@ -832,10 +973,10 @@ unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and try FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT VT, unsigned Opcode,
- unsigned Op0, ConstantFP *FPImm,
- MVT ImmType) {
+ unsigned Op0, bool Op0IsKill,
+ const ConstantFP *FPImm, MVT ImmType) {
// First check if immediate type is legal. If not, we can't use the rf form.
- unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
+ unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, Op0IsKill, FPImm);
if (ResultReg != 0)
return ResultReg;
@@ -865,11 +1006,13 @@ unsigned FastISel::FastEmit_rf_(MVT VT, unsigned Opcode,
if (IntegerReg == 0)
return 0;
MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
- ISD::SINT_TO_FP, IntegerReg);
+ ISD::SINT_TO_FP, IntegerReg, /*Kill=*/true);
if (MaterialReg == 0)
return 0;
}
- return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
+ return FastEmit_rr(VT, VT, Opcode,
+ Op0, Op0IsKill,
+ MaterialReg, /*Kill=*/true);
}
unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
@@ -881,24 +1024,24 @@ unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
- BuildMI(MBB, DL, II, ResultReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
return ResultReg;
}
unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
- unsigned Op0) {
+ unsigned Op0, bool Op0IsKill) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg).addReg(Op0);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill);
else {
- BuildMI(MBB, DL, II).addReg(Op0);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC);
- if (!InsertedCopy)
- ResultReg = 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(Op0, Op0IsKill * RegState::Kill);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
@@ -906,72 +1049,87 @@ unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
- unsigned Op0, unsigned Op1) {
+ unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addReg(Op1, Op1IsKill * RegState::Kill);
else {
- BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC);
- if (!InsertedCopy)
- ResultReg = 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addReg(Op1, Op1IsKill * RegState::Kill);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
- unsigned Op0, uint64_t Imm) {
+ unsigned Op0, bool Op0IsKill,
+ uint64_t Imm) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Imm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addImm(Imm);
else {
- BuildMI(MBB, DL, II).addReg(Op0).addImm(Imm);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC);
- if (!InsertedCopy)
- ResultReg = 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addImm(Imm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
- unsigned Op0, ConstantFP *FPImm) {
+ unsigned Op0, bool Op0IsKill,
+ const ConstantFP *FPImm) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addFPImm(FPImm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addFPImm(FPImm);
else {
- BuildMI(MBB, DL, II).addReg(Op0).addFPImm(FPImm);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC);
- if (!InsertedCopy)
- ResultReg = 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addFPImm(FPImm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
- unsigned Op0, unsigned Op1, uint64_t Imm) {
+ unsigned Op0, bool Op0IsKill,
+ unsigned Op1, bool Op1IsKill,
+ uint64_t Imm) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addReg(Op1, Op1IsKill * RegState::Kill)
+ .addImm(Imm);
else {
- BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1).addImm(Imm);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC);
- if (!InsertedCopy)
- ResultReg = 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addReg(Op1, Op1IsKill * RegState::Kill)
+ .addImm(Imm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -983,38 +1141,101 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg).addImm(Imm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
else {
- BuildMI(MBB, DL, II).addImm(Imm);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC);
- if (!InsertedCopy)
- ResultReg = 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
- unsigned Op0, uint32_t Idx) {
- const TargetRegisterClass* RC = MRI.getRegClass(Op0);
-
+ unsigned Op0, bool Op0IsKill,
+ uint32_t Idx) {
unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
- const TargetInstrDesc &II = TII.get(TargetOpcode::EXTRACT_SUBREG);
-
- if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Idx);
- else {
- BuildMI(MBB, DL, II).addReg(Op0).addImm(Idx);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC);
- if (!InsertedCopy)
- ResultReg = 0;
- }
+ assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
+ "Cannot yet extract from physregs");
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(TargetOpcode::COPY), ResultReg)
+ .addReg(Op0, getKillRegState(Op0IsKill), Idx);
return ResultReg;
}
/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
-unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op) {
- return FastEmit_ri(VT, VT, ISD::AND, Op, 1);
+unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
+ return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
+}
+
+/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
+/// Emit code to ensure constants are copied into registers when needed.
+/// Remember the virtual registers that need to be added to the Machine PHI
+/// nodes as input. We cannot just directly add them, because expansion
+/// might result in multiple MBB's for one BB. As such, the start of the
+/// BB might correspond to a different MBB than the end.
+bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
+ const TerminatorInst *TI = LLVMBB->getTerminator();
+
+ SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
+ unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
+
+ // Check successor nodes' PHI nodes that expect a constant to be available
+ // from this block.
+ for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
+ const BasicBlock *SuccBB = TI->getSuccessor(succ);
+ if (!isa<PHINode>(SuccBB->begin())) continue;
+ MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
+
+ // If this terminator has multiple identical successors (common for
+ // switches), only handle each succ once.
+ if (!SuccsHandled.insert(SuccMBB)) continue;
+
+ MachineBasicBlock::iterator MBBI = SuccMBB->begin();
+
+ // At this point we know that there is a 1-1 correspondence between LLVM PHI
+ // nodes and Machine PHI nodes, but the incoming operands have not been
+ // emitted yet.
+ for (BasicBlock::const_iterator I = SuccBB->begin();
+ const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
+
+ // Ignore dead PHI nodes.
+ if (PN->use_empty()) continue;
+
+ // Only handle legal types. Two interesting things to note here. First,
+ // by bailing out early, we may leave behind some dead instructions,
+ // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
+ // own moves. Second, this check is necessary because FastISel doesn't
+ // use CreateRegs to create registers, so it always creates
+ // exactly one register for each non-void instruction.
+ EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
+ if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
+ // Promote MVT::i1.
+ if (VT == MVT::i1)
+ VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
+ else {
+ FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
+ return false;
+ }
+ }
+
+ const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
+
+ // Set the DebugLoc for the copy. Prefer the location of the operand
+ // if there is one; use the location of the PHI otherwise.
+ DL = PN->getDebugLoc();
+ if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
+ DL = Inst->getDebugLoc();
+
+ unsigned Reg = getRegForValue(PHIOp);
+ if (Reg == 0) {
+ FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
+ return false;
+ }
+ FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
+ DL = DebugLoc();
+ }
+ }
+
+ return true;
}
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 50f4c32..5ef6404 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -13,156 +13,86 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "function-lowering-info"
-#include "FunctionLoweringInfo.h"
-#include "llvm/CallingConv.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;
-/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
-/// of insertvalue or extractvalue indices that identify a member, return
-/// the linearized index of the start of the member.
-///
-unsigned llvm::ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
- const unsigned *Indices,
- const unsigned *IndicesEnd,
- unsigned CurIndex) {
- // Base case: We're done.
- if (Indices && Indices == IndicesEnd)
- return CurIndex;
-
- // Given a struct type, recursively traverse the elements.
- if (const StructType *STy = dyn_cast<StructType>(Ty)) {
- for (StructType::element_iterator EB = STy->element_begin(),
- EI = EB,
- EE = STy->element_end();
- EI != EE; ++EI) {
- if (Indices && *Indices == unsigned(EI - EB))
- return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
- CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
- }
- return CurIndex;
- }
- // Given an array type, recursively traverse the elements.
- else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
- const Type *EltTy = ATy->getElementType();
- for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
- if (Indices && *Indices == i)
- return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
- CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
- }
- return CurIndex;
- }
- // We haven't found the type we're looking for, so keep searching.
- return CurIndex + 1;
-}
-
-/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
-/// EVTs that represent all the individual underlying
-/// non-aggregate types that comprise it.
-///
-/// If Offsets is non-null, it points to a vector to be filled in
-/// with the in-memory offsets of each of the individual values.
-///
-void llvm::ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
- SmallVectorImpl<EVT> &ValueVTs,
- SmallVectorImpl<uint64_t> *Offsets,
- uint64_t StartingOffset) {
- // Given a struct type, recursively traverse the elements.
- if (const StructType *STy = dyn_cast<StructType>(Ty)) {
- const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
- for (StructType::element_iterator EB = STy->element_begin(),
- EI = EB,
- EE = STy->element_end();
- EI != EE; ++EI)
- ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
- StartingOffset + SL->getElementOffset(EI - EB));
- return;
- }
- // Given an array type, recursively traverse the elements.
- if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
- const Type *EltTy = ATy->getElementType();
- uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
- for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
- ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
- StartingOffset + i * EltSize);
- return;
- }
- // Interpret void as zero return values.
- if (Ty->isVoidTy())
- return;
- // Base case: we can get an EVT for this LLVM IR type.
- ValueVTs.push_back(TLI.getValueType(Ty));
- if (Offsets)
- Offsets->push_back(StartingOffset);
-}
-
/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
-static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
+static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
+ if (I->use_empty()) return false;
if (isa<PHINode>(I)) return true;
- BasicBlock *BB = I->getParent();
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
- if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
+ const BasicBlock *BB = I->getParent();
+ for (Value::const_use_iterator UI = I->use_begin(), E = I->use_end();
+ UI != E; ++UI) {
+ const User *U = *UI;
+ if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
return true;
+ }
return false;
}
/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true. This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
-static bool isOnlyUsedInEntryBlock(Argument *A, bool EnableFastISel) {
+static bool isOnlyUsedInEntryBlock(const Argument *A, bool EnableFastISel) {
// With FastISel active, we may be splitting blocks, so force creation
// of virtual registers for all non-dead arguments.
- // Don't force virtual registers for byval arguments though, because
- // fast-isel can't handle those in all cases.
- if (EnableFastISel && !A->hasByValAttr())
+ if (EnableFastISel)
return A->use_empty();
- BasicBlock *Entry = A->getParent()->begin();
- for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
- if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
+ const BasicBlock *Entry = A->getParent()->begin();
+ for (Value::const_use_iterator UI = A->use_begin(), E = A->use_end();
+ UI != E; ++UI) {
+ const User *U = *UI;
+ if (cast<Instruction>(U)->getParent() != Entry || isa<SwitchInst>(U))
return false; // Use not in entry block.
+ }
return true;
}
-FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli)
+FunctionLoweringInfo::FunctionLoweringInfo(const TargetLowering &tli)
: TLI(tli) {
}
-void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
- bool EnableFastISel) {
+void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
Fn = &fn;
MF = &mf;
RegInfo = &MF->getRegInfo();
+ // Check whether the function can return without sret-demotion.
+ SmallVector<ISD::OutputArg, 4> Outs;
+ GetReturnInfo(Fn->getReturnType(),
+ Fn->getAttributes().getRetAttributes(), Outs, TLI);
+ CanLowerReturn = TLI.CanLowerReturn(Fn->getCallingConv(), Fn->isVarArg(),
+ Outs, Fn->getContext());
+
// Create a vreg for each argument register that is not dead and is used
// outside of the entry block for the function.
- for (Function::arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
+ for (Function::const_arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
AI != E; ++AI)
if (!isOnlyUsedInEntryBlock(AI, EnableFastISel))
InitializeRegForValue(AI);
@@ -170,10 +100,10 @@ void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
// Initialize the mapping of values to registers. This is only set up for
// instruction values that are used outside of the block that defines
// them.
- Function::iterator BB = Fn->begin(), EB = Fn->end();
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
- if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
- if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
+ Function::const_iterator BB = Fn->begin(), EB = Fn->end();
+ for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
+ if (const AllocaInst *AI = dyn_cast<AllocaInst>(I))
+ if (const ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
const Type *Ty = AI->getAllocatedType();
uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align =
@@ -182,21 +112,60 @@ void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
TySize *= CUI->getZExtValue(); // Get total allocated size.
if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
+
+ // The object may need to be placed onto the stack near the stack
+ // protector if one exists. Determine here if this object is a suitable
+ // candidate. I.e., it would trigger the creation of a stack protector.
+ bool MayNeedSP =
+ (AI->isArrayAllocation() ||
+ (TySize > 8 && isa<ArrayType>(Ty) &&
+ cast<ArrayType>(Ty)->getElementType()->isIntegerTy(8)));
StaticAllocaMap[AI] =
- MF->getFrameInfo()->CreateStackObject(TySize, Align, false);
+ MF->getFrameInfo()->CreateStackObject(TySize, Align, false, MayNeedSP);
}
for (; BB != EB; ++BB)
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
- if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
+ for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
+ // Mark values used outside their block as exported, by allocating
+ // a virtual register for them.
+ if (isUsedOutsideOfDefiningBlock(I))
if (!isa<AllocaInst>(I) ||
!StaticAllocaMap.count(cast<AllocaInst>(I)))
InitializeRegForValue(I);
+ // Collect llvm.dbg.declare information. This is done now instead of
+ // during the initial isel pass through the IR so that it is done
+ // in a predictable order.
+ if (const DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(I)) {
+ MachineModuleInfo &MMI = MF->getMMI();
+ if (MMI.hasDebugInfo() &&
+ DIVariable(DI->getVariable()).Verify() &&
+ !DI->getDebugLoc().isUnknown()) {
+ // Don't handle byval struct arguments or VLAs, for example.
+ // Non-byval arguments are handled here (they refer to the stack
+ // temporary alloca at this point).
+ const Value *Address = DI->getAddress();
+ if (Address) {
+ if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
+ Address = BCI->getOperand(0);
+ if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
+ DenseMap<const AllocaInst *, int>::iterator SI =
+ StaticAllocaMap.find(AI);
+ if (SI != StaticAllocaMap.end()) { // Check for VLAs.
+ int FI = SI->second;
+ MMI.setVariableDbgInfo(DI->getVariable(),
+ FI, DI->getDebugLoc());
+ }
+ }
+ }
+ }
+ }
+ }
+
// Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
// also creates the initial PHI MachineInstrs, though none of the input
// operands are populated.
- for (BB = Fn->begin(), EB = Fn->end(); BB != EB; ++BB) {
+ for (BB = Fn->begin(); BB != EB; ++BB) {
MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(BB);
MBBMap[BB] = MBB;
MF->push_back(MBB);
@@ -209,14 +178,11 @@ void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
// Create Machine PHI nodes for LLVM PHI nodes, lowering them as
// appropriate.
- PHINode *PN;
- DebugLoc DL;
- for (BasicBlock::iterator
- I = BB->begin(), E = BB->end(); I != E; ++I) {
-
- PN = dyn_cast<PHINode>(I);
- if (!PN || PN->use_empty()) continue;
+ for (BasicBlock::const_iterator I = BB->begin();
+ const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
+ if (PN->use_empty()) continue;
+ DebugLoc DL = PN->getDebugLoc();
unsigned PHIReg = ValueMap[PN];
assert(PHIReg && "PHI node does not have an assigned virtual register!");
@@ -232,12 +198,20 @@ void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
}
}
}
+
+ // Mark landing pad blocks.
+ for (BB = Fn->begin(); BB != EB; ++BB)
+ if (const InvokeInst *Invoke = dyn_cast<InvokeInst>(BB->getTerminator()))
+ MBBMap[Invoke->getSuccessor(1)]->setIsLandingPad();
}
/// clear - Clear out all the function-specific state. This returns this
/// FunctionLoweringInfo to an empty state, ready to be used for a
/// different function.
void FunctionLoweringInfo::clear() {
+ assert(CatchInfoFound.size() == CatchInfoLost.size() &&
+ "Not all catch info was assigned to a landing pad!");
+
MBBMap.clear();
ValueMap.clear();
StaticAllocaMap.clear();
@@ -246,52 +220,69 @@ void FunctionLoweringInfo::clear() {
CatchInfoFound.clear();
#endif
LiveOutRegInfo.clear();
+ ArgDbgValues.clear();
+ ByValArgFrameIndexMap.clear();
+ RegFixups.clear();
}
-unsigned FunctionLoweringInfo::MakeReg(EVT VT) {
+/// CreateReg - Allocate a single virtual register for the given type.
+unsigned FunctionLoweringInfo::CreateReg(EVT VT) {
return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
}
-/// CreateRegForValue - Allocate the appropriate number of virtual registers of
+/// CreateRegs - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
-unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
+unsigned FunctionLoweringInfo::CreateRegs(const Type *Ty) {
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(TLI, V->getType(), ValueVTs);
+ ComputeValueVTs(TLI, Ty, ValueVTs);
unsigned FirstReg = 0;
for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
EVT ValueVT = ValueVTs[Value];
- EVT RegisterVT = TLI.getRegisterType(V->getContext(), ValueVT);
+ EVT RegisterVT = TLI.getRegisterType(Ty->getContext(), ValueVT);
- unsigned NumRegs = TLI.getNumRegisters(V->getContext(), ValueVT);
+ unsigned NumRegs = TLI.getNumRegisters(Ty->getContext(), ValueVT);
for (unsigned i = 0; i != NumRegs; ++i) {
- unsigned R = MakeReg(RegisterVT);
+ unsigned R = CreateReg(RegisterVT);
if (!FirstReg) FirstReg = R;
}
}
return FirstReg;
}
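With CreateRegs now keyed on a Type rather than a Value, the InitializeRegForValue helper used by FastISel::getRegForValue earlier in this patch presumably reduces to something like the sketch below (the new FunctionLoweringInfo.h is not part of this hunk, so the exact body is an assumption):

    unsigned InitializeRegForValue(const Value *V) {
      unsigned &R = ValueMap[V];
      assert(R == 0 && "Already initialized this value register!");
      return R = CreateRegs(V->getType());   // previously CreateRegForValue(V)
    }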
-/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
-GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
- V = V->stripPointerCasts();
- GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
- assert ((GV || isa<ConstantPointerNull>(V)) &&
- "TypeInfo must be a global variable or NULL");
- return GV;
+/// setByValArgumentFrameIndex - Record frame index for the byval
+/// argument. This overrides previous frame index entry for this argument,
+/// if any.
+void FunctionLoweringInfo::setByValArgumentFrameIndex(const Argument *A,
+ int FI) {
+ assert (A->hasByValAttr() && "Argument does not have byval attribute!");
+ ByValArgFrameIndexMap[A] = FI;
+}
+
+/// getByValArgumentFrameIndex - Get frame index for the byval argument.
+/// If the argument does not have any assigned frame index then 0 is
+/// returned.
+int FunctionLoweringInfo::getByValArgumentFrameIndex(const Argument *A) {
+ assert (A->hasByValAttr() && "Argument does not have byval attribute!");
+ DenseMap<const Argument *, int>::iterator I =
+ ByValArgFrameIndexMap.find(A);
+ if (I != ByValArgFrameIndexMap.end())
+ return I->second;
+ DEBUG(dbgs() << "Argument does not have assigned frame index!");
+ return 0;
}
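A hypothetical use of the new byval bookkeeping (ByValArg, FI, Variable and Loc are assumed names; the real callers live in the argument-lowering and debug-info paths, outside this hunk):

    // During argument lowering, once the byval argument has been given a
    // frame index FI:
    FuncInfo.setByValArgumentFrameIndex(ByValArg, FI);

    // Later, e.g. when a dbg.declare refers to that argument:
    int FrameIdx = FuncInfo.getByValArgumentFrameIndex(ByValArg);
    if (FrameIdx != 0)            // 0 means no index was recorded
      MF->getMMI().setVariableDbgInfo(Variable, FrameIdx, Loc);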
/// AddCatchInfo - Extract the personality and type infos from an eh.selector
/// call, and add them to the specified machine basic block.
-void llvm::AddCatchInfo(CallInst &I, MachineModuleInfo *MMI,
+void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
MachineBasicBlock *MBB) {
// Inform the MachineModuleInfo of the personality for this landing pad.
- ConstantExpr *CE = cast<ConstantExpr>(I.getOperand(2));
+ const ConstantExpr *CE = cast<ConstantExpr>(I.getArgOperand(1));
assert(CE->getOpcode() == Instruction::BitCast &&
isa<Function>(CE->getOperand(0)) &&
"Personality should be a function");
@@ -299,19 +290,19 @@ void llvm::AddCatchInfo(CallInst &I, MachineModuleInfo *MMI,
// Gather all the type infos for this landing pad and pass them along to
// MachineModuleInfo.
- std::vector<GlobalVariable *> TyInfo;
- unsigned N = I.getNumOperands();
+ std::vector<const GlobalVariable *> TyInfo;
+ unsigned N = I.getNumArgOperands();
- for (unsigned i = N - 1; i > 2; --i) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i))) {
+ for (unsigned i = N - 1; i > 1; --i) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(i))) {
unsigned FilterLength = CI->getZExtValue();
unsigned FirstCatch = i + FilterLength + !FilterLength;
- assert (FirstCatch <= N && "Invalid filter length");
+ assert(FirstCatch <= N && "Invalid filter length");
if (FirstCatch < N) {
TyInfo.reserve(N - FirstCatch);
for (unsigned j = FirstCatch; j < N; ++j)
- TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
+ TyInfo.push_back(ExtractTypeInfo(I.getArgOperand(j)));
MMI->addCatchTypeInfo(MBB, TyInfo);
TyInfo.clear();
}
@@ -323,7 +314,7 @@ void llvm::AddCatchInfo(CallInst &I, MachineModuleInfo *MMI,
// Filter.
TyInfo.reserve(FilterLength - 1);
for (unsigned j = i + 1; j < FirstCatch; ++j)
- TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
+ TyInfo.push_back(ExtractTypeInfo(I.getArgOperand(j)));
MMI->addFilterTypeInfo(MBB, TyInfo);
TyInfo.clear();
}
@@ -332,18 +323,19 @@ void llvm::AddCatchInfo(CallInst &I, MachineModuleInfo *MMI,
}
}
- if (N > 3) {
- TyInfo.reserve(N - 3);
- for (unsigned j = 3; j < N; ++j)
- TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
+ if (N > 2) {
+ TyInfo.reserve(N - 2);
+ for (unsigned j = 2; j < N; ++j)
+ TyInfo.push_back(ExtractTypeInfo(I.getArgOperand(j)));
MMI->addCatchTypeInfo(MBB, TyInfo);
}
}
-void llvm::CopyCatchInfo(BasicBlock *SrcBB, BasicBlock *DestBB,
+void llvm::CopyCatchInfo(const BasicBlock *SrcBB, const BasicBlock *DestBB,
MachineModuleInfo *MMI, FunctionLoweringInfo &FLI) {
- for (BasicBlock::iterator I = SrcBB->begin(), E = --SrcBB->end(); I != E; ++I)
- if (EHSelectorInst *EHSel = dyn_cast<EHSelectorInst>(I)) {
+ for (BasicBlock::const_iterator I = SrcBB->begin(), E = --SrcBB->end();
+ I != E; ++I)
+ if (const EHSelectorInst *EHSel = dyn_cast<EHSelectorInst>(I)) {
// Apply the catch info to DestBB.
AddCatchInfo(*EHSel, MMI, FLI.MBBMap[DestBB]);
#ifndef NDEBUG
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h
deleted file mode 100644
index d851e64..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h
+++ /dev/null
@@ -1,151 +0,0 @@
-//===-- FunctionLoweringInfo.h - Lower functions from LLVM IR to CodeGen --===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This implements routines for translating functions from LLVM IR into
-// Machine IR.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef FUNCTIONLOWERINGINFO_H
-#define FUNCTIONLOWERINGINFO_H
-
-#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/DenseMap.h"
-#ifndef NDEBUG
-#include "llvm/ADT/SmallSet.h"
-#endif
-#include "llvm/CodeGen/ValueTypes.h"
-#include <vector>
-
-namespace llvm {
-
-class AllocaInst;
-class BasicBlock;
-class CallInst;
-class Function;
-class GlobalVariable;
-class Instruction;
-class MachineBasicBlock;
-class MachineFunction;
-class MachineModuleInfo;
-class MachineRegisterInfo;
-class TargetLowering;
-class Value;
-
-//===--------------------------------------------------------------------===//
-/// FunctionLoweringInfo - This contains information that is global to a
-/// function that is used when lowering a region of the function.
-///
-class FunctionLoweringInfo {
-public:
- TargetLowering &TLI;
- Function *Fn;
- MachineFunction *MF;
- MachineRegisterInfo *RegInfo;
-
- /// CanLowerReturn - true iff the function's return value can be lowered to
- /// registers.
- bool CanLowerReturn;
-
- /// DemoteRegister - if CanLowerReturn is false, DemoteRegister is a vreg
- /// allocated to hold a pointer to the hidden sret parameter.
- unsigned DemoteRegister;
-
- explicit FunctionLoweringInfo(TargetLowering &TLI);
-
- /// set - Initialize this FunctionLoweringInfo with the given Function
- /// and its associated MachineFunction.
- ///
- void set(Function &Fn, MachineFunction &MF, bool EnableFastISel);
-
- /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
- DenseMap<const BasicBlock*, MachineBasicBlock *> MBBMap;
-
- /// ValueMap - Since we emit code for the function a basic block at a time,
- /// we must remember which virtual registers hold the values for
- /// cross-basic-block values.
- DenseMap<const Value*, unsigned> ValueMap;
-
- /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
- /// the entry block. This allows the allocas to be efficiently referenced
- /// anywhere in the function.
- DenseMap<const AllocaInst*, int> StaticAllocaMap;
-
-#ifndef NDEBUG
- SmallSet<Instruction*, 8> CatchInfoLost;
- SmallSet<Instruction*, 8> CatchInfoFound;
-#endif
-
- unsigned MakeReg(EVT VT);
-
- /// isExportedInst - Return true if the specified value is an instruction
- /// exported from its block.
- bool isExportedInst(const Value *V) {
- return ValueMap.count(V);
- }
-
- unsigned CreateRegForValue(const Value *V);
-
- unsigned InitializeRegForValue(const Value *V) {
- unsigned &R = ValueMap[V];
- assert(R == 0 && "Already initialized this value register!");
- return R = CreateRegForValue(V);
- }
-
- struct LiveOutInfo {
- unsigned NumSignBits;
- APInt KnownOne, KnownZero;
- LiveOutInfo() : NumSignBits(0), KnownOne(1, 0), KnownZero(1, 0) {}
- };
-
- /// LiveOutRegInfo - Information about live out vregs, indexed by their
- /// register number offset by 'FirstVirtualRegister'.
- std::vector<LiveOutInfo> LiveOutRegInfo;
-
- /// clear - Clear out all the function-specific state. This returns this
- /// FunctionLoweringInfo to an empty state, ready to be used for a
- /// different function.
- void clear();
-};
-
-/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
-/// of insertvalue or extractvalue indices that identify a member, return
-/// the linearized index of the start of the member.
-///
-unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
- const unsigned *Indices,
- const unsigned *IndicesEnd,
- unsigned CurIndex = 0);
-
-/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
-/// EVTs that represent all the individual underlying
-/// non-aggregate types that comprise it.
-///
-/// If Offsets is non-null, it points to a vector to be filled in
-/// with the in-memory offsets of each of the individual values.
-///
-void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
- SmallVectorImpl<EVT> &ValueVTs,
- SmallVectorImpl<uint64_t> *Offsets = 0,
- uint64_t StartingOffset = 0);
-
-/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
-GlobalVariable *ExtractTypeInfo(Value *V);
-
-/// AddCatchInfo - Extract the personality and type infos from an eh.selector
-/// call, and add them to the specified machine basic block.
-void AddCatchInfo(CallInst &I, MachineModuleInfo *MMI, MachineBasicBlock *MBB);
-
-/// CopyCatchInfo - Copy catch information from DestBB to SrcBB.
-void CopyCatchInfo(BasicBlock *SrcBB, BasicBlock *DestBB,
- MachineModuleInfo *MMI, FunctionLoweringInfo &FLI);
-
-} // end namespace llvm
-
-#endif
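
This private SelectionDAG header is deleted outright; judging by the hunks above that keep calling AddCatchInfo, ExtractTypeInfo and friends, the declarations were presumably promoted to a public CodeGen header in the LLVM 2.8 import rather than dropped (an assumption, the new location is not visible in this diff). One of the removed declarations, ComputeLinearIndex, is worth a small illustration: it flattens a nested aggregate and returns the linear position of the member named by an insertvalue/extractvalue index path. A self-contained model, with a toy Ty type standing in for LLVM's Type:

    #include <cassert>
    #include <cstdio>
    #include <vector>

    // Toy aggregate type: a leaf (one scalar) or a struct of members.
    struct Ty {
      std::vector<Ty> Members;                 // empty => leaf
      bool isLeaf() const { return Members.empty(); }
    };

    // How many scalar leaves a type flattens to.
    static unsigned countLeaves(const Ty &T) {
      if (T.isLeaf()) return 1;
      unsigned N = 0;
      for (const Ty &M : T.Members) N += countLeaves(M);
      return N;
    }

    // Linear index of the first scalar of the member addressed by Path,
    // the same bookkeeping the removed ComputeLinearIndex declaration describes.
    static unsigned linearIndex(const Ty &T, const std::vector<unsigned> &Path,
                                unsigned P = 0) {
      if (P == Path.size() || T.isLeaf()) return 0;
      unsigned Skipped = 0;
      for (unsigned i = 0; i < Path[P]; ++i)
        Skipped += countLeaves(T.Members[i]);
      return Skipped + linearIndex(T.Members[Path[P]], Path, P + 1);
    }

    int main() {
      Ty Leaf;
      Ty Pair;  Pair.Members  = {Leaf, Leaf};        // {i32, i32}
      Ty Outer; Outer.Members = {Leaf, Pair, Leaf};  // {i32, {i32, i32}, i32}
      assert(linearIndex(Outer, {1, 1}) == 2);       // second field of the pair
      assert(linearIndex(Outer, {2}) == 3);          // trailing scalar
      std::printf("ok\n");
    }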
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 625de11..61c2a90 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -15,7 +15,7 @@
#define DEBUG_TYPE "instr-emitter"
#include "InstrEmitter.h"
-#include "SDDbgValue.h"
+#include "SDNodeDbgValue.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -123,7 +123,7 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
EVT VT = Node->getValueType(ResNo);
const TargetRegisterClass *SrcRC = 0, *DstRC = 0;
- SrcRC = TRI->getPhysicalRegisterRegClass(SrcReg, VT);
+ SrcRC = TRI->getMinimalPhysRegClass(SrcReg, VT);
// Figure out the register class to create for the destreg.
if (VRBase) {
@@ -142,11 +142,8 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
} else {
// Create the reg, emit the copy.
VRBase = MRI->createVirtualRegister(DstRC);
- bool Emitted = TII->copyRegToReg(*MBB, InsertPos, VRBase, SrcReg,
- DstRC, SrcRC);
-
- assert(Emitted && "Unable to issue a copy instruction!\n");
- (void) Emitted;
+ BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
+ VRBase).addReg(SrcReg);
}
SDValue Op(Node, ResNo);
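
Here and in several hunks below, the TII->copyRegToReg() call and its "Unable to issue a copy instruction!" assert are replaced by building a target-independent TargetOpcode::COPY with BuildMI, carrying the node's DebugLoc. The emitter no longer has to resolve source and destination register classes itself; a later lowering decides how each copy is materialized. A toy model of that split (all names below are illustrative, none of this is LLVM API):

    #include <cstdio>
    #include <vector>

    enum Opcode { COPY, MOV_GPR, MOV_FPR };   // one generic copy, two made-up target moves
    struct Inst { Opcode Op; unsigned Dst, Src; };
    enum RegClass { GPR, FPR };

    static Opcode selectMoveFor(RegClass RC) { return RC == GPR ? MOV_GPR : MOV_FPR; }

    // The emitter records only (dst, src); one late pass picks the target move.
    static void lowerCopies(std::vector<Inst> &Block,
                            const std::vector<RegClass> &RegClassOf) {
      for (Inst &I : Block)
        if (I.Op == COPY)
          I.Op = selectMoveFor(RegClassOf[I.Dst]);
    }

    int main() {
      std::vector<RegClass> RC = {GPR, GPR, FPR, FPR};
      std::vector<Inst> Block = { {COPY, 1, 0}, {COPY, 3, 2} };
      lowerCopies(Block, RC);
      for (const Inst &I : Block)
        std::printf("op=%d %%r%u <- %%r%u\n", I.Op, I.Dst, I.Src);
    }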
@@ -246,7 +243,7 @@ unsigned InstrEmitter::getVR(SDValue Op,
const TargetRegisterClass *RC = TLI->getRegClassFor(Op.getValueType());
VReg = MRI->createVirtualRegister(RC);
}
- BuildMI(MBB, Op.getDebugLoc(),
+ BuildMI(*MBB, InsertPos, Op.getDebugLoc(),
TII->get(TargetOpcode::IMPLICIT_DEF), VReg);
return VReg;
}
@@ -264,7 +261,8 @@ void
InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
unsigned IIOpNum,
const TargetInstrDesc *II,
- DenseMap<SDValue, unsigned> &VRBaseMap) {
+ DenseMap<SDValue, unsigned> &VRBaseMap,
+ bool IsDebug, bool IsClone, bool IsCloned) {
assert(Op.getValueType() != MVT::Other &&
Op.getValueType() != MVT::Flag &&
"Chain and flag operands should occur at end of operand list!");
@@ -287,15 +285,38 @@ InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
"Don't have operand info for this instruction!");
if (DstRC && SrcRC != DstRC && !SrcRC->hasSuperClass(DstRC)) {
unsigned NewVReg = MRI->createVirtualRegister(DstRC);
- bool Emitted = TII->copyRegToReg(*MBB, InsertPos, NewVReg, VReg,
- DstRC, SrcRC);
- assert(Emitted && "Unable to issue a copy instruction!\n");
- (void) Emitted;
+ BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
VReg = NewVReg;
}
}
- MI->addOperand(MachineOperand::CreateReg(VReg, isOptDef));
+ // If this value has only one use, that use is a kill. This is a
+ // conservative approximation. InstrEmitter does trivial coalescing
+ // with CopyFromReg nodes, so don't emit kill flags for them.
+ // Avoid kill flags on Schedule cloned nodes, since there will be
+ // multiple uses.
+ // Tied operands are never killed, so we need to check that. And that
+ // means we need to determine the index of the operand.
+ bool isKill = Op.hasOneUse() &&
+ Op.getNode()->getOpcode() != ISD::CopyFromReg &&
+ !IsDebug &&
+ !(IsClone || IsCloned);
+ if (isKill) {
+ unsigned Idx = MI->getNumOperands();
+ while (Idx > 0 &&
+ MI->getOperand(Idx-1).isReg() && MI->getOperand(Idx-1).isImplicit())
+ --Idx;
+ bool isTied = MI->getDesc().getOperandConstraint(Idx, TOI::TIED_TO) != -1;
+ if (isTied)
+ isKill = false;
+ }
+
+ MI->addOperand(MachineOperand::CreateReg(VReg, isOptDef,
+ false/*isImp*/, isKill,
+ false/*isDead*/, false/*isUndef*/,
+ false/*isEarlyClobber*/,
+ 0/*SubReg*/, IsDebug));
}
/// AddOperand - Add the specified operand to the specified machine instr. II
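
The new block above computes a kill flag for register uses: a value with a single use is conservatively treated as killed, except for CopyFromReg results (the emitter coalesces those), debug uses, scheduler clones (more uses will appear later) and tied operands. The decision reduces to a small predicate, sketched here with the inputs passed as plain booleans instead of being derived from the SDNode and MachineInstr:

    #include <cstdio>

    static bool computeKillFlag(bool hasOneUse, bool isCopyFromReg,
                                bool isDebug, bool isClone, bool isCloned,
                                bool isTiedOperand) {
      bool isKill = hasOneUse && !isCopyFromReg && !isDebug &&
                    !(isClone || isCloned);
      if (isKill && isTiedOperand)
        isKill = false;                 // tied uses are never killed
      return isKill;
    }

    int main() {
      std::printf("%d\n", computeKillFlag(true, false, false, false, false, false)); // 1
      std::printf("%d\n", computeKillFlag(true, false, false, false, false, true));  // 0
    }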
@@ -305,9 +326,11 @@ InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
unsigned IIOpNum,
const TargetInstrDesc *II,
- DenseMap<SDValue, unsigned> &VRBaseMap) {
+ DenseMap<SDValue, unsigned> &VRBaseMap,
+ bool IsDebug, bool IsClone, bool IsCloned) {
if (Op.isMachineOpcode()) {
- AddRegisterOperand(MI, Op, IIOpNum, II, VRBaseMap);
+ AddRegisterOperand(MI, Op, IIOpNum, II, VRBaseMap,
+ IsDebug, IsClone, IsCloned);
} else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
MI->addOperand(MachineOperand::CreateImm(C->getSExtValue()));
} else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
@@ -356,7 +379,8 @@ void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
assert(Op.getValueType() != MVT::Other &&
Op.getValueType() != MVT::Flag &&
"Chain and flag operands should occur at end of operand list!");
- AddRegisterOperand(MI, Op, IIOpNum, II, VRBaseMap);
+ AddRegisterOperand(MI, Op, IIOpNum, II, VRBaseMap,
+ IsDebug, IsClone, IsCloned);
}
}
@@ -378,7 +402,8 @@ getSuperRegisterRegClass(const TargetRegisterClass *TRC,
/// EmitSubregNode - Generate machine code for subreg nodes.
///
void InstrEmitter::EmitSubregNode(SDNode *Node,
- DenseMap<SDValue, unsigned> &VRBaseMap){
+ DenseMap<SDValue, unsigned> &VRBaseMap,
+ bool IsClone, bool IsCloned) {
unsigned VRBase = 0;
unsigned Opc = Node->getMachineOpcode();
@@ -398,12 +423,9 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
}
if (Opc == TargetOpcode::EXTRACT_SUBREG) {
+ // EXTRACT_SUBREG is lowered as %dst = COPY %src:sub
unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
- // Create the extract_subreg machine instruction.
- MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(),
- TII->get(TargetOpcode::EXTRACT_SUBREG));
-
// Figure out the register class to create for the destreg.
unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
const TargetRegisterClass *TRC = MRI->getRegClass(VReg);
@@ -420,10 +442,16 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
VRBase = MRI->createVirtualRegister(SRC);
}
- // Add def, source, and subreg index
- MI->addOperand(MachineOperand::CreateReg(VRBase, true));
- AddOperand(MI, Node->getOperand(0), 0, 0, VRBaseMap);
- MI->addOperand(MachineOperand::CreateImm(SubIdx));
+ // Create the extract_subreg machine instruction.
+ MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), VRBase);
+
+ // Add source, and subreg index
+ AddOperand(MI, Node->getOperand(0), 0, 0, VRBaseMap, /*IsDebug=*/false,
+ IsClone, IsCloned);
+ assert(TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg()) &&
+ "Cannot yet extract from physregs");
+ MI->getOperand(1).setSubReg(SubIdx);
MBB->insert(InsertPos, MI);
} else if (Opc == TargetOpcode::INSERT_SUBREG ||
Opc == TargetOpcode::SUBREG_TO_REG) {
@@ -434,8 +462,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
unsigned SubIdx = cast<ConstantSDNode>(N2)->getZExtValue();
const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
const TargetRegisterClass *SRC =
- getSuperRegisterRegClass(TRC, SubIdx,
- Node->getValueType(0));
+ getSuperRegisterRegClass(TRC, SubIdx, Node->getValueType(0));
// Figure out the register class to create for the destreg.
// Note that if we're going to directly use an existing register,
@@ -457,9 +484,11 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
MI->addOperand(MachineOperand::CreateImm(SD->getZExtValue()));
} else
- AddOperand(MI, N0, 0, 0, VRBaseMap);
+ AddOperand(MI, N0, 0, 0, VRBaseMap, /*IsDebug=*/false,
+ IsClone, IsCloned);
// Add the subregister being inserted
- AddOperand(MI, N1, 0, 0, VRBaseMap);
+ AddOperand(MI, N1, 0, 0, VRBaseMap, /*IsDebug=*/false,
+ IsClone, IsCloned);
MI->addOperand(MachineOperand::CreateImm(SubIdx));
MBB->insert(InsertPos, MI);
} else
@@ -479,18 +508,13 @@ void
InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
DenseMap<SDValue, unsigned> &VRBaseMap) {
unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
- const TargetRegisterClass *SrcRC = MRI->getRegClass(VReg);
+ // Create the new VReg in the destination class and emit a copy.
unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
const TargetRegisterClass *DstRC = TRI->getRegClass(DstRCIdx);
-
- // Create the new VReg in the destination class and emit a copy.
unsigned NewVReg = MRI->createVirtualRegister(DstRC);
- bool Emitted = TII->copyRegToReg(*MBB, InsertPos, NewVReg, VReg,
- DstRC, SrcRC);
- assert(Emitted &&
- "Unable to issue a copy instruction for a COPY_TO_REGCLASS node!\n");
- (void) Emitted;
+ BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
+ NewVReg).addReg(VReg);
SDValue Op(Node, 0);
bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
@@ -498,143 +522,249 @@ InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
assert(isNew && "Node emitted out of order - early");
}
-/// EmitDbgValue - Generate any debug info that refers to this Node. Constant
-/// dbg_value is not handled here.
-void
-InstrEmitter::EmitDbgValue(SDNode *Node,
- DenseMap<SDValue, unsigned> &VRBaseMap,
- SDDbgValue *sd) {
- if (!Node->getHasDebugValue())
- return;
- if (!sd)
- return;
- unsigned VReg = getVR(SDValue(sd->getSDNode(), sd->getResNo()), VRBaseMap);
- const TargetInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);
- DebugLoc DL = sd->getDebugLoc();
- MachineInstr *MI;
- if (VReg) {
- MI = BuildMI(*MF, DL, II).addReg(VReg, RegState::Debug).
- addImm(sd->getOffset()).
- addMetadata(sd->getMDPtr());
- } else {
- // Insert an Undef so we can see what we dropped.
- MI = BuildMI(*MF, DL, II).addReg(0U).addImm(sd->getOffset()).
- addMetadata(sd->getMDPtr());
+/// EmitRegSequence - Generate machine code for REG_SEQUENCE nodes.
+///
+void InstrEmitter::EmitRegSequence(SDNode *Node,
+ DenseMap<SDValue, unsigned> &VRBaseMap,
+ bool IsClone, bool IsCloned) {
+ const TargetRegisterClass *RC = TLI->getRegClassFor(Node->getValueType(0));
+ unsigned NewVReg = MRI->createVirtualRegister(RC);
+ MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(),
+ TII->get(TargetOpcode::REG_SEQUENCE), NewVReg);
+ unsigned NumOps = Node->getNumOperands();
+ assert((NumOps & 1) == 0 &&
+ "REG_SEQUENCE must have an even number of operands!");
+ const TargetInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
+ for (unsigned i = 0; i != NumOps; ++i) {
+ SDValue Op = Node->getOperand(i);
+ if (i & 1) {
+ unsigned SubIdx = cast<ConstantSDNode>(Op)->getZExtValue();
+ unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap);
+ const TargetRegisterClass *TRC = MRI->getRegClass(SubReg);
+ const TargetRegisterClass *SRC =
+ TRI->getMatchingSuperRegClass(RC, TRC, SubIdx);
+ if (!SRC)
+ llvm_unreachable("Invalid subregister index in REG_SEQUENCE");
+ if (SRC != RC) {
+ MRI->setRegClass(NewVReg, SRC);
+ RC = SRC;
+ }
+ }
+ AddOperand(MI, Op, i+1, &II, VRBaseMap, /*IsDebug=*/false,
+ IsClone, IsCloned);
}
+
MBB->insert(InsertPos, MI);
+ SDValue Op(Node, 0);
+ bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
+ isNew = isNew; // Silence compiler warning.
+ assert(isNew && "Node emitted out of order - early");
}
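
EmitRegSequence is new in this import: REG_SEQUENCE assembles a wide virtual register from parts, and its operands arrive as (value, subregister-index) pairs, which is why the code asserts an even operand count and inspects every odd operand as a constant index (it also tightens the destination register class via getMatchingSuperRegClass, which the sketch below omits). The pairing logic on its own, with register numbers as plain integers:

    #include <cassert>
    #include <cstdio>
    #include <vector>

    struct RegSeqOperand { unsigned VReg, SubIdx; };

    static std::vector<RegSeqOperand>
    pairRegSequenceOps(const std::vector<unsigned> &Ops) {
      assert((Ops.size() & 1) == 0 &&
             "REG_SEQUENCE must have an even number of operands!");
      std::vector<RegSeqOperand> Pairs;
      for (unsigned i = 0; i != Ops.size(); i += 2)
        Pairs.push_back({Ops[i], Ops[i + 1]});
      return Pairs;
    }

    int main() {
      // e.g. build a 64-bit value from two 32-bit halves: (lo, sub_lo, hi, sub_hi)
      std::vector<unsigned> Ops = {101, 1, 102, 2};
      for (const RegSeqOperand &P : pairRegSequenceOps(Ops))
        std::printf("%%v%u -> subreg index %u\n", P.VReg, P.SubIdx);
    }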
-/// EmitDbgValue - Generate constant debug info. No SDNode is involved.
-void
-InstrEmitter::EmitDbgValue(SDDbgValue *sd) {
- if (!sd)
- return;
+/// EmitDbgValue - Generate machine instruction for a dbg_value node.
+///
+MachineInstr *
+InstrEmitter::EmitDbgValue(SDDbgValue *SD,
+ DenseMap<SDValue, unsigned> &VRBaseMap) {
+ uint64_t Offset = SD->getOffset();
+ MDNode* MDPtr = SD->getMDPtr();
+ DebugLoc DL = SD->getDebugLoc();
+
+ if (SD->getKind() == SDDbgValue::FRAMEIX) {
+ // Stack address; this needs to be lowered in target-dependent fashion.
+ // EmitTargetCodeForFrameDebugValue is responsible for allocation.
+ unsigned FrameIx = SD->getFrameIx();
+ return TII->emitFrameIndexDebugValue(*MF, FrameIx, Offset, MDPtr, DL);
+ }
+ // Otherwise, we're going to create an instruction here.
const TargetInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);
- DebugLoc DL = sd->getDebugLoc();
- MachineInstr *MI;
- Value *V = sd->getConst();
- if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- MI = BuildMI(*MF, DL, II).addImm(CI->getZExtValue()).
- addImm(sd->getOffset()).
- addMetadata(sd->getMDPtr());
- } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
- MI = BuildMI(*MF, DL, II).addFPImm(CF).addImm(sd->getOffset()).
- addMetadata(sd->getMDPtr());
+ MachineInstrBuilder MIB = BuildMI(*MF, DL, II);
+ if (SD->getKind() == SDDbgValue::SDNODE) {
+ SDNode *Node = SD->getSDNode();
+ SDValue Op = SDValue(Node, SD->getResNo());
+ // It's possible we replaced this SDNode with other(s) and therefore
+ // didn't generate code for it. It's better to catch these cases where
+ // they happen and transfer the debug info, but trying to guarantee that
+ // in all cases would be very fragile; this is a safeguard for any
+ // that were missed.
+ DenseMap<SDValue, unsigned>::iterator I = VRBaseMap.find(Op);
+ if (I==VRBaseMap.end())
+ MIB.addReg(0U); // undef
+ else
+ AddOperand(&*MIB, Op, (*MIB).getNumOperands(), &II, VRBaseMap,
+ /*IsDebug=*/true, /*IsClone=*/false, /*IsCloned=*/false);
+ } else if (SD->getKind() == SDDbgValue::CONST) {
+ const Value *V = SD->getConst();
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ // FIXME: SDDbgValue constants aren't updated with legalization, so it's
+ // possible to have i128 constants in them at this point. Dwarf writer
+ // does not handle i128 constants at the moment so, as a crude workaround,
+ // just drop the debug info if this happens.
+ if (!CI->getValue().isSignedIntN(64))
+ MIB.addReg(0U);
+ else
+ MIB.addImm(CI->getSExtValue());
+ } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
+ MIB.addFPImm(CF);
+ } else {
+ // Could be an Undef. In any case insert an Undef so we can see what we
+ // dropped.
+ MIB.addReg(0U);
+ }
} else {
// Insert an Undef so we can see what we dropped.
- MI = BuildMI(*MF, DL, II).addReg(0U).addImm(sd->getOffset()).
- addMetadata(sd->getMDPtr());
+ MIB.addReg(0U);
}
- MBB->insert(InsertPos, MI);
+
+ MIB.addImm(Offset).addMetadata(MDPtr);
+ return &*MIB;
}
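
The rewritten EmitDbgValue dispatches on what kind of value a dbg_value describes: a frame index goes through the target hook, an SDNode result becomes a register reference (or undef if the node never received a vreg), and a constant becomes an immediate or FP immediate, with integers wider than 64 bits dropped to undef per the FIXME above. A compact model of that dispatch; DbgValueIn and the result strings are stand-ins, not the MachineInstr being built:

    #include <cstdio>

    enum DbgKind { FrameIndex, VirtualReg, ConstInt, Undef };
    struct DbgValueIn {
      DbgKind Kind;
      bool HasVReg;       // SDNODE case: did the node get a vreg at all?
      bool FitsInInt64;   // CONST case: wider constants are dropped
    };

    static const char *describe(const DbgValueIn &V) {
      switch (V.Kind) {
      case FrameIndex: return "frame-index DBG_VALUE (target hook)";
      case VirtualReg: return V.HasVReg ? "register DBG_VALUE"
                                        : "undef DBG_VALUE (value was optimized away)";
      case ConstInt:   return V.FitsInInt64 ? "immediate DBG_VALUE"
                                            : "undef DBG_VALUE (>64-bit constant dropped)";
      default:         return "undef DBG_VALUE";
      }
    }

    int main() {
      DbgValueIn A = {VirtualReg, /*HasVReg=*/false, true};
      DbgValueIn B = {ConstInt, false, /*FitsInInt64=*/false};
      std::printf("%s\n%s\n", describe(A), describe(B));
    }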
-/// EmitNode - Generate machine code for a node and needed dependencies.
+/// EmitMachineNode - Generate machine code for a target-specific node and
+/// needed dependencies.
///
-void InstrEmitter::EmitNode(SDNode *Node, bool IsClone, bool IsCloned,
- DenseMap<SDValue, unsigned> &VRBaseMap,
- DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
- // If machine instruction
- if (Node->isMachineOpcode()) {
- unsigned Opc = Node->getMachineOpcode();
-
- // Handle subreg insert/extract specially
- if (Opc == TargetOpcode::EXTRACT_SUBREG ||
- Opc == TargetOpcode::INSERT_SUBREG ||
- Opc == TargetOpcode::SUBREG_TO_REG) {
- EmitSubregNode(Node, VRBaseMap);
- return;
- }
+void InstrEmitter::
+EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
+ DenseMap<SDValue, unsigned> &VRBaseMap) {
+ unsigned Opc = Node->getMachineOpcode();
+
+ // Handle subreg insert/extract specially
+ if (Opc == TargetOpcode::EXTRACT_SUBREG ||
+ Opc == TargetOpcode::INSERT_SUBREG ||
+ Opc == TargetOpcode::SUBREG_TO_REG) {
+ EmitSubregNode(Node, VRBaseMap, IsClone, IsCloned);
+ return;
+ }
- // Handle COPY_TO_REGCLASS specially.
- if (Opc == TargetOpcode::COPY_TO_REGCLASS) {
- EmitCopyToRegClassNode(Node, VRBaseMap);
- return;
- }
+ // Handle COPY_TO_REGCLASS specially.
+ if (Opc == TargetOpcode::COPY_TO_REGCLASS) {
+ EmitCopyToRegClassNode(Node, VRBaseMap);
+ return;
+ }
- if (Opc == TargetOpcode::IMPLICIT_DEF)
- // We want a unique VR for each IMPLICIT_DEF use.
- return;
-
- const TargetInstrDesc &II = TII->get(Opc);
- unsigned NumResults = CountResults(Node);
- unsigned NodeOperands = CountOperands(Node);
- bool HasPhysRegOuts = (NumResults > II.getNumDefs()) &&
- II.getImplicitDefs() != 0;
+ // Handle REG_SEQUENCE specially.
+ if (Opc == TargetOpcode::REG_SEQUENCE) {
+ EmitRegSequence(Node, VRBaseMap, IsClone, IsCloned);
+ return;
+ }
+
+ if (Opc == TargetOpcode::IMPLICIT_DEF)
+ // We want a unique VR for each IMPLICIT_DEF use.
+ return;
+
+ const TargetInstrDesc &II = TII->get(Opc);
+ unsigned NumResults = CountResults(Node);
+ unsigned NodeOperands = CountOperands(Node);
+ bool HasPhysRegOuts = NumResults > II.getNumDefs() && II.getImplicitDefs()!=0;
#ifndef NDEBUG
- unsigned NumMIOperands = NodeOperands + NumResults;
- assert((II.getNumOperands() == NumMIOperands ||
- HasPhysRegOuts || II.isVariadic()) &&
- "#operands for dag node doesn't match .td file!");
+ unsigned NumMIOperands = NodeOperands + NumResults;
+ if (II.isVariadic())
+ assert(NumMIOperands >= II.getNumOperands() &&
+ "Too few operands for a variadic node!");
+ else
+ assert(NumMIOperands >= II.getNumOperands() &&
+ NumMIOperands <= II.getNumOperands()+II.getNumImplicitDefs() &&
+ "#operands for dag node doesn't match .td file!");
#endif
- // Create the new machine instruction.
- MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(), II);
-
- // Add result register values for things that are defined by this
- // instruction.
- if (NumResults)
- CreateVirtualRegisters(Node, MI, II, IsClone, IsCloned, VRBaseMap);
-
- // Emit all of the actual operands of this instruction, adding them to the
- // instruction as appropriate.
- bool HasOptPRefs = II.getNumDefs() > NumResults;
- assert((!HasOptPRefs || !HasPhysRegOuts) &&
- "Unable to cope with optional defs and phys regs defs!");
- unsigned NumSkip = HasOptPRefs ? II.getNumDefs() - NumResults : 0;
- for (unsigned i = NumSkip; i != NodeOperands; ++i)
- AddOperand(MI, Node->getOperand(i), i-NumSkip+II.getNumDefs(), &II,
- VRBaseMap);
-
- // Transfer all of the memory reference descriptions of this instruction.
- MI->setMemRefs(cast<MachineSDNode>(Node)->memoperands_begin(),
- cast<MachineSDNode>(Node)->memoperands_end());
-
- if (II.usesCustomInsertionHook()) {
- // Insert this instruction into the basic block using a target
- // specific inserter which may returns a new basic block.
- MBB = TLI->EmitInstrWithCustomInserter(MI, MBB, EM);
- InsertPos = MBB->end();
- } else {
- MBB->insert(InsertPos, MI);
- }
-
- // Additional results must be an physical register def.
- if (HasPhysRegOuts) {
- for (unsigned i = II.getNumDefs(); i < NumResults; ++i) {
- unsigned Reg = II.getImplicitDefs()[i - II.getNumDefs()];
- if (Node->hasAnyUseOfValue(i))
- EmitCopyFromReg(Node, i, IsClone, IsCloned, Reg, VRBaseMap);
- // If there are no uses, mark the register as dead now, so that
- // MachineLICM/Sink can see that it's dead. Don't do this if the
- // node has a Flag value, for the benefit of targets still using
- // Flag for values in physregs.
- else if (Node->getValueType(Node->getNumValues()-1) != MVT::Flag)
- MI->addRegisterDead(Reg, TRI);
+ // Create the new machine instruction.
+ MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(), II);
+
+ // The MachineInstr constructor adds implicit-def operands. Scan through
+ // these to determine which are dead.
+ if (MI->getNumOperands() != 0 &&
+ Node->getValueType(Node->getNumValues()-1) == MVT::Flag) {
+ // First, collect all used registers.
+ SmallVector<unsigned, 8> UsedRegs;
+ for (SDNode *F = Node->getFlaggedUser(); F; F = F->getFlaggedUser())
+ if (F->getOpcode() == ISD::CopyFromReg)
+ UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
+ else {
+ // Collect declared implicit uses.
+ const TargetInstrDesc &TID = TII->get(F->getMachineOpcode());
+ UsedRegs.append(TID.getImplicitUses(),
+ TID.getImplicitUses() + TID.getNumImplicitUses());
+ // In addition to declared implicit uses, we must also check for
+ // direct RegisterSDNode operands.
+ for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
+ if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
+ unsigned Reg = R->getReg();
+ if (Reg != 0 && TargetRegisterInfo::isPhysicalRegister(Reg))
+ UsedRegs.push_back(Reg);
+ }
}
+ // Then mark unused registers as dead.
+ MI->setPhysRegsDeadExcept(UsedRegs, *TRI);
+ }
+
+ // Add result register values for things that are defined by this
+ // instruction.
+ if (NumResults)
+ CreateVirtualRegisters(Node, MI, II, IsClone, IsCloned, VRBaseMap);
+
+ // Emit all of the actual operands of this instruction, adding them to the
+ // instruction as appropriate.
+ bool HasOptPRefs = II.getNumDefs() > NumResults;
+ assert((!HasOptPRefs || !HasPhysRegOuts) &&
+ "Unable to cope with optional defs and phys regs defs!");
+ unsigned NumSkip = HasOptPRefs ? II.getNumDefs() - NumResults : 0;
+ for (unsigned i = NumSkip; i != NodeOperands; ++i)
+ AddOperand(MI, Node->getOperand(i), i-NumSkip+II.getNumDefs(), &II,
+ VRBaseMap, /*IsDebug=*/false, IsClone, IsCloned);
+
+ // Transfer all of the memory reference descriptions of this instruction.
+ MI->setMemRefs(cast<MachineSDNode>(Node)->memoperands_begin(),
+ cast<MachineSDNode>(Node)->memoperands_end());
+
+ // Insert the instruction into position in the block. This needs to
+ // happen before any custom inserter hook is called so that the
+ // hook knows where in the block to insert the replacement code.
+ MBB->insert(InsertPos, MI);
+
+ if (II.usesCustomInsertionHook()) {
+ // Insert this instruction into the basic block using a target
+ // specific inserter which may return a new basic block.
+ bool AtEnd = InsertPos == MBB->end();
+ MachineBasicBlock *NewMBB = TLI->EmitInstrWithCustomInserter(MI, MBB);
+ if (NewMBB != MBB) {
+ if (AtEnd)
+ InsertPos = NewMBB->end();
+ MBB = NewMBB;
}
return;
}
+
+ // Additional results must be a physical register def.
+ if (HasPhysRegOuts) {
+ for (unsigned i = II.getNumDefs(); i < NumResults; ++i) {
+ unsigned Reg = II.getImplicitDefs()[i - II.getNumDefs()];
+ if (Node->hasAnyUseOfValue(i))
+ EmitCopyFromReg(Node, i, IsClone, IsCloned, Reg, VRBaseMap);
+ // If there are no uses, mark the register as dead now, so that
+ // MachineLICM/Sink can see that it's dead. Don't do this if the
+ // node has a Flag value, for the benefit of targets still using
+ // Flag for values in physregs.
+ else if (Node->getValueType(Node->getNumValues()-1) != MVT::Flag)
+ MI->addRegisterDead(Reg, TRI);
+ }
+ }
+
+ // If the instruction has implicit defs and the node doesn't, mark the
+ // implicit def as dead. If the node has any flag outputs, we don't do this
+ // because we don't know what implicit defs are being used by flagged nodes.
+ if (Node->getValueType(Node->getNumValues()-1) != MVT::Flag)
+ if (const unsigned *IDList = II.getImplicitDefs()) {
+ for (unsigned i = NumResults, e = II.getNumDefs()+II.getNumImplicitDefs();
+ i != e; ++i)
+ MI->addRegisterDead(IDList[i-II.getNumDefs()], TRI);
+ }
+}
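
The new block near the top of EmitMachineNode walks the flag-glued users of the node, collects every physical register they read (CopyFromReg sources, declared implicit uses, direct RegisterSDNode operands) and then calls setPhysRegsDeadExcept so implicit defs that nothing reads are marked dead. Stripped of the SelectionDAG plumbing, the bookkeeping looks like this (register numbers arbitrary):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct ImplicitDef { unsigned Reg; bool Dead; };

    // Mark every implicit def whose register is not in UsedRegs as dead.
    static void markDeadExcept(std::vector<ImplicitDef> &Defs,
                               const std::vector<unsigned> &UsedRegs) {
      for (ImplicitDef &D : Defs)
        D.Dead = std::find(UsedRegs.begin(), UsedRegs.end(), D.Reg) ==
                 UsedRegs.end();
    }

    int main() {
      std::vector<ImplicitDef> Defs = { {10, false}, {11, false} };
      std::vector<unsigned> Used = { 10 };   // a glued CopyFromReg reads reg 10
      markDeadExcept(Defs, Used);
      for (const ImplicitDef &D : Defs)
        std::printf("reg %u %s\n", D.Reg, D.Dead ? "dead" : "live");
    }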
+/// EmitSpecialNode - Generate machine code for a target-independent node and
+/// needed dependencies.
+void InstrEmitter::
+EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
+ DenseMap<SDValue, unsigned> &VRBaseMap) {
switch (Node->getOpcode()) {
default:
#ifndef NDEBUG
@@ -659,24 +789,9 @@ void InstrEmitter::EmitNode(SDNode *Node, bool IsClone, bool IsCloned,
unsigned DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
break;
-
- const TargetRegisterClass *SrcTRC = 0, *DstTRC = 0;
- // Get the register classes of the src/dst.
- if (TargetRegisterInfo::isVirtualRegister(SrcReg))
- SrcTRC = MRI->getRegClass(SrcReg);
- else
- SrcTRC = TRI->getPhysicalRegisterRegClass(SrcReg,SrcVal.getValueType());
- if (TargetRegisterInfo::isVirtualRegister(DestReg))
- DstTRC = MRI->getRegClass(DestReg);
- else
- DstTRC = TRI->getPhysicalRegisterRegClass(DestReg,
- Node->getOperand(1).getValueType());
-
- bool Emitted = TII->copyRegToReg(*MBB, InsertPos, DestReg, SrcReg,
- DstTRC, SrcTRC);
- assert(Emitted && "Unable to issue a copy instruction!\n");
- (void) Emitted;
+ BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
+ DestReg).addReg(SrcReg);
break;
}
case ISD::CopyFromReg: {
@@ -684,6 +799,13 @@ void InstrEmitter::EmitNode(SDNode *Node, bool IsClone, bool IsCloned,
EmitCopyFromReg(Node, 0, IsClone, IsCloned, SrcReg, VRBaseMap);
break;
}
+ case ISD::EH_LABEL: {
+ MCSymbol *S = cast<EHLabelSDNode>(Node)->getLabel();
+ BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
+ TII->get(TargetOpcode::EH_LABEL)).addSym(S);
+ break;
+ }
+
case ISD::INLINEASM: {
unsigned NumOps = Node->getNumOperands();
if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
@@ -694,12 +816,18 @@ void InstrEmitter::EmitNode(SDNode *Node, bool IsClone, bool IsCloned,
TII->get(TargetOpcode::INLINEASM));
// Add the asm string as an external symbol operand.
- const char *AsmStr =
- cast<ExternalSymbolSDNode>(Node->getOperand(1))->getSymbol();
+ SDValue AsmStrV = Node->getOperand(InlineAsm::Op_AsmString);
+ const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
MI->addOperand(MachineOperand::CreateES(AsmStr));
+ // Add the isAlignStack bit.
+ int64_t isAlignStack =
+ cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_IsAlignStack))->
+ getZExtValue();
+ MI->addOperand(MachineOperand::CreateImm(isAlignStack));
+
// Add all of the operand registers to the instruction.
- for (unsigned i = 2; i != NumOps;) {
+ for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
unsigned Flags =
cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
@@ -707,31 +835,47 @@ void InstrEmitter::EmitNode(SDNode *Node, bool IsClone, bool IsCloned,
MI->addOperand(MachineOperand::CreateImm(Flags));
++i; // Skip the ID value.
- switch (Flags & 7) {
+ switch (InlineAsm::getKind(Flags)) {
default: llvm_unreachable("Bad flags!");
- case 2: // Def of register.
+ case InlineAsm::Kind_RegDef:
for (; NumVals; --NumVals, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
- MI->addOperand(MachineOperand::CreateReg(Reg, true));
+ // FIXME: Add dead flags for physical and virtual registers defined.
+ // For now, mark physical register defs as implicit to help fast
+ // regalloc. This makes inline asm look a lot like calls.
+ MI->addOperand(MachineOperand::CreateReg(Reg, true,
+ /*isImp=*/ TargetRegisterInfo::isPhysicalRegister(Reg)));
}
break;
- case 6: // Def of earlyclobber register.
+ case InlineAsm::Kind_RegDefEarlyClobber:
for (; NumVals; --NumVals, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
- MI->addOperand(MachineOperand::CreateReg(Reg, true, false, false,
- false, false, true));
+ MI->addOperand(MachineOperand::CreateReg(Reg, /*isDef=*/ true,
+ /*isImp=*/ TargetRegisterInfo::isPhysicalRegister(Reg),
+ /*isKill=*/ false,
+ /*isDead=*/ false,
+ /*isUndef=*/false,
+ /*isEarlyClobber=*/ true));
}
break;
- case 1: // Use of register.
- case 3: // Immediate.
- case 4: // Addressing mode.
+ case InlineAsm::Kind_RegUse: // Use of register.
+ case InlineAsm::Kind_Imm: // Immediate.
+ case InlineAsm::Kind_Mem: // Addressing mode.
// The addressing mode has been selected, just add all of the
// operands to the machine instruction.
for (; NumVals; --NumVals, ++i)
- AddOperand(MI, Node->getOperand(i), 0, 0, VRBaseMap);
+ AddOperand(MI, Node->getOperand(i), 0, 0, VRBaseMap,
+ /*IsDebug=*/false, IsClone, IsCloned);
break;
}
}
+
+ // Get the mdnode from the asm if it exists and add it to the instruction.
+ SDValue MDV = Node->getOperand(InlineAsm::Op_MDNode);
+ const MDNode *MD = cast<MDNodeSDNode>(MDV)->getMD();
+ if (MD)
+ MI->addOperand(MachineOperand::CreateMetadata(MD));
+
MBB->insert(InsertPos, MI);
break;
}
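
The inline-asm hunk replaces the bare case labels 2, 6, 1, 3, 4 and the Flags & 7 mask with the named InlineAsm::Kind_* values and InlineAsm::getKind(). A toy flag word that mirrors the idea, with the kind in the low three bits (that much is visible in the old mask) and the register count packed above it, the latter being an assumption about the exact layout rather than LLVM's documented encoding:

    #include <cassert>
    #include <cstdio>

    // Numeric values taken from the replaced case labels above.
    enum Kind { Kind_RegUse = 1, Kind_RegDef = 2, Kind_Imm = 3,
                Kind_Mem = 4, Kind_RegDefEarlyClobber = 6 };

    static unsigned makeFlags(Kind K, unsigned NumRegs) { return K | (NumRegs << 3); }
    static Kind getKind(unsigned Flags) { return Kind(Flags & 7); }
    static unsigned getNumOperandRegisters(unsigned Flags) { return Flags >> 3; }

    int main() {
      unsigned F = makeFlags(Kind_RegDefEarlyClobber, 2);
      assert(getKind(F) == Kind_RegDefEarlyClobber);
      assert(getNumOperandRegisters(F) == 2);
      std::printf("kind=%u regs=%u\n", getKind(F), getNumOperandRegisters(F));
    }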
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.h b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.h
index 4fe9f19..02c044c 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.h
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.h
@@ -64,7 +64,8 @@ class InstrEmitter {
void AddRegisterOperand(MachineInstr *MI, SDValue Op,
unsigned IIOpNum,
const TargetInstrDesc *II,
- DenseMap<SDValue, unsigned> &VRBaseMap);
+ DenseMap<SDValue, unsigned> &VRBaseMap,
+ bool IsDebug, bool IsClone, bool IsCloned);
/// AddOperand - Add the specified operand to the specified machine instr. II
/// specifies the instruction information for the node, and IIOpNum is the
@@ -73,11 +74,13 @@ class InstrEmitter {
void AddOperand(MachineInstr *MI, SDValue Op,
unsigned IIOpNum,
const TargetInstrDesc *II,
- DenseMap<SDValue, unsigned> &VRBaseMap);
+ DenseMap<SDValue, unsigned> &VRBaseMap,
+ bool IsDebug, bool IsClone, bool IsCloned);
/// EmitSubregNode - Generate machine code for subreg nodes.
///
- void EmitSubregNode(SDNode *Node, DenseMap<SDValue, unsigned> &VRBaseMap);
+ void EmitSubregNode(SDNode *Node, DenseMap<SDValue, unsigned> &VRBaseMap,
+ bool IsClone, bool IsCloned);
/// EmitCopyToRegClassNode - Generate machine code for COPY_TO_REGCLASS nodes.
/// COPY_TO_REGCLASS is just a normal copy, except that the destination
@@ -86,6 +89,10 @@ class InstrEmitter {
void EmitCopyToRegClassNode(SDNode *Node,
DenseMap<SDValue, unsigned> &VRBaseMap);
+ /// EmitRegSequence - Generate machine code for REG_SEQUENCE nodes.
+ ///
+ void EmitRegSequence(SDNode *Node, DenseMap<SDValue, unsigned> &VRBaseMap,
+ bool IsClone, bool IsCloned);
public:
/// CountResults - The results of target nodes have register or immediate
/// operands first, then an optional chain, and optional flag operands
@@ -98,21 +105,20 @@ public:
/// MachineInstr.
static unsigned CountOperands(SDNode *Node);
- /// EmitDbgValue - Generate any debug info that refers to this Node. Constant
- /// dbg_value is not handled here.
- void EmitDbgValue(SDNode *Node,
- DenseMap<SDValue, unsigned> &VRBaseMap,
- SDDbgValue* sd);
-
-
- /// EmitDbgValue - Generate a constant DBG_VALUE. No node is involved.
- void EmitDbgValue(SDDbgValue* sd);
+ /// EmitDbgValue - Generate machine instruction for a dbg_value node.
+ ///
+ MachineInstr *EmitDbgValue(SDDbgValue *SD,
+ DenseMap<SDValue, unsigned> &VRBaseMap);
/// EmitNode - Generate machine code for a node and needed dependencies.
///
void EmitNode(SDNode *Node, bool IsClone, bool IsCloned,
- DenseMap<SDValue, unsigned> &VRBaseMap,
- DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM);
+ DenseMap<SDValue, unsigned> &VRBaseMap) {
+ if (Node->isMachineOpcode())
+ EmitMachineNode(Node, IsClone, IsCloned, VRBaseMap);
+ else
+ EmitSpecialNode(Node, IsClone, IsCloned, VRBaseMap);
+ }
/// getBlock - Return the current basic block.
MachineBasicBlock *getBlock() { return MBB; }
@@ -123,6 +129,12 @@ public:
/// InstrEmitter - Construct an InstrEmitter and set it to start inserting
/// at the given position in the given block.
InstrEmitter(MachineBasicBlock *mbb, MachineBasicBlock::iterator insertpos);
+
+private:
+ void EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
+ DenseMap<SDValue, unsigned> &VRBaseMap);
+ void EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
+ DenseMap<SDValue, unsigned> &VRBaseMap);
};
}
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index f498263..2981cd3 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -16,7 +16,6 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/DwarfWriter.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetFrameInfo.h"
@@ -24,7 +23,6 @@
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetSubtarget.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
@@ -39,7 +37,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include <map>
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -56,7 +53,8 @@ using namespace llvm;
///
namespace {
class SelectionDAGLegalize {
- TargetLowering &TLI;
+ const TargetMachine &TM;
+ const TargetLowering &TLI;
SelectionDAG &DAG;
CodeGenOpt::Level OptLevel;
@@ -102,8 +100,7 @@ public:
/// it is already legal or we need to expand it into multiple registers of
/// smaller integer type, or we need to promote it to a larger type.
LegalizeAction getTypeAction(EVT VT) const {
- return
- (LegalizeAction)ValueTypeActions.getTypeAction(*DAG.getContext(), VT);
+ return (LegalizeAction)ValueTypeActions.getTypeAction(VT);
}
/// isTypeLegal - Return true if this type is legal on this target.
@@ -136,7 +133,7 @@ private:
/// whose vector element type is narrower than the original shuffle type.
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
- SDValue N1, SDValue N2,
+ SDValue N1, SDValue N2,
SmallVectorImpl<int> &Mask) const;
bool LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
@@ -146,6 +143,8 @@ private:
DebugLoc dl);
SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
+ std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC,
+ SDNode *Node, bool isSigned);
SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
RTLIB::Libcall Call_PPCF128);
@@ -175,6 +174,8 @@ private:
SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
SDValue ExpandVectorBuildThroughStack(SDNode* Node);
+ std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);
+
void ExpandNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
void PromoteNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
};
@@ -184,8 +185,8 @@ private:
/// performs the same shuffle in terms of order of result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
-SDValue
-SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
+SDValue
+SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
SDValue N1, SDValue N2,
SmallVectorImpl<int> &Mask) const {
unsigned NumMaskElts = VT.getVectorNumElements();
@@ -196,12 +197,12 @@ SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
if (NumEltsGrowth == 1)
return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]);
-
+
SmallVector<int, 8> NewMask;
for (unsigned i = 0; i != NumMaskElts; ++i) {
int Idx = Mask[i];
for (unsigned j = 0; j != NumEltsGrowth; ++j) {
- if (Idx < 0)
+ if (Idx < 0)
NewMask.push_back(-1);
else
NewMask.push_back(Idx * NumEltsGrowth + j);
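
The hunks in this region are whitespace-only, but the function they touch, ShuffleWithNarrowerEltType, is a self-contained algorithm: when a shuffle's element type is split into NumEltsGrowth narrower elements, each mask entry Idx expands to the consecutive entries Idx*NumEltsGrowth + j, with -1 (undef) staying -1. The mask math in isolation, reproducing the example from the comment above:

    #include <cstdio>
    #include <vector>

    static std::vector<int> growMask(const std::vector<int> &Mask,
                                     unsigned NumEltsGrowth) {
      std::vector<int> NewMask;
      for (int Idx : Mask)
        for (unsigned j = 0; j != NumEltsGrowth; ++j)
          NewMask.push_back(Idx < 0 ? -1 : Idx * (int)NumEltsGrowth + (int)j);
      return NewMask;
    }

    int main() {
      // <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
      for (int I : growMask({0, 1, 0, 1}, 2))
        std::printf("%d ", I);
      std::printf("\n");
    }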
@@ -214,7 +215,8 @@ SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag,
CodeGenOpt::Level ol)
- : TLI(dag.getTargetLoweringInfo()), DAG(dag), OptLevel(ol),
+ : TM(dag.getTarget()), TLI(dag.getTargetLoweringInfo()),
+ DAG(dag), OptLevel(ol),
ValueTypeActions(TLI.getValueTypeActions()) {
assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE &&
"Too many value types for ValueTypeActions to hold!");
@@ -322,7 +324,8 @@ bool SelectionDAGLegalize::LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
bool OperandsLeadToDest = false;
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
OperandsLeadToDest |= // If an operand leads to Dest, so do we.
- LegalizeAllNodesNotLeadingTo(N->getOperand(i).getNode(), Dest, NodesLeadingTo);
+ LegalizeAllNodesNotLeadingTo(N->getOperand(i).getNode(), Dest,
+ NodesLeadingTo);
if (OperandsLeadToDest) {
NodesLeadingTo.insert(N);
@@ -359,7 +362,7 @@ static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
EVT SVT = VT;
while (SVT != MVT::f32) {
SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
- if (CFP->isValueValidForType(SVT, CFP->getValueAPF()) &&
+ if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) &&
// Only do this if the target has a native EXTLOAD instruction from
// smaller type.
TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
@@ -374,8 +377,8 @@ static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
if (Extend)
- return DAG.getExtLoad(ISD::EXTLOAD, dl,
- OrigVT, DAG.getEntryNode(),
+ return DAG.getExtLoad(ISD::EXTLOAD, OrigVT, dl,
+ DAG.getEntryNode(),
CPIdx, PseudoSourceValue::getConstantPool(),
0, VT, false, false, Alignment);
return DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx,
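
This hunk only switches to the static ConstantFPSDNode::isValueValidForType, but the surrounding ExpandConstantFP logic deserves a note: an FP constant is placed in the constant pool, and if it is exactly representable in a narrower FP type that the target can EXTLOAD, the narrower form is stored and extended on load. The representability test amounts to a lossless round-trip; in plain C++ for double versus float (APFloat's losesInfo handles NaNs and other cases this simple check does not):

    #include <cstdio>

    static bool fitsInFloat(double D) {
      float F = static_cast<float>(D);       // round to the narrower type
      return static_cast<double>(F) == D;    // check that nothing was lost
    }

    int main() {
      std::printf("%d %d\n", fitsInFloat(0.5), fitsInFloat(0.1));   // 1 0
    }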
@@ -410,7 +413,9 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
// to the final destination using (unaligned) integer loads and stores.
EVT StoredVT = ST->getMemoryVT();
EVT RegVT =
- TLI.getRegisterType(*DAG.getContext(), EVT::getIntegerVT(*DAG.getContext(), StoredVT.getSizeInBits()));
+ TLI.getRegisterType(*DAG.getContext(),
+ EVT::getIntegerVT(*DAG.getContext(),
+ StoredVT.getSizeInBits()));
unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
unsigned RegBytes = RegVT.getSizeInBits() / 8;
unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;
@@ -446,10 +451,11 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
// The last store may be partial. Do a truncating store. On big-endian
// machines this requires an extending load from the stack slot to ensure
// that the bits are in the right place.
- EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset));
+ EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
+ 8 * (StoredBytes - Offset));
// Load from the stack slot.
- SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
+ SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, RegVT, dl, Store, StackPtr,
NULL, 0, MemVT, false, false, 0);
Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
@@ -549,8 +555,9 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
}
// The last copy may be partial. Do an extending load.
- EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 8 * (LoadedBytes - Offset));
- SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
+ EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
+ 8 * (LoadedBytes - Offset));
+ SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, RegVT, dl, Chain, Ptr,
LD->getSrcValue(), SVOffset + Offset,
MemVT, LD->isVolatile(),
LD->isNonTemporal(),
@@ -566,7 +573,7 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
Stores.size());
// Finally, perform the original load only redirected to the stack slot.
- Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
+ Load = DAG.getExtLoad(LD->getExtensionType(), VT, dl, TF, StackBase,
NULL, 0, LoadedVT, false, false, 0);
// Callers expect a MERGE_VALUES node.
@@ -595,23 +602,23 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
// Load the value in two parts
SDValue Lo, Hi;
if (TLI.isLittleEndian()) {
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getSrcValue(),
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD, VT, dl, Chain, Ptr, LD->getSrcValue(),
SVOffset, NewLoadedVT, LD->isVolatile(),
LD->isNonTemporal(), Alignment);
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, TLI.getPointerTy()));
- Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getSrcValue(),
+ Hi = DAG.getExtLoad(HiExtType, VT, dl, Chain, Ptr, LD->getSrcValue(),
SVOffset + IncrementSize, NewLoadedVT, LD->isVolatile(),
- LD->isNonTemporal(), MinAlign(Alignment, IncrementSize));
+ LD->isNonTemporal(), MinAlign(Alignment,IncrementSize));
} else {
- Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getSrcValue(),
+ Hi = DAG.getExtLoad(HiExtType, VT, dl, Chain, Ptr, LD->getSrcValue(),
SVOffset, NewLoadedVT, LD->isVolatile(),
LD->isNonTemporal(), Alignment);
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, TLI.getPointerTy()));
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getSrcValue(),
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD, VT, dl, Chain, Ptr, LD->getSrcValue(),
SVOffset + IncrementSize, NewLoadedVT, LD->isVolatile(),
- LD->isNonTemporal(), MinAlign(Alignment, IncrementSize));
+ LD->isNonTemporal(), MinAlign(Alignment,IncrementSize));
}
// aggregate the two parts
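
The two expansion strategies above handle memory accesses the target cannot perform at the requested alignment: the first copies the data through a stack slot in register-sized integer pieces, the second (this hunk) loads two half-width pieces and recombines them with shift-and-or, zero-extending the low piece and choosing which piece is Lo or Hi by endianness. The recombination for a 32-bit little-endian load, with byte reads standing in for the two narrower loads the legalizer emits:

    #include <cstdint>
    #include <cstdio>

    static uint32_t loadUnaligned32LE(const unsigned char *P) {
      uint32_t Lo = (uint32_t)P[0] | ((uint32_t)P[1] << 8);   // low half, zero-extended
      uint32_t Hi = (uint32_t)P[2] | ((uint32_t)P[3] << 8);   // high half
      return Lo | (Hi << 16);     // on big-endian targets the roles of Lo and Hi swap
    }

    int main() {
      unsigned char Buf[6] = {0, 0x78, 0x56, 0x34, 0x12, 0};
      std::printf("0x%08x\n", loadUnaligned32LE(Buf + 1));    // 0x12345678
    }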
@@ -771,7 +778,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
"Unexpected illegal type!");
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
- assert((isTypeLegal(Node->getOperand(i).getValueType()) ||
+ assert((isTypeLegal(Node->getOperand(i).getValueType()) ||
Node->getOperand(i).getOpcode() == ISD::TargetConstant) &&
"Unexpected illegal type!");
@@ -851,6 +858,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
case ISD::MERGE_VALUES:
case ISD::EH_RETURN:
case ISD::FRAME_TO_ARGS_OFFSET:
+ case ISD::EH_SJLJ_SETJMP:
+ case ISD::EH_SJLJ_LONGJMP:
// These operations lie about being legal: when they claim to be legal,
// they should actually be expanded.
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
@@ -923,8 +932,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
break;
}
- Result = DAG.UpdateNodeOperands(Result.getValue(0), Ops.data(),
- Ops.size());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), Ops.data(),
+ Ops.size()), 0);
switch (Action) {
case TargetLowering::Legal:
for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
@@ -969,11 +978,11 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Node->dump( &DAG);
dbgs() << "\n";
#endif
- llvm_unreachable("Do not know how to legalize this operator!");
+ assert(0 && "Do not know how to legalize this operator!");
case ISD::BUILD_VECTOR:
switch (TLI.getOperationAction(ISD::BUILD_VECTOR, Node->getValueType(0))) {
- default: llvm_unreachable("This action is not supported yet!");
+ default: assert(0 && "This action is not supported yet!");
case TargetLowering::Custom:
Tmp3 = TLI.LowerOperation(Result, DAG);
if (Tmp3.getNode()) {
@@ -998,11 +1007,11 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
NodesLeadingTo);
}
- // Now that we legalized all of the inputs (which may have inserted
- // libcalls) create the new CALLSEQ_START node.
+ // Now that we have legalized all of the inputs (which may have inserted
+ // libcalls), create the new CALLSEQ_START node.
Tmp1 = LegalizeOp(Node->getOperand(0)); // Legalize the chain.
- // Merge in the last call, to ensure that this call start after the last
+ // Merge in the last call to ensure that this call starts after the last
// call ended.
if (LastCALLSEQ_END.getOpcode() != ISD::EntryToken) {
Tmp1 = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
@@ -1014,7 +1023,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
if (Tmp1 != Node->getOperand(0)) {
SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
Ops[0] = Tmp1;
- Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), &Ops[0],
+ Ops.size()), Result.getResNo());
}
// Remember that the CALLSEQ_START is legalized.
@@ -1056,7 +1066,9 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
if (Tmp1 != Node->getOperand(0)) {
SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
Ops[0] = Tmp1;
- Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ &Ops[0], Ops.size()),
+ Result.getResNo());
}
} else {
Tmp2 = LegalizeOp(Node->getOperand(Node->getNumOperands()-1));
@@ -1065,7 +1077,9 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
Ops[0] = Tmp1;
Ops.back() = Tmp2;
- Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ &Ops[0], Ops.size()),
+ Result.getResNo());
}
}
assert(IsLegalizingCall && "Call sequence imbalance between start/end?");
@@ -1085,12 +1099,14 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
ISD::LoadExtType ExtType = LD->getExtensionType();
if (ExtType == ISD::NON_EXTLOAD) {
EVT VT = Node->getValueType(0);
- Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ Tmp1, Tmp2, LD->getOffset()),
+ Result.getResNo());
Tmp3 = Result.getValue(0);
Tmp4 = Result.getValue(1);
switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
- default: llvm_unreachable("This action is not supported yet!");
+ default: assert(0 && "This action is not supported yet!");
case TargetLowering::Legal:
// If this is an unaligned load and the target doesn't support it,
// expand it.
@@ -1098,7 +1114,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
if (LD->getAlignment() < ABIAlignment){
- Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
+ Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
DAG, TLI);
Tmp3 = Result.getOperand(0);
Tmp4 = Result.getOperand(1);
@@ -1164,7 +1180,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
ISD::LoadExtType NewExtType =
ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;
- Result = DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
+ Result = DAG.getExtLoad(NewExtType, Node->getValueType(0), dl,
Tmp1, Tmp2, LD->getSrcValue(), SVOffset,
NVT, isVolatile, isNonTemporal, Alignment);
@@ -1200,8 +1216,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
if (TLI.isLittleEndian()) {
// EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
// Load the bottom RoundWidth bits.
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl,
- Node->getValueType(0), Tmp1, Tmp2,
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD, Node->getValueType(0), dl,
+ Tmp1, Tmp2,
LD->getSrcValue(), SVOffset, RoundVT, isVolatile,
isNonTemporal, Alignment);
@@ -1209,13 +1225,13 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
IncrementSize = RoundWidth / 8;
Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
DAG.getIntPtrConstant(IncrementSize));
- Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
+ Hi = DAG.getExtLoad(ExtType, Node->getValueType(0), dl, Tmp1, Tmp2,
LD->getSrcValue(), SVOffset + IncrementSize,
ExtraVT, isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize));
- // Build a factor node to remember that this load is independent of the
- // other one.
+ // Build a factor node to remember that this load is independent of
+ // the other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
@@ -1229,7 +1245,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// Big endian - avoid unaligned loads.
// EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
// Load the top RoundWidth bits.
- Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
+ Hi = DAG.getExtLoad(ExtType, Node->getValueType(0), dl, Tmp1, Tmp2,
LD->getSrcValue(), SVOffset, RoundVT, isVolatile,
isNonTemporal, Alignment);
@@ -1237,14 +1253,14 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
IncrementSize = RoundWidth / 8;
Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
DAG.getIntPtrConstant(IncrementSize));
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl,
- Node->getValueType(0), Tmp1, Tmp2,
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD,
+ Node->getValueType(0), dl, Tmp1, Tmp2,
LD->getSrcValue(), SVOffset + IncrementSize,
ExtraVT, isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize));
- // Build a factor node to remember that this load is independent of the
- // other one.
+ // Build a factor node to remember that this load is independent of
+ // the other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
@@ -1260,12 +1276,14 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Tmp2 = LegalizeOp(Ch);
} else {
switch (TLI.getLoadExtAction(ExtType, SrcVT)) {
- default: llvm_unreachable("This action is not supported yet!");
+ default: assert(0 && "This action is not supported yet!");
case TargetLowering::Custom:
isCustom = true;
// FALLTHROUGH
case TargetLowering::Legal:
- Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ Tmp1, Tmp2, LD->getOffset()),
+ Result.getResNo());
Tmp1 = Result.getValue(0);
Tmp2 = Result.getValue(1);
@@ -1279,10 +1297,12 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// If this is an unaligned load and the target doesn't support it,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
- const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
+ const Type *Ty =
+ LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ unsigned ABIAlignment =
+ TLI.getTargetData()->getABITypeAlignment(Ty);
if (LD->getAlignment() < ABIAlignment){
- Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
+ Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
DAG, TLI);
Tmp1 = Result.getOperand(0);
Tmp2 = Result.getOperand(1);
@@ -1293,25 +1313,35 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
}
break;
case TargetLowering::Expand:
- // f64 = EXTLOAD f32 should expand to LOAD, FP_EXTEND
- // f128 = EXTLOAD {f32,f64} too
- if ((SrcVT == MVT::f32 && (Node->getValueType(0) == MVT::f64 ||
- Node->getValueType(0) == MVT::f128)) ||
- (SrcVT == MVT::f64 && Node->getValueType(0) == MVT::f128)) {
+ if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && isTypeLegal(SrcVT)) {
SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2, LD->getSrcValue(),
LD->getSrcValueOffset(),
LD->isVolatile(), LD->isNonTemporal(),
LD->getAlignment());
- Result = DAG.getNode(ISD::FP_EXTEND, dl,
- Node->getValueType(0), Load);
+ unsigned ExtendOp;
+ switch (ExtType) {
+ case ISD::EXTLOAD:
+ ExtendOp = (SrcVT.isFloatingPoint() ?
+ ISD::FP_EXTEND : ISD::ANY_EXTEND);
+ break;
+ case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break;
+ case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break;
+ default: llvm_unreachable("Unexpected extend load type!");
+ }
+ Result = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
Tmp1 = LegalizeOp(Result); // Relegalize new nodes.
Tmp2 = LegalizeOp(Load.getValue(1));
break;
}
- assert(ExtType != ISD::EXTLOAD &&"EXTLOAD should always be supported!");
+ // FIXME: This does not work for vectors on most targets. Sign- and
+ // zero-extend operations are currently folded into extending loads,
+ // whether they are legal or not, and then we end up here without any
+ // support for legalizing them.
+ assert(ExtType != ISD::EXTLOAD &&
+ "EXTLOAD should always be supported!");
// Turn the unsupported load into an EXTLOAD followed by an explicit
// zero/sign extend inreg.
- Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0),
+ Result = DAG.getExtLoad(ISD::EXTLOAD, Node->getValueType(0), dl,
Tmp1, Tmp2, LD->getSrcValue(),
LD->getSrcValueOffset(), SrcVT,
LD->isVolatile(), LD->isNonTemporal(),
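
The rewritten Expand path above no longer special-cases f64/f128 extending loads; it loads in the source type and then picks an extend opcode from the extension type: EXTLOAD becomes FP_EXTEND for floats or ANY_EXTEND for integers, SEXTLOAD becomes SIGN_EXTEND, ZEXTLOAD becomes ZERO_EXTEND. The same three-way choice for an i8 to i32 widening, in plain C++:

    #include <cstdint>
    #include <cstdio>

    enum ExtType { EXTLOAD, SEXTLOAD, ZEXTLOAD };

    static int32_t widen(uint8_t Byte, ExtType ET) {
      switch (ET) {
      case SEXTLOAD: return (int32_t)(int8_t)Byte;   // sign-extend
      case ZEXTLOAD: return (int32_t)Byte;           // zero-extend
      case EXTLOAD:  return (int32_t)Byte;           // "any" extend: upper bits unspecified,
                                                     // zero is one legal choice
      }
      return 0;
    }

    int main() {
      std::printf("%d %d\n", widen(0xF0, SEXTLOAD), widen(0xF0, ZEXTLOAD));   // -16 240
    }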
@@ -1353,18 +1383,20 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
{
Tmp3 = LegalizeOp(ST->getValue());
- Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2,
- ST->getOffset());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ Tmp1, Tmp3, Tmp2,
+ ST->getOffset()),
+ Result.getResNo());
EVT VT = Tmp3.getValueType();
switch (TLI.getOperationAction(ISD::STORE, VT)) {
- default: llvm_unreachable("This action is not supported yet!");
+ default: assert(0 && "This action is not supported yet!");
case TargetLowering::Legal:
// If this is an unaligned store and the target doesn't support it,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
+ unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
DAG, TLI);
@@ -1395,7 +1427,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// Promote to a byte-sized store with upper bits zero if not
// storing an integral number of bytes. For example, promote
// TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
- EVT NVT = EVT::getIntegerVT(*DAG.getContext(), StVT.getStoreSizeInBits());
+ EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
+ StVT.getStoreSizeInBits());
Tmp3 = DAG.getZeroExtendInReg(Tmp3, dl, StVT);
Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
SVOffset, NVT, isVolatile, isNonTemporal,
@@ -1456,17 +1489,19 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
} else {
if (Tmp1 != ST->getChain() || Tmp3 != ST->getValue() ||
Tmp2 != ST->getBasePtr())
- Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2,
- ST->getOffset());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ Tmp1, Tmp3, Tmp2,
+ ST->getOffset()),
+ Result.getResNo());
switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) {
- default: llvm_unreachable("This action is not supported yet!");
+ default: assert(0 && "This action is not supported yet!");
case TargetLowering::Legal:
// If this is an unaligned store and the target doesn't support it,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
+ unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
DAG, TLI);
@@ -1528,7 +1563,7 @@ SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, NULL, 0,
false, false, 0);
else
- return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr,
+ return DAG.getExtLoad(ISD::EXTLOAD, Op.getValueType(), dl, Ch, StackPtr,
NULL, 0, Vec.getValueType().getVectorElementType(),
false, false, 0);
}
@@ -1565,7 +1600,7 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
Node->getOperand(i), Idx, SV, Offset,
EltVT, false, false, 0));
} else
- Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl,
+ Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl,
Node->getOperand(i), Idx, SV, Offset,
false, false, 0));
}
@@ -1585,35 +1620,51 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
DebugLoc dl = Node->getDebugLoc();
SDValue Tmp1 = Node->getOperand(0);
SDValue Tmp2 = Node->getOperand(1);
- assert((Tmp2.getValueType() == MVT::f32 ||
- Tmp2.getValueType() == MVT::f64) &&
- "Ugly special-cased code!");
- // Get the sign bit of the RHS.
+
+ // Get the sign bit of the RHS. First obtain a value that has the same
+ // sign as the sign bit, i.e. negative if and only if the sign bit is 1.
SDValue SignBit;
- EVT IVT = Tmp2.getValueType() == MVT::f64 ? MVT::i64 : MVT::i32;
+ EVT FloatVT = Tmp2.getValueType();
+ EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits());
if (isTypeLegal(IVT)) {
+ // Convert to an integer with the same sign bit.
SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, IVT, Tmp2);
} else {
- assert(isTypeLegal(TLI.getPointerTy()) &&
- (TLI.getPointerTy() == MVT::i32 ||
- TLI.getPointerTy() == MVT::i64) &&
- "Legal type for load?!");
- SDValue StackPtr = DAG.CreateStackTemporary(Tmp2.getValueType());
- SDValue StorePtr = StackPtr, LoadPtr = StackPtr;
+ // Store the float to memory, then load the sign part out as an integer.
+ MVT LoadTy = TLI.getPointerTy();
+ // First create a temporary that is aligned for both the load and store.
+ SDValue StackPtr = DAG.CreateStackTemporary(FloatVT, LoadTy);
+ // Then store the float to it.
SDValue Ch =
- DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StorePtr, NULL, 0,
+ DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StackPtr, NULL, 0,
false, false, 0);
- if (Tmp2.getValueType() == MVT::f64 && TLI.isLittleEndian())
- LoadPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(),
- LoadPtr, DAG.getIntPtrConstant(4));
- SignBit = DAG.getExtLoad(ISD::SEXTLOAD, dl, TLI.getPointerTy(),
- Ch, LoadPtr, NULL, 0, MVT::i32,
- false, false, 0);
- }
- SignBit =
- DAG.getSetCC(dl, TLI.getSetCCResultType(SignBit.getValueType()),
- SignBit, DAG.getConstant(0, SignBit.getValueType()),
- ISD::SETLT);
+ if (TLI.isBigEndian()) {
+ assert(FloatVT.isByteSized() && "Unsupported floating point type!");
+ // Load out a legal integer with the same sign bit as the float.
+ SignBit = DAG.getLoad(LoadTy, dl, Ch, StackPtr, NULL, 0, false, false, 0);
+ } else { // Little endian
+ SDValue LoadPtr = StackPtr;
+ // The float may be wider than the integer we are going to load. Advance
+ // the pointer so that the loaded integer will contain the sign bit.
+ unsigned Strides = (FloatVT.getSizeInBits()-1)/LoadTy.getSizeInBits();
+ unsigned ByteOffset = (Strides * LoadTy.getSizeInBits()) / 8;
+ LoadPtr = DAG.getNode(ISD::ADD, dl, LoadPtr.getValueType(),
+ LoadPtr, DAG.getIntPtrConstant(ByteOffset));
+ // Load a legal integer containing the sign bit.
+ SignBit = DAG.getLoad(LoadTy, dl, Ch, LoadPtr, NULL, 0, false, false, 0);
+ // Move the sign bit to the top bit of the loaded integer.
+ unsigned BitShift = LoadTy.getSizeInBits() -
+ (FloatVT.getSizeInBits() - 8 * ByteOffset);
+ assert(BitShift < LoadTy.getSizeInBits() && "Pointer advanced wrong?");
+ if (BitShift)
+ SignBit = DAG.getNode(ISD::SHL, dl, LoadTy, SignBit,
+ DAG.getConstant(BitShift,TLI.getShiftAmountTy()));
+ }
+ }
+ // Now get the sign bit proper, by seeing whether the value is negative.
+ SignBit = DAG.getSetCC(dl, TLI.getSetCCResultType(SignBit.getValueType()),
+ SignBit, DAG.getConstant(0, SignBit.getValueType()),
+ ISD::SETLT);
// Get the absolute value of the result.
SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1);
// Select between the nabs and abs value based on the sign bit of
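A standalone host-side sketch (not part of this commit; copysign_via_bits is an invented name) of the bit trick the rewritten FCOPYSIGN expansion performs, assuming a 64-bit IEEE-754 double: view the sign operand as a same-width integer, test its top bit, then select between the absolute value and its negation.

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustrative only: the DAG code does the same with BIT_CONVERT (or a stack
// store/load plus shift when the integer type is not legal), SETLT, FABS and
// a SELECT.
static double copysign_via_bits(double mag, double sgn) {
  uint64_t bits;
  std::memcpy(&bits, &sgn, sizeof bits);   // same-width integer view of sgn
  bool negative = (bits >> 63) != 0;       // the "SignBit < 0" test
  double absval = std::fabs(mag);          // ISD::FABS
  return negative ? -absval : absval;      // select nabs vs. abs
}

int main() {
  assert(copysign_via_bits(3.5, -0.0) == -3.5);  // -0.0 still carries the sign bit
  assert(copysign_via_bits(-2.0, 1.0) == 2.0);
  std::printf("ok\n");
  return 0;
}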
@@ -1643,8 +1694,7 @@ void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node,
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
Chain = SP.getValue(1);
unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
- unsigned StackAlign =
- TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
+ unsigned StackAlign = TM.getFrameInfo()->getStackAlignment();
if (Align > StackAlign)
SP = DAG.getNode(ISD::AND, dl, VT, SP,
DAG.getConstant(-(uint64_t)Align, VT));
@@ -1668,7 +1718,7 @@ void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
EVT OpVT = LHS.getValueType();
ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get();
switch (TLI.getCondCodeAction(CCCode, OpVT)) {
- default: llvm_unreachable("Unknown condition code action!");
+ default: assert(0 && "Unknown condition code action!");
case TargetLowering::Legal:
// Nothing to do.
break;
@@ -1676,7 +1726,7 @@ void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID;
unsigned Opc = 0;
switch (CCCode) {
- default: llvm_unreachable("Don't know how to expand this condition!");
+ default: assert(0 && "Don't know how to expand this condition!");
case ISD::SETOEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETO; Opc = ISD::AND; break;
case ISD::SETOGT: CC1 = ISD::SETGT; CC2 = ISD::SETO; Opc = ISD::AND; break;
case ISD::SETOGE: CC1 = ISD::SETGE; CC2 = ISD::SETO; Opc = ISD::AND; break;
@@ -1723,8 +1773,8 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
unsigned SrcSize = SrcOp.getValueType().getSizeInBits();
unsigned SlotSize = SlotVT.getSizeInBits();
unsigned DestSize = DestVT.getSizeInBits();
- unsigned DestAlign =
- TLI.getTargetData()->getPrefTypeAlignment(DestVT.getTypeForEVT(*DAG.getContext()));
+ const Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
+ unsigned DestAlign = TLI.getTargetData()->getPrefTypeAlignment(DestType);
// Emit a store to the stack slot. Use a truncstore if the input value is
// later than DestVT.
@@ -1745,7 +1795,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
DestAlign);
assert(SlotSize < DestSize && "Unknown extension!");
- return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, SV, 0, SlotVT,
+ return DAG.getExtLoad(ISD::EXTLOAD, DestVT, dl, Store, FIPtr, SV, 0, SlotVT,
false, false, DestAlign);
}
@@ -1908,6 +1958,44 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
return CallInfo.first;
}
+// ExpandChainLibCall - Expand a node into a call to a libcall. Similar to
+// ExpandLibCall except that the first operand is the in-chain.
+std::pair<SDValue, SDValue>
+SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
+ SDNode *Node,
+ bool isSigned) {
+ assert(!IsLegalizingCall && "Cannot overlap legalization of calls!");
+ SDValue InChain = Node->getOperand(0);
+
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) {
+ EVT ArgVT = Node->getOperand(i).getValueType();
+ const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Entry.Node = Node->getOperand(i);
+ Entry.Ty = ArgTy;
+ Entry.isSExt = isSigned;
+ Entry.isZExt = !isSigned;
+ Args.push_back(Entry);
+ }
+ SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
+ TLI.getPointerTy());
+
+ // Splice the libcall in wherever FindInputOutputChains tells us to.
+ const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
+ std::pair<SDValue, SDValue> CallInfo =
+ TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
+ 0, TLI.getLibcallCallingConv(LC), false,
+ /*isReturnValueUsed=*/true,
+ Callee, Args, DAG, Node->getDebugLoc());
+
+ // Legalize the call sequence, starting with the chain. This will advance
+ // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that
+ // was added by LowerCallTo (guaranteeing proper serialization of calls).
+ LegalizeOp(CallInfo.second);
+ return CallInfo;
+}
+
SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node,
RTLIB::Libcall Call_F32,
RTLIB::Libcall Call_F64,
@@ -1915,7 +2003,7 @@ SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node,
RTLIB::Libcall Call_PPCF128) {
RTLIB::Libcall LC;
switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
- default: llvm_unreachable("Unexpected request for libcall!");
+ default: assert(0 && "Unexpected request for libcall!");
case MVT::f32: LC = Call_F32; break;
case MVT::f64: LC = Call_F64; break;
case MVT::f80: LC = Call_F80; break;
@@ -1932,7 +2020,7 @@ SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned,
RTLIB::Libcall Call_I128) {
RTLIB::Libcall LC;
switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
- default: llvm_unreachable("Unexpected request for libcall!");
+ default: assert(0 && "Unexpected request for libcall!");
case MVT::i8: LC = Call_I8; break;
case MVT::i16: LC = Call_I16; break;
case MVT::i32: LC = Call_I32; break;
@@ -2008,6 +2096,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
return Result;
}
assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet");
+ // Code below here assumes !isSigned without checking again.
// Implementation of unsigned i64 to f64 following the algorithm in
// __floatundidf in compiler_rt. This implementation has the advantage
@@ -2029,10 +2118,46 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84);
SDValue LoFlt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, LoOr);
SDValue HiFlt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, HiOr);
- SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt, TwoP84PlusTwoP52);
+ SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt,
+ TwoP84PlusTwoP52);
return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub);
}
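The 2^52/2^84 constants above follow compiler-rt's __floatundidf: each 32-bit half of the input is OR'ed into the mantissa of a power-of-two double, so both halves are represented exactly, and a single final addition does the only rounding. A host-side sketch (u64_to_f64 is an invented name; assumes IEEE-754 doubles):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

static double u64_to_f64(uint64_t a) {
  const double twop52 = std::ldexp(1.0, 52);   // 2^52
  const double twop84 = std::ldexp(1.0, 84);   // 2^84
  const double twop84_plus_twop52 = twop84 + twop52;  // exact

  uint64_t hi, lo;
  std::memcpy(&hi, &twop84, sizeof hi);
  std::memcpy(&lo, &twop52, sizeof lo);
  hi |= a >> 32;          // upper 32 bits land in 2^84's mantissa
  lo |= a & 0xffffffffu;  // lower 32 bits land in 2^52's mantissa

  double hid, lod;
  std::memcpy(&hid, &hi, sizeof hid);
  std::memcpy(&lod, &lo, sizeof lod);
  // Remove the bias, then add the two exactly-represented halves; only the
  // final addition rounds.
  return (hid - twop84_plus_twop52) + lod;
}

int main() {
  assert(u64_to_f64(0) == 0.0);
  assert(u64_to_f64(UINT64_C(0xffffffffffffffff)) == 18446744073709551616.0);  // rounds to 2^64
  std::printf("%.1f\n", u64_to_f64(1234567890123456789ULL));
  return 0;
}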
+ // Implementation of unsigned i64 to f32. This implementation has the
+ // advantage of performing rounding correctly.
+ // TODO: Generalize this for use with other types.
+ if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f32) {
+ EVT SHVT = TLI.getShiftAmountTy();
+
+ SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0,
+ DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64));
+ SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And,
+ DAG.getConstant(UINT64_C(0x800), MVT::i64));
+ SDValue And2 = DAG.getNode(ISD::AND, dl, MVT::i64, Op0,
+ DAG.getConstant(UINT64_C(0x7ff), MVT::i64));
+ SDValue Ne = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64),
+ And2, DAG.getConstant(UINT64_C(0), MVT::i64), ISD::SETNE);
+ SDValue Sel = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ne, Or, Op0);
+ SDValue Ge = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64),
+ Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64),
+ ISD::SETUGE);
+ SDValue Sel2 = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ge, Sel, Op0);
+
+ SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2,
+ DAG.getConstant(32, SHVT));
+ SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sh);
+ SDValue Fcvt = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Trunc);
+ SDValue TwoP32 =
+ DAG.getConstantFP(BitsToDouble(UINT64_C(0x41f0000000000000)), MVT::f64);
+ SDValue Fmul = DAG.getNode(ISD::FMUL, dl, MVT::f64, TwoP32, Fcvt);
+ SDValue Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sel2);
+ SDValue Fcvt2 = DAG.getNode(ISD::UINT_TO_FP, dl, MVT::f64, Lo);
+ SDValue Fadd = DAG.getNode(ISD::FADD, dl, MVT::f64, Fmul, Fcvt2);
+ return DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Fadd,
+ DAG.getIntPtrConstant(0));
+
+ }
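The new i64 -> f32 path above avoids double rounding by collapsing the 11 low bits a double cannot hold into a sticky bit ("round to odd") before the two-stage conversion. A host sketch (u64_to_f32 is an invented name; assumes the default round-to-nearest mode and two's-complement wrap) with a value where the naive u64 -> f64 -> f32 route gets the last bit wrong:

#include <cassert>
#include <cstdint>
#include <cstdio>

static float u64_to_f32(uint64_t x) {
  uint64_t v = x;
  if (x >= (UINT64_C(1) << 53)) {          // value cannot be exact in an f64
    if (x & UINT64_C(0x7ff))               // any of the 11 bits about to be dropped?
      v = (x & UINT64_C(0xfffffffffffff800)) | UINT64_C(0x800);  // sticky bit
  }
  // Convert the two 32-bit halves exactly, recombine in double, round once.
  double hi = static_cast<double>(static_cast<uint32_t>(v >> 32));
  double lo = static_cast<double>(static_cast<uint32_t>(v));
  double d  = hi * 4294967296.0 + lo;      // hi * 2^32 + lo, exact for v
  return static_cast<float>(d);
}

int main() {
  // Naive u64 -> f64 -> f32 double-rounds this down to 2^63; the sticky-bit
  // variant rounds it up to 2^63 + 2^40, the correctly rounded f32 result.
  uint64_t x = (UINT64_C(1) << 63) + (UINT64_C(1) << 39) + 1;
  assert(u64_to_f32(x) == 9223373136366403584.0f);                              // 2^63 + 2^40
  assert(static_cast<float>(static_cast<double>(x)) == 9223372036854775808.0f); // 2^63
  std::printf("ok\n");
  return 0;
}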
+
SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0);
SDValue SignSet = DAG.getSetCC(dl, TLI.getSetCCResultType(Op0.getValueType()),
@@ -2047,7 +2172,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
// offset depending on the data type.
uint64_t FF;
switch (Op0.getValueType().getSimpleVT().SimpleTy) {
- default: llvm_unreachable("Unsupported integer type!");
+ default: assert(0 && "Unsupported integer type!");
case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float)
case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float)
case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float)
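A quick host-side check (illustrative only) that one of the fudge constants above, 0x4F800000, really is the single-precision encoding of 2^32:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

int main() {
  uint32_t bits = 0x4F800000u;   // sign 0, exponent 127+32, mantissa 0
  float f;
  std::memcpy(&f, &bits, sizeof f);
  assert(f == std::ldexp(1.0f, 32));  // 2^32 as a float
  return 0;
}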
@@ -2068,7 +2193,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
false, false, Alignment);
else {
FudgeInReg =
- LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT,
+ LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, DestVT, dl,
DAG.getEntryNode(), CPIdx,
PseudoSourceValue::getConstantPool(), 0,
MVT::f32, false, false, Alignment));
@@ -2167,7 +2292,7 @@ SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) {
EVT SHVT = TLI.getShiftAmountTy();
SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
switch (VT.getSimpleVT().SimpleTy) {
- default: llvm_unreachable("Unhandled Expand type in BSWAP!");
+ default: assert(0 && "Unhandled Expand type in BSWAP!");
case MVT::i16:
Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
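The i16 case above is the classic two-shift byte swap; a host sketch of the same computation (bswap16 is an invented name):

#include <cassert>
#include <cstdint>

static uint16_t bswap16(uint16_t x) {
  // SHL by 8, SRL by 8, then OR the halves back together.
  return static_cast<uint16_t>((x << 8) | (x >> 8));
}

int main() {
  assert(bswap16(0x1234) == 0x3412);
  return 0;
}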
@@ -2212,7 +2337,7 @@ SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) {
SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
DebugLoc dl) {
switch (Opc) {
- default: llvm_unreachable("Cannot expand this yet!");
+ default: assert(0 && "Cannot expand this yet!");
case ISD::CTPOP: {
static const uint64_t mask[6] = {
0x5555555555555555ULL, 0x3333333333333333ULL,
@@ -2278,6 +2403,92 @@ SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
}
}
+std::pair <SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) {
+ unsigned Opc = Node->getOpcode();
+ MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT();
+ RTLIB::Libcall LC;
+
+ switch (Opc) {
+ default:
+ llvm_unreachable("Unhandled atomic intrinsic Expand!");
+ break;
+ case ISD::ATOMIC_SWAP:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_8; break;
+ }
+ break;
+ case ISD::ATOMIC_CMP_SWAP:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_ADD:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_ADD_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_ADD_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_ADD_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_ADD_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_SUB:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_SUB_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_SUB_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_SUB_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_SUB_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_AND:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_AND_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_AND_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_AND_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_AND_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_OR:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_OR_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_OR_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_OR_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_OR_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_XOR:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_XOR_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_XOR_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_XOR_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_XOR_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_NAND:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_NAND_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_NAND_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_NAND_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_NAND_8; break;
+ }
+ break;
+ }
+
+ return ExpandChainLibCall(LC, Node, false);
+}
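ExpandAtomic above only selects which __sync_*_N runtime symbol to call for the given operation and width; the semantics are those of the familiar GCC/Clang __sync builtins. A host sketch of those semantics (assumes a GCC- or Clang-style toolchain that provides the builtins):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t v = 5;
  uint32_t old = __sync_fetch_and_add(&v, 3u);      // ATOMIC_LOAD_ADD, i32 -> __sync_fetch_and_add_4
  assert(old == 5 && v == 8);

  old = __sync_val_compare_and_swap(&v, 8u, 42u);   // ATOMIC_CMP_SWAP -> __sync_val_compare_and_swap_4
  assert(old == 8 && v == 42);

  old = __sync_lock_test_and_set(&v, 7u);           // ATOMIC_SWAP -> __sync_lock_test_and_set_4
  assert(old == 42 && v == 7);
  return 0;
}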
+
void SelectionDAGLegalize::ExpandNode(SDNode *Node,
SmallVectorImpl<SDValue> &Results) {
DebugLoc dl = Node->getDebugLoc();
@@ -2303,10 +2514,48 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
case ISD::EH_RETURN:
case ISD::EH_LABEL:
case ISD::PREFETCH:
- case ISD::MEMBARRIER:
case ISD::VAEND:
+ case ISD::EH_SJLJ_LONGJMP:
Results.push_back(Node->getOperand(0));
break;
+ case ISD::EH_SJLJ_SETJMP:
+ Results.push_back(DAG.getConstant(0, MVT::i32));
+ Results.push_back(Node->getOperand(0));
+ break;
+ case ISD::MEMBARRIER: {
+ // If the target didn't lower this, lower it to '__sync_synchronize()' call
+ TargetLowering::ArgListTy Args;
+ std::pair<SDValue, SDValue> CallResult =
+ TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()),
+ false, false, false, false, 0, CallingConv::C, false,
+ /*isReturnValueUsed=*/true,
+ DAG.getExternalSymbol("__sync_synchronize",
+ TLI.getPointerTy()),
+ Args, DAG, dl);
+ Results.push_back(CallResult.second);
+ break;
+ }
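For targets that leave MEMBARRIER unlowered, the case above emits a call to __sync_synchronize(), i.e. a full memory fence. A host-side equivalent (illustrative; publish is an invented name):

#include <atomic>

// What the lowered MEMBARRIER amounts to at the source level: a full fence
// between the data write and the flag write.
void publish(int *slot, std::atomic<bool> *ready, int value) {
  *slot = value;
  std::atomic_thread_fence(std::memory_order_seq_cst);  // full barrier, like __sync_synchronize()
  ready->store(true, std::memory_order_relaxed);
}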
+ // By default, atomic intrinsics are marked Legal and lowered. Targets
+ // which don't support them directly, however, may want libcalls, in which
+ // case they mark them Expand, and we get here.
+ // FIXME: Unimplemented for now. Add libcalls.
+ case ISD::ATOMIC_SWAP:
+ case ISD::ATOMIC_LOAD_ADD:
+ case ISD::ATOMIC_LOAD_SUB:
+ case ISD::ATOMIC_LOAD_AND:
+ case ISD::ATOMIC_LOAD_OR:
+ case ISD::ATOMIC_LOAD_XOR:
+ case ISD::ATOMIC_LOAD_NAND:
+ case ISD::ATOMIC_LOAD_MIN:
+ case ISD::ATOMIC_LOAD_MAX:
+ case ISD::ATOMIC_LOAD_UMIN:
+ case ISD::ATOMIC_LOAD_UMAX:
+ case ISD::ATOMIC_CMP_SWAP: {
+ std::pair<SDValue, SDValue> Tmp = ExpandAtomic(Node);
+ Results.push_back(Tmp.first);
+ Results.push_back(Tmp.second);
+ break;
+ }
case ISD::DYNAMIC_STACKALLOC:
ExpandDYNAMIC_STACKALLOC(Node, Results);
break;
@@ -2318,10 +2567,10 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
EVT VT = Node->getValueType(0);
if (VT.isInteger())
Results.push_back(DAG.getConstant(0, VT));
- else if (VT.isFloatingPoint())
+ else {
+ assert(VT.isFloatingPoint() && "Unknown value type!");
Results.push_back(DAG.getConstantFP(0, VT));
- else
- llvm_unreachable("Unknown value type!");
+ }
break;
}
case ISD::TRAP: {
@@ -2411,15 +2660,31 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
EVT VT = Node->getValueType(0);
Tmp1 = Node->getOperand(0);
Tmp2 = Node->getOperand(1);
- SDValue VAList = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, V, 0,
- false, false, 0);
+ unsigned Align = Node->getConstantOperandVal(3);
+
+ SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, V, 0,
+ false, false, 0);
+ SDValue VAList = VAListLoad;
+
+ if (Align > TLI.getMinStackArgumentAlignment()) {
+ assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");
+
+ VAList = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
+ DAG.getConstant(Align - 1,
+ TLI.getPointerTy()));
+
+ VAList = DAG.getNode(ISD::AND, dl, TLI.getPointerTy(), VAList,
+ DAG.getConstant(-Align,
+ TLI.getPointerTy()));
+ }
+
// Increment the pointer, VAList, to the next vaarg
Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
DAG.getConstant(TLI.getTargetData()->
- getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
+ getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
TLI.getPointerTy()));
// Store the incremented VAList to the legalized pointer
- Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Tmp2, V, 0,
+ Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2, V, 0,
false, false, 0);
// Load the actual argument out of the pointer VAList
Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, NULL, 0,
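The new over-aligned va_arg handling above rounds the saved list pointer up with the usual add-and-mask idiom (note that -Align equals ~(Align - 1) for a power of two). A host sketch of that computation (align_up is an invented name):

#include <cassert>
#include <cstdint>

static uintptr_t align_up(uintptr_t p, uintptr_t align) {
  assert((align & (align - 1)) == 0 && "align must be a power of 2");
  return (p + align - 1) & ~(align - 1);  // same ADD/AND pair as the DAG code
}

int main() {
  assert(align_up(0x1004, 16) == 0x1010);
  assert(align_up(0x1010, 16) == 0x1010);  // already aligned: unchanged
  return 0;
}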
@@ -2442,7 +2707,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
case ISD::EXTRACT_VECTOR_ELT:
if (Node->getOperand(0).getValueType().getVectorNumElements() == 1)
// This must be an access of the only element. Return it.
- Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, Node->getValueType(0),
+ Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, Node->getValueType(0),
Node->getOperand(0));
else
Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0));
@@ -2469,6 +2734,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
EVT VT = Node->getValueType(0);
EVT EltVT = VT.getVectorElementType();
+ if (getTypeAction(EltVT) == Promote)
+ EltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT);
unsigned NumElems = VT.getVectorNumElements();
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i != NumElems; ++i) {
@@ -2620,6 +2887,12 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64,
RTLIB::REM_F80, RTLIB::REM_PPCF128));
break;
+ case ISD::FP16_TO_FP32:
+ Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false));
+ break;
+ case ISD::FP32_TO_FP16:
+ Results.push_back(ExpandLibCall(RTLIB::FPROUND_F32_F16, Node, false));
+ break;
case ISD::ConstantFP: {
ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
// Check to see if this FP immediate is already legal.
@@ -2821,7 +3094,14 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
RHS);
TopHalf = BottomHalf.getValue(1);
- } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2))) {
+ } else {
+ // FIXME: We should be able to fall back to a libcall with an illegal
+ // type in some cases.
+ // Also, we can fall back to a division in some cases, but that's a big
+ // performance hit in the general case.
+ assert(TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(),
+ VT.getSizeInBits() * 2)) &&
+ "Don't know how to expand this operation yet!");
EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
@@ -2830,12 +3110,6 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
DAG.getIntPtrConstant(0));
TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
DAG.getIntPtrConstant(1));
- } else {
- // FIXME: We should be able to fall back to a libcall with an illegal
- // type in some cases.
- // Also, we can fall back to a division in some cases, but that's a big
- // performance hit in the general case.
- llvm_unreachable("Don't know how to expand this operation yet!");
}
if (isSigned) {
Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1, TLI.getShiftAmountTy());
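When the double-width integer type is legal, the [SU]MULO expansion above multiplies in 2*N bits and derives the overflow flag from the top half. A host sketch for 32-bit signed multiply (smulo32 is an invented name; two's-complement wrap on the narrowing cast is assumed, as on all mainstream compilers):

#include <cassert>
#include <cstdint>

// Widening strategy: multiply in 64 bits; the 32-bit multiply overflowed iff
// the high half is not simply the sign extension of the low half.
static bool smulo32(int32_t a, int32_t b, int32_t *out) {
  int64_t wide = static_cast<int64_t>(a) * static_cast<int64_t>(b);
  int32_t lo = static_cast<int32_t>(wide);        // low half (wraps)
  int32_t hi = static_cast<int32_t>(wide >> 32);  // high half
  *out = lo;
  return hi != (lo < 0 ? -1 : 0);                 // must equal lo's sign extension
}

int main() {
  int32_t r;
  assert(!smulo32(46340, 46340, &r) && r == 2147395600);  // fits in i32
  assert(smulo32(46341, 46341, &r));                      // 46341^2 = 2147488281 overflows
  return 0;
}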
@@ -2885,17 +3159,17 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
const TargetData &TD = *TLI.getTargetData();
unsigned EntrySize =
DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);
-
+
Index = DAG.getNode(ISD::MUL, dl, PTy,
Index, DAG.getConstant(EntrySize, PTy));
SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
- SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr,
+ SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, PTy, dl, Chain, Addr,
PseudoSourceValue::getJumpTable(), 0, MemVT,
false, false, 0);
Addr = LD;
- if (TLI.getTargetMachine().getRelocationModel() == Reloc::PIC_) {
+ if (TM.getRelocationModel() == Reloc::PIC_) {
// For PIC, the sequence is:
// BRIND(load(Jumptable + index) + RelocBase)
// RelocBase can be JumpTable, GOT or some sort of global base.
@@ -3057,11 +3331,10 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
if (OVT.isVector()) {
ExtOp = ISD::BIT_CONVERT;
TruncOp = ISD::BIT_CONVERT;
- } else if (OVT.isInteger()) {
+ } else {
+ assert(OVT.isInteger() && "Cannot promote logic operation");
ExtOp = ISD::ANY_EXTEND;
TruncOp = ISD::TRUNCATE;
- } else {
- llvm_report_error("Cannot promote logic operation");
}
// Promote each of the values to the new type.
Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 35a7c7c..650ee5a 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -79,6 +79,7 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
case ISD::FNEG: R = SoftenFloatRes_FNEG(N); break;
case ISD::FP_EXTEND: R = SoftenFloatRes_FP_EXTEND(N); break;
case ISD::FP_ROUND: R = SoftenFloatRes_FP_ROUND(N); break;
+ case ISD::FP16_TO_FP32:R = SoftenFloatRes_FP16_TO_FP32(N); break;
case ISD::FPOW: R = SoftenFloatRes_FPOW(N); break;
case ISD::FPOWI: R = SoftenFloatRes_FPOWI(N); break;
case ISD::FREM: R = SoftenFloatRes_FREM(N); break;
@@ -108,14 +109,16 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_BIT_CONVERT(SDNode *N) {
SDValue DAGTypeLegalizer::SoftenFloatRes_BUILD_PAIR(SDNode *N) {
// Convert the inputs to integers, and build a new pair out of them.
return DAG.getNode(ISD::BUILD_PAIR, N->getDebugLoc(),
- TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)),
+ TLI.getTypeToTransformTo(*DAG.getContext(),
+ N->getValueType(0)),
BitConvertToInteger(N->getOperand(0)),
BitConvertToInteger(N->getOperand(1)));
}
SDValue DAGTypeLegalizer::SoftenFloatRes_ConstantFP(ConstantFPSDNode *N) {
return DAG.getConstant(N->getValueAPF().bitcastToAPInt(),
- TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)));
+ TLI.getTypeToTransformTo(*DAG.getContext(),
+ N->getValueType(0)));
}
SDValue DAGTypeLegalizer::SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N) {
@@ -332,6 +335,15 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FP_EXTEND(SDNode *N) {
return MakeLibCall(LC, NVT, &Op, 1, false, N->getDebugLoc());
}
+// FIXME: Should we just use 'normal' FP_EXTEND / FP_TRUNC instead of special
+// nodes?
+SDValue DAGTypeLegalizer::SoftenFloatRes_FP16_TO_FP32(SDNode *N) {
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ SDValue Op = N->getOperand(0);
+ return MakeLibCall(RTLIB::FPEXT_F16_F32, NVT, &Op, 1, false,
+ N->getDebugLoc());
+}
+
SDValue DAGTypeLegalizer::SoftenFloatRes_FP_ROUND(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue Op = N->getOperand(0);
@@ -441,8 +453,8 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) {
SDValue NewL;
if (L->getExtensionType() == ISD::NON_EXTLOAD) {
- NewL = DAG.getLoad(L->getAddressingMode(), dl, L->getExtensionType(),
- NVT, L->getChain(), L->getBasePtr(), L->getOffset(),
+ NewL = DAG.getLoad(L->getAddressingMode(), L->getExtensionType(),
+ NVT, dl, L->getChain(), L->getBasePtr(), L->getOffset(),
L->getSrcValue(), L->getSrcValueOffset(), NVT,
L->isVolatile(), L->isNonTemporal(), L->getAlignment());
// Legalized the chain result - switch anything that used the old chain to
@@ -452,8 +464,8 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) {
}
// Do a non-extending load followed by FP_EXTEND.
- NewL = DAG.getLoad(L->getAddressingMode(), dl, ISD::NON_EXTLOAD,
- L->getMemoryVT(), L->getChain(),
+ NewL = DAG.getLoad(L->getAddressingMode(), ISD::NON_EXTLOAD,
+ L->getMemoryVT(), dl, L->getChain(),
L->getBasePtr(), L->getOffset(),
L->getSrcValue(), L->getSrcValueOffset(),
L->getMemoryVT(), L->isVolatile(),
@@ -480,7 +492,8 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_SELECT_CC(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_UNDEF(SDNode *N) {
- return DAG.getUNDEF(TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)));
+ return DAG.getUNDEF(TLI.getTypeToTransformTo(*DAG.getContext(),
+ N->getValueType(0)));
}
SDValue DAGTypeLegalizer::SoftenFloatRes_VAARG(SDNode *N) {
@@ -491,7 +504,8 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_VAARG(SDNode *N) {
DebugLoc dl = N->getDebugLoc();
SDValue NewVAARG;
- NewVAARG = DAG.getVAArg(NVT, dl, Chain, Ptr, N->getOperand(2));
+ NewVAARG = DAG.getVAArg(NVT, dl, Chain, Ptr, N->getOperand(2),
+ N->getConstantOperandVal(3));
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
@@ -522,7 +536,8 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_XINT_TO_FP(SDNode *N) {
// Sign/zero extend the argument if the libcall takes a larger type.
SDValue Op = DAG.getNode(Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, dl,
NVT, N->getOperand(0));
- return MakeLibCall(LC, TLI.getTypeToTransformTo(*DAG.getContext(), RVT), &Op, 1, false, dl);
+ return MakeLibCall(LC, TLI.getTypeToTransformTo(*DAG.getContext(), RVT),
+ &Op, 1, false, dl);
}
@@ -548,6 +563,7 @@ bool DAGTypeLegalizer::SoftenFloatOperand(SDNode *N, unsigned OpNo) {
case ISD::FP_ROUND: Res = SoftenFloatOp_FP_ROUND(N); break;
case ISD::FP_TO_SINT: Res = SoftenFloatOp_FP_TO_SINT(N); break;
case ISD::FP_TO_UINT: Res = SoftenFloatOp_FP_TO_UINT(N); break;
+ case ISD::FP32_TO_FP16:Res = SoftenFloatOp_FP32_TO_FP16(N); break;
case ISD::SELECT_CC: Res = SoftenFloatOp_SELECT_CC(N); break;
case ISD::SETCC: Res = SoftenFloatOp_SETCC(N); break;
case ISD::STORE: Res = SoftenFloatOp_STORE(N, OpNo); break;
@@ -683,9 +699,10 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_BR_CC(SDNode *N) {
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
DAG.getCondCode(CCCode), NewLHS, NewRHS,
- N->getOperand(4));
+ N->getOperand(4)),
+ 0);
}
SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_SINT(SDNode *N) {
@@ -704,6 +721,13 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_UINT(SDNode *N) {
return MakeLibCall(LC, RVT, &Op, 1, false, N->getDebugLoc());
}
+SDValue DAGTypeLegalizer::SoftenFloatOp_FP32_TO_FP16(SDNode *N) {
+ EVT RVT = N->getValueType(0);
+ RTLIB::Libcall LC = RTLIB::FPROUND_F32_F16;
+ SDValue Op = GetSoftenedFloat(N->getOperand(0));
+ return MakeLibCall(LC, RVT, &Op, 1, false, N->getDebugLoc());
+}
+
SDValue DAGTypeLegalizer::SoftenFloatOp_SELECT_CC(SDNode *N) {
SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1);
ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(4))->get();
@@ -717,9 +741,10 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_SELECT_CC(SDNode *N) {
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
N->getOperand(2), N->getOperand(3),
- DAG.getCondCode(CCCode));
+ DAG.getCondCode(CCCode)),
+ 0);
}
SDValue DAGTypeLegalizer::SoftenFloatOp_SETCC(SDNode *N) {
@@ -735,8 +760,9 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_SETCC(SDNode *N) {
}
// Otherwise, update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
- DAG.getCondCode(CCCode));
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
+ DAG.getCondCode(CCCode)),
+ 0);
}
SDValue DAGTypeLegalizer::SoftenFloatOp_STORE(SDNode *N, unsigned OpNo) {
@@ -800,6 +826,7 @@ void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) {
case ISD::FABS: ExpandFloatRes_FABS(N, Lo, Hi); break;
case ISD::FADD: ExpandFloatRes_FADD(N, Lo, Hi); break;
case ISD::FCEIL: ExpandFloatRes_FCEIL(N, Lo, Hi); break;
+ case ISD::FCOPYSIGN: ExpandFloatRes_FCOPYSIGN(N, Lo, Hi); break;
case ISD::FCOS: ExpandFloatRes_FCOS(N, Lo, Hi); break;
case ISD::FDIV: ExpandFloatRes_FDIV(N, Lo, Hi); break;
case ISD::FEXP: ExpandFloatRes_FEXP(N, Lo, Hi); break;
@@ -873,6 +900,17 @@ void DAGTypeLegalizer::ExpandFloatRes_FCEIL(SDNode *N,
GetPairElements(Call, Lo, Hi);
}
+void DAGTypeLegalizer::ExpandFloatRes_FCOPYSIGN(SDNode *N,
+ SDValue &Lo, SDValue &Hi) {
+ SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
+ RTLIB::COPYSIGN_F32,
+ RTLIB::COPYSIGN_F64,
+ RTLIB::COPYSIGN_F80,
+ RTLIB::COPYSIGN_PPCF128),
+ N, false);
+ GetPairElements(Call, Lo, Hi);
+}
+
void DAGTypeLegalizer::ExpandFloatRes_FCOS(SDNode *N,
SDValue &Lo, SDValue &Hi) {
SDValue Call = LibCallify(GetFPLibCall(N->getValueType(0),
@@ -1072,7 +1110,7 @@ void DAGTypeLegalizer::ExpandFloatRes_LOAD(SDNode *N, SDValue &Lo,
assert(NVT.isByteSized() && "Expanded type not byte sized!");
assert(LD->getMemoryVT().bitsLE(NVT) && "Float type not round?");
- Hi = DAG.getExtLoad(LD->getExtensionType(), dl, NVT, Chain, Ptr,
+ Hi = DAG.getExtLoad(LD->getExtensionType(), NVT, dl, Chain, Ptr,
LD->getSrcValue(), LD->getSrcValueOffset(),
LD->getMemoryVT(), LD->isVolatile(),
LD->isNonTemporal(), LD->getAlignment());
@@ -1260,9 +1298,9 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_BR_CC(SDNode *N) {
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
DAG.getCondCode(CCCode), NewLHS, NewRHS,
- N->getOperand(4));
+ N->getOperand(4)), 0);
}
SDValue DAGTypeLegalizer::ExpandFloatOp_FP_ROUND(SDNode *N) {
@@ -1341,9 +1379,9 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_SELECT_CC(SDNode *N) {
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
N->getOperand(2), N->getOperand(3),
- DAG.getCondCode(CCCode));
+ DAG.getCondCode(CCCode)), 0);
}
SDValue DAGTypeLegalizer::ExpandFloatOp_SETCC(SDNode *N) {
@@ -1359,8 +1397,8 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_SETCC(SDNode *N) {
}
// Otherwise, update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
- DAG.getCondCode(CCCode));
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
+ DAG.getCondCode(CCCode)), 0);
}
SDValue DAGTypeLegalizer::ExpandFloatOp_STORE(SDNode *N, unsigned OpNo) {
@@ -1374,7 +1412,8 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_STORE(SDNode *N, unsigned OpNo) {
SDValue Chain = ST->getChain();
SDValue Ptr = ST->getBasePtr();
- EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), ST->getValue().getValueType());
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(),
+ ST->getValue().getValueType());
assert(NVT.isByteSized() && "Expanded type not byte sized!");
assert(ST->getMemoryVT().bitsLE(NVT) && "Float type not round?");
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 81f28ad..f8c5890 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -80,6 +80,8 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: Res = PromoteIntRes_FP_TO_XINT(N); break;
+ case ISD::FP32_TO_FP16:Res = PromoteIntRes_FP32_TO_FP16(N); break;
+
case ISD::AND:
case ISD::OR:
case ISD::XOR:
@@ -202,7 +204,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) {
std::swap(Lo, Hi);
InOp = DAG.getNode(ISD::ANY_EXTEND, dl,
- EVT::getIntegerVT(*DAG.getContext(), NOutVT.getSizeInBits()),
+ EVT::getIntegerVT(*DAG.getContext(),
+ NOutVT.getSizeInBits()),
JoinIntegers(Lo, Hi));
return DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, InOp);
}
@@ -231,8 +234,9 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BUILD_PAIR(SDNode *N) {
// The pair element type may be legal, or may not promote to the same type as
// the result, for example i14 = BUILD_PAIR (i7, i7). Handle all cases.
return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(),
- TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)),
- JoinIntegers(N->getOperand(0), N->getOperand(1)));
+ TLI.getTypeToTransformTo(*DAG.getContext(),
+ N->getValueType(0)), JoinIntegers(N->getOperand(0),
+ N->getOperand(1)));
}
SDValue DAGTypeLegalizer::PromoteIntRes_Constant(SDNode *N) {
@@ -242,7 +246,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_Constant(SDNode *N) {
// Zero extend things like i1, sign extend everything else. It shouldn't
// matter in theory which one we pick, but this tends to give better code?
unsigned Opc = VT.isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
- SDValue Result = DAG.getNode(Opc, dl, TLI.getTypeToTransformTo(*DAG.getContext(), VT),
+ SDValue Result = DAG.getNode(Opc, dl,
+ TLI.getTypeToTransformTo(*DAG.getContext(), VT),
SDValue(N, 0));
assert(isa<ConstantSDNode>(Result) && "Didn't constant fold ext?");
return Result;
@@ -307,8 +312,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_FP_TO_XINT(SDNode *N) {
// If we're promoting a UINT to a larger size and the larger FP_TO_UINT is
// not Legal, check to see if we can use FP_TO_SINT instead. (If both UINT
- // and SINT conversions are Custom, there is no way to tell which is preferable.
- // We choose SINT because that's the right thing on PPC.)
+ // and SINT conversions are Custom, there is no way to tell which is
+ // preferable. We choose SINT because that's the right thing on PPC.)
if (N->getOpcode() == ISD::FP_TO_UINT &&
!TLI.isOperationLegal(ISD::FP_TO_UINT, NVT) &&
TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NVT))
@@ -324,6 +329,16 @@ SDValue DAGTypeLegalizer::PromoteIntRes_FP_TO_XINT(SDNode *N) {
NVT, Res, DAG.getValueType(N->getValueType(0)));
}
+SDValue DAGTypeLegalizer::PromoteIntRes_FP32_TO_FP16(SDNode *N) {
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ DebugLoc dl = N->getDebugLoc();
+
+ SDValue Res = DAG.getNode(N->getOpcode(), dl, NVT, N->getOperand(0));
+
+ return DAG.getNode(ISD::AssertZext, dl,
+ NVT, Res, DAG.getValueType(N->getValueType(0)));
+}
+
SDValue DAGTypeLegalizer::PromoteIntRes_INT_EXTEND(SDNode *N) {
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
DebugLoc dl = N->getDebugLoc();
@@ -356,7 +371,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_LOAD(LoadSDNode *N) {
ISD::LoadExtType ExtType =
ISD::isNON_EXTLoad(N) ? ISD::EXTLOAD : N->getExtensionType();
DebugLoc dl = N->getDebugLoc();
- SDValue Res = DAG.getExtLoad(ExtType, dl, NVT, N->getChain(), N->getBasePtr(),
+ SDValue Res = DAG.getExtLoad(ExtType, NVT, dl, N->getChain(), N->getBasePtr(),
N->getSrcValue(), N->getSrcValueOffset(),
N->getMemoryVT(), N->isVolatile(),
N->isNonTemporal(), N->getAlignment());
@@ -452,7 +467,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_SETCC(SDNode *N) {
SDValue DAGTypeLegalizer::PromoteIntRes_SHL(SDNode *N) {
return DAG.getNode(ISD::SHL, N->getDebugLoc(),
- TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)),
+ TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)),
GetPromotedInteger(N->getOperand(0)), N->getOperand(1));
}
@@ -543,7 +558,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_UDIV(SDNode *N) {
}
SDValue DAGTypeLegalizer::PromoteIntRes_UNDEF(SDNode *N) {
- return DAG.getUNDEF(TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)));
+ return DAG.getUNDEF(TLI.getTypeToTransformTo(*DAG.getContext(),
+ N->getValueType(0)));
}
SDValue DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) {
@@ -558,7 +574,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) {
SmallVector<SDValue, 8> Parts(NumRegs);
for (unsigned i = 0; i < NumRegs; ++i) {
- Parts[i] = DAG.getVAArg(RegVT, dl, Chain, Ptr, N->getOperand(2));
+ Parts[i] = DAG.getVAArg(RegVT, dl, Chain, Ptr, N->getOperand(2),
+ N->getConstantOperandVal(3));
Chain = Parts[i].getValue(1);
}
@@ -634,6 +651,7 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
case ISD::STORE: Res = PromoteIntOp_STORE(cast<StoreSDNode>(N),
OpNo); break;
case ISD::TRUNCATE: Res = PromoteIntOp_TRUNCATE(N); break;
+ case ISD::FP16_TO_FP32:
case ISD::UINT_TO_FP: Res = PromoteIntOp_UINT_TO_FP(N); break;
case ISD::ZERO_EXTEND: Res = PromoteIntOp_ZERO_EXTEND(N); break;
@@ -710,8 +728,9 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BR_CC(SDNode *N, unsigned OpNo) {
// The chain (Op#0), CC (#1) and basic block destination (Op#4) are always
// legal types.
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
- N->getOperand(1), LHS, RHS, N->getOperand(4));
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
+ N->getOperand(1), LHS, RHS, N->getOperand(4)),
+ 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo) {
@@ -722,8 +741,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo) {
SDValue Cond = PromoteTargetBoolean(N->getOperand(1), SVT);
// The chain (Op#0) and basic block destination (Op#2) are always legal types.
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0), Cond,
- N->getOperand(2));
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Cond,
+ N->getOperand(2)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_PAIR(SDNode *N) {
@@ -758,7 +777,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_VECTOR(SDNode *N) {
for (unsigned i = 0; i < NumElts; ++i)
NewOps.push_back(GetPromotedInteger(N->getOperand(i)));
- return DAG.UpdateNodeOperands(SDValue(N, 0), &NewOps[0], NumElts);
+ return SDValue(DAG.UpdateNodeOperands(N, &NewOps[0], NumElts), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_CONVERT_RNDSAT(SDNode *N) {
@@ -783,17 +802,18 @@ SDValue DAGTypeLegalizer::PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N,
assert(N->getOperand(1).getValueType().getSizeInBits() >=
N->getValueType(0).getVectorElementType().getSizeInBits() &&
"Type of inserted value narrower than vector element type!");
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
GetPromotedInteger(N->getOperand(1)),
- N->getOperand(2));
+ N->getOperand(2)),
+ 0);
}
assert(OpNo == 2 && "Different operand and result vector types?");
// Promote the index.
SDValue Idx = ZExtPromotedInteger(N->getOperand(2));
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
- N->getOperand(1), Idx);
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
+ N->getOperand(1), Idx), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_MEMBARRIER(SDNode *N) {
@@ -804,15 +824,14 @@ SDValue DAGTypeLegalizer::PromoteIntOp_MEMBARRIER(SDNode *N) {
SDValue Flag = GetPromotedInteger(N->getOperand(i));
NewOps[i] = DAG.getZeroExtendInReg(Flag, dl, MVT::i1);
}
- return DAG.UpdateNodeOperands(SDValue (N, 0), NewOps,
- array_lengthof(NewOps));
+ return SDValue(DAG.UpdateNodeOperands(N, NewOps, array_lengthof(NewOps)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SCALAR_TO_VECTOR(SDNode *N) {
// Integer SCALAR_TO_VECTOR operands are implicitly truncated, so just promote
// the operand in place.
- return DAG.UpdateNodeOperands(SDValue(N, 0),
- GetPromotedInteger(N->getOperand(0)));
+ return SDValue(DAG.UpdateNodeOperands(N,
+ GetPromotedInteger(N->getOperand(0))), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SELECT(SDNode *N, unsigned OpNo) {
@@ -822,8 +841,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_SELECT(SDNode *N, unsigned OpNo) {
EVT SVT = TLI.getSetCCResultType(N->getOperand(1).getValueType());
SDValue Cond = PromoteTargetBoolean(N->getOperand(0), SVT);
- return DAG.UpdateNodeOperands(SDValue(N, 0), Cond,
- N->getOperand(1), N->getOperand(2));
+ return SDValue(DAG.UpdateNodeOperands(N, Cond,
+ N->getOperand(1), N->getOperand(2)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SELECT_CC(SDNode *N, unsigned OpNo) {
@@ -834,8 +853,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_SELECT_CC(SDNode *N, unsigned OpNo) {
PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(4))->get());
// The CC (#4) and the possible return values (#2 and #3) have legal types.
- return DAG.UpdateNodeOperands(SDValue(N, 0), LHS, RHS, N->getOperand(2),
- N->getOperand(3), N->getOperand(4));
+ return SDValue(DAG.UpdateNodeOperands(N, LHS, RHS, N->getOperand(2),
+ N->getOperand(3), N->getOperand(4)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SETCC(SDNode *N, unsigned OpNo) {
@@ -846,12 +865,12 @@ SDValue DAGTypeLegalizer::PromoteIntOp_SETCC(SDNode *N, unsigned OpNo) {
PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(2))->get());
// The CC (#2) is always legal.
- return DAG.UpdateNodeOperands(SDValue(N, 0), LHS, RHS, N->getOperand(2));
+ return SDValue(DAG.UpdateNodeOperands(N, LHS, RHS, N->getOperand(2)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_Shift(SDNode *N) {
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
- ZExtPromotedInteger(N->getOperand(1)));
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
+ ZExtPromotedInteger(N->getOperand(1))), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SIGN_EXTEND(SDNode *N) {
@@ -863,8 +882,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_SIGN_EXTEND(SDNode *N) {
}
SDValue DAGTypeLegalizer::PromoteIntOp_SINT_TO_FP(SDNode *N) {
- return DAG.UpdateNodeOperands(SDValue(N, 0),
- SExtPromotedInteger(N->getOperand(0)));
+ return SDValue(DAG.UpdateNodeOperands(N,
+ SExtPromotedInteger(N->getOperand(0))), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_STORE(StoreSDNode *N, unsigned OpNo){
@@ -890,8 +909,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_TRUNCATE(SDNode *N) {
}
SDValue DAGTypeLegalizer::PromoteIntOp_UINT_TO_FP(SDNode *N) {
- return DAG.UpdateNodeOperands(SDValue(N, 0),
- ZExtPromotedInteger(N->getOperand(0)));
+ return SDValue(DAG.UpdateNodeOperands(N,
+ ZExtPromotedInteger(N->getOperand(0))), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_ZERO_EXTEND(SDNode *N) {
@@ -975,6 +994,11 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::SHL:
case ISD::SRA:
case ISD::SRL: ExpandIntRes_Shift(N, Lo, Hi); break;
+
+ case ISD::SADDO:
+ case ISD::SSUBO: ExpandIntRes_SADDSUBO(N, Lo, Hi); break;
+ case ISD::UADDO:
+ case ISD::USUBO: ExpandIntRes_UADDSUBO(N, Lo, Hi); break;
}
// If Lo/Hi is null, the sub-method took care of registering results etc.
@@ -1008,7 +1032,7 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
Hi = InL;
} else if (Amt == 1 &&
TLI.isOperationLegalOrCustom(ISD::ADDC,
- TLI.getTypeToExpandTo(*DAG.getContext(), NVT))) {
+ TLI.getTypeToExpandTo(*DAG.getContext(), NVT))) {
// Emit this X << 1 as X+X.
SDVTList VTList = DAG.getVTList(NVT, MVT::Flag);
SDValue LoOps[2] = { InL, InL };
@@ -1370,7 +1394,8 @@ void DAGTypeLegalizer::ExpandIntRes_AssertSext(SDNode *N,
if (NVTBits < EVTBits) {
Hi = DAG.getNode(ISD::AssertSext, dl, NVT, Hi,
- DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), EVTBits - NVTBits)));
+ DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(),
+ EVTBits - NVTBits)));
} else {
Lo = DAG.getNode(ISD::AssertSext, dl, NVT, Lo, DAG.getValueType(EVT));
// The high part replicates the sign bit of Lo, make it explicit.
@@ -1390,7 +1415,8 @@ void DAGTypeLegalizer::ExpandIntRes_AssertZext(SDNode *N,
if (NVTBits < EVTBits) {
Hi = DAG.getNode(ISD::AssertZext, dl, NVT, Hi,
- DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), EVTBits - NVTBits)));
+ DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(),
+ EVTBits - NVTBits)));
} else {
Lo = DAG.getNode(ISD::AssertZext, dl, NVT, Lo, DAG.getValueType(EVT));
// The high part must be zero, make it explicit.
@@ -1509,7 +1535,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
if (N->getMemoryVT().bitsLE(NVT)) {
EVT MemVT = N->getMemoryVT();
- Lo = DAG.getExtLoad(ExtType, dl, NVT, Ch, Ptr, N->getSrcValue(), SVOffset,
+ Lo = DAG.getExtLoad(ExtType, NVT, dl, Ch, Ptr, N->getSrcValue(), SVOffset,
MemVT, isVolatile, isNonTemporal, Alignment);
// Remember the chain.
@@ -1542,7 +1568,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
unsigned IncrementSize = NVT.getSizeInBits()/8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
- Hi = DAG.getExtLoad(ExtType, dl, NVT, Ch, Ptr, N->getSrcValue(),
+ Hi = DAG.getExtLoad(ExtType, NVT, dl, Ch, Ptr, N->getSrcValue(),
SVOffset+IncrementSize, NEVT,
isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize));
@@ -1560,7 +1586,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
unsigned ExcessBits = (EBytes - IncrementSize)*8;
// Load both the high bits and maybe some of the low bits.
- Hi = DAG.getExtLoad(ExtType, dl, NVT, Ch, Ptr, N->getSrcValue(), SVOffset,
+ Hi = DAG.getExtLoad(ExtType, NVT, dl, Ch, Ptr, N->getSrcValue(), SVOffset,
EVT::getIntegerVT(*DAG.getContext(),
MemVT.getSizeInBits() - ExcessBits),
isVolatile, isNonTemporal, Alignment);
@@ -1569,7 +1595,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
// Load the rest of the low bits.
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, NVT, Ch, Ptr, N->getSrcValue(),
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD, NVT, dl, Ch, Ptr, N->getSrcValue(),
SVOffset+IncrementSize,
EVT::getIntegerVT(*DAG.getContext(), ExcessBits),
isVolatile, isNonTemporal,
@@ -1699,6 +1725,48 @@ void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N,
SplitInteger(MakeLibCall(LC, VT, Ops, 2, true/*irrelevant*/, dl), Lo, Hi);
}
+void DAGTypeLegalizer::ExpandIntRes_SADDSUBO(SDNode *Node,
+ SDValue &Lo, SDValue &Hi) {
+ SDValue LHS = Node->getOperand(0);
+ SDValue RHS = Node->getOperand(1);
+ DebugLoc dl = Node->getDebugLoc();
+
+ // Expand the result by simply replacing it with the equivalent
+ // non-overflow-checking operation.
+ SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ?
+ ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
+ LHS, RHS);
+ SplitInteger(Sum, Lo, Hi);
+
+ // Compute the overflow.
+ //
+ // LHSSign -> LHS >= 0
+ // RHSSign -> RHS >= 0
+ // SumSign -> Sum >= 0
+ //
+ // Add:
+ // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
+ // Sub:
+ // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
+ //
+ EVT OType = Node->getValueType(1);
+ SDValue Zero = DAG.getConstant(0, LHS.getValueType());
+
+ SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE);
+ SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE);
+ SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign,
+ Node->getOpcode() == ISD::SADDO ?
+ ISD::SETEQ : ISD::SETNE);
+
+ SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE);
+ SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE);
+
+ SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE);
+
+ // Use the calculated overflow everywhere.
+ ReplaceValueWith(SDValue(Node, 1), Cmp);
+}
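The sign-comparison rule spelled out in the comment above, checked on host int32_t values (sadd_overflows is an invented name; wrapping two's-complement addition assumed):

#include <cassert>
#include <cstdint>

// For addition: overflow iff the operands have the same sign and the result's
// sign differs from it. (For subtraction the first test flips to "differ".)
static bool sadd_overflows(int32_t lhs, int32_t rhs) {
  int32_t sum = static_cast<int32_t>(static_cast<uint32_t>(lhs) +
                                     static_cast<uint32_t>(rhs));  // wrapping add
  bool lhsNonNeg = lhs >= 0, rhsNonNeg = rhs >= 0, sumNonNeg = sum >= 0;
  return (lhsNonNeg == rhsNonNeg) && (lhsNonNeg != sumNonNeg);
}

int main() {
  assert(sadd_overflows(INT32_MAX, 1));    // wraps negative
  assert(!sadd_overflows(INT32_MAX, -1));
  assert(sadd_overflows(INT32_MIN, -1));   // most negative minus one
  return 0;
}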
+
void DAGTypeLegalizer::ExpandIntRes_SDIV(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
@@ -1833,7 +1901,8 @@ void DAGTypeLegalizer::ExpandIntRes_SIGN_EXTEND(SDNode *N,
unsigned ExcessBits =
Op.getValueType().getSizeInBits() - NVT.getSizeInBits();
Hi = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, Hi.getValueType(), Hi,
- DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), ExcessBits)));
+ DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(),
+ ExcessBits)));
}
}
@@ -1859,7 +1928,8 @@ ExpandIntRes_SIGN_EXTEND_INREG(SDNode *N, SDValue &Lo, SDValue &Hi) {
unsigned ExcessBits =
EVT.getSizeInBits() - Lo.getValueType().getSizeInBits();
Hi = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, Hi.getValueType(), Hi,
- DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), ExcessBits)));
+ DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(),
+ ExcessBits)));
}
}
@@ -1894,6 +1964,29 @@ void DAGTypeLegalizer::ExpandIntRes_TRUNCATE(SDNode *N,
Hi = DAG.getNode(ISD::TRUNCATE, dl, NVT, Hi);
}
+void DAGTypeLegalizer::ExpandIntRes_UADDSUBO(SDNode *N,
+ SDValue &Lo, SDValue &Hi) {
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ DebugLoc dl = N->getDebugLoc();
+
+ // Expand the result by simply replacing it with the equivalent
+ // non-overflow-checking operation.
+ SDValue Sum = DAG.getNode(N->getOpcode() == ISD::UADDO ?
+ ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
+ LHS, RHS);
+ SplitInteger(Sum, Lo, Hi);
+
+ // Calculate the overflow: addition overflows iff a + b < a, and subtraction
+ // overflows iff a - b > a.
+ SDValue Ofl = DAG.getSetCC(dl, N->getValueType(1), Sum, LHS,
+ N->getOpcode () == ISD::UADDO ?
+ ISD::SETULT : ISD::SETUGT);
+
+ // Use the calculated overflow everywhere.
+ ReplaceValueWith(SDValue(N, 1), Ofl);
+}
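The unsigned rule from the comment above is easy to confirm directly on host values (illustrative only): with wrapping unsigned arithmetic, a + b overflowed iff the sum came out smaller than an operand, and a - b underflowed iff the difference came out larger than a.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t a = 0xfffffff0u, b = 0x20u;
  uint32_t sum = a + b;   // wraps to 0x10
  assert(sum < a);        // so UADDO reports overflow

  uint32_t c = 5u, d = 9u;
  uint32_t diff = c - d;  // wraps to 0xfffffffc
  assert(diff > c);       // so USUBO reports overflow
  return 0;
}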
+
void DAGTypeLegalizer::ExpandIntRes_UDIV(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
@@ -1955,7 +2048,9 @@ void DAGTypeLegalizer::ExpandIntRes_ZERO_EXTEND(SDNode *N,
SplitInteger(Res, Lo, Hi);
unsigned ExcessBits =
Op.getValueType().getSizeInBits() - NVT.getSizeInBits();
- Hi = DAG.getZeroExtendInReg(Hi, dl, EVT::getIntegerVT(*DAG.getContext(), ExcessBits));
+ Hi = DAG.getZeroExtendInReg(Hi, dl,
+ EVT::getIntegerVT(*DAG.getContext(),
+ ExcessBits));
}
}
@@ -2135,9 +2230,9 @@ SDValue DAGTypeLegalizer::ExpandIntOp_BR_CC(SDNode *N) {
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
DAG.getCondCode(CCCode), NewLHS, NewRHS,
- N->getOperand(4));
+ N->getOperand(4)), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_SELECT_CC(SDNode *N) {
@@ -2153,9 +2248,9 @@ SDValue DAGTypeLegalizer::ExpandIntOp_SELECT_CC(SDNode *N) {
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
N->getOperand(2), N->getOperand(3),
- DAG.getCondCode(CCCode));
+ DAG.getCondCode(CCCode)), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_SETCC(SDNode *N) {
@@ -2171,8 +2266,8 @@ SDValue DAGTypeLegalizer::ExpandIntOp_SETCC(SDNode *N) {
}
// Otherwise, update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
- DAG.getCondCode(CCCode));
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
+ DAG.getCondCode(CCCode)), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_Shift(SDNode *N) {
@@ -2181,7 +2276,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_Shift(SDNode *N) {
// upper half of the shift amount is zero. Just use the lower half.
SDValue Lo, Hi;
GetExpandedInteger(N->getOperand(1), Lo, Hi);
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0), Lo);
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Lo), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_RETURNADDR(SDNode *N) {
@@ -2190,7 +2285,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_RETURNADDR(SDNode *N) {
// constant to valid type.
SDValue Lo, Hi;
GetExpandedInteger(N->getOperand(0), Lo, Hi);
- return DAG.UpdateNodeOperands(SDValue(N, 0), Lo);
+ return SDValue(DAG.UpdateNodeOperands(N, Lo), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_SINT_TO_FP(SDNode *N) {
@@ -2256,7 +2351,8 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
unsigned EBytes = ExtVT.getStoreSize();
unsigned IncrementSize = NVT.getSizeInBits()/8;
unsigned ExcessBits = (EBytes - IncrementSize)*8;
- EVT HiVT = EVT::getIntegerVT(*DAG.getContext(), ExtVT.getSizeInBits() - ExcessBits);
+ EVT HiVT = EVT::getIntegerVT(*DAG.getContext(),
+ ExtVT.getSizeInBits() - ExcessBits);
if (ExcessBits < NVT.getSizeInBits()) {
// Transfer high bits from the top of Lo to the bottom of Hi.
@@ -2294,13 +2390,29 @@ SDValue DAGTypeLegalizer::ExpandIntOp_TRUNCATE(SDNode *N) {
return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), N->getValueType(0), InL);
}
+static const fltSemantics *EVTToAPFloatSemantics(EVT VT) {
+ switch (VT.getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Unknown FP format");
+ case MVT::f32: return &APFloat::IEEEsingle;
+ case MVT::f64: return &APFloat::IEEEdouble;
+ case MVT::f80: return &APFloat::x87DoubleExtended;
+ case MVT::f128: return &APFloat::IEEEquad;
+ case MVT::ppcf128: return &APFloat::PPCDoubleDouble;
+ }
+}
+
SDValue DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) {
SDValue Op = N->getOperand(0);
EVT SrcVT = Op.getValueType();
EVT DstVT = N->getValueType(0);
DebugLoc dl = N->getDebugLoc();
- if (TLI.getOperationAction(ISD::SINT_TO_FP, SrcVT) == TargetLowering::Custom){
+ // The following optimization is valid only if every value in SrcVT (when
+ // treated as signed) is representable in DstVT. Check that the mantissa
+ // size of DstVT is >= the number of bits in SrcVT - 1.
+ const fltSemantics *sem = EVTToAPFloatSemantics(DstVT);
+ if (APFloat::semanticsPrecision(*sem) >= SrcVT.getSizeInBits()-1 &&
+ TLI.getOperationAction(ISD::SINT_TO_FP, SrcVT) == TargetLowering::Custom){
// Do a signed conversion then adjust the result.
SDValue SignedConv = DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, Op);
SignedConv = TLI.LowerOperation(SignedConv, DAG);
@@ -2348,7 +2460,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) {
// Load the value out, extending it from f32 to the destination float type.
// FIXME: Avoid the extend by constructing the right constant pool?
- SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, DstVT, DAG.getEntryNode(),
+ SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, DstVT, dl, DAG.getEntryNode(),
FudgePtr, NULL, 0, MVT::f32,
false, false, Alignment);
return DAG.getNode(ISD::FADD, dl, DstVT, SignedConv, Fudge);
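For context, the UINT_TO_FP expansion above follows the classic trick of doing a
signed conversion and then compensating with a "fudge" constant when the sign
bit was set. A rough standalone C++ sketch of the scalar idea (illustrative
only, not part of the patch):

#include <cstdint>

// Convert an unsigned 64-bit value to double by way of a signed conversion.
double uint64_to_double_via_signed(uint64_t X) {
  // The signed conversion is off by 2^64 whenever the top bit is set.
  double D = static_cast<double>(static_cast<int64_t>(X));
  if (X >> 63)
    D += 18446744073709551616.0; // fudge factor: 2^64
  return D;
}

The new APFloat::semanticsPrecision() check earlier in the function guards the
same kind of shortcut: it requires the destination's mantissa to hold at least
SrcVT's width minus the sign bit, as the patch comment states.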
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index f3e7ca4..6e56c98 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -485,15 +485,14 @@ SDNode *DAGTypeLegalizer::AnalyzeNewNode(SDNode *N) {
NewOps.push_back(Op);
} else if (Op != OrigOp) {
// This is the first operand to change - add all operands so far.
- NewOps.insert(NewOps.end(), N->op_begin(), N->op_begin() + i);
+ NewOps.append(N->op_begin(), N->op_begin() + i);
NewOps.push_back(Op);
}
}
// Some operands changed - update the node.
if (!NewOps.empty()) {
- SDNode *M = DAG.UpdateNodeOperands(SDValue(N, 0), &NewOps[0],
- NewOps.size()).getNode();
+ SDNode *M = DAG.UpdateNodeOperands(N, &NewOps[0], NewOps.size());
if (M != N) {
// The node morphed into a different node. Normally for this to happen
// the original node would have to be marked NewNode. However this can
@@ -684,44 +683,50 @@ void DAGTypeLegalizer::ReplaceValueWith(SDValue From, SDValue To) {
// can potentially cause recursive merging.
SmallSetVector<SDNode*, 16> NodesToAnalyze;
NodeUpdateListener NUL(*this, NodesToAnalyze);
- DAG.ReplaceAllUsesOfValueWith(From, To, &NUL);
-
- // The old node may still be present in a map like ExpandedIntegers or
- // PromotedIntegers. Inform maps about the replacement.
- ReplacedValues[From] = To;
-
- // Process the list of nodes that need to be reanalyzed.
- while (!NodesToAnalyze.empty()) {
- SDNode *N = NodesToAnalyze.back();
- NodesToAnalyze.pop_back();
- if (N->getNodeId() != DAGTypeLegalizer::NewNode)
- // The node was analyzed while reanalyzing an earlier node - it is safe to
- // skip. Note that this is not a morphing node - otherwise it would still
- // be marked NewNode.
- continue;
+ do {
+ DAG.ReplaceAllUsesOfValueWith(From, To, &NUL);
+
+ // The old node may still be present in a map like ExpandedIntegers or
+ // PromotedIntegers. Inform maps about the replacement.
+ ReplacedValues[From] = To;
+
+ // Process the list of nodes that need to be reanalyzed.
+ while (!NodesToAnalyze.empty()) {
+ SDNode *N = NodesToAnalyze.back();
+ NodesToAnalyze.pop_back();
+ if (N->getNodeId() != DAGTypeLegalizer::NewNode)
+ // The node was analyzed while reanalyzing an earlier node - it is safe
+ // to skip. Note that this is not a morphing node - otherwise it would
+ // still be marked NewNode.
+ continue;
- // Analyze the node's operands and recalculate the node ID.
- SDNode *M = AnalyzeNewNode(N);
- if (M != N) {
- // The node morphed into a different node. Make everyone use the new node
- // instead.
- assert(M->getNodeId() != NewNode && "Analysis resulted in NewNode!");
- assert(N->getNumValues() == M->getNumValues() &&
- "Node morphing changed the number of results!");
- for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
- SDValue OldVal(N, i);
- SDValue NewVal(M, i);
- if (M->getNodeId() == Processed)
- RemapValue(NewVal);
- DAG.ReplaceAllUsesOfValueWith(OldVal, NewVal, &NUL);
+ // Analyze the node's operands and recalculate the node ID.
+ SDNode *M = AnalyzeNewNode(N);
+ if (M != N) {
+ // The node morphed into a different node. Make everyone use the new
+ // node instead.
+ assert(M->getNodeId() != NewNode && "Analysis resulted in NewNode!");
+ assert(N->getNumValues() == M->getNumValues() &&
+ "Node morphing changed the number of results!");
+ for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
+ SDValue OldVal(N, i);
+ SDValue NewVal(M, i);
+ if (M->getNodeId() == Processed)
+ RemapValue(NewVal);
+ DAG.ReplaceAllUsesOfValueWith(OldVal, NewVal, &NUL);
+ }
+ // The original node continues to exist in the DAG, marked NewNode.
}
- // The original node continues to exist in the DAG, marked NewNode.
}
- }
+ // When recursively updating nodes with new nodes, it is possible to have
+ // new uses of From due to CSE. If this happens, replace the new uses of
+ // From with To.
+ } while (!From.use_empty());
}
void DAGTypeLegalizer::SetPromotedInteger(SDValue Op, SDValue Result) {
- assert(Result.getValueType() == TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
+ assert(Result.getValueType() ==
+ TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
"Invalid type for promoted integer");
AnalyzeNewValue(Result);
@@ -731,7 +736,8 @@ void DAGTypeLegalizer::SetPromotedInteger(SDValue Op, SDValue Result) {
}
void DAGTypeLegalizer::SetSoftenedFloat(SDValue Op, SDValue Result) {
- assert(Result.getValueType() == TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
+ assert(Result.getValueType() ==
+ TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
"Invalid type for softened float");
AnalyzeNewValue(Result);
@@ -762,7 +768,8 @@ void DAGTypeLegalizer::GetExpandedInteger(SDValue Op, SDValue &Lo,
void DAGTypeLegalizer::SetExpandedInteger(SDValue Op, SDValue Lo,
SDValue Hi) {
- assert(Lo.getValueType() == TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
+ assert(Lo.getValueType() ==
+ TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
Hi.getValueType() == Lo.getValueType() &&
"Invalid type for expanded integer");
// Lo/Hi may have been newly allocated, if so, add nodeid's as relevant.
@@ -788,7 +795,8 @@ void DAGTypeLegalizer::GetExpandedFloat(SDValue Op, SDValue &Lo,
void DAGTypeLegalizer::SetExpandedFloat(SDValue Op, SDValue Lo,
SDValue Hi) {
- assert(Lo.getValueType() == TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
+ assert(Lo.getValueType() ==
+ TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
Hi.getValueType() == Lo.getValueType() &&
"Invalid type for expanded float");
// Lo/Hi may have been newly allocated, if so, add nodeid's as relevant.
@@ -832,7 +840,8 @@ void DAGTypeLegalizer::SetSplitVector(SDValue Op, SDValue Lo,
}
void DAGTypeLegalizer::SetWidenedVector(SDValue Op, SDValue Result) {
- assert(Result.getValueType() == TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
+ assert(Result.getValueType() ==
+ TLI.getTypeToTransformTo(*DAG.getContext(), Op.getValueType()) &&
"Invalid type for widened vector");
AnalyzeNewValue(Result);
@@ -940,7 +949,8 @@ void DAGTypeLegalizer::GetSplitDestVTs(EVT InVT, EVT &LoVT, EVT &HiVT) {
} else {
unsigned NumElements = InVT.getVectorNumElements();
assert(!(NumElements & 1) && "Splitting vector, but not in half!");
- LoVT = HiVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(), NumElements/2);
+ LoVT = HiVT = EVT::getVectorVT(*DAG.getContext(),
+ InVT.getVectorElementType(), NumElements/2);
}
}
@@ -980,7 +990,8 @@ SDValue DAGTypeLegalizer::JoinIntegers(SDValue Lo, SDValue Hi) {
DebugLoc dlLo = Lo.getDebugLoc();
EVT LVT = Lo.getValueType();
EVT HVT = Hi.getValueType();
- EVT NVT = EVT::getIntegerVT(*DAG.getContext(), LVT.getSizeInBits() + HVT.getSizeInBits());
+ EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
+ LVT.getSizeInBits() + HVT.getSizeInBits());
Lo = DAG.getNode(ISD::ZERO_EXTEND, dlLo, NVT, Lo);
Hi = DAG.getNode(ISD::ANY_EXTEND, dlHi, NVT, Hi);
@@ -1082,7 +1093,8 @@ void DAGTypeLegalizer::SplitInteger(SDValue Op,
/// type half the size of Op's.
void DAGTypeLegalizer::SplitInteger(SDValue Op,
SDValue &Lo, SDValue &Hi) {
- EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), Op.getValueType().getSizeInBits()/2);
+ EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(),
+ Op.getValueType().getSizeInBits()/2);
SplitInteger(Op, HalfVT, HalfVT, Lo, Hi);
}
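The JoinIntegers/SplitInteger helpers touched above combine or split the two
expanded halves of an illegal integer type; the combined width is computed from
the operand types via EVT::getIntegerVT(), as the reformatted lines show. A
hypothetical scalar analogue, fixed at 32/64 bits purely for illustration:

#include <cstdint>

// Split a 64-bit value into its 32-bit halves.
static void split_halves(uint64_t Op, uint32_t &Lo, uint32_t &Hi) {
  Lo = static_cast<uint32_t>(Op);        // low half
  Hi = static_cast<uint32_t>(Op >> 32);  // high half
}

// Join two 32-bit halves back into a 64-bit value (Hi:Lo).
static uint64_t join_halves(uint32_t Lo, uint32_t Hi) {
  return (static_cast<uint64_t>(Hi) << 32) | Lo;
}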
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index b0af357..d560292 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -32,8 +32,8 @@ namespace llvm {
/// involves promoting small sizes to large sizes or splitting up large values
/// into small values.
///
-class VISIBILITY_HIDDEN DAGTypeLegalizer {
- TargetLowering &TLI;
+class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
+ const TargetLowering &TLI;
SelectionDAG &DAG;
public:
// NodeIdFlags - This pass uses the NodeId on the SDNodes to hold information
@@ -75,7 +75,7 @@ private:
/// getTypeAction - Return how we should legalize values of this type.
LegalizeAction getTypeAction(EVT VT) const {
- switch (ValueTypeActions.getTypeAction(*DAG.getContext(), VT)) {
+ switch (ValueTypeActions.getTypeAction(VT)) {
default:
assert(false && "Unknown legalize action!");
case TargetLowering::Legal:
@@ -86,8 +86,7 @@ private:
// 2) For vectors, use a wider vector type (e.g. v3i32 -> v4i32).
if (!VT.isVector())
return PromoteInteger;
- else
- return WidenVector;
+ return WidenVector;
case TargetLowering::Expand:
// Expand can mean
// 1) split scalar in half, 2) convert a float to an integer,
@@ -95,23 +94,21 @@ private:
if (!VT.isVector()) {
if (VT.isInteger())
return ExpandInteger;
- else if (VT.getSizeInBits() ==
- TLI.getTypeToTransformTo(*DAG.getContext(), VT).getSizeInBits())
+ if (VT.getSizeInBits() ==
+ TLI.getTypeToTransformTo(*DAG.getContext(), VT).getSizeInBits())
return SoftenFloat;
- else
- return ExpandFloat;
- } else if (VT.getVectorNumElements() == 1) {
- return ScalarizeVector;
- } else {
- return SplitVector;
+ return ExpandFloat;
}
+
+ if (VT.getVectorNumElements() == 1)
+ return ScalarizeVector;
+ return SplitVector;
}
}
/// isTypeLegal - Return true if this type is legal on this target.
bool isTypeLegal(EVT VT) const {
- return (ValueTypeActions.getTypeAction(*DAG.getContext(), VT) ==
- TargetLowering::Legal);
+ return ValueTypeActions.getTypeAction(VT) == TargetLowering::Legal;
}
/// IgnoreNodeResults - Pretend all of this node's results are legal.
@@ -257,6 +254,7 @@ private:
SDValue PromoteIntRes_CTTZ(SDNode *N);
SDValue PromoteIntRes_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue PromoteIntRes_FP_TO_XINT(SDNode *N);
+ SDValue PromoteIntRes_FP32_TO_FP16(SDNode *N);
SDValue PromoteIntRes_INT_EXTEND(SDNode *N);
SDValue PromoteIntRes_LOAD(LoadSDNode *N);
SDValue PromoteIntRes_Overflow(SDNode *N);
@@ -344,6 +342,9 @@ private:
void ExpandIntRes_UREM (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_Shift (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_SADDSUBO (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_UADDSUBO (SDNode *N, SDValue &Lo, SDValue &Hi);
+
void ExpandShiftByConstant(SDNode *N, unsigned Amt,
SDValue &Lo, SDValue &Hi);
bool ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi);
@@ -406,6 +407,7 @@ private:
SDValue SoftenFloatRes_FNEARBYINT(SDNode *N);
SDValue SoftenFloatRes_FNEG(SDNode *N);
SDValue SoftenFloatRes_FP_EXTEND(SDNode *N);
+ SDValue SoftenFloatRes_FP16_TO_FP32(SDNode *N);
SDValue SoftenFloatRes_FP_ROUND(SDNode *N);
SDValue SoftenFloatRes_FPOW(SDNode *N);
SDValue SoftenFloatRes_FPOWI(SDNode *N);
@@ -429,6 +431,7 @@ private:
SDValue SoftenFloatOp_FP_ROUND(SDNode *N);
SDValue SoftenFloatOp_FP_TO_SINT(SDNode *N);
SDValue SoftenFloatOp_FP_TO_UINT(SDNode *N);
+ SDValue SoftenFloatOp_FP32_TO_FP16(SDNode *N);
SDValue SoftenFloatOp_SELECT_CC(SDNode *N);
SDValue SoftenFloatOp_SETCC(SDNode *N);
SDValue SoftenFloatOp_STORE(SDNode *N, unsigned OpNo);
@@ -455,6 +458,7 @@ private:
void ExpandFloatRes_FABS (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FADD (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FCEIL (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandFloatRes_FCOPYSIGN (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FCOS (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FDIV (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandFloatRes_FEXP (SDNode *N, SDValue &Lo, SDValue &Hi);
@@ -577,6 +581,7 @@ private:
SDValue SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N);
SDValue SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo);
+ SDValue SplitVecOp_CONCAT_VECTORS(SDNode *N);
//===--------------------------------------------------------------------===//
// Vector Widening Support: LegalizeVectorTypes.cpp
@@ -616,6 +621,7 @@ private:
SDValue WidenVecRes_Binary(SDNode *N);
SDValue WidenVecRes_Convert(SDNode *N);
+ SDValue WidenVecRes_POWI(SDNode *N);
SDValue WidenVecRes_Shift(SDNode *N);
SDValue WidenVecRes_Unary(SDNode *N);
SDValue WidenVecRes_InregOp(SDNode *N);
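The getTypeAction() cleanup above replaces the else-after-return chains with
early returns without changing the decision tree. A standalone sketch of that
tree, with the target queries replaced by plain booleans (hypothetical, for
illustration only):

enum Action { PromoteInteger, WidenVector, ExpandInteger, SoftenFloat,
              ExpandFloat, ScalarizeVector, SplitVector };

// Decision made for a type the target marks Promote or Expand.
static Action pickAction(bool WantsPromote, bool IsVector, bool IsInteger,
                         bool SameSizeTransform, unsigned NumElts) {
  if (WantsPromote)                       // TargetLowering::Promote
    return IsVector ? WidenVector : PromoteInteger;
  // TargetLowering::Expand
  if (!IsVector) {
    if (IsInteger)
      return ExpandInteger;               // split scalar integer in half
    if (SameSizeTransform)
      return SoftenFloat;                 // same-size float -> integer
    return ExpandFloat;                   // split scalar float in half
  }
  if (NumElts == 1)
    return ScalarizeVector;               // single-element vector -> scalar
  return SplitVector;                     // split the vector in half
}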
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index 5e83b4b..9c2b1d9 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -173,8 +173,9 @@ void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo,
EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), OldVT);
SDValue NewVec = DAG.getNode(ISD::BIT_CONVERT, dl,
- EVT::getVectorVT(*DAG.getContext(), NewVT, 2*OldElts),
- OldVec);
+ EVT::getVectorVT(*DAG.getContext(),
+ NewVT, 2*OldElts),
+ OldVec);
// Extract the elements at 2 * Idx and 2 * Idx + 1 from the new vector.
SDValue Idx = N->getOperand(1);
@@ -237,13 +238,15 @@ void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDValue &Lo,
}
void DAGTypeLegalizer::ExpandRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi) {
- EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ EVT OVT = N->getValueType(0);
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), OVT);
SDValue Chain = N->getOperand(0);
SDValue Ptr = N->getOperand(1);
DebugLoc dl = N->getDebugLoc();
+ const unsigned Align = N->getConstantOperandVal(3);
- Lo = DAG.getVAArg(NVT, dl, Chain, Ptr, N->getOperand(2));
- Hi = DAG.getVAArg(NVT, dl, Lo.getValue(1), Ptr, N->getOperand(2));
+ Lo = DAG.getVAArg(NVT, dl, Chain, Ptr, N->getOperand(2), Align);
+ Hi = DAG.getVAArg(NVT, dl, Lo.getValue(1), Ptr, N->getOperand(2), 0);
// Handle endianness of the load.
if (TLI.isBigEndian())
@@ -268,7 +271,9 @@ SDValue DAGTypeLegalizer::ExpandOp_BIT_CONVERT(SDNode *N) {
// is no point, and it might create expansion loops). For example, on
// x86 this turns v1i64 = BIT_CONVERT i64 into v1i64 = BIT_CONVERT v2i32.
EVT OVT = N->getOperand(0).getValueType();
- EVT NVT = EVT::getVectorVT(*DAG.getContext(), TLI.getTypeToTransformTo(*DAG.getContext(), OVT), 2);
+ EVT NVT = EVT::getVectorVT(*DAG.getContext(),
+ TLI.getTypeToTransformTo(*DAG.getContext(), OVT),
+ 2);
if (isTypeLegal(NVT)) {
SDValue Parts[2];
@@ -312,8 +317,9 @@ SDValue DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) {
}
SDValue NewVec = DAG.getNode(ISD::BUILD_VECTOR, dl,
- EVT::getVectorVT(*DAG.getContext(), NewVT, NewElts.size()),
- &NewElts[0], NewElts.size());
+ EVT::getVectorVT(*DAG.getContext(),
+ NewVT, NewElts.size()),
+ &NewElts[0], NewElts.size());
// Convert the new vector to the old vector type.
return DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, NewVec);
@@ -380,7 +386,8 @@ SDValue DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) {
DebugLoc dl = N->getDebugLoc();
StoreSDNode *St = cast<StoreSDNode>(N);
- EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), St->getValue().getValueType());
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(),
+ St->getValue().getValueType());
SDValue Chain = St->getChain();
SDValue Ptr = St->getBasePtr();
int SVOffset = St->getSrcValueOffset();
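In the ExpandRes_VAARG change above, the first half-sized va_arg load now
carries the original alignment while the second is unaligned, and the existing
big-endian handling decides which loaded piece becomes Lo and which becomes Hi.
A minimal sketch of that swap (hypothetical; 32/64-bit widths assumed):

#include <cstdint>

// Combine the two pieces loaded from the va_list into one value; on a
// big-endian target the first (aligned) piece is the high half.
static uint64_t combine_vaarg_halves(uint32_t First, uint32_t Second,
                                     bool BigEndian) {
  uint32_t Lo = BigEndian ? Second : First;
  uint32_t Hi = BigEndian ? First : Second;
  return (static_cast<uint64_t>(Hi) << 32) | Lo;
}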
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index b5f84c0..621c087 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -34,7 +34,7 @@ using namespace llvm;
namespace {
class VectorLegalizer {
SelectionDAG& DAG;
- TargetLowering& TLI;
+ const TargetLowering &TLI;
bool Changed; // Keep track of whether anything changed
/// LegalizedNodes - For nodes that are of legal width, and that have more
@@ -116,7 +116,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
Ops.push_back(LegalizeOp(Node->getOperand(i)));
SDValue Result =
- DAG.UpdateNodeOperands(Op.getValue(0), Ops.data(), Ops.size());
+ SDValue(DAG.UpdateNodeOperands(Op.getNode(), Ops.data(), Ops.size()), 0);
bool HasVectorValue = false;
for (SDNode::value_iterator J = Node->value_begin(), E = Node->value_end();
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 8363c3a..93bc2d0 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -165,9 +165,10 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N) {
SDValue DAGTypeLegalizer::ScalarizeVecRes_LOAD(LoadSDNode *N) {
assert(N->isUnindexed() && "Indexed vector load?");
- SDValue Result = DAG.getLoad(ISD::UNINDEXED, N->getDebugLoc(),
+ SDValue Result = DAG.getLoad(ISD::UNINDEXED,
N->getExtensionType(),
N->getValueType(0).getVectorElementType(),
+ N->getDebugLoc(),
N->getChain(), N->getBasePtr(),
DAG.getUNDEF(N->getBasePtr().getValueType()),
N->getSrcValue(), N->getSrcValueOffset(),
@@ -448,6 +449,11 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
+ case ISD::FEXP:
+ case ISD::FEXP2:
+ case ISD::FLOG:
+ case ISD::FLOG2:
+ case ISD::FLOG10:
SplitVecRes_UnaryOp(N, Lo, Hi);
break;
@@ -705,8 +711,9 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
// Store the new element. This may be larger than the vector element type,
// so use a truncating store.
SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
+ const Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
unsigned Alignment =
- TLI.getTargetData()->getPrefTypeAlignment(VecVT.getTypeForEVT(*DAG.getContext()));
+ TLI.getTargetData()->getPrefTypeAlignment(VecType);
Store = DAG.getTruncStore(Store, dl, Elt, EltPtr, NULL, 0, EltVT,
false, false, 0);
@@ -754,14 +761,14 @@ void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
EVT LoMemVT, HiMemVT;
GetSplitDestVTs(MemoryVT, LoMemVT, HiMemVT);
- Lo = DAG.getLoad(ISD::UNINDEXED, dl, ExtType, LoVT, Ch, Ptr, Offset,
+ Lo = DAG.getLoad(ISD::UNINDEXED, ExtType, LoVT, dl, Ch, Ptr, Offset,
SV, SVOffset, LoMemVT, isVolatile, isNonTemporal, Alignment);
unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
SVOffset += IncrementSize;
- Hi = DAG.getLoad(ISD::UNINDEXED, dl, ExtType, HiVT, Ch, Ptr, Offset,
+ Hi = DAG.getLoad(ISD::UNINDEXED, ExtType, HiVT, dl, Ch, Ptr, Offset,
SV, SVOffset, HiMemVT, isVolatile, isNonTemporal, Alignment);
// Build a factor node to remember that this load is independent of the
@@ -976,6 +983,7 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
case ISD::BIT_CONVERT: Res = SplitVecOp_BIT_CONVERT(N); break;
case ISD::EXTRACT_SUBVECTOR: Res = SplitVecOp_EXTRACT_SUBVECTOR(N); break;
case ISD::EXTRACT_VECTOR_ELT:Res = SplitVecOp_EXTRACT_VECTOR_ELT(N); break;
+ case ISD::CONCAT_VECTORS: Res = SplitVecOp_CONCAT_VECTORS(N); break;
case ISD::STORE:
Res = SplitVecOp_STORE(cast<StoreSDNode>(N), OpNo);
break;
@@ -1081,10 +1089,10 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
uint64_t LoElts = Lo.getValueType().getVectorNumElements();
if (IdxVal < LoElts)
- return DAG.UpdateNodeOperands(SDValue(N, 0), Lo, Idx);
- return DAG.UpdateNodeOperands(SDValue(N, 0), Hi,
+ return SDValue(DAG.UpdateNodeOperands(N, Lo, Idx), 0);
+ return SDValue(DAG.UpdateNodeOperands(N, Hi,
DAG.getConstant(IdxVal - LoElts,
- Idx.getValueType()));
+ Idx.getValueType())), 0);
}
// Store the vector to the stack.
@@ -1098,14 +1106,14 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
// Load back the required element.
StackPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
- return DAG.getExtLoad(ISD::EXTLOAD, dl, N->getValueType(0), Store, StackPtr,
+ return DAG.getExtLoad(ISD::EXTLOAD, N->getValueType(0), dl, Store, StackPtr,
SV, 0, EltVT, false, false, 0);
}
SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
assert(N->isUnindexed() && "Indexed store of vector?");
assert(OpNo == 1 && "Can only split the stored value");
- DebugLoc dl = N->getDebugLoc();
+ DebugLoc DL = N->getDebugLoc();
bool isTruncating = N->isTruncatingStore();
SDValue Ch = N->getChain();
@@ -1124,25 +1132,49 @@ SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
if (isTruncating)
- Lo = DAG.getTruncStore(Ch, dl, Lo, Ptr, N->getSrcValue(), SVOffset,
+ Lo = DAG.getTruncStore(Ch, DL, Lo, Ptr, N->getSrcValue(), SVOffset,
LoMemVT, isVol, isNT, Alignment);
else
- Lo = DAG.getStore(Ch, dl, Lo, Ptr, N->getSrcValue(), SVOffset,
+ Lo = DAG.getStore(Ch, DL, Lo, Ptr, N->getSrcValue(), SVOffset,
isVol, isNT, Alignment);
// Increment the pointer to the other half.
- Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
+ Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
SVOffset += IncrementSize;
if (isTruncating)
- Hi = DAG.getTruncStore(Ch, dl, Hi, Ptr, N->getSrcValue(), SVOffset,
+ Hi = DAG.getTruncStore(Ch, DL, Hi, Ptr, N->getSrcValue(), SVOffset,
HiMemVT, isVol, isNT, Alignment);
else
- Hi = DAG.getStore(Ch, dl, Hi, Ptr, N->getSrcValue(), SVOffset,
+ Hi = DAG.getStore(Ch, DL, Hi, Ptr, N->getSrcValue(), SVOffset,
isVol, isNT, Alignment);
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
+}
+
+SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) {
+ DebugLoc DL = N->getDebugLoc();
+
+ // The input operands must all have the same type, and we know the result
+ // type is valid. Convert this to a buildvector which extracts all the
+ // input elements.
+ // TODO: If the input elements are power-of-two vectors, we could convert this to
+ // a new CONCAT_VECTORS node with elements that are half-wide.
+ SmallVector<SDValue, 32> Elts;
+ EVT EltVT = N->getValueType(0).getVectorElementType();
+ for (unsigned op = 0, e = N->getNumOperands(); op != e; ++op) {
+ SDValue Op = N->getOperand(op);
+ for (unsigned i = 0, e = Op.getValueType().getVectorNumElements();
+ i != e; ++i) {
+ Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
+ Op, DAG.getIntPtrConstant(i)));
+
+ }
+ }
+
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, N->getValueType(0),
+ &Elts[0], Elts.size());
}
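The new SplitVecOp_CONCAT_VECTORS above handles a concatenation whose operands
need splitting by flattening everything into one BUILD_VECTOR of extracted
elements. The same strategy, modelled on plain std::vector (illustrative sketch
only, not part of the patch):

#include <vector>

// Concatenate by extracting every element of every input and rebuilding one
// flat vector; EXTRACT_VECTOR_ELT / BUILD_VECTOR play these roles in the DAG.
static std::vector<int>
concat_by_elements(const std::vector<std::vector<int> > &Inputs) {
  std::vector<int> Elts;
  for (size_t op = 0; op != Inputs.size(); ++op)
    for (size_t i = 0; i != Inputs[op].size(); ++i)
      Elts.push_back(Inputs[op][i]);
  return Elts;
}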
@@ -1198,7 +1230,6 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FDIV:
case ISD::FMUL:
case ISD::FPOW:
- case ISD::FPOWI:
case ISD::FREM:
case ISD::FSUB:
case ISD::MUL:
@@ -1214,6 +1245,10 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
Res = WidenVecRes_Binary(N);
break;
+ case ISD::FPOWI:
+ Res = WidenVecRes_POWI(N);
+ break;
+
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
@@ -1240,6 +1275,11 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FNEG:
case ISD::FSIN:
case ISD::FSQRT:
+ case ISD::FEXP:
+ case ISD::FEXP2:
+ case ISD::FLOG:
+ case ISD::FLOG2:
+ case ISD::FLOG10:
Res = WidenVecRes_Unary(N);
break;
}
@@ -1257,9 +1297,9 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
EVT WidenEltVT = WidenVT.getVectorElementType();
EVT VT = WidenVT;
unsigned NumElts = VT.getVectorNumElements();
- while (!TLI.isTypeLegal(VT) && NumElts != 1) {
- NumElts = NumElts / 2;
- VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts);
+ while (!TLI.isTypeSynthesizable(VT) && NumElts != 1) {
+ NumElts = NumElts / 2;
+ VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts);
}
if (NumElts != 1 && !TLI.canOpTrap(N->getOpcode(), VT)) {
@@ -1267,80 +1307,123 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
SDValue InOp1 = GetWidenedVector(N->getOperand(0));
SDValue InOp2 = GetWidenedVector(N->getOperand(1));
return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2);
- } else if (NumElts == 1) {
- // No legal vector version so unroll the vector operation and then widen.
+ }
+
+ // No legal vector version so unroll the vector operation and then widen.
+ if (NumElts == 1)
return DAG.UnrollVectorOp(N, WidenVT.getVectorNumElements());
- } else {
- // Since the operation can trap, apply operation on the original vector.
- SDValue InOp1 = GetWidenedVector(N->getOperand(0));
- SDValue InOp2 = GetWidenedVector(N->getOperand(1));
- unsigned CurNumElts = N->getValueType(0).getVectorNumElements();
-
- SmallVector<SDValue, 16> ConcatOps(CurNumElts);
- unsigned ConcatEnd = 0; // Current ConcatOps index.
- unsigned Idx = 0; // Current Idx into input vectors.
- while (CurNumElts != 0) {
- while (CurNumElts >= NumElts) {
- SDValue EOp1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, InOp1,
- DAG.getIntPtrConstant(Idx));
- SDValue EOp2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, InOp2,
- DAG.getIntPtrConstant(Idx));
- ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2);
- Idx += NumElts;
- CurNumElts -= NumElts;
- }
- EVT PrevVecVT = VT;
- do {
- NumElts = NumElts / 2;
- VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts);
- } while (!TLI.isTypeLegal(VT) && NumElts != 1);
-
- if (NumElts == 1) {
- // Since we are using concat vector, build a vector from the scalar ops.
- SDValue VecOp = DAG.getUNDEF(PrevVecVT);
- for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
- SDValue EOp1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
- InOp1, DAG.getIntPtrConstant(Idx));
- SDValue EOp2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
- InOp2, DAG.getIntPtrConstant(Idx));
- VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, PrevVecVT, VecOp,
- DAG.getNode(Opcode, dl, WidenEltVT, EOp1, EOp2),
- DAG.getIntPtrConstant(i));
- }
- CurNumElts = 0;
- ConcatOps[ConcatEnd++] = VecOp;
+
+ // Since the operation can trap, apply operation on the original vector.
+ EVT MaxVT = VT;
+ SDValue InOp1 = GetWidenedVector(N->getOperand(0));
+ SDValue InOp2 = GetWidenedVector(N->getOperand(1));
+ unsigned CurNumElts = N->getValueType(0).getVectorNumElements();
+
+ SmallVector<SDValue, 16> ConcatOps(CurNumElts);
+ unsigned ConcatEnd = 0; // Current ConcatOps index.
+ int Idx = 0; // Current Idx into input vectors.
+
+ // NumElts := greatest synthesizable vector size (at most WidenVT)
+ // while (orig. vector has unhandled elements) {
+ // take munches of size NumElts from the beginning and add to ConcatOps
+ // NumElts := next smaller supported vector size or 1
+ // }
+ while (CurNumElts != 0) {
+ while (CurNumElts >= NumElts) {
+ SDValue EOp1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, InOp1,
+ DAG.getIntPtrConstant(Idx));
+ SDValue EOp2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, InOp2,
+ DAG.getIntPtrConstant(Idx));
+ ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, VT, EOp1, EOp2);
+ Idx += NumElts;
+ CurNumElts -= NumElts;
+ }
+ do {
+ NumElts = NumElts / 2;
+ VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts);
+ } while (!TLI.isTypeSynthesizable(VT) && NumElts != 1);
+
+ if (NumElts == 1) {
+ for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
+ SDValue EOp1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
+ InOp1, DAG.getIntPtrConstant(Idx));
+ SDValue EOp2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
+ InOp2, DAG.getIntPtrConstant(Idx));
+ ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
+ EOp1, EOp2);
}
+ CurNumElts = 0;
}
+ }
- // Check to see if we have a single operation with the widen type.
- if (ConcatEnd == 1) {
- VT = ConcatOps[0].getValueType();
- if (VT == WidenVT)
- return ConcatOps[0];
- }
+ // Check to see if we have a single operation with the widen type.
+ if (ConcatEnd == 1) {
+ VT = ConcatOps[0].getValueType();
+ if (VT == WidenVT)
+ return ConcatOps[0];
+ }
- // Rebuild vector to one with the widen type
+ // while (Some element of ConcatOps is not of type MaxVT) {
+ // From the end of ConcatOps, collect elements of the same type and put
+ // them into an op of the next larger supported type
+ // }
+ while (ConcatOps[ConcatEnd-1].getValueType() != MaxVT) {
Idx = ConcatEnd - 1;
- while (Idx != 0) {
- VT = ConcatOps[Idx--].getValueType();
- while (Idx != 0 && ConcatOps[Idx].getValueType() == VT)
- --Idx;
- if (Idx != 0) {
- VT = ConcatOps[Idx].getValueType();
- ConcatOps[Idx+1] = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
- &ConcatOps[Idx+1], ConcatEnd - Idx - 1);
- ConcatEnd = Idx + 2;
+ VT = ConcatOps[Idx--].getValueType();
+ while (Idx >= 0 && ConcatOps[Idx].getValueType() == VT)
+ Idx--;
+
+ int NextSize = VT.isVector() ? VT.getVectorNumElements() : 1;
+ EVT NextVT;
+ do {
+ NextSize *= 2;
+ NextVT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NextSize);
+ } while (!TLI.isTypeSynthesizable(NextVT));
+
+ if (!VT.isVector()) {
+ // Scalar type, create an INSERT_VECTOR_ELT of type NextVT
+ SDValue VecOp = DAG.getUNDEF(NextVT);
+ unsigned NumToInsert = ConcatEnd - Idx - 1;
+ for (unsigned i = 0, OpIdx = Idx+1; i < NumToInsert; i++, OpIdx++) {
+ VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NextVT, VecOp,
+ ConcatOps[OpIdx], DAG.getIntPtrConstant(i));
}
+ ConcatOps[Idx+1] = VecOp;
+ ConcatEnd = Idx + 2;
+ } else {
+ // Vector type, create a CONCAT_VECTORS of type NextVT
+ SDValue undefVec = DAG.getUNDEF(VT);
+ unsigned OpsToConcat = NextSize/VT.getVectorNumElements();
+ SmallVector<SDValue, 16> SubConcatOps(OpsToConcat);
+ unsigned RealVals = ConcatEnd - Idx - 1;
+ unsigned SubConcatEnd = 0;
+ unsigned SubConcatIdx = Idx + 1;
+ while (SubConcatEnd < RealVals)
+ SubConcatOps[SubConcatEnd++] = ConcatOps[++Idx];
+ while (SubConcatEnd < OpsToConcat)
+ SubConcatOps[SubConcatEnd++] = undefVec;
+ ConcatOps[SubConcatIdx] = DAG.getNode(ISD::CONCAT_VECTORS, dl,
+ NextVT, &SubConcatOps[0],
+ OpsToConcat);
+ ConcatEnd = SubConcatIdx + 1;
}
-
- unsigned NumOps = WidenVT.getVectorNumElements()/VT.getVectorNumElements();
- if (NumOps != ConcatEnd ) {
- SDValue UndefVal = DAG.getUNDEF(VT);
- for (unsigned j = ConcatEnd; j < NumOps; ++j)
- ConcatOps[j] = UndefVal;
- }
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, &ConcatOps[0], NumOps);
}
+
+ // Check to see if we have a single operation with the widen type.
+ if (ConcatEnd == 1) {
+ VT = ConcatOps[0].getValueType();
+ if (VT == WidenVT)
+ return ConcatOps[0];
+ }
+
+ // Add undefs of size MaxVT until ConcatOps grows to the length of WidenVT
+ unsigned NumOps = WidenVT.getVectorNumElements()/MaxVT.getVectorNumElements();
+ if (NumOps != ConcatEnd ) {
+ SDValue UndefVal = DAG.getUNDEF(MaxVT);
+ for (unsigned j = ConcatEnd; j < NumOps; ++j)
+ ConcatOps[j] = UndefVal;
+ }
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, &ConcatOps[0], NumOps);
}
SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
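The reworked WidenVecRes_Binary above follows the "munching" strategy spelled
out in its comments: peel off the largest synthesizable sub-vector repeatedly,
drop to smaller sizes for the tail, then concatenate the pieces back up to the
widened type. The chunking part in isolation (hypothetical sketch; the real
code also skips sizes the target cannot synthesize and tracks the DAG nodes it
builds):

#include <vector>

// Return the chunk sizes used to cover NumElts elements, starting from the
// largest supported chunk and halving for the remainder (down to scalars).
static std::vector<unsigned> chunk_sizes(unsigned NumElts, unsigned MaxChunk) {
  std::vector<unsigned> Chunks;
  unsigned Chunk = MaxChunk;
  while (NumElts != 0 && Chunk != 0) {
    while (NumElts >= Chunk) {   // take munches of size Chunk
      Chunks.push_back(Chunk);
      NumElts -= Chunk;
    }
    Chunk /= 2;                  // next smaller supported size (or scalar)
  }
  return Chunks;
}

For example, covering a 7-element operation when only 4- and 2-element vectors
are supported yields chunks of 4, 2 and 1, which is what the CONCAT_VECTORS
rebuilding loop afterwards stitches back together.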
@@ -1365,7 +1448,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
return DAG.getNode(Opcode, dl, WidenVT, InOp);
}
- if (TLI.isTypeLegal(InWidenVT)) {
+ if (TLI.isTypeSynthesizable(InWidenVT)) {
// Because the result and the input are different vector types, widening
// the result could create a legal type but widening the input might make
// it an illegal type that might lead to repeatedly splitting the input
@@ -1409,6 +1492,13 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, &Ops[0], WidenNumElts);
}
+SDValue DAGTypeLegalizer::WidenVecRes_POWI(SDNode *N) {
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ SDValue InOp = GetWidenedVector(N->getOperand(0));
+ SDValue ShOp = N->getOperand(1);
+ return DAG.getNode(N->getOpcode(), N->getDebugLoc(), WidenVT, InOp, ShOp);
+}
+
SDValue DAGTypeLegalizer::WidenVecRes_Shift(SDNode *N) {
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue InOp = GetWidenedVector(N->getOperand(0));
@@ -1419,7 +1509,8 @@ SDValue DAGTypeLegalizer::WidenVecRes_Shift(SDNode *N) {
ShOp = GetWidenedVector(ShOp);
ShVT = ShOp.getValueType();
}
- EVT ShWidenVT = EVT::getVectorVT(*DAG.getContext(), ShVT.getVectorElementType(),
+ EVT ShWidenVT = EVT::getVectorVT(*DAG.getContext(),
+ ShVT.getVectorElementType(),
WidenVT.getVectorNumElements());
if (ShVT != ShWidenVT)
ShOp = ModifyToType(ShOp, ShWidenVT);
@@ -1493,12 +1584,13 @@ SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
unsigned NewNumElts = WidenSize / InSize;
if (InVT.isVector()) {
EVT InEltVT = InVT.getVectorElementType();
- NewInVT= EVT::getVectorVT(*DAG.getContext(), InEltVT, WidenSize / InEltVT.getSizeInBits());
+ NewInVT = EVT::getVectorVT(*DAG.getContext(), InEltVT,
+ WidenSize / InEltVT.getSizeInBits());
} else {
NewInVT = EVT::getVectorVT(*DAG.getContext(), InVT, NewNumElts);
}
- if (TLI.isTypeLegal(NewInVT)) {
+ if (TLI.isTypeSynthesizable(NewInVT)) {
// Because the result and the input are different vector types, widening
// the result could create a legal type but widening the input might make
// it an illegal type that might lead to repeatedly splitting the input
@@ -1617,7 +1709,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
SDValue RndOp = N->getOperand(3);
SDValue SatOp = N->getOperand(4);
- EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
unsigned WidenNumElts = WidenVT.getVectorNumElements();
EVT InVT = InOp.getValueType();
@@ -1638,7 +1730,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
SatOp, CvtCode);
}
- if (TLI.isTypeLegal(InWidenVT)) {
+ if (TLI.isTypeSynthesizable(InWidenVT)) {
// Because the result and the input are different vector types, widening
// the result could create a legal type but widening the input might make
// it an illegal type that might lead to repeatedly splitting the input
@@ -1650,9 +1742,9 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
SmallVector<SDValue, 16> Ops(NumConcat);
Ops[0] = InOp;
SDValue UndefVal = DAG.getUNDEF(InVT);
- for (unsigned i = 1; i != NumConcat; ++i) {
+ for (unsigned i = 1; i != NumConcat; ++i)
Ops[i] = UndefVal;
- }
+
InOp = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWidenVT, &Ops[0],NumConcat);
return DAG.getConvertRndSat(WidenVT, dl, InOp, DTyOp, STyOp, RndOp,
SatOp, CvtCode);
@@ -1791,7 +1883,8 @@ SDValue DAGTypeLegalizer::WidenVecRes_SELECT(SDNode *N) {
EVT CondVT = Cond1.getValueType();
if (CondVT.isVector()) {
EVT CondEltVT = CondVT.getVectorElementType();
- EVT CondWidenVT = EVT::getVectorVT(*DAG.getContext(), CondEltVT, WidenNumElts);
+ EVT CondWidenVT = EVT::getVectorVT(*DAG.getContext(),
+ CondEltVT, WidenNumElts);
if (getTypeAction(CondVT) == WidenVector)
Cond1 = GetWidenedVector(Cond1);
@@ -1859,7 +1952,8 @@ SDValue DAGTypeLegalizer::WidenVecRes_VSETCC(SDNode *N) {
SDValue InOp1 = N->getOperand(0);
EVT InVT = InOp1.getValueType();
assert(InVT.isVector() && "can not widen non vector type");
- EVT WidenInVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(), WidenNumElts);
+ EVT WidenInVT = EVT::getVectorVT(*DAG.getContext(),
+ InVT.getVectorElementType(), WidenNumElts);
InOp1 = GetWidenedVector(InOp1);
SDValue InOp2 = GetWidenedVector(N->getOperand(1));
@@ -1962,7 +2056,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_BIT_CONVERT(SDNode *N) {
if (InWidenSize % Size == 0 && !VT.isVector()) {
unsigned NewNumElts = InWidenSize / Size;
EVT NewVT = EVT::getVectorVT(*DAG.getContext(), VT, NewNumElts);
- if (TLI.isTypeLegal(NewVT)) {
+ if (TLI.isTypeSynthesizable(NewVT)) {
SDValue BitOp = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, InOp);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, BitOp,
DAG.getIntPtrConstant(0));
@@ -2060,7 +2154,7 @@ static EVT FindMemType(SelectionDAG& DAG, const TargetLowering &TLI,
unsigned MemVTWidth = MemVT.getSizeInBits();
if (MemVT.getSizeInBits() <= WidenEltWidth)
break;
- if (TLI.isTypeLegal(MemVT) && (WidenWidth % MemVTWidth) == 0 &&
+ if (TLI.isTypeSynthesizable(MemVT) && (WidenWidth % MemVTWidth) == 0 &&
(MemVTWidth <= Width ||
(Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
RetVT = MemVT;
@@ -2074,7 +2168,7 @@ static EVT FindMemType(SelectionDAG& DAG, const TargetLowering &TLI,
VT >= (unsigned)MVT::FIRST_VECTOR_VALUETYPE; --VT) {
EVT MemVT = (MVT::SimpleValueType) VT;
unsigned MemVTWidth = MemVT.getSizeInBits();
- if (TLI.isTypeLegal(MemVT) && WidenEltVT == MemVT.getVectorElementType() &&
+ if (TLI.isTypeSynthesizable(MemVT) && WidenEltVT == MemVT.getVectorElementType() &&
(WidenWidth % MemVTWidth) == 0 &&
(MemVTWidth <= Width ||
(Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
@@ -2124,7 +2218,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16>& LdChain,
// The routines chops the vector into the largest vector loads with the same
// element type or scalar loads and then recombines it to the widen vector
// type.
- EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), LD->getValueType(0));
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(),LD->getValueType(0));
unsigned WidenWidth = WidenVT.getSizeInBits();
EVT LdVT = LD->getMemoryVT();
DebugLoc dl = LD->getDebugLoc();
@@ -2153,25 +2247,24 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16>& LdChain,
// Check if we can load the element with one instruction
if (LdWidth <= NewVTWidth) {
- if (NewVT.isVector()) {
- if (NewVT != WidenVT) {
- assert(WidenWidth % NewVTWidth == 0);
- unsigned NumConcat = WidenWidth / NewVTWidth;
- SmallVector<SDValue, 16> ConcatOps(NumConcat);
- SDValue UndefVal = DAG.getUNDEF(NewVT);
- ConcatOps[0] = LdOp;
- for (unsigned i = 1; i != NumConcat; ++i)
- ConcatOps[i] = UndefVal;
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, &ConcatOps[0],
- NumConcat);
- } else
- return LdOp;
- } else {
- unsigned NumElts = WidenWidth / LdWidth;
+ if (!NewVT.isVector()) {
+ unsigned NumElts = WidenWidth / NewVTWidth;
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts);
SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT, LdOp);
return DAG.getNode(ISD::BIT_CONVERT, dl, WidenVT, VecOp);
}
+ if (NewVT == WidenVT)
+ return LdOp;
+
+ assert(WidenWidth % NewVTWidth == 0);
+ unsigned NumConcat = WidenWidth / NewVTWidth;
+ SmallVector<SDValue, 16> ConcatOps(NumConcat);
+ SDValue UndefVal = DAG.getUNDEF(NewVT);
+ ConcatOps[0] = LdOp;
+ for (unsigned i = 1; i != NumConcat; ++i)
+ ConcatOps[i] = UndefVal;
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, &ConcatOps[0],
+ NumConcat);
}
// Load vector by using multiple loads from largest vector to scalar
@@ -2204,52 +2297,55 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16>& LdChain,
// Build the vector from the loads operations
unsigned End = LdOps.size();
- if (LdOps[0].getValueType().isVector()) {
- // If the load contains vectors, build the vector using concat vector.
- // All of the vectors used to loads are power of 2 and the scalars load
- // can be combined to make a power of 2 vector.
- SmallVector<SDValue, 16> ConcatOps(End);
- int i = End - 1;
- int Idx = End;
- EVT LdTy = LdOps[i].getValueType();
- // First combine the scalar loads to a vector
- if (!LdTy.isVector()) {
- for (--i; i >= 0; --i) {
- LdTy = LdOps[i].getValueType();
- if (LdTy.isVector())
- break;
- }
- ConcatOps[--Idx] = BuildVectorFromScalar(DAG, LdTy, LdOps, i+1, End);
- }
- ConcatOps[--Idx] = LdOps[i];
+ if (!LdOps[0].getValueType().isVector())
+ // All the loads are scalar loads.
+ return BuildVectorFromScalar(DAG, WidenVT, LdOps, 0, End);
+
+ // If the load contains vectors, build the vector using concat vectors.
+ // All of the vectors loaded are powers of 2, and the scalar loads can be
+ // combined to make a power-of-2 vector.
+ SmallVector<SDValue, 16> ConcatOps(End);
+ int i = End - 1;
+ int Idx = End;
+ EVT LdTy = LdOps[i].getValueType();
+ // First combine the scalar loads to a vector
+ if (!LdTy.isVector()) {
for (--i; i >= 0; --i) {
- EVT NewLdTy = LdOps[i].getValueType();
- if (NewLdTy != LdTy) {
- // Create a larger vector
- ConcatOps[End-1] = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewLdTy,
- &ConcatOps[Idx], End - Idx);
- Idx = End - 1;
- LdTy = NewLdTy;
- }
- ConcatOps[--Idx] = LdOps[i];
+ LdTy = LdOps[i].getValueType();
+ if (LdTy.isVector())
+ break;
}
+ ConcatOps[--Idx] = BuildVectorFromScalar(DAG, LdTy, LdOps, i+1, End);
+ }
+ ConcatOps[--Idx] = LdOps[i];
+ for (--i; i >= 0; --i) {
+ EVT NewLdTy = LdOps[i].getValueType();
+ if (NewLdTy != LdTy) {
+ // Create a larger vector
+ ConcatOps[End-1] = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewLdTy,
+ &ConcatOps[Idx], End - Idx);
+ Idx = End - 1;
+ LdTy = NewLdTy;
+ }
+ ConcatOps[--Idx] = LdOps[i];
+ }
- if (WidenWidth != LdTy.getSizeInBits()*(End - Idx)) {
- // We need to fill the rest with undefs to build the vector
- unsigned NumOps = WidenWidth / LdTy.getSizeInBits();
- SmallVector<SDValue, 16> WidenOps(NumOps);
- SDValue UndefVal = DAG.getUNDEF(LdTy);
- unsigned i = 0;
- for (; i != End-Idx; ++i)
- WidenOps[i] = ConcatOps[Idx+i];
- for (; i != NumOps; ++i)
- WidenOps[i] = UndefVal;
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, &WidenOps[0],NumOps);
- } else
- return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT,
- &ConcatOps[Idx], End - Idx);
- } else // All the loads are scalar loads.
- return BuildVectorFromScalar(DAG, WidenVT, LdOps, 0, End);
+ if (WidenWidth == LdTy.getSizeInBits()*(End - Idx))
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT,
+ &ConcatOps[Idx], End - Idx);
+
+ // We need to fill the rest with undefs to build the vector
+ unsigned NumOps = WidenWidth / LdTy.getSizeInBits();
+ SmallVector<SDValue, 16> WidenOps(NumOps);
+ SDValue UndefVal = DAG.getUNDEF(LdTy);
+ {
+ unsigned i = 0;
+ for (; i != End-Idx; ++i)
+ WidenOps[i] = ConcatOps[Idx+i];
+ for (; i != NumOps; ++i)
+ WidenOps[i] = UndefVal;
+ }
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, &WidenOps[0],NumOps);
}
SDValue
@@ -2280,14 +2376,14 @@ DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVector<SDValue, 16>& LdChain,
unsigned WidenNumElts = WidenVT.getVectorNumElements();
SmallVector<SDValue, 16> Ops(WidenNumElts);
unsigned Increment = LdEltVT.getSizeInBits() / 8;
- Ops[0] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr, SV, SVOffset,
+ Ops[0] = DAG.getExtLoad(ExtType, EltVT, dl, Chain, BasePtr, SV, SVOffset,
LdEltVT, isVolatile, isNonTemporal, Align);
LdChain.push_back(Ops[0].getValue(1));
unsigned i = 0, Offset = Increment;
for (i=1; i < NumElts; ++i, Offset += Increment) {
SDValue NewBasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
BasePtr, DAG.getIntPtrConstant(Offset));
- Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr, SV,
+ Ops[i] = DAG.getExtLoad(ExtType, EltVT, dl, Chain, NewBasePtr, SV,
SVOffset + Offset, LdEltVT, isVolatile,
isNonTemporal, Align);
LdChain.push_back(Ops[i].getValue(1));
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SDDbgValue.h b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SDDbgValue.h
deleted file mode 100644
index 9e15fc9..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SDDbgValue.h
+++ /dev/null
@@ -1,67 +0,0 @@
-//===-- llvm/CodeGen/SDDbgValue.h - SD dbg_value handling--------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the SDDbgValue class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_SDDBGVALUE_H
-#define LLVM_CODEGEN_SDDBGVALUE_H
-
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/DebugLoc.h"
-
-namespace llvm {
-
-class MDNode;
-class SDNode;
-class Value;
-
-/// SDDbgValue - Holds the information from a dbg_value node through SDISel.
-/// Either Const or Node is nonzero, but not both.
-/// We do not use SDValue here to avoid including its header.
-
-class SDDbgValue {
- SDNode *Node; // valid for non-constants
- unsigned ResNo; // valid for non-constants
- Value *Const; // valid for constants
- MDNode *mdPtr;
- uint64_t Offset;
- DebugLoc DL;
-public:
- // Constructor for non-constants.
- SDDbgValue(MDNode *mdP, SDNode *N, unsigned R, uint64_t off, DebugLoc dl) :
- Node(N), ResNo(R), Const(0), mdPtr(mdP), Offset(off), DL(dl) {}
-
- // Constructor for constants.
- SDDbgValue(MDNode *mdP, Value *C, uint64_t off, DebugLoc dl) : Node(0),
- ResNo(0), Const(C), mdPtr(mdP), Offset(off), DL(dl) {}
-
- // Returns the MDNode pointer.
- MDNode *getMDPtr() { return mdPtr; }
-
- // Returns the SDNode* (valid for non-constants only).
- SDNode *getSDNode() { assert (!Const); return Node; }
-
- // Returns the ResNo (valid for non-constants only).
- unsigned getResNo() { assert (!Const); return ResNo; }
-
- // Returns the Value* for a constant (invalid for non-constants).
- Value *getConst() { assert (!Node); return Const; }
-
- // Returns the offset.
- uint64_t getOffset() { return Offset; }
-
- // Returns the DebugLoc.
- DebugLoc getDebugLoc() { return DL; }
-};
-
-} // end llvm namespace
-
-#endif
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h
new file mode 100644
index 0000000..ac2d338
--- /dev/null
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h
@@ -0,0 +1,114 @@
+//===-- llvm/CodeGen/SDNodeDbgValue.h - SelectionDAG dbg_value --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SDDbgValue class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SDNODEDBGVALUE_H
+#define LLVM_CODEGEN_SDNODEDBGVALUE_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DebugLoc.h"
+#include "llvm/System/DataTypes.h"
+
+namespace llvm {
+
+class MDNode;
+class SDNode;
+class Value;
+
+/// SDDbgValue - Holds the information from a dbg_value node through SDISel.
+/// We do not use SDValue here to avoid including its header.
+
+class SDDbgValue {
+public:
+ enum DbgValueKind {
+ SDNODE = 0, // value is the result of an expression
+ CONST = 1, // value is a constant
+ FRAMEIX = 2 // value is contents of a stack location
+ };
+private:
+ enum DbgValueKind kind;
+ union {
+ struct {
+ SDNode *Node; // valid for expressions
+ unsigned ResNo; // valid for expressions
+ } s;
+ const Value *Const; // valid for constants
+ unsigned FrameIx; // valid for stack objects
+ } u;
+ MDNode *mdPtr;
+ uint64_t Offset;
+ DebugLoc DL;
+ unsigned Order;
+ bool Invalid;
+public:
+ // Constructor for non-constants.
+ SDDbgValue(MDNode *mdP, SDNode *N, unsigned R, uint64_t off, DebugLoc dl,
+ unsigned O) : mdPtr(mdP), Offset(off), DL(dl), Order(O),
+ Invalid(false) {
+ kind = SDNODE;
+ u.s.Node = N;
+ u.s.ResNo = R;
+ }
+
+ // Constructor for constants.
+ SDDbgValue(MDNode *mdP, const Value *C, uint64_t off, DebugLoc dl,
+ unsigned O) :
+ mdPtr(mdP), Offset(off), DL(dl), Order(O), Invalid(false) {
+ kind = CONST;
+ u.Const = C;
+ }
+
+ // Constructor for frame indices.
+ SDDbgValue(MDNode *mdP, unsigned FI, uint64_t off, DebugLoc dl, unsigned O) :
+ mdPtr(mdP), Offset(off), DL(dl), Order(O), Invalid(false) {
+ kind = FRAMEIX;
+ u.FrameIx = FI;
+ }
+
+ // Returns the kind.
+ DbgValueKind getKind() { return kind; }
+
+ // Returns the MDNode pointer.
+ MDNode *getMDPtr() { return mdPtr; }
+
+ // Returns the SDNode* for a register ref
+ SDNode *getSDNode() { assert (kind==SDNODE); return u.s.Node; }
+
+ // Returns the ResNo for a register ref
+ unsigned getResNo() { assert (kind==SDNODE); return u.s.ResNo; }
+
+ // Returns the Value* for a constant
+ const Value *getConst() { assert (kind==CONST); return u.Const; }
+
+ // Returns the FrameIx for a stack object
+ unsigned getFrameIx() { assert (kind==FRAMEIX); return u.FrameIx; }
+
+ // Returns the offset.
+ uint64_t getOffset() { return Offset; }
+
+ // Returns the DebugLoc.
+ DebugLoc getDebugLoc() { return DL; }
+
+ // Returns the SDNodeOrder. This is the order of the preceding node in the
+ // input.
+ unsigned getOrder() { return Order; }
+
+ // setIsInvalidated / isInvalidated - Setter / getter of the "Invalidated"
+ // property. An SDDbgValue is invalid if the SDNode that produces the value is
+ // deleted.
+ void setIsInvalidated() { Invalid = true; }
+ bool isInvalidated() { return Invalid; }
+};
+
+} // end llvm namespace
+
+#endif
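The new SDDbgValue above uses a kind tag plus a union so one object can
describe a value produced by an SDNode, a constant, or a stack slot. A
stripped-down standalone version of that tagged-union pattern (illustrative
only; the real fields and types differ):

#include <cassert>

struct DbgOperandSketch {
  enum Kind { SDNODE, CONST, FRAMEIX } kind;
  union {
    struct { void *Node; unsigned ResNo; } s;  // SDNODE: node + result number
    const void *Const;                         // CONST: constant value
    unsigned FrameIx;                          // FRAMEIX: stack slot index
  } u;
  unsigned getFrameIx() const { assert(kind == FRAMEIX); return u.FrameIx; }
};

The asserts in the accessors mirror the real class: reading a union member
that does not match the kind tag would be a bug, so each getter checks the
tag first.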
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index ad8630a..fae2729 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -13,6 +13,7 @@
#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
+#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -432,6 +433,30 @@ static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
return N->getValueType(NumRes);
}
+/// CheckForLiveRegDef - Return true and update live register vector if the
+/// specified register def of the specified SUnit clobbers any "live" registers.
+static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
+ std::vector<SUnit*> &LiveRegDefs,
+ SmallSet<unsigned, 4> &RegAdded,
+ SmallVector<unsigned, 4> &LRegs,
+ const TargetRegisterInfo *TRI) {
+ bool Added = false;
+ if (LiveRegDefs[Reg] && LiveRegDefs[Reg] != SU) {
+ if (RegAdded.insert(Reg)) {
+ LRegs.push_back(Reg);
+ Added = true;
+ }
+ }
+ for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias)
+ if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != SU) {
+ if (RegAdded.insert(*Alias)) {
+ LRegs.push_back(*Alias);
+ Added = true;
+ }
+ }
+ return Added;
+}
+
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
@@ -446,37 +471,44 @@ bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isAssignedRegDep()) {
- unsigned Reg = I->getReg();
- if (LiveRegDefs[Reg] && LiveRegDefs[Reg] != I->getSUnit()) {
- if (RegAdded.insert(Reg))
- LRegs.push_back(Reg);
- }
- for (const unsigned *Alias = TRI->getAliasSet(Reg);
- *Alias; ++Alias)
- if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != I->getSUnit()) {
- if (RegAdded.insert(*Alias))
- LRegs.push_back(*Alias);
- }
+ CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
+ RegAdded, LRegs, TRI);
}
}
for (SDNode *Node = SU->getNode(); Node; Node = Node->getFlaggedNode()) {
+ if (Node->getOpcode() == ISD::INLINEASM) {
+ // Inline asm can clobber physical defs.
+ unsigned NumOps = Node->getNumOperands();
+ if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
+ --NumOps; // Ignore the flag operand.
+
+ for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
+ unsigned Flags =
+ cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
+ unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
+
+ ++i; // Skip the ID value.
+ if (InlineAsm::isRegDefKind(Flags) ||
+ InlineAsm::isRegDefEarlyClobberKind(Flags)) {
+ // Check for def of register or earlyclobber register.
+ for (; NumVals; --NumVals, ++i) {
+ unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
+ }
+ } else
+ i += NumVals;
+ }
+ continue;
+ }
if (!Node->isMachineOpcode())
continue;
const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
if (!TID.ImplicitDefs)
continue;
for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
- if (LiveRegDefs[*Reg] && LiveRegDefs[*Reg] != SU) {
- if (RegAdded.insert(*Reg))
- LRegs.push_back(*Reg);
- }
- for (const unsigned *Alias = TRI->getAliasSet(*Reg);
- *Alias; ++Alias)
- if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != SU) {
- if (RegAdded.insert(*Alias))
- LRegs.push_back(*Alias);
- }
+ CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
}
}
return !LRegs.empty();
@@ -535,7 +567,7 @@ void ScheduleDAGFast::ListScheduleBottomUp() {
SUnit *LRDef = LiveRegDefs[Reg];
EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
const TargetRegisterClass *RC =
- TRI->getPhysicalRegisterRegClass(Reg, VT);
+ TRI->getMinimalPhysRegClass(Reg, VT);
const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
// If cross copy register class is null, then it must be possible copy
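CheckForLiveRegDef above factors out the "does this def clobber a live register
or any of its aliases" test so it can be reused by the new inline-asm handling.
A simplified standalone version of that check (hypothetical types; the alias
set from TRI->getAliasSet() is modelled as a plain vector):

#include <set>
#include <vector>

// Record Reg, and every alias of Reg, that is currently live and defined by
// a different scheduling unit than SU.
static bool check_live_reg_def(unsigned Reg, const void *SU,
                               const std::vector<const void *> &LiveRegDefs,
                               const std::vector<unsigned> &Aliases,
                               std::set<unsigned> &RegAdded,
                               std::vector<unsigned> &LRegs) {
  bool Added = false;
  if (LiveRegDefs[Reg] && LiveRegDefs[Reg] != SU &&
      RegAdded.insert(Reg).second) {
    LRegs.push_back(Reg);
    Added = true;
  }
  for (size_t i = 0; i != Aliases.size(); ++i) {
    unsigned A = Aliases[i];
    if (LiveRegDefs[A] && LiveRegDefs[A] != SU && RegAdded.insert(A).second) {
      LRegs.push_back(A);
      Added = true;
    }
  }
  return Added;
}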
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp
index b92a672..56f5ded 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp
@@ -30,7 +30,6 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/Statistic.h"
#include <climits>
using namespace llvm;
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index 3f1766d..4c3e4e3 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -17,18 +17,19 @@
#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
+#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/ADT/PriorityQueue.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <climits>
using namespace llvm;
@@ -52,6 +53,18 @@ static RegisterScheduler
"order when possible",
createSourceListDAGScheduler);
+static RegisterScheduler
+ hybridListDAGScheduler("list-hybrid",
+ "Bottom-up register pressure aware list scheduling "
+ "which tries to balance latency and register pressure",
+ createHybridListDAGScheduler);
+
+static RegisterScheduler
+ ILPListDAGScheduler("list-ilp",
+ "Bottom-up register pressure aware list scheduling "
+ "which tries to balance ILP and register pressure",
+ createILPListDAGScheduler);
+
namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
@@ -63,6 +76,10 @@ private:
/// it is top-down.
bool isBottomUp;
+ /// NeedLatency - True if the scheduler will make use of latency information.
+ ///
+ bool NeedLatency;
+
/// AvailableQueue - The priority queue to use for the available SUnits.
SchedulingPriorityQueue *AvailableQueue;
@@ -79,9 +96,9 @@ private:
public:
ScheduleDAGRRList(MachineFunction &mf,
- bool isbottomup,
+ bool isbottomup, bool needlatency,
SchedulingPriorityQueue *availqueue)
- : ScheduleDAGSDNodes(mf), isBottomUp(isbottomup),
+ : ScheduleDAGSDNodes(mf), isBottomUp(isbottomup), NeedLatency(needlatency),
AvailableQueue(availqueue), Topo(SUnits) {
}
@@ -160,16 +177,20 @@ private:
return NewNode;
}
- /// ForceUnitLatencies - Return true, since register-pressure-reducing
- /// scheduling doesn't need actual latency information.
- bool ForceUnitLatencies() const { return true; }
+ /// ForceUnitLatencies - Register-pressure-reducing scheduling doesn't
+ /// need actual latency information but the hybrid scheduler does.
+ bool ForceUnitLatencies() const {
+ return !NeedLatency;
+ }
};
} // end anonymous namespace
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
- DEBUG(dbgs() << "********** List Scheduling **********\n");
+ DEBUG(dbgs()
+ << "********** List Scheduling BB#" << BB->getNumber()
+ << " **********\n");
NumLiveRegs = 0;
LiveRegDefs.resize(TRI->getNumRegs(), NULL);
@@ -212,6 +233,12 @@ void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
#endif
--PredSU->NumSuccsLeft;
+ if (!ForceUnitLatencies()) {
+ // Updating predecessor's height. This is now the cycle when the
+ // predecessor can be scheduled without causing a pipeline stall.
+ PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
+ }
+
// If all the node's successors are scheduled, this node is ready
// to be scheduled. Ignore the special EntrySU node.
if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
@@ -243,13 +270,20 @@ void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
- DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
+ DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
- assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
+#ifndef NDEBUG
+ if (CurCycle < SU->getHeight())
+ DEBUG(dbgs() << " Height [" << SU->getHeight() << "] pipeline stall!\n");
+#endif
+
+ // FIXME: Handle noop hazard.
SU->setHeightToAtLeast(CurCycle);
Sequence.push_back(SU);
+ AvailableQueue->ScheduledNode(SU);
+
ReleasePredecessors(SU, CurCycle);
// Release all the implicit physical register defs that are live.
@@ -268,7 +302,6 @@ void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
}
SU->isScheduled = true;
- AvailableQueue->ScheduledNode(SU);
}
/// CapturePred - This does the opposite of ReleasePred. Since SU is being
@@ -292,12 +325,10 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
DEBUG(SU->dump(this));
- AvailableQueue->UnscheduledNode(SU);
-
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
CapturePred(&*I);
- if (I->isAssignedRegDep() && SU->getHeight() == LiveRegCycles[I->getReg()]) {
+ if (I->isAssignedRegDep() && SU->getHeight() == LiveRegCycles[I->getReg()]){
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
"Physical register dependency violated?");
@@ -323,6 +354,7 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
SU->isScheduled = false;
SU->isAvailable = true;
AvailableQueue->push(SU);
+ AvailableQueue->UnscheduledNode(SU);
}
/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
@@ -338,6 +370,7 @@ void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
SU->isAvailable = false;
UnscheduleNodeBottomUp(OldSU);
--CurCycle;
+ AvailableQueue->setCurCycle(CurCycle);
}
assert(!SU->isSucc(OldSU) && "Something is wrong!");
@@ -385,7 +418,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
return NULL;
- DEBUG(dbgs() << "Unfolding SU # " << SU->NodeNum << "\n");
+ DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
assert(NewNodes.size() == 2 && "Expected a load folding node!");
N = NewNodes[1];
@@ -503,7 +536,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
SU = NewSU;
}
- DEBUG(dbgs() << "Duplicating SU # " << SU->NodeNum << "\n");
+ DEBUG(dbgs() << " Duplicating SU #" << SU->NodeNum << "\n");
NewSU = CreateClone(SU);
// New SUnit has the exact same predecessors.
@@ -647,13 +680,14 @@ bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
--NumOps; // Ignore the flag operand.
- for (unsigned i = 2; i != NumOps;) {
+ for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
unsigned Flags =
cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
- unsigned NumVals = (Flags & 0xffff) >> 3;
+ unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
++i; // Skip the ID value.
- if ((Flags & 7) == 2 || (Flags & 7) == 6) {
+ if (InlineAsm::isRegDefKind(Flags) ||
+ InlineAsm::isRegDefEarlyClobberKind(Flags)) {
// Check for def of register or earlyclobber register.
for (; NumVals; --NumVals, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
@@ -770,7 +804,7 @@ void ScheduleDAGRRList::ListScheduleBottomUp() {
SUnit *LRDef = LiveRegDefs[Reg];
EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
const TargetRegisterClass *RC =
- TRI->getPhysicalRegisterRegClass(Reg, VT);
+ TRI->getMinimalPhysRegClass(Reg, VT);
const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
// If cross copy register class is null, then it must be possible copy
@@ -784,7 +818,7 @@ void ScheduleDAGRRList::ListScheduleBottomUp() {
// Issue copies, these can be expensive cross register class copies.
SmallVector<SUnit*, 2> Copies;
InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
- DEBUG(dbgs() << "Adding an edge from SU #" << TrySU->NodeNum
+ DEBUG(dbgs() << " Adding an edge from SU #" << TrySU->NodeNum
<< " to SU #" << Copies.front()->NodeNum << "\n");
AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
/*Reg=*/0, /*isNormalMemory=*/false,
@@ -793,7 +827,7 @@ void ScheduleDAGRRList::ListScheduleBottomUp() {
NewDef = Copies.back();
}
- DEBUG(dbgs() << "Adding an edge from SU #" << NewDef->NodeNum
+ DEBUG(dbgs() << " Adding an edge from SU #" << NewDef->NodeNum
<< " to SU #" << TrySU->NodeNum << "\n");
LiveRegDefs[Reg] = NewDef;
AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
@@ -819,6 +853,7 @@ void ScheduleDAGRRList::ListScheduleBottomUp() {
if (CurSU)
ScheduleNodeBottomUp(CurSU, CurCycle);
++CurCycle;
+ AvailableQueue->setCurCycle(CurCycle);
}
// Reverse the order if it is bottom up.
@@ -887,6 +922,7 @@ void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
/// schedulers.
void ScheduleDAGRRList::ListScheduleTopDown() {
unsigned CurCycle = 0;
+ AvailableQueue->setCurCycle(CurCycle);
// Release any successors of the special Entry node.
ReleaseSuccessors(&EntrySU);
@@ -909,6 +945,7 @@ void ScheduleDAGRRList::ListScheduleTopDown() {
if (CurSU)
ScheduleNodeTopDown(CurSU, CurCycle);
++CurCycle;
+ AvailableQueue->setCurCycle(CurCycle);
}
#ifndef NDEBUG
@@ -928,7 +965,8 @@ namespace {
template<class SF>
class RegReductionPriorityQueue;
- /// Sorting functions for the Available queue.
+ /// bu_ls_rr_sort - Priority function for bottom up register pressure
+ // reduction scheduler.
struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
@@ -937,6 +975,8 @@ namespace {
bool operator()(const SUnit* left, const SUnit* right) const;
};
+ // td_ls_rr_sort - Priority function for top down register pressure reduction
+ // scheduler.
struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
@@ -945,6 +985,7 @@ namespace {
bool operator()(const SUnit* left, const SUnit* right) const;
};
+ // src_ls_rr_sort - Priority function for source order scheduler.
struct src_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
RegReductionPriorityQueue<src_ls_rr_sort> *SPQ;
src_ls_rr_sort(RegReductionPriorityQueue<src_ls_rr_sort> *spq)
@@ -954,6 +995,29 @@ namespace {
bool operator()(const SUnit* left, const SUnit* right) const;
};
+
+ // hybrid_ls_rr_sort - Priority function for hybrid scheduler.
+ struct hybrid_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
+ RegReductionPriorityQueue<hybrid_ls_rr_sort> *SPQ;
+ hybrid_ls_rr_sort(RegReductionPriorityQueue<hybrid_ls_rr_sort> *spq)
+ : SPQ(spq) {}
+ hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
+ : SPQ(RHS.SPQ) {}
+
+ bool operator()(const SUnit* left, const SUnit* right) const;
+ };
+
+ // ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
+ // scheduler.
+ struct ilp_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
+ RegReductionPriorityQueue<ilp_ls_rr_sort> *SPQ;
+ ilp_ls_rr_sort(RegReductionPriorityQueue<ilp_ls_rr_sort> *spq)
+ : SPQ(spq) {}
+ ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
+ : SPQ(RHS.SPQ) {}
+
+ bool operator()(const SUnit* left, const SUnit* right) const;
+ };
} // end anonymous namespace
/// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
@@ -988,25 +1052,51 @@ CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
namespace {
template<class SF>
class RegReductionPriorityQueue : public SchedulingPriorityQueue {
- PriorityQueue<SUnit*, std::vector<SUnit*>, SF> Queue;
- unsigned currentQueueId;
+ std::vector<SUnit*> Queue;
+ SF Picker;
+ unsigned CurQueueId;
+ bool TracksRegPressure;
protected:
// SUnits - The SUnits for the current graph.
std::vector<SUnit> *SUnits;
-
+
+ MachineFunction &MF;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
+ const TargetLowering *TLI;
ScheduleDAGRRList *scheduleDAG;
// SethiUllmanNumbers - The SethiUllman number for each node.
std::vector<unsigned> SethiUllmanNumbers;
+ /// RegPressure - Tracking current reg pressure per register class.
+ ///
+ std::vector<unsigned> RegPressure;
+
+ /// RegLimit - Tracking the number of allocatable registers per register
+ /// class.
+ std::vector<unsigned> RegLimit;
+
public:
- RegReductionPriorityQueue(const TargetInstrInfo *tii,
- const TargetRegisterInfo *tri)
- : Queue(SF(this)), currentQueueId(0),
- TII(tii), TRI(tri), scheduleDAG(NULL) {}
+ RegReductionPriorityQueue(MachineFunction &mf,
+ bool tracksrp,
+ const TargetInstrInfo *tii,
+ const TargetRegisterInfo *tri,
+ const TargetLowering *tli)
+ : Picker(this), CurQueueId(0), TracksRegPressure(tracksrp),
+ MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
+ if (TracksRegPressure) {
+ unsigned NumRC = TRI->getNumRegClasses();
+ RegLimit.resize(NumRC);
+ RegPressure.resize(NumRC);
+ std::fill(RegLimit.begin(), RegLimit.end(), 0);
+ std::fill(RegPressure.begin(), RegPressure.end(), 0);
+ for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
+ E = TRI->regclass_end(); I != E; ++I)
+ RegLimit[(*I)->getID()] = tli->getRegPressureLimit(*I, MF);
+ }
+ }
void initNodes(std::vector<SUnit> &sunits) {
SUnits = &sunits;
@@ -1033,6 +1123,7 @@ namespace {
void releaseState() {
SUnits = 0;
SethiUllmanNumbers.clear();
+ std::fill(RegPressure.begin(), RegPressure.end(), 0);
}
unsigned getNodePriority(const SUnit *SU) const {
@@ -1065,26 +1156,26 @@ namespace {
unsigned getNodeOrdering(const SUnit *SU) const {
return scheduleDAG->DAG->GetOrdering(SU->getNode());
}
-
- unsigned size() const { return Queue.size(); }
bool empty() const { return Queue.empty(); }
void push(SUnit *U) {
assert(!U->NodeQueueId && "Node in the queue already");
- U->NodeQueueId = ++currentQueueId;
- Queue.push(U);
+ U->NodeQueueId = ++CurQueueId;
+ Queue.push_back(U);
}
- void push_all(const std::vector<SUnit *> &Nodes) {
- for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
- push(Nodes[i]);
- }
-
SUnit *pop() {
if (empty()) return NULL;
- SUnit *V = Queue.top();
- Queue.pop();
+ std::vector<SUnit *>::iterator Best = Queue.begin();
+ for (std::vector<SUnit *>::iterator I = llvm::next(Queue.begin()),
+ E = Queue.end(); I != E; ++I)
+ if (Picker(*Best, *I))
+ Best = I;
+ SUnit *V = *Best;
+ if (Best != prior(Queue.end()))
+ std::swap(*Best, Queue.back());
+ Queue.pop_back();
V->NodeQueueId = 0;
return V;
}
@@ -1092,14 +1183,252 @@ namespace {
void remove(SUnit *SU) {
assert(!Queue.empty() && "Queue is empty!");
assert(SU->NodeQueueId != 0 && "Not in queue!");
- Queue.erase_one(SU);
+ std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
+ SU);
+ if (I != prior(Queue.end()))
+ std::swap(*I, Queue.back());
+ Queue.pop_back();
SU->NodeQueueId = 0;
}
+ bool HighRegPressure(const SUnit *SU) const {
+ if (!TLI)
+ return false;
+
+ for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
+ I != E; ++I) {
+ if (I->isCtrl())
+ continue;
+ SUnit *PredSU = I->getSUnit();
+ const SDNode *PN = PredSU->getNode();
+ if (!PN->isMachineOpcode()) {
+ if (PN->getOpcode() == ISD::CopyFromReg) {
+ EVT VT = PN->getValueType(0);
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ unsigned Cost = TLI->getRepRegClassCostFor(VT);
+ if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
+ return true;
+ }
+ continue;
+ }
+ unsigned POpc = PN->getMachineOpcode();
+ if (POpc == TargetOpcode::IMPLICIT_DEF)
+ continue;
+ if (POpc == TargetOpcode::EXTRACT_SUBREG) {
+ EVT VT = PN->getOperand(0).getValueType();
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ unsigned Cost = TLI->getRepRegClassCostFor(VT);
+ // Check if this increases register pressure of the specific register
+ // class to the point where it would cause spills.
+ if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
+ return true;
+ continue;
+ } else if (POpc == TargetOpcode::INSERT_SUBREG ||
+ POpc == TargetOpcode::SUBREG_TO_REG) {
+ EVT VT = PN->getValueType(0);
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ unsigned Cost = TLI->getRepRegClassCostFor(VT);
+ // Check if this increases register pressure of the specific register
+ // class to the point where it would cause spills.
+ if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
+ return true;
+ continue;
+ }
+ unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
+ for (unsigned i = 0; i != NumDefs; ++i) {
+ EVT VT = PN->getValueType(i);
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ if (RegPressure[RCId] >= RegLimit[RCId])
+ return true; // Reg pressure already high.
+ unsigned Cost = TLI->getRepRegClassCostFor(VT);
+ if (!PN->hasAnyUseOfValue(i))
+ continue;
+ // Check if this increases register pressure of the specific register
+ // class to the point where it would cause spills.
+ if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ void ScheduledNode(SUnit *SU) {
+ if (!TracksRegPressure)
+ return;
+
+ const SDNode *N = SU->getNode();
+ if (!N->isMachineOpcode()) {
+ if (N->getOpcode() != ISD::CopyToReg)
+ return;
+ } else {
+ unsigned Opc = N->getMachineOpcode();
+ if (Opc == TargetOpcode::EXTRACT_SUBREG ||
+ Opc == TargetOpcode::INSERT_SUBREG ||
+ Opc == TargetOpcode::SUBREG_TO_REG ||
+ Opc == TargetOpcode::REG_SEQUENCE ||
+ Opc == TargetOpcode::IMPLICIT_DEF)
+ return;
+ }
+
+ for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ if (I->isCtrl())
+ continue;
+ SUnit *PredSU = I->getSUnit();
+ if (PredSU->NumSuccsLeft != PredSU->NumSuccs)
+ continue;
+ const SDNode *PN = PredSU->getNode();
+ if (!PN->isMachineOpcode()) {
+ if (PN->getOpcode() == ISD::CopyFromReg) {
+ EVT VT = PN->getValueType(0);
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ }
+ continue;
+ }
+ unsigned POpc = PN->getMachineOpcode();
+ if (POpc == TargetOpcode::IMPLICIT_DEF)
+ continue;
+ if (POpc == TargetOpcode::EXTRACT_SUBREG) {
+ EVT VT = PN->getOperand(0).getValueType();
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ continue;
+ } else if (POpc == TargetOpcode::INSERT_SUBREG ||
+ POpc == TargetOpcode::SUBREG_TO_REG) {
+ EVT VT = PN->getValueType(0);
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ continue;
+ }
+ unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
+ for (unsigned i = 0; i != NumDefs; ++i) {
+ EVT VT = PN->getValueType(i);
+ if (!PN->hasAnyUseOfValue(i))
+ continue;
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ }
+ }
+
+ // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
+ // may transfer data dependencies to CopyToReg.
+ if (SU->NumSuccs && N->isMachineOpcode()) {
+ unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
+ for (unsigned i = 0; i != NumDefs; ++i) {
+ EVT VT = N->getValueType(i);
+ if (!N->hasAnyUseOfValue(i))
+ continue;
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
+ // Register pressure tracking is imprecise. This can happen.
+ RegPressure[RCId] = 0;
+ else
+ RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
+ }
+ }
+
+ dumpRegPressure();
+ }
+
+ void UnscheduledNode(SUnit *SU) {
+ if (!TracksRegPressure)
+ return;
+
+ const SDNode *N = SU->getNode();
+ if (!N->isMachineOpcode()) {
+ if (N->getOpcode() != ISD::CopyToReg)
+ return;
+ } else {
+ unsigned Opc = N->getMachineOpcode();
+ if (Opc == TargetOpcode::EXTRACT_SUBREG ||
+ Opc == TargetOpcode::INSERT_SUBREG ||
+ Opc == TargetOpcode::SUBREG_TO_REG ||
+ Opc == TargetOpcode::REG_SEQUENCE ||
+ Opc == TargetOpcode::IMPLICIT_DEF)
+ return;
+ }
+
+ for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ if (I->isCtrl())
+ continue;
+ SUnit *PredSU = I->getSUnit();
+ if (PredSU->NumSuccsLeft != PredSU->NumSuccs)
+ continue;
+ const SDNode *PN = PredSU->getNode();
+ if (!PN->isMachineOpcode()) {
+ if (PN->getOpcode() == ISD::CopyFromReg) {
+ EVT VT = PN->getValueType(0);
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ }
+ continue;
+ }
+ unsigned POpc = PN->getMachineOpcode();
+ if (POpc == TargetOpcode::IMPLICIT_DEF)
+ continue;
+ if (POpc == TargetOpcode::EXTRACT_SUBREG) {
+ EVT VT = PN->getOperand(0).getValueType();
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ continue;
+ } else if (POpc == TargetOpcode::INSERT_SUBREG ||
+ POpc == TargetOpcode::SUBREG_TO_REG) {
+ EVT VT = PN->getValueType(0);
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ continue;
+ }
+ unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
+ for (unsigned i = 0; i != NumDefs; ++i) {
+ EVT VT = PN->getValueType(i);
+ if (!PN->hasAnyUseOfValue(i))
+ continue;
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
+ // Register pressure tracking is imprecise. This can happen.
+ RegPressure[RCId] = 0;
+ else
+ RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
+ }
+ }
+
+ // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
+ // may transfer data dependencies to CopyToReg.
+ if (SU->NumSuccs && N->isMachineOpcode()) {
+ unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
+ for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
+ EVT VT = N->getValueType(i);
+ if (VT == MVT::Flag || VT == MVT::Other)
+ continue;
+ if (!N->hasAnyUseOfValue(i))
+ continue;
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ }
+ }
+
+ dumpRegPressure();
+ }
+
void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
scheduleDAG = scheduleDag;
}
+ void dumpRegPressure() const {
+ for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
+ E = TRI->regclass_end(); I != E; ++I) {
+ const TargetRegisterClass *RC = *I;
+ unsigned Id = RC->getID();
+ unsigned RP = RegPressure[Id];
+ if (!RP) continue;
+ DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
+ << '\n');
+ }
+ }
+
protected:
bool canClobber(const SUnit *SU, const SUnit *Op);
void AddPseudoTwoAddrDeps();
@@ -1115,6 +1444,12 @@ namespace {
typedef RegReductionPriorityQueue<src_ls_rr_sort>
SrcRegReductionPriorityQueue;
+
+ typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
+ HybridBURRPriorityQueue;
+
+ typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
+ ILPBURRPriorityQueue;
}
/// closestSucc - Returns the scheduled cycle of the successor which is
@@ -1201,7 +1536,7 @@ bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
}
// Source order, otherwise bottom up.
-bool src_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const{
+bool src_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
unsigned LOrder = SPQ->getNodeOrdering(left);
unsigned ROrder = SPQ->getNodeOrdering(right);
@@ -1213,6 +1548,69 @@ bool src_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const{
return BURRSort(left, right, SPQ);
}
+bool hybrid_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const{
+ bool LHigh = SPQ->HighRegPressure(left);
+ bool RHigh = SPQ->HighRegPressure(right);
+ // Avoid causing spills. If register pressure is high, schedule for
+ // register pressure reduction.
+ if (LHigh && !RHigh)
+ return true;
+ else if (!LHigh && RHigh)
+ return false;
+ else if (!LHigh && !RHigh) {
+ // Low register pressure situation, schedule for latency if possible.
+ bool LStall = left->SchedulingPref == Sched::Latency &&
+ SPQ->getCurCycle() < left->getHeight();
+ bool RStall = right->SchedulingPref == Sched::Latency &&
+ SPQ->getCurCycle() < right->getHeight();
+ // If scheduling one of the node will cause a pipeline stall, delay it.
+ // If scheduling either one of the node will cause a pipeline stall, sort
+ // them according to their height.
+ // If neither will cause a pipeline stall, try to reduce register pressure.
+ if (LStall) {
+ if (!RStall)
+ return true;
+ if (left->getHeight() != right->getHeight())
+ return left->getHeight() > right->getHeight();
+ } else if (RStall)
+ return false;
+
+ // If either node is scheduling for latency, sort them by height and latency
+ // first.
+ if (left->SchedulingPref == Sched::Latency ||
+ right->SchedulingPref == Sched::Latency) {
+ if (left->getHeight() != right->getHeight())
+ return left->getHeight() > right->getHeight();
+ if (left->Latency != right->Latency)
+ return left->Latency > right->Latency;
+ }
+ }
+
+ return BURRSort(left, right, SPQ);
+}
+
+bool ilp_ls_rr_sort::operator()(const SUnit *left,
+ const SUnit *right) const {
+ bool LHigh = SPQ->HighRegPressure(left);
+ bool RHigh = SPQ->HighRegPressure(right);
+ // Avoid causing spills. If register pressure is high, schedule for
+ // register pressure reduction.
+ if (LHigh && !RHigh)
+ return true;
+ else if (!LHigh && RHigh)
+ return false;
+ else if (!LHigh && !RHigh) {
+ // Low register pressure situation, schedule to maximize instruction level
+ // parallelism.
+ if (left->NumPreds > right->NumPreds)
+ return false;
+ else if (left->NumPreds < right->NumPreds)
+ return false;
+ }
+
+ return BURRSort(left, right, SPQ);
+}
+
template<class SF>
bool
RegReductionPriorityQueue<SF>::canClobber(const SUnit *SU, const SUnit *Op) {
@@ -1377,8 +1775,8 @@ void RegReductionPriorityQueue<SF>::PrescheduleNodesWithMultipleUses() {
// Ok, the transformation is safe and the heuristics suggest it is
// profitable. Update the graph.
- DEBUG(dbgs() << "Prescheduling SU # " << SU->NodeNum
- << " next to PredSU # " << PredSU->NodeNum
+ DEBUG(dbgs() << " Prescheduling SU #" << SU->NodeNum
+ << " next to PredSU #" << PredSU->NodeNum
<< " to guide scheduling in the presence of multiple uses\n");
for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
SDep Edge = PredSU->Succs[i];
@@ -1467,7 +1865,7 @@ void RegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
(hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
(!SU->isCommutable && SuccSU->isCommutable)) &&
!scheduleDAG->IsReachable(SuccSU, SU)) {
- DEBUG(dbgs() << "Adding a pseudo-two-addr edge from SU # "
+ DEBUG(dbgs() << " Adding a pseudo-two-addr edge from SU #"
<< SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
/*Reg=*/0, /*isNormalMemory=*/false,
@@ -1559,10 +1957,9 @@ llvm::createBURRListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
const TargetInstrInfo *TII = TM.getInstrInfo();
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
- BURegReductionPriorityQueue *PQ = new BURegReductionPriorityQueue(TII, TRI);
-
- ScheduleDAGRRList *SD =
- new ScheduleDAGRRList(*IS->MF, true, PQ);
+ BURegReductionPriorityQueue *PQ =
+ new BURegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
+ ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, false, PQ);
PQ->setScheduleDAG(SD);
return SD;
}
@@ -1573,10 +1970,9 @@ llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
const TargetInstrInfo *TII = TM.getInstrInfo();
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
- TDRegReductionPriorityQueue *PQ = new TDRegReductionPriorityQueue(TII, TRI);
-
- ScheduleDAGRRList *SD =
- new ScheduleDAGRRList(*IS->MF, false, PQ);
+ TDRegReductionPriorityQueue *PQ =
+ new TDRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
+ ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, false, PQ);
PQ->setScheduleDAG(SD);
return SD;
}
@@ -1587,10 +1983,37 @@ llvm::createSourceListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
const TargetInstrInfo *TII = TM.getInstrInfo();
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
- SrcRegReductionPriorityQueue *PQ = new SrcRegReductionPriorityQueue(TII, TRI);
+ SrcRegReductionPriorityQueue *PQ =
+ new SrcRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
+ ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, false, PQ);
+ PQ->setScheduleDAG(SD);
+ return SD;
+}
- ScheduleDAGRRList *SD =
- new ScheduleDAGRRList(*IS->MF, true, PQ);
+llvm::ScheduleDAGSDNodes *
+llvm::createHybridListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
+ const TargetMachine &TM = IS->TM;
+ const TargetInstrInfo *TII = TM.getInstrInfo();
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ const TargetLowering *TLI = &IS->getTargetLowering();
+
+ HybridBURRPriorityQueue *PQ =
+ new HybridBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
+ ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, true, PQ);
+ PQ->setScheduleDAG(SD);
+ return SD;
+}
+
+llvm::ScheduleDAGSDNodes *
+llvm::createILPListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
+ const TargetMachine &TM = IS->TM;
+ const TargetInstrInfo *TII = TM.getInstrInfo();
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ const TargetLowering *TLI = &IS->getTargetLowering();
+
+ ILPBURRPriorityQueue *PQ =
+ new ILPBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
+ ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, true, PQ);
PQ->setScheduleDAG(SD);
return SD;
}
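[Note: the bulk of the ScheduleDAGRRList.cpp changes above add two new bottom-up schedulers, "list-hybrid" and "list-ilp". Both track register pressure per register class (RegPressure/RegLimit, updated in ScheduledNode/UnscheduledNode) and only fall back to latency- or ILP-oriented ordering while pressure is low. The following is a rough standalone model of the hybrid_ls_rr_sort decision, not LLVM code; Cand and its fields are invented stand-ins for SUnit and the priority-queue state.]

#include <cstdio>

struct Cand {
  unsigned Height;         // bottom-up height in cycles
  unsigned Latency;        // node latency
  unsigned RegCost;        // registers this node's defs keep live
  bool     PrefersLatency; // target asked for latency scheduling
};

// True when A should be scheduled before B at cycle Cur with RegsLeft
// registers still available in the relevant class.
static bool pickBefore(const Cand &A, const Cand &B,
                       unsigned Cur, unsigned RegsLeft) {
  bool AHigh = A.RegCost >= RegsLeft;   // scheduling A would overflow the class
  bool BHigh = B.RegCost >= RegsLeft;
  if (AHigh != BHigh)
    return !AHigh;                      // high pressure: reduce pressure first
  if (!AHigh) {                         // low pressure: look at latency
    bool AStall = A.PrefersLatency && Cur < A.Height;
    bool BStall = B.PrefersLatency && Cur < B.Height;
    if (AStall != BStall)
      return !AStall;                   // avoid creating a pipeline stall
    if (A.Height != B.Height)
      return A.Height > B.Height;       // more critical (taller) node first
    if (A.Latency != B.Latency)
      return A.Latency > B.Latency;
  }
  return A.RegCost < B.RegCost;         // fallback: cheaper on registers
}

int main() {
  Cand Ready[3] = { {4, 1, 2, true}, {2, 3, 1, false}, {6, 2, 3, true} };
  unsigned Best = 0;
  for (unsigned i = 1; i < 3; ++i)      // linear scan, like the new pop() above
    if (pickBefore(Ready[i], Ready[Best], /*Cur=*/3, /*RegsLeft=*/4))
      Best = i;
  std::printf("picked the candidate with height %u\n", Ready[Best].Height);
  return 0;
}

[The real comparator additionally falls back to BURRSort, i.e. the Sethi-Ullman numbers, on ties; the sketch collapses that into the final RegCost comparison.]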
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
index 06e7b8c..f1bf82a 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -13,15 +13,18 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "pre-RA-sched"
+#include "SDNodeDbgValue.h"
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
@@ -42,6 +45,29 @@ void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb,
ScheduleDAG::Run(bb, insertPos);
}
+/// NewSUnit - Creates a new SUnit and return a ptr to it.
+///
+SUnit *ScheduleDAGSDNodes::NewSUnit(SDNode *N) {
+#ifndef NDEBUG
+ const SUnit *Addr = 0;
+ if (!SUnits.empty())
+ Addr = &SUnits[0];
+#endif
+ SUnits.push_back(SUnit(N, (unsigned)SUnits.size()));
+ assert((Addr == 0 || Addr == &SUnits[0]) &&
+ "SUnits std::vector reallocated on the fly!");
+ SUnits.back().OrigNode = &SUnits.back();
+ SUnit *SU = &SUnits.back();
+ const TargetLowering &TLI = DAG->getTargetLoweringInfo();
+ if (!N ||
+ (N->isMachineOpcode() &&
+ N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF))
+ SU->SchedulingPref = Sched::None;
+ else
+ SU->SchedulingPref = TLI.getSchedulingPreference(N);
+ return SU;
+}
+
SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
SUnit *SU = NewSUnit(Old->getNode());
SU->OrigNode = Old->OrigNode;
@@ -50,6 +76,7 @@ SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
SU->isCommutable = Old->isCommutable;
SU->hasPhysRegDefs = Old->hasPhysRegDefs;
SU->hasPhysRegClobbers = Old->hasPhysRegClobbers;
+ SU->SchedulingPref = Old->SchedulingPref;
Old->isCloned = true;
return SU;
}
@@ -75,7 +102,7 @@ static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
PhysReg = Reg;
const TargetRegisterClass *RC =
- TRI->getPhysicalRegisterRegClass(Reg, Def->getValueType(ResNo));
+ TRI->getMinimalPhysRegClass(Reg, Def->getValueType(ResNo));
Cost = RC->getCopyCost();
}
}
@@ -84,17 +111,42 @@ static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
static void AddFlags(SDNode *N, SDValue Flag, bool AddFlag,
SelectionDAG *DAG) {
SmallVector<EVT, 4> VTs;
- for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
- VTs.push_back(N->getValueType(i));
+ SDNode *FlagDestNode = Flag.getNode();
+
+ // Don't add a flag from a node to itself.
+ if (FlagDestNode == N) return;
+
+ // Don't add a flag to something which already has a flag.
+ if (N->getValueType(N->getNumValues() - 1) == MVT::Flag) return;
+
+ for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
+ VTs.push_back(N->getValueType(I));
+
if (AddFlag)
VTs.push_back(MVT::Flag);
+
SmallVector<SDValue, 4> Ops;
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- Ops.push_back(N->getOperand(i));
- if (Flag.getNode())
+ for (unsigned I = 0, E = N->getNumOperands(); I != E; ++I)
+ Ops.push_back(N->getOperand(I));
+
+ if (FlagDestNode)
Ops.push_back(Flag);
+
SDVTList VTList = DAG->getVTList(&VTs[0], VTs.size());
+ MachineSDNode::mmo_iterator Begin = 0, End = 0;
+ MachineSDNode *MN = dyn_cast<MachineSDNode>(N);
+
+ // Store memory references.
+ if (MN) {
+ Begin = MN->memoperands_begin();
+ End = MN->memoperands_end();
+ }
+
DAG->MorphNodeTo(N, N->getOpcode(), VTList, &Ops[0], Ops.size());
+
+ // Reset the memory references
+ if (MN)
+ MN->setMemRefs(Begin, End);
}
/// ClusterNeighboringLoads - Force nearby loads together by "flagging" them.
@@ -102,98 +154,98 @@ static void AddFlags(SDNode *N, SDValue Flag, bool AddFlag,
/// offsets are not far apart (target specific), it add MVT::Flag inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
-void ScheduleDAGSDNodes::ClusterNeighboringLoads() {
+void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
+ SDNode *Chain = 0;
+ unsigned NumOps = Node->getNumOperands();
+ if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
+ Chain = Node->getOperand(NumOps-1).getNode();
+ if (!Chain)
+ return;
+
+ // Look for other loads of the same chain. Find loads that are loading from
+ // the same base pointer and different offsets.
SmallPtrSet<SDNode*, 16> Visited;
SmallVector<int64_t, 4> Offsets;
DenseMap<long long, SDNode*> O2SMap; // Map from offset to SDNode.
- for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
- E = DAG->allnodes_end(); NI != E; ++NI) {
- SDNode *Node = &*NI;
- if (!Node || !Node->isMachineOpcode())
+ bool Cluster = false;
+ SDNode *Base = Node;
+ for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
+ I != E; ++I) {
+ SDNode *User = *I;
+ if (User == Node || !Visited.insert(User))
continue;
-
- unsigned Opc = Node->getMachineOpcode();
- const TargetInstrDesc &TID = TII->get(Opc);
- if (!TID.mayLoad())
+ int64_t Offset1, Offset2;
+ if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
+ Offset1 == Offset2)
+ // FIXME: Should be ok if they addresses are identical. But earlier
+ // optimizations really should have eliminated one of the loads.
continue;
+ if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
+ Offsets.push_back(Offset1);
+ O2SMap.insert(std::make_pair(Offset2, User));
+ Offsets.push_back(Offset2);
+ if (Offset2 < Offset1)
+ Base = User;
+ Cluster = true;
+ }
- SDNode *Chain = 0;
- unsigned NumOps = Node->getNumOperands();
- if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
- Chain = Node->getOperand(NumOps-1).getNode();
- if (!Chain)
- continue;
+ if (!Cluster)
+ return;
- // Look for other loads of the same chain. Find loads that are loading from
- // the same base pointer and different offsets.
- Visited.clear();
- Offsets.clear();
- O2SMap.clear();
- bool Cluster = false;
- SDNode *Base = Node;
- int64_t BaseOffset;
- for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
- I != E; ++I) {
- SDNode *User = *I;
- if (User == Node || !Visited.insert(User))
- continue;
- int64_t Offset1, Offset2;
- if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
- Offset1 == Offset2)
- // FIXME: Should be ok if they addresses are identical. But earlier
- // optimizations really should have eliminated one of the loads.
- continue;
- if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
- Offsets.push_back(Offset1);
- O2SMap.insert(std::make_pair(Offset2, User));
- Offsets.push_back(Offset2);
- if (Offset2 < Offset1) {
- Base = User;
- BaseOffset = Offset2;
- } else {
- BaseOffset = Offset1;
- }
- Cluster = true;
- }
+ // Sort them in increasing order.
+ std::sort(Offsets.begin(), Offsets.end());
+
+ // Check if the loads are close enough.
+ SmallVector<SDNode*, 4> Loads;
+ unsigned NumLoads = 0;
+ int64_t BaseOff = Offsets[0];
+ SDNode *BaseLoad = O2SMap[BaseOff];
+ Loads.push_back(BaseLoad);
+ for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
+ int64_t Offset = Offsets[i];
+ SDNode *Load = O2SMap[Offset];
+ if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,NumLoads))
+ break; // Stop right here. Ignore loads that are further away.
+ Loads.push_back(Load);
+ ++NumLoads;
+ }
- if (!Cluster)
- continue;
+ if (NumLoads == 0)
+ return;
- // Sort them in increasing order.
- std::sort(Offsets.begin(), Offsets.end());
-
- // Check if the loads are close enough.
- SmallVector<SDNode*, 4> Loads;
- unsigned NumLoads = 0;
- int64_t BaseOff = Offsets[0];
- SDNode *BaseLoad = O2SMap[BaseOff];
- Loads.push_back(BaseLoad);
- for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
- int64_t Offset = Offsets[i];
- SDNode *Load = O2SMap[Offset];
- if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,
- NumLoads))
- break; // Stop right here. Ignore loads that are further away.
- Loads.push_back(Load);
- ++NumLoads;
- }
+ // Cluster loads by adding MVT::Flag outputs and inputs. This also
+ // ensure they are scheduled in order of increasing addresses.
+ SDNode *Lead = Loads[0];
+ AddFlags(Lead, SDValue(0, 0), true, DAG);
+
+ SDValue InFlag = SDValue(Lead, Lead->getNumValues() - 1);
+ for (unsigned I = 1, E = Loads.size(); I != E; ++I) {
+ bool OutFlag = I < E - 1;
+ SDNode *Load = Loads[I];
+
+ AddFlags(Load, InFlag, OutFlag, DAG);
+
+ if (OutFlag)
+ InFlag = SDValue(Load, Load->getNumValues() - 1);
- if (NumLoads == 0)
+ ++LoadsClustered;
+ }
+}
+
+/// ClusterNodes - Cluster certain nodes which should be scheduled together.
+///
+void ScheduleDAGSDNodes::ClusterNodes() {
+ for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
+ E = DAG->allnodes_end(); NI != E; ++NI) {
+ SDNode *Node = &*NI;
+ if (!Node || !Node->isMachineOpcode())
continue;
- // Cluster loads by adding MVT::Flag outputs and inputs. This also
- // ensure they are scheduled in order of increasing addresses.
- SDNode *Lead = Loads[0];
- AddFlags(Lead, SDValue(0,0), true, DAG);
- SDValue InFlag = SDValue(Lead, Lead->getNumValues()-1);
- for (unsigned i = 1, e = Loads.size(); i != e; ++i) {
- bool OutFlag = i < e-1;
- SDNode *Load = Loads[i];
- AddFlags(Load, InFlag, OutFlag, DAG);
- if (OutFlag)
- InFlag = SDValue(Load, Load->getNumValues()-1);
- ++LoadsClustered;
- }
+ unsigned Opc = Node->getMachineOpcode();
+ const TargetInstrDesc &TID = TII->get(Opc);
+ if (TID.mayLoad())
+ // Cluster loads from "near" addresses into combined SUnits.
+ ClusterNeighboringLoads(Node);
}
}
@@ -215,9 +267,6 @@ void ScheduleDAGSDNodes::BuildSchedUnits() {
// This is a temporary workaround.
SUnits.reserve(NumNodes * 2);
- // Check to see if the scheduler cares about latencies.
- bool UnitLatencies = ForceUnitLatencies();
-
// Add all nodes in depth first order.
SmallVector<SDNode*, 64> Worklist;
SmallPtrSet<SDNode*, 64> Visited;
@@ -280,10 +329,7 @@ void ScheduleDAGSDNodes::BuildSchedUnits() {
N->setNodeId(NodeSUnit->NodeNum);
// Assign the Latency field of NodeSUnit using target-provided information.
- if (UnitLatencies)
- NodeSUnit->Latency = 1;
- else
- ComputeLatency(NodeSUnit);
+ ComputeLatency(NodeSUnit);
}
}
@@ -348,11 +394,13 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
if (Cost >= 0)
PhysReg = 0;
- const SDep& dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
- OpSU->Latency, PhysReg);
+ // If this is a ctrl dep, latency is 1.
+ unsigned OpLatency = isChain ? 1 : OpSU->Latency;
+ const SDep &dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
+ OpLatency, PhysReg);
if (!isChain && !UnitLatencies) {
- ComputeOperandLatency(OpSU, SU, (SDep &)dep);
- ST.adjustSchedDependency(OpSU, SU, (SDep &)dep);
+ ComputeOperandLatency(OpN, N, i, const_cast<SDep &>(dep));
+ ST.adjustSchedDependency(OpSU, SU, const_cast<SDep &>(dep));
}
SU->addPred(dep);
@@ -366,8 +414,8 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
/// excludes nodes that aren't interesting to scheduling, and represents
/// flagged together nodes with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
- // Cluster loads from "near" addresses into combined SUnits.
- ClusterNeighboringLoads();
+ // Cluster certain nodes which should be scheduled together.
+ ClusterNodes();
// Populate the SUnits array.
BuildSchedUnits();
// Compute all the scheduling dependencies between nodes.
@@ -375,7 +423,17 @@ void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
}
void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
+ // Check to see if the scheduler cares about latencies.
+ if (ForceUnitLatencies()) {
+ SU->Latency = 1;
+ return;
+ }
+
const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
+ if (InstrItins.isEmpty()) {
+ SU->Latency = 1;
+ return;
+ }
// Compute the latency for the node. We use the sum of the latencies for
// all nodes flagged together into this SUnit.
@@ -387,6 +445,40 @@ void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
}
}
+void ScheduleDAGSDNodes::ComputeOperandLatency(SDNode *Def, SDNode *Use,
+ unsigned OpIdx, SDep& dep) const{
+ // Check to see if the scheduler cares about latencies.
+ if (ForceUnitLatencies())
+ return;
+
+ const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
+ if (InstrItins.isEmpty())
+ return;
+
+ if (dep.getKind() != SDep::Data)
+ return;
+
+ unsigned DefIdx = Use->getOperand(OpIdx).getResNo();
+ if (Def->isMachineOpcode()) {
+ const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
+ if (DefIdx >= II.getNumDefs())
+ return;
+ int DefCycle = InstrItins.getOperandCycle(II.getSchedClass(), DefIdx);
+ if (DefCycle < 0)
+ return;
+ int UseCycle = 1;
+ if (Use->isMachineOpcode()) {
+ const unsigned UseClass = TII->get(Use->getMachineOpcode()).getSchedClass();
+ UseCycle = InstrItins.getOperandCycle(UseClass, OpIdx);
+ }
+ if (UseCycle >= 0) {
+ int Latency = DefCycle - UseCycle + 1;
+ if (Latency >= 0)
+ dep.setLatency(Latency);
+ }
+ }
+}
+
void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
if (!SU->getNode()) {
dbgs() << "PHYS REG COPY\n";
@@ -406,12 +498,77 @@ void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
}
}
+namespace {
+ struct OrderSorter {
+ bool operator()(const std::pair<unsigned, MachineInstr*> &A,
+ const std::pair<unsigned, MachineInstr*> &B) {
+ return A.first < B.first;
+ }
+ };
+}
+
+// ProcessSourceNode - Process nodes with source order numbers. These are added
+// to a vector which EmitSchedule uses to determine how to insert dbg_value
+// instructions in the right order.
+static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
+ InstrEmitter &Emitter,
+ DenseMap<SDValue, unsigned> &VRBaseMap,
+ SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
+ SmallSet<unsigned, 8> &Seen) {
+ unsigned Order = DAG->GetOrdering(N);
+ if (!Order || !Seen.insert(Order))
+ return;
+
+ MachineBasicBlock *BB = Emitter.getBlock();
+ if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI()) {
+ // Did not insert any instruction.
+ Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
+ return;
+ }
+
+ Orders.push_back(std::make_pair(Order, prior(Emitter.getInsertPos())));
+ if (!N->getHasDebugValue())
+ return;
+ // Opportunistically insert immediate dbg_value uses, i.e. those with source
+ // order number right after the N.
+ MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
+ SmallVector<SDDbgValue*,2> &DVs = DAG->GetDbgValues(N);
+ for (unsigned i = 0, e = DVs.size(); i != e; ++i) {
+ if (DVs[i]->isInvalidated())
+ continue;
+ unsigned DVOrder = DVs[i]->getOrder();
+ if (DVOrder == ++Order) {
+ MachineInstr *DbgMI = Emitter.EmitDbgValue(DVs[i], VRBaseMap);
+ if (DbgMI) {
+ Orders.push_back(std::make_pair(DVOrder, DbgMI));
+ BB->insert(InsertPos, DbgMI);
+ }
+ DVs[i]->setIsInvalidated();
+ }
+ }
+}
+
+
/// EmitSchedule - Emit the machine code in scheduled order.
-MachineBasicBlock *ScheduleDAGSDNodes::
-EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
+MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
InstrEmitter Emitter(BB, InsertPos);
DenseMap<SDValue, unsigned> VRBaseMap;
DenseMap<SUnit*, unsigned> CopyVRBaseMap;
+ SmallVector<std::pair<unsigned, MachineInstr*>, 32> Orders;
+ SmallSet<unsigned, 8> Seen;
+ bool HasDbg = DAG->hasDebugValues();
+
+ // If this is the first BB, emit byval parameter dbg_value's.
+ if (HasDbg && BB->getParent()->begin() == MachineFunction::iterator(BB)) {
+ SDDbgInfo::DbgIterator PDI = DAG->ByvalParmDbgBegin();
+ SDDbgInfo::DbgIterator PDE = DAG->ByvalParmDbgEnd();
+ for (; PDI != PDE; ++PDI) {
+ MachineInstr *DbgMI= Emitter.EmitDbgValue(*PDI, VRBaseMap);
+ if (DbgMI)
+ BB->insert(InsertPos, DbgMI);
+ }
+ }
+
for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
SUnit *SU = Sequence[i];
if (!SU) {
@@ -433,12 +590,80 @@ EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
N = N->getFlaggedNode())
FlaggedNodes.push_back(N);
while (!FlaggedNodes.empty()) {
+ SDNode *N = FlaggedNodes.back();
Emitter.EmitNode(FlaggedNodes.back(), SU->OrigNode != SU, SU->isCloned,
- VRBaseMap, EM);
+ VRBaseMap);
+ // Remember the source order of the inserted instruction.
+ if (HasDbg)
+ ProcessSourceNode(N, DAG, Emitter, VRBaseMap, Orders, Seen);
FlaggedNodes.pop_back();
}
Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
- VRBaseMap, EM);
+ VRBaseMap);
+ // Remember the source order of the inserted instruction.
+ if (HasDbg)
+ ProcessSourceNode(SU->getNode(), DAG, Emitter, VRBaseMap, Orders,
+ Seen);
+ }
+
+ // Insert all the dbg_values which have not already been inserted in source
+ // order sequence.
+ if (HasDbg) {
+ MachineBasicBlock::iterator BBBegin = BB->getFirstNonPHI();
+
+ // Sort the source order instructions and use the order to insert debug
+ // values.
+ std::sort(Orders.begin(), Orders.end(), OrderSorter());
+
+ SDDbgInfo::DbgIterator DI = DAG->DbgBegin();
+ SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
+ // Now emit the rest according to source order.
+ unsigned LastOrder = 0;
+ for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
+ unsigned Order = Orders[i].first;
+ MachineInstr *MI = Orders[i].second;
+ // Insert all SDDbgValue's whose order(s) are before "Order".
+ if (!MI)
+ continue;
+#ifndef NDEBUG
+ unsigned LastDIOrder = 0;
+#endif
+ for (; DI != DE &&
+ (*DI)->getOrder() >= LastOrder && (*DI)->getOrder() < Order; ++DI) {
+#ifndef NDEBUG
+ assert((*DI)->getOrder() >= LastDIOrder &&
+ "SDDbgValue nodes must be in source order!");
+ LastDIOrder = (*DI)->getOrder();
+#endif
+ if ((*DI)->isInvalidated())
+ continue;
+ MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
+ if (DbgMI) {
+ if (!LastOrder)
+ // Insert to start of the BB (after PHIs).
+ BB->insert(BBBegin, DbgMI);
+ else {
+ // Insert at the instruction, which may be in a different
+ // block, if the block was split by a custom inserter.
+ MachineBasicBlock::iterator Pos = MI;
+ MI->getParent()->insert(llvm::next(Pos), DbgMI);
+ }
+ }
+ }
+ LastOrder = Order;
+ }
+ // Add trailing DbgValue's before the terminator. FIXME: May want to add
+ // some of them before one or more conditional branches?
+ while (DI != DE) {
+ MachineBasicBlock *InsertBB = Emitter.getBlock();
+ MachineBasicBlock::iterator Pos= Emitter.getBlock()->getFirstTerminator();
+ if (!(*DI)->isInvalidated()) {
+ MachineInstr *DbgMI= Emitter.EmitDbgValue(*DI, VRBaseMap);
+ if (DbgMI)
+ InsertBB->insert(Pos, DbgMI);
+ }
+ ++DI;
+ }
}
BB = Emitter.getBlock();
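[Note: EmitSchedule above now records, for each emitted machine instruction, the IR source-order number of the SDNode it came from, sorts those (order, instruction) pairs, and threads the not-yet-emitted dbg_value nodes back in between them so debug info lands next to the code it describes. The following is a simplified standalone model of that interleaving, not LLVM code; Entry and the order numbers are invented, and block/insert-position handling is omitted.]

#include <algorithm>
#include <cstdio>
#include <vector>

struct Entry { unsigned Order; const char *Text; };

static bool byOrder(const Entry &A, const Entry &B) { return A.Order < B.Order; }

int main() {
  // Instructions leave the scheduler in scheduling order; each remembers the
  // source-order number of the node it was emitted from.
  Entry Emitted[3] = { {30, "mul"}, {10, "load"}, {20, "add"} };
  Entry Dbg[2]     = { {15, "DBG_VALUE x"}, {25, "DBG_VALUE y"} }; // sorted by order

  std::vector<Entry> Orders(Emitted, Emitted + 3);
  std::sort(Orders.begin(), Orders.end(), byOrder);

  unsigned DI = 0, LastOrder = 0;
  for (unsigned i = 0; i < Orders.size(); ++i) {
    // Emit every debug value whose source order falls before this instruction.
    while (DI < 2 && Dbg[DI].Order >= LastOrder && Dbg[DI].Order < Orders[i].Order)
      std::printf("  %s\n", Dbg[DI++].Text);
    std::printf("%s (order %u)\n", Orders[i].Text, Orders[i].Order);
    LastOrder = Orders[i].Order;
  }
  while (DI < 2)                         // trailing debug values
    std::printf("  %s\n", Dbg[DI++].Text);
  return 0;
}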
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
index 6b829b6..842fc8c 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
@@ -59,24 +59,14 @@ namespace llvm {
if (isa<JumpTableSDNode>(Node)) return true;
if (isa<ExternalSymbolSDNode>(Node)) return true;
if (isa<BlockAddressSDNode>(Node)) return true;
- if (Node->getOpcode() == ISD::EntryToken) return true;
+ if (Node->getOpcode() == ISD::EntryToken ||
+ isa<MDNodeSDNode>(Node)) return true;
return false;
}
/// NewSUnit - Creates a new SUnit and return a ptr to it.
///
- SUnit *NewSUnit(SDNode *N) {
-#ifndef NDEBUG
- const SUnit *Addr = 0;
- if (!SUnits.empty())
- Addr = &SUnits[0];
-#endif
- SUnits.push_back(SUnit(N, (unsigned)SUnits.size()));
- assert((Addr == 0 || Addr == &SUnits[0]) &&
- "SUnits std::vector reallocated on the fly!");
- SUnits.back().OrigNode = &SUnits.back();
- return &SUnits.back();
- }
+ SUnit *NewSUnit(SDNode *N);
/// Clone - Creates a clone of the specified SUnit. It does not copy the
/// predecessors / successors info nor the temporary scheduling states.
@@ -93,8 +83,16 @@ namespace llvm {
///
virtual void ComputeLatency(SUnit *SU);
- virtual MachineBasicBlock *
- EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM);
+ /// ComputeOperandLatency - Override dependence edge latency using
+ /// operand use/def information
+ ///
+ virtual void ComputeOperandLatency(SUnit *Def, SUnit *Use,
+ SDep& dep) const { }
+
+ virtual void ComputeOperandLatency(SDNode *Def, SDNode *Use,
+ unsigned OpIdx, SDep& dep) const;
+
+ virtual MachineBasicBlock *EmitSchedule();
/// Schedule - Order nodes according to selected style, filling
/// in the Sequence member.
@@ -110,7 +108,10 @@ namespace llvm {
private:
/// ClusterNeighboringLoads - Cluster loads from "near" addresses into
/// combined SUnits.
- void ClusterNeighboringLoads();
+ void ClusterNeighboringLoads(SDNode *Node);
+ /// ClusterNodes - Cluster certain nodes which should be scheduled together.
+ ///
+ void ClusterNodes();
/// BuildSchedUnits, AddSchedEdges - Helper functions for BuildSchedGraph.
void BuildSchedUnits();
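[Note: the ComputeOperandLatency override declared here, and defined in ScheduleDAGSDNodes.cpp above, refines a data edge's latency from the target's instruction itineraries: roughly DefCycle - UseCycle + 1, that is, the cycle at which the producing operand becomes available minus the cycle at which the consumer reads it. A minimal worked example follows, with made-up cycle numbers; it is only a model of the formula, not the LLVM API.]

#include <cstdio>

// Returns the refined edge latency, or -1 if the itinerary has no data and
// the default latency should be kept.
static int operandLatency(int DefCycle, int UseCycle) {
  if (DefCycle < 0)
    return -1;
  int Latency = DefCycle - UseCycle + 1;
  return Latency >= 0 ? Latency : -1;
}

int main() {
  // A load whose result is ready at cycle 3 feeding an ALU op that reads its
  // operand at cycle 1: the scheduler should keep them 3 cycles apart.
  std::printf("edge latency = %d\n", operandLatency(3, 1));
  return 0;
}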
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 746d4e2..ad06ebd 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -13,7 +13,9 @@
#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeOrdering.h"
+#include "SDNodeDbgValue.h"
#include "llvm/Constants.h"
+#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Function.h"
#include "llvm/GlobalAlias.h"
@@ -31,6 +33,7 @@
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
@@ -303,10 +306,6 @@ ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
return Result;
}
-const TargetMachine &SelectionDAG::getTarget() const {
- return MF->getTarget();
-}
-
//===----------------------------------------------------------------------===//
// SDNode Profile Support
//===----------------------------------------------------------------------===//
@@ -596,6 +595,11 @@ void SelectionDAG::DeallocateNode(SDNode *N) {
// Remove the ordering of this node.
Ordering->remove(N);
+
+ // If any of the SDDbgValue nodes refer to this SDNode, invalidate them.
+ SmallVector<SDDbgValue*, 2> &DbgVals = DbgInfo->getSDDbgValues(N);
+ for (unsigned i = 0, e = DbgVals.size(); i != e; ++i)
+ DbgVals[i]->setIsInvalidated();
}
/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
@@ -786,26 +790,24 @@ unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
}
// EntryNode could meaningfully have debug info if we can find it...
-SelectionDAG::SelectionDAG(TargetLowering &tli, FunctionLoweringInfo &fli)
- : TLI(tli), FLI(fli), DW(0),
- EntryNode(ISD::EntryToken, DebugLoc::getUnknownLoc(),
- getVTList(MVT::Other)),
+SelectionDAG::SelectionDAG(const TargetMachine &tm)
+ : TM(tm), TLI(*tm.getTargetLowering()), TSI(*tm.getSelectionDAGInfo()),
+ EntryNode(ISD::EntryToken, DebugLoc(), getVTList(MVT::Other)),
Root(getEntryNode()), Ordering(0) {
AllNodes.push_back(&EntryNode);
Ordering = new SDNodeOrdering();
+ DbgInfo = new SDDbgInfo();
}
-void SelectionDAG::init(MachineFunction &mf, MachineModuleInfo *mmi,
- DwarfWriter *dw) {
+void SelectionDAG::init(MachineFunction &mf) {
MF = &mf;
- MMI = mmi;
- DW = dw;
Context = &mf.getFunction()->getContext();
}
SelectionDAG::~SelectionDAG() {
allnodes_clear();
delete Ordering;
+ delete DbgInfo;
}
void SelectionDAG::allnodes_clear() {
@@ -831,8 +833,8 @@ void SelectionDAG::clear() {
EntryNode.UseList = 0;
AllNodes.push_back(&EntryNode);
Root = getEntryNode();
- delete Ordering;
- Ordering = new SDNodeOrdering();
+ Ordering->clear();
+ DbgInfo->clear();
}
SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) {
@@ -898,8 +900,7 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
return SDValue(N, 0);
if (!N) {
- N = NodeAllocator.Allocate<ConstantSDNode>();
- new (N) ConstantSDNode(isT, &Val, EltVT);
+ N = new (NodeAllocator) ConstantSDNode(isT, &Val, EltVT);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
}
@@ -908,8 +909,7 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
if (VT.isVector()) {
SmallVector<SDValue, 8> Ops;
Ops.assign(VT.getVectorNumElements(), Result);
- Result = getNode(ISD::BUILD_VECTOR, DebugLoc::getUnknownLoc(),
- VT, &Ops[0], Ops.size());
+ Result = getNode(ISD::BUILD_VECTOR, DebugLoc(), VT, &Ops[0], Ops.size());
}
return Result;
}
@@ -942,8 +942,7 @@ SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
return SDValue(N, 0);
if (!N) {
- N = NodeAllocator.Allocate<ConstantFPSDNode>();
- new (N) ConstantFPSDNode(isTarget, &V, EltVT);
+ N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
}
@@ -953,8 +952,7 @@ SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
SmallVector<SDValue, 8> Ops;
Ops.assign(VT.getVectorNumElements(), Result);
// FIXME DebugLoc info might be appropriate here
- Result = getNode(ISD::BUILD_VECTOR, DebugLoc::getUnknownLoc(),
- VT, &Ops[0], Ops.size());
+ Result = getNode(ISD::BUILD_VECTOR, DebugLoc(), VT, &Ops[0], Ops.size());
}
return Result;
}
@@ -963,11 +961,21 @@ SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
EVT EltVT = VT.getScalarType();
if (EltVT==MVT::f32)
return getConstantFP(APFloat((float)Val), VT, isTarget);
- else
+ else if (EltVT==MVT::f64)
return getConstantFP(APFloat(Val), VT, isTarget);
+ else if (EltVT==MVT::f80 || EltVT==MVT::f128) {
+ bool ignored;
+ APFloat apf = APFloat(Val);
+ apf.convert(*EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
+ &ignored);
+ return getConstantFP(apf, VT, isTarget);
+ } else {
+ assert(0 && "Unsupported type in getConstantFP");
+ return SDValue();
+ }
}
-SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV,
+SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, DebugLoc DL,
EVT VT, int64_t Offset,
bool isTargetGA,
unsigned char TargetFlags) {
@@ -1002,8 +1010,8 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV,
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- SDNode *N = NodeAllocator.Allocate<GlobalAddressSDNode>();
- new (N) GlobalAddressSDNode(Opc, GV, VT, Offset, TargetFlags);
+ SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL, GV, VT,
+ Offset, TargetFlags);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -1018,8 +1026,7 @@ SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- SDNode *N = NodeAllocator.Allocate<FrameIndexSDNode>();
- new (N) FrameIndexSDNode(FI, VT, isTarget);
+ SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -1038,14 +1045,14 @@ SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- SDNode *N = NodeAllocator.Allocate<JumpTableSDNode>();
- new (N) JumpTableSDNode(JTI, VT, isTarget, TargetFlags);
+ SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
+ TargetFlags);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
}
-SDValue SelectionDAG::getConstantPool(Constant *C, EVT VT,
+SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
unsigned Alignment, int Offset,
bool isTarget,
unsigned char TargetFlags) {
@@ -1064,8 +1071,8 @@ SDValue SelectionDAG::getConstantPool(Constant *C, EVT VT,
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- SDNode *N = NodeAllocator.Allocate<ConstantPoolSDNode>();
- new (N) ConstantPoolSDNode(isTarget, C, VT, Offset, Alignment, TargetFlags);
+ SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
+ Alignment, TargetFlags);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -1091,8 +1098,8 @@ SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- SDNode *N = NodeAllocator.Allocate<ConstantPoolSDNode>();
- new (N) ConstantPoolSDNode(isTarget, C, VT, Offset, Alignment, TargetFlags);
+ SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
+ Alignment, TargetFlags);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -1106,8 +1113,7 @@ SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- SDNode *N = NodeAllocator.Allocate<BasicBlockSDNode>();
- new (N) BasicBlockSDNode(MBB);
+ SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -1122,8 +1128,7 @@ SDValue SelectionDAG::getValueType(EVT VT) {
ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
if (N) return SDValue(N, 0);
- N = NodeAllocator.Allocate<VTSDNode>();
- new (N) VTSDNode(VT);
+ N = new (NodeAllocator) VTSDNode(VT);
AllNodes.push_back(N);
return SDValue(N, 0);
}
@@ -1131,8 +1136,7 @@ SDValue SelectionDAG::getValueType(EVT VT) {
SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
SDNode *&N = ExternalSymbols[Sym];
if (N) return SDValue(N, 0);
- N = NodeAllocator.Allocate<ExternalSymbolSDNode>();
- new (N) ExternalSymbolSDNode(false, Sym, 0, VT);
+ N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
AllNodes.push_back(N);
return SDValue(N, 0);
}
@@ -1143,8 +1147,7 @@ SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
TargetFlags)];
if (N) return SDValue(N, 0);
- N = NodeAllocator.Allocate<ExternalSymbolSDNode>();
- new (N) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
+ N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
AllNodes.push_back(N);
return SDValue(N, 0);
}
@@ -1154,8 +1157,7 @@ SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
CondCodeNodes.resize(Cond+1);
if (CondCodeNodes[Cond] == 0) {
- CondCodeSDNode *N = NodeAllocator.Allocate<CondCodeSDNode>();
- new (N) CondCodeSDNode(Cond);
+ CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
CondCodeNodes[Cond] = N;
AllNodes.push_back(N);
}
@@ -1260,8 +1262,8 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, DebugLoc dl, SDValue N1,
int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));
- ShuffleVectorSDNode *N = NodeAllocator.Allocate<ShuffleVectorSDNode>();
- new (N) ShuffleVectorSDNode(VT, dl, N1, N2, MaskAlloc);
+ ShuffleVectorSDNode *N =
+ new (NodeAllocator) ShuffleVectorSDNode(VT, dl, N1, N2, MaskAlloc);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -1284,8 +1286,8 @@ SDValue SelectionDAG::getConvertRndSat(EVT VT, DebugLoc dl,
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- CvtRndSatSDNode *N = NodeAllocator.Allocate<CvtRndSatSDNode>();
- new (N) CvtRndSatSDNode(VT, dl, Ops, 5, Code);
+ CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl, Ops, 5,
+ Code);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -1299,32 +1301,29 @@ SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- SDNode *N = NodeAllocator.Allocate<RegisterSDNode>();
- new (N) RegisterSDNode(RegNo, VT);
+ SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
}
-SDValue SelectionDAG::getLabel(unsigned Opcode, DebugLoc dl,
- SDValue Root,
- unsigned LabelID) {
+SDValue SelectionDAG::getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label) {
FoldingSetNodeID ID;
SDValue Ops[] = { Root };
- AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), &Ops[0], 1);
- ID.AddInteger(LabelID);
+ AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), &Ops[0], 1);
+ ID.AddPointer(Label);
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
-
- SDNode *N = NodeAllocator.Allocate<LabelSDNode>();
- new (N) LabelSDNode(Opcode, dl, Root, LabelID);
+
+ SDNode *N = new (NodeAllocator) EHLabelSDNode(dl, Root, Label);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
}
-SDValue SelectionDAG::getBlockAddress(BlockAddress *BA, EVT VT,
+
+SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
bool isTarget,
unsigned char TargetFlags) {
unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
@@ -1337,8 +1336,7 @@ SDValue SelectionDAG::getBlockAddress(BlockAddress *BA, EVT VT,
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- SDNode *N = NodeAllocator.Allocate<BlockAddressSDNode>();
- new (N) BlockAddressSDNode(Opc, VT, BA, TargetFlags);
+ SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, TargetFlags);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -1356,13 +1354,29 @@ SDValue SelectionDAG::getSrcValue(const Value *V) {
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- SDNode *N = NodeAllocator.Allocate<SrcValueSDNode>();
- new (N) SrcValueSDNode(V);
+ SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
}
+/// getMDNode - Return an MDNodeSDNode which holds an MDNode.
+SDValue SelectionDAG::getMDNode(const MDNode *MD) {
+ FoldingSetNodeID ID;
+ AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), 0, 0);
+ ID.AddPointer(MD);
+
+ void *IP = 0;
+ if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
+ return SDValue(E, 0);
+
+ SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
+ CSEMap.InsertNode(N, IP);
+ AllNodes.push_back(N);
+ return SDValue(N, 0);
+}
+
+
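
getMDNode above follows the same uniquing pattern as the other leaf-node constructors: build a key from the opcode and operands, look it up in the CSE map, and only allocate a node when no equivalent one exists. A rough standalone sketch of that idea, using a hand-rolled std::map where LLVM uses FoldingSetNodeID/CSEMap (illustrative only, and it leaks the toy nodes):

#include <map>
#include <utility>

struct MDNodeStub {};                 // stand-in for the metadata operand

struct Node {
  int Opcode;
  const MDNodeStub *MD;
  Node(int Op, const MDNodeStub *M) : Opcode(Op), MD(M) {}
};

struct DAGStub {
  // Key = (opcode, pointer operand); FoldingSetNodeID plays this role in LLVM.
  std::map<std::pair<int, const MDNodeStub *>, Node *> CSEMap;

  Node *getMDNode(const MDNodeStub *MD) {
    auto Key = std::make_pair(/*hypothetical MDNODE opcode=*/1, MD);
    auto It = CSEMap.find(Key);
    if (It != CSEMap.end())
      return It->second;              // reuse the existing node
    Node *N = new Node(Key.first, MD);
    CSEMap[Key] = N;
    return N;
  }
};

int main() {
  DAGStub DAG;
  MDNodeStub MD;
  // Asking twice for the same metadata yields the same node.
  return DAG.getMDNode(&MD) == DAG.getMDNode(&MD) ? 0 : 1;
}
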
/// getShiftAmountOperand - Return the specified value casted to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(SDValue Op) {
@@ -1911,7 +1925,8 @@ void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask,
// Output known-0 bits are known if clear or set in both the low clear bits
// common to both LHS & RHS. For example, 8+(X<<3) is known to have the
// low 3 bits clear.
- APInt Mask2 = APInt::getLowBitsSet(BitWidth, Mask.countTrailingOnes());
+ APInt Mask2 = APInt::getLowBitsSet(BitWidth,
+ BitWidth - Mask.countLeadingZeros());
ComputeMaskedBits(Op.getOperand(0), Mask2, KnownZero2, KnownOne2, Depth+1);
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
@@ -2221,7 +2236,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
// If we're told that NaNs won't happen, assume they won't.
- if (FiniteOnlyFPMath())
+ if (NoNaNsFPMath)
return true;
// If the value is a constant, we can obviously see if it is a NaN or not.
@@ -2260,40 +2275,9 @@ bool SelectionDAG::isVerifiedDebugInfoDesc(SDValue Op) const {
GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
if (!GA) return false;
if (GA->getOffset() != 0) return false;
- GlobalVariable *GV = dyn_cast<GlobalVariable>(GA->getGlobal());
+ const GlobalVariable *GV = dyn_cast<GlobalVariable>(GA->getGlobal());
if (!GV) return false;
- MachineModuleInfo *MMI = getMachineModuleInfo();
- return MMI && MMI->hasDebugInfo();
-}
-
-
-/// getShuffleScalarElt - Returns the scalar element that will make up the ith
-/// element of the result of the vector shuffle.
-SDValue SelectionDAG::getShuffleScalarElt(const ShuffleVectorSDNode *N,
- unsigned i) {
- EVT VT = N->getValueType(0);
- DebugLoc dl = N->getDebugLoc();
- if (N->getMaskElt(i) < 0)
- return getUNDEF(VT.getVectorElementType());
- unsigned Index = N->getMaskElt(i);
- unsigned NumElems = VT.getVectorNumElements();
- SDValue V = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1);
- Index %= NumElems;
-
- if (V.getOpcode() == ISD::BIT_CONVERT) {
- V = V.getOperand(0);
- EVT VVT = V.getValueType();
- if (!VVT.isVector() || VVT.getVectorNumElements() != (unsigned)NumElems)
- return SDValue();
- }
- if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
- return (Index == 0) ? V.getOperand(0)
- : getUNDEF(VT.getVectorElementType());
- if (V.getOpcode() == ISD::BUILD_VECTOR)
- return V.getOperand(Index);
- if (const ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(V))
- return getShuffleScalarElt(SVN, Index);
- return SDValue();
+ return MF->getMMI().hasDebugInfo();
}
@@ -2306,8 +2290,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT) {
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- SDNode *N = NodeAllocator.Allocate<SDNode>();
- new (N) SDNode(Opcode, DL, getVTList(VT));
+ SDNode *N = new (NodeAllocator) SDNode(Opcode, DL, getVTList(VT));
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
@@ -2322,22 +2305,20 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
// Constant fold unary operations with an integer constant operand.
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
const APInt &Val = C->getAPIntValue();
- unsigned BitWidth = VT.getSizeInBits();
switch (Opcode) {
default: break;
case ISD::SIGN_EXTEND:
- return getConstant(APInt(Val).sextOrTrunc(BitWidth), VT);
+ return getConstant(APInt(Val).sextOrTrunc(VT.getSizeInBits()), VT);
case ISD::ANY_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::TRUNCATE:
- return getConstant(APInt(Val).zextOrTrunc(BitWidth), VT);
+ return getConstant(APInt(Val).zextOrTrunc(VT.getSizeInBits()), VT);
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP: {
const uint64_t zero[] = {0, 0};
- // No compile time operations on this type.
- if (VT==MVT::ppcf128)
- break;
- APFloat apf = APFloat(APInt(BitWidth, 2, zero));
+ // No compile time operations on ppcf128.
+ if (VT == MVT::ppcf128) break;
+ APFloat apf = APFloat(APInt(VT.getSizeInBits(), 2, zero));
(void)apf.convertFromAPInt(Val,
Opcode==ISD::SINT_TO_FP,
APFloat::rmNearestTiesToEven);
@@ -2459,9 +2440,18 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
VT.getVectorNumElements() ==
Operand.getValueType().getVectorNumElements()) &&
"Vector element count mismatch!");
- if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND)
+
+ if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
+ OpOpcode == ISD::ANY_EXTEND)
// (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
+
+ // (ext (trunc x)) -> x
+ if (OpOpcode == ISD::TRUNCATE) {
+ SDValue OpOp = Operand.getNode()->getOperand(0);
+ if (OpOp.getValueType() == VT)
+ return OpOp;
+ }
break;
case ISD::TRUNCATE:
assert(VT.isInteger() && Operand.getValueType().isInteger() &&
@@ -2537,12 +2527,10 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- N = NodeAllocator.Allocate<UnarySDNode>();
- new (N) UnarySDNode(Opcode, DL, VTs, Operand);
+ N = new (NodeAllocator) UnarySDNode(Opcode, DL, VTs, Operand);
CSEMap.InsertNode(N, IP);
} else {
- N = NodeAllocator.Allocate<UnarySDNode>();
- new (N) UnarySDNode(Opcode, DL, VTs, Operand);
+ N = new (NodeAllocator) UnarySDNode(Opcode, DL, VTs, Operand);
}
AllNodes.push_back(N);
@@ -2607,13 +2595,15 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
// one big BUILD_VECTOR.
if (N1.getOpcode() == ISD::BUILD_VECTOR &&
N2.getOpcode() == ISD::BUILD_VECTOR) {
- SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), N1.getNode()->op_end());
- Elts.insert(Elts.end(), N2.getNode()->op_begin(), N2.getNode()->op_end());
+ SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
+ N1.getNode()->op_end());
+ Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
}
break;
case ISD::AND:
- assert(VT.isInteger() && N1.getValueType() == N2.getValueType() &&
+ assert(VT.isInteger() && "This operator does not apply to FP types!");
+ assert(N1.getValueType() == N2.getValueType() &&
N1.getValueType() == VT && "Binary operator types must match!");
// (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
// worth handling here.
@@ -2626,7 +2616,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
case ISD::XOR:
case ISD::ADD:
case ISD::SUB:
- assert(VT.isInteger() && N1.getValueType() == N2.getValueType() &&
+ assert(VT.isInteger() && "This operator does not apply to FP types!");
+ assert(N1.getValueType() == N2.getValueType() &&
N1.getValueType() == VT && "Binary operator types must match!");
// (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
// it's worth handling here.
@@ -2641,7 +2632,9 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
case ISD::SDIV:
case ISD::SREM:
assert(VT.isInteger() && "This operator does not apply to FP types!");
- // fall through
+ assert(N1.getValueType() == N2.getValueType() &&
+ N1.getValueType() == VT && "Binary operator types must match!");
+ break;
case ISD::FADD:
case ISD::FSUB:
case ISD::FMUL:
@@ -2664,6 +2657,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
return N1;
}
}
+ assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
assert(N1.getValueType() == N2.getValueType() &&
N1.getValueType() == VT && "Binary operator types must match!");
break;
@@ -2791,14 +2785,19 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
// If the indices are the same, return the inserted element else
// if the indices are known different, extract the element from
// the original vector.
- if (N1.getOperand(2) == N2) {
- if (VT == N1.getOperand(1).getValueType())
- return N1.getOperand(1);
- else
- return getSExtOrTrunc(N1.getOperand(1), DL, VT);
- } else if (isa<ConstantSDNode>(N1.getOperand(2)) &&
- isa<ConstantSDNode>(N2))
+ SDValue N1Op2 = N1.getOperand(2);
+ ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());
+
+ if (N1Op2C && N2C) {
+ if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
+ if (VT == N1.getOperand(1).getValueType())
+ return N1.getOperand(1);
+ else
+ return getSExtOrTrunc(N1.getOperand(1), DL, VT);
+ }
+
return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
+ }
}
break;
case ISD::EXTRACT_ELEMENT:
@@ -2970,12 +2969,10 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- N = NodeAllocator.Allocate<BinarySDNode>();
- new (N) BinarySDNode(Opcode, DL, VTs, N1, N2);
+ N = new (NodeAllocator) BinarySDNode(Opcode, DL, VTs, N1, N2);
CSEMap.InsertNode(N, IP);
} else {
- N = NodeAllocator.Allocate<BinarySDNode>();
- new (N) BinarySDNode(Opcode, DL, VTs, N1, N2);
+ N = new (NodeAllocator) BinarySDNode(Opcode, DL, VTs, N1, N2);
}
AllNodes.push_back(N);
@@ -2989,7 +2986,6 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
SDValue N1, SDValue N2, SDValue N3) {
// Perform various simplifications.
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
- ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
switch (Opcode) {
case ISD::CONCAT_VECTORS:
// A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
@@ -2997,9 +2993,10 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
if (N1.getOpcode() == ISD::BUILD_VECTOR &&
N2.getOpcode() == ISD::BUILD_VECTOR &&
N3.getOpcode() == ISD::BUILD_VECTOR) {
- SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), N1.getNode()->op_end());
- Elts.insert(Elts.end(), N2.getNode()->op_begin(), N2.getNode()->op_end());
- Elts.insert(Elts.end(), N3.getNode()->op_begin(), N3.getNode()->op_end());
+ SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
+ N1.getNode()->op_end());
+ Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
+ Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
}
break;
@@ -3019,14 +3016,6 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
if (N2 == N3) return N2; // select C, X, X -> X
break;
- case ISD::BRCOND:
- if (N2C) {
- if (N2C->getZExtValue()) // Unconditional branch
- return getNode(ISD::BR, DL, MVT::Other, N1, N3);
- else
- return N1; // Never-taken branch
- }
- break;
case ISD::VECTOR_SHUFFLE:
llvm_unreachable("should use getVectorShuffle constructor!");
break;
@@ -3048,12 +3037,10 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- N = NodeAllocator.Allocate<TernarySDNode>();
- new (N) TernarySDNode(Opcode, DL, VTs, N1, N2, N3);
+ N = new (NodeAllocator) TernarySDNode(Opcode, DL, VTs, N1, N2, N3);
CSEMap.InsertNode(N, IP);
} else {
- N = NodeAllocator.Allocate<TernarySDNode>();
- new (N) TernarySDNode(Opcode, DL, VTs, N1, N2, N3);
+ N = new (NodeAllocator) TernarySDNode(Opcode, DL, VTs, N1, N2, N3);
}
AllNodes.push_back(N);
@@ -3104,6 +3091,8 @@ SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
/// operand.
static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
DebugLoc dl) {
+ assert(Value.getOpcode() != ISD::UNDEF);
+
unsigned NumBits = VT.getScalarType().getSizeInBits();
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
APInt Val = APInt(NumBits, C->getZExtValue() & 255);
@@ -3142,11 +3131,17 @@ static SDValue getMemsetStringVal(EVT VT, DebugLoc dl, SelectionDAG &DAG,
if (Str.empty()) {
if (VT.isInteger())
return DAG.getConstant(0, VT);
- unsigned NumElts = VT.getVectorNumElements();
- MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
- DAG.getConstant(0,
- EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts)));
+ else if (VT.getSimpleVT().SimpleTy == MVT::f32 ||
+ VT.getSimpleVT().SimpleTy == MVT::f64)
+ return DAG.getConstantFP(0.0, VT);
+ else if (VT.isVector()) {
+ unsigned NumElts = VT.getVectorNumElements();
+ MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
+ return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
+ DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
+ EltVT, NumElts)));
+ } else
+ llvm_unreachable("Expected type!");
}
assert(!VT.isVector() && "Can't handle vector type here!");
@@ -3187,58 +3182,43 @@ static bool isMemSrcFromString(SDValue Src, std::string &Str) {
if (!G)
return false;
- GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
+ const GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
if (GV && GetConstantStringInfo(GV, Str, SrcDelta, false))
return true;
return false;
}
-/// MeetsMaxMemopRequirement - Determines if the number of memory ops required
-/// to replace the memset / memcpy is below the threshold. It also returns the
-/// types of the sequence of memory ops to perform memset / memcpy.
-static
-bool MeetsMaxMemopRequirement(std::vector<EVT> &MemOps,
- SDValue Dst, SDValue Src,
- unsigned Limit, uint64_t Size, unsigned &Align,
- std::string &Str, bool &isSrcStr,
- SelectionDAG &DAG,
- const TargetLowering &TLI) {
- isSrcStr = isMemSrcFromString(Src, Str);
- bool isSrcConst = isa<ConstantSDNode>(Src);
- EVT VT = TLI.getOptimalMemOpType(Size, Align, isSrcConst, isSrcStr, DAG);
- bool AllowUnalign = TLI.allowsUnalignedMemoryAccesses(VT);
- if (VT != MVT::Other) {
- const Type *Ty = VT.getTypeForEVT(*DAG.getContext());
- unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
- // If source is a string constant, this will require an unaligned load.
- if (NewAlign > Align && (isSrcConst || AllowUnalign)) {
- if (Dst.getOpcode() != ISD::FrameIndex) {
- // Can't change destination alignment. It requires a unaligned store.
- if (AllowUnalign)
- VT = MVT::Other;
- } else {
- int FI = cast<FrameIndexSDNode>(Dst)->getIndex();
- MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
- if (MFI->isFixedObjectIndex(FI)) {
- // Can't change destination alignment. It requires a unaligned store.
- if (AllowUnalign)
- VT = MVT::Other;
- } else {
- // Give the stack frame object a larger alignment if needed.
- if (MFI->getObjectAlignment(FI) < NewAlign)
- MFI->setObjectAlignment(FI, NewAlign);
- Align = NewAlign;
- }
- }
- }
- }
+/// FindOptimalMemOpLowering - Determines the optimal series of memory ops
+/// to replace the memset / memcpy. Returns true if the number of memory ops
+/// is below the threshold. It returns the types of the sequence of
+/// memory ops to perform the memset / memcpy by reference.
+static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
+ unsigned Limit, uint64_t Size,
+ unsigned DstAlign, unsigned SrcAlign,
+ bool NonScalarIntSafe,
+ bool MemcpyStrSrc,
+ SelectionDAG &DAG,
+ const TargetLowering &TLI) {
+ assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
+ "Expecting memcpy / memset source to meet alignment requirement!");
+ // If 'SrcAlign' is zero, that means the memory operation does not need to
+ // load the value, i.e. memset or memcpy from a constant string. Otherwise, it's
+ // the inferred alignment of the source. 'DstAlign', on the other hand, is the
+ // specified alignment of the memory operation. If it is zero, that means
+ // it's possible to change the alignment of the destination. 'MemcpyStrSrc'
+ // indicates whether the memcpy source is constant so it does not need to be
+ // loaded.
+ EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
+ NonScalarIntSafe, MemcpyStrSrc,
+ DAG.getMachineFunction());
if (VT == MVT::Other) {
- if (TLI.allowsUnalignedMemoryAccesses(MVT::i64)) {
- VT = MVT::i64;
+ if (DstAlign >= TLI.getTargetData()->getPointerPrefAlignment() ||
+ TLI.allowsUnalignedMemoryAccesses(VT)) {
+ VT = TLI.getPointerTy();
} else {
- switch (Align & 7) {
+ switch (DstAlign & 7) {
case 0: VT = MVT::i64; break;
case 4: VT = MVT::i32; break;
case 2: VT = MVT::i16; break;
@@ -3254,13 +3234,22 @@ bool MeetsMaxMemopRequirement(std::vector<EVT> &MemOps,
if (VT.bitsGT(LVT))
VT = LVT;
}
+
+ // If we're optimizing for size, and there is a limit, cap the maximum number
+ // of operations inserted at 4. This is a wild guess that approximates
+ // the size of a call to memcpy or memset (3 arguments + call).
+ if (Limit != ~0U) {
+ const Function *F = DAG.getMachineFunction().getFunction();
+ if (F->hasFnAttr(Attribute::OptimizeForSize))
+ Limit = 4;
+ }
unsigned NumMemOps = 0;
while (Size != 0) {
unsigned VTSize = VT.getSizeInBits() / 8;
while (VTSize > Size) {
// For now, only use non-vector load / store's for the left-over pieces.
- if (VT.isVector()) {
+ if (VT.isVector() || VT.isFloatingPoint()) {
VT = MVT::i64;
while (!TLI.isTypeLegal(VT))
VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
@@ -3283,28 +3272,50 @@ bool MeetsMaxMemopRequirement(std::vector<EVT> &MemOps,
}
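
The trailing loop of FindOptimalMemOpLowering covers the remaining size with progressively narrower operations once the widest type no longer fits. A standalone sketch of that covering step, assuming plain integer op widths of 8/4/2/1 bytes and ignoring legality, vector, and FP types:

#include <cstdint>
#include <cstdio>
#include <vector>

// Break `Size` bytes into a sequence of op widths, largest first,
// mirroring the "while (VTSize > Size) pick a narrower VT" loop above.
static std::vector<unsigned> coverSize(uint64_t Size, unsigned MaxOpBytes) {
  std::vector<unsigned> Ops;
  unsigned W = MaxOpBytes;                  // e.g. 8 for i64
  while (Size != 0) {
    while (W > Size)
      W /= 2;                               // drop to the next narrower width
    Ops.push_back(W);
    Size -= W;
  }
  return Ops;
}

int main() {
  // 15 bytes with an 8-byte widest type -> 8 + 4 + 2 + 1.
  for (unsigned W : coverSize(15, 8))
    std::printf("%u ", W);
  std::printf("\n");
  return 0;
}
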
static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
- SDValue Chain, SDValue Dst,
- SDValue Src, uint64_t Size,
- unsigned Align, bool AlwaysInline,
- const Value *DstSV, uint64_t DstSVOff,
- const Value *SrcSV, uint64_t SrcSVOff){
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ SDValue Chain, SDValue Dst,
+ SDValue Src, uint64_t Size,
+ unsigned Align, bool isVol,
+ bool AlwaysInline,
+ const Value *DstSV, uint64_t DstSVOff,
+ const Value *SrcSV, uint64_t SrcSVOff) {
+ // Turn a memcpy of undef to nop.
+ if (Src.getOpcode() == ISD::UNDEF)
+ return Chain;
// Expand memcpy to a series of load and store ops if the size operand falls
// below a certain threshold.
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
std::vector<EVT> MemOps;
- uint64_t Limit = -1ULL;
- if (!AlwaysInline)
- Limit = TLI.getMaxStoresPerMemcpy();
- unsigned DstAlign = Align; // Destination alignment can change.
+ bool DstAlignCanChange = false;
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
+ if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
+ DstAlignCanChange = true;
+ unsigned SrcAlign = DAG.InferPtrAlignment(Src);
+ if (Align > SrcAlign)
+ SrcAlign = Align;
std::string Str;
- bool CopyFromStr;
- if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, Limit, Size, DstAlign,
- Str, CopyFromStr, DAG, TLI))
+ bool CopyFromStr = isMemSrcFromString(Src, Str);
+ bool isZeroStr = CopyFromStr && Str.empty();
+ unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy();
+
+ if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
+ (DstAlignCanChange ? 0 : Align),
+ (isZeroStr ? 0 : SrcAlign),
+ true, CopyFromStr, DAG, TLI))
return SDValue();
+ if (DstAlignCanChange) {
+ const Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
+ unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
+ if (NewAlign > Align) {
+ // Give the stack frame object a larger alignment if needed.
+ if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
+ MFI->setObjectAlignment(FI->getIndex(), NewAlign);
+ Align = NewAlign;
+ }
+ }
- bool isZeroStr = CopyFromStr && Str.empty();
SmallVector<SDValue, 8> OutChains;
unsigned NumMemOps = MemOps.size();
uint64_t SrcOff = 0, DstOff = 0;
@@ -3313,16 +3324,17 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
unsigned VTSize = VT.getSizeInBits() / 8;
SDValue Value, Store;
- if (CopyFromStr && (isZeroStr || !VT.isVector())) {
+ if (CopyFromStr &&
+ (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
// It's unlikely a store of a vector immediate can be done in a single
// instruction. It would require a load from a constantpool first.
- // We also handle store a vector with all zero's.
+ // We only handle zero vectors here.
// FIXME: Handle other cases where store of vector immediate is done in
// a single instruction.
Value = getMemsetStringVal(VT, dl, DAG, TLI, Str, SrcOff);
Store = DAG.getStore(Chain, dl, Value,
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, false, false, DstAlign);
+ DstSV, DstSVOff + DstOff, isVol, false, Align);
} else {
// The type might not be legal for the target. This should only happen
// if the type is smaller than a legal type, as on PPC, so the right
@@ -3331,13 +3343,14 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
// FIXME does the case above also need this?
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
assert(NVT.bitsGE(VT));
- Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
+ Value = DAG.getExtLoad(ISD::EXTLOAD, NVT, dl, Chain,
getMemBasePlusOffset(Src, SrcOff, DAG),
- SrcSV, SrcSVOff + SrcOff, VT, false, false, Align);
+ SrcSV, SrcSVOff + SrcOff, VT, isVol, false,
+ MinAlign(SrcAlign, SrcOff));
Store = DAG.getTruncStore(Chain, dl, Value,
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, VT, false, false,
- DstAlign);
+ DstSV, DstSVOff + DstOff, VT, isVol, false,
+ Align);
}
OutChains.push_back(Store);
SrcOff += VTSize;
@@ -3349,28 +3362,47 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
}
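
The memcpy lowering above chooses between an inline load/store expansion and a library call based on how many memory ops the expansion would take, with the optimize-for-size path capping that limit at 4. A rough standalone model of the decision, with hypothetical threshold values rather than the real per-target numbers:

#include <cstdint>

// Decide whether to expand a memcpy of `Size` bytes inline.
// `MaxStores` stands in for the target's max-stores-per-memcpy limit;
// the cap of 4 mirrors the optimize-for-size comment in the code above.
static bool shouldExpandInline(uint64_t Size, unsigned WidestOpBytes,
                               unsigned MaxStores, bool OptForSize,
                               bool AlwaysInline) {
  unsigned Limit = AlwaysInline ? ~0u : MaxStores;
  if (OptForSize && Limit != ~0u)
    Limit = 4;                        // roughly the cost of calling memcpy
  // Count the ops the greedy covering would emit.
  unsigned NumOps = 0, W = WidestOpBytes;
  while (Size != 0) {
    while (W > Size) W /= 2;
    ++NumOps;
    Size -= W;
  }
  return NumOps <= Limit;
}

int main() {
  // 32 bytes with 8-byte stores and a limit of 8: 4 ops -> expand inline.
  bool A = shouldExpandInline(32, 8, 8, /*OptForSize=*/false, false);
  // 100 bytes while optimizing for size: 13 ops > 4 -> fall back to the libcall.
  bool B = shouldExpandInline(100, 8, 8, /*OptForSize=*/true, false);
  return (A && !B) ? 0 : 1;
}
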
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
- SDValue Chain, SDValue Dst,
- SDValue Src, uint64_t Size,
- unsigned Align, bool AlwaysInline,
- const Value *DstSV, uint64_t DstSVOff,
- const Value *SrcSV, uint64_t SrcSVOff){
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ SDValue Chain, SDValue Dst,
+ SDValue Src, uint64_t Size,
+ unsigned Align, bool isVol,
+ bool AlwaysInline,
+ const Value *DstSV, uint64_t DstSVOff,
+ const Value *SrcSV, uint64_t SrcSVOff) {
+ // Turn a memmove of undef to nop.
+ if (Src.getOpcode() == ISD::UNDEF)
+ return Chain;
// Expand memmove to a series of load and store ops if the size operand falls
// below a certain threshold.
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
std::vector<EVT> MemOps;
- uint64_t Limit = -1ULL;
- if (!AlwaysInline)
- Limit = TLI.getMaxStoresPerMemmove();
- unsigned DstAlign = Align; // Destination alignment can change.
- std::string Str;
- bool CopyFromStr;
- if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, Limit, Size, DstAlign,
- Str, CopyFromStr, DAG, TLI))
+ bool DstAlignCanChange = false;
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
+ if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
+ DstAlignCanChange = true;
+ unsigned SrcAlign = DAG.InferPtrAlignment(Src);
+ if (Align > SrcAlign)
+ SrcAlign = Align;
+ unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove();
+
+ if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
+ (DstAlignCanChange ? 0 : Align),
+ SrcAlign, true, false, DAG, TLI))
return SDValue();
- uint64_t SrcOff = 0, DstOff = 0;
+ if (DstAlignCanChange) {
+ const Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
+ unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
+ if (NewAlign > Align) {
+ // Give the stack frame object a larger alignment if needed.
+ if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
+ MFI->setObjectAlignment(FI->getIndex(), NewAlign);
+ Align = NewAlign;
+ }
+ }
+ uint64_t SrcOff = 0, DstOff = 0;
SmallVector<SDValue, 8> LoadValues;
SmallVector<SDValue, 8> LoadChains;
SmallVector<SDValue, 8> OutChains;
@@ -3382,7 +3414,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
Value = DAG.getLoad(VT, dl, Chain,
getMemBasePlusOffset(Src, SrcOff, DAG),
- SrcSV, SrcSVOff + SrcOff, false, false, Align);
+ SrcSV, SrcSVOff + SrcOff, isVol, false, SrcAlign);
LoadValues.push_back(Value);
LoadChains.push_back(Value.getValue(1));
SrcOff += VTSize;
@@ -3397,7 +3429,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
Store = DAG.getStore(Chain, dl, LoadValues[i],
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, false, false, DstAlign);
+ DstSV, DstSVOff + DstOff, isVol, false, Align);
OutChains.push_back(Store);
DstOff += VTSize;
}
@@ -3407,24 +3439,43 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
}
static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
- SDValue Chain, SDValue Dst,
- SDValue Src, uint64_t Size,
- unsigned Align,
- const Value *DstSV, uint64_t DstSVOff) {
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ SDValue Chain, SDValue Dst,
+ SDValue Src, uint64_t Size,
+ unsigned Align, bool isVol,
+ const Value *DstSV, uint64_t DstSVOff) {
+ // Turn a memset of undef to nop.
+ if (Src.getOpcode() == ISD::UNDEF)
+ return Chain;
// Expand memset to a series of load/store ops if the size operand
// falls below a certain threshold.
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
std::vector<EVT> MemOps;
- std::string Str;
- bool CopyFromStr;
- if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, TLI.getMaxStoresPerMemset(),
- Size, Align, Str, CopyFromStr, DAG, TLI))
+ bool DstAlignCanChange = false;
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
+ if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
+ DstAlignCanChange = true;
+ bool NonScalarIntSafe =
+ isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
+ if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(),
+ Size, (DstAlignCanChange ? 0 : Align), 0,
+ NonScalarIntSafe, false, DAG, TLI))
return SDValue();
+ if (DstAlignCanChange) {
+ const Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
+ unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
+ if (NewAlign > Align) {
+ // Give the stack frame object a larger alignment if needed.
+ if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
+ MFI->setObjectAlignment(FI->getIndex(), NewAlign);
+ Align = NewAlign;
+ }
+ }
+
SmallVector<SDValue, 8> OutChains;
uint64_t DstOff = 0;
-
unsigned NumMemOps = MemOps.size();
for (unsigned i = 0; i < NumMemOps; i++) {
EVT VT = MemOps[i];
@@ -3432,7 +3483,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
SDValue Value = getMemsetValue(Src, VT, DAG, dl);
SDValue Store = DAG.getStore(Chain, dl, Value,
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstSVOff + DstOff, false, false, 0);
+ DstSV, DstSVOff + DstOff, isVol, false, 0);
OutChains.push_back(Store);
DstOff += VTSize;
}
@@ -3443,7 +3494,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
- unsigned Align, bool AlwaysInline,
+ unsigned Align, bool isVol, bool AlwaysInline,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff) {
@@ -3455,10 +3506,9 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
if (ConstantSize->isNullValue())
return Chain;
- SDValue Result =
- getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
- ConstantSize->getZExtValue(),
- Align, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
+ SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
+ ConstantSize->getZExtValue(),Align,
+ isVol, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
if (Result.getNode())
return Result;
}
@@ -3466,8 +3516,8 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
// Then check to see if we should lower the memcpy with target-specific
// code. If the target chooses to do this, this is the next best.
SDValue Result =
- TLI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
- AlwaysInline,
+ TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
+ isVol, AlwaysInline,
DstSV, DstSVOff, SrcSV, SrcSVOff);
if (Result.getNode())
return Result;
@@ -3477,10 +3527,16 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
if (AlwaysInline) {
assert(ConstantSize && "AlwaysInline requires a constant size!");
return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
- ConstantSize->getZExtValue(), Align, true,
- DstSV, DstSVOff, SrcSV, SrcSVOff);
+ ConstantSize->getZExtValue(), Align, isVol,
+ true, DstSV, DstSVOff, SrcSV, SrcSVOff);
}
+ // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
+ // memcpy is not guaranteed to be safe. libc memcpys aren't required to
+ // respect volatile, so they may do things like read or write memory
+ // beyond the given memory regions. But fixing this isn't easy, and most
+ // people don't care.
+
// Emit a library call.
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
@@ -3502,7 +3558,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
- unsigned Align,
+ unsigned Align, bool isVol,
const Value *DstSV, uint64_t DstSVOff,
const Value *SrcSV, uint64_t SrcSVOff) {
@@ -3516,8 +3572,8 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Result =
getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
- ConstantSize->getZExtValue(),
- Align, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
+ ConstantSize->getZExtValue(), Align, isVol,
+ false, DstSV, DstSVOff, SrcSV, SrcSVOff);
if (Result.getNode())
return Result;
}
@@ -3525,11 +3581,14 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
// Then check to see if we should lower the memmove with target-specific
// code. If the target chooses to do this, this is the next best.
SDValue Result =
- TLI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align,
+ TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol,
DstSV, DstSVOff, SrcSV, SrcSVOff);
if (Result.getNode())
return Result;
+ // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
+ // not be safe. See memcpy above for more details.
+
// Emit a library call.
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
@@ -3551,7 +3610,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Src, SDValue Size,
- unsigned Align,
+ unsigned Align, bool isVol,
const Value *DstSV, uint64_t DstSVOff) {
// Check to see if we should lower the memset to stores first.
@@ -3564,7 +3623,8 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
SDValue Result =
getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
- Align, DstSV, DstSVOff);
+ Align, isVol, DstSV, DstSVOff);
+
if (Result.getNode())
return Result;
}
@@ -3572,12 +3632,12 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
// Then check to see if we should lower the memset with target-specific
// code. If the target chooses to do this, this is the next best.
SDValue Result =
- TLI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align,
+ TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
DstSV, DstSVOff);
if (Result.getNode())
return Result;
- // Emit a library call.
+ // Emit a library call.
const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
@@ -3654,8 +3714,8 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
cast<AtomicSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
- SDNode* N = NodeAllocator.Allocate<AtomicSDNode>();
- new (N) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain, Ptr, Cmp, Swp, MMO);
+ SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain,
+ Ptr, Cmp, Swp, MMO);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -3717,8 +3777,8 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
cast<AtomicSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
- SDNode* N = NodeAllocator.Allocate<AtomicSDNode>();
- new (N) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain, Ptr, Val, MMO);
+ SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain,
+ Ptr, Val, MMO);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -3796,20 +3856,20 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
return SDValue(E, 0);
}
- N = NodeAllocator.Allocate<MemIntrinsicSDNode>();
- new (N) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
+ N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps,
+ MemVT, MMO);
CSEMap.InsertNode(N, IP);
} else {
- N = NodeAllocator.Allocate<MemIntrinsicSDNode>();
- new (N) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
+ N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps,
+ MemVT, MMO);
}
AllNodes.push_back(N);
return SDValue(N, 0);
}
SDValue
-SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
- ISD::LoadExtType ExtType, EVT VT, SDValue Chain,
+SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
+ EVT VT, DebugLoc dl, SDValue Chain,
SDValue Ptr, SDValue Offset,
const Value *SV, int SVOffset, EVT MemVT,
bool isVolatile, bool isNonTemporal,
@@ -3832,12 +3892,12 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
MachineMemOperand *MMO =
MF.getMachineMemOperand(SV, Flags, SVOffset,
MemVT.getStoreSize(), Alignment);
- return getLoad(AM, dl, ExtType, VT, Chain, Ptr, Offset, MemVT, MMO);
+ return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
}
SDValue
-SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
- ISD::LoadExtType ExtType, EVT VT, SDValue Chain,
+SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
+ EVT VT, DebugLoc dl, SDValue Chain,
SDValue Ptr, SDValue Offset, EVT MemVT,
MachineMemOperand *MMO) {
if (VT == MemVT) {
@@ -3874,8 +3934,8 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
cast<LoadSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
- SDNode *N = NodeAllocator.Allocate<LoadSDNode>();
- new (N) LoadSDNode(Ops, dl, VTs, AM, ExtType, MemVT, MMO);
+ SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl, VTs, AM, ExtType,
+ MemVT, MMO);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -3887,18 +3947,18 @@ SDValue SelectionDAG::getLoad(EVT VT, DebugLoc dl,
bool isVolatile, bool isNonTemporal,
unsigned Alignment) {
SDValue Undef = getUNDEF(Ptr.getValueType());
- return getLoad(ISD::UNINDEXED, dl, ISD::NON_EXTLOAD, VT, Chain, Ptr, Undef,
+ return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
SV, SVOffset, VT, isVolatile, isNonTemporal, Alignment);
}
-SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, EVT VT,
+SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr,
const Value *SV,
int SVOffset, EVT MemVT,
bool isVolatile, bool isNonTemporal,
unsigned Alignment) {
SDValue Undef = getUNDEF(Ptr.getValueType());
- return getLoad(ISD::UNINDEXED, dl, ExtType, VT, Chain, Ptr, Undef,
+ return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
SV, SVOffset, MemVT, isVolatile, isNonTemporal, Alignment);
}
@@ -3908,7 +3968,7 @@ SelectionDAG::getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
"Load is already a indexed load!");
- return getLoad(AM, dl, LD->getExtensionType(), OrigLoad.getValueType(),
+ return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
LD->getChain(), Base, Offset, LD->getSrcValue(),
LD->getSrcValueOffset(), LD->getMemoryVT(),
LD->isVolatile(), LD->isNonTemporal(), LD->getAlignment());
@@ -3956,8 +4016,8 @@ SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
cast<StoreSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
- SDNode *N = NodeAllocator.Allocate<StoreSDNode>();
- new (N) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED, false, VT, MMO);
+ SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED,
+ false, VT, MMO);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -4020,8 +4080,8 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
cast<StoreSDNode>(E)->refineAlignment(MMO);
return SDValue(E, 0);
}
- SDNode *N = NodeAllocator.Allocate<StoreSDNode>();
- new (N) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED, true, SVT, MMO);
+ SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED,
+ true, SVT, MMO);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -4043,10 +4103,10 @@ SelectionDAG::getIndexedStore(SDValue OrigStore, DebugLoc dl, SDValue Base,
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- SDNode *N = NodeAllocator.Allocate<StoreSDNode>();
- new (N) StoreSDNode(Ops, dl, VTs, AM,
- ST->isTruncatingStore(), ST->getMemoryVT(),
- ST->getMemOperand());
+ SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl, VTs, AM,
+ ST->isTruncatingStore(),
+ ST->getMemoryVT(),
+ ST->getMemOperand());
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
return SDValue(N, 0);
@@ -4054,9 +4114,10 @@ SelectionDAG::getIndexedStore(SDValue OrigStore, DebugLoc dl, SDValue Base,
SDValue SelectionDAG::getVAArg(EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr,
- SDValue SV) {
- SDValue Ops[] = { Chain, Ptr, SV };
- return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 3);
+ SDValue SV,
+ unsigned Align) {
+ SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
+ return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 4);
}
SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
@@ -4117,12 +4178,10 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- N = NodeAllocator.Allocate<SDNode>();
- new (N) SDNode(Opcode, DL, VTs, Ops, NumOps);
+ N = new (NodeAllocator) SDNode(Opcode, DL, VTs, Ops, NumOps);
CSEMap.InsertNode(N, IP);
} else {
- N = NodeAllocator.Allocate<SDNode>();
- new (N) SDNode(Opcode, DL, VTs, Ops, NumOps);
+ N = new (NodeAllocator) SDNode(Opcode, DL, VTs, Ops, NumOps);
}
AllNodes.push_back(N);
@@ -4185,32 +4244,26 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
return SDValue(E, 0);
if (NumOps == 1) {
- N = NodeAllocator.Allocate<UnarySDNode>();
- new (N) UnarySDNode(Opcode, DL, VTList, Ops[0]);
+ N = new (NodeAllocator) UnarySDNode(Opcode, DL, VTList, Ops[0]);
} else if (NumOps == 2) {
- N = NodeAllocator.Allocate<BinarySDNode>();
- new (N) BinarySDNode(Opcode, DL, VTList, Ops[0], Ops[1]);
+ N = new (NodeAllocator) BinarySDNode(Opcode, DL, VTList, Ops[0], Ops[1]);
} else if (NumOps == 3) {
- N = NodeAllocator.Allocate<TernarySDNode>();
- new (N) TernarySDNode(Opcode, DL, VTList, Ops[0], Ops[1], Ops[2]);
+ N = new (NodeAllocator) TernarySDNode(Opcode, DL, VTList, Ops[0], Ops[1],
+ Ops[2]);
} else {
- N = NodeAllocator.Allocate<SDNode>();
- new (N) SDNode(Opcode, DL, VTList, Ops, NumOps);
+ N = new (NodeAllocator) SDNode(Opcode, DL, VTList, Ops, NumOps);
}
CSEMap.InsertNode(N, IP);
} else {
if (NumOps == 1) {
- N = NodeAllocator.Allocate<UnarySDNode>();
- new (N) UnarySDNode(Opcode, DL, VTList, Ops[0]);
+ N = new (NodeAllocator) UnarySDNode(Opcode, DL, VTList, Ops[0]);
} else if (NumOps == 2) {
- N = NodeAllocator.Allocate<BinarySDNode>();
- new (N) BinarySDNode(Opcode, DL, VTList, Ops[0], Ops[1]);
+ N = new (NodeAllocator) BinarySDNode(Opcode, DL, VTList, Ops[0], Ops[1]);
} else if (NumOps == 3) {
- N = NodeAllocator.Allocate<TernarySDNode>();
- new (N) TernarySDNode(Opcode, DL, VTList, Ops[0], Ops[1], Ops[2]);
+ N = new (NodeAllocator) TernarySDNode(Opcode, DL, VTList, Ops[0], Ops[1],
+ Ops[2]);
} else {
- N = NodeAllocator.Allocate<SDNode>();
- new (N) SDNode(Opcode, DL, VTList, Ops, NumOps);
+ N = new (NodeAllocator) SDNode(Opcode, DL, VTList, Ops, NumOps);
}
}
AllNodes.push_back(N);
@@ -4346,17 +4399,16 @@ SDVTList SelectionDAG::getVTList(const EVT *VTs, unsigned NumVTs) {
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
-SDValue SelectionDAG::UpdateNodeOperands(SDValue InN, SDValue Op) {
- SDNode *N = InN.getNode();
+SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
// Check to see if there is no change.
- if (Op == N->getOperand(0)) return InN;
+ if (Op == N->getOperand(0)) return N;
// See if the modified node already exists.
void *InsertPos = 0;
if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
- return SDValue(Existing, InN.getResNo());
+ return Existing;
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
@@ -4368,22 +4420,20 @@ SDValue SelectionDAG::UpdateNodeOperands(SDValue InN, SDValue Op) {
// If this gets put into a CSE map, add it.
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
- return InN;
+ return N;
}
-SDValue SelectionDAG::
-UpdateNodeOperands(SDValue InN, SDValue Op1, SDValue Op2) {
- SDNode *N = InN.getNode();
+SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
// Check to see if there is no change.
if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
- return InN; // No operands changed, just return the input node.
+ return N; // No operands changed, just return the input node.
// See if the modified node already exists.
void *InsertPos = 0;
if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
- return SDValue(Existing, InN.getResNo());
+ return Existing;
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
@@ -4398,32 +4448,31 @@ UpdateNodeOperands(SDValue InN, SDValue Op1, SDValue Op2) {
// If this gets put into a CSE map, add it.
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
- return InN;
+ return N;
}
-SDValue SelectionDAG::
-UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2, SDValue Op3) {
+SDNode *SelectionDAG::
+UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
SDValue Ops[] = { Op1, Op2, Op3 };
return UpdateNodeOperands(N, Ops, 3);
}
-SDValue SelectionDAG::
-UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+SDNode *SelectionDAG::
+UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4) {
SDValue Ops[] = { Op1, Op2, Op3, Op4 };
return UpdateNodeOperands(N, Ops, 4);
}
-SDValue SelectionDAG::
-UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+SDNode *SelectionDAG::
+UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4, SDValue Op5) {
SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
return UpdateNodeOperands(N, Ops, 5);
}
-SDValue SelectionDAG::
-UpdateNodeOperands(SDValue InN, const SDValue *Ops, unsigned NumOps) {
- SDNode *N = InN.getNode();
+SDNode *SelectionDAG::
+UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) {
assert(N->getNumOperands() == NumOps &&
"Update with wrong number of operands");
@@ -4437,12 +4486,12 @@ UpdateNodeOperands(SDValue InN, const SDValue *Ops, unsigned NumOps) {
}
// No operands changed, just return the input node.
- if (!AnyChange) return InN;
+ if (!AnyChange) return N;
// See if the modified node already exists.
void *InsertPos = 0;
if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
- return SDValue(Existing, InN.getResNo());
+ return Existing;
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
@@ -4456,7 +4505,7 @@ UpdateNodeOperands(SDValue InN, const SDValue *Ops, unsigned NumOps) {
// If this gets put into a CSE map, add it.
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
- return InN;
+ return N;
}
/// DropOperands - Release the operands and set this node to have
@@ -4635,7 +4684,7 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
// remainder of the current SelectionDAG iteration, so we can allocate
// the operands directly out of a pool with no recycling metadata.
MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
- Ops, NumOps);
+ Ops, NumOps);
else
MN->InitOperands(MN->LocalOperands, Ops, NumOps);
MN->OperandsNeedDelete = false;
@@ -4809,8 +4858,7 @@ SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc DL, SDVTList VTs,
}
// Allocate a new MachineSDNode.
- N = NodeAllocator.Allocate<MachineSDNode>();
- new (N) MachineSDNode(~Opcode, DL, VTs);
+ N = new (NodeAllocator) MachineSDNode(~Opcode, DL, VTs);
// Initialize the operands list.
if (NumOps > array_lengthof(N->LocalOperands))
@@ -4869,6 +4917,26 @@ SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
return NULL;
}
+/// getDbgValue - Creates a SDDbgValue node.
+///
+SDDbgValue *
+SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off,
+ DebugLoc DL, unsigned O) {
+ return new (Allocator) SDDbgValue(MDPtr, N, R, Off, DL, O);
+}
+
+SDDbgValue *
+SelectionDAG::getDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off,
+ DebugLoc DL, unsigned O) {
+ return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O);
+}
+
+SDDbgValue *
+SelectionDAG::getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
+ DebugLoc DL, unsigned O) {
+ return new (Allocator) SDDbgValue(MDPtr, FI, Off, DL, O);
+}
+
namespace {
/// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
@@ -5264,6 +5332,13 @@ unsigned SelectionDAG::GetOrdering(const SDNode *SD) const {
return Ordering->getOrder(SD);
}
+/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
+/// value is produced by SD.
+void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
+ DbgInfo->add(DB, SD, isParameter);
+ if (SD)
+ SD->setHasDebugValue(true);
+}
//===----------------------------------------------------------------------===//
// SDNode Class
@@ -5273,11 +5348,11 @@ HandleSDNode::~HandleSDNode() {
DropOperands();
}
-GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, const GlobalValue *GA,
+GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, DebugLoc DL,
+ const GlobalValue *GA,
EVT VT, int64_t o, unsigned char TF)
- : SDNode(Opc, DebugLoc::getUnknownLoc(), getSDVTList(VT)),
- Offset(o), TargetFlags(TF) {
- TheGlobal = const_cast<GlobalValue*>(GA);
+ : SDNode(Opc, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
+ TheGlobal = GA;
}
MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, EVT memvt,
@@ -5331,6 +5406,8 @@ const EVT *SDNode::getValueTypeList(EVT VT) {
sys::SmartScopedLock<true> Lock(*VTMutex);
return &(*EVTs->insert(VT).first);
} else {
+ assert(VT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
+ "Value type out of range!");
return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
}
}
@@ -5497,6 +5574,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::PCMARKER: return "PCMarker";
case ISD::READCYCLECOUNTER: return "ReadCycleCounter";
case ISD::SRCVALUE: return "SrcValue";
+ case ISD::MDNODE_SDNODE: return "MDNode";
case ISD::EntryToken: return "EntryToken";
case ISD::TokenFactor: return "TokenFactor";
case ISD::AssertSext: return "AssertSext";
@@ -5520,6 +5598,8 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::LSDAADDR: return "LSDAADDR";
case ISD::EHSELECTION: return "EHSELECTION";
case ISD::EH_RETURN: return "EH_RETURN";
+ case ISD::EH_SJLJ_SETJMP: return "EH_SJLJ_SETJMP";
+ case ISD::EH_SJLJ_LONGJMP: return "EH_SJLJ_LONGJMP";
case ISD::ConstantPool: return "ConstantPool";
case ISD::ExternalSymbol: return "ExternalSymbol";
case ISD::BlockAddress: return "BlockAddress";
@@ -5560,13 +5640,16 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::FSQRT: return "fsqrt";
case ISD::FSIN: return "fsin";
case ISD::FCOS: return "fcos";
- case ISD::FPOWI: return "fpowi";
- case ISD::FPOW: return "fpow";
case ISD::FTRUNC: return "ftrunc";
case ISD::FFLOOR: return "ffloor";
case ISD::FCEIL: return "fceil";
case ISD::FRINT: return "frint";
case ISD::FNEARBYINT: return "fnearbyint";
+ case ISD::FEXP: return "fexp";
+ case ISD::FEXP2: return "fexp2";
+ case ISD::FLOG: return "flog";
+ case ISD::FLOG2: return "flog2";
+ case ISD::FLOG10: return "flog10";
// Binary operators
case ISD::ADD: return "add";
@@ -5597,7 +5680,9 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::FREM: return "frem";
case ISD::FCOPYSIGN: return "fcopysign";
case ISD::FGETSIGN: return "fgetsign";
+ case ISD::FPOW: return "fpow";
+ case ISD::FPOWI: return "fpowi";
case ISD::SETCC: return "setcc";
case ISD::VSETCC: return "vsetcc";
case ISD::SELECT: return "select";
@@ -5639,6 +5724,8 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::FP_TO_SINT: return "fp_to_sint";
case ISD::FP_TO_UINT: return "fp_to_uint";
case ISD::BIT_CONVERT: return "bit_convert";
+ case ISD::FP16_TO_FP32: return "fp16_to_fp32";
+ case ISD::FP32_TO_FP16: return "fp32_to_fp16";
case ISD::CONVERT_RNDSAT: {
switch (cast<CvtRndSatSDNode>(this)->getCvtCode()) {
@@ -5758,6 +5845,7 @@ std::string ISD::ArgFlagsTy::getArgFlagsString() {
void SDNode::dump() const { dump(0); }
void SDNode::dump(const SelectionDAG *G) const {
print(dbgs(), G);
+ dbgs() << '\n';
}
void SDNode::print_types(raw_ostream &OS, const SelectionDAG *G) const {
@@ -5781,7 +5869,7 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
for (MachineSDNode::mmo_iterator i = MN->memoperands_begin(),
e = MN->memoperands_end(); i != e; ++i) {
OS << **i;
- if (next(i) != e)
+ if (llvm::next(i) != e)
OS << " ";
}
OS << ">";
@@ -5863,6 +5951,11 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
OS << "<" << M->getValue() << ">";
else
OS << "<null>";
+ } else if (const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(this)) {
+ if (MD->getMD())
+ OS << "<" << MD->getMD() << ">";
+ else
+ OS << "<null>";
} else if (const VTSDNode *N = dyn_cast<VTSDNode>(this)) {
OS << ":" << N->getVT().getEVTString();
}
@@ -5911,9 +6004,24 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
if (G)
if (unsigned Order = G->GetOrdering(this))
OS << " [ORD=" << Order << ']';
-
+
if (getNodeId() != -1)
OS << " [ID=" << getNodeId() << ']';
+
+ DebugLoc dl = getDebugLoc();
+ if (G && !dl.isUnknown()) {
+ DIScope
+ Scope(dl.getScope(G->getMachineFunction().getFunction()->getContext()));
+ OS << " dbg:";
+ // Omit the directory, since it's usually long and uninteresting.
+ if (Scope.Verify())
+ OS << Scope.getFilename();
+ else
+ OS << "<unknown>";
+ OS << ':' << dl.getLine();
+ if (dl.getCol() != 0)
+ OS << ':' << dl.getCol();
+ }
}
void SDNode::print(raw_ostream &OS, const SelectionDAG *G) const {
@@ -6000,7 +6108,7 @@ SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
unsigned i;
for (i= 0; i != NE; ++i) {
- for (unsigned j = 0; j != N->getNumOperands(); ++j) {
+ for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
SDValue Operand = N->getOperand(j);
EVT OperandVT = Operand.getValueType();
if (OperandVT.isVector()) {
@@ -6078,8 +6186,8 @@ bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
return true;
}
- GlobalValue *GV1 = NULL;
- GlobalValue *GV2 = NULL;
+ const GlobalValue *GV1 = NULL;
+ const GlobalValue *GV2 = NULL;
int64_t Offset1 = 0;
int64_t Offset2 = 0;
bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
@@ -6094,10 +6202,22 @@ bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
/// it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
// If this is a GlobalAddress + cst, return the alignment.
- GlobalValue *GV;
+ const GlobalValue *GV;
int64_t GVOffset = 0;
- if (TLI.isGAPlusOffset(Ptr.getNode(), GV, GVOffset))
- return MinAlign(GV->getAlignment(), GVOffset);
+ if (TLI.isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
+ // If GV has specified alignment, then use it. Otherwise, use the preferred
+ // alignment.
+ unsigned Align = GV->getAlignment();
+ if (!Align) {
+ if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) {
+ if (GVar->hasInitializer()) {
+ const TargetData *TD = TLI.getTargetData();
+ Align = TD->getPreferredAlignment(GVar);
+ }
+ }
+ }
+ return MinAlign(Align, GVOffset);
+ }
// If this is a direct reference to a stack slot, use information about the
// stack slot's alignment.
@@ -6117,23 +6237,6 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
FrameOffset);
- if (MFI.isFixedObjectIndex(FrameIdx)) {
- int64_t ObjectOffset = MFI.getObjectOffset(FrameIdx) + FrameOffset;
-
- // The alignment of the frame index can be determined from its offset from
- // the incoming frame position. If the frame object is at offset 32 and
- // the stack is guaranteed to be 16-byte aligned, then we know that the
- // object is 16-byte aligned.
- unsigned StackAlign = getTarget().getFrameInfo()->getStackAlignment();
- unsigned Align = MinAlign(ObjectOffset, StackAlign);
-
- // Finally, the frame object itself may have a known alignment. Factor
- // the alignment + offset into a new alignment. For example, if we know
- // the FI is 8 byte aligned, but the pointer is 4 off, we really have a
- // 4-byte alignment of the resultant pointer. Likewise align 4 + 4-byte
- // offset = 4-byte alignment, align 4 + 1-byte offset = align 1, etc.
- return std::max(Align, FIInfoAlign);
- }
return FIInfoAlign;
}
@@ -6251,8 +6354,8 @@ bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
if (OpVal.getOpcode() == ISD::UNDEF)
SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
- SplatValue |= (APInt(CN->getAPIntValue()).zextOrTrunc(EltBitSize).
- zextOrTrunc(sz) << BitPos);
+ SplatValue |= APInt(CN->getAPIntValue()).zextOrTrunc(EltBitSize).
+ zextOrTrunc(sz) << BitPos;
else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) <<BitPos;
else
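A minimal standalone sketch (illustrative only, not part of the patch) of the alignment arithmetic behind the InferPtrAlignment hunk above: once a base pointer with a known power-of-two alignment is offset, the alignment still guaranteed is the largest power of two dividing both the base alignment and the offset, which mirrors what MinAlign(Align, GVOffset) is used for there. The helper name below is hypothetical and does not exist in LLVM:

    #include <cstdint>

    // Largest power-of-two alignment still guaranteed for (base + Offset)
    // when the base itself is BaseAlign-aligned (BaseAlign a power of two).
    static uint64_t min_align_with_offset(uint64_t BaseAlign, int64_t Offset) {
      // Take the lowest set bit of (BaseAlign | Offset); two's complement
      // preserves the trailing zeros of a negative offset, so a plain cast
      // is enough.
      uint64_t Bits = BaseAlign | static_cast<uint64_t>(Offset);
      return Bits ? (Bits & (~Bits + 1)) : BaseAlign;
    }

    // Examples: base aligned to 16, offset 4  -> 4-byte alignment
    //           base aligned to 16, offset 32 -> 16-byte alignment

The same reasoning explains why the change can safely fall back to the target's preferred alignment for a global with no explicit alignment: whatever power of two it settles on is still clamped against GVOffset before being returned.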
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 5ee2253..e657445 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -12,8 +12,8 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "isel"
+#include "SDNodeDbgValue.h"
#include "SelectionDAGBuilder.h"
-#include "FunctionLoweringInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
@@ -27,8 +27,11 @@
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
+#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -39,7 +42,6 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/DwarfWriter.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
@@ -68,129 +70,29 @@ LimitFPPrecision("limit-float-precision",
cl::location(LimitFloatPrecision),
cl::init(0));
-namespace {
- /// RegsForValue - This struct represents the registers (physical or virtual)
- /// that a particular set of values is assigned, and the type information
- /// about the value. The most common situation is to represent one value at a
- /// time, but struct or array values are handled element-wise as multiple
- /// values. The splitting of aggregates is performed recursively, so that we
- /// never have aggregate-typed registers. The values at this point do not
- /// necessarily have legal types, so each value may require one or more
- /// registers of some legal type.
- ///
- struct RegsForValue {
- /// TLI - The TargetLowering object.
- ///
- const TargetLowering *TLI;
-
- /// ValueVTs - The value types of the values, which may not be legal, and
- /// may need be promoted or synthesized from one or more registers.
- ///
- SmallVector<EVT, 4> ValueVTs;
-
- /// RegVTs - The value types of the registers. This is the same size as
- /// ValueVTs and it records, for each value, what the type of the assigned
- /// register or registers are. (Individual values are never synthesized
- /// from more than one type of register.)
- ///
- /// With virtual registers, the contents of RegVTs is redundant with TLI's
- /// getRegisterType member function, however when with physical registers
- /// it is necessary to have a separate record of the types.
- ///
- SmallVector<EVT, 4> RegVTs;
-
- /// Regs - This list holds the registers assigned to the values.
- /// Each legal or promoted value requires one register, and each
- /// expanded value requires multiple registers.
- ///
- SmallVector<unsigned, 4> Regs;
-
- RegsForValue() : TLI(0) {}
-
- RegsForValue(const TargetLowering &tli,
- const SmallVector<unsigned, 4> &regs,
- EVT regvt, EVT valuevt)
- : TLI(&tli), ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
- RegsForValue(const TargetLowering &tli,
- const SmallVector<unsigned, 4> &regs,
- const SmallVector<EVT, 4> &regvts,
- const SmallVector<EVT, 4> &valuevts)
- : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
- RegsForValue(LLVMContext &Context, const TargetLowering &tli,
- unsigned Reg, const Type *Ty) : TLI(&tli) {
- ComputeValueVTs(tli, Ty, ValueVTs);
-
- for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
- EVT ValueVT = ValueVTs[Value];
- unsigned NumRegs = TLI->getNumRegisters(Context, ValueVT);
- EVT RegisterVT = TLI->getRegisterType(Context, ValueVT);
- for (unsigned i = 0; i != NumRegs; ++i)
- Regs.push_back(Reg + i);
- RegVTs.push_back(RegisterVT);
- Reg += NumRegs;
- }
- }
-
- /// areValueTypesLegal - Return true if types of all the values are legal.
- bool areValueTypesLegal() {
- for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
- EVT RegisterVT = RegVTs[Value];
- if (!TLI->isTypeLegal(RegisterVT))
- return false;
- }
- return true;
- }
-
-
- /// append - Add the specified values to this one.
- void append(const RegsForValue &RHS) {
- TLI = RHS.TLI;
- ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
- RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
- Regs.append(RHS.Regs.begin(), RHS.Regs.end());
- }
-
-
- /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
- /// this value and returns the result as a ValueVTs value. This uses
- /// Chain/Flag as the input and updates them for the output Chain/Flag.
- /// If the Flag pointer is NULL, no flag is used.
- SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
- SDValue &Chain, SDValue *Flag) const;
-
- /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
- /// specified value into the registers specified by this object. This uses
- /// Chain/Flag as the input and updates them for the output Chain/Flag.
- /// If the Flag pointer is NULL, no flag is used.
- void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
- SDValue &Chain, SDValue *Flag) const;
-
- /// AddInlineAsmOperands - Add this value to the specified inlineasm node
- /// operand list. This adds the code marker, matching input operand index
- /// (if applicable), and includes the number of values added into it.
- void AddInlineAsmOperands(unsigned Code,
- bool HasMatching, unsigned MatchingIdx,
- SelectionDAG &DAG,
- std::vector<SDValue> &Ops) const;
- };
-}
-
+static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
+ const SDValue *Parts, unsigned NumParts,
+ EVT PartVT, EVT ValueVT);
+
/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
/// larger then ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
-static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
+static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
const SDValue *Parts,
unsigned NumParts, EVT PartVT, EVT ValueVT,
ISD::NodeType AssertOp = ISD::DELETED_NODE) {
+ if (ValueVT.isVector())
+ return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT);
+
assert(NumParts > 0 && "No parts to assemble!");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue Val = Parts[0];
if (NumParts > 1) {
// Assemble the value from multiple parts.
- if (!ValueVT.isVector() && ValueVT.isInteger()) {
+ if (ValueVT.isInteger()) {
unsigned PartBits = PartVT.getSizeInBits();
unsigned ValueBits = ValueVT.getSizeInBits();
@@ -205,25 +107,25 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
if (RoundParts > 2) {
- Lo = getCopyFromParts(DAG, dl, Parts, RoundParts / 2,
+ Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
PartVT, HalfVT);
- Hi = getCopyFromParts(DAG, dl, Parts + RoundParts / 2,
+ Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
RoundParts / 2, PartVT, HalfVT);
} else {
- Lo = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[0]);
- Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[1]);
+ Lo = DAG.getNode(ISD::BIT_CONVERT, DL, HalfVT, Parts[0]);
+ Hi = DAG.getNode(ISD::BIT_CONVERT, DL, HalfVT, Parts[1]);
}
if (TLI.isBigEndian())
std::swap(Lo, Hi);
- Val = DAG.getNode(ISD::BUILD_PAIR, dl, RoundVT, Lo, Hi);
+ Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
if (RoundParts < NumParts) {
// Assemble the trailing non-power-of-2 part.
unsigned OddParts = NumParts - RoundParts;
EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
- Hi = getCopyFromParts(DAG, dl,
+ Hi = getCopyFromParts(DAG, DL,
Parts + RoundParts, OddParts, PartVT, OddVT);
// Combine the round and odd parts.
@@ -231,68 +133,29 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
if (TLI.isBigEndian())
std::swap(Lo, Hi);
EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
- Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi);
- Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi,
+ Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
+ Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
DAG.getConstant(Lo.getValueType().getSizeInBits(),
TLI.getPointerTy()));
- Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, TotalVT, Lo);
- Val = DAG.getNode(ISD::OR, dl, TotalVT, Lo, Hi);
- }
- } else if (ValueVT.isVector()) {
- // Handle a multi-element vector.
- EVT IntermediateVT, RegisterVT;
- unsigned NumIntermediates;
- unsigned NumRegs =
- TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
- NumIntermediates, RegisterVT);
- assert(NumRegs == NumParts
- && "Part count doesn't match vector breakdown!");
- NumParts = NumRegs; // Silence a compiler warning.
- assert(RegisterVT == PartVT
- && "Part type doesn't match vector breakdown!");
- assert(RegisterVT == Parts[0].getValueType() &&
- "Part type doesn't match part!");
-
- // Assemble the parts into intermediate operands.
- SmallVector<SDValue, 8> Ops(NumIntermediates);
- if (NumIntermediates == NumParts) {
- // If the register was not expanded, truncate or copy the value,
- // as appropriate.
- for (unsigned i = 0; i != NumParts; ++i)
- Ops[i] = getCopyFromParts(DAG, dl, &Parts[i], 1,
- PartVT, IntermediateVT);
- } else if (NumParts > 0) {
- // If the intermediate type was expanded, build the intermediate
- // operands from the parts.
- assert(NumParts % NumIntermediates == 0 &&
- "Must expand into a divisible number of parts!");
- unsigned Factor = NumParts / NumIntermediates;
- for (unsigned i = 0; i != NumIntermediates; ++i)
- Ops[i] = getCopyFromParts(DAG, dl, &Parts[i * Factor], Factor,
- PartVT, IntermediateVT);
+ Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
+ Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
}
-
- // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
- // intermediate operands.
- Val = DAG.getNode(IntermediateVT.isVector() ?
- ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, dl,
- ValueVT, &Ops[0], NumIntermediates);
} else if (PartVT.isFloatingPoint()) {
// FP split into multiple FP parts (for ppcf128)
assert(ValueVT == EVT(MVT::ppcf128) && PartVT == EVT(MVT::f64) &&
"Unexpected split");
SDValue Lo, Hi;
- Lo = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[0]);
- Hi = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[1]);
+ Lo = DAG.getNode(ISD::BIT_CONVERT, DL, EVT(MVT::f64), Parts[0]);
+ Hi = DAG.getNode(ISD::BIT_CONVERT, DL, EVT(MVT::f64), Parts[1]);
if (TLI.isBigEndian())
std::swap(Lo, Hi);
- Val = DAG.getNode(ISD::BUILD_PAIR, dl, ValueVT, Lo, Hi);
+ Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
} else {
// FP split into integer parts (soft fp)
assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
!PartVT.isVector() && "Unexpected split");
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
- Val = getCopyFromParts(DAG, dl, Parts, NumParts, PartVT, IntVT);
+ Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT);
}
}
@@ -302,219 +165,315 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
if (PartVT == ValueVT)
return Val;
- if (PartVT.isVector()) {
- assert(ValueVT.isVector() && "Unknown vector conversion!");
- return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
- }
-
- if (ValueVT.isVector()) {
- assert(ValueVT.getVectorElementType() == PartVT &&
- ValueVT.getVectorNumElements() == 1 &&
- "Only trivial scalar-to-vector conversions should get here!");
- return DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val);
- }
-
- if (PartVT.isInteger() &&
- ValueVT.isInteger()) {
+ if (PartVT.isInteger() && ValueVT.isInteger()) {
if (ValueVT.bitsLT(PartVT)) {
// For a truncate, see if we have any information to
// indicate whether the truncated bits will always be
// zero or sign-extension.
if (AssertOp != ISD::DELETED_NODE)
- Val = DAG.getNode(AssertOp, dl, PartVT, Val,
+ Val = DAG.getNode(AssertOp, DL, PartVT, Val,
DAG.getValueType(ValueVT));
- return DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
- } else {
- return DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val);
+ return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
}
+ return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
}
if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
- if (ValueVT.bitsLT(Val.getValueType())) {
- // FP_ROUND's are always exact here.
- return DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val,
+ // FP_ROUND's are always exact here.
+ if (ValueVT.bitsLT(Val.getValueType()))
+ return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val,
DAG.getIntPtrConstant(1));
- }
- return DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val);
+ return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
}
if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
- return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
+ return DAG.getNode(ISD::BIT_CONVERT, DL, ValueVT, Val);
llvm_unreachable("Unknown mismatch!");
return SDValue();
}
+/// getCopyFromParts - Create a value that contains the specified legal parts
+/// combined into the value they represent. If the parts combine to a type
+/// larger then ValueVT then AssertOp can be used to specify whether the extra
+/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
+/// (ISD::AssertSext).
+static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
+ const SDValue *Parts, unsigned NumParts,
+ EVT PartVT, EVT ValueVT) {
+ assert(ValueVT.isVector() && "Not a vector value");
+ assert(NumParts > 0 && "No parts to assemble!");
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ SDValue Val = Parts[0];
+
+ // Handle a multi-element vector.
+ if (NumParts > 1) {
+ EVT IntermediateVT, RegisterVT;
+ unsigned NumIntermediates;
+ unsigned NumRegs =
+ TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
+ NumIntermediates, RegisterVT);
+ assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
+ NumParts = NumRegs; // Silence a compiler warning.
+ assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
+ assert(RegisterVT == Parts[0].getValueType() &&
+ "Part type doesn't match part!");
+
+ // Assemble the parts into intermediate operands.
+ SmallVector<SDValue, 8> Ops(NumIntermediates);
+ if (NumIntermediates == NumParts) {
+ // If the register was not expanded, truncate or copy the value,
+ // as appropriate.
+ for (unsigned i = 0; i != NumParts; ++i)
+ Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
+ PartVT, IntermediateVT);
+ } else if (NumParts > 0) {
+ // If the intermediate type was expanded, build the intermediate
+ // operands from the parts.
+ assert(NumParts % NumIntermediates == 0 &&
+ "Must expand into a divisible number of parts!");
+ unsigned Factor = NumParts / NumIntermediates;
+ for (unsigned i = 0; i != NumIntermediates; ++i)
+ Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
+ PartVT, IntermediateVT);
+ }
+
+ // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
+ // intermediate operands.
+ Val = DAG.getNode(IntermediateVT.isVector() ?
+ ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, DL,
+ ValueVT, &Ops[0], NumIntermediates);
+ }
+
+ // There is now one part, held in Val. Correct it to match ValueVT.
+ PartVT = Val.getValueType();
+
+ if (PartVT == ValueVT)
+ return Val;
+
+ if (PartVT.isVector()) {
+ // If the element type of the source/dest vectors are the same, but the
+ // parts vector has more elements than the value vector, then we have a
+ // vector widening case (e.g. <2 x float> -> <4 x float>). Extract the
+ // elements we want.
+ if (PartVT.getVectorElementType() == ValueVT.getVectorElementType()) {
+ assert(PartVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
+ "Cannot narrow, it would be a lossy transformation");
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
+ DAG.getIntPtrConstant(0));
+ }
+
+ // Vector/Vector bitcast.
+ return DAG.getNode(ISD::BIT_CONVERT, DL, ValueVT, Val);
+ }
+
+ assert(ValueVT.getVectorElementType() == PartVT &&
+ ValueVT.getVectorNumElements() == 1 &&
+ "Only trivial scalar-to-vector conversions should get here!");
+ return DAG.getNode(ISD::BUILD_VECTOR, DL, ValueVT, Val);
+}
+
+
+
+
+static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc dl,
+ SDValue Val, SDValue *Parts, unsigned NumParts,
+ EVT PartVT);
+
/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
-static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl,
+static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
SDValue Val, SDValue *Parts, unsigned NumParts,
EVT PartVT,
ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- EVT PtrVT = TLI.getPointerTy();
EVT ValueVT = Val.getValueType();
+
+ // Handle the vector case separately.
+ if (ValueVT.isVector())
+ return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT);
+
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned PartBits = PartVT.getSizeInBits();
unsigned OrigNumParts = NumParts;
assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
- if (!NumParts)
+ if (NumParts == 0)
return;
- if (!ValueVT.isVector()) {
- if (PartVT == ValueVT) {
- assert(NumParts == 1 && "No-op copy with multiple parts!");
- Parts[0] = Val;
- return;
- }
-
- if (NumParts * PartBits > ValueVT.getSizeInBits()) {
- // If the parts cover more bits than the value has, promote the value.
- if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
- assert(NumParts == 1 && "Do not know what to promote to!");
- Val = DAG.getNode(ISD::FP_EXTEND, dl, PartVT, Val);
- } else if (PartVT.isInteger() && ValueVT.isInteger()) {
- ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
- Val = DAG.getNode(ExtendKind, dl, ValueVT, Val);
- } else {
- llvm_unreachable("Unknown mismatch!");
- }
- } else if (PartBits == ValueVT.getSizeInBits()) {
- // Different types of the same size.
- assert(NumParts == 1 && PartVT != ValueVT);
- Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
- } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
- // If the parts cover less bits than value has, truncate the value.
- if (PartVT.isInteger() && ValueVT.isInteger()) {
- ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
- Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
- } else {
- llvm_unreachable("Unknown mismatch!");
- }
- }
-
- // The value may have changed - recompute ValueVT.
- ValueVT = Val.getValueType();
- assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
- "Failed to tile the value with PartVT!");
-
- if (NumParts == 1) {
- assert(PartVT == ValueVT && "Type conversion failed!");
- Parts[0] = Val;
- return;
- }
+ assert(!ValueVT.isVector() && "Vector case handled elsewhere");
+ if (PartVT == ValueVT) {
+ assert(NumParts == 1 && "No-op copy with multiple parts!");
+ Parts[0] = Val;
+ return;
+ }
- // Expand the value into multiple parts.
- if (NumParts & (NumParts - 1)) {
- // The number of parts is not a power of 2. Split off and copy the tail.
+ if (NumParts * PartBits > ValueVT.getSizeInBits()) {
+ // If the parts cover more bits than the value has, promote the value.
+ if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
+ assert(NumParts == 1 && "Do not know what to promote to!");
+ Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
+ } else {
assert(PartVT.isInteger() && ValueVT.isInteger() &&
- "Do not know what to expand to!");
- unsigned RoundParts = 1 << Log2_32(NumParts);
- unsigned RoundBits = RoundParts * PartBits;
- unsigned OddParts = NumParts - RoundParts;
- SDValue OddVal = DAG.getNode(ISD::SRL, dl, ValueVT, Val,
- DAG.getConstant(RoundBits,
- TLI.getPointerTy()));
- getCopyToParts(DAG, dl, OddVal, Parts + RoundParts,
- OddParts, PartVT);
-
- if (TLI.isBigEndian())
- // The odd parts were reversed by getCopyToParts - unreverse them.
- std::reverse(Parts + RoundParts, Parts + NumParts);
-
- NumParts = RoundParts;
+ "Unknown mismatch!");
ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
- Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
+ Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
}
+ } else if (PartBits == ValueVT.getSizeInBits()) {
+ // Different types of the same size.
+ assert(NumParts == 1 && PartVT != ValueVT);
+ Val = DAG.getNode(ISD::BIT_CONVERT, DL, PartVT, Val);
+ } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
+ // If the parts cover less bits than value has, truncate the value.
+ assert(PartVT.isInteger() && ValueVT.isInteger() &&
+ "Unknown mismatch!");
+ ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
+ Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
+ }
+
+ // The value may have changed - recompute ValueVT.
+ ValueVT = Val.getValueType();
+ assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
+ "Failed to tile the value with PartVT!");
- // The number of parts is a power of 2. Repeatedly bisect the value using
- // EXTRACT_ELEMENT.
- Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl,
- EVT::getIntegerVT(*DAG.getContext(),
- ValueVT.getSizeInBits()),
- Val);
-
- for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
- for (unsigned i = 0; i < NumParts; i += StepSize) {
- unsigned ThisBits = StepSize * PartBits / 2;
- EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
- SDValue &Part0 = Parts[i];
- SDValue &Part1 = Parts[i+StepSize/2];
-
- Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
- ThisVT, Part0,
- DAG.getConstant(1, PtrVT));
- Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
- ThisVT, Part0,
- DAG.getConstant(0, PtrVT));
-
- if (ThisBits == PartBits && ThisVT != PartVT) {
- Part0 = DAG.getNode(ISD::BIT_CONVERT, dl,
- PartVT, Part0);
- Part1 = DAG.getNode(ISD::BIT_CONVERT, dl,
- PartVT, Part1);
- }
+ if (NumParts == 1) {
+ assert(PartVT == ValueVT && "Type conversion failed!");
+ Parts[0] = Val;
+ return;
+ }
+
+ // Expand the value into multiple parts.
+ if (NumParts & (NumParts - 1)) {
+ // The number of parts is not a power of 2. Split off and copy the tail.
+ assert(PartVT.isInteger() && ValueVT.isInteger() &&
+ "Do not know what to expand to!");
+ unsigned RoundParts = 1 << Log2_32(NumParts);
+ unsigned RoundBits = RoundParts * PartBits;
+ unsigned OddParts = NumParts - RoundParts;
+ SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
+ DAG.getIntPtrConstant(RoundBits));
+ getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT);
+
+ if (TLI.isBigEndian())
+ // The odd parts were reversed by getCopyToParts - unreverse them.
+ std::reverse(Parts + RoundParts, Parts + NumParts);
+
+ NumParts = RoundParts;
+ ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
+ Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
+ }
+
+ // The number of parts is a power of 2. Repeatedly bisect the value using
+ // EXTRACT_ELEMENT.
+ Parts[0] = DAG.getNode(ISD::BIT_CONVERT, DL,
+ EVT::getIntegerVT(*DAG.getContext(),
+ ValueVT.getSizeInBits()),
+ Val);
+
+ for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
+ for (unsigned i = 0; i < NumParts; i += StepSize) {
+ unsigned ThisBits = StepSize * PartBits / 2;
+ EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
+ SDValue &Part0 = Parts[i];
+ SDValue &Part1 = Parts[i+StepSize/2];
+
+ Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
+ ThisVT, Part0, DAG.getIntPtrConstant(1));
+ Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
+ ThisVT, Part0, DAG.getIntPtrConstant(0));
+
+ if (ThisBits == PartBits && ThisVT != PartVT) {
+ Part0 = DAG.getNode(ISD::BIT_CONVERT, DL, PartVT, Part0);
+ Part1 = DAG.getNode(ISD::BIT_CONVERT, DL, PartVT, Part1);
}
}
+ }
- if (TLI.isBigEndian())
- std::reverse(Parts, Parts + OrigNumParts);
+ if (TLI.isBigEndian())
+ std::reverse(Parts, Parts + OrigNumParts);
+}
- return;
- }
- // Vector ValueVT.
+/// getCopyToPartsVector - Create a series of nodes that contain the specified
+/// value split into legal parts.
+static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL,
+ SDValue Val, SDValue *Parts, unsigned NumParts,
+ EVT PartVT) {
+ EVT ValueVT = Val.getValueType();
+ assert(ValueVT.isVector() && "Not a vector");
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+
if (NumParts == 1) {
- if (PartVT != ValueVT) {
- if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
- Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
- } else {
- assert(ValueVT.getVectorElementType() == PartVT &&
- ValueVT.getVectorNumElements() == 1 &&
- "Only trivial vector-to-scalar conversions should get here!");
- Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- PartVT, Val,
- DAG.getConstant(0, PtrVT));
- }
+ if (PartVT == ValueVT) {
+ // Nothing to do.
+ } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
+ // Bitconvert vector->vector case.
+ Val = DAG.getNode(ISD::BIT_CONVERT, DL, PartVT, Val);
+ } else if (PartVT.isVector() &&
+ PartVT.getVectorElementType() == ValueVT.getVectorElementType()&&
+ PartVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
+ EVT ElementVT = PartVT.getVectorElementType();
+ // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
+ // undef elements.
+ SmallVector<SDValue, 16> Ops;
+ for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
+ Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
+ ElementVT, Val, DAG.getIntPtrConstant(i)));
+
+ for (unsigned i = ValueVT.getVectorNumElements(),
+ e = PartVT.getVectorNumElements(); i != e; ++i)
+ Ops.push_back(DAG.getUNDEF(ElementVT));
+
+ Val = DAG.getNode(ISD::BUILD_VECTOR, DL, PartVT, &Ops[0], Ops.size());
+
+ // FIXME: Use CONCAT for 2x -> 4x.
+
+ //SDValue UndefElts = DAG.getUNDEF(VectorTy);
+ //Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts);
+ } else {
+ // Vector -> scalar conversion.
+ assert(ValueVT.getVectorElementType() == PartVT &&
+ ValueVT.getVectorNumElements() == 1 &&
+ "Only trivial vector-to-scalar conversions should get here!");
+ Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
+ PartVT, Val, DAG.getIntPtrConstant(0));
}
-
+
Parts[0] = Val;
return;
}
-
+
// Handle a multi-element vector.
EVT IntermediateVT, RegisterVT;
unsigned NumIntermediates;
unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
- IntermediateVT, NumIntermediates, RegisterVT);
+ IntermediateVT,
+ NumIntermediates, RegisterVT);
unsigned NumElements = ValueVT.getVectorNumElements();
-
+
assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
NumParts = NumRegs; // Silence a compiler warning.
assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
-
+
// Split the vector into intermediate operands.
SmallVector<SDValue, 8> Ops(NumIntermediates);
for (unsigned i = 0; i != NumIntermediates; ++i) {
if (IntermediateVT.isVector())
- Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
+ Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
IntermediateVT, Val,
- DAG.getConstant(i * (NumElements / NumIntermediates),
- PtrVT));
+ DAG.getIntPtrConstant(i * (NumElements / NumIntermediates)));
else
- Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- IntermediateVT, Val,
- DAG.getConstant(i, PtrVT));
+ Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
+ IntermediateVT, Val, DAG.getIntPtrConstant(i));
}
-
+
// Split the intermediate operands into legal parts.
if (NumParts == NumIntermediates) {
// If the register was not expanded, promote or copy the value,
// as appropriate.
for (unsigned i = 0; i != NumParts; ++i)
- getCopyToParts(DAG, dl, Ops[i], &Parts[i], 1, PartVT);
+ getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT);
} else if (NumParts > 0) {
// If the intermediate type was expanded, split each the value into
// legal parts.
@@ -522,18 +481,281 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl,
"Must expand into a divisible number of parts!");
unsigned Factor = NumParts / NumIntermediates;
for (unsigned i = 0; i != NumIntermediates; ++i)
- getCopyToParts(DAG, dl, Ops[i], &Parts[i*Factor], Factor, PartVT);
+ getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT);
}
}
+
+
+namespace {
+ /// RegsForValue - This struct represents the registers (physical or virtual)
+ /// that a particular set of values is assigned, and the type information
+ /// about the value. The most common situation is to represent one value at a
+ /// time, but struct or array values are handled element-wise as multiple
+ /// values. The splitting of aggregates is performed recursively, so that we
+ /// never have aggregate-typed registers. The values at this point do not
+ /// necessarily have legal types, so each value may require one or more
+ /// registers of some legal type.
+ ///
+ struct RegsForValue {
+ /// ValueVTs - The value types of the values, which may not be legal, and
+ /// may need be promoted or synthesized from one or more registers.
+ ///
+ SmallVector<EVT, 4> ValueVTs;
+
+ /// RegVTs - The value types of the registers. This is the same size as
+ /// ValueVTs and it records, for each value, what the type of the assigned
+ /// register or registers are. (Individual values are never synthesized
+ /// from more than one type of register.)
+ ///
+ /// With virtual registers, the contents of RegVTs is redundant with TLI's
+ /// getRegisterType member function, however when with physical registers
+ /// it is necessary to have a separate record of the types.
+ ///
+ SmallVector<EVT, 4> RegVTs;
+
+ /// Regs - This list holds the registers assigned to the values.
+ /// Each legal or promoted value requires one register, and each
+ /// expanded value requires multiple registers.
+ ///
+ SmallVector<unsigned, 4> Regs;
+
+ RegsForValue() {}
+
+ RegsForValue(const SmallVector<unsigned, 4> &regs,
+ EVT regvt, EVT valuevt)
+ : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
+
+ RegsForValue(LLVMContext &Context, const TargetLowering &tli,
+ unsigned Reg, const Type *Ty) {
+ ComputeValueVTs(tli, Ty, ValueVTs);
+
+ for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
+ EVT ValueVT = ValueVTs[Value];
+ unsigned NumRegs = tli.getNumRegisters(Context, ValueVT);
+ EVT RegisterVT = tli.getRegisterType(Context, ValueVT);
+ for (unsigned i = 0; i != NumRegs; ++i)
+ Regs.push_back(Reg + i);
+ RegVTs.push_back(RegisterVT);
+ Reg += NumRegs;
+ }
+ }
+
+ /// areValueTypesLegal - Return true if types of all the values are legal.
+ bool areValueTypesLegal(const TargetLowering &TLI) {
+ for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
+ EVT RegisterVT = RegVTs[Value];
+ if (!TLI.isTypeLegal(RegisterVT))
+ return false;
+ }
+ return true;
+ }
+
+ /// append - Add the specified values to this one.
+ void append(const RegsForValue &RHS) {
+ ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
+ RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
+ Regs.append(RHS.Regs.begin(), RHS.Regs.end());
+ }
+
+ /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
+ /// this value and returns the result as a ValueVTs value. This uses
+ /// Chain/Flag as the input and updates them for the output Chain/Flag.
+ /// If the Flag pointer is NULL, no flag is used.
+ SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
+ DebugLoc dl,
+ SDValue &Chain, SDValue *Flag) const;
+
+ /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
+ /// specified value into the registers specified by this object. This uses
+ /// Chain/Flag as the input and updates them for the output Chain/Flag.
+ /// If the Flag pointer is NULL, no flag is used.
+ void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
+ SDValue &Chain, SDValue *Flag) const;
+
+ /// AddInlineAsmOperands - Add this value to the specified inlineasm node
+ /// operand list. This adds the code marker, matching input operand index
+ /// (if applicable), and includes the number of values added into it.
+ void AddInlineAsmOperands(unsigned Kind,
+ bool HasMatching, unsigned MatchingIdx,
+ SelectionDAG &DAG,
+ std::vector<SDValue> &Ops) const;
+ };
+}
+
+/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
+/// this value and returns the result as a ValueVT value. This uses
+/// Chain/Flag as the input and updates them for the output Chain/Flag.
+/// If the Flag pointer is NULL, no flag is used.
+SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
+ FunctionLoweringInfo &FuncInfo,
+ DebugLoc dl,
+ SDValue &Chain, SDValue *Flag) const {
+ // A Value with type {} or [0 x %t] needs no registers.
+ if (ValueVTs.empty())
+ return SDValue();
+
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+
+ // Assemble the legal parts into the final values.
+ SmallVector<SDValue, 4> Values(ValueVTs.size());
+ SmallVector<SDValue, 8> Parts;
+ for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
+ // Copy the legal parts from the registers.
+ EVT ValueVT = ValueVTs[Value];
+ unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
+ EVT RegisterVT = RegVTs[Value];
+
+ Parts.resize(NumRegs);
+ for (unsigned i = 0; i != NumRegs; ++i) {
+ SDValue P;
+ if (Flag == 0) {
+ P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
+ } else {
+ P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
+ *Flag = P.getValue(2);
+ }
+
+ Chain = P.getValue(1);
+
+ // If the source register was virtual and if we know something about it,
+ // add an assert node.
+ if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
+ RegisterVT.isInteger() && !RegisterVT.isVector()) {
+ unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
+ if (FuncInfo.LiveOutRegInfo.size() > SlotNo) {
+ const FunctionLoweringInfo::LiveOutInfo &LOI =
+ FuncInfo.LiveOutRegInfo[SlotNo];
+
+ unsigned RegSize = RegisterVT.getSizeInBits();
+ unsigned NumSignBits = LOI.NumSignBits;
+ unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
+
+ // FIXME: We capture more information than the dag can represent. For
+ // now, just use the tightest assertzext/assertsext possible.
+ bool isSExt = true;
+ EVT FromVT(MVT::Other);
+ if (NumSignBits == RegSize)
+ isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
+ else if (NumZeroBits >= RegSize-1)
+ isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
+ else if (NumSignBits > RegSize-8)
+ isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
+ else if (NumZeroBits >= RegSize-8)
+ isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
+ else if (NumSignBits > RegSize-16)
+ isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
+ else if (NumZeroBits >= RegSize-16)
+ isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
+ else if (NumSignBits > RegSize-32)
+ isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
+ else if (NumZeroBits >= RegSize-32)
+ isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
+
+ if (FromVT != MVT::Other)
+ P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
+ RegisterVT, P, DAG.getValueType(FromVT));
+ }
+ }
+
+ Parts[i] = P;
+ }
+
+ Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
+ NumRegs, RegisterVT, ValueVT);
+ Part += NumRegs;
+ Parts.clear();
+ }
+
+ return DAG.getNode(ISD::MERGE_VALUES, dl,
+ DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
+ &Values[0], ValueVTs.size());
+}
+
+/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
+/// specified value into the registers specified by this object. This uses
+/// Chain/Flag as the input and updates them for the output Chain/Flag.
+/// If the Flag pointer is NULL, no flag is used.
+void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
+ SDValue &Chain, SDValue *Flag) const {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+
+ // Get the list of the values's legal parts.
+ unsigned NumRegs = Regs.size();
+ SmallVector<SDValue, 8> Parts(NumRegs);
+ for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
+ EVT ValueVT = ValueVTs[Value];
+ unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
+ EVT RegisterVT = RegVTs[Value];
+
+ getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
+ &Parts[Part], NumParts, RegisterVT);
+ Part += NumParts;
+ }
+
+ // Copy the parts into the registers.
+ SmallVector<SDValue, 8> Chains(NumRegs);
+ for (unsigned i = 0; i != NumRegs; ++i) {
+ SDValue Part;
+ if (Flag == 0) {
+ Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
+ } else {
+ Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
+ *Flag = Part.getValue(1);
+ }
+
+ Chains[i] = Part.getValue(0);
+ }
+
+ if (NumRegs == 1 || Flag)
+ // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
+ // flagged to it. That is the CopyToReg nodes and the user are considered
+ // a single scheduling unit. If we create a TokenFactor and return it as
+ // chain, then the TokenFactor is both a predecessor (operand) of the
+ // user as well as a successor (the TF operands are flagged to the user).
+ // c1, f1 = CopyToReg
+ // c2, f2 = CopyToReg
+ // c3 = TokenFactor c1, c2
+ // ...
+ // = op c3, ..., f2
+ Chain = Chains[NumRegs-1];
+ else
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
+}
+
+/// AddInlineAsmOperands - Add this value to the specified inlineasm node
+/// operand list. This adds the code marker and includes the number of
+/// values added into it.
+void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
+ unsigned MatchingIdx,
+ SelectionDAG &DAG,
+ std::vector<SDValue> &Ops) const {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+
+ unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
+ if (HasMatching)
+ Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
+ SDValue Res = DAG.getTargetConstant(Flag, MVT::i32);
+ Ops.push_back(Res);
+
+ for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
+ unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
+ EVT RegisterVT = RegVTs[Value];
+ for (unsigned i = 0; i != NumRegs; ++i) {
+ assert(Reg < Regs.size() && "Mismatch in # registers expected");
+ Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
+ }
+ }
+}
+
void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
AA = &aa;
GFI = gfi;
TD = DAG.getTarget().getTargetData();
}
-/// clear - Clear out the curret SelectionDAG and the associated
+/// clear - Clear out the current SelectionDAG and the associated
/// state and prepare this SelectionDAGBuilder object to be used
/// for a new block. This doesn't clear out information about
/// additional blocks that are needed to complete switch lowering
@@ -541,11 +763,11 @@ void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
/// consumed.
void SelectionDAGBuilder::clear() {
NodeMap.clear();
+ UnusedArgNodeMap.clear();
PendingLoads.clear();
PendingExports.clear();
- EdgeMapping.clear();
- DAG.clear();
- CurDebugLoc = DebugLoc::getUnknownLoc();
+ DanglingDebugInfoMap.clear();
+ CurDebugLoc = DebugLoc();
HasTailCall = false;
}
@@ -612,11 +834,26 @@ void SelectionDAGBuilder::AssignOrderingToNode(const SDNode *Node) {
AssignOrderingToNode(Node->getOperand(I).getNode());
}
-void SelectionDAGBuilder::visit(Instruction &I) {
+void SelectionDAGBuilder::visit(const Instruction &I) {
+ // Set up outgoing PHI node register values before emitting the terminator.
+ if (isa<TerminatorInst>(&I))
+ HandlePHINodesInSuccessorBlocks(I.getParent());
+
+ CurDebugLoc = I.getDebugLoc();
+
visit(I.getOpcode(), I);
+
+ if (!isa<TerminatorInst>(&I) && !HasTailCall)
+ CopyToExportRegsIfNeeded(&I);
+
+ CurDebugLoc = DebugLoc();
}
-void SelectionDAGBuilder::visit(unsigned Opcode, User &I) {
+void SelectionDAGBuilder::visitPHI(const PHINode &) {
+ llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
+}
+
+void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
// Note: this doesn't use InstVisitor, because it has to work with
// ConstantExpr's in addition to instructions.
switch (Opcode) {
@@ -634,32 +871,97 @@ void SelectionDAGBuilder::visit(unsigned Opcode, User &I) {
}
}
+// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
+// generate the debug data structures now that we've seen its definition.
+void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
+ SDValue Val) {
+ DanglingDebugInfo &DDI = DanglingDebugInfoMap[V];
+ if (DDI.getDI()) {
+ const DbgValueInst *DI = DDI.getDI();
+ DebugLoc dl = DDI.getdl();
+ unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
+ MDNode *Variable = DI->getVariable();
+ uint64_t Offset = DI->getOffset();
+ SDDbgValue *SDV;
+ if (Val.getNode()) {
+ if (!EmitFuncArgumentDbgValue(V, Variable, Offset, Val)) {
+ SDV = DAG.getDbgValue(Variable, Val.getNode(),
+ Val.getResNo(), Offset, dl, DbgSDNodeOrder);
+ DAG.AddDbgValue(SDV, Val.getNode(), false);
+ }
+ } else {
+ SDV = DAG.getDbgValue(Variable, UndefValue::get(V->getType()),
+ Offset, dl, SDNodeOrder);
+ DAG.AddDbgValue(SDV, 0, false);
+ }
+ DanglingDebugInfoMap[V] = DanglingDebugInfo();
+ }
+}
+
+// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
+ // If we already have an SDValue for this value, use it. It's important
+ // to do this first, so that we don't create a CopyFromReg if we already
+ // have a regular SDValue.
+ SDValue &N = NodeMap[V];
+ if (N.getNode()) return N;
+
+ // If there's a virtual register allocated and initialized for this
+ // value, use it.
+ DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
+ if (It != FuncInfo.ValueMap.end()) {
+ unsigned InReg = It->second;
+ RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
+ SDValue Chain = DAG.getEntryNode();
+ return N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain,NULL);
+ }
+
+ // Otherwise create a new SDValue and remember it.
+ SDValue Val = getValueImpl(V);
+ NodeMap[V] = Val;
+ resolveDanglingDebugInfo(V, Val);
+ return Val;
+}
+
+/// getNonRegisterValue - Return an SDValue for the given Value, but
+/// don't look in FuncInfo.ValueMap for a virtual register.
+SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
+ // If we already have an SDValue for this value, use it.
SDValue &N = NodeMap[V];
if (N.getNode()) return N;
- if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
+ // Otherwise create a new SDValue and remember it.
+ SDValue Val = getValueImpl(V);
+ NodeMap[V] = Val;
+ resolveDanglingDebugInfo(V, Val);
+ return Val;
+}
+
+/// getValueImpl - Helper function for getValue and getNonRegisterValue.
+/// Create an SDValue for the given value.
+SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
+ if (const Constant *C = dyn_cast<Constant>(V)) {
EVT VT = TLI.getValueType(V->getType(), true);
- if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
- return N = DAG.getConstant(*CI, VT);
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
+ return DAG.getConstant(*CI, VT);
- if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
- return N = DAG.getGlobalAddress(GV, VT);
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
+ return DAG.getGlobalAddress(GV, getCurDebugLoc(), VT);
if (isa<ConstantPointerNull>(C))
- return N = DAG.getConstant(0, TLI.getPointerTy());
+ return DAG.getConstant(0, TLI.getPointerTy());
- if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
- return N = DAG.getConstantFP(*CFP, VT);
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
+ return DAG.getConstantFP(*CFP, VT);
if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
- return N = DAG.getUNDEF(VT);
+ return DAG.getUNDEF(VT);
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
visit(CE->getOpcode(), *CE);
SDValue N1 = NodeMap[V];
- assert(N1.getNode() && "visit didn't populate the ValueMap!");
+ assert(N1.getNode() && "visit didn't populate the NodeMap!");
return N1;
}
@@ -704,7 +1006,7 @@ SDValue SelectionDAGBuilder::getValue(const Value *V) {
getCurDebugLoc());
}
- if (BlockAddress *BA = dyn_cast<BlockAddress>(C))
+ if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
return DAG.getBlockAddress(BA, VT);
const VectorType *VecTy = cast<VectorType>(V->getType());
@@ -713,7 +1015,7 @@ SDValue SelectionDAGBuilder::getValue(const Value *V) {
// Now that we know the number and type of the elements, get that number of
// elements into the Ops array based on what kind of constant it is.
SmallVector<SDValue, 16> Ops;
- if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
+ if (const ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
for (unsigned i = 0; i != NumElements; ++i)
Ops.push_back(getValue(CP->getOperand(i)));
} else {
@@ -742,82 +1044,25 @@ SDValue SelectionDAGBuilder::getValue(const Value *V) {
return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
}
- unsigned InReg = FuncInfo.ValueMap[V];
- assert(InReg && "Value not in map!");
-
- RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
- SDValue Chain = DAG.getEntryNode();
- return RFV.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, NULL);
-}
-
-/// Get the EVTs and ArgFlags collections that represent the legalized return
-/// type of the given function. This does not require a DAG or a return value,
-/// and is suitable for use before any DAGs for the function are constructed.
-static void getReturnInfo(const Type* ReturnType,
- Attributes attr, SmallVectorImpl<EVT> &OutVTs,
- SmallVectorImpl<ISD::ArgFlagsTy> &OutFlags,
- TargetLowering &TLI,
- SmallVectorImpl<uint64_t> *Offsets = 0) {
- SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(TLI, ReturnType, ValueVTs);
- unsigned NumValues = ValueVTs.size();
- if (NumValues == 0) return;
- unsigned Offset = 0;
-
- for (unsigned j = 0, f = NumValues; j != f; ++j) {
- EVT VT = ValueVTs[j];
- ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
-
- if (attr & Attribute::SExt)
- ExtendKind = ISD::SIGN_EXTEND;
- else if (attr & Attribute::ZExt)
- ExtendKind = ISD::ZERO_EXTEND;
-
- // FIXME: C calling convention requires the return type to be promoted to
- // at least 32-bit. But this is not necessary for non-C calling
- // conventions. The frontend should mark functions whose return values
- // require promoting with signext or zeroext attributes.
- if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
- EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
- if (VT.bitsLT(MinVT))
- VT = MinVT;
- }
-
- unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
- EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
- unsigned PartSize = TLI.getTargetData()->getTypeAllocSize(
- PartVT.getTypeForEVT(ReturnType->getContext()));
-
- // 'inreg' on function refers to return value
- ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
- if (attr & Attribute::InReg)
- Flags.setInReg();
-
- // Propagate extension type if any
- if (attr & Attribute::SExt)
- Flags.setSExt();
- else if (attr & Attribute::ZExt)
- Flags.setZExt();
-
- for (unsigned i = 0; i < NumParts; ++i) {
- OutVTs.push_back(PartVT);
- OutFlags.push_back(Flags);
- if (Offsets)
- {
- Offsets->push_back(Offset);
- Offset += PartSize;
- }
- }
+ // If this is an instruction which fast-isel has deferred, select it now.
+ if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
+ unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
+ RegsForValue RFV(*DAG.getContext(), TLI, InReg, Inst->getType());
+ SDValue Chain = DAG.getEntryNode();
+ return RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL);
}
+
+ llvm_unreachable("Can't get register for value!");
+ return SDValue();
}
-void SelectionDAGBuilder::visitRet(ReturnInst &I) {
+void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
SDValue Chain = getControlRoot();
SmallVector<ISD::OutputArg, 8> Outs;
- FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
+ SmallVector<SDValue, 8> OutVals;
- if (!FLI.CanLowerReturn) {
- unsigned DemoteReg = FLI.DemoteRegister;
+ if (!FuncInfo.CanLowerReturn) {
+ unsigned DemoteReg = FuncInfo.DemoteRegister;
const Function *F = I.getParent()->getParent();
// Emit a store of the return value through the virtual register.
@@ -836,10 +1081,10 @@ void SelectionDAGBuilder::visitRet(ReturnInst &I) {
unsigned NumValues = ValueVTs.size();
SmallVector<SDValue, 4> Chains(NumValues);
- EVT PtrVT = PtrValueVTs[0];
for (unsigned i = 0; i != NumValues; ++i) {
- SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, RetPtr,
- DAG.getConstant(Offsets[i], PtrVT));
+ SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(),
+ RetPtr.getValueType(), RetPtr,
+ DAG.getIntPtrConstant(Offsets[i]));
Chains[i] =
DAG.getStore(Chain, getCurDebugLoc(),
SDValue(RetOp.getNode(), RetOp.getResNo() + i),
@@ -893,8 +1138,11 @@ void SelectionDAGBuilder::visitRet(ReturnInst &I) {
else if (F->paramHasAttr(0, Attribute::ZExt))
Flags.setZExt();
- for (unsigned i = 0; i < NumParts; ++i)
- Outs.push_back(ISD::OutputArg(Flags, Parts[i], /*isfixed=*/true));
+ for (unsigned i = 0; i < NumParts; ++i) {
+ Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
+ /*isfixed=*/true));
+ OutVals.push_back(Parts[i]);
+ }
}
}
}
@@ -903,7 +1151,7 @@ void SelectionDAGBuilder::visitRet(ReturnInst &I) {
CallingConv::ID CallConv =
DAG.getMachineFunction().getFunction()->getCallingConv();
Chain = TLI.LowerReturn(Chain, CallConv, isVarArg,
- Outs, getCurDebugLoc(), DAG);
+ Outs, OutVals, getCurDebugLoc(), DAG);
// Verify that the target's LowerReturn behaved as expected.
assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
@@ -916,18 +1164,18 @@ void SelectionDAGBuilder::visitRet(ReturnInst &I) {
/// CopyToExportRegsIfNeeded - If the given value has virtual registers
/// created for it, emit nodes to copy the value into the virtual
/// registers.
-void SelectionDAGBuilder::CopyToExportRegsIfNeeded(Value *V) {
- if (!V->use_empty()) {
- DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
- if (VMI != FuncInfo.ValueMap.end())
- CopyValueToVirtualRegister(V, VMI->second);
+void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
+ DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
+ if (VMI != FuncInfo.ValueMap.end()) {
+ assert(!V->use_empty() && "Unused value assigned virtual registers!");
+ CopyValueToVirtualRegister(V, VMI->second);
}
}
/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/FromReg.
-void SelectionDAGBuilder::ExportFromCurrentBlock(Value *V) {
+void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
// No need to export constants.
if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
@@ -938,11 +1186,11 @@ void SelectionDAGBuilder::ExportFromCurrentBlock(Value *V) {
CopyValueToVirtualRegister(V, Reg);
}
-bool SelectionDAGBuilder::isExportableFromCurrentBlock(Value *V,
+bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
const BasicBlock *FromBB) {
// The operands of the setcc have to be in this block. We don't know
// how to export them from some other block.
- if (Instruction *VI = dyn_cast<Instruction>(V)) {
+ if (const Instruction *VI = dyn_cast<Instruction>(V)) {
// Can export from current BB.
if (VI->getParent() == FromBB)
return true;
@@ -971,85 +1219,31 @@ static bool InBlock(const Value *V, const BasicBlock *BB) {
return true;
}
-/// getFCmpCondCode - Return the ISD condition code corresponding to
-/// the given LLVM IR floating-point condition code. This includes
-/// consideration of global floating-point math flags.
-///
-static ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred) {
- ISD::CondCode FPC, FOC;
- switch (Pred) {
- case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
- case FCmpInst::FCMP_OEQ: FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
- case FCmpInst::FCMP_OGT: FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
- case FCmpInst::FCMP_OGE: FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
- case FCmpInst::FCMP_OLT: FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
- case FCmpInst::FCMP_OLE: FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
- case FCmpInst::FCMP_ONE: FOC = ISD::SETNE; FPC = ISD::SETONE; break;
- case FCmpInst::FCMP_ORD: FOC = FPC = ISD::SETO; break;
- case FCmpInst::FCMP_UNO: FOC = FPC = ISD::SETUO; break;
- case FCmpInst::FCMP_UEQ: FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
- case FCmpInst::FCMP_UGT: FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
- case FCmpInst::FCMP_UGE: FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
- case FCmpInst::FCMP_ULT: FOC = ISD::SETLT; FPC = ISD::SETULT; break;
- case FCmpInst::FCMP_ULE: FOC = ISD::SETLE; FPC = ISD::SETULE; break;
- case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
- case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; break;
- default:
- llvm_unreachable("Invalid FCmp predicate opcode!");
- FOC = FPC = ISD::SETFALSE;
- break;
- }
- if (FiniteOnlyFPMath())
- return FOC;
- else
- return FPC;
-}
-
-/// getICmpCondCode - Return the ISD condition code corresponding to
-/// the given LLVM IR integer condition code.
-///
-static ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred) {
- switch (Pred) {
- case ICmpInst::ICMP_EQ: return ISD::SETEQ;
- case ICmpInst::ICMP_NE: return ISD::SETNE;
- case ICmpInst::ICMP_SLE: return ISD::SETLE;
- case ICmpInst::ICMP_ULE: return ISD::SETULE;
- case ICmpInst::ICMP_SGE: return ISD::SETGE;
- case ICmpInst::ICMP_UGE: return ISD::SETUGE;
- case ICmpInst::ICMP_SLT: return ISD::SETLT;
- case ICmpInst::ICMP_ULT: return ISD::SETULT;
- case ICmpInst::ICMP_SGT: return ISD::SETGT;
- case ICmpInst::ICMP_UGT: return ISD::SETUGT;
- default:
- llvm_unreachable("Invalid ICmp predicate opcode!");
- return ISD::SETNE;
- }
-}
-
/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
///
void
-SelectionDAGBuilder::EmitBranchForMergedCondition(Value *Cond,
+SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- MachineBasicBlock *CurBB) {
+ MachineBasicBlock *CurBB,
+ MachineBasicBlock *SwitchBB) {
const BasicBlock *BB = CurBB->getBasicBlock();
// If the leaf of the tree is a comparison, merge the condition into
// the caseblock.
- if (CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
+ if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
// The operands of the cmp have to be in this block. We don't know
// how to export them from some other block. If this is the first block
// of the sequence, no exporting is needed.
- if (CurBB == CurMBB ||
+ if (CurBB == SwitchBB ||
(isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
ISD::CondCode Condition;
- if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
+ if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
Condition = getICmpCondCode(IC->getPredicate());
- } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
+ } else if (const FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
Condition = getFCmpCondCode(FC->getPredicate());
} else {
Condition = ISD::SETEQ; // silence warning.
@@ -1070,19 +1264,20 @@ SelectionDAGBuilder::EmitBranchForMergedCondition(Value *Cond,
}
/// FindMergedConditions - If Cond is an expression like
-void SelectionDAGBuilder::FindMergedConditions(Value *Cond,
+void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
MachineBasicBlock *CurBB,
+ MachineBasicBlock *SwitchBB,
unsigned Opc) {
// If this node is not part of the or/and tree, emit it as a branch.
- Instruction *BOp = dyn_cast<Instruction>(Cond);
+ const Instruction *BOp = dyn_cast<Instruction>(Cond);
if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
(unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
BOp->getParent() != CurBB->getBasicBlock() ||
!InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
!InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
- EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB);
+ EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB);
return;
}
@@ -1102,10 +1297,10 @@ void SelectionDAGBuilder::FindMergedConditions(Value *Cond,
//
// Emit the LHS condition.
- FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);
+ FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc);
// Emit the RHS condition into TmpBB.
- FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
+ FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc);
} else {
assert(Opc == Instruction::And && "Unknown merge op!");
// Codegen X & Y as:
@@ -1118,10 +1313,10 @@ void SelectionDAGBuilder::FindMergedConditions(Value *Cond,
// This requires creation of TmpBB after CurBB.
// Emit the LHS condition.
- FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);
+ FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc);
// Emit the RHS condition into TmpBB.
- FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
+ FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc);
}
}
@@ -1156,19 +1351,21 @@ SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
return true;
}
-void SelectionDAGBuilder::visitBr(BranchInst &I) {
+void SelectionDAGBuilder::visitBr(const BranchInst &I) {
+ MachineBasicBlock *BrMBB = FuncInfo.MBB;
+
// Update machine-CFG edges.
MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
// Figure out which block is immediately after the current one.
MachineBasicBlock *NextBlock = 0;
- MachineFunction::iterator BBI = CurMBB;
+ MachineFunction::iterator BBI = BrMBB;
if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
if (I.isUnconditional()) {
// Update machine-CFG edges.
- CurMBB->addSuccessor(Succ0MBB);
+ BrMBB->addSuccessor(Succ0MBB);
// If this is not a fall-through branch, emit the branch.
if (Succ0MBB != NextBlock)
@@ -1181,7 +1378,7 @@ void SelectionDAGBuilder::visitBr(BranchInst &I) {
// If this condition is one of the special cases we handle, do special stuff
// now.
- Value *CondVal = I.getCondition();
+ const Value *CondVal = I.getCondition();
MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
// If this is a series of conditions that are or'd or and'd together, emit
@@ -1199,15 +1396,16 @@ void SelectionDAGBuilder::visitBr(BranchInst &I) {
// cmp D, E
// jle foo
//
- if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
+ if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
if (BOp->hasOneUse() &&
(BOp->getOpcode() == Instruction::And ||
BOp->getOpcode() == Instruction::Or)) {
- FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
+ FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
+ BOp->getOpcode());
// If the compares in later blocks need to use values not currently
// exported from this block, export them now. This block should always
// be the first entry.
- assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");
+ assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
// Allow some cases to be rejected.
if (ShouldEmitAsBranches(SwitchCases)) {
@@ -1217,7 +1415,7 @@ void SelectionDAGBuilder::visitBr(BranchInst &I) {
}
// Emit the branch for this block.
- visitSwitchCase(SwitchCases[0]);
+ visitSwitchCase(SwitchCases[0], BrMBB);
SwitchCases.erase(SwitchCases.begin());
return;
}
@@ -1233,16 +1431,17 @@ void SelectionDAGBuilder::visitBr(BranchInst &I) {
// Create a CaseBlock record representing this branch.
CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
- NULL, Succ0MBB, Succ1MBB, CurMBB);
+ NULL, Succ0MBB, Succ1MBB, BrMBB);
// Use visitSwitchCase to actually insert the fast branch sequence for this
// cond branch.
- visitSwitchCase(CB);
+ visitSwitchCase(CB, BrMBB);
}
/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
-void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB) {
+void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
+ MachineBasicBlock *SwitchBB) {
SDValue Cond;
SDValue CondLHS = getValue(CB.CmpLHS);
DebugLoc dl = getCurDebugLoc();
@@ -1281,13 +1480,13 @@ void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB) {
}
// Update successor info
- CurMBB->addSuccessor(CB.TrueBB);
- CurMBB->addSuccessor(CB.FalseBB);
+ SwitchBB->addSuccessor(CB.TrueBB);
+ SwitchBB->addSuccessor(CB.FalseBB);
// Set NextBlock to be the MBB immediately after the current one, if any.
// This is used to avoid emitting unnecessary branches to the next block.
MachineBasicBlock *NextBlock = 0;
- MachineFunction::iterator BBI = CurMBB;
+ MachineFunction::iterator BBI = SwitchBB;
if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
@@ -1303,18 +1502,10 @@ void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB) {
MVT::Other, getControlRoot(), Cond,
DAG.getBasicBlock(CB.TrueBB));
- // If the branch was constant folded, fix up the CFG.
- if (BrCond.getOpcode() == ISD::BR) {
- CurMBB->removeSuccessor(CB.FalseBB);
- } else {
- // Otherwise, go ahead and insert the false branch.
- if (BrCond == getControlRoot())
- CurMBB->removeSuccessor(CB.TrueBB);
-
- if (CB.FalseBB != NextBlock)
- BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
- DAG.getBasicBlock(CB.FalseBB));
- }
+ // Insert the false branch.
+ if (CB.FalseBB != NextBlock)
+ BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
+ DAG.getBasicBlock(CB.FalseBB));
DAG.setRoot(BrCond);
}
@@ -1336,7 +1527,8 @@ void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
/// visitJumpTableHeader - This function emits necessary code to produce index
/// in the JumpTable from switch case.
void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
- JumpTableHeader &JTH) {
+ JumpTableHeader &JTH,
+ MachineBasicBlock *SwitchBB) {
// Subtract the lowest switch case value from the value being switched on and
// conditional branch to default mbb if the result is greater than the
// difference between smallest and largest cases.
@@ -1352,7 +1544,7 @@ void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
// therefore require extension or truncating.
SwitchOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(), TLI.getPointerTy());
- unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
+ unsigned JumpTableReg = FuncInfo.CreateReg(TLI.getPointerTy());
SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
JumpTableReg, SwitchOp);
JT.Reg = JumpTableReg;
@@ -1368,7 +1560,7 @@ void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
// Set NextBlock to be the MBB immediately after the current one, if any.
// This is used to avoid emitting unnecessary branches to the next block.
MachineBasicBlock *NextBlock = 0;
- MachineFunction::iterator BBI = CurMBB;
+ MachineFunction::iterator BBI = SwitchBB;
if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
@@ -1386,7 +1578,8 @@ void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
/// visitBitTestHeader - This function emits necessary code to produce value
/// suitable for "bit tests"
-void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B) {
+void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
+ MachineBasicBlock *SwitchBB) {
// Subtract the minimum value
SDValue SwitchOp = getValue(B.SValue);
EVT VT = SwitchOp.getValueType();
@@ -1402,21 +1595,21 @@ void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B) {
SDValue ShiftOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(),
TLI.getPointerTy());
- B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
+ B.Reg = FuncInfo.CreateReg(TLI.getPointerTy());
SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
B.Reg, ShiftOp);
// Set NextBlock to be the MBB immediately after the current one, if any.
// This is used to avoid emitting unnecessary branches to the next block.
MachineBasicBlock *NextBlock = 0;
- MachineFunction::iterator BBI = CurMBB;
+ MachineFunction::iterator BBI = SwitchBB;
if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
MachineBasicBlock* MBB = B.Cases[0].ThisBB;
- CurMBB->addSuccessor(B.Default);
- CurMBB->addSuccessor(MBB);
+ SwitchBB->addSuccessor(B.Default);
+ SwitchBB->addSuccessor(MBB);
SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
MVT::Other, CopyTo, RangeCmp,
@@ -1432,35 +1625,48 @@ void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B) {
/// visitBitTestCase - this function produces one "bit test"
void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB,
unsigned Reg,
- BitTestCase &B) {
- // Make desired shift
+ BitTestCase &B,
+ MachineBasicBlock *SwitchBB) {
SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
TLI.getPointerTy());
- SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
- TLI.getPointerTy(),
- DAG.getConstant(1, TLI.getPointerTy()),
- ShiftOp);
-
- // Emit bit tests and jumps
- SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
- TLI.getPointerTy(), SwitchVal,
- DAG.getConstant(B.Mask, TLI.getPointerTy()));
- SDValue AndCmp = DAG.getSetCC(getCurDebugLoc(),
- TLI.getSetCCResultType(AndOp.getValueType()),
- AndOp, DAG.getConstant(0, TLI.getPointerTy()),
- ISD::SETNE);
-
- CurMBB->addSuccessor(B.TargetBB);
- CurMBB->addSuccessor(NextMBB);
+ SDValue Cmp;
+ if (CountPopulation_64(B.Mask) == 1) {
+ // Testing for a single bit; just compare the shift count with what it
+ // would need to be to shift a 1 bit in that position.
+ Cmp = DAG.getSetCC(getCurDebugLoc(),
+ TLI.getSetCCResultType(ShiftOp.getValueType()),
+ ShiftOp,
+ DAG.getConstant(CountTrailingZeros_64(B.Mask),
+ TLI.getPointerTy()),
+ ISD::SETEQ);
+ } else {
+ // Make desired shift
+ SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
+ TLI.getPointerTy(),
+ DAG.getConstant(1, TLI.getPointerTy()),
+ ShiftOp);
+
+ // Emit bit tests and jumps
+ SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
+ TLI.getPointerTy(), SwitchVal,
+ DAG.getConstant(B.Mask, TLI.getPointerTy()));
+ Cmp = DAG.getSetCC(getCurDebugLoc(),
+ TLI.getSetCCResultType(AndOp.getValueType()),
+ AndOp, DAG.getConstant(0, TLI.getPointerTy()),
+ ISD::SETNE);
+ }
+
+ SwitchBB->addSuccessor(B.TargetBB);
+ SwitchBB->addSuccessor(NextMBB);
SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
MVT::Other, getControlRoot(),
- AndCmp, DAG.getBasicBlock(B.TargetBB));
+ Cmp, DAG.getBasicBlock(B.TargetBB));
// Set NextBlock to be the MBB immediately after the current one, if any.
// This is used to avoid emitting unnecessary branches to the next block.
MachineBasicBlock *NextBlock = 0;
- MachineFunction::iterator BBI = CurMBB;
+ MachineFunction::iterator BBI = SwitchBB;
if (++BBI != FuncInfo.MF->end())
NextBlock = BBI;
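The hunk above changes visitBitTestCase so that a mask with exactly one set bit is handled by comparing the shift amount against the bit index instead of materializing the SHL/AND/SETNE sequence. A minimal standalone sketch of that equivalence (plain C++, not part of the patch; the helper names are invented for the illustration):

  #include <cassert>
  #include <cstdint>

  // Stand-in for CountTrailingZeros_64: index of the single set bit.
  static unsigned trailingZeros(uint64_t Mask) {
    unsigned N = 0;
    while ((Mask & 1) == 0) { Mask >>= 1; ++N; }
    return N;
  }

  int main() {
    const uint64_t Mask = uint64_t(1) << 13;                   // single-bit mask
    for (unsigned Shift = 0; Shift < 64; ++Shift) {
      bool AndForm = (((uint64_t(1) << Shift) & Mask) != 0);   // SHL + AND + SETNE
      bool EqForm  = (Shift == trailingZeros(Mask));           // single SETEQ
      assert(AndForm == EqForm);
    }
    return 0;
  }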
@@ -1471,7 +1677,9 @@ void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB,
DAG.setRoot(BrAnd);
}
-void SelectionDAGBuilder::visitInvoke(InvokeInst &I) {
+void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
+ MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
+
// Retrieve successors.
MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
@@ -1487,8 +1695,8 @@ void SelectionDAGBuilder::visitInvoke(InvokeInst &I) {
CopyToExportRegsIfNeeded(&I);
// Update successor info
- CurMBB->addSuccessor(Return);
- CurMBB->addSuccessor(LandingPad);
+ InvokeMBB->addSuccessor(Return);
+ InvokeMBB->addSuccessor(LandingPad);
// Drop into normal successor.
DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
@@ -1496,15 +1704,16 @@ void SelectionDAGBuilder::visitInvoke(InvokeInst &I) {
DAG.getBasicBlock(Return)));
}
-void SelectionDAGBuilder::visitUnwind(UnwindInst &I) {
+void SelectionDAGBuilder::visitUnwind(const UnwindInst &I) {
}
/// handleSmallSwitchCaseRange - Emit a series of specific tests (suitable for
/// small case ranges).
bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
CaseRecVector& WorkList,
- Value* SV,
- MachineBasicBlock* Default) {
+ const Value* SV,
+ MachineBasicBlock *Default,
+ MachineBasicBlock *SwitchBB) {
Case& BackCase = *(CR.Range.second-1);
// Size is the number of Cases represented by this range.
@@ -1557,7 +1766,7 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
FallThrough = Default;
}
- Value *RHS, *LHS, *MHS;
+ const Value *RHS, *LHS, *MHS;
ISD::CondCode CC;
if (I->High == I->Low) {
// This is just a small case range :) containing exactly 1 case
@@ -1573,8 +1782,8 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
// code into the current block. Otherwise, push the CaseBlock onto the
// vector to be later processed by SDISel, and insert the node's MBB
// before the next MBB.
- if (CurBlock == CurMBB)
- visitSwitchCase(CB);
+ if (CurBlock == SwitchBB)
+ visitSwitchCase(CB, SwitchBB);
else
SwitchCases.push_back(CB);
@@ -1600,8 +1809,9 @@ static APInt ComputeRange(const APInt &First, const APInt &Last) {
/// handleJTSwitchCase - Emit jumptable for current switch case range
bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
- Value* SV,
- MachineBasicBlock* Default) {
+ const Value* SV,
+ MachineBasicBlock* Default,
+ MachineBasicBlock *SwitchBB) {
Case& FrontCase = *CR.Range.first;
Case& BackCase = *(CR.Range.second-1);
@@ -1613,7 +1823,7 @@ bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec& CR,
I!=E; ++I)
TSize += I->size();
- if (!areJTsAllowed(TLI) || TSize.ult(APInt(First.getBitWidth(), 4)))
+ if (!areJTsAllowed(TLI) || TSize.ult(4))
return false;
APInt Range = ComputeRange(First, Last);
@@ -1682,9 +1892,9 @@ bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec& CR,
// Set the jump table information so that we can codegen it as a second
// MachineBasicBlock
JumpTable JT(-1U, JTI, JumpTableBB, Default);
- JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == CurMBB));
- if (CR.CaseBB == CurMBB)
- visitJumpTableHeader(JT, JTH);
+ JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == SwitchBB));
+ if (CR.CaseBB == SwitchBB)
+ visitJumpTableHeader(JT, JTH, SwitchBB);
JTCases.push_back(JumpTableBlock(JTH, JT));
@@ -1695,8 +1905,9 @@ bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec& CR,
/// 2 subtrees.
bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
- Value* SV,
- MachineBasicBlock* Default) {
+ const Value* SV,
+ MachineBasicBlock *Default,
+ MachineBasicBlock *SwitchBB) {
// Get the MachineFunction which holds the current MBB. This is used when
// inserting any additional MBBs necessary to represent the switch.
MachineFunction *CurMF = FuncInfo.MF;
@@ -1810,8 +2021,8 @@ bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
// Otherwise, branch to LHS.
CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
- if (CR.CaseBB == CurMBB)
- visitSwitchCase(CB);
+ if (CR.CaseBB == SwitchBB)
+ visitSwitchCase(CB, SwitchBB);
else
SwitchCases.push_back(CB);
@@ -1823,8 +2034,9 @@ bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
/// of masks and emit bit tests with these masks.
bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
- Value* SV,
- MachineBasicBlock* Default){
+ const Value* SV,
+ MachineBasicBlock* Default,
+ MachineBasicBlock *SwitchBB){
EVT PTy = TLI.getPointerTy();
unsigned IntPtrBits = PTy.getSizeInBits();
@@ -1867,7 +2079,7 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
<< "Low bound: " << minValue << '\n'
<< "High bound: " << maxValue << '\n');
- if (cmpRange.uge(APInt(cmpRange.getBitWidth(), IntPtrBits)) ||
+ if (cmpRange.uge(IntPtrBits) ||
(!(Dests.size() == 1 && numCmps >= 3) &&
!(Dests.size() == 2 && numCmps >= 5) &&
!(Dests.size() >= 3 && numCmps >= 6)))
@@ -1879,8 +2091,7 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
// Optimize the case where all the case values fit in a
// word without having to subtract minValue. In this case,
// we can optimize away the subtraction.
- if (minValue.isNonNegative() &&
- maxValue.slt(APInt(maxValue.getBitWidth(), IntPtrBits))) {
+ if (minValue.isNonNegative() && maxValue.slt(IntPtrBits)) {
cmpRange = maxValue;
} else {
lowBound = minValue;
@@ -1940,11 +2151,11 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
}
BitTestBlock BTB(lowBound, cmpRange, SV,
- -1U, (CR.CaseBB == CurMBB),
+ -1U, (CR.CaseBB == SwitchBB),
CR.CaseBB, Default, BTC);
- if (CR.CaseBB == CurMBB)
- visitBitTestHeader(BTB);
+ if (CR.CaseBB == SwitchBB)
+ visitBitTestHeader(BTB, SwitchBB);
BitTestCases.push_back(BTB);
@@ -1994,7 +2205,9 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
return numCmps;
}
-void SelectionDAGBuilder::visitSwitch(SwitchInst &SI) {
+void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
+ MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
+
// Figure out which block is immediately after the current one.
MachineBasicBlock *NextBlock = 0;
MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
@@ -2005,7 +2218,7 @@ void SelectionDAGBuilder::visitSwitch(SwitchInst &SI) {
// Update machine-CFG edges.
// If this is not a fall-through branch, emit the branch.
- CurMBB->addSuccessor(Default);
+ SwitchMBB->addSuccessor(Default);
if (Default != NextBlock)
DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
MVT::Other, getControlRoot(),
@@ -2026,38 +2239,41 @@ void SelectionDAGBuilder::visitSwitch(SwitchInst &SI) {
// Get the Value to be switched on and default basic blocks, which will be
// inserted into CaseBlock records, representing basic blocks in the binary
// search tree.
- Value *SV = SI.getOperand(0);
+ const Value *SV = SI.getOperand(0);
// Push the initial CaseRec onto the worklist
CaseRecVector WorkList;
- WorkList.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
+ WorkList.push_back(CaseRec(SwitchMBB,0,0,
+ CaseRange(Cases.begin(),Cases.end())));
while (!WorkList.empty()) {
// Grab a record representing a case range to process off the worklist
CaseRec CR = WorkList.back();
WorkList.pop_back();
- if (handleBitTestsSwitchCase(CR, WorkList, SV, Default))
+ if (handleBitTestsSwitchCase(CR, WorkList, SV, Default, SwitchMBB))
continue;
// If the range has few cases (two or less) emit a series of specific
// tests.
- if (handleSmallSwitchRange(CR, WorkList, SV, Default))
+ if (handleSmallSwitchRange(CR, WorkList, SV, Default, SwitchMBB))
continue;
// If the switch has more than 5 blocks, and at least 40% dense, and the
// target supports indirect branches, then emit a jump table rather than
// lowering the switch to a binary tree of conditional branches.
- if (handleJTSwitchCase(CR, WorkList, SV, Default))
+ if (handleJTSwitchCase(CR, WorkList, SV, Default, SwitchMBB))
continue;
// Emit binary tree. We need to pick a pivot, and push left and right ranges
// onto the worklist. Leafs are handled via handleSmallSwitchRange() call.
- handleBTSplitSwitchCase(CR, WorkList, SV, Default);
+ handleBTSplitSwitchCase(CR, WorkList, SV, Default, SwitchMBB);
}
}
-void SelectionDAGBuilder::visitIndirectBr(IndirectBrInst &I) {
+void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
+ MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
+
// Update machine-CFG edges with unique successors.
SmallVector<BasicBlock*, 32> succs;
succs.reserve(I.getNumSuccessors());
@@ -2066,14 +2282,14 @@ void SelectionDAGBuilder::visitIndirectBr(IndirectBrInst &I) {
array_pod_sort(succs.begin(), succs.end());
succs.erase(std::unique(succs.begin(), succs.end()), succs.end());
for (unsigned i = 0, e = succs.size(); i != e; ++i)
- CurMBB->addSuccessor(FuncInfo.MBBMap[succs[i]]);
+ IndirectBrMBB->addSuccessor(FuncInfo.MBBMap[succs[i]]);
DAG.setRoot(DAG.getNode(ISD::BRIND, getCurDebugLoc(),
MVT::Other, getControlRoot(),
getValue(I.getAddress())));
}
-void SelectionDAGBuilder::visitFSub(User &I) {
+void SelectionDAGBuilder::visitFSub(const User &I) {
// -0.0 - X --> fneg
const Type *Ty = I.getType();
if (Ty->isVectorTy()) {
@@ -2103,14 +2319,14 @@ void SelectionDAGBuilder::visitFSub(User &I) {
visitBinary(I, ISD::FSUB);
}
-void SelectionDAGBuilder::visitBinary(User &I, unsigned OpCode) {
+void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
setValue(&I, DAG.getNode(OpCode, getCurDebugLoc(),
Op1.getValueType(), Op1, Op2));
}
-void SelectionDAGBuilder::visitShift(User &I, unsigned Opcode) {
+void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
if (!I.getType()->isVectorTy() &&
@@ -2144,11 +2360,11 @@ void SelectionDAGBuilder::visitShift(User &I, unsigned Opcode) {
Op1.getValueType(), Op1, Op2));
}
-void SelectionDAGBuilder::visitICmp(User &I) {
+void SelectionDAGBuilder::visitICmp(const User &I) {
ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
- if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
+ if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
predicate = IC->getPredicate();
- else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
+ else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
predicate = ICmpInst::Predicate(IC->getPredicate());
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
@@ -2158,11 +2374,11 @@ void SelectionDAGBuilder::visitICmp(User &I) {
setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode));
}
-void SelectionDAGBuilder::visitFCmp(User &I) {
+void SelectionDAGBuilder::visitFCmp(const User &I) {
FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
- if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
+ if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
predicate = FC->getPredicate();
- else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
+ else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
predicate = FCmpInst::Predicate(FC->getPredicate());
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
@@ -2171,7 +2387,7 @@ void SelectionDAGBuilder::visitFCmp(User &I) {
setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
}
-void SelectionDAGBuilder::visitSelect(User &I) {
+void SelectionDAGBuilder::visitSelect(const User &I) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, I.getType(), ValueVTs);
unsigned NumValues = ValueVTs.size();
@@ -2184,7 +2400,8 @@ void SelectionDAGBuilder::visitSelect(User &I) {
for (unsigned i = 0; i != NumValues; ++i)
Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(),
- TrueVal.getNode()->getValueType(i), Cond,
+ TrueVal.getNode()->getValueType(TrueVal.getResNo()+i),
+ Cond,
SDValue(TrueVal.getNode(),
TrueVal.getResNo() + i),
SDValue(FalseVal.getNode(),
@@ -2195,14 +2412,14 @@ void SelectionDAGBuilder::visitSelect(User &I) {
&Values[0], NumValues));
}
-void SelectionDAGBuilder::visitTrunc(User &I) {
+void SelectionDAGBuilder::visitTrunc(const User &I) {
// TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
}
-void SelectionDAGBuilder::visitZExt(User &I) {
+void SelectionDAGBuilder::visitZExt(const User &I) {
// ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
// ZExt also can't be a cast to bool for same reason. So, nothing much to do
SDValue N = getValue(I.getOperand(0));
@@ -2210,7 +2427,7 @@ void SelectionDAGBuilder::visitZExt(User &I) {
setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N));
}
-void SelectionDAGBuilder::visitSExt(User &I) {
+void SelectionDAGBuilder::visitSExt(const User &I) {
// SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
// SExt also can't be a cast to bool for same reason. So, nothing much to do
SDValue N = getValue(I.getOperand(0));
@@ -2218,7 +2435,7 @@ void SelectionDAGBuilder::visitSExt(User &I) {
setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N));
}
-void SelectionDAGBuilder::visitFPTrunc(User &I) {
+void SelectionDAGBuilder::visitFPTrunc(const User &I) {
// FPTrunc is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
@@ -2226,60 +2443,58 @@ void SelectionDAGBuilder::visitFPTrunc(User &I) {
DestVT, N, DAG.getIntPtrConstant(0)));
}
-void SelectionDAGBuilder::visitFPExt(User &I){
+void SelectionDAGBuilder::visitFPExt(const User &I){
// FPExt is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
}
-void SelectionDAGBuilder::visitFPToUI(User &I) {
+void SelectionDAGBuilder::visitFPToUI(const User &I) {
// FPToUI is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N));
}
-void SelectionDAGBuilder::visitFPToSI(User &I) {
+void SelectionDAGBuilder::visitFPToSI(const User &I) {
// FPToSI is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N));
}
-void SelectionDAGBuilder::visitUIToFP(User &I) {
+void SelectionDAGBuilder::visitUIToFP(const User &I) {
// UIToFP is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N));
}
-void SelectionDAGBuilder::visitSIToFP(User &I){
+void SelectionDAGBuilder::visitSIToFP(const User &I){
// SIToFP is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N));
}
-void SelectionDAGBuilder::visitPtrToInt(User &I) {
+void SelectionDAGBuilder::visitPtrToInt(const User &I) {
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
SDValue N = getValue(I.getOperand(0));
- EVT SrcVT = N.getValueType();
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
}
-void SelectionDAGBuilder::visitIntToPtr(User &I) {
+void SelectionDAGBuilder::visitIntToPtr(const User &I) {
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
SDValue N = getValue(I.getOperand(0));
- EVT SrcVT = N.getValueType();
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
}
-void SelectionDAGBuilder::visitBitCast(User &I) {
+void SelectionDAGBuilder::visitBitCast(const User &I) {
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
@@ -2292,7 +2507,7 @@ void SelectionDAGBuilder::visitBitCast(User &I) {
setValue(&I, N); // noop cast.
}
-void SelectionDAGBuilder::visitInsertElement(User &I) {
+void SelectionDAGBuilder::visitInsertElement(const User &I) {
SDValue InVec = getValue(I.getOperand(0));
SDValue InVal = getValue(I.getOperand(1));
SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
@@ -2303,7 +2518,7 @@ void SelectionDAGBuilder::visitInsertElement(User &I) {
InVec, InVal, InIdx));
}
-void SelectionDAGBuilder::visitExtractElement(User &I) {
+void SelectionDAGBuilder::visitExtractElement(const User &I) {
SDValue InVec = getValue(I.getOperand(0));
SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
TLI.getPointerTy(),
@@ -2322,7 +2537,7 @@ static bool SequentialMask(SmallVectorImpl<int> &Mask, unsigned SIndx) {
return true;
}
-void SelectionDAGBuilder::visitShuffleVector(User &I) {
+void SelectionDAGBuilder::visitShuffleVector(const User &I) {
SmallVector<int, 8> Mask;
SDValue Src1 = getValue(I.getOperand(0));
SDValue Src2 = getValue(I.getOperand(1));
@@ -2503,7 +2718,7 @@ void SelectionDAGBuilder::visitShuffleVector(User &I) {
VT, &Ops[0], Ops.size()));
}
-void SelectionDAGBuilder::visitInsertValue(InsertValueInst &I) {
+void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
const Value *Op0 = I.getOperand(0);
const Value *Op1 = I.getOperand(1);
const Type *AggTy = I.getType();
@@ -2544,7 +2759,7 @@ void SelectionDAGBuilder::visitInsertValue(InsertValueInst &I) {
&Values[0], NumAggValues));
}
-void SelectionDAGBuilder::visitExtractValue(ExtractValueInst &I) {
+void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
const Value *Op0 = I.getOperand(0);
const Type *AggTy = Op0->getType();
const Type *ValTy = I.getType();
@@ -2572,13 +2787,13 @@ void SelectionDAGBuilder::visitExtractValue(ExtractValueInst &I) {
&Values[0], NumValValues));
}
-void SelectionDAGBuilder::visitGetElementPtr(User &I) {
+void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
SDValue N = getValue(I.getOperand(0));
const Type *Ty = I.getOperand(0)->getType();
- for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
+ for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end();
OI != E; ++OI) {
- Value *Idx = *OI;
+ const Value *Idx = *OI;
if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
@@ -2593,8 +2808,8 @@ void SelectionDAGBuilder::visitGetElementPtr(User &I) {
Ty = cast<SequentialType>(Ty)->getElementType();
// If this is a constant subscript, handle it quickly.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
- if (CI->getZExtValue() == 0) continue;
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
+ if (CI->isZero()) continue;
uint64_t Offs =
TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
SDValue OffsVal;
@@ -2644,7 +2859,7 @@ void SelectionDAGBuilder::visitGetElementPtr(User &I) {
setValue(&I, N);
}
-void SelectionDAGBuilder::visitAlloca(AllocaInst &I) {
+void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
// If this is a fixed sized alloca in the entry block of the function,
// allocate it statically on the stack.
if (FuncInfo.StaticAllocaMap.count(&I))
@@ -2658,18 +2873,18 @@ void SelectionDAGBuilder::visitAlloca(AllocaInst &I) {
SDValue AllocSize = getValue(I.getArraySize());
- AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), AllocSize.getValueType(),
- AllocSize,
- DAG.getConstant(TySize, AllocSize.getValueType()));
-
EVT IntPtr = TLI.getPointerTy();
- AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurDebugLoc(), IntPtr);
+ if (AllocSize.getValueType() != IntPtr)
+ AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurDebugLoc(), IntPtr);
+
+ AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), IntPtr,
+ AllocSize,
+ DAG.getConstant(TySize, IntPtr));
// Handle alignment. If the requested alignment is less than or equal to
// the stack alignment, ignore it. If the size is greater than or equal to
// the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
- unsigned StackAlign =
- TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
+ unsigned StackAlign = TM.getFrameInfo()->getStackAlignment();
if (Align <= StackAlign)
Align = 0;
@@ -2693,10 +2908,10 @@ void SelectionDAGBuilder::visitAlloca(AllocaInst &I) {
// Inform the Frame Information that we have just allocated a variable-sized
// object.
- FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject();
+ FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject(Align ? Align : 1);
}
-void SelectionDAGBuilder::visitLoad(LoadInst &I) {
+void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
const Value *SV = I.getOperand(0);
SDValue Ptr = getValue(SV);
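One note on the reordered dynamic-alloca lowering a few lines up: the array size is first zero-extended or truncated to the pointer type, and the multiply by the type's allocation size is then performed in that width. A minimal sketch of the arithmetic on constant operands (plain C++, not part of the patch; names are illustrative only):

  #include <cassert>
  #include <cstdint>

  // Emulates getZExtOrTrunc to IntPtr followed by an IntPtr-wide ISD::MUL.
  static uint64_t allocaBytes(uint64_t ArraySize, uint64_t TySize, unsigned PtrBits) {
    const uint64_t Mask = PtrBits >= 64 ? ~uint64_t(0)
                                        : (uint64_t(1) << PtrBits) - 1;
    uint64_t Count = ArraySize & Mask;      // zero-extend / truncate to IntPtr
    return (Count * TySize) & Mask;         // multiply done in pointer width
  }

  int main() {
    assert(allocaBytes(10, 4, 32) == 40);   // e.g. alloca of 10 x i32 on a 32-bit target
    return 0;
  }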
@@ -2756,9 +2971,9 @@ void SelectionDAGBuilder::visitLoad(LoadInst &I) {
&Values[0], NumValues));
}
-void SelectionDAGBuilder::visitStore(StoreInst &I) {
- Value *SrcV = I.getOperand(0);
- Value *PtrV = I.getOperand(1);
+void SelectionDAGBuilder::visitStore(const StoreInst &I) {
+ const Value *SrcV = I.getOperand(0);
+ const Value *PtrV = I.getOperand(1);
SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
@@ -2795,7 +3010,7 @@ void SelectionDAGBuilder::visitStore(StoreInst &I) {
/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
-void SelectionDAGBuilder::visitTargetIntrinsic(CallInst &I,
+void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
unsigned Intrinsic) {
bool HasChain = !I.doesNotAccessMemory();
bool OnlyLoad = HasChain && I.onlyReadsMemory();
@@ -2820,8 +3035,8 @@ void SelectionDAGBuilder::visitTargetIntrinsic(CallInst &I,
Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
// Add all operands of the call to the operand list.
- for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
- SDValue Op = getValue(I.getOperand(i));
+ for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
+ SDValue Op = getValue(I.getArgOperand(i));
assert(TLI.isTypeLegal(Op.getValueType()) &&
"Intrinsic uses a non-legal type?");
Ops.push_back(Op);
@@ -2921,15 +3136,16 @@ getF32Constant(SelectionDAG &DAG, unsigned Flt) {
/// visitIntrinsicCall: I is a call instruction
/// Op is the associated NodeType for I
const char *
-SelectionDAGBuilder::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
+SelectionDAGBuilder::implVisitBinaryAtomic(const CallInst& I,
+ ISD::NodeType Op) {
SDValue Root = getRoot();
SDValue L =
DAG.getAtomic(Op, getCurDebugLoc(),
- getValue(I.getOperand(2)).getValueType().getSimpleVT(),
+ getValue(I.getArgOperand(1)).getValueType().getSimpleVT(),
Root,
- getValue(I.getOperand(1)),
- getValue(I.getOperand(2)),
- I.getOperand(1));
+ getValue(I.getArgOperand(0)),
+ getValue(I.getArgOperand(1)),
+ I.getArgOperand(0));
setValue(&I, L);
DAG.setRoot(L.getValue(1));
return 0;
@@ -2937,9 +3153,9 @@ SelectionDAGBuilder::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
// implVisitAluOverflow - Lower arithmetic overflow intrinsics.
const char *
-SelectionDAGBuilder::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
- SDValue Op1 = getValue(I.getOperand(1));
- SDValue Op2 = getValue(I.getOperand(2));
+SelectionDAGBuilder::implVisitAluOverflow(const CallInst &I, ISD::NodeType Op) {
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
setValue(&I, DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2));
@@ -2949,13 +3165,13 @@ SelectionDAGBuilder::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
/// visitExp - Lower an exp intrinsic. Handles the special sequences for
/// limited-precision mode.
void
-SelectionDAGBuilder::visitExp(CallInst &I) {
+SelectionDAGBuilder::visitExp(const CallInst &I) {
SDValue result;
DebugLoc dl = getCurDebugLoc();
- if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
+ if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
- SDValue Op = getValue(I.getOperand(1));
+ SDValue Op = getValue(I.getArgOperand(0));
// Put the exponent in the right bit position for later addition to the
// final result:
@@ -3065,8 +3281,8 @@ SelectionDAGBuilder::visitExp(CallInst &I) {
} else {
// No special expansion.
result = DAG.getNode(ISD::FEXP, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1)));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)));
}
setValue(&I, result);
@@ -3075,13 +3291,13 @@ SelectionDAGBuilder::visitExp(CallInst &I) {
/// visitLog - Lower a log intrinsic. Handles the special sequences for
/// limited-precision mode.
void
-SelectionDAGBuilder::visitLog(CallInst &I) {
+SelectionDAGBuilder::visitLog(const CallInst &I) {
SDValue result;
DebugLoc dl = getCurDebugLoc();
- if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
+ if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
- SDValue Op = getValue(I.getOperand(1));
+ SDValue Op = getValue(I.getArgOperand(0));
SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
// Scale the exponent by log(2) [0.69314718f].
@@ -3175,8 +3391,8 @@ SelectionDAGBuilder::visitLog(CallInst &I) {
} else {
// No special expansion.
result = DAG.getNode(ISD::FLOG, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1)));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)));
}
setValue(&I, result);
@@ -3185,13 +3401,13 @@ SelectionDAGBuilder::visitLog(CallInst &I) {
/// visitLog2 - Lower a log2 intrinsic. Handles the special sequences for
/// limited-precision mode.
void
-SelectionDAGBuilder::visitLog2(CallInst &I) {
+SelectionDAGBuilder::visitLog2(const CallInst &I) {
SDValue result;
DebugLoc dl = getCurDebugLoc();
- if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
+ if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
- SDValue Op = getValue(I.getOperand(1));
+ SDValue Op = getValue(I.getArgOperand(0));
SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
// Get the exponent.
@@ -3284,8 +3500,8 @@ SelectionDAGBuilder::visitLog2(CallInst &I) {
} else {
// No special expansion.
result = DAG.getNode(ISD::FLOG2, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1)));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)));
}
setValue(&I, result);
@@ -3294,13 +3510,13 @@ SelectionDAGBuilder::visitLog2(CallInst &I) {
/// visitLog10 - Lower a log10 intrinsic. Handles the special sequences for
/// limited-precision mode.
void
-SelectionDAGBuilder::visitLog10(CallInst &I) {
+SelectionDAGBuilder::visitLog10(const CallInst &I) {
SDValue result;
DebugLoc dl = getCurDebugLoc();
- if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
+ if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
- SDValue Op = getValue(I.getOperand(1));
+ SDValue Op = getValue(I.getArgOperand(0));
SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
// Scale the exponent by log10(2) [0.30102999f].
@@ -3386,8 +3602,8 @@ SelectionDAGBuilder::visitLog10(CallInst &I) {
} else {
// No special expansion.
result = DAG.getNode(ISD::FLOG10, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1)));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)));
}
setValue(&I, result);
@@ -3396,13 +3612,13 @@ SelectionDAGBuilder::visitLog10(CallInst &I) {
/// visitExp2 - Lower an exp2 intrinsic. Handles the special sequences for
/// limited-precision mode.
void
-SelectionDAGBuilder::visitExp2(CallInst &I) {
+SelectionDAGBuilder::visitExp2(const CallInst &I) {
SDValue result;
DebugLoc dl = getCurDebugLoc();
- if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
+ if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
- SDValue Op = getValue(I.getOperand(1));
+ SDValue Op = getValue(I.getArgOperand(0));
SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
@@ -3500,8 +3716,8 @@ SelectionDAGBuilder::visitExp2(CallInst &I) {
} else {
// No special expansion.
result = DAG.getNode(ISD::FEXP2, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1)));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)));
}
setValue(&I, result);
@@ -3510,14 +3726,14 @@ SelectionDAGBuilder::visitExp2(CallInst &I) {
/// visitPow - Lower a pow intrinsic. Handles the special sequences for
/// limited-precision mode with x == 10.0f.
void
-SelectionDAGBuilder::visitPow(CallInst &I) {
+SelectionDAGBuilder::visitPow(const CallInst &I) {
SDValue result;
- Value *Val = I.getOperand(1);
+ const Value *Val = I.getArgOperand(0);
DebugLoc dl = getCurDebugLoc();
bool IsExp10 = false;
if (getValue(Val).getValueType() == MVT::f32 &&
- getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
+ getValue(I.getArgOperand(1)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(Val))) {
if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
@@ -3528,7 +3744,7 @@ SelectionDAGBuilder::visitPow(CallInst &I) {
}
if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
- SDValue Op = getValue(I.getOperand(2));
+ SDValue Op = getValue(I.getArgOperand(1));
// Put the exponent in the right bit position for later addition to the
// final result:
@@ -3633,9 +3849,9 @@ SelectionDAGBuilder::visitPow(CallInst &I) {
} else {
// No special expansion.
result = DAG.getNode(ISD::FPOW, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1)),
- getValue(I.getOperand(2)));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)),
+ getValue(I.getArgOperand(1)));
}
setValue(&I, result);
@@ -3658,7 +3874,7 @@ static SDValue ExpandPowI(DebugLoc DL, SDValue LHS, SDValue RHS,
if (Val == 0)
return DAG.getConstantFP(1.0, LHS.getValueType());
- Function *F = DAG.getMachineFunction().getFunction();
+ const Function *F = DAG.getMachineFunction().getFunction();
if (!F->hasFnAttr(Attribute::OptimizeForSize) ||
// If optimizing for size, don't insert too many multiplies. This
// inserts up to 5 multiplies.
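The expansion ExpandPowI performs keeps the number of multiplies logarithmic in the exponent, which is consistent with the "up to 5 multiplies" note above. The underlying idea is exponentiation by squaring; a minimal scalar sketch (plain C++, not part of the patch):

  #include <cassert>

  // powi by repeated squaring: O(log |Exp|) multiplies.
  static double powiSketch(double LHS, int Exp) {
    unsigned N = Exp < 0 ? 0u - unsigned(Exp) : unsigned(Exp);
    double Result = 1.0, Cur = LHS;
    while (N) {
      if (N & 1) Result *= Cur;   // this exponent bit contributes a factor
      Cur *= Cur;                 // square for the next bit
      N >>= 1;
    }
    return Exp < 0 ? 1.0 / Result : Result;   // powi(x, -n) == 1 / powi(x, n)
  }

  int main() {
    assert(powiSketch(2.0, 10) == 1024.0);
    assert(powiSketch(2.0, -3) == 0.125);
    return 0;
  }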
@@ -3694,12 +3910,72 @@ static SDValue ExpandPowI(DebugLoc DL, SDValue LHS, SDValue RHS,
return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
}
+/// EmitFuncArgumentDbgValue - If the DbgValueInst is a dbg_value of a function
+/// argument, create the corresponding DBG_VALUE machine instruction for it now.
+/// At the end of instruction selection, they will be inserted to the entry BB.
+bool
+SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
+ int64_t Offset,
+ const SDValue &N) {
+ const Argument *Arg = dyn_cast<Argument>(V);
+ if (!Arg)
+ return false;
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ // Ignore inlined function arguments here.
+ DIVariable DV(Variable);
+ if (DV.isInlinedFnArgument(MF.getFunction()))
+ return false;
+
+ MachineBasicBlock *MBB = FuncInfo.MBB;
+ if (MBB != &MF.front())
+ return false;
+
+ unsigned Reg = 0;
+ if (Arg->hasByValAttr()) {
+ // Byval arguments' frame index is recorded during argument lowering.
+ // Use this info directly.
+ const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
+ Reg = TRI->getFrameRegister(MF);
+ Offset = FuncInfo.getByValArgumentFrameIndex(Arg);
+ }
+
+ if (N.getNode() && N.getOpcode() == ISD::CopyFromReg) {
+ Reg = cast<RegisterSDNode>(N.getOperand(1))->getReg();
+ if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+ unsigned PR = RegInfo.getLiveInPhysReg(Reg);
+ if (PR)
+ Reg = PR;
+ }
+ }
+
+ if (!Reg) {
+ DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
+ if (VMI == FuncInfo.ValueMap.end())
+ return false;
+ Reg = VMI->second;
+ }
+
+ const TargetInstrInfo *TII = DAG.getTarget().getInstrInfo();
+ MachineInstrBuilder MIB = BuildMI(MF, getCurDebugLoc(),
+ TII->get(TargetOpcode::DBG_VALUE))
+ .addReg(Reg, RegState::Debug).addImm(Offset).addMetadata(Variable);
+ FuncInfo.ArgDbgValues.push_back(&*MIB);
+ return true;
+}
+
+// VisualStudio defines setjmp as _setjmp
+#if defined(_MSC_VER) && defined(setjmp)
+#define setjmp_undefined_for_visual_studio
+#undef setjmp
+#endif
/// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
/// we want to emit this as a call to a named external function, return the name
/// otherwise lower it and return null.
const char *
-SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
+SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
DebugLoc dl = getCurDebugLoc();
SDValue Res;
@@ -3713,110 +3989,195 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
case Intrinsic::vacopy: visitVACopy(I); return 0;
case Intrinsic::returnaddress:
setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
- getValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::frameaddress:
setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
- getValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::setjmp:
return "_setjmp"+!TLI.usesUnderscoreSetJmp();
case Intrinsic::longjmp:
return "_longjmp"+!TLI.usesUnderscoreLongJmp();
case Intrinsic::memcpy: {
- SDValue Op1 = getValue(I.getOperand(1));
- SDValue Op2 = getValue(I.getOperand(2));
- SDValue Op3 = getValue(I.getOperand(3));
- unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
- DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
- I.getOperand(1), 0, I.getOperand(2), 0));
+ // Assert for address < 256 since we support only user defined address
+ // spaces.
+ assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
+ < 256 &&
+ cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
+ < 256 &&
+ "Unknown address space");
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
+ SDValue Op3 = getValue(I.getArgOperand(2));
+ unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
+ bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
+ DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, isVol, false,
+ I.getArgOperand(0), 0, I.getArgOperand(1), 0));
return 0;
}
case Intrinsic::memset: {
- SDValue Op1 = getValue(I.getOperand(1));
- SDValue Op2 = getValue(I.getOperand(2));
- SDValue Op3 = getValue(I.getOperand(3));
- unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
- DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align,
- I.getOperand(1), 0));
+ // Assert for address < 256 since we support only user defined address
+ // spaces.
+ assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
+ < 256 &&
+ "Unknown address space");
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
+ SDValue Op3 = getValue(I.getArgOperand(2));
+ unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
+ bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
+ DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
+ I.getArgOperand(0), 0));
return 0;
}
case Intrinsic::memmove: {
- SDValue Op1 = getValue(I.getOperand(1));
- SDValue Op2 = getValue(I.getOperand(2));
- SDValue Op3 = getValue(I.getOperand(3));
- unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
+ // Assert for address < 256 since we support only user defined address
+ // spaces.
+ assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
+ < 256 &&
+ cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
+ < 256 &&
+ "Unknown address space");
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
+ SDValue Op3 = getValue(I.getArgOperand(2));
+ unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
+ bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
// If the source and destination are known to not be aliases, we can
// lower memmove as memcpy.
uint64_t Size = -1ULL;
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
Size = C->getZExtValue();
- if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
+ if (AA->alias(I.getArgOperand(0), Size, I.getArgOperand(1), Size) ==
AliasAnalysis::NoAlias) {
- DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
- I.getOperand(1), 0, I.getOperand(2), 0));
+ DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
+ false, I.getArgOperand(0), 0,
+ I.getArgOperand(1), 0));
return 0;
}
- DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align,
- I.getOperand(1), 0, I.getOperand(2), 0));
+ DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
+ I.getArgOperand(0), 0, I.getArgOperand(1), 0));
return 0;
}
case Intrinsic::dbg_declare: {
- // FIXME: currently, we get here only if OptLevel != CodeGenOpt::None.
- // The real handling of this intrinsic is in FastISel.
- if (OptLevel != CodeGenOpt::None)
- // FIXME: Variable debug info is not supported here.
- return 0;
- DwarfWriter *DW = DAG.getDwarfWriter();
- if (!DW)
- return 0;
- DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
- if (!DIDescriptor::ValidDebugInfo(DI.getVariable(), CodeGenOpt::None))
- return 0;
-
+ const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
MDNode *Variable = DI.getVariable();
- Value *Address = DI.getAddress();
- if (!Address)
+ const Value *Address = DI.getAddress();
+ if (!Address || !DIVariable(DI.getVariable()).Verify())
return 0;
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
- Address = BCI->getOperand(0);
- AllocaInst *AI = dyn_cast<AllocaInst>(Address);
- // Don't handle byval struct arguments or VLAs, for example.
- if (!AI)
+
+ // Build an entry in DbgOrdering. Debug info input nodes get an SDNodeOrder
+ // but do not always have a corresponding SDNode built. The absolute (but
+ // not the relative) SDNodeOrder values differ depending on whether debug
+ // info exists.
+ ++SDNodeOrder;
+
+ // Check if address has undef value.
+ if (isa<UndefValue>(Address) ||
+ (Address->use_empty() && !isa<Argument>(Address))) {
+ SDDbgValue *SDV =
+ DAG.getDbgValue(Variable, UndefValue::get(Address->getType()),
+ 0, dl, SDNodeOrder);
+ DAG.AddDbgValue(SDV, 0, false);
return 0;
- DenseMap<const AllocaInst*, int>::iterator SI =
- FuncInfo.StaticAllocaMap.find(AI);
- if (SI == FuncInfo.StaticAllocaMap.end())
- return 0; // VLAs.
- int FI = SI->second;
+ }
- if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo())
- if (MDNode *Dbg = DI.getMetadata("dbg"))
- MMI->setVariableDbgInfo(Variable, FI, Dbg);
+ SDValue &N = NodeMap[Address];
+ if (!N.getNode() && isa<Argument>(Address))
+ // Check unused arguments map.
+ N = UnusedArgNodeMap[Address];
+ SDDbgValue *SDV;
+ if (N.getNode()) {
+ // Parameters are handled specially.
+ bool isParameter =
+ DIVariable(Variable).getTag() == dwarf::DW_TAG_arg_variable;
+ if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
+ Address = BCI->getOperand(0);
+ const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
+
+ if (isParameter && !AI) {
+ FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
+ if (FINode)
+ // Byval parameter. We have a frame index at this point.
+ SDV = DAG.getDbgValue(Variable, FINode->getIndex(),
+ 0, dl, SDNodeOrder);
+ else
+ // Can't do anything with other non-AI cases yet. This might be a
+ // parameter of a callee function that got inlined, for example.
+ return 0;
+ } else if (AI)
+ SDV = DAG.getDbgValue(Variable, N.getNode(), N.getResNo(),
+ 0, dl, SDNodeOrder);
+ else
+ // Can't do anything with other non-AI cases yet.
+ return 0;
+ DAG.AddDbgValue(SDV, N.getNode(), isParameter);
+ } else {
+ // If Address is an argument then try to emit its dbg value using
+ // virtual register info from the FuncInfo.ValueMap. Otherwise add undef
+ // to help track missing debug info.
+ if (!EmitFuncArgumentDbgValue(Address, Variable, 0, N)) {
+ SDV = DAG.getDbgValue(Variable, UndefValue::get(Address->getType()),
+ 0, dl, SDNodeOrder);
+ DAG.AddDbgValue(SDV, 0, false);
+ }
+ }
return 0;
}
case Intrinsic::dbg_value: {
- // FIXME: currently, we get here only if OptLevel != CodeGenOpt::None.
- // The real handling of this intrinsic is in FastISel.
- if (OptLevel != CodeGenOpt::None)
- // FIXME: Variable debug info is not supported here.
- return 0;
- DwarfWriter *DW = DAG.getDwarfWriter();
- if (!DW)
- return 0;
- DbgValueInst &DI = cast<DbgValueInst>(I);
- if (!DIDescriptor::ValidDebugInfo(DI.getVariable(), CodeGenOpt::None))
+ const DbgValueInst &DI = cast<DbgValueInst>(I);
+ if (!DIVariable(DI.getVariable()).Verify())
return 0;
MDNode *Variable = DI.getVariable();
- Value *V = DI.getValue();
+ uint64_t Offset = DI.getOffset();
+ const Value *V = DI.getValue();
if (!V)
return 0;
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(V))
+
+ // Build an entry in DbgOrdering. Debug info input nodes get an SDNodeOrder
+ // but do not always have a corresponding SDNode built. The SDNodeOrder
+ // absolute, but not relative, values are different depending on whether
+ // debug info exists.
+ ++SDNodeOrder;
+ SDDbgValue *SDV;
+ if (isa<ConstantInt>(V) || isa<ConstantFP>(V)) {
+ SDV = DAG.getDbgValue(Variable, V, Offset, dl, SDNodeOrder);
+ DAG.AddDbgValue(SDV, 0, false);
+ } else {
+ // Do not use getValue() in here; we don't want to generate code at
+ // this point if it hasn't been done yet.
+ SDValue N = NodeMap[V];
+ if (!N.getNode() && isa<Argument>(V))
+ // Check unused arguments map.
+ N = UnusedArgNodeMap[V];
+ if (N.getNode()) {
+ if (!EmitFuncArgumentDbgValue(V, Variable, Offset, N)) {
+ SDV = DAG.getDbgValue(Variable, N.getNode(),
+ N.getResNo(), Offset, dl, SDNodeOrder);
+ DAG.AddDbgValue(SDV, N.getNode(), false);
+ }
+ } else if (isa<PHINode>(V) && !V->use_empty()) {
+ // Do not call getValue(V) yet, as we don't want to generate code.
+ // Remember it for later.
+ DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
+ DanglingDebugInfoMap[V] = DDI;
+ } else {
+ // We may expand this to cover more cases. One case where we have no
+ // data available is an unreferenced parameter; we need this fallback.
+ SDV = DAG.getDbgValue(Variable, UndefValue::get(V->getType()),
+ Offset, dl, SDNodeOrder);
+ DAG.AddDbgValue(SDV, 0, false);
+ }
+ }
+
+ // Build a debug info table entry.
+ if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
V = BCI->getOperand(0);
- AllocaInst *AI = dyn_cast<AllocaInst>(V);
+ const AllocaInst *AI = dyn_cast<AllocaInst>(V);
// Don't handle byval struct arguments or VLAs, for example.
if (!AI)
return 0;
@@ -3825,14 +4186,16 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
if (SI == FuncInfo.StaticAllocaMap.end())
return 0; // VLAs.
int FI = SI->second;
- if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo())
- if (MDNode *Dbg = DI.getMetadata("dbg"))
- MMI->setVariableDbgInfo(Variable, FI, Dbg);
+
+ MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
+ if (!DI.getDebugLoc().isUnknown() && MMI.hasDebugInfo())
+ MMI.setVariableDbgInfo(Variable, FI, DI.getDebugLoc());
return 0;
}
case Intrinsic::eh_exception: {
// Insert the EXCEPTIONADDR instruction.
- assert(CurMBB->isLandingPad() &&"Call to eh.exception not in landing pad!");
+ assert(FuncInfo.MBB->isLandingPad() &&
+ "Call to eh.exception not in landing pad!");
SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
SDValue Ops[1];
Ops[0] = DAG.getRoot();
@@ -3843,23 +4206,23 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::eh_selector: {
- MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
-
- if (CurMBB->isLandingPad())
- AddCatchInfo(I, MMI, CurMBB);
+ MachineBasicBlock *CallMBB = FuncInfo.MBB;
+ MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
+ if (CallMBB->isLandingPad())
+ AddCatchInfo(I, &MMI, CallMBB);
else {
#ifndef NDEBUG
FuncInfo.CatchInfoLost.insert(&I);
#endif
// FIXME: Mark exception selector register as live in. Hack for PR1508.
unsigned Reg = TLI.getExceptionSelectorRegister();
- if (Reg) CurMBB->addLiveIn(Reg);
+ if (Reg) FuncInfo.MBB->addLiveIn(Reg);
}
// Insert the EHSELECTION instruction.
SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
SDValue Ops[2];
- Ops[0] = getValue(I.getOperand(1));
+ Ops[0] = getValue(I.getArgOperand(0));
Ops[1] = getRoot();
SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
DAG.setRoot(Op.getValue(1));
@@ -3868,44 +4231,28 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::eh_typeid_for: {
- MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
-
- if (MMI) {
- // Find the type id for the given typeinfo.
- GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
- unsigned TypeID = MMI->getTypeIDFor(GV);
- Res = DAG.getConstant(TypeID, MVT::i32);
- } else {
- // Return something different to eh_selector.
- Res = DAG.getConstant(1, MVT::i32);
- }
-
+ // Find the type id for the given typeinfo.
+ GlobalVariable *GV = ExtractTypeInfo(I.getArgOperand(0));
+ unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(GV);
+ Res = DAG.getConstant(TypeID, MVT::i32);
setValue(&I, Res);
return 0;
}
case Intrinsic::eh_return_i32:
case Intrinsic::eh_return_i64:
- if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
- MMI->setCallsEHReturn(true);
- DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
- MVT::Other,
- getControlRoot(),
- getValue(I.getOperand(1)),
- getValue(I.getOperand(2))));
- } else {
- setValue(&I, DAG.getConstant(0, TLI.getPointerTy()));
- }
-
+ DAG.getMachineFunction().getMMI().setCallsEHReturn(true);
+ DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
+ MVT::Other,
+ getControlRoot(),
+ getValue(I.getArgOperand(0)),
+ getValue(I.getArgOperand(1))));
return 0;
case Intrinsic::eh_unwind_init:
- if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
- MMI->setCallsUnwindInit(true);
- }
+ DAG.getMachineFunction().getMMI().setCallsUnwindInit(true);
return 0;
case Intrinsic::eh_dwarf_cfa: {
- EVT VT = getValue(I.getOperand(1)).getValueType();
- SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), dl,
+ SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getArgOperand(0)), dl,
TLI.getPointerTy());
SDValue Offset = DAG.getNode(ISD::ADD, dl,
TLI.getPointerTy(),
@@ -3920,12 +4267,23 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
return 0;
}
case Intrinsic::eh_sjlj_callsite: {
- MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
- ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1));
+ MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
+ ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
- assert(MMI->getCurrentCallSite() == 0 && "Overlapping call sites!");
+ assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
- MMI->setCurrentCallSite(CI->getZExtValue());
+ MMI.setCurrentCallSite(CI->getZExtValue());
+ return 0;
+ }
+ case Intrinsic::eh_sjlj_setjmp: {
+ setValue(&I, DAG.getNode(ISD::EH_SJLJ_SETJMP, dl, MVT::i32, getRoot(),
+ getValue(I.getArgOperand(0))));
+ return 0;
+ }
+ case Intrinsic::eh_sjlj_longjmp: {
+ DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, dl, MVT::Other,
+ getRoot(),
+ getValue(I.getArgOperand(0))));
return 0;
}
@@ -3951,34 +4309,34 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
}
EVT DestVT = TLI.getValueType(I.getType());
- Value *Op1 = I.getOperand(1);
+ const Value *Op1 = I.getArgOperand(0);
Res = DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
DAG.getValueType(DestVT),
DAG.getValueType(getValue(Op1).getValueType()),
- getValue(I.getOperand(2)),
- getValue(I.getOperand(3)),
+ getValue(I.getArgOperand(1)),
+ getValue(I.getArgOperand(2)),
Code);
setValue(&I, Res);
return 0;
}
case Intrinsic::sqrt:
setValue(&I, DAG.getNode(ISD::FSQRT, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::powi:
- setValue(&I, ExpandPowI(dl, getValue(I.getOperand(1)),
- getValue(I.getOperand(2)), DAG));
+ setValue(&I, ExpandPowI(dl, getValue(I.getArgOperand(0)),
+ getValue(I.getArgOperand(1)), DAG));
return 0;
case Intrinsic::sin:
setValue(&I, DAG.getNode(ISD::FSIN, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::cos:
setValue(&I, DAG.getNode(ISD::FCOS, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::log:
visitLog(I);
@@ -3998,8 +4356,16 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
case Intrinsic::pow:
visitPow(I);
return 0;
+ case Intrinsic::convert_to_fp16:
+ setValue(&I, DAG.getNode(ISD::FP32_TO_FP16, dl,
+ MVT::i16, getValue(I.getArgOperand(0))));
+ return 0;
+ case Intrinsic::convert_from_fp16:
+ setValue(&I, DAG.getNode(ISD::FP16_TO_FP32, dl,
+ MVT::f32, getValue(I.getArgOperand(0))));
+ return 0;
case Intrinsic::pcmarker: {
- SDValue Tmp = getValue(I.getOperand(1));
+ SDValue Tmp = getValue(I.getArgOperand(0));
DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp));
return 0;
}
@@ -4014,23 +4380,23 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::bswap:
setValue(&I, DAG.getNode(ISD::BSWAP, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::cttz: {
- SDValue Arg = getValue(I.getOperand(1));
+ SDValue Arg = getValue(I.getArgOperand(0));
EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(ISD::CTTZ, dl, Ty, Arg));
return 0;
}
case Intrinsic::ctlz: {
- SDValue Arg = getValue(I.getOperand(1));
+ SDValue Arg = getValue(I.getArgOperand(0));
EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(ISD::CTLZ, dl, Ty, Arg));
return 0;
}
case Intrinsic::ctpop: {
- SDValue Arg = getValue(I.getOperand(1));
+ SDValue Arg = getValue(I.getArgOperand(0));
EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(ISD::CTPOP, dl, Ty, Arg));
return 0;
@@ -4044,7 +4410,7 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
return 0;
}
case Intrinsic::stackrestore: {
- Res = getValue(I.getOperand(1));
+ Res = getValue(I.getArgOperand(0));
DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Res));
return 0;
}
@@ -4054,8 +4420,8 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
MachineFrameInfo *MFI = MF.getFrameInfo();
EVT PtrTy = TLI.getPointerTy();
- SDValue Src = getValue(I.getOperand(1)); // The guard's value.
- AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
+ SDValue Src = getValue(I.getArgOperand(0)); // The guard's value.
+ AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
int FI = FuncInfo.StaticAllocaMap[Slot];
MFI->setStackProtectorIndex(FI);
@@ -4072,14 +4438,14 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::objectsize: {
// If we don't know by now, we're never going to know.
- ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(2));
+ ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
assert(CI && "Non-constant type in __builtin_object_size?");
- SDValue Arg = getValue(I.getOperand(0));
+ SDValue Arg = getValue(I.getCalledValue());
EVT Ty = Arg.getValueType();
- if (CI->getZExtValue() == 0)
+ if (CI->isZero())
Res = DAG.getConstant(-1ULL, Ty);
else
Res = DAG.getConstant(0, Ty);
@@ -4092,14 +4458,14 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
return 0;
case Intrinsic::init_trampoline: {
- const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
+ const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
SDValue Ops[6];
Ops[0] = getRoot();
- Ops[1] = getValue(I.getOperand(1));
- Ops[2] = getValue(I.getOperand(2));
- Ops[3] = getValue(I.getOperand(3));
- Ops[4] = DAG.getSrcValue(I.getOperand(1));
+ Ops[1] = getValue(I.getArgOperand(0));
+ Ops[2] = getValue(I.getArgOperand(1));
+ Ops[3] = getValue(I.getArgOperand(2));
+ Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
Ops[5] = DAG.getSrcValue(F);
Res = DAG.getNode(ISD::TRAMPOLINE, dl,
@@ -4112,8 +4478,8 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::gcroot:
if (GFI) {
- Value *Alloca = I.getOperand(1);
- Constant *TypeMap = cast<Constant>(I.getOperand(2));
+ const Value *Alloca = I.getArgOperand(0);
+ const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
GFI->addStackRoot(FI->getIndex(), TypeMap);
@@ -4145,9 +4511,9 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
case Intrinsic::prefetch: {
SDValue Ops[4];
Ops[0] = getRoot();
- Ops[1] = getValue(I.getOperand(1));
- Ops[2] = getValue(I.getOperand(2));
- Ops[3] = getValue(I.getOperand(3));
+ Ops[1] = getValue(I.getArgOperand(0));
+ Ops[2] = getValue(I.getArgOperand(1));
+ Ops[3] = getValue(I.getArgOperand(2));
DAG.setRoot(DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4));
return 0;
}
@@ -4156,7 +4522,7 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
SDValue Ops[6];
Ops[0] = getRoot();
for (int x = 1; x < 6; ++x)
- Ops[x] = getValue(I.getOperand(x));
+ Ops[x] = getValue(I.getArgOperand(x - 1));
DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6));
return 0;
@@ -4165,12 +4531,12 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
SDValue Root = getRoot();
SDValue L =
DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
- getValue(I.getOperand(2)).getValueType().getSimpleVT(),
+ getValue(I.getArgOperand(1)).getValueType().getSimpleVT(),
Root,
- getValue(I.getOperand(1)),
- getValue(I.getOperand(2)),
- getValue(I.getOperand(3)),
- I.getOperand(1));
+ getValue(I.getArgOperand(0)),
+ getValue(I.getArgOperand(1)),
+ getValue(I.getArgOperand(2)),
+ I.getArgOperand(0));
setValue(&I, L);
DAG.setRoot(L.getValue(1));
return 0;
@@ -4210,111 +4576,27 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
}
}
-/// Test if the given instruction is in a position to be optimized
-/// with a tail-call. This roughly means that it's in a block with
-/// a return and there's nothing that needs to be scheduled
-/// between it and the return.
-///
-/// This function only tests target-independent requirements.
-static bool
-isInTailCallPosition(CallSite CS, Attributes CalleeRetAttr,
- const TargetLowering &TLI) {
- const Instruction *I = CS.getInstruction();
- const BasicBlock *ExitBB = I->getParent();
- const TerminatorInst *Term = ExitBB->getTerminator();
- const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
- const Function *F = ExitBB->getParent();
-
- // The block must end in a return statement or unreachable.
- //
- // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
- // an unreachable, for now. The way tailcall optimization is currently
- // implemented means it will add an epilogue followed by a jump. That is
- // not profitable. Also, if the callee is a special function (e.g.
- // longjmp on x86), it can end up causing miscompilation that has not
- // been fully understood.
- if (!Ret &&
- (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term))) return false;
-
- // If I will have a chain, make sure no other instruction that will have a
- // chain interposes between I and the return.
- if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
- !I->isSafeToSpeculativelyExecute())
- for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
- --BBI) {
- if (&*BBI == I)
- break;
- if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
- !BBI->isSafeToSpeculativelyExecute())
- return false;
- }
-
- // If the block ends with a void return or unreachable, it doesn't matter
- // what the call's return type is.
- if (!Ret || Ret->getNumOperands() == 0) return true;
-
- // If the return value is undef, it doesn't matter what the call's
- // return type is.
- if (isa<UndefValue>(Ret->getOperand(0))) return true;
-
- // Conservatively require the attributes of the call to match those of
- // the return. Ignore noalias because it doesn't affect the call sequence.
- unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
- if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
- return false;
-
- // It's not safe to eliminate the sign / zero extension of the return value.
- if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
- return false;
-
- // Otherwise, make sure the unmodified return value of I is the return value.
- for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
- U = dyn_cast<Instruction>(U->getOperand(0))) {
- if (!U)
- return false;
- if (!U->hasOneUse())
- return false;
- if (U == I)
- break;
- // Check for a truly no-op truncate.
- if (isa<TruncInst>(U) &&
- TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
- continue;
- // Check for a truly no-op bitcast.
- if (isa<BitCastInst>(U) &&
- (U->getOperand(0)->getType() == U->getType() ||
- (U->getOperand(0)->getType()->isPointerTy() &&
- U->getType()->isPointerTy())))
- continue;
- // Otherwise it's not a true no-op.
- return false;
- }
-
- return true;
-}
-
-void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee,
+void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
bool isTailCall,
MachineBasicBlock *LandingPad) {
const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
const Type *RetTy = FTy->getReturnType();
- MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
- unsigned BeginLabel = 0, EndLabel = 0;
+ MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
+ MCSymbol *BeginLabel = 0;
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Args.reserve(CS.arg_size());
// Check whether the function can return without sret-demotion.
- SmallVector<EVT, 4> OutVTs;
- SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
+ SmallVector<ISD::OutputArg, 4> Outs;
SmallVector<uint64_t, 4> Offsets;
- getReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
- OutVTs, OutsFlags, TLI, &Offsets);
+ GetReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
+ Outs, TLI, &Offsets);
bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
- FTy->isVarArg(), OutVTs, OutsFlags, DAG);
+ FTy->isVarArg(), Outs, FTy->getContext());
SDValue DemoteStackSlot;
@@ -4341,7 +4623,7 @@ void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee,
RetTy = Type::getVoidTy(FTy->getContext());
}
- for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
+ for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i) {
SDValue ArgNode = getValue(*i);
Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
@@ -4357,25 +4639,24 @@ void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee,
Args.push_back(Entry);
}
- if (LandingPad && MMI) {
+ if (LandingPad) {
// Insert a label before the invoke call to mark the try range. This can be
// used to detect deletion of the invoke via the MachineModuleInfo.
- BeginLabel = MMI->NextLabelID();
+ BeginLabel = MMI.getContext().CreateTempSymbol();
// For SjLj, keep track of which landing pads go with which invokes
// so as to maintain the ordering of pads in the LSDA.
- unsigned CallSiteIndex = MMI->getCurrentCallSite();
+ unsigned CallSiteIndex = MMI.getCurrentCallSite();
if (CallSiteIndex) {
- MMI->setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
+ MMI.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
// Now that the call site is handled, stop tracking it.
- MMI->setCurrentCallSite(0);
+ MMI.setCurrentCallSite(0);
}
// Both PendingLoads and PendingExports must be flushed here;
// this call might not return.
(void)getRoot();
- DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
- getControlRoot(), BeginLabel));
+ DAG.setRoot(DAG.getEHLabel(getCurDebugLoc(), getControlRoot(), BeginLabel));
}
// Check if target-independent constraints permit a tail call here.
@@ -4384,6 +4665,11 @@ void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee,
!isInTailCallPosition(CS, CS.getAttributes().getRetAttributes(), TLI))
isTailCall = false;
+ // If there's a possibility that fast-isel has already selected some amount
+ // of the current basic block, don't emit a tail call.
+ if (isTailCall && EnableFastISel)
+ isTailCall = false;
+
std::pair<SDValue,SDValue> Result =
TLI.LowerCallTo(getRoot(), RetTy,
CS.paramHasAttr(0, Attribute::SExt),
@@ -4408,7 +4694,7 @@ void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee,
ComputeValueVTs(TLI, PtrRetTy, PVTs);
assert(PVTs.size() == 1 && "Pointers should fit in one register");
EVT PtrVT = PVTs[0];
- unsigned NumValues = OutVTs.size();
+ unsigned NumValues = Outs.size();
SmallVector<SDValue, 4> Values(NumValues);
SmallVector<SDValue, 4> Chains(NumValues);
@@ -4416,7 +4702,7 @@ void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee,
SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT,
DemoteStackSlot,
DAG.getConstant(Offsets[i], PtrVT));
- SDValue L = DAG.getLoad(OutVTs[i], getCurDebugLoc(), Result.second,
+ SDValue L = DAG.getLoad(Outs[i].VT, getCurDebugLoc(), Result.second,
Add, NULL, Offsets[i], false, false, 1);
Values[i] = L;
Chains[i] = L.getValue(1);
@@ -4460,26 +4746,25 @@ void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee,
else
HasTailCall = true;
- if (LandingPad && MMI) {
+ if (LandingPad) {
// Insert a label at the end of the invoke call to mark the try range. This
// can be used to detect deletion of the invoke via the MachineModuleInfo.
- EndLabel = MMI->NextLabelID();
- DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
- getRoot(), EndLabel));
+ MCSymbol *EndLabel = MMI.getContext().CreateTempSymbol();
+ DAG.setRoot(DAG.getEHLabel(getCurDebugLoc(), getRoot(), EndLabel));
// Inform MachineModuleInfo of range.
- MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
+ MMI.addInvoke(LandingPad, BeginLabel, EndLabel);
}
}
/// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
/// value is equal or not-equal to zero.
-static bool IsOnlyUsedInZeroEqualityComparison(Value *V) {
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
+static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
+ for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
UI != E; ++UI) {
- if (ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
+ if (const ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
if (IC->isEquality())
- if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
+ if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
if (C->isNullValue())
continue;
// Unknown instruction.
@@ -4488,17 +4773,20 @@ static bool IsOnlyUsedInZeroEqualityComparison(Value *V) {
return true;
}
-static SDValue getMemCmpLoad(Value *PtrVal, MVT LoadVT, const Type *LoadTy,
+static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
+ const Type *LoadTy,
SelectionDAGBuilder &Builder) {
// Check to see if this load can be trivially constant folded, e.g. if the
// input is from a string literal.
- if (Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
+ if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
// Cast pointer to the type we really want to load.
- LoadInput = ConstantExpr::getBitCast(LoadInput,
+ LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
PointerType::getUnqual(LoadTy));
- if (Constant *LoadCst = ConstantFoldLoadFromConstPtr(LoadInput, Builder.TD))
+ if (const Constant *LoadCst =
+ ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
+ Builder.TD))
return Builder.getValue(LoadCst);
}
@@ -4531,18 +4819,18 @@ static SDValue getMemCmpLoad(Value *PtrVal, MVT LoadVT, const Type *LoadTy,
/// visitMemCmpCall - See if we can lower a call to memcmp in an optimized form.
/// If so, return true and lower it, otherwise return false and it will be
/// lowered like a normal call.
-bool SelectionDAGBuilder::visitMemCmpCall(CallInst &I) {
+bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
// Verify that the prototype makes sense. int memcmp(void*,void*,size_t)
- if (I.getNumOperands() != 4)
+ if (I.getNumArgOperands() != 3)
return false;
- Value *LHS = I.getOperand(1), *RHS = I.getOperand(2);
+ const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
if (!LHS->getType()->isPointerTy() || !RHS->getType()->isPointerTy() ||
- !I.getOperand(3)->getType()->isIntegerTy() ||
+ !I.getArgOperand(2)->getType()->isIntegerTy() ||
!I.getType()->isIntegerTy())
return false;
- ConstantInt *Size = dyn_cast<ConstantInt>(I.getOperand(3));
+ const ConstantInt *Size = dyn_cast<ConstantInt>(I.getArgOperand(2));
// memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
// memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
@@ -4608,12 +4896,17 @@ bool SelectionDAGBuilder::visitMemCmpCall(CallInst &I) {
}
-void SelectionDAGBuilder::visitCall(CallInst &I) {
+void SelectionDAGBuilder::visitCall(const CallInst &I) {
+ // Handle inline assembly differently.
+ if (isa<InlineAsm>(I.getCalledValue())) {
+ visitInlineAsm(&I);
+ return;
+ }
+
const char *RenameFn = 0;
if (Function *F = I.getCalledFunction()) {
if (F->isDeclaration()) {
- const TargetIntrinsicInfo *II = TLI.getTargetMachine().getIntrinsicInfo();
- if (II) {
+ if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) {
if (unsigned IID = II->getIntrinsicID(F)) {
RenameFn = visitIntrinsicCall(I, IID);
if (!RenameFn)
@@ -4631,52 +4924,52 @@ void SelectionDAGBuilder::visitCall(CallInst &I) {
// can't be a library call.
if (!F->hasLocalLinkage() && F->hasName()) {
StringRef Name = F->getName();
- if (Name == "copysign" || Name == "copysignf") {
- if (I.getNumOperands() == 3 && // Basic sanity checks.
- I.getOperand(1)->getType()->isFloatingPointTy() &&
- I.getType() == I.getOperand(1)->getType() &&
- I.getType() == I.getOperand(2)->getType()) {
- SDValue LHS = getValue(I.getOperand(1));
- SDValue RHS = getValue(I.getOperand(2));
+ if (Name == "copysign" || Name == "copysignf" || Name == "copysignl") {
+ if (I.getNumArgOperands() == 2 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType() &&
+ I.getType() == I.getArgOperand(1)->getType()) {
+ SDValue LHS = getValue(I.getArgOperand(0));
+ SDValue RHS = getValue(I.getArgOperand(1));
setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
LHS.getValueType(), LHS, RHS));
return;
}
} else if (Name == "fabs" || Name == "fabsf" || Name == "fabsl") {
- if (I.getNumOperands() == 2 && // Basic sanity checks.
- I.getOperand(1)->getType()->isFloatingPointTy() &&
- I.getType() == I.getOperand(1)->getType()) {
- SDValue Tmp = getValue(I.getOperand(1));
+ if (I.getNumArgOperands() == 1 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType()) {
+ SDValue Tmp = getValue(I.getArgOperand(0));
setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
Tmp.getValueType(), Tmp));
return;
}
} else if (Name == "sin" || Name == "sinf" || Name == "sinl") {
- if (I.getNumOperands() == 2 && // Basic sanity checks.
- I.getOperand(1)->getType()->isFloatingPointTy() &&
- I.getType() == I.getOperand(1)->getType() &&
+ if (I.getNumArgOperands() == 1 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType() &&
I.onlyReadsMemory()) {
- SDValue Tmp = getValue(I.getOperand(1));
+ SDValue Tmp = getValue(I.getArgOperand(0));
setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
Tmp.getValueType(), Tmp));
return;
}
} else if (Name == "cos" || Name == "cosf" || Name == "cosl") {
- if (I.getNumOperands() == 2 && // Basic sanity checks.
- I.getOperand(1)->getType()->isFloatingPointTy() &&
- I.getType() == I.getOperand(1)->getType() &&
+ if (I.getNumArgOperands() == 1 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType() &&
I.onlyReadsMemory()) {
- SDValue Tmp = getValue(I.getOperand(1));
+ SDValue Tmp = getValue(I.getArgOperand(0));
setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
Tmp.getValueType(), Tmp));
return;
}
} else if (Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl") {
- if (I.getNumOperands() == 2 && // Basic sanity checks.
- I.getOperand(1)->getType()->isFloatingPointTy() &&
- I.getType() == I.getOperand(1)->getType() &&
+ if (I.getNumArgOperands() == 1 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType() &&
I.onlyReadsMemory()) {
- SDValue Tmp = getValue(I.getOperand(1));
+ SDValue Tmp = getValue(I.getArgOperand(0));
setValue(&I, DAG.getNode(ISD::FSQRT, getCurDebugLoc(),
Tmp.getValueType(), Tmp));
return;
@@ -4686,14 +4979,11 @@ void SelectionDAGBuilder::visitCall(CallInst &I) {
return;
}
}
- } else if (isa<InlineAsm>(I.getOperand(0))) {
- visitInlineAsm(&I);
- return;
}
-
+
SDValue Callee;
if (!RenameFn)
- Callee = getValue(I.getOperand(0));
+ Callee = getValue(I.getCalledValue());
else
Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
@@ -4702,214 +4992,11 @@ void SelectionDAGBuilder::visitCall(CallInst &I) {
LowerCallTo(&I, Callee, I.isTailCall());
}
-/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
-/// this value and returns the result as a ValueVT value. This uses
-/// Chain/Flag as the input and updates them for the output Chain/Flag.
-/// If the Flag pointer is NULL, no flag is used.
-SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
- SDValue &Chain, SDValue *Flag) const {
- // Assemble the legal parts into the final values.
- SmallVector<SDValue, 4> Values(ValueVTs.size());
- SmallVector<SDValue, 8> Parts;
- for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
- // Copy the legal parts from the registers.
- EVT ValueVT = ValueVTs[Value];
- unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
- EVT RegisterVT = RegVTs[Value];
-
- Parts.resize(NumRegs);
- for (unsigned i = 0; i != NumRegs; ++i) {
- SDValue P;
- if (Flag == 0) {
- P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
- } else {
- P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
- *Flag = P.getValue(2);
- }
-
- Chain = P.getValue(1);
-
- // If the source register was virtual and if we know something about it,
- // add an assert node.
- if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
- RegisterVT.isInteger() && !RegisterVT.isVector()) {
- unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
- FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
- if (FLI.LiveOutRegInfo.size() > SlotNo) {
- FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];
-
- unsigned RegSize = RegisterVT.getSizeInBits();
- unsigned NumSignBits = LOI.NumSignBits;
- unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
-
- // FIXME: We capture more information than the dag can represent. For
- // now, just use the tightest assertzext/assertsext possible.
- bool isSExt = true;
- EVT FromVT(MVT::Other);
- if (NumSignBits == RegSize)
- isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
- else if (NumZeroBits >= RegSize-1)
- isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
- else if (NumSignBits > RegSize-8)
- isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
- else if (NumZeroBits >= RegSize-8)
- isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
- else if (NumSignBits > RegSize-16)
- isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
- else if (NumZeroBits >= RegSize-16)
- isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
- else if (NumSignBits > RegSize-32)
- isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
- else if (NumZeroBits >= RegSize-32)
- isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
-
- if (FromVT != MVT::Other)
- P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
- RegisterVT, P, DAG.getValueType(FromVT));
- }
- }
-
- Parts[i] = P;
- }
-
- Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
- NumRegs, RegisterVT, ValueVT);
- Part += NumRegs;
- Parts.clear();
- }
-
- return DAG.getNode(ISD::MERGE_VALUES, dl,
- DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
- &Values[0], ValueVTs.size());
-}
-
-/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
-/// specified value into the registers specified by this object. This uses
-/// Chain/Flag as the input and updates them for the output Chain/Flag.
-/// If the Flag pointer is NULL, no flag is used.
-void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
- SDValue &Chain, SDValue *Flag) const {
- // Get the list of the values's legal parts.
- unsigned NumRegs = Regs.size();
- SmallVector<SDValue, 8> Parts(NumRegs);
- for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
- EVT ValueVT = ValueVTs[Value];
- unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
- EVT RegisterVT = RegVTs[Value];
-
- getCopyToParts(DAG, dl,
- Val.getValue(Val.getResNo() + Value),
- &Parts[Part], NumParts, RegisterVT);
- Part += NumParts;
- }
-
- // Copy the parts into the registers.
- SmallVector<SDValue, 8> Chains(NumRegs);
- for (unsigned i = 0; i != NumRegs; ++i) {
- SDValue Part;
- if (Flag == 0) {
- Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
- } else {
- Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
- *Flag = Part.getValue(1);
- }
-
- Chains[i] = Part.getValue(0);
- }
-
- if (NumRegs == 1 || Flag)
- // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
- // flagged to it. That is the CopyToReg nodes and the user are considered
- // a single scheduling unit. If we create a TokenFactor and return it as
- // chain, then the TokenFactor is both a predecessor (operand) of the
- // user as well as a successor (the TF operands are flagged to the user).
- // c1, f1 = CopyToReg
- // c2, f2 = CopyToReg
- // c3 = TokenFactor c1, c2
- // ...
- // = op c3, ..., f2
- Chain = Chains[NumRegs-1];
- else
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
-}
-
-/// AddInlineAsmOperands - Add this value to the specified inlineasm node
-/// operand list. This adds the code marker and includes the number of
-/// values added into it.
-void RegsForValue::AddInlineAsmOperands(unsigned Code,
- bool HasMatching,unsigned MatchingIdx,
- SelectionDAG &DAG,
- std::vector<SDValue> &Ops) const {
- assert(Regs.size() < (1 << 13) && "Too many inline asm outputs!");
- unsigned Flag = Code | (Regs.size() << 3);
- if (HasMatching)
- Flag |= 0x80000000 | (MatchingIdx << 16);
- SDValue Res = DAG.getTargetConstant(Flag, MVT::i32);
- Ops.push_back(Res);
-
- for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
- unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
- EVT RegisterVT = RegVTs[Value];
- for (unsigned i = 0; i != NumRegs; ++i) {
- assert(Reg < Regs.size() && "Mismatch in # registers expected");
- Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
- }
- }
-}
-
-/// isAllocatableRegister - If the specified register is safe to allocate,
-/// i.e. it isn't a stack pointer or some other special register, return the
-/// register class for the register. Otherwise, return null.
-static const TargetRegisterClass *
-isAllocatableRegister(unsigned Reg, MachineFunction &MF,
- const TargetLowering &TLI,
- const TargetRegisterInfo *TRI) {
- EVT FoundVT = MVT::Other;
- const TargetRegisterClass *FoundRC = 0;
- for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
- E = TRI->regclass_end(); RCI != E; ++RCI) {
- EVT ThisVT = MVT::Other;
-
- const TargetRegisterClass *RC = *RCI;
- // If none of the value types for this register class are valid, we
- // can't use it. For example, 64-bit reg classes on 32-bit targets.
- for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
- I != E; ++I) {
- if (TLI.isTypeLegal(*I)) {
- // If we have already found this register in a different register class,
- // choose the one with the largest VT specified. For example, on
- // PowerPC, we favor f64 register classes over f32.
- if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
- ThisVT = *I;
- break;
- }
- }
- }
-
- if (ThisVT == MVT::Other) continue;
-
- // NOTE: This isn't ideal. In particular, this might allocate the
- // frame pointer in functions that need it (due to them not being taken
- // out of allocation, because a variable sized allocation hasn't been seen
- // yet). This is a slight code pessimization, but should still work.
- for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
- E = RC->allocation_order_end(MF); I != E; ++I)
- if (*I == Reg) {
- // We found a matching register class. Keep looking at others in case
- // we find one with larger registers that this physreg is also in.
- FoundRC = RC;
- FoundVT = ThisVT;
- break;
- }
- }
- return FoundRC;
-}
-
-
namespace llvm {
+
/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
-class VISIBILITY_HIDDEN SDISelAsmOperandInfo :
+class LLVM_LIBRARY_VISIBILITY SDISelAsmOperandInfo :
public TargetLowering::AsmOperandInfo {
public:
/// CallOperand - If this is the result output operand or a clobber
@@ -4959,7 +5046,7 @@ public:
if (isIndirect) {
const llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
if (!PtrTy)
- llvm_report_error("Indirect operand for inline asm not a pointer!");
+ report_fatal_error("Indirect operand for inline asm not a pointer!");
OpTy = PtrTy->getElementType();
}
@@ -4995,8 +5082,56 @@ private:
Regs.insert(*Aliases);
}
};
+
} // end llvm namespace.
+/// isAllocatableRegister - If the specified register is safe to allocate,
+/// i.e. it isn't a stack pointer or some other special register, return the
+/// register class for the register. Otherwise, return null.
+static const TargetRegisterClass *
+isAllocatableRegister(unsigned Reg, MachineFunction &MF,
+ const TargetLowering &TLI,
+ const TargetRegisterInfo *TRI) {
+ EVT FoundVT = MVT::Other;
+ const TargetRegisterClass *FoundRC = 0;
+ for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
+ E = TRI->regclass_end(); RCI != E; ++RCI) {
+ EVT ThisVT = MVT::Other;
+
+ const TargetRegisterClass *RC = *RCI;
+ // If none of the value types for this register class are valid, we
+ // can't use it. For example, 64-bit reg classes on 32-bit targets.
+ for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
+ I != E; ++I) {
+ if (TLI.isTypeLegal(*I)) {
+ // If we have already found this register in a different register class,
+ // choose the one with the largest VT specified. For example, on
+ // PowerPC, we favor f64 register classes over f32.
+ if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
+ ThisVT = *I;
+ break;
+ }
+ }
+ }
+
+ if (ThisVT == MVT::Other) continue;
+
+ // NOTE: This isn't ideal. In particular, this might allocate the
+ // frame pointer in functions that need it (due to them not being taken
+ // out of allocation, because a variable sized allocation hasn't been seen
+ // yet). This is a slight code pessimization, but should still work.
+ for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
+ E = RC->allocation_order_end(MF); I != E; ++I)
+ if (*I == Reg) {
+ // We found a matching register class. Keep looking at others in case
+ // we find one with larger registers that this physreg is also in.
+ FoundRC = RC;
+ FoundVT = ThisVT;
+ break;
+ }
+ }
+ return FoundRC;
+}
/// GetRegistersForValue - Assign registers (virtual or physical) for the
/// specified operand. We prefer to assign virtual registers, to allow the
@@ -5108,7 +5243,7 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
}
}
- OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
+ OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
return;
@@ -5126,7 +5261,7 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
for (; NumRegs; --NumRegs)
Regs.push_back(RegInfo.createVirtualRegister(RC));
- OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
+ OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
return;
}
@@ -5169,7 +5304,7 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
for (unsigned i = RegStart; i != RegEnd; ++i)
Regs.push_back(RegClassRegs[i]);
- OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(),
+ OpInfo.AssignedRegs = RegsForValue(Regs, *RC->vt_begin(),
OpInfo.ConstraintVT);
OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
return;
@@ -5179,31 +5314,10 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
// Otherwise, we couldn't allocate enough registers for this.
}
-/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
-/// processed uses a memory 'm' constraint.
-static bool
-hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
- const TargetLowering &TLI) {
- for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
- InlineAsm::ConstraintInfo &CI = CInfos[i];
- for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
- TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
- if (CType == TargetLowering::C_Memory)
- return true;
- }
-
- // Indirect operand accesses access memory.
- if (CI.isIndirect)
- return true;
- }
-
- return false;
-}
-
/// visitInlineAsm - Handle a call to an InlineAsm object.
///
-void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
- InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
+void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
+ const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
/// ConstraintOperands - Information about all of the constraints.
std::vector<SDISelAsmOperandInfo> ConstraintOperands;
@@ -5239,7 +5353,7 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
case InlineAsm::isOutput:
// Indirect outputs just consume an argument.
if (OpInfo.isIndirect) {
- OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
+ OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
break;
}
@@ -5256,7 +5370,7 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
++ResNo;
break;
case InlineAsm::isInput:
- OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
+ OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
break;
case InlineAsm::isClobber:
// Nothing to do.
@@ -5269,7 +5383,7 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
// Strip bitcasts, if any. This mostly comes up for functions.
OpInfo.CallOperandVal = OpInfo.CallOperandVal->stripPointerCasts();
- if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
+ if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
} else {
OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
@@ -5292,21 +5406,22 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
// error.
if (OpInfo.hasMatchingInput()) {
SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
+
if (OpInfo.ConstraintVT != Input.ConstraintVT) {
if ((OpInfo.ConstraintVT.isInteger() !=
Input.ConstraintVT.isInteger()) ||
(OpInfo.ConstraintVT.getSizeInBits() !=
Input.ConstraintVT.getSizeInBits())) {
- llvm_report_error("Unsupported asm: input constraint"
- " with a matching output constraint of incompatible"
- " type!");
+ report_fatal_error("Unsupported asm: input constraint"
+ " with a matching output constraint of"
+ " incompatible type!");
}
Input.ConstraintVT = OpInfo.ConstraintVT;
}
}
// Compute the constraint code and ConstraintType to use.
- TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);
+ TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
// If this is a memory input, and if the operand is not indirect, do what we
// need to to provide an address for the memory input.
@@ -5321,7 +5436,7 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
// If the operand is a float, integer, or vector constant, spill to a
// constant pool entry to get its address.
- Value *OpVal = OpInfo.CallOperandVal;
+ const Value *OpVal = OpInfo.CallOperandVal;
if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
isa<ConstantVector>(OpVal)) {
OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
@@ -5374,6 +5489,15 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
TLI.getPointerTy()));
+ // If we have a !srcloc metadata node associated with it, we want to attach
+ // this to the ultimately generated inline asm machineinstr. To do this, we
+ // pass in the third operand as this (potentially null) inline asm MDNode.
+ const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
+ AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
+
+ // Remember the AlignStack bit as operand 3.
+ AsmNodeOperands.push_back(DAG.getTargetConstant(IA->isAlignStack() ? 1 : 0,
+ MVT::i1));
// Loop over all of the inputs, copying the operand values into the
// appropriate registers and processing the output regs.
@@ -5393,8 +5517,8 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
assert(OpInfo.isIndirect && "Memory output must be indirect operand");
// Add information to the INLINEASM node to know about this output.
- unsigned ResOpType = 4/*MEM*/ | (1<<3);
- AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
+ unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
+ AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags,
TLI.getPointerTy()));
AsmNodeOperands.push_back(OpInfo.CallOperand);
break;
@@ -5404,10 +5528,9 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
// Copy the output from the appropriate register. Find a register that
// we can use.
- if (OpInfo.AssignedRegs.Regs.empty()) {
- llvm_report_error("Couldn't allocate output reg for"
- " constraint '" + OpInfo.ConstraintCode + "'!");
- }
+ if (OpInfo.AssignedRegs.Regs.empty())
+ report_fatal_error("Couldn't allocate output reg for constraint '" +
+ Twine(OpInfo.ConstraintCode) + "'!");
// If this is an indirect operand, store through the pointer after the
// asm.
@@ -5424,8 +5547,8 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
// Add information to the INLINEASM node to know that this register is
// set.
OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
- 6 /* EARLYCLOBBER REGDEF */ :
- 2 /* REGDEF */ ,
+ InlineAsm::Kind_RegDefEarlyClobber :
+ InlineAsm::Kind_RegDef,
false,
0,
DAG,
@@ -5442,29 +5565,31 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
// Scan until we find the definition we already emitted of this operand.
// When we find it, create a RegsForValue operand.
- unsigned CurOp = 2; // The first operand.
+ unsigned CurOp = InlineAsm::Op_FirstOperand;
for (; OperandNo; --OperandNo) {
// Advance to the next operand.
unsigned OpFlag =
cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
- assert(((OpFlag & 7) == 2 /*REGDEF*/ ||
- (OpFlag & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
- (OpFlag & 7) == 4 /*MEM*/) &&
- "Skipped past definitions?");
+ assert((InlineAsm::isRegDefKind(OpFlag) ||
+ InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
+ InlineAsm::isMemKind(OpFlag)) && "Skipped past definitions?");
CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
}
unsigned OpFlag =
cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
- if ((OpFlag & 7) == 2 /*REGDEF*/
- || (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) {
+ if (InlineAsm::isRegDefKind(OpFlag) ||
+ InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
// Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
if (OpInfo.isIndirect) {
- llvm_report_error("Don't know how to handle tied indirect "
- "register inputs yet!");
+ // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
+ LLVMContext &Ctx = *DAG.getContext();
+ Ctx.emitError(CS.getInstruction(), "inline asm not supported yet:"
+ " don't know how to handle tied "
+ "indirect register inputs");
}
+
RegsForValue MatchedRegs;
- MatchedRegs.TLI = &TLI;
MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
EVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
MatchedRegs.RegVTs.push_back(RegVT);
@@ -5477,49 +5602,54 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
// Use the produced MatchedRegs object to
MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
Chain, &Flag);
- MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/,
+ MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
true, OpInfo.getMatchedOperand(),
DAG, AsmNodeOperands);
break;
- } else {
- assert(((OpFlag & 7) == 4) && "Unknown matching constraint!");
- assert((InlineAsm::getNumOperandRegisters(OpFlag)) == 1 &&
- "Unexpected number of operands");
- // Add information to the INLINEASM node to know about this input.
- // See InlineAsm.h isUseOperandTiedToDef.
- OpFlag |= 0x80000000 | (OpInfo.getMatchedOperand() << 16);
- AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
- TLI.getPointerTy()));
- AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
- break;
}
+
+ assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
+ assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
+ "Unexpected number of operands");
+ // Add information to the INLINEASM node to know about this input.
+ // See InlineAsm.h isUseOperandTiedToDef.
+ OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
+ OpInfo.getMatchedOperand());
+ AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
+ TLI.getPointerTy()));
+ AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
+ break;
}
- if (OpInfo.ConstraintType == TargetLowering::C_Other) {
- assert(!OpInfo.isIndirect &&
- "Don't know how to handle indirect other inputs yet!");
+ // Treat indirect 'X' constraint as memory.
+ if (OpInfo.ConstraintType == TargetLowering::C_Other &&
+ OpInfo.isIndirect)
+ OpInfo.ConstraintType = TargetLowering::C_Memory;
+ if (OpInfo.ConstraintType == TargetLowering::C_Other) {
std::vector<SDValue> Ops;
TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
- hasMemory, Ops, DAG);
- if (Ops.empty()) {
- llvm_report_error("Invalid operand for inline asm"
- " constraint '" + OpInfo.ConstraintCode + "'!");
- }
+ Ops, DAG);
+ if (Ops.empty())
+ report_fatal_error("Invalid operand for inline asm constraint '" +
+ Twine(OpInfo.ConstraintCode) + "'!");
// Add information to the INLINEASM node to know about this input.
- unsigned ResOpType = 3 /*IMM*/ | (Ops.size() << 3);
+ unsigned ResOpType =
+ InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
TLI.getPointerTy()));
AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
break;
- } else if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
+ }
+
+ if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
"Memory operands expect pointer values");
// Add information to the INLINEASM node to know about this input.
- unsigned ResOpType = 4/*MEM*/ | (1<<3);
+ unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
TLI.getPointerTy()));
AsmNodeOperands.push_back(InOperandVal);
@@ -5534,15 +5664,14 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
// Copy the input into the appropriate registers.
if (OpInfo.AssignedRegs.Regs.empty() ||
- !OpInfo.AssignedRegs.areValueTypesLegal()) {
- llvm_report_error("Couldn't allocate input reg for"
- " constraint '"+ OpInfo.ConstraintCode +"'!");
- }
+ !OpInfo.AssignedRegs.areValueTypesLegal(TLI))
+ report_fatal_error("Couldn't allocate input reg for constraint '" +
+ Twine(OpInfo.ConstraintCode) + "'!");
OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
Chain, &Flag);
- OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/, false, 0,
+ OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
DAG, AsmNodeOperands);
break;
}
@@ -5550,7 +5679,8 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
// Add the clobbered value to the operand list, so that the register
// allocator is aware that the physreg got clobbered.
if (!OpInfo.AssignedRegs.Regs.empty())
- OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */,
+ OpInfo.AssignedRegs.AddInlineAsmOperands(
+ InlineAsm::Kind_RegDefEarlyClobber,
false, 0, DAG,
AsmNodeOperands);
break;
@@ -5558,8 +5688,8 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
}
}
- // Finish up input operands.
- AsmNodeOperands[0] = Chain;
+ // Finish up input operands. Set the input chain and add the flag last.
+ AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
@@ -5570,7 +5700,7 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
// If this asm returns a register value, copy the result from that register
// and set it as the value of the call.
if (!RetValRegs.Regs.empty()) {
- SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
+ SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(),
Chain, &Flag);
// FIXME: Why don't we do this for inline asms with MRVs?
@@ -5603,17 +5733,16 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
return;
}
- std::vector<std::pair<SDValue, Value*> > StoresToEmit;
+ std::vector<std::pair<SDValue, const Value *> > StoresToEmit;
// Process indirect outputs, first output all of the flagged copies out of
// physregs.
for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
- Value *Ptr = IndirectStoresToEmit[i].second;
- SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
+ const Value *Ptr = IndirectStoresToEmit[i].second;
+ SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(),
Chain, &Flag);
StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
-
}
// Emit the non-flagged stores from the physregs.
@@ -5634,35 +5763,37 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
DAG.setRoot(Chain);
}
-void SelectionDAGBuilder::visitVAStart(CallInst &I) {
+void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
MVT::Other, getRoot(),
- getValue(I.getOperand(1)),
- DAG.getSrcValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0)),
+ DAG.getSrcValue(I.getArgOperand(0))));
}
-void SelectionDAGBuilder::visitVAArg(VAArgInst &I) {
+void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
+ const TargetData &TD = *TLI.getTargetData();
SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
getRoot(), getValue(I.getOperand(0)),
- DAG.getSrcValue(I.getOperand(0)));
+ DAG.getSrcValue(I.getOperand(0)),
+ TD.getABITypeAlignment(I.getType()));
setValue(&I, V);
DAG.setRoot(V.getValue(1));
}
-void SelectionDAGBuilder::visitVAEnd(CallInst &I) {
+void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
MVT::Other, getRoot(),
- getValue(I.getOperand(1)),
- DAG.getSrcValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0)),
+ DAG.getSrcValue(I.getArgOperand(0))));
}
-void SelectionDAGBuilder::visitVACopy(CallInst &I) {
+void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
MVT::Other, getRoot(),
- getValue(I.getOperand(1)),
- getValue(I.getOperand(2)),
- DAG.getSrcValue(I.getOperand(1)),
- DAG.getSrcValue(I.getOperand(2))));
+ getValue(I.getArgOperand(0)),
+ getValue(I.getArgOperand(1)),
+ DAG.getSrcValue(I.getArgOperand(0)),
+ DAG.getSrcValue(I.getArgOperand(1))));
}
/// TargetLowering::LowerCallTo - This is the default LowerCallTo
@@ -5676,9 +5807,11 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
CallingConv::ID CallConv, bool isTailCall,
bool isReturnValueUsed,
SDValue Callee,
- ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl) {
+ ArgListTy &Args, SelectionDAG &DAG,
+ DebugLoc dl) const {
// Handle all of the outgoing arguments.
SmallVector<ISD::OutputArg, 32> Outs;
+ SmallVector<SDValue, 32> OutVals;
for (unsigned i = 0, e = Args.size(); i != e; ++i) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
@@ -5732,13 +5865,15 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
for (unsigned j = 0; j != NumParts; ++j) {
// if it isn't first piece, alignment must be 1
- ISD::OutputArg MyFlags(Flags, Parts[j], i < NumFixedArgs);
+ ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(),
+ i < NumFixedArgs);
if (NumParts > 1 && j == 0)
MyFlags.Flags.setSplit();
else if (j != 0)
MyFlags.Flags.setOrigAlign(1);
Outs.push_back(MyFlags);
+ OutVals.push_back(Parts[j]);
}
}
}
@@ -5767,7 +5902,7 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
SmallVector<SDValue, 4> InVals;
Chain = LowerCall(Chain, Callee, CallConv, isVarArg, isTailCall,
- Outs, Ins, dl, DAG, InVals);
+ Outs, OutVals, Ins, dl, DAG, InVals);
// Verify that the target's LowerCall behaved as expected.
assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
@@ -5776,12 +5911,6 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
"LowerCall emitted a return value for a tail call!");
assert((isTailCall || InVals.size() == Ins.size()) &&
"LowerCall didn't emit the correct number of values!");
- DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
- assert(InVals[i].getNode() &&
- "LowerCall emitted a null value!");
- assert(Ins[i].VT == InVals[i].getValueType() &&
- "LowerCall emitted a value with the wrong type!");
- });
// For a tail call, the return value is merely live-out and there aren't
// any nodes in the DAG representing it. Return a special value to
@@ -5792,6 +5921,13 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
return std::make_pair(SDValue(), SDValue());
}
+ DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
+ assert(InVals[i].getNode() &&
+ "LowerCall emitted a null value!");
+ assert(Ins[i].VT == InVals[i].getValueType() &&
+ "LowerCall emitted a value with the wrong type!");
+ });
+
// Collect the legal value parts into potentially illegal values
// that correspond to the original function's return values.
ISD::NodeType AssertOp = ISD::DELETED_NODE;
@@ -5826,19 +5962,20 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
void TargetLowering::LowerOperationWrapper(SDNode *N,
SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
SDValue Res = LowerOperation(SDValue(N, 0), DAG);
if (Res.getNode())
Results.push_back(Res);
}
-SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
+SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
llvm_unreachable("LowerOperation not implemented for this target!");
return SDValue();
}
-void SelectionDAGBuilder::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
- SDValue Op = getValue(V);
+void
+SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
+ SDValue Op = getNonRegisterValue(V);
assert((Op.getOpcode() != ISD::CopyFromReg ||
cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
"Copy from a reg to the same reg!");
@@ -5852,25 +5989,20 @@ void SelectionDAGBuilder::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
#include "llvm/CodeGen/SelectionDAGISel.h"
-void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
+void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
// If this is the entry block, emit arguments.
- Function &F = *LLVMBB->getParent();
+ const Function &F = *LLVMBB->getParent();
SelectionDAG &DAG = SDB->DAG;
- SDValue OldRoot = DAG.getRoot();
DebugLoc dl = SDB->getCurDebugLoc();
const TargetData *TD = TLI.getTargetData();
SmallVector<ISD::InputArg, 16> Ins;
// Check whether the function can return without sret-demotion.
- SmallVector<EVT, 4> OutVTs;
- SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
- getReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
- OutVTs, OutsFlags, TLI);
- FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
-
- FLI.CanLowerReturn = TLI.CanLowerReturn(F.getCallingConv(), F.isVarArg(),
- OutVTs, OutsFlags, DAG);
- if (!FLI.CanLowerReturn) {
+ SmallVector<ISD::OutputArg, 4> Outs;
+ GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
+ Outs, TLI);
+
+ if (!FuncInfo->CanLowerReturn) {
// Put in an sret pointer parameter before all the other parameters.
SmallVector<EVT, 1> ValueVTs;
ComputeValueVTs(TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
@@ -5879,14 +6011,14 @@ void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
// or one register.
ISD::ArgFlagsTy Flags;
Flags.setSRet();
- EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), ValueVTs[0]);
+ EVT RegisterVT = TLI.getRegisterType(*DAG.getContext(), ValueVTs[0]);
ISD::InputArg RetArg(Flags, RegisterVT, true);
Ins.push_back(RetArg);
}
// Set up the incoming argument description vector.
unsigned Idx = 1;
- for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
+ for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
I != E; ++I, ++Idx) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, I->getType(), ValueVTs);
@@ -5964,7 +6096,7 @@ void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
// Set up the argument values.
unsigned i = 0;
Idx = 1;
- if (!FLI.CanLowerReturn) {
+ if (!FuncInfo->CanLowerReturn) {
// Create a virtual register for the sret pointer, and put in a copy
// from the sret argument into it.
SmallVector<EVT, 1> ValueVTs;
@@ -5978,7 +6110,7 @@ void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
MachineFunction& MF = SDB->DAG.getMachineFunction();
MachineRegisterInfo& RegInfo = MF.getRegInfo();
unsigned SRetReg = RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT));
- FLI.DemoteRegister = SRetReg;
+ FuncInfo->DemoteRegister = SRetReg;
NewRoot = SDB->DAG.getCopyToReg(NewRoot, SDB->getCurDebugLoc(),
SRetReg, ArgValue);
DAG.setRoot(NewRoot);
@@ -5988,12 +6120,18 @@ void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
++i;
}
- for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
+ for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
++I, ++Idx) {
SmallVector<SDValue, 4> ArgValues;
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, I->getType(), ValueVTs);
unsigned NumValues = ValueVTs.size();
+
+ // If this argument is unused then remember its value. It is used to generate
+ // debugging information.
+ if (I->use_empty() && NumValues)
+ SDB->setUnusedArgValue(I, InVals[i]);
+
for (unsigned Value = 0; Value != NumValues; ++Value) {
EVT VT = ValueVTs[Value];
EVT PartVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
@@ -6014,9 +6152,17 @@ void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
i += NumParts;
}
+ // Note down frame index for byval arguments.
+ if (I->hasByValAttr() && !ArgValues.empty())
+ if (FrameIndexSDNode *FI =
+ dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
+ FuncInfo->setByValArgumentFrameIndex(I, FI->getIndex());
+
if (!I->use_empty()) {
- SDValue Res = DAG.getMergeValues(&ArgValues[0], NumValues,
- SDB->getCurDebugLoc());
+ SDValue Res;
+ if (!ArgValues.empty())
+ Res = DAG.getMergeValues(&ArgValues[0], NumValues,
+ SDB->getCurDebugLoc());
SDB->setValue(I, Res);
// If this argument is live outside of the entry block, insert a copy from
@@ -6029,7 +6175,7 @@ void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
// Finally, if the target has anything special to do, allow it to do so.
// FIXME: this should insert code into the DAG!
- EmitFunctionEntryCode(F, SDB->DAG.getMachineFunction());
+ EmitFunctionEntryCode();
}
/// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
@@ -6040,51 +6186,53 @@ void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
/// the end.
///
void
-SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
- TerminatorInst *TI = LLVMBB->getTerminator();
+SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
+ const TerminatorInst *TI = LLVMBB->getTerminator();
SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
// Check successor nodes' PHI nodes that expect a constant to be available
// from this block.
for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
- BasicBlock *SuccBB = TI->getSuccessor(succ);
+ const BasicBlock *SuccBB = TI->getSuccessor(succ);
if (!isa<PHINode>(SuccBB->begin())) continue;
- MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
+ MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
// If this terminator has multiple identical successors (common for
// switches), only handle each succ once.
if (!SuccsHandled.insert(SuccMBB)) continue;
MachineBasicBlock::iterator MBBI = SuccMBB->begin();
- PHINode *PN;
// At this point we know that there is a 1-1 correspondence between LLVM PHI
// nodes and Machine PHI nodes, but the incoming operands have not been
// emitted yet.
- for (BasicBlock::iterator I = SuccBB->begin();
- (PN = dyn_cast<PHINode>(I)); ++I) {
+ for (BasicBlock::const_iterator I = SuccBB->begin();
+ const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
// Ignore dead phi's.
if (PN->use_empty()) continue;
unsigned Reg;
- Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
+ const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
- if (Constant *C = dyn_cast<Constant>(PHIOp)) {
- unsigned &RegOut = SDB->ConstantsOut[C];
+ if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
+ unsigned &RegOut = ConstantsOut[C];
if (RegOut == 0) {
- RegOut = FuncInfo->CreateRegForValue(C);
- SDB->CopyValueToVirtualRegister(C, RegOut);
+ RegOut = FuncInfo.CreateRegs(C->getType());
+ CopyValueToVirtualRegister(C, RegOut);
}
Reg = RegOut;
} else {
- Reg = FuncInfo->ValueMap[PHIOp];
- if (Reg == 0) {
+ DenseMap<const Value *, unsigned>::iterator I =
+ FuncInfo.ValueMap.find(PHIOp);
+ if (I != FuncInfo.ValueMap.end())
+ Reg = I->second;
+ else {
assert(isa<AllocaInst>(PHIOp) &&
- FuncInfo->StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
+ FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
"Didn't codegen value into a register!??");
- Reg = FuncInfo->CreateRegForValue(PHIOp);
- SDB->CopyValueToVirtualRegister(PHIOp, Reg);
+ Reg = FuncInfo.CreateRegs(PHIOp->getType());
+ CopyValueToVirtualRegister(PHIOp, Reg);
}
}
@@ -6094,77 +6242,12 @@ SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
ComputeValueVTs(TLI, PN->getType(), ValueVTs);
for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
EVT VT = ValueVTs[vti];
- unsigned NumRegisters = TLI.getNumRegisters(*CurDAG->getContext(), VT);
+ unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
for (unsigned i = 0, e = NumRegisters; i != e; ++i)
- SDB->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
+ FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
Reg += NumRegisters;
}
}
}
- SDB->ConstantsOut.clear();
-}
-
-/// This is the Fast-ISel version of HandlePHINodesInSuccessorBlocks. It only
-/// supports legal types, and it emits MachineInstrs directly instead of
-/// creating SelectionDAG nodes.
-///
-bool
-SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
- FastISel *F) {
- TerminatorInst *TI = LLVMBB->getTerminator();
-
- SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
- unsigned OrigNumPHINodesToUpdate = SDB->PHINodesToUpdate.size();
-
- // Check successor nodes' PHI nodes that expect a constant to be available
- // from this block.
- for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
- BasicBlock *SuccBB = TI->getSuccessor(succ);
- if (!isa<PHINode>(SuccBB->begin())) continue;
- MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
-
- // If this terminator has multiple identical successors (common for
- // switches), only handle each succ once.
- if (!SuccsHandled.insert(SuccMBB)) continue;
-
- MachineBasicBlock::iterator MBBI = SuccMBB->begin();
- PHINode *PN;
-
- // At this point we know that there is a 1-1 correspondence between LLVM PHI
- // nodes and Machine PHI nodes, but the incoming operands have not been
- // emitted yet.
- for (BasicBlock::iterator I = SuccBB->begin();
- (PN = dyn_cast<PHINode>(I)); ++I) {
- // Ignore dead phi's.
- if (PN->use_empty()) continue;
-
- // Only handle legal types. Two interesting things to note here. First,
- // by bailing out early, we may leave behind some dead instructions,
- // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
- // own moves. Second, this check is necessary becuase FastISel doesn't
- // use CreateRegForValue to create registers, so it always creates
- // exactly one register for each non-void instruction.
- EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
- if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
- // Promote MVT::i1.
- if (VT == MVT::i1)
- VT = TLI.getTypeToTransformTo(*CurDAG->getContext(), VT);
- else {
- SDB->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
- return false;
- }
- }
-
- Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
-
- unsigned Reg = F->getRegForValue(PHIOp);
- if (Reg == 0) {
- SDB->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
- return false;
- }
- SDB->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
- }
- }
-
- return true;
+ ConstantsOut.clear();
}
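
(Aside, not part of the patch: the hunk above moves HandlePHINodesInSuccessorBlocks into SelectionDAGBuilder, but keeps its core bookkeeping trick — each PHI-operand constant is materialized into a virtual register only once, and (instruction, register) pairs are queued for a later fix-up pass. A minimal standalone sketch of that idea, using hypothetical stand-in types rather than the real LLVM classes:

// Sketch only: Constant/MachinePHI are stand-ins, not LLVM types.
#include <cstdio>
#include <unordered_map>
#include <utility>
#include <vector>

struct Constant   { int value; };   // stand-in for llvm::Constant
struct MachinePHI { int id; };      // stand-in for a machine PHI instruction

int main() {
  unsigned NextVirtReg = 1024;      // fake virtual-register counter
  std::unordered_map<const Constant*, unsigned> ConstantsOut;
  std::vector<std::pair<MachinePHI*, unsigned>> PHINodesToUpdate;

  Constant C1{7}, C2{42};
  MachinePHI P1{0}, P2{1}, P3{2};
  // Three PHIs, two of which share the same incoming constant.
  std::pair<MachinePHI*, const Constant*> Incoming[] = {
      {&P1, &C1}, {&P2, &C1}, {&P3, &C2}};

  for (auto &In : Incoming) {
    unsigned &Reg = ConstantsOut[In.second];  // 0 if not materialized yet
    if (Reg == 0)
      Reg = NextVirtReg++;                    // emit the constant only once
    PHINodesToUpdate.emplace_back(In.first, Reg);
  }

  for (auto &U : PHINodesToUpdate)
    std::printf("phi %d <- vreg%u\n", U.first->id, U.second);
  ConstantsOut.clear();   // per-block state, cleared as in the patch
}

The clear() at the end mirrors the ConstantsOut.clear() the hunk keeps: the dedup map is per-block state, while the update list survives until the PHIs are patched.)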
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index bc4b33d..5f400e9 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -18,9 +18,6 @@
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
-#ifndef NDEBUG
-#include "llvm/ADT/SmallSet.h"
-#endif
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Support/CallSite.h"
@@ -36,6 +33,7 @@ class BasicBlock;
class BitCastInst;
class BranchInst;
class CallInst;
+class DbgValueInst;
class ExtractElementInst;
class ExtractValueInst;
class FCmpInst;
@@ -56,13 +54,14 @@ class InsertValueInst;
class Instruction;
class LoadInst;
class MachineBasicBlock;
-class MachineFunction;
class MachineInstr;
class MachineRegisterInfo;
+class MDNode;
class PHINode;
class PtrToIntInst;
class ReturnInst;
class SDISelAsmOperandInfo;
+class SDDbgValue;
class SExtInst;
class SelectInst;
class ShuffleVectorInst;
@@ -81,15 +80,34 @@ class ZExtInst;
//===----------------------------------------------------------------------===//
/// SelectionDAGBuilder - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
-/// Also, targets can overload any lowering method.
///
class SelectionDAGBuilder {
- MachineBasicBlock *CurMBB;
-
/// CurDebugLoc - current file + line number. Changes as we build the DAG.
DebugLoc CurDebugLoc;
DenseMap<const Value*, SDValue> NodeMap;
+
+ /// UnusedArgNodeMap - Maps argument values for unused arguments. This is used
+ /// UnusedArgNodeMap - Maps argument values for unused arguments. This is used
+ /// to preserve debug information for incoming arguments.
+ DenseMap<const Value*, SDValue> UnusedArgNodeMap;
+
+ /// DanglingDebugInfo - Helper type for DanglingDebugInfoMap.
+ class DanglingDebugInfo {
+ const DbgValueInst* DI;
+ DebugLoc dl;
+ unsigned SDNodeOrder;
+ public:
+ DanglingDebugInfo() : DI(0), dl(DebugLoc()), SDNodeOrder(0) { }
+ DanglingDebugInfo(const DbgValueInst *di, DebugLoc DL, unsigned SDNO) :
+ DI(di), dl(DL), SDNodeOrder(SDNO) { }
+ const DbgValueInst* getDI() { return DI; }
+ DebugLoc getdl() { return dl; }
+ unsigned getSDNodeOrder() { return SDNodeOrder; }
+ };
+
+ /// DanglingDebugInfoMap - Keeps track of dbg_values for which we have not
+ /// yet seen the referent. We defer handling these until we do see it.
+ DenseMap<const Value*, DanglingDebugInfo> DanglingDebugInfoMap;
public:
/// PendingLoads - Loads are not emitted to the program immediately. We bunch
@@ -144,15 +162,16 @@ private:
/// CaseRec - A struct with ctor used in lowering switches to a binary tree
/// of conditional branches.
struct CaseRec {
- CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) :
+ CaseRec(MachineBasicBlock *bb, const Constant *lt, const Constant *ge,
+ CaseRange r) :
CaseBB(bb), LT(lt), GE(ge), Range(r) {}
/// CaseBB - The MBB in which to emit the compare and branch
MachineBasicBlock *CaseBB;
/// LT, GE - If nonzero, we know the current case value must be less-than or
/// greater-than-or-equal-to these Constants.
- Constant *LT;
- Constant *GE;
+ const Constant *LT;
+ const Constant *GE;
/// Range - A pair of iterators representing the range of case values to be
/// processed at this point in the binary search tree.
CaseRange Range;
@@ -183,7 +202,8 @@ private:
/// SelectionDAGBuilder and SDISel for the code generation of additional basic
/// blocks needed by multi-case switch statements.
struct CaseBlock {
- CaseBlock(ISD::CondCode cc, Value *cmplhs, Value *cmprhs, Value *cmpmiddle,
+ CaseBlock(ISD::CondCode cc, const Value *cmplhs, const Value *cmprhs,
+ const Value *cmpmiddle,
MachineBasicBlock *truebb, MachineBasicBlock *falsebb,
MachineBasicBlock *me)
: CC(cc), CmpLHS(cmplhs), CmpMHS(cmpmiddle), CmpRHS(cmprhs),
@@ -193,7 +213,7 @@ private:
// CmpLHS/CmpRHS/CmpMHS - The LHS/MHS/RHS of the comparison to emit.
// Emit by default LHS op RHS. MHS is used for range comparisons:
// If MHS is not null: (LHS <= MHS) and (MHS <= RHS).
- Value *CmpLHS, *CmpMHS, *CmpRHS;
+ const Value *CmpLHS, *CmpMHS, *CmpRHS;
// TrueBB/FalseBB - the block to branch to if the setcc is true/false.
MachineBasicBlock *TrueBB, *FalseBB;
// ThisBB - the block into which to emit the code for the setcc and branches
@@ -215,12 +235,12 @@ private:
MachineBasicBlock *Default;
};
struct JumpTableHeader {
- JumpTableHeader(APInt F, APInt L, Value *SV, MachineBasicBlock *H,
+ JumpTableHeader(APInt F, APInt L, const Value *SV, MachineBasicBlock *H,
bool E = false):
First(F), Last(L), SValue(SV), HeaderBB(H), Emitted(E) {}
APInt First;
APInt Last;
- Value *SValue;
+ const Value *SValue;
MachineBasicBlock *HeaderBB;
bool Emitted;
};
@@ -237,7 +257,7 @@ private:
typedef SmallVector<BitTestCase, 3> BitTestInfo;
struct BitTestBlock {
- BitTestBlock(APInt F, APInt R, Value* SV,
+ BitTestBlock(APInt F, APInt R, const Value* SV,
unsigned Rg, bool E,
MachineBasicBlock* P, MachineBasicBlock* D,
const BitTestInfo& C):
@@ -245,7 +265,7 @@ private:
Parent(P), Default(D), Cases(C) { }
APInt First;
APInt Range;
- Value *SValue;
+ const Value *SValue;
unsigned Reg;
bool Emitted;
MachineBasicBlock *Parent;
@@ -257,7 +277,8 @@ public:
// TLI - This is information that describes the available target features we
// need for lowering. This indicates when operations are unavailable,
// implemented with a libcall, etc.
- TargetLowering &TLI;
+ const TargetMachine &TM;
+ const TargetLowering &TLI;
SelectionDAG &DAG;
const TargetData *TD;
AliasAnalysis *AA;
@@ -272,17 +293,9 @@ public:
/// SwitchInst code generation information.
std::vector<BitTestBlock> BitTestCases;
- /// PHINodesToUpdate - A list of phi instructions whose operand list will
- /// be updated after processing the current basic block.
- std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
-
- /// EdgeMapping - If an edge from CurMBB to any MBB is changed (e.g. due to
- /// scheduler custom lowering), track the change here.
- DenseMap<MachineBasicBlock*, MachineBasicBlock*> EdgeMapping;
-
// Emit PHI-node-operand constants only once even if used by multiple
// PHI nodes.
- DenseMap<Constant*, unsigned> ConstantsOut;
+ DenseMap<const Constant *, unsigned> ConstantsOut;
/// FuncInfo - Information about the function as a whole.
///
@@ -303,18 +316,16 @@ public:
LLVMContext *Context;
- SelectionDAGBuilder(SelectionDAG &dag, TargetLowering &tli,
- FunctionLoweringInfo &funcinfo,
+ SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo,
CodeGenOpt::Level ol)
- : CurDebugLoc(DebugLoc::getUnknownLoc()), SDNodeOrder(0),
- TLI(tli), DAG(dag), FuncInfo(funcinfo), OptLevel(ol),
- HasTailCall(false),
- Context(dag.getContext()) {
+ : SDNodeOrder(0), TM(dag.getTarget()), TLI(dag.getTargetLoweringInfo()),
+ DAG(dag), FuncInfo(funcinfo), OptLevel(ol),
+ HasTailCall(false), Context(dag.getContext()) {
}
void init(GCFunctionInfo *gfi, AliasAnalysis &aa);
- /// clear - Clear out the curret SelectionDAG and the associated
+ /// clear - Clear out the current SelectionDAG and the associated
/// state and prepare this SelectionDAGBuilder object to be used
/// for a new block. This doesn't clear out information about
/// additional blocks that are needed to complete switch lowering
@@ -336,24 +347,26 @@ public:
SDValue getControlRoot();
DebugLoc getCurDebugLoc() const { return CurDebugLoc; }
- void setCurDebugLoc(DebugLoc dl) { CurDebugLoc = dl; }
unsigned getSDNodeOrder() const { return SDNodeOrder; }
- void CopyValueToVirtualRegister(Value *V, unsigned Reg);
+ void CopyValueToVirtualRegister(const Value *V, unsigned Reg);
/// AssignOrderingToNode - Assign an ordering to the node. The order is gotten
/// from how the code appeared in the source. The ordering is used by the
/// scheduler to effectively turn off scheduling.
void AssignOrderingToNode(const SDNode *Node);
- void visit(Instruction &I);
+ void visit(const Instruction &I);
- void visit(unsigned Opcode, User &I);
-
- void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }
+ void visit(unsigned Opcode, const User &I);
+ // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
+ // generate the debug data structures now that we've seen its definition.
+ void resolveDanglingDebugInfo(const Value *V, SDValue Val);
SDValue getValue(const Value *V);
+ SDValue getNonRegisterValue(const Value *V);
+ SDValue getValueImpl(const Value *V);
void setValue(const Value *V, SDValue NewN) {
SDValue &N = NodeMap[V];
@@ -361,140 +374,162 @@ public:
N = NewN;
}
+ void setUnusedArgValue(const Value *V, SDValue NewN) {
+ SDValue &N = UnusedArgNodeMap[V];
+ assert(N.getNode() == 0 && "Already set a value for this node!");
+ N = NewN;
+ }
+
void GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
std::set<unsigned> &OutputRegs,
std::set<unsigned> &InputRegs);
- void FindMergedConditions(Value *Cond, MachineBasicBlock *TBB,
+ void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
- unsigned Opc);
- void EmitBranchForMergedCondition(Value *Cond, MachineBasicBlock *TBB,
+ MachineBasicBlock *SwitchBB, unsigned Opc);
+ void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- MachineBasicBlock *CurBB);
+ MachineBasicBlock *CurBB,
+ MachineBasicBlock *SwitchBB);
bool ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases);
- bool isExportableFromCurrentBlock(Value *V, const BasicBlock *FromBB);
- void CopyToExportRegsIfNeeded(Value *V);
- void ExportFromCurrentBlock(Value *V);
- void LowerCallTo(CallSite CS, SDValue Callee, bool IsTailCall,
+ bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB);
+ void CopyToExportRegsIfNeeded(const Value *V);
+ void ExportFromCurrentBlock(const Value *V);
+ void LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool IsTailCall,
MachineBasicBlock *LandingPad = NULL);
private:
// Terminator instructions.
- void visitRet(ReturnInst &I);
- void visitBr(BranchInst &I);
- void visitSwitch(SwitchInst &I);
- void visitIndirectBr(IndirectBrInst &I);
- void visitUnreachable(UnreachableInst &I) { /* noop */ }
+ void visitRet(const ReturnInst &I);
+ void visitBr(const BranchInst &I);
+ void visitSwitch(const SwitchInst &I);
+ void visitIndirectBr(const IndirectBrInst &I);
+ void visitUnreachable(const UnreachableInst &I) { /* noop */ }
// Helpers for visitSwitch
bool handleSmallSwitchRange(CaseRec& CR,
CaseRecVector& WorkList,
- Value* SV,
- MachineBasicBlock* Default);
+ const Value* SV,
+ MachineBasicBlock* Default,
+ MachineBasicBlock *SwitchBB);
bool handleJTSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
- Value* SV,
- MachineBasicBlock* Default);
+ const Value* SV,
+ MachineBasicBlock* Default,
+ MachineBasicBlock *SwitchBB);
bool handleBTSplitSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
- Value* SV,
- MachineBasicBlock* Default);
+ const Value* SV,
+ MachineBasicBlock* Default,
+ MachineBasicBlock *SwitchBB);
bool handleBitTestsSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
- Value* SV,
- MachineBasicBlock* Default);
+ const Value* SV,
+ MachineBasicBlock* Default,
+ MachineBasicBlock *SwitchBB);
public:
- void visitSwitchCase(CaseBlock &CB);
- void visitBitTestHeader(BitTestBlock &B);
+ void visitSwitchCase(CaseBlock &CB,
+ MachineBasicBlock *SwitchBB);
+ void visitBitTestHeader(BitTestBlock &B, MachineBasicBlock *SwitchBB);
void visitBitTestCase(MachineBasicBlock* NextMBB,
unsigned Reg,
- BitTestCase &B);
+ BitTestCase &B,
+ MachineBasicBlock *SwitchBB);
void visitJumpTable(JumpTable &JT);
- void visitJumpTableHeader(JumpTable &JT, JumpTableHeader &JTH);
+ void visitJumpTableHeader(JumpTable &JT, JumpTableHeader &JTH,
+ MachineBasicBlock *SwitchBB);
private:
// These all get lowered before this pass.
- void visitInvoke(InvokeInst &I);
- void visitUnwind(UnwindInst &I);
-
- void visitBinary(User &I, unsigned OpCode);
- void visitShift(User &I, unsigned Opcode);
- void visitAdd(User &I) { visitBinary(I, ISD::ADD); }
- void visitFAdd(User &I) { visitBinary(I, ISD::FADD); }
- void visitSub(User &I) { visitBinary(I, ISD::SUB); }
- void visitFSub(User &I);
- void visitMul(User &I) { visitBinary(I, ISD::MUL); }
- void visitFMul(User &I) { visitBinary(I, ISD::FMUL); }
- void visitURem(User &I) { visitBinary(I, ISD::UREM); }
- void visitSRem(User &I) { visitBinary(I, ISD::SREM); }
- void visitFRem(User &I) { visitBinary(I, ISD::FREM); }
- void visitUDiv(User &I) { visitBinary(I, ISD::UDIV); }
- void visitSDiv(User &I) { visitBinary(I, ISD::SDIV); }
- void visitFDiv(User &I) { visitBinary(I, ISD::FDIV); }
- void visitAnd (User &I) { visitBinary(I, ISD::AND); }
- void visitOr (User &I) { visitBinary(I, ISD::OR); }
- void visitXor (User &I) { visitBinary(I, ISD::XOR); }
- void visitShl (User &I) { visitShift(I, ISD::SHL); }
- void visitLShr(User &I) { visitShift(I, ISD::SRL); }
- void visitAShr(User &I) { visitShift(I, ISD::SRA); }
- void visitICmp(User &I);
- void visitFCmp(User &I);
+ void visitInvoke(const InvokeInst &I);
+ void visitUnwind(const UnwindInst &I);
+
+ void visitBinary(const User &I, unsigned OpCode);
+ void visitShift(const User &I, unsigned Opcode);
+ void visitAdd(const User &I) { visitBinary(I, ISD::ADD); }
+ void visitFAdd(const User &I) { visitBinary(I, ISD::FADD); }
+ void visitSub(const User &I) { visitBinary(I, ISD::SUB); }
+ void visitFSub(const User &I);
+ void visitMul(const User &I) { visitBinary(I, ISD::MUL); }
+ void visitFMul(const User &I) { visitBinary(I, ISD::FMUL); }
+ void visitURem(const User &I) { visitBinary(I, ISD::UREM); }
+ void visitSRem(const User &I) { visitBinary(I, ISD::SREM); }
+ void visitFRem(const User &I) { visitBinary(I, ISD::FREM); }
+ void visitUDiv(const User &I) { visitBinary(I, ISD::UDIV); }
+ void visitSDiv(const User &I) { visitBinary(I, ISD::SDIV); }
+ void visitFDiv(const User &I) { visitBinary(I, ISD::FDIV); }
+ void visitAnd (const User &I) { visitBinary(I, ISD::AND); }
+ void visitOr (const User &I) { visitBinary(I, ISD::OR); }
+ void visitXor (const User &I) { visitBinary(I, ISD::XOR); }
+ void visitShl (const User &I) { visitShift(I, ISD::SHL); }
+ void visitLShr(const User &I) { visitShift(I, ISD::SRL); }
+ void visitAShr(const User &I) { visitShift(I, ISD::SRA); }
+ void visitICmp(const User &I);
+ void visitFCmp(const User &I);
// Visit the conversion instructions
- void visitTrunc(User &I);
- void visitZExt(User &I);
- void visitSExt(User &I);
- void visitFPTrunc(User &I);
- void visitFPExt(User &I);
- void visitFPToUI(User &I);
- void visitFPToSI(User &I);
- void visitUIToFP(User &I);
- void visitSIToFP(User &I);
- void visitPtrToInt(User &I);
- void visitIntToPtr(User &I);
- void visitBitCast(User &I);
-
- void visitExtractElement(User &I);
- void visitInsertElement(User &I);
- void visitShuffleVector(User &I);
-
- void visitExtractValue(ExtractValueInst &I);
- void visitInsertValue(InsertValueInst &I);
-
- void visitGetElementPtr(User &I);
- void visitSelect(User &I);
-
- void visitAlloca(AllocaInst &I);
- void visitLoad(LoadInst &I);
- void visitStore(StoreInst &I);
- void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
- void visitCall(CallInst &I);
- bool visitMemCmpCall(CallInst &I);
+ void visitTrunc(const User &I);
+ void visitZExt(const User &I);
+ void visitSExt(const User &I);
+ void visitFPTrunc(const User &I);
+ void visitFPExt(const User &I);
+ void visitFPToUI(const User &I);
+ void visitFPToSI(const User &I);
+ void visitUIToFP(const User &I);
+ void visitSIToFP(const User &I);
+ void visitPtrToInt(const User &I);
+ void visitIntToPtr(const User &I);
+ void visitBitCast(const User &I);
+
+ void visitExtractElement(const User &I);
+ void visitInsertElement(const User &I);
+ void visitShuffleVector(const User &I);
+
+ void visitExtractValue(const ExtractValueInst &I);
+ void visitInsertValue(const InsertValueInst &I);
+
+ void visitGetElementPtr(const User &I);
+ void visitSelect(const User &I);
+
+ void visitAlloca(const AllocaInst &I);
+ void visitLoad(const LoadInst &I);
+ void visitStore(const StoreInst &I);
+ void visitPHI(const PHINode &I);
+ void visitCall(const CallInst &I);
+ bool visitMemCmpCall(const CallInst &I);
- void visitInlineAsm(CallSite CS);
- const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);
- void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic);
-
- void visitPow(CallInst &I);
- void visitExp2(CallInst &I);
- void visitExp(CallInst &I);
- void visitLog(CallInst &I);
- void visitLog2(CallInst &I);
- void visitLog10(CallInst &I);
-
- void visitVAStart(CallInst &I);
- void visitVAArg(VAArgInst &I);
- void visitVAEnd(CallInst &I);
- void visitVACopy(CallInst &I);
-
- void visitUserOp1(Instruction &I) {
+ void visitInlineAsm(ImmutableCallSite CS);
+ const char *visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
+ void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
+
+ void visitPow(const CallInst &I);
+ void visitExp2(const CallInst &I);
+ void visitExp(const CallInst &I);
+ void visitLog(const CallInst &I);
+ void visitLog2(const CallInst &I);
+ void visitLog10(const CallInst &I);
+
+ void visitVAStart(const CallInst &I);
+ void visitVAArg(const VAArgInst &I);
+ void visitVAEnd(const CallInst &I);
+ void visitVACopy(const CallInst &I);
+
+ void visitUserOp1(const Instruction &I) {
llvm_unreachable("UserOp1 should not exist at instruction selection time!");
}
- void visitUserOp2(Instruction &I) {
+ void visitUserOp2(const Instruction &I) {
llvm_unreachable("UserOp2 should not exist at instruction selection time!");
}
- const char *implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op);
- const char *implVisitAluOverflow(CallInst &I, ISD::NodeType Op);
+ const char *implVisitBinaryAtomic(const CallInst& I, ISD::NodeType Op);
+ const char *implVisitAluOverflow(const CallInst &I, ISD::NodeType Op);
+
+ void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);
+
+ /// EmitFuncArgumentDbgValue - If V is a function argument then create
+ /// corresponding DBG_VALUE machine instruction for it now. At the end of
+ /// instruction selection, they will be inserted to the entry BB.
+ bool EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
+ int64_t Offset, const SDValue &N);
};
} // end namespace llvm
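
(Aside, not part of the patch: the new DanglingDebugInfoMap declared above implements a simple defer-and-resolve scheme — a dbg_value that refers to a value not yet lowered is parked in a map and emitted once setValue() finally sees the referent. A minimal standalone sketch of that pattern; all names here are hypothetical, not the LLVM API:

// Sketch only: strings stand in for llvm::Value*, ints for SDNodes.
#include <cstdio>
#include <string>
#include <unordered_map>

struct PendingDbg { std::string variable; unsigned order; };

class Builder {
  std::unordered_map<std::string, int> NodeMap;          // value -> lowered node id
  std::unordered_map<std::string, PendingDbg> Dangling;  // value -> deferred dbg_value
public:
  void visitDbgValue(const std::string &Val, const std::string &Var, unsigned Order) {
    auto It = NodeMap.find(Val);
    if (It != NodeMap.end()) {
      std::printf("DBG_VALUE %s -> node %d\n", Var.c_str(), It->second);
      return;
    }
    Dangling[Val] = {Var, Order};   // referent not seen yet; defer
  }
  void setValue(const std::string &Val, int NodeId) {
    NodeMap[Val] = NodeId;
    auto It = Dangling.find(Val);
    if (It != Dangling.end()) {     // resolve the deferred record now
      std::printf("DBG_VALUE %s -> node %d (deferred, order %u)\n",
                  It->second.variable.c_str(), NodeId, It->second.order);
      Dangling.erase(It);
    }
  }
};

int main() {
  Builder B;
  B.visitDbgValue("x", "!var_x", 5);  // dbg_value arrives before x is lowered
  B.setValue("x", 17);                // definition shows up later and resolves it
}

The point of the extra map is ordering: debug intrinsics may precede the definition they describe, and dropping them would lose variable locations for otherwise-unused arguments.)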
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 05f9f1f..66cb5ce 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -14,37 +14,30 @@
#define DEBUG_TYPE "isel"
#include "ScheduleDAGSDNodes.h"
#include "SelectionDAGBuilder.h"
-#include "FunctionLoweringInfo.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Constants.h"
-#include "llvm/CallingConv.h"
-#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
-#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/GCMetadata.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/DwarfWriter.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
@@ -53,7 +46,6 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
@@ -61,6 +53,7 @@
using namespace llvm;
STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on");
+STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path");
static cl::opt<bool>
EnableFastISelVerbose("fast-isel-verbose", cl::Hidden,
@@ -69,10 +62,6 @@ EnableFastISelVerbose("fast-isel-verbose", cl::Hidden,
static cl::opt<bool>
EnableFastISelAbort("fast-isel-abort", cl::Hidden,
cl::desc("Enable abort calls when \"fast\" instruction fails"));
-static cl::opt<bool>
-SchedLiveInCopies("schedule-livein-copies", cl::Hidden,
- cl::desc("Schedule copies of livein registers"),
- cl::init(false));
#ifndef NDEBUG
static cl::opt<bool>
@@ -143,12 +132,16 @@ namespace llvm {
const TargetLowering &TLI = IS->getTargetLowering();
if (OptLevel == CodeGenOpt::None)
- return createFastDAGScheduler(IS, OptLevel);
- if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency)
+ return createSourceListDAGScheduler(IS, OptLevel);
+ if (TLI.getSchedulingPreference() == Sched::Latency)
return createTDListDAGScheduler(IS, OptLevel);
- assert(TLI.getSchedulingPreference() ==
- TargetLowering::SchedulingForRegPressure && "Unknown sched type!");
- return createBURRListDAGScheduler(IS, OptLevel);
+ if (TLI.getSchedulingPreference() == Sched::RegPressure)
+ return createBURRListDAGScheduler(IS, OptLevel);
+ if (TLI.getSchedulingPreference() == Sched::Hybrid)
+ return createHybridListDAGScheduler(IS, OptLevel);
+ assert(TLI.getSchedulingPreference() == Sched::ILP &&
+ "Unknown sched type!");
+ return createILPListDAGScheduler(IS, OptLevel);
}
}
@@ -161,9 +154,9 @@ namespace llvm {
// When new basic blocks are inserted and the edges from MBB to its successors
// are modified, the method should insert pairs of <OldSucc, NewSucc> into the
// DenseMap.
-MachineBasicBlock *TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB,
- DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const {
+MachineBasicBlock *
+TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
#ifndef NDEBUG
dbgs() << "If a target marks an instruction with "
"'usesCustomInserter', it must implement "
@@ -173,115 +166,15 @@ MachineBasicBlock *TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
return 0;
}
-/// EmitLiveInCopy - Emit a copy for a live in physical register. If the
-/// physical register has only a single copy use, then coalesced the copy
-/// if possible.
-static void EmitLiveInCopy(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator &InsertPos,
- unsigned VirtReg, unsigned PhysReg,
- const TargetRegisterClass *RC,
- DenseMap<MachineInstr*, unsigned> &CopyRegMap,
- const MachineRegisterInfo &MRI,
- const TargetRegisterInfo &TRI,
- const TargetInstrInfo &TII) {
- unsigned NumUses = 0;
- MachineInstr *UseMI = NULL;
- for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(VirtReg),
- UE = MRI.use_end(); UI != UE; ++UI) {
- UseMI = &*UI;
- if (++NumUses > 1)
- break;
- }
-
- // If the number of uses is not one, or the use is not a move instruction,
- // don't coalesce. Also, only coalesce away a virtual register to virtual
- // register copy.
- bool Coalesced = false;
- unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (NumUses == 1 &&
- TII.isMoveInstr(*UseMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
- TargetRegisterInfo::isVirtualRegister(DstReg)) {
- VirtReg = DstReg;
- Coalesced = true;
- }
-
- // Now find an ideal location to insert the copy.
- MachineBasicBlock::iterator Pos = InsertPos;
- while (Pos != MBB->begin()) {
- MachineInstr *PrevMI = prior(Pos);
- DenseMap<MachineInstr*, unsigned>::iterator RI = CopyRegMap.find(PrevMI);
- // copyRegToReg might emit multiple instructions to do a copy.
- unsigned CopyDstReg = (RI == CopyRegMap.end()) ? 0 : RI->second;
- if (CopyDstReg && !TRI.regsOverlap(CopyDstReg, PhysReg))
- // This is what the BB looks like right now:
- // r1024 = mov r0
- // ...
- // r1 = mov r1024
- //
- // We want to insert "r1025 = mov r1". Inserting this copy below the
- // move to r1024 makes it impossible for that move to be coalesced.
- //
- // r1025 = mov r1
- // r1024 = mov r0
- // ...
- // r1 = mov 1024
- // r2 = mov 1025
- break; // Woot! Found a good location.
- --Pos;
- }
-
- bool Emitted = TII.copyRegToReg(*MBB, Pos, VirtReg, PhysReg, RC, RC);
- assert(Emitted && "Unable to issue a live-in copy instruction!\n");
- (void) Emitted;
-
- CopyRegMap.insert(std::make_pair(prior(Pos), VirtReg));
- if (Coalesced) {
- if (&*InsertPos == UseMI) ++InsertPos;
- MBB->erase(UseMI);
- }
-}
-
-/// EmitLiveInCopies - If this is the first basic block in the function,
-/// and if it has live ins that need to be copied into vregs, emit the
-/// copies into the block.
-static void EmitLiveInCopies(MachineBasicBlock *EntryMBB,
- const MachineRegisterInfo &MRI,
- const TargetRegisterInfo &TRI,
- const TargetInstrInfo &TII) {
- if (SchedLiveInCopies) {
- // Emit the copies at a heuristically-determined location in the block.
- DenseMap<MachineInstr*, unsigned> CopyRegMap;
- MachineBasicBlock::iterator InsertPos = EntryMBB->begin();
- for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
- E = MRI.livein_end(); LI != E; ++LI)
- if (LI->second) {
- const TargetRegisterClass *RC = MRI.getRegClass(LI->second);
- EmitLiveInCopy(EntryMBB, InsertPos, LI->second, LI->first,
- RC, CopyRegMap, MRI, TRI, TII);
- }
- } else {
- // Emit the copies into the top of the block.
- for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
- E = MRI.livein_end(); LI != E; ++LI)
- if (LI->second) {
- const TargetRegisterClass *RC = MRI.getRegClass(LI->second);
- bool Emitted = TII.copyRegToReg(*EntryMBB, EntryMBB->begin(),
- LI->second, LI->first, RC, RC);
- assert(Emitted && "Unable to issue a live-in copy instruction!\n");
- (void) Emitted;
- }
- }
-}
-
//===----------------------------------------------------------------------===//
// SelectionDAGISel code
//===----------------------------------------------------------------------===//
-SelectionDAGISel::SelectionDAGISel(TargetMachine &tm, CodeGenOpt::Level OL) :
- MachineFunctionPass(&ID), TM(tm), TLI(*tm.getTargetLowering()),
+SelectionDAGISel::SelectionDAGISel(const TargetMachine &tm, CodeGenOpt::Level OL) :
+ MachineFunctionPass(ID), TM(tm), TLI(*tm.getTargetLowering()),
FuncInfo(new FunctionLoweringInfo(TLI)),
- CurDAG(new SelectionDAG(TLI, *FuncInfo)),
- SDB(new SelectionDAGBuilder(*CurDAG, TLI, *FuncInfo, OL)),
+ CurDAG(new SelectionDAG(tm)),
+ SDB(new SelectionDAGBuilder(*CurDAG, *FuncInfo, OL)),
GFI(),
OptLevel(OL),
DAGSize(0)
@@ -293,258 +186,182 @@ SelectionDAGISel::~SelectionDAGISel() {
delete FuncInfo;
}
-unsigned SelectionDAGISel::MakeReg(EVT VT) {
- return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
-}
-
void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>();
AU.addPreserved<AliasAnalysis>();
AU.addRequired<GCModuleInfo>();
AU.addPreserved<GCModuleInfo>();
- AU.addRequired<DwarfWriter>();
- AU.addPreserved<DwarfWriter>();
MachineFunctionPass::getAnalysisUsage(AU);
}
-bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
- Function &Fn = *mf.getFunction();
+/// FunctionCallsSetJmp - Return true if the function has a call to setjmp or
+/// other function that gcc recognizes as "returning twice". This is used to
+/// limit code-gen optimizations on the machine function.
+///
+/// FIXME: Remove after <rdar://problem/8031714> is fixed.
+static bool FunctionCallsSetJmp(const Function *F) {
+ const Module *M = F->getParent();
+ static const char *ReturnsTwiceFns[] = {
+ "setjmp",
+ "sigsetjmp",
+ "setjmp_syscall",
+ "savectx",
+ "qsetjmp",
+ "vfork",
+ "getcontext"
+ };
+#define NUM_RETURNS_TWICE_FNS sizeof(ReturnsTwiceFns) / sizeof(const char *)
+
+ for (unsigned I = 0; I < NUM_RETURNS_TWICE_FNS; ++I)
+ if (const Function *Callee = M->getFunction(ReturnsTwiceFns[I])) {
+ if (!Callee->use_empty())
+ for (Value::const_use_iterator
+ I = Callee->use_begin(), E = Callee->use_end();
+ I != E; ++I)
+ if (const CallInst *CI = dyn_cast<CallInst>(*I))
+ if (CI->getParent()->getParent() == F)
+ return true;
+ }
+ return false;
+#undef NUM_RETURNS_TWICE_FNS
+}
+
+bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// Do some sanity-checking on the command-line options.
assert((!EnableFastISelVerbose || EnableFastISel) &&
"-fast-isel-verbose requires -fast-isel");
assert((!EnableFastISelAbort || EnableFastISel) &&
"-fast-isel-abort requires -fast-isel");
- // Get alias analysis for load/store combining.
- AA = &getAnalysis<AliasAnalysis>();
-
- MF = &mf;
+ const Function &Fn = *mf.getFunction();
const TargetInstrInfo &TII = *TM.getInstrInfo();
const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
- if (Fn.hasGC())
- GFI = &getAnalysis<GCModuleInfo>().getFunctionInfo(Fn);
- else
- GFI = 0;
+ MF = &mf;
RegInfo = &MF->getRegInfo();
+ AA = &getAnalysis<AliasAnalysis>();
+ GFI = Fn.hasGC() ? &getAnalysis<GCModuleInfo>().getFunctionInfo(Fn) : 0;
+
DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");
- MachineModuleInfo *MMI = getAnalysisIfAvailable<MachineModuleInfo>();
- DwarfWriter *DW = getAnalysisIfAvailable<DwarfWriter>();
- CurDAG->init(*MF, MMI, DW);
- FuncInfo->set(Fn, *MF, EnableFastISel);
+ CurDAG->init(*MF);
+ FuncInfo->set(Fn, *MF);
SDB->init(GFI, *AA);
- for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
- if (InvokeInst *Invoke = dyn_cast<InvokeInst>(I->getTerminator()))
- // Mark landing pad.
- FuncInfo->MBBMap[Invoke->getSuccessor(1)]->setIsLandingPad();
-
- SelectAllBasicBlocks(Fn, *MF, MMI, DW, TII);
+ SelectAllBasicBlocks(Fn);
// If the first basic block in the function has live ins that need to be
// copied into vregs, emit the copies into the top of the block before
// emitting the code for the block.
- EmitLiveInCopies(MF->begin(), *RegInfo, TRI, TII);
+ MachineBasicBlock *EntryMBB = MF->begin();
+ RegInfo->EmitLiveInCopies(EntryMBB, TRI, TII);
+
+ DenseMap<unsigned, unsigned> LiveInMap;
+ if (!FuncInfo->ArgDbgValues.empty())
+ for (MachineRegisterInfo::livein_iterator LI = RegInfo->livein_begin(),
+ E = RegInfo->livein_end(); LI != E; ++LI)
+ if (LI->second)
+ LiveInMap.insert(std::make_pair(LI->first, LI->second));
+
+ // Insert DBG_VALUE instructions for function arguments to the entry block.
+ for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
+ MachineInstr *MI = FuncInfo->ArgDbgValues[e-i-1];
+ unsigned Reg = MI->getOperand(0).getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ EntryMBB->insert(EntryMBB->begin(), MI);
+ else {
+ MachineInstr *Def = RegInfo->getVRegDef(Reg);
+ MachineBasicBlock::iterator InsertPos = Def;
+ // FIXME: VR def may not be in entry block.
+ Def->getParent()->insert(llvm::next(InsertPos), MI);
+ }
- // Add function live-ins to entry block live-in set.
- for (MachineRegisterInfo::livein_iterator I = RegInfo->livein_begin(),
- E = RegInfo->livein_end(); I != E; ++I)
- MF->begin()->addLiveIn(I->first);
+ // If Reg is live-in then update debug info to track its copy in a vreg.
+ DenseMap<unsigned, unsigned>::iterator LDI = LiveInMap.find(Reg);
+ if (LDI != LiveInMap.end()) {
+ MachineInstr *Def = RegInfo->getVRegDef(LDI->second);
+ MachineBasicBlock::iterator InsertPos = Def;
+ const MDNode *Variable =
+ MI->getOperand(MI->getNumOperands()-1).getMetadata();
+ unsigned Offset = MI->getOperand(1).getImm();
+ // Def is never a terminator here, so it is ok to increment InsertPos.
+ BuildMI(*EntryMBB, ++InsertPos, MI->getDebugLoc(),
+ TII.get(TargetOpcode::DBG_VALUE))
+ .addReg(LDI->second, RegState::Debug)
+ .addImm(Offset).addMetadata(Variable);
+ }
+ }
-#ifndef NDEBUG
- assert(FuncInfo->CatchInfoFound.size() == FuncInfo->CatchInfoLost.size() &&
- "Not all catch info was assigned to a landing pad!");
-#endif
+ // Determine if there are any calls in this machine function.
+ MachineFrameInfo *MFI = MF->getFrameInfo();
+ if (!MFI->hasCalls()) {
+ for (MachineFunction::const_iterator
+ I = MF->begin(), E = MF->end(); I != E; ++I) {
+ const MachineBasicBlock *MBB = I;
+ for (MachineBasicBlock::const_iterator
+ II = MBB->begin(), IE = MBB->end(); II != IE; ++II) {
+ const TargetInstrDesc &TID = TM.getInstrInfo()->get(II->getOpcode());
+
+ // Operand 1 of an inline asm instruction indicates whether the asm
+ // needs stack or not.
+ if ((II->isInlineAsm() && II->getOperand(1).getImm()) ||
+ (TID.isCall() && !TID.isReturn())) {
+ MFI->setHasCalls(true);
+ goto done;
+ }
+ }
+ }
+ done:;
+ }
+
+ // Determine if there is a call to setjmp in the machine function.
+ MF->setCallsSetJmp(FunctionCallsSetJmp(&Fn));
+
+ // Replace forward-declared registers with the registers containing
+ // the desired value.
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ for (DenseMap<unsigned, unsigned>::iterator
+ I = FuncInfo->RegFixups.begin(), E = FuncInfo->RegFixups.end();
+ I != E; ++I) {
+ unsigned From = I->first;
+ unsigned To = I->second;
+ // If To is also scheduled to be replaced, find what its ultimate
+ // replacement is.
+ for (;;) {
+ DenseMap<unsigned, unsigned>::iterator J =
+ FuncInfo->RegFixups.find(To);
+ if (J == E) break;
+ To = J->second;
+ }
+ // Replace it.
+ MRI.replaceRegWith(From, To);
+ }
+ // Release function-specific state. SDB and CurDAG are already cleared
+ // at this point.
FuncInfo->clear();
return true;
}
-/// SetDebugLoc - Update MF's and SDB's DebugLocs if debug information is
-/// attached with this instruction.
-static void SetDebugLoc(unsigned MDDbgKind, Instruction *I,
- SelectionDAGBuilder *SDB,
- FastISel *FastIS, MachineFunction *MF) {
- if (isa<DbgInfoIntrinsic>(I)) return;
-
- if (MDNode *Dbg = I->getMetadata(MDDbgKind)) {
- DILocation DILoc(Dbg);
- DebugLoc Loc = ExtractDebugLocation(DILoc, MF->getDebugLocInfo());
-
- SDB->setCurDebugLoc(Loc);
-
- if (FastIS)
- FastIS->setCurDebugLoc(Loc);
-
- // If the function doesn't have a default debug location yet, set
- // it. This is kind of a hack.
- if (MF->getDefaultDebugLoc().isUnknown())
- MF->setDefaultDebugLoc(Loc);
- }
-}
-
-/// ResetDebugLoc - Set MF's and SDB's DebugLocs to Unknown.
-static void ResetDebugLoc(SelectionDAGBuilder *SDB, FastISel *FastIS) {
- SDB->setCurDebugLoc(DebugLoc::getUnknownLoc());
- if (FastIS)
- FastIS->setCurDebugLoc(DebugLoc::getUnknownLoc());
-}
-
-void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB,
- BasicBlock::iterator Begin,
- BasicBlock::iterator End,
- bool &HadTailCall) {
- SDB->setCurrentBasicBlock(BB);
- unsigned MDDbgKind = LLVMBB->getContext().getMDKindID("dbg");
-
+void
+SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
+ BasicBlock::const_iterator End,
+ bool &HadTailCall) {
// Lower all of the non-terminator instructions. If a call is emitted
- // as a tail call, cease emitting nodes for this block.
- for (BasicBlock::iterator I = Begin; I != End && !SDB->HasTailCall; ++I) {
- SetDebugLoc(MDDbgKind, I, SDB, 0, MF);
-
- if (!isa<TerminatorInst>(I)) {
- SDB->visit(*I);
-
- // Set the current debug location back to "unknown" so that it doesn't
- // spuriously apply to subsequent instructions.
- ResetDebugLoc(SDB, 0);
- }
- }
-
- if (!SDB->HasTailCall) {
- // Ensure that all instructions which are used outside of their defining
- // blocks are available as virtual registers. Invoke is handled elsewhere.
- for (BasicBlock::iterator I = Begin; I != End; ++I)
- if (!isa<PHINode>(I) && !isa<InvokeInst>(I))
- SDB->CopyToExportRegsIfNeeded(I);
-
- // Handle PHI nodes in successor blocks.
- if (End == LLVMBB->end()) {
- HandlePHINodesInSuccessorBlocks(LLVMBB);
-
- // Lower the terminator after the copies are emitted.
- SetDebugLoc(MDDbgKind, LLVMBB->getTerminator(), SDB, 0, MF);
- SDB->visit(*LLVMBB->getTerminator());
- ResetDebugLoc(SDB, 0);
- }
- }
+ // as a tail call, cease emitting nodes for this block. Terminators
+ // are handled below.
+ for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall; ++I)
+ SDB->visit(*I);
// Make sure the root of the DAG is up-to-date.
CurDAG->setRoot(SDB->getControlRoot());
-
- // Final step, emit the lowered DAG as machine code.
- CodeGenAndEmitDAG();
HadTailCall = SDB->HasTailCall;
SDB->clear();
-}
-
-namespace {
-/// WorkListRemover - This class is a DAGUpdateListener that removes any deleted
-/// nodes from the worklist.
-class SDOPsWorkListRemover : public SelectionDAG::DAGUpdateListener {
- SmallVector<SDNode*, 128> &Worklist;
-public:
- SDOPsWorkListRemover(SmallVector<SDNode*, 128> &wl) : Worklist(wl) {}
-
- virtual void NodeDeleted(SDNode *N, SDNode *E) {
- Worklist.erase(std::remove(Worklist.begin(), Worklist.end(), N),
- Worklist.end());
- }
-
- virtual void NodeUpdated(SDNode *N) {
- // Ignore updates.
- }
-};
-}
-
-/// TrivialTruncElim - Eliminate some trivial nops that can result from
-/// ShrinkDemandedOps: (trunc (ext n)) -> n.
-static bool TrivialTruncElim(SDValue Op,
- TargetLowering::TargetLoweringOpt &TLO) {
- SDValue N0 = Op.getOperand(0);
- EVT VT = Op.getValueType();
- if ((N0.getOpcode() == ISD::ZERO_EXTEND ||
- N0.getOpcode() == ISD::SIGN_EXTEND ||
- N0.getOpcode() == ISD::ANY_EXTEND) &&
- N0.getOperand(0).getValueType() == VT) {
- return TLO.CombineTo(Op, N0.getOperand(0));
- }
- return false;
-}
-
-/// ShrinkDemandedOps - A late transformation pass that shrink expressions
-/// using TargetLowering::TargetLoweringOpt::ShrinkDemandedOp. It converts
-/// x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
-void SelectionDAGISel::ShrinkDemandedOps() {
- SmallVector<SDNode*, 128> Worklist;
- // Add all the dag nodes to the worklist.
- Worklist.reserve(CurDAG->allnodes_size());
- for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
- E = CurDAG->allnodes_end(); I != E; ++I)
- Worklist.push_back(I);
-
- APInt Mask;
- APInt KnownZero;
- APInt KnownOne;
-
- TargetLowering::TargetLoweringOpt TLO(*CurDAG, true);
- while (!Worklist.empty()) {
- SDNode *N = Worklist.pop_back_val();
-
- if (N->use_empty() && N != CurDAG->getRoot().getNode()) {
- CurDAG->DeleteNode(N);
- continue;
- }
-
- // Run ShrinkDemandedOp on scalar binary operations.
- if (N->getNumValues() == 1 &&
- N->getValueType(0).isSimple() && N->getValueType(0).isInteger()) {
- unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
- APInt Demanded = APInt::getAllOnesValue(BitWidth);
- APInt KnownZero, KnownOne;
- if (TLI.SimplifyDemandedBits(SDValue(N, 0), Demanded,
- KnownZero, KnownOne, TLO) ||
- (N->getOpcode() == ISD::TRUNCATE &&
- TrivialTruncElim(SDValue(N, 0), TLO))) {
- // Revisit the node.
- Worklist.erase(std::remove(Worklist.begin(), Worklist.end(), N),
- Worklist.end());
- Worklist.push_back(N);
-
- // Replace the old value with the new one.
- DEBUG(errs() << "\nReplacing ";
- TLO.Old.getNode()->dump(CurDAG);
- errs() << "\nWith: ";
- TLO.New.getNode()->dump(CurDAG);
- errs() << '\n');
-
- Worklist.push_back(TLO.New.getNode());
-
- SDOPsWorkListRemover DeadNodes(Worklist);
- CurDAG->ReplaceAllUsesOfValueWith(TLO.Old, TLO.New, &DeadNodes);
-
- if (TLO.Old.getNode()->use_empty()) {
- for (unsigned i = 0, e = TLO.Old.getNode()->getNumOperands();
- i != e; ++i) {
- SDNode *OpNode = TLO.Old.getNode()->getOperand(i).getNode();
- if (OpNode->hasOneUse()) {
- Worklist.erase(std::remove(Worklist.begin(), Worklist.end(),
- OpNode), Worklist.end());
- Worklist.push_back(OpNode);
- }
- }
-
- Worklist.erase(std::remove(Worklist.begin(), Worklist.end(),
- TLO.Old.getNode()), Worklist.end());
- CurDAG->DeleteNode(TLO.Old.getNode());
- }
- }
- }
- }
+ // Final step, emit the lowered DAG as machine code.
+ CodeGenAndEmitDAG();
}
void SelectionDAGISel::ComputeLiveOutVRegInfo() {
@@ -610,23 +427,19 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
ViewDAGCombine2 || ViewDAGCombineLT || ViewISelDAGs || ViewSchedDAGs ||
ViewSUnitDAGs)
BlockName = MF->getFunction()->getNameStr() + ":" +
- BB->getBasicBlock()->getNameStr();
+ FuncInfo->MBB->getBasicBlock()->getNameStr();
- DEBUG(dbgs() << "Initial selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Initial selection DAG:\n"; CurDAG->dump());
if (ViewDAGCombine1) CurDAG->viewGraph("dag-combine1 input for " + BlockName);
// Run the DAG combiner in pre-legalize mode.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("DAG Combining 1", GroupName);
- CurDAG->Combine(Unrestricted, *AA, OptLevel);
- } else {
+ {
+ NamedRegionTimer T("DAG Combining 1", GroupName, TimePassesIsEnabled);
CurDAG->Combine(Unrestricted, *AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized lowered selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Optimized lowered selection DAG:\n"; CurDAG->dump());
// Second step, hack on the DAG until it only uses operations and types that
// the target supports.
@@ -634,44 +447,36 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
BlockName);
bool Changed;
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Type Legalization", GroupName);
- Changed = CurDAG->LegalizeTypes();
- } else {
+ {
+ NamedRegionTimer T("Type Legalization", GroupName, TimePassesIsEnabled);
Changed = CurDAG->LegalizeTypes();
}
- DEBUG(dbgs() << "Type-legalized selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Type-legalized selection DAG:\n"; CurDAG->dump());
if (Changed) {
if (ViewDAGCombineLT)
CurDAG->viewGraph("dag-combine-lt input for " + BlockName);
// Run the DAG combiner in post-type-legalize mode.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("DAG Combining after legalize types", GroupName);
- CurDAG->Combine(NoIllegalTypes, *AA, OptLevel);
- } else {
+ {
+ NamedRegionTimer T("DAG Combining after legalize types", GroupName,
+ TimePassesIsEnabled);
CurDAG->Combine(NoIllegalTypes, *AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized type-legalized selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Optimized type-legalized selection DAG:\n";
+ CurDAG->dump());
}
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Vector Legalization", GroupName);
- Changed = CurDAG->LegalizeVectors();
- } else {
+ {
+ NamedRegionTimer T("Vector Legalization", GroupName, TimePassesIsEnabled);
Changed = CurDAG->LegalizeVectors();
}
if (Changed) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Type Legalization 2", GroupName);
- CurDAG->LegalizeTypes();
- } else {
+ {
+ NamedRegionTimer T("Type Legalization 2", GroupName, TimePassesIsEnabled);
CurDAG->LegalizeTypes();
}
@@ -679,93 +484,79 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
CurDAG->viewGraph("dag-combine-lv input for " + BlockName);
// Run the DAG combiner in post-type-legalize mode.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("DAG Combining after legalize vectors", GroupName);
- CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
- } else {
+ {
+ NamedRegionTimer T("DAG Combining after legalize vectors", GroupName,
+ TimePassesIsEnabled);
CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized vector-legalized selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Optimized vector-legalized selection DAG:\n";
+ CurDAG->dump());
}
if (ViewLegalizeDAGs) CurDAG->viewGraph("legalize input for " + BlockName);
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("DAG Legalization", GroupName);
- CurDAG->Legalize(OptLevel);
- } else {
+ {
+ NamedRegionTimer T("DAG Legalization", GroupName, TimePassesIsEnabled);
CurDAG->Legalize(OptLevel);
}
- DEBUG(dbgs() << "Legalized selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Legalized selection DAG:\n"; CurDAG->dump());
if (ViewDAGCombine2) CurDAG->viewGraph("dag-combine2 input for " + BlockName);
// Run the DAG combiner in post-legalize mode.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("DAG Combining 2", GroupName);
- CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
- } else {
+ {
+ NamedRegionTimer T("DAG Combining 2", GroupName, TimePassesIsEnabled);
CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized legalized selection DAG:\n");
- DEBUG(CurDAG->dump());
-
- if (ViewISelDAGs) CurDAG->viewGraph("isel input for " + BlockName);
+ DEBUG(dbgs() << "Optimized legalized selection DAG:\n"; CurDAG->dump());
- if (OptLevel != CodeGenOpt::None) {
- ShrinkDemandedOps();
+ if (OptLevel != CodeGenOpt::None)
ComputeLiveOutVRegInfo();
- }
+
+ if (ViewISelDAGs) CurDAG->viewGraph("isel input for " + BlockName);
// Third, instruction select all of the operations to machine code, adding the
// code to the MachineBasicBlock.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Instruction Selection", GroupName);
- DoInstructionSelection();
- } else {
+ {
+ NamedRegionTimer T("Instruction Selection", GroupName, TimePassesIsEnabled);
DoInstructionSelection();
}
- DEBUG(dbgs() << "Selected selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Selected selection DAG:\n"; CurDAG->dump());
if (ViewSchedDAGs) CurDAG->viewGraph("scheduler input for " + BlockName);
// Schedule machine code.
ScheduleDAGSDNodes *Scheduler = CreateScheduler();
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Instruction Scheduling", GroupName);
- Scheduler->Run(CurDAG, BB, BB->end());
- } else {
- Scheduler->Run(CurDAG, BB, BB->end());
+ {
+ NamedRegionTimer T("Instruction Scheduling", GroupName,
+ TimePassesIsEnabled);
+ Scheduler->Run(CurDAG, FuncInfo->MBB, FuncInfo->InsertPt);
}
if (ViewSUnitDAGs) Scheduler->viewGraph();
// Emit machine code to BB. This can change 'BB' to the last block being
// inserted into.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Instruction Creation", GroupName);
- BB = Scheduler->EmitSchedule(&SDB->EdgeMapping);
- } else {
- BB = Scheduler->EmitSchedule(&SDB->EdgeMapping);
+ {
+ NamedRegionTimer T("Instruction Creation", GroupName, TimePassesIsEnabled);
+
+ FuncInfo->MBB = Scheduler->EmitSchedule();
+ FuncInfo->InsertPt = Scheduler->InsertPos;
}
// Free the scheduler state.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Instruction Scheduling Cleanup", GroupName);
- delete Scheduler;
- } else {
+ {
+ NamedRegionTimer T("Instruction Scheduling Cleanup", GroupName,
+ TimePassesIsEnabled);
delete Scheduler;
}
- DEBUG(dbgs() << "Selected machine code:\n");
- DEBUG(BB->dump());
+ // Free the SelectionDAG state, now that we're finished with it.
+ CurDAG->clear();
}
void SelectionDAGISel::DoInstructionSelection() {
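
(Aside, not part of the patch: the repeated "if (TimePassesIsEnabled) { timer; work } else { work }" blocks above collapse into a single scope because the timer now takes the enable flag itself. A standalone sketch of that RAII cleanup with a hypothetical ScopedTimer — not llvm::NamedRegionTimer:

// Sketch only: ScopedTimer and combineDAG are illustrative stand-ins.
#include <chrono>
#include <cstdio>

class ScopedTimer {
  const char *Name;
  bool Enabled;
  std::chrono::steady_clock::time_point Start;
public:
  ScopedTimer(const char *N, bool E) : Name(N), Enabled(E) {
    if (Enabled) Start = std::chrono::steady_clock::now();
  }
  ~ScopedTimer() {
    if (!Enabled) return;
    auto Us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - Start).count();
    std::printf("%s: %lld us\n", Name, static_cast<long long>(Us));
  }
};

static void combineDAG() { /* placeholder for CurDAG->Combine(...) */ }

int main() {
  bool TimePassesIsEnabled = true;   // would normally come from -time-passes
  {
    ScopedTimer T("DAG Combining 1", TimePassesIsEnabled);
    combineDAG();                    // the work is written exactly once
  }
}

Writing the work only once removes the duplicated call that the old if/else kept in both branches, which is what most of the CodeGenAndEmitDAG churn above amounts to.)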
@@ -819,179 +610,155 @@ void SelectionDAGISel::DoInstructionSelection() {
CurDAG->setRoot(Dummy.getValue());
}
+
DEBUG(errs() << "===== Instruction selection ends:\n");
PostprocessISelDAG();
-
- // FIXME: This shouldn't be needed, remove it.
- CurDAG->RemoveDeadNodes();
}
+/// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
+/// do other setup for EH landing-pad blocks.
+void SelectionDAGISel::PrepareEHLandingPad() {
+ // Add a label to mark the beginning of the landing pad. Deletion of the
+ // landing pad can thus be detected via the MachineModuleInfo.
+ MCSymbol *Label = MF->getMMI().addLandingPad(FuncInfo->MBB);
+
+ const TargetInstrDesc &II = TM.getInstrInfo()->get(TargetOpcode::EH_LABEL);
+ BuildMI(*FuncInfo->MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
+ .addSym(Label);
+
+ // Mark exception register as live in.
+ unsigned Reg = TLI.getExceptionAddressRegister();
+ if (Reg) FuncInfo->MBB->addLiveIn(Reg);
+
+ // Mark exception selector register as live in.
+ Reg = TLI.getExceptionSelectorRegister();
+ if (Reg) FuncInfo->MBB->addLiveIn(Reg);
+
+ // FIXME: Hack around an exception handling flaw (PR1508): the personality
+ // function and list of typeids logically belong to the invoke (or, if you
+ // like, the basic block containing the invoke), and need to be associated
+ // with it in the dwarf exception handling tables. Currently however the
+ // information is provided by an intrinsic (eh.selector) that can be moved
+ // to unexpected places by the optimizers: if the unwind edge is critical,
+ // then breaking it can result in the intrinsics being in the successor of
+ // the landing pad, not the landing pad itself. This results
+ // in exceptions not being caught because no typeids are associated with
+ // the invoke. This may not be the only way things can go wrong, but it
+ // is the only way we try to work around for the moment.
+ const BasicBlock *LLVMBB = FuncInfo->MBB->getBasicBlock();
+ const BranchInst *Br = dyn_cast<BranchInst>(LLVMBB->getTerminator());
+
+ if (Br && Br->isUnconditional()) { // Critical edge?
+ BasicBlock::const_iterator I, E;
+ for (I = LLVMBB->begin(), E = --LLVMBB->end(); I != E; ++I)
+ if (isa<EHSelectorInst>(I))
+ break;
-void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn,
- MachineFunction &MF,
- MachineModuleInfo *MMI,
- DwarfWriter *DW,
- const TargetInstrInfo &TII) {
+ if (I == E)
+ // No catch info found - try to extract some from the successor.
+ CopyCatchInfo(Br->getSuccessor(0), LLVMBB, &MF->getMMI(), *FuncInfo);
+ }
+}
+
+void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Initialize the Fast-ISel state, if needed.
FastISel *FastIS = 0;
if (EnableFastISel)
- FastIS = TLI.createFastISel(MF, MMI, DW,
- FuncInfo->ValueMap,
- FuncInfo->MBBMap,
- FuncInfo->StaticAllocaMap
-#ifndef NDEBUG
- , FuncInfo->CatchInfoLost
-#endif
- );
-
- unsigned MDDbgKind = Fn.getContext().getMDKindID("dbg");
+ FastIS = TLI.createFastISel(*FuncInfo);
// Iterate over all basic blocks in the function.
- for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
- BasicBlock *LLVMBB = &*I;
- BB = FuncInfo->MBBMap[LLVMBB];
+ for (Function::const_iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
+ const BasicBlock *LLVMBB = &*I;
+ FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
+ FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
- BasicBlock::iterator const Begin = LLVMBB->begin();
- BasicBlock::iterator const End = LLVMBB->end();
- BasicBlock::iterator BI = Begin;
+ BasicBlock::const_iterator const Begin = LLVMBB->getFirstNonPHI();
+ BasicBlock::const_iterator const End = LLVMBB->end();
+ BasicBlock::const_iterator BI = End;
+ FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
+
+ // Setup an EH landing-pad block.
+ if (FuncInfo->MBB->isLandingPad())
+ PrepareEHLandingPad();
+
// Lower any arguments needed in this block if this is the entry block.
- bool SuppressFastISel = false;
- if (LLVMBB == &Fn.getEntryBlock()) {
+ if (LLVMBB == &Fn.getEntryBlock())
LowerArguments(LLVMBB);
- // If any of the arguments has the byval attribute, forgo
- // fast-isel in the entry block.
- if (FastIS) {
- unsigned j = 1;
- for (Function::arg_iterator I = Fn.arg_begin(), E = Fn.arg_end();
- I != E; ++I, ++j)
- if (Fn.paramHasAttr(j, Attribute::ByVal)) {
- if (EnableFastISelVerbose || EnableFastISelAbort)
- dbgs() << "FastISel skips entry block due to byval argument\n";
- SuppressFastISel = true;
- break;
- }
- }
- }
-
- if (MMI && BB->isLandingPad()) {
- // Add a label to mark the beginning of the landing pad. Deletion of the
- // landing pad can thus be detected via the MachineModuleInfo.
- unsigned LabelID = MMI->addLandingPad(BB);
-
- const TargetInstrDesc &II = TII.get(TargetOpcode::EH_LABEL);
- BuildMI(BB, SDB->getCurDebugLoc(), II).addImm(LabelID);
-
- // Mark exception register as live in.
- unsigned Reg = TLI.getExceptionAddressRegister();
- if (Reg) BB->addLiveIn(Reg);
-
- // Mark exception selector register as live in.
- Reg = TLI.getExceptionSelectorRegister();
- if (Reg) BB->addLiveIn(Reg);
-
- // FIXME: Hack around an exception handling flaw (PR1508): the personality
- // function and list of typeids logically belong to the invoke (or, if you
- // like, the basic block containing the invoke), and need to be associated
- // with it in the dwarf exception handling tables. Currently however the
- // information is provided by an intrinsic (eh.selector) that can be moved
- // to unexpected places by the optimizers: if the unwind edge is critical,
- // then breaking it can result in the intrinsics being in the successor of
- // the landing pad, not the landing pad itself. This results
- // in exceptions not being caught because no typeids are associated with
- // the invoke. This may not be the only way things can go wrong, but it
- // is the only way we try to work around for the moment.
- BranchInst *Br = dyn_cast<BranchInst>(LLVMBB->getTerminator());
-
- if (Br && Br->isUnconditional()) { // Critical edge?
- BasicBlock::iterator I, E;
- for (I = LLVMBB->begin(), E = --LLVMBB->end(); I != E; ++I)
- if (isa<EHSelectorInst>(I))
- break;
-
- if (I == E)
- // No catch info found - try to extract some from the successor.
- CopyCatchInfo(Br->getSuccessor(0), LLVMBB, MMI, *FuncInfo);
- }
- }
-
// Before doing SelectionDAG ISel, see if FastISel has been requested.
- if (FastIS && !SuppressFastISel) {
+ if (FastIS) {
+ FastIS->startNewBlock();
+
// Emit code for any incoming arguments. This must happen before
// beginning FastISel on the entry block.
if (LLVMBB == &Fn.getEntryBlock()) {
CurDAG->setRoot(SDB->getControlRoot());
- CodeGenAndEmitDAG();
SDB->clear();
+ CodeGenAndEmitDAG();
+
+ // If we inserted any instructions at the beginning, make a note of
+ // where they are, so we can be sure to emit subsequent instructions
+ // after them.
+ if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
+ FastIS->setLastLocalValue(llvm::prior(FuncInfo->InsertPt));
+ else
+ FastIS->setLastLocalValue(0);
}
- FastIS->startNewBlock(BB);
+
// Do FastISel on as many instructions as possible.
- for (; BI != End; ++BI) {
- // Just before the terminator instruction, insert instructions to
- // feed PHI nodes in successor blocks.
- if (isa<TerminatorInst>(BI))
- if (!HandlePHINodesInSuccessorBlocksFast(LLVMBB, FastIS)) {
- ++NumFastIselFailures;
- ResetDebugLoc(SDB, FastIS);
- if (EnableFastISelVerbose || EnableFastISelAbort) {
- dbgs() << "FastISel miss: ";
- BI->dump();
- }
- assert(!EnableFastISelAbort &&
- "FastISel didn't handle a PHI in a successor");
- break;
- }
+ for (; BI != Begin; --BI) {
+ const Instruction *Inst = llvm::prior(BI);
+
+ // If we no longer require this instruction, skip it.
+ if (!Inst->mayWriteToMemory() &&
+ !isa<TerminatorInst>(Inst) &&
+ !isa<DbgInfoIntrinsic>(Inst) &&
+ !FuncInfo->isExportedInst(Inst))
+ continue;
- SetDebugLoc(MDDbgKind, BI, SDB, FastIS, &MF);
+ // Bottom-up: reset the insert pos at the top, after any local-value
+ // instructions.
+ FastIS->recomputeInsertPt();
// Try to select the instruction with FastISel.
- if (FastIS->SelectInstruction(BI)) {
- ResetDebugLoc(SDB, FastIS);
+ if (FastIS->SelectInstruction(Inst))
continue;
- }
-
- // Clear out the debug location so that it doesn't carry over to
- // unrelated instructions.
- ResetDebugLoc(SDB, FastIS);
// Then handle certain instructions as single-LLVM-Instruction blocks.
- if (isa<CallInst>(BI)) {
+ if (isa<CallInst>(Inst)) {
++NumFastIselFailures;
if (EnableFastISelVerbose || EnableFastISelAbort) {
dbgs() << "FastISel missed call: ";
- BI->dump();
+ Inst->dump();
}
- if (!BI->getType()->isVoidTy()) {
- unsigned &R = FuncInfo->ValueMap[BI];
+ if (!Inst->getType()->isVoidTy() && !Inst->use_empty()) {
+ unsigned &R = FuncInfo->ValueMap[Inst];
if (!R)
- R = FuncInfo->CreateRegForValue(BI);
+ R = FuncInfo->CreateRegs(Inst->getType());
}
bool HadTailCall = false;
- SelectBasicBlock(LLVMBB, BI, llvm::next(BI), HadTailCall);
+ SelectBasicBlock(Inst, BI, HadTailCall);
// If the call was emitted as a tail call, we're done with the block.
if (HadTailCall) {
- BI = End;
+ --BI;
break;
}
- // If the instruction was codegen'd with multiple blocks,
- // inform the FastISel object where to resume inserting.
- FastIS->setCurrentBlock(BB);
continue;
}
// Otherwise, give up on FastISel for the rest of the block.
// For now, be a little lenient about non-branch terminators.
- if (!isa<TerminatorInst>(BI) || isa<BranchInst>(BI)) {
+ if (!isa<TerminatorInst>(Inst) || isa<BranchInst>(Inst)) {
++NumFastIselFailures;
if (EnableFastISelVerbose || EnableFastISelAbort) {
dbgs() << "FastISel miss: ";
- BI->dump();
+ Inst->dump();
}
if (EnableFastISelAbort)
// The "fast" selector couldn't handle something and bailed.
@@ -1000,17 +767,18 @@ void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn,
}
break;
}
+
+ FastIS->recomputeInsertPt();
}
// Run SelectionDAG instruction selection on the remainder of the block
// not handled by FastISel. If FastISel is not run, this is the entire
// block.
- if (BI != End) {
- bool HadTailCall;
- SelectBasicBlock(LLVMBB, BI, End, HadTailCall);
- }
+ bool HadTailCall;
+ SelectBasicBlock(Begin, BI, HadTailCall);
FinishBasicBlock();
+ FuncInfo->PHINodesToUpdate.clear();
}
delete FastIS;
@@ -1019,32 +787,28 @@ void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn,
void
SelectionDAGISel::FinishBasicBlock() {
- DEBUG(dbgs() << "Target-post-processed machine code:\n");
- DEBUG(BB->dump());
-
DEBUG(dbgs() << "Total amount of phi nodes to update: "
- << SDB->PHINodesToUpdate.size() << "\n");
- DEBUG(for (unsigned i = 0, e = SDB->PHINodesToUpdate.size(); i != e; ++i)
+ << FuncInfo->PHINodesToUpdate.size() << "\n";
+ for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i)
dbgs() << "Node " << i << " : ("
- << SDB->PHINodesToUpdate[i].first
- << ", " << SDB->PHINodesToUpdate[i].second << ")\n");
+ << FuncInfo->PHINodesToUpdate[i].first
+ << ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
// Next, now that we know what the last MBB the LLVM BB expanded is, update
// PHI nodes in successors.
if (SDB->SwitchCases.empty() &&
SDB->JTCases.empty() &&
SDB->BitTestCases.empty()) {
- for (unsigned i = 0, e = SDB->PHINodesToUpdate.size(); i != e; ++i) {
- MachineInstr *PHI = SDB->PHINodesToUpdate[i].first;
+ for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
+ MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first;
assert(PHI->isPHI() &&
"This is not a machine PHI node that we are updating!");
- if (!BB->isSuccessor(PHI->getParent()))
+ if (!FuncInfo->MBB->isSuccessor(PHI->getParent()))
continue;
- PHI->addOperand(MachineOperand::CreateReg(SDB->PHINodesToUpdate[i].second,
- false));
- PHI->addOperand(MachineOperand::CreateMBB(BB));
+ PHI->addOperand(
+ MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false));
+ PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
}
- SDB->PHINodesToUpdate.clear();
return;
}
@@ -1052,38 +816,41 @@ SelectionDAGISel::FinishBasicBlock() {
// Lower header first, if it wasn't already lowered
if (!SDB->BitTestCases[i].Emitted) {
// Set the current basic block to the mbb we wish to insert the code into
- BB = SDB->BitTestCases[i].Parent;
- SDB->setCurrentBasicBlock(BB);
+ FuncInfo->MBB = SDB->BitTestCases[i].Parent;
+ FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
- SDB->visitBitTestHeader(SDB->BitTestCases[i]);
+ SDB->visitBitTestHeader(SDB->BitTestCases[i], FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
- CodeGenAndEmitDAG();
SDB->clear();
+ CodeGenAndEmitDAG();
}
for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); j != ej; ++j) {
// Set the current basic block to the mbb we wish to insert the code into
- BB = SDB->BitTestCases[i].Cases[j].ThisBB;
- SDB->setCurrentBasicBlock(BB);
+ FuncInfo->MBB = SDB->BitTestCases[i].Cases[j].ThisBB;
+ FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
if (j+1 != ej)
SDB->visitBitTestCase(SDB->BitTestCases[i].Cases[j+1].ThisBB,
SDB->BitTestCases[i].Reg,
- SDB->BitTestCases[i].Cases[j]);
+ SDB->BitTestCases[i].Cases[j],
+ FuncInfo->MBB);
else
SDB->visitBitTestCase(SDB->BitTestCases[i].Default,
SDB->BitTestCases[i].Reg,
- SDB->BitTestCases[i].Cases[j]);
+ SDB->BitTestCases[i].Cases[j],
+ FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
- CodeGenAndEmitDAG();
SDB->clear();
+ CodeGenAndEmitDAG();
}
// Update PHI Nodes
- for (unsigned pi = 0, pe = SDB->PHINodesToUpdate.size(); pi != pe; ++pi) {
- MachineInstr *PHI = SDB->PHINodesToUpdate[pi].first;
+ for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
+ pi != pe; ++pi) {
+ MachineInstr *PHI = FuncInfo->PHINodesToUpdate[pi].first;
MachineBasicBlock *PHIBB = PHI->getParent();
assert(PHI->isPHI() &&
"This is not a machine PHI node that we are updating!");
@@ -1091,10 +858,12 @@ SelectionDAGISel::FinishBasicBlock() {
// from last "case" BB.
if (PHIBB == SDB->BitTestCases[i].Default) {
PHI->addOperand(MachineOperand::
- CreateReg(SDB->PHINodesToUpdate[pi].second, false));
+ CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
+ false));
PHI->addOperand(MachineOperand::CreateMBB(SDB->BitTestCases[i].Parent));
PHI->addOperand(MachineOperand::
- CreateReg(SDB->PHINodesToUpdate[pi].second, false));
+ CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
+ false));
PHI->addOperand(MachineOperand::CreateMBB(SDB->BitTestCases[i].Cases.
back().ThisBB));
}
@@ -1104,7 +873,8 @@ SelectionDAGISel::FinishBasicBlock() {
MachineBasicBlock* cBB = SDB->BitTestCases[i].Cases[j].ThisBB;
if (cBB->isSuccessor(PHIBB)) {
PHI->addOperand(MachineOperand::
- CreateReg(SDB->PHINodesToUpdate[pi].second, false));
+ CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
+ false));
PHI->addOperand(MachineOperand::CreateMBB(cBB));
}
}
@@ -1119,42 +889,46 @@ SelectionDAGISel::FinishBasicBlock() {
// Lower header first, if it wasn't already lowered
if (!SDB->JTCases[i].first.Emitted) {
// Set the current basic block to the mbb we wish to insert the code into
- BB = SDB->JTCases[i].first.HeaderBB;
- SDB->setCurrentBasicBlock(BB);
+ FuncInfo->MBB = SDB->JTCases[i].first.HeaderBB;
+ FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
- SDB->visitJumpTableHeader(SDB->JTCases[i].second, SDB->JTCases[i].first);
+ SDB->visitJumpTableHeader(SDB->JTCases[i].second, SDB->JTCases[i].first,
+ FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
- CodeGenAndEmitDAG();
SDB->clear();
+ CodeGenAndEmitDAG();
}
// Set the current basic block to the mbb we wish to insert the code into
- BB = SDB->JTCases[i].second.MBB;
- SDB->setCurrentBasicBlock(BB);
+ FuncInfo->MBB = SDB->JTCases[i].second.MBB;
+ FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
SDB->visitJumpTable(SDB->JTCases[i].second);
CurDAG->setRoot(SDB->getRoot());
- CodeGenAndEmitDAG();
SDB->clear();
+ CodeGenAndEmitDAG();
// Update PHI Nodes
- for (unsigned pi = 0, pe = SDB->PHINodesToUpdate.size(); pi != pe; ++pi) {
- MachineInstr *PHI = SDB->PHINodesToUpdate[pi].first;
+ for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
+ pi != pe; ++pi) {
+ MachineInstr *PHI = FuncInfo->PHINodesToUpdate[pi].first;
MachineBasicBlock *PHIBB = PHI->getParent();
assert(PHI->isPHI() &&
"This is not a machine PHI node that we are updating!");
// "default" BB. We can go there only from header BB.
if (PHIBB == SDB->JTCases[i].second.Default) {
PHI->addOperand
- (MachineOperand::CreateReg(SDB->PHINodesToUpdate[pi].second, false));
+ (MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
+ false));
PHI->addOperand
(MachineOperand::CreateMBB(SDB->JTCases[i].first.HeaderBB));
}
// JT BB. Just iterate over successors here
- if (BB->isSuccessor(PHIBB)) {
+ if (FuncInfo->MBB->isSuccessor(PHIBB)) {
PHI->addOperand
- (MachineOperand::CreateReg(SDB->PHINodesToUpdate[pi].second, false));
- PHI->addOperand(MachineOperand::CreateMBB(BB));
+ (MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
+ false));
+ PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
}
}
}
@@ -1162,14 +936,14 @@ SelectionDAGISel::FinishBasicBlock() {
// If the switch block involved a branch to one of the actual successors, we
// need to update PHI nodes in that block.
- for (unsigned i = 0, e = SDB->PHINodesToUpdate.size(); i != e; ++i) {
- MachineInstr *PHI = SDB->PHINodesToUpdate[i].first;
+ for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
+ MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first;
assert(PHI->isPHI() &&
"This is not a machine PHI node that we are updating!");
- if (BB->isSuccessor(PHI->getParent())) {
- PHI->addOperand(MachineOperand::CreateReg(SDB->PHINodesToUpdate[i].second,
- false));
- PHI->addOperand(MachineOperand::CreateMBB(BB));
+ if (FuncInfo->MBB->isSuccessor(PHI->getParent())) {
+ PHI->addOperand(
+ MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false));
+ PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
}
}
@@ -1177,39 +951,43 @@ SelectionDAGISel::FinishBasicBlock() {
// additional DAGs necessary.
for (unsigned i = 0, e = SDB->SwitchCases.size(); i != e; ++i) {
// Set the current basic block to the mbb we wish to insert the code into
- MachineBasicBlock *ThisBB = BB = SDB->SwitchCases[i].ThisBB;
- SDB->setCurrentBasicBlock(BB);
-
- // Emit the code
- SDB->visitSwitchCase(SDB->SwitchCases[i]);
+ MachineBasicBlock *ThisBB = FuncInfo->MBB = SDB->SwitchCases[i].ThisBB;
+ FuncInfo->InsertPt = FuncInfo->MBB->end();
+
+ // Determine the unique successors.
+ SmallVector<MachineBasicBlock *, 2> Succs;
+ Succs.push_back(SDB->SwitchCases[i].TrueBB);
+ if (SDB->SwitchCases[i].TrueBB != SDB->SwitchCases[i].FalseBB)
+ Succs.push_back(SDB->SwitchCases[i].FalseBB);
+
+ // Emit the code. Note that this could result in ThisBB being split, so
+ // we need to check for updates.
+ SDB->visitSwitchCase(SDB->SwitchCases[i], FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
+ SDB->clear();
CodeGenAndEmitDAG();
+ ThisBB = FuncInfo->MBB;
// Handle any PHI nodes in successors of this chunk, as if we were coming
// from the original BB before switch expansion. Note that PHI nodes can
// occur multiple times in PHINodesToUpdate. We have to be very careful to
// handle them the right number of times.
- while ((BB = SDB->SwitchCases[i].TrueBB)) { // Handle LHS and RHS.
- // If new BB's are created during scheduling, the edges may have been
- // updated. That is, the edge from ThisBB to BB may have been split and
- // BB's predecessor is now another block.
- DenseMap<MachineBasicBlock*, MachineBasicBlock*>::iterator EI =
- SDB->EdgeMapping.find(BB);
- if (EI != SDB->EdgeMapping.end())
- ThisBB = EI->second;
-
- // BB may have been removed from the CFG if a branch was constant folded.
- if (ThisBB->isSuccessor(BB)) {
- for (MachineBasicBlock::iterator Phi = BB->begin();
- Phi != BB->end() && Phi->isPHI();
+ for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
+ FuncInfo->MBB = Succs[i];
+ FuncInfo->InsertPt = FuncInfo->MBB->end();
+ // FuncInfo->MBB may have been removed from the CFG if a branch was
+ // constant folded.
+ if (ThisBB->isSuccessor(FuncInfo->MBB)) {
+ for (MachineBasicBlock::iterator Phi = FuncInfo->MBB->begin();
+ Phi != FuncInfo->MBB->end() && Phi->isPHI();
++Phi) {
// This value for this PHI node is recorded in PHINodesToUpdate.
for (unsigned pn = 0; ; ++pn) {
- assert(pn != SDB->PHINodesToUpdate.size() &&
+ assert(pn != FuncInfo->PHINodesToUpdate.size() &&
"Didn't find PHI entry!");
- if (SDB->PHINodesToUpdate[pn].first == Phi) {
+ if (FuncInfo->PHINodesToUpdate[pn].first == Phi) {
Phi->addOperand(MachineOperand::
- CreateReg(SDB->PHINodesToUpdate[pn].second,
+ CreateReg(FuncInfo->PHINodesToUpdate[pn].second,
false));
Phi->addOperand(MachineOperand::CreateMBB(ThisBB));
break;
@@ -1217,21 +995,9 @@ SelectionDAGISel::FinishBasicBlock() {
}
}
}
-
- // Don't process RHS if same block as LHS.
- if (BB == SDB->SwitchCases[i].FalseBB)
- SDB->SwitchCases[i].FalseBB = 0;
-
- // If we haven't handled the RHS, do so now. Otherwise, we're done.
- SDB->SwitchCases[i].TrueBB = SDB->SwitchCases[i].FalseBB;
- SDB->SwitchCases[i].FalseBB = 0;
}
- assert(SDB->SwitchCases[i].TrueBB == 0 && SDB->SwitchCases[i].FalseBB == 0);
- SDB->clear();
}
SDB->SwitchCases.clear();
-
- SDB->PHINodesToUpdate.clear();
}
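
For readers unfamiliar with machine-level PHIs: the FinishBasicBlock hunks above always append an incoming value as a pair of operands, the virtual register followed by the predecessor block it arrives from. A small hypothetical helper (not part of the patch) capturing that pattern, assuming the same-era MachineOperand API the diff itself uses:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include <cassert>

    using namespace llvm;

    // Append one incoming value to a machine PHI: first the register
    // carrying the value, then the predecessor block it comes from.
    static void AddPHIIncoming(MachineInstr *PHI, unsigned Reg,
                               MachineBasicBlock *Pred) {
      assert(PHI->isPHI() && "This is not a machine PHI node");
      PHI->addOperand(MachineOperand::CreateReg(Reg, /*isDef=*/false));
      PHI->addOperand(MachineOperand::CreateMBB(Pred));
    }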
@@ -1330,16 +1096,18 @@ SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops) {
std::vector<SDValue> InOps;
std::swap(InOps, Ops);
- Ops.push_back(InOps[0]); // input chain.
- Ops.push_back(InOps[1]); // input asm string.
+ Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
+ Ops.push_back(InOps[InlineAsm::Op_AsmString]); // 1
+ Ops.push_back(InOps[InlineAsm::Op_MDNode]); // 2, !srcloc
+ Ops.push_back(InOps[InlineAsm::Op_IsAlignStack]); // 3
- unsigned i = 2, e = InOps.size();
+ unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
if (InOps[e-1].getValueType() == MVT::Flag)
--e; // Don't process a flag operand if it is here.
while (i != e) {
unsigned Flags = cast<ConstantSDNode>(InOps[i])->getZExtValue();
- if ((Flags & 7) != 4 /*MEM*/) {
+ if (!InlineAsm::isMemKind(Flags)) {
// Just skip over this operand, copying the operands verbatim.
Ops.insert(Ops.end(), InOps.begin()+i,
InOps.begin()+i+InlineAsm::getNumOperandRegisters(Flags) + 1);
@@ -1349,14 +1117,14 @@ SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops) {
"Memory operand with multiple values?");
// Otherwise, this is a memory operand. Ask the target to select it.
std::vector<SDValue> SelOps;
- if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps)) {
- llvm_report_error("Could not match memory address. Inline asm"
- " failure!");
- }
+ if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps))
+ report_fatal_error("Could not match memory address. Inline asm"
+ " failure!");
// Add this to the output node.
- Ops.push_back(CurDAG->getTargetConstant(4/*MEM*/ | (SelOps.size()<< 3),
- MVT::i32));
+ unsigned NewFlags =
+ InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size());
+ Ops.push_back(CurDAG->getTargetConstant(NewFlags, MVT::i32));
Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
i += 2;
}
@@ -1433,7 +1201,8 @@ bool SelectionDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
/// IsLegalToFold - Returns true if the specific operand node N of
/// U can be folded during instruction selection that starts at Root.
bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
- bool IgnoreChains) const {
+ CodeGenOpt::Level OptLevel,
+ bool IgnoreChains) {
if (OptLevel == CodeGenOpt::None) return false;
// If Root use can somehow reach N through a path that doesn't contain
@@ -1517,14 +1286,6 @@ SDNode *SelectionDAGISel::Select_UNDEF(SDNode *N) {
return CurDAG->SelectNodeTo(N, TargetOpcode::IMPLICIT_DEF,N->getValueType(0));
}
-SDNode *SelectionDAGISel::Select_EH_LABEL(SDNode *N) {
- SDValue Chain = N->getOperand(0);
- unsigned C = cast<LabelSDNode>(N)->getLabelID();
- SDValue Tmp = CurDAG->getTargetConstant(C, MVT::i32);
- return CurDAG->SelectNodeTo(N, TargetOpcode::EH_LABEL,
- MVT::Other, Tmp, Chain);
-}
-
/// GetVBR - decode a vbr encoding whose top bit is set.
ALWAYS_INLINE static uint64_t
GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
@@ -1580,8 +1341,9 @@ UpdateChainsAndFlags(SDNode *NodeToMatch, SDValue InputChain,
assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
CurDAG->ReplaceAllUsesOfValueWith(ChainVal, InputChain, &ISU);
- // If the node became dead, delete it.
- if (ChainNode->use_empty())
+ // If the node became dead and we haven't already seen it, delete it.
+ if (ChainNode->use_empty() &&
+ !std::count(NowDeadNodes.begin(), NowDeadNodes.end(), ChainNode))
NowDeadNodes.push_back(ChainNode);
}
}
@@ -1602,8 +1364,9 @@ UpdateChainsAndFlags(SDNode *NodeToMatch, SDValue InputChain,
CurDAG->ReplaceAllUsesOfValueWith(SDValue(FRN, FRN->getNumValues()-1),
InputFlag, &ISU);
- // If the node became dead, delete it.
- if (FRN->use_empty())
+ // If the node became dead and we haven't already seen it, delete it.
+ if (FRN->use_empty() &&
+ !std::count(NowDeadNodes.begin(), NowDeadNodes.end(), FRN))
NowDeadNodes.push_back(FRN);
}
}
@@ -1651,7 +1414,8 @@ WalkChainUsers(SDNode *ChainedNode,
if (User->getOpcode() == ISD::CopyToReg ||
User->getOpcode() == ISD::CopyFromReg ||
- User->getOpcode() == ISD::INLINEASM) {
+ User->getOpcode() == ISD::INLINEASM ||
+ User->getOpcode() == ISD::EH_LABEL) {
// If their node ID got reset to -1 then they've already been selected.
// Treat them like a MachineOpcode.
if (User->getNodeId() == -1)
@@ -1797,9 +1561,9 @@ MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
// It is possible we're using MorphNodeTo to replace a node with no
// normal results with one that has a normal result (or we could be
// adding a chain) and the input could have flags and chains as well.
- // In this case we need to shifting the operands down.
+ // In this case we need to shift the operands down.
// FIXME: This is a horrible hack and broken in obscure cases, no worse
- // than the old isel though. We should sink this into MorphNodeTo.
+ // than the old isel though.
int OldFlagResultNo = -1, OldChainResultNo = -1;
unsigned NTMNumResults = Node->getNumValues();
@@ -1832,7 +1596,7 @@ MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
SDValue(Res, ResNumResults-1));
if ((EmitNodeInfo & OPFL_FlagOutput) != 0)
- --ResNumResults;
+ --ResNumResults;
// Move the chain reference if needed.
if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
@@ -1875,7 +1639,9 @@ CheckNodePredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
ALWAYS_INLINE static bool
CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDNode *N) {
- return N->getOpcode() == MatcherTable[MatcherIndex++];
+ uint16_t Opc = MatcherTable[MatcherIndex++];
+ Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
+ return N->getOpcode() == Opc;
}
ALWAYS_INLINE static bool
@@ -2011,6 +1777,7 @@ static unsigned IsPredicateKnownToFail(const unsigned char *Table,
}
}
+namespace {
struct MatchScope {
/// FailIndex - If this match fails, this is the index to continue with.
@@ -2032,6 +1799,8 @@ struct MatchScope {
bool HasChainNodesMatched, HasFlagResultNodesMatched;
};
+}
+
SDNode *SelectionDAGISel::
SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
unsigned TableSize) {
@@ -2042,7 +1811,10 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
case ISD::EntryToken: // These nodes remain the same.
case ISD::BasicBlock:
case ISD::Register:
+ //case ISD::VALUETYPE:
+ //case ISD::CONDCODE:
case ISD::HANDLENODE:
+ case ISD::MDNODE_SDNODE:
case ISD::TargetConstant:
case ISD::TargetConstantFP:
case ISD::TargetConstantPool:
@@ -2055,6 +1827,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
case ISD::TokenFactor:
case ISD::CopyFromReg:
case ISD::CopyToReg:
+ case ISD::EH_LABEL:
NodeToMatch->setNodeId(-1); // Mark selected.
return 0;
case ISD::AssertSext:
@@ -2063,7 +1836,6 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
NodeToMatch->getOperand(0));
return 0;
case ISD::INLINEASM: return Select_INLINEASM(NodeToMatch);
- case ISD::EH_LABEL: return Select_EH_LABEL(NodeToMatch);
case ISD::UNDEF: return Select_UNDEF(NodeToMatch);
}
@@ -2127,7 +1899,8 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
if (CaseSize == 0) break;
// Get the opcode, add the index to the table.
- unsigned Opc = MatcherTable[Idx++];
+ uint16_t Opc = MatcherTable[Idx++];
+ Opc |= (unsigned short)MatcherTable[Idx++] << 8;
if (Opc >= OpcodeOffset.size())
OpcodeOffset.resize((Opc+1)*2);
OpcodeOffset[Opc] = Idx;
@@ -2141,6 +1914,9 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
while (1) {
assert(MatcherIndex < TableSize && "Invalid index");
+#ifndef NDEBUG
+ unsigned CurrentOpcodeIndex = MatcherIndex;
+#endif
BuiltinOpcodes Opcode = (BuiltinOpcodes)MatcherTable[MatcherIndex++];
switch (Opcode) {
case OPC_Scope: {
@@ -2163,6 +1939,9 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
FailIndex = MatcherIndex+NumToSkip;
+ unsigned MatcherIndexOfPredicate = MatcherIndex;
+ (void)MatcherIndexOfPredicate; // silence warning.
+
// If we can't evaluate this predicate without pushing a scope (e.g. if
// it is a 'MoveParent') or if the predicate succeeds on this node, we
// push the scope and evaluate the full predicate chain.
@@ -2172,9 +1951,10 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
if (!Result)
break;
- DEBUG(errs() << " Skipped scope entry at index " << MatcherIndex
- << " continuing at " << FailIndex << "\n");
-
+ DEBUG(errs() << " Skipped scope entry (due to false predicate) at "
+ << "index " << MatcherIndexOfPredicate
+ << ", continuing at " << FailIndex << "\n");
+ ++NumDAGIselRetries;
// Otherwise, we know that this case of the Scope is guaranteed to fail,
// move to the next case.
@@ -2280,8 +2060,11 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
if (CaseSize == 0) break;
+ uint16_t Opc = MatcherTable[MatcherIndex++];
+ Opc |= (unsigned short)MatcherTable[MatcherIndex++] << 8;
+
// If the opcode matches, then we will execute this case.
- if (CurNodeOpcode == MatcherTable[MatcherIndex++])
+ if (CurNodeOpcode == Opc)
break;
// Otherwise, skip over this case.
@@ -2370,7 +2153,8 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
if (!IsProfitableToFold(N, NodeStack[NodeStack.size()-2].getNode(),
NodeToMatch) ||
!IsLegalToFold(N, NodeStack[NodeStack.size()-2].getNode(),
- NodeToMatch, true/*We validate our own chains*/))
+ NodeToMatch, OptLevel,
+ true/*We validate our own chains*/))
break;
continue;
@@ -2410,6 +2194,35 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
continue;
}
+ case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0
+ case OPC_EmitMergeInputChains1_1: { // OPC_EmitMergeInputChains, 1, 1
+ // These are space-optimized forms of OPC_EmitMergeInputChains.
+ assert(InputChain.getNode() == 0 &&
+ "EmitMergeInputChains should be the first chain producing node");
+ assert(ChainNodesMatched.empty() &&
+ "Should only have one EmitMergeInputChains per match");
+
+ // Read all of the chained nodes.
+ unsigned RecNo = Opcode == OPC_EmitMergeInputChains1_1;
+ assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
+ ChainNodesMatched.push_back(RecordedNodes[RecNo].getNode());
+
+ // FIXME: What if other value results of the node have uses not matched
+ // by this pattern?
+ if (ChainNodesMatched.back() != NodeToMatch &&
+ !RecordedNodes[RecNo].hasOneUse()) {
+ ChainNodesMatched.clear();
+ break;
+ }
+
+ // Merge the input chains if they are not intra-pattern references.
+ InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
+
+ if (InputChain.getNode() == 0)
+ break; // Failed to merge.
+ continue;
+ }
+
case OPC_EmitMergeInputChains: {
assert(InputChain.getNode() == 0 &&
"EmitMergeInputChains should be the first chain producing node");
@@ -2628,14 +2441,10 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
assert(ResSlot < RecordedNodes.size() && "Invalid CheckSame");
SDValue Res = RecordedNodes[ResSlot];
- // FIXME2: Eliminate this horrible hack by fixing the 'Gen' program
- // after (parallel) on input patterns are removed. This would also
- // allow us to stop encoding #results in OPC_CompleteMatch's table
- // entry.
- if (NodeToMatch->getNumValues() <= i ||
- NodeToMatch->getValueType(i) == MVT::Other ||
- NodeToMatch->getValueType(i) == MVT::Flag)
- break;
+ assert(i < NodeToMatch->getNumValues() &&
+ NodeToMatch->getValueType(i) != MVT::Other &&
+ NodeToMatch->getValueType(i) != MVT::Flag &&
+ "Invalid number of results to complete!");
assert((NodeToMatch->getValueType(i) == Res.getValueType() ||
NodeToMatch->getValueType(i) == MVT::iPTR ||
Res.getValueType() == MVT::iPTR ||
@@ -2666,6 +2475,8 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
// If the code reached this point, then the match failed. See if there is
// another child to try in the current 'Scope', otherwise pop it until we
// find a case to check.
+ DEBUG(errs() << " Match failed at index " << CurrentOpcodeIndex << "\n");
+ ++NumDAGIselRetries;
while (1) {
if (MatchScopes.empty()) {
CannotYetSelect(NodeToMatch);
@@ -2680,13 +2491,12 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
NodeStack.append(LastScope.NodeStack.begin(), LastScope.NodeStack.end());
N = NodeStack.back();
- DEBUG(errs() << " Match failed at index " << MatcherIndex
- << " continuing at " << LastScope.FailIndex << "\n");
-
if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
MatchedMemRefs.resize(LastScope.NumMatchedMemRefs);
MatcherIndex = LastScope.FailIndex;
+ DEBUG(errs() << " Continuing at " << MatcherIndex << "\n");
+
InputChain = LastScope.InputChain;
InputFlag = LastScope.InputFlag;
if (!LastScope.HasChainNodesMatched)
@@ -2737,7 +2547,7 @@ void SelectionDAGISel::CannotYetSelect(SDNode *N) {
else
Msg << "unknown intrinsic #" << iid;
}
- llvm_report_error(Msg.str());
+ report_fatal_error(Msg.str());
}
char SelectionDAGISel::ID = 0;
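
Note on the timing hunks above: the old code duplicated each timed statement inside an if (TimePassesIsEnabled) / else pair, while the new code passes the flag straight to NamedRegionTimer, so a bare scope suffices. A minimal sketch of that RAII usage, assuming only what the diff shows (the three-argument NamedRegionTimer constructor from llvm/Support/Timer.h); DoSomeWork and RunTimed are placeholders:

    #include "llvm/Support/Timer.h"

    using namespace llvm;

    static void DoSomeWork() { /* placeholder for the timed statement */ }

    void RunTimed(bool TimePassesIsEnabled) {
      // The timer starts in its constructor and stops in its destructor.
      // When the flag is false it is a no-op, so the work is written once
      // and the braces alone delimit the timed region.
      {
        NamedRegionTimer T("Some Work", "Some Group", TimePassesIsEnabled);
        DoSomeWork();
      }
    }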
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
index 3786bd1..8313de5 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
@@ -199,7 +199,7 @@ const std::string SelectionDAG::getGraphAttrs(const SDNode *N) const {
#else
errs() << "SelectionDAG::getGraphAttrs is only available in debug builds"
<< " on systems with Graphviz or gv!\n";
- return std::string("");
+ return std::string();
#endif
}
@@ -278,7 +278,7 @@ std::string ScheduleDAGSDNodes::getGraphNodeLabel(const SUnit *SU) const {
FlaggedNodes.push_back(N);
while (!FlaggedNodes.empty()) {
O << DOTGraphTraits<SelectionDAG*>
- ::getSimpleNodeLabel(FlaggedNodes.back(), DAG);
+ ::getSimpleNodeLabel(FlaggedNodes.back(), DAG);
FlaggedNodes.pop_back();
if (!FlaggedNodes.empty())
O << "\n ";
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 48d2a1c..b74f600 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -18,9 +18,9 @@
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetSubtarget.h"
#include "llvm/GlobalVariable.h"
#include "llvm/DerivedTypes.h"
+#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -174,17 +174,25 @@ static void InitLibcallNames(const char **Names) {
Names[RTLIB::FLOOR_F64] = "floor";
Names[RTLIB::FLOOR_F80] = "floorl";
Names[RTLIB::FLOOR_PPCF128] = "floorl";
+ Names[RTLIB::COPYSIGN_F32] = "copysignf";
+ Names[RTLIB::COPYSIGN_F64] = "copysign";
+ Names[RTLIB::COPYSIGN_F80] = "copysignl";
+ Names[RTLIB::COPYSIGN_PPCF128] = "copysignl";
Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
+ Names[RTLIB::FPEXT_F16_F32] = "__gnu_h2f_ieee";
+ Names[RTLIB::FPROUND_F32_F16] = "__gnu_f2h_ieee";
Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2";
Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2";
Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2";
Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2";
- Names[RTLIB::FPTOSINT_F32_I8] = "__fixsfi8";
- Names[RTLIB::FPTOSINT_F32_I16] = "__fixsfi16";
+ Names[RTLIB::FPTOSINT_F32_I8] = "__fixsfqi";
+ Names[RTLIB::FPTOSINT_F32_I16] = "__fixsfhi";
Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
+ Names[RTLIB::FPTOSINT_F64_I8] = "__fixdfqi";
+ Names[RTLIB::FPTOSINT_F64_I16] = "__fixdfhi";
Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
@@ -194,11 +202,13 @@ static void InitLibcallNames(const char **Names) {
Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi";
Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
- Names[RTLIB::FPTOUINT_F32_I8] = "__fixunssfi8";
- Names[RTLIB::FPTOUINT_F32_I16] = "__fixunssfi16";
+ Names[RTLIB::FPTOUINT_F32_I8] = "__fixunssfqi";
+ Names[RTLIB::FPTOUINT_F32_I16] = "__fixunssfhi";
Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti";
+ Names[RTLIB::FPTOUINT_F64_I8] = "__fixunsdfqi";
+ Names[RTLIB::FPTOUINT_F64_I16] = "__fixunsdfhi";
Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti";
@@ -252,6 +262,38 @@ static void InitLibcallNames(const char **Names) {
Names[RTLIB::MEMMOVE] = "memmove";
Names[RTLIB::MEMSET] = "memset";
Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
+ Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1";
+ Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2";
+ Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4] = "__sync_val_compare_and_swap_4";
+ Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8] = "__sync_val_compare_and_swap_8";
+ Names[RTLIB::SYNC_LOCK_TEST_AND_SET_1] = "__sync_lock_test_and_set_1";
+ Names[RTLIB::SYNC_LOCK_TEST_AND_SET_2] = "__sync_lock_test_and_set_2";
+ Names[RTLIB::SYNC_LOCK_TEST_AND_SET_4] = "__sync_lock_test_and_set_4";
+ Names[RTLIB::SYNC_LOCK_TEST_AND_SET_8] = "__sync_lock_test_and_set_8";
+ Names[RTLIB::SYNC_FETCH_AND_ADD_1] = "__sync_fetch_and_add_1";
+ Names[RTLIB::SYNC_FETCH_AND_ADD_2] = "__sync_fetch_and_add_2";
+ Names[RTLIB::SYNC_FETCH_AND_ADD_4] = "__sync_fetch_and_add_4";
+ Names[RTLIB::SYNC_FETCH_AND_ADD_8] = "__sync_fetch_and_add_8";
+ Names[RTLIB::SYNC_FETCH_AND_SUB_1] = "__sync_fetch_and_sub_1";
+ Names[RTLIB::SYNC_FETCH_AND_SUB_2] = "__sync_fetch_and_sub_2";
+ Names[RTLIB::SYNC_FETCH_AND_SUB_4] = "__sync_fetch_and_sub_4";
+ Names[RTLIB::SYNC_FETCH_AND_SUB_8] = "__sync_fetch_and_sub_8";
+ Names[RTLIB::SYNC_FETCH_AND_AND_1] = "__sync_fetch_and_and_1";
+ Names[RTLIB::SYNC_FETCH_AND_AND_2] = "__sync_fetch_and_and_2";
+ Names[RTLIB::SYNC_FETCH_AND_AND_4] = "__sync_fetch_and_and_4";
+ Names[RTLIB::SYNC_FETCH_AND_AND_8] = "__sync_fetch_and_and_8";
+ Names[RTLIB::SYNC_FETCH_AND_OR_1] = "__sync_fetch_and_or_1";
+ Names[RTLIB::SYNC_FETCH_AND_OR_2] = "__sync_fetch_and_or_2";
+ Names[RTLIB::SYNC_FETCH_AND_OR_4] = "__sync_fetch_and_or_4";
+ Names[RTLIB::SYNC_FETCH_AND_OR_8] = "__sync_fetch_and_or_8";
+ Names[RTLIB::SYNC_FETCH_AND_XOR_1] = "__sync_fetch_and_xor_1";
+ Names[RTLIB::SYNC_FETCH_AND_XOR_2] = "__sync_fetch_and_xor_2";
+ Names[RTLIB::SYNC_FETCH_AND_XOR_4] = "__sync_fetch_and_xor_4";
+ Names[RTLIB::SYNC_FETCH_AND_XOR_8] = "__sync_fetch_and_xor_8";
+ Names[RTLIB::SYNC_FETCH_AND_NAND_1] = "__sync_fetch_and_nand_1";
+ Names[RTLIB::SYNC_FETCH_AND_NAND_2] = "__sync_fetch_and_nand_2";
+ Names[RTLIB::SYNC_FETCH_AND_NAND_4] = "__sync_fetch_and_nand_4";
+ Names[RTLIB::SYNC_FETCH_AND_NAND_8] = "__sync_fetch_and_nand_8";
}
/// InitLibcallCallingConvs - Set default libcall CallingConvs.
@@ -269,6 +311,7 @@ RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
if (RetVT == MVT::f64)
return FPEXT_F32_F64;
}
+
return UNKNOWN_LIBCALL;
}
@@ -288,6 +331,7 @@ RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
if (OpVT == MVT::ppcf128)
return FPROUND_PPCF128_F64;
}
+
return UNKNOWN_LIBCALL;
}
@@ -306,6 +350,10 @@ RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
if (RetVT == MVT::i128)
return FPTOSINT_F32_I128;
} else if (OpVT == MVT::f64) {
+ if (RetVT == MVT::i8)
+ return FPTOSINT_F64_I8;
+ if (RetVT == MVT::i16)
+ return FPTOSINT_F64_I16;
if (RetVT == MVT::i32)
return FPTOSINT_F64_I32;
if (RetVT == MVT::i64)
@@ -345,6 +393,10 @@ RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
if (RetVT == MVT::i128)
return FPTOUINT_F32_I128;
} else if (OpVT == MVT::f64) {
+ if (RetVT == MVT::i8)
+ return FPTOUINT_F64_I8;
+ if (RetVT == MVT::i16)
+ return FPTOUINT_F64_I16;
if (RetVT == MVT::i32)
return FPTOUINT_F64_I32;
if (RetVT == MVT::i64)
@@ -460,14 +512,14 @@ static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
}
/// NOTE: The constructor takes ownership of TLOF.
-TargetLowering::TargetLowering(TargetMachine &tm,TargetLoweringObjectFile *tlof)
+TargetLowering::TargetLowering(const TargetMachine &tm,
+ const TargetLoweringObjectFile *tlof)
: TM(tm), TD(TM.getTargetData()), TLOF(*tlof) {
// All operations default to being supported.
memset(OpActions, 0, sizeof(OpActions));
memset(LoadExtActions, 0, sizeof(LoadExtActions));
memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
- memset(ConvertActions, 0, sizeof(ConvertActions));
memset(CondCodeActions, 0, sizeof(CondCodeActions));
// Set default actions for various operations.
@@ -524,12 +576,12 @@ TargetLowering::TargetLowering(TargetMachine &tm,TargetLoweringObjectFile *tlof)
ExceptionPointerRegister = 0;
ExceptionSelectorRegister = 0;
BooleanContents = UndefinedBooleanContent;
- SchedPreferenceInfo = SchedulingForLatency;
+ SchedPreferenceInfo = Sched::Latency;
JumpBufSize = 0;
JumpBufAlignment = 0;
- IfCvtBlockSizeLimit = 2;
- IfCvtDupBlockSizeLimit = 0;
PrefLoopAlignment = 0;
+ MinStackArgumentAlignment = 1;
+ ShouldFoldAtomicFences = false;
InitLibcallNames(LibcallRoutineNames);
InitCmpLibcallCCs(CmpLibcallCCs);
@@ -559,9 +611,9 @@ bool TargetLowering::canOpTrap(unsigned Op, EVT VT) const {
static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
- unsigned &NumIntermediates,
- EVT &RegisterVT,
- TargetLowering* TLI) {
+ unsigned &NumIntermediates,
+ EVT &RegisterVT,
+ TargetLowering *TLI) {
// Figure out the right, legal destination reg to copy into.
unsigned NumElts = VT.getVectorNumElements();
MVT EltTy = VT.getVectorElementType();
@@ -591,18 +643,61 @@ static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
EVT DestVT = TLI->getRegisterType(NewVT);
RegisterVT = DestVT;
- if (EVT(DestVT).bitsLT(NewVT)) {
- // Value is expanded, e.g. i64 -> i16.
+ if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits());
- } else {
- // Otherwise, promotion or legal types use the same number of registers as
- // the vector decimated to the appropriate level.
- return NumVectorRegs;
- }
- return 1;
+ // Otherwise, promotion or legal types use the same number of registers as
+ // the vector decimated to the appropriate level.
+ return NumVectorRegs;
+}
+
+/// isLegalRC - Return true if the value types that can be represented by the
+/// specified register class are all legal.
+bool TargetLowering::isLegalRC(const TargetRegisterClass *RC) const {
+ for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
+ I != E; ++I) {
+ if (isTypeLegal(*I))
+ return true;
+ }
+ return false;
+}
+
+/// hasLegalSuperRegRegClasses - Return true if the specified register class
+/// has one or more super-reg register classes that are legal.
+bool
+TargetLowering::hasLegalSuperRegRegClasses(const TargetRegisterClass *RC) const{
+ if (*RC->superregclasses_begin() == 0)
+ return false;
+ for (TargetRegisterInfo::regclass_iterator I = RC->superregclasses_begin(),
+ E = RC->superregclasses_end(); I != E; ++I) {
+ const TargetRegisterClass *RRC = *I;
+ if (isLegalRC(RRC))
+ return true;
+ }
+ return false;
+}
+
+/// findRepresentativeClass - Return the largest legal super-reg register class
+/// of the register class for the specified type and its associated "cost".
+std::pair<const TargetRegisterClass*, uint8_t>
+TargetLowering::findRepresentativeClass(EVT VT) const {
+ const TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy];
+ if (!RC)
+ return std::make_pair(RC, 0);
+ const TargetRegisterClass *BestRC = RC;
+ for (TargetRegisterInfo::regclass_iterator I = RC->superregclasses_begin(),
+ E = RC->superregclasses_end(); I != E; ++I) {
+ const TargetRegisterClass *RRC = *I;
+ if (RRC->isASubClass() || !isLegalRC(RRC))
+ continue;
+ if (!hasLegalSuperRegRegClasses(RRC))
+ return std::make_pair(RRC, 1);
+ BestRC = RRC;
+ }
+ return std::make_pair(BestRC, 1);
}
+
/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLowering::computeRegisterProperties() {
@@ -686,41 +781,60 @@ void TargetLowering::computeRegisterProperties() {
for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
MVT VT = (MVT::SimpleValueType)i;
- if (!isTypeLegal(VT)) {
- MVT IntermediateVT;
- EVT RegisterVT;
- unsigned NumIntermediates;
- NumRegistersForVT[i] =
- getVectorTypeBreakdownMVT(VT, IntermediateVT, NumIntermediates,
- RegisterVT, this);
- RegisterTypeForVT[i] = RegisterVT;
-
- // Determine if there is a legal wider type.
+ if (isTypeLegal(VT)) continue;
+
+ // Determine if there is a legal wider type. If so, we should promote to
+ // that wider vector type.
+ EVT EltVT = VT.getVectorElementType();
+ unsigned NElts = VT.getVectorNumElements();
+ if (NElts != 1) {
bool IsLegalWiderType = false;
- EVT EltVT = VT.getVectorElementType();
- unsigned NElts = VT.getVectorNumElements();
for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
EVT SVT = (MVT::SimpleValueType)nVT;
- if (isTypeLegal(SVT) && SVT.getVectorElementType() == EltVT &&
- SVT.getVectorNumElements() > NElts && NElts != 1) {
+ if (SVT.getVectorElementType() == EltVT &&
+ SVT.getVectorNumElements() > NElts &&
+ isTypeSynthesizable(SVT)) {
TransformToType[i] = SVT;
+ RegisterTypeForVT[i] = SVT;
+ NumRegistersForVT[i] = 1;
ValueTypeActions.setTypeAction(VT, Promote);
IsLegalWiderType = true;
break;
}
}
- if (!IsLegalWiderType) {
- EVT NVT = VT.getPow2VectorType();
- if (NVT == VT) {
- // Type is already a power of 2. The default action is to split.
- TransformToType[i] = MVT::Other;
- ValueTypeActions.setTypeAction(VT, Expand);
- } else {
- TransformToType[i] = NVT;
- ValueTypeActions.setTypeAction(VT, Promote);
- }
- }
+ if (IsLegalWiderType) continue;
}
+
+ MVT IntermediateVT;
+ EVT RegisterVT;
+ unsigned NumIntermediates;
+ NumRegistersForVT[i] =
+ getVectorTypeBreakdownMVT(VT, IntermediateVT, NumIntermediates,
+ RegisterVT, this);
+ RegisterTypeForVT[i] = RegisterVT;
+
+ EVT NVT = VT.getPow2VectorType();
+ if (NVT == VT) {
+ // Type is already a power of 2. The default action is to split.
+ TransformToType[i] = MVT::Other;
+ ValueTypeActions.setTypeAction(VT, Expand);
+ } else {
+ TransformToType[i] = NVT;
+ ValueTypeActions.setTypeAction(VT, Promote);
+ }
+ }
+
+ // Determine the 'representative' register class for each value type.
+ // A representative register class is the largest legal register class
+ // (i.e., one that is not a sub-register class of another legal class)
+ // for a group of value types. For example, on i386 the representative
+ // class for i8, i16, and i32 would be GR32; on x86_64 it is GR64.
+ for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
+ const TargetRegisterClass* RRC;
+ uint8_t Cost;
+ tie(RRC, Cost) = findRepresentativeClass((MVT::SimpleValueType)i);
+ RepRegClassForVT[i] = RRC;
+ RepRegClassCostForVT[i] = Cost;
}
}
@@ -750,8 +864,21 @@ unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
EVT &IntermediateVT,
unsigned &NumIntermediates,
EVT &RegisterVT) const {
- // Figure out the right, legal destination reg to copy into.
unsigned NumElts = VT.getVectorNumElements();
+
+ // If there is a wider vector type with the same element type as this one,
+ // we should widen to that legal vector type. This handles things like
+ // <2 x float> -> <4 x float>.
+ if (NumElts != 1 && getTypeAction(VT) == Promote) {
+ RegisterVT = getTypeToTransformTo(Context, VT);
+ if (isTypeLegal(RegisterVT)) {
+ IntermediateVT = RegisterVT;
+ NumIntermediates = 1;
+ return 1;
+ }
+ }
+
+ // Figure out the right, legal destination reg to copy into.
EVT EltTy = VT.getVectorElementType();
unsigned NumVectorRegs = 1;
@@ -780,30 +907,71 @@ unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
EVT DestVT = getRegisterType(Context, NewVT);
RegisterVT = DestVT;
- if (DestVT.bitsLT(NewVT)) {
- // Value is expanded, e.g. i64 -> i16.
+ if (DestVT.bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits());
- } else {
- // Otherwise, promotion or legal types use the same number of registers as
- // the vector decimated to the appropriate level.
- return NumVectorRegs;
- }
- return 1;
+ // Otherwise, promotion or legal types use the same number of registers as
+ // the vector decimated to the appropriate level.
+ return NumVectorRegs;
}
-/// getWidenVectorType: given a vector type, returns the type to widen to
-/// (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
-/// If there is no vector type that we want to widen to, returns MVT::Other
-/// When and where to widen is target dependent based on the cost of
-/// scalarizing vs using the wider vector type.
-EVT TargetLowering::getWidenVectorType(EVT VT) const {
- assert(VT.isVector());
- if (isTypeLegal(VT))
- return VT;
-
- // Default is not to widen until moved to LegalizeTypes
- return MVT::Other;
+/// Get the EVTs and ArgFlags collections that represent the legalized return
+/// type of the given function. This does not require a DAG or a return value,
+/// and is suitable for use before any DAGs for the function are constructed.
+/// TODO: Move this out of TargetLowering.cpp.
+void llvm::GetReturnInfo(const Type* ReturnType, Attributes attr,
+ SmallVectorImpl<ISD::OutputArg> &Outs,
+ const TargetLowering &TLI,
+ SmallVectorImpl<uint64_t> *Offsets) {
+ SmallVector<EVT, 4> ValueVTs;
+ ComputeValueVTs(TLI, ReturnType, ValueVTs);
+ unsigned NumValues = ValueVTs.size();
+ if (NumValues == 0) return;
+ unsigned Offset = 0;
+
+ for (unsigned j = 0, f = NumValues; j != f; ++j) {
+ EVT VT = ValueVTs[j];
+ ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
+
+ if (attr & Attribute::SExt)
+ ExtendKind = ISD::SIGN_EXTEND;
+ else if (attr & Attribute::ZExt)
+ ExtendKind = ISD::ZERO_EXTEND;
+
+ // FIXME: C calling convention requires the return type to be promoted to
+ // at least 32-bit. But this is not necessary for non-C calling
+ // conventions. The frontend should mark functions whose return values
+ // require promoting with signext or zeroext attributes.
+ if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
+ EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
+ if (VT.bitsLT(MinVT))
+ VT = MinVT;
+ }
+
+ unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
+ EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
+ unsigned PartSize = TLI.getTargetData()->getTypeAllocSize(
+ PartVT.getTypeForEVT(ReturnType->getContext()));
+
+ // 'inreg' on function refers to return value
+ ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
+ if (attr & Attribute::InReg)
+ Flags.setInReg();
+
+ // Propagate extension type if any
+ if (attr & Attribute::SExt)
+ Flags.setSExt();
+ else if (attr & Attribute::ZExt)
+ Flags.setZExt();
+
+ for (unsigned i = 0; i < NumParts; ++i) {
+ Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true));
+ if (Offsets) {
+ Offsets->push_back(Offset);
+ Offset += PartSize;
+ }
+ }
+ }
}
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
@@ -1037,7 +1205,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask))
return true;
// If the operation can be done in a smaller type, do so.
- if (TLO.ShrinkOps && TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
+ if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
return true;
// Output known-1 bits are only known if set in both the LHS & RHS.
@@ -1071,7 +1239,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (TLO.ShrinkDemandedConstant(Op, NewMask))
return true;
// If the operation can be done in a smaller type, do so.
- if (TLO.ShrinkOps && TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
+ if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
return true;
// Output known-0 bits are only known if clear in both the LHS & RHS.
@@ -1096,7 +1264,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if ((KnownZero2 & NewMask) == NewMask)
return TLO.CombineTo(Op, Op.getOperand(1));
// If the operation can be done in a smaller type, do so.
- if (TLO.ShrinkOps && TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
+ if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
return true;
// If all of the unknown bits are known to be zero on one side or the other
@@ -1215,9 +1383,32 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
}
}
- if (SimplifyDemandedBits(Op.getOperand(0), NewMask.lshr(ShAmt),
+ if (SimplifyDemandedBits(InOp, NewMask.lshr(ShAmt),
KnownZero, KnownOne, TLO, Depth+1))
return true;
+
+ // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
+ // are not demanded. This will likely allow the anyext to be folded away.
+ if (InOp.getNode()->getOpcode() == ISD::ANY_EXTEND) {
+ SDValue InnerOp = InOp.getNode()->getOperand(0);
+ EVT InnerVT = InnerOp.getValueType();
+ if ((APInt::getHighBitsSet(BitWidth,
+ BitWidth - InnerVT.getSizeInBits()) &
+ DemandedMask) == 0 &&
+ isTypeDesirableForOp(ISD::SHL, InnerVT)) {
+ EVT ShTy = getShiftAmountTy();
+ if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
+ ShTy = InnerVT;
+ SDValue NarrowShl =
+ TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
+ TLO.DAG.getConstant(ShAmt, ShTy));
+ return
+ TLO.CombineTo(Op,
+ TLO.DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(),
+ NarrowShl));
+ }
+ }
+
KnownZero <<= SA->getZExtValue();
KnownOne <<= SA->getZExtValue();
// low bits known zero.
@@ -1274,8 +1465,9 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// variable. The low bit of the shift cannot be an input sign bit unless
// the shift amount is >= the size of the datatype, which is undefined.
if (DemandedMask == 1)
- return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, Op.getValueType(),
- Op.getOperand(0), Op.getOperand(1)));
+ return TLO.CombineTo(Op,
+ TLO.DAG.getNode(ISD::SRL, dl, Op.getValueType(),
+ Op.getOperand(0), Op.getOperand(1)));
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
EVT VT = Op.getValueType();
@@ -1321,11 +1513,10 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// present in the input.
APInt NewBits =
APInt::getHighBitsSet(BitWidth,
- BitWidth - EVT.getScalarType().getSizeInBits()) &
- NewMask;
+ BitWidth - EVT.getScalarType().getSizeInBits());
// If none of the extended bits are demanded, eliminate the sextinreg.
- if (NewBits == 0)
+ if ((NewBits & NewMask) == 0)
return TLO.CombineTo(Op, Op.getOperand(0));
APInt InSignBit = APInt::getSignBit(EVT.getScalarType().getSizeInBits());
@@ -1460,23 +1651,29 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
case ISD::SRL:
// Shrink SRL by a constant if none of the high bits shifted in are
// demanded.
- if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1))){
- APInt HighBits = APInt::getHighBitsSet(OperandBitWidth,
- OperandBitWidth - BitWidth);
- HighBits = HighBits.lshr(ShAmt->getZExtValue());
- HighBits.trunc(BitWidth);
-
- if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) {
- // None of the shifted in bits are needed. Add a truncate of the
- // shift input, then shift it.
- SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl,
- Op.getValueType(),
- In.getOperand(0));
- return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl,
- Op.getValueType(),
- NewTrunc,
- In.getOperand(1)));
- }
+ if (TLO.LegalTypes() &&
+ !isTypeDesirableForOp(ISD::SRL, Op.getValueType()))
+ // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
+ // undesirable.
+ break;
+ ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1));
+ if (!ShAmt)
+ break;
+ APInt HighBits = APInt::getHighBitsSet(OperandBitWidth,
+ OperandBitWidth - BitWidth);
+ HighBits = HighBits.lshr(ShAmt->getZExtValue());
+ HighBits.trunc(BitWidth);
+
+ if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) {
+ // None of the shifted in bits are needed. Add a truncate of the
+ // shift input, then shift it.
+ SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl,
+ Op.getValueType(),
+ In.getOperand(0));
+ return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl,
+ Op.getValueType(),
+ NewTrunc,
+ In.getOperand(1)));
}
break;
}
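The restructured TRUNCATE/SRL case above is still the same transformation: (trunc (srl x, c)) can become (srl (trunc x), c) as long as none of the bits shifted in from the wide upper half are demanded; the new early exits only add the type-desirability and non-constant-shift bail-outs. A plain C++ sketch of why that bit condition is the right one (64-to-32-bit truncation and a shift of 5 are arbitrary choices):

#include <cassert>
#include <cstdint>

int main() {
  const unsigned ShAmt = 5;                              // arbitrary, < 32
  const uint32_t NotShiftedIn = 0xFFFFFFFFu >> ShAmt;    // result bits that never
                                                         // come from the upper half
  const uint64_t samples[] = { 0ull, 1ull, 0xFFFFFFFFull,
                               0x123456789ABCDEF0ull, ~0ull };
  for (uint64_t x : samples) {
    uint32_t truncOfShift = (uint32_t)(x >> ShAmt);      // (i32 trunc (i64 srl x, 5))
    uint32_t shiftOfTrunc = (uint32_t)x >> ShAmt;        // (i32 srl (i32 trunc x), 5)
    // They can only differ in the top ShAmt bits -- the HighBits the code
    // intersects with NewMask before doing the rewrite.
    assert((truncOfShift & NotShiftedIn) == (shiftOfTrunc & NotShiftedIn));
  }
  return 0;
}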
@@ -1486,13 +1683,17 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
break;
}
case ISD::AssertZext: {
- EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
- APInt InMask = APInt::getLowBitsSet(BitWidth,
- VT.getSizeInBits());
- if (SimplifyDemandedBits(Op.getOperand(0), InMask & NewMask,
+ // Demand all the bits of the input that are demanded in the output.
+ // The low bits are obvious; the high bits are demanded because we're
+ // asserting that they're zero here.
+ if (SimplifyDemandedBits(Op.getOperand(0), NewMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+
+ EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+ APInt InMask = APInt::getLowBitsSet(BitWidth,
+ VT.getSizeInBits());
KnownZero |= ~InMask & NewMask;
break;
}
@@ -1532,7 +1733,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
KnownOne2, TLO, Depth+1))
return true;
// See if the operation should be performed at a smaller bit width.
- if (TLO.ShrinkOps && TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
+ if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
return true;
}
// FALL THROUGH
@@ -1782,12 +1983,9 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
EVT ExtDstTy = N0.getValueType();
unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
- // If the extended part has any inconsistent bits, it cannot ever
- // compare equal. In other words, they have to be all ones or all
- // zeros.
- APInt ExtBits =
- APInt::getHighBitsSet(ExtDstTyBits, ExtDstTyBits - ExtSrcTyBits);
- if ((C1 & ExtBits) != 0 && (C1 & ExtBits) != ExtBits)
+ // If the constant doesn't fit into the number of bits for the source of
+ // the sign extension, it is impossible for both sides to be equal.
+ if (C1.getMinSignedBits() > ExtSrcTyBits)
return DAG.getConstant(Cond == ISD::SETNE, VT);
SDValue ZextOp;
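The simplified test above works because a value produced by sign-extending from ExtSrcTyBits bits always lies in [-2^(ExtSrcTyBits-1), 2^(ExtSrcTyBits-1)-1]; a constant that needs more signed bits than that can never compare equal, so the setcc folds to a constant. A tiny exhaustive check of the i8-to-i32 case (the constant 300 is an arbitrary out-of-range example):

#include <cassert>
#include <cstdint>

int main() {
  const int32_t C1 = 300;                 // needs 10 signed bits, i.e. more than 8
  for (int v = -128; v <= 127; ++v) {
    int32_t extended = v;                 // every value an i8 sign-extension can produce
    // seteq therefore folds to false and setne to true, matching the
    // DAG.getConstant(Cond == ISD::SETNE, VT) returned above.
    assert(extended != C1);
  }
  return 0;
}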
@@ -1869,10 +2067,15 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
isa<ConstantSDNode>(Op0.getOperand(1)) &&
cast<ConstantSDNode>(Op0.getOperand(1))->getAPIntValue() == 1) {
// If this is (X&1) == / != 1, normalize it to (X&1) != / == 0.
- if (Op0.getValueType() != VT)
+ if (Op0.getValueType().bitsGT(VT))
Op0 = DAG.getNode(ISD::AND, dl, VT,
DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)),
DAG.getConstant(1, VT));
+ else if (Op0.getValueType().bitsLT(VT))
+ Op0 = DAG.getNode(ISD::AND, dl, VT,
+ DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)),
+ DAG.getConstant(1, VT));
+
return DAG.getSetCC(dl, VT, Op0,
DAG.getConstant(0, Op0.getValueType()),
Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ);
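The normalization above relies on the identity that, for a value whose only possibly-set bit is bit 0, comparing it for equality with 1 is the same as comparing it for inequality with 0; the new bitsLT branch merely any-extends X&1 first when the setcc type is wider than X's. A trivial confirmation of the identity in plain C++:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t samples[] = { 0u, 1u, 2u, 3u, 0xFFFFFFFEu, 0xFFFFFFFFu };
  for (uint32_t x : samples)
    // Testing against 0 instead of 1 is generally easier for later combines
    // to fold into flag-setting instructions; the result is unchanged.
    assert(((x & 1u) == 1u) == ((x & 1u) != 0u));
  return 0;
}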
@@ -2240,7 +2443,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
-bool TargetLowering::isGAPlusOffset(SDNode *N, GlobalValue* &GA,
+bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue* &GA,
int64_t &Offset) const {
if (isa<GlobalAddressSDNode>(N)) {
GlobalAddressSDNode *GASD = cast<GlobalAddressSDNode>(N);
@@ -2329,7 +2532,6 @@ const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const{
/// vector. If it is invalid, don't add anything to Ops.
void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
char ConstraintLetter,
- bool hasMemory,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const {
switch (ConstraintLetter) {
@@ -2367,7 +2569,8 @@ void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
if (ConstraintLetter != 'n') {
int64_t Offs = GA->getOffset();
if (C) Offs += C->getZExtValue();
- Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
+ Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
+ C ? C->getDebugLoc() : DebugLoc(),
Op.getValueType(), Offs));
return;
}
@@ -2399,7 +2602,7 @@ std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const {
if (Constraint[0] != '{')
- return std::make_pair(0, static_cast<TargetRegisterClass*>(0));
+ return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");
// Remove the braces from around the name.
@@ -2431,7 +2634,7 @@ getRegForInlineAsmConstraint(const std::string &Constraint,
}
}
- return std::make_pair(0, static_cast<TargetRegisterClass*>(0));
+ return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0));
}
//===----------------------------------------------------------------------===//
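On the 0 -> 0u changes just above: std::make_pair deduces its result type from its arguments, so the literal 0 yields std::pair<int, ...> while 0u yields std::pair<unsigned, ...>, which matches getRegForInlineAsmConstraint's declared return type directly instead of going through pair's converting constructor. That reading of the change is mine, not stated in the patch; the deduction itself is easy to verify:

#include <type_traits>
#include <utility>

int main() {
  auto p  = std::make_pair(0,  static_cast<const char*>(0));   // std::pair<int, const char*>
  auto pu = std::make_pair(0u, static_cast<const char*>(0));   // std::pair<unsigned, const char*>
  static_assert(std::is_same<decltype(p.first),  int>::value,      "0 deduces int");
  static_assert(std::is_same<decltype(pu.first), unsigned>::value, "0u deduces unsigned");
  return 0;
}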
@@ -2490,18 +2693,18 @@ static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
/// 'm' over 'r', for example.
///
static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
- bool hasMemory, const TargetLowering &TLI,
+ const TargetLowering &TLI,
SDValue Op, SelectionDAG *DAG) {
assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
unsigned BestIdx = 0;
TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
int BestGenerality = -1;
-
+
// Loop over the options, keeping track of the most general one.
for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
TargetLowering::ConstraintType CType =
TLI.getConstraintType(OpInfo.Codes[i]);
-
+
// If this is an 'other' constraint, see if the operand is valid for it.
// For example, on X86 we might have an 'rI' constraint. If the operand
// is an integer in the range [0..31] we want to use I (saving a load
@@ -2510,7 +2713,7 @@ static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
assert(OpInfo.Codes[i].size() == 1 &&
"Unhandled multi-letter 'other' constraint");
std::vector<SDValue> ResultOps;
- TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0], hasMemory,
+ TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0],
ResultOps, *DAG);
if (!ResultOps.empty()) {
BestType = CType;
@@ -2519,6 +2722,11 @@ static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
}
}
+ // Things with matching constraints can only be registers, per gcc
+ // documentation. This mainly affects "g" constraints.
+ if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
+ continue;
+
// This constraint letter is more general than the previous one, use it.
int Generality = getConstraintGenerality(CType);
if (Generality > BestGenerality) {
@@ -2537,7 +2745,6 @@ static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
/// OpInfo.ConstraintCode and OpInfo.ConstraintType.
void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
SDValue Op,
- bool hasMemory,
SelectionDAG *DAG) const {
assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
@@ -2546,7 +2753,7 @@ void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
OpInfo.ConstraintCode = OpInfo.Codes[0];
OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
} else {
- ChooseConstraint(OpInfo, hasMemory, *this, Op, DAG);
+ ChooseConstraint(OpInfo, *this, Op, DAG);
}
// 'X' matches anything.
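The ChooseConstraint changes in this file pick between alternatives like the 'rI' example mentioned in the comments by lowering the operand for each letter and keeping the most general one that works, now also refusing memory alternatives for operands with matching constraints. For readers unfamiliar with multi-alternative constraints, here is a GCC-style x86 illustration (the target, mnemonic, and helper name are assumptions made purely for this example, not anything this patch touches):

#include <stdint.h>

static inline uint32_t add_small(uint32_t x, uint32_t y) {
  // With "rI" the compiler checks whether operand %1 is a constant in [0, 31];
  // if so it picks 'I' and encodes it as an immediate, otherwise it falls back
  // to 'r' and materializes the value in a register.
  __asm__("addl %1, %0" : "+r"(x) : "rI"(y));
  return x;
}

Calling add_small(x, 5) lets the 'I' alternative win and fold the constant into the instruction, while add_small(x, some_runtime_value) degrades gracefully to the 'r' alternative.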
diff --git a/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
new file mode 100644
index 0000000..a081e3c
--- /dev/null
+++ b/libclamav/c++/llvm/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp
@@ -0,0 +1,23 @@
+//===-- TargetSelectionDAGInfo.cpp - SelectionDAG Info --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This implements the TargetSelectionDAGInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Target/TargetSelectionDAGInfo.h"
+#include "llvm/Target/TargetMachine.h"
+using namespace llvm;
+
+TargetSelectionDAGInfo::TargetSelectionDAGInfo(const TargetMachine &TM)
+ : TD(TM.getTargetData()) {
+}
+
+TargetSelectionDAGInfo::~TargetSelectionDAGInfo() {
+}
diff --git a/libclamav/c++/llvm/lib/CodeGen/ShadowStackGC.cpp b/libclamav/c++/llvm/lib/CodeGen/ShadowStackGC.cpp
index 0e6d479..6ab0cb0 100644
--- a/libclamav/c++/llvm/lib/CodeGen/ShadowStackGC.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/ShadowStackGC.cpp
@@ -31,6 +31,7 @@
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/IRBuilder.h"
using namespace llvm;
@@ -158,9 +159,10 @@ namespace {
// Create a new invoke instruction.
Args.clear();
- Args.append(CI->op_begin() + 1, CI->op_end());
+ CallSite CS(CI);
+ Args.append(CS.arg_begin(), CS.arg_end());
- InvokeInst *II = InvokeInst::Create(CI->getOperand(0),
+ InvokeInst *II = InvokeInst::Create(CI->getCalledValue(),
NewBB, CleanupBB,
Args.begin(), Args.end(),
CI->getName(), CallBB);
@@ -194,7 +196,7 @@ Constant *ShadowStackGC::GetFrameMap(Function &F) {
unsigned NumMeta = 0;
SmallVector<Constant*,16> Metadata;
for (unsigned I = 0; I != Roots.size(); ++I) {
- Constant *C = cast<Constant>(Roots[I].first->getOperand(2));
+ Constant *C = cast<Constant>(Roots[I].first->getArgOperand(1));
if (!C->isNullValue())
NumMeta = I + 1;
Metadata.push_back(ConstantExpr::getBitCast(C, VoidPtr));
@@ -322,16 +324,16 @@ void ShadowStackGC::CollectRoots(Function &F) {
assert(Roots.empty() && "Not cleaned up?");
- SmallVector<std::pair<CallInst*,AllocaInst*>,16> MetaRoots;
+ SmallVector<std::pair<CallInst*, AllocaInst*>, 16> MetaRoots;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;)
if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++))
if (Function *F = CI->getCalledFunction())
if (F->getIntrinsicID() == Intrinsic::gcroot) {
- std::pair<CallInst*,AllocaInst*> Pair = std::make_pair(
- CI, cast<AllocaInst>(CI->getOperand(1)->stripPointerCasts()));
- if (IsNullValue(CI->getOperand(2)))
+ std::pair<CallInst*, AllocaInst*> Pair = std::make_pair(
+ CI, cast<AllocaInst>(CI->getArgOperand(0)->stripPointerCasts()));
+ if (IsNullValue(CI->getArgOperand(1)))
Roots.push_back(Pair);
else
MetaRoots.push_back(Pair);
diff --git a/libclamav/c++/llvm/lib/CodeGen/SimpleHazardRecognizer.h b/libclamav/c++/llvm/lib/CodeGen/SimpleHazardRecognizer.h
deleted file mode 100644
index f69feaf..0000000
--- a/libclamav/c++/llvm/lib/CodeGen/SimpleHazardRecognizer.h
+++ /dev/null
@@ -1,89 +0,0 @@
-//=- llvm/CodeGen/SimpleHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the SimpleHazardRecognizer class, which
-// implements hazard-avoidance heuristics for scheduling, based on the
-// scheduling itineraries specified for the target.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_SIMPLEHAZARDRECOGNIZER_H
-#define LLVM_CODEGEN_SIMPLEHAZARDRECOGNIZER_H
-
-#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
-#include "llvm/CodeGen/ScheduleDAG.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetInstrInfo.h"
-
-namespace llvm {
- /// SimpleHazardRecognizer - A *very* simple hazard recognizer. It uses
- /// a coarse classification and attempts to avoid that instructions of
- /// a given class aren't grouped too densely together.
- class SimpleHazardRecognizer : public ScheduleHazardRecognizer {
- /// Class - A simple classification for SUnits.
- enum Class {
- Other, Load, Store
- };
-
- /// Window - The Class values of the most recently issued
- /// instructions.
- Class Window[8];
-
- /// getClass - Classify the given SUnit.
- Class getClass(const SUnit *SU) {
- const MachineInstr *MI = SU->getInstr();
- const TargetInstrDesc &TID = MI->getDesc();
- if (TID.mayLoad())
- return Load;
- if (TID.mayStore())
- return Store;
- return Other;
- }
-
- /// Step - Rotate the existing entries in Window and insert the
- /// given class value in position as the most recent.
- void Step(Class C) {
- std::copy(Window+1, array_endof(Window), Window);
- Window[array_lengthof(Window)-1] = C;
- }
-
- public:
- SimpleHazardRecognizer() : Window() {
- Reset();
- }
-
- virtual HazardType getHazardType(SUnit *SU) {
- Class C = getClass(SU);
- if (C == Other)
- return NoHazard;
- unsigned Score = 0;
- for (unsigned i = 0; i != array_lengthof(Window); ++i)
- if (Window[i] == C)
- Score += i + 1;
- if (Score > array_lengthof(Window) * 2)
- return Hazard;
- return NoHazard;
- }
-
- virtual void Reset() {
- for (unsigned i = 0; i != array_lengthof(Window); ++i)
- Window[i] = Other;
- }
-
- virtual void EmitInstruction(SUnit *SU) {
- Step(getClass(SU));
- }
-
- virtual void AdvanceCycle() {
- Step(Other);
- }
- };
-}
-
-#endif
diff --git a/libclamav/c++/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp b/libclamav/c++/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp
index ce72b2f..b29ea19 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp
@@ -31,6 +31,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
@@ -59,17 +60,15 @@ DisableCrossClassJoin("disable-cross-class-join",
cl::init(false), cl::Hidden);
static cl::opt<bool>
-PhysJoinTweak("tweak-phys-join-heuristics",
- cl::desc("Tweak heuristics for joining phys reg with vr"),
+DisablePhysicalJoin("disable-physical-join",
+ cl::desc("Avoid coalescing physical register copies"),
cl::init(false), cl::Hidden);
-static RegisterPass<SimpleRegisterCoalescing>
-X("simple-register-coalescing", "Simple Register Coalescing");
+INITIALIZE_AG_PASS(SimpleRegisterCoalescing, RegisterCoalescer,
+ "simple-register-coalescing", "Simple Register Coalescing",
+ false, false, true);
-// Declare that we implement the RegisterCoalescer interface
-static RegisterAnalysisGroup<RegisterCoalescer, true/*The Default*/> V(X);
-
-const PassInfo *const llvm::SimpleRegisterCoalescingID = &X;
+char &llvm::SimpleRegisterCoalescingID = SimpleRegisterCoalescing::ID;
void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
@@ -103,15 +102,23 @@ void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage &AU) const {
///
/// This returns true if an interval was modified.
///
-bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
- LiveInterval &IntB,
+bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(const CoalescerPair &CP,
MachineInstr *CopyMI) {
+ // Bail if there is no dst interval - can happen when merging physical subreg
+ // operations.
+ if (!li_->hasInterval(CP.getDstReg()))
+ return false;
+
+ LiveInterval &IntA =
+ li_->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
+ LiveInterval &IntB =
+ li_->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
// BValNo is a value number in B that is defined by a copy from A. 'B3' in
// the example above.
LiveInterval::iterator BLR = IntB.FindLiveRangeContaining(CopyIdx);
- assert(BLR != IntB.end() && "Live range not found!");
+ if (BLR == IntB.end()) return false;
VNInfo *BValNo = BLR->valno;
// Get the location that B is defined at. Two options: either this value has
@@ -123,7 +130,8 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
// AValNo is the value number in A that defines the copy, A3 in the example.
SlotIndex CopyUseIdx = CopyIdx.getUseIndex();
LiveInterval::iterator ALR = IntA.FindLiveRangeContaining(CopyUseIdx);
- assert(ALR != IntA.end() && "Live range not found!");
+ // The live range might not exist after fun with physreg coalescing.
+ if (ALR == IntA.end()) return false;
VNInfo *AValNo = ALR->valno;
// If it's re-defined by an early clobber somewhere in the live range, then
// it's not safe to eliminate the copy. FIXME: This is a temporary workaround.
@@ -149,26 +157,21 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
// If AValNo is defined as a copy from IntB, we can potentially process this.
// Get the instruction that defines this value number.
- unsigned SrcReg = li_->getVNInfoSourceReg(AValNo);
- if (!SrcReg) return false; // Not defined by a copy.
-
- // If the value number is not defined by a copy instruction, ignore it.
-
- // If the source register comes from an interval other than IntB, we can't
- // handle this.
- if (SrcReg != IntB.reg) return false;
+ if (!CP.isCoalescable(AValNo->getCopy()))
+ return false;
// Get the LiveRange in IntB that this value number starts with.
LiveInterval::iterator ValLR =
IntB.FindLiveRangeContaining(AValNo->def.getPrevSlot());
- assert(ValLR != IntB.end() && "Live range not found!");
+ if (ValLR == IntB.end())
+ return false;
// Make sure that the end of the live range is inside the same block as
// CopyMI.
MachineInstr *ValLREndInst =
li_->getInstructionFromIndex(ValLR->end.getPrevSlot());
- if (!ValLREndInst ||
- ValLREndInst->getParent() != CopyMI->getParent()) return false;
+ if (!ValLREndInst || ValLREndInst->getParent() != CopyMI->getParent())
+ return false;
// Okay, we now know that ValLR ends in the same block that the CopyMI
// live-range starts. If there are no intervening live ranges between them in
@@ -183,7 +186,7 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
for (const unsigned* SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR)
if (li_->hasInterval(*SR) && IntA.overlaps(li_->getInterval(*SR))) {
DEBUG({
- dbgs() << "Interfere with sub-register ";
+ dbgs() << "\t\tInterfere with sub-register ";
li_->getInterval(*SR).print(dbgs(), tri_);
});
return false;
@@ -191,7 +194,7 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
}
DEBUG({
- dbgs() << "\nExtending: ";
+ dbgs() << "Extending: ";
IntB.print(dbgs(), tri_);
});
@@ -211,6 +214,8 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
// physreg has sub-registers, update their live intervals as well.
if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
+ if (!li_->hasInterval(*SR))
+ continue;
LiveInterval &SRLI = li_->getInterval(*SR);
SRLI.addRange(LiveRange(FillerStart, FillerEnd,
SRLI.getNextValue(FillerStart, 0, true,
@@ -220,7 +225,6 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
// Okay, merge "B1" into the same value number as "B0".
if (BValNo != ValLR->valno) {
- IntB.addKills(ValLR->valno, BValNo->kills);
IntB.MergeValueNumberInto(BValNo, ValLR->valno);
}
DEBUG({
@@ -234,13 +238,12 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
int UIdx = ValLREndInst->findRegisterUseOperandIdx(IntB.reg, true);
if (UIdx != -1) {
ValLREndInst->getOperand(UIdx).setIsKill(false);
- ValLR->valno->removeKill(FillerStart);
}
// If the copy instruction was killing the destination register before the
// merge, find the last use and trim the live range. That will also add the
// isKill marker.
- if (CopyMI->killsRegister(IntA.reg))
+ if (ALR->end == CopyIdx)
TrimLiveIntervalToLastUse(CopyUseIdx, CopyMI->getParent(), IntA, ALR);
++numExtends;
@@ -263,6 +266,9 @@ bool SimpleRegisterCoalescing::HasOtherReachingDefs(LiveInterval &IntA,
for (; BI != IntB.ranges.end() && AI->end >= BI->start; ++BI) {
if (BI->valno == BValNo)
continue;
+ // When BValNo is null, we're looking for a dummy clobber-value for a subreg.
+ if (!BValNo && !BI->valno->isDefAccurate() && !BI->valno->getCopy())
+ continue;
if (BI->start <= AI->start && BI->end > AI->start)
return true;
if (BI->start > AI->start && BI->start < AI->end)
@@ -305,23 +311,31 @@ TransferImplicitOps(MachineInstr *MI, MachineInstr *NewMI) {
///
/// This returns true if an interval was modified.
///
-bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
- LiveInterval &IntB,
+bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP,
MachineInstr *CopyMI) {
- SlotIndex CopyIdx =
- li_->getInstructionIndex(CopyMI).getDefIndex();
-
// FIXME: For now, only eliminate the copy by commuting its def when the
// source register is a virtual register. We want to guard against cases
// where the copy is a back edge copy and commuting the def lengthen the
// live interval of the source register to the entire loop.
- if (TargetRegisterInfo::isPhysicalRegister(IntA.reg))
+ if (CP.isPhys() && CP.isFlipped())
return false;
+ // Bail if there is no dst interval.
+ if (!li_->hasInterval(CP.getDstReg()))
+ return false;
+
+ SlotIndex CopyIdx =
+ li_->getInstructionIndex(CopyMI).getDefIndex();
+
+ LiveInterval &IntA =
+ li_->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
+ LiveInterval &IntB =
+ li_->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
+
// BValNo is a value number in B that is defined by a copy from A. 'B3' in
// the example above.
LiveInterval::iterator BLR = IntB.FindLiveRangeContaining(CopyIdx);
- assert(BLR != IntB.end() && "Live range not found!");
+ if (BLR == IntB.end()) return false;
VNInfo *BValNo = BLR->valno;
// Get the location that B is defined at. Two options: either this value has
@@ -343,6 +357,8 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
AValNo->isUnused() || AValNo->hasPHIKill())
return false;
MachineInstr *DefMI = li_->getInstructionFromIndex(AValNo->def);
+ if (!DefMI)
+ return false;
const TargetInstrDesc &TID = DefMI->getDesc();
if (!TID.isCommutable())
return false;
@@ -373,6 +389,14 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
if (HasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
return false;
+ // Abort if the aliases of IntB.reg have values that are not simply the
+ // clobbers from the superreg.
+ if (TargetRegisterInfo::isPhysicalRegister(IntB.reg))
+ for (const unsigned *AS = tri_->getAliasSet(IntB.reg); *AS; ++AS)
+ if (li_->hasInterval(*AS) &&
+ HasOtherReachingDefs(IntA, li_->getInterval(*AS), AValNo, 0))
+ return false;
+
// If some of the uses of IntA.reg is already coalesced away, return false.
// It's not possible to determine whether it's safe to perform the coalescing.
for (MachineRegisterInfo::use_nodbg_iterator UI =
@@ -387,6 +411,8 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
return false;
}
+ DEBUG(dbgs() << "\tRemoveCopyByCommutingDef: " << *DefMI);
+
// At this point we have decided that it is legal to do this
// transformation. Start by commuting the instruction.
MachineBasicBlock *MBB = DefMI->getParent();
@@ -403,7 +429,6 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
bool BHasPHIKill = BValNo->hasPHIKill();
SmallVector<VNInfo*, 4> BDeadValNos;
- VNInfo::KillSet BKills;
std::map<SlotIndex, SlotIndex> BExtend;
// If ALR and BLR overlaps and end of BLR extends beyond end of ALR, e.g.
@@ -414,16 +439,11 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
// C = A<kill>
// ...
// = B
- //
- // then do not add kills of A to the newly created B interval.
bool Extended = BLR->end > ALR->end && ALR->end != ALR->start;
if (Extended)
BExtend[ALR->end] = BLR->end;
// Update uses of IntA of the specific Val# with IntB.
- bool BHasSubRegs = false;
- if (TargetRegisterInfo::isPhysicalRegister(IntB.reg))
- BHasSubRegs = *tri_->getSubRegisters(IntB.reg);
for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(IntA.reg),
UE = mri_->use_end(); UI != UE;) {
MachineOperand &UseMO = UI.getOperand();
@@ -441,66 +461,63 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
if (ULR == IntA.end() || ULR->valno != AValNo)
continue;
- UseMO.setReg(NewReg);
+ if (TargetRegisterInfo::isPhysicalRegister(NewReg))
+ UseMO.substPhysReg(NewReg, *tri_);
+ else
+ UseMO.setReg(NewReg);
if (UseMI == CopyMI)
continue;
if (UseMO.isKill()) {
if (Extended)
UseMO.setIsKill(false);
- else
- BKills.push_back(UseIdx.getDefIndex());
}
- unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- if (!tii_->isMoveInstr(*UseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
+ if (!UseMI->isCopy())
continue;
- if (DstReg == IntB.reg) {
- // This copy will become a noop. If it's defining a new val#,
- // remove that val# as well. However this live range is being
- // extended to the end of the existing live range defined by the copy.
- SlotIndex DefIdx = UseIdx.getDefIndex();
- const LiveRange *DLR = IntB.getLiveRangeContaining(DefIdx);
- BHasPHIKill |= DLR->valno->hasPHIKill();
- assert(DLR->valno->def == DefIdx);
- BDeadValNos.push_back(DLR->valno);
- BExtend[DLR->start] = DLR->end;
- JoinedCopies.insert(UseMI);
- // If this is a kill but it's going to be removed, the last use
- // of the same val# is the new kill.
- if (UseMO.isKill())
- BKills.pop_back();
- }
+ if (UseMI->getOperand(0).getReg() != IntB.reg ||
+ UseMI->getOperand(0).getSubReg())
+ continue;
+
+ // This copy will become a noop. If it's defining a new val#,
+ // remove that val# as well. However this live range is being
+ // extended to the end of the existing live range defined by the copy.
+ SlotIndex DefIdx = UseIdx.getDefIndex();
+ const LiveRange *DLR = IntB.getLiveRangeContaining(DefIdx);
+ if (!DLR)
+ continue;
+ BHasPHIKill |= DLR->valno->hasPHIKill();
+ assert(DLR->valno->def == DefIdx);
+ BDeadValNos.push_back(DLR->valno);
+ BExtend[DLR->start] = DLR->end;
+ JoinedCopies.insert(UseMI);
}
// We need to insert a new liverange: [ALR.start, LastUse). It may be we can
// simply extend BLR if CopyMI doesn't end the range.
DEBUG({
- dbgs() << "\nExtending: ";
+ dbgs() << "Extending: ";
IntB.print(dbgs(), tri_);
});
// Remove val#'s defined by copies that will be coalesced away.
for (unsigned i = 0, e = BDeadValNos.size(); i != e; ++i) {
VNInfo *DeadVNI = BDeadValNos[i];
- if (BHasSubRegs) {
- for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
- LiveInterval &SRLI = li_->getInterval(*SR);
- const LiveRange *SRLR = SRLI.getLiveRangeContaining(DeadVNI->def);
- SRLI.removeValNo(SRLR->valno);
+ if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
+ for (const unsigned *AS = tri_->getAliasSet(IntB.reg); *AS; ++AS) {
+ if (!li_->hasInterval(*AS))
+ continue;
+ LiveInterval &ASLI = li_->getInterval(*AS);
+ if (const LiveRange *ASLR = ASLI.getLiveRangeContaining(DeadVNI->def))
+ ASLI.removeValNo(ASLR->valno);
}
}
IntB.removeValNo(BDeadValNos[i]);
}
// Extend BValNo by merging in IntA live ranges of AValNo. Val# definition
- // is updated. Kills are also updated.
+ // is updated.
VNInfo *ValNo = BValNo;
ValNo->def = AValNo->def;
ValNo->setCopy(0);
- for (unsigned j = 0, ee = ValNo->kills.size(); j != ee; ++j) {
- if (ValNo->kills[j] != BLR->end)
- BKills.push_back(ValNo->kills[j]);
- }
- ValNo->kills.clear();
for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
AI != AE; ++AI) {
if (AI->valno != AValNo) continue;
@@ -510,24 +527,12 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
if (EI != BExtend.end())
End = EI->second;
IntB.addRange(LiveRange(AI->start, End, ValNo));
-
- // If the IntB live range is assigned to a physical register, and if that
- // physreg has sub-registers, update their live intervals as well.
- if (BHasSubRegs) {
- for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
- LiveInterval &SRLI = li_->getInterval(*SR);
- SRLI.MergeInClobberRange(*li_, AI->start, End,
- li_->getVNInfoAllocator());
- }
- }
}
- IntB.addKills(ValNo, BKills);
ValNo->setHasPHIKill(BHasPHIKill);
DEBUG({
dbgs() << " result = ";
IntB.print(dbgs(), tri_);
- dbgs() << '\n';
dbgs() << "\nShortening: ";
IntA.print(dbgs(), tri_);
});
@@ -615,13 +620,10 @@ SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(SlotIndex CopyIdx,
// of last use.
LastUse->setIsKill();
removeRange(li, LastUseIdx.getDefIndex(), LR->end, li_, tri_);
- LR->valno->addKill(LastUseIdx.getDefIndex());
- unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- if (tii_->isMoveInstr(*LastUseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
- DstReg == li.reg) {
- // Last use is itself an identity code.
- int DeadIdx = LastUseMI->findRegisterDefOperandIdx(li.reg, false, tri_);
- LastUseMI->getOperand(DeadIdx).setIsDead();
+ if (LastUseMI->isCopy()) {
+ MachineOperand &DefMO = LastUseMI->getOperand(0);
+ if (DefMO.getReg() == li.reg && !DefMO.getSubReg())
+ DefMO.setIsDead();
}
return true;
}
@@ -656,6 +658,7 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
ValNo->isUnused() || ValNo->hasPHIKill())
return false;
MachineInstr *DefMI = li_->getInstructionFromIndex(ValNo->def);
+ assert(DefMI && "Defining instruction disappeared");
const TargetInstrDesc &TID = DefMI->getDesc();
if (!TID.isAsCheapAsAMove())
return false;
@@ -694,33 +697,20 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
return false;
}
- SlotIndex DefIdx = CopyIdx.getDefIndex();
- const LiveRange *DLR= li_->getInterval(DstReg).getLiveRangeContaining(DefIdx);
- DLR->valno->setCopy(0);
- // Don't forget to update sub-register intervals.
- if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
- for (const unsigned* SR = tri_->getSubRegisters(DstReg); *SR; ++SR) {
- if (!li_->hasInterval(*SR))
- continue;
- const LiveRange *DLR =
- li_->getInterval(*SR).getLiveRangeContaining(DefIdx);
- if (DLR && DLR->valno->getCopy() == CopyMI)
- DLR->valno->setCopy(0);
- }
- }
+ RemoveCopyFlag(DstReg, CopyMI);
// If copy kills the source register, find the last use and propagate
// kill.
bool checkForDeadDef = false;
MachineBasicBlock *MBB = CopyMI->getParent();
- if (CopyMI->killsRegister(SrcInt.reg))
+ if (SrcLR->end == CopyIdx.getDefIndex())
if (!TrimLiveIntervalToLastUse(CopyIdx, MBB, SrcInt, SrcLR)) {
checkForDeadDef = true;
}
MachineBasicBlock::iterator MII =
llvm::next(MachineBasicBlock::iterator(CopyMI));
- tii_->reMaterialize(*MBB, MII, DstReg, DstSubIdx, DefMI, tri_);
+ tii_->reMaterialize(*MBB, MII, DstReg, DstSubIdx, DefMI, *tri_);
MachineInstr *NewMI = prior(MII);
if (checkForDeadDef) {
@@ -740,24 +730,8 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
MachineOperand &MO = CopyMI->getOperand(i);
if (MO.isReg() && MO.isImplicit())
NewMI->addOperand(MO);
- if (MO.isDef() && li_->hasInterval(MO.getReg())) {
- unsigned Reg = MO.getReg();
- const LiveRange *DLR =
- li_->getInterval(Reg).getLiveRangeContaining(DefIdx);
- if (DLR && DLR->valno->getCopy() == CopyMI)
- DLR->valno->setCopy(0);
- // Handle subregs as well
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- for (const unsigned* SR = tri_->getSubRegisters(Reg); *SR; ++SR) {
- if (!li_->hasInterval(*SR))
- continue;
- const LiveRange *DLR =
- li_->getInterval(*SR).getLiveRangeContaining(DefIdx);
- if (DLR && DLR->valno->getCopy() == CopyMI)
- DLR->valno->setCopy(0);
- }
- }
- }
+ if (MO.isDef())
+ RemoveCopyFlag(MO.getReg(), CopyMI);
}
TransferImplicitOps(CopyMI, NewMI);
@@ -776,128 +750,69 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
/// being updated is not zero, make sure to set it to the correct physical
/// subregister.
void
-SimpleRegisterCoalescing::UpdateRegDefsUses(unsigned SrcReg, unsigned DstReg,
- unsigned SubIdx) {
- bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
- if (DstIsPhys && SubIdx) {
- // Figure out the real physical register we are updating with.
- DstReg = tri_->getSubReg(DstReg, SubIdx);
- SubIdx = 0;
- }
-
- // Copy the register use-list before traversing it. We may be adding operands
- // and invalidating pointers.
- SmallVector<std::pair<MachineInstr*, unsigned>, 32> reglist;
- for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(SrcReg),
- E = mri_->reg_end(); I != E; ++I)
- reglist.push_back(std::make_pair(&*I, I.getOperandNo()));
-
- for (unsigned N=0; N != reglist.size(); ++N) {
- MachineInstr *UseMI = reglist[N].first;
- MachineOperand &O = UseMI->getOperand(reglist[N].second);
- unsigned OldSubIdx = O.getSubReg();
+SimpleRegisterCoalescing::UpdateRegDefsUses(const CoalescerPair &CP) {
+ bool DstIsPhys = CP.isPhys();
+ unsigned SrcReg = CP.getSrcReg();
+ unsigned DstReg = CP.getDstReg();
+ unsigned SubIdx = CP.getSubIdx();
+
+ for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(SrcReg);
+ MachineInstr *UseMI = I.skipInstruction();) {
+ // A PhysReg copy that won't be coalesced can perhaps be rematerialized
+ // instead.
if (DstIsPhys) {
- unsigned UseDstReg = DstReg;
- if (OldSubIdx)
- UseDstReg = tri_->getSubReg(DstReg, OldSubIdx);
-
- unsigned CopySrcReg, CopyDstReg, CopySrcSubIdx, CopyDstSubIdx;
- if (tii_->isMoveInstr(*UseMI, CopySrcReg, CopyDstReg,
- CopySrcSubIdx, CopyDstSubIdx) &&
- CopySrcReg != CopyDstReg &&
- CopySrcReg == SrcReg && CopyDstReg != UseDstReg) {
- // If the use is a copy and it won't be coalesced away, and its source
- // is defined by a trivial computation, try to rematerialize it instead.
- if (ReMaterializeTrivialDef(li_->getInterval(SrcReg), CopyDstReg,
- CopyDstSubIdx, UseMI))
- continue;
- }
+ if (UseMI->isCopy() &&
+ !UseMI->getOperand(1).getSubReg() &&
+ !UseMI->getOperand(0).getSubReg() &&
+ UseMI->getOperand(1).getReg() == SrcReg &&
+ UseMI->getOperand(0).getReg() != SrcReg &&
+ UseMI->getOperand(0).getReg() != DstReg &&
+ !JoinedCopies.count(UseMI) &&
+ ReMaterializeTrivialDef(li_->getInterval(SrcReg),
+ UseMI->getOperand(0).getReg(), 0, UseMI))
+ continue;
+ }
- O.setReg(UseDstReg);
- O.setSubReg(0);
- if (OldSubIdx) {
- // Def and kill of subregister of a virtual register actually defs and
- // kills the whole register. Add imp-defs and imp-kills as needed.
- if (O.isDef()) {
- if(O.isDead())
- UseMI->addRegisterDead(DstReg, tri_, true);
- else
- UseMI->addRegisterDefined(DstReg, tri_);
- } else if (!O.isUndef() &&
- (O.isKill() ||
- UseMI->isRegTiedToDefOperand(&O-&UseMI->getOperand(0))))
- UseMI->addRegisterKilled(DstReg, tri_, true);
- }
- continue;
+ SmallVector<unsigned,8> Ops;
+ bool Reads, Writes;
+ tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
+ bool Kills = false, Deads = false;
+
+ // Replace SrcReg with DstReg in all UseMI operands.
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ MachineOperand &MO = UseMI->getOperand(Ops[i]);
+ Kills |= MO.isKill();
+ Deads |= MO.isDead();
+
+ if (DstIsPhys)
+ MO.substPhysReg(DstReg, *tri_);
+ else
+ MO.substVirtReg(DstReg, SubIdx, *tri_);
}
- // Sub-register indexes goes from small to large. e.g.
- // RAX: 1 -> AL, 2 -> AX, 3 -> EAX
- // EAX: 1 -> AL, 2 -> AX
- // So RAX's sub-register 2 is AX, RAX's sub-regsiter 3 is EAX, whose
- // sub-register 2 is also AX.
- if (SubIdx && OldSubIdx && SubIdx != OldSubIdx)
- assert(OldSubIdx < SubIdx && "Conflicting sub-register index!");
- else if (SubIdx)
- O.setSubReg(SubIdx);
- // Remove would-be duplicated kill marker.
- if (O.isKill() && UseMI->killsRegister(DstReg))
- O.setIsKill(false);
- O.setReg(DstReg);
-
- // After updating the operand, check if the machine instruction has
- // become a copy. If so, update its val# information.
+ // This instruction is a copy that will be removed.
if (JoinedCopies.count(UseMI))
continue;
- const TargetInstrDesc &TID = UseMI->getDesc();
- unsigned CopySrcReg, CopyDstReg, CopySrcSubIdx, CopyDstSubIdx;
- if (TID.getNumDefs() == 1 && TID.getNumOperands() > 2 &&
- tii_->isMoveInstr(*UseMI, CopySrcReg, CopyDstReg,
- CopySrcSubIdx, CopyDstSubIdx) &&
- CopySrcReg != CopyDstReg &&
- (TargetRegisterInfo::isVirtualRegister(CopyDstReg) ||
- allocatableRegs_[CopyDstReg])) {
- LiveInterval &LI = li_->getInterval(CopyDstReg);
- SlotIndex DefIdx =
- li_->getInstructionIndex(UseMI).getDefIndex();
- if (const LiveRange *DLR = LI.getLiveRangeContaining(DefIdx)) {
- if (DLR->valno->def == DefIdx)
- DLR->valno->setCopy(UseMI);
- }
+ if (SubIdx) {
+ // If UseMI was a simple SrcReg def, make sure we didn't turn it into a
+ // read-modify-write of DstReg.
+ if (Deads)
+ UseMI->addRegisterDead(DstReg, tri_);
+ else if (!Reads && Writes)
+ UseMI->addRegisterDefined(DstReg, tri_);
+
+ // Kill flags apply to the whole physical register.
+ if (DstIsPhys && Kills)
+ UseMI->addRegisterKilled(DstReg, tri_);
}
- }
-}
-/// RemoveUnnecessaryKills - Remove kill markers that are no longer accurate
-/// due to live range lengthening as the result of coalescing.
-void SimpleRegisterCoalescing::RemoveUnnecessaryKills(unsigned Reg,
- LiveInterval &LI) {
- for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
- UE = mri_->use_end(); UI != UE; ++UI) {
- MachineOperand &UseMO = UI.getOperand();
- if (!UseMO.isKill())
- continue;
- MachineInstr *UseMI = UseMO.getParent();
- SlotIndex UseIdx =
- li_->getInstructionIndex(UseMI).getUseIndex();
- const LiveRange *LR = LI.getLiveRangeContaining(UseIdx);
- if (!LR ||
- (!LR->valno->isKill(UseIdx.getDefIndex()) &&
- LR->valno->def != UseIdx.getDefIndex())) {
- // Interesting problem. After coalescing reg1027's def and kill are both
- // at the same point: %reg1027,0.000000e+00 = [56,814:0) 0 at 70-(814)
- //
- // bb5:
- // 60 %reg1027<def> = t2MOVr %reg1027, 14, %reg0, %reg0
- // 68 %reg1027<def> = t2LDRi12 %reg1027<kill>, 8, 14, %reg0
- // 76 t2CMPzri %reg1038<kill,undef>, 0, 14, %reg0, %CPSR<imp-def>
- // 84 %reg1027<def> = t2MOVr %reg1027, 14, %reg0, %reg0
- // 96 t2Bcc mbb<bb5,0x2030910>, 1, %CPSR<kill>
- //
- // Do not remove the kill marker on t2LDRi12.
- UseMO.setIsKill(false);
- }
+ DEBUG({
+ dbgs() << "\t\tupdated: ";
+ if (!UseMI->isDebugValue())
+ dbgs() << li_->getInstructionIndex(UseMI) << "\t";
+ dbgs() << *UseMI;
+ });
}
}
@@ -953,6 +868,27 @@ bool SimpleRegisterCoalescing::RemoveDeadDef(LiveInterval &li,
return removeIntervalIfEmpty(li, li_, tri_);
}
+void SimpleRegisterCoalescing::RemoveCopyFlag(unsigned DstReg,
+ const MachineInstr *CopyMI) {
+ SlotIndex DefIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
+ if (li_->hasInterval(DstReg)) {
+ LiveInterval &LI = li_->getInterval(DstReg);
+ if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
+ if (LR->valno->getCopy() == CopyMI)
+ LR->valno->setCopy(0);
+ }
+ if (!TargetRegisterInfo::isPhysicalRegister(DstReg))
+ return;
+ for (const unsigned* AS = tri_->getAliasSet(DstReg); *AS; ++AS) {
+ if (!li_->hasInterval(*AS))
+ continue;
+ LiveInterval &LI = li_->getInterval(*AS);
+ if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
+ if (LR->valno->getCopy() == CopyMI)
+ LR->valno->setCopy(0);
+ }
+}
+
/// PropagateDeadness - Propagate the dead marker to the instruction which
/// defines the val#.
static void PropagateDeadness(LiveInterval &li, MachineInstr *CopyMI,
@@ -961,7 +897,7 @@ static void PropagateDeadness(LiveInterval &li, MachineInstr *CopyMI,
MachineInstr *DefMI =
li_->getInstructionFromIndex(LRStart.getDefIndex());
if (DefMI && DefMI != CopyMI) {
- int DeadIdx = DefMI->findRegisterDefOperandIdx(li.reg, false);
+ int DeadIdx = DefMI->findRegisterDefOperandIdx(li.reg);
if (DeadIdx != -1)
DefMI->getOperand(DeadIdx).setIsDead();
else
@@ -986,8 +922,8 @@ SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li,
// Live-in to the function but dead. Remove it from entry live-in set.
if (mf_->begin()->isLiveIn(li.reg))
mf_->begin()->removeLiveIn(li.reg);
- const LiveRange *LR = li.getLiveRangeContaining(CopyIdx);
- removeRange(li, LR->start, LR->end, li_, tri_);
+ if (const LiveRange *LR = li.getLiveRangeContaining(CopyIdx))
+ removeRange(li, LR->start, LR->end, li_, tri_);
return removeIntervalIfEmpty(li, li_, tri_);
}
@@ -1025,305 +961,57 @@ SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li,
// val#, then propagate the dead marker.
PropagateDeadness(li, CopyMI, RemoveStart, li_, tri_);
++numDeadValNo;
-
- if (LR->valno->isKill(RemoveEnd))
- LR->valno->removeKill(RemoveEnd);
}
removeRange(li, RemoveStart, RemoveEnd, li_, tri_);
return removeIntervalIfEmpty(li, li_, tri_);
}
-/// CanCoalesceWithImpDef - Returns true if the specified copy instruction
-/// from an implicit def to another register can be coalesced away.
-bool SimpleRegisterCoalescing::CanCoalesceWithImpDef(MachineInstr *CopyMI,
- LiveInterval &li,
- LiveInterval &ImpLi) const{
- if (!CopyMI->killsRegister(ImpLi.reg))
- return false;
- // Make sure this is the only use.
- for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(ImpLi.reg),
- UE = mri_->use_end(); UI != UE;) {
- MachineInstr *UseMI = &*UI;
- ++UI;
- if (CopyMI == UseMI || JoinedCopies.count(UseMI))
- continue;
- return false;
- }
- return true;
-}
-
-
-/// isWinToJoinVRWithSrcPhysReg - Return true if it's worth while to join a
-/// a virtual destination register with physical source register.
-bool
-SimpleRegisterCoalescing::isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI,
- MachineBasicBlock *CopyMBB,
- LiveInterval &DstInt,
- LiveInterval &SrcInt) {
- // If the virtual register live interval is long but it has low use desity,
- // do not join them, instead mark the physical register as its allocation
- // preference.
- const TargetRegisterClass *RC = mri_->getRegClass(DstInt.reg);
- unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
- unsigned Length = li_->getApproximateInstructionCount(DstInt);
- if (Length > Threshold &&
- (((float)std::distance(mri_->use_nodbg_begin(DstInt.reg),
- mri_->use_nodbg_end()) / Length) <
- (1.0 / Threshold)))
- return false;
-
- // If the virtual register live interval extends into a loop, turn down
- // aggressiveness.
- SlotIndex CopyIdx =
- li_->getInstructionIndex(CopyMI).getDefIndex();
- const MachineLoop *L = loopInfo->getLoopFor(CopyMBB);
- if (!L) {
- // Let's see if the virtual register live interval extends into the loop.
- LiveInterval::iterator DLR = DstInt.FindLiveRangeContaining(CopyIdx);
- assert(DLR != DstInt.end() && "Live range not found!");
- DLR = DstInt.FindLiveRangeContaining(DLR->end.getNextSlot());
- if (DLR != DstInt.end()) {
- CopyMBB = li_->getMBBFromIndex(DLR->start);
- L = loopInfo->getLoopFor(CopyMBB);
- }
- }
- if (!L || Length <= Threshold)
- return true;
-
- SlotIndex UseIdx = CopyIdx.getUseIndex();
- LiveInterval::iterator SLR = SrcInt.FindLiveRangeContaining(UseIdx);
- MachineBasicBlock *SMBB = li_->getMBBFromIndex(SLR->start);
- if (loopInfo->getLoopFor(SMBB) != L) {
- if (!loopInfo->isLoopHeader(CopyMBB))
- return false;
- // If vr's live interval extends pass the loop header, do not join.
- for (MachineBasicBlock::succ_iterator SI = CopyMBB->succ_begin(),
- SE = CopyMBB->succ_end(); SI != SE; ++SI) {
- MachineBasicBlock *SuccMBB = *SI;
- if (SuccMBB == CopyMBB)
- continue;
- if (DstInt.overlaps(li_->getMBBStartIdx(SuccMBB),
- li_->getMBBEndIdx(SuccMBB)))
- return false;
- }
- }
- return true;
-}
-
-/// isWinToJoinVRWithDstPhysReg - Return true if it's worth while to join a
-/// copy from a virtual source register to a physical destination register.
+/// isWinToJoinCrossClass - Return true if it's profitable to coalesce
+/// two virtual registers from different register classes.
bool
-SimpleRegisterCoalescing::isWinToJoinVRWithDstPhysReg(MachineInstr *CopyMI,
- MachineBasicBlock *CopyMBB,
- LiveInterval &DstInt,
- LiveInterval &SrcInt) {
- // If the virtual register live interval is long but it has low use density,
- // do not join them, instead mark the physical register as its allocation
- // preference.
- const TargetRegisterClass *RC = mri_->getRegClass(SrcInt.reg);
- unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
- unsigned Length = li_->getApproximateInstructionCount(SrcInt);
- if (Length > Threshold &&
- (((float)std::distance(mri_->use_nodbg_begin(SrcInt.reg),
- mri_->use_nodbg_end()) / Length) <
- (1.0 / Threshold)))
- return false;
-
- if (SrcInt.empty())
- // Must be implicit_def.
- return false;
-
- // If the virtual register live interval is defined or cross a loop, turn
- // down aggressiveness.
- SlotIndex CopyIdx =
- li_->getInstructionIndex(CopyMI).getDefIndex();
- SlotIndex UseIdx = CopyIdx.getUseIndex();
- LiveInterval::iterator SLR = SrcInt.FindLiveRangeContaining(UseIdx);
- assert(SLR != SrcInt.end() && "Live range not found!");
- SLR = SrcInt.FindLiveRangeContaining(SLR->start.getPrevSlot());
- if (SLR == SrcInt.end())
+SimpleRegisterCoalescing::isWinToJoinCrossClass(unsigned SrcReg,
+ unsigned DstReg,
+ const TargetRegisterClass *SrcRC,
+ const TargetRegisterClass *DstRC,
+ const TargetRegisterClass *NewRC) {
+ unsigned NewRCCount = allocatableRCRegs_[NewRC].count();
+ // This heuristic is good enough in practice, but it's obviously not *right*.
+ // 4 is a magic number that works well enough for x86, ARM, etc. It filters

+ // out all but the most restrictive register classes.
+ if (NewRCCount > 4 ||
+ // If the function is fairly small, exit early and coalesce aggressively.
+ // For really special register classes with 3 or
+ // fewer registers, be a bit more careful.
+ (li_->getFuncInstructionCount() / NewRCCount) < 8)
return true;
- MachineBasicBlock *SMBB = li_->getMBBFromIndex(SLR->start);
- const MachineLoop *L = loopInfo->getLoopFor(SMBB);
-
- if (!L || Length <= Threshold)
+ LiveInterval &SrcInt = li_->getInterval(SrcReg);
+ LiveInterval &DstInt = li_->getInterval(DstReg);
+ unsigned SrcSize = li_->getApproximateInstructionCount(SrcInt);
+ unsigned DstSize = li_->getApproximateInstructionCount(DstInt);
+ if (SrcSize <= NewRCCount && DstSize <= NewRCCount)
return true;
-
- if (loopInfo->getLoopFor(CopyMBB) != L) {
- if (SMBB != L->getLoopLatch())
+ // Estimate *register use density*. If it doubles or more, abort.
+ unsigned SrcUses = std::distance(mri_->use_nodbg_begin(SrcReg),
+ mri_->use_nodbg_end());
+ unsigned DstUses = std::distance(mri_->use_nodbg_begin(DstReg),
+ mri_->use_nodbg_end());
+ unsigned NewUses = SrcUses + DstUses;
+ unsigned NewSize = SrcSize + DstSize;
+ if (SrcRC != NewRC && SrcSize > NewRCCount) {
+ unsigned SrcRCCount = allocatableRCRegs_[SrcRC].count();
+ if (NewUses*SrcSize*SrcRCCount > 2*SrcUses*NewSize*NewRCCount)
return false;
- // If vr's live interval is extended from before the loop latch, do not
- // join.
- for (MachineBasicBlock::pred_iterator PI = SMBB->pred_begin(),
- PE = SMBB->pred_end(); PI != PE; ++PI) {
- MachineBasicBlock *PredMBB = *PI;
- if (PredMBB == SMBB)
- continue;
- if (SrcInt.overlaps(li_->getMBBStartIdx(PredMBB),
- li_->getMBBEndIdx(PredMBB)))
- return false;
- }
}
- return true;
-}
-
-/// isWinToJoinCrossClass - Return true if it's profitable to coalesce
-/// two virtual registers from different register classes.
-bool
-SimpleRegisterCoalescing::isWinToJoinCrossClass(unsigned LargeReg,
- unsigned SmallReg,
- unsigned Threshold) {
- // Then make sure the intervals are *short*.
- LiveInterval &LargeInt = li_->getInterval(LargeReg);
- LiveInterval &SmallInt = li_->getInterval(SmallReg);
- unsigned LargeSize = li_->getApproximateInstructionCount(LargeInt);
- unsigned SmallSize = li_->getApproximateInstructionCount(SmallInt);
- if (LargeSize > Threshold) {
- unsigned SmallUses = std::distance(mri_->use_nodbg_begin(SmallReg),
- mri_->use_nodbg_end());
- unsigned LargeUses = std::distance(mri_->use_nodbg_begin(LargeReg),
- mri_->use_nodbg_end());
- if (SmallUses*LargeSize < LargeUses*SmallSize)
+ if (DstRC != NewRC && DstSize > NewRCCount) {
+ unsigned DstRCCount = allocatableRCRegs_[DstRC].count();
+ if (NewUses*DstSize*DstRCCount > 2*DstUses*NewSize*NewRCCount)
return false;
}
return true;
}
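The SrcRC branch of the check above is a cross-multiplied form of (NewUses / NewSize) > 2 * (SrcUses / SrcSize) * (NewRCCount / SrcRCCount): the cross-class join is rejected when the merged interval's use density grows too much relative to how restrictive the new register class is. A worked numeric example (all figures invented for illustration):

#include <cassert>

// Mirrors the SrcRC test above:
//   NewUses*SrcSize*SrcRCCount > 2*SrcUses*NewSize*NewRCCount.
static bool densityTooHigh(unsigned NewUses, unsigned NewSize, unsigned NewRCCount,
                           unsigned SrcUses, unsigned SrcSize, unsigned SrcRCCount) {
  return NewUses * SrcSize * SrcRCCount > 2 * SrcUses * NewSize * NewRCCount;
}

int main() {
  // Source interval: 10 uses across ~40 instructions, class with 16 registers.
  // Merged interval: 30 uses across ~50 instructions, class with only 4 registers.
  // Density ratio (30/50)/(10/40) = 2.4 is well above 2*(4/16) = 0.5, so reject.
  assert(densityTooHigh(30, 50, 4, 10, 40, 16));
  // Same growth but the new class is as large as the old one: 2.4 > 2 still rejects.
  assert(densityTooHigh(30, 50, 16, 10, 40, 16));
  // Modest growth passes: (15/50)/(10/40) = 1.2 <= 2.
  assert(!densityTooHigh(15, 50, 16, 10, 40, 16));
  return 0;
}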
-/// HasIncompatibleSubRegDefUse - If we are trying to coalesce a virtual
-/// register with a physical register, check if any of the virtual register
-/// operand is a sub-register use or def. If so, make sure it won't result
-/// in an illegal extract_subreg or insert_subreg instruction. e.g.
-/// vr1024 = extract_subreg vr1025, 1
-/// ...
-/// vr1024 = mov8rr AH
-/// If vr1024 is coalesced with AH, the extract_subreg is now illegal since
-/// AH does not have a super-reg whose sub-register 1 is AH.
-bool
-SimpleRegisterCoalescing::HasIncompatibleSubRegDefUse(MachineInstr *CopyMI,
- unsigned VirtReg,
- unsigned PhysReg) {
- for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(VirtReg),
- E = mri_->reg_end(); I != E; ++I) {
- MachineOperand &O = I.getOperand();
- if (O.isDebug())
- continue;
- MachineInstr *MI = &*I;
- if (MI == CopyMI || JoinedCopies.count(MI))
- continue;
- unsigned SubIdx = O.getSubReg();
- if (SubIdx && !tri_->getSubReg(PhysReg, SubIdx))
- return true;
- if (MI->isExtractSubreg()) {
- SubIdx = MI->getOperand(2).getImm();
- if (O.isUse() && !tri_->getSubReg(PhysReg, SubIdx))
- return true;
- if (O.isDef()) {
- unsigned SrcReg = MI->getOperand(1).getReg();
- const TargetRegisterClass *RC =
- TargetRegisterInfo::isPhysicalRegister(SrcReg)
- ? tri_->getPhysicalRegisterRegClass(SrcReg)
- : mri_->getRegClass(SrcReg);
- if (!tri_->getMatchingSuperReg(PhysReg, SubIdx, RC))
- return true;
- }
- }
- if (MI->isInsertSubreg() || MI->isSubregToReg()) {
- SubIdx = MI->getOperand(3).getImm();
- if (VirtReg == MI->getOperand(0).getReg()) {
- if (!tri_->getSubReg(PhysReg, SubIdx))
- return true;
- } else {
- unsigned DstReg = MI->getOperand(0).getReg();
- const TargetRegisterClass *RC =
- TargetRegisterInfo::isPhysicalRegister(DstReg)
- ? tri_->getPhysicalRegisterRegClass(DstReg)
- : mri_->getRegClass(DstReg);
- if (!tri_->getMatchingSuperReg(PhysReg, SubIdx, RC))
- return true;
- }
- }
- }
- return false;
-}
-
-
-/// CanJoinExtractSubRegToPhysReg - Return true if it's possible to coalesce
-/// an extract_subreg where dst is a physical register, e.g.
-/// cl = EXTRACT_SUBREG reg1024, 1
-bool
-SimpleRegisterCoalescing::CanJoinExtractSubRegToPhysReg(unsigned DstReg,
- unsigned SrcReg, unsigned SubIdx,
- unsigned &RealDstReg) {
- const TargetRegisterClass *RC = mri_->getRegClass(SrcReg);
- RealDstReg = tri_->getMatchingSuperReg(DstReg, SubIdx, RC);
- assert(RealDstReg && "Invalid extract_subreg instruction!");
-
- // For this type of EXTRACT_SUBREG, conservatively
- // check if the live interval of the source register interfere with the
- // actual super physical register we are trying to coalesce with.
- LiveInterval &RHS = li_->getInterval(SrcReg);
- if (li_->hasInterval(RealDstReg) &&
- RHS.overlaps(li_->getInterval(RealDstReg))) {
- DEBUG({
- dbgs() << "Interfere with register ";
- li_->getInterval(RealDstReg).print(dbgs(), tri_);
- });
- return false; // Not coalescable
- }
- for (const unsigned* SR = tri_->getSubRegisters(RealDstReg); *SR; ++SR)
- if (li_->hasInterval(*SR) && RHS.overlaps(li_->getInterval(*SR))) {
- DEBUG({
- dbgs() << "Interfere with sub-register ";
- li_->getInterval(*SR).print(dbgs(), tri_);
- });
- return false; // Not coalescable
- }
- return true;
-}
-
-/// CanJoinInsertSubRegToPhysReg - Return true if it's possible to coalesce
-/// an insert_subreg where src is a physical register, e.g.
-/// reg1024 = INSERT_SUBREG reg1024, c1, 0
-bool
-SimpleRegisterCoalescing::CanJoinInsertSubRegToPhysReg(unsigned DstReg,
- unsigned SrcReg, unsigned SubIdx,
- unsigned &RealSrcReg) {
- const TargetRegisterClass *RC = mri_->getRegClass(DstReg);
- RealSrcReg = tri_->getMatchingSuperReg(SrcReg, SubIdx, RC);
- assert(RealSrcReg && "Invalid extract_subreg instruction!");
-
- LiveInterval &RHS = li_->getInterval(DstReg);
- if (li_->hasInterval(RealSrcReg) &&
- RHS.overlaps(li_->getInterval(RealSrcReg))) {
- DEBUG({
- dbgs() << "Interfere with register ";
- li_->getInterval(RealSrcReg).print(dbgs(), tri_);
- });
- return false; // Not coalescable
- }
- for (const unsigned* SR = tri_->getSubRegisters(RealSrcReg); *SR; ++SR)
- if (li_->hasInterval(*SR) && RHS.overlaps(li_->getInterval(*SR))) {
- DEBUG({
- dbgs() << "Interfere with sub-register ";
- li_->getInterval(*SR).print(dbgs(), tri_);
- });
- return false; // Not coalescable
- }
- return true;
-}
-
-/// getRegAllocPreference - Return register allocation preference register.
-///
-static unsigned getRegAllocPreference(unsigned Reg, MachineFunction &MF,
- MachineRegisterInfo *MRI,
- const TargetRegisterInfo *TRI) {
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
- return 0;
- std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
- return TRI->ResolveRegAllocHint(Hint.first, Hint.second, MF);
-}
/// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
/// which are the src/dst of the copy instruction CopyMI. This returns true
@@ -1339,354 +1027,100 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
DEBUG(dbgs() << li_->getInstructionIndex(CopyMI) << '\t' << *CopyMI);
- unsigned SrcReg, DstReg, SrcSubIdx = 0, DstSubIdx = 0;
- bool isExtSubReg = CopyMI->isExtractSubreg();
- bool isInsSubReg = CopyMI->isInsertSubreg();
- bool isSubRegToReg = CopyMI->isSubregToReg();
- unsigned SubIdx = 0;
- if (isExtSubReg) {
- DstReg = CopyMI->getOperand(0).getReg();
- DstSubIdx = CopyMI->getOperand(0).getSubReg();
- SrcReg = CopyMI->getOperand(1).getReg();
- SrcSubIdx = CopyMI->getOperand(2).getImm();
- } else if (isInsSubReg || isSubRegToReg) {
- DstReg = CopyMI->getOperand(0).getReg();
- DstSubIdx = CopyMI->getOperand(3).getImm();
- SrcReg = CopyMI->getOperand(2).getReg();
- SrcSubIdx = CopyMI->getOperand(2).getSubReg();
- if (SrcSubIdx && SrcSubIdx != DstSubIdx) {
- // r1025 = INSERT_SUBREG r1025, r1024<2>, 2 Then r1024 has already been
- // coalesced to a larger register so the subreg indices cancel out.
- DEBUG(dbgs() << "\tSource of insert_subreg or subreg_to_reg is already "
- "coalesced to another register.\n");
- return false; // Not coalescable.
- }
- } else if (tii_->isMoveInstr(*CopyMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
- if (SrcSubIdx && DstSubIdx && SrcSubIdx != DstSubIdx) {
- // e.g. %reg16404:1<def> = MOV8rr %reg16412:2<kill>
- Again = true;
- return false; // Not coalescable.
- }
- } else {
- llvm_unreachable("Unrecognized copy instruction!");
+ CoalescerPair CP(*tii_, *tri_);
+ if (!CP.setRegisters(CopyMI)) {
+ DEBUG(dbgs() << "\tNot coalescable.\n");
+ return false;
}
// If they are already joined we continue.
- if (SrcReg == DstReg) {
+ if (CP.getSrcReg() == CP.getDstReg()) {
DEBUG(dbgs() << "\tCopy already coalesced.\n");
return false; // Not coalescable.
}
- bool SrcIsPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg);
- bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
-
- // If they are both physical registers, we cannot join them.
- if (SrcIsPhys && DstIsPhys) {
- DEBUG(dbgs() << "\tCan not coalesce physregs.\n");
- return false; // Not coalescable.
- }
-
- // We only join virtual registers with allocatable physical registers.
- if (SrcIsPhys && !allocatableRegs_[SrcReg]) {
- DEBUG(dbgs() << "\tSrc reg is unallocatable physreg.\n");
- return false; // Not coalescable.
- }
- if (DstIsPhys && !allocatableRegs_[DstReg]) {
- DEBUG(dbgs() << "\tDst reg is unallocatable physreg.\n");
- return false; // Not coalescable.
+ if (DisablePhysicalJoin && CP.isPhys()) {
+ DEBUG(dbgs() << "\tPhysical joins disabled.\n");
+ return false;
}
- // Check that a physical source register is compatible with dst regclass
- if (SrcIsPhys) {
- unsigned SrcSubReg = SrcSubIdx ?
- tri_->getSubReg(SrcReg, SrcSubIdx) : SrcReg;
- const TargetRegisterClass *DstRC = mri_->getRegClass(DstReg);
- const TargetRegisterClass *DstSubRC = DstRC;
- if (DstSubIdx)
- DstSubRC = DstRC->getSubRegisterRegClass(DstSubIdx);
- assert(DstSubRC && "Illegal subregister index");
- if (!DstSubRC->contains(SrcSubReg)) {
- DEBUG(dbgs() << "\tIncompatible destination regclass: "
- << tri_->getName(SrcSubReg) << " not in "
- << DstSubRC->getName() << ".\n");
- return false; // Not coalescable.
- }
- }
+ DEBUG(dbgs() << "\tConsidering merging %reg" << CP.getSrcReg());
- // Check that a physical dst register is compatible with source regclass
- if (DstIsPhys) {
- unsigned DstSubReg = DstSubIdx ?
- tri_->getSubReg(DstReg, DstSubIdx) : DstReg;
- const TargetRegisterClass *SrcRC = mri_->getRegClass(SrcReg);
- const TargetRegisterClass *SrcSubRC = SrcRC;
- if (SrcSubIdx)
- SrcSubRC = SrcRC->getSubRegisterRegClass(SrcSubIdx);
- assert(SrcSubRC && "Illegal subregister index");
- if (!SrcSubRC->contains(DstSubReg)) {
- DEBUG(dbgs() << "\tIncompatible source regclass: "
- << tri_->getName(DstSubReg) << " not in "
- << SrcSubRC->getName() << ".\n");
- (void)DstSubReg;
- return false; // Not coalescable.
+ // Enforce policies.
+ if (CP.isPhys()) {
+ DEBUG(dbgs() <<" with physreg %" << tri_->getName(CP.getDstReg()) << "\n");
+ // Only coalesce to allocatable physreg.
+ if (!li_->isAllocatable(CP.getDstReg())) {
+ DEBUG(dbgs() << "\tRegister is an unallocatable physreg.\n");
+ return false; // Not coalescable.
}
- }
-
- // Should be non-null only when coalescing to a sub-register class.
- bool CrossRC = false;
- const TargetRegisterClass *SrcRC= SrcIsPhys ? 0 : mri_->getRegClass(SrcReg);
- const TargetRegisterClass *DstRC= DstIsPhys ? 0 : mri_->getRegClass(DstReg);
- const TargetRegisterClass *NewRC = NULL;
- MachineBasicBlock *CopyMBB = CopyMI->getParent();
- unsigned RealDstReg = 0;
- unsigned RealSrcReg = 0;
- if (isExtSubReg || isInsSubReg || isSubRegToReg) {
- SubIdx = CopyMI->getOperand(isExtSubReg ? 2 : 3).getImm();
- if (SrcIsPhys && isExtSubReg) {
- // r1024 = EXTRACT_SUBREG EAX, 0 then r1024 is really going to be
- // coalesced with AX.
- unsigned DstSubIdx = CopyMI->getOperand(0).getSubReg();
- if (DstSubIdx) {
- // r1024<2> = EXTRACT_SUBREG EAX, 2. Then r1024 has already been
- // coalesced to a larger register so the subreg indices cancel out.
- if (DstSubIdx != SubIdx) {
- DEBUG(dbgs() << "\t Sub-register indices mismatch.\n");
- return false; // Not coalescable.
- }
- } else
- SrcReg = tri_->getSubReg(SrcReg, SubIdx);
- SubIdx = 0;
- } else if (DstIsPhys && (isInsSubReg || isSubRegToReg)) {
- // EAX = INSERT_SUBREG EAX, r1024, 0
- unsigned SrcSubIdx = CopyMI->getOperand(2).getSubReg();
- if (SrcSubIdx) {
- // EAX = INSERT_SUBREG EAX, r1024<2>, 2 Then r1024 has already been
- // coalesced to a larger register so the subreg indices cancel out.
- if (SrcSubIdx != SubIdx) {
- DEBUG(dbgs() << "\t Sub-register indices mismatch.\n");
- return false; // Not coalescable.
- }
- } else
- DstReg = tri_->getSubReg(DstReg, SubIdx);
- SubIdx = 0;
- } else if ((DstIsPhys && isExtSubReg) ||
- (SrcIsPhys && (isInsSubReg || isSubRegToReg))) {
- if (!isSubRegToReg && CopyMI->getOperand(1).getSubReg()) {
- DEBUG(dbgs() << "\tSrc of extract_subreg already coalesced with reg"
- << " of a super-class.\n");
- return false; // Not coalescable.
- }
-
- if (isExtSubReg) {
- if (!CanJoinExtractSubRegToPhysReg(DstReg, SrcReg, SubIdx, RealDstReg))
- return false; // Not coalescable
- } else {
- if (!CanJoinInsertSubRegToPhysReg(DstReg, SrcReg, SubIdx, RealSrcReg))
- return false; // Not coalescable
- }
- SubIdx = 0;
- } else {
- unsigned OldSubIdx = isExtSubReg ? CopyMI->getOperand(0).getSubReg()
- : CopyMI->getOperand(2).getSubReg();
- if (OldSubIdx) {
- if (OldSubIdx == SubIdx && !differingRegisterClasses(SrcReg, DstReg))
- // r1024<2> = EXTRACT_SUBREG r1025, 2. Then r1024 has already been
- // coalesced to a larger register so the subreg indices cancel out.
- // Also check if the other larger register is of the same register
- // class as the would be resulting register.
- SubIdx = 0;
- else {
- DEBUG(dbgs() << "\t Sub-register indices mismatch.\n");
- return false; // Not coalescable.
- }
- }
- if (SubIdx) {
- if (!DstIsPhys && !SrcIsPhys) {
- if (isInsSubReg || isSubRegToReg) {
- NewRC = tri_->getMatchingSuperRegClass(DstRC, SrcRC, SubIdx);
- } else // extract_subreg {
- NewRC = tri_->getMatchingSuperRegClass(SrcRC, DstRC, SubIdx);
- }
- if (!NewRC) {
- DEBUG(dbgs() << "\t Conflicting sub-register indices.\n");
- return false; // Not coalescable
- }
+ } else {
+ DEBUG({
+ dbgs() << " with reg%" << CP.getDstReg();
+ if (CP.getSubIdx())
+ dbgs() << ":" << tri_->getSubRegIndexName(CP.getSubIdx());
+ dbgs() << " to " << CP.getNewRC()->getName() << "\n";
+ });
- unsigned LargeReg = isExtSubReg ? SrcReg : DstReg;
- unsigned SmallReg = isExtSubReg ? DstReg : SrcReg;
- unsigned Limit= allocatableRCRegs_[mri_->getRegClass(SmallReg)].count();
- if (!isWinToJoinCrossClass(LargeReg, SmallReg, Limit)) {
- Again = true; // May be possible to coalesce later.
- return false;
- }
- }
- }
- } else if (differingRegisterClasses(SrcReg, DstReg)) {
- if (DisableCrossClassJoin)
- return false;
- CrossRC = true;
-
- // FIXME: What if the result of a EXTRACT_SUBREG is then coalesced
- // with another? If it's the resulting destination register, then
- // the subidx must be propagated to uses (but only those defined
- // by the EXTRACT_SUBREG). If it's being coalesced into another
- // register, it should be safe because register is assumed to have
- // the register class of the super-register.
-
- // Process moves where one of the registers have a sub-register index.
- MachineOperand *DstMO = CopyMI->findRegisterDefOperand(DstReg);
- MachineOperand *SrcMO = CopyMI->findRegisterUseOperand(SrcReg);
- SubIdx = DstMO->getSubReg();
- if (SubIdx) {
- if (SrcMO->getSubReg())
- // FIXME: can we handle this?
+ // Avoid constraining virtual register regclass too much.
+ if (CP.isCrossClass()) {
+ if (DisableCrossClassJoin) {
+ DEBUG(dbgs() << "\tCross-class joins disabled.\n");
return false;
- // This is not an insert_subreg but it looks like one.
- // e.g. %reg1024:4 = MOV32rr %EAX
- isInsSubReg = true;
- if (SrcIsPhys) {
- if (!CanJoinInsertSubRegToPhysReg(DstReg, SrcReg, SubIdx, RealSrcReg))
- return false; // Not coalescable
- SubIdx = 0;
- }
- } else {
- SubIdx = SrcMO->getSubReg();
- if (SubIdx) {
- // This is not a extract_subreg but it looks like one.
- // e.g. %cl = MOV16rr %reg1024:1
- isExtSubReg = true;
- if (DstIsPhys) {
- if (!CanJoinExtractSubRegToPhysReg(DstReg, SrcReg, SubIdx,RealDstReg))
- return false; // Not coalescable
- SubIdx = 0;
- }
}
- }
-
- unsigned LargeReg = SrcReg;
- unsigned SmallReg = DstReg;
-
- // Now determine the register class of the joined register.
- if (isExtSubReg) {
- if (SubIdx && DstRC && DstRC->isASubClass()) {
- // This is a move to a sub-register class. However, the source is a
- // sub-register of a larger register class. We don't know what should
- // the register class be. FIXME.
- Again = true;
+ if (!isWinToJoinCrossClass(CP.getSrcReg(), CP.getDstReg(),
+ mri_->getRegClass(CP.getSrcReg()),
+ mri_->getRegClass(CP.getDstReg()),
+ CP.getNewRC())) {
+ DEBUG(dbgs() << "\tAvoid coalescing to constrained register class: "
+ << CP.getNewRC()->getName() << ".\n");
+ Again = true; // May be possible to coalesce later.
return false;
}
- if (!DstIsPhys && !SrcIsPhys)
- NewRC = SrcRC;
- } else if (!SrcIsPhys && !DstIsPhys) {
- NewRC = getCommonSubClass(SrcRC, DstRC);
- if (!NewRC) {
- DEBUG(dbgs() << "\tDisjoint regclasses: "
- << SrcRC->getName() << ", "
- << DstRC->getName() << ".\n");
- return false; // Not coalescable.
- }
- if (DstRC->getSize() > SrcRC->getSize())
- std::swap(LargeReg, SmallReg);
}
- // If we are joining two virtual registers and the resulting register
- // class is more restrictive (fewer registers, smaller size). Check if it's
- // worth doing the merge.
- if (!SrcIsPhys && !DstIsPhys &&
- (isExtSubReg || DstRC->isASubClass()) &&
- !isWinToJoinCrossClass(LargeReg, SmallReg,
- allocatableRCRegs_[NewRC].count())) {
- DEBUG(dbgs() << "\tSrc/Dest are different register classes: "
- << SrcRC->getName() << "/"
- << DstRC->getName() << " -> "
- << NewRC->getName() << ".\n");
- // Allow the coalescer to try again in case either side gets coalesced to
- // a physical register that's compatible with the other side. e.g.
- // r1024 = MOV32to32_ r1025
- // But later r1024 is assigned EAX then r1025 may be coalesced with EAX.
- Again = true; // May be possible to coalesce later.
+ // When possible, let DstReg be the larger interval.
+ if (!CP.getSubIdx() && li_->getInterval(CP.getSrcReg()).ranges.size() >
+ li_->getInterval(CP.getDstReg()).ranges.size())
+ CP.flip();
+ }
+
+ // We need to be careful about coalescing a source physical register with a
+ // virtual register. Once the coalescing is done, it cannot be broken and
+ // these are not spillable! If the destination interval uses are far away,
+ // think twice about coalescing them!
+ // FIXME: Why are we skipping this test for partial copies?
+ // CodeGen/X86/phys_subreg_coalesce-3.ll needs it.
+ if (!CP.isPartial() && CP.isPhys()) {
+ LiveInterval &JoinVInt = li_->getInterval(CP.getSrcReg());
+
+ // Don't join with physregs that have a ridiculous number of live
+ // ranges. The data structure performance is really bad when that
+ // happens.
+ if (li_->hasInterval(CP.getDstReg()) &&
+ li_->getInterval(CP.getDstReg()).ranges.size() > 1000) {
+ ++numAborts;
+ DEBUG(dbgs()
+ << "\tPhysical register live interval too complicated, abort!\n");
return false;
}
- }
-
- // Will it create illegal extract_subreg / insert_subreg?
- if (SrcIsPhys && HasIncompatibleSubRegDefUse(CopyMI, DstReg, SrcReg))
- return false;
- if (DstIsPhys && HasIncompatibleSubRegDefUse(CopyMI, SrcReg, DstReg))
- return false;
-
- LiveInterval &SrcInt = li_->getInterval(SrcReg);
- LiveInterval &DstInt = li_->getInterval(DstReg);
- assert(SrcInt.reg == SrcReg && DstInt.reg == DstReg &&
- "Register mapping is horribly broken!");
- DEBUG({
- dbgs() << "\t\tInspecting "; SrcInt.print(dbgs(), tri_);
- dbgs() << " and "; DstInt.print(dbgs(), tri_);
- dbgs() << ": ";
- });
+ const TargetRegisterClass *RC = mri_->getRegClass(CP.getSrcReg());
+ unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
+ unsigned Length = li_->getApproximateInstructionCount(JoinVInt);
+ if (Length > Threshold &&
+ std::distance(mri_->use_nodbg_begin(CP.getSrcReg()),
+ mri_->use_nodbg_end()) * Threshold < Length) {
+ // Before giving up coalescing, if definition of source is defined by
+ // trivial computation, try rematerializing it.
+ if (!CP.isFlipped() &&
+ ReMaterializeTrivialDef(JoinVInt, CP.getDstReg(), 0, CopyMI))
+ return true;
- // Save a copy of the virtual register live interval. We'll manually
- // merge this into the "real" physical register live interval this is
- // coalesced with.
- LiveInterval *SavedLI = 0;
- if (RealDstReg)
- SavedLI = li_->dupInterval(&SrcInt);
- else if (RealSrcReg)
- SavedLI = li_->dupInterval(&DstInt);
-
- // Check if it is necessary to propagate "isDead" property.
- if (!isExtSubReg && !isInsSubReg && !isSubRegToReg) {
- MachineOperand *mopd = CopyMI->findRegisterDefOperand(DstReg, false);
- bool isDead = mopd->isDead();
-
- // We need to be careful about coalescing a source physical register with a
- // virtual register. Once the coalescing is done, it cannot be broken and
- // these are not spillable! If the destination interval uses are far away,
- // think twice about coalescing them!
- if (!isDead && (SrcIsPhys || DstIsPhys)) {
- // If the copy is in a loop, take care not to coalesce aggressively if the
- // src is coming in from outside the loop (or the dst is out of the loop).
- // If it's not in a loop, then determine whether to join them based purely
- // by the length of the interval.
- if (PhysJoinTweak) {
- if (SrcIsPhys) {
- if (!isWinToJoinVRWithSrcPhysReg(CopyMI, CopyMBB, DstInt, SrcInt)) {
- mri_->setRegAllocationHint(DstInt.reg, 0, SrcReg);
- ++numAborts;
- DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
- Again = true; // May be possible to coalesce later.
- return false;
- }
- } else {
- if (!isWinToJoinVRWithDstPhysReg(CopyMI, CopyMBB, DstInt, SrcInt)) {
- mri_->setRegAllocationHint(SrcInt.reg, 0, DstReg);
- ++numAborts;
- DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
- Again = true; // May be possible to coalesce later.
- return false;
- }
- }
- } else {
- // If the virtual register live interval is long but it has low use
- // density, do not join them, instead mark the physical register as its
- // allocation preference.
- LiveInterval &JoinVInt = SrcIsPhys ? DstInt : SrcInt;
- unsigned JoinVReg = SrcIsPhys ? DstReg : SrcReg;
- unsigned JoinPReg = SrcIsPhys ? SrcReg : DstReg;
- const TargetRegisterClass *RC = mri_->getRegClass(JoinVReg);
- unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
- unsigned Length = li_->getApproximateInstructionCount(JoinVInt);
- float Ratio = 1.0 / Threshold;
- if (Length > Threshold &&
- (((float)std::distance(mri_->use_nodbg_begin(JoinVReg),
- mri_->use_nodbg_end()) / Length) < Ratio)) {
- mri_->setRegAllocationHint(JoinVInt.reg, 0, JoinPReg);
- ++numAborts;
- DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
- Again = true; // May be possible to coalesce later.
- return false;
- }
- }
+ ++numAborts;
+ DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
+ Again = true; // May be possible to coalesce later.
+ return false;
}
}
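
The hunk above collapses the old per-direction physreg heuristics into one use-density test: the join is abandoned when the virtual interval spans more than twice as many instructions as there are allocatable registers in its class and its uses are sparser than one per Threshold instructions. The standalone sketch below restates that test with plain integers; the function and parameter names are stand-ins, not part of the patch.

// Standalone illustration of the use-density abort test shown above. The
// inputs model allocatableRCRegs_[RC].count(), the approximate instruction
// count of the interval, and the number of non-debug uses.
#include <cstdio>

static bool shouldAbortPhysJoin(unsigned allocatableRegsInClass,
                                unsigned approxInstrCount,
                                unsigned useCount) {
  unsigned Threshold = allocatableRegsInClass * 2;
  // Equivalent to: interval longer than Threshold and use density < 1/Threshold.
  return approxInstrCount > Threshold &&
         useCount * Threshold < approxInstrCount;
}

int main() {
  // 16 allocatable registers -> Threshold = 32. A 200-instruction interval
  // with only 3 uses has density 3/200 < 1/32, so the join is abandoned.
  std::printf("%d\n", shouldAbortPhysJoin(16, 200, 3));  // 1 (abort)
  std::printf("%d\n", shouldAbortPhysJoin(16, 200, 20)); // 0 (proceed)
  return 0;
}
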
@@ -1694,127 +1128,58 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
// Otherwise, if one of the intervals being joined is a physreg, this method
// always canonicalizes DstInt to be it. The output "SrcInt" will not have
// been modified, so we can use this information below to update aliases.
- bool Swapped = false;
- // If SrcInt is implicitly defined, it's safe to coalesce.
- bool isEmpty = SrcInt.empty();
- if (isEmpty && !CanCoalesceWithImpDef(CopyMI, DstInt, SrcInt)) {
- // Only coalesce an empty interval (defined by implicit_def) with
- // another interval which has a valno defined by the CopyMI and the CopyMI
- // is a kill of the implicit def.
- DEBUG(dbgs() << "Not profitable!\n");
- return false;
- }
-
- if (!isEmpty && !JoinIntervals(DstInt, SrcInt, Swapped)) {
+ if (!JoinIntervals(CP)) {
// Coalescing failed.
// If definition of source is defined by trivial computation, try
// rematerializing it.
- if (!isExtSubReg && !isInsSubReg && !isSubRegToReg &&
- ReMaterializeTrivialDef(SrcInt, DstReg, DstSubIdx, CopyMI))
+ if (!CP.isFlipped() &&
+ ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()),
+ CP.getDstReg(), 0, CopyMI))
return true;
// If we can eliminate the copy without merging the live ranges, do so now.
- if (!isExtSubReg && !isInsSubReg && !isSubRegToReg &&
- (AdjustCopiesBackFrom(SrcInt, DstInt, CopyMI) ||
- RemoveCopyByCommutingDef(SrcInt, DstInt, CopyMI))) {
- JoinedCopies.insert(CopyMI);
- DEBUG(dbgs() << "Trivial!\n");
- return true;
+ if (!CP.isPartial()) {
+ if (AdjustCopiesBackFrom(CP, CopyMI) ||
+ RemoveCopyByCommutingDef(CP, CopyMI)) {
+ JoinedCopies.insert(CopyMI);
+ DEBUG(dbgs() << "\tTrivial!\n");
+ return true;
+ }
}
// Otherwise, we are unable to join the intervals.
- DEBUG(dbgs() << "Interference!\n");
+ DEBUG(dbgs() << "\tInterference!\n");
Again = true; // May be possible to coalesce later.
return false;
}
- LiveInterval *ResSrcInt = &SrcInt;
- LiveInterval *ResDstInt = &DstInt;
- if (Swapped) {
- std::swap(SrcReg, DstReg);
- std::swap(ResSrcInt, ResDstInt);
- }
- assert(TargetRegisterInfo::isVirtualRegister(SrcReg) &&
- "LiveInterval::join didn't work right!");
-
- // If we're about to merge live ranges into a physical register live interval,
- // we have to update any aliased register's live ranges to indicate that they
- // have clobbered values for this range.
- if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
- // If this is a extract_subreg where dst is a physical register, e.g.
- // cl = EXTRACT_SUBREG reg1024, 1
- // then create and update the actual physical register allocated to RHS.
- if (RealDstReg || RealSrcReg) {
- LiveInterval &RealInt =
- li_->getOrCreateInterval(RealDstReg ? RealDstReg : RealSrcReg);
- for (LiveInterval::const_vni_iterator I = SavedLI->vni_begin(),
- E = SavedLI->vni_end(); I != E; ++I) {
- const VNInfo *ValNo = *I;
- VNInfo *NewValNo = RealInt.getNextValue(ValNo->def, ValNo->getCopy(),
- false, // updated at *
- li_->getVNInfoAllocator());
- NewValNo->setFlags(ValNo->getFlags()); // * updated here.
- RealInt.addKills(NewValNo, ValNo->kills);
- RealInt.MergeValueInAsValue(*SavedLI, ValNo, NewValNo);
- }
- RealInt.weight += SavedLI->weight;
- DstReg = RealDstReg ? RealDstReg : RealSrcReg;
- }
-
- // Update the liveintervals of sub-registers.
- for (const unsigned *AS = tri_->getSubRegisters(DstReg); *AS; ++AS)
- li_->getOrCreateInterval(*AS).MergeInClobberRanges(*li_, *ResSrcInt,
- li_->getVNInfoAllocator());
- }
-
- // If this is a EXTRACT_SUBREG, make sure the result of coalescing is the
- // larger super-register.
- if ((isExtSubReg || isInsSubReg || isSubRegToReg) &&
- !SrcIsPhys && !DstIsPhys) {
- if ((isExtSubReg && !Swapped) ||
- ((isInsSubReg || isSubRegToReg) && Swapped)) {
- ResSrcInt->Copy(*ResDstInt, mri_, li_->getVNInfoAllocator());
- std::swap(SrcReg, DstReg);
- std::swap(ResSrcInt, ResDstInt);
- }
- }
-
// Coalescing to a virtual register that is of a sub-register class of the
// other. Make sure the resulting register is set to the right register class.
- if (CrossRC)
+ if (CP.isCrossClass()) {
++numCrossRCs;
-
- // This may happen even if it's cross-rc coalescing. e.g.
- // %reg1026<def> = SUBREG_TO_REG 0, %reg1037<kill>, 4
- // reg1026 -> GR64, reg1037 -> GR32_ABCD. The resulting register will have to
- // be allocated a register from GR64_ABCD.
- if (NewRC)
- mri_->setRegClass(DstReg, NewRC);
+ mri_->setRegClass(CP.getDstReg(), CP.getNewRC());
+ }
// Remember to delete the copy instruction.
JoinedCopies.insert(CopyMI);
- // Some live range has been lengthened due to coalescing, eliminate the
- // unnecessary kills.
- RemoveUnnecessaryKills(SrcReg, *ResDstInt);
- if (TargetRegisterInfo::isVirtualRegister(DstReg))
- RemoveUnnecessaryKills(DstReg, *ResDstInt);
-
- UpdateRegDefsUses(SrcReg, DstReg, SubIdx);
+ UpdateRegDefsUses(CP);
// If we have extended the live range of a physical register, make sure we
// update live-in lists as well.
- if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
- const LiveInterval &VRegInterval = li_->getInterval(SrcReg);
+ if (CP.isPhys()) {
SmallVector<MachineBasicBlock*, 16> BlockSeq;
- for (LiveInterval::const_iterator I = VRegInterval.begin(),
- E = VRegInterval.end(); I != E; ++I ) {
+ // JoinIntervals invalidates the VNInfos in SrcInt, but we only need the
+ // ranges for this, and they are preserved.
+ LiveInterval &SrcInt = li_->getInterval(CP.getSrcReg());
+ for (LiveInterval::const_iterator I = SrcInt.begin(), E = SrcInt.end();
+ I != E; ++I ) {
li_->findLiveInMBBs(I->start, I->end, BlockSeq);
for (unsigned idx = 0, size = BlockSeq.size(); idx != size; ++idx) {
MachineBasicBlock &block = *BlockSeq[idx];
- if (!block.isLiveIn(DstReg))
- block.addLiveIn(DstReg);
+ if (!block.isLiveIn(CP.getDstReg()))
+ block.addLiveIn(CP.getDstReg());
}
BlockSeq.clear();
}
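
When JoinIntervals fails, the rewritten JoinCopy above falls back in a fixed order: rematerialize a trivially computed source (only if the pair was not flipped), then try to eliminate the copy without merging ranges (only for non-partial copies), and otherwise mark the copy for a later retry. A minimal sketch of that ordering, with hypothetical callbacks standing in for ReMaterializeTrivialDef and the AdjustCopiesBackFrom/RemoveCopyByCommutingDef pair:

#include <functional>
#include <iostream>

// Returns true if the copy could still be eliminated; sets 'again' when a
// later pass over the copy list may succeed.
static bool handleFailedJoin(bool flipped, bool partial, bool &again,
                             const std::function<bool()> &rematerialize,
                             const std::function<bool()> &removeWithoutMerge) {
  if (!flipped && rematerialize())
    return true;                 // source def was a trivial computation
  if (!partial && removeWithoutMerge())
    return true;                 // "Trivial!" -- copy gone, no range merge
  again = true;                  // "Interference!" -- retry later
  return false;
}

int main() {
  bool again = false;
  std::cout << handleFailedJoin(false, false, again,
                                [] { return false; },
                                [] { return true; })
            << " " << again << "\n";   // prints "1 0"
  return 0;
}
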
@@ -1822,32 +1187,17 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
// SrcReg is guaranteed to be the register whose live interval is
// being merged.
- li_->removeInterval(SrcReg);
+ li_->removeInterval(CP.getSrcReg());
// Update regalloc hint.
- tri_->UpdateRegAllocHint(SrcReg, DstReg, *mf_);
-
- // Manually deleted the live interval copy.
- if (SavedLI) {
- SavedLI->clear();
- delete SavedLI;
- }
-
- // If resulting interval has a preference that no longer fits because of subreg
- // coalescing, just clear the preference.
- unsigned Preference = getRegAllocPreference(ResDstInt->reg, *mf_, mri_, tri_);
- if (Preference && (isExtSubReg || isInsSubReg || isSubRegToReg) &&
- TargetRegisterInfo::isVirtualRegister(ResDstInt->reg)) {
- const TargetRegisterClass *RC = mri_->getRegClass(ResDstInt->reg);
- if (!RC->contains(Preference))
- mri_->setRegAllocationHint(ResDstInt->reg, 0, 0);
- }
+ tri_->UpdateRegAllocHint(CP.getSrcReg(), CP.getDstReg(), *mf_);
DEBUG({
- dbgs() << "\n\t\tJoined. Result = ";
- ResDstInt->print(dbgs(), tri_);
- dbgs() << "\n";
- });
+ LiveInterval &DstInt = li_->getInterval(CP.getDstReg());
+ dbgs() << "\tJoined. Result = ";
+ DstInt.print(dbgs(), tri_);
+ dbgs() << "\n";
+ });
++numJoins;
return true;
@@ -1904,263 +1254,53 @@ static unsigned ComputeUltimateVN(VNInfo *VNI,
return ThisValNoAssignments[VN] = UltimateVN;
}
-static bool InVector(VNInfo *Val, const SmallVector<VNInfo*, 8> &V) {
- return std::find(V.begin(), V.end(), Val) != V.end();
-}
-
-static bool isValNoDefMove(const MachineInstr *MI, unsigned DR, unsigned SR,
- const TargetInstrInfo *TII,
- const TargetRegisterInfo *TRI) {
- unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- if (TII->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
- ;
- else if (MI->isExtractSubreg()) {
- DstReg = MI->getOperand(0).getReg();
- SrcReg = MI->getOperand(1).getReg();
- } else if (MI->isSubregToReg() ||
- MI->isInsertSubreg()) {
- DstReg = MI->getOperand(0).getReg();
- SrcReg = MI->getOperand(2).getReg();
- } else
- return false;
- return (SrcReg == SR || TRI->isSuperRegister(SR, SrcReg)) &&
- (DstReg == DR || TRI->isSuperRegister(DR, DstReg));
-}
-
-/// RangeIsDefinedByCopyFromReg - Return true if the specified live range of
-/// the specified live interval is defined by a copy from the specified
-/// register.
-bool SimpleRegisterCoalescing::RangeIsDefinedByCopyFromReg(LiveInterval &li,
- LiveRange *LR,
- unsigned Reg) {
- unsigned SrcReg = li_->getVNInfoSourceReg(LR->valno);
- if (SrcReg == Reg)
- return true;
- // FIXME: Do isPHIDef and isDefAccurate both need to be tested?
- if ((LR->valno->isPHIDef() || !LR->valno->isDefAccurate()) &&
- TargetRegisterInfo::isPhysicalRegister(li.reg) &&
- *tri_->getSuperRegisters(li.reg)) {
- // It's a sub-register live interval, we may not have precise information.
- // Re-compute it.
- MachineInstr *DefMI = li_->getInstructionFromIndex(LR->start);
- if (DefMI && isValNoDefMove(DefMI, li.reg, Reg, tii_, tri_)) {
- // Cache computed info.
- LR->valno->def = LR->start;
- LR->valno->setCopy(DefMI);
- return true;
- }
- }
- return false;
-}
-
-
-/// ValueLiveAt - Return true if the LiveRange pointed to by the given
-/// iterator, or any subsequent range with the same value number,
-/// is live at the given point.
-bool SimpleRegisterCoalescing::ValueLiveAt(LiveInterval::iterator LRItr,
- LiveInterval::iterator LREnd,
- SlotIndex defPoint) const {
- for (const VNInfo *valno = LRItr->valno;
- (LRItr != LREnd) && (LRItr->valno == valno); ++LRItr) {
- if (LRItr->contains(defPoint))
- return true;
- }
-
- return false;
-}
-
-
-/// SimpleJoin - Attempt to joint the specified interval into this one. The
-/// caller of this method must guarantee that the RHS only contains a single
-/// value number and that the RHS is not defined by a copy from this
-/// interval. This returns false if the intervals are not joinable, or it
-/// joins them and returns true.
-bool SimpleRegisterCoalescing::SimpleJoin(LiveInterval &LHS, LiveInterval &RHS){
- assert(RHS.containsOneValue());
-
- // Some number (potentially more than one) value numbers in the current
- // interval may be defined as copies from the RHS. Scan the overlapping
- // portions of the LHS and RHS, keeping track of this and looking for
- // overlapping live ranges that are NOT defined as copies. If these exist, we
- // cannot coalesce.
-
- LiveInterval::iterator LHSIt = LHS.begin(), LHSEnd = LHS.end();
- LiveInterval::iterator RHSIt = RHS.begin(), RHSEnd = RHS.end();
-
- if (LHSIt->start < RHSIt->start) {
- LHSIt = std::upper_bound(LHSIt, LHSEnd, RHSIt->start);
- if (LHSIt != LHS.begin()) --LHSIt;
- } else if (RHSIt->start < LHSIt->start) {
- RHSIt = std::upper_bound(RHSIt, RHSEnd, LHSIt->start);
- if (RHSIt != RHS.begin()) --RHSIt;
- }
-
- SmallVector<VNInfo*, 8> EliminatedLHSVals;
-
- while (1) {
- // Determine if these live intervals overlap.
- bool Overlaps = false;
- if (LHSIt->start <= RHSIt->start)
- Overlaps = LHSIt->end > RHSIt->start;
- else
- Overlaps = RHSIt->end > LHSIt->start;
-
- // If the live intervals overlap, there are two interesting cases: if the
- // LHS interval is defined by a copy from the RHS, it's ok and we record
- // that the LHS value # is the same as the RHS. If it's not, then we cannot
- // coalesce these live ranges and we bail out.
- if (Overlaps) {
- // If we haven't already recorded that this value # is safe, check it.
- if (!InVector(LHSIt->valno, EliminatedLHSVals)) {
- // If it's re-defined by an early clobber somewhere in the live range,
- // then conservatively abort coalescing.
- if (LHSIt->valno->hasRedefByEC())
- return false;
- // Copy from the RHS?
- if (!RangeIsDefinedByCopyFromReg(LHS, LHSIt, RHS.reg))
- return false; // Nope, bail out.
-
- if (ValueLiveAt(LHSIt, LHS.end(), RHSIt->valno->def))
- // Here is an interesting situation:
- // BB1:
- // vr1025 = copy vr1024
- // ..
- // BB2:
- // vr1024 = op
- // = vr1025
- // Even though vr1025 is copied from vr1024, it's not safe to
- // coalesce them since the live range of vr1025 intersects the
- // def of vr1024. This happens because vr1025 is assigned the
- // value of the previous iteration of vr1024.
+/// JoinIntervals - Attempt to join these two intervals. On failure, this
+/// returns false.
+bool SimpleRegisterCoalescing::JoinIntervals(CoalescerPair &CP) {
+ LiveInterval &RHS = li_->getInterval(CP.getSrcReg());
+ DEBUG({ dbgs() << "\t\tRHS = "; RHS.print(dbgs(), tri_); dbgs() << "\n"; });
+
+ // If a live interval is a physical register, check for interference with any
+ // aliases. The interference check implemented here is a bit more conservative
+ // than the full interference check below. We allow overlapping live ranges
+ // only when one is a copy of the other.
+ if (CP.isPhys()) {
+ for (const unsigned *AS = tri_->getAliasSet(CP.getDstReg()); *AS; ++AS){
+ if (!li_->hasInterval(*AS))
+ continue;
+ const LiveInterval &LHS = li_->getInterval(*AS);
+ LiveInterval::const_iterator LI = LHS.begin();
+ for (LiveInterval::const_iterator RI = RHS.begin(), RE = RHS.end();
+ RI != RE; ++RI) {
+ LI = std::lower_bound(LI, LHS.end(), RI->start);
+ // Does LHS have an overlapping live range starting before RI?
+ if ((LI != LHS.begin() && LI[-1].end > RI->start) &&
+ (RI->start != RI->valno->def ||
+ !CP.isCoalescable(li_->getInstructionFromIndex(RI->start)))) {
+ DEBUG({
+ dbgs() << "\t\tInterference from alias: ";
+ LHS.print(dbgs(), tri_);
+ dbgs() << "\n\t\tOverlap at " << RI->start << " and no copy.\n";
+ });
return false;
- EliminatedLHSVals.push_back(LHSIt->valno);
- }
-
- // We know this entire LHS live range is okay, so skip it now.
- if (++LHSIt == LHSEnd) break;
- continue;
- }
+ }
- if (LHSIt->end < RHSIt->end) {
- if (++LHSIt == LHSEnd) break;
- } else {
- // One interesting case to check here. It's possible that we have
- // something like "X3 = Y" which defines a new value number in the LHS,
- // and is the last use of this liverange of the RHS. In this case, we
- // want to notice this copy (so that it gets coalesced away) even though
- // the live ranges don't actually overlap.
- if (LHSIt->start == RHSIt->end) {
- if (InVector(LHSIt->valno, EliminatedLHSVals)) {
- // We already know that this value number is going to be merged in
- // if coalescing succeeds. Just skip the liverange.
- if (++LHSIt == LHSEnd) break;
- } else {
- // If it's re-defined by an early clobber somewhere in the live range,
- // then conservatively abort coalescing.
- if (LHSIt->valno->hasRedefByEC())
+ // Check that LHS ranges beginning in this range are copies.
+ for (; LI != LHS.end() && LI->start < RI->end; ++LI) {
+ if (LI->start != LI->valno->def ||
+ !CP.isCoalescable(li_->getInstructionFromIndex(LI->start))) {
+ DEBUG({
+ dbgs() << "\t\tInterference from alias: ";
+ LHS.print(dbgs(), tri_);
+ dbgs() << "\n\t\tDef at " << LI->start << " is not a copy.\n";
+ });
return false;
- // Otherwise, if this is a copy from the RHS, mark it as being merged
- // in.
- if (RangeIsDefinedByCopyFromReg(LHS, LHSIt, RHS.reg)) {
- if (ValueLiveAt(LHSIt, LHS.end(), RHSIt->valno->def))
- // Here is an interesting situation:
- // BB1:
- // vr1025 = copy vr1024
- // ..
- // BB2:
- // vr1024 = op
- // = vr1025
- // Even though vr1025 is copied from vr1024, it's not safe to
- // coalesced them since live range of vr1025 intersects the
- // def of vr1024. This happens because vr1025 is assigned the
- // value of the previous iteration of vr1024.
- return false;
- EliminatedLHSVals.push_back(LHSIt->valno);
-
- // We know this entire LHS live range is okay, so skip it now.
- if (++LHSIt == LHSEnd) break;
}
}
}
-
- if (++RHSIt == RHSEnd) break;
- }
- }
-
- // If we got here, we know that the coalescing will be successful and that
- // the value numbers in EliminatedLHSVals will all be merged together. Since
- // the most common case is that EliminatedLHSVals has a single number, we
- // optimize for it: if there is more than one value, we merge them all into
- // the lowest numbered one, then handle the interval as if we were merging
- // with one value number.
- VNInfo *LHSValNo = NULL;
- if (EliminatedLHSVals.size() > 1) {
- // Loop through all the equal value numbers merging them into the smallest
- // one.
- VNInfo *Smallest = EliminatedLHSVals[0];
- for (unsigned i = 1, e = EliminatedLHSVals.size(); i != e; ++i) {
- if (EliminatedLHSVals[i]->id < Smallest->id) {
- // Merge the current notion of the smallest into the smaller one.
- LHS.MergeValueNumberInto(Smallest, EliminatedLHSVals[i]);
- Smallest = EliminatedLHSVals[i];
- } else {
- // Merge into the smallest.
- LHS.MergeValueNumberInto(EliminatedLHSVals[i], Smallest);
- }
}
- LHSValNo = Smallest;
- } else if (EliminatedLHSVals.empty()) {
- if (TargetRegisterInfo::isPhysicalRegister(LHS.reg) &&
- *tri_->getSuperRegisters(LHS.reg))
- // Imprecise sub-register information. Can't handle it.
- return false;
- llvm_unreachable("No copies from the RHS?");
- } else {
- LHSValNo = EliminatedLHSVals[0];
- }
-
- // Okay, now that there is a single LHS value number that we're merging the
- // RHS into, update the value number info for the LHS to indicate that the
- // value number is defined where the RHS value number was.
- const VNInfo *VNI = RHS.getValNumInfo(0);
- LHSValNo->def = VNI->def;
- LHSValNo->setCopy(VNI->getCopy());
-
- // Okay, the final step is to loop over the RHS live intervals, adding them to
- // the LHS.
- if (VNI->hasPHIKill())
- LHSValNo->setHasPHIKill(true);
- LHS.addKills(LHSValNo, VNI->kills);
- LHS.MergeRangesInAsValue(RHS, LHSValNo);
-
- LHS.ComputeJoinedWeight(RHS);
-
- // Update regalloc hint if both are virtual registers.
- if (TargetRegisterInfo::isVirtualRegister(LHS.reg) &&
- TargetRegisterInfo::isVirtualRegister(RHS.reg)) {
- std::pair<unsigned, unsigned> RHSPref = mri_->getRegAllocationHint(RHS.reg);
- std::pair<unsigned, unsigned> LHSPref = mri_->getRegAllocationHint(LHS.reg);
- if (RHSPref != LHSPref)
- mri_->setRegAllocationHint(LHS.reg, RHSPref.first, RHSPref.second);
}
- // Update the liveintervals of sub-registers.
- if (TargetRegisterInfo::isPhysicalRegister(LHS.reg))
- for (const unsigned *AS = tri_->getSubRegisters(LHS.reg); *AS; ++AS)
- li_->getOrCreateInterval(*AS).MergeInClobberRanges(*li_, LHS,
- li_->getVNInfoAllocator());
-
- return true;
-}
-
-/// JoinIntervals - Attempt to join these two intervals. On failure, this
-/// returns false. Otherwise, if one of the intervals being joined is a
-/// physreg, this method always canonicalizes LHS to be it. The output
-/// "RHS" will not have been modified, so we can use this information
-/// below to update aliases.
-bool
-SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
- bool &Swapped) {
// Compute the final value assignment, assuming that the live ranges can be
// coalesced.
SmallVector<int, 16> LHSValNoAssignments;
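
The physreg path added in the hunk above replaces the old sub-register overlap scan with a per-alias walk: for each live range of the source interval, an overlapping range of an aliased physreg interval is only tolerated when the overlap is explained by a coalescable copy. Below is a reduced, self-contained version of that walk; the Range struct and its isCopyDef flag are stand-ins for LiveRange, valno->def and CP.isCoalescable, and the debug output is omitted.

#include <algorithm>
#include <iostream>
#include <vector>

struct Range { int start, end; bool isCopyDef; }; // starts at a coalescable copy?

// True when the alias interval LHS really interferes with the source
// interval RHS, i.e. some overlap is not explained by a copy.
static bool aliasInterferes(const std::vector<Range> &LHS,
                            const std::vector<Range> &RHS) {
  auto LI = LHS.begin();
  for (const Range &R : RHS) {
    LI = std::lower_bound(LI, LHS.end(), R.start,
                          [](const Range &L, int s) { return L.start < s; });
    // An alias range starting before R and running into it is only fine if
    // R itself begins at a copy definition.
    if (LI != LHS.begin() && (LI - 1)->end > R.start && !R.isCopyDef)
      return true;
    // Alias ranges starting inside R must themselves be copy definitions.
    for (auto I = LI; I != LHS.end() && I->start < R.end; ++I)
      if (!I->isCopyDef)
        return true;
  }
  return false;
}

int main() {
  std::vector<Range> LHS{{2, 6, true}}, RHS{{0, 10, true}};
  std::cout << aliasInterferes(LHS, RHS) << "\n"; // 0: overlap is a copy
  LHS[0].isCopyDef = false;
  std::cout << aliasInterferes(LHS, RHS) << "\n"; // 1: real interference
  return 0;
}
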
@@ -2169,203 +1309,87 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
DenseMap<VNInfo*, VNInfo*> RHSValsDefinedFromLHS;
SmallVector<VNInfo*, 16> NewVNInfo;
- // If a live interval is a physical register, conservatively check if any
- // of its sub-registers is overlapping the live interval of the virtual
- // register. If so, do not coalesce.
- if (TargetRegisterInfo::isPhysicalRegister(LHS.reg) &&
- *tri_->getSubRegisters(LHS.reg)) {
- // If it's coalescing a virtual register to a physical register, estimate
- // its live interval length. This is the *cost* of scanning an entire live
- // interval. If the cost is low, we'll do an exhaustive check instead.
-
- // If this is something like this:
- // BB1:
- // v1024 = op
- // ...
- // BB2:
- // ...
- // RAX = v1024
- //
- // That is, the live interval of v1024 crosses a bb. Then we can't rely on
- // a less conservative check. It's possible a sub-register is defined before
- // v1024 (or live in) and live out of BB1.
- if (RHS.containsOneValue() &&
- li_->intervalIsInOneMBB(RHS) &&
- li_->getApproximateInstructionCount(RHS) <= 10) {
- // Perform a more exhaustive check for some common cases.
- if (li_->conflictsWithPhysRegRef(RHS, LHS.reg, true, JoinedCopies))
- return false;
- } else {
- for (const unsigned* SR = tri_->getSubRegisters(LHS.reg); *SR; ++SR)
- if (li_->hasInterval(*SR) && RHS.overlaps(li_->getInterval(*SR))) {
- DEBUG({
- dbgs() << "Interfere with sub-register ";
- li_->getInterval(*SR).print(dbgs(), tri_);
- });
- return false;
- }
- }
- } else if (TargetRegisterInfo::isPhysicalRegister(RHS.reg) &&
- *tri_->getSubRegisters(RHS.reg)) {
- if (LHS.containsOneValue() &&
- li_->getApproximateInstructionCount(LHS) <= 10) {
- // Perform a more exhaustive check for some common cases.
- if (li_->conflictsWithPhysRegRef(LHS, RHS.reg, false, JoinedCopies))
- return false;
- } else {
- for (const unsigned* SR = tri_->getSubRegisters(RHS.reg); *SR; ++SR)
- if (li_->hasInterval(*SR) && LHS.overlaps(li_->getInterval(*SR))) {
- DEBUG({
- dbgs() << "Interfere with sub-register ";
- li_->getInterval(*SR).print(dbgs(), tri_);
- });
- return false;
- }
- }
- }
+ LiveInterval &LHS = li_->getOrCreateInterval(CP.getDstReg());
+ DEBUG({ dbgs() << "\t\tLHS = "; LHS.print(dbgs(), tri_); dbgs() << "\n"; });
- // Compute ultimate value numbers for the LHS and RHS values.
- if (RHS.containsOneValue()) {
- // Copies from a liveinterval with a single value are simple to handle and
- // very common, handle the special case here. This is important, because
- // often RHS is small and LHS is large (e.g. a physreg).
-
- // Find out if the RHS is defined as a copy from some value in the LHS.
- int RHSVal0DefinedFromLHS = -1;
- int RHSValID = -1;
- VNInfo *RHSValNoInfo = NULL;
- VNInfo *RHSValNoInfo0 = RHS.getValNumInfo(0);
- unsigned RHSSrcReg = li_->getVNInfoSourceReg(RHSValNoInfo0);
- if (RHSSrcReg == 0 || RHSSrcReg != LHS.reg) {
- // If RHS is not defined as a copy from the LHS, we can use simpler and
- // faster checks to see if the live ranges are coalescable. This joiner
- // can't swap the LHS/RHS intervals though.
- if (!TargetRegisterInfo::isPhysicalRegister(RHS.reg)) {
- return SimpleJoin(LHS, RHS);
- } else {
- RHSValNoInfo = RHSValNoInfo0;
- }
- } else {
- // It was defined as a copy from the LHS, find out what value # it is.
- RHSValNoInfo =
- LHS.getLiveRangeContaining(RHSValNoInfo0->def.getPrevSlot())->valno;
- RHSValID = RHSValNoInfo->id;
- RHSVal0DefinedFromLHS = RHSValID;
- }
+ // Loop over the value numbers of the LHS, seeing if any are defined from
+ // the RHS.
+ for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
+ i != e; ++i) {
+ VNInfo *VNI = *i;
+ if (VNI->isUnused() || VNI->getCopy() == 0) // Src not defined by a copy?
+ continue;
- LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
- RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
- NewVNInfo.resize(LHS.getNumValNums(), NULL);
-
- // Okay, *all* of the values in LHS that are defined as a copy from RHS
- // should now get updated.
- for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
- i != e; ++i) {
- VNInfo *VNI = *i;
- unsigned VN = VNI->id;
- if (unsigned LHSSrcReg = li_->getVNInfoSourceReg(VNI)) {
- if (LHSSrcReg != RHS.reg) {
- // If this is not a copy from the RHS, its value number will be
- // unmodified by the coalescing.
- NewVNInfo[VN] = VNI;
- LHSValNoAssignments[VN] = VN;
- } else if (RHSValID == -1) {
- // Otherwise, it is a copy from the RHS, and we don't already have a
- // value# for it. Keep the current value number, but remember it.
- LHSValNoAssignments[VN] = RHSValID = VN;
- NewVNInfo[VN] = RHSValNoInfo;
- LHSValsDefinedFromRHS[VNI] = RHSValNoInfo0;
- } else {
- // Otherwise, use the specified value #.
- LHSValNoAssignments[VN] = RHSValID;
- if (VN == (unsigned)RHSValID) { // Else this val# is dead.
- NewVNInfo[VN] = RHSValNoInfo;
- LHSValsDefinedFromRHS[VNI] = RHSValNoInfo0;
- }
- }
- } else {
- NewVNInfo[VN] = VNI;
- LHSValNoAssignments[VN] = VN;
- }
- }
+ // Never join with a register that has EarlyClobber redefs.
+ if (VNI->hasRedefByEC())
+ return false;
- assert(RHSValID != -1 && "Didn't find value #?");
- RHSValNoAssignments[0] = RHSValID;
- if (RHSVal0DefinedFromLHS != -1) {
- // This path doesn't go through ComputeUltimateVN so just set
- // it to anything.
- RHSValsDefinedFromLHS[RHSValNoInfo0] = (VNInfo*)1;
- }
- } else {
- // Loop over the value numbers of the LHS, seeing if any are defined from
- // the RHS.
- for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
- i != e; ++i) {
- VNInfo *VNI = *i;
- if (VNI->isUnused() || VNI->getCopy() == 0) // Src not defined by a copy?
- continue;
+ // DstReg is known to be a register in the LHS interval. If the src is
+ // from the RHS interval, we can use its value #.
+ if (!CP.isCoalescable(VNI->getCopy()))
+ continue;
- // DstReg is known to be a register in the LHS interval. If the src is
- // from the RHS interval, we can use its value #.
- if (li_->getVNInfoSourceReg(VNI) != RHS.reg)
- continue;
+ // Figure out the value # from the RHS.
+ LiveRange *lr = RHS.getLiveRangeContaining(VNI->def.getPrevSlot());
+ // The copy could be to an aliased physreg.
+ if (!lr) continue;
+ LHSValsDefinedFromRHS[VNI] = lr->valno;
+ }
- // Figure out the value # from the RHS.
- LiveRange *lr = RHS.getLiveRangeContaining(VNI->def.getPrevSlot());
- assert(lr && "Cannot find live range");
- LHSValsDefinedFromRHS[VNI] = lr->valno;
- }
+ // Loop over the value numbers of the RHS, seeing if any are defined from
+ // the LHS.
+ for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
+ i != e; ++i) {
+ VNInfo *VNI = *i;
+ if (VNI->isUnused() || VNI->getCopy() == 0) // Src not defined by a copy?
+ continue;
- // Loop over the value numbers of the RHS, seeing if any are defined from
- // the LHS.
- for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
- i != e; ++i) {
- VNInfo *VNI = *i;
- if (VNI->isUnused() || VNI->getCopy() == 0) // Src not defined by a copy?
- continue;
+ // Never join with a register that has EarlyClobber redefs.
+ if (VNI->hasRedefByEC())
+ return false;
- // DstReg is known to be a register in the RHS interval. If the src is
- // from the LHS interval, we can use its value #.
- if (li_->getVNInfoSourceReg(VNI) != LHS.reg)
- continue;
+ // DstReg is known to be a register in the RHS interval. If the src is
+ // from the LHS interval, we can use its value #.
+ if (!CP.isCoalescable(VNI->getCopy()))
+ continue;
- // Figure out the value # from the LHS.
- LiveRange *lr = LHS.getLiveRangeContaining(VNI->def.getPrevSlot());
- assert(lr && "Cannot find live range");
- RHSValsDefinedFromLHS[VNI] = lr->valno;
- }
+ // Figure out the value # from the LHS.
+ LiveRange *lr = LHS.getLiveRangeContaining(VNI->def.getPrevSlot());
+ // The copy could be to an aliased physreg.
+ if (!lr) continue;
+ RHSValsDefinedFromLHS[VNI] = lr->valno;
+ }
- LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
- RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
- NewVNInfo.reserve(LHS.getNumValNums() + RHS.getNumValNums());
+ LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
+ RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
+ NewVNInfo.reserve(LHS.getNumValNums() + RHS.getNumValNums());
- for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
- i != e; ++i) {
- VNInfo *VNI = *i;
- unsigned VN = VNI->id;
- if (LHSValNoAssignments[VN] >= 0 || VNI->isUnused())
- continue;
- ComputeUltimateVN(VNI, NewVNInfo,
- LHSValsDefinedFromRHS, RHSValsDefinedFromLHS,
- LHSValNoAssignments, RHSValNoAssignments);
+ for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
+ i != e; ++i) {
+ VNInfo *VNI = *i;
+ unsigned VN = VNI->id;
+ if (LHSValNoAssignments[VN] >= 0 || VNI->isUnused())
+ continue;
+ ComputeUltimateVN(VNI, NewVNInfo,
+ LHSValsDefinedFromRHS, RHSValsDefinedFromLHS,
+ LHSValNoAssignments, RHSValNoAssignments);
+ }
+ for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
+ i != e; ++i) {
+ VNInfo *VNI = *i;
+ unsigned VN = VNI->id;
+ if (RHSValNoAssignments[VN] >= 0 || VNI->isUnused())
+ continue;
+ // If this value number isn't a copy from the LHS, it's a new number.
+ if (RHSValsDefinedFromLHS.find(VNI) == RHSValsDefinedFromLHS.end()) {
+ NewVNInfo.push_back(VNI);
+ RHSValNoAssignments[VN] = NewVNInfo.size()-1;
+ continue;
}
- for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
- i != e; ++i) {
- VNInfo *VNI = *i;
- unsigned VN = VNI->id;
- if (RHSValNoAssignments[VN] >= 0 || VNI->isUnused())
- continue;
- // If this value number isn't a copy from the LHS, it's a new number.
- if (RHSValsDefinedFromLHS.find(VNI) == RHSValsDefinedFromLHS.end()) {
- NewVNInfo.push_back(VNI);
- RHSValNoAssignments[VN] = NewVNInfo.size()-1;
- continue;
- }
- ComputeUltimateVN(VNI, NewVNInfo,
- RHSValsDefinedFromLHS, LHSValsDefinedFromRHS,
- RHSValNoAssignments, LHSValNoAssignments);
- }
+ ComputeUltimateVN(VNI, NewVNInfo,
+ RHSValsDefinedFromLHS, LHSValsDefinedFromRHS,
+ RHSValNoAssignments, LHSValNoAssignments);
}
// Armed with the mappings of LHS/RHS values to ultimate values, walk the
@@ -2376,15 +1400,17 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
LiveInterval::const_iterator JE = RHS.end();
// Skip ahead until the first place of potential sharing.
- if (I->start < J->start) {
- I = std::upper_bound(I, IE, J->start);
- if (I != LHS.begin()) --I;
- } else if (J->start < I->start) {
- J = std::upper_bound(J, JE, I->start);
- if (J != RHS.begin()) --J;
+ if (I != IE && J != JE) {
+ if (I->start < J->start) {
+ I = std::upper_bound(I, IE, J->start);
+ if (I != LHS.begin()) --I;
+ } else if (J->start < I->start) {
+ J = std::upper_bound(J, JE, I->start);
+ if (J != RHS.begin()) --J;
+ }
}
- while (1) {
+ while (I != IE && J != JE) {
// Determine if these two live ranges overlap.
bool Overlaps;
if (I->start < J->start) {
@@ -2406,13 +1432,10 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
return false;
}
- if (I->end < J->end) {
+ if (I->end < J->end)
++I;
- if (I == IE) break;
- } else {
+ else
++J;
- if (J == JE) break;
- }
}
// Update kill info. Some live ranges are extended due to copy coalescing.
@@ -2420,10 +1443,8 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
E = LHSValsDefinedFromRHS.end(); I != E; ++I) {
VNInfo *VNI = I->first;
unsigned LHSValID = LHSValNoAssignments[VNI->id];
- NewVNInfo[LHSValID]->removeKill(VNI->def);
if (VNI->hasPHIKill())
NewVNInfo[LHSValID]->setHasPHIKill(true);
- RHS.addKills(NewVNInfo[LHSValID], VNI->kills);
}
// Update kill info. Some live ranges are extended due to copy coalescing.
@@ -2431,25 +1452,19 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
E = RHSValsDefinedFromLHS.end(); I != E; ++I) {
VNInfo *VNI = I->first;
unsigned RHSValID = RHSValNoAssignments[VNI->id];
- NewVNInfo[RHSValID]->removeKill(VNI->def);
if (VNI->hasPHIKill())
NewVNInfo[RHSValID]->setHasPHIKill(true);
- LHS.addKills(NewVNInfo[RHSValID], VNI->kills);
}
+ if (LHSValNoAssignments.empty())
+ LHSValNoAssignments.push_back(-1);
+ if (RHSValNoAssignments.empty())
+ RHSValNoAssignments.push_back(-1);
+
// If we get here, we know that we can coalesce the live ranges. Ask the
// intervals to coalesce themselves now.
- if ((RHS.ranges.size() > LHS.ranges.size() &&
- TargetRegisterInfo::isVirtualRegister(LHS.reg)) ||
- TargetRegisterInfo::isPhysicalRegister(RHS.reg)) {
- RHS.join(LHS, &RHSValNoAssignments[0], &LHSValNoAssignments[0], NewVNInfo,
- mri_);
- Swapped = true;
- } else {
- LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo,
- mri_);
- Swapped = false;
- }
+ LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo,
+ mri_);
return true;
}
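
With the simplified JoinIntervals above, the final feasibility test keeps the same shape: walk the two sorted range lists in lockstep and require that every genuine overlap maps both sides to the same merged value number. The sketch below models just that walk over plain vectors; Range, valno and the assignment arrays are simplified stand-ins, and the early-clobber check is left out.

#include <cstddef>
#include <iostream>
#include <vector>

struct Range { int start, end, valno; };          // half-open [start, end)

// True when every overlap between L and R is between ranges whose value
// numbers map to the same merged value number.
static bool rangesCompatible(const std::vector<Range> &L,
                             const std::vector<Range> &R,
                             const std::vector<int> &LAssign,
                             const std::vector<int> &RAssign) {
  std::size_t i = 0, j = 0;
  while (i < L.size() && j < R.size()) {
    bool overlaps = L[i].start < R[j].start ? L[i].end > R[j].start
                                            : R[j].end > L[i].start;
    if (overlaps && LAssign[L[i].valno] != RAssign[R[j].valno])
      return false;                             // real interference
    if (L[i].end < R[j].end) ++i; else ++j;     // advance the range ending first
  }
  return true;
}

int main() {
  std::vector<Range> L{{0, 10, 0}}, R{{4, 8, 0}};
  std::cout << rangesCompatible(L, R, {1}, {1}) << "\n"; // 1: same merged valno
  std::cout << rangesCompatible(L, R, {1}, {2}) << "\n"; // 0: interference
  return 0;
}
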
@@ -2488,26 +1503,19 @@ void SimpleRegisterCoalescing::CopyCoalesceInMBB(MachineBasicBlock *MBB,
MachineInstr *Inst = MII++;
// If this isn't a copy or an extract_subreg, we can't join intervals.
- unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- bool isInsUndef = false;
- if (Inst->isExtractSubreg()) {
+ unsigned SrcReg, DstReg;
+ if (Inst->isCopy()) {
DstReg = Inst->getOperand(0).getReg();
SrcReg = Inst->getOperand(1).getReg();
- } else if (Inst->isInsertSubreg()) {
+ } else if (Inst->isSubregToReg()) {
DstReg = Inst->getOperand(0).getReg();
SrcReg = Inst->getOperand(2).getReg();
- if (Inst->getOperand(1).isUndef())
- isInsUndef = true;
- } else if (Inst->isInsertSubreg() || Inst->isSubregToReg()) {
- DstReg = Inst->getOperand(0).getReg();
- SrcReg = Inst->getOperand(2).getReg();
- } else if (!tii_->isMoveInstr(*Inst, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
+ } else
continue;
bool SrcIsPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg);
bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
- if (isInsUndef ||
- (li_->hasInterval(SrcReg) && li_->getInterval(SrcReg).empty()))
+ if (li_->hasInterval(SrcReg) && li_->getInterval(SrcReg).empty())
ImpDefCopies.push_back(CopyRec(Inst, 0));
else if (SrcIsPhys || DstIsPhys)
PhysCopies.push_back(CopyRec(Inst, 0));
@@ -2627,10 +1635,7 @@ SimpleRegisterCoalescing::lastRegisterUse(SlotIndex Start,
E = mri_->use_nodbg_end(); I != E; ++I) {
MachineOperand &Use = I.getOperand();
MachineInstr *UseMI = Use.getParent();
- unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- if (tii_->isMoveInstr(*UseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
- SrcReg == DstReg)
- // Ignore identity copies.
+ if (UseMI->isIdentityCopy())
continue;
SlotIndex Idx = li_->getInstructionIndex(UseMI);
// FIXME: Should this be Idx != UseIdx? SlotIndex() will return something
@@ -2656,9 +1661,7 @@ SimpleRegisterCoalescing::lastRegisterUse(SlotIndex Start,
return NULL;
// Ignore identity copies.
- unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- if (!(tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
- SrcReg == DstReg))
+ if (!MI->isIdentityCopy())
for (unsigned i = 0, NumOps = MI->getNumOperands(); i != NumOps; ++i) {
MachineOperand &Use = MI->getOperand(i);
if (Use.isReg() && Use.isUse() && Use.getReg() &&
@@ -2674,13 +1677,6 @@ SimpleRegisterCoalescing::lastRegisterUse(SlotIndex Start,
return NULL;
}
-void SimpleRegisterCoalescing::printRegName(unsigned reg) const {
- if (TargetRegisterInfo::isPhysicalRegister(reg))
- dbgs() << tri_->getName(reg);
- else
- dbgs() << "%reg" << reg;
-}
-
void SimpleRegisterCoalescing::releaseMemory() {
JoinedCopies.clear();
ReMatCopies.clear();
@@ -2701,7 +1697,6 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
<< "********** Function: "
<< ((Value*)mf_->getFunction())->getName() << '\n');
- allocatableRegs_ = tri_->getAllocatableSet(fn);
for (TargetRegisterInfo::regclass_iterator I = tri_->regclass_begin(),
E = tri_->regclass_end(); I != E; ++I)
allocatableRCRegs_.insert(std::make_pair(*I,
@@ -2729,31 +1724,35 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
for (MachineBasicBlock::iterator mii = mbb->begin(), mie = mbb->end();
mii != mie; ) {
MachineInstr *MI = mii;
- unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
if (JoinedCopies.count(MI)) {
// Delete all coalesced copies.
bool DoDelete = true;
- if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
- assert((MI->isExtractSubreg() || MI->isInsertSubreg() ||
- MI->isSubregToReg()) && "Unrecognized copy instruction");
- DstReg = MI->getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(DstReg))
- // Do not delete extract_subreg, insert_subreg of physical
- // registers unless the definition is dead. e.g.
- // %DO<def> = INSERT_SUBREG %D0<undef>, %S0<kill>, 1
- // or else the scavenger may complain. LowerSubregs will
- // delete them later.
- DoDelete = false;
- }
- if (MI->registerDefIsDead(DstReg)) {
- LiveInterval &li = li_->getInterval(DstReg);
+ assert(MI->isCopyLike() && "Unrecognized copy instruction");
+ unsigned SrcReg = MI->getOperand(MI->isSubregToReg() ? 2 : 1).getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
+ MI->getNumOperands() > 2)
+ // Do not delete extract_subreg, insert_subreg of physical
+ // registers unless the definition is dead. e.g.
+ // %DO<def> = INSERT_SUBREG %D0<undef>, %S0<kill>, 1
+ // or else the scavenger may complain. LowerSubregs will
+ // delete them later.
+ DoDelete = false;
+
+ if (MI->allDefsAreDead()) {
+ LiveInterval &li = li_->getInterval(SrcReg);
if (!ShortenDeadCopySrcLiveRange(li, MI))
ShortenDeadCopyLiveRange(li, MI);
DoDelete = true;
}
- if (!DoDelete)
+ if (!DoDelete) {
+ // We need the instruction to adjust liveness, so make it a KILL.
+ if (MI->isSubregToReg()) {
+ MI->RemoveOperand(3);
+ MI->RemoveOperand(1);
+ }
+ MI->setDesc(tii_->get(TargetOpcode::KILL));
mii = llvm::next(mii);
- else {
+ } else {
li_->RemoveMachineInstrFromMaps(MI);
mii = mbbi->erase(mii);
++numPeep;
@@ -2776,7 +1775,7 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
if (MO.isDead())
continue;
if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
- !mri_->use_empty(Reg)) {
+ !mri_->use_nodbg_empty(Reg)) {
isDead = false;
break;
}
@@ -2795,13 +1794,13 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
}
// If the move will be an identity move delete it
- bool isMove= tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx);
- if (isMove && SrcReg == DstReg) {
+ if (MI->isIdentityCopy()) {
+ unsigned SrcReg = MI->getOperand(1).getReg();
if (li_->hasInterval(SrcReg)) {
LiveInterval &RegInt = li_->getInterval(SrcReg);
// If def of this move instruction is dead, remove its live range
- // from the dstination register's live interval.
- if (MI->registerDefIsDead(DstReg)) {
+ // from the destination register's live interval.
+ if (MI->allDefsAreDead()) {
if (!ShortenDeadCopySrcLiveRange(RegInt, MI))
ShortenDeadCopyLiveRange(RegInt, MI);
}
@@ -2809,8 +1808,21 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
li_->RemoveMachineInstrFromMaps(MI);
mii = mbbi->erase(mii);
++numPeep;
- } else {
- ++mii;
+ continue;
+ }
+
+ ++mii;
+
+ // Check for now unnecessary kill flags.
+ if (li_->isNotInMIMap(MI)) continue;
+ SlotIndex DefIdx = li_->getInstructionIndex(MI).getDefIndex();
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isKill()) continue;
+ unsigned reg = MO.getReg();
+ if (!reg || !li_->hasInterval(reg)) continue;
+ if (!li_->getInterval(reg).killedAt(DefIdx))
+ MO.setIsKill(false);
}
}
}
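
The new loop at the end of runOnMachineFunction clears kill flags that coalescing has made stale: an operand keeps its kill marker only if the register's live interval is really killed at that instruction's def index. A toy version of that cleanup, using a set of (register, index) pairs as a hypothetical stand-in for LiveInterval::killedAt():

#include <iostream>
#include <set>
#include <utility>
#include <vector>

struct Operand { int reg; bool isKill; };

// Drop kill flags on operands whose register is not actually killed at
// 'instrIndex' according to the recorded kill points.
static void clearStaleKills(std::vector<Operand> &ops, int instrIndex,
                            const std::set<std::pair<int, int>> &killPoints) {
  for (Operand &op : ops)
    if (op.isKill && !killPoints.count({op.reg, instrIndex}))
      op.isKill = false;   // live range was extended by coalescing
}

int main() {
  std::vector<Operand> ops{{1024, true}, {1025, true}};
  std::set<std::pair<int, int>> kills{{1024, 7}};  // only reg1024 dies at 7
  clearStaleKills(ops, 7, kills);
  std::cout << ops[0].isKill << ops[1].isKill << "\n"; // prints "10"
  return 0;
}
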
diff --git a/libclamav/c++/llvm/lib/CodeGen/SimpleRegisterCoalescing.h b/libclamav/c++/llvm/lib/CodeGen/SimpleRegisterCoalescing.h
index f668064..855bdb9 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SimpleRegisterCoalescing.h
+++ b/libclamav/c++/llvm/lib/CodeGen/SimpleRegisterCoalescing.h
@@ -47,7 +47,6 @@ namespace llvm {
const MachineLoopInfo* loopInfo;
AliasAnalysis *AA;
- BitVector allocatableRegs_;
DenseMap<const TargetRegisterClass*, BitVector> allocatableRCRegs_;
/// JoinedCopies - Keep track of copies eliminated due to coalescing.
@@ -64,7 +63,7 @@ namespace llvm {
public:
static char ID; // Pass identification, replacement for typeid
- SimpleRegisterCoalescing() : MachineFunctionPass(&ID) {}
+ SimpleRegisterCoalescing() : MachineFunctionPass(ID) {}
struct InstrSlots {
enum {
@@ -105,21 +104,12 @@ namespace llvm {
/// possible to coalesce this interval, but it may be possible if other
/// things get coalesced, then it returns true by reference in 'Again'.
bool JoinCopy(CopyRec &TheCopy, bool &Again);
-
+
/// JoinIntervals - Attempt to join these two intervals. On failure, this
- /// returns false. Otherwise, if one of the intervals being joined is a
- /// physreg, this method always canonicalizes DestInt to be it. The output
- /// "SrcInt" will not have been modified, so we can use this information
- /// below to update aliases.
- bool JoinIntervals(LiveInterval &LHS, LiveInterval &RHS, bool &Swapped);
-
- /// SimpleJoin - Attempt to join the specified interval into this one. The
- /// caller of this method must guarantee that the RHS only contains a single
- /// value number and that the RHS is not defined by a copy from this
- /// interval. This returns false if the intervals are not joinable, or it
- /// joins them and returns true.
- bool SimpleJoin(LiveInterval &LHS, LiveInterval &RHS);
-
+ /// returns false. The output "SrcInt" will not have been modified, so we can
+ /// use this information below to update aliases.
+ bool JoinIntervals(CoalescerPair &CP);
+
/// Return true if the two specified registers belong to different register
/// classes. The registers may be either phys or virt regs.
bool differingRegisterClasses(unsigned RegA, unsigned RegB) const;
@@ -128,8 +118,7 @@ namespace llvm {
/// the source value number is defined by a copy from the destination reg
/// see if we can merge these two destination reg valno# into a single
/// value number, eliminating a copy.
- bool AdjustCopiesBackFrom(LiveInterval &IntA, LiveInterval &IntB,
- MachineInstr *CopyMI);
+ bool AdjustCopiesBackFrom(const CoalescerPair &CP, MachineInstr *CopyMI);
/// HasOtherReachingDefs - Return true if there are definitions of IntB
/// other than BValNo val# that can reach uses of AValno val# of IntA.
@@ -140,8 +129,7 @@ namespace llvm {
/// If the source value number is defined by a commutable instruction and
/// its other operand is coalesced to the copy dest register, see if we
/// can transform the copy into a noop by commuting the definition.
- bool RemoveCopyByCommutingDef(LiveInterval &IntA, LiveInterval &IntB,
- MachineInstr *CopyMI);
+ bool RemoveCopyByCommutingDef(const CoalescerPair &CP,MachineInstr *CopyMI);
/// TrimLiveIntervalToLastUse - If there is a last use in the same basic
/// block as the copy instruction, trim the live interval to the last use
@@ -155,74 +143,20 @@ namespace llvm {
bool ReMaterializeTrivialDef(LiveInterval &SrcInt, unsigned DstReg,
unsigned DstSubIdx, MachineInstr *CopyMI);
- /// CanCoalesceWithImpDef - Returns true if the specified copy instruction
- /// from an implicit def to another register can be coalesced away.
- bool CanCoalesceWithImpDef(MachineInstr *CopyMI,
- LiveInterval &li, LiveInterval &ImpLi) const;
-
- /// TurnCopiesFromValNoToImpDefs - The specified value# is defined by an
- /// implicit_def and it is being removed. Turn all copies from this value#
- /// into implicit_defs.
- void TurnCopiesFromValNoToImpDefs(LiveInterval &li, VNInfo *VNI);
-
- /// isWinToJoinVRWithSrcPhysReg - Return true if it's worth while to join a
- /// a virtual destination register with physical source register.
- bool isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI,
- MachineBasicBlock *CopyMBB,
- LiveInterval &DstInt, LiveInterval &SrcInt);
-
- /// isWinToJoinVRWithDstPhysReg - Return true if it's worth while to join a
- /// copy from a virtual source register to a physical destination register.
- bool isWinToJoinVRWithDstPhysReg(MachineInstr *CopyMI,
- MachineBasicBlock *CopyMBB,
- LiveInterval &DstInt, LiveInterval &SrcInt);
-
/// isWinToJoinCrossClass - Return true if it's profitable to coalesce
/// two virtual registers from different register classes.
- bool isWinToJoinCrossClass(unsigned LargeReg, unsigned SmallReg,
- unsigned Threshold);
-
- /// HasIncompatibleSubRegDefUse - If we are trying to coalesce a virtual
- /// register with a physical register, check if any of the virtual register
- /// operand is a sub-register use or def. If so, make sure it won't result
- /// in an illegal extract_subreg or insert_subreg instruction.
- bool HasIncompatibleSubRegDefUse(MachineInstr *CopyMI,
- unsigned VirtReg, unsigned PhysReg);
-
- /// CanJoinExtractSubRegToPhysReg - Return true if it's possible to coalesce
- /// an extract_subreg where dst is a physical register, e.g.
- /// cl = EXTRACT_SUBREG reg1024, 1
- bool CanJoinExtractSubRegToPhysReg(unsigned DstReg, unsigned SrcReg,
- unsigned SubIdx, unsigned &RealDstReg);
-
- /// CanJoinInsertSubRegToPhysReg - Return true if it's possible to coalesce
- /// an insert_subreg where src is a physical register, e.g.
- /// reg1024 = INSERT_SUBREG reg1024, c1, 0
- bool CanJoinInsertSubRegToPhysReg(unsigned DstReg, unsigned SrcReg,
- unsigned SubIdx, unsigned &RealDstReg);
-
- /// ValueLiveAt - Return true if the LiveRange pointed to by the given
- /// iterator, or any subsequent range with the same value number,
- /// is live at the given point.
- bool ValueLiveAt(LiveInterval::iterator LRItr, LiveInterval::iterator LREnd,
- SlotIndex defPoint) const;
-
- /// RangeIsDefinedByCopyFromReg - Return true if the specified live range of
- /// the specified live interval is defined by a copy from the specified
- /// register.
- bool RangeIsDefinedByCopyFromReg(LiveInterval &li, LiveRange *LR,
- unsigned Reg);
+ bool isWinToJoinCrossClass(unsigned SrcReg,
+ unsigned DstReg,
+ const TargetRegisterClass *SrcRC,
+ const TargetRegisterClass *DstRC,
+ const TargetRegisterClass *NewRC);
/// UpdateRegDefsUses - Replace all defs and uses of SrcReg to DstReg and
/// update the subregister number if it is not zero. If DstReg is a
/// physical register and the existing subregister number of the def / use
/// being updated is not zero, make sure to set it to the correct physical
/// subregister.
- void UpdateRegDefsUses(unsigned SrcReg, unsigned DstReg, unsigned SubIdx);
-
- /// RemoveUnnecessaryKills - Remove kill markers that are no longer accurate
- /// due to live range lengthening as the result of coalescing.
- void RemoveUnnecessaryKills(unsigned Reg, LiveInterval &LI);
+ void UpdateRegDefsUses(const CoalescerPair &CP);
/// ShortenDeadCopyLiveRange - Shorten a live range defined by a dead copy.
/// Return true if live interval is removed.
@@ -239,12 +173,14 @@ namespace llvm {
/// it as well.
bool RemoveDeadDef(LiveInterval &li, MachineInstr *DefMI);
+ /// RemoveCopyFlag - If DstReg is no longer defined by CopyMI, clear the
+ /// VNInfo copy flag for DstReg and all aliases.
+ void RemoveCopyFlag(unsigned DstReg, const MachineInstr *CopyMI);
+
/// lastRegisterUse - Returns the last use of the specific register between
/// cycles Start and End or NULL if there are no uses.
MachineOperand *lastRegisterUse(SlotIndex Start, SlotIndex End,
unsigned Reg, SlotIndex &LastUseIdx) const;
-
- void printRegName(unsigned reg) const;
};
} // End llvm namespace
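The interface above is what the coalescer uses to decide whether a register copy can be removed; the underlying test is that the two live intervals joined by the copy must not overlap. A minimal toy sketch of that interference check, with a hypothetical LiveSeg type standing in for LLVM's LiveInterval segments (sorted, half-open ranges assumed):

// Toy illustration only: hypothetical types, not LLVM's LiveInterval API.
#include <cstddef>
#include <vector>

struct LiveSeg { unsigned start, end; };          // half-open [start, end)

// Return true if two sorted segment lists share any program point; if they
// do not, the copy joining the two registers could in principle be coalesced.
bool interferes(const std::vector<LiveSeg> &a, const std::vector<LiveSeg> &b) {
  std::size_t i = 0, j = 0;
  while (i < a.size() && j < b.size()) {
    if (a[i].end <= b[j].start)      ++i;         // a's segment ends first
    else if (b[j].end <= a[i].start) ++j;         // b's segment ends first
    else return true;                             // segments overlap
  }
  return false;
}

int main() {
  std::vector<LiveSeg> A{{0, 10}, {20, 30}}, B{{10, 20}};
  return interferes(A, B) ? 1 : 0;                // disjoint, so returns 0
}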
diff --git a/libclamav/c++/llvm/lib/CodeGen/SjLjEHPrepare.cpp b/libclamav/c++/llvm/lib/CodeGen/SjLjEHPrepare.cpp
index 059e8d6..b637980 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SjLjEHPrepare.cpp
@@ -46,6 +46,8 @@ namespace {
Constant *UnregisterFn;
Constant *BuiltinSetjmpFn;
Constant *FrameAddrFn;
+ Constant *StackAddrFn;
+ Constant *StackRestoreFn;
Constant *LSDAAddrFn;
Value *PersonalityFn;
Constant *SelectorFn;
@@ -56,7 +58,7 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
explicit SjLjEHPass(const TargetLowering *tli = NULL)
- : FunctionPass(&ID), TLI(tli) { }
+ : FunctionPass(ID), TLI(tli) { }
bool doInitialization(Module &M);
bool runOnFunction(Function &F);
@@ -69,7 +71,7 @@ namespace {
void insertCallSiteStore(Instruction *I, int Number, Value *CallSite);
void markInvokeCallSite(InvokeInst *II, int InvokeNo, Value *CallSite,
SwitchInst *CatchSwitch);
- void splitLiveRangesLiveAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes);
+ void splitLiveRangesAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes);
bool insertSjLjEHSupport(Function &F);
};
} // end anonymous namespace
@@ -107,6 +109,8 @@ bool SjLjEHPass::doInitialization(Module &M) {
PointerType::getUnqual(FunctionContextTy),
(Type *)0);
FrameAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::frameaddress);
+ StackAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::stacksave);
+ StackRestoreFn = Intrinsic::getDeclaration(&M, Intrinsic::stackrestore);
BuiltinSetjmpFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_setjmp);
LSDAAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_lsda);
SelectorFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_selector);
@@ -175,8 +179,10 @@ static void MarkBlocksLiveIn(BasicBlock *BB, std::set<BasicBlock*> &LiveBBs) {
/// we spill into a stack location, guaranteeing that there is nothing live
/// across the unwind edge. This process also splits all critical edges
/// coming out of invoke's.
+/// FIXME: Move this function to a common utility file (Local.cpp?) so
+/// both SjLj and LowerInvoke can use it.
void SjLjEHPass::
-splitLiveRangesLiveAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
+splitLiveRangesAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
// First step, split all critical edges from invoke instructions.
for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
InvokeInst *II = Invokes[i];
@@ -198,16 +204,33 @@ splitLiveRangesLiveAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
++AfterAllocaInsertPt;
for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
AI != E; ++AI) {
- // This is always a no-op cast because we're casting AI to AI->getType() so
- // src and destination types are identical. BitCast is the only possibility.
- CastInst *NC = new BitCastInst(
- AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
- AI->replaceAllUsesWith(NC);
- // Normally its is forbidden to replace a CastInst's operand because it
- // could cause the opcode to reflect an illegal conversion. However, we're
- // replacing it here with the same value it was constructed with to simply
- // make NC its user.
- NC->setOperand(0, AI);
+ const Type *Ty = AI->getType();
+ // Aggregate types can't be cast, but are legal argument types, so we have
+ // to handle them differently. We use an extract/insert pair as a
+ // lightweight method to achieve the same goal.
+ if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
+ Instruction *EI = ExtractValueInst::Create(AI, 0, "",AfterAllocaInsertPt);
+ Instruction *NI = InsertValueInst::Create(AI, EI, 0);
+ NI->insertAfter(EI);
+ AI->replaceAllUsesWith(NI);
+ // Set the operand of the instructions back to the AllocaInst.
+ EI->setOperand(0, AI);
+ NI->setOperand(0, AI);
+ } else {
+ // This is always a no-op cast because we're casting AI to AI->getType()
+ // so src and destination types are identical. BitCast is the only
+ // possibility.
+ CastInst *NC = new BitCastInst(
+ AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
+ AI->replaceAllUsesWith(NC);
+ // Set the operand of the cast instruction back to the AllocaInst.
+ // Normally it's forbidden to replace a CastInst's operand because it
+ // could cause the opcode to reflect an illegal conversion. However,
+ // we're replacing it here with the same value it was constructed with.
+ // We do this because the above replaceAllUsesWith() clobbered the
+ // operand, but we want this one to remain.
+ NC->setOperand(0, AI);
+ }
}
// Finally, scan the code looking for instructions with bad live ranges.
@@ -266,6 +289,9 @@ splitLiveRangesLiveAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
}
// If we decided we need a spill, do it.
+ // FIXME: Spilling this way is overkill, as it forces all uses of
+ // the value to be reloaded from the stack slot, even those that aren't
+ // in the unwind blocks. We should be more selective.
if (NeedsSpill) {
++NumSpilled;
DemoteRegToStack(*Inst, true);
@@ -294,22 +320,34 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
// If we don't have any invokes or unwinds, there's nothing to do.
if (Unwinds.empty() && Invokes.empty()) return false;
- // Find the eh.selector.* and eh.exception calls. We'll use the first
- // eh.selector to determine the right personality function to use. For
- // SJLJ, we always use the same personality for the whole function,
- // not on a per-selector basis.
+ // Find the eh.selector.*, eh.exception and alloca calls.
+ //
+ // Remember any allocas() that aren't in the entry block, as the
+ // jmpbuf saved SP will need to be updated for them.
+ //
+ // We'll use the first eh.selector to determine the right personality
+ // function to use. For SJLJ, we always use the same personality for the
+ // whole function, not on a per-selector basis.
// FIXME: That's a bit ugly. Better way?
SmallVector<CallInst*,16> EH_Selectors;
SmallVector<CallInst*,16> EH_Exceptions;
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
+ SmallVector<Instruction*,16> JmpbufUpdatePoints;
+ // Note: Skip the entry block since there's nothing there that interests
+ // us. eh.selector and eh.exception shouldn't ever be there, and we
+ // want to disregard any allocas that are there.
+ for (Function::iterator BB = F.begin(), E = F.end(); ++BB != E;) {
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
if (CallInst *CI = dyn_cast<CallInst>(I)) {
if (CI->getCalledFunction() == SelectorFn) {
- if (!PersonalityFn) PersonalityFn = CI->getOperand(2);
+ if (!PersonalityFn) PersonalityFn = CI->getArgOperand(1);
EH_Selectors.push_back(CI);
} else if (CI->getCalledFunction() == ExceptionFn) {
EH_Exceptions.push_back(CI);
+ } else if (CI->getCalledFunction() == StackRestoreFn) {
+ JmpbufUpdatePoints.push_back(CI);
}
+ } else if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
+ JmpbufUpdatePoints.push_back(AI);
}
}
}
@@ -329,7 +367,7 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
// we spill into a stack location, guaranteeing that there is nothing live
// across the unwind edge. This process also splits all critical edges
// coming out of invoke's.
- splitLiveRangesLiveAcrossInvokes(Invokes);
+ splitLiveRangesAcrossInvokes(Invokes);
BasicBlock *EntryBB = F.begin();
// Create an alloca for the incoming jump buffer ptr and the new jump buffer
@@ -419,7 +457,7 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
// Populate the Function Context
// 1. LSDA address
// 2. Personality function address
- // 3. jmpbuf (save FP and call eh.sjlj.setjmp)
+ // 3. jmpbuf (save SP, FP and call eh.sjlj.setjmp)
// LSDA address
Idxs[0] = Zero;
@@ -440,31 +478,41 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
new StoreInst(PersonalityFn, PersonalityFieldPtr, true,
EntryBB->getTerminator());
- // Save the frame pointer.
+ // Save the frame pointer.
Idxs[1] = ConstantInt::get(Int32Ty, 5);
- Value *FieldPtr
+ Value *JBufPtr
= GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
"jbuf_gep",
EntryBB->getTerminator());
Idxs[1] = ConstantInt::get(Int32Ty, 0);
- Value *ElemPtr =
- GetElementPtrInst::Create(FieldPtr, Idxs, Idxs+2, "jbuf_fp_gep",
+ Value *FramePtr =
+ GetElementPtrInst::Create(JBufPtr, Idxs, Idxs+2, "jbuf_fp_gep",
EntryBB->getTerminator());
Value *Val = CallInst::Create(FrameAddrFn,
ConstantInt::get(Int32Ty, 0),
"fp",
EntryBB->getTerminator());
- new StoreInst(Val, ElemPtr, true, EntryBB->getTerminator());
- // Call the setjmp instrinsic. It fills in the rest of the jmpbuf
+ new StoreInst(Val, FramePtr, true, EntryBB->getTerminator());
+
+ // Save the stack pointer.
+ Idxs[1] = ConstantInt::get(Int32Ty, 2);
+ Value *StackPtr =
+ GetElementPtrInst::Create(JBufPtr, Idxs, Idxs+2, "jbuf_sp_gep",
+ EntryBB->getTerminator());
+
+ Val = CallInst::Create(StackAddrFn, "sp", EntryBB->getTerminator());
+ new StoreInst(Val, StackPtr, true, EntryBB->getTerminator());
+
+ // Call the setjmp instrinsic. It fills in the rest of the jmpbuf.
Value *SetjmpArg =
- CastInst::Create(Instruction::BitCast, FieldPtr,
+ CastInst::Create(Instruction::BitCast, JBufPtr,
Type::getInt8PtrTy(F.getContext()), "",
EntryBB->getTerminator());
Value *DispatchVal = CallInst::Create(BuiltinSetjmpFn, SetjmpArg,
"dispatch",
EntryBB->getTerminator());
- // check the return value of the setjmp. non-zero goes to dispatcher
+ // check the return value of the setjmp. non-zero goes to dispatcher.
Value *IsNormal = new ICmpInst(EntryBB->getTerminator(),
ICmpInst::ICMP_EQ, DispatchVal, Zero,
"notunwind");
@@ -509,6 +557,16 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
Unwinds[i]->eraseFromParent();
}
+ // Following any allocas not in the entry block, update the saved SP
+ // in the jmpbuf to the new value.
+ for (unsigned i = 0, e = JmpbufUpdatePoints.size(); i != e; ++i) {
+ Instruction *AI = JmpbufUpdatePoints[i];
+ Instruction *StackAddr = CallInst::Create(StackAddrFn, "sp");
+ StackAddr->insertAfter(AI);
+ Instruction *StoreStackAddr = new StoreInst(StackAddr, StackPtr, true);
+ StoreStackAddr->insertAfter(StackAddr);
+ }
+
// Finally, for any returns from this function, if this function contains an
// invoke, add a call to unregister the function context.
for (unsigned i = 0, e = Returns.size(); i != e; ++i)
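The hunks above save the stack pointer into the jmpbuf and spill anything live across an invoke, because after a setjmp-style unwind only values kept in memory are trustworthy. A standalone sketch of that constraint using plain setjmp/longjmp rather than the LLVM intrinsics: a local changed between setjmp and the longjmp must be volatile (i.e. kept in memory) to have a defined value on the second return of setjmp.

// Sketch of why values live across the unwind edge must live in memory.
#include <csetjmp>
#include <cstdio>

static std::jmp_buf buf;

static void unwind() { std::longjmp(buf, 1); }    // simulated "unwind"

int main() {
  volatile int live_across = 0;   // without volatile, its value after the
                                  // longjmp return would be indeterminate
  if (setjmp(buf) == 0) {         // first return: normal path
    live_across = 42;
    unwind();                     // never returns normally
  }
  std::printf("after unwind: %d\n", live_across);  // prints 42
  return 0;
}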
diff --git a/libclamav/c++/llvm/lib/CodeGen/SlotIndexes.cpp b/libclamav/c++/llvm/lib/CodeGen/SlotIndexes.cpp
index 6110ef5..1bc148f 100644
--- a/libclamav/c++/llvm/lib/CodeGen/SlotIndexes.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/SlotIndexes.cpp
@@ -40,7 +40,8 @@ namespace {
}
char SlotIndexes::ID = 0;
-static RegisterPass<SlotIndexes> X("slotindexes", "Slot index numbering");
+INITIALIZE_PASS(SlotIndexes, "slotindexes",
+ "Slot index numbering", false, false);
IndexListEntry* IndexListEntry::getEmptyKeyEntry() {
return &*IndexListEntryEmptyKey;
@@ -213,9 +214,11 @@ void SlotIndexes::dump() const {
// Print a SlotIndex to a raw_ostream.
void SlotIndex::print(raw_ostream &os) const {
- os << getIndex();
+ os << entry().getIndex();
if (isPHI())
os << "*";
+ else
+ os << "LudS"[getSlot()];
}
// Dump a SlotIndex to stderr.
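The new print() appends a one-character slot mnemonic by indexing a string literal with the slot number. The idiom is plain array indexing on a constant character array; a tiny sketch, where the Slot enum values 0..3 are an assumption inferred from the four-character string:

// Sketch of the "LudS"[slot] mnemonic idiom; the Slot enum here is assumed.
#include <cstdio>

enum Slot { LOAD = 0, USE = 1, DEF = 2, STORE = 3 };

char slotChar(Slot s) {
  return "LudS"[s];   // the string literal decays to const char*, so index it
}

int main() {
  std::printf("%c %c %c %c\n", slotChar(LOAD), slotChar(USE),
              slotChar(DEF), slotChar(STORE));    // prints: L u d S
  return 0;
}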
diff --git a/libclamav/c++/llvm/lib/CodeGen/Spiller.cpp b/libclamav/c++/llvm/lib/CodeGen/Spiller.cpp
index c06279d..59d5ab3 100644
--- a/libclamav/c++/llvm/lib/CodeGen/Spiller.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/Spiller.cpp
@@ -14,18 +14,21 @@
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <set>
using namespace llvm;
namespace {
- enum SpillerName { trivial, standard, splitting };
+ enum SpillerName { trivial, standard, splitting, inline_ };
}
static cl::opt<SpillerName>
@@ -35,6 +38,7 @@ spillerOpt("spiller",
cl::values(clEnumVal(trivial, "trivial spiller"),
clEnumVal(standard, "default spiller"),
clEnumVal(splitting, "splitting spiller"),
+ clEnumValN(inline_, "inline", "inline spiller"),
clEnumValEnd),
cl::init(standard));
@@ -46,27 +50,31 @@ namespace {
/// Utility class for spillers.
class SpillerBase : public Spiller {
protected:
-
+ MachineFunctionPass *pass;
MachineFunction *mf;
+ VirtRegMap *vrm;
LiveIntervals *lis;
MachineFrameInfo *mfi;
MachineRegisterInfo *mri;
const TargetInstrInfo *tii;
- VirtRegMap *vrm;
-
- /// Construct a spiller base.
- SpillerBase(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
- : mf(mf), lis(lis), vrm(vrm)
+ const TargetRegisterInfo *tri;
+
+ /// Construct a spiller base.
+ SpillerBase(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
+ : pass(&pass), mf(&mf), vrm(&vrm)
{
- mfi = mf->getFrameInfo();
- mri = &mf->getRegInfo();
- tii = mf->getTarget().getInstrInfo();
+ lis = &pass.getAnalysis<LiveIntervals>();
+ mfi = mf.getFrameInfo();
+ mri = &mf.getRegInfo();
+ tii = mf.getTarget().getInstrInfo();
+ tri = mf.getTarget().getRegisterInfo();
}
/// Add spill ranges for every use/def of the live interval, inserting loads
/// immediately before each use, and stores after each def. No folding or
/// remat is attempted.
- std::vector<LiveInterval*> trivialSpillEverywhere(LiveInterval *li) {
+ void trivialSpillEverywhere(LiveInterval *li,
+ SmallVectorImpl<LiveInterval*> &newIntervals) {
DEBUG(dbgs() << "Spilling everywhere " << *li << "\n");
assert(li->weight != HUGE_VALF &&
@@ -77,8 +85,6 @@ protected:
DEBUG(dbgs() << "Trivial spill everywhere of reg" << li->reg << "\n");
- std::vector<LiveInterval*> added;
-
const TargetRegisterClass *trc = mri->getRegClass(li->reg);
unsigned ss = vrm->assignVirt2StackSlot(li->reg);
@@ -95,7 +101,7 @@ protected:
do {
++regItr;
} while (regItr != mri->reg_end() && (&*regItr == mi));
-
+
// Collect uses & defs for this instr.
SmallVector<unsigned, 2> indices;
bool hasUse = false;
@@ -115,7 +121,7 @@ protected:
vrm->assignVirt2StackSlot(newVReg, ss);
LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
newLI->weight = HUGE_VALF;
-
+
// Update the reg operands & kill flags.
for (unsigned i = 0; i < indices.size(); ++i) {
unsigned mopIdx = indices[i];
@@ -130,108 +136,117 @@ protected:
// Insert reload if necessary.
MachineBasicBlock::iterator miItr(mi);
if (hasUse) {
- tii->loadRegFromStackSlot(*mi->getParent(), miItr, newVReg, ss, trc);
+ tii->loadRegFromStackSlot(*mi->getParent(), miItr, newVReg, ss, trc,
+ tri);
MachineInstr *loadInstr(prior(miItr));
SlotIndex loadIndex =
lis->InsertMachineInstrInMaps(loadInstr).getDefIndex();
+ vrm->addSpillSlotUse(ss, loadInstr);
SlotIndex endIndex = loadIndex.getNextIndex();
VNInfo *loadVNI =
newLI->getNextValue(loadIndex, 0, true, lis->getVNInfoAllocator());
- loadVNI->addKill(endIndex);
newLI->addRange(LiveRange(loadIndex, endIndex, loadVNI));
}
// Insert store if necessary.
if (hasDef) {
- tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), newVReg, true,
- ss, trc);
+ tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), newVReg,
+ true, ss, trc, tri);
MachineInstr *storeInstr(llvm::next(miItr));
SlotIndex storeIndex =
lis->InsertMachineInstrInMaps(storeInstr).getDefIndex();
+ vrm->addSpillSlotUse(ss, storeInstr);
SlotIndex beginIndex = storeIndex.getPrevIndex();
VNInfo *storeVNI =
newLI->getNextValue(beginIndex, 0, true, lis->getVNInfoAllocator());
- storeVNI->addKill(storeIndex);
newLI->addRange(LiveRange(beginIndex, storeIndex, storeVNI));
}
- added.push_back(newLI);
+ newIntervals.push_back(newLI);
}
-
- return added;
}
-
};
+} // end anonymous namespace
+
+namespace {
/// Spills any live range using the spill-everywhere method with no attempt at
/// folding.
class TrivialSpiller : public SpillerBase {
public:
- TrivialSpiller(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
- : SpillerBase(mf, lis, vrm) {}
+ TrivialSpiller(MachineFunctionPass &pass, MachineFunction &mf,
+ VirtRegMap &vrm)
+ : SpillerBase(pass, mf, vrm) {}
- std::vector<LiveInterval*> spill(LiveInterval *li,
- SmallVectorImpl<LiveInterval*> &spillIs,
- SlotIndex*) {
+ void spill(LiveInterval *li,
+ SmallVectorImpl<LiveInterval*> &newIntervals,
+ SmallVectorImpl<LiveInterval*> &) {
// Ignore spillIs - we don't use it.
- return trivialSpillEverywhere(li);
+ trivialSpillEverywhere(li, newIntervals);
}
-
};
+} // end anonymous namespace
+
+namespace {
+
/// Falls back on LiveIntervals::addIntervalsForSpills.
class StandardSpiller : public Spiller {
protected:
LiveIntervals *lis;
- const MachineLoopInfo *loopInfo;
+ MachineLoopInfo *loopInfo;
VirtRegMap *vrm;
public:
- StandardSpiller(LiveIntervals *lis, const MachineLoopInfo *loopInfo,
- VirtRegMap *vrm)
- : lis(lis), loopInfo(loopInfo), vrm(vrm) {}
+ StandardSpiller(MachineFunctionPass &pass, MachineFunction &mf,
+ VirtRegMap &vrm)
+ : lis(&pass.getAnalysis<LiveIntervals>()),
+ loopInfo(pass.getAnalysisIfAvailable<MachineLoopInfo>()),
+ vrm(&vrm) {}
/// Falls back on LiveIntervals::addIntervalsForSpills.
- std::vector<LiveInterval*> spill(LiveInterval *li,
- SmallVectorImpl<LiveInterval*> &spillIs,
- SlotIndex*) {
- return lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);
+ void spill(LiveInterval *li,
+ SmallVectorImpl<LiveInterval*> &newIntervals,
+ SmallVectorImpl<LiveInterval*> &spillIs) {
+ std::vector<LiveInterval*> added =
+ lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);
+ newIntervals.insert(newIntervals.end(), added.begin(), added.end());
}
-
};
+} // end anonymous namespace
+
+namespace {
+
/// When a call to spill is placed this spiller will first try to break the
/// interval up into its component values (one new interval per value).
/// If this fails, or if a call is placed to spill a previously split interval
-/// then the spiller falls back on the standard spilling mechanism.
+/// then the spiller falls back on the standard spilling mechanism.
class SplittingSpiller : public StandardSpiller {
public:
- SplittingSpiller(MachineFunction *mf, LiveIntervals *lis,
- const MachineLoopInfo *loopInfo, VirtRegMap *vrm)
- : StandardSpiller(lis, loopInfo, vrm) {
-
- mri = &mf->getRegInfo();
- tii = mf->getTarget().getInstrInfo();
- tri = mf->getTarget().getRegisterInfo();
+ SplittingSpiller(MachineFunctionPass &pass, MachineFunction &mf,
+ VirtRegMap &vrm)
+ : StandardSpiller(pass, mf, vrm) {
+ mri = &mf.getRegInfo();
+ tii = mf.getTarget().getInstrInfo();
+ tri = mf.getTarget().getRegisterInfo();
}
- std::vector<LiveInterval*> spill(LiveInterval *li,
- SmallVectorImpl<LiveInterval*> &spillIs,
- SlotIndex *earliestStart) {
-
- if (worthTryingToSplit(li)) {
- return tryVNISplit(li, earliestStart);
- }
- // else
- return StandardSpiller::spill(li, spillIs, earliestStart);
+ void spill(LiveInterval *li,
+ SmallVectorImpl<LiveInterval*> &newIntervals,
+ SmallVectorImpl<LiveInterval*> &spillIs) {
+ if (worthTryingToSplit(li))
+ tryVNISplit(li);
+ else
+ StandardSpiller::spill(li, newIntervals, spillIs);
}
private:
MachineRegisterInfo *mri;
const TargetInstrInfo *tii;
- const TargetRegisterInfo *tri;
+ const TargetRegisterInfo *tri;
DenseSet<LiveInterval*> alreadySplit;
bool worthTryingToSplit(LiveInterval *li) const {
@@ -239,8 +254,7 @@ private:
}
/// Try to break a LiveInterval into its component values.
- std::vector<LiveInterval*> tryVNISplit(LiveInterval *li,
- SlotIndex *earliestStart) {
+ std::vector<LiveInterval*> tryVNISplit(LiveInterval *li) {
DEBUG(dbgs() << "Trying VNI split of %reg" << *li << "\n");
@@ -248,42 +262,34 @@ private:
SmallVector<VNInfo*, 4> vnis;
std::copy(li->vni_begin(), li->vni_end(), std::back_inserter(vnis));
-
+
for (SmallVectorImpl<VNInfo*>::iterator vniItr = vnis.begin(),
vniEnd = vnis.end(); vniItr != vniEnd; ++vniItr) {
VNInfo *vni = *vniItr;
-
- // Skip unused VNIs, or VNIs with no kills.
- if (vni->isUnused() || vni->kills.empty())
+
+ // Skip unused VNIs.
+ if (vni->isUnused())
continue;
DEBUG(dbgs() << " Extracted Val #" << vni->id << " as ");
LiveInterval *splitInterval = extractVNI(li, vni);
-
+
if (splitInterval != 0) {
DEBUG(dbgs() << *splitInterval << "\n");
added.push_back(splitInterval);
alreadySplit.insert(splitInterval);
- if (earliestStart != 0) {
- if (splitInterval->beginIndex() < *earliestStart)
- *earliestStart = splitInterval->beginIndex();
- }
} else {
DEBUG(dbgs() << "0\n");
}
- }
+ }
DEBUG(dbgs() << "Original LI: " << *li << "\n");
// If there original interval still contains some live ranges
- // add it to added and alreadySplit.
+ // add it to added and alreadySplit.
if (!li->empty()) {
added.push_back(li);
alreadySplit.insert(li);
- if (earliestStart != 0) {
- if (li->beginIndex() < *earliestStart)
- *earliestStart = li->beginIndex();
- }
}
return added;
@@ -292,16 +298,15 @@ private:
/// Extract the given value number from the interval.
LiveInterval* extractVNI(LiveInterval *li, VNInfo *vni) const {
assert(vni->isDefAccurate() || vni->isPHIDef());
- assert(!vni->kills.empty());
- // Create a new vreg and live interval, copy VNI kills & ranges over.
+ // Create a new vreg and live interval, copy VNI ranges over.
const TargetRegisterClass *trc = mri->getRegClass(li->reg);
unsigned newVReg = mri->createVirtualRegister(trc);
vrm->grow();
LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
VNInfo *newVNI = newLI->createValueCopy(vni, lis->getVNInfoAllocator());
- // Start by copying all live ranges in the VN to the new interval.
+ // Start by copying all live ranges in the VN to the new interval.
for (LiveInterval::iterator rItr = li->begin(), rEnd = li->end();
rItr != rEnd; ++rItr) {
if (rItr->valno == vni) {
@@ -309,7 +314,7 @@ private:
}
}
- // Erase the old VNI & ranges.
+ // Erase the old VNI & ranges.
li->removeValNo(vni);
// Collect all current uses of the register belonging to the given VNI.
@@ -326,14 +331,13 @@ private:
// Insert a copy at the start of the MBB. The range proceeding the
// copy will be attached to the original LiveInterval.
MachineBasicBlock *defMBB = lis->getMBBFromIndex(newVNI->def);
- tii->copyRegToReg(*defMBB, defMBB->begin(), newVReg, li->reg, trc, trc);
- MachineInstr *copyMI = defMBB->begin();
- copyMI->addRegisterKilled(li->reg, tri);
+ MachineInstr *copyMI = BuildMI(*defMBB, defMBB->begin(), DebugLoc(),
+ tii->get(TargetOpcode::COPY), newVReg)
+ .addReg(li->reg, RegState::Kill);
SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
VNInfo *phiDefVNI = li->getNextValue(lis->getMBBStartIdx(defMBB),
0, false, lis->getVNInfoAllocator());
phiDefVNI->setIsPHIDef(true);
- phiDefVNI->addKill(copyIdx.getDefIndex());
li->addRange(LiveRange(phiDefVNI->def, copyIdx.getDefIndex(), phiDefVNI));
LiveRange *oldPHIDefRange =
newLI->getLiveRangeContaining(lis->getMBBStartIdx(defMBB));
@@ -356,8 +360,8 @@ private:
newVNI->setIsPHIDef(false); // not a PHI def anymore.
newVNI->setIsDefAccurate(true);
} else {
- // non-PHI def. Rename the def. If it's two-addr that means renaming the use
- // and inserting a new copy too.
+ // non-PHI def. Rename the def. If it's two-addr that means renaming the
+ // use and inserting a new copy too.
MachineInstr *defInst = lis->getInstructionFromIndex(newVNI->def);
// We'll rename this now, so we can remove it from uses.
uses.erase(defInst);
@@ -373,37 +377,26 @@ private:
twoAddrUseIsUndef = true;
}
}
-
+
SlotIndex defIdx = lis->getInstructionIndex(defInst);
newVNI->def = defIdx.getDefIndex();
if (isTwoAddr && !twoAddrUseIsUndef) {
MachineBasicBlock *defMBB = defInst->getParent();
- tii->copyRegToReg(*defMBB, defInst, newVReg, li->reg, trc, trc);
- MachineInstr *copyMI = prior(MachineBasicBlock::iterator(defInst));
+ MachineInstr *copyMI = BuildMI(*defMBB, defInst, DebugLoc(),
+ tii->get(TargetOpcode::COPY), newVReg)
+ .addReg(li->reg, RegState::Kill);
SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
- copyMI->addRegisterKilled(li->reg, tri);
LiveRange *origUseRange =
li->getLiveRangeContaining(newVNI->def.getUseIndex());
- VNInfo *origUseVNI = origUseRange->valno;
origUseRange->end = copyIdx.getDefIndex();
- bool updatedKills = false;
- for (unsigned k = 0; k < origUseVNI->kills.size(); ++k) {
- if (origUseVNI->kills[k] == defIdx.getDefIndex()) {
- origUseVNI->kills[k] = copyIdx.getDefIndex();
- updatedKills = true;
- break;
- }
- }
- assert(updatedKills && "Failed to update VNI kill list.");
VNInfo *copyVNI = newLI->getNextValue(copyIdx.getDefIndex(), copyMI,
true, lis->getVNInfoAllocator());
- copyVNI->addKill(defIdx.getDefIndex());
LiveRange copyRange(copyIdx.getDefIndex(),defIdx.getDefIndex(),copyVNI);
newLI->addRange(copyRange);
- }
+ }
}
-
+
for (std::set<MachineInstr*>::iterator
usesItr = uses.begin(), usesEnd = uses.end();
usesItr != usesEnd; ++usesItr) {
@@ -423,7 +416,7 @@ private:
// Check if this instr is two address.
unsigned useOpIdx = useInst->findRegisterUseOperandIdx(li->reg);
bool isTwoAddress = useInst->isRegTiedToDefOperand(useOpIdx);
-
+
// Rename uses (and defs for two-address instrs).
for (unsigned i = 0; i < useInst->getNumOperands(); ++i) {
MachineOperand &mo = useInst->getOperand(i);
@@ -439,9 +432,9 @@ private:
// reg.
MachineBasicBlock *useMBB = useInst->getParent();
MachineBasicBlock::iterator useItr(useInst);
- tii->copyRegToReg(*useMBB, llvm::next(useItr), li->reg, newVReg, trc, trc);
- MachineInstr *copyMI = llvm::next(useItr);
- copyMI->addRegisterKilled(newVReg, tri);
+ MachineInstr *copyMI = BuildMI(*useMBB, llvm::next(useItr), DebugLoc(),
+ tii->get(TargetOpcode::COPY), newVReg)
+ .addReg(li->reg, RegState::Kill);
SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
// Change the old two-address defined range & vni to start at
@@ -457,55 +450,44 @@ private:
VNInfo *copyVNI =
newLI->getNextValue(useIdx.getDefIndex(), 0, true,
lis->getVNInfoAllocator());
- copyVNI->addKill(copyIdx.getDefIndex());
LiveRange copyRange(useIdx.getDefIndex(),copyIdx.getDefIndex(),copyVNI);
newLI->addRange(copyRange);
}
}
-
- // Iterate over any PHI kills - we'll need to insert new copies for them.
- for (VNInfo::KillSet::iterator
- killItr = newVNI->kills.begin(), killEnd = newVNI->kills.end();
- killItr != killEnd; ++killItr) {
- SlotIndex killIdx(*killItr);
- if (killItr->isPHI()) {
- MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
- LiveRange *oldKillRange =
- newLI->getLiveRangeContaining(killIdx);
-
- assert(oldKillRange != 0 && "No kill range?");
-
- tii->copyRegToReg(*killMBB, killMBB->getFirstTerminator(),
- li->reg, newVReg, trc, trc);
- MachineInstr *copyMI = prior(killMBB->getFirstTerminator());
- copyMI->addRegisterKilled(newVReg, tri);
- SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
- // Save the current end. We may need it to add a new range if the
- // current range runs of the end of the MBB.
- SlotIndex newKillRangeEnd = oldKillRange->end;
- oldKillRange->end = copyIdx.getDefIndex();
+ // Iterate over any PHI kills - we'll need to insert new copies for them.
+ for (LiveInterval::iterator LRI = newLI->begin(), LRE = newLI->end();
+ LRI != LRE; ++LRI) {
+ if (LRI->valno != newVNI || LRI->end.isPHI())
+ continue;
+ SlotIndex killIdx = LRI->end;
+ MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
+ MachineInstr *copyMI = BuildMI(*killMBB, killMBB->getFirstTerminator(),
+ DebugLoc(), tii->get(TargetOpcode::COPY),
+ li->reg)
+ .addReg(newVReg, RegState::Kill);
+ SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
- if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
- assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
- "PHI kill range doesn't reach kill-block end. Not sane.");
- newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
- newKillRangeEnd, newVNI));
- }
+ // Save the current end. We may need it to add a new range if the
+ // current range runs of the end of the MBB.
+ SlotIndex newKillRangeEnd = LRI->end;
+ LRI->end = copyIdx.getDefIndex();
- *killItr = oldKillRange->end;
- VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
- copyMI, true,
- lis->getVNInfoAllocator());
- newKillVNI->addKill(lis->getMBBTerminatorGap(killMBB));
- newKillVNI->setHasPHIKill(true);
- li->addRange(LiveRange(copyIdx.getDefIndex(),
- lis->getMBBEndIdx(killMBB),
- newKillVNI));
+ if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
+ assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
+ "PHI kill range doesn't reach kill-block end. Not sane.");
+ newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
+ newKillRangeEnd, newVNI));
}
+ VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
+ copyMI, true,
+ lis->getVNInfoAllocator());
+ newKillVNI->setHasPHIKill(true);
+ li->addRange(LiveRange(copyIdx.getDefIndex(),
+ lis->getMBBEndIdx(killMBB),
+ newKillVNI));
}
-
newVNI->setHasPHIKill(false);
return newLI;
@@ -513,15 +495,23 @@ private:
};
+} // end anonymous namespace
+
+
+namespace llvm {
+Spiller *createInlineSpiller(MachineFunctionPass &pass,
+ MachineFunction &mf,
+ VirtRegMap &vrm);
}
-llvm::Spiller* llvm::createSpiller(MachineFunction *mf, LiveIntervals *lis,
- const MachineLoopInfo *loopInfo,
- VirtRegMap *vrm) {
+llvm::Spiller* llvm::createSpiller(MachineFunctionPass &pass,
+ MachineFunction &mf,
+ VirtRegMap &vrm) {
switch (spillerOpt) {
- case trivial: return new TrivialSpiller(mf, lis, vrm); break;
- case standard: return new StandardSpiller(lis, loopInfo, vrm); break;
- case splitting: return new SplittingSpiller(mf, lis, loopInfo, vrm); break;
- default: llvm_unreachable("Unreachable!"); break;
+ default: assert(0 && "unknown spiller");
+ case trivial: return new TrivialSpiller(pass, mf, vrm);
+ case standard: return new StandardSpiller(pass, mf, vrm);
+ case splitting: return new SplittingSpiller(pass, mf, vrm);
+ case inline_: return createInlineSpiller(pass, mf, vrm);
}
}
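trivialSpillEverywhere above inserts a reload immediately before each use and a store immediately after each def of the spilled register. A toy sketch of that spill-everywhere shape over a hypothetical instruction list (not LLVM's MachineInstr API; a real instruction can be both a use and a def, which this sketch ignores):

// Toy spill-everywhere: hypothetical Instr/Kind types, not LLVM's APIs.
#include <iterator>
#include <list>
#include <string>

enum Kind { USE, DEF, OTHER };
struct Instr { std::string text; Kind kind; };

// For every use insert a reload before it, for every def a store after it.
void spillEverywhere(std::list<Instr> &code, int stackSlot) {
  for (auto it = code.begin(); it != code.end(); ++it) {
    if (it->kind == USE)
      code.insert(it, Instr{"reload from slot " + std::to_string(stackSlot), OTHER});
    else if (it->kind == DEF)
      it = code.insert(std::next(it),
                       Instr{"store to slot " + std::to_string(stackSlot), OTHER});
  }
}

int main() {
  std::list<Instr> code{{"v1 = ...", DEF}, {"... = v1", USE}};
  spillEverywhere(code, 3);
  return (int)code.size();   // 4: the original two plus one store and one reload
}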
diff --git a/libclamav/c++/llvm/lib/CodeGen/Spiller.h b/libclamav/c++/llvm/lib/CodeGen/Spiller.h
index dda52e8..59bc0ec 100644
--- a/libclamav/c++/llvm/lib/CodeGen/Spiller.h
+++ b/libclamav/c++/llvm/lib/CodeGen/Spiller.h
@@ -11,19 +11,14 @@
#define LLVM_CODEGEN_SPILLER_H
#include "llvm/ADT/SmallVector.h"
-#include <vector>
namespace llvm {
class LiveInterval;
- class LiveIntervals;
- class LiveStacks;
class MachineFunction;
- class MachineInstr;
- class MachineLoopInfo;
+ class MachineFunctionPass;
class SlotIndex;
class VirtRegMap;
- class VNInfo;
/// Spiller interface.
///
@@ -33,17 +28,23 @@ namespace llvm {
public:
virtual ~Spiller() = 0;
- /// Spill the given live range. The method used will depend on the Spiller
- /// implementation selected.
- virtual std::vector<LiveInterval*> spill(LiveInterval *li,
- SmallVectorImpl<LiveInterval*> &spillIs,
- SlotIndex *earliestIndex = 0) = 0;
+ /// spill - Spill the given live interval. The method used will depend on
+ /// the Spiller implementation selected.
+ ///
+ /// @param li The live interval to be spilled.
+ /// @param spillIs A list of intervals that are about to be spilled,
+ /// and so cannot be used for remat etc.
+ /// @param newIntervals The newly created intervals will be appended here.
+ virtual void spill(LiveInterval *li,
+ SmallVectorImpl<LiveInterval*> &newIntervals,
+ SmallVectorImpl<LiveInterval*> &spillIs) = 0;
};
/// Create and return a spiller object, as specified on the command line.
- Spiller* createSpiller(MachineFunction *mf, LiveIntervals *li,
- const MachineLoopInfo *loopInfo, VirtRegMap *vrm);
+ Spiller* createSpiller(MachineFunctionPass &pass,
+ MachineFunction &mf,
+ VirtRegMap &vrm);
}
#endif
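The spill() signature now appends results to a caller-provided container instead of returning a fresh vector per call, so several spills can accumulate into one list without copies. A sketch of that output-parameter style, with std::vector standing in for SmallVectorImpl and an invented Interval type:

// Output-parameter style sketch; Interval and the spillers are hypothetical.
#include <vector>

struct Interval { int id; };

class Spiller {
public:
  virtual ~Spiller() {}
  // Newly created intervals are appended to newIntervals; spillIs lists
  // intervals already being spilled and so unavailable for reuse.
  virtual void spill(Interval *li,
                     std::vector<Interval*> &newIntervals,
                     std::vector<Interval*> &spillIs) = 0;
};

class TrivialSpillerSketch : public Spiller {
public:
  void spill(Interval *li, std::vector<Interval*> &newIntervals,
             std::vector<Interval*> &) override {
    // Pretend each spill produces one replacement interval.
    newIntervals.push_back(new Interval{li->id + 1000});
  }
};

int main() {
  TrivialSpillerSketch s;
  Interval a{1}, b{2};
  std::vector<Interval*> fresh, spillIs;
  s.spill(&a, fresh, spillIs);
  s.spill(&b, fresh, spillIs);      // both results accumulate in `fresh`
  for (Interval *p : fresh) delete p;
  return 0;
}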
diff --git a/libclamav/c++/llvm/lib/CodeGen/SplitKit.cpp b/libclamav/c++/llvm/lib/CodeGen/SplitKit.cpp
new file mode 100644
index 0000000..29474f0
--- /dev/null
+++ b/libclamav/c++/llvm/lib/CodeGen/SplitKit.cpp
@@ -0,0 +1,1097 @@
+//===---------- SplitKit.cpp - Toolkit for splitting live ranges ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the SplitAnalysis class as well as mutator functions for
+// live range splitting.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "splitter"
+#include "SplitKit.h"
+#include "VirtRegMap.h"
+#include "llvm/CodeGen/CalcSpillWeights.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+static cl::opt<bool>
+AllowSplit("spiller-splits-edges",
+ cl::desc("Allow critical edge splitting during spilling"));
+
+//===----------------------------------------------------------------------===//
+// Split Analysis
+//===----------------------------------------------------------------------===//
+
+SplitAnalysis::SplitAnalysis(const MachineFunction &mf,
+ const LiveIntervals &lis,
+ const MachineLoopInfo &mli)
+ : mf_(mf),
+ lis_(lis),
+ loops_(mli),
+ tii_(*mf.getTarget().getInstrInfo()),
+ curli_(0) {}
+
+void SplitAnalysis::clear() {
+ usingInstrs_.clear();
+ usingBlocks_.clear();
+ usingLoops_.clear();
+ curli_ = 0;
+}
+
+bool SplitAnalysis::canAnalyzeBranch(const MachineBasicBlock *MBB) {
+ MachineBasicBlock *T, *F;
+ SmallVector<MachineOperand, 4> Cond;
+ return !tii_.AnalyzeBranch(const_cast<MachineBasicBlock&>(*MBB), T, F, Cond);
+}
+
+/// analyzeUses - Count instructions, basic blocks, and loops using curli.
+void SplitAnalysis::analyzeUses() {
+ const MachineRegisterInfo &MRI = mf_.getRegInfo();
+ for (MachineRegisterInfo::reg_iterator I = MRI.reg_begin(curli_->reg);
+ MachineInstr *MI = I.skipInstruction();) {
+ if (MI->isDebugValue() || !usingInstrs_.insert(MI))
+ continue;
+ MachineBasicBlock *MBB = MI->getParent();
+ if (usingBlocks_[MBB]++)
+ continue;
+ if (MachineLoop *Loop = loops_.getLoopFor(MBB))
+ usingLoops_[Loop]++;
+ }
+ DEBUG(dbgs() << " counted "
+ << usingInstrs_.size() << " instrs, "
+ << usingBlocks_.size() << " blocks, "
+ << usingLoops_.size() << " loops.\n");
+}
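analyzeUses above relies on post-increment inside the map lookup (usingBlocks_[MBB]++) so only the first use seen in a block falls through to the per-block bookkeeping. A small sketch of that count-and-skip idiom with std::map (the block names are made up):

// Sketch of the "count, but only act on the first occurrence" idiom.
#include <cstdio>
#include <map>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> usesByBlock{"bb1", "bb2", "bb1", "bb1", "bb3"};
  std::map<std::string, unsigned> countPerBlock;
  unsigned blocksWithUses = 0;

  for (const std::string &bb : usesByBlock) {
    if (countPerBlock[bb]++)   // old value non-zero: block already counted
      continue;
    ++blocksWithUses;          // runs once per distinct block
  }
  std::printf("%u blocks, %zu uses\n", blocksWithUses, usesByBlock.size());
  return 0;
}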
+
+/// removeUse - Update statistics by noting that MI no longer uses curli.
+void SplitAnalysis::removeUse(const MachineInstr *MI) {
+ if (!usingInstrs_.erase(MI))
+ return;
+
+ // Decrement MBB count.
+ const MachineBasicBlock *MBB = MI->getParent();
+ BlockCountMap::iterator bi = usingBlocks_.find(MBB);
+ assert(bi != usingBlocks_.end() && "MBB missing");
+ assert(bi->second && "0 count in map");
+ if (--bi->second)
+ return;
+ // No more uses in MBB.
+ usingBlocks_.erase(bi);
+
+ // Decrement loop count.
+ MachineLoop *Loop = loops_.getLoopFor(MBB);
+ if (!Loop)
+ return;
+ LoopCountMap::iterator li = usingLoops_.find(Loop);
+ assert(li != usingLoops_.end() && "Loop missing");
+ assert(li->second && "0 count in map");
+ if (--li->second)
+ return;
+ // No more blocks in Loop.
+ usingLoops_.erase(li);
+}
+
+// Get three sets of basic blocks surrounding a loop: Blocks inside the loop,
+// predecessor blocks, and exit blocks.
+void SplitAnalysis::getLoopBlocks(const MachineLoop *Loop, LoopBlocks &Blocks) {
+ Blocks.clear();
+
+ // Blocks in the loop.
+ Blocks.Loop.insert(Loop->block_begin(), Loop->block_end());
+
+ // Predecessor blocks.
+ const MachineBasicBlock *Header = Loop->getHeader();
+ for (MachineBasicBlock::const_pred_iterator I = Header->pred_begin(),
+ E = Header->pred_end(); I != E; ++I)
+ if (!Blocks.Loop.count(*I))
+ Blocks.Preds.insert(*I);
+
+ // Exit blocks.
+ for (MachineLoop::block_iterator I = Loop->block_begin(),
+ E = Loop->block_end(); I != E; ++I) {
+ const MachineBasicBlock *MBB = *I;
+ for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(),
+ SE = MBB->succ_end(); SI != SE; ++SI)
+ if (!Blocks.Loop.count(*SI))
+ Blocks.Exits.insert(*SI);
+ }
+}
+
+/// analyzeLoopPeripheralUse - Return an enum describing how curli_ is used in
+/// and around the Loop.
+SplitAnalysis::LoopPeripheralUse SplitAnalysis::
+analyzeLoopPeripheralUse(const SplitAnalysis::LoopBlocks &Blocks) {
+ LoopPeripheralUse use = ContainedInLoop;
+ for (BlockCountMap::iterator I = usingBlocks_.begin(), E = usingBlocks_.end();
+ I != E; ++I) {
+ const MachineBasicBlock *MBB = I->first;
+ // Is this a peripheral block?
+ if (use < MultiPeripheral &&
+ (Blocks.Preds.count(MBB) || Blocks.Exits.count(MBB))) {
+ if (I->second > 1) use = MultiPeripheral;
+ else use = SinglePeripheral;
+ continue;
+ }
+ // Is it a loop block?
+ if (Blocks.Loop.count(MBB))
+ continue;
+ // It must be an unrelated block.
+ return OutsideLoop;
+ }
+ return use;
+}
+
+/// getCriticalExits - It may be necessary to partially break critical edges
+/// leaving the loop if an exit block has phi uses of curli. Collect the exit
+/// blocks that need special treatment into CriticalExits.
+void SplitAnalysis::getCriticalExits(const SplitAnalysis::LoopBlocks &Blocks,
+ BlockPtrSet &CriticalExits) {
+ CriticalExits.clear();
+
+ // A critical exit block contains a phi def of curli, and has a predecessor
+ // that is not in the loop nor a loop predecessor.
+ // For such an exit block, the edges carrying the new variable must be moved
+ // to a new pre-exit block.
+ for (BlockPtrSet::iterator I = Blocks.Exits.begin(), E = Blocks.Exits.end();
+ I != E; ++I) {
+ const MachineBasicBlock *Succ = *I;
+ SlotIndex SuccIdx = lis_.getMBBStartIdx(Succ);
+ VNInfo *SuccVNI = curli_->getVNInfoAt(SuccIdx);
+ // This exit may not have curli live in at all. No need to split.
+ if (!SuccVNI)
+ continue;
+ // If this is not a PHI def, it is either using a value from before the
+ // loop, or a value defined inside the loop. Both are safe.
+ if (!SuccVNI->isPHIDef() || SuccVNI->def.getBaseIndex() != SuccIdx)
+ continue;
+ // This exit block does have a PHI. Does it also have a predecessor that is
+ // not a loop block or loop predecessor?
+ for (MachineBasicBlock::const_pred_iterator PI = Succ->pred_begin(),
+ PE = Succ->pred_end(); PI != PE; ++PI) {
+ const MachineBasicBlock *Pred = *PI;
+ if (Blocks.Loop.count(Pred) || Blocks.Preds.count(Pred))
+ continue;
+ // This is a critical exit block, and we need to split the exit edge.
+ CriticalExits.insert(Succ);
+ break;
+ }
+ }
+}
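getCriticalExits marks an exit block as needing special treatment when, besides the phi condition checked above, it has a predecessor that is neither a loop block nor a loop predecessor. A toy version of just that predecessor test over a hypothetical CFG of integer block IDs (the phi-def condition is omitted here):

// Toy critical-exit test; block IDs and the CFG layout are made up.
#include <map>
#include <set>
#include <vector>

bool isCriticalExit(int exitBB,
                    const std::set<int> &loopBlocks,
                    const std::set<int> &loopPreds,
                    const std::map<int, std::vector<int>> &preds) {
  auto it = preds.find(exitBB);
  if (it == preds.end()) return false;
  for (int p : it->second)
    if (!loopBlocks.count(p) && !loopPreds.count(p))
      return true;     // also reached from outside the loop region
  return false;
}

int main() {
  std::set<int> loop{2, 3}, loopPreds{1};
  std::map<int, std::vector<int>> preds{{4, {3, 9}}};  // 9 is unrelated
  return isCriticalExit(4, loop, loopPreds, preds) ? 0 : 1;  // returns 0
}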
+
+/// canSplitCriticalExits - Return true if it is possible to insert new exit
+/// blocks before the blocks in CriticalExits.
+bool
+SplitAnalysis::canSplitCriticalExits(const SplitAnalysis::LoopBlocks &Blocks,
+ BlockPtrSet &CriticalExits) {
+ // If we don't allow critical edge splitting, require no critical exits.
+ if (!AllowSplit)
+ return CriticalExits.empty();
+
+ for (BlockPtrSet::iterator I = CriticalExits.begin(), E = CriticalExits.end();
+ I != E; ++I) {
+ const MachineBasicBlock *Succ = *I;
+ // We want to insert a new pre-exit MBB before Succ, and change all the
+ // in-loop blocks to branch to the pre-exit instead of Succ.
+ // Check that all the in-loop predecessors can be changed.
+ for (MachineBasicBlock::const_pred_iterator PI = Succ->pred_begin(),
+ PE = Succ->pred_end(); PI != PE; ++PI) {
+ const MachineBasicBlock *Pred = *PI;
+ // The external predecessors won't be altered.
+ if (!Blocks.Loop.count(Pred) && !Blocks.Preds.count(Pred))
+ continue;
+ if (!canAnalyzeBranch(Pred))
+ return false;
+ }
+
+ // If Succ's layout predecessor falls through, that too must be analyzable.
+ // We need to insert the pre-exit block in the gap.
+ MachineFunction::const_iterator MFI = Succ;
+ if (MFI == mf_.begin())
+ continue;
+ if (!canAnalyzeBranch(--MFI))
+ return false;
+ }
+ // No problems found.
+ return true;
+}
+
+void SplitAnalysis::analyze(const LiveInterval *li) {
+ clear();
+ curli_ = li;
+ analyzeUses();
+}
+
+const MachineLoop *SplitAnalysis::getBestSplitLoop() {
+ assert(curli_ && "Call analyze() before getBestSplitLoop");
+ if (usingLoops_.empty())
+ return 0;
+
+ LoopPtrSet Loops, SecondLoops;
+ LoopBlocks Blocks;
+ BlockPtrSet CriticalExits;
+
+ // Find first-class and second class candidate loops.
+ // We prefer to split around loops where curli is used outside the periphery.
+ for (LoopCountMap::const_iterator I = usingLoops_.begin(),
+ E = usingLoops_.end(); I != E; ++I) {
+ const MachineLoop *Loop = I->first;
+ getLoopBlocks(Loop, Blocks);
+
+ // FIXME: We need an SSA updater to properly handle multiple exit blocks.
+ if (Blocks.Exits.size() > 1) {
+ DEBUG(dbgs() << " multiple exits from " << *Loop);
+ continue;
+ }
+
+ LoopPtrSet *LPS = 0;
+ switch(analyzeLoopPeripheralUse(Blocks)) {
+ case OutsideLoop:
+ LPS = &Loops;
+ break;
+ case MultiPeripheral:
+ LPS = &SecondLoops;
+ break;
+ case ContainedInLoop:
+ DEBUG(dbgs() << " contained in " << *Loop);
+ continue;
+ case SinglePeripheral:
+ DEBUG(dbgs() << " single peripheral use in " << *Loop);
+ continue;
+ }
+ // Will it be possible to split around this loop?
+ getCriticalExits(Blocks, CriticalExits);
+ DEBUG(dbgs() << " " << CriticalExits.size() << " critical exits from "
+ << *Loop);
+ if (!canSplitCriticalExits(Blocks, CriticalExits))
+ continue;
+ // This is a possible split.
+ assert(LPS);
+ LPS->insert(Loop);
+ }
+
+ DEBUG(dbgs() << " getBestSplitLoop found " << Loops.size() << " + "
+ << SecondLoops.size() << " candidate loops.\n");
+
+ // If there are no first class loops available, look at second class loops.
+ if (Loops.empty())
+ Loops = SecondLoops;
+
+ if (Loops.empty())
+ return 0;
+
+ // Pick the earliest loop.
+ // FIXME: Are there other heuristics to consider?
+ const MachineLoop *Best = 0;
+ SlotIndex BestIdx;
+ for (LoopPtrSet::const_iterator I = Loops.begin(), E = Loops.end(); I != E;
+ ++I) {
+ SlotIndex Idx = lis_.getMBBStartIdx((*I)->getHeader());
+ if (!Best || Idx < BestIdx)
+ Best = *I, BestIdx = Idx;
+ }
+ DEBUG(dbgs() << " getBestSplitLoop found " << *Best);
+ return Best;
+}
+
+/// getMultiUseBlocks - if curli has more than one use in a basic block, it
+/// may be an advantage to split curli for the duration of the block.
+bool SplitAnalysis::getMultiUseBlocks(BlockPtrSet &Blocks) {
+ // If curli is local to one block, there is no point to splitting it.
+ if (usingBlocks_.size() <= 1)
+ return false;
+ // Add blocks with multiple uses.
+ for (BlockCountMap::iterator I = usingBlocks_.begin(), E = usingBlocks_.end();
+ I != E; ++I)
+ switch (I->second) {
+ case 0:
+ case 1:
+ continue;
+ case 2: {
+ // It doesn't pay to split a 2-instr block if it redefines curli.
+ VNInfo *VN1 = curli_->getVNInfoAt(lis_.getMBBStartIdx(I->first));
+ VNInfo *VN2 =
+ curli_->getVNInfoAt(lis_.getMBBEndIdx(I->first).getPrevIndex());
+ // live-in and live-out with a different value.
+ if (VN1 && VN2 && VN1 != VN2)
+ continue;
+ } // Fall through.
+ default:
+ Blocks.insert(I->first);
+ }
+ return !Blocks.empty();
+}
+
+//===----------------------------------------------------------------------===//
+// LiveIntervalMap
+//===----------------------------------------------------------------------===//
+
+// defValue - Introduce a li_ def for ParentVNI that could be later than
+// ParentVNI->def.
+VNInfo *LiveIntervalMap::defValue(const VNInfo *ParentVNI, SlotIndex Idx) {
+ assert(ParentVNI && "Mapping NULL value");
+ assert(Idx.isValid() && "Invalid SlotIndex");
+ assert(parentli_.getVNInfoAt(Idx) == ParentVNI && "Bad ParentVNI");
+
+ // Is this a simple 1-1 mapping? Not likely.
+ if (Idx == ParentVNI->def)
+ return mapValue(ParentVNI, Idx);
+
+ // This is a complex def. Mark with a NULL in valueMap.
+ VNInfo *OldVNI =
+ valueMap_.insert(
+ ValueMap::value_type(ParentVNI, static_cast<VNInfo *>(0))).first->second;
+ // The static_cast<VNInfo *> is only needed to work around a bug in an
+ // old version of the C++0x standard which the following compilers
+ // implemented and have yet to fix:
+ //
+ // Microsoft Visual Studio 2010 Version 10.0.30319.1 RTMRel
+ // Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 16.00.30319.01
+ //
+ // If/When we move to C++0x, this can be replaced by nullptr.
+ (void)OldVNI;
+ assert(OldVNI == 0 && "Simple/Complex values mixed");
+
+ // Should we insert a minimal snippet of VNI LiveRange, or can we count on
+ // callers to do that? We need it for lookups of complex values.
+ VNInfo *VNI = li_.getNextValue(Idx, 0, true, lis_.getVNInfoAllocator());
+ return VNI;
+}
+
+// mapValue - Find the mapped value for ParentVNI at Idx.
+// Potentially create phi-def values.
+VNInfo *LiveIntervalMap::mapValue(const VNInfo *ParentVNI, SlotIndex Idx) {
+ assert(ParentVNI && "Mapping NULL value");
+ assert(Idx.isValid() && "Invalid SlotIndex");
+ assert(parentli_.getVNInfoAt(Idx) == ParentVNI && "Bad ParentVNI");
+
+ // Use insert for lookup, so we can add missing values with a second lookup.
+ std::pair<ValueMap::iterator,bool> InsP =
+ valueMap_.insert(ValueMap::value_type(ParentVNI, static_cast<VNInfo *>(0)));
+ // The static_cast<VNInfo *> is only needed to work around a bug in an
+ // old version of the C++0x standard which the following compilers
+ // implemented and have yet to fix:
+ //
+ // Microsoft Visual Studio 2010 Version 10.0.30319.1 RTMRel
+ // Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 16.00.30319.01
+ //
+ // If/When we move to C++0x, this can be replaced by nullptr.
+
+ // This was an unknown value. Create a simple mapping.
+ if (InsP.second)
+ return InsP.first->second = li_.createValueCopy(ParentVNI,
+ lis_.getVNInfoAllocator());
+ // This was a simple mapped value.
+ if (InsP.first->second)
+ return InsP.first->second;
+
+ // This is a complex mapped value. There may be multiple defs, and we may need
+ // to create phi-defs.
+ MachineBasicBlock *IdxMBB = lis_.getMBBFromIndex(Idx);
+ assert(IdxMBB && "No MBB at Idx");
+
+ // Is there a def in the same MBB we can extend?
+ if (VNInfo *VNI = extendTo(IdxMBB, Idx))
+ return VNI;
+
+ // Now for the fun part. We know that ParentVNI potentially has multiple defs,
+ // and we may need to create even more phi-defs to preserve VNInfo SSA form.
+ // Perform a depth-first search for predecessor blocks where we know the
+ // dominating VNInfo. Insert phi-def VNInfos along the path back to IdxMBB.
+
+ // Track MBBs where we have created or learned the dominating value.
+ // This may change during the DFS as we create new phi-defs.
+ typedef DenseMap<MachineBasicBlock*, VNInfo*> MBBValueMap;
+ MBBValueMap DomValue;
+
+ for (idf_iterator<MachineBasicBlock*>
+ IDFI = idf_begin(IdxMBB),
+ IDFE = idf_end(IdxMBB); IDFI != IDFE;) {
+ MachineBasicBlock *MBB = *IDFI;
+ SlotIndex End = lis_.getMBBEndIdx(MBB);
+
+ // We are operating on the restricted CFG where ParentVNI is live.
+ if (parentli_.getVNInfoAt(End.getPrevSlot()) != ParentVNI) {
+ IDFI.skipChildren();
+ continue;
+ }
+
+ // Do we have a dominating value in this block?
+ VNInfo *VNI = extendTo(MBB, End);
+ if (!VNI) {
+ ++IDFI;
+ continue;
+ }
+
+ // Yes, VNI dominates MBB. Track the path back to IdxMBB, creating phi-defs
+ // as needed along the way.
+ for (unsigned PI = IDFI.getPathLength()-1; PI != 0; --PI) {
+ // Start from MBB's immediate successor. End at IdxMBB.
+ MachineBasicBlock *Succ = IDFI.getPath(PI-1);
+ std::pair<MBBValueMap::iterator, bool> InsP =
+ DomValue.insert(MBBValueMap::value_type(Succ, VNI));
+
+ // This is the first time we backtrack to Succ.
+ if (InsP.second)
+ continue;
+
+ // We reached Succ again with the same VNI. Nothing is going to change.
+ VNInfo *OVNI = InsP.first->second;
+ if (OVNI == VNI)
+ break;
+
+ // Succ already has a phi-def. No need to continue.
+ SlotIndex Start = lis_.getMBBStartIdx(Succ);
+ if (OVNI->def == Start)
+ break;
+
+ // We have a collision between the old and new VNI at Succ. That means
+ // neither dominates and we need a new phi-def.
+ VNI = li_.getNextValue(Start, 0, true, lis_.getVNInfoAllocator());
+ VNI->setIsPHIDef(true);
+ InsP.first->second = VNI;
+
+ // Replace OVNI with VNI in the remaining path.
+ for (; PI > 1 ; --PI) {
+ MBBValueMap::iterator I = DomValue.find(IDFI.getPath(PI-2));
+ if (I == DomValue.end() || I->second != OVNI)
+ break;
+ I->second = VNI;
+ }
+ }
+
+ // No need to search the children, we found a dominating value.
+ IDFI.skipChildren();
+ }
+
+ // The search should at least find a dominating value for IdxMBB.
+ assert(!DomValue.empty() && "Couldn't find a reaching definition");
+
+ // Since we went through the trouble of a full DFS visiting all reaching defs,
+ // the values in DomValue are now accurate. No more phi-defs are needed for
+ // these blocks, so we can color the live ranges.
+ // This makes the next mapValue call much faster.
+ VNInfo *IdxVNI = 0;
+ for (MBBValueMap::iterator I = DomValue.begin(), E = DomValue.end(); I != E;
+ ++I) {
+ MachineBasicBlock *MBB = I->first;
+ VNInfo *VNI = I->second;
+ SlotIndex Start = lis_.getMBBStartIdx(MBB);
+ if (MBB == IdxMBB) {
+ // Don't add full liveness to IdxMBB, stop at Idx.
+ if (Start != Idx)
+ li_.addRange(LiveRange(Start, Idx, VNI));
+ // The caller had better add some liveness to IdxVNI, or it leaks.
+ IdxVNI = VNI;
+ } else
+ li_.addRange(LiveRange(Start, lis_.getMBBEndIdx(MBB), VNI));
+ }
+
+ assert(IdxVNI && "Didn't find value for Idx");
+ return IdxVNI;
+}
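mapValue above uses a single map insert() both as lookup and as insertion point, branching on the returned bool so that "new key" and "already mapped" are told apart with one search. A sketch of that insert-for-lookup idiom with std::map (the value semantics here are invented):

// Sketch of insert-for-lookup: one tree walk handles both miss and hit.
#include <cstdio>
#include <map>
#include <string>

int main() {
  std::map<std::string, int> m;

  auto insertOrGet = [&m](const std::string &k, int fresh) {
    auto insP = m.insert({k, 0});    // 0 plays the "not yet filled in" role
    if (insP.second)                 // key was missing: fill it in now
      insP.first->second = fresh;
    return insP.first->second;       // existing or freshly inserted value
  };

  int first = insertOrGet("a", 7);   // inserts 7
  int second = insertOrGet("a", 9);  // key exists, keeps 7
  std::printf("%d %d\n", first, second);   // prints: 7 7
  return 0;
}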
+
+// extendTo - Find the last li_ value defined in MBB at or before Idx. The
+// parentli_ is assumed to be live at Idx. Extend the live range to Idx.
+// Return the found VNInfo, or NULL.
+VNInfo *LiveIntervalMap::extendTo(MachineBasicBlock *MBB, SlotIndex Idx) {
+ LiveInterval::iterator I = std::upper_bound(li_.begin(), li_.end(), Idx);
+ if (I == li_.begin())
+ return 0;
+ --I;
+ if (I->start < lis_.getMBBStartIdx(MBB))
+ return 0;
+ if (I->end < Idx)
+ I->end = Idx;
+ return I->valno;
+}
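extendTo finds the last segment starting at or before Idx by calling std::upper_bound on the sorted range and stepping back one element. The same idiom works on any sorted vector; a small sketch with a plain segment struct, using an explicit comparator where LLVM relies on LiveRange's own ordering:

// upper_bound + step-back: locate the last segment starting at or before idx.
#include <algorithm>
#include <cstdio>
#include <vector>

struct Seg { unsigned start, end; };   // half-open [start, end)

const Seg *lastStartingAtOrBefore(const std::vector<Seg> &segs, unsigned idx) {
  // Comparator answers: "is idx strictly before this segment's start?"
  auto it = std::upper_bound(segs.begin(), segs.end(), idx,
                             [](unsigned v, const Seg &s) { return v < s.start; });
  if (it == segs.begin())
    return nullptr;      // every segment starts after idx
  return &*--it;         // step back to the candidate segment
}

int main() {
  std::vector<Seg> segs{{0, 4}, {10, 14}, {20, 24}};   // sorted by start
  const Seg *s = lastStartingAtOrBefore(segs, 12);
  std::printf("[%u,%u)\n", s->start, s->end);          // prints [10,14)
  return 0;
}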
+
+// addSimpleRange - Add a simple range from parentli_ to li_.
+// ParentVNI must be live in the [Start;End) interval.
+void LiveIntervalMap::addSimpleRange(SlotIndex Start, SlotIndex End,
+ const VNInfo *ParentVNI) {
+ VNInfo *VNI = mapValue(ParentVNI, Start);
+ // A simple mappoing is easy.
+ if (VNI->def == ParentVNI->def) {
+ li_.addRange(LiveRange(Start, End, VNI));
+ return;
+ }
+
+ // ParentVNI is a complex value. We must map per MBB.
+ MachineFunction::iterator MBB = lis_.getMBBFromIndex(Start);
+ MachineFunction::iterator MBBE = lis_.getMBBFromIndex(End);
+
+ if (MBB == MBBE) {
+ li_.addRange(LiveRange(Start, End, VNI));
+ return;
+ }
+
+ // First block.
+ li_.addRange(LiveRange(Start, lis_.getMBBEndIdx(MBB), VNI));
+
+ // Run sequence of full blocks.
+ for (++MBB; MBB != MBBE; ++MBB) {
+ Start = lis_.getMBBStartIdx(MBB);
+ li_.addRange(LiveRange(Start, lis_.getMBBEndIdx(MBB),
+ mapValue(ParentVNI, Start)));
+ }
+
+ // Final block.
+ Start = lis_.getMBBStartIdx(MBB);
+ if (Start != End)
+ li_.addRange(LiveRange(Start, End, mapValue(ParentVNI, Start)));
+}
+
+/// addRange - Add live ranges to li_ where [Start;End) intersects parentli_.
+/// All needed values whose def is not inside [Start;End) must be defined
+/// beforehand so mapValue will work.
+void LiveIntervalMap::addRange(SlotIndex Start, SlotIndex End) {
+ LiveInterval::const_iterator B = parentli_.begin(), E = parentli_.end();
+ LiveInterval::const_iterator I = std::lower_bound(B, E, Start);
+
+ // Check if --I begins before Start and overlaps.
+ if (I != B) {
+ --I;
+ if (I->end > Start)
+ addSimpleRange(Start, std::min(End, I->end), I->valno);
+ ++I;
+ }
+
+ // The remaining ranges begin after Start.
+ for (;I != E && I->start < End; ++I)
+ addSimpleRange(I->start, std::min(End, I->end), I->valno);
+}
+
+//===----------------------------------------------------------------------===//
+// Split Editor
+//===----------------------------------------------------------------------===//
+
+/// Create a new SplitEditor for editing the LiveInterval analyzed by SA.
+SplitEditor::SplitEditor(SplitAnalysis &sa, LiveIntervals &lis, VirtRegMap &vrm,
+ SmallVectorImpl<LiveInterval*> &intervals)
+ : sa_(sa), lis_(lis), vrm_(vrm),
+ mri_(vrm.getMachineFunction().getRegInfo()),
+ tii_(*vrm.getMachineFunction().getTarget().getInstrInfo()),
+ curli_(sa_.getCurLI()),
+ dupli_(0), openli_(0),
+ intervals_(intervals),
+ firstInterval(intervals_.size())
+{
+ assert(curli_ && "SplitEditor created from empty SplitAnalysis");
+
+ // Make sure curli_ is assigned a stack slot, so all our intervals get the
+ // same slot as curli_.
+ if (vrm_.getStackSlot(curli_->reg) == VirtRegMap::NO_STACK_SLOT)
+ vrm_.assignVirt2StackSlot(curli_->reg);
+
+}
+
+LiveInterval *SplitEditor::createInterval() {
+ unsigned curli = sa_.getCurLI()->reg;
+ unsigned Reg = mri_.createVirtualRegister(mri_.getRegClass(curli));
+ LiveInterval &Intv = lis_.getOrCreateInterval(Reg);
+ vrm_.grow();
+ vrm_.assignVirt2StackSlot(Reg, vrm_.getStackSlot(curli));
+ return &Intv;
+}
+
+LiveInterval *SplitEditor::getDupLI() {
+ if (!dupli_) {
+ // Create an interval for dupli that is a copy of curli.
+ dupli_ = createInterval();
+ dupli_->Copy(*curli_, &mri_, lis_.getVNInfoAllocator());
+ }
+ return dupli_;
+}
+
+VNInfo *SplitEditor::mapValue(const VNInfo *curliVNI) {
+ VNInfo *&VNI = valueMap_[curliVNI];
+ if (!VNI)
+ VNI = openli_->createValueCopy(curliVNI, lis_.getVNInfoAllocator());
+ return VNI;
+}
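SplitEditor::mapValue caches per-value copies through a reference into the map: operator[] default-constructs a null pointer on first access, and assigning through that reference fills the same slot, so each call does exactly one lookup. A sketch of the cache-through-reference idiom, with std::map in place of DenseMap and a counter in place of the copied value:

// Get-or-create through the reference returned by operator[].
#include <cstdio>
#include <map>

int main() {
  std::map<int, int *> cache;
  int nextId = 0;

  auto getCopy = [&](int key) -> int * {
    int *&slot = cache[key];     // null on first access for this key
    if (!slot)
      slot = new int(nextId++);  // create once, reuse afterwards
    return slot;
  };

  int a = *getCopy(5);   // creates id 0
  int b = *getCopy(5);   // cache hit, still 0
  int c = *getCopy(7);   // creates id 1
  std::printf("%d %d %d\n", a, b, c);   // prints: 0 0 1
  for (auto &kv : cache) delete kv.second;
  return 0;
}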
+
+/// Insert a COPY instruction curli -> li. Allocate a new value from li
+/// defined by the COPY. Note that rewrite() will deal with the curli
+/// register, so this function can be used to copy from any interval - openli,
+/// curli, or dupli.
+VNInfo *SplitEditor::insertCopy(LiveInterval &LI,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) {
+ MachineInstr *MI = BuildMI(MBB, I, DebugLoc(), tii_.get(TargetOpcode::COPY),
+ LI.reg).addReg(curli_->reg);
+ SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
+ return LI.getNextValue(DefIdx, MI, true, lis_.getVNInfoAllocator());
+}
+
+/// Create a new virtual register and live interval.
+void SplitEditor::openIntv() {
+ assert(!openli_ && "Previous LI not closed before openIntv");
+ openli_ = createInterval();
+ intervals_.push_back(openli_);
+ liveThrough_ = false;
+}
+
+/// enterIntvBefore - Enter openli before the instruction at Idx. If curli is
+/// not live before Idx, a COPY is not inserted.
+void SplitEditor::enterIntvBefore(SlotIndex Idx) {
+ assert(openli_ && "openIntv not called before enterIntvBefore");
+
+ // Copy from curli_ if it is live.
+ if (VNInfo *CurVNI = curli_->getVNInfoAt(Idx.getUseIndex())) {
+ MachineInstr *MI = lis_.getInstructionFromIndex(Idx);
+ assert(MI && "enterIntvBefore called with invalid index");
+ VNInfo *VNI = insertCopy(*openli_, *MI->getParent(), MI);
+ openli_->addRange(LiveRange(VNI->def, Idx.getDefIndex(), VNI));
+
+ // Make sure CurVNI is properly mapped.
+ VNInfo *&mapVNI = valueMap_[CurVNI];
+ // We dont have SSA update yet, so only one entry per value is allowed.
+ assert(!mapVNI && "enterIntvBefore called more than once for the same value");
+ mapVNI = VNI;
+ }
+ DEBUG(dbgs() << " enterIntvBefore " << Idx << ": " << *openli_ << '\n');
+}
+
+/// enterIntvAtEnd - Enter openli at the end of MBB.
+/// PhiMBB is a successor inside openli where a PHI value is created.
+/// Currently, all entries must share the same PhiMBB.
+void SplitEditor::enterIntvAtEnd(MachineBasicBlock &A, MachineBasicBlock &B) {
+ assert(openli_ && "openIntv not called before enterIntvAtEnd");
+
+ SlotIndex EndA = lis_.getMBBEndIdx(&A);
+ VNInfo *CurVNIA = curli_->getVNInfoAt(EndA.getPrevIndex());
+ if (!CurVNIA) {
+ DEBUG(dbgs() << " enterIntvAtEnd, curli not live out of BB#"
+ << A.getNumber() << ".\n");
+ return;
+ }
+
+ // Add a phi kill value and live range out of A.
+ VNInfo *VNIA = insertCopy(*openli_, A, A.getFirstTerminator());
+ openli_->addRange(LiveRange(VNIA->def, EndA, VNIA));
+
+ // FIXME: If this is the only entry edge, we don't need the extra PHI value.
+ // FIXME: If there are multiple entry blocks (so not a loop), we need proper
+ // SSA update.
+
+ // Now look at the start of B.
+ SlotIndex StartB = lis_.getMBBStartIdx(&B);
+ SlotIndex EndB = lis_.getMBBEndIdx(&B);
+ const LiveRange *CurB = curli_->getLiveRangeContaining(StartB);
+ if (!CurB) {
+ DEBUG(dbgs() << " enterIntvAtEnd: curli not live in to BB#"
+ << B.getNumber() << ".\n");
+ return;
+ }
+
+ VNInfo *VNIB = openli_->getVNInfoAt(StartB);
+ if (!VNIB) {
+ // Create a phi value.
+ VNIB = openli_->getNextValue(SlotIndex(StartB, true), 0, false,
+ lis_.getVNInfoAllocator());
+ VNIB->setIsPHIDef(true);
+ VNInfo *&mapVNI = valueMap_[CurB->valno];
+ if (mapVNI) {
+ // Multiple copies - must create PHI value.
+ abort();
+ } else {
+ // This is the first copy of dupLR. Mark the mapping.
+ mapVNI = VNIB;
+ }
+
+ }
+
+ DEBUG(dbgs() << " enterIntvAtEnd: " << *openli_ << '\n');
+}
+
+/// useIntv - indicate that all instructions in MBB should use openli.
+void SplitEditor::useIntv(const MachineBasicBlock &MBB) {
+ useIntv(lis_.getMBBStartIdx(&MBB), lis_.getMBBEndIdx(&MBB));
+}
+
+void SplitEditor::useIntv(SlotIndex Start, SlotIndex End) {
+ assert(openli_ && "openIntv not called before useIntv");
+
+ // Map the curli values from the interval into openli_
+ LiveInterval::const_iterator B = curli_->begin(), E = curli_->end();
+ LiveInterval::const_iterator I = std::lower_bound(B, E, Start);
+
+ if (I != B) {
+ --I;
+ // I begins before Start, but overlaps.
+ if (I->end > Start)
+ openli_->addRange(LiveRange(Start, std::min(End, I->end),
+ mapValue(I->valno)));
+ ++I;
+ }
+
+ // The remaining ranges begin after Start.
+ for (;I != E && I->start < End; ++I)
+ openli_->addRange(LiveRange(I->start, std::min(End, I->end),
+ mapValue(I->valno)));
+ DEBUG(dbgs() << " use [" << Start << ';' << End << "): " << *openli_
+ << '\n');
+}
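+
+// Editorial sketch, not part of this patch: the clipping logic above shown on
+// plain integers standing in for SlotIndex. Given source ranges sorted by
+// start (half-open [start;end)), the window [Start;End) first picks up a
+// range that begins earlier but still overlaps, then every later range that
+// starts before End. Assumes <vector> and <algorithm>; illustration only.
+struct SimpleRange { int start, end; };
+static bool sketchStartsBefore(const SimpleRange &R, int S) { return R.start < S; }
+static void sketchClipToWindow(const std::vector<SimpleRange> &Src,
+ int Start, int End, std::vector<SimpleRange> &Out) {
+ std::vector<SimpleRange>::const_iterator B = Src.begin(), E = Src.end();
+ std::vector<SimpleRange>::const_iterator I =
+ std::lower_bound(B, E, Start, sketchStartsBefore);
+ if (I != B && (I-1)->end > Start) { // a range starting before Start overlaps
+ SimpleRange R = { Start, std::min(End, (I-1)->end) };
+ Out.push_back(R);
+ }
+ for (; I != E && I->start < End; ++I) { // remaining ranges start in window
+ SimpleRange R = { I->start, std::min(End, I->end) };
+ Out.push_back(R);
+ }
+}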
+
+/// leaveIntvAfter - Leave openli after the instruction at Idx.
+void SplitEditor::leaveIntvAfter(SlotIndex Idx) {
+ assert(openli_ && "openIntv not called before leaveIntvAfter");
+
+ const LiveRange *CurLR = curli_->getLiveRangeContaining(Idx.getDefIndex());
+ if (!CurLR || CurLR->end <= Idx.getBoundaryIndex()) {
+ DEBUG(dbgs() << " leaveIntvAfter " << Idx << ": not live\n");
+ return;
+ }
+
+ // Was this value of curli live through openli?
+ if (!openli_->liveAt(CurLR->valno->def)) {
+ DEBUG(dbgs() << " leaveIntvAfter " << Idx << ": using external value\n");
+ liveThrough_ = true;
+ return;
+ }
+
+ // We are going to insert a back copy, so we must have a dupli_.
+ LiveRange *DupLR = getDupLI()->getLiveRangeContaining(Idx.getDefIndex());
+ assert(DupLR && "dupli not live into black, but curli is?");
+
+ // Insert the COPY instruction.
+ MachineBasicBlock::iterator I = lis_.getInstructionFromIndex(Idx);
+ MachineInstr *MI = BuildMI(*I->getParent(), llvm::next(I), I->getDebugLoc(),
+ tii_.get(TargetOpcode::COPY), dupli_->reg)
+ .addReg(openli_->reg);
+ SlotIndex CopyIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
+ openli_->addRange(LiveRange(Idx.getDefIndex(), CopyIdx,
+ mapValue(CurLR->valno)));
+ DupLR->valno->def = CopyIdx;
+ DEBUG(dbgs() << " leaveIntvAfter " << Idx << ": " << *openli_ << '\n');
+}
+
+/// leaveIntvAtTop - Leave the interval at the top of MBB.
+/// Currently, only one value can leave the interval.
+void SplitEditor::leaveIntvAtTop(MachineBasicBlock &MBB) {
+ assert(openli_ && "openIntv not called before leaveIntvAtTop");
+
+ SlotIndex Start = lis_.getMBBStartIdx(&MBB);
+ const LiveRange *CurLR = curli_->getLiveRangeContaining(Start);
+
+ // Is curli even live-in to MBB?
+ if (!CurLR) {
+ DEBUG(dbgs() << " leaveIntvAtTop at " << Start << ": not live\n");
+ return;
+ }
+
+ // Is curli defined by PHI at the beginning of MBB?
+ bool isPHIDef = CurLR->valno->isPHIDef() &&
+ CurLR->valno->def.getBaseIndex() == Start;
+
+ // If MBB is using a value of curli that was defined outside the openli range,
+ // we don't want to copy it back here.
+ if (!isPHIDef && !openli_->liveAt(CurLR->valno->def)) {
+ DEBUG(dbgs() << " leaveIntvAtTop at " << Start
+ << ": using external value\n");
+ liveThrough_ = true;
+ return;
+ }
+
+ // We are going to insert a back copy, so we must have a dupli_.
+ LiveRange *DupLR = getDupLI()->getLiveRangeContaining(Start);
+ assert(DupLR && "dupli not live into black, but curli is?");
+
+ // Insert the COPY instruction.
+ MachineInstr *MI = BuildMI(MBB, MBB.begin(), DebugLoc(),
+ tii_.get(TargetOpcode::COPY), dupli_->reg)
+ .addReg(openli_->reg);
+ SlotIndex Idx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
+
+ // Adjust dupli and openli values.
+ if (isPHIDef) {
+ // dupli was already a PHI on entry to MBB. Simply insert an openli PHI,
+ // and shift the dupli def down to the COPY.
+ VNInfo *VNI = openli_->getNextValue(SlotIndex(Start, true), 0, false,
+ lis_.getVNInfoAllocator());
+ VNI->setIsPHIDef(true);
+ openli_->addRange(LiveRange(VNI->def, Idx, VNI));
+
+ dupli_->removeRange(Start, Idx);
+ DupLR->valno->def = Idx;
+ DupLR->valno->setIsPHIDef(false);
+ } else {
+ // The dupli value was defined somewhere inside the openli range.
+ DEBUG(dbgs() << " leaveIntvAtTop source value defined at "
+ << DupLR->valno->def << "\n");
+ // FIXME: We may not need a PHI here if all predecessors have the same
+ // value.
+ VNInfo *VNI = openli_->getNextValue(SlotIndex(Start, true), 0, false,
+ lis_.getVNInfoAllocator());
+ VNI->setIsPHIDef(true);
+ openli_->addRange(LiveRange(VNI->def, Idx, VNI));
+
+ // FIXME: What if DupLR->valno is used by multiple exits? SSA Update.
+
+ // closeIntv is going to remove the superfluous live ranges.
+ DupLR->valno->def = Idx;
+ DupLR->valno->setIsPHIDef(false);
+ }
+
+ DEBUG(dbgs() << " leaveIntvAtTop at " << Idx << ": " << *openli_ << '\n');
+}
+
+/// closeIntv - Indicate that we are done editing the currently open
+/// LiveInterval, and ranges can be trimmed.
+void SplitEditor::closeIntv() {
+ assert(openli_ && "openIntv not called before closeIntv");
+
+ DEBUG(dbgs() << " closeIntv cleaning up\n");
+ DEBUG(dbgs() << " open " << *openli_ << '\n');
+
+ if (liveThrough_) {
+ DEBUG(dbgs() << " value live through region, leaving dupli as is.\n");
+ } else {
+ // Live out with copies inserted, or killed by region. Either way we need to
+ // remove the overlapping region from dupli.
+ getDupLI();
+ for (LiveInterval::iterator I = openli_->begin(), E = openli_->end();
+ I != E; ++I) {
+ dupli_->removeRange(I->start, I->end);
+ }
+ // FIXME: A block branching to the entry block may also branch elsewhere
+ // where curli is live. We need both openli and curli to be live in that case.
+ DEBUG(dbgs() << " dup2 " << *dupli_ << '\n');
+ }
+ openli_ = 0;
+ valueMap_.clear();
+}
+
+/// rewrite - after all the new live ranges have been created, rewrite
+/// instructions using curli to use the new intervals.
+void SplitEditor::rewrite() {
+ assert(!openli_ && "Previous LI not closed before rewrite");
+ const LiveInterval *curli = sa_.getCurLI();
+ for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(curli->reg),
+ RE = mri_.reg_end(); RI != RE;) {
+ MachineOperand &MO = RI.getOperand();
+ MachineInstr *MI = MO.getParent();
+ ++RI;
+ if (MI->isDebugValue()) {
+ DEBUG(dbgs() << "Zapping " << *MI);
+ // FIXME: We can do much better with debug values.
+ MO.setReg(0);
+ continue;
+ }
+ SlotIndex Idx = lis_.getInstructionIndex(MI);
+ Idx = MO.isUse() ? Idx.getUseIndex() : Idx.getDefIndex();
+ LiveInterval *LI = dupli_;
+ for (unsigned i = firstInterval, e = intervals_.size(); i != e; ++i) {
+ LiveInterval *testli = intervals_[i];
+ if (testli->liveAt(Idx)) {
+ LI = testli;
+ break;
+ }
+ }
+ if (LI) {
+ MO.setReg(LI->reg);
+ sa_.removeUse(MI);
+ DEBUG(dbgs() << " rewrite " << Idx << '\t' << *MI);
+ }
+ }
+
+ // dupli_ goes in last, after rewriting.
+ if (dupli_) {
+ if (dupli_->empty()) {
+ DEBUG(dbgs() << " dupli became empty?\n");
+ lis_.removeInterval(dupli_->reg);
+ dupli_ = 0;
+ } else {
+ dupli_->RenumberValues(lis_);
+ intervals_.push_back(dupli_);
+ }
+ }
+
+ // Calculate spill weight and allocation hints for new intervals.
+ VirtRegAuxInfo vrai(vrm_.getMachineFunction(), lis_, sa_.loops_);
+ for (unsigned i = firstInterval, e = intervals_.size(); i != e; ++i) {
+ LiveInterval &li = *intervals_[i];
+ vrai.CalculateRegClass(li.reg);
+ vrai.CalculateWeightAndHint(li);
+ DEBUG(dbgs() << " new interval " << mri_.getRegClass(li.reg)->getName()
+ << ":" << li << '\n');
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// Loop Splitting
+//===----------------------------------------------------------------------===//
+
+bool SplitEditor::splitAroundLoop(const MachineLoop *Loop) {
+ SplitAnalysis::LoopBlocks Blocks;
+ sa_.getLoopBlocks(Loop, Blocks);
+
+ // Break critical edges as needed.
+ SplitAnalysis::BlockPtrSet CriticalExits;
+ sa_.getCriticalExits(Blocks, CriticalExits);
+ assert(CriticalExits.empty() && "Cannot break critical exits yet");
+
+ // Create new live interval for the loop.
+ openIntv();
+
+ // Insert copies in the predecessors.
+ for (SplitAnalysis::BlockPtrSet::iterator I = Blocks.Preds.begin(),
+ E = Blocks.Preds.end(); I != E; ++I) {
+ MachineBasicBlock &MBB = const_cast<MachineBasicBlock&>(**I);
+ enterIntvAtEnd(MBB, *Loop->getHeader());
+ }
+
+ // Switch all loop blocks.
+ for (SplitAnalysis::BlockPtrSet::iterator I = Blocks.Loop.begin(),
+ E = Blocks.Loop.end(); I != E; ++I)
+ useIntv(**I);
+
+ // Insert back copies in the exit blocks.
+ for (SplitAnalysis::BlockPtrSet::iterator I = Blocks.Exits.begin(),
+ E = Blocks.Exits.end(); I != E; ++I) {
+ MachineBasicBlock &MBB = const_cast<MachineBasicBlock&>(**I);
+ leaveIntvAtTop(MBB);
+ }
+
+ // Done.
+ closeIntv();
+ rewrite();
+ return dupli_;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Single Block Splitting
+//===----------------------------------------------------------------------===//
+
+/// splitSingleBlocks - Split curli into a separate live interval inside each
+/// basic block in Blocks. Return true if curli has been completely replaced,
+/// false if curli is still intact, and needs to be spilled or split further.
+bool SplitEditor::splitSingleBlocks(const SplitAnalysis::BlockPtrSet &Blocks) {
+ DEBUG(dbgs() << " splitSingleBlocks for " << Blocks.size() << " blocks.\n");
+ // Determine the first and last instruction using curli in each block.
+ typedef std::pair<SlotIndex,SlotIndex> IndexPair;
+ typedef DenseMap<const MachineBasicBlock*,IndexPair> IndexPairMap;
+ IndexPairMap MBBRange;
+ for (SplitAnalysis::InstrPtrSet::const_iterator I = sa_.usingInstrs_.begin(),
+ E = sa_.usingInstrs_.end(); I != E; ++I) {
+ const MachineBasicBlock *MBB = (*I)->getParent();
+ if (!Blocks.count(MBB))
+ continue;
+ SlotIndex Idx = lis_.getInstructionIndex(*I);
+ DEBUG(dbgs() << " BB#" << MBB->getNumber() << '\t' << Idx << '\t' << **I);
+ IndexPair &IP = MBBRange[MBB];
+ if (!IP.first.isValid() || Idx < IP.first)
+ IP.first = Idx;
+ if (!IP.second.isValid() || Idx > IP.second)
+ IP.second = Idx;
+ }
+
+ // Create a new interval for each block.
+ for (SplitAnalysis::BlockPtrSet::const_iterator I = Blocks.begin(),
+ E = Blocks.end(); I != E; ++I) {
+ IndexPair &IP = MBBRange[*I];
+ DEBUG(dbgs() << " splitting for BB#" << (*I)->getNumber() << ": ["
+ << IP.first << ';' << IP.second << ")\n");
+ assert(IP.first.isValid() && IP.second.isValid());
+
+ openIntv();
+ enterIntvBefore(IP.first);
+ useIntv(IP.first.getBaseIndex(), IP.second.getBoundaryIndex());
+ leaveIntvAfter(IP.second);
+ closeIntv();
+ }
+ rewrite();
+ return dupli_;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Sub Block Splitting
+//===----------------------------------------------------------------------===//
+
+/// getBlockForInsideSplit - If curli is contained inside a single basic block,
+/// and it would pay to subdivide the interval inside that block, return it.
+/// Otherwise return NULL. The returned block can be passed to
+/// SplitEditor::splitInsideBlock.
+const MachineBasicBlock *SplitAnalysis::getBlockForInsideSplit() {
+ // The interval must be exclusive to one block.
+ if (usingBlocks_.size() != 1)
+ return 0;
+ // Don't do this for fewer than 4 instructions. We want to be sure that
+ // splitting actually reduces the instruction count per interval.
+ if (usingInstrs_.size() < 4)
+ return 0;
+ return usingBlocks_.begin()->first;
+}
+
+/// splitInsideBlock - Split curli into multiple intervals inside MBB. Return
+/// true if curli has been completely replaced, false if curli is still
+/// intact, and needs to be spilled or split further.
+bool SplitEditor::splitInsideBlock(const MachineBasicBlock *MBB) {
+ SmallVector<SlotIndex, 32> Uses;
+ Uses.reserve(sa_.usingInstrs_.size());
+ for (SplitAnalysis::InstrPtrSet::const_iterator I = sa_.usingInstrs_.begin(),
+ E = sa_.usingInstrs_.end(); I != E; ++I)
+ if ((*I)->getParent() == MBB)
+ Uses.push_back(lis_.getInstructionIndex(*I));
+ DEBUG(dbgs() << " splitInsideBlock BB#" << MBB->getNumber() << " for "
+ << Uses.size() << " instructions.\n");
+ assert(Uses.size() >= 3 && "Need at least 3 instructions");
+ array_pod_sort(Uses.begin(), Uses.end());
+
+ // Simple algorithm: Find the largest gap between uses as determined by slot
+ // indices. Create new intervals for instructions before the gap and after the
+ // gap.
+ unsigned bestPos = 0;
+ int bestGap = 0;
+ DEBUG(dbgs() << " dist (" << Uses[0]);
+ for (unsigned i = 1, e = Uses.size(); i != e; ++i) {
+ int g = Uses[i-1].distance(Uses[i]);
+ DEBUG(dbgs() << ") -" << g << "- (" << Uses[i]);
+ if (g > bestGap)
+ bestPos = i, bestGap = g;
+ }
+ DEBUG(dbgs() << "), best: -" << bestGap << "-\n");
+
+ // bestPos points to the first use after the best gap.
+ assert(bestPos > 0 && "Invalid gap");
+
+ // FIXME: Don't create intervals for low densities.
+
+ // First interval before the gap. Don't create single-instr intervals.
+ if (bestPos > 1) {
+ openIntv();
+ enterIntvBefore(Uses.front());
+ useIntv(Uses.front().getBaseIndex(), Uses[bestPos-1].getBoundaryIndex());
+ leaveIntvAfter(Uses[bestPos-1]);
+ closeIntv();
+ }
+
+ // Second interval after the gap.
+ if (bestPos < Uses.size()-1) {
+ openIntv();
+ enterIntvBefore(Uses[bestPos]);
+ useIntv(Uses[bestPos].getBaseIndex(), Uses.back().getBoundaryIndex());
+ leaveIntvAfter(Uses.back());
+ closeIntv();
+ }
+
+ rewrite();
+ return dupli_;
+}
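+
+// Editorial sketch, not part of this patch: the gap-finding step above in
+// isolation. Given use positions sorted ascending (plain ints standing in for
+// SlotIndex), return the index of the first use after the largest gap; uses
+// before that index and uses from it onwards then get separate intervals.
+static unsigned sketchBestSplitPos(const std::vector<int> &Uses) {
+ unsigned BestPos = 0;
+ int BestGap = 0;
+ for (unsigned i = 1, e = Uses.size(); i != e; ++i) {
+ int Gap = Uses[i] - Uses[i-1];
+ if (Gap > BestGap) {
+ BestPos = i;
+ BestGap = Gap;
+ }
+ }
+ return BestPos; // 0 means there was no usable gap; callers must check
+}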
diff --git a/libclamav/c++/llvm/lib/CodeGen/SplitKit.h b/libclamav/c++/llvm/lib/CodeGen/SplitKit.h
new file mode 100644
index 0000000..ddef746
--- /dev/null
+++ b/libclamav/c++/llvm/lib/CodeGen/SplitKit.h
@@ -0,0 +1,321 @@
+//===----------- SplitKit.h - Toolkit for splitting live ranges -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the SplitAnalysis class as well as mutator functions for
+// live range splitting.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+
+namespace llvm {
+
+class LiveInterval;
+class LiveIntervals;
+class MachineInstr;
+class MachineLoop;
+class MachineLoopInfo;
+class MachineRegisterInfo;
+class TargetInstrInfo;
+class VirtRegMap;
+class VNInfo;
+
+/// SplitAnalysis - Analyze a LiveInterval, looking for live range splitting
+/// opportunities.
+class SplitAnalysis {
+public:
+ const MachineFunction &mf_;
+ const LiveIntervals &lis_;
+ const MachineLoopInfo &loops_;
+ const TargetInstrInfo &tii_;
+
+ // Instructions using the current register.
+ typedef SmallPtrSet<const MachineInstr*, 16> InstrPtrSet;
+ InstrPtrSet usingInstrs_;
+
+ // The number of instructions using curli in each basic block.
+ typedef DenseMap<const MachineBasicBlock*, unsigned> BlockCountMap;
+ BlockCountMap usingBlocks_;
+
+ // The number of basic blocks using curli in each loop.
+ typedef DenseMap<const MachineLoop*, unsigned> LoopCountMap;
+ LoopCountMap usingLoops_;
+
+private:
+ // Current live interval.
+ const LiveInterval *curli_;
+
+ // Summarize statistics by counting instructions using curli_.
+ void analyzeUses();
+
+ /// canAnalyzeBranch - Return true if MBB ends in a branch that can be
+ /// analyzed.
+ bool canAnalyzeBranch(const MachineBasicBlock *MBB);
+
+public:
+ SplitAnalysis(const MachineFunction &mf, const LiveIntervals &lis,
+ const MachineLoopInfo &mli);
+
+ /// analyze - set curli to the specified interval, and analyze how it may be
+ /// split.
+ void analyze(const LiveInterval *li);
+
+ /// removeUse - Update statistics by noting that mi no longer uses curli.
+ void removeUse(const MachineInstr *mi);
+
+ const LiveInterval *getCurLI() { return curli_; }
+
+ /// clear - clear all data structures so SplitAnalysis is ready to analyze a
+ /// new interval.
+ void clear();
+
+ typedef SmallPtrSet<const MachineBasicBlock*, 16> BlockPtrSet;
+ typedef SmallPtrSet<const MachineLoop*, 16> LoopPtrSet;
+
+ // Sets of basic blocks surrounding a machine loop.
+ struct LoopBlocks {
+ BlockPtrSet Loop; // Blocks in the loop.
+ BlockPtrSet Preds; // Loop predecessor blocks.
+ BlockPtrSet Exits; // Loop exit blocks.
+
+ void clear() {
+ Loop.clear();
+ Preds.clear();
+ Exits.clear();
+ }
+ };
+
+ // Calculate the block sets surrounding the loop.
+ void getLoopBlocks(const MachineLoop *Loop, LoopBlocks &Blocks);
+
+ /// LoopPeripheralUse - how is a variable used in and around a loop?
+ /// Peripheral blocks are the loop predecessors and exit blocks.
+ enum LoopPeripheralUse {
+ ContainedInLoop, // All uses are inside the loop.
+ SinglePeripheral, // At most one instruction per peripheral block.
+ MultiPeripheral, // Multiple instructions in some peripheral blocks.
+ OutsideLoop // Uses outside loop periphery.
+ };
+
+ /// analyzeLoopPeripheralUse - Return an enum describing how curli_ is used in
+ /// and around the Loop.
+ LoopPeripheralUse analyzeLoopPeripheralUse(const LoopBlocks&);
+
+ /// getCriticalExits - It may be necessary to partially break critical edges
+ /// leaving the loop if an exit block has phi uses of curli. Collect the exit
+ /// blocks that need special treatment into CriticalExits.
+ void getCriticalExits(const LoopBlocks &Blocks, BlockPtrSet &CriticalExits);
+
+ /// canSplitCriticalExits - Return true if it is possible to insert new exit
+ /// blocks before the blocks in CriticalExits.
+ bool canSplitCriticalExits(const LoopBlocks &Blocks,
+ BlockPtrSet &CriticalExits);
+
+ /// getBestSplitLoop - Return the loop where curli may best be split to a
+ /// separate register, or NULL.
+ const MachineLoop *getBestSplitLoop();
+
+ /// getMultiUseBlocks - Add basic blocks to Blocks that may benefit from
+ /// having curli split to a new live interval. Return true if Blocks can be
+ /// passed to SplitEditor::splitSingleBlocks.
+ bool getMultiUseBlocks(BlockPtrSet &Blocks);
+
+ /// getBlockForInsideSplit - If curli is contained inside a single basic block,
+ /// and it would pay to subdivide the interval inside that block, return it.
+ /// Otherwise return NULL. The returned block can be passed to
+ /// SplitEditor::splitInsideBlock.
+ const MachineBasicBlock *getBlockForInsideSplit();
+};
+
+
+/// LiveIntervalMap - Map values from a large LiveInterval into a small
+/// interval that is a subset. Insert phi-def values as needed. This class is
+/// used by SplitEditor to create new smaller LiveIntervals.
+///
+/// parentli_ is the larger interval, li_ is the subset interval. Every value
+/// in li_ corresponds to exactly one value in parentli_, and the live range
+/// of the value is contained within the live range of the parentli_ value.
+/// Values in parentli_ may map to any number of li_ values, including 0.
+class LiveIntervalMap {
+ LiveIntervals &lis_;
+
+ // The parent interval is never changed.
+ const LiveInterval &parentli_;
+
+ // The child interval's values are fully contained inside parentli_ values.
+ LiveInterval &li_;
+
+ typedef DenseMap<const VNInfo*, VNInfo*> ValueMap;
+
+ // Map parentli_ values to simple values in li_ that are defined at the same
+ // SlotIndex, or NULL for parentli_ values that have complex li_ defs.
+ // Note there is a difference between values mapping to NULL (complex), and
+ // values not present (unknown/unmapped).
+ ValueMap valueMap_;
+
+ // extendTo - Find the last li_ value defined in MBB at or before Idx. The
+ // parentli_ is assumed to be live at Idx. Extend the live range to Idx.
+ // Return the found VNInfo, or NULL.
+ VNInfo *extendTo(MachineBasicBlock *MBB, SlotIndex Idx);
+
+ // addSimpleRange - Add a simple range from parentli_ to li_.
+ // ParentVNI must be live in the [Start;End) interval.
+ void addSimpleRange(SlotIndex Start, SlotIndex End, const VNInfo *ParentVNI);
+
+public:
+ LiveIntervalMap(LiveIntervals &lis,
+ const LiveInterval &parentli,
+ LiveInterval &li)
+ : lis_(lis), parentli_(parentli), li_(li) {}
+
+ /// defValue - define a value in li_ from the parentli_ value VNI and Idx.
+ /// Idx does not have to be ParentVNI->def, but it must be contained within
+ /// ParentVNI's live range in parentli_.
+ /// Return the new li_ value.
+ VNInfo *defValue(const VNInfo *ParentVNI, SlotIndex Idx);
+
+ /// mapValue - map ParentVNI to the corresponding li_ value at Idx. It is
+ /// assumed that ParentVNI is live at Idx.
+ /// If ParentVNI has not been defined by defValue, it is assumed that
+ /// ParentVNI->def dominates Idx.
+ /// If ParentVNI has been defined by defValue one or more times, a value that
+ /// dominates Idx will be returned. This may require creating extra phi-def
+ /// values and adding live ranges to li_.
+ VNInfo *mapValue(const VNInfo *ParentVNI, SlotIndex Idx);
+
+ /// addRange - Add live ranges to li_ where [Start;End) intersects parentli_.
+ /// All needed values whose def is not inside [Start;End) must be defined
+ /// beforehand so mapValue will work.
+ void addRange(SlotIndex Start, SlotIndex End);
+};
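+
+// Editorial usage sketch, not part of this patch: how the methods above are
+// meant to be combined by a caller. ParentVNI, CopyIdx, Start and End are
+// assumed to come from the caller: defValue() registers each new definition
+// (e.g. at an inserted COPY) inside ParentVNI's live range, and addRange()
+// then transfers the overlapping parent ranges, relying on mapValue() to
+// resolve or phi-connect values as documented above.
+inline void liveIntervalMapSketch(LiveIntervalMap &LIM,
+ const VNInfo *ParentVNI, SlotIndex CopyIdx,
+ SlotIndex Start, SlotIndex End) {
+ LIM.defValue(ParentVNI, CopyIdx); // new li_ value defined at CopyIdx
+ LIM.addRange(Start, End); // copy [Start;End) from parentli_ into li_
+}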
+
+
+/// SplitEditor - Edit machine code and LiveIntervals for live range
+/// splitting.
+///
+/// 1. Create a SplitEditor from a SplitAnalysis.
+/// 2. Start a new live interval with openIntv.
+/// 3. Mark the places where the new interval is entered using enterIntv*.
+/// 4. Mark the ranges where the new interval is used with useIntv*.
+/// 5. Mark the places where the interval is exited with leaveIntv*.
+/// 6. Finish the current interval with closeIntv and repeat from 2.
+/// 7. Rewrite instructions with rewrite().
+///
+class SplitEditor {
+ SplitAnalysis &sa_;
+ LiveIntervals &lis_;
+ VirtRegMap &vrm_;
+ MachineRegisterInfo &mri_;
+ const TargetInstrInfo &tii_;
+
+ /// curli_ - The immutable interval we are currently splitting.
+ const LiveInterval *const curli_;
+
+ /// dupli_ - Created as a copy of curli_, ranges are carved out as new
+ /// intervals get added through openIntv / closeIntv. This is used to avoid
+ /// editing curli_.
+ LiveInterval *dupli_;
+
+ /// Currently open LiveInterval.
+ LiveInterval *openli_;
+
+ /// createInterval - Create a new virtual register and LiveInterval with same
+ /// register class and spill slot as curli.
+ LiveInterval *createInterval();
+
+ /// getDupLI - Ensure dupli is created and return it.
+ LiveInterval *getDupLI();
+
+ /// valueMap_ - Map values in curli to values in openli. These are direct 1-1
+ /// mappings, and do not include values created by inserted copies.
+ DenseMap<const VNInfo*, VNInfo*> valueMap_;
+
+ /// mapValue - Return the openIntv value that corresponds to the given curli
+ /// value.
+ VNInfo *mapValue(const VNInfo *curliVNI);
+
+ /// A dupli value is live through openIntv.
+ bool liveThrough_;
+
+ /// All the new intervals created for this split are added to intervals_.
+ SmallVectorImpl<LiveInterval*> &intervals_;
+
+ /// The index into intervals_ of the first interval we added. The vector may
+ /// already contain intervals added before this editor was created.
+ unsigned firstInterval;
+
+ /// Insert a COPY instruction curli -> li. Allocate a new value from li
+ /// defined by the COPY
+ VNInfo *insertCopy(LiveInterval &LI,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I);
+
+public:
+ /// Create a new SplitEditor for editing the LiveInterval analyzed by SA.
+ /// Newly created intervals will be appended to newIntervals.
+ SplitEditor(SplitAnalysis &SA, LiveIntervals&, VirtRegMap&,
+ SmallVectorImpl<LiveInterval*> &newIntervals);
+
+ /// getAnalysis - Get the corresponding analysis.
+ SplitAnalysis &getAnalysis() { return sa_; }
+
+ /// Create a new virtual register and live interval.
+ void openIntv();
+
+ /// enterIntvBefore - Enter openli before the instruction at Idx. If curli is
+ /// not live before Idx, a COPY is not inserted.
+ void enterIntvBefore(SlotIndex Idx);
+
+ /// enterIntvAtEnd - Enter openli at the end of MBB.
+ /// PhiMBB is a successor inside openli where a PHI value is created.
+ /// Currently, all entries must share the same PhiMBB.
+ void enterIntvAtEnd(MachineBasicBlock &MBB, MachineBasicBlock &PhiMBB);
+
+ /// useIntv - indicate that all instructions in MBB should use openli.
+ void useIntv(const MachineBasicBlock &MBB);
+
+ /// useIntv - indicate that all instructions in range should use openli.
+ void useIntv(SlotIndex Start, SlotIndex End);
+
+ /// leaveIntvAfter - Leave openli after the instruction at Idx.
+ void leaveIntvAfter(SlotIndex Idx);
+
+ /// leaveIntvAtTop - Leave the interval at the top of MBB.
+ /// Currently, only one value can leave the interval.
+ void leaveIntvAtTop(MachineBasicBlock &MBB);
+
+ /// closeIntv - Indicate that we are done editing the currently open
+ /// LiveInterval, and ranges can be trimmed.
+ void closeIntv();
+
+ /// rewrite - after all the new live ranges have been created, rewrite
+ /// instructions using curli to use the new intervals.
+ void rewrite();
+
+ // ===--- High level methods ---===
+
+ /// splitAroundLoop - Split curli into a separate live interval inside
+ /// the loop. Return true if curli has been completely replaced, false if
+ /// curli is still intact, and needs to be spilled or split further.
+ bool splitAroundLoop(const MachineLoop*);
+
+ /// splitSingleBlocks - Split curli into a separate live interval inside each
+ /// basic block in Blocks. Return true if curli has been completely replaced,
+ /// false if curli is still intact, and needs to be spilled or split further.
+ bool splitSingleBlocks(const SplitAnalysis::BlockPtrSet &Blocks);
+
+ /// splitInsideBlock - Split curli into multiple intervals inside MBB. Return
+ /// true if curli has been completely replaced, false if curli is still
+ /// intact, and needs to be spilled or split further.
+ bool splitInsideBlock(const MachineBasicBlock *);
+};
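+
+// Editorial usage sketch, not part of this patch: a driver tying the two
+// classes together in the spirit of the workflow comment above. The
+// SplitAnalysis is assumed to have been given an interval via analyze();
+// each high-level split method returns true if curli was fully replaced.
+inline bool trySplitSketch(SplitAnalysis &SA, SplitEditor &Editor) {
+ if (const MachineLoop *Loop = SA.getBestSplitLoop())
+ return Editor.splitAroundLoop(Loop);
+ SplitAnalysis::BlockPtrSet Blocks;
+ if (SA.getMultiUseBlocks(Blocks))
+ return Editor.splitSingleBlocks(Blocks);
+ if (const MachineBasicBlock *MBB = SA.getBlockForInsideSplit())
+ return Editor.splitInsideBlock(MBB);
+ return false;
+}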
+
+}
diff --git a/libclamav/c++/llvm/lib/CodeGen/Splitter.cpp b/libclamav/c++/llvm/lib/CodeGen/Splitter.cpp
new file mode 100644
index 0000000..38f3b1f
--- /dev/null
+++ b/libclamav/c++/llvm/lib/CodeGen/Splitter.cpp
@@ -0,0 +1,817 @@
+//===-- llvm/CodeGen/Splitter.cpp - Splitter -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "loopsplitter"
+
+#include "Splitter.h"
+
+#include "SimpleRegisterCoalescing.h"
+#include "llvm/Module.h"
+#include "llvm/CodeGen/CalcSpillWeights.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/LiveStackAnalysis.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+using namespace llvm;
+
+char LoopSplitter::ID = 0;
+INITIALIZE_PASS(LoopSplitter, "loop-splitting",
+ "Split virtual regists across loop boundaries.", false, false);
+
+namespace llvm {
+
+ class StartSlotComparator {
+ public:
+ StartSlotComparator(LiveIntervals &lis) : lis(lis) {}
+ bool operator()(const MachineBasicBlock *mbb1,
+ const MachineBasicBlock *mbb2) const {
+ return lis.getMBBStartIdx(mbb1) < lis.getMBBStartIdx(mbb2);
+ }
+ private:
+ LiveIntervals &lis;
+ };
+
+ class LoopSplit {
+ public:
+ LoopSplit(LoopSplitter &ls, LiveInterval &li, MachineLoop &loop)
+ : ls(ls), li(li), loop(loop), valid(true), inSplit(false), newLI(0) {
+ assert(TargetRegisterInfo::isVirtualRegister(li.reg) &&
+ "Cannot split physical registers.");
+ }
+
+ LiveInterval& getLI() const { return li; }
+
+ MachineLoop& getLoop() const { return loop; }
+
+ bool isValid() const { return valid; }
+
+ bool isWorthwhile() const { return valid && (inSplit || !outSplits.empty()); }
+
+ void invalidate() { valid = false; }
+
+ void splitIncoming() { inSplit = true; }
+
+ void splitOutgoing(MachineLoop::Edge &edge) { outSplits.insert(edge); }
+
+ void addLoopInstr(MachineInstr *i) { loopInstrs.push_back(i); }
+
+ void apply() {
+ assert(valid && "Attempt to apply invalid split.");
+ applyIncoming();
+ applyOutgoing();
+ copyRanges();
+ renameInside();
+ }
+
+ private:
+ LoopSplitter &ls;
+ LiveInterval &li;
+ MachineLoop &loop;
+ bool valid, inSplit;
+ std::set<MachineLoop::Edge> outSplits;
+ std::vector<MachineInstr*> loopInstrs;
+
+ LiveInterval *newLI;
+ std::map<VNInfo*, VNInfo*> vniMap;
+
+ LiveInterval* getNewLI() {
+ if (newLI == 0) {
+ const TargetRegisterClass *trc = ls.mri->getRegClass(li.reg);
+ unsigned vreg = ls.mri->createVirtualRegister(trc);
+ newLI = &ls.lis->getOrCreateInterval(vreg);
+ }
+ return newLI;
+ }
+
+ VNInfo* getNewVNI(VNInfo *oldVNI) {
+ VNInfo *newVNI = vniMap[oldVNI];
+
+ if (newVNI == 0) {
+ newVNI = getNewLI()->createValueCopy(oldVNI,
+ ls.lis->getVNInfoAllocator());
+ vniMap[oldVNI] = newVNI;
+ }
+
+ return newVNI;
+ }
+
+ void applyIncoming() {
+ if (!inSplit) {
+ return;
+ }
+
+ MachineBasicBlock *preHeader = loop.getLoopPreheader();
+ if (preHeader == 0) {
+ assert(ls.canInsertPreHeader(loop) &&
+ "Can't insert required preheader.");
+ preHeader = &ls.insertPreHeader(loop);
+ }
+
+ LiveRange *preHeaderRange =
+ ls.lis->findExitingRange(li, preHeader);
+ assert(preHeaderRange != 0 && "Range not live into preheader.");
+
+ // Insert the new copy.
+ MachineInstr *copy = BuildMI(*preHeader,
+ preHeader->getFirstTerminator(),
+ DebugLoc(),
+ ls.tii->get(TargetOpcode::COPY))
+ .addReg(getNewLI()->reg, RegState::Define)
+ .addReg(li.reg, RegState::Kill);
+
+ ls.lis->InsertMachineInstrInMaps(copy);
+
+ SlotIndex copyDefIdx = ls.lis->getInstructionIndex(copy).getDefIndex();
+
+ VNInfo *newVal = getNewVNI(preHeaderRange->valno);
+ newVal->def = copyDefIdx;
+ newVal->setCopy(copy);
+ newVal->setIsDefAccurate(true);
+ li.removeRange(copyDefIdx, ls.lis->getMBBEndIdx(preHeader), true);
+
+ getNewLI()->addRange(LiveRange(copyDefIdx,
+ ls.lis->getMBBEndIdx(preHeader),
+ newVal));
+ }
+
+ void applyOutgoing() {
+
+ for (std::set<MachineLoop::Edge>::iterator osItr = outSplits.begin(),
+ osEnd = outSplits.end();
+ osItr != osEnd; ++osItr) {
+ MachineLoop::Edge edge = *osItr;
+ MachineBasicBlock *outBlock = edge.second;
+ if (ls.isCriticalEdge(edge)) {
+ assert(ls.canSplitEdge(edge) && "Unsplittable critical edge.");
+ outBlock = &ls.splitEdge(edge, loop);
+ }
+ LiveRange *outRange = ls.lis->findEnteringRange(li, outBlock);
+ assert(outRange != 0 && "No exiting range?");
+
+ MachineInstr *copy = BuildMI(*outBlock, outBlock->begin(),
+ DebugLoc(),
+ ls.tii->get(TargetOpcode::COPY))
+ .addReg(li.reg, RegState::Define)
+ .addReg(getNewLI()->reg, RegState::Kill);
+
+ ls.lis->InsertMachineInstrInMaps(copy);
+
+ SlotIndex copyDefIdx = ls.lis->getInstructionIndex(copy).getDefIndex();
+
+ // Blow away output range definition.
+ outRange->valno->def = ls.lis->getInvalidIndex();
+ outRange->valno->setIsDefAccurate(false);
+ li.removeRange(ls.lis->getMBBStartIdx(outBlock), copyDefIdx);
+
+ VNInfo *newVal =
+ getNewLI()->getNextValue(SlotIndex(ls.lis->getMBBStartIdx(outBlock),
+ true),
+ 0, false, ls.lis->getVNInfoAllocator());
+
+ getNewLI()->addRange(LiveRange(ls.lis->getMBBStartIdx(outBlock),
+ copyDefIdx, newVal));
+
+ }
+ }
+
+ void copyRange(LiveRange &lr) {
+ std::pair<bool, LoopSplitter::SlotPair> lsr =
+ ls.getLoopSubRange(lr, loop);
+
+ if (!lsr.first)
+ return;
+
+ LiveRange loopRange(lsr.second.first, lsr.second.second,
+ getNewVNI(lr.valno));
+
+ li.removeRange(loopRange.start, loopRange.end, true);
+
+ getNewLI()->addRange(loopRange);
+ }
+
+ void copyRanges() {
+ for (std::vector<MachineInstr*>::iterator iItr = loopInstrs.begin(),
+ iEnd = loopInstrs.end();
+ iItr != iEnd; ++iItr) {
+ MachineInstr &instr = **iItr;
+ SlotIndex instrIdx = ls.lis->getInstructionIndex(&instr);
+ if (instr.modifiesRegister(li.reg, 0)) {
+ LiveRange *defRange =
+ li.getLiveRangeContaining(instrIdx.getDefIndex());
+ if (defRange != 0) // May have caught this already.
+ copyRange(*defRange);
+ }
+ if (instr.readsRegister(li.reg, 0)) {
+ LiveRange *useRange =
+ li.getLiveRangeContaining(instrIdx.getUseIndex());
+ if (useRange != 0) { // May have caught this already.
+ copyRange(*useRange);
+ }
+ }
+ }
+
+ for (MachineLoop::block_iterator bbItr = loop.block_begin(),
+ bbEnd = loop.block_end();
+ bbItr != bbEnd; ++bbItr) {
+ MachineBasicBlock &loopBlock = **bbItr;
+ LiveRange *enteringRange =
+ ls.lis->findEnteringRange(li, &loopBlock);
+ if (enteringRange != 0) {
+ copyRange(*enteringRange);
+ }
+ }
+ }
+
+ void renameInside() {
+ for (std::vector<MachineInstr*>::iterator iItr = loopInstrs.begin(),
+ iEnd = loopInstrs.end();
+ iItr != iEnd; ++iItr) {
+ MachineInstr &instr = **iItr;
+ for (unsigned i = 0; i < instr.getNumOperands(); ++i) {
+ MachineOperand &mop = instr.getOperand(i);
+ if (mop.isReg() && mop.getReg() == li.reg) {
+ mop.setReg(getNewLI()->reg);
+ }
+ }
+ }
+ }
+
+ };
+
+ void LoopSplitter::getAnalysisUsage(AnalysisUsage &au) const {
+ au.addRequired<MachineDominatorTree>();
+ au.addPreserved<MachineDominatorTree>();
+ au.addRequired<MachineLoopInfo>();
+ au.addPreserved<MachineLoopInfo>();
+ au.addPreserved<RegisterCoalescer>();
+ au.addPreserved<CalculateSpillWeights>();
+ au.addPreserved<LiveStacks>();
+ au.addRequired<SlotIndexes>();
+ au.addPreserved<SlotIndexes>();
+ au.addRequired<LiveIntervals>();
+ au.addPreserved<LiveIntervals>();
+ MachineFunctionPass::getAnalysisUsage(au);
+ }
+
+ bool LoopSplitter::runOnMachineFunction(MachineFunction &fn) {
+
+ mf = &fn;
+ mri = &mf->getRegInfo();
+ tii = mf->getTarget().getInstrInfo();
+ tri = mf->getTarget().getRegisterInfo();
+ sis = &getAnalysis<SlotIndexes>();
+ lis = &getAnalysis<LiveIntervals>();
+ mli = &getAnalysis<MachineLoopInfo>();
+ mdt = &getAnalysis<MachineDominatorTree>();
+
+ fqn = mf->getFunction()->getParent()->getModuleIdentifier() + "." +
+ mf->getFunction()->getName().str();
+
+ dbgs() << "Splitting " << mf->getFunction()->getName() << ".";
+
+ dumpOddTerminators();
+
+// dbgs() << "----------------------------------------\n";
+// lis->dump();
+// dbgs() << "----------------------------------------\n";
+
+// std::deque<MachineLoop*> loops;
+// std::copy(mli->begin(), mli->end(), std::back_inserter(loops));
+// dbgs() << "Loops:\n";
+// while (!loops.empty()) {
+// MachineLoop &loop = *loops.front();
+// loops.pop_front();
+// std::copy(loop.begin(), loop.end(), std::back_inserter(loops));
+
+// dumpLoopInfo(loop);
+// }
+
+ //lis->dump();
+ //exit(0);
+
+ // Setup initial intervals.
+ for (LiveIntervals::iterator liItr = lis->begin(), liEnd = lis->end();
+ liItr != liEnd; ++liItr) {
+ LiveInterval *li = liItr->second;
+
+ if (TargetRegisterInfo::isVirtualRegister(li->reg) &&
+ !lis->intervalIsInOneMBB(*li)) {
+ intervals.push_back(li);
+ }
+ }
+
+ processIntervals();
+
+ intervals.clear();
+
+// dbgs() << "----------------------------------------\n";
+// lis->dump();
+// dbgs() << "----------------------------------------\n";
+
+ dumpOddTerminators();
+
+ //exit(1);
+
+ return false;
+ }
+
+ void LoopSplitter::releaseMemory() {
+ fqn.clear();
+ intervals.clear();
+ loopRangeMap.clear();
+ }
+
+ void LoopSplitter::dumpOddTerminators() {
+ for (MachineFunction::iterator bbItr = mf->begin(), bbEnd = mf->end();
+ bbItr != bbEnd; ++bbItr) {
+ MachineBasicBlock *mbb = &*bbItr;
+ MachineBasicBlock *a = 0, *b = 0;
+ SmallVector<MachineOperand, 4> c;
+ if (tii->AnalyzeBranch(*mbb, a, b, c)) {
+ dbgs() << "MBB#" << mbb->getNumber() << " has multiway terminator.\n";
+ dbgs() << " Terminators:\n";
+ for (MachineBasicBlock::iterator iItr = mbb->begin(), iEnd = mbb->end();
+ iItr != iEnd; ++iItr) {
+ MachineInstr *instr= &*iItr;
+ dbgs() << " " << *instr << "";
+ }
+ dbgs() << "\n Listed successors: [ ";
+ for (MachineBasicBlock::succ_iterator sItr = mbb->succ_begin(), sEnd = mbb->succ_end();
+ sItr != sEnd; ++sItr) {
+ MachineBasicBlock *succMBB = *sItr;
+ dbgs() << succMBB->getNumber() << " ";
+ }
+ dbgs() << "]\n\n";
+ }
+ }
+ }
+
+ void LoopSplitter::dumpLoopInfo(MachineLoop &loop) {
+ MachineBasicBlock &headerBlock = *loop.getHeader();
+ typedef SmallVector<MachineLoop::Edge, 8> ExitEdgesList;
+ ExitEdgesList exitEdges;
+ loop.getExitEdges(exitEdges);
+
+ dbgs() << " Header: BB#" << headerBlock.getNumber() << ", Contains: [ ";
+ for (std::vector<MachineBasicBlock*>::const_iterator
+ subBlockItr = loop.getBlocks().begin(),
+ subBlockEnd = loop.getBlocks().end();
+ subBlockItr != subBlockEnd; ++subBlockItr) {
+ MachineBasicBlock &subBlock = **subBlockItr;
+ dbgs() << "BB#" << subBlock.getNumber() << " ";
+ }
+ dbgs() << "], Exit edges: [ ";
+ for (ExitEdgesList::iterator exitEdgeItr = exitEdges.begin(),
+ exitEdgeEnd = exitEdges.end();
+ exitEdgeItr != exitEdgeEnd; ++exitEdgeItr) {
+ MachineLoop::Edge &exitEdge = *exitEdgeItr;
+ dbgs() << "(MBB#" << exitEdge.first->getNumber()
+ << ", MBB#" << exitEdge.second->getNumber() << ") ";
+ }
+ dbgs() << "], Sub-Loop Headers: [ ";
+ for (MachineLoop::iterator subLoopItr = loop.begin(),
+ subLoopEnd = loop.end();
+ subLoopItr != subLoopEnd; ++subLoopItr) {
+ MachineLoop &subLoop = **subLoopItr;
+ MachineBasicBlock &subLoopBlock = *subLoop.getHeader();
+ dbgs() << "BB#" << subLoopBlock.getNumber() << " ";
+ }
+ dbgs() << "]\n";
+ }
+
+ void LoopSplitter::updateTerminators(MachineBasicBlock &mbb) {
+ mbb.updateTerminator();
+
+ for (MachineBasicBlock::iterator miItr = mbb.begin(), miEnd = mbb.end();
+ miItr != miEnd; ++miItr) {
+ if (lis->isNotInMIMap(miItr)) {
+ lis->InsertMachineInstrInMaps(miItr);
+ }
+ }
+ }
+
+ bool LoopSplitter::canInsertPreHeader(MachineLoop &loop) {
+ MachineBasicBlock *header = loop.getHeader();
+ MachineBasicBlock *a = 0, *b = 0;
+ SmallVector<MachineOperand, 4> c;
+
+ for (MachineBasicBlock::pred_iterator pbItr = header->pred_begin(),
+ pbEnd = header->pred_end();
+ pbItr != pbEnd; ++pbItr) {
+ MachineBasicBlock *predBlock = *pbItr;
+ if (!!tii->AnalyzeBranch(*predBlock, a, b, c)) {
+ return false;
+ }
+ }
+
+ MachineFunction::iterator headerItr(header);
+ if (headerItr == mf->begin())
+ return true;
+ MachineBasicBlock *headerLayoutPred = llvm::prior(headerItr);
+ assert(headerLayoutPred != 0 && "Header should have layout pred.");
+
+ return (!tii->AnalyzeBranch(*headerLayoutPred, a, b, c));
+ }
+
+ MachineBasicBlock& LoopSplitter::insertPreHeader(MachineLoop &loop) {
+ assert(loop.getLoopPreheader() == 0 && "Loop already has preheader.");
+
+ MachineBasicBlock &header = *loop.getHeader();
+
+ // Save the preds - we'll need to update them once we insert the preheader.
+ typedef std::set<MachineBasicBlock*> HeaderPreds;
+ HeaderPreds headerPreds;
+
+ for (MachineBasicBlock::pred_iterator predItr = header.pred_begin(),
+ predEnd = header.pred_end();
+ predItr != predEnd; ++predItr) {
+ if (!loop.contains(*predItr))
+ headerPreds.insert(*predItr);
+ }
+
+ assert(!headerPreds.empty() && "No predecessors for header?");
+
+ //dbgs() << fqn << " MBB#" << header.getNumber() << " inserting preheader...";
+
+ MachineBasicBlock *preHeader =
+ mf->CreateMachineBasicBlock(header.getBasicBlock());
+
+ assert(preHeader != 0 && "Failed to create pre-header.");
+
+ mf->insert(header, preHeader);
+
+ for (HeaderPreds::iterator hpItr = headerPreds.begin(),
+ hpEnd = headerPreds.end();
+ hpItr != hpEnd; ++hpItr) {
+ assert(*hpItr != 0 && "How'd a null predecessor get into this set?");
+ MachineBasicBlock &hp = **hpItr;
+ hp.ReplaceUsesOfBlockWith(&header, preHeader);
+ }
+ preHeader->addSuccessor(&header);
+
+ MachineBasicBlock *oldLayoutPred =
+ llvm::prior(MachineFunction::iterator(preHeader));
+ if (oldLayoutPred != 0) {
+ updateTerminators(*oldLayoutPred);
+ }
+
+ lis->InsertMBBInMaps(preHeader);
+
+ if (MachineLoop *parentLoop = loop.getParentLoop()) {
+ assert(parentLoop->getHeader() != loop.getHeader() &&
+ "Parent loop has same header?");
+ parentLoop->addBasicBlockToLoop(preHeader, mli->getBase());
+
+ // Invalidate all parent loop ranges.
+ while (parentLoop != 0) {
+ loopRangeMap.erase(parentLoop);
+ parentLoop = parentLoop->getParentLoop();
+ }
+ }
+
+ for (LiveIntervals::iterator liItr = lis->begin(),
+ liEnd = lis->end();
+ liItr != liEnd; ++liItr) {
+ LiveInterval &li = *liItr->second;
+
+ // Is this safe for physregs?
+ // TargetRegisterInfo::isPhysicalRegister(li.reg) ||
+ if (!lis->isLiveInToMBB(li, &header))
+ continue;
+
+ if (lis->isLiveInToMBB(li, preHeader)) {
+ assert(lis->isLiveOutOfMBB(li, preHeader) &&
+ "Range terminates in newly added preheader?");
+ continue;
+ }
+
+ bool insertRange = false;
+
+ for (MachineBasicBlock::pred_iterator predItr = preHeader->pred_begin(),
+ predEnd = preHeader->pred_end();
+ predItr != predEnd; ++predItr) {
+ MachineBasicBlock *predMBB = *predItr;
+ if (lis->isLiveOutOfMBB(li, predMBB)) {
+ insertRange = true;
+ break;
+ }
+ }
+
+ if (!insertRange)
+ continue;
+
+ VNInfo *newVal = li.getNextValue(lis->getMBBStartIdx(preHeader),
+ 0, false, lis->getVNInfoAllocator());
+ li.addRange(LiveRange(lis->getMBBStartIdx(preHeader),
+ lis->getMBBEndIdx(preHeader),
+ newVal));
+ }
+
+
+ //dbgs() << "Dumping SlotIndexes:\n";
+ //sis->dump();
+
+ //dbgs() << "done. (Added MBB#" << preHeader->getNumber() << ")\n";
+
+ return *preHeader;
+ }
+
+ bool LoopSplitter::isCriticalEdge(MachineLoop::Edge &edge) {
+ assert(edge.first->succ_size() > 1 && "Non-sensical edge.");
+ if (edge.second->pred_size() > 1)
+ return true;
+ return false;
+ }
+
+ bool LoopSplitter::canSplitEdge(MachineLoop::Edge &edge) {
+ MachineFunction::iterator outBlockItr(edge.second);
+ if (outBlockItr == mf->begin())
+ return true;
+ MachineBasicBlock *outBlockLayoutPred = llvm::prior(outBlockItr);
+ assert(outBlockLayoutPred != 0 && "Should have a layout pred if out!=begin.");
+ MachineBasicBlock *a = 0, *b = 0;
+ SmallVector<MachineOperand, 4> c;
+ return (!tii->AnalyzeBranch(*outBlockLayoutPred, a, b, c) &&
+ !tii->AnalyzeBranch(*edge.first, a, b, c));
+ }
+
+ MachineBasicBlock& LoopSplitter::splitEdge(MachineLoop::Edge &edge,
+ MachineLoop &loop) {
+
+ MachineBasicBlock &inBlock = *edge.first;
+ MachineBasicBlock &outBlock = *edge.second;
+
+ assert((inBlock.succ_size() > 1) && (outBlock.pred_size() > 1) &&
+ "Splitting non-critical edge?");
+
+ //dbgs() << fqn << " Splitting edge (MBB#" << inBlock.getNumber()
+ // << " -> MBB#" << outBlock.getNumber() << ")...";
+
+ MachineBasicBlock *splitBlock =
+ mf->CreateMachineBasicBlock();
+
+ assert(splitBlock != 0 && "Failed to create split block.");
+
+ mf->insert(&outBlock, splitBlock);
+
+ inBlock.ReplaceUsesOfBlockWith(&outBlock, splitBlock);
+ splitBlock->addSuccessor(&outBlock);
+
+ MachineBasicBlock *oldLayoutPred =
+ llvm::prior(MachineFunction::iterator(splitBlock));
+ if (oldLayoutPred != 0) {
+ updateTerminators(*oldLayoutPred);
+ }
+
+ lis->InsertMBBInMaps(splitBlock);
+
+ loopRangeMap.erase(&loop);
+
+ MachineLoop *splitParentLoop = loop.getParentLoop();
+ while (splitParentLoop != 0 &&
+ !splitParentLoop->contains(&outBlock)) {
+ splitParentLoop = splitParentLoop->getParentLoop();
+ }
+
+ if (splitParentLoop != 0) {
+ assert(splitParentLoop->contains(&loop) &&
+ "Split-block parent doesn't contain original loop?");
+ splitParentLoop->addBasicBlockToLoop(splitBlock, mli->getBase());
+
+ // Invalidate all parent loop ranges.
+ while (splitParentLoop != 0) {
+ loopRangeMap.erase(splitParentLoop);
+ splitParentLoop = splitParentLoop->getParentLoop();
+ }
+ }
+
+
+ for (LiveIntervals::iterator liItr = lis->begin(),
+ liEnd = lis->end();
+ liItr != liEnd; ++liItr) {
+ LiveInterval &li = *liItr->second;
+ bool intersects = lis->isLiveOutOfMBB(li, &inBlock) &&
+ lis->isLiveInToMBB(li, &outBlock);
+ if (lis->isLiveInToMBB(li, splitBlock)) {
+ if (!intersects) {
+ li.removeRange(lis->getMBBStartIdx(splitBlock),
+ lis->getMBBEndIdx(splitBlock), true);
+ }
+ } else if (intersects) {
+ VNInfo *newVal = li.getNextValue(lis->getMBBStartIdx(splitBlock),
+ 0, false, lis->getVNInfoAllocator());
+ li.addRange(LiveRange(lis->getMBBStartIdx(splitBlock),
+ lis->getMBBEndIdx(splitBlock),
+ newVal));
+ }
+ }
+
+ //dbgs() << "done. (Added MBB#" << splitBlock->getNumber() << ")\n";
+
+ return *splitBlock;
+ }
+
+ LoopSplitter::LoopRanges& LoopSplitter::getLoopRanges(MachineLoop &loop) {
+ typedef std::set<MachineBasicBlock*, StartSlotComparator> LoopMBBSet;
+ LoopRangeMap::iterator lrItr = loopRangeMap.find(&loop);
+ if (lrItr == loopRangeMap.end()) {
+ LoopMBBSet loopMBBs((StartSlotComparator(*lis)));
+ std::copy(loop.block_begin(), loop.block_end(),
+ std::inserter(loopMBBs, loopMBBs.begin()));
+
+ assert(!loopMBBs.empty() && "No blocks in loop?");
+
+ LoopRanges &loopRanges = loopRangeMap[&loop];
+ assert(loopRanges.empty() && "Loop encountered but not processed?");
+ SlotIndex oldEnd = lis->getMBBEndIdx(*loopMBBs.begin());
+ loopRanges.push_back(
+ std::make_pair(lis->getMBBStartIdx(*loopMBBs.begin()),
+ lis->getInvalidIndex()));
+ for (LoopMBBSet::iterator curBlockItr = llvm::next(loopMBBs.begin()),
+ curBlockEnd = loopMBBs.end();
+ curBlockItr != curBlockEnd; ++curBlockItr) {
+ SlotIndex newStart = lis->getMBBStartIdx(*curBlockItr);
+ if (newStart != oldEnd) {
+ loopRanges.back().second = oldEnd;
+ loopRanges.push_back(std::make_pair(newStart,
+ lis->getInvalidIndex()));
+ }
+ oldEnd = lis->getMBBEndIdx(*curBlockItr);
+ }
+
+ loopRanges.back().second =
+ lis->getMBBEndIdx(*llvm::prior(loopMBBs.end()));
+
+ return loopRanges;
+ }
+ return lrItr->second;
+ }
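+
+ // Editorial sketch, not part of this patch: the coalescing idea above on
+ // plain integer ranges. Blocks sorted by start index are merged whenever one
+ // begins exactly where the previous one ended, yielding the minimal set of
+ // contiguous [start;end) ranges that covers the loop.
+ static void sketchCoalesceRanges(const std::vector<std::pair<int,int> > &SortedBlocks,
+ std::vector<std::pair<int,int> > &Out) {
+ for (unsigned i = 0, e = SortedBlocks.size(); i != e; ++i) {
+ if (!Out.empty() && Out.back().second == SortedBlocks[i].first)
+ Out.back().second = SortedBlocks[i].second; // contiguous: extend last range
+ else
+ Out.push_back(SortedBlocks[i]); // gap before this block: start a new range
+ }
+ }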
+
+ std::pair<bool, LoopSplitter::SlotPair> LoopSplitter::getLoopSubRange(
+ const LiveRange &lr,
+ MachineLoop &loop) {
+ LoopRanges &loopRanges = getLoopRanges(loop);
+ LoopRanges::iterator lrItr = loopRanges.begin(),
+ lrEnd = loopRanges.end();
+ while (lrItr != lrEnd && lr.start >= lrItr->second) {
+ ++lrItr;
+ }
+
+ if (lrItr == lrEnd) {
+ SlotIndex invalid = lis->getInvalidIndex();
+ return std::make_pair(false, SlotPair(invalid, invalid));
+ }
+
+ SlotIndex srStart(lr.start < lrItr->first ? lrItr->first : lr.start);
+ SlotIndex srEnd(lr.end > lrItr->second ? lrItr->second : lr.end);
+
+ return std::make_pair(true, SlotPair(srStart, srEnd));
+ }
+
+ void LoopSplitter::dumpLoopRanges(MachineLoop &loop) {
+ LoopRanges &loopRanges = getLoopRanges(loop);
+ dbgs() << "For loop MBB#" << loop.getHeader()->getNumber() << ", subranges are: [ ";
+ for (LoopRanges::iterator lrItr = loopRanges.begin(), lrEnd = loopRanges.end();
+ lrItr != lrEnd; ++lrItr) {
+ dbgs() << "[" << lrItr->first << ", " << lrItr->second << ") ";
+ }
+ dbgs() << "]\n";
+ }
+
+ void LoopSplitter::processHeader(LoopSplit &split) {
+ MachineBasicBlock &header = *split.getLoop().getHeader();
+ //dbgs() << " Processing loop header BB#" << header.getNumber() << "\n";
+
+ if (!lis->isLiveInToMBB(split.getLI(), &header))
+ return; // Not live in, but nothing wrong so far.
+
+ MachineBasicBlock *preHeader = split.getLoop().getLoopPreheader();
+ if (!preHeader) {
+
+ if (!canInsertPreHeader(split.getLoop())) {
+ split.invalidate();
+ return; // Couldn't insert a pre-header. Bail on this interval.
+ }
+
+ for (MachineBasicBlock::pred_iterator predItr = header.pred_begin(),
+ predEnd = header.pred_end();
+ predItr != predEnd; ++predItr) {
+ if (lis->isLiveOutOfMBB(split.getLI(), *predItr)) {
+ split.splitIncoming();
+ break;
+ }
+ }
+ } else if (lis->isLiveOutOfMBB(split.getLI(), preHeader)) {
+ split.splitIncoming();
+ }
+ }
+
+ void LoopSplitter::processLoopExits(LoopSplit &split) {
+ typedef SmallVector<MachineLoop::Edge, 8> ExitEdgesList;
+ ExitEdgesList exitEdges;
+ split.getLoop().getExitEdges(exitEdges);
+
+ //dbgs() << " Processing loop exits:\n";
+
+ for (ExitEdgesList::iterator exitEdgeItr = exitEdges.begin(),
+ exitEdgeEnd = exitEdges.end();
+ exitEdgeItr != exitEdgeEnd; ++exitEdgeItr) {
+ MachineLoop::Edge exitEdge = *exitEdgeItr;
+
+ LiveRange *outRange =
+ split.getLI().getLiveRangeContaining(lis->getMBBStartIdx(exitEdge.second));
+
+ if (outRange != 0) {
+ if (isCriticalEdge(exitEdge) && !canSplitEdge(exitEdge)) {
+ split.invalidate();
+ return;
+ }
+
+ split.splitOutgoing(exitEdge);
+ }
+ }
+ }
+
+ void LoopSplitter::processLoopUses(LoopSplit &split) {
+ std::set<MachineInstr*> processed;
+
+ for (MachineRegisterInfo::reg_iterator
+ rItr = mri->reg_begin(split.getLI().reg),
+ rEnd = mri->reg_end();
+ rItr != rEnd; ++rItr) {
+ MachineInstr &instr = *rItr;
+ if (split.getLoop().contains(&instr) && processed.count(&instr) == 0) {
+ split.addLoopInstr(&instr);
+ processed.insert(&instr);
+ }
+ }
+
+ //dbgs() << " Rewriting reg" << li.reg << " to reg" << newLI->reg
+ // << " in blocks [ ";
+ //dbgs() << "]\n";
+ }
+
+ bool LoopSplitter::splitOverLoop(LiveInterval &li, MachineLoop &loop) {
+ assert(TargetRegisterInfo::isVirtualRegister(li.reg) &&
+ "Attempt to split physical register.");
+
+ LoopSplit split(*this, li, loop);
+ processHeader(split);
+ if (split.isValid())
+ processLoopExits(split);
+ if (split.isValid())
+ processLoopUses(split);
+ if (split.isValid() /* && split.isWorthwhile() */) {
+ split.apply();
+ DEBUG(dbgs() << "Success.\n");
+ return true;
+ }
+ DEBUG(dbgs() << "Failed.\n");
+ return false;
+ }
+
+ void LoopSplitter::processInterval(LiveInterval &li) {
+ std::deque<MachineLoop*> loops;
+ std::copy(mli->begin(), mli->end(), std::back_inserter(loops));
+
+ while (!loops.empty()) {
+ MachineLoop &loop = *loops.front();
+ loops.pop_front();
+ DEBUG(
+ dbgs() << fqn << " reg" << li.reg << " " << li.weight << " BB#"
+ << loop.getHeader()->getNumber() << " ";
+ );
+ if (!splitOverLoop(li, loop)) {
+ // Couldn't split over outer loop, schedule sub-loops to be checked.
+ std::copy(loop.begin(), loop.end(), std::back_inserter(loops));
+ }
+ }
+ }
+
+ void LoopSplitter::processIntervals() {
+ while (!intervals.empty()) {
+ LiveInterval &li = *intervals.front();
+ intervals.pop_front();
+
+ assert(!lis->intervalIsInOneMBB(li) &&
+ "Single interval in process worklist.");
+
+ processInterval(li);
+ }
+ }
+
+}
diff --git a/libclamav/c++/llvm/lib/CodeGen/Splitter.h b/libclamav/c++/llvm/lib/CodeGen/Splitter.h
new file mode 100644
index 0000000..a726a7b
--- /dev/null
+++ b/libclamav/c++/llvm/lib/CodeGen/Splitter.h
@@ -0,0 +1,99 @@
+//===-- llvm/CodeGen/Splitter.h - Splitter -*- C++ -*----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SPLITTER_H
+#define LLVM_CODEGEN_SPLITTER_H
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+
+#include <deque>
+#include <map>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+ class LiveInterval;
+ class LiveIntervals;
+ struct LiveRange;
+ class LoopSplit;
+ class MachineDominatorTree;
+ class MachineRegisterInfo;
+ class SlotIndexes;
+ class TargetInstrInfo;
+ class VNInfo;
+
+ class LoopSplitter : public MachineFunctionPass {
+ friend class LoopSplit;
+ public:
+ static char ID;
+
+ LoopSplitter() : MachineFunctionPass(ID) {}
+
+ virtual void getAnalysisUsage(AnalysisUsage &au) const;
+
+ virtual bool runOnMachineFunction(MachineFunction &fn);
+
+ virtual void releaseMemory();
+
+
+ private:
+
+ MachineFunction *mf;
+ LiveIntervals *lis;
+ MachineLoopInfo *mli;
+ MachineRegisterInfo *mri;
+ MachineDominatorTree *mdt;
+ SlotIndexes *sis;
+ const TargetInstrInfo *tii;
+ const TargetRegisterInfo *tri;
+
+ std::string fqn;
+ std::deque<LiveInterval*> intervals;
+
+ typedef std::pair<SlotIndex, SlotIndex> SlotPair;
+ typedef std::vector<SlotPair> LoopRanges;
+ typedef std::map<MachineLoop*, LoopRanges> LoopRangeMap;
+ LoopRangeMap loopRangeMap;
+
+ void dumpLoopInfo(MachineLoop &loop);
+
+ void dumpOddTerminators();
+
+ void updateTerminators(MachineBasicBlock &mbb);
+
+ bool canInsertPreHeader(MachineLoop &loop);
+ MachineBasicBlock& insertPreHeader(MachineLoop &loop);
+
+ bool isCriticalEdge(MachineLoop::Edge &edge);
+ bool canSplitEdge(MachineLoop::Edge &edge);
+ MachineBasicBlock& splitEdge(MachineLoop::Edge &edge, MachineLoop &loop);
+
+ LoopRanges& getLoopRanges(MachineLoop &loop);
+ std::pair<bool, SlotPair> getLoopSubRange(const LiveRange &lr,
+ MachineLoop &loop);
+
+ void dumpLoopRanges(MachineLoop &loop);
+
+ void processHeader(LoopSplit &split);
+ void processLoopExits(LoopSplit &split);
+ void processLoopUses(LoopSplit &split);
+
+ bool splitOverLoop(LiveInterval &li, MachineLoop &loop);
+
+ void processInterval(LiveInterval &li);
+
+ void processIntervals();
+ };
+
+}
+
+#endif
diff --git a/libclamav/c++/llvm/lib/CodeGen/StackProtector.cpp b/libclamav/c++/llvm/lib/CodeGen/StackProtector.cpp
index 8a6a727..9f51778 100644
--- a/libclamav/c++/llvm/lib/CodeGen/StackProtector.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/StackProtector.cpp
@@ -62,17 +62,17 @@ namespace {
bool RequiresStackProtector() const;
public:
static char ID; // Pass identification, replacement for typeid.
- StackProtector() : FunctionPass(&ID), TLI(0) {}
+ StackProtector() : FunctionPass(ID), TLI(0) {}
StackProtector(const TargetLowering *tli)
- : FunctionPass(&ID), TLI(tli) {}
+ : FunctionPass(ID), TLI(tli) {}
virtual bool runOnFunction(Function &Fn);
};
} // end anonymous namespace
char StackProtector::ID = 0;
-static RegisterPass<StackProtector>
-X("stack-protector", "Insert stack protectors");
+INITIALIZE_PASS(StackProtector, "stack-protector",
+ "Insert stack protectors", false, false);
FunctionPass *llvm::createStackProtectorPass(const TargetLowering *tli) {
return new StackProtector(tli);
@@ -136,7 +136,7 @@ bool StackProtector::RequiresStackProtector() const {
bool StackProtector::InsertStackProtectors() {
BasicBlock *FailBB = 0; // The basic block to jump to if check fails.
AllocaInst *AI = 0; // Place on stack that stores the stack guard.
- Constant *StackGuardVar = 0; // The stack guard variable.
+ Value *StackGuardVar = 0; // The stack guard variable.
for (Function::iterator I = F->begin(), E = F->end(); I != E; ) {
BasicBlock *BB = I++;
@@ -153,9 +153,17 @@ bool StackProtector::InsertStackProtectors() {
// StackGuard = load __stack_chk_guard
// call void @llvm.stackprotect.create(StackGuard, StackGuardSlot)
//
- PointerType *PtrTy = PointerType::getUnqual(
- Type::getInt8Ty(RI->getContext()));
- StackGuardVar = M->getOrInsertGlobal("__stack_chk_guard", PtrTy);
+ const PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
+ unsigned AddressSpace, Offset;
+ if (TLI->getStackCookieLocation(AddressSpace, Offset)) {
+ Constant *OffsetVal =
+ ConstantInt::get(Type::getInt32Ty(RI->getContext()), Offset);
+
+ StackGuardVar = ConstantExpr::getIntToPtr(OffsetVal,
+ PointerType::get(PtrTy, AddressSpace));
+ } else {
+ StackGuardVar = M->getOrInsertGlobal("__stack_chk_guard", PtrTy);
+ }
BasicBlock &Entry = F->getEntryBlock();
Instruction *InsPt = &Entry.front();
diff --git a/libclamav/c++/llvm/lib/CodeGen/StackSlotColoring.cpp b/libclamav/c++/llvm/lib/CodeGen/StackSlotColoring.cpp
index 12d38f0..8d57ae9 100644
--- a/libclamav/c++/llvm/lib/CodeGen/StackSlotColoring.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/StackSlotColoring.cpp
@@ -13,10 +13,13 @@
#define DEBUG_TYPE "stackcoloring"
#include "VirtRegMap.h"
+#include "llvm/Function.h"
+#include "llvm/Module.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -92,9 +95,9 @@ namespace {
public:
static char ID; // Pass identification
StackSlotColoring() :
- MachineFunctionPass(&ID), ColorWithRegs(false), NextColor(-1) {}
+ MachineFunctionPass(ID), ColorWithRegs(false), NextColor(-1) {}
StackSlotColoring(bool RegColor) :
- MachineFunctionPass(&ID), ColorWithRegs(RegColor), NextColor(-1) {}
+ MachineFunctionPass(ID), ColorWithRegs(RegColor), NextColor(-1) {}
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
@@ -142,8 +145,8 @@ namespace {
char StackSlotColoring::ID = 0;
-static RegisterPass<StackSlotColoring>
-X("stack-slot-coloring", "Stack Slot Coloring");
+INITIALIZE_PASS(StackSlotColoring, "stack-slot-coloring",
+ "Stack Slot Coloring", false, false);
FunctionPass *llvm::createStackSlotColoringPass(bool RegColor) {
return new StackSlotColoring(RegColor);
@@ -182,7 +185,8 @@ void StackSlotColoring::ScanForSpillSlotRefs(MachineFunction &MF) {
if (!LS->hasInterval(FI))
continue;
LiveInterval &li = LS->getInterval(FI);
- li.weight += LiveIntervals::getSpillWeight(false, true, loopDepth);
+ if (!MI->isDebugValue())
+ li.weight += LiveIntervals::getSpillWeight(false, true, loopDepth);
SSRefs[FI].push_back(MI);
}
}
@@ -504,8 +508,7 @@ bool StackSlotColoring::PropagateBackward(MachineBasicBlock::iterator MII,
// Abort if the use is actually a sub-register def. We don't have enough
// information to figure out if it is really legal.
- if (MO.getSubReg() || MII->isExtractSubreg() ||
- MII->isInsertSubreg() || MII->isSubregToReg())
+ if (MO.getSubReg() || MII->isSubregToReg())
return false;
const TargetRegisterClass *RC = TID.OpInfo[i].getRegClass(TRI);
@@ -567,7 +570,7 @@ bool StackSlotColoring::PropagateForward(MachineBasicBlock::iterator MII,
// Abort if the use is actually a sub-register use. We don't have enough
// information to figure out if it is really legal.
- if (MO.getSubReg() || MII->isExtractSubreg())
+ if (MO.getSubReg())
return false;
const TargetRegisterClass *RC = TID.OpInfo[i].getRegClass(TRI);
@@ -606,7 +609,8 @@ StackSlotColoring::UnfoldAndRewriteInstruction(MachineInstr *MI, int OldFI,
DEBUG(MI->dump());
++NumLoadElim;
} else {
- TII->copyRegToReg(*MBB, MI, DstReg, Reg, RC, RC);
+ BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(TargetOpcode::COPY),
+ DstReg).addReg(Reg);
++NumRegRepl;
}
@@ -622,7 +626,8 @@ StackSlotColoring::UnfoldAndRewriteInstruction(MachineInstr *MI, int OldFI,
DEBUG(MI->dump());
++NumStoreElim;
} else {
- TII->copyRegToReg(*MBB, MI, Reg, SrcReg, RC, RC);
+ BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(TargetOpcode::COPY), Reg)
+ .addReg(SrcReg);
++NumRegRepl;
}
@@ -696,7 +701,11 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
bool StackSlotColoring::runOnMachineFunction(MachineFunction &MF) {
- DEBUG(dbgs() << "********** Stack Slot Coloring **********\n");
+ DEBUG({
+ dbgs() << "********** Stack Slot Coloring **********\n"
+ << "********** Function: "
+ << MF.getFunction()->getName() << '\n';
+ });
MFI = MF.getFrameInfo();
MRI = &MF.getRegInfo();
@@ -715,6 +724,13 @@ bool StackSlotColoring::runOnMachineFunction(MachineFunction &MF) {
return false;
}
+ // If there are calls to setjmp or sigsetjmp, don't perform stack slot
+ // coloring. The stack could be modified before the longjmp is executed,
+ // resulting in the wrong value being used afterwards. (See
+ // <rdar://problem/8007500>.)
+ if (MF.callsSetJmp())
+ return false;
+
// Gather spill slot references
ScanForSpillSlotRefs(MF);
InitializeSlots();
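
This file also starts a pattern repeated in the files below: TII->copyRegToReg() is gone, and copies are emitted as the target-independent COPY opcode via BuildMI(), which the target expands later. A small sketch of the replacement idiom (the helper name and parameters are illustrative, not from the patch):

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/Target/TargetInstrInfo.h"
    #include "llvm/Target/TargetOpcodes.h"
    using namespace llvm;

    // Emit "DstReg = COPY SrcReg" at Pos; the target lowers the COPY later.
    static void emitCopy(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator Pos, DebugLoc DL,
                         const TargetInstrInfo *TII,
                         unsigned DstReg, unsigned SrcReg) {
      // Old API: TII->copyRegToReg(MBB, Pos, DstReg, SrcReg, RC, RC);
      BuildMI(MBB, Pos, DL, TII->get(TargetOpcode::COPY), DstReg)
        .addReg(SrcReg);
    }
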
diff --git a/libclamav/c++/llvm/lib/CodeGen/StrongPHIElimination.cpp b/libclamav/c++/llvm/lib/CodeGen/StrongPHIElimination.cpp
index f8f6a55..894dbfa 100644
--- a/libclamav/c++/llvm/lib/CodeGen/StrongPHIElimination.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/StrongPHIElimination.cpp
@@ -25,6 +25,7 @@
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterCoalescer.h"
@@ -38,7 +39,7 @@ using namespace llvm;
namespace {
struct StrongPHIElimination : public MachineFunctionPass {
static char ID; // Pass identification, replacement for typeid
- StrongPHIElimination() : MachineFunctionPass(&ID) {}
+ StrongPHIElimination() : MachineFunctionPass(ID) {}
// Waiting stores, for each MBB, the set of copies that need to
// be inserted into that MBB
@@ -149,11 +150,10 @@ namespace {
}
char StrongPHIElimination::ID = 0;
-static RegisterPass<StrongPHIElimination>
-X("strong-phi-node-elimination",
- "Eliminate PHI nodes for register allocation, intelligently");
+INITIALIZE_PASS(StrongPHIElimination, "strong-phi-node-elimination",
+ "Eliminate PHI nodes for register allocation, intelligently", false, false);
-const PassInfo *const llvm::StrongPHIEliminationID = &X;
+char &llvm::StrongPHIEliminationID = StrongPHIElimination::ID;
/// computeDFS - Computes the DFS-in and DFS-out numbers of the dominator tree
/// of the given MachineFunction. These numbers are then used in other parts
@@ -695,9 +695,8 @@ void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,
// Insert copy from curr.second to a temporary at
// the Phi defining curr.second
MachineBasicBlock::iterator PI = MRI.getVRegDef(curr.second);
- TII->copyRegToReg(*PI->getParent(), PI, t,
- curr.second, RC, RC);
-
+ BuildMI(*PI->getParent(), PI, DebugLoc(), TII->get(TargetOpcode::COPY),
+ t).addReg(curr.second);
DEBUG(dbgs() << "Inserted copy from " << curr.second << " to " << t
<< "\n");
@@ -712,8 +711,8 @@ void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,
}
// Insert copy from map[curr.first] to curr.second
- TII->copyRegToReg(*MBB, MBB->getFirstTerminator(), curr.second,
- map[curr.first], RC, RC);
+ BuildMI(*MBB, MBB->getFirstTerminator(), DebugLoc(),
+ TII->get(TargetOpcode::COPY), curr.second).addReg(map[curr.first]);
map[curr.first] = curr.second;
DEBUG(dbgs() << "Inserted copy from " << curr.first << " to "
<< curr.second << "\n");
@@ -761,8 +760,8 @@ void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,
// Insert a copy from dest to a new temporary t at the end of b
unsigned t = MF->getRegInfo().createVirtualRegister(RC);
- TII->copyRegToReg(*MBB, MBB->getFirstTerminator(), t,
- curr.second, RC, RC);
+ BuildMI(*MBB, MBB->getFirstTerminator(), DebugLoc(),
+ TII->get(TargetOpcode::COPY), t).addReg(curr.second);
map[curr.second] = t;
MachineBasicBlock::iterator TI = MBB->getFirstTerminator();
@@ -830,9 +829,6 @@ void StrongPHIElimination::InsertCopies(MachineDomTreeNode* MDTN,
LiveInterval& Int = LI.getInterval(I->getOperand(i).getReg());
VNInfo* FirstVN = *Int.vni_begin();
FirstVN->setHasPHIKill(false);
- if (I->getOperand(i).isKill())
- FirstVN->addKill(LI.getInstructionIndex(I).getUseIndex());
-
LiveRange LR (LI.getMBBStartIdx(I->getParent()),
LI.getInstructionIndex(I).getUseIndex().getNextSlot(),
FirstVN);
@@ -959,9 +955,8 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
} else {
// Insert a last-minute copy if a conflict was detected.
const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo();
- const TargetRegisterClass *RC = Fn.getRegInfo().getRegClass(I->first);
- TII->copyRegToReg(*SI->second, SI->second->getFirstTerminator(),
- I->first, SI->first, RC, RC);
+ BuildMI(*SI->second, SI->second->getFirstTerminator(), DebugLoc(),
+ TII->get(TargetOpcode::COPY), I->first).addReg(SI->first);
LI.renumber();
diff --git a/libclamav/c++/llvm/lib/CodeGen/TailDuplication.cpp b/libclamav/c++/llvm/lib/CodeGen/TailDuplication.cpp
index 3223e53..a815b36 100644
--- a/libclamav/c++/llvm/lib/CodeGen/TailDuplication.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/TailDuplication.cpp
@@ -17,6 +17,7 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -68,7 +69,7 @@ namespace {
public:
static char ID;
explicit TailDuplicatePass(bool PreRA) :
- MachineFunctionPass(&ID), PreRegAlloc(PreRA) {}
+ MachineFunctionPass(ID), PreRegAlloc(PreRA) {}
virtual bool runOnMachineFunction(MachineFunction &MF);
virtual const char *getPassName() const { return "Tail Duplication"; }
@@ -253,14 +254,15 @@ bool TailDuplicatePass::TailDuplicateBlocks(MachineFunction &MF) {
// SSA form.
for (unsigned i = 0, e = Copies.size(); i != e; ++i) {
MachineInstr *Copy = Copies[i];
- unsigned Src, Dst, SrcSR, DstSR;
- if (TII->isMoveInstr(*Copy, Src, Dst, SrcSR, DstSR)) {
- MachineRegisterInfo::use_iterator UI = MRI->use_begin(Src);
- if (++UI == MRI->use_end()) {
- // Copy is the only use. Do trivial copy propagation here.
- MRI->replaceRegWith(Dst, Src);
- Copy->eraseFromParent();
- }
+ if (!Copy->isCopy())
+ continue;
+ unsigned Dst = Copy->getOperand(0).getReg();
+ unsigned Src = Copy->getOperand(1).getReg();
+ MachineRegisterInfo::use_iterator UI = MRI->use_begin(Src);
+ if (++UI == MRI->use_end()) {
+ // Copy is the only use. Do trivial copy propagation here.
+ MRI->replaceRegWith(Dst, Src);
+ Copy->eraseFromParent();
}
}
@@ -495,7 +497,7 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB, MachineFunction &MF,
if (InstrCount == MaxDuplicateCount) return false;
// Remember if we saw a call.
if (I->getDesc().isCall()) HasCall = true;
- if (!I->isPHI())
+ if (!I->isPHI() && !I->isDebugValue())
InstrCount += 1;
}
// Heuristically, don't tail-duplicate calls if it would expand code size,
@@ -559,11 +561,9 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB, MachineFunction &MF,
}
MachineBasicBlock::iterator Loc = PredBB->getFirstTerminator();
for (unsigned i = 0, e = CopyInfos.size(); i != e; ++i) {
- const TargetRegisterClass *RC = MRI->getRegClass(CopyInfos[i].first);
- TII->copyRegToReg(*PredBB, Loc, CopyInfos[i].first,
- CopyInfos[i].second, RC,RC);
- MachineInstr *CopyMI = prior(Loc);
- Copies.push_back(CopyMI);
+ Copies.push_back(BuildMI(*PredBB, Loc, DebugLoc(),
+ TII->get(TargetOpcode::COPY),
+ CopyInfos[i].first).addReg(CopyInfos[i].second));
}
NumInstrDups += TailBB->size() - 1; // subtract one for removed branch
@@ -618,11 +618,10 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB, MachineFunction &MF,
}
MachineBasicBlock::iterator Loc = PrevBB->getFirstTerminator();
for (unsigned i = 0, e = CopyInfos.size(); i != e; ++i) {
- const TargetRegisterClass *RC = MRI->getRegClass(CopyInfos[i].first);
- TII->copyRegToReg(*PrevBB, Loc, CopyInfos[i].first,
- CopyInfos[i].second, RC, RC);
- MachineInstr *CopyMI = prior(Loc);
- Copies.push_back(CopyMI);
+ Copies.push_back(BuildMI(*PrevBB, Loc, DebugLoc(),
+ TII->get(TargetOpcode::COPY),
+ CopyInfos[i].first)
+ .addReg(CopyInfos[i].second));
}
} else {
// No PHIs to worry about, just splice the instructions over.
@@ -648,17 +647,6 @@ void TailDuplicatePass::RemoveDeadBlock(MachineBasicBlock *MBB) {
while (!MBB->succ_empty())
MBB->removeSuccessor(MBB->succ_end()-1);
- // If there are any labels in the basic block, unregister them from
- // MachineModuleInfo.
- if (MMI && !MBB->empty()) {
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
- I != E; ++I) {
- if (I->isLabel())
- // The label ID # is always operand #0, an immediate.
- MMI->InvalidateLabel(I->getOperand(0).getImm());
- }
- }
-
// Remove the block.
MBB->eraseFromParent();
}
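
The TailDuplication hunk above also drops TII->isMoveInstr() in favour of the structural MachineInstr::isCopy() check. Roughly, a caller that needs the two registers of a copy now does something like the sketch below (helper name illustrative):

    #include "llvm/CodeGen/MachineInstr.h"
    using namespace llvm;

    // Replaces the old TII->isMoveInstr(MI, Src, Dst, SrcSub, DstSub) query.
    static bool getCopyRegs(const MachineInstr *MI,
                            unsigned &DstReg, unsigned &SrcReg) {
      if (!MI->isCopy())
        return false;
      DstReg = MI->getOperand(0).getReg();   // COPY operand 0: destination
      SrcReg = MI->getOperand(1).getReg();   // COPY operand 1: source
      return true;
    }
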
diff --git a/libclamav/c++/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp b/libclamav/c++/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
index e9e998f..6e4a0d8 100644
--- a/libclamav/c++/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/SmallVector.h"
@@ -21,11 +22,34 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PostRAHazardRecognizer.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
+/// after it, replacing it with an unconditional branch to NewDest.
+void
+TargetInstrInfoImpl::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
+ MachineBasicBlock *NewDest) const {
+ MachineBasicBlock *MBB = Tail->getParent();
+
+ // Remove all the old successors of MBB from the CFG.
+ while (!MBB->succ_empty())
+ MBB->removeSuccessor(MBB->succ_begin());
+
+ // Remove all the dead instructions from the end of MBB.
+ MBB->erase(Tail, MBB->end());
+
+  // If MBB isn't immediately before NewDest, insert a branch to it.
+ if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
+ InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(),
+ Tail->getDebugLoc());
+ MBB->addSuccessor(NewDest);
+}
+
// commuteInstruction - The default implementation of this method just exchanges
// the two operands returned by findCommutedOpIndices.
MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
@@ -40,7 +64,7 @@ MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
std::string msg;
raw_string_ostream Msg(msg);
Msg << "Don't know how to commute: " << *MI;
- llvm_report_error(Msg.str());
+ report_fatal_error(Msg.str());
}
assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
@@ -136,17 +160,9 @@ void TargetInstrInfoImpl::reMaterialize(MachineBasicBlock &MBB,
unsigned DestReg,
unsigned SubIdx,
const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const {
+ const TargetRegisterInfo &TRI) const {
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
- MachineOperand &MO = MI->getOperand(0);
- if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
- MO.setReg(DestReg);
- MO.setSubReg(SubIdx);
- } else if (SubIdx) {
- MO.setReg(TRI->getSubReg(DestReg, SubIdx));
- } else {
- MO.setReg(DestReg);
- }
+ MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
MBB.insert(I, MI);
}
@@ -162,17 +178,45 @@ MachineInstr *TargetInstrInfoImpl::duplicate(MachineInstr *Orig,
return MF.CloneMachineInstr(Orig);
}
-unsigned
-TargetInstrInfoImpl::GetFunctionSizeInBytes(const MachineFunction &MF) const {
- unsigned FnSize = 0;
- for (MachineFunction::const_iterator MBBI = MF.begin(), E = MF.end();
- MBBI != E; ++MBBI) {
- const MachineBasicBlock &MBB = *MBBI;
- for (MachineBasicBlock::const_iterator I = MBB.begin(),E = MBB.end();
- I != E; ++I)
- FnSize += GetInstSizeInBytes(I);
- }
- return FnSize;
+// If the COPY instruction in MI can be folded to a stack operation, return
+// the register class to use.
+static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
+ unsigned FoldIdx) {
+ assert(MI->isCopy() && "MI must be a COPY instruction");
+ if (MI->getNumOperands() != 2)
+ return 0;
+  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");
+
+ const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
+ const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);
+
+ if (FoldOp.getSubReg() || LiveOp.getSubReg())
+ return 0;
+
+ unsigned FoldReg = FoldOp.getReg();
+ unsigned LiveReg = LiveOp.getReg();
+
+ assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
+ "Cannot fold physregs");
+
+ const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+ const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
+
+ if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
+ return RC->contains(LiveOp.getReg()) ? RC : 0;
+
+ const TargetRegisterClass *LiveRC = MRI.getRegClass(LiveReg);
+ if (RC == LiveRC || RC->hasSubClass(LiveRC))
+ return RC;
+
+ // FIXME: Allow folding when register classes are memory compatible.
+ return 0;
+}
+
+bool TargetInstrInfoImpl::
+canFoldMemoryOperand(const MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops) const {
+ return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
@@ -182,10 +226,9 @@ TargetInstrInfoImpl::GetFunctionSizeInBytes(const MachineFunction &MF) const {
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr*
-TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
- MachineInstr* MI,
+TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const {
+ int FI) const {
unsigned Flags = 0;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (MI->getOperand(Ops[i]).isDef())
@@ -193,34 +236,56 @@ TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
else
Flags |= MachineMemOperand::MOLoad;
+ MachineBasicBlock *MBB = MI->getParent();
+ assert(MBB && "foldMemoryOperand needs an inserted instruction");
+ MachineFunction &MF = *MBB->getParent();
+
// Ask the target to do the actual folding.
- MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
- if (!NewMI) return 0;
+ if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
+ // Add a memory operand, foldMemoryOperandImpl doesn't do that.
+ assert((!(Flags & MachineMemOperand::MOStore) ||
+ NewMI->getDesc().mayStore()) &&
+ "Folded a def to a non-store!");
+ assert((!(Flags & MachineMemOperand::MOLoad) ||
+ NewMI->getDesc().mayLoad()) &&
+ "Folded a use to a non-load!");
+ const MachineFrameInfo &MFI = *MF.getFrameInfo();
+ assert(MFI.getObjectOffset(FI) != -1);
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
+ Flags, /*Offset=*/0,
+ MFI.getObjectSize(FI),
+ MFI.getObjectAlignment(FI));
+ NewMI->addMemOperand(MF, MMO);
- assert((!(Flags & MachineMemOperand::MOStore) ||
- NewMI->getDesc().mayStore()) &&
- "Folded a def to a non-store!");
- assert((!(Flags & MachineMemOperand::MOLoad) ||
- NewMI->getDesc().mayLoad()) &&
- "Folded a use to a non-load!");
- const MachineFrameInfo &MFI = *MF.getFrameInfo();
- assert(MFI.getObjectOffset(FrameIndex) != -1);
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIndex),
- Flags, /*Offset=*/0,
- MFI.getObjectSize(FrameIndex),
- MFI.getObjectAlignment(FrameIndex));
- NewMI->addMemOperand(MF, MMO);
+ // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
+ return MBB->insert(MI, NewMI);
+ }
- return NewMI;
+ // Straight COPY may fold as load/store.
+ if (!MI->isCopy() || Ops.size() != 1)
+ return 0;
+
+ const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
+ if (!RC)
+ return 0;
+
+ const MachineOperand &MO = MI->getOperand(1-Ops[0]);
+ MachineBasicBlock::iterator Pos = MI;
+ const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+
+ if (Flags == MachineMemOperand::MOStore)
+ storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
+ else
+ loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
+ return --Pos;
}
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr*
-TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
- MachineInstr* MI,
+TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
assert(LoadMI->getDesc().canFoldAsLoad() && "LoadMI isn't foldable!");
@@ -228,11 +293,15 @@ TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
+ MachineBasicBlock &MBB = *MI->getParent();
+ MachineFunction &MF = *MBB.getParent();
// Ask the target to do the actual folding.
MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
if (!NewMI) return 0;
+ NewMI = MBB.insert(MI, NewMI);
+
// Copy the memoperands from the load to the folded instruction.
NewMI->setMemRefs(LoadMI->memoperands_begin(),
LoadMI->memoperands_end());
@@ -240,11 +309,9 @@ TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
return NewMI;
}
-bool
-TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(const MachineInstr *
- MI,
- AliasAnalysis *
- AA) const {
+bool TargetInstrInfo::
+isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
+ AliasAnalysis *AA) const {
const MachineFunction &MF = *MI->getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
const TargetMachine &TM = MF.getTarget();
@@ -324,3 +391,31 @@ TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(const MachineInstr *
// Everything checked out.
return true;
}
+
+/// isSchedulingBoundary - Test if the given instruction should be
+/// considered a scheduling boundary. This primarily includes labels
+/// and terminators.
+bool TargetInstrInfoImpl::isSchedulingBoundary(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const{
+ // Terminators and labels can't be scheduled around.
+ if (MI->getDesc().isTerminator() || MI->isLabel())
+ return true;
+
+ // Don't attempt to schedule around any instruction that defines
+ // a stack-oriented pointer, as it's unlikely to be profitable. This
+ // saves compile time, because it doesn't require every single
+ // stack slot reference to depend on the instruction that does the
+ // modification.
+ const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
+ if (MI->definesRegister(TLI.getStackPointerRegisterToSaveRestore()))
+ return true;
+
+ return false;
+}
+
+// Default implementation of CreateTargetPostRAHazardRecognizer.
+ScheduleHazardRecognizer *TargetInstrInfoImpl::
+CreateTargetPostRAHazardRecognizer(const InstrItineraryData &II) const {
+ return (ScheduleHazardRecognizer *)new PostRAHazardRecognizer(II);
+}
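
With the reworked foldMemoryOperand() above, the callee now both folds and inserts the new instruction, and a plain COPY may be folded directly into a spill or reload of the slot. A caller-side sketch of the new interface (function name and cleanup are illustrative, not from the patch):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/Target/TargetInstrInfo.h"
    using namespace llvm;

    // Try to fold operand OpNo of *MI into a load/store on stack slot FI.
    static bool tryFoldIntoSlot(const TargetInstrInfo *TII,
                                MachineBasicBlock::iterator MI,
                                unsigned OpNo, int FI) {
      SmallVector<unsigned, 1> Ops;
      Ops.push_back(OpNo);
      // On success the folded instruction is already inserted next to MI.
      if (MachineInstr *NewMI = TII->foldMemoryOperand(MI, Ops, FI)) {
        (void)NewMI;
        MI->eraseFromParent();   // illustrative: callers differ in cleanup
        return true;
      }
      return false;
    }
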
diff --git a/libclamav/c++/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/libclamav/c++/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index d127f53..f1e10ee 100644
--- a/libclamav/c++/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -22,6 +22,7 @@
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h"
@@ -38,108 +39,88 @@ using namespace dwarf;
//===----------------------------------------------------------------------===//
// ELF
//===----------------------------------------------------------------------===//
-typedef StringMap<const MCSectionELF*> ELFUniqueMapTy;
-
-TargetLoweringObjectFileELF::~TargetLoweringObjectFileELF() {
- // If we have the section uniquing map, free it.
- delete (ELFUniqueMapTy*)UniquingMap;
-}
-
-const MCSection *TargetLoweringObjectFileELF::
-getELFSection(StringRef Section, unsigned Type, unsigned Flags,
- SectionKind Kind, bool IsExplicit) const {
- if (UniquingMap == 0)
- UniquingMap = new ELFUniqueMapTy();
- ELFUniqueMapTy &Map = *(ELFUniqueMapTy*)UniquingMap;
-
- // Do the lookup, if we have a hit, return it.
- const MCSectionELF *&Entry = Map[Section];
- if (Entry) return Entry;
-
- return Entry = MCSectionELF::Create(Section, Type, Flags, Kind, IsExplicit,
- getContext());
-}
void TargetLoweringObjectFileELF::Initialize(MCContext &Ctx,
const TargetMachine &TM) {
- if (UniquingMap != 0)
- ((ELFUniqueMapTy*)UniquingMap)->clear();
TargetLoweringObjectFile::Initialize(Ctx, TM);
BSSSection =
- getELFSection(".bss", MCSectionELF::SHT_NOBITS,
- MCSectionELF::SHF_WRITE | MCSectionELF::SHF_ALLOC,
- SectionKind::getBSS());
+ getContext().getELFSection(".bss", MCSectionELF::SHT_NOBITS,
+ MCSectionELF::SHF_WRITE |MCSectionELF::SHF_ALLOC,
+ SectionKind::getBSS());
TextSection =
- getELFSection(".text", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_EXECINSTR | MCSectionELF::SHF_ALLOC,
- SectionKind::getText());
+ getContext().getELFSection(".text", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_EXECINSTR |
+ MCSectionELF::SHF_ALLOC,
+ SectionKind::getText());
DataSection =
- getELFSection(".data", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_WRITE | MCSectionELF::SHF_ALLOC,
- SectionKind::getDataRel());
+ getContext().getELFSection(".data", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_WRITE |MCSectionELF::SHF_ALLOC,
+ SectionKind::getDataRel());
ReadOnlySection =
- getELFSection(".rodata", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC,
- SectionKind::getReadOnly());
+ getContext().getELFSection(".rodata", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_ALLOC,
+ SectionKind::getReadOnly());
TLSDataSection =
- getELFSection(".tdata", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_TLS |
- MCSectionELF::SHF_WRITE, SectionKind::getThreadData());
+ getContext().getELFSection(".tdata", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_TLS |
+ MCSectionELF::SHF_WRITE,
+ SectionKind::getThreadData());
TLSBSSSection =
- getELFSection(".tbss", MCSectionELF::SHT_NOBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_TLS |
- MCSectionELF::SHF_WRITE, SectionKind::getThreadBSS());
+ getContext().getELFSection(".tbss", MCSectionELF::SHT_NOBITS,
+ MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_TLS |
+ MCSectionELF::SHF_WRITE,
+ SectionKind::getThreadBSS());
DataRelSection =
- getELFSection(".data.rel", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_WRITE,
- SectionKind::getDataRel());
+ getContext().getELFSection(".data.rel", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_WRITE,
+ SectionKind::getDataRel());
DataRelLocalSection =
- getELFSection(".data.rel.local", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_WRITE,
- SectionKind::getDataRelLocal());
+ getContext().getELFSection(".data.rel.local", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_WRITE,
+ SectionKind::getDataRelLocal());
DataRelROSection =
- getELFSection(".data.rel.ro", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_WRITE,
- SectionKind::getReadOnlyWithRel());
+ getContext().getELFSection(".data.rel.ro", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_WRITE,
+ SectionKind::getReadOnlyWithRel());
DataRelROLocalSection =
- getELFSection(".data.rel.ro.local", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_WRITE,
- SectionKind::getReadOnlyWithRelLocal());
+ getContext().getELFSection(".data.rel.ro.local", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_WRITE,
+ SectionKind::getReadOnlyWithRelLocal());
MergeableConst4Section =
- getELFSection(".rodata.cst4", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_MERGE,
- SectionKind::getMergeableConst4());
+ getContext().getELFSection(".rodata.cst4", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_MERGE,
+ SectionKind::getMergeableConst4());
MergeableConst8Section =
- getELFSection(".rodata.cst8", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_MERGE,
- SectionKind::getMergeableConst8());
+ getContext().getELFSection(".rodata.cst8", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_MERGE,
+ SectionKind::getMergeableConst8());
MergeableConst16Section =
- getELFSection(".rodata.cst16", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_MERGE,
- SectionKind::getMergeableConst16());
+ getContext().getELFSection(".rodata.cst16", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_MERGE,
+ SectionKind::getMergeableConst16());
StaticCtorSection =
- getELFSection(".ctors", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_WRITE,
- SectionKind::getDataRel());
+ getContext().getELFSection(".ctors", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_WRITE,
+ SectionKind::getDataRel());
StaticDtorSection =
- getELFSection(".dtors", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_WRITE,
- SectionKind::getDataRel());
+ getContext().getELFSection(".dtors", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_WRITE,
+ SectionKind::getDataRel());
// Exception Handling Sections.
@@ -148,47 +129,48 @@ void TargetLoweringObjectFileELF::Initialize(MCContext &Ctx,
// runtime hit for C++ apps. Either the contents of the LSDA need to be
// adjusted or this should be a data section.
LSDASection =
- getELFSection(".gcc_except_table", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC, SectionKind::getReadOnly());
+ getContext().getELFSection(".gcc_except_table", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_ALLOC,
+ SectionKind::getReadOnly());
EHFrameSection =
- getELFSection(".eh_frame", MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC | MCSectionELF::SHF_WRITE,
- SectionKind::getDataRel());
+ getContext().getELFSection(".eh_frame", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_ALLOC |MCSectionELF::SHF_WRITE,
+ SectionKind::getDataRel());
// Debug Info Sections.
DwarfAbbrevSection =
- getELFSection(".debug_abbrev", MCSectionELF::SHT_PROGBITS, 0,
- SectionKind::getMetadata());
+ getContext().getELFSection(".debug_abbrev", MCSectionELF::SHT_PROGBITS, 0,
+ SectionKind::getMetadata());
DwarfInfoSection =
- getELFSection(".debug_info", MCSectionELF::SHT_PROGBITS, 0,
- SectionKind::getMetadata());
+ getContext().getELFSection(".debug_info", MCSectionELF::SHT_PROGBITS, 0,
+ SectionKind::getMetadata());
DwarfLineSection =
- getELFSection(".debug_line", MCSectionELF::SHT_PROGBITS, 0,
- SectionKind::getMetadata());
+ getContext().getELFSection(".debug_line", MCSectionELF::SHT_PROGBITS, 0,
+ SectionKind::getMetadata());
DwarfFrameSection =
- getELFSection(".debug_frame", MCSectionELF::SHT_PROGBITS, 0,
- SectionKind::getMetadata());
+ getContext().getELFSection(".debug_frame", MCSectionELF::SHT_PROGBITS, 0,
+ SectionKind::getMetadata());
DwarfPubNamesSection =
- getELFSection(".debug_pubnames", MCSectionELF::SHT_PROGBITS, 0,
- SectionKind::getMetadata());
+ getContext().getELFSection(".debug_pubnames", MCSectionELF::SHT_PROGBITS, 0,
+ SectionKind::getMetadata());
DwarfPubTypesSection =
- getELFSection(".debug_pubtypes", MCSectionELF::SHT_PROGBITS, 0,
- SectionKind::getMetadata());
+ getContext().getELFSection(".debug_pubtypes", MCSectionELF::SHT_PROGBITS, 0,
+ SectionKind::getMetadata());
DwarfStrSection =
- getELFSection(".debug_str", MCSectionELF::SHT_PROGBITS, 0,
- SectionKind::getMetadata());
+ getContext().getELFSection(".debug_str", MCSectionELF::SHT_PROGBITS, 0,
+ SectionKind::getMetadata());
DwarfLocSection =
- getELFSection(".debug_loc", MCSectionELF::SHT_PROGBITS, 0,
- SectionKind::getMetadata());
+ getContext().getELFSection(".debug_loc", MCSectionELF::SHT_PROGBITS, 0,
+ SectionKind::getMetadata());
DwarfARangesSection =
- getELFSection(".debug_aranges", MCSectionELF::SHT_PROGBITS, 0,
- SectionKind::getMetadata());
+ getContext().getELFSection(".debug_aranges", MCSectionELF::SHT_PROGBITS, 0,
+ SectionKind::getMetadata());
DwarfRangesSection =
- getELFSection(".debug_ranges", MCSectionELF::SHT_PROGBITS, 0,
- SectionKind::getMetadata());
+ getContext().getELFSection(".debug_ranges", MCSectionELF::SHT_PROGBITS, 0,
+ SectionKind::getMetadata());
DwarfMacroInfoSection =
- getELFSection(".debug_macinfo", MCSectionELF::SHT_PROGBITS, 0,
- SectionKind::getMetadata());
+ getContext().getELFSection(".debug_macinfo", MCSectionELF::SHT_PROGBITS, 0,
+ SectionKind::getMetadata());
}
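
From here on the per-object-file uniquing maps are removed and sections are requested straight from MCContext, which owns the uniquing. A minimal sketch of the new lookup (the section name is made up):

    #include "llvm/MC/MCContext.h"
    #include "llvm/MC/MCSectionELF.h"
    #include "llvm/MC/SectionKind.h"
    using namespace llvm;

    static const MCSection *getMyDataSection(MCContext &Ctx) {
      // Repeated calls with the same name return the same uniqued section.
      return Ctx.getELFSection(".mydata", MCSectionELF::SHT_PROGBITS,
                               MCSectionELF::SHF_ALLOC |
                               MCSectionELF::SHF_WRITE,
                               SectionKind::getDataRel());
    }
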
@@ -277,9 +259,9 @@ getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
// Infer section flags from the section name if we can.
Kind = getELFKindForNamedSection(SectionName, Kind);
- return getELFSection(SectionName,
- getELFSectionType(SectionName, Kind),
- getELFSectionFlags(Kind), Kind, true);
+ return getContext().getELFSection(SectionName,
+ getELFSectionType(SectionName, Kind),
+ getELFSectionFlags(Kind), Kind, true);
}
static const char *getSectionPrefixForUniqueGlobal(SectionKind Kind) {
@@ -298,19 +280,54 @@ static const char *getSectionPrefixForUniqueGlobal(SectionKind Kind) {
return ".gnu.linkonce.d.rel.ro.";
}
+/// getSectionPrefixForGlobal - Return the section prefix name used by options
+/// FunctionsSections and DataSections.
+static const char *getSectionPrefixForGlobal(SectionKind Kind) {
+ if (Kind.isText()) return ".text.";
+ if (Kind.isReadOnly()) return ".rodata.";
+
+ if (Kind.isThreadData()) return ".tdata.";
+ if (Kind.isThreadBSS()) return ".tbss.";
+
+ if (Kind.isDataNoRel()) return ".data.";
+ if (Kind.isDataRelLocal()) return ".data.rel.local.";
+ if (Kind.isDataRel()) return ".data.rel.";
+ if (Kind.isReadOnlyWithRelLocal()) return ".data.rel.ro.local.";
+
+ assert(Kind.isReadOnlyWithRel() && "Unknown section kind");
+ return ".data.rel.ro.";
+}
+
+
const MCSection *TargetLoweringObjectFileELF::
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler *Mang, const TargetMachine &TM) const {
+ // If we have -ffunction-section or -fdata-section then we should emit the
+ // global value to a uniqued section specifically for it.
+ bool EmitUniquedSection;
+ if (Kind.isText())
+ EmitUniquedSection = TM.getFunctionSections();
+ else
+ EmitUniquedSection = TM.getDataSections();
// If this global is linkonce/weak and the target handles this by emitting it
// into a 'uniqued' section name, create and return the section now.
- if (GV->isWeakForLinker() && !Kind.isCommon() && !Kind.isBSS()) {
- const char *Prefix = getSectionPrefixForUniqueGlobal(Kind);
- SmallString<128> Name;
- Name.append(Prefix, Prefix+strlen(Prefix));
- Mang->getNameWithPrefix(Name, GV, false);
- return getELFSection(Name.str(), getELFSectionType(Name.str(), Kind),
- getELFSectionFlags(Kind), Kind);
+ if ((GV->isWeakForLinker() || EmitUniquedSection) &&
+ !Kind.isCommon() && !Kind.isBSS()) {
+ const char *Prefix;
+ if (GV->isWeakForLinker())
+ Prefix = getSectionPrefixForUniqueGlobal(Kind);
+ else {
+ assert(EmitUniquedSection);
+ Prefix = getSectionPrefixForGlobal(Kind);
+ }
+
+ SmallString<128> Name(Prefix, Prefix+strlen(Prefix));
+ MCSymbol *Sym = Mang->getSymbol(GV);
+ Name.append(Sym->getName().begin(), Sym->getName().end());
+ return getContext().getELFSection(Name.str(),
+ getELFSectionType(Name.str(), Kind),
+ getELFSectionFlags(Kind), Kind);
}
if (Kind.isText()) return TextSection;
@@ -335,11 +352,11 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
std::string Name = SizeSpec + utostr(Align);
- return getELFSection(Name, MCSectionELF::SHT_PROGBITS,
- MCSectionELF::SHF_ALLOC |
- MCSectionELF::SHF_MERGE |
- MCSectionELF::SHF_STRINGS,
- Kind);
+ return getContext().getELFSection(Name, MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_ALLOC |
+ MCSectionELF::SHF_MERGE |
+ MCSectionELF::SHF_STRINGS,
+ Kind);
}
if (Kind.isMergeableConst()) {
@@ -391,8 +408,9 @@ getSectionForConstant(SectionKind Kind) const {
}
const MCExpr *TargetLoweringObjectFileELF::
-getSymbolForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding) const {
+getExprForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
+ MachineModuleInfo *MMI,
+ unsigned Encoding, MCStreamer &Streamer) const {
if (Encoding & dwarf::DW_EH_PE_indirect) {
MachineModuleInfoELF &ELFMMI = MMI->getObjFileInfo<MachineModuleInfoELF>();
@@ -403,89 +421,81 @@ getSymbolForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
// Add information about the stub reference to ELFMMI so that the stub
// gets emitted by the asmprinter.
- MCSymbol *Sym = getContext().GetOrCreateSymbol(Name.str());
- MCSymbol *&StubSym = ELFMMI.getGVStubEntry(Sym);
- if (StubSym == 0) {
- Name.clear();
- Mang->getNameWithPrefix(Name, GV, false);
- StubSym = getContext().GetOrCreateSymbol(Name.str());
+ MCSymbol *SSym = getContext().GetOrCreateSymbol(Name.str());
+ MachineModuleInfoImpl::StubValueTy &StubSym = ELFMMI.getGVStubEntry(SSym);
+ if (StubSym.getPointer() == 0) {
+ MCSymbol *Sym = Mang->getSymbol(GV);
+ StubSym = MachineModuleInfoImpl::StubValueTy(Sym, !GV->hasLocalLinkage());
}
return TargetLoweringObjectFile::
- getSymbolForDwarfReference(Sym, MMI,
- Encoding & ~dwarf::DW_EH_PE_indirect);
+ getExprForDwarfReference(SSym, Mang, MMI,
+ Encoding & ~dwarf::DW_EH_PE_indirect, Streamer);
}
return TargetLoweringObjectFile::
- getSymbolForDwarfGlobalReference(GV, Mang, MMI, Encoding);
+ getExprForDwarfGlobalReference(GV, Mang, MMI, Encoding, Streamer);
}
//===----------------------------------------------------------------------===//
// MachO
//===----------------------------------------------------------------------===//
-typedef StringMap<const MCSectionMachO*> MachOUniqueMapTy;
-
-TargetLoweringObjectFileMachO::~TargetLoweringObjectFileMachO() {
- // If we have the MachO uniquing map, free it.
- delete (MachOUniqueMapTy*)UniquingMap;
-}
-
-
-const MCSectionMachO *TargetLoweringObjectFileMachO::
-getMachOSection(StringRef Segment, StringRef Section,
- unsigned TypeAndAttributes,
- unsigned Reserved2, SectionKind Kind) const {
- // We unique sections by their segment/section pair. The returned section
- // may not have the same flags as the requested section, if so this should be
- // diagnosed by the client as an error.
-
- // Create the map if it doesn't already exist.
- if (UniquingMap == 0)
- UniquingMap = new MachOUniqueMapTy();
- MachOUniqueMapTy &Map = *(MachOUniqueMapTy*)UniquingMap;
-
- // Form the name to look up.
- SmallString<64> Name;
- Name += Segment;
- Name.push_back(',');
- Name += Section;
-
- // Do the lookup, if we have a hit, return it.
- const MCSectionMachO *&Entry = Map[Name.str()];
- if (Entry) return Entry;
-
- // Otherwise, return a new section.
- return Entry = MCSectionMachO::Create(Segment, Section, TypeAndAttributes,
- Reserved2, Kind, getContext());
-}
-
-
void TargetLoweringObjectFileMachO::Initialize(MCContext &Ctx,
const TargetMachine &TM) {
- if (UniquingMap != 0)
- ((MachOUniqueMapTy*)UniquingMap)->clear();
+ // _foo.eh symbols are currently always exported so that the linker knows
+ // about them. This is not necessary on 10.6 and later, but it
+ // doesn't hurt anything.
+ // FIXME: I need to get this from Triple.
+ IsFunctionEHSymbolGlobal = true;
+ IsFunctionEHFrameSymbolPrivate = false;
+ SupportsWeakOmittedEHFrame = false;
+
TargetLoweringObjectFile::Initialize(Ctx, TM);
TextSection // .text
- = getMachOSection("__TEXT", "__text",
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
- SectionKind::getText());
+ = getContext().getMachOSection("__TEXT", "__text",
+ MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ SectionKind::getText());
DataSection // .data
- = getMachOSection("__DATA", "__data", 0, SectionKind::getDataRel());
-
+ = getContext().getMachOSection("__DATA", "__data", 0,
+ SectionKind::getDataRel());
+
+ TLSDataSection // .tdata
+ = getContext().getMachOSection("__DATA", "__thread_data",
+ MCSectionMachO::S_THREAD_LOCAL_REGULAR,
+ SectionKind::getDataRel());
+ TLSBSSSection // .tbss
+ = getContext().getMachOSection("__DATA", "__thread_bss",
+ MCSectionMachO::S_THREAD_LOCAL_ZEROFILL,
+ SectionKind::getThreadBSS());
+
+ // TODO: Verify datarel below.
+ TLSTLVSection // .tlv
+ = getContext().getMachOSection("__DATA", "__thread_vars",
+ MCSectionMachO::S_THREAD_LOCAL_VARIABLES,
+ SectionKind::getDataRel());
+
+ TLSThreadInitSection
+ = getContext().getMachOSection("__DATA", "__thread_init",
+ MCSectionMachO::S_THREAD_LOCAL_INIT_FUNCTION_POINTERS,
+ SectionKind::getDataRel());
+
CStringSection // .cstring
- = getMachOSection("__TEXT", "__cstring", MCSectionMachO::S_CSTRING_LITERALS,
- SectionKind::getMergeable1ByteCString());
+ = getContext().getMachOSection("__TEXT", "__cstring",
+ MCSectionMachO::S_CSTRING_LITERALS,
+ SectionKind::getMergeable1ByteCString());
UStringSection
- = getMachOSection("__TEXT","__ustring", 0,
- SectionKind::getMergeable2ByteCString());
+ = getContext().getMachOSection("__TEXT","__ustring", 0,
+ SectionKind::getMergeable2ByteCString());
FourByteConstantSection // .literal4
- = getMachOSection("__TEXT", "__literal4", MCSectionMachO::S_4BYTE_LITERALS,
- SectionKind::getMergeableConst4());
+ = getContext().getMachOSection("__TEXT", "__literal4",
+ MCSectionMachO::S_4BYTE_LITERALS,
+ SectionKind::getMergeableConst4());
EightByteConstantSection // .literal8
- = getMachOSection("__TEXT", "__literal8", MCSectionMachO::S_8BYTE_LITERALS,
- SectionKind::getMergeableConst8());
+ = getContext().getMachOSection("__TEXT", "__literal8",
+ MCSectionMachO::S_8BYTE_LITERALS,
+ SectionKind::getMergeableConst8());
// ld_classic doesn't support .literal16 in 32-bit mode, and ld64 falls back
// to using it in -static mode.
@@ -493,110 +503,128 @@ void TargetLoweringObjectFileMachO::Initialize(MCContext &Ctx,
if (TM.getRelocationModel() != Reloc::Static &&
TM.getTargetData()->getPointerSize() == 32)
SixteenByteConstantSection = // .literal16
- getMachOSection("__TEXT", "__literal16",MCSectionMachO::S_16BYTE_LITERALS,
- SectionKind::getMergeableConst16());
+ getContext().getMachOSection("__TEXT", "__literal16",
+ MCSectionMachO::S_16BYTE_LITERALS,
+ SectionKind::getMergeableConst16());
ReadOnlySection // .const
- = getMachOSection("__TEXT", "__const", 0, SectionKind::getReadOnly());
+ = getContext().getMachOSection("__TEXT", "__const", 0,
+ SectionKind::getReadOnly());
TextCoalSection
- = getMachOSection("__TEXT", "__textcoal_nt",
- MCSectionMachO::S_COALESCED |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
- SectionKind::getText());
+ = getContext().getMachOSection("__TEXT", "__textcoal_nt",
+ MCSectionMachO::S_COALESCED |
+ MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ SectionKind::getText());
ConstTextCoalSection
- = getMachOSection("__TEXT", "__const_coal", MCSectionMachO::S_COALESCED,
- SectionKind::getText());
- ConstDataCoalSection
- = getMachOSection("__DATA","__const_coal", MCSectionMachO::S_COALESCED,
- SectionKind::getText());
+ = getContext().getMachOSection("__TEXT", "__const_coal",
+ MCSectionMachO::S_COALESCED,
+ SectionKind::getReadOnly());
ConstDataSection // .const_data
- = getMachOSection("__DATA", "__const", 0,
- SectionKind::getReadOnlyWithRel());
+ = getContext().getMachOSection("__DATA", "__const", 0,
+ SectionKind::getReadOnlyWithRel());
DataCoalSection
- = getMachOSection("__DATA","__datacoal_nt", MCSectionMachO::S_COALESCED,
- SectionKind::getDataRel());
+ = getContext().getMachOSection("__DATA","__datacoal_nt",
+ MCSectionMachO::S_COALESCED,
+ SectionKind::getDataRel());
DataCommonSection
- = getMachOSection("__DATA","__common", MCSectionMachO::S_ZEROFILL,
- SectionKind::getBSS());
+ = getContext().getMachOSection("__DATA","__common",
+ MCSectionMachO::S_ZEROFILL,
+ SectionKind::getBSS());
DataBSSSection
- = getMachOSection("__DATA","__bss", MCSectionMachO::S_ZEROFILL,
- SectionKind::getBSS());
+ = getContext().getMachOSection("__DATA","__bss", MCSectionMachO::S_ZEROFILL,
+ SectionKind::getBSS());
LazySymbolPointerSection
- = getMachOSection("__DATA", "__la_symbol_ptr",
- MCSectionMachO::S_LAZY_SYMBOL_POINTERS,
- SectionKind::getMetadata());
+ = getContext().getMachOSection("__DATA", "__la_symbol_ptr",
+ MCSectionMachO::S_LAZY_SYMBOL_POINTERS,
+ SectionKind::getMetadata());
NonLazySymbolPointerSection
- = getMachOSection("__DATA", "__nl_symbol_ptr",
- MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS,
- SectionKind::getMetadata());
+ = getContext().getMachOSection("__DATA", "__nl_symbol_ptr",
+ MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS,
+ SectionKind::getMetadata());
if (TM.getRelocationModel() == Reloc::Static) {
StaticCtorSection
- = getMachOSection("__TEXT", "__constructor", 0,SectionKind::getDataRel());
+ = getContext().getMachOSection("__TEXT", "__constructor", 0,
+ SectionKind::getDataRel());
StaticDtorSection
- = getMachOSection("__TEXT", "__destructor", 0, SectionKind::getDataRel());
+ = getContext().getMachOSection("__TEXT", "__destructor", 0,
+ SectionKind::getDataRel());
} else {
StaticCtorSection
- = getMachOSection("__DATA", "__mod_init_func",
- MCSectionMachO::S_MOD_INIT_FUNC_POINTERS,
- SectionKind::getDataRel());
+ = getContext().getMachOSection("__DATA", "__mod_init_func",
+ MCSectionMachO::S_MOD_INIT_FUNC_POINTERS,
+ SectionKind::getDataRel());
StaticDtorSection
- = getMachOSection("__DATA", "__mod_term_func",
- MCSectionMachO::S_MOD_TERM_FUNC_POINTERS,
- SectionKind::getDataRel());
+ = getContext().getMachOSection("__DATA", "__mod_term_func",
+ MCSectionMachO::S_MOD_TERM_FUNC_POINTERS,
+ SectionKind::getDataRel());
}
// Exception Handling.
- LSDASection = getMachOSection("__DATA", "__gcc_except_tab", 0,
- SectionKind::getDataRel());
+ LSDASection = getContext().getMachOSection("__TEXT", "__gcc_except_tab", 0,
+ SectionKind::getReadOnlyWithRel());
EHFrameSection =
- getMachOSection("__TEXT", "__eh_frame",
- MCSectionMachO::S_COALESCED |
- MCSectionMachO::S_ATTR_NO_TOC |
- MCSectionMachO::S_ATTR_STRIP_STATIC_SYMS |
- MCSectionMachO::S_ATTR_LIVE_SUPPORT,
- SectionKind::getReadOnly());
+ getContext().getMachOSection("__TEXT", "__eh_frame",
+ MCSectionMachO::S_COALESCED |
+ MCSectionMachO::S_ATTR_NO_TOC |
+ MCSectionMachO::S_ATTR_STRIP_STATIC_SYMS |
+ MCSectionMachO::S_ATTR_LIVE_SUPPORT,
+ SectionKind::getReadOnly());
// Debug Information.
DwarfAbbrevSection =
- getMachOSection("__DWARF", "__debug_abbrev", MCSectionMachO::S_ATTR_DEBUG,
- SectionKind::getMetadata());
+ getContext().getMachOSection("__DWARF", "__debug_abbrev",
+ MCSectionMachO::S_ATTR_DEBUG,
+ SectionKind::getMetadata());
DwarfInfoSection =
- getMachOSection("__DWARF", "__debug_info", MCSectionMachO::S_ATTR_DEBUG,
- SectionKind::getMetadata());
+ getContext().getMachOSection("__DWARF", "__debug_info",
+ MCSectionMachO::S_ATTR_DEBUG,
+ SectionKind::getMetadata());
DwarfLineSection =
- getMachOSection("__DWARF", "__debug_line", MCSectionMachO::S_ATTR_DEBUG,
- SectionKind::getMetadata());
+ getContext().getMachOSection("__DWARF", "__debug_line",
+ MCSectionMachO::S_ATTR_DEBUG,
+ SectionKind::getMetadata());
DwarfFrameSection =
- getMachOSection("__DWARF", "__debug_frame", MCSectionMachO::S_ATTR_DEBUG,
- SectionKind::getMetadata());
+ getContext().getMachOSection("__DWARF", "__debug_frame",
+ MCSectionMachO::S_ATTR_DEBUG,
+ SectionKind::getMetadata());
DwarfPubNamesSection =
- getMachOSection("__DWARF", "__debug_pubnames", MCSectionMachO::S_ATTR_DEBUG,
- SectionKind::getMetadata());
+ getContext().getMachOSection("__DWARF", "__debug_pubnames",
+ MCSectionMachO::S_ATTR_DEBUG,
+ SectionKind::getMetadata());
DwarfPubTypesSection =
- getMachOSection("__DWARF", "__debug_pubtypes", MCSectionMachO::S_ATTR_DEBUG,
- SectionKind::getMetadata());
+ getContext().getMachOSection("__DWARF", "__debug_pubtypes",
+ MCSectionMachO::S_ATTR_DEBUG,
+ SectionKind::getMetadata());
DwarfStrSection =
- getMachOSection("__DWARF", "__debug_str", MCSectionMachO::S_ATTR_DEBUG,
- SectionKind::getMetadata());
+ getContext().getMachOSection("__DWARF", "__debug_str",
+ MCSectionMachO::S_ATTR_DEBUG,
+ SectionKind::getMetadata());
DwarfLocSection =
- getMachOSection("__DWARF", "__debug_loc", MCSectionMachO::S_ATTR_DEBUG,
- SectionKind::getMetadata());
+ getContext().getMachOSection("__DWARF", "__debug_loc",
+ MCSectionMachO::S_ATTR_DEBUG,
+ SectionKind::getMetadata());
DwarfARangesSection =
- getMachOSection("__DWARF", "__debug_aranges", MCSectionMachO::S_ATTR_DEBUG,
- SectionKind::getMetadata());
+ getContext().getMachOSection("__DWARF", "__debug_aranges",
+ MCSectionMachO::S_ATTR_DEBUG,
+ SectionKind::getMetadata());
DwarfRangesSection =
- getMachOSection("__DWARF", "__debug_ranges", MCSectionMachO::S_ATTR_DEBUG,
- SectionKind::getMetadata());
+ getContext().getMachOSection("__DWARF", "__debug_ranges",
+ MCSectionMachO::S_ATTR_DEBUG,
+ SectionKind::getMetadata());
DwarfMacroInfoSection =
- getMachOSection("__DWARF", "__debug_macinfo", MCSectionMachO::S_ATTR_DEBUG,
- SectionKind::getMetadata());
+ getContext().getMachOSection("__DWARF", "__debug_macinfo",
+ MCSectionMachO::S_ATTR_DEBUG,
+ SectionKind::getMetadata());
DwarfDebugInlineSection =
- getMachOSection("__DWARF", "__debug_inlined", MCSectionMachO::S_ATTR_DEBUG,
- SectionKind::getMetadata());
+ getContext().getMachOSection("__DWARF", "__debug_inlined",
+ MCSectionMachO::S_ATTR_DEBUG,
+ SectionKind::getMetadata());
+
+ TLSExtraDataSection = TLSTLVSection;
}
const MCSection *TargetLoweringObjectFileMachO::
@@ -609,8 +637,8 @@ getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
MCSectionMachO::ParseSectionSpecifier(GV->getSection(), Segment, Section,
TAA, StubSize);
if (!ErrorCode.empty()) {
- // If invalid, report the error with llvm_report_error.
- llvm_report_error("Global variable '" + GV->getNameStr() +
+ // If invalid, report the error with report_fatal_error.
+ report_fatal_error("Global variable '" + GV->getNameStr() +
"' has an invalid section specifier '" + GV->getSection()+
"': " + ErrorCode + ".");
// Fall back to dropping it into the data section.
@@ -619,14 +647,14 @@ getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
// Get the section.
const MCSectionMachO *S =
- getMachOSection(Segment, Section, TAA, StubSize, Kind);
+ getContext().getMachOSection(Segment, Section, TAA, StubSize, Kind);
// Okay, now that we got the section, verify that the TAA & StubSize agree.
// If the user declared multiple globals with different section flags, we need
// to reject it here.
if (S->getTypeAndAttributes() != TAA || S->getStubSize() != StubSize) {
- // If invalid, report the error with llvm_report_error.
- llvm_report_error("Global variable '" + GV->getNameStr() +
+ // If invalid, report the error with report_fatal_error.
+ report_fatal_error("Global variable '" + GV->getNameStr() +
"' section type or attributes does not match previous"
" section specifier");
}
@@ -637,7 +665,10 @@ getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
const MCSection *TargetLoweringObjectFileMachO::
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler *Mang, const TargetMachine &TM) const {
- assert(!Kind.isThreadLocal() && "Darwin doesn't support TLS");
+
+ // Handle thread local data.
+ if (Kind.isThreadBSS()) return TLSBSSSection;
+ if (Kind.isThreadData()) return TLSDataSection;
if (Kind.isText())
return GV->isWeakForLinker() ? TextCoalSection : TextSection;
@@ -725,9 +756,8 @@ shouldEmitUsedDirectiveFor(const GlobalValue *GV, Mangler *Mang) const {
// FIXME: ObjC metadata is currently emitted as internal symbols that have
// \1L and \0l prefixes on them. Fix them to be Private/LinkerPrivate and
// this horrible hack can go away.
- SmallString<64> Name;
- Mang->getNameWithPrefix(Name, GV, false);
- if (Name[0] == 'L' || Name[0] == 'l')
+ MCSymbol *Sym = Mang->getSymbol(GV);
+ if (Sym->getName()[0] == 'L' || Sym->getName()[0] == 'l')
return false;
}
@@ -735,8 +765,9 @@ shouldEmitUsedDirectiveFor(const GlobalValue *GV, Mangler *Mang) const {
}
const MCExpr *TargetLoweringObjectFileMachO::
-getSymbolForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding) const {
+getExprForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
+ MachineModuleInfo *MMI, unsigned Encoding,
+ MCStreamer &Streamer) const {
// The mach-o version of this method defaults to returning a stub reference.
if (Encoding & DW_EH_PE_indirect) {
@@ -749,21 +780,20 @@ getSymbolForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
// Add information about the stub reference to MachOMMI so that the stub
// gets emitted by the asmprinter.
- MCSymbol *Sym = getContext().GetOrCreateSymbol(Name.str());
- MCSymbol *&StubSym = MachOMMI.getGVStubEntry(Sym);
- if (StubSym == 0) {
- Name.clear();
- Mang->getNameWithPrefix(Name, GV, false);
- StubSym = getContext().GetOrCreateSymbol(Name.str());
+ MCSymbol *SSym = getContext().GetOrCreateSymbol(Name.str());
+ MachineModuleInfoImpl::StubValueTy &StubSym = MachOMMI.getGVStubEntry(SSym);
+ if (StubSym.getPointer() == 0) {
+ MCSymbol *Sym = Mang->getSymbol(GV);
+ StubSym = MachineModuleInfoImpl::StubValueTy(Sym, !GV->hasLocalLinkage());
}
return TargetLoweringObjectFile::
- getSymbolForDwarfReference(Sym, MMI,
- Encoding & ~dwarf::DW_EH_PE_indirect);
+ getExprForDwarfReference(SSym, Mang, MMI,
+ Encoding & ~dwarf::DW_EH_PE_indirect, Streamer);
}
return TargetLoweringObjectFile::
- getSymbolForDwarfGlobalReference(GV, Mang, MMI, Encoding);
+ getExprForDwarfGlobalReference(GV, Mang, MMI, Encoding, Streamer);
}
unsigned TargetLoweringObjectFileMachO::getPersonalityEncoding() const {
@@ -779,101 +809,167 @@ unsigned TargetLoweringObjectFileMachO::getFDEEncoding() const {
}
unsigned TargetLoweringObjectFileMachO::getTTypeEncoding() const {
- return DW_EH_PE_absptr;
+ return DW_EH_PE_indirect | DW_EH_PE_pcrel | DW_EH_PE_sdata4;
}
//===----------------------------------------------------------------------===//
// COFF
//===----------------------------------------------------------------------===//
-typedef StringMap<const MCSectionCOFF*> COFFUniqueMapTy;
-
-TargetLoweringObjectFileCOFF::~TargetLoweringObjectFileCOFF() {
- delete (COFFUniqueMapTy*)UniquingMap;
-}
-
-
-const MCSection *TargetLoweringObjectFileCOFF::
-getCOFFSection(StringRef Name, bool isDirective, SectionKind Kind) const {
- // Create the map if it doesn't already exist.
- if (UniquingMap == 0)
- UniquingMap = new MachOUniqueMapTy();
- COFFUniqueMapTy &Map = *(COFFUniqueMapTy*)UniquingMap;
-
- // Do the lookup, if we have a hit, return it.
- const MCSectionCOFF *&Entry = Map[Name];
- if (Entry) return Entry;
-
- return Entry = MCSectionCOFF::Create(Name, isDirective, Kind, getContext());
-}
-
void TargetLoweringObjectFileCOFF::Initialize(MCContext &Ctx,
const TargetMachine &TM) {
- if (UniquingMap != 0)
- ((COFFUniqueMapTy*)UniquingMap)->clear();
TargetLoweringObjectFile::Initialize(Ctx, TM);
- TextSection = getCOFFSection("\t.text", true, SectionKind::getText());
- DataSection = getCOFFSection("\t.data", true, SectionKind::getDataRel());
+ TextSection =
+ getContext().getCOFFSection(".text",
+ COFF::IMAGE_SCN_CNT_CODE |
+ COFF::IMAGE_SCN_MEM_EXECUTE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getText());
+ DataSection =
+ getContext().getCOFFSection(".data",
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE,
+ SectionKind::getDataRel());
+ ReadOnlySection =
+ getContext().getCOFFSection(".rdata",
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getReadOnly());
StaticCtorSection =
- getCOFFSection(".ctors", false, SectionKind::getDataRel());
+ getContext().getCOFFSection(".ctors",
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE,
+ SectionKind::getDataRel());
StaticDtorSection =
- getCOFFSection(".dtors", false, SectionKind::getDataRel());
+ getContext().getCOFFSection(".dtors",
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE,
+ SectionKind::getDataRel());
// FIXME: We're emitting LSDA info into a readonly section on COFF, even
// though it contains relocatable pointers. In PIC mode, this is probably a
// big runtime hit for C++ apps. Either the contents of the LSDA need to be
// adjusted or this should be a data section.
LSDASection =
- getCOFFSection(".gcc_except_table", false, SectionKind::getReadOnly());
+ getContext().getCOFFSection(".gcc_except_table",
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getReadOnly());
EHFrameSection =
- getCOFFSection(".eh_frame", false, SectionKind::getDataRel());
+ getContext().getCOFFSection(".eh_frame",
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE,
+ SectionKind::getDataRel());
// Debug info.
- // FIXME: Don't use 'directive' mode here.
DwarfAbbrevSection =
- getCOFFSection("\t.section\t.debug_abbrev,\"dr\"",
- true, SectionKind::getMetadata());
+ getContext().getCOFFSection(".debug_abbrev",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
DwarfInfoSection =
- getCOFFSection("\t.section\t.debug_info,\"dr\"",
- true, SectionKind::getMetadata());
+ getContext().getCOFFSection(".debug_info",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
DwarfLineSection =
- getCOFFSection("\t.section\t.debug_line,\"dr\"",
- true, SectionKind::getMetadata());
+ getContext().getCOFFSection(".debug_line",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
DwarfFrameSection =
- getCOFFSection("\t.section\t.debug_frame,\"dr\"",
- true, SectionKind::getMetadata());
+ getContext().getCOFFSection(".debug_frame",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
DwarfPubNamesSection =
- getCOFFSection("\t.section\t.debug_pubnames,\"dr\"",
- true, SectionKind::getMetadata());
+ getContext().getCOFFSection(".debug_pubnames",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
DwarfPubTypesSection =
- getCOFFSection("\t.section\t.debug_pubtypes,\"dr\"",
- true, SectionKind::getMetadata());
+ getContext().getCOFFSection(".debug_pubtypes",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
DwarfStrSection =
- getCOFFSection("\t.section\t.debug_str,\"dr\"",
- true, SectionKind::getMetadata());
+ getContext().getCOFFSection(".debug_str",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
DwarfLocSection =
- getCOFFSection("\t.section\t.debug_loc,\"dr\"",
- true, SectionKind::getMetadata());
+ getContext().getCOFFSection(".debug_loc",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
DwarfARangesSection =
- getCOFFSection("\t.section\t.debug_aranges,\"dr\"",
- true, SectionKind::getMetadata());
+ getContext().getCOFFSection(".debug_aranges",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
DwarfRangesSection =
- getCOFFSection("\t.section\t.debug_ranges,\"dr\"",
- true, SectionKind::getMetadata());
+ getContext().getCOFFSection(".debug_ranges",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
DwarfMacroInfoSection =
- getCOFFSection("\t.section\t.debug_macinfo,\"dr\"",
- true, SectionKind::getMetadata());
+ getContext().getCOFFSection(".debug_macinfo",
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
+ SectionKind::getMetadata());
+
+ DrectveSection =
+ getContext().getCOFFSection(".drectve",
+ COFF::IMAGE_SCN_LNK_INFO,
+ SectionKind::getMetadata());
+}
+
+static unsigned
+getCOFFSectionFlags(SectionKind K) {
+ unsigned Flags = 0;
+
+ if (K.isMetadata())
+ Flags |=
+ COFF::IMAGE_SCN_MEM_DISCARDABLE;
+ else if (K.isText())
+ Flags |=
+ COFF::IMAGE_SCN_MEM_EXECUTE |
+ COFF::IMAGE_SCN_CNT_CODE;
+ else if (K.isBSS ())
+ Flags |=
+ COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE;
+ else if (K.isReadOnly())
+ Flags |=
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ;
+ else if (K.isWriteable())
+ Flags |=
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE;
+
+ return Flags;
}
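
The helper above replaces the old directive-string sections with explicit
COFF characteristics derived from the SectionKind. A minimal standalone
sketch of the same mapping, using the PE/COFF characteristic values directly
(a simplified stand-in for SectionKind, not the LLVM class):

    #include <cstdio>

    // PE/COFF section characteristic bits (values from the PE/COFF spec).
    const unsigned IMAGE_SCN_CNT_CODE               = 0x00000020;
    const unsigned IMAGE_SCN_CNT_INITIALIZED_DATA   = 0x00000040;
    const unsigned IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080;
    const unsigned IMAGE_SCN_MEM_DISCARDABLE        = 0x02000000;
    const unsigned IMAGE_SCN_MEM_EXECUTE            = 0x20000000;
    const unsigned IMAGE_SCN_MEM_READ               = 0x40000000;
    const unsigned IMAGE_SCN_MEM_WRITE              = 0x80000000;

    enum Kind { Metadata, Text, BSS, ReadOnly, Writeable };

    // Same precedence order as getCOFFSectionFlags above.
    unsigned coffFlagsFor(Kind K) {
      if (K == Metadata)  return IMAGE_SCN_MEM_DISCARDABLE;
      if (K == Text)      return IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_CNT_CODE;
      if (K == BSS)       return IMAGE_SCN_CNT_UNINITIALIZED_DATA |
                                 IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE;
      if (K == ReadOnly)  return IMAGE_SCN_CNT_INITIALIZED_DATA |
                                 IMAGE_SCN_MEM_READ;
      /* Writeable */     return IMAGE_SCN_CNT_INITIALIZED_DATA |
                                 IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE;
    }

    int main() {
      std::printf(".text: 0x%08X\n", coffFlagsFor(Text)); // 0x20000020
      std::printf(".bss:  0x%08X\n", coffFlagsFor(BSS));  // 0xC0000080
      return 0;
    }
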
const MCSection *TargetLoweringObjectFileCOFF::
getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler *Mang, const TargetMachine &TM) const {
- return getCOFFSection(GV->getSection(), false, Kind);
+ return getContext().getCOFFSection(GV->getSection(),
+ getCOFFSectionFlags(Kind),
+ Kind);
}
static const char *getCOFFSectionPrefixForUniqueGlobal(SectionKind Kind) {
if (Kind.isText())
return ".text$linkonce";
+ if (Kind.isBSS ())
+ return ".bss$linkonce";
if (Kind.isWriteable())
return ".data$linkonce";
return ".rdata$linkonce";
@@ -890,8 +986,15 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
if (GV->isWeakForLinker()) {
const char *Prefix = getCOFFSectionPrefixForUniqueGlobal(Kind);
SmallString<128> Name(Prefix, Prefix+strlen(Prefix));
- Mang->getNameWithPrefix(Name, GV, false);
- return getCOFFSection(Name.str(), false, Kind);
+ MCSymbol *Sym = Mang->getSymbol(GV);
+ Name.append(Sym->getName().begin(), Sym->getName().end());
+
+ unsigned Characteristics = getCOFFSectionFlags(Kind);
+
+ Characteristics |= COFF::IMAGE_SCN_LNK_COMDAT;
+
+ return getContext().getCOFFSection(Name.str(), Characteristics,
+ COFF::IMAGE_COMDAT_SELECT_EXACT_MATCH, Kind);
}
if (Kind.isText())
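
For weak (linkonce) globals the rewritten SelectSectionForGlobal builds the
section name by appending the mangled symbol to a kind-specific "$linkonce"
prefix and marks the section IMAGE_SCN_LNK_COMDAT with an exact-match
selection, so the linker can fold duplicate definitions. A tiny standalone
sketch of the name construction only (the mangled name here is made up):

    #include <cstdio>
    #include <string>

    // Mirrors getCOFFSectionPrefixForUniqueGlobal above (simplified kinds).
    const char *linkoncePrefix(bool IsText, bool IsBSS, bool IsWriteable) {
      if (IsText)      return ".text$linkonce";
      if (IsBSS)       return ".bss$linkonce";
      if (IsWriteable) return ".data$linkonce";
      return ".rdata$linkonce";
    }

    int main() {
      std::string Sym = "_ZN3Foo3barEv"; // hypothetical mangled symbol
      std::string Section = std::string(linkoncePrefix(true, false, false)) + Sym;
      std::printf("%s\n", Section.c_str()); // .text$linkonce_ZN3Foo3barEv
      return 0;
    }
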
diff --git a/libclamav/c++/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/libclamav/c++/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
index c840b39..78989c5 100644
--- a/libclamav/c++/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -33,6 +33,7 @@
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -40,6 +41,7 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
@@ -77,6 +79,10 @@ namespace {
// registers from virtual registers. e.g. r1 = move v1024.
DenseMap<unsigned, unsigned> DstRegMap;
+ /// RegSequences - Keep track of the list of REG_SEQUENCE instructions seen
+ /// during the initial walk of the machine function.
+ SmallVector<MachineInstr*, 16> RegSequences;
+
bool Sink3AddrInstruction(MachineBasicBlock *MBB, MachineInstr *MI,
unsigned Reg,
MachineBasicBlock::iterator OldPos);
@@ -123,9 +129,16 @@ namespace {
void ProcessCopy(MachineInstr *MI, MachineBasicBlock *MBB,
SmallPtrSet<MachineInstr*, 8> &Processed);
+ void CoalesceExtSubRegs(SmallVector<unsigned,4> &Srcs, unsigned DstReg);
+
+ /// EliminateRegSequences - Eliminate REG_SEQUENCE instructions as part
+ /// of the de-ssa process. This replaces sources of REG_SEQUENCE with
+ /// sub-register references of the register defined by REG_SEQUENCE.
+ bool EliminateRegSequences();
+
public:
static char ID; // Pass identification, replacement for typeid
- TwoAddressInstructionPass() : MachineFunctionPass(&ID) {}
+ TwoAddressInstructionPass() : MachineFunctionPass(ID) {}
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
@@ -146,10 +159,10 @@ namespace {
}
char TwoAddressInstructionPass::ID = 0;
-static RegisterPass<TwoAddressInstructionPass>
-X("twoaddressinstruction", "Two-Address instruction pass");
+INITIALIZE_PASS(TwoAddressInstructionPass, "twoaddressinstruction",
+ "Two-Address instruction pass", false, false);
-const PassInfo *const llvm::TwoAddressInstructionPassID = &X;
+char &llvm::TwoAddressInstructionPassID = TwoAddressInstructionPass::ID;
/// Sink3AddrInstruction - A two-address instruction has been converted to a
/// three-address instruction to avoid clobbering a register. Try to sink it
@@ -188,8 +201,9 @@ bool TwoAddressInstructionPass::Sink3AddrInstruction(MachineBasicBlock *MBB,
// Find the instruction that kills SavedReg.
MachineInstr *KillMI = NULL;
- for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(SavedReg),
- UE = MRI->use_end(); UI != UE; ++UI) {
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UI = MRI->use_nodbg_begin(SavedReg),
+ UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
MachineOperand &UseMO = UI.getOperand();
if (!UseMO.isKill())
continue;
@@ -280,8 +294,8 @@ TwoAddressInstructionPass::isProfitableToReMat(unsigned Reg,
MachineInstr *MI, MachineInstr *DefMI,
MachineBasicBlock *MBB, unsigned Loc) {
bool OtherUse = false;
- for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
- UE = MRI->use_end(); UI != UE; ++UI) {
+ for (MachineRegisterInfo::use_nodbg_iterator UI = MRI->use_nodbg_begin(Reg),
+ UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
MachineOperand &UseMO = UI.getOperand();
MachineInstr *UseMI = UseMO.getParent();
MachineBasicBlock *UseMBB = UseMI->getParent();
@@ -366,26 +380,18 @@ static bool isCopyToReg(MachineInstr &MI, const TargetInstrInfo *TII,
bool &IsSrcPhys, bool &IsDstPhys) {
SrcReg = 0;
DstReg = 0;
- unsigned SrcSubIdx, DstSubIdx;
- if (!TII->isMoveInstr(MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
- if (MI.isExtractSubreg()) {
- DstReg = MI.getOperand(0).getReg();
- SrcReg = MI.getOperand(1).getReg();
- } else if (MI.isInsertSubreg()) {
- DstReg = MI.getOperand(0).getReg();
- SrcReg = MI.getOperand(2).getReg();
- } else if (MI.isSubregToReg()) {
- DstReg = MI.getOperand(0).getReg();
- SrcReg = MI.getOperand(2).getReg();
- }
- }
+ if (MI.isCopy()) {
+ DstReg = MI.getOperand(0).getReg();
+ SrcReg = MI.getOperand(1).getReg();
+ } else if (MI.isInsertSubreg() || MI.isSubregToReg()) {
+ DstReg = MI.getOperand(0).getReg();
+ SrcReg = MI.getOperand(2).getReg();
+ } else
+ return false;
- if (DstReg) {
- IsSrcPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg);
- IsDstPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
- return true;
- }
- return false;
+ IsSrcPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg);
+ IsDstPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
+ return true;
}
/// isKilled - Test if the given register value, which is used by the given
@@ -767,7 +773,7 @@ canUpdateDeletedKills(SmallVector<unsigned, 4> &Kills,
if (!LastKill)
return false;
- bool isModRef = LastKill->modifiesRegister(Kill);
+ bool isModRef = LastKill->definesRegister(Kill);
NewKills.push_back(std::make_pair(std::make_pair(Kill, isModRef),
LastKill));
}
@@ -884,6 +890,108 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
}
}
}
+
+ // If this is an instruction with a load folded into it, try unfolding
+ // the load, e.g. avoid this:
+ // movq %rdx, %rcx
+ // addq (%rax), %rcx
+ // in favor of this:
+ // movq (%rax), %rcx
+ // addq %rdx, %rcx
+ // because it's preferable to schedule a load rather than a register copy.
+ if (TID.mayLoad() && !regBKilled) {
+ // Determine if a load can be unfolded.
+ unsigned LoadRegIndex;
+ unsigned NewOpc =
+ TII->getOpcodeAfterMemoryUnfold(mi->getOpcode(),
+ /*UnfoldLoad=*/true,
+ /*UnfoldStore=*/false,
+ &LoadRegIndex);
+ if (NewOpc != 0) {
+ const TargetInstrDesc &UnfoldTID = TII->get(NewOpc);
+ if (UnfoldTID.getNumDefs() == 1) {
+ MachineFunction &MF = *mbbi->getParent();
+
+ // Unfold the load.
+ DEBUG(dbgs() << "2addr: UNFOLDING: " << *mi);
+ const TargetRegisterClass *RC =
+ UnfoldTID.OpInfo[LoadRegIndex].getRegClass(TRI);
+ unsigned Reg = MRI->createVirtualRegister(RC);
+ SmallVector<MachineInstr *, 2> NewMIs;
+ if (!TII->unfoldMemoryOperand(MF, mi, Reg,
+ /*UnfoldLoad=*/true,/*UnfoldStore=*/false,
+ NewMIs)) {
+ DEBUG(dbgs() << "2addr: ABANDONING UNFOLD\n");
+ return false;
+ }
+ assert(NewMIs.size() == 2 &&
+ "Unfolded a load into multiple instructions!");
+ // The load was previously folded, so this is the only use.
+ NewMIs[1]->addRegisterKilled(Reg, TRI);
+
+ // Tentatively insert the instructions into the block so that they
+ // look "normal" to the transformation logic.
+ mbbi->insert(mi, NewMIs[0]);
+ mbbi->insert(mi, NewMIs[1]);
+
+ DEBUG(dbgs() << "2addr: NEW LOAD: " << *NewMIs[0]
+ << "2addr: NEW INST: " << *NewMIs[1]);
+
+ // Transform the instruction, now that it no longer has a load.
+ unsigned NewDstIdx = NewMIs[1]->findRegisterDefOperandIdx(regA);
+ unsigned NewSrcIdx = NewMIs[1]->findRegisterUseOperandIdx(regB);
+ MachineBasicBlock::iterator NewMI = NewMIs[1];
+ bool TransformSuccess =
+ TryInstructionTransform(NewMI, mi, mbbi,
+ NewSrcIdx, NewDstIdx, Dist);
+ if (TransformSuccess ||
+ NewMIs[1]->getOperand(NewSrcIdx).isKill()) {
+ // Success, or at least we made an improvement. Keep the unfolded
+ // instructions and discard the original.
+ if (LV) {
+ for (unsigned i = 0, e = mi->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = mi->getOperand(i);
+ if (MO.isReg() && MO.getReg() != 0 &&
+ TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
+ if (MO.isUse()) {
+ if (MO.isKill()) {
+ if (NewMIs[0]->killsRegister(MO.getReg()))
+ LV->replaceKillInstruction(MO.getReg(), mi, NewMIs[0]);
+ else {
+ assert(NewMIs[1]->killsRegister(MO.getReg()) &&
+ "Kill missing after load unfold!");
+ LV->replaceKillInstruction(MO.getReg(), mi, NewMIs[1]);
+ }
+ }
+ } else if (LV->removeVirtualRegisterDead(MO.getReg(), mi)) {
+ if (NewMIs[1]->registerDefIsDead(MO.getReg()))
+ LV->addVirtualRegisterDead(MO.getReg(), NewMIs[1]);
+ else {
+ assert(NewMIs[0]->registerDefIsDead(MO.getReg()) &&
+ "Dead flag missing after load unfold!");
+ LV->addVirtualRegisterDead(MO.getReg(), NewMIs[0]);
+ }
+ }
+ }
+ }
+ LV->addVirtualRegisterKilled(Reg, NewMIs[1]);
+ }
+ mi->eraseFromParent();
+ mi = NewMIs[1];
+ if (TransformSuccess)
+ return true;
+ } else {
+ // Transforming didn't eliminate the tie and didn't lead to an
+ // improvement. Clean up the unfolded instructions and keep the
+ // original.
+ DEBUG(dbgs() << "2addr: ABANDONING UNFOLD\n");
+ NewMIs[0]->eraseFromParent();
+ NewMIs[1]->eraseFromParent();
+ }
+ }
+ }
+ }
+
return false;
}
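
The unfolding block above splits a folded load out of a two-address
instruction and then re-runs the transform on the ALU part; the unfolded pair
is kept only when that retry succeeds or the source register at least ends up
killed, so no extra copy is needed. A minimal sketch of that keep-or-discard
decision, with plain booleans standing in for the LLVM queries:

    #include <cstdio>

    // Keep the unfolded "load + op" pair only if the retried two-address
    // transform worked, or the source operand is killed by the new op
    // (mirrors: TransformSuccess || NewMIs[1]->getOperand(NewSrcIdx).isKill()).
    bool keepUnfolded(bool TransformSuccess, bool SrcKilledInNewOp) {
      return TransformSuccess || SrcKilledInNewOp;
    }

    int main() {
      std::printf("%d %d %d\n",
                  keepUnfolded(true, false),   // keep
                  keepUnfolded(false, true),   // keep, still an improvement
                  keepUnfolded(false, false)); // discard, restore original
      return 0;
    }
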
@@ -927,6 +1035,11 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
mi = nmi;
continue;
}
+
+ // Remember REG_SEQUENCE instructions; we'll deal with them later.
+ if (mi->isRegSequence())
+ RegSequences.push_back(&*mi);
+
const TargetInstrDesc &TID = mi->getDesc();
bool FirstTied = true;
@@ -1029,13 +1142,12 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
isProfitableToReMat(regB, rc, mi, DefMI, mbbi, Dist)){
DEBUG(dbgs() << "2addr: REMATTING : " << *DefMI << "\n");
unsigned regASubIdx = mi->getOperand(DstIdx).getSubReg();
- TII->reMaterialize(*mbbi, mi, regA, regASubIdx, DefMI, TRI);
+ TII->reMaterialize(*mbbi, mi, regA, regASubIdx, DefMI, *TRI);
ReMatRegs.set(regB);
++NumReMats;
} else {
- bool Emitted = TII->copyRegToReg(*mbbi, mi, regA, regB, rc, rc);
- (void)Emitted;
- assert(Emitted && "Unable to issue a copy instruction!\n");
+ BuildMI(*mbbi, mi, mi->getDebugLoc(), TII->get(TargetOpcode::COPY),
+ regA).addReg(regB);
}
MachineBasicBlock::iterator prevMI = prior(mi);
@@ -1085,12 +1197,30 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
}
}
}
-
+
+ // Schedule the source copy / remat inserted to form the two-address
+ // instruction. FIXME: Does it matter that the distance map may not be
+ // accurate after it's scheduled?
+ TII->scheduleTwoAddrSource(prior(mi), mi, *TRI);
+
MadeChange = true;
DEBUG(dbgs() << "\t\trewrite to:\t" << *mi);
}
+ // Rewrite INSERT_SUBREG as COPY now that we no longer need SSA form.
+ if (mi->isInsertSubreg()) {
+ // From %reg = INSERT_SUBREG %reg, %subreg, subidx
+ // To %reg:subidx = COPY %subreg
+ unsigned SubIdx = mi->getOperand(3).getImm();
+ mi->RemoveOperand(3);
+ assert(mi->getOperand(0).getSubReg() == 0 && "Unexpected subreg idx");
+ mi->getOperand(0).setSubReg(SubIdx);
+ mi->RemoveOperand(1);
+ mi->setDesc(TII->get(TargetOpcode::COPY));
+ DEBUG(dbgs() << "\t\tconvert to:\t" << *mi);
+ }
+
// Clear TiedOperands here instead of at the top of the loop
// since most instructions do not have tied operands.
TiedOperands.clear();
@@ -1101,12 +1231,268 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
// Some remat'ed instructions are dead.
int VReg = ReMatRegs.find_first();
while (VReg != -1) {
- if (MRI->use_empty(VReg)) {
+ if (MRI->use_nodbg_empty(VReg)) {
MachineInstr *DefMI = MRI->getVRegDef(VReg);
DefMI->eraseFromParent();
}
VReg = ReMatRegs.find_next(VReg);
}
+ // Eliminate REG_SEQUENCE instructions. Their whole purpose was to preserve
+ // SSA form. It's now safe to de-SSA.
+ MadeChange |= EliminateRegSequences();
+
return MadeChange;
}
+
+static void UpdateRegSequenceSrcs(unsigned SrcReg,
+ unsigned DstReg, unsigned SubIdx,
+ MachineRegisterInfo *MRI,
+ const TargetRegisterInfo &TRI) {
+ for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(SrcReg),
+ RE = MRI->reg_end(); RI != RE; ) {
+ MachineOperand &MO = RI.getOperand();
+ ++RI;
+ MO.substVirtReg(DstReg, SubIdx, TRI);
+ }
+}
+
+/// CoalesceExtSubRegs - If several sources of the REG_SEQUENCE are
+/// EXTRACT_SUBREGs from the same register into the same virtual register
+/// with different sub-register indices, attempt to combine the
+/// EXTRACT_SUBREGs and pre-coalesce them. e.g.
+/// %reg1026<def> = VLDMQ %reg1025<kill>, 260, pred:14, pred:%reg0
+/// %reg1029:6<def> = EXTRACT_SUBREG %reg1026, 6
+/// %reg1029:5<def> = EXTRACT_SUBREG %reg1026<kill>, 5
+/// Since D subregs 5, 6 can combine to a Q register, we can coalesce
+/// reg1026 to reg1029.
+void
+TwoAddressInstructionPass::CoalesceExtSubRegs(SmallVector<unsigned,4> &Srcs,
+ unsigned DstReg) {
+ SmallSet<unsigned, 4> Seen;
+ for (unsigned i = 0, e = Srcs.size(); i != e; ++i) {
+ unsigned SrcReg = Srcs[i];
+ if (!Seen.insert(SrcReg))
+ continue;
+
+ // Check that the instructions are all in the same basic block.
+ MachineInstr *SrcDefMI = MRI->getVRegDef(SrcReg);
+ MachineInstr *DstDefMI = MRI->getVRegDef(DstReg);
+ if (SrcDefMI->getParent() != DstDefMI->getParent())
+ continue;
+
+ // If there are no uses other than copies which feed into
+ // the reg_sequence, then we might be able to coalesce them.
+ bool CanCoalesce = true;
+ SmallVector<unsigned, 4> SrcSubIndices, DstSubIndices;
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UI = MRI->use_nodbg_begin(SrcReg),
+ UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
+ MachineInstr *UseMI = &*UI;
+ if (!UseMI->isCopy() || UseMI->getOperand(0).getReg() != DstReg) {
+ CanCoalesce = false;
+ break;
+ }
+ SrcSubIndices.push_back(UseMI->getOperand(1).getSubReg());
+ DstSubIndices.push_back(UseMI->getOperand(0).getSubReg());
+ }
+
+ if (!CanCoalesce || SrcSubIndices.size() < 2)
+ continue;
+
+ // Check that the source subregisters can be combined.
+ std::sort(SrcSubIndices.begin(), SrcSubIndices.end());
+ unsigned NewSrcSubIdx = 0;
+ if (!TRI->canCombineSubRegIndices(MRI->getRegClass(SrcReg), SrcSubIndices,
+ NewSrcSubIdx))
+ continue;
+
+ // Check that the destination subregisters can also be combined.
+ std::sort(DstSubIndices.begin(), DstSubIndices.end());
+ unsigned NewDstSubIdx = 0;
+ if (!TRI->canCombineSubRegIndices(MRI->getRegClass(DstReg), DstSubIndices,
+ NewDstSubIdx))
+ continue;
+
+ // If neither source nor destination can be combined to the full register,
+ // just give up. This could be improved if it ever matters.
+ if (NewSrcSubIdx != 0 && NewDstSubIdx != 0)
+ continue;
+
+ // Now that we know that all the uses are extract_subregs and that those
+ // subregs can somehow be combined, scan all the extract_subregs again to
+ // make sure the subregs are in the right order and can be composed.
+ MachineInstr *SomeMI = 0;
+ CanCoalesce = true;
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UI = MRI->use_nodbg_begin(SrcReg),
+ UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
+ MachineInstr *UseMI = &*UI;
+ assert(UseMI->isCopy());
+ unsigned DstSubIdx = UseMI->getOperand(0).getSubReg();
+ unsigned SrcSubIdx = UseMI->getOperand(1).getSubReg();
+ assert(DstSubIdx != 0 && "missing subreg from RegSequence elimination");
+ if ((NewDstSubIdx == 0 &&
+ TRI->composeSubRegIndices(NewSrcSubIdx, DstSubIdx) != SrcSubIdx) ||
+ (NewSrcSubIdx == 0 &&
+ TRI->composeSubRegIndices(NewDstSubIdx, SrcSubIdx) != DstSubIdx)) {
+ CanCoalesce = false;
+ break;
+ }
+ // Keep track of one of the uses.
+ SomeMI = UseMI;
+ }
+ if (!CanCoalesce)
+ continue;
+
+ // Insert a copy to replace the original.
+ MachineBasicBlock::iterator InsertLoc = SomeMI;
+ MachineInstr *CopyMI = BuildMI(*SomeMI->getParent(), SomeMI,
+ SomeMI->getDebugLoc(),
+ TII->get(TargetOpcode::COPY))
+ .addReg(DstReg, RegState::Define, NewDstSubIdx)
+ .addReg(SrcReg, 0, NewSrcSubIdx);
+
+ // Remove all the old extract instructions.
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UI = MRI->use_nodbg_begin(SrcReg),
+ UE = MRI->use_nodbg_end(); UI != UE; ) {
+ MachineInstr *UseMI = &*UI;
+ ++UI;
+ if (UseMI == CopyMI)
+ continue;
+ assert(UseMI->isCopy());
+ // Move any kills to the new copy or extract instruction.
+ if (UseMI->getOperand(1).isKill()) {
+ CopyMI->getOperand(1).setIsKill();
+ if (LV)
+ // Update live variables
+ LV->replaceKillInstruction(SrcReg, UseMI, &*CopyMI);
+ }
+ UseMI->eraseFromParent();
+ }
+ }
+}
+
+static bool HasOtherRegSequenceUses(unsigned Reg, MachineInstr *RegSeq,
+ MachineRegisterInfo *MRI) {
+ for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
+ UE = MRI->use_end(); UI != UE; ++UI) {
+ MachineInstr *UseMI = &*UI;
+ if (UseMI != RegSeq && UseMI->isRegSequence())
+ return true;
+ }
+ return false;
+}
+
+/// EliminateRegSequences - Eliminate REG_SEQUENCE instructions as part
+/// of the de-ssa process. This replaces sources of REG_SEQUENCE with
+/// sub-register references of the register defined by REG_SEQUENCE. e.g.
+///
+/// %reg1029<def>, %reg1030<def> = VLD1q16 %reg1024<kill>, ...
+/// %reg1031<def> = REG_SEQUENCE %reg1029<kill>, 5, %reg1030<kill>, 6
+/// =>
+/// %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
+bool TwoAddressInstructionPass::EliminateRegSequences() {
+ if (RegSequences.empty())
+ return false;
+
+ for (unsigned i = 0, e = RegSequences.size(); i != e; ++i) {
+ MachineInstr *MI = RegSequences[i];
+ unsigned DstReg = MI->getOperand(0).getReg();
+ if (MI->getOperand(0).getSubReg() ||
+ TargetRegisterInfo::isPhysicalRegister(DstReg) ||
+ !(MI->getNumOperands() & 1)) {
+ DEBUG(dbgs() << "Illegal REG_SEQUENCE instruction:" << *MI);
+ llvm_unreachable(0);
+ }
+
+ bool IsImpDef = true;
+ SmallVector<unsigned, 4> RealSrcs;
+ SmallSet<unsigned, 4> Seen;
+ for (unsigned i = 1, e = MI->getNumOperands(); i < e; i += 2) {
+ unsigned SrcReg = MI->getOperand(i).getReg();
+ if (MI->getOperand(i).getSubReg() ||
+ TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
+ DEBUG(dbgs() << "Illegal REG_SEQUENCE instruction:" << *MI);
+ llvm_unreachable(0);
+ }
+
+ MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
+ if (DefMI->isImplicitDef()) {
+ DefMI->eraseFromParent();
+ continue;
+ }
+ IsImpDef = false;
+
+ // Remember COPY sources. These might be candidates for coalescing.
+ if (DefMI->isCopy() && DefMI->getOperand(1).getSubReg())
+ RealSrcs.push_back(DefMI->getOperand(1).getReg());
+
+ bool isKill = MI->getOperand(i).isKill();
+ if (!Seen.insert(SrcReg) || MI->getParent() != DefMI->getParent() ||
+ !isKill || HasOtherRegSequenceUses(SrcReg, MI, MRI)) {
+ // REG_SEQUENCE cannot have duplicated operands, so add a copy.
+ // Also add a copy if the source is live-in to the block. We don't want
+ // to end up with a partial-redef of a livein, e.g.
+ // BB0:
+ // reg1051:10<def> =
+ // ...
+ // BB1:
+ // ... = reg1051:10
+ // BB2:
+ // reg1051:9<def> =
+ // LiveIntervalAnalysis won't like it.
+ //
+ // If the REG_SEQUENCE doesn't kill its source, keeping live variables
+ // correctly up to date becomes very difficult. Insert a copy.
+
+ // Defer any kill flag to the last operand using SrcReg. Otherwise, we
+ // might insert a COPY that uses SrcReg after it was killed.
+ if (isKill)
+ for (unsigned j = i + 2; j < e; j += 2)
+ if (MI->getOperand(j).getReg() == SrcReg) {
+ MI->getOperand(j).setIsKill();
+ isKill = false;
+ break;
+ }
+
+ MachineBasicBlock::iterator InsertLoc = MI;
+ MachineInstr *CopyMI = BuildMI(*MI->getParent(), InsertLoc,
+ MI->getDebugLoc(), TII->get(TargetOpcode::COPY))
+ .addReg(DstReg, RegState::Define, MI->getOperand(i+1).getImm())
+ .addReg(SrcReg, getKillRegState(isKill));
+ MI->getOperand(i).setReg(0);
+ if (LV && isKill)
+ LV->replaceKillInstruction(SrcReg, MI, CopyMI);
+ DEBUG(dbgs() << "Inserted: " << *CopyMI);
+ }
+ }
+
+ for (unsigned i = 1, e = MI->getNumOperands(); i < e; i += 2) {
+ unsigned SrcReg = MI->getOperand(i).getReg();
+ if (!SrcReg) continue;
+ unsigned SubIdx = MI->getOperand(i+1).getImm();
+ UpdateRegSequenceSrcs(SrcReg, DstReg, SubIdx, MRI, *TRI);
+ }
+
+ if (IsImpDef) {
+ DEBUG(dbgs() << "Turned: " << *MI << " into an IMPLICIT_DEF");
+ MI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));
+ for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
+ MI->RemoveOperand(j);
+ } else {
+ DEBUG(dbgs() << "Eliminated: " << *MI);
+ MI->eraseFromParent();
+ }
+
+ // Try coalescing some EXTRACT_SUBREG instructions. This can create
+ // INSERT_SUBREG instructions that must have <undef> flags added by
+ // LiveIntervalAnalysis, so only run it when LiveVariables is available.
+ if (LV)
+ CoalesceExtSubRegs(RealSrcs, DstReg);
+ }
+
+ RegSequences.clear();
+ return true;
+}
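
Once the function is out of SSA form, each REG_SEQUENCE source is rewritten
as a sub-register reference of the destination register (duplicated or
non-killed sources first get an explicit COPY). A minimal standalone model of
the substitution step, with plain structs standing in for MachineOperand:

    #include <cstdio>
    #include <vector>

    struct Operand {
      unsigned Reg;
      unsigned SubIdx; // 0 means the whole register
    };

    // Rewrite every reference to SrcReg as DstReg:SubIdx, mirroring
    // UpdateRegSequenceSrcs above (minus the sub-index composition that
    // MachineOperand::substVirtReg does for already-subindexed operands).
    void updateSrcs(std::vector<Operand> &Ops, unsigned SrcReg,
                    unsigned DstReg, unsigned SubIdx) {
      for (size_t i = 0; i != Ops.size(); ++i)
        if (Ops[i].Reg == SrcReg) {
          Ops[i].Reg = DstReg;
          Ops[i].SubIdx = SubIdx;
        }
    }

    int main() {
      // %reg1029, %reg1030 = VLD1q16 ...
      // %reg1031 = REG_SEQUENCE %reg1029, 5, %reg1030, 6
      std::vector<Operand> Defs;
      Operand A = {1029, 0}, B = {1030, 0};
      Defs.push_back(A);
      Defs.push_back(B);
      updateSrcs(Defs, 1029, 1031, 5);
      updateSrcs(Defs, 1030, 1031, 6);
      // Now: %reg1031:5, %reg1031:6 = VLD1q16 ...
      for (size_t i = 0; i != Defs.size(); ++i)
        std::printf("%%reg%u:%u\n", Defs[i].Reg, Defs[i].SubIdx);
      return 0;
    }
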
diff --git a/libclamav/c++/llvm/lib/CodeGen/UnreachableBlockElim.cpp b/libclamav/c++/llvm/lib/CodeGen/UnreachableBlockElim.cpp
index b0f0a07..6dd3333 100644
--- a/libclamav/c++/llvm/lib/CodeGen/UnreachableBlockElim.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/UnreachableBlockElim.cpp
@@ -43,7 +43,7 @@ namespace {
virtual bool runOnFunction(Function &F);
public:
static char ID; // Pass identification, replacement for typeid
- UnreachableBlockElim() : FunctionPass(&ID) {}
+ UnreachableBlockElim() : FunctionPass(ID) {}
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<ProfileInfo>();
@@ -51,8 +51,8 @@ namespace {
};
}
char UnreachableBlockElim::ID = 0;
-static RegisterPass<UnreachableBlockElim>
-X("unreachableblockelim", "Remove unreachable blocks from the CFG");
+INITIALIZE_PASS(UnreachableBlockElim, "unreachableblockelim",
+ "Remove unreachable blocks from the CFG", false, false);
FunctionPass *llvm::createUnreachableBlockEliminationPass() {
return new UnreachableBlockElim();
@@ -100,16 +100,15 @@ namespace {
MachineModuleInfo *MMI;
public:
static char ID; // Pass identification, replacement for typeid
- UnreachableMachineBlockElim() : MachineFunctionPass(&ID) {}
+ UnreachableMachineBlockElim() : MachineFunctionPass(ID) {}
};
}
char UnreachableMachineBlockElim::ID = 0;
-static RegisterPass<UnreachableMachineBlockElim>
-Y("unreachable-mbb-elimination",
- "Remove unreachable machine basic blocks");
+INITIALIZE_PASS(UnreachableMachineBlockElim, "unreachable-mbb-elimination",
+ "Remove unreachable machine basic blocks", false, false);
-const PassInfo *const llvm::UnreachableMachineBlockElimID = &Y;
+char &llvm::UnreachableMachineBlockElimID = UnreachableMachineBlockElim::ID;
void UnreachableMachineBlockElim::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<MachineLoopInfo>();
@@ -165,20 +164,8 @@ bool UnreachableMachineBlockElim::runOnMachineFunction(MachineFunction &F) {
}
// Actually remove the blocks now.
- for (unsigned i = 0, e = DeadBlocks.size(); i != e; ++i) {
- MachineBasicBlock *MBB = DeadBlocks[i];
- // If there are any labels in the basic block, unregister them from
- // MachineModuleInfo.
- if (MMI && !MBB->empty()) {
- for (MachineBasicBlock::iterator I = MBB->begin(),
- E = MBB->end(); I != E; ++I) {
- if (I->isLabel())
- // The label ID # is always operand #0, an immediate.
- MMI->InvalidateLabel(I->getOperand(0).getImm());
- }
- }
- MBB->eraseFromParent();
- }
+ for (unsigned i = 0, e = DeadBlocks.size(); i != e; ++i)
+ DeadBlocks[i]->eraseFromParent();
// Cleanup PHI nodes.
for (MachineFunction::iterator I = F.begin(), E = F.end(); I != E; ++I) {
diff --git a/libclamav/c++/llvm/lib/CodeGen/VirtRegMap.cpp b/libclamav/c++/llvm/lib/CodeGen/VirtRegMap.cpp
index ed02696..20ffcff 100644
--- a/libclamav/c++/llvm/lib/CodeGen/VirtRegMap.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/VirtRegMap.cpp
@@ -48,8 +48,7 @@ STATISTIC(NumSpills , "Number of register spills");
char VirtRegMap::ID = 0;
-static RegisterPass<VirtRegMap>
-X("virtregmap", "Virtual Register Map");
+INITIALIZE_PASS(VirtRegMap, "virtregmap", "Virtual Register Map", false, false);
bool VirtRegMap::runOnMachineFunction(MachineFunction &mf) {
MRI = &mf.getRegInfo();
diff --git a/libclamav/c++/llvm/lib/CodeGen/VirtRegMap.h b/libclamav/c++/llvm/lib/CodeGen/VirtRegMap.h
index a5599f6..8b6082d 100644
--- a/libclamav/c++/llvm/lib/CodeGen/VirtRegMap.h
+++ b/libclamav/c++/llvm/lib/CodeGen/VirtRegMap.h
@@ -139,7 +139,7 @@ namespace llvm {
public:
static char ID;
- VirtRegMap() : MachineFunctionPass(&ID), Virt2PhysMap(NO_PHYS_REG),
+ VirtRegMap() : MachineFunctionPass(ID), Virt2PhysMap(NO_PHYS_REG),
Virt2StackSlotMap(NO_STACK_SLOT),
Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
Virt2SplitKillMap(SlotIndex()), ReMatMap(NULL),
@@ -152,6 +152,11 @@ namespace llvm {
MachineFunctionPass::getAnalysisUsage(AU);
}
+ MachineFunction &getMachineFunction() const {
+ assert(MF && "getMachineFunction called before runOnMachineFunction");
+ return *MF;
+ }
+
void grow();
/// @brief returns true if the specified virtual register is
diff --git a/libclamav/c++/llvm/lib/CodeGen/VirtRegRewriter.cpp b/libclamav/c++/llvm/lib/CodeGen/VirtRegRewriter.cpp
index 7aa0a91..240d28c 100644
--- a/libclamav/c++/llvm/lib/CodeGen/VirtRegRewriter.cpp
+++ b/libclamav/c++/llvm/lib/CodeGen/VirtRegRewriter.cpp
@@ -9,7 +9,9 @@
#define DEBUG_TYPE "virtregrewriter"
#include "VirtRegRewriter.h"
+#include "VirtRegMap.h"
#include "llvm/Function.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -65,23 +67,16 @@ VirtRegRewriter::~VirtRegRewriter() {}
/// Note that operands may be added, so the MO reference is no longer valid.
static void substitutePhysReg(MachineOperand &MO, unsigned Reg,
const TargetRegisterInfo &TRI) {
- if (unsigned SubIdx = MO.getSubReg()) {
- // Insert the physical subreg and reset the subreg field.
- MO.setReg(TRI.getSubReg(Reg, SubIdx));
- MO.setSubReg(0);
-
- // Any def, dead, and kill flags apply to the full virtual register, so they
- // also apply to the full physical register. Add imp-def/dead and imp-kill
- // as needed.
+ if (MO.getSubReg()) {
+ MO.substPhysReg(Reg, TRI);
+
+ // Any kill flags apply to the full virtual register, so they also apply to
+ // the full physical register.
+ // We assume that partial defs have already been decorated with a super-reg
+ // <imp-def> operand by LiveIntervals.
MachineInstr &MI = *MO.getParent();
- if (MO.isDef())
- if (MO.isDead())
- MI.addRegisterDead(Reg, &TRI, /*AddIfNotFound=*/ true);
- else
- MI.addRegisterDefined(Reg, &TRI);
- else if (!MO.isUndef() &&
- (MO.isKill() ||
- MI.isRegTiedToDefOperand(&MO-&MI.getOperand(0))))
+ if (MO.isUse() && !MO.isUndef() &&
+ (MO.isKill() || MI.isRegTiedToDefOperand(&MO-&MI.getOperand(0))))
MI.addRegisterKilled(Reg, &TRI, /*AddIfNotFound=*/ true);
} else {
MO.setReg(Reg);
@@ -98,7 +93,7 @@ struct TrivialRewriter : public VirtRegRewriter {
bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
LiveIntervals* LIs) {
DEBUG(dbgs() << "********** REWRITE MACHINE CODE **********\n");
- DEBUG(dbgs() << "********** Function: "
+ DEBUG(dbgs() << "********** Function: "
<< MF.getFunction()->getName() << '\n');
DEBUG(dbgs() << "**** Machine Instrs"
<< "(NOTE! Does not include spills and reloads!) ****\n");
@@ -135,10 +130,10 @@ struct TrivialRewriter : public VirtRegRewriter {
changed |= !reglist.empty();
}
}
-
+
DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
DEBUG(MF.dump());
-
+
return changed;
}
@@ -208,7 +203,7 @@ public:
/// in the specified physreg. If CanClobber is true, the physreg can be
/// modified at any time without changing the semantics of the program.
void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
- // If this stack slot is thought to be available in some other physreg,
+ // If this stack slot is thought to be available in some other physreg,
// remove its record.
ModifyStackSlotOrReMat(SlotOrReMat);
@@ -364,7 +359,7 @@ struct ReusedOp {
// AssignedPhysReg - The physreg that was assigned for use by the reload.
unsigned AssignedPhysReg;
-
+
// VirtReg - The virtual register itself.
unsigned VirtReg;
@@ -384,11 +379,11 @@ public:
ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
PhysRegsClobbered.resize(tri->getNumRegs());
}
-
+
bool hasReuses() const {
return !Reuses.empty();
}
-
+
/// addReuse - If we choose to reuse a virtual register that is already
/// available instead of reloading it, remember that we did so.
void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
@@ -397,9 +392,9 @@ public:
// If the reload is to the assigned register anyway, no undo will be
// required.
if (PhysRegReused == AssignedPhysReg) return;
-
+
// Otherwise, remember this.
- Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
+ Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
AssignedPhysReg, VirtReg));
}
@@ -410,10 +405,10 @@ public:
bool isClobbered(unsigned PhysReg) const {
return PhysRegsClobbered.test(PhysReg);
}
-
+
/// GetRegForReload - We are about to emit a reload into PhysReg. If there
/// is some other operand that is using the specified register, either pick
- /// a new register to use, or evict the previous reload and use this reg.
+ /// a new register to use, or evict the previous reload and use this reg.
unsigned GetRegForReload(const TargetRegisterClass *RC, unsigned PhysReg,
MachineFunction &MF, MachineInstr *MI,
AvailableSpills &Spills,
@@ -458,7 +453,7 @@ public:
/// blocks each of which is a successor of the specified BB and has no other
/// predecessor.
static void findSinglePredSuccessor(MachineBasicBlock *MBB,
- SmallVectorImpl<MachineBasicBlock *> &Succs) {
+ SmallVectorImpl<MachineBasicBlock *> &Succs){
for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
SE = MBB->succ_end(); SI != SE; ++SI) {
MachineBasicBlock *SuccMBB = *SI;
@@ -525,7 +520,7 @@ static void InvalidateKills(MachineInstr &MI,
/// reference.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
MachineInstr &NewDef, unsigned Reg,
- bool &HasLiveDef,
+ bool &HasLiveDef,
const TargetRegisterInfo *TRI) {
// Due to remat, it's possible this reg isn't being reused. That is,
// the def of this reg (by prev MI) is now dead.
@@ -572,6 +567,9 @@ static bool InvalidateRegDef(MachineBasicBlock::iterator I,
static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
BitVector &RegKills,
std::vector<MachineOperand*> &KillOps) {
+ // These do not affect kill info at all.
+ if (MI.isDebugValue())
+ return;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || !MO.isUse() || MO.isUndef())
@@ -579,7 +577,7 @@ static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
unsigned Reg = MO.getReg();
if (Reg == 0)
continue;
-
+
if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
// That can't be right. Register is killed but not re-defined and it's
// being reused. Let's fix that.
@@ -597,7 +595,7 @@ static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
}
} else {
// Check for subreg kills as well.
- // d4 =
+ // d4 =
// store d4, fi#0
// ...
// = s8<kill>
@@ -662,8 +660,7 @@ static void ReMaterialize(MachineBasicBlock &MBB,
assert(TID.getNumDefs() == 1 &&
"Don't know how to remat instructions that define > 1 values!");
#endif
- TII->reMaterialize(MBB, MII, DestReg,
- ReMatDefMI->getOperand(0).getSubReg(), ReMatDefMI, TRI);
+ TII->reMaterialize(MBB, MII, DestReg, 0, ReMatDefMI, *TRI);
MachineInstr *NewMI = prior(MII);
for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = NewMI->getOperand(i);
@@ -764,7 +761,7 @@ void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
I != E; ++I) {
unsigned Reg = I->first;
- const TargetRegisterClass* RC = TRI->getPhysicalRegisterRegClass(Reg);
+ const TargetRegisterClass* RC = TRI->getMinimalPhysRegClass(Reg);
// FIXME: A temporary workaround. We can't reuse available value if it's
// not safe to move the def of the virtual register's class. e.g.
// X86::RFP* register classes. Do not add it as a live-in.
@@ -802,7 +799,7 @@ void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
if (It == SpillSlotsOrReMatsAvailable.end()) return;
unsigned Reg = It->second >> 1;
SpillSlotsOrReMatsAvailable.erase(It);
-
+
// This register may hold the value of multiple stack slots, only remove this
// stack slot from the set of values the register contains.
std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
@@ -832,7 +829,7 @@ unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
VirtRegMap &VRM) {
const TargetInstrInfo* TII = MF.getTarget().getInstrInfo();
const TargetRegisterInfo *TRI = Spills.getRegInfo();
-
+
if (Reuses.empty()) return PhysReg; // This is most often empty.
for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
@@ -848,12 +845,12 @@ unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
// Yup, use the reload register that we didn't use before.
unsigned NewReg = Op.AssignedPhysReg;
Rejected.insert(PhysReg);
- return GetRegForReload(RC, NewReg, MF, MI, Spills, MaybeDeadStores, Rejected,
- RegKills, KillOps, VRM);
+ return GetRegForReload(RC, NewReg, MF, MI, Spills, MaybeDeadStores,
+ Rejected, RegKills, KillOps, VRM);
} else {
// Otherwise, we might also have a problem if a previously reused
// value aliases the new register. If so, codegen the previous reload
- // and use this one.
+ // and use this one.
unsigned PRRU = Op.PhysRegReused;
if (TRI->regsOverlap(PRRU, PhysReg)) {
// Okay, we found out that an alias of a reused register
@@ -890,7 +887,7 @@ unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
bool DoReMat = NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT;
int SSorRMId = DoReMat
- ? VRM.getReMatId(NewOp.VirtReg) : NewOp.StackSlotOrReMat;
+ ? VRM.getReMatId(NewOp.VirtReg) : (int) NewOp.StackSlotOrReMat;
// Back-schedule reloads and remats.
MachineBasicBlock::iterator InsertLoc =
@@ -900,13 +897,13 @@ unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
if (DoReMat) {
ReMaterialize(*MBB, InsertLoc, NewPhysReg, NewOp.VirtReg, TII,
TRI, VRM);
- } else {
+ } else {
TII->loadRegFromStackSlot(*MBB, InsertLoc, NewPhysReg,
- NewOp.StackSlotOrReMat, AliasRC);
+ NewOp.StackSlotOrReMat, AliasRC, TRI);
MachineInstr *LoadMI = prior(InsertLoc);
VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
// Any stores to this stack slot are not dead anymore.
- MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
+ MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
++NumLoads;
}
Spills.ClobberPhysReg(NewPhysReg);
@@ -919,10 +916,10 @@ unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
DEBUG(dbgs() << '\t' << *prior(InsertLoc));
-
+
DEBUG(dbgs() << "Reuse undone!\n");
--NumReused;
-
+
// Finally, PhysReg is now available, go ahead and use it.
return PhysReg;
}
@@ -987,10 +984,17 @@ static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
SmallVector<unsigned, 4> Kills;
// Take a look at 2 instructions at most.
- for (unsigned Count = 0; Count < 2; ++Count) {
+ unsigned Count = 0;
+ while (Count < 2) {
if (MII == MBB.begin())
break;
MachineInstr *PrevMI = prior(MII);
+ MII = PrevMI;
+
+ if (PrevMI->isDebugValue())
+ continue; // Skip over dbg_value instructions.
+ ++Count;
+
for (unsigned i = 0, e = PrevMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = PrevMI->getOperand(i);
if (!MO.isReg() || MO.getReg() == 0)
@@ -1010,7 +1014,7 @@ static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
unsigned Kill = Kills[i];
if (!Defs[Kill] && !Uses[Kill] &&
- TRI->getPhysicalRegisterRegClass(Kill) == RC)
+ RC->contains(Kill))
return Kill;
}
for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
@@ -1019,8 +1023,6 @@ static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
Uses.set(*AS);
}
-
- MII = PrevMI;
}
return 0;
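
Several hunks in this patch make the rewriter loops step over DBG_VALUE
instructions so debug info can never change code generation; FindFreeRegister
above now counts only the two most recent real instructions. A minimal
standalone sketch of that backwards scan (plain data instead of MachineInstr):

    #include <cstdio>
    #include <vector>

    struct Inst { bool IsDebugValue; };

    // Walk backwards from Pos, inspecting at most MaxReal non-debug
    // instructions; DBG_VALUEs are skipped and never count toward the limit.
    unsigned countRealInstructions(const std::vector<Inst> &Block, size_t Pos,
                                   unsigned MaxReal) {
      unsigned Count = 0;
      while (Count < MaxReal && Pos != 0) {
        const Inst &Prev = Block[--Pos];
        if (Prev.IsDebugValue)
          continue; // skipped, exactly like the loop above
        ++Count;
      }
      return Count;
    }

    int main() {
      std::vector<Inst> B;
      bool Dbg[] = { false, true, false, true };
      for (size_t i = 0; i != 4; ++i) { Inst I = { Dbg[i] }; B.push_back(I); }
      std::printf("%u\n", countRealInstructions(B, B.size(), 2)); // prints 2
      return 0;
    }
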
@@ -1037,1410 +1039,1506 @@ void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg,
}
namespace {
- struct RefSorter {
- bool operator()(const std::pair<MachineInstr*, int> &A,
- const std::pair<MachineInstr*, int> &B) {
- return A.second < B.second;
- }
- };
-}
+
+struct RefSorter {
+ bool operator()(const std::pair<MachineInstr*, int> &A,
+ const std::pair<MachineInstr*, int> &B) {
+ return A.second < B.second;
+ }
+};
// ***************************** //
// Local Spiller Implementation //
// ***************************** //
-namespace {
-
class LocalRewriter : public VirtRegRewriter {
- MachineRegisterInfo *RegInfo;
+ MachineRegisterInfo *MRI;
const TargetRegisterInfo *TRI;
const TargetInstrInfo *TII;
+ VirtRegMap *VRM;
BitVector AllocatableRegs;
DenseMap<MachineInstr*, unsigned> DistanceMap;
-public:
+ DenseMap<int, SmallVector<MachineInstr*,4> > Slot2DbgValues;
- bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
- LiveIntervals* LIs) {
- RegInfo = &MF.getRegInfo();
- TRI = MF.getTarget().getRegisterInfo();
- TII = MF.getTarget().getInstrInfo();
- AllocatableRegs = TRI->getAllocatableSet(MF);
- DEBUG(dbgs() << "\n**** Local spiller rewriting function '"
- << MF.getFunction()->getName() << "':\n");
- DEBUG(dbgs() << "**** Machine Instrs (NOTE! Does not include spills and"
- " reloads!) ****\n");
- DEBUG(MF.dump());
-
- // Spills - Keep track of which spilled values are available in physregs
- // so that we can choose to reuse the physregs instead of emitting
- // reloads. This is usually refreshed per basic block.
- AvailableSpills Spills(TRI, TII);
-
- // Keep track of kill information.
- BitVector RegKills(TRI->getNumRegs());
- std::vector<MachineOperand*> KillOps;
- KillOps.resize(TRI->getNumRegs(), NULL);
-
- // SingleEntrySuccs - Successor blocks which have a single predecessor.
- SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
- SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;
-
- // Traverse the basic blocks depth first.
- MachineBasicBlock *Entry = MF.begin();
- SmallPtrSet<MachineBasicBlock*,16> Visited;
- for (df_ext_iterator<MachineBasicBlock*,
- SmallPtrSet<MachineBasicBlock*,16> >
- DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
- DFI != E; ++DFI) {
- MachineBasicBlock *MBB = *DFI;
- if (!EarlyVisited.count(MBB))
- RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);
-
- // If this MBB is the only predecessor of a successor. Keep the
- // availability information and visit it next.
- do {
- // Keep visiting single predecessor successor as long as possible.
- SinglePredSuccs.clear();
- findSinglePredSuccessor(MBB, SinglePredSuccs);
- if (SinglePredSuccs.empty())
- MBB = 0;
- else {
- // FIXME: More than one successors, each of which has MBB has
- // the only predecessor.
- MBB = SinglePredSuccs[0];
- if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
- Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
- RewriteMBB(*MBB, VRM, LIs, Spills, RegKills, KillOps);
- }
- }
- } while (MBB);
-
- // Clear the availability info.
- Spills.clear();
- }
+ MachineBasicBlock *MBB; // Basic block currently being processed.
- DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
- DEBUG(MF.dump());
-
- // Mark unused spill slots.
- MachineFrameInfo *MFI = MF.getFrameInfo();
- int SS = VRM.getLowSpillSlot();
- if (SS != VirtRegMap::NO_STACK_SLOT)
- for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
- if (!VRM.isSpillSlotUsed(SS)) {
- MFI->RemoveStackObject(SS);
- ++NumDSS;
- }
+public:
- return true;
- }
+ bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
+ LiveIntervals* LIs);
private:
- /// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if
- /// a scratch register is available.
- /// xorq %r12<kill>, %r13
- /// addq %rax, -184(%rbp)
- /// addq %r13, -184(%rbp)
- /// ==>
- /// xorq %r12<kill>, %r13
- /// movq -184(%rbp), %r12
- /// addq %rax, %r12
- /// addq %r13, %r12
- /// movq %r12, -184(%rbp)
bool OptimizeByUnfold2(unsigned VirtReg, int SS,
- MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MII,
std::vector<MachineInstr*> &MaybeDeadStores,
AvailableSpills &Spills,
BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps,
- VirtRegMap &VRM) {
+ std::vector<MachineOperand*> &KillOps);
- MachineBasicBlock::iterator NextMII = llvm::next(MII);
- if (NextMII == MBB.end())
- return false;
+ bool OptimizeByUnfold(MachineBasicBlock::iterator &MII,
+ std::vector<MachineInstr*> &MaybeDeadStores,
+ AvailableSpills &Spills,
+ BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps);
- if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
- return false;
+ bool CommuteToFoldReload(MachineBasicBlock::iterator &MII,
+ unsigned VirtReg, unsigned SrcReg, int SS,
+ AvailableSpills &Spills,
+ BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps,
+ const TargetRegisterInfo *TRI);
- // Now let's see if the last couple of instructions happens to have freed up
- // a register.
- const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
- unsigned PhysReg = FindFreeRegister(MII, MBB, RC, TRI, AllocatableRegs);
- if (!PhysReg)
- return false;
+ void SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
+ int Idx, unsigned PhysReg, int StackSlot,
+ const TargetRegisterClass *RC,
+ bool isAvailable, MachineInstr *&LastStore,
+ AvailableSpills &Spills,
+ SmallSet<MachineInstr*, 4> &ReMatDefs,
+ BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps);
- MachineFunction &MF = *MBB.getParent();
- TRI = MF.getTarget().getRegisterInfo();
- MachineInstr &MI = *MII;
- if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, VRM))
- return false;
+ void TransferDeadness(unsigned Reg, BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps);
- // If the next instruction also folds the same SS modref and can be unfoled,
- // then it's worthwhile to issue a load from SS into the free register and
- // then unfold these instructions.
- if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM))
- return false;
+ bool InsertEmergencySpills(MachineInstr *MI);
- // Back-schedule reloads and remats.
- ComputeReloadLoc(MII, MBB.begin(), PhysReg, TRI, false, SS, TII, MF);
+ bool InsertRestores(MachineInstr *MI,
+ AvailableSpills &Spills,
+ BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps);
- // Load from SS to the spare physical register.
- TII->loadRegFromStackSlot(MBB, MII, PhysReg, SS, RC);
- // This invalidates Phys.
- Spills.ClobberPhysReg(PhysReg);
- // Remember it's available.
- Spills.addAvailable(SS, PhysReg);
- MaybeDeadStores[SS] = NULL;
+ bool InsertSpills(MachineInstr *MI);
- // Unfold current MI.
- SmallVector<MachineInstr*, 4> NewMIs;
- if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
+ void RewriteMBB(LiveIntervals *LIs,
+ AvailableSpills &Spills, BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps);
+};
+}
+
+bool LocalRewriter::runOnMachineFunction(MachineFunction &MF, VirtRegMap &vrm,
+ LiveIntervals* LIs) {
+ MRI = &MF.getRegInfo();
+ TRI = MF.getTarget().getRegisterInfo();
+ TII = MF.getTarget().getInstrInfo();
+ VRM = &vrm;
+ AllocatableRegs = TRI->getAllocatableSet(MF);
+ DEBUG(dbgs() << "\n**** Local spiller rewriting function '"
+ << MF.getFunction()->getName() << "':\n");
+ DEBUG(dbgs() << "**** Machine Instrs (NOTE! Does not include spills and"
+ " reloads!) ****\n");
+ DEBUG(MF.dump());
+
+ // Spills - Keep track of which spilled values are available in physregs
+ // so that we can choose to reuse the physregs instead of emitting
+ // reloads. This is usually refreshed per basic block.
+ AvailableSpills Spills(TRI, TII);
+
+ // Keep track of kill information.
+ BitVector RegKills(TRI->getNumRegs());
+ std::vector<MachineOperand*> KillOps;
+ KillOps.resize(TRI->getNumRegs(), NULL);
+
+ // SingleEntrySuccs - Successor blocks which have a single predecessor.
+ SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
+ SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;
+
+ // Traverse the basic blocks depth first.
+ MachineBasicBlock *Entry = MF.begin();
+ SmallPtrSet<MachineBasicBlock*,16> Visited;
+ for (df_ext_iterator<MachineBasicBlock*,
+ SmallPtrSet<MachineBasicBlock*,16> >
+ DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
+ DFI != E; ++DFI) {
+ MBB = *DFI;
+ if (!EarlyVisited.count(MBB))
+ RewriteMBB(LIs, Spills, RegKills, KillOps);
+
+ // If this MBB is the only predecessor of a successor, keep the
+ // availability information and visit it next.
+ do {
+ // Keep visiting single-predecessor successors as long as possible.
+ SinglePredSuccs.clear();
+ findSinglePredSuccessor(MBB, SinglePredSuccs);
+ if (SinglePredSuccs.empty())
+ MBB = 0;
+ else {
+ // FIXME: More than one successor, each of which has MBB as
+ // its only predecessor.
+ MBB = SinglePredSuccs[0];
+ if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
+ Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
+ RewriteMBB(LIs, Spills, RegKills, KillOps);
+ }
+ }
+ } while (MBB);
+
+ // Clear the availability info.
+ Spills.clear();
+ }
+
+ DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
+ DEBUG(MF.dump());
+
+ // Mark unused spill slots.
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ int SS = VRM->getLowSpillSlot();
+ if (SS != VirtRegMap::NO_STACK_SLOT) {
+ for (int e = VRM->getHighSpillSlot(); SS <= e; ++SS) {
+ SmallVector<MachineInstr*, 4> &DbgValues = Slot2DbgValues[SS];
+ if (!VRM->isSpillSlotUsed(SS)) {
+ MFI->RemoveStackObject(SS);
+ for (unsigned j = 0, ee = DbgValues.size(); j != ee; ++j) {
+ MachineInstr *DVMI = DbgValues[j];
+ MachineBasicBlock *DVMBB = DVMI->getParent();
+ DEBUG(dbgs() << "Removing debug info referencing FI#" << SS << '\n');
+ VRM->RemoveMachineInstrFromMaps(DVMI);
+ DVMBB->erase(DVMI);
+ }
+ ++NumDSS;
+ }
+ DbgValues.clear();
+ }
+ }
+ Slot2DbgValues.clear();
+
+ return true;
+}
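
The rewritten runOnMachineFunction also records, per spill slot, the
DBG_VALUE instructions that reference it, so that when an unused slot is
deleted the stale debug instructions are erased rather than left pointing at
a dead frame index. A small standalone model of that bookkeeping (integers
standing in for MachineInstr pointers, slot numbers made up):

    #include <cstdio>
    #include <map>
    #include <set>
    #include <vector>

    int main() {
      // DBG_VALUE ids keyed by the frame index (spill slot) they reference.
      std::map<int, std::vector<int> > Slot2DbgValues;
      Slot2DbgValues[0].push_back(10);
      Slot2DbgValues[1].push_back(11);
      Slot2DbgValues[1].push_back(12);

      std::set<int> UsedSlots;
      UsedSlots.insert(0); // slot 1 turned out to be unused

      std::map<int, std::vector<int> >::iterator I, E;
      for (I = Slot2DbgValues.begin(), E = Slot2DbgValues.end(); I != E; ++I) {
        if (UsedSlots.count(I->first))
          continue; // slot survives, keep its debug info
        for (size_t j = 0; j != I->second.size(); ++j)
          std::printf("Removing DBG_VALUE %d referencing FI#%d\n",
                      I->second[j], I->first);
        I->second.clear();
      }
      return 0;
    }
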
+
+/// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if
+/// a scratch register is available.
+/// xorq %r12<kill>, %r13
+/// addq %rax, -184(%rbp)
+/// addq %r13, -184(%rbp)
+/// ==>
+/// xorq %r12<kill>, %r13
+/// movq -184(%rbp), %r12
+/// addq %rax, %r12
+/// addq %r13, %r12
+/// movq %r12, -184(%rbp)
+bool LocalRewriter::
+OptimizeByUnfold2(unsigned VirtReg, int SS,
+ MachineBasicBlock::iterator &MII,
+ std::vector<MachineInstr*> &MaybeDeadStores,
+ AvailableSpills &Spills,
+ BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps) {
+
+ MachineBasicBlock::iterator NextMII = llvm::next(MII);
+ // Skip over dbg_value instructions.
+ while (NextMII != MBB->end() && NextMII->isDebugValue())
+ NextMII = llvm::next(NextMII);
+ if (NextMII == MBB->end())
+ return false;
+
+ if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
+ return false;
+
+ // Now let's see if the last couple of instructions happen to have freed up
+ // a register.
+ const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
+ unsigned PhysReg = FindFreeRegister(MII, *MBB, RC, TRI, AllocatableRegs);
+ if (!PhysReg)
+ return false;
+
+ MachineFunction &MF = *MBB->getParent();
+ TRI = MF.getTarget().getRegisterInfo();
+ MachineInstr &MI = *MII;
+ if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, *VRM))
+ return false;
+
+ // If the next instruction also folds the same SS modref and can be unfolded,
+ // then it's worthwhile to issue a load from SS into the free register and
+ // then unfold these instructions.
+ if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM))
+ return false;
+
+ // Back-schedule reloads and remats.
+ ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, false, SS, TII, MF);
+
+ // Load from SS to the spare physical register.
+ TII->loadRegFromStackSlot(*MBB, MII, PhysReg, SS, RC, TRI);
+ // This invalidates Phys.
+ Spills.ClobberPhysReg(PhysReg);
+ // Remember it's available.
+ Spills.addAvailable(SS, PhysReg);
+ MaybeDeadStores[SS] = NULL;
+
+ // Unfold current MI.
+ SmallVector<MachineInstr*, 4> NewMIs;
+ if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
+ llvm_unreachable("Unable to unfold the load / store folding instruction!");
+ assert(NewMIs.size() == 1);
+ AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
+ VRM->transferRestorePts(&MI, NewMIs[0]);
+ MII = MBB->insert(MII, NewMIs[0]);
+ InvalidateKills(MI, TRI, RegKills, KillOps);
+ VRM->RemoveMachineInstrFromMaps(&MI);
+ MBB->erase(&MI);
+ ++NumModRefUnfold;
+
+ // Unfold next instructions that fold the same SS.
+ do {
+ MachineInstr &NextMI = *NextMII;
+ NextMII = llvm::next(NextMII);
+ NewMIs.clear();
+ if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
llvm_unreachable("Unable to unfold the load / store folding instruction!");
assert(NewMIs.size() == 1);
AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
- VRM.transferRestorePts(&MI, NewMIs[0]);
- MII = MBB.insert(MII, NewMIs[0]);
- InvalidateKills(MI, TRI, RegKills, KillOps);
- VRM.RemoveMachineInstrFromMaps(&MI);
- MBB.erase(&MI);
+ VRM->transferRestorePts(&NextMI, NewMIs[0]);
+ MBB->insert(NextMII, NewMIs[0]);
+ InvalidateKills(NextMI, TRI, RegKills, KillOps);
+ VRM->RemoveMachineInstrFromMaps(&NextMI);
+ MBB->erase(&NextMI);
++NumModRefUnfold;
-
- // Unfold next instructions that fold the same SS.
- do {
- MachineInstr &NextMI = *NextMII;
+ // Skip over dbg_value instructions.
+ while (NextMII != MBB->end() && NextMII->isDebugValue())
NextMII = llvm::next(NextMII);
- NewMIs.clear();
- if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
- llvm_unreachable("Unable unfold the load / store folding instruction!");
- assert(NewMIs.size() == 1);
- AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
- VRM.transferRestorePts(&NextMI, NewMIs[0]);
- MBB.insert(NextMII, NewMIs[0]);
- InvalidateKills(NextMI, TRI, RegKills, KillOps);
- VRM.RemoveMachineInstrFromMaps(&NextMI);
- MBB.erase(&NextMI);
- ++NumModRefUnfold;
- if (NextMII == MBB.end())
- break;
- } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, VRM));
+ if (NextMII == MBB->end())
+ break;
+ } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM));
- // Store the value back into SS.
- TII->storeRegToStackSlot(MBB, NextMII, PhysReg, true, SS, RC);
- MachineInstr *StoreMI = prior(NextMII);
- VRM.addSpillSlotUse(SS, StoreMI);
- VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
+ // Store the value back into SS.
+ TII->storeRegToStackSlot(*MBB, NextMII, PhysReg, true, SS, RC, TRI);
+ MachineInstr *StoreMI = prior(NextMII);
+ VRM->addSpillSlotUse(SS, StoreMI);
+ VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
- return true;
- }
+ return true;
+}
- /// OptimizeByUnfold - Turn a store folding instruction into a load folding
- /// instruction. e.g.
- /// xorl %edi, %eax
- /// movl %eax, -32(%ebp)
- /// movl -36(%ebp), %eax
- /// orl %eax, -32(%ebp)
- /// ==>
- /// xorl %edi, %eax
- /// orl -36(%ebp), %eax
- /// mov %eax, -32(%ebp)
- /// This enables unfolding optimization for a subsequent instruction which will
- /// also eliminate the newly introduced store instruction.
- bool OptimizeByUnfold(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MII,
- std::vector<MachineInstr*> &MaybeDeadStores,
- AvailableSpills &Spills,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps,
- VirtRegMap &VRM) {
- MachineFunction &MF = *MBB.getParent();
- MachineInstr &MI = *MII;
- unsigned UnfoldedOpc = 0;
- unsigned UnfoldPR = 0;
- unsigned UnfoldVR = 0;
- int FoldedSS = VirtRegMap::NO_STACK_SLOT;
- VirtRegMap::MI2VirtMapTy::const_iterator I, End;
- for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
- // Only transform a MI that folds a single register.
- if (UnfoldedOpc)
- return false;
- UnfoldVR = I->second.first;
- VirtRegMap::ModRef MR = I->second.second;
- // MI2VirtMap be can updated which invalidate the iterator.
- // Increment the iterator first.
- ++I;
- if (VRM.isAssignedReg(UnfoldVR))
+/// OptimizeByUnfold - Turn a store folding instruction into a load folding
+/// instruction. e.g.
+/// xorl %edi, %eax
+/// movl %eax, -32(%ebp)
+/// movl -36(%ebp), %eax
+/// orl %eax, -32(%ebp)
+/// ==>
+/// xorl %edi, %eax
+/// orl -36(%ebp), %eax
+/// mov %eax, -32(%ebp)
+/// This enables unfolding optimization for a subsequent instruction which will
+/// also eliminate the newly introduced store instruction.
+bool LocalRewriter::
+OptimizeByUnfold(MachineBasicBlock::iterator &MII,
+ std::vector<MachineInstr*> &MaybeDeadStores,
+ AvailableSpills &Spills,
+ BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps) {
+ MachineFunction &MF = *MBB->getParent();
+ MachineInstr &MI = *MII;
+ unsigned UnfoldedOpc = 0;
+ unsigned UnfoldPR = 0;
+ unsigned UnfoldVR = 0;
+ int FoldedSS = VirtRegMap::NO_STACK_SLOT;
+ VirtRegMap::MI2VirtMapTy::const_iterator I, End;
+ for (tie(I, End) = VRM->getFoldedVirts(&MI); I != End; ) {
+ // Only transform a MI that folds a single register.
+ if (UnfoldedOpc)
+ return false;
+ UnfoldVR = I->second.first;
+ VirtRegMap::ModRef MR = I->second.second;
+ // MI2VirtMap can be updated, which invalidates the iterator.
+ // Increment the iterator first.
+ ++I;
+ if (VRM->isAssignedReg(UnfoldVR))
+ continue;
+ // If this reference is not a use, any previous store is now dead.
+ // Otherwise, the store to this stack slot is not dead anymore.
+ FoldedSS = VRM->getStackSlot(UnfoldVR);
+ MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
+ if (DeadStore && (MR & VirtRegMap::isModRef)) {
+ unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
+ if (!PhysReg || !DeadStore->readsRegister(PhysReg))
continue;
- // If this reference is not a use, any previous store is now dead.
- // Otherwise, the store to this stack slot is not dead anymore.
- FoldedSS = VRM.getStackSlot(UnfoldVR);
- MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
- if (DeadStore && (MR & VirtRegMap::isModRef)) {
- unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
- if (!PhysReg || !DeadStore->readsRegister(PhysReg))
- continue;
- UnfoldPR = PhysReg;
- UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
- false, true);
- }
+ UnfoldPR = PhysReg;
+ UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
+ false, true);
}
+ }
- if (!UnfoldedOpc) {
- if (!UnfoldVR)
- return false;
+ if (!UnfoldedOpc) {
+ if (!UnfoldVR)
+ return false;
- // Look for other unfolding opportunities.
- return OptimizeByUnfold2(UnfoldVR, FoldedSS, MBB, MII,
- MaybeDeadStores, Spills, RegKills, KillOps, VRM);
- }
+ // Look for other unfolding opportunities.
+ return OptimizeByUnfold2(UnfoldVR, FoldedSS, MII, MaybeDeadStores, Spills,
+ RegKills, KillOps);
+ }
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
- continue;
- unsigned VirtReg = MO.getReg();
- if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
- continue;
- if (VRM.isAssignedReg(VirtReg)) {
- unsigned PhysReg = VRM.getPhys(VirtReg);
- if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
- return false;
- } else if (VRM.isReMaterialized(VirtReg))
- continue;
- int SS = VRM.getStackSlot(VirtReg);
- unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
- if (PhysReg) {
- if (TRI->regsOverlap(PhysReg, UnfoldPR))
- return false;
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
+ if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
+ continue;
+ unsigned VirtReg = MO.getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
+ continue;
+ if (VRM->isAssignedReg(VirtReg)) {
+ unsigned PhysReg = VRM->getPhys(VirtReg);
+ if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
+ return false;
+ } else if (VRM->isReMaterialized(VirtReg))
+ continue;
+ int SS = VRM->getStackSlot(VirtReg);
+ unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
+ if (PhysReg) {
+ if (TRI->regsOverlap(PhysReg, UnfoldPR))
+ return false;
+ continue;
+ }
+ if (VRM->hasPhys(VirtReg)) {
+ PhysReg = VRM->getPhys(VirtReg);
+ if (!TRI->regsOverlap(PhysReg, UnfoldPR))
continue;
- }
- if (VRM.hasPhys(VirtReg)) {
- PhysReg = VRM.getPhys(VirtReg);
- if (!TRI->regsOverlap(PhysReg, UnfoldPR))
- continue;
- }
+ }
- // Ok, we'll need to reload the value into a register which makes
- // it impossible to perform the store unfolding optimization later.
- // Let's see if it is possible to fold the load if the store is
- // unfolded. This allows us to perform the store unfolding
- // optimization.
- SmallVector<MachineInstr*, 4> NewMIs;
- if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
- assert(NewMIs.size() == 1);
- MachineInstr *NewMI = NewMIs.back();
- NewMIs.clear();
- int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
- assert(Idx != -1);
- SmallVector<unsigned, 1> Ops;
- Ops.push_back(Idx);
- MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
- if (FoldedMI) {
- VRM.addSpillSlotUse(SS, FoldedMI);
- if (!VRM.hasPhys(UnfoldVR))
- VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
- VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
- MII = MBB.insert(MII, FoldedMI);
- InvalidateKills(MI, TRI, RegKills, KillOps);
- VRM.RemoveMachineInstrFromMaps(&MI);
- MBB.erase(&MI);
- MF.DeleteMachineInstr(NewMI);
- return true;
- }
- MF.DeleteMachineInstr(NewMI);
+ // Ok, we'll need to reload the value into a register which makes
+ // it impossible to perform the store unfolding optimization later.
+ // Let's see if it is possible to fold the load if the store is
+ // unfolded. This allows us to perform the store unfolding
+ // optimization.
+ SmallVector<MachineInstr*, 4> NewMIs;
+ if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
+ assert(NewMIs.size() == 1);
+ MachineInstr *NewMI = NewMIs.back();
+ MBB->insert(MII, NewMI);
+ NewMIs.clear();
+ int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
+ assert(Idx != -1);
+ SmallVector<unsigned, 1> Ops;
+ Ops.push_back(Idx);
+ MachineInstr *FoldedMI = TII->foldMemoryOperand(NewMI, Ops, SS);
+ NewMI->eraseFromParent();
+ if (FoldedMI) {
+ VRM->addSpillSlotUse(SS, FoldedMI);
+ if (!VRM->hasPhys(UnfoldVR))
+ VRM->assignVirt2Phys(UnfoldVR, UnfoldPR);
+ VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
+ MII = FoldedMI;
+ InvalidateKills(MI, TRI, RegKills, KillOps);
+ VRM->RemoveMachineInstrFromMaps(&MI);
+ MBB->erase(&MI);
+ return true;
}
}
+ }
+ return false;
+}
+
+/// CommuteChangesDestination - We are looking for r0 = op r1, r2 and
+/// where SrcReg is r1 and it is tied to r0. Return true if after
+/// commuting this instruction it will be r0 = op r2, r1.
+static bool CommuteChangesDestination(MachineInstr *DefMI,
+ const TargetInstrDesc &TID,
+ unsigned SrcReg,
+ const TargetInstrInfo *TII,
+ unsigned &DstIdx) {
+ if (TID.getNumDefs() != 1 && TID.getNumOperands() != 3)
+ return false;
+ if (!DefMI->getOperand(1).isReg() ||
+ DefMI->getOperand(1).getReg() != SrcReg)
return false;
+ unsigned DefIdx;
+ if (!DefMI->isRegTiedToDefOperand(1, &DefIdx) || DefIdx != 0)
+ return false;
+ unsigned SrcIdx1, SrcIdx2;
+ if (!TII->findCommutedOpIndices(DefMI, SrcIdx1, SrcIdx2))
+ return false;
+ if (SrcIdx1 == 1 && SrcIdx2 == 2) {
+ DstIdx = 2;
+ return true;
}
+ return false;
+}
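+
+// As an illustrative sketch (opcode and registers hypothetical), the check
+// above accepts
+//   r0 = op r1, r2      ; r1 is SrcReg, tied to the def r0, op commutable
+// because commuting operands 1 and 2 turns it into
+//   r0 = op r2, r1
+// and rejects anything where operand 1 is not SrcReg or is not tied to def 0.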
+
+/// CommuteToFoldReload -
+/// Look for
+/// r1 = load fi#1
+/// r1 = op r1, r2<kill>
+/// store r1, fi#1
+///
+/// If op is commutable and r2 is killed, then we can xform these to
+/// r2 = op r2, fi#1
+/// store r2, fi#1
+bool LocalRewriter::
+CommuteToFoldReload(MachineBasicBlock::iterator &MII,
+ unsigned VirtReg, unsigned SrcReg, int SS,
+ AvailableSpills &Spills,
+ BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps,
+ const TargetRegisterInfo *TRI) {
+ if (MII == MBB->begin() || !MII->killsRegister(SrcReg))
+ return false;
- /// CommuteChangesDestination - We are looking for r0 = op r1, r2 and
- /// where SrcReg is r1 and it is tied to r0. Return true if after
- /// commuting this instruction it will be r0 = op r2, r1.
- static bool CommuteChangesDestination(MachineInstr *DefMI,
- const TargetInstrDesc &TID,
- unsigned SrcReg,
- const TargetInstrInfo *TII,
- unsigned &DstIdx) {
- if (TID.getNumDefs() != 1 && TID.getNumOperands() != 3)
+ MachineInstr &MI = *MII;
+ MachineBasicBlock::iterator DefMII = prior(MII);
+ MachineInstr *DefMI = DefMII;
+ const TargetInstrDesc &TID = DefMI->getDesc();
+ unsigned NewDstIdx;
+ if (DefMII != MBB->begin() &&
+ TID.isCommutable() &&
+ CommuteChangesDestination(DefMI, TID, SrcReg, TII, NewDstIdx)) {
+ MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
+ unsigned NewReg = NewDstMO.getReg();
+ if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
+ return false;
+ MachineInstr *ReloadMI = prior(DefMII);
+ int FrameIdx;
+ unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
+ if (DestReg != SrcReg || FrameIdx != SS)
return false;
- if (!DefMI->getOperand(1).isReg() ||
- DefMI->getOperand(1).getReg() != SrcReg)
+ int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
+ if (UseIdx == -1)
return false;
unsigned DefIdx;
- if (!DefMI->isRegTiedToDefOperand(1, &DefIdx) || DefIdx != 0)
- return false;
- unsigned SrcIdx1, SrcIdx2;
- if (!TII->findCommutedOpIndices(DefMI, SrcIdx1, SrcIdx2))
+ if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
return false;
- if (SrcIdx1 == 1 && SrcIdx2 == 2) {
- DstIdx = 2;
- return true;
- }
- return false;
- }
+ assert(DefMI->getOperand(DefIdx).isReg() &&
+ DefMI->getOperand(DefIdx).getReg() == SrcReg);
- /// CommuteToFoldReload -
- /// Look for
- /// r1 = load fi#1
- /// r1 = op r1, r2<kill>
- /// store r1, fi#1
- ///
- /// If op is commutable and r2 is killed, then we can xform these to
- /// r2 = op r2, fi#1
- /// store r2, fi#1
- bool CommuteToFoldReload(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MII,
- unsigned VirtReg, unsigned SrcReg, int SS,
- AvailableSpills &Spills,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps,
- const TargetRegisterInfo *TRI,
- VirtRegMap &VRM) {
- if (MII == MBB.begin() || !MII->killsRegister(SrcReg))
+ // Now commute def instruction.
+ MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
+ if (!CommutedMI)
+ return false;
+ MBB->insert(MII, CommutedMI);
+ SmallVector<unsigned, 1> Ops;
+ Ops.push_back(NewDstIdx);
+ MachineInstr *FoldedMI = TII->foldMemoryOperand(CommutedMI, Ops, SS);
+ // Not needed since foldMemoryOperand returns new MI.
+ CommutedMI->eraseFromParent();
+ if (!FoldedMI)
return false;
- MachineFunction &MF = *MBB.getParent();
- MachineInstr &MI = *MII;
- MachineBasicBlock::iterator DefMII = prior(MII);
- MachineInstr *DefMI = DefMII;
- const TargetInstrDesc &TID = DefMI->getDesc();
- unsigned NewDstIdx;
- if (DefMII != MBB.begin() &&
- TID.isCommutable() &&
- CommuteChangesDestination(DefMI, TID, SrcReg, TII, NewDstIdx)) {
- MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
- unsigned NewReg = NewDstMO.getReg();
- if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
- return false;
- MachineInstr *ReloadMI = prior(DefMII);
- int FrameIdx;
- unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
- if (DestReg != SrcReg || FrameIdx != SS)
- return false;
- int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
- if (UseIdx == -1)
- return false;
- unsigned DefIdx;
- if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
- return false;
- assert(DefMI->getOperand(DefIdx).isReg() &&
- DefMI->getOperand(DefIdx).getReg() == SrcReg);
+ VRM->addSpillSlotUse(SS, FoldedMI);
+ VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
+ // Insert new def MI and spill MI.
+ const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
+ TII->storeRegToStackSlot(*MBB, &MI, NewReg, true, SS, RC, TRI);
+ MII = prior(MII);
+ MachineInstr *StoreMI = MII;
+ VRM->addSpillSlotUse(SS, StoreMI);
+ VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
+ MII = FoldedMI; // Update MII to backtrack.
+
+ // Delete all 3 old instructions.
+ InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
+ VRM->RemoveMachineInstrFromMaps(ReloadMI);
+ MBB->erase(ReloadMI);
+ InvalidateKills(*DefMI, TRI, RegKills, KillOps);
+ VRM->RemoveMachineInstrFromMaps(DefMI);
+ MBB->erase(DefMI);
+ InvalidateKills(MI, TRI, RegKills, KillOps);
+ VRM->RemoveMachineInstrFromMaps(&MI);
+ MBB->erase(&MI);
- // Now commute def instruction.
- MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
- if (!CommutedMI)
- return false;
- SmallVector<unsigned, 1> Ops;
- Ops.push_back(NewDstIdx);
- MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
- // Not needed since foldMemoryOperand returns new MI.
- MF.DeleteMachineInstr(CommutedMI);
- if (!FoldedMI)
- return false;
+ // If NewReg was previously holding value of some SS, it's now clobbered.
+ // This has to be done now because it's a physical register. When this
+ // instruction is re-visited, it's ignored.
+ Spills.ClobberPhysReg(NewReg);
- VRM.addSpillSlotUse(SS, FoldedMI);
- VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
- // Insert new def MI and spill MI.
- const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
- TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC);
- MII = prior(MII);
- MachineInstr *StoreMI = MII;
- VRM.addSpillSlotUse(SS, StoreMI);
- VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
- MII = MBB.insert(MII, FoldedMI); // Update MII to backtrack.
-
- // Delete all 3 old instructions.
- InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
- VRM.RemoveMachineInstrFromMaps(ReloadMI);
- MBB.erase(ReloadMI);
- InvalidateKills(*DefMI, TRI, RegKills, KillOps);
- VRM.RemoveMachineInstrFromMaps(DefMI);
- MBB.erase(DefMI);
- InvalidateKills(MI, TRI, RegKills, KillOps);
- VRM.RemoveMachineInstrFromMaps(&MI);
- MBB.erase(&MI);
+ ++NumCommutes;
+ return true;
+ }
- // If NewReg was previously holding value of some SS, it's now clobbered.
- // This has to be done now because it's a physical register. When this
- // instruction is re-visited, it's ignored.
- Spills.ClobberPhysReg(NewReg);
+ return false;
+}
- ++NumCommutes;
- return true;
+/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
+/// the last store to the same slot is now dead. If so, remove the last store.
+void LocalRewriter::
+SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
+ int Idx, unsigned PhysReg, int StackSlot,
+ const TargetRegisterClass *RC,
+ bool isAvailable, MachineInstr *&LastStore,
+ AvailableSpills &Spills,
+ SmallSet<MachineInstr*, 4> &ReMatDefs,
+ BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps) {
+
+ MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
+ TII->storeRegToStackSlot(*MBB, llvm::next(MII), PhysReg, true, StackSlot, RC,
+ TRI);
+ MachineInstr *StoreMI = prior(oldNextMII);
+ VRM->addSpillSlotUse(StackSlot, StoreMI);
+ DEBUG(dbgs() << "Store:\t" << *StoreMI);
+
+ // If there is a dead store to this stack slot, nuke it now.
+ if (LastStore) {
+ DEBUG(dbgs() << "Removed dead store:\t" << *LastStore);
+ ++NumDSE;
+ SmallVector<unsigned, 2> KillRegs;
+ InvalidateKills(*LastStore, TRI, RegKills, KillOps, &KillRegs);
+ MachineBasicBlock::iterator PrevMII = LastStore;
+ bool CheckDef = PrevMII != MBB->begin();
+ if (CheckDef)
+ --PrevMII;
+ VRM->RemoveMachineInstrFromMaps(LastStore);
+ MBB->erase(LastStore);
+ if (CheckDef) {
+ // Look at defs of killed registers on the store. Mark the defs
+ // as dead since the store has been deleted and they aren't
+ // being reused.
+ for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
+ bool HasOtherDef = false;
+ if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef, TRI)) {
+ MachineInstr *DeadDef = PrevMII;
+ if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
+ // FIXME: This assumes a remat def does not have side effects.
+ VRM->RemoveMachineInstrFromMaps(DeadDef);
+ MBB->erase(DeadDef);
+ ++NumDRM;
+ }
+ }
+ }
}
+ }
+ // Allow for multi-instruction spill sequences, as on PPC Altivec. Presume
+ // the last of multiple instructions is the actual store.
+ LastStore = prior(oldNextMII);
+
+ // If the stack slot value was previously available in some other
+ // register, change it now. Otherwise, make the register available,
+ // in PhysReg.
+ Spills.ModifyStackSlotOrReMat(StackSlot);
+ Spills.ClobberPhysReg(PhysReg);
+ Spills.addAvailable(StackSlot, PhysReg, isAvailable);
+ ++NumStores;
+}
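+
+// Illustrative sketch of the dead store elimination above (r1 and fi#1 are
+// hypothetical):
+//   store r1, fi#1      ; LastStore, slot never read afterwards
+//   ...
+//   store r1, fi#1      ; store just inserted by SpillRegToStackSlot
+// The earlier store is deleted and LastStore is updated to the new store.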
+
+/// isSafeToDelete - Return true if this instruction doesn't produce any side
+/// effect and all of its defs are dead.
+static bool isSafeToDelete(MachineInstr &MI) {
+ const TargetInstrDesc &TID = MI.getDesc();
+ if (TID.mayLoad() || TID.mayStore() || TID.isCall() || TID.isTerminator() ||
+ TID.isCall() || TID.isBarrier() || TID.isReturn() ||
+ TID.hasUnmodeledSideEffects())
return false;
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
+ if (!MO.isReg() || !MO.getReg())
+ continue;
+ if (MO.isDef() && !MO.isDead())
+ return false;
+ if (MO.isUse() && MO.isKill())
+ // FIXME: We can't remove kill markers or else the scavenger will assert.
+ // An alternative is to add an ADD pseudo instruction to replace kill
+ // markers.
+ return false;
}
+ return true;
+}
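+
+// A minimal sketch of what the predicate above allows (registers and opcodes
+// hypothetical):
+//   r0<def,dead> = ADD r1, r2        ; deletable: no side effects, dead def
+//   store r0, fi#1                   ; not deletable: mayStore()
+//   r0<def,dead> = ADD r1, r2<kill>  ; not deletable: kill marker on a use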
- /// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
- /// the last store to the same slot is now dead. If so, remove the last store.
- void SpillRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MII,
- int Idx, unsigned PhysReg, int StackSlot,
- const TargetRegisterClass *RC,
- bool isAvailable, MachineInstr *&LastStore,
- AvailableSpills &Spills,
- SmallSet<MachineInstr*, 4> &ReMatDefs,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps,
- VirtRegMap &VRM) {
+/// TransferDeadness - An identity copy definition is dead and is being
+/// removed. Find the last def or use and mark it as dead / kill.
+void LocalRewriter::
+TransferDeadness(unsigned Reg, BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps) {
+ SmallPtrSet<MachineInstr*, 4> Seens;
+ SmallVector<std::pair<MachineInstr*, int>,8> Refs;
+ for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(Reg),
+ RE = MRI->reg_end(); RI != RE; ++RI) {
+ MachineInstr *UDMI = &*RI;
+ if (UDMI->isDebugValue() || UDMI->getParent() != MBB)
+ continue;
+ DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
+ if (DI == DistanceMap.end())
+ continue;
+ if (Seens.insert(UDMI))
+ Refs.push_back(std::make_pair(UDMI, DI->second));
+ }
- MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
- TII->storeRegToStackSlot(MBB, llvm::next(MII), PhysReg, true, StackSlot, RC);
- MachineInstr *StoreMI = prior(oldNextMII);
- VRM.addSpillSlotUse(StackSlot, StoreMI);
- DEBUG(dbgs() << "Store:\t" << *StoreMI);
+ if (Refs.empty())
+ return;
+ std::sort(Refs.begin(), Refs.end(), RefSorter());
- // If there is a dead store to this stack slot, nuke it now.
- if (LastStore) {
- DEBUG(dbgs() << "Removed dead store:\t" << *LastStore);
- ++NumDSE;
- SmallVector<unsigned, 2> KillRegs;
- InvalidateKills(*LastStore, TRI, RegKills, KillOps, &KillRegs);
- MachineBasicBlock::iterator PrevMII = LastStore;
- bool CheckDef = PrevMII != MBB.begin();
- if (CheckDef)
- --PrevMII;
- VRM.RemoveMachineInstrFromMaps(LastStore);
- MBB.erase(LastStore);
- if (CheckDef) {
- // Look at defs of killed registers on the store. Mark the defs
- // as dead since the store has been deleted and they aren't
- // being reused.
- for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
- bool HasOtherDef = false;
- if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef, TRI)) {
- MachineInstr *DeadDef = PrevMII;
- if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
- // FIXME: This assumes a remat def does not have side effects.
- VRM.RemoveMachineInstrFromMaps(DeadDef);
- MBB.erase(DeadDef);
- ++NumDRM;
- }
- }
- }
+ while (!Refs.empty()) {
+ MachineInstr *LastUDMI = Refs.back().first;
+ Refs.pop_back();
+
+ MachineOperand *LastUD = NULL;
+ for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = LastUDMI->getOperand(i);
+ if (!MO.isReg() || MO.getReg() != Reg)
+ continue;
+ if (!LastUD || (LastUD->isUse() && MO.isDef()))
+ LastUD = &MO;
+ if (LastUDMI->isRegTiedToDefOperand(i))
+ break;
+ }
+ if (LastUD->isDef()) {
+ // If the instruction has no side effect, delete it and propagate
+ // backward further. Otherwise, mark it dead and we are done.
+ if (!isSafeToDelete(*LastUDMI)) {
+ LastUD->setIsDead();
+ break;
}
+ VRM->RemoveMachineInstrFromMaps(LastUDMI);
+ MBB->erase(LastUDMI);
+ } else {
+ LastUD->setIsKill();
+ RegKills.set(Reg);
+ KillOps[Reg] = LastUD;
+ break;
}
+ }
+}
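+
+// Illustrative sketch (registers hypothetical): when a dead identity copy of
+// r1 is removed, the closest earlier reference tracked in DistanceMap is
+// updated instead:
+//   r2 = op r1, r3   ->  r2 = op r1<kill>, r3   ; last reference was a use
+//   r1 = op r2, r3   ->  r1<dead> = op r2, r3   ; last reference was a def
+//                                               ; (deleted if isSafeToDelete)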
+
+/// InsertEmergencySpills - Insert emergency spills before MI if requested by
+/// VRM. Return true if spills were inserted.
+bool LocalRewriter::InsertEmergencySpills(MachineInstr *MI) {
+ if (!VRM->hasEmergencySpills(MI))
+ return false;
+ MachineBasicBlock::iterator MII = MI;
+ SmallSet<int, 4> UsedSS;
+ std::vector<unsigned> &EmSpills = VRM->getEmergencySpills(MI);
+ for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
+ unsigned PhysReg = EmSpills[i];
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(PhysReg);
+ assert(RC && "Unable to determine register class!");
+ int SS = VRM->getEmergencySpillSlot(RC);
+ if (UsedSS.count(SS))
+ llvm_unreachable("Need to spill more than one physical registers!");
+ UsedSS.insert(SS);
+ TII->storeRegToStackSlot(*MBB, MII, PhysReg, true, SS, RC, TRI);
+ MachineInstr *StoreMI = prior(MII);
+ VRM->addSpillSlotUse(SS, StoreMI);
+
+ // Back-schedule reloads and remats.
+ MachineBasicBlock::iterator InsertLoc =
+ ComputeReloadLoc(llvm::next(MII), MBB->begin(), PhysReg, TRI, false, SS,
+ TII, *MBB->getParent());
- // Allow for multi-instruction spill sequences, as on PPC Altivec. Presume
- // the last of multiple instructions is the actual store.
- LastStore = prior(oldNextMII);
-
- // If the stack slot value was previously available in some other
- // register, change it now. Otherwise, make the register available,
- // in PhysReg.
- Spills.ModifyStackSlotOrReMat(StackSlot);
- Spills.ClobberPhysReg(PhysReg);
- Spills.addAvailable(StackSlot, PhysReg, isAvailable);
- ++NumStores;
+ TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SS, RC, TRI);
+
+ MachineInstr *LoadMI = prior(InsertLoc);
+ VRM->addSpillSlotUse(SS, LoadMI);
+ ++NumPSpills;
+ DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
}
+ return true;
+}
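+
+// Illustrative sketch of one emergency spill around MI (PhysReg and fi#E are
+// hypothetical); the reload is nominally placed after MI but may be
+// back-scheduled by ComputeReloadLoc:
+//   store PhysReg, fi#E     ; inserted before MI
+//   MI ...                  ; MI runs with PhysReg freed up
+//   PhysReg = load fi#E     ; reload of the spilled physreg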
- /// isSafeToDelete - Return true if this instruction doesn't produce any side
- /// effect and all of its defs are dead.
- static bool isSafeToDelete(MachineInstr &MI) {
- const TargetInstrDesc &TID = MI.getDesc();
- if (TID.mayLoad() || TID.mayStore() || TID.isCall() || TID.isTerminator() ||
- TID.isCall() || TID.isBarrier() || TID.isReturn() ||
- TID.hasUnmodeledSideEffects())
- return false;
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || !MO.getReg())
- continue;
- if (MO.isDef() && !MO.isDead())
- return false;
- if (MO.isUse() && MO.isKill())
- // FIXME: We can't remove kill markers or else the scavenger will assert.
- // An alternative is to add a ADD pseudo instruction to replace kill
- // markers.
- return false;
+/// InsertRestores - Restore registers before MI if requested by VRM. Return
+/// true if any instructions were inserted.
+bool LocalRewriter::InsertRestores(MachineInstr *MI,
+ AvailableSpills &Spills,
+ BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps) {
+ if (!VRM->isRestorePt(MI))
+ return false;
+ MachineBasicBlock::iterator MII = MI;
+ std::vector<unsigned> &RestoreRegs = VRM->getRestorePtRestores(MI);
+ for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
+ unsigned VirtReg = RestoreRegs[e-i-1]; // Reverse order.
+ if (!VRM->getPreSplitReg(VirtReg))
+ continue; // Split interval spilled again.
+ unsigned Phys = VRM->getPhys(VirtReg);
+ MRI->setPhysRegUsed(Phys);
+
+ // Check if the value being restored is available. If so, it must be
+ // from a predecessor BB that falls through into this BB. We do not
+ // expect:
+ // BB1:
+ // r1 = load fi#1
+ // ...
+ // = r1<kill>
+ // ... # r1 not clobbered
+ // ...
+ // = load fi#1
+ bool DoReMat = VRM->isReMaterialized(VirtReg);
+ int SSorRMId = DoReMat
+ ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
+ unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
+ if (InReg == Phys) {
+ // If the value is already available in the expected register, save
+ // a reload / remat.
+ if (SSorRMId)
+ DEBUG(dbgs() << "Reusing RM#"
+ << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
+ else
+ DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
+ DEBUG(dbgs() << " from physreg "
+ << TRI->getName(InReg) << " for vreg"
+ << VirtReg <<" instead of reloading into physreg "
+ << TRI->getName(Phys) << '\n');
+ ++NumOmitted;
+ continue;
+ } else if (InReg && InReg != Phys) {
+ if (SSorRMId)
+ DEBUG(dbgs() << "Reusing RM#"
+ << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
+ else
+ DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
+ DEBUG(dbgs() << " from physreg "
+ << TRI->getName(InReg) << " for vreg"
+ << VirtReg <<" by copying it into physreg "
+ << TRI->getName(Phys) << '\n');
+
+ // If the reloaded / remat value is available in another register,
+ // copy it to the desired register.
+
+ // Back-schedule reloads and remats.
+ MachineBasicBlock::iterator InsertLoc =
+ ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
+ *MBB->getParent());
+ MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), Phys)
+ .addReg(InReg, RegState::Kill);
+
+ // This invalidates Phys.
+ Spills.ClobberPhysReg(Phys);
+ // Remember it's available.
+ Spills.addAvailable(SSorRMId, Phys);
+
+ CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
+ UpdateKills(*CopyMI, TRI, RegKills, KillOps);
+
+ DEBUG(dbgs() << '\t' << *CopyMI);
+ ++NumCopified;
+ continue;
}
- return true;
- }
- /// TransferDeadness - A identity copy definition is dead and it's being
- /// removed. Find the last def or use and mark it as dead / kill.
- void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
- unsigned Reg, BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps,
- VirtRegMap &VRM) {
- SmallPtrSet<MachineInstr*, 4> Seens;
- SmallVector<std::pair<MachineInstr*, int>,8> Refs;
- for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
- RE = RegInfo->reg_end(); RI != RE; ++RI) {
- MachineInstr *UDMI = &*RI;
- if (UDMI->getParent() != MBB)
- continue;
- DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
- if (DI == DistanceMap.end() || DI->second > CurDist)
- continue;
- if (Seens.insert(UDMI))
- Refs.push_back(std::make_pair(UDMI, DI->second));
+ // Back-schedule reloads and remats.
+ MachineBasicBlock::iterator InsertLoc =
+ ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
+ *MBB->getParent());
+
+ if (VRM->isReMaterialized(VirtReg)) {
+ ReMaterialize(*MBB, InsertLoc, Phys, VirtReg, TII, TRI, *VRM);
+ } else {
+ const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
+ TII->loadRegFromStackSlot(*MBB, InsertLoc, Phys, SSorRMId, RC, TRI);
+ MachineInstr *LoadMI = prior(InsertLoc);
+ VRM->addSpillSlotUse(SSorRMId, LoadMI);
+ ++NumLoads;
+ DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
}
- if (Refs.empty())
- return;
- std::sort(Refs.begin(), Refs.end(), RefSorter());
+ // This invalidates Phys.
+ Spills.ClobberPhysReg(Phys);
+ // Remember it's available.
+ Spills.addAvailable(SSorRMId, Phys);
- while (!Refs.empty()) {
- MachineInstr *LastUDMI = Refs.back().first;
- Refs.pop_back();
+ UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
+ DEBUG(dbgs() << '\t' << *prior(MII));
+ }
+ return true;
+}
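+
+// Illustrative sketch of the three restore cases above, for a vreg V assigned
+// physreg Phys with stack slot or remat id SSorRMId (names from the code,
+// registers hypothetical):
+//   value already in Phys      -> nothing emitted, reload omitted
+//   value available in InReg   -> Phys = COPY InReg<kill>
+//   otherwise                  -> Phys = load SSorRMId (or rematerialize V)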
- MachineOperand *LastUD = NULL;
- for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = LastUDMI->getOperand(i);
- if (!MO.isReg() || MO.getReg() != Reg)
- continue;
- if (!LastUD || (LastUD->isUse() && MO.isDef()))
- LastUD = &MO;
- if (LastUDMI->isRegTiedToDefOperand(i))
- break;
- }
- if (LastUD->isDef()) {
- // If the instruction has no side effect, delete it and propagate
- // backward further. Otherwise, mark is dead and we are done.
- if (!isSafeToDelete(*LastUDMI)) {
- LastUD->setIsDead();
- break;
- }
- VRM.RemoveMachineInstrFromMaps(LastUDMI);
- MBB->erase(LastUDMI);
- } else {
- LastUD->setIsKill();
- RegKills.set(Reg);
- KillOps[Reg] = LastUD;
- break;
- }
- }
+/// InsertSpills - Insert spills after MI if requested by VRM. Return
+/// true if spills were inserted.
+bool LocalRewriter::InsertSpills(MachineInstr *MI) {
+ if (!VRM->isSpillPt(MI))
+ return false;
+ MachineBasicBlock::iterator MII = MI;
+ std::vector<std::pair<unsigned,bool> > &SpillRegs =
+ VRM->getSpillPtSpills(MI);
+ for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
+ unsigned VirtReg = SpillRegs[i].first;
+ bool isKill = SpillRegs[i].second;
+ if (!VRM->getPreSplitReg(VirtReg))
+ continue; // Split interval spilled again.
+ const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
+ unsigned Phys = VRM->getPhys(VirtReg);
+ int StackSlot = VRM->getStackSlot(VirtReg);
+ MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
+ TII->storeRegToStackSlot(*MBB, llvm::next(MII), Phys, isKill, StackSlot,
+ RC, TRI);
+ MachineInstr *StoreMI = prior(oldNextMII);
+ VRM->addSpillSlotUse(StackSlot, StoreMI);
+ DEBUG(dbgs() << "Store:\t" << *StoreMI);
+ VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
}
+ return true;
+}
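+
+// Illustrative sketch of a spill point (registers and slot hypothetical): for
+// each recorded split vreg V with physreg Phys and slot fi#S,
+//   MI ...
+//   store Phys, fi#S        ; inserted right after MI, kill flag as recorded
+// and the store is registered with VRM via addSpillSlotUse and virtFolded.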
- /// rewriteMBB - Keep track of which spills are available even after the
- /// register allocator is done with them. If possible, avid reloading vregs.
- void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM,
- LiveIntervals *LIs,
- AvailableSpills &Spills, BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps) {
- DEBUG(dbgs() << "\n**** Local spiller rewriting MBB '"
- << MBB.getName() << "':\n");
-
- MachineFunction &MF = *MBB.getParent();
-
- // MaybeDeadStores - When we need to write a value back into a stack slot,
- // keep track of the inserted store. If the stack slot value is never read
- // (because the value was used from some available register, for example), and
- // subsequently stored to, the original store is dead. This map keeps track
- // of inserted stores that are not used. If we see a subsequent store to the
- // same stack slot, the original store is deleted.
- std::vector<MachineInstr*> MaybeDeadStores;
- MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);
-
- // ReMatDefs - These are rematerializable def MIs which are not deleted.
- SmallSet<MachineInstr*, 4> ReMatDefs;
-
- // Clear kill info.
- SmallSet<unsigned, 2> KilledMIRegs;
- RegKills.reset();
- KillOps.clear();
- KillOps.resize(TRI->getNumRegs(), NULL);
-
- unsigned Dist = 0;
- DistanceMap.clear();
- for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
- MII != E; ) {
- MachineBasicBlock::iterator NextMII = llvm::next(MII);
-
- VirtRegMap::MI2VirtMapTy::const_iterator I, End;
- bool Erased = false;
- bool BackTracked = false;
- if (OptimizeByUnfold(MBB, MII,
- MaybeDeadStores, Spills, RegKills, KillOps, VRM))
- NextMII = llvm::next(MII);
+/// rewriteMBB - Keep track of which spills are available even after the
+/// register allocator is done with them. If possible, avoid reloading vregs.
+void
+LocalRewriter::RewriteMBB(LiveIntervals *LIs,
+ AvailableSpills &Spills, BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps) {
- MachineInstr &MI = *MII;
-
- if (VRM.hasEmergencySpills(&MI)) {
- // Spill physical register(s) in the rare case the allocator has run out
- // of registers to allocate.
- SmallSet<int, 4> UsedSS;
- std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI);
- for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
- unsigned PhysReg = EmSpills[i];
- const TargetRegisterClass *RC =
- TRI->getPhysicalRegisterRegClass(PhysReg);
- assert(RC && "Unable to determine register class!");
- int SS = VRM.getEmergencySpillSlot(RC);
- if (UsedSS.count(SS))
- llvm_unreachable("Need to spill more than one physical registers!");
- UsedSS.insert(SS);
- TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC);
- MachineInstr *StoreMI = prior(MII);
- VRM.addSpillSlotUse(SS, StoreMI);
-
- // Back-schedule reloads and remats.
- MachineBasicBlock::iterator InsertLoc =
- ComputeReloadLoc(llvm::next(MII), MBB.begin(), PhysReg, TRI, false,
- SS, TII, MF);
-
- TII->loadRegFromStackSlot(MBB, InsertLoc, PhysReg, SS, RC);
+ DEBUG(dbgs() << "\n**** Local spiller rewriting MBB '"
+ << MBB->getName() << "':\n");
- MachineInstr *LoadMI = prior(InsertLoc);
- VRM.addSpillSlotUse(SS, LoadMI);
- ++NumPSpills;
- DistanceMap.insert(std::make_pair(LoadMI, Dist++));
- }
- NextMII = llvm::next(MII);
- }
+ MachineFunction &MF = *MBB->getParent();
- // Insert restores here if asked to.
- if (VRM.isRestorePt(&MI)) {
- std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
- for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
- unsigned VirtReg = RestoreRegs[e-i-1]; // Reverse order.
- if (!VRM.getPreSplitReg(VirtReg))
- continue; // Split interval spilled again.
- unsigned Phys = VRM.getPhys(VirtReg);
- RegInfo->setPhysRegUsed(Phys);
-
- // Check if the value being restored if available. If so, it must be
- // from a predecessor BB that fallthrough into this BB. We do not
- // expect:
- // BB1:
- // r1 = load fi#1
- // ...
- // = r1<kill>
- // ... # r1 not clobbered
- // ...
- // = load fi#1
- bool DoReMat = VRM.isReMaterialized(VirtReg);
- int SSorRMId = DoReMat
- ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
- const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
- unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
- if (InReg == Phys) {
- // If the value is already available in the expected register, save
- // a reload / remat.
- if (SSorRMId)
- DEBUG(dbgs() << "Reusing RM#"
- << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
- else
- DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
- DEBUG(dbgs() << " from physreg "
- << TRI->getName(InReg) << " for vreg"
- << VirtReg <<" instead of reloading into physreg "
- << TRI->getName(Phys) << '\n');
- ++NumOmitted;
- continue;
- } else if (InReg && InReg != Phys) {
- if (SSorRMId)
- DEBUG(dbgs() << "Reusing RM#"
- << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
- else
- DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
- DEBUG(dbgs() << " from physreg "
- << TRI->getName(InReg) << " for vreg"
- << VirtReg <<" by copying it into physreg "
- << TRI->getName(Phys) << '\n');
-
- // If the reloaded / remat value is available in another register,
- // copy it to the desired register.
-
- // Back-schedule reloads and remats.
- MachineBasicBlock::iterator InsertLoc =
- ComputeReloadLoc(MII, MBB.begin(), Phys, TRI, DoReMat,
- SSorRMId, TII, MF);
-
- TII->copyRegToReg(MBB, InsertLoc, Phys, InReg, RC, RC);
-
- // This invalidates Phys.
- Spills.ClobberPhysReg(Phys);
- // Remember it's available.
- Spills.addAvailable(SSorRMId, Phys);
-
- // Mark is killed.
- MachineInstr *CopyMI = prior(InsertLoc);
- CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
- MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg);
- KillOpnd->setIsKill();
- UpdateKills(*CopyMI, TRI, RegKills, KillOps);
-
- DEBUG(dbgs() << '\t' << *CopyMI);
- ++NumCopified;
- continue;
- }
+ // MaybeDeadStores - When we need to write a value back into a stack slot,
+ // keep track of the inserted store. If the stack slot value is never read
+ // (because the value was used from some available register, for example), and
+ // subsequently stored to, the original store is dead. This map keeps track
+ // of inserted stores that are not used. If we see a subsequent store to the
+ // same stack slot, the original store is deleted.
+ std::vector<MachineInstr*> MaybeDeadStores;
+ MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);
- // Back-schedule reloads and remats.
- MachineBasicBlock::iterator InsertLoc =
- ComputeReloadLoc(MII, MBB.begin(), Phys, TRI, DoReMat,
- SSorRMId, TII, MF);
-
- if (VRM.isReMaterialized(VirtReg)) {
- ReMaterialize(MBB, InsertLoc, Phys, VirtReg, TII, TRI, VRM);
- } else {
- const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
- TII->loadRegFromStackSlot(MBB, InsertLoc, Phys, SSorRMId, RC);
- MachineInstr *LoadMI = prior(InsertLoc);
- VRM.addSpillSlotUse(SSorRMId, LoadMI);
- ++NumLoads;
- DistanceMap.insert(std::make_pair(LoadMI, Dist++));
- }
+ // ReMatDefs - These are rematerializable def MIs which are not deleted.
+ SmallSet<MachineInstr*, 4> ReMatDefs;
- // This invalidates Phys.
- Spills.ClobberPhysReg(Phys);
- // Remember it's available.
- Spills.addAvailable(SSorRMId, Phys);
+ // Clear kill info.
+ SmallSet<unsigned, 2> KilledMIRegs;
- UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
- DEBUG(dbgs() << '\t' << *prior(MII));
- }
- }
+ // Keep track of the registers we have already spilled in case there are
+ // multiple defs of the same register in MI.
+ SmallSet<unsigned, 8> SpilledMIRegs;
- // Insert spills here if asked to.
- if (VRM.isSpillPt(&MI)) {
- std::vector<std::pair<unsigned,bool> > &SpillRegs =
- VRM.getSpillPtSpills(&MI);
- for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
- unsigned VirtReg = SpillRegs[i].first;
- bool isKill = SpillRegs[i].second;
- if (!VRM.getPreSplitReg(VirtReg))
- continue; // Split interval spilled again.
- const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
- unsigned Phys = VRM.getPhys(VirtReg);
- int StackSlot = VRM.getStackSlot(VirtReg);
- MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
- TII->storeRegToStackSlot(MBB, llvm::next(MII), Phys, isKill, StackSlot, RC);
- MachineInstr *StoreMI = prior(oldNextMII);
- VRM.addSpillSlotUse(StackSlot, StoreMI);
- DEBUG(dbgs() << "Store:\t" << *StoreMI);
- VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
- }
- NextMII = llvm::next(MII);
- }
+ RegKills.reset();
+ KillOps.clear();
+ KillOps.resize(TRI->getNumRegs(), NULL);
- /// ReusedOperands - Keep track of operand reuse in case we need to undo
- /// reuse.
- ReuseInfo ReusedOperands(MI, TRI);
- SmallVector<unsigned, 4> VirtUseOps;
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || MO.getReg() == 0)
- continue; // Ignore non-register operands.
-
- unsigned VirtReg = MO.getReg();
- if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
- // Ignore physregs for spilling, but remember that it is used by this
- // function.
- RegInfo->setPhysRegUsed(VirtReg);
- continue;
- }
+ DistanceMap.clear();
+ for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
+ MII != E; ) {
+ MachineBasicBlock::iterator NextMII = llvm::next(MII);
- // We want to process implicit virtual register uses first.
- if (MO.isImplicit())
- // If the virtual register is implicitly defined, emit a implicit_def
- // before so scavenger knows it's "defined".
- // FIXME: This is a horrible hack done the by register allocator to
- // remat a definition with virtual register operand.
- VirtUseOps.insert(VirtUseOps.begin(), i);
- else
- VirtUseOps.push_back(i);
+ if (OptimizeByUnfold(MII, MaybeDeadStores, Spills, RegKills, KillOps))
+ NextMII = llvm::next(MII);
+
+ if (InsertEmergencySpills(MII))
+ NextMII = llvm::next(MII);
+
+ InsertRestores(MII, Spills, RegKills, KillOps);
+
+ if (InsertSpills(MII))
+ NextMII = llvm::next(MII);
+
+ bool Erased = false;
+ bool BackTracked = false;
+ MachineInstr &MI = *MII;
+
+ // Remember DbgValue's which reference stack slots.
+ if (MI.isDebugValue() && MI.getOperand(0).isFI())
+ Slot2DbgValues[MI.getOperand(0).getIndex()].push_back(&MI);
+
+ /// ReusedOperands - Keep track of operand reuse in case we need to undo
+ /// reuse.
+ ReuseInfo ReusedOperands(MI, TRI);
+ SmallVector<unsigned, 4> VirtUseOps;
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
+ if (!MO.isReg() || MO.getReg() == 0)
+ continue; // Ignore non-register operands.
+
+ unsigned VirtReg = MO.getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
+ // Ignore physregs for spilling, but remember that it is used by this
+ // function.
+ MRI->setPhysRegUsed(VirtReg);
+ continue;
}
- // Process all of the spilled uses and all non spilled reg references.
- SmallVector<int, 2> PotentialDeadStoreSlots;
- KilledMIRegs.clear();
- for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
- unsigned i = VirtUseOps[j];
- unsigned VirtReg = MI.getOperand(i).getReg();
- assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
- "Not a virtual register?");
-
- unsigned SubIdx = MI.getOperand(i).getSubReg();
- if (VRM.isAssignedReg(VirtReg)) {
- // This virtual register was assigned a physreg!
- unsigned Phys = VRM.getPhys(VirtReg);
- RegInfo->setPhysRegUsed(Phys);
- if (MI.getOperand(i).isDef())
- ReusedOperands.markClobbered(Phys);
- substitutePhysReg(MI.getOperand(i), Phys, *TRI);
- if (VRM.isImplicitlyDefined(VirtReg))
- // FIXME: Is this needed?
- BuildMI(MBB, &MI, MI.getDebugLoc(),
- TII->get(TargetOpcode::IMPLICIT_DEF), Phys);
- continue;
- }
+ // We want to process implicit virtual register uses first.
+ if (MO.isImplicit())
+ // If the virtual register is implicitly defined, emit an implicit_def
+ // before so the scavenger knows it's "defined".
+ // FIXME: This is a horrible hack done by the register allocator to
+ // remat a definition with a virtual register operand.
+ VirtUseOps.insert(VirtUseOps.begin(), i);
+ else
+ VirtUseOps.push_back(i);
+ }
- // This virtual register is now known to be a spilled value.
- if (!MI.getOperand(i).isUse())
- continue; // Handle defs in the loop below (handle use&def here though)
-
- bool AvoidReload = MI.getOperand(i).isUndef();
- // Check if it is defined by an implicit def. It should not be spilled.
- // Note, this is for correctness reason. e.g.
- // 8 %reg1024<def> = IMPLICIT_DEF
- // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
- // The live range [12, 14) are not part of the r1024 live interval since
- // it's defined by an implicit def. It will not conflicts with live
- // interval of r1025. Now suppose both registers are spilled, you can
- // easily see a situation where both registers are reloaded before
- // the INSERT_SUBREG and both target registers that would overlap.
- bool DoReMat = VRM.isReMaterialized(VirtReg);
- int SSorRMId = DoReMat
- ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
- int ReuseSlot = SSorRMId;
-
- // Check to see if this stack slot is available.
- unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
-
- // If this is a sub-register use, make sure the reuse register is in the
- // right register class. For example, for x86 not all of the 32-bit
- // registers have accessible sub-registers.
- // Similarly so for EXTRACT_SUBREG. Consider this:
- // EDI = op
- // MOV32_mr fi#1, EDI
- // ...
- // = EXTRACT_SUBREG fi#1
- // fi#1 is available in EDI, but it cannot be reused because it's not in
- // the right register file.
- if (PhysReg && !AvoidReload && (SubIdx || MI.isExtractSubreg())) {
- const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
- if (!RC->contains(PhysReg))
- PhysReg = 0;
- }
+ // Process all of the spilled uses and all non spilled reg references.
+ SmallVector<int, 2> PotentialDeadStoreSlots;
+ KilledMIRegs.clear();
+ for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
+ unsigned i = VirtUseOps[j];
+ unsigned VirtReg = MI.getOperand(i).getReg();
+ assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
+ "Not a virtual register?");
+
+ unsigned SubIdx = MI.getOperand(i).getSubReg();
+ if (VRM->isAssignedReg(VirtReg)) {
+ // This virtual register was assigned a physreg!
+ unsigned Phys = VRM->getPhys(VirtReg);
+ MRI->setPhysRegUsed(Phys);
+ if (MI.getOperand(i).isDef())
+ ReusedOperands.markClobbered(Phys);
+ substitutePhysReg(MI.getOperand(i), Phys, *TRI);
+ if (VRM->isImplicitlyDefined(VirtReg))
+ // FIXME: Is this needed?
+ BuildMI(*MBB, &MI, MI.getDebugLoc(),
+ TII->get(TargetOpcode::IMPLICIT_DEF), Phys);
+ continue;
+ }
- if (PhysReg && !AvoidReload) {
- // This spilled operand might be part of a two-address operand. If this
- // is the case, then changing it will necessarily require changing the
- // def part of the instruction as well. However, in some cases, we
- // aren't allowed to modify the reused register. If none of these cases
- // apply, reuse it.
- bool CanReuse = true;
- bool isTied = MI.isRegTiedToDefOperand(i);
- if (isTied) {
- // Okay, we have a two address operand. We can reuse this physreg as
- // long as we are allowed to clobber the value and there isn't an
- // earlier def that has already clobbered the physreg.
- CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
- Spills.canClobberPhysReg(PhysReg);
- }
-
- if (CanReuse) {
- // If this stack slot value is already available, reuse it!
- if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
- DEBUG(dbgs() << "Reusing RM#"
- << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
- else
- DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
- DEBUG(dbgs() << " from physreg "
- << TRI->getName(PhysReg) << " for vreg"
- << VirtReg <<" instead of reloading into physreg "
- << TRI->getName(VRM.getPhys(VirtReg)) << '\n');
- unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
- MI.getOperand(i).setReg(RReg);
- MI.getOperand(i).setSubReg(0);
-
- // The only technical detail we have is that we don't know that
- // PhysReg won't be clobbered by a reloaded stack slot that occurs
- // later in the instruction. In particular, consider 'op V1, V2'.
- // If V1 is available in physreg R0, we would choose to reuse it
- // here, instead of reloading it into the register the allocator
- // indicated (say R1). However, V2 might have to be reloaded
- // later, and it might indicate that it needs to live in R0. When
- // this occurs, we need to have information available that
- // indicates it is safe to use R1 for the reload instead of R0.
- //
- // To further complicate matters, we might conflict with an alias,
- // or R0 and R1 might not be compatible with each other. In this
- // case, we actually insert a reload for V1 in R1, ensuring that
- // we can get at R0 or its alias.
- ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
- VRM.getPhys(VirtReg), VirtReg);
- if (isTied)
- // Only mark it clobbered if this is a use&def operand.
- ReusedOperands.markClobbered(PhysReg);
- ++NumReused;
-
- if (MI.getOperand(i).isKill() &&
- ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
-
- // The store of this spilled value is potentially dead, but we
- // won't know for certain until we've confirmed that the re-use
- // above is valid, which means waiting until the other operands
- // are processed. For now we just track the spill slot, we'll
- // remove it after the other operands are processed if valid.
-
- PotentialDeadStoreSlots.push_back(ReuseSlot);
- }
+ // This virtual register is now known to be a spilled value.
+ if (!MI.getOperand(i).isUse())
+ continue; // Handle defs in the loop below (handle use&def here though)
+
+ bool AvoidReload = MI.getOperand(i).isUndef();
+ // Check if it is defined by an implicit def. It should not be spilled.
+ // Note, this is for correctness reason. e.g.
+ // 8 %reg1024<def> = IMPLICIT_DEF
+ // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
+ // The live range [12, 14) is not part of the r1024 live interval since
+ // it's defined by an implicit def. It will not conflict with the live
+ // interval of r1025. Now suppose both registers are spilled; you can
+ // easily see a situation where both registers are reloaded before
+ // the INSERT_SUBREG and both target registers that would overlap.
+ bool DoReMat = VRM->isReMaterialized(VirtReg);
+ int SSorRMId = DoReMat
+ ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
+ int ReuseSlot = SSorRMId;
+
+ // Check to see if this stack slot is available.
+ unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
+
+ // If this is a sub-register use, make sure the reuse register is in the
+ // right register class. For example, for x86 not all of the 32-bit
+ // registers have accessible sub-registers.
+ // Similarly so for EXTRACT_SUBREG. Consider this:
+ // EDI = op
+ // MOV32_mr fi#1, EDI
+ // ...
+ // = EXTRACT_SUBREG fi#1
+ // fi#1 is available in EDI, but it cannot be reused because it's not in
+ // the right register file.
+ if (PhysReg && !AvoidReload && SubIdx) {
+ const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
+ if (!RC->contains(PhysReg))
+ PhysReg = 0;
+ }
- // Mark is isKill if it's there no other uses of the same virtual
- // register and it's not a two-address operand. IsKill will be
- // unset if reg is reused.
- if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
- MI.getOperand(i).setIsKill();
- KilledMIRegs.insert(VirtReg);
+ if (PhysReg && !AvoidReload) {
+ // This spilled operand might be part of a two-address operand. If this
+ // is the case, then changing it will necessarily require changing the
+ // def part of the instruction as well. However, in some cases, we
+ // aren't allowed to modify the reused register. If none of these cases
+ // apply, reuse it.
+ bool CanReuse = true;
+ bool isTied = MI.isRegTiedToDefOperand(i);
+ if (isTied) {
+ // Okay, we have a two address operand. We can reuse this physreg as
+ // long as we are allowed to clobber the value and there isn't an
+ // earlier def that has already clobbered the physreg.
+ CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
+ Spills.canClobberPhysReg(PhysReg);
+ }
+ // If this is an asm, and a PhysReg alias is used elsewhere as an
+ // earlyclobber operand, we can't also use it as an input.
+ if (MI.isInlineAsm()) {
+ for (unsigned k = 0, e = MI.getNumOperands(); k != e; ++k) {
+ MachineOperand &MOk = MI.getOperand(k);
+ if (MOk.isReg() && MOk.isEarlyClobber() &&
+ TRI->regsOverlap(MOk.getReg(), PhysReg)) {
+ CanReuse = false;
+ DEBUG(dbgs() << "Not reusing physreg " << TRI->getName(PhysReg)
+ << " for vreg" << VirtReg << ": " << MOk << '\n');
+ break;
}
-
- continue;
- } // CanReuse
-
- // Otherwise we have a situation where we have a two-address instruction
- // whose mod/ref operand needs to be reloaded. This reload is already
- // available in some register "PhysReg", but if we used PhysReg as the
- // operand to our 2-addr instruction, the instruction would modify
- // PhysReg. This isn't cool if something later uses PhysReg and expects
- // to get its initial value.
- //
- // To avoid this problem, and to avoid doing a load right after a store,
- // we emit a copy from PhysReg into the designated register for this
- // operand.
- unsigned DesignatedReg = VRM.getPhys(VirtReg);
- assert(DesignatedReg && "Must map virtreg to physreg!");
-
- // Note that, if we reused a register for a previous operand, the
- // register we want to reload into might not actually be
- // available. If this occurs, use the register indicated by the
- // reuser.
- if (ReusedOperands.hasReuses())
- DesignatedReg = ReusedOperands.GetRegForReload(VirtReg,
- DesignatedReg, &MI,
- Spills, MaybeDeadStores, RegKills, KillOps, VRM);
-
- // If the mapped designated register is actually the physreg we have
- // incoming, we don't need to inserted a dead copy.
- if (DesignatedReg == PhysReg) {
- // If this stack slot value is already available, reuse it!
- if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
- DEBUG(dbgs() << "Reusing RM#"
- << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
- else
- DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
- DEBUG(dbgs() << " from physreg " << TRI->getName(PhysReg)
- << " for vreg" << VirtReg
- << " instead of reloading into same physreg.\n");
- unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
- MI.getOperand(i).setReg(RReg);
- MI.getOperand(i).setSubReg(0);
- ReusedOperands.markClobbered(RReg);
- ++NumReused;
- continue;
}
-
- const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
- RegInfo->setPhysRegUsed(DesignatedReg);
- ReusedOperands.markClobbered(DesignatedReg);
-
- // Back-schedule reloads and remats.
- MachineBasicBlock::iterator InsertLoc =
- ComputeReloadLoc(&MI, MBB.begin(), PhysReg, TRI, DoReMat,
- SSorRMId, TII, MF);
-
- TII->copyRegToReg(MBB, InsertLoc, DesignatedReg, PhysReg, RC, RC);
-
- MachineInstr *CopyMI = prior(InsertLoc);
- CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
- UpdateKills(*CopyMI, TRI, RegKills, KillOps);
-
- // This invalidates DesignatedReg.
- Spills.ClobberPhysReg(DesignatedReg);
-
- Spills.addAvailable(ReuseSlot, DesignatedReg);
- unsigned RReg =
- SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
+ }
+
+ if (CanReuse) {
+ // If this stack slot value is already available, reuse it!
+ if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
+ DEBUG(dbgs() << "Reusing RM#"
+ << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
+ else
+ DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
+ DEBUG(dbgs() << " from physreg "
+ << TRI->getName(PhysReg) << " for vreg"
+ << VirtReg <<" instead of reloading into physreg "
+ << TRI->getName(VRM->getPhys(VirtReg)) << '\n');
+ unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
MI.getOperand(i).setReg(RReg);
MI.getOperand(i).setSubReg(0);
- DEBUG(dbgs() << '\t' << *prior(MII));
+
+ // The only technical detail we have is that we don't know that
+ // PhysReg won't be clobbered by a reloaded stack slot that occurs
+ // later in the instruction. In particular, consider 'op V1, V2'.
+ // If V1 is available in physreg R0, we would choose to reuse it
+ // here, instead of reloading it into the register the allocator
+ // indicated (say R1). However, V2 might have to be reloaded
+ // later, and it might indicate that it needs to live in R0. When
+ // this occurs, we need to have information available that
+ // indicates it is safe to use R1 for the reload instead of R0.
+ //
+ // To further complicate matters, we might conflict with an alias,
+ // or R0 and R1 might not be compatible with each other. In this
+ // case, we actually insert a reload for V1 in R1, ensuring that
+ // we can get at R0 or its alias.
+ ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
+ VRM->getPhys(VirtReg), VirtReg);
+ if (isTied)
+ // Only mark it clobbered if this is a use&def operand.
+ ReusedOperands.markClobbered(PhysReg);
++NumReused;
+
+ if (MI.getOperand(i).isKill() &&
+ ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
+
+ // The store of this spilled value is potentially dead, but we
+ // won't know for certain until we've confirmed that the re-use
+ // above is valid, which means waiting until the other operands
+ // are processed. For now we just track the spill slot, we'll
+ // remove it after the other operands are processed if valid.
+
+ PotentialDeadStoreSlots.push_back(ReuseSlot);
+ }
+
+ // Mark it isKill if there are no other uses of the same virtual
+ // register and it's not a two-address operand. IsKill will be
+ // unset if reg is reused.
+ if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
+ MI.getOperand(i).setIsKill();
+ KilledMIRegs.insert(VirtReg);
+ }
+
continue;
- } // if (PhysReg)
-
- // Otherwise, reload it and remember that we have it.
- PhysReg = VRM.getPhys(VirtReg);
- assert(PhysReg && "Must map virtreg to physreg!");
+ } // CanReuse
+
+ // Otherwise we have a situation where we have a two-address instruction
+ // whose mod/ref operand needs to be reloaded. This reload is already
+ // available in some register "PhysReg", but if we used PhysReg as the
+ // operand to our 2-addr instruction, the instruction would modify
+ // PhysReg. This isn't cool if something later uses PhysReg and expects
+ // to get its initial value.
+ //
+ // To avoid this problem, and to avoid doing a load right after a store,
+ // we emit a copy from PhysReg into the designated register for this
+ // operand.
+ //
+ // This case also applies to an earlyclobber'd PhysReg.
+ unsigned DesignatedReg = VRM->getPhys(VirtReg);
+ assert(DesignatedReg && "Must map virtreg to physreg!");
// Note that, if we reused a register for a previous operand, the
// register we want to reload into might not actually be
// available. If this occurs, use the register indicated by the
// reuser.
if (ReusedOperands.hasReuses())
- PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
- Spills, MaybeDeadStores, RegKills, KillOps, VRM);
-
- RegInfo->setPhysRegUsed(PhysReg);
- ReusedOperands.markClobbered(PhysReg);
- if (AvoidReload)
- ++NumAvoided;
- else {
- // Back-schedule reloads and remats.
- MachineBasicBlock::iterator InsertLoc =
- ComputeReloadLoc(MII, MBB.begin(), PhysReg, TRI, DoReMat,
- SSorRMId, TII, MF);
-
- if (DoReMat) {
- ReMaterialize(MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, VRM);
- } else {
- const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
- TII->loadRegFromStackSlot(MBB, InsertLoc, PhysReg, SSorRMId, RC);
- MachineInstr *LoadMI = prior(InsertLoc);
- VRM.addSpillSlotUse(SSorRMId, LoadMI);
- ++NumLoads;
- DistanceMap.insert(std::make_pair(LoadMI, Dist++));
- }
- // This invalidates PhysReg.
- Spills.ClobberPhysReg(PhysReg);
+ DesignatedReg = ReusedOperands.
+ GetRegForReload(VirtReg, DesignatedReg, &MI, Spills,
+ MaybeDeadStores, RegKills, KillOps, *VRM);
+
+ // If the mapped designated register is actually the physreg we have
+ // incoming, we don't need to insert a dead copy.
+ if (DesignatedReg == PhysReg) {
+ // If this stack slot value is already available, reuse it!
+ if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
+ DEBUG(dbgs() << "Reusing RM#"
+ << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
+ else
+ DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
+ DEBUG(dbgs() << " from physreg " << TRI->getName(PhysReg)
+ << " for vreg" << VirtReg
+ << " instead of reloading into same physreg.\n");
+ unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
+ MI.getOperand(i).setReg(RReg);
+ MI.getOperand(i).setSubReg(0);
+ ReusedOperands.markClobbered(RReg);
+ ++NumReused;
+ continue;
+ }
- // Any stores to this stack slot are not dead anymore.
- if (!DoReMat)
- MaybeDeadStores[SSorRMId] = NULL;
- Spills.addAvailable(SSorRMId, PhysReg);
- // Assumes this is the last use. IsKill will be unset if reg is reused
- // unless it's a two-address operand.
- if (!MI.isRegTiedToDefOperand(i) &&
- KilledMIRegs.count(VirtReg) == 0) {
- MI.getOperand(i).setIsKill();
- KilledMIRegs.insert(VirtReg);
- }
+ MRI->setPhysRegUsed(DesignatedReg);
+ ReusedOperands.markClobbered(DesignatedReg);
- UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
- DEBUG(dbgs() << '\t' << *prior(InsertLoc));
- }
- unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
+ // Back-schedule reloads and remats.
+ MachineBasicBlock::iterator InsertLoc =
+ ComputeReloadLoc(&MI, MBB->begin(), PhysReg, TRI, DoReMat,
+ SSorRMId, TII, MF);
+ MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI.getDebugLoc(),
+ TII->get(TargetOpcode::COPY),
+ DesignatedReg).addReg(PhysReg);
+ CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
+ UpdateKills(*CopyMI, TRI, RegKills, KillOps);
+
+ // This invalidates DesignatedReg.
+ Spills.ClobberPhysReg(DesignatedReg);
+
+ Spills.addAvailable(ReuseSlot, DesignatedReg);
+ unsigned RReg =
+ SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
MI.getOperand(i).setReg(RReg);
MI.getOperand(i).setSubReg(0);
- }
+ DEBUG(dbgs() << '\t' << *prior(MII));
+ ++NumReused;
+ continue;
+ } // if (PhysReg)
- // Ok - now we can remove stores that have been confirmed dead.
- for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
- // This was the last use and the spilled value is still available
- // for reuse. That means the spill was unnecessary!
- int PDSSlot = PotentialDeadStoreSlots[j];
- MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
- if (DeadStore) {
- DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
- InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
- VRM.RemoveMachineInstrFromMaps(DeadStore);
- MBB.erase(DeadStore);
- MaybeDeadStores[PDSSlot] = NULL;
- ++NumDSE;
+ // Otherwise, reload it and remember that we have it.
+ PhysReg = VRM->getPhys(VirtReg);
+ assert(PhysReg && "Must map virtreg to physreg!");
+
+ // Note that, if we reused a register for a previous operand, the
+ // register we want to reload into might not actually be
+ // available. If this occurs, use the register indicated by the
+ // reuser.
+ if (ReusedOperands.hasReuses())
+ PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
+ Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
+
+ MRI->setPhysRegUsed(PhysReg);
+ ReusedOperands.markClobbered(PhysReg);
+ if (AvoidReload)
+ ++NumAvoided;
+ else {
+ // Back-schedule reloads and remats.
+ MachineBasicBlock::iterator InsertLoc =
+ ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, DoReMat,
+ SSorRMId, TII, MF);
+
+ if (DoReMat) {
+ ReMaterialize(*MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, *VRM);
+ } else {
+ const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
+ TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SSorRMId, RC,TRI);
+ MachineInstr *LoadMI = prior(InsertLoc);
+ VRM->addSpillSlotUse(SSorRMId, LoadMI);
+ ++NumLoads;
+ DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
+ }
+ // This invalidates PhysReg.
+ Spills.ClobberPhysReg(PhysReg);
+
+ // Any stores to this stack slot are not dead anymore.
+ if (!DoReMat)
+ MaybeDeadStores[SSorRMId] = NULL;
+ Spills.addAvailable(SSorRMId, PhysReg);
+ // Assumes this is the last use. IsKill will be unset if reg is reused
+ // unless it's a two-address operand.
+ if (!MI.isRegTiedToDefOperand(i) &&
+ KilledMIRegs.count(VirtReg) == 0) {
+ MI.getOperand(i).setIsKill();
+ KilledMIRegs.insert(VirtReg);
}
+
+ UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
+ DEBUG(dbgs() << '\t' << *prior(InsertLoc));
}
+ unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
+ MI.getOperand(i).setReg(RReg);
+ MI.getOperand(i).setSubReg(0);
+ }
+ // Ok - now we can remove stores that have been confirmed dead.
+ for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
+ // This was the last use and the spilled value is still available
+ // for reuse. That means the spill was unnecessary!
+ int PDSSlot = PotentialDeadStoreSlots[j];
+ MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
+ if (DeadStore) {
+ DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
+ InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
+ VRM->RemoveMachineInstrFromMaps(DeadStore);
+ MBB->erase(DeadStore);
+ MaybeDeadStores[PDSSlot] = NULL;
+ ++NumDSE;
+ }
+ }
- DEBUG(dbgs() << '\t' << MI);
+ DEBUG(dbgs() << '\t' << MI);
- // If we have folded references to memory operands, make sure we clear all
- // physical registers that may contain the value of the spilled virtual
- // register
- SmallSet<int, 2> FoldedSS;
- for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
- unsigned VirtReg = I->second.first;
- VirtRegMap::ModRef MR = I->second.second;
- DEBUG(dbgs() << "Folded vreg: " << VirtReg << " MR: " << MR);
- // MI2VirtMap be can updated which invalidate the iterator.
- // Increment the iterator first.
- ++I;
- int SS = VRM.getStackSlot(VirtReg);
- if (SS == VirtRegMap::NO_STACK_SLOT)
- continue;
- FoldedSS.insert(SS);
- DEBUG(dbgs() << " - StackSlot: " << SS << "\n");
-
- // If this folded instruction is just a use, check to see if it's a
- // straight load from the virt reg slot.
- if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
- int FrameIdx;
- unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
- if (DestReg && FrameIdx == SS) {
- // If this spill slot is available, turn it into a copy (or nothing)
- // instead of leaving it as a load!
- if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
- DEBUG(dbgs() << "Promoted Load To Copy: " << MI);
- if (DestReg != InReg) {
- const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
- TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
- MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
- unsigned SubIdx = DefMO->getSubReg();
- // Revisit the copy so we make sure to notice the effects of the
- // operation on the destreg (either needing to RA it if it's
- // virtual or needing to clobber any values if it's physical).
- NextMII = &MI;
- --NextMII; // backtrack to the copy.
- NextMII->setAsmPrinterFlag(MachineInstr::ReloadReuse);
- // Propagate the sub-register index over.
- if (SubIdx) {
- DefMO = NextMII->findRegisterDefOperand(DestReg);
- DefMO->setSubReg(SubIdx);
- }
-
- // Mark is killed.
- MachineOperand *KillOpnd = NextMII->findRegisterUseOperand(InReg);
- KillOpnd->setIsKill();
-
- BackTracked = true;
- } else {
- DEBUG(dbgs() << "Removing now-noop copy: " << MI);
- // Unset last kill since it's being reused.
- InvalidateKill(InReg, TRI, RegKills, KillOps);
- Spills.disallowClobberPhysReg(InReg);
- }
+ // If we have folded references to memory operands, make sure we clear all
+ // physical registers that may contain the value of the spilled virtual
+ // register.
- InvalidateKills(MI, TRI, RegKills, KillOps);
- VRM.RemoveMachineInstrFromMaps(&MI);
- MBB.erase(&MI);
- Erased = true;
- goto ProcessNextInst;
+ // Copy the folded virts to a small vector, we may change MI2VirtMap.
+ SmallVector<std::pair<unsigned, VirtRegMap::ModRef>, 4> FoldedVirts;
+ // C++0x FTW!
+ for (std::pair<VirtRegMap::MI2VirtMapTy::const_iterator,
+ VirtRegMap::MI2VirtMapTy::const_iterator> FVRange =
+ VRM->getFoldedVirts(&MI);
+ FVRange.first != FVRange.second; ++FVRange.first)
+ FoldedVirts.push_back(FVRange.first->second);
+
+ SmallSet<int, 2> FoldedSS;
+ for (unsigned FVI = 0, FVE = FoldedVirts.size(); FVI != FVE; ++FVI) {
+ unsigned VirtReg = FoldedVirts[FVI].first;
+ VirtRegMap::ModRef MR = FoldedVirts[FVI].second;
+ DEBUG(dbgs() << "Folded vreg: " << VirtReg << " MR: " << MR);
+
+ int SS = VRM->getStackSlot(VirtReg);
+ if (SS == VirtRegMap::NO_STACK_SLOT)
+ continue;
+ FoldedSS.insert(SS);
+ DEBUG(dbgs() << " - StackSlot: " << SS << "\n");
+
+ // If this folded instruction is just a use, check to see if it's a
+ // straight load from the virt reg slot.
+ if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
+ int FrameIdx;
+ unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
+ if (DestReg && FrameIdx == SS) {
+ // If this spill slot is available, turn it into a copy (or nothing)
+ // instead of leaving it as a load!
+ if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
+ DEBUG(dbgs() << "Promoted Load To Copy: " << MI);
+ if (DestReg != InReg) {
+ MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
+ MachineInstr *CopyMI = BuildMI(*MBB, &MI, MI.getDebugLoc(),
+ TII->get(TargetOpcode::COPY))
+ .addReg(DestReg, RegState::Define, DefMO->getSubReg())
+ .addReg(InReg, RegState::Kill);
+ // Revisit the copy so we make sure to notice the effects of the
+ // operation on the destreg (either needing to RA it if it's
+ // virtual or needing to clobber any values if it's physical).
+ NextMII = CopyMI;
+ NextMII->setAsmPrinterFlag(MachineInstr::ReloadReuse);
+ BackTracked = true;
+ } else {
+ DEBUG(dbgs() << "Removing now-noop copy: " << MI);
+ // Unset last kill since it's being reused.
+ InvalidateKill(InReg, TRI, RegKills, KillOps);
+ Spills.disallowClobberPhysReg(InReg);
}
- } else {
- unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
- SmallVector<MachineInstr*, 4> NewMIs;
- if (PhysReg &&
- TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
- MBB.insert(MII, NewMIs[0]);
+
+ InvalidateKills(MI, TRI, RegKills, KillOps);
+ VRM->RemoveMachineInstrFromMaps(&MI);
+ MBB->erase(&MI);
+ Erased = true;
+ goto ProcessNextInst;
+ }
+ } else {
+ unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
+ SmallVector<MachineInstr*, 4> NewMIs;
+ if (PhysReg &&
+ TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)){
+ MBB->insert(MII, NewMIs[0]);
+ InvalidateKills(MI, TRI, RegKills, KillOps);
+ VRM->RemoveMachineInstrFromMaps(&MI);
+ MBB->erase(&MI);
+ Erased = true;
+ --NextMII; // backtrack to the unfolded instruction.
+ BackTracked = true;
+ goto ProcessNextInst;
+ }
+ }
+ }
+
+ // If this reference is not a use, any previous store is now dead.
+ // Otherwise, the store to this stack slot is not dead anymore.
+ MachineInstr* DeadStore = MaybeDeadStores[SS];
+ if (DeadStore) {
+ bool isDead = !(MR & VirtRegMap::isRef);
+ MachineInstr *NewStore = NULL;
+ if (MR & VirtRegMap::isModRef) {
+ unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
+ SmallVector<MachineInstr*, 4> NewMIs;
+ // We can reuse this physreg as long as we are allowed to clobber
+ // the value and there isn't an earlier def that has already clobbered
+ // the physreg.
+ if (PhysReg &&
+ !ReusedOperands.isClobbered(PhysReg) &&
+ Spills.canClobberPhysReg(PhysReg) &&
+ !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
+ MachineOperand *KillOpnd =
+ DeadStore->findRegisterUseOperand(PhysReg, true);
+ // Note, if the store is storing a sub-register, it's possible the
+ // super-register is needed below.
+ if (KillOpnd && !KillOpnd->getSubReg() &&
+ TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){
+ MBB->insert(MII, NewMIs[0]);
+ NewStore = NewMIs[1];
+ MBB->insert(MII, NewStore);
+ VRM->addSpillSlotUse(SS, NewStore);
InvalidateKills(MI, TRI, RegKills, KillOps);
- VRM.RemoveMachineInstrFromMaps(&MI);
- MBB.erase(&MI);
+ VRM->RemoveMachineInstrFromMaps(&MI);
+ MBB->erase(&MI);
Erased = true;
+ --NextMII;
--NextMII; // backtrack to the unfolded instruction.
BackTracked = true;
- goto ProcessNextInst;
+ isDead = true;
+ ++NumSUnfold;
}
}
}
- // If this reference is not a use, any previous store is now dead.
- // Otherwise, the store to this stack slot is not dead anymore.
- MachineInstr* DeadStore = MaybeDeadStores[SS];
- if (DeadStore) {
- bool isDead = !(MR & VirtRegMap::isRef);
- MachineInstr *NewStore = NULL;
- if (MR & VirtRegMap::isModRef) {
- unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
- SmallVector<MachineInstr*, 4> NewMIs;
- // We can reuse this physreg as long as we are allowed to clobber
- // the value and there isn't an earlier def that has already clobbered
- // the physreg.
- if (PhysReg &&
- !ReusedOperands.isClobbered(PhysReg) &&
- Spills.canClobberPhysReg(PhysReg) &&
- !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
- MachineOperand *KillOpnd =
- DeadStore->findRegisterUseOperand(PhysReg, true);
- // Note, if the store is storing a sub-register, it's possible the
- // super-register is needed below.
- if (KillOpnd && !KillOpnd->getSubReg() &&
- TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){
- MBB.insert(MII, NewMIs[0]);
- NewStore = NewMIs[1];
- MBB.insert(MII, NewStore);
- VRM.addSpillSlotUse(SS, NewStore);
- InvalidateKills(MI, TRI, RegKills, KillOps);
- VRM.RemoveMachineInstrFromMaps(&MI);
- MBB.erase(&MI);
- Erased = true;
- --NextMII;
- --NextMII; // backtrack to the unfolded instruction.
- BackTracked = true;
- isDead = true;
- ++NumSUnfold;
- }
- }
- }
-
- if (isDead) { // Previous store is dead.
- // If we get here, the store is dead, nuke it now.
- DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
- InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
- VRM.RemoveMachineInstrFromMaps(DeadStore);
- MBB.erase(DeadStore);
- if (!NewStore)
- ++NumDSE;
- }
+ if (isDead) { // Previous store is dead.
+ // If we get here, the store is dead, nuke it now.
+ DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
+ InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
+ VRM->RemoveMachineInstrFromMaps(DeadStore);
+ MBB->erase(DeadStore);
+ if (!NewStore)
+ ++NumDSE;
+ }
- MaybeDeadStores[SS] = NULL;
- if (NewStore) {
- // Treat this store as a spill merged into a copy. That makes the
- // stack slot value available.
- VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
- goto ProcessNextInst;
- }
+ MaybeDeadStores[SS] = NULL;
+ if (NewStore) {
+ // Treat this store as a spill merged into a copy. That makes the
+ // stack slot value available.
+ VRM->virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
+ goto ProcessNextInst;
}
+ }
- // If the spill slot value is available, and this is a new definition of
- // the value, the value is not available anymore.
- if (MR & VirtRegMap::isMod) {
- // Notice that the value in this stack slot has been modified.
- Spills.ModifyStackSlotOrReMat(SS);
-
- // If this is *just* a mod of the value, check to see if this is just a
- // store to the spill slot (i.e. the spill got merged into the copy). If
- // so, realize that the vreg is available now, and add the store to the
- // MaybeDeadStore info.
- int StackSlot;
- if (!(MR & VirtRegMap::isRef)) {
- if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
- assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
- "Src hasn't been allocated yet?");
-
- if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
- Spills, RegKills, KillOps, TRI, VRM)) {
- NextMII = llvm::next(MII);
- BackTracked = true;
- goto ProcessNextInst;
- }
-
- // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
- // this as a potentially dead store in case there is a subsequent
- // store into the stack slot without a read from it.
- MaybeDeadStores[StackSlot] = &MI;
-
- // If the stack slot value was previously available in some other
- // register, change it now. Otherwise, make the register
- // available in PhysReg.
- Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
+ // If the spill slot value is available, and this is a new definition of
+ // the value, the value is not available anymore.
+ if (MR & VirtRegMap::isMod) {
+ // Notice that the value in this stack slot has been modified.
+ Spills.ModifyStackSlotOrReMat(SS);
+
+ // If this is *just* a mod of the value, check to see if this is just a
+ // store to the spill slot (i.e. the spill got merged into the copy). If
+ // so, realize that the vreg is available now, and add the store to the
+ // MaybeDeadStore info.
+ int StackSlot;
+ if (!(MR & VirtRegMap::isRef)) {
+ if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
+ assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
+ "Src hasn't been allocated yet?");
+
+ if (CommuteToFoldReload(MII, VirtReg, SrcReg, StackSlot,
+ Spills, RegKills, KillOps, TRI)) {
+ NextMII = llvm::next(MII);
+ BackTracked = true;
+ goto ProcessNextInst;
}
+
+ // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
+ // this as a potentially dead store in case there is a subsequent
+ // store into the stack slot without a read from it.
+ MaybeDeadStores[StackSlot] = &MI;
+
+ // If the stack slot value was previously available in some other
+ // register, change it now. Otherwise, make the register
+ // available in PhysReg.
+ Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
}
}
}
+ }
- // Process all of the spilled defs.
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI.getOperand(i);
- if (!(MO.isReg() && MO.getReg() && MO.isDef()))
- continue;
+ // Process all of the spilled defs.
+ SpilledMIRegs.clear();
+ for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI.getOperand(i);
+ if (!(MO.isReg() && MO.getReg() && MO.isDef()))
+ continue;
- unsigned VirtReg = MO.getReg();
- if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
- // Check to see if this is a noop copy. If so, eliminate the
- // instruction before considering the dest reg to be changed.
- // Also check if it's copying from an "undef", if so, we can't
- // eliminate this or else the undef marker is lost and it will
- // confuses the scavenger. This is extremely rare.
- unsigned Src, Dst, SrcSR, DstSR;
- if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst &&
- !MI.findRegisterUseOperand(Src)->isUndef()) {
- ++NumDCE;
- DEBUG(dbgs() << "Removing now-noop copy: " << MI);
- SmallVector<unsigned, 2> KillRegs;
- InvalidateKills(MI, TRI, RegKills, KillOps, &KillRegs);
- if (MO.isDead() && !KillRegs.empty()) {
- // Source register or an implicit super/sub-register use is killed.
- assert(KillRegs[0] == Dst ||
- TRI->isSubRegister(KillRegs[0], Dst) ||
- TRI->isSuperRegister(KillRegs[0], Dst));
- // Last def is now dead.
- TransferDeadness(&MBB, Dist, Src, RegKills, KillOps, VRM);
- }
- VRM.RemoveMachineInstrFromMaps(&MI);
- MBB.erase(&MI);
- Erased = true;
- Spills.disallowClobberPhysReg(VirtReg);
- goto ProcessNextInst;
+ unsigned VirtReg = MO.getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
+ // Check to see if this is a noop copy. If so, eliminate the
+ // instruction before considering the dest reg to be changed.
+ // Also check if it's copying from an "undef", if so, we can't
+ // eliminate this or else the undef marker is lost and it will
+ // confuse the scavenger. This is extremely rare.
+ if (MI.isIdentityCopy() && !MI.getOperand(1).isUndef() &&
+ MI.getNumOperands() == 2) {
+ ++NumDCE;
+ DEBUG(dbgs() << "Removing now-noop copy: " << MI);
+ SmallVector<unsigned, 2> KillRegs;
+ InvalidateKills(MI, TRI, RegKills, KillOps, &KillRegs);
+ if (MO.isDead() && !KillRegs.empty()) {
+ // Source register or an implicit super/sub-register use is killed.
+ assert(TRI->regsOverlap(KillRegs[0], MI.getOperand(0).getReg()));
+ // Last def is now dead.
+ TransferDeadness(MI.getOperand(1).getReg(), RegKills, KillOps);
}
+ VRM->RemoveMachineInstrFromMaps(&MI);
+ MBB->erase(&MI);
+ Erased = true;
+ Spills.disallowClobberPhysReg(VirtReg);
+ goto ProcessNextInst;
+ }
- // If it's not a no-op copy, it clobbers the value in the destreg.
- Spills.ClobberPhysReg(VirtReg);
- ReusedOperands.markClobbered(VirtReg);
-
- // Check to see if this instruction is a load from a stack slot into
- // a register. If so, this provides the stack slot value in the reg.
- int FrameIdx;
- if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
- assert(DestReg == VirtReg && "Unknown load situation!");
-
- // If it is a folded reference, then it's not safe to clobber.
- bool Folded = FoldedSS.count(FrameIdx);
- // Otherwise, if it wasn't available, remember that it is now!
- Spills.addAvailable(FrameIdx, DestReg, !Folded);
- goto ProcessNextInst;
- }
-
- continue;
+ // If it's not a no-op copy, it clobbers the value in the destreg.
+ Spills.ClobberPhysReg(VirtReg);
+ ReusedOperands.markClobbered(VirtReg);
+
+ // Check to see if this instruction is a load from a stack slot into
+ // a register. If so, this provides the stack slot value in the reg.
+ int FrameIdx;
+ if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
+ assert(DestReg == VirtReg && "Unknown load situation!");
+
+ // If it is a folded reference, then it's not safe to clobber.
+ bool Folded = FoldedSS.count(FrameIdx);
+ // Otherwise, if it wasn't available, remember that it is now!
+ Spills.addAvailable(FrameIdx, DestReg, !Folded);
+ goto ProcessNextInst;
}
- unsigned SubIdx = MO.getSubReg();
- bool DoReMat = VRM.isReMaterialized(VirtReg);
- if (DoReMat)
- ReMatDefs.insert(&MI);
-
- // The only vregs left are stack slot definitions.
- int StackSlot = VRM.getStackSlot(VirtReg);
- const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
-
- // If this def is part of a two-address operand, make sure to execute
- // the store from the correct physical register.
- unsigned PhysReg;
- unsigned TiedOp;
- if (MI.isRegTiedToUseOperand(i, &TiedOp)) {
- PhysReg = MI.getOperand(TiedOp).getReg();
- if (SubIdx) {
- unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
- assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
- "Can't find corresponding super-register!");
- PhysReg = SuperReg;
- }
- } else {
- PhysReg = VRM.getPhys(VirtReg);
- if (ReusedOperands.isClobbered(PhysReg)) {
- // Another def has taken the assigned physreg. It must have been a
- // use&def which got it due to reuse. Undo the reuse!
- PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
- Spills, MaybeDeadStores, RegKills, KillOps, VRM);
- }
+ continue;
+ }
+
+ unsigned SubIdx = MO.getSubReg();
+ bool DoReMat = VRM->isReMaterialized(VirtReg);
+ if (DoReMat)
+ ReMatDefs.insert(&MI);
+
+ // The only vregs left are stack slot definitions.
+ int StackSlot = VRM->getStackSlot(VirtReg);
+ const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
+
+ // If this def is part of a two-address operand, make sure to execute
+ // the store from the correct physical register.
+ unsigned PhysReg;
+ unsigned TiedOp;
+ if (MI.isRegTiedToUseOperand(i, &TiedOp)) {
+ PhysReg = MI.getOperand(TiedOp).getReg();
+ if (SubIdx) {
+ unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
+ assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
+ "Can't find corresponding super-register!");
+ PhysReg = SuperReg;
+ }
+ } else {
+ PhysReg = VRM->getPhys(VirtReg);
+ if (ReusedOperands.isClobbered(PhysReg)) {
+ // Another def has taken the assigned physreg. It must have been a
+ // use&def which got it due to reuse. Undo the reuse!
+ PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
+ Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
}
+ }
- assert(PhysReg && "VR not assigned a physical register?");
- RegInfo->setPhysRegUsed(PhysReg);
- unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
- ReusedOperands.markClobbered(RReg);
- MI.getOperand(i).setReg(RReg);
- MI.getOperand(i).setSubReg(0);
+ assert(PhysReg && "VR not assigned a physical register?");
+ MRI->setPhysRegUsed(PhysReg);
+ unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
+ ReusedOperands.markClobbered(RReg);
+ MI.getOperand(i).setReg(RReg);
+ MI.getOperand(i).setSubReg(0);
+
+ if (!MO.isDead() && SpilledMIRegs.insert(VirtReg)) {
+ MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
+ SpillRegToStackSlot(MII, -1, PhysReg, StackSlot, RC, true,
+ LastStore, Spills, ReMatDefs, RegKills, KillOps);
+ NextMII = llvm::next(MII);
- if (!MO.isDead()) {
- MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
- SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
- LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
- NextMII = llvm::next(MII);
-
- // Check to see if this is a noop copy. If so, eliminate the
- // instruction before considering the dest reg to be changed.
- {
- unsigned Src, Dst, SrcSR, DstSR;
- if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
- ++NumDCE;
- DEBUG(dbgs() << "Removing now-noop copy: " << MI);
- InvalidateKills(MI, TRI, RegKills, KillOps);
- VRM.RemoveMachineInstrFromMaps(&MI);
- MBB.erase(&MI);
- Erased = true;
- UpdateKills(*LastStore, TRI, RegKills, KillOps);
- goto ProcessNextInst;
- }
- }
- }
+ // Check to see if this is a noop copy. If so, eliminate the
+ // instruction before considering the dest reg to be changed.
+ if (MI.isIdentityCopy()) {
+ ++NumDCE;
+ DEBUG(dbgs() << "Removing now-noop copy: " << MI);
+ InvalidateKills(MI, TRI, RegKills, KillOps);
+ VRM->RemoveMachineInstrFromMaps(&MI);
+ MBB->erase(&MI);
+ Erased = true;
+ UpdateKills(*LastStore, TRI, RegKills, KillOps);
+ goto ProcessNextInst;
+ }
}
+ }
ProcessNextInst:
- // Delete dead instructions without side effects.
- if (!Erased && !BackTracked && isSafeToDelete(MI)) {
- InvalidateKills(MI, TRI, RegKills, KillOps);
- VRM.RemoveMachineInstrFromMaps(&MI);
- MBB.erase(&MI);
- Erased = true;
- }
- if (!Erased)
- DistanceMap.insert(std::make_pair(&MI, Dist++));
- if (!Erased && !BackTracked) {
- for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
- UpdateKills(*II, TRI, RegKills, KillOps);
- }
- MII = NextMII;
+ // Delete dead instructions without side effects.
+ if (!Erased && !BackTracked && isSafeToDelete(MI)) {
+ InvalidateKills(MI, TRI, RegKills, KillOps);
+ VRM->RemoveMachineInstrFromMaps(&MI);
+ MBB->erase(&MI);
+ Erased = true;
}
-
+ if (!Erased)
+ DistanceMap.insert(std::make_pair(&MI, DistanceMap.size()));
+ if (!Erased && !BackTracked) {
+ for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
+ UpdateKills(*II, TRI, RegKills, KillOps);
+ }
+ MII = NextMII;
}
-};
-
}
llvm::VirtRegRewriter* llvm::createVirtRegRewriter() {
diff --git a/libclamav/c++/llvm/lib/CodeGen/VirtRegRewriter.h b/libclamav/c++/llvm/lib/CodeGen/VirtRegRewriter.h
index 44f9df6..93474e0 100644
--- a/libclamav/c++/llvm/lib/CodeGen/VirtRegRewriter.h
+++ b/libclamav/c++/llvm/lib/CodeGen/VirtRegRewriter.h
@@ -10,11 +10,10 @@
#ifndef LLVM_CODEGEN_VIRTREGREWRITER_H
#define LLVM_CODEGEN_VIRTREGREWRITER_H
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "VirtRegMap.h"
-
namespace llvm {
+ class LiveIntervals;
+ class MachineFunction;
+ class VirtRegMap;
/// VirtRegRewriter interface: Implementations of this interface assign
/// spilled virtual registers to stack slots, rewriting the code.
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/libclamav/c++/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
index b303510..66b8ee3 100644
--- a/libclamav/c++/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/libclamav/c++/llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -66,10 +66,39 @@ ExecutionEngine::~ExecutionEngine() {
delete Modules[i];
}
+namespace {
+// This class automatically deletes the memory block when the GlobalVariable is
+// destroyed.
+class GVMemoryBlock : public CallbackVH {
+ GVMemoryBlock(const GlobalVariable *GV)
+ : CallbackVH(const_cast<GlobalVariable*>(GV)) {}
+
+public:
+ // Returns the address the GlobalVariable should be written into. The
+ // GVMemoryBlock object prefixes that.
+ static char *Create(const GlobalVariable *GV, const TargetData& TD) {
+ const Type *ElTy = GV->getType()->getElementType();
+ size_t GVSize = (size_t)TD.getTypeAllocSize(ElTy);
+ void *RawMemory = ::operator new(
+ TargetData::RoundUpAlignment(sizeof(GVMemoryBlock),
+ TD.getPreferredAlignment(GV))
+ + GVSize);
+ new(RawMemory) GVMemoryBlock(GV);
+ return static_cast<char*>(RawMemory) + sizeof(GVMemoryBlock);
+ }
+
+ virtual void deleted() {
+ // We allocated with operator new and with some extra memory hanging off the
+ // end, so don't just delete this. I'm not sure if this is actually
+ // required.
+ this->~GVMemoryBlock();
+ ::operator delete(this);
+ }
+};
+} // anonymous namespace
+
char* ExecutionEngine::getMemoryForGV(const GlobalVariable* GV) {
- const Type *ElTy = GV->getType()->getElementType();
- size_t GVSize = (size_t)getTargetData()->getTypeAllocSize(ElTy);
- return new char[GVSize];
+ return GVMemoryBlock::Create(GV, *getTargetData());
}
/// removeModule - Remove a Module from the list of modules.
@@ -221,35 +250,55 @@ const GlobalValue *ExecutionEngine::getGlobalValueAtAddress(void *Addr) {
return I != EEState.getGlobalAddressReverseMap(locked).end() ? I->second : 0;
}
-// CreateArgv - Turn a vector of strings into a nice argv style array of
-// pointers to null terminated strings.
-//
-static void *CreateArgv(LLVMContext &C, ExecutionEngine *EE,
- const std::vector<std::string> &InputArgv) {
+namespace {
+class ArgvArray {
+ char *Array;
+ std::vector<char*> Values;
+public:
+ ArgvArray() : Array(NULL) {}
+ ~ArgvArray() { clear(); }
+ void clear() {
+ delete[] Array;
+ Array = NULL;
+ for (size_t I = 0, E = Values.size(); I != E; ++I) {
+ delete[] Values[I];
+ }
+ Values.clear();
+ }
+ /// Turn a vector of strings into a nice argv style array of pointers to null
+ /// terminated strings.
+ void *reset(LLVMContext &C, ExecutionEngine *EE,
+ const std::vector<std::string> &InputArgv);
+};
+} // anonymous namespace
+void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE,
+ const std::vector<std::string> &InputArgv) {
+ clear(); // Free the old contents.
unsigned PtrSize = EE->getTargetData()->getPointerSize();
- char *Result = new char[(InputArgv.size()+1)*PtrSize];
+ Array = new char[(InputArgv.size()+1)*PtrSize];
- DEBUG(dbgs() << "JIT: ARGV = " << (void*)Result << "\n");
+ DEBUG(dbgs() << "JIT: ARGV = " << (void*)Array << "\n");
const Type *SBytePtr = Type::getInt8PtrTy(C);
for (unsigned i = 0; i != InputArgv.size(); ++i) {
unsigned Size = InputArgv[i].size()+1;
char *Dest = new char[Size];
+ Values.push_back(Dest);
DEBUG(dbgs() << "JIT: ARGV[" << i << "] = " << (void*)Dest << "\n");
std::copy(InputArgv[i].begin(), InputArgv[i].end(), Dest);
Dest[Size-1] = 0;
- // Endian safe: Result[i] = (PointerTy)Dest;
- EE->StoreValueToMemory(PTOGV(Dest), (GenericValue*)(Result+i*PtrSize),
+ // Endian safe: Array[i] = (PointerTy)Dest;
+ EE->StoreValueToMemory(PTOGV(Dest), (GenericValue*)(Array+i*PtrSize),
SBytePtr);
}
// Null terminate it
EE->StoreValueToMemory(PTOGV(0),
- (GenericValue*)(Result+InputArgv.size()*PtrSize),
+ (GenericValue*)(Array+InputArgv.size()*PtrSize),
SBytePtr);
- return Result;
+ return Array;
}
@@ -330,34 +379,36 @@ int ExecutionEngine::runFunctionAsMain(Function *Fn,
switch (NumArgs) {
case 3:
if (FTy->getParamType(2) != PPInt8Ty) {
- llvm_report_error("Invalid type for third argument of main() supplied");
+ report_fatal_error("Invalid type for third argument of main() supplied");
}
// FALLS THROUGH
case 2:
if (FTy->getParamType(1) != PPInt8Ty) {
- llvm_report_error("Invalid type for second argument of main() supplied");
+ report_fatal_error("Invalid type for second argument of main() supplied");
}
// FALLS THROUGH
case 1:
if (!FTy->getParamType(0)->isIntegerTy(32)) {
- llvm_report_error("Invalid type for first argument of main() supplied");
+ report_fatal_error("Invalid type for first argument of main() supplied");
}
// FALLS THROUGH
case 0:
if (!FTy->getReturnType()->isIntegerTy() &&
!FTy->getReturnType()->isVoidTy()) {
- llvm_report_error("Invalid return type of main() supplied");
+ report_fatal_error("Invalid return type of main() supplied");
}
break;
default:
- llvm_report_error("Invalid number of arguments of main() supplied");
+ report_fatal_error("Invalid number of arguments of main() supplied");
}
+ ArgvArray CArgv;
+ ArgvArray CEnv;
if (NumArgs) {
GVArgs.push_back(GVArgc); // Arg #0 = argc.
if (NumArgs > 1) {
// Arg #1 = argv.
- GVArgs.push_back(PTOGV(CreateArgv(Fn->getContext(), this, argv)));
+ GVArgs.push_back(PTOGV(CArgv.reset(Fn->getContext(), this, argv)));
assert(!isTargetNullPtr(this, GVTOP(GVArgs[1])) &&
"argv[0] was null after CreateArgv");
if (NumArgs > 2) {
@@ -365,7 +416,7 @@ int ExecutionEngine::runFunctionAsMain(Function *Fn,
for (unsigned i = 0; envp[i]; ++i)
EnvVars.push_back(envp[i]);
// Arg #2 = envp.
- GVArgs.push_back(PTOGV(CreateArgv(Fn->getContext(), this, EnvVars)));
+ GVArgs.push_back(PTOGV(CEnv.reset(Fn->getContext(), this, EnvVars)));
}
}
}
@@ -665,7 +716,7 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
case Instruction::FDiv:
GV.FloatVal = LHS.FloatVal / RHS.FloatVal; break;
case Instruction::FRem:
- GV.FloatVal = ::fmodf(LHS.FloatVal,RHS.FloatVal); break;
+ GV.FloatVal = std::fmod(LHS.FloatVal,RHS.FloatVal); break;
}
break;
case Type::DoubleTyID:
@@ -680,7 +731,7 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
case Instruction::FDiv:
GV.DoubleVal = LHS.DoubleVal / RHS.DoubleVal; break;
case Instruction::FRem:
- GV.DoubleVal = ::fmod(LHS.DoubleVal,RHS.DoubleVal); break;
+ GV.DoubleVal = std::fmod(LHS.DoubleVal,RHS.DoubleVal); break;
}
break;
case Type::X86_FP80TyID:
@@ -721,7 +772,7 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
std::string msg;
raw_string_ostream Msg(msg);
Msg << "ConstantExpr not handled: " << *CE;
- llvm_report_error(Msg.str());
+ report_fatal_error(Msg.str());
}
GenericValue Result;
@@ -757,7 +808,7 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
std::string msg;
raw_string_ostream Msg(msg);
Msg << "ERROR: Constant unimplemented for type: " << *C->getType();
- llvm_report_error(Msg.str());
+ report_fatal_error(Msg.str());
}
return Result;
}
@@ -885,7 +936,7 @@ void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
std::string msg;
raw_string_ostream Msg(msg);
Msg << "Cannot load value of type " << *Ty << "!";
- llvm_report_error(Msg.str());
+ report_fatal_error(Msg.str());
}
}
@@ -1001,7 +1052,7 @@ void ExecutionEngine::emitGlobals() {
sys::DynamicLibrary::SearchForAddressOfSymbol(I->getName()))
addGlobalMapping(I, SymAddr);
else {
- llvm_report_error("Could not resolve external global address: "
+ report_fatal_error("Could not resolve external global address: "
+I->getName());
}
}
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp b/libclamav/c++/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
index c7495d4..f8f1f4a 100644
--- a/libclamav/c++/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
+++ b/libclamav/c++/llvm/lib/ExecutionEngine/ExecutionEngineBindings.cpp
@@ -236,6 +236,10 @@ LLVMBool LLVMFindFunction(LLVMExecutionEngineRef EE, const char *Name,
return 1;
}
+void *LLVMRecompileAndRelinkFunction(LLVMExecutionEngineRef EE, LLVMValueRef Fn) {
+ return unwrap(EE)->recompileAndRelinkFunction(unwrap<Function>(Fn));
+}
+
LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE) {
return wrap(unwrap(EE)->getTargetData());
}
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/CMakeLists.txt b/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/CMakeLists.txt
deleted file mode 100644
index dff97fa..0000000
--- a/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-add_llvm_library(LLVMInterpreter
- Execution.cpp
- ExternalFunctions.cpp
- Interpreter.cpp
- )
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
deleted file mode 100644
index a2aad5a..0000000
--- a/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ /dev/null
@@ -1,1352 +0,0 @@
-//===-- Execution.cpp - Implement code to simulate the program ------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the actual instruction interpreter.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "interpreter"
-#include "Interpreter.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Instructions.h"
-#include "llvm/CodeGen/IntrinsicLowering.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MathExtras.h"
-#include <algorithm>
-#include <cmath>
-using namespace llvm;
-
-STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");
-
-static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
- cl::desc("make the interpreter print every volatile load and store"));
-
-//===----------------------------------------------------------------------===//
-// Various Helper Functions
-//===----------------------------------------------------------------------===//
-
-static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
- SF.Values[V] = Val;
-}
-
-//===----------------------------------------------------------------------===//
-// Binary Instruction Implementations
-//===----------------------------------------------------------------------===//
-
-#define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
- case Type::TY##TyID: \
- Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
- break
-
-static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
- GenericValue Src2, const Type *Ty) {
- switch (Ty->getTypeID()) {
- IMPLEMENT_BINARY_OPERATOR(+, Float);
- IMPLEMENT_BINARY_OPERATOR(+, Double);
- default:
- dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
- llvm_unreachable(0);
- }
-}
-
-static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
- GenericValue Src2, const Type *Ty) {
- switch (Ty->getTypeID()) {
- IMPLEMENT_BINARY_OPERATOR(-, Float);
- IMPLEMENT_BINARY_OPERATOR(-, Double);
- default:
- dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
- llvm_unreachable(0);
- }
-}
-
-static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
- GenericValue Src2, const Type *Ty) {
- switch (Ty->getTypeID()) {
- IMPLEMENT_BINARY_OPERATOR(*, Float);
- IMPLEMENT_BINARY_OPERATOR(*, Double);
- default:
- dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
- llvm_unreachable(0);
- }
-}
-
-static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
- GenericValue Src2, const Type *Ty) {
- switch (Ty->getTypeID()) {
- IMPLEMENT_BINARY_OPERATOR(/, Float);
- IMPLEMENT_BINARY_OPERATOR(/, Double);
- default:
- dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
- llvm_unreachable(0);
- }
-}
-
-static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
- GenericValue Src2, const Type *Ty) {
- switch (Ty->getTypeID()) {
- case Type::FloatTyID:
- Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
- break;
- case Type::DoubleTyID:
- Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
- break;
- default:
- dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
- llvm_unreachable(0);
- }
-}
-
-#define IMPLEMENT_INTEGER_ICMP(OP, TY) \
- case Type::IntegerTyID: \
- Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
- break;
-
-// Handle pointers specially because they must be compared with only as much
-// width as the host has. We _do not_ want to be comparing 64 bit values when
-// running on a 32-bit target, otherwise the upper 32 bits might mess up
-// comparisons if they contain garbage.
-#define IMPLEMENT_POINTER_ICMP(OP) \
- case Type::PointerTyID: \
- Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
- (void*)(intptr_t)Src2.PointerVal); \
- break;
-
-static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- switch (Ty->getTypeID()) {
- IMPLEMENT_INTEGER_ICMP(eq,Ty);
- IMPLEMENT_POINTER_ICMP(==);
- default:
- dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
- llvm_unreachable(0);
- }
- return Dest;
-}
-
-static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- switch (Ty->getTypeID()) {
- IMPLEMENT_INTEGER_ICMP(ne,Ty);
- IMPLEMENT_POINTER_ICMP(!=);
- default:
- dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
- llvm_unreachable(0);
- }
- return Dest;
-}
-
-static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- switch (Ty->getTypeID()) {
- IMPLEMENT_INTEGER_ICMP(ult,Ty);
- IMPLEMENT_POINTER_ICMP(<);
- default:
- dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
- llvm_unreachable(0);
- }
- return Dest;
-}
-
-static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- switch (Ty->getTypeID()) {
- IMPLEMENT_INTEGER_ICMP(slt,Ty);
- IMPLEMENT_POINTER_ICMP(<);
- default:
- dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
- llvm_unreachable(0);
- }
- return Dest;
-}
-
-static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- switch (Ty->getTypeID()) {
- IMPLEMENT_INTEGER_ICMP(ugt,Ty);
- IMPLEMENT_POINTER_ICMP(>);
- default:
- dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
- llvm_unreachable(0);
- }
- return Dest;
-}
-
-static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- switch (Ty->getTypeID()) {
- IMPLEMENT_INTEGER_ICMP(sgt,Ty);
- IMPLEMENT_POINTER_ICMP(>);
- default:
- dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
- llvm_unreachable(0);
- }
- return Dest;
-}
-
-static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- switch (Ty->getTypeID()) {
- IMPLEMENT_INTEGER_ICMP(ule,Ty);
- IMPLEMENT_POINTER_ICMP(<=);
- default:
- dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
- llvm_unreachable(0);
- }
- return Dest;
-}
-
-static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- switch (Ty->getTypeID()) {
- IMPLEMENT_INTEGER_ICMP(sle,Ty);
- IMPLEMENT_POINTER_ICMP(<=);
- default:
- dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
- llvm_unreachable(0);
- }
- return Dest;
-}
-
-static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- switch (Ty->getTypeID()) {
- IMPLEMENT_INTEGER_ICMP(uge,Ty);
- IMPLEMENT_POINTER_ICMP(>=);
- default:
- dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
- llvm_unreachable(0);
- }
- return Dest;
-}
-
-static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- switch (Ty->getTypeID()) {
- IMPLEMENT_INTEGER_ICMP(sge,Ty);
- IMPLEMENT_POINTER_ICMP(>=);
- default:
- dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
- llvm_unreachable(0);
- }
- return Dest;
-}
-
-void Interpreter::visitICmpInst(ICmpInst &I) {
- ExecutionContext &SF = ECStack.back();
- const Type *Ty = I.getOperand(0)->getType();
- GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
- GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
- GenericValue R; // Result
-
- switch (I.getPredicate()) {
- case ICmpInst::ICMP_EQ: R = executeICMP_EQ(Src1, Src2, Ty); break;
- case ICmpInst::ICMP_NE: R = executeICMP_NE(Src1, Src2, Ty); break;
- case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
- case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
- case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
- case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
- case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
- case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
- case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
- case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
- default:
- dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
- llvm_unreachable(0);
- }
-
- SetValue(&I, R, SF);
-}
-
-#define IMPLEMENT_FCMP(OP, TY) \
- case Type::TY##TyID: \
- Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
- break
-
-static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- switch (Ty->getTypeID()) {
- IMPLEMENT_FCMP(==, Float);
- IMPLEMENT_FCMP(==, Double);
- default:
- dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
- llvm_unreachable(0);
- }
- return Dest;
-}
-
-static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- switch (Ty->getTypeID()) {
- IMPLEMENT_FCMP(!=, Float);
- IMPLEMENT_FCMP(!=, Double);
-
- default:
- dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
- llvm_unreachable(0);
- }
- return Dest;
-}
-
-static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- switch (Ty->getTypeID()) {
- IMPLEMENT_FCMP(<=, Float);
- IMPLEMENT_FCMP(<=, Double);
- default:
- dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
- llvm_unreachable(0);
- }
- return Dest;
-}
-
-static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- switch (Ty->getTypeID()) {
- IMPLEMENT_FCMP(>=, Float);
- IMPLEMENT_FCMP(>=, Double);
- default:
- dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
- llvm_unreachable(0);
- }
- return Dest;
-}
-
-static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- switch (Ty->getTypeID()) {
- IMPLEMENT_FCMP(<, Float);
- IMPLEMENT_FCMP(<, Double);
- default:
- dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
- llvm_unreachable(0);
- }
- return Dest;
-}
-
-static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- switch (Ty->getTypeID()) {
- IMPLEMENT_FCMP(>, Float);
- IMPLEMENT_FCMP(>, Double);
- default:
- dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
- llvm_unreachable(0);
- }
- return Dest;
-}
-
-#define IMPLEMENT_UNORDERED(TY, X,Y) \
- if (TY->isFloatTy()) { \
- if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
- Dest.IntVal = APInt(1,true); \
- return Dest; \
- } \
- } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
- Dest.IntVal = APInt(1,true); \
- return Dest; \
- }
-
-
-static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- IMPLEMENT_UNORDERED(Ty, Src1, Src2)
- return executeFCMP_OEQ(Src1, Src2, Ty);
-}
-
-static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- IMPLEMENT_UNORDERED(Ty, Src1, Src2)
- return executeFCMP_ONE(Src1, Src2, Ty);
-}
-
-static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- IMPLEMENT_UNORDERED(Ty, Src1, Src2)
- return executeFCMP_OLE(Src1, Src2, Ty);
-}
-
-static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- IMPLEMENT_UNORDERED(Ty, Src1, Src2)
- return executeFCMP_OGE(Src1, Src2, Ty);
-}
-
-static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- IMPLEMENT_UNORDERED(Ty, Src1, Src2)
- return executeFCMP_OLT(Src1, Src2, Ty);
-}
-
-static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- IMPLEMENT_UNORDERED(Ty, Src1, Src2)
- return executeFCMP_OGT(Src1, Src2, Ty);
-}
-
-static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- if (Ty->isFloatTy())
- Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
- Src2.FloatVal == Src2.FloatVal));
- else
- Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
- Src2.DoubleVal == Src2.DoubleVal));
- return Dest;
-}
-
-static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
- const Type *Ty) {
- GenericValue Dest;
- if (Ty->isFloatTy())
- Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
- Src2.FloatVal != Src2.FloatVal));
- else
- Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
- Src2.DoubleVal != Src2.DoubleVal));
- return Dest;
-}
-
-void Interpreter::visitFCmpInst(FCmpInst &I) {
- ExecutionContext &SF = ECStack.back();
- const Type *Ty = I.getOperand(0)->getType();
- GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
- GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
- GenericValue R; // Result
-
- switch (I.getPredicate()) {
- case FCmpInst::FCMP_FALSE: R.IntVal = APInt(1,false); break;
- case FCmpInst::FCMP_TRUE: R.IntVal = APInt(1,true); break;
- case FCmpInst::FCMP_ORD: R = executeFCMP_ORD(Src1, Src2, Ty); break;
- case FCmpInst::FCMP_UNO: R = executeFCMP_UNO(Src1, Src2, Ty); break;
- case FCmpInst::FCMP_UEQ: R = executeFCMP_UEQ(Src1, Src2, Ty); break;
- case FCmpInst::FCMP_OEQ: R = executeFCMP_OEQ(Src1, Src2, Ty); break;
- case FCmpInst::FCMP_UNE: R = executeFCMP_UNE(Src1, Src2, Ty); break;
- case FCmpInst::FCMP_ONE: R = executeFCMP_ONE(Src1, Src2, Ty); break;
- case FCmpInst::FCMP_ULT: R = executeFCMP_ULT(Src1, Src2, Ty); break;
- case FCmpInst::FCMP_OLT: R = executeFCMP_OLT(Src1, Src2, Ty); break;
- case FCmpInst::FCMP_UGT: R = executeFCMP_UGT(Src1, Src2, Ty); break;
- case FCmpInst::FCMP_OGT: R = executeFCMP_OGT(Src1, Src2, Ty); break;
- case FCmpInst::FCMP_ULE: R = executeFCMP_ULE(Src1, Src2, Ty); break;
- case FCmpInst::FCMP_OLE: R = executeFCMP_OLE(Src1, Src2, Ty); break;
- case FCmpInst::FCMP_UGE: R = executeFCMP_UGE(Src1, Src2, Ty); break;
- case FCmpInst::FCMP_OGE: R = executeFCMP_OGE(Src1, Src2, Ty); break;
- default:
- dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
- llvm_unreachable(0);
- }
-
- SetValue(&I, R, SF);
-}
-
-static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
- GenericValue Src2, const Type *Ty) {
- GenericValue Result;
- switch (predicate) {
- case ICmpInst::ICMP_EQ: return executeICMP_EQ(Src1, Src2, Ty);
- case ICmpInst::ICMP_NE: return executeICMP_NE(Src1, Src2, Ty);
- case ICmpInst::ICMP_UGT: return executeICMP_UGT(Src1, Src2, Ty);
- case ICmpInst::ICMP_SGT: return executeICMP_SGT(Src1, Src2, Ty);
- case ICmpInst::ICMP_ULT: return executeICMP_ULT(Src1, Src2, Ty);
- case ICmpInst::ICMP_SLT: return executeICMP_SLT(Src1, Src2, Ty);
- case ICmpInst::ICMP_UGE: return executeICMP_UGE(Src1, Src2, Ty);
- case ICmpInst::ICMP_SGE: return executeICMP_SGE(Src1, Src2, Ty);
- case ICmpInst::ICMP_ULE: return executeICMP_ULE(Src1, Src2, Ty);
- case ICmpInst::ICMP_SLE: return executeICMP_SLE(Src1, Src2, Ty);
- case FCmpInst::FCMP_ORD: return executeFCMP_ORD(Src1, Src2, Ty);
- case FCmpInst::FCMP_UNO: return executeFCMP_UNO(Src1, Src2, Ty);
- case FCmpInst::FCMP_OEQ: return executeFCMP_OEQ(Src1, Src2, Ty);
- case FCmpInst::FCMP_UEQ: return executeFCMP_UEQ(Src1, Src2, Ty);
- case FCmpInst::FCMP_ONE: return executeFCMP_ONE(Src1, Src2, Ty);
- case FCmpInst::FCMP_UNE: return executeFCMP_UNE(Src1, Src2, Ty);
- case FCmpInst::FCMP_OLT: return executeFCMP_OLT(Src1, Src2, Ty);
- case FCmpInst::FCMP_ULT: return executeFCMP_ULT(Src1, Src2, Ty);
- case FCmpInst::FCMP_OGT: return executeFCMP_OGT(Src1, Src2, Ty);
- case FCmpInst::FCMP_UGT: return executeFCMP_UGT(Src1, Src2, Ty);
- case FCmpInst::FCMP_OLE: return executeFCMP_OLE(Src1, Src2, Ty);
- case FCmpInst::FCMP_ULE: return executeFCMP_ULE(Src1, Src2, Ty);
- case FCmpInst::FCMP_OGE: return executeFCMP_OGE(Src1, Src2, Ty);
- case FCmpInst::FCMP_UGE: return executeFCMP_UGE(Src1, Src2, Ty);
- case FCmpInst::FCMP_FALSE: {
- GenericValue Result;
- Result.IntVal = APInt(1, false);
- return Result;
- }
- case FCmpInst::FCMP_TRUE: {
- GenericValue Result;
- Result.IntVal = APInt(1, true);
- return Result;
- }
- default:
- dbgs() << "Unhandled Cmp predicate\n";
- llvm_unreachable(0);
- }
-}
-
-void Interpreter::visitBinaryOperator(BinaryOperator &I) {
- ExecutionContext &SF = ECStack.back();
- const Type *Ty = I.getOperand(0)->getType();
- GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
- GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
- GenericValue R; // Result
-
- switch (I.getOpcode()) {
- case Instruction::Add: R.IntVal = Src1.IntVal + Src2.IntVal; break;
- case Instruction::Sub: R.IntVal = Src1.IntVal - Src2.IntVal; break;
- case Instruction::Mul: R.IntVal = Src1.IntVal * Src2.IntVal; break;
- case Instruction::FAdd: executeFAddInst(R, Src1, Src2, Ty); break;
- case Instruction::FSub: executeFSubInst(R, Src1, Src2, Ty); break;
- case Instruction::FMul: executeFMulInst(R, Src1, Src2, Ty); break;
- case Instruction::FDiv: executeFDivInst(R, Src1, Src2, Ty); break;
- case Instruction::FRem: executeFRemInst(R, Src1, Src2, Ty); break;
- case Instruction::UDiv: R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
- case Instruction::SDiv: R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
- case Instruction::URem: R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
- case Instruction::SRem: R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
- case Instruction::And: R.IntVal = Src1.IntVal & Src2.IntVal; break;
- case Instruction::Or: R.IntVal = Src1.IntVal | Src2.IntVal; break;
- case Instruction::Xor: R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
- default:
- dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
- llvm_unreachable(0);
- }
-
- SetValue(&I, R, SF);
-}
-
-static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
- GenericValue Src3) {
- return Src1.IntVal == 0 ? Src3 : Src2;
-}
-
-void Interpreter::visitSelectInst(SelectInst &I) {
- ExecutionContext &SF = ECStack.back();
- GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
- GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
- GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
- GenericValue R = executeSelectInst(Src1, Src2, Src3);
- SetValue(&I, R, SF);
-}
-
-
-//===----------------------------------------------------------------------===//
-// Terminator Instruction Implementations
-//===----------------------------------------------------------------------===//
-
-void Interpreter::exitCalled(GenericValue GV) {
- // runAtExitHandlers() assumes there are no stack frames, but
- // if exit() was called, then it had a stack frame. Blow away
- // the stack before interpreting atexit handlers.
- ECStack.clear();
- runAtExitHandlers();
- exit(GV.IntVal.zextOrTrunc(32).getZExtValue());
-}
-
-/// Pop the last stack frame off of ECStack and then copy the result
-/// back into the result variable if we are not returning void. The
-/// result variable may be the ExitValue, or the Value of the calling
-/// CallInst if there was a previous stack frame. This method may
-/// invalidate any ECStack iterators you have. This method also takes
-/// care of switching to the normal destination BB, if we are returning
-/// from an invoke.
-///
-void Interpreter::popStackAndReturnValueToCaller(const Type *RetTy,
- GenericValue Result) {
- // Pop the current stack frame.
- ECStack.pop_back();
-
- if (ECStack.empty()) { // Finished main. Put result into exit code...
- if (RetTy && RetTy->isIntegerTy()) { // Nonvoid return type?
- ExitValue = Result; // Capture the exit value of the program
- } else {
- memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
- }
- } else {
- // If we have a previous stack frame, and we have a previous call,
- // fill in the return value...
- ExecutionContext &CallingSF = ECStack.back();
- if (Instruction *I = CallingSF.Caller.getInstruction()) {
- // Save result...
- if (!CallingSF.Caller.getType()->isVoidTy())
- SetValue(I, Result, CallingSF);
- if (InvokeInst *II = dyn_cast<InvokeInst> (I))
- SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
- CallingSF.Caller = CallSite(); // We returned from the call...
- }
- }
-}
-
-void Interpreter::visitReturnInst(ReturnInst &I) {
- ExecutionContext &SF = ECStack.back();
- const Type *RetTy = Type::getVoidTy(I.getContext());
- GenericValue Result;
-
- // Save away the return value... (if we are not 'ret void')
- if (I.getNumOperands()) {
- RetTy = I.getReturnValue()->getType();
- Result = getOperandValue(I.getReturnValue(), SF);
- }
-
- popStackAndReturnValueToCaller(RetTy, Result);
-}
-
-void Interpreter::visitUnwindInst(UnwindInst &I) {
- // Unwind stack
- Instruction *Inst;
- do {
- ECStack.pop_back();
- if (ECStack.empty())
- llvm_report_error("Empty stack during unwind!");
- Inst = ECStack.back().Caller.getInstruction();
- } while (!(Inst && isa<InvokeInst>(Inst)));
-
- // Return from invoke
- ExecutionContext &InvokingSF = ECStack.back();
- InvokingSF.Caller = CallSite();
-
- // Go to exceptional destination BB of invoke instruction
- SwitchToNewBasicBlock(cast<InvokeInst>(Inst)->getUnwindDest(), InvokingSF);
-}
-
-void Interpreter::visitUnreachableInst(UnreachableInst &I) {
- llvm_report_error("Program executed an 'unreachable' instruction!");
-}
-
-void Interpreter::visitBranchInst(BranchInst &I) {
- ExecutionContext &SF = ECStack.back();
- BasicBlock *Dest;
-
- Dest = I.getSuccessor(0); // Uncond branches have a fixed dest...
- if (!I.isUnconditional()) {
- Value *Cond = I.getCondition();
- if (getOperandValue(Cond, SF).IntVal == 0) // If false cond...
- Dest = I.getSuccessor(1);
- }
- SwitchToNewBasicBlock(Dest, SF);
-}
-
-void Interpreter::visitSwitchInst(SwitchInst &I) {
- ExecutionContext &SF = ECStack.back();
- GenericValue CondVal = getOperandValue(I.getOperand(0), SF);
- const Type *ElTy = I.getOperand(0)->getType();
-
- // Check to see if any of the cases match...
- BasicBlock *Dest = 0;
- for (unsigned i = 2, e = I.getNumOperands(); i != e; i += 2)
- if (executeICMP_EQ(CondVal, getOperandValue(I.getOperand(i), SF), ElTy)
- .IntVal != 0) {
- Dest = cast<BasicBlock>(I.getOperand(i+1));
- break;
- }
-
- if (!Dest) Dest = I.getDefaultDest(); // No cases matched: use default
- SwitchToNewBasicBlock(Dest, SF);
-}
-
-void Interpreter::visitIndirectBrInst(IndirectBrInst &I) {
- ExecutionContext &SF = ECStack.back();
- void *Dest = GVTOP(getOperandValue(I.getAddress(), SF));
- SwitchToNewBasicBlock((BasicBlock*)Dest, SF);
-}
-
-
-// SwitchToNewBasicBlock - This method is used to jump to a new basic block.
-// This function handles the actual updating of block and instruction iterators
-// as well as execution of all of the PHI nodes in the destination block.
-//
-// This method does this because all of the PHI nodes must be executed
-// atomically, reading their inputs before any of the results are updated. Not
-// doing this can cause problems if the PHI nodes depend on other PHI nodes for
-// their inputs. If the input PHI node is updated before it is read, incorrect
-// results can happen. Thus we use a two phase approach.
-//
-void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
- BasicBlock *PrevBB = SF.CurBB; // Remember where we came from...
- SF.CurBB = Dest; // Update CurBB to branch destination
- SF.CurInst = SF.CurBB->begin(); // Update new instruction ptr...
-
- if (!isa<PHINode>(SF.CurInst)) return; // Nothing fancy to do
-
- // Loop over all of the PHI nodes in the current block, reading their inputs.
- std::vector<GenericValue> ResultValues;
-
- for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
- // Search for the value corresponding to this previous bb...
- int i = PN->getBasicBlockIndex(PrevBB);
- assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
- Value *IncomingValue = PN->getIncomingValue(i);
-
- // Save the incoming value for this PHI node...
- ResultValues.push_back(getOperandValue(IncomingValue, SF));
- }
-
- // Now loop over all of the PHI nodes setting their values...
- SF.CurInst = SF.CurBB->begin();
- for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
- PHINode *PN = cast<PHINode>(SF.CurInst);
- SetValue(PN, ResultValues[i], SF);
- }
-}
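
The two-phase PHI handling described in the comment above (read every incoming value before writing any result) can be illustrated with a small self-contained sketch; the types and names below are invented for the example and are not the LLVM API.

#include <cstddef>
#include <map>
#include <string>
#include <vector>

// Minimal sketch of the two-phase PHI update: stage all reads against the
// pre-branch register state first, then commit all writes, so PHIs that feed
// each other never observe a partially updated state.
struct PhiUpdate { std::string Dest; std::string Source; };

static void runPhisAtomically(std::map<std::string, long> &Regs,
                              const std::vector<PhiUpdate> &Phis) {
  std::vector<long> Staged;
  Staged.reserve(Phis.size());
  for (const PhiUpdate &P : Phis)              // phase 1: read old values
    Staged.push_back(Regs[P.Source]);
  for (std::size_t i = 0; i < Phis.size(); ++i)
    Regs[Phis[i].Dest] = Staged[i];            // phase 2: write results
}
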
-
-//===----------------------------------------------------------------------===//
-// Memory Instruction Implementations
-//===----------------------------------------------------------------------===//
-
-void Interpreter::visitAllocaInst(AllocaInst &I) {
- ExecutionContext &SF = ECStack.back();
-
- const Type *Ty = I.getType()->getElementType(); // Type to be allocated
-
- // Get the number of elements being allocated by the array...
- unsigned NumElements =
- getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();
-
- unsigned TypeSize = (size_t)TD.getTypeAllocSize(Ty);
-
- // Avoid malloc-ing zero bytes, use max()...
- unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);
-
- // Allocate enough memory to hold the type...
- void *Memory = malloc(MemToAlloc);
-
- DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize << " bytes) x "
- << NumElements << " (Total: " << MemToAlloc << ") at "
- << uintptr_t(Memory) << '\n');
-
- GenericValue Result = PTOGV(Memory);
- assert(Result.PointerVal != 0 && "Null pointer returned by malloc!");
- SetValue(&I, Result, SF);
-
- if (I.getOpcode() == Instruction::Alloca)
- ECStack.back().Allocas.add(Memory);
-}
-
-// getElementOffset - The workhorse for getelementptr.
-//
-GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
- gep_type_iterator E,
- ExecutionContext &SF) {
- assert(Ptr->getType()->isPointerTy() &&
- "Cannot getElementOffset of a nonpointer type!");
-
- uint64_t Total = 0;
-
- for (; I != E; ++I) {
- if (const StructType *STy = dyn_cast<StructType>(*I)) {
- const StructLayout *SLO = TD.getStructLayout(STy);
-
- const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
- unsigned Index = unsigned(CPU->getZExtValue());
-
- Total += SLO->getElementOffset(Index);
- } else {
- const SequentialType *ST = cast<SequentialType>(*I);
- // Get the index number for the array... which must be long type...
- GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
-
- int64_t Idx;
- unsigned BitWidth =
- cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
- if (BitWidth == 32)
- Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
- else {
- assert(BitWidth == 64 && "Invalid index type for getelementptr");
- Idx = (int64_t)IdxGV.IntVal.getZExtValue();
- }
- Total += TD.getTypeAllocSize(ST->getElementType())*Idx;
- }
- }
-
- GenericValue Result;
- Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total;
- DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
- return Result;
-}
-
-void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) {
- ExecutionContext &SF = ECStack.back();
- SetValue(&I, executeGEPOperation(I.getPointerOperand(),
- gep_type_begin(I), gep_type_end(I), SF), SF);
-}
-
-void Interpreter::visitLoadInst(LoadInst &I) {
- ExecutionContext &SF = ECStack.back();
- GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
- GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
- GenericValue Result;
- LoadValueFromMemory(Result, Ptr, I.getType());
- SetValue(&I, Result, SF);
- if (I.isVolatile() && PrintVolatile)
- dbgs() << "Volatile load " << I;
-}
-
-void Interpreter::visitStoreInst(StoreInst &I) {
- ExecutionContext &SF = ECStack.back();
- GenericValue Val = getOperandValue(I.getOperand(0), SF);
- GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
- StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
- I.getOperand(0)->getType());
- if (I.isVolatile() && PrintVolatile)
- dbgs() << "Volatile store: " << I;
-}
-
-//===----------------------------------------------------------------------===//
-// Miscellaneous Instruction Implementations
-//===----------------------------------------------------------------------===//
-
-void Interpreter::visitCallSite(CallSite CS) {
- ExecutionContext &SF = ECStack.back();
-
- // Check to see if this is an intrinsic function call...
- Function *F = CS.getCalledFunction();
- if (F && F->isDeclaration())
- switch (F->getIntrinsicID()) {
- case Intrinsic::not_intrinsic:
- break;
- case Intrinsic::vastart: { // va_start
- GenericValue ArgIndex;
- ArgIndex.UIntPairVal.first = ECStack.size() - 1;
- ArgIndex.UIntPairVal.second = 0;
- SetValue(CS.getInstruction(), ArgIndex, SF);
- return;
- }
- case Intrinsic::vaend: // va_end is a noop for the interpreter
- return;
- case Intrinsic::vacopy: // va_copy: dest = src
- SetValue(CS.getInstruction(), getOperandValue(*CS.arg_begin(), SF), SF);
- return;
- default:
- // If it is an unknown intrinsic function, use the intrinsic lowering
- // class to transform it into hopefully tasty LLVM code.
- //
- BasicBlock::iterator me(CS.getInstruction());
- BasicBlock *Parent = CS.getInstruction()->getParent();
- bool atBegin(Parent->begin() == me);
- if (!atBegin)
- --me;
- IL->LowerIntrinsicCall(cast<CallInst>(CS.getInstruction()));
-
- // Restore the CurInst pointer to the first instruction newly inserted, if
- // any.
- if (atBegin) {
- SF.CurInst = Parent->begin();
- } else {
- SF.CurInst = me;
- ++SF.CurInst;
- }
- return;
- }
-
-
- SF.Caller = CS;
- std::vector<GenericValue> ArgVals;
- const unsigned NumArgs = SF.Caller.arg_size();
- ArgVals.reserve(NumArgs);
- uint16_t pNum = 1;
- for (CallSite::arg_iterator i = SF.Caller.arg_begin(),
- e = SF.Caller.arg_end(); i != e; ++i, ++pNum) {
- Value *V = *i;
- ArgVals.push_back(getOperandValue(V, SF));
- }
-
- // To handle indirect calls, we must get the pointer value from the argument
- // and treat it as a function pointer.
- GenericValue SRC = getOperandValue(SF.Caller.getCalledValue(), SF);
- callFunction((Function*)GVTOP(SRC), ArgVals);
-}
-
-void Interpreter::visitShl(BinaryOperator &I) {
- ExecutionContext &SF = ECStack.back();
- GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
- GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
- GenericValue Dest;
- if (Src2.IntVal.getZExtValue() < Src1.IntVal.getBitWidth())
- Dest.IntVal = Src1.IntVal.shl(Src2.IntVal.getZExtValue());
- else
- Dest.IntVal = Src1.IntVal;
-
- SetValue(&I, Dest, SF);
-}
-
-void Interpreter::visitLShr(BinaryOperator &I) {
- ExecutionContext &SF = ECStack.back();
- GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
- GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
- GenericValue Dest;
- if (Src2.IntVal.getZExtValue() < Src1.IntVal.getBitWidth())
- Dest.IntVal = Src1.IntVal.lshr(Src2.IntVal.getZExtValue());
- else
- Dest.IntVal = Src1.IntVal;
-
- SetValue(&I, Dest, SF);
-}
-
-void Interpreter::visitAShr(BinaryOperator &I) {
- ExecutionContext &SF = ECStack.back();
- GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
- GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
- GenericValue Dest;
- if (Src2.IntVal.getZExtValue() < Src1.IntVal.getBitWidth())
- Dest.IntVal = Src1.IntVal.ashr(Src2.IntVal.getZExtValue());
- else
- Dest.IntVal = Src1.IntVal;
-
- SetValue(&I, Dest, SF);
-}
-
-GenericValue Interpreter::executeTruncInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF) {
- GenericValue Dest, Src = getOperandValue(SrcVal, SF);
- const IntegerType *DITy = cast<IntegerType>(DstTy);
- unsigned DBitWidth = DITy->getBitWidth();
- Dest.IntVal = Src.IntVal.trunc(DBitWidth);
- return Dest;
-}
-
-GenericValue Interpreter::executeSExtInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF) {
- GenericValue Dest, Src = getOperandValue(SrcVal, SF);
- const IntegerType *DITy = cast<IntegerType>(DstTy);
- unsigned DBitWidth = DITy->getBitWidth();
- Dest.IntVal = Src.IntVal.sext(DBitWidth);
- return Dest;
-}
-
-GenericValue Interpreter::executeZExtInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF) {
- GenericValue Dest, Src = getOperandValue(SrcVal, SF);
- const IntegerType *DITy = cast<IntegerType>(DstTy);
- unsigned DBitWidth = DITy->getBitWidth();
- Dest.IntVal = Src.IntVal.zext(DBitWidth);
- return Dest;
-}
-
-GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF) {
- GenericValue Dest, Src = getOperandValue(SrcVal, SF);
- assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
- "Invalid FPTrunc instruction");
- Dest.FloatVal = (float) Src.DoubleVal;
- return Dest;
-}
-
-GenericValue Interpreter::executeFPExtInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF) {
- GenericValue Dest, Src = getOperandValue(SrcVal, SF);
- assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
- "Invalid FPExt instruction");
- Dest.DoubleVal = (double) Src.FloatVal;
- return Dest;
-}
-
-GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF) {
- const Type *SrcTy = SrcVal->getType();
- uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
- GenericValue Dest, Src = getOperandValue(SrcVal, SF);
- assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");
-
- if (SrcTy->getTypeID() == Type::FloatTyID)
- Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
- else
- Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
- return Dest;
-}
-
-GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF) {
- const Type *SrcTy = SrcVal->getType();
- uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
- GenericValue Dest, Src = getOperandValue(SrcVal, SF);
- assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");
-
- if (SrcTy->getTypeID() == Type::FloatTyID)
- Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
- else
- Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
- return Dest;
-}
-
-GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF) {
- GenericValue Dest, Src = getOperandValue(SrcVal, SF);
- assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
-
- if (DstTy->getTypeID() == Type::FloatTyID)
- Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal);
- else
- Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal);
- return Dest;
-}
-
-GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF) {
- GenericValue Dest, Src = getOperandValue(SrcVal, SF);
- assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");
-
- if (DstTy->getTypeID() == Type::FloatTyID)
- Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal);
- else
- Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal);
- return Dest;
-
-}
-
-GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF) {
- uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
- GenericValue Dest, Src = getOperandValue(SrcVal, SF);
- assert(SrcVal->getType()->isPointerTy() && "Invalid PtrToInt instruction");
-
- Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal);
- return Dest;
-}
-
-GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF) {
- GenericValue Dest, Src = getOperandValue(SrcVal, SF);
- assert(DstTy->isPointerTy() && "Invalid IntToPtr instruction");
-
- uint32_t PtrSize = TD.getPointerSizeInBits();
- if (PtrSize != Src.IntVal.getBitWidth())
- Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);
-
- Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue()));
- return Dest;
-}
-
-GenericValue Interpreter::executeBitCastInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF) {
-
- const Type *SrcTy = SrcVal->getType();
- GenericValue Dest, Src = getOperandValue(SrcVal, SF);
- if (DstTy->isPointerTy()) {
- assert(SrcTy->isPointerTy() && "Invalid BitCast");
- Dest.PointerVal = Src.PointerVal;
- } else if (DstTy->isIntegerTy()) {
- if (SrcTy->isFloatTy()) {
- Dest.IntVal.zext(sizeof(Src.FloatVal) * CHAR_BIT);
- Dest.IntVal.floatToBits(Src.FloatVal);
- } else if (SrcTy->isDoubleTy()) {
- Dest.IntVal.zext(sizeof(Src.DoubleVal) * CHAR_BIT);
- Dest.IntVal.doubleToBits(Src.DoubleVal);
- } else if (SrcTy->isIntegerTy()) {
- Dest.IntVal = Src.IntVal;
- } else
- llvm_unreachable("Invalid BitCast");
- } else if (DstTy->isFloatTy()) {
- if (SrcTy->isIntegerTy())
- Dest.FloatVal = Src.IntVal.bitsToFloat();
- else
- Dest.FloatVal = Src.FloatVal;
- } else if (DstTy->isDoubleTy()) {
- if (SrcTy->isIntegerTy())
- Dest.DoubleVal = Src.IntVal.bitsToDouble();
- else
- Dest.DoubleVal = Src.DoubleVal;
- } else
- llvm_unreachable("Invalid Bitcast");
-
- return Dest;
-}
-
-void Interpreter::visitTruncInst(TruncInst &I) {
- ExecutionContext &SF = ECStack.back();
- SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
-}
-
-void Interpreter::visitSExtInst(SExtInst &I) {
- ExecutionContext &SF = ECStack.back();
- SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
-}
-
-void Interpreter::visitZExtInst(ZExtInst &I) {
- ExecutionContext &SF = ECStack.back();
- SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
-}
-
-void Interpreter::visitFPTruncInst(FPTruncInst &I) {
- ExecutionContext &SF = ECStack.back();
- SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
-}
-
-void Interpreter::visitFPExtInst(FPExtInst &I) {
- ExecutionContext &SF = ECStack.back();
- SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
-}
-
-void Interpreter::visitUIToFPInst(UIToFPInst &I) {
- ExecutionContext &SF = ECStack.back();
- SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
-}
-
-void Interpreter::visitSIToFPInst(SIToFPInst &I) {
- ExecutionContext &SF = ECStack.back();
- SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
-}
-
-void Interpreter::visitFPToUIInst(FPToUIInst &I) {
- ExecutionContext &SF = ECStack.back();
- SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
-}
-
-void Interpreter::visitFPToSIInst(FPToSIInst &I) {
- ExecutionContext &SF = ECStack.back();
- SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
-}
-
-void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
- ExecutionContext &SF = ECStack.back();
- SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
-}
-
-void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
- ExecutionContext &SF = ECStack.back();
- SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
-}
-
-void Interpreter::visitBitCastInst(BitCastInst &I) {
- ExecutionContext &SF = ECStack.back();
- SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
-}
-
-#define IMPLEMENT_VAARG(TY) \
- case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break
-
-void Interpreter::visitVAArgInst(VAArgInst &I) {
- ExecutionContext &SF = ECStack.back();
-
- // Get the incoming valist parameter. LLI treats the valist as a
- // (ec-stack-depth var-arg-index) pair.
- GenericValue VAList = getOperandValue(I.getOperand(0), SF);
- GenericValue Dest;
- GenericValue Src = ECStack[VAList.UIntPairVal.first]
- .VarArgs[VAList.UIntPairVal.second];
- const Type *Ty = I.getType();
- switch (Ty->getTypeID()) {
- case Type::IntegerTyID: Dest.IntVal = Src.IntVal;
- IMPLEMENT_VAARG(Pointer);
- IMPLEMENT_VAARG(Float);
- IMPLEMENT_VAARG(Double);
- default:
- dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
- llvm_unreachable(0);
- }
-
- // Set the Value of this Instruction.
- SetValue(&I, Dest, SF);
-
- // Move the pointer to the next vararg.
- ++VAList.UIntPairVal.second;
-}
-
-GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
- ExecutionContext &SF) {
- switch (CE->getOpcode()) {
- case Instruction::Trunc:
- return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
- case Instruction::ZExt:
- return executeZExtInst(CE->getOperand(0), CE->getType(), SF);
- case Instruction::SExt:
- return executeSExtInst(CE->getOperand(0), CE->getType(), SF);
- case Instruction::FPTrunc:
- return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF);
- case Instruction::FPExt:
- return executeFPExtInst(CE->getOperand(0), CE->getType(), SF);
- case Instruction::UIToFP:
- return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF);
- case Instruction::SIToFP:
- return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF);
- case Instruction::FPToUI:
- return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF);
- case Instruction::FPToSI:
- return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF);
- case Instruction::PtrToInt:
- return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
- case Instruction::IntToPtr:
- return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
- case Instruction::BitCast:
- return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
- case Instruction::GetElementPtr:
- return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
- gep_type_end(CE), SF);
- case Instruction::FCmp:
- case Instruction::ICmp:
- return executeCmpInst(CE->getPredicate(),
- getOperandValue(CE->getOperand(0), SF),
- getOperandValue(CE->getOperand(1), SF),
- CE->getOperand(0)->getType());
- case Instruction::Select:
- return executeSelectInst(getOperandValue(CE->getOperand(0), SF),
- getOperandValue(CE->getOperand(1), SF),
- getOperandValue(CE->getOperand(2), SF));
- default :
- break;
- }
-
- // The cases below here require a GenericValue parameter for the result
- // so we initialize one, compute it and then return it.
- GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
- GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
- GenericValue Dest;
- const Type * Ty = CE->getOperand(0)->getType();
- switch (CE->getOpcode()) {
- case Instruction::Add: Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
- case Instruction::Sub: Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
- case Instruction::Mul: Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
- case Instruction::FAdd: executeFAddInst(Dest, Op0, Op1, Ty); break;
- case Instruction::FSub: executeFSubInst(Dest, Op0, Op1, Ty); break;
- case Instruction::FMul: executeFMulInst(Dest, Op0, Op1, Ty); break;
- case Instruction::FDiv: executeFDivInst(Dest, Op0, Op1, Ty); break;
- case Instruction::FRem: executeFRemInst(Dest, Op0, Op1, Ty); break;
- case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(Op1.IntVal); break;
- case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(Op1.IntVal); break;
- case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(Op1.IntVal); break;
- case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(Op1.IntVal); break;
- case Instruction::And: Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
- case Instruction::Or: Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
- case Instruction::Xor: Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
- case Instruction::Shl:
- Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
- break;
- case Instruction::LShr:
- Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
- break;
- case Instruction::AShr:
- Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue());
- break;
- default:
- dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
- llvm_unreachable(0);
- return GenericValue();
- }
- return Dest;
-}
-
-GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
- return getConstantExprValue(CE, SF);
- } else if (Constant *CPV = dyn_cast<Constant>(V)) {
- return getConstantValue(CPV);
- } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
- return PTOGV(getPointerToGlobal(GV));
- } else {
- return SF.Values[V];
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Dispatch and Execution Code
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// callFunction - Execute the specified function...
-//
-void Interpreter::callFunction(Function *F,
- const std::vector<GenericValue> &ArgVals) {
- assert((ECStack.empty() || ECStack.back().Caller.getInstruction() == 0 ||
- ECStack.back().Caller.arg_size() == ArgVals.size()) &&
- "Incorrect number of arguments passed into function call!");
- // Make a new stack frame... and fill it in.
- ECStack.push_back(ExecutionContext());
- ExecutionContext &StackFrame = ECStack.back();
- StackFrame.CurFunction = F;
-
- // Special handling for external functions.
- if (F->isDeclaration()) {
- GenericValue Result = callExternalFunction (F, ArgVals);
- // Simulate a 'ret' instruction of the appropriate type.
- popStackAndReturnValueToCaller (F->getReturnType (), Result);
- return;
- }
-
- // Get pointers to first LLVM BB & Instruction in function.
- StackFrame.CurBB = F->begin();
- StackFrame.CurInst = StackFrame.CurBB->begin();
-
- // Run through the function arguments and initialize their values...
- assert((ArgVals.size() == F->arg_size() ||
- (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
- "Invalid number of values passed to function invocation!");
-
- // Handle non-varargs arguments...
- unsigned i = 0;
- for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
- AI != E; ++AI, ++i)
- SetValue(AI, ArgVals[i], StackFrame);
-
- // Handle varargs arguments...
- StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
-}
-
-
-void Interpreter::run() {
- while (!ECStack.empty()) {
- // Interpret a single instruction & increment the "PC".
- ExecutionContext &SF = ECStack.back(); // Current stack frame
- Instruction &I = *SF.CurInst++; // Increment before execute
-
- // Track the number of dynamic instructions executed.
- ++NumDynamicInsts;
-
- DEBUG(dbgs() << "About to interpret: " << I);
- visit(I); // Dispatch to one of the visit* methods...
-#if 0
- // This is not safe, as visiting the instruction could lower it and free I.
-DEBUG(
- if (!isa<CallInst>(I) && !isa<InvokeInst>(I) &&
- I.getType() != Type::VoidTy) {
- dbgs() << " --> ";
- const GenericValue &Val = SF.Values[&I];
- switch (I.getType()->getTypeID()) {
- default: llvm_unreachable("Invalid GenericValue Type");
- case Type::VoidTyID: dbgs() << "void"; break;
- case Type::FloatTyID: dbgs() << "float " << Val.FloatVal; break;
- case Type::DoubleTyID: dbgs() << "double " << Val.DoubleVal; break;
- case Type::PointerTyID: dbgs() << "void* " << intptr_t(Val.PointerVal);
- break;
- case Type::IntegerTyID:
- dbgs() << "i" << Val.IntVal.getBitWidth() << " "
- << Val.IntVal.toStringUnsigned(10)
- << " (0x" << Val.IntVal.toStringUnsigned(16) << ")\n";
- break;
- }
- });
-#endif
- }
-}
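
The file removed above (Execution.cpp) drives the interpreter with a simple fetch/dispatch loop: run() takes the next instruction from the top stack frame, dispatches to a visit* handler, and the handler reads operand values and stores its result back into the frame. A rough, self-contained sketch of that pattern, with invented opcodes and types rather than the LLVM classes, looks like this:

#include <cstddef>
#include <cstdint>
#include <map>
#include <vector>

// Hypothetical fetch/dispatch loop: one frame with a program counter and a
// per-frame value table, and a switch standing in for the visit* methods.
enum class Op { Add, Sub, Ret };

struct Instr { Op op; int dst, lhs, rhs; };

struct Frame {
  std::vector<Instr> code;
  std::size_t pc = 0;                    // next instruction to execute
  std::map<int, std::int64_t> values;    // result of each "register"
};

static std::int64_t runFrame(Frame frame) {
  while (frame.pc < frame.code.size()) {
    const Instr &I = frame.code[frame.pc++];   // increment before execute
    switch (I.op) {                            // dispatch, like visit*()
    case Op::Add: frame.values[I.dst] = frame.values[I.lhs] + frame.values[I.rhs]; break;
    case Op::Sub: frame.values[I.dst] = frame.values[I.lhs] - frame.values[I.rhs]; break;
    case Op::Ret: return frame.values[I.lhs];
    }
  }
  return 0;
}
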
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
deleted file mode 100644
index 7b061d3..0000000
--- a/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
+++ /dev/null
@@ -1,490 +0,0 @@
-//===-- ExternalFunctions.cpp - Implement External Functions --------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the code that deals with invoking "external" functions
-// as well as the code that implements "exported" external functions.
-//
-// There are currently two mechanisms for handling external functions in the
-// Interpreter. The first is to implement lle_* wrapper functions that are
-// specific to well-known library functions which manually translate the
-// arguments from GenericValues and make the call. If such a wrapper does
-// not exist, and libffi is available, then the Interpreter will attempt to
-// invoke the function using libffi, after finding its address.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Interpreter.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Module.h"
-#include "llvm/Config/config.h" // Detect libffi
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/System/DynamicLibrary.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Support/ManagedStatic.h"
-#include "llvm/System/Mutex.h"
-#include <csignal>
-#include <cstdio>
-#include <map>
-#include <cmath>
-#include <cstring>
-
-#ifdef HAVE_FFI_CALL
-#ifdef HAVE_FFI_H
-#include <ffi.h>
-#define USE_LIBFFI
-#elif HAVE_FFI_FFI_H
-#include <ffi/ffi.h>
-#define USE_LIBFFI
-#endif
-#endif
-
-using namespace llvm;
-
-static ManagedStatic<sys::Mutex> FunctionsLock;
-
-typedef GenericValue (*ExFunc)(const FunctionType *,
- const std::vector<GenericValue> &);
-static ManagedStatic<std::map<const Function *, ExFunc> > ExportedFunctions;
-static std::map<std::string, ExFunc> FuncNames;
-
-#ifdef USE_LIBFFI
-typedef void (*RawFunc)();
-static ManagedStatic<std::map<const Function *, RawFunc> > RawFunctions;
-#endif
-
-static Interpreter *TheInterpreter;
-
-static char getTypeID(const Type *Ty) {
- switch (Ty->getTypeID()) {
- case Type::VoidTyID: return 'V';
- case Type::IntegerTyID:
- switch (cast<IntegerType>(Ty)->getBitWidth()) {
- case 1: return 'o';
- case 8: return 'B';
- case 16: return 'S';
- case 32: return 'I';
- case 64: return 'L';
- default: return 'N';
- }
- case Type::FloatTyID: return 'F';
- case Type::DoubleTyID: return 'D';
- case Type::PointerTyID: return 'P';
- case Type::FunctionTyID:return 'M';
- case Type::StructTyID: return 'T';
- case Type::ArrayTyID: return 'A';
- case Type::OpaqueTyID: return 'O';
- default: return 'U';
- }
-}
-
-// Try to find the address of an external function given a Function object.
-// Note that the interpreter doesn't know how to assemble a real call in the
-// general case (that is the JIT's job), so it assumes that all external
-// functions have the same (and fairly generic) signature. Typical examples
-// of such functions are the "lle_X_" ones.
-static ExFunc lookupFunction(const Function *F) {
- // Function not found, look it up... start by figuring out what the
- // composite function name should be.
- std::string ExtName = "lle_";
- const FunctionType *FT = F->getFunctionType();
- for (unsigned i = 0, e = FT->getNumContainedTypes(); i != e; ++i)
- ExtName += getTypeID(FT->getContainedType(i));
- ExtName += "_" + F->getNameStr();
-
- sys::ScopedLock Writer(*FunctionsLock);
- ExFunc FnPtr = FuncNames[ExtName];
- if (FnPtr == 0)
- FnPtr = FuncNames["lle_X_" + F->getNameStr()];
- if (FnPtr == 0) // Try calling a generic function... if it exists...
- FnPtr = (ExFunc)(intptr_t)
- sys::DynamicLibrary::SearchForAddressOfSymbol("lle_X_"+F->getNameStr());
- if (FnPtr != 0)
- ExportedFunctions->insert(std::make_pair(F, FnPtr)); // Cache for later
- return FnPtr;
-}
-
-#ifdef USE_LIBFFI
-static ffi_type *ffiTypeFor(const Type *Ty) {
- switch (Ty->getTypeID()) {
- case Type::VoidTyID: return &ffi_type_void;
- case Type::IntegerTyID:
- switch (cast<IntegerType>(Ty)->getBitWidth()) {
- case 8: return &ffi_type_sint8;
- case 16: return &ffi_type_sint16;
- case 32: return &ffi_type_sint32;
- case 64: return &ffi_type_sint64;
- }
- case Type::FloatTyID: return &ffi_type_float;
- case Type::DoubleTyID: return &ffi_type_double;
- case Type::PointerTyID: return &ffi_type_pointer;
- default: break;
- }
- // TODO: Support other types such as StructTyID, ArrayTyID, OpaqueTyID, etc.
- llvm_report_error("Type could not be mapped for use with libffi.");
- return NULL;
-}
-
-static void *ffiValueFor(const Type *Ty, const GenericValue &AV,
- void *ArgDataPtr) {
- switch (Ty->getTypeID()) {
- case Type::IntegerTyID:
- switch (cast<IntegerType>(Ty)->getBitWidth()) {
- case 8: {
- int8_t *I8Ptr = (int8_t *) ArgDataPtr;
- *I8Ptr = (int8_t) AV.IntVal.getZExtValue();
- return ArgDataPtr;
- }
- case 16: {
- int16_t *I16Ptr = (int16_t *) ArgDataPtr;
- *I16Ptr = (int16_t) AV.IntVal.getZExtValue();
- return ArgDataPtr;
- }
- case 32: {
- int32_t *I32Ptr = (int32_t *) ArgDataPtr;
- *I32Ptr = (int32_t) AV.IntVal.getZExtValue();
- return ArgDataPtr;
- }
- case 64: {
- int64_t *I64Ptr = (int64_t *) ArgDataPtr;
- *I64Ptr = (int64_t) AV.IntVal.getZExtValue();
- return ArgDataPtr;
- }
- }
- case Type::FloatTyID: {
- float *FloatPtr = (float *) ArgDataPtr;
- *FloatPtr = AV.FloatVal;
- return ArgDataPtr;
- }
- case Type::DoubleTyID: {
- double *DoublePtr = (double *) ArgDataPtr;
- *DoublePtr = AV.DoubleVal;
- return ArgDataPtr;
- }
- case Type::PointerTyID: {
- void **PtrPtr = (void **) ArgDataPtr;
- *PtrPtr = GVTOP(AV);
- return ArgDataPtr;
- }
- default: break;
- }
- // TODO: Support other types such as StructTyID, ArrayTyID, OpaqueTyID, etc.
- llvm_report_error("Type value could not be mapped for use with libffi.");
- return NULL;
-}
-
-static bool ffiInvoke(RawFunc Fn, Function *F,
- const std::vector<GenericValue> &ArgVals,
- const TargetData *TD, GenericValue &Result) {
- ffi_cif cif;
- const FunctionType *FTy = F->getFunctionType();
- const unsigned NumArgs = F->arg_size();
-
- // TODO: We don't have type information about the remaining arguments, because
- // this information is never passed into ExecutionEngine::runFunction().
- if (ArgVals.size() > NumArgs && F->isVarArg()) {
- llvm_report_error("Calling external var arg function '" + F->getName()
- + "' is not supported by the Interpreter.");
- }
-
- unsigned ArgBytes = 0;
-
- std::vector<ffi_type*> args(NumArgs);
- for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
- A != E; ++A) {
- const unsigned ArgNo = A->getArgNo();
- const Type *ArgTy = FTy->getParamType(ArgNo);
- args[ArgNo] = ffiTypeFor(ArgTy);
- ArgBytes += TD->getTypeStoreSize(ArgTy);
- }
-
- SmallVector<uint8_t, 128> ArgData;
- ArgData.resize(ArgBytes);
- uint8_t *ArgDataPtr = ArgData.data();
- SmallVector<void*, 16> values(NumArgs);
- for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
- A != E; ++A) {
- const unsigned ArgNo = A->getArgNo();
- const Type *ArgTy = FTy->getParamType(ArgNo);
- values[ArgNo] = ffiValueFor(ArgTy, ArgVals[ArgNo], ArgDataPtr);
- ArgDataPtr += TD->getTypeStoreSize(ArgTy);
- }
-
- const Type *RetTy = FTy->getReturnType();
- ffi_type *rtype = ffiTypeFor(RetTy);
-
- if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, NumArgs, rtype, &args[0]) == FFI_OK) {
- SmallVector<uint8_t, 128> ret;
- if (RetTy->getTypeID() != Type::VoidTyID)
- ret.resize(TD->getTypeStoreSize(RetTy));
- ffi_call(&cif, Fn, ret.data(), values.data());
- switch (RetTy->getTypeID()) {
- case Type::IntegerTyID:
- switch (cast<IntegerType>(RetTy)->getBitWidth()) {
- case 8: Result.IntVal = APInt(8 , *(int8_t *) ret.data()); break;
- case 16: Result.IntVal = APInt(16, *(int16_t*) ret.data()); break;
- case 32: Result.IntVal = APInt(32, *(int32_t*) ret.data()); break;
- case 64: Result.IntVal = APInt(64, *(int64_t*) ret.data()); break;
- }
- break;
- case Type::FloatTyID: Result.FloatVal = *(float *) ret.data(); break;
- case Type::DoubleTyID: Result.DoubleVal = *(double*) ret.data(); break;
- case Type::PointerTyID: Result.PointerVal = *(void **) ret.data(); break;
- default: break;
- }
- return true;
- }
-
- return false;
-}
-#endif // USE_LIBFFI
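
ffiInvoke() above maps each LLVM type to an ffi_type descriptor, packs the argument values into a raw buffer, and then drives the call through ffi_prep_cif()/ffi_call(). Outside of the interpreter, the same libffi calling pattern in its simplest standalone form (here calling puts) looks roughly like this:

#include <cstdio>
#include <ffi.h>

int main() {
  ffi_cif cif;
  ffi_type *args[1];
  void *values[1];
  const char *s = "hello from libffi";
  ffi_arg rc;                    // return slot, at least as wide as int

  args[0] = &ffi_type_pointer;   // puts takes one pointer argument
  values[0] = &s;

  // Describe the call (default ABI, 1 argument, int return), then make it.
  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, args) == FFI_OK)
    ffi_call(&cif, FFI_FN(puts), &rc, values);
  return 0;
}
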
-
-GenericValue Interpreter::callExternalFunction(Function *F,
- const std::vector<GenericValue> &ArgVals) {
- TheInterpreter = this;
-
- FunctionsLock->acquire();
-
- // Do a lookup to see if the function is in our cache... this should just be a
- // deferred annotation!
- std::map<const Function *, ExFunc>::iterator FI = ExportedFunctions->find(F);
- if (ExFunc Fn = (FI == ExportedFunctions->end()) ? lookupFunction(F)
- : FI->second) {
- FunctionsLock->release();
- return Fn(F->getFunctionType(), ArgVals);
- }
-
-#ifdef USE_LIBFFI
- std::map<const Function *, RawFunc>::iterator RF = RawFunctions->find(F);
- RawFunc RawFn;
- if (RF == RawFunctions->end()) {
- RawFn = (RawFunc)(intptr_t)
- sys::DynamicLibrary::SearchForAddressOfSymbol(F->getName());
- if (RawFn != 0)
- RawFunctions->insert(std::make_pair(F, RawFn)); // Cache for later
- } else {
- RawFn = RF->second;
- }
-
- FunctionsLock->release();
-
- GenericValue Result;
- if (RawFn != 0 && ffiInvoke(RawFn, F, ArgVals, getTargetData(), Result))
- return Result;
-#endif // USE_LIBFFI
-
- if (F->getName() == "__main")
- errs() << "Tried to execute an unknown external function: "
- << F->getType()->getDescription() << " __main\n";
- else
- llvm_report_error("Tried to execute an unknown external function: " +
- F->getType()->getDescription() + " " +F->getName());
-#ifndef USE_LIBFFI
- errs() << "Recompiling LLVM with --enable-libffi might help.\n";
-#endif
- return GenericValue();
-}
-
-
-//===----------------------------------------------------------------------===//
-// Functions "exported" to the running application...
-//
-
-// Visual Studio warns about returning GenericValue in extern "C" linkage
-#ifdef _MSC_VER
- #pragma warning(disable : 4190)
-#endif
-
-extern "C" { // Don't add C++ manglings to llvm mangling :)
-
-// void atexit(Function*)
-GenericValue lle_X_atexit(const FunctionType *FT,
- const std::vector<GenericValue> &Args) {
- assert(Args.size() == 1);
- TheInterpreter->addAtExitHandler((Function*)GVTOP(Args[0]));
- GenericValue GV;
- GV.IntVal = 0;
- return GV;
-}
-
-// void exit(int)
-GenericValue lle_X_exit(const FunctionType *FT,
- const std::vector<GenericValue> &Args) {
- TheInterpreter->exitCalled(Args[0]);
- return GenericValue();
-}
-
-// void abort(void)
-GenericValue lle_X_abort(const FunctionType *FT,
- const std::vector<GenericValue> &Args) {
- //FIXME: should we report or raise here?
- //llvm_report_error("Interpreted program raised SIGABRT");
- raise (SIGABRT);
- return GenericValue();
-}
-
-// int sprintf(char *, const char *, ...) - a very rough implementation to make
-// output useful.
-GenericValue lle_X_sprintf(const FunctionType *FT,
- const std::vector<GenericValue> &Args) {
- char *OutputBuffer = (char *)GVTOP(Args[0]);
- const char *FmtStr = (const char *)GVTOP(Args[1]);
- unsigned ArgNo = 2;
-
- // printf should return # chars printed. This is completely incorrect, but
- // close enough for now.
- GenericValue GV;
- GV.IntVal = APInt(32, strlen(FmtStr));
- while (1) {
- switch (*FmtStr) {
- case 0: return GV; // Null terminator...
- default: // Normal nonspecial character
- sprintf(OutputBuffer++, "%c", *FmtStr++);
- break;
- case '\\': { // Handle escape codes
- sprintf(OutputBuffer, "%c%c", *FmtStr, *(FmtStr+1));
- FmtStr += 2; OutputBuffer += 2;
- break;
- }
- case '%': { // Handle format specifiers
- char FmtBuf[100] = "", Buffer[1000] = "";
- char *FB = FmtBuf;
- *FB++ = *FmtStr++;
- char Last = *FB++ = *FmtStr++;
- unsigned HowLong = 0;
- while (Last != 'c' && Last != 'd' && Last != 'i' && Last != 'u' &&
- Last != 'o' && Last != 'x' && Last != 'X' && Last != 'e' &&
- Last != 'E' && Last != 'g' && Last != 'G' && Last != 'f' &&
- Last != 'p' && Last != 's' && Last != '%') {
- if (Last == 'l' || Last == 'L') HowLong++; // Keep track of l's
- Last = *FB++ = *FmtStr++;
- }
- *FB = 0;
-
- switch (Last) {
- case '%':
- memcpy(Buffer, "%", 2); break;
- case 'c':
- sprintf(Buffer, FmtBuf, uint32_t(Args[ArgNo++].IntVal.getZExtValue()));
- break;
- case 'd': case 'i':
- case 'u': case 'o':
- case 'x': case 'X':
- if (HowLong >= 1) {
- if (HowLong == 1 &&
- TheInterpreter->getTargetData()->getPointerSizeInBits() == 64 &&
- sizeof(long) < sizeof(int64_t)) {
- // Make sure we use %lld with a 64 bit argument because we might be
- // compiling LLI on a 32 bit compiler.
- unsigned Size = strlen(FmtBuf);
- FmtBuf[Size] = FmtBuf[Size-1];
- FmtBuf[Size+1] = 0;
- FmtBuf[Size-1] = 'l';
- }
- sprintf(Buffer, FmtBuf, Args[ArgNo++].IntVal.getZExtValue());
- } else
- sprintf(Buffer, FmtBuf,uint32_t(Args[ArgNo++].IntVal.getZExtValue()));
- break;
- case 'e': case 'E': case 'g': case 'G': case 'f':
- sprintf(Buffer, FmtBuf, Args[ArgNo++].DoubleVal); break;
- case 'p':
- sprintf(Buffer, FmtBuf, (void*)GVTOP(Args[ArgNo++])); break;
- case 's':
- sprintf(Buffer, FmtBuf, (char*)GVTOP(Args[ArgNo++])); break;
- default:
- errs() << "<unknown printf code '" << *FmtStr << "'!>";
- ArgNo++; break;
- }
- size_t Len = strlen(Buffer);
- memcpy(OutputBuffer, Buffer, Len + 1);
- OutputBuffer += Len;
- }
- break;
- }
- }
- return GV;
-}
-
-// int printf(const char *, ...) - a very rough implementation to make output
-// useful.
-GenericValue lle_X_printf(const FunctionType *FT,
- const std::vector<GenericValue> &Args) {
- char Buffer[10000];
- std::vector<GenericValue> NewArgs;
- NewArgs.push_back(PTOGV((void*)&Buffer[0]));
- NewArgs.insert(NewArgs.end(), Args.begin(), Args.end());
- GenericValue GV = lle_X_sprintf(FT, NewArgs);
- outs() << Buffer;
- return GV;
-}
-
-// int sscanf(const char *format, ...);
-GenericValue lle_X_sscanf(const FunctionType *FT,
- const std::vector<GenericValue> &args) {
- assert(args.size() < 10 && "Only handle up to 10 args to sscanf right now!");
-
- char *Args[10];
- for (unsigned i = 0; i < args.size(); ++i)
- Args[i] = (char*)GVTOP(args[i]);
-
- GenericValue GV;
- GV.IntVal = APInt(32, sscanf(Args[0], Args[1], Args[2], Args[3], Args[4],
- Args[5], Args[6], Args[7], Args[8], Args[9]));
- return GV;
-}
-
-// int scanf(const char *format, ...);
-GenericValue lle_X_scanf(const FunctionType *FT,
- const std::vector<GenericValue> &args) {
- assert(args.size() < 10 && "Only handle up to 10 args to scanf right now!");
-
- char *Args[10];
- for (unsigned i = 0; i < args.size(); ++i)
- Args[i] = (char*)GVTOP(args[i]);
-
- GenericValue GV;
- GV.IntVal = APInt(32, scanf( Args[0], Args[1], Args[2], Args[3], Args[4],
- Args[5], Args[6], Args[7], Args[8], Args[9]));
- return GV;
-}
-
-// int fprintf(FILE *, const char *, ...) - a very rough implementation to make
-// output useful.
-GenericValue lle_X_fprintf(const FunctionType *FT,
- const std::vector<GenericValue> &Args) {
- assert(Args.size() >= 2);
- char Buffer[10000];
- std::vector<GenericValue> NewArgs;
- NewArgs.push_back(PTOGV(Buffer));
- NewArgs.insert(NewArgs.end(), Args.begin()+1, Args.end());
- GenericValue GV = lle_X_sprintf(FT, NewArgs);
-
- fputs(Buffer, (FILE *) GVTOP(Args[0]));
- return GV;
-}
-
-} // End extern "C"
-
-// Done with externals; turn the warning back on
-#ifdef _MSC_VER
- #pragma warning(default: 4190)
-#endif
-
-
-void Interpreter::initializeExternalFunctions() {
- sys::ScopedLock Writer(*FunctionsLock);
- FuncNames["lle_X_atexit"] = lle_X_atexit;
- FuncNames["lle_X_exit"] = lle_X_exit;
- FuncNames["lle_X_abort"] = lle_X_abort;
-
- FuncNames["lle_X_printf"] = lle_X_printf;
- FuncNames["lle_X_sprintf"] = lle_X_sprintf;
- FuncNames["lle_X_sscanf"] = lle_X_sscanf;
- FuncNames["lle_X_scanf"] = lle_X_scanf;
- FuncNames["lle_X_fprintf"] = lle_X_fprintf;
-}
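
As the header comment of the file above explains, calls to well-known library functions are serviced by hand-written lle_* wrappers looked up by name, and only then does the libffi path get a chance. A minimal sketch of that first mechanism, with invented names and without the mangled type-letter prefix, could look like this:

#include <map>
#include <string>
#include <vector>

// Hypothetical wrapper table: generic "lle_X_<name>" entries take the raw
// argument vector and return one value; unknown names yield a null pointer
// so the caller can fall back to another mechanism (e.g. libffi).
using GenericVal = long;
using ExtFunc = GenericVal (*)(const std::vector<GenericVal> &);

static GenericVal lle_X_abs(const std::vector<GenericVal> &Args) {
  return Args.empty() ? 0 : (Args[0] < 0 ? -Args[0] : Args[0]);
}

static std::map<std::string, ExtFunc> &wrapperTable() {
  static std::map<std::string, ExtFunc> Table = { {"lle_X_abs", &lle_X_abs} };
  return Table;
}

static ExtFunc lookupWrapper(const std::string &CalleeName) {
  auto It = wrapperTable().find("lle_X_" + CalleeName);
  return It == wrapperTable().end() ? nullptr : It->second;
}
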
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp b/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
deleted file mode 100644
index 43e3453..0000000
--- a/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/Interpreter.cpp
+++ /dev/null
@@ -1,98 +0,0 @@
-//===- Interpreter.cpp - Top-Level LLVM Interpreter Implementation --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the top-level functionality for the LLVM interpreter.
-// This interpreter is designed to be a very simple, portable, inefficient
-// interpreter.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Interpreter.h"
-#include "llvm/CodeGen/IntrinsicLowering.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Module.h"
-#include <cstring>
-using namespace llvm;
-
-namespace {
-
-static struct RegisterInterp {
- RegisterInterp() { Interpreter::Register(); }
-} InterpRegistrator;
-
-}
-
-extern "C" void LLVMLinkInInterpreter() { }
-
-/// create - Create a new interpreter object. This can never fail.
-///
-ExecutionEngine *Interpreter::create(Module *M, std::string* ErrStr) {
- // Tell this Module to materialize everything and release the GVMaterializer.
- if (M->MaterializeAllPermanently(ErrStr))
- // We got an error, just return 0
- return 0;
-
- return new Interpreter(M);
-}
-
-//===----------------------------------------------------------------------===//
-// Interpreter ctor - Initialize stuff
-//
-Interpreter::Interpreter(Module *M)
- : ExecutionEngine(M), TD(M) {
-
- memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
- setTargetData(&TD);
- // Initialize the "backend"
- initializeExecutionEngine();
- initializeExternalFunctions();
- emitGlobals();
-
- IL = new IntrinsicLowering(TD);
-}
-
-Interpreter::~Interpreter() {
- delete IL;
-}
-
-void Interpreter::runAtExitHandlers () {
- while (!AtExitHandlers.empty()) {
- callFunction(AtExitHandlers.back(), std::vector<GenericValue>());
- AtExitHandlers.pop_back();
- run();
- }
-}
-
-/// run - Start execution with the specified function and arguments.
-///
-GenericValue
-Interpreter::runFunction(Function *F,
- const std::vector<GenericValue> &ArgValues) {
- assert (F && "Function *F was null at entry to run()");
-
- // Try extra hard not to pass extra args to a function that isn't
- // expecting them. C programmers frequently bend the rules and
- // declare main() with fewer parameters than it actually gets
- // passed, and the interpreter barfs if you pass a function more
- // parameters than it is declared to take. This does not attempt to
- // take into account gratuitous differences in declared types,
- // though.
- std::vector<GenericValue> ActualArgs;
- const unsigned ArgCount = F->getFunctionType()->getNumParams();
- for (unsigned i = 0; i < ArgCount; ++i)
- ActualArgs.push_back(ArgValues[i]);
-
- // Set up the function call.
- callFunction(F, ActualArgs);
-
- // Start executing the function.
- run();
-
- return ExitValue;
-}
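
runFunction() above deliberately passes the callee only as many values as its declared parameter count, so a main() declared with fewer parameters than the harness supplies still runs. The same trimming step in isolation, with a generic value type and an invented helper name, is simply:

#include <vector>

// Keep only the first DeclaredParams values; anything extra is dropped,
// mirroring the "try extra hard not to pass extra args" note above.
template <typename Value>
static std::vector<Value> trimToDeclaredArity(const std::vector<Value> &Given,
                                              unsigned DeclaredParams) {
  std::vector<Value> Actual;
  for (unsigned i = 0; i < DeclaredParams && i < Given.size(); ++i)
    Actual.push_back(Given[i]);
  return Actual;
}
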
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h b/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
deleted file mode 100644
index bc4200b..0000000
--- a/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/Interpreter.h
+++ /dev/null
@@ -1,244 +0,0 @@
-//===-- Interpreter.h ------------------------------------------*- C++ -*--===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This header file defines the interpreter structure
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLI_INTERPRETER_H
-#define LLI_INTERPRETER_H
-
-#include "llvm/Function.h"
-#include "llvm/ExecutionEngine/ExecutionEngine.h"
-#include "llvm/ExecutionEngine/GenericValue.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/System/DataTypes.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/InstVisitor.h"
-#include "llvm/Support/raw_ostream.h"
-namespace llvm {
-
-class IntrinsicLowering;
-struct FunctionInfo;
-template<typename T> class generic_gep_type_iterator;
-class ConstantExpr;
-typedef generic_gep_type_iterator<User::const_op_iterator> gep_type_iterator;
-
-
-// AllocaHolder - Object to track all of the blocks of memory allocated by
-// alloca. When the function returns, this object is popped off the execution
-// stack, which causes the dtor to be run, which frees all the alloca'd memory.
-//
-class AllocaHolder {
- friend class AllocaHolderHandle;
- std::vector<void*> Allocations;
- unsigned RefCnt;
-public:
- AllocaHolder() : RefCnt(0) {}
- void add(void *mem) { Allocations.push_back(mem); }
- ~AllocaHolder() {
- for (unsigned i = 0; i < Allocations.size(); ++i)
- free(Allocations[i]);
- }
-};
-
-// AllocaHolderHandle gives AllocaHolder value semantics so we can stick it into
-// a vector...
-//
-class AllocaHolderHandle {
- AllocaHolder *H;
-public:
- AllocaHolderHandle() : H(new AllocaHolder()) { H->RefCnt++; }
- AllocaHolderHandle(const AllocaHolderHandle &AH) : H(AH.H) { H->RefCnt++; }
- ~AllocaHolderHandle() { if (--H->RefCnt == 0) delete H; }
-
- void add(void *mem) { H->add(mem); }
-};
-
-typedef std::vector<GenericValue> ValuePlaneTy;
-
-// ExecutionContext struct - This struct represents one stack frame currently
-// executing.
-//
-struct ExecutionContext {
- Function *CurFunction;// The currently executing function
- BasicBlock *CurBB; // The currently executing BB
- BasicBlock::iterator CurInst; // The next instruction to execute
- std::map<Value *, GenericValue> Values; // LLVM values used in this invocation
- std::vector<GenericValue> VarArgs; // Values passed through an ellipsis
- CallSite Caller; // Holds the call that called subframes.
- // NULL if main func or debugger invoked fn
- AllocaHolderHandle Allocas; // Track memory allocated by alloca
-};
-
-// Interpreter - This class represents the entirety of the interpreter.
-//
-class Interpreter : public ExecutionEngine, public InstVisitor<Interpreter> {
- GenericValue ExitValue; // The return value of the called function
- TargetData TD;
- IntrinsicLowering *IL;
-
- // The runtime stack of executing code. The top of the stack is the current
- // function record.
- std::vector<ExecutionContext> ECStack;
-
- // AtExitHandlers - List of functions to call when the program exits,
- // registered with the atexit() library function.
- std::vector<Function*> AtExitHandlers;
-
-public:
- explicit Interpreter(Module *M);
- ~Interpreter();
-
- /// runAtExitHandlers - Run any functions registered by the program's calls to
- /// atexit(3), which we intercept and store in AtExitHandlers.
- ///
- void runAtExitHandlers();
-
- static void Register() {
- InterpCtor = create;
- }
-
- /// create - Create an interpreter ExecutionEngine. This can never fail.
- ///
- static ExecutionEngine *create(Module *M, std::string *ErrorStr = 0);
-
- /// run - Start execution with the specified function and arguments.
- ///
- virtual GenericValue runFunction(Function *F,
- const std::vector<GenericValue> &ArgValues);
-
- /// recompileAndRelinkFunction - For the interpreter, functions are always
- /// up-to-date.
- ///
- virtual void *recompileAndRelinkFunction(Function *F) {
- return getPointerToFunction(F);
- }
-
- /// freeMachineCodeForFunction - The interpreter does not generate any code.
- ///
- void freeMachineCodeForFunction(Function *F) { }
-
- // Methods used to execute code:
- // Place a call on the stack
- void callFunction(Function *F, const std::vector<GenericValue> &ArgVals);
- void run(); // Execute instructions until nothing left to do
-
- // Opcode Implementations
- void visitReturnInst(ReturnInst &I);
- void visitBranchInst(BranchInst &I);
- void visitSwitchInst(SwitchInst &I);
- void visitIndirectBrInst(IndirectBrInst &I);
-
- void visitBinaryOperator(BinaryOperator &I);
- void visitICmpInst(ICmpInst &I);
- void visitFCmpInst(FCmpInst &I);
- void visitAllocaInst(AllocaInst &I);
- void visitLoadInst(LoadInst &I);
- void visitStoreInst(StoreInst &I);
- void visitGetElementPtrInst(GetElementPtrInst &I);
- void visitPHINode(PHINode &PN) {
- llvm_unreachable("PHI nodes already handled!");
- }
- void visitTruncInst(TruncInst &I);
- void visitZExtInst(ZExtInst &I);
- void visitSExtInst(SExtInst &I);
- void visitFPTruncInst(FPTruncInst &I);
- void visitFPExtInst(FPExtInst &I);
- void visitUIToFPInst(UIToFPInst &I);
- void visitSIToFPInst(SIToFPInst &I);
- void visitFPToUIInst(FPToUIInst &I);
- void visitFPToSIInst(FPToSIInst &I);
- void visitPtrToIntInst(PtrToIntInst &I);
- void visitIntToPtrInst(IntToPtrInst &I);
- void visitBitCastInst(BitCastInst &I);
- void visitSelectInst(SelectInst &I);
-
-
- void visitCallSite(CallSite CS);
- void visitCallInst(CallInst &I) { visitCallSite (CallSite (&I)); }
- void visitInvokeInst(InvokeInst &I) { visitCallSite (CallSite (&I)); }
- void visitUnwindInst(UnwindInst &I);
- void visitUnreachableInst(UnreachableInst &I);
-
- void visitShl(BinaryOperator &I);
- void visitLShr(BinaryOperator &I);
- void visitAShr(BinaryOperator &I);
-
- void visitVAArgInst(VAArgInst &I);
- void visitInstruction(Instruction &I) {
- errs() << I;
- llvm_unreachable("Instruction not interpretable yet!");
- }
-
- GenericValue callExternalFunction(Function *F,
- const std::vector<GenericValue> &ArgVals);
- void exitCalled(GenericValue GV);
-
- void addAtExitHandler(Function *F) {
- AtExitHandlers.push_back(F);
- }
-
- GenericValue *getFirstVarArg () {
- return &(ECStack.back ().VarArgs[0]);
- }
-
- //FIXME: private:
-public:
- GenericValue executeGEPOperation(Value *Ptr, gep_type_iterator I,
- gep_type_iterator E, ExecutionContext &SF);
-
-private: // Helper functions
- // SwitchToNewBasicBlock - Start execution in a new basic block and run any
- // PHI nodes in the top of the block. This is used for intraprocedural
- // control flow.
- //
- void SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF);
-
- void *getPointerToFunction(Function *F) { return (void*)F; }
- void *getPointerToBasicBlock(BasicBlock *BB) { return (void*)BB; }
-
- void initializeExecutionEngine() { }
- void initializeExternalFunctions();
- GenericValue getConstantExprValue(ConstantExpr *CE, ExecutionContext &SF);
- GenericValue getOperandValue(Value *V, ExecutionContext &SF);
- GenericValue executeTruncInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF);
- GenericValue executeSExtInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF);
- GenericValue executeZExtInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF);
- GenericValue executeFPTruncInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF);
- GenericValue executeFPExtInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF);
- GenericValue executeFPToUIInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF);
- GenericValue executeFPToSIInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF);
- GenericValue executeUIToFPInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF);
- GenericValue executeSIToFPInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF);
- GenericValue executePtrToIntInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF);
- GenericValue executeIntToPtrInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF);
- GenericValue executeBitCastInst(Value *SrcVal, const Type *DstTy,
- ExecutionContext &SF);
- GenericValue executeCastOperation(Instruction::CastOps opcode, Value *SrcVal,
- const Type *Ty, ExecutionContext &SF);
- void popStackAndReturnValueToCaller(const Type *RetTy, GenericValue Result);
-
-};
-
-} // End llvm namespace
-
-#endif
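
The AllocaHolder/AllocaHolderHandle pair declared above gives each stack frame a reference-counted bag of malloc'd blocks that is freed when the frame goes away. A sketch of the same idea using std::shared_ptr in place of the manual RefCnt (illustrative only, not the original classes):

#include <cstdlib>
#include <memory>
#include <vector>

// All blocks registered with a frame's bag are freed together when the last
// handle to the bag is destroyed.
class AllocaBag {
  std::vector<void *> Blocks;
public:
  void add(void *Mem) { Blocks.push_back(Mem); }
  ~AllocaBag() {
    for (void *Mem : Blocks)
      std::free(Mem);
  }
};

struct FrameAllocas {
  std::shared_ptr<AllocaBag> Bag = std::make_shared<AllocaBag>();
  void add(void *Mem) { Bag->add(Mem); }
};
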
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/Makefile b/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/Makefile
deleted file mode 100644
index 5def136..0000000
--- a/libclamav/c++/llvm/lib/ExecutionEngine/Interpreter/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-##===- lib/ExecutionEngine/Interpreter/Makefile ------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMInterpreter
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/Intercept.cpp b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/Intercept.cpp
index c00b60a..274f816 100644
--- a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/Intercept.cpp
+++ b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/Intercept.cpp
@@ -89,6 +89,10 @@ static int jit_atexit(void (*Fn)()) {
return 0; // Always successful
}
+static int jit_noop() {
+ return 0;
+}
+
//===----------------------------------------------------------------------===//
//
/// getPointerToNamedFunction - This method returns the address of the specified
@@ -104,6 +108,14 @@ void *JIT::getPointerToNamedFunction(const std::string &Name,
if (Name == "exit") return (void*)(intptr_t)&jit_exit;
if (Name == "atexit") return (void*)(intptr_t)&jit_atexit;
+ // We should not invoke the parent's ctors/dtors from generated main()!
+ // On MinGW and Cygwin, the symbol __main would otherwise resolve to the
+ // callee's (e.g. tools/lli) copy, invoking duplicated ctors and
+ // registering the wrong dtors with atexit(3).
+ // We expect ExecutionEngine::runStaticConstructorsDestructors() to be
+ // called before ExecutionEngine::runFunctionAsMain() is called.
+ if (Name == "__main") return (void*)(intptr_t)&jit_noop;
+
const char *NameStr = Name.c_str();
// If this is an asm specifier, skip the sentinel.
if (NameStr[0] == 1) ++NameStr;
@@ -142,7 +154,7 @@ void *JIT::getPointerToNamedFunction(const std::string &Name,
return RP;
if (AbortOnFailure) {
- llvm_report_error("Program used external function '"+Name+
+ report_fatal_error("Program used external function '"+Name+
"' which could not be resolved!");
}
return 0;
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JIT.cpp b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JIT.cpp
index b6c3367..116c791 100644
--- a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JIT.cpp
+++ b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JIT.cpp
@@ -67,7 +67,7 @@ extern "C" void LLVMLinkInJIT() {
}
-#if defined(__GNUC__) && !defined(__ARM__EABI__)
+#if defined(__GNUC__) && !defined(__ARM_EABI__) && !defined(__USING_SJLJ_EXCEPTIONS__)
// libgcc defines the __register_frame function to dynamically register new
// dwarf frames for exception handling. This functionality is not portable
@@ -219,8 +219,8 @@ ExecutionEngine *JIT::createJIT(Module *M,
StringRef MArch,
StringRef MCPU,
const SmallVectorImpl<std::string>& MAttrs) {
- // Make sure we can resolve symbols in the program as well. The zero arg
- // to the function tells DynamicLibrary to load the program, not a library.
+ // Try to register the program as a source of symbols to resolve against.
+ sys::DynamicLibrary::LoadLibraryPermanently(0, NULL);
/* CLAMAV LOCAL: no dlopen */
// if (sys::DynamicLibrary::LoadLibraryPermanently(0, ErrorStr))
// return 0;
@@ -305,11 +305,11 @@ JIT::JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
// Turn the machine code intermediate representation into bytes in memory that
// may be executed.
if (TM.addPassesToEmitMachineCode(PM, *JCE, OptLevel)) {
- llvm_report_error("Target does not support machine code emission!");
+ report_fatal_error("Target does not support machine code emission!");
}
// Register routine for informing unwinding runtime about new EH frames
-#if defined(__GNUC__) && !defined(__ARM_EABI__)
+#if defined(__GNUC__) && !defined(__ARM_EABI__) && !defined(__USING_SJLJ_EXCEPTIONS__)
#if USE_KEYMGR
struct LibgccObjectInfo* LOI = (struct LibgccObjectInfo*)
_keymgr_get_and_lock_processwide_ptr(KEYMGR_GCC3_DW2_OBJ_LIST);
@@ -353,7 +353,7 @@ void JIT::addModule(Module *M) {
// Turn the machine code intermediate representation into bytes in memory
// that may be executed.
if (TM.addPassesToEmitMachineCode(PM, *JCE, CodeGenOpt::Default)) {
- llvm_report_error("Target does not support machine code emission!");
+ report_fatal_error("Target does not support machine code emission!");
}
// Initialize passes.
@@ -384,7 +384,7 @@ bool JIT::removeModule(Module *M) {
// Turn the machine code intermediate representation into bytes in memory
// that may be executed.
if (TM.addPassesToEmitMachineCode(PM, *JCE, CodeGenOpt::Default)) {
- llvm_report_error("Target does not support machine code emission!");
+ report_fatal_error("Target does not support machine code emission!");
}
// Initialize passes.
@@ -627,10 +627,7 @@ void JIT::runJITOnFunction(Function *F, MachineCodeInfo *MCI) {
void JIT::runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked) {
assert(!isAlreadyCodeGenerating && "Error: Recursive compilation detected!");
- // JIT the function
- isAlreadyCodeGenerating = true;
- jitstate->getPM(locked).run(*F);
- isAlreadyCodeGenerating = false;
+ jitTheFunction(F, locked);
// If the function referred to another function that had not yet been
// read from bitcode, and we are jitting non-lazily, emit it now.
@@ -641,10 +638,7 @@ void JIT::runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked) {
assert(!PF->hasAvailableExternallyLinkage() &&
"Externally-defined function should not be in pending list.");
- // JIT the function
- isAlreadyCodeGenerating = true;
- jitstate->getPM(locked).run(*PF);
- isAlreadyCodeGenerating = false;
+ jitTheFunction(PF, locked);
// Now that the function has been jitted, ask the JITEmitter to rewrite
// the stub with real address of the function.
@@ -652,6 +646,15 @@ void JIT::runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked) {
}
}
+void JIT::jitTheFunction(Function *F, const MutexGuard &locked) {
+ isAlreadyCodeGenerating = true;
+ jitstate->getPM(locked).run(*F);
+ isAlreadyCodeGenerating = false;
+
+ // clear basic block addresses after this function is done
+ getBasicBlockAddressMap(locked).clear();
+}
+
/// getPointerToFunction - This method is used to get the address of the
/// specified function, compiling it if necessary.
///
@@ -666,7 +669,7 @@ void *JIT::getPointerToFunction(Function *F) {
// exists in this Module.
std::string ErrorMsg;
if (F->Materialize(&ErrorMsg)) {
- llvm_report_error("Error reading function '" + F->getName()+
+ report_fatal_error("Error reading function '" + F->getName()+
"' from bitcode file: " + ErrorMsg);
}
@@ -688,6 +691,41 @@ void *JIT::getPointerToFunction(Function *F) {
return Addr;
}
+void JIT::addPointerToBasicBlock(const BasicBlock *BB, void *Addr) {
+ MutexGuard locked(lock);
+
+ BasicBlockAddressMapTy::iterator I =
+ getBasicBlockAddressMap(locked).find(BB);
+ if (I == getBasicBlockAddressMap(locked).end()) {
+ getBasicBlockAddressMap(locked)[BB] = Addr;
+ } else {
+ // ignore repeats: some BBs can be split into a few MBBs?
+ }
+}
+
+void JIT::clearPointerToBasicBlock(const BasicBlock *BB) {
+ MutexGuard locked(lock);
+ getBasicBlockAddressMap(locked).erase(BB);
+}
+
+void *JIT::getPointerToBasicBlock(BasicBlock *BB) {
+ // make sure its function is compiled by the JIT
+ (void)getPointerToFunction(BB->getParent());
+
+ // resolve basic block address
+ MutexGuard locked(lock);
+
+ BasicBlockAddressMapTy::iterator I =
+ getBasicBlockAddressMap(locked).find(BB);
+ if (I != getBasicBlockAddressMap(locked).end()) {
+ return I->second;
+ } else {
+ assert(0 && "JIT does not have BB address for address-of-label, was"
+ " it eliminated by optimizer?");
+ return 0;
+ }
+}
+
/// getOrEmitGlobalVariable - Return the address of the specified global
/// variable, possibly emitting it to memory if needed. This is used by the
/// Emitter.
@@ -705,7 +743,7 @@ void *JIT::getOrEmitGlobalVariable(const GlobalVariable *GV) {
#endif
Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(GV->getName());
if (Ptr == 0) {
- llvm_report_error("Could not resolve external global address: "
+ report_fatal_error("Could not resolve external global address: "
+GV->getName());
}
addGlobalMapping(GV, Ptr);
@@ -755,7 +793,7 @@ char* JIT::getMemoryForGV(const GlobalVariable* GV) {
// situation. It's returned in the same block of memory as code which may
// not be writable.
if (isGVCompilationDisabled() && !GV->isConstant()) {
- llvm_report_error("Compilation of non-internal GlobalValue is disabled!");
+ report_fatal_error("Compilation of non-internal GlobalValue is disabled!");
}
// Some applications require globals and code to live together, so they may
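
The new addPointerToBasicBlock / clearPointerToBasicBlock / getPointerToBasicBlock methods keep a lock-protected map from address-taken basic blocks to the addresses the emitter recorded for them, and jitTheFunction() clears it once codegen of a function is done. A rough sketch of that registry pattern in plain C++ (std::mutex and std::unordered_map stand in for the MutexGuard/ValueMap machinery; BlockAddressRegistry is an illustrative name, not an LLVM class):

    #include <cassert>
    #include <mutex>
    #include <unordered_map>

    class BlockAddressRegistry {
      std::mutex Lock;
      std::unordered_map<const void *, void *> Map; // block -> emitted address

    public:
      void add(const void *BB, void *Addr) {
        std::lock_guard<std::mutex> G(Lock);
        Map.emplace(BB, Addr); // keep the first address, ignore repeats
      }
      void clear(const void *BB) {
        std::lock_guard<std::mutex> G(Lock);
        Map.erase(BB);
      }
      void *lookup(const void *BB) {
        std::lock_guard<std::mutex> G(Lock);
        auto I = Map.find(BB);
        assert(I != Map.end() && "no address recorded for this block");
        return I == Map.end() ? nullptr : I->second;
      }
    };
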
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JIT.h b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JIT.h
index edae719..1d1763e 100644
--- a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JIT.h
+++ b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JIT.h
@@ -51,6 +51,10 @@ public:
class JIT : public ExecutionEngine {
+ /// types
+ typedef ValueMap<const BasicBlock *, void *>
+ BasicBlockAddressMapTy;
+ /// data
TargetMachine &TM; // The current target we are compiling to
TargetJITInfo &TJI; // The JITInfo for the target we are compiling to
JITCodeEmitter *JCE; // JCE object
@@ -67,6 +71,12 @@ class JIT : public ExecutionEngine {
JITState *jitstate;
+ /// BasicBlockAddressMap - A mapping between LLVM basic blocks and their
+ /// actualized version, only filled for basic blocks that have their address
+ /// taken.
+ BasicBlockAddressMapTy BasicBlockAddressMap;
+
+
JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
JITMemoryManager *JMM, CodeGenOpt::Level OptLevel,
bool AllocateGVsWithCode);
@@ -90,9 +100,9 @@ public:
CodeGenOpt::Level OptLevel =
CodeGenOpt::Default,
bool GVsWithCode = true,
- CodeModel::Model CMM = CodeModel::Default) {
+ CodeModel::Model CMM = CodeModel::Default) {
return ExecutionEngine::createJIT(M, Err, JMM, OptLevel, GVsWithCode,
- CMM);
+ CMM);
}
virtual void addModule(Module *M);
@@ -127,10 +137,15 @@ public:
///
void *getPointerToFunction(Function *F);
- void *getPointerToBasicBlock(BasicBlock *BB) {
- assert(0 && "JIT does not support address-of-label yet!");
- return 0;
- }
+ /// addPointerToBasicBlock - Adds the address of the specified basic block.
+ void addPointerToBasicBlock(const BasicBlock *BB, void *Addr);
+
+ /// clearPointerToBasicBlock - Removes the address of the specified basic block.
+ void clearPointerToBasicBlock(const BasicBlock *BB);
+
+ /// getPointerToBasicBlock - This returns the address of the specified basic
+ /// block, assuming function is compiled.
+ void *getPointerToBasicBlock(BasicBlock *BB);
/// getOrEmitGlobalVariable - Return the address of the specified global
/// variable, possibly emitting it to memory if needed. This is used by the
@@ -197,11 +212,18 @@ public:
const JITEvent_EmittedFunctionDetails &Details);
void NotifyFreeingMachineCode(void *OldPtr);
+ BasicBlockAddressMapTy &
+ getBasicBlockAddressMap(const MutexGuard &) {
+ return BasicBlockAddressMap;
+ }
+
+
private:
static JITCodeEmitter *createEmitter(JIT &J, JITMemoryManager *JMM,
TargetMachine &tm);
void runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked);
void updateFunctionStub(Function *F);
+ void jitTheFunction(Function *F, const MutexGuard &locked);
protected:
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp
index 565509c..6e11a3c 100644
--- a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp
+++ b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITDebugRegisterer.cpp
@@ -80,7 +80,7 @@ std::string JITDebugRegisterer::MakeELF(const Function *F, DebugInfo &I) {
// Copy the binary into the .text section. This isn't necessary, but it's
// useful to be able to disassemble the ELF by hand.
- ELFSection &Text = EW.getTextSection((Function *)F);
+ ELFSection &Text = EW.getTextSection(const_cast<Function *>(F));
Text.Addr = (uint64_t)I.FnStart;
// TODO: We could eliminate this copy if we somehow used a pointer/size pair
// instead of a vector.
@@ -90,8 +90,8 @@ std::string JITDebugRegisterer::MakeELF(const Function *F, DebugInfo &I) {
// section. This allows GDB to get a good stack trace, particularly on
// linux x86_64. Mark this as a PROGBITS section that needs to be loaded
// into memory at runtime.
- ELFSection &EH = EW.getSection(".eh_frame", ELFSection::SHT_PROGBITS,
- ELFSection::SHF_ALLOC);
+ ELFSection &EH = EW.getSection(".eh_frame", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC);
// Pointers in the DWARF EH info are all relative to the EH frame start,
// which is stored here.
EH.Addr = (uint64_t)I.EhStart;
@@ -102,9 +102,9 @@ std::string JITDebugRegisterer::MakeELF(const Function *F, DebugInfo &I) {
// Add this single function to the symbol table, so the debugger prints the
// name instead of '???'. We give the symbol default global visibility.
ELFSym *FnSym = ELFSym::getGV(F,
- ELFSym::STB_GLOBAL,
- ELFSym::STT_FUNC,
- ELFSym::STV_DEFAULT);
+ ELF::STB_GLOBAL,
+ ELF::STT_FUNC,
+ ELF::STV_DEFAULT);
FnSym->SectionIdx = Text.SectionIdx;
FnSym->Size = I.FnEnd - I.FnStart;
FnSym->Value = 0; // Offset from start of section.
@@ -165,7 +165,7 @@ void JITDebugRegisterer::RegisterFunction(const Function *F, DebugInfo &I) {
void JITDebugRegisterer::UnregisterFunctionInternal(
RegisteredFunctionsMap::iterator I) {
- jit_code_entry *JITCodeEntry = I->second.second;
+ jit_code_entry *&JITCodeEntry = I->second.second;
// Acquire the lock and do the unregistration.
{
@@ -190,6 +190,9 @@ void JITDebugRegisterer::UnregisterFunctionInternal(
__jit_debug_register_code();
}
+ delete JITCodeEntry;
+ JITCodeEntry = NULL;
+
// Free the ELF file in memory.
std::string &Buffer = I->second.first;
Buffer.clear();
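
Binding JITCodeEntry as a reference into the map slot lets the added delete/NULL pair both free the entry and clear what the container still stores, instead of only nulling a local copy. A tiny illustration of that reference-into-container idiom (hypothetical Entry type, unrelated to the GDB JIT interface structs):

    #include <map>

    struct Entry { int payload; };

    int main() {
      std::map<int, Entry *> Registered;
      Registered[42] = new Entry{7};

      // The reference aliases the mapped value, so the container's pointer is
      // cleared together with the deallocation.
      Entry *&Slot = Registered[42];
      delete Slot;
      Slot = nullptr;
      return 0;
    }
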
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
index 946351b..1105bcc 100644
--- a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
+++ b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
@@ -23,6 +23,7 @@
#include "llvm/ExecutionEngine/JITMemoryManager.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
@@ -51,7 +52,7 @@ unsigned char* JITDwarfEmitter::EmitDwarfTable(MachineFunction& F,
unsigned char* Result = 0;
- const std::vector<Function *> Personalities = MMI->getPersonalities();
+ const std::vector<const Function *> Personalities = MMI->getPersonalities();
EHFramePtr = EmitCommonEHFrame(Personalities[MMI->getPersonalityIndex()]);
Result = EmitEHFrame(Personalities[MMI->getPersonalityIndex()], EHFramePtr,
@@ -67,34 +68,29 @@ JITDwarfEmitter::EmitFrameMoves(intptr_t BaseLabelPtr,
unsigned PointerSize = TD->getPointerSize();
int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
PointerSize : -PointerSize;
- bool IsLocal = false;
- unsigned BaseLabelID = 0;
+ MCSymbol *BaseLabel = 0;
for (unsigned i = 0, N = Moves.size(); i < N; ++i) {
const MachineMove &Move = Moves[i];
- unsigned LabelID = Move.getLabelID();
+ MCSymbol *Label = Move.getLabel();
- if (LabelID) {
- LabelID = MMI->MappedLabel(LabelID);
-
- // Throw out move if the label is invalid.
- if (!LabelID) continue;
- }
+ // Throw out move if the label is invalid.
+ if (Label && (*JCE->getLabelLocations())[Label] == 0)
+ continue;
intptr_t LabelPtr = 0;
- if (LabelID) LabelPtr = JCE->getLabelAddress(LabelID);
+ if (Label) LabelPtr = JCE->getLabelAddress(Label);
const MachineLocation &Dst = Move.getDestination();
const MachineLocation &Src = Move.getSource();
// Advance row if new location.
- if (BaseLabelPtr && LabelID && (BaseLabelID != LabelID || !IsLocal)) {
+ if (BaseLabelPtr && Label && BaseLabel != Label) {
JCE->emitByte(dwarf::DW_CFA_advance_loc4);
JCE->emitInt32(LabelPtr - BaseLabelPtr);
- BaseLabelID = LabelID;
+ BaseLabel = Label;
BaseLabelPtr = LabelPtr;
- IsLocal = true;
}
// If advancing cfa.
@@ -170,13 +166,6 @@ static bool PadLT(const LandingPadInfo *L, const LandingPadInfo *R) {
namespace {
-struct KeyInfo {
- static inline unsigned getEmptyKey() { return -1U; }
- static inline unsigned getTombstoneKey() { return -2U; }
- static unsigned getHashValue(const unsigned &Key) { return Key; }
- static bool isEqual(unsigned LHS, unsigned RHS) { return LHS == RHS; }
-};
-
/// ActionEntry - Structure describing an entry in the actions table.
struct ActionEntry {
int ValueForTypeID; // The value to write - may not be equal to the type id.
@@ -192,13 +181,13 @@ struct PadRange {
unsigned RangeIndex;
};
-typedef DenseMap<unsigned, PadRange, KeyInfo> RangeMapType;
+typedef DenseMap<MCSymbol*, PadRange> RangeMapType;
/// CallSiteEntry - Structure describing an entry in the call-site table.
struct CallSiteEntry {
- unsigned BeginLabel; // zero indicates the start of the function.
- unsigned EndLabel; // zero indicates the end of the function.
- unsigned PadLabel; // zero indicates that there is no landing pad.
+ MCSymbol *BeginLabel; // zero indicates the start of the function.
+ MCSymbol *EndLabel; // zero indicates the end of the function.
+ MCSymbol *PadLabel; // zero indicates that there is no landing pad.
unsigned Action;
};
@@ -210,9 +199,9 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
assert(MMI && "MachineModuleInfo not registered!");
// Map all labels and get rid of any dead landing pads.
- MMI->TidyLandingPads();
+ MMI->TidyLandingPads(JCE->getLabelLocations());
- const std::vector<GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
+ const std::vector<const GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
if (PadInfos.empty()) return 0;
@@ -309,7 +298,7 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
const LandingPadInfo *LandingPad = LandingPads[i];
for (unsigned j=0, E = LandingPad->BeginLabels.size(); j != E; ++j) {
- unsigned BeginLabel = LandingPad->BeginLabels[j];
+ MCSymbol *BeginLabel = LandingPad->BeginLabels[j];
assert(!PadMap.count(BeginLabel) && "Duplicate landing pad labels!");
PadRange P = { i, j };
PadMap[BeginLabel] = P;
@@ -317,7 +306,7 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
}
bool MayThrow = false;
- unsigned LastLabel = 0;
+ MCSymbol *LastLabel = 0;
for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
I != E; ++I) {
for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
@@ -327,7 +316,7 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
continue;
}
- unsigned BeginLabel = MI->getOperand(0).getImm();
+ MCSymbol *BeginLabel = MI->getOperand(0).getMCSymbol();
assert(BeginLabel && "Invalid label!");
if (BeginLabel == LastLabel)
@@ -461,7 +450,7 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
// Emit the type ids.
for (unsigned M = TypeInfos.size(); M; --M) {
- GlobalVariable *GV = TypeInfos[M - 1];
+ const GlobalVariable *GV = TypeInfos[M - 1];
if (GV) {
if (TD->getPointerSize() == sizeof(int32_t))
@@ -606,447 +595,3 @@ JITDwarfEmitter::EmitEHFrame(const Function* Personality,
return StartEHPtr;
}
-
-unsigned JITDwarfEmitter::GetDwarfTableSizeInBytes(MachineFunction& F,
- JITCodeEmitter& jce,
- unsigned char* StartFunction,
- unsigned char* EndFunction) {
- const TargetMachine& TM = F.getTarget();
- TD = TM.getTargetData();
- stackGrowthDirection = TM.getFrameInfo()->getStackGrowthDirection();
- RI = TM.getRegisterInfo();
- JCE = &jce;
- unsigned FinalSize = 0;
-
- FinalSize += GetExceptionTableSizeInBytes(&F);
-
- const std::vector<Function *> Personalities = MMI->getPersonalities();
- FinalSize +=
- GetCommonEHFrameSizeInBytes(Personalities[MMI->getPersonalityIndex()]);
-
- FinalSize += GetEHFrameSizeInBytes(Personalities[MMI->getPersonalityIndex()],
- StartFunction);
-
- return FinalSize;
-}
-
-/// RoundUpToAlign - Add the specified alignment to FinalSize and returns
-/// the new value.
-static unsigned RoundUpToAlign(unsigned FinalSize, unsigned Alignment) {
- if (Alignment == 0) Alignment = 1;
- // Since we do not know where the buffer will be allocated, be pessimistic.
- return FinalSize + Alignment;
-}
-
-unsigned
-JITDwarfEmitter::GetEHFrameSizeInBytes(const Function* Personality,
- unsigned char* StartFunction) const {
- unsigned PointerSize = TD->getPointerSize();
- unsigned FinalSize = 0;
- // EH frame header.
- FinalSize += PointerSize;
- // FDE CIE Offset
- FinalSize += 3 * PointerSize;
- // If there is a personality and landing pads then point to the language
- // specific data area in the exception table.
- if (Personality) {
- FinalSize += MCAsmInfo::getULEB128Size(4);
- FinalSize += PointerSize;
- } else {
- FinalSize += MCAsmInfo::getULEB128Size(0);
- }
-
- // Indicate locations of function specific callee saved registers in
- // frame.
- FinalSize += GetFrameMovesSizeInBytes((intptr_t)StartFunction,
- MMI->getFrameMoves());
-
- FinalSize = RoundUpToAlign(FinalSize, 4);
-
- // Double zeroes for the unwind runtime
- FinalSize += 2 * PointerSize;
-
- return FinalSize;
-}
-
-unsigned JITDwarfEmitter::GetCommonEHFrameSizeInBytes(const Function* Personality)
- const {
-
- unsigned PointerSize = TD->getPointerSize();
- int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
- PointerSize : -PointerSize;
- unsigned FinalSize = 0;
- // EH Common Frame header
- FinalSize += PointerSize;
- FinalSize += 4;
- FinalSize += 1;
- FinalSize += Personality ? 5 : 3; // "zPLR" or "zR"
- FinalSize += MCAsmInfo::getULEB128Size(1);
- FinalSize += MCAsmInfo::getSLEB128Size(stackGrowth);
- FinalSize += 1;
-
- if (Personality) {
- FinalSize += MCAsmInfo::getULEB128Size(7);
-
- // Encoding
- FinalSize+= 1;
- //Personality
- FinalSize += PointerSize;
-
- FinalSize += MCAsmInfo::getULEB128Size(dwarf::DW_EH_PE_pcrel);
- FinalSize += MCAsmInfo::getULEB128Size(dwarf::DW_EH_PE_pcrel);
-
- } else {
- FinalSize += MCAsmInfo::getULEB128Size(1);
- FinalSize += MCAsmInfo::getULEB128Size(dwarf::DW_EH_PE_pcrel);
- }
-
- std::vector<MachineMove> Moves;
- RI->getInitialFrameState(Moves);
- FinalSize += GetFrameMovesSizeInBytes(0, Moves);
- FinalSize = RoundUpToAlign(FinalSize, 4);
- return FinalSize;
-}
-
-unsigned
-JITDwarfEmitter::GetFrameMovesSizeInBytes(intptr_t BaseLabelPtr,
- const std::vector<MachineMove> &Moves) const {
- unsigned PointerSize = TD->getPointerSize();
- int stackGrowth = stackGrowthDirection == TargetFrameInfo::StackGrowsUp ?
- PointerSize : -PointerSize;
- bool IsLocal = BaseLabelPtr;
- unsigned FinalSize = 0;
-
- for (unsigned i = 0, N = Moves.size(); i < N; ++i) {
- const MachineMove &Move = Moves[i];
- unsigned LabelID = Move.getLabelID();
-
- if (LabelID) {
- LabelID = MMI->MappedLabel(LabelID);
-
- // Throw out move if the label is invalid.
- if (!LabelID) continue;
- }
-
- intptr_t LabelPtr = 0;
- if (LabelID) LabelPtr = JCE->getLabelAddress(LabelID);
-
- const MachineLocation &Dst = Move.getDestination();
- const MachineLocation &Src = Move.getSource();
-
- // Advance row if new location.
- if (BaseLabelPtr && LabelID && (BaseLabelPtr != LabelPtr || !IsLocal)) {
- FinalSize++;
- FinalSize += PointerSize;
- BaseLabelPtr = LabelPtr;
- IsLocal = true;
- }
-
- // If advancing cfa.
- if (Dst.isReg() && Dst.getReg() == MachineLocation::VirtualFP) {
- if (!Src.isReg()) {
- if (Src.getReg() == MachineLocation::VirtualFP) {
- ++FinalSize;
- } else {
- ++FinalSize;
- unsigned RegNum = RI->getDwarfRegNum(Src.getReg(), true);
- FinalSize += MCAsmInfo::getULEB128Size(RegNum);
- }
-
- int Offset = -Src.getOffset();
-
- FinalSize += MCAsmInfo::getULEB128Size(Offset);
- } else {
- llvm_unreachable("Machine move no supported yet.");
- }
- } else if (Src.isReg() &&
- Src.getReg() == MachineLocation::VirtualFP) {
- if (Dst.isReg()) {
- ++FinalSize;
- unsigned RegNum = RI->getDwarfRegNum(Dst.getReg(), true);
- FinalSize += MCAsmInfo::getULEB128Size(RegNum);
- } else {
- llvm_unreachable("Machine move no supported yet.");
- }
- } else {
- unsigned Reg = RI->getDwarfRegNum(Src.getReg(), true);
- int Offset = Dst.getOffset() / stackGrowth;
-
- if (Offset < 0) {
- ++FinalSize;
- FinalSize += MCAsmInfo::getULEB128Size(Reg);
- FinalSize += MCAsmInfo::getSLEB128Size(Offset);
- } else if (Reg < 64) {
- ++FinalSize;
- FinalSize += MCAsmInfo::getULEB128Size(Offset);
- } else {
- ++FinalSize;
- FinalSize += MCAsmInfo::getULEB128Size(Reg);
- FinalSize += MCAsmInfo::getULEB128Size(Offset);
- }
- }
- }
- return FinalSize;
-}
-
-unsigned
-JITDwarfEmitter::GetExceptionTableSizeInBytes(MachineFunction* MF) const {
- unsigned FinalSize = 0;
-
- // Map all labels and get rid of any dead landing pads.
- MMI->TidyLandingPads();
-
- const std::vector<GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
- const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
- const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
- if (PadInfos.empty()) return 0;
-
- // Sort the landing pads in order of their type ids. This is used to fold
- // duplicate actions.
- SmallVector<const LandingPadInfo *, 64> LandingPads;
- LandingPads.reserve(PadInfos.size());
- for (unsigned i = 0, N = PadInfos.size(); i != N; ++i)
- LandingPads.push_back(&PadInfos[i]);
- std::sort(LandingPads.begin(), LandingPads.end(), PadLT);
-
- // Negative type ids index into FilterIds, positive type ids index into
- // TypeInfos. The value written for a positive type id is just the type
- // id itself. For a negative type id, however, the value written is the
- // (negative) byte offset of the corresponding FilterIds entry. The byte
- // offset is usually equal to the type id, because the FilterIds entries
- // are written using a variable width encoding which outputs one byte per
- // entry as long as the value written is not too large, but can differ.
- // This kind of complication does not occur for positive type ids because
- // type infos are output using a fixed width encoding.
- // FilterOffsets[i] holds the byte offset corresponding to FilterIds[i].
- SmallVector<int, 16> FilterOffsets;
- FilterOffsets.reserve(FilterIds.size());
- int Offset = -1;
- for(std::vector<unsigned>::const_iterator I = FilterIds.begin(),
- E = FilterIds.end(); I != E; ++I) {
- FilterOffsets.push_back(Offset);
- Offset -= MCAsmInfo::getULEB128Size(*I);
- }
-
- // Compute the actions table and gather the first action index for each
- // landing pad site.
- SmallVector<ActionEntry, 32> Actions;
- SmallVector<unsigned, 64> FirstActions;
- FirstActions.reserve(LandingPads.size());
-
- int FirstAction = 0;
- unsigned SizeActions = 0;
- for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
- const LandingPadInfo *LP = LandingPads[i];
- const std::vector<int> &TypeIds = LP->TypeIds;
- const unsigned NumShared = i ? SharedTypeIds(LP, LandingPads[i-1]) : 0;
- unsigned SizeSiteActions = 0;
-
- if (NumShared < TypeIds.size()) {
- unsigned SizeAction = 0;
- ActionEntry *PrevAction = 0;
-
- if (NumShared) {
- const unsigned SizePrevIds = LandingPads[i-1]->TypeIds.size();
- assert(Actions.size());
- PrevAction = &Actions.back();
- SizeAction = MCAsmInfo::getSLEB128Size(PrevAction->NextAction) +
- MCAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
- for (unsigned j = NumShared; j != SizePrevIds; ++j) {
- SizeAction -= MCAsmInfo::getSLEB128Size(PrevAction->ValueForTypeID);
- SizeAction += -PrevAction->NextAction;
- PrevAction = PrevAction->Previous;
- }
- }
-
- // Compute the actions.
- for (unsigned I = NumShared, M = TypeIds.size(); I != M; ++I) {
- int TypeID = TypeIds[I];
- assert(-1-TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
- int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
- unsigned SizeTypeID = MCAsmInfo::getSLEB128Size(ValueForTypeID);
-
- int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
- SizeAction = SizeTypeID + MCAsmInfo::getSLEB128Size(NextAction);
- SizeSiteActions += SizeAction;
-
- ActionEntry Action = {ValueForTypeID, NextAction, PrevAction};
- Actions.push_back(Action);
-
- PrevAction = &Actions.back();
- }
-
- // Record the first action of the landing pad site.
- FirstAction = SizeActions + SizeSiteActions - SizeAction + 1;
- } // else identical - re-use previous FirstAction
-
- FirstActions.push_back(FirstAction);
-
- // Compute this sites contribution to size.
- SizeActions += SizeSiteActions;
- }
-
- // Compute the call-site table. Entries must be ordered by address.
- SmallVector<CallSiteEntry, 64> CallSites;
-
- RangeMapType PadMap;
- for (unsigned i = 0, N = LandingPads.size(); i != N; ++i) {
- const LandingPadInfo *LandingPad = LandingPads[i];
- for (unsigned j=0, E = LandingPad->BeginLabels.size(); j != E; ++j) {
- unsigned BeginLabel = LandingPad->BeginLabels[j];
- assert(!PadMap.count(BeginLabel) && "Duplicate landing pad labels!");
- PadRange P = { i, j };
- PadMap[BeginLabel] = P;
- }
- }
-
- bool MayThrow = false;
- unsigned LastLabel = 0;
- for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
- I != E; ++I) {
- for (MachineBasicBlock::const_iterator MI = I->begin(), E = I->end();
- MI != E; ++MI) {
- if (!MI->isLabel()) {
- MayThrow |= MI->getDesc().isCall();
- continue;
- }
-
- unsigned BeginLabel = MI->getOperand(0).getImm();
- assert(BeginLabel && "Invalid label!");
-
- if (BeginLabel == LastLabel)
- MayThrow = false;
-
- RangeMapType::iterator L = PadMap.find(BeginLabel);
-
- if (L == PadMap.end())
- continue;
-
- PadRange P = L->second;
- const LandingPadInfo *LandingPad = LandingPads[P.PadIndex];
-
- assert(BeginLabel == LandingPad->BeginLabels[P.RangeIndex] &&
- "Inconsistent landing pad map!");
-
- // If some instruction between the previous try-range and this one may
- // throw, create a call-site entry with no landing pad for the region
- // between the try-ranges.
- if (MayThrow) {
- CallSiteEntry Site = {LastLabel, BeginLabel, 0, 0};
- CallSites.push_back(Site);
- }
-
- LastLabel = LandingPad->EndLabels[P.RangeIndex];
- CallSiteEntry Site = {BeginLabel, LastLabel,
- LandingPad->LandingPadLabel, FirstActions[P.PadIndex]};
-
- assert(Site.BeginLabel && Site.EndLabel && Site.PadLabel &&
- "Invalid landing pad!");
-
- // Try to merge with the previous call-site.
- if (CallSites.size()) {
- CallSiteEntry &Prev = CallSites.back();
- if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) {
- // Extend the range of the previous entry.
- Prev.EndLabel = Site.EndLabel;
- continue;
- }
- }
-
- // Otherwise, create a new call-site.
- CallSites.push_back(Site);
- }
- }
- // If some instruction between the previous try-range and the end of the
- // function may throw, create a call-site entry with no landing pad for the
- // region following the try-range.
- if (MayThrow) {
- CallSiteEntry Site = {LastLabel, 0, 0, 0};
- CallSites.push_back(Site);
- }
-
- // Final tallies.
- unsigned SizeSites = CallSites.size() * (sizeof(int32_t) + // Site start.
- sizeof(int32_t) + // Site length.
- sizeof(int32_t)); // Landing pad.
- for (unsigned i = 0, e = CallSites.size(); i < e; ++i)
- SizeSites += MCAsmInfo::getULEB128Size(CallSites[i].Action);
-
- unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize();
-
- unsigned TypeOffset = sizeof(int8_t) + // Call site format
- // Call-site table length
- MCAsmInfo::getULEB128Size(SizeSites) +
- SizeSites + SizeActions + SizeTypes;
-
- unsigned TotalSize = sizeof(int8_t) + // LPStart format
- sizeof(int8_t) + // TType format
- MCAsmInfo::getULEB128Size(TypeOffset) + // TType base offset
- TypeOffset;
-
- unsigned SizeAlign = (4 - TotalSize) & 3;
-
- // Begin the exception table.
- FinalSize = RoundUpToAlign(FinalSize, 4);
- for (unsigned i = 0; i != SizeAlign; ++i) {
- ++FinalSize;
- }
-
- unsigned PointerSize = TD->getPointerSize();
-
- // Emit the header.
- ++FinalSize;
- // Asm->EOL("LPStart format (DW_EH_PE_omit)");
- ++FinalSize;
- // Asm->EOL("TType format (DW_EH_PE_absptr)");
- ++FinalSize;
- // Asm->EOL("TType base offset");
- ++FinalSize;
- // Asm->EOL("Call site format (DW_EH_PE_udata4)");
- ++FinalSize;
- // Asm->EOL("Call-site table length");
-
- // Emit the landing pad site information.
- for (unsigned i = 0; i < CallSites.size(); ++i) {
- CallSiteEntry &S = CallSites[i];
-
- // Asm->EOL("Region start");
- FinalSize += PointerSize;
-
- //Asm->EOL("Region length");
- FinalSize += PointerSize;
-
- // Asm->EOL("Landing pad");
- FinalSize += PointerSize;
-
- FinalSize += MCAsmInfo::getULEB128Size(S.Action);
- // Asm->EOL("Action");
- }
-
- // Emit the actions.
- for (unsigned I = 0, N = Actions.size(); I != N; ++I) {
- ActionEntry &Action = Actions[I];
-
- //Asm->EOL("TypeInfo index");
- FinalSize += MCAsmInfo::getSLEB128Size(Action.ValueForTypeID);
- //Asm->EOL("Next action");
- FinalSize += MCAsmInfo::getSLEB128Size(Action.NextAction);
- }
-
- // Emit the type ids.
- for (unsigned M = TypeInfos.size(); M; --M) {
- // Asm->EOL("TypeInfo");
- FinalSize += PointerSize;
- }
-
- // Emit the filter typeids.
- for (unsigned j = 0, M = FilterIds.size(); j < M; ++j) {
- unsigned TypeID = FilterIds[j];
- FinalSize += MCAsmInfo::getULEB128Size(TypeID);
- //Asm->EOL("Filter TypeInfo index");
- }
-
- FinalSize = RoundUpToAlign(FinalSize, 4);
-
- return FinalSize;
-}
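
Earlier in this file's diff, EmitFrameMoves switches from numeric label IDs to MCSymbol* keys: a move is dropped when the emitter never recorded an address for its label, and the row advances whenever the label changes. A compact sketch of that filtering step over a plain map (FrameMove and the map type are illustrative stand-ins, not LLVM types):

    #include <cstdint>
    #include <map>
    #include <vector>

    struct FrameMove { const void *Label; /* plus source/destination */ };

    // Keep only the moves whose label was actually emitted, mirroring the
    // "throw out move if the label is invalid" check in EmitFrameMoves.
    static std::vector<FrameMove>
    filterMoves(const std::vector<FrameMove> &Moves,
                const std::map<const void *, std::uintptr_t> &LabelAddr) {
      std::vector<FrameMove> Kept;
      for (const FrameMove &M : Moves) {
        if (M.Label) {
          auto I = LabelAddr.find(M.Label);
          if (I == LabelAddr.end() || I->second == 0)
            continue; // label never got an address: drop the move
        }
        Kept.push_back(M);
      }
      return Kept;
    }
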
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.h b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.h
index e627550..3095682 100644
--- a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.h
+++ b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITDwarfEmitter.h
@@ -49,17 +49,6 @@ class JITDwarfEmitter {
unsigned char* EndFunction,
unsigned char* ExceptionTable) const;
- unsigned GetExceptionTableSizeInBytes(MachineFunction* MF) const;
-
- unsigned
- GetFrameMovesSizeInBytes(intptr_t BaseLabelPtr,
- const std::vector<MachineMove> &Moves) const;
-
- unsigned GetCommonEHFrameSizeInBytes(const Function* Personality) const;
-
- unsigned GetEHFrameSizeInBytes(const Function* Personality,
- unsigned char* StartFunction) const;
-
public:
JITDwarfEmitter(JIT& jit);
@@ -71,11 +60,6 @@ public:
unsigned char* &EHFramePtr);
- unsigned GetDwarfTableSizeInBytes(MachineFunction& F,
- JITCodeEmitter& JCE,
- unsigned char* StartFunction,
- unsigned char* EndFunction);
-
void setModuleInfo(MachineModuleInfo* Info) {
MMI = Info;
}
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
index 783ebb4..4c0d078 100644
--- a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
+++ b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
@@ -23,6 +23,7 @@
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/JITCodeEmitter.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineCodeInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
@@ -30,8 +31,8 @@
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/JITMemoryManager.h"
-#include "llvm/CodeGen/MachineCodeInfo.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetJITInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
@@ -43,7 +44,6 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/System/Disassembler.h"
#include "llvm/System/Memory.h"
-#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -152,16 +152,6 @@ namespace {
FunctionToCallSitesMap[F].insert(CallSite);
}
- // Returns the Function of the stub if a stub was erased, or NULL if there
- // was no stub. This function uses the call-site->function map to find a
- // relevant function, but asserts that only stubs and not other call sites
- // will be passed in.
- Function *EraseStub(const MutexGuard &locked, void *Stub);
-
- void EraseAllCallSitesFor(const MutexGuard &locked, Function *F) {
- assert(locked.holds(TheJIT->lock));
- EraseAllCallSitesForPrelocked(F);
- }
void EraseAllCallSitesForPrelocked(Function *F);
// Erases _all_ call sites regardless of their function. This is used to
@@ -223,9 +213,6 @@ namespace {
/// specified GV address.
void *getGlobalValueIndirectSym(GlobalValue *V, void *GVAddress);
- void getRelocatableGVs(SmallVectorImpl<GlobalValue*> &GVs,
- SmallVectorImpl<void*> &Ptrs);
-
/// getGOTIndexForAddress - Return a new or existing index in the GOT for
/// an address. This function only manages slots, it does not manage the
/// contents of the slots or the memory associated with the GOT.
@@ -341,7 +328,7 @@ namespace {
/// LabelLocations - This vector is a mapping from Label ID's to their
/// address.
- std::vector<uintptr_t> LabelLocations;
+ DenseMap<MCSymbol*, uintptr_t> LabelLocations;
/// MMI - Machine module info for exception information
MachineModuleInfo* MMI;
@@ -369,7 +356,7 @@ namespace {
ValueMap<const Function *, EmittedCode,
EmittedFunctionConfig> EmittedFunctions;
- DILocation PrevDLT;
+ DebugLoc PrevDL;
/// Instance of the JIT
JIT *TheJIT;
@@ -377,14 +364,14 @@ namespace {
public:
JITEmitter(JIT &jit, JITMemoryManager *JMM, TargetMachine &TM)
: SizeEstimate(0), Resolver(jit, *this), MMI(0), CurFn(0),
- EmittedFunctions(this), PrevDLT(NULL), TheJIT(&jit) {
+ EmittedFunctions(this), TheJIT(&jit) {
MemMgr = JMM ? JMM : JITMemoryManager::CreateDefaultMemManager();
if (jit.getJITInfo().needsGOT()) {
MemMgr->AllocateGOT();
DEBUG(dbgs() << "JIT is managing a GOT\n");
}
- if (DwarfExceptionHandling || JITEmitDebugInfo) {
+ if (JITExceptionHandling || JITEmitDebugInfo) {
DE.reset(new JITDwarfEmitter(jit));
}
if (JITEmitDebugInfo) {
@@ -398,7 +385,6 @@ namespace {
/// classof - Methods for support type inquiry through isa, cast, and
/// dyn_cast:
///
- static inline bool classof(const JITEmitter*) { return true; }
static inline bool classof(const MachineCodeEmitter*) { return true; }
JITResolver &getJITResolver() { return Resolver; }
@@ -435,6 +421,9 @@ namespace {
if (MBBLocations.size() <= (unsigned)MBB->getNumber())
MBBLocations.resize((MBB->getNumber()+1)*2);
MBBLocations[MBB->getNumber()] = getCurrentPCValue();
+ if (MBB->hasAddressTaken())
+ TheJIT->addPointerToBasicBlock(MBB->getBasicBlock(),
+ (void*)getCurrentPCValue());
DEBUG(dbgs() << "JIT: Emitting BB" << MBB->getNumber() << " at ["
<< (void*) getCurrentPCValue() << "]\n");
}
@@ -442,7 +431,7 @@ namespace {
virtual uintptr_t getConstantPoolEntryAddress(unsigned Entry) const;
virtual uintptr_t getJumpTableEntryAddress(unsigned Entry) const;
- virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const {
+ virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const{
assert(MBBLocations.size() > (unsigned)MBB->getNumber() &&
MBBLocations[MBB->getNumber()] && "MBB not emitted!");
return MBBLocations[MBB->getNumber()];
@@ -459,16 +448,17 @@ namespace {
virtual void processDebugLoc(DebugLoc DL, bool BeforePrintingInsn);
- virtual void emitLabel(uint64_t LabelID) {
- if (LabelLocations.size() <= LabelID)
- LabelLocations.resize((LabelID+1)*2);
- LabelLocations[LabelID] = getCurrentPCValue();
+ virtual void emitLabel(MCSymbol *Label) {
+ LabelLocations[Label] = getCurrentPCValue();
+ }
+
+ virtual DenseMap<MCSymbol*, uintptr_t> *getLabelLocations() {
+ return &LabelLocations;
}
- virtual uintptr_t getLabelAddress(uint64_t LabelID) const {
- assert(LabelLocations.size() > (unsigned)LabelID &&
- LabelLocations[LabelID] && "Label not emitted!");
- return LabelLocations[LabelID];
+ virtual uintptr_t getLabelAddress(MCSymbol *Label) const {
+ assert(LabelLocations.count(Label) && "Label not emitted!");
+ return LabelLocations.find(Label)->second;
}
virtual void setModuleInfo(MachineModuleInfo* Info) {
@@ -476,26 +466,10 @@ namespace {
if (DE.get()) DE->setModuleInfo(Info);
}
- void setMemoryExecutable() {
- MemMgr->setMemoryExecutable();
- }
-
- JITMemoryManager *getMemMgr() const { return MemMgr; }
-
private:
void *getPointerToGlobal(GlobalValue *GV, void *Reference,
bool MayNeedFarStub);
void *getPointerToGVIndirectSym(GlobalValue *V, void *Reference);
- unsigned addSizeOfGlobal(const GlobalVariable *GV, unsigned Size);
- unsigned addSizeOfGlobalsInConstantVal(
- const Constant *C, unsigned Size,
- SmallPtrSet<const GlobalVariable*, 8> &SeenGlobals,
- SmallVectorImpl<const GlobalVariable*> &Worklist);
- unsigned addSizeOfGlobalsInInitializer(
- const Constant *Init, unsigned Size,
- SmallPtrSet<const GlobalVariable*, 8> &SeenGlobals,
- SmallVectorImpl<const GlobalVariable*> &Worklist);
- unsigned GetSizeOfGlobalsInBytes(MachineFunction &MF);
};
}
@@ -503,39 +477,6 @@ void CallSiteValueMapConfig::onDelete(JITResolverState *JRS, Function *F) {
JRS->EraseAllCallSitesForPrelocked(F);
}
-Function *JITResolverState::EraseStub(const MutexGuard &locked, void *Stub) {
- CallSiteToFunctionMapTy::iterator C2F_I =
- CallSiteToFunctionMap.find(Stub);
- if (C2F_I == CallSiteToFunctionMap.end()) {
- // Not a stub.
- return NULL;
- }
-
- StubToResolverMap->UnregisterStubResolver(Stub);
-
- Function *const F = C2F_I->second;
-#ifndef NDEBUG
- void *RealStub = FunctionToLazyStubMap.lookup(F);
- assert(RealStub == Stub &&
- "Call-site that wasn't a stub passed in to EraseStub");
-#endif
- FunctionToLazyStubMap.erase(F);
- CallSiteToFunctionMap.erase(C2F_I);
-
- // Remove the stub from the function->call-sites map, and remove the whole
- // entry from the map if that was the last call site.
- FunctionToCallSitesMapTy::iterator F2C_I = FunctionToCallSitesMap.find(F);
- assert(F2C_I != FunctionToCallSitesMap.end() &&
- "FunctionToCallSitesMap broken");
- bool Erased = F2C_I->second.erase(Stub);
- (void)Erased;
- assert(Erased && "FunctionToCallSitesMap broken");
- if (F2C_I->second.empty())
- FunctionToCallSitesMap.erase(F2C_I);
-
- return F;
-}
-
void JITResolverState::EraseAllCallSitesForPrelocked(Function *F) {
FunctionToCallSitesMapTy::iterator F2C = FunctionToCallSitesMap.find(F);
if (F2C == FunctionToCallSitesMap.end())
@@ -686,28 +627,6 @@ unsigned JITResolver::getGOTIndexForAddr(void* addr) {
return idx;
}
-void JITResolver::getRelocatableGVs(SmallVectorImpl<GlobalValue*> &GVs,
- SmallVectorImpl<void*> &Ptrs) {
- MutexGuard locked(TheJIT->lock);
-
- const FunctionToLazyStubMapTy &FM = state.getFunctionToLazyStubMap(locked);
- GlobalToIndirectSymMapTy &GM = state.getGlobalToIndirectSymMap(locked);
-
- for (FunctionToLazyStubMapTy::const_iterator i = FM.begin(), e = FM.end();
- i != e; ++i){
- Function *F = i->first;
- if (F->isDeclaration() && F->hasExternalLinkage()) {
- GVs.push_back(i->first);
- Ptrs.push_back(i->second);
- }
- }
- for (GlobalToIndirectSymMapTy::iterator i = GM.begin(), e = GM.end();
- i != e; ++i) {
- GVs.push_back(i->first);
- Ptrs.push_back(i->second);
- }
-}
-
/// JITCompilerFn - This function is called when a lazy compilation stub has
/// been entered. It looks up which function this stub corresponds to, compiles
/// it if necessary, then returns the resultant function pointer.
@@ -740,7 +659,7 @@ void *JITResolver::JITCompilerFn(void *Stub) {
// If lazy compilation is disabled, emit a useful error message and abort.
if (!JR->TheJIT->isCompilingLazily()) {
- llvm_report_error("LLVM JIT requested to do lazy compilation of function '"
+ report_fatal_error("LLVM JIT requested to do lazy compilation of function '"
+ F->getName() + "' when lazy compiles are disabled!");
}
@@ -824,21 +743,19 @@ void *JITEmitter::getPointerToGVIndirectSym(GlobalValue *V, void *Reference) {
}
void JITEmitter::processDebugLoc(DebugLoc DL, bool BeforePrintingInsn) {
- if (!DL.isUnknown()) {
- DILocation CurDLT = EmissionDetails.MF->getDILocation(DL);
-
- if (BeforePrintingInsn) {
- if (CurDLT.getScope().getNode() != 0
- && PrevDLT.getNode() != CurDLT.getNode()) {
- JITEvent_EmittedFunctionDetails::LineStart NextLine;
- NextLine.Address = getCurrentPCValue();
- NextLine.Loc = DL;
- EmissionDetails.LineStarts.push_back(NextLine);
- }
+ if (DL.isUnknown()) return;
+ if (!BeforePrintingInsn) return;
+
+ const LLVMContext &Context = EmissionDetails.MF->getFunction()->getContext();
- PrevDLT = CurDLT;
- }
+ if (DL.getScope(Context) != 0 && PrevDL != DL) {
+ JITEvent_EmittedFunctionDetails::LineStart NextLine;
+ NextLine.Address = getCurrentPCValue();
+ NextLine.Loc = DL;
+ EmissionDetails.LineStarts.push_back(NextLine);
}
+
+ PrevDL = DL;
}
static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP,
@@ -857,183 +774,6 @@ static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP,
return Size;
}
-static unsigned GetJumpTableSizeInBytes(MachineJumpTableInfo *MJTI, JIT *jit) {
- const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
- if (JT.empty()) return 0;
-
- unsigned NumEntries = 0;
- for (unsigned i = 0, e = JT.size(); i != e; ++i)
- NumEntries += JT[i].MBBs.size();
-
- return NumEntries * MJTI->getEntrySize(*jit->getTargetData());
-}
-
-static uintptr_t RoundUpToAlign(uintptr_t Size, unsigned Alignment) {
- if (Alignment == 0) Alignment = 1;
- // Since we do not know where the buffer will be allocated, be pessimistic.
- return Size + Alignment;
-}
-
-/// addSizeOfGlobal - add the size of the global (plus any alignment padding)
-/// into the running total Size.
-
-unsigned JITEmitter::addSizeOfGlobal(const GlobalVariable *GV, unsigned Size) {
- const Type *ElTy = GV->getType()->getElementType();
- size_t GVSize = (size_t)TheJIT->getTargetData()->getTypeAllocSize(ElTy);
- size_t GVAlign =
- (size_t)TheJIT->getTargetData()->getPreferredAlignment(GV);
- DEBUG(dbgs() << "JIT: Adding in size " << GVSize << " alignment " << GVAlign);
- DEBUG(GV->dump());
- // Assume code section ends with worst possible alignment, so first
- // variable needs maximal padding.
- if (Size==0)
- Size = 1;
- Size = ((Size+GVAlign-1)/GVAlign)*GVAlign;
- Size += GVSize;
- return Size;
-}
-
-/// addSizeOfGlobalsInConstantVal - find any globals that we haven't seen yet
-/// but are referenced from the constant; put them in SeenGlobals and the
-/// Worklist, and add their size into the running total Size.
-
-unsigned JITEmitter::addSizeOfGlobalsInConstantVal(
- const Constant *C,
- unsigned Size,
- SmallPtrSet<const GlobalVariable*, 8> &SeenGlobals,
- SmallVectorImpl<const GlobalVariable*> &Worklist) {
- // If its undefined, return the garbage.
- if (isa<UndefValue>(C))
- return Size;
-
- // If the value is a ConstantExpr
- if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
- Constant *Op0 = CE->getOperand(0);
- switch (CE->getOpcode()) {
- case Instruction::GetElementPtr:
- case Instruction::Trunc:
- case Instruction::ZExt:
- case Instruction::SExt:
- case Instruction::FPTrunc:
- case Instruction::FPExt:
- case Instruction::UIToFP:
- case Instruction::SIToFP:
- case Instruction::FPToUI:
- case Instruction::FPToSI:
- case Instruction::PtrToInt:
- case Instruction::IntToPtr:
- case Instruction::BitCast: {
- Size = addSizeOfGlobalsInConstantVal(Op0, Size, SeenGlobals, Worklist);
- break;
- }
- case Instruction::Add:
- case Instruction::FAdd:
- case Instruction::Sub:
- case Instruction::FSub:
- case Instruction::Mul:
- case Instruction::FMul:
- case Instruction::UDiv:
- case Instruction::SDiv:
- case Instruction::URem:
- case Instruction::SRem:
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor: {
- Size = addSizeOfGlobalsInConstantVal(Op0, Size, SeenGlobals, Worklist);
- Size = addSizeOfGlobalsInConstantVal(CE->getOperand(1), Size,
- SeenGlobals, Worklist);
- break;
- }
- default: {
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "ConstantExpr not handled: " << *CE;
- llvm_report_error(Msg.str());
- }
- }
- }
-
- if (C->getType()->getTypeID() == Type::PointerTyID)
- if (const GlobalVariable* GV = dyn_cast<GlobalVariable>(C))
- if (SeenGlobals.insert(GV)) {
- Worklist.push_back(GV);
- Size = addSizeOfGlobal(GV, Size);
- }
-
- return Size;
-}
-
-/// addSizeOfGLobalsInInitializer - handle any globals that we haven't seen yet
-/// but are referenced from the given initializer.
-
-unsigned JITEmitter::addSizeOfGlobalsInInitializer(
- const Constant *Init,
- unsigned Size,
- SmallPtrSet<const GlobalVariable*, 8> &SeenGlobals,
- SmallVectorImpl<const GlobalVariable*> &Worklist) {
- if (!isa<UndefValue>(Init) &&
- !isa<ConstantVector>(Init) &&
- !isa<ConstantAggregateZero>(Init) &&
- !isa<ConstantArray>(Init) &&
- !isa<ConstantStruct>(Init) &&
- Init->getType()->isFirstClassType())
- Size = addSizeOfGlobalsInConstantVal(Init, Size, SeenGlobals, Worklist);
- return Size;
-}
-
-/// GetSizeOfGlobalsInBytes - walk the code for the function, looking for
-/// globals; then walk the initializers of those globals looking for more.
-/// If their size has not been considered yet, add it into the running total
-/// Size.
-
-unsigned JITEmitter::GetSizeOfGlobalsInBytes(MachineFunction &MF) {
- unsigned Size = 0;
- SmallPtrSet<const GlobalVariable*, 8> SeenGlobals;
-
- for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
- MBB != E; ++MBB) {
- for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
- I != E; ++I) {
- const TargetInstrDesc &Desc = I->getDesc();
- const MachineInstr &MI = *I;
- unsigned NumOps = Desc.getNumOperands();
- for (unsigned CurOp = 0; CurOp < NumOps; CurOp++) {
- const MachineOperand &MO = MI.getOperand(CurOp);
- if (MO.isGlobal()) {
- GlobalValue* V = MO.getGlobal();
- const GlobalVariable *GV = dyn_cast<const GlobalVariable>(V);
- if (!GV)
- continue;
- // If seen in previous function, it will have an entry here.
- if (TheJIT->getPointerToGlobalIfAvailable(GV))
- continue;
- // If seen earlier in this function, it will have an entry here.
- // FIXME: it should be possible to combine these tables, by
- // assuming the addresses of the new globals in this module
- // start at 0 (or something) and adjusting them after codegen
- // complete. Another possibility is to grab a marker bit in GV.
- if (SeenGlobals.insert(GV))
- // A variable as yet unseen. Add in its size.
- Size = addSizeOfGlobal(GV, Size);
- }
- }
- }
- }
- DEBUG(dbgs() << "JIT: About to look through initializers\n");
- // Look for more globals that are referenced only from initializers.
- SmallVector<const GlobalVariable*, 8> Worklist(
- SeenGlobals.begin(), SeenGlobals.end());
- while (!Worklist.empty()) {
- const GlobalVariable* GV = Worklist.back();
- Worklist.pop_back();
- if (GV->hasInitializer())
- Size = addSizeOfGlobalsInInitializer(GV->getInitializer(), Size,
- SeenGlobals, Worklist);
- }
-
- return Size;
-}
-
void JITEmitter::startFunction(MachineFunction &F) {
DEBUG(dbgs() << "JIT: Starting CodeGen of Function "
<< F.getFunction()->getName() << "\n");
@@ -1041,43 +781,8 @@ void JITEmitter::startFunction(MachineFunction &F) {
uintptr_t ActualSize = 0;
// Set the memory writable, if it's not already
MemMgr->setMemoryWritable();
- if (MemMgr->NeedsExactSize()) {
- DEBUG(dbgs() << "JIT: ExactSize\n");
- const TargetInstrInfo* TII = F.getTarget().getInstrInfo();
- MachineConstantPool *MCP = F.getConstantPool();
-
- // Ensure the constant pool/jump table info is at least 4-byte aligned.
- ActualSize = RoundUpToAlign(ActualSize, 16);
-
- // Add the alignment of the constant pool
- ActualSize = RoundUpToAlign(ActualSize, MCP->getConstantPoolAlignment());
-
- // Add the constant pool size
- ActualSize += GetConstantPoolSizeInBytes(MCP, TheJIT->getTargetData());
-
- if (MachineJumpTableInfo *MJTI = F.getJumpTableInfo()) {
- // Add the aligment of the jump table info
- ActualSize = RoundUpToAlign(ActualSize,
- MJTI->getEntryAlignment(*TheJIT->getTargetData()));
-
- // Add the jump table size
- ActualSize += GetJumpTableSizeInBytes(MJTI, TheJIT);
- }
-
- // Add the alignment for the function
- ActualSize = RoundUpToAlign(ActualSize,
- std::max(F.getFunction()->getAlignment(), 8U));
-
- // Add the function size
- ActualSize += TII->GetFunctionSizeInBytes(F);
-
- DEBUG(dbgs() << "JIT: ActualSize before globals " << ActualSize << "\n");
- // Add the size of the globals that will be allocated after this function.
- // These are all the ones referenced from this function that were not
- // previously allocated.
- ActualSize += GetSizeOfGlobalsInBytes(F);
- DEBUG(dbgs() << "JIT: ActualSize after globals " << ActualSize << "\n");
- } else if (SizeEstimate > 0) {
+
+ if (SizeEstimate > 0) {
// SizeEstimate will be non-zero on reallocation attempts.
ActualSize = SizeEstimate;
}
@@ -1215,6 +920,9 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
TheJIT->NotifyFunctionEmitted(*F.getFunction(), FnStart, FnEnd-FnStart,
EmissionDetails);
+ // Reset the previous debug location.
+ PrevDL = DebugLoc();
+
DEBUG(dbgs() << "JIT: Finished CodeGen of [" << (void*)FnStart
<< "] Function: " << F.getFunction()->getName()
<< ": " << (FnEnd-FnStart) << " bytes of text, "
@@ -1226,46 +934,42 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
// Mark code region readable and executable if it's not so already.
MemMgr->setMemoryExecutable();
- DEBUG(
- if (sys::hasDisassembler()) {
- dbgs() << "JIT: Disassembled code:\n";
- dbgs() << sys::disassembleBuffer(FnStart, FnEnd-FnStart,
- (uintptr_t)FnStart);
- } else {
- dbgs() << "JIT: Binary code:\n";
- uint8_t* q = FnStart;
- for (int i = 0; q < FnEnd; q += 4, ++i) {
- if (i == 4)
- i = 0;
- if (i == 0)
- dbgs() << "JIT: " << (long)(q - FnStart) << ": ";
- bool Done = false;
- for (int j = 3; j >= 0; --j) {
- if (q + j >= FnEnd)
- Done = true;
- else
- dbgs() << (unsigned short)q[j];
+ DEBUG({
+ if (sys::hasDisassembler()) {
+ dbgs() << "JIT: Disassembled code:\n";
+ dbgs() << sys::disassembleBuffer(FnStart, FnEnd-FnStart,
+ (uintptr_t)FnStart);
+ } else {
+ dbgs() << "JIT: Binary code:\n";
+ uint8_t* q = FnStart;
+ for (int i = 0; q < FnEnd; q += 4, ++i) {
+ if (i == 4)
+ i = 0;
+ if (i == 0)
+ dbgs() << "JIT: " << (long)(q - FnStart) << ": ";
+ bool Done = false;
+ for (int j = 3; j >= 0; --j) {
+ if (q + j >= FnEnd)
+ Done = true;
+ else
+ dbgs() << (unsigned short)q[j];
+ }
+ if (Done)
+ break;
+ dbgs() << ' ';
+ if (i == 3)
+ dbgs() << '\n';
}
- if (Done)
- break;
- dbgs() << ' ';
- if (i == 3)
- dbgs() << '\n';
+ dbgs()<< '\n';
}
- dbgs()<< '\n';
- }
- );
+ });
- if (DwarfExceptionHandling || JITEmitDebugInfo) {
+ if (JITExceptionHandling || JITEmitDebugInfo) {
uintptr_t ActualSize = 0;
SavedBufferBegin = BufferBegin;
SavedBufferEnd = BufferEnd;
SavedCurBufferPtr = CurBufferPtr;
- if (MemMgr->NeedsExactSize()) {
- ActualSize = DE->GetDwarfTableSizeInBytes(F, *this, FnStart, FnEnd);
- }
-
BufferBegin = CurBufferPtr = MemMgr->startExceptionTable(F.getFunction(),
ActualSize);
BufferEnd = BufferBegin+ActualSize;
@@ -1280,7 +984,7 @@ bool JITEmitter::finishFunction(MachineFunction &F) {
BufferEnd = SavedBufferEnd;
CurBufferPtr = SavedCurBufferPtr;
- if (DwarfExceptionHandling) {
+ if (JITExceptionHandling) {
TheJIT->RegisterTable(FrameRegister);
}
@@ -1308,6 +1012,11 @@ void JITEmitter::retryWithMoreMemory(MachineFunction &F) {
deallocateMemForFunction(F.getFunction());
// Try again with at least twice as much free space.
SizeEstimate = (uintptr_t)(2 * (BufferEnd - BufferBegin));
+
+ for (MachineFunction::iterator MBB = F.begin(), E = F.end(); MBB != E; ++MBB){
+ if (MBB->hasAddressTaken())
+ TheJIT->clearPointerToBasicBlock(MBB->getBasicBlock());
+ }
}
/// deallocateMemForFunction - Deallocate all memory for the specified
@@ -1378,7 +1087,7 @@ void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
ConstPoolAddresses.push_back(CAddr);
if (CPE.isMachineConstantPoolEntry()) {
// FIXME: add support to lower machine constant pool values into bytes!
- llvm_report_error("Initialize memory with machine specific constant pool"
+ report_fatal_error("Initialize memory with machine specific constant pool"
"entry has not been implemented!");
}
TheJIT->InitializeMemory(CPE.Val.ConstVal, (void*)CAddr);
@@ -1393,6 +1102,8 @@ void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) {
if (TheJIT->getJITInfo().hasCustomJumpTables())
return;
+ if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_Inline)
+ return;
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
if (JT.empty()) return;
@@ -1420,6 +1131,8 @@ void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
switch (MJTI->getEntryKind()) {
+ case MachineJumpTableInfo::EK_Inline:
+ return;
case MachineJumpTableInfo::EK_BlockAddress: {
// EK_BlockAddress - Each entry is a plain address of block, e.g.:
// .word LBB123
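
Inside JITEmitter the label bookkeeping likewise moves from a vector indexed by label ID to a map keyed by MCSymbol*: emitLabel() records the current PC under the symbol and getLabelAddress() asserts the symbol was emitted before returning it. A standalone sketch of that interface (plain std::unordered_map; LabelTable is an illustrative name, not an LLVM class):

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    class LabelTable {
      std::unordered_map<const void *, std::uintptr_t> Locations; // sym -> PC

    public:
      // Called when the emitter reaches a label: remember where it landed.
      void emitLabel(const void *Sym, std::uintptr_t CurrentPC) {
        Locations[Sym] = CurrentPC;
      }

      // Called later (e.g. by the DWARF emitter) to resolve the label.
      std::uintptr_t getLabelAddress(const void *Sym) const {
        auto I = Locations.find(Sym);
        assert(I != Locations.end() && "Label not emitted!");
        return I == Locations.end() ? 0 : I->second;
      }
    };
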
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
index a17caa1..653e6f1 100644
--- a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
+++ b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/JITMemoryManager.cpp
@@ -15,6 +15,7 @@
#include "llvm/ExecutionEngine/JITMemoryManager.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/GlobalValue.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Compiler.h"
@@ -22,12 +23,9 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/System/Memory.h"
-#include <map>
#include <vector>
#include <cassert>
#include <climits>
-#include <cstdio>
-#include <cstdlib>
#include <cstring>
using namespace llvm;
@@ -614,8 +612,8 @@ sys::MemoryBlock DefaultJITMemoryManager::allocateNewSlab(size_t size) {
sys::MemoryBlock *LastSlabPtr = LastSlab.base() ? &LastSlab : 0;
sys::MemoryBlock B = sys::Memory::AllocateRWX(size, LastSlabPtr, &ErrMsg);
if (B.base() == 0) {
- llvm_report_error("Allocation failed when allocating new memory in the"
- " JIT\n" + ErrMsg);
+ report_fatal_error("Allocation failed when allocating new memory in the"
+ " JIT\n" + Twine(ErrMsg));
}
LastSlab = B;
++NumSlabs;
diff --git a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp
index 2baf979..1ca084b 100644
--- a/libclamav/c++/llvm/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp
+++ b/libclamav/c++/llvm/lib/ExecutionEngine/JIT/OProfileJITEventListener.cpp
@@ -19,6 +19,7 @@
#define DEBUG_TYPE "oprofile-jit-event-listener"
#include "llvm/Function.h"
#include "llvm/Metadata.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
@@ -77,10 +78,10 @@ class FilenameCache {
DenseMap<AssertingVH<MDNode>, std::string> Filenames;
public:
- const char *getFilename(DIScope Scope) {
- std::string &Filename = Filenames[Scope.getNode()];
+ const char *getFilename(MDNode *Scope) {
+ std::string &Filename = Filenames[Scope];
if (Filename.empty()) {
- Filename = Scope.getFilename();
+ Filename = DIScope(Scope).getFilename();
}
return Filename.c_str();
}
@@ -91,9 +92,9 @@ static debug_line_info LineStartToOProfileFormat(
uintptr_t Address, DebugLoc Loc) {
debug_line_info Result;
Result.vma = Address;
- DILocation DILoc = MF.getDILocation(Loc);
- Result.lineno = DILoc.getLineNumber();
- Result.filename = Filenames.getFilename(DILoc.getScope());
+ Result.lineno = Loc.getLine();
+ Result.filename = Filenames.getFilename(
+ Loc.getScope(MF.getFunction()->getContext()));
DEBUG(dbgs() << "Mapping " << reinterpret_cast<void*>(Result.vma) << " to "
<< Result.filename << ":" << Result.lineno << "\n");
return Result;
@@ -113,26 +114,43 @@ void OProfileJITEventListener::NotifyFunctionEmitted(
return;
}
- // Now we convert the line number information from the address/DebugLoc format
- // in Details to the address/filename/lineno format that OProfile expects.
- // OProfile 0.9.4 (and maybe later versions) has a bug that causes it to
- // ignore line numbers for addresses above 4G.
- FilenameCache Filenames;
- std::vector<debug_line_info> LineInfo;
- LineInfo.reserve(1 + Details.LineStarts.size());
- if (!Details.MF->getDefaultDebugLoc().isUnknown()) {
- LineInfo.push_back(LineStartToOProfileFormat(
- *Details.MF, Filenames,
- reinterpret_cast<uintptr_t>(FnStart),
- Details.MF->getDefaultDebugLoc()));
- }
- for (std::vector<EmittedFunctionDetails::LineStart>::const_iterator
+ if (!Details.LineStarts.empty()) {
+ // Now we convert the line number information from the address/DebugLoc
+ // format in Details to the address/filename/lineno format that OProfile
+ // expects. Note that OProfile 0.9.4 has a bug that causes it to ignore
+ // line numbers for addresses above 4G.
+ FilenameCache Filenames;
+ std::vector<debug_line_info> LineInfo;
+ LineInfo.reserve(1 + Details.LineStarts.size());
+
+ DebugLoc FirstLoc = Details.LineStarts[0].Loc;
+ assert(!FirstLoc.isUnknown()
+ && "LineStarts should not contain unknown DebugLocs");
+ MDNode *FirstLocScope = FirstLoc.getScope(F.getContext());
+ DISubprogram FunctionDI = getDISubprogram(FirstLocScope);
+ if (FunctionDI.Verify()) {
+ // If we have debug info for the function itself, use that as the line
+ // number of the first several instructions. Otherwise, after filling
+ // LineInfo, we'll adjust the address of the first line number to point at
+ // the start of the function.
+ debug_line_info line_info;
+ line_info.vma = reinterpret_cast<uintptr_t>(FnStart);
+ line_info.lineno = FunctionDI.getLineNumber();
+ line_info.filename = Filenames.getFilename(FirstLocScope);
+ LineInfo.push_back(line_info);
+ }
+
+ for (std::vector<EmittedFunctionDetails::LineStart>::const_iterator
I = Details.LineStarts.begin(), E = Details.LineStarts.end();
- I != E; ++I) {
- LineInfo.push_back(LineStartToOProfileFormat(
- *Details.MF, Filenames, I->Address, I->Loc));
- }
- if (!LineInfo.empty()) {
+ I != E; ++I) {
+ LineInfo.push_back(LineStartToOProfileFormat(
+ *Details.MF, Filenames, I->Address, I->Loc));
+ }
+
+ // In case the function didn't have line info of its own, adjust the first
+ // line info's address to include the start of the function.
+ LineInfo[0].vma = reinterpret_cast<uintptr_t>(FnStart);
+
if (op_write_debug_line_info(Agent, FnStart,
LineInfo.size(), &*LineInfo.begin()) == -1) {
DEBUG(dbgs()
diff --git a/libclamav/c++/llvm/lib/MC/CMakeLists.txt b/libclamav/c++/llvm/lib/MC/CMakeLists.txt
index 4cf71dc..60a3a3e 100644
--- a/libclamav/c++/llvm/lib/MC/CMakeLists.txt
+++ b/libclamav/c++/llvm/lib/MC/CMakeLists.txt
@@ -1,4 +1,5 @@
add_llvm_library(LLVMMC
+ ELFObjectWriter.cpp
MCAsmInfo.cpp
MCAsmInfoCOFF.cpp
MCAsmInfoDarwin.cpp
@@ -7,16 +8,26 @@ add_llvm_library(LLVMMC
MCCodeEmitter.cpp
MCContext.cpp
MCDisassembler.cpp
+ MCELFStreamer.cpp
MCExpr.cpp
MCInst.cpp
MCInstPrinter.cpp
+ MCLabel.cpp
+ MCDwarf.cpp
+ MCLoggingStreamer.cpp
MCMachOStreamer.cpp
MCNullStreamer.cpp
+ MCObjectStreamer.cpp
+ MCObjectWriter.cpp
MCSection.cpp
+ MCSectionCOFF.cpp
MCSectionELF.cpp
MCSectionMachO.cpp
MCStreamer.cpp
MCSymbol.cpp
MCValue.cpp
+ MachObjectWriter.cpp
+ WinCOFFStreamer.cpp
+ WinCOFFObjectWriter.cpp
TargetAsmBackend.cpp
)
diff --git a/libclamav/c++/llvm/lib/MC/ELFObjectWriter.cpp b/libclamav/c++/llvm/lib/MC/ELFObjectWriter.cpp
new file mode 100644
index 0000000..cf35b45
--- /dev/null
+++ b/libclamav/c++/llvm/lib/MC/ELFObjectWriter.cpp
@@ -0,0 +1,973 @@
+//===- lib/MC/ELFObjectWriter.cpp - ELF File Writer -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements ELF object file writer information.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/ELFObjectWriter.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCELFSymbolFlags.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/Target/TargetAsmBackend.h"
+
+#include "../Target/X86/X86FixupKinds.h"
+
+#include <vector>
+using namespace llvm;
+
+namespace {
+
+ class ELFObjectWriterImpl {
+ static bool isFixupKindX86PCRel(unsigned Kind) {
+ switch (Kind) {
+ default:
+ return false;
+ case X86::reloc_pcrel_1byte:
+ case X86::reloc_pcrel_4byte:
+ case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_movq_load:
+ return true;
+ }
+ }
+
+ /*static bool isFixupKindX86RIPRel(unsigned Kind) {
+ return Kind == X86::reloc_riprel_4byte ||
+ Kind == X86::reloc_riprel_4byte_movq_load;
+ }*/
+
+
+ /// ELFSymbolData - Helper struct for containing some precomputed information
+ /// on symbols.
+ struct ELFSymbolData {
+ MCSymbolData *SymbolData;
+ uint64_t StringIndex;
+ uint32_t SectionIndex;
+
+ // Support lexicographic sorting.
+ bool operator<(const ELFSymbolData &RHS) const {
+ return SymbolData->getSymbol().getName() <
+ RHS.SymbolData->getSymbol().getName();
+ }
+ };
+
+ /// @name Relocation Data
+ /// @{
+
+ struct ELFRelocationEntry {
+ // Make these big enough for both 32-bit and 64-bit
+ uint64_t r_offset;
+ uint64_t r_info;
+ uint64_t r_addend;
+
+      // Support sorting by r_offset.
+ bool operator<(const ELFRelocationEntry &RE) const {
+ return RE.r_offset < r_offset;
+ }
+ };
+
+ llvm::DenseMap<const MCSectionData*,
+ std::vector<ELFRelocationEntry> > Relocations;
+ DenseMap<const MCSection*, uint64_t> SectionStringTableIndex;
+
+ /// @}
+ /// @name Symbol Table Data
+ /// @{
+
+ SmallString<256> StringTable;
+ std::vector<ELFSymbolData> LocalSymbolData;
+ std::vector<ELFSymbolData> ExternalSymbolData;
+ std::vector<ELFSymbolData> UndefinedSymbolData;
+
+ /// @}
+
+ ELFObjectWriter *Writer;
+
+ raw_ostream &OS;
+
+ // This holds the current offset into the object file.
+ size_t FileOff;
+
+ unsigned Is64Bit : 1;
+
+ bool HasRelocationAddend;
+
+ // This holds the symbol table index of the last local symbol.
+ unsigned LastLocalSymbolIndex;
+ // This holds the .strtab section index.
+ unsigned StringTableIndex;
+
+ unsigned ShstrtabIndex;
+
+ public:
+ ELFObjectWriterImpl(ELFObjectWriter *_Writer, bool _Is64Bit,
+ bool _HasRelAddend)
+ : Writer(_Writer), OS(Writer->getStream()), FileOff(0),
+ Is64Bit(_Is64Bit), HasRelocationAddend(_HasRelAddend) {
+ }
+
+ void Write8(uint8_t Value) { Writer->Write8(Value); }
+ void Write16(uint16_t Value) { Writer->Write16(Value); }
+ void Write32(uint32_t Value) { Writer->Write32(Value); }
+ //void Write64(uint64_t Value) { Writer->Write64(Value); }
+ void WriteZeros(unsigned N) { Writer->WriteZeros(N); }
+ //void WriteBytes(StringRef Str, unsigned ZeroFillSize = 0) {
+ // Writer->WriteBytes(Str, ZeroFillSize);
+ //}
+
+ void WriteWord(uint64_t W) {
+ if (Is64Bit)
+ Writer->Write64(W);
+ else
+ Writer->Write32(W);
+ }
+
+ void String8(char *buf, uint8_t Value) {
+ buf[0] = Value;
+ }
+
+ void StringLE16(char *buf, uint16_t Value) {
+ buf[0] = char(Value >> 0);
+ buf[1] = char(Value >> 8);
+ }
+
+ void StringLE32(char *buf, uint32_t Value) {
+ StringLE16(buf, uint16_t(Value >> 0));
+ StringLE16(buf + 2, uint16_t(Value >> 16));
+ }
+
+ void StringLE64(char *buf, uint64_t Value) {
+ StringLE32(buf, uint32_t(Value >> 0));
+ StringLE32(buf + 4, uint32_t(Value >> 32));
+ }
+
+    void StringBE16(char *buf, uint16_t Value) {
+ buf[0] = char(Value >> 8);
+ buf[1] = char(Value >> 0);
+ }
+
+ void StringBE32(char *buf, uint32_t Value) {
+ StringBE16(buf, uint16_t(Value >> 16));
+ StringBE16(buf + 2, uint16_t(Value >> 0));
+ }
+
+ void StringBE64(char *buf, uint64_t Value) {
+ StringBE32(buf, uint32_t(Value >> 32));
+ StringBE32(buf + 4, uint32_t(Value >> 0));
+ }
+
+ void String16(char *buf, uint16_t Value) {
+ if (Writer->isLittleEndian())
+ StringLE16(buf, Value);
+ else
+ StringBE16(buf, Value);
+ }
+
+ void String32(char *buf, uint32_t Value) {
+ if (Writer->isLittleEndian())
+ StringLE32(buf, Value);
+ else
+ StringBE32(buf, Value);
+ }
+
+ void String64(char *buf, uint64_t Value) {
+ if (Writer->isLittleEndian())
+ StringLE64(buf, Value);
+ else
+ StringBE64(buf, Value);
+ }
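
The String* helpers above serialize fixed-width integers into a caller-provided byte buffer in the writer's endianness; the LE variants place the least significant byte first. As a standalone illustration of that byte order (a minimal sketch, not the LLVM API):

#include <cstdint>
#include <cstdio>

// Hypothetical standalone equivalent of StringLE32: least significant
// byte first, one byte per array slot.
static void encodeLE32(char *buf, uint32_t value) {
  buf[0] = char(value >> 0);
  buf[1] = char(value >> 8);
  buf[2] = char(value >> 16);
  buf[3] = char(value >> 24);
}

int main() {
  char buf[4];
  encodeLE32(buf, 0x11223344u);
  // Prints "44 33 22 11": the low-order byte lands at offset 0.
  for (int i = 0; i < 4; ++i)
    std::printf("%02x ", (unsigned)(unsigned char)buf[i]);
  std::printf("\n");
  return 0;
}
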
+
+ void WriteHeader(uint64_t SectionDataSize, unsigned NumberOfSections);
+
+ void WriteSymbolEntry(MCDataFragment *F, uint64_t name, uint8_t info,
+ uint64_t value, uint64_t size,
+ uint8_t other, uint16_t shndx);
+
+ void WriteSymbol(MCDataFragment *F, ELFSymbolData &MSD,
+ const MCAsmLayout &Layout);
+
+ void WriteSymbolTable(MCDataFragment *F, const MCAssembler &Asm,
+ const MCAsmLayout &Layout);
+
+ void RecordRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup,
+ MCValue Target, uint64_t &FixedValue);
+
+ uint64_t getSymbolIndexInSymbolTable(const MCAssembler &Asm,
+ const MCSymbol *S);
+
+ /// ComputeSymbolTable - Compute the symbol table data
+ ///
+ /// \param StringTable [out] - The string table data.
+ /// \param StringIndexMap [out] - Map from symbol names to offsets in the
+ /// string table.
+ void ComputeSymbolTable(MCAssembler &Asm);
+
+ void WriteRelocation(MCAssembler &Asm, MCAsmLayout &Layout,
+ const MCSectionData &SD);
+
+ void WriteRelocations(MCAssembler &Asm, MCAsmLayout &Layout) {
+ for (MCAssembler::const_iterator it = Asm.begin(),
+ ie = Asm.end(); it != ie; ++it) {
+ WriteRelocation(Asm, Layout, *it);
+ }
+ }
+
+ void CreateMetadataSections(MCAssembler &Asm, MCAsmLayout &Layout);
+
+ void ExecutePostLayoutBinding(MCAssembler &Asm) {
+ // Compute symbol table information.
+ ComputeSymbolTable(Asm);
+ }
+
+ void WriteSecHdrEntry(uint32_t Name, uint32_t Type, uint64_t Flags,
+ uint64_t Address, uint64_t Offset,
+ uint64_t Size, uint32_t Link, uint32_t Info,
+ uint64_t Alignment, uint64_t EntrySize);
+
+ void WriteRelocationsFragment(const MCAssembler &Asm, MCDataFragment *F,
+ const MCSectionData *SD);
+
+ void WriteObject(const MCAssembler &Asm, const MCAsmLayout &Layout);
+ };
+
+}
+
+// Emit the ELF header.
+void ELFObjectWriterImpl::WriteHeader(uint64_t SectionDataSize,
+ unsigned NumberOfSections) {
+ // ELF Header
+ // ----------
+ //
+ // Note
+ // ----
+  // The WriteWord method behaves differently for ELF32 and ELF64, writing
+ // 4 bytes in the former and 8 in the latter.
+
+ Write8(0x7f); // e_ident[EI_MAG0]
+ Write8('E'); // e_ident[EI_MAG1]
+ Write8('L'); // e_ident[EI_MAG2]
+ Write8('F'); // e_ident[EI_MAG3]
+
+ Write8(Is64Bit ? ELF::ELFCLASS64 : ELF::ELFCLASS32); // e_ident[EI_CLASS]
+
+ // e_ident[EI_DATA]
+ Write8(Writer->isLittleEndian() ? ELF::ELFDATA2LSB : ELF::ELFDATA2MSB);
+
+ Write8(ELF::EV_CURRENT); // e_ident[EI_VERSION]
+ Write8(ELF::ELFOSABI_LINUX); // e_ident[EI_OSABI]
+ Write8(0); // e_ident[EI_ABIVERSION]
+
+ WriteZeros(ELF::EI_NIDENT - ELF::EI_PAD);
+
+ Write16(ELF::ET_REL); // e_type
+
+ // FIXME: Make this configurable
+ Write16(Is64Bit ? ELF::EM_X86_64 : ELF::EM_386); // e_machine = target
+
+ Write32(ELF::EV_CURRENT); // e_version
+ WriteWord(0); // e_entry, no entry point in .o file
+ WriteWord(0); // e_phoff, no program header for .o
+ WriteWord(SectionDataSize + (Is64Bit ? sizeof(ELF::Elf64_Ehdr) :
+ sizeof(ELF::Elf32_Ehdr))); // e_shoff = sec hdr table off in bytes
+
+ // FIXME: Make this configurable.
+ Write32(0); // e_flags = whatever the target wants
+
+ // e_ehsize = ELF header size
+ Write16(Is64Bit ? sizeof(ELF::Elf64_Ehdr) : sizeof(ELF::Elf32_Ehdr));
+
+ Write16(0); // e_phentsize = prog header entry size
+ Write16(0); // e_phnum = # prog header entries = 0
+
+ // e_shentsize = Section header entry size
+ Write16(Is64Bit ? sizeof(ELF::Elf64_Shdr) : sizeof(ELF::Elf32_Shdr));
+
+ // e_shnum = # of section header ents
+ Write16(NumberOfSections);
+
+ // e_shstrndx = Section # of '.shstrtab'
+ Write16(ShstrtabIndex);
+}
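
For reference, WriteHeader emits the ELF file header fields in the order defined by the System V ABI; the sketch below shows the 32-bit layout it produces (an illustrative struct, not the LLVM ELF.h declarations). The 64-bit form widens e_entry/e_phoff/e_shoff to 8 bytes, which is what the WriteWord calls account for.

#include <cstdint>

// Illustrative ELF32 file header per the System V ABI (52 bytes, no padding
// under natural alignment).
struct Elf32Header {
  uint8_t  e_ident[16]; // magic, class, data encoding, version, OS/ABI, pad
  uint16_t e_type;      // ET_REL for a relocatable object
  uint16_t e_machine;   // EM_386 / EM_X86_64
  uint32_t e_version;   // EV_CURRENT
  uint32_t e_entry;     // 0: no entry point in a .o
  uint32_t e_phoff;     // 0: no program headers in a .o
  uint32_t e_shoff;     // file offset of the section header table
  uint32_t e_flags;
  uint16_t e_ehsize;    // size of this header
  uint16_t e_phentsize; // 0 for a .o
  uint16_t e_phnum;     // 0 for a .o
  uint16_t e_shentsize; // size of one section header
  uint16_t e_shnum;     // number of section headers
  uint16_t e_shstrndx;  // section index of .shstrtab
};
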
+
+void ELFObjectWriterImpl::WriteSymbolEntry(MCDataFragment *F, uint64_t name,
+ uint8_t info, uint64_t value,
+ uint64_t size, uint8_t other,
+ uint16_t shndx) {
+ if (Is64Bit) {
+ char buf[8];
+
+ String32(buf, name);
+ F->getContents() += StringRef(buf, 4); // st_name
+
+ String8(buf, info);
+ F->getContents() += StringRef(buf, 1); // st_info
+
+ String8(buf, other);
+ F->getContents() += StringRef(buf, 1); // st_other
+
+ String16(buf, shndx);
+ F->getContents() += StringRef(buf, 2); // st_shndx
+
+ String64(buf, value);
+ F->getContents() += StringRef(buf, 8); // st_value
+
+ String64(buf, size);
+ F->getContents() += StringRef(buf, 8); // st_size
+ } else {
+ char buf[4];
+
+ String32(buf, name);
+ F->getContents() += StringRef(buf, 4); // st_name
+
+ String32(buf, value);
+ F->getContents() += StringRef(buf, 4); // st_value
+
+ String32(buf, size);
+ F->getContents() += StringRef(buf, 4); // st_size
+
+ String8(buf, info);
+ F->getContents() += StringRef(buf, 1); // st_info
+
+ String8(buf, other);
+ F->getContents() += StringRef(buf, 1); // st_other
+
+ String16(buf, shndx);
+ F->getContents() += StringRef(buf, 2); // st_shndx
+ }
+}
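
The two branches above mirror the different field orders of the 32- and 64-bit symbol entries in the System V ABI, shown here as illustrative structs (not the LLVM declarations):

#include <cstdint>

// The 64-bit format moves st_info/st_other/st_shndx ahead of the 8-byte
// value and size fields, hence the two write orders in WriteSymbolEntry.
struct Elf32Sym {
  uint32_t st_name;  // offset into .strtab
  uint32_t st_value;
  uint32_t st_size;
  uint8_t  st_info;  // binding (high nibble) | type (low nibble)
  uint8_t  st_other; // visibility
  uint16_t st_shndx; // defining section, or SHN_UNDEF/SHN_ABS/SHN_COMMON
};                   // 16 bytes

struct Elf64Sym {
  uint32_t st_name;
  uint8_t  st_info;
  uint8_t  st_other;
  uint16_t st_shndx;
  uint64_t st_value;
  uint64_t st_size;
};                   // 24 bytes
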
+
+void ELFObjectWriterImpl::WriteSymbol(MCDataFragment *F, ELFSymbolData &MSD,
+ const MCAsmLayout &Layout) {
+ MCSymbolData &Data = *MSD.SymbolData;
+ uint8_t Info = (Data.getFlags() & 0xff);
+ uint8_t Other = ((Data.getFlags() & 0xf00) >> ELF_STV_Shift);
+ uint64_t Value = 0;
+ uint64_t Size = 0;
+ const MCExpr *ESize;
+
+ if (Data.isCommon() && Data.isExternal())
+ Value = Data.getCommonAlignment();
+
+ if (!Data.isCommon())
+ if (MCFragment *FF = Data.getFragment())
+ Value = Layout.getSymbolAddress(&Data) -
+ Layout.getSectionAddress(FF->getParent());
+
+ ESize = Data.getSize();
+ if (Data.getSize()) {
+ MCValue Res;
+ if (ESize->getKind() == MCExpr::Binary) {
+ const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(ESize);
+
+ if (BE->EvaluateAsRelocatable(Res, &Layout)) {
+ MCSymbolData &A =
+ Layout.getAssembler().getSymbolData(Res.getSymA()->getSymbol());
+ MCSymbolData &B =
+ Layout.getAssembler().getSymbolData(Res.getSymB()->getSymbol());
+
+ Size = Layout.getSymbolAddress(&A) - Layout.getSymbolAddress(&B);
+ }
+ } else if (ESize->getKind() == MCExpr::Constant) {
+ Size = static_cast<const MCConstantExpr *>(ESize)->getValue();
+ } else {
+ assert(0 && "Unsupported size expression");
+ }
+ }
+
+ // Write out the symbol table entry
+ WriteSymbolEntry(F, MSD.StringIndex, Info, Value,
+ Size, Other, MSD.SectionIndex);
+}
+
+void ELFObjectWriterImpl::WriteSymbolTable(MCDataFragment *F,
+ const MCAssembler &Asm,
+ const MCAsmLayout &Layout) {
+ // The string table must be emitted first because we need the index
+ // into the string table for all the symbol names.
+ assert(StringTable.size() && "Missing string table");
+
+ // FIXME: Make sure the start of the symbol table is aligned.
+
+ // The first entry is the undefined symbol entry.
+ unsigned EntrySize = Is64Bit ? ELF::SYMENTRY_SIZE64 : ELF::SYMENTRY_SIZE32;
+ F->getContents().append(EntrySize, '\x00');
+
+ // Write the symbol table entries.
+ LastLocalSymbolIndex = LocalSymbolData.size() + 1;
+ for (unsigned i = 0, e = LocalSymbolData.size(); i != e; ++i) {
+ ELFSymbolData &MSD = LocalSymbolData[i];
+ WriteSymbol(F, MSD, Layout);
+ }
+
+  // Write out a symbol table entry for each section, leaving out
+  // the just-added .symtab, which is at the very end.
+ unsigned Index = 1;
+ for (MCAssembler::const_iterator it = Asm.begin(),
+ ie = Asm.end(); it != ie; ++it, ++Index) {
+ const MCSectionELF &Section =
+ static_cast<const MCSectionELF&>(it->getSection());
+    // Skip relocation sections so the indexes of the remaining
+    // sections are not thrown off.
+ if (Section.getType() == ELF::SHT_RELA || Section.getType() == ELF::SHT_REL)
+ continue;
+ if (Index == Asm.size())
+ continue;
+ WriteSymbolEntry(F, 0, ELF::STT_SECTION, 0, 0, ELF::STV_DEFAULT, Index);
+ LastLocalSymbolIndex++;
+ }
+
+ for (unsigned i = 0, e = ExternalSymbolData.size(); i != e; ++i) {
+ ELFSymbolData &MSD = ExternalSymbolData[i];
+ MCSymbolData &Data = *MSD.SymbolData;
+ assert((Data.getFlags() & ELF_STB_Global) &&
+ "External symbol requires STB_GLOBAL flag");
+ WriteSymbol(F, MSD, Layout);
+ if (Data.getFlags() & ELF_STB_Local)
+ LastLocalSymbolIndex++;
+ }
+
+ for (unsigned i = 0, e = UndefinedSymbolData.size(); i != e; ++i) {
+ ELFSymbolData &MSD = UndefinedSymbolData[i];
+ MCSymbolData &Data = *MSD.SymbolData;
+ Data.setFlags(Data.getFlags() | ELF_STB_Global);
+ WriteSymbol(F, MSD, Layout);
+ if (Data.getFlags() & ELF_STB_Local)
+ LastLocalSymbolIndex++;
+ }
+}
+
+// FIXME: this is currently X86/X86_64 only
+void ELFObjectWriterImpl::RecordRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup,
+ MCValue Target,
+ uint64_t &FixedValue) {
+ int64_t Addend = 0;
+ unsigned Index = 0;
+ int64_t Value = Target.getConstant();
+
+ if (!Target.isAbsolute()) {
+ const MCSymbol *Symbol = &Target.getSymA()->getSymbol();
+ MCSymbolData &SD = Asm.getSymbolData(*Symbol);
+ const MCSymbolData *Base = Asm.getAtom(Layout, &SD);
+ MCFragment *F = SD.getFragment();
+
+ if (Base) {
+ if (F && (!Symbol->isInSection() || SD.isCommon()) && !SD.isExternal()) {
+ Index = F->getParent()->getOrdinal() + LocalSymbolData.size() + 1;
+ Value += Layout.getSymbolAddress(&SD);
+ } else
+ Index = getSymbolIndexInSymbolTable(Asm, Symbol);
+ if (Base != &SD)
+ Value += Layout.getSymbolAddress(&SD) - Layout.getSymbolAddress(Base);
+ Addend = Value;
+ // Compensate for the addend on i386.
+ if (Is64Bit)
+ Value = 0;
+ } else {
+ if (F) {
+        // Index in .symtab of the section against which this symbol
+        // is being relocated, + 2 (empty section + abs. symbols).
+ Index = F->getParent()->getOrdinal() + LocalSymbolData.size() + 1;
+
+ MCSectionData *FSD = F->getParent();
+ // Offset of the symbol in the section
+ Addend = Layout.getSymbolAddress(&SD) - Layout.getSectionAddress(FSD);
+ } else {
+ FixedValue = Value;
+ return;
+ }
+ }
+ }
+
+ FixedValue = Value;
+
+ // determine the type of the relocation
+ bool IsPCRel = isFixupKindX86PCRel(Fixup.getKind());
+ unsigned Type;
+ if (Is64Bit) {
+ if (IsPCRel) {
+ Type = ELF::R_X86_64_PC32;
+ } else {
+ switch ((unsigned)Fixup.getKind()) {
+ default: llvm_unreachable("invalid fixup kind!");
+ case FK_Data_8: Type = ELF::R_X86_64_64; break;
+ case X86::reloc_pcrel_4byte:
+ case FK_Data_4:
+        // check that the offset fits within a signed 32-bit value
+ if (isInt<32>(Target.getConstant()))
+ Type = ELF::R_X86_64_32S;
+ else
+ Type = ELF::R_X86_64_32;
+ break;
+ case FK_Data_2: Type = ELF::R_X86_64_16; break;
+ case X86::reloc_pcrel_1byte:
+ case FK_Data_1: Type = ELF::R_X86_64_8; break;
+ }
+ }
+ } else {
+ if (IsPCRel) {
+ Type = ELF::R_386_PC32;
+ } else {
+ switch ((unsigned)Fixup.getKind()) {
+ default: llvm_unreachable("invalid fixup kind!");
+ case X86::reloc_pcrel_4byte:
+ case FK_Data_4: Type = ELF::R_386_32; break;
+ case FK_Data_2: Type = ELF::R_386_16; break;
+ case X86::reloc_pcrel_1byte:
+ case FK_Data_1: Type = ELF::R_386_8; break;
+ }
+ }
+ }
+
+ ELFRelocationEntry ERE;
+
+ if (Is64Bit) {
+ struct ELF::Elf64_Rela ERE64;
+ ERE64.setSymbolAndType(Index, Type);
+ ERE.r_info = ERE64.r_info;
+ } else {
+ struct ELF::Elf32_Rela ERE32;
+ ERE32.setSymbolAndType(Index, Type);
+ ERE.r_info = ERE32.r_info;
+ }
+
+ ERE.r_offset = Layout.getFragmentOffset(Fragment) + Fixup.getOffset();
+
+ if (HasRelocationAddend)
+ ERE.r_addend = Addend;
+ else
+ ERE.r_addend = 0; // Silence compiler warning.
+
+ Relocations[Fragment->getParent()].push_back(ERE);
+}
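
The Elf64_Rela/Elf32_Rela setSymbolAndType calls above pack the symbol index and relocation type into r_info. A standalone sketch of that packing per the System V ABI (the helper names here are made up for illustration):

#include <cstdint>

// ELF64 keeps the symbol index in the upper 32 bits of a 64-bit r_info,
// ELF32 in the upper 24 bits of a 32-bit r_info.
static inline uint64_t elf64RInfo(uint32_t sym, uint32_t type) {
  return (uint64_t(sym) << 32) | type;
}
static inline uint32_t elf32RInfo(uint32_t sym, uint8_t type) {
  return (sym << 8) | type;
}
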
+
+uint64_t
+ELFObjectWriterImpl::getSymbolIndexInSymbolTable(const MCAssembler &Asm,
+ const MCSymbol *S) {
+ MCSymbolData &SD = Asm.getSymbolData(*S);
+
+ // Local symbol.
+ if (!SD.isExternal() && !S->isUndefined())
+ return SD.getIndex() + /* empty symbol */ 1;
+
+ // External or undefined symbol.
+ return SD.getIndex() + Asm.size() + /* empty symbol */ 1;
+}
+
+void ELFObjectWriterImpl::ComputeSymbolTable(MCAssembler &Asm) {
+ // Build section lookup table.
+ DenseMap<const MCSection*, uint8_t> SectionIndexMap;
+ unsigned Index = 1;
+ for (MCAssembler::iterator it = Asm.begin(),
+ ie = Asm.end(); it != ie; ++it, ++Index)
+ SectionIndexMap[&it->getSection()] = Index;
+
+ // Index 0 is always the empty string.
+ StringMap<uint64_t> StringIndexMap;
+ StringTable += '\x00';
+
+ // Add the data for local symbols.
+ for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
+ ie = Asm.symbol_end(); it != ie; ++it) {
+ const MCSymbol &Symbol = it->getSymbol();
+
+ // Ignore non-linker visible symbols.
+ if (!Asm.isSymbolLinkerVisible(Symbol))
+ continue;
+
+ if (it->isExternal() || Symbol.isUndefined())
+ continue;
+
+ uint64_t &Entry = StringIndexMap[Symbol.getName()];
+ if (!Entry) {
+ Entry = StringTable.size();
+ StringTable += Symbol.getName();
+ StringTable += '\x00';
+ }
+
+ ELFSymbolData MSD;
+ MSD.SymbolData = it;
+ MSD.StringIndex = Entry;
+
+ if (Symbol.isAbsolute()) {
+ MSD.SectionIndex = ELF::SHN_ABS;
+ LocalSymbolData.push_back(MSD);
+ } else {
+ MSD.SectionIndex = SectionIndexMap.lookup(&Symbol.getSection());
+ assert(MSD.SectionIndex && "Invalid section index!");
+ LocalSymbolData.push_back(MSD);
+ }
+ }
+
+ // Now add non-local symbols.
+ for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
+ ie = Asm.symbol_end(); it != ie; ++it) {
+ const MCSymbol &Symbol = it->getSymbol();
+
+ // Ignore non-linker visible symbols.
+ if (!Asm.isSymbolLinkerVisible(Symbol))
+ continue;
+
+ if (!it->isExternal() && !Symbol.isUndefined())
+ continue;
+
+ uint64_t &Entry = StringIndexMap[Symbol.getName()];
+ if (!Entry) {
+ Entry = StringTable.size();
+ StringTable += Symbol.getName();
+ StringTable += '\x00';
+ }
+
+ ELFSymbolData MSD;
+ MSD.SymbolData = it;
+ MSD.StringIndex = Entry;
+
+ if (Symbol.isUndefined()) {
+ MSD.SectionIndex = ELF::SHN_UNDEF;
+      // XXX: for some reason we don't Emit* this
+ it->setFlags(it->getFlags() | ELF_STB_Global);
+ UndefinedSymbolData.push_back(MSD);
+ } else if (Symbol.isAbsolute()) {
+ MSD.SectionIndex = ELF::SHN_ABS;
+ ExternalSymbolData.push_back(MSD);
+ } else if (it->isCommon()) {
+ MSD.SectionIndex = ELF::SHN_COMMON;
+ ExternalSymbolData.push_back(MSD);
+ } else {
+ MSD.SectionIndex = SectionIndexMap.lookup(&Symbol.getSection());
+ assert(MSD.SectionIndex && "Invalid section index!");
+ ExternalSymbolData.push_back(MSD);
+ }
+ }
+
+ // Symbols are required to be in lexicographic order.
+ array_pod_sort(LocalSymbolData.begin(), LocalSymbolData.end());
+ array_pod_sort(ExternalSymbolData.begin(), ExternalSymbolData.end());
+ array_pod_sort(UndefinedSymbolData.begin(), UndefinedSymbolData.end());
+
+ // Set the symbol indices. Local symbols must come before all other
+ // symbols with non-local bindings.
+ Index = 0;
+ for (unsigned i = 0, e = LocalSymbolData.size(); i != e; ++i)
+ LocalSymbolData[i].SymbolData->setIndex(Index++);
+ for (unsigned i = 0, e = ExternalSymbolData.size(); i != e; ++i)
+ ExternalSymbolData[i].SymbolData->setIndex(Index++);
+ for (unsigned i = 0, e = UndefinedSymbolData.size(); i != e; ++i)
+ UndefinedSymbolData[i].SymbolData->setIndex(Index++);
+}
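
The string-table bookkeeping in ComputeSymbolTable follows the usual ELF .strtab scheme: byte 0 is a NUL, each name is appended once with a trailing NUL, and st_name stores the byte offset of the name. A minimal standalone sketch of that scheme (an illustration of the idea, not the code above):

#include <cstdint>
#include <map>
#include <string>

// Tiny .strtab builder: add() returns the byte offset to store in st_name.
struct StrTab {
  std::string Data;                      // starts as a single NUL byte
  std::map<std::string, uint64_t> Index; // name -> offset, to avoid duplicates
  StrTab() : Data(1, '\0') {}
  uint64_t add(const std::string &Name) {
    std::map<std::string, uint64_t>::iterator it = Index.find(Name);
    if (it != Index.end())
      return it->second;
    uint64_t Off = Data.size();
    Data += Name;
    Data += '\0';
    Index[Name] = Off;
    return Off;
  }
};
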
+
+void ELFObjectWriterImpl::WriteRelocation(MCAssembler &Asm, MCAsmLayout &Layout,
+ const MCSectionData &SD) {
+ if (!Relocations[&SD].empty()) {
+ MCContext &Ctx = Asm.getContext();
+ const MCSection *RelaSection;
+ const MCSectionELF &Section =
+ static_cast<const MCSectionELF&>(SD.getSection());
+
+ const StringRef SectionName = Section.getSectionName();
+ std::string RelaSectionName = HasRelocationAddend ? ".rela" : ".rel";
+ RelaSectionName += SectionName;
+
+ unsigned EntrySize;
+ if (HasRelocationAddend)
+ EntrySize = Is64Bit ? sizeof(ELF::Elf64_Rela) : sizeof(ELF::Elf32_Rela);
+ else
+ EntrySize = Is64Bit ? sizeof(ELF::Elf64_Rel) : sizeof(ELF::Elf32_Rel);
+
+ RelaSection = Ctx.getELFSection(RelaSectionName, HasRelocationAddend ?
+ ELF::SHT_RELA : ELF::SHT_REL, 0,
+ SectionKind::getReadOnly(),
+ false, EntrySize);
+
+ MCSectionData &RelaSD = Asm.getOrCreateSectionData(*RelaSection);
+ RelaSD.setAlignment(1);
+
+ MCDataFragment *F = new MCDataFragment(&RelaSD);
+
+ WriteRelocationsFragment(Asm, F, &SD);
+
+ Asm.AddSectionToTheEnd(RelaSD, Layout);
+ }
+}
+
+void ELFObjectWriterImpl::WriteSecHdrEntry(uint32_t Name, uint32_t Type,
+ uint64_t Flags, uint64_t Address,
+ uint64_t Offset, uint64_t Size,
+ uint32_t Link, uint32_t Info,
+ uint64_t Alignment,
+ uint64_t EntrySize) {
+ Write32(Name); // sh_name: index into string table
+ Write32(Type); // sh_type
+ WriteWord(Flags); // sh_flags
+ WriteWord(Address); // sh_addr
+ WriteWord(Offset); // sh_offset
+ WriteWord(Size); // sh_size
+ Write32(Link); // sh_link
+ Write32(Info); // sh_info
+ WriteWord(Alignment); // sh_addralign
+ WriteWord(EntrySize); // sh_entsize
+}
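
WriteSecHdrEntry writes its arguments in ELF section-header order; for reference, the 32-bit layout per the System V ABI (an illustrative struct; the 64-bit form widens the WriteWord fields to 8 bytes):

#include <cstdint>

// Illustrative ELF32 section header (40 bytes).
struct Elf32Shdr {
  uint32_t sh_name;      // offset into .shstrtab
  uint32_t sh_type;      // SHT_PROGBITS, SHT_SYMTAB, SHT_RELA, ...
  uint32_t sh_flags;
  uint32_t sh_addr;
  uint32_t sh_offset;    // file offset of the section contents
  uint32_t sh_size;
  uint32_t sh_link;      // e.g. for .symtab: index of .strtab
  uint32_t sh_info;      // e.g. for .rel*: index of the section being relocated
  uint32_t sh_addralign;
  uint32_t sh_entsize;   // fixed entry size for symtab/rel/rela sections
};
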
+
+void ELFObjectWriterImpl::WriteRelocationsFragment(const MCAssembler &Asm,
+ MCDataFragment *F,
+ const MCSectionData *SD) {
+ std::vector<ELFRelocationEntry> &Relocs = Relocations[SD];
+ // sort by the r_offset just like gnu as does
+ array_pod_sort(Relocs.begin(), Relocs.end());
+
+ for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
+ ELFRelocationEntry entry = Relocs[e - i - 1];
+
+ unsigned WordSize = Is64Bit ? 8 : 4;
+ F->getContents() += StringRef((const char *)&entry.r_offset, WordSize);
+ F->getContents() += StringRef((const char *)&entry.r_info, WordSize);
+
+ if (HasRelocationAddend)
+ F->getContents() += StringRef((const char *)&entry.r_addend, WordSize);
+ }
+}
+
+void ELFObjectWriterImpl::CreateMetadataSections(MCAssembler &Asm,
+ MCAsmLayout &Layout) {
+ MCContext &Ctx = Asm.getContext();
+ MCDataFragment *F;
+
+ WriteRelocations(Asm, Layout);
+
+ const MCSection *SymtabSection;
+ unsigned EntrySize = Is64Bit ? ELF::SYMENTRY_SIZE64 : ELF::SYMENTRY_SIZE32;
+
+ SymtabSection = Ctx.getELFSection(".symtab", ELF::SHT_SYMTAB, 0,
+ SectionKind::getReadOnly(),
+ false, EntrySize);
+
+ MCSectionData &SymtabSD = Asm.getOrCreateSectionData(*SymtabSection);
+
+ SymtabSD.setAlignment(Is64Bit ? 8 : 4);
+
+ F = new MCDataFragment(&SymtabSD);
+
+ // Symbol table
+ WriteSymbolTable(F, Asm, Layout);
+ Asm.AddSectionToTheEnd(SymtabSD, Layout);
+
+ const MCSection *StrtabSection;
+ StrtabSection = Ctx.getELFSection(".strtab", ELF::SHT_STRTAB, 0,
+ SectionKind::getReadOnly(), false);
+
+ MCSectionData &StrtabSD = Asm.getOrCreateSectionData(*StrtabSection);
+ StrtabSD.setAlignment(1);
+
+ // FIXME: This isn't right. If the sections get rearranged this will
+ // be wrong. We need a proper lookup.
+ StringTableIndex = Asm.size();
+
+ F = new MCDataFragment(&StrtabSD);
+ F->getContents().append(StringTable.begin(), StringTable.end());
+ Asm.AddSectionToTheEnd(StrtabSD, Layout);
+
+ const MCSection *ShstrtabSection;
+ ShstrtabSection = Ctx.getELFSection(".shstrtab", ELF::SHT_STRTAB, 0,
+ SectionKind::getReadOnly(), false);
+
+ MCSectionData &ShstrtabSD = Asm.getOrCreateSectionData(*ShstrtabSection);
+ ShstrtabSD.setAlignment(1);
+
+ F = new MCDataFragment(&ShstrtabSD);
+
+ // FIXME: This isn't right. If the sections get rearranged this will
+ // be wrong. We need a proper lookup.
+ ShstrtabIndex = Asm.size();
+
+ // Section header string table.
+ //
+ // The first entry of a string table holds a null character so skip
+ // section 0.
+ uint64_t Index = 1;
+ F->getContents() += '\x00';
+
+ for (MCAssembler::const_iterator it = Asm.begin(),
+ ie = Asm.end(); it != ie; ++it) {
+ const MCSectionELF &Section =
+ static_cast<const MCSectionELF&>(it->getSection());
+
+ // Remember the index into the string table so we can write it
+ // into the sh_name field of the section header table.
+ SectionStringTableIndex[&it->getSection()] = Index;
+
+ Index += Section.getSectionName().size() + 1;
+ F->getContents() += Section.getSectionName();
+ F->getContents() += '\x00';
+ }
+
+ Asm.AddSectionToTheEnd(ShstrtabSD, Layout);
+}
+
+void ELFObjectWriterImpl::WriteObject(const MCAssembler &Asm,
+ const MCAsmLayout &Layout) {
+ CreateMetadataSections(const_cast<MCAssembler&>(Asm),
+ const_cast<MCAsmLayout&>(Layout));
+
+ // Add 1 for the null section.
+ unsigned NumSections = Asm.size() + 1;
+
+ uint64_t SectionDataSize = 0;
+
+ for (MCAssembler::const_iterator it = Asm.begin(),
+ ie = Asm.end(); it != ie; ++it) {
+ const MCSectionData &SD = *it;
+
+ // Get the size of the section in the output file (including padding).
+ uint64_t Size = Layout.getSectionFileSize(&SD);
+ SectionDataSize += Size;
+ }
+
+ // Write out the ELF header ...
+ WriteHeader(SectionDataSize, NumSections);
+ FileOff = Is64Bit ? sizeof(ELF::Elf64_Ehdr) : sizeof(ELF::Elf32_Ehdr);
+
+ // ... then all of the sections ...
+ DenseMap<const MCSection*, uint64_t> SectionOffsetMap;
+
+ DenseMap<const MCSection*, uint8_t> SectionIndexMap;
+
+ unsigned Index = 1;
+ for (MCAssembler::const_iterator it = Asm.begin(),
+ ie = Asm.end(); it != ie; ++it) {
+ // Remember the offset into the file for this section.
+ SectionOffsetMap[&it->getSection()] = FileOff;
+
+ SectionIndexMap[&it->getSection()] = Index++;
+
+ const MCSectionData &SD = *it;
+ FileOff += Layout.getSectionFileSize(&SD);
+
+ Asm.WriteSectionData(it, Layout, Writer);
+ }
+
+ // ... and then the section header table.
+ // Should we align the section header table?
+ //
+ // Null section first.
+ WriteSecHdrEntry(0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+
+ for (MCAssembler::const_iterator it = Asm.begin(),
+ ie = Asm.end(); it != ie; ++it) {
+ const MCSectionData &SD = *it;
+ const MCSectionELF &Section =
+ static_cast<const MCSectionELF&>(SD.getSection());
+
+ uint64_t sh_link = 0;
+ uint64_t sh_info = 0;
+
+ switch(Section.getType()) {
+ case ELF::SHT_DYNAMIC:
+ sh_link = SectionStringTableIndex[&it->getSection()];
+ sh_info = 0;
+ break;
+
+ case ELF::SHT_REL:
+ case ELF::SHT_RELA: {
+ const MCSection *SymtabSection;
+ const MCSection *InfoSection;
+
+ SymtabSection = Asm.getContext().getELFSection(".symtab", ELF::SHT_SYMTAB, 0,
+ SectionKind::getReadOnly(),
+ false);
+ sh_link = SectionIndexMap[SymtabSection];
+
+ // Remove ".rel" and ".rela" prefixes.
+ unsigned SecNameLen = (Section.getType() == ELF::SHT_REL) ? 4 : 5;
+ StringRef SectionName = Section.getSectionName().substr(SecNameLen);
+
+ InfoSection = Asm.getContext().getELFSection(SectionName,
+ ELF::SHT_PROGBITS, 0,
+ SectionKind::getReadOnly(),
+ false);
+ sh_info = SectionIndexMap[InfoSection];
+ break;
+ }
+
+ case ELF::SHT_SYMTAB:
+ case ELF::SHT_DYNSYM:
+ sh_link = StringTableIndex;
+ sh_info = LastLocalSymbolIndex;
+ break;
+
+ case ELF::SHT_PROGBITS:
+ case ELF::SHT_STRTAB:
+ case ELF::SHT_NOBITS:
+ case ELF::SHT_NULL:
+ // Nothing to do.
+ break;
+
+ case ELF::SHT_HASH:
+ case ELF::SHT_GROUP:
+ case ELF::SHT_SYMTAB_SHNDX:
+ default:
+ assert(0 && "FIXME: sh_type value not supported!");
+ break;
+ }
+
+ WriteSecHdrEntry(SectionStringTableIndex[&it->getSection()],
+ Section.getType(), Section.getFlags(),
+ Layout.getSectionAddress(&SD),
+ SectionOffsetMap.lookup(&SD.getSection()),
+ Layout.getSectionSize(&SD), sh_link,
+ sh_info, SD.getAlignment(),
+ Section.getEntrySize());
+ }
+}
+
+ELFObjectWriter::ELFObjectWriter(raw_ostream &OS,
+ bool Is64Bit,
+ bool IsLittleEndian,
+ bool HasRelocationAddend)
+ : MCObjectWriter(OS, IsLittleEndian)
+{
+ Impl = new ELFObjectWriterImpl(this, Is64Bit, HasRelocationAddend);
+}
+
+ELFObjectWriter::~ELFObjectWriter() {
+ delete (ELFObjectWriterImpl*) Impl;
+}
+
+void ELFObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm) {
+ ((ELFObjectWriterImpl*) Impl)->ExecutePostLayoutBinding(Asm);
+}
+
+void ELFObjectWriter::RecordRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) {
+ ((ELFObjectWriterImpl*) Impl)->RecordRelocation(Asm, Layout, Fragment, Fixup,
+ Target, FixedValue);
+}
+
+void ELFObjectWriter::WriteObject(const MCAssembler &Asm,
+ const MCAsmLayout &Layout) {
+ ((ELFObjectWriterImpl*) Impl)->WriteObject(Asm, Layout);
+}
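
The public ELFObjectWriter methods at the end of this file are thin forwarders around an opaque Impl pointer, a pimpl-style arrangement. A generic sketch of the idiom (names are illustrative, not LLVM's):

// The public class owns an untyped pointer and casts it back in each forwarder.
class WriterImpl {
public:
  void write() { /* real work lives here */ }
};

class Writer {
  void *Impl; // actually a WriterImpl*
public:
  Writer() : Impl(new WriterImpl()) {}
  ~Writer() { delete static_cast<WriterImpl *>(Impl); }
  void write() { static_cast<WriterImpl *>(Impl)->write(); }
};
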
diff --git a/libclamav/c++/llvm/lib/MC/MCAsmInfo.cpp b/libclamav/c++/llvm/lib/MC/MCAsmInfo.cpp
index f3f063f..670b2e9 100644
--- a/libclamav/c++/llvm/lib/MC/MCAsmInfo.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCAsmInfo.cpp
@@ -21,6 +21,7 @@ using namespace llvm;
MCAsmInfo::MCAsmInfo() {
HasSubsectionsViaSymbols = false;
HasMachoZeroFillDirective = false;
+ HasMachoTBSSDirective = false;
HasStaticCtorDtorReferenceInStaticMode = false;
MaxInstLength = 4;
PCSymbol = "$";
@@ -35,6 +36,7 @@ MCAsmInfo::MCAsmInfo() {
AssemblerDialect = 0;
AllowQuotesInName = false;
AllowNameToStartWithDigit = false;
+ AllowPeriodsInName = true;
ZeroDirective = "\t.zero\t";
AsciiDirective = "\t.ascii\t";
AscizDirective = "\t.asciz\t";
@@ -60,18 +62,16 @@ MCAsmInfo::MCAsmInfo() {
LinkOnceDirective = 0;
HiddenVisibilityAttr = MCSA_Hidden;
ProtectedVisibilityAttr = MCSA_Protected;
- AbsoluteDebugSectionOffsets = false;
- AbsoluteEHSectionOffsets = false;
HasLEB128 = false;
HasDotLocAndDotFile = false;
SupportsDebugInformation = false;
ExceptionsType = ExceptionHandling::None;
DwarfRequiresFrameSection = true;
DwarfUsesInlineInfoSection = false;
- Is_EHSymbolPrivate = true;
- GlobalEHDirective = 0;
- SupportsWeakOmittedEHFrame = true;
+ DwarfUsesAbsoluteLabelForStmtList = true;
DwarfSectionOffsetDirective = 0;
+ DwarfUsesLabelOffsetForRanges = true;
+ HasMicrosoftFastStdCallMangling = false;
AsmTransCBE = 0;
}
diff --git a/libclamav/c++/llvm/lib/MC/MCAsmInfoCOFF.cpp b/libclamav/c++/llvm/lib/MC/MCAsmInfoCOFF.cpp
index 9130493..7fc7d7a 100644
--- a/libclamav/c++/llvm/lib/MC/MCAsmInfoCOFF.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCAsmInfoCOFF.cpp
@@ -31,8 +31,7 @@ MCAsmInfoCOFF::MCAsmInfoCOFF() {
// Set up DWARF directives
HasLEB128 = true; // Target asm supports leb128 directives (little-endian)
- AbsoluteDebugSectionOffsets = true;
- AbsoluteEHSectionOffsets = false;
SupportsDebugInformation = true;
DwarfSectionOffsetDirective = "\t.secrel32\t";
+ HasMicrosoftFastStdCallMangling = true;
}
diff --git a/libclamav/c++/llvm/lib/MC/MCAsmInfoDarwin.cpp b/libclamav/c++/llvm/lib/MC/MCAsmInfoDarwin.cpp
index da865ad..e0e261a 100644
--- a/libclamav/c++/llvm/lib/MC/MCAsmInfoDarwin.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCAsmInfoDarwin.cpp
@@ -35,24 +35,17 @@ MCAsmInfoDarwin::MCAsmInfoDarwin() {
WeakRefDirective = "\t.weak_reference ";
ZeroDirective = "\t.space\t"; // ".space N" emits N zeros.
HasMachoZeroFillDirective = true; // Uses .zerofill
+ HasMachoTBSSDirective = true; // Uses .tbss
HasStaticCtorDtorReferenceInStaticMode = true;
HiddenVisibilityAttr = MCSA_PrivateExtern;
// Doesn't support protected visibility.
ProtectedVisibilityAttr = MCSA_Global;
-
HasDotTypeDotSizeDirective = false;
HasNoDeadStrip = true;
- // Note: Even though darwin has the .lcomm directive, it is just a synonym for
- // zerofill, so we prefer to use .zerofill.
- // _foo.eh symbols are currently always exported so that the linker knows
- // about them. This is not necessary on 10.6 and later, but it
- // doesn't hurt anything.
- // FIXME: I need to get this from Triple.
- Is_EHSymbolPrivate = false;
- GlobalEHDirective = "\t.globl\t";
- SupportsWeakOmittedEHFrame = false;
+ DwarfUsesAbsoluteLabelForStmtList = false;
+ DwarfUsesLabelOffsetForRanges = false;
}
diff --git a/libclamav/c++/llvm/lib/MC/MCAsmStreamer.cpp b/libclamav/c++/llvm/lib/MC/MCAsmStreamer.cpp
index 66a0a24..1cc8fb0 100644
--- a/libclamav/c++/llvm/lib/MC/MCAsmStreamer.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCAsmStreamer.cpp
@@ -16,6 +16,7 @@
#include "llvm/MC/MCInstPrinter.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ErrorHandling.h"
@@ -29,8 +30,8 @@ namespace {
class MCAsmStreamer : public MCStreamer {
formatted_raw_ostream &OS;
const MCAsmInfo &MAI;
- MCInstPrinter *InstPrinter;
- MCCodeEmitter *Emitter;
+ OwningPtr<MCInstPrinter> InstPrinter;
+ OwningPtr<MCCodeEmitter> Emitter;
SmallString<128> CommentToEmit;
raw_svector_ostream CommentStream;
@@ -41,11 +42,10 @@ class MCAsmStreamer : public MCStreamer {
public:
MCAsmStreamer(MCContext &Context, formatted_raw_ostream &os,
- const MCAsmInfo &mai,
bool isLittleEndian, bool isVerboseAsm, MCInstPrinter *printer,
MCCodeEmitter *emitter, bool showInst)
- : MCStreamer(Context), OS(os), MAI(mai), InstPrinter(printer),
- Emitter(emitter), CommentStream(CommentToEmit),
+ : MCStreamer(Context), OS(os), MAI(Context.getAsmInfo()),
+ InstPrinter(printer), Emitter(emitter), CommentStream(CommentToEmit),
IsLittleEndian(isLittleEndian), IsVerboseAsm(isVerboseAsm),
ShowInst(showInst) {
if (InstPrinter && IsVerboseAsm)
@@ -68,6 +68,9 @@ public:
/// isVerboseAsm - Return true if this streamer supports verbose assembly at
/// all.
virtual bool isVerboseAsm() const { return IsVerboseAsm; }
+
+ /// hasRawTextSupport - We support EmitRawText.
+ virtual bool hasRawTextSupport() const { return true; }
/// AddComment - Add a comment that can be emitted to the generated .s
/// file if applicable as a QoI issue to make the output of the compiler
@@ -106,7 +109,10 @@ public:
virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute);
virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue);
-
+ virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol);
+ virtual void EmitCOFFSymbolStorageClass(int StorageClass);
+ virtual void EmitCOFFSymbolType(int Type);
+ virtual void EndCOFFSymbolDef();
virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value);
virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment);
@@ -120,6 +126,9 @@ public:
virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
unsigned Size = 0, unsigned ByteAlignment = 0);
+ virtual void EmitTBSSSymbol (const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment = 0);
+
virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
virtual void EmitValue(const MCExpr *Value, unsigned Size,unsigned AddrSpace);
@@ -145,6 +154,11 @@ public:
virtual void EmitInstruction(const MCInst &Inst);
+  /// EmitRawText - If this file is backed by an assembly streamer, this dumps
+ /// the specified string in the output .s file. This capability is
+ /// indicated by the hasRawTextSupport() predicate.
+ virtual void EmitRawText(StringRef String);
+
virtual void Finish();
/// @}
@@ -195,7 +209,6 @@ void MCAsmStreamer::EmitCommentsAndEOL() {
CommentStream.resync();
}
-
static inline int64_t truncateToSize(int64_t Value, unsigned Bytes) {
assert(Bytes && "Invalid size!");
return Value & ((uint64_t) (int64_t) -1 >> (64 - Bytes * 8));
@@ -204,6 +217,7 @@ static inline int64_t truncateToSize(int64_t Value, unsigned Bytes) {
void MCAsmStreamer::SwitchSection(const MCSection *Section) {
assert(Section && "Cannot switch to a null section!");
if (Section != CurSection) {
+ PrevSection = CurSection;
CurSection = Section;
Section->PrintSwitchToSection(MAI, OS);
}
@@ -211,6 +225,7 @@ void MCAsmStreamer::SwitchSection(const MCSection *Section) {
void MCAsmStreamer::EmitLabel(MCSymbol *Symbol) {
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
+ assert(!Symbol->isVariable() && "Cannot emit a variable symbol!");
assert(CurSection && "Cannot emit before setting section!");
OS << *Symbol << ":";
@@ -227,16 +242,11 @@ void MCAsmStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
}
void MCAsmStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
- // Only absolute symbols can be redefined.
- assert((Symbol->isUndefined() || Symbol->isAbsolute()) &&
- "Cannot define a symbol twice!");
-
OS << *Symbol << " = " << *Value;
EmitEOL();
// FIXME: Lift context changes into super class.
- // FIXME: Set associated section.
- Symbol->setValue(Value);
+ Symbol->setVariableValue(Value);
}
void MCAsmStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
@@ -266,19 +276,20 @@ void MCAsmStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
case MCSA_Global: // .globl/.global
OS << MAI.getGlobalDirective();
break;
- case MCSA_Hidden: OS << ".hidden "; break;
- case MCSA_IndirectSymbol: OS << ".indirect_symbol "; break;
- case MCSA_Internal: OS << ".internal "; break;
- case MCSA_LazyReference: OS << ".lazy_reference "; break;
- case MCSA_Local: OS << ".local "; break;
- case MCSA_NoDeadStrip: OS << ".no_dead_strip "; break;
- case MCSA_PrivateExtern: OS << ".private_extern "; break;
- case MCSA_Protected: OS << ".protected "; break;
- case MCSA_Reference: OS << ".reference "; break;
- case MCSA_Weak: OS << ".weak "; break;
- case MCSA_WeakDefinition: OS << ".weak_definition "; break;
+ case MCSA_Hidden: OS << "\t.hidden\t"; break;
+ case MCSA_IndirectSymbol: OS << "\t.indirect_symbol\t"; break;
+ case MCSA_Internal: OS << "\t.internal\t"; break;
+ case MCSA_LazyReference: OS << "\t.lazy_reference\t"; break;
+ case MCSA_Local: OS << "\t.local\t"; break;
+ case MCSA_NoDeadStrip: OS << "\t.no_dead_strip\t"; break;
+ case MCSA_PrivateExtern: OS << "\t.private_extern\t"; break;
+ case MCSA_Protected: OS << "\t.protected\t"; break;
+ case MCSA_Reference: OS << "\t.reference\t"; break;
+ case MCSA_Weak: OS << "\t.weak\t"; break;
+ case MCSA_WeakDefinition: OS << "\t.weak_definition\t"; break;
// .weak_reference
case MCSA_WeakReference: OS << MAI.getWeakRefDirective(); break;
+ case MCSA_WeakDefAutoPrivate: OS << "\t.weak_def_can_be_hidden\t"; break;
}
OS << *Symbol;
@@ -290,6 +301,26 @@ void MCAsmStreamer::EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
EmitEOL();
}
+void MCAsmStreamer::BeginCOFFSymbolDef(const MCSymbol *Symbol) {
+ OS << "\t.def\t " << *Symbol << ';';
+ EmitEOL();
+}
+
+void MCAsmStreamer::EmitCOFFSymbolStorageClass (int StorageClass) {
+ OS << "\t.scl\t" << StorageClass << ';';
+ EmitEOL();
+}
+
+void MCAsmStreamer::EmitCOFFSymbolType (int Type) {
+ OS << "\t.type\t" << Type << ';';
+ EmitEOL();
+}
+
+void MCAsmStreamer::EndCOFFSymbolDef() {
+ OS << "\t.endef";
+ EmitEOL();
+}
+
void MCAsmStreamer::EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
assert(MAI.hasDotTypeDotSizeDirective());
OS << "\t.size\t" << *Symbol << ", " << *Value << '\n';
@@ -334,6 +365,23 @@ void MCAsmStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
EmitEOL();
}
+// .tbss sym, size, align
+// This assumes that the symbol has already been mangled from the original,
+// e.g. _a.
+void MCAsmStreamer::EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment) {
+ assert(Symbol != NULL && "Symbol shouldn't be NULL!");
+ // Instead of using the Section we'll just use the shortcut.
+ // This is a mach-o specific directive and section.
+ OS << ".tbss " << *Symbol << ", " << Size;
+
+ // Output align if we have it. We default to 1 so don't bother printing
+ // that.
+ if (ByteAlignment > 1) OS << ", " << Log2_32(ByteAlignment);
+
+ EmitEOL();
+}
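
Given the code above, a thread-local symbol of size 16 with 8-byte alignment would be printed as something like ".tbss _sym, 16, 3" (the name _sym is just a placeholder; the last operand is log2 of the alignment and is omitted when the alignment is 1).
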
+
static inline char toOctal(int X) { return (X&7)+'0'; }
static void PrintQuotedString(StringRef Data, raw_ostream &OS) {
@@ -624,39 +672,36 @@ void MCAsmStreamer::EmitInstruction(const MCInst &Inst) {
// Show the MCInst if enabled.
if (ShowInst) {
- raw_ostream &OS = GetCommentOS();
- OS << "<MCInst #" << Inst.getOpcode();
-
- StringRef InstName;
- if (InstPrinter)
- InstName = InstPrinter->getOpcodeName(Inst.getOpcode());
- if (!InstName.empty())
- OS << ' ' << InstName;
-
- for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i) {
- OS << "\n ";
- Inst.getOperand(i).print(OS, &MAI);
- }
- OS << ">\n";
+ Inst.dump_pretty(GetCommentOS(), &MAI, InstPrinter.get(), "\n ");
+ GetCommentOS() << "\n";
}
-
- // If we have an AsmPrinter, use that to print, otherwise dump the MCInst.
+
+ // If we have an AsmPrinter, use that to print, otherwise print the MCInst.
if (InstPrinter)
- InstPrinter->printInst(&Inst);
+ InstPrinter->printInst(&Inst, OS);
else
Inst.print(OS, &MAI);
EmitEOL();
}
+/// EmitRawText - If this file is backed by an assembly streamer, this dumps
+/// the specified string in the output .s file. This capability is
+/// indicated by the hasRawTextSupport() predicate.
+void MCAsmStreamer::EmitRawText(StringRef String) {
+ if (!String.empty() && String.back() == '\n')
+ String = String.substr(0, String.size()-1);
+ OS << String;
+ EmitEOL();
+}
+
void MCAsmStreamer::Finish() {
- OS.flush();
}
MCStreamer *llvm::createAsmStreamer(MCContext &Context,
formatted_raw_ostream &OS,
- const MCAsmInfo &MAI, bool isLittleEndian,
+ bool isLittleEndian,
bool isVerboseAsm, MCInstPrinter *IP,
MCCodeEmitter *CE, bool ShowInst) {
- return new MCAsmStreamer(Context, OS, MAI, isLittleEndian, isVerboseAsm,
+ return new MCAsmStreamer(Context, OS, isLittleEndian, isVerboseAsm,
IP, CE, ShowInst);
}
diff --git a/libclamav/c++/llvm/lib/MC/MCAssembler.cpp b/libclamav/c++/llvm/lib/MC/MCAssembler.cpp
index 96227db..f0e1d7f 100644
--- a/libclamav/c++/llvm/lib/MC/MCAssembler.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCAssembler.cpp
@@ -9,945 +9,186 @@
#define DEBUG_TYPE "assembler"
#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MachO.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/Debug.h"
-
-// FIXME: Gross.
-#include "../Target/X86/X86FixupKinds.h"
+#include "llvm/Target/TargetRegistry.h"
+#include "llvm/Target/TargetAsmBackend.h"
#include <vector>
using namespace llvm;
-class MachObjectWriter;
-
+namespace {
+namespace stats {
STATISTIC(EmittedFragments, "Number of emitted assembler fragments");
+STATISTIC(EvaluateFixup, "Number of evaluated fixups");
+STATISTIC(FragmentLayouts, "Number of fragment layouts");
+STATISTIC(ObjectBytes, "Number of emitted object file bytes");
+STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
+STATISTIC(RelaxedInstructions, "Number of relaxed instructions");
+STATISTIC(SectionLayouts, "Number of section layouts");
+}
+}
// FIXME FIXME FIXME: There are number of places in this file where we convert
// what is a 64-bit assembler value used for computation into a value in the
// object file, which may truncate it. We should detect that truncation where
// invalid and report errors back.
-static void WriteFileData(raw_ostream &OS, const MCSectionData &SD,
- MachObjectWriter &MOW);
-
-static uint64_t WriteNopData(uint64_t Count, MachObjectWriter &MOW);
-
-/// isVirtualSection - Check if this is a section which does not actually exist
-/// in the object file.
-static bool isVirtualSection(const MCSection &Section) {
- // FIXME: Lame.
- const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
- unsigned Type = SMO.getTypeAndAttributes() & MCSectionMachO::SECTION_TYPE;
- return (Type == MCSectionMachO::S_ZEROFILL);
-}
+/* *** */
-static unsigned getFixupKindLog2Size(unsigned Kind) {
- switch (Kind) {
- default: llvm_unreachable("invalid fixup kind!");
- case X86::reloc_pcrel_1byte:
- case FK_Data_1: return 0;
- case FK_Data_2: return 1;
- case X86::reloc_pcrel_4byte:
- case X86::reloc_riprel_4byte:
- case FK_Data_4: return 2;
- case FK_Data_8: return 3;
- }
+MCAsmLayout::MCAsmLayout(MCAssembler &Asm)
+ : Assembler(Asm), LastValidFragment(0)
+ {
+ // Compute the section layout order. Virtual sections must go last.
+ for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it)
+ if (!Asm.getBackend().isVirtualSection(it->getSection()))
+ SectionOrder.push_back(&*it);
+ for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it)
+ if (Asm.getBackend().isVirtualSection(it->getSection()))
+ SectionOrder.push_back(&*it);
}
-static bool isFixupKindPCRel(unsigned Kind) {
- switch (Kind) {
- default:
- return false;
- case X86::reloc_pcrel_1byte:
- case X86::reloc_pcrel_4byte:
- case X86::reloc_riprel_4byte:
+bool MCAsmLayout::isSectionUpToDate(const MCSectionData *SD) const {
+ // The first section is always up-to-date.
+ unsigned Index = SD->getLayoutOrder();
+ if (!Index)
return true;
- }
-}
-
-class MachObjectWriter {
- // See <mach-o/loader.h>.
- enum {
- Header_Magic32 = 0xFEEDFACE,
- Header_Magic64 = 0xFEEDFACF
- };
-
- static const unsigned Header32Size = 28;
- static const unsigned Header64Size = 32;
- static const unsigned SegmentLoadCommand32Size = 56;
- static const unsigned Section32Size = 68;
- static const unsigned SymtabLoadCommandSize = 24;
- static const unsigned DysymtabLoadCommandSize = 80;
- static const unsigned Nlist32Size = 12;
- static const unsigned RelocationInfoSize = 8;
-
- enum HeaderFileType {
- HFT_Object = 0x1
- };
-
- enum HeaderFlags {
- HF_SubsectionsViaSymbols = 0x2000
- };
-
- enum LoadCommandType {
- LCT_Segment = 0x1,
- LCT_Symtab = 0x2,
- LCT_Dysymtab = 0xb
- };
-
- // See <mach-o/nlist.h>.
- enum SymbolTypeType {
- STT_Undefined = 0x00,
- STT_Absolute = 0x02,
- STT_Section = 0x0e
- };
-
- enum SymbolTypeFlags {
- // If any of these bits are set, then the entry is a stab entry number (see
- // <mach-o/stab.h>. Otherwise the other masks apply.
- STF_StabsEntryMask = 0xe0,
-
- STF_TypeMask = 0x0e,
- STF_External = 0x01,
- STF_PrivateExtern = 0x10
- };
-
- /// IndirectSymbolFlags - Flags for encoding special values in the indirect
- /// symbol entry.
- enum IndirectSymbolFlags {
- ISF_Local = 0x80000000,
- ISF_Absolute = 0x40000000
- };
-
- /// RelocationFlags - Special flags for addresses.
- enum RelocationFlags {
- RF_Scattered = 0x80000000
- };
-
- enum RelocationInfoType {
- RIT_Vanilla = 0,
- RIT_Pair = 1,
- RIT_Difference = 2,
- RIT_PreboundLazyPointer = 3,
- RIT_LocalDifference = 4
- };
-
- /// MachSymbolData - Helper struct for containing some precomputed information
- /// on symbols.
- struct MachSymbolData {
- MCSymbolData *SymbolData;
- uint64_t StringIndex;
- uint8_t SectionIndex;
-
- // Support lexicographic sorting.
- bool operator<(const MachSymbolData &RHS) const {
- const std::string &Name = SymbolData->getSymbol().getName();
- return Name < RHS.SymbolData->getSymbol().getName();
- }
- };
-
- raw_ostream &OS;
- bool IsLSB;
-
-public:
- MachObjectWriter(raw_ostream &_OS, bool _IsLSB = true)
- : OS(_OS), IsLSB(_IsLSB) {
- }
-
- /// @name Helper Methods
- /// @{
-
- void Write8(uint8_t Value) {
- OS << char(Value);
- }
-
- void Write16(uint16_t Value) {
- if (IsLSB) {
- Write8(uint8_t(Value >> 0));
- Write8(uint8_t(Value >> 8));
- } else {
- Write8(uint8_t(Value >> 8));
- Write8(uint8_t(Value >> 0));
- }
- }
-
- void Write32(uint32_t Value) {
- if (IsLSB) {
- Write16(uint16_t(Value >> 0));
- Write16(uint16_t(Value >> 16));
- } else {
- Write16(uint16_t(Value >> 16));
- Write16(uint16_t(Value >> 0));
- }
- }
-
- void Write64(uint64_t Value) {
- if (IsLSB) {
- Write32(uint32_t(Value >> 0));
- Write32(uint32_t(Value >> 32));
- } else {
- Write32(uint32_t(Value >> 32));
- Write32(uint32_t(Value >> 0));
- }
- }
-
- void WriteZeros(unsigned N) {
- const char Zeros[16] = { 0 };
-
- for (unsigned i = 0, e = N / 16; i != e; ++i)
- OS << StringRef(Zeros, 16);
-
- OS << StringRef(Zeros, N % 16);
- }
-
- void WriteString(StringRef Str, unsigned ZeroFillSize = 0) {
- OS << Str;
- if (ZeroFillSize)
- WriteZeros(ZeroFillSize - Str.size());
- }
-
- /// @}
-
- void WriteHeader32(unsigned NumLoadCommands, unsigned LoadCommandsSize,
- bool SubsectionsViaSymbols) {
- uint32_t Flags = 0;
- if (SubsectionsViaSymbols)
- Flags |= HF_SubsectionsViaSymbols;
-
- // struct mach_header (28 bytes)
-
- uint64_t Start = OS.tell();
- (void) Start;
-
- Write32(Header_Magic32);
-
- // FIXME: Support cputype.
- Write32(MachO::CPUTypeI386);
- // FIXME: Support cpusubtype.
- Write32(MachO::CPUSubType_I386_ALL);
- Write32(HFT_Object);
- Write32(NumLoadCommands); // Object files have a single load command, the
- // segment.
- Write32(LoadCommandsSize);
- Write32(Flags);
-
- assert(OS.tell() - Start == Header32Size);
- }
-
- /// WriteSegmentLoadCommand32 - Write a 32-bit segment load command.
- ///
- /// \arg NumSections - The number of sections in this segment.
- /// \arg SectionDataSize - The total size of the sections.
- void WriteSegmentLoadCommand32(unsigned NumSections,
- uint64_t VMSize,
- uint64_t SectionDataStartOffset,
- uint64_t SectionDataSize) {
- // struct segment_command (56 bytes)
-
- uint64_t Start = OS.tell();
- (void) Start;
-
- Write32(LCT_Segment);
- Write32(SegmentLoadCommand32Size + NumSections * Section32Size);
-
- WriteString("", 16);
- Write32(0); // vmaddr
- Write32(VMSize); // vmsize
- Write32(SectionDataStartOffset); // file offset
- Write32(SectionDataSize); // file size
- Write32(0x7); // maxprot
- Write32(0x7); // initprot
- Write32(NumSections);
- Write32(0); // flags
-
- assert(OS.tell() - Start == SegmentLoadCommand32Size);
- }
-
- void WriteSection32(const MCSectionData &SD, uint64_t FileOffset,
- uint64_t RelocationsStart, unsigned NumRelocations) {
- // The offset is unused for virtual sections.
- if (isVirtualSection(SD.getSection())) {
- assert(SD.getFileSize() == 0 && "Invalid file size!");
- FileOffset = 0;
- }
-
- // struct section (68 bytes)
-
- uint64_t Start = OS.tell();
- (void) Start;
-
- // FIXME: cast<> support!
- const MCSectionMachO &Section =
- static_cast<const MCSectionMachO&>(SD.getSection());
- WriteString(Section.getSectionName(), 16);
- WriteString(Section.getSegmentName(), 16);
- Write32(SD.getAddress()); // address
- Write32(SD.getSize()); // size
- Write32(FileOffset);
-
- unsigned Flags = Section.getTypeAndAttributes();
- if (SD.hasInstructions())
- Flags |= MCSectionMachO::S_ATTR_SOME_INSTRUCTIONS;
-
- assert(isPowerOf2_32(SD.getAlignment()) && "Invalid alignment!");
- Write32(Log2_32(SD.getAlignment()));
- Write32(NumRelocations ? RelocationsStart : 0);
- Write32(NumRelocations);
- Write32(Flags);
- Write32(0); // reserved1
- Write32(Section.getStubSize()); // reserved2
-
- assert(OS.tell() - Start == Section32Size);
- }
-
- void WriteSymtabLoadCommand(uint32_t SymbolOffset, uint32_t NumSymbols,
- uint32_t StringTableOffset,
- uint32_t StringTableSize) {
- // struct symtab_command (24 bytes)
-
- uint64_t Start = OS.tell();
- (void) Start;
-
- Write32(LCT_Symtab);
- Write32(SymtabLoadCommandSize);
- Write32(SymbolOffset);
- Write32(NumSymbols);
- Write32(StringTableOffset);
- Write32(StringTableSize);
-
- assert(OS.tell() - Start == SymtabLoadCommandSize);
- }
-
- void WriteDysymtabLoadCommand(uint32_t FirstLocalSymbol,
- uint32_t NumLocalSymbols,
- uint32_t FirstExternalSymbol,
- uint32_t NumExternalSymbols,
- uint32_t FirstUndefinedSymbol,
- uint32_t NumUndefinedSymbols,
- uint32_t IndirectSymbolOffset,
- uint32_t NumIndirectSymbols) {
- // struct dysymtab_command (80 bytes)
-
- uint64_t Start = OS.tell();
- (void) Start;
-
- Write32(LCT_Dysymtab);
- Write32(DysymtabLoadCommandSize);
- Write32(FirstLocalSymbol);
- Write32(NumLocalSymbols);
- Write32(FirstExternalSymbol);
- Write32(NumExternalSymbols);
- Write32(FirstUndefinedSymbol);
- Write32(NumUndefinedSymbols);
- Write32(0); // tocoff
- Write32(0); // ntoc
- Write32(0); // modtaboff
- Write32(0); // nmodtab
- Write32(0); // extrefsymoff
- Write32(0); // nextrefsyms
- Write32(IndirectSymbolOffset);
- Write32(NumIndirectSymbols);
- Write32(0); // extreloff
- Write32(0); // nextrel
- Write32(0); // locreloff
- Write32(0); // nlocrel
-
- assert(OS.tell() - Start == DysymtabLoadCommandSize);
- }
-
- void WriteNlist32(MachSymbolData &MSD) {
- MCSymbolData &Data = *MSD.SymbolData;
- const MCSymbol &Symbol = Data.getSymbol();
- uint8_t Type = 0;
- uint16_t Flags = Data.getFlags();
- uint32_t Address = 0;
-
- // Set the N_TYPE bits. See <mach-o/nlist.h>.
- //
- // FIXME: Are the prebound or indirect fields possible here?
- if (Symbol.isUndefined())
- Type = STT_Undefined;
- else if (Symbol.isAbsolute())
- Type = STT_Absolute;
- else
- Type = STT_Section;
-
- // FIXME: Set STAB bits.
-
- if (Data.isPrivateExtern())
- Type |= STF_PrivateExtern;
-
- // Set external bit.
- if (Data.isExternal() || Symbol.isUndefined())
- Type |= STF_External;
-
- // Compute the symbol address.
- if (Symbol.isDefined()) {
- if (Symbol.isAbsolute()) {
- llvm_unreachable("FIXME: Not yet implemented!");
- } else {
- Address = Data.getFragment()->getAddress() + Data.getOffset();
- }
- } else if (Data.isCommon()) {
- // Common symbols are encoded with the size in the address
- // field, and their alignment in the flags.
- Address = Data.getCommonSize();
-
- // Common alignment is packed into the 'desc' bits.
- if (unsigned Align = Data.getCommonAlignment()) {
- unsigned Log2Size = Log2_32(Align);
- assert((1U << Log2Size) == Align && "Invalid 'common' alignment!");
- if (Log2Size > 15)
- llvm_report_error("invalid 'common' alignment '" +
- Twine(Align) + "'");
- // FIXME: Keep this mask with the SymbolFlags enumeration.
- Flags = (Flags & 0xF0FF) | (Log2Size << 8);
- }
- }
-
- // struct nlist (12 bytes)
-
- Write32(MSD.StringIndex);
- Write8(Type);
- Write8(MSD.SectionIndex);
-
- // The Mach-O streamer uses the lowest 16-bits of the flags for the 'desc'
- // value.
- Write16(Flags);
- Write32(Address);
- }
-
- struct MachRelocationEntry {
- uint32_t Word0;
- uint32_t Word1;
- };
- void ComputeScatteredRelocationInfo(MCAssembler &Asm, MCFragment &Fragment,
- MCAsmFixup &Fixup,
- const MCValue &Target,
- DenseMap<const MCSymbol*,MCSymbolData*> &SymbolMap,
- std::vector<MachRelocationEntry> &Relocs) {
- uint32_t Address = Fragment.getOffset() + Fixup.Offset;
- unsigned IsPCRel = 0;
- unsigned Log2Size = getFixupKindLog2Size(Fixup.Kind);
- unsigned Type = RIT_Vanilla;
-
- // See <reloc.h>.
- const MCSymbol *A = Target.getSymA();
- MCSymbolData *SD = SymbolMap.lookup(A);
- uint32_t Value = SD->getFragment()->getAddress() + SD->getOffset();
- uint32_t Value2 = 0;
-
- if (const MCSymbol *B = Target.getSymB()) {
- Type = RIT_LocalDifference;
-
- MCSymbolData *SD = SymbolMap.lookup(B);
- Value2 = SD->getFragment()->getAddress() + SD->getOffset();
- }
-
- // The value which goes in the fixup is current value of the expression.
- Fixup.FixedValue = Value - Value2 + Target.getConstant();
- if (isFixupKindPCRel(Fixup.Kind)) {
- Fixup.FixedValue -= Address;
- IsPCRel = 1;
- }
-
- MachRelocationEntry MRE;
- MRE.Word0 = ((Address << 0) |
- (Type << 24) |
- (Log2Size << 28) |
- (IsPCRel << 30) |
- RF_Scattered);
- MRE.Word1 = Value;
- Relocs.push_back(MRE);
-
- if (Type == RIT_LocalDifference) {
- Type = RIT_Pair;
-
- MachRelocationEntry MRE;
- MRE.Word0 = ((0 << 0) |
- (Type << 24) |
- (Log2Size << 28) |
- (0 << 30) |
- RF_Scattered);
- MRE.Word1 = Value2;
- Relocs.push_back(MRE);
- }
- }
-
- void ComputeRelocationInfo(MCAssembler &Asm, MCDataFragment &Fragment,
- MCAsmFixup &Fixup,
- DenseMap<const MCSymbol*,MCSymbolData*> &SymbolMap,
- std::vector<MachRelocationEntry> &Relocs) {
- MCValue Target;
- if (!Fixup.Value->EvaluateAsRelocatable(Target))
- llvm_report_error("expected relocatable expression");
-
- // If this is a difference or a local symbol plus an offset, then we need a
- // scattered relocation entry.
- if (Target.getSymB() ||
- (Target.getSymA() && !Target.getSymA()->isUndefined() &&
- Target.getConstant()))
- return ComputeScatteredRelocationInfo(Asm, Fragment, Fixup, Target,
- SymbolMap, Relocs);
-
- // See <reloc.h>.
- uint32_t Address = Fragment.getOffset() + Fixup.Offset;
- uint32_t Value = 0;
- unsigned Index = 0;
- unsigned IsPCRel = 0;
- unsigned Log2Size = getFixupKindLog2Size(Fixup.Kind);
- unsigned IsExtern = 0;
- unsigned Type = 0;
-
- if (Target.isAbsolute()) { // constant
- // SymbolNum of 0 indicates the absolute section.
- //
- // FIXME: When is this generated?
- Type = RIT_Vanilla;
- Value = 0;
- llvm_unreachable("FIXME: Not yet implemented!");
- } else {
- const MCSymbol *Symbol = Target.getSymA();
- MCSymbolData *SD = SymbolMap.lookup(Symbol);
-
- if (Symbol->isUndefined()) {
- IsExtern = 1;
- Index = SD->getIndex();
- Value = 0;
- } else {
- // The index is the section ordinal.
- //
- // FIXME: O(N)
- Index = 1;
- MCAssembler::iterator it = Asm.begin(), ie = Asm.end();
- for (; it != ie; ++it, ++Index)
- if (&*it == SD->getFragment()->getParent())
- break;
- assert(it != ie && "Unable to find section index!");
- Value = SD->getFragment()->getAddress() + SD->getOffset();
- }
-
- Type = RIT_Vanilla;
- }
-
- // The value which goes in the fixup is current value of the expression.
- Fixup.FixedValue = Value + Target.getConstant();
-
- if (isFixupKindPCRel(Fixup.Kind)) {
- Fixup.FixedValue -= Address;
- IsPCRel = 1;
- }
-
- // struct relocation_info (8 bytes)
- MachRelocationEntry MRE;
- MRE.Word0 = Address;
- MRE.Word1 = ((Index << 0) |
- (IsPCRel << 24) |
- (Log2Size << 25) |
- (IsExtern << 27) |
- (Type << 28));
- Relocs.push_back(MRE);
- }
-
- void BindIndirectSymbols(MCAssembler &Asm,
- DenseMap<const MCSymbol*,MCSymbolData*> &SymbolMap) {
- // This is the point where 'as' creates actual symbols for indirect symbols
- // (in the following two passes). It would be easier for us to do this
- // sooner when we see the attribute, but that makes getting the order in the
- // symbol table much more complicated than it is worth.
- //
- // FIXME: Revisit this when the dust settles.
-
- // Bind non lazy symbol pointers first.
- for (MCAssembler::indirect_symbol_iterator it = Asm.indirect_symbol_begin(),
- ie = Asm.indirect_symbol_end(); it != ie; ++it) {
- // FIXME: cast<> support!
- const MCSectionMachO &Section =
- static_cast<const MCSectionMachO&>(it->SectionData->getSection());
-
- unsigned Type =
- Section.getTypeAndAttributes() & MCSectionMachO::SECTION_TYPE;
- if (Type != MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS)
- continue;
-
- MCSymbolData *&Entry = SymbolMap[it->Symbol];
- if (!Entry)
- Entry = new MCSymbolData(*it->Symbol, 0, 0, &Asm);
- }
+ // Otherwise, sections are always implicitly computed when the preceding
+ // fragment is laid out.
+ const MCSectionData *Prev = getSectionOrder()[Index - 1];
+ return isFragmentUpToDate(&(Prev->getFragmentList().back()));
+}
- // Then lazy symbol pointers and symbol stubs.
- for (MCAssembler::indirect_symbol_iterator it = Asm.indirect_symbol_begin(),
- ie = Asm.indirect_symbol_end(); it != ie; ++it) {
- // FIXME: cast<> support!
- const MCSectionMachO &Section =
- static_cast<const MCSectionMachO&>(it->SectionData->getSection());
-
- unsigned Type =
- Section.getTypeAndAttributes() & MCSectionMachO::SECTION_TYPE;
- if (Type != MCSectionMachO::S_LAZY_SYMBOL_POINTERS &&
- Type != MCSectionMachO::S_SYMBOL_STUBS)
- continue;
+bool MCAsmLayout::isFragmentUpToDate(const MCFragment *F) const {
+ return (LastValidFragment &&
+ F->getLayoutOrder() <= LastValidFragment->getLayoutOrder());
+}
- MCSymbolData *&Entry = SymbolMap[it->Symbol];
- if (!Entry) {
- Entry = new MCSymbolData(*it->Symbol, 0, 0, &Asm);
+void MCAsmLayout::UpdateForSlide(MCFragment *F, int SlideAmount) {
+ // If this fragment wasn't already up-to-date, we don't need to do anything.
+ if (!isFragmentUpToDate(F))
+ return;
- // Set the symbol type to undefined lazy, but only on construction.
- //
- // FIXME: Do not hardcode.
- Entry->setFlags(Entry->getFlags() | 0x0001);
- }
+ // Otherwise, reset the last valid fragment to the predecessor of the
+ // invalidated fragment.
+ LastValidFragment = F->getPrevNode();
+ if (!LastValidFragment) {
+ unsigned Index = F->getParent()->getLayoutOrder();
+ if (Index != 0) {
+ MCSectionData *Prev = getSectionOrder()[Index - 1];
+ LastValidFragment = &(Prev->getFragmentList().back());
}
}
+}
- /// ComputeSymbolTable - Compute the symbol table data
- ///
- /// \param StringTable [out] - The string table data.
- /// \param StringIndexMap [out] - Map from symbol names to offsets in the
- /// string table.
- void ComputeSymbolTable(MCAssembler &Asm, SmallString<256> &StringTable,
- std::vector<MachSymbolData> &LocalSymbolData,
- std::vector<MachSymbolData> &ExternalSymbolData,
- std::vector<MachSymbolData> &UndefinedSymbolData) {
- // Build section lookup table.
- DenseMap<const MCSection*, uint8_t> SectionIndexMap;
- unsigned Index = 1;
- for (MCAssembler::iterator it = Asm.begin(),
- ie = Asm.end(); it != ie; ++it, ++Index)
- SectionIndexMap[&it->getSection()] = Index;
- assert(Index <= 256 && "Too many sections!");
-
- // Index 0 is always the empty string.
- StringMap<uint64_t> StringIndexMap;
- StringTable += '\x00';
-
- // Build the symbol arrays and the string table, but only for non-local
- // symbols.
- //
- // The particular order that we collect the symbols and create the string
- // table, then sort the symbols is chosen to match 'as'. Even though it
- // doesn't matter for correctness, this is important for letting us diff .o
- // files.
- for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
- ie = Asm.symbol_end(); it != ie; ++it) {
- const MCSymbol &Symbol = it->getSymbol();
-
- // Ignore assembler temporaries.
- if (it->getSymbol().isTemporary())
- continue;
-
- if (!it->isExternal() && !Symbol.isUndefined())
- continue;
-
- uint64_t &Entry = StringIndexMap[Symbol.getName()];
- if (!Entry) {
- Entry = StringTable.size();
- StringTable += Symbol.getName();
- StringTable += '\x00';
- }
-
- MachSymbolData MSD;
- MSD.SymbolData = it;
- MSD.StringIndex = Entry;
-
- if (Symbol.isUndefined()) {
- MSD.SectionIndex = 0;
- UndefinedSymbolData.push_back(MSD);
- } else if (Symbol.isAbsolute()) {
- MSD.SectionIndex = 0;
- ExternalSymbolData.push_back(MSD);
- } else {
- MSD.SectionIndex = SectionIndexMap.lookup(&Symbol.getSection());
- assert(MSD.SectionIndex && "Invalid section index!");
- ExternalSymbolData.push_back(MSD);
- }
- }
-
- // Now add the data for local symbols.
- for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
- ie = Asm.symbol_end(); it != ie; ++it) {
- const MCSymbol &Symbol = it->getSymbol();
-
- // Ignore assembler temporaries.
- if (it->getSymbol().isTemporary())
- continue;
-
- if (it->isExternal() || Symbol.isUndefined())
- continue;
-
- uint64_t &Entry = StringIndexMap[Symbol.getName()];
- if (!Entry) {
- Entry = StringTable.size();
- StringTable += Symbol.getName();
- StringTable += '\x00';
- }
-
- MachSymbolData MSD;
- MSD.SymbolData = it;
- MSD.StringIndex = Entry;
-
- if (Symbol.isAbsolute()) {
- MSD.SectionIndex = 0;
- LocalSymbolData.push_back(MSD);
- } else {
- MSD.SectionIndex = SectionIndexMap.lookup(&Symbol.getSection());
- assert(MSD.SectionIndex && "Invalid section index!");
- LocalSymbolData.push_back(MSD);
- }
+void MCAsmLayout::EnsureValid(const MCFragment *F) const {
+ // Advance the layout position until the fragment is up-to-date.
+ while (!isFragmentUpToDate(F)) {
+ // Advance to the next fragment.
+ MCFragment *Cur = LastValidFragment;
+ if (Cur)
+ Cur = Cur->getNextNode();
+ if (!Cur) {
+ unsigned NextIndex = 0;
+ if (LastValidFragment)
+ NextIndex = LastValidFragment->getParent()->getLayoutOrder() + 1;
+ Cur = SectionOrder[NextIndex]->begin();
}
- // External and undefined symbols are required to be in lexicographic order.
- std::sort(ExternalSymbolData.begin(), ExternalSymbolData.end());
- std::sort(UndefinedSymbolData.begin(), UndefinedSymbolData.end());
-
- // Set the symbol indices.
- Index = 0;
- for (unsigned i = 0, e = LocalSymbolData.size(); i != e; ++i)
- LocalSymbolData[i].SymbolData->setIndex(Index++);
- for (unsigned i = 0, e = ExternalSymbolData.size(); i != e; ++i)
- ExternalSymbolData[i].SymbolData->setIndex(Index++);
- for (unsigned i = 0, e = UndefinedSymbolData.size(); i != e; ++i)
- UndefinedSymbolData[i].SymbolData->setIndex(Index++);
-
- // The string table is padded to a multiple of 4.
- while (StringTable.size() % 4)
- StringTable += '\x00';
+ const_cast<MCAsmLayout*>(this)->LayoutFragment(Cur);
}
+}
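
The MCAsmLayout code above computes fragment offsets lazily: LastValidFragment records how far layout has been carried out, UpdateForSlide rolls it back when a fragment changes size, and EnsureValid advances it just far enough to answer a query. A minimal, self-contained sketch of the same memoized-walk pattern, using made-up types rather than the real MC classes:

  // Hypothetical stand-ins for MCFragment/MCAsmLayout, illustration only.
  #include <cassert>
  #include <cstdint>
  #include <utility>
  #include <vector>

  struct Frag { uint64_t Size; uint64_t Offset; };

  class LazyLayout {
    std::vector<Frag> Frags; // fragments in layout order
    int LastValid = -1;      // index of the last fragment with a valid Offset
  public:
    explicit LazyLayout(std::vector<Frag> F) : Frags(std::move(F)) {}

    // Roll the layout point back when fragment I (or anything before it)
    // changes size -- compare UpdateForSlide.
    void invalidateFrom(int I) { if (LastValid >= I) LastValid = I - 1; }

    // Advance the layout point until fragment I is up-to-date -- compare
    // EnsureValid/LayoutFragment.
    uint64_t offsetOf(int I) {
      while (LastValid < I) {
        int Cur = LastValid + 1;
        Frags[Cur].Offset =
            Cur ? Frags[Cur - 1].Offset + Frags[Cur - 1].Size : 0;
        LastValid = Cur;
      }
      return Frags[I].Offset;
    }
  };

  int main() {
    LazyLayout L({{4, 0}, {2, 0}, {8, 0}});
    assert(L.offsetOf(2) == 6);  // 4 + 2
    L.invalidateFrom(1);         // pretend fragment 1 was relaxed
    assert(L.offsetOf(1) == 4);  // only recomputed as far as needed
    return 0;
  }
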
- void WriteObject(MCAssembler &Asm) {
- unsigned NumSections = Asm.size();
-
- // Compute the symbol -> symbol data map.
- //
- // FIXME: This should not be here.
- DenseMap<const MCSymbol*, MCSymbolData *> SymbolMap;
- for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
- ie = Asm.symbol_end(); it != ie; ++it)
- SymbolMap[&it->getSymbol()] = it;
-
- // Create symbol data for any indirect symbols.
- BindIndirectSymbols(Asm, SymbolMap);
-
- // Compute symbol table information.
- SmallString<256> StringTable;
- std::vector<MachSymbolData> LocalSymbolData;
- std::vector<MachSymbolData> ExternalSymbolData;
- std::vector<MachSymbolData> UndefinedSymbolData;
- unsigned NumSymbols = Asm.symbol_size();
-
- // No symbol table command is written if there are no symbols.
- if (NumSymbols)
- ComputeSymbolTable(Asm, StringTable, LocalSymbolData, ExternalSymbolData,
- UndefinedSymbolData);
-
- // The section data starts after the header, the segment load command (and
- // section headers) and the symbol table.
- unsigned NumLoadCommands = 1;
- uint64_t LoadCommandsSize =
- SegmentLoadCommand32Size + NumSections * Section32Size;
-
- // Add the symbol table load command sizes, if used.
- if (NumSymbols) {
- NumLoadCommands += 2;
- LoadCommandsSize += SymtabLoadCommandSize + DysymtabLoadCommandSize;
- }
-
- // Compute the total size of the section data, as well as its file size and
- // vm size.
- uint64_t SectionDataStart = Header32Size + LoadCommandsSize;
- uint64_t SectionDataSize = 0;
- uint64_t SectionDataFileSize = 0;
- uint64_t VMSize = 0;
- for (MCAssembler::iterator it = Asm.begin(),
- ie = Asm.end(); it != ie; ++it) {
- MCSectionData &SD = *it;
-
- VMSize = std::max(VMSize, SD.getAddress() + SD.getSize());
-
- if (isVirtualSection(SD.getSection()))
- continue;
-
- SectionDataSize = std::max(SectionDataSize,
- SD.getAddress() + SD.getSize());
- SectionDataFileSize = std::max(SectionDataFileSize,
- SD.getAddress() + SD.getFileSize());
- }
-
- // The section data is padded to 4 bytes.
- //
- // FIXME: Is this machine dependent?
- unsigned SectionDataPadding = OffsetToAlignment(SectionDataFileSize, 4);
- SectionDataFileSize += SectionDataPadding;
-
- // Write the prolog, starting with the header and load command...
- WriteHeader32(NumLoadCommands, LoadCommandsSize,
- Asm.getSubsectionsViaSymbols());
- WriteSegmentLoadCommand32(NumSections, VMSize,
- SectionDataStart, SectionDataSize);
-
- // ... and then the section headers.
- //
- // We also compute the section relocations while we do this. Note that
- // computing relocation info will also update the fixup to have the correct
- // value; this will overwrite the appropriate data in the fragment when it
- // is written.
- std::vector<MachRelocationEntry> RelocInfos;
- uint64_t RelocTableEnd = SectionDataStart + SectionDataFileSize;
- for (MCAssembler::iterator it = Asm.begin(),
- ie = Asm.end(); it != ie; ++it) {
- MCSectionData &SD = *it;
-
- // The assembler writes relocations in the reverse order they were seen.
- //
- // FIXME: It is probably more complicated than this.
- unsigned NumRelocsStart = RelocInfos.size();
- for (MCSectionData::reverse_iterator it2 = SD.rbegin(),
- ie2 = SD.rend(); it2 != ie2; ++it2)
- if (MCDataFragment *DF = dyn_cast<MCDataFragment>(&*it2))
- for (unsigned i = 0, e = DF->fixup_size(); i != e; ++i)
- ComputeRelocationInfo(Asm, *DF, DF->getFixups()[e - i - 1],
- SymbolMap, RelocInfos);
-
- unsigned NumRelocs = RelocInfos.size() - NumRelocsStart;
- uint64_t SectionStart = SectionDataStart + SD.getAddress();
- WriteSection32(SD, SectionStart, RelocTableEnd, NumRelocs);
- RelocTableEnd += NumRelocs * RelocationInfoSize;
- }
+void MCAsmLayout::FragmentReplaced(MCFragment *Src, MCFragment *Dst) {
+ if (LastValidFragment == Src)
+ LastValidFragment = Dst;
- // Write the symbol table load command, if used.
- if (NumSymbols) {
- unsigned FirstLocalSymbol = 0;
- unsigned NumLocalSymbols = LocalSymbolData.size();
- unsigned FirstExternalSymbol = FirstLocalSymbol + NumLocalSymbols;
- unsigned NumExternalSymbols = ExternalSymbolData.size();
- unsigned FirstUndefinedSymbol = FirstExternalSymbol + NumExternalSymbols;
- unsigned NumUndefinedSymbols = UndefinedSymbolData.size();
- unsigned NumIndirectSymbols = Asm.indirect_symbol_size();
- unsigned NumSymTabSymbols =
- NumLocalSymbols + NumExternalSymbols + NumUndefinedSymbols;
- uint64_t IndirectSymbolSize = NumIndirectSymbols * 4;
- uint64_t IndirectSymbolOffset = 0;
-
- // If used, the indirect symbols are written after the section data.
- if (NumIndirectSymbols)
- IndirectSymbolOffset = RelocTableEnd;
-
- // The symbol table is written after the indirect symbol data.
- uint64_t SymbolTableOffset = RelocTableEnd + IndirectSymbolSize;
-
- // The string table is written after symbol table.
- uint64_t StringTableOffset =
- SymbolTableOffset + NumSymTabSymbols * Nlist32Size;
- WriteSymtabLoadCommand(SymbolTableOffset, NumSymTabSymbols,
- StringTableOffset, StringTable.size());
-
- WriteDysymtabLoadCommand(FirstLocalSymbol, NumLocalSymbols,
- FirstExternalSymbol, NumExternalSymbols,
- FirstUndefinedSymbol, NumUndefinedSymbols,
- IndirectSymbolOffset, NumIndirectSymbols);
- }
+ Dst->Offset = Src->Offset;
+ Dst->EffectiveSize = Src->EffectiveSize;
+}
- // Write the actual section data.
- for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it)
- WriteFileData(OS, *it, *this);
+uint64_t MCAsmLayout::getFragmentAddress(const MCFragment *F) const {
+ assert(F->getParent() && "Missing section()!");
+ return getSectionAddress(F->getParent()) + getFragmentOffset(F);
+}
- // Write the extra padding.
- WriteZeros(SectionDataPadding);
+uint64_t MCAsmLayout::getFragmentEffectiveSize(const MCFragment *F) const {
+ EnsureValid(F);
+ assert(F->EffectiveSize != ~UINT64_C(0) && "Address not set!");
+ return F->EffectiveSize;
+}
- // Write the relocation entries.
- for (unsigned i = 0, e = RelocInfos.size(); i != e; ++i) {
- Write32(RelocInfos[i].Word0);
- Write32(RelocInfos[i].Word1);
- }
+uint64_t MCAsmLayout::getFragmentOffset(const MCFragment *F) const {
+ EnsureValid(F);
+ assert(F->Offset != ~UINT64_C(0) && "Address not set!");
+ return F->Offset;
+}
- // Write the symbol table data, if used.
- if (NumSymbols) {
- // Write the indirect symbol entries.
- for (MCAssembler::indirect_symbol_iterator
- it = Asm.indirect_symbol_begin(),
- ie = Asm.indirect_symbol_end(); it != ie; ++it) {
- // Indirect symbols in the non lazy symbol pointer section have some
- // special handling.
- const MCSectionMachO &Section =
- static_cast<const MCSectionMachO&>(it->SectionData->getSection());
- unsigned Type =
- Section.getTypeAndAttributes() & MCSectionMachO::SECTION_TYPE;
- if (Type == MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS) {
- // If this symbol is defined and internal, mark it as such.
- if (it->Symbol->isDefined() &&
- !SymbolMap.lookup(it->Symbol)->isExternal()) {
- uint32_t Flags = ISF_Local;
- if (it->Symbol->isAbsolute())
- Flags |= ISF_Absolute;
- Write32(Flags);
- continue;
- }
- }
+uint64_t MCAsmLayout::getSymbolAddress(const MCSymbolData *SD) const {
+ assert(SD->getFragment() && "Invalid getAddress() on undefined symbol!");
+ return getFragmentAddress(SD->getFragment()) + SD->getOffset();
+}
- Write32(SymbolMap[it->Symbol]->getIndex());
- }
+uint64_t MCAsmLayout::getSectionAddress(const MCSectionData *SD) const {
+ EnsureValid(SD->begin());
+ assert(SD->Address != ~UINT64_C(0) && "Address not set!");
+ return SD->Address;
+}
- // FIXME: Check that offsets match computed ones.
+uint64_t MCAsmLayout::getSectionAddressSize(const MCSectionData *SD) const {
+ // The size is the last fragment's end offset.
+ const MCFragment &F = SD->getFragmentList().back();
+ return getFragmentOffset(&F) + getFragmentEffectiveSize(&F);
+}
- // Write the symbol table entries.
- for (unsigned i = 0, e = LocalSymbolData.size(); i != e; ++i)
- WriteNlist32(LocalSymbolData[i]);
- for (unsigned i = 0, e = ExternalSymbolData.size(); i != e; ++i)
- WriteNlist32(ExternalSymbolData[i]);
- for (unsigned i = 0, e = UndefinedSymbolData.size(); i != e; ++i)
- WriteNlist32(UndefinedSymbolData[i]);
+uint64_t MCAsmLayout::getSectionFileSize(const MCSectionData *SD) const {
+ // Virtual sections have no file size.
+ if (getAssembler().getBackend().isVirtualSection(SD->getSection()))
+ return 0;
- // Write the string table.
- OS << StringTable.str();
- }
- }
+ // Otherwise, the file size is the same as the address space size.
+ return getSectionAddressSize(SD);
+}
- void ApplyFixup(const MCAsmFixup &Fixup, MCDataFragment &DF) {
- unsigned Size = 1 << getFixupKindLog2Size(Fixup.Kind);
+uint64_t MCAsmLayout::getSectionSize(const MCSectionData *SD) const {
+ // The logical size is the address space size minus any tail padding.
+ uint64_t Size = getSectionAddressSize(SD);
+ const MCAlignFragment *AF =
+ dyn_cast<MCAlignFragment>(&(SD->getFragmentList().back()));
+ if (AF && AF->hasOnlyAlignAddress())
+ Size -= getFragmentEffectiveSize(AF);
- // FIXME: Endianness assumption.
- assert(Fixup.Offset + Size <= DF.getContents().size() &&
- "Invalid fixup offset!");
- for (unsigned i = 0; i != Size; ++i)
- DF.getContents()[Fixup.Offset + i] = uint8_t(Fixup.FixedValue >> (i * 8));
- }
-};
+ return Size;
+}
/* *** */
MCFragment::MCFragment() : Kind(FragmentType(~0)) {
}
+MCFragment::~MCFragment() {
+}
+
MCFragment::MCFragment(FragmentType _Kind, MCSectionData *_Parent)
- : Kind(_Kind),
- Parent(_Parent),
- FileSize(~UINT64_C(0))
+ : Kind(_Kind), Parent(_Parent), Atom(0), Offset(~UINT64_C(0)),
+ EffectiveSize(~UINT64_C(0))
{
if (Parent)
Parent->getFragmentList().push_back(this);
}
-MCFragment::~MCFragment() {
-}
-
-uint64_t MCFragment::getAddress() const {
- assert(getParent() && "Missing Section!");
- return getParent()->getAddress() + Offset;
-}
-
/* *** */
MCSectionData::MCSectionData() : Section(0) {}
@@ -956,8 +197,6 @@ MCSectionData::MCSectionData(const MCSection &_Section, MCAssembler *A)
: Section(&_Section),
Alignment(1),
Address(~UINT64_C(0)),
- Size(~UINT64_C(0)),
- FileSize(~UINT64_C(0)),
HasInstructions(false)
{
if (A)
@@ -972,7 +211,8 @@ MCSymbolData::MCSymbolData(const MCSymbol &_Symbol, MCFragment *_Fragment,
uint64_t _Offset, MCAssembler *A)
: Symbol(&_Symbol), Fragment(_Fragment), Offset(_Offset),
IsExternal(false), IsPrivateExtern(false),
- CommonSize(0), CommonAlign(0), Flags(0), Index(0)
+ CommonSize(0), SymbolSize(0), CommonAlign(0),
+ Flags(0), Index(0)
{
if (A)
A->getSymbolList().push_back(this);
@@ -980,183 +220,354 @@ MCSymbolData::MCSymbolData(const MCSymbol &_Symbol, MCFragment *_Fragment,
/* *** */
-MCAssembler::MCAssembler(MCContext &_Context, raw_ostream &_OS)
- : Context(_Context), OS(_OS), SubsectionsViaSymbols(false)
+MCAssembler::MCAssembler(MCContext &_Context, TargetAsmBackend &_Backend,
+ MCCodeEmitter &_Emitter, raw_ostream &_OS)
+ : Context(_Context), Backend(_Backend), Emitter(_Emitter),
+ OS(_OS), RelaxAll(false), SubsectionsViaSymbols(false)
{
}
MCAssembler::~MCAssembler() {
}
-void MCAssembler::LayoutSection(MCSectionData &SD) {
- uint64_t Address = SD.getAddress();
+static bool isScatteredFixupFullyResolvedSimple(const MCAssembler &Asm,
+ const MCFixup &Fixup,
+ const MCValue Target,
+ const MCSection *BaseSection) {
+ // The effective fixup address is
+ // addr(atom(A)) + offset(A)
+ // - addr(atom(B)) - offset(B)
+ // - addr(<base symbol>) + <fixup offset from base symbol>
+ // and the offsets are not relocatable, so the fixup is fully resolved when
+ // addr(atom(A)) - addr(atom(B)) - addr(<base symbol>) == 0.
+ //
+ // The simple (Darwin, except on x86_64) way of dealing with this was to
+ // assume that any reference to a temporary symbol *must* be a temporary
+ // symbol in the same atom, unless the sections differ. Therefore, any PCrel
+ // relocation to a temporary symbol (in the same section) is fully
+ // resolved. This also works in conjunction with absolutized .set, which
+ // requires the compiler to use .set to absolutize the differences between
+ // symbols which the compiler knows to be assembly time constants, so we don't
+ // need to worry about considering symbol differences fully resolved.
+
+ // Non-relative fixups are only resolved if constant.
+ if (!BaseSection)
+ return Target.isAbsolute();
+
+ // Otherwise, relative fixups are only resolved if not a difference and the
+ // target is a temporary in the same section.
+ if (Target.isAbsolute() || Target.getSymB())
+ return false;
- for (MCSectionData::iterator it = SD.begin(), ie = SD.end(); it != ie; ++it) {
- MCFragment &F = *it;
+ const MCSymbol *A = &Target.getSymA()->getSymbol();
+ if (!A->isTemporary() || !A->isInSection() ||
+ &A->getSection() != BaseSection)
+ return false;
- F.setOffset(Address - SD.getAddress());
+ return true;
+}
- // Evaluate fragment size.
- switch (F.getKind()) {
- case MCFragment::FT_Align: {
- MCAlignFragment &AF = cast<MCAlignFragment>(F);
+static bool isScatteredFixupFullyResolved(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFixup &Fixup,
+ const MCValue Target,
+ const MCSymbolData *BaseSymbol) {
+ // The effective fixup address is
+ // addr(atom(A)) + offset(A)
+ // - addr(atom(B)) - offset(B)
+ // - addr(BaseSymbol) + <fixup offset from base symbol>
+ // and the offsets are not relocatable, so the fixup is fully resolved when
+ // addr(atom(A)) - addr(atom(B)) - addr(BaseSymbol) == 0.
+ //
+ // Note that "false" is almost always conservatively correct (it means we emit
+ // a relocation which is unnecessary), except when it would force us to emit a
+ // relocation which the target cannot encode.
+
+ const MCSymbolData *A_Base = 0, *B_Base = 0;
+ if (const MCSymbolRefExpr *A = Target.getSymA()) {
+ // Modified symbol references cannot be resolved.
+ if (A->getKind() != MCSymbolRefExpr::VK_None)
+ return false;
+
+ A_Base = Asm.getAtom(Layout, &Asm.getSymbolData(A->getSymbol()));
+ if (!A_Base)
+ return false;
+ }
- uint64_t Size = OffsetToAlignment(Address, AF.getAlignment());
- if (Size > AF.getMaxBytesToEmit())
- AF.setFileSize(0);
- else
- AF.setFileSize(Size);
- break;
- }
+ if (const MCSymbolRefExpr *B = Target.getSymB()) {
+ // Modified symbol references cannot be resolved.
+ if (B->getKind() != MCSymbolRefExpr::VK_None)
+ return false;
- case MCFragment::FT_Data:
- case MCFragment::FT_Fill:
- F.setFileSize(F.getMaxFileSize());
- break;
+ B_Base = Asm.getAtom(Layout, &Asm.getSymbolData(B->getSymbol()));
+ if (!B_Base)
+ return false;
+ }
+
+ // If there is no base, A and B have to be the same atom for this fixup to be
+ // fully resolved.
+ if (!BaseSymbol)
+ return A_Base == B_Base;
+
+ // Otherwise, B must be missing and A must be the base.
+ return !B_Base && BaseSymbol == A_Base;
+}
- case MCFragment::FT_Org: {
- MCOrgFragment &OF = cast<MCOrgFragment>(F);
+bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
+ // Non-temporary labels should always be visible to the linker.
+ if (!Symbol.isTemporary())
+ return true;
- MCValue Target;
- if (!OF.getOffset().EvaluateAsRelocatable(Target))
- llvm_report_error("expected relocatable expression");
+ // Absolute temporary labels are never visible.
+ if (!Symbol.isInSection())
+ return false;
- if (!Target.isAbsolute())
- llvm_unreachable("FIXME: Not yet implemented!");
- uint64_t OrgOffset = Target.getConstant();
- uint64_t Offset = Address - SD.getAddress();
+ // Otherwise, check if the section requires symbols even for temporary labels.
+ return getBackend().doesSectionRequireSymbols(Symbol.getSection());
+}
- // FIXME: We need a way to communicate this error.
- if (OrgOffset < Offset)
- llvm_report_error("invalid .org offset '" + Twine(OrgOffset) +
- "' (at offset '" + Twine(Offset) + "'");
+const MCSymbolData *MCAssembler::getAtom(const MCAsmLayout &Layout,
+ const MCSymbolData *SD) const {
+ // Linker visible symbols define atoms.
+ if (isSymbolLinkerVisible(SD->getSymbol()))
+ return SD;
- F.setFileSize(OrgOffset - Offset);
- break;
- }
+ // Absolute and undefined symbols have no defining atom.
+ if (!SD->getFragment())
+ return 0;
- case MCFragment::FT_ZeroFill: {
- MCZeroFillFragment &ZFF = cast<MCZeroFillFragment>(F);
+ // Non-linker visible symbols in sections which can't be atomized have no
+ // defining atom.
+ if (!getBackend().isSectionAtomizable(
+ SD->getFragment()->getParent()->getSection()))
+ return 0;
- // Align the fragment offset; it is safe to adjust the offset freely since
- // this is only in virtual sections.
- uint64_t Aligned = RoundUpToAlignment(Address, ZFF.getAlignment());
- F.setOffset(Aligned - SD.getAddress());
+ // Otherwise, return the atom for the containing fragment.
+ return SD->getFragment()->getAtom();
+}
- // FIXME: This is misnamed.
- F.setFileSize(ZFF.getSize());
- break;
- }
+bool MCAssembler::EvaluateFixup(const MCAsmLayout &Layout,
+ const MCFixup &Fixup, const MCFragment *DF,
+ MCValue &Target, uint64_t &Value) const {
+ ++stats::EvaluateFixup;
+
+ if (!Fixup.getValue()->EvaluateAsRelocatable(Target, &Layout))
+ report_fatal_error("expected relocatable expression");
+
+ // FIXME: How do non-scattered symbols work in ELF? I presume the linker
+ // doesn't support small relocations, but then under what criteria does the
+ // assembler allow symbol differences?
+
+ Value = Target.getConstant();
+
+ bool IsPCRel = Emitter.getFixupKindInfo(
+ Fixup.getKind()).Flags & MCFixupKindInfo::FKF_IsPCRel;
+ bool IsResolved = true;
+ if (const MCSymbolRefExpr *A = Target.getSymA()) {
+ if (A->getSymbol().isDefined())
+ Value += Layout.getSymbolAddress(&getSymbolData(A->getSymbol()));
+ else
+ IsResolved = false;
+ }
+ if (const MCSymbolRefExpr *B = Target.getSymB()) {
+ if (B->getSymbol().isDefined())
+ Value -= Layout.getSymbolAddress(&getSymbolData(B->getSymbol()));
+ else
+ IsResolved = false;
+ }
+
+ // If we are using scattered symbols, determine whether this value is actually
+ // resolved; scattering may cause atoms to move.
+ if (IsResolved && getBackend().hasScatteredSymbols()) {
+ if (getBackend().hasReliableSymbolDifference()) {
+ // If this is a PCrel relocation, find the base atom (identified by its
+ // symbol) that the fixup value is relative to.
+ const MCSymbolData *BaseSymbol = 0;
+ if (IsPCRel) {
+ BaseSymbol = DF->getAtom();
+ if (!BaseSymbol)
+ IsResolved = false;
+ }
+
+ if (IsResolved)
+ IsResolved = isScatteredFixupFullyResolved(*this, Layout, Fixup, Target,
+ BaseSymbol);
+ } else {
+ const MCSection *BaseSection = 0;
+ if (IsPCRel)
+ BaseSection = &DF->getParent()->getSection();
+
+ IsResolved = isScatteredFixupFullyResolvedSimple(*this, Fixup, Target,
+ BaseSection);
}
+ }
+
+ if (IsPCRel)
+ Value -= Layout.getFragmentAddress(DF) + Fixup.getOffset();
+
+ return IsResolved;
+}
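
EvaluateFixup above boils a fixup down to Value = C + addr(A) - addr(B), then subtracts the fixup's own address for PC-relative kinds. A toy computation with made-up addresses (not taken from any real object file), just to pin down the sign conventions:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t AddrA = 0x120, AddrB = 0x100, C = 8; // hypothetical symbol addresses
    uint64_t FixupAddr = 0x40;                    // fragment address + fixup offset
    uint64_t Value = AddrA - AddrB + C;           // 0x28
    bool IsPCRel = true;
    if (IsPCRel)
      Value -= FixupAddr;                         // wraps mod 2^64, i.e. -24
    assert(Value == uint64_t(-24));
    return 0;
  }
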
+
+uint64_t MCAssembler::ComputeFragmentSize(MCAsmLayout &Layout,
+ const MCFragment &F,
+ uint64_t SectionAddress,
+ uint64_t FragmentOffset) const {
+ switch (F.getKind()) {
+ case MCFragment::FT_Data:
+ return cast<MCDataFragment>(F).getContents().size();
+ case MCFragment::FT_Fill:
+ return cast<MCFillFragment>(F).getSize();
+ case MCFragment::FT_Inst:
+ return cast<MCInstFragment>(F).getInstSize();
+
+ case MCFragment::FT_Align: {
+ const MCAlignFragment &AF = cast<MCAlignFragment>(F);
- Address += F.getFileSize();
+ assert((!AF.hasOnlyAlignAddress() || !AF.getNextNode()) &&
+ "Invalid OnlyAlignAddress bit, not the last fragment!");
+
+ uint64_t Size = OffsetToAlignment(SectionAddress + FragmentOffset,
+ AF.getAlignment());
+
+ // Honor MaxBytesToEmit.
+ if (Size > AF.getMaxBytesToEmit())
+ return 0;
+
+ return Size;
+ }
+
+ case MCFragment::FT_Org: {
+ const MCOrgFragment &OF = cast<MCOrgFragment>(F);
+
+ // FIXME: We should compute this sooner, we don't want to recurse here, and
+ // we would like to be more functional.
+ int64_t TargetLocation;
+ if (!OF.getOffset().EvaluateAsAbsolute(TargetLocation, &Layout))
+ report_fatal_error("expected assembly-time absolute expression");
+
+ // FIXME: We need a way to communicate this error.
+ int64_t Offset = TargetLocation - FragmentOffset;
+ if (Offset < 0)
+ report_fatal_error("invalid .org offset '" + Twine(TargetLocation) +
+ "' (at offset '" + Twine(FragmentOffset) + "'");
+
+ return Offset;
+ }
}
- // Set the section sizes.
- SD.setSize(Address - SD.getAddress());
- if (isVirtualSection(SD.getSection()))
- SD.setFileSize(0);
- else
- SD.setFileSize(Address - SD.getAddress());
+ assert(0 && "invalid fragment kind");
+ return 0;
}
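
For FT_Align fragments, the size above is whatever padding is needed to reach the next multiple of the alignment from the fragment's absolute address. OffsetToAlign below is a local stand-in for that helper (not the real LLVM OffsetToAlignment), shown only to make the arithmetic explicit:

  #include <cassert>
  #include <cstdint>

  // Bytes needed to advance Value to the next multiple of Align (0 if aligned).
  static uint64_t OffsetToAlign(uint64_t Value, uint64_t Align) {
    return (Align - (Value % Align)) % Align;
  }

  int main() {
    assert(OffsetToAlign(0x13, 4) == 1);   // 0x13 -> 0x14
    assert(OffsetToAlign(0x10, 16) == 0);  // already aligned
    return 0;
  }
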
-/// WriteNopData - Write optimal nops to the output file for the \arg Count
-/// bytes. This returns the number of bytes written. It may return 0 if
-/// the \arg Count is more than the maximum optimal nops.
-///
-/// FIXME this is X86 32-bit specific and should move to a better place.
-static uint64_t WriteNopData(uint64_t Count, MachObjectWriter &MOW) {
- static const uint8_t Nops[16][16] = {
- // nop
- {0x90},
- // xchg %ax,%ax
- {0x66, 0x90},
- // nopl (%[re]ax)
- {0x0f, 0x1f, 0x00},
- // nopl 0(%[re]ax)
- {0x0f, 0x1f, 0x40, 0x00},
- // nopl 0(%[re]ax,%[re]ax,1)
- {0x0f, 0x1f, 0x44, 0x00, 0x00},
- // nopw 0(%[re]ax,%[re]ax,1)
- {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
- // nopl 0L(%[re]ax)
- {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
- // nopl 0L(%[re]ax,%[re]ax,1)
- {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
- // nopw 0L(%[re]ax,%[re]ax,1)
- {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
- // nopw %cs:0L(%[re]ax,%[re]ax,1)
- {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
- // nopl 0(%[re]ax,%[re]ax,1)
- // nopw 0(%[re]ax,%[re]ax,1)
- {0x0f, 0x1f, 0x44, 0x00, 0x00,
- 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
- // nopw 0(%[re]ax,%[re]ax,1)
- // nopw 0(%[re]ax,%[re]ax,1)
- {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00,
- 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
- // nopw 0(%[re]ax,%[re]ax,1)
- // nopl 0L(%[re]ax) */
- {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00,
- 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
- // nopl 0L(%[re]ax)
- // nopl 0L(%[re]ax)
- {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
- // nopl 0L(%[re]ax)
- // nopl 0L(%[re]ax,%[re]ax,1)
- {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}
- };
-
- if (Count > 15)
- return 0;
+void MCAsmLayout::LayoutFile() {
+ // Initialize the first section and set the valid fragment layout point. All
+ // actual layout computations are done lazily.
+ LastValidFragment = 0;
+ if (!getSectionOrder().empty())
+ getSectionOrder().front()->Address = 0;
+}
+
+void MCAsmLayout::LayoutFragment(MCFragment *F) {
+ MCFragment *Prev = F->getPrevNode();
+
+ // We should never try to recompute something which is up-to-date.
+ assert(!isFragmentUpToDate(F) && "Attempt to recompute up-to-date fragment!");
+ // We should never try to compute the fragment layout if the section isn't
+ // up-to-date.
+ assert(isSectionUpToDate(F->getParent()) &&
+ "Attempt to compute fragment before it's section!");
+ // We should never try to compute the fragment layout if it's predecessor
+ // isn't up-to-date.
+ assert((!Prev || isFragmentUpToDate(Prev)) &&
+ "Attempt to compute fragment before it's predecessor!");
+
+ ++stats::FragmentLayouts;
+
+ // Compute the fragment start address.
+ uint64_t StartAddress = F->getParent()->Address;
+ uint64_t Address = StartAddress;
+ if (Prev)
+ Address += Prev->Offset + Prev->EffectiveSize;
+
+ // Compute fragment offset and size.
+ F->Offset = Address - StartAddress;
+ F->EffectiveSize = getAssembler().ComputeFragmentSize(*this, *F, StartAddress,
+ F->Offset);
+ LastValidFragment = F;
+
+ // If this is the last fragment in a section, update the next section address.
+ if (!F->getNextNode()) {
+ unsigned NextIndex = F->getParent()->getLayoutOrder() + 1;
+ if (NextIndex != getSectionOrder().size())
+ LayoutSection(getSectionOrder()[NextIndex]);
+ }
+}
+
+void MCAsmLayout::LayoutSection(MCSectionData *SD) {
+ unsigned SectionOrderIndex = SD->getLayoutOrder();
- for (uint64_t i = 0; i < Count; i++)
- MOW.Write8 (uint8_t(Nops[Count - 1][i]));
+ ++stats::SectionLayouts;
- return Count;
+ // Compute the section start address.
+ uint64_t StartAddress = 0;
+ if (SectionOrderIndex) {
+ MCSectionData *Prev = getSectionOrder()[SectionOrderIndex - 1];
+ StartAddress = getSectionAddress(Prev) + getSectionAddressSize(Prev);
+ }
+
+ // Honor the section alignment requirements.
+ StartAddress = RoundUpToAlignment(StartAddress, SD->getAlignment());
+
+ // Set the section address.
+ SD->Address = StartAddress;
}
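
LayoutSection above places each section at the end of the previous one and rounds up to the section's alignment. A tiny illustration of that rounding with hypothetical numbers (roundUpTo stands in for RoundUpToAlignment):

  #include <cassert>
  #include <cstdint>

  static uint64_t roundUpTo(uint64_t Value, uint64_t Align) {
    return ((Value + Align - 1) / Align) * Align;
  }

  int main() {
    uint64_t PrevStart = 0x0, PrevSize = 0x13; // made-up previous section
    uint64_t Align = 16;                       // this section's alignment
    assert(roundUpTo(PrevStart + PrevSize, Align) == 0x20);
    return 0;
  }
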
-/// WriteFileData - Write the \arg F data to the output file.
-static void WriteFileData(raw_ostream &OS, const MCFragment &F,
- MachObjectWriter &MOW) {
- uint64_t Start = OS.tell();
+/// WriteFragmentData - Write the \arg F data to the output file.
+static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment &F, MCObjectWriter *OW) {
+ uint64_t Start = OW->getStream().tell();
(void) Start;
- ++EmittedFragments;
+ ++stats::EmittedFragments;
// FIXME: Embed in fragments instead?
+ uint64_t FragmentSize = Layout.getFragmentEffectiveSize(&F);
switch (F.getKind()) {
case MCFragment::FT_Align: {
MCAlignFragment &AF = cast<MCAlignFragment>(F);
- uint64_t Count = AF.getFileSize() / AF.getValueSize();
+ uint64_t Count = FragmentSize / AF.getValueSize();
+
+ assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");
// FIXME: This error shouldn't actually occur (the front end should emit
// multiple .align directives to enforce the semantics it wants), but is
// severe enough that we want to report it. How to handle this?
- if (Count * AF.getValueSize() != AF.getFileSize())
- llvm_report_error("undefined .align directive, value size '" +
+ if (Count * AF.getValueSize() != FragmentSize)
+ report_fatal_error("undefined .align directive, value size '" +
Twine(AF.getValueSize()) +
"' is not a divisor of padding size '" +
- Twine(AF.getFileSize()) + "'");
+ Twine(FragmentSize) + "'");
// See if we are aligning with nops, and if so do that first to try to fill
// the Count bytes. Then if that did not fill any bytes or there are any
// bytes left to fill, use the Value and ValueSize to fill the rest.
- if (AF.getEmitNops()) {
- uint64_t NopByteCount = WriteNopData(Count, MOW);
- Count -= NopByteCount;
+ // If we are aligning with nops, ask that target to emit the right data.
+ if (AF.hasEmitNops()) {
+ if (!Asm.getBackend().WriteNopData(Count, OW))
+ report_fatal_error("unable to write nop sequence of " +
+ Twine(Count) + " bytes");
+ break;
}
+ // Otherwise, write out in multiples of the value size.
for (uint64_t i = 0; i != Count; ++i) {
switch (AF.getValueSize()) {
default:
assert(0 && "Invalid size!");
- case 1: MOW.Write8 (uint8_t (AF.getValue())); break;
- case 2: MOW.Write16(uint16_t(AF.getValue())); break;
- case 4: MOW.Write32(uint32_t(AF.getValue())); break;
- case 8: MOW.Write64(uint64_t(AF.getValue())); break;
+ case 1: OW->Write8 (uint8_t (AF.getValue())); break;
+ case 2: OW->Write16(uint16_t(AF.getValue())); break;
+ case 4: OW->Write32(uint32_t(AF.getValue())); break;
+ case 8: OW->Write64(uint64_t(AF.getValue())); break;
}
}
break;
@@ -1164,213 +575,465 @@ static void WriteFileData(raw_ostream &OS, const MCFragment &F,
case MCFragment::FT_Data: {
MCDataFragment &DF = cast<MCDataFragment>(F);
-
- // Apply the fixups.
- //
- // FIXME: Move elsewhere.
- for (MCDataFragment::const_fixup_iterator it = DF.fixup_begin(),
- ie = DF.fixup_end(); it != ie; ++it)
- MOW.ApplyFixup(*it, DF);
-
- OS << cast<MCDataFragment>(F).getContents().str();
+ assert(FragmentSize == DF.getContents().size() && "Invalid size!");
+ OW->WriteBytes(DF.getContents().str());
break;
}
case MCFragment::FT_Fill: {
MCFillFragment &FF = cast<MCFillFragment>(F);
- for (uint64_t i = 0, e = FF.getCount(); i != e; ++i) {
+
+ assert(FF.getValueSize() && "Invalid virtual align in concrete fragment!");
+
+ for (uint64_t i = 0, e = FF.getSize() / FF.getValueSize(); i != e; ++i) {
switch (FF.getValueSize()) {
default:
assert(0 && "Invalid size!");
- case 1: MOW.Write8 (uint8_t (FF.getValue())); break;
- case 2: MOW.Write16(uint16_t(FF.getValue())); break;
- case 4: MOW.Write32(uint32_t(FF.getValue())); break;
- case 8: MOW.Write64(uint64_t(FF.getValue())); break;
+ case 1: OW->Write8 (uint8_t (FF.getValue())); break;
+ case 2: OW->Write16(uint16_t(FF.getValue())); break;
+ case 4: OW->Write32(uint32_t(FF.getValue())); break;
+ case 8: OW->Write64(uint64_t(FF.getValue())); break;
}
}
break;
}
+ case MCFragment::FT_Inst:
+ llvm_unreachable("unexpected inst fragment after lowering");
+ break;
+
case MCFragment::FT_Org: {
MCOrgFragment &OF = cast<MCOrgFragment>(F);
- for (uint64_t i = 0, e = OF.getFileSize(); i != e; ++i)
- MOW.Write8(uint8_t(OF.getValue()));
+ for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
+ OW->Write8(uint8_t(OF.getValue()));
break;
}
-
- case MCFragment::FT_ZeroFill: {
- assert(0 && "Invalid zero fill fragment in concrete section!");
- break;
- }
}
- assert(OS.tell() - Start == F.getFileSize());
+ assert(OW->getStream().tell() - Start == FragmentSize);
}
-/// WriteFileData - Write the \arg SD data to the output file.
-static void WriteFileData(raw_ostream &OS, const MCSectionData &SD,
- MachObjectWriter &MOW) {
+void MCAssembler::WriteSectionData(const MCSectionData *SD,
+ const MCAsmLayout &Layout,
+ MCObjectWriter *OW) const {
// Ignore virtual sections.
- if (isVirtualSection(SD.getSection())) {
- assert(SD.getFileSize() == 0);
+ if (getBackend().isVirtualSection(SD->getSection())) {
+ assert(Layout.getSectionFileSize(SD) == 0 && "Invalid size for section!");
+
+ // Check that contents are only things legal inside a virtual section.
+ for (MCSectionData::const_iterator it = SD->begin(),
+ ie = SD->end(); it != ie; ++it) {
+ switch (it->getKind()) {
+ default:
+ assert(0 && "Invalid fragment in virtual section!");
+ case MCFragment::FT_Data: {
+ // Check that we aren't trying to write non-zero contents (or fixups)
+ // into a virtual section. This is to support clients which use standard
+ // directives to fill the contents of virtual sections.
+ MCDataFragment &DF = cast<MCDataFragment>(*it);
+ assert(DF.fixup_begin() == DF.fixup_end() &&
+ "Cannot have fixups in virtual section!");
+ for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
+ assert(DF.getContents()[i] == 0 &&
+ "Invalid data value for virtual section!");
+ break;
+ }
+ case MCFragment::FT_Align:
+ // Check that we aren't trying to write a non-zero value into a virtual
+ // section.
+ assert((!cast<MCAlignFragment>(it)->getValueSize() ||
+ !cast<MCAlignFragment>(it)->getValue()) &&
+ "Invalid align in virtual section!");
+ break;
+ case MCFragment::FT_Fill:
+ assert(!cast<MCFillFragment>(it)->getValueSize() &&
+ "Invalid fill in virtual section!");
+ break;
+ }
+ }
+
return;
}
- uint64_t Start = OS.tell();
+ uint64_t Start = OW->getStream().tell();
(void) Start;
- for (MCSectionData::const_iterator it = SD.begin(),
- ie = SD.end(); it != ie; ++it)
- WriteFileData(OS, *it, MOW);
+ for (MCSectionData::const_iterator it = SD->begin(),
+ ie = SD->end(); it != ie; ++it)
+ WriteFragmentData(*this, Layout, *it, OW);
+
+ assert(OW->getStream().tell() - Start == Layout.getSectionFileSize(SD));
+}
+
+void MCAssembler::AddSectionToTheEnd(MCSectionData &SD, MCAsmLayout &Layout) {
+ // Create dummy fragments and assign section ordinals.
+ unsigned SectionIndex = 0;
+ for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it)
+ SectionIndex++;
+
+ SD.setOrdinal(SectionIndex);
+
+ // Assign layout order indices to sections and fragments.
+ unsigned FragmentIndex = 0;
+ unsigned i = 0;
+ for (unsigned e = Layout.getSectionOrder().size(); i != e; ++i) {
+ MCSectionData *SD = Layout.getSectionOrder()[i];
- // Add section padding.
- assert(SD.getFileSize() >= SD.getSize() && "Invalid section sizes!");
- MOW.WriteZeros(SD.getFileSize() - SD.getSize());
+ for (MCSectionData::iterator it2 = SD->begin(),
+ ie2 = SD->end(); it2 != ie2; ++it2)
+ FragmentIndex++;
+ }
+
+ SD.setLayoutOrder(i);
+ for (MCSectionData::iterator it2 = SD.begin(),
+ ie2 = SD.end(); it2 != ie2; ++it2) {
+ it2->setLayoutOrder(FragmentIndex++);
+ }
+ Layout.getSectionOrder().push_back(&SD);
+
+ Layout.LayoutSection(&SD);
+
+ // Layout until everything fits.
+ while (LayoutOnce(Layout))
+ continue;
- assert(OS.tell() - Start == SD.getFileSize());
}
-void MCAssembler::Finish() {
+void MCAssembler::Finish(MCObjectWriter *Writer) {
DEBUG_WITH_TYPE("mc-dump", {
llvm::errs() << "assembler backend - pre-layout\n--\n";
dump(); });
- // Layout the concrete sections and fragments.
- uint64_t Address = 0;
- MCSectionData *Prev = 0;
- for (iterator it = begin(), ie = end(); it != ie; ++it) {
- MCSectionData &SD = *it;
+ // Create the layout object.
+ MCAsmLayout Layout(*this);
+
+ // Insert additional align fragments for concrete sections to explicitly pad
+ // the previous section to match their alignment requirements. This is for
+ // 'gas' compatibility, it shouldn't strictly be necessary.
+ //
+ // FIXME: This may be Mach-O specific.
+ for (unsigned i = 1, e = Layout.getSectionOrder().size(); i < e; ++i) {
+ MCSectionData *SD = Layout.getSectionOrder()[i];
+
+ // Ignore sections without alignment requirements.
+ unsigned Align = SD->getAlignment();
+ if (Align <= 1)
+ continue;
- // Skip virtual sections.
- if (isVirtualSection(SD.getSection()))
+ // Ignore virtual sections, they don't cause file size modifications.
+ if (getBackend().isVirtualSection(SD->getSection()))
continue;
- // Align this section if necessary by adding padding bytes to the previous
+ // Otherwise, create a new align fragment at the end of the previous
// section.
- if (uint64_t Pad = OffsetToAlignment(Address, it->getAlignment())) {
- assert(Prev && "Missing prev section!");
- Prev->setFileSize(Prev->getFileSize() + Pad);
- Address += Pad;
- }
+ MCAlignFragment *AF = new MCAlignFragment(Align, 0, 1, Align,
+ Layout.getSectionOrder()[i - 1]);
+ AF->setOnlyAlignAddress(true);
+ }
- // Layout the section fragments and its size.
- SD.setAddress(Address);
- LayoutSection(SD);
- Address += SD.getFileSize();
+ // Create dummy fragments and assign section ordinals.
+ unsigned SectionIndex = 0;
+ for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) {
+ // Create dummy fragments to eliminate any empty sections, this simplifies
+ // layout.
+ if (it->getFragmentList().empty())
+ new MCFillFragment(0, 1, 0, it);
- Prev = &SD;
+ it->setOrdinal(SectionIndex++);
}
- // Layout the virtual sections.
- for (iterator it = begin(), ie = end(); it != ie; ++it) {
- MCSectionData &SD = *it;
-
- if (!isVirtualSection(SD.getSection()))
- continue;
+ // Assign layout order indices to sections and fragments.
+ unsigned FragmentIndex = 0;
+ for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) {
+ MCSectionData *SD = Layout.getSectionOrder()[i];
+ SD->setLayoutOrder(i);
- SD.setAddress(Address);
- LayoutSection(SD);
- Address += SD.getSize();
+ for (MCSectionData::iterator it2 = SD->begin(),
+ ie2 = SD->end(); it2 != ie2; ++it2)
+ it2->setLayoutOrder(FragmentIndex++);
}
+ // Layout until everything fits.
+ while (LayoutOnce(Layout))
+ continue;
+
DEBUG_WITH_TYPE("mc-dump", {
- llvm::errs() << "assembler backend - post-layout\n--\n";
+ llvm::errs() << "assembler backend - post-relaxation\n--\n";
dump(); });
- // Write the object file.
- MachObjectWriter MOW(OS);
- MOW.WriteObject(*this);
+ // Finalize the layout, including fragment lowering.
+ FinishLayout(Layout);
- OS.flush();
-}
+ DEBUG_WITH_TYPE("mc-dump", {
+ llvm::errs() << "assembler backend - final-layout\n--\n";
+ dump(); });
+ uint64_t StartOffset = OS.tell();
-// Debugging methods
+ llvm::OwningPtr<MCObjectWriter> OwnWriter(0);
+ if (Writer == 0) {
+ // No custom Writer: create the default one, lifetime-managed by OwningPtr.
+ OwnWriter.reset(getBackend().createObjectWriter(OS));
+ Writer = OwnWriter.get();
+ if (!Writer)
+ report_fatal_error("unable to create object writer!");
+ }
-namespace llvm {
+ // Allow the object writer a chance to perform post-layout binding (for
+ // example, to set the index fields in the symbol data).
+ Writer->ExecutePostLayoutBinding(*this);
-raw_ostream &operator<<(raw_ostream &OS, const MCAsmFixup &AF) {
- OS << "<MCAsmFixup" << " Offset:" << AF.Offset << " Value:" << *AF.Value
- << " Kind:" << AF.Kind << ">";
- return OS;
-}
+ // Evaluate and apply the fixups, generating relocation entries as necessary.
+ for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) {
+ for (MCSectionData::iterator it2 = it->begin(),
+ ie2 = it->end(); it2 != ie2; ++it2) {
+ MCDataFragment *DF = dyn_cast<MCDataFragment>(it2);
+ if (!DF)
+ continue;
+
+ for (MCDataFragment::fixup_iterator it3 = DF->fixup_begin(),
+ ie3 = DF->fixup_end(); it3 != ie3; ++it3) {
+ MCFixup &Fixup = *it3;
+
+ // Evaluate the fixup.
+ MCValue Target;
+ uint64_t FixedValue;
+ if (!EvaluateFixup(Layout, Fixup, DF, Target, FixedValue)) {
+ // The fixup was unresolved, we need a relocation. Inform the object
+ // writer of the relocation, and give it an opportunity to adjust the
+ // fixup value if need be.
+ Writer->RecordRelocation(*this, Layout, DF, Fixup, Target,FixedValue);
+ }
+ getBackend().ApplyFixup(Fixup, *DF, FixedValue);
+ }
+ }
+ }
+
+ // Write the object file.
+ Writer->WriteObject(*this, Layout);
+
+ stats::ObjectBytes += OS.tell() - StartOffset;
}
-void MCFragment::dump() {
- raw_ostream &OS = llvm::errs();
+bool MCAssembler::FixupNeedsRelaxation(const MCFixup &Fixup,
+ const MCFragment *DF,
+ const MCAsmLayout &Layout) const {
+ if (getRelaxAll())
+ return true;
- OS << "<MCFragment " << (void*) this << " Offset:" << Offset
- << " FileSize:" << FileSize;
+ // If we cannot resolve the fixup value, it requires relaxation.
+ MCValue Target;
+ uint64_t Value;
+ if (!EvaluateFixup(Layout, Fixup, DF, Target, Value))
+ return true;
- OS << ">";
+ // Otherwise, relax if the value is too big for a (signed) i8.
+ //
+ // FIXME: This is target dependent!
+ return int64_t(Value) != int64_t(int8_t(Value));
}
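
The "too big for a (signed) i8" test above is the usual truncate-and-compare idiom. A quick standalone check of its behaviour, for illustration only:

  #include <cassert>
  #include <cstdint>

  static bool fitsInSignedI8(uint64_t Value) {
    return int64_t(Value) == int64_t(int8_t(Value));
  }

  int main() {
    assert(fitsInSignedI8(127));             // 0x7f fits
    assert(fitsInSignedI8(uint64_t(-128)));  // sign-extended -128 fits
    assert(!fitsInSignedI8(128));            // would need relaxation
    return 0;
  }
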
-void MCAlignFragment::dump() {
- raw_ostream &OS = llvm::errs();
+bool MCAssembler::FragmentNeedsRelaxation(const MCInstFragment *IF,
+ const MCAsmLayout &Layout) const {
+ // If this inst doesn't ever need relaxation, ignore it. This occurs when we
+ // are intentionally pushing out inst fragments, or because we relaxed a
+ // previous instruction to one that doesn't need relaxation.
+ if (!getBackend().MayNeedRelaxation(IF->getInst()))
+ return false;
+
+ for (MCInstFragment::const_fixup_iterator it = IF->fixup_begin(),
+ ie = IF->fixup_end(); it != ie; ++it)
+ if (FixupNeedsRelaxation(*it, IF, Layout))
+ return true;
- OS << "<MCAlignFragment ";
- this->MCFragment::dump();
- OS << "\n ";
- OS << " Alignment:" << getAlignment()
- << " Value:" << getValue() << " ValueSize:" << getValueSize()
- << " MaxBytesToEmit:" << getMaxBytesToEmit() << ">";
+ return false;
}
-void MCDataFragment::dump() {
- raw_ostream &OS = llvm::errs();
+bool MCAssembler::LayoutOnce(MCAsmLayout &Layout) {
+ ++stats::RelaxationSteps;
- OS << "<MCDataFragment ";
- this->MCFragment::dump();
- OS << "\n ";
- OS << " Contents:[";
- for (unsigned i = 0, e = getContents().size(); i != e; ++i) {
- if (i) OS << ",";
- OS << hexdigit((Contents[i] >> 4) & 0xF) << hexdigit(Contents[i] & 0xF);
- }
- OS << "] (" << getContents().size() << " bytes)";
-
- if (!getFixups().empty()) {
- OS << ",\n ";
- OS << " Fixups:[";
- for (fixup_iterator it = fixup_begin(), ie = fixup_end(); it != ie; ++it) {
- if (it != fixup_begin()) OS << ",\n ";
- OS << *it;
+ // Layout the sections in order.
+ Layout.LayoutFile();
+
+ // Scan for fragments that need relaxation.
+ bool WasRelaxed = false;
+ for (iterator it = begin(), ie = end(); it != ie; ++it) {
+ MCSectionData &SD = *it;
+
+ for (MCSectionData::iterator it2 = SD.begin(),
+ ie2 = SD.end(); it2 != ie2; ++it2) {
+ // Check if this is an instruction fragment that needs relaxation.
+ MCInstFragment *IF = dyn_cast<MCInstFragment>(it2);
+ if (!IF || !FragmentNeedsRelaxation(IF, Layout))
+ continue;
+
+ ++stats::RelaxedInstructions;
+
+ // FIXME-PERF: We could immediately lower out instructions if we can tell
+ // they are fully resolved, to avoid retesting on later passes.
+
+ // Relax the fragment.
+
+ MCInst Relaxed;
+ getBackend().RelaxInstruction(IF->getInst(), Relaxed);
+
+ // Encode the new instruction.
+ //
+ // FIXME-PERF: If it matters, we could let the target do this. It can
+ // probably do so more efficiently in many cases.
+ SmallVector<MCFixup, 4> Fixups;
+ SmallString<256> Code;
+ raw_svector_ostream VecOS(Code);
+ getEmitter().EncodeInstruction(Relaxed, VecOS, Fixups);
+ VecOS.flush();
+
+ // Update the instruction fragment.
+ int SlideAmount = Code.size() - IF->getInstSize();
+ IF->setInst(Relaxed);
+ IF->getCode() = Code;
+ IF->getFixups().clear();
+ // FIXME: Eliminate copy.
+ for (unsigned i = 0, e = Fixups.size(); i != e; ++i)
+ IF->getFixups().push_back(Fixups[i]);
+
+ // Update the layout, and remember that we relaxed.
+ Layout.UpdateForSlide(IF, SlideAmount);
+ WasRelaxed = true;
}
- OS << "]";
}
- OS << ">";
+ return WasRelaxed;
}
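
LayoutOnce above is a single pass of a relax-until-fixed-point loop: re-encode any instruction whose fixups no longer fit, slide the layout, and report whether anything changed so the caller can iterate. A minimal sketch of that fixed-point structure with made-up types:

  #include <vector>

  struct Insn { int Size; bool NeedsBiggerEncoding; };

  // One relaxation pass: grow anything that does not fit, report any change.
  static bool relaxOnce(std::vector<Insn> &Prog) {
    bool Changed = false;
    for (Insn &I : Prog) {
      if (I.NeedsBiggerEncoding) {
        I.Size += 3;                   // e.g. rel8 jump becomes rel32
        I.NeedsBiggerEncoding = false; // the wide form always fits
        Changed = true;
      }
    }
    return Changed;
  }

  int main() {
    std::vector<Insn> Prog = {{2, false}, {2, true}, {2, false}};
    while (relaxOnce(Prog)) // iterate until a fixed point is reached
      ;
    return Prog[1].Size == 5 ? 0 : 1;
  }
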
-void MCFillFragment::dump() {
- raw_ostream &OS = llvm::errs();
+void MCAssembler::FinishLayout(MCAsmLayout &Layout) {
+ // Lower out any instruction fragments, to simplify the fixup application and
+ // output.
+ //
+ // FIXME-PERF: We don't have to do this, but the assumption is that it is
+ // cheap (we will mostly end up eliminating fragments and appending on to data
+ // fragments), so the extra complexity downstream isn't worth it. Evaluate
+ // this assumption.
+ for (iterator it = begin(), ie = end(); it != ie; ++it) {
+ MCSectionData &SD = *it;
+
+ for (MCSectionData::iterator it2 = SD.begin(),
+ ie2 = SD.end(); it2 != ie2; ++it2) {
+ MCInstFragment *IF = dyn_cast<MCInstFragment>(it2);
+ if (!IF)
+ continue;
- OS << "<MCFillFragment ";
- this->MCFragment::dump();
- OS << "\n ";
- OS << " Value:" << getValue() << " ValueSize:" << getValueSize()
- << " Count:" << getCount() << ">";
+ // Create a new data fragment for the instruction.
+ //
+ // FIXME-PERF: Reuse previous data fragment if possible.
+ MCDataFragment *DF = new MCDataFragment();
+ SD.getFragmentList().insert(it2, DF);
+
+ // Update the data fragments layout data.
+ DF->setParent(IF->getParent());
+ DF->setAtom(IF->getAtom());
+ DF->setLayoutOrder(IF->getLayoutOrder());
+ Layout.FragmentReplaced(IF, DF);
+
+ // Copy in the data and the fixups.
+ DF->getContents().append(IF->getCode().begin(), IF->getCode().end());
+ for (unsigned i = 0, e = IF->getFixups().size(); i != e; ++i)
+ DF->getFixups().push_back(IF->getFixups()[i]);
+
+ // Delete the instruction fragment and update the iterator.
+ SD.getFragmentList().erase(IF);
+ it2 = DF;
+ }
+ }
}
-void MCOrgFragment::dump() {
- raw_ostream &OS = llvm::errs();
+// Debugging methods
+
+namespace llvm {
+
+raw_ostream &operator<<(raw_ostream &OS, const MCFixup &AF) {
+ OS << "<MCFixup" << " Offset:" << AF.getOffset()
+ << " Value:" << *AF.getValue()
+ << " Kind:" << AF.getKind() << ">";
+ return OS;
+}
- OS << "<MCOrgFragment ";
- this->MCFragment::dump();
- OS << "\n ";
- OS << " Offset:" << getOffset() << " Value:" << getValue() << ">";
}
-void MCZeroFillFragment::dump() {
+void MCFragment::dump() {
raw_ostream &OS = llvm::errs();
- OS << "<MCZeroFillFragment ";
- this->MCFragment::dump();
- OS << "\n ";
- OS << " Size:" << getSize() << " Alignment:" << getAlignment() << ">";
+ OS << "<";
+ switch (getKind()) {
+ case MCFragment::FT_Align: OS << "MCAlignFragment"; break;
+ case MCFragment::FT_Data: OS << "MCDataFragment"; break;
+ case MCFragment::FT_Fill: OS << "MCFillFragment"; break;
+ case MCFragment::FT_Inst: OS << "MCInstFragment"; break;
+ case MCFragment::FT_Org: OS << "MCOrgFragment"; break;
+ }
+
+ OS << "<MCFragment " << (void*) this << " LayoutOrder:" << LayoutOrder
+ << " Offset:" << Offset << " EffectiveSize:" << EffectiveSize << ">";
+
+ switch (getKind()) {
+ case MCFragment::FT_Align: {
+ const MCAlignFragment *AF = cast<MCAlignFragment>(this);
+ if (AF->hasEmitNops())
+ OS << " (emit nops)";
+ if (AF->hasOnlyAlignAddress())
+ OS << " (only align section)";
+ OS << "\n ";
+ OS << " Alignment:" << AF->getAlignment()
+ << " Value:" << AF->getValue() << " ValueSize:" << AF->getValueSize()
+ << " MaxBytesToEmit:" << AF->getMaxBytesToEmit() << ">";
+ break;
+ }
+ case MCFragment::FT_Data: {
+ const MCDataFragment *DF = cast<MCDataFragment>(this);
+ OS << "\n ";
+ OS << " Contents:[";
+ const SmallVectorImpl<char> &Contents = DF->getContents();
+ for (unsigned i = 0, e = Contents.size(); i != e; ++i) {
+ if (i) OS << ",";
+ OS << hexdigit((Contents[i] >> 4) & 0xF) << hexdigit(Contents[i] & 0xF);
+ }
+ OS << "] (" << Contents.size() << " bytes)";
+
+ if (!DF->getFixups().empty()) {
+ OS << ",\n ";
+ OS << " Fixups:[";
+ for (MCDataFragment::const_fixup_iterator it = DF->fixup_begin(),
+ ie = DF->fixup_end(); it != ie; ++it) {
+ if (it != DF->fixup_begin()) OS << ",\n ";
+ OS << *it;
+ }
+ OS << "]";
+ }
+ break;
+ }
+ case MCFragment::FT_Fill: {
+ const MCFillFragment *FF = cast<MCFillFragment>(this);
+ OS << " Value:" << FF->getValue() << " ValueSize:" << FF->getValueSize()
+ << " Size:" << FF->getSize();
+ break;
+ }
+ case MCFragment::FT_Inst: {
+ const MCInstFragment *IF = cast<MCInstFragment>(this);
+ OS << "\n ";
+ OS << " Inst:";
+ IF->getInst().dump_pretty(OS);
+ break;
+ }
+ case MCFragment::FT_Org: {
+ const MCOrgFragment *OF = cast<MCOrgFragment>(this);
+ OS << "\n ";
+ OS << " Offset:" << OF->getOffset() << " Value:" << OF->getValue();
+ break;
+ }
+ }
+ OS << ">";
}
void MCSectionData::dump() {
@@ -1378,8 +1041,7 @@ void MCSectionData::dump() {
OS << "<MCSectionData";
OS << " Alignment:" << getAlignment() << " Address:" << Address
- << " Size:" << Size << " FileSize:" << FileSize
- << " Fragments:[";
+ << " Fragments:[\n ";
for (iterator it = begin(), ie = end(); it != ie; ++it) {
if (it != begin()) OS << ",\n ";
it->dump();
@@ -1407,7 +1069,7 @@ void MCAssembler::dump() {
raw_ostream &OS = llvm::errs();
OS << "<MCAssembler\n";
- OS << " Sections:[";
+ OS << " Sections:[\n ";
for (iterator it = begin(), ie = end(); it != ie; ++it) {
if (it != begin()) OS << ",\n ";
it->dump();
@@ -1416,7 +1078,7 @@ void MCAssembler::dump() {
OS << " Symbols:[";
for (symbol_iterator it = symbol_begin(), ie = symbol_end(); it != ie; ++it) {
- if (it != symbol_begin()) OS << ",\n ";
+ if (it != symbol_begin()) OS << ",\n ";
it->dump();
}
OS << "]>\n";
diff --git a/libclamav/c++/llvm/lib/MC/MCCodeEmitter.cpp b/libclamav/c++/llvm/lib/MC/MCCodeEmitter.cpp
index accb06c..d513237 100644
--- a/libclamav/c++/llvm/lib/MC/MCCodeEmitter.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCCodeEmitter.cpp
@@ -19,10 +19,10 @@ MCCodeEmitter::~MCCodeEmitter() {
const MCFixupKindInfo &MCCodeEmitter::getFixupKindInfo(MCFixupKind Kind) const {
static const MCFixupKindInfo Builtins[] = {
- { "FK_Data_1", 0, 8 },
- { "FK_Data_2", 0, 16 },
- { "FK_Data_4", 0, 32 },
- { "FK_Data_8", 0, 64 }
+ { "FK_Data_1", 0, 8, 0 },
+ { "FK_Data_2", 0, 16, 0 },
+ { "FK_Data_4", 0, 32, 0 },
+ { "FK_Data_8", 0, 64, 0 }
};
assert(Kind <= 3 && "Unknown fixup kind");
diff --git a/libclamav/c++/llvm/lib/MC/MCContext.cpp b/libclamav/c++/llvm/lib/MC/MCContext.cpp
index 45d2c02..e5586a0 100644
--- a/libclamav/c++/llvm/lib/MC/MCContext.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCContext.cpp
@@ -8,35 +8,68 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/MCValue.h"
+#include "llvm/MC/MCLabel.h"
+#include "llvm/MC/MCDwarf.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
using namespace llvm;
-MCContext::MCContext() {
+typedef StringMap<const MCSectionMachO*> MachOUniqueMapTy;
+typedef StringMap<const MCSectionELF*> ELFUniqueMapTy;
+typedef StringMap<const MCSectionCOFF*> COFFUniqueMapTy;
+
+
+MCContext::MCContext(const MCAsmInfo &mai) : MAI(mai), NextUniqueID(0),
+ CurrentDwarfLoc(0,0,0,0,0) {
+ MachOUniquingMap = 0;
+ ELFUniquingMap = 0;
+ COFFUniquingMap = 0;
+
+ SecureLogFile = getenv("AS_SECURE_LOG_FILE");
+ SecureLog = 0;
+ SecureLogUsed = false;
+
+ DwarfLocSeen = false;
}
MCContext::~MCContext() {
- // NOTE: The sections are all allocated out of a bump pointer allocator,
+ // NOTE: The symbols are all allocated out of a bump pointer allocator,
// we don't need to free them here.
-}
+
+ // If we have the MachO uniquing map, free it.
+ delete (MachOUniqueMapTy*)MachOUniquingMap;
+ delete (ELFUniqueMapTy*)ELFUniquingMap;
+ delete (COFFUniqueMapTy*)COFFUniquingMap;
-MCSymbol *MCContext::CreateSymbol(StringRef Name) {
- assert(Name[0] != '\0' && "Normal symbols cannot be unnamed!");
-
- // Create and bind the symbol, and ensure that names are unique.
- MCSymbol *&Entry = Symbols[Name];
- assert(!Entry && "Duplicate symbol definition!");
- return Entry = new (*this) MCSymbol(Name, false);
+ // If the stream for the .secure_log_unique directive was created free it.
+ delete (raw_ostream*)SecureLog;
}
+//===----------------------------------------------------------------------===//
+// Symbol Manipulation
+//===----------------------------------------------------------------------===//
+
MCSymbol *MCContext::GetOrCreateSymbol(StringRef Name) {
- MCSymbol *&Entry = Symbols[Name];
- if (Entry) return Entry;
+ assert(!Name.empty() && "Normal symbols cannot be unnamed!");
+
+ // Determine whether this is an assembler temporary or normal label.
+ bool isTemporary = Name.startswith(MAI.getPrivateGlobalPrefix());
+
+ // Do the lookup and get the entire StringMapEntry. We want access to the
+ // key if we are creating the entry.
+ StringMapEntry<MCSymbol*> &Entry = Symbols.GetOrCreateValue(Name);
+ if (Entry.getValue()) return Entry.getValue();
- return Entry = new (*this) MCSymbol(Name, false);
+ // Ok, the entry doesn't already exist. Have the MCSymbol object itself refer
+ // to the copy of the string that is embedded in the StringMapEntry.
+ MCSymbol *Result = new (*this) MCSymbol(Entry.getKey(), isTemporary);
+ Entry.setValue(Result);
+ return Result;
}
MCSymbol *MCContext::GetOrCreateSymbol(const Twine &Name) {
@@ -45,18 +78,188 @@ MCSymbol *MCContext::GetOrCreateSymbol(const Twine &Name) {
return GetOrCreateSymbol(NameSV.str());
}
+MCSymbol *MCContext::CreateTempSymbol() {
+ return GetOrCreateSymbol(Twine(MAI.getPrivateGlobalPrefix()) +
+ "tmp" + Twine(NextUniqueID++));
+}
+
+unsigned MCContext::NextInstance(int64_t LocalLabelVal) {
+ MCLabel *&Label = Instances[LocalLabelVal];
+ if (!Label)
+ Label = new (*this) MCLabel(0);
+ return Label->incInstance();
+}
+
+unsigned MCContext::GetInstance(int64_t LocalLabelVal) {
+ MCLabel *&Label = Instances[LocalLabelVal];
+ if (!Label)
+ Label = new (*this) MCLabel(0);
+ return Label->getInstance();
+}
-MCSymbol *MCContext::CreateTemporarySymbol(StringRef Name) {
- // If unnamed, just create a symbol.
- if (Name.empty())
- new (*this) MCSymbol("", true);
-
- // Otherwise create as usual.
- MCSymbol *&Entry = Symbols[Name];
- assert(!Entry && "Duplicate symbol definition!");
- return Entry = new (*this) MCSymbol(Name, true);
+MCSymbol *MCContext::CreateDirectionalLocalSymbol(int64_t LocalLabelVal) {
+ return GetOrCreateSymbol(Twine(MAI.getPrivateGlobalPrefix()) +
+ Twine(LocalLabelVal) +
+ "\2" +
+ Twine(NextInstance(LocalLabelVal)));
+}
+MCSymbol *MCContext::GetDirectionalLocalSymbol(int64_t LocalLabelVal,
+ int bORf) {
+ return GetOrCreateSymbol(Twine(MAI.getPrivateGlobalPrefix()) +
+ Twine(LocalLabelVal) +
+ "\2" +
+ Twine(GetInstance(LocalLabelVal) + bORf));
}
MCSymbol *MCContext::LookupSymbol(StringRef Name) const {
return Symbols.lookup(Name);
}
+
+//===----------------------------------------------------------------------===//
+// Section Management
+//===----------------------------------------------------------------------===//
+
+const MCSectionMachO *MCContext::
+getMachOSection(StringRef Segment, StringRef Section,
+ unsigned TypeAndAttributes,
+ unsigned Reserved2, SectionKind Kind) {
+
+ // We unique sections by their segment/section pair. The returned section
+ // may not have the same flags as the requested section; if so, this should be
+ // diagnosed by the client as an error.
+
+ // Create the map if it doesn't already exist.
+ if (MachOUniquingMap == 0)
+ MachOUniquingMap = new MachOUniqueMapTy();
+ MachOUniqueMapTy &Map = *(MachOUniqueMapTy*)MachOUniquingMap;
+
+ // Form the name to look up.
+ SmallString<64> Name;
+ Name += Segment;
+ Name.push_back(',');
+ Name += Section;
+
+ // Do the lookup, if we have a hit, return it.
+ const MCSectionMachO *&Entry = Map[Name.str()];
+ if (Entry) return Entry;
+
+ // Otherwise, return a new section.
+ return Entry = new (*this) MCSectionMachO(Segment, Section, TypeAndAttributes,
+ Reserved2, Kind);
+}
+
+
+const MCSection *MCContext::
+getELFSection(StringRef Section, unsigned Type, unsigned Flags,
+ SectionKind Kind, bool IsExplicit, unsigned EntrySize) {
+ if (ELFUniquingMap == 0)
+ ELFUniquingMap = new ELFUniqueMapTy();
+ ELFUniqueMapTy &Map = *(ELFUniqueMapTy*)ELFUniquingMap;
+
+ // Do the lookup, if we have a hit, return it.
+ StringMapEntry<const MCSectionELF*> &Entry = Map.GetOrCreateValue(Section);
+ if (Entry.getValue()) return Entry.getValue();
+
+ MCSectionELF *Result = new (*this) MCSectionELF(Entry.getKey(), Type, Flags,
+ Kind, IsExplicit, EntrySize);
+ Entry.setValue(Result);
+ return Result;
+}
+
+const MCSection *MCContext::getCOFFSection(StringRef Section,
+ unsigned Characteristics,
+ int Selection,
+ SectionKind Kind) {
+ if (COFFUniquingMap == 0)
+ COFFUniquingMap = new COFFUniqueMapTy();
+ COFFUniqueMapTy &Map = *(COFFUniqueMapTy*)COFFUniquingMap;
+
+ // Do the lookup, if we have a hit, return it.
+ StringMapEntry<const MCSectionCOFF*> &Entry = Map.GetOrCreateValue(Section);
+ if (Entry.getValue()) return Entry.getValue();
+
+ MCSectionCOFF *Result = new (*this) MCSectionCOFF(Entry.getKey(),
+ Characteristics,
+ Selection, Kind);
+
+ Entry.setValue(Result);
+ return Result;
+}
+
+//===----------------------------------------------------------------------===//
+// Dwarf Management
+//===----------------------------------------------------------------------===//
+
+/// GetDwarfFile - takes a file name and number to place in the dwarf file and
+/// directory tables. If the file number has already been allocated, this is an
+/// error: zero is returned and the client reports the error. Otherwise the
+/// allocated file number is returned. The file numbers may be in any order.
+unsigned MCContext::GetDwarfFile(StringRef FileName, unsigned FileNumber) {
+ // TODO: a FileNumber of zero says to use the next available file number.
+ // Note: in GenericAsmParser::ParseDirectiveFile() FileNumber was checked
+ // to not be less than one. This needs to be changed to be not less than zero.
+
+ // Make space for this FileNumber in the MCDwarfFiles vector if needed.
+ if (FileNumber >= MCDwarfFiles.size()) {
+ MCDwarfFiles.resize(FileNumber + 1);
+ } else {
+ MCDwarfFile *&ExistingFile = MCDwarfFiles[FileNumber];
+ if (ExistingFile)
+ // It is an error to see the same number used more than once.
+ return 0;
+ }
+
+ // Get the new MCDwarfFile slot for this FileNumber.
+ MCDwarfFile *&File = MCDwarfFiles[FileNumber];
+
+ // Separate the directory part from the basename of the FileName.
+ std::pair<StringRef, StringRef> Slash = FileName.rsplit('/');
+
+ // Find or make an entry in the MCDwarfDirs vector for this Directory.
+ StringRef Name;
+ unsigned DirIndex;
+ // Capture directory name.
+ if (Slash.second.empty()) {
+ Name = Slash.first;
+ DirIndex = 0; // For FileNames with no directories a DirIndex of 0 is used.
+ } else {
+ StringRef Directory = Slash.first;
+ Name = Slash.second;
+ for (DirIndex = 0; DirIndex < MCDwarfDirs.size(); DirIndex++) {
+ if (Directory == MCDwarfDirs[DirIndex])
+ break;
+ }
+ if (DirIndex >= MCDwarfDirs.size()) {
+ char *Buf = static_cast<char *>(Allocate(Directory.size()));
+ memcpy(Buf, Directory.data(), Directory.size());
+ MCDwarfDirs.push_back(StringRef(Buf, Directory.size()));
+ }
+ // The DirIndex is one-based, as a DirIndex of 0 is used for FileNames with
+ // no directories. MCDwarfDirs[] is unlike MCDwarfFiles[] in that the
+ // directory names are stored at MCDwarfDirs[DirIndex-1], whereas FileNames
+ // are stored at MCDwarfFiles[FileNumber].Name.
+ DirIndex++;
+ }
+
+ // Now make the MCDwarfFile entry and place it in the slot in the MCDwarfFiles
+ // vector.
+ char *Buf = static_cast<char *>(Allocate(Name.size()));
+ memcpy(Buf, Name.data(), Name.size());
+ File = new (*this) MCDwarfFile(StringRef(Buf, Name.size()), DirIndex);
+
+ // return the allocated FileNumber.
+ return FileNumber;
+}
+
+/// ValidateDwarfFileNumber - takes a dwarf file number and returns true if it
+/// is currently assigned, and false otherwise.
+bool MCContext::ValidateDwarfFileNumber(unsigned FileNumber) {
+ if (FileNumber == 0 || FileNumber >= MCDwarfFiles.size())
+ return false;
+
+ MCDwarfFile *&ExistingFile = MCDwarfFiles[FileNumber];
+ if (ExistingFile)
+ return true;
+ else
+ return false;
+}
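For context, the GetDwarfFile() change above implements the DWARF convention where DirIndex 0 means "no directory" and real directory names live in a one-based table. Below is a minimal stand-alone sketch of that bookkeeping; the names and plain STL types are hypothetical stand-ins, not the MCContext API from the hunk.

    // Hypothetical stand-alone illustration; not the MCContext API.
    #include <cstdio>
    #include <string>
    #include <vector>

    struct DwarfFile { std::string Name; unsigned DirIndex; };

    static unsigned addDwarfFile(std::vector<std::string> &Dirs,
                                 std::vector<DwarfFile> &Files,
                                 const std::string &FileName,
                                 unsigned FileNumber) {
      // Grow the file table if needed; reusing a number is an error (return 0).
      if (FileNumber >= Files.size())
        Files.resize(FileNumber + 1);
      else if (!Files[FileNumber].Name.empty())
        return 0;

      // Split "dir/name"; a path with no '/' gets DirIndex 0 ("no directory").
      std::string::size_type Slash = FileName.rfind('/');
      std::string Name;
      unsigned DirIndex = 0;
      if (Slash == std::string::npos) {
        Name = FileName;
      } else {
        std::string Directory = FileName.substr(0, Slash);
        Name = FileName.substr(Slash + 1);
        unsigned i = 0;
        while (i < Dirs.size() && Dirs[i] != Directory)
          ++i;
        if (i == Dirs.size())
          Dirs.push_back(Directory);
        DirIndex = i + 1;               // Directory table is one-based.
      }

      Files[FileNumber].Name = Name;
      Files[FileNumber].DirIndex = DirIndex;
      return FileNumber;
    }

    int main() {
      std::vector<std::string> Dirs;
      std::vector<DwarfFile> Files;
      addDwarfFile(Dirs, Files, "/src/foo.c", 1); // Dirs[0] == "/src", DirIndex 1
      addDwarfFile(Dirs, Files, "bar.c", 2);      // no directory, DirIndex 0
      std::printf("%s dir=%u\n", Files[1].Name.c_str(), Files[1].DirIndex);
      return 0;
    }

Here "/src/foo.c" stores "/src" at Dirs[0] but records DirIndex 1, while a bare "bar.c" gets DirIndex 0, matching the comments in the hunk above.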
diff --git a/libclamav/c++/llvm/lib/MC/MCDwarf.cpp b/libclamav/c++/llvm/lib/MC/MCDwarf.cpp
new file mode 100644
index 0000000..2da71f9
--- /dev/null
+++ b/libclamav/c++/llvm/lib/MC/MCDwarf.cpp
@@ -0,0 +1,21 @@
+//===- lib/MC/MCDwarf.cpp - MCDwarf implementation ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCDwarf.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+void MCDwarfFile::print(raw_ostream &OS) const {
+ OS << '"' << getName() << '"';
+}
+
+void MCDwarfFile::dump() const {
+ print(dbgs());
+}
diff --git a/libclamav/c++/llvm/lib/MC/MCELFStreamer.cpp b/libclamav/c++/llvm/lib/MC/MCELFStreamer.cpp
new file mode 100644
index 0000000..570c391
--- /dev/null
+++ b/libclamav/c++/llvm/lib/MC/MCELFStreamer.cpp
@@ -0,0 +1,408 @@
+//===- lib/MC/MCELFStreamer.cpp - ELF Object Output ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file assembles .s files and emits ELF .o object files.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCStreamer.h"
+
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCELFSymbolFlags.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCObjectStreamer.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetAsmBackend.h"
+
+using namespace llvm;
+
+namespace {
+
+class MCELFStreamer : public MCObjectStreamer {
+ void EmitInstToFragment(const MCInst &Inst);
+ void EmitInstToData(const MCInst &Inst);
+public:
+ MCELFStreamer(MCContext &Context, TargetAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter)
+ : MCObjectStreamer(Context, TAB, OS, Emitter) {}
+
+ ~MCELFStreamer() {}
+
+ /// @name MCStreamer Interface
+ /// @{
+
+ virtual void EmitLabel(MCSymbol *Symbol);
+ virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
+ virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
+ virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute);
+ virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
+ assert(0 && "ELF doesn't support this directive");
+ }
+ virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment);
+ virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol) {
+ assert(0 && "ELF doesn't support this directive");
+ }
+
+ virtual void EmitCOFFSymbolStorageClass(int StorageClass) {
+ assert(0 && "ELF doesn't support this directive");
+ }
+
+ virtual void EmitCOFFSymbolType(int Type) {
+ assert(0 && "ELF doesn't support this directive");
+ }
+
+ virtual void EndCOFFSymbolDef() {
+ assert(0 && "ELF doesn't support this directive");
+ }
+
+ virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+ SD.setSize(Value);
+ }
+
+ virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size) {
+ assert(0 && "ELF doesn't support this directive");
+ }
+ virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
+ unsigned Size = 0, unsigned ByteAlignment = 0) {
+ assert(0 && "ELF doesn't support this directive");
+ }
+ virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment = 0) {
+ assert(0 && "ELF doesn't support this directive");
+ }
+ virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
+ virtual void EmitValue(const MCExpr *Value, unsigned Size,unsigned AddrSpace);
+ virtual void EmitGPRel32Value(const MCExpr *Value) {
+ assert(0 && "ELF doesn't support this directive");
+ }
+ virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
+ unsigned ValueSize = 1,
+ unsigned MaxBytesToEmit = 0);
+ virtual void EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit = 0);
+ virtual void EmitValueToOffset(const MCExpr *Offset,
+ unsigned char Value = 0);
+
+ virtual void EmitFileDirective(StringRef Filename);
+ virtual void EmitDwarfFileDirective(unsigned FileNo, StringRef Filename) {
+ DEBUG(dbgs() << "FIXME: MCELFStreamer:EmitDwarfFileDirective not implemented\n");
+ }
+
+ virtual void EmitInstruction(const MCInst &Inst);
+ virtual void Finish();
+
+ /// @}
+};
+
+} // end anonymous namespace.
+
+void MCELFStreamer::EmitLabel(MCSymbol *Symbol) {
+ assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
+
+ // FIXME: This is wasteful; we don't necessarily need to create a data
+ // fragment. Instead, we should mark the symbol as pointing into the data
+ // fragment if it exists, otherwise we should just queue the label and set its
+ // fragment pointer when we emit the next fragment.
+ MCDataFragment *F = getOrCreateDataFragment();
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+ assert(!SD.getFragment() && "Unexpected fragment on symbol data!");
+ SD.setFragment(F);
+ SD.setOffset(F->getContents().size());
+
+ Symbol->setSection(*CurSection);
+}
+
+void MCELFStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
+ switch (Flag) {
+ case MCAF_SubsectionsViaSymbols:
+ getAssembler().setSubsectionsViaSymbols(true);
+ return;
+ }
+
+ assert(0 && "invalid assembler flag!");
+}
+
+void MCELFStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
+ // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
+ // MCObjectStreamer.
+ // FIXME: Lift context changes into super class.
+ getAssembler().getOrCreateSymbolData(*Symbol);
+ Symbol->setVariableValue(AddValueSymbols(Value));
+}
+
+void MCELFStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
+ MCSymbolAttr Attribute) {
+ // Indirect symbols are handled differently, to match how 'as' handles
+ // them. This makes writing matching .o files easier.
+ if (Attribute == MCSA_IndirectSymbol) {
+ // Note that we intentionally cannot use the symbol data here; this is
+ // important for matching the string table that 'as' generates.
+ IndirectSymbolData ISD;
+ ISD.Symbol = Symbol;
+ ISD.SectionData = getCurrentSectionData();
+ getAssembler().getIndirectSymbols().push_back(ISD);
+ return;
+ }
+
+ // Adding a symbol attribute always introduces the symbol, note that an
+ // important side effect of calling getOrCreateSymbolData here is to register
+ // the symbol with the assembler.
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+
+ // The implementation of symbol attributes is designed to match 'as', but it
+ // leaves much to be desired. It doesn't really make sense to arbitrarily add and
+ // remove flags, but 'as' allows this (in particular, see .desc).
+ //
+ // In the future it might be worth trying to make these operations better
+ // defined.
+ switch (Attribute) {
+ case MCSA_LazyReference:
+ case MCSA_Reference:
+ case MCSA_NoDeadStrip:
+ case MCSA_PrivateExtern:
+ case MCSA_WeakDefinition:
+ case MCSA_WeakDefAutoPrivate:
+ case MCSA_Invalid:
+ case MCSA_ELF_TypeIndFunction:
+ case MCSA_IndirectSymbol:
+ assert(0 && "Invalid symbol attribute for ELF!");
+ break;
+
+ case MCSA_Global:
+ SD.setFlags(SD.getFlags() | ELF_STB_Global);
+ SD.setExternal(true);
+ break;
+
+ case MCSA_WeakReference:
+ case MCSA_Weak:
+ SD.setFlags(SD.getFlags() | ELF_STB_Weak);
+ break;
+
+ case MCSA_Local:
+ SD.setFlags(SD.getFlags() | ELF_STB_Local);
+ break;
+
+ case MCSA_ELF_TypeFunction:
+ SD.setFlags(SD.getFlags() | ELF_STT_Func);
+ break;
+
+ case MCSA_ELF_TypeObject:
+ SD.setFlags(SD.getFlags() | ELF_STT_Object);
+ break;
+
+ case MCSA_ELF_TypeTLS:
+ SD.setFlags(SD.getFlags() | ELF_STT_Tls);
+ break;
+
+ case MCSA_ELF_TypeCommon:
+ SD.setFlags(SD.getFlags() | ELF_STT_Common);
+ break;
+
+ case MCSA_ELF_TypeNoType:
+ SD.setFlags(SD.getFlags() | ELF_STT_Notype);
+ break;
+
+ case MCSA_Protected:
+ SD.setFlags(SD.getFlags() | ELF_STV_Protected);
+ break;
+
+ case MCSA_Hidden:
+ SD.setFlags(SD.getFlags() | ELF_STV_Hidden);
+ break;
+
+ case MCSA_Internal:
+ SD.setFlags(SD.getFlags() | ELF_STV_Internal);
+ break;
+ }
+}
+
+void MCELFStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) {
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+
+ if ((SD.getFlags() & (0xf << ELF_STB_Shift)) == ELF_STB_Local) {
+ const MCSection *Section = getAssembler().getContext().getELFSection(".bss",
+ MCSectionELF::SHT_NOBITS,
+ MCSectionELF::SHF_WRITE |
+ MCSectionELF::SHF_ALLOC,
+ SectionKind::getBSS());
+
+ MCSectionData &SectData = getAssembler().getOrCreateSectionData(*Section);
+ MCFragment *F = new MCFillFragment(0, 0, Size, &SectData);
+ SD.setFragment(F);
+ Symbol->setSection(*Section);
+ SD.setSize(MCConstantExpr::Create(Size, getContext()));
+ }
+
+ SD.setFlags(SD.getFlags() | ELF_STB_Global);
+ SD.setExternal(true);
+
+ SD.setCommon(Size, ByteAlignment);
+}
+
+void MCELFStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
+ // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
+ // MCObjectStreamer.
+ getOrCreateDataFragment()->getContents().append(Data.begin(), Data.end());
+}
+
+void MCELFStreamer::EmitValue(const MCExpr *Value, unsigned Size,
+ unsigned AddrSpace) {
+ // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
+ // MCObjectStreamer.
+ MCDataFragment *DF = getOrCreateDataFragment();
+
+ // Avoid fixups when possible.
+ int64_t AbsValue;
+ if (AddValueSymbols(Value)->EvaluateAsAbsolute(AbsValue)) {
+ // FIXME: Endianness assumption.
+ for (unsigned i = 0; i != Size; ++i)
+ DF->getContents().push_back(uint8_t(AbsValue >> (i * 8)));
+ } else {
+ DF->addFixup(MCFixup::Create(DF->getContents().size(), AddValueSymbols(Value),
+ MCFixup::getKindForSize(Size)));
+ DF->getContents().resize(DF->getContents().size() + Size, 0);
+ }
+}
+
+void MCELFStreamer::EmitValueToAlignment(unsigned ByteAlignment,
+ int64_t Value, unsigned ValueSize,
+ unsigned MaxBytesToEmit) {
+ // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
+ // MCObjectStreamer.
+ if (MaxBytesToEmit == 0)
+ MaxBytesToEmit = ByteAlignment;
+ new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit,
+ getCurrentSectionData());
+
+ // Update the maximum alignment on the current section if necessary.
+ if (ByteAlignment > getCurrentSectionData()->getAlignment())
+ getCurrentSectionData()->setAlignment(ByteAlignment);
+}
+
+void MCELFStreamer::EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit) {
+ // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
+ // MCObjectStreamer.
+ if (MaxBytesToEmit == 0)
+ MaxBytesToEmit = ByteAlignment;
+ MCAlignFragment *F = new MCAlignFragment(ByteAlignment, 0, 1, MaxBytesToEmit,
+ getCurrentSectionData());
+ F->setEmitNops(true);
+
+ // Update the maximum alignment on the current section if necessary.
+ if (ByteAlignment > getCurrentSectionData()->getAlignment())
+ getCurrentSectionData()->setAlignment(ByteAlignment);
+}
+
+void MCELFStreamer::EmitValueToOffset(const MCExpr *Offset,
+ unsigned char Value) {
+ // TODO: This is exactly the same as MCMachOStreamer. Consider merging into
+ // MCObjectStreamer.
+ new MCOrgFragment(*Offset, Value, getCurrentSectionData());
+}
+
+// Add a symbol for the file name of this module. This is the second
+// entry in the module's symbol table (the first being the null symbol).
+void MCELFStreamer::EmitFileDirective(StringRef Filename) {
+ MCSymbol *Symbol = getAssembler().getContext().GetOrCreateSymbol(Filename);
+ Symbol->setSection(*CurSection);
+ Symbol->setAbsolute();
+
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+
+ SD.setFlags(ELF_STT_File | ELF_STB_Local | ELF_STV_Default);
+}
+
+void MCELFStreamer::EmitInstToFragment(const MCInst &Inst) {
+ MCInstFragment *IF = new MCInstFragment(Inst, getCurrentSectionData());
+
+ // Add the fixups and data.
+ //
+ // FIXME: Revisit this design decision when relaxation is done; we may be
+ // able to get away with not storing any extra data in the MCInst.
+ SmallVector<MCFixup, 4> Fixups;
+ SmallString<256> Code;
+ raw_svector_ostream VecOS(Code);
+ getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
+ VecOS.flush();
+
+ IF->getCode() = Code;
+ IF->getFixups() = Fixups;
+}
+
+void MCELFStreamer::EmitInstToData(const MCInst &Inst) {
+ MCDataFragment *DF = getOrCreateDataFragment();
+
+ SmallVector<MCFixup, 4> Fixups;
+ SmallString<256> Code;
+ raw_svector_ostream VecOS(Code);
+ getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
+ VecOS.flush();
+
+ // Add the fixups and data.
+ for (unsigned i = 0, e = Fixups.size(); i != e; ++i) {
+ Fixups[i].setOffset(Fixups[i].getOffset() + DF->getContents().size());
+ DF->addFixup(Fixups[i]);
+ }
+ DF->getContents().append(Code.begin(), Code.end());
+}
+
+void MCELFStreamer::EmitInstruction(const MCInst &Inst) {
+ // Scan for values.
+ for (unsigned i = 0; i != Inst.getNumOperands(); ++i)
+ if (Inst.getOperand(i).isExpr())
+ AddValueSymbols(Inst.getOperand(i).getExpr());
+
+ getCurrentSectionData()->setHasInstructions(true);
+
+ // If this instruction doesn't need relaxation, just emit it as data.
+ if (!getAssembler().getBackend().MayNeedRelaxation(Inst)) {
+ EmitInstToData(Inst);
+ return;
+ }
+
+ // Otherwise, if we are relaxing everything, relax the instruction as much as
+ // possible and emit it as data.
+ if (getAssembler().getRelaxAll()) {
+ MCInst Relaxed;
+ getAssembler().getBackend().RelaxInstruction(Inst, Relaxed);
+ while (getAssembler().getBackend().MayNeedRelaxation(Relaxed))
+ getAssembler().getBackend().RelaxInstruction(Relaxed, Relaxed);
+ EmitInstToData(Relaxed);
+ return;
+ }
+
+ // Otherwise emit to a separate fragment.
+ EmitInstToFragment(Inst);
+}
+
+void MCELFStreamer::Finish() {
+ getAssembler().Finish();
+}
+
+MCStreamer *llvm::createELFStreamer(MCContext &Context, TargetAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *CE,
+ bool RelaxAll) {
+ MCELFStreamer *S = new MCELFStreamer(Context, TAB, OS, CE);
+ if (RelaxAll)
+ S->getAssembler().setRelaxAll(true);
+ return S;
+}
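The interesting control flow in the new streamer is EmitInstruction(): an instruction goes straight into a data fragment unless the backend reports it may still need relaxation, and with RelaxAll set it is first relaxed to a fixed point and then emitted as plain data. A toy sketch of that fixed-point loop follows; Backend and Inst here are stand-ins, not the real TargetAsmBackend/MCInst interfaces.

    // Hypothetical stand-in types; not the real TargetAsmBackend/MCInst.
    #include <cstdio>

    struct Inst { unsigned Size; };

    struct Backend {
      // Pretend anything shorter than 5 bytes (e.g. a short branch) may grow.
      bool MayNeedRelaxation(const Inst &I) const { return I.Size < 5; }
      void RelaxInstruction(const Inst &In, Inst &Out) const { Out.Size = In.Size + 1; }
    };

    int main() {
      Backend B;
      Inst I = {2};                       // a 2-byte instruction that can grow
      if (B.MayNeedRelaxation(I)) {
        Inst Relaxed;
        B.RelaxInstruction(I, Relaxed);
        // Same loop as MCELFStreamer::EmitInstruction with RelaxAll set:
        while (B.MayNeedRelaxation(Relaxed))
          B.RelaxInstruction(Relaxed, Relaxed);
        I = Relaxed;                      // now safe to emit as plain data
      }
      std::printf("final size: %u bytes\n", I.Size);
      return 0;
    }

On the RelaxAll path the real code then hands the fully relaxed instruction to EmitInstToData(), so no MCInstFragment is created at all.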
diff --git a/libclamav/c++/llvm/lib/MC/MCExpr.cpp b/libclamav/c++/llvm/lib/MC/MCExpr.cpp
index e419043..343f334 100644
--- a/libclamav/c++/llvm/lib/MC/MCExpr.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCExpr.cpp
@@ -7,14 +7,26 @@
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "mcexpr"
#include "llvm/MC/MCExpr.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetAsmBackend.h"
using namespace llvm;
+namespace {
+namespace stats {
+STATISTIC(MCExprEvaluate, "Number of MCExpr evaluations");
+}
+}
+
void MCExpr::print(raw_ostream &OS) const {
switch (getKind()) {
case MCExpr::Target:
@@ -24,14 +36,25 @@ void MCExpr::print(raw_ostream &OS) const {
return;
case MCExpr::SymbolRef: {
- const MCSymbol &Sym = cast<MCSymbolRefExpr>(*this).getSymbol();
-
+ const MCSymbolRefExpr &SRE = cast<MCSymbolRefExpr>(*this);
+ const MCSymbol &Sym = SRE.getSymbol();
+
+ if (SRE.getKind() == MCSymbolRefExpr::VK_ARM_HI16 ||
+ SRE.getKind() == MCSymbolRefExpr::VK_ARM_LO16)
+ OS << MCSymbolRefExpr::getVariantKindName(SRE.getKind());
+
// Parenthesize names that start with $ so that they don't look like
// absolute names.
if (Sym.getName()[0] == '$')
OS << '(' << Sym << ')';
else
OS << Sym;
+
+ if (SRE.getKind() != MCSymbolRefExpr::VK_None &&
+ SRE.getKind() != MCSymbolRefExpr::VK_ARM_HI16 &&
+ SRE.getKind() != MCSymbolRefExpr::VK_ARM_LO16)
+ OS << '@' << MCSymbolRefExpr::getVariantKindName(SRE.getKind());
+
return;
}
@@ -50,14 +73,14 @@ void MCExpr::print(raw_ostream &OS) const {
case MCExpr::Binary: {
const MCBinaryExpr &BE = cast<MCBinaryExpr>(*this);
-
+
// Only print parens around the LHS if it is non-trivial.
if (isa<MCConstantExpr>(BE.getLHS()) || isa<MCSymbolRefExpr>(BE.getLHS())) {
OS << *BE.getLHS();
} else {
OS << '(' << *BE.getLHS() << ')';
}
-
+
switch (BE.getOpcode()) {
default: assert(0 && "Invalid opcode!");
case MCBinaryExpr::Add:
@@ -68,7 +91,7 @@ void MCExpr::print(raw_ostream &OS) const {
return;
}
}
-
+
OS << '+';
break;
case MCBinaryExpr::And: OS << '&'; break;
@@ -89,7 +112,7 @@ void MCExpr::print(raw_ostream &OS) const {
case MCBinaryExpr::Sub: OS << '-'; break;
case MCBinaryExpr::Xor: OS << '^'; break;
}
-
+
// Only print parens around the LHS if it is non-trivial.
if (isa<MCConstantExpr>(BE.getRHS()) || isa<MCSymbolRefExpr>(BE.getRHS())) {
OS << *BE.getRHS();
@@ -124,39 +147,88 @@ const MCConstantExpr *MCConstantExpr::Create(int64_t Value, MCContext &Ctx) {
return new (Ctx) MCConstantExpr(Value);
}
+/* *** */
+
const MCSymbolRefExpr *MCSymbolRefExpr::Create(const MCSymbol *Sym,
+ VariantKind Kind,
MCContext &Ctx) {
- return new (Ctx) MCSymbolRefExpr(Sym);
+ return new (Ctx) MCSymbolRefExpr(Sym, Kind);
}
-const MCSymbolRefExpr *MCSymbolRefExpr::Create(StringRef Name, MCContext &Ctx) {
- return Create(Ctx.GetOrCreateSymbol(Name), Ctx);
+const MCSymbolRefExpr *MCSymbolRefExpr::Create(StringRef Name, VariantKind Kind,
+ MCContext &Ctx) {
+ return Create(Ctx.GetOrCreateSymbol(Name), Kind, Ctx);
+}
+
+StringRef MCSymbolRefExpr::getVariantKindName(VariantKind Kind) {
+ switch (Kind) {
+ default:
+ case VK_Invalid: return "<<invalid>>";
+ case VK_None: return "<<none>>";
+
+ case VK_GOT: return "GOT";
+ case VK_GOTOFF: return "GOTOFF";
+ case VK_GOTPCREL: return "GOTPCREL";
+ case VK_GOTTPOFF: return "GOTTPOFF";
+ case VK_INDNTPOFF: return "INDNTPOFF";
+ case VK_NTPOFF: return "NTPOFF";
+ case VK_PLT: return "PLT";
+ case VK_TLSGD: return "TLSGD";
+ case VK_TPOFF: return "TPOFF";
+ case VK_ARM_HI16: return ":upper16:";
+ case VK_ARM_LO16: return ":lower16:";
+ case VK_TLVP: return "TLVP";
+ }
}
+MCSymbolRefExpr::VariantKind
+MCSymbolRefExpr::getVariantKindForName(StringRef Name) {
+ return StringSwitch<VariantKind>(Name)
+ .Case("GOT", VK_GOT)
+ .Case("GOTOFF", VK_GOTOFF)
+ .Case("GOTPCREL", VK_GOTPCREL)
+ .Case("GOTTPOFF", VK_GOTTPOFF)
+ .Case("INDNTPOFF", VK_INDNTPOFF)
+ .Case("NTPOFF", VK_NTPOFF)
+ .Case("PLT", VK_PLT)
+ .Case("TLSGD", VK_TLSGD)
+ .Case("TPOFF", VK_TPOFF)
+ .Case("TLVP", VK_TLVP)
+ .Default(VK_Invalid);
+}
+
+/* *** */
+
void MCTargetExpr::Anchor() {}
/* *** */
-bool MCExpr::EvaluateAsAbsolute(int64_t &Res) const {
+bool MCExpr::EvaluateAsAbsolute(int64_t &Res, const MCAsmLayout *Layout) const {
MCValue Value;
-
- if (!EvaluateAsRelocatable(Value) || !Value.isAbsolute())
+
+ // Fast path constants.
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(this)) {
+ Res = CE->getValue();
+ return true;
+ }
+
+ if (!EvaluateAsRelocatable(Value, Layout) || !Value.isAbsolute())
return false;
Res = Value.getConstant();
return true;
}
-static bool EvaluateSymbolicAdd(const MCValue &LHS, const MCSymbol *RHS_A,
- const MCSymbol *RHS_B, int64_t RHS_Cst,
+static bool EvaluateSymbolicAdd(const MCValue &LHS,const MCSymbolRefExpr *RHS_A,
+ const MCSymbolRefExpr *RHS_B, int64_t RHS_Cst,
MCValue &Res) {
// We can't add or subtract two symbols.
if ((LHS.getSymA() && RHS_A) ||
(LHS.getSymB() && RHS_B))
return false;
- const MCSymbol *A = LHS.getSymA() ? LHS.getSymA() : RHS_A;
- const MCSymbol *B = LHS.getSymB() ? LHS.getSymB() : RHS_B;
+ const MCSymbolRefExpr *A = LHS.getSymA() ? LHS.getSymA() : RHS_A;
+ const MCSymbolRefExpr *B = LHS.getSymB() ? LHS.getSymB() : RHS_B;
if (B) {
// If we have a negated symbol, then we must have also have a non-negated
// symbol in order to encode the expression. We can do this check later to
@@ -169,23 +241,46 @@ static bool EvaluateSymbolicAdd(const MCValue &LHS, const MCSymbol *RHS_A,
return true;
}
-bool MCExpr::EvaluateAsRelocatable(MCValue &Res) const {
+bool MCExpr::EvaluateAsRelocatable(MCValue &Res,
+ const MCAsmLayout *Layout) const {
+ ++stats::MCExprEvaluate;
+
switch (getKind()) {
case Target:
- return cast<MCTargetExpr>(this)->EvaluateAsRelocatableImpl(Res);
-
+ return cast<MCTargetExpr>(this)->EvaluateAsRelocatableImpl(Res, Layout);
+
case Constant:
Res = MCValue::get(cast<MCConstantExpr>(this)->getValue());
return true;
case SymbolRef: {
- const MCSymbol &Sym = cast<MCSymbolRefExpr>(this)->getSymbol();
+ const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(this);
+ const MCSymbol &Sym = SRE->getSymbol();
// Evaluate recursively if this is a variable.
- if (Sym.isVariable())
- return Sym.getValue()->EvaluateAsRelocatable(Res);
+ if (Sym.isVariable()) {
+ if (!Sym.getVariableValue()->EvaluateAsRelocatable(Res, Layout))
+ return false;
+
+ // Absolutize symbol differences between defined symbols when we have a
+ // layout object and the target requests it.
+ if (Layout && Res.getSymB() &&
+ Layout->getAssembler().getBackend().hasAbsolutizedSet() &&
+ Res.getSymA()->getSymbol().isDefined() &&
+ Res.getSymB()->getSymbol().isDefined()) {
+ MCSymbolData &A =
+ Layout->getAssembler().getSymbolData(Res.getSymA()->getSymbol());
+ MCSymbolData &B =
+ Layout->getAssembler().getSymbolData(Res.getSymB()->getSymbol());
+ Res = MCValue::get(+ Layout->getSymbolAddress(&A)
+ - Layout->getSymbolAddress(&B)
+ + Res.getConstant());
+ }
+
+ return true;
+ }
- Res = MCValue::get(&Sym, 0, 0);
+ Res = MCValue::get(SRE, 0, 0);
return true;
}
@@ -193,7 +288,7 @@ bool MCExpr::EvaluateAsRelocatable(MCValue &Res) const {
const MCUnaryExpr *AUE = cast<MCUnaryExpr>(this);
MCValue Value;
- if (!AUE->getSubExpr()->EvaluateAsRelocatable(Value))
+ if (!AUE->getSubExpr()->EvaluateAsRelocatable(Value, Layout))
return false;
switch (AUE->getOpcode()) {
@@ -206,13 +301,13 @@ bool MCExpr::EvaluateAsRelocatable(MCValue &Res) const {
/// -(a - b + const) ==> (b - a - const)
if (Value.getSymA() && !Value.getSymB())
return false;
- Res = MCValue::get(Value.getSymB(), Value.getSymA(),
- -Value.getConstant());
+ Res = MCValue::get(Value.getSymB(), Value.getSymA(),
+ -Value.getConstant());
break;
case MCUnaryExpr::Not:
if (!Value.isAbsolute())
return false;
- Res = MCValue::get(~Value.getConstant());
+ Res = MCValue::get(~Value.getConstant());
break;
case MCUnaryExpr::Plus:
Res = Value;
@@ -225,9 +320,9 @@ bool MCExpr::EvaluateAsRelocatable(MCValue &Res) const {
case Binary: {
const MCBinaryExpr *ABE = cast<MCBinaryExpr>(this);
MCValue LHSValue, RHSValue;
-
- if (!ABE->getLHS()->EvaluateAsRelocatable(LHSValue) ||
- !ABE->getRHS()->EvaluateAsRelocatable(RHSValue))
+
+ if (!ABE->getLHS()->EvaluateAsRelocatable(LHSValue, Layout) ||
+ !ABE->getRHS()->EvaluateAsRelocatable(RHSValue, Layout))
return false;
// We only support a few operations on non-constant expressions, handle
diff --git a/libclamav/c++/llvm/lib/MC/MCInst.cpp b/libclamav/c++/llvm/lib/MC/MCInst.cpp
index 0634c9f..4cb628b 100644
--- a/libclamav/c++/llvm/lib/MC/MCInst.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCInst.cpp
@@ -9,6 +9,7 @@
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInstPrinter.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -43,6 +44,22 @@ void MCInst::print(raw_ostream &OS, const MCAsmInfo *MAI) const {
OS << ">";
}
+void MCInst::dump_pretty(raw_ostream &OS, const MCAsmInfo *MAI,
+ const MCInstPrinter *Printer,
+ StringRef Separator) const {
+ OS << "<MCInst #" << getOpcode();
+
+ // Show the instruction opcode name if we have access to a printer.
+ if (Printer)
+ OS << ' ' << Printer->getOpcodeName(getOpcode());
+
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ OS << Separator;
+ getOperand(i).print(OS, MAI);
+ }
+ OS << ">";
+}
+
void MCInst::dump() const {
print(dbgs(), 0);
dbgs() << "\n";
diff --git a/libclamav/c++/llvm/lib/MC/MCLabel.cpp b/libclamav/c++/llvm/lib/MC/MCLabel.cpp
new file mode 100644
index 0000000..9c0fc92
--- /dev/null
+++ b/libclamav/c++/llvm/lib/MC/MCLabel.cpp
@@ -0,0 +1,21 @@
+//===- lib/MC/MCLabel.cpp - MCLabel implementation ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCLabel.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+void MCLabel::print(raw_ostream &OS) const {
+ OS << '"' << getInstance() << '"';
+}
+
+void MCLabel::dump() const {
+ print(dbgs());
+}
diff --git a/libclamav/c++/llvm/lib/MC/MCLoggingStreamer.cpp b/libclamav/c++/llvm/lib/MC/MCLoggingStreamer.cpp
new file mode 100644
index 0000000..b96040a
--- /dev/null
+++ b/libclamav/c++/llvm/lib/MC/MCLoggingStreamer.cpp
@@ -0,0 +1,208 @@
+//===- lib/MC/MCLoggingStreamer.cpp - API Logging Streamer ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+namespace {
+
+class MCLoggingStreamer : public MCStreamer {
+ llvm::OwningPtr<MCStreamer> Child;
+
+ raw_ostream &OS;
+
+public:
+ MCLoggingStreamer(MCStreamer *_Child, raw_ostream &_OS)
+ : MCStreamer(_Child->getContext()), Child(_Child), OS(_OS) {}
+
+ void LogCall(const char *Function) {
+ OS << Function << "\n";
+ }
+
+ void LogCall(const char *Function, const Twine &Message) {
+ OS << Function << ": " << Message << "\n";
+ }
+
+ virtual bool isVerboseAsm() const { return Child->isVerboseAsm(); }
+
+ virtual bool hasRawTextSupport() const { return Child->hasRawTextSupport(); }
+
+ virtual raw_ostream &GetCommentOS() { return Child->GetCommentOS(); }
+
+ virtual void AddComment(const Twine &T) {
+ LogCall("AddComment", T);
+ return Child->AddComment(T);
+ }
+
+ virtual void AddBlankLine() {
+ LogCall("AddBlankLine");
+ return Child->AddBlankLine();
+ }
+
+ virtual void SwitchSection(const MCSection *Section) {
+ CurSection = Section;
+ LogCall("SwitchSection");
+ return Child->SwitchSection(Section);
+ }
+
+ virtual void EmitLabel(MCSymbol *Symbol) {
+ LogCall("EmitLabel");
+ return Child->EmitLabel(Symbol);
+ }
+
+ virtual void EmitAssemblerFlag(MCAssemblerFlag Flag) {
+ LogCall("EmitAssemblerFlag");
+ return Child->EmitAssemblerFlag(Flag);
+ }
+
+ virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
+ LogCall("EmitAssignment");
+ return Child->EmitAssignment(Symbol, Value);
+ }
+
+ virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) {
+ LogCall("EmitSymbolAttribute");
+ return Child->EmitSymbolAttribute(Symbol, Attribute);
+ }
+
+ virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
+ LogCall("EmitSymbolDesc");
+ return Child->EmitSymbolDesc(Symbol, DescValue);
+ }
+
+ virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol) {
+ LogCall("BeginCOFFSymbolDef");
+ return Child->BeginCOFFSymbolDef(Symbol);
+ }
+
+ virtual void EmitCOFFSymbolStorageClass(int StorageClass) {
+ LogCall("EmitCOFFSymbolStorageClass");
+ return Child->EmitCOFFSymbolStorageClass(StorageClass);
+ }
+
+ virtual void EmitCOFFSymbolType(int Type) {
+ LogCall("EmitCOFFSymbolType");
+ return Child->EmitCOFFSymbolType(Type);
+ }
+
+ virtual void EndCOFFSymbolDef() {
+ LogCall("EndCOFFSymbolDef");
+ return Child->EndCOFFSymbolDef();
+ }
+
+ virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
+ LogCall("EmitELFSize");
+ return Child->EmitELFSize(Symbol, Value);
+ }
+
+ virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) {
+ LogCall("EmitCommonSymbol");
+ return Child->EmitCommonSymbol(Symbol, Size, ByteAlignment);
+ }
+
+ virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size) {
+ LogCall("EmitLocalCommonSymbol");
+ return Child->EmitLocalCommonSymbol(Symbol, Size);
+ }
+
+ virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
+ unsigned Size = 0, unsigned ByteAlignment = 0) {
+ LogCall("EmitZerofill");
+ return Child->EmitZerofill(Section, Symbol, Size, ByteAlignment);
+ }
+
+ virtual void EmitTBSSSymbol (const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment = 0) {
+ LogCall("EmitTBSSSymbol");
+ return Child->EmitTBSSSymbol(Section, Symbol, Size, ByteAlignment);
+ }
+
+ virtual void EmitBytes(StringRef Data, unsigned AddrSpace) {
+ LogCall("EmitBytes");
+ return Child->EmitBytes(Data, AddrSpace);
+ }
+
+ virtual void EmitValue(const MCExpr *Value, unsigned Size,unsigned AddrSpace){
+ LogCall("EmitValue");
+ return Child->EmitValue(Value, Size, AddrSpace);
+ }
+
+ virtual void EmitIntValue(uint64_t Value, unsigned Size, unsigned AddrSpace) {
+ LogCall("EmitIntValue");
+ return Child->EmitIntValue(Value, Size, AddrSpace);
+ }
+
+ virtual void EmitGPRel32Value(const MCExpr *Value) {
+ LogCall("EmitGPRel32Value");
+ return Child->EmitGPRel32Value(Value);
+ }
+
+ virtual void EmitFill(uint64_t NumBytes, uint8_t FillValue,
+ unsigned AddrSpace) {
+ LogCall("EmitFill");
+ return Child->EmitFill(NumBytes, FillValue, AddrSpace);
+ }
+
+ virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
+ unsigned ValueSize = 1,
+ unsigned MaxBytesToEmit = 0) {
+ LogCall("EmitValueToAlignment");
+ return Child->EmitValueToAlignment(ByteAlignment, Value,
+ ValueSize, MaxBytesToEmit);
+ }
+
+ virtual void EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit = 0) {
+ LogCall("EmitCodeAlignment");
+ return Child->EmitCodeAlignment(ByteAlignment, MaxBytesToEmit);
+ }
+
+ virtual void EmitValueToOffset(const MCExpr *Offset,
+ unsigned char Value = 0) {
+ LogCall("EmitValueToOffset");
+ return Child->EmitValueToOffset(Offset, Value);
+ }
+
+ virtual void EmitFileDirective(StringRef Filename) {
+ LogCall("EmitFileDirective", "FileName:" + Filename);
+ return Child->EmitFileDirective(Filename);
+ }
+
+ virtual void EmitDwarfFileDirective(unsigned FileNo, StringRef Filename) {
+ LogCall("EmitDwarfFileDirective",
+ "FileNo:" + Twine(FileNo) + " Filename:" + Filename);
+ return Child->EmitDwarfFileDirective(FileNo, Filename);
+ }
+
+ virtual void EmitInstruction(const MCInst &Inst) {
+ LogCall("EmitInstruction");
+ return Child->EmitInstruction(Inst);
+ }
+
+ virtual void EmitRawText(StringRef String) {
+ LogCall("EmitRawText", "\"" + String + "\"");
+ return Child->EmitRawText(String);
+ }
+
+ virtual void Finish() {
+ LogCall("Finish");
+ return Child->Finish();
+ }
+
+};
+
+} // end anonymous namespace.
+
+MCStreamer *llvm::createLoggingStreamer(MCStreamer *Child, raw_ostream &OS) {
+ return new MCLoggingStreamer(Child, OS);
+}
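The new logging streamer is a plain decorator: it records the name of every MCStreamer call (plus a few arguments) on a raw_ostream and forwards the call unchanged to the wrapped child. A minimal stand-alone sketch of the same pattern, with a cut-down Streamer interface standing in for MCStreamer:

    // Hypothetical stand-in types; not the real MCStreamer interface.
    #include <cstdio>

    struct Streamer {
      virtual ~Streamer() {}
      virtual void EmitBytes(const char *Data) = 0;
      virtual void Finish() = 0;
    };

    struct RealStreamer : Streamer {
      void EmitBytes(const char *Data) { std::printf("emit: %s\n", Data); }
      void Finish() { std::printf("finished\n"); }
    };

    struct LoggingStreamer : Streamer {
      Streamer &Child;
      std::FILE *Log;
      LoggingStreamer(Streamer &C, std::FILE *L) : Child(C), Log(L) {}
      void LogCall(const char *Fn) { std::fprintf(Log, "%s\n", Fn); }
      void EmitBytes(const char *Data) { LogCall("EmitBytes"); Child.EmitBytes(Data); }
      void Finish() { LogCall("Finish"); Child.Finish(); }
    };

    int main() {
      RealStreamer Real;
      LoggingStreamer S(Real, stderr);   // log each call, then forward it
      S.EmitBytes(".ascii \"hi\"");
      S.Finish();
      return 0;
    }

In the patch itself, createLoggingStreamer(Child, OS) performs the same wiring for the full MCStreamer interface.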
diff --git a/libclamav/c++/llvm/lib/MC/MCMachOStreamer.cpp b/libclamav/c++/llvm/lib/MC/MCMachOStreamer.cpp
index a7a8a5d..671874d 100644
--- a/libclamav/c++/llvm/lib/MC/MCMachOStreamer.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCMachOStreamer.cpp
@@ -14,106 +14,38 @@
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/MCValue.h"
+#include "llvm/MC/MCMachOSymbolFlags.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCDwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetAsmBackend.h"
+
using namespace llvm;
namespace {
-class MCMachOStreamer : public MCStreamer {
- /// SymbolFlags - We store the value for the 'desc' symbol field in the lowest
- /// 16 bits of the implementation defined flags.
- enum SymbolFlags { // See <mach-o/nlist.h>.
- SF_DescFlagsMask = 0xFFFF,
-
- // Reference type flags.
- SF_ReferenceTypeMask = 0x0007,
- SF_ReferenceTypeUndefinedNonLazy = 0x0000,
- SF_ReferenceTypeUndefinedLazy = 0x0001,
- SF_ReferenceTypeDefined = 0x0002,
- SF_ReferenceTypePrivateDefined = 0x0003,
- SF_ReferenceTypePrivateUndefinedNonLazy = 0x0004,
- SF_ReferenceTypePrivateUndefinedLazy = 0x0005,
-
- // Other 'desc' flags.
- SF_NoDeadStrip = 0x0020,
- SF_WeakReference = 0x0040,
- SF_WeakDefinition = 0x0080
- };
-
-private:
- MCAssembler Assembler;
- MCCodeEmitter *Emitter;
- MCSectionData *CurSectionData;
- DenseMap<const MCSection*, MCSectionData*> SectionMap;
- DenseMap<const MCSymbol*, MCSymbolData*> SymbolMap;
-
+class MCMachOStreamer : public MCObjectStreamer {
private:
- MCFragment *getCurrentFragment() const {
- assert(CurSectionData && "No current section!");
-
- if (!CurSectionData->empty())
- return &CurSectionData->getFragmentList().back();
-
- return 0;
- }
-
- MCSectionData &getSectionData(const MCSection &Section) {
- MCSectionData *&Entry = SectionMap[&Section];
-
- if (!Entry)
- Entry = new MCSectionData(Section, &Assembler);
-
- return *Entry;
- }
-
- MCSymbolData &getSymbolData(const MCSymbol &Symbol) {
- MCSymbolData *&Entry = SymbolMap[&Symbol];
-
- if (!Entry)
- Entry = new MCSymbolData(Symbol, 0, 0, &Assembler);
-
- return *Entry;
- }
+ void EmitInstToFragment(const MCInst &Inst);
+ void EmitInstToData(const MCInst &Inst);
+ // FIXME: These will likely be moved to a better place.
+ void MakeLineEntryForSection(const MCSection *Section);
+ const MCExpr * MakeStartMinusEndExpr(MCSymbol *Start, MCSymbol *End,
+ int IntVal);
+ void EmitDwarfFileTable(void);
public:
- MCMachOStreamer(MCContext &Context, raw_ostream &_OS, MCCodeEmitter *_Emitter)
- : MCStreamer(Context), Assembler(Context, _OS), Emitter(_Emitter),
- CurSectionData(0) {}
- ~MCMachOStreamer() {}
-
- const MCExpr *AddValueSymbols(const MCExpr *Value) {
- switch (Value->getKind()) {
- case MCExpr::Target: assert(0 && "Can't handle target exprs yet!");
- case MCExpr::Constant:
- break;
-
- case MCExpr::Binary: {
- const MCBinaryExpr *BE = cast<MCBinaryExpr>(Value);
- AddValueSymbols(BE->getLHS());
- AddValueSymbols(BE->getRHS());
- break;
- }
-
- case MCExpr::SymbolRef:
- getSymbolData(cast<MCSymbolRefExpr>(Value)->getSymbol());
- break;
-
- case MCExpr::Unary:
- AddValueSymbols(cast<MCUnaryExpr>(Value)->getSubExpr());
- break;
- }
-
- return Value;
- }
+ MCMachOStreamer(MCContext &Context, TargetAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *Emitter)
+ : MCObjectStreamer(Context, TAB, OS, Emitter) {}
/// @name MCStreamer Interface
/// @{
- virtual void SwitchSection(const MCSection *Section);
virtual void EmitLabel(MCSymbol *Symbol);
virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
@@ -121,6 +53,18 @@ public:
virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue);
virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment);
+ virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol) {
+ assert(0 && "macho doesn't support this directive");
+ }
+ virtual void EmitCOFFSymbolStorageClass(int StorageClass) {
+ assert(0 && "macho doesn't support this directive");
+ }
+ virtual void EmitCOFFSymbolType(int Type) {
+ assert(0 && "macho doesn't support this directive");
+ }
+ virtual void EndCOFFSymbolDef() {
+ assert(0 && "macho doesn't support this directive");
+ }
virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
assert(0 && "macho doesn't support this directive");
}
@@ -129,6 +73,8 @@ public:
}
virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
unsigned Size = 0, unsigned ByteAlignment = 0);
+ virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment = 0);
virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
virtual void EmitValue(const MCExpr *Value, unsigned Size,unsigned AddrSpace);
virtual void EmitGPRel32Value(const MCExpr *Value) {
@@ -141,15 +87,22 @@ public:
unsigned MaxBytesToEmit = 0);
virtual void EmitValueToOffset(const MCExpr *Offset,
unsigned char Value = 0);
-
+
virtual void EmitFileDirective(StringRef Filename) {
- errs() << "FIXME: MCMachoStreamer:EmitFileDirective not implemented\n";
+ // FIXME: Just ignore the .file; it isn't important enough to fail the
+ // entire assembly.
+
+ //report_fatal_error("unsupported directive: '.file'");
}
virtual void EmitDwarfFileDirective(unsigned FileNo, StringRef Filename) {
- errs() << "FIXME: MCMachoStreamer:EmitDwarfFileDirective not implemented\n";
+ // FIXME: Just ignore the .file; it isn't important enough to fail the
+ // entire assembly.
+
+ //report_fatal_error("unsupported directive: '.file'");
}
-
+
virtual void EmitInstruction(const MCInst &Inst);
+
virtual void Finish();
/// @}
@@ -157,39 +110,45 @@ public:
} // end anonymous namespace.
-void MCMachOStreamer::SwitchSection(const MCSection *Section) {
- assert(Section && "Cannot switch to a null section!");
-
- // If already in this section, then this is a noop.
- if (Section == CurSection) return;
-
- CurSection = Section;
- CurSectionData = &getSectionData(*Section);
-}
-
void MCMachOStreamer::EmitLabel(MCSymbol *Symbol) {
+ // TODO: This is almost exactly the same as WinCOFFStreamer. Consider merging
+ // into MCObjectStreamer.
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
+ assert(!Symbol->isVariable() && "Cannot emit a variable symbol!");
+ assert(CurSection && "Cannot emit before setting section!");
+
+ Symbol->setSection(*CurSection);
- // FIXME: We should also use offsets into Fill fragments.
- MCDataFragment *F = dyn_cast_or_null<MCDataFragment>(getCurrentFragment());
- if (!F)
- F = new MCDataFragment(CurSectionData);
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
- MCSymbolData &SD = getSymbolData(*Symbol);
+ // We have to create a new fragment if this is an atom-defining symbol;
+ // fragments cannot span atoms.
+ if (getAssembler().isSymbolLinkerVisible(SD.getSymbol()))
+ new MCDataFragment(getCurrentSectionData());
+
+ // FIXME: This is wasteful; we don't necessarily need to create a data
+ // fragment. Instead, we should mark the symbol as pointing into the data
+ // fragment if it exists, otherwise we should just queue the label and set its
+ // fragment pointer when we emit the next fragment.
+ MCDataFragment *F = getOrCreateDataFragment();
assert(!SD.getFragment() && "Unexpected fragment on symbol data!");
SD.setFragment(F);
SD.setOffset(F->getContents().size());
- // This causes the reference type and weak reference flags to be cleared.
- SD.setFlags(SD.getFlags() & ~(SF_WeakReference | SF_ReferenceTypeMask));
-
- Symbol->setSection(*CurSection);
+ // This causes the reference type flag to be cleared. Darwin 'as' was "trying"
+ // to clear the weak reference and weak definition bits too, but the
+ // implementation was buggy. For now we just try to match 'as', for
+ // diffability.
+ //
+ // FIXME: Cleanup this code, these bits should be emitted based on semantic
+ // properties, not on the order of definition, etc.
+ SD.setFlags(SD.getFlags() & ~SF_ReferenceTypeMask);
}
void MCMachOStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
switch (Flag) {
case MCAF_SubsectionsViaSymbols:
- Assembler.setSubsectionsViaSymbols(true);
+ getAssembler().setSubsectionsViaSymbols(true);
return;
}
@@ -197,13 +156,11 @@ void MCMachOStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
}
void MCMachOStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
- // Only absolute symbols can be redefined.
- assert((Symbol->isUndefined() || Symbol->isAbsolute()) &&
- "Cannot define a symbol twice!");
-
+ // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
+ // MCObjectStreamer.
// FIXME: Lift context changes into super class.
- // FIXME: Set associated section.
- Symbol->setValue(Value);
+ getAssembler().getOrCreateSymbolData(*Symbol);
+ Symbol->setVariableValue(AddValueSymbols(Value));
}
void MCMachOStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
@@ -215,15 +172,15 @@ void MCMachOStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
// important for matching the string table that 'as' generates.
IndirectSymbolData ISD;
ISD.Symbol = Symbol;
- ISD.SectionData = CurSectionData;
- Assembler.getIndirectSymbols().push_back(ISD);
+ ISD.SectionData = getCurrentSectionData();
+ getAssembler().getIndirectSymbols().push_back(ISD);
return;
}
// Adding a symbol attribute always introduces the symbol, note that an
- // important side effect of calling getSymbolData here is to register the
- // symbol with the assembler.
- MCSymbolData &SD = getSymbolData(*Symbol);
+ // important side effect of calling getOrCreateSymbolData here is to register
+ // the symbol with the assembler.
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
// The implementation of symbol attributes is designed to match 'as', but it
 // leaves much to be desired. It doesn't really make sense to arbitrarily add and
@@ -250,6 +207,13 @@ void MCMachOStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
case MCSA_Global:
SD.setExternal(true);
+ // This effectively clears the undefined lazy bit, in Darwin 'as', although
+ // it isn't very consistent because it implements this as part of symbol
+ // lookup.
+ //
+ // FIXME: Cleanup this code, these bits should be emitted based on semantic
+ // properties, not on the order of definition, etc.
+ SD.setFlags(SD.getFlags() & ~SF_ReferenceTypeUndefinedLazy);
break;
case MCSA_LazyReference:
@@ -282,14 +246,19 @@ void MCMachOStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
// it has to be in a coalesced section, but this isn't enforced.
SD.setFlags(SD.getFlags() | SF_WeakDefinition);
break;
+
+ case MCSA_WeakDefAutoPrivate:
+ SD.setFlags(SD.getFlags() | SF_WeakDefinition | SF_WeakReference);
+ break;
}
}
void MCMachOStreamer::EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
// Encode the 'desc' value into the lowest implementation defined bits.
- assert(DescValue == (DescValue & SF_DescFlagsMask) &&
+ assert(DescValue == (DescValue & SF_DescFlagsMask) &&
"Invalid .desc value!");
- getSymbolData(*Symbol).setFlags(DescValue & SF_DescFlagsMask);
+ getAssembler().getOrCreateSymbolData(*Symbol).setFlags(
+ DescValue & SF_DescFlagsMask);
}
void MCMachOStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
@@ -297,14 +266,14 @@ void MCMachOStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
// FIXME: Darwin 'as' does appear to allow redef of a .comm by itself.
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
- MCSymbolData &SD = getSymbolData(*Symbol);
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
SD.setExternal(true);
SD.setCommon(Size, ByteAlignment);
}
void MCMachOStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
unsigned Size, unsigned ByteAlignment) {
- MCSectionData &SectData = getSectionData(*Section);
+ MCSectionData &SectData = getAssembler().getOrCreateSectionData(*Section);
// The symbol may not be present, which only creates the section.
if (!Symbol)
@@ -314,9 +283,13 @@ void MCMachOStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
- MCSymbolData &SD = getSymbolData(*Symbol);
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
- MCFragment *F = new MCZeroFillFragment(Size, ByteAlignment, &SectData);
+ // Emit an align fragment if necessary.
+ if (ByteAlignment != 1)
+ new MCAlignFragment(ByteAlignment, 0, 0, ByteAlignment, &SectData);
+
+ MCFragment *F = new MCFillFragment(0, 0, Size, &SectData);
SD.setFragment(F);
Symbol->setSection(*Section);
@@ -326,18 +299,25 @@ void MCMachOStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
SectData.setAlignment(ByteAlignment);
}
+// This should always be called with the thread local bss section. Like the
+// .zerofill directive this doesn't actually switch sections on us.
+void MCMachOStreamer::EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment) {
+ EmitZerofill(Section, Symbol, Size, ByteAlignment);
+ return;
+}
+
void MCMachOStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
- MCDataFragment *DF = dyn_cast_or_null<MCDataFragment>(getCurrentFragment());
- if (!DF)
- DF = new MCDataFragment(CurSectionData);
- DF->getContents().append(Data.begin(), Data.end());
+ // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
+ // MCObjectStreamer.
+ getOrCreateDataFragment()->getContents().append(Data.begin(), Data.end());
}
void MCMachOStreamer::EmitValue(const MCExpr *Value, unsigned Size,
unsigned AddrSpace) {
- MCDataFragment *DF = dyn_cast_or_null<MCDataFragment>(getCurrentFragment());
- if (!DF)
- DF = new MCDataFragment(CurSectionData);
+ // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
+ // MCObjectStreamer.
+ MCDataFragment *DF = getOrCreateDataFragment();
// Avoid fixups when possible.
int64_t AbsValue;
@@ -346,9 +326,9 @@ void MCMachOStreamer::EmitValue(const MCExpr *Value, unsigned Size,
for (unsigned i = 0; i != Size; ++i)
DF->getContents().push_back(uint8_t(AbsValue >> (i * 8)));
} else {
- DF->getFixups().push_back(MCAsmFixup(DF->getContents().size(),
- *AddValueSymbols(Value),
- MCFixup::getKindForSize(Size)));
+ DF->addFixup(MCFixup::Create(DF->getContents().size(),
+ AddValueSymbols(Value),
+ MCFixup::getKindForSize(Size)));
DF->getContents().resize(DF->getContents().size() + Size, 0);
}
}
@@ -356,69 +336,343 @@ void MCMachOStreamer::EmitValue(const MCExpr *Value, unsigned Size,
void MCMachOStreamer::EmitValueToAlignment(unsigned ByteAlignment,
int64_t Value, unsigned ValueSize,
unsigned MaxBytesToEmit) {
+ // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
+ // MCObjectStreamer.
if (MaxBytesToEmit == 0)
MaxBytesToEmit = ByteAlignment;
new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit,
- false /* EmitNops */, CurSectionData);
+ getCurrentSectionData());
// Update the maximum alignment on the current section if necessary.
- if (ByteAlignment > CurSectionData->getAlignment())
- CurSectionData->setAlignment(ByteAlignment);
+ if (ByteAlignment > getCurrentSectionData()->getAlignment())
+ getCurrentSectionData()->setAlignment(ByteAlignment);
}
void MCMachOStreamer::EmitCodeAlignment(unsigned ByteAlignment,
unsigned MaxBytesToEmit) {
+ // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into
+ // MCObjectStreamer.
if (MaxBytesToEmit == 0)
MaxBytesToEmit = ByteAlignment;
- // FIXME the 0x90 is the default x86 1 byte nop opcode.
- new MCAlignFragment(ByteAlignment, 0x90, 1, MaxBytesToEmit,
- true /* EmitNops */, CurSectionData);
+ MCAlignFragment *F = new MCAlignFragment(ByteAlignment, 0, 1, MaxBytesToEmit,
+ getCurrentSectionData());
+ F->setEmitNops(true);
// Update the maximum alignment on the current section if necessary.
- if (ByteAlignment > CurSectionData->getAlignment())
- CurSectionData->setAlignment(ByteAlignment);
+ if (ByteAlignment > getCurrentSectionData()->getAlignment())
+ getCurrentSectionData()->setAlignment(ByteAlignment);
}
void MCMachOStreamer::EmitValueToOffset(const MCExpr *Offset,
unsigned char Value) {
- new MCOrgFragment(*Offset, Value, CurSectionData);
+ new MCOrgFragment(*Offset, Value, getCurrentSectionData());
}
-void MCMachOStreamer::EmitInstruction(const MCInst &Inst) {
- // Scan for values.
- for (unsigned i = 0; i != Inst.getNumOperands(); ++i)
- if (Inst.getOperand(i).isExpr())
- AddValueSymbols(Inst.getOperand(i).getExpr());
+void MCMachOStreamer::EmitInstToFragment(const MCInst &Inst) {
+ MCInstFragment *IF = new MCInstFragment(Inst, getCurrentSectionData());
- if (!Emitter)
- llvm_unreachable("no code emitter available!");
+ // Add the fixups and data.
+ //
+ // FIXME: Revisit this design decision when relaxation is done, we may be
+ // able to get away with not storing any extra data in the MCInst.
+ SmallVector<MCFixup, 4> Fixups;
+ SmallString<256> Code;
+ raw_svector_ostream VecOS(Code);
+ getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
+ VecOS.flush();
- CurSectionData->setHasInstructions(true);
+ IF->getCode() = Code;
+ IF->getFixups() = Fixups;
+}
+
+void MCMachOStreamer::EmitInstToData(const MCInst &Inst) {
+ MCDataFragment *DF = getOrCreateDataFragment();
SmallVector<MCFixup, 4> Fixups;
SmallString<256> Code;
raw_svector_ostream VecOS(Code);
- Emitter->EncodeInstruction(Inst, VecOS, Fixups);
+ getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
VecOS.flush();
// Add the fixups and data.
- MCDataFragment *DF = dyn_cast_or_null<MCDataFragment>(getCurrentFragment());
- if (!DF)
- DF = new MCDataFragment(CurSectionData);
for (unsigned i = 0, e = Fixups.size(); i != e; ++i) {
- MCFixup &F = Fixups[i];
- DF->getFixups().push_back(MCAsmFixup(DF->getContents().size()+F.getOffset(),
- *F.getValue(),
- F.getKind()));
+ Fixups[i].setOffset(Fixups[i].getOffset() + DF->getContents().size());
+ DF->addFixup(Fixups[i]);
}
DF->getContents().append(Code.begin(), Code.end());
}
+void MCMachOStreamer::EmitInstruction(const MCInst &Inst) {
+ // Scan for values.
+ for (unsigned i = Inst.getNumOperands(); i--; )
+ if (Inst.getOperand(i).isExpr())
+ AddValueSymbols(Inst.getOperand(i).getExpr());
+
+ getCurrentSectionData()->setHasInstructions(true);
+
+ // Now that a machine instruction has been assembled into this section, make
+ // a line entry for any .loc directive that has been seen.
+ MakeLineEntryForSection(getCurrentSection());
+
+ // If this instruction doesn't need relaxation, just emit it as data.
+ if (!getAssembler().getBackend().MayNeedRelaxation(Inst)) {
+ EmitInstToData(Inst);
+ return;
+ }
+
+ // Otherwise, if we are relaxing everything, relax the instruction as much as
+ // possible and emit it as data.
+ if (getAssembler().getRelaxAll()) {
+ MCInst Relaxed;
+ getAssembler().getBackend().RelaxInstruction(Inst, Relaxed);
+ while (getAssembler().getBackend().MayNeedRelaxation(Relaxed))
+ getAssembler().getBackend().RelaxInstruction(Relaxed, Relaxed);
+ EmitInstToData(Relaxed);
+ return;
+ }
+
+ // Otherwise emit to a separate fragment.
+ EmitInstToFragment(Inst);
+}
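For readers skimming the diff, the dispatch above reduces to three paths; a minimal summary, restated from the code:
// - Not relaxable              -> EmitInstToData: encode now, append the bytes
//                                 and fixups to the current MCDataFragment.
// - Relaxable, RelaxAll set    -> relax to the widest encoding immediately,
//                                 then append as plain data.
// - Relaxable, normal mode     -> EmitInstToFragment: keep the MCInst in its
//                                 own MCInstFragment so MCAssembler can relax
//                                 it during layout.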
+
+//
+// This is called when an instruction is assembled into the specified section.
+// If there is information from the last .loc directive that does not yet have
+// a line entry made for it, one is made here.
+//
+void MCMachOStreamer::MakeLineEntryForSection(const MCSection *Section) {
+ if (!getContext().getDwarfLocSeen())
+ return;
+
+ // Create a symbol in the current section for use in the line entry.
+ MCSymbol *LineSym = getContext().CreateTempSymbol();
+ // Set the value of the symbol to use for the MCLineEntry.
+ EmitLabel(LineSym);
+
+ // Get the current .loc info saved in the context.
+ const MCDwarfLoc &DwarfLoc = getContext().getCurrentDwarfLoc();
+
+ // Create a (local) line entry with the symbol and the current .loc info.
+ MCLineEntry LineEntry(LineSym, DwarfLoc);
+
+ // Clear DwarfLocSeen to mark the current .loc info as used.
+ getContext().clearDwarfLocSeen();
+
+ // Get the MCLineSection for this section, if one does not exist for this
+ // section create it.
+ DenseMap<const MCSection *, MCLineSection *> &MCLineSections =
+ getContext().getMCLineSections();
+ MCLineSection *LineSection = MCLineSections[Section];
+ if (!LineSection) {
+ // Create a new MCLineSection. This will be deleted after the dwarf line
+ // table is created using it by iterating through the MCLineSections
+ // DenseMap.
+ LineSection = new MCLineSection;
+ // Save a pointer to the new LineSection into the MCLineSections DenseMap.
+ MCLineSections[Section] = LineSection;
+ }
+
+ // Add the line entry to this section's entries.
+ LineSection->addLineEntry(LineEntry);
+}
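A minimal sketch of how a line entry comes about, assuming the usual .loc handling in the assembly parser (which records the directive in the MCContext):
//   .loc 1 7 0        ; parser stores file 1, line 7 and sets DwarfLocSeen
//   movl %eax, %ebx   ; EmitInstruction calls MakeLineEntryForSection
//
// A temporary symbol is emitted at the instruction's position and an
// MCLineEntry (symbol, file 1, line 7) is appended to this section's
// MCLineSection; DwarfLocSeen is then cleared until the next .loc.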
+
+//
+// This helper routine returns an expression of End - Start - IntVal for use
+// by EmitDwarfFileTable() below.
+//
+const MCExpr * MCMachOStreamer::MakeStartMinusEndExpr(MCSymbol *Start,
+ MCSymbol *End,
+ int IntVal) {
+ MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
+ const MCExpr *Res =
+ MCSymbolRefExpr::Create(End, Variant, getContext());
+ const MCExpr *RHS =
+ MCSymbolRefExpr::Create(Start, Variant, getContext());
+ const MCExpr *Res1 =
+ MCBinaryExpr::Create(MCBinaryExpr::Sub, Res, RHS,getContext());
+ const MCExpr *Res2 =
+ MCConstantExpr::Create(IntVal, getContext());
+ const MCExpr *Res3 =
+ MCBinaryExpr::Create(MCBinaryExpr::Sub, Res1, Res2, getContext());
+ return Res3;
+}
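A small worked example of the helper, matching its use just below:
//   MakeStartMinusEndExpr(LineStartSym, LineEndSym, 4)
//     == (LineEndSym - LineStartSym) - 4
//
// i.e. the DWARF unit_length value, which by definition excludes the 4 bytes
// of the length field itself.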
+
+//
+// This emits the Dwarf file (and eventually the line) table.
+//
+void MCMachOStreamer::EmitDwarfFileTable(void) {
+ // For now make sure we don't put out the Dwarf file table if no .file
+ // directives were seen.
+ const std::vector<MCDwarfFile *> &MCDwarfFiles =
+ getContext().getMCDwarfFiles();
+ if (MCDwarfFiles.size() == 0)
+ return;
+
+ // This is the Mach-O section, for ELF it is the .debug_line section.
+ SwitchSection(getContext().getMachOSection("__DWARF", "__debug_line",
+ MCSectionMachO::S_ATTR_DEBUG,
+ 0, SectionKind::getDataRelLocal()));
+
+ // Create a symbol at the beginning of this section.
+ MCSymbol *LineStartSym = getContext().CreateTempSymbol();
+ // Set the value of the symbol, as we are at the start of the section.
+ EmitLabel(LineStartSym);
+
+ // Create a symbol for the end of the section (to be set when we get there).
+ MCSymbol *LineEndSym = getContext().CreateTempSymbol();
+
+ // The first 4 bytes are the total length of the information for this
+ // compilation unit (not including these 4 bytes for the length).
+ EmitValue(MakeStartMinusEndExpr(LineStartSym, LineEndSym, 4), 4, 0);
+
+ // The next 2 bytes are the version, which is DWARF 2.
+ EmitIntValue(2, 2);
+
+ // Create a symbol for the end of the prologue (to be set when we get there).
+ MCSymbol *ProEndSym = getContext().CreateTempSymbol(); // Lprologue_end
+
+ // The next 4 bytes are the length of the prologue: the distance from the
+ // start of the section to the end of the prologue, not including the 4 bytes
+ // for the total length, the 2 bytes for the version, and these 4 bytes for
+ // the length of the prologue.
+ EmitValue(MakeStartMinusEndExpr(LineStartSym, ProEndSym, (4 + 2 + 4)), 4, 0);
+
+ // Parameters of the state machine are next.
+ // Define the architecture-dependent minimum instruction length (in
+ // bytes). If in doubt, this value should err on the small side.
+ // DWARF2_LINE_MIN_INSN_LENGTH
+ EmitIntValue(1, 1);
+ // Flag that indicates the initial value of the is_stmt_start flag.
+ // DWARF2_LINE_DEFAULT_IS_STMT
+ EmitIntValue(1, 1);
+ // Minimum line offset in a special line info opcode. This value
+ // was chosen to give a reasonable range of values.
+ // DWARF2_LINE_BASE
+ EmitIntValue(uint64_t(-5), 1);
+ // Range of line offsets in a special line info. opcode.
+ // DWARF2_LINE_RANGE
+ EmitIntValue(14, 1);
+ // First special line opcode - leave room for the standard opcodes.
+ // DWARF2_LINE_OPCODE_BASE
+ EmitIntValue(13, 1);
+
+ // Standard opcode lengths
+ EmitIntValue(0, 1); // length of DW_LNS_copy
+ EmitIntValue(1, 1); // length of DW_LNS_advance_pc
+ EmitIntValue(1, 1); // length of DW_LNS_advance_line
+ EmitIntValue(1, 1); // length of DW_LNS_set_file
+ EmitIntValue(1, 1); // length of DW_LNS_set_column
+ EmitIntValue(0, 1); // length of DW_LNS_negate_stmt
+ EmitIntValue(0, 1); // length of DW_LNS_set_basic_block
+ EmitIntValue(0, 1); // length of DW_LNS_const_add_pc
+ EmitIntValue(1, 1); // length of DW_LNS_fixed_advance_pc
+ EmitIntValue(0, 1); // length of DW_LNS_set_prologue_end
+ EmitIntValue(0, 1); // length of DW_LNS_set_epilogue_begin
+ EmitIntValue(1, 1); // length of DW_LNS_set_isa
+
+ // Put out the directory and file tables.
+
+ // First the directory table.
+ const std::vector<StringRef> &MCDwarfDirs =
+ getContext().getMCDwarfDirs();
+ for (unsigned i = 0; i < MCDwarfDirs.size(); i++) {
+ EmitBytes(MCDwarfDirs[i], 0); // the DirectoryName
+ EmitBytes(StringRef("\0", 1), 0); // the null termination of the string
+ }
+ EmitIntValue(0, 1); // Terminate the directory list
+
+ // Second the file table.
+ for (unsigned i = 1; i < MCDwarfFiles.size(); i++) {
+ EmitBytes(MCDwarfFiles[i]->getName(), 0); // FileName
+ EmitBytes(StringRef("\0", 1), 0); // the null termination of the string
+ // FIXME the Directory number should be a .uleb128 not a .byte
+ EmitIntValue(MCDwarfFiles[i]->getDirIndex(), 1);
+ EmitIntValue(0, 1); // last modification timestamp (always 0)
+ EmitIntValue(0, 1); // filesize (always 0)
+ }
+ EmitIntValue(0, 1); // Terminate the file list
+
+ // This is the end of the prologue, so set the value of the symbol at the
+ // end of the prologue (that was used in a previous expression).
+ EmitLabel(ProEndSym);
+
+ // TODO: This is the point where the line tables would be emitted.
+
+ // Delete the MCLineSections that were created in
+ // MCMachOStreamer::MakeLineEntryForSection() and used to emit the line
+ // tables.
+ DenseMap<const MCSection *, MCLineSection *> &MCLineSections =
+ getContext().getMCLineSections();
+ for (DenseMap<const MCSection *, MCLineSection *>::iterator it =
+ MCLineSections.begin(), ie = MCLineSections.end(); it != ie; ++it) {
+ delete it->second;
+ }
+
+ // If no line tables were emitted, then emit the following DW_LNE_set_address
+ // sequence to set the address to zero.
+ //   TODO: test for 32-bit or 64-bit output
+ //   This is the sequence for 32-bit code.
+ EmitIntValue(0, 1);
+ EmitIntValue(5, 1);
+ EmitIntValue(2, 1);
+ EmitIntValue(0, 1);
+ EmitIntValue(0, 1);
+ EmitIntValue(0, 1);
+ EmitIntValue(0, 1);
+
+ // Lastly emit the DW_LNE_end_sequence, which consists of 3 bytes '00 01 01'
+ // (00 is the code for extended opcodes, followed by a ULEB128 length of the
+ // extended opcode (01), and the DW_LNE_end_sequence opcode (01)).
+ EmitIntValue(0, 1); // DW_LNS_extended_op
+ EmitIntValue(1, 1); // ULEB128 length of the extended opcode
+ EmitIntValue(1, 1); // DW_LNE_end_sequence
+
+ // This is the end of the section, so set the value of the symbol at the end
+ // of this section (that was used in a previous expression).
+ EmitLabel(LineEndSym);
+}
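For reference, the prologue emitted above for a hypothetical single '.file 1 "t.c"' input (file name invented for illustration) lays out roughly as:
//   unit_length        : 4 bytes  LineEnd - LineStart - 4
//   version            : 2 bytes  2
//   prologue_length    : 4 bytes  ProEnd - LineStart - (4 + 2 + 4)
//   min_insn_length    : 1 byte   1
//   default_is_stmt    : 1 byte   1
//   line_base          : 1 byte   -5
//   line_range         : 1 byte   14
//   opcode_base        : 1 byte   13
//   std_opcode_lengths : 12 bytes 0 1 1 1 1 0 0 0 1 0 0 1
//   include_directories: just the terminating 0 (empty directory table here)
//   file_names         : "t.c" NUL, dir index, 0, 0, then a terminating 0
// followed by the DW_LNE_set_address(0) and DW_LNE_end_sequence opcodes.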
+
void MCMachOStreamer::Finish() {
- Assembler.Finish();
+ // Dump out the dwarf file and directory tables (soon to include line table)
+ EmitDwarfFileTable();
+
+ // We have to set the fragment atom associations so we can relax properly for
+ // Mach-O.
+
+ // First, scan the symbol table to build a lookup table from fragments to
+ // defining symbols.
+ DenseMap<const MCFragment*, MCSymbolData*> DefiningSymbolMap;
+ for (MCAssembler::symbol_iterator it = getAssembler().symbol_begin(),
+ ie = getAssembler().symbol_end(); it != ie; ++it) {
+ if (getAssembler().isSymbolLinkerVisible(it->getSymbol()) &&
+ it->getFragment()) {
+ // An atom defining symbol should never be internal to a fragment.
+ assert(it->getOffset() == 0 && "Invalid offset in atom defining symbol!");
+ DefiningSymbolMap[it->getFragment()] = it;
+ }
+ }
+
+ // Set the fragment atom associations by tracking the last seen atom defining
+ // symbol.
+ for (MCAssembler::iterator it = getAssembler().begin(),
+ ie = getAssembler().end(); it != ie; ++it) {
+ MCSymbolData *CurrentAtom = 0;
+ for (MCSectionData::iterator it2 = it->begin(),
+ ie2 = it->end(); it2 != ie2; ++it2) {
+ if (MCSymbolData *SD = DefiningSymbolMap.lookup(it2))
+ CurrentAtom = SD;
+ it2->setAtom(CurrentAtom);
+ }
+ }
+
+ this->MCObjectStreamer::Finish();
}
-MCStreamer *llvm::createMachOStreamer(MCContext &Context, raw_ostream &OS,
- MCCodeEmitter *CE) {
- return new MCMachOStreamer(Context, OS, CE);
+MCStreamer *llvm::createMachOStreamer(MCContext &Context, TargetAsmBackend &TAB,
+ raw_ostream &OS, MCCodeEmitter *CE,
+ bool RelaxAll) {
+ MCMachOStreamer *S = new MCMachOStreamer(Context, TAB, OS, CE);
+ if (RelaxAll)
+ S->getAssembler().setRelaxAll(true);
+ return S;
}
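A hedged usage sketch of the new factory signature; Ctx, TAB, Out and CE stand for a caller's MCContext, TargetAsmBackend, output stream and MCCodeEmitter and are not taken from this commit:
// MCStreamer *S = createMachOStreamer(Ctx, TAB, Out, CE, /*RelaxAll=*/true);
// With RelaxAll set, every relaxable instruction is emitted in its widest form
// up front (see EmitInstruction above), trading object size for layout speed.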
diff --git a/libclamav/c++/llvm/lib/MC/MCNullStreamer.cpp b/libclamav/c++/llvm/lib/MC/MCNullStreamer.cpp
index ab61799..f7a2f20 100644
--- a/libclamav/c++/llvm/lib/MC/MCNullStreamer.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCNullStreamer.cpp
@@ -26,10 +26,15 @@ namespace {
/// @{
virtual void SwitchSection(const MCSection *Section) {
+ PrevSection = CurSection;
CurSection = Section;
}
- virtual void EmitLabel(MCSymbol *Symbol) {}
+ virtual void EmitLabel(MCSymbol *Symbol) {
+ assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
+ assert(CurSection && "Cannot emit before setting section!");
+ Symbol->setSection(*CurSection);
+ }
virtual void EmitAssemblerFlag(MCAssemblerFlag Flag) {}
@@ -38,6 +43,12 @@ namespace {
virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute){}
virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {}
+
+ virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol) {}
+ virtual void EmitCOFFSymbolStorageClass(int StorageClass) {}
+ virtual void EmitCOFFSymbolType(int Type) {}
+ virtual void EndCOFFSymbolDef() {}
+
virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {}
virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment) {}
@@ -45,7 +56,8 @@ namespace {
virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0,
unsigned Size = 0, unsigned ByteAlignment = 0) {}
-
+ virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment) {}
virtual void EmitBytes(StringRef Data, unsigned AddrSpace) {}
virtual void EmitValue(const MCExpr *Value, unsigned Size,
diff --git a/libclamav/c++/llvm/lib/MC/MCObjectStreamer.cpp b/libclamav/c++/llvm/lib/MC/MCObjectStreamer.cpp
new file mode 100644
index 0000000..2b2385e
--- /dev/null
+++ b/libclamav/c++/llvm/lib/MC/MCObjectStreamer.cpp
@@ -0,0 +1,87 @@
+//===- lib/MC/MCObjectStreamer.cpp - Object File MCStreamer Interface -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCObjectStreamer.h"
+
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/Target/TargetAsmBackend.h"
+using namespace llvm;
+
+MCObjectStreamer::MCObjectStreamer(MCContext &Context, TargetAsmBackend &TAB,
+ raw_ostream &_OS, MCCodeEmitter *_Emitter)
+ : MCStreamer(Context), Assembler(new MCAssembler(Context, TAB,
+ *_Emitter, _OS)),
+ CurSectionData(0)
+{
+}
+
+MCObjectStreamer::~MCObjectStreamer() {
+ delete &Assembler->getBackend();
+ delete &Assembler->getEmitter();
+ delete Assembler;
+}
+
+MCFragment *MCObjectStreamer::getCurrentFragment() const {
+ assert(getCurrentSectionData() && "No current section!");
+
+ if (!getCurrentSectionData()->empty())
+ return &getCurrentSectionData()->getFragmentList().back();
+
+ return 0;
+}
+
+MCDataFragment *MCObjectStreamer::getOrCreateDataFragment() const {
+ MCDataFragment *F = dyn_cast_or_null<MCDataFragment>(getCurrentFragment());
+ if (!F)
+ F = new MCDataFragment(getCurrentSectionData());
+ return F;
+}
+
+const MCExpr *MCObjectStreamer::AddValueSymbols(const MCExpr *Value) {
+ switch (Value->getKind()) {
+ case MCExpr::Target: llvm_unreachable("Can't handle target exprs yet!");
+ case MCExpr::Constant:
+ break;
+
+ case MCExpr::Binary: {
+ const MCBinaryExpr *BE = cast<MCBinaryExpr>(Value);
+ AddValueSymbols(BE->getLHS());
+ AddValueSymbols(BE->getRHS());
+ break;
+ }
+
+ case MCExpr::SymbolRef:
+ Assembler->getOrCreateSymbolData(cast<MCSymbolRefExpr>(Value)->getSymbol());
+ break;
+
+ case MCExpr::Unary:
+ AddValueSymbols(cast<MCUnaryExpr>(Value)->getSubExpr());
+ break;
+ }
+
+ return Value;
+}
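A minimal sketch of the walk, assuming a fixup value such as (foo - bar) + 4:
//   AddValueSymbols( Add( Sub( SymbolRef(foo), SymbolRef(bar) ), Const(4) ) )
//     -> recurses into both operands of each binary node,
//     -> getOrCreateSymbolData(foo) and getOrCreateSymbolData(bar),
//     -> constant leaves are left alone.
//
// This ensures the assembler owns MCSymbolData for every symbol a fixup may
// reference before layout begins.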
+
+void MCObjectStreamer::SwitchSection(const MCSection *Section) {
+ assert(Section && "Cannot switch to a null section!");
+
+ // If already in this section, then this is a noop.
+ if (Section == CurSection) return;
+
+ PrevSection = CurSection;
+ CurSection = Section;
+ CurSectionData = &getAssembler().getOrCreateSectionData(*Section);
+}
+
+void MCObjectStreamer::Finish() {
+ getAssembler().Finish();
+}
diff --git a/libclamav/c++/llvm/lib/MC/MCObjectWriter.cpp b/libclamav/c++/llvm/lib/MC/MCObjectWriter.cpp
new file mode 100644
index 0000000..d117e82
--- /dev/null
+++ b/libclamav/c++/llvm/lib/MC/MCObjectWriter.cpp
@@ -0,0 +1,15 @@
+//===- lib/MC/MCObjectWriter.cpp - MCObjectWriter implementation ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCObjectWriter.h"
+
+using namespace llvm;
+
+MCObjectWriter::~MCObjectWriter() {
+}
diff --git a/libclamav/c++/llvm/lib/MC/MCSection.cpp b/libclamav/c++/llvm/lib/MC/MCSection.cpp
index 24c89ef..a792d56 100644
--- a/libclamav/c++/llvm/lib/MC/MCSection.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCSection.cpp
@@ -20,26 +20,3 @@ using namespace llvm;
MCSection::~MCSection() {
}
-//===----------------------------------------------------------------------===//
-// MCSectionCOFF
-//===----------------------------------------------------------------------===//
-
-MCSectionCOFF *MCSectionCOFF::
-Create(StringRef Name, bool IsDirective, SectionKind K, MCContext &Ctx) {
- return new (Ctx) MCSectionCOFF(Name, IsDirective, K);
-}
-
-void MCSectionCOFF::PrintSwitchToSection(const MCAsmInfo &MAI,
- raw_ostream &OS) const {
-
- if (isDirective()) {
- OS << getName() << '\n';
- return;
- }
- OS << "\t.section\t" << getName() << ",\"";
- if (getKind().isText())
- OS << 'x';
- if (getKind().isWriteable())
- OS << 'w';
- OS << "\"\n";
-}
diff --git a/libclamav/c++/llvm/lib/MC/MCSectionCOFF.cpp b/libclamav/c++/llvm/lib/MC/MCSectionCOFF.cpp
new file mode 100644
index 0000000..eb53160
--- /dev/null
+++ b/libclamav/c++/llvm/lib/MC/MCSectionCOFF.cpp
@@ -0,0 +1,76 @@
+//===- lib/MC/MCSectionCOFF.cpp - COFF Code Section Representation --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCSectionCOFF.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+MCSectionCOFF::~MCSectionCOFF() {} // anchor.
+
+// ShouldOmitSectionDirective - Decides whether a '.section' directive
+// should be printed before the section name
+bool MCSectionCOFF::ShouldOmitSectionDirective(StringRef Name,
+ const MCAsmInfo &MAI) const {
+
+ // FIXME: Does .section .bss/.data/.text work everywhere??
+ if (Name == ".text" || Name == ".data" || Name == ".bss")
+ return true;
+
+ return false;
+}
+
+void MCSectionCOFF::PrintSwitchToSection(const MCAsmInfo &MAI,
+ raw_ostream &OS) const {
+
+ // standard sections don't require the '.section'
+ if (ShouldOmitSectionDirective(SectionName, MAI)) {
+ OS << '\t' << getSectionName() << '\n';
+ return;
+ }
+
+ OS << "\t.section\t" << getSectionName() << ",\"";
+ if (getKind().isText())
+ OS << 'x';
+ if (getKind().isWriteable())
+ OS << 'w';
+ else
+ OS << 'r';
+ if (getCharacteristics() & COFF::IMAGE_SCN_MEM_DISCARDABLE)
+ OS << 'n';
+ OS << "\"\n";
+
+ if (getCharacteristics() & COFF::IMAGE_SCN_LNK_COMDAT) {
+ switch (Selection) {
+ case COFF::IMAGE_COMDAT_SELECT_NODUPLICATES:
+ OS << "\t.linkonce one_only\n";
+ break;
+ case COFF::IMAGE_COMDAT_SELECT_ANY:
+ OS << "\t.linkonce discard\n";
+ break;
+ case COFF::IMAGE_COMDAT_SELECT_SAME_SIZE:
+ OS << "\t.linkonce same_size\n";
+ break;
+ case COFF::IMAGE_COMDAT_SELECT_EXACT_MATCH:
+ OS << "\t.linkonce same_contents\n";
+ break;
+ //NOTE: as of binutils 2.20, there is no way to specify select largest
+ // with the .linkonce directive. For now, we treat it as an invalid
+ // comdat selection value.
+ case COFF::IMAGE_COMDAT_SELECT_LARGEST:
+ // OS << "\t.linkonce largest\n";
+ // break;
+ default:
+ assert (0 && "unsupported COFF selection type");
+ break;
+ }
+ }
+}
diff --git a/libclamav/c++/llvm/lib/MC/MCSectionELF.cpp b/libclamav/c++/llvm/lib/MC/MCSectionELF.cpp
index ebfe269..a7599de 100644
--- a/libclamav/c++/llvm/lib/MC/MCSectionELF.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCSectionELF.cpp
@@ -14,11 +14,7 @@
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-MCSectionELF *MCSectionELF::
-Create(StringRef Section, unsigned Type, unsigned Flags,
- SectionKind K, bool isExplicit, MCContext &Ctx) {
- return new (Ctx) MCSectionELF(Section, Type, Flags, K, isExplicit);
-}
+MCSectionELF::~MCSectionELF() {} // anchor.
// ShouldOmitSectionDirective - Decides whether a '.section' directive
// should be printed before the section name
@@ -62,59 +58,63 @@ void MCSectionELF::PrintSwitchToSection(const MCAsmInfo &MAI,
OS << ",#write";
if (Flags & MCSectionELF::SHF_TLS)
OS << ",#tls";
- } else {
- OS << ",\"";
- if (Flags & MCSectionELF::SHF_ALLOC)
- OS << 'a';
- if (Flags & MCSectionELF::SHF_EXECINSTR)
- OS << 'x';
- if (Flags & MCSectionELF::SHF_WRITE)
- OS << 'w';
- if (Flags & MCSectionELF::SHF_MERGE)
- OS << 'M';
- if (Flags & MCSectionELF::SHF_STRINGS)
- OS << 'S';
- if (Flags & MCSectionELF::SHF_TLS)
- OS << 'T';
-
- // If there are target-specific flags, print them.
- if (Flags & ~MCSectionELF::TARGET_INDEP_SHF)
- PrintTargetSpecificSectionFlags(MAI, OS);
-
- OS << '"';
+ OS << '\n';
+ return;
+ }
+
+ OS << ",\"";
+ if (Flags & MCSectionELF::SHF_ALLOC)
+ OS << 'a';
+ if (Flags & MCSectionELF::SHF_EXECINSTR)
+ OS << 'x';
+ if (Flags & MCSectionELF::SHF_WRITE)
+ OS << 'w';
+ if (Flags & MCSectionELF::SHF_MERGE)
+ OS << 'M';
+ if (Flags & MCSectionELF::SHF_STRINGS)
+ OS << 'S';
+ if (Flags & MCSectionELF::SHF_TLS)
+ OS << 'T';
+
+ // If there are target-specific flags, print them.
+ if (Flags & MCSectionELF::XCORE_SHF_CP_SECTION)
+ OS << 'c';
+ if (Flags & MCSectionELF::XCORE_SHF_DP_SECTION)
+ OS << 'd';
+
+ OS << '"';
- if (ShouldPrintSectionType(Type)) {
- OS << ',';
-
- // If comment string is '@', e.g. as on ARM - use '%' instead
- if (MAI.getCommentString()[0] == '@')
- OS << '%';
- else
- OS << '@';
-
- if (Type == MCSectionELF::SHT_INIT_ARRAY)
- OS << "init_array";
- else if (Type == MCSectionELF::SHT_FINI_ARRAY)
- OS << "fini_array";
- else if (Type == MCSectionELF::SHT_PREINIT_ARRAY)
- OS << "preinit_array";
- else if (Type == MCSectionELF::SHT_NOBITS)
- OS << "nobits";
- else if (Type == MCSectionELF::SHT_PROGBITS)
- OS << "progbits";
-
- if (getKind().isMergeable1ByteCString()) {
- OS << ",1";
- } else if (getKind().isMergeable2ByteCString()) {
- OS << ",2";
- } else if (getKind().isMergeable4ByteCString() ||
- getKind().isMergeableConst4()) {
- OS << ",4";
- } else if (getKind().isMergeableConst8()) {
- OS << ",8";
- } else if (getKind().isMergeableConst16()) {
- OS << ",16";
- }
+ if (ShouldPrintSectionType(Type)) {
+ OS << ',';
+
+ // If comment string is '@', e.g. as on ARM - use '%' instead
+ if (MAI.getCommentString()[0] == '@')
+ OS << '%';
+ else
+ OS << '@';
+
+ if (Type == MCSectionELF::SHT_INIT_ARRAY)
+ OS << "init_array";
+ else if (Type == MCSectionELF::SHT_FINI_ARRAY)
+ OS << "fini_array";
+ else if (Type == MCSectionELF::SHT_PREINIT_ARRAY)
+ OS << "preinit_array";
+ else if (Type == MCSectionELF::SHT_NOBITS)
+ OS << "nobits";
+ else if (Type == MCSectionELF::SHT_PROGBITS)
+ OS << "progbits";
+
+ if (getKind().isMergeable1ByteCString()) {
+ OS << ",1";
+ } else if (getKind().isMergeable2ByteCString()) {
+ OS << ",2";
+ } else if (getKind().isMergeable4ByteCString() ||
+ getKind().isMergeableConst4()) {
+ OS << ",4";
+ } else if (getKind().isMergeableConst8()) {
+ OS << ",8";
+ } else if (getKind().isMergeableConst16()) {
+ OS << ",16";
}
}
diff --git a/libclamav/c++/llvm/lib/MC/MCSectionMachO.cpp b/libclamav/c++/llvm/lib/MC/MCSectionMachO.cpp
index 6cc67a2..ded3b20 100644
--- a/libclamav/c++/llvm/lib/MC/MCSectionMachO.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCSectionMachO.cpp
@@ -34,7 +34,14 @@ static const struct {
{ "interposing", "S_INTERPOSING" }, // 0x0D
{ "16byte_literals", "S_16BYTE_LITERALS" }, // 0x0E
{ 0, /*FIXME??*/ "S_DTRACE_DOF" }, // 0x0F
- { 0, /*FIXME??*/ "S_LAZY_DYLIB_SYMBOL_POINTERS" } // 0x10
+ { 0, /*FIXME??*/ "S_LAZY_DYLIB_SYMBOL_POINTERS" }, // 0x10
+ { "thread_local_regular", "S_THREAD_LOCAL_REGULAR" }, // 0x11
+ { "thread_local_zerofill", "S_THREAD_LOCAL_ZEROFILL" }, // 0x12
+ { "thread_local_variables", "S_THREAD_LOCAL_VARIABLES" }, // 0x13
+ { "thread_local_variable_pointers",
+ "S_THREAD_LOCAL_VARIABLE_POINTERS" }, // 0x14
+ { "thread_local_init_function_pointers",
+ "S_THREAD_LOCAL_INIT_FUNCTION_POINTERS"}, // 0x15
};
@@ -64,14 +71,22 @@ ENTRY(0 /*FIXME*/, S_ATTR_LOC_RELOC)
{ AttrFlagEnd, 0, 0 }
};
-
-MCSectionMachO *MCSectionMachO::
-Create(StringRef Segment, StringRef Section,
- unsigned TypeAndAttributes, unsigned Reserved2,
- SectionKind K, MCContext &Ctx) {
- // S_SYMBOL_STUBS must be set for Reserved2 to be non-zero.
- return new (Ctx) MCSectionMachO(Segment, Section, TypeAndAttributes,
- Reserved2, K);
+MCSectionMachO::MCSectionMachO(StringRef Segment, StringRef Section,
+ unsigned TAA, unsigned reserved2, SectionKind K)
+ : MCSection(SV_MachO, K), TypeAndAttributes(TAA), Reserved2(reserved2) {
+ assert(Segment.size() <= 16 && Section.size() <= 16 &&
+ "Segment or section string too long");
+ for (unsigned i = 0; i != 16; ++i) {
+ if (i < Segment.size())
+ SegmentName[i] = Segment[i];
+ else
+ SegmentName[i] = 0;
+
+ if (i < Section.size())
+ SectionName[i] = Section[i];
+ else
+ SectionName[i] = 0;
+ }
}
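As a small example of the zero-padding above, Segment "__TEXT" and Section "__text" (chosen for illustration) end up stored as:
//   SegmentName: '_' '_' 'T' 'E' 'X' 'T' 0 0 0 0 0 0 0 0 0 0
//   SectionName: '_' '_' 't' 'e' 'x' 't' 0 0 0 0 0 0 0 0 0 0
//
// matching the fixed 16-character name fields of a Mach-O section header;
// longer names are rejected by the assert rather than truncated.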
void MCSectionMachO::PrintSwitchToSection(const MCAsmInfo &MAI,
diff --git a/libclamav/c++/llvm/lib/MC/MCStreamer.cpp b/libclamav/c++/llvm/lib/MC/MCStreamer.cpp
index 15b3079..3e9d02e 100644
--- a/libclamav/c++/llvm/lib/MC/MCStreamer.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCStreamer.cpp
@@ -10,9 +10,13 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+#include <cstdlib>
using namespace llvm;
-MCStreamer::MCStreamer(MCContext &_Context) : Context(_Context), CurSection(0) {
+MCStreamer::MCStreamer(MCContext &Ctx) : Context(Ctx), CurSection(0),
+ PrevSection(0) {
}
MCStreamer::~MCStreamer() {
@@ -31,6 +35,11 @@ void MCStreamer::EmitIntValue(uint64_t Value, unsigned Size,
EmitValue(MCConstantExpr::Create(Value, getContext()), Size, AddrSpace);
}
+void MCStreamer::EmitSymbolValue(const MCSymbol *Sym, unsigned Size,
+ unsigned AddrSpace) {
+ EmitValue(MCSymbolRefExpr::Create(Sym, getContext()), Size, AddrSpace);
+}
+
/// EmitFill - Emit NumBytes bytes worth of the value specified by
/// FillValue. This implements directives such as '.space'.
void MCStreamer::EmitFill(uint64_t NumBytes, uint8_t FillValue,
@@ -39,3 +48,18 @@ void MCStreamer::EmitFill(uint64_t NumBytes, uint8_t FillValue,
for (uint64_t i = 0, e = NumBytes; i != e; ++i)
EmitValue(E, 1, AddrSpace);
}
+
+/// EmitRawText - If this file is backed by an assembly streamer, this dumps
+/// the specified string in the output .s file. This capability is
+/// indicated by the hasRawTextSupport() predicate.
+void MCStreamer::EmitRawText(StringRef String) {
+ errs() << "EmitRawText called on an MCStreamer that doesn't support it, "
+ " something must not be fully mc'ized\n";
+ abort();
+}
+
+void MCStreamer::EmitRawText(const Twine &T) {
+ SmallString<128> Str;
+ T.toVector(Str);
+ EmitRawText(Str.str());
+}
diff --git a/libclamav/c++/llvm/lib/MC/MCSymbol.cpp b/libclamav/c++/llvm/lib/MC/MCSymbol.cpp
index 3fb1233..07751f7 100644
--- a/libclamav/c++/llvm/lib/MC/MCSymbol.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCSymbol.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -38,6 +39,17 @@ static bool NameNeedsQuoting(StringRef Str) {
return false;
}
+void MCSymbol::setVariableValue(const MCExpr *Value) {
+ assert(Value && "Invalid variable value!");
+ assert((isUndefined() || (isAbsolute() && isa<MCConstantExpr>(Value))) &&
+ "Invalid redefinition!");
+ this->Value = Value;
+
+ // Mark the variable as absolute as appropriate.
+ if (isa<MCConstantExpr>(Value))
+ setAbsolute();
+}
+
void MCSymbol::print(raw_ostream &OS) const {
// The name for this MCSymbol is required to be a valid target name. However,
// some targets support quoting names with funny characters. If the name
diff --git a/libclamav/c++/llvm/lib/MC/MCValue.cpp b/libclamav/c++/llvm/lib/MC/MCValue.cpp
index 043a49d..c6ea16c 100644
--- a/libclamav/c++/llvm/lib/MC/MCValue.cpp
+++ b/libclamav/c++/llvm/lib/MC/MCValue.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCValue.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -19,10 +20,12 @@ void MCValue::print(raw_ostream &OS, const MCAsmInfo *MAI) const {
return;
}
- OS << *getSymA();
+ getSymA()->print(OS);
- if (getSymB())
- OS << " - " << *getSymB();
+ if (getSymB()) {
+ OS << " - ";
+ getSymB()->print(OS);
+ }
if (getConstant())
OS << " + " << getConstant();
diff --git a/libclamav/c++/llvm/lib/MC/MachObjectWriter.cpp b/libclamav/c++/llvm/lib/MC/MachObjectWriter.cpp
new file mode 100644
index 0000000..cffabfa
--- /dev/null
+++ b/libclamav/c++/llvm/lib/MC/MachObjectWriter.cpp
@@ -0,0 +1,1229 @@
+//===- lib/MC/MachObjectWriter.cpp - Mach-O File Writer -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MachObjectWriter.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCMachOSymbolFlags.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachO.h"
+#include "llvm/Target/TargetAsmBackend.h"
+
+// FIXME: Gross.
+#include "../Target/X86/X86FixupKinds.h"
+
+#include <vector>
+using namespace llvm;
+
+static unsigned getFixupKindLog2Size(unsigned Kind) {
+ switch (Kind) {
+ default: llvm_unreachable("invalid fixup kind!");
+ case X86::reloc_pcrel_1byte:
+ case FK_Data_1: return 0;
+ case X86::reloc_pcrel_2byte:
+ case FK_Data_2: return 1;
+ case X86::reloc_pcrel_4byte:
+ case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_movq_load:
+ case FK_Data_4: return 2;
+ case FK_Data_8: return 3;
+ }
+}
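The log2 sizes computed here later become the r_length field of the relocation entries written below; in short:
//   FK_Data_1, reloc_pcrel_1byte                      -> 0 (1-byte field)
//   FK_Data_2, reloc_pcrel_2byte                      -> 1 (2-byte field)
//   FK_Data_4, reloc_pcrel_4byte, reloc_riprel_4byte* -> 2 (4-byte field)
//   FK_Data_8                                         -> 3 (8-byte field)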
+
+static bool isFixupKindPCRel(unsigned Kind) {
+ switch (Kind) {
+ default:
+ return false;
+ case X86::reloc_pcrel_1byte:
+ case X86::reloc_pcrel_2byte:
+ case X86::reloc_pcrel_4byte:
+ case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_movq_load:
+ return true;
+ }
+}
+
+static bool isFixupKindRIPRel(unsigned Kind) {
+ return Kind == X86::reloc_riprel_4byte ||
+ Kind == X86::reloc_riprel_4byte_movq_load;
+}
+
+static bool doesSymbolRequireExternRelocation(MCSymbolData *SD) {
+ // Undefined symbols are always extern.
+ if (SD->Symbol->isUndefined())
+ return true;
+
+ // References to weak definitions require external relocation entries; the
+ // definition may not always be the one in the same object file.
+ if (SD->getFlags() & SF_WeakDefinition)
+ return true;
+
+ // Otherwise, we can use an internal relocation.
+ return false;
+}
+
+namespace {
+
+class MachObjectWriterImpl {
+ // See <mach-o/loader.h>.
+ enum {
+ Header_Magic32 = 0xFEEDFACE,
+ Header_Magic64 = 0xFEEDFACF
+ };
+
+ enum {
+ Header32Size = 28,
+ Header64Size = 32,
+ SegmentLoadCommand32Size = 56,
+ SegmentLoadCommand64Size = 72,
+ Section32Size = 68,
+ Section64Size = 80,
+ SymtabLoadCommandSize = 24,
+ DysymtabLoadCommandSize = 80,
+ Nlist32Size = 12,
+ Nlist64Size = 16,
+ RelocationInfoSize = 8
+ };
+
+ enum HeaderFileType {
+ HFT_Object = 0x1
+ };
+
+ enum HeaderFlags {
+ HF_SubsectionsViaSymbols = 0x2000
+ };
+
+ enum LoadCommandType {
+ LCT_Segment = 0x1,
+ LCT_Symtab = 0x2,
+ LCT_Dysymtab = 0xb,
+ LCT_Segment64 = 0x19
+ };
+
+ // See <mach-o/nlist.h>.
+ enum SymbolTypeType {
+ STT_Undefined = 0x00,
+ STT_Absolute = 0x02,
+ STT_Section = 0x0e
+ };
+
+ enum SymbolTypeFlags {
+ // If any of these bits are set, then the entry is a stab entry number (see
+ // <mach-o/stab.h>). Otherwise the other masks apply.
+ STF_StabsEntryMask = 0xe0,
+
+ STF_TypeMask = 0x0e,
+ STF_External = 0x01,
+ STF_PrivateExtern = 0x10
+ };
+
+ /// IndirectSymbolFlags - Flags for encoding special values in the indirect
+ /// symbol entry.
+ enum IndirectSymbolFlags {
+ ISF_Local = 0x80000000,
+ ISF_Absolute = 0x40000000
+ };
+
+ /// RelocationFlags - Special flags for addresses.
+ enum RelocationFlags {
+ RF_Scattered = 0x80000000
+ };
+
+ enum RelocationInfoType {
+ RIT_Vanilla = 0,
+ RIT_Pair = 1,
+ RIT_Difference = 2,
+ RIT_PreboundLazyPointer = 3,
+ RIT_LocalDifference = 4,
+ RIT_TLV = 5
+ };
+
+ /// X86_64 uses its own relocation types.
+ enum RelocationInfoTypeX86_64 {
+ RIT_X86_64_Unsigned = 0,
+ RIT_X86_64_Signed = 1,
+ RIT_X86_64_Branch = 2,
+ RIT_X86_64_GOTLoad = 3,
+ RIT_X86_64_GOT = 4,
+ RIT_X86_64_Subtractor = 5,
+ RIT_X86_64_Signed1 = 6,
+ RIT_X86_64_Signed2 = 7,
+ RIT_X86_64_Signed4 = 8,
+ RIT_X86_64_TLV = 9
+ };
+
+ /// MachSymbolData - Helper struct for containing some precomputed information
+ /// on symbols.
+ struct MachSymbolData {
+ MCSymbolData *SymbolData;
+ uint64_t StringIndex;
+ uint8_t SectionIndex;
+
+ // Support lexicographic sorting.
+ bool operator<(const MachSymbolData &RHS) const {
+ return SymbolData->getSymbol().getName() <
+ RHS.SymbolData->getSymbol().getName();
+ }
+ };
+
+ /// @name Relocation Data
+ /// @{
+
+ struct MachRelocationEntry {
+ uint32_t Word0;
+ uint32_t Word1;
+ };
+
+ llvm::DenseMap<const MCSectionData*,
+ std::vector<MachRelocationEntry> > Relocations;
+ llvm::DenseMap<const MCSectionData*, unsigned> IndirectSymBase;
+
+ /// @}
+ /// @name Symbol Table Data
+ /// @{
+
+ SmallString<256> StringTable;
+ std::vector<MachSymbolData> LocalSymbolData;
+ std::vector<MachSymbolData> ExternalSymbolData;
+ std::vector<MachSymbolData> UndefinedSymbolData;
+
+ /// @}
+
+ MachObjectWriter *Writer;
+
+ raw_ostream &OS;
+
+ unsigned Is64Bit : 1;
+
+public:
+ MachObjectWriterImpl(MachObjectWriter *_Writer, bool _Is64Bit)
+ : Writer(_Writer), OS(Writer->getStream()), Is64Bit(_Is64Bit) {
+ }
+
+ void Write8(uint8_t Value) { Writer->Write8(Value); }
+ void Write16(uint16_t Value) { Writer->Write16(Value); }
+ void Write32(uint32_t Value) { Writer->Write32(Value); }
+ void Write64(uint64_t Value) { Writer->Write64(Value); }
+ void WriteZeros(unsigned N) { Writer->WriteZeros(N); }
+ void WriteBytes(StringRef Str, unsigned ZeroFillSize = 0) {
+ Writer->WriteBytes(Str, ZeroFillSize);
+ }
+
+ void WriteHeader(unsigned NumLoadCommands, unsigned LoadCommandsSize,
+ bool SubsectionsViaSymbols) {
+ uint32_t Flags = 0;
+
+ if (SubsectionsViaSymbols)
+ Flags |= HF_SubsectionsViaSymbols;
+
+ // struct mach_header (28 bytes) or
+ // struct mach_header_64 (32 bytes)
+
+ uint64_t Start = OS.tell();
+ (void) Start;
+
+ Write32(Is64Bit ? Header_Magic64 : Header_Magic32);
+
+ // FIXME: Support cputype.
+ Write32(Is64Bit ? MachO::CPUTypeX86_64 : MachO::CPUTypeI386);
+ // FIXME: Support cpusubtype.
+ Write32(MachO::CPUSubType_I386_ALL);
+ Write32(HFT_Object);
+ Write32(NumLoadCommands); // Object files have a single load command, the
+ // segment.
+ Write32(LoadCommandsSize);
+ Write32(Flags);
+ if (Is64Bit)
+ Write32(0); // reserved
+
+ assert(OS.tell() - Start == (Is64Bit ? Header64Size : Header32Size));
+ }
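For orientation, the words written above map onto the Mach-O header fields from <mach-o/loader.h>:
//   magic      : 0xFEEDFACE (mach_header) or 0xFEEDFACF (mach_header_64)
//   cputype    : CPUTypeI386 or CPUTypeX86_64
//   cpusubtype : CPUSubType_I386_ALL
//   filetype   : HFT_Object (MH_OBJECT)
//   ncmds      : NumLoadCommands
//   sizeofcmds : LoadCommandsSize
//   flags      : HF_SubsectionsViaSymbols when requested, else 0
//   reserved   : 0 (64-bit header only)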
+
+ /// WriteSegmentLoadCommand - Write a segment load command.
+ ///
+ /// \arg NumSections - The number of sections in this segment.
+ /// \arg SectionDataSize - The total size of the sections.
+ void WriteSegmentLoadCommand(unsigned NumSections,
+ uint64_t VMSize,
+ uint64_t SectionDataStartOffset,
+ uint64_t SectionDataSize) {
+ // struct segment_command (56 bytes) or
+ // struct segment_command_64 (72 bytes)
+
+ uint64_t Start = OS.tell();
+ (void) Start;
+
+ unsigned SegmentLoadCommandSize = Is64Bit ? SegmentLoadCommand64Size :
+ SegmentLoadCommand32Size;
+ Write32(Is64Bit ? LCT_Segment64 : LCT_Segment);
+ Write32(SegmentLoadCommandSize +
+ NumSections * (Is64Bit ? Section64Size : Section32Size));
+
+ WriteBytes("", 16);
+ if (Is64Bit) {
+ Write64(0); // vmaddr
+ Write64(VMSize); // vmsize
+ Write64(SectionDataStartOffset); // file offset
+ Write64(SectionDataSize); // file size
+ } else {
+ Write32(0); // vmaddr
+ Write32(VMSize); // vmsize
+ Write32(SectionDataStartOffset); // file offset
+ Write32(SectionDataSize); // file size
+ }
+ Write32(0x7); // maxprot
+ Write32(0x7); // initprot
+ Write32(NumSections);
+ Write32(0); // flags
+
+ assert(OS.tell() - Start == SegmentLoadCommandSize);
+ }
+
+ void WriteSection(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCSectionData &SD, uint64_t FileOffset,
+ uint64_t RelocationsStart, unsigned NumRelocations) {
+ uint64_t SectionSize = Layout.getSectionSize(&SD);
+
+ // The offset is unused for virtual sections.
+ if (Asm.getBackend().isVirtualSection(SD.getSection())) {
+ assert(Layout.getSectionFileSize(&SD) == 0 && "Invalid file size!");
+ FileOffset = 0;
+ }
+
+ // struct section (68 bytes) or
+ // struct section_64 (80 bytes)
+
+ uint64_t Start = OS.tell();
+ (void) Start;
+
+ const MCSectionMachO &Section = cast<MCSectionMachO>(SD.getSection());
+ WriteBytes(Section.getSectionName(), 16);
+ WriteBytes(Section.getSegmentName(), 16);
+ if (Is64Bit) {
+ Write64(Layout.getSectionAddress(&SD)); // address
+ Write64(SectionSize); // size
+ } else {
+ Write32(Layout.getSectionAddress(&SD)); // address
+ Write32(SectionSize); // size
+ }
+ Write32(FileOffset);
+
+ unsigned Flags = Section.getTypeAndAttributes();
+ if (SD.hasInstructions())
+ Flags |= MCSectionMachO::S_ATTR_SOME_INSTRUCTIONS;
+
+ assert(isPowerOf2_32(SD.getAlignment()) && "Invalid alignment!");
+ Write32(Log2_32(SD.getAlignment()));
+ Write32(NumRelocations ? RelocationsStart : 0);
+ Write32(NumRelocations);
+ Write32(Flags);
+ Write32(IndirectSymBase.lookup(&SD)); // reserved1
+ Write32(Section.getStubSize()); // reserved2
+ if (Is64Bit)
+ Write32(0); // reserved3
+
+ assert(OS.tell() - Start == (Is64Bit ? Section64Size : Section32Size));
+ }
+
+ void WriteSymtabLoadCommand(uint32_t SymbolOffset, uint32_t NumSymbols,
+ uint32_t StringTableOffset,
+ uint32_t StringTableSize) {
+ // struct symtab_command (24 bytes)
+
+ uint64_t Start = OS.tell();
+ (void) Start;
+
+ Write32(LCT_Symtab);
+ Write32(SymtabLoadCommandSize);
+ Write32(SymbolOffset);
+ Write32(NumSymbols);
+ Write32(StringTableOffset);
+ Write32(StringTableSize);
+
+ assert(OS.tell() - Start == SymtabLoadCommandSize);
+ }
+
+ void WriteDysymtabLoadCommand(uint32_t FirstLocalSymbol,
+ uint32_t NumLocalSymbols,
+ uint32_t FirstExternalSymbol,
+ uint32_t NumExternalSymbols,
+ uint32_t FirstUndefinedSymbol,
+ uint32_t NumUndefinedSymbols,
+ uint32_t IndirectSymbolOffset,
+ uint32_t NumIndirectSymbols) {
+ // struct dysymtab_command (80 bytes)
+
+ uint64_t Start = OS.tell();
+ (void) Start;
+
+ Write32(LCT_Dysymtab);
+ Write32(DysymtabLoadCommandSize);
+ Write32(FirstLocalSymbol);
+ Write32(NumLocalSymbols);
+ Write32(FirstExternalSymbol);
+ Write32(NumExternalSymbols);
+ Write32(FirstUndefinedSymbol);
+ Write32(NumUndefinedSymbols);
+ Write32(0); // tocoff
+ Write32(0); // ntoc
+ Write32(0); // modtaboff
+ Write32(0); // nmodtab
+ Write32(0); // extrefsymoff
+ Write32(0); // nextrefsyms
+ Write32(IndirectSymbolOffset);
+ Write32(NumIndirectSymbols);
+ Write32(0); // extreloff
+ Write32(0); // nextrel
+ Write32(0); // locreloff
+ Write32(0); // nlocrel
+
+ assert(OS.tell() - Start == DysymtabLoadCommandSize);
+ }
+
+ void WriteNlist(MachSymbolData &MSD, const MCAsmLayout &Layout) {
+ MCSymbolData &Data = *MSD.SymbolData;
+ const MCSymbol &Symbol = Data.getSymbol();
+ uint8_t Type = 0;
+ uint16_t Flags = Data.getFlags();
+ uint32_t Address = 0;
+
+ // Set the N_TYPE bits. See <mach-o/nlist.h>.
+ //
+ // FIXME: Are the prebound or indirect fields possible here?
+ if (Symbol.isUndefined())
+ Type = STT_Undefined;
+ else if (Symbol.isAbsolute())
+ Type = STT_Absolute;
+ else
+ Type = STT_Section;
+
+ // FIXME: Set STAB bits.
+
+ if (Data.isPrivateExtern())
+ Type |= STF_PrivateExtern;
+
+ // Set external bit.
+ if (Data.isExternal() || Symbol.isUndefined())
+ Type |= STF_External;
+
+ // Compute the symbol address.
+ if (Symbol.isDefined()) {
+ if (Symbol.isAbsolute()) {
+ Address = cast<MCConstantExpr>(Symbol.getVariableValue())->getValue();
+ } else {
+ Address = Layout.getSymbolAddress(&Data);
+ }
+ } else if (Data.isCommon()) {
+ // Common symbols are encoded with the size in the address
+ // field, and their alignment in the flags.
+ Address = Data.getCommonSize();
+
+ // Common alignment is packed into the 'desc' bits.
+ if (unsigned Align = Data.getCommonAlignment()) {
+ unsigned Log2Size = Log2_32(Align);
+ assert((1U << Log2Size) == Align && "Invalid 'common' alignment!");
+ if (Log2Size > 15)
+ report_fatal_error("invalid 'common' alignment '" +
+ Twine(Align) + "'");
+ // FIXME: Keep this mask with the SymbolFlags enumeration.
+ Flags = (Flags & 0xF0FF) | (Log2Size << 8);
+ }
+ }
+
+ // struct nlist (12 bytes)
+
+ Write32(MSD.StringIndex);
+ Write8(Type);
+ Write8(MSD.SectionIndex);
+
+ // The Mach-O streamer uses the lowest 16-bits of the flags for the 'desc'
+ // value.
+ Write16(Flags);
+ if (Is64Bit)
+ Write64(Address);
+ else
+ Write32(Address);
+ }
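A small worked example of the common-alignment packing above, with the value picked purely for illustration:
//   Align    = 16
//   Log2Size = Log2_32(16) = 4
//   Flags    = (Flags & 0xF0FF) | (4 << 8)
//
// i.e. bits 8-11 of the nlist 'desc' field carry the alignment as a power of
// two, which is why alignments above 2^15 are rejected.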
+
+ // FIXME: We really need to improve the relocation validation. Basically, we
+ // want to implement a separate computation which evaluates the relocation
+ // entry as the linker would, and verifies that the resultant fixup value is
+ // exactly what the encoder wanted. This will catch several classes of
+ // problems:
+ //
+ // - Relocation entry bugs, the two algorithms are unlikely to have the same
+ // exact bug.
+ //
+ // - Relaxation issues, where we forget to relax something.
+ //
+ // - Input errors, where something cannot be correctly encoded. 'as' allows
+ // these through in many cases.
+
+ void RecordX86_64Relocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) {
+ unsigned IsPCRel = isFixupKindPCRel(Fixup.getKind());
+ unsigned IsRIPRel = isFixupKindRIPRel(Fixup.getKind());
+ unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
+
+ // See <reloc.h>.
+ uint32_t FixupOffset =
+ Layout.getFragmentOffset(Fragment) + Fixup.getOffset();
+ uint32_t FixupAddress =
+ Layout.getFragmentAddress(Fragment) + Fixup.getOffset();
+ int64_t Value = 0;
+ unsigned Index = 0;
+ unsigned IsExtern = 0;
+ unsigned Type = 0;
+
+ Value = Target.getConstant();
+
+ if (IsPCRel) {
+ // Compensate for the relocation offset; Darwin x86_64 relocations only
+ // have the addend and appear to have attempted to define it to be the
+ // actual expression addend without the PCrel bias. However, instructions
+ // with data following the relocation are not accommodated (see the comment
+ // below regarding SIGNED{1,2,4}), so it isn't exactly that either.
+ Value += 1LL << Log2Size;
+ }
+
+ if (Target.isAbsolute()) { // constant
+ // SymbolNum of 0 indicates the absolute section.
+ Type = RIT_X86_64_Unsigned;
+ Index = 0;
+
+ // FIXME: I believe this is broken, I don't think the linker can
+ // understand it. I think it would require a local relocation, but I'm not
+ // sure if that would work either. The official way to get an absolute
+ // PCrel relocation is to use an absolute symbol (which we don't support
+ // yet).
+ if (IsPCRel) {
+ IsExtern = 1;
+ Type = RIT_X86_64_Branch;
+ }
+ } else if (Target.getSymB()) { // A - B + constant
+ const MCSymbol *A = &Target.getSymA()->getSymbol();
+ MCSymbolData &A_SD = Asm.getSymbolData(*A);
+ const MCSymbolData *A_Base = Asm.getAtom(Layout, &A_SD);
+
+ const MCSymbol *B = &Target.getSymB()->getSymbol();
+ MCSymbolData &B_SD = Asm.getSymbolData(*B);
+ const MCSymbolData *B_Base = Asm.getAtom(Layout, &B_SD);
+
+ // Neither symbol can be modified.
+ if (Target.getSymA()->getKind() != MCSymbolRefExpr::VK_None ||
+ Target.getSymB()->getKind() != MCSymbolRefExpr::VK_None)
+ report_fatal_error("unsupported relocation of modified symbol");
+
+ // We don't support PCrel relocations of differences. Darwin 'as' doesn't
+ // implement most of these correctly.
+ if (IsPCRel)
+ report_fatal_error("unsupported pc-relative relocation of difference");
+
+ // We don't currently support any situation where one or both of the
+ // symbols would require a local relocation. This is almost certainly
+ // unused and may not be possible to encode correctly.
+ if (!A_Base || !B_Base)
+ report_fatal_error("unsupported local relocations in difference");
+
+ // Darwin 'as' doesn't emit correct relocations for this (it ends up with
+ // a single SIGNED relocation); reject it for now.
+ if (A_Base == B_Base)
+ report_fatal_error("unsupported relocation with identical base");
+
+ Value += Layout.getSymbolAddress(&A_SD) - Layout.getSymbolAddress(A_Base);
+ Value -= Layout.getSymbolAddress(&B_SD) - Layout.getSymbolAddress(B_Base);
+
+ Index = A_Base->getIndex();
+ IsExtern = 1;
+ Type = RIT_X86_64_Unsigned;
+
+ MachRelocationEntry MRE;
+ MRE.Word0 = FixupOffset;
+ MRE.Word1 = ((Index << 0) |
+ (IsPCRel << 24) |
+ (Log2Size << 25) |
+ (IsExtern << 27) |
+ (Type << 28));
+ Relocations[Fragment->getParent()].push_back(MRE);
+
+ Index = B_Base->getIndex();
+ IsExtern = 1;
+ Type = RIT_X86_64_Subtractor;
+ } else {
+ const MCSymbol *Symbol = &Target.getSymA()->getSymbol();
+ MCSymbolData &SD = Asm.getSymbolData(*Symbol);
+ const MCSymbolData *Base = Asm.getAtom(Layout, &SD);
+
+ // Relocations inside debug sections always use local relocations when
+ // possible. This seems to be done because the debugger doesn't fully
+ // understand x86_64 relocation entries, and expects to find values that
+ // have already been fixed up.
+ if (Symbol->isInSection()) {
+ const MCSectionMachO &Section = static_cast<const MCSectionMachO&>(
+ Fragment->getParent()->getSection());
+ if (Section.hasAttribute(MCSectionMachO::S_ATTR_DEBUG))
+ Base = 0;
+ }
+
+ // x86_64 almost always uses external relocations, except when there is no
+ // symbol to use as a base address (a local symbol with no preceding
+ // non-local symbol).
+ if (Base) {
+ Index = Base->getIndex();
+ IsExtern = 1;
+
+ // Add the local offset, if needed.
+ if (Base != &SD)
+ Value += Layout.getSymbolAddress(&SD) - Layout.getSymbolAddress(Base);
+ } else if (Symbol->isInSection()) {
+ // The index is the section ordinal (1-based).
+ Index = SD.getFragment()->getParent()->getOrdinal() + 1;
+ IsExtern = 0;
+ Value += Layout.getSymbolAddress(&SD);
+
+ if (IsPCRel)
+ Value -= FixupAddress + (1 << Log2Size);
+ } else {
+ report_fatal_error("unsupported relocation of undefined symbol '" +
+ Symbol->getName() + "'");
+ }
+
+ MCSymbolRefExpr::VariantKind Modifier = Target.getSymA()->getKind();
+ if (IsPCRel) {
+ if (IsRIPRel) {
+ if (Modifier == MCSymbolRefExpr::VK_GOTPCREL) {
+ // x86_64 distinguishes movq foo at GOTPCREL so that the linker can
+ // rewrite the movq to an leaq at link time if the symbol ends up in
+ // the same linkage unit.
+ if (unsigned(Fixup.getKind()) == X86::reloc_riprel_4byte_movq_load)
+ Type = RIT_X86_64_GOTLoad;
+ else
+ Type = RIT_X86_64_GOT;
+ } else if (Modifier == MCSymbolRefExpr::VK_TLVP) {
+ Type = RIT_X86_64_TLV;
+ } else if (Modifier != MCSymbolRefExpr::VK_None) {
+ report_fatal_error("unsupported symbol modifier in relocation");
+ } else {
+ Type = RIT_X86_64_Signed;
+
+ // The Darwin x86_64 relocation format has a problem where it cannot
+ // encode an address (L<foo> + <constant>) which is outside the atom
+ // containing L<foo>. Generally, this shouldn't occur but it does
+ // happen when we have a RIPrel instruction with data following the
+ // relocation entry (e.g., movb $012, L0(%rip)). Even with the PCrel
+ // adjustment Darwin x86_64 uses, the offset is still negative and
+ // the linker has no way to recognize this.
+ //
+ // To work around this, Darwin uses several special relocation types
+ // to indicate the offsets. However, the specification or
+ // implementation of these seems to also be incomplete; they should
+ // adjust the addend as well based on the actual encoded instruction
+ // (the additional bias), but instead appear to just look at the
+ // final offset.
+ switch (-(Target.getConstant() + (1LL << Log2Size))) {
+ case 1: Type = RIT_X86_64_Signed1; break;
+ case 2: Type = RIT_X86_64_Signed2; break;
+ case 4: Type = RIT_X86_64_Signed4; break;
+ }
+ }
+ } else {
+ if (Modifier != MCSymbolRefExpr::VK_None)
+ report_fatal_error("unsupported symbol modifier in branch "
+ "relocation");
+
+ Type = RIT_X86_64_Branch;
+ }
+ } else {
+ if (Modifier == MCSymbolRefExpr::VK_GOT) {
+ Type = RIT_X86_64_GOT;
+ } else if (Modifier == MCSymbolRefExpr::VK_GOTPCREL) {
+ // GOTPCREL is allowed as a modifier on non-PCrel instructions, in
+ // which case all we do is set the PCrel bit in the relocation entry;
+ // this is used with exception handling, for example. The source is
+ // required to include any necessary offset directly.
+ Type = RIT_X86_64_GOT;
+ IsPCRel = 1;
+ } else if (Modifier == MCSymbolRefExpr::VK_TLVP) {
+ report_fatal_error("TLVP symbol modifier should have been rip-rel");
+ } else if (Modifier != MCSymbolRefExpr::VK_None)
+ report_fatal_error("unsupported symbol modifier in relocation");
+ else
+ Type = RIT_X86_64_Unsigned;
+ }
+ }
+
+ // x86_64 always writes custom values into the fixups.
+ FixedValue = Value;
+
+ // struct relocation_info (8 bytes)
+ MachRelocationEntry MRE;
+ MRE.Word0 = FixupOffset;
+ MRE.Word1 = ((Index << 0) |
+ (IsPCRel << 24) |
+ (Log2Size << 25) |
+ (IsExtern << 27) |
+ (Type << 28));
+ Relocations[Fragment->getParent()].push_back(MRE);
+ }
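The Word1 packing used here (and in the 32-bit paths below) follows the struct relocation_info bit layout:
//   bits  0-23 : r_symbolnum (Index)
//   bit     24 : r_pcrel     (IsPCRel)
//   bits 25-26 : r_length    (Log2Size)
//   bit     27 : r_extern    (IsExtern)
//   bits 28-31 : r_type      (Type)
//
// Word0 is simply r_address, the fixup offset within the section data.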
+
+ void RecordScatteredRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) {
+ uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
+ unsigned IsPCRel = isFixupKindPCRel(Fixup.getKind());
+ unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
+ unsigned Type = RIT_Vanilla;
+
+ // See <reloc.h>.
+ const MCSymbol *A = &Target.getSymA()->getSymbol();
+ MCSymbolData *A_SD = &Asm.getSymbolData(*A);
+
+ if (!A_SD->getFragment())
+ report_fatal_error("symbol '" + A->getName() +
+ "' can not be undefined in a subtraction expression");
+
+ uint32_t Value = Layout.getSymbolAddress(A_SD);
+ uint32_t Value2 = 0;
+
+ if (const MCSymbolRefExpr *B = Target.getSymB()) {
+ MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
+
+ if (!B_SD->getFragment())
+ report_fatal_error("symbol '" + B->getSymbol().getName() +
+ "' can not be undefined in a subtraction expression");
+
+ // Select the appropriate difference relocation type.
+ //
+ // Note that there is no longer any semantic difference between these two
+ // relocation types from the linker's point of view; this is done solely
+ // for pedantic compatibility with 'as'.
+ Type = A_SD->isExternal() ? RIT_Difference : RIT_LocalDifference;
+ Value2 = Layout.getSymbolAddress(B_SD);
+ }
+
+ // Relocations are written out in reverse order, so the PAIR comes first.
+ if (Type == RIT_Difference || Type == RIT_LocalDifference) {
+ MachRelocationEntry MRE;
+ MRE.Word0 = ((0 << 0) |
+ (RIT_Pair << 24) |
+ (Log2Size << 28) |
+ (IsPCRel << 30) |
+ RF_Scattered);
+ MRE.Word1 = Value2;
+ Relocations[Fragment->getParent()].push_back(MRE);
+ }
+
+ MachRelocationEntry MRE;
+ MRE.Word0 = ((FixupOffset << 0) |
+ (Type << 24) |
+ (Log2Size << 28) |
+ (IsPCRel << 30) |
+ RF_Scattered);
+ MRE.Word1 = Value;
+ Relocations[Fragment->getParent()].push_back(MRE);
+ }
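Scattered entries pack the metadata into Word0 instead, in the style of struct scattered_relocation_info:
//   bits  0-23 : r_address   (FixupOffset)
//   bits 24-27 : r_type      (Type)
//   bits 28-29 : r_length    (Log2Size)
//   bit     30 : r_pcrel     (IsPCRel)
//   bit     31 : r_scattered (RF_Scattered)
//
// Word1 then carries the target symbol's address (Value, or Value2 for the
// PAIR half) rather than a symbol index.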
+
+ void RecordTLVPRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) {
+ assert(Target.getSymA()->getKind() == MCSymbolRefExpr::VK_TLVP &&
+ !Is64Bit &&
+ "Should only be called with a 32-bit TLVP relocation!");
+
+ unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
+ uint32_t Value = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
+ unsigned IsPCRel = 0;
+
+ // Get the symbol data.
+ MCSymbolData *SD_A = &Asm.getSymbolData(Target.getSymA()->getSymbol());
+ unsigned Index = SD_A->getIndex();
+
+ // We're only going to have a second symbol in pic mode and it'll be a
+ // subtraction from the picbase. For 32-bit pic the addend is the difference
+ // between the picbase and the next address. For 32-bit static the addend
+ // is zero.
+ if (Target.getSymB()) {
+ // If this is a subtraction then we're pcrel.
+ uint32_t FixupAddress =
+ Layout.getFragmentAddress(Fragment) + Fixup.getOffset();
+ MCSymbolData *SD_B = &Asm.getSymbolData(Target.getSymB()->getSymbol());
+ IsPCRel = 1;
+ FixedValue = (FixupAddress - Layout.getSymbolAddress(SD_B) +
+ Target.getConstant());
+ FixedValue += 1ULL << Log2Size;
+ } else {
+ FixedValue = 0;
+ }
+
+ // struct relocation_info (8 bytes)
+ MachRelocationEntry MRE;
+ MRE.Word0 = Value;
+ MRE.Word1 = ((Index << 0) |
+ (IsPCRel << 24) |
+ (Log2Size << 25) |
+ (1 << 27) | // Extern
+ (RIT_TLV << 28)); // Type
+ Relocations[Fragment->getParent()].push_back(MRE);
+ }
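The PIC branch above biases the addend so that it measures from the picbase to the address just past the fixup field. A worked sketch with made-up addresses (fixup field at 0x30, picbase symbol at 0x10, constant 0, 4-byte field):

    #include <cassert>
    #include <cstdint>

    // Worked example of the PIC addend computed above, with made-up addresses.
    int main() {
      uint64_t FixupAddress = 0x30, PicbaseAddress = 0x10, Constant = 0;
      unsigned Log2Size = 2;                     // 4-byte field
      uint64_t FixedValue = FixupAddress - PicbaseAddress + Constant;
      FixedValue += 1ULL << Log2Size;            // skip past the 4-byte field
      assert(FixedValue == 0x24);                // picbase -> next address
      return 0;
    }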
+
+ void RecordRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup,
+ MCValue Target, uint64_t &FixedValue) {
+ if (Is64Bit) {
+ RecordX86_64Relocation(Asm, Layout, Fragment, Fixup, Target, FixedValue);
+ return;
+ }
+
+ unsigned IsPCRel = isFixupKindPCRel(Fixup.getKind());
+ unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
+
+ // If this is a 32-bit TLVP reloc it's handled a bit differently.
+ if (Target.getSymA()->getKind() == MCSymbolRefExpr::VK_TLVP) {
+ RecordTLVPRelocation(Asm, Layout, Fragment, Fixup, Target, FixedValue);
+ return;
+ }
+
+ // If this is a difference or a defined symbol plus an offset, then we need
+ // a scattered relocation entry.
+ // Differences always require scattered relocations.
+ if (Target.getSymB())
+ return RecordScatteredRelocation(Asm, Layout, Fragment, Fixup,
+ Target, FixedValue);
+
+ // Get the symbol data, if any.
+ MCSymbolData *SD = 0;
+ if (Target.getSymA())
+ SD = &Asm.getSymbolData(Target.getSymA()->getSymbol());
+
+ // If this is an internal relocation with an offset, it also needs a
+ // scattered relocation entry.
+ uint32_t Offset = Target.getConstant();
+ if (IsPCRel)
+ Offset += 1 << Log2Size;
+ if (Offset && SD && !doesSymbolRequireExternRelocation(SD))
+ return RecordScatteredRelocation(Asm, Layout, Fragment, Fixup,
+ Target, FixedValue);
+
+ // See <reloc.h>.
+ uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
+ unsigned Index = 0;
+ unsigned IsExtern = 0;
+ unsigned Type = 0;
+
+ if (Target.isAbsolute()) { // constant
+ // SymbolNum of 0 indicates the absolute section.
+ //
+ // FIXME: Currently, these are never generated (see code below). I cannot
+ // find a case where they are actually emitted.
+ Type = RIT_Vanilla;
+ } else {
+ // Check whether we need an external or internal relocation.
+ if (doesSymbolRequireExternRelocation(SD)) {
+ IsExtern = 1;
+ Index = SD->getIndex();
+ // For external relocations, make sure to offset the fixup value to
+ // compensate for the addend of the symbol address, if it was
+ // undefined. This occurs with weak definitions, for example.
+ if (!SD->Symbol->isUndefined())
+ FixedValue -= Layout.getSymbolAddress(SD);
+ } else {
+ // The index is the section ordinal (1-based).
+ Index = SD->getFragment()->getParent()->getOrdinal() + 1;
+ }
+
+ Type = RIT_Vanilla;
+ }
+
+ // struct relocation_info (8 bytes)
+ MachRelocationEntry MRE;
+ MRE.Word0 = FixupOffset;
+ MRE.Word1 = ((Index << 0) |
+ (IsPCRel << 24) |
+ (Log2Size << 25) |
+ (IsExtern << 27) |
+ (Type << 28));
+ Relocations[Fragment->getParent()].push_back(MRE);
+ }
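Word1 of a plain (non-scattered) entry, as built here and in the x86_64 path earlier, packs the symbol or section number into bits 0-23, the pcrel flag into bit 24, log2 of the size into bits 25-26, the extern flag into bit 27, and the type into bits 28-31. A small round-trip sketch with hypothetical field values:

    #include <cstdint>
    #include <cstdio>

    // Pack a non-scattered relocation Word1 exactly as done above, then decode
    // it. The values model a 4-byte PC-relative vanilla reloc against section 3.
    int main() {
      unsigned Index = 3, IsPCRel = 1, Log2Size = 2, IsExtern = 0, Type = 0;
      uint32_t Word1 = (Index << 0) | (IsPCRel << 24) | (Log2Size << 25) |
                       (IsExtern << 27) | (Type << 28);
      std::printf("symbolnum=%u pcrel=%u length=%u extern=%u type=%u\n",
                  Word1 & 0xFFFFFF, (Word1 >> 24) & 1, (Word1 >> 25) & 3,
                  (Word1 >> 27) & 1, (Word1 >> 28) & 0xF);
      return 0;
    }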
+
+ void BindIndirectSymbols(MCAssembler &Asm) {
+ // This is the point where 'as' creates actual symbols for indirect symbols
+ // (in the following two passes). It would be easier for us to do this
+ // sooner when we see the attribute, but that makes getting the order in the
+ // symbol table much more complicated than it is worth.
+ //
+ // FIXME: Revisit this when the dust settles.
+
+ // Bind non-lazy symbol pointers first.
+ unsigned IndirectIndex = 0;
+ for (MCAssembler::indirect_symbol_iterator it = Asm.indirect_symbol_begin(),
+ ie = Asm.indirect_symbol_end(); it != ie; ++it, ++IndirectIndex) {
+ const MCSectionMachO &Section =
+ cast<MCSectionMachO>(it->SectionData->getSection());
+
+ if (Section.getType() != MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS)
+ continue;
+
+ // Initialize the section indirect symbol base, if necessary.
+ if (!IndirectSymBase.count(it->SectionData))
+ IndirectSymBase[it->SectionData] = IndirectIndex;
+
+ Asm.getOrCreateSymbolData(*it->Symbol);
+ }
+
+ // Then lazy symbol pointers and symbol stubs.
+ IndirectIndex = 0;
+ for (MCAssembler::indirect_symbol_iterator it = Asm.indirect_symbol_begin(),
+ ie = Asm.indirect_symbol_end(); it != ie; ++it, ++IndirectIndex) {
+ const MCSectionMachO &Section =
+ cast<MCSectionMachO>(it->SectionData->getSection());
+
+ if (Section.getType() != MCSectionMachO::S_LAZY_SYMBOL_POINTERS &&
+ Section.getType() != MCSectionMachO::S_SYMBOL_STUBS)
+ continue;
+
+ // Initialize the section indirect symbol base, if necessary.
+ if (!IndirectSymBase.count(it->SectionData))
+ IndirectSymBase[it->SectionData] = IndirectIndex;
+
+ // Set the symbol type to undefined lazy, but only on construction.
+ //
+ // FIXME: Do not hardcode.
+ bool Created;
+ MCSymbolData &Entry = Asm.getOrCreateSymbolData(*it->Symbol, &Created);
+ if (Created)
+ Entry.setFlags(Entry.getFlags() | 0x0001);
+ }
+ }
+
+ /// ComputeSymbolTable - Compute the symbol table data
+ ///
+ /// \param StringTable [out] - The string table data.
+ /// \param StringIndexMap [out] - Map from symbol names to offsets in the
+ /// string table.
+ void ComputeSymbolTable(MCAssembler &Asm, SmallString<256> &StringTable,
+ std::vector<MachSymbolData> &LocalSymbolData,
+ std::vector<MachSymbolData> &ExternalSymbolData,
+ std::vector<MachSymbolData> &UndefinedSymbolData) {
+ // Build section lookup table.
+ DenseMap<const MCSection*, uint8_t> SectionIndexMap;
+ unsigned Index = 1;
+ for (MCAssembler::iterator it = Asm.begin(),
+ ie = Asm.end(); it != ie; ++it, ++Index)
+ SectionIndexMap[&it->getSection()] = Index;
+ assert(Index <= 256 && "Too many sections!");
+
+ // Index 0 is always the empty string.
+ StringMap<uint64_t> StringIndexMap;
+ StringTable += '\x00';
+
+ // Build the symbol arrays and the string table, but only for non-local
+ // symbols.
+ //
+ // The particular order in which we collect the symbols, create the string
+ // table, and then sort the symbols is chosen to match 'as'. Even though it
+ // doesn't matter for correctness, this is important for letting us diff .o
+ // files.
+ for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
+ ie = Asm.symbol_end(); it != ie; ++it) {
+ const MCSymbol &Symbol = it->getSymbol();
+
+ // Ignore non-linker visible symbols.
+ if (!Asm.isSymbolLinkerVisible(it->getSymbol()))
+ continue;
+
+ if (!it->isExternal() && !Symbol.isUndefined())
+ continue;
+
+ uint64_t &Entry = StringIndexMap[Symbol.getName()];
+ if (!Entry) {
+ Entry = StringTable.size();
+ StringTable += Symbol.getName();
+ StringTable += '\x00';
+ }
+
+ MachSymbolData MSD;
+ MSD.SymbolData = it;
+ MSD.StringIndex = Entry;
+
+ if (Symbol.isUndefined()) {
+ MSD.SectionIndex = 0;
+ UndefinedSymbolData.push_back(MSD);
+ } else if (Symbol.isAbsolute()) {
+ MSD.SectionIndex = 0;
+ ExternalSymbolData.push_back(MSD);
+ } else {
+ MSD.SectionIndex = SectionIndexMap.lookup(&Symbol.getSection());
+ assert(MSD.SectionIndex && "Invalid section index!");
+ ExternalSymbolData.push_back(MSD);
+ }
+ }
+
+ // Now add the data for local symbols.
+ for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
+ ie = Asm.symbol_end(); it != ie; ++it) {
+ const MCSymbol &Symbol = it->getSymbol();
+
+ // Ignore non-linker visible symbols.
+ if (!Asm.isSymbolLinkerVisible(it->getSymbol()))
+ continue;
+
+ if (it->isExternal() || Symbol.isUndefined())
+ continue;
+
+ uint64_t &Entry = StringIndexMap[Symbol.getName()];
+ if (!Entry) {
+ Entry = StringTable.size();
+ StringTable += Symbol.getName();
+ StringTable += '\x00';
+ }
+
+ MachSymbolData MSD;
+ MSD.SymbolData = it;
+ MSD.StringIndex = Entry;
+
+ if (Symbol.isAbsolute()) {
+ MSD.SectionIndex = 0;
+ LocalSymbolData.push_back(MSD);
+ } else {
+ MSD.SectionIndex = SectionIndexMap.lookup(&Symbol.getSection());
+ assert(MSD.SectionIndex && "Invalid section index!");
+ LocalSymbolData.push_back(MSD);
+ }
+ }
+
+ // External and undefined symbols are required to be in lexicographic order.
+ std::sort(ExternalSymbolData.begin(), ExternalSymbolData.end());
+ std::sort(UndefinedSymbolData.begin(), UndefinedSymbolData.end());
+
+ // Set the symbol indices.
+ Index = 0;
+ for (unsigned i = 0, e = LocalSymbolData.size(); i != e; ++i)
+ LocalSymbolData[i].SymbolData->setIndex(Index++);
+ for (unsigned i = 0, e = ExternalSymbolData.size(); i != e; ++i)
+ ExternalSymbolData[i].SymbolData->setIndex(Index++);
+ for (unsigned i = 0, e = UndefinedSymbolData.size(); i != e; ++i)
+ UndefinedSymbolData[i].SymbolData->setIndex(Index++);
+
+ // The string table is padded to a multiple of 4.
+ while (StringTable.size() % 4)
+ StringTable += '\x00';
+ }
+
+ void ExecutePostLayoutBinding(MCAssembler &Asm) {
+ // Create symbol data for any indirect symbols.
+ BindIndirectSymbols(Asm);
+
+ // Compute symbol table information and bind symbol indices.
+ ComputeSymbolTable(Asm, StringTable, LocalSymbolData, ExternalSymbolData,
+ UndefinedSymbolData);
+ }
+
+ void WriteObject(const MCAssembler &Asm, const MCAsmLayout &Layout) {
+ unsigned NumSections = Asm.size();
+
+ // The section data starts after the header, the segment load command (with
+ // its section headers), and the symbol table load commands.
+ unsigned NumLoadCommands = 1;
+ uint64_t LoadCommandsSize = Is64Bit ?
+ SegmentLoadCommand64Size + NumSections * Section64Size :
+ SegmentLoadCommand32Size + NumSections * Section32Size;
+
+ // Add the symbol table load command sizes, if used.
+ unsigned NumSymbols = LocalSymbolData.size() + ExternalSymbolData.size() +
+ UndefinedSymbolData.size();
+ if (NumSymbols) {
+ NumLoadCommands += 2;
+ LoadCommandsSize += SymtabLoadCommandSize + DysymtabLoadCommandSize;
+ }
+
+ // Compute the total size of the section data, as well as its file size and
+ // vm size.
+ uint64_t SectionDataStart = (Is64Bit ? Header64Size : Header32Size)
+ + LoadCommandsSize;
+ uint64_t SectionDataSize = 0;
+ uint64_t SectionDataFileSize = 0;
+ uint64_t VMSize = 0;
+ for (MCAssembler::const_iterator it = Asm.begin(),
+ ie = Asm.end(); it != ie; ++it) {
+ const MCSectionData &SD = *it;
+ uint64_t Address = Layout.getSectionAddress(&SD);
+ uint64_t Size = Layout.getSectionSize(&SD);
+ uint64_t FileSize = Layout.getSectionFileSize(&SD);
+
+ VMSize = std::max(VMSize, Address + Size);
+
+ if (Asm.getBackend().isVirtualSection(SD.getSection()))
+ continue;
+
+ SectionDataSize = std::max(SectionDataSize, Address + Size);
+ SectionDataFileSize = std::max(SectionDataFileSize, Address + FileSize);
+ }
+
+ // The section data is padded to 4 bytes.
+ //
+ // FIXME: Is this machine dependent?
+ unsigned SectionDataPadding = OffsetToAlignment(SectionDataFileSize, 4);
+ SectionDataFileSize += SectionDataPadding;
+
+ // Write the prolog, starting with the header and load command...
+ WriteHeader(NumLoadCommands, LoadCommandsSize,
+ Asm.getSubsectionsViaSymbols());
+ WriteSegmentLoadCommand(NumSections, VMSize,
+ SectionDataStart, SectionDataSize);
+
+ // ... and then the section headers.
+ uint64_t RelocTableEnd = SectionDataStart + SectionDataFileSize;
+ for (MCAssembler::const_iterator it = Asm.begin(),
+ ie = Asm.end(); it != ie; ++it) {
+ std::vector<MachRelocationEntry> &Relocs = Relocations[it];
+ unsigned NumRelocs = Relocs.size();
+ uint64_t SectionStart = SectionDataStart + Layout.getSectionAddress(it);
+ WriteSection(Asm, Layout, *it, SectionStart, RelocTableEnd, NumRelocs);
+ RelocTableEnd += NumRelocs * RelocationInfoSize;
+ }
+
+ // Write the symbol table load command, if used.
+ if (NumSymbols) {
+ unsigned FirstLocalSymbol = 0;
+ unsigned NumLocalSymbols = LocalSymbolData.size();
+ unsigned FirstExternalSymbol = FirstLocalSymbol + NumLocalSymbols;
+ unsigned NumExternalSymbols = ExternalSymbolData.size();
+ unsigned FirstUndefinedSymbol = FirstExternalSymbol + NumExternalSymbols;
+ unsigned NumUndefinedSymbols = UndefinedSymbolData.size();
+ unsigned NumIndirectSymbols = Asm.indirect_symbol_size();
+ unsigned NumSymTabSymbols =
+ NumLocalSymbols + NumExternalSymbols + NumUndefinedSymbols;
+ uint64_t IndirectSymbolSize = NumIndirectSymbols * 4;
+ uint64_t IndirectSymbolOffset = 0;
+
+ // If used, the indirect symbols are written after the section data.
+ if (NumIndirectSymbols)
+ IndirectSymbolOffset = RelocTableEnd;
+
+ // The symbol table is written after the indirect symbol data.
+ uint64_t SymbolTableOffset = RelocTableEnd + IndirectSymbolSize;
+
+ // The string table is written after the symbol table.
+ uint64_t StringTableOffset =
+ SymbolTableOffset + NumSymTabSymbols * (Is64Bit ? Nlist64Size :
+ Nlist32Size);
+ WriteSymtabLoadCommand(SymbolTableOffset, NumSymTabSymbols,
+ StringTableOffset, StringTable.size());
+
+ WriteDysymtabLoadCommand(FirstLocalSymbol, NumLocalSymbols,
+ FirstExternalSymbol, NumExternalSymbols,
+ FirstUndefinedSymbol, NumUndefinedSymbols,
+ IndirectSymbolOffset, NumIndirectSymbols);
+ }
+
+ // Write the actual section data.
+ for (MCAssembler::const_iterator it = Asm.begin(),
+ ie = Asm.end(); it != ie; ++it)
+ Asm.WriteSectionData(it, Layout, Writer);
+
+ // Write the extra padding.
+ WriteZeros(SectionDataPadding);
+
+ // Write the relocation entries.
+ for (MCAssembler::const_iterator it = Asm.begin(),
+ ie = Asm.end(); it != ie; ++it) {
+ // Write the section relocation entries, in reverse order to match 'as'
+ // (approximately, the exact algorithm is more complicated than this).
+ std::vector<MachRelocationEntry> &Relocs = Relocations[it];
+ for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
+ Write32(Relocs[e - i - 1].Word0);
+ Write32(Relocs[e - i - 1].Word1);
+ }
+ }
+
+ // Write the symbol table data, if used.
+ if (NumSymbols) {
+ // Write the indirect symbol entries.
+ for (MCAssembler::const_indirect_symbol_iterator
+ it = Asm.indirect_symbol_begin(),
+ ie = Asm.indirect_symbol_end(); it != ie; ++it) {
+ // Indirect symbols in the non-lazy symbol pointer section have some
+ // special handling.
+ const MCSectionMachO &Section =
+ static_cast<const MCSectionMachO&>(it->SectionData->getSection());
+ if (Section.getType() == MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS) {
+ // If this symbol is defined and internal, mark it as such.
+ if (it->Symbol->isDefined() &&
+ !Asm.getSymbolData(*it->Symbol).isExternal()) {
+ uint32_t Flags = ISF_Local;
+ if (it->Symbol->isAbsolute())
+ Flags |= ISF_Absolute;
+ Write32(Flags);
+ continue;
+ }
+ }
+
+ Write32(Asm.getSymbolData(*it->Symbol).getIndex());
+ }
+
+ // FIXME: Check that offsets match computed ones.
+
+ // Write the symbol table entries.
+ for (unsigned i = 0, e = LocalSymbolData.size(); i != e; ++i)
+ WriteNlist(LocalSymbolData[i], Layout);
+ for (unsigned i = 0, e = ExternalSymbolData.size(); i != e; ++i)
+ WriteNlist(ExternalSymbolData[i], Layout);
+ for (unsigned i = 0, e = UndefinedSymbolData.size(); i != e; ++i)
+ WriteNlist(UndefinedSymbolData[i], Layout);
+
+ // Write the string table.
+ OS << StringTable.str();
+ }
+ }
+};
+
+}
+
+MachObjectWriter::MachObjectWriter(raw_ostream &OS,
+ bool Is64Bit,
+ bool IsLittleEndian)
+ : MCObjectWriter(OS, IsLittleEndian)
+{
+ Impl = new MachObjectWriterImpl(this, Is64Bit);
+}
+
+MachObjectWriter::~MachObjectWriter() {
+ delete (MachObjectWriterImpl*) Impl;
+}
+
+void MachObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm) {
+ ((MachObjectWriterImpl*) Impl)->ExecutePostLayoutBinding(Asm);
+}
+
+void MachObjectWriter::RecordRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) {
+ ((MachObjectWriterImpl*) Impl)->RecordRelocation(Asm, Layout, Fragment, Fixup,
+ Target, FixedValue);
+}
+
+void MachObjectWriter::WriteObject(const MCAssembler &Asm,
+ const MCAsmLayout &Layout) {
+ ((MachObjectWriterImpl*) Impl)->WriteObject(Asm, Layout);
+}
diff --git a/libclamav/c++/llvm/lib/MC/Makefile b/libclamav/c++/llvm/lib/MC/Makefile
index a661fa6..bf8b7c0 100644
--- a/libclamav/c++/llvm/lib/MC/Makefile
+++ b/libclamav/c++/llvm/lib/MC/Makefile
@@ -10,7 +10,7 @@
LEVEL = ../..
LIBRARYNAME = LLVMMC
BUILD_ARCHIVE := 1
-PARALLEL_DIRS := MCParser
+PARALLEL_DIRS := MCParser MCDisassembler
include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/lib/MC/TargetAsmBackend.cpp b/libclamav/c++/llvm/lib/MC/TargetAsmBackend.cpp
index 918d272..bbfddbe 100644
--- a/libclamav/c++/llvm/lib/MC/TargetAsmBackend.cpp
+++ b/libclamav/c++/llvm/lib/MC/TargetAsmBackend.cpp
@@ -11,7 +11,10 @@
using namespace llvm;
TargetAsmBackend::TargetAsmBackend(const Target &T)
- : TheTarget(T)
+ : TheTarget(T),
+ HasAbsolutizedSet(false),
+ HasReliableSymbolDifference(false),
+ HasScatteredSymbols(false)
{
}
diff --git a/libclamav/c++/llvm/lib/MC/WinCOFFObjectWriter.cpp b/libclamav/c++/llvm/lib/MC/WinCOFFObjectWriter.cpp
new file mode 100644
index 0000000..eeb2b96
--- /dev/null
+++ b/libclamav/c++/llvm/lib/MC/WinCOFFObjectWriter.cpp
@@ -0,0 +1,774 @@
+//===-- llvm/MC/WinCOFFObjectWriter.cpp -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an implementation of a Win32 COFF object file writer.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "WinCOFFObjectWriter"
+
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/MC/MCSectionCOFF.h"
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+
+#include "llvm/Support/COFF.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#include "llvm/System/TimeValue.h"
+
+#include "../Target/X86/X86FixupKinds.h"
+
+#include <cstdio>
+
+using namespace llvm;
+
+namespace {
+typedef llvm::SmallString<COFF::NameSize> name;
+
+enum AuxiliaryType {
+ ATFunctionDefinition,
+ ATbfAndefSymbol,
+ ATWeakExternal,
+ ATFile,
+ ATSectionDefinition
+};
+
+struct AuxSymbol {
+ AuxiliaryType AuxType;
+ COFF::Auxiliary Aux;
+};
+
+class COFFSymbol {
+public:
+ COFF::symbol Data;
+
+ typedef llvm::SmallVector<AuxSymbol, 1> AuxiliarySymbols;
+
+ name Name;
+ size_t Index;
+ AuxiliarySymbols Aux;
+ COFFSymbol *Other;
+
+ MCSymbolData const *MCData;
+
+ COFFSymbol(llvm::StringRef name, size_t index);
+ size_t size() const;
+ void set_name_offset(uint32_t Offset);
+};
+
+// This class contains staging data for a COFF relocation entry.
+struct COFFRelocation {
+ COFF::relocation Data;
+ COFFSymbol *Symb;
+
+ COFFRelocation() : Symb(NULL) {}
+ static size_t size() { return COFF::RelocationSize; }
+};
+
+typedef std::vector<COFFRelocation> relocations;
+
+class COFFSection {
+public:
+ COFF::section Header;
+
+ std::string Name;
+ size_t Number;
+ MCSectionData const *MCData;
+ COFFSymbol *Symb;
+ relocations Relocations;
+
+ COFFSection(llvm::StringRef name, size_t Index);
+ static size_t size();
+};
+
+// This class holds the COFF string table.
+class StringTable {
+ typedef llvm::StringMap<size_t> map;
+ map Map;
+
+ void update_length();
+public:
+ std::vector<char> Data;
+
+ StringTable();
+ size_t size() const;
+ size_t insert(llvm::StringRef String);
+};
+
+class WinCOFFObjectWriter : public MCObjectWriter {
+public:
+
+ typedef std::vector<COFFSymbol*> symbols;
+ typedef std::vector<COFFSection*> sections;
+
+ typedef StringMap<COFFSymbol *> name_symbol_map;
+ typedef StringMap<COFFSection *> name_section_map;
+
+ typedef DenseMap<MCSymbolData const *, COFFSymbol *> symbol_map;
+ typedef DenseMap<MCSectionData const *, COFFSection *> section_map;
+
+ // Root level file contents.
+ bool Is64Bit;
+ COFF::header Header;
+ sections Sections;
+ symbols Symbols;
+ StringTable Strings;
+
+ // Maps used during object file creation.
+ section_map SectionMap;
+ symbol_map SymbolMap;
+
+ WinCOFFObjectWriter(raw_ostream &OS, bool is64Bit);
+ ~WinCOFFObjectWriter();
+
+ COFFSymbol *createSymbol(llvm::StringRef Name);
+ COFFSection *createSection(llvm::StringRef Name);
+
+ void InitCOFFEntity(COFFSymbol &Symbol);
+ void InitCOFFEntity(COFFSection &Section);
+
+ template <typename object_t, typename list_t>
+ object_t *createCOFFEntity(llvm::StringRef Name, list_t &List);
+
+ void DefineSection(MCSectionData const &SectionData);
+ void DefineSymbol(MCSymbolData const &SymbolData, MCAssembler &Assembler);
+
+ bool ExportSection(COFFSection *S);
+ bool ExportSymbol(MCSymbolData const &SymbolData, MCAssembler &Asm);
+
+ // Entity writing methods.
+
+ void WriteFileHeader(const COFF::header &Header);
+ void WriteSymbol(const COFFSymbol *S);
+ void WriteAuxiliarySymbols(const COFFSymbol::AuxiliarySymbols &S);
+ void WriteSectionHeader(const COFF::section &S);
+ void WriteRelocation(const COFF::relocation &R);
+
+ // MCObjectWriter interface implementation.
+
+ void ExecutePostLayoutBinding(MCAssembler &Asm);
+
+ void RecordRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup,
+ MCValue Target,
+ uint64_t &FixedValue);
+
+ void WriteObject(const MCAssembler &Asm, const MCAsmLayout &Layout);
+};
+}
+
+static inline void write_uint32_le(void *Data, uint32_t const &Value) {
+ uint8_t *Ptr = reinterpret_cast<uint8_t *>(Data);
+ Ptr[0] = (Value & 0x000000FF) >> 0;
+ Ptr[1] = (Value & 0x0000FF00) >> 8;
+ Ptr[2] = (Value & 0x00FF0000) >> 16;
+ Ptr[3] = (Value & 0xFF000000) >> 24;
+}
+
+static inline void write_uint16_le(void *Data, uint16_t const &Value) {
+ uint8_t *Ptr = reinterpret_cast<uint8_t *>(Data);
+ Ptr[0] = (Value & 0x00FF) >> 0;
+ Ptr[1] = (Value & 0xFF00) >> 8;
+}
+
+static inline void write_uint8_le(void *Data, uint8_t const &Value) {
+ uint8_t *Ptr = reinterpret_cast<uint8_t *>(Data);
+ Ptr[0] = (Value & 0xFF) >> 0;
+}
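A quick sanity check of the helpers above (a sketch assuming write_uint32_le is in scope): the least significant byte must land first.

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t Buf[4];
      write_uint32_le(Buf, 0x11223344u);   // helper defined above
      assert(Buf[0] == 0x44 && Buf[1] == 0x33 &&
             Buf[2] == 0x22 && Buf[3] == 0x11);
      return 0;
    }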
+
+//------------------------------------------------------------------------------
+// Symbol class implementation
+
+COFFSymbol::COFFSymbol(llvm::StringRef name, size_t index)
+ : Name(name.begin(), name.end()), Index(-1)
+ , Other(NULL), MCData(NULL) {
+ memset(&Data, 0, sizeof(Data));
+}
+
+size_t COFFSymbol::size() const {
+ return COFF::SymbolSize + (Data.NumberOfAuxSymbols * COFF::SymbolSize);
+}
+
+// In the case that the name does not fit within 8 bytes, the offset
+// into the string table is stored in the last 4 bytes instead, leaving
+// the first 4 bytes as 0.
+void COFFSymbol::set_name_offset(uint32_t Offset) {
+ write_uint32_le(Data.Name + 0, 0);
+ write_uint32_le(Data.Name + 4, Offset);
+}
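The 8-byte name field therefore takes one of two shapes: a short name stored inline, or four zero bytes followed by a little-endian string-table offset. A minimal sketch of both shapes, with a hypothetical offset:

    #include <cassert>
    #include <cstring>

    int main() {
      char Name[8] = {};

      // Short name: copied directly and zero padded.
      std::memcpy(Name, "func", 4);
      assert(Name[0] == 'f' && Name[7] == 0);

      // Long name: the first four bytes stay zero, the last four hold the
      // string-table offset in little-endian order (0x2C is hypothetical).
      std::memset(Name, 0, sizeof(Name));
      Name[4] = 0x2C;                       // low byte of the offset
      assert(Name[0] == 0 && Name[4] == 0x2C);
      return 0;
    }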
+
+//------------------------------------------------------------------------------
+// Section class implementation
+
+COFFSection::COFFSection(llvm::StringRef name, size_t Index)
+ : Name(name), Number(Index + 1)
+ , MCData(NULL), Symb(NULL) {
+ memset(&Header, 0, sizeof(Header));
+}
+
+size_t COFFSection::size() {
+ return COFF::SectionSize;
+}
+
+//------------------------------------------------------------------------------
+// StringTable class implementation
+
+/// Write the length of the string table into Data.
+/// The length of the string table includes the uint32 length header itself.
+void StringTable::update_length() {
+ write_uint32_le(&Data.front(), Data.size());
+}
+
+StringTable::StringTable() {
+ // The string table data begins with the length of the entire string table
+ // including the length header. Allocate space for this header.
+ Data.resize(4);
+}
+
+size_t StringTable::size() const {
+ return Data.size();
+}
+
+/// Add String to the table iff it is not already there.
+/// @returns the offset into the string table at which the string is located.
+size_t StringTable::insert(llvm::StringRef String) {
+ map::iterator i = Map.find(String);
+
+ if (i != Map.end())
+ return i->second;
+
+ size_t Offset = Data.size();
+
+ // Insert string data into string table.
+ Data.insert(Data.end(), String.begin(), String.end());
+ Data.push_back('\0');
+
+ // Put a reference to it in the map.
+ Map[String] = Offset;
+
+ // Update the internal length field.
+ update_length();
+
+ return Offset;
+}
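Because the table begins with its own 4-byte length header, the first string inserted lands at offset 4, and inserting the same string again returns the same offset. A usage sketch, assuming the StringTable class above is in scope:

    #include <cassert>

    void stringTableExample() {
      StringTable Strings;
      size_t First = Strings.insert("longer_than_eight_chars");
      size_t Again = Strings.insert("longer_than_eight_chars");
      assert(First == 4 && Again == First);
      // 4-byte header + string bytes + the trailing NUL pushed by insert().
      assert(Strings.size() == 4 + sizeof("longer_than_eight_chars"));
    }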
+
+//------------------------------------------------------------------------------
+// WinCOFFObjectWriter class implementation
+
+WinCOFFObjectWriter::WinCOFFObjectWriter(raw_ostream &OS, bool is64Bit)
+ : MCObjectWriter(OS, true)
+ , Is64Bit(is64Bit) {
+ memset(&Header, 0, sizeof(Header));
+
+ Header.Machine = Is64Bit ? COFF::IMAGE_FILE_MACHINE_AMD64
+ : COFF::IMAGE_FILE_MACHINE_I386;
+}
+
+WinCOFFObjectWriter::~WinCOFFObjectWriter() {
+ for (symbols::iterator I = Symbols.begin(), E = Symbols.end(); I != E; ++I)
+ delete *I;
+ for (sections::iterator I = Sections.begin(), E = Sections.end(); I != E; ++I)
+ delete *I;
+}
+
+COFFSymbol *WinCOFFObjectWriter::createSymbol(llvm::StringRef Name) {
+ return createCOFFEntity<COFFSymbol>(Name, Symbols);
+}
+
+COFFSection *WinCOFFObjectWriter::createSection(llvm::StringRef Name) {
+ return createCOFFEntity<COFFSection>(Name, Sections);
+}
+
+/// This function initializes a symbol by entering its name into the string
+/// table if it is too long to fit in the symbol record's fixed-size name field.
+void WinCOFFObjectWriter::InitCOFFEntity(COFFSymbol &S) {
+ if (S.Name.size() > COFF::NameSize) {
+ size_t StringTableEntry = Strings.insert(S.Name.c_str());
+
+ S.set_name_offset(StringTableEntry);
+ } else
+ memcpy(S.Data.Name, S.Name.c_str(), S.Name.size());
+}
+
+/// This function initializes a section by entering its name into the string
+/// table if it is too long to fit in the section header's fixed-size name field.
+void WinCOFFObjectWriter::InitCOFFEntity(COFFSection &S) {
+ if (S.Name.size() > COFF::NameSize) {
+ size_t StringTableEntry = Strings.insert(S.Name.c_str());
+
+ // FIXME: Why 999999? The number never appears in the spec; presumably the
+ // printed "/offset" value must fit into the 8-byte S.Header.Name field.
+ // That would allow 7 digits (9999999), but the spec does not say whether
+ // the entry may be left unterminated in that case, so limiting the offset
+ // to 6 digits leaves room for a null terminator and seems the safest choice.
+ if (StringTableEntry > 999999)
+ report_fatal_error("COFF string table is greater than 999999 bytes.");
+
+ sprintf(S.Header.Name, "/%d", (unsigned)StringTableEntry);
+ } else
+ memcpy(S.Header.Name, S.Name.c_str(), S.Name.size());
+}
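A section whose name is longer than eight bytes therefore gets a header name of '/' followed by the decimal string-table offset. A tiny sketch with a hypothetical offset:

    #include <cstdio>

    // Sketch of the "/<offset>" long-name convention used above.
    int main() {
      char HeaderName[8] = {};
      unsigned StringTableEntry = 44;           // hypothetical offset
      std::sprintf(HeaderName, "/%u", StringTableEntry);
      std::printf("%s\n", HeaderName);          // prints "/44"
      return 0;
    }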
+
+/// A template used to lookup or create a symbol/section, and initialize it if
+/// needed.
+template <typename object_t, typename list_t>
+object_t *WinCOFFObjectWriter::createCOFFEntity(llvm::StringRef Name,
+ list_t &List) {
+ object_t *Object = new object_t(Name, List.size());
+
+ InitCOFFEntity(*Object);
+
+ List.push_back(Object);
+
+ return Object;
+}
+
+/// This function takes a section data object from the assembler
+/// and creates the associated COFF section staging object.
+void WinCOFFObjectWriter::DefineSection(MCSectionData const &SectionData) {
+ // FIXME: Not sure how to verify this (at least in a debug build).
+ MCSectionCOFF const &Sec =
+ static_cast<MCSectionCOFF const &>(SectionData.getSection());
+
+ COFFSection *coff_section = createSection(Sec.getSectionName());
+ COFFSymbol *coff_symbol = createSymbol(Sec.getSectionName());
+
+ coff_section->Symb = coff_symbol;
+ coff_symbol->Data.StorageClass = COFF::IMAGE_SYM_CLASS_STATIC;
+ coff_symbol->Data.SectionNumber = coff_section->Number;
+
+ // In this case the auxiliary symbol is a Section Definition.
+ coff_symbol->Aux.resize(1);
+ memset(&coff_symbol->Aux[0], 0, sizeof(coff_symbol->Aux[0]));
+ coff_symbol->Aux[0].AuxType = ATSectionDefinition;
+ coff_symbol->Aux[0].Aux.SectionDefinition.Number = coff_section->Number;
+ coff_symbol->Aux[0].Aux.SectionDefinition.Selection = Sec.getSelection();
+
+ coff_section->Header.Characteristics = Sec.getCharacteristics();
+
+ uint32_t &Characteristics = coff_section->Header.Characteristics;
+ switch (SectionData.getAlignment()) {
+ case 1: Characteristics |= COFF::IMAGE_SCN_ALIGN_1BYTES; break;
+ case 2: Characteristics |= COFF::IMAGE_SCN_ALIGN_2BYTES; break;
+ case 4: Characteristics |= COFF::IMAGE_SCN_ALIGN_4BYTES; break;
+ case 8: Characteristics |= COFF::IMAGE_SCN_ALIGN_8BYTES; break;
+ case 16: Characteristics |= COFF::IMAGE_SCN_ALIGN_16BYTES; break;
+ case 32: Characteristics |= COFF::IMAGE_SCN_ALIGN_32BYTES; break;
+ case 64: Characteristics |= COFF::IMAGE_SCN_ALIGN_64BYTES; break;
+ case 128: Characteristics |= COFF::IMAGE_SCN_ALIGN_128BYTES; break;
+ case 256: Characteristics |= COFF::IMAGE_SCN_ALIGN_256BYTES; break;
+ case 512: Characteristics |= COFF::IMAGE_SCN_ALIGN_512BYTES; break;
+ case 1024: Characteristics |= COFF::IMAGE_SCN_ALIGN_1024BYTES; break;
+ case 2048: Characteristics |= COFF::IMAGE_SCN_ALIGN_2048BYTES; break;
+ case 4096: Characteristics |= COFF::IMAGE_SCN_ALIGN_4096BYTES; break;
+ case 8192: Characteristics |= COFF::IMAGE_SCN_ALIGN_8192BYTES; break;
+ default:
+ llvm_unreachable("unsupported section alignment");
+ }
+
+ // Bind internal COFF section to MC section.
+ coff_section->MCData = &SectionData;
+ SectionMap[&SectionData] = coff_section;
+}
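The alignment switch maps each power of two to its IMAGE_SCN_ALIGN_* flag. Assuming the usual PE/COFF encoding, in which bits 20-23 hold log2(alignment) + 1, the same value can be computed directly, as this sketch does:

    #include <cassert>
    #include <cstdint>

    // Compute the alignment characteristics flag, assuming the PE/COFF
    // encoding of log2(alignment) + 1 in bits 20-23.
    static uint32_t alignmentFlag(unsigned Alignment) {
      unsigned Log2 = 0;
      while ((1u << Log2) < Alignment)
        ++Log2;                                  // Alignment is a power of two
      return (Log2 + 1) << 20;
    }

    int main() {
      assert(alignmentFlag(1)  == 0x00100000);   // IMAGE_SCN_ALIGN_1BYTES
      assert(alignmentFlag(16) == 0x00500000);   // IMAGE_SCN_ALIGN_16BYTES
      return 0;
    }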
+
+/// This function takes a symbol data object from the assembler
+/// and creates the associated COFF symbol staging object.
+void WinCOFFObjectWriter::DefineSymbol(MCSymbolData const &SymbolData,
+ MCAssembler &Assembler) {
+ COFFSymbol *coff_symbol = createSymbol(SymbolData.getSymbol().getName());
+
+ coff_symbol->Data.Type = (SymbolData.getFlags() & 0x0000FFFF) >> 0;
+ coff_symbol->Data.StorageClass = (SymbolData.getFlags() & 0x00FF0000) >> 16;
+
+ // If no storage class was specified in the streamer, define it here.
+ if (coff_symbol->Data.StorageClass == 0) {
+ bool external = SymbolData.isExternal() || (SymbolData.Fragment == NULL);
+
+ coff_symbol->Data.StorageClass =
+ external ? COFF::IMAGE_SYM_CLASS_EXTERNAL : COFF::IMAGE_SYM_CLASS_STATIC;
+ }
+
+ if (SymbolData.getFlags() & COFF::SF_WeakReference) {
+ coff_symbol->Data.StorageClass = COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL;
+
+ const MCExpr *Value = SymbolData.getSymbol().getVariableValue();
+
+ // FIXME: This assert message isn't very good.
+ assert(Value->getKind() == MCExpr::SymbolRef &&
+ "Value must be a SymbolRef!");
+
+ const MCSymbolRefExpr *SymbolRef =
+ static_cast<const MCSymbolRefExpr *>(Value);
+
+ const MCSymbolData &OtherSymbolData =
+ Assembler.getSymbolData(SymbolRef->getSymbol());
+
+ // FIXME: This assert message isn't very good.
+ assert(SymbolMap.find(&OtherSymbolData) != SymbolMap.end() &&
+ "OtherSymbolData must be in the symbol map!");
+
+ coff_symbol->Other = SymbolMap[&OtherSymbolData];
+
+ // Setup the Weak External auxiliary symbol.
+ coff_symbol->Aux.resize(1);
+ memset(&coff_symbol->Aux[0], 0, sizeof(coff_symbol->Aux[0]));
+ coff_symbol->Aux[0].AuxType = ATWeakExternal;
+ coff_symbol->Aux[0].Aux.WeakExternal.TagIndex = 0;
+ coff_symbol->Aux[0].Aux.WeakExternal.Characteristics =
+ COFF::IMAGE_WEAK_EXTERN_SEARCH_LIBRARY;
+ }
+
+ // Bind internal COFF symbol to MC symbol.
+ coff_symbol->MCData = &SymbolData;
+ SymbolMap[&SymbolData] = coff_symbol;
+}
+
+bool WinCOFFObjectWriter::ExportSection(COFFSection *S) {
+ return (S->Header.Characteristics
+ & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA) == 0;
+}
+
+bool WinCOFFObjectWriter::ExportSymbol(MCSymbolData const &SymbolData,
+ MCAssembler &Asm) {
+ // This doesn't seem right: strings referred to from the .data section still
+ // need symbols so they can be linked to code in the .text section.
+
+ // return Asm.isSymbolLinkerVisible (&SymbolData);
+
+ // For now, all symbols are exported; the linker will sort it out for us.
+ return true;
+}
+
+//------------------------------------------------------------------------------
+// entity writing methods
+
+void WinCOFFObjectWriter::WriteFileHeader(const COFF::header &Header) {
+ WriteLE16(Header.Machine);
+ WriteLE16(Header.NumberOfSections);
+ WriteLE32(Header.TimeDateStamp);
+ WriteLE32(Header.PointerToSymbolTable);
+ WriteLE32(Header.NumberOfSymbols);
+ WriteLE16(Header.SizeOfOptionalHeader);
+ WriteLE16(Header.Characteristics);
+}
+
+void WinCOFFObjectWriter::WriteSymbol(const COFFSymbol *S) {
+ WriteBytes(StringRef(S->Data.Name, COFF::NameSize));
+ WriteLE32(S->Data.Value);
+ WriteLE16(S->Data.SectionNumber);
+ WriteLE16(S->Data.Type);
+ Write8(S->Data.StorageClass);
+ Write8(S->Data.NumberOfAuxSymbols);
+ WriteAuxiliarySymbols(S->Aux);
+}
+
+void WinCOFFObjectWriter::WriteAuxiliarySymbols(
+ const COFFSymbol::AuxiliarySymbols &S) {
+ for(COFFSymbol::AuxiliarySymbols::const_iterator i = S.begin(), e = S.end();
+ i != e; ++i) {
+ switch(i->AuxType) {
+ case ATFunctionDefinition:
+ WriteLE32(i->Aux.FunctionDefinition.TagIndex);
+ WriteLE32(i->Aux.FunctionDefinition.TotalSize);
+ WriteLE32(i->Aux.FunctionDefinition.PointerToLinenumber);
+ WriteLE32(i->Aux.FunctionDefinition.PointerToNextFunction);
+ WriteZeros(sizeof(i->Aux.FunctionDefinition.unused));
+ break;
+ case ATbfAndefSymbol:
+ WriteZeros(sizeof(i->Aux.bfAndefSymbol.unused1));
+ WriteLE16(i->Aux.bfAndefSymbol.Linenumber);
+ WriteZeros(sizeof(i->Aux.bfAndefSymbol.unused2));
+ WriteLE32(i->Aux.bfAndefSymbol.PointerToNextFunction);
+ WriteZeros(sizeof(i->Aux.bfAndefSymbol.unused3));
+ break;
+ case ATWeakExternal:
+ WriteLE32(i->Aux.WeakExternal.TagIndex);
+ WriteLE32(i->Aux.WeakExternal.Characteristics);
+ WriteZeros(sizeof(i->Aux.WeakExternal.unused));
+ break;
+ case ATFile:
+ WriteBytes(StringRef(reinterpret_cast<const char *>(i->Aux.File.FileName),
+ sizeof(i->Aux.File.FileName)));
+ break;
+ case ATSectionDefinition:
+ WriteLE32(i->Aux.SectionDefinition.Length);
+ WriteLE16(i->Aux.SectionDefinition.NumberOfRelocations);
+ WriteLE16(i->Aux.SectionDefinition.NumberOfLinenumbers);
+ WriteLE32(i->Aux.SectionDefinition.CheckSum);
+ WriteLE16(i->Aux.SectionDefinition.Number);
+ Write8(i->Aux.SectionDefinition.Selection);
+ WriteZeros(sizeof(i->Aux.SectionDefinition.unused));
+ break;
+ }
+ }
+}
+
+void WinCOFFObjectWriter::WriteSectionHeader(const COFF::section &S) {
+ WriteBytes(StringRef(S.Name, COFF::NameSize));
+
+ WriteLE32(S.VirtualSize);
+ WriteLE32(S.VirtualAddress);
+ WriteLE32(S.SizeOfRawData);
+ WriteLE32(S.PointerToRawData);
+ WriteLE32(S.PointerToRelocations);
+ WriteLE32(S.PointerToLineNumbers);
+ WriteLE16(S.NumberOfRelocations);
+ WriteLE16(S.NumberOfLineNumbers);
+ WriteLE32(S.Characteristics);
+}
+
+void WinCOFFObjectWriter::WriteRelocation(const COFF::relocation &R) {
+ WriteLE32(R.VirtualAddress);
+ WriteLE32(R.SymbolTableIndex);
+ WriteLE16(R.Type);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// MCObjectWriter interface implementations
+
+void WinCOFFObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm) {
+ // "Define" each section & symbol. This creates section & symbol
+ // entries in the staging area and gives them their final indexes.
+
+ for (MCAssembler::const_iterator i = Asm.begin(), e = Asm.end(); i != e; i++)
+ DefineSection(*i);
+
+ for (MCAssembler::const_symbol_iterator i = Asm.symbol_begin(),
+ e = Asm.symbol_end(); i != e; i++) {
+ if (ExportSymbol(*i, Asm))
+ DefineSymbol(*i, Asm);
+ }
+}
+
+void WinCOFFObjectWriter::RecordRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup,
+ MCValue Target,
+ uint64_t &FixedValue) {
+ assert(Target.getSymA() != NULL && "Relocation must reference a symbol!");
+
+ const MCSymbol *A = &Target.getSymA()->getSymbol();
+ MCSymbolData &A_SD = Asm.getSymbolData(*A);
+
+ MCSectionData const *SectionData = Fragment->getParent();
+
+ // Mark this symbol as requiring an entry in the symbol table.
+ assert(SectionMap.find(SectionData) != SectionMap.end() &&
+ "Section must already have been defined in ExecutePostLayoutBinding!");
+ assert(SymbolMap.find(&A_SD) != SymbolMap.end() &&
+ "Symbol must already have been defined in ExecutePostLayoutBinding!");
+
+ COFFSection *coff_section = SectionMap[SectionData];
+ COFFSymbol *coff_symbol = SymbolMap[&A_SD];
+
+ if (Target.getSymB()) {
+ const MCSymbol *B = &Target.getSymB()->getSymbol();
+ MCSymbolData &B_SD = Asm.getSymbolData(*B);
+
+ FixedValue = Layout.getSymbolAddress(&A_SD) - Layout.getSymbolAddress(&B_SD);
+
+ // When we have both SymbA and SymbB, we only need the delta between the two
+ // symbols; FixedValue (set above) already accounts for it, so skip
+ // recording a relocation entry.
+ return;
+ } else {
+ FixedValue = Target.getConstant();
+ }
+
+ COFFRelocation Reloc;
+
+ Reloc.Data.SymbolTableIndex = 0;
+ Reloc.Data.VirtualAddress = Layout.getFragmentOffset(Fragment);
+ Reloc.Symb = coff_symbol;
+
+ Reloc.Data.VirtualAddress += Fixup.getOffset();
+
+ switch (Fixup.getKind()) {
+ case X86::reloc_pcrel_4byte:
+ case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_movq_load:
+ Reloc.Data.Type = Is64Bit ? COFF::IMAGE_REL_AMD64_REL32
+ : COFF::IMAGE_REL_I386_REL32;
+ // FIXME: Can anyone explain what this does other than adjust for the size
+ // of the offset?
+ FixedValue += 4;
+ break;
+ case FK_Data_4:
+ Reloc.Data.Type = Is64Bit ? COFF::IMAGE_REL_AMD64_ADDR32
+ : COFF::IMAGE_REL_I386_DIR32;
+ break;
+ case FK_Data_8:
+ if (Is64Bit)
+ Reloc.Data.Type = COFF::IMAGE_REL_AMD64_ADDR64;
+ else
+ llvm_unreachable("unsupported relocation type");
+ break;
+ default:
+ llvm_unreachable("unsupported relocation type");
+ }
+
+ coff_section->Relocations.push_back(Reloc);
+}
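One plausible reading of the FixedValue += 4 above (the FIXME leaves the question open): an x86 rel32 displacement is measured from the end of the 4-byte field, while the fixup is applied at its start, so the stored value is biased by the field size. A worked arithmetic sketch with made-up addresses:

    #include <cassert>
    #include <cstdint>

    // Illustration only: FieldStart is where the 4-byte displacement lives,
    // NextInsn is what the CPU measures from, Target is a hypothetical branch
    // target.
    int main() {
      uint64_t FieldStart = 0x10;
      uint64_t NextInsn   = FieldStart + 4;
      uint64_t Target     = 0x40;
      int64_t Displacement = (int64_t)(Target - NextInsn);
      // Biasing the value applied at FieldStart by +4 yields the same result.
      assert((int64_t)(Target - FieldStart) - 4 == Displacement);
      return 0;
    }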
+
+void WinCOFFObjectWriter::WriteObject(const MCAssembler &Asm,
+ const MCAsmLayout &Layout) {
+ // Assign symbol and section indexes and offsets.
+
+ Header.NumberOfSymbols = 0;
+
+ for (symbols::iterator i = Symbols.begin(), e = Symbols.end(); i != e; i++) {
+ COFFSymbol *coff_symbol = *i;
+ MCSymbolData const *SymbolData = coff_symbol->MCData;
+
+ coff_symbol->Index = Header.NumberOfSymbols++;
+
+ // Update section number & offset for symbols that have them.
+ if ((SymbolData != NULL) && (SymbolData->Fragment != NULL)) {
+ COFFSection *coff_section = SectionMap[SymbolData->Fragment->getParent()];
+
+ coff_symbol->Data.SectionNumber = coff_section->Number;
+ coff_symbol->Data.Value = Layout.getFragmentOffset(SymbolData->Fragment)
+ + SymbolData->Offset;
+ }
+
+ // Update auxiliary symbol info.
+ coff_symbol->Data.NumberOfAuxSymbols = coff_symbol->Aux.size();
+ Header.NumberOfSymbols += coff_symbol->Data.NumberOfAuxSymbols;
+ }
+
+ // Fixup weak external references.
+ for (symbols::iterator i = Symbols.begin(), e = Symbols.end(); i != e; i++) {
+ COFFSymbol *symb = *i;
+
+ if (symb->Other != NULL) {
+ assert(symb->Aux.size() == 1 &&
+ "Symbol must contain one aux symbol!");
+ assert(symb->Aux[0].AuxType == ATWeakExternal &&
+ "Symbol's aux symbol must be a Weak External!");
+ symb->Aux[0].Aux.WeakExternal.TagIndex = symb->Other->Index;
+ }
+ }
+
+ // Assign file offsets to COFF object file structures.
+
+ unsigned offset = 0;
+
+ offset += COFF::HeaderSize;
+ offset += COFF::SectionSize * Asm.size();
+
+ Header.NumberOfSections = Sections.size();
+
+ for (MCAssembler::const_iterator i = Asm.begin(),
+ e = Asm.end();
+ i != e; i++) {
+ COFFSection *Sec = SectionMap[i];
+
+ Sec->Header.SizeOfRawData = Layout.getSectionFileSize(i);
+
+ if (ExportSection(Sec)) {
+ Sec->Header.PointerToRawData = offset;
+
+ offset += Sec->Header.SizeOfRawData;
+ }
+
+ if (Sec->Relocations.size() > 0) {
+ Sec->Header.NumberOfRelocations = Sec->Relocations.size();
+ Sec->Header.PointerToRelocations = offset;
+
+ offset += COFF::RelocationSize * Sec->Relocations.size();
+
+ for (relocations::iterator cr = Sec->Relocations.begin(),
+ er = Sec->Relocations.end();
+ cr != er; cr++) {
+ (*cr).Data.SymbolTableIndex = (*cr).Symb->Index;
+ }
+ }
+
+ assert(Sec->Symb->Aux.size() == 1 && "Section's symbol must have one aux!");
+ AuxSymbol &Aux = Sec->Symb->Aux[0];
+ assert(Aux.AuxType == ATSectionDefinition &&
+ "Section's symbol's aux symbol must be a Section Definition!");
+ Aux.Aux.SectionDefinition.Length = Sec->Header.SizeOfRawData;
+ Aux.Aux.SectionDefinition.NumberOfRelocations =
+ Sec->Header.NumberOfRelocations;
+ Aux.Aux.SectionDefinition.NumberOfLinenumbers =
+ Sec->Header.NumberOfLineNumbers;
+ }
+
+ Header.PointerToSymbolTable = offset;
+
+ Header.TimeDateStamp = sys::TimeValue::now().toEpochTime();
+
+ // Write it all to disk...
+ WriteFileHeader(Header);
+
+ {
+ sections::iterator i, ie;
+ MCAssembler::const_iterator j, je;
+
+ for (i = Sections.begin(), ie = Sections.end(); i != ie; i++)
+ WriteSectionHeader((*i)->Header);
+
+ for (i = Sections.begin(), ie = Sections.end(),
+ j = Asm.begin(), je = Asm.end();
+ (i != ie) && (j != je); i++, j++) {
+ if ((*i)->Header.PointerToRawData != 0) {
+ assert(OS.tell() == (*i)->Header.PointerToRawData &&
+ "Section::PointerToRawData is insane!");
+
+ Asm.WriteSectionData(j, Layout, this);
+ }
+
+ if ((*i)->Relocations.size() > 0) {
+ assert(OS.tell() == (*i)->Header.PointerToRelocations &&
+ "Section::PointerToRelocations is insane!");
+
+ for (relocations::const_iterator k = (*i)->Relocations.begin(),
+ ke = (*i)->Relocations.end();
+ k != ke; k++) {
+ WriteRelocation(k->Data);
+ }
+ } else
+ assert((*i)->Header.PointerToRelocations == 0 &&
+ "Section::PointerToRelocations is insane!");
+ }
+ }
+
+ assert(OS.tell() == Header.PointerToSymbolTable &&
+ "Header::PointerToSymbolTable is insane!");
+
+ for (symbols::iterator i = Symbols.begin(), e = Symbols.end(); i != e; i++)
+ WriteSymbol(*i);
+
+ OS.write((char const *)&Strings.Data.front(), Strings.Data.size());
+}
+
+//------------------------------------------------------------------------------
+// WinCOFFObjectWriter factory function
+
+namespace llvm {
+ MCObjectWriter *createWinCOFFObjectWriter(raw_ostream &OS, bool is64Bit) {
+ return new WinCOFFObjectWriter(OS, is64Bit);
+ }
+}
diff --git a/libclamav/c++/llvm/lib/MC/WinCOFFStreamer.cpp b/libclamav/c++/llvm/lib/MC/WinCOFFStreamer.cpp
new file mode 100644
index 0000000..8a194bf
--- /dev/null
+++ b/libclamav/c++/llvm/lib/MC/WinCOFFStreamer.cpp
@@ -0,0 +1,349 @@
+//===-- llvm/MC/WinCOFFStreamer.cpp -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an implementation of a Win32 COFF object file streamer.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "WinCOFFStreamer"
+
+#include "llvm/MC/MCObjectStreamer.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCAsmLayout.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCSectionCOFF.h"
+#include "llvm/Target/TargetRegistry.h"
+#include "llvm/Target/TargetAsmBackend.h"
+#include "llvm/ADT/StringMap.h"
+
+#include "llvm/Support/COFF.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+namespace {
+class WinCOFFStreamer : public MCObjectStreamer {
+public:
+ MCSymbol const *CurSymbol;
+
+ WinCOFFStreamer(MCContext &Context,
+ TargetAsmBackend &TAB,
+ MCCodeEmitter &CE,
+ raw_ostream &OS);
+
+ void AddCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment, bool External);
+
+ // MCStreamer interface
+
+ virtual void EmitLabel(MCSymbol *Symbol);
+ virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
+ virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
+ virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute);
+ virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue);
+ virtual void BeginCOFFSymbolDef(MCSymbol const *Symbol);
+ virtual void EmitCOFFSymbolStorageClass(int StorageClass);
+ virtual void EmitCOFFSymbolType(int Type);
+ virtual void EndCOFFSymbolDef();
+ virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value);
+ virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment);
+ virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size);
+ virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
+ unsigned Size,unsigned ByteAlignment);
+ virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment);
+ virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
+ virtual void EmitValue(const MCExpr *Value, unsigned Size,
+ unsigned AddrSpace);
+ virtual void EmitGPRel32Value(const MCExpr *Value);
+ virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value,
+ unsigned ValueSize, unsigned MaxBytesToEmit);
+ virtual void EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit);
+ virtual void EmitValueToOffset(const MCExpr *Offset, unsigned char Value);
+ virtual void EmitFileDirective(StringRef Filename);
+ virtual void EmitDwarfFileDirective(unsigned FileNo,StringRef Filename);
+ virtual void EmitInstruction(const MCInst &Instruction);
+ virtual void Finish();
+};
+} // end anonymous namespace.
+
+WinCOFFStreamer::WinCOFFStreamer(MCContext &Context,
+ TargetAsmBackend &TAB,
+ MCCodeEmitter &CE,
+ raw_ostream &OS)
+ : MCObjectStreamer(Context, TAB, OS, &CE)
+ , CurSymbol(NULL) {
+}
+
+void WinCOFFStreamer::AddCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment, bool External) {
+ assert(!Symbol->isInSection() && "Symbol must not already have a section!");
+
+ std::string SectionName(".bss$linkonce");
+ SectionName.append(Symbol->getName().begin(), Symbol->getName().end());
+
+ MCSymbolData &SymbolData = getAssembler().getOrCreateSymbolData(*Symbol);
+
+ unsigned Characteristics =
+ COFF::IMAGE_SCN_LNK_COMDAT |
+ COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE;
+
+ int Selection = COFF::IMAGE_COMDAT_SELECT_LARGEST;
+
+ const MCSection *Section = MCStreamer::getContext().getCOFFSection(
+ SectionName, Characteristics, Selection, SectionKind::getBSS());
+
+ MCSectionData &SectionData = getAssembler().getOrCreateSectionData(*Section);
+
+ if (SectionData.getAlignment() < ByteAlignment)
+ SectionData.setAlignment(ByteAlignment);
+
+ SymbolData.setExternal(External);
+
+ Symbol->setSection(*Section);
+
+ if (ByteAlignment != 1)
+ new MCAlignFragment(ByteAlignment, 0, 0, ByteAlignment, &SectionData);
+
+ SymbolData.setFragment(new MCFillFragment(0, 0, Size, &SectionData));
+}
+
+// MCStreamer interface
+
+void WinCOFFStreamer::EmitLabel(MCSymbol *Symbol) {
+ // TODO: This is copied almost exactly from the MachOStreamer. Consider
+ // merging into MCObjectStreamer?
+ assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
+ assert(!Symbol->isVariable() && "Cannot emit a variable symbol!");
+ assert(CurSection && "Cannot emit before setting section!");
+
+ Symbol->setSection(*CurSection);
+
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
+
+ // FIXME: This is wasteful; we don't necessarily need to create a data
+ // fragment. Instead, we should mark the symbol as pointing into the data
+ // fragment if one exists; otherwise we should just queue the label and set
+ // its fragment pointer when we emit the next fragment.
+ MCDataFragment *DF = getOrCreateDataFragment();
+
+ assert(!SD.getFragment() && "Unexpected fragment on symbol data!");
+ SD.setFragment(DF);
+ SD.setOffset(DF->getContents().size());
+}
+
+void WinCOFFStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
+ llvm_unreachable("not implemented");
+}
+
+void WinCOFFStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
+ // TODO: This is exactly the same as MachOStreamer. Consider merging into
+ // MCObjectStreamer.
+ getAssembler().getOrCreateSymbolData(*Symbol);
+ AddValueSymbols(Value);
+ Symbol->setVariableValue(Value);
+}
+
+void WinCOFFStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
+ MCSymbolAttr Attribute) {
+ switch (Attribute) {
+ case MCSA_WeakReference:
+ getAssembler().getOrCreateSymbolData(*Symbol).modifyFlags(
+ COFF::SF_WeakReference,
+ COFF::SF_WeakReference);
+ break;
+
+ case MCSA_Global:
+ getAssembler().getOrCreateSymbolData(*Symbol).setExternal(true);
+ break;
+
+ default:
+ llvm_unreachable("unsupported attribute");
+ break;
+ }
+}
+
+void WinCOFFStreamer::EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
+ llvm_unreachable("not implemented");
+}
+
+void WinCOFFStreamer::BeginCOFFSymbolDef(MCSymbol const *Symbol) {
+ assert(CurSymbol == NULL && "EndCOFFSymbolDef must be called between calls "
+ "to BeginCOFFSymbolDef!");
+ CurSymbol = Symbol;
+}
+
+void WinCOFFStreamer::EmitCOFFSymbolStorageClass(int StorageClass) {
+ assert(CurSymbol != NULL && "BeginCOFFSymbolDef must be called first!");
+ assert((StorageClass & ~0xFF) == 0 && "StorageClass must only have data in "
+ "the first byte!");
+
+ getAssembler().getOrCreateSymbolData(*CurSymbol).modifyFlags(
+ StorageClass << COFF::SF_ClassShift,
+ COFF::SF_ClassMask);
+}
+
+void WinCOFFStreamer::EmitCOFFSymbolType(int Type) {
+ assert(CurSymbol != NULL && "BeginCOFFSymbolDef must be called first!");
+ assert((Type & ~0xFFFF) == 0 && "Type must only have data in the first 2 "
+ "bytes");
+
+ getAssembler().getOrCreateSymbolData(*CurSymbol).modifyFlags(
+ Type << COFF::SF_TypeShift,
+ COFF::SF_TypeMask);
+}
+
+void WinCOFFStreamer::EndCOFFSymbolDef() {
+ assert(CurSymbol != NULL && "BeginCOFFSymbolDef must be called first!");
+ CurSymbol = NULL;
+}
+
+void WinCOFFStreamer::EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
+ llvm_unreachable("not implemented");
+}
+
+void WinCOFFStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) {
+ AddCommonSymbol(Symbol, Size, ByteAlignment, true);
+}
+
+void WinCOFFStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size) {
+ AddCommonSymbol(Symbol, Size, 1, false);
+}
+
+void WinCOFFStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
+ unsigned Size,unsigned ByteAlignment) {
+ llvm_unreachable("not implemented");
+}
+
+void WinCOFFStreamer::EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment) {
+ llvm_unreachable("not implemented");
+}
+
+void WinCOFFStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
+ // TODO: This is copied exactly from the MachOStreamer. Consider merging into
+ // MCObjectStreamer?
+ getOrCreateDataFragment()->getContents().append(Data.begin(), Data.end());
+}
+
+void WinCOFFStreamer::EmitValue(const MCExpr *Value, unsigned Size,
+ unsigned AddrSpace) {
+ assert(AddrSpace == 0 && "Address space must be 0!");
+
+ // TODO: This is copied exactly from the MachOStreamer. Consider merging into
+ // MCObjectStreamer?
+ MCDataFragment *DF = getOrCreateDataFragment();
+
+ // Avoid fixups when possible.
+ int64_t AbsValue;
+ if (AddValueSymbols(Value)->EvaluateAsAbsolute(AbsValue)) {
+ // FIXME: Endianness assumption.
+ for (unsigned i = 0; i != Size; ++i)
+ DF->getContents().push_back(uint8_t(AbsValue >> (i * 8)));
+ } else {
+ DF->addFixup(MCFixup::Create(DF->getContents().size(),
+ AddValueSymbols(Value),
+ MCFixup::getKindForSize(Size)));
+ DF->getContents().resize(DF->getContents().size() + Size, 0);
+ }
+}
+
+void WinCOFFStreamer::EmitGPRel32Value(const MCExpr *Value) {
+ llvm_unreachable("not implemented");
+}
+
+void WinCOFFStreamer::EmitValueToAlignment(unsigned ByteAlignment,
+ int64_t Value,
+ unsigned ValueSize,
+ unsigned MaxBytesToEmit) {
+ // TODO: This is copied exactly from the MachOStreamer. Consider merging into
+ // MCObjectStreamer?
+ if (MaxBytesToEmit == 0)
+ MaxBytesToEmit = ByteAlignment;
+ new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit,
+ getCurrentSectionData());
+
+ // Update the maximum alignment on the current section if necessary.
+ if (ByteAlignment > getCurrentSectionData()->getAlignment())
+ getCurrentSectionData()->setAlignment(ByteAlignment);
+}
+
+void WinCOFFStreamer::EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit) {
+ // TODO: This is copied exactly from the MachOStreamer. Consider merging into
+ // MCObjectStreamer?
+ if (MaxBytesToEmit == 0)
+ MaxBytesToEmit = ByteAlignment;
+ MCAlignFragment *F = new MCAlignFragment(ByteAlignment, 0, 1, MaxBytesToEmit,
+ getCurrentSectionData());
+ F->setEmitNops(true);
+
+ // Update the maximum alignment on the current section if necessary.
+ if (ByteAlignment > getCurrentSectionData()->getAlignment())
+ getCurrentSectionData()->setAlignment(ByteAlignment);
+}
+
+void WinCOFFStreamer::EmitValueToOffset(const MCExpr *Offset,
+ unsigned char Value) {
+ llvm_unreachable("not implemented");
+}
+
+void WinCOFFStreamer::EmitFileDirective(StringRef Filename) {
+ // Ignore this for now; linkers don't care, and proper debug
+ // info will be a much larger effort.
+}
+
+void WinCOFFStreamer::EmitDwarfFileDirective(unsigned FileNo,
+ StringRef Filename) {
+ llvm_unreachable("not implemented");
+}
+
+void WinCOFFStreamer::EmitInstruction(const MCInst &Instruction) {
+ for (unsigned i = 0, e = Instruction.getNumOperands(); i != e; ++i)
+ if (Instruction.getOperand(i).isExpr())
+ AddValueSymbols(Instruction.getOperand(i).getExpr());
+
+ getCurrentSectionData()->setHasInstructions(true);
+
+ MCInstFragment *Fragment =
+ new MCInstFragment(Instruction, getCurrentSectionData());
+
+ raw_svector_ostream VecOS(Fragment->getCode());
+
+ getAssembler().getEmitter().EncodeInstruction(Instruction, VecOS,
+ Fragment->getFixups());
+}
+
+void WinCOFFStreamer::Finish() {
+ MCObjectStreamer::Finish();
+}
+
+namespace llvm
+{
+ MCStreamer *createWinCOFFStreamer(MCContext &Context,
+ TargetAsmBackend &TAB,
+ MCCodeEmitter &CE,
+ raw_ostream &OS,
+ bool RelaxAll) {
+ WinCOFFStreamer *S = new WinCOFFStreamer(Context, TAB, CE, OS);
+ S->getAssembler().setRelaxAll(RelaxAll);
+ return S;
+ }
+}
diff --git a/libclamav/c++/llvm/lib/Support/APFloat.cpp b/libclamav/c++/llvm/lib/Support/APFloat.cpp
index 8f860a6..b87ddf9 100644
--- a/libclamav/c++/llvm/lib/Support/APFloat.cpp
+++ b/libclamav/c++/llvm/lib/Support/APFloat.cpp
@@ -65,7 +65,7 @@ namespace llvm {
pow(5, power) is
power * 815 / (351 * integerPartWidth) + 1
-
+
However, whilst the result may require only this many parts,
because we are multiplying two values to get it, the
multiplication may require an extra part with the excess part
@@ -100,15 +100,15 @@ hexDigitValue(unsigned int c)
unsigned int r;
r = c - '0';
- if(r <= 9)
+ if (r <= 9)
return r;
r = c - 'A';
- if(r <= 5)
+ if (r <= 5)
return r + 10;
r = c - 'a';
- if(r <= 5)
+ if (r <= 5)
return r + 10;
return -1U;
@@ -116,8 +116,8 @@ hexDigitValue(unsigned int c)
static inline void
assertArithmeticOK(const llvm::fltSemantics &semantics) {
- assert(semantics.arithmeticOK
- && "Compile-time arithmetic does not support these semantics");
+ assert(semantics.arithmeticOK &&
+ "Compile-time arithmetic does not support these semantics");
}
/* Return the value of a decimal exponent of the form
@@ -153,6 +153,7 @@ readExponent(StringRef::iterator begin, StringRef::iterator end)
value += absExponent * 10;
if (absExponent >= overlargeExponent) {
absExponent = overlargeExponent;
+ p = end; /* outwit assert below */
break;
}
absExponent = value;
@@ -179,37 +180,37 @@ totalExponent(StringRef::iterator p, StringRef::iterator end,
assert(p != end && "Exponent has no digits");
negative = *p == '-';
- if(*p == '-' || *p == '+') {
+ if (*p == '-' || *p == '+') {
p++;
assert(p != end && "Exponent has no digits");
}
unsignedExponent = 0;
overflow = false;
- for(; p != end; ++p) {
+ for (; p != end; ++p) {
unsigned int value;
value = decDigitValue(*p);
assert(value < 10U && "Invalid character in exponent");
unsignedExponent = unsignedExponent * 10 + value;
- if(unsignedExponent > 65535)
+ if (unsignedExponent > 65535)
overflow = true;
}
- if(exponentAdjustment > 65535 || exponentAdjustment < -65536)
+ if (exponentAdjustment > 65535 || exponentAdjustment < -65536)
overflow = true;
- if(!overflow) {
+ if (!overflow) {
exponent = unsignedExponent;
- if(negative)
+ if (negative)
exponent = -exponent;
exponent += exponentAdjustment;
- if(exponent > 65535 || exponent < -65536)
+ if (exponent > 65535 || exponent < -65536)
overflow = true;
}
- if(overflow)
+ if (overflow)
exponent = negative ? -65536: 65535;
return exponent;
@@ -221,15 +222,15 @@ skipLeadingZeroesAndAnyDot(StringRef::iterator begin, StringRef::iterator end,
{
StringRef::iterator p = begin;
*dot = end;
- while(*p == '0' && p != end)
+ while (*p == '0' && p != end)
p++;
- if(*p == '.') {
+ if (*p == '.') {
*dot = p++;
assert(end - begin != 1 && "Significand has no digits");
- while(*p == '0' && p != end)
+ while (*p == '0' && p != end)
p++;
}
@@ -323,13 +324,13 @@ trailingHexadecimalFraction(StringRef::iterator p, StringRef::iterator end,
/* If the first trailing digit isn't 0 or 8 we can work out the
fraction immediately. */
- if(digitValue > 8)
+ if (digitValue > 8)
return lfMoreThanHalf;
- else if(digitValue < 8 && digitValue > 0)
+ else if (digitValue < 8 && digitValue > 0)
return lfLessThanHalf;
/* Otherwise we need to find the first non-zero digit. */
- while(*p == '0')
+ while (*p == '0')
p++;
assert(p != end && "Invalid trailing hexadecimal fraction!");
@@ -338,7 +339,7 @@ trailingHexadecimalFraction(StringRef::iterator p, StringRef::iterator end,
/* If we ran off the end it is exactly zero or one-half, otherwise
a little more. */
- if(hexDigit == -1U)
+ if (hexDigit == -1U)
return digitValue == 0 ? lfExactlyZero: lfExactlyHalf;
else
return digitValue == 0 ? lfLessThanHalf: lfMoreThanHalf;
@@ -356,12 +357,12 @@ lostFractionThroughTruncation(const integerPart *parts,
lsb = APInt::tcLSB(parts, partCount);
/* Note this is guaranteed true if bits == 0, or LSB == -1U. */
- if(bits <= lsb)
+ if (bits <= lsb)
return lfExactlyZero;
- if(bits == lsb + 1)
+ if (bits == lsb + 1)
return lfExactlyHalf;
- if(bits <= partCount * integerPartWidth
- && APInt::tcExtractBit(parts, bits - 1))
+ if (bits <= partCount * integerPartWidth &&
+ APInt::tcExtractBit(parts, bits - 1))
return lfMoreThanHalf;
return lfLessThanHalf;
@@ -385,10 +386,10 @@ static lostFraction
combineLostFractions(lostFraction moreSignificant,
lostFraction lessSignificant)
{
- if(lessSignificant != lfExactlyZero) {
- if(moreSignificant == lfExactlyZero)
+ if (lessSignificant != lfExactlyZero) {
+ if (moreSignificant == lfExactlyZero)
moreSignificant = lfLessThanHalf;
- else if(moreSignificant == lfExactlyHalf)
+ else if (moreSignificant == lfExactlyHalf)
moreSignificant = lfMoreThanHalf;
}
@@ -468,7 +469,7 @@ powerOf5(integerPart *dst, unsigned int power)
15625, 78125 };
integerPart pow5s[maxPowerOfFiveParts * 2 + 5];
pow5s[0] = 78125 * 5;
-
+
unsigned int partsCount[16] = { 1 };
integerPart scratch[maxPowerOfFiveParts], *p1, *p2, *pow5;
unsigned int result;
@@ -588,14 +589,14 @@ APFloat::initialize(const fltSemantics *ourSemantics)
semantics = ourSemantics;
count = partCount();
- if(count > 1)
+ if (count > 1)
significand.parts = new integerPart[count];
}
void
APFloat::freeSignificand()
{
- if(partCount() > 1)
+ if (partCount() > 1)
delete [] significand.parts;
}
@@ -609,7 +610,7 @@ APFloat::assign(const APFloat &rhs)
exponent = rhs.exponent;
sign2 = rhs.sign2;
exponent2 = rhs.exponent2;
- if(category == fcNormal || category == fcNaN)
+ if (category == fcNormal || category == fcNaN)
copySignificand(rhs);
}
@@ -683,8 +684,8 @@ APFloat APFloat::makeNaN(const fltSemantics &Sem, bool SNaN, bool Negative,
APFloat &
APFloat::operator=(const APFloat &rhs)
{
- if(this != &rhs) {
- if(semantics != rhs.semantics) {
+ if (this != &rhs) {
+ if (semantics != rhs.semantics) {
freeSignificand();
initialize(rhs.semantics);
}
@@ -761,7 +762,7 @@ APFloat::APFloat(const fltSemantics &ourSemantics,
makeNaN();
}
-APFloat::APFloat(const fltSemantics &ourSemantics, const StringRef& text)
+APFloat::APFloat(const fltSemantics &ourSemantics, StringRef text)
{
assertArithmeticOK(ourSemantics);
initialize(&ourSemantics);
@@ -881,7 +882,7 @@ APFloat::multiplySignificand(const APFloat &rhs, const APFloat *addend)
precision = semantics->precision;
newPartsCount = partCountForBits(precision * 2);
- if(newPartsCount > 4)
+ if (newPartsCount > 4)
fullSignificand = new integerPart[newPartsCount];
else
fullSignificand = scratch;
@@ -896,7 +897,7 @@ APFloat::multiplySignificand(const APFloat &rhs, const APFloat *addend)
omsb = APInt::tcMSB(fullSignificand, newPartsCount) + 1;
exponent += rhs.exponent;
- if(addend) {
+ if (addend) {
Significand savedSignificand = significand;
const fltSemantics *savedSemantics = semantics;
fltSemantics extendedSemantics;
@@ -905,18 +906,17 @@ APFloat::multiplySignificand(const APFloat &rhs, const APFloat *addend)
/* Normalize our MSB. */
extendedPrecision = precision + precision - 1;
- if(omsb != extendedPrecision)
- {
- APInt::tcShiftLeft(fullSignificand, newPartsCount,
- extendedPrecision - omsb);
- exponent -= extendedPrecision - omsb;
- }
+ if (omsb != extendedPrecision) {
+ APInt::tcShiftLeft(fullSignificand, newPartsCount,
+ extendedPrecision - omsb);
+ exponent -= extendedPrecision - omsb;
+ }
/* Create new semantics. */
extendedSemantics = *semantics;
extendedSemantics.precision = extendedPrecision;
- if(newPartsCount == 1)
+ if (newPartsCount == 1)
significand.part = fullSignificand[0];
else
significand.parts = fullSignificand;
@@ -928,7 +928,7 @@ APFloat::multiplySignificand(const APFloat &rhs, const APFloat *addend)
lost_fraction = addOrSubtractSignificand(extendedAddend, false);
/* Restore our state. */
- if(newPartsCount == 1)
+ if (newPartsCount == 1)
fullSignificand[0] = significand.part;
significand = savedSignificand;
semantics = savedSemantics;
@@ -938,7 +938,7 @@ APFloat::multiplySignificand(const APFloat &rhs, const APFloat *addend)
exponent -= (precision - 1);
- if(omsb > precision) {
+ if (omsb > precision) {
unsigned int bits, significantParts;
lostFraction lf;
@@ -951,7 +951,7 @@ APFloat::multiplySignificand(const APFloat &rhs, const APFloat *addend)
APInt::tcAssign(lhsSignificand, fullSignificand, partsCount);
- if(newPartsCount > 4)
+ if (newPartsCount > 4)
delete [] fullSignificand;
return lost_fraction;
@@ -973,7 +973,7 @@ APFloat::divideSignificand(const APFloat &rhs)
rhsSignificand = rhs.significandParts();
partsCount = partCount();
- if(partsCount > 2)
+ if (partsCount > 2)
dividend = new integerPart[partsCount * 2];
else
dividend = scratch;
@@ -981,7 +981,7 @@ APFloat::divideSignificand(const APFloat &rhs)
divisor = dividend + partsCount;
/* Copy the dividend and divisor as they will be modified in-place. */
- for(i = 0; i < partsCount; i++) {
+ for (i = 0; i < partsCount; i++) {
dividend[i] = lhsSignificand[i];
divisor[i] = rhsSignificand[i];
lhsSignificand[i] = 0;
@@ -993,14 +993,14 @@ APFloat::divideSignificand(const APFloat &rhs)
/* Normalize the divisor. */
bit = precision - APInt::tcMSB(divisor, partsCount) - 1;
- if(bit) {
+ if (bit) {
exponent += bit;
APInt::tcShiftLeft(divisor, partsCount, bit);
}
/* Normalize the dividend. */
bit = precision - APInt::tcMSB(dividend, partsCount) - 1;
- if(bit) {
+ if (bit) {
exponent -= bit;
APInt::tcShiftLeft(dividend, partsCount, bit);
}
@@ -1008,15 +1008,15 @@ APFloat::divideSignificand(const APFloat &rhs)
/* Ensure the dividend >= divisor initially for the loop below.
Incidentally, this means that the division loop below is
guaranteed to set the integer bit to one. */
- if(APInt::tcCompare(dividend, divisor, partsCount) < 0) {
+ if (APInt::tcCompare(dividend, divisor, partsCount) < 0) {
exponent--;
APInt::tcShiftLeft(dividend, partsCount, 1);
assert(APInt::tcCompare(dividend, divisor, partsCount) >= 0);
}
/* Long division. */
- for(bit = precision; bit; bit -= 1) {
- if(APInt::tcCompare(dividend, divisor, partsCount) >= 0) {
+ for (bit = precision; bit; bit -= 1) {
+ if (APInt::tcCompare(dividend, divisor, partsCount) >= 0) {
APInt::tcSubtract(dividend, divisor, 0, partsCount);
APInt::tcSetBit(lhsSignificand, bit - 1);
}
@@ -1027,16 +1027,16 @@ APFloat::divideSignificand(const APFloat &rhs)
/* Figure out the lost fraction. */
int cmp = APInt::tcCompare(dividend, divisor, partsCount);
- if(cmp > 0)
+ if (cmp > 0)
lost_fraction = lfMoreThanHalf;
- else if(cmp == 0)
+ else if (cmp == 0)
lost_fraction = lfExactlyHalf;
- else if(APInt::tcIsZero(dividend, partsCount))
+ else if (APInt::tcIsZero(dividend, partsCount))
lost_fraction = lfExactlyZero;
else
lost_fraction = lfLessThanHalf;
- if(partsCount > 2)
+ if (partsCount > 2)
delete [] dividend;
return lost_fraction;
@@ -1072,7 +1072,7 @@ APFloat::shiftSignificandLeft(unsigned int bits)
{
assert(bits < semantics->precision);
- if(bits) {
+ if (bits) {
unsigned int partsCount = partCount();
APInt::tcShiftLeft(significandParts(), partsCount, bits);
@@ -1095,13 +1095,13 @@ APFloat::compareAbsoluteValue(const APFloat &rhs) const
/* If exponents are equal, do an unsigned bignum comparison of the
significands. */
- if(compare == 0)
+ if (compare == 0)
compare = APInt::tcCompare(significandParts(), rhs.significandParts(),
partCount());
- if(compare > 0)
+ if (compare > 0)
return cmpGreaterThan;
- else if(compare < 0)
+ else if (compare < 0)
return cmpLessThan;
else
return cmpEqual;
@@ -1113,14 +1113,13 @@ APFloat::opStatus
APFloat::handleOverflow(roundingMode rounding_mode)
{
/* Infinity? */
- if(rounding_mode == rmNearestTiesToEven
- || rounding_mode == rmNearestTiesToAway
- || (rounding_mode == rmTowardPositive && !sign)
- || (rounding_mode == rmTowardNegative && sign))
- {
- category = fcInfinity;
- return (opStatus) (opOverflow | opInexact);
- }
+ if (rounding_mode == rmNearestTiesToEven ||
+ rounding_mode == rmNearestTiesToAway ||
+ (rounding_mode == rmTowardPositive && !sign) ||
+ (rounding_mode == rmTowardNegative && sign)) {
+ category = fcInfinity;
+ return (opStatus) (opOverflow | opInexact);
+ }
/* Otherwise we become the largest finite number. */
category = fcNormal;
@@ -1155,11 +1154,11 @@ APFloat::roundAwayFromZero(roundingMode rounding_mode,
return lost_fraction == lfExactlyHalf || lost_fraction == lfMoreThanHalf;
case rmNearestTiesToEven:
- if(lost_fraction == lfMoreThanHalf)
+ if (lost_fraction == lfMoreThanHalf)
return true;
/* Our zeroes don't have a significand to test. */
- if(lost_fraction == lfExactlyHalf && category != fcZero)
+ if (lost_fraction == lfExactlyHalf && category != fcZero)
return APInt::tcExtractBit(significandParts(), bit);
return false;
@@ -1182,13 +1181,13 @@ APFloat::normalize(roundingMode rounding_mode,
unsigned int omsb; /* One, not zero, based MSB. */
int exponentChange;
- if(category != fcNormal)
+ if (category != fcNormal)
return opOK;
/* Before rounding normalize the exponent of fcNormal numbers. */
omsb = significandMSB() + 1;
- if(omsb) {
+ if (omsb) {
/* OMSB is numbered from 1. We want to place it in the integer
bit numbered PRECISION if possible, with a compensating change in
the exponent. */
@@ -1196,16 +1195,16 @@ APFloat::normalize(roundingMode rounding_mode,
/* If the resulting exponent is too high, overflow according to
the rounding mode. */
- if(exponent + exponentChange > semantics->maxExponent)
+ if (exponent + exponentChange > semantics->maxExponent)
return handleOverflow(rounding_mode);
/* Subnormal numbers have exponent minExponent, and their MSB
is forced based on that. */
- if(exponent + exponentChange < semantics->minExponent)
+ if (exponent + exponentChange < semantics->minExponent)
exponentChange = semantics->minExponent - exponent;
/* Shifting left is easy as we don't lose precision. */
- if(exponentChange < 0) {
+ if (exponentChange < 0) {
assert(lost_fraction == lfExactlyZero);
shiftSignificandLeft(-exponentChange);
@@ -1213,7 +1212,7 @@ APFloat::normalize(roundingMode rounding_mode,
return opOK;
}
- if(exponentChange > 0) {
+ if (exponentChange > 0) {
lostFraction lf;
/* Shift right and capture any new lost fraction. */
@@ -1222,7 +1221,7 @@ APFloat::normalize(roundingMode rounding_mode,
lost_fraction = combineLostFractions(lf, lost_fraction);
/* Keep OMSB up-to-date. */
- if(omsb > (unsigned) exponentChange)
+ if (omsb > (unsigned) exponentChange)
omsb -= exponentChange;
else
omsb = 0;
@@ -1234,28 +1233,28 @@ APFloat::normalize(roundingMode rounding_mode,
/* As specified in IEEE 754, since we do not trap we do not report
underflow for exact results. */
- if(lost_fraction == lfExactlyZero) {
+ if (lost_fraction == lfExactlyZero) {
/* Canonicalize zeroes. */
- if(omsb == 0)
+ if (omsb == 0)
category = fcZero;
return opOK;
}
/* Increment the significand if we're rounding away from zero. */
- if(roundAwayFromZero(rounding_mode, lost_fraction, 0)) {
- if(omsb == 0)
+ if (roundAwayFromZero(rounding_mode, lost_fraction, 0)) {
+ if (omsb == 0)
exponent = semantics->minExponent;
incrementSignificand();
omsb = significandMSB() + 1;
/* Did the significand increment overflow? */
- if(omsb == (unsigned) semantics->precision + 1) {
+ if (omsb == (unsigned) semantics->precision + 1) {
/* Renormalize by incrementing the exponent and shifting our
significand right one. However if we already have the
maximum exponent we overflow to infinity. */
- if(exponent == semantics->maxExponent) {
+ if (exponent == semantics->maxExponent) {
category = fcInfinity;
return (opStatus) (opOverflow | opInexact);
@@ -1269,14 +1268,14 @@ APFloat::normalize(roundingMode rounding_mode,
/* The normal case - we were and are not denormal, and any
significand increment above didn't overflow. */
- if(omsb == semantics->precision)
+ if (omsb == semantics->precision)
return opInexact;
/* We have a non-zero denormal. */
assert(omsb < semantics->precision);
/* Canonicalize zeroes. */
- if(omsb == 0)
+ if (omsb == 0)
category = fcZero;
/* The fcZero case is a denormal that underflowed to zero. */
@@ -1324,7 +1323,7 @@ APFloat::addOrSubtractSpecials(const APFloat &rhs, bool subtract)
case convolve(fcInfinity, fcInfinity):
/* Differently signed infinities can only be validly
subtracted. */
- if(((sign ^ rhs.sign)!=0) != subtract) {
+ if (((sign ^ rhs.sign)!=0) != subtract) {
makeNaN();
return opInvalidOp;
}
@@ -1352,7 +1351,7 @@ APFloat::addOrSubtractSignificand(const APFloat &rhs, bool subtract)
bits = exponent - rhs.exponent;
/* Subtraction is more subtle than one might naively expect. */
- if(subtract) {
+ if (subtract) {
APFloat temp_rhs(rhs);
bool reverse;
@@ -1381,16 +1380,16 @@ APFloat::addOrSubtractSignificand(const APFloat &rhs, bool subtract)
/* Invert the lost fraction - it was on the RHS and
subtracted. */
- if(lost_fraction == lfLessThanHalf)
+ if (lost_fraction == lfLessThanHalf)
lost_fraction = lfMoreThanHalf;
- else if(lost_fraction == lfMoreThanHalf)
+ else if (lost_fraction == lfMoreThanHalf)
lost_fraction = lfLessThanHalf;
/* The code above is intended to ensure that no borrow is
necessary. */
assert(!carry);
} else {
- if(bits > 0) {
+ if (bits > 0) {
APFloat temp_rhs(rhs);
lost_fraction = temp_rhs.shiftSignificandRight(bits);
@@ -1561,7 +1560,7 @@ APFloat::addOrSubtract(const APFloat &rhs, roundingMode rounding_mode,
fs = addOrSubtractSpecials(rhs, subtract);
/* This return code means it was not a simple case. */
- if(fs == opDivByZero) {
+ if (fs == opDivByZero) {
lostFraction lost_fraction;
lost_fraction = addOrSubtractSignificand(rhs, subtract);
@@ -1574,8 +1573,8 @@ APFloat::addOrSubtract(const APFloat &rhs, roundingMode rounding_mode,
/* If two numbers add (exactly) to zero, IEEE 754 decrees it is a
positive zero unless rounding to minus infinity, except that
adding two like-signed zeroes gives that zero. */
- if(category == fcZero) {
- if(rhs.category != fcZero || (sign == rhs.sign) == subtract)
+ if (category == fcZero) {
+ if (rhs.category != fcZero || (sign == rhs.sign) == subtract)
sign = (rounding_mode == rmTowardNegative);
}
@@ -1606,10 +1605,10 @@ APFloat::multiply(const APFloat &rhs, roundingMode rounding_mode)
sign ^= rhs.sign;
fs = multiplySpecials(rhs);
- if(category == fcNormal) {
+ if (category == fcNormal) {
lostFraction lost_fraction = multiplySignificand(rhs, 0);
fs = normalize(rounding_mode, lost_fraction);
- if(lost_fraction != lfExactlyZero)
+ if (lost_fraction != lfExactlyZero)
fs = (opStatus) (fs | opInexact);
}
@@ -1626,10 +1625,10 @@ APFloat::divide(const APFloat &rhs, roundingMode rounding_mode)
sign ^= rhs.sign;
fs = divideSpecials(rhs);
- if(category == fcNormal) {
+ if (category == fcNormal) {
lostFraction lost_fraction = divideSignificand(rhs);
fs = normalize(rounding_mode, lost_fraction);
- if(lost_fraction != lfExactlyZero)
+ if (lost_fraction != lfExactlyZero)
fs = (opStatus) (fs | opInexact);
}
@@ -1673,7 +1672,7 @@ APFloat::remainder(const APFloat &rhs)
return fs;
}
-/* Normalized llvm frem (C fmod).
+/* Normalized llvm frem (C fmod).
This is not currently correct in all cases. */
APFloat::opStatus
APFloat::mod(const APFloat &rhs, roundingMode rounding_mode)
@@ -1730,20 +1729,20 @@ APFloat::fusedMultiplyAdd(const APFloat &multiplicand,
/* If and only if all arguments are normal do we need to do an
extended-precision calculation. */
- if(category == fcNormal
- && multiplicand.category == fcNormal
- && addend.category == fcNormal) {
+ if (category == fcNormal &&
+ multiplicand.category == fcNormal &&
+ addend.category == fcNormal) {
lostFraction lost_fraction;
lost_fraction = multiplySignificand(multiplicand, &addend);
fs = normalize(rounding_mode, lost_fraction);
- if(lost_fraction != lfExactlyZero)
+ if (lost_fraction != lfExactlyZero)
fs = (opStatus) (fs | opInexact);
/* If two numbers add (exactly) to zero, IEEE 754 decrees it is a
positive zero unless rounding to minus infinity, except that
adding two like-signed zeroes gives that zero. */
- if(category == fcZero && sign != addend.sign)
+ if (category == fcZero && sign != addend.sign)
sign = (rounding_mode == rmTowardNegative);
} else {
fs = multiplySpecials(multiplicand);
@@ -1755,7 +1754,7 @@ APFloat::fusedMultiplyAdd(const APFloat &multiplicand,
If we need to do the addition we can do so with normal
precision. */
- if(fs == opOK)
+ if (fs == opOK)
fs = addOrSubtract(addend, rounding_mode, false);
}
@@ -1787,7 +1786,7 @@ APFloat::compare(const APFloat &rhs) const
case convolve(fcInfinity, fcNormal):
case convolve(fcInfinity, fcZero):
case convolve(fcNormal, fcZero):
- if(sign)
+ if (sign)
return cmpLessThan;
else
return cmpGreaterThan;
@@ -1795,15 +1794,15 @@ APFloat::compare(const APFloat &rhs) const
case convolve(fcNormal, fcInfinity):
case convolve(fcZero, fcInfinity):
case convolve(fcZero, fcNormal):
- if(rhs.sign)
+ if (rhs.sign)
return cmpGreaterThan;
else
return cmpLessThan;
case convolve(fcInfinity, fcInfinity):
- if(sign == rhs.sign)
+ if (sign == rhs.sign)
return cmpEqual;
- else if(sign)
+ else if (sign)
return cmpLessThan;
else
return cmpGreaterThan;
@@ -1816,8 +1815,8 @@ APFloat::compare(const APFloat &rhs) const
}
/* Two normal numbers. Do they have the same sign? */
- if(sign != rhs.sign) {
- if(sign)
+ if (sign != rhs.sign) {
+ if (sign)
result = cmpLessThan;
else
result = cmpGreaterThan;
@@ -1825,10 +1824,10 @@ APFloat::compare(const APFloat &rhs) const
/* Compare absolute values; invert result if negative. */
result = compareAbsoluteValue(rhs);
- if(sign) {
- if(result == cmpLessThan)
+ if (sign) {
+ if (result == cmpLessThan)
result = cmpGreaterThan;
- else if(result == cmpGreaterThan)
+ else if (result == cmpGreaterThan)
result = cmpLessThan;
}
}
@@ -1886,7 +1885,7 @@ APFloat::convert(const fltSemantics &toSemantics,
}
}
- if(category == fcNormal) {
+ if (category == fcNormal) {
/* Re-interpret our bit-pattern. */
exponent += toSemantics.precision - semantics->precision;
semantics = &toSemantics;
@@ -1911,7 +1910,7 @@ APFloat::convert(const fltSemantics &toSemantics,
// x87 long double).
if (APInt::tcLSB(significandParts(), newPartCount) < ushift)
*losesInfo = true;
- if (oldSemantics == &APFloat::x87DoubleExtended &&
+ if (oldSemantics == &APFloat::x87DoubleExtended &&
(!(*significandParts() & 0x8000000000000000ULL) ||
!(*significandParts() & 0x4000000000000000ULL)))
*losesInfo = true;
@@ -1956,12 +1955,12 @@ APFloat::convertToSignExtendedInteger(integerPart *parts, unsigned int width,
*isExact = false;
/* Handle the three special cases first. */
- if(category == fcInfinity || category == fcNaN)
+ if (category == fcInfinity || category == fcNaN)
return opInvalidOp;
dstPartsCount = partCountForBits(width);
- if(category == fcZero) {
+ if (category == fcZero) {
APInt::tcSet(parts, 0, dstPartsCount);
// Negative zero can't be represented as an int.
*isExact = !sign;
@@ -2004,8 +2003,8 @@ APFloat::convertToSignExtendedInteger(integerPart *parts, unsigned int width,
if (truncatedBits) {
lost_fraction = lostFractionThroughTruncation(src, partCount(),
truncatedBits);
- if (lost_fraction != lfExactlyZero
- && roundAwayFromZero(rounding_mode, lost_fraction, truncatedBits)) {
+ if (lost_fraction != lfExactlyZero &&
+ roundAwayFromZero(rounding_mode, lost_fraction, truncatedBits)) {
if (APInt::tcIncrement(parts, dstPartsCount))
return opInvalidOp; /* Overflow. */
}
@@ -2062,7 +2061,7 @@ APFloat::convertToInteger(integerPart *parts, unsigned int width,
{
opStatus fs;
- fs = convertToSignExtendedInteger(parts, width, isSigned, rounding_mode,
+ fs = convertToSignExtendedInteger(parts, width, isSigned, rounding_mode,
isExact);
if (fs == opInvalidOp) {
@@ -2149,8 +2148,8 @@ APFloat::convertFromSignExtendedInteger(const integerPart *src,
opStatus status;
assertArithmeticOK(*semantics);
- if (isSigned
- && APInt::tcExtractBit(src, srcCount * integerPartWidth - 1)) {
+ if (isSigned &&
+ APInt::tcExtractBit(src, srcCount * integerPartWidth - 1)) {
integerPart *copy;
/* If we're signed and negative negate a copy. */
@@ -2178,7 +2177,7 @@ APFloat::convertFromZeroExtendedInteger(const integerPart *parts,
APInt api = APInt(width, partCount, parts);
sign = false;
- if(isSigned && APInt::tcExtractBit(parts, width - 1)) {
+ if (isSigned && APInt::tcExtractBit(parts, width - 1)) {
sign = true;
api = -api;
}
@@ -2187,8 +2186,7 @@ APFloat::convertFromZeroExtendedInteger(const integerPart *parts,
}
APFloat::opStatus
-APFloat::convertFromHexadecimalString(const StringRef &s,
- roundingMode rounding_mode)
+APFloat::convertFromHexadecimalString(StringRef s, roundingMode rounding_mode)
{
lostFraction lost_fraction = lfExactlyZero;
integerPart *significand;
@@ -2209,10 +2207,10 @@ APFloat::convertFromHexadecimalString(const StringRef &s,
StringRef::iterator p = skipLeadingZeroesAndAnyDot(begin, end, &dot);
firstSignificantDigit = p;
- for(; p != end;) {
+ for (; p != end;) {
integerPart hex_value;
- if(*p == '.') {
+ if (*p == '.') {
assert(dot == end && "String contains multiple dots");
dot = p++;
if (p == end) {
@@ -2221,7 +2219,7 @@ APFloat::convertFromHexadecimalString(const StringRef &s,
}
hex_value = hexDigitValue(*p);
- if(hex_value == -1U) {
+ if (hex_value == -1U) {
break;
}
@@ -2231,13 +2229,13 @@ APFloat::convertFromHexadecimalString(const StringRef &s,
break;
} else {
/* Store the number whilst 4-bit nibbles remain. */
- if(bitPos) {
+ if (bitPos) {
bitPos -= 4;
hex_value <<= bitPos % integerPartWidth;
significand[bitPos / integerPartWidth] |= hex_value;
} else {
lost_fraction = trailingHexadecimalFraction(p, end, hex_value);
- while(p != end && hexDigitValue(*p) != -1U)
+ while (p != end && hexDigitValue(*p) != -1U)
p++;
break;
}
@@ -2251,7 +2249,7 @@ APFloat::convertFromHexadecimalString(const StringRef &s,
assert((dot == end || p - begin != 1) && "Significand has no digits");
/* Ignore the exponent if we are zero. */
- if(p != firstSignificantDigit) {
+ if (p != firstSignificantDigit) {
int expAdjustment;
/* Implicit hexadecimal point? */
@@ -2261,7 +2259,7 @@ APFloat::convertFromHexadecimalString(const StringRef &s,
/* Calculate the exponent adjustment implicit in the number of
significant digits. */
expAdjustment = static_cast<int>(dot - firstSignificantDigit);
- if(expAdjustment < 0)
+ if (expAdjustment < 0)
expAdjustment++;
expAdjustment = expAdjustment * 4 - 1;
@@ -2287,8 +2285,8 @@ APFloat::roundSignificandWithExponent(const integerPart *decSigParts,
integerPart pow5Parts[maxPowerOfFiveParts];
bool isNearest;
- isNearest = (rounding_mode == rmNearestTiesToEven
- || rounding_mode == rmNearestTiesToAway);
+ isNearest = (rounding_mode == rmNearestTiesToEven ||
+ rounding_mode == rmNearestTiesToAway);
parts = partCountForBits(semantics->precision + 11);
@@ -2363,7 +2361,7 @@ APFloat::roundSignificandWithExponent(const integerPart *decSigParts,
}
APFloat::opStatus
-APFloat::convertFromDecimalString(const StringRef &str, roundingMode rounding_mode)
+APFloat::convertFromDecimalString(StringRef str, roundingMode rounding_mode)
{
decimalInfo D;
opStatus fs;
@@ -2473,7 +2471,7 @@ APFloat::convertFromDecimalString(const StringRef &str, roundingMode rounding_mo
}
APFloat::opStatus
-APFloat::convertFromString(const StringRef &str, roundingMode rounding_mode)
+APFloat::convertFromString(StringRef str, roundingMode rounding_mode)
{
assertArithmeticOK(*semantics);
assert(!str.empty() && "Invalid string length");
@@ -2482,13 +2480,13 @@ APFloat::convertFromString(const StringRef &str, roundingMode rounding_mode)
StringRef::iterator p = str.begin();
size_t slen = str.size();
sign = *p == '-' ? 1 : 0;
- if(*p == '-' || *p == '+') {
+ if (*p == '-' || *p == '+') {
p++;
slen--;
assert(slen && "String has no digits");
}
- if(slen >= 2 && p[0] == '0' && (p[1] == 'x' || p[1] == 'X')) {
+ if (slen >= 2 && p[0] == '0' && (p[1] == 'x' || p[1] == 'X')) {
assert(slen - 2 && "Invalid string");
return convertFromHexadecimalString(StringRef(p + 2, slen - 2),
rounding_mode);
@@ -3013,7 +3011,7 @@ APFloat::initFromPPCDoubleDoubleAPInt(const APInt &api)
// exponent2 and significand2 are required to be 0; we don't check
category = fcInfinity;
} else if (myexponent==0x7ff && mysignificand!=0) {
- // exponent meaningless. So is the whole second word, but keep it
+ // exponent meaningless. So is the whole second word, but keep it
// for determinism.
category = fcNaN;
exponent2 = myexponent2;
@@ -3031,7 +3029,7 @@ APFloat::initFromPPCDoubleDoubleAPInt(const APInt &api)
exponent = -1022;
else
significandParts()[0] |= 0x10000000000000LL; // integer bit
- if (myexponent2==0)
+ if (myexponent2==0)
exponent2 = -1022;
else
significandParts()[1] |= 0x10000000000000LL; // integer bit
@@ -3217,8 +3215,8 @@ APFloat APFloat::getLargest(const fltSemantics &Sem, bool Negative) {
significand[i] = ~((integerPart) 0);
// ...and then clear the top bits for internal consistency.
- significand[N-1]
- &= (((integerPart) 1) << ((Sem.precision % integerPartWidth) - 1)) - 1;
+ significand[N-1] &=
+ (((integerPart) 1) << ((Sem.precision % integerPartWidth) - 1)) - 1;
return Val;
}
@@ -3247,8 +3245,8 @@ APFloat APFloat::getSmallestNormalized(const fltSemantics &Sem, bool Negative) {
Val.exponent = Sem.minExponent;
Val.zeroSignificand();
- Val.significandParts()[partCountForBits(Sem.precision)-1]
- |= (((integerPart) 1) << ((Sem.precision % integerPartWidth) - 1));
+ Val.significandParts()[partCountForBits(Sem.precision)-1] |=
+ (((integerPart) 1) << ((Sem.precision % integerPartWidth) - 1));
return Val;
}
@@ -3433,7 +3431,7 @@ void APFloat::toString(SmallVectorImpl<char> &Str,
// log2(N * 5^e) == log2(N) + e * log2(5)
// <= semantics->precision + e * 137 / 59
// (log_2(5) ~ 2.321928 < 2.322034 ~ 137/59)
-
+
unsigned precision = semantics->precision + 137 * texp / 59;
// Multiply significand by 5^e.
@@ -3442,7 +3440,7 @@ void APFloat::toString(SmallVectorImpl<char> &Str,
APInt five_to_the_i(precision, 5);
while (true) {
if (texp & 1) significand *= five_to_the_i;
-
+
texp >>= 1;
if (!texp) break;
five_to_the_i *= five_to_the_i;
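
A side note on the toString() hunk just above: the loop builds 5^texp by square-and-multiply, folding in five_to_the_i whenever the current exponent bit is set and squaring it between bits, and the earlier comment bounds the working precision using log2(5) < 137/59. A minimal standalone sketch of that square-and-multiply loop, using a plain 64-bit integer rather than APInt (so it only holds exponents up to 27):

#include <stdint.h>

// Computes 5^e by binary exponentiation, mirroring the
// "if (texp & 1) significand *= five_to_the_i" loop above.
static uint64_t pow5(unsigned e) {
  uint64_t result = 1;
  uint64_t five_to_the_i = 5;           // 5^(2^i) for the current bit i
  while (e) {
    if (e & 1)
      result *= five_to_the_i;          // this bit of e is set: multiply it in
    e >>= 1;
    if (e)
      five_to_the_i *= five_to_the_i;   // square for the next bit
  }
  return result;
}

For example, pow5(13) multiplies in 5^1, 5^4 and 5^8, matching the set bits of 13.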
diff --git a/libclamav/c++/llvm/lib/Support/APInt.cpp b/libclamav/c++/llvm/lib/Support/APInt.cpp
index 6a6384a..8a212a2 100644
--- a/libclamav/c++/llvm/lib/Support/APInt.cpp
+++ b/libclamav/c++/llvm/lib/Support/APInt.cpp
@@ -102,7 +102,7 @@ APInt::APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[])
clearUnusedBits();
}
-APInt::APInt(unsigned numbits, const StringRef& Str, uint8_t radix)
+APInt::APInt(unsigned numbits, StringRef Str, uint8_t radix)
: BitWidth(numbits), VAL(0) {
assert(BitWidth && "Bitwidth too small");
fromString(numbits, Str, radix);
@@ -613,7 +613,7 @@ APInt& APInt::flip(unsigned bitPosition) {
return *this;
}
-unsigned APInt::getBitsNeeded(const StringRef& str, uint8_t radix) {
+unsigned APInt::getBitsNeeded(StringRef str, uint8_t radix) {
assert(!str.empty() && "Invalid string length");
assert((radix == 10 || radix == 8 || radix == 16 || radix == 2) &&
"Radix should be 2, 8, 10, or 16!");
@@ -702,15 +702,14 @@ static inline uint32_t hashword(const uint64_t *k64, size_t length)
a = b = c = 0xdeadbeef + (((uint32_t)length)<<2);
/*------------------------------------------------- handle most of the key */
- while (length > 3)
- {
- a += k[0];
- b += k[1];
- c += k[2];
- mix(a,b,c);
- length -= 3;
- k += 3;
- }
+ while (length > 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 3;
+ k += 3;
+ }
/*------------------------------------------- handle the last 3 uint32_t's */
switch (length) { /* all the case statements fall through */
@@ -1383,13 +1382,12 @@ APInt APInt::sqrt() const {
// libc sqrt function which will probably use a hardware sqrt computation.
// This should be faster than the algorithm below.
if (magnitude < 52) {
-#ifdef _MSC_VER
- // Amazingly, VC++ doesn't have round().
+#if HAVE_ROUND
return APInt(BitWidth,
- uint64_t(::sqrt(double(isSingleWord()?VAL:pVal[0]))) + 0.5);
+ uint64_t(::round(::sqrt(double(isSingleWord()?VAL:pVal[0])))));
#else
return APInt(BitWidth,
- uint64_t(::round(::sqrt(double(isSingleWord()?VAL:pVal[0])))));
+ uint64_t(::sqrt(double(isSingleWord()?VAL:pVal[0]))) + 0.5);
#endif
}
@@ -2048,7 +2046,7 @@ void APInt::udivrem(const APInt &LHS, const APInt &RHS,
divide(LHS, lhsWords, RHS, rhsWords, &Quotient, &Remainder);
}
-void APInt::fromString(unsigned numbits, const StringRef& str, uint8_t radix) {
+void APInt::fromString(unsigned numbits, StringRef str, uint8_t radix) {
// Check our assumptions here
assert(!str.empty() && "Invalid string length");
assert((radix == 10 || radix == 8 || radix == 16 || radix == 2) &&
@@ -2065,8 +2063,8 @@ void APInt::fromString(unsigned numbits, const StringRef& str, uint8_t radix) {
assert((slen <= numbits || radix != 2) && "Insufficient bit width");
assert(((slen-1)*3 <= numbits || radix != 8) && "Insufficient bit width");
assert(((slen-1)*4 <= numbits || radix != 16) && "Insufficient bit width");
- assert((((slen-1)*64)/22 <= numbits || radix != 10)
- && "Insufficient bit width");
+ assert((((slen-1)*64)/22 <= numbits || radix != 10) &&
+ "Insufficient bit width");
// Allocate memory
if (!isSingleWord())
@@ -2125,15 +2123,16 @@ void APInt::toString(SmallVectorImpl<char> &Str, unsigned Radix,
char *BufPtr = Buffer+65;
uint64_t N;
- if (Signed) {
+ if (!Signed) {
+ N = getZExtValue();
+ } else {
int64_t I = getSExtValue();
- if (I < 0) {
+ if (I >= 0) {
+ N = I;
+ } else {
Str.push_back('-');
- I = -I;
+ N = -(uint64_t)I;
}
- N = I;
- } else {
- N = getZExtValue();
}
while (N) {
@@ -2229,7 +2228,7 @@ namespace {
static inline integerPart
lowBitMask(unsigned int bits)
{
- assert (bits != 0 && bits <= integerPartWidth);
+ assert(bits != 0 && bits <= integerPartWidth);
return ~(integerPart) 0 >> (integerPartWidth - bits);
}
@@ -2306,10 +2305,10 @@ APInt::tcSet(integerPart *dst, integerPart part, unsigned int parts)
{
unsigned int i;
- assert (parts > 0);
+ assert(parts > 0);
dst[0] = part;
- for(i = 1; i < parts; i++)
+ for (i = 1; i < parts; i++)
dst[i] = 0;
}
@@ -2319,7 +2318,7 @@ APInt::tcAssign(integerPart *dst, const integerPart *src, unsigned int parts)
{
unsigned int i;
- for(i = 0; i < parts; i++)
+ for (i = 0; i < parts; i++)
dst[i] = src[i];
}
@@ -2329,7 +2328,7 @@ APInt::tcIsZero(const integerPart *src, unsigned int parts)
{
unsigned int i;
- for(i = 0; i < parts; i++)
+ for (i = 0; i < parts; i++)
if (src[i])
return false;
@@ -2340,8 +2339,8 @@ APInt::tcIsZero(const integerPart *src, unsigned int parts)
int
APInt::tcExtractBit(const integerPart *parts, unsigned int bit)
{
- return(parts[bit / integerPartWidth]
- & ((integerPart) 1 << bit % integerPartWidth)) != 0;
+ return (parts[bit / integerPartWidth] &
+ ((integerPart) 1 << bit % integerPartWidth)) != 0;
}
/* Set the given bit of a bignum. */
@@ -2366,7 +2365,7 @@ APInt::tcLSB(const integerPart *parts, unsigned int n)
{
unsigned int i, lsb;
- for(i = 0; i < n; i++) {
+ for (i = 0; i < n; i++) {
if (parts[i] != 0) {
lsb = partLSB(parts[i]);
@@ -2385,13 +2384,13 @@ APInt::tcMSB(const integerPart *parts, unsigned int n)
unsigned int msb;
do {
- --n;
+ --n;
- if (parts[n] != 0) {
- msb = partMSB(parts[n]);
+ if (parts[n] != 0) {
+ msb = partMSB(parts[n]);
- return msb + n * integerPartWidth;
- }
+ return msb + n * integerPartWidth;
+ }
} while (n);
return -1U;
@@ -2408,7 +2407,7 @@ APInt::tcExtract(integerPart *dst, unsigned int dstCount,const integerPart *src,
unsigned int firstSrcPart, dstParts, shift, n;
dstParts = (srcBits + integerPartWidth - 1) / integerPartWidth;
- assert (dstParts <= dstCount);
+ assert(dstParts <= dstCount);
firstSrcPart = srcLSB / integerPartWidth;
tcAssign (dst, src + firstSrcPart, dstParts);
@@ -2443,7 +2442,7 @@ APInt::tcAdd(integerPart *dst, const integerPart *rhs,
assert(c <= 1);
- for(i = 0; i < parts; i++) {
+ for (i = 0; i < parts; i++) {
integerPart l;
l = dst[i];
@@ -2468,7 +2467,7 @@ APInt::tcSubtract(integerPart *dst, const integerPart *rhs,
assert(c <= 1);
- for(i = 0; i < parts; i++) {
+ for (i = 0; i < parts; i++) {
integerPart l;
l = dst[i];
@@ -2518,7 +2517,7 @@ APInt::tcMultiplyPart(integerPart *dst, const integerPart *src,
/* N loops; minimum of dstParts and srcParts. */
n = dstParts < srcParts ? dstParts: srcParts;
- for(i = 0; i < n; i++) {
+ for (i = 0; i < n; i++) {
integerPart low, mid, high, srcPart;
/* [ LOW, HIGH ] = MULTIPLIER * SRC[i] + DST[i] + CARRY.
@@ -2583,7 +2582,7 @@ APInt::tcMultiplyPart(integerPart *dst, const integerPart *src,
non-zero. This is true if any remaining src parts are non-zero
and the multiplier is non-zero. */
if (multiplier)
- for(; i < srcParts; i++)
+ for (; i < srcParts; i++)
if (src[i])
return 1;
@@ -2608,7 +2607,7 @@ APInt::tcMultiply(integerPart *dst, const integerPart *lhs,
overflow = 0;
tcSet(dst, 0, parts);
- for(i = 0; i < parts; i++)
+ for (i = 0; i < parts; i++)
overflow |= tcMultiplyPart(&dst[i], lhs, rhs[i], 0, parts,
parts - i, true);
@@ -2634,7 +2633,7 @@ APInt::tcFullMultiply(integerPart *dst, const integerPart *lhs,
tcSet(dst, 0, rhsParts);
- for(n = 0; n < lhsParts; n++)
+ for (n = 0; n < lhsParts; n++)
tcMultiplyPart(&dst[n], rhs, lhs[n], 0, rhsParts, rhsParts + 1, true);
n = lhsParts + rhsParts;
@@ -2678,7 +2677,7 @@ APInt::tcDivide(integerPart *lhs, const integerPart *rhs,
/* Loop, subtracting SRHS if REMAINDER is greater and adding that to
the total. */
- for(;;) {
+ for (;;) {
int compare;
compare = tcCompare(remainder, srhs, parts);
@@ -2746,7 +2745,7 @@ APInt::tcShiftRight(integerPart *dst, unsigned int parts, unsigned int count)
/* Perform the shift. This leaves the most significant COUNT bits
of the result at zero. */
- for(i = 0; i < parts; i++) {
+ for (i = 0; i < parts; i++) {
integerPart part;
if (i + jump >= parts) {
@@ -2771,7 +2770,7 @@ APInt::tcAnd(integerPart *dst, const integerPart *rhs, unsigned int parts)
{
unsigned int i;
- for(i = 0; i < parts; i++)
+ for (i = 0; i < parts; i++)
dst[i] &= rhs[i];
}
@@ -2781,7 +2780,7 @@ APInt::tcOr(integerPart *dst, const integerPart *rhs, unsigned int parts)
{
unsigned int i;
- for(i = 0; i < parts; i++)
+ for (i = 0; i < parts; i++)
dst[i] |= rhs[i];
}
@@ -2791,7 +2790,7 @@ APInt::tcXor(integerPart *dst, const integerPart *rhs, unsigned int parts)
{
unsigned int i;
- for(i = 0; i < parts; i++)
+ for (i = 0; i < parts; i++)
dst[i] ^= rhs[i];
}
@@ -2801,7 +2800,7 @@ APInt::tcComplement(integerPart *dst, unsigned int parts)
{
unsigned int i;
- for(i = 0; i < parts; i++)
+ for (i = 0; i < parts; i++)
dst[i] = ~dst[i];
}
@@ -2830,7 +2829,7 @@ APInt::tcIncrement(integerPart *dst, unsigned int parts)
{
unsigned int i;
- for(i = 0; i < parts; i++)
+ for (i = 0; i < parts; i++)
if (++dst[i] != 0)
break;
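
One detail worth spelling out from the toString() change above: negating the most negative int64_t as a signed value overflows, so the new code takes the magnitude in unsigned arithmetic (N = -(uint64_t)I) instead of flipping I in place. A small standalone sketch of that sign-handling pattern, not the APInt implementation (toDecimal is an illustrative name):

#include <stdint.h>
#include <string>

// Convert a signed 64-bit value to decimal, taking the magnitude of negative
// values in unsigned arithmetic so INT64_MIN is handled without overflow.
static std::string toDecimal(int64_t I) {
  std::string Out;
  uint64_t N;
  if (I >= 0) {
    N = (uint64_t)I;
  } else {
    Out.push_back('-');
    N = -(uint64_t)I;          // well-defined even when I == INT64_MIN
  }
  if (N == 0)
    return "0";
  std::string Digits;
  while (N) {
    Digits.push_back((char)('0' + N % 10));
    N /= 10;
  }
  Out.append(Digits.rbegin(), Digits.rend());
  return Out;
}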
diff --git a/libclamav/c++/llvm/lib/Support/Allocator.cpp b/libclamav/c++/llvm/lib/Support/Allocator.cpp
index d7cc20b..90df262 100644
--- a/libclamav/c++/llvm/lib/Support/Allocator.cpp
+++ b/libclamav/c++/llvm/lib/Support/Allocator.cpp
@@ -23,9 +23,7 @@ namespace llvm {
BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold,
SlabAllocator &allocator)
: SlabSize(size), SizeThreshold(threshold), Allocator(allocator),
- CurSlab(0), BytesAllocated(0) {
- StartNewSlab();
-}
+ CurSlab(0), BytesAllocated(0) { }
BumpPtrAllocator::~BumpPtrAllocator() {
DeallocateSlabs(CurSlab);
@@ -72,30 +70,20 @@ void BumpPtrAllocator::DeallocateSlabs(MemSlab *Slab) {
/// Reset - Deallocate all but the current slab and reset the current pointer
/// to the beginning of it, freeing all memory allocated so far.
void BumpPtrAllocator::Reset() {
+ if (!CurSlab)
+ return;
DeallocateSlabs(CurSlab->NextPtr);
CurSlab->NextPtr = 0;
CurPtr = (char*)(CurSlab + 1);
End = ((char*)CurSlab) + CurSlab->Size;
}
-void BumpPtrAllocator::Reset(size_t Size, size_t Alignment, DTorFunction DTor) {
- if (Alignment == 0) Alignment = 1;
- MemSlab *Slab = CurSlab;
- while (Slab) {
- char *End = Slab == CurSlab ? CurPtr : (char*)Slab + Slab->Size;
- for (char *Ptr = (char*)(Slab+1); Ptr < End; Ptr += Size) {
- Ptr = AlignPtr(Ptr, Alignment);
- if (Ptr + Size <= End)
- DTor(Ptr);
- }
- Slab = Slab->NextPtr;
- }
- Reset();
-}
-
/// Allocate - Allocate space at the specified alignment.
///
void *BumpPtrAllocator::Allocate(size_t Size, size_t Alignment) {
+ if (!CurSlab) // Start a new slab if we haven't allocated one already.
+ StartNewSlab();
+
// Keep track of how many bytes we've allocated.
BytesAllocated += Size;
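
The Allocator.cpp change above makes the first slab lazy: the constructor no longer calls StartNewSlab(), Allocate() starts one on first use, and Reset() simply returns when no slab exists yet, so an allocator that is constructed but never used allocates nothing. A rough standalone sketch of that lazy-first-block pattern (LazyBumpAllocator is an illustrative class, not the BumpPtrAllocator API; it keeps a single block and ignores alignment):

#include <cstdlib>
#include <cstddef>

class LazyBumpAllocator {
  char *Slab, *Cur, *End;
  size_t SlabSize;
public:
  LazyBumpAllocator(size_t Size = 4096)
    : Slab(0), Cur(0), End(0), SlabSize(Size) {}
  ~LazyBumpAllocator() { std::free(Slab); }

  void *allocate(size_t N) {
    if (!Slab) {                        // start the first slab on demand
      Slab = (char *)std::malloc(SlabSize);
      Cur = Slab;
      End = Slab + SlabSize;
    }
    if ((size_t)(End - Cur) < N)
      return 0;                         // single-slab sketch: no chaining
    void *P = Cur;
    Cur += N;
    return P;
  }

  void reset() {
    if (!Slab)                          // mirrors the early return added to Reset()
      return;
    Cur = Slab;
  }
};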
diff --git a/libclamav/c++/llvm/lib/Support/CMakeLists.txt b/libclamav/c++/llvm/lib/Support/CMakeLists.txt
index f1347f9..0c70a40 100644
--- a/libclamav/c++/llvm/lib/Support/CMakeLists.txt
+++ b/libclamav/c++/llvm/lib/Support/CMakeLists.txt
@@ -6,8 +6,10 @@ add_llvm_library(LLVMSupport
circular_raw_ostream.cpp
CommandLine.cpp
ConstantRange.cpp
+ CrashRecoveryContext.cpp
Debug.cpp
DeltaAlgorithm.cpp
+ DAGDeltaAlgorithm.cpp
Dwarf.cpp
ErrorHandling.cpp
FileUtilities.cpp
@@ -22,7 +24,6 @@ add_llvm_library(LLVMSupport
PluginLoader.cpp
PrettyStackTrace.cpp
Regex.cpp
- SlowOperationInformer.cpp
SmallPtrSet.cpp
SmallVector.cpp
SourceMgr.cpp
diff --git a/libclamav/c++/llvm/lib/Support/CommandLine.cpp b/libclamav/c++/llvm/lib/Support/CommandLine.cpp
index 2ab4103..ae66110 100644
--- a/libclamav/c++/llvm/lib/Support/CommandLine.cpp
+++ b/libclamav/c++/llvm/lib/Support/CommandLine.cpp
@@ -676,8 +676,8 @@ void cl::ParseCommandLineOptions(int argc, char **argv,
<< " positional arguments: See: " << argv[0] << " -help\n";
ErrorParsing = true;
- } else if (!HasUnlimitedPositionals
- && PositionalVals.size() > PositionalOpts.size()) {
+ } else if (!HasUnlimitedPositionals &&
+ PositionalVals.size() > PositionalOpts.size()) {
errs() << ProgramName
<< ": Too many positional arguments specified!\n"
<< "Can specify at most " << PositionalOpts.size()
@@ -1170,7 +1170,9 @@ public:
std::string CPU = sys::getHostCPUName();
if (CPU == "generic") CPU = "(unknown)";
OS << ".\n"
+#if (ENABLE_TIMESTAMPS == 1)
<< " Built " << __DATE__ << " (" << __TIME__ << ").\n"
+#endif
<< " Host: " << sys::getHostTriple() << '\n'
<< " Host CPU: " << CPU << '\n'
<< '\n'
diff --git a/libclamav/c++/llvm/lib/Support/ConstantRange.cpp b/libclamav/c++/llvm/lib/Support/ConstantRange.cpp
index 2746f7a..8ef3785 100644
--- a/libclamav/c++/llvm/lib/Support/ConstantRange.cpp
+++ b/libclamav/c++/llvm/lib/Support/ConstantRange.cpp
@@ -21,6 +21,7 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/Constants.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -38,7 +39,7 @@ ConstantRange::ConstantRange(uint32_t BitWidth, bool Full) {
/// Initialize a range to hold the single specified value.
///
-ConstantRange::ConstantRange(const APInt & V) : Lower(V), Upper(V + 1) {}
+ConstantRange::ConstantRange(const APInt &V) : Lower(V), Upper(V + 1) {}
ConstantRange::ConstantRange(const APInt &L, const APInt &U) :
Lower(L), Upper(U) {
@@ -202,14 +203,12 @@ bool ConstantRange::contains(const APInt &V) const {
}
/// contains - Return true if the argument is a subset of this range.
-/// Two equal set contain each other. The empty set is considered to be
-/// contained by all other sets.
+/// Two equal sets contain each other. The empty set is contained by all
+/// other sets.
///
bool ConstantRange::contains(const ConstantRange &Other) const {
- if (isFullSet()) return true;
- if (Other.isFullSet()) return false;
- if (Other.isEmptySet()) return true;
- if (isEmptySet()) return false;
+ if (isFullSet() || Other.isEmptySet()) return true;
+ if (isEmptySet() || Other.isFullSet()) return false;
if (!isWrappedSet()) {
if (Other.isWrappedSet())
@@ -235,46 +234,6 @@ ConstantRange ConstantRange::subtract(const APInt &Val) const {
return ConstantRange(Lower - Val, Upper - Val);
}
-
-// intersect1Wrapped - This helper function is used to intersect two ranges when
-// it is known that LHS is wrapped and RHS isn't.
-//
-ConstantRange
-ConstantRange::intersect1Wrapped(const ConstantRange &LHS,
- const ConstantRange &RHS) {
- assert(LHS.isWrappedSet() && !RHS.isWrappedSet());
-
- // Check to see if we overlap on the Left side of RHS...
- //
- if (RHS.Lower.ult(LHS.Upper)) {
- // We do overlap on the left side of RHS, see if we overlap on the right of
- // RHS...
- if (RHS.Upper.ugt(LHS.Lower)) {
- // Ok, the result overlaps on both the left and right sides. See if the
- // resultant interval will be smaller if we wrap or not...
- //
- if (LHS.getSetSize().ult(RHS.getSetSize()))
- return LHS;
- else
- return RHS;
-
- } else {
- // No overlap on the right, just on the left.
- return ConstantRange(RHS.Lower, LHS.Upper);
- }
- } else {
- // We don't overlap on the left side of RHS, see if we overlap on the right
- // of RHS...
- if (RHS.Upper.ugt(LHS.Lower)) {
- // Simple overlap...
- return ConstantRange(LHS.Lower, RHS.Upper);
- } else {
- // No overlap...
- return ConstantRange(LHS.getBitWidth(), false);
- }
- }
-}
-
/// intersectWith - Return the range that results from the intersection of this
/// range with another range. The resultant range is guaranteed to include all
/// elements contained in both input ranges, and to have the smallest possible
@@ -486,7 +445,7 @@ ConstantRange ConstantRange::truncate(uint32_t DstTySize) const {
assert(SrcTySize > DstTySize && "Not a value truncation");
APInt Size(APInt::getLowBitsSet(SrcTySize, DstTySize));
if (isFullSet() || getSetSize().ugt(Size))
- return ConstantRange(DstTySize);
+ return ConstantRange(DstTySize, /*isFullSet=*/true);
APInt L = Lower; L.trunc(DstTySize);
APInt U = Upper; U.trunc(DstTySize);
@@ -539,6 +498,27 @@ ConstantRange::add(const ConstantRange &Other) const {
}
ConstantRange
+ConstantRange::sub(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
+ if (isFullSet() || Other.isFullSet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+
+ APInt Spread_X = getSetSize(), Spread_Y = Other.getSetSize();
+ APInt NewLower = getLower() - Other.getLower();
+ APInt NewUpper = getUpper() - Other.getUpper() + 1;
+ if (NewLower == NewUpper)
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+
+ ConstantRange X = ConstantRange(NewLower, NewUpper);
+ if (X.getSetSize().ult(Spread_X) || X.getSetSize().ult(Spread_Y))
+ // We've wrapped, therefore, full set.
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+
+ return X;
+}
+
+ConstantRange
ConstantRange::multiply(const ConstantRange &Other) const {
// TODO: If either operand is a single element and the multiply is known to
// be non-wrapping, round the result min and max value to the appropriate
@@ -616,40 +596,42 @@ ConstantRange::udiv(const ConstantRange &RHS) const {
}
ConstantRange
-ConstantRange::shl(const ConstantRange &Amount) const {
- if (isEmptySet())
- return *this;
+ConstantRange::shl(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
- APInt min = getUnsignedMin() << Amount.getUnsignedMin();
- APInt max = getUnsignedMax() << Amount.getUnsignedMax();
+ APInt min = getUnsignedMin().shl(Other.getUnsignedMin());
+ APInt max = getUnsignedMax().shl(Other.getUnsignedMax());
// there's no overflow!
APInt Zeros(getBitWidth(), getUnsignedMax().countLeadingZeros());
- if (Zeros.uge(Amount.getUnsignedMax()))
- return ConstantRange(min, max);
+ if (Zeros.ugt(Other.getUnsignedMax()))
+ return ConstantRange(min, max + 1);
// FIXME: implement the other tricky cases
- return ConstantRange(getBitWidth());
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
}
ConstantRange
-ConstantRange::ashr(const ConstantRange &Amount) const {
- if (isEmptySet())
- return *this;
+ConstantRange::lshr(const ConstantRange &Other) const {
+ if (isEmptySet() || Other.isEmptySet())
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
+
+ APInt max = getUnsignedMax().lshr(Other.getUnsignedMin());
+ APInt min = getUnsignedMin().lshr(Other.getUnsignedMax());
+ if (min == max + 1)
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
- APInt min = getUnsignedMax().ashr(Amount.getUnsignedMin());
- APInt max = getUnsignedMin().ashr(Amount.getUnsignedMax());
- return ConstantRange(min, max);
+ return ConstantRange(min, max + 1);
}
-ConstantRange
-ConstantRange::lshr(const ConstantRange &Amount) const {
- if (isEmptySet())
- return *this;
-
- APInt min = getUnsignedMax().lshr(Amount.getUnsignedMin());
- APInt max = getUnsignedMin().lshr(Amount.getUnsignedMax());
- return ConstantRange(min, max);
+ConstantRange ConstantRange::inverse() const {
+ if (isFullSet()) {
+ return ConstantRange(getBitWidth(), /*isFullSet=*/false);
+ } else if (isEmptySet()) {
+ return ConstantRange(getBitWidth(), /*isFullSet=*/true);
+ }
+ return ConstantRange(Upper, Lower);
}
/// print - Print out the bounds to a stream...
@@ -668,5 +650,3 @@ void ConstantRange::print(raw_ostream &OS) const {
void ConstantRange::dump() const {
print(dbgs());
}
-
-
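
The new ConstantRange::sub() above only trusts the computed interval when it is at least as wide as each operand's spread; a result narrower than an input means the fixed-width arithmetic wrapped, and the conservative answer is the full set. The same check is easiest to see on interval addition; here is a toy 8-bit version under assumed simplifications (proper half-open ranges only, [0,0) standing in for the full set), not the ConstantRange API:

#include <stdint.h>

// Half-open interval [Lo, Hi) over uint8_t; its spread is Hi - Lo (mod 256).
struct Range8 {
  uint8_t Lo, Hi;
};

static uint8_t spread(Range8 R) { return (uint8_t)(R.Hi - R.Lo); }

// Interval addition with the wraparound escape used above: if the tentative
// result has a smaller spread than either input, the 8-bit arithmetic
// wrapped, so give up and return the full set.
static Range8 addRanges(Range8 X, Range8 Y) {
  Range8 R;
  R.Lo = (uint8_t)(X.Lo + Y.Lo);
  R.Hi = (uint8_t)(X.Hi + Y.Hi - 1);
  if (spread(R) < spread(X) || spread(R) < spread(Y)) {
    Range8 Full = { 0, 0 };             // "anything" in this sketch
    return Full;
  }
  return R;
}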
diff --git a/libclamav/c++/llvm/lib/Support/CrashRecoveryContext.cpp b/libclamav/c++/llvm/lib/Support/CrashRecoveryContext.cpp
new file mode 100644
index 0000000..49258ed
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Support/CrashRecoveryContext.cpp
@@ -0,0 +1,204 @@
+//===--- CrashRecoveryContext.cpp - Crash Recovery ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/CrashRecoveryContext.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Config/config.h"
+#include "llvm/System/Mutex.h"
+#include "llvm/System/ThreadLocal.h"
+#include <setjmp.h>
+#include <cstdio>
+using namespace llvm;
+
+namespace {
+
+struct CrashRecoveryContextImpl;
+
+static sys::ThreadLocal<const CrashRecoveryContextImpl> CurrentContext;
+
+struct CrashRecoveryContextImpl {
+ CrashRecoveryContext *CRC;
+ std::string Backtrace;
+ ::jmp_buf JumpBuffer;
+ volatile unsigned Failed : 1;
+
+public:
+ CrashRecoveryContextImpl(CrashRecoveryContext *CRC) : CRC(CRC),
+ Failed(false) {
+ CurrentContext.set(this);
+ }
+ ~CrashRecoveryContextImpl() {
+ CurrentContext.erase();
+ }
+
+ void HandleCrash() {
+ // Eliminate the current context entry, to avoid re-entering in case the
+ // cleanup code crashes.
+ CurrentContext.erase();
+
+ assert(!Failed && "Crash recovery context already failed!");
+ Failed = true;
+
+ // FIXME: Stash the backtrace.
+
+ // Jump back to the RunSafely we were called under.
+ longjmp(JumpBuffer, 1);
+ }
+};
+
+}
+
+static sys::Mutex gCrashRecoveryContexMutex;
+static bool gCrashRecoveryEnabled = false;
+
+CrashRecoveryContext::~CrashRecoveryContext() {
+ CrashRecoveryContextImpl *CRCI = (CrashRecoveryContextImpl *) Impl;
+ delete CRCI;
+}
+
+CrashRecoveryContext *CrashRecoveryContext::GetCurrent() {
+ const CrashRecoveryContextImpl *CRCI = CurrentContext.get();
+ if (!CRCI)
+ return 0;
+
+ return CRCI->CRC;
+}
+
+#ifdef LLVM_ON_WIN32
+
+// FIXME: No real Win32 implementation currently.
+
+void CrashRecoveryContext::Enable() {
+ sys::ScopedLock L(gCrashRecoveryContexMutex);
+
+ if (gCrashRecoveryEnabled)
+ return;
+
+ gCrashRecoveryEnabled = true;
+}
+
+void CrashRecoveryContext::Disable() {
+ sys::ScopedLock L(gCrashRecoveryContexMutex);
+
+ if (!gCrashRecoveryEnabled)
+ return;
+
+ gCrashRecoveryEnabled = false;
+}
+
+#else
+
+// Generic POSIX implementation.
+//
+// This implementation relies on synchronous signals being delivered to the
+// current thread. We use a thread local object to keep track of the active
+// crash recovery context, and install signal handlers to invoke HandleCrash on
+// the active object.
+//
+// This implementation does not attempt to chain signal handlers in any
+// reliable fashion -- if we get a signal outside of a crash recovery context we
+// simply disable crash recovery and raise the signal again.
+
+#include <signal.h>
+
+static int Signals[] = { SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGSEGV, SIGTRAP };
+static const unsigned NumSignals = sizeof(Signals) / sizeof(Signals[0]);
+static struct sigaction PrevActions[NumSignals];
+
+static void CrashRecoverySignalHandler(int Signal) {
+ // Lookup the current thread local recovery object.
+ const CrashRecoveryContextImpl *CRCI = CurrentContext.get();
+
+ if (!CRCI) {
+ // We didn't find a crash recovery context -- this means either we got a
+ // signal on a thread we didn't expect it on, the application got a signal
+ // outside of a crash recovery context, or something else went horribly
+ // wrong.
+ //
+ // Disable crash recovery and raise the signal again. The assumption here is
+ // that the enclosing application will terminate soon, and we won't want to
+ // attempt crash recovery again.
+ //
+ // This call of Disable isn't thread safe, but it doesn't actually matter.
+ CrashRecoveryContext::Disable();
+ raise(Signal);
+ }
+
+ // Unblock the signal we received.
+ sigset_t SigMask;
+ sigemptyset(&SigMask);
+ sigaddset(&SigMask, Signal);
+ sigprocmask(SIG_UNBLOCK, &SigMask, 0);
+
+ if (CRCI)
+ const_cast<CrashRecoveryContextImpl*>(CRCI)->HandleCrash();
+}
+
+void CrashRecoveryContext::Enable() {
+ sys::ScopedLock L(gCrashRecoveryContexMutex);
+
+ if (gCrashRecoveryEnabled)
+ return;
+
+ gCrashRecoveryEnabled = true;
+
+ // Setup the signal handler.
+ struct sigaction Handler;
+ Handler.sa_handler = CrashRecoverySignalHandler;
+ Handler.sa_flags = 0;
+ sigemptyset(&Handler.sa_mask);
+
+ for (unsigned i = 0; i != NumSignals; ++i) {
+ sigaction(Signals[i], &Handler, &PrevActions[i]);
+ }
+}
+
+void CrashRecoveryContext::Disable() {
+ sys::ScopedLock L(gCrashRecoveryContexMutex);
+
+ if (!gCrashRecoveryEnabled)
+ return;
+
+ gCrashRecoveryEnabled = false;
+
+ // Restore the previous signal handlers.
+ for (unsigned i = 0; i != NumSignals; ++i)
+ sigaction(Signals[i], &PrevActions[i], 0);
+}
+
+#endif
+
+bool CrashRecoveryContext::RunSafely(void (*Fn)(void*), void *UserData) {
+ // If crash recovery is disabled, do nothing.
+ if (gCrashRecoveryEnabled) {
+ assert(!Impl && "Crash recovery context already initialized!");
+ CrashRecoveryContextImpl *CRCI = new CrashRecoveryContextImpl(this);
+ Impl = CRCI;
+
+ if (setjmp(CRCI->JumpBuffer) != 0) {
+ return false;
+ }
+ }
+
+ Fn(UserData);
+ return true;
+}
+
+void CrashRecoveryContext::HandleCrash() {
+ CrashRecoveryContextImpl *CRCI = (CrashRecoveryContextImpl *) Impl;
+ assert(CRCI && "Crash recovery context never initialized!");
+ CRCI->HandleCrash();
+}
+
+const std::string &CrashRecoveryContext::getBacktrace() const {
+ CrashRecoveryContextImpl *CRC = (CrashRecoveryContextImpl *) Impl;
+ assert(CRC && "Crash recovery context never initialized!");
+ assert(CRC->Failed && "No crash was detected!");
+ return CRC->Backtrace;
+}
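
The comment block in the new CrashRecoveryContext.cpp above describes the whole mechanism: a thread-local pointer records the active recovery context, synchronous signals are routed to a handler, and the handler longjmps back into RunSafely(), which then reports failure. A minimal single-threaded POSIX sketch of that pattern, with a flag standing in for the thread-local context and SIGSEGV as the only handled signal (runSafely and OnCrash are illustrative names, not the LLVM API):

#include <setjmp.h>
#include <signal.h>

static sigjmp_buf RecoveryPoint;
static volatile sig_atomic_t InProtectedRegion = 0;

static void OnCrash(int Sig) {
  if (!InProtectedRegion) {        // no active context: restore default and re-raise
    signal(Sig, SIG_DFL);
    raise(Sig);
    return;
  }
  siglongjmp(RecoveryPoint, 1);    // unwind back into runSafely()
}

// Run Fn(UserData); return false if it crashed, true otherwise.
static bool runSafely(void (*Fn)(void *), void *UserData) {
  struct sigaction Handler, Previous;
  Handler.sa_handler = OnCrash;
  Handler.sa_flags = 0;
  sigemptyset(&Handler.sa_mask);
  sigaction(SIGSEGV, &Handler, &Previous);

  bool OK = true;
  InProtectedRegion = 1;
  if (sigsetjmp(RecoveryPoint, /*savemask=*/1) == 0)
    Fn(UserData);
  else
    OK = false;                    // reached via siglongjmp from the handler
  InProtectedRegion = 0;

  sigaction(SIGSEGV, &Previous, 0);  // put the old handler back
  return OK;
}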
diff --git a/libclamav/c++/llvm/lib/Support/DAGDeltaAlgorithm.cpp b/libclamav/c++/llvm/lib/Support/DAGDeltaAlgorithm.cpp
new file mode 100644
index 0000000..8145664
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Support/DAGDeltaAlgorithm.cpp
@@ -0,0 +1,357 @@
+//===--- DAGDeltaAlgorithm.cpp - A DAG Minimization Algorithm --*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// The algorithm we use attempts to exploit the dependency information by
+// minimizing top-down. We start by constructing an initial root set R, and
+// then iteratively:
+//
+// 1. Minimize the set R using the test predicate:
+// P'(S) = P(S union pred*(S))
+//
+// 2. Extend R to R' = R union pred(R).
+//
+// until a fixed point is reached.
+//
+// The idea is that we want to quickly prune entire portions of the graph, so we
+// try to find high-level nodes that can be eliminated with all of their
+// dependents.
+//
+// FIXME: The current algorithm doesn't actually provide a strong guarantee
+// about the minimality of the result. The problem is that after adding nodes to
+// the required set, we no longer consider them for elimination. For strictly
+// well formed predicates, this doesn't happen, but it commonly occurs in
+// practice when there are unmodelled dependencies. I believe we can resolve
+// this by allowing the required set to be minimized as well, but need more test
+// cases first.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/DAGDeltaAlgorithm.h"
+#include "llvm/ADT/DeltaAlgorithm.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <map>
+using namespace llvm;
+
+namespace {
+
+class DAGDeltaAlgorithmImpl {
+ friend class DeltaActiveSetHelper;
+
+public:
+ typedef DAGDeltaAlgorithm::change_ty change_ty;
+ typedef DAGDeltaAlgorithm::changeset_ty changeset_ty;
+ typedef DAGDeltaAlgorithm::changesetlist_ty changesetlist_ty;
+ typedef DAGDeltaAlgorithm::edge_ty edge_ty;
+
+private:
+ typedef std::vector<change_ty>::iterator pred_iterator_ty;
+ typedef std::vector<change_ty>::iterator succ_iterator_ty;
+ typedef std::set<change_ty>::iterator pred_closure_iterator_ty;
+ typedef std::set<change_ty>::iterator succ_closure_iterator_ty;
+
+ DAGDeltaAlgorithm &DDA;
+
+ const changeset_ty &Changes;
+ const std::vector<edge_ty> &Dependencies;
+
+ std::vector<change_ty> Roots;
+
+ /// Cache of failed test results. Successful test results are never cached
+ /// since we always reduce following a success. We maintain an independent
+ /// cache from that used by the individual delta passes because we may get
+ /// hits across multiple individual delta invocations.
+ mutable std::set<changeset_ty> FailedTestsCache;
+
+ // FIXME: Gross.
+ std::map<change_ty, std::vector<change_ty> > Predecessors;
+ std::map<change_ty, std::vector<change_ty> > Successors;
+
+ std::map<change_ty, std::set<change_ty> > PredClosure;
+ std::map<change_ty, std::set<change_ty> > SuccClosure;
+
+private:
+ pred_iterator_ty pred_begin(change_ty Node) {
+ assert(Predecessors.count(Node) && "Invalid node!");
+ return Predecessors[Node].begin();
+ }
+ pred_iterator_ty pred_end(change_ty Node) {
+ assert(Predecessors.count(Node) && "Invalid node!");
+ return Predecessors[Node].end();
+ }
+
+ pred_closure_iterator_ty pred_closure_begin(change_ty Node) {
+ assert(PredClosure.count(Node) && "Invalid node!");
+ return PredClosure[Node].begin();
+ }
+ pred_closure_iterator_ty pred_closure_end(change_ty Node) {
+ assert(PredClosure.count(Node) && "Invalid node!");
+ return PredClosure[Node].end();
+ }
+
+ succ_iterator_ty succ_begin(change_ty Node) {
+ assert(Successors.count(Node) && "Invalid node!");
+ return Successors[Node].begin();
+ }
+ succ_iterator_ty succ_end(change_ty Node) {
+ assert(Successors.count(Node) && "Invalid node!");
+ return Successors[Node].end();
+ }
+
+ succ_closure_iterator_ty succ_closure_begin(change_ty Node) {
+ assert(SuccClosure.count(Node) && "Invalid node!");
+ return SuccClosure[Node].begin();
+ }
+ succ_closure_iterator_ty succ_closure_end(change_ty Node) {
+ assert(SuccClosure.count(Node) && "Invalid node!");
+ return SuccClosure[Node].end();
+ }
+
+ void UpdatedSearchState(const changeset_ty &Changes,
+ const changesetlist_ty &Sets,
+ const changeset_ty &Required) {
+ DDA.UpdatedSearchState(Changes, Sets, Required);
+ }
+
+ /// ExecuteOneTest - Execute a single test predicate on the change set \arg S.
+ bool ExecuteOneTest(const changeset_ty &S) {
+ // Check dependencies invariant.
+ DEBUG({
+ for (changeset_ty::const_iterator it = S.begin(),
+ ie = S.end(); it != ie; ++it)
+ for (succ_iterator_ty it2 = succ_begin(*it),
+ ie2 = succ_end(*it); it2 != ie2; ++it2)
+ assert(S.count(*it2) && "Attempt to run invalid changeset!");
+ });
+
+ return DDA.ExecuteOneTest(S);
+ }
+
+public:
+ DAGDeltaAlgorithmImpl(DAGDeltaAlgorithm &_DDA,
+ const changeset_ty &_Changes,
+ const std::vector<edge_ty> &_Dependencies);
+
+ changeset_ty Run();
+
+ /// GetTestResult - Get the test result for the active set \arg Changes with
+ /// \arg Required changes from the cache, executing the test if necessary.
+ ///
+ /// \param Changes - The set of active changes being minimized, which should
+ /// have their pred closure included in the test.
+ /// \param Required - The set of changes which have previously been
+ /// established to be required.
+ /// \return - The test result.
+ bool GetTestResult(const changeset_ty &Changes, const changeset_ty &Required);
+};
+
+/// Helper object for minimizing an active set of changes.
+class DeltaActiveSetHelper : public DeltaAlgorithm {
+ DAGDeltaAlgorithmImpl &DDAI;
+
+ const changeset_ty &Required;
+
+protected:
+ /// UpdatedSearchState - Callback used when the search state changes.
+ virtual void UpdatedSearchState(const changeset_ty &Changes,
+ const changesetlist_ty &Sets) {
+ DDAI.UpdatedSearchState(Changes, Sets, Required);
+ }
+
+ virtual bool ExecuteOneTest(const changeset_ty &S) {
+ return DDAI.GetTestResult(S, Required);
+ }
+
+public:
+ DeltaActiveSetHelper(DAGDeltaAlgorithmImpl &_DDAI,
+ const changeset_ty &_Required)
+ : DDAI(_DDAI), Required(_Required) {}
+};
+
+}
+
+DAGDeltaAlgorithmImpl::DAGDeltaAlgorithmImpl(DAGDeltaAlgorithm &_DDA,
+ const changeset_ty &_Changes,
+ const std::vector<edge_ty>
+ &_Dependencies)
+ : DDA(_DDA),
+ Changes(_Changes),
+ Dependencies(_Dependencies)
+{
+ for (changeset_ty::const_iterator it = Changes.begin(),
+ ie = Changes.end(); it != ie; ++it) {
+ Predecessors.insert(std::make_pair(*it, std::vector<change_ty>()));
+ Successors.insert(std::make_pair(*it, std::vector<change_ty>()));
+ }
+ for (std::vector<edge_ty>::const_iterator it = Dependencies.begin(),
+ ie = Dependencies.end(); it != ie; ++it) {
+ Predecessors[it->second].push_back(it->first);
+ Successors[it->first].push_back(it->second);
+ }
+
+ // Compute the roots.
+ for (changeset_ty::const_iterator it = Changes.begin(),
+ ie = Changes.end(); it != ie; ++it)
+ if (succ_begin(*it) == succ_end(*it))
+ Roots.push_back(*it);
+
+ // Pre-compute the closure of the successor relation.
+ std::vector<change_ty> Worklist(Roots.begin(), Roots.end());
+ while (!Worklist.empty()) {
+ change_ty Change = Worklist.back();
+ Worklist.pop_back();
+
+ std::set<change_ty> &ChangeSuccs = SuccClosure[Change];
+ for (pred_iterator_ty it = pred_begin(Change),
+ ie = pred_end(Change); it != ie; ++it) {
+ SuccClosure[*it].insert(Change);
+ SuccClosure[*it].insert(ChangeSuccs.begin(), ChangeSuccs.end());
+ Worklist.push_back(*it);
+ }
+ }
+
+ // Invert to form the predecessor closure map.
+ for (changeset_ty::const_iterator it = Changes.begin(),
+ ie = Changes.end(); it != ie; ++it)
+ PredClosure.insert(std::make_pair(*it, std::set<change_ty>()));
+ for (changeset_ty::const_iterator it = Changes.begin(),
+ ie = Changes.end(); it != ie; ++it)
+ for (succ_closure_iterator_ty it2 = succ_closure_begin(*it),
+ ie2 = succ_closure_end(*it); it2 != ie2; ++it2)
+ PredClosure[*it2].insert(*it);
+
+ // Dump useful debug info.
+ DEBUG({
+ llvm::errs() << "-- DAGDeltaAlgorithmImpl --\n";
+ llvm::errs() << "Changes: [";
+ for (changeset_ty::const_iterator it = Changes.begin(),
+ ie = Changes.end(); it != ie; ++it) {
+ if (it != Changes.begin()) llvm::errs() << ", ";
+ llvm::errs() << *it;
+
+ if (succ_begin(*it) != succ_end(*it)) {
+ llvm::errs() << "(";
+ for (succ_iterator_ty it2 = succ_begin(*it),
+ ie2 = succ_end(*it); it2 != ie2; ++it2) {
+ if (it2 != succ_begin(*it)) llvm::errs() << ", ";
+ llvm::errs() << "->" << *it2;
+ }
+ llvm::errs() << ")";
+ }
+ }
+ llvm::errs() << "]\n";
+
+ llvm::errs() << "Roots: [";
+ for (std::vector<change_ty>::const_iterator it = Roots.begin(),
+ ie = Roots.end(); it != ie; ++it) {
+ if (it != Roots.begin()) llvm::errs() << ", ";
+ llvm::errs() << *it;
+ }
+ llvm::errs() << "]\n";
+
+ llvm::errs() << "Predecessor Closure:\n";
+ for (changeset_ty::const_iterator it = Changes.begin(),
+ ie = Changes.end(); it != ie; ++it) {
+ llvm::errs() << format(" %-4d: [", *it);
+ for (pred_closure_iterator_ty it2 = pred_closure_begin(*it),
+ ie2 = pred_closure_end(*it); it2 != ie2; ++it2) {
+ if (it2 != pred_closure_begin(*it)) llvm::errs() << ", ";
+ llvm::errs() << *it2;
+ }
+ llvm::errs() << "]\n";
+ }
+
+ llvm::errs() << "Successor Closure:\n";
+ for (changeset_ty::const_iterator it = Changes.begin(),
+ ie = Changes.end(); it != ie; ++it) {
+ llvm::errs() << format(" %-4d: [", *it);
+ for (succ_closure_iterator_ty it2 = succ_closure_begin(*it),
+ ie2 = succ_closure_end(*it); it2 != ie2; ++it2) {
+ if (it2 != succ_closure_begin(*it)) llvm::errs() << ", ";
+ llvm::errs() << *it2;
+ }
+ llvm::errs() << "]\n";
+ }
+
+ llvm::errs() << "\n\n";
+ });
+}
+
+bool DAGDeltaAlgorithmImpl::GetTestResult(const changeset_ty &Changes,
+ const changeset_ty &Required) {
+ changeset_ty Extended(Required);
+ Extended.insert(Changes.begin(), Changes.end());
+ for (changeset_ty::const_iterator it = Changes.begin(),
+ ie = Changes.end(); it != ie; ++it)
+ Extended.insert(pred_closure_begin(*it), pred_closure_end(*it));
+
+ if (FailedTestsCache.count(Extended))
+ return false;
+
+ bool Result = ExecuteOneTest(Extended);
+ if (!Result)
+ FailedTestsCache.insert(Extended);
+
+ return Result;
+}
+
+DAGDeltaAlgorithm::changeset_ty
+DAGDeltaAlgorithmImpl::Run() {
+ // The current set of changes we are minimizing, starting at the roots.
+ changeset_ty CurrentSet(Roots.begin(), Roots.end());
+
+ // The set of required changes.
+ changeset_ty Required;
+
+ // Iterate until the active set of changes is empty. Convergence is guaranteed
+ // assuming the input was a DAG.
+ //
+ // Invariant: CurrentSet intersect Required == {}
+ // Invariant: Required == (Required union succ*(Required))
+ while (!CurrentSet.empty()) {
+ DEBUG({
+ llvm::errs() << "DAG_DD - " << CurrentSet.size() << " active changes, "
+ << Required.size() << " required changes\n";
+ });
+
+ // Minimize the current set of changes.
+ DeltaActiveSetHelper Helper(*this, Required);
+ changeset_ty CurrentMinSet = Helper.Run(CurrentSet);
+
+ // Update the set of required changes. Since
+ // CurrentMinSet subset CurrentSet
+ // and after the last iteration,
+ // succ(CurrentSet) subset Required
+ // then
+ // succ(CurrentMinSet) subset Required
+ // and our invariant on Required is maintained.
+ Required.insert(CurrentMinSet.begin(), CurrentMinSet.end());
+
+ // Replace the current set with the predecessors of the minimized set of
+ // active changes.
+ CurrentSet.clear();
+ for (changeset_ty::const_iterator it = CurrentMinSet.begin(),
+ ie = CurrentMinSet.end(); it != ie; ++it)
+ CurrentSet.insert(pred_begin(*it), pred_end(*it));
+
+ // FIXME: We could enforce CurrentSet intersect Required == {} here if we
+ // wanted to protect against cyclic graphs.
+ }
+
+ return Required;
+}
+
+DAGDeltaAlgorithm::changeset_ty
+DAGDeltaAlgorithm::Run(const changeset_ty &Changes,
+ const std::vector<edge_ty> &Dependencies) {
+ return DAGDeltaAlgorithmImpl(*this, Changes, Dependencies).Run();
+}
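
For reference, a client of DAGDeltaAlgorithm only has to supply the test predicate (the ExecuteOneTest hook the implementation above calls through DDA). The driver below is a minimal sketch, assuming the typedefs declared in llvm/ADT/DAGDeltaAlgorithm.h (change_ty is an unsigned change id, edge_ty a (predecessor, successor) pair, changeset_ty a std::set of ids); the predicate and the concrete change numbers are illustrative, not part of this patch.

#include "llvm/ADT/DAGDeltaAlgorithm.h"
#include <set>
#include <utility>
#include <vector>

namespace {
// Passes only when every "needed" change is still present in the candidate
// set; a stand-in for a real, expensive test.
class FixedTestDAGDelta : public llvm::DAGDeltaAlgorithm {
  changeset_ty Needed;
public:
  FixedTestDAGDelta(const changeset_ty &Needed) : Needed(Needed) {}

  virtual bool ExecuteOneTest(const changeset_ty &S) {
    for (changeset_ty::const_iterator it = Needed.begin(),
           ie = Needed.end(); it != ie; ++it)
      if (!S.count(*it))
        return false;
    return true;
  }
};
}

int main() {
  // Four changes 0..3; the edge (1, 3) makes 1 a predecessor of 3.
  std::set<unsigned> Changes;
  for (unsigned i = 0; i != 4; ++i)
    Changes.insert(i);
  std::vector<std::pair<unsigned, unsigned> > Deps;
  Deps.push_back(std::make_pair(1u, 3u));

  std::set<unsigned> Needed;
  Needed.insert(1);
  Needed.insert(3);

  FixedTestDAGDelta DD(Needed);
  // Expected to come back as {1, 3}: 0 and 2 are pruned, and the dependency
  // edge lets the search avoid testing sets that keep 3 but drop 1.
  std::set<unsigned> Result = DD.Run(Changes, Deps);
  return Result == Needed ? 0 : 1;
}
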
diff --git a/libclamav/c++/llvm/lib/Support/Debug.cpp b/libclamav/c++/llvm/lib/Support/Debug.cpp
index 82b4b8c..7f48f8a 100644
--- a/libclamav/c++/llvm/lib/Support/Debug.cpp
+++ b/libclamav/c++/llvm/lib/Support/Debug.cpp
@@ -51,12 +51,19 @@ DebugBufferSize("debug-buffer-size",
cl::init(0));
static std::string CurrentDebugType;
-static struct DebugOnlyOpt {
+
+namespace {
+
+struct DebugOnlyOpt {
void operator=(const std::string &Val) const {
DebugFlag |= !Val.empty();
CurrentDebugType = Val;
}
-} DebugOnlyOptLoc;
+};
+
+}
+
+static DebugOnlyOpt DebugOnlyOptLoc;
static cl::opt<DebugOnlyOpt, true, cl::parser<std::string> >
DebugOnly("debug-only", cl::desc("Enable a specific type of debug output"),
@@ -64,8 +71,7 @@ DebugOnly("debug-only", cl::desc("Enable a specific type of debug output"),
cl::location(DebugOnlyOptLoc), cl::ValueRequired);
// Signal handlers - dump debug output on termination.
-static void debug_user_sig_handler(void *Cookie)
-{
+static void debug_user_sig_handler(void *Cookie) {
// This is a bit sneaky. Since this is under #ifndef NDEBUG, we
// know that debug mode is enabled and dbgs() really is a
// circular_raw_ostream. If NDEBUG is defined, then dbgs() ==
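
The DebugOnlyOpt object above is the usual cl::location trick: the parsed value is funnelled into an external sink object whose operator= performs the side effect. A generic sketch of that pattern follows; the option name, flag and sink type are made up for illustration and are not part of this patch.

#include "llvm/Support/CommandLine.h"
#include <string>

namespace {

bool MyFeatureEnabled = false;
std::string MyFeatureType;

// Receives the parsed string via operator=, mirroring DebugOnlyOpt above.
struct MyFeatureOpt {
  void operator=(const std::string &Val) const {
    MyFeatureEnabled |= !Val.empty();
    MyFeatureType = Val;
  }
};

}

static MyFeatureOpt MyFeatureOptLoc;

static llvm::cl::opt<MyFeatureOpt, true, llvm::cl::parser<std::string> >
MyFeature("my-feature-only",
          llvm::cl::desc("Enable a specific type of feature output"),
          llvm::cl::Hidden, llvm::cl::value_desc("type"),
          llvm::cl::location(MyFeatureOptLoc), llvm::cl::ValueRequired);

int main(int argc, char **argv) {
  llvm::cl::ParseCommandLineOptions(argc, argv);
  return MyFeatureEnabled ? 0 : 1;
}
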
diff --git a/libclamav/c++/llvm/lib/Support/DeltaAlgorithm.cpp b/libclamav/c++/llvm/lib/Support/DeltaAlgorithm.cpp
index d176548..9e52874 100644
--- a/libclamav/c++/llvm/lib/Support/DeltaAlgorithm.cpp
+++ b/libclamav/c++/llvm/lib/Support/DeltaAlgorithm.cpp
@@ -30,10 +30,10 @@ void DeltaAlgorithm::Split(const changeset_ty &S, changesetlist_ty &Res) {
// FIXME: This is really slow.
changeset_ty LHS, RHS;
- unsigned idx = 0;
+ unsigned idx = 0, N = S.size() / 2;
for (changeset_ty::const_iterator it = S.begin(),
ie = S.end(); it != ie; ++it, ++idx)
- ((idx & 1) ? LHS : RHS).insert(*it);
+ ((idx < N) ? LHS : RHS).insert(*it);
if (!LHS.empty())
Res.push_back(LHS);
if (!RHS.empty())
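
The hunk above changes Split() from dealing elements out alternately to cutting the ordered set in half, so the two candidate subsets are contiguous ranges. A standalone sketch of the new behaviour, using a plain std::set<unsigned> in place of changeset_ty:

#include <cstdio>
#include <set>

int main() {
  std::set<unsigned> S;
  for (unsigned i = 1; i <= 5; ++i)
    S.insert(i);

  std::set<unsigned> LHS, RHS;
  unsigned idx = 0, N = S.size() / 2;
  for (std::set<unsigned>::const_iterator it = S.begin(),
         ie = S.end(); it != ie; ++it, ++idx)
    ((idx < N) ? LHS : RHS).insert(*it);

  // Old split (idx & 1): LHS = {2, 4},  RHS = {1, 3, 5}
  // New split (idx < N): LHS = {1, 2},  RHS = {3, 4, 5}
  std::printf("LHS has %u elements, RHS has %u\n",
              (unsigned)LHS.size(), (unsigned)RHS.size());
  return 0;
}
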
diff --git a/libclamav/c++/llvm/lib/Support/Dwarf.cpp b/libclamav/c++/llvm/lib/Support/Dwarf.cpp
index d1230b9..96ce9d3 100644
--- a/libclamav/c++/llvm/lib/Support/Dwarf.cpp
+++ b/libclamav/c++/llvm/lib/Support/Dwarf.cpp
@@ -86,8 +86,8 @@ const char *llvm::dwarf::TagString(unsigned Tag) {
///
const char *llvm::dwarf::ChildrenString(unsigned Children) {
switch (Children) {
- case DW_CHILDREN_no: return "CHILDREN_no";
- case DW_CHILDREN_yes: return "CHILDREN_yes";
+ case DW_CHILDREN_no: return "DW_CHILDREN_no";
+ case DW_CHILDREN_yes: return "DW_CHILDREN_yes";
}
return 0;
}
@@ -196,8 +196,9 @@ const char *llvm::dwarf::AttributeString(unsigned Attribute) {
case DW_AT_APPLE_flags: return "DW_AT_APPLE_flags";
case DW_AT_APPLE_isa: return "DW_AT_APPLE_isa";
case DW_AT_APPLE_block: return "DW_AT_APPLE_block";
- case DW_AT_APPLE_major_runtime_vers: return "DW_AT_APPLE_major_runtime_vers";
+ case DW_AT_APPLE_major_runtime_vers: return "DW_AT_APPLE_major_runtime_vers";
case DW_AT_APPLE_runtime_class: return "DW_AT_APPLE_runtime_class";
+ case DW_AT_APPLE_omit_frame_ptr: return "DW_AT_APPLE_omit_frame_ptr";
}
return 0;
}
@@ -206,27 +207,27 @@ const char *llvm::dwarf::AttributeString(unsigned Attribute) {
///
const char *llvm::dwarf::FormEncodingString(unsigned Encoding) {
switch (Encoding) {
- case DW_FORM_addr: return "FORM_addr";
- case DW_FORM_block2: return "FORM_block2";
- case DW_FORM_block4: return "FORM_block4";
- case DW_FORM_data2: return "FORM_data2";
- case DW_FORM_data4: return "FORM_data4";
- case DW_FORM_data8: return "FORM_data8";
- case DW_FORM_string: return "FORM_string";
- case DW_FORM_block: return "FORM_block";
- case DW_FORM_block1: return "FORM_block1";
- case DW_FORM_data1: return "FORM_data1";
- case DW_FORM_flag: return "FORM_flag";
- case DW_FORM_sdata: return "FORM_sdata";
- case DW_FORM_strp: return "FORM_strp";
- case DW_FORM_udata: return "FORM_udata";
- case DW_FORM_ref_addr: return "FORM_ref_addr";
- case DW_FORM_ref1: return "FORM_ref1";
- case DW_FORM_ref2: return "FORM_ref2";
- case DW_FORM_ref4: return "FORM_ref4";
- case DW_FORM_ref8: return "FORM_ref8";
- case DW_FORM_ref_udata: return "FORM_ref_udata";
- case DW_FORM_indirect: return "FORM_indirect";
+ case DW_FORM_addr: return "DW_FORM_addr";
+ case DW_FORM_block2: return "DW_FORM_block2";
+ case DW_FORM_block4: return "DW_FORM_block4";
+ case DW_FORM_data2: return "DW_FORM_data2";
+ case DW_FORM_data4: return "DW_FORM_data4";
+ case DW_FORM_data8: return "DW_FORM_data8";
+ case DW_FORM_string: return "DW_FORM_string";
+ case DW_FORM_block: return "DW_FORM_block";
+ case DW_FORM_block1: return "DW_FORM_block1";
+ case DW_FORM_data1: return "DW_FORM_data1";
+ case DW_FORM_flag: return "DW_FORM_flag";
+ case DW_FORM_sdata: return "DW_FORM_sdata";
+ case DW_FORM_strp: return "DW_FORM_strp";
+ case DW_FORM_udata: return "DW_FORM_udata";
+ case DW_FORM_ref_addr: return "DW_FORM_ref_addr";
+ case DW_FORM_ref1: return "DW_FORM_ref1";
+ case DW_FORM_ref2: return "DW_FORM_ref2";
+ case DW_FORM_ref4: return "DW_FORM_ref4";
+ case DW_FORM_ref8: return "DW_FORM_ref8";
+ case DW_FORM_ref_udata: return "DW_FORM_ref_udata";
+ case DW_FORM_indirect: return "DW_FORM_indirect";
}
return 0;
}
@@ -235,72 +236,159 @@ const char *llvm::dwarf::FormEncodingString(unsigned Encoding) {
/// encoding.
const char *llvm::dwarf::OperationEncodingString(unsigned Encoding) {
switch (Encoding) {
- case DW_OP_addr: return "OP_addr";
- case DW_OP_deref: return "OP_deref";
- case DW_OP_const1u: return "OP_const1u";
- case DW_OP_const1s: return "OP_const1s";
- case DW_OP_const2u: return "OP_const2u";
- case DW_OP_const2s: return "OP_const2s";
- case DW_OP_const4u: return "OP_const4u";
- case DW_OP_const4s: return "OP_const4s";
- case DW_OP_const8u: return "OP_const8u";
- case DW_OP_const8s: return "OP_const8s";
- case DW_OP_constu: return "OP_constu";
- case DW_OP_consts: return "OP_consts";
- case DW_OP_dup: return "OP_dup";
- case DW_OP_drop: return "OP_drop";
- case DW_OP_over: return "OP_over";
- case DW_OP_pick: return "OP_pick";
- case DW_OP_swap: return "OP_swap";
- case DW_OP_rot: return "OP_rot";
- case DW_OP_xderef: return "OP_xderef";
- case DW_OP_abs: return "OP_abs";
- case DW_OP_and: return "OP_and";
- case DW_OP_div: return "OP_div";
- case DW_OP_minus: return "OP_minus";
- case DW_OP_mod: return "OP_mod";
- case DW_OP_mul: return "OP_mul";
- case DW_OP_neg: return "OP_neg";
- case DW_OP_not: return "OP_not";
- case DW_OP_or: return "OP_or";
- case DW_OP_plus: return "OP_plus";
- case DW_OP_plus_uconst: return "OP_plus_uconst";
- case DW_OP_shl: return "OP_shl";
- case DW_OP_shr: return "OP_shr";
- case DW_OP_shra: return "OP_shra";
- case DW_OP_xor: return "OP_xor";
- case DW_OP_skip: return "OP_skip";
- case DW_OP_bra: return "OP_bra";
- case DW_OP_eq: return "OP_eq";
- case DW_OP_ge: return "OP_ge";
- case DW_OP_gt: return "OP_gt";
- case DW_OP_le: return "OP_le";
- case DW_OP_lt: return "OP_lt";
- case DW_OP_ne: return "OP_ne";
- case DW_OP_lit0: return "OP_lit0";
- case DW_OP_lit1: return "OP_lit1";
- case DW_OP_lit31: return "OP_lit31";
- case DW_OP_reg0: return "OP_reg0";
- case DW_OP_reg1: return "OP_reg1";
- case DW_OP_reg31: return "OP_reg31";
- case DW_OP_breg0: return "OP_breg0";
- case DW_OP_breg1: return "OP_breg1";
- case DW_OP_breg31: return "OP_breg31";
- case DW_OP_regx: return "OP_regx";
- case DW_OP_fbreg: return "OP_fbreg";
- case DW_OP_bregx: return "OP_bregx";
- case DW_OP_piece: return "OP_piece";
- case DW_OP_deref_size: return "OP_deref_size";
- case DW_OP_xderef_size: return "OP_xderef_size";
- case DW_OP_nop: return "OP_nop";
- case DW_OP_push_object_address: return "OP_push_object_address";
- case DW_OP_call2: return "OP_call2";
- case DW_OP_call4: return "OP_call4";
- case DW_OP_call_ref: return "OP_call_ref";
- case DW_OP_form_tls_address: return "OP_form_tls_address";
- case DW_OP_call_frame_cfa: return "OP_call_frame_cfa";
- case DW_OP_lo_user: return "OP_lo_user";
- case DW_OP_hi_user: return "OP_hi_user";
+ case DW_OP_addr: return "DW_OP_addr";
+ case DW_OP_deref: return "DW_OP_deref";
+ case DW_OP_const1u: return "DW_OP_const1u";
+ case DW_OP_const1s: return "DW_OP_const1s";
+ case DW_OP_const2u: return "DW_OP_const2u";
+ case DW_OP_const2s: return "DW_OP_const2s";
+ case DW_OP_const4u: return "DW_OP_const4u";
+ case DW_OP_const4s: return "DW_OP_const4s";
+ case DW_OP_const8u: return "DW_OP_const8u";
+ case DW_OP_const8s: return "DW_OP_const8s";
+ case DW_OP_constu: return "DW_OP_constu";
+ case DW_OP_consts: return "DW_OP_consts";
+ case DW_OP_dup: return "DW_OP_dup";
+ case DW_OP_drop: return "DW_OP_drop";
+ case DW_OP_over: return "DW_OP_over";
+ case DW_OP_pick: return "DW_OP_pick";
+ case DW_OP_swap: return "DW_OP_swap";
+ case DW_OP_rot: return "DW_OP_rot";
+ case DW_OP_xderef: return "DW_OP_xderef";
+ case DW_OP_abs: return "DW_OP_abs";
+ case DW_OP_and: return "DW_OP_and";
+ case DW_OP_div: return "DW_OP_div";
+ case DW_OP_minus: return "DW_OP_minus";
+ case DW_OP_mod: return "DW_OP_mod";
+ case DW_OP_mul: return "DW_OP_mul";
+ case DW_OP_neg: return "DW_OP_neg";
+ case DW_OP_not: return "DW_OP_not";
+ case DW_OP_or: return "DW_OP_or";
+ case DW_OP_plus: return "DW_OP_plus";
+ case DW_OP_plus_uconst: return "DW_OP_plus_uconst";
+ case DW_OP_shl: return "DW_OP_shl";
+ case DW_OP_shr: return "DW_OP_shr";
+ case DW_OP_shra: return "DW_OP_shra";
+ case DW_OP_xor: return "DW_OP_xor";
+ case DW_OP_skip: return "DW_OP_skip";
+ case DW_OP_bra: return "DW_OP_bra";
+ case DW_OP_eq: return "DW_OP_eq";
+ case DW_OP_ge: return "DW_OP_ge";
+ case DW_OP_gt: return "DW_OP_gt";
+ case DW_OP_le: return "DW_OP_le";
+ case DW_OP_lt: return "DW_OP_lt";
+ case DW_OP_ne: return "DW_OP_ne";
+ case DW_OP_lit0: return "DW_OP_lit0";
+ case DW_OP_lit1: return "DW_OP_lit1";
+ case DW_OP_lit2: return "DW_OP_lit2";
+ case DW_OP_lit3: return "DW_OP_lit3";
+ case DW_OP_lit4: return "DW_OP_lit4";
+ case DW_OP_lit5: return "DW_OP_lit5";
+ case DW_OP_lit6: return "DW_OP_lit6";
+ case DW_OP_lit7: return "DW_OP_lit7";
+ case DW_OP_lit8: return "DW_OP_lit8";
+ case DW_OP_lit9: return "DW_OP_lit9";
+ case DW_OP_lit10: return "DW_OP_lit10";
+ case DW_OP_lit11: return "DW_OP_lit11";
+ case DW_OP_lit12: return "DW_OP_lit12";
+ case DW_OP_lit13: return "DW_OP_lit13";
+ case DW_OP_lit14: return "DW_OP_lit14";
+ case DW_OP_lit15: return "DW_OP_lit15";
+ case DW_OP_lit16: return "DW_OP_lit16";
+ case DW_OP_lit17: return "DW_OP_lit17";
+ case DW_OP_lit18: return "DW_OP_lit18";
+ case DW_OP_lit19: return "DW_OP_lit19";
+ case DW_OP_lit20: return "DW_OP_lit20";
+ case DW_OP_lit21: return "DW_OP_lit21";
+ case DW_OP_lit22: return "DW_OP_lit22";
+ case DW_OP_lit23: return "DW_OP_lit23";
+ case DW_OP_lit24: return "DW_OP_lit24";
+ case DW_OP_lit25: return "DW_OP_lit25";
+ case DW_OP_lit26: return "DW_OP_lit26";
+ case DW_OP_lit27: return "DW_OP_lit27";
+ case DW_OP_lit28: return "DW_OP_lit28";
+ case DW_OP_lit29: return "DW_OP_lit29";
+ case DW_OP_lit30: return "DW_OP_lit30";
+ case DW_OP_lit31: return "DW_OP_lit31";
+ case DW_OP_reg0: return "DW_OP_reg0";
+ case DW_OP_reg1: return "DW_OP_reg1";
+ case DW_OP_reg2: return "DW_OP_reg2";
+ case DW_OP_reg3: return "DW_OP_reg3";
+ case DW_OP_reg4: return "DW_OP_reg4";
+ case DW_OP_reg5: return "DW_OP_reg5";
+ case DW_OP_reg6: return "DW_OP_reg6";
+ case DW_OP_reg7: return "DW_OP_reg7";
+ case DW_OP_reg8: return "DW_OP_reg8";
+ case DW_OP_reg9: return "DW_OP_reg9";
+ case DW_OP_reg10: return "DW_OP_reg10";
+ case DW_OP_reg11: return "DW_OP_reg11";
+ case DW_OP_reg12: return "DW_OP_reg12";
+ case DW_OP_reg13: return "DW_OP_reg13";
+ case DW_OP_reg14: return "DW_OP_reg14";
+ case DW_OP_reg15: return "DW_OP_reg15";
+ case DW_OP_reg16: return "DW_OP_reg16";
+ case DW_OP_reg17: return "DW_OP_reg17";
+ case DW_OP_reg18: return "DW_OP_reg18";
+ case DW_OP_reg19: return "DW_OP_reg19";
+ case DW_OP_reg20: return "DW_OP_reg20";
+ case DW_OP_reg21: return "DW_OP_reg21";
+ case DW_OP_reg22: return "DW_OP_reg22";
+ case DW_OP_reg23: return "DW_OP_reg23";
+ case DW_OP_reg24: return "DW_OP_reg24";
+ case DW_OP_reg25: return "DW_OP_reg25";
+ case DW_OP_reg26: return "DW_OP_reg26";
+ case DW_OP_reg27: return "DW_OP_reg27";
+ case DW_OP_reg28: return "DW_OP_reg28";
+ case DW_OP_reg29: return "DW_OP_reg29";
+ case DW_OP_reg30: return "DW_OP_reg30";
+ case DW_OP_reg31: return "DW_OP_reg31";
+ case DW_OP_breg0: return "DW_OP_breg0";
+ case DW_OP_breg1: return "DW_OP_breg1";
+ case DW_OP_breg2: return "DW_OP_breg2";
+ case DW_OP_breg3: return "DW_OP_breg3";
+ case DW_OP_breg4: return "DW_OP_breg4";
+ case DW_OP_breg5: return "DW_OP_breg5";
+ case DW_OP_breg6: return "DW_OP_breg6";
+ case DW_OP_breg7: return "DW_OP_breg7";
+ case DW_OP_breg8: return "DW_OP_breg8";
+ case DW_OP_breg9: return "DW_OP_breg9";
+ case DW_OP_breg10: return "DW_OP_breg10";
+ case DW_OP_breg11: return "DW_OP_breg11";
+ case DW_OP_breg12: return "DW_OP_breg12";
+ case DW_OP_breg13: return "DW_OP_breg13";
+ case DW_OP_breg14: return "DW_OP_breg14";
+ case DW_OP_breg15: return "DW_OP_breg15";
+ case DW_OP_breg16: return "DW_OP_breg16";
+ case DW_OP_breg17: return "DW_OP_breg17";
+ case DW_OP_breg18: return "DW_OP_breg18";
+ case DW_OP_breg19: return "DW_OP_breg19";
+ case DW_OP_breg20: return "DW_OP_breg20";
+ case DW_OP_breg21: return "DW_OP_breg21";
+ case DW_OP_breg22: return "DW_OP_breg22";
+ case DW_OP_breg23: return "DW_OP_breg23";
+ case DW_OP_breg24: return "DW_OP_breg24";
+ case DW_OP_breg25: return "DW_OP_breg25";
+ case DW_OP_breg26: return "DW_OP_breg26";
+ case DW_OP_breg27: return "DW_OP_breg27";
+ case DW_OP_breg28: return "DW_OP_breg28";
+ case DW_OP_breg29: return "DW_OP_breg29";
+ case DW_OP_breg30: return "DW_OP_breg30";
+ case DW_OP_breg31: return "DW_OP_breg31";
+ case DW_OP_regx: return "DW_OP_regx";
+ case DW_OP_fbreg: return "DW_OP_fbreg";
+ case DW_OP_bregx: return "DW_OP_bregx";
+ case DW_OP_piece: return "DW_OP_piece";
+ case DW_OP_deref_size: return "DW_OP_deref_size";
+ case DW_OP_xderef_size: return "DW_OP_xderef_size";
+ case DW_OP_nop: return "DW_OP_nop";
+ case DW_OP_push_object_address: return "DW_OP_push_object_address";
+ case DW_OP_call2: return "DW_OP_call2";
+ case DW_OP_call4: return "DW_OP_call4";
+ case DW_OP_call_ref: return "DW_OP_call_ref";
+ case DW_OP_form_tls_address: return "DW_OP_form_tls_address";
+ case DW_OP_call_frame_cfa: return "DW_OP_call_frame_cfa";
+ case DW_OP_lo_user: return "DW_OP_lo_user";
+ case DW_OP_hi_user: return "DW_OP_hi_user";
}
return 0;
}
@@ -309,23 +397,23 @@ const char *llvm::dwarf::OperationEncodingString(unsigned Encoding) {
/// encoding.
const char *llvm::dwarf::AttributeEncodingString(unsigned Encoding) {
switch (Encoding) {
- case DW_ATE_address: return "ATE_address";
- case DW_ATE_boolean: return "ATE_boolean";
- case DW_ATE_complex_float: return "ATE_complex_float";
- case DW_ATE_float: return "ATE_float";
- case DW_ATE_signed: return "ATE_signed";
- case DW_ATE_signed_char: return "ATE_signed_char";
- case DW_ATE_unsigned: return "ATE_unsigned";
- case DW_ATE_unsigned_char: return "ATE_unsigned_char";
- case DW_ATE_imaginary_float: return "ATE_imaginary_float";
- case DW_ATE_packed_decimal: return "ATE_packed_decimal";
- case DW_ATE_numeric_string: return "ATE_numeric_string";
- case DW_ATE_edited: return "ATE_edited";
- case DW_ATE_signed_fixed: return "ATE_signed_fixed";
- case DW_ATE_unsigned_fixed: return "ATE_unsigned_fixed";
- case DW_ATE_decimal_float: return "ATE_decimal_float";
- case DW_ATE_lo_user: return "ATE_lo_user";
- case DW_ATE_hi_user: return "ATE_hi_user";
+ case DW_ATE_address: return "DW_ATE_address";
+ case DW_ATE_boolean: return "DW_ATE_boolean";
+ case DW_ATE_complex_float: return "DW_ATE_complex_float";
+ case DW_ATE_float: return "DW_ATE_float";
+ case DW_ATE_signed: return "DW_ATE_signed";
+ case DW_ATE_signed_char: return "DW_ATE_signed_char";
+ case DW_ATE_unsigned: return "DW_ATE_unsigned";
+ case DW_ATE_unsigned_char: return "DW_ATE_unsigned_char";
+ case DW_ATE_imaginary_float: return "DW_ATE_imaginary_float";
+ case DW_ATE_packed_decimal: return "DW_ATE_packed_decimal";
+ case DW_ATE_numeric_string: return "DW_ATE_numeric_string";
+ case DW_ATE_edited: return "DW_ATE_edited";
+ case DW_ATE_signed_fixed: return "DW_ATE_signed_fixed";
+ case DW_ATE_unsigned_fixed: return "DW_ATE_unsigned_fixed";
+ case DW_ATE_decimal_float: return "DW_ATE_decimal_float";
+ case DW_ATE_lo_user: return "DW_ATE_lo_user";
+ case DW_ATE_hi_user: return "DW_ATE_hi_user";
}
return 0;
}
@@ -334,11 +422,11 @@ const char *llvm::dwarf::AttributeEncodingString(unsigned Encoding) {
/// attribute.
const char *llvm::dwarf::DecimalSignString(unsigned Sign) {
switch (Sign) {
- case DW_DS_unsigned: return "DS_unsigned";
- case DW_DS_leading_overpunch: return "DS_leading_overpunch";
- case DW_DS_trailing_overpunch: return "DS_trailing_overpunch";
- case DW_DS_leading_separate: return "DS_leading_separate";
- case DW_DS_trailing_separate: return "DS_trailing_separate";
+ case DW_DS_unsigned: return "DW_DS_unsigned";
+ case DW_DS_leading_overpunch: return "DW_DS_leading_overpunch";
+ case DW_DS_trailing_overpunch: return "DW_DS_trailing_overpunch";
+ case DW_DS_leading_separate: return "DW_DS_leading_separate";
+ case DW_DS_trailing_separate: return "DW_DS_trailing_separate";
}
return 0;
}
@@ -347,11 +435,11 @@ const char *llvm::dwarf::DecimalSignString(unsigned Sign) {
///
const char *llvm::dwarf::EndianityString(unsigned Endian) {
switch (Endian) {
- case DW_END_default: return "END_default";
- case DW_END_big: return "END_big";
- case DW_END_little: return "END_little";
- case DW_END_lo_user: return "END_lo_user";
- case DW_END_hi_user: return "END_hi_user";
+ case DW_END_default: return "DW_END_default";
+ case DW_END_big: return "DW_END_big";
+ case DW_END_little: return "DW_END_little";
+ case DW_END_lo_user: return "DW_END_lo_user";
+ case DW_END_hi_user: return "DW_END_hi_user";
}
return 0;
}
@@ -361,9 +449,9 @@ const char *llvm::dwarf::EndianityString(unsigned Endian) {
const char *llvm::dwarf::AccessibilityString(unsigned Access) {
switch (Access) {
// Accessibility codes
- case DW_ACCESS_public: return "ACCESS_public";
- case DW_ACCESS_protected: return "ACCESS_protected";
- case DW_ACCESS_private: return "ACCESS_private";
+ case DW_ACCESS_public: return "DW_ACCESS_public";
+ case DW_ACCESS_protected: return "DW_ACCESS_protected";
+ case DW_ACCESS_private: return "DW_ACCESS_private";
}
return 0;
}
@@ -372,9 +460,9 @@ const char *llvm::dwarf::AccessibilityString(unsigned Access) {
///
const char *llvm::dwarf::VisibilityString(unsigned Visibility) {
switch (Visibility) {
- case DW_VIS_local: return "VIS_local";
- case DW_VIS_exported: return "VIS_exported";
- case DW_VIS_qualified: return "VIS_qualified";
+ case DW_VIS_local: return "DW_VIS_local";
+ case DW_VIS_exported: return "DW_VIS_exported";
+ case DW_VIS_qualified: return "DW_VIS_qualified";
}
return 0;
}
@@ -383,9 +471,9 @@ const char *llvm::dwarf::VisibilityString(unsigned Visibility) {
///
const char *llvm::dwarf::VirtualityString(unsigned Virtuality) {
switch (Virtuality) {
- case DW_VIRTUALITY_none: return "VIRTUALITY_none";
- case DW_VIRTUALITY_virtual: return "VIRTUALITY_virtual";
- case DW_VIRTUALITY_pure_virtual: return "VIRTUALITY_pure_virtual";
+ case DW_VIRTUALITY_none: return "DW_VIRTUALITY_none";
+ case DW_VIRTUALITY_virtual: return "DW_VIRTUALITY_virtual";
+ case DW_VIRTUALITY_pure_virtual: return "DW_VIRTUALITY_pure_virtual";
}
return 0;
}
@@ -394,27 +482,27 @@ const char *llvm::dwarf::VirtualityString(unsigned Virtuality) {
///
const char *llvm::dwarf::LanguageString(unsigned Language) {
switch (Language) {
- case DW_LANG_C89: return "LANG_C89";
- case DW_LANG_C: return "LANG_C";
- case DW_LANG_Ada83: return "LANG_Ada83";
- case DW_LANG_C_plus_plus: return "LANG_C_plus_plus";
- case DW_LANG_Cobol74: return "LANG_Cobol74";
- case DW_LANG_Cobol85: return "LANG_Cobol85";
- case DW_LANG_Fortran77: return "LANG_Fortran77";
- case DW_LANG_Fortran90: return "LANG_Fortran90";
- case DW_LANG_Pascal83: return "LANG_Pascal83";
- case DW_LANG_Modula2: return "LANG_Modula2";
- case DW_LANG_Java: return "LANG_Java";
- case DW_LANG_C99: return "LANG_C99";
- case DW_LANG_Ada95: return "LANG_Ada95";
- case DW_LANG_Fortran95: return "LANG_Fortran95";
- case DW_LANG_PLI: return "LANG_PLI";
- case DW_LANG_ObjC: return "LANG_ObjC";
- case DW_LANG_ObjC_plus_plus: return "LANG_ObjC_plus_plus";
- case DW_LANG_UPC: return "LANG_UPC";
- case DW_LANG_D: return "LANG_D";
- case DW_LANG_lo_user: return "LANG_lo_user";
- case DW_LANG_hi_user: return "LANG_hi_user";
+ case DW_LANG_C89: return "DW_LANG_C89";
+ case DW_LANG_C: return "DW_LANG_C";
+ case DW_LANG_Ada83: return "DW_LANG_Ada83";
+ case DW_LANG_C_plus_plus: return "DW_LANG_C_plus_plus";
+ case DW_LANG_Cobol74: return "DW_LANG_Cobol74";
+ case DW_LANG_Cobol85: return "DW_LANG_Cobol85";
+ case DW_LANG_Fortran77: return "DW_LANG_Fortran77";
+ case DW_LANG_Fortran90: return "DW_LANG_Fortran90";
+ case DW_LANG_Pascal83: return "DW_LANG_Pascal83";
+ case DW_LANG_Modula2: return "DW_LANG_Modula2";
+ case DW_LANG_Java: return "DW_LANG_Java";
+ case DW_LANG_C99: return "DW_LANG_C99";
+ case DW_LANG_Ada95: return "DW_LANG_Ada95";
+ case DW_LANG_Fortran95: return "DW_LANG_Fortran95";
+ case DW_LANG_PLI: return "DW_LANG_PLI";
+ case DW_LANG_ObjC: return "DW_LANG_ObjC";
+ case DW_LANG_ObjC_plus_plus: return "DW_LANG_ObjC_plus_plus";
+ case DW_LANG_UPC: return "DW_LANG_UPC";
+ case DW_LANG_D: return "DW_LANG_D";
+ case DW_LANG_lo_user: return "DW_LANG_lo_user";
+ case DW_LANG_hi_user: return "DW_LANG_hi_user";
}
return 0;
}
@@ -423,10 +511,10 @@ const char *llvm::dwarf::LanguageString(unsigned Language) {
///
const char *llvm::dwarf::CaseString(unsigned Case) {
switch (Case) {
- case DW_ID_case_sensitive: return "ID_case_sensitive";
- case DW_ID_up_case: return "ID_up_case";
- case DW_ID_down_case: return "ID_down_case";
- case DW_ID_case_insensitive: return "ID_case_insensitive";
+ case DW_ID_case_sensitive: return "DW_ID_case_sensitive";
+ case DW_ID_up_case: return "DW_ID_up_case";
+ case DW_ID_down_case: return "DW_ID_down_case";
+ case DW_ID_case_insensitive: return "DW_ID_case_insensitive";
}
return 0;
}
@@ -435,11 +523,11 @@ const char *llvm::dwarf::CaseString(unsigned Case) {
///
const char *llvm::dwarf::ConventionString(unsigned Convention) {
switch (Convention) {
- case DW_CC_normal: return "CC_normal";
- case DW_CC_program: return "CC_program";
- case DW_CC_nocall: return "CC_nocall";
- case DW_CC_lo_user: return "CC_lo_user";
- case DW_CC_hi_user: return "CC_hi_user";
+ case DW_CC_normal: return "DW_CC_normal";
+ case DW_CC_program: return "DW_CC_program";
+ case DW_CC_nocall: return "DW_CC_nocall";
+ case DW_CC_lo_user: return "DW_CC_lo_user";
+ case DW_CC_hi_user: return "DW_CC_hi_user";
}
return 0;
}
@@ -448,10 +536,10 @@ const char *llvm::dwarf::ConventionString(unsigned Convention) {
///
const char *llvm::dwarf::InlineCodeString(unsigned Code) {
switch (Code) {
- case DW_INL_not_inlined: return "INL_not_inlined";
- case DW_INL_inlined: return "INL_inlined";
- case DW_INL_declared_not_inlined: return "INL_declared_not_inlined";
- case DW_INL_declared_inlined: return "INL_declared_inlined";
+ case DW_INL_not_inlined: return "DW_INL_not_inlined";
+ case DW_INL_inlined: return "DW_INL_inlined";
+ case DW_INL_declared_not_inlined: return "DW_INL_declared_not_inlined";
+ case DW_INL_declared_inlined: return "DW_INL_declared_inlined";
}
return 0;
}
@@ -460,8 +548,8 @@ const char *llvm::dwarf::InlineCodeString(unsigned Code) {
///
const char *llvm::dwarf::ArrayOrderString(unsigned Order) {
switch (Order) {
- case DW_ORD_row_major: return "ORD_row_major";
- case DW_ORD_col_major: return "ORD_col_major";
+ case DW_ORD_row_major: return "DW_ORD_row_major";
+ case DW_ORD_col_major: return "DW_ORD_col_major";
}
return 0;
}
@@ -470,8 +558,8 @@ const char *llvm::dwarf::ArrayOrderString(unsigned Order) {
/// descriptor.
const char *llvm::dwarf::DiscriminantString(unsigned Discriminant) {
switch (Discriminant) {
- case DW_DSC_label: return "DSC_label";
- case DW_DSC_range: return "DSC_range";
+ case DW_DSC_label: return "DW_DSC_label";
+ case DW_DSC_range: return "DW_DSC_range";
}
return 0;
}
@@ -480,18 +568,18 @@ const char *llvm::dwarf::DiscriminantString(unsigned Discriminant) {
///
const char *llvm::dwarf::LNStandardString(unsigned Standard) {
switch (Standard) {
- case DW_LNS_copy: return "LNS_copy";
- case DW_LNS_advance_pc: return "LNS_advance_pc";
- case DW_LNS_advance_line: return "LNS_advance_line";
- case DW_LNS_set_file: return "LNS_set_file";
- case DW_LNS_set_column: return "LNS_set_column";
- case DW_LNS_negate_stmt: return "LNS_negate_stmt";
- case DW_LNS_set_basic_block: return "LNS_set_basic_block";
- case DW_LNS_const_add_pc: return "LNS_const_add_pc";
- case DW_LNS_fixed_advance_pc: return "LNS_fixed_advance_pc";
- case DW_LNS_set_prologue_end: return "LNS_set_prologue_end";
- case DW_LNS_set_epilogue_begin: return "LNS_set_epilogue_begin";
- case DW_LNS_set_isa: return "LNS_set_isa";
+ case DW_LNS_copy: return "DW_LNS_copy";
+ case DW_LNS_advance_pc: return "DW_LNS_advance_pc";
+ case DW_LNS_advance_line: return "DW_LNS_advance_line";
+ case DW_LNS_set_file: return "DW_LNS_set_file";
+ case DW_LNS_set_column: return "DW_LNS_set_column";
+ case DW_LNS_negate_stmt: return "DW_LNS_negate_stmt";
+ case DW_LNS_set_basic_block: return "DW_LNS_set_basic_block";
+ case DW_LNS_const_add_pc: return "DW_LNS_const_add_pc";
+ case DW_LNS_fixed_advance_pc: return "DW_LNS_fixed_advance_pc";
+ case DW_LNS_set_prologue_end: return "DW_LNS_set_prologue_end";
+ case DW_LNS_set_epilogue_begin: return "DW_LNS_set_epilogue_begin";
+ case DW_LNS_set_isa: return "DW_LNS_set_isa";
}
return 0;
}
@@ -501,11 +589,11 @@ const char *llvm::dwarf::LNStandardString(unsigned Standard) {
const char *llvm::dwarf::LNExtendedString(unsigned Encoding) {
switch (Encoding) {
// Line Number Extended Opcode Encodings
- case DW_LNE_end_sequence: return "LNE_end_sequence";
- case DW_LNE_set_address: return "LNE_set_address";
- case DW_LNE_define_file: return "LNE_define_file";
- case DW_LNE_lo_user: return "LNE_lo_user";
- case DW_LNE_hi_user: return "LNE_hi_user";
+ case DW_LNE_end_sequence: return "DW_LNE_end_sequence";
+ case DW_LNE_set_address: return "DW_LNE_set_address";
+ case DW_LNE_define_file: return "DW_LNE_define_file";
+ case DW_LNE_lo_user: return "DW_LNE_lo_user";
+ case DW_LNE_hi_user: return "DW_LNE_hi_user";
}
return 0;
}
@@ -515,11 +603,11 @@ const char *llvm::dwarf::LNExtendedString(unsigned Encoding) {
const char *llvm::dwarf::MacinfoString(unsigned Encoding) {
switch (Encoding) {
// Macinfo Type Encodings
- case DW_MACINFO_define: return "MACINFO_define";
- case DW_MACINFO_undef: return "MACINFO_undef";
- case DW_MACINFO_start_file: return "MACINFO_start_file";
- case DW_MACINFO_end_file: return "MACINFO_end_file";
- case DW_MACINFO_vendor_ext: return "MACINFO_vendor_ext";
+ case DW_MACINFO_define: return "DW_MACINFO_define";
+ case DW_MACINFO_undef: return "DW_MACINFO_undef";
+ case DW_MACINFO_start_file: return "DW_MACINFO_start_file";
+ case DW_MACINFO_end_file: return "DW_MACINFO_end_file";
+ case DW_MACINFO_vendor_ext: return "DW_MACINFO_vendor_ext";
}
return 0;
}
@@ -528,33 +616,33 @@ const char *llvm::dwarf::MacinfoString(unsigned Encoding) {
/// encodings.
const char *llvm::dwarf::CallFrameString(unsigned Encoding) {
switch (Encoding) {
- case DW_CFA_advance_loc: return "CFA_advance_loc";
- case DW_CFA_offset: return "CFA_offset";
- case DW_CFA_restore: return "CFA_restore";
- case DW_CFA_set_loc: return "CFA_set_loc";
- case DW_CFA_advance_loc1: return "CFA_advance_loc1";
- case DW_CFA_advance_loc2: return "CFA_advance_loc2";
- case DW_CFA_advance_loc4: return "CFA_advance_loc4";
- case DW_CFA_offset_extended: return "CFA_offset_extended";
- case DW_CFA_restore_extended: return "CFA_restore_extended";
- case DW_CFA_undefined: return "CFA_undefined";
- case DW_CFA_same_value: return "CFA_same_value";
- case DW_CFA_register: return "CFA_register";
- case DW_CFA_remember_state: return "CFA_remember_state";
- case DW_CFA_restore_state: return "CFA_restore_state";
- case DW_CFA_def_cfa: return "CFA_def_cfa";
- case DW_CFA_def_cfa_register: return "CFA_def_cfa_register";
- case DW_CFA_def_cfa_offset: return "CFA_def_cfa_offset";
- case DW_CFA_def_cfa_expression: return "CFA_def_cfa_expression";
- case DW_CFA_expression: return "CFA_expression";
- case DW_CFA_offset_extended_sf: return "CFA_offset_extended_sf";
- case DW_CFA_def_cfa_sf: return "CFA_def_cfa_sf";
- case DW_CFA_def_cfa_offset_sf: return "CFA_def_cfa_offset_sf";
- case DW_CFA_val_offset: return "CFA_val_offset";
- case DW_CFA_val_offset_sf: return "CFA_val_offset_sf";
- case DW_CFA_val_expression: return "CFA_val_expression";
- case DW_CFA_lo_user: return "CFA_lo_user";
- case DW_CFA_hi_user: return "CFA_hi_user";
+ case DW_CFA_advance_loc: return "DW_CFA_advance_loc";
+ case DW_CFA_offset: return "DW_CFA_offset";
+ case DW_CFA_restore: return "DW_CFA_restore";
+ case DW_CFA_set_loc: return "DW_CFA_set_loc";
+ case DW_CFA_advance_loc1: return "DW_CFA_advance_loc1";
+ case DW_CFA_advance_loc2: return "DW_CFA_advance_loc2";
+ case DW_CFA_advance_loc4: return "DW_CFA_advance_loc4";
+ case DW_CFA_offset_extended: return "DW_CFA_offset_extended";
+ case DW_CFA_restore_extended: return "DW_CFA_restore_extended";
+ case DW_CFA_undefined: return "DW_CFA_undefined";
+ case DW_CFA_same_value: return "DW_CFA_same_value";
+ case DW_CFA_register: return "DW_CFA_register";
+ case DW_CFA_remember_state: return "DW_CFA_remember_state";
+ case DW_CFA_restore_state: return "DW_CFA_restore_state";
+ case DW_CFA_def_cfa: return "DW_CFA_def_cfa";
+ case DW_CFA_def_cfa_register: return "DW_CFA_def_cfa_register";
+ case DW_CFA_def_cfa_offset: return "DW_CFA_def_cfa_offset";
+ case DW_CFA_def_cfa_expression: return "DW_CFA_def_cfa_expression";
+ case DW_CFA_expression: return "DW_CFA_expression";
+ case DW_CFA_offset_extended_sf: return "DW_CFA_offset_extended_sf";
+ case DW_CFA_def_cfa_sf: return "DW_CFA_def_cfa_sf";
+ case DW_CFA_def_cfa_offset_sf: return "DW_CFA_def_cfa_offset_sf";
+ case DW_CFA_val_offset: return "DW_CFA_val_offset";
+ case DW_CFA_val_offset_sf: return "DW_CFA_val_offset_sf";
+ case DW_CFA_val_expression: return "DW_CFA_val_expression";
+ case DW_CFA_lo_user: return "DW_CFA_lo_user";
+ case DW_CFA_hi_user: return "DW_CFA_hi_user";
}
return 0;
}
diff --git a/libclamav/c++/llvm/lib/Support/ErrorHandling.cpp b/libclamav/c++/llvm/lib/Support/ErrorHandling.cpp
index 8bb1566..0b7af3e 100644
--- a/libclamav/c++/llvm/lib/Support/ErrorHandling.cpp
+++ b/libclamav/c++/llvm/lib/Support/ErrorHandling.cpp
@@ -1,4 +1,4 @@
-//===- lib/Support/ErrorHandling.cpp - Callbacks for errors -----*- C++ -*-===//
+//===- lib/Support/ErrorHandling.cpp - Callbacks for errors ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,28 +7,38 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines an API for error handling, it supersedes cerr+abort(), and
-// cerr+exit() style error handling.
-// Callbacks can be registered for these errors through this API.
+// This file defines an API used to indicate fatal error conditions. Non-fatal
+// errors (most of them) should be handled through LLVMContext.
+//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/Signals.h"
#include "llvm/System/Threading.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Config/config.h"
#include <cassert>
#include <cstdlib>
+#if defined(HAVE_UNISTD_H)
+# include <unistd.h>
+#endif
+#if defined(_MSC_VER)
+# include <io.h>
+# include <fcntl.h>
+#endif
+
using namespace llvm;
using namespace std;
-static llvm_error_handler_t ErrorHandler = 0;
+static fatal_error_handler_t ErrorHandler = 0;
static void *ErrorHandlerUserData = 0;
-namespace llvm {
-void llvm_install_error_handler(llvm_error_handler_t handler,
- void *user_data) {
+void llvm::install_fatal_error_handler(fatal_error_handler_t handler,
+ void *user_data) {
assert(!llvm_is_multithreaded() &&
"Cannot register error handlers after starting multithreaded mode!\n");
assert(!ErrorHandler && "Error handler already registered!\n");
@@ -36,29 +46,42 @@ void llvm_install_error_handler(llvm_error_handler_t handler,
ErrorHandlerUserData = user_data;
}
-void llvm_remove_error_handler() {
+void llvm::remove_fatal_error_handler() {
ErrorHandler = 0;
}
-void llvm_report_error(const char *reason) {
- llvm_report_error(Twine(reason));
+void llvm::report_fatal_error(const char *Reason) {
+ report_fatal_error(Twine(Reason));
}
-void llvm_report_error(const std::string &reason) {
- llvm_report_error(Twine(reason));
+void llvm::report_fatal_error(const std::string &Reason) {
+ report_fatal_error(Twine(Reason));
}
-void llvm_report_error(const Twine &reason) {
- if (!ErrorHandler) {
- errs() << "LLVM ERROR: " << reason << "\n";
+void llvm::report_fatal_error(const Twine &Reason) {
+ if (ErrorHandler) {
+ ErrorHandler(ErrorHandlerUserData, Reason.str());
} else {
- ErrorHandler(ErrorHandlerUserData, reason.str());
+ // Blast the result out to stderr. We don't try hard to make sure this
+ // succeeds (e.g. handling EINTR) and we can't use errs() here because
+ // raw ostreams can call report_fatal_error.
+ SmallVector<char, 64> Buffer;
+ raw_svector_ostream OS(Buffer);
+ OS << "LLVM ERROR: " << Reason << "\n";
+ StringRef MessageStr = OS.str();
+ (void)::write(2, MessageStr.data(), MessageStr.size());
}
+
+ // If we reached here, we are failing ungracefully. Run the interrupt handlers
+ // to make sure any special cleanups get done, in particular that we remove
+ // files registered with RemoveFileOnSignal.
+ sys::RunInterruptHandlers();
+
exit(1);
}
-void llvm_unreachable_internal(const char *msg, const char *file,
- unsigned line) {
+void llvm::llvm_unreachable_internal(const char *msg, const char *file,
+ unsigned line) {
// This code intentionally doesn't call the ErrorHandler callback, because
// llvm_unreachable is intended to be used to indicate "impossible"
// situations, and not legitimate runtime errors.
@@ -70,5 +93,3 @@ void llvm_unreachable_internal(const char *msg, const char *file,
dbgs() << "!\n";
abort();
}
-}
-
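
The renamed entry points boil down to: install a process-wide handler once, before going multithreaded, and report_fatal_error() routes the message through it; note that it still runs the interrupt handlers and exits afterwards unless the handler terminates the process itself. A minimal usage sketch, assuming the post-patch ErrorHandling.h signatures (the handler takes a void* cookie and a std::string reason); the handler body and exit code are illustrative.

#include "llvm/Support/ErrorHandling.h"
#include <cstdio>
#include <cstdlib>
#include <string>

// Called by report_fatal_error() instead of the default stderr message.
static void MyFatalHandler(void *UserData, const std::string &Reason) {
  std::fprintf(stderr, "[%s] LLVM fatal error: %s\n",
               static_cast<const char *>(UserData), Reason.c_str());
  std::exit(70); // pick our own exit status instead of the default exit(1)
}

int main() {
  static const char Tag[] = "mytool";
  llvm::install_fatal_error_handler(MyFatalHandler, const_cast<char *>(Tag));

  // ... normal work; on an unrecoverable condition:
  llvm::report_fatal_error("unrecoverable condition encountered");

  // Not reached; shown only for completeness.
  llvm::remove_fatal_error_handler();
  return 0;
}
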
diff --git a/libclamav/c++/llvm/lib/Support/FileUtilities.cpp b/libclamav/c++/llvm/lib/Support/FileUtilities.cpp
index 095395f..1bde2fe 100644
--- a/libclamav/c++/llvm/lib/Support/FileUtilities.cpp
+++ b/libclamav/c++/llvm/lib/Support/FileUtilities.cpp
@@ -51,7 +51,15 @@ static const char *BackupNumber(const char *Pos, const char *FirstChar) {
if (!isNumberChar(*Pos)) return Pos;
// Otherwise, return to the start of the number.
+ bool HasPeriod = false;
while (Pos > FirstChar && isNumberChar(Pos[-1])) {
+ // Backup over at most one period.
+ if (Pos[-1] == '.') {
+ if (HasPeriod)
+ break;
+ HasPeriod = true;
+ }
+
--Pos;
if (Pos > FirstChar && isSignedChar(Pos[0]) && !isExponentChar(Pos[-1]))
break;
@@ -204,16 +212,16 @@ int llvm::DiffFilesWithTolerance(const sys::PathWithStatus &FileA,
const char *F1P = File1Start;
const char *F2P = File2Start;
- if (A_size == B_size) {
- // Are the buffers identical? Common case: Handle this efficiently.
- if (std::memcmp(File1Start, File2Start, A_size) == 0)
- return 0;
+ // Are the buffers identical? Common case: Handle this efficiently.
+ if (A_size == B_size &&
+ std::memcmp(File1Start, File2Start, A_size) == 0)
+ return 0;
- if (AbsTol == 0 && RelTol == 0) {
- if (Error)
- *Error = "Files differ without tolerance allowance";
- return 1; // Files different!
- }
+ // Otherwise, we are done if no tolerances are set.
+ if (AbsTol == 0 && RelTol == 0) {
+ if (Error)
+ *Error = "Files differ without tolerance allowance";
+ return 1; // Files different!
}
bool CompareFailed = false;
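
The new HasPeriod flag above keeps the backward scan from swallowing more than one decimal point, so version-like tokens are not mistaken for a single number. A standalone illustration of that rule (the sign and exponent handling of the real BackupNumber() is omitted, and all names here are local to this sketch):

#include <cstdio>

static bool isNumberChar(char C) {
  switch (C) {
  case '0': case '1': case '2': case '3': case '4':
  case '5': case '6': case '7': case '8': case '9':
  case '.': case '+': case '-':
  case 'D': case 'd': case 'e': case 'E':
    return true;
  default:
    return false;
  }
}

// Back up Pos to the start of the number it points into, crossing at most
// one '.', mirroring the patched loop.
static const char *BackupOverNumber(const char *Pos, const char *FirstChar) {
  if (!isNumberChar(*Pos)) return Pos;
  bool HasPeriod = false;
  while (Pos > FirstChar && isNumberChar(Pos[-1])) {
    if (Pos[-1] == '.') {
      if (HasPeriod)
        break;              // don't absorb a second period
      HasPeriod = true;
    }
    --Pos;
  }
  return Pos;
}

int main() {
  const char Buf[] = "version 1.2.3";
  const char *Last = Buf + sizeof(Buf) - 2;   // points at the final '3'
  std::printf("number starts at: \"%s\"\n", BackupOverNumber(Last, Buf));
  // Prints "2.3": the scan stops before the second '.' instead of
  // treating "1.2.3" as one number.
  return 0;
}
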
diff --git a/libclamav/c++/llvm/lib/Support/FoldingSet.cpp b/libclamav/c++/llvm/lib/Support/FoldingSet.cpp
index 954dc77..29b5952 100644
--- a/libclamav/c++/llvm/lib/Support/FoldingSet.cpp
+++ b/libclamav/c++/llvm/lib/Support/FoldingSet.cpp
@@ -15,6 +15,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
@@ -22,6 +23,37 @@
using namespace llvm;
//===----------------------------------------------------------------------===//
+// FoldingSetNodeIDRef Implementation
+
+/// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef,
+/// used to lookup the node in the FoldingSetImpl.
+unsigned FoldingSetNodeIDRef::ComputeHash() const {
+ // This is adapted from SuperFastHash by Paul Hsieh.
+ unsigned Hash = static_cast<unsigned>(Size);
+ for (const unsigned *BP = Data, *E = BP+Size; BP != E; ++BP) {
+ unsigned Data = *BP;
+ Hash += Data & 0xFFFF;
+ unsigned Tmp = ((Data >> 16) << 11) ^ Hash;
+ Hash = (Hash << 16) ^ Tmp;
+ Hash += Hash >> 11;
+ }
+
+ // Force "avalanching" of final 127 bits.
+ Hash ^= Hash << 3;
+ Hash += Hash >> 5;
+ Hash ^= Hash << 4;
+ Hash += Hash >> 17;
+ Hash ^= Hash << 25;
+ Hash += Hash >> 6;
+ return Hash;
+}
+
+bool FoldingSetNodeIDRef::operator==(FoldingSetNodeIDRef RHS) const {
+ if (Size != RHS.Size) return false;
+ return memcmp(Data, RHS.Data, Size*sizeof(*Data)) == 0;
+}
+
+//===----------------------------------------------------------------------===//
// FoldingSetNodeID Implementation
/// Add* - Add various data types to Bit data.
@@ -103,33 +135,30 @@ void FoldingSetNodeID::AddString(StringRef String) {
/// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used to
/// lookup the node in the FoldingSetImpl.
unsigned FoldingSetNodeID::ComputeHash() const {
- // This is adapted from SuperFastHash by Paul Hsieh.
- unsigned Hash = static_cast<unsigned>(Bits.size());
- for (const unsigned *BP = &Bits[0], *E = BP+Bits.size(); BP != E; ++BP) {
- unsigned Data = *BP;
- Hash += Data & 0xFFFF;
- unsigned Tmp = ((Data >> 16) << 11) ^ Hash;
- Hash = (Hash << 16) ^ Tmp;
- Hash += Hash >> 11;
- }
-
- // Force "avalanching" of final 127 bits.
- Hash ^= Hash << 3;
- Hash += Hash >> 5;
- Hash ^= Hash << 4;
- Hash += Hash >> 17;
- Hash ^= Hash << 25;
- Hash += Hash >> 6;
- return Hash;
+ return FoldingSetNodeIDRef(Bits.data(), Bits.size()).ComputeHash();
}
/// operator== - Used to compare two nodes to each other.
///
bool FoldingSetNodeID::operator==(const FoldingSetNodeID &RHS)const{
- if (Bits.size() != RHS.Bits.size()) return false;
- return memcmp(&Bits[0], &RHS.Bits[0], Bits.size()*sizeof(Bits[0])) == 0;
+ return *this == FoldingSetNodeIDRef(RHS.Bits.data(), RHS.Bits.size());
}
+/// operator== - Used to compare two nodes to each other.
+///
+bool FoldingSetNodeID::operator==(FoldingSetNodeIDRef RHS) const {
+ return FoldingSetNodeIDRef(Bits.data(), Bits.size()) == RHS;
+}
+
+/// Intern - Copy this node's data to a memory region allocated from the
+/// given allocator and return a FoldingSetNodeIDRef describing the
+/// interned data.
+FoldingSetNodeIDRef
+FoldingSetNodeID::Intern(BumpPtrAllocator &Allocator) const {
+ unsigned *New = Allocator.Allocate<unsigned>(Bits.size());
+ std::uninitialized_copy(Bits.begin(), Bits.end(), New);
+ return FoldingSetNodeIDRef(New, Bits.size());
+}
//===----------------------------------------------------------------------===//
/// Helper functions for FoldingSetImpl.
@@ -158,13 +187,20 @@ static void **GetBucketPtr(void *NextInBucketPtr) {
/// GetBucketFor - Hash the specified node ID and return the hash bucket for
/// the specified ID.
-static void **GetBucketFor(const FoldingSetNodeID &ID,
- void **Buckets, unsigned NumBuckets) {
+static void **GetBucketFor(unsigned Hash, void **Buckets, unsigned NumBuckets) {
// NumBuckets is always a power of 2.
- unsigned BucketNum = ID.ComputeHash() & (NumBuckets-1);
+ unsigned BucketNum = Hash & (NumBuckets-1);
return Buckets + BucketNum;
}
+/// AllocateBuckets - Allocate initialized bucket memory.
+static void **AllocateBuckets(unsigned NumBuckets) {
+ void **Buckets = static_cast<void**>(calloc(NumBuckets+1, sizeof(void*)));
+ // Set the very last bucket to be a non-null "pointer".
+ Buckets[NumBuckets] = reinterpret_cast<void*>(-1);
+ return Buckets;
+}
+
//===----------------------------------------------------------------------===//
// FoldingSetImpl Implementation
@@ -172,11 +208,11 @@ FoldingSetImpl::FoldingSetImpl(unsigned Log2InitSize) {
assert(5 < Log2InitSize && Log2InitSize < 32 &&
"Initial hash table size out of range");
NumBuckets = 1 << Log2InitSize;
- Buckets = new void*[NumBuckets+1];
- clear();
+ Buckets = AllocateBuckets(NumBuckets);
+ NumNodes = 0;
}
FoldingSetImpl::~FoldingSetImpl() {
- delete [] Buckets;
+ free(Buckets);
}
void FoldingSetImpl::clear() {
// Set all but the last bucket to null pointers.
@@ -197,11 +233,11 @@ void FoldingSetImpl::GrowHashTable() {
NumBuckets <<= 1;
// Clear out new buckets.
- Buckets = new void*[NumBuckets+1];
- clear();
+ Buckets = AllocateBuckets(NumBuckets);
+ NumNodes = 0;
// Walk the old buckets, rehashing nodes into their new place.
- FoldingSetNodeID ID;
+ FoldingSetNodeID TempID;
for (unsigned i = 0; i != OldNumBuckets; ++i) {
void *Probe = OldBuckets[i];
if (!Probe) continue;
@@ -211,13 +247,14 @@ void FoldingSetImpl::GrowHashTable() {
NodeInBucket->SetNextInBucket(0);
// Insert the node into the new bucket, after recomputing the hash.
- GetNodeProfile(ID, NodeInBucket);
- InsertNode(NodeInBucket, GetBucketFor(ID, Buckets, NumBuckets));
- ID.clear();
+ InsertNode(NodeInBucket,
+ GetBucketFor(ComputeNodeHash(NodeInBucket, TempID),
+ Buckets, NumBuckets));
+ TempID.clear();
}
}
- delete[] OldBuckets;
+ free(OldBuckets);
}
/// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
@@ -227,19 +264,18 @@ FoldingSetImpl::Node
*FoldingSetImpl::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
void *&InsertPos) {
- void **Bucket = GetBucketFor(ID, Buckets, NumBuckets);
+ void **Bucket = GetBucketFor(ID.ComputeHash(), Buckets, NumBuckets);
void *Probe = *Bucket;
InsertPos = 0;
- FoldingSetNodeID OtherID;
+ FoldingSetNodeID TempID;
while (Node *NodeInBucket = GetNextPtr(Probe)) {
- GetNodeProfile(OtherID, NodeInBucket);
- if (OtherID == ID)
+ if (NodeEquals(NodeInBucket, ID, TempID))
return NodeInBucket;
+ TempID.clear();
Probe = NodeInBucket->getNextInBucket();
- OtherID.clear();
}
// Didn't find the node, return null with the bucket as the InsertPos.
@@ -255,9 +291,8 @@ void FoldingSetImpl::InsertNode(Node *N, void *InsertPos) {
// Do we need to grow the hashtable?
if (NumNodes+1 > NumBuckets*2) {
GrowHashTable();
- FoldingSetNodeID ID;
- GetNodeProfile(ID, N);
- InsertPos = GetBucketFor(ID, Buckets, NumBuckets);
+ FoldingSetNodeID TempID;
+ InsertPos = GetBucketFor(ComputeNodeHash(N, TempID), Buckets, NumBuckets);
}
++NumNodes;
@@ -323,7 +358,7 @@ bool FoldingSetImpl::RemoveNode(Node *N) {
/// instead.
FoldingSetImpl::Node *FoldingSetImpl::GetOrInsertNode(FoldingSetImpl::Node *N) {
FoldingSetNodeID ID;
- GetNodeProfile(ID, N);
+ GetNodeProfile(N, ID);
void *IP;
if (Node *E = FindNodeOrInsertPos(ID, IP))
return E;
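
The new FoldingSetNodeIDRef is a non-owning (pointer, size) view over a node profile, and Intern() copies the profile bits into a BumpPtrAllocator so a node can keep its hash data around cheaply. A small usage sketch against the interfaces added above; the profiled values are arbitrary.

#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/Allocator.h"
#include <cassert>

int main() {
  llvm::BumpPtrAllocator Allocator;

  llvm::FoldingSetNodeID ID;
  ID.AddInteger(42u);
  ID.AddString("some profile data");

  // Copy the profile bits into allocator-owned memory; the returned ref
  // stays valid for the lifetime of the allocator.
  llvm::FoldingSetNodeIDRef Ref = ID.Intern(Allocator);

  // The lightweight ref hashes and compares identically to the full ID.
  assert(Ref.ComputeHash() == ID.ComputeHash());
  assert(ID == Ref);
  return 0;
}
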
diff --git a/libclamav/c++/llvm/lib/Support/GraphWriter.cpp b/libclamav/c++/llvm/lib/Support/GraphWriter.cpp
index ec84f9b..fdd6285 100644
--- a/libclamav/c++/llvm/lib/Support/GraphWriter.cpp
+++ b/libclamav/c++/llvm/lib/Support/GraphWriter.cpp
@@ -130,28 +130,28 @@ void llvm::DisplayGraph(const sys::Path &Filename, bool wait,
if (sys::Program::ExecuteAndWait(prog, &args[0], 0, 0, 0, 0, &ErrMsg)) {
errs() << "Error viewing graph " << Filename.str() << ": '"
<< ErrMsg << "\n";
- } else {
- errs() << " done. \n";
+ return;
+ }
+ errs() << " done. \n";
- sys::Path gv(LLVM_PATH_GV);
- args.clear();
- args.push_back(gv.c_str());
- args.push_back(PSFilename.c_str());
- args.push_back("--spartan");
- args.push_back(0);
-
- ErrMsg.clear();
- if (wait) {
- if (sys::Program::ExecuteAndWait(gv, &args[0],0,0,0,0,&ErrMsg))
- errs() << "Error viewing graph: " << ErrMsg << "\n";
- Filename.eraseFromDisk();
- PSFilename.eraseFromDisk();
- }
- else {
- sys::Program::ExecuteNoWait(gv, &args[0],0,0,0,&ErrMsg);
- errs() << "Remember to erase graph files: " << Filename.str() << " "
- << PSFilename.str() << "\n";
- }
+ sys::Path gv(LLVM_PATH_GV);
+ args.clear();
+ args.push_back(gv.c_str());
+ args.push_back(PSFilename.c_str());
+ args.push_back("--spartan");
+ args.push_back(0);
+
+ ErrMsg.clear();
+ if (wait) {
+ if (sys::Program::ExecuteAndWait(gv, &args[0],0,0,0,0,&ErrMsg))
+ errs() << "Error viewing graph: " << ErrMsg << "\n";
+ Filename.eraseFromDisk();
+ PSFilename.eraseFromDisk();
+ }
+ else {
+ sys::Program::ExecuteNoWait(gv, &args[0],0,0,0,&ErrMsg);
+ errs() << "Remember to erase graph files: " << Filename.str() << " "
+ << PSFilename.str() << "\n";
}
#elif HAVE_DOTTY
sys::Path dotty(LLVM_PATH_DOTTY);
@@ -166,7 +166,8 @@ void llvm::DisplayGraph(const sys::Path &Filename, bool wait,
errs() << "Error viewing graph " << Filename.str() << ": "
<< ErrMsg << "\n";
} else {
-#ifdef __MINGW32__ // Dotty spawns another app and doesn't wait until it returns
+// Dotty spawns another app and doesn't wait until it returns
+#if defined (__MINGW32__) || defined (_WINDOWS)
return;
#endif
Filename.eraseFromDisk();
diff --git a/libclamav/c++/llvm/lib/Support/MemoryBuffer.cpp b/libclamav/c++/llvm/lib/Support/MemoryBuffer.cpp
index eb046d0..542162d 100644
--- a/libclamav/c++/llvm/lib/Support/MemoryBuffer.cpp
+++ b/libclamav/c++/llvm/lib/Support/MemoryBuffer.cpp
@@ -14,6 +14,8 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/System/Errno.h"
#include "llvm/System/Path.h"
#include "llvm/System/Process.h"
#include "llvm/System/Program.h"
@@ -36,22 +38,7 @@ using namespace llvm;
// MemoryBuffer implementation itself.
//===----------------------------------------------------------------------===//
-MemoryBuffer::~MemoryBuffer() {
- if (MustDeleteBuffer)
- free((void*)BufferStart);
-}
-
-/// initCopyOf - Initialize this source buffer with a copy of the specified
-/// memory range. We make the copy so that we can null terminate it
-/// successfully.
-void MemoryBuffer::initCopyOf(const char *BufStart, const char *BufEnd) {
- size_t Size = BufEnd-BufStart;
- BufferStart = (char *)malloc(Size+1);
- BufferEnd = BufferStart+Size;
- memcpy(const_cast<char*>(BufferStart), BufStart, Size);
- *const_cast<char*>(BufferEnd) = 0; // Null terminate buffer.
- MustDeleteBuffer = true;
-}
+MemoryBuffer::~MemoryBuffer() { }
/// init - Initialize this MemoryBuffer as a reference to externally allocated
/// memory, memory that we know is already null terminated.
@@ -59,73 +46,94 @@ void MemoryBuffer::init(const char *BufStart, const char *BufEnd) {
assert(BufEnd[0] == 0 && "Buffer is not null terminated!");
BufferStart = BufStart;
BufferEnd = BufEnd;
- MustDeleteBuffer = false;
}
//===----------------------------------------------------------------------===//
// MemoryBufferMem implementation.
//===----------------------------------------------------------------------===//
+/// CopyStringRef - Copies contents of a StringRef into a block of memory and
+/// null-terminates it.
+static void CopyStringRef(char *Memory, StringRef Data) {
+ memcpy(Memory, Data.data(), Data.size());
+ Memory[Data.size()] = 0; // Null terminate string.
+}
+
+/// GetNamedBuffer - Allocates a new MemoryBuffer with Name copied after it.
+template <typename T>
+static T* GetNamedBuffer(StringRef Buffer, StringRef Name) {
+ char *Mem = static_cast<char*>(operator new(sizeof(T) + Name.size() + 1));
+ CopyStringRef(Mem + sizeof(T), Name);
+ return new (Mem) T(Buffer);
+}
+
namespace {
+/// MemoryBufferMem - Named MemoryBuffer pointing to a block of memory.
class MemoryBufferMem : public MemoryBuffer {
- std::string FileID;
public:
- MemoryBufferMem(const char *Start, const char *End, StringRef FID,
- bool Copy = false)
- : FileID(FID) {
- if (!Copy)
- init(Start, End);
- else
- initCopyOf(Start, End);
+ MemoryBufferMem(StringRef InputData) {
+ init(InputData.begin(), InputData.end());
}
-
+
virtual const char *getBufferIdentifier() const {
- return FileID.c_str();
+ // The name is stored after the class itself.
+ return reinterpret_cast<const char*>(this + 1);
}
};
}
/// getMemBuffer - Open the specified memory range as a MemoryBuffer. Note
/// that EndPtr[0] must be a null byte and be accessible!
-MemoryBuffer *MemoryBuffer::getMemBuffer(const char *StartPtr,
- const char *EndPtr,
- const char *BufferName) {
- return new MemoryBufferMem(StartPtr, EndPtr, BufferName);
+MemoryBuffer *MemoryBuffer::getMemBuffer(StringRef InputData,
+ StringRef BufferName) {
+ return GetNamedBuffer<MemoryBufferMem>(InputData, BufferName);
}
/// getMemBufferCopy - Open the specified memory range as a MemoryBuffer,
/// copying the contents and taking ownership of it. This has no requirements
/// on EndPtr[0].
-MemoryBuffer *MemoryBuffer::getMemBufferCopy(const char *StartPtr,
- const char *EndPtr,
- const char *BufferName) {
- return new MemoryBufferMem(StartPtr, EndPtr, BufferName, true);
+MemoryBuffer *MemoryBuffer::getMemBufferCopy(StringRef InputData,
+ StringRef BufferName) {
+ MemoryBuffer *Buf = getNewUninitMemBuffer(InputData.size(), BufferName);
+ if (!Buf) return 0;
+ memcpy(const_cast<char*>(Buf->getBufferStart()), InputData.data(),
+ InputData.size());
+ return Buf;
}
/// getNewUninitMemBuffer - Allocate a new MemoryBuffer of the specified size
-/// that is completely initialized to zeros. Note that the caller should
-/// initialize the memory allocated by this method. The memory is owned by
-/// the MemoryBuffer object.
+/// that is not initialized. Note that the caller should initialize the
+/// memory allocated by this method. The memory is owned by the MemoryBuffer
+/// object.
MemoryBuffer *MemoryBuffer::getNewUninitMemBuffer(size_t Size,
StringRef BufferName) {
- char *Buf = (char *)malloc(Size+1);
- if (!Buf) return 0;
- Buf[Size] = 0;
- MemoryBufferMem *SB = new MemoryBufferMem(Buf, Buf+Size, BufferName);
- // The memory for this buffer is owned by the MemoryBuffer.
- SB->MustDeleteBuffer = true;
- return SB;
+ // Allocate space for the MemoryBuffer, the data and the name. It is important
+ // that MemoryBuffer and data are aligned so PointerIntPair works with them.
+ size_t AlignedStringLen =
+ RoundUpToAlignment(sizeof(MemoryBufferMem) + BufferName.size() + 1,
+ sizeof(void*)); // TODO: Is sizeof(void*) enough?
+ size_t RealLen = AlignedStringLen + Size + 1;
+ char *Mem = static_cast<char*>(operator new(RealLen, std::nothrow));
+ if (!Mem) return 0;
+
+ // The name is stored after the class itself.
+ CopyStringRef(Mem + sizeof(MemoryBufferMem), BufferName);
+
+ // The buffer begins after the name and must be aligned.
+ char *Buf = Mem + AlignedStringLen;
+ Buf[Size] = 0; // Null terminate buffer.
+
+ return new (Mem) MemoryBufferMem(StringRef(Buf, Size));
}
/// getNewMemBuffer - Allocate a new MemoryBuffer of the specified size that
/// is completely initialized to zeros. Note that the caller should
/// initialize the memory allocated by this method. The memory is owned by
/// the MemoryBuffer object.
-MemoryBuffer *MemoryBuffer::getNewMemBuffer(size_t Size,
- const char *BufferName) {
+MemoryBuffer *MemoryBuffer::getNewMemBuffer(size_t Size, StringRef BufferName) {
MemoryBuffer *SB = getNewUninitMemBuffer(Size, BufferName);
if (!SB) return 0;
- memset(const_cast<char*>(SB->getBufferStart()), 0, Size+1);
+ memset(const_cast<char*>(SB->getBufferStart()), 0, Size);
return SB;
}
@@ -136,10 +144,20 @@ MemoryBuffer *MemoryBuffer::getNewMemBuffer(size_t Size,
/// returns an empty buffer.
MemoryBuffer *MemoryBuffer::getFileOrSTDIN(StringRef Filename,
std::string *ErrStr,
- int64_t FileSize) {
+ int64_t FileSize,
+ struct stat *FileInfo) {
if (Filename == "-")
- return getSTDIN();
- return getFile(Filename, ErrStr, FileSize);
+ return getSTDIN(ErrStr);
+ return getFile(Filename, ErrStr, FileSize, FileInfo);
+}
+
+MemoryBuffer *MemoryBuffer::getFileOrSTDIN(const char *Filename,
+ std::string *ErrStr,
+ int64_t FileSize,
+ struct stat *FileInfo) {
+ if (strcmp(Filename, "-") == 0)
+ return getSTDIN(ErrStr);
+ return getFile(Filename, ErrStr, FileSize, FileInfo);
}
//===----------------------------------------------------------------------===//
@@ -150,48 +168,56 @@ namespace {
/// MemoryBufferMMapFile - This represents a file that was mapped in with the
/// sys::Path::MapInFilePages method. When destroyed, it calls the
/// sys::Path::UnMapFilePages method.
-class MemoryBufferMMapFile : public MemoryBuffer {
- std::string Filename;
+class MemoryBufferMMapFile : public MemoryBufferMem {
public:
- MemoryBufferMMapFile(StringRef filename, const char *Pages, uint64_t Size)
- : Filename(filename) {
- init(Pages, Pages+Size);
- }
-
- virtual const char *getBufferIdentifier() const {
- return Filename.c_str();
- }
-
+ MemoryBufferMMapFile(StringRef Buffer)
+ : MemoryBufferMem(Buffer) { }
+
~MemoryBufferMMapFile() {
sys::Path::UnMapFilePages(getBufferStart(), getBufferSize());
}
};
+
+/// FileCloser - RAII object to make sure an FD gets closed properly.
+class FileCloser {
+ int FD;
+public:
+ explicit FileCloser(int FD) : FD(FD) {}
+ ~FileCloser() { ::close(FD); }
+};
}
MemoryBuffer *MemoryBuffer::getFile(StringRef Filename, std::string *ErrStr,
- int64_t FileSize) {
- int OpenFlags = 0;
+ int64_t FileSize, struct stat *FileInfo) {
+ SmallString<256> PathBuf(Filename.begin(), Filename.end());
+ return MemoryBuffer::getFile(PathBuf.c_str(), ErrStr, FileSize, FileInfo);
+}
+
+MemoryBuffer *MemoryBuffer::getFile(const char *Filename, std::string *ErrStr,
+ int64_t FileSize, struct stat *FileInfo) {
+ int OpenFlags = O_RDONLY;
#ifdef O_BINARY
OpenFlags |= O_BINARY; // Open input file in binary mode on win32.
#endif
- SmallString<256> PathBuf(Filename.begin(), Filename.end());
- int FD = ::open(PathBuf.c_str(), O_RDONLY|OpenFlags);
+ int FD = ::open(Filename, OpenFlags);
if (FD == -1) {
- if (ErrStr) *ErrStr = strerror(errno);
+ if (ErrStr) *ErrStr = sys::StrError();
return 0;
}
+ FileCloser FC(FD); // Close FD on return.
// If we don't know the file size, use fstat to find out. fstat on an open
// file descriptor is cheaper than stat on a random path.
- if (FileSize == -1) {
- struct stat FileInfo;
+ if (FileSize == -1 || FileInfo) {
+ struct stat MyFileInfo;
+ struct stat *FileInfoPtr = FileInfo? FileInfo : &MyFileInfo;
+
// TODO: This should use fstat64 when available.
- if (fstat(FD, &FileInfo) == -1) {
- if (ErrStr) *ErrStr = strerror(errno);
- ::close(FD);
+ if (fstat(FD, FileInfoPtr) == -1) {
+ if (ErrStr) *ErrStr = sys::StrError();
return 0;
}
- FileSize = FileInfo.st_size;
+ FileSize = FileInfoPtr->st_size;
}
@@ -204,9 +230,8 @@ MemoryBuffer *MemoryBuffer::getFile(StringRef Filename, std::string *ErrStr,
if (FileSize >= 4096*4 &&
(FileSize & (sys::Process::GetPageSize()-1)) != 0) {
if (const char *Pages = sys::Path::MapInFilePages(FD, FileSize)) {
- // Close the file descriptor, now that the whole file is in memory.
- ::close(FD);
- return new MemoryBufferMMapFile(Filename, Pages, FileSize);
+ return GetNamedBuffer<MemoryBufferMMapFile>(StringRef(Pages, FileSize),
+ Filename);
}
}
@@ -214,30 +239,31 @@ MemoryBuffer *MemoryBuffer::getFile(StringRef Filename, std::string *ErrStr,
if (!Buf) {
// Failed to create a buffer.
if (ErrStr) *ErrStr = "could not allocate buffer";
- ::close(FD);
return 0;
}
OwningPtr<MemoryBuffer> SB(Buf);
char *BufPtr = const_cast<char*>(SB->getBufferStart());
-
+
size_t BytesLeft = FileSize;
while (BytesLeft) {
ssize_t NumRead = ::read(FD, BufPtr, BytesLeft);
- if (NumRead > 0) {
- BytesLeft -= NumRead;
- BufPtr += NumRead;
- } else if (NumRead == -1 && errno == EINTR) {
- // try again
- } else {
- // error reading.
- if (ErrStr) *ErrStr = strerror(errno);
- close(FD);
+ if (NumRead == -1) {
+ if (errno == EINTR)
+ continue;
+ // Error while reading.
+ if (ErrStr) *ErrStr = sys::StrError();
return 0;
+ } else if (NumRead == 0) {
+ // We hit EOF early, truncate and terminate buffer.
+ Buf->BufferEnd = BufPtr;
+ *BufPtr = 0;
+ return SB.take();
}
+ BytesLeft -= NumRead;
+ BufPtr += NumRead;
}
- close(FD);
-
+
return SB.take();
}
@@ -245,34 +271,27 @@ MemoryBuffer *MemoryBuffer::getFile(StringRef Filename, std::string *ErrStr,
// MemoryBuffer::getSTDIN implementation.
//===----------------------------------------------------------------------===//
-namespace {
-class STDINBufferFile : public MemoryBuffer {
-public:
- virtual const char *getBufferIdentifier() const {
- return "<stdin>";
- }
-};
-}
-
-MemoryBuffer *MemoryBuffer::getSTDIN() {
- char Buffer[4096*4];
-
- std::vector<char> FileData;
-
+MemoryBuffer *MemoryBuffer::getSTDIN(std::string *ErrStr) {
// Read in all of the data from stdin, we cannot mmap stdin.
//
// FIXME: That isn't necessarily true, we should try to mmap stdin and
// fallback if it fails.
sys::Program::ChangeStdinToBinary();
- size_t ReadBytes;
+
+ const ssize_t ChunkSize = 4096*4;
+ SmallString<ChunkSize> Buffer;
+ ssize_t ReadBytes;
+ // Read into Buffer until we hit EOF.
do {
- ReadBytes = fread(Buffer, sizeof(char), sizeof(Buffer), stdin);
- FileData.insert(FileData.end(), Buffer, Buffer+ReadBytes);
- } while (ReadBytes == sizeof(Buffer));
-
- FileData.push_back(0); // &FileData[Size] is invalid. So is &*FileData.end().
- size_t Size = FileData.size();
- MemoryBuffer *B = new STDINBufferFile();
- B->initCopyOf(&FileData[0], &FileData[Size-1]);
- return B;
+ Buffer.reserve(Buffer.size() + ChunkSize);
+ ReadBytes = read(0, Buffer.end(), ChunkSize);
+ if (ReadBytes == -1) {
+ if (errno == EINTR) continue;
+ if (ErrStr) *ErrStr = sys::StrError();
+ return 0;
+ }
+ Buffer.set_size(Buffer.size() + ReadBytes);
+ } while (ReadBytes != 0);
+
+ return getMemBufferCopy(Buffer, "<stdin>");
}
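
The rewritten MemoryBuffer path above drops the MustDeleteBuffer flag by making one allocation carry the object, its identifier and (for uninitialized buffers) the data block, with the name read back via `this + 1`. A minimal standalone sketch of that trailing-name layout, using a hypothetical Named class in place of MemoryBufferMem:

    #include <cstring>
    #include <iostream>
    #include <new>
    #include <string>

    class Named {
    public:
      // The identifier lives immediately after the object itself.
      const char *name() const { return reinterpret_cast<const char *>(this + 1); }

      static Named *create(const std::string &Name) {
        // One allocation holds the object plus the null-terminated name.
        char *Mem =
            static_cast<char *>(operator new(sizeof(Named) + Name.size() + 1));
        std::memcpy(Mem + sizeof(Named), Name.c_str(), Name.size() + 1);
        return new (Mem) Named();
      }

      static void destroy(Named *N) {
        N->~Named();
        operator delete(N);
      }
    };

    int main() {
      Named *N = Named::create("<stdin>");
      std::cout << N->name() << "\n"; // prints "<stdin>"
      Named::destroy(N);
    }

Since the object and its name share a single operator new allocation, teardown is one destructor call plus one operator delete, which is roughly why the MemoryBuffer base destructor above can become empty.
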
diff --git a/libclamav/c++/llvm/lib/Support/PrettyStackTrace.cpp b/libclamav/c++/llvm/lib/Support/PrettyStackTrace.cpp
index 68b41a7..3c8a108 100644
--- a/libclamav/c++/llvm/lib/Support/PrettyStackTrace.cpp
+++ b/libclamav/c++/llvm/lib/Support/PrettyStackTrace.cpp
@@ -12,11 +12,17 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/Config/config.h" // Get autoconf configuration settings
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/System/Signals.h"
#include "llvm/System/ThreadLocal.h"
#include "llvm/ADT/SmallString.h"
+
+#ifdef HAVE_CRASHREPORTERCLIENT_H
+#include <CrashReporterClient.h>
+#endif
+
using namespace llvm;
namespace llvm {
@@ -48,16 +54,25 @@ static void PrintCurStackTrace(raw_ostream &OS) {
OS.flush();
}
-// Integrate with crash reporter.
-#ifdef __APPLE__
-extern "C" const char *__crashreporter_info__;
-const char *__crashreporter_info__ = 0;
+// Integrate with crash reporter libraries.
+#if defined (__APPLE__) && defined (HAVE_CRASHREPORTERCLIENT_H)
+// If any clients of llvm try to link to libCrashReporterClient.a themselves,
+// only one crash info struct will be used.
+extern "C" {
+CRASH_REPORTER_CLIENT_HIDDEN
+struct crashreporter_annotations_t gCRAnnotations
+ __attribute__((section("__DATA," CRASHREPORTER_ANNOTATIONS_SECTION)))
+ = { CRASHREPORTER_ANNOTATIONS_VERSION, 0, 0, 0, 0 };
+}
+#elif defined (__APPLE__)
+static const char *__crashreporter_info__ = 0;
+asm(".desc ___crashreporter_info__, 0x10");
#endif
/// CrashHandler - This callback is run if a fatal signal is delivered to the
/// process, it prints the pretty stack trace.
-static void CrashHandler(void *Cookie) {
+static void CrashHandler(void *) {
#ifndef __APPLE__
// On non-apple systems, just emit the crash stack trace to stderr.
PrintCurStackTrace(errs());
@@ -71,7 +86,12 @@ static void CrashHandler(void *Cookie) {
}
if (!TmpStr.empty()) {
+#ifndef HAVE_CRASHREPORTERCLIENT_H
__crashreporter_info__ = strdup(std::string(TmpStr.str()).c_str());
+#else
+ // Cast to void to avoid warning.
+ (void)CRSetCrashLogMessage(std::string(TmpStr.str()).c_str());
+#endif
errs() << TmpStr.str();
}
diff --git a/libclamav/c++/llvm/lib/Support/Regex.cpp b/libclamav/c++/llvm/lib/Support/Regex.cpp
index a7631de..309ffb0 100644
--- a/libclamav/c++/llvm/lib/Support/Regex.cpp
+++ b/libclamav/c++/llvm/lib/Support/Regex.cpp
@@ -19,7 +19,7 @@
#include <string>
using namespace llvm;
-Regex::Regex(const StringRef ®ex, unsigned Flags) {
+Regex::Regex(StringRef regex, unsigned Flags) {
unsigned flags = 0;
preg = new llvm_regex();
preg->re_endp = regex.end();
@@ -52,7 +52,7 @@ unsigned Regex::getNumMatches() const {
return preg->re_nsub;
}
-bool Regex::match(const StringRef &String, SmallVectorImpl<StringRef> *Matches){
+bool Regex::match(StringRef String, SmallVectorImpl<StringRef> *Matches){
unsigned nmatch = Matches ? preg->re_nsub+1 : 0;
// pmatch needs to have at least one element.
diff --git a/libclamav/c++/llvm/lib/Support/SlowOperationInformer.cpp b/libclamav/c++/llvm/lib/Support/SlowOperationInformer.cpp
deleted file mode 100644
index b4e9430..0000000
--- a/libclamav/c++/llvm/lib/Support/SlowOperationInformer.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-//===-- SlowOperationInformer.cpp - Keep the user informed ----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the SlowOperationInformer class for the LLVM debugger.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/SlowOperationInformer.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Alarm.h"
-#include <sstream>
-#include <cassert>
-using namespace llvm;
-
-SlowOperationInformer::SlowOperationInformer(const std::string &Name)
- : OperationName(Name), LastPrintAmount(0) {
- sys::SetupAlarm(1);
-}
-
-SlowOperationInformer::~SlowOperationInformer() {
- sys::TerminateAlarm();
- if (LastPrintAmount) {
- // If we have printed something, make _sure_ we print the 100% amount, and
- // also print a newline.
- outs() << std::string(LastPrintAmount, '\b') << "Progress "
- << OperationName << ": 100% \n";
- }
-}
-
-/// progress - Clients should periodically call this method when they are in
-/// an exception-safe state. The Amount variable should indicate how far
-/// along the operation is, given in 1/10ths of a percent (in other words,
-/// Amount should range from 0 to 1000).
-bool SlowOperationInformer::progress(unsigned Amount) {
- int status = sys::AlarmStatus();
- if (status == -1) {
- outs() << "\n";
- LastPrintAmount = 0;
- return true;
- }
-
- // If we haven't spent enough time in this operation to warrant displaying the
- // progress bar, don't do so yet.
- if (status == 0)
- return false;
-
- // Delete whatever we printed last time.
- std::string ToPrint = std::string(LastPrintAmount, '\b');
-
- std::ostringstream OS;
- OS << "Progress " << OperationName << ": " << Amount/10;
- if (unsigned Rem = Amount % 10)
- OS << "." << Rem << "%";
- else
- OS << "% ";
-
- LastPrintAmount = OS.str().size();
- outs() << ToPrint+OS.str();
- outs().flush();
- return false;
-}
diff --git a/libclamav/c++/llvm/lib/Support/SmallPtrSet.cpp b/libclamav/c++/llvm/lib/Support/SmallPtrSet.cpp
index 68938fa..504e649 100644
--- a/libclamav/c++/llvm/lib/Support/SmallPtrSet.cpp
+++ b/libclamav/c++/llvm/lib/Support/SmallPtrSet.cpp
@@ -166,10 +166,13 @@ void SmallPtrSetImpl::Grow() {
}
}
-SmallPtrSetImpl::SmallPtrSetImpl(const SmallPtrSetImpl& that) {
+SmallPtrSetImpl::SmallPtrSetImpl(const void **SmallStorage,
+ const SmallPtrSetImpl& that) {
+ SmallArray = SmallStorage;
+
// If we're becoming small, prepare to insert into our stack space
if (that.isSmall()) {
- CurArray = &SmallArray[0];
+ CurArray = SmallArray;
// Otherwise, allocate new heap space (unless we were the same size)
} else {
CurArray = (const void**)malloc(sizeof(void*) * (that.CurArraySize+1));
@@ -197,7 +200,7 @@ void SmallPtrSetImpl::CopyFrom(const SmallPtrSetImpl &RHS) {
if (RHS.isSmall()) {
if (!isSmall())
free(CurArray);
- CurArray = &SmallArray[0];
+ CurArray = SmallArray;
// Otherwise, allocate new heap space (unless we were the same size)
} else if (CurArraySize != RHS.CurArraySize) {
if (isSmall())
diff --git a/libclamav/c++/llvm/lib/Support/SmallVector.cpp b/libclamav/c++/llvm/lib/Support/SmallVector.cpp
index 6821382..a89f149 100644
--- a/libclamav/c++/llvm/lib/Support/SmallVector.cpp
+++ b/libclamav/c++/llvm/lib/Support/SmallVector.cpp
@@ -18,18 +18,21 @@ using namespace llvm;
/// on POD-like datatypes and is out of line to reduce code duplication.
void SmallVectorBase::grow_pod(size_t MinSizeInBytes, size_t TSize) {
size_t CurSizeBytes = size_in_bytes();
- size_t NewCapacityInBytes = 2 * capacity_in_bytes();
+ size_t NewCapacityInBytes = 2 * capacity_in_bytes() + TSize; // Always grow.
if (NewCapacityInBytes < MinSizeInBytes)
NewCapacityInBytes = MinSizeInBytes;
- void *NewElts = operator new(NewCapacityInBytes);
-
- // Copy the elements over. No need to run dtors on PODs.
- memcpy(NewElts, this->BeginX, CurSizeBytes);
-
- // If this wasn't grown from the inline copy, deallocate the old space.
- if (!this->isSmall())
- operator delete(this->BeginX);
-
+
+ void *NewElts;
+ if (this->isSmall()) {
+ NewElts = malloc(NewCapacityInBytes);
+
+ // Copy the elements over. No need to run dtors on PODs.
+ memcpy(NewElts, this->BeginX, CurSizeBytes);
+ } else {
+ // If this wasn't grown from the inline copy, grow the allocated space.
+ NewElts = realloc(this->BeginX, NewCapacityInBytes);
+ }
+
this->EndX = (char*)NewElts+CurSizeBytes;
this->BeginX = NewElts;
this->CapacityX = (char*)this->BeginX + NewCapacityInBytes;
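
grow_pod above now calls realloc once the elements already live on the heap and only falls back to malloc plus memcpy on the first spill out of the inline storage. A rough illustration of that policy with a toy POD byte buffer (PodBuf and its sizes are illustrative, not LLVM types):

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    struct PodBuf {
      char Inline[16];
      char *Begin = Inline;
      std::size_t Size = 0, Cap = sizeof(Inline);

      bool isSmall() const { return Begin == Inline; }

      void grow(std::size_t MinCap) {
        std::size_t NewCap = 2 * Cap + 1; // always grow by at least one byte
        if (NewCap < MinCap)
          NewCap = MinCap;
        if (isSmall()) {
          // First spill out of the inline buffer: allocate and copy the bytes.
          char *NewBegin = static_cast<char *>(std::malloc(NewCap));
          std::memcpy(NewBegin, Begin, Size);
          Begin = NewBegin;
        } else {
          // Already on the heap: realloc may extend in place and skip the copy.
          Begin = static_cast<char *>(std::realloc(Begin, NewCap));
        }
        Cap = NewCap; // (allocation-failure handling omitted in this sketch)
      }

      void push(char C) {
        if (Size == Cap)
          grow(Size + 1);
        Begin[Size++] = C;
      }

      ~PodBuf() {
        if (!isSmall())
          std::free(Begin);
      }
    };

    int main() {
      PodBuf B;
      for (int i = 0; i != 100; ++i)
        B.push('x');
      std::printf("%zu bytes stored, on heap: %d\n", B.Size, !B.isSmall());
    }
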
diff --git a/libclamav/c++/llvm/lib/Support/SourceMgr.cpp b/libclamav/c++/llvm/lib/Support/SourceMgr.cpp
index 83c7964..da5681c 100644
--- a/libclamav/c++/llvm/lib/Support/SourceMgr.cpp
+++ b/libclamav/c++/llvm/lib/Support/SourceMgr.cpp
@@ -168,13 +168,21 @@ SMDiagnostic SourceMgr::GetMessage(SMLoc Loc, const std::string &Msg,
}
PrintedMsg += Msg;
- return SMDiagnostic(CurMB->getBufferIdentifier(), FindLineNumber(Loc, CurBuf),
+ return SMDiagnostic(*this, Loc,
+ CurMB->getBufferIdentifier(), FindLineNumber(Loc, CurBuf),
Loc.getPointer()-LineStart, PrintedMsg,
LineStr, ShowLine);
}
void SourceMgr::PrintMessage(SMLoc Loc, const std::string &Msg,
const char *Type, bool ShowLine) const {
+ // Report the message with the diagnostic handler if present.
+ if (DiagHandler) {
+ DiagHandler(GetMessage(Loc, Msg, Type, ShowLine),
+ DiagContext, DiagLocCookie);
+ return;
+ }
+
raw_ostream &OS = errs();
int CurBuf = FindBufferContainingLoc(Loc);
diff --git a/libclamav/c++/llvm/lib/Support/Statistic.cpp b/libclamav/c++/llvm/lib/Support/Statistic.cpp
index e787670..e32ab74 100644
--- a/libclamav/c++/llvm/lib/Support/Statistic.cpp
+++ b/libclamav/c++/llvm/lib/Support/Statistic.cpp
@@ -32,8 +32,8 @@
#include <cstring>
using namespace llvm;
-// GetLibSupportInfoOutputFile - Return a file stream to print our output on.
-namespace llvm { extern raw_ostream *GetLibSupportInfoOutputFile(); }
+// CreateInfoOutputFile - Return a file stream to print our output on.
+namespace llvm { extern raw_ostream *CreateInfoOutputFile(); }
/// -stats - Command line option to cause transformations to emit stats about
/// what they did.
@@ -44,13 +44,15 @@ Enabled("stats", cl::desc("Enable statistics output from program"));
namespace {
/// StatisticInfo - This class is used in a ManagedStatic so that it is created
-/// on demand (when the first statistic is bumped) and destroyed only when
+/// on demand (when the first statistic is bumped) and destroyed only when
/// llvm_shutdown is called. We print statistics from the destructor.
class StatisticInfo {
std::vector<const Statistic*> Stats;
+ friend void llvm::PrintStatistics();
+ friend void llvm::PrintStatistics(raw_ostream &OS);
public:
~StatisticInfo();
-
+
void addStatistic(const Statistic *S) {
Stats.push_back(S);
}
@@ -69,7 +71,7 @@ void Statistic::RegisterStatistic() {
if (!Initialized) {
if (Enabled)
StatInfo->addStatistic(this);
-
+
sys::MemoryFence();
// Remember we have been registered.
Initialized = true;
@@ -82,7 +84,7 @@ struct NameCompare {
bool operator()(const Statistic *LHS, const Statistic *RHS) const {
int Cmp = std::strcmp(LHS->getName(), RHS->getName());
if (Cmp != 0) return Cmp < 0;
-
+
// Secondary key is the description.
return std::strcmp(LHS->getDesc(), RHS->getDesc()) < 0;
}
@@ -92,42 +94,55 @@ struct NameCompare {
// Print information when destroyed, iff command line option is specified.
StatisticInfo::~StatisticInfo() {
- // Statistics not enabled?
- if (Stats.empty()) return;
+ llvm::PrintStatistics();
+}
- // Get the stream to write to.
- raw_ostream &OutStream = *GetLibSupportInfoOutputFile();
+void llvm::EnableStatistics() {
+ Enabled.setValue(true);
+}
+
+void llvm::PrintStatistics(raw_ostream &OS) {
+ StatisticInfo &Stats = *StatInfo;
// Figure out how long the biggest Value and Name fields are.
unsigned MaxNameLen = 0, MaxValLen = 0;
- for (size_t i = 0, e = Stats.size(); i != e; ++i) {
+ for (size_t i = 0, e = Stats.Stats.size(); i != e; ++i) {
MaxValLen = std::max(MaxValLen,
- (unsigned)utostr(Stats[i]->getValue()).size());
+ (unsigned)utostr(Stats.Stats[i]->getValue()).size());
MaxNameLen = std::max(MaxNameLen,
- (unsigned)std::strlen(Stats[i]->getName()));
+ (unsigned)std::strlen(Stats.Stats[i]->getName()));
}
-
+
// Sort the fields by name.
- std::stable_sort(Stats.begin(), Stats.end(), NameCompare());
+ std::stable_sort(Stats.Stats.begin(), Stats.Stats.end(), NameCompare());
// Print out the statistics header...
- OutStream << "===" << std::string(73, '-') << "===\n"
- << " ... Statistics Collected ...\n"
- << "===" << std::string(73, '-') << "===\n\n";
-
+ OS << "===" << std::string(73, '-') << "===\n"
+ << " ... Statistics Collected ...\n"
+ << "===" << std::string(73, '-') << "===\n\n";
+
// Print all of the statistics.
- for (size_t i = 0, e = Stats.size(); i != e; ++i) {
- std::string CountStr = utostr(Stats[i]->getValue());
- OutStream << std::string(MaxValLen-CountStr.size(), ' ')
- << CountStr << " " << Stats[i]->getName()
- << std::string(MaxNameLen-std::strlen(Stats[i]->getName()), ' ')
- << " - " << Stats[i]->getDesc() << "\n";
-
+ for (size_t i = 0, e = Stats.Stats.size(); i != e; ++i) {
+ std::string CountStr = utostr(Stats.Stats[i]->getValue());
+ OS << std::string(MaxValLen-CountStr.size(), ' ')
+ << CountStr << " " << Stats.Stats[i]->getName()
+ << std::string(MaxNameLen-std::strlen(Stats.Stats[i]->getName()), ' ')
+ << " - " << Stats.Stats[i]->getDesc() << "\n";
}
-
- OutStream << '\n'; // Flush the output stream...
- OutStream.flush();
-
- if (&OutStream != &outs() && &OutStream != &errs() && &OutStream != &dbgs())
- delete &OutStream; // Close the file.
+
+ OS << '\n'; // Flush the output stream.
+ OS.flush();
+
+}
+
+void llvm::PrintStatistics() {
+ StatisticInfo &Stats = *StatInfo;
+
+ // Statistics not enabled?
+ if (Stats.Stats.empty()) return;
+
+ // Get the stream to write to.
+ raw_ostream &OutStream = *CreateInfoOutputFile();
+ PrintStatistics(OutStream);
+ delete &OutStream; // Close the file.
}
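
PrintStatistics makes a measuring pass over the statistics before printing so the count and name columns line up; the same two-pass formatting in miniature, with made-up sample statistics:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
      // Made-up sample data, standing in for the registered Statistic objects.
      std::vector<std::pair<std::string, unsigned>> Stats = {
          {"instcombine", 1042}, {"dce", 7}, {"inline", 88}};

      // Pass 1: measure the widest value and the widest name.
      std::size_t MaxName = 0, MaxVal = 0;
      for (const auto &S : Stats) {
        MaxName = std::max(MaxName, S.first.size());
        MaxVal = std::max(MaxVal, std::to_string(S.second).size());
      }

      // Pass 2: print right-aligned counts and left-aligned names.
      for (const auto &S : Stats)
        std::printf("%*u %-*s - sample description\n", (int)MaxVal, S.second,
                    (int)MaxName, S.first.c_str());
    }
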
diff --git a/libclamav/c++/llvm/lib/Support/StringPool.cpp b/libclamav/c++/llvm/lib/Support/StringPool.cpp
index 1ee917f..ff607cf 100644
--- a/libclamav/c++/llvm/lib/Support/StringPool.cpp
+++ b/libclamav/c++/llvm/lib/Support/StringPool.cpp
@@ -22,7 +22,7 @@ StringPool::~StringPool() {
assert(InternTable.empty() && "PooledStringPtr leaked!");
}
-PooledStringPtr StringPool::intern(const StringRef &Key) {
+PooledStringPtr StringPool::intern(StringRef Key) {
table_t::iterator I = InternTable.find(Key);
if (I != InternTable.end())
return PooledStringPtr(&*I);
diff --git a/libclamav/c++/llvm/lib/Support/StringRef.cpp b/libclamav/c++/llvm/lib/Support/StringRef.cpp
index 2b262dc..46f26b2 100644
--- a/libclamav/c++/llvm/lib/Support/StringRef.cpp
+++ b/libclamav/c++/llvm/lib/Support/StringRef.cpp
@@ -9,6 +9,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/APInt.h"
+#include <bitset>
using namespace llvm;
@@ -23,17 +24,45 @@ static char ascii_tolower(char x) {
return x;
}
+static bool ascii_isdigit(char x) {
+ return x >= '0' && x <= '9';
+}
+
/// compare_lower - Compare strings, ignoring case.
int StringRef::compare_lower(StringRef RHS) const {
for (size_t I = 0, E = min(Length, RHS.Length); I != E; ++I) {
- char LHC = ascii_tolower(Data[I]);
- char RHC = ascii_tolower(RHS.Data[I]);
+ unsigned char LHC = ascii_tolower(Data[I]);
+ unsigned char RHC = ascii_tolower(RHS.Data[I]);
if (LHC != RHC)
return LHC < RHC ? -1 : 1;
}
if (Length == RHS.Length)
- return 0;
+ return 0;
+ return Length < RHS.Length ? -1 : 1;
+}
+
+/// compare_numeric - Compare strings, handle embedded numbers.
+int StringRef::compare_numeric(StringRef RHS) const {
+ for (size_t I = 0, E = min(Length, RHS.Length); I != E; ++I) {
+ if (Data[I] == RHS.Data[I])
+ continue;
+ if (ascii_isdigit(Data[I]) && ascii_isdigit(RHS.Data[I])) {
+ // The longer sequence of numbers is larger. This doesn't really handle
+ // prefixed zeros well.
+ for (size_t J = I+1; J != E+1; ++J) {
+ bool ld = J < Length && ascii_isdigit(Data[J]);
+ bool rd = J < RHS.Length && ascii_isdigit(RHS.Data[J]);
+ if (ld != rd)
+ return rd ? -1 : 1;
+ if (!rd)
+ break;
+ }
+ }
+ return (unsigned char)Data[I] < (unsigned char)RHS.Data[I] ? -1 : 1;
+ }
+ if (Length == RHS.Length)
+ return 0;
return Length < RHS.Length ? -1 : 1;
}
@@ -125,11 +154,15 @@ size_t StringRef::rfind(StringRef Str) const {
/// find_first_of - Find the first character in the string that is in \arg
/// Chars, or npos if not found.
///
-/// Note: O(size() * Chars.size())
+/// Note: O(size() + Chars.size())
StringRef::size_type StringRef::find_first_of(StringRef Chars,
size_t From) const {
+ std::bitset<1 << CHAR_BIT> CharBits;
+ for (size_type i = 0; i != Chars.size(); ++i)
+ CharBits.set((unsigned char)Chars[i]);
+
for (size_type i = min(From, Length), e = Length; i != e; ++i)
- if (Chars.find(Data[i]) != npos)
+ if (CharBits.test((unsigned char)Data[i]))
return i;
return npos;
}
@@ -146,11 +179,15 @@ StringRef::size_type StringRef::find_first_not_of(char C, size_t From) const {
/// find_first_not_of - Find the first character in the string that is not
/// in the string \arg Chars, or npos if not found.
///
-/// Note: O(size() * Chars.size())
+/// Note: O(size() + Chars.size())
StringRef::size_type StringRef::find_first_not_of(StringRef Chars,
size_t From) const {
+ std::bitset<1 << CHAR_BIT> CharBits;
+ for (size_type i = 0; i != Chars.size(); ++i)
+ CharBits.set((unsigned char)Chars[i]);
+
for (size_type i = min(From, Length), e = Length; i != e; ++i)
- if (Chars.find(Data[i]) == npos)
+ if (!CharBits.test((unsigned char)Data[i]))
return i;
return npos;
}
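
The new find_first_of/find_first_not_of precompute a 256-entry bitset so the search costs O(size() + Chars.size()) instead of rescanning Chars for every character. The same trick written against std::string (find_first_of_any is an illustrative helper, not part of StringRef):

    #include <bitset>
    #include <climits>
    #include <cstddef>
    #include <iostream>
    #include <string>

    // Build the 256-entry membership table once, then scan the haystack.
    static std::size_t find_first_of_any(const std::string &Haystack,
                                         const std::string &Chars,
                                         std::size_t From = 0) {
      std::bitset<1 << CHAR_BIT> CharBits;
      for (char C : Chars)
        CharBits.set(static_cast<unsigned char>(C));
      for (std::size_t i = From; i < Haystack.size(); ++i)
        if (CharBits.test(static_cast<unsigned char>(Haystack[i])))
          return i;
      return std::string::npos;
    }

    int main() {
      std::cout << find_first_of_any("x86_64-pc-linux-gnu", "-_") << "\n"; // 3
    }
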
diff --git a/libclamav/c++/llvm/lib/Support/SystemUtils.cpp b/libclamav/c++/llvm/lib/Support/SystemUtils.cpp
index 299032f..c8b260c 100644
--- a/libclamav/c++/llvm/lib/Support/SystemUtils.cpp
+++ b/libclamav/c++/llvm/lib/Support/SystemUtils.cpp
@@ -49,6 +49,16 @@ sys::Path llvm::FindExecutable(const std::string &ExeName,
Result.appendComponent(ExeName);
if (Result.canExecute())
return Result;
+ // If the path is absolute (and it usually is), call FindProgramByName to
+ // allow it to try platform-specific logic, such as appending a .exe suffix
+ // on Windows. Don't do this if we somehow have a relative path, because
+ // we don't want to go searching the PATH and accidentally find an unrelated
+ // version of the program.
+ if (Result.isAbsolute()) {
+ Result = sys::Program::FindProgramByName(Result.str());
+ if (!Result.empty())
+ return Result;
+ }
}
return sys::Path();
diff --git a/libclamav/c++/llvm/lib/Support/Timer.cpp b/libclamav/c++/llvm/lib/Support/Timer.cpp
index 4bdfac2..44ee177 100644
--- a/libclamav/c++/llvm/lib/Support/Timer.cpp
+++ b/libclamav/c++/llvm/lib/Support/Timer.cpp
@@ -11,20 +11,20 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Support/Debug.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Format.h"
+#include "llvm/System/Mutex.h"
#include "llvm/System/Process.h"
-#include <algorithm>
-#include <functional>
-#include <map>
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/StringMap.h"
using namespace llvm;
-// GetLibSupportInfoOutputFile - Return a file stream to print our output on.
-namespace llvm { extern raw_ostream *GetLibSupportInfoOutputFile(); }
+// CreateInfoOutputFile - Return a file stream to print our output on.
+namespace llvm { extern raw_ostream *CreateInfoOutputFile(); }
// getLibSupportInfoOutputFilename - This ugly hack is brought to you courtesy
// of constructor/destructor ordering being unspecified by C++. Basically the
@@ -53,117 +53,107 @@ namespace {
cl::Hidden, cl::location(getLibSupportInfoOutputFilename()));
}
+// CreateInfoOutputFile - Return a file stream to print our output on.
+raw_ostream *llvm::CreateInfoOutputFile() {
+ const std::string &OutputFilename = getLibSupportInfoOutputFilename();
+ if (OutputFilename.empty())
+ return new raw_fd_ostream(2, false); // stderr.
+ if (OutputFilename == "-")
+ return new raw_fd_ostream(1, false); // stdout.
+
+ // Append mode is used because the info output file is opened and closed
+ // each time -stats or -time-passes wants to print output to it. To
+ // compensate for this, the test-suite Makefiles have code to delete the
+ // info output file before running commands which write to it.
+ std::string Error;
+ raw_ostream *Result = new raw_fd_ostream(OutputFilename.c_str(),
+ Error, raw_fd_ostream::F_Append);
+ if (Error.empty())
+ return Result;
+
+ errs() << "Error opening info-output-file '"
+ << OutputFilename << "' for appending!\n";
+ delete Result;
+ return new raw_fd_ostream(2, false); // stderr.
+}
+
+
static TimerGroup *DefaultTimerGroup = 0;
static TimerGroup *getDefaultTimerGroup() {
- TimerGroup* tmp = DefaultTimerGroup;
+ TimerGroup *tmp = DefaultTimerGroup;
sys::MemoryFence();
+ if (tmp) return tmp;
+
+ llvm_acquire_global_lock();
+ tmp = DefaultTimerGroup;
if (!tmp) {
- llvm_acquire_global_lock();
- tmp = DefaultTimerGroup;
- if (!tmp) {
- tmp = new TimerGroup("Miscellaneous Ungrouped Timers");
- sys::MemoryFence();
- DefaultTimerGroup = tmp;
- }
- llvm_release_global_lock();
+ tmp = new TimerGroup("Miscellaneous Ungrouped Timers");
+ sys::MemoryFence();
+ DefaultTimerGroup = tmp;
}
+ llvm_release_global_lock();
return tmp;
}
-Timer::Timer(const std::string &N)
- : Elapsed(0), UserTime(0), SystemTime(0), MemUsed(0), PeakMem(0), Name(N),
- Started(false), TG(getDefaultTimerGroup()) {
- TG->addTimer();
-}
-
-Timer::Timer(const std::string &N, TimerGroup &tg)
- : Elapsed(0), UserTime(0), SystemTime(0), MemUsed(0), PeakMem(0), Name(N),
- Started(false), TG(&tg) {
- TG->addTimer();
-}
+//===----------------------------------------------------------------------===//
+// Timer Implementation
+//===----------------------------------------------------------------------===//
-Timer::Timer(const Timer &T) {
- TG = T.TG;
- if (TG) TG->addTimer();
- operator=(T);
+void Timer::init(StringRef N) {
+ assert(TG == 0 && "Timer already initialized");
+ Name.assign(N.begin(), N.end());
+ Started = false;
+ TG = getDefaultTimerGroup();
+ TG->addTimer(*this);
}
-
-// Copy ctor, initialize with no TG member.
-Timer::Timer(bool, const Timer &T) {
- TG = T.TG; // Avoid assertion in operator=
- operator=(T); // Copy contents
- TG = 0;
+void Timer::init(StringRef N, TimerGroup &tg) {
+ assert(TG == 0 && "Timer already initialized");
+ Name.assign(N.begin(), N.end());
+ Started = false;
+ TG = &tg;
+ TG->addTimer(*this);
}
-
Timer::~Timer() {
- if (TG) {
- if (Started) {
- Started = false;
- TG->addTimerToPrint(*this);
- }
- TG->removeTimer();
- }
+ if (!TG) return; // Never initialized, or already cleared.
+ TG->removeTimer(*this);
}
static inline size_t getMemUsage() {
- if (TrackSpace)
- return sys::Process::GetMallocUsage();
- return 0;
+ if (!TrackSpace) return 0;
+ return sys::Process::GetMallocUsage();
}
-struct TimeRecord {
- double Elapsed, UserTime, SystemTime;
- ssize_t MemUsed;
-};
-
-static TimeRecord getTimeRecord(bool Start) {
+TimeRecord TimeRecord::getCurrentTime(bool Start) {
TimeRecord Result;
-
- sys::TimeValue now(0,0);
- sys::TimeValue user(0,0);
- sys::TimeValue sys(0,0);
-
- ssize_t MemUsed = 0;
+ sys::TimeValue now(0,0), user(0,0), sys(0,0);
+
if (Start) {
- MemUsed = getMemUsage();
- sys::Process::GetTimeUsage(now,user,sys);
+ Result.MemUsed = getMemUsage();
+ sys::Process::GetTimeUsage(now, user, sys);
} else {
- sys::Process::GetTimeUsage(now,user,sys);
- MemUsed = getMemUsage();
+ sys::Process::GetTimeUsage(now, user, sys);
+ Result.MemUsed = getMemUsage();
}
- Result.Elapsed = now.seconds() + now.microseconds() / 1000000.0;
- Result.UserTime = user.seconds() + user.microseconds() / 1000000.0;
- Result.SystemTime = sys.seconds() + sys.microseconds() / 1000000.0;
- Result.MemUsed = MemUsed;
-
+ Result.WallTime = now.seconds() + now.microseconds() / 1000000.0;
+ Result.UserTime = user.seconds() + user.microseconds() / 1000000.0;
+ Result.SystemTime = sys.seconds() + sys.microseconds() / 1000000.0;
return Result;
}
static ManagedStatic<std::vector<Timer*> > ActiveTimers;
void Timer::startTimer() {
- sys::SmartScopedLock<true> L(*TimerLock);
Started = true;
ActiveTimers->push_back(this);
- TimeRecord TR = getTimeRecord(true);
- Elapsed -= TR.Elapsed;
- UserTime -= TR.UserTime;
- SystemTime -= TR.SystemTime;
- MemUsed -= TR.MemUsed;
- PeakMemBase = TR.MemUsed;
+ Time -= TimeRecord::getCurrentTime(true);
}
void Timer::stopTimer() {
- sys::SmartScopedLock<true> L(*TimerLock);
- TimeRecord TR = getTimeRecord(false);
- Elapsed += TR.Elapsed;
- UserTime += TR.UserTime;
- SystemTime += TR.SystemTime;
- MemUsed += TR.MemUsed;
+ Time += TimeRecord::getCurrentTime(false);
if (ActiveTimers->back() == this) {
ActiveTimers->pop_back();
@@ -175,217 +165,229 @@ void Timer::stopTimer() {
}
}
-void Timer::sum(const Timer &T) {
- Elapsed += T.Elapsed;
- UserTime += T.UserTime;
- SystemTime += T.SystemTime;
- MemUsed += T.MemUsed;
- PeakMem += T.PeakMem;
+static void printVal(double Val, double Total, raw_ostream &OS) {
+ if (Total < 1e-7) // Avoid dividing by zero.
+ OS << " ----- ";
+ else {
+ OS << " " << format("%7.4f", Val) << " (";
+ OS << format("%5.1f", Val*100/Total) << "%)";
+ }
}
-/// addPeakMemoryMeasurement - This method should be called whenever memory
-/// usage needs to be checked. It adds a peak memory measurement to the
-/// currently active timers, which will be printed when the timer group prints
-///
-void Timer::addPeakMemoryMeasurement() {
- sys::SmartScopedLock<true> L(*TimerLock);
- size_t MemUsed = getMemUsage();
-
- for (std::vector<Timer*>::iterator I = ActiveTimers->begin(),
- E = ActiveTimers->end(); I != E; ++I)
- (*I)->PeakMem = std::max((*I)->PeakMem, MemUsed-(*I)->PeakMemBase);
+void TimeRecord::print(const TimeRecord &Total, raw_ostream &OS) const {
+ if (Total.getUserTime())
+ printVal(getUserTime(), Total.getUserTime(), OS);
+ if (Total.getSystemTime())
+ printVal(getSystemTime(), Total.getSystemTime(), OS);
+ if (Total.getProcessTime())
+ printVal(getProcessTime(), Total.getProcessTime(), OS);
+ printVal(getWallTime(), Total.getWallTime(), OS);
+
+ OS << " ";
+
+ if (Total.getMemUsed())
+ OS << format("%9lld", (long long)getMemUsed()) << " ";
}
+
//===----------------------------------------------------------------------===//
// NamedRegionTimer Implementation
//===----------------------------------------------------------------------===//
namespace {
-typedef std::map<std::string, Timer> Name2Timer;
-typedef std::map<std::string, std::pair<TimerGroup, Name2Timer> > Name2Pair;
+typedef StringMap<Timer> Name2TimerMap;
-}
-
-static ManagedStatic<Name2Timer> NamedTimers;
-
-static ManagedStatic<Name2Pair> NamedGroupedTimers;
-
-static Timer &getNamedRegionTimer(const std::string &Name) {
- sys::SmartScopedLock<true> L(*TimerLock);
- Name2Timer::iterator I = NamedTimers->find(Name);
- if (I != NamedTimers->end())
- return I->second;
+class Name2PairMap {
+ StringMap<std::pair<TimerGroup*, Name2TimerMap> > Map;
+public:
+ ~Name2PairMap() {
+ for (StringMap<std::pair<TimerGroup*, Name2TimerMap> >::iterator
+ I = Map.begin(), E = Map.end(); I != E; ++I)
+ delete I->second.first;
+ }
+
+ Timer &get(StringRef Name, StringRef GroupName) {
+ sys::SmartScopedLock<true> L(*TimerLock);
+
+ std::pair<TimerGroup*, Name2TimerMap> &GroupEntry = Map[GroupName];
+
+ if (!GroupEntry.first)
+ GroupEntry.first = new TimerGroup(GroupName);
+
+ Timer &T = GroupEntry.second[Name];
+ if (!T.isInitialized())
+ T.init(Name, *GroupEntry.first);
+ return T;
+ }
+};
- return NamedTimers->insert(I, std::make_pair(Name, Timer(Name)))->second;
}
-static Timer &getNamedRegionTimer(const std::string &Name,
- const std::string &GroupName) {
- sys::SmartScopedLock<true> L(*TimerLock);
-
- Name2Pair::iterator I = NamedGroupedTimers->find(GroupName);
- if (I == NamedGroupedTimers->end()) {
- TimerGroup TG(GroupName);
- std::pair<TimerGroup, Name2Timer> Pair(TG, Name2Timer());
- I = NamedGroupedTimers->insert(I, std::make_pair(GroupName, Pair));
- }
+static ManagedStatic<Name2TimerMap> NamedTimers;
+static ManagedStatic<Name2PairMap> NamedGroupedTimers;
- Name2Timer::iterator J = I->second.second.find(Name);
- if (J == I->second.second.end())
- J = I->second.second.insert(J,
- std::make_pair(Name,
- Timer(Name,
- I->second.first)));
-
- return J->second;
+static Timer &getNamedRegionTimer(StringRef Name) {
+ sys::SmartScopedLock<true> L(*TimerLock);
+
+ Timer &T = (*NamedTimers)[Name];
+ if (!T.isInitialized())
+ T.init(Name);
+ return T;
}
-NamedRegionTimer::NamedRegionTimer(const std::string &Name)
- : TimeRegion(getNamedRegionTimer(Name)) {}
+NamedRegionTimer::NamedRegionTimer(StringRef Name,
+ bool Enabled)
+ : TimeRegion(!Enabled ? 0 : &getNamedRegionTimer(Name)) {}
-NamedRegionTimer::NamedRegionTimer(const std::string &Name,
- const std::string &GroupName)
- : TimeRegion(getNamedRegionTimer(Name, GroupName)) {}
+NamedRegionTimer::NamedRegionTimer(StringRef Name, StringRef GroupName,
+ bool Enabled)
+ : TimeRegion(!Enabled ? 0 : &NamedGroupedTimers->get(Name, GroupName)) {}
//===----------------------------------------------------------------------===//
// TimerGroup Implementation
//===----------------------------------------------------------------------===//
+/// TimerGroupList - This is the global list of TimerGroups, maintained by the
+/// TimerGroup ctor/dtor and is protected by the TimerLock lock.
+static TimerGroup *TimerGroupList = 0;
-static void printVal(double Val, double Total, raw_ostream &OS) {
- if (Total < 1e-7) // Avoid dividing by zero...
- OS << " ----- ";
- else {
- OS << " " << format("%7.4f", Val) << " (";
- OS << format("%5.1f", Val*100/Total) << "%)";
- }
+TimerGroup::TimerGroup(StringRef name)
+ : Name(name.begin(), name.end()), FirstTimer(0) {
+
+ // Add the group to TimerGroupList.
+ sys::SmartScopedLock<true> L(*TimerLock);
+ if (TimerGroupList)
+ TimerGroupList->Prev = &Next;
+ Next = TimerGroupList;
+ Prev = &TimerGroupList;
+ TimerGroupList = this;
}
-void Timer::print(const Timer &Total, raw_ostream &OS) {
+TimerGroup::~TimerGroup() {
+ // If the timer group is destroyed before the timers it owns, accumulate and
+ // print the timing data.
+ while (FirstTimer != 0)
+ removeTimer(*FirstTimer);
+
+ // Remove the group from the TimerGroupList.
sys::SmartScopedLock<true> L(*TimerLock);
- if (Total.UserTime)
- printVal(UserTime, Total.UserTime, OS);
- if (Total.SystemTime)
- printVal(SystemTime, Total.SystemTime, OS);
- if (Total.getProcessTime())
- printVal(getProcessTime(), Total.getProcessTime(), OS);
- printVal(Elapsed, Total.Elapsed, OS);
-
- OS << " ";
-
- if (Total.MemUsed) {
- OS << format("%9lld", (long long)MemUsed) << " ";
- }
- if (Total.PeakMem) {
- if (PeakMem) {
- OS << format("%9lld", (long long)PeakMem) << " ";
- } else
- OS << " ";
- }
- OS << Name << "\n";
-
- Started = false; // Once printed, don't print again
+ *Prev = Next;
+ if (Next)
+ Next->Prev = Prev;
}
-// GetLibSupportInfoOutputFile - Return a file stream to print our output on...
-raw_ostream *
-llvm::GetLibSupportInfoOutputFile() {
- std::string &LibSupportInfoOutputFilename = getLibSupportInfoOutputFilename();
- if (LibSupportInfoOutputFilename.empty())
- return &errs();
- if (LibSupportInfoOutputFilename == "-")
- return &outs();
-
- std::string Error;
- raw_ostream *Result = new raw_fd_ostream(LibSupportInfoOutputFilename.c_str(),
- Error, raw_fd_ostream::F_Append);
- if (Error.empty())
- return Result;
-
- errs() << "Error opening info-output-file '"
- << LibSupportInfoOutputFilename << " for appending!\n";
- delete Result;
- return &errs();
+void TimerGroup::removeTimer(Timer &T) {
+ sys::SmartScopedLock<true> L(*TimerLock);
+
+ // If the timer was started, move its data to TimersToPrint.
+ if (T.Started)
+ TimersToPrint.push_back(std::make_pair(T.Time, T.Name));
+
+ T.TG = 0;
+
+ // Unlink the timer from our list.
+ *T.Prev = T.Next;
+ if (T.Next)
+ T.Next->Prev = T.Prev;
+
+ // Print the report when all timers in this group are destroyed if some of
+ // them were started.
+ if (FirstTimer != 0 || TimersToPrint.empty())
+ return;
+
+ raw_ostream *OutStream = CreateInfoOutputFile();
+ PrintQueuedTimers(*OutStream);
+ delete OutStream; // Close the file.
}
-
-void TimerGroup::removeTimer() {
+void TimerGroup::addTimer(Timer &T) {
sys::SmartScopedLock<true> L(*TimerLock);
- if (--NumTimers == 0 && !TimersToPrint.empty()) { // Print timing report...
- // Sort the timers in descending order by amount of time taken...
- std::sort(TimersToPrint.begin(), TimersToPrint.end(),
- std::greater<Timer>());
-
- // Figure out how many spaces to indent TimerGroup name...
- unsigned Padding = (80-Name.length())/2;
- if (Padding > 80) Padding = 0; // Don't allow "negative" numbers
-
- raw_ostream *OutStream = GetLibSupportInfoOutputFile();
-
- ++NumTimers;
- { // Scope to contain Total timer... don't allow total timer to drop us to
- // zero timers...
- Timer Total("TOTAL");
-
- for (unsigned i = 0, e = TimersToPrint.size(); i != e; ++i)
- Total.sum(TimersToPrint[i]);
-
- // Print out timing header...
- *OutStream << "===" << std::string(73, '-') << "===\n"
- << std::string(Padding, ' ') << Name << "\n"
- << "===" << std::string(73, '-')
- << "===\n";
-
- // If this is not an collection of ungrouped times, print the total time.
- // Ungrouped timers don't really make sense to add up. We still print the
- // TOTAL line to make the percentages make sense.
- if (this != DefaultTimerGroup) {
- *OutStream << " Total Execution Time: ";
-
- *OutStream << format("%5.4f", Total.getProcessTime()) << " seconds (";
- *OutStream << format("%5.4f", Total.getWallTime()) << " wall clock)\n";
- }
- *OutStream << "\n";
-
- if (Total.UserTime)
- *OutStream << " ---User Time---";
- if (Total.SystemTime)
- *OutStream << " --System Time--";
- if (Total.getProcessTime())
- *OutStream << " --User+System--";
- *OutStream << " ---Wall Time---";
- if (Total.getMemUsed())
- *OutStream << " ---Mem---";
- if (Total.getPeakMem())
- *OutStream << " -PeakMem-";
- *OutStream << " --- Name ---\n";
-
- // Loop through all of the timing data, printing it out...
- for (unsigned i = 0, e = TimersToPrint.size(); i != e; ++i)
- TimersToPrint[i].print(Total, *OutStream);
-
- Total.print(Total, *OutStream);
- *OutStream << '\n';
- OutStream->flush();
- }
- --NumTimers;
-
- TimersToPrint.clear();
-
- if (OutStream != &errs() && OutStream != &outs() && OutStream != &dbgs())
- delete OutStream; // Close the file...
+
+ // Add the timer to our list.
+ if (FirstTimer)
+ FirstTimer->Prev = &T.Next;
+ T.Next = FirstTimer;
+ T.Prev = &FirstTimer;
+ FirstTimer = &T;
+}
+
+void TimerGroup::PrintQueuedTimers(raw_ostream &OS) {
+ // Sort the timers in descending order by amount of time taken.
+ std::sort(TimersToPrint.begin(), TimersToPrint.end());
+
+ TimeRecord Total;
+ for (unsigned i = 0, e = TimersToPrint.size(); i != e; ++i)
+ Total += TimersToPrint[i].first;
+
+ // Print out timing header.
+ OS << "===" << std::string(73, '-') << "===\n";
+ // Figure out how many spaces to indent TimerGroup name.
+ unsigned Padding = (80-Name.length())/2;
+ if (Padding > 80) Padding = 0; // Don't allow "negative" numbers
+ OS.indent(Padding) << Name << '\n';
+ OS << "===" << std::string(73, '-') << "===\n";
+
+ // If this is not a collection of ungrouped times, print the total time.
+ // Ungrouped timers don't really make sense to add up. We still print the
+ // TOTAL line to make the percentages make sense.
+ if (this != DefaultTimerGroup) {
+ OS << " Total Execution Time: ";
+ OS << format("%5.4f", Total.getProcessTime()) << " seconds (";
+ OS << format("%5.4f", Total.getWallTime()) << " wall clock)\n";
+ }
+ OS << '\n';
+
+ if (Total.getUserTime())
+ OS << " ---User Time---";
+ if (Total.getSystemTime())
+ OS << " --System Time--";
+ if (Total.getProcessTime())
+ OS << " --User+System--";
+ OS << " ---Wall Time---";
+ if (Total.getMemUsed())
+ OS << " ---Mem---";
+ OS << " --- Name ---\n";
+
+ // Loop through all of the timing data, printing it out.
+ for (unsigned i = 0, e = TimersToPrint.size(); i != e; ++i) {
+ const std::pair<TimeRecord, std::string> &Entry = TimersToPrint[e-i-1];
+ Entry.first.print(Total, OS);
+ OS << Entry.second << '\n';
}
+
+ Total.print(Total, OS);
+ OS << "Total\n\n";
+ OS.flush();
+
+ TimersToPrint.clear();
}
-void TimerGroup::addTimer() {
+/// print - Print any started timers in this group and zero them.
+void TimerGroup::print(raw_ostream &OS) {
sys::SmartScopedLock<true> L(*TimerLock);
- ++NumTimers;
+
+ // See if any of our timers were started, if so add them to TimersToPrint and
+ // reset them.
+ for (Timer *T = FirstTimer; T; T = T->Next) {
+ if (!T->Started) continue;
+ TimersToPrint.push_back(std::make_pair(T->Time, T->Name));
+
+ // Clear out the time.
+ T->Started = 0;
+ T->Time = TimeRecord();
+ }
+
+ // If any timers were started, print the group.
+ if (!TimersToPrint.empty())
+ PrintQueuedTimers(OS);
}
-void TimerGroup::addTimerToPrint(const Timer &T) {
+/// printAll - This static method prints all timers and clears them all out.
+void TimerGroup::printAll(raw_ostream &OS) {
sys::SmartScopedLock<true> L(*TimerLock);
- TimersToPrint.push_back(Timer(true, T));
-}
+ for (TimerGroup *TG = TimerGroupList; TG; TG = TG->Next)
+ TG->print(OS);
+}
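
Timers now sit on an intrusive list owned by their TimerGroup, with Prev stored as a pointer-to-pointer so removeTimer can unlink in O(1) without walking from FirstTimer. A small self-contained sketch of that linking scheme, with illustrative Node/List types:

    #include <iostream>

    struct Node {
      int Value = 0;
      Node *Next = nullptr;
      Node **Prev = nullptr; // points at whatever pointer points at us
    };

    struct List {
      Node *First = nullptr;

      void add(Node &N) {
        if (First)
          First->Prev = &N.Next;
        N.Next = First;
        N.Prev = &First;
        First = &N;
      }

      // O(1) removal without knowing the predecessor node.
      static void remove(Node &N) {
        *N.Prev = N.Next;
        if (N.Next)
          N.Next->Prev = N.Prev;
        N.Next = nullptr;
        N.Prev = nullptr;
      }
    };

    int main() {
      List L;
      Node A, B, C;
      A.Value = 1; B.Value = 2; C.Value = 3;
      L.add(A); L.add(B); L.add(C); // list is now C, B, A
      List::remove(B);              // list is now C, A
      for (Node *N = L.First; N; N = N->Next)
        std::cout << N->Value << ' ';
      std::cout << '\n';            // prints "3 1"
    }
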
diff --git a/libclamav/c++/llvm/lib/Support/Triple.cpp b/libclamav/c++/llvm/lib/Support/Triple.cpp
index 61bf0a7..3a95b65 100644
--- a/libclamav/c++/llvm/lib/Support/Triple.cpp
+++ b/libclamav/c++/llvm/lib/Support/Triple.cpp
@@ -104,6 +104,7 @@ const char *Triple::getOSTypeName(OSType Kind) {
case Solaris: return "solaris";
case Win32: return "win32";
case Haiku: return "haiku";
+ case Minix: return "minix";
}
return "<invalid>";
@@ -189,7 +190,7 @@ Triple::ArchType Triple::getArchTypeForDarwinArchName(StringRef Str) {
return Triple::UnknownArch;
}
-// Returns architecture name that is unsderstood by the target assembler.
+// Returns architecture name that is understood by the target assembler.
const char *Triple::getArchNameForAssembler() {
if (getOS() != Triple::Darwin && getVendor() != Triple::Apple)
return NULL;
@@ -220,119 +221,238 @@ const char *Triple::getArchNameForAssembler() {
//
-void Triple::Parse() const {
- assert(!isInitialized() && "Invalid parse call.");
-
- StringRef ArchName = getArchName();
- StringRef VendorName = getVendorName();
- StringRef OSName = getOSName();
-
+Triple::ArchType Triple::ParseArch(StringRef ArchName) {
if (ArchName.size() == 4 && ArchName[0] == 'i' &&
ArchName[2] == '8' && ArchName[3] == '6' &&
ArchName[1] - '3' < 6) // i[3-9]86
- Arch = x86;
+ return x86;
else if (ArchName == "amd64" || ArchName == "x86_64")
- Arch = x86_64;
+ return x86_64;
else if (ArchName == "bfin")
- Arch = bfin;
+ return bfin;
else if (ArchName == "pic16")
- Arch = pic16;
+ return pic16;
else if (ArchName == "powerpc")
- Arch = ppc;
+ return ppc;
else if ((ArchName == "powerpc64") || (ArchName == "ppu"))
- Arch = ppc64;
+ return ppc64;
else if (ArchName == "mblaze")
- Arch = mblaze;
+ return mblaze;
else if (ArchName == "arm" ||
ArchName.startswith("armv") ||
ArchName == "xscale")
- Arch = arm;
+ return arm;
else if (ArchName == "thumb" ||
ArchName.startswith("thumbv"))
- Arch = thumb;
+ return thumb;
else if (ArchName.startswith("alpha"))
- Arch = alpha;
+ return alpha;
else if (ArchName == "spu" || ArchName == "cellspu")
- Arch = cellspu;
+ return cellspu;
else if (ArchName == "msp430")
- Arch = msp430;
+ return msp430;
else if (ArchName == "mips" || ArchName == "mipsallegrex")
- Arch = mips;
+ return mips;
else if (ArchName == "mipsel" || ArchName == "mipsallegrexel" ||
ArchName == "psp")
- Arch = mipsel;
+ return mipsel;
else if (ArchName == "sparc")
- Arch = sparc;
+ return sparc;
else if (ArchName == "sparcv9")
- Arch = sparcv9;
+ return sparcv9;
else if (ArchName == "s390x")
- Arch = systemz;
+ return systemz;
else if (ArchName == "tce")
- Arch = tce;
+ return tce;
else if (ArchName == "xcore")
- Arch = xcore;
+ return xcore;
else
- Arch = UnknownArch;
-
-
- // Handle some exceptional cases where the OS / environment components are
- // stuck into the vendor field.
- if (StringRef(getTriple()).count('-') == 1) {
- StringRef VendorName = getVendorName();
-
- if (VendorName.startswith("mingw32")) { // 'i386-mingw32', etc.
- Vendor = PC;
- OS = MinGW32;
- return;
- }
-
- // arm-elf is another example, but we don't currently parse anything about
- // the environment.
- }
+ return UnknownArch;
+}
+Triple::VendorType Triple::ParseVendor(StringRef VendorName) {
if (VendorName == "apple")
- Vendor = Apple;
+ return Apple;
else if (VendorName == "pc")
- Vendor = PC;
+ return PC;
else
- Vendor = UnknownVendor;
+ return UnknownVendor;
+}
+Triple::OSType Triple::ParseOS(StringRef OSName) {
if (OSName.startswith("auroraux"))
- OS = AuroraUX;
+ return AuroraUX;
else if (OSName.startswith("cygwin"))
- OS = Cygwin;
+ return Cygwin;
else if (OSName.startswith("darwin"))
- OS = Darwin;
+ return Darwin;
else if (OSName.startswith("dragonfly"))
- OS = DragonFly;
+ return DragonFly;
else if (OSName.startswith("freebsd"))
- OS = FreeBSD;
+ return FreeBSD;
else if (OSName.startswith("linux"))
- OS = Linux;
+ return Linux;
else if (OSName.startswith("lv2"))
- OS = Lv2;
+ return Lv2;
else if (OSName.startswith("mingw32"))
- OS = MinGW32;
+ return MinGW32;
else if (OSName.startswith("mingw64"))
- OS = MinGW64;
+ return MinGW64;
else if (OSName.startswith("netbsd"))
- OS = NetBSD;
+ return NetBSD;
else if (OSName.startswith("openbsd"))
- OS = OpenBSD;
+ return OpenBSD;
else if (OSName.startswith("psp"))
- OS = Psp;
+ return Psp;
else if (OSName.startswith("solaris"))
- OS = Solaris;
+ return Solaris;
else if (OSName.startswith("win32"))
- OS = Win32;
+ return Win32;
else if (OSName.startswith("haiku"))
- OS = Haiku;
+ return Haiku;
+ else if (OSName.startswith("minix"))
+ return Minix;
else
- OS = UnknownOS;
+ return UnknownOS;
+}
+
+void Triple::Parse() const {
+ assert(!isInitialized() && "Invalid parse call.");
+
+ Arch = ParseArch(getArchName());
+ Vendor = ParseVendor(getVendorName());
+ OS = ParseOS(getOSName());
assert(isInitialized() && "Failed to initialize!");
}
+std::string Triple::normalize(StringRef Str) {
+ // Parse into components.
+ SmallVector<StringRef, 4> Components;
+ for (size_t First = 0, Last = 0; Last != StringRef::npos; First = Last + 1) {
+ Last = Str.find('-', First);
+ Components.push_back(Str.slice(First, Last));
+ }
+
+ // If the first component corresponds to a known architecture, preferentially
+ // use it for the architecture. If the second component corresponds to a
+ // known vendor, preferentially use it for the vendor, etc. This avoids silly
+ // component movement when a component parses as (eg) both a valid arch and a
+ // valid os.
+ ArchType Arch = UnknownArch;
+ if (Components.size() > 0)
+ Arch = ParseArch(Components[0]);
+ VendorType Vendor = UnknownVendor;
+ if (Components.size() > 1)
+ Vendor = ParseVendor(Components[1]);
+ OSType OS = UnknownOS;
+ if (Components.size() > 2)
+ OS = ParseOS(Components[2]);
+
+ // Note which components are already in their final position. These will not
+ // be moved.
+ bool Found[3];
+ Found[0] = Arch != UnknownArch;
+ Found[1] = Vendor != UnknownVendor;
+ Found[2] = OS != UnknownOS;
+
+ // If they are not there already, permute the components into their canonical
+ // positions by seeing if they parse as a valid architecture, and if so moving
+ // the component to the architecture position etc.
+ for (unsigned Pos = 0; Pos != 3; ++Pos) {
+ if (Found[Pos])
+ continue; // Already in the canonical position.
+
+ for (unsigned Idx = 0; Idx != Components.size(); ++Idx) {
+ // Do not reparse any components that already matched.
+ if (Idx < 3 && Found[Idx])
+ continue;
+
+ // Does this component parse as valid for the target position?
+ bool Valid = false;
+ StringRef Comp = Components[Idx];
+ switch (Pos) {
+ default:
+ assert(false && "unexpected component type!");
+ case 0:
+ Arch = ParseArch(Comp);
+ Valid = Arch != UnknownArch;
+ break;
+ case 1:
+ Vendor = ParseVendor(Comp);
+ Valid = Vendor != UnknownVendor;
+ break;
+ case 2:
+ OS = ParseOS(Comp);
+ Valid = OS != UnknownOS;
+ break;
+ }
+ if (!Valid)
+ continue; // Nope, try the next component.
+
+ // Move the component to the target position, pushing any non-fixed
+ // components that are in the way to the right. This tends to give
+ // good results in the common cases of a forgotten vendor component
+ // or a wrongly positioned environment.
+ if (Pos < Idx) {
+ // Insert left, pushing the existing components to the right. For
+ // example, a-b-i386 -> i386-a-b when moving i386 to the front.
+ StringRef CurrentComponent(""); // The empty component.
+ // Replace the component we are moving with an empty component.
+ std::swap(CurrentComponent, Components[Idx]);
+ // Insert the component being moved at Pos, displacing any existing
+ // components to the right.
+ for (unsigned i = Pos; !CurrentComponent.empty(); ++i) {
+ // Skip over any fixed components.
+ while (i < 3 && Found[i]) ++i;
+ // Place the component at the new position, getting the component
+ // that was at this position - it will be moved right.
+ std::swap(CurrentComponent, Components[i]);
+ }
+ } else if (Pos > Idx) {
+ // Push right by inserting empty components until the component at Idx
+ // reaches the target position Pos. For example, pc-a -> -pc-a when
+ // moving pc to the second position.
+ do {
+ // Insert one empty component at Idx.
+ StringRef CurrentComponent(""); // The empty component.
+ for (unsigned i = Idx; i < Components.size(); ++i) {
+ // Skip over any fixed components.
+ while (i < 3 && Found[i]) ++i;
+ // Place the component at the new position, getting the component
+ // that was at this position - it will be moved right.
+ std::swap(CurrentComponent, Components[i]);
+ // If it was placed on top of an empty component then we are done.
+ if (CurrentComponent.empty())
+ break;
+ }
+ // The last component was pushed off the end - append it.
+ if (!CurrentComponent.empty())
+ Components.push_back(CurrentComponent);
+
+ // Advance Idx to the component's new position.
+ while (++Idx < 3 && Found[Idx]) {}
+ } while (Idx < Pos); // Add more until the final position is reached.
+ }
+ assert(Pos < Components.size() && Components[Pos] == Comp &&
+ "Component moved wrong!");
+ Found[Pos] = true;
+ break;
+ }
+ }
+
+ // Special case logic goes here. At this point Arch, Vendor and OS have the
+ // correct values for the computed components.
+
+ // Stick the corrected components back together to form the normalized string.
+ std::string Normalized;
+ for (unsigned i = 0, e = Components.size(); i != e; ++i) {
+ if (i) Normalized += '-';
+ Normalized += Components[i];
+ }
+ return Normalized;
+}
+
StringRef Triple::getArchName() const {
return StringRef(Data).split('-').first; // Isolate first component
}
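
Triple::normalize splits on '-' and then asks, per component, whether it parses as an architecture, vendor or OS before moving anything around; a triple such as "i386-mingw32" is missing its vendor slot, which is exactly the kind of input the permutation loop has to repair. A toy classifier in the same spirit (the recognizer sets below are tiny samples, not LLVM's real tables):

    #include <iostream>
    #include <set>
    #include <sstream>
    #include <string>
    #include <vector>

    static std::vector<std::string> split(const std::string &S, char Sep) {
      std::vector<std::string> Out;
      std::stringstream SS(S);
      std::string Item;
      while (std::getline(SS, Item, Sep))
        Out.push_back(Item);
      return Out;
    }

    int main() {
      // Tiny sample sets standing in for ParseArch/ParseVendor/ParseOS.
      const std::set<std::string> Arches = {"i386", "x86_64", "arm", "ppc"};
      const std::set<std::string> Vendors = {"pc", "apple"};
      const std::set<std::string> OSes = {"linux", "darwin", "mingw32", "minix"};

      for (const std::string &Comp : split("i386-mingw32", '-')) {
        const char *Kind = Arches.count(Comp)  ? "arch"
                         : Vendors.count(Comp) ? "vendor"
                         : OSes.count(Comp)    ? "os"
                                               : "unknown";
        std::cout << Comp << ": " << Kind << "\n";
      }
    }
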
diff --git a/libclamav/c++/llvm/lib/Support/Twine.cpp b/libclamav/c++/llvm/lib/Support/Twine.cpp
index 21504e9..b3ea013 100644
--- a/libclamav/c++/llvm/lib/Support/Twine.cpp
+++ b/libclamav/c++/llvm/lib/Support/Twine.cpp
@@ -48,10 +48,10 @@ void Twine::printOneChild(raw_ostream &OS, const void *Ptr,
OS << *static_cast<const StringRef*>(Ptr);
break;
case Twine::DecUIKind:
- OS << *static_cast<const unsigned int*>(Ptr);
+ OS << (unsigned)(uintptr_t)Ptr;
break;
case Twine::DecIKind:
- OS << *static_cast<const int*>(Ptr);
+ OS << (int)(intptr_t)Ptr;
break;
case Twine::DecULKind:
OS << *static_cast<const unsigned long*>(Ptr);
@@ -95,10 +95,10 @@ void Twine::printOneChildRepr(raw_ostream &OS, const void *Ptr,
<< static_cast<const StringRef*>(Ptr) << "\"";
break;
case Twine::DecUIKind:
- OS << "decUI:\"" << *static_cast<const unsigned int*>(Ptr) << "\"";
+ OS << "decUI:\"" << (unsigned)(uintptr_t)Ptr << "\"";
break;
case Twine::DecIKind:
- OS << "decI:\"" << *static_cast<const int*>(Ptr) << "\"";
+ OS << "decI:\"" << (int)(intptr_t)Ptr << "\"";
break;
case Twine::DecULKind:
OS << "decUL:\"" << *static_cast<const unsigned long*>(Ptr) << "\"";
diff --git a/libclamav/c++/llvm/lib/Support/circular_raw_ostream.cpp b/libclamav/c++/llvm/lib/Support/circular_raw_ostream.cpp
index e52996d..ca0d30d 100644
--- a/libclamav/c++/llvm/lib/Support/circular_raw_ostream.cpp
+++ b/libclamav/c++/llvm/lib/Support/circular_raw_ostream.cpp
@@ -1,4 +1,4 @@
-//===- circulat_raw_ostream.cpp - Implement the circular_raw_ostream class -===//
+//===- circular_raw_ostream.cpp - Implement circular_raw_ostream ----------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,9 +12,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Support/circular_raw_ostream.h"
-
#include <algorithm>
-
using namespace llvm;
void circular_raw_ostream::write_impl(const char *Ptr, size_t Size) {
@@ -25,7 +23,8 @@ void circular_raw_ostream::write_impl(const char *Ptr, size_t Size) {
// Write into the buffer, wrapping if necessary.
while (Size != 0) {
- unsigned Bytes = std::min(Size, BufferSize - (Cur - BufferArray));
+ unsigned Bytes =
+ std::min(unsigned(Size), unsigned(BufferSize - (Cur - BufferArray)));
memcpy(Cur, Ptr, Bytes);
Size -= Bytes;
Cur += Bytes;
@@ -37,11 +36,10 @@ void circular_raw_ostream::write_impl(const char *Ptr, size_t Size) {
}
}
-void circular_raw_ostream::flushBufferWithBanner(void) {
+void circular_raw_ostream::flushBufferWithBanner() {
if (BufferSize != 0) {
// Write out the buffer
- int num = std::strlen(Banner);
- TheStream->write(Banner, num);
+ TheStream->write(Banner, std::strlen(Banner));
flushBuffer();
}
}
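
The wrap-around loop in circular_raw_ostream::write_impl above boils down to copying at most the space left before the end of the backing array, then resetting the cursor. A small self-contained sketch of that bounded-copy loop, with hypothetical names that only echo the code above:

    #include <algorithm>
    #include <cstring>
    #include <string>

    // Append Size bytes to a fixed circular buffer, wrapping to the start when
    // the end of the array is reached (older data is overwritten).
    static void circularWrite(char *Buffer, size_t Cap, size_t &Cur,
                              const char *Ptr, size_t Size) {
      while (Size != 0) {
        size_t Bytes = std::min(Size, Cap - Cur);  // Space left before the end.
        std::memcpy(Buffer + Cur, Ptr, Bytes);
        Ptr += Bytes;
        Size -= Bytes;
        Cur += Bytes;
        if (Cur == Cap)
          Cur = 0;                                 // Wrap around.
      }
    }

    int main() {
      char Buf[8] = {0};
      size_t Cur = 0;
      std::string Msg = "0123456789";              // Longer than the buffer.
      circularWrite(Buf, sizeof(Buf), Cur, Msg.data(), Msg.size());
      // The last two bytes wrapped over the start: Buf now begins with "89".
      return !(Buf[0] == '8' && Buf[1] == '9' && Cur == 2);
    }
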
diff --git a/libclamav/c++/llvm/lib/Support/raw_ostream.cpp b/libclamav/c++/llvm/lib/Support/raw_ostream.cpp
index 071c924..dba46df 100644
--- a/libclamav/c++/llvm/lib/Support/raw_ostream.cpp
+++ b/libclamav/c++/llvm/lib/Support/raw_ostream.cpp
@@ -19,8 +19,10 @@
#include "llvm/Config/config.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/System/Signals.h"
#include "llvm/ADT/STLExtras.h"
#include <cctype>
+#include <cerrno>
#include <sys/stat.h>
#include <sys/types.h>
@@ -55,13 +57,6 @@ raw_ostream::~raw_ostream() {
if (BufferMode == InternalBuffer)
delete [] OutBufStart;
-
- // If there are any pending errors, report them now. Clients wishing
- // to avoid llvm_report_error calls should check for errors with
- // has_error() and clear the error flag with clear_error() before
- // destructing raw_ostream objects which may have errors.
- if (Error)
- llvm_report_error("IO failure on output stream.");
}
// An out of line virtual method to provide a home for the class vtable.
@@ -81,9 +76,9 @@ void raw_ostream::SetBuffered() {
SetUnbuffered();
}
-void raw_ostream::SetBufferAndMode(char *BufferStart, size_t Size,
+void raw_ostream::SetBufferAndMode(char *BufferStart, size_t Size,
BufferKind Mode) {
- assert(((Mode == Unbuffered && BufferStart == 0 && Size == 0) ||
+ assert(((Mode == Unbuffered && BufferStart == 0 && Size == 0) ||
(Mode != Unbuffered && BufferStart && Size)) &&
"stream must be unbuffered or have at least one byte");
// Make sure the current buffer is free of content (we can't flush here; the
@@ -104,11 +99,11 @@ raw_ostream &raw_ostream::operator<<(unsigned long N) {
// Zero is a special case.
if (N == 0)
return *this << '0';
-
+
char NumberBuffer[20];
char *EndPtr = NumberBuffer+sizeof(NumberBuffer);
char *CurPtr = EndPtr;
-
+
while (N) {
*--CurPtr = '0' + char(N % 10);
N /= 10;
@@ -121,7 +116,7 @@ raw_ostream &raw_ostream::operator<<(long N) {
*this << '-';
N = -N;
}
-
+
return this->operator<<(static_cast<unsigned long>(N));
}
@@ -133,7 +128,7 @@ raw_ostream &raw_ostream::operator<<(unsigned long long N) {
char NumberBuffer[20];
char *EndPtr = NumberBuffer+sizeof(NumberBuffer);
char *CurPtr = EndPtr;
-
+
while (N) {
*--CurPtr = '0' + char(N % 10);
N /= 10;
@@ -142,11 +137,12 @@ raw_ostream &raw_ostream::operator<<(unsigned long long N) {
}
raw_ostream &raw_ostream::operator<<(long long N) {
- if (N < 0) {
+ if (N < 0) {
*this << '-';
- N = -N;
+ // Avoid undefined behavior on INT64_MIN with a cast.
+ N = -(unsigned long long)N;
}
-
+
return this->operator<<(static_cast<unsigned long long>(N));
}
@@ -297,33 +293,33 @@ raw_ostream &raw_ostream::operator<<(const format_object_base &Fmt) {
size_t BufferBytesLeft = OutBufEnd - OutBufCur;
if (BufferBytesLeft > 3) {
size_t BytesUsed = Fmt.print(OutBufCur, BufferBytesLeft);
-
+
// Common case is that we have plenty of space.
if (BytesUsed <= BufferBytesLeft) {
OutBufCur += BytesUsed;
return *this;
}
-
+
// Otherwise, we overflowed and the return value tells us the size to try
// again with.
NextBufferSize = BytesUsed;
}
-
+
// If we got here, we didn't have enough space in the output buffer for the
// string. Try printing into a SmallVector that is resized to have enough
// space. Iterate until we win.
SmallVector<char, 128> V;
-
+
while (1) {
V.resize(NextBufferSize);
-
+
// Try formatting into the SmallVector.
size_t BytesUsed = Fmt.print(V.data(), NextBufferSize);
-
+
// If BytesUsed fit into the vector, we win.
if (BytesUsed <= NextBufferSize)
return write(V.data(), BytesUsed);
-
+
// Otherwise, try again with a new size.
assert(BytesUsed > NextBufferSize && "Didn't grow buffer!?");
NextBufferSize = BytesUsed;
@@ -339,7 +335,7 @@ raw_ostream &raw_ostream::indent(unsigned NumSpaces) {
// Usually the indentation is small, handle it with a fastpath.
if (NumSpaces < array_lengthof(Spaces))
return write(Spaces, NumSpaces);
-
+
while (NumSpaces) {
unsigned NumToWrite = std::min(NumSpaces,
(unsigned)array_lengthof(Spaces)-1);
@@ -367,69 +363,118 @@ void format_object_base::home() {
/// stream should be immediately destroyed; the string will be empty
/// if no error occurred.
raw_fd_ostream::raw_fd_ostream(const char *Filename, std::string &ErrorInfo,
- unsigned Flags) : pos(0) {
+ unsigned Flags) : Error(false), pos(0) {
assert(Filename != 0 && "Filename is null");
// Verify that we don't have both "append" and "excl".
assert((!(Flags & F_Excl) || !(Flags & F_Append)) &&
"Cannot specify both 'excl' and 'append' file creation flags!");
-
+
ErrorInfo.clear();
- // Handle "-" as stdout.
+ // Handle "-" as stdout. Note that when we do this, we consider ourself
+ // the owner of stdout. This means that we can do things like close the
+ // file descriptor when we're done and set the "binary" flag globally.
if (Filename[0] == '-' && Filename[1] == 0) {
FD = STDOUT_FILENO;
// If user requested binary then put stdout into binary mode if
// possible.
if (Flags & F_Binary)
sys::Program::ChangeStdoutToBinary();
- ShouldClose = false;
+ // Close stdout when we're done, to detect any output errors.
+ ShouldClose = true;
return;
}
-
+
int OpenFlags = O_WRONLY|O_CREAT;
#ifdef O_BINARY
if (Flags & F_Binary)
OpenFlags |= O_BINARY;
#endif
-
+
if (Flags & F_Append)
OpenFlags |= O_APPEND;
else
OpenFlags |= O_TRUNC;
if (Flags & F_Excl)
OpenFlags |= O_EXCL;
-
- FD = open(Filename, OpenFlags, 0664);
- if (FD < 0) {
- ErrorInfo = "Error opening output file '" + std::string(Filename) + "'";
- ShouldClose = false;
- } else {
- ShouldClose = true;
+
+ while ((FD = open(Filename, OpenFlags, 0664)) < 0) {
+ if (errno != EINTR) {
+ ErrorInfo = "Error opening output file '" + std::string(Filename) + "'";
+ ShouldClose = false;
+ return;
+ }
}
+
+ // Ok, we successfully opened the file, so it'll need to be closed.
+ ShouldClose = true;
}
raw_fd_ostream::~raw_fd_ostream() {
- if (FD < 0) return;
- flush();
- if (ShouldClose)
- if (::close(FD) != 0)
- error_detected();
+ if (FD >= 0) {
+ flush();
+ if (ShouldClose)
+ while (::close(FD) != 0)
+ if (errno != EINTR) {
+ error_detected();
+ break;
+ }
+ }
+
+ // If there are any pending errors, report them now. Clients wishing
+ // to avoid report_fatal_error calls should check for errors with
+ // has_error() and clear the error flag with clear_error() before
+ // destructing raw_ostream objects which may have errors.
+ if (has_error())
+ report_fatal_error("IO failure on output stream.");
}
void raw_fd_ostream::write_impl(const char *Ptr, size_t Size) {
- assert (FD >= 0 && "File already closed.");
+ assert(FD >= 0 && "File already closed.");
pos += Size;
- if (::write(FD, Ptr, Size) != (ssize_t) Size)
- error_detected();
+
+ do {
+ ssize_t ret = ::write(FD, Ptr, Size);
+
+ if (ret < 0) {
+ // If it's a recoverable error, swallow it and retry the write.
+ //
+ // Ideally we wouldn't ever see EAGAIN or EWOULDBLOCK here, since
+ // raw_ostream isn't designed to do non-blocking I/O. However, some
+ // programs, such as old versions of bjam, have mistakenly used
+ // O_NONBLOCK. For compatibility, emulate blocking semantics by
+ // spinning until the write succeeds. If you don't want spinning,
+ // don't use O_NONBLOCK file descriptors with raw_ostream.
+ if (errno == EINTR || errno == EAGAIN
+#ifdef EWOULDBLOCK
+ || errno == EWOULDBLOCK
+#endif
+ )
+ continue;
+
+ // Otherwise it's a non-recoverable error. Note it and quit.
+ error_detected();
+ break;
+ }
+
+ // The write may have written some or all of the data. Update the
+ // size and buffer pointer to reflect the remainder that needs
+ // to be written. If there are no bytes left, we're done.
+ Ptr += ret;
+ Size -= ret;
+ } while (Size > 0);
}
void raw_fd_ostream::close() {
- assert (ShouldClose);
+ assert(ShouldClose);
ShouldClose = false;
flush();
- if (::close(FD) != 0)
- error_detected();
+ while (::close(FD) != 0)
+ if (errno != EINTR) {
+ error_detected();
+ break;
+ }
FD = -1;
}
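
The retry logic added to write_impl and close() above follows the usual POSIX pattern: restart the call on EINTR (and, as the comment explains, on EAGAIN/EWOULDBLOCK to paper over descriptors mistakenly opened with O_NONBLOCK), and account for short writes. A stripped-down sketch of that pattern outside the raw_fd_ostream class:

    #include <cerrno>
    #include <cstddef>
    #include <unistd.h>

    // Write all of Ptr[0..Size) to FD, retrying on EINTR/EAGAIN and resuming
    // after partial writes. Returns false on a non-recoverable error.
    static bool writeAll(int FD, const char *Ptr, size_t Size) {
      while (Size > 0) {
        ssize_t Ret = ::write(FD, Ptr, Size);
        if (Ret < 0) {
          if (errno == EINTR || errno == EAGAIN
    #ifdef EWOULDBLOCK
              || errno == EWOULDBLOCK
    #endif
              )
            continue;          // Recoverable: retry the same write.
          return false;        // Anything else is a real error.
        }
        Ptr += Ret;            // Short write: advance past what was written.
        Size -= Ret;
      }
      return true;
    }

    int main() {
      const char Msg[] = "hello\n";
      return writeAll(STDOUT_FILENO, Msg, sizeof(Msg) - 1) ? 0 : 1;
    }
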
@@ -438,16 +483,17 @@ uint64_t raw_fd_ostream::seek(uint64_t off) {
pos = ::lseek(FD, off, SEEK_SET);
if (pos != off)
error_detected();
- return pos;
+ return pos;
}
size_t raw_fd_ostream::preferred_buffer_size() const {
-#if !defined(_MSC_VER) && !defined(__MINGW32__) // Windows has no st_blksize.
+#if !defined(_MSC_VER) && !defined(__MINGW32__) && !defined(__minix)
+ // Windows and Minix have no st_blksize.
assert(FD >= 0 && "File not yet open!");
struct stat statbuf;
if (fstat(FD, &statbuf) != 0)
return 0;
-
+
// If this is a terminal, don't use buffering. Line buffering
// would be a more traditional thing to do, but it's not worth
// the complexity.
@@ -455,8 +501,9 @@ size_t raw_fd_ostream::preferred_buffer_size() const {
return 0;
// Return the preferred block size.
return statbuf.st_blksize;
-#endif
+#else
return raw_ostream::preferred_buffer_size();
+#endif
}
raw_ostream &raw_fd_ostream::changeColor(enum Colors colors, bool bold,
@@ -493,30 +540,24 @@ bool raw_fd_ostream::is_displayed() const {
}
//===----------------------------------------------------------------------===//
-// raw_stdout/err_ostream
+// outs(), errs(), nulls()
//===----------------------------------------------------------------------===//
-// Set buffer settings to model stdout and stderr behavior.
-// Set standard error to be unbuffered by default.
-raw_stdout_ostream::raw_stdout_ostream():raw_fd_ostream(STDOUT_FILENO, false) {}
-raw_stderr_ostream::raw_stderr_ostream():raw_fd_ostream(STDERR_FILENO, false,
- true) {}
-
-// An out of line virtual method to provide a home for the class vtable.
-void raw_stdout_ostream::handle() {}
-void raw_stderr_ostream::handle() {}
-
/// outs() - This returns a reference to a raw_ostream for standard output.
/// Use it like: outs() << "foo" << "bar";
raw_ostream &llvm::outs() {
- static raw_stdout_ostream S;
+ // Set buffer settings to model stdout behavior.
+ // Delete the file descriptor when the program exists, forcing error
+ // Delete the file descriptor when the program exits, forcing error
+ // detection. If you don't want this behavior, don't use outs().
+ static raw_fd_ostream S(STDOUT_FILENO, true);
return S;
}
/// errs() - This returns a reference to a raw_ostream for standard error.
/// Use it like: errs() << "foo" << "bar";
raw_ostream &llvm::errs() {
- static raw_stderr_ostream S;
+ // Set standard error to be unbuffered by default.
+ static raw_fd_ostream S(STDERR_FILENO, false, true);
return S;
}
@@ -624,3 +665,34 @@ void raw_null_ostream::write_impl(const char *Ptr, size_t Size) {
uint64_t raw_null_ostream::current_pos() const {
return 0;
}
+
+//===----------------------------------------------------------------------===//
+// tool_output_file
+//===----------------------------------------------------------------------===//
+
+tool_output_file::CleanupInstaller::CleanupInstaller(const char *filename)
+ : Filename(filename), Keep(false) {
+ // Arrange for the file to be deleted if the process is killed.
+ if (Filename != "-")
+ sys::RemoveFileOnSignal(sys::Path(Filename));
+}
+
+tool_output_file::CleanupInstaller::~CleanupInstaller() {
+ // Delete the file if the client hasn't told us not to.
+ if (!Keep && Filename != "-")
+ sys::Path(Filename).eraseFromDisk();
+
+ // Ok, the file is successfully written and closed, or deleted. There's no
+ // further need to clean it up on signals.
+ if (Filename != "-")
+ sys::DontRemoveFileOnSignal(sys::Path(Filename));
+}
+
+tool_output_file::tool_output_file(const char *filename, std::string &ErrorInfo,
+ unsigned Flags)
+ : Installer(filename),
+ OS(filename, ErrorInfo, Flags) {
+ // If open fails, no cleanup is needed.
+ if (!ErrorInfo.empty())
+ Installer.Keep = true;
+}
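
The new tool_output_file helper pairs two ideas: register the output file for deletion if the process is killed (sys::RemoveFileOnSignal), and delete it in the destructor unless the client sets Keep once the output is known good. A minimal RAII sketch of the keep-or-delete half, using plain std::remove instead of LLVM's signal machinery (which this sketch does not reproduce); the file name is made up:

    #include <cstdio>
    #include <string>

    // Delete the named file on destruction unless keep() was called, mirroring
    // tool_output_file::CleanupInstaller's Keep flag. "-" (stdout) is left alone.
    class ScopedOutputFile {
      std::string Filename;
      bool Keep;
    public:
      explicit ScopedOutputFile(const std::string &Name)
          : Filename(Name), Keep(false) {}
      void keep() { Keep = true; }      // Call once the output is known good.
      ~ScopedOutputFile() {
        if (!Keep && Filename != "-")
          std::remove(Filename.c_str());
      }
    };

    int main() {
      ScopedOutputFile Out("result.tmp");     // Hypothetical output name.
      if (std::FILE *F = std::fopen("result.tmp", "w")) {
        std::fputs("done\n", F);
        std::fclose(F);
        Out.keep();                           // Success: don't delete it.
      }                                       // On any earlier exit, it's removed.
      return 0;
    }
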
diff --git a/libclamav/c++/llvm/lib/Support/regengine.inc b/libclamav/c++/llvm/lib/Support/regengine.inc
index bf55543..7e41f96 100644
--- a/libclamav/c++/llvm/lib/Support/regengine.inc
+++ b/libclamav/c++/llvm/lib/Support/regengine.inc
@@ -185,7 +185,7 @@ matcher(struct re_guts *g, const char *string, size_t nmatch,
endp = fast(m, start, stop, gf, gl);
if (endp == NULL) { /* a miss */
free(m->pmatch);
- free(m->lastpos);
+ free((void*)m->lastpos);
STATETEARDOWN(m);
return(REG_NOMATCH);
}
diff --git a/libclamav/c++/llvm/lib/System/CMakeLists.txt b/libclamav/c++/llvm/lib/System/CMakeLists.txt
index a56a1f7..b43c3af 100644
--- a/libclamav/c++/llvm/lib/System/CMakeLists.txt
+++ b/libclamav/c++/llvm/lib/System/CMakeLists.txt
@@ -12,10 +12,12 @@ add_llvm_library(LLVMSystem
Process.cpp
Program.cpp
RWMutex.cpp
+ SearchForAddressOfSpecialSymbol.cpp
Signals.cpp
ThreadLocal.cpp
Threading.cpp
TimeValue.cpp
+ Valgrind.cpp
Unix/Alarm.inc
Unix/Host.inc
Unix/Memory.inc
diff --git a/libclamav/c++/llvm/lib/System/Disassembler.cpp b/libclamav/c++/llvm/lib/System/Disassembler.cpp
index bad427a..139e3be 100644
--- a/libclamav/c++/llvm/lib/System/Disassembler.cpp
+++ b/libclamav/c++/llvm/lib/System/Disassembler.cpp
@@ -44,33 +44,29 @@ std::string llvm::sys::disassembleBuffer(uint8_t* start, size_t length,
uint64_t pc) {
std::stringstream res;
-#if defined (__i386__) || defined (__amd64__) || defined (__x86_64__)
+#if (defined (__i386__) || defined (__amd64__) || defined (__x86_64__)) \
+ && USE_UDIS86
unsigned bits;
# if defined(__i386__)
bits = 32;
# else
bits = 64;
# endif
-
-# if USE_UDIS86
+
ud_t ud_obj;
-
+
ud_init(&ud_obj);
ud_set_input_buffer(&ud_obj, start, length);
ud_set_mode(&ud_obj, bits);
ud_set_pc(&ud_obj, pc);
ud_set_syntax(&ud_obj, UD_SYN_ATT);
-
+
res << std::setbase(16)
<< std::setw(bits/4);
-
+
while (ud_disassemble(&ud_obj)) {
res << ud_insn_off(&ud_obj) << ":\t" << ud_insn_asm(&ud_obj) << "\n";
}
-# else
- res << "No disassembler available. See configure help for options.\n";
-# endif
-
#else
res << "No disassembler available. See configure help for options.\n";
#endif
diff --git a/libclamav/c++/llvm/lib/System/DynamicLibrary.cpp b/libclamav/c++/llvm/lib/System/DynamicLibrary.cpp
index ac4daae..6442b66 100644
--- a/libclamav/c++/llvm/lib/System/DynamicLibrary.cpp
+++ b/libclamav/c++/llvm/lib/System/DynamicLibrary.cpp
@@ -24,12 +24,18 @@
// Collection of symbol name/value pairs to be searched prior to any libraries.
static std::map<std::string, void*> *ExplicitSymbols = 0;
-static struct ExplicitSymbolsDeleter {
+namespace {
+
+struct ExplicitSymbolsDeleter {
~ExplicitSymbolsDeleter() {
if (ExplicitSymbols)
delete ExplicitSymbols;
}
-} Dummy;
+};
+
+}
+
+static ExplicitSymbolsDeleter Dummy;
void llvm::sys::DynamicLibrary::AddSymbol(const char* symbolName,
void *symbolValue) {
@@ -73,50 +79,29 @@ bool DynamicLibrary::LoadLibraryPermanently(const char *Filename,
if (ErrMsg) *ErrMsg = dlerror();
return true;
}
+#ifdef __CYGWIN__
+ // On Cygwin, the handle returned by dlopen(NULL, RTLD_GLOBAL) searches
+ // symbols only in the main executable, so use RTLD_DEFAULT instead.
+ if (Filename == NULL)
+ H = RTLD_DEFAULT;
+#endif
if (OpenedHandles == 0)
OpenedHandles = new std::vector<void *>();
OpenedHandles->push_back(H);
return false;
}
-static void *SearchForAddressOfSpecialSymbol(const char* symbolName) {
-#define EXPLICIT_SYMBOL(SYM) \
- extern void *SYM; if (!strcmp(symbolName, #SYM)) return &SYM
-
- // If this is darwin, it has some funky issues, try to solve them here. Some
- // important symbols are marked 'private external' which doesn't allow
- // SearchForAddressOfSymbol to find them. As such, we special case them here,
- // there is only a small handful of them.
-
-#ifdef __APPLE__
- {
- EXPLICIT_SYMBOL(__ashldi3);
- EXPLICIT_SYMBOL(__ashrdi3);
- EXPLICIT_SYMBOL(__cmpdi2);
- EXPLICIT_SYMBOL(__divdi3);
- EXPLICIT_SYMBOL(__eprintf);
- EXPLICIT_SYMBOL(__fixdfdi);
- EXPLICIT_SYMBOL(__fixsfdi);
- EXPLICIT_SYMBOL(__fixunsdfdi);
- EXPLICIT_SYMBOL(__fixunssfdi);
- EXPLICIT_SYMBOL(__floatdidf);
- EXPLICIT_SYMBOL(__floatdisf);
- EXPLICIT_SYMBOL(__lshrdi3);
- EXPLICIT_SYMBOL(__moddi3);
- EXPLICIT_SYMBOL(__udivdi3);
- EXPLICIT_SYMBOL(__umoddi3);
- }
-#endif
+using namespace llvm;
+using namespace llvm::sys;
-#ifdef __CYGWIN__
- {
- EXPLICIT_SYMBOL(_alloca);
- EXPLICIT_SYMBOL(__main);
- }
-#endif
+bool DynamicLibrary::LoadLibraryPermanently(const char *Filename,
+ std::string *ErrMsg) {
+ if (ErrMsg) *ErrMsg = "dlopen() not supported on this platform";
+ return true;
+}
-#undef EXPLICIT_SYMBOL
- return 0;
+namespace llvm {
+void *SearchForAddressOfSpecialSymbol(const char* symbolName);
}
void* DynamicLibrary::SearchForAddressOfSymbol(const char* symbolName) {
@@ -130,6 +115,7 @@ void* DynamicLibrary::SearchForAddressOfSymbol(const char* symbolName) {
return I->second;
}
+#if HAVE_DLFCN_H
// Now search the libraries.
if (OpenedHandles) {
for (std::vector<void *>::iterator I = OpenedHandles->begin(),
@@ -141,8 +127,9 @@ void* DynamicLibrary::SearchForAddressOfSymbol(const char* symbolName) {
}
}
}
+#endif
- if (void *Result = SearchForAddressOfSpecialSymbol(symbolName))
+ if (void *Result = llvm::SearchForAddressOfSpecialSymbol(symbolName))
return Result;
// This macro returns the address of a well-known, explicit symbol
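
The lookup order sketched in this file is: explicitly registered symbols first, then every handle recorded by LoadLibraryPermanently via dlsym, then the special-symbol fallback. A minimal sketch of the dlsym pass over a set of opened handles (link with -ldl on most Unix systems); the container names are illustrative, not LLVM's:

    #include <dlfcn.h>
    #include <map>
    #include <string>
    #include <vector>

    static std::map<std::string, void *> ExplicitSymbols;  // Checked first.
    static std::vector<void *> OpenedHandles;              // From dlopen().

    static void *searchForSymbol(const char *Name) {
      std::map<std::string, void *>::iterator I = ExplicitSymbols.find(Name);
      if (I != ExplicitSymbols.end())
        return I->second;                       // User-registered symbols win.
      for (size_t i = 0; i != OpenedHandles.size(); ++i)
        if (void *Addr = dlsym(OpenedHandles[i], Name))
          return Addr;                          // First library that has it.
      return 0;                                 // Not found anywhere.
    }

    int main() {
      OpenedHandles.push_back(dlopen(0, RTLD_LAZY | RTLD_GLOBAL));  // This process.
      return searchForSymbol("printf") ? 0 : 1;
    }
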
diff --git a/libclamav/c++/llvm/lib/System/Memory.cpp b/libclamav/c++/llvm/lib/System/Memory.cpp
index e2d838d..ef23b8d 100644
--- a/libclamav/c++/llvm/lib/System/Memory.cpp
+++ b/libclamav/c++/llvm/lib/System/Memory.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/System/Memory.h"
+#include "llvm/System/Valgrind.h"
#include "llvm/Config/config.h"
namespace llvm {
@@ -68,4 +69,6 @@ void llvm::sys::Memory::InvalidateInstructionCache(const void *Addr,
# endif
#endif // end apple
+
+ ValgrindDiscardTranslations(Addr, Len);
}
diff --git a/libclamav/c++/llvm/lib/System/Path.cpp b/libclamav/c++/llvm/lib/System/Path.cpp
index 6844530..4445c66 100644
--- a/libclamav/c++/llvm/lib/System/Path.cpp
+++ b/libclamav/c++/llvm/lib/System/Path.cpp
@@ -61,7 +61,7 @@ sys::IdentifyFileType(const char *magic, unsigned length) {
if (memcmp(magic,"!<arch>\n",8) == 0)
return Archive_FileType;
break;
-
+
case '\177':
if (magic[1] == 'E' && magic[2] == 'L' && magic[3] == 'F') {
if (length >= 18 && magic[17] == 0)
@@ -76,11 +76,11 @@ sys::IdentifyFileType(const char *magic, unsigned length) {
break;
case 0xCA:
- if (magic[1] == char(0xFE) && magic[2] == char(0xBA) &&
+ if (magic[1] == char(0xFE) && magic[2] == char(0xBA) &&
magic[3] == char(0xBE)) {
- // This is complicated by an overlap with Java class files.
+ // This is complicated by an overlap with Java class files.
// See the Mach-O section in /usr/share/file/magic for details.
- if (length >= 8 && magic[7] < 43)
+ if (length >= 8 && magic[7] < 43)
// FIXME: Universal Binary of any type.
return Mach_O_DynamicallyLinkedSharedLib_FileType;
}
@@ -89,18 +89,18 @@ sys::IdentifyFileType(const char *magic, unsigned length) {
case 0xFE:
case 0xCE: {
uint16_t type = 0;
- if (magic[0] == char(0xFE) && magic[1] == char(0xED) &&
+ if (magic[0] == char(0xFE) && magic[1] == char(0xED) &&
magic[2] == char(0xFA) && magic[3] == char(0xCE)) {
/* Native endian */
if (length >= 16) type = magic[14] << 8 | magic[15];
- } else if (magic[0] == char(0xCE) && magic[1] == char(0xFA) &&
+ } else if (magic[0] == char(0xCE) && magic[1] == char(0xFA) &&
magic[2] == char(0xED) && magic[3] == char(0xFE)) {
/* Reverse endian */
if (length >= 14) type = magic[13] << 8 | magic[12];
}
switch (type) {
- default: break;
- case 1: return Mach_O_Object_FileType;
+ default: break;
+ case 1: return Mach_O_Object_FileType;
case 2: return Mach_O_Executable_FileType;
case 3: return Mach_O_FixedVirtualMemorySharedLib_FileType;
case 4: return Mach_O_Core_FileType;
@@ -136,26 +136,23 @@ sys::IdentifyFileType(const char *magic, unsigned length) {
bool
Path::isArchive() const {
- if (canRead())
- return hasMagicNumber("!<arch>\012");
- return false;
+ return hasMagicNumber("!<arch>\012");
}
bool
Path::isDynamicLibrary() const {
- if (canRead()) {
- std::string Magic;
- if (getMagicNumber(Magic, 64))
- switch (IdentifyFileType(Magic.c_str(),
- static_cast<unsigned>(Magic.length()))) {
- default: return false;
- case Mach_O_FixedVirtualMemorySharedLib_FileType:
- case Mach_O_DynamicallyLinkedSharedLib_FileType:
- case Mach_O_DynamicallyLinkedSharedLibStub_FileType:
- case ELF_SharedObject_FileType:
- case COFF_FileType: return true;
- }
- }
+ std::string Magic;
+ if (getMagicNumber(Magic, 64))
+ switch (IdentifyFileType(Magic.c_str(),
+ static_cast<unsigned>(Magic.length()))) {
+ default: return false;
+ case Mach_O_FixedVirtualMemorySharedLib_FileType:
+ case Mach_O_DynamicallyLinkedSharedLib_FileType:
+ case Mach_O_DynamicallyLinkedSharedLibStub_FileType:
+ case ELF_SharedObject_FileType:
+ case COFF_FileType: return true;
+ }
+
return false;
}
@@ -222,38 +219,38 @@ static StringRef getDirnameCharSep(StringRef path, const char *Sep) {
"Sep must be a 1-character string literal.");
if (path.empty())
return ".";
-
+
// If the path is all slashes, return a single slash.
// Otherwise, remove all trailing slashes.
-
+
signed pos = static_cast<signed>(path.size()) - 1;
-
+
while (pos >= 0 && path[pos] == Sep[0])
--pos;
-
+
if (pos < 0)
return path[0] == Sep[0] ? Sep : ".";
-
+
// Any slashes left?
signed i = 0;
-
+
while (i < pos && path[i] != Sep[0])
++i;
-
+
if (i == pos) // No slashes? Return "."
return ".";
-
- // There is at least one slash left. Remove all trailing non-slashes.
+
+ // There is at least one slash left. Remove all trailing non-slashes.
while (pos >= 0 && path[pos] != Sep[0])
--pos;
-
+
// Remove any trailing slashes.
while (pos >= 0 && path[pos] == Sep[0])
--pos;
-
+
if (pos < 0)
return path[0] == Sep[0] ? Sep : ".";
-
+
return path.substr(0, pos+1);
}
diff --git a/libclamav/c++/llvm/lib/System/Program.cpp b/libclamav/c++/llvm/lib/System/Program.cpp
index a3049d4..cd58c2c 100644
--- a/libclamav/c++/llvm/lib/System/Program.cpp
+++ b/libclamav/c++/llvm/lib/System/Program.cpp
@@ -13,8 +13,7 @@
#include "llvm/System/Program.h"
#include "llvm/Config/config.h"
-
-namespace llvm {
+using namespace llvm;
using namespace sys;
//===----------------------------------------------------------------------===//
@@ -48,9 +47,6 @@ Program::ExecuteNoWait(const Path& path,
prg.Execute(path, args, envp, redirects, memoryLimit, ErrMsg);
}
-
-}
-
// Include the platform-specific parts of this class.
#ifdef LLVM_ON_UNIX
#include "Unix/Program.inc"
diff --git a/libclamav/c++/llvm/lib/System/RWMutex.cpp b/libclamav/c++/llvm/lib/System/RWMutex.cpp
index 5faf220..deb0470 100644
--- a/libclamav/c++/llvm/lib/System/RWMutex.cpp
+++ b/libclamav/c++/llvm/lib/System/RWMutex.cpp
@@ -71,23 +71,9 @@ RWMutexImpl::RWMutexImpl()
bzero(rwlock, sizeof(pthread_rwlock_t));
#endif
- pthread_rwlockattr_t attr;
-
- // Initialize the rwlock attributes
- int errorcode = pthread_rwlockattr_init(&attr);
- assert(errorcode == 0);
-
-#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) && !defined(__DragonFly__)
- // Make it a process local rwlock
- errorcode = pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
-#endif
-
// Initialize the rwlock
- errorcode = pthread_rwlock_init(rwlock, &attr);
- assert(errorcode == 0);
-
- // Destroy the attributes
- errorcode = pthread_rwlockattr_destroy(&attr);
+ int errorcode = pthread_rwlock_init(rwlock, NULL);
+ (void)errorcode;
assert(errorcode == 0);
// Assign the data member
diff --git a/libclamav/c++/llvm/lib/System/SearchForAddressOfSpecialSymbol.cpp b/libclamav/c++/llvm/lib/System/SearchForAddressOfSpecialSymbol.cpp
new file mode 100644
index 0000000..73b484c
--- /dev/null
+++ b/libclamav/c++/llvm/lib/System/SearchForAddressOfSpecialSymbol.cpp
@@ -0,0 +1,64 @@
+//===- SearchForAddressOfSpecialSymbol.cpp - Function addresses -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file pulls the addresses of certain symbols out of the linker. It must
+// include as few header files as possible because it declares the symbols as
+// void*, which would conflict with the actual symbol type if any header
+// declared it.
+//
+//===----------------------------------------------------------------------===//
+
+#include <string.h>
+
+// Must declare the symbols in the global namespace.
+static void *DoSearch(const char* symbolName) {
+#define EXPLICIT_SYMBOL(SYM) \
+ extern void *SYM; if (!strcmp(symbolName, #SYM)) return &SYM
+
+ // If this is darwin, it has some funky issues, try to solve them here. Some
+ // important symbols are marked 'private external' which doesn't allow
+ // SearchForAddressOfSymbol to find them. As such, we special case them here,
+ // there is only a small handful of them.
+
+#ifdef __APPLE__
+ {
+ EXPLICIT_SYMBOL(__ashldi3);
+ EXPLICIT_SYMBOL(__ashrdi3);
+ EXPLICIT_SYMBOL(__cmpdi2);
+ EXPLICIT_SYMBOL(__divdi3);
+ EXPLICIT_SYMBOL(__eprintf);
+ EXPLICIT_SYMBOL(__fixdfdi);
+ EXPLICIT_SYMBOL(__fixsfdi);
+ EXPLICIT_SYMBOL(__fixunsdfdi);
+ EXPLICIT_SYMBOL(__fixunssfdi);
+ EXPLICIT_SYMBOL(__floatdidf);
+ EXPLICIT_SYMBOL(__floatdisf);
+ EXPLICIT_SYMBOL(__lshrdi3);
+ EXPLICIT_SYMBOL(__moddi3);
+ EXPLICIT_SYMBOL(__udivdi3);
+ EXPLICIT_SYMBOL(__umoddi3);
+ }
+#endif
+
+#ifdef __CYGWIN__
+ {
+ EXPLICIT_SYMBOL(_alloca);
+ EXPLICIT_SYMBOL(__main);
+ }
+#endif
+
+#undef EXPLICIT_SYMBOL
+ return 0;
+}
+
+namespace llvm {
+void *SearchForAddressOfSpecialSymbol(const char* symbolName) {
+ return DoSearch(symbolName);
+}
+} // namespace llvm
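
EXPLICIT_SYMBOL works by declaring the requested symbol as an opaque void* (only its address is ever taken, so the real type does not matter) and comparing the requested name as a string. A tiny self-contained illustration of the same macro, using a made-up symbol in place of __ashldi3 so it links everywhere:

    #include <cstring>

    // Stand-in for a runtime-library symbol such as __ashldi3. In the real code
    // the symbol's true type is irrelevant: only its address is used.
    void *FakeRuntimeSymbol;

    static void *lookupSpecial(const char *symbolName) {
    #define EXPLICIT_SYMBOL(SYM) \
      extern void *SYM; if (!std::strcmp(symbolName, #SYM)) return &SYM
      EXPLICIT_SYMBOL(FakeRuntimeSymbol);   // Same pattern as __ashldi3 etc.
    #undef EXPLICIT_SYMBOL
      return 0;
    }

    int main() {
      return lookupSpecial("FakeRuntimeSymbol") == &FakeRuntimeSymbol ? 0 : 1;
    }
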
diff --git a/libclamav/c++/llvm/lib/System/ThreadLocal.cpp b/libclamav/c++/llvm/lib/System/ThreadLocal.cpp
index e7054b5..f6a55a1 100644
--- a/libclamav/c++/llvm/lib/System/ThreadLocal.cpp
+++ b/libclamav/c++/llvm/lib/System/ThreadLocal.cpp
@@ -27,6 +27,7 @@ ThreadLocalImpl::ThreadLocalImpl() { }
ThreadLocalImpl::~ThreadLocalImpl() { }
void ThreadLocalImpl::setInstance(const void* d) { data = const_cast<void*>(d);}
const void* ThreadLocalImpl::getInstance() { return data; }
+void ThreadLocalImpl::removeInstance() { data = 0; }
}
#else
@@ -67,6 +68,10 @@ const void* ThreadLocalImpl::getInstance() {
return pthread_getspecific(*key);
}
+void ThreadLocalImpl::removeInstance() {
+ setInstance(0);
+}
+
}
#elif defined(LLVM_ON_UNIX)
diff --git a/libclamav/c++/llvm/lib/System/Unix/Mutex.inc b/libclamav/c++/llvm/lib/System/Unix/Mutex.inc
index 10e7ecb..4a5e28d 100644
--- a/libclamav/c++/llvm/lib/System/Unix/Mutex.inc
+++ b/libclamav/c++/llvm/lib/System/Unix/Mutex.inc
@@ -29,12 +29,6 @@ MutexImpl::~MutexImpl()
}
bool
-MutexImpl::MutexImpl()
-{
- return true;
-}
-
-bool
MutexImpl::release()
{
return true;
diff --git a/libclamav/c++/llvm/lib/System/Unix/Path.inc b/libclamav/c++/llvm/lib/System/Unix/Path.inc
index a99720c..47e4d1a 100644
--- a/libclamav/c++/llvm/lib/System/Unix/Path.inc
+++ b/libclamav/c++/llvm/lib/System/Unix/Path.inc
@@ -276,20 +276,20 @@ Path::GetCurrentDirectory() {
char pathname[MAXPATHLEN];
if (!getcwd(pathname,MAXPATHLEN)) {
assert (false && "Could not query current working directory.");
- return Path("");
+ return Path();
}
return Path(pathname);
}
-#ifdef __FreeBSD__
+#if defined(__FreeBSD__) || defined (__NetBSD__) || defined(__minix)
static int
test_dir(char buf[PATH_MAX], char ret[PATH_MAX],
const char *dir, const char *bin)
{
struct stat sb;
- snprintf(buf, PATH_MAX, "%s//%s", dir, bin);
+ snprintf(buf, PATH_MAX, "%s/%s", dir, bin);
if (realpath(buf, ret) == NULL)
return (1);
if (stat(buf, &sb) != 0)
@@ -334,7 +334,7 @@ getprogpath(char ret[PATH_MAX], const char *bin)
free(pv);
return (NULL);
}
-#endif // __FreeBSD__
+#endif // __FreeBSD__ || __NetBSD__ || __minix
/// GetMainExecutable - Return the path to the main executable, given the
/// value of argv[0] from program startup.
@@ -350,7 +350,7 @@ Path Path::GetMainExecutable(const char *argv0, void *MainAddr) {
if (realpath(exe_path, link_path))
return Path(std::string(link_path));
}
-#elif defined(__FreeBSD__)
+#elif defined(__FreeBSD__) || defined (__NetBSD__) || defined(__minix)
char exe_path[PATH_MAX];
if (getprogpath(exe_path, argv0) != NULL)
@@ -408,7 +408,7 @@ Path::getSuffix() const {
std::string::size_type dot = path.rfind('.');
if (dot == std::string::npos || dot < slash)
- return StringRef("");
+ return StringRef();
else
return StringRef(path).substr(dot + 1);
}
@@ -421,10 +421,8 @@ bool Path::getMagicNumber(std::string &Magic, unsigned len) const {
return false;
ssize_t bytes_read = ::read(fd, Buf, len);
::close(fd);
- if (ssize_t(len) != bytes_read) {
- Magic.clear();
+ if (ssize_t(len) != bytes_read)
return false;
- }
Magic.assign(Buf, len);
return true;
}
@@ -454,7 +452,7 @@ Path::canWrite() const {
bool
Path::isRegularFile() const {
- // Get the status so we can determine if its a file or directory
+ // Get the status so we can determine if it's a file or directory
struct stat buf;
if (0 != stat(path.c_str(), &buf))
@@ -736,7 +734,7 @@ Path::createTemporaryFileOnDisk(bool reuse_current, std::string* ErrMsg) {
bool
Path::eraseFromDisk(bool remove_contents, std::string *ErrStr) const {
- // Get the status so we can determine if its a file or directory
+ // Get the status so we can determine if it's a file or directory.
struct stat buf;
if (0 != stat(path.c_str(), &buf)) {
MakeErrMsg(ErrStr, path + ": can't get status of file");
@@ -858,15 +856,20 @@ Path::makeUnique(bool reuse_current, std::string* ErrMsg) {
// Append an XXXXXX pattern to the end of the file for use with mkstemp,
// mktemp or our own implementation.
- std::string Buf(path);
+ // This uses std::vector instead of SmallVector to avoid a dependence on
+ // libSupport. And performance isn't critical here.
+ std::vector<char> Buf;
+ Buf.resize(path.size()+8);
+ char *FNBuffer = &Buf[0];
+ path.copy(FNBuffer,path.size());
if (isDirectory())
- Buf += "/XXXXXX";
+ strcpy(FNBuffer+path.size(), "/XXXXXX");
else
- Buf += "-XXXXXX";
+ strcpy(FNBuffer+path.size(), "-XXXXXX");
#if defined(HAVE_MKSTEMP)
int TempFD;
- if ((TempFD = mkstemp((char*)Buf.c_str())) == -1)
+ if ((TempFD = mkstemp(FNBuffer)) == -1)
return MakeErrMsg(ErrMsg, path + ": can't make unique filename");
// We don't need to hold the temp file descriptor... we will trust that no one
@@ -874,25 +877,30 @@ Path::makeUnique(bool reuse_current, std::string* ErrMsg) {
close(TempFD);
// Save the name
- path = Buf;
+ path = FNBuffer;
#elif defined(HAVE_MKTEMP)
// If we don't have mkstemp, use the old and obsolete mktemp function.
- if (mktemp(Buf.c_str()) == 0)
+ if (mktemp(FNBuffer) == 0)
return MakeErrMsg(ErrMsg, path + ": can't make unique filename");
// Save the name
- path = Buf;
+ path = FNBuffer;
#else
// Okay, looks like we have to do it all by our lonesome.
static unsigned FCounter = 0;
- unsigned offset = path.size() + 1;
- while (FCounter < 999999 && exists()) {
- sprintf(Buf.data()+offset, "%06u", ++FCounter);
- path = Buf;
- }
- if (FCounter > 999999)
- return MakeErrMsg(ErrMsg,
- path + ": can't make unique filename: too many files");
+ // Try to initialize with unique value.
+ if (FCounter == 0) FCounter = ((unsigned)getpid() & 0xFFFF) << 8;
+ char* pos = strstr(FNBuffer, "XXXXXX");
+ do {
+ if (++FCounter > 0xFFFFFF) {
+ return MakeErrMsg(ErrMsg,
+ path + ": can't make unique filename: too many files");
+ }
+ sprintf(pos, "%06X", FCounter);
+ path = FNBuffer;
+ } while (exists());
+ // POSSIBLE SECURITY BUG: An attacker can easily guess the name and exploit
+ // LLVM.
#endif
return false;
}
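
The switch from a std::string to a raw char buffer in makeUnique is forced by the template APIs: mkstemp() and mktemp() overwrite the XXXXXX placeholder in place, so they need writable storage (the old (char*)Buf.c_str() cast wrote into a buffer the string does not promise is modifiable). A minimal sketch of the mkstemp flow with a mutable buffer; the base path is illustrative:

    #include <cstdio>
    #include <cstdlib>
    #include <string>
    #include <unistd.h>
    #include <vector>

    // Create a unique temporary file next to `base`. mkstemp() rewrites the
    // trailing XXXXXX in place, so the name must live in writable storage.
    static std::string makeUniqueFile(const std::string &base) {
      std::vector<char> Buf(base.begin(), base.end());
      const char Pattern[] = "-XXXXXX";
      Buf.insert(Buf.end(), Pattern, Pattern + sizeof(Pattern));  // Adds the '\0'.
      int FD = mkstemp(&Buf[0]);
      if (FD == -1)
        return std::string();          // Caller checks for the empty string.
      close(FD);                       // Like the LLVM code, keep only the name.
      return std::string(&Buf[0]);
    }

    int main() {
      std::string Name = makeUniqueFile("/tmp/demo");
      std::printf("%s\n", Name.c_str());
      if (!Name.empty())
        std::remove(Name.c_str());     // Clean up the demo file.
      return 0;
    }
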
diff --git a/libclamav/c++/llvm/lib/System/Unix/Program.inc b/libclamav/c++/llvm/lib/System/Unix/Program.inc
index c10498a..0209f5a 100644
--- a/libclamav/c++/llvm/lib/System/Unix/Program.inc
+++ b/libclamav/c++/llvm/lib/System/Unix/Program.inc
@@ -30,6 +30,14 @@
#if HAVE_FCNTL_H
#include <fcntl.h>
#endif
+#ifdef HAVE_POSIX_SPAWN
+#include <spawn.h>
+#if !defined(__APPLE__)
+ extern char **environ;
+#else
+#include <crt_externs.h> // _NSGetEnviron
+#endif
+#endif
namespace llvm {
using namespace sys;
@@ -94,33 +102,52 @@ Program::FindProgramByName(const std::string& progName) {
}
static bool RedirectIO(const Path *Path, int FD, std::string* ErrMsg) {
- if (Path == 0)
- // Noop
+ if (Path == 0) // Noop
return false;
- std::string File;
+ const char *File;
if (Path->isEmpty())
// Redirect empty paths to /dev/null
File = "/dev/null";
else
- File = Path->str();
+ File = Path->c_str();
// Open the file
- int InFD = open(File.c_str(), FD == 0 ? O_RDONLY : O_WRONLY|O_CREAT, 0666);
+ int InFD = open(File, FD == 0 ? O_RDONLY : O_WRONLY|O_CREAT, 0666);
if (InFD == -1) {
- MakeErrMsg(ErrMsg, "Cannot open file '" + File + "' for "
+ MakeErrMsg(ErrMsg, "Cannot open file '" + std::string(File) + "' for "
+ (FD == 0 ? "input" : "output"));
return true;
}
// Install it as the requested FD
- if (-1 == dup2(InFD, FD)) {
+ if (dup2(InFD, FD) == -1) {
MakeErrMsg(ErrMsg, "Cannot dup2");
+ close(InFD);
return true;
}
close(InFD); // Close the original FD
return false;
}
+#ifdef HAVE_POSIX_SPAWN
+static bool RedirectIO_PS(const Path *Path, int FD, std::string *ErrMsg,
+ posix_spawn_file_actions_t &FileActions) {
+ if (Path == 0) // Noop
+ return false;
+ const char *File;
+ if (Path->isEmpty())
+ // Redirect empty paths to /dev/null
+ File = "/dev/null";
+ else
+ File = Path->c_str();
+
+ if (int Err = posix_spawn_file_actions_addopen(&FileActions, FD,
+ File, FD == 0 ? O_RDONLY : O_WRONLY|O_CREAT, 0666))
+ return MakeErrMsg(ErrMsg, "Cannot dup2", Err);
+ return false;
+}
+#endif
+
static void TimeOutHandler(int Sig) {
}
@@ -150,13 +177,55 @@ static void SetMemoryLimits (unsigned size)
}
bool
-Program::Execute(const Path& path,
- const char** args,
- const char** envp,
- const Path** redirects,
- unsigned memoryLimit,
- std::string* ErrMsg)
-{
+Program::Execute(const Path &path, const char **args, const char **envp,
+ const Path **redirects, unsigned memoryLimit,
+ std::string *ErrMsg) {
+ // If this OS has posix_spawn and there is no memory limit being implied, use
+ // posix_spawn. It is more efficient than fork/exec.
+#ifdef HAVE_POSIX_SPAWN
+ if (memoryLimit == 0) {
+ posix_spawn_file_actions_t FileActions;
+ posix_spawn_file_actions_init(&FileActions);
+
+ if (redirects) {
+ // Redirect stdin/stdout.
+ if (RedirectIO_PS(redirects[0], 0, ErrMsg, FileActions) ||
+ RedirectIO_PS(redirects[1], 1, ErrMsg, FileActions))
+ return false;
+ if (redirects[1] == 0 || redirects[2] == 0 ||
+ *redirects[1] != *redirects[2]) {
+ // Just redirect stderr
+ if (RedirectIO_PS(redirects[2], 2, ErrMsg, FileActions)) return false;
+ } else {
+ // If stdout and stderr should go to the same place, redirect stderr
+ // to the FD already open for stdout.
+ if (int Err = posix_spawn_file_actions_adddup2(&FileActions, 1, 2))
+ return !MakeErrMsg(ErrMsg, "Can't redirect stderr to stdout", Err);
+ }
+ }
+
+ if (!envp)
+#if !defined(__APPLE__)
+ envp = const_cast<const char **>(environ);
+#else
+ // environ is missing in dylibs.
+ envp = const_cast<const char **>(*_NSGetEnviron());
+#endif
+
+ pid_t PID;
+ int Err = posix_spawn(&PID, path.c_str(), &FileActions, /*attrp*/0,
+ const_cast<char **>(args), const_cast<char **>(envp));
+
+ posix_spawn_file_actions_destroy(&FileActions);
+
+ if (Err)
+ return !MakeErrMsg(ErrMsg, "posix_spawn failed", Err);
+
+ Data_ = reinterpret_cast<void*>(PID);
+ return true;
+ }
+#endif
+
if (!path.canExecute()) {
if (ErrMsg)
*ErrMsg = path.str() + " is not executable";
@@ -200,9 +269,12 @@ Program::Execute(const Path& path,
// Execute!
if (envp != 0)
- execve(path.c_str(), (char**)args, (char**)envp);
+ execve(path.c_str(),
+ const_cast<char **>(args),
+ const_cast<char **>(envp));
else
- execv(path.c_str(), (char**)args);
+ execv(path.c_str(),
+ const_cast<char **>(args));
// If the execve() failed, we should exit. Follow Unix protocol and
// return 127 if the executable was not found, and 126 otherwise.
// Use _exit rather than exit so that atexit functions and static
@@ -238,10 +310,9 @@ Program::Wait(unsigned secondsToWait,
// fact of having a handler at all causes the wait below to return with EINTR,
// unlike if we used SIG_IGN.
if (secondsToWait) {
- Act.sa_sigaction = 0;
+ memset(&Act, 0, sizeof(Act));
Act.sa_handler = TimeOutHandler;
sigemptyset(&Act.sa_mask);
- Act.sa_flags = 0;
sigaction(SIGALRM, &Act, &Old);
alarm(secondsToWait);
}
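
The posix_spawn path added above avoids the cost of fork() and keeps the redirection declarative: file actions describe the open/dup2 calls the child performs before exec. A minimal standalone sketch of the same flow, redirecting a child's stdout to a file; the program and file names are illustrative only:

    #include <cstdio>
    #include <fcntl.h>
    #include <spawn.h>
    #include <sys/wait.h>

    extern char **environ;   // Environment to pass through, as the LLVM code does.

    int main() {
      posix_spawn_file_actions_t FA;
      posix_spawn_file_actions_init(&FA);
      // Have the child open out.txt as fd 1 (stdout) and mirror it onto fd 2.
      posix_spawn_file_actions_addopen(&FA, 1, "out.txt",
                                       O_WRONLY | O_CREAT | O_TRUNC, 0666);
      posix_spawn_file_actions_adddup2(&FA, 1, 2);

      char *const Argv[] = {(char *)"/bin/echo", (char *)"hello", 0};
      pid_t PID;
      int Err = posix_spawn(&PID, "/bin/echo", &FA, /*attrp=*/0, Argv, environ);
      posix_spawn_file_actions_destroy(&FA);
      if (Err) {
        std::fprintf(stderr, "posix_spawn failed: %d\n", Err);
        return 1;
      }
      int Status;
      waitpid(PID, &Status, 0);        // Reap the child, like Program::Wait.
      return WIFEXITED(Status) ? WEXITSTATUS(Status) : 1;
    }
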
diff --git a/libclamav/c++/llvm/lib/System/Unix/Signals.inc b/libclamav/c++/llvm/lib/System/Unix/Signals.inc
index e5ec4df..20725b6 100644
--- a/libclamav/c++/llvm/lib/System/Unix/Signals.inc
+++ b/libclamav/c++/llvm/lib/System/Unix/Signals.inc
@@ -39,8 +39,8 @@ static SmartMutex<true> SignalsMutex;
/// InterruptFunction - The function to call if ctrl-c is pressed.
static void (*InterruptFunction)() = 0;
-static std::vector<sys::Path> *FilesToRemove = 0;
-static std::vector<std::pair<void(*)(void*), void*> > *CallBacksToRun = 0;
+static std::vector<sys::Path> FilesToRemove;
+static std::vector<std::pair<void(*)(void*), void*> > CallBacksToRun;
// IntSigs - Signals that may interrupt the program at any time.
static const int IntSigs[] = {
@@ -111,6 +111,14 @@ static void UnregisterHandlers() {
}
+/// RemoveFilesToRemove - Process the FilesToRemove list. This function
+/// should be called with the SignalsMutex lock held.
+static void RemoveFilesToRemove() {
+ while (!FilesToRemove.empty()) {
+ FilesToRemove.back().eraseFromDisk(true);
+ FilesToRemove.pop_back();
+ }
+}
// SignalHandler - The signal handler that runs.
static RETSIGTYPE SignalHandler(int Sig) {
@@ -126,11 +134,7 @@ static RETSIGTYPE SignalHandler(int Sig) {
sigprocmask(SIG_UNBLOCK, &SigMask, 0);
SignalsMutex.acquire();
- if (FilesToRemove != 0)
- while (!FilesToRemove->empty()) {
- FilesToRemove->back().eraseFromDisk(true);
- FilesToRemove->pop_back();
- }
+ RemoveFilesToRemove();
if (std::find(IntSigs, IntSigsEnd, Sig) != IntSigsEnd) {
if (InterruptFunction) {
@@ -149,12 +153,15 @@ static RETSIGTYPE SignalHandler(int Sig) {
SignalsMutex.release();
// Otherwise if it is a fault (like SEGV) run any handler.
- if (CallBacksToRun)
- for (unsigned i = 0, e = CallBacksToRun->size(); i != e; ++i)
- (*CallBacksToRun)[i].first((*CallBacksToRun)[i].second);
+ for (unsigned i = 0, e = CallBacksToRun.size(); i != e; ++i)
+ CallBacksToRun[i].first(CallBacksToRun[i].second);
}
-
+void llvm::sys::RunInterruptHandlers() {
+ SignalsMutex.acquire();
+ RemoveFilesToRemove();
+ SignalsMutex.release();
+}
void llvm::sys::SetInterruptFunction(void (*IF)()) {
SignalsMutex.acquire();
@@ -167,10 +174,7 @@ void llvm::sys::SetInterruptFunction(void (*IF)()) {
bool llvm::sys::RemoveFileOnSignal(const sys::Path &Filename,
std::string* ErrMsg) {
SignalsMutex.acquire();
- if (FilesToRemove == 0)
- FilesToRemove = new std::vector<sys::Path>();
-
- FilesToRemove->push_back(Filename);
+ FilesToRemove.push_back(Filename);
SignalsMutex.release();
@@ -178,13 +182,21 @@ bool llvm::sys::RemoveFileOnSignal(const sys::Path &Filename,
return false;
}
+// DontRemoveFileOnSignal - The public API
+void llvm::sys::DontRemoveFileOnSignal(const sys::Path &Filename) {
+ SignalsMutex.acquire();
+ std::vector<sys::Path>::reverse_iterator I =
+ std::find(FilesToRemove.rbegin(), FilesToRemove.rend(), Filename);
+ if (I != FilesToRemove.rend())
+ FilesToRemove.erase(I.base()-1);
+ SignalsMutex.release();
+}
+
/// AddSignalHandler - Add a function to be called when a signal is delivered
/// to the process. The handler can have a cookie passed to it to identify
/// what instance of the handler it is.
void llvm::sys::AddSignalHandler(void (*FnPtr)(void *), void *Cookie) {
- if (CallBacksToRun == 0)
- CallBacksToRun = new std::vector<std::pair<void(*)(void*), void*> >();
- CallBacksToRun->push_back(std::make_pair(FnPtr, Cookie));
+ CallBacksToRun.push_back(std::make_pair(FnPtr, Cookie));
RegisterHandlers();
}
@@ -252,3 +264,37 @@ void llvm::sys::PrintStackTraceOnErrorSignal() {
AddSignalHandler(PrintStackTrace, 0);
}
+
+/***/
+
+// On Darwin, raise sends a signal to the main thread instead of the current
+// thread. This has the unfortunate effect that assert() and abort() will end up
+// bypassing our crash recovery attempts. We work around this for anything in
+// the same linkage unit by just defining our own versions of the assert handler
+// and abort.
+
+#ifdef __APPLE__
+
+void __assert_rtn(const char *func,
+ const char *file,
+ int line,
+ const char *expr) {
+ if (func)
+ fprintf(stderr, "Assertion failed: (%s), function %s, file %s, line %d.\n",
+ expr, func, file, line);
+ else
+ fprintf(stderr, "Assertion failed: (%s), file %s, line %d.\n",
+ expr, file, line);
+ abort();
+}
+
+#include <signal.h>
+#include <pthread.h>
+
+void abort() {
+ pthread_kill(pthread_self(), SIGABRT);
+ usleep(1000);
+ __builtin_trap();
+}
+
+#endif
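
DontRemoveFileOnSignal above removes the most recently registered match, which is why it searches with reverse iterators and erases via I.base()-1: a reverse_iterator's base() points one element past the element it refers to. A small sketch of that idiom on a plain vector, with made-up data just to show the index arithmetic:

    #include <algorithm>
    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> Files;
      Files.push_back("a.o");
      Files.push_back("b.o");
      Files.push_back("a.o");          // Registered twice; drop the newest one.

      std::vector<std::string>::reverse_iterator I =
          std::find(Files.rbegin(), Files.rend(), "a.o");
      if (I != Files.rend())
        Files.erase(I.base() - 1);     // base() is one past the found element.

      assert(Files.size() == 2 && Files[0] == "a.o" && Files[1] == "b.o");
      return 0;
    }
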
diff --git a/libclamav/c++/llvm/lib/System/Unix/ThreadLocal.inc b/libclamav/c++/llvm/lib/System/Unix/ThreadLocal.inc
index 83d554d..6769520 100644
--- a/libclamav/c++/llvm/lib/System/Unix/ThreadLocal.inc
+++ b/libclamav/c++/llvm/lib/System/Unix/ThreadLocal.inc
@@ -22,4 +22,5 @@ ThreadLocalImpl::ThreadLocalImpl() { }
ThreadLocalImpl::~ThreadLocalImpl() { }
void ThreadLocalImpl::setInstance(const void* d) { data = const_cast<void*>(d);}
const void* ThreadLocalImpl::getInstance() { return data; }
+void ThreadLocalImpl::removeInstance() { setInstance(0); }
}
diff --git a/libclamav/c++/llvm/lib/System/Valgrind.cpp b/libclamav/c++/llvm/lib/System/Valgrind.cpp
new file mode 100644
index 0000000..c76cfe4
--- /dev/null
+++ b/libclamav/c++/llvm/lib/System/Valgrind.cpp
@@ -0,0 +1,54 @@
+//===-- Valgrind.cpp - Implement Valgrind communication ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines Valgrind communication methods, if HAVE_VALGRIND_VALGRIND_H is
+// defined. If we have valgrind.h but valgrind isn't running, its macros are
+// no-ops.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/System/Valgrind.h"
+#include "llvm/Config/config.h"
+
+#if HAVE_VALGRIND_VALGRIND_H
+#include <valgrind/valgrind.h>
+
+static bool InitNotUnderValgrind() {
+ return !RUNNING_ON_VALGRIND;
+}
+
+// This bool is negated from what we'd expect because code may run before it
+// gets initialized. If that happens, it will appear to be 0 (false), and we
+// want that to cause the rest of the code in this file to run the
+// Valgrind-provided macros.
+static const bool NotUnderValgrind = InitNotUnderValgrind();
+
+bool llvm::sys::RunningOnValgrind() {
+ if (NotUnderValgrind)
+ return false;
+ return RUNNING_ON_VALGRIND;
+}
+
+void llvm::sys::ValgrindDiscardTranslations(const void *Addr, size_t Len) {
+ if (NotUnderValgrind)
+ return;
+
+ VALGRIND_DISCARD_TRANSLATIONS(Addr, Len);
+}
+
+#else // !HAVE_VALGRIND_VALGRIND_H
+
+bool llvm::sys::RunningOnValgrind() {
+ return false;
+}
+
+void llvm::sys::ValgrindDiscardTranslations(const void *Addr, size_t Len) {
+}
+
+#endif // !HAVE_VALGRIND_VALGRIND_H
diff --git a/libclamav/c++/llvm/lib/System/Win32/Path.inc b/libclamav/c++/llvm/lib/System/Win32/Path.inc
index b5f6374..4a6dbd3 100644
--- a/libclamav/c++/llvm/lib/System/Win32/Path.inc
+++ b/libclamav/c++/llvm/lib/System/Win32/Path.inc
@@ -126,7 +126,7 @@ Path::isValid() const {
}
void Path::makeAbsolute() {
- TCHAR FullPath[MAX_PATH + 1] = {0};
+ TCHAR FullPath[MAX_PATH + 1] = {0};
LPTSTR FilePart = NULL;
DWORD RetLength = ::GetFullPathNameA(path.c_str(),
@@ -161,7 +161,7 @@ Path::isAbsolute(const char *NameStart, unsigned NameLen) {
}
}
-bool
+bool
Path::isAbsolute() const {
// FIXME: This does not handle correctly an absolute path starting from
// a drive letter or in UNC format.
@@ -174,9 +174,9 @@ Path::isAbsolute() const {
default:
return path[0] == '/' || (path[1] == ':' && path[2] == '/');
}
-}
+}
-static Path *TempDirectory = NULL;
+static Path *TempDirectory;
Path
Path::GetTemporaryDirectory(std::string* ErrMsg) {
@@ -266,7 +266,7 @@ Path
Path::GetCurrentDirectory() {
char pathname[MAX_PATH];
::GetCurrentDirectoryA(MAX_PATH,pathname);
- return Path(pathname);
+ return Path(pathname);
}
/// GetMainExecutable - Return the path to the main executable, given the
@@ -281,12 +281,6 @@ Path Path::GetMainExecutable(const char *argv0, void *MainAddr) {
// FIXME: the above set of functions don't map to Windows very well.
-bool
-Path::isRootDirectory() const {
- size_t len = path.size();
- return len > 0 && path[len-1] == '/';
-}
-
StringRef Path::getDirname() const {
return getDirnameCharSep(path, "/");
}
@@ -406,8 +400,10 @@ PathWithStatus::getFileStatus(bool update, std::string *ErrStr) const {
for (unsigned i = 0; i < path.length(); ++i)
status.uniqueID += path[i];
- __int64 ft = *reinterpret_cast<__int64*>(&fi.ftLastWriteTime);
- status.modTime.fromWin32Time(ft);
+ ULARGE_INTEGER ui;
+ ui.LowPart = fi.ftLastWriteTime.dwLowDateTime;
+ ui.HighPart = fi.ftLastWriteTime.dwHighDateTime;
+ status.modTime.fromWin32Time(ui.QuadPart);
status.isDir = fi.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY;
fsIsValid = true;
@@ -448,7 +444,7 @@ Path::getDirectoryContents(std::set<Path>& result, std::string* ErrMsg) const {
MakeErrMsg(ErrMsg, path + ": can't get status of file");
return true;
}
-
+
if (!(fi.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
if (ErrMsg)
*ErrMsg = path + ": not a directory";
@@ -617,7 +613,7 @@ Path::createDirectoryOnDisk(bool create_parents, std::string* ErrMsg) {
*next = 0;
if (!CreateDirectory(pathname, NULL) &&
GetLastError() != ERROR_ALREADY_EXISTS)
- return MakeErrMsg(ErrMsg,
+ return MakeErrMsg(ErrMsg,
std::string(pathname) + ": Can't create directory: ");
*next++ = '/';
}
@@ -649,7 +645,7 @@ Path::eraseFromDisk(bool remove_contents, std::string *ErrStr) const {
WIN32_FILE_ATTRIBUTE_DATA fi;
if (!GetFileAttributesEx(path.c_str(), GetFileExInfoStandard, &fi))
return true;
-
+
if (fi.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
// If it doesn't exist, we're done.
if (!exists())
@@ -706,7 +702,7 @@ Path::eraseFromDisk(bool remove_contents, std::string *ErrStr) const {
pathname[lastchar] = 0;
if (!RemoveDirectory(pathname))
- return MakeErrMsg(ErrStr,
+ return MakeErrMsg(ErrStr,
std::string(pathname) + ": Can't destroy directory: ");
return false;
} else {
@@ -726,7 +722,7 @@ Path::eraseFromDisk(bool remove_contents, std::string *ErrStr) const {
bool Path::getMagicNumber(std::string& Magic, unsigned len) const {
assert(len < 1024 && "Request for magic string too long");
- char* buf = (char*) alloca(1 + len);
+ char* buf = reinterpret_cast<char*>(alloca(len));
HANDLE h = CreateFile(path.c_str(),
GENERIC_READ,
@@ -745,15 +741,14 @@ bool Path::getMagicNumber(std::string& Magic, unsigned len) const {
if (!ret || nRead != len)
return false;
- buf[len] = '\0';
- Magic = buf;
+ Magic = std::string(buf, len);
return true;
}
bool
Path::renamePathOnDisk(const Path& newName, std::string* ErrMsg) {
if (!MoveFileEx(path.c_str(), newName.c_str(), MOVEFILE_REPLACE_EXISTING))
- return MakeErrMsg(ErrMsg, "Can't move '" + path + "' to '" + newName.path
+ return MakeErrMsg(ErrMsg, "Can't move '" + path + "' to '" + newName.path
+ "': ");
return false;
}
@@ -764,7 +759,7 @@ Path::setStatusInfoOnDisk(const FileStatus &si, std::string *ErrMsg) const {
if (!si.isFile) {
return true;
}
-
+
HANDLE h = CreateFile(path.c_str(),
FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES,
FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
@@ -783,8 +778,11 @@ Path::setStatusInfoOnDisk(const FileStatus &si, std::string *ErrMsg) const {
return MakeErrMsg(ErrMsg, path + ": GetFileInformationByHandle: ");
}
+ ULARGE_INTEGER ui;
+ ui.QuadPart = si.modTime.toWin32Time();
FILETIME ft;
- (uint64_t&)ft = si.modTime.toWin32Time();
+ ft.dwLowDateTime = ui.LowPart;
+ ft.dwHighDateTime = ui.HighPart;
BOOL ret = SetFileTime(h, NULL, &ft, &ft);
DWORD err = GetLastError();
CloseHandle(h);
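
Both Win32/Path.inc hunks replace a reinterpret_cast between FILETIME and a 64-bit integer with an explicit ULARGE_INTEGER round-trip, which sidesteps alignment and strict-aliasing problems. A hedged sketch of the two conversion helpers (Windows-only, of course):

    #include <windows.h>

    // Convert FILETIME <-> a single 64-bit tick count without type punning.
    static ULONGLONG fileTimeToU64(const FILETIME &FT) {
      ULARGE_INTEGER UI;
      UI.LowPart = FT.dwLowDateTime;
      UI.HighPart = FT.dwHighDateTime;
      return UI.QuadPart;
    }

    static FILETIME u64ToFileTime(ULONGLONG Ticks) {
      ULARGE_INTEGER UI;
      UI.QuadPart = Ticks;
      FILETIME FT;
      FT.dwLowDateTime = UI.LowPart;
      FT.dwHighDateTime = UI.HighPart;
      return FT;
    }

    int main() {
      FILETIME Now;
      GetSystemTimeAsFileTime(&Now);
      ULONGLONG Ticks = fileTimeToU64(Now);
      return fileTimeToU64(u64ToFileTime(Ticks)) == Ticks ? 0 : 1;
    }
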
diff --git a/libclamav/c++/llvm/lib/System/Win32/Program.inc b/libclamav/c++/llvm/lib/System/Win32/Program.inc
index a3b40d0..16bb28e 100644
--- a/libclamav/c++/llvm/lib/System/Win32/Program.inc
+++ b/libclamav/c++/llvm/lib/System/Win32/Program.inc
@@ -138,6 +138,24 @@ static bool ArgNeedsQuotes(const char *Str) {
return Str[0] == '\0' || strchr(Str, ' ') != 0;
}
+
+/// ArgLenWithQuotes - Check whether argument needs to be quoted when calling
+/// CreateProcess and returns length of quoted arg with escaped quotes
+static unsigned int ArgLenWithQuotes(const char *Str) {
+ unsigned int len = ArgNeedsQuotes(Str) ? 2 : 0;
+
+ while (*Str != '\0') {
+ if (*Str == '\"')
+ ++len;
+
+ ++len;
+ ++Str;
+ }
+
+ return len;
+}
+
+
bool
Program::Execute(const Path& path,
const char** args,
@@ -165,9 +183,7 @@ Program::Execute(const Path& path,
// First, determine the length of the command line.
unsigned len = 0;
for (unsigned i = 0; args[i]; i++) {
- len += strlen(args[i]) + 1;
- if (ArgNeedsQuotes(args[i]))
- len += 2;
+ len += ArgLenWithQuotes(args[i]) + 1;
}
// Now build the command line.
@@ -176,12 +192,18 @@ Program::Execute(const Path& path,
for (unsigned i = 0; args[i]; i++) {
const char *arg = args[i];
- size_t len = strlen(arg);
+
bool needsQuoting = ArgNeedsQuotes(arg);
if (needsQuoting)
*p++ = '"';
- memcpy(p, arg, len);
- p += len;
+
+ while (*arg != '\0') {
+ if (*arg == '\"')
+ *p++ = '\\';
+
+ *p++ = *arg++;
+ }
+
if (needsQuoting)
*p++ = '"';
*p++ = ' ';
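
The Win32 Program.inc change sizes and builds the command line so that embedded double quotes survive: ArgLenWithQuotes counts an extra byte per '"', and the copy loop emits a backslash before each one. A compact sketch of the same quoting rule using std::string; this is the simplified rule the code above implements, not the full CommandLineToArgvW grammar:

    #include <cstring>
    #include <iostream>
    #include <string>

    // Quote an argument for CreateProcess: wrap it in quotes when it is empty or
    // contains a space, and escape embedded double quotes with a backslash.
    static std::string quoteArg(const char *Arg) {
      bool NeedsQuotes = Arg[0] == '\0' || std::strchr(Arg, ' ') != 0;
      std::string Out;
      if (NeedsQuotes) Out += '"';
      for (const char *P = Arg; *P; ++P) {
        if (*P == '"')
          Out += '\\';     // Escape the embedded quote.
        Out += *P;
      }
      if (NeedsQuotes) Out += '"';
      return Out;
    }

    int main() {
      std::cout << quoteArg("plain") << "\n";            // plain
      std::cout << quoteArg("has space") << "\n";        // "has space"
      std::cout << quoteArg("say \"hi\"") << "\n";       // "say \"hi\""
      return 0;
    }
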
diff --git a/libclamav/c++/llvm/lib/System/Win32/Signals.inc b/libclamav/c++/llvm/lib/System/Win32/Signals.inc
index f2b72ca..2498a26 100644
--- a/libclamav/c++/llvm/lib/System/Win32/Signals.inc
+++ b/libclamav/c++/llvm/lib/System/Win32/Signals.inc
@@ -140,6 +140,20 @@ bool sys::RemoveFileOnSignal(const sys::Path &Filename, std::string* ErrMsg) {
return false;
}
+// DontRemoveFileOnSignal - The public API
+void sys::DontRemoveFileOnSignal(const sys::Path &Filename) {
+ if (FilesToRemove == NULL)
+ return;
+
+ FilesToRemove->push_back(Filename);
+ std::vector<sys::Path>::reverse_iterator I =
+ std::find(FilesToRemove->rbegin(), FilesToRemove->rend(), Filename);
+ if (I != FilesToRemove->rend())
+ FilesToRemove->erase(I.base()-1);
+
+ LeaveCriticalSection(&CriticalSection);
+}
+
/// PrintStackTraceOnErrorSignal - When an error signal (such as SIBABRT or
/// SIGSEGV) is delivered to the process, print a stack trace and then exit.
void sys::PrintStackTraceOnErrorSignal() {
@@ -189,6 +203,10 @@ static void Cleanup() {
LeaveCriticalSection(&CriticalSection);
}
+void llvm::sys::RunInterruptHandlers() {
+ Cleanup();
+}
+
static LONG WINAPI LLVMUnhandledExceptionFilter(LPEXCEPTION_POINTERS ep) {
try {
Cleanup();
@@ -279,7 +297,7 @@ static LONG WINAPI LLVMUnhandledExceptionFilter(LPEXCEPTION_POINTERS ep) {
#ifdef _MSC_VER
if (ExitOnUnhandledExceptions)
- _exit(-3);
+ _exit(-3);
#endif
// Allow dialog box to pop up allowing choice to start debugger.
diff --git a/libclamav/c++/llvm/lib/System/Win32/ThreadLocal.inc b/libclamav/c++/llvm/lib/System/Win32/ThreadLocal.inc
index c8f7840..b8b933c 100644
--- a/libclamav/c++/llvm/lib/System/Win32/ThreadLocal.inc
+++ b/libclamav/c++/llvm/lib/System/Win32/ThreadLocal.inc
@@ -46,4 +46,8 @@ void ThreadLocalImpl::setInstance(const void* d){
assert(errorcode != 0);
}
+void ThreadLocalImpl::removeInstance() {
+ setInstance(0);
+}
+
}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARM.h b/libclamav/c++/llvm/lib/Target/ARM/ARM.h
deleted file mode 100644
index b08f942..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARM.h
+++ /dev/null
@@ -1,121 +0,0 @@
-//===-- ARM.h - Top-level interface for ARM representation---- --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the entry points for global functions defined in the LLVM
-// ARM back-end.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef TARGET_ARM_H
-#define TARGET_ARM_H
-
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetMachine.h"
-#include <cassert>
-
-namespace llvm {
-
-class ARMBaseTargetMachine;
-class FunctionPass;
-class JITCodeEmitter;
-class formatted_raw_ostream;
-
-// Enums corresponding to ARM condition codes
-namespace ARMCC {
- // The CondCodes constants map directly to the 4-bit encoding of the
- // condition field for predicated instructions.
- enum CondCodes {
- EQ,
- NE,
- HS,
- LO,
- MI,
- PL,
- VS,
- VC,
- HI,
- LS,
- GE,
- LT,
- GT,
- LE,
- AL
- };
-
- inline static CondCodes getOppositeCondition(CondCodes CC){
- switch (CC) {
- default: llvm_unreachable("Unknown condition code");
- case EQ: return NE;
- case NE: return EQ;
- case HS: return LO;
- case LO: return HS;
- case MI: return PL;
- case PL: return MI;
- case VS: return VC;
- case VC: return VS;
- case HI: return LS;
- case LS: return HI;
- case GE: return LT;
- case LT: return GE;
- case GT: return LE;
- case LE: return GT;
- }
- }
-}
-
-inline static const char *ARMCondCodeToString(ARMCC::CondCodes CC) {
- switch (CC) {
- default: llvm_unreachable("Unknown condition code");
- case ARMCC::EQ: return "eq";
- case ARMCC::NE: return "ne";
- case ARMCC::HS: return "hs";
- case ARMCC::LO: return "lo";
- case ARMCC::MI: return "mi";
- case ARMCC::PL: return "pl";
- case ARMCC::VS: return "vs";
- case ARMCC::VC: return "vc";
- case ARMCC::HI: return "hi";
- case ARMCC::LS: return "ls";
- case ARMCC::GE: return "ge";
- case ARMCC::LT: return "lt";
- case ARMCC::GT: return "gt";
- case ARMCC::LE: return "le";
- case ARMCC::AL: return "al";
- }
-}
-
-FunctionPass *createARMISelDag(ARMBaseTargetMachine &TM,
- CodeGenOpt::Level OptLevel);
-
-FunctionPass *createARMJITCodeEmitterPass(ARMBaseTargetMachine &TM,
- JITCodeEmitter &JCE);
-
-FunctionPass *createARMLoadStoreOptimizationPass(bool PreAlloc = false);
-FunctionPass *createARMExpandPseudoPass();
-FunctionPass *createARMConstantIslandPass();
-FunctionPass *createNEONPreAllocPass();
-FunctionPass *createNEONMoveFixPass();
-FunctionPass *createThumb2ITBlockPass();
-FunctionPass *createThumb2SizeReductionPass();
-
-extern Target TheARMTarget, TheThumbTarget;
-
-} // end namespace llvm;
-
-// Defines symbolic names for ARM registers. This defines a mapping from
-// register name to register number.
-//
-#include "ARMGenRegisterNames.inc"
-
-// Defines symbolic names for the ARM instructions.
-//
-#include "ARMGenInstrNames.inc"
-
-
-#endif
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARM.td b/libclamav/c++/llvm/lib/Target/ARM/ARM.td
deleted file mode 100644
index 7033861..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARM.td
+++ /dev/null
@@ -1,149 +0,0 @@
-//===- ARM.td - Describe the ARM Target Machine -----------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Target-independent interfaces which we are implementing
-//===----------------------------------------------------------------------===//
-
-include "llvm/Target/Target.td"
-
-//===----------------------------------------------------------------------===//
-// ARM Subtarget features.
-//
-
-def ArchV4T : SubtargetFeature<"v4t", "ARMArchVersion", "V4T",
- "ARM v4T">;
-def ArchV5T : SubtargetFeature<"v5t", "ARMArchVersion", "V5T",
- "ARM v5T">;
-def ArchV5TE : SubtargetFeature<"v5te", "ARMArchVersion", "V5TE",
- "ARM v5TE, v5TEj, v5TExp">;
-def ArchV6 : SubtargetFeature<"v6", "ARMArchVersion", "V6",
- "ARM v6">;
-def ArchV6T2 : SubtargetFeature<"v6t2", "ARMArchVersion", "V6T2",
- "ARM v6t2">;
-def ArchV7A : SubtargetFeature<"v7a", "ARMArchVersion", "V7A",
- "ARM v7A">;
-def FeatureVFP2 : SubtargetFeature<"vfp2", "ARMFPUType", "VFPv2",
- "Enable VFP2 instructions">;
-def FeatureVFP3 : SubtargetFeature<"vfp3", "ARMFPUType", "VFPv3",
- "Enable VFP3 instructions">;
-def FeatureNEON : SubtargetFeature<"neon", "ARMFPUType", "NEON",
- "Enable NEON instructions">;
-def FeatureThumb2 : SubtargetFeature<"thumb2", "ThumbMode", "Thumb2",
- "Enable Thumb2 instructions">;
-
-//===----------------------------------------------------------------------===//
-// ARM Processors supported.
-//
-
-include "ARMSchedule.td"
-
-class ProcNoItin<string Name, list<SubtargetFeature> Features>
- : Processor<Name, GenericItineraries, Features>;
-
-// V4 Processors.
-def : ProcNoItin<"generic", []>;
-def : ProcNoItin<"arm8", []>;
-def : ProcNoItin<"arm810", []>;
-def : ProcNoItin<"strongarm", []>;
-def : ProcNoItin<"strongarm110", []>;
-def : ProcNoItin<"strongarm1100", []>;
-def : ProcNoItin<"strongarm1110", []>;
-
-// V4T Processors.
-def : ProcNoItin<"arm7tdmi", [ArchV4T]>;
-def : ProcNoItin<"arm7tdmi-s", [ArchV4T]>;
-def : ProcNoItin<"arm710t", [ArchV4T]>;
-def : ProcNoItin<"arm720t", [ArchV4T]>;
-def : ProcNoItin<"arm9", [ArchV4T]>;
-def : ProcNoItin<"arm9tdmi", [ArchV4T]>;
-def : ProcNoItin<"arm920", [ArchV4T]>;
-def : ProcNoItin<"arm920t", [ArchV4T]>;
-def : ProcNoItin<"arm922t", [ArchV4T]>;
-def : ProcNoItin<"arm940t", [ArchV4T]>;
-def : ProcNoItin<"ep9312", [ArchV4T]>;
-
-// V5T Processors.
-def : ProcNoItin<"arm10tdmi", [ArchV5T]>;
-def : ProcNoItin<"arm1020t", [ArchV5T]>;
-
-// V5TE Processors.
-def : ProcNoItin<"arm9e", [ArchV5TE]>;
-def : ProcNoItin<"arm926ej-s", [ArchV5TE]>;
-def : ProcNoItin<"arm946e-s", [ArchV5TE]>;
-def : ProcNoItin<"arm966e-s", [ArchV5TE]>;
-def : ProcNoItin<"arm968e-s", [ArchV5TE]>;
-def : ProcNoItin<"arm10e", [ArchV5TE]>;
-def : ProcNoItin<"arm1020e", [ArchV5TE]>;
-def : ProcNoItin<"arm1022e", [ArchV5TE]>;
-def : ProcNoItin<"xscale", [ArchV5TE]>;
-def : ProcNoItin<"iwmmxt", [ArchV5TE]>;
-
-// V6 Processors.
-def : Processor<"arm1136j-s", ARMV6Itineraries, [ArchV6]>;
-def : Processor<"arm1136jf-s", ARMV6Itineraries, [ArchV6, FeatureVFP2]>;
-def : Processor<"arm1176jz-s", ARMV6Itineraries, [ArchV6]>;
-def : Processor<"arm1176jzf-s", ARMV6Itineraries, [ArchV6, FeatureVFP2]>;
-def : Processor<"mpcorenovfp", ARMV6Itineraries, [ArchV6]>;
-def : Processor<"mpcore", ARMV6Itineraries, [ArchV6, FeatureVFP2]>;
-
-// V6T2 Processors.
-def : Processor<"arm1156t2-s", ARMV6Itineraries,
- [ArchV6T2, FeatureThumb2]>;
-def : Processor<"arm1156t2f-s", ARMV6Itineraries,
- [ArchV6T2, FeatureThumb2, FeatureVFP2]>;
-
-// V7 Processors.
-def : Processor<"cortex-a8", CortexA8Itineraries,
- [ArchV7A, FeatureThumb2, FeatureNEON]>;
-def : ProcNoItin<"cortex-a9", [ArchV7A, FeatureThumb2, FeatureNEON]>;
-
-//===----------------------------------------------------------------------===//
-// Register File Description
-//===----------------------------------------------------------------------===//
-
-include "ARMRegisterInfo.td"
-
-include "ARMCallingConv.td"
-
-//===----------------------------------------------------------------------===//
-// Instruction Descriptions
-//===----------------------------------------------------------------------===//
-
-include "ARMInstrInfo.td"
-
-def ARMInstrInfo : InstrInfo {
- // Define how we want to layout our target-specific information field.
- let TSFlagsFields = ["AddrModeBits",
- "SizeFlag",
- "IndexModeBits",
- "Form",
- "isUnaryDataProc",
- "canXformTo16Bit",
- "Dom"];
- let TSFlagsShifts = [0,
- 4,
- 7,
- 9,
- 15,
- 16,
- 17];
-}
-
-//===----------------------------------------------------------------------===//
-// Declare the target which we are implementing
-//===----------------------------------------------------------------------===//
-
-def ARM : Target {
- // Pull in Instruction Info:
- let InstructionSet = ARMInstrInfo;
-}
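The TSFlagsFields/TSFlagsShifts pairs in the ARMInstrInfo record above declare how target-specific flags are packed into one word; the consumers in ARMBaseInstrInfo further down unpack them with masks and shifts from the ARMII namespace. A minimal decoding sketch, with field widths inferred only from the consecutive shift values 0,4,7,9,15,16,17 (the authoritative masks are not reproduced in this diff):

    // Sketch only: unpacking TSFlags per the declared shift list.
    unsigned AddrMode  =  TSFlags        & 0xF;   // AddrModeBits, bits 0-3
    unsigned Size      = (TSFlags >> 4)  & 0x7;   // SizeFlag,     bits 4-6
    unsigned IndexMode = (TSFlags >> 7)  & 0x3;   // IndexModeBits, bits 7-8
    unsigned Form      = (TSFlags >> 9)  & 0x3F;  // Form,         bits 9-14
    bool     IsUnary   = (TSFlags >> 15) & 1;     // isUnaryDataProc
    bool     Xform16   = (TSFlags >> 16) & 1;     // canXformTo16Bit
    unsigned Domain    =  TSFlags >> 17;          // Dom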
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMAddressingModes.h b/libclamav/c++/llvm/lib/Target/ARM/ARMAddressingModes.h
deleted file mode 100644
index ddeb1b9..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMAddressingModes.h
+++ /dev/null
@@ -1,566 +0,0 @@
-//===- ARMAddressingModes.h - ARM Addressing Modes --------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the ARM addressing mode implementation stuff.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_ARM_ARMADDRESSINGMODES_H
-#define LLVM_TARGET_ARM_ARMADDRESSINGMODES_H
-
-#include "llvm/CodeGen/SelectionDAGNodes.h"
-#include "llvm/Support/MathExtras.h"
-#include <cassert>
-
-namespace llvm {
-
-/// ARM_AM - ARM Addressing Mode Stuff
-namespace ARM_AM {
- enum ShiftOpc {
- no_shift = 0,
- asr,
- lsl,
- lsr,
- ror,
- rrx
- };
-
- enum AddrOpc {
- add = '+', sub = '-'
- };
-
- static inline const char *getShiftOpcStr(ShiftOpc Op) {
- switch (Op) {
- default: assert(0 && "Unknown shift opc!");
- case ARM_AM::asr: return "asr";
- case ARM_AM::lsl: return "lsl";
- case ARM_AM::lsr: return "lsr";
- case ARM_AM::ror: return "ror";
- case ARM_AM::rrx: return "rrx";
- }
- }
-
- static inline ShiftOpc getShiftOpcForNode(SDValue N) {
- switch (N.getOpcode()) {
- default: return ARM_AM::no_shift;
- case ISD::SHL: return ARM_AM::lsl;
- case ISD::SRL: return ARM_AM::lsr;
- case ISD::SRA: return ARM_AM::asr;
- case ISD::ROTR: return ARM_AM::ror;
- //case ISD::ROTL: // Only if imm -> turn into ROTR.
- // Can't handle RRX here, because it would require folding a flag into
- // the addressing mode. :( This causes us to miss certain things.
- //case ARMISD::RRX: return ARM_AM::rrx;
- }
- }
-
- enum AMSubMode {
- bad_am_submode = 0,
- ia,
- ib,
- da,
- db
- };
-
- static inline const char *getAMSubModeStr(AMSubMode Mode) {
- switch (Mode) {
- default: assert(0 && "Unknown addressing sub-mode!");
- case ARM_AM::ia: return "ia";
- case ARM_AM::ib: return "ib";
- case ARM_AM::da: return "da";
- case ARM_AM::db: return "db";
- }
- }
-
- static inline const char *getAMSubModeAltStr(AMSubMode Mode, bool isLD) {
- switch (Mode) {
- default: assert(0 && "Unknown addressing sub-mode!");
- case ARM_AM::ia: return isLD ? "fd" : "ea";
- case ARM_AM::ib: return isLD ? "ed" : "fa";
- case ARM_AM::da: return isLD ? "fa" : "ed";
- case ARM_AM::db: return isLD ? "ea" : "fd";
- }
- }
-
- /// rotr32 - Rotate a 32-bit unsigned value right by a specified # bits.
- ///
- static inline unsigned rotr32(unsigned Val, unsigned Amt) {
- assert(Amt < 32 && "Invalid rotate amount");
- return (Val >> Amt) | (Val << ((32-Amt)&31));
- }
-
- /// rotl32 - Rotate a 32-bit unsigned value left by a specified # bits.
- ///
- static inline unsigned rotl32(unsigned Val, unsigned Amt) {
- assert(Amt < 32 && "Invalid rotate amount");
- return (Val << Amt) | (Val >> ((32-Amt)&31));
- }
-
- //===--------------------------------------------------------------------===//
- // Addressing Mode #1: shift_operand with registers
- //===--------------------------------------------------------------------===//
- //
- // This 'addressing mode' is used for arithmetic instructions. It can
- // represent things like:
- // reg
- // reg [asr|lsl|lsr|ror|rrx] reg
- // reg [asr|lsl|lsr|ror|rrx] imm
- //
- // This is stored as three operands [rega, regb, opc]. The first is the base
- // reg, the second is the shift amount (or reg0 if not present or imm). The
- // third operand encodes the shift opcode and the imm if a reg isn't present.
- //
- static inline unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm) {
- return ShOp | (Imm << 3);
- }
- static inline unsigned getSORegOffset(unsigned Op) {
- return Op >> 3;
- }
- static inline ShiftOpc getSORegShOp(unsigned Op) {
- return (ShiftOpc)(Op & 7);
- }
-
- /// getSOImmValImm - Given an encoded imm field for the reg/imm form, return
- /// the 8-bit imm value.
- static inline unsigned getSOImmValImm(unsigned Imm) {
- return Imm & 0xFF;
- }
- /// getSOImmValRot - Given an encoded imm field for the reg/imm form, return
- /// the rotate amount.
- static inline unsigned getSOImmValRot(unsigned Imm) {
- return (Imm >> 8) * 2;
- }
-
- /// getSOImmValRotate - Try to handle Imm with an immediate shifter operand,
- /// computing the rotate amount to use. If this immediate value cannot be
- /// handled with a single shifter-op, determine a good rotate amount that will
- /// take a maximal chunk of bits out of the immediate.
- static inline unsigned getSOImmValRotate(unsigned Imm) {
- // 8-bit (or less) immediates are trivially shifter_operands with a rotate
- // of zero.
- if ((Imm & ~255U) == 0) return 0;
-
- // Use CTZ to compute the rotate amount.
- unsigned TZ = CountTrailingZeros_32(Imm);
-
- // Rotate amount must be even. Something like 0x200 must be rotated 8 bits,
- // not 9.
- unsigned RotAmt = TZ & ~1;
-
- // If we can handle this spread, return it.
- if ((rotr32(Imm, RotAmt) & ~255U) == 0)
- return (32-RotAmt)&31; // HW rotates right, not left.
-
- // For values like 0xF000000F, we should skip the first run of ones, then
- // retry the hunt.
- if (Imm & 1) {
- unsigned TrailingOnes = CountTrailingZeros_32(~Imm);
- if (TrailingOnes != 32) { // Avoid overflow on 0xFFFFFFFF
- // Restart the search for a high-order bit after the initial sequence of
- // ones.
- unsigned TZ2 = CountTrailingZeros_32(Imm & ~((1 << TrailingOnes)-1));
-
- // Rotate amount must be even.
- unsigned RotAmt2 = TZ2 & ~1;
-
- // If this fits, use it.
- if (RotAmt2 != 32 && (rotr32(Imm, RotAmt2) & ~255U) == 0)
- return (32-RotAmt2)&31; // HW rotates right, not left.
- }
- }
-
- // Otherwise, we have no way to cover this span of bits with a single
- // shifter_op immediate. Return a chunk of bits that will be useful to
- // handle.
- return (32-RotAmt)&31; // HW rotates right, not left.
- }
-
- /// getSOImmVal - Given a 32-bit immediate, if it is something that can fit
- /// into a shifter_operand immediate operand, return the 12-bit encoding for
- /// it. If not, return -1.
- static inline int getSOImmVal(unsigned Arg) {
- // 8-bit (or less) immediates are trivially shifter_operands with a rotate
- // of zero.
- if ((Arg & ~255U) == 0) return Arg;
-
- unsigned RotAmt = getSOImmValRotate(Arg);
-
- // If this cannot be handled with a single shifter_op, bail out.
- if (rotr32(~255U, RotAmt) & Arg)
- return -1;
-
- // Encode this correctly.
- return rotl32(Arg, RotAmt) | ((RotAmt>>1) << 8);
- }
-
- /// isSOImmTwoPartVal - Return true if the specified value can be obtained by
- /// or'ing together two SOImmVal's.
- static inline bool isSOImmTwoPartVal(unsigned V) {
- // If this can be handled with a single shifter_op, bail out.
- V = rotr32(~255U, getSOImmValRotate(V)) & V;
- if (V == 0)
- return false;
-
- // If this can be handled with two shifter_op's, accept.
- V = rotr32(~255U, getSOImmValRotate(V)) & V;
- return V == 0;
- }
-
- /// getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal,
- /// return the first chunk of it.
- static inline unsigned getSOImmTwoPartFirst(unsigned V) {
- return rotr32(255U, getSOImmValRotate(V)) & V;
- }
-
- /// getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal,
- /// return the second chunk of it.
- static inline unsigned getSOImmTwoPartSecond(unsigned V) {
- // Mask out the first hunk.
- V = rotr32(~255U, getSOImmValRotate(V)) & V;
-
- // Take what's left.
- assert(V == (rotr32(255U, getSOImmValRotate(V)) & V));
- return V;
- }
-
- /// getThumbImmValShift - Try to handle Imm with an 8-bit immediate followed
- /// by a left shift. Returns the shift amount to use.
- static inline unsigned getThumbImmValShift(unsigned Imm) {
- // 8-bit (or less) immediates are trivially immediate operand with a shift
- // of zero.
- if ((Imm & ~255U) == 0) return 0;
-
- // Use CTZ to compute the shift amount.
- return CountTrailingZeros_32(Imm);
- }
-
- /// isThumbImmShiftedVal - Return true if the specified value can be obtained
- /// by left shifting an 8-bit immediate.
- static inline bool isThumbImmShiftedVal(unsigned V) {
- // Mask out the shifted 8-bit portion; if nothing remains, V is representable.
- V = (~255U << getThumbImmValShift(V)) & V;
- return V == 0;
- }
-
- /// getThumbImm16ValShift - Try to handle Imm with a 16-bit immediate followed
- /// by a left shift. Returns the shift amount to use.
- static inline unsigned getThumbImm16ValShift(unsigned Imm) {
- // 16-bit (or less) immediates are trivially immediate operand with a shift
- // of zero.
- if ((Imm & ~65535U) == 0) return 0;
-
- // Use CTZ to compute the shift amount.
- return CountTrailingZeros_32(Imm);
- }
-
- /// isThumbImm16ShiftedVal - Return true if the specified value can be
- /// obtained by left shifting a 16-bit immediate.
- static inline bool isThumbImm16ShiftedVal(unsigned V) {
- // Mask out the shifted 16-bit portion; if nothing remains, V is representable.
- V = (~65535U << getThumbImm16ValShift(V)) & V;
- return V == 0;
- }
-
- /// getThumbImmNonShiftedVal - If V is a value that satisfies
- /// isThumbImmShiftedVal, return the non-shifted value.
- static inline unsigned getThumbImmNonShiftedVal(unsigned V) {
- return V >> getThumbImmValShift(V);
- }
-
-
- /// getT2SOImmValSplat - Return the 12-bit encoded representation
- /// if the specified value can be obtained by splatting the low 8 bits
- /// into every other byte or every byte of a 32-bit value. i.e.,
- /// 00000000 00000000 00000000 abcdefgh control = 0
- /// 00000000 abcdefgh 00000000 abcdefgh control = 1
- /// abcdefgh 00000000 abcdefgh 00000000 control = 2
- /// abcdefgh abcdefgh abcdefgh abcdefgh control = 3
- /// Return -1 if none of the above apply.
- /// See ARM Reference Manual A6.3.2.
- static inline int getT2SOImmValSplatVal(unsigned V) {
- unsigned u, Vs, Imm;
- // control = 0
- if ((V & 0xffffff00) == 0)
- return V;
-
- // If the first byte of the value is all zeroes, just shift it off
- Vs = ((V & 0xff) == 0) ? V >> 8 : V;
- // Any passing value only has 8 bits of payload, splatted across the word
- Imm = Vs & 0xff;
- // Likewise, any passing values have the payload splatted into the 3rd byte
- u = Imm | (Imm << 16);
-
- // control = 1 or 2
- if (Vs == u)
- return (((Vs == V) ? 1 : 2) << 8) | Imm;
-
- // control = 3
- if (Vs == (u | (u << 8)))
- return (3 << 8) | Imm;
-
- return -1;
- }
-
- /// getT2SOImmValRotateVal - Return the 12-bit encoded representation if the
- /// specified value is a rotated 8-bit value. Return -1 if no rotation
- /// encoding is possible.
- /// See ARM Reference Manual A6.3.2.
- static inline int getT2SOImmValRotateVal(unsigned V) {
- unsigned RotAmt = CountLeadingZeros_32(V);
- if (RotAmt >= 24)
- return -1;
-
- // If 'Arg' can be handled with a single shifter_op return the value.
- if ((rotr32(0xff000000U, RotAmt) & V) == V)
- return (rotr32(V, 24 - RotAmt) & 0x7f) | ((RotAmt + 8) << 7);
-
- return -1;
- }
-
- /// getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit
- /// into a Thumb-2 shifter_operand immediate operand, return the 12-bit
- /// encoding for it. If not, return -1.
- /// See ARM Reference Manual A6.3.2.
- static inline int getT2SOImmVal(unsigned Arg) {
- // If 'Arg' is an 8-bit splat, then get the encoded value.
- int Splat = getT2SOImmValSplatVal(Arg);
- if (Splat != -1)
- return Splat;
-
- // If 'Arg' can be handled with a single shifter_op return the value.
- int Rot = getT2SOImmValRotateVal(Arg);
- if (Rot != -1)
- return Rot;
-
- return -1;
- }
-
- static inline unsigned getT2SOImmValRotate(unsigned V) {
- if ((V & ~255U) == 0) return 0;
- // Use CTZ to compute the rotate amount.
- unsigned RotAmt = CountTrailingZeros_32(V);
- return (32 - RotAmt) & 31;
- }
-
- static inline bool isT2SOImmTwoPartVal (unsigned Imm) {
- unsigned V = Imm;
- // Passing values can be any combination of splat values and shifter
- // values. If this can be handled with a single shifter or splat, bail
- // out. Those should be handled directly, not with a two-part val.
- if (getT2SOImmValSplatVal(V) != -1)
- return false;
- V = rotr32 (~255U, getT2SOImmValRotate(V)) & V;
- if (V == 0)
- return false;
-
- // If this can be handled as an immediate, accept.
- if (getT2SOImmVal(V) != -1) return true;
-
- // Likewise, try masking out a splat value first.
- V = Imm;
- if (getT2SOImmValSplatVal(V & 0xff00ff00U) != -1)
- V &= ~0xff00ff00U;
- else if (getT2SOImmValSplatVal(V & 0x00ff00ffU) != -1)
- V &= ~0x00ff00ffU;
- // If what's left can be handled as an immediate, accept.
- if (getT2SOImmVal(V) != -1) return true;
-
- // Otherwise, do not accept.
- return false;
- }
-
- static inline unsigned getT2SOImmTwoPartFirst(unsigned Imm) {
- assert (isT2SOImmTwoPartVal(Imm) &&
- "Immedate cannot be encoded as two part immediate!");
- // Try a shifter operand as one part
- unsigned V = rotr32 (~255, getT2SOImmValRotate(Imm)) & Imm;
- // If the rest is encodable as an immediate, then return it.
- if (getT2SOImmVal(V) != -1) return V;
-
- // Try masking out a splat value first.
- if (getT2SOImmValSplatVal(Imm & 0xff00ff00U) != -1)
- return Imm & 0xff00ff00U;
-
- // The other splat is all that's left as an option.
- assert (getT2SOImmValSplatVal(Imm & 0x00ff00ffU) != -1);
- return Imm & 0x00ff00ffU;
- }
-
- static inline unsigned getT2SOImmTwoPartSecond(unsigned Imm) {
- // Mask out the first hunk
- Imm ^= getT2SOImmTwoPartFirst(Imm);
- // Return what's left
- assert (getT2SOImmVal(Imm) != -1 &&
- "Unable to encode second part of T2 two part SO immediate");
- return Imm;
- }
-
-
- //===--------------------------------------------------------------------===//
- // Addressing Mode #2
- //===--------------------------------------------------------------------===//
- //
- // This is used for most simple load/store instructions.
- //
- // addrmode2 := reg +/- reg shop imm
- // addrmode2 := reg +/- imm12
- //
- // The first operand is always a Reg. The second operand is a reg if in
- // reg/reg form, otherwise it's reg#0. The third field encodes the operation
- // in bit 12, the immediate in bits 0-11, and the shift op in 13-15.
- //
- // If this addressing mode is a frame index (before prolog/epilog insertion
- // and code rewriting), this operand will have the form: FI#, reg0, <offs>
- // with no shift amount for the frame offset.
- //
- static inline unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO) {
- assert(Imm12 < (1 << 12) && "Imm too large!");
- bool isSub = Opc == sub;
- return Imm12 | ((int)isSub << 12) | (SO << 13);
- }
- static inline unsigned getAM2Offset(unsigned AM2Opc) {
- return AM2Opc & ((1 << 12)-1);
- }
- static inline AddrOpc getAM2Op(unsigned AM2Opc) {
- return ((AM2Opc >> 12) & 1) ? sub : add;
- }
- static inline ShiftOpc getAM2ShiftOpc(unsigned AM2Opc) {
- return (ShiftOpc)(AM2Opc >> 13);
- }
-
-
- //===--------------------------------------------------------------------===//
- // Addressing Mode #3
- //===--------------------------------------------------------------------===//
- //
- // This is used for sign-extending loads, and load/store-pair instructions.
- //
- // addrmode3 := reg +/- reg
- // addrmode3 := reg +/- imm8
- //
- // The first operand is always a Reg. The second operand is a reg if in
- // reg/reg form, otherwise it's reg#0. The third field encodes the operation
- // in bit 8, the immediate in bits 0-7.
-
- /// getAM3Opc - This function encodes the addrmode3 opc field.
- static inline unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset) {
- bool isSub = Opc == sub;
- return ((int)isSub << 8) | Offset;
- }
- static inline unsigned char getAM3Offset(unsigned AM3Opc) {
- return AM3Opc & 0xFF;
- }
- static inline AddrOpc getAM3Op(unsigned AM3Opc) {
- return ((AM3Opc >> 8) & 1) ? sub : add;
- }
-
- //===--------------------------------------------------------------------===//
- // Addressing Mode #4
- //===--------------------------------------------------------------------===//
- //
- // This is used for load / store multiple instructions.
- //
- // addrmode4 := reg, <mode>
- //
- // The four modes are:
- // IA - Increment after
- // IB - Increment before
- // DA - Decrement after
- // DB - Decrement before
- //
- // If the 4th bit (writeback) is set, then the base register is updated after
- // the memory transfer.
-
- static inline AMSubMode getAM4SubMode(unsigned Mode) {
- return (AMSubMode)(Mode & 0x7);
- }
-
- static inline unsigned getAM4ModeImm(AMSubMode SubMode, bool WB = false) {
- return (int)SubMode | ((int)WB << 3);
- }
-
- static inline bool getAM4WBFlag(unsigned Mode) {
- return (Mode >> 3) & 1;
- }
-
- //===--------------------------------------------------------------------===//
- // Addressing Mode #5
- //===--------------------------------------------------------------------===//
- //
- // This is used for coprocessor instructions, such as FP load/stores.
- //
- // addrmode5 := reg +/- imm8*4
- //
- // The first operand is always a Reg. The second operand encodes the
- // operation in bit 8 and the immediate in bits 0-7.
- //
- // This is also used for FP load/store multiple ops. The second operand
- // encodes the writeback mode in bit 8 and the number of registers (or 2
- // times the number of registers for DPR ops) in bits 0-7. In addition,
- // bits 9-11 encode one of the following two sub-modes:
- //
- // IA - Increment after
- // DB - Decrement before
-
- /// getAM5Opc - This function encodes the addrmode5 opc field.
- static inline unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset) {
- bool isSub = Opc == sub;
- return ((int)isSub << 8) | Offset;
- }
- static inline unsigned char getAM5Offset(unsigned AM5Opc) {
- return AM5Opc & 0xFF;
- }
- static inline AddrOpc getAM5Op(unsigned AM5Opc) {
- return ((AM5Opc >> 8) & 1) ? sub : add;
- }
-
- /// getAM5Opc - This function encodes the addrmode5 opc field for VLDM and
- /// VSTM instructions.
- static inline unsigned getAM5Opc(AMSubMode SubMode, bool WB,
- unsigned char Offset) {
- assert((SubMode == ia || SubMode == db) &&
- "Illegal addressing mode 5 sub-mode!");
- return ((int)SubMode << 9) | ((int)WB << 8) | Offset;
- }
- static inline AMSubMode getAM5SubMode(unsigned AM5Opc) {
- return (AMSubMode)((AM5Opc >> 9) & 0x7);
- }
- static inline bool getAM5WBFlag(unsigned AM5Opc) {
- return ((AM5Opc >> 8) & 1);
- }
-
- //===--------------------------------------------------------------------===//
- // Addressing Mode #6
- //===--------------------------------------------------------------------===//
- //
- // This is used for NEON load / store instructions.
- //
- // addrmode6 := reg with optional writeback and alignment
- //
- // This is stored in four operands [regaddr, regupdate, opc, align]. The
- // first is the address register. The second register holds the value of
- // a post-access increment for writeback or reg0 if no writeback or if the
- // writeback increment is the size of the memory access. The third
- // operand encodes whether there is writeback to the address register. The
- // fourth operand is the value of the alignment specifier to use or zero if
- // no explicit alignment.
-
- static inline unsigned getAM6Opc(bool WB = false) {
- return (int)WB;
- }
-
- static inline bool getAM6WBFlag(unsigned Mode) {
- return Mode & 1;
- }
-
-} // end namespace ARM_AM
-} // end namespace llvm
-
-#endif
-
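To make the mode-1 shifter_operand encoding in the header above concrete, a worked example computed from the helpers shown (32-bit unsigned arithmetic assumed):

    // Imm = 0x00000F00
    //   getSOImmValRotate(0x00000F00) == 24    (8 trailing zeros; HW rotates right)
    //   getSOImmVal(0x00000F00)       == 0xC0F
    //     -> imm8 = 0x0F, rotate field = 0xC, i.e. rotate right by 2*12 = 24 bits,
    //        and rotr32(0x0F, 24) == 0x00000F00 recovers the original value.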
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/libclamav/c++/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
deleted file mode 100644
index 8e537d8..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ /dev/null
@@ -1,1251 +0,0 @@
-//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Base ARM implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARMBaseInstrInfo.h"
-#include "ARM.h"
-#include "ARMAddressingModes.h"
-#include "ARMConstantPoolValue.h"
-#include "ARMGenInstrInfo.inc"
-#include "ARMMachineFunctionInfo.h"
-#include "ARMRegisterInfo.h"
-#include "llvm/Constants.h"
-#include "llvm/Function.h"
-#include "llvm/GlobalValue.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/CodeGen/LiveVariables.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineJumpTableInfo.h"
-#include "llvm/CodeGen/MachineMemOperand.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-using namespace llvm;
-
-static cl::opt<bool>
-EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
- cl::desc("Enable ARM 2-addr to 3-addr conv"));
-
-ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
- : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)),
- Subtarget(STI) {
-}
-
-MachineInstr *
-ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const {
- // FIXME: Thumb2 support.
-
- if (!EnableARM3Addr)
- return NULL;
-
- MachineInstr *MI = MBBI;
- MachineFunction &MF = *MI->getParent()->getParent();
- unsigned TSFlags = MI->getDesc().TSFlags;
- bool isPre = false;
- switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
- default: return NULL;
- case ARMII::IndexModePre:
- isPre = true;
- break;
- case ARMII::IndexModePost:
- break;
- }
-
- // Try splitting an indexed load/store to an un-indexed one plus an add/sub
- // operation.
- unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
- if (MemOpc == 0)
- return NULL;
-
- MachineInstr *UpdateMI = NULL;
- MachineInstr *MemMI = NULL;
- unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
- const TargetInstrDesc &TID = MI->getDesc();
- unsigned NumOps = TID.getNumOperands();
- bool isLoad = !TID.mayStore();
- const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
- const MachineOperand &Base = MI->getOperand(2);
- const MachineOperand &Offset = MI->getOperand(NumOps-3);
- unsigned WBReg = WB.getReg();
- unsigned BaseReg = Base.getReg();
- unsigned OffReg = Offset.getReg();
- unsigned OffImm = MI->getOperand(NumOps-2).getImm();
- ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
- switch (AddrMode) {
- default:
- assert(false && "Unknown indexed op!");
- return NULL;
- case ARMII::AddrMode2: {
- bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
- unsigned Amt = ARM_AM::getAM2Offset(OffImm);
- if (OffReg == 0) {
- if (ARM_AM::getSOImmVal(Amt) == -1)
- // Can't encode it in a so_imm operand. This transformation will
- // add more than 1 instruction. Abandon!
- return NULL;
- UpdateMI = BuildMI(MF, MI->getDebugLoc(),
- get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
- .addReg(BaseReg).addImm(Amt)
- .addImm(Pred).addReg(0).addReg(0);
- } else if (Amt != 0) {
- ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
- unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
- UpdateMI = BuildMI(MF, MI->getDebugLoc(),
- get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
- .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
- .addImm(Pred).addReg(0).addReg(0);
- } else
- UpdateMI = BuildMI(MF, MI->getDebugLoc(),
- get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
- .addReg(BaseReg).addReg(OffReg)
- .addImm(Pred).addReg(0).addReg(0);
- break;
- }
- case ARMII::AddrMode3 : {
- bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
- unsigned Amt = ARM_AM::getAM3Offset(OffImm);
- if (OffReg == 0)
- // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
- UpdateMI = BuildMI(MF, MI->getDebugLoc(),
- get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
- .addReg(BaseReg).addImm(Amt)
- .addImm(Pred).addReg(0).addReg(0);
- else
- UpdateMI = BuildMI(MF, MI->getDebugLoc(),
- get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
- .addReg(BaseReg).addReg(OffReg)
- .addImm(Pred).addReg(0).addReg(0);
- break;
- }
- }
-
- std::vector<MachineInstr*> NewMIs;
- if (isPre) {
- if (isLoad)
- MemMI = BuildMI(MF, MI->getDebugLoc(),
- get(MemOpc), MI->getOperand(0).getReg())
- .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
- else
- MemMI = BuildMI(MF, MI->getDebugLoc(),
- get(MemOpc)).addReg(MI->getOperand(1).getReg())
- .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
- NewMIs.push_back(MemMI);
- NewMIs.push_back(UpdateMI);
- } else {
- if (isLoad)
- MemMI = BuildMI(MF, MI->getDebugLoc(),
- get(MemOpc), MI->getOperand(0).getReg())
- .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
- else
- MemMI = BuildMI(MF, MI->getDebugLoc(),
- get(MemOpc)).addReg(MI->getOperand(1).getReg())
- .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
- if (WB.isDead())
- UpdateMI->getOperand(0).setIsDead();
- NewMIs.push_back(UpdateMI);
- NewMIs.push_back(MemMI);
- }
-
- // Transfer LiveVariables states, kill / dead info.
- if (LV) {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.getReg() &&
- TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
- unsigned Reg = MO.getReg();
-
- LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
- if (MO.isDef()) {
- MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
- if (MO.isDead())
- LV->addVirtualRegisterDead(Reg, NewMI);
- }
- if (MO.isUse() && MO.isKill()) {
- for (unsigned j = 0; j < 2; ++j) {
- // Look at the two new MI's in reverse order.
- MachineInstr *NewMI = NewMIs[j];
- if (!NewMI->readsRegister(Reg))
- continue;
- LV->addVirtualRegisterKilled(Reg, NewMI);
- if (VI.removeKill(MI))
- VI.Kills.push_back(NewMI);
- break;
- }
- }
- }
- }
- }
-
- MFI->insert(MBBI, NewMIs[1]);
- MFI->insert(MBBI, NewMIs[0]);
- return NewMIs[0];
-}
-
-// Branch analysis.
-bool
-ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const {
- // If the block has no terminators, it just falls into the block after it.
- MachineBasicBlock::iterator I = MBB.end();
- if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
- return false;
-
- // Get the last instruction in the block.
- MachineInstr *LastInst = I;
-
- // If there is only one terminator instruction, process it.
- unsigned LastOpc = LastInst->getOpcode();
- if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
- if (isUncondBranchOpcode(LastOpc)) {
- TBB = LastInst->getOperand(0).getMBB();
- return false;
- }
- if (isCondBranchOpcode(LastOpc)) {
- // Block ends with fall-through condbranch.
- TBB = LastInst->getOperand(0).getMBB();
- Cond.push_back(LastInst->getOperand(1));
- Cond.push_back(LastInst->getOperand(2));
- return false;
- }
- return true; // Can't handle indirect branch.
- }
-
- // Get the instruction before it if it is a terminator.
- MachineInstr *SecondLastInst = I;
-
- // If there are three terminators, we don't know what sort of block this is.
- if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
- return true;
-
- // If the block ends with a B and a Bcc, handle it.
- unsigned SecondLastOpc = SecondLastInst->getOpcode();
- if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
- TBB = SecondLastInst->getOperand(0).getMBB();
- Cond.push_back(SecondLastInst->getOperand(1));
- Cond.push_back(SecondLastInst->getOperand(2));
- FBB = LastInst->getOperand(0).getMBB();
- return false;
- }
-
- // If the block ends with two unconditional branches, handle it. The second
- // one is not executed, so remove it.
- if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
- TBB = SecondLastInst->getOperand(0).getMBB();
- I = LastInst;
- if (AllowModify)
- I->eraseFromParent();
- return false;
- }
-
- // ...likewise if it ends with a branch table followed by an unconditional
- // branch. The branch folder can create these, and we must get rid of them for
- // correctness of Thumb constant islands.
- if ((isJumpTableBranchOpcode(SecondLastOpc) ||
- isIndirectBranchOpcode(SecondLastOpc)) &&
- isUncondBranchOpcode(LastOpc)) {
- I = LastInst;
- if (AllowModify)
- I->eraseFromParent();
- return true;
- }
-
- // Otherwise, can't handle this.
- return true;
-}
-
-
-unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
- MachineBasicBlock::iterator I = MBB.end();
- if (I == MBB.begin()) return 0;
- --I;
- if (!isUncondBranchOpcode(I->getOpcode()) &&
- !isCondBranchOpcode(I->getOpcode()))
- return 0;
-
- // Remove the branch.
- I->eraseFromParent();
-
- I = MBB.end();
-
- if (I == MBB.begin()) return 1;
- --I;
- if (!isCondBranchOpcode(I->getOpcode()))
- return 1;
-
- // Remove the branch.
- I->eraseFromParent();
- return 2;
-}
-
-unsigned
-ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc argument
- DebugLoc dl = DebugLoc::getUnknownLoc();
-
- ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
- int BOpc = !AFI->isThumbFunction()
- ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
- int BccOpc = !AFI->isThumbFunction()
- ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
-
- // Shouldn't be a fall through.
- assert(TBB && "InsertBranch must not be told to insert a fallthrough");
- assert((Cond.size() == 2 || Cond.size() == 0) &&
- "ARM branch conditions have two components!");
-
- if (FBB == 0) {
- if (Cond.empty()) // Unconditional branch?
- BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
- else
- BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
- .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
- return 1;
- }
-
- // Two-way conditional branch.
- BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
- .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
- BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
- return 2;
-}
-
-bool ARMBaseInstrInfo::
-ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
- ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
- Cond[0].setImm(ARMCC::getOppositeCondition(CC));
- return false;
-}
-
-bool ARMBaseInstrInfo::
-PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const {
- unsigned Opc = MI->getOpcode();
- if (isUncondBranchOpcode(Opc)) {
- MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
- MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
- MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
- return true;
- }
-
- int PIdx = MI->findFirstPredOperandIdx();
- if (PIdx != -1) {
- MachineOperand &PMO = MI->getOperand(PIdx);
- PMO.setImm(Pred[0].getImm());
- MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
- return true;
- }
- return false;
-}
-
-bool ARMBaseInstrInfo::
-SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const {
- if (Pred1.size() > 2 || Pred2.size() > 2)
- return false;
-
- ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
- ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
- if (CC1 == CC2)
- return true;
-
- switch (CC1) {
- default:
- return false;
- case ARMCC::AL:
- return true;
- case ARMCC::HS:
- return CC2 == ARMCC::HI;
- case ARMCC::LS:
- return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
- case ARMCC::GE:
- return CC2 == ARMCC::GT;
- case ARMCC::LE:
- return CC2 == ARMCC::LT;
- }
-}
-
-bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const {
- // FIXME: This confuses implicit_def with optional CPSR def.
- const TargetInstrDesc &TID = MI->getDesc();
- if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
- return false;
-
- bool Found = false;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.getReg() == ARM::CPSR) {
- Pred.push_back(MO);
- Found = true;
- }
- }
-
- return Found;
-}
-
-/// isPredicable - Return true if the specified instruction can be predicated.
-/// By default, this returns true for every instruction with a
-/// PredicateOperand.
-bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
- const TargetInstrDesc &TID = MI->getDesc();
- if (!TID.isPredicable())
- return false;
-
- if ((TID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
- ARMFunctionInfo *AFI =
- MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
- return AFI->isThumb2Function();
- }
- return true;
-}
-
-/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
-DISABLE_INLINE
-static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
- unsigned JTI);
-static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
- unsigned JTI) {
- assert(JTI < JT.size());
- return JT[JTI].MBBs.size();
-}
-
-/// GetInstSize - Return the size of the specified MachineInstr.
-///
-unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
- const MachineBasicBlock &MBB = *MI->getParent();
- const MachineFunction *MF = MBB.getParent();
- const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
-
- // Basic size info comes from the TSFlags field.
- const TargetInstrDesc &TID = MI->getDesc();
- unsigned TSFlags = TID.TSFlags;
-
- unsigned Opc = MI->getOpcode();
- switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
- default: {
- // If this machine instr is an inline asm, measure it.
- if (MI->getOpcode() == ARM::INLINEASM)
- return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
- if (MI->isLabel())
- return 0;
- switch (Opc) {
- default:
- llvm_unreachable("Unknown or unset size field for instr!");
- case TargetOpcode::IMPLICIT_DEF:
- case TargetOpcode::KILL:
- case TargetOpcode::DBG_LABEL:
- case TargetOpcode::EH_LABEL:
- return 0;
- }
- break;
- }
- case ARMII::Size8Bytes: return 8; // ARM instruction x 2.
- case ARMII::Size4Bytes: return 4; // ARM / Thumb2 instruction.
- case ARMII::Size2Bytes: return 2; // Thumb1 instruction.
- case ARMII::SizeSpecial: {
- switch (Opc) {
- case ARM::CONSTPOOL_ENTRY:
- // If this machine instr is a constant pool entry, its size is recorded as
- // operand #2.
- return MI->getOperand(2).getImm();
- case ARM::Int_eh_sjlj_setjmp:
- return 24;
- case ARM::tInt_eh_sjlj_setjmp:
- return 14;
- case ARM::t2Int_eh_sjlj_setjmp:
- return 14;
- case ARM::BR_JTr:
- case ARM::BR_JTm:
- case ARM::BR_JTadd:
- case ARM::tBR_JTr:
- case ARM::t2BR_JT:
- case ARM::t2TBB:
- case ARM::t2TBH: {
- // These are jumptable branches, i.e. a branch followed by an inlined
- // jumptable. The size is 4 + 4 * number of entries. For TBB, each
- // entry is one byte; for TBH, two bytes each.
- unsigned EntrySize = (Opc == ARM::t2TBB)
- ? 1 : ((Opc == ARM::t2TBH) ? 2 : 4);
- unsigned NumOps = TID.getNumOperands();
- MachineOperand JTOP =
- MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
- unsigned JTI = JTOP.getIndex();
- const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
- assert(MJTI != 0);
- const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
- assert(JTI < JT.size());
- // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
- // aligned. The assembler / linker may add 2 byte padding just before
- // the JT entries. The size does not include this padding; the
- // constant islands pass does separate bookkeeping for it.
- // FIXME: If we know the size of the function is less than (1 << 16) *2
- // bytes, we can use 16-bit entries instead. Then there won't be an
- // alignment issue.
- unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
- unsigned NumEntries = getNumJTEntries(JT, JTI);
- if (Opc == ARM::t2TBB && (NumEntries & 1))
- // Make sure the instruction that follows TBB is 2-byte aligned.
- // FIXME: Constant island pass should insert an "ALIGN" instruction
- // instead.
- ++NumEntries;
- return NumEntries * EntrySize + InstSize;
- }
- default:
- // Otherwise, pseudo-instruction sizes are zero.
- return 0;
- }
- }
- }
- return 0; // Not reached
-}
-
-/// Return true if the instruction is a register to register move and
-/// leave the source and dest operands in the passed parameters.
-///
-bool
-ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
- unsigned &SrcReg, unsigned &DstReg,
- unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
- SrcSubIdx = DstSubIdx = 0; // No sub-registers.
-
- switch (MI.getOpcode()) {
- default: break;
- case ARM::VMOVS:
- case ARM::VMOVD:
- case ARM::VMOVDneon:
- case ARM::VMOVQ: {
- SrcReg = MI.getOperand(1).getReg();
- DstReg = MI.getOperand(0).getReg();
- return true;
- }
- case ARM::MOVr:
- case ARM::tMOVr:
- case ARM::tMOVgpr2tgpr:
- case ARM::tMOVtgpr2gpr:
- case ARM::tMOVgpr2gpr:
- case ARM::t2MOVr: {
- assert(MI.getDesc().getNumOperands() >= 2 &&
- MI.getOperand(0).isReg() &&
- MI.getOperand(1).isReg() &&
- "Invalid ARM MOV instruction");
- SrcReg = MI.getOperand(1).getReg();
- DstReg = MI.getOperand(0).getReg();
- return true;
- }
- }
-
- return false;
-}
-
-unsigned
-ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- switch (MI->getOpcode()) {
- default: break;
- case ARM::LDR:
- case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame.
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isReg() &&
- MI->getOperand(3).isImm() &&
- MI->getOperand(2).getReg() == 0 &&
- MI->getOperand(3).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- case ARM::t2LDRi12:
- case ARM::tRestore:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() &&
- MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- case ARM::VLDRD:
- case ARM::VLDRS:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() &&
- MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- }
-
- return 0;
-}
-
-unsigned
-ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- switch (MI->getOpcode()) {
- default: break;
- case ARM::STR:
- case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isReg() &&
- MI->getOperand(3).isImm() &&
- MI->getOperand(2).getReg() == 0 &&
- MI->getOperand(3).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- case ARM::t2STRi12:
- case ARM::tSpill:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() &&
- MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- case ARM::VSTRD:
- case ARM::VSTRS:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() &&
- MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- }
-
- return 0;
-}
-
-bool
-ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC) const {
- DebugLoc DL = DebugLoc::getUnknownLoc();
- if (I != MBB.end()) DL = I->getDebugLoc();
-
- // tGPR is used sometimes in ARM instructions that need to avoid using
- // certain registers. Just treat it as GPR here.
- if (DestRC == ARM::tGPRRegisterClass)
- DestRC = ARM::GPRRegisterClass;
- if (SrcRC == ARM::tGPRRegisterClass)
- SrcRC = ARM::GPRRegisterClass;
-
- if (DestRC != SrcRC) {
- if (DestRC->getSize() != SrcRC->getSize())
- return false;
-
- // Allow DPR / DPR_VFP2 / DPR_8 cross-class copies.
- // Allow QPR / QPR_VFP2 / QPR_8 cross-class copies.
- if (DestRC->getSize() != 8 && DestRC->getSize() != 16)
- return false;
- }
-
- if (DestRC == ARM::GPRRegisterClass) {
- AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr),
- DestReg).addReg(SrcReg)));
- } else if (DestRC == ARM::SPRRegisterClass) {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VMOVS), DestReg)
- .addReg(SrcReg));
- } else if (DestRC == ARM::DPRRegisterClass) {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VMOVD), DestReg)
- .addReg(SrcReg));
- } else if (DestRC == ARM::DPR_VFP2RegisterClass ||
- DestRC == ARM::DPR_8RegisterClass ||
- SrcRC == ARM::DPR_VFP2RegisterClass ||
- SrcRC == ARM::DPR_8RegisterClass) {
- // Always use neon reg-reg move if source or dest is NEON-only regclass.
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VMOVDneon),
- DestReg).addReg(SrcReg));
- } else if (DestRC == ARM::QPRRegisterClass ||
- DestRC == ARM::QPR_VFP2RegisterClass ||
- DestRC == ARM::QPR_8RegisterClass) {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VMOVQ),
- DestReg).addReg(SrcReg));
- } else {
- return false;
- }
-
- return true;
-}
-
-void ARMBaseInstrInfo::
-storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned SrcReg, bool isKill, int FI,
- const TargetRegisterClass *RC) const {
- DebugLoc DL = DebugLoc::getUnknownLoc();
- if (I != MBB.end()) DL = I->getDebugLoc();
- MachineFunction &MF = *MBB.getParent();
- MachineFrameInfo &MFI = *MF.getFrameInfo();
- unsigned Align = MFI.getObjectAlignment(FI);
-
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
- MachineMemOperand::MOStore, 0,
- MFI.getObjectSize(FI),
- Align);
-
- // tGPR is used sometimes in ARM instructions that need to avoid using
- // certain registers. Just treat it as GPR here.
- if (RC == ARM::tGPRRegisterClass)
- RC = ARM::GPRRegisterClass;
-
- if (RC == ARM::GPRRegisterClass) {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
- .addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
- } else if (RC == ARM::DPRRegisterClass ||
- RC == ARM::DPR_VFP2RegisterClass ||
- RC == ARM::DPR_8RegisterClass) {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
- .addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
- } else if (RC == ARM::SPRRegisterClass) {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
- .addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
- } else {
- assert((RC == ARM::QPRRegisterClass ||
- RC == ARM::QPR_VFP2RegisterClass) && "Unknown regclass!");
- // FIXME: Neon instructions should support predicates
- if (Align >= 16
- && (getRegisterInfo().canRealignStack(MF))) {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64))
- .addFrameIndex(FI).addImm(0).addImm(0).addImm(128)
- .addMemOperand(MMO)
- .addReg(SrcReg, getKillRegState(isKill)));
- } else {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRQ)).
- addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
- }
- }
-}
-
-void ARMBaseInstrInfo::
-loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned DestReg, int FI,
- const TargetRegisterClass *RC) const {
- DebugLoc DL = DebugLoc::getUnknownLoc();
- if (I != MBB.end()) DL = I->getDebugLoc();
- MachineFunction &MF = *MBB.getParent();
- MachineFrameInfo &MFI = *MF.getFrameInfo();
- unsigned Align = MFI.getObjectAlignment(FI);
-
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
- MachineMemOperand::MOLoad, 0,
- MFI.getObjectSize(FI),
- Align);
-
- // tGPR is used sometimes in ARM instructions that need to avoid using
- // certain registers. Just treat it as GPR here.
- if (RC == ARM::tGPRRegisterClass)
- RC = ARM::GPRRegisterClass;
-
- if (RC == ARM::GPRRegisterClass) {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
- .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
- } else if (RC == ARM::DPRRegisterClass ||
- RC == ARM::DPR_VFP2RegisterClass ||
- RC == ARM::DPR_8RegisterClass) {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
- .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
- } else if (RC == ARM::SPRRegisterClass) {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
- .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
- } else {
- assert((RC == ARM::QPRRegisterClass ||
- RC == ARM::QPR_VFP2RegisterClass ||
- RC == ARM::QPR_8RegisterClass) && "Unknown regclass!");
- if (Align >= 16
- && (getRegisterInfo().canRealignStack(MF))) {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
- .addFrameIndex(FI).addImm(0).addImm(0).addImm(128)
- .addMemOperand(MMO));
- } else {
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRQ), DestReg)
- .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
- }
- }
-}
-
-MachineInstr *ARMBaseInstrInfo::
-foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops, int FI) const {
- if (Ops.size() != 1) return NULL;
-
- unsigned OpNum = Ops[0];
- unsigned Opc = MI->getOpcode();
- MachineInstr *NewMI = NULL;
- if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
- // If it is updating CPSR, then it cannot be folded.
- if (MI->getOperand(4).getReg() == ARM::CPSR && !MI->getOperand(4).isDead())
- return NULL;
- unsigned Pred = MI->getOperand(2).getImm();
- unsigned PredReg = MI->getOperand(3).getReg();
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- unsigned SrcSubReg = MI->getOperand(1).getSubReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- if (Opc == ARM::MOVr)
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR))
- .addReg(SrcReg,
- getKillRegState(isKill) | getUndefRegState(isUndef),
- SrcSubReg)
- .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
- else // ARM::t2MOVr
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
- .addReg(SrcReg,
- getKillRegState(isKill) | getUndefRegState(isUndef),
- SrcSubReg)
- .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned DstSubReg = MI->getOperand(0).getSubReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- if (Opc == ARM::MOVr)
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR))
- .addReg(DstReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef), DstSubReg)
- .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
- else // ARM::t2MOVr
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
- .addReg(DstReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef), DstSubReg)
- .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
- }
- } else if (Opc == ARM::tMOVgpr2gpr ||
- Opc == ARM::tMOVtgpr2gpr ||
- Opc == ARM::tMOVgpr2tgpr) {
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- unsigned SrcSubReg = MI->getOperand(1).getSubReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
- .addReg(SrcReg,
- getKillRegState(isKill) | getUndefRegState(isUndef),
- SrcSubReg)
- .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned DstSubReg = MI->getOperand(0).getSubReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
- .addReg(DstReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef),
- DstSubReg)
- .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
- }
- } else if (Opc == ARM::VMOVS) {
- unsigned Pred = MI->getOperand(2).getImm();
- unsigned PredReg = MI->getOperand(3).getReg();
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- unsigned SrcSubReg = MI->getOperand(1).getSubReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRS))
- .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef),
- SrcSubReg)
- .addFrameIndex(FI)
- .addImm(0).addImm(Pred).addReg(PredReg);
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned DstSubReg = MI->getOperand(0).getSubReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRS))
- .addReg(DstReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef),
- DstSubReg)
- .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
- }
- }
- else if (Opc == ARM::VMOVD) {
- unsigned Pred = MI->getOperand(2).getImm();
- unsigned PredReg = MI->getOperand(3).getReg();
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- unsigned SrcSubReg = MI->getOperand(1).getSubReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRD))
- .addReg(SrcReg,
- getKillRegState(isKill) | getUndefRegState(isUndef),
- SrcSubReg)
- .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned DstSubReg = MI->getOperand(0).getSubReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRD))
- .addReg(DstReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef),
- DstSubReg)
- .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
- }
- }
-
- return NewMI;
-}
-
-MachineInstr*
-ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const {
- // FIXME
- return 0;
-}
-
-bool
-ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const {
- if (Ops.size() != 1) return false;
-
- unsigned Opc = MI->getOpcode();
- if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
- // If it is updating CPSR, then it cannot be folded.
- return MI->getOperand(4).getReg() != ARM::CPSR ||
- MI->getOperand(4).isDead();
- } else if (Opc == ARM::tMOVgpr2gpr ||
- Opc == ARM::tMOVtgpr2gpr ||
- Opc == ARM::tMOVgpr2tgpr) {
- return true;
- } else if (Opc == ARM::VMOVS || Opc == ARM::VMOVD) {
- return true;
- } else if (Opc == ARM::VMOVDneon || Opc == ARM::VMOVQ) {
- return false; // FIXME
- }
-
- return false;
-}
-
-/// Create a copy of a const pool value. Update CPI to the new index and return
-/// the label UID.
-static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
- MachineConstantPool *MCP = MF.getConstantPool();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
-
- const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
- assert(MCPE.isMachineConstantPoolEntry() &&
- "Expecting a machine constantpool entry!");
- ARMConstantPoolValue *ACPV =
- static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);
-
- unsigned PCLabelId = AFI->createConstPoolEntryUId();
- ARMConstantPoolValue *NewCPV = 0;
- if (ACPV->isGlobalValue())
- NewCPV = new ARMConstantPoolValue(ACPV->getGV(), PCLabelId,
- ARMCP::CPValue, 4);
- else if (ACPV->isExtSymbol())
- NewCPV = new ARMConstantPoolValue(MF.getFunction()->getContext(),
- ACPV->getSymbol(), PCLabelId, 4);
- else if (ACPV->isBlockAddress())
- NewCPV = new ARMConstantPoolValue(ACPV->getBlockAddress(), PCLabelId,
- ARMCP::CPBlockAddress, 4);
- else
- llvm_unreachable("Unexpected ARM constantpool value type!!");
- CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment());
- return PCLabelId;
-}
-
-void ARMBaseInstrInfo::
-reMaterialize(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SubIdx,
- const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const {
- if (SubIdx && TargetRegisterInfo::isPhysicalRegister(DestReg)) {
- DestReg = TRI->getSubReg(DestReg, SubIdx);
- SubIdx = 0;
- }
-
- unsigned Opcode = Orig->getOpcode();
- switch (Opcode) {
- default: {
- MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
- MI->getOperand(0).setReg(DestReg);
- MBB.insert(I, MI);
- break;
- }
- case ARM::tLDRpci_pic:
- case ARM::t2LDRpci_pic: {
- MachineFunction &MF = *MBB.getParent();
- unsigned CPI = Orig->getOperand(1).getIndex();
- unsigned PCLabelId = duplicateCPV(MF, CPI);
- MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode),
- DestReg)
- .addConstantPoolIndex(CPI).addImm(PCLabelId);
- (*MIB).setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end());
- break;
- }
- }
-
- MachineInstr *NewMI = prior(I);
- NewMI->getOperand(0).setSubReg(SubIdx);
-}
-
-MachineInstr *
-ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const {
- MachineInstr *MI = TargetInstrInfoImpl::duplicate(Orig, MF);
- switch(Orig->getOpcode()) {
- case ARM::tLDRpci_pic:
- case ARM::t2LDRpci_pic: {
- unsigned CPI = Orig->getOperand(1).getIndex();
- unsigned PCLabelId = duplicateCPV(MF, CPI);
- Orig->getOperand(1).setIndex(CPI);
- Orig->getOperand(2).setImm(PCLabelId);
- break;
- }
- }
- return MI;
-}
-
-bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
- const MachineInstr *MI1) const {
- int Opcode = MI0->getOpcode();
- if (Opcode == ARM::t2LDRpci ||
- Opcode == ARM::t2LDRpci_pic ||
- Opcode == ARM::tLDRpci ||
- Opcode == ARM::tLDRpci_pic) {
- if (MI1->getOpcode() != Opcode)
- return false;
- if (MI0->getNumOperands() != MI1->getNumOperands())
- return false;
-
- const MachineOperand &MO0 = MI0->getOperand(1);
- const MachineOperand &MO1 = MI1->getOperand(1);
- if (MO0.getOffset() != MO1.getOffset())
- return false;
-
- const MachineFunction *MF = MI0->getParent()->getParent();
- const MachineConstantPool *MCP = MF->getConstantPool();
- int CPI0 = MO0.getIndex();
- int CPI1 = MO1.getIndex();
- const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
- const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
- ARMConstantPoolValue *ACPV0 =
- static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
- ARMConstantPoolValue *ACPV1 =
- static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
- return ACPV0->hasSameValue(ACPV1);
- }
-
- return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
-}
-
- /// getInstrPredicate - If the instruction is predicated, returns its predicate
- /// condition; otherwise returns AL. It also returns the condition code
-/// register by reference.
-ARMCC::CondCodes
-llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
- int PIdx = MI->findFirstPredOperandIdx();
- if (PIdx == -1) {
- PredReg = 0;
- return ARMCC::AL;
- }
-
- PredReg = MI->getOperand(PIdx+1).getReg();
- return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
-}
-
-
-int llvm::getMatchingCondBranchOpcode(int Opc) {
- if (Opc == ARM::B)
- return ARM::Bcc;
- else if (Opc == ARM::tB)
- return ARM::tBcc;
- else if (Opc == ARM::t2B)
- return ARM::t2Bcc;
-
- llvm_unreachable("Unknown unconditional branch opcode!");
- return 0;
-}
-
-
-void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI, DebugLoc dl,
- unsigned DestReg, unsigned BaseReg, int NumBytes,
- ARMCC::CondCodes Pred, unsigned PredReg,
- const ARMBaseInstrInfo &TII) {
- bool isSub = NumBytes < 0;
- if (isSub) NumBytes = -NumBytes;
-
- while (NumBytes) {
- unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
- unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
- assert(ThisVal && "Didn't extract field correctly");
-
- // We will handle these bits from the offset, so clear them.
- NumBytes &= ~ThisVal;
-
- assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");
-
- // Build the new ADD / SUB.
- unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
- BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
- .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
- .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
- BaseReg = DestReg;
- }
-}
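The loop above peels one ARM shifter-operand-sized chunk off NumBytes per iteration: every ADDri/SUBri immediate must be an 8-bit value rotated right by an even amount, so an arbitrary constant may need several ADDs or SUBs. A minimal standalone sketch of that decomposition, in plain C++ rather than the ARM_AM helpers (it uses the GCC/Clang __builtin_ctz and ignores the rotate wrap-around case):

#include <cstdint>
#include <cstdio>
#include <vector>

// Split Imm into pieces, each an 8-bit field starting at an even bit
// position (the no-wrap-around subset of ARM rotated immediates).
static std::vector<uint32_t> splitSOImm(uint32_t Imm) {
  std::vector<uint32_t> Pieces;
  while (Imm) {
    unsigned Start = __builtin_ctz(Imm) & ~1u;        // even bit position
    uint32_t Piece = Imm & (uint32_t(0xFF) << Start); // grab up to 8 bits
    Pieces.push_back(Piece);
    Imm &= ~Piece;                                    // handled; clear them
  }
  return Pieces;
}

int main() {
  // 0x12345 -> 0x45 + 0x2300 + 0x10000: three adds instead of one.
  for (uint32_t P : splitSOImm(0x12345))
    std::printf("0x%x ", P);
  std::printf("\n");
  return 0;
}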
-
-bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
- unsigned FrameReg, int &Offset,
- const ARMBaseInstrInfo &TII) {
- unsigned Opcode = MI.getOpcode();
- const TargetInstrDesc &Desc = MI.getDesc();
- unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
- bool isSub = false;
-
- // Memory operands in inline assembly always use AddrMode2.
- if (Opcode == ARM::INLINEASM)
- AddrMode = ARMII::AddrMode2;
-
- if (Opcode == ARM::ADDri) {
- Offset += MI.getOperand(FrameRegIdx+1).getImm();
- if (Offset == 0) {
- // Turn it into a move.
- MI.setDesc(TII.get(ARM::MOVr));
- MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
- MI.RemoveOperand(FrameRegIdx+1);
- Offset = 0;
- return true;
- } else if (Offset < 0) {
- Offset = -Offset;
- isSub = true;
- MI.setDesc(TII.get(ARM::SUBri));
- }
-
- // Common case: small offset, fits into instruction.
- if (ARM_AM::getSOImmVal(Offset) != -1) {
- // Replace the FrameIndex with sp / fp
- MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
- MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
- Offset = 0;
- return true;
- }
-
- // Otherwise, pull as much of the immediate into this ADDri/SUBri
- // as possible.
- unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
- unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);
-
- // We will handle these bits from the offset, so clear them.
- Offset &= ~ThisImmVal;
-
- // Get the properly encoded SOImmVal field.
- assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
- "Bit extraction didn't work?");
- MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
- } else {
- unsigned ImmIdx = 0;
- int InstrOffs = 0;
- unsigned NumBits = 0;
- unsigned Scale = 1;
- switch (AddrMode) {
- case ARMII::AddrMode2: {
- ImmIdx = FrameRegIdx+2;
- InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
- if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
- InstrOffs *= -1;
- NumBits = 12;
- break;
- }
- case ARMII::AddrMode3: {
- ImmIdx = FrameRegIdx+2;
- InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
- if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
- InstrOffs *= -1;
- NumBits = 8;
- break;
- }
- case ARMII::AddrMode4:
- case ARMII::AddrMode6:
- // Can't fold any offset even if it's zero.
- return false;
- case ARMII::AddrMode5: {
- ImmIdx = FrameRegIdx+1;
- InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
- if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
- InstrOffs *= -1;
- NumBits = 8;
- Scale = 4;
- break;
- }
- default:
- llvm_unreachable("Unsupported addressing mode!");
- break;
- }
-
- Offset += InstrOffs * Scale;
- assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
- if (Offset < 0) {
- Offset = -Offset;
- isSub = true;
- }
-
- // Attempt to fold the address computation if the opcode has offset bits.
- if (NumBits > 0) {
- // Common case: small offset, fits into instruction.
- MachineOperand &ImmOp = MI.getOperand(ImmIdx);
- int ImmedOffset = Offset / Scale;
- unsigned Mask = (1 << NumBits) - 1;
- if ((unsigned)Offset <= Mask * Scale) {
- // Replace the FrameIndex with sp
- MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
- if (isSub)
- ImmedOffset |= 1 << NumBits;
- ImmOp.ChangeToImmediate(ImmedOffset);
- Offset = 0;
- return true;
- }
-
- // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
- ImmedOffset = ImmedOffset & Mask;
- if (isSub)
- ImmedOffset |= 1 << NumBits;
- ImmOp.ChangeToImmediate(ImmedOffset);
- Offset &= ~(Mask*Scale);
- }
- }
-
- Offset = (isSub) ? -Offset : Offset;
- return Offset == 0;
-}
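For the non-ADDri/SUBri addressing modes handled above, the fold boils down to: divide the offset by the scale, keep the low NumBits bits, carry the sign in one extra bit, and hand any remainder back through Offset for a scratch register to absorb. A rough standalone sketch of that arithmetic (illustrative names only, not the LLVM API):

#include <cstdio>

// NumBits offset bits, an optional scale, sign carried in one extra bit.
struct FoldResult { bool Fits; unsigned EncodedImm; int LeftOver; };

static FoldResult foldOffset(int Offset, unsigned NumBits, unsigned Scale) {
  bool IsSub = Offset < 0;
  unsigned Abs = IsSub ? -Offset : Offset;
  unsigned Mask = (1u << NumBits) - 1;
  if (Abs <= Mask * Scale) {
    unsigned Imm = Abs / Scale;
    if (IsSub) Imm |= 1u << NumBits;            // "subtract" flag
    FoldResult R = { true, Imm, 0 };
    return R;
  }
  unsigned Imm = (Abs / Scale) & Mask;          // pull in what fits
  if (IsSub) Imm |= 1u << NumBits;
  int Rest = (int)(Abs & ~(Mask * Scale));      // left for a scratch register
  FoldResult R = { false, Imm, IsSub ? -Rest : Rest };
  return R;
}

int main() {
  FoldResult R = foldOffset(-260, 8, 1);        // AddrMode3-like: 8 bits, no scale
  std::printf("fits=%d imm=0x%x leftover=%d\n", R.Fits, R.EncodedImm, R.LeftOver);
  return 0;
}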
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/libclamav/c++/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
deleted file mode 100644
index 0194231..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
+++ /dev/null
@@ -1,375 +0,0 @@
-//===- ARMBaseInstrInfo.h - ARM Base Instruction Information ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Base ARM implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARMBASEINSTRUCTIONINFO_H
-#define ARMBASEINSTRUCTIONINFO_H
-
-#include "ARM.h"
-#include "ARMRegisterInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/Target/TargetInstrInfo.h"
-
-namespace llvm {
-
-/// ARMII - This namespace holds all of the target specific flags that
-/// instruction info tracks.
-///
-namespace ARMII {
- enum {
- //===------------------------------------------------------------------===//
- // Instruction Flags.
-
- //===------------------------------------------------------------------===//
- // This four-bit field describes the addressing mode used.
-
- AddrModeMask = 0xf,
- AddrModeNone = 0,
- AddrMode1 = 1,
- AddrMode2 = 2,
- AddrMode3 = 3,
- AddrMode4 = 4,
- AddrMode5 = 5,
- AddrMode6 = 6,
- AddrModeT1_1 = 7,
- AddrModeT1_2 = 8,
- AddrModeT1_4 = 9,
- AddrModeT1_s = 10, // i8 * 4 for pc and sp relative data
- AddrModeT2_i12 = 11,
- AddrModeT2_i8 = 12,
- AddrModeT2_so = 13,
- AddrModeT2_pc = 14, // +/- i12 for pc relative data
- AddrModeT2_i8s4 = 15, // i8 * 4
-
- // Size* - Flags to keep track of the size of an instruction.
- SizeShift = 4,
- SizeMask = 7 << SizeShift,
- SizeSpecial = 1, // 0 byte pseudo or special case.
- Size8Bytes = 2,
- Size4Bytes = 3,
- Size2Bytes = 4,
-
- // IndexMode - Unindexed, pre-indexed, or post-indexed. Only valid for load
- // and store ops
- IndexModeShift = 7,
- IndexModeMask = 3 << IndexModeShift,
- IndexModePre = 1,
- IndexModePost = 2,
-
- //===------------------------------------------------------------------===//
- // Instruction encoding formats.
- //
- FormShift = 9,
- FormMask = 0x3f << FormShift,
-
- // Pseudo instructions
- Pseudo = 0 << FormShift,
-
- // Multiply instructions
- MulFrm = 1 << FormShift,
-
- // Branch instructions
- BrFrm = 2 << FormShift,
- BrMiscFrm = 3 << FormShift,
-
- // Data Processing instructions
- DPFrm = 4 << FormShift,
- DPSoRegFrm = 5 << FormShift,
-
- // Load and Store
- LdFrm = 6 << FormShift,
- StFrm = 7 << FormShift,
- LdMiscFrm = 8 << FormShift,
- StMiscFrm = 9 << FormShift,
- LdStMulFrm = 10 << FormShift,
-
- LdStExFrm = 28 << FormShift,
-
- // Miscellaneous arithmetic instructions
- ArithMiscFrm = 11 << FormShift,
-
- // Extend instructions
- ExtFrm = 12 << FormShift,
-
- // VFP formats
- VFPUnaryFrm = 13 << FormShift,
- VFPBinaryFrm = 14 << FormShift,
- VFPConv1Frm = 15 << FormShift,
- VFPConv2Frm = 16 << FormShift,
- VFPConv3Frm = 17 << FormShift,
- VFPConv4Frm = 18 << FormShift,
- VFPConv5Frm = 19 << FormShift,
- VFPLdStFrm = 20 << FormShift,
- VFPLdStMulFrm = 21 << FormShift,
- VFPMiscFrm = 22 << FormShift,
-
- // Thumb format
- ThumbFrm = 23 << FormShift,
-
- // NEON format
- NEONFrm = 24 << FormShift,
- NEONGetLnFrm = 25 << FormShift,
- NEONSetLnFrm = 26 << FormShift,
- NEONDupFrm = 27 << FormShift,
-
- //===------------------------------------------------------------------===//
- // Misc flags.
-
- // UnaryDP - Indicates this is a unary data processing instruction, i.e.
- // it doesn't have an Rn operand.
- UnaryDP = 1 << 15,
-
- // Xform16Bit - Indicates this Thumb2 instruction may be transformed into
- // a 16-bit Thumb instruction if certain conditions are met.
- Xform16Bit = 1 << 16,
-
- //===------------------------------------------------------------------===//
- // Code domain.
- DomainShift = 17,
- DomainMask = 3 << DomainShift,
- DomainGeneral = 0 << DomainShift,
- DomainVFP = 1 << DomainShift,
- DomainNEON = 2 << DomainShift,
-
- //===------------------------------------------------------------------===//
- // Field shifts - such shifts are used to set fields while generating
- // machine instructions.
- M_BitShift = 5,
- ShiftImmShift = 5,
- ShiftShift = 7,
- N_BitShift = 7,
- ImmHiShift = 8,
- SoRotImmShift = 8,
- RegRsShift = 8,
- ExtRotImmShift = 10,
- RegRdLoShift = 12,
- RegRdShift = 12,
- RegRdHiShift = 16,
- RegRnShift = 16,
- S_BitShift = 20,
- W_BitShift = 21,
- AM3_I_BitShift = 22,
- D_BitShift = 22,
- U_BitShift = 23,
- P_BitShift = 24,
- I_BitShift = 25,
- CondShift = 28
- };
-
- /// Target Operand Flag enum.
- enum TOF {
- //===------------------------------------------------------------------===//
- // ARM Specific MachineOperand flags.
-
- MO_NO_FLAG,
-
- /// MO_LO16 - On a symbol operand, this represents a relocation containing
- /// lower 16 bit of the address. Used only via movw instruction.
- MO_LO16,
-
- /// MO_HI16 - On a symbol operand, this represents a relocation containing
- /// higher 16 bit of the address. Used only via movt instruction.
- MO_HI16
- };
-}
-
-class ARMBaseInstrInfo : public TargetInstrInfoImpl {
- const ARMSubtarget& Subtarget;
-protected:
- // Can be only subclassed.
- explicit ARMBaseInstrInfo(const ARMSubtarget &STI);
-public:
- // Return the non-pre/post incrementing version of 'Opc'. Return 0
- // if there is no such opcode.
- virtual unsigned getUnindexedOpcode(unsigned Opc) const =0;
-
- virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const;
-
- virtual const ARMBaseRegisterInfo &getRegisterInfo() const =0;
- const ARMSubtarget &getSubtarget() const { return Subtarget; }
-
- // Branch analysis.
- virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const;
- virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
- virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
-
- virtual
- bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
-
- // Predication support.
- bool isPredicated(const MachineInstr *MI) const {
- int PIdx = MI->findFirstPredOperandIdx();
- return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
- }
-
- ARMCC::CondCodes getPredicate(const MachineInstr *MI) const {
- int PIdx = MI->findFirstPredOperandIdx();
- return PIdx != -1 ? (ARMCC::CondCodes)MI->getOperand(PIdx).getImm()
- : ARMCC::AL;
- }
-
- virtual
- bool PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const;
-
- virtual
- bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const;
-
- virtual bool DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const;
-
- virtual bool isPredicable(MachineInstr *MI) const;
-
- /// GetInstSize - Returns the size of the specified MachineInstr.
- ///
- virtual unsigned GetInstSizeInBytes(const MachineInstr* MI) const;
-
- /// Return true if the instruction is a register to register move and return
- /// the source and dest operands and their sub-register indices by reference.
- virtual bool isMoveInstr(const MachineInstr &MI,
- unsigned &SrcReg, unsigned &DstReg,
- unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
-
- virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
- virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const;
-
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC) const;
-
- virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC) const;
-
- virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC) const;
-
- virtual bool canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const;
-
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
-
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const;
-
- virtual void reMaterialize(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SubIdx,
- const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const;
-
- MachineInstr *duplicate(MachineInstr *Orig, MachineFunction &MF) const;
-
- virtual bool produceSameValue(const MachineInstr *MI0,
- const MachineInstr *MI1) const;
-};
-
-static inline
-const MachineInstrBuilder &AddDefaultPred(const MachineInstrBuilder &MIB) {
- return MIB.addImm((int64_t)ARMCC::AL).addReg(0);
-}
-
-static inline
-const MachineInstrBuilder &AddDefaultCC(const MachineInstrBuilder &MIB) {
- return MIB.addReg(0);
-}
-
-static inline
-const MachineInstrBuilder &AddDefaultT1CC(const MachineInstrBuilder &MIB,
- bool isDead = false) {
- return MIB.addReg(ARM::CPSR, getDefRegState(true) | getDeadRegState(isDead));
-}
-
-static inline
-const MachineInstrBuilder &AddNoT1CC(const MachineInstrBuilder &MIB) {
- return MIB.addReg(0);
-}
-
-static inline
-bool isUncondBranchOpcode(int Opc) {
- return Opc == ARM::B || Opc == ARM::tB || Opc == ARM::t2B;
-}
-
-static inline
-bool isCondBranchOpcode(int Opc) {
- return Opc == ARM::Bcc || Opc == ARM::tBcc || Opc == ARM::t2Bcc;
-}
-
-static inline
-bool isJumpTableBranchOpcode(int Opc) {
- return Opc == ARM::BR_JTr || Opc == ARM::BR_JTm || Opc == ARM::BR_JTadd ||
- Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT;
-}
-
-static inline
-bool isIndirectBranchOpcode(int Opc) {
- return Opc == ARM::BRIND || Opc == ARM::MOVPCRX || Opc == ARM::tBRIND;
-}
-
- /// getInstrPredicate - If the instruction is predicated, returns its predicate
- /// condition; otherwise returns AL. It also returns the condition code
-/// register by reference.
-ARMCC::CondCodes getInstrPredicate(const MachineInstr *MI, unsigned &PredReg);
-
-int getMatchingCondBranchOpcode(int Opc);
-
-/// emitARMRegPlusImmediate / emitT2RegPlusImmediate - Emits a series of
- /// instructions to materialize a destreg = basereg + immediate in ARM / Thumb2
-/// code.
-void emitARMRegPlusImmediate(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI, DebugLoc dl,
- unsigned DestReg, unsigned BaseReg, int NumBytes,
- ARMCC::CondCodes Pred, unsigned PredReg,
- const ARMBaseInstrInfo &TII);
-
-void emitT2RegPlusImmediate(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI, DebugLoc dl,
- unsigned DestReg, unsigned BaseReg, int NumBytes,
- ARMCC::CondCodes Pred, unsigned PredReg,
- const ARMBaseInstrInfo &TII);
-
-
-/// rewriteARMFrameIndex / rewriteT2FrameIndex -
-/// Rewrite MI to access 'Offset' bytes from the FP. Return false if the
-/// offset could not be handled directly in MI, and return the left-over
-/// portion by reference.
-bool rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
- unsigned FrameReg, int &Offset,
- const ARMBaseInstrInfo &TII);
-
-bool rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
- unsigned FrameReg, int &Offset,
- const ARMBaseInstrInfo &TII);
-
-} // End llvm namespace
-
-#endif
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/libclamav/c++/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
deleted file mode 100644
index 2bd1be5..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ /dev/null
@@ -1,1505 +0,0 @@
-//===- ARMBaseRegisterInfo.cpp - ARM Register Information -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the base ARM implementation of TargetRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARM.h"
-#include "ARMAddressingModes.h"
-#include "ARMBaseInstrInfo.h"
-#include "ARMBaseRegisterInfo.h"
-#include "ARMInstrInfo.h"
-#include "ARMMachineFunctionInfo.h"
-#include "ARMSubtarget.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineLocation.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetFrameInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/CommandLine.h"
-using namespace llvm;
-
-static cl::opt<bool>
-ReuseFrameIndexVals("arm-reuse-frame-index-vals", cl::Hidden, cl::init(true),
- cl::desc("Reuse repeated frame index values"));
-
-unsigned ARMBaseRegisterInfo::getRegisterNumbering(unsigned RegEnum,
- bool *isSPVFP) {
- if (isSPVFP)
- *isSPVFP = false;
-
- using namespace ARM;
- switch (RegEnum) {
- default:
- llvm_unreachable("Unknown ARM register!");
- case R0: case D0: case Q0: return 0;
- case R1: case D1: case Q1: return 1;
- case R2: case D2: case Q2: return 2;
- case R3: case D3: case Q3: return 3;
- case R4: case D4: case Q4: return 4;
- case R5: case D5: case Q5: return 5;
- case R6: case D6: case Q6: return 6;
- case R7: case D7: case Q7: return 7;
- case R8: case D8: case Q8: return 8;
- case R9: case D9: case Q9: return 9;
- case R10: case D10: case Q10: return 10;
- case R11: case D11: case Q11: return 11;
- case R12: case D12: case Q12: return 12;
- case SP: case D13: case Q13: return 13;
- case LR: case D14: case Q14: return 14;
- case PC: case D15: case Q15: return 15;
-
- case D16: return 16;
- case D17: return 17;
- case D18: return 18;
- case D19: return 19;
- case D20: return 20;
- case D21: return 21;
- case D22: return 22;
- case D23: return 23;
- case D24: return 24;
- case D25: return 25;
- case D26: return 26;
- case D27: return 27;
- case D28: return 28;
- case D29: return 29;
- case D30: return 30;
- case D31: return 31;
-
- case S0: case S1: case S2: case S3:
- case S4: case S5: case S6: case S7:
- case S8: case S9: case S10: case S11:
- case S12: case S13: case S14: case S15:
- case S16: case S17: case S18: case S19:
- case S20: case S21: case S22: case S23:
- case S24: case S25: case S26: case S27:
- case S28: case S29: case S30: case S31: {
- if (isSPVFP)
- *isSPVFP = true;
- switch (RegEnum) {
- default: return 0; // Avoid compile time warning.
- case S0: return 0;
- case S1: return 1;
- case S2: return 2;
- case S3: return 3;
- case S4: return 4;
- case S5: return 5;
- case S6: return 6;
- case S7: return 7;
- case S8: return 8;
- case S9: return 9;
- case S10: return 10;
- case S11: return 11;
- case S12: return 12;
- case S13: return 13;
- case S14: return 14;
- case S15: return 15;
- case S16: return 16;
- case S17: return 17;
- case S18: return 18;
- case S19: return 19;
- case S20: return 20;
- case S21: return 21;
- case S22: return 22;
- case S23: return 23;
- case S24: return 24;
- case S25: return 25;
- case S26: return 26;
- case S27: return 27;
- case S28: return 28;
- case S29: return 29;
- case S30: return 30;
- case S31: return 31;
- }
- }
- }
-}
-
-ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
- const ARMSubtarget &sti)
- : ARMGenRegisterInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
- TII(tii), STI(sti),
- FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11) {
-}
-
-const unsigned*
-ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- static const unsigned CalleeSavedRegs[] = {
- ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8,
- ARM::R7, ARM::R6, ARM::R5, ARM::R4,
-
- ARM::D15, ARM::D14, ARM::D13, ARM::D12,
- ARM::D11, ARM::D10, ARM::D9, ARM::D8,
- 0
- };
-
- static const unsigned DarwinCalleeSavedRegs[] = {
- // Darwin ABI deviates from ARM standard ABI. R9 is not a callee-saved
- // register.
- ARM::LR, ARM::R7, ARM::R6, ARM::R5, ARM::R4,
- ARM::R11, ARM::R10, ARM::R8,
-
- ARM::D15, ARM::D14, ARM::D13, ARM::D12,
- ARM::D11, ARM::D10, ARM::D9, ARM::D8,
- 0
- };
- return STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
-}
-
-const TargetRegisterClass* const *
-ARMBaseRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
- static const TargetRegisterClass * const CalleeSavedRegClasses[] = {
- &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
- &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
- &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
-
- &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
- &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
- 0
- };
-
- static const TargetRegisterClass * const ThumbCalleeSavedRegClasses[] = {
- &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
- &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::tGPRRegClass,
- &ARM::tGPRRegClass,&ARM::tGPRRegClass,&ARM::tGPRRegClass,
-
- &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
- &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
- 0
- };
-
- static const TargetRegisterClass * const DarwinCalleeSavedRegClasses[] = {
- &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
- &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
- &ARM::GPRRegClass, &ARM::GPRRegClass,
-
- &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
- &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
- 0
- };
-
- static const TargetRegisterClass * const DarwinThumbCalleeSavedRegClasses[] ={
- &ARM::GPRRegClass, &ARM::tGPRRegClass, &ARM::tGPRRegClass,
- &ARM::tGPRRegClass, &ARM::tGPRRegClass, &ARM::GPRRegClass,
- &ARM::GPRRegClass, &ARM::GPRRegClass,
-
- &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
- &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
- 0
- };
-
- if (STI.isThumb1Only()) {
- return STI.isTargetDarwin()
- ? DarwinThumbCalleeSavedRegClasses : ThumbCalleeSavedRegClasses;
- }
- return STI.isTargetDarwin()
- ? DarwinCalleeSavedRegClasses : CalleeSavedRegClasses;
-}
-
-BitVector ARMBaseRegisterInfo::
-getReservedRegs(const MachineFunction &MF) const {
- // FIXME: avoid re-calculating this every time.
- BitVector Reserved(getNumRegs());
- Reserved.set(ARM::SP);
- Reserved.set(ARM::PC);
- if (STI.isTargetDarwin() || hasFP(MF))
- Reserved.set(FramePtr);
- // Some targets reserve R9.
- if (STI.isR9Reserved())
- Reserved.set(ARM::R9);
- return Reserved;
-}
-
-bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
- unsigned Reg) const {
- switch (Reg) {
- default: break;
- case ARM::SP:
- case ARM::PC:
- return true;
- case ARM::R7:
- case ARM::R11:
- if (FramePtr == Reg && (STI.isTargetDarwin() || hasFP(MF)))
- return true;
- break;
- case ARM::R9:
- return STI.isR9Reserved();
- }
-
- return false;
-}
-
-const TargetRegisterClass *
-ARMBaseRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
- const TargetRegisterClass *B,
- unsigned SubIdx) const {
- switch (SubIdx) {
- default: return 0;
- case 1:
- case 2:
- case 3:
- case 4:
- // S sub-registers.
- if (A->getSize() == 8) {
- if (B == &ARM::SPR_8RegClass)
- return &ARM::DPR_8RegClass;
- assert(B == &ARM::SPRRegClass && "Expecting SPR register class!");
- if (A == &ARM::DPR_8RegClass)
- return A;
- return &ARM::DPR_VFP2RegClass;
- }
-
- assert(A->getSize() == 16 && "Expecting a Q register class!");
- if (B == &ARM::SPR_8RegClass)
- return &ARM::QPR_8RegClass;
- return &ARM::QPR_VFP2RegClass;
- case 5:
- case 6:
- // D sub-registers.
- if (B == &ARM::DPR_VFP2RegClass)
- return &ARM::QPR_VFP2RegClass;
- if (B == &ARM::DPR_8RegClass)
- return &ARM::QPR_8RegClass;
- return A;
- }
- return 0;
-}
-
-const TargetRegisterClass *
-ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const {
- return ARM::GPRRegisterClass;
-}
-
-/// getAllocationOrder - Returns the register allocation order for a specified
-/// register class in the form of a pair of TargetRegisterClass iterators.
-std::pair<TargetRegisterClass::iterator,TargetRegisterClass::iterator>
-ARMBaseRegisterInfo::getAllocationOrder(const TargetRegisterClass *RC,
- unsigned HintType, unsigned HintReg,
- const MachineFunction &MF) const {
- // Alternative register allocation orders when favoring even / odd registers
- // of register pairs.
-
- // No FP, R9 is available.
- static const unsigned GPREven1[] = {
- ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, ARM::R10,
- ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7,
- ARM::R9, ARM::R11
- };
- static const unsigned GPROdd1[] = {
- ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R9, ARM::R11,
- ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
- ARM::R8, ARM::R10
- };
-
- // FP is R7, R9 is available.
- static const unsigned GPREven2[] = {
- ARM::R0, ARM::R2, ARM::R4, ARM::R8, ARM::R10,
- ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6,
- ARM::R9, ARM::R11
- };
- static const unsigned GPROdd2[] = {
- ARM::R1, ARM::R3, ARM::R5, ARM::R9, ARM::R11,
- ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
- ARM::R8, ARM::R10
- };
-
- // FP is R11, R9 is available.
- static const unsigned GPREven3[] = {
- ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8,
- ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7,
- ARM::R9
- };
- static const unsigned GPROdd3[] = {
- ARM::R1, ARM::R3, ARM::R5, ARM::R6, ARM::R9,
- ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R7,
- ARM::R8
- };
-
- // No FP, R9 is not available.
- static const unsigned GPREven4[] = {
- ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R10,
- ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8,
- ARM::R11
- };
- static const unsigned GPROdd4[] = {
- ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R11,
- ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
- ARM::R10
- };
-
- // FP is R7, R9 is not available.
- static const unsigned GPREven5[] = {
- ARM::R0, ARM::R2, ARM::R4, ARM::R10,
- ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, ARM::R8,
- ARM::R11
- };
- static const unsigned GPROdd5[] = {
- ARM::R1, ARM::R3, ARM::R5, ARM::R11,
- ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
- ARM::R10
- };
-
- // FP is R11, R9 is not available.
- static const unsigned GPREven6[] = {
- ARM::R0, ARM::R2, ARM::R4, ARM::R6,
- ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8
- };
- static const unsigned GPROdd6[] = {
- ARM::R1, ARM::R3, ARM::R5, ARM::R7,
- ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8
- };
-
-
- if (HintType == ARMRI::RegPairEven) {
- if (isPhysicalRegister(HintReg) && getRegisterPairEven(HintReg, MF) == 0)
- // It's no longer possible to fulfill this hint. Return the default
- // allocation order.
- return std::make_pair(RC->allocation_order_begin(MF),
- RC->allocation_order_end(MF));
-
- if (!STI.isTargetDarwin() && !hasFP(MF)) {
- if (!STI.isR9Reserved())
- return std::make_pair(GPREven1,
- GPREven1 + (sizeof(GPREven1)/sizeof(unsigned)));
- else
- return std::make_pair(GPREven4,
- GPREven4 + (sizeof(GPREven4)/sizeof(unsigned)));
- } else if (FramePtr == ARM::R7) {
- if (!STI.isR9Reserved())
- return std::make_pair(GPREven2,
- GPREven2 + (sizeof(GPREven2)/sizeof(unsigned)));
- else
- return std::make_pair(GPREven5,
- GPREven5 + (sizeof(GPREven5)/sizeof(unsigned)));
- } else { // FramePtr == ARM::R11
- if (!STI.isR9Reserved())
- return std::make_pair(GPREven3,
- GPREven3 + (sizeof(GPREven3)/sizeof(unsigned)));
- else
- return std::make_pair(GPREven6,
- GPREven6 + (sizeof(GPREven6)/sizeof(unsigned)));
- }
- } else if (HintType == ARMRI::RegPairOdd) {
- if (isPhysicalRegister(HintReg) && getRegisterPairOdd(HintReg, MF) == 0)
- // It's no longer possible to fulfill this hint. Return the default
- // allocation order.
- return std::make_pair(RC->allocation_order_begin(MF),
- RC->allocation_order_end(MF));
-
- if (!STI.isTargetDarwin() && !hasFP(MF)) {
- if (!STI.isR9Reserved())
- return std::make_pair(GPROdd1,
- GPROdd1 + (sizeof(GPROdd1)/sizeof(unsigned)));
- else
- return std::make_pair(GPROdd4,
- GPROdd4 + (sizeof(GPROdd4)/sizeof(unsigned)));
- } else if (FramePtr == ARM::R7) {
- if (!STI.isR9Reserved())
- return std::make_pair(GPROdd2,
- GPROdd2 + (sizeof(GPROdd2)/sizeof(unsigned)));
- else
- return std::make_pair(GPROdd5,
- GPROdd5 + (sizeof(GPROdd5)/sizeof(unsigned)));
- } else { // FramePtr == ARM::R11
- if (!STI.isR9Reserved())
- return std::make_pair(GPROdd3,
- GPROdd3 + (sizeof(GPROdd3)/sizeof(unsigned)));
- else
- return std::make_pair(GPROdd6,
- GPROdd6 + (sizeof(GPROdd6)/sizeof(unsigned)));
- }
- }
- return std::make_pair(RC->allocation_order_begin(MF),
- RC->allocation_order_end(MF));
-}
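The hinting logic above only ever chooses between a handful of static arrays and returns a begin/end pair over the chosen one; the even and odd tables differ solely in which registers are moved to the front. A tiny standalone sketch of that shape (plain pointers instead of the TargetRegisterClass iterators):

#include <cstdio>
#include <utility>

// Prefer even-numbered registers first, then everything else.
static const unsigned EvenFirst[] = { 0, 2, 4, 6, 1, 3, 5, 7 };

static std::pair<const unsigned*, const unsigned*> evenOrder() {
  return std::make_pair(EvenFirst,
                        EvenFirst + sizeof(EvenFirst) / sizeof(unsigned));
}

int main() {
  std::pair<const unsigned*, const unsigned*> P = evenOrder();
  for (const unsigned *I = P.first; I != P.second; ++I)
    std::printf("R%u ", *I);
  std::printf("\n");
  return 0;
}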
-
-/// ResolveRegAllocHint - Resolves the specified register allocation hint
-/// to a physical register. Returns the physical register if it is successful.
-unsigned
-ARMBaseRegisterInfo::ResolveRegAllocHint(unsigned Type, unsigned Reg,
- const MachineFunction &MF) const {
- if (Reg == 0 || !isPhysicalRegister(Reg))
- return 0;
- if (Type == 0)
- return Reg;
- else if (Type == (unsigned)ARMRI::RegPairOdd)
- // Odd register.
- return getRegisterPairOdd(Reg, MF);
- else if (Type == (unsigned)ARMRI::RegPairEven)
- // Even register.
- return getRegisterPairEven(Reg, MF);
- return 0;
-}
-
-void
-ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
- MachineFunction &MF) const {
- MachineRegisterInfo *MRI = &MF.getRegInfo();
- std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
- if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
- Hint.first == (unsigned)ARMRI::RegPairEven) &&
- Hint.second && TargetRegisterInfo::isVirtualRegister(Hint.second)) {
- // If 'Reg' is one of the even / odd register pair and it's now changed
- // (e.g. coalesced) into a different register, the other register of the
- // pair's allocation hint must be updated to reflect the relationship
- // change.
- unsigned OtherReg = Hint.second;
- Hint = MRI->getRegAllocationHint(OtherReg);
- if (Hint.second == Reg)
- // Make sure the pair has not already divorced.
- MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
- }
-}
-
-/// hasFP - Return true if the specified function should have a dedicated frame
-/// pointer register. This is true if the function has variable sized allocas
-/// or if frame pointer elimination is disabled.
-///
-bool ARMBaseRegisterInfo::hasFP(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- return ((NoFramePointerElim && MFI->hasCalls())||
- needsStackRealignment(MF) ||
- MFI->hasVarSizedObjects() ||
- MFI->isFrameAddressTaken());
-}
-
-bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- return (RealignStack &&
- !AFI->isThumb1OnlyFunction() &&
- !MFI->hasVarSizedObjects());
-}
-
-bool ARMBaseRegisterInfo::
-needsStackRealignment(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned StackAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
- return (RealignStack &&
- !AFI->isThumb1OnlyFunction() &&
- (MFI->getMaxAlignment() > StackAlign) &&
- !MFI->hasVarSizedObjects());
-}
-
-bool ARMBaseRegisterInfo::
-cannotEliminateFrame(const MachineFunction &MF) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- if (NoFramePointerElim && MFI->hasCalls())
- return true;
- return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
- || needsStackRealignment(MF);
-}
-
-/// estimateStackSize - Estimate and return the size of the frame.
-static unsigned estimateStackSize(MachineFunction &MF) {
- const MachineFrameInfo *FFI = MF.getFrameInfo();
- int Offset = 0;
- for (int i = FFI->getObjectIndexBegin(); i != 0; ++i) {
- int FixedOff = -FFI->getObjectOffset(i);
- if (FixedOff > Offset) Offset = FixedOff;
- }
- for (unsigned i = 0, e = FFI->getObjectIndexEnd(); i != e; ++i) {
- if (FFI->isDeadObjectIndex(i))
- continue;
- Offset += FFI->getObjectSize(i);
- unsigned Align = FFI->getObjectAlignment(i);
- // Adjust to alignment boundary
- Offset = (Offset+Align-1)/Align*Align;
- }
- return (unsigned)Offset;
-}
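The (Offset+Align-1)/Align*Align expression used above is the usual round-up-to-alignment idiom; a two-line standalone check of it:

#include <cassert>
#include <cstdio>

// Round Offset up to the next multiple of Align (Align > 0).
static unsigned alignTo(unsigned Offset, unsigned Align) {
  return (Offset + Align - 1) / Align * Align;
}

int main() {
  assert(alignTo(13, 8) == 16 && alignTo(16, 8) == 16);
  std::printf("%u %u\n", alignTo(13, 8), alignTo(16, 8));
  return 0;
}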
-
-/// estimateRSStackSizeLimit - Look at each instruction that references stack
-/// frames and return the stack size limit beyond which some of these
-/// instructions will require a scratch register during their expansion later.
-unsigned
-ARMBaseRegisterInfo::estimateRSStackSizeLimit(MachineFunction &MF) const {
- unsigned Limit = (1 << 12) - 1;
- for (MachineFunction::iterator BB = MF.begin(),E = MF.end(); BB != E; ++BB) {
- for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
- I != E; ++I) {
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
- if (!I->getOperand(i).isFI()) continue;
-
- const TargetInstrDesc &Desc = TII.get(I->getOpcode());
- unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
- if (AddrMode == ARMII::AddrMode3 ||
- AddrMode == ARMII::AddrModeT2_i8)
- return (1 << 8) - 1;
-
- if (AddrMode == ARMII::AddrMode5 ||
- AddrMode == ARMII::AddrModeT2_i8s4)
- Limit = std::min(Limit, ((1U << 8) - 1) * 4);
-
- if (AddrMode == ARMII::AddrModeT2_i12 && hasFP(MF))
- // When the stack offset is negative, we will end up using
- // the i8 instructions instead.
- return (1 << 8) - 1;
-
- if (AddrMode == ARMII::AddrMode6)
- return 0;
- break; // At most one FI per instruction
- }
- }
- }
-
- return Limit;
-}
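The scan above effectively takes the minimum of the per-addressing-mode offset ranges it encounters: 8 bits for AddrMode3/T2_i8, 8 bits scaled by 4 for AddrMode5/T2_i8s4, nothing at all for AddrMode6, and 12 bits otherwise. A standalone sketch of that selection (assumed enum tags for illustration, not the LLVM ones):

#include <algorithm>
#include <cstdio>

// Stand-ins for the addressing-mode tags used in the scan above.
enum AddrMode { AM2, AM3, AM5, AM6, T2_i8, T2_i8s4, T2_i12 };

// Offset limit implied by one instruction's addressing mode (sketch only).
static unsigned offsetLimit(AddrMode AM, bool HasFP) {
  switch (AM) {
  case AM3: case T2_i8:   return (1u << 8) - 1;        // 8-bit offset
  case AM5: case T2_i8s4: return ((1u << 8) - 1) * 4;  // 8-bit offset, scaled by 4
  case T2_i12:            return HasFP ? (1u << 8) - 1 : (1u << 12) - 1;
  case AM6:               return 0;                    // no offset field at all
  default:                return (1u << 12) - 1;       // 12-bit offset (AddrMode2)
  }
}

int main() {
  AddrMode Seen[] = { AM2, AM5 };
  unsigned Limit = (1u << 12) - 1;
  for (unsigned i = 0; i != sizeof(Seen) / sizeof(Seen[0]); ++i)
    Limit = std::min(Limit, offsetLimit(Seen[i], false));
  std::printf("limit = %u\n", Limit);                  // prints 1020
  return 0;
}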
-
-void
-ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const {
- // This tells PEI to spill the FP as if it were any other callee-save register
- // to take advantage of the eliminateFrameIndex machinery. This also ensures it
- // is spilled in the order specified by getCalleeSavedRegs() to make it easier
- // to combine multiple loads / stores.
- bool CanEliminateFrame = true;
- bool CS1Spilled = false;
- bool LRSpilled = false;
- unsigned NumGPRSpills = 0;
- SmallVector<unsigned, 4> UnspilledCS1GPRs;
- SmallVector<unsigned, 4> UnspilledCS2GPRs;
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
-
- // Spill R4 if a Thumb2 function requires stack realignment - it will be used
- // as a scratch register.
- // FIXME: It would be better just to find a spare register here.
- if (needsStackRealignment(MF) &&
- AFI->isThumb2Function())
- MF.getRegInfo().setPhysRegUsed(ARM::R4);
-
- // Don't spill FP if the frame can be eliminated. This is determined
- // by scanning the callee-save registers to see if any is used.
- const unsigned *CSRegs = getCalleeSavedRegs();
- const TargetRegisterClass* const *CSRegClasses = getCalleeSavedRegClasses();
- for (unsigned i = 0; CSRegs[i]; ++i) {
- unsigned Reg = CSRegs[i];
- bool Spilled = false;
- if (MF.getRegInfo().isPhysRegUsed(Reg)) {
- AFI->setCSRegisterIsSpilled(Reg);
- Spilled = true;
- CanEliminateFrame = false;
- } else {
- // Check alias registers too.
- for (const unsigned *Aliases = getAliasSet(Reg); *Aliases; ++Aliases) {
- if (MF.getRegInfo().isPhysRegUsed(*Aliases)) {
- Spilled = true;
- CanEliminateFrame = false;
- }
- }
- }
-
- if (CSRegClasses[i] == ARM::GPRRegisterClass ||
- CSRegClasses[i] == ARM::tGPRRegisterClass) {
- if (Spilled) {
- NumGPRSpills++;
-
- if (!STI.isTargetDarwin()) {
- if (Reg == ARM::LR)
- LRSpilled = true;
- CS1Spilled = true;
- continue;
- }
-
- // Keep track of whether LR and any of R4, R5, R6, and R7 are spilled.
- switch (Reg) {
- case ARM::LR:
- LRSpilled = true;
- // Fallthrough
- case ARM::R4:
- case ARM::R5:
- case ARM::R6:
- case ARM::R7:
- CS1Spilled = true;
- break;
- default:
- break;
- }
- } else {
- if (!STI.isTargetDarwin()) {
- UnspilledCS1GPRs.push_back(Reg);
- continue;
- }
-
- switch (Reg) {
- case ARM::R4:
- case ARM::R5:
- case ARM::R6:
- case ARM::R7:
- case ARM::LR:
- UnspilledCS1GPRs.push_back(Reg);
- break;
- default:
- UnspilledCS2GPRs.push_back(Reg);
- break;
- }
- }
- }
- }
-
- bool ForceLRSpill = false;
- if (!LRSpilled && AFI->isThumb1OnlyFunction()) {
- unsigned FnSize = TII.GetFunctionSizeInBytes(MF);
- // Force LR to be spilled if the Thumb function size is > 2048. This enables
- // the use of BL to implement a far jump. If it turns out that it's not needed,
- // then the branch fix-up path will undo it.
- if (FnSize >= (1 << 11)) {
- CanEliminateFrame = false;
- ForceLRSpill = true;
- }
- }
-
- // If any of the stack slot references may be out of range of an immediate
- // offset, make sure a register (or a spill slot) is available for the
- // register scavenger. Note that if we're indexing off the frame pointer, the
- // effective stack size is 4 bytes larger since the FP points to the stack
- // slot of the previous FP.
- bool BigStack = RS &&
- estimateStackSize(MF) + (hasFP(MF) ? 4 : 0) >= estimateRSStackSizeLimit(MF);
-
- bool ExtraCSSpill = false;
- if (BigStack || !CanEliminateFrame || cannotEliminateFrame(MF)) {
- AFI->setHasStackFrame(true);
-
- // If LR is not spilled but at least one of R4, R5, R6, and R7 is spilled,
- // spill LR as well so we can fold BX_RET into the register restore (LDM).
- if (!LRSpilled && CS1Spilled) {
- MF.getRegInfo().setPhysRegUsed(ARM::LR);
- AFI->setCSRegisterIsSpilled(ARM::LR);
- NumGPRSpills++;
- UnspilledCS1GPRs.erase(std::find(UnspilledCS1GPRs.begin(),
- UnspilledCS1GPRs.end(), (unsigned)ARM::LR));
- ForceLRSpill = false;
- ExtraCSSpill = true;
- }
-
- // Darwin ABI requires FP to point to the stack slot that contains the
- // previous FP.
- if (STI.isTargetDarwin() || hasFP(MF)) {
- MF.getRegInfo().setPhysRegUsed(FramePtr);
- NumGPRSpills++;
- }
-
- // If the stack and doubles are 8-byte aligned and we are spilling an odd
- // number of GPRs, spill one extra callee-save GPR so we won't have to pad
- // between the integer and double callee-save areas.
- unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
- if (TargetAlign == 8 && (NumGPRSpills & 1)) {
- if (CS1Spilled && !UnspilledCS1GPRs.empty()) {
- for (unsigned i = 0, e = UnspilledCS1GPRs.size(); i != e; ++i) {
- unsigned Reg = UnspilledCS1GPRs[i];
- // Don't spill a high register if the function is Thumb1.
- if (!AFI->isThumb1OnlyFunction() ||
- isARMLowRegister(Reg) || Reg == ARM::LR) {
- MF.getRegInfo().setPhysRegUsed(Reg);
- AFI->setCSRegisterIsSpilled(Reg);
- if (!isReservedReg(MF, Reg))
- ExtraCSSpill = true;
- break;
- }
- }
- } else if (!UnspilledCS2GPRs.empty() &&
- !AFI->isThumb1OnlyFunction()) {
- unsigned Reg = UnspilledCS2GPRs.front();
- MF.getRegInfo().setPhysRegUsed(Reg);
- AFI->setCSRegisterIsSpilled(Reg);
- if (!isReservedReg(MF, Reg))
- ExtraCSSpill = true;
- }
- }
-
- // Estimate if we might need to scavenge a register at some point in order
- // to materialize a stack offset. If so, either spill one additional
- // callee-saved register or reserve a special spill slot to facilitate
- // register scavenging. Thumb1 needs a spill slot for stack pointer
- // adjustments also, even when the frame itself is small.
- if (BigStack && !ExtraCSSpill) {
- // If any non-reserved CS register isn't spilled, just spill one or two
- // extra. That should take care of it!
- unsigned NumExtras = TargetAlign / 4;
- SmallVector<unsigned, 2> Extras;
- while (NumExtras && !UnspilledCS1GPRs.empty()) {
- unsigned Reg = UnspilledCS1GPRs.back();
- UnspilledCS1GPRs.pop_back();
- if (!isReservedReg(MF, Reg)) {
- Extras.push_back(Reg);
- NumExtras--;
- }
- }
- // For non-Thumb1 functions, also check for hi-reg CS registers
- if (!AFI->isThumb1OnlyFunction()) {
- while (NumExtras && !UnspilledCS2GPRs.empty()) {
- unsigned Reg = UnspilledCS2GPRs.back();
- UnspilledCS2GPRs.pop_back();
- if (!isReservedReg(MF, Reg)) {
- Extras.push_back(Reg);
- NumExtras--;
- }
- }
- }
- if (Extras.size() && NumExtras == 0) {
- for (unsigned i = 0, e = Extras.size(); i != e; ++i) {
- MF.getRegInfo().setPhysRegUsed(Extras[i]);
- AFI->setCSRegisterIsSpilled(Extras[i]);
- }
- } else if (!AFI->isThumb1OnlyFunction()) {
- // note: Thumb1 functions spill to R12, not the stack. Reserve a slot
- // closest to SP or frame pointer.
- const TargetRegisterClass *RC = ARM::GPRRegisterClass;
- MachineFrameInfo *MFI = MF.getFrameInfo();
- RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
- RC->getAlignment(),
- false));
- }
- }
- }
-
- if (ForceLRSpill) {
- MF.getRegInfo().setPhysRegUsed(ARM::LR);
- AFI->setCSRegisterIsSpilled(ARM::LR);
- AFI->setLRIsSpilledForFarJump(true);
- }
-}
-
-unsigned ARMBaseRegisterInfo::getRARegister() const {
- return ARM::LR;
-}
-
-unsigned
-ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
- if (STI.isTargetDarwin() || hasFP(MF))
- return FramePtr;
- return ARM::SP;
-}
-
-int
-ARMBaseRegisterInfo::getFrameIndexReference(const MachineFunction &MF, int FI,
- unsigned &FrameReg) const {
- const MachineFrameInfo *MFI = MF.getFrameInfo();
- const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- int Offset = MFI->getObjectOffset(FI) + MFI->getStackSize();
- bool isFixed = MFI->isFixedObjectIndex(FI);
-
- FrameReg = ARM::SP;
- if (AFI->isGPRCalleeSavedArea1Frame(FI))
- Offset -= AFI->getGPRCalleeSavedArea1Offset();
- else if (AFI->isGPRCalleeSavedArea2Frame(FI))
- Offset -= AFI->getGPRCalleeSavedArea2Offset();
- else if (AFI->isDPRCalleeSavedAreaFrame(FI))
- Offset -= AFI->getDPRCalleeSavedAreaOffset();
- else if (needsStackRealignment(MF)) {
- // When dynamically realigning the stack, use the frame pointer for
- // parameters, and the stack pointer for locals.
- assert (hasFP(MF) && "dynamic stack realignment without a FP!");
- if (isFixed) {
- FrameReg = getFrameRegister(MF);
- Offset -= AFI->getFramePtrSpillOffset();
- }
- } else if (hasFP(MF) && AFI->hasStackFrame()) {
- if (isFixed || MFI->hasVarSizedObjects()) {
- // Use frame pointer to reference fixed objects unless this is a
- // frameless function.
- FrameReg = getFrameRegister(MF);
- Offset -= AFI->getFramePtrSpillOffset();
- } else if (AFI->isThumb2Function()) {
- // In Thumb2 mode, the negative offset is very limited.
- int FPOffset = Offset - AFI->getFramePtrSpillOffset();
- if (FPOffset >= -255 && FPOffset < 0) {
- FrameReg = getFrameRegister(MF);
- Offset = FPOffset;
- }
- }
- }
- return Offset;
-}
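Stripped of the callee-saved-area bookkeeping, the function above is choosing between SP and FP as the base register: FP for fixed objects when the stack is realigned, FP when a frame pointer exists and the object is fixed or variable-sized objects are present, FP for small negative Thumb2 offsets, and SP otherwise. A very rough sketch of that decision (booleans standing in for the real MachineFrameInfo/ARMFunctionInfo queries; not the LLVM API):

#include <cstdio>

enum BaseReg { UseSP = 0, UseFP = 1 };

static BaseReg pickBase(bool NeedsRealign, bool HasFP, bool HasStackFrame,
                        bool IsFixed, bool HasVarSized, bool IsThumb2,
                        int FPOffset) {
  if (NeedsRealign)                       // realigned stack: FP for fixed
    return IsFixed ? UseFP : UseSP;       // objects, SP for locals
  if (HasFP && HasStackFrame) {
    if (IsFixed || HasVarSized)
      return UseFP;
    if (IsThumb2 && FPOffset >= -255 && FPOffset < 0)
      return UseFP;                       // Thumb2's negative range is tiny
  }
  return UseSP;
}

int main() {
  // A fixed (incoming-argument) object in a function with a frame pointer.
  std::printf("%d\n", (int)pickBase(false, true, true, true, false, false, 0));
  return 0;
}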
-
-
-int
-ARMBaseRegisterInfo::getFrameIndexOffset(const MachineFunction &MF,
- int FI) const {
- unsigned FrameReg;
- return getFrameIndexReference(MF, FI, FrameReg);
-}
-
-unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
- llvm_unreachable("What is the exception register");
- return 0;
-}
-
-unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
- llvm_unreachable("What is the exception handler register");
- return 0;
-}
-
-int ARMBaseRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
- return ARMGenRegisterInfo::getDwarfRegNumFull(RegNum, 0);
-}
-
-unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
- const MachineFunction &MF) const {
- switch (Reg) {
- default: break;
- // Return 0 if either register of the pair is a special register.
- // So no R12, etc.
- case ARM::R1:
- return ARM::R0;
- case ARM::R3:
- return ARM::R2;
- case ARM::R5:
- return ARM::R4;
- case ARM::R7:
- return isReservedReg(MF, ARM::R7) ? 0 : ARM::R6;
- case ARM::R9:
- return isReservedReg(MF, ARM::R9) ? 0 :ARM::R8;
- case ARM::R11:
- return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10;
-
- case ARM::S1:
- return ARM::S0;
- case ARM::S3:
- return ARM::S2;
- case ARM::S5:
- return ARM::S4;
- case ARM::S7:
- return ARM::S6;
- case ARM::S9:
- return ARM::S8;
- case ARM::S11:
- return ARM::S10;
- case ARM::S13:
- return ARM::S12;
- case ARM::S15:
- return ARM::S14;
- case ARM::S17:
- return ARM::S16;
- case ARM::S19:
- return ARM::S18;
- case ARM::S21:
- return ARM::S20;
- case ARM::S23:
- return ARM::S22;
- case ARM::S25:
- return ARM::S24;
- case ARM::S27:
- return ARM::S26;
- case ARM::S29:
- return ARM::S28;
- case ARM::S31:
- return ARM::S30;
-
- case ARM::D1:
- return ARM::D0;
- case ARM::D3:
- return ARM::D2;
- case ARM::D5:
- return ARM::D4;
- case ARM::D7:
- return ARM::D6;
- case ARM::D9:
- return ARM::D8;
- case ARM::D11:
- return ARM::D10;
- case ARM::D13:
- return ARM::D12;
- case ARM::D15:
- return ARM::D14;
- case ARM::D17:
- return ARM::D16;
- case ARM::D19:
- return ARM::D18;
- case ARM::D21:
- return ARM::D20;
- case ARM::D23:
- return ARM::D22;
- case ARM::D25:
- return ARM::D24;
- case ARM::D27:
- return ARM::D26;
- case ARM::D29:
- return ARM::D28;
- case ARM::D31:
- return ARM::D30;
- }
-
- return 0;
-}
-
-unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
- const MachineFunction &MF) const {
- switch (Reg) {
- default: break;
- // Return 0 if either register of the pair is a special register.
- // So no R12, etc.
- case ARM::R0:
- return ARM::R1;
- case ARM::R2:
- return ARM::R3;
- case ARM::R4:
- return ARM::R5;
- case ARM::R6:
- return isReservedReg(MF, ARM::R7) ? 0 : ARM::R7;
- case ARM::R8:
- return isReservedReg(MF, ARM::R9) ? 0 :ARM::R9;
- case ARM::R10:
- return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11;
-
- case ARM::S0:
- return ARM::S1;
- case ARM::S2:
- return ARM::S3;
- case ARM::S4:
- return ARM::S5;
- case ARM::S6:
- return ARM::S7;
- case ARM::S8:
- return ARM::S9;
- case ARM::S10:
- return ARM::S11;
- case ARM::S12:
- return ARM::S13;
- case ARM::S14:
- return ARM::S15;
- case ARM::S16:
- return ARM::S17;
- case ARM::S18:
- return ARM::S19;
- case ARM::S20:
- return ARM::S21;
- case ARM::S22:
- return ARM::S23;
- case ARM::S24:
- return ARM::S25;
- case ARM::S26:
- return ARM::S27;
- case ARM::S28:
- return ARM::S29;
- case ARM::S30:
- return ARM::S31;
-
- case ARM::D0:
- return ARM::D1;
- case ARM::D2:
- return ARM::D3;
- case ARM::D4:
- return ARM::D5;
- case ARM::D6:
- return ARM::D7;
- case ARM::D8:
- return ARM::D9;
- case ARM::D10:
- return ARM::D11;
- case ARM::D12:
- return ARM::D13;
- case ARM::D14:
- return ARM::D15;
- case ARM::D16:
- return ARM::D17;
- case ARM::D18:
- return ARM::D19;
- case ARM::D20:
- return ARM::D21;
- case ARM::D22:
- return ARM::D23;
- case ARM::D24:
- return ARM::D25;
- case ARM::D26:
- return ARM::D27;
- case ARM::D28:
- return ARM::D29;
- case ARM::D30:
- return ARM::D31;
- }
-
- return 0;
-}
-
- /// emitLoadConstPool - Emits a load from the constant pool to materialize the
-/// specified immediate.
-void ARMBaseRegisterInfo::
-emitLoadConstPool(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- DebugLoc dl,
- unsigned DestReg, unsigned SubIdx, int Val,
- ARMCC::CondCodes Pred,
- unsigned PredReg) const {
- MachineFunction &MF = *MBB.getParent();
- MachineConstantPool *ConstantPool = MF.getConstantPool();
- Constant *C =
- ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
- unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
-
- BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
- .addReg(DestReg, getDefRegState(true), SubIdx)
- .addConstantPoolIndex(Idx)
- .addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
-}
-
-bool ARMBaseRegisterInfo::
-requiresRegisterScavenging(const MachineFunction &MF) const {
- return true;
-}
-
-bool ARMBaseRegisterInfo::
-requiresFrameIndexScavenging(const MachineFunction &MF) const {
- return true;
-}
-
-// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
-// not required, we reserve argument space for call sites in the function
-// immediately on entry to the current function. This eliminates the need for
-// add/sub sp brackets around call sites. Returns true if the call frame is
-// included as part of the stack frame.
-bool ARMBaseRegisterInfo::
-hasReservedCallFrame(MachineFunction &MF) const {
- const MachineFrameInfo *FFI = MF.getFrameInfo();
- unsigned CFSize = FFI->getMaxCallFrameSize();
- // It's not always a good idea to include the call frame as part of the
- // stack frame. ARM (especially Thumb) has only a small immediate offset range
- // for addressing the stack frame, so a large call frame can cause poor codegen
- // and may even make it impossible to scavenge a register.
- if (CFSize >= ((1 << 12) - 1) / 2) // Half of imm12
- return false;
-
- return !MF.getFrameInfo()->hasVarSizedObjects();
-}
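The threshold above is simply "half of the 12-bit immediate range"; in isolation the heuristic looks like this (standalone sketch, same constants as above):

#include <cstdio>

// Only reserve the call frame when the largest call frame stays well inside
// the 12-bit immediate range and there are no variable-sized objects.
static bool reserveCallFrame(unsigned MaxCallFrameSize, bool HasVarSizedObjects) {
  if (MaxCallFrameSize >= ((1u << 12) - 1) / 2)   // >= 2047 bytes: too big
    return false;
  return !HasVarSizedObjects;
}

int main() {
  std::printf("%d %d\n", (int)reserveCallFrame(512, false),
                         (int)reserveCallFrame(3000, false));   // prints 1 0
  return 0;
}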
-
-// canSimplifyCallFramePseudos - If there is a reserved call frame, the
-// call frame pseudos can be simplified. Unlike most targets, having a FP
-// is not sufficient here since we still may reference some objects via SP
-// even when FP is available in Thumb2 mode.
-bool ARMBaseRegisterInfo::
-canSimplifyCallFramePseudos(MachineFunction &MF) const {
- return hasReservedCallFrame(MF) || MF.getFrameInfo()->hasVarSizedObjects();
-}
-
-static void
-emitSPUpdate(bool isARM,
- MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
- DebugLoc dl, const ARMBaseInstrInfo &TII,
- int NumBytes,
- ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
- if (isARM)
- emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
- Pred, PredReg, TII);
- else
- emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
- Pred, PredReg, TII);
-}
-
-
-void ARMBaseRegisterInfo::
-eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
- if (!hasReservedCallFrame(MF)) {
- // If we have alloca, convert as follows:
- // ADJCALLSTACKDOWN -> sub, sp, sp, amount
- // ADJCALLSTACKUP -> add, sp, sp, amount
- MachineInstr *Old = I;
- DebugLoc dl = Old->getDebugLoc();
- unsigned Amount = Old->getOperand(0).getImm();
- if (Amount != 0) {
- // We need to keep the stack aligned properly. To do this, we round the
- // amount of space needed for the outgoing arguments up to the next
- // alignment boundary.
- unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
- Amount = (Amount+Align-1)/Align*Align;
-
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- assert(!AFI->isThumb1OnlyFunction() &&
- "This eliminateCallFramePseudoInstr does not support Thumb1!");
- bool isARM = !AFI->isThumbFunction();
-
- // Replace the pseudo instruction with a new instruction...
- unsigned Opc = Old->getOpcode();
- int PIdx = Old->findFirstPredOperandIdx();
- ARMCC::CondCodes Pred = (PIdx == -1)
- ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
- if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
- // Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
- unsigned PredReg = Old->getOperand(2).getReg();
- emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, Pred, PredReg);
- } else {
- // Note: PredReg is operand 3 for ADJCALLSTACKUP.
- unsigned PredReg = Old->getOperand(3).getReg();
- assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
- emitSPUpdate(isARM, MBB, I, dl, TII, Amount, Pred, PredReg);
- }
- }
- }
- MBB.erase(I);
-}
-
-unsigned
-ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj, int *Value,
- RegScavenger *RS) const {
- unsigned i = 0;
- MachineInstr &MI = *II;
- MachineBasicBlock &MBB = *MI.getParent();
- MachineFunction &MF = *MBB.getParent();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- assert(!AFI->isThumb1OnlyFunction() &&
- "This eliminateFrameIndex does not support Thumb1!");
-
- while (!MI.getOperand(i).isFI()) {
- ++i;
- assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
- }
-
- int FrameIndex = MI.getOperand(i).getIndex();
- unsigned FrameReg;
-
- int Offset = getFrameIndexReference(MF, FrameIndex, FrameReg);
- if (FrameReg != ARM::SP)
- SPAdj = 0;
- Offset += SPAdj;
-
- // Modify MI as necessary to handle as much of 'Offset' as possible
- bool Done = false;
- if (!AFI->isThumbFunction())
- Done = rewriteARMFrameIndex(MI, i, FrameReg, Offset, TII);
- else {
- assert(AFI->isThumb2Function());
- Done = rewriteT2FrameIndex(MI, i, FrameReg, Offset, TII);
- }
- if (Done)
- return 0;
-
- // If we get here, the immediate doesn't fit into the instruction. We folded
- // as much as possible above; handle the rest by providing a register that is
- // SP+LargeImm.
- assert((Offset ||
- (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
- (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
- "This code isn't needed if offset already handled!");
-
- unsigned ScratchReg = 0;
- int PIdx = MI.findFirstPredOperandIdx();
- ARMCC::CondCodes Pred = (PIdx == -1)
- ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
- unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
- if (Offset == 0)
- // Must be addrmode4/6.
- MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false);
- else {
- ScratchReg = MF.getRegInfo().createVirtualRegister(ARM::GPRRegisterClass);
- if (Value) *Value = Offset;
- if (!AFI->isThumbFunction())
- emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
- Offset, Pred, PredReg, TII);
- else {
- assert(AFI->isThumb2Function());
- emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
- Offset, Pred, PredReg, TII);
- }
- MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
- if (!ReuseFrameIndexVals)
- ScratchReg = 0;
- }
- return ScratchReg;
-}
-
-/// Move iterator past the next bunch of callee save load / store ops for
-/// the particular spill area (1: integer area 1, 2: integer area 2,
-/// 3: fp area, 0: don't care).
-static void movePastCSLoadStoreOps(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- int Opc1, int Opc2, unsigned Area,
- const ARMSubtarget &STI) {
- while (MBBI != MBB.end() &&
- ((MBBI->getOpcode() == Opc1) || (MBBI->getOpcode() == Opc2)) &&
- MBBI->getOperand(1).isFI()) {
- if (Area != 0) {
- bool Done = false;
- unsigned Category = 0;
- switch (MBBI->getOperand(0).getReg()) {
- case ARM::R4: case ARM::R5: case ARM::R6: case ARM::R7:
- case ARM::LR:
- Category = 1;
- break;
- case ARM::R8: case ARM::R9: case ARM::R10: case ARM::R11:
- Category = STI.isTargetDarwin() ? 2 : 1;
- break;
- case ARM::D8: case ARM::D9: case ARM::D10: case ARM::D11:
- case ARM::D12: case ARM::D13: case ARM::D14: case ARM::D15:
- Category = 3;
- break;
- default:
- Done = true;
- break;
- }
- if (Done || Category != Area)
- break;
- }
-
- ++MBBI;
- }
-}
-
-void ARMBaseRegisterInfo::
-emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front();
- MachineBasicBlock::iterator MBBI = MBB.begin();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- assert(!AFI->isThumb1OnlyFunction() &&
- "This emitPrologue does not support Thumb1!");
- bool isARM = !AFI->isThumbFunction();
- unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
- unsigned NumBytes = MFI->getStackSize();
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- DebugLoc dl = (MBBI != MBB.end() ?
- MBBI->getDebugLoc() : DebugLoc::getUnknownLoc());
-
- // Determine the size of each callee-save spill area and record which frame
- // index belongs to which callee-save spill area.
- unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
- int FramePtrSpillFI = 0;
-
- // Allocate the vararg register save area. This is not counted in NumBytes.
- if (VARegSaveSize)
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, -VARegSaveSize);
-
- if (!AFI->hasStackFrame()) {
- if (NumBytes != 0)
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
- return;
- }
-
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- int FI = CSI[i].getFrameIdx();
- switch (Reg) {
- case ARM::R4:
- case ARM::R5:
- case ARM::R6:
- case ARM::R7:
- case ARM::LR:
- if (Reg == FramePtr)
- FramePtrSpillFI = FI;
- AFI->addGPRCalleeSavedArea1Frame(FI);
- GPRCS1Size += 4;
- break;
- case ARM::R8:
- case ARM::R9:
- case ARM::R10:
- case ARM::R11:
- if (Reg == FramePtr)
- FramePtrSpillFI = FI;
- if (STI.isTargetDarwin()) {
- AFI->addGPRCalleeSavedArea2Frame(FI);
- GPRCS2Size += 4;
- } else {
- AFI->addGPRCalleeSavedArea1Frame(FI);
- GPRCS1Size += 4;
- }
- break;
- default:
- AFI->addDPRCalleeSavedAreaFrame(FI);
- DPRCSSize += 8;
- }
- }
-
- // Build the new SUBri to adjust SP for integer callee-save spill area 1.
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, -GPRCS1Size);
- movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, ARM::t2STRi12, 1, STI);
-
- // Set FP to point to the stack slot that contains the previous FP.
- // For Darwin, FP is R7, which has now been stored in spill area 1.
- // Otherwise, if this is not Darwin, all the callee-saved registers go
- // into spill area 1, including the FP in R11. In either case, it is
- // now safe to emit this assignment.
- if (STI.isTargetDarwin() || hasFP(MF)) {
- unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri : ARM::t2ADDri;
- MachineInstrBuilder MIB =
- BuildMI(MBB, MBBI, dl, TII.get(ADDriOpc), FramePtr)
- .addFrameIndex(FramePtrSpillFI).addImm(0);
- AddDefaultCC(AddDefaultPred(MIB));
- }
-
- // Build the new SUBri to adjust SP for integer callee-save spill area 2.
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, -GPRCS2Size);
-
- // Build the new SUBri to adjust SP for FP callee-save spill area.
- movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, ARM::t2STRi12, 2, STI);
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, -DPRCSSize);
-
- // Determine starting offsets of spill areas.
- unsigned DPRCSOffset = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
- unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
- unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
- if (STI.isTargetDarwin() || hasFP(MF))
- AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) +
- NumBytes);
- AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
- AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
- AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);
-
- movePastCSLoadStoreOps(MBB, MBBI, ARM::VSTRD, 0, 3, STI);
- NumBytes = DPRCSOffset;
- if (NumBytes) {
- // Adjust SP after all the callee-save spills.
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
- }
-
- if (STI.isTargetELF() && hasFP(MF)) {
- MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
- AFI->getFramePtrSpillOffset());
- }
-
- AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
- AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
- AFI->setDPRCalleeSavedAreaSize(DPRCSSize);
-
- // If we need dynamic stack realignment, do it here.
- if (needsStackRealignment(MF)) {
- unsigned MaxAlign = MFI->getMaxAlignment();
- assert (!AFI->isThumb1OnlyFunction());
- if (!AFI->isThumbFunction()) {
- // Emit bic sp, sp, MaxAlign
- AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
- TII.get(ARM::BICri), ARM::SP)
- .addReg(ARM::SP, RegState::Kill)
- .addImm(MaxAlign-1)));
- } else {
- // We cannot use sp as a source/dest register here, thus we're emitting the
- // following sequence:
- // mov r4, sp
- // bic r4, r4, MaxAlign
- // mov sp, r4
- // FIXME: It would be better just to find a spare register here.
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2tgpr), ARM::R4)
- .addReg(ARM::SP, RegState::Kill);
- AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
- TII.get(ARM::t2BICri), ARM::R4)
- .addReg(ARM::R4, RegState::Kill)
- .addImm(MaxAlign-1)));
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP)
- .addReg(ARM::R4, RegState::Kill);
- }
- }
-}
-
-static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
- for (unsigned i = 0; CSRegs[i]; ++i)
- if (Reg == CSRegs[i])
- return true;
- return false;
-}
-
-static bool isCSRestore(MachineInstr *MI,
- const ARMBaseInstrInfo &TII,
- const unsigned *CSRegs) {
- return ((MI->getOpcode() == (int)ARM::VLDRD ||
- MI->getOpcode() == (int)ARM::LDR ||
- MI->getOpcode() == (int)ARM::t2LDRi12) &&
- MI->getOperand(1).isFI() &&
- isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs));
-}
-
-void ARMBaseRegisterInfo::
-emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- assert(MBBI->getDesc().isReturn() &&
- "Can only insert epilog into returning blocks");
- DebugLoc dl = MBBI->getDebugLoc();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- assert(!AFI->isThumb1OnlyFunction() &&
- "This emitEpilogue does not support Thumb1!");
- bool isARM = !AFI->isThumbFunction();
-
- unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
- int NumBytes = (int)MFI->getStackSize();
-
- if (!AFI->hasStackFrame()) {
- if (NumBytes != 0)
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
- } else {
- // Unwind MBBI to point to first LDR / VLDRD.
- const unsigned *CSRegs = getCalleeSavedRegs();
- if (MBBI != MBB.begin()) {
- do
- --MBBI;
- while (MBBI != MBB.begin() && isCSRestore(MBBI, TII, CSRegs));
- if (!isCSRestore(MBBI, TII, CSRegs))
- ++MBBI;
- }
-
- // Move SP to start of FP callee save spill area.
- NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
- AFI->getGPRCalleeSavedArea2Size() +
- AFI->getDPRCalleeSavedAreaSize());
-
- // Darwin ABI requires FP to point to the stack slot that contains the
- // previous FP.
- bool HasFP = hasFP(MF);
- if ((STI.isTargetDarwin() && NumBytes) || HasFP) {
- NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
- // Reset SP based on frame pointer only if the stack frame extends beyond
- // frame pointer stack slot or target is ELF and the function has FP.
- if (HasFP ||
- AFI->getGPRCalleeSavedArea2Size() ||
- AFI->getDPRCalleeSavedAreaSize() ||
- AFI->getDPRCalleeSavedAreaOffset()) {
- if (NumBytes) {
- if (isARM)
- emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
- ARMCC::AL, 0, TII);
- else
- emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
- ARMCC::AL, 0, TII);
- } else {
- // Thumb2 or ARM.
- if (isARM)
- BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP)
- .addReg(FramePtr)
- .addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
- else
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), ARM::SP)
- .addReg(FramePtr);
- }
- }
- } else if (NumBytes)
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
-
- // Move SP to start of integer callee save spill area 2.
- movePastCSLoadStoreOps(MBB, MBBI, ARM::VLDRD, 0, 3, STI);
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getDPRCalleeSavedAreaSize());
-
- // Move SP to start of integer callee save spill area 1.
- movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, ARM::t2LDRi12, 2, STI);
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea2Size());
-
- // Move SP to SP upon entry to the function.
- movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, ARM::t2LDRi12, 1, STI);
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea1Size());
- }
-
- if (VARegSaveSize)
- emitSPUpdate(isARM, MBB, MBBI, dl, TII, VARegSaveSize);
-}
-
-#include "ARMGenRegisterInfo.inc"
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h b/libclamav/c++/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
deleted file mode 100644
index 64f6ff1..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
+++ /dev/null
@@ -1,164 +0,0 @@
-//===- ARMBaseRegisterInfo.h - ARM Register Information Impl ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the base ARM implementation of TargetRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARMBASEREGISTERINFO_H
-#define ARMBASEREGISTERINFO_H
-
-#include "ARM.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "ARMGenRegisterInfo.h.inc"
-
-namespace llvm {
- class ARMSubtarget;
- class ARMBaseInstrInfo;
- class Type;
-
-/// Register allocation hints.
-namespace ARMRI {
- enum {
- RegPairOdd = 1,
- RegPairEven = 2
- };
-}
-
-/// isARMLowRegister - Returns true if the register is a low register (r0-r7).
-///
-static inline bool isARMLowRegister(unsigned Reg) {
- using namespace ARM;
- switch (Reg) {
- case R0: case R1: case R2: case R3:
- case R4: case R5: case R6: case R7:
- return true;
- default:
- return false;
- }
-}
-
-struct ARMBaseRegisterInfo : public ARMGenRegisterInfo {
-protected:
- const ARMBaseInstrInfo &TII;
- const ARMSubtarget &STI;
-
- /// FramePtr - ARM physical register used as frame ptr.
- unsigned FramePtr;
-
- // Can be only subclassed.
- explicit ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
- const ARMSubtarget &STI);
-
- // Return the opcode that implements 'Op', or 0 if there is no such opcode.
- unsigned getOpcode(int Op) const;
-
-public:
- /// getRegisterNumbering - Given the enum value for some register, e.g.
- /// ARM::LR, return the number that it corresponds to (e.g. 14). It
- /// also returns true in isSPVFP if the register is a single precision
- /// VFP register.
- static unsigned getRegisterNumbering(unsigned RegEnum, bool *isSPVFP = 0);
-
- /// Code Generation virtual methods...
- const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
-
- const TargetRegisterClass* const*
- getCalleeSavedRegClasses(const MachineFunction *MF = 0) const;
-
- BitVector getReservedRegs(const MachineFunction &MF) const;
-
- /// getMatchingSuperRegClass - Return a subclass of the specified register
- /// class A so that each register in it has a sub-register of the
- /// specified sub-register index which is in the specified register class B.
- virtual const TargetRegisterClass *
- getMatchingSuperRegClass(const TargetRegisterClass *A,
- const TargetRegisterClass *B, unsigned Idx) const;
-
- const TargetRegisterClass *getPointerRegClass(unsigned Kind = 0) const;
-
- std::pair<TargetRegisterClass::iterator,TargetRegisterClass::iterator>
- getAllocationOrder(const TargetRegisterClass *RC,
- unsigned HintType, unsigned HintReg,
- const MachineFunction &MF) const;
-
- unsigned ResolveRegAllocHint(unsigned Type, unsigned Reg,
- const MachineFunction &MF) const;
-
- void UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
- MachineFunction &MF) const;
-
- bool hasFP(const MachineFunction &MF) const;
-
- bool canRealignStack(const MachineFunction &MF) const;
- bool needsStackRealignment(const MachineFunction &MF) const;
-
- bool cannotEliminateFrame(const MachineFunction &MF) const;
-
- void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS = NULL) const;
-
- // Debug information queries.
- unsigned getRARegister() const;
- unsigned getFrameRegister(const MachineFunction &MF) const;
- int getFrameIndexReference(const MachineFunction &MF, int FI,
- unsigned &FrameReg) const;
- int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
-
- // Exception handling queries.
- unsigned getEHExceptionRegister() const;
- unsigned getEHHandlerRegister() const;
-
- int getDwarfRegNum(unsigned RegNum, bool isEH) const;
-
- bool isLowRegister(unsigned Reg) const;
-
-
- /// emitLoadConstPool - Emits a load from constpool to materialize the
- /// specified immediate.
- virtual void emitLoadConstPool(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- DebugLoc dl,
- unsigned DestReg, unsigned SubIdx,
- int Val,
- ARMCC::CondCodes Pred = ARMCC::AL,
- unsigned PredReg = 0) const;
-
- /// Code Generation virtual methods...
- virtual bool isReservedReg(const MachineFunction &MF, unsigned Reg) const;
-
- virtual bool requiresRegisterScavenging(const MachineFunction &MF) const;
-
- virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const;
-
- virtual bool hasReservedCallFrame(MachineFunction &MF) const;
- virtual bool canSimplifyCallFramePseudos(MachineFunction &MF) const;
-
- virtual void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
-
- virtual unsigned eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj, int *Value = NULL,
- RegScavenger *RS = NULL) const;
-
- virtual void emitPrologue(MachineFunction &MF) const;
- virtual void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-
-private:
- unsigned estimateRSStackSizeLimit(MachineFunction &MF) const;
-
- unsigned getRegisterPairEven(unsigned Reg, const MachineFunction &MF) const;
-
- unsigned getRegisterPairOdd(unsigned Reg, const MachineFunction &MF) const;
-};
-
-} // end namespace llvm
-
-#endif
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMBuildAttrs.h b/libclamav/c++/llvm/lib/Target/ARM/ARMBuildAttrs.h
deleted file mode 100644
index 3b38375..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMBuildAttrs.h
+++ /dev/null
@@ -1,64 +0,0 @@
-//===-------- ARMBuildAttrs.h - ARM Build Attributes ------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains enumerations and support routines for ARM build attributes
-// as defined in ARM ABI addenda document (ABI release 2.07).
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef __TARGET_ARMBUILDATTRS_H__
-#define __TARGET_ARMBUILDATTRS_H__
-
-namespace ARMBuildAttrs {
- enum {
- File = 1,
- Section = 2,
- Symbol = 3,
- CPU_raw_name = 4,
- CPU_name = 5,
- CPU_arch = 6,
- CPU_arch_profile = 7,
- ARM_ISA_use = 8,
- THUMB_ISA_use = 9,
- VFP_arch = 10,
- WMMX_arch = 11,
- Advanced_SIMD_arch = 12,
- PCS_config = 13,
- ABI_PCS_R9_use = 14,
- ABI_PCS_RW_data = 15,
- ABI_PCS_RO_data = 16,
- ABI_PCS_GOT_use = 17,
- ABI_PCS_wchar_t = 18,
- ABI_FP_rounding = 19,
- ABI_FP_denormal = 20,
- ABI_FP_exceptions = 21,
- ABI_FP_user_exceptions = 22,
- ABI_FP_number_model = 23,
- ABI_align8_needed = 24,
- ABI_align8_preserved = 25,
- ABI_enum_size = 26,
- ABI_HardFP_use = 27,
- ABI_VFP_args = 28,
- ABI_WMMX_args = 29,
- ABI_optimization_goals = 30,
- ABI_FP_optimization_goals = 31,
- compatibility = 32,
- CPU_unaligned_access = 34,
- VFP_HP_extension = 36,
- ABI_FP_16bit_format = 38,
- nodefaults = 64,
- also_compatible_with = 65,
- T2EE_use = 66,
- conformance = 67,
- Virtualization_use = 68,
- MPextension_use = 70
- };
-}
-
-#endif // __TARGET_ARMBUILDATTRS_H__
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMCallingConv.td b/libclamav/c++/llvm/lib/Target/ARM/ARMCallingConv.td
deleted file mode 100644
index 8fdb07f..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMCallingConv.td
+++ /dev/null
@@ -1,132 +0,0 @@
-//===- ARMCallingConv.td - Calling Conventions for ARM ----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// This describes the calling conventions for the ARM architecture.
-//===----------------------------------------------------------------------===//
-
-/// CCIfSubtarget - Match if the current subtarget has a feature F.
-class CCIfSubtarget<string F, CCAction A>:
- CCIf<!strconcat("State.getTarget().getSubtarget<ARMSubtarget>().", F), A>;
-
-/// CCIfAlign - Match if the original alignment of the arg is Align.
-class CCIfAlign<string Align, CCAction A>:
- CCIf<!strconcat("ArgFlags.getOrigAlign() == ", Align), A>;
-
-//===----------------------------------------------------------------------===//
-// ARM APCS Calling Convention
-//===----------------------------------------------------------------------===//
-def CC_ARM_APCS : CallingConv<[
-
- CCIfType<[i8, i16], CCPromoteToType<i32>>,
-
- // Handle all vector types as either f64 or v2f64.
- CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
- CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
-
- // f64 and v2f64 are passed in adjacent GPRs, possibly split onto the stack
- CCIfType<[f64, v2f64], CCCustom<"CC_ARM_APCS_Custom_f64">>,
-
- CCIfType<[f32], CCBitConvertToType<i32>>,
- CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>>,
-
- CCIfType<[i32], CCAssignToStack<4, 4>>,
- CCIfType<[f64], CCAssignToStack<8, 4>>,
- CCIfType<[v2f64], CCAssignToStack<16, 4>>
-]>;
-
-def RetCC_ARM_APCS : CallingConv<[
- CCIfType<[f32], CCBitConvertToType<i32>>,
-
- // Handle all vector types as either f64 or v2f64.
- CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
- CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
-
- CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_APCS_Custom_f64">>,
-
- CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>>,
- CCIfType<[i64], CCAssignToRegWithShadow<[R0, R2], [R1, R3]>>
-]>;
-
-//===----------------------------------------------------------------------===//
-// ARM AAPCS (EABI) Calling Convention, common parts
-//===----------------------------------------------------------------------===//
-
-def CC_ARM_AAPCS_Common : CallingConv<[
-
- CCIfType<[i8, i16], CCPromoteToType<i32>>,
-
- // i64/f64 is passed in even pairs of GPRs
- // i64 is 8-aligned i32 here, so we may need to eat R1 as a pad register
- // (and the same is true for f64 if VFP is not enabled)
- CCIfType<[i32], CCIfAlign<"8", CCAssignToRegWithShadow<[R0, R2], [R0, R1]>>>,
- CCIfType<[i32], CCIf<"State.getNextStackOffset() == 0 &&"
- "ArgFlags.getOrigAlign() != 8",
- CCAssignToReg<[R0, R1, R2, R3]>>>,
-
- CCIfType<[i32], CCIfAlign<"8", CCAssignToStack<4, 8>>>,
- CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
- CCIfType<[f64], CCAssignToStack<8, 8>>,
- CCIfType<[v2f64], CCAssignToStack<16, 8>>
-]>;
-
-def RetCC_ARM_AAPCS_Common : CallingConv<[
- CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>>,
- CCIfType<[i64], CCAssignToRegWithShadow<[R0, R2], [R1, R3]>>
-]>;
-
-//===----------------------------------------------------------------------===//
-// ARM AAPCS (EABI) Calling Convention
-//===----------------------------------------------------------------------===//
-
-def CC_ARM_AAPCS : CallingConv<[
- // Handle all vector types as either f64 or v2f64.
- CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
- CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
-
- CCIfType<[f64, v2f64], CCCustom<"CC_ARM_AAPCS_Custom_f64">>,
- CCIfType<[f32], CCBitConvertToType<i32>>,
- CCDelegateTo<CC_ARM_AAPCS_Common>
-]>;
-
-def RetCC_ARM_AAPCS : CallingConv<[
- // Handle all vector types as either f64 or v2f64.
- CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
- CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
-
- CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_AAPCS_Custom_f64">>,
- CCIfType<[f32], CCBitConvertToType<i32>>,
- CCDelegateTo<RetCC_ARM_AAPCS_Common>
-]>;
-
-//===----------------------------------------------------------------------===//
-// ARM AAPCS-VFP (EABI) Calling Convention
-//===----------------------------------------------------------------------===//
-
-def CC_ARM_AAPCS_VFP : CallingConv<[
- // Handle all vector types as either f64 or v2f64.
- CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
- CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
-
- CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>,
- CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
- CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7, S8,
- S9, S10, S11, S12, S13, S14, S15]>>,
- CCDelegateTo<CC_ARM_AAPCS_Common>
-]>;
-
-def RetCC_ARM_AAPCS_VFP : CallingConv<[
- // Handle all vector types as either f64 or v2f64.
- CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
- CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
-
- CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>,
- CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
- CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7, S8,
- S9, S10, S11, S12, S13, S14, S15]>>,
- CCDelegateTo<RetCC_ARM_AAPCS_Common>
-]>;
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMCodeEmitter.cpp b/libclamav/c++/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
deleted file mode 100644
index 108a244..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
+++ /dev/null
@@ -1,1368 +0,0 @@
-//===-- ARM/ARMCodeEmitter.cpp - Convert ARM code to machine code ---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the pass that transforms the ARM machine instructions into
-// relocatable machine code.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "jit"
-#include "ARM.h"
-#include "ARMAddressingModes.h"
-#include "ARMConstantPoolValue.h"
-#include "ARMInstrInfo.h"
-#include "ARMRelocations.h"
-#include "ARMSubtarget.h"
-#include "ARMTargetMachine.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/PassManager.h"
-#include "llvm/CodeGen/JITCodeEmitter.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineJumpTableInfo.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#ifndef NDEBUG
-#include <iomanip>
-#endif
-using namespace llvm;
-
-STATISTIC(NumEmitted, "Number of machine instructions emitted");
-
-namespace {
-
- class ARMCodeEmitter : public MachineFunctionPass {
- ARMJITInfo *JTI;
- const ARMInstrInfo *II;
- const TargetData *TD;
- const ARMSubtarget *Subtarget;
- TargetMachine &TM;
- JITCodeEmitter &MCE;
- const std::vector<MachineConstantPoolEntry> *MCPEs;
- const std::vector<MachineJumpTableEntry> *MJTEs;
- bool IsPIC;
-
- void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<MachineModuleInfo>();
- MachineFunctionPass::getAnalysisUsage(AU);
- }
-
- static char ID;
- public:
- ARMCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce)
- : MachineFunctionPass(&ID), JTI(0), II((ARMInstrInfo*)tm.getInstrInfo()),
- TD(tm.getTargetData()), TM(tm),
- MCE(mce), MCPEs(0), MJTEs(0),
- IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
-
- /// getBinaryCodeForInstr - This function, generated by the
- /// CodeEmitterGenerator using TableGen, produces the binary encoding for
- /// machine instructions.
- unsigned getBinaryCodeForInstr(const MachineInstr &MI);
-
- bool runOnMachineFunction(MachineFunction &MF);
-
- virtual const char *getPassName() const {
- return "ARM Machine Code Emitter";
- }
-
- void emitInstruction(const MachineInstr &MI);
-
- private:
-
- void emitWordLE(unsigned Binary);
- void emitDWordLE(uint64_t Binary);
- void emitConstPoolInstruction(const MachineInstr &MI);
- void emitMOVi2piecesInstruction(const MachineInstr &MI);
- void emitLEApcrelJTInstruction(const MachineInstr &MI);
- void emitPseudoMoveInstruction(const MachineInstr &MI);
- void addPCLabel(unsigned LabelID);
- void emitPseudoInstruction(const MachineInstr &MI);
- unsigned getMachineSoRegOpValue(const MachineInstr &MI,
- const TargetInstrDesc &TID,
- const MachineOperand &MO,
- unsigned OpIdx);
-
- unsigned getMachineSoImmOpValue(unsigned SoImm);
-
- unsigned getAddrModeSBit(const MachineInstr &MI,
- const TargetInstrDesc &TID) const;
-
- void emitDataProcessingInstruction(const MachineInstr &MI,
- unsigned ImplicitRd = 0,
- unsigned ImplicitRn = 0);
-
- void emitLoadStoreInstruction(const MachineInstr &MI,
- unsigned ImplicitRd = 0,
- unsigned ImplicitRn = 0);
-
- void emitMiscLoadStoreInstruction(const MachineInstr &MI,
- unsigned ImplicitRn = 0);
-
- void emitLoadStoreMultipleInstruction(const MachineInstr &MI);
-
- void emitMulFrmInstruction(const MachineInstr &MI);
-
- void emitExtendInstruction(const MachineInstr &MI);
-
- void emitMiscArithInstruction(const MachineInstr &MI);
-
- void emitBranchInstruction(const MachineInstr &MI);
-
- void emitInlineJumpTable(unsigned JTIndex);
-
- void emitMiscBranchInstruction(const MachineInstr &MI);
-
- void emitVFPArithInstruction(const MachineInstr &MI);
-
- void emitVFPConversionInstruction(const MachineInstr &MI);
-
- void emitVFPLoadStoreInstruction(const MachineInstr &MI);
-
- void emitVFPLoadStoreMultipleInstruction(const MachineInstr &MI);
-
- void emitMiscInstruction(const MachineInstr &MI);
-
- /// getMachineOpValue - Return binary encoding of operand. If the machine
- /// operand requires relocation, record the relocation and return zero.
- unsigned getMachineOpValue(const MachineInstr &MI,const MachineOperand &MO);
- unsigned getMachineOpValue(const MachineInstr &MI, unsigned OpIdx) {
- return getMachineOpValue(MI, MI.getOperand(OpIdx));
- }
-
- /// getShiftOp - Return the shift opcode (bit[6:5]) of the immediate value.
- ///
- unsigned getShiftOp(unsigned Imm) const ;
-
- /// Routines that handle operands which add machine relocations which are
- /// fixed up by the relocation stage.
- void emitGlobalAddress(GlobalValue *GV, unsigned Reloc,
- bool MayNeedFarStub, bool Indirect,
- intptr_t ACPV = 0);
- void emitExternalSymbolAddress(const char *ES, unsigned Reloc);
- void emitConstPoolAddress(unsigned CPI, unsigned Reloc);
- void emitJumpTableAddress(unsigned JTIndex, unsigned Reloc);
- void emitMachineBasicBlock(MachineBasicBlock *BB, unsigned Reloc,
- intptr_t JTBase = 0);
- };
-}
-
-char ARMCodeEmitter::ID = 0;
-
-/// createARMJITCodeEmitterPass - Return a pass that emits the collected ARM
-/// code to the specified MCE object.
-FunctionPass *llvm::createARMJITCodeEmitterPass(ARMBaseTargetMachine &TM,
- JITCodeEmitter &JCE) {
- return new ARMCodeEmitter(TM, JCE);
-}
-
-bool ARMCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
- assert((MF.getTarget().getRelocationModel() != Reloc::Default ||
- MF.getTarget().getRelocationModel() != Reloc::Static) &&
- "JIT relocation model must be set to static or default!");
- JTI = ((ARMTargetMachine&)MF.getTarget()).getJITInfo();
- II = ((ARMTargetMachine&)MF.getTarget()).getInstrInfo();
- TD = ((ARMTargetMachine&)MF.getTarget()).getTargetData();
- Subtarget = &TM.getSubtarget<ARMSubtarget>();
- MCPEs = &MF.getConstantPool()->getConstants();
- MJTEs = 0;
- if (MF.getJumpTableInfo()) MJTEs = &MF.getJumpTableInfo()->getJumpTables();
- IsPIC = TM.getRelocationModel() == Reloc::PIC_;
- JTI->Initialize(MF, IsPIC);
- MCE.setModuleInfo(&getAnalysis<MachineModuleInfo>());
-
- do {
- DEBUG(errs() << "JITTing function '"
- << MF.getFunction()->getName() << "'\n");
- MCE.startFunction(MF);
- for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
- MBB != E; ++MBB) {
- MCE.StartMachineBasicBlock(MBB);
- for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
- I != E; ++I)
- emitInstruction(*I);
- }
- } while (MCE.finishFunction(MF));
-
- return false;
-}
-
-/// getShiftOp - Return the shift opcode (bit[6:5]) of the immediate value.
-///
-unsigned ARMCodeEmitter::getShiftOp(unsigned Imm) const {
- switch (ARM_AM::getAM2ShiftOpc(Imm)) {
- default: llvm_unreachable("Unknown shift opc!");
- case ARM_AM::asr: return 2;
- case ARM_AM::lsl: return 0;
- case ARM_AM::lsr: return 1;
- case ARM_AM::ror:
- case ARM_AM::rrx: return 3;
- }
- return 0;
-}
-
-/// getMachineOpValue - Return binary encoding of operand. If the machine
-/// operand requires relocation, record the relocation and return zero.
-unsigned ARMCodeEmitter::getMachineOpValue(const MachineInstr &MI,
- const MachineOperand &MO) {
- if (MO.isReg())
- return ARMRegisterInfo::getRegisterNumbering(MO.getReg());
- else if (MO.isImm())
- return static_cast<unsigned>(MO.getImm());
- else if (MO.isGlobal())
- emitGlobalAddress(MO.getGlobal(), ARM::reloc_arm_branch, true, false);
- else if (MO.isSymbol())
- emitExternalSymbolAddress(MO.getSymbolName(), ARM::reloc_arm_branch);
- else if (MO.isCPI()) {
- const TargetInstrDesc &TID = MI.getDesc();
- // For VFP load, the immediate offset is multiplied by 4.
- unsigned Reloc = ((TID.TSFlags & ARMII::FormMask) == ARMII::VFPLdStFrm)
- ? ARM::reloc_arm_vfp_cp_entry : ARM::reloc_arm_cp_entry;
- emitConstPoolAddress(MO.getIndex(), Reloc);
- } else if (MO.isJTI())
- emitJumpTableAddress(MO.getIndex(), ARM::reloc_arm_relative);
- else if (MO.isMBB())
- emitMachineBasicBlock(MO.getMBB(), ARM::reloc_arm_branch);
- else {
-#ifndef NDEBUG
- errs() << MO;
-#endif
- llvm_unreachable(0);
- }
- return 0;
-}
-
-/// emitGlobalAddress - Emit the specified address to the code stream.
-///
-void ARMCodeEmitter::emitGlobalAddress(GlobalValue *GV, unsigned Reloc,
- bool MayNeedFarStub, bool Indirect,
- intptr_t ACPV) {
- MachineRelocation MR = Indirect
- ? MachineRelocation::getIndirectSymbol(MCE.getCurrentPCOffset(), Reloc,
- GV, ACPV, MayNeedFarStub)
- : MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
- GV, ACPV, MayNeedFarStub);
- MCE.addRelocation(MR);
-}
-
-/// emitExternalSymbolAddress - Arrange for the address of an external symbol to
-/// be emitted to the current location in the function, and allow it to be PC
-/// relative.
-void ARMCodeEmitter::emitExternalSymbolAddress(const char *ES, unsigned Reloc) {
- MCE.addRelocation(MachineRelocation::getExtSym(MCE.getCurrentPCOffset(),
- Reloc, ES));
-}
-
-/// emitConstPoolAddress - Arrange for the address of a constant pool
-/// to be emitted to the current location in the function, and allow it to be PC
-/// relative.
-void ARMCodeEmitter::emitConstPoolAddress(unsigned CPI, unsigned Reloc) {
- // Tell JIT emitter we'll resolve the address.
- MCE.addRelocation(MachineRelocation::getConstPool(MCE.getCurrentPCOffset(),
- Reloc, CPI, 0, true));
-}
-
-/// emitJumpTableAddress - Arrange for the address of a jump table to
-/// be emitted to the current location in the function, and allow it to be PC
-/// relative.
-void ARMCodeEmitter::emitJumpTableAddress(unsigned JTIndex, unsigned Reloc) {
- MCE.addRelocation(MachineRelocation::getJumpTable(MCE.getCurrentPCOffset(),
- Reloc, JTIndex, 0, true));
-}
-
-/// emitMachineBasicBlock - Emit the address of the specified basic block.
-void ARMCodeEmitter::emitMachineBasicBlock(MachineBasicBlock *BB,
- unsigned Reloc, intptr_t JTBase) {
- MCE.addRelocation(MachineRelocation::getBB(MCE.getCurrentPCOffset(),
- Reloc, BB, JTBase));
-}
-
-void ARMCodeEmitter::emitWordLE(unsigned Binary) {
- DEBUG(errs() << " 0x";
- errs().write_hex(Binary) << "\n");
- MCE.emitWordLE(Binary);
-}
-
-void ARMCodeEmitter::emitDWordLE(uint64_t Binary) {
- DEBUG(errs() << " 0x";
- errs().write_hex(Binary) << "\n");
- MCE.emitDWordLE(Binary);
-}
-
-void ARMCodeEmitter::emitInstruction(const MachineInstr &MI) {
- DEBUG(errs() << "JIT: " << (void*)MCE.getCurrentPCValue() << ":\t" << MI);
-
- MCE.processDebugLoc(MI.getDebugLoc(), true);
-
- NumEmitted++; // Keep track of the # of mi's emitted
- switch (MI.getDesc().TSFlags & ARMII::FormMask) {
- default: {
- llvm_unreachable("Unhandled instruction encoding format!");
- break;
- }
- case ARMII::Pseudo:
- emitPseudoInstruction(MI);
- break;
- case ARMII::DPFrm:
- case ARMII::DPSoRegFrm:
- emitDataProcessingInstruction(MI);
- break;
- case ARMII::LdFrm:
- case ARMII::StFrm:
- emitLoadStoreInstruction(MI);
- break;
- case ARMII::LdMiscFrm:
- case ARMII::StMiscFrm:
- emitMiscLoadStoreInstruction(MI);
- break;
- case ARMII::LdStMulFrm:
- emitLoadStoreMultipleInstruction(MI);
- break;
- case ARMII::MulFrm:
- emitMulFrmInstruction(MI);
- break;
- case ARMII::ExtFrm:
- emitExtendInstruction(MI);
- break;
- case ARMII::ArithMiscFrm:
- emitMiscArithInstruction(MI);
- break;
- case ARMII::BrFrm:
- emitBranchInstruction(MI);
- break;
- case ARMII::BrMiscFrm:
- emitMiscBranchInstruction(MI);
- break;
- // VFP instructions.
- case ARMII::VFPUnaryFrm:
- case ARMII::VFPBinaryFrm:
- emitVFPArithInstruction(MI);
- break;
- case ARMII::VFPConv1Frm:
- case ARMII::VFPConv2Frm:
- case ARMII::VFPConv3Frm:
- case ARMII::VFPConv4Frm:
- case ARMII::VFPConv5Frm:
- emitVFPConversionInstruction(MI);
- break;
- case ARMII::VFPLdStFrm:
- emitVFPLoadStoreInstruction(MI);
- break;
- case ARMII::VFPLdStMulFrm:
- emitVFPLoadStoreMultipleInstruction(MI);
- break;
- case ARMII::VFPMiscFrm:
- emitMiscInstruction(MI);
- break;
- }
- MCE.processDebugLoc(MI.getDebugLoc(), false);
-}
-
-void ARMCodeEmitter::emitConstPoolInstruction(const MachineInstr &MI) {
- unsigned CPI = MI.getOperand(0).getImm(); // CP instruction index.
- unsigned CPIndex = MI.getOperand(1).getIndex(); // Actual cp entry index.
- const MachineConstantPoolEntry &MCPE = (*MCPEs)[CPIndex];
-
- // Remember the CONSTPOOL_ENTRY address for later relocation.
- JTI->addConstantPoolEntryAddr(CPI, MCE.getCurrentPCValue());
-
- // Emit constpool island entry. In most cases, the actual values will be
- // resolved and relocated after code emission.
- if (MCPE.isMachineConstantPoolEntry()) {
- ARMConstantPoolValue *ACPV =
- static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);
-
- DEBUG(errs() << " ** ARM constant pool #" << CPI << " @ "
- << (void*)MCE.getCurrentPCValue() << " " << *ACPV << '\n');
-
- assert(ACPV->isGlobalValue() && "unsupported constant pool value");
- GlobalValue *GV = ACPV->getGV();
- if (GV) {
- Reloc::Model RelocM = TM.getRelocationModel();
- emitGlobalAddress(GV, ARM::reloc_arm_machine_cp_entry,
- isa<Function>(GV),
- Subtarget->GVIsIndirectSymbol(GV, RelocM),
- (intptr_t)ACPV);
- } else {
- emitExternalSymbolAddress(ACPV->getSymbol(), ARM::reloc_arm_absolute);
- }
- emitWordLE(0);
- } else {
- Constant *CV = MCPE.Val.ConstVal;
-
- DEBUG({
- errs() << " ** Constant pool #" << CPI << " @ "
- << (void*)MCE.getCurrentPCValue() << " ";
- if (const Function *F = dyn_cast<Function>(CV))
- errs() << F->getName();
- else
- errs() << *CV;
- errs() << '\n';
- });
-
- if (GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
- emitGlobalAddress(GV, ARM::reloc_arm_absolute, isa<Function>(GV), false);
- emitWordLE(0);
- } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
- uint32_t Val = *(uint32_t*)CI->getValue().getRawData();
- emitWordLE(Val);
- } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
- if (CFP->getType()->isFloatTy())
- emitWordLE(CFP->getValueAPF().bitcastToAPInt().getZExtValue());
- else if (CFP->getType()->isDoubleTy())
- emitDWordLE(CFP->getValueAPF().bitcastToAPInt().getZExtValue());
- else {
- llvm_unreachable("Unable to handle this constantpool entry!");
- }
- } else {
- llvm_unreachable("Unable to handle this constantpool entry!");
- }
- }
-}
-
-void ARMCodeEmitter::emitMOVi2piecesInstruction(const MachineInstr &MI) {
- const MachineOperand &MO0 = MI.getOperand(0);
- const MachineOperand &MO1 = MI.getOperand(1);
- assert(MO1.isImm() && ARM_AM::isSOImmTwoPartVal(MO1.getImm()) &&
- "Not a valid so_imm value!");
- unsigned V1 = ARM_AM::getSOImmTwoPartFirst(MO1.getImm());
- unsigned V2 = ARM_AM::getSOImmTwoPartSecond(MO1.getImm());
-
- // Emit the 'mov' instruction.
- unsigned Binary = 0xd << 21; // mov: Insts{24-21} = 0b1101
-
- // Set the conditional execution predicate.
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- // Encode Rd.
- Binary |= getMachineOpValue(MI, MO0) << ARMII::RegRdShift;
-
- // Encode so_imm.
- // Set bit I(25) to identify this is the immediate form of <shifter_op>
- Binary |= 1 << ARMII::I_BitShift;
- Binary |= getMachineSoImmOpValue(V1);
- emitWordLE(Binary);
-
- // Now the 'orr' instruction.
- Binary = 0xc << 21; // orr: Insts{24-21} = 0b1100
-
- // Set the conditional execution predicate.
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- // Encode Rd.
- Binary |= getMachineOpValue(MI, MO0) << ARMII::RegRdShift;
-
- // Encode Rn.
- Binary |= getMachineOpValue(MI, MO0) << ARMII::RegRnShift;
-
- // Encode so_imm.
- // Set bit I(25) to identify this is the immediate form of <shifter_op>
- Binary |= 1 << ARMII::I_BitShift;
- Binary |= getMachineSoImmOpValue(V2);
- emitWordLE(Binary);
-}
-
-void ARMCodeEmitter::emitLEApcrelJTInstruction(const MachineInstr &MI) {
- // It's basically add r, pc, (LJTI - $+8)
-
- const TargetInstrDesc &TID = MI.getDesc();
-
- // Emit the 'add' instruction.
- unsigned Binary = 0x4 << 21; // add: Insts{24-21} = 0b0100
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- // Encode S bit if MI modifies CPSR.
- Binary |= getAddrModeSBit(MI, TID);
-
- // Encode Rd.
- Binary |= getMachineOpValue(MI, 0) << ARMII::RegRdShift;
-
- // Encode Rn which is PC.
- Binary |= ARMRegisterInfo::getRegisterNumbering(ARM::PC) << ARMII::RegRnShift;
-
- // Encode the displacement.
- Binary |= 1 << ARMII::I_BitShift;
- emitJumpTableAddress(MI.getOperand(1).getIndex(), ARM::reloc_arm_jt_base);
-
- emitWordLE(Binary);
-}
-
-void ARMCodeEmitter::emitPseudoMoveInstruction(const MachineInstr &MI) {
- unsigned Opcode = MI.getDesc().Opcode;
-
- // Part of the binary is determined by TableGen.
- unsigned Binary = getBinaryCodeForInstr(MI);
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- // Encode S bit if MI modifies CPSR.
- if (Opcode == ARM::MOVsrl_flag || Opcode == ARM::MOVsra_flag)
- Binary |= 1 << ARMII::S_BitShift;
-
- // Encode register def if there is one.
- Binary |= getMachineOpValue(MI, 0) << ARMII::RegRdShift;
-
- // Encode the shift operation.
- switch (Opcode) {
- default: break;
- case ARM::MOVrx:
- // rrx
- Binary |= 0x6 << 4;
- break;
- case ARM::MOVsrl_flag:
- // lsr #1
- Binary |= (0x2 << 4) | (1 << 7);
- break;
- case ARM::MOVsra_flag:
- // asr #1
- Binary |= (0x4 << 4) | (1 << 7);
- break;
- }
-
- // Encode register Rm.
- Binary |= getMachineOpValue(MI, 1);
-
- emitWordLE(Binary);
-}
-
-void ARMCodeEmitter::addPCLabel(unsigned LabelID) {
- DEBUG(errs() << " ** LPC" << LabelID << " @ "
- << (void*)MCE.getCurrentPCValue() << '\n');
- JTI->addPCLabelAddr(LabelID, MCE.getCurrentPCValue());
-}
-
-void ARMCodeEmitter::emitPseudoInstruction(const MachineInstr &MI) {
- unsigned Opcode = MI.getDesc().Opcode;
- switch (Opcode) {
- default:
- llvm_unreachable("ARMCodeEmitter::emitPseudoInstruction");
- // FIXME: Add support for MOVimm32.
- case TargetOpcode::INLINEASM: {
- // We allow inline assembler nodes with empty bodies - they can
- // implicitly define registers, which is ok for JIT.
- if (MI.getOperand(0).getSymbolName()[0]) {
- llvm_report_error("JIT does not support inline asm!");
- }
- break;
- }
- case TargetOpcode::DBG_LABEL:
- case TargetOpcode::EH_LABEL:
- MCE.emitLabel(MI.getOperand(0).getImm());
- break;
- case TargetOpcode::IMPLICIT_DEF:
- case TargetOpcode::KILL:
- // Do nothing.
- break;
- case ARM::CONSTPOOL_ENTRY:
- emitConstPoolInstruction(MI);
- break;
- case ARM::PICADD: {
- // Remember the address of the PC label for relocation later.
- addPCLabel(MI.getOperand(2).getImm());
- // PICADD is just an add instruction that implicitly reads pc.
- emitDataProcessingInstruction(MI, 0, ARM::PC);
- break;
- }
- case ARM::PICLDR:
- case ARM::PICLDRB:
- case ARM::PICSTR:
- case ARM::PICSTRB: {
- // Remember the address of the PC label for relocation later.
- addPCLabel(MI.getOperand(2).getImm());
- // These are just load / store instructions that implicitly read pc.
- emitLoadStoreInstruction(MI, 0, ARM::PC);
- break;
- }
- case ARM::PICLDRH:
- case ARM::PICLDRSH:
- case ARM::PICLDRSB:
- case ARM::PICSTRH: {
- // Remember the address of the PC label for relocation later.
- addPCLabel(MI.getOperand(2).getImm());
- // These are just load / store instructions that implicitly read pc.
- emitMiscLoadStoreInstruction(MI, ARM::PC);
- break;
- }
- case ARM::MOVi2pieces:
- // Two instructions to materialize a constant.
- emitMOVi2piecesInstruction(MI);
- break;
- case ARM::LEApcrelJT:
- // Materialize jumptable address.
- emitLEApcrelJTInstruction(MI);
- break;
- case ARM::MOVrx:
- case ARM::MOVsrl_flag:
- case ARM::MOVsra_flag:
- emitPseudoMoveInstruction(MI);
- break;
- }
-}
-
-unsigned ARMCodeEmitter::getMachineSoRegOpValue(
- const MachineInstr &MI,
- const TargetInstrDesc &TID,
- const MachineOperand &MO,
- unsigned OpIdx) {
- unsigned Binary = getMachineOpValue(MI, MO);
-
- const MachineOperand &MO1 = MI.getOperand(OpIdx + 1);
- const MachineOperand &MO2 = MI.getOperand(OpIdx + 2);
- ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(MO2.getImm());
-
- // Encode the shift opcode.
- unsigned SBits = 0;
- unsigned Rs = MO1.getReg();
- if (Rs) {
- // Set shift operand (bit[7:4]).
- // LSL - 0001
- // LSR - 0011
- // ASR - 0101
- // ROR - 0111
- // RRX - 0110 and bit[11:8] clear.
- switch (SOpc) {
- default: llvm_unreachable("Unknown shift opc!");
- case ARM_AM::lsl: SBits = 0x1; break;
- case ARM_AM::lsr: SBits = 0x3; break;
- case ARM_AM::asr: SBits = 0x5; break;
- case ARM_AM::ror: SBits = 0x7; break;
- case ARM_AM::rrx: SBits = 0x6; break;
- }
- } else {
- // Set shift operand (bit[6:4]).
- // LSL - 000
- // LSR - 010
- // ASR - 100
- // ROR - 110
- switch (SOpc) {
- default: llvm_unreachable("Unknown shift opc!");
- case ARM_AM::lsl: SBits = 0x0; break;
- case ARM_AM::lsr: SBits = 0x2; break;
- case ARM_AM::asr: SBits = 0x4; break;
- case ARM_AM::ror: SBits = 0x6; break;
- }
- }
- Binary |= SBits << 4;
- if (SOpc == ARM_AM::rrx)
- return Binary;
-
- // Encode the shift operation Rs or shift_imm (except rrx).
- if (Rs) {
- // Encode Rs bit[11:8].
- assert(ARM_AM::getSORegOffset(MO2.getImm()) == 0);
- return Binary |
- (ARMRegisterInfo::getRegisterNumbering(Rs) << ARMII::RegRsShift);
- }
-
- // Encode shift_imm bit[11:7].
- return Binary | ARM_AM::getSORegOffset(MO2.getImm()) << 7;
-}
-
-unsigned ARMCodeEmitter::getMachineSoImmOpValue(unsigned SoImm) {
- int SoImmVal = ARM_AM::getSOImmVal(SoImm);
- assert(SoImmVal != -1 && "Not a valid so_imm value!");
-
- // Encode rotate_imm.
- unsigned Binary = (ARM_AM::getSOImmValRot((unsigned)SoImmVal) >> 1)
- << ARMII::SoRotImmShift;
-
- // Encode immed_8.
- Binary |= ARM_AM::getSOImmValImm((unsigned)SoImmVal);
- return Binary;
-}
-
-unsigned ARMCodeEmitter::getAddrModeSBit(const MachineInstr &MI,
- const TargetInstrDesc &TID) const {
- for (unsigned i = MI.getNumOperands(), e = TID.getNumOperands(); i != e; --i){
- const MachineOperand &MO = MI.getOperand(i-1);
- if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR)
- return 1 << ARMII::S_BitShift;
- }
- return 0;
-}
-
-void ARMCodeEmitter::emitDataProcessingInstruction(
- const MachineInstr &MI,
- unsigned ImplicitRd,
- unsigned ImplicitRn) {
- const TargetInstrDesc &TID = MI.getDesc();
-
- if (TID.Opcode == ARM::BFC) {
- llvm_report_error("ARMv6t2 JIT is not yet supported.");
- }
-
- // Part of the binary is determined by TableGen.
- unsigned Binary = getBinaryCodeForInstr(MI);
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- // Encode S bit if MI modifies CPSR.
- Binary |= getAddrModeSBit(MI, TID);
-
- // Encode register def if there is one.
- unsigned NumDefs = TID.getNumDefs();
- unsigned OpIdx = 0;
- if (NumDefs)
- Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift;
- else if (ImplicitRd)
- // Special handling for implicit use (e.g. PC).
- Binary |= (ARMRegisterInfo::getRegisterNumbering(ImplicitRd)
- << ARMII::RegRdShift);
-
- // If this is a two-address operand, skip it. e.g. MOVCCr operand 1.
- if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
- ++OpIdx;
-
- // Encode first non-shifter register operand if there is one.
- bool isUnary = TID.TSFlags & ARMII::UnaryDP;
- if (!isUnary) {
- if (ImplicitRn)
- // Special handling for implicit use (e.g. PC).
- Binary |= (ARMRegisterInfo::getRegisterNumbering(ImplicitRn)
- << ARMII::RegRnShift);
- else {
- Binary |= getMachineOpValue(MI, OpIdx) << ARMII::RegRnShift;
- ++OpIdx;
- }
- }
-
- // Encode shifter operand.
- const MachineOperand &MO = MI.getOperand(OpIdx);
- if ((TID.TSFlags & ARMII::FormMask) == ARMII::DPSoRegFrm) {
- // Encode SoReg.
- emitWordLE(Binary | getMachineSoRegOpValue(MI, TID, MO, OpIdx));
- return;
- }
-
- if (MO.isReg()) {
- // Encode register Rm.
- emitWordLE(Binary | ARMRegisterInfo::getRegisterNumbering(MO.getReg()));
- return;
- }
-
- // Encode so_imm.
- Binary |= getMachineSoImmOpValue((unsigned)MO.getImm());
-
- emitWordLE(Binary);
-}
-
-void ARMCodeEmitter::emitLoadStoreInstruction(
- const MachineInstr &MI,
- unsigned ImplicitRd,
- unsigned ImplicitRn) {
- const TargetInstrDesc &TID = MI.getDesc();
- unsigned Form = TID.TSFlags & ARMII::FormMask;
- bool IsPrePost = (TID.TSFlags & ARMII::IndexModeMask) != 0;
-
- // Part of the binary is determined by TableGen.
- unsigned Binary = getBinaryCodeForInstr(MI);
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- unsigned OpIdx = 0;
-
- // Operand 0 of a pre- and post-indexed store is the address base
- // writeback. Skip it.
- bool Skipped = false;
- if (IsPrePost && Form == ARMII::StFrm) {
- ++OpIdx;
- Skipped = true;
- }
-
- // Set first operand
- if (ImplicitRd)
- // Special handling for implicit use (e.g. PC).
- Binary |= (ARMRegisterInfo::getRegisterNumbering(ImplicitRd)
- << ARMII::RegRdShift);
- else
- Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift;
-
- // Set second operand
- if (ImplicitRn)
- // Special handling for implicit use (e.g. PC).
- Binary |= (ARMRegisterInfo::getRegisterNumbering(ImplicitRn)
- << ARMII::RegRnShift);
- else
- Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRnShift;
-
- // If this is a two-address operand, skip it. e.g. LDR_PRE.
- if (!Skipped && TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
- ++OpIdx;
-
- const MachineOperand &MO2 = MI.getOperand(OpIdx);
- unsigned AM2Opc = (ImplicitRn == ARM::PC)
- ? 0 : MI.getOperand(OpIdx+1).getImm();
-
- // Set bit U(23) according to sign of immed value (positive or negative).
- Binary |= ((ARM_AM::getAM2Op(AM2Opc) == ARM_AM::add ? 1 : 0) <<
- ARMII::U_BitShift);
- if (!MO2.getReg()) { // is immediate
- if (ARM_AM::getAM2Offset(AM2Opc))
- // Set the value of offset_12 field
- Binary |= ARM_AM::getAM2Offset(AM2Opc);
- emitWordLE(Binary);
- return;
- }
-
- // Set bit I(25), because this is not an immediate encoding.
- Binary |= 1 << ARMII::I_BitShift;
- assert(TargetRegisterInfo::isPhysicalRegister(MO2.getReg()));
- // Set bit[3:0] to the corresponding Rm register
- Binary |= ARMRegisterInfo::getRegisterNumbering(MO2.getReg());
-
- // If this instr uses a scaled register offset/index, set the
- // shift_immed (bit[11:7]) and shift (bit[6:5]) fields.
- if (unsigned ShImm = ARM_AM::getAM2Offset(AM2Opc)) {
- Binary |= getShiftOp(AM2Opc) << ARMII::ShiftImmShift; // shift
- Binary |= ShImm << ARMII::ShiftShift; // shift_immed
- }
-
- emitWordLE(Binary);
-}
-
-void ARMCodeEmitter::emitMiscLoadStoreInstruction(const MachineInstr &MI,
- unsigned ImplicitRn) {
- const TargetInstrDesc &TID = MI.getDesc();
- unsigned Form = TID.TSFlags & ARMII::FormMask;
- bool IsPrePost = (TID.TSFlags & ARMII::IndexModeMask) != 0;
-
- // Part of the binary is determined by TableGen.
- unsigned Binary = getBinaryCodeForInstr(MI);
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- unsigned OpIdx = 0;
-
- // Operand 0 of a pre- and post-indexed store is the address base
- // writeback. Skip it.
- bool Skipped = false;
- if (IsPrePost && Form == ARMII::StMiscFrm) {
- ++OpIdx;
- Skipped = true;
- }
-
- // Set first operand
- Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift;
-
- // Skip LDRD and STRD's second operand.
- if (TID.Opcode == ARM::LDRD || TID.Opcode == ARM::STRD)
- ++OpIdx;
-
- // Set second operand
- if (ImplicitRn)
- // Special handling for implicit use (e.g. PC).
- Binary |= (ARMRegisterInfo::getRegisterNumbering(ImplicitRn)
- << ARMII::RegRnShift);
- else
- Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRnShift;
-
- // If this is a two-address operand, skip it. e.g. LDRH_POST.
- if (!Skipped && TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
- ++OpIdx;
-
- const MachineOperand &MO2 = MI.getOperand(OpIdx);
- unsigned AM3Opc = (ImplicitRn == ARM::PC)
- ? 0 : MI.getOperand(OpIdx+1).getImm();
-
- // Set bit U(23) according to sign of immed value (positive or negative)
- Binary |= ((ARM_AM::getAM3Op(AM3Opc) == ARM_AM::add ? 1 : 0) <<
- ARMII::U_BitShift);
-
- // If this instr is in register offset/index encoding, set bit[3:0]
- // to the corresponding Rm register.
- if (MO2.getReg()) {
- Binary |= ARMRegisterInfo::getRegisterNumbering(MO2.getReg());
- emitWordLE(Binary);
- return;
- }
-
- // This instr is in immediate offset/index encoding, set bit 22 to 1.
- Binary |= 1 << ARMII::AM3_I_BitShift;
- if (unsigned ImmOffs = ARM_AM::getAM3Offset(AM3Opc)) {
- // Set operands
- Binary |= (ImmOffs >> 4) << ARMII::ImmHiShift; // immedH
- Binary |= (ImmOffs & 0xF); // immedL
- }
-
- emitWordLE(Binary);
-}
-
-static unsigned getAddrModeUPBits(unsigned Mode) {
- unsigned Binary = 0;
-
- // Set addressing mode by modifying bits U(23) and P(24)
- // IA - Increment after - bit U = 1 and bit P = 0
- // IB - Increment before - bit U = 1 and bit P = 1
- // DA - Decrement after - bit U = 0 and bit P = 0
- // DB - Decrement before - bit U = 0 and bit P = 1
- switch (Mode) {
- default: llvm_unreachable("Unknown addressing sub-mode!");
- case ARM_AM::da: break;
- case ARM_AM::db: Binary |= 0x1 << ARMII::P_BitShift; break;
- case ARM_AM::ia: Binary |= 0x1 << ARMII::U_BitShift; break;
- case ARM_AM::ib: Binary |= 0x3 << ARMII::U_BitShift; break;
- }
-
- return Binary;
-}
-
-void ARMCodeEmitter::emitLoadStoreMultipleInstruction(
- const MachineInstr &MI) {
- // Part of the binary is determined by TableGen.
- unsigned Binary = getBinaryCodeForInstr(MI);
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- // Set base address operand
- Binary |= getMachineOpValue(MI, 0) << ARMII::RegRnShift;
-
- // Set addressing mode by modifying bits U(23) and P(24)
- const MachineOperand &MO = MI.getOperand(1);
- Binary |= getAddrModeUPBits(ARM_AM::getAM4SubMode(MO.getImm()));
-
- // Set bit W(21)
- if (ARM_AM::getAM4WBFlag(MO.getImm()))
- Binary |= 0x1 << ARMII::W_BitShift;
-
- // Set registers
- for (unsigned i = 5, e = MI.getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || MO.isImplicit())
- break;
- unsigned RegNum = ARMRegisterInfo::getRegisterNumbering(MO.getReg());
- assert(TargetRegisterInfo::isPhysicalRegister(MO.getReg()) &&
- RegNum < 16);
- Binary |= 0x1 << RegNum;
- }
-
- emitWordLE(Binary);
-}
-
-void ARMCodeEmitter::emitMulFrmInstruction(const MachineInstr &MI) {
- const TargetInstrDesc &TID = MI.getDesc();
-
- // Part of the binary is determined by TableGen.
- unsigned Binary = getBinaryCodeForInstr(MI);
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- // Encode S bit if MI modifies CPSR.
- Binary |= getAddrModeSBit(MI, TID);
-
- // 32x32->64bit operations have two destination registers. The number
- // of register definitions will tell us if that's what we're dealing with.
- unsigned OpIdx = 0;
- if (TID.getNumDefs() == 2)
- Binary |= getMachineOpValue (MI, OpIdx++) << ARMII::RegRdLoShift;
-
- // Encode Rd
- Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdHiShift;
-
- // Encode Rm
- Binary |= getMachineOpValue(MI, OpIdx++);
-
- // Encode Rs
- Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRsShift;
-
- // Many multiply instructions (e.g. MLA) have three src operands. Encode
- // the third one as Rn (for multiply, that's in the same position as RdLo).
- if (TID.getNumOperands() > OpIdx &&
- !TID.OpInfo[OpIdx].isPredicate() &&
- !TID.OpInfo[OpIdx].isOptionalDef())
- Binary |= getMachineOpValue(MI, OpIdx) << ARMII::RegRdLoShift;
-
- emitWordLE(Binary);
-}
-
-void ARMCodeEmitter::emitExtendInstruction(const MachineInstr &MI) {
- const TargetInstrDesc &TID = MI.getDesc();
-
- // Part of the binary is determined by TableGen.
- unsigned Binary = getBinaryCodeForInstr(MI);
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- unsigned OpIdx = 0;
-
- // Encode Rd
- Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift;
-
- const MachineOperand &MO1 = MI.getOperand(OpIdx++);
- const MachineOperand &MO2 = MI.getOperand(OpIdx);
- if (MO2.isReg()) {
- // Two register operand form.
- // Encode Rn.
- Binary |= getMachineOpValue(MI, MO1) << ARMII::RegRnShift;
-
- // Encode Rm.
- Binary |= getMachineOpValue(MI, MO2);
- ++OpIdx;
- } else {
- Binary |= getMachineOpValue(MI, MO1);
- }
-
- // Encode rot imm (0, 8, 16, or 24) if it has a rotate immediate operand.
- if (MI.getOperand(OpIdx).isImm() &&
- !TID.OpInfo[OpIdx].isPredicate() &&
- !TID.OpInfo[OpIdx].isOptionalDef())
- Binary |= (getMachineOpValue(MI, OpIdx) / 8) << ARMII::ExtRotImmShift;
-
- emitWordLE(Binary);
-}
-
-void ARMCodeEmitter::emitMiscArithInstruction(const MachineInstr &MI) {
- const TargetInstrDesc &TID = MI.getDesc();
-
- // Part of the binary is determined by TableGen.
- unsigned Binary = getBinaryCodeForInstr(MI);
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- unsigned OpIdx = 0;
-
- // Encode Rd
- Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift;
-
- const MachineOperand &MO = MI.getOperand(OpIdx++);
- if (OpIdx == TID.getNumOperands() ||
- TID.OpInfo[OpIdx].isPredicate() ||
- TID.OpInfo[OpIdx].isOptionalDef()) {
- // Encode Rm and it's done.
- Binary |= getMachineOpValue(MI, MO);
- emitWordLE(Binary);
- return;
- }
-
- // Encode Rn.
- Binary |= getMachineOpValue(MI, MO) << ARMII::RegRnShift;
-
- // Encode Rm.
- Binary |= getMachineOpValue(MI, OpIdx++);
-
- // Encode shift_imm.
- unsigned ShiftAmt = MI.getOperand(OpIdx).getImm();
- assert(ShiftAmt < 32 && "shift_imm range is 0 to 31!");
- Binary |= ShiftAmt << ARMII::ShiftShift;
-
- emitWordLE(Binary);
-}
-
-void ARMCodeEmitter::emitBranchInstruction(const MachineInstr &MI) {
- const TargetInstrDesc &TID = MI.getDesc();
-
- if (TID.Opcode == ARM::TPsoft) {
- llvm_unreachable("ARM::TPsoft FIXME"); // FIXME
- }
-
- // Part of the binary is determined by TableGen.
- unsigned Binary = getBinaryCodeForInstr(MI);
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- // Set signed_immed_24 field
- Binary |= getMachineOpValue(MI, 0);
-
- emitWordLE(Binary);
-}
-
-void ARMCodeEmitter::emitInlineJumpTable(unsigned JTIndex) {
- // Remember the base address of the inline jump table.
- uintptr_t JTBase = MCE.getCurrentPCValue();
- JTI->addJumpTableBaseAddr(JTIndex, JTBase);
- DEBUG(errs() << " ** Jump Table #" << JTIndex << " @ " << (void*)JTBase
- << '\n');
-
- // Now emit the jump table entries.
- const std::vector<MachineBasicBlock*> &MBBs = (*MJTEs)[JTIndex].MBBs;
- for (unsigned i = 0, e = MBBs.size(); i != e; ++i) {
- if (IsPIC)
- // DestBB address - JT base.
- emitMachineBasicBlock(MBBs[i], ARM::reloc_arm_pic_jt, JTBase);
- else
- // Absolute DestBB address.
- emitMachineBasicBlock(MBBs[i], ARM::reloc_arm_absolute);
- emitWordLE(0);
- }
-}
-
-void ARMCodeEmitter::emitMiscBranchInstruction(const MachineInstr &MI) {
- const TargetInstrDesc &TID = MI.getDesc();
-
- // Handle jump tables.
- if (TID.Opcode == ARM::BR_JTr || TID.Opcode == ARM::BR_JTadd) {
- // First emit a ldr pc, [] instruction.
- emitDataProcessingInstruction(MI, ARM::PC);
-
- // Then emit the inline jump table.
- unsigned JTIndex =
- (TID.Opcode == ARM::BR_JTr)
- ? MI.getOperand(1).getIndex() : MI.getOperand(2).getIndex();
- emitInlineJumpTable(JTIndex);
- return;
- } else if (TID.Opcode == ARM::BR_JTm) {
- // First emit a ldr pc, [] instruction.
- emitLoadStoreInstruction(MI, ARM::PC);
-
- // Then emit the inline jump table.
- emitInlineJumpTable(MI.getOperand(3).getIndex());
- return;
- }
-
- // Part of the binary is determined by TableGen.
- unsigned Binary = getBinaryCodeForInstr(MI);
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- if (TID.Opcode == ARM::BX_RET || TID.Opcode == ARM::MOVPCLR)
- // The return register is LR.
- Binary |= ARMRegisterInfo::getRegisterNumbering(ARM::LR);
- else
- // otherwise, set the return register
- Binary |= getMachineOpValue(MI, 0);
-
- emitWordLE(Binary);
-}
-
-static unsigned encodeVFPRd(const MachineInstr &MI, unsigned OpIdx) {
- unsigned RegD = MI.getOperand(OpIdx).getReg();
- unsigned Binary = 0;
- bool isSPVFP = false;
- RegD = ARMRegisterInfo::getRegisterNumbering(RegD, &isSPVFP);
- if (!isSPVFP)
- Binary |= RegD << ARMII::RegRdShift;
- else {
- Binary |= ((RegD & 0x1E) >> 1) << ARMII::RegRdShift;
- Binary |= (RegD & 0x01) << ARMII::D_BitShift;
- }
- return Binary;
-}
-
-static unsigned encodeVFPRn(const MachineInstr &MI, unsigned OpIdx) {
- unsigned RegN = MI.getOperand(OpIdx).getReg();
- unsigned Binary = 0;
- bool isSPVFP = false;
- RegN = ARMRegisterInfo::getRegisterNumbering(RegN, &isSPVFP);
- if (!isSPVFP)
- Binary |= RegN << ARMII::RegRnShift;
- else {
- Binary |= ((RegN & 0x1E) >> 1) << ARMII::RegRnShift;
- Binary |= (RegN & 0x01) << ARMII::N_BitShift;
- }
- return Binary;
-}
-
-static unsigned encodeVFPRm(const MachineInstr &MI, unsigned OpIdx) {
- unsigned RegM = MI.getOperand(OpIdx).getReg();
- unsigned Binary = 0;
- bool isSPVFP = false;
- RegM = ARMRegisterInfo::getRegisterNumbering(RegM, &isSPVFP);
- if (!isSPVFP)
- Binary |= RegM;
- else {
- Binary |= ((RegM & 0x1E) >> 1);
- Binary |= (RegM & 0x01) << ARMII::M_BitShift;
- }
- return Binary;
-}
-
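The three helpers above all perform the same split for single-precision operands: the low bit of S<n> goes into the D/N/M bit and the remaining four bits into the ordinary register field, while D0..D15 use the 4-bit field alone. A standalone sketch of just that split, on plain register numbers:

#include <cassert>
#include <utility>

// Returns {value for the 4-bit register field, extra D/N/M bit}.
static std::pair<unsigned, unsigned> splitSReg(unsigned SRegNum) {
  assert(SRegNum < 32 && "VFP has S0..S31");
  return { (SRegNum & 0x1E) >> 1, SRegNum & 0x01 };
}

int main() {
  std::pair<unsigned, unsigned> Fields = splitSReg(13);   // S13
  assert(Fields.first == 6 && Fields.second == 1);
  return 0;
}
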
-void ARMCodeEmitter::emitVFPArithInstruction(const MachineInstr &MI) {
- const TargetInstrDesc &TID = MI.getDesc();
-
- // Part of the binary is determined by TableGen.
- unsigned Binary = getBinaryCodeForInstr(MI);
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- unsigned OpIdx = 0;
- assert((Binary & ARMII::D_BitShift) == 0 &&
- (Binary & ARMII::N_BitShift) == 0 &&
- (Binary & ARMII::M_BitShift) == 0 && "VFP encoding bug!");
-
- // Encode Dd / Sd.
- Binary |= encodeVFPRd(MI, OpIdx++);
-
- // If this is a two-address operand, skip it, e.g. FMACD.
- if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
- ++OpIdx;
-
- // Encode Dn / Sn.
- if ((TID.TSFlags & ARMII::FormMask) == ARMII::VFPBinaryFrm)
- Binary |= encodeVFPRn(MI, OpIdx++);
-
- if (OpIdx == TID.getNumOperands() ||
- TID.OpInfo[OpIdx].isPredicate() ||
- TID.OpInfo[OpIdx].isOptionalDef()) {
- // FCMPEZD etc. has only one operand.
- emitWordLE(Binary);
- return;
- }
-
- // Encode Dm / Sm.
- Binary |= encodeVFPRm(MI, OpIdx);
-
- emitWordLE(Binary);
-}
-
-void ARMCodeEmitter::emitVFPConversionInstruction(
- const MachineInstr &MI) {
- const TargetInstrDesc &TID = MI.getDesc();
- unsigned Form = TID.TSFlags & ARMII::FormMask;
-
- // Part of the binary is determined by TableGen.
- unsigned Binary = getBinaryCodeForInstr(MI);
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- switch (Form) {
- default: break;
- case ARMII::VFPConv1Frm:
- case ARMII::VFPConv2Frm:
- case ARMII::VFPConv3Frm:
- // Encode Dd / Sd.
- Binary |= encodeVFPRd(MI, 0);
- break;
- case ARMII::VFPConv4Frm:
- // Encode Dn / Sn.
- Binary |= encodeVFPRn(MI, 0);
- break;
- case ARMII::VFPConv5Frm:
- // Encode Dm / Sm.
- Binary |= encodeVFPRm(MI, 0);
- break;
- }
-
- switch (Form) {
- default: break;
- case ARMII::VFPConv1Frm:
- // Encode Dm / Sm.
- Binary |= encodeVFPRm(MI, 1);
- break;
- case ARMII::VFPConv2Frm:
- case ARMII::VFPConv3Frm:
- // Encode Dn / Sn.
- Binary |= encodeVFPRn(MI, 1);
- break;
- case ARMII::VFPConv4Frm:
- case ARMII::VFPConv5Frm:
- // Encode Dd / Sd.
- Binary |= encodeVFPRd(MI, 1);
- break;
- }
-
- if (Form == ARMII::VFPConv5Frm)
- // Encode Dn / Sn.
- Binary |= encodeVFPRn(MI, 2);
- else if (Form == ARMII::VFPConv3Frm)
- // Encode Dm / Sm.
- Binary |= encodeVFPRm(MI, 2);
-
- emitWordLE(Binary);
-}
-
-void ARMCodeEmitter::emitVFPLoadStoreInstruction(const MachineInstr &MI) {
- // Part of the binary is determined by TableGen.
- unsigned Binary = getBinaryCodeForInstr(MI);
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- unsigned OpIdx = 0;
-
- // Encode Dd / Sd.
- Binary |= encodeVFPRd(MI, OpIdx++);
-
- // Encode address base.
- const MachineOperand &Base = MI.getOperand(OpIdx++);
- Binary |= getMachineOpValue(MI, Base) << ARMII::RegRnShift;
-
- // If there is a non-zero immediate offset, encode it.
- if (Base.isReg()) {
- const MachineOperand &Offset = MI.getOperand(OpIdx);
- if (unsigned ImmOffs = ARM_AM::getAM5Offset(Offset.getImm())) {
- if (ARM_AM::getAM5Op(Offset.getImm()) == ARM_AM::add)
- Binary |= 1 << ARMII::U_BitShift;
- Binary |= ImmOffs;
- emitWordLE(Binary);
- return;
- }
- }
-
- // If immediate offset is omitted, default to +0.
- Binary |= 1 << ARMII::U_BitShift;
-
- emitWordLE(Binary);
-}
-
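emitVFPLoadStoreInstruction above leans on the addressing-mode-5 helpers for the offset: a word-multiple byte offset stored as an 8-bit word count plus an add/subtract (U) bit. A self-contained sketch of that arithmetic, with the U bit assumed at bit 23:

#include <cassert>
#include <cstdint>
#include <cstdlib>

static std::uint32_t encodeAM5Offset(int ByteOffset) {
  assert(ByteOffset % 4 == 0 && "AM5 offsets are word multiples");
  unsigned Words = static_cast<unsigned>(std::abs(ByteOffset)) / 4;
  assert(Words < 256 && "AM5 offset field is 8 bits");
  std::uint32_t Bits = Words;
  if (ByteOffset >= 0)
    Bits |= 1u << 23;            // U = 1: add the offset to the base register
  return Bits;
}

int main() {
  assert(encodeAM5Offset(16)  == ((1u << 23) | 4));
  assert(encodeAM5Offset(-16) == 4u);
  return 0;
}
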
-void ARMCodeEmitter::emitVFPLoadStoreMultipleInstruction(
- const MachineInstr &MI) {
- // Part of the binary is determined by TableGen.
- unsigned Binary = getBinaryCodeForInstr(MI);
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- // Set base address operand
- Binary |= getMachineOpValue(MI, 0) << ARMII::RegRnShift;
-
- // Set addressing mode by modifying bits U(23) and P(24)
- const MachineOperand &MO = MI.getOperand(1);
- Binary |= getAddrModeUPBits(ARM_AM::getAM5SubMode(MO.getImm()));
-
- // Set bit W(21)
- if (ARM_AM::getAM5WBFlag(MO.getImm()))
- Binary |= 0x1 << ARMII::W_BitShift;
-
- // First register is encoded in Dd.
- Binary |= encodeVFPRd(MI, 5);
-
- // The number of registers is encoded in the offset field.
- unsigned NumRegs = 1;
- for (unsigned i = 6, e = MI.getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || MO.isImplicit())
- break;
- ++NumRegs;
- }
- Binary |= NumRegs * 2;
-
- emitWordLE(Binary);
-}
-
-void ARMCodeEmitter::emitMiscInstruction(const MachineInstr &MI) {
- // Part of the binary is determined by TableGen.
- unsigned Binary = getBinaryCodeForInstr(MI);
-
- // Set the conditional execution predicate
- Binary |= II->getPredicate(&MI) << ARMII::CondShift;
-
- emitWordLE(Binary);
-}
-
-#include "ARMGenCodeEmitter.inc"
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/libclamav/c++/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
deleted file mode 100644
index 8fa3c04..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ /dev/null
@@ -1,1825 +0,0 @@
-//===-- ARMConstantIslandPass.cpp - ARM constant islands --------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a pass that splits the constant pool up into 'islands'
-// which are scattered throughout the function. This is required due to the
-// limited pc-relative displacements that ARM has.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "arm-cp-islands"
-#include "ARM.h"
-#include "ARMAddressingModes.h"
-#include "ARMMachineFunctionInfo.h"
-#include "ARMInstrInfo.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineJumpTableInfo.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/CommandLine.h"
-#include <algorithm>
-using namespace llvm;
-
-STATISTIC(NumCPEs, "Number of constpool entries");
-STATISTIC(NumSplit, "Number of uncond branches inserted");
-STATISTIC(NumCBrFixed, "Number of cond branches fixed");
-STATISTIC(NumUBrFixed, "Number of uncond branches fixed");
-STATISTIC(NumTBs, "Number of table branches generated");
-STATISTIC(NumT2CPShrunk, "Number of Thumb2 constantpool instructions shrunk");
-STATISTIC(NumT2BrShrunk, "Number of Thumb2 immediate branches shrunk");
-STATISTIC(NumCBZ, "Number of CBZ / CBNZ formed");
-STATISTIC(NumJTMoved, "Number of jump table destination blocks moved");
-STATISTIC(NumJTInserted, "Number of jump table intermediate blocks inserted");
-
-
-static cl::opt<bool>
-AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(true),
- cl::desc("Adjust basic block layout to better use TB[BH]"));
-
-namespace {
- /// ARMConstantIslands - Due to limited PC-relative displacements, ARM
- /// requires constant pool entries to be scattered among the instructions
- /// inside a function. To do this, it completely ignores the normal LLVM
- /// constant pool; instead, it places constants wherever it feels like with
- /// special instructions.
- ///
- /// The terminology used in this pass includes:
- /// Islands - Clumps of constants placed in the function.
- /// Water - Potential places where an island could be formed.
- /// CPE - A constant pool entry that has been placed somewhere, which
- /// tracks a list of users.
- class ARMConstantIslands : public MachineFunctionPass {
- /// BBSizes - The size of each MachineBasicBlock in bytes of code, indexed
- /// by MBB Number. The two-byte pads required for Thumb alignment are
- /// counted as part of the following block (i.e., the offset and size for
- /// a padded block will both be ==2 mod 4).
- std::vector<unsigned> BBSizes;
-
- /// BBOffsets - the offset of each MBB in bytes, starting from 0.
- /// The two-byte pads required for Thumb alignment are counted as part of
- /// the following block.
- std::vector<unsigned> BBOffsets;
-
- /// WaterList - A sorted list of basic blocks where islands could be placed
- /// (i.e. blocks that don't fall through to the following block, due
- /// to a return, unreachable, or unconditional branch).
- std::vector<MachineBasicBlock*> WaterList;
-
- /// NewWaterList - The subset of WaterList that was created since the
- /// previous iteration by inserting unconditional branches.
- SmallSet<MachineBasicBlock*, 4> NewWaterList;
-
- typedef std::vector<MachineBasicBlock*>::iterator water_iterator;
-
- /// CPUser - One user of a constant pool, keeping the machine instruction
- /// pointer, the constant pool being referenced, and the max displacement
- /// allowed from the instruction to the CP. The HighWaterMark records the
- /// highest basic block where a new CPEntry can be placed. To ensure this
- /// pass terminates, the CP entries are initially placed at the end of the
- /// function and then move monotonically to lower addresses. The
- /// exception to this rule is when the current CP entry for a particular
- /// CPUser is out of range, but there is another CP entry for the same
- /// constant value in range. We want to use the existing in-range CP
- /// entry, but if it later moves out of range, the search for new water
- /// should resume where it left off. The HighWaterMark is used to record
- /// that point.
- struct CPUser {
- MachineInstr *MI;
- MachineInstr *CPEMI;
- MachineBasicBlock *HighWaterMark;
- unsigned MaxDisp;
- bool NegOk;
- bool IsSoImm;
- CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp,
- bool neg, bool soimm)
- : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm) {
- HighWaterMark = CPEMI->getParent();
- }
- };
-
- /// CPUsers - Keep track of all of the machine instructions that use various
- /// constant pools and their max displacement.
- std::vector<CPUser> CPUsers;
-
- /// CPEntry - One per constant pool entry, keeping the machine instruction
- /// pointer, the constpool index, and the number of CPUser's which
- /// reference this entry.
- struct CPEntry {
- MachineInstr *CPEMI;
- unsigned CPI;
- unsigned RefCount;
- CPEntry(MachineInstr *cpemi, unsigned cpi, unsigned rc = 0)
- : CPEMI(cpemi), CPI(cpi), RefCount(rc) {}
- };
-
- /// CPEntries - Keep track of all of the constant pool entry machine
- /// instructions. For each original constpool index (i.e. those that
- /// existed upon entry to this pass), it keeps a vector of entries.
- /// Original elements are cloned as we go along; the clones are
- /// put in the vector of the original element, but have distinct CPIs.
- std::vector<std::vector<CPEntry> > CPEntries;
-
- /// ImmBranch - One per immediate branch, keeping the machine instruction
- /// pointer, conditional or unconditional, the max displacement,
- /// and (if isCond is true) the corresponding unconditional branch
- /// opcode.
- struct ImmBranch {
- MachineInstr *MI;
- unsigned MaxDisp : 31;
- bool isCond : 1;
- int UncondBr;
- ImmBranch(MachineInstr *mi, unsigned maxdisp, bool cond, int ubr)
- : MI(mi), MaxDisp(maxdisp), isCond(cond), UncondBr(ubr) {}
- };
-
- /// ImmBranches - Keep track of all the immediate branch instructions.
- ///
- std::vector<ImmBranch> ImmBranches;
-
- /// PushPopMIs - Keep track of all the Thumb push / pop instructions.
- ///
- SmallVector<MachineInstr*, 4> PushPopMIs;
-
- /// T2JumpTables - Keep track of all the Thumb2 jumptable instructions.
- SmallVector<MachineInstr*, 4> T2JumpTables;
-
- /// HasFarJump - True if any far jump instruction has been emitted during
- /// the branch fix up pass.
- bool HasFarJump;
-
- /// HasInlineAsm - True if the function contains inline assembly.
- bool HasInlineAsm;
-
- const TargetInstrInfo *TII;
- const ARMSubtarget *STI;
- ARMFunctionInfo *AFI;
- bool isThumb;
- bool isThumb1;
- bool isThumb2;
- public:
- static char ID;
- ARMConstantIslands() : MachineFunctionPass(&ID) {}
-
- virtual bool runOnMachineFunction(MachineFunction &MF);
-
- virtual const char *getPassName() const {
- return "ARM constant island placement and branch shortening pass";
- }
-
- private:
- void DoInitialPlacement(MachineFunction &MF,
- std::vector<MachineInstr*> &CPEMIs);
- CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
- void JumpTableFunctionScan(MachineFunction &MF);
- void InitialFunctionScan(MachineFunction &MF,
- const std::vector<MachineInstr*> &CPEMIs);
- MachineBasicBlock *SplitBlockBeforeInstr(MachineInstr *MI);
- void UpdateForInsertedWaterBlock(MachineBasicBlock *NewBB);
- void AdjustBBOffsetsAfter(MachineBasicBlock *BB, int delta);
- bool DecrementOldEntry(unsigned CPI, MachineInstr* CPEMI);
- int LookForExistingCPEntry(CPUser& U, unsigned UserOffset);
- bool LookForWater(CPUser&U, unsigned UserOffset, water_iterator &WaterIter);
- void CreateNewWater(unsigned CPUserIndex, unsigned UserOffset,
- MachineBasicBlock *&NewMBB);
- bool HandleConstantPoolUser(MachineFunction &MF, unsigned CPUserIndex);
- void RemoveDeadCPEMI(MachineInstr *CPEMI);
- bool RemoveUnusedCPEntries();
- bool CPEIsInRange(MachineInstr *MI, unsigned UserOffset,
- MachineInstr *CPEMI, unsigned Disp, bool NegOk,
- bool DoDump = false);
- bool WaterIsInRange(unsigned UserOffset, MachineBasicBlock *Water,
- CPUser &U);
- bool OffsetIsInRange(unsigned UserOffset, unsigned TrialOffset,
- unsigned Disp, bool NegativeOK, bool IsSoImm = false);
- bool BBIsInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp);
- bool FixUpImmediateBr(MachineFunction &MF, ImmBranch &Br);
- bool FixUpConditionalBr(MachineFunction &MF, ImmBranch &Br);
- bool FixUpUnconditionalBr(MachineFunction &MF, ImmBranch &Br);
- bool UndoLRSpillRestore();
- bool OptimizeThumb2Instructions(MachineFunction &MF);
- bool OptimizeThumb2Branches(MachineFunction &MF);
- bool ReorderThumb2JumpTables(MachineFunction &MF);
- bool OptimizeThumb2JumpTables(MachineFunction &MF);
- MachineBasicBlock *AdjustJTTargetBlockForward(MachineBasicBlock *BB,
- MachineBasicBlock *JTBB);
-
- unsigned GetOffsetOf(MachineInstr *MI) const;
- void dumpBBs();
- void verify(MachineFunction &MF);
- };
- char ARMConstantIslands::ID = 0;
-}
-
-/// verify - check BBOffsets, BBSizes, alignment of islands
-void ARMConstantIslands::verify(MachineFunction &MF) {
- assert(BBOffsets.size() == BBSizes.size());
- for (unsigned i = 1, e = BBOffsets.size(); i != e; ++i)
- assert(BBOffsets[i-1]+BBSizes[i-1] == BBOffsets[i]);
- if (!isThumb)
- return;
-#ifndef NDEBUG
- for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
- MBBI != E; ++MBBI) {
- MachineBasicBlock *MBB = MBBI;
- if (!MBB->empty() &&
- MBB->begin()->getOpcode() == ARM::CONSTPOOL_ENTRY) {
- unsigned MBBId = MBB->getNumber();
- assert(HasInlineAsm ||
- (BBOffsets[MBBId]%4 == 0 && BBSizes[MBBId]%4 == 0) ||
- (BBOffsets[MBBId]%4 != 0 && BBSizes[MBBId]%4 != 0));
- }
- }
- for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
- CPUser &U = CPUsers[i];
- unsigned UserOffset = GetOffsetOf(U.MI) + (isThumb ? 4 : 8);
- unsigned CPEOffset = GetOffsetOf(U.CPEMI);
- unsigned Disp = UserOffset < CPEOffset ? CPEOffset - UserOffset :
- UserOffset - CPEOffset;
- assert(Disp <= U.MaxDisp && "Constant pool entry out of range!");
- }
-#endif
-}
-
-/// print block size and offset information - debugging
-void ARMConstantIslands::dumpBBs() {
- for (unsigned J = 0, E = BBOffsets.size(); J !=E; ++J) {
- DEBUG(errs() << "block " << J << " offset " << BBOffsets[J]
- << " size " << BBSizes[J] << "\n");
- }
-}
-
-/// createARMConstantIslandPass - returns an instance of the constpool
-/// island pass.
-FunctionPass *llvm::createARMConstantIslandPass() {
- return new ARMConstantIslands();
-}
-
-bool ARMConstantIslands::runOnMachineFunction(MachineFunction &MF) {
- MachineConstantPool &MCP = *MF.getConstantPool();
-
- TII = MF.getTarget().getInstrInfo();
- AFI = MF.getInfo<ARMFunctionInfo>();
- STI = &MF.getTarget().getSubtarget<ARMSubtarget>();
-
- isThumb = AFI->isThumbFunction();
- isThumb1 = AFI->isThumb1OnlyFunction();
- isThumb2 = AFI->isThumb2Function();
-
- HasFarJump = false;
- HasInlineAsm = false;
-
- // Renumber all of the machine basic blocks in the function, guaranteeing that
- // the numbers agree with the position of the block in the function.
- MF.RenumberBlocks();
-
- // Try to reorder and otherwise adjust the block layout to make good use
- // of the TB[BH] instructions.
- bool MadeChange = false;
- if (isThumb2 && AdjustJumpTableBlocks) {
- JumpTableFunctionScan(MF);
- MadeChange |= ReorderThumb2JumpTables(MF);
- // Data is out of date, so clear it. It'll be re-computed later.
- T2JumpTables.clear();
- // Blocks may have shifted around. Keep the numbering up to date.
- MF.RenumberBlocks();
- }
-
- // Thumb1 functions containing constant pools get 4-byte alignment.
- // This is so we can keep exact track of where the alignment padding goes.
-
- // ARM and Thumb2 functions need to be 4-byte aligned.
- if (!isThumb1)
- MF.EnsureAlignment(2); // 2 = log2(4)
-
- // Perform the initial placement of the constant pool entries. To start with,
- // we put them all at the end of the function.
- std::vector<MachineInstr*> CPEMIs;
- if (!MCP.isEmpty()) {
- DoInitialPlacement(MF, CPEMIs);
- if (isThumb1)
- MF.EnsureAlignment(2); // 2 = log2(4)
- }
-
- /// The next UID to take is the first unused one.
- AFI->initConstPoolEntryUId(CPEMIs.size());
-
- // Do the initial scan of the function, building up information about the
- // sizes of each block, the location of all the water, and finding all of the
- // constant pool users.
- InitialFunctionScan(MF, CPEMIs);
- CPEMIs.clear();
-
- /// Remove dead constant pool entries.
- RemoveUnusedCPEntries();
-
- // Iteratively place constant pool entries and fix up branches until there
- // is no change.
- unsigned NoCPIters = 0, NoBRIters = 0;
- while (true) {
- bool CPChange = false;
- for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
- CPChange |= HandleConstantPoolUser(MF, i);
- if (CPChange && ++NoCPIters > 30)
- llvm_unreachable("Constant Island pass failed to converge!");
- DEBUG(dumpBBs());
-
- // Clear NewWaterList now. If we split a block for branches, it should
- // appear as "new water" for the next iteration of constant pool placement.
- NewWaterList.clear();
-
- bool BRChange = false;
- for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
- BRChange |= FixUpImmediateBr(MF, ImmBranches[i]);
- if (BRChange && ++NoBRIters > 30)
- llvm_unreachable("Branch Fix Up pass failed to converge!");
- DEBUG(dumpBBs());
-
- if (!CPChange && !BRChange)
- break;
- MadeChange = true;
- }
-
- // Shrink 32-bit Thumb2 branch, load, and store instructions.
- if (isThumb2)
- MadeChange |= OptimizeThumb2Instructions(MF);
-
- // After a while, this might be made debug-only, but it is not expensive.
- verify(MF);
-
- // If LR has been force-spilled and no far jumps (i.e. BL) have been issued,
- // undo the spill / restore of LR if possible.
- if (isThumb && !HasFarJump && AFI->isLRSpilledForFarJump())
- MadeChange |= UndoLRSpillRestore();
-
- BBSizes.clear();
- BBOffsets.clear();
- WaterList.clear();
- CPUsers.clear();
- CPEntries.clear();
- ImmBranches.clear();
- PushPopMIs.clear();
- T2JumpTables.clear();
-
- return MadeChange;
-}
-
-/// DoInitialPlacement - Perform the initial placement of the constant pool
-/// entries. To start with, we put them all at the end of the function.
-void ARMConstantIslands::DoInitialPlacement(MachineFunction &MF,
- std::vector<MachineInstr*> &CPEMIs) {
- // Create the basic block to hold the CPE's.
- MachineBasicBlock *BB = MF.CreateMachineBasicBlock();
- MF.push_back(BB);
-
- // Add all of the constants from the constant pool to the end block, use an
- // identity mapping of CPI's to CPE's.
- const std::vector<MachineConstantPoolEntry> &CPs =
- MF.getConstantPool()->getConstants();
-
- const TargetData &TD = *MF.getTarget().getTargetData();
- for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
- unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
- // Verify that all constant pool entries are a multiple of 4 bytes. If not,
- // we would have to pad them out or something so that instructions stay
- // aligned.
- assert((Size & 3) == 0 && "CP Entry not multiple of 4 bytes!");
- MachineInstr *CPEMI =
- BuildMI(BB, DebugLoc::getUnknownLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
- .addImm(i).addConstantPoolIndex(i).addImm(Size);
- CPEMIs.push_back(CPEMI);
-
- // Add a new CPEntry, but no corresponding CPUser yet.
- std::vector<CPEntry> CPEs;
- CPEs.push_back(CPEntry(CPEMI, i));
- CPEntries.push_back(CPEs);
- NumCPEs++;
- DEBUG(errs() << "Moved CPI#" << i << " to end of function as #" << i
- << "\n");
- }
-}
-
-/// BBHasFallthrough - Return true if the specified basic block can fallthrough
-/// into the block immediately after it.
-static bool BBHasFallthrough(MachineBasicBlock *MBB) {
- // Get the next machine basic block in the function.
- MachineFunction::iterator MBBI = MBB;
- if (llvm::next(MBBI) == MBB->getParent()->end()) // Can't fall off end of function.
- return false;
-
- MachineBasicBlock *NextBB = llvm::next(MBBI);
- for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
- E = MBB->succ_end(); I != E; ++I)
- if (*I == NextBB)
- return true;
-
- return false;
-}
-
-/// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI,
-/// look up the corresponding CPEntry.
-ARMConstantIslands::CPEntry
-*ARMConstantIslands::findConstPoolEntry(unsigned CPI,
- const MachineInstr *CPEMI) {
- std::vector<CPEntry> &CPEs = CPEntries[CPI];
- // Number of entries per constpool index should be small, just do a
- // linear search.
- for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
- if (CPEs[i].CPEMI == CPEMI)
- return &CPEs[i];
- }
- return NULL;
-}
-
-/// JumpTableFunctionScan - Do a scan of the function, building up
-/// information about the sizes of each block and the locations of all
-/// the jump tables.
-void ARMConstantIslands::JumpTableFunctionScan(MachineFunction &MF) {
- for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
- MBBI != E; ++MBBI) {
- MachineBasicBlock &MBB = *MBBI;
-
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I)
- if (I->getDesc().isBranch() && I->getOpcode() == ARM::t2BR_JT)
- T2JumpTables.push_back(I);
- }
-}
-
-/// InitialFunctionScan - Do the initial scan of the function, building up
-/// information about the sizes of each block, the location of all the water,
-/// and finding all of the constant pool users.
-void ARMConstantIslands::InitialFunctionScan(MachineFunction &MF,
- const std::vector<MachineInstr*> &CPEMIs) {
- // First thing, see if the function has any inline assembly in it. If so,
- // we have to be conservative about alignment assumptions, as we don't
- // know for sure the size of any instructions in the inline assembly.
- for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
- MBBI != E; ++MBBI) {
- MachineBasicBlock &MBB = *MBBI;
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I)
- if (I->getOpcode() == ARM::INLINEASM)
- HasInlineAsm = true;
- }
-
- // Now go back through the instructions and build up our data structures
- unsigned Offset = 0;
- for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
- MBBI != E; ++MBBI) {
- MachineBasicBlock &MBB = *MBBI;
-
- // If this block doesn't fall through into the next MBB, then this is
- // 'water' where a constant pool island could be placed.
- if (!BBHasFallthrough(&MBB))
- WaterList.push_back(&MBB);
-
- unsigned MBBSize = 0;
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I) {
- // Add instruction size to MBBSize.
- MBBSize += TII->GetInstSizeInBytes(I);
-
- int Opc = I->getOpcode();
- if (I->getDesc().isBranch()) {
- bool isCond = false;
- unsigned Bits = 0;
- unsigned Scale = 1;
- int UOpc = Opc;
- switch (Opc) {
- default:
- continue; // Ignore other JT branches
- case ARM::tBR_JTr:
- // A Thumb1 table jump may involve padding; for the offsets to
- // be right, functions containing these must be 4-byte aligned.
- MF.EnsureAlignment(2U);
- if ((Offset+MBBSize)%4 != 0 || HasInlineAsm)
- // FIXME: Add a pseudo ALIGN instruction instead.
- MBBSize += 2; // padding
- continue; // Does not get an entry in ImmBranches
- case ARM::t2BR_JT:
- T2JumpTables.push_back(I);
- continue; // Does not get an entry in ImmBranches
- case ARM::Bcc:
- isCond = true;
- UOpc = ARM::B;
- // Fallthrough
- case ARM::B:
- Bits = 24;
- Scale = 4;
- break;
- case ARM::tBcc:
- isCond = true;
- UOpc = ARM::tB;
- Bits = 8;
- Scale = 2;
- break;
- case ARM::tB:
- Bits = 11;
- Scale = 2;
- break;
- case ARM::t2Bcc:
- isCond = true;
- UOpc = ARM::t2B;
- Bits = 20;
- Scale = 2;
- break;
- case ARM::t2B:
- Bits = 24;
- Scale = 2;
- break;
- }
-
- // Record this immediate branch.
- unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
- ImmBranches.push_back(ImmBranch(I, MaxOffs, isCond, UOpc));
- }
-
- if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
- PushPopMIs.push_back(I);
-
- if (Opc == ARM::CONSTPOOL_ENTRY)
- continue;
-
- // Scan the instructions for constant pool operands.
- for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
- if (I->getOperand(op).isCPI()) {
- // We found one. The addressing mode tells us the max displacement
- // from the PC that this instruction permits.
-
- // Basic size info comes from the TSFlags field.
- unsigned Bits = 0;
- unsigned Scale = 1;
- bool NegOk = false;
- bool IsSoImm = false;
-
- switch (Opc) {
- default:
- llvm_unreachable("Unknown addressing mode for CP reference!");
- break;
-
- // Taking the address of a CP entry.
- case ARM::LEApcrel:
- // This takes a SoImm, which is an 8-bit immediate rotated. We'll
- // pretend the maximum offset is 255 * 4. Since each instruction is
- // 4 bytes wide, this is always correct. We'll check for other
- // displacements that fit in a SoImm as well.
- Bits = 8;
- Scale = 4;
- NegOk = true;
- IsSoImm = true;
- break;
- case ARM::t2LEApcrel:
- Bits = 12;
- NegOk = true;
- break;
- case ARM::tLEApcrel:
- Bits = 8;
- Scale = 4;
- break;
-
- case ARM::LDR:
- case ARM::LDRcp:
- case ARM::t2LDRpci:
- Bits = 12; // +-offset_12
- NegOk = true;
- break;
-
- case ARM::tLDRpci:
- case ARM::tLDRcp:
- Bits = 8;
- Scale = 4; // +(offset_8*4)
- break;
-
- case ARM::VLDRD:
- case ARM::VLDRS:
- Bits = 8;
- Scale = 4; // +-(offset_8*4)
- NegOk = true;
- break;
- }
-
- // Remember that this is a user of a CP entry.
- unsigned CPI = I->getOperand(op).getIndex();
- MachineInstr *CPEMI = CPEMIs[CPI];
- unsigned MaxOffs = ((1 << Bits)-1) * Scale;
- CPUsers.push_back(CPUser(I, CPEMI, MaxOffs, NegOk, IsSoImm));
-
- // Increment corresponding CPEntry reference count.
- CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
- assert(CPE && "Cannot find a corresponding CPEntry!");
- CPE->RefCount++;
-
- // Instructions can only use one CP entry, don't bother scanning the
- // rest of the operands.
- break;
- }
- }
-
- // In thumb mode, if this block is a constpool island, we may need padding
- // so it's aligned on a 4-byte boundary.
- if (isThumb &&
- !MBB.empty() &&
- MBB.begin()->getOpcode() == ARM::CONSTPOOL_ENTRY &&
- ((Offset%4) != 0 || HasInlineAsm))
- MBBSize += 2;
-
- BBSizes.push_back(MBBSize);
- BBOffsets.push_back(Offset);
- Offset += MBBSize;
- }
-}
-
-/// GetOffsetOf - Return the current offset of the specified machine instruction
-/// from the start of the function. This offset changes as stuff is moved
-/// around inside the function.
-unsigned ARMConstantIslands::GetOffsetOf(MachineInstr *MI) const {
- MachineBasicBlock *MBB = MI->getParent();
-
- // The offset is composed of two things: the sum of the sizes of all MBB's
- // before this instruction's block, and the offset from the start of the block
- // it is in.
- unsigned Offset = BBOffsets[MBB->getNumber()];
-
- // If we're looking for a CONSTPOOL_ENTRY in Thumb, see if this block has
- // alignment padding, and compensate if so.
- if (isThumb &&
- MI->getOpcode() == ARM::CONSTPOOL_ENTRY &&
- (Offset%4 != 0 || HasInlineAsm))
- Offset += 2;
-
- // Sum instructions before MI in MBB.
- for (MachineBasicBlock::iterator I = MBB->begin(); ; ++I) {
- assert(I != MBB->end() && "Didn't find MI in its own basic block?");
- if (&*I == MI) return Offset;
- Offset += TII->GetInstSizeInBytes(I);
- }
-}
-
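GetOffsetOf above is simply the owning block's start offset plus the sizes of the instructions preceding MI in that block (plus a Thumb padding fix-up that the sketch below leaves out). The same computation on plain containers:

#include <cassert>
#include <cstddef>
#include <numeric>
#include <vector>

static unsigned offsetOf(const std::vector<unsigned> &BBOffsets,
                         const std::vector<std::vector<unsigned>> &InstSizes,
                         unsigned BB, std::size_t InstIdx) {
  return BBOffsets[BB] + std::accumulate(InstSizes[BB].begin(),
                                         InstSizes[BB].begin() + InstIdx, 0u);
}

int main() {
  std::vector<unsigned> BBOffsets = {0, 12};
  std::vector<std::vector<unsigned>> InstSizes = {{4, 4, 4}, {2, 2, 4}};
  assert(offsetOf(BBOffsets, InstSizes, 1, 2) == 16);   // 12 + 2 + 2
  return 0;
}
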
-/// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB
-/// ID.
-static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
- const MachineBasicBlock *RHS) {
- return LHS->getNumber() < RHS->getNumber();
-}
-
-/// UpdateForInsertedWaterBlock - When a block is newly inserted into the
-/// machine function, it upsets all of the block numbers. Renumber the blocks
-/// and update the arrays that parallel this numbering.
-void ARMConstantIslands::UpdateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
- // Renumber the MBB's to keep them consecutive.
- NewBB->getParent()->RenumberBlocks(NewBB);
-
- // Insert a size into BBSizes to align it properly with the (newly
- // renumbered) block numbers.
- BBSizes.insert(BBSizes.begin()+NewBB->getNumber(), 0);
-
- // Likewise for BBOffsets.
- BBOffsets.insert(BBOffsets.begin()+NewBB->getNumber(), 0);
-
- // Next, update WaterList. Specifically, we need to add NewMBB as having
- // available water after it.
- water_iterator IP =
- std::lower_bound(WaterList.begin(), WaterList.end(), NewBB,
- CompareMBBNumbers);
- WaterList.insert(IP, NewBB);
-}
-
-
-/// Split the basic block containing MI into two blocks, which are joined by
-/// an unconditional branch. Update data structures and renumber blocks to
-/// account for this change, and return the newly created block.
-MachineBasicBlock *ARMConstantIslands::SplitBlockBeforeInstr(MachineInstr *MI) {
- MachineBasicBlock *OrigBB = MI->getParent();
- MachineFunction &MF = *OrigBB->getParent();
-
- // Create a new MBB for the code after the OrigBB.
- MachineBasicBlock *NewBB =
- MF.CreateMachineBasicBlock(OrigBB->getBasicBlock());
- MachineFunction::iterator MBBI = OrigBB; ++MBBI;
- MF.insert(MBBI, NewBB);
-
- // Splice the instructions starting with MI over to NewBB.
- NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());
-
- // Add an unconditional branch from OrigBB to NewBB.
- // Note the new unconditional branch is not being recorded.
- // There doesn't seem to be meaningful DebugInfo available; this doesn't
- // correspond to anything in the source.
- unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B;
- BuildMI(OrigBB, DebugLoc::getUnknownLoc(), TII->get(Opc)).addMBB(NewBB);
- NumSplit++;
-
- // Update the CFG. All succs of OrigBB are now succs of NewBB.
- while (!OrigBB->succ_empty()) {
- MachineBasicBlock *Succ = *OrigBB->succ_begin();
- OrigBB->removeSuccessor(Succ);
- NewBB->addSuccessor(Succ);
-
- // This pass should be run after register allocation, so there should be no
- // PHI nodes to update.
- assert((Succ->empty() || !Succ->begin()->isPHI())
- && "PHI nodes should be eliminated by now!");
- }
-
- // OrigBB branches to NewBB.
- OrigBB->addSuccessor(NewBB);
-
- // Update internal data structures to account for the newly inserted MBB.
- // This is almost the same as UpdateForInsertedWaterBlock, except that
- // the Water goes after OrigBB, not NewBB.
- MF.RenumberBlocks(NewBB);
-
- // Insert a size into BBSizes to align it properly with the (newly
- // renumbered) block numbers.
- BBSizes.insert(BBSizes.begin()+NewBB->getNumber(), 0);
-
- // Likewise for BBOffsets.
- BBOffsets.insert(BBOffsets.begin()+NewBB->getNumber(), 0);
-
- // Next, update WaterList. Specifically, we need to add OrigMBB as having
- // available water after it (but not if it's already there, which happens
- // when splitting before a conditional branch that is followed by an
- // unconditional branch - in that case we want to insert NewBB).
- water_iterator IP =
- std::lower_bound(WaterList.begin(), WaterList.end(), OrigBB,
- CompareMBBNumbers);
- MachineBasicBlock* WaterBB = *IP;
- if (WaterBB == OrigBB)
- WaterList.insert(llvm::next(IP), NewBB);
- else
- WaterList.insert(IP, OrigBB);
- NewWaterList.insert(OrigBB);
-
- // Figure out how large the first NewMBB is. (It cannot
- // contain a constpool_entry or tablejump.)
- unsigned NewBBSize = 0;
- for (MachineBasicBlock::iterator I = NewBB->begin(), E = NewBB->end();
- I != E; ++I)
- NewBBSize += TII->GetInstSizeInBytes(I);
-
- unsigned OrigBBI = OrigBB->getNumber();
- unsigned NewBBI = NewBB->getNumber();
- // Set the size of NewBB in BBSizes.
- BBSizes[NewBBI] = NewBBSize;
-
- // We removed instructions from UserMBB, subtract that off from its size.
- // Add 2 or 4 to the block to count the unconditional branch we added to it.
- int delta = isThumb1 ? 2 : 4;
- BBSizes[OrigBBI] -= NewBBSize - delta;
-
- // ...and adjust BBOffsets for NewBB accordingly.
- BBOffsets[NewBBI] = BBOffsets[OrigBBI] + BBSizes[OrigBBI];
-
- // All BBOffsets following these blocks must be modified.
- AdjustBBOffsetsAfter(NewBB, delta);
-
- return NewBB;
-}
-
-/// OffsetIsInRange - Checks whether UserOffset (the location of a constant pool
-/// reference) is within MaxDisp of TrialOffset (a proposed location of a
-/// constant pool entry).
-bool ARMConstantIslands::OffsetIsInRange(unsigned UserOffset,
- unsigned TrialOffset, unsigned MaxDisp,
- bool NegativeOK, bool IsSoImm) {
- // On Thumb offsets==2 mod 4 are rounded down by the hardware for
- // purposes of the displacement computation; compensate for that here.
- // Effectively, the valid range of displacements is 2 bytes smaller for such
- // references.
- unsigned TotalAdj = 0;
- if (isThumb && UserOffset%4 !=0) {
- UserOffset -= 2;
- TotalAdj = 2;
- }
- // CPEs will be rounded up to a multiple of 4.
- if (isThumb && TrialOffset%4 != 0) {
- TrialOffset += 2;
- TotalAdj += 2;
- }
-
- // In Thumb2 mode, later branch adjustments can shift instructions up and
- // cause an alignment change. In the worst-case scenario this can cause the
- // user's effective address to decrease by 2 and the CPE's address to
- // increase by 2.
- if (isThumb2 && TotalAdj != 4)
- MaxDisp -= (4 - TotalAdj);
-
- if (UserOffset <= TrialOffset) {
- // User before the Trial.
- if (TrialOffset - UserOffset <= MaxDisp)
- return true;
- // FIXME: Make use of the full range of soimm values.
- } else if (NegativeOK) {
- if (UserOffset - TrialOffset <= MaxDisp)
- return true;
- // FIXME: Make use of the full range of soimm values.
- }
- return false;
-}
-
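A compact restatement of the range test above, keeping the Thumb mod-4 rounding but dropping the Thumb2 worst-case adjustment, for anyone who wants to try the numbers outside the pass:

#include <cassert>

static bool offsetIsInRange(unsigned UserOffset, unsigned TrialOffset,
                            unsigned MaxDisp, bool NegOk, bool IsThumb) {
  if (IsThumb && UserOffset % 4 != 0)
    UserOffset -= 2;                  // the hardware rounds the user PC down
  if (IsThumb && TrialOffset % 4 != 0)
    TrialOffset += 2;                 // the CPE will be rounded up to 4 bytes
  if (UserOffset <= TrialOffset)
    return TrialOffset - UserOffset <= MaxDisp;          // forward reference
  return NegOk && UserOffset - TrialOffset <= MaxDisp;   // backward reference
}

int main() {
  // A Thumb pc-relative load with an 8-bit word offset reaches 1020 bytes,
  // forward only.
  assert( offsetIsInRange(100, 1100, 1020, /*NegOk=*/false, /*IsThumb=*/true));
  assert(!offsetIsInRange(1100, 100, 1020, /*NegOk=*/false, /*IsThumb=*/true));
  return 0;
}
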
-/// WaterIsInRange - Returns true if a CPE placed after the specified
-/// Water (a basic block) will be in range for the specified MI.
-
-bool ARMConstantIslands::WaterIsInRange(unsigned UserOffset,
- MachineBasicBlock* Water, CPUser &U) {
- unsigned MaxDisp = U.MaxDisp;
- unsigned CPEOffset = BBOffsets[Water->getNumber()] +
- BBSizes[Water->getNumber()];
-
- // If the CPE is to be inserted before the instruction, that will raise
- // the offset of the instruction.
- if (CPEOffset < UserOffset)
- UserOffset += U.CPEMI->getOperand(2).getImm();
-
- return OffsetIsInRange(UserOffset, CPEOffset, MaxDisp, U.NegOk, U.IsSoImm);
-}
-
-/// CPEIsInRange - Returns true if the distance between the specified MI and
-/// ConstPool entry instruction can fit in MI's displacement field.
-bool ARMConstantIslands::CPEIsInRange(MachineInstr *MI, unsigned UserOffset,
- MachineInstr *CPEMI, unsigned MaxDisp,
- bool NegOk, bool DoDump) {
- unsigned CPEOffset = GetOffsetOf(CPEMI);
- assert((CPEOffset%4 == 0 || HasInlineAsm) && "Misaligned CPE");
-
- if (DoDump) {
- DEBUG(errs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
- << " max delta=" << MaxDisp
- << " insn address=" << UserOffset
- << " CPE address=" << CPEOffset
- << " offset=" << int(CPEOffset-UserOffset) << "\t" << *MI);
- }
-
- return OffsetIsInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
-}
-
-#ifndef NDEBUG
-/// BBIsJumpedOver - Return true if the specified basic block's only predecessor
-/// unconditionally branches to its only successor.
-static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
- if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
- return false;
-
- MachineBasicBlock *Succ = *MBB->succ_begin();
- MachineBasicBlock *Pred = *MBB->pred_begin();
- MachineInstr *PredMI = &Pred->back();
- if (PredMI->getOpcode() == ARM::B || PredMI->getOpcode() == ARM::tB
- || PredMI->getOpcode() == ARM::t2B)
- return PredMI->getOperand(0).getMBB() == Succ;
- return false;
-}
-#endif // NDEBUG
-
-void ARMConstantIslands::AdjustBBOffsetsAfter(MachineBasicBlock *BB,
- int delta) {
- MachineFunction::iterator MBBI = BB; MBBI = llvm::next(MBBI);
- for(unsigned i = BB->getNumber()+1, e = BB->getParent()->getNumBlockIDs();
- i < e; ++i) {
- BBOffsets[i] += delta;
- // If some existing blocks have padding, adjust the padding as needed; this
- // is a bit tricky since delta can be negative, so don't use % on it.
- if (!isThumb)
- continue;
- MachineBasicBlock *MBB = MBBI;
- if (!MBB->empty() && !HasInlineAsm) {
- // Constant pool entries require padding.
- if (MBB->begin()->getOpcode() == ARM::CONSTPOOL_ENTRY) {
- unsigned OldOffset = BBOffsets[i] - delta;
- if ((OldOffset%4) == 0 && (BBOffsets[i]%4) != 0) {
- // add new padding
- BBSizes[i] += 2;
- delta += 2;
- } else if ((OldOffset%4) != 0 && (BBOffsets[i]%4) == 0) {
- // remove existing padding
- BBSizes[i] -= 2;
- delta -= 2;
- }
- }
- // Thumb1 jump tables require padding. They should be at the end;
- // following unconditional branches are removed by AnalyzeBranch.
- MachineInstr *ThumbJTMI = prior(MBB->end());
- if (ThumbJTMI->getOpcode() == ARM::tBR_JTr) {
- unsigned NewMIOffset = GetOffsetOf(ThumbJTMI);
- unsigned OldMIOffset = NewMIOffset - delta;
- if ((OldMIOffset%4) == 0 && (NewMIOffset%4) != 0) {
- // remove existing padding
- BBSizes[i] -= 2;
- delta -= 2;
- } else if ((OldMIOffset%4) != 0 && (NewMIOffset%4) == 0) {
- // add new padding
- BBSizes[i] += 2;
- delta += 2;
- }
- }
- if (delta==0)
- return;
- }
- MBBI = llvm::next(MBBI);
- }
-}
-
-/// DecrementOldEntry - find the constant pool entry with index CPI
-/// and instruction CPEMI, and decrement its refcount. If the refcount
-/// becomes 0 remove the entry and instruction. Returns true if we removed
-/// the entry, false if we didn't.
-
-bool ARMConstantIslands::DecrementOldEntry(unsigned CPI, MachineInstr *CPEMI) {
- // Find the old entry. Eliminate it if it is no longer used.
- CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
- assert(CPE && "Unexpected!");
- if (--CPE->RefCount == 0) {
- RemoveDeadCPEMI(CPEMI);
- CPE->CPEMI = NULL;
- NumCPEs--;
- return true;
- }
- return false;
-}
-
-/// LookForCPEntryInRange - see if the currently referenced CPE is in range;
-/// if not, see if an in-range clone of the CPE is in range, and if so,
-/// change the data structures so the user references the clone. Returns:
-/// 0 = no existing entry found
-/// 1 = entry found, and there were no code insertions or deletions
-/// 2 = entry found, and there were code insertions or deletions
-int ARMConstantIslands::LookForExistingCPEntry(CPUser& U, unsigned UserOffset)
-{
- MachineInstr *UserMI = U.MI;
- MachineInstr *CPEMI = U.CPEMI;
-
- // Check to see if the CPE is already in-range.
- if (CPEIsInRange(UserMI, UserOffset, CPEMI, U.MaxDisp, U.NegOk, true)) {
- DEBUG(errs() << "In range\n");
- return 1;
- }
-
- // No. Look for previously created clones of the CPE that are in range.
- unsigned CPI = CPEMI->getOperand(1).getIndex();
- std::vector<CPEntry> &CPEs = CPEntries[CPI];
- for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
- // We already tried this one
- if (CPEs[i].CPEMI == CPEMI)
- continue;
- // Removing CPEs can leave empty entries, skip
- if (CPEs[i].CPEMI == NULL)
- continue;
- if (CPEIsInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.MaxDisp, U.NegOk)) {
- DEBUG(errs() << "Replacing CPE#" << CPI << " with CPE#"
- << CPEs[i].CPI << "\n");
- // Point the CPUser node to the replacement
- U.CPEMI = CPEs[i].CPEMI;
- // Change the CPI in the instruction operand to refer to the clone.
- for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j)
- if (UserMI->getOperand(j).isCPI()) {
- UserMI->getOperand(j).setIndex(CPEs[i].CPI);
- break;
- }
- // Adjust the refcount of the clone...
- CPEs[i].RefCount++;
- // ...and the original. If we didn't remove the old entry, none of the
- // addresses changed, so we don't need another pass.
- return DecrementOldEntry(CPI, CPEMI) ? 2 : 1;
- }
- }
- return 0;
-}
-
-/// getUnconditionalBrDisp - Returns the maximum displacement that can fit in
-/// the specific unconditional branch instruction.
-static inline unsigned getUnconditionalBrDisp(int Opc) {
- switch (Opc) {
- case ARM::tB:
- return ((1<<10)-1)*2;
- case ARM::t2B:
- return ((1<<23)-1)*2;
- default:
- break;
- }
-
- return ((1<<23)-1)*4;
-}
-
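Both this helper and the Bits/Scale table in InitialFunctionScan boil down to the same formula: a branch with an N-bit signed immediate scaled by S bytes can reach ((1 << (N-1)) - 1) * S bytes forward. A tiny sketch:

#include <cassert>

static unsigned maxBranchDisp(unsigned SignedBits, unsigned Scale) {
  return ((1u << (SignedBits - 1)) - 1) * Scale;
}

int main() {
  assert(maxBranchDisp(24, 4) == ((1u << 23) - 1) * 4);   // ARM B / Bcc
  assert(maxBranchDisp(11, 2) == ((1u << 10) - 1) * 2);   // Thumb1 tB
  assert(maxBranchDisp(8,  2) == ((1u << 7)  - 1) * 2);   // Thumb1 tBcc
  return 0;
}
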
-/// LookForWater - Look for an existing entry in the WaterList in which
-/// we can place the CPE referenced from U so it's within range of U's MI.
-/// Returns true if found, false if not. If it returns true, WaterIter
-/// is set to the WaterList entry. For Thumb, prefer water that will not
-/// introduce padding to water that will. To ensure that this pass
-/// terminates, the CPE location for a particular CPUser is only allowed to
-/// move to a lower address, so search backward from the end of the list and
-/// prefer the first water that is in range.
-bool ARMConstantIslands::LookForWater(CPUser &U, unsigned UserOffset,
- water_iterator &WaterIter) {
- if (WaterList.empty())
- return false;
-
- bool FoundWaterThatWouldPad = false;
- water_iterator IPThatWouldPad;
- for (water_iterator IP = prior(WaterList.end()),
- B = WaterList.begin();; --IP) {
- MachineBasicBlock* WaterBB = *IP;
- // Check if water is in range and is either at a lower address than the
- // current "high water mark" or a new water block that was created since
- // the previous iteration by inserting an unconditional branch. In the
- // latter case, we want to allow resetting the high water mark back to
- // this new water since we haven't seen it before. Inserting branches
- // should be relatively uncommon and when it does happen, we want to be
- // sure to take advantage of it for all the CPEs near that block, so that
- // we don't insert more branches than necessary.
- if (WaterIsInRange(UserOffset, WaterBB, U) &&
- (WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
- NewWaterList.count(WaterBB))) {
- unsigned WBBId = WaterBB->getNumber();
- if (isThumb &&
- (BBOffsets[WBBId] + BBSizes[WBBId])%4 != 0) {
- // This is valid Water, but would introduce padding. Remember
- // it in case we don't find any Water that doesn't do this.
- if (!FoundWaterThatWouldPad) {
- FoundWaterThatWouldPad = true;
- IPThatWouldPad = IP;
- }
- } else {
- WaterIter = IP;
- return true;
- }
- }
- if (IP == B)
- break;
- }
- if (FoundWaterThatWouldPad) {
- WaterIter = IPThatWouldPad;
- return true;
- }
- return false;
-}
-
-/// CreateNewWater - No existing WaterList entry will work for
-/// CPUsers[CPUserIndex], so create a place to put the CPE. The end of the
-/// block is used if in range, and the conditional branch munged so control
-/// flow is correct. Otherwise the block is split to create a hole with an
-/// unconditional branch around it. In either case NewMBB is set to a
-/// block following which the new island can be inserted (the WaterList
-/// is not adjusted).
-void ARMConstantIslands::CreateNewWater(unsigned CPUserIndex,
- unsigned UserOffset,
- MachineBasicBlock *&NewMBB) {
- CPUser &U = CPUsers[CPUserIndex];
- MachineInstr *UserMI = U.MI;
- MachineInstr *CPEMI = U.CPEMI;
- MachineBasicBlock *UserMBB = UserMI->getParent();
- unsigned OffsetOfNextBlock = BBOffsets[UserMBB->getNumber()] +
- BBSizes[UserMBB->getNumber()];
- assert(OffsetOfNextBlock == BBOffsets[UserMBB->getNumber()+1]);
-
- // If the block does not end in an unconditional branch already, and if the
- // end of the block is within range, make new water there. (The addition
- // below is for the unconditional branch we will be adding: 4 bytes on ARM +
- // Thumb2, 2 on Thumb1. Possible Thumb1 alignment padding is accounted for
- // inside OffsetIsInRange.)
- if (BBHasFallthrough(UserMBB) &&
- OffsetIsInRange(UserOffset, OffsetOfNextBlock + (isThumb1 ? 2: 4),
- U.MaxDisp, U.NegOk, U.IsSoImm)) {
- DEBUG(errs() << "Split at end of block\n");
- if (&UserMBB->back() == UserMI)
- assert(BBHasFallthrough(UserMBB) && "Expected a fallthrough BB!");
- NewMBB = llvm::next(MachineFunction::iterator(UserMBB));
- // Add an unconditional branch from UserMBB to fallthrough block.
- // Record it for branch lengthening; this new branch will not get out of
- // range, but if the preceding conditional branch is out of range, the
- // targets will be exchanged, and the altered branch may be out of
- // range, so the machinery has to know about it.
- int UncondBr = isThumb ? ((isThumb2) ? ARM::t2B : ARM::tB) : ARM::B;
- BuildMI(UserMBB, DebugLoc::getUnknownLoc(),
- TII->get(UncondBr)).addMBB(NewMBB);
- unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
- ImmBranches.push_back(ImmBranch(&UserMBB->back(),
- MaxDisp, false, UncondBr));
- int delta = isThumb1 ? 2 : 4;
- BBSizes[UserMBB->getNumber()] += delta;
- AdjustBBOffsetsAfter(UserMBB, delta);
- } else {
- // What a big block. Find a place within the block to split it.
- // This is a little tricky on Thumb1 since instructions are 2 bytes
- // and constant pool entries are 4 bytes: if instruction I references
- // island CPE, and instruction I+1 references CPE', it will
- // not work well to put CPE as far forward as possible, since then
- // CPE' cannot immediately follow it (that location is 2 bytes
- // farther away from I+1 than CPE was from I) and we'd need to create
- // a new island. So, we make a first guess, then walk through the
- // instructions between the one currently being looked at and the
- // possible insertion point, and make sure any other instructions
- // that reference CPEs will be able to use the same island area;
- // if not, we back up the insertion point.
-
- // The 4 in the following is for the unconditional branch we'll be
- // inserting (allows for long branch on Thumb1). Alignment of the
- // island is handled inside OffsetIsInRange.
- unsigned BaseInsertOffset = UserOffset + U.MaxDisp - 4;
- // This could point off the end of the block if we've already got
- // constant pool entries following this block; only the last one is
- // in the water list. Back past any possible branches (allow for a
- // conditional and a maximally long unconditional).
- if (BaseInsertOffset >= BBOffsets[UserMBB->getNumber()+1])
- BaseInsertOffset = BBOffsets[UserMBB->getNumber()+1] -
- (isThumb1 ? 6 : 8);
- unsigned EndInsertOffset = BaseInsertOffset +
- CPEMI->getOperand(2).getImm();
- MachineBasicBlock::iterator MI = UserMI;
- ++MI;
- unsigned CPUIndex = CPUserIndex+1;
- for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
- Offset < BaseInsertOffset;
- Offset += TII->GetInstSizeInBytes(MI),
- MI = llvm::next(MI)) {
- if (CPUIndex < CPUsers.size() && CPUsers[CPUIndex].MI == MI) {
- CPUser &U = CPUsers[CPUIndex];
- if (!OffsetIsInRange(Offset, EndInsertOffset,
- U.MaxDisp, U.NegOk, U.IsSoImm)) {
- BaseInsertOffset -= (isThumb1 ? 2 : 4);
- EndInsertOffset -= (isThumb1 ? 2 : 4);
- }
- // This is overly conservative, as we don't account for CPEMIs
- // being reused within the block, but it doesn't matter much.
- EndInsertOffset += CPUsers[CPUIndex].CPEMI->getOperand(2).getImm();
- CPUIndex++;
- }
- }
- DEBUG(errs() << "Split in middle of big block\n");
- NewMBB = SplitBlockBeforeInstr(prior(MI));
- }
-}
-
-/// HandleConstantPoolUser - Analyze the specified user, checking to see if it
-/// is out-of-range. If so, pick up the constant pool value and move it some
-/// place in-range. Return true if we changed any addresses (thus must run
-/// another pass of branch lengthening), false otherwise.
-bool ARMConstantIslands::HandleConstantPoolUser(MachineFunction &MF,
- unsigned CPUserIndex) {
- CPUser &U = CPUsers[CPUserIndex];
- MachineInstr *UserMI = U.MI;
- MachineInstr *CPEMI = U.CPEMI;
- unsigned CPI = CPEMI->getOperand(1).getIndex();
- unsigned Size = CPEMI->getOperand(2).getImm();
- // Compute this only once, it's expensive. The 4 or 8 is the value the
- // hardware keeps in the PC.
- unsigned UserOffset = GetOffsetOf(UserMI) + (isThumb ? 4 : 8);
-
- // See if the current entry is within range, or there is a clone of it
- // in range.
- int result = LookForExistingCPEntry(U, UserOffset);
- if (result==1) return false;
- else if (result==2) return true;
-
- // No existing clone of this CPE is within range.
- // We will be generating a new clone. Get a UID for it.
- unsigned ID = AFI->createConstPoolEntryUId();
-
- // Look for water where we can place this CPE.
- MachineBasicBlock *NewIsland = MF.CreateMachineBasicBlock();
- MachineBasicBlock *NewMBB;
- water_iterator IP;
- if (LookForWater(U, UserOffset, IP)) {
- DEBUG(errs() << "found water in range\n");
- MachineBasicBlock *WaterBB = *IP;
-
- // If the original WaterList entry was "new water" on this iteration,
- // propagate that to the new island. This is just keeping NewWaterList
- // updated to match the WaterList, which will be updated below.
- if (NewWaterList.count(WaterBB)) {
- NewWaterList.erase(WaterBB);
- NewWaterList.insert(NewIsland);
- }
- // The new CPE goes before the following block (NewMBB).
- NewMBB = llvm::next(MachineFunction::iterator(WaterBB));
-
- } else {
- // No water found.
- DEBUG(errs() << "No water found\n");
- CreateNewWater(CPUserIndex, UserOffset, NewMBB);
-
- // SplitBlockBeforeInstr adds to WaterList, which is important when it is
- // called while handling branches so that the water will be seen on the
- // next iteration for constant pools, but in this context, we don't want
- // it. Check for this so it will be removed from the WaterList.
- // Also remove any entry from NewWaterList.
- MachineBasicBlock *WaterBB = prior(MachineFunction::iterator(NewMBB));
- IP = std::find(WaterList.begin(), WaterList.end(), WaterBB);
- if (IP != WaterList.end())
- NewWaterList.erase(WaterBB);
-
- // We are adding new water. Update NewWaterList.
- NewWaterList.insert(NewIsland);
- }
-
- // Remove the original WaterList entry; we want subsequent insertions in
- // this vicinity to go after the one we're about to insert. This
- // considerably reduces the number of times we have to move the same CPE
- // more than once and is also important to ensure the algorithm terminates.
- if (IP != WaterList.end())
- WaterList.erase(IP);
-
- // Okay, we know we can put an island before NewMBB now, do it!
- MF.insert(NewMBB, NewIsland);
-
- // Update internal data structures to account for the newly inserted MBB.
- UpdateForInsertedWaterBlock(NewIsland);
-
- // Decrement the old entry, and remove it if refcount becomes 0.
- DecrementOldEntry(CPI, CPEMI);
-
- // Now that we have an island to add the CPE to, clone the original CPE and
- // add it to the island.
- U.HighWaterMark = NewIsland;
- U.CPEMI = BuildMI(NewIsland, DebugLoc::getUnknownLoc(),
- TII->get(ARM::CONSTPOOL_ENTRY))
- .addImm(ID).addConstantPoolIndex(CPI).addImm(Size);
- CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
- NumCPEs++;
-
- BBOffsets[NewIsland->getNumber()] = BBOffsets[NewMBB->getNumber()];
- // Compensate for .align 2 in thumb mode.
- if (isThumb && (BBOffsets[NewIsland->getNumber()]%4 != 0 || HasInlineAsm))
- Size += 2;
- // Increase the size of the island block to account for the new entry.
- BBSizes[NewIsland->getNumber()] += Size;
- AdjustBBOffsetsAfter(NewIsland, Size);
-
- // Finally, change the CPI in the instruction operand to be ID.
- for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
- if (UserMI->getOperand(i).isCPI()) {
- UserMI->getOperand(i).setIndex(ID);
- break;
- }
-
- DEBUG(errs() << " Moved CPE to #" << ID << " CPI=" << CPI
- << '\t' << *UserMI);
-
- return true;
-}
-
-/// RemoveDeadCPEMI - Remove a dead constant pool entry instruction. Update
-/// sizes and offsets of impacted basic blocks.
-void ARMConstantIslands::RemoveDeadCPEMI(MachineInstr *CPEMI) {
- MachineBasicBlock *CPEBB = CPEMI->getParent();
- unsigned Size = CPEMI->getOperand(2).getImm();
- CPEMI->eraseFromParent();
- BBSizes[CPEBB->getNumber()] -= Size;
- // All succeeding offsets have the current size value added in, fix this.
- if (CPEBB->empty()) {
- // In Thumb1 mode, the size of the island may have been padded by two to
- // compensate for the alignment requirement, so the recorded size can still
- // be 2 even though the block is now empty; fix this up here.
- if (BBSizes[CPEBB->getNumber()] != 0) {
- Size += BBSizes[CPEBB->getNumber()];
- BBSizes[CPEBB->getNumber()] = 0;
- }
- }
- AdjustBBOffsetsAfter(CPEBB, -Size);
- // An island has only one predecessor BB and one successor BB. Check if
- // this BB's predecessor jumps directly to this BB's successor. This
- // shouldn't happen currently.
- assert(!BBIsJumpedOver(CPEBB) && "How did this happen?");
- // FIXME: remove the empty blocks after all the work is done?
-}
-
-/// RemoveUnusedCPEntries - Remove constant pool entries whose refcounts
-/// are zero.
-bool ARMConstantIslands::RemoveUnusedCPEntries() {
- bool MadeChange = false;
- for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
- std::vector<CPEntry> &CPEs = CPEntries[i];
- for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
- if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
- RemoveDeadCPEMI(CPEs[j].CPEMI);
- CPEs[j].CPEMI = NULL;
- MadeChange = true;
- }
- }
- }
- return MadeChange;
-}
-
-/// BBIsInRange - Returns true if the distance between the specified MI and
-/// the specified BB can fit in MI's displacement field.
-bool ARMConstantIslands::BBIsInRange(MachineInstr *MI,MachineBasicBlock *DestBB,
- unsigned MaxDisp) {
- unsigned PCAdj = isThumb ? 4 : 8;
- unsigned BrOffset = GetOffsetOf(MI) + PCAdj;
- unsigned DestOffset = BBOffsets[DestBB->getNumber()];
-
- DEBUG(errs() << "Branch of destination BB#" << DestBB->getNumber()
- << " from BB#" << MI->getParent()->getNumber()
- << " max delta=" << MaxDisp
- << " from " << GetOffsetOf(MI) << " to " << DestOffset
- << " offset " << int(DestOffset-BrOffset) << "\t" << *MI);
-
- if (BrOffset <= DestOffset) {
- // Branch before the Dest.
- if (DestOffset-BrOffset <= MaxDisp)
- return true;
- } else {
- if (BrOffset-DestOffset <= MaxDisp)
- return true;
- }
- return false;
-}
-
-/// FixUpImmediateBr - Fix up an immediate branch whose destination is too far
-/// away to fit in its displacement field.
-bool ARMConstantIslands::FixUpImmediateBr(MachineFunction &MF, ImmBranch &Br) {
- MachineInstr *MI = Br.MI;
- MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
-
- // Check to see if the DestBB is already in-range.
- if (BBIsInRange(MI, DestBB, Br.MaxDisp))
- return false;
-
- if (!Br.isCond)
- return FixUpUnconditionalBr(MF, Br);
- return FixUpConditionalBr(MF, Br);
-}
-
-/// FixUpUnconditionalBr - Fix up an unconditional branch whose destination is
-/// too far away to fit in its displacement field. If the LR register has been
-/// spilled in the epilogue, then we can use BL to implement a far jump.
-/// Otherwise, add an intermediate branch instruction to a branch.
-bool
-ARMConstantIslands::FixUpUnconditionalBr(MachineFunction &MF, ImmBranch &Br) {
- MachineInstr *MI = Br.MI;
- MachineBasicBlock *MBB = MI->getParent();
- if (!isThumb1)
- llvm_unreachable("FixUpUnconditionalBr is Thumb1 only!");
-
- // Use BL to implement far jump.
- Br.MaxDisp = (1 << 21) * 2;
- MI->setDesc(TII->get(ARM::tBfar));
- BBSizes[MBB->getNumber()] += 2;
- AdjustBBOffsetsAfter(MBB, 2);
- HasFarJump = true;
- NumUBrFixed++;
-
- DEBUG(errs() << " Changed B to long jump " << *MI);
-
- return true;
-}
-
-/// FixUpConditionalBr - Fix up a conditional branch whose destination is too
-/// far away to fit in its displacement field. It is converted to an inverse
-/// conditional branch + an unconditional branch to the destination.
-bool
-ARMConstantIslands::FixUpConditionalBr(MachineFunction &MF, ImmBranch &Br) {
- MachineInstr *MI = Br.MI;
- MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
-
- // Add an unconditional branch to the destination and invert the branch
- // condition to jump over it:
- // blt L1
- // =>
- // bge L2
- // b L1
- // L2:
- ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(1).getImm();
- CC = ARMCC::getOppositeCondition(CC);
- unsigned CCReg = MI->getOperand(2).getReg();
-
- // If the branch is at the end of its MBB and that has a fall-through block,
- // direct the updated conditional branch to the fall-through block. Otherwise,
- // split the MBB before the next instruction.
- MachineBasicBlock *MBB = MI->getParent();
- MachineInstr *BMI = &MBB->back();
- bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
-
- NumCBrFixed++;
- if (BMI != MI) {
- if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
- BMI->getOpcode() == Br.UncondBr) {
- // Last MI in the BB is an unconditional branch. Can we simply invert the
- // condition and swap destinations:
- // beq L1
- // b L2
- // =>
- // bne L2
- // b L1
- MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
- if (BBIsInRange(MI, NewDest, Br.MaxDisp)) {
- DEBUG(errs() << " Invert Bcc condition and swap its destination with "
- << *BMI);
- BMI->getOperand(0).setMBB(DestBB);
- MI->getOperand(0).setMBB(NewDest);
- MI->getOperand(1).setImm(CC);
- return true;
- }
- }
- }
-
- if (NeedSplit) {
- SplitBlockBeforeInstr(MI);
- // No need for the branch to the next block. We're adding an unconditional
- // branch to the destination.
- int delta = TII->GetInstSizeInBytes(&MBB->back());
- BBSizes[MBB->getNumber()] -= delta;
- MachineBasicBlock* SplitBB = llvm::next(MachineFunction::iterator(MBB));
- AdjustBBOffsetsAfter(SplitBB, -delta);
- MBB->back().eraseFromParent();
- // BBOffsets[SplitBB] is wrong temporarily, fixed below
- }
- MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
-
- DEBUG(errs() << " Insert B to BB#" << DestBB->getNumber()
- << " also invert condition and change dest. to BB#"
- << NextBB->getNumber() << "\n");
-
- // Insert a new conditional branch and a new unconditional branch.
- // Also update the ImmBranch and add a new entry for the new branch.
- BuildMI(MBB, DebugLoc::getUnknownLoc(),
- TII->get(MI->getOpcode()))
- .addMBB(NextBB).addImm(CC).addReg(CCReg);
- Br.MI = &MBB->back();
- BBSizes[MBB->getNumber()] += TII->GetInstSizeInBytes(&MBB->back());
- BuildMI(MBB, DebugLoc::getUnknownLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
- BBSizes[MBB->getNumber()] += TII->GetInstSizeInBytes(&MBB->back());
- unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
- ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
-
- // Remove the old conditional branch. It may or may not still be in MBB.
- BBSizes[MI->getParent()->getNumber()] -= TII->GetInstSizeInBytes(MI);
- MI->eraseFromParent();
-
- // The net size change is an addition of one unconditional branch.
- int delta = TII->GetInstSizeInBytes(&MBB->back());
- AdjustBBOffsetsAfter(MBB, delta);
- return true;
-}
-
-/// UndoLRSpillRestore - Remove Thumb push / pop instructions that only spill
-/// LR / restore LR to pc. FIXME: This is done here because it's only possible
-/// to do this if tBfar is not used.
-bool ARMConstantIslands::UndoLRSpillRestore() {
- bool MadeChange = false;
- for (unsigned i = 0, e = PushPopMIs.size(); i != e; ++i) {
- MachineInstr *MI = PushPopMIs[i];
- // First two operands are predicates, the third is a zero since there
- // is no writeback.
- if (MI->getOpcode() == ARM::tPOP_RET &&
- MI->getOperand(3).getReg() == ARM::PC &&
- MI->getNumExplicitOperands() == 4) {
- BuildMI(MI->getParent(), MI->getDebugLoc(), TII->get(ARM::tBX_RET));
- MI->eraseFromParent();
- MadeChange = true;
- }
- }
- return MadeChange;
-}
-
-bool ARMConstantIslands::OptimizeThumb2Instructions(MachineFunction &MF) {
- bool MadeChange = false;
-
- // Shrink ADR and LDR from constantpool.
- for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
- CPUser &U = CPUsers[i];
- unsigned Opcode = U.MI->getOpcode();
- unsigned NewOpc = 0;
- unsigned Scale = 1;
- unsigned Bits = 0;
- switch (Opcode) {
- default: break;
- case ARM::t2LEApcrel:
- if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
- NewOpc = ARM::tLEApcrel;
- Bits = 8;
- Scale = 4;
- }
- break;
- case ARM::t2LDRpci:
- if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
- NewOpc = ARM::tLDRpci;
- Bits = 8;
- Scale = 4;
- }
- break;
- }
-
- if (!NewOpc)
- continue;
-
- unsigned UserOffset = GetOffsetOf(U.MI) + 4;
- unsigned MaxOffs = ((1 << Bits) - 1) * Scale;
- // FIXME: Check if offset is multiple of scale if scale is not 4.
- if (CPEIsInRange(U.MI, UserOffset, U.CPEMI, MaxOffs, false, true)) {
- U.MI->setDesc(TII->get(NewOpc));
- MachineBasicBlock *MBB = U.MI->getParent();
- BBSizes[MBB->getNumber()] -= 2;
- AdjustBBOffsetsAfter(MBB, -2);
- ++NumT2CPShrunk;
- MadeChange = true;
- }
- }
-
- MadeChange |= OptimizeThumb2Branches(MF);
- MadeChange |= OptimizeThumb2JumpTables(MF);
- return MadeChange;
-}
-
-bool ARMConstantIslands::OptimizeThumb2Branches(MachineFunction &MF) {
- bool MadeChange = false;
-
- for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i) {
- ImmBranch &Br = ImmBranches[i];
- unsigned Opcode = Br.MI->getOpcode();
- unsigned NewOpc = 0;
- unsigned Scale = 1;
- unsigned Bits = 0;
- switch (Opcode) {
- default: break;
- case ARM::t2B:
- NewOpc = ARM::tB;
- Bits = 11;
- Scale = 2;
- break;
- case ARM::t2Bcc: {
- NewOpc = ARM::tBcc;
- Bits = 8;
- Scale = 2;
- break;
- }
- }
- if (NewOpc) {
- unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
- MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
- if (BBIsInRange(Br.MI, DestBB, MaxOffs)) {
- Br.MI->setDesc(TII->get(NewOpc));
- MachineBasicBlock *MBB = Br.MI->getParent();
- BBSizes[MBB->getNumber()] -= 2;
- AdjustBBOffsetsAfter(MBB, -2);
- ++NumT2BrShrunk;
- MadeChange = true;
- }
- }
-
- Opcode = Br.MI->getOpcode();
- if (Opcode != ARM::tBcc)
- continue;
-
- NewOpc = 0;
- unsigned PredReg = 0;
- ARMCC::CondCodes Pred = llvm::getInstrPredicate(Br.MI, PredReg);
- if (Pred == ARMCC::EQ)
- NewOpc = ARM::tCBZ;
- else if (Pred == ARMCC::NE)
- NewOpc = ARM::tCBNZ;
- if (!NewOpc)
- continue;
- MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
- // Check if the distance is within 126. Subtract 2 from the starting offset
- // because the cmp will be eliminated.
- unsigned BrOffset = GetOffsetOf(Br.MI) + 4 - 2;
- unsigned DestOffset = BBOffsets[DestBB->getNumber()];
- if (BrOffset < DestOffset && (DestOffset - BrOffset) <= 126) {
- MachineBasicBlock::iterator CmpMI = Br.MI; --CmpMI;
- if (CmpMI->getOpcode() == ARM::tCMPzi8) {
- unsigned Reg = CmpMI->getOperand(0).getReg();
- Pred = llvm::getInstrPredicate(CmpMI, PredReg);
- if (Pred == ARMCC::AL &&
- CmpMI->getOperand(1).getImm() == 0 &&
- isARMLowRegister(Reg)) {
- MachineBasicBlock *MBB = Br.MI->getParent();
- MachineInstr *NewBR =
- BuildMI(*MBB, CmpMI, Br.MI->getDebugLoc(), TII->get(NewOpc))
- .addReg(Reg).addMBB(DestBB, Br.MI->getOperand(0).getTargetFlags());
- CmpMI->eraseFromParent();
- Br.MI->eraseFromParent();
- Br.MI = NewBR;
- BBSizes[MBB->getNumber()] -= 2;
- AdjustBBOffsetsAfter(MBB, -2);
- ++NumCBZ;
- MadeChange = true;
- }
- }
- }
- }
-
- return MadeChange;
-}
-
-/// OptimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
-/// jumptables when it's possible.
-bool ARMConstantIslands::OptimizeThumb2JumpTables(MachineFunction &MF) {
- bool MadeChange = false;
-
- // FIXME: After the tables are shrunk, can we get rid of some of the
- // constantpool tables?
- MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
- if (MJTI == 0) return false;
-
- const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
- for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
- MachineInstr *MI = T2JumpTables[i];
- const TargetInstrDesc &TID = MI->getDesc();
- unsigned NumOps = TID.getNumOperands();
- unsigned JTOpIdx = NumOps - (TID.isPredicable() ? 3 : 2);
- MachineOperand JTOP = MI->getOperand(JTOpIdx);
- unsigned JTI = JTOP.getIndex();
- assert(JTI < JT.size());
-
- bool ByteOk = true;
- bool HalfWordOk = true;
- unsigned JTOffset = GetOffsetOf(MI) + 4;
- const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
- for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
- MachineBasicBlock *MBB = JTBBs[j];
- unsigned DstOffset = BBOffsets[MBB->getNumber()];
- // Negative offset is not ok. FIXME: We should change BB layout to make
- // sure all the branches are forward.
- if (ByteOk && (DstOffset - JTOffset) > ((1<<8)-1)*2)
- ByteOk = false;
- unsigned TBHLimit = ((1<<16)-1)*2;
- if (HalfWordOk && (DstOffset - JTOffset) > TBHLimit)
- HalfWordOk = false;
- if (!ByteOk && !HalfWordOk)
- break;
- }
-
- if (ByteOk || HalfWordOk) {
- MachineBasicBlock *MBB = MI->getParent();
- unsigned BaseReg = MI->getOperand(0).getReg();
- bool BaseRegKill = MI->getOperand(0).isKill();
- if (!BaseRegKill)
- continue;
- unsigned IdxReg = MI->getOperand(1).getReg();
- bool IdxRegKill = MI->getOperand(1).isKill();
- MachineBasicBlock::iterator PrevI = MI;
- if (PrevI == MBB->begin())
- continue;
-
- MachineInstr *AddrMI = --PrevI;
- bool OptOk = true;
- // Examine the instruction that calculates the jumptable entry address.
- // If it's not the one just before the t2BR_JT, we won't be able to delete
- // it, so the optimization isn't worth doing.
- for (unsigned k = 0, eee = AddrMI->getNumOperands(); k != eee; ++k) {
- const MachineOperand &MO = AddrMI->getOperand(k);
- if (!MO.isReg() || !MO.getReg())
- continue;
- if (MO.isDef() && MO.getReg() != BaseReg) {
- OptOk = false;
- break;
- }
- if (MO.isUse() && !MO.isKill() && MO.getReg() != IdxReg) {
- OptOk = false;
- break;
- }
- }
- if (!OptOk)
- continue;
-
- // The previous instruction should be a tLEApcrelJT or t2LEApcrelJT; we want
- // to delete it as well.
- MachineInstr *LeaMI = --PrevI;
- if ((LeaMI->getOpcode() != ARM::tLEApcrelJT &&
- LeaMI->getOpcode() != ARM::t2LEApcrelJT) ||
- LeaMI->getOperand(0).getReg() != BaseReg)
- OptOk = false;
-
- if (!OptOk)
- continue;
-
- unsigned Opc = ByteOk ? ARM::t2TBB : ARM::t2TBH;
- MachineInstr *NewJTMI = BuildMI(MBB, MI->getDebugLoc(), TII->get(Opc))
- .addReg(IdxReg, getKillRegState(IdxRegKill))
- .addJumpTableIndex(JTI, JTOP.getTargetFlags())
- .addImm(MI->getOperand(JTOpIdx+1).getImm());
- // FIXME: Insert an "ALIGN" instruction to ensure the next instruction
- // is 2-byte aligned. For now, asm printer will fix it up.
- unsigned NewSize = TII->GetInstSizeInBytes(NewJTMI);
- unsigned OrigSize = TII->GetInstSizeInBytes(AddrMI);
- OrigSize += TII->GetInstSizeInBytes(LeaMI);
- OrigSize += TII->GetInstSizeInBytes(MI);
-
- AddrMI->eraseFromParent();
- LeaMI->eraseFromParent();
- MI->eraseFromParent();
-
- int delta = OrigSize - NewSize;
- BBSizes[MBB->getNumber()] -= delta;
- AdjustBBOffsetsAfter(MBB, -delta);
-
- ++NumTBs;
- MadeChange = true;
- }
- }
-
- return MadeChange;
-}
-
-/// ReorderThumb2JumpTables - Adjust the function's block layout to ensure that
-/// jump tables always branch forwards, since that's what tbb and tbh need.
-bool ARMConstantIslands::ReorderThumb2JumpTables(MachineFunction &MF) {
- bool MadeChange = false;
-
- MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
- if (MJTI == 0) return false;
-
- const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
- for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
- MachineInstr *MI = T2JumpTables[i];
- const TargetInstrDesc &TID = MI->getDesc();
- unsigned NumOps = TID.getNumOperands();
- unsigned JTOpIdx = NumOps - (TID.isPredicable() ? 3 : 2);
- MachineOperand JTOP = MI->getOperand(JTOpIdx);
- unsigned JTI = JTOP.getIndex();
- assert(JTI < JT.size());
-
- // We prefer that the target blocks for the jump table come after the jump
- // instruction so we can use TB[BH]. Loop through the target blocks
- // and try to adjust them so that this holds.
- int JTNumber = MI->getParent()->getNumber();
- const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
- for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
- MachineBasicBlock *MBB = JTBBs[j];
- int DTNumber = MBB->getNumber();
-
- if (DTNumber < JTNumber) {
- // The destination precedes the switch. Try to move the block forward
- // so we have a positive offset.
- MachineBasicBlock *NewBB =
- AdjustJTTargetBlockForward(MBB, MI->getParent());
- if (NewBB)
- MJTI->ReplaceMBBInJumpTable(JTI, JTBBs[j], NewBB);
- MadeChange = true;
- }
- }
- }
-
- return MadeChange;
-}
-
-MachineBasicBlock *ARMConstantIslands::
-AdjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB)
-{
- MachineFunction &MF = *BB->getParent();
-
- // If the destination block is terminated by an unconditional branch,
- // try to move it; otherwise, create a new block following the jump
- // table that branches back to the actual target. This is a very simple
- // heuristic. FIXME: We can definitely improve it.
- MachineBasicBlock *TBB = 0, *FBB = 0;
- SmallVector<MachineOperand, 4> Cond;
- SmallVector<MachineOperand, 4> CondPrior;
- MachineFunction::iterator BBi = BB;
- MachineFunction::iterator OldPrior = prior(BBi);
-
- // If the block terminator isn't analyzable, don't try to move the block
- bool B = TII->AnalyzeBranch(*BB, TBB, FBB, Cond);
-
- // If the block ends in an unconditional branch, move it. The prior block
- // has to have an analyzable terminator for us to move this one. Be paranoid
- // and make sure we're not trying to move the entry block of the function.
- if (!B && Cond.empty() && BB != MF.begin() &&
- !TII->AnalyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
- BB->moveAfter(JTBB);
- OldPrior->updateTerminator();
- BB->updateTerminator();
- // Update numbering to account for the block being moved.
- MF.RenumberBlocks();
- ++NumJTMoved;
- return NULL;
- }
-
- // Create a new MBB for the code after the jump BB.
- MachineBasicBlock *NewBB =
- MF.CreateMachineBasicBlock(JTBB->getBasicBlock());
- MachineFunction::iterator MBBI = JTBB; ++MBBI;
- MF.insert(MBBI, NewBB);
-
- // Add an unconditional branch from NewBB to BB.
- // There doesn't seem to be meaningful DebugInfo available; this doesn't
- // correspond directly to anything in the source.
- assert (isThumb2 && "Adjusting for TB[BH] but not in Thumb2?");
- BuildMI(NewBB, DebugLoc::getUnknownLoc(), TII->get(ARM::t2B)).addMBB(BB);
-
- // Update internal data structures to account for the newly inserted MBB.
- MF.RenumberBlocks(NewBB);
-
- // Update the CFG.
- NewBB->addSuccessor(BB);
- JTBB->removeSuccessor(BB);
- JTBB->addSuccessor(NewBB);
-
- ++NumJTInserted;
- return NewBB;
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMConstantPoolValue.cpp b/libclamav/c++/llvm/lib/Target/ARM/ARMConstantPoolValue.cpp
deleted file mode 100644
index 90dd0c7..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMConstantPoolValue.cpp
+++ /dev/null
@@ -1,121 +0,0 @@
-//===- ARMConstantPoolValue.cpp - ARM constantpool value --------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the ARM specific constantpool value class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARMConstantPoolValue.h"
-#include "llvm/ADT/FoldingSet.h"
-#include "llvm/Constant.h"
-#include "llvm/Constants.h"
-#include "llvm/GlobalValue.h"
-#include "llvm/Type.h"
-#include "llvm/Support/raw_ostream.h"
-#include <cstdlib>
-using namespace llvm;
-
-ARMConstantPoolValue::ARMConstantPoolValue(Constant *cval, unsigned id,
- ARMCP::ARMCPKind K,
- unsigned char PCAdj,
- const char *Modif,
- bool AddCA)
- : MachineConstantPoolValue((const Type*)cval->getType()),
- CVal(cval), S(NULL), LabelId(id), Kind(K), PCAdjust(PCAdj),
- Modifier(Modif), AddCurrentAddress(AddCA) {}
-
-ARMConstantPoolValue::ARMConstantPoolValue(LLVMContext &C,
- const char *s, unsigned id,
- unsigned char PCAdj,
- const char *Modif,
- bool AddCA)
- : MachineConstantPoolValue((const Type*)Type::getInt32Ty(C)),
- CVal(NULL), S(strdup(s)), LabelId(id), Kind(ARMCP::CPExtSymbol),
- PCAdjust(PCAdj), Modifier(Modif), AddCurrentAddress(AddCA) {}
-
-ARMConstantPoolValue::ARMConstantPoolValue(GlobalValue *gv, const char *Modif)
- : MachineConstantPoolValue((const Type*)Type::getInt32Ty(gv->getContext())),
- CVal(gv), S(NULL), LabelId(0), Kind(ARMCP::CPValue), PCAdjust(0),
- Modifier(Modif) {}
-
-GlobalValue *ARMConstantPoolValue::getGV() const {
- return dyn_cast_or_null<GlobalValue>(CVal);
-}
-
-BlockAddress *ARMConstantPoolValue::getBlockAddress() const {
- return dyn_cast_or_null<BlockAddress>(CVal);
-}
-
-int ARMConstantPoolValue::getExistingMachineCPValue(MachineConstantPool *CP,
- unsigned Alignment) {
- unsigned AlignMask = Alignment - 1;
- const std::vector<MachineConstantPoolEntry> Constants = CP->getConstants();
- for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
- if (Constants[i].isMachineConstantPoolEntry() &&
- (Constants[i].getAlignment() & AlignMask) == 0) {
- ARMConstantPoolValue *CPV =
- (ARMConstantPoolValue *)Constants[i].Val.MachineCPVal;
- if (CPV->CVal == CVal &&
- CPV->LabelId == LabelId &&
- CPV->PCAdjust == PCAdjust &&
- (CPV->S == S || strcmp(CPV->S, S) == 0) &&
- (CPV->Modifier == Modifier || strcmp(CPV->Modifier, Modifier) == 0))
- return i;
- }
- }
-
- return -1;
-}
-
-ARMConstantPoolValue::~ARMConstantPoolValue() {
- free((void*)S);
-}
-
-void
-ARMConstantPoolValue::AddSelectionDAGCSEId(FoldingSetNodeID &ID) {
- ID.AddPointer(CVal);
- ID.AddPointer(S);
- ID.AddInteger(LabelId);
- ID.AddInteger(PCAdjust);
-}
-
-bool
-ARMConstantPoolValue::hasSameValue(ARMConstantPoolValue *ACPV) {
- if (ACPV->Kind == Kind &&
- ACPV->CVal == CVal &&
- ACPV->PCAdjust == PCAdjust &&
- (ACPV->S == S || strcmp(ACPV->S, S) == 0) &&
- (ACPV->Modifier == Modifier || strcmp(ACPV->Modifier, Modifier) == 0)) {
- if (ACPV->LabelId == LabelId)
- return true;
- // Two PC relative constpool entries containing the same GV address or
- // external symbols. FIXME: What about blockaddress?
- if (Kind == ARMCP::CPValue || Kind == ARMCP::CPExtSymbol)
- return true;
- }
- return false;
-}
-
-void ARMConstantPoolValue::dump() const {
- errs() << " " << *this;
-}
-
-
-void ARMConstantPoolValue::print(raw_ostream &O) const {
- if (CVal)
- O << CVal->getName();
- else
- O << S;
- if (Modifier) O << "(" << Modifier << ")";
- if (PCAdjust != 0) {
- O << "-(LPC" << LabelId << "+" << (unsigned)PCAdjust;
- if (AddCurrentAddress) O << "-.";
- O << ")";
- }
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMConstantPoolValue.h b/libclamav/c++/llvm/lib/Target/ARM/ARMConstantPoolValue.h
deleted file mode 100644
index 741acde..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMConstantPoolValue.h
+++ /dev/null
@@ -1,100 +0,0 @@
-//===- ARMConstantPoolValue.h - ARM constantpool value ----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the ARM specific constantpool value class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_ARM_CONSTANTPOOLVALUE_H
-#define LLVM_TARGET_ARM_CONSTANTPOOLVALUE_H
-
-#include "llvm/CodeGen/MachineConstantPool.h"
-
-namespace llvm {
-
-class Constant;
-class BlockAddress;
-class GlobalValue;
-class LLVMContext;
-
-namespace ARMCP {
- enum ARMCPKind {
- CPValue,
- CPExtSymbol,
- CPBlockAddress,
- CPLSDA
- };
-}
-
-/// ARMConstantPoolValue - ARM specific constantpool value. This is used to
-/// represent PC-relative displacement between the address of the load
-/// instruction and the constant being loaded, i.e. (&GV-(LPIC+8)).
-class ARMConstantPoolValue : public MachineConstantPoolValue {
- Constant *CVal; // Constant being loaded.
- const char *S; // ExtSymbol being loaded.
- unsigned LabelId; // Label id of the load.
- ARMCP::ARMCPKind Kind; // Kind of constant.
- unsigned char PCAdjust; // Extra adjustment if constantpool is pc-relative.
- // 8 for ARM, 4 for Thumb.
- const char *Modifier; // GV modifier i.e. (&GV(modifier)-(LPIC+8))
- bool AddCurrentAddress;
-
-public:
- ARMConstantPoolValue(Constant *cval, unsigned id,
- ARMCP::ARMCPKind Kind = ARMCP::CPValue,
- unsigned char PCAdj = 0, const char *Modifier = NULL,
- bool AddCurrentAddress = false);
- ARMConstantPoolValue(LLVMContext &C, const char *s, unsigned id,
- unsigned char PCAdj = 0, const char *Modifier = NULL,
- bool AddCurrentAddress = false);
- ARMConstantPoolValue(GlobalValue *GV, const char *Modifier);
- ARMConstantPoolValue();
- ~ARMConstantPoolValue();
-
- GlobalValue *getGV() const;
- const char *getSymbol() const { return S; }
- BlockAddress *getBlockAddress() const;
- const char *getModifier() const { return Modifier; }
- bool hasModifier() const { return Modifier != NULL; }
- bool mustAddCurrentAddress() const { return AddCurrentAddress; }
- unsigned getLabelId() const { return LabelId; }
- unsigned char getPCAdjustment() const { return PCAdjust; }
- bool isGlobalValue() const { return Kind == ARMCP::CPValue; }
- bool isExtSymbol() const { return Kind == ARMCP::CPExtSymbol; }
- bool isBlockAddress() { return Kind == ARMCP::CPBlockAddress; }
- bool isLSDA() { return Kind == ARMCP::CPLSDA; }
-
- virtual unsigned getRelocationInfo() const {
- // FIXME: This is conservatively claiming that these entries require a
- // relocation; we may be able to do better than this.
- return 2;
- }
-
- virtual int getExistingMachineCPValue(MachineConstantPool *CP,
- unsigned Alignment);
-
- virtual void AddSelectionDAGCSEId(FoldingSetNodeID &ID);
-
- /// hasSameValue - Return true if this ARM constpool value
- /// can share the same constantpool entry as another ARM constpool value.
- bool hasSameValue(ARMConstantPoolValue *ACPV);
-
- void print(raw_ostream *O) const { if (O) print(*O); }
- void print(raw_ostream &O) const;
- void dump() const;
-};
-
-inline raw_ostream &operator<<(raw_ostream &O, const ARMConstantPoolValue &V) {
- V.print(O);
- return O;
-}
-
-} // End llvm namespace
-
-#endif
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/libclamav/c++/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
deleted file mode 100644
index 1b8727d..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ /dev/null
@@ -1,128 +0,0 @@
-//===-- ARMExpandPseudoInsts.cpp - Expand pseudo instructions -----*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a pass that expands pseudo instructions into target
-// instructions to allow proper scheduling, if-conversion, and other late
-// optimizations. This pass should be run after register allocation but before
-// the post-regalloc scheduling pass.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "arm-pseudo"
-#include "ARM.h"
-#include "ARMBaseInstrInfo.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-
-using namespace llvm;
-
-namespace {
- class ARMExpandPseudo : public MachineFunctionPass {
- public:
- static char ID;
- ARMExpandPseudo() : MachineFunctionPass(&ID) {}
-
- const TargetInstrInfo *TII;
-
- virtual bool runOnMachineFunction(MachineFunction &Fn);
-
- virtual const char *getPassName() const {
- return "ARM pseudo instruction expansion pass";
- }
-
- private:
- bool ExpandMBB(MachineBasicBlock &MBB);
- };
- char ARMExpandPseudo::ID = 0;
-}
-
-bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
- bool Modified = false;
-
- MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
- while (MBBI != E) {
- MachineInstr &MI = *MBBI;
- MachineBasicBlock::iterator NMBBI = llvm::next(MBBI);
-
- unsigned Opcode = MI.getOpcode();
- switch (Opcode) {
- default: break;
- case ARM::tLDRpci_pic:
- case ARM::t2LDRpci_pic: {
- unsigned NewLdOpc = (Opcode == ARM::tLDRpci_pic)
- ? ARM::tLDRpci : ARM::t2LDRpci;
- unsigned DstReg = MI.getOperand(0).getReg();
- if (!MI.getOperand(0).isDead()) {
- MachineInstr *NewMI =
- AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(),
- TII->get(NewLdOpc), DstReg)
- .addOperand(MI.getOperand(1)));
- NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
- BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tPICADD))
- .addReg(DstReg, getDefRegState(true))
- .addReg(DstReg)
- .addOperand(MI.getOperand(2));
- }
- MI.eraseFromParent();
- Modified = true;
- break;
- }
- case ARM::t2MOVi32imm: {
- unsigned DstReg = MI.getOperand(0).getReg();
- if (!MI.getOperand(0).isDead()) {
- const MachineOperand &MO = MI.getOperand(1);
- MachineInstrBuilder LO16, HI16;
-
- LO16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::t2MOVi16),
- DstReg);
- HI16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::t2MOVTi16))
- .addReg(DstReg, getDefRegState(true)).addReg(DstReg);
-
- if (MO.isImm()) {
- unsigned Imm = MO.getImm();
- unsigned Lo16 = Imm & 0xffff;
- unsigned Hi16 = (Imm >> 16) & 0xffff;
- LO16 = LO16.addImm(Lo16);
- HI16 = HI16.addImm(Hi16);
- } else {
- GlobalValue *GV = MO.getGlobal();
- unsigned TF = MO.getTargetFlags();
- LO16 = LO16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_LO16);
- HI16 = HI16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_HI16);
- // FIXME: What about memoperands?
- }
- AddDefaultPred(LO16);
- AddDefaultPred(HI16);
- }
- MI.eraseFromParent();
- Modified = true;
- }
- // FIXME: expand t2MOVi32imm
- }
- MBBI = NMBBI;
- }
-
- return Modified;
-}
-
-bool ARMExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
- TII = MF.getTarget().getInstrInfo();
-
- bool Modified = false;
- for (MachineFunction::iterator MFI = MF.begin(), E = MF.end(); MFI != E;
- ++MFI)
- Modified |= ExpandMBB(*MFI);
- return Modified;
-}
-
-/// createARMExpandPseudoPass - returns an instance of the pseudo instruction
-/// expansion pass.
-FunctionPass *llvm::createARMExpandPseudoPass() {
- return new ARMExpandPseudo();
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMFrameInfo.h b/libclamav/c++/llvm/lib/Target/ARM/ARMFrameInfo.h
deleted file mode 100644
index d5dae24..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMFrameInfo.h
+++ /dev/null
@@ -1,32 +0,0 @@
-//===-- ARMTargetFrameInfo.h - Define TargetFrameInfo for ARM ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-//
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARM_FRAMEINFO_H
-#define ARM_FRAMEINFO_H
-
-#include "ARM.h"
-#include "ARMSubtarget.h"
-#include "llvm/Target/TargetFrameInfo.h"
-
-namespace llvm {
-
-class ARMFrameInfo : public TargetFrameInfo {
-public:
- explicit ARMFrameInfo(const ARMSubtarget &ST)
- : TargetFrameInfo(StackGrowsDown, ST.getStackAlignment(), 0, 4) {
- }
-};
-
-} // End llvm namespace
-
-#endif
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/libclamav/c++/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
deleted file mode 100644
index 013e00a..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ /dev/null
@@ -1,1963 +0,0 @@
-//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines an instruction selector for the ARM target.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARM.h"
-#include "ARMAddressingModes.h"
-#include "ARMISelLowering.h"
-#include "ARMTargetMachine.h"
-#include "llvm/CallingConv.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-//===--------------------------------------------------------------------===//
-/// ARMDAGToDAGISel - ARM specific code to select ARM machine
-/// instructions for SelectionDAG operations.
-///
-namespace {
-class ARMDAGToDAGISel : public SelectionDAGISel {
- ARMBaseTargetMachine &TM;
-
- /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
- /// make the right decision when generating code for different targets.
- const ARMSubtarget *Subtarget;
-
-public:
- explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
- CodeGenOpt::Level OptLevel)
- : SelectionDAGISel(tm, OptLevel), TM(tm),
- Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
- }
-
- virtual const char *getPassName() const {
- return "ARM Instruction Selection";
- }
-
- /// getI32Imm - Return a target constant of type i32 with the specified
- /// value.
- inline SDValue getI32Imm(unsigned Imm) {
- return CurDAG->getTargetConstant(Imm, MVT::i32);
- }
-
- SDNode *Select(SDNode *N);
-
- bool SelectShifterOperandReg(SDNode *Op, SDValue N, SDValue &A,
- SDValue &B, SDValue &C);
- bool SelectAddrMode2(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &Offset, SDValue &Opc);
- bool SelectAddrMode2Offset(SDNode *Op, SDValue N,
- SDValue &Offset, SDValue &Opc);
- bool SelectAddrMode3(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &Offset, SDValue &Opc);
- bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
- SDValue &Offset, SDValue &Opc);
- bool SelectAddrMode4(SDNode *Op, SDValue N, SDValue &Addr,
- SDValue &Mode);
- bool SelectAddrMode5(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &Offset);
- bool SelectAddrMode6(SDNode *Op, SDValue N, SDValue &Addr, SDValue &Update,
- SDValue &Opc, SDValue &Align);
-
- bool SelectAddrModePC(SDNode *Op, SDValue N, SDValue &Offset,
- SDValue &Label);
-
- bool SelectThumbAddrModeRR(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &Offset);
- bool SelectThumbAddrModeRI5(SDNode *Op, SDValue N, unsigned Scale,
- SDValue &Base, SDValue &OffImm,
- SDValue &Offset);
- bool SelectThumbAddrModeS1(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &OffImm, SDValue &Offset);
- bool SelectThumbAddrModeS2(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &OffImm, SDValue &Offset);
- bool SelectThumbAddrModeS4(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &OffImm, SDValue &Offset);
- bool SelectThumbAddrModeSP(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &OffImm);
-
- bool SelectT2ShifterOperandReg(SDNode *Op, SDValue N,
- SDValue &BaseReg, SDValue &Opc);
- bool SelectT2AddrModeImm12(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &OffImm);
- bool SelectT2AddrModeImm8(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &OffImm);
- bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
- SDValue &OffImm);
- bool SelectT2AddrModeImm8s4(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &OffImm);
- bool SelectT2AddrModeSoReg(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &OffReg, SDValue &ShImm);
-
- // Include the pieces autogenerated from the target description.
-#include "ARMGenDAGISel.inc"
-
-private:
- /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
- /// ARM.
- SDNode *SelectARMIndexedLoad(SDNode *N);
- SDNode *SelectT2IndexedLoad(SDNode *N);
-
- /// SelectDYN_ALLOC - Select dynamic alloc for Thumb.
- SDNode *SelectDYN_ALLOC(SDNode *N);
-
- /// SelectVLD - Select NEON load intrinsics. NumVecs should
- /// be 2, 3 or 4. The opcode arrays specify the instructions used for
- /// loads of D registers and even subregs and odd subregs of Q registers.
- /// For NumVecs == 2, QOpcodes1 is not used.
- SDNode *SelectVLD(SDNode *N, unsigned NumVecs, unsigned *DOpcodes,
- unsigned *QOpcodes0, unsigned *QOpcodes1);
-
- /// SelectVST - Select NEON store intrinsics. NumVecs should
- /// be 2, 3 or 4. The opcode arrays specify the instructions used for
- /// stores of D registers and even subregs and odd subregs of Q registers.
- /// For NumVecs == 2, QOpcodes1 is not used.
- SDNode *SelectVST(SDNode *N, unsigned NumVecs, unsigned *DOpcodes,
- unsigned *QOpcodes0, unsigned *QOpcodes1);
-
- /// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
- /// be 2, 3 or 4. The opcode arrays specify the instructions used for
- /// load/store of D registers and even subregs and odd subregs of Q registers.
- SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad, unsigned NumVecs,
- unsigned *DOpcodes, unsigned *QOpcodes0,
- unsigned *QOpcodes1);
-
- /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
- SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, unsigned Opc);
-
- /// SelectCMOVOp - Select CMOV instructions for ARM.
- SDNode *SelectCMOVOp(SDNode *N);
- SDNode *SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR,
- SDValue InFlag);
- SDNode *SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR,
- SDValue InFlag);
- SDNode *SelectT2CMOVSoImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR,
- SDValue InFlag);
- SDNode *SelectARMCMOVSoImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR,
- SDValue InFlag);
-
- /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
- /// inline asm expressions.
- virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
- char ConstraintCode,
- std::vector<SDValue> &OutOps);
-
- /// PairDRegs - Insert a pair of double registers into an implicit def to
- /// form a quad register.
- SDNode *PairDRegs(EVT VT, SDValue V0, SDValue V1);
-};
-}
-
-/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
-/// operand. If so Imm will receive the 32-bit value.
-static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
- if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
- Imm = cast<ConstantSDNode>(N)->getZExtValue();
- return true;
- }
- return false;
-}
-
-// isInt32Immediate - This method tests to see if the operand is a 32-bit
-// constant. If so, Imm will receive the 32-bit value.
-static bool isInt32Immediate(SDValue N, unsigned &Imm) {
- return isInt32Immediate(N.getNode(), Imm);
-}
-
-// isOpcWithIntImmediate - This method tests to see if the node has a specific
-// opcode and an immediate integer right operand.
-// If so, Imm will receive the 32-bit value.
-static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
- return N->getOpcode() == Opc &&
- isInt32Immediate(N->getOperand(1).getNode(), Imm);
-}
-
-
-bool ARMDAGToDAGISel::SelectShifterOperandReg(SDNode *Op,
- SDValue N,
- SDValue &BaseReg,
- SDValue &ShReg,
- SDValue &Opc) {
- ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
-
- // Don't match base register only case. That is matched to a separate
- // lower complexity pattern with explicit register operand.
- if (ShOpcVal == ARM_AM::no_shift) return false;
-
- BaseReg = N.getOperand(0);
- unsigned ShImmVal = 0;
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- ShReg = CurDAG->getRegister(0, MVT::i32);
- ShImmVal = RHS->getZExtValue() & 31;
- } else {
- ShReg = N.getOperand(1);
- }
- Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
- MVT::i32);
- return true;
-}
-
-bool ARMDAGToDAGISel::SelectAddrMode2(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &Offset,
- SDValue &Opc) {
- if (N.getOpcode() == ISD::MUL) {
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- // X * [3,5,9] -> X + X * [2,4,8] etc.
- int RHSC = (int)RHS->getZExtValue();
- if (RHSC & 1) {
- RHSC = RHSC & ~1;
- ARM_AM::AddrOpc AddSub = ARM_AM::add;
- if (RHSC < 0) {
- AddSub = ARM_AM::sub;
- RHSC = - RHSC;
- }
- if (isPowerOf2_32(RHSC)) {
- unsigned ShAmt = Log2_32(RHSC);
- Base = Offset = N.getOperand(0);
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
- ARM_AM::lsl),
- MVT::i32);
- return true;
- }
- }
- }
- }
-
- if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
- Base = N;
- if (N.getOpcode() == ISD::FrameIndex) {
- int FI = cast<FrameIndexSDNode>(N)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- } else if (N.getOpcode() == ARMISD::Wrapper &&
- !(Subtarget->useMovt() &&
- N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
- Base = N.getOperand(0);
- }
- Offset = CurDAG->getRegister(0, MVT::i32);
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
- ARM_AM::no_shift),
- MVT::i32);
- return true;
- }
-
- // Match simple R +/- imm12 operands.
- if (N.getOpcode() == ISD::ADD)
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- if ((RHSC >= 0 && RHSC < 0x1000) ||
- (RHSC < 0 && RHSC > -0x1000)) { // 12 bits.
- Base = N.getOperand(0);
- if (Base.getOpcode() == ISD::FrameIndex) {
- int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- }
- Offset = CurDAG->getRegister(0, MVT::i32);
-
- ARM_AM::AddrOpc AddSub = ARM_AM::add;
- if (RHSC < 0) {
- AddSub = ARM_AM::sub;
- RHSC = - RHSC;
- }
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
- ARM_AM::no_shift),
- MVT::i32);
- return true;
- }
- }
-
- // Otherwise this is R +/- [possibly shifted] R.
- ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::ADD ? ARM_AM::add:ARM_AM::sub;
- ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
- unsigned ShAmt = 0;
-
- Base = N.getOperand(0);
- Offset = N.getOperand(1);
-
- if (ShOpcVal != ARM_AM::no_shift) {
- // Check to see if the RHS of the shift is a constant; if not, we can't fold
- // it.
- if (ConstantSDNode *Sh =
- dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
- ShAmt = Sh->getZExtValue();
- Offset = N.getOperand(1).getOperand(0);
- } else {
- ShOpcVal = ARM_AM::no_shift;
- }
- }
-
- // Try matching (R shl C) + (R).
- if (N.getOpcode() == ISD::ADD && ShOpcVal == ARM_AM::no_shift) {
- ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
- if (ShOpcVal != ARM_AM::no_shift) {
- // Check to see if the RHS of the shift is a constant; if not, we can't
- // fold it.
- if (ConstantSDNode *Sh =
- dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
- ShAmt = Sh->getZExtValue();
- Offset = N.getOperand(0).getOperand(0);
- Base = N.getOperand(1);
- } else {
- ShOpcVal = ARM_AM::no_shift;
- }
- }
- }
-
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
- MVT::i32);
- return true;
-}
-
-bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDNode *Op, SDValue N,
- SDValue &Offset, SDValue &Opc) {
- unsigned Opcode = Op->getOpcode();
- ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
- ? cast<LoadSDNode>(Op)->getAddressingMode()
- : cast<StoreSDNode>(Op)->getAddressingMode();
- ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
- ? ARM_AM::add : ARM_AM::sub;
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
- int Val = (int)C->getZExtValue();
- if (Val >= 0 && Val < 0x1000) { // 12 bits.
- Offset = CurDAG->getRegister(0, MVT::i32);
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
- ARM_AM::no_shift),
- MVT::i32);
- return true;
- }
- }
-
- Offset = N;
- ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
- unsigned ShAmt = 0;
- if (ShOpcVal != ARM_AM::no_shift) {
- // Check to see if the RHS of the shift is a constant; if not, we can't fold
- // it.
- if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- ShAmt = Sh->getZExtValue();
- Offset = N.getOperand(0);
- } else {
- ShOpcVal = ARM_AM::no_shift;
- }
- }
-
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
- MVT::i32);
- return true;
-}
-
-
-bool ARMDAGToDAGISel::SelectAddrMode3(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &Offset,
- SDValue &Opc) {
- if (N.getOpcode() == ISD::SUB) {
- // X - C is canonicalized to X + -C; no need to handle it here.
- Base = N.getOperand(0);
- Offset = N.getOperand(1);
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
- return true;
- }
-
- if (N.getOpcode() != ISD::ADD) {
- Base = N;
- if (N.getOpcode() == ISD::FrameIndex) {
- int FI = cast<FrameIndexSDNode>(N)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- }
- Offset = CurDAG->getRegister(0, MVT::i32);
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
- return true;
- }
-
- // If the RHS is +/- imm8, fold into addr mode.
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- if ((RHSC >= 0 && RHSC < 256) ||
- (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed.
- Base = N.getOperand(0);
- if (Base.getOpcode() == ISD::FrameIndex) {
- int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- }
- Offset = CurDAG->getRegister(0, MVT::i32);
-
- ARM_AM::AddrOpc AddSub = ARM_AM::add;
- if (RHSC < 0) {
- AddSub = ARM_AM::sub;
- RHSC = - RHSC;
- }
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
- return true;
- }
- }
-
- Base = N.getOperand(0);
- Offset = N.getOperand(1);
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
- return true;
-}
-
-bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
- SDValue &Offset, SDValue &Opc) {
- unsigned Opcode = Op->getOpcode();
- ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
- ? cast<LoadSDNode>(Op)->getAddressingMode()
- : cast<StoreSDNode>(Op)->getAddressingMode();
- ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
- ? ARM_AM::add : ARM_AM::sub;
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
- int Val = (int)C->getZExtValue();
- if (Val >= 0 && Val < 256) {
- Offset = CurDAG->getRegister(0, MVT::i32);
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
- return true;
- }
- }
-
- Offset = N;
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
- return true;
-}
-
-bool ARMDAGToDAGISel::SelectAddrMode4(SDNode *Op, SDValue N,
- SDValue &Addr, SDValue &Mode) {
- Addr = N;
- Mode = CurDAG->getTargetConstant(0, MVT::i32);
- return true;
-}
-
-bool ARMDAGToDAGISel::SelectAddrMode5(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &Offset) {
- if (N.getOpcode() != ISD::ADD) {
- Base = N;
- if (N.getOpcode() == ISD::FrameIndex) {
- int FI = cast<FrameIndexSDNode>(N)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- } else if (N.getOpcode() == ARMISD::Wrapper &&
- !(Subtarget->useMovt() &&
- N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
- Base = N.getOperand(0);
- }
- Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
- MVT::i32);
- return true;
- }
-
- // If the RHS is +/- imm8, fold into addr mode.
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- if ((RHSC & 3) == 0) { // The constant is implicitly multiplied by 4.
- RHSC >>= 2;
- if ((RHSC >= 0 && RHSC < 256) ||
- (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed.
- Base = N.getOperand(0);
- if (Base.getOpcode() == ISD::FrameIndex) {
- int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- }
-
- ARM_AM::AddrOpc AddSub = ARM_AM::add;
- if (RHSC < 0) {
- AddSub = ARM_AM::sub;
- RHSC = - RHSC;
- }
- Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
- MVT::i32);
- return true;
- }
- }
- }
-
- Base = N;
- Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
- MVT::i32);
- return true;
-}
-
-bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Op, SDValue N,
- SDValue &Addr, SDValue &Update,
- SDValue &Opc, SDValue &Align) {
- Addr = N;
- // Default to no writeback.
- Update = CurDAG->getRegister(0, MVT::i32);
- Opc = CurDAG->getTargetConstant(ARM_AM::getAM6Opc(false), MVT::i32);
- // Default to no alignment.
- Align = CurDAG->getTargetConstant(0, MVT::i32);
- return true;
-}
-
-bool ARMDAGToDAGISel::SelectAddrModePC(SDNode *Op, SDValue N,
- SDValue &Offset, SDValue &Label) {
- if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
- Offset = N.getOperand(0);
- SDValue N1 = N.getOperand(1);
- Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
- MVT::i32);
- return true;
- }
- return false;
-}
-
-bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &Offset){
- // FIXME dl should come from the parent load or store, not the address
- DebugLoc dl = Op->getDebugLoc();
- if (N.getOpcode() != ISD::ADD) {
- ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
- if (!NC || NC->getZExtValue() != 0)
- return false;
-
- Base = Offset = N;
- return true;
- }
-
- Base = N.getOperand(0);
- Offset = N.getOperand(1);
- return true;
-}
-
-bool
-ARMDAGToDAGISel::SelectThumbAddrModeRI5(SDNode *Op, SDValue N,
- unsigned Scale, SDValue &Base,
- SDValue &OffImm, SDValue &Offset) {
- if (Scale == 4) {
- SDValue TmpBase, TmpOffImm;
- if (SelectThumbAddrModeSP(Op, N, TmpBase, TmpOffImm))
- return false; // We want to select tLDRspi / tSTRspi instead.
- if (N.getOpcode() == ARMISD::Wrapper &&
- N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
- return false; // We want to select tLDRpci instead.
- }
-
- if (N.getOpcode() != ISD::ADD) {
- if (N.getOpcode() == ARMISD::Wrapper &&
- !(Subtarget->useMovt() &&
- N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
- Base = N.getOperand(0);
- } else
- Base = N;
-
- Offset = CurDAG->getRegister(0, MVT::i32);
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
- return true;
- }
-
- // Thumb does not have [sp, r] address mode.
- RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
- RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
- if ((LHSR && LHSR->getReg() == ARM::SP) ||
- (RHSR && RHSR->getReg() == ARM::SP)) {
- Base = N;
- Offset = CurDAG->getRegister(0, MVT::i32);
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
- return true;
- }
-
- // If the RHS is + imm5 * scale, fold into addr mode.
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- if ((RHSC & (Scale-1)) == 0) { // The constant is implicitly multiplied.
- RHSC /= Scale;
- if (RHSC >= 0 && RHSC < 32) {
- Base = N.getOperand(0);
- Offset = CurDAG->getRegister(0, MVT::i32);
- OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
- return true;
- }
- }
- }
-
- Base = N.getOperand(0);
- Offset = N.getOperand(1);
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
- return true;
-}
-
-bool ARMDAGToDAGISel::SelectThumbAddrModeS1(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &OffImm,
- SDValue &Offset) {
- return SelectThumbAddrModeRI5(Op, N, 1, Base, OffImm, Offset);
-}
-
-bool ARMDAGToDAGISel::SelectThumbAddrModeS2(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &OffImm,
- SDValue &Offset) {
- return SelectThumbAddrModeRI5(Op, N, 2, Base, OffImm, Offset);
-}
-
-bool ARMDAGToDAGISel::SelectThumbAddrModeS4(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &OffImm,
- SDValue &Offset) {
- return SelectThumbAddrModeRI5(Op, N, 4, Base, OffImm, Offset);
-}
-
-bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &OffImm) {
- if (N.getOpcode() == ISD::FrameIndex) {
- int FI = cast<FrameIndexSDNode>(N)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
- return true;
- }
-
- if (N.getOpcode() != ISD::ADD)
- return false;
-
- RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
- if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
- (LHSR && LHSR->getReg() == ARM::SP)) {
- // If the RHS is + imm8 * scale, fold into addr mode.
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- if ((RHSC & 3) == 0) { // The constant is implicitly multiplied.
- RHSC >>= 2;
- if (RHSC >= 0 && RHSC < 256) {
- Base = N.getOperand(0);
- if (Base.getOpcode() == ISD::FrameIndex) {
- int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- }
- OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
- return true;
- }
- }
- }
- }
-
- return false;
-}
-
-bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDNode *Op, SDValue N,
- SDValue &BaseReg,
- SDValue &Opc) {
- ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
-
- // Don't match base register only case. That is matched to a separate
- // lower complexity pattern with explicit register operand.
- if (ShOpcVal == ARM_AM::no_shift) return false;
-
- BaseReg = N.getOperand(0);
- unsigned ShImmVal = 0;
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- ShImmVal = RHS->getZExtValue() & 31;
- Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
- return true;
- }
-
- return false;
-}
-
-bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &OffImm) {
- // Match simple R + imm12 operands.
-
- // Base only.
- if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
- if (N.getOpcode() == ISD::FrameIndex) {
- // Match frame index...
- int FI = cast<FrameIndexSDNode>(N)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
- return true;
- } else if (N.getOpcode() == ARMISD::Wrapper &&
- !(Subtarget->useMovt() &&
- N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
- Base = N.getOperand(0);
- if (Base.getOpcode() == ISD::TargetConstantPool)
- return false; // We want to select t2LDRpci instead.
- } else
- Base = N;
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
- return true;
- }
-
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- if (SelectT2AddrModeImm8(Op, N, Base, OffImm))
- // Let t2LDRi8 handle (R - imm8).
- return false;
-
- int RHSC = (int)RHS->getZExtValue();
- if (N.getOpcode() == ISD::SUB)
- RHSC = -RHSC;
-
- if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
- Base = N.getOperand(0);
- if (Base.getOpcode() == ISD::FrameIndex) {
- int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- }
- OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
- return true;
- }
- }
-
- // Base only.
- Base = N;
- OffImm = CurDAG->getTargetConstant(0, MVT::i32);
- return true;
-}
-
-bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &OffImm) {
- // Match simple R - imm8 operands.
- if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::SUB) {
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getSExtValue();
- if (N.getOpcode() == ISD::SUB)
- RHSC = -RHSC;
-
- if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
- Base = N.getOperand(0);
- if (Base.getOpcode() == ISD::FrameIndex) {
- int FI = cast<FrameIndexSDNode>(Base)->getIndex();
- Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- }
- OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
- return true;
- }
- }
- }
-
- return false;
-}
-
-bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
- SDValue &OffImm){
- unsigned Opcode = Op->getOpcode();
- ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
- ? cast<LoadSDNode>(Op)->getAddressingMode()
- : cast<StoreSDNode>(Op)->getAddressingMode();
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N)) {
- int RHSC = (int)RHS->getZExtValue();
- if (RHSC >= 0 && RHSC < 0x100) { // 8 bits.
- OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
- ? CurDAG->getTargetConstant(RHSC, MVT::i32)
- : CurDAG->getTargetConstant(-RHSC, MVT::i32);
- return true;
- }
- }
-
- return false;
-}
-
-bool ARMDAGToDAGISel::SelectT2AddrModeImm8s4(SDNode *Op, SDValue N,
- SDValue &Base, SDValue &OffImm) {
- if (N.getOpcode() == ISD::ADD) {
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- if (((RHSC & 0x3) == 0) &&
- ((RHSC >= 0 && RHSC < 0x400) || (RHSC < 0 && RHSC > -0x400))) { // 8 bits.
- Base = N.getOperand(0);
- OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
- return true;
- }
- }
- } else if (N.getOpcode() == ISD::SUB) {
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- if (((RHSC & 0x3) == 0) && (RHSC >= 0 && RHSC < 0x400)) { // 8 bits.
- Base = N.getOperand(0);
- OffImm = CurDAG->getTargetConstant(-RHSC, MVT::i32);
- return true;
- }
- }
- }
-
- return false;
-}
-
-bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDNode *Op, SDValue N,
- SDValue &Base,
- SDValue &OffReg, SDValue &ShImm) {
- // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
- if (N.getOpcode() != ISD::ADD)
- return false;
-
- // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
- return false;
- else if (RHSC < 0 && RHSC >= -255) // 8 bits
- return false;
- }
-
- // Look for (R + R) or (R + (R << [1,2,3])).
- unsigned ShAmt = 0;
- Base = N.getOperand(0);
- OffReg = N.getOperand(1);
-
- // Swap if it is ((R << c) + R).
- ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg);
- if (ShOpcVal != ARM_AM::lsl) {
- ShOpcVal = ARM_AM::getShiftOpcForNode(Base);
- if (ShOpcVal == ARM_AM::lsl)
- std::swap(Base, OffReg);
- }
-
- if (ShOpcVal == ARM_AM::lsl) {
- // Check to see if the RHS of the shift is a constant; if not, we can't
- // fold it.
- if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
- ShAmt = Sh->getZExtValue();
- if (ShAmt >= 4) {
- ShAmt = 0;
- ShOpcVal = ARM_AM::no_shift;
- } else
- OffReg = OffReg.getOperand(0);
- } else {
- ShOpcVal = ARM_AM::no_shift;
- }
- }
-
- ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);
-
- return true;
-}
-
-//===--------------------------------------------------------------------===//
-
-/// getAL - Returns an ARMCC::AL immediate node.
-static inline SDValue getAL(SelectionDAG *CurDAG) {
- return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
-}
-
-SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
- LoadSDNode *LD = cast<LoadSDNode>(N);
- ISD::MemIndexedMode AM = LD->getAddressingMode();
- if (AM == ISD::UNINDEXED)
- return NULL;
-
- EVT LoadedVT = LD->getMemoryVT();
- SDValue Offset, AMOpc;
- bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
- unsigned Opcode = 0;
- bool Match = false;
- if (LoadedVT == MVT::i32 &&
- SelectAddrMode2Offset(N, LD->getOffset(), Offset, AMOpc)) {
- Opcode = isPre ? ARM::LDR_PRE : ARM::LDR_POST;
- Match = true;
- } else if (LoadedVT == MVT::i16 &&
- SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
- Match = true;
- Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
- ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
- : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
- } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
- if (LD->getExtensionType() == ISD::SEXTLOAD) {
- if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
- Match = true;
- Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
- }
- } else {
- if (SelectAddrMode2Offset(N, LD->getOffset(), Offset, AMOpc)) {
- Match = true;
- Opcode = isPre ? ARM::LDRB_PRE : ARM::LDRB_POST;
- }
- }
- }
-
- if (Match) {
- SDValue Chain = LD->getChain();
- SDValue Base = LD->getBasePtr();
- SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
- CurDAG->getRegister(0, MVT::i32), Chain };
- return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32,
- MVT::Other, Ops, 6);
- }
-
- return NULL;
-}
-
-SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
- LoadSDNode *LD = cast<LoadSDNode>(N);
- ISD::MemIndexedMode AM = LD->getAddressingMode();
- if (AM == ISD::UNINDEXED)
- return NULL;
-
- EVT LoadedVT = LD->getMemoryVT();
- bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
- SDValue Offset;
- bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
- unsigned Opcode = 0;
- bool Match = false;
- if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
- switch (LoadedVT.getSimpleVT().SimpleTy) {
- case MVT::i32:
- Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
- break;
- case MVT::i16:
- if (isSExtLd)
- Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
- else
- Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
- break;
- case MVT::i8:
- case MVT::i1:
- if (isSExtLd)
- Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
- else
- Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
- break;
- default:
- return NULL;
- }
- Match = true;
- }
-
- if (Match) {
- SDValue Chain = LD->getChain();
- SDValue Base = LD->getBasePtr();
- SDValue Ops[]= { Base, Offset, getAL(CurDAG),
- CurDAG->getRegister(0, MVT::i32), Chain };
- return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32,
- MVT::Other, Ops, 5);
- }
-
- return NULL;
-}
-
-SDNode *ARMDAGToDAGISel::SelectDYN_ALLOC(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
- EVT VT = N->getValueType(0);
- SDValue Chain = N->getOperand(0);
- SDValue Size = N->getOperand(1);
- SDValue Align = N->getOperand(2);
- SDValue SP = CurDAG->getRegister(ARM::SP, MVT::i32);
- int32_t AlignVal = cast<ConstantSDNode>(Align)->getSExtValue();
- if (AlignVal < 0)
- // We need to align the stack. Use Thumb1 tAND which is the only thumb
- // instruction that can read and write SP. This matches to a pseudo
- // instruction that has a chain to ensure the result is written back to
- // the stack pointer.
- SP = SDValue(CurDAG->getMachineNode(ARM::tANDsp, dl, VT, SP, Align), 0);
-
- bool isC = isa<ConstantSDNode>(Size);
- uint32_t C = isC ? cast<ConstantSDNode>(Size)->getZExtValue() : ~0UL;
- // Handle the most common case for both Thumb1 and Thumb2:
- // tSUBspi - immediate is between 0 ... 508 inclusive.
- if (C <= 508 && ((C & 3) == 0))
- // FIXME: tSUBspi encodes scale 4 implicitly.
- return CurDAG->SelectNodeTo(N, ARM::tSUBspi_, VT, MVT::Other, SP,
- CurDAG->getTargetConstant(C/4, MVT::i32),
- Chain);
-
- if (Subtarget->isThumb1Only()) {
- // Use tADDspr since Thumb1 does not have a sub r, sp, r. ARMISelLowering
- // should have negated the size operand already. FIXME: We can't insert a
- // new target-independent node at this stage, so we are forced to negate
- // it earlier. Is there a better solution?
- return CurDAG->SelectNodeTo(N, ARM::tADDspr_, VT, MVT::Other, SP, Size,
- Chain);
- } else if (Subtarget->isThumb2()) {
- if (isC && Predicate_t2_so_imm(Size.getNode())) {
- // t2SUBrSPi
- SDValue Ops[] = { SP, CurDAG->getTargetConstant(C, MVT::i32), Chain };
- return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPi_, VT, MVT::Other, Ops, 3);
- } else if (isC && Predicate_imm0_4095(Size.getNode())) {
- // t2SUBrSPi12
- SDValue Ops[] = { SP, CurDAG->getTargetConstant(C, MVT::i32), Chain };
- return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPi12_, VT, MVT::Other, Ops, 3);
- } else {
- // t2SUBrSPs
- SDValue Ops[] = { SP, Size,
- getI32Imm(ARM_AM::getSORegOpc(ARM_AM::lsl,0)), Chain };
- return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPs_, VT, MVT::Other, Ops, 4);
- }
- }
-
- // FIXME: Add ADD / SUB sp instructions for ARM.
- return 0;
-}
-
-/// PairDRegs - Insert a pair of double registers into an implicit def to
-/// form a quad register.
-SDNode *ARMDAGToDAGISel::PairDRegs(EVT VT, SDValue V0, SDValue V1) {
- DebugLoc dl = V0.getNode()->getDebugLoc();
- SDValue Undef =
- SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0);
- SDValue SubReg0 = CurDAG->getTargetConstant(ARM::DSUBREG_0, MVT::i32);
- SDValue SubReg1 = CurDAG->getTargetConstant(ARM::DSUBREG_1, MVT::i32);
- SDNode *Pair = CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
- VT, Undef, V0, SubReg0);
- return CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
- VT, SDValue(Pair, 0), V1, SubReg1);
-}
-
-/// GetNEONSubregVT - Given a type for a 128-bit NEON vector, return the type
-/// for a 64-bit subregister of the vector.
-static EVT GetNEONSubregVT(EVT VT) {
- switch (VT.getSimpleVT().SimpleTy) {
- default: llvm_unreachable("unhandled NEON type");
- case MVT::v16i8: return MVT::v8i8;
- case MVT::v8i16: return MVT::v4i16;
- case MVT::v4f32: return MVT::v2f32;
- case MVT::v4i32: return MVT::v2i32;
- case MVT::v2i64: return MVT::v1i64;
- }
-}
-
-SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
- unsigned *DOpcodes, unsigned *QOpcodes0,
- unsigned *QOpcodes1) {
- assert(NumVecs >=2 && NumVecs <= 4 && "VLD NumVecs out-of-range");
- DebugLoc dl = N->getDebugLoc();
-
- SDValue MemAddr, MemUpdate, MemOpc, Align;
- if (!SelectAddrMode6(N, N->getOperand(2), MemAddr, MemUpdate, MemOpc, Align))
- return NULL;
-
- SDValue Chain = N->getOperand(0);
- EVT VT = N->getValueType(0);
- bool is64BitVector = VT.is64BitVector();
-
- unsigned OpcodeIndex;
- switch (VT.getSimpleVT().SimpleTy) {
- default: llvm_unreachable("unhandled vld type");
- // Double-register operations:
- case MVT::v8i8: OpcodeIndex = 0; break;
- case MVT::v4i16: OpcodeIndex = 1; break;
- case MVT::v2f32:
- case MVT::v2i32: OpcodeIndex = 2; break;
- case MVT::v1i64: OpcodeIndex = 3; break;
- // Quad-register operations:
- case MVT::v16i8: OpcodeIndex = 0; break;
- case MVT::v8i16: OpcodeIndex = 1; break;
- case MVT::v4f32:
- case MVT::v4i32: OpcodeIndex = 2; break;
- }
-
- SDValue Pred = CurDAG->getTargetConstant(14, MVT::i32);
- SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
- if (is64BitVector) {
- unsigned Opc = DOpcodes[OpcodeIndex];
- const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, Align,
- Pred, PredReg, Chain };
- std::vector<EVT> ResTys(NumVecs, VT);
- ResTys.push_back(MVT::Other);
- return CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 7);
- }
-
- EVT RegVT = GetNEONSubregVT(VT);
- if (NumVecs == 2) {
- // Quad registers are directly supported for VLD2,
- // loading 2 pairs of D regs.
- unsigned Opc = QOpcodes0[OpcodeIndex];
- const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc, Align,
- Pred, PredReg, Chain };
- std::vector<EVT> ResTys(4, VT);
- ResTys.push_back(MVT::Other);
- SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 7);
- Chain = SDValue(VLd, 4);
-
- // Combine the even and odd subregs to produce the result.
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
- SDNode *Q = PairDRegs(VT, SDValue(VLd, 2*Vec), SDValue(VLd, 2*Vec+1));
- ReplaceUses(SDValue(N, Vec), SDValue(Q, 0));
- }
- } else {
- // Otherwise, quad registers are loaded with two separate instructions,
- // where one loads the even registers and the other loads the odd registers.
-
- // Enable writeback to the address register.
- MemOpc = CurDAG->getTargetConstant(ARM_AM::getAM6Opc(true), MVT::i32);
-
- std::vector<EVT> ResTys(NumVecs, RegVT);
- ResTys.push_back(MemAddr.getValueType());
- ResTys.push_back(MVT::Other);
-
- // Load the even subregs.
- unsigned Opc = QOpcodes0[OpcodeIndex];
- const SDValue OpsA[] = { MemAddr, MemUpdate, MemOpc, Align,
- Pred, PredReg, Chain };
- SDNode *VLdA = CurDAG->getMachineNode(Opc, dl, ResTys, OpsA, 7);
- Chain = SDValue(VLdA, NumVecs+1);
-
- // Load the odd subregs.
- Opc = QOpcodes1[OpcodeIndex];
- const SDValue OpsB[] = { SDValue(VLdA, NumVecs), MemUpdate, MemOpc,
- Align, Pred, PredReg, Chain };
- SDNode *VLdB = CurDAG->getMachineNode(Opc, dl, ResTys, OpsB, 7);
- Chain = SDValue(VLdB, NumVecs+1);
-
- // Combine the even and odd subregs to produce the result.
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
- SDNode *Q = PairDRegs(VT, SDValue(VLdA, Vec), SDValue(VLdB, Vec));
- ReplaceUses(SDValue(N, Vec), SDValue(Q, 0));
- }
- }
- ReplaceUses(SDValue(N, NumVecs), Chain);
- return NULL;
-}
-
-SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
- unsigned *DOpcodes, unsigned *QOpcodes0,
- unsigned *QOpcodes1) {
- assert(NumVecs >=2 && NumVecs <= 4 && "VST NumVecs out-of-range");
- DebugLoc dl = N->getDebugLoc();
-
- SDValue MemAddr, MemUpdate, MemOpc, Align;
- if (!SelectAddrMode6(N, N->getOperand(2), MemAddr, MemUpdate, MemOpc, Align))
- return NULL;
-
- SDValue Chain = N->getOperand(0);
- EVT VT = N->getOperand(3).getValueType();
- bool is64BitVector = VT.is64BitVector();
-
- unsigned OpcodeIndex;
- switch (VT.getSimpleVT().SimpleTy) {
- default: llvm_unreachable("unhandled vst type");
- // Double-register operations:
- case MVT::v8i8: OpcodeIndex = 0; break;
- case MVT::v4i16: OpcodeIndex = 1; break;
- case MVT::v2f32:
- case MVT::v2i32: OpcodeIndex = 2; break;
- case MVT::v1i64: OpcodeIndex = 3; break;
- // Quad-register operations:
- case MVT::v16i8: OpcodeIndex = 0; break;
- case MVT::v8i16: OpcodeIndex = 1; break;
- case MVT::v4f32:
- case MVT::v4i32: OpcodeIndex = 2; break;
- }
-
- SDValue Pred = CurDAG->getTargetConstant(14, MVT::i32);
- SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
-
- SmallVector<SDValue, 8> Ops;
- Ops.push_back(MemAddr);
- Ops.push_back(MemUpdate);
- Ops.push_back(MemOpc);
- Ops.push_back(Align);
-
- if (is64BitVector) {
- unsigned Opc = DOpcodes[OpcodeIndex];
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops.push_back(N->getOperand(Vec+3));
- Ops.push_back(Pred);
- Ops.push_back(PredReg);
- Ops.push_back(Chain);
- return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), NumVecs+7);
- }
-
- EVT RegVT = GetNEONSubregVT(VT);
- if (NumVecs == 2) {
- // Quad registers are directly supported for VST2,
- // storing 2 pairs of D regs.
- unsigned Opc = QOpcodes0[OpcodeIndex];
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::DSUBREG_0, dl, RegVT,
- N->getOperand(Vec+3)));
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::DSUBREG_1, dl, RegVT,
- N->getOperand(Vec+3)));
- }
- Ops.push_back(Pred);
- Ops.push_back(PredReg);
- Ops.push_back(Chain);
- return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), 11);
- }
-
- // Otherwise, quad registers are stored with two separate instructions,
- // where one stores the even registers and the other stores the odd registers.
-
- // Enable writeback to the address register.
- MemOpc = CurDAG->getTargetConstant(ARM_AM::getAM6Opc(true), MVT::i32);
-
- // Store the even subregs.
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::DSUBREG_0, dl, RegVT,
- N->getOperand(Vec+3)));
- Ops.push_back(Pred);
- Ops.push_back(PredReg);
- Ops.push_back(Chain);
- unsigned Opc = QOpcodes0[OpcodeIndex];
- SDNode *VStA = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
- MVT::Other, Ops.data(), NumVecs+7);
- Chain = SDValue(VStA, 1);
-
- // Store the odd subregs.
- Ops[0] = SDValue(VStA, 0); // MemAddr
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops[Vec+4] = CurDAG->getTargetExtractSubreg(ARM::DSUBREG_1, dl, RegVT,
- N->getOperand(Vec+3));
- Ops[NumVecs+4] = Pred;
- Ops[NumVecs+5] = PredReg;
- Ops[NumVecs+6] = Chain;
- Opc = QOpcodes1[OpcodeIndex];
- SDNode *VStB = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
- MVT::Other, Ops.data(), NumVecs+7);
- Chain = SDValue(VStB, 1);
- ReplaceUses(SDValue(N, 0), Chain);
- return NULL;
-}
-
-SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
- unsigned NumVecs, unsigned *DOpcodes,
- unsigned *QOpcodes0,
- unsigned *QOpcodes1) {
- assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
- DebugLoc dl = N->getDebugLoc();
-
- SDValue MemAddr, MemUpdate, MemOpc, Align;
- if (!SelectAddrMode6(N, N->getOperand(2), MemAddr, MemUpdate, MemOpc, Align))
- return NULL;
-
- SDValue Chain = N->getOperand(0);
- unsigned Lane =
- cast<ConstantSDNode>(N->getOperand(NumVecs+3))->getZExtValue();
- EVT VT = IsLoad ? N->getValueType(0) : N->getOperand(3).getValueType();
- bool is64BitVector = VT.is64BitVector();
-
- // Quad registers are handled by load/store of subregs. Find the subreg info.
- unsigned NumElts = 0;
- int SubregIdx = 0;
- EVT RegVT = VT;
- if (!is64BitVector) {
- RegVT = GetNEONSubregVT(VT);
- NumElts = RegVT.getVectorNumElements();
- SubregIdx = (Lane < NumElts) ? ARM::DSUBREG_0 : ARM::DSUBREG_1;
- }
-
- unsigned OpcodeIndex;
- switch (VT.getSimpleVT().SimpleTy) {
- default: llvm_unreachable("unhandled vld/vst lane type");
- // Double-register operations:
- case MVT::v8i8: OpcodeIndex = 0; break;
- case MVT::v4i16: OpcodeIndex = 1; break;
- case MVT::v2f32:
- case MVT::v2i32: OpcodeIndex = 2; break;
- // Quad-register operations:
- case MVT::v8i16: OpcodeIndex = 0; break;
- case MVT::v4f32:
- case MVT::v4i32: OpcodeIndex = 1; break;
- }
-
- SDValue Pred = CurDAG->getTargetConstant(14, MVT::i32);
- SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
-
- SmallVector<SDValue, 9> Ops;
- Ops.push_back(MemAddr);
- Ops.push_back(MemUpdate);
- Ops.push_back(MemOpc);
- Ops.push_back(Align);
-
- unsigned Opc = 0;
- if (is64BitVector) {
- Opc = DOpcodes[OpcodeIndex];
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops.push_back(N->getOperand(Vec+3));
- } else {
- // Check if this is loading the even or odd subreg of a Q register.
- if (Lane < NumElts) {
- Opc = QOpcodes0[OpcodeIndex];
- } else {
- Lane -= NumElts;
- Opc = QOpcodes1[OpcodeIndex];
- }
- // Extract the subregs of the input vector.
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops.push_back(CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
- N->getOperand(Vec+3)));
- }
- Ops.push_back(getI32Imm(Lane));
- Ops.push_back(Pred);
- Ops.push_back(PredReg);
- Ops.push_back(Chain);
-
- if (!IsLoad)
- return CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops.data(), NumVecs+8);
-
- std::vector<EVT> ResTys(NumVecs, RegVT);
- ResTys.push_back(MVT::Other);
- SDNode *VLdLn =
- CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), NumVecs+8);
- // For a 64-bit vector load to D registers, nothing more needs to be done.
- if (is64BitVector)
- return VLdLn;
-
- // For 128-bit vectors, take the 64-bit results of the load and insert them
- // as subregs into the result.
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
- SDValue QuadVec = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
- N->getOperand(Vec+3),
- SDValue(VLdLn, Vec));
- ReplaceUses(SDValue(N, Vec), QuadVec);
- }
-
- Chain = SDValue(VLdLn, NumVecs);
- ReplaceUses(SDValue(N, NumVecs), Chain);
- return NULL;
-}
-
-SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
- unsigned Opc) {
- if (!Subtarget->hasV6T2Ops())
- return NULL;
-
- unsigned Shl_imm = 0;
- if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
- assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
- unsigned Srl_imm = 0;
- if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
- assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
- unsigned Width = 32 - Srl_imm;
- int LSB = Srl_imm - Shl_imm;
- if (LSB < 0)
- return NULL;
- SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
- SDValue Ops[] = { N->getOperand(0).getOperand(0),
- CurDAG->getTargetConstant(LSB, MVT::i32),
- CurDAG->getTargetConstant(Width, MVT::i32),
- getAL(CurDAG), Reg0 };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
- }
- }
- return NULL;
-}
-
-SDNode *ARMDAGToDAGISel::
-SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
- SDValue CPTmp0;
- SDValue CPTmp1;
- if (SelectT2ShifterOperandReg(N, TrueVal, CPTmp0, CPTmp1)) {
- unsigned SOVal = cast<ConstantSDNode>(CPTmp1)->getZExtValue();
- unsigned SOShOp = ARM_AM::getSORegShOp(SOVal);
- unsigned Opc = 0;
- switch (SOShOp) {
- case ARM_AM::lsl: Opc = ARM::t2MOVCClsl; break;
- case ARM_AM::lsr: Opc = ARM::t2MOVCClsr; break;
- case ARM_AM::asr: Opc = ARM::t2MOVCCasr; break;
- case ARM_AM::ror: Opc = ARM::t2MOVCCror; break;
- default:
- llvm_unreachable("Unknown so_reg opcode!");
- break;
- }
- SDValue SOShImm =
- CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), MVT::i32);
- SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
- SDValue Ops[] = { FalseVal, CPTmp0, SOShImm, CC, CCR, InFlag };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32,Ops, 6);
- }
- return 0;
-}
-
-SDNode *ARMDAGToDAGISel::
-SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
- SDValue CPTmp0;
- SDValue CPTmp1;
- SDValue CPTmp2;
- if (SelectShifterOperandReg(N, TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
- SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
- SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, InFlag };
- return CurDAG->SelectNodeTo(N, ARM::MOVCCs, MVT::i32, Ops, 7);
- }
- return 0;
-}
-
-SDNode *ARMDAGToDAGISel::
-SelectT2CMOVSoImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
- ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
- if (!T)
- return 0;
-
- if (Predicate_t2_so_imm(TrueVal.getNode())) {
- SDValue True = CurDAG->getTargetConstant(T->getZExtValue(), MVT::i32);
- SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
- SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
- return CurDAG->SelectNodeTo(N,
- ARM::t2MOVCCi, MVT::i32, Ops, 5);
- }
- return 0;
-}
-
-SDNode *ARMDAGToDAGISel::
-SelectARMCMOVSoImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
- ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
- ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
- if (!T)
- return 0;
-
- if (Predicate_so_imm(TrueVal.getNode())) {
- SDValue True = CurDAG->getTargetConstant(T->getZExtValue(), MVT::i32);
- SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
- SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
- return CurDAG->SelectNodeTo(N,
- ARM::MOVCCi, MVT::i32, Ops, 5);
- }
- return 0;
-}
-
-SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
- EVT VT = N->getValueType(0);
- SDValue FalseVal = N->getOperand(0);
- SDValue TrueVal = N->getOperand(1);
- SDValue CC = N->getOperand(2);
- SDValue CCR = N->getOperand(3);
- SDValue InFlag = N->getOperand(4);
- assert(CC.getOpcode() == ISD::Constant);
- assert(CCR.getOpcode() == ISD::Register);
- ARMCC::CondCodes CCVal =
- (ARMCC::CondCodes)cast<ConstantSDNode>(CC)->getZExtValue();
-
- if (!Subtarget->isThumb1Only() && VT == MVT::i32) {
- // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
- // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
- // Pattern complexity = 18 cost = 1 size = 0
- SDValue CPTmp0;
- SDValue CPTmp1;
- SDValue CPTmp2;
- if (Subtarget->isThumb()) {
- SDNode *Res = SelectT2CMOVShiftOp(N, FalseVal, TrueVal,
- CCVal, CCR, InFlag);
- if (!Res)
- Res = SelectT2CMOVShiftOp(N, TrueVal, FalseVal,
- ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
- if (Res)
- return Res;
- } else {
- SDNode *Res = SelectARMCMOVShiftOp(N, FalseVal, TrueVal,
- CCVal, CCR, InFlag);
- if (!Res)
- Res = SelectARMCMOVShiftOp(N, TrueVal, FalseVal,
- ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
- if (Res)
- return Res;
- }
-
- // Pattern: (ARMcmov:i32 GPR:i32:$false,
- // (imm:i32)<<P:Predicate_so_imm>>:$true,
- // (imm:i32):$cc)
- // Emits: (MOVCCi:i32 GPR:i32:$false,
- // (so_imm:i32 (imm:i32):$true), (imm:i32):$cc)
- // Pattern complexity = 10 cost = 1 size = 0
- if (Subtarget->isThumb()) {
- SDNode *Res = SelectT2CMOVSoImmOp(N, FalseVal, TrueVal,
- CCVal, CCR, InFlag);
- if (!Res)
- Res = SelectT2CMOVSoImmOp(N, TrueVal, FalseVal,
- ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
- if (Res)
- return Res;
- } else {
- SDNode *Res = SelectARMCMOVSoImmOp(N, FalseVal, TrueVal,
- CCVal, CCR, InFlag);
- if (!Res)
- Res = SelectARMCMOVSoImmOp(N, TrueVal, FalseVal,
- ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
- if (Res)
- return Res;
- }
- }
-
- // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
- // Emits: (MOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
- // Pattern complexity = 6 cost = 1 size = 0
- //
- // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
- // Emits: (tMOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
- // Pattern complexity = 6 cost = 11 size = 0
- //
- // Also FCPYScc and FCPYDcc.
- SDValue Tmp2 = CurDAG->getTargetConstant(CCVal, MVT::i32);
- SDValue Ops[] = { FalseVal, TrueVal, Tmp2, CCR, InFlag };
- unsigned Opc = 0;
- switch (VT.getSimpleVT().SimpleTy) {
- default: assert(false && "Illegal conditional move type!");
- break;
- case MVT::i32:
- Opc = Subtarget->isThumb()
- ? (Subtarget->hasThumb2() ? ARM::t2MOVCCr : ARM::tMOVCCr_pseudo)
- : ARM::MOVCCr;
- break;
- case MVT::f32:
- Opc = ARM::VMOVScc;
- break;
- case MVT::f64:
- Opc = ARM::VMOVDcc;
- break;
- }
- return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5);
-}
-
-SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
- DebugLoc dl = N->getDebugLoc();
-
- if (N->isMachineOpcode())
- return NULL; // Already selected.
-
- switch (N->getOpcode()) {
- default: break;
- case ISD::Constant: {
- unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
- bool UseCP = true;
- if (Subtarget->hasThumb2())
- // Thumb2-aware targets have the MOVT instruction, so all immediates can
- // be done with MOV + MOVT, at worst.
- UseCP = 0;
- else {
- if (Subtarget->isThumb()) {
- UseCP = (Val > 255 && // MOV
- ~Val > 255 && // MOV + MVN
- !ARM_AM::isThumbImmShiftedVal(Val)); // MOV + LSL
- } else
- UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV
- ARM_AM::getSOImmVal(~Val) == -1 && // MVN
- !ARM_AM::isSOImmTwoPartVal(Val)); // two instrs.
- }
-
- if (UseCP) {
- SDValue CPIdx =
- CurDAG->getTargetConstantPool(ConstantInt::get(
- Type::getInt32Ty(*CurDAG->getContext()), Val),
- TLI.getPointerTy());
-
- SDNode *ResNode;
- if (Subtarget->isThumb1Only()) {
- SDValue Pred = CurDAG->getTargetConstant(14, MVT::i32);
- SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
- SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
- ResNode = CurDAG->getMachineNode(ARM::tLDRcp, dl, MVT::i32, MVT::Other,
- Ops, 4);
- } else {
- SDValue Ops[] = {
- CPIdx,
- CurDAG->getRegister(0, MVT::i32),
- CurDAG->getTargetConstant(0, MVT::i32),
- getAL(CurDAG),
- CurDAG->getRegister(0, MVT::i32),
- CurDAG->getEntryNode()
- };
- ResNode=CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
- Ops, 6);
- }
- ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
- return NULL;
- }
-
- // Other cases are autogenerated.
- break;
- }
- case ISD::FrameIndex: {
- // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
- int FI = cast<FrameIndexSDNode>(N)->getIndex();
- SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
- if (Subtarget->isThumb1Only()) {
- return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI,
- CurDAG->getTargetConstant(0, MVT::i32));
- } else {
- unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
- ARM::t2ADDri : ARM::ADDri);
- SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
- getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
- CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
- }
- }
- case ARMISD::DYN_ALLOC:
- return SelectDYN_ALLOC(N);
- case ISD::SRL:
- if (SDNode *I = SelectV6T2BitfieldExtractOp(N,
- Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX))
- return I;
- break;
- case ISD::SRA:
- if (SDNode *I = SelectV6T2BitfieldExtractOp(N,
- Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX))
- return I;
- break;
- case ISD::MUL:
- if (Subtarget->isThumb1Only())
- break;
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
- unsigned RHSV = C->getZExtValue();
- if (!RHSV) break;
- if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
- unsigned ShImm = Log2_32(RHSV-1);
- if (ShImm >= 32)
- break;
- SDValue V = N->getOperand(0);
- ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
- SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
- SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
- if (Subtarget->isThumb()) {
- SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
- return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6);
- } else {
- SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
- return CurDAG->SelectNodeTo(N, ARM::ADDrs, MVT::i32, Ops, 7);
- }
- }
- if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
- unsigned ShImm = Log2_32(RHSV+1);
- if (ShImm >= 32)
- break;
- SDValue V = N->getOperand(0);
- ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
- SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
- SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
- if (Subtarget->isThumb()) {
- SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0 };
- return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 5);
- } else {
- SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
- return CurDAG->SelectNodeTo(N, ARM::RSBrs, MVT::i32, Ops, 7);
- }
- }
- }
- break;
- case ISD::AND: {
- // (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits
- // of c1 are 0xffff, and lower 16-bits of c2 are 0. That is, the top 16-bits
- // are entirely contributed by c2 and lower 16-bits are entirely contributed
- // by x. That's equal to (or (and x, 0xffff), (and c1, 0xffff0000)).
- // Select it to: "movt x, ((c2 & 0xffff0000) >> 16)".
- EVT VT = N->getValueType(0);
- if (VT != MVT::i32)
- break;
- unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
- ? ARM::t2MOVTi16
- : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
- if (!Opc)
- break;
- SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- if (!N1C)
- break;
- if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
- SDValue N2 = N0.getOperand(1);
- ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
- if (!N2C)
- break;
- unsigned N1CVal = N1C->getZExtValue();
- unsigned N2CVal = N2C->getZExtValue();
- if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
- (N1CVal & 0xffffU) == 0xffffU &&
- (N2CVal & 0xffffU) == 0x0U) {
- SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
- MVT::i32);
- SDValue Ops[] = { N0.getOperand(0), Imm16,
- getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->getMachineNode(Opc, dl, VT, Ops, 4);
- }
- }
- break;
- }
- case ARMISD::VMOVRRD:
- return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
- N->getOperand(0), getAL(CurDAG),
- CurDAG->getRegister(0, MVT::i32));
- case ISD::UMUL_LOHI: {
- if (Subtarget->isThumb1Only())
- break;
- if (Subtarget->isThumb()) {
- SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
- getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
- CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops,4);
- } else {
- SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
- getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
- CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->getMachineNode(ARM::UMULL, dl, MVT::i32, MVT::i32, Ops, 5);
- }
- }
- case ISD::SMUL_LOHI: {
- if (Subtarget->isThumb1Only())
- break;
- if (Subtarget->isThumb()) {
- SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
- getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops,4);
- } else {
- SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
- getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
- CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->getMachineNode(ARM::SMULL, dl, MVT::i32, MVT::i32, Ops, 5);
- }
- }
- case ISD::LOAD: {
- SDNode *ResNode = 0;
- if (Subtarget->isThumb() && Subtarget->hasThumb2())
- ResNode = SelectT2IndexedLoad(N);
- else
- ResNode = SelectARMIndexedLoad(N);
- if (ResNode)
- return ResNode;
- // Other cases are autogenerated.
- break;
- }
- case ARMISD::BRCOND: {
- // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
- // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
- // Pattern complexity = 6 cost = 1 size = 0
-
- // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
- // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
- // Pattern complexity = 6 cost = 1 size = 0
-
- // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
- // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
- // Pattern complexity = 6 cost = 1 size = 0
-
- unsigned Opc = Subtarget->isThumb() ?
- ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
- SDValue Chain = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
- SDValue N2 = N->getOperand(2);
- SDValue N3 = N->getOperand(3);
- SDValue InFlag = N->getOperand(4);
- assert(N1.getOpcode() == ISD::BasicBlock);
- assert(N2.getOpcode() == ISD::Constant);
- assert(N3.getOpcode() == ISD::Register);
-
- SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
- cast<ConstantSDNode>(N2)->getZExtValue()),
- MVT::i32);
- SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
- SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
- MVT::Flag, Ops, 5);
- Chain = SDValue(ResNode, 0);
- if (N->getNumValues() == 2) {
- InFlag = SDValue(ResNode, 1);
- ReplaceUses(SDValue(N, 1), InFlag);
- }
- ReplaceUses(SDValue(N, 0),
- SDValue(Chain.getNode(), Chain.getResNo()));
- return NULL;
- }
- case ARMISD::CMOV:
- return SelectCMOVOp(N);
- case ARMISD::CNEG: {
- EVT VT = N->getValueType(0);
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
- SDValue N2 = N->getOperand(2);
- SDValue N3 = N->getOperand(3);
- SDValue InFlag = N->getOperand(4);
- assert(N2.getOpcode() == ISD::Constant);
- assert(N3.getOpcode() == ISD::Register);
-
- SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
- cast<ConstantSDNode>(N2)->getZExtValue()),
- MVT::i32);
- SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag };
- unsigned Opc = 0;
- switch (VT.getSimpleVT().SimpleTy) {
- default: assert(false && "Illegal conditional move type!");
- break;
- case MVT::f32:
- Opc = ARM::VNEGScc;
- break;
- case MVT::f64:
- Opc = ARM::VNEGDcc;
- break;
- }
- return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5);
- }
-
- case ARMISD::VZIP: {
- unsigned Opc = 0;
- EVT VT = N->getValueType(0);
- switch (VT.getSimpleVT().SimpleTy) {
- default: return NULL;
- case MVT::v8i8: Opc = ARM::VZIPd8; break;
- case MVT::v4i16: Opc = ARM::VZIPd16; break;
- case MVT::v2f32:
- case MVT::v2i32: Opc = ARM::VZIPd32; break;
- case MVT::v16i8: Opc = ARM::VZIPq8; break;
- case MVT::v8i16: Opc = ARM::VZIPq16; break;
- case MVT::v4f32:
- case MVT::v4i32: Opc = ARM::VZIPq32; break;
- }
- SDValue Pred = CurDAG->getTargetConstant(14, MVT::i32);
- SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
- SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
- return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
- }
- case ARMISD::VUZP: {
- unsigned Opc = 0;
- EVT VT = N->getValueType(0);
- switch (VT.getSimpleVT().SimpleTy) {
- default: return NULL;
- case MVT::v8i8: Opc = ARM::VUZPd8; break;
- case MVT::v4i16: Opc = ARM::VUZPd16; break;
- case MVT::v2f32:
- case MVT::v2i32: Opc = ARM::VUZPd32; break;
- case MVT::v16i8: Opc = ARM::VUZPq8; break;
- case MVT::v8i16: Opc = ARM::VUZPq16; break;
- case MVT::v4f32:
- case MVT::v4i32: Opc = ARM::VUZPq32; break;
- }
- SDValue Pred = CurDAG->getTargetConstant(14, MVT::i32);
- SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
- SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
- return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
- }
- case ARMISD::VTRN: {
- unsigned Opc = 0;
- EVT VT = N->getValueType(0);
- switch (VT.getSimpleVT().SimpleTy) {
- default: return NULL;
- case MVT::v8i8: Opc = ARM::VTRNd8; break;
- case MVT::v4i16: Opc = ARM::VTRNd16; break;
- case MVT::v2f32:
- case MVT::v2i32: Opc = ARM::VTRNd32; break;
- case MVT::v16i8: Opc = ARM::VTRNq8; break;
- case MVT::v8i16: Opc = ARM::VTRNq16; break;
- case MVT::v4f32:
- case MVT::v4i32: Opc = ARM::VTRNq32; break;
- }
- SDValue Pred = CurDAG->getTargetConstant(14, MVT::i32);
- SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
- SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
- return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
- }
-
- case ISD::INTRINSIC_VOID:
- case ISD::INTRINSIC_W_CHAIN: {
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
- switch (IntNo) {
- default:
- break;
-
- case Intrinsic::arm_neon_vld2: {
- unsigned DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
- ARM::VLD2d32, ARM::VLD2d64 };
- unsigned QOpcodes[] = { ARM::VLD2q8, ARM::VLD2q16, ARM::VLD2q32 };
- return SelectVLD(N, 2, DOpcodes, QOpcodes, 0);
- }
-
- case Intrinsic::arm_neon_vld3: {
- unsigned DOpcodes[] = { ARM::VLD3d8, ARM::VLD3d16,
- ARM::VLD3d32, ARM::VLD3d64 };
- unsigned QOpcodes0[] = { ARM::VLD3q8a, ARM::VLD3q16a, ARM::VLD3q32a };
- unsigned QOpcodes1[] = { ARM::VLD3q8b, ARM::VLD3q16b, ARM::VLD3q32b };
- return SelectVLD(N, 3, DOpcodes, QOpcodes0, QOpcodes1);
- }
-
- case Intrinsic::arm_neon_vld4: {
- unsigned DOpcodes[] = { ARM::VLD4d8, ARM::VLD4d16,
- ARM::VLD4d32, ARM::VLD4d64 };
- unsigned QOpcodes0[] = { ARM::VLD4q8a, ARM::VLD4q16a, ARM::VLD4q32a };
- unsigned QOpcodes1[] = { ARM::VLD4q8b, ARM::VLD4q16b, ARM::VLD4q32b };
- return SelectVLD(N, 4, DOpcodes, QOpcodes0, QOpcodes1);
- }
-
- case Intrinsic::arm_neon_vld2lane: {
- unsigned DOpcodes[] = { ARM::VLD2LNd8, ARM::VLD2LNd16, ARM::VLD2LNd32 };
- unsigned QOpcodes0[] = { ARM::VLD2LNq16a, ARM::VLD2LNq32a };
- unsigned QOpcodes1[] = { ARM::VLD2LNq16b, ARM::VLD2LNq32b };
- return SelectVLDSTLane(N, true, 2, DOpcodes, QOpcodes0, QOpcodes1);
- }
-
- case Intrinsic::arm_neon_vld3lane: {
- unsigned DOpcodes[] = { ARM::VLD3LNd8, ARM::VLD3LNd16, ARM::VLD3LNd32 };
- unsigned QOpcodes0[] = { ARM::VLD3LNq16a, ARM::VLD3LNq32a };
- unsigned QOpcodes1[] = { ARM::VLD3LNq16b, ARM::VLD3LNq32b };
- return SelectVLDSTLane(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
- }
-
- case Intrinsic::arm_neon_vld4lane: {
- unsigned DOpcodes[] = { ARM::VLD4LNd8, ARM::VLD4LNd16, ARM::VLD4LNd32 };
- unsigned QOpcodes0[] = { ARM::VLD4LNq16a, ARM::VLD4LNq32a };
- unsigned QOpcodes1[] = { ARM::VLD4LNq16b, ARM::VLD4LNq32b };
- return SelectVLDSTLane(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
- }
-
- case Intrinsic::arm_neon_vst2: {
- unsigned DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
- ARM::VST2d32, ARM::VST2d64 };
- unsigned QOpcodes[] = { ARM::VST2q8, ARM::VST2q16, ARM::VST2q32 };
- return SelectVST(N, 2, DOpcodes, QOpcodes, 0);
- }
-
- case Intrinsic::arm_neon_vst3: {
- unsigned DOpcodes[] = { ARM::VST3d8, ARM::VST3d16,
- ARM::VST3d32, ARM::VST3d64 };
- unsigned QOpcodes0[] = { ARM::VST3q8a, ARM::VST3q16a, ARM::VST3q32a };
- unsigned QOpcodes1[] = { ARM::VST3q8b, ARM::VST3q16b, ARM::VST3q32b };
- return SelectVST(N, 3, DOpcodes, QOpcodes0, QOpcodes1);
- }
-
- case Intrinsic::arm_neon_vst4: {
- unsigned DOpcodes[] = { ARM::VST4d8, ARM::VST4d16,
- ARM::VST4d32, ARM::VST4d64 };
- unsigned QOpcodes0[] = { ARM::VST4q8a, ARM::VST4q16a, ARM::VST4q32a };
- unsigned QOpcodes1[] = { ARM::VST4q8b, ARM::VST4q16b, ARM::VST4q32b };
- return SelectVST(N, 4, DOpcodes, QOpcodes0, QOpcodes1);
- }
-
- case Intrinsic::arm_neon_vst2lane: {
- unsigned DOpcodes[] = { ARM::VST2LNd8, ARM::VST2LNd16, ARM::VST2LNd32 };
- unsigned QOpcodes0[] = { ARM::VST2LNq16a, ARM::VST2LNq32a };
- unsigned QOpcodes1[] = { ARM::VST2LNq16b, ARM::VST2LNq32b };
- return SelectVLDSTLane(N, false, 2, DOpcodes, QOpcodes0, QOpcodes1);
- }
-
- case Intrinsic::arm_neon_vst3lane: {
- unsigned DOpcodes[] = { ARM::VST3LNd8, ARM::VST3LNd16, ARM::VST3LNd32 };
- unsigned QOpcodes0[] = { ARM::VST3LNq16a, ARM::VST3LNq32a };
- unsigned QOpcodes1[] = { ARM::VST3LNq16b, ARM::VST3LNq32b };
- return SelectVLDSTLane(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
- }
-
- case Intrinsic::arm_neon_vst4lane: {
- unsigned DOpcodes[] = { ARM::VST4LNd8, ARM::VST4LNd16, ARM::VST4LNd32 };
- unsigned QOpcodes0[] = { ARM::VST4LNq16a, ARM::VST4LNq32a };
- unsigned QOpcodes1[] = { ARM::VST4LNq16b, ARM::VST4LNq32b };
- return SelectVLDSTLane(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
- }
- }
- }
- }
-
- return SelectCode(N);
-}
-
-bool ARMDAGToDAGISel::
-SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
- std::vector<SDValue> &OutOps) {
- assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
- // Require the address to be in a register. That is safe for all ARM
- // variants and it is hard to do anything much smarter without knowing
- // how the operand is used.
- OutOps.push_back(Op);
- return false;
-}
-
-/// createARMISelDag - This pass converts a legalized DAG into an
-/// ARM-specific DAG, ready for instruction scheduling.
-///
-FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
- CodeGenOpt::Level OptLevel) {
- return new ARMDAGToDAGISel(TM, OptLevel);
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMISelLowering.cpp b/libclamav/c++/llvm/lib/Target/ARM/ARMISelLowering.cpp
deleted file mode 100644
index 6a2c6bb..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ /dev/null
@@ -1,4675 +0,0 @@
-//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interfaces that ARM uses to lower LLVM code into a
-// selection DAG.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARM.h"
-#include "ARMAddressingModes.h"
-#include "ARMConstantPoolValue.h"
-#include "ARMISelLowering.h"
-#include "ARMMachineFunctionInfo.h"
-#include "ARMPerfectShuffle.h"
-#include "ARMRegisterInfo.h"
-#include "ARMSubtarget.h"
-#include "ARMTargetMachine.h"
-#include "ARMTargetObjectFile.h"
-#include "llvm/CallingConv.h"
-#include "llvm/Constants.h"
-#include "llvm/Function.h"
-#include "llvm/GlobalValue.h"
-#include "llvm/Instruction.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/Type.h"
-#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/ADT/VectorExtras.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/raw_ostream.h"
-#include <sstream>
-using namespace llvm;
-
-static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State);
-static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State);
-static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State);
-static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State);
-
-void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
- EVT PromotedBitwiseVT) {
- if (VT != PromotedLdStVT) {
- setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
- AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
- PromotedLdStVT.getSimpleVT());
-
- setOperationAction(ISD::STORE, VT.getSimpleVT(), Promote);
- AddPromotedToType (ISD::STORE, VT.getSimpleVT(),
- PromotedLdStVT.getSimpleVT());
- }
-
- EVT ElemTy = VT.getVectorElementType();
- if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
- setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom);
- if (ElemTy == MVT::i8 || ElemTy == MVT::i16)
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
- if (ElemTy != MVT::i32) {
- setOperationAction(ISD::SINT_TO_FP, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::UINT_TO_FP, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FP_TO_SINT, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FP_TO_UINT, VT.getSimpleVT(), Expand);
- }
- setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Expand);
- if (VT.isInteger()) {
- setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
- setOperationAction(ISD::SRL, VT.getSimpleVT(), Custom);
- }
-
- // Promote all bit-wise operations.
- if (VT.isInteger() && VT != PromotedBitwiseVT) {
- setOperationAction(ISD::AND, VT.getSimpleVT(), Promote);
- AddPromotedToType (ISD::AND, VT.getSimpleVT(),
- PromotedBitwiseVT.getSimpleVT());
- setOperationAction(ISD::OR, VT.getSimpleVT(), Promote);
- AddPromotedToType (ISD::OR, VT.getSimpleVT(),
- PromotedBitwiseVT.getSimpleVT());
- setOperationAction(ISD::XOR, VT.getSimpleVT(), Promote);
- AddPromotedToType (ISD::XOR, VT.getSimpleVT(),
- PromotedBitwiseVT.getSimpleVT());
- }
-
- // Neon does not support vector divide/remainder operations.
- setOperationAction(ISD::SDIV, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::UDIV, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FDIV, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::SREM, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::UREM, VT.getSimpleVT(), Expand);
- setOperationAction(ISD::FREM, VT.getSimpleVT(), Expand);
-}
-
-void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
- addRegisterClass(VT, ARM::DPRRegisterClass);
- addTypeForNEON(VT, MVT::f64, MVT::v2i32);
-}
-
-void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
- addRegisterClass(VT, ARM::QPRRegisterClass);
- addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
-}
-
-static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
- if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
- return new TargetLoweringObjectFileMachO();
- return new ARMElfTargetObjectFile();
-}
-
-ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
- : TargetLowering(TM, createTLOF(TM)) {
- Subtarget = &TM.getSubtarget<ARMSubtarget>();
-
- if (Subtarget->isTargetDarwin()) {
- // Uses VFP for Thumb libfuncs if available.
- if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
- // Single-precision floating-point arithmetic.
- setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
- setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
- setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
- setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");
-
- // Double-precision floating-point arithmetic.
- setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
- setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
- setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
- setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");
-
- // Single-precision comparisons.
- setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
- setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
- setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
- setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
- setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
- setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
- setLibcallName(RTLIB::UO_F32, "__unordsf2vfp");
- setLibcallName(RTLIB::O_F32, "__unordsf2vfp");
-
- setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
- setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
- setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
- setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
- setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
- setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
- setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
- setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);
-
- // Double-precision comparisons.
- setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
- setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
- setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
- setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
- setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
- setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
- setLibcallName(RTLIB::UO_F64, "__unorddf2vfp");
- setLibcallName(RTLIB::O_F64, "__unorddf2vfp");
-
- setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
- setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
- setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
- setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
- setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
- setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
- setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
- setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
-
- // Floating-point to integer conversions.
- // i64 conversions are done via library routines even when generating VFP
- // instructions, so use the same ones.
- setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
- setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
- setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
- setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");
-
- // Conversions between floating types.
- setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
- setLibcallName(RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp");
-
- // Integer to floating-point conversions.
- // i64 conversions are done via library routines even when generating VFP
- // instructions, so use the same ones.
- // FIXME: There appears to be some naming inconsistency in ARM libgcc:
- // e.g., __floatunsidf vs. __floatunssidfvfp.
- setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
- setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
- setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
- setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
- }
- }
-
- // These libcalls are not available in 32-bit.
- setLibcallName(RTLIB::SHL_I128, 0);
- setLibcallName(RTLIB::SRL_I128, 0);
- setLibcallName(RTLIB::SRA_I128, 0);
-
- // Libcalls should use the AAPCS base standard ABI, even if hard float
- // is in effect, as per the ARM RTABI specification, section 4.1.2.
- if (Subtarget->isAAPCS_ABI()) {
- for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) {
- setLibcallCallingConv(static_cast<RTLIB::Libcall>(i),
- CallingConv::ARM_AAPCS);
- }
- }
-
- if (Subtarget->isThumb1Only())
- addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
- else
- addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
- if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
- addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
- addRegisterClass(MVT::f64, ARM::DPRRegisterClass);
-
- setTruncStoreAction(MVT::f64, MVT::f32, Expand);
- }
-
- if (Subtarget->hasNEON()) {
- addDRTypeForNEON(MVT::v2f32);
- addDRTypeForNEON(MVT::v8i8);
- addDRTypeForNEON(MVT::v4i16);
- addDRTypeForNEON(MVT::v2i32);
- addDRTypeForNEON(MVT::v1i64);
-
- addQRTypeForNEON(MVT::v4f32);
- addQRTypeForNEON(MVT::v2f64);
- addQRTypeForNEON(MVT::v16i8);
- addQRTypeForNEON(MVT::v8i16);
- addQRTypeForNEON(MVT::v4i32);
- addQRTypeForNEON(MVT::v2i64);
-
- // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
- // neither Neon nor VFP support any arithmetic operations on it.
- setOperationAction(ISD::FADD, MVT::v2f64, Expand);
- setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
- setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
- setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
- setOperationAction(ISD::FREM, MVT::v2f64, Expand);
- setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
- setOperationAction(ISD::VSETCC, MVT::v2f64, Expand);
- setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
- setOperationAction(ISD::FABS, MVT::v2f64, Expand);
- setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
- setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
- setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
- setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
- setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
- setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
- setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
- setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
- setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
- setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
- setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
- setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
- setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
- setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
- setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
-
- // Neon does not support some operations on v1i64 and v2i64 types.
- setOperationAction(ISD::MUL, MVT::v1i64, Expand);
- setOperationAction(ISD::MUL, MVT::v2i64, Expand);
- setOperationAction(ISD::VSETCC, MVT::v1i64, Expand);
- setOperationAction(ISD::VSETCC, MVT::v2i64, Expand);
-
- setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
- setTargetDAGCombine(ISD::SHL);
- setTargetDAGCombine(ISD::SRL);
- setTargetDAGCombine(ISD::SRA);
- setTargetDAGCombine(ISD::SIGN_EXTEND);
- setTargetDAGCombine(ISD::ZERO_EXTEND);
- setTargetDAGCombine(ISD::ANY_EXTEND);
- setTargetDAGCombine(ISD::SELECT_CC);
- }
-
- computeRegisterProperties();
-
- // ARM does not have f32 extending load.
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
-
- // ARM does not have i1 sign extending load.
- setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
-
- // ARM supports all 4 flavors of integer indexed load / store.
- if (!Subtarget->isThumb1Only()) {
- for (unsigned im = (unsigned)ISD::PRE_INC;
- im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
- setIndexedLoadAction(im, MVT::i1, Legal);
- setIndexedLoadAction(im, MVT::i8, Legal);
- setIndexedLoadAction(im, MVT::i16, Legal);
- setIndexedLoadAction(im, MVT::i32, Legal);
- setIndexedStoreAction(im, MVT::i1, Legal);
- setIndexedStoreAction(im, MVT::i8, Legal);
- setIndexedStoreAction(im, MVT::i16, Legal);
- setIndexedStoreAction(im, MVT::i32, Legal);
- }
- }
-
- // i64 operation support.
- if (Subtarget->isThumb1Only()) {
- setOperationAction(ISD::MUL, MVT::i64, Expand);
- setOperationAction(ISD::MULHU, MVT::i32, Expand);
- setOperationAction(ISD::MULHS, MVT::i32, Expand);
- setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
- setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
- } else {
- setOperationAction(ISD::MUL, MVT::i64, Expand);
- setOperationAction(ISD::MULHU, MVT::i32, Expand);
- if (!Subtarget->hasV6Ops())
- setOperationAction(ISD::MULHS, MVT::i32, Expand);
- }
- setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
- setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
- setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
- setOperationAction(ISD::SRL, MVT::i64, Custom);
- setOperationAction(ISD::SRA, MVT::i64, Custom);
-
- // ARM does not have ROTL.
- setOperationAction(ISD::ROTL, MVT::i32, Expand);
- setOperationAction(ISD::CTTZ, MVT::i32, Custom);
- setOperationAction(ISD::CTPOP, MVT::i32, Expand);
- if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
- setOperationAction(ISD::CTLZ, MVT::i32, Expand);
-
- // Only ARMv6 has BSWAP.
- if (!Subtarget->hasV6Ops())
- setOperationAction(ISD::BSWAP, MVT::i32, Expand);
-
- // These are expanded into libcalls.
- setOperationAction(ISD::SDIV, MVT::i32, Expand);
- setOperationAction(ISD::UDIV, MVT::i32, Expand);
- setOperationAction(ISD::SREM, MVT::i32, Expand);
- setOperationAction(ISD::UREM, MVT::i32, Expand);
- setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
- setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
-
- setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
- setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
- setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
- setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
- setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
-
- // Use the default implementation.
- setOperationAction(ISD::VASTART, MVT::Other, Custom);
- setOperationAction(ISD::VAARG, MVT::Other, Expand);
- setOperationAction(ISD::VACOPY, MVT::Other, Expand);
- setOperationAction(ISD::VAEND, MVT::Other, Expand);
- setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
- setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
- setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
- // FIXME: Shouldn't need this, since no register is used, but the legalizer
- // doesn't yet know how to not do that for SjLj.
- setExceptionSelectorRegister(ARM::R0);
- if (Subtarget->isThumb())
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
- else
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
- setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
-
- if (!Subtarget->hasV6Ops() && !Subtarget->isThumb2()) {
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
- }
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
-
- if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only())
- // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
- // iff target supports vfp2.
- setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);
-
- // We want to custom lower some of our intrinsics.
- setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
-
- setOperationAction(ISD::SETCC, MVT::i32, Expand);
- setOperationAction(ISD::SETCC, MVT::f32, Expand);
- setOperationAction(ISD::SETCC, MVT::f64, Expand);
- setOperationAction(ISD::SELECT, MVT::i32, Expand);
- setOperationAction(ISD::SELECT, MVT::f32, Expand);
- setOperationAction(ISD::SELECT, MVT::f64, Expand);
- setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
- setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
- setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
-
- setOperationAction(ISD::BRCOND, MVT::Other, Expand);
- setOperationAction(ISD::BR_CC, MVT::i32, Custom);
- setOperationAction(ISD::BR_CC, MVT::f32, Custom);
- setOperationAction(ISD::BR_CC, MVT::f64, Custom);
- setOperationAction(ISD::BR_JT, MVT::Other, Custom);
-
- // We don't support sin/cos/fmod/copysign/pow
- setOperationAction(ISD::FSIN, MVT::f64, Expand);
- setOperationAction(ISD::FSIN, MVT::f32, Expand);
- setOperationAction(ISD::FCOS, MVT::f32, Expand);
- setOperationAction(ISD::FCOS, MVT::f64, Expand);
- setOperationAction(ISD::FREM, MVT::f64, Expand);
- setOperationAction(ISD::FREM, MVT::f32, Expand);
- if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
- setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
- }
- setOperationAction(ISD::FPOW, MVT::f64, Expand);
- setOperationAction(ISD::FPOW, MVT::f32, Expand);
-
- // int <-> fp are custom expanded into bit_convert + ARMISD ops.
- if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
- setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
- setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
- setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
- setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
- }
-
- // We have target-specific dag combine patterns for the following nodes:
- // ARMISD::VMOVRRD - No need to call setTargetDAGCombine
- setTargetDAGCombine(ISD::ADD);
- setTargetDAGCombine(ISD::SUB);
-
- setStackPointerRegisterToSaveRestore(ARM::SP);
- setSchedulingPreference(SchedulingForRegPressure);
-
- // FIXME: If-converter should use instruction latency to determine
- // profitability rather than relying on fixed limits.
- if (Subtarget->getCPUString() == "generic") {
- // Generic (and overly aggressive) if-conversion limits.
- setIfCvtBlockSizeLimit(10);
- setIfCvtDupBlockSizeLimit(2);
- } else if (Subtarget->hasV6Ops()) {
- setIfCvtBlockSizeLimit(2);
- setIfCvtDupBlockSizeLimit(1);
- } else {
- setIfCvtBlockSizeLimit(3);
- setIfCvtDupBlockSizeLimit(2);
- }
-
- maxStoresPerMemcpy = 1; //// temporary - rewrite interface to use type
- // Do not enable CodePlacementOpt for now: it currently runs after the
- // ARMConstantIslandPass and messes up branch relaxation and placement
- // of constant islands.
- // benefitFromCodePlacementOpt = true;
-}
-
-const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
- switch (Opcode) {
- default: return 0;
- case ARMISD::Wrapper: return "ARMISD::Wrapper";
- case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
- case ARMISD::CALL: return "ARMISD::CALL";
- case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
- case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
- case ARMISD::tCALL: return "ARMISD::tCALL";
- case ARMISD::BRCOND: return "ARMISD::BRCOND";
- case ARMISD::BR_JT: return "ARMISD::BR_JT";
- case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
- case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
- case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
- case ARMISD::CMP: return "ARMISD::CMP";
- case ARMISD::CMPZ: return "ARMISD::CMPZ";
- case ARMISD::CMPFP: return "ARMISD::CMPFP";
- case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
- case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
- case ARMISD::CMOV: return "ARMISD::CMOV";
- case ARMISD::CNEG: return "ARMISD::CNEG";
-
- case ARMISD::RBIT: return "ARMISD::RBIT";
-
- case ARMISD::FTOSI: return "ARMISD::FTOSI";
- case ARMISD::FTOUI: return "ARMISD::FTOUI";
- case ARMISD::SITOF: return "ARMISD::SITOF";
- case ARMISD::UITOF: return "ARMISD::UITOF";
-
- case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
- case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
- case ARMISD::RRX: return "ARMISD::RRX";
-
- case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
- case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";
-
- case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
- case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";
-
- case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
-
- case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";
-
- case ARMISD::MEMBARRIER: return "ARMISD::MEMBARRIER";
- case ARMISD::SYNCBARRIER: return "ARMISD::SYNCBARRIER";
-
- case ARMISD::VCEQ: return "ARMISD::VCEQ";
- case ARMISD::VCGE: return "ARMISD::VCGE";
- case ARMISD::VCGEU: return "ARMISD::VCGEU";
- case ARMISD::VCGT: return "ARMISD::VCGT";
- case ARMISD::VCGTU: return "ARMISD::VCGTU";
- case ARMISD::VTST: return "ARMISD::VTST";
-
- case ARMISD::VSHL: return "ARMISD::VSHL";
- case ARMISD::VSHRs: return "ARMISD::VSHRs";
- case ARMISD::VSHRu: return "ARMISD::VSHRu";
- case ARMISD::VSHLLs: return "ARMISD::VSHLLs";
- case ARMISD::VSHLLu: return "ARMISD::VSHLLu";
- case ARMISD::VSHLLi: return "ARMISD::VSHLLi";
- case ARMISD::VSHRN: return "ARMISD::VSHRN";
- case ARMISD::VRSHRs: return "ARMISD::VRSHRs";
- case ARMISD::VRSHRu: return "ARMISD::VRSHRu";
- case ARMISD::VRSHRN: return "ARMISD::VRSHRN";
- case ARMISD::VQSHLs: return "ARMISD::VQSHLs";
- case ARMISD::VQSHLu: return "ARMISD::VQSHLu";
- case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu";
- case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs";
- case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu";
- case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu";
- case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs";
- case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu";
- case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu";
- case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu";
- case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
- case ARMISD::VDUP: return "ARMISD::VDUP";
- case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE";
- case ARMISD::VEXT: return "ARMISD::VEXT";
- case ARMISD::VREV64: return "ARMISD::VREV64";
- case ARMISD::VREV32: return "ARMISD::VREV32";
- case ARMISD::VREV16: return "ARMISD::VREV16";
- case ARMISD::VZIP: return "ARMISD::VZIP";
- case ARMISD::VUZP: return "ARMISD::VUZP";
- case ARMISD::VTRN: return "ARMISD::VTRN";
- case ARMISD::FMAX: return "ARMISD::FMAX";
- case ARMISD::FMIN: return "ARMISD::FMIN";
- }
-}
-
-/// getFunctionAlignment - Return the Log2 alignment of this function.
-unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const {
- return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 0 : 1;
-}
-
-//===----------------------------------------------------------------------===//
-// Lowering Code
-//===----------------------------------------------------------------------===//
-
-/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
-static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
- switch (CC) {
- default: llvm_unreachable("Unknown condition code!");
- case ISD::SETNE: return ARMCC::NE;
- case ISD::SETEQ: return ARMCC::EQ;
- case ISD::SETGT: return ARMCC::GT;
- case ISD::SETGE: return ARMCC::GE;
- case ISD::SETLT: return ARMCC::LT;
- case ISD::SETLE: return ARMCC::LE;
- case ISD::SETUGT: return ARMCC::HI;
- case ISD::SETUGE: return ARMCC::HS;
- case ISD::SETULT: return ARMCC::LO;
- case ISD::SETULE: return ARMCC::LS;
- }
-}
-
-/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
-static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
- ARMCC::CondCodes &CondCode2) {
- CondCode2 = ARMCC::AL;
- switch (CC) {
- default: llvm_unreachable("Unknown FP condition!");
- case ISD::SETEQ:
- case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
- case ISD::SETGT:
- case ISD::SETOGT: CondCode = ARMCC::GT; break;
- case ISD::SETGE:
- case ISD::SETOGE: CondCode = ARMCC::GE; break;
- case ISD::SETOLT: CondCode = ARMCC::MI; break;
- case ISD::SETOLE: CondCode = ARMCC::LS; break;
- case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
- case ISD::SETO: CondCode = ARMCC::VC; break;
- case ISD::SETUO: CondCode = ARMCC::VS; break;
- case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
- case ISD::SETUGT: CondCode = ARMCC::HI; break;
- case ISD::SETUGE: CondCode = ARMCC::PL; break;
- case ISD::SETLT:
- case ISD::SETULT: CondCode = ARMCC::LT; break;
- case ISD::SETLE:
- case ISD::SETULE: CondCode = ARMCC::LE; break;
- case ISD::SETNE:
- case ISD::SETUNE: CondCode = ARMCC::NE; break;
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Calling Convention Implementation
-//===----------------------------------------------------------------------===//
-
-#include "ARMGenCallingConv.inc"
-
-// APCS f64 is in register pairs, possibly split to stack
-static bool f64AssignAPCS(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- CCState &State, bool CanFail) {
- static const unsigned RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
-
- // Try to get the first register.
- if (unsigned Reg = State.AllocateReg(RegList, 4))
- State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- else {
- // For the 2nd half of a v2f64, do not fail.
- if (CanFail)
- return false;
-
- // Put the whole thing on the stack.
- State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
- State.AllocateStack(8, 4),
- LocVT, LocInfo));
- return true;
- }
-
- // Try to get the second register.
- if (unsigned Reg = State.AllocateReg(RegList, 4))
- State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- else
- State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
- State.AllocateStack(4, 4),
- LocVT, LocInfo));
- return true;
-}
-
-static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State) {
- if (!f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
- return false;
- if (LocVT == MVT::v2f64 &&
- !f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
- return false;
- return true; // we handled it
-}
-
-// AAPCS f64 is in aligned register pairs
-static bool f64AssignAAPCS(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- CCState &State, bool CanFail) {
- static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
- static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
-
- unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
- if (Reg == 0) {
- // For the 2nd half of a v2f64, do not just fail.
- if (CanFail)
- return false;
-
- // Put the whole thing on the stack.
- State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
- State.AllocateStack(8, 8),
- LocVT, LocInfo));
- return true;
- }
-
- unsigned i;
- for (i = 0; i < 2; ++i)
- if (HiRegList[i] == Reg)
- break;
-
- State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
- LocVT, LocInfo));
- return true;
-}
-
-static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State) {
- if (!f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
- return false;
- if (LocVT == MVT::v2f64 &&
- !f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
- return false;
- return true; // we handled it
-}
-
-static bool f64RetAssign(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo, CCState &State) {
- static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
- static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
-
- unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
- if (Reg == 0)
- return false; // we didn't handle it
-
- unsigned i;
- for (i = 0; i < 2; ++i)
- if (HiRegList[i] == Reg)
- break;
-
- State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
- LocVT, LocInfo));
- return true;
-}
-
-static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State) {
- if (!f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
- return false;
- if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
- return false;
- return true; // we handled it
-}
-
-static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
- CCValAssign::LocInfo &LocInfo,
- ISD::ArgFlagsTy &ArgFlags,
- CCState &State) {
- return RetCC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags,
- State);
-}
-
-/// CCAssignFnForNode - Selects the correct CCAssignFn for the
-/// given CallingConvention value.
-CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
- bool Return,
- bool isVarArg) const {
- switch (CC) {
- default:
- llvm_unreachable("Unsupported calling convention");
- case CallingConv::C:
- case CallingConv::Fast:
- // Use target triple & subtarget features to do actual dispatch.
- if (Subtarget->isAAPCS_ABI()) {
- if (Subtarget->hasVFP2() &&
- FloatABIType == FloatABI::Hard && !isVarArg)
- return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
- else
- return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
- } else
- return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
- case CallingConv::ARM_AAPCS_VFP:
- return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
- case CallingConv::ARM_AAPCS:
- return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
- case CallingConv::ARM_APCS:
- return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
- }
-}
-
-/// LowerCallResult - Lower the result values of a call into the
-/// appropriate copies out of appropriate physical registers.
-SDValue
-ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) {
-
- // Assign locations to each value returned by this call.
- SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
- RVLocs, *DAG.getContext());
- CCInfo.AnalyzeCallResult(Ins,
- CCAssignFnForNode(CallConv, /* Return*/ true,
- isVarArg));
-
- // Copy all of the result registers out of their specified physreg.
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
- CCValAssign VA = RVLocs[i];
-
- SDValue Val;
- if (VA.needsCustom()) {
- // Handle f64 or half of a v2f64.
- SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
- InFlag);
- Chain = Lo.getValue(1);
- InFlag = Lo.getValue(2);
- VA = RVLocs[++i]; // skip ahead to next loc
- SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
- InFlag);
- Chain = Hi.getValue(1);
- InFlag = Hi.getValue(2);
- Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
-
- if (VA.getLocVT() == MVT::v2f64) {
- SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
- Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
- DAG.getConstant(0, MVT::i32));
-
- VA = RVLocs[++i]; // skip ahead to next loc
- Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
- Chain = Lo.getValue(1);
- InFlag = Lo.getValue(2);
- VA = RVLocs[++i]; // skip ahead to next loc
- Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
- Chain = Hi.getValue(1);
- InFlag = Hi.getValue(2);
- Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
- Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
- DAG.getConstant(1, MVT::i32));
- }
- } else {
- Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
- InFlag);
- Chain = Val.getValue(1);
- InFlag = Val.getValue(2);
- }
-
- switch (VA.getLocInfo()) {
- default: llvm_unreachable("Unknown loc info!");
- case CCValAssign::Full: break;
- case CCValAssign::BCvt:
- Val = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), Val);
- break;
- }
-
- InVals.push_back(Val);
- }
-
- return Chain;
-}
-
-/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
-/// by "Src" to address "Dst" of size "Size". Alignment information is
-/// specified by the specific parameter attribute. The copy will be passed as
-/// a byval function parameter.
-/// Sometimes what we are copying is the end of a larger object, the part that
-/// does not fit in registers.
-static SDValue
-CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
- ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
- DebugLoc dl) {
- SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
- return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
- /*AlwaysInline=*/false, NULL, 0, NULL, 0);
-}
-
-/// LowerMemOpCallTo - Store the argument to the stack.
-SDValue
-ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
- SDValue StackPtr, SDValue Arg,
- DebugLoc dl, SelectionDAG &DAG,
- const CCValAssign &VA,
- ISD::ArgFlagsTy Flags) {
- unsigned LocMemOffset = VA.getLocMemOffset();
- SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
- PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
- if (Flags.isByVal()) {
- return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
- }
- return DAG.getStore(Chain, dl, Arg, PtrOff,
- PseudoSourceValue::getStack(), LocMemOffset,
- false, false, 0);
-}
-
-void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
- SDValue Chain, SDValue &Arg,
- RegsToPassVector &RegsToPass,
- CCValAssign &VA, CCValAssign &NextVA,
- SDValue &StackPtr,
- SmallVector<SDValue, 8> &MemOpChains,
- ISD::ArgFlagsTy Flags) {
-
- SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
- DAG.getVTList(MVT::i32, MVT::i32), Arg);
- RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));
-
- if (NextVA.isRegLoc())
- RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1)));
- else {
- assert(NextVA.isMemLoc());
- if (StackPtr.getNode() == 0)
- StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
-
- MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1),
- dl, DAG, NextVA,
- Flags));
- }
-}
-
-/// LowerCall - Lowering a call into a callseq_start <-
-/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
-/// nodes.
-SDValue
-ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) {
- // ARM target does not yet support tail call optimization.
- isTailCall = false;
-
- // Analyze operands of the call, assigning locations to each operand.
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
- *DAG.getContext());
- CCInfo.AnalyzeCallOperands(Outs,
- CCAssignFnForNode(CallConv, /* Return*/ false,
- isVarArg));
-
- // Get a count of how many bytes are to be pushed on the stack.
- unsigned NumBytes = CCInfo.getNextStackOffset();
-
- // Adjust the stack pointer for the new arguments...
- // These operations are automatically eliminated by the prolog/epilog pass
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
-
- SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
-
- RegsToPassVector RegsToPass;
- SmallVector<SDValue, 8> MemOpChains;
-
- // Walk the register/memloc assignments, inserting copies/loads. In the case
- // of tail call optimization, arguments are handled later.
- for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
- i != e;
- ++i, ++realArgIdx) {
- CCValAssign &VA = ArgLocs[i];
- SDValue Arg = Outs[realArgIdx].Val;
- ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
-
- // Promote the value if needed.
- switch (VA.getLocInfo()) {
- default: llvm_unreachable("Unknown loc info!");
- case CCValAssign::Full: break;
- case CCValAssign::SExt:
- Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
- break;
- case CCValAssign::ZExt:
- Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
- break;
- case CCValAssign::AExt:
- Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
- break;
- case CCValAssign::BCvt:
- Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
- break;
- }
-
- // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
- if (VA.needsCustom()) {
- if (VA.getLocVT() == MVT::v2f64) {
- SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
- DAG.getConstant(0, MVT::i32));
- SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
- DAG.getConstant(1, MVT::i32));
-
- PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
- VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
-
- VA = ArgLocs[++i]; // skip ahead to next loc
- if (VA.isRegLoc()) {
- PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
- VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
- } else {
- assert(VA.isMemLoc());
-
- MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
- dl, DAG, VA, Flags));
- }
- } else {
- PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
- StackPtr, MemOpChains, Flags);
- }
- } else if (VA.isRegLoc()) {
- RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
- } else {
- assert(VA.isMemLoc());
-
- MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
- dl, DAG, VA, Flags));
- }
- }
-
- if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
-
- // Build a sequence of copy-to-reg nodes chained together with token chain
- // and flag operands which copy the outgoing args into the appropriate regs.
- SDValue InFlag;
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
- RegsToPass[i].second, InFlag);
- InFlag = Chain.getValue(1);
- }
-
- // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
- // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
- // node so that legalize doesn't hack it.
- bool isDirect = false;
- bool isARMFunc = false;
- bool isLocalARMFunc = false;
- MachineFunction &MF = DAG.getMachineFunction();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- GlobalValue *GV = G->getGlobal();
- isDirect = true;
- bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
- bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
- getTargetMachine().getRelocationModel() != Reloc::Static;
- isARMFunc = !Subtarget->isThumb() || isStub;
- // ARM call to a local ARM function is predicable.
- isLocalARMFunc = !Subtarget->isThumb() && !isExt;
- // tBX takes a register source operand.
- if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
- unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
- ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV,
- ARMPCLabelIndex,
- ARMCP::CPValue, 4);
- SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
- CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
- Callee = DAG.getLoad(getPointerTy(), dl,
- DAG.getEntryNode(), CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
- false, false, 0);
- SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
- Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
- getPointerTy(), Callee, PICLabel);
- } else
- Callee = DAG.getTargetGlobalAddress(GV, getPointerTy());
- } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- isDirect = true;
- bool isStub = Subtarget->isTargetDarwin() &&
- getTargetMachine().getRelocationModel() != Reloc::Static;
- isARMFunc = !Subtarget->isThumb() || isStub;
- // tBX takes a register source operand.
- const char *Sym = S->getSymbol();
- if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
- unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
- ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
- Sym, ARMPCLabelIndex, 4);
- SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
- CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
- Callee = DAG.getLoad(getPointerTy(), dl,
- DAG.getEntryNode(), CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
- false, false, 0);
- SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
- Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
- getPointerTy(), Callee, PICLabel);
- } else
- Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
- }
-
- // FIXME: handle tail calls differently.
- unsigned CallOpc;
- if (Subtarget->isThumb()) {
- if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
- CallOpc = ARMISD::CALL_NOLINK;
- else
- CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL;
- } else {
- CallOpc = (isDirect || Subtarget->hasV5TOps())
- ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL)
- : ARMISD::CALL_NOLINK;
- }
- if (CallOpc == ARMISD::CALL_NOLINK && !Subtarget->isThumb1Only()) {
- // implicit def LR - LR mustn't be allocated as GPR:$dst of CALL_NOLINK
- Chain = DAG.getCopyToReg(Chain, dl, ARM::LR, DAG.getUNDEF(MVT::i32),InFlag);
- InFlag = Chain.getValue(1);
- }
-
- std::vector<SDValue> Ops;
- Ops.push_back(Chain);
- Ops.push_back(Callee);
-
- // Add argument registers to the end of the list so that they are known live
- // into the call.
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
- Ops.push_back(DAG.getRegister(RegsToPass[i].first,
- RegsToPass[i].second.getValueType()));
-
- if (InFlag.getNode())
- Ops.push_back(InFlag);
- // Returns a chain and a flag for retval copy to use.
- Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
- &Ops[0], Ops.size());
- InFlag = Chain.getValue(1);
-
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(0, true), InFlag);
- if (!Ins.empty())
- InFlag = Chain.getValue(1);
-
- // Handle result values, copying them out of physregs into vregs that we
- // return.
- return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins,
- dl, DAG, InVals);
-}
-
-SDValue
-ARMTargetLowering::LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- DebugLoc dl, SelectionDAG &DAG) {
-
- // CCValAssign - represent the assignment of the return value to a location.
- SmallVector<CCValAssign, 16> RVLocs;
-
- // CCState - Info about the registers and stack slots.
- CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
- *DAG.getContext());
-
- // Analyze outgoing return values.
- CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,
- isVarArg));
-
- // If this is the first return lowered for this function, add
- // the regs to the liveout set for the function.
- if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
- for (unsigned i = 0; i != RVLocs.size(); ++i)
- if (RVLocs[i].isRegLoc())
- DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
- }
-
- SDValue Flag;
-
- // Copy the result values into the output registers.
- for (unsigned i = 0, realRVLocIdx = 0;
- i != RVLocs.size();
- ++i, ++realRVLocIdx) {
- CCValAssign &VA = RVLocs[i];
- assert(VA.isRegLoc() && "Can only return in registers!");
-
- SDValue Arg = Outs[realRVLocIdx].Val;
-
- switch (VA.getLocInfo()) {
- default: llvm_unreachable("Unknown loc info!");
- case CCValAssign::Full: break;
- case CCValAssign::BCvt:
- Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
- break;
- }
-
- if (VA.needsCustom()) {
- if (VA.getLocVT() == MVT::v2f64) {
- // Extract the first half and return it in two registers.
- SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
- DAG.getConstant(0, MVT::i32));
- SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
- DAG.getVTList(MVT::i32, MVT::i32), Half);
-
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag);
- Flag = Chain.getValue(1);
- VA = RVLocs[++i]; // skip ahead to next loc
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
- HalfGPRs.getValue(1), Flag);
- Flag = Chain.getValue(1);
- VA = RVLocs[++i]; // skip ahead to next loc
-
- // Extract the 2nd half and fall through to handle it as an f64 value.
- Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
- DAG.getConstant(1, MVT::i32));
- }
- // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is
- // available.
- SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
- DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag);
- Flag = Chain.getValue(1);
- VA = RVLocs[++i]; // skip ahead to next loc
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1),
- Flag);
- } else
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
-
- // Guarantee that all emitted copies are
- // stuck together via the flag so they cannot be scheduled apart.
- Flag = Chain.getValue(1);
- }
-
- SDValue result;
- if (Flag.getNode())
- result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
- else // Return Void
- result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain);
-
- return result;
-}
-
-// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
-// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
-// one of the above mentioned nodes. It has to be wrapped because otherwise
-// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
-// be used to form addressing mode. These wrapped nodes will be selected
-// into MOVi.
-static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
- EVT PtrVT = Op.getValueType();
- // FIXME there is no actual debug info here
- DebugLoc dl = Op.getDebugLoc();
- ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
- SDValue Res;
- if (CP->isMachineConstantPoolEntry())
- Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
- CP->getAlignment());
- else
- Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
- CP->getAlignment());
- return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
-}
-
-SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) {
- MachineFunction &MF = DAG.getMachineFunction();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned ARMPCLabelIndex = 0;
- DebugLoc DL = Op.getDebugLoc();
- EVT PtrVT = getPointerTy();
- BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
- Reloc::Model RelocM = getTargetMachine().getRelocationModel();
- SDValue CPAddr;
- if (RelocM == Reloc::Static) {
- CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
- } else {
- unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
- ARMPCLabelIndex = AFI->createConstPoolEntryUId();
- ARMConstantPoolValue *CPV = new ARMConstantPoolValue(BA, ARMPCLabelIndex,
- ARMCP::CPBlockAddress,
- PCAdj);
- CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
- }
- CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
- SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
- false, false, 0);
- if (RelocM == Reloc::Static)
- return Result;
- SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
- return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
-}
-
-// Lower ISD::GlobalTLSAddress using the "general dynamic" model
-SDValue
-ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
- SelectionDAG &DAG) {
- DebugLoc dl = GA->getDebugLoc();
- EVT PtrVT = getPointerTy();
- unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
- MachineFunction &MF = DAG.getMachineFunction();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
- ARMConstantPoolValue *CPV =
- new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex,
- ARMCP::CPValue, PCAdj, "tlsgd", true);
- SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
- Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
- Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument,
- PseudoSourceValue::getConstantPool(), 0,
- false, false, 0);
- SDValue Chain = Argument.getValue(1);
-
- SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
- Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
-
- // call __tls_get_addr.
- ArgListTy Args;
- ArgListEntry Entry;
- Entry.Node = Argument;
- Entry.Ty = (const Type *) Type::getInt32Ty(*DAG.getContext());
- Args.push_back(Entry);
- // FIXME: is there useful debug info available here?
- std::pair<SDValue, SDValue> CallResult =
- LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()),
- false, false, false, false,
- 0, CallingConv::C, false, /*isReturnValueUsed=*/true,
- DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl);
- return CallResult.first;
-}
-
-// Lower ISD::GlobalTLSAddress using the "initial exec" or
-// "local exec" model.
-SDValue
-ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
- SelectionDAG &DAG) {
- GlobalValue *GV = GA->getGlobal();
- DebugLoc dl = GA->getDebugLoc();
- SDValue Offset;
- SDValue Chain = DAG.getEntryNode();
- EVT PtrVT = getPointerTy();
- // Get the Thread Pointer
- SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
-
- if (GV->isDeclaration()) {
- MachineFunction &MF = DAG.getMachineFunction();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
- // Initial exec model.
- unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
- ARMConstantPoolValue *CPV =
- new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex,
- ARMCP::CPValue, PCAdj, "gottpoff", true);
- Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
- Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
- Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
- PseudoSourceValue::getConstantPool(), 0,
- false, false, 0);
- Chain = Offset.getValue(1);
-
- SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
- Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
-
- Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
- PseudoSourceValue::getConstantPool(), 0,
- false, false, 0);
- } else {
- // local exec model
- ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, "tpoff");
- Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
- Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
- Offset = DAG.getLoad(PtrVT, dl, Chain, Offset,
- PseudoSourceValue::getConstantPool(), 0,
- false, false, 0);
- }
-
- // The address of the thread local variable is the add of the thread
- // pointer with the offset of the variable.
- return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
-}
-
-SDValue
-ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) {
- // TODO: implement the "local dynamic" model
- assert(Subtarget->isTargetELF() &&
- "TLS not implemented for non-ELF targets");
- GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
- // If the relocation model is PIC, use the "General Dynamic" TLS Model,
- // otherwise use the "Local Exec" TLS Model
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
- return LowerToTLSGeneralDynamicModel(GA, DAG);
- else
- return LowerToTLSExecModels(GA, DAG);
-}
-
-SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
- SelectionDAG &DAG) {
- EVT PtrVT = getPointerTy();
- DebugLoc dl = Op.getDebugLoc();
- GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- Reloc::Model RelocM = getTargetMachine().getRelocationModel();
- if (RelocM == Reloc::PIC_) {
- bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
- ARMConstantPoolValue *CPV =
- new ARMConstantPoolValue(GV, UseGOTOFF ? "GOTOFF" : "GOT");
- SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
- CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
- SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
- CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
- false, false, 0);
- SDValue Chain = Result.getValue(1);
- SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
- Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT);
- if (!UseGOTOFF)
- Result = DAG.getLoad(PtrVT, dl, Chain, Result,
- PseudoSourceValue::getGOT(), 0,
- false, false, 0);
- return Result;
- } else {
- // If we have T2 ops, we can materialize the address directly via movt/movw
- // pair. This is always cheaper.
- if (Subtarget->useMovt()) {
- return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
- DAG.getTargetGlobalAddress(GV, PtrVT));
- } else {
- SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
- CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
- return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
- false, false, 0);
- }
- }
-}
-
-SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
- SelectionDAG &DAG) {
- MachineFunction &MF = DAG.getMachineFunction();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned ARMPCLabelIndex = 0;
- EVT PtrVT = getPointerTy();
- DebugLoc dl = Op.getDebugLoc();
- GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- Reloc::Model RelocM = getTargetMachine().getRelocationModel();
- SDValue CPAddr;
- if (RelocM == Reloc::Static)
- CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
- else {
- ARMPCLabelIndex = AFI->createConstPoolEntryUId();
- unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8);
- ARMConstantPoolValue *CPV =
- new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj);
- CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
- }
- CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
-
- SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
- false, false, 0);
- SDValue Chain = Result.getValue(1);
-
- if (RelocM == Reloc::PIC_) {
- SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
- Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
- }
-
- if (Subtarget->GVIsIndirectSymbol(GV, RelocM))
- Result = DAG.getLoad(PtrVT, dl, Chain, Result,
- PseudoSourceValue::getGOT(), 0,
- false, false, 0);
-
- return Result;
-}
-
-SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
- SelectionDAG &DAG){
- assert(Subtarget->isTargetELF() &&
- "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
- MachineFunction &MF = DAG.getMachineFunction();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
- EVT PtrVT = getPointerTy();
- DebugLoc dl = Op.getDebugLoc();
- unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
- ARMConstantPoolValue *CPV = new ARMConstantPoolValue(*DAG.getContext(),
- "_GLOBAL_OFFSET_TABLE_",
- ARMPCLabelIndex, PCAdj);
- SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
- CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
- SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
- false, false, 0);
- SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
- return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
-}
-
-SDValue
-ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
- const ARMSubtarget *Subtarget) {
- unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- DebugLoc dl = Op.getDebugLoc();
- switch (IntNo) {
- default: return SDValue(); // Don't custom lower most intrinsics.
- case Intrinsic::arm_thread_pointer: {
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
- return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
- }
- case Intrinsic::eh_sjlj_lsda: {
- MachineFunction &MF = DAG.getMachineFunction();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
- EVT PtrVT = getPointerTy();
- DebugLoc dl = Op.getDebugLoc();
- Reloc::Model RelocM = getTargetMachine().getRelocationModel();
- SDValue CPAddr;
- unsigned PCAdj = (RelocM != Reloc::PIC_)
- ? 0 : (Subtarget->isThumb() ? 4 : 8);
- ARMConstantPoolValue *CPV =
- new ARMConstantPoolValue(MF.getFunction(), ARMPCLabelIndex,
- ARMCP::CPLSDA, PCAdj);
- CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
- CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
- SDValue Result =
- DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
- PseudoSourceValue::getConstantPool(), 0,
- false, false, 0);
- SDValue Chain = Result.getValue(1);
-
- if (RelocM == Reloc::PIC_) {
- SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
- Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
- }
- return Result;
- }
- case Intrinsic::eh_sjlj_setjmp:
- SDValue Val = Subtarget->isThumb() ?
- DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::SP, MVT::i32) :
- DAG.getConstant(0, MVT::i32);
- return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(1),
- Val);
- }
-}
-
-static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG,
- const ARMSubtarget *Subtarget) {
- DebugLoc dl = Op.getDebugLoc();
- SDValue Op5 = Op.getOperand(5);
- SDValue Res;
- unsigned isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue();
- if (isDeviceBarrier) {
- if (Subtarget->hasV7Ops())
- Res = DAG.getNode(ARMISD::SYNCBARRIER, dl, MVT::Other, Op.getOperand(0));
- else
- Res = DAG.getNode(ARMISD::SYNCBARRIER, dl, MVT::Other, Op.getOperand(0),
- DAG.getConstant(0, MVT::i32));
- } else {
- if (Subtarget->hasV7Ops())
- Res = DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
- else
- Res = DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
- DAG.getConstant(0, MVT::i32));
- }
- return Res;
-}
-
-static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
- unsigned VarArgsFrameIndex) {
- // vastart just stores the address of the VarArgsFrameIndex slot into the
- // memory location argument.
- DebugLoc dl = Op.getDebugLoc();
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
- SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
- const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
- return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0,
- false, false, 0);
-}
-
-SDValue
-ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) {
- SDNode *Node = Op.getNode();
- DebugLoc dl = Node->getDebugLoc();
- EVT VT = Node->getValueType(0);
- SDValue Chain = Op.getOperand(0);
- SDValue Size = Op.getOperand(1);
- SDValue Align = Op.getOperand(2);
-
- // Chain the dynamic stack allocation so that it doesn't modify the stack
- // pointer when other instructions are using the stack.
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true));
-
- unsigned AlignVal = cast<ConstantSDNode>(Align)->getZExtValue();
- unsigned StackAlign = getTargetMachine().getFrameInfo()->getStackAlignment();
- if (AlignVal > StackAlign)
- // Do this now since the selection pass cannot introduce new
- // target-independent nodes.
- Align = DAG.getConstant(-(uint64_t)AlignVal, VT);
-
- // In Thumb1 mode, there isn't a "sub r, sp, r" instruction, we will end up
- // using a "add r, sp, r" instead. Negate the size now so we don't have to
- // do even more horrible hack later.
- MachineFunction &MF = DAG.getMachineFunction();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- if (AFI->isThumb1OnlyFunction()) {
- bool Negate = true;
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(Size);
- if (C) {
- uint32_t Val = C->getZExtValue();
- if (Val <= 508 && ((Val & 3) == 0))
- Negate = false;
- }
- if (Negate)
- Size = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, VT), Size);
- }
-
- SDVTList VTList = DAG.getVTList(VT, MVT::Other);
- SDValue Ops1[] = { Chain, Size, Align };
- SDValue Res = DAG.getNode(ARMISD::DYN_ALLOC, dl, VTList, Ops1, 3);
- Chain = Res.getValue(1);
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
- DAG.getIntPtrConstant(0, true), SDValue());
- SDValue Ops2[] = { Res, Chain };
- return DAG.getMergeValues(Ops2, 2, dl);
-}
-
-SDValue
-ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
- SDValue &Root, SelectionDAG &DAG,
- DebugLoc dl) {
- MachineFunction &MF = DAG.getMachineFunction();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
-
- TargetRegisterClass *RC;
- if (AFI->isThumb1OnlyFunction())
- RC = ARM::tGPRRegisterClass;
- else
- RC = ARM::GPRRegisterClass;
-
- // Transform the arguments stored in physical registers into virtual ones.
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
- SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
-
- SDValue ArgValue2;
- if (NextVA.isMemLoc()) {
- unsigned ArgSize = NextVA.getLocVT().getSizeInBits()/8;
- MachineFrameInfo *MFI = MF.getFrameInfo();
- int FI = MFI->CreateFixedObject(ArgSize, NextVA.getLocMemOffset(),
- true, false);
-
- // Create load node to retrieve arguments from the stack.
- SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
- ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
- PseudoSourceValue::getFixedStack(FI), 0,
- false, false, 0);
- } else {
- Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
- ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
- }
-
- return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
-}
-
-SDValue
-ARMTargetLowering::LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg>
- &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) {
-
- MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo *MFI = MF.getFrameInfo();
-
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
-
- // Assign locations to all of the incoming arguments.
- SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
- *DAG.getContext());
- CCInfo.AnalyzeFormalArguments(Ins,
- CCAssignFnForNode(CallConv, /* Return*/ false,
- isVarArg));
-
- SmallVector<SDValue, 16> ArgValues;
-
- for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- CCValAssign &VA = ArgLocs[i];
-
- // Arguments stored in registers.
- if (VA.isRegLoc()) {
- EVT RegVT = VA.getLocVT();
-
- SDValue ArgValue;
- if (VA.needsCustom()) {
- // f64 and vector types are split up into multiple registers or
- // combinations of registers and stack slots.
- RegVT = MVT::i32;
-
- if (VA.getLocVT() == MVT::v2f64) {
- SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
- Chain, DAG, dl);
- VA = ArgLocs[++i]; // skip ahead to next loc
- SDValue ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
- Chain, DAG, dl);
- ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
- ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
- ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
- ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
- ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
- } else
- ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
-
- } else {
- TargetRegisterClass *RC;
-
- if (RegVT == MVT::f32)
- RC = ARM::SPRRegisterClass;
- else if (RegVT == MVT::f64)
- RC = ARM::DPRRegisterClass;
- else if (RegVT == MVT::v2f64)
- RC = ARM::QPRRegisterClass;
- else if (RegVT == MVT::i32)
- RC = (AFI->isThumb1OnlyFunction() ?
- ARM::tGPRRegisterClass : ARM::GPRRegisterClass);
- else
- llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
-
- // Transform the arguments in physical registers into virtual ones.
- unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
- ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
- }
-
- // If this is an 8 or 16-bit value, it is really passed promoted
- // to 32 bits. Insert an assert[sz]ext to capture this, then
- // truncate to the right size.
- switch (VA.getLocInfo()) {
- default: llvm_unreachable("Unknown loc info!");
- case CCValAssign::Full: break;
- case CCValAssign::BCvt:
- ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
- break;
- case CCValAssign::SExt:
- ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
- DAG.getValueType(VA.getValVT()));
- ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
- break;
- case CCValAssign::ZExt:
- ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
- DAG.getValueType(VA.getValVT()));
- ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
- break;
- }
-
- InVals.push_back(ArgValue);
-
- } else { // VA.isRegLoc()
-
- // sanity check
- assert(VA.isMemLoc());
- assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
-
- unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
- int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(),
- true, false);
-
- // Create load nodes to retrieve arguments from the stack.
- SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
- InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
- PseudoSourceValue::getFixedStack(FI), 0,
- false, false, 0));
- }
- }
-
- // varargs
- if (isVarArg) {
- static const unsigned GPRArgRegs[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3
- };
-
- unsigned NumGPRs = CCInfo.getFirstUnallocated
- (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
-
- unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
- unsigned VARegSize = (4 - NumGPRs) * 4;
- unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
- unsigned ArgOffset = CCInfo.getNextStackOffset();
- if (VARegSaveSize) {
- // If this function is vararg, store any remaining integer argument regs
- // to their spots on the stack so that they may be loaded by dereferencing
- // the result of va_next.
- AFI->setVarArgsRegSaveSize(VARegSaveSize);
- VarArgsFrameIndex = MFI->CreateFixedObject(VARegSaveSize, ArgOffset +
- VARegSaveSize - VARegSize,
- true, false);
- SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
-
- SmallVector<SDValue, 4> MemOps;
- for (; NumGPRs < 4; ++NumGPRs) {
- TargetRegisterClass *RC;
- if (AFI->isThumb1OnlyFunction())
- RC = ARM::tGPRRegisterClass;
- else
- RC = ARM::GPRRegisterClass;
-
- unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC);
- SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
- SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
- PseudoSourceValue::getFixedStack(VarArgsFrameIndex), 0,
- false, false, 0);
- MemOps.push_back(Store);
- FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
- DAG.getConstant(4, getPointerTy()));
- }
- if (!MemOps.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &MemOps[0], MemOps.size());
- } else
- // This will point to the next argument passed via stack.
- VarArgsFrameIndex = MFI->CreateFixedObject(4, ArgOffset, true, false);
- }
-
- return Chain;
-}
-
-/// isFloatingPointZero - Return true if this is +0.0.
-static bool isFloatingPointZero(SDValue Op) {
- if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
- return CFP->getValueAPF().isPosZero();
- else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
- // Maybe this has already been legalized into the constant pool?
- if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
- SDValue WrapperOp = Op.getOperand(1).getOperand(0);
- if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
- if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
- return CFP->getValueAPF().isPosZero();
- }
- }
- return false;
-}
-
-/// Returns appropriate ARM CMP (cmp) and corresponding condition code for
-/// the given operands.
-SDValue
-ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
- SDValue &ARMCC, SelectionDAG &DAG, DebugLoc dl) {
- if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
- unsigned C = RHSC->getZExtValue();
- if (!isLegalICmpImmediate(C)) {
- // Constant does not fit, try adjusting it by one?
- switch (CC) {
- default: break;
- case ISD::SETLT:
- case ISD::SETGE:
- if (isLegalICmpImmediate(C-1)) {
- CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
- RHS = DAG.getConstant(C-1, MVT::i32);
- }
- break;
- case ISD::SETULT:
- case ISD::SETUGE:
- if (C > 0 && isLegalICmpImmediate(C-1)) {
- CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
- RHS = DAG.getConstant(C-1, MVT::i32);
- }
- break;
- case ISD::SETLE:
- case ISD::SETGT:
- if (isLegalICmpImmediate(C+1)) {
- CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
- RHS = DAG.getConstant(C+1, MVT::i32);
- }
- break;
- case ISD::SETULE:
- case ISD::SETUGT:
- if (C < 0xffffffff && isLegalICmpImmediate(C+1)) {
- CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
- RHS = DAG.getConstant(C+1, MVT::i32);
- }
- break;
- }
- }
- }
-
- ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
- ARMISD::NodeType CompareType;
- switch (CondCode) {
- default:
- CompareType = ARMISD::CMP;
- break;
- case ARMCC::EQ:
- case ARMCC::NE:
- // Uses only Z Flag
- CompareType = ARMISD::CMPZ;
- break;
- }
- ARMCC = DAG.getConstant(CondCode, MVT::i32);
- return DAG.getNode(CompareType, dl, MVT::Flag, LHS, RHS);
-}
-
-/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
-static SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
- DebugLoc dl) {
- SDValue Cmp;
- if (!isFloatingPointZero(RHS))
- Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Flag, LHS, RHS);
- else
- Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Flag, LHS);
- return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Flag, Cmp);
-}
-
-SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
- EVT VT = Op.getValueType();
- SDValue LHS = Op.getOperand(0);
- SDValue RHS = Op.getOperand(1);
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
- SDValue TrueVal = Op.getOperand(2);
- SDValue FalseVal = Op.getOperand(3);
- DebugLoc dl = Op.getDebugLoc();
-
- if (LHS.getValueType() == MVT::i32) {
- SDValue ARMCC;
- SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
- SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, dl);
- return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMCC, CCR,Cmp);
- }
-
- ARMCC::CondCodes CondCode, CondCode2;
- FPCCToARMCC(CC, CondCode, CondCode2);
-
- SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
- SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
- SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
- SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
- ARMCC, CCR, Cmp);
- if (CondCode2 != ARMCC::AL) {
- SDValue ARMCC2 = DAG.getConstant(CondCode2, MVT::i32);
- // FIXME: Needs another CMP because flag can have but one use.
- SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
- Result = DAG.getNode(ARMISD::CMOV, dl, VT,
- Result, TrueVal, ARMCC2, CCR, Cmp2);
- }
- return Result;
-}
-
-SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) {
- SDValue Chain = Op.getOperand(0);
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
- SDValue LHS = Op.getOperand(2);
- SDValue RHS = Op.getOperand(3);
- SDValue Dest = Op.getOperand(4);
- DebugLoc dl = Op.getDebugLoc();
-
- if (LHS.getValueType() == MVT::i32) {
- SDValue ARMCC;
- SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
- SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, dl);
- return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
- Chain, Dest, ARMCC, CCR,Cmp);
- }
-
- assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
- ARMCC::CondCodes CondCode, CondCode2;
- FPCCToARMCC(CC, CondCode, CondCode2);
-
- SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
- SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
- SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
- SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag);
- SDValue Ops[] = { Chain, Dest, ARMCC, CCR, Cmp };
- SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
- if (CondCode2 != ARMCC::AL) {
- ARMCC = DAG.getConstant(CondCode2, MVT::i32);
- SDValue Ops[] = { Res, Dest, ARMCC, CCR, Res.getValue(1) };
- Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
- }
- return Res;
-}
-
-SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) {
- SDValue Chain = Op.getOperand(0);
- SDValue Table = Op.getOperand(1);
- SDValue Index = Op.getOperand(2);
- DebugLoc dl = Op.getDebugLoc();
-
- EVT PTy = getPointerTy();
- JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
- ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
- SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
- SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
- Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
- Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
- SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
- if (Subtarget->isThumb2()) {
- // Thumb2 uses a two-level jump. That is, it jumps into the jump table
- // which does another jump to the destination. This also makes it easier
- // to translate it to TBB / TBH later.
- // FIXME: This might not work if the function is extremely large.
- return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
- Addr, Op.getOperand(2), JTI, UId);
- }
- if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
- Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
- PseudoSourceValue::getJumpTable(), 0,
- false, false, 0);
- Chain = Addr.getValue(1);
- Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
- return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
- } else {
- Addr = DAG.getLoad(PTy, dl, Chain, Addr,
- PseudoSourceValue::getJumpTable(), 0, false, false, 0);
- Chain = Addr.getValue(1);
- return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
- }
-}
-
-static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
- DebugLoc dl = Op.getDebugLoc();
- unsigned Opc =
- Op.getOpcode() == ISD::FP_TO_SINT ? ARMISD::FTOSI : ARMISD::FTOUI;
- Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
-}
-
-static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
- EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
- unsigned Opc =
- Op.getOpcode() == ISD::SINT_TO_FP ? ARMISD::SITOF : ARMISD::UITOF;
-
- Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0));
- return DAG.getNode(Opc, dl, VT, Op);
-}
-
-static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
- // Implement fcopysign with a fabs and a conditional fneg.
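-  // That is, take the absolute value of Tmp0 and negate it when Tmp1
-  // compares less than 0.0.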
- SDValue Tmp0 = Op.getOperand(0);
- SDValue Tmp1 = Op.getOperand(1);
- DebugLoc dl = Op.getDebugLoc();
- EVT VT = Op.getValueType();
- EVT SrcVT = Tmp1.getValueType();
- SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0);
- SDValue Cmp = getVFPCmp(Tmp1, DAG.getConstantFP(0.0, SrcVT), DAG, dl);
- SDValue ARMCC = DAG.getConstant(ARMCC::LT, MVT::i32);
- SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
- return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMCC, CCR, Cmp);
-}
-
-SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
- MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
- MFI->setFrameAddressIsTaken(true);
- EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful
- unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin())
- ? ARM::R7 : ARM::R11;
- SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
- while (Depth--)
- FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, NULL, 0,
- false, false, 0);
- return FrameAddr;
-}
-
-SDValue
-ARMTargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- bool AlwaysInline,
- const Value *DstSV, uint64_t DstSVOff,
- const Value *SrcSV, uint64_t SrcSVOff){
- // Do repeated 4-byte loads and stores. To be improved.
- // This requires 4-byte alignment.
- if ((Align & 3) != 0)
- return SDValue();
-  // This requires the copy size to be a constant, preferably
- // within a subtarget-specific limit.
- ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
- if (!ConstantSize)
- return SDValue();
- uint64_t SizeVal = ConstantSize->getZExtValue();
- if (!AlwaysInline && SizeVal > getSubtarget()->getMaxInlineSizeThreshold())
- return SDValue();
-
- unsigned BytesLeft = SizeVal & 3;
- unsigned NumMemOps = SizeVal >> 2;
- unsigned EmittedNumMemOps = 0;
- EVT VT = MVT::i32;
- unsigned VTSize = 4;
- unsigned i = 0;
- const unsigned MAX_LOADS_IN_LDM = 6;
- SDValue TFOps[MAX_LOADS_IN_LDM];
- SDValue Loads[MAX_LOADS_IN_LDM];
- uint64_t SrcOff = 0, DstOff = 0;
-
- // Emit up to MAX_LOADS_IN_LDM loads, then a TokenFactor barrier, then the
- // same number of stores. The loads and stores will get combined into
- // ldm/stm later on.
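-  // For example, a 24-byte copy gives NumMemOps == 6 and BytesLeft == 0:
-  // one batch of six i32 loads, a TokenFactor, then six i32 stores.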
- while (EmittedNumMemOps < NumMemOps) {
- for (i = 0;
- i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
- Loads[i] = DAG.getLoad(VT, dl, Chain,
- DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
- DAG.getConstant(SrcOff, MVT::i32)),
- SrcSV, SrcSVOff + SrcOff, false, false, 0);
- TFOps[i] = Loads[i].getValue(1);
- SrcOff += VTSize;
- }
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
-
- for (i = 0;
- i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
- TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
- DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
- DAG.getConstant(DstOff, MVT::i32)),
- DstSV, DstSVOff + DstOff, false, false, 0);
- DstOff += VTSize;
- }
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
-
- EmittedNumMemOps += i;
- }
-
- if (BytesLeft == 0)
- return Chain;
-
- // Issue loads / stores for the trailing (1 - 3) bytes.
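-  // For example, 3 trailing bytes are copied with one i16 load/store pair
-  // followed by one i8 load/store pair.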
- unsigned BytesLeftSave = BytesLeft;
- i = 0;
- while (BytesLeft) {
- if (BytesLeft >= 2) {
- VT = MVT::i16;
- VTSize = 2;
- } else {
- VT = MVT::i8;
- VTSize = 1;
- }
-
- Loads[i] = DAG.getLoad(VT, dl, Chain,
- DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
- DAG.getConstant(SrcOff, MVT::i32)),
- SrcSV, SrcSVOff + SrcOff, false, false, 0);
- TFOps[i] = Loads[i].getValue(1);
- ++i;
- SrcOff += VTSize;
- BytesLeft -= VTSize;
- }
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
-
- i = 0;
- BytesLeft = BytesLeftSave;
- while (BytesLeft) {
- if (BytesLeft >= 2) {
- VT = MVT::i16;
- VTSize = 2;
- } else {
- VT = MVT::i8;
- VTSize = 1;
- }
-
- TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
- DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
- DAG.getConstant(DstOff, MVT::i32)),
- DstSV, DstSVOff + DstOff, false, false, 0);
- ++i;
- DstOff += VTSize;
- BytesLeft -= VTSize;
- }
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
-}
-
-static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
- SDValue Op = N->getOperand(0);
- DebugLoc dl = N->getDebugLoc();
- if (N->getValueType(0) == MVT::f64) {
- // Turn i64->f64 into VMOVDRR.
- SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
- DAG.getConstant(0, MVT::i32));
- SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
- DAG.getConstant(1, MVT::i32));
- return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
- }
-
- // Turn f64->i64 into VMOVRRD.
- SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
- DAG.getVTList(MVT::i32, MVT::i32), &Op, 1);
-
- // Merge the pieces into a single i64 value.
- return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
-}
-
-/// getZeroVector - Returns a vector of specified type with all zero elements.
-///
-static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
- assert(VT.isVector() && "Expected a vector type");
-
- // Zero vectors are used to represent vector negation and in those cases
- // will be implemented with the NEON VNEG instruction. However, VNEG does
- // not support i64 elements, so sometimes the zero vectors will need to be
- // explicitly constructed. For those cases, and potentially other uses in
- // the future, always build zero vectors as <16 x i8> or <8 x i8> bitcasted
- // to their dest type. This ensures they get CSE'd.
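-  // For example, a zero v2i32 is built as a v8i8 BUILD_VECTOR of eight zero
-  // bytes and then bitcast to v2i32.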
- SDValue Vec;
- SDValue Cst = DAG.getTargetConstant(0, MVT::i8);
- SmallVector<SDValue, 8> Ops;
- MVT TVT;
-
- if (VT.getSizeInBits() == 64) {
- Ops.assign(8, Cst); TVT = MVT::v8i8;
- } else {
- Ops.assign(16, Cst); TVT = MVT::v16i8;
- }
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, TVT, &Ops[0], Ops.size());
-
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
-}
-
-/// getOnesVector - Returns a vector of specified type with all bits set.
-///
-static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
- assert(VT.isVector() && "Expected a vector type");
-
- // Always build ones vectors as <16 x i8> or <8 x i8> bitcasted to their
- // dest type. This ensures they get CSE'd.
- SDValue Vec;
- SDValue Cst = DAG.getTargetConstant(0xFF, MVT::i8);
- SmallVector<SDValue, 8> Ops;
- MVT TVT;
-
- if (VT.getSizeInBits() == 64) {
- Ops.assign(8, Cst); TVT = MVT::v8i8;
- } else {
- Ops.assign(16, Cst); TVT = MVT::v16i8;
- }
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, TVT, &Ops[0], Ops.size());
-
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
-}
-
-/// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
-/// i32 values and take a 2 x i32 value to shift plus a shift amount.
-SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) {
- assert(Op.getNumOperands() == 3 && "Not a double-shift!");
- EVT VT = Op.getValueType();
- unsigned VTBits = VT.getSizeInBits();
- DebugLoc dl = Op.getDebugLoc();
- SDValue ShOpLo = Op.getOperand(0);
- SDValue ShOpHi = Op.getOperand(1);
- SDValue ShAmt = Op.getOperand(2);
- SDValue ARMCC;
- unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
-
- assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
-
- SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
- DAG.getConstant(VTBits, MVT::i32), ShAmt);
- SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
- SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
- DAG.getConstant(VTBits, MVT::i32));
- SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
- SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
- SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
-
- SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
- SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
- ARMCC, DAG, dl);
- SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
- SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMCC,
- CCR, Cmp);
-
- SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, 2, dl);
-}
-
-/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
-/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
-SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) {
- assert(Op.getNumOperands() == 3 && "Not a double-shift!");
- EVT VT = Op.getValueType();
- unsigned VTBits = VT.getSizeInBits();
- DebugLoc dl = Op.getDebugLoc();
- SDValue ShOpLo = Op.getOperand(0);
- SDValue ShOpHi = Op.getOperand(1);
- SDValue ShAmt = Op.getOperand(2);
- SDValue ARMCC;
-
- assert(Op.getOpcode() == ISD::SHL_PARTS);
- SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
- DAG.getConstant(VTBits, MVT::i32), ShAmt);
- SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
- SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
- DAG.getConstant(VTBits, MVT::i32));
- SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
- SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
-
- SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
- SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
- SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
- ARMCC, DAG, dl);
- SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
- SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMCC,
- CCR, Cmp);
-
- SDValue Ops[2] = { Lo, Hi };
- return DAG.getMergeValues(Ops, 2, dl);
-}
-
-static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
- const ARMSubtarget *ST) {
- EVT VT = N->getValueType(0);
- DebugLoc dl = N->getDebugLoc();
-
- if (!ST->hasV6T2Ops())
- return SDValue();
-
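-  // Compute cttz(x) as ctlz(rbit(x)): RBIT reverses the bits, turning
-  // trailing zeros into leading zeros.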
- SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0));
- return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
-}
-
-static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
- const ARMSubtarget *ST) {
- EVT VT = N->getValueType(0);
- DebugLoc dl = N->getDebugLoc();
-
- // Lower vector shifts on NEON to use VSHL.
- if (VT.isVector()) {
- assert(ST->hasNEON() && "unexpected vector shift");
-
- // Left shifts translate directly to the vshiftu intrinsic.
- if (N->getOpcode() == ISD::SHL)
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
- N->getOperand(0), N->getOperand(1));
-
- assert((N->getOpcode() == ISD::SRA ||
- N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
-
- // NEON uses the same intrinsics for both left and right shifts. For
- // right shifts, the shift amounts are negative, so negate the vector of
- // shift amounts.
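-    // For example, an SRL of v4i32 by <2, 2, 2, 2> becomes a vshiftu by
-    // <-2, -2, -2, -2>.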
- EVT ShiftVT = N->getOperand(1).getValueType();
- SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
- getZeroVector(ShiftVT, DAG, dl),
- N->getOperand(1));
- Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
- Intrinsic::arm_neon_vshifts :
- Intrinsic::arm_neon_vshiftu);
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
- DAG.getConstant(vshiftInt, MVT::i32),
- N->getOperand(0), NegatedCount);
- }
-
- // We can get here for a node like i32 = ISD::SHL i32, i64
- if (VT != MVT::i64)
- return SDValue();
-
- assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
- "Unknown shift to lower!");
-
-  // We only lower SRA and SRL of 1 here; all others use generic lowering.
- if (!isa<ConstantSDNode>(N->getOperand(1)) ||
- cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
- return SDValue();
-
- // If we are in thumb mode, we don't have RRX.
- if (ST->isThumb1Only()) return SDValue();
-
- // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
- SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
- DAG.getConstant(0, MVT::i32));
- SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
- DAG.getConstant(1, MVT::i32));
-
- // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
- // captures the result into a carry flag.
- unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
- Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1);
-
- // The low part is an ARMISD::RRX operand, which shifts the carry in.
- Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
-
- // Merge the pieces into a single i64 value.
- return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
-}
-
-static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
- SDValue TmpOp0, TmpOp1;
- bool Invert = false;
- bool Swap = false;
- unsigned Opc = 0;
-
- SDValue Op0 = Op.getOperand(0);
- SDValue Op1 = Op.getOperand(1);
- SDValue CC = Op.getOperand(2);
- EVT VT = Op.getValueType();
- ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
- DebugLoc dl = Op.getDebugLoc();
-
- if (Op.getOperand(1).getValueType().isFloatingPoint()) {
- switch (SetCCOpcode) {
- default: llvm_unreachable("Illegal FP comparison"); break;
- case ISD::SETUNE:
- case ISD::SETNE: Invert = true; // Fallthrough
- case ISD::SETOEQ:
- case ISD::SETEQ: Opc = ARMISD::VCEQ; break;
- case ISD::SETOLT:
- case ISD::SETLT: Swap = true; // Fallthrough
- case ISD::SETOGT:
- case ISD::SETGT: Opc = ARMISD::VCGT; break;
- case ISD::SETOLE:
- case ISD::SETLE: Swap = true; // Fallthrough
- case ISD::SETOGE:
- case ISD::SETGE: Opc = ARMISD::VCGE; break;
- case ISD::SETUGE: Swap = true; // Fallthrough
- case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
- case ISD::SETUGT: Swap = true; // Fallthrough
- case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
- case ISD::SETUEQ: Invert = true; // Fallthrough
- case ISD::SETONE:
- // Expand this to (OLT | OGT).
- TmpOp0 = Op0;
- TmpOp1 = Op1;
- Opc = ISD::OR;
- Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
- Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
- break;
- case ISD::SETUO: Invert = true; // Fallthrough
- case ISD::SETO:
- // Expand this to (OLT | OGE).
- TmpOp0 = Op0;
- TmpOp1 = Op1;
- Opc = ISD::OR;
- Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
- Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1);
- break;
- }
- } else {
- // Integer comparisons.
- switch (SetCCOpcode) {
- default: llvm_unreachable("Illegal integer comparison"); break;
-    case ISD::SETNE:  Invert = true; // Fallthrough
-    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
-    case ISD::SETLT:  Swap = true; // Fallthrough
-    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
-    case ISD::SETLE:  Swap = true; // Fallthrough
-    case ISD::SETGE:  Opc = ARMISD::VCGE; break;
-    case ISD::SETULT: Swap = true; // Fallthrough
-    case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
-    case ISD::SETULE: Swap = true; // Fallthrough
-    case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
- }
-
- // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
- if (Opc == ARMISD::VCEQ) {
-
- SDValue AndOp;
- if (ISD::isBuildVectorAllZeros(Op1.getNode()))
- AndOp = Op0;
- else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
- AndOp = Op1;
-
- // Ignore bitconvert.
- if (AndOp.getNode() && AndOp.getOpcode() == ISD::BIT_CONVERT)
- AndOp = AndOp.getOperand(0);
-
- if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
- Opc = ARMISD::VTST;
- Op0 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(0));
- Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(1));
- Invert = !Invert;
- }
- }
- }
-
- if (Swap)
- std::swap(Op0, Op1);
-
- SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
-
- if (Invert)
- Result = DAG.getNOT(dl, Result, VT);
-
- return Result;
-}
-
-/// isVMOVSplat - Check if the specified splat value corresponds to an immediate
-/// VMOV instruction, and if so, return the constant being splatted.
-static SDValue isVMOVSplat(uint64_t SplatBits, uint64_t SplatUndef,
- unsigned SplatBitSize, SelectionDAG &DAG) {
- switch (SplatBitSize) {
- case 8:
- // Any 1-byte value is OK.
- assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
- return DAG.getTargetConstant(SplatBits, MVT::i8);
-
- case 16:
- // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
- if ((SplatBits & ~0xff) == 0 ||
- (SplatBits & ~0xff00) == 0)
- return DAG.getTargetConstant(SplatBits, MVT::i16);
- break;
-
- case 32:
- // NEON's 32-bit VMOV supports splat values where:
- // * only one byte is nonzero, or
- // * the least significant byte is 0xff and the second byte is nonzero, or
- // * the least significant 2 bytes are 0xff and the third is nonzero.
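-    // For example, 0x00000042, 0x000042ff and 0x0042ffff are all supported
-    // here, while 0x00420042 is not.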
- if ((SplatBits & ~0xff) == 0 ||
- (SplatBits & ~0xff00) == 0 ||
- (SplatBits & ~0xff0000) == 0 ||
- (SplatBits & ~0xff000000) == 0)
- return DAG.getTargetConstant(SplatBits, MVT::i32);
-
- if ((SplatBits & ~0xffff) == 0 &&
- ((SplatBits | SplatUndef) & 0xff) == 0xff)
- return DAG.getTargetConstant(SplatBits | 0xff, MVT::i32);
-
- if ((SplatBits & ~0xffffff) == 0 &&
- ((SplatBits | SplatUndef) & 0xffff) == 0xffff)
- return DAG.getTargetConstant(SplatBits | 0xffff, MVT::i32);
-
- // Note: there are a few 32-bit splat values (specifically: 00ffff00,
- // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
- // VMOV.I32. A (very) minor optimization would be to replicate the value
- // and fall through here to test for a valid 64-bit splat. But, then the
- // caller would also need to check and handle the change in size.
- break;
-
- case 64: {
- // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
- uint64_t BitMask = 0xff;
- uint64_t Val = 0;
- for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
- if (((SplatBits | SplatUndef) & BitMask) == BitMask)
- Val |= BitMask;
- else if ((SplatBits & BitMask) != 0)
- return SDValue();
- BitMask <<= 8;
- }
- return DAG.getTargetConstant(Val, MVT::i64);
- }
-
- default:
- llvm_unreachable("unexpected size for isVMOVSplat");
- break;
- }
-
- return SDValue();
-}
-
-/// getVMOVImm - If this is a build_vector of constants which can be
-/// formed by using a VMOV instruction of the specified element size,
-/// return the constant being splatted. The ByteSize field indicates the
-/// number of bytes of each element (1, 2, 4, or 8).
-SDValue ARM::getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
- BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N);
- APInt SplatBits, SplatUndef;
- unsigned SplatBitSize;
- bool HasAnyUndefs;
- if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
- HasAnyUndefs, ByteSize * 8))
- return SDValue();
-
- if (SplatBitSize > ByteSize * 8)
- return SDValue();
-
- return isVMOVSplat(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
- SplatBitSize, DAG);
-}
-
-static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT,
- bool &ReverseVEXT, unsigned &Imm) {
- unsigned NumElts = VT.getVectorNumElements();
- ReverseVEXT = false;
- Imm = M[0];
-
- // If this is a VEXT shuffle, the immediate value is the index of the first
- // element. The other shuffle indices must be the successive elements after
- // the first one.
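-  // For example, with 4 elements, <1, 2, 3, 4> is a VEXT with Imm == 1, and
-  // <7, 0, 1, 2> is a VEXT of the swapped sources with Imm == 3.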
- unsigned ExpectedElt = Imm;
- for (unsigned i = 1; i < NumElts; ++i) {
- // Increment the expected index. If it wraps around, it may still be
- // a VEXT but the source vectors must be swapped.
- ExpectedElt += 1;
- if (ExpectedElt == NumElts * 2) {
- ExpectedElt = 0;
- ReverseVEXT = true;
- }
-
- if (ExpectedElt != static_cast<unsigned>(M[i]))
- return false;
- }
-
- // Adjust the index value if the source operands will be swapped.
- if (ReverseVEXT)
- Imm -= NumElts;
-
- return true;
-}
-
-/// isVREVMask - Check if a vector shuffle corresponds to a VREV
-/// instruction with the specified blocksize. (The order of the elements
-/// within each block of the vector is reversed.)
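-/// For example, a 64-bit VREV (VREV64) of v4i16 corresponds to the mask
-/// <3, 2, 1, 0>, and of v8i16 to <3, 2, 1, 0, 7, 6, 5, 4>.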
-static bool isVREVMask(const SmallVectorImpl<int> &M, EVT VT,
- unsigned BlockSize) {
- assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
- "Only possible block sizes for VREV are: 16, 32, 64");
-
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
- if (EltSz == 64)
- return false;
-
- unsigned NumElts = VT.getVectorNumElements();
- unsigned BlockElts = M[0] + 1;
-
- if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
- return false;
-
- for (unsigned i = 0; i < NumElts; ++i) {
- if ((unsigned) M[i] !=
- (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
- return false;
- }
-
- return true;
-}
-
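-/// isVTRNMask - Check if a vector shuffle corresponds to a VTRN (transpose)
-/// operation; e.g., for 4 elements the two results use the masks
-/// <0, 4, 2, 6> and <1, 5, 3, 7>.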
-static bool isVTRNMask(const SmallVectorImpl<int> &M, EVT VT,
- unsigned &WhichResult) {
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
- if (EltSz == 64)
- return false;
-
- unsigned NumElts = VT.getVectorNumElements();
- WhichResult = (M[0] == 0 ? 0 : 1);
- for (unsigned i = 0; i < NumElts; i += 2) {
- if ((unsigned) M[i] != i + WhichResult ||
- (unsigned) M[i+1] != i + NumElts + WhichResult)
- return false;
- }
- return true;
-}
-
-/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
-/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
-/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
-static bool isVTRN_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
- unsigned &WhichResult) {
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
- if (EltSz == 64)
- return false;
-
- unsigned NumElts = VT.getVectorNumElements();
- WhichResult = (M[0] == 0 ? 0 : 1);
- for (unsigned i = 0; i < NumElts; i += 2) {
- if ((unsigned) M[i] != i + WhichResult ||
- (unsigned) M[i+1] != i + WhichResult)
- return false;
- }
- return true;
-}
-
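-/// isVUZPMask - Check if a vector shuffle corresponds to a VUZP (unzip)
-/// operation; e.g., for 4 elements the two results use the masks
-/// <0, 2, 4, 6> and <1, 3, 5, 7>.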
-static bool isVUZPMask(const SmallVectorImpl<int> &M, EVT VT,
- unsigned &WhichResult) {
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
- if (EltSz == 64)
- return false;
-
- unsigned NumElts = VT.getVectorNumElements();
- WhichResult = (M[0] == 0 ? 0 : 1);
- for (unsigned i = 0; i != NumElts; ++i) {
- if ((unsigned) M[i] != 2 * i + WhichResult)
- return false;
- }
-
- // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
- if (VT.is64BitVector() && EltSz == 32)
- return false;
-
- return true;
-}
-
-/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
-/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
-/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
-static bool isVUZP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
- unsigned &WhichResult) {
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
- if (EltSz == 64)
- return false;
-
- unsigned Half = VT.getVectorNumElements() / 2;
- WhichResult = (M[0] == 0 ? 0 : 1);
- for (unsigned j = 0; j != 2; ++j) {
- unsigned Idx = WhichResult;
- for (unsigned i = 0; i != Half; ++i) {
- if ((unsigned) M[i + j * Half] != Idx)
- return false;
- Idx += 2;
- }
- }
-
- // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
- if (VT.is64BitVector() && EltSz == 32)
- return false;
-
- return true;
-}
-
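-/// isVZIPMask - Check if a vector shuffle corresponds to a VZIP (interleave)
-/// operation; e.g., for 4 elements the two results use the masks
-/// <0, 4, 1, 5> and <2, 6, 3, 7>.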
-static bool isVZIPMask(const SmallVectorImpl<int> &M, EVT VT,
- unsigned &WhichResult) {
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
- if (EltSz == 64)
- return false;
-
- unsigned NumElts = VT.getVectorNumElements();
- WhichResult = (M[0] == 0 ? 0 : 1);
- unsigned Idx = WhichResult * NumElts / 2;
- for (unsigned i = 0; i != NumElts; i += 2) {
- if ((unsigned) M[i] != Idx ||
- (unsigned) M[i+1] != Idx + NumElts)
- return false;
- Idx += 1;
- }
-
- // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
- if (VT.is64BitVector() && EltSz == 32)
- return false;
-
- return true;
-}
-
-/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
-/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
-/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
-static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
- unsigned &WhichResult) {
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
- if (EltSz == 64)
- return false;
-
- unsigned NumElts = VT.getVectorNumElements();
- WhichResult = (M[0] == 0 ? 0 : 1);
- unsigned Idx = WhichResult * NumElts / 2;
- for (unsigned i = 0; i != NumElts; i += 2) {
- if ((unsigned) M[i] != Idx ||
- (unsigned) M[i+1] != Idx)
- return false;
- Idx += 1;
- }
-
- // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
- if (VT.is64BitVector() && EltSz == 32)
- return false;
-
- return true;
-}
-
-
-static SDValue BuildSplat(SDValue Val, EVT VT, SelectionDAG &DAG, DebugLoc dl) {
- // Canonicalize all-zeros and all-ones vectors.
- ConstantSDNode *ConstVal = cast<ConstantSDNode>(Val.getNode());
- if (ConstVal->isNullValue())
- return getZeroVector(VT, DAG, dl);
- if (ConstVal->isAllOnesValue())
- return getOnesVector(VT, DAG, dl);
-
- EVT CanonicalVT;
- if (VT.is64BitVector()) {
- switch (Val.getValueType().getSizeInBits()) {
- case 8: CanonicalVT = MVT::v8i8; break;
- case 16: CanonicalVT = MVT::v4i16; break;
- case 32: CanonicalVT = MVT::v2i32; break;
- case 64: CanonicalVT = MVT::v1i64; break;
- default: llvm_unreachable("unexpected splat element type"); break;
- }
- } else {
- assert(VT.is128BitVector() && "unknown splat vector size");
- switch (Val.getValueType().getSizeInBits()) {
- case 8: CanonicalVT = MVT::v16i8; break;
- case 16: CanonicalVT = MVT::v8i16; break;
- case 32: CanonicalVT = MVT::v4i32; break;
- case 64: CanonicalVT = MVT::v2i64; break;
- default: llvm_unreachable("unexpected splat element type"); break;
- }
- }
-
- // Build a canonical splat for this value.
- SmallVector<SDValue, 8> Ops;
- Ops.assign(CanonicalVT.getVectorNumElements(), Val);
- SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, &Ops[0],
- Ops.size());
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Res);
-}
-
-// If this is a case we can't handle, return null and let the default
-// expansion code take care of it.
-static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
- BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
- DebugLoc dl = Op.getDebugLoc();
- EVT VT = Op.getValueType();
-
- APInt SplatBits, SplatUndef;
- unsigned SplatBitSize;
- bool HasAnyUndefs;
- if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
- if (SplatBitSize <= 64) {
- SDValue Val = isVMOVSplat(SplatBits.getZExtValue(),
- SplatUndef.getZExtValue(), SplatBitSize, DAG);
- if (Val.getNode())
- return BuildSplat(Val, VT, DAG, dl);
- }
- }
-
- // If there are only 2 elements in a 128-bit vector, insert them into an
- // undef vector. This handles the common case for 128-bit vector argument
- // passing, where the insertions should be translated to subreg accesses
- // with no real instructions.
- if (VT.is128BitVector() && Op.getNumOperands() == 2) {
- SDValue Val = DAG.getUNDEF(VT);
- SDValue Op0 = Op.getOperand(0);
- SDValue Op1 = Op.getOperand(1);
- if (Op0.getOpcode() != ISD::UNDEF)
- Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, Op0,
- DAG.getIntPtrConstant(0));
- if (Op1.getOpcode() != ISD::UNDEF)
- Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, Op1,
- DAG.getIntPtrConstant(1));
- return Val;
- }
-
- return SDValue();
-}
-
-/// isShuffleMaskLegal - Targets can use this to indicate that they only
-/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
-/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
-/// are assumed to be legal.
-bool
-ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
- EVT VT) const {
- if (VT.getVectorNumElements() == 4 &&
- (VT.is128BitVector() || VT.is64BitVector())) {
- unsigned PFIndexes[4];
- for (unsigned i = 0; i != 4; ++i) {
- if (M[i] < 0)
- PFIndexes[i] = 8;
- else
- PFIndexes[i] = M[i];
- }
-
- // Compute the index in the perfect shuffle table.
- unsigned PFTableIndex =
- PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
- unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
- unsigned Cost = (PFEntry >> 30);
-
- if (Cost <= 4)
- return true;
- }
-
- bool ReverseVEXT;
- unsigned Imm, WhichResult;
-
- return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
- isVREVMask(M, VT, 64) ||
- isVREVMask(M, VT, 32) ||
- isVREVMask(M, VT, 16) ||
- isVEXTMask(M, VT, ReverseVEXT, Imm) ||
- isVTRNMask(M, VT, WhichResult) ||
- isVUZPMask(M, VT, WhichResult) ||
- isVZIPMask(M, VT, WhichResult) ||
- isVTRN_v_undef_Mask(M, VT, WhichResult) ||
- isVUZP_v_undef_Mask(M, VT, WhichResult) ||
- isVZIP_v_undef_Mask(M, VT, WhichResult));
-}
-
-/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
-/// the specified operations to build the shuffle.
-static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
- SDValue RHS, SelectionDAG &DAG,
- DebugLoc dl) {
- unsigned OpNum = (PFEntry >> 26) & 0x0F;
- unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
- unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
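-  // PFEntry layout: bits 31-30 hold the cost, bits 29-26 the shuffle opcode,
-  // bits 25-13 the LHS table id and bits 12-0 the RHS table id.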
-
- enum {
- OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
- OP_VREV,
- OP_VDUP0,
- OP_VDUP1,
- OP_VDUP2,
- OP_VDUP3,
- OP_VEXT1,
- OP_VEXT2,
- OP_VEXT3,
- OP_VUZPL, // VUZP, left result
- OP_VUZPR, // VUZP, right result
- OP_VZIPL, // VZIP, left result
- OP_VZIPR, // VZIP, right result
- OP_VTRNL, // VTRN, left result
- OP_VTRNR // VTRN, right result
- };
-
- if (OpNum == OP_COPY) {
- if (LHSID == (1*9+2)*9+3) return LHS;
- assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
- return RHS;
- }
-
- SDValue OpLHS, OpRHS;
- OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
- OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
- EVT VT = OpLHS.getValueType();
-
- switch (OpNum) {
- default: llvm_unreachable("Unknown shuffle opcode!");
- case OP_VREV:
- return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
- case OP_VDUP0:
- case OP_VDUP1:
- case OP_VDUP2:
- case OP_VDUP3:
- return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
- OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32));
- case OP_VEXT1:
- case OP_VEXT2:
- case OP_VEXT3:
- return DAG.getNode(ARMISD::VEXT, dl, VT,
- OpLHS, OpRHS,
- DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32));
- case OP_VUZPL:
- case OP_VUZPR:
- return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
- OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
- case OP_VZIPL:
- case OP_VZIPR:
- return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
- OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
- case OP_VTRNL:
- case OP_VTRNR:
- return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
- OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
- }
-}
-
-static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
- SDValue V1 = Op.getOperand(0);
- SDValue V2 = Op.getOperand(1);
- DebugLoc dl = Op.getDebugLoc();
- EVT VT = Op.getValueType();
- ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
- SmallVector<int, 8> ShuffleMask;
-
- // Convert shuffles that are directly supported on NEON to target-specific
- // DAG nodes, instead of keeping them as shuffles and matching them again
- // during code selection. This is more efficient and avoids the possibility
- // of inconsistencies between legalization and selection.
- // FIXME: floating-point vectors should be canonicalized to integer vectors
-  // of the same size so that they get CSEd properly.
- SVN->getMask(ShuffleMask);
-
- if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
- int Lane = SVN->getSplatIndex();
-    // If this is an undef splat, generate it via "just" vdup, if possible.
- if (Lane == -1) Lane = 0;
-
- if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
- return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
- }
- return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
- DAG.getConstant(Lane, MVT::i32));
- }
-
- bool ReverseVEXT;
- unsigned Imm;
- if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
- if (ReverseVEXT)
- std::swap(V1, V2);
- return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
- DAG.getConstant(Imm, MVT::i32));
- }
-
- if (isVREVMask(ShuffleMask, VT, 64))
- return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
- if (isVREVMask(ShuffleMask, VT, 32))
- return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
- if (isVREVMask(ShuffleMask, VT, 16))
- return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
-
- // Check for Neon shuffles that modify both input vectors in place.
- // If both results are used, i.e., if there are two shuffles with the same
- // source operands and with masks corresponding to both results of one of
- // these operations, DAG memoization will ensure that a single node is
- // used for both shuffles.
- unsigned WhichResult;
- if (isVTRNMask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
- V1, V2).getValue(WhichResult);
- if (isVUZPMask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
- V1, V2).getValue(WhichResult);
- if (isVZIPMask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
- V1, V2).getValue(WhichResult);
-
- if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
- V1, V1).getValue(WhichResult);
- if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
- V1, V1).getValue(WhichResult);
- if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
- V1, V1).getValue(WhichResult);
-
- // If the shuffle is not directly supported and it has 4 elements, use
- // the PerfectShuffle-generated table to synthesize it from other shuffles.
- if (VT.getVectorNumElements() == 4 &&
- (VT.is128BitVector() || VT.is64BitVector())) {
- unsigned PFIndexes[4];
- for (unsigned i = 0; i != 4; ++i) {
- if (ShuffleMask[i] < 0)
- PFIndexes[i] = 8;
- else
- PFIndexes[i] = ShuffleMask[i];
- }
-
- // Compute the index in the perfect shuffle table.
- unsigned PFTableIndex =
- PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
-
- unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
- unsigned Cost = (PFEntry >> 30);
-
- if (Cost <= 4)
- return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
- }
-
- return SDValue();
-}
-
-static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
- EVT VT = Op.getValueType();
- DebugLoc dl = Op.getDebugLoc();
- SDValue Vec = Op.getOperand(0);
- SDValue Lane = Op.getOperand(1);
- assert(VT == MVT::i32 &&
- Vec.getValueType().getVectorElementType().getSizeInBits() < 32 &&
- "unexpected type for custom-lowering vector extract");
- return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
-}
-
-static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
- // The only time a CONCAT_VECTORS operation can have legal types is when
- // two 64-bit vectors are concatenated to a 128-bit vector.
- assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
- "unexpected CONCAT_VECTORS");
- DebugLoc dl = Op.getDebugLoc();
- SDValue Val = DAG.getUNDEF(MVT::v2f64);
- SDValue Op0 = Op.getOperand(0);
- SDValue Op1 = Op.getOperand(1);
- if (Op0.getOpcode() != ISD::UNDEF)
- Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op0),
- DAG.getIntPtrConstant(0));
- if (Op1.getOpcode() != ISD::UNDEF)
- Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op1),
- DAG.getIntPtrConstant(1));
- return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Val);
-}
-
-SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
- switch (Op.getOpcode()) {
- default: llvm_unreachable("Don't know how to custom lower this!");
- case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
- case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
- case ISD::GlobalAddress:
- return Subtarget->isTargetDarwin() ? LowerGlobalAddressDarwin(Op, DAG) :
- LowerGlobalAddressELF(Op, DAG);
- case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
- case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
- case ISD::BR_CC: return LowerBR_CC(Op, DAG);
- case ISD::BR_JT: return LowerBR_JT(Op, DAG);
- case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
- case ISD::VASTART: return LowerVASTART(Op, DAG, VarArgsFrameIndex);
- case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, DAG, Subtarget);
- case ISD::SINT_TO_FP:
- case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
- case ISD::FP_TO_SINT:
- case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
- case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
- case ISD::RETURNADDR: break;
- case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
- case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG);
- case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
- Subtarget);
- case ISD::BIT_CONVERT: return ExpandBIT_CONVERT(Op.getNode(), DAG);
- case ISD::SHL:
- case ISD::SRL:
- case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget);
- case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG);
- case ISD::SRL_PARTS:
- case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG);
- case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
- case ISD::VSETCC: return LowerVSETCC(Op, DAG);
- case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
- case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
- case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
- case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
- }
- return SDValue();
-}
-
-/// ReplaceNodeResults - Replace the results of node with an illegal result
-/// type with new values built out of custom code.
-void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
- SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG) {
- switch (N->getOpcode()) {
- default:
- llvm_unreachable("Don't know how to custom expand this!");
- return;
- case ISD::BIT_CONVERT:
- Results.push_back(ExpandBIT_CONVERT(N, DAG));
- return;
- case ISD::SRL:
- case ISD::SRA: {
- SDValue Res = LowerShift(N, DAG, Subtarget);
- if (Res.getNode())
- Results.push_back(Res);
- return;
- }
- }
-}
-
-//===----------------------------------------------------------------------===//
-// ARM Scheduler Hooks
-//===----------------------------------------------------------------------===//
-
-MachineBasicBlock *
-ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Size) const {
- unsigned dest = MI->getOperand(0).getReg();
- unsigned ptr = MI->getOperand(1).getReg();
- unsigned oldval = MI->getOperand(2).getReg();
- unsigned newval = MI->getOperand(3).getReg();
- unsigned scratch = BB->getParent()->getRegInfo()
- .createVirtualRegister(ARM::GPRRegisterClass);
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- DebugLoc dl = MI->getDebugLoc();
- bool isThumb2 = Subtarget->isThumb2();
-
- unsigned ldrOpc, strOpc;
- switch (Size) {
- default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
- case 1:
- ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
-    strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
- break;
- case 2:
- ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
- strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
- break;
- case 4:
- ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
- strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
- break;
- }
-
- MachineFunction *MF = BB->getParent();
- const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction::iterator It = BB;
- ++It; // insert the new blocks after the current block
-
- MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MF->insert(It, loop1MBB);
- MF->insert(It, loop2MBB);
- MF->insert(It, exitMBB);
- exitMBB->transferSuccessors(BB);
-
- // thisMBB:
- // ...
- // fallthrough --> loop1MBB
- BB->addSuccessor(loop1MBB);
-
- // loop1MBB:
- // ldrex dest, [ptr]
- // cmp dest, oldval
- // bne exitMBB
- BB = loop1MBB;
- AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr));
- AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
- .addReg(dest).addReg(oldval));
- BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
- .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
- BB->addSuccessor(loop2MBB);
- BB->addSuccessor(exitMBB);
-
- // loop2MBB:
- // strex scratch, newval, [ptr]
- // cmp scratch, #0
- // bne loop1MBB
- BB = loop2MBB;
- AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval)
- .addReg(ptr));
- AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
- .addReg(scratch).addImm(0));
- BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
- .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
- BB->addSuccessor(loop1MBB);
- BB->addSuccessor(exitMBB);
-
- // exitMBB:
- // ...
- BB = exitMBB;
-
- MF->DeleteMachineInstr(MI); // The instruction is gone now.
-
- return BB;
-}
-
-MachineBasicBlock *
-ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
- unsigned Size, unsigned BinOpcode) const {
- // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
-
- const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction *MF = BB->getParent();
- MachineFunction::iterator It = BB;
- ++It;
-
- unsigned dest = MI->getOperand(0).getReg();
- unsigned ptr = MI->getOperand(1).getReg();
- unsigned incr = MI->getOperand(2).getReg();
- DebugLoc dl = MI->getDebugLoc();
-
- bool isThumb2 = Subtarget->isThumb2();
- unsigned ldrOpc, strOpc;
- switch (Size) {
-  default: llvm_unreachable("unsupported size for AtomicBinary!");
- case 1:
- ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB;
- strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB;
- break;
- case 2:
- ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH;
- strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH;
- break;
- case 4:
- ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX;
- strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX;
- break;
- }
-
- MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
- MF->insert(It, loopMBB);
- MF->insert(It, exitMBB);
- exitMBB->transferSuccessors(BB);
-
- MachineRegisterInfo &RegInfo = MF->getRegInfo();
- unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
- unsigned scratch2 = (!BinOpcode) ? incr :
- RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
-
- // thisMBB:
- // ...
- // fallthrough --> loopMBB
- BB->addSuccessor(loopMBB);
-
- // loopMBB:
- // ldrex dest, ptr
- // <binop> scratch2, dest, incr
- // strex scratch, scratch2, ptr
- // cmp scratch, #0
- // bne- loopMBB
- // fallthrough --> exitMBB
- BB = loopMBB;
- AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr));
- if (BinOpcode) {
- // operand order needs to go the other way for NAND
- if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr)
- AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
- addReg(incr).addReg(dest)).addReg(0);
- else
- AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2).
- addReg(dest).addReg(incr)).addReg(0);
- }
-
- AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2)
- .addReg(ptr));
- AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
- .addReg(scratch).addImm(0));
- BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
- .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
-
- BB->addSuccessor(loopMBB);
- BB->addSuccessor(exitMBB);
-
- // exitMBB:
- // ...
- BB = exitMBB;
-
- MF->DeleteMachineInstr(MI); // The instruction is gone now.
-
- return BB;
-}
-
-MachineBasicBlock *
-ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB,
- DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const {
- const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- DebugLoc dl = MI->getDebugLoc();
- bool isThumb2 = Subtarget->isThumb2();
- switch (MI->getOpcode()) {
- default:
- MI->dump();
- llvm_unreachable("Unexpected instr type to insert");
-
- case ARM::ATOMIC_LOAD_ADD_I8:
- return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
- case ARM::ATOMIC_LOAD_ADD_I16:
- return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
- case ARM::ATOMIC_LOAD_ADD_I32:
- return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr);
-
- case ARM::ATOMIC_LOAD_AND_I8:
- return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
- case ARM::ATOMIC_LOAD_AND_I16:
- return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
- case ARM::ATOMIC_LOAD_AND_I32:
- return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
-
- case ARM::ATOMIC_LOAD_OR_I8:
- return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
- case ARM::ATOMIC_LOAD_OR_I16:
- return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
- case ARM::ATOMIC_LOAD_OR_I32:
- return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr);
-
- case ARM::ATOMIC_LOAD_XOR_I8:
- return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
- case ARM::ATOMIC_LOAD_XOR_I16:
- return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
- case ARM::ATOMIC_LOAD_XOR_I32:
- return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr);
-
- case ARM::ATOMIC_LOAD_NAND_I8:
- return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
- case ARM::ATOMIC_LOAD_NAND_I16:
- return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
- case ARM::ATOMIC_LOAD_NAND_I32:
- return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr);
-
- case ARM::ATOMIC_LOAD_SUB_I8:
- return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
- case ARM::ATOMIC_LOAD_SUB_I16:
- return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
- case ARM::ATOMIC_LOAD_SUB_I32:
- return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr);
-
- case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0);
- case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0);
- case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0);
-
- case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1);
- case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2);
- case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4);
-
- case ARM::tMOVCCr_pseudo: {
- // To "insert" a SELECT_CC instruction, we actually have to insert the
- // diamond control-flow pattern. The incoming instruction knows the
- // destination vreg to set, the condition code register to branch on, the
- // true/false values to select between, and a branch opcode to use.
- const BasicBlock *LLVM_BB = BB->getBasicBlock();
- MachineFunction::iterator It = BB;
- ++It;
-
- // thisMBB:
- // ...
- // TrueVal = ...
- // cmpTY ccX, r1, r2
- // bCC copy1MBB
- // fallthrough --> copy0MBB
- MachineBasicBlock *thisMBB = BB;
- MachineFunction *F = BB->getParent();
- MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
- MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
- BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB)
- .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());
- F->insert(It, copy0MBB);
- F->insert(It, sinkMBB);
- // Update machine-CFG edges by first adding all successors of the current
- // block to the new block which will contain the Phi node for the select.
- // Also inform sdisel of the edge changes.
- for (MachineBasicBlock::succ_iterator I = BB->succ_begin(),
- E = BB->succ_end(); I != E; ++I) {
- EM->insert(std::make_pair(*I, sinkMBB));
- sinkMBB->addSuccessor(*I);
- }
- // Next, remove all successors of the current block, and add the true
- // and fallthrough blocks as its successors.
- while (!BB->succ_empty())
- BB->removeSuccessor(BB->succ_begin());
- BB->addSuccessor(copy0MBB);
- BB->addSuccessor(sinkMBB);
-
- // copy0MBB:
- // %FalseValue = ...
- // # fallthrough to sinkMBB
- BB = copy0MBB;
-
- // Update machine-CFG edges
- BB->addSuccessor(sinkMBB);
-
- // sinkMBB:
- // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
- // ...
- BB = sinkMBB;
- BuildMI(BB, dl, TII->get(ARM::PHI), MI->getOperand(0).getReg())
- .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
- .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
-
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
- return BB;
- }
-
- case ARM::tANDsp:
- case ARM::tADDspr_:
- case ARM::tSUBspi_:
- case ARM::t2SUBrSPi_:
- case ARM::t2SUBrSPi12_:
- case ARM::t2SUBrSPs_: {
- MachineFunction *MF = BB->getParent();
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned SrcReg = MI->getOperand(1).getReg();
- bool DstIsDead = MI->getOperand(0).isDead();
- bool SrcIsKill = MI->getOperand(1).isKill();
-
- if (SrcReg != ARM::SP) {
- // Copy the source to SP from virtual register.
- const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(SrcReg);
- unsigned CopyOpc = (RC == ARM::tGPRRegisterClass)
- ? ARM::tMOVtgpr2gpr : ARM::tMOVgpr2gpr;
- BuildMI(BB, dl, TII->get(CopyOpc), ARM::SP)
- .addReg(SrcReg, getKillRegState(SrcIsKill));
- }
-
- unsigned OpOpc = 0;
- bool NeedPred = false, NeedCC = false, NeedOp3 = false;
- switch (MI->getOpcode()) {
- default:
- llvm_unreachable("Unexpected pseudo instruction!");
- case ARM::tANDsp:
- OpOpc = ARM::tAND;
- NeedPred = true;
- break;
- case ARM::tADDspr_:
- OpOpc = ARM::tADDspr;
- break;
- case ARM::tSUBspi_:
- OpOpc = ARM::tSUBspi;
- break;
- case ARM::t2SUBrSPi_:
- OpOpc = ARM::t2SUBrSPi;
- NeedPred = true; NeedCC = true;
- break;
- case ARM::t2SUBrSPi12_:
- OpOpc = ARM::t2SUBrSPi12;
- NeedPred = true;
- break;
- case ARM::t2SUBrSPs_:
- OpOpc = ARM::t2SUBrSPs;
- NeedPred = true; NeedCC = true; NeedOp3 = true;
- break;
- }
- MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(OpOpc), ARM::SP);
- if (OpOpc == ARM::tAND)
- AddDefaultT1CC(MIB);
- MIB.addReg(ARM::SP);
- MIB.addOperand(MI->getOperand(2));
- if (NeedOp3)
- MIB.addOperand(MI->getOperand(3));
- if (NeedPred)
- AddDefaultPred(MIB);
- if (NeedCC)
- AddDefaultCC(MIB);
-
- // Copy the result from SP to virtual register.
- const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(DstReg);
- unsigned CopyOpc = (RC == ARM::tGPRRegisterClass)
- ? ARM::tMOVgpr2tgpr : ARM::tMOVgpr2gpr;
- BuildMI(BB, dl, TII->get(CopyOpc))
- .addReg(DstReg, getDefRegState(true) | getDeadRegState(DstIsDead))
- .addReg(ARM::SP);
- MF->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
- return BB;
- }
- }
-}
-
-//===----------------------------------------------------------------------===//
-// ARM Optimization Hooks
-//===----------------------------------------------------------------------===//
-
-static
-SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
- TargetLowering::DAGCombinerInfo &DCI) {
- SelectionDAG &DAG = DCI.DAG;
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- EVT VT = N->getValueType(0);
- unsigned Opc = N->getOpcode();
- bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC;
- SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1);
- SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2);
- ISD::CondCode CC = ISD::SETCC_INVALID;
-
- if (isSlctCC) {
- CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get();
- } else {
- SDValue CCOp = Slct.getOperand(0);
- if (CCOp.getOpcode() == ISD::SETCC)
- CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get();
- }
-
- bool DoXform = false;
- bool InvCC = false;
- assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) &&
- "Bad input!");
-
- if (LHS.getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(LHS)->isNullValue()) {
- DoXform = true;
- } else if (CC != ISD::SETCC_INVALID &&
- RHS.getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(RHS)->isNullValue()) {
- std::swap(LHS, RHS);
- SDValue Op0 = Slct.getOperand(0);
- EVT OpVT = isSlctCC ? Op0.getValueType() :
- Op0.getOperand(0).getValueType();
- bool isInt = OpVT.isInteger();
- CC = ISD::getSetCCInverse(CC, isInt);
-
- if (!TLI.isCondCodeLegal(CC, OpVT))
- return SDValue(); // Inverse operator isn't legal.
-
- DoXform = true;
- InvCC = true;
- }
-
- if (DoXform) {
- SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS);
- if (isSlctCC)
- return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result,
- Slct.getOperand(0), Slct.getOperand(1), CC);
- SDValue CCOp = Slct.getOperand(0);
- if (InvCC)
- CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(),
- CCOp.getOperand(0), CCOp.getOperand(1), CC);
- return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT,
- CCOp, OtherOp, Result);
- }
- return SDValue();
-}
-
-/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
-static SDValue PerformADDCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- // added by evan in r37685 with no testcase.
- SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
-
- // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
- if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
- SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
- if (Result.getNode()) return Result;
- }
- if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
- SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
- if (Result.getNode()) return Result;
- }
-
- return SDValue();
-}
-
-/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
-static SDValue PerformSUBCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- // added by evan in r37685 with no testcase.
- SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
-
- // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
- if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
- SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
- if (Result.getNode()) return Result;
- }
-
- return SDValue();
-}
-
-/// PerformVMOVRRDCombine - Target-specific dag combine xforms for
-/// ARMISD::VMOVRRD.
-static SDValue PerformVMOVRRDCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- // fmrrd(fmdrr x, y) -> x,y
- SDValue InDouble = N->getOperand(0);
- if (InDouble.getOpcode() == ARMISD::VMOVDRR)
- return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
- return SDValue();
-}
-
-/// getVShiftImm - Check if this is a valid build_vector for the immediate
-/// operand of a vector shift operation, where all the elements of the
-/// build_vector must have the same constant integer value.
-static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
- // Ignore bit_converts.
- while (Op.getOpcode() == ISD::BIT_CONVERT)
- Op = Op.getOperand(0);
- BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
- APInt SplatBits, SplatUndef;
- unsigned SplatBitSize;
- bool HasAnyUndefs;
- if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
- HasAnyUndefs, ElementBits) ||
- SplatBitSize > ElementBits)
- return false;
- Cnt = SplatBits.getSExtValue();
- return true;
-}
-
-/// isVShiftLImm - Check if this is a valid build_vector for the immediate
-/// operand of a vector shift left operation. That value must be in the range:
-/// 0 <= Value < ElementBits for a left shift; or
-/// 0 <= Value <= ElementBits for a long left shift.
-static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
- assert(VT.isVector() && "vector shift count is not a vector type");
- unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
- if (! getVShiftImm(Op, ElementBits, Cnt))
- return false;
- return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
-}
-
-/// isVShiftRImm - Check if this is a valid build_vector for the immediate
-/// operand of a vector shift right operation. For a shift opcode, the value
-/// is positive, but for an intrinsic the value must be negative. The
-/// absolute value must be in the range:
-/// 1 <= |Value| <= ElementBits for a right shift; or
-/// 1 <= |Value| <= ElementBits/2 for a narrow right shift.
-static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
- int64_t &Cnt) {
- assert(VT.isVector() && "vector shift count is not a vector type");
- unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
- if (! getVShiftImm(Op, ElementBits, Cnt))
- return false;
- if (isIntrinsic)
- Cnt = -Cnt;
- return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
-}
-
-/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
-static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
- unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
- switch (IntNo) {
- default:
- // Don't do anything for most intrinsics.
- break;
-
- // Vector shifts: check for immediate versions and lower them.
- // Note: This is done during DAG combining instead of DAG legalizing because
- // the build_vectors for 64-bit vector element shift counts are generally
- // not legal, and it is hard to see their values after they get legalized to
- // loads from a constant pool.
- case Intrinsic::arm_neon_vshifts:
- case Intrinsic::arm_neon_vshiftu:
- case Intrinsic::arm_neon_vshiftls:
- case Intrinsic::arm_neon_vshiftlu:
- case Intrinsic::arm_neon_vshiftn:
- case Intrinsic::arm_neon_vrshifts:
- case Intrinsic::arm_neon_vrshiftu:
- case Intrinsic::arm_neon_vrshiftn:
- case Intrinsic::arm_neon_vqshifts:
- case Intrinsic::arm_neon_vqshiftu:
- case Intrinsic::arm_neon_vqshiftsu:
- case Intrinsic::arm_neon_vqshiftns:
- case Intrinsic::arm_neon_vqshiftnu:
- case Intrinsic::arm_neon_vqshiftnsu:
- case Intrinsic::arm_neon_vqrshiftns:
- case Intrinsic::arm_neon_vqrshiftnu:
- case Intrinsic::arm_neon_vqrshiftnsu: {
- EVT VT = N->getOperand(1).getValueType();
- int64_t Cnt;
- unsigned VShiftOpc = 0;
-
- switch (IntNo) {
- case Intrinsic::arm_neon_vshifts:
- case Intrinsic::arm_neon_vshiftu:
- if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
- VShiftOpc = ARMISD::VSHL;
- break;
- }
- if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
- VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
- ARMISD::VSHRs : ARMISD::VSHRu);
- break;
- }
- return SDValue();
-
- case Intrinsic::arm_neon_vshiftls:
- case Intrinsic::arm_neon_vshiftlu:
- if (isVShiftLImm(N->getOperand(2), VT, true, Cnt))
- break;
- llvm_unreachable("invalid shift count for vshll intrinsic");
-
- case Intrinsic::arm_neon_vrshifts:
- case Intrinsic::arm_neon_vrshiftu:
- if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
- break;
- return SDValue();
-
- case Intrinsic::arm_neon_vqshifts:
- case Intrinsic::arm_neon_vqshiftu:
- if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
- break;
- return SDValue();
-
- case Intrinsic::arm_neon_vqshiftsu:
- if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
- break;
- llvm_unreachable("invalid shift count for vqshlu intrinsic");
-
- case Intrinsic::arm_neon_vshiftn:
- case Intrinsic::arm_neon_vrshiftn:
- case Intrinsic::arm_neon_vqshiftns:
- case Intrinsic::arm_neon_vqshiftnu:
- case Intrinsic::arm_neon_vqshiftnsu:
- case Intrinsic::arm_neon_vqrshiftns:
- case Intrinsic::arm_neon_vqrshiftnu:
- case Intrinsic::arm_neon_vqrshiftnsu:
- // Narrowing shifts require an immediate right shift.
- if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
- break;
- llvm_unreachable("invalid shift count for narrowing vector shift intrinsic");
-
- default:
- llvm_unreachable("unhandled vector shift");
- }
-
- switch (IntNo) {
- case Intrinsic::arm_neon_vshifts:
- case Intrinsic::arm_neon_vshiftu:
- // Opcode already set above.
- break;
- case Intrinsic::arm_neon_vshiftls:
- case Intrinsic::arm_neon_vshiftlu:
- if (Cnt == VT.getVectorElementType().getSizeInBits())
- VShiftOpc = ARMISD::VSHLLi;
- else
- VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ?
- ARMISD::VSHLLs : ARMISD::VSHLLu);
- break;
- case Intrinsic::arm_neon_vshiftn:
- VShiftOpc = ARMISD::VSHRN; break;
- case Intrinsic::arm_neon_vrshifts:
- VShiftOpc = ARMISD::VRSHRs; break;
- case Intrinsic::arm_neon_vrshiftu:
- VShiftOpc = ARMISD::VRSHRu; break;
- case Intrinsic::arm_neon_vrshiftn:
- VShiftOpc = ARMISD::VRSHRN; break;
- case Intrinsic::arm_neon_vqshifts:
- VShiftOpc = ARMISD::VQSHLs; break;
- case Intrinsic::arm_neon_vqshiftu:
- VShiftOpc = ARMISD::VQSHLu; break;
- case Intrinsic::arm_neon_vqshiftsu:
- VShiftOpc = ARMISD::VQSHLsu; break;
- case Intrinsic::arm_neon_vqshiftns:
- VShiftOpc = ARMISD::VQSHRNs; break;
- case Intrinsic::arm_neon_vqshiftnu:
- VShiftOpc = ARMISD::VQSHRNu; break;
- case Intrinsic::arm_neon_vqshiftnsu:
- VShiftOpc = ARMISD::VQSHRNsu; break;
- case Intrinsic::arm_neon_vqrshiftns:
- VShiftOpc = ARMISD::VQRSHRNs; break;
- case Intrinsic::arm_neon_vqrshiftnu:
- VShiftOpc = ARMISD::VQRSHRNu; break;
- case Intrinsic::arm_neon_vqrshiftnsu:
- VShiftOpc = ARMISD::VQRSHRNsu; break;
- }
-
- return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
- N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
- }
-
- case Intrinsic::arm_neon_vshiftins: {
- EVT VT = N->getOperand(1).getValueType();
- int64_t Cnt;
- unsigned VShiftOpc = 0;
-
- if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
- VShiftOpc = ARMISD::VSLI;
- else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
- VShiftOpc = ARMISD::VSRI;
- else {
- llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
- }
-
- return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
- N->getOperand(1), N->getOperand(2),
- DAG.getConstant(Cnt, MVT::i32));
- }
-
- case Intrinsic::arm_neon_vqrshifts:
- case Intrinsic::arm_neon_vqrshiftu:
- // No immediate versions of these to check for.
- break;
- }
-
- return SDValue();
-}
-
-/// PerformShiftCombine - Checks for immediate versions of vector shifts and
-/// lowers them. As with the vector shift intrinsics, this is done during DAG
-/// combining instead of DAG legalizing because the build_vectors for 64-bit
-/// vector element shift counts are generally not legal, and it is hard to see
-/// their values after they get legalized to loads from a constant pool.
-static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
- const ARMSubtarget *ST) {
- EVT VT = N->getValueType(0);
-
- // Nothing to be done for scalar shifts.
- if (! VT.isVector())
- return SDValue();
-
- assert(ST->hasNEON() && "unexpected vector shift");
- int64_t Cnt;
-
- switch (N->getOpcode()) {
- default: llvm_unreachable("unexpected shift opcode");
-
- case ISD::SHL:
- if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
- return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0),
- DAG.getConstant(Cnt, MVT::i32));
- break;
-
- case ISD::SRA:
- case ISD::SRL:
- if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
- unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
- ARMISD::VSHRs : ARMISD::VSHRu);
- return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0),
- DAG.getConstant(Cnt, MVT::i32));
- }
- }
- return SDValue();
-}
-
-/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
-/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
-static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
- const ARMSubtarget *ST) {
- SDValue N0 = N->getOperand(0);
-
- // Check for sign- and zero-extensions of vector extract operations of 8-
- // and 16-bit vector elements. NEON supports these directly. They are
- // handled during DAG combining because type legalization will promote them
- // to 32-bit types and it is messy to recognize the operations after that.
- if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
- SDValue Vec = N0.getOperand(0);
- SDValue Lane = N0.getOperand(1);
- EVT VT = N->getValueType(0);
- EVT EltVT = N0.getValueType();
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-
- if (VT == MVT::i32 &&
- (EltVT == MVT::i8 || EltVT == MVT::i16) &&
- TLI.isTypeLegal(Vec.getValueType())) {
-
- unsigned Opc = 0;
- switch (N->getOpcode()) {
- default: llvm_unreachable("unexpected opcode");
- case ISD::SIGN_EXTEND:
- Opc = ARMISD::VGETLANEs;
- break;
- case ISD::ZERO_EXTEND:
- case ISD::ANY_EXTEND:
- Opc = ARMISD::VGETLANEu;
- break;
- }
- return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane);
- }
- }
-
- return SDValue();
-}
-
-/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC
-/// to match f32 max/min patterns to use NEON vmax/vmin instructions.
-static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG,
- const ARMSubtarget *ST) {
- // If the target supports NEON, try to use vmax/vmin instructions for f32
- // selects like "x < y ? x : y". Unless the FiniteOnlyFPMath option is set,
- // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is
- // a NaN; only do the transformation when it matches that behavior.
-
- // For now only do this when using NEON for FP operations; if using VFP, it
- // is not obvious that the benefit outweighs the cost of switching to the
- // NEON pipeline.
- if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() ||
- N->getValueType(0) != MVT::f32)
- return SDValue();
-
- SDValue CondLHS = N->getOperand(0);
- SDValue CondRHS = N->getOperand(1);
- SDValue LHS = N->getOperand(2);
- SDValue RHS = N->getOperand(3);
- ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get();
-
- unsigned Opcode = 0;
- bool IsReversed;
- if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) {
- IsReversed = false; // x CC y ? x : y
- } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) {
- IsReversed = true ; // x CC y ? y : x
- } else {
- return SDValue();
- }
-
- bool IsUnordered;
- switch (CC) {
- default: break;
- case ISD::SETOLT:
- case ISD::SETOLE:
- case ISD::SETLT:
- case ISD::SETLE:
- case ISD::SETULT:
- case ISD::SETULE:
- // If LHS is NaN, an ordered comparison will be false and the result will
- // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS
- // != NaN. Likewise, for unordered comparisons, check for RHS != NaN.
- IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE);
- if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS))
- break;
- // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin
- // will return -0, so vmin can only be used for unsafe math or if one of
- // the operands is known to be nonzero.
- if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) &&
- !UnsafeFPMath &&
- !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
- break;
- Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN;
- break;
-
- case ISD::SETOGT:
- case ISD::SETOGE:
- case ISD::SETGT:
- case ISD::SETGE:
- case ISD::SETUGT:
- case ISD::SETUGE:
- // If LHS is NaN, an ordered comparison will be false and the result will
- // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS
- // != NaN. Likewise, for unordered comparisons, check for RHS != NaN.
- IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE);
- if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS))
- break;
- // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax
- // will return +0, so vmax can only be used for unsafe math or if one of
- // the operands is known to be nonzero.
- if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) &&
- !UnsafeFPMath &&
- !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
- break;
- Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX;
- break;
- }
-
- if (!Opcode)
- return SDValue();
- return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS);
-}
-
-SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
- DAGCombinerInfo &DCI) const {
- switch (N->getOpcode()) {
- default: break;
- case ISD::ADD: return PerformADDCombine(N, DCI);
- case ISD::SUB: return PerformSUBCombine(N, DCI);
- case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI);
- case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
- case ISD::SHL:
- case ISD::SRA:
- case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget);
- case ISD::SIGN_EXTEND:
- case ISD::ZERO_EXTEND:
- case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
- case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget);
- }
- return SDValue();
-}
-
-bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
- if (!Subtarget->hasV6Ops())
- // Pre-v6 does not support unaligned mem access.
- return false;
- else {
- // v6+ may or may not support unaligned mem access depending on the system
- // configuration.
- // FIXME: This is pretty conservative. Should we provide cmdline option to
- // control the behaviour?
- if (!Subtarget->isTargetDarwin())
- return false;
- }
-
- switch (VT.getSimpleVT().SimpleTy) {
- default:
- return false;
- case MVT::i8:
- case MVT::i16:
- case MVT::i32:
- return true;
- // FIXME: VLD1 etc with standard alignment is legal.
- }
-}
-
-static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
- if (V < 0)
- return false;
-
- unsigned Scale = 1;
- switch (VT.getSimpleVT().SimpleTy) {
- default: return false;
- case MVT::i1:
- case MVT::i8:
- // Scale == 1;
- break;
- case MVT::i16:
- // Scale == 2;
- Scale = 2;
- break;
- case MVT::i32:
- // Scale == 4;
- Scale = 4;
- break;
- }
-
- if ((V & (Scale - 1)) != 0)
- return false;
- V /= Scale;
- return V == (V & ((1LL << 5) - 1));
-}
-
-static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
- const ARMSubtarget *Subtarget) {
- bool isNeg = false;
- if (V < 0) {
- isNeg = true;
- V = - V;
- }
-
- switch (VT.getSimpleVT().SimpleTy) {
- default: return false;
- case MVT::i1:
- case MVT::i8:
- case MVT::i16:
- case MVT::i32:
- // + imm12 or - imm8
- if (isNeg)
- return V == (V & ((1LL << 8) - 1));
- return V == (V & ((1LL << 12) - 1));
- case MVT::f32:
- case MVT::f64:
- // Same as ARM mode. FIXME: NEON?
- if (!Subtarget->hasVFP2())
- return false;
- if ((V & 3) != 0)
- return false;
- V >>= 2;
- return V == (V & ((1LL << 8) - 1));
- }
-}
-
-/// isLegalAddressImmediate - Return true if the integer value can be used
-/// as the offset of the target addressing mode for load / store of the
-/// given type.
-static bool isLegalAddressImmediate(int64_t V, EVT VT,
- const ARMSubtarget *Subtarget) {
- if (V == 0)
- return true;
-
- if (!VT.isSimple())
- return false;
-
- if (Subtarget->isThumb1Only())
- return isLegalT1AddressImmediate(V, VT);
- else if (Subtarget->isThumb2())
- return isLegalT2AddressImmediate(V, VT, Subtarget);
-
- // ARM mode.
- if (V < 0)
- V = - V;
- switch (VT.getSimpleVT().SimpleTy) {
- default: return false;
- case MVT::i1:
- case MVT::i8:
- case MVT::i32:
- // +- imm12
- return V == (V & ((1LL << 12) - 1));
- case MVT::i16:
- // +- imm8
- return V == (V & ((1LL << 8) - 1));
- case MVT::f32:
- case MVT::f64:
- if (!Subtarget->hasVFP2()) // FIXME: NEON?
- return false;
- if ((V & 3) != 0)
- return false;
- V >>= 2;
- return V == (V & ((1LL << 8) - 1));
- }
-}
-
-bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
- EVT VT) const {
- int Scale = AM.Scale;
- if (Scale < 0)
- return false;
-
- switch (VT.getSimpleVT().SimpleTy) {
- default: return false;
- case MVT::i1:
- case MVT::i8:
- case MVT::i16:
- case MVT::i32:
- if (Scale == 1)
- return true;
- // r + r << imm
- Scale = Scale & ~1;
- return Scale == 2 || Scale == 4 || Scale == 8;
- case MVT::i64:
- // r + r
- if (((unsigned)AM.HasBaseReg + Scale) <= 2)
- return true;
- return false;
- case MVT::isVoid:
- // Note, we allow "void" uses (basically, uses that aren't loads or
- // stores), because arm allows folding a scale into many arithmetic
- // operations. This should be made more precise and revisited later.
-
- // Allow r << imm, but the imm has to be a multiple of two.
- if (Scale & 1) return false;
- return isPowerOf2_32(Scale);
- }
-}
-
-/// isLegalAddressingMode - Return true if the addressing mode represented
-/// by AM is legal for this target, for a load/store of the specified type.
-bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
- const Type *Ty) const {
- EVT VT = getValueType(Ty, true);
- if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
- return false;
-
- // Can never fold addr of global into load/store.
- if (AM.BaseGV)
- return false;
-
- switch (AM.Scale) {
- case 0: // no scale reg, must be "r+i" or "r", or "i".
- break;
- case 1:
- if (Subtarget->isThumb1Only())
- return false;
- // FALL THROUGH.
- default:
- // ARM doesn't support any R+R*scale+imm addr modes.
- if (AM.BaseOffs)
- return false;
-
- if (!VT.isSimple())
- return false;
-
- if (Subtarget->isThumb2())
- return isLegalT2ScaledAddressingMode(AM, VT);
-
- int Scale = AM.Scale;
- switch (VT.getSimpleVT().SimpleTy) {
- default: return false;
- case MVT::i1:
- case MVT::i8:
- case MVT::i32:
- if (Scale < 0) Scale = -Scale;
- if (Scale == 1)
- return true;
- // r + r << imm
- return isPowerOf2_32(Scale & ~1);
- case MVT::i16:
- case MVT::i64:
- // r + r
- if (((unsigned)AM.HasBaseReg + Scale) <= 2)
- return true;
- return false;
-
- case MVT::isVoid:
- // Note, we allow "void" uses (basically, uses that aren't loads or
- // stores), because arm allows folding a scale into many arithmetic
- // operations. This should be made more precise and revisited later.
-
- // Allow r << imm, but the imm has to be a multiple of two.
- if (Scale & 1) return false;
- return isPowerOf2_32(Scale);
- }
- break;
- }
- return true;
-}
-
-/// isLegalICmpImmediate - Return true if the specified immediate is legal
-/// icmp immediate, that is the target has icmp instructions which can compare
-/// a register against the immediate without having to materialize the
-/// immediate into a register.
-bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
- if (!Subtarget->isThumb())
- return ARM_AM::getSOImmVal(Imm) != -1;
- if (Subtarget->isThumb2())
- return ARM_AM::getT2SOImmVal(Imm) != -1;
- return Imm >= 0 && Imm <= 255;
-}
-
-static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
- bool isSEXTLoad, SDValue &Base,
- SDValue &Offset, bool &isInc,
- SelectionDAG &DAG) {
- if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
- return false;
-
- if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
- // AddressingMode 3
- Base = Ptr->getOperand(0);
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- if (RHSC < 0 && RHSC > -256) {
- assert(Ptr->getOpcode() == ISD::ADD);
- isInc = false;
- Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
- return true;
- }
- }
- isInc = (Ptr->getOpcode() == ISD::ADD);
- Offset = Ptr->getOperand(1);
- return true;
- } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
- // AddressingMode 2
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- if (RHSC < 0 && RHSC > -0x1000) {
- assert(Ptr->getOpcode() == ISD::ADD);
- isInc = false;
- Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
- Base = Ptr->getOperand(0);
- return true;
- }
- }
-
- if (Ptr->getOpcode() == ISD::ADD) {
- isInc = true;
- ARM_AM::ShiftOpc ShOpcVal= ARM_AM::getShiftOpcForNode(Ptr->getOperand(0));
- if (ShOpcVal != ARM_AM::no_shift) {
- Base = Ptr->getOperand(1);
- Offset = Ptr->getOperand(0);
- } else {
- Base = Ptr->getOperand(0);
- Offset = Ptr->getOperand(1);
- }
- return true;
- }
-
- isInc = (Ptr->getOpcode() == ISD::ADD);
- Base = Ptr->getOperand(0);
- Offset = Ptr->getOperand(1);
- return true;
- }
-
- // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
- return false;
-}
-
-static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
- bool isSEXTLoad, SDValue &Base,
- SDValue &Offset, bool &isInc,
- SelectionDAG &DAG) {
- if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
- return false;
-
- Base = Ptr->getOperand(0);
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
- int RHSC = (int)RHS->getZExtValue();
- if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
- assert(Ptr->getOpcode() == ISD::ADD);
- isInc = false;
- Offset = DAG.getConstant(-RHSC, RHS->getValueType(0));
- return true;
- } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
- isInc = Ptr->getOpcode() == ISD::ADD;
- Offset = DAG.getConstant(RHSC, RHS->getValueType(0));
- return true;
- }
- }
-
- return false;
-}
-
-/// getPreIndexedAddressParts - returns true by value, base pointer and
-/// offset pointer and addressing mode by reference if the node's address
-/// can be legally represented as pre-indexed load / store address.
-bool
-ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
- SDValue &Offset,
- ISD::MemIndexedMode &AM,
- SelectionDAG &DAG) const {
- if (Subtarget->isThumb1Only())
- return false;
-
- EVT VT;
- SDValue Ptr;
- bool isSEXTLoad = false;
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
- Ptr = LD->getBasePtr();
- VT = LD->getMemoryVT();
- isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
- } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- Ptr = ST->getBasePtr();
- VT = ST->getMemoryVT();
- } else
- return false;
-
- bool isInc;
- bool isLegal = false;
- if (Subtarget->isThumb2())
- isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
- Offset, isInc, DAG);
- else
- isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
- Offset, isInc, DAG);
- if (!isLegal)
- return false;
-
- AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
- return true;
-}
-
-/// getPostIndexedAddressParts - returns true by value, base pointer and
-/// offset pointer and addressing mode by reference if this node can be
-/// combined with a load / store to form a post-indexed load / store.
-bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
- SDValue &Base,
- SDValue &Offset,
- ISD::MemIndexedMode &AM,
- SelectionDAG &DAG) const {
- if (Subtarget->isThumb1Only())
- return false;
-
- EVT VT;
- SDValue Ptr;
- bool isSEXTLoad = false;
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
- VT = LD->getMemoryVT();
- isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
- } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- VT = ST->getMemoryVT();
- } else
- return false;
-
- bool isInc;
- bool isLegal = false;
- if (Subtarget->isThumb2())
- isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
- isInc, DAG);
- else
- isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
- isInc, DAG);
- if (!isLegal)
- return false;
-
- AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
- return true;
-}
-
-void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- const APInt &Mask,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const {
- KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
- switch (Op.getOpcode()) {
- default: break;
- case ARMISD::CMOV: {
- // Bits are known zero/one if known on the LHS and RHS.
- DAG.ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
- if (KnownZero == 0 && KnownOne == 0) return;
-
- APInt KnownZeroRHS, KnownOneRHS;
- DAG.ComputeMaskedBits(Op.getOperand(1), Mask,
- KnownZeroRHS, KnownOneRHS, Depth+1);
- KnownZero &= KnownZeroRHS;
- KnownOne &= KnownOneRHS;
- return;
- }
- }
-}
-
-//===----------------------------------------------------------------------===//
-// ARM Inline Assembly Support
-//===----------------------------------------------------------------------===//
-
-/// getConstraintType - Given a constraint letter, return the type of
-/// constraint it is for this target.
-ARMTargetLowering::ConstraintType
-ARMTargetLowering::getConstraintType(const std::string &Constraint) const {
- if (Constraint.size() == 1) {
- switch (Constraint[0]) {
- default: break;
- case 'l': return C_RegisterClass;
- case 'w': return C_RegisterClass;
- }
- }
- return TargetLowering::getConstraintType(Constraint);
-}
-
-std::pair<unsigned, const TargetRegisterClass*>
-ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const {
- if (Constraint.size() == 1) {
- // GCC ARM Constraint Letters
- switch (Constraint[0]) {
- case 'l':
- if (Subtarget->isThumb())
- return std::make_pair(0U, ARM::tGPRRegisterClass);
- else
- return std::make_pair(0U, ARM::GPRRegisterClass);
- case 'r':
- return std::make_pair(0U, ARM::GPRRegisterClass);
- case 'w':
- if (VT == MVT::f32)
- return std::make_pair(0U, ARM::SPRRegisterClass);
- if (VT.getSizeInBits() == 64)
- return std::make_pair(0U, ARM::DPRRegisterClass);
- if (VT.getSizeInBits() == 128)
- return std::make_pair(0U, ARM::QPRRegisterClass);
- break;
- }
- }
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
-}
-
-std::vector<unsigned> ARMTargetLowering::
-getRegClassForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const {
- if (Constraint.size() != 1)
- return std::vector<unsigned>();
-
- switch (Constraint[0]) { // GCC ARM Constraint Letters
- default: break;
- case 'l':
- return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
- ARM::R4, ARM::R5, ARM::R6, ARM::R7,
- 0);
- case 'r':
- return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
- ARM::R4, ARM::R5, ARM::R6, ARM::R7,
- ARM::R8, ARM::R9, ARM::R10, ARM::R11,
- ARM::R12, ARM::LR, 0);
- case 'w':
- if (VT == MVT::f32)
- return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3,
- ARM::S4, ARM::S5, ARM::S6, ARM::S7,
- ARM::S8, ARM::S9, ARM::S10, ARM::S11,
- ARM::S12,ARM::S13,ARM::S14,ARM::S15,
- ARM::S16,ARM::S17,ARM::S18,ARM::S19,
- ARM::S20,ARM::S21,ARM::S22,ARM::S23,
- ARM::S24,ARM::S25,ARM::S26,ARM::S27,
- ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0);
- if (VT.getSizeInBits() == 64)
- return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3,
- ARM::D4, ARM::D5, ARM::D6, ARM::D7,
- ARM::D8, ARM::D9, ARM::D10,ARM::D11,
- ARM::D12,ARM::D13,ARM::D14,ARM::D15, 0);
- if (VT.getSizeInBits() == 128)
- return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3,
- ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0);
- break;
- }
-
- return std::vector<unsigned>();
-}
-
-/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
-/// vector. If it is invalid, don't add anything to Ops.
-void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
- char Constraint,
- bool hasMemory,
- std::vector<SDValue>&Ops,
- SelectionDAG &DAG) const {
- SDValue Result(0, 0);
-
- switch (Constraint) {
- default: break;
- case 'I': case 'J': case 'K': case 'L':
- case 'M': case 'N': case 'O':
- ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
- if (!C)
- return;
-
- int64_t CVal64 = C->getSExtValue();
- int CVal = (int) CVal64;
- // None of these constraints allow values larger than 32 bits. Check
- // that the value fits in an int.
- if (CVal != CVal64)
- return;
-
- switch (Constraint) {
- case 'I':
- if (Subtarget->isThumb1Only()) {
- // This must be a constant between 0 and 255, for ADD
- // immediates.
- if (CVal >= 0 && CVal <= 255)
- break;
- } else if (Subtarget->isThumb2()) {
- // A constant that can be used as an immediate value in a
- // data-processing instruction.
- if (ARM_AM::getT2SOImmVal(CVal) != -1)
- break;
- } else {
- // A constant that can be used as an immediate value in a
- // data-processing instruction.
- if (ARM_AM::getSOImmVal(CVal) != -1)
- break;
- }
- return;
-
- case 'J':
- if (Subtarget->isThumb()) { // FIXME thumb2
- // This must be a constant between -255 and -1, for negated ADD
- // immediates. This can be used in GCC with an "n" modifier that
- // prints the negated value, for use with SUB instructions. It is
- // not useful otherwise but is implemented for compatibility.
- if (CVal >= -255 && CVal <= -1)
- break;
- } else {
- // This must be a constant between -4095 and 4095. It is not clear
- // what this constraint is intended for. Implemented for
- // compatibility with GCC.
- if (CVal >= -4095 && CVal <= 4095)
- break;
- }
- return;
-
- case 'K':
- if (Subtarget->isThumb1Only()) {
- // A 32-bit value where only one byte has a nonzero value. Exclude
- // zero to match GCC. This constraint is used by GCC internally for
- // constants that can be loaded with a move/shift combination.
- // It is not useful otherwise but is implemented for compatibility.
- if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
- break;
- } else if (Subtarget->isThumb2()) {
- // A constant whose bitwise inverse can be used as an immediate
- // value in a data-processing instruction. This can be used in GCC
- // with a "B" modifier that prints the inverted value, for use with
- // BIC and MVN instructions. It is not useful otherwise but is
- // implemented for compatibility.
- if (ARM_AM::getT2SOImmVal(~CVal) != -1)
- break;
- } else {
- // A constant whose bitwise inverse can be used as an immediate
- // value in a data-processing instruction. This can be used in GCC
- // with a "B" modifier that prints the inverted value, for use with
- // BIC and MVN instructions. It is not useful otherwise but is
- // implemented for compatibility.
- if (ARM_AM::getSOImmVal(~CVal) != -1)
- break;
- }
- return;
-
- case 'L':
- if (Subtarget->isThumb1Only()) {
- // This must be a constant between -7 and 7,
- // for 3-operand ADD/SUB immediate instructions.
- if (CVal >= -7 && CVal < 7)
- break;
- } else if (Subtarget->isThumb2()) {
- // A constant whose negation can be used as an immediate value in a
- // data-processing instruction. This can be used in GCC with an "n"
- // modifier that prints the negated value, for use with SUB
- // instructions. It is not useful otherwise but is implemented for
- // compatibility.
- if (ARM_AM::getT2SOImmVal(-CVal) != -1)
- break;
- } else {
- // A constant whose negation can be used as an immediate value in a
- // data-processing instruction. This can be used in GCC with an "n"
- // modifier that prints the negated value, for use with SUB
- // instructions. It is not useful otherwise but is implemented for
- // compatibility.
- if (ARM_AM::getSOImmVal(-CVal) != -1)
- break;
- }
- return;
-
- case 'M':
- if (Subtarget->isThumb()) { // FIXME thumb2
- // This must be a multiple of 4 between 0 and 1020, for
- // ADD sp + immediate.
- if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
- break;
- } else {
- // A power of two or a constant between 0 and 32. This is used in
- // GCC for the shift amount on shifted register operands, but it is
- // useful in general for any shift amounts.
- if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
- break;
- }
- return;
-
- case 'N':
- if (Subtarget->isThumb()) { // FIXME thumb2
- // This must be a constant between 0 and 31, for shift amounts.
- if (CVal >= 0 && CVal <= 31)
- break;
- }
- return;
-
- case 'O':
- if (Subtarget->isThumb()) { // FIXME thumb2
- // This must be a multiple of 4 between -508 and 508, for
- // ADD/SUB sp = sp + immediate.
- if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
- break;
- }
- return;
- }
- Result = DAG.getTargetConstant(CVal, Op.getValueType());
- break;
- }
-
- if (Result.getNode()) {
- Ops.push_back(Result);
- return;
- }
- return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, hasMemory,
- Ops, DAG);
-}
-
-bool
-ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
- // The ARM target isn't yet aware of offsets.
- return false;
-}
-
-int ARM::getVFPf32Imm(const APFloat &FPImm) {
- APInt Imm = FPImm.bitcastToAPInt();
- uint32_t Sign = Imm.lshr(31).getZExtValue() & 1;
- int32_t Exp = (Imm.lshr(23).getSExtValue() & 0xff) - 127; // -126 to 127
- int64_t Mantissa = Imm.getZExtValue() & 0x7fffff; // 23 bits
-
- // We can handle 4 bits of mantissa.
- // mantissa = (16+UInt(e:f:g:h))/16.
- if (Mantissa & 0x7ffff)
- return -1;
- Mantissa >>= 19;
- if ((Mantissa & 0xf) != Mantissa)
- return -1;
-
- // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
- if (Exp < -3 || Exp > 4)
- return -1;
- Exp = ((Exp+3) & 0x7) ^ 4;
-
- return ((int)Sign << 7) | (Exp << 4) | Mantissa;
-}
-
-int ARM::getVFPf64Imm(const APFloat &FPImm) {
- APInt Imm = FPImm.bitcastToAPInt();
- uint64_t Sign = Imm.lshr(63).getZExtValue() & 1;
- int64_t Exp = (Imm.lshr(52).getSExtValue() & 0x7ff) - 1023; // -1022 to 1023
- uint64_t Mantissa = Imm.getZExtValue() & 0xfffffffffffffLL;
-
- // We can handle 4 bits of mantissa.
- // mantissa = (16+UInt(e:f:g:h))/16.
- if (Mantissa & 0xffffffffffffLL)
- return -1;
- Mantissa >>= 48;
- if ((Mantissa & 0xf) != Mantissa)
- return -1;
-
- // We can handle 3 bits of exponent: exp == UInt(NOT(b):c:d)-3
- if (Exp < -3 || Exp > 4)
- return -1;
- Exp = ((Exp+3) & 0x7) ^ 4;
-
- return ((int)Sign << 7) | (Exp << 4) | Mantissa;
-}
-
-/// isFPImmLegal - Returns true if the target can instruction select the
-/// specified FP immediate natively. If false, the legalizer will
-/// materialize the FP immediate as a load from a constant pool.
-bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
- if (!Subtarget->hasVFP3())
- return false;
- if (VT == MVT::f32)
- return ARM::getVFPf32Imm(Imm) != -1;
- if (VT == MVT::f64)
- return ARM::getVFPf64Imm(Imm) != -1;
- return false;
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMISelLowering.h b/libclamav/c++/llvm/lib/Target/ARM/ARMISelLowering.h
deleted file mode 100644
index f8f8adc..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMISelLowering.h
+++ /dev/null
@@ -1,353 +0,0 @@
-//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interfaces that ARM uses to lower LLVM code into a
-// selection DAG.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARMISELLOWERING_H
-#define ARMISELLOWERING_H
-
-#include "ARMSubtarget.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/CallingConvLower.h"
-#include <vector>
-
-namespace llvm {
- class ARMConstantPoolValue;
-
- namespace ARMISD {
- // ARM Specific DAG Nodes
- enum NodeType {
- // Start the numbering where the builtin ops and target ops leave off.
- FIRST_NUMBER = ISD::BUILTIN_OP_END,
-
- Wrapper, // Wrapper - A wrapper node for TargetConstantPool,
- // TargetExternalSymbol, and TargetGlobalAddress.
- WrapperJT, // WrapperJT - A wrapper node for TargetJumpTable
-
- CALL, // Function call.
- CALL_PRED, // Function call that's predicable.
- CALL_NOLINK, // Function call with branch not branch-and-link.
- tCALL, // Thumb function call.
- BRCOND, // Conditional branch.
- BR_JT, // Jumptable branch.
- BR2_JT, // Jumptable branch (2 level - jumptable entry is a jump).
- RET_FLAG, // Return with a flag operand.
-
- PIC_ADD, // Add with a PC operand and a PIC label.
-
- CMP, // ARM compare instructions.
- CMPZ, // ARM compare that sets only Z flag.
- CMPFP, // ARM VFP compare instruction, sets FPSCR.
- CMPFPw0, // ARM VFP compare against zero instruction, sets FPSCR.
- FMSTAT, // ARM fmstat instruction.
- CMOV, // ARM conditional move instructions.
- CNEG, // ARM conditional negate instructions.
-
- RBIT, // ARM bitreverse instruction
-
- FTOSI, // FP to sint within a FP register.
- FTOUI, // FP to uint within a FP register.
- SITOF, // sint to FP within a FP register.
- UITOF, // uint to FP within a FP register.
-
- SRL_FLAG, // V,Flag = srl_flag X -> srl X, 1 + save carry out.
- SRA_FLAG, // V,Flag = sra_flag X -> sra X, 1 + save carry out.
- RRX, // V = RRX X, Flag -> srl X, 1 + shift in carry flag.
-
- VMOVRRD, // double to two gprs.
- VMOVDRR, // Two gprs to double.
-
- EH_SJLJ_SETJMP, // SjLj exception handling setjmp.
- EH_SJLJ_LONGJMP, // SjLj exception handling longjmp.
-
- THREAD_POINTER,
-
- DYN_ALLOC, // Dynamic allocation on the stack.
-
- MEMBARRIER, // Memory barrier
- SYNCBARRIER, // Memory sync barrier
-
- VCEQ, // Vector compare equal.
- VCGE, // Vector compare greater than or equal.
- VCGEU, // Vector compare unsigned greater than or equal.
- VCGT, // Vector compare greater than.
- VCGTU, // Vector compare unsigned greater than.
- VTST, // Vector test bits.
-
- // Vector shift by immediate:
- VSHL, // ...left
- VSHRs, // ...right (signed)
- VSHRu, // ...right (unsigned)
- VSHLLs, // ...left long (signed)
- VSHLLu, // ...left long (unsigned)
- VSHLLi, // ...left long (with maximum shift count)
- VSHRN, // ...right narrow
-
- // Vector rounding shift by immediate:
- VRSHRs, // ...right (signed)
- VRSHRu, // ...right (unsigned)
- VRSHRN, // ...right narrow
-
- // Vector saturating shift by immediate:
- VQSHLs, // ...left (signed)
- VQSHLu, // ...left (unsigned)
- VQSHLsu, // ...left (signed to unsigned)
- VQSHRNs, // ...right narrow (signed)
- VQSHRNu, // ...right narrow (unsigned)
- VQSHRNsu, // ...right narrow (signed to unsigned)
-
- // Vector saturating rounding shift by immediate:
- VQRSHRNs, // ...right narrow (signed)
- VQRSHRNu, // ...right narrow (unsigned)
- VQRSHRNsu, // ...right narrow (signed to unsigned)
-
- // Vector shift and insert:
- VSLI, // ...left
- VSRI, // ...right
-
- // Vector get lane (VMOV scalar to ARM core register)
- // (These are used for 8- and 16-bit element types only.)
- VGETLANEu, // zero-extend vector extract element
- VGETLANEs, // sign-extend vector extract element
-
- // Vector duplicate:
- VDUP,
- VDUPLANE,
-
- // Vector shuffles:
- VEXT, // extract
- VREV64, // reverse elements within 64-bit doublewords
- VREV32, // reverse elements within 32-bit words
- VREV16, // reverse elements within 16-bit halfwords
- VZIP, // zip (interleave)
- VUZP, // unzip (deinterleave)
- VTRN, // transpose
-
- // Floating-point max and min:
- FMAX,
- FMIN
- };
- }
-
- /// Define some predicates that are used for node matching.
- namespace ARM {
- /// getVMOVImm - If this is a build_vector of constants which can be
- /// formed by using a VMOV instruction of the specified element size,
- /// return the constant being splatted. The ByteSize field indicates the
- /// number of bytes of each element [1248].
- SDValue getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
-
- /// getVFPf32Imm / getVFPf64Imm - If the given fp immediate can be
- /// materialized with a VMOV.f32 / VMOV.f64 (i.e. fconsts / fconstd)
- /// instruction, returns its 8-bit integer representation. Otherwise,
- /// returns -1.
- int getVFPf32Imm(const APFloat &FPImm);
- int getVFPf64Imm(const APFloat &FPImm);
- }
-
- //===--------------------------------------------------------------------===//
- // ARMTargetLowering - ARM Implementation of the TargetLowering interface
-
- class ARMTargetLowering : public TargetLowering {
- int VarArgsFrameIndex; // FrameIndex for start of varargs area.
- public:
- explicit ARMTargetLowering(TargetMachine &TM);
-
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);
-
- /// ReplaceNodeResults - Replace the results of node with an illegal result
- /// type with new values built out of custom code.
- ///
- virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG);
-
- virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
-
- virtual const char *getTargetNodeName(unsigned Opcode) const;
-
- virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB,
- DenseMap<MachineBasicBlock*, MachineBasicBlock*>*) const;
-
- /// allowsUnalignedMemoryAccesses - Returns true if the target allows
- /// unaligned memory accesses. of the specified type.
- /// FIXME: Add getOptimalMemOpType to implement memcpy with NEON?
- virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;
-
- /// isLegalAddressingMode - Return true if the addressing mode represented
- /// by AM is legal for this target, for a load/store of the specified type.
- virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const;
- bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
-
- /// isLegalICmpImmediate - Return true if the specified immediate is legal
- /// icmp immediate, that is the target has icmp instructions which can compare
- /// a register against the immediate without having to materialize the
- /// immediate into a register.
- virtual bool isLegalICmpImmediate(int64_t Imm) const;
-
- /// getPreIndexedAddressParts - returns true by value, base pointer and
- /// offset pointer and addressing mode by reference if the node's address
- /// can be legally represented as pre-indexed load / store address.
- virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
- SDValue &Offset,
- ISD::MemIndexedMode &AM,
- SelectionDAG &DAG) const;
-
- /// getPostIndexedAddressParts - returns true by value, base pointer and
- /// offset pointer and addressing mode by reference if this node can be
- /// combined with a load / store to form a post-indexed load / store.
- virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
- SDValue &Base, SDValue &Offset,
- ISD::MemIndexedMode &AM,
- SelectionDAG &DAG) const;
-
- virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- const APInt &Mask,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const;
-
-
- ConstraintType getConstraintType(const std::string &Constraint) const;
- std::pair<unsigned, const TargetRegisterClass*>
- getRegForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const;
- std::vector<unsigned>
- getRegClassForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const;
-
- /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
- /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
- /// true it means one of the asm constraint of the inline asm instruction
- /// being processed is 'm'.
- virtual void LowerAsmOperandForConstraint(SDValue Op,
- char ConstraintLetter,
- bool hasMemory,
- std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const;
-
- virtual const ARMSubtarget* getSubtarget() {
- return Subtarget;
- }
-
- /// getFunctionAlignment - Return the Log2 alignment of this function.
- virtual unsigned getFunctionAlignment(const Function *F) const;
-
- bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
- bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
-
- /// isFPImmLegal - Returns true if the target can instruction select the
- /// specified FP immediate natively. If false, the legalizer will
- /// materialize the FP immediate as a load from a constant pool.
- virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
-
- private:
- /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
- /// make the right decision when generating code for different targets.
- const ARMSubtarget *Subtarget;
-
- /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
- ///
- unsigned ARMPCLabelIndex;
-
- void addTypeForNEON(EVT VT, EVT PromotedLdStVT, EVT PromotedBitwiseVT);
- void addDRTypeForNEON(EVT VT);
- void addQRTypeForNEON(EVT VT);
-
- typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
- void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
- SDValue Chain, SDValue &Arg,
- RegsToPassVector &RegsToPass,
- CCValAssign &VA, CCValAssign &NextVA,
- SDValue &StackPtr,
- SmallVector<SDValue, 8> &MemOpChains,
- ISD::ArgFlagsTy Flags);
- SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
- SDValue &Root, SelectionDAG &DAG, DebugLoc dl);
-
- CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return, bool isVarArg) const;
- SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
- DebugLoc dl, SelectionDAG &DAG,
- const CCValAssign &VA,
- ISD::ArgFlagsTy Flags);
- SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG);
- SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
- const ARMSubtarget *Subtarget);
- SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG);
- SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG);
- SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG);
- SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
- SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
- SelectionDAG &DAG);
- SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
- SelectionDAG &DAG);
- SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG);
- SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG);
- SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG);
- SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG);
- SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG);
- SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG);
- SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG);
- SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG);
-
- SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- bool AlwaysInline,
- const Value *DstSV, uint64_t DstSVOff,
- const Value *SrcSV, uint64_t SrcSVOff);
- SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals);
-
- virtual SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals);
-
- virtual SDValue
- LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg,
- bool &isTailCall,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals);
-
- virtual SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- DebugLoc dl, SelectionDAG &DAG);
-
- SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
- SDValue &ARMCC, SelectionDAG &DAG, DebugLoc dl);
-
- MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Size) const;
- MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
- MachineBasicBlock *BB,
- unsigned Size,
- unsigned BinOpcode) const;
-
- };
-}
-
-#endif // ARMISELLOWERING_H
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMInstrFormats.td b/libclamav/c++/llvm/lib/Target/ARM/ARMInstrFormats.td
deleted file mode 100644
index 76595fa..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMInstrFormats.td
+++ /dev/null
@@ -1,1629 +0,0 @@
-//===- ARMInstrFormats.td - ARM Instruction Formats --*- tablegen -*---------=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-//
-// ARM Instruction Format Definitions.
-//
-
-// Format specifies the encoding used by the instruction. This is part of the
-// ad-hoc solution used to emit machine instruction encodings by our machine
-// code emitter.
-class Format<bits<5> val> {
- bits<5> Value = val;
-}
-
-def Pseudo : Format<0>;
-def MulFrm : Format<1>;
-def BrFrm : Format<2>;
-def BrMiscFrm : Format<3>;
-
-def DPFrm : Format<4>;
-def DPSoRegFrm : Format<5>;
-
-def LdFrm : Format<6>;
-def StFrm : Format<7>;
-def LdMiscFrm : Format<8>;
-def StMiscFrm : Format<9>;
-def LdStMulFrm : Format<10>;
-
-def LdStExFrm : Format<28>;
-
-def ArithMiscFrm : Format<11>;
-def ExtFrm : Format<12>;
-
-def VFPUnaryFrm : Format<13>;
-def VFPBinaryFrm : Format<14>;
-def VFPConv1Frm : Format<15>;
-def VFPConv2Frm : Format<16>;
-def VFPConv3Frm : Format<17>;
-def VFPConv4Frm : Format<18>;
-def VFPConv5Frm : Format<19>;
-def VFPLdStFrm : Format<20>;
-def VFPLdStMulFrm : Format<21>;
-def VFPMiscFrm : Format<22>;
-
-def ThumbFrm : Format<23>;
-
-def NEONFrm : Format<24>;
-def NEONGetLnFrm : Format<25>;
-def NEONSetLnFrm : Format<26>;
-def NEONDupFrm : Format<27>;
-
-def MiscFrm : Format<29>;
-def ThumbMiscFrm : Format<30>;
-
-// Misc flags.
-
-// the instruction has a Rn register operand.
-// UnaryDP - Indicates this is a unary data processing instruction, i.e.
-// it doesn't have a Rn operand.
-class UnaryDP { bit isUnaryDataProc = 1; }
-
-// Xform16Bit - Indicates this Thumb2 instruction may be transformed into
-// a 16-bit Thumb instruction if certain conditions are met.
-class Xform16Bit { bit canXformTo16Bit = 1; }
-
-//===----------------------------------------------------------------------===//
-// ARM Instruction flags. These need to match ARMInstrInfo.h.
-//
-
-// Addressing mode.
-class AddrMode<bits<4> val> {
- bits<4> Value = val;
-}
-def AddrModeNone : AddrMode<0>;
-def AddrMode1 : AddrMode<1>;
-def AddrMode2 : AddrMode<2>;
-def AddrMode3 : AddrMode<3>;
-def AddrMode4 : AddrMode<4>;
-def AddrMode5 : AddrMode<5>;
-def AddrMode6 : AddrMode<6>;
-def AddrModeT1_1 : AddrMode<7>;
-def AddrModeT1_2 : AddrMode<8>;
-def AddrModeT1_4 : AddrMode<9>;
-def AddrModeT1_s : AddrMode<10>;
-def AddrModeT2_i12: AddrMode<11>;
-def AddrModeT2_i8 : AddrMode<12>;
-def AddrModeT2_so : AddrMode<13>;
-def AddrModeT2_pc : AddrMode<14>;
-def AddrModeT2_i8s4 : AddrMode<15>;
-
-// Instruction size.
-class SizeFlagVal<bits<3> val> {
- bits<3> Value = val;
-}
-def SizeInvalid : SizeFlagVal<0>; // Unset.
-def SizeSpecial : SizeFlagVal<1>; // Pseudo or special.
-def Size8Bytes : SizeFlagVal<2>;
-def Size4Bytes : SizeFlagVal<3>;
-def Size2Bytes : SizeFlagVal<4>;
-
-// Load / store index mode.
-class IndexMode<bits<2> val> {
- bits<2> Value = val;
-}
-def IndexModeNone : IndexMode<0>;
-def IndexModePre : IndexMode<1>;
-def IndexModePost : IndexMode<2>;
-
-// Instruction execution domain.
-class Domain<bits<2> val> {
- bits<2> Value = val;
-}
-def GenericDomain : Domain<0>;
-def VFPDomain : Domain<1>; // Instructions in VFP domain only
-def NeonDomain : Domain<2>; // Instructions in Neon domain only
-def VFPNeonDomain : Domain<3>; // Instructions in both VFP & Neon domains
-
-//===----------------------------------------------------------------------===//
-
-// ARM special operands.
-//
-
-// ARM Predicate operand. Default to 14 = always (AL). Second part is CC
-// register whose default is 0 (no register).
-def pred : PredicateOperand<OtherVT, (ops i32imm, CCR),
- (ops (i32 14), (i32 zero_reg))> {
- let PrintMethod = "printPredicateOperand";
-}
-
-// Conditional code result for instructions whose 's' bit is set, e.g. subs.
-def cc_out : OptionalDefOperand<OtherVT, (ops CCR), (ops (i32 zero_reg))> {
- let PrintMethod = "printSBitModifierOperand";
-}
-
-// Same as cc_out except it defaults to setting CPSR.
-def s_cc_out : OptionalDefOperand<OtherVT, (ops CCR), (ops (i32 CPSR))> {
- let PrintMethod = "printSBitModifierOperand";
-}
-
-//===----------------------------------------------------------------------===//
-
-// ARM Instruction templates.
-//
-
-class InstTemplate<AddrMode am, SizeFlagVal sz, IndexMode im,
- Format f, Domain d, string cstr, InstrItinClass itin>
- : Instruction {
- let Namespace = "ARM";
-
- // TSFlagsFields
- AddrMode AM = am;
- bits<4> AddrModeBits = AM.Value;
-
- SizeFlagVal SZ = sz;
- bits<3> SizeFlag = SZ.Value;
-
- IndexMode IM = im;
- bits<2> IndexModeBits = IM.Value;
-
- Format F = f;
- bits<5> Form = F.Value;
-
- Domain D = d;
- bits<2> Dom = D.Value;
-
- //
- // Attributes specific to ARM instructions...
- //
- bit isUnaryDataProc = 0;
- bit canXformTo16Bit = 0;
-
- let Constraints = cstr;
- let Itinerary = itin;
-}
-
-class Encoding {
- field bits<32> Inst;
-}
-
-class InstARM<AddrMode am, SizeFlagVal sz, IndexMode im,
- Format f, Domain d, string cstr, InstrItinClass itin>
- : InstTemplate<am, sz, im, f, d, cstr, itin>, Encoding;
-
-// This Encoding-less class is used by Thumb1 to specify the encoding bits later
-// on by adding flavors to specific instructions.
-class InstThumb<AddrMode am, SizeFlagVal sz, IndexMode im,
- Format f, Domain d, string cstr, InstrItinClass itin>
- : InstTemplate<am, sz, im, f, d, cstr, itin>;
-
-class PseudoInst<dag oops, dag iops, InstrItinClass itin,
- string asm, list<dag> pattern>
- : InstARM<AddrModeNone, SizeSpecial, IndexModeNone, Pseudo, GenericDomain,
- "", itin> {
- let OutOperandList = oops;
- let InOperandList = iops;
- let AsmString = asm;
- let Pattern = pattern;
-}
-
-// Almost all ARM instructions are predicable.
-class I<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
- IndexMode im, Format f, InstrItinClass itin,
- string opc, string asm, string cstr,
- list<dag> pattern>
- : InstARM<am, sz, im, f, GenericDomain, cstr, itin> {
- let OutOperandList = oops;
- let InOperandList = !con(iops, (ops pred:$p));
- let AsmString = !strconcat(opc, !strconcat("${p}", asm));
- let Pattern = pattern;
- list<Predicate> Predicates = [IsARM];
-}
-// A few are not predicable
-class InoP<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
- IndexMode im, Format f, InstrItinClass itin,
- string opc, string asm, string cstr,
- list<dag> pattern>
- : InstARM<am, sz, im, f, GenericDomain, cstr, itin> {
- let OutOperandList = oops;
- let InOperandList = iops;
- let AsmString = !strconcat(opc, asm);
- let Pattern = pattern;
- let isPredicable = 0;
- list<Predicate> Predicates = [IsARM];
-}
-
-// Same as I except it can optionally modify CPSR. Note it's modeled as
-// an input operand since by default it's a zero register. It will
-// become an implicit def once it's "flipped".
-class sI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
- IndexMode im, Format f, InstrItinClass itin,
- string opc, string asm, string cstr,
- list<dag> pattern>
- : InstARM<am, sz, im, f, GenericDomain, cstr, itin> {
- let OutOperandList = oops;
- let InOperandList = !con(iops, (ops pred:$p, cc_out:$s));
- let AsmString = !strconcat(opc, !strconcat("${p}${s}", asm));
- let Pattern = pattern;
- list<Predicate> Predicates = [IsARM];
-}
-
-// Special cases
-class XI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
- IndexMode im, Format f, InstrItinClass itin,
- string asm, string cstr, list<dag> pattern>
- : InstARM<am, sz, im, f, GenericDomain, cstr, itin> {
- let OutOperandList = oops;
- let InOperandList = iops;
- let AsmString = asm;
- let Pattern = pattern;
- list<Predicate> Predicates = [IsARM];
-}
-
-class AI<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern>;
-class AsI<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : sI<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern>;
-class AXI<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern>;
-class AInoP<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : InoP<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern>;
-
-// Ctrl flow instructions
-class ABI<bits<4> opcod, dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, BrFrm, itin,
- opc, asm, "", pattern> {
- let Inst{27-24} = opcod;
-}
-class ABXI<bits<4> opcod, dag oops, dag iops, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, BrFrm, itin,
- asm, "", pattern> {
- let Inst{27-24} = opcod;
-}
-class ABXIx2<dag oops, dag iops, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrModeNone, Size8Bytes, IndexModeNone, BrMiscFrm, itin,
- asm, "", pattern>;
-
-// BR_JT instructions
-class JTI<dag oops, dag iops, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrModeNone, SizeSpecial, IndexModeNone, BrMiscFrm, itin,
- asm, "", pattern>;
-
-
-// Atomic load/store instructions
-
-class AIldrex<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, LdStExFrm, itin,
- opc, asm, "", pattern> {
- let Inst{27-23} = 0b00011;
- let Inst{22-21} = opcod;
- let Inst{20} = 1;
- let Inst{11-0} = 0b111110011111;
-}
-class AIstrex<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, LdStExFrm, itin,
- opc, asm, "", pattern> {
- let Inst{27-23} = 0b00011;
- let Inst{22-21} = opcod;
- let Inst{20} = 0;
- let Inst{11-4} = 0b11111001;
-}
-
-// addrmode1 instructions
-class AI1<bits<4> opcod, dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode1, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{24-21} = opcod;
- let Inst{27-26} = {0,0};
-}
-class AsI1<bits<4> opcod, dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : sI<oops, iops, AddrMode1, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{24-21} = opcod;
- let Inst{27-26} = {0,0};
-}
-class AXI1<bits<4> opcod, dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode1, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{24-21} = opcod;
- let Inst{27-26} = {0,0};
-}
-class AI1x2<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode1, Size8Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern>;
-
-
-// addrmode2 loads and stores
-class AI2<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{27-26} = {0,1};
-}
-
-// loads
-class AI2ldw<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = {0,1};
-}
-class AXI2ldw<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = {0,1};
-}
-class AI2ldb<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 1; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = {0,1};
-}
-class AXI2ldb<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 1; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = {0,1};
-}
-
-// stores
-class AI2stw<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = {0,1};
-}
-class AXI2stw<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = {0,1};
-}
-class AI2stb<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 1; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = {0,1};
-}
-class AXI2stb<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode2, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 1; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = {0,1};
-}
-
-// Pre-indexed loads
-class AI2ldwpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{20} = 1; // L bit
- let Inst{21} = 1; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = {0,1};
-}
-class AI2ldbpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{20} = 1; // L bit
- let Inst{21} = 1; // W bit
- let Inst{22} = 1; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = {0,1};
-}
-
-// Pre-indexed stores
-class AI2stwpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{20} = 0; // L bit
- let Inst{21} = 1; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = {0,1};
-}
-class AI2stbpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{20} = 0; // L bit
- let Inst{21} = 1; // W bit
- let Inst{22} = 1; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-26} = {0,1};
-}
-
-// Post-indexed loads
-class AI2ldwpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr,pattern> {
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 0; // P bit
- let Inst{27-26} = {0,1};
-}
-class AI2ldbpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr,pattern> {
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 1; // B bit
- let Inst{24} = 0; // P bit
- let Inst{27-26} = {0,1};
-}
-
-// Post-indexed stores
-class AI2stwpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr,pattern> {
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 0; // P bit
- let Inst{27-26} = {0,1};
-}
-class AI2stbpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode2, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr,pattern> {
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 1; // B bit
- let Inst{24} = 0; // P bit
- let Inst{27-26} = {0,1};
-}
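Only four control bits distinguish the addrmode2 variants above: L (bit 20) selects load vs. store, B (bit 22) selects byte vs. word, P (bit 24) selects pre- vs. post-indexing, and W (bit 21) marks base writeback in the pre-indexed forms. A small C++ decode of those bits (type and function names are illustrative):

  #include <cstdint>

  struct AddrMode2Bits {
    bool load;        // L, bit 20
    bool writeback;   // W, bit 21
    bool byte;        // B, bit 22
    bool preindexed;  // P, bit 24
  };

  // Pull the L/W/B/P control bits out of a 32-bit ARM word.
  static AddrMode2Bits decodeAddrMode2(uint32_t inst) {
    AddrMode2Bits b;
    b.load       = (inst >> 20) & 1u;
    b.writeback  = (inst >> 21) & 1u;
    b.byte       = (inst >> 22) & 1u;
    b.preindexed = (inst >> 24) & 1u;
    return b;
  }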
-
-// addrmode3 instructions
-class AI3<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern>;
-class AXI3<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern>;
-
-// loads
-class AI3ldh<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 0; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-class AXI3ldh<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 0; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
-}
-class AI3ldsh<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-class AXI3ldsh<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
-}
-class AI3ldsb<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 0; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-class AXI3ldsb<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 0; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
-}
-class AI3ldd<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 0; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-
-// stores
-class AI3sth<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 0; // S bit
- let Inst{7} = 1;
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-class AXI3sth<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 0; // S bit
- let Inst{7} = 1;
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
-}
-class AI3std<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-
-// Pre-indexed loads
-class AI3ldhpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 0; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 1; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-class AI3ldshpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 1; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-class AI3ldsbpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{4} = 1;
- let Inst{5} = 0; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 1; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-class AI3lddpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{4} = 1;
- let Inst{5} = 0; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 0; // L bit
- let Inst{21} = 1; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-
-
-// Pre-indexed stores
-class AI3sthpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 0; // S bit
- let Inst{7} = 1;
- let Inst{20} = 0; // L bit
- let Inst{21} = 1; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-class AI3stdpr<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePre, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 0; // L bit
- let Inst{21} = 1; // W bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b000;
-}
-
-// Post-indexed loads
-class AI3ldhpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr,pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 0; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 0; // P bit
- let Inst{27-25} = 0b000;
-}
-class AI3ldshpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr,pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 0; // P bit
- let Inst{27-25} = 0b000;
-}
-class AI3ldsbpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr,pattern> {
- let Inst{4} = 1;
- let Inst{5} = 0; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 0; // P bit
- let Inst{27-25} = 0b000;
-}
-class AI3lddpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{4} = 1;
- let Inst{5} = 0; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 0; // P bit
- let Inst{27-25} = 0b000;
-}
-
-// Post-indexed stores
-class AI3sthpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr,pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 0; // S bit
- let Inst{7} = 1;
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 0; // P bit
- let Inst{27-25} = 0b000;
-}
-class AI3stdpo<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : I<oops, iops, AddrMode3, Size4Bytes, IndexModePost, f, itin,
- opc, asm, cstr, pattern> {
- let Inst{4} = 1;
- let Inst{5} = 1; // H bit
- let Inst{6} = 1; // S bit
- let Inst{7} = 1;
- let Inst{20} = 0; // L bit
- let Inst{21} = 0; // W bit
- let Inst{24} = 0; // P bit
- let Inst{27-25} = 0b000;
-}
-
-
-// addrmode4 instructions
-class AXI4ld<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode4, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{20} = 1; // L bit
- let Inst{22} = 0; // S bit
- let Inst{27-25} = 0b100;
-}
-class AXI4st<dag oops, dag iops, Format f, InstrItinClass itin,
- string asm, list<dag> pattern>
- : XI<oops, iops, AddrMode4, Size4Bytes, IndexModeNone, f, itin,
- asm, "", pattern> {
- let Inst{20} = 0; // L bit
- let Inst{22} = 0; // S bit
- let Inst{27-25} = 0b100;
-}
-
-// Unsigned multiply, multiply-accumulate instructions.
-class AMul1I<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, MulFrm, itin,
- opc, asm, "", pattern> {
- let Inst{7-4} = 0b1001;
- let Inst{20} = 0; // S bit
- let Inst{27-21} = opcod;
-}
-class AsMul1I<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : sI<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, MulFrm, itin,
- opc, asm, "", pattern> {
- let Inst{7-4} = 0b1001;
- let Inst{27-21} = opcod;
-}
-
-// Most significant word multiply
-class AMul2I<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, MulFrm, itin,
- opc, asm, "", pattern> {
- let Inst{7-4} = 0b1001;
- let Inst{20} = 1;
- let Inst{27-21} = opcod;
-}
-
-// SMUL<x><y> / SMULW<y> / SMLA<x><y> / SMLAW<x><y>
-class AMulxyI<bits<7> opcod, dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, MulFrm, itin,
- opc, asm, "", pattern> {
- let Inst{4} = 0;
- let Inst{7} = 1;
- let Inst{20} = 0;
- let Inst{27-21} = opcod;
-}
-
-// Extend instructions.
-class AExtI<bits<8> opcod, dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, ExtFrm, itin,
- opc, asm, "", pattern> {
- let Inst{7-4} = 0b0111;
- let Inst{27-20} = opcod;
-}
-
-// Misc Arithmetic instructions.
-class AMiscA1I<bits<8> opcod, dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, ArithMiscFrm, itin,
- opc, asm, "", pattern> {
- let Inst{27-20} = opcod;
-}
-
-//===----------------------------------------------------------------------===//
-
-// ARMPat - Same as Pat<>, but requires that the compiler be in ARM mode.
-class ARMPat<dag pattern, dag result> : Pat<pattern, result> {
- list<Predicate> Predicates = [IsARM];
-}
-class ARMV5TEPat<dag pattern, dag result> : Pat<pattern, result> {
- list<Predicate> Predicates = [IsARM, HasV5TE];
-}
-class ARMV6Pat<dag pattern, dag result> : Pat<pattern, result> {
- list<Predicate> Predicates = [IsARM, HasV6];
-}
-
-//===----------------------------------------------------------------------===//
-//
-// Thumb Instruction Format Definitions.
-//
-
-// TI - Thumb instruction.
-
-class ThumbI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
- InstrItinClass itin, string asm, string cstr, list<dag> pattern>
- : InstThumb<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> {
- let OutOperandList = oops;
- let InOperandList = iops;
- let AsmString = asm;
- let Pattern = pattern;
- list<Predicate> Predicates = [IsThumb];
-}
-
-class TI<dag oops, dag iops, InstrItinClass itin, string asm, list<dag> pattern>
- : ThumbI<oops, iops, AddrModeNone, Size2Bytes, itin, asm, "", pattern>;
-
-// Two-address instructions
-class TIt<dag oops, dag iops, InstrItinClass itin, string asm, list<dag> pattern>
- : ThumbI<oops, iops, AddrModeNone, Size2Bytes, itin, asm, "$lhs = $dst", pattern>;
-
-// tBL, tBX 32-bit instructions
-class TIx2<bits<5> opcod1, bits<2> opcod2, bit opcod3,
- dag oops, dag iops, InstrItinClass itin, string asm, list<dag> pattern>
- : ThumbI<oops, iops, AddrModeNone, Size4Bytes, itin, asm, "", pattern>, Encoding {
- let Inst{31-27} = opcod1;
- let Inst{15-14} = opcod2;
- let Inst{12} = opcod3;
-}
-
-// BR_JT instructions
-class TJTI<dag oops, dag iops, InstrItinClass itin, string asm, list<dag> pattern>
- : ThumbI<oops, iops, AddrModeNone, SizeSpecial, itin, asm, "", pattern>;
-
-// Thumb1 only
-class Thumb1I<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
- InstrItinClass itin, string asm, string cstr, list<dag> pattern>
- : InstThumb<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> {
- let OutOperandList = oops;
- let InOperandList = iops;
- let AsmString = asm;
- let Pattern = pattern;
- list<Predicate> Predicates = [IsThumb1Only];
-}
-
-class T1I<dag oops, dag iops, InstrItinClass itin,
- string asm, list<dag> pattern>
- : Thumb1I<oops, iops, AddrModeNone, Size2Bytes, itin, asm, "", pattern>;
-class T1Ix2<dag oops, dag iops, InstrItinClass itin,
- string asm, list<dag> pattern>
- : Thumb1I<oops, iops, AddrModeNone, Size4Bytes, itin, asm, "", pattern>;
-class T1JTI<dag oops, dag iops, InstrItinClass itin,
- string asm, list<dag> pattern>
- : Thumb1I<oops, iops, AddrModeNone, SizeSpecial, itin, asm, "", pattern>;
-
-// Two-address instructions
-class T1It<dag oops, dag iops, InstrItinClass itin,
- string asm, list<dag> pattern>
- : Thumb1I<oops, iops, AddrModeNone, Size2Bytes, itin,
- asm, "$lhs = $dst", pattern>;
-
-// Thumb1 instruction that can either be predicated or set CPSR.
-class Thumb1sI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
- InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : InstThumb<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> {
- let OutOperandList = !con(oops, (ops s_cc_out:$s));
- let InOperandList = !con(iops, (ops pred:$p));
- let AsmString = !strconcat(opc, !strconcat("${s}${p}", asm));
- let Pattern = pattern;
- list<Predicate> Predicates = [IsThumb1Only];
-}
-
-class T1sI<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb1sI<oops, iops, AddrModeNone, Size2Bytes, itin, opc, asm, "", pattern>;
-
-// Two-address instructions
-class T1sIt<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb1sI<oops, iops, AddrModeNone, Size2Bytes, itin, opc, asm,
- "$lhs = $dst", pattern>;
-
-// Thumb1 instruction that can be predicated.
-class Thumb1pI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
- InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : InstThumb<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> {
- let OutOperandList = oops;
- let InOperandList = !con(iops, (ops pred:$p));
- let AsmString = !strconcat(opc, !strconcat("${p}", asm));
- let Pattern = pattern;
- list<Predicate> Predicates = [IsThumb1Only];
-}
-
-class T1pI<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb1pI<oops, iops, AddrModeNone, Size2Bytes, itin, opc, asm, "", pattern>;
-
-// Two-address instructions
-class T1pIt<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb1pI<oops, iops, AddrModeNone, Size2Bytes, itin, opc, asm,
- "$lhs = $dst", pattern>;
-
-class T1pI1<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb1pI<oops, iops, AddrModeT1_1, Size2Bytes, itin, opc, asm, "", pattern>;
-class T1pI2<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb1pI<oops, iops, AddrModeT1_2, Size2Bytes, itin, opc, asm, "", pattern>;
-class T1pI4<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb1pI<oops, iops, AddrModeT1_4, Size2Bytes, itin, opc, asm, "", pattern>;
-class T1pIs<dag oops, dag iops,
- InstrItinClass itin, string opc, string asm, list<dag> pattern>
- : Thumb1pI<oops, iops, AddrModeT1_s, Size2Bytes, itin, opc, asm, "", pattern>;
-
-class Encoding16 : Encoding {
- let Inst{31-16} = 0x0000;
-}
-
-// A6.2 16-bit Thumb instruction encoding
-class T1Encoding<bits<6> opcode> : Encoding16 {
- let Inst{15-10} = opcode;
-}
-
-// A6.2.1 Shift (immediate), add, subtract, move, and compare encoding.
-class T1General<bits<5> opcode> : Encoding16 {
- let Inst{15-14} = 0b00;
- let Inst{13-9} = opcode;
-}
-
-// A6.2.2 Data-processing encoding.
-class T1DataProcessing<bits<4> opcode> : Encoding16 {
- let Inst{15-10} = 0b010000;
- let Inst{9-6} = opcode;
-}
-
-// A6.2.3 Special data instructions and branch and exchange encoding.
-class T1Special<bits<4> opcode> : Encoding16 {
- let Inst{15-10} = 0b010001;
- let Inst{9-6} = opcode;
-}
-
-// A6.2.4 Load/store single data item encoding.
-class T1LoadStore<bits<4> opA, bits<3> opB> : Encoding16 {
- let Inst{15-12} = opA;
- let Inst{11-9} = opB;
-}
-class T1LdSt<bits<3> opB> : T1LoadStore<0b0101, opB>;
-class T1LdSt4Imm<bits<3> opB> : T1LoadStore<0b0110, opB>; // Immediate, 4 bytes
-class T1LdSt1Imm<bits<3> opB> : T1LoadStore<0b0111, opB>; // Immediate, 1 byte
-class T1LdSt2Imm<bits<3> opB> : T1LoadStore<0b1000, opB>; // Immediate, 2 bytes
-class T1LdStSP<bits<3> opB> : T1LoadStore<0b1001, opB>; // SP relative
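Each A6.2.4 subclass above only fixes the 4-bit opA selector; opB stays per-instruction. Packed by hand (function name illustrative), the relevant halfword fields are simply:

  #include <cstdint>

  // opA occupies bits 15-12 and opB bits 11-9 of the 16-bit Thumb encoding.
  static uint16_t t1LoadStoreBits(uint8_t opA, uint8_t opB) {
    return static_cast<uint16_t>(((opA & 0xFu) << 12) | ((opB & 0x7u) << 9));
  }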
-
-// A6.2.5 Miscellaneous 16-bit instructions encoding.
-class T1Misc<bits<7> opcode> : Encoding16 {
- let Inst{15-12} = 0b1011;
- let Inst{11-5} = opcode;
-}
-
-// Thumb2I - Thumb2 instruction. Almost all Thumb2 instructions are predicable.
-class Thumb2I<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
- InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : InstARM<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> {
- let OutOperandList = oops;
- let InOperandList = !con(iops, (ops pred:$p));
- let AsmString = !strconcat(opc, !strconcat("${p}", asm));
- let Pattern = pattern;
- list<Predicate> Predicates = [IsThumb2];
-}
-
-// Same as Thumb2I except it can optionally modify CPSR. Note it's modeled as
-// an input operand since by default it's a zero register. It will
-// become an implicit def once it's "flipped".
-// FIXME: This uses unified syntax so {s} comes before {p}. We should make it
-// more consistent.
-class Thumb2sI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
- InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : InstARM<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> {
- let OutOperandList = oops;
- let InOperandList = !con(iops, (ops pred:$p, cc_out:$s));
- let AsmString = !strconcat(opc, !strconcat("${s}${p}", asm));
- let Pattern = pattern;
- list<Predicate> Predicates = [IsThumb2];
-}
-
-// Special cases
-class Thumb2XI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
- InstrItinClass itin,
- string asm, string cstr, list<dag> pattern>
- : InstARM<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> {
- let OutOperandList = oops;
- let InOperandList = iops;
- let AsmString = asm;
- let Pattern = pattern;
- list<Predicate> Predicates = [IsThumb2];
-}
-
-class ThumbXI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
- InstrItinClass itin,
- string asm, string cstr, list<dag> pattern>
- : InstARM<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> {
- let OutOperandList = oops;
- let InOperandList = iops;
- let AsmString = asm;
- let Pattern = pattern;
- list<Predicate> Predicates = [IsThumb1Only];
-}
-
-class T2I<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb2I<oops, iops, AddrModeNone, Size4Bytes, itin, opc, asm, "", pattern>;
-class T2Ii12<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb2I<oops, iops, AddrModeT2_i12, Size4Bytes, itin, opc, asm, "", pattern>;
-class T2Ii8<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb2I<oops, iops, AddrModeT2_i8, Size4Bytes, itin, opc, asm, "", pattern>;
-class T2Iso<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb2I<oops, iops, AddrModeT2_so, Size4Bytes, itin, opc, asm, "", pattern>;
-class T2Ipc<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb2I<oops, iops, AddrModeT2_pc, Size4Bytes, itin, opc, asm, "", pattern>;
-class T2Ii8s4<bit P, bit W, bit load, dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb2I<oops, iops, AddrModeT2_i8s4, Size4Bytes, itin, opc, asm, "",
- pattern> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b00;
- let Inst{24} = P;
- let Inst{23} = ?; // The U bit.
- let Inst{22} = 1;
- let Inst{21} = W;
- let Inst{20} = load;
-}
-
-class T2sI<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb2sI<oops, iops, AddrModeNone, Size4Bytes, itin, opc, asm, "", pattern>;
-
-class T2XI<dag oops, dag iops, InstrItinClass itin,
- string asm, list<dag> pattern>
- : Thumb2XI<oops, iops, AddrModeNone, Size4Bytes, itin, asm, "", pattern>;
-class T2JTI<dag oops, dag iops, InstrItinClass itin,
- string asm, list<dag> pattern>
- : Thumb2XI<oops, iops, AddrModeNone, SizeSpecial, itin, asm, "", pattern>;
-
-class T2Ix2<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : Thumb2I<oops, iops, AddrModeNone, Size8Bytes, itin, opc, asm, "", pattern>;
-
-
-// T2Iidxldst - Thumb2 indexed load / store instructions.
-class T2Iidxldst<bit signed, bits<2> opcod, bit load, bit pre,
- dag oops, dag iops,
- AddrMode am, IndexMode im, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : InstARM<am, Size4Bytes, im, ThumbFrm, GenericDomain, cstr, itin> {
- let OutOperandList = oops;
- let InOperandList = !con(iops, (ops pred:$p));
- let AsmString = !strconcat(opc, !strconcat("${p}", asm));
- let Pattern = pattern;
- list<Predicate> Predicates = [IsThumb2];
- let Inst{31-27} = 0b11111;
- let Inst{26-25} = 0b00;
- let Inst{24} = signed;
- let Inst{23} = 0;
- let Inst{22-21} = opcod;
- let Inst{20} = load;
- let Inst{11} = 1;
- // (P, W) = (1, 1) Pre-indexed or (0, 1) Post-indexed
- let Inst{10} = pre; // The P bit.
- let Inst{8} = 1; // The W bit.
-}
-
-// Helper class for disassembly only
-// A6.3.16 & A6.3.17
-// T2Imac - Thumb2 multiply [accumulate, and absolute difference] instructions.
-class T2I_mac<bit long, bits<3> op22_20, bits<4> op7_4, dag oops, dag iops,
- InstrItinClass itin, string opc, string asm, list<dag> pattern>
- : T2I<oops, iops, itin, opc, asm, pattern> {
- let Inst{31-27} = 0b11111;
- let Inst{26-24} = 0b011;
- let Inst{23} = long;
- let Inst{22-20} = op22_20;
- let Inst{7-4} = op7_4;
-}
-
-// Tv5Pat - Same as Pat<>, but requires V5T Thumb mode.
-class Tv5Pat<dag pattern, dag result> : Pat<pattern, result> {
- list<Predicate> Predicates = [IsThumb1Only, HasV5T];
-}
-
-// T1Pat - Same as Pat<>, but requires that the compiler be in Thumb1 mode.
-class T1Pat<dag pattern, dag result> : Pat<pattern, result> {
- list<Predicate> Predicates = [IsThumb1Only];
-}
-
-// T2Pat - Same as Pat<>, but requires that the compiler be in Thumb2 mode.
-class T2Pat<dag pattern, dag result> : Pat<pattern, result> {
- list<Predicate> Predicates = [IsThumb2];
-}
-
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// ARM VFP Instruction templates.
-//
-
-// Almost all VFP instructions are predicable.
-class VFPI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
- IndexMode im, Format f, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : InstARM<am, sz, im, f, VFPDomain, cstr, itin> {
- let OutOperandList = oops;
- let InOperandList = !con(iops, (ops pred:$p));
- let AsmString = !strconcat(opc, !strconcat("${p}", asm));
- let Pattern = pattern;
- list<Predicate> Predicates = [HasVFP2];
-}
-
-// Special cases
-class VFPXI<dag oops, dag iops, AddrMode am, SizeFlagVal sz,
- IndexMode im, Format f, InstrItinClass itin,
- string asm, string cstr, list<dag> pattern>
- : InstARM<am, sz, im, f, VFPDomain, cstr, itin> {
- let OutOperandList = oops;
- let InOperandList = iops;
- let AsmString = asm;
- let Pattern = pattern;
- list<Predicate> Predicates = [HasVFP2];
-}
-
-class VFPAI<dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : VFPI<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, f, itin,
- opc, asm, "", pattern>;
-
-// ARM VFP addrmode5 loads and stores
-class ADI5<bits<4> opcod1, bits<2> opcod2, dag oops, dag iops,
- InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : VFPI<oops, iops, AddrMode5, Size4Bytes, IndexModeNone,
- VFPLdStFrm, itin, opc, asm, "", pattern> {
- // TODO: Mark the instructions with the appropriate subtarget info.
- let Inst{27-24} = opcod1;
- let Inst{21-20} = opcod2;
- let Inst{11-8} = 0b1011;
-
- // 64-bit loads & stores operate on both NEON and VFP pipelines.
- let Dom = VFPNeonDomain.Value;
-}
-
-class ASI5<bits<4> opcod1, bits<2> opcod2, dag oops, dag iops,
- InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : VFPI<oops, iops, AddrMode5, Size4Bytes, IndexModeNone,
- VFPLdStFrm, itin, opc, asm, "", pattern> {
- // TODO: Mark the instructions with the appropriate subtarget info.
- let Inst{27-24} = opcod1;
- let Inst{21-20} = opcod2;
- let Inst{11-8} = 0b1010;
-}
-
-// Load / store multiple
-class AXDI5<dag oops, dag iops, InstrItinClass itin,
- string asm, list<dag> pattern>
- : VFPXI<oops, iops, AddrMode5, Size4Bytes, IndexModeNone,
- VFPLdStMulFrm, itin, asm, "", pattern> {
- // TODO: Mark the instructions with the appropriate subtarget info.
- let Inst{27-25} = 0b110;
- let Inst{11-8} = 0b1011;
-
- // 64-bit loads & stores operate on both NEON and VFP pipelines.
- let Dom = VFPNeonDomain.Value;
-}
-
-class AXSI5<dag oops, dag iops, InstrItinClass itin,
- string asm, list<dag> pattern>
- : VFPXI<oops, iops, AddrMode5, Size4Bytes, IndexModeNone,
- VFPLdStMulFrm, itin, asm, "", pattern> {
- // TODO: Mark the instructions with the appropriate subtarget info.
- let Inst{27-25} = 0b110;
- let Inst{11-8} = 0b1010;
-}
-
-// Double precision, unary
-class ADuI<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4,
- bit opcod5, dag oops, dag iops, InstrItinClass itin, string opc,
- string asm, list<dag> pattern>
- : VFPAI<oops, iops, VFPUnaryFrm, itin, opc, asm, pattern> {
- let Inst{27-23} = opcod1;
- let Inst{21-20} = opcod2;
- let Inst{19-16} = opcod3;
- let Inst{11-8} = 0b1011;
- let Inst{7-6} = opcod4;
- let Inst{4} = opcod5;
-}
-
-// Double precision, binary
-class ADbI<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops,
- dag iops, InstrItinClass itin, string opc, string asm, list<dag> pattern>
- : VFPAI<oops, iops, VFPBinaryFrm, itin, opc, asm, pattern> {
- let Inst{27-23} = opcod1;
- let Inst{21-20} = opcod2;
- let Inst{11-8} = 0b1011;
- let Inst{6} = op6;
- let Inst{4} = op4;
-}
-
-// Single precision, unary
-class ASuI<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4,
- bit opcod5, dag oops, dag iops, InstrItinClass itin, string opc,
- string asm, list<dag> pattern>
- : VFPAI<oops, iops, VFPUnaryFrm, itin, opc, asm, pattern> {
- let Inst{27-23} = opcod1;
- let Inst{21-20} = opcod2;
- let Inst{19-16} = opcod3;
- let Inst{11-8} = 0b1010;
- let Inst{7-6} = opcod4;
- let Inst{4} = opcod5;
-}
-
-// Single precision unary, if no NEON
-// Same as ASuI except not available if NEON is enabled
-class ASuIn<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<2> opcod4,
- bit opcod5, dag oops, dag iops, InstrItinClass itin, string opc,
- string asm, list<dag> pattern>
- : ASuI<opcod1, opcod2, opcod3, opcod4, opcod5, oops, iops, itin, opc, asm,
- pattern> {
- list<Predicate> Predicates = [HasVFP2,DontUseNEONForFP];
-}
-
-// Single precision, binary
-class ASbI<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops, dag iops,
- InstrItinClass itin, string opc, string asm, list<dag> pattern>
- : VFPAI<oops, iops, VFPBinaryFrm, itin, opc, asm, pattern> {
- let Inst{27-23} = opcod1;
- let Inst{21-20} = opcod2;
- let Inst{11-8} = 0b1010;
- let Inst{6} = op6;
- let Inst{4} = op4;
-}
-
-// Single precision binary, if no NEON
-// Same as ASbI except not available if NEON is enabled
-class ASbIn<bits<5> opcod1, bits<2> opcod2, bit op6, bit op4, dag oops,
- dag iops, InstrItinClass itin, string opc, string asm, list<dag> pattern>
- : ASbI<opcod1, opcod2, op6, op4, oops, iops, itin, opc, asm, pattern> {
- list<Predicate> Predicates = [HasVFP2,DontUseNEONForFP];
-}
-
-// VFP conversion instructions
-class AVConv1I<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<4> opcod4,
- dag oops, dag iops, InstrItinClass itin, string opc, string asm,
- list<dag> pattern>
- : VFPAI<oops, iops, VFPConv1Frm, itin, opc, asm, pattern> {
- let Inst{27-23} = opcod1;
- let Inst{21-20} = opcod2;
- let Inst{19-16} = opcod3;
- let Inst{11-8} = opcod4;
- let Inst{6} = 1;
- let Inst{4} = 0;
-}
-
-// VFP conversion between floating-point and fixed-point
-class AVConv1XI<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4, bit op5,
- dag oops, dag iops, InstrItinClass itin, string opc, string asm,
- list<dag> pattern>
- : AVConv1I<op1, op2, op3, op4, oops, iops, itin, opc, asm, pattern> {
- // size (fixed-point number): sx == 0 ? 16 : 32
- let Inst{7} = op5; // sx
-}
-
-// VFP conversion instructions, if no NEON
-class AVConv1In<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3, bits<4> opcod4,
- dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
- pattern> {
- list<Predicate> Predicates = [HasVFP2,DontUseNEONForFP];
-}
-
-class AVConvXI<bits<8> opcod1, bits<4> opcod2, dag oops, dag iops, Format f,
- InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : VFPAI<oops, iops, f, itin, opc, asm, pattern> {
- let Inst{27-20} = opcod1;
- let Inst{11-8} = opcod2;
- let Inst{4} = 1;
-}
-
-class AVConv2I<bits<8> opcod1, bits<4> opcod2, dag oops, dag iops,
- InstrItinClass itin, string opc, string asm, list<dag> pattern>
- : AVConvXI<opcod1, opcod2, oops, iops, VFPConv2Frm, itin, opc, asm, pattern>;
-
-class AVConv3I<bits<8> opcod1, bits<4> opcod2, dag oops, dag iops,
- InstrItinClass itin, string opc, string asm, list<dag> pattern>
- : AVConvXI<opcod1, opcod2, oops, iops, VFPConv3Frm, itin, opc, asm, pattern>;
-
-class AVConv4I<bits<8> opcod1, bits<4> opcod2, dag oops, dag iops,
- InstrItinClass itin, string opc, string asm, list<dag> pattern>
- : AVConvXI<opcod1, opcod2, oops, iops, VFPConv4Frm, itin, opc, asm, pattern>;
-
-class AVConv5I<bits<8> opcod1, bits<4> opcod2, dag oops, dag iops,
- InstrItinClass itin, string opc, string asm, list<dag> pattern>
- : AVConvXI<opcod1, opcod2, oops, iops, VFPConv5Frm, itin, opc, asm, pattern>;
-
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// ARM NEON Instruction templates.
-//
-
-class NeonI<dag oops, dag iops, AddrMode am, IndexMode im, InstrItinClass itin,
- string opc, string dt, string asm, string cstr, list<dag> pattern>
- : InstARM<am, Size4Bytes, im, NEONFrm, NeonDomain, cstr, itin> {
- let OutOperandList = oops;
- let InOperandList = !con(iops, (ops pred:$p));
- let AsmString = !strconcat(
- !strconcat(!strconcat(opc, "${p}"), !strconcat(".", dt)),
- !strconcat("\t", asm));
- let Pattern = pattern;
- list<Predicate> Predicates = [HasNEON];
-}
-
-// Same as NeonI except it does not have a "data type" specifier.
-class NeonXI<dag oops, dag iops, AddrMode am, IndexMode im, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : InstARM<am, Size4Bytes, im, NEONFrm, NeonDomain, cstr, itin> {
- let OutOperandList = oops;
- let InOperandList = !con(iops, (ops pred:$p));
- let AsmString = !strconcat(!strconcat(opc, "${p}"), !strconcat("\t", asm));
- let Pattern = pattern;
- list<Predicate> Predicates = [HasNEON];
-}
-
-class NI<dag oops, dag iops, InstrItinClass itin, string opc, string asm,
- list<dag> pattern>
- : NeonXI<oops, iops, AddrModeNone, IndexModeNone, itin, opc, asm, "",
- pattern> {
-}
-
-class NI4<dag oops, dag iops, InstrItinClass itin, string opc,
- string asm, list<dag> pattern>
- : NeonXI<oops, iops, AddrMode4, IndexModeNone, itin, opc, asm, "",
- pattern> {
-}
-
-class NLdSt<bit op23, bits<2> op21_20, bits<4> op11_8, bits<4> op7_4,
- dag oops, dag iops, InstrItinClass itin,
- string opc, string dt, string asm, string cstr, list<dag> pattern>
- : NeonI<oops, iops, AddrMode6, IndexModeNone, itin, opc, dt, asm, cstr,
- pattern> {
- let Inst{31-24} = 0b11110100;
- let Inst{23} = op23;
- let Inst{21-20} = op21_20;
- let Inst{11-8} = op11_8;
- let Inst{7-4} = op7_4;
-}
-
-class NDataI<dag oops, dag iops, InstrItinClass itin,
- string opc, string dt, string asm, string cstr, list<dag> pattern>
- : NeonI<oops, iops, AddrModeNone, IndexModeNone, itin, opc, dt, asm,
- cstr, pattern> {
- let Inst{31-25} = 0b1111001;
-}
-
-class NDataXI<dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : NeonXI<oops, iops, AddrModeNone, IndexModeNone, itin, opc, asm,
- cstr, pattern> {
- let Inst{31-25} = 0b1111001;
-}
-
-// NEON "one register and a modified immediate" format.
-class N1ModImm<bit op23, bits<3> op21_19, bits<4> op11_8, bit op7, bit op6,
- bit op5, bit op4,
- dag oops, dag iops, InstrItinClass itin,
- string opc, string dt, string asm, string cstr, list<dag> pattern>
- : NDataI<oops, iops, itin, opc, dt, asm, cstr, pattern> {
- let Inst{23} = op23;
- let Inst{21-19} = op21_19;
- let Inst{11-8} = op11_8;
- let Inst{7} = op7;
- let Inst{6} = op6;
- let Inst{5} = op5;
- let Inst{4} = op4;
-}
-
-// NEON 2 vector register format.
-class N2V<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18, bits<2> op17_16,
- bits<5> op11_7, bit op6, bit op4,
- dag oops, dag iops, InstrItinClass itin,
- string opc, string dt, string asm, string cstr, list<dag> pattern>
- : NDataI<oops, iops, itin, opc, dt, asm, cstr, pattern> {
- let Inst{24-23} = op24_23;
- let Inst{21-20} = op21_20;
- let Inst{19-18} = op19_18;
- let Inst{17-16} = op17_16;
- let Inst{11-7} = op11_7;
- let Inst{6} = op6;
- let Inst{4} = op4;
-}
-
-// Same as N2V except it doesn't have a datatype suffix.
-class N2VX<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18, bits<2> op17_16,
- bits<5> op11_7, bit op6, bit op4,
- dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : NDataXI<oops, iops, itin, opc, asm, cstr, pattern> {
- let Inst{24-23} = op24_23;
- let Inst{21-20} = op21_20;
- let Inst{19-18} = op19_18;
- let Inst{17-16} = op17_16;
- let Inst{11-7} = op11_7;
- let Inst{6} = op6;
- let Inst{4} = op4;
-}
-
-// NEON 2 vector register with immediate.
-class N2VImm<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
- dag oops, dag iops, InstrItinClass itin,
- string opc, string dt, string asm, string cstr, list<dag> pattern>
- : NDataI<oops, iops, itin, opc, dt, asm, cstr, pattern> {
- let Inst{24} = op24;
- let Inst{23} = op23;
- let Inst{11-8} = op11_8;
- let Inst{7} = op7;
- let Inst{6} = op6;
- let Inst{4} = op4;
-}
-
-// NEON 3 vector register format.
-class N3V<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op6, bit op4,
- dag oops, dag iops, InstrItinClass itin,
- string opc, string dt, string asm, string cstr, list<dag> pattern>
- : NDataI<oops, iops, itin, opc, dt, asm, cstr, pattern> {
- let Inst{24} = op24;
- let Inst{23} = op23;
- let Inst{21-20} = op21_20;
- let Inst{11-8} = op11_8;
- let Inst{6} = op6;
- let Inst{4} = op4;
-}
-
-// Same as N3V except it doesn't have a data type suffix.
-class N3VX<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op6, bit op4,
- dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, string cstr, list<dag> pattern>
- : NDataXI<oops, iops, itin, opc, asm, cstr, pattern> {
- let Inst{24} = op24;
- let Inst{23} = op23;
- let Inst{21-20} = op21_20;
- let Inst{11-8} = op11_8;
- let Inst{6} = op6;
- let Inst{4} = op4;
-}
-
-// NEON VMOVs between scalar and core registers.
-class NVLaneOp<bits<8> opcod1, bits<4> opcod2, bits<2> opcod3,
- dag oops, dag iops, Format f, InstrItinClass itin,
- string opc, string dt, string asm, list<dag> pattern>
- : InstARM<AddrModeNone, Size4Bytes, IndexModeNone, f, GenericDomain,
- "", itin> {
- let Inst{27-20} = opcod1;
- let Inst{11-8} = opcod2;
- let Inst{6-5} = opcod3;
- let Inst{4} = 1;
-
- let OutOperandList = oops;
- let InOperandList = !con(iops, (ops pred:$p));
- let AsmString = !strconcat(
- !strconcat(!strconcat(opc, "${p}"), !strconcat(".", dt)),
- !strconcat("\t", asm));
- let Pattern = pattern;
- list<Predicate> Predicates = [HasNEON];
-}
-class NVGetLane<bits<8> opcod1, bits<4> opcod2, bits<2> opcod3,
- dag oops, dag iops, InstrItinClass itin,
- string opc, string dt, string asm, list<dag> pattern>
- : NVLaneOp<opcod1, opcod2, opcod3, oops, iops, NEONGetLnFrm, itin,
- opc, dt, asm, pattern>;
-class NVSetLane<bits<8> opcod1, bits<4> opcod2, bits<2> opcod3,
- dag oops, dag iops, InstrItinClass itin,
- string opc, string dt, string asm, list<dag> pattern>
- : NVLaneOp<opcod1, opcod2, opcod3, oops, iops, NEONSetLnFrm, itin,
- opc, dt, asm, pattern>;
-class NVDup<bits<8> opcod1, bits<4> opcod2, bits<2> opcod3,
- dag oops, dag iops, InstrItinClass itin,
- string opc, string dt, string asm, list<dag> pattern>
- : NVLaneOp<opcod1, opcod2, opcod3, oops, iops, NEONDupFrm, itin,
- opc, dt, asm, pattern>;
-
-// NEONFPPat - Same as Pat<>, but requires that the compiler be using NEON
-// for single-precision FP.
-class NEONFPPat<dag pattern, dag result> : Pat<pattern, result> {
- list<Predicate> Predicates = [HasNEON,UseNEONForFP];
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMInstrInfo.cpp b/libclamav/c++/llvm/lib/Target/ARM/ARMInstrInfo.cpp
deleted file mode 100644
index 85f6b40..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMInstrInfo.cpp
+++ /dev/null
@@ -1,86 +0,0 @@
-//===- ARMInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the ARM implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARMInstrInfo.h"
-#include "ARM.h"
-#include "ARMAddressingModes.h"
-#include "ARMGenInstrInfo.inc"
-#include "ARMMachineFunctionInfo.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/CodeGen/LiveVariables.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineJumpTableInfo.h"
-#include "llvm/MC/MCAsmInfo.h"
-using namespace llvm;
-
-ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI)
- : ARMBaseInstrInfo(STI), RI(*this, STI) {
-}
-
-unsigned ARMInstrInfo::getUnindexedOpcode(unsigned Opc) const {
- switch (Opc) {
- default: break;
- case ARM::LDR_PRE:
- case ARM::LDR_POST:
- return ARM::LDR;
- case ARM::LDRH_PRE:
- case ARM::LDRH_POST:
- return ARM::LDRH;
- case ARM::LDRB_PRE:
- case ARM::LDRB_POST:
- return ARM::LDRB;
- case ARM::LDRSH_PRE:
- case ARM::LDRSH_POST:
- return ARM::LDRSH;
- case ARM::LDRSB_PRE:
- case ARM::LDRSB_POST:
- return ARM::LDRSB;
- case ARM::STR_PRE:
- case ARM::STR_POST:
- return ARM::STR;
- case ARM::STRH_PRE:
- case ARM::STRH_POST:
- return ARM::STRH;
- case ARM::STRB_PRE:
- case ARM::STRB_POST:
- return ARM::STRB;
- }
-
- return 0;
-}
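A usage note (TII stands for any ARMInstrInfo instance; the results follow directly from the switch above):

  // TII.getUnindexedOpcode(ARM::LDRB_PRE)  == ARM::LDRB
  // TII.getUnindexedOpcode(ARM::STRH_POST) == ARM::STRH
  // TII.getUnindexedOpcode(ARM::MOVr)      == 0   (opcode not listed, so default)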
-
-void ARMInstrInfo::
-reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SubIdx, const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const {
- DebugLoc dl = Orig->getDebugLoc();
- unsigned Opcode = Orig->getOpcode();
- switch (Opcode) {
- default:
- break;
- case ARM::MOVi2pieces: {
- RI.emitLoadConstPool(MBB, I, dl,
- DestReg, SubIdx,
- Orig->getOperand(1).getImm(),
- (ARMCC::CondCodes)Orig->getOperand(2).getImm(),
- Orig->getOperand(3).getReg());
- MachineInstr *NewMI = prior(I);
- NewMI->getOperand(0).setSubReg(SubIdx);
- return;
- }
- }
-
- return ARMBaseInstrInfo::reMaterialize(MBB, I, DestReg, SubIdx, Orig, TRI);
-}
-
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMInstrInfo.h b/libclamav/c++/llvm/lib/Target/ARM/ARMInstrInfo.h
deleted file mode 100644
index d4199d1..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMInstrInfo.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//===- ARMInstrInfo.h - ARM Instruction Information -------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the ARM implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARMINSTRUCTIONINFO_H
-#define ARMINSTRUCTIONINFO_H
-
-#include "llvm/Target/TargetInstrInfo.h"
-#include "ARMBaseInstrInfo.h"
-#include "ARMRegisterInfo.h"
-#include "ARMSubtarget.h"
-#include "ARM.h"
-
-namespace llvm {
- class ARMSubtarget;
-
-class ARMInstrInfo : public ARMBaseInstrInfo {
- ARMRegisterInfo RI;
-public:
- explicit ARMInstrInfo(const ARMSubtarget &STI);
-
- // Return the non-pre/post incrementing version of 'Opc'. Return 0
- // if there is no such opcode.
- unsigned getUnindexedOpcode(unsigned Opc) const;
-
- void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SubIdx,
- const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const;
-
- /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
- /// such, whenever a client has an instance of instruction info, it should
- /// always be able to get register info as well (through this method).
- ///
- const ARMRegisterInfo &getRegisterInfo() const { return RI; }
-};
-
-}
-
-#endif
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMInstrInfo.td b/libclamav/c++/llvm/lib/Target/ARM/ARMInstrInfo.td
deleted file mode 100644
index 3812aba..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ /dev/null
@@ -1,2873 +0,0 @@
-//===- ARMInstrInfo.td - Target Description for ARM Target -*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the ARM instructions in TableGen format.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// ARM specific DAG Nodes.
-//
-
-// Type profiles.
-def SDT_ARMCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>;
-def SDT_ARMCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>, SDTCisVT<1, i32> ]>;
-
-def SDT_ARMSaveCallPC : SDTypeProfile<0, 1, []>;
-
-def SDT_ARMcall : SDTypeProfile<0, -1, [SDTCisInt<0>]>;
-
-def SDT_ARMCMov : SDTypeProfile<1, 3,
- [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
- SDTCisVT<3, i32>]>;
-
-def SDT_ARMBrcond : SDTypeProfile<0, 2,
- [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>]>;
-
-def SDT_ARMBrJT : SDTypeProfile<0, 3,
- [SDTCisPtrTy<0>, SDTCisVT<1, i32>,
- SDTCisVT<2, i32>]>;
-
-def SDT_ARMBr2JT : SDTypeProfile<0, 4,
- [SDTCisPtrTy<0>, SDTCisVT<1, i32>,
- SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
-
-def SDT_ARMCmp : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
-
-def SDT_ARMPICAdd : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>,
- SDTCisPtrTy<1>, SDTCisVT<2, i32>]>;
-
-def SDT_ARMThreadPointer : SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>;
-def SDT_ARMEH_SJLJ_Setjmp : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisPtrTy<1>,
- SDTCisInt<2>]>;
-
-def SDT_ARMMEMBARRIERV7 : SDTypeProfile<0, 0, []>;
-def SDT_ARMSYNCBARRIERV7 : SDTypeProfile<0, 0, []>;
-def SDT_ARMMEMBARRIERV6 : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def SDT_ARMSYNCBARRIERV6 : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-
-// Node definitions.
-def ARMWrapper : SDNode<"ARMISD::Wrapper", SDTIntUnaryOp>;
-def ARMWrapperJT : SDNode<"ARMISD::WrapperJT", SDTIntBinOp>;
-
-def ARMcallseq_start : SDNode<"ISD::CALLSEQ_START", SDT_ARMCallSeqStart,
- [SDNPHasChain, SDNPOutFlag]>;
-def ARMcallseq_end : SDNode<"ISD::CALLSEQ_END", SDT_ARMCallSeqEnd,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
-
-def ARMcall : SDNode<"ARMISD::CALL", SDT_ARMcall,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
-def ARMcall_pred : SDNode<"ARMISD::CALL_PRED", SDT_ARMcall,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
-def ARMcall_nolink : SDNode<"ARMISD::CALL_NOLINK", SDT_ARMcall,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
-
-def ARMretflag : SDNode<"ARMISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInFlag]>;
-
-def ARMcmov : SDNode<"ARMISD::CMOV", SDT_ARMCMov,
- [SDNPInFlag]>;
-def ARMcneg : SDNode<"ARMISD::CNEG", SDT_ARMCMov,
- [SDNPInFlag]>;
-
-def ARMbrcond : SDNode<"ARMISD::BRCOND", SDT_ARMBrcond,
- [SDNPHasChain, SDNPInFlag, SDNPOutFlag]>;
-
-def ARMbrjt : SDNode<"ARMISD::BR_JT", SDT_ARMBrJT,
- [SDNPHasChain]>;
-def ARMbr2jt : SDNode<"ARMISD::BR2_JT", SDT_ARMBr2JT,
- [SDNPHasChain]>;
-
-def ARMcmp : SDNode<"ARMISD::CMP", SDT_ARMCmp,
- [SDNPOutFlag]>;
-
-def ARMcmpZ : SDNode<"ARMISD::CMPZ", SDT_ARMCmp,
- [SDNPOutFlag,SDNPCommutative]>;
-
-def ARMpic_add : SDNode<"ARMISD::PIC_ADD", SDT_ARMPICAdd>;
-
-def ARMsrl_flag : SDNode<"ARMISD::SRL_FLAG", SDTIntUnaryOp, [SDNPOutFlag]>;
-def ARMsra_flag : SDNode<"ARMISD::SRA_FLAG", SDTIntUnaryOp, [SDNPOutFlag]>;
-def ARMrrx : SDNode<"ARMISD::RRX" , SDTIntUnaryOp, [SDNPInFlag ]>;
-
-def ARMthread_pointer: SDNode<"ARMISD::THREAD_POINTER", SDT_ARMThreadPointer>;
-def ARMeh_sjlj_setjmp: SDNode<"ARMISD::EH_SJLJ_SETJMP", SDT_ARMEH_SJLJ_Setjmp>;
-
-def ARMMemBarrierV7 : SDNode<"ARMISD::MEMBARRIER", SDT_ARMMEMBARRIERV7,
- [SDNPHasChain]>;
-def ARMSyncBarrierV7 : SDNode<"ARMISD::SYNCBARRIER", SDT_ARMMEMBARRIERV7,
- [SDNPHasChain]>;
-def ARMMemBarrierV6 : SDNode<"ARMISD::MEMBARRIER", SDT_ARMMEMBARRIERV6,
- [SDNPHasChain]>;
-def ARMSyncBarrierV6 : SDNode<"ARMISD::SYNCBARRIER", SDT_ARMMEMBARRIERV6,
- [SDNPHasChain]>;
-
-def ARMrbit : SDNode<"ARMISD::RBIT", SDTIntUnaryOp>;
-
-//===----------------------------------------------------------------------===//
-// ARM Instruction Predicate Definitions.
-//
-def HasV4T : Predicate<"Subtarget->hasV4TOps()">;
-def NoV4T : Predicate<"!Subtarget->hasV4TOps()">;
-def HasV5T : Predicate<"Subtarget->hasV5TOps()">;
-def HasV5TE : Predicate<"Subtarget->hasV5TEOps()">;
-def HasV6 : Predicate<"Subtarget->hasV6Ops()">;
-def HasV6T2 : Predicate<"Subtarget->hasV6T2Ops()">;
-def NoV6T2 : Predicate<"!Subtarget->hasV6T2Ops()">;
-def HasV7 : Predicate<"Subtarget->hasV7Ops()">;
-def HasVFP2 : Predicate<"Subtarget->hasVFP2()">;
-def HasVFP3 : Predicate<"Subtarget->hasVFP3()">;
-def HasNEON : Predicate<"Subtarget->hasNEON()">;
-def UseNEONForFP : Predicate<"Subtarget->useNEONForSinglePrecisionFP()">;
-def DontUseNEONForFP : Predicate<"!Subtarget->useNEONForSinglePrecisionFP()">;
-def IsThumb : Predicate<"Subtarget->isThumb()">;
-def IsThumb1Only : Predicate<"Subtarget->isThumb1Only()">;
-def IsThumb2 : Predicate<"Subtarget->isThumb2()">;
-def IsARM : Predicate<"!Subtarget->isThumb()">;
-def IsDarwin : Predicate<"Subtarget->isTargetDarwin()">;
-def IsNotDarwin : Predicate<"!Subtarget->isTargetDarwin()">;
-
-// FIXME: Eventually this will be just "hasV6T2Ops".
-def UseMovt : Predicate<"Subtarget->useMovt()">;
-def DontUseMovt : Predicate<"!Subtarget->useMovt()">;
-
-//===----------------------------------------------------------------------===//
-// ARM Flag Definitions.
-
-class RegConstraint<string C> {
- string Constraints = C;
-}
-
-//===----------------------------------------------------------------------===//
-// ARM specific transformation functions and pattern fragments.
-//
-
-// so_imm_neg_XFORM - Return a so_imm value packed into the format described for
-// so_imm_neg def below.
-def so_imm_neg_XFORM : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(-(int)N->getZExtValue(), MVT::i32);
-}]>;
-
-// so_imm_not_XFORM - Return a so_imm value packed into the format described for
-// so_imm_not def below.
-def so_imm_not_XFORM : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(~(int)N->getZExtValue(), MVT::i32);
-}]>;
-
-// rot_imm predicate - True if the 32-bit immediate is equal to 8, 16, or 24.
-def rot_imm : PatLeaf<(i32 imm), [{
- int32_t v = (int32_t)N->getZExtValue();
- return v == 8 || v == 16 || v == 24;
-}]>;
-
-/// imm1_15 predicate - True if the 32-bit immediate is in the range [1,15].
-def imm1_15 : PatLeaf<(i32 imm), [{
- return (int32_t)N->getZExtValue() >= 1 && (int32_t)N->getZExtValue() < 16;
-}]>;
-
-/// imm16_31 predicate - True if the 32-bit immediate is in the range [16,31].
-def imm16_31 : PatLeaf<(i32 imm), [{
- return (int32_t)N->getZExtValue() >= 16 && (int32_t)N->getZExtValue() < 32;
-}]>;
-
-def so_imm_neg :
- PatLeaf<(imm), [{
- return ARM_AM::getSOImmVal(-(int)N->getZExtValue()) != -1;
- }], so_imm_neg_XFORM>;
-
-def so_imm_not :
- PatLeaf<(imm), [{
- return ARM_AM::getSOImmVal(~(int)N->getZExtValue()) != -1;
- }], so_imm_not_XFORM>;
-
-// sext_16_node predicate - True if the SDNode is sign-extended 16 or more bits.
-def sext_16_node : PatLeaf<(i32 GPR:$a), [{
- return CurDAG->ComputeNumSignBits(SDValue(N,0)) >= 17;
-}]>;
-
-/// bf_inv_mask_imm predicate - An AND mask to clear an arbitrary width bitfield
-/// e.g., 0xf000ffff
-def bf_inv_mask_imm : Operand<i32>,
- PatLeaf<(imm), [{
- uint32_t v = (uint32_t)N->getZExtValue();
- if (v == 0xffffffff)
- return 0;
- // There can be 1's on either or both "outsides"; all the "inside"
- // bits must be 0's.
- unsigned int lsb = 0, msb = 31;
- while (v & (1 << msb)) --msb;
- while (v & (1 << lsb)) ++lsb;
- for (unsigned int i = lsb; i <= msb; ++i) {
- if (v & (1 << i))
- return 0;
- }
- return 1;
-}] > {
- let PrintMethod = "printBitfieldInvMaskImmOperand";
-}
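The predicate above accepts exactly those AND masks whose zero bits form one contiguous run, which is what BFC/BFI can clear. A standalone C++ sketch of the same check (the helper name and the lsb/width reporting are mine) makes it easy to try masks by hand:

#include <cstdint>
#include <cstdio>

// True if ~mask is one contiguous run of bits, i.e. the mask clears a
// single bitfield; on success report the field's lsb and width.
static bool isBitfieldInvMask(uint32_t mask, unsigned &lsb, unsigned &width) {
  if (mask == 0xffffffffu)            // clears nothing
    return false;
  unsigned lo = 0, hi = 31;
  while (mask & (1u << hi)) --hi;     // ones above the hole
  while (mask & (1u << lo)) ++lo;     // ones below the hole
  for (unsigned i = lo; i <= hi; ++i)
    if (mask & (1u << i))             // a one inside the hole
      return false;
  lsb = lo;
  width = hi - lo + 1;
  return true;
}

int main() {
  unsigned lsb, width;
  if (isBitfieldInvMask(0xf000ffffu, lsb, width))
    std::printf("0xf000ffff clears lsb=%u width=%u\n", lsb, width);            // 16, 12
  std::printf("0xf0f0ffff ok? %d\n", isBitfieldInvMask(0xf0f0ffffu, lsb, width)); // 0
}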
-
-/// Split a 32-bit immediate into two 16-bit parts.
-def lo16 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant((uint32_t)N->getZExtValue() & 0xffff,
- MVT::i32);
-}]>;
-
-def hi16 : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant((uint32_t)N->getZExtValue() >> 16, MVT::i32);
-}]>;
-
-def lo16AllZero : PatLeaf<(i32 imm), [{
- // Returns true if all low 16-bits are 0.
- return (((uint32_t)N->getZExtValue()) & 0xFFFFUL) == 0;
-}], hi16>;
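These transforms feed the movw/movt patterns further down (MOVi16/MOVTi16): movw writes the low half, movt replaces only the high half. A minimal C++ sketch of the split (variable names are mine):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t imm = 0x12345678u;
  uint32_t lo = imm & 0xffffu;         // what lo16 extracts -> movw dst, #0x5678
  uint32_t hi = imm >> 16;             // what hi16 extracts -> movt dst, #0x1234
  uint32_t dst = lo;
  dst = (dst & 0xffffu) | (hi << 16);  // movt keeps the low half intact
  std::printf("0x%08x\n", dst);        // 0x12345678
}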
-
-/// imm0_65535 predicate - True if the 32-bit immediate is in the range
-/// [0,65535].
-def imm0_65535 : PatLeaf<(i32 imm), [{
- return (uint32_t)N->getZExtValue() < 65536;
-}]>;
-
-class BinOpFrag<dag res> : PatFrag<(ops node:$LHS, node:$RHS), res>;
-class UnOpFrag <dag res> : PatFrag<(ops node:$Src), res>;
-
-/// adde and sube predicates - True based on whether the carry flag output
-/// will be needed or not.
-def adde_dead_carry :
- PatFrag<(ops node:$LHS, node:$RHS), (adde node:$LHS, node:$RHS),
- [{return !N->hasAnyUseOfValue(1);}]>;
-def sube_dead_carry :
- PatFrag<(ops node:$LHS, node:$RHS), (sube node:$LHS, node:$RHS),
- [{return !N->hasAnyUseOfValue(1);}]>;
-def adde_live_carry :
- PatFrag<(ops node:$LHS, node:$RHS), (adde node:$LHS, node:$RHS),
- [{return N->hasAnyUseOfValue(1);}]>;
-def sube_live_carry :
- PatFrag<(ops node:$LHS, node:$RHS), (sube node:$LHS, node:$RHS),
- [{return N->hasAnyUseOfValue(1);}]>;
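These fragments differ only in whether the carry result (value #1) of the adde/sube node has users; that is what later selects ADC/SBC versus the flag-setting ADCS/SBCS forms. The classic producer of such nodes is a 64-bit add split into 32-bit halves; a small C++ illustration of that dataflow (not of the selector itself) follows:

#include <cstdint>
#include <cstdio>

// 64-bit add from 32-bit halves: the low half's carry-out is live (the
// high half consumes it), the high half's own carry-out is dead here.
static uint64_t add64(uint32_t alo, uint32_t ahi, uint32_t blo, uint32_t bhi) {
  uint32_t lo = alo + blo;
  uint32_t carry = lo < alo;        // carry out of the low addition
  uint32_t hi = ahi + bhi + carry;  // what ADC computes
  return ((uint64_t)hi << 32) | lo;
}

int main() {
  std::printf("%llx\n", (unsigned long long)add64(0xffffffffu, 0, 1, 0)); // 100000000
}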
-
-//===----------------------------------------------------------------------===//
-// Operand Definitions.
-//
-
-// Branch target.
-def brtarget : Operand<OtherVT>;
-
-// A list of registers separated by commas. Used by load/store multiple.
-def reglist : Operand<i32> {
- let PrintMethod = "printRegisterList";
-}
-
-// An operand for the CONSTPOOL_ENTRY pseudo-instruction.
-def cpinst_operand : Operand<i32> {
- let PrintMethod = "printCPInstOperand";
-}
-
-def jtblock_operand : Operand<i32> {
- let PrintMethod = "printJTBlockOperand";
-}
-def jt2block_operand : Operand<i32> {
- let PrintMethod = "printJT2BlockOperand";
-}
-
-// Local PC labels.
-def pclabel : Operand<i32> {
- let PrintMethod = "printPCLabel";
-}
-
-// shifter_operand operands: so_reg and so_imm.
-def so_reg : Operand<i32>, // reg reg imm
- ComplexPattern<i32, 3, "SelectShifterOperandReg",
- [shl,srl,sra,rotr]> {
- let PrintMethod = "printSORegOperand";
- let MIOperandInfo = (ops GPR, GPR, i32imm);
-}
-
-// so_imm - Match a 32-bit shifter_operand immediate operand, which is an
-// 8-bit immediate rotated right by an even number of bits. so_imm values are
-// represented in the imm field in the same 12-bit form that they are encoded
-// into so_imm instructions: the 8-bit immediate is the least significant bits
-// [bits 0-7], the 4-bit shift amount is the next 4 bits [bits 8-11].
-def so_imm : Operand<i32>,
- PatLeaf<(imm), [{
- return ARM_AM::getSOImmVal(N->getZExtValue()) != -1;
- }]> {
- let PrintMethod = "printSOImmOperand";
-}
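The architectural rule behind this operand is that the constant must be an 8-bit value rotated right by an even amount. A brute-force C++ check (a simplified stand-in for ARM_AM::getSOImmVal, not the real helper) shows which constants qualify:

#include <cstdint>
#include <cstdio>

static uint32_t rotr32(uint32_t v, unsigned amt) {
  amt &= 31;
  return amt ? (v >> amt) | (v << (32 - amt)) : v;
}

// True if v == rotr(imm8, 2*rot) for some imm8 in [0,255], rot in [0,15].
static bool isSOImm(uint32_t v) {
  for (unsigned rot = 0; rot < 16; ++rot)
    if ((rotr32(v, 32 - 2 * rot) & ~0xffu) == 0)   // undo the rotation
      return true;
  return false;
}

int main() {
  std::printf("%d %d %d\n", isSOImm(0xabu),          // 1: rot = 0
                            isSOImm(0x00ab0000u),    // 1: 0xab ror 16
                            isSOImm(0x0001fe00u));   // 0: would need an odd rotation
}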
-
-// Break so_imm's up into two pieces. This handles immediates with up to 16
-// bits set in them. This uses so_imm2part to match and so_imm2part_[12] to
-// get the first/second pieces.
-def so_imm2part : Operand<i32>,
- PatLeaf<(imm), [{
- return ARM_AM::isSOImmTwoPartVal((unsigned)N->getZExtValue());
- }]> {
- let PrintMethod = "printSOImm2PartOperand";
-}
-
-def so_imm2part_1 : SDNodeXForm<imm, [{
- unsigned V = ARM_AM::getSOImmTwoPartFirst((unsigned)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-}]>;
-
-def so_imm2part_2 : SDNodeXForm<imm, [{
- unsigned V = ARM_AM::getSOImmTwoPartSecond((unsigned)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-}]>;
-
-def so_neg_imm2part : Operand<i32>, PatLeaf<(imm), [{
- return ARM_AM::isSOImmTwoPartVal(-(int)N->getZExtValue());
- }]> {
- let PrintMethod = "printSOImm2PartOperand";
-}
-
-def so_neg_imm2part_1 : SDNodeXForm<imm, [{
- unsigned V = ARM_AM::getSOImmTwoPartFirst(-(int)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-}]>;
-
-def so_neg_imm2part_2 : SDNodeXForm<imm, [{
- unsigned V = ARM_AM::getSOImmTwoPartSecond(-(int)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-}]>;
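For constants that are not a single so_imm but can be covered by two of them, the operands above split the value (the so_neg_* variants apply the same idea to -V). The in-tree ARM_AM helpers compute the split directly; the brute-force sketch below (reusing the simplified isSOImm idea, all names mine) is only meant to show which constants qualify, e.g. 0x00ff00ff = 0x00ff0000 | 0x000000ff:

#include <cstdint>
#include <cstdio>

static uint32_t rotr32(uint32_t v, unsigned amt) {
  amt &= 31;
  return amt ? (v >> amt) | (v << (32 - amt)) : v;
}

static bool isSOImm(uint32_t v) {                 // one rotated 8-bit piece
  for (unsigned rot = 0; rot < 16; ++rot)
    if ((rotr32(v, 32 - 2 * rot) & ~0xffu) == 0)
      return true;
  return false;
}

// Can v be covered by two rotated 8-bit pieces OR'd together?
static bool isSOImmTwoPart(uint32_t v) {
  for (unsigned rot = 0; rot < 16; ++rot) {
    uint32_t window = rotr32(0xffu, 2 * rot);     // bits one piece could hold
    if ((v & window) && isSOImm(v & ~window))     // the rest must also encode
      return true;
  }
  return false;
}

int main() {
  std::printf("%d %d\n", isSOImmTwoPart(0x00ff00ffu),   // 1: 0x00ff0000 | 0xff
                         isSOImmTwoPart(0xffffff00u));  // 0: needs three pieces
}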
-
-/// imm0_31 predicate - True if the 32-bit immediate is in the range [0,31].
-def imm0_31 : Operand<i32>, PatLeaf<(imm), [{
- return (int32_t)N->getZExtValue() < 32;
-}]>;
-
-// Define ARM specific addressing modes.
-
-// addrmode2 := reg +/- reg shop imm
-// addrmode2 := reg +/- imm12
-//
-def addrmode2 : Operand<i32>,
- ComplexPattern<i32, 3, "SelectAddrMode2", []> {
- let PrintMethod = "printAddrMode2Operand";
- let MIOperandInfo = (ops GPR:$base, GPR:$offsreg, i32imm:$offsimm);
-}
-
-def am2offset : Operand<i32>,
- ComplexPattern<i32, 2, "SelectAddrMode2Offset", []> {
- let PrintMethod = "printAddrMode2OffsetOperand";
- let MIOperandInfo = (ops GPR, i32imm);
-}
-
-// addrmode3 := reg +/- reg
-// addrmode3 := reg +/- imm8
-//
-def addrmode3 : Operand<i32>,
- ComplexPattern<i32, 3, "SelectAddrMode3", []> {
- let PrintMethod = "printAddrMode3Operand";
- let MIOperandInfo = (ops GPR:$base, GPR:$offsreg, i32imm:$offsimm);
-}
-
-def am3offset : Operand<i32>,
- ComplexPattern<i32, 2, "SelectAddrMode3Offset", []> {
- let PrintMethod = "printAddrMode3OffsetOperand";
- let MIOperandInfo = (ops GPR, i32imm);
-}
-
-// addrmode4 := reg, <mode|W>
-//
-def addrmode4 : Operand<i32>,
- ComplexPattern<i32, 2, "SelectAddrMode4", []> {
- let PrintMethod = "printAddrMode4Operand";
- let MIOperandInfo = (ops GPR, i32imm);
-}
-
-// addrmode5 := reg +/- imm8*4
-//
-def addrmode5 : Operand<i32>,
- ComplexPattern<i32, 2, "SelectAddrMode5", []> {
- let PrintMethod = "printAddrMode5Operand";
- let MIOperandInfo = (ops GPR, i32imm);
-}
-
-// addrmode6 := reg with optional writeback
-//
-def addrmode6 : Operand<i32>,
- ComplexPattern<i32, 4, "SelectAddrMode6", []> {
- let PrintMethod = "printAddrMode6Operand";
- let MIOperandInfo = (ops GPR:$addr, GPR:$upd, i32imm, i32imm);
-}
-
-// addrmodepc := pc + reg
-//
-def addrmodepc : Operand<i32>,
- ComplexPattern<i32, 2, "SelectAddrModePC", []> {
- let PrintMethod = "printAddrModePCOperand";
- let MIOperandInfo = (ops GPR, i32imm);
-}
-
-def nohash_imm : Operand<i32> {
- let PrintMethod = "printNoHashImmediate";
-}
-
-//===----------------------------------------------------------------------===//
-
-include "ARMInstrFormats.td"
-
-//===----------------------------------------------------------------------===//
-// Multiclass helpers...
-//
-
-/// AsI1_bin_irs - Defines a set of (op r, {so_imm|r|so_reg}) patterns for a
-/// binop that produces a value.
-multiclass AsI1_bin_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Commutable = 0> {
- def ri : AsI1<opcod, (outs GPR:$dst), (ins GPR:$a, so_imm:$b), DPFrm,
- IIC_iALUi, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, so_imm:$b))]> {
- let Inst{25} = 1;
- }
- def rr : AsI1<opcod, (outs GPR:$dst), (ins GPR:$a, GPR:$b), DPFrm,
- IIC_iALUr, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, GPR:$b))]> {
- let Inst{11-4} = 0b00000000;
- let Inst{25} = 0;
- let isCommutable = Commutable;
- }
- def rs : AsI1<opcod, (outs GPR:$dst), (ins GPR:$a, so_reg:$b), DPSoRegFrm,
- IIC_iALUsr, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, so_reg:$b))]> {
- let Inst{25} = 0;
- }
-}
-
-/// AI1_bin_s_irs - Similar to AsI1_bin_irs except it sets the 's' bit so the
-/// instruction modifies the CPSR register.
-let Defs = [CPSR] in {
-multiclass AI1_bin_s_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Commutable = 0> {
- def ri : AI1<opcod, (outs GPR:$dst), (ins GPR:$a, so_imm:$b), DPFrm,
- IIC_iALUi, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, so_imm:$b))]> {
- let Inst{20} = 1;
- let Inst{25} = 1;
- }
- def rr : AI1<opcod, (outs GPR:$dst), (ins GPR:$a, GPR:$b), DPFrm,
- IIC_iALUr, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, GPR:$b))]> {
- let isCommutable = Commutable;
- let Inst{11-4} = 0b00000000;
- let Inst{20} = 1;
- let Inst{25} = 0;
- }
- def rs : AI1<opcod, (outs GPR:$dst), (ins GPR:$a, so_reg:$b), DPSoRegFrm,
- IIC_iALUsr, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, so_reg:$b))]> {
- let Inst{20} = 1;
- let Inst{25} = 0;
- }
-}
-}
-
-/// AI1_cmp_irs - Defines a set of (op r, {so_imm|r|so_reg}) cmp / test
-/// patterns. Similar to AsI1_bin_irs except the instruction does not produce
-/// an explicit result; it only implicitly sets CPSR.
-let Defs = [CPSR] in {
-multiclass AI1_cmp_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Commutable = 0> {
- def ri : AI1<opcod, (outs), (ins GPR:$a, so_imm:$b), DPFrm, IIC_iCMPi,
- opc, "\t$a, $b",
- [(opnode GPR:$a, so_imm:$b)]> {
- let Inst{20} = 1;
- let Inst{25} = 1;
- }
- def rr : AI1<opcod, (outs), (ins GPR:$a, GPR:$b), DPFrm, IIC_iCMPr,
- opc, "\t$a, $b",
- [(opnode GPR:$a, GPR:$b)]> {
- let Inst{11-4} = 0b00000000;
- let Inst{20} = 1;
- let Inst{25} = 0;
- let isCommutable = Commutable;
- }
- def rs : AI1<opcod, (outs), (ins GPR:$a, so_reg:$b), DPSoRegFrm, IIC_iCMPsr,
- opc, "\t$a, $b",
- [(opnode GPR:$a, so_reg:$b)]> {
- let Inst{20} = 1;
- let Inst{25} = 0;
- }
-}
-}
-
-/// AI_unary_rrot - A unary operation with two forms: one whose operand is a
-/// register and one whose operand is a register rotated by 8/16/24.
-/// FIXME: Remove the 'r' variant. Its rot_imm is zero.
-multiclass AI_unary_rrot<bits<8> opcod, string opc, PatFrag opnode> {
- def r : AExtI<opcod, (outs GPR:$dst), (ins GPR:$src),
- IIC_iUNAr, opc, "\t$dst, $src",
- [(set GPR:$dst, (opnode GPR:$src))]>,
- Requires<[IsARM, HasV6]> {
- let Inst{11-10} = 0b00;
- let Inst{19-16} = 0b1111;
- }
- def r_rot : AExtI<opcod, (outs GPR:$dst), (ins GPR:$src, i32imm:$rot),
- IIC_iUNAsi, opc, "\t$dst, $src, ror $rot",
- [(set GPR:$dst, (opnode (rotr GPR:$src, rot_imm:$rot)))]>,
- Requires<[IsARM, HasV6]> {
- let Inst{19-16} = 0b1111;
- }
-}
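The r_rot form rotates first and then extends, so e.g. sxtb dst, src, ror #16 sign-extends byte 2 of src. A tiny C++ rendering of that pattern (helper name mine):

#include <cstdint>
#include <cstdio>

static uint32_t rotr32(uint32_t v, unsigned amt) {
  amt &= 31;
  return amt ? (v >> amt) | (v << (32 - amt)) : v;
}

// (sext_inreg (rotr src, rot), i8): rotate, then sign-extend the low byte.
static int32_t sxtb_ror(uint32_t src, unsigned rot) {
  return (int32_t)(int8_t)rotr32(src, rot);
}

int main() {
  std::printf("%d\n", sxtb_ror(0x00800000u, 16));  // byte 2 is 0x80 -> -128
}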
-
-multiclass AI_unary_rrot_np<bits<8> opcod, string opc> {
- def r : AExtI<opcod, (outs GPR:$dst), (ins GPR:$src),
- IIC_iUNAr, opc, "\t$dst, $src",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6]> {
- let Inst{11-10} = 0b00;
- let Inst{19-16} = 0b1111;
- }
- def r_rot : AExtI<opcod, (outs GPR:$dst), (ins GPR:$src, i32imm:$rot),
- IIC_iUNAsi, opc, "\t$dst, $src, ror $rot",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6]> {
- let Inst{19-16} = 0b1111;
- }
-}
-
-/// AI_bin_rrot - A binary operation with two forms: one whose operand is a
-/// register and one whose operand is a register rotated by 8/16/24.
-multiclass AI_bin_rrot<bits<8> opcod, string opc, PatFrag opnode> {
- def rr : AExtI<opcod, (outs GPR:$dst), (ins GPR:$LHS, GPR:$RHS),
- IIC_iALUr, opc, "\t$dst, $LHS, $RHS",
- [(set GPR:$dst, (opnode GPR:$LHS, GPR:$RHS))]>,
- Requires<[IsARM, HasV6]> {
- let Inst{11-10} = 0b00;
- }
- def rr_rot : AExtI<opcod, (outs GPR:$dst), (ins GPR:$LHS, GPR:$RHS,
- i32imm:$rot),
- IIC_iALUsi, opc, "\t$dst, $LHS, $RHS, ror $rot",
- [(set GPR:$dst, (opnode GPR:$LHS,
- (rotr GPR:$RHS, rot_imm:$rot)))]>,
- Requires<[IsARM, HasV6]>;
-}
-
-// For disassembly only.
-multiclass AI_bin_rrot_np<bits<8> opcod, string opc> {
- def rr : AExtI<opcod, (outs GPR:$dst), (ins GPR:$LHS, GPR:$RHS),
- IIC_iALUr, opc, "\t$dst, $LHS, $RHS",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6]> {
- let Inst{11-10} = 0b00;
- }
- def rr_rot : AExtI<opcod, (outs GPR:$dst), (ins GPR:$LHS, GPR:$RHS,
- i32imm:$rot),
- IIC_iALUsi, opc, "\t$dst, $LHS, $RHS, ror $rot",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6]>;
-}
-
-/// AI1_adde_sube_irs - Define instructions and patterns for adde and sube.
-let Uses = [CPSR] in {
-multiclass AI1_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Commutable = 0> {
- def ri : AsI1<opcod, (outs GPR:$dst), (ins GPR:$a, so_imm:$b),
- DPFrm, IIC_iALUi, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, so_imm:$b))]>,
- Requires<[IsARM]> {
- let Inst{25} = 1;
- }
- def rr : AsI1<opcod, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- DPFrm, IIC_iALUr, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, GPR:$b))]>,
- Requires<[IsARM]> {
- let isCommutable = Commutable;
- let Inst{11-4} = 0b00000000;
- let Inst{25} = 0;
- }
- def rs : AsI1<opcod, (outs GPR:$dst), (ins GPR:$a, so_reg:$b),
- DPSoRegFrm, IIC_iALUsr, opc, "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode GPR:$a, so_reg:$b))]>,
- Requires<[IsARM]> {
- let Inst{25} = 0;
- }
-}
-// Carry setting variants
-let Defs = [CPSR] in {
-multiclass AI1_adde_sube_s_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Commutable = 0> {
- def Sri : AXI1<opcod, (outs GPR:$dst), (ins GPR:$a, so_imm:$b),
- DPFrm, IIC_iALUi, !strconcat(opc, "\t$dst, $a, $b"),
- [(set GPR:$dst, (opnode GPR:$a, so_imm:$b))]>,
- Requires<[IsARM]> {
- let Inst{20} = 1;
- let Inst{25} = 1;
- }
- def Srr : AXI1<opcod, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- DPFrm, IIC_iALUr, !strconcat(opc, "\t$dst, $a, $b"),
- [(set GPR:$dst, (opnode GPR:$a, GPR:$b))]>,
- Requires<[IsARM]> {
- let Inst{11-4} = 0b00000000;
- let Inst{20} = 1;
- let Inst{25} = 0;
- }
- def Srs : AXI1<opcod, (outs GPR:$dst), (ins GPR:$a, so_reg:$b),
- DPSoRegFrm, IIC_iALUsr, !strconcat(opc, "\t$dst, $a, $b"),
- [(set GPR:$dst, (opnode GPR:$a, so_reg:$b))]>,
- Requires<[IsARM]> {
- let Inst{20} = 1;
- let Inst{25} = 0;
- }
-}
-}
-}
-
-//===----------------------------------------------------------------------===//
-// Instructions
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Miscellaneous Instructions.
-//
-
-/// CONSTPOOL_ENTRY - This instruction represents a floating constant pool in
-/// the function. The first operand is the ID# for this instruction, the second
-/// is the index into the MachineConstantPool that this is, the third is the
-/// size in bytes of this constant pool entry.
-let neverHasSideEffects = 1, isNotDuplicable = 1 in
-def CONSTPOOL_ENTRY :
-PseudoInst<(outs), (ins cpinst_operand:$instid, cpinst_operand:$cpidx,
- i32imm:$size), NoItinerary,
- "${instid:label} ${cpidx:cpentry}", []>;
-
-// FIXME: Marking these as hasSideEffects is necessary to prevent machine DCE
-// from removing one half of the matched pairs. That breaks PEI, which assumes
-// these will always be in pairs, and asserts if it finds otherwise. Better way?
-let Defs = [SP], Uses = [SP], hasSideEffects = 1 in {
-def ADJCALLSTACKUP :
-PseudoInst<(outs), (ins i32imm:$amt1, i32imm:$amt2, pred:$p), NoItinerary,
- "@ ADJCALLSTACKUP $amt1",
- [(ARMcallseq_end timm:$amt1, timm:$amt2)]>;
-
-def ADJCALLSTACKDOWN :
-PseudoInst<(outs), (ins i32imm:$amt, pred:$p), NoItinerary,
- "@ ADJCALLSTACKDOWN $amt",
- [(ARMcallseq_start timm:$amt)]>;
-}
-
-def NOP : AI<(outs), (ins), MiscFrm, NoItinerary, "nop", "",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6T2]> {
- let Inst{27-16} = 0b001100100000;
- let Inst{7-0} = 0b00000000;
-}
-
-def YIELD : AI<(outs), (ins), MiscFrm, NoItinerary, "yield", "",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6T2]> {
- let Inst{27-16} = 0b001100100000;
- let Inst{7-0} = 0b00000001;
-}
-
-def WFE : AI<(outs), (ins), MiscFrm, NoItinerary, "wfe", "",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6T2]> {
- let Inst{27-16} = 0b001100100000;
- let Inst{7-0} = 0b00000010;
-}
-
-def WFI : AI<(outs), (ins), MiscFrm, NoItinerary, "wfi", "",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6T2]> {
- let Inst{27-16} = 0b001100100000;
- let Inst{7-0} = 0b00000011;
-}
-
-def SEL : AI<(outs GPR:$dst), (ins GPR:$a, GPR:$b), DPFrm, NoItinerary, "sel",
- "\t$dst, $a, $b",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6]> {
- let Inst{27-20} = 0b01101000;
- let Inst{7-4} = 0b1011;
-}
-
-def SEV : AI<(outs), (ins), MiscFrm, NoItinerary, "sev", "",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6T2]> {
- let Inst{27-16} = 0b001100100000;
- let Inst{7-0} = 0b00000100;
-}
-
-// The i32imm operand $val can be used by a debugger to store more information
-// about the breakpoint.
-def BKPT : AI<(outs), (ins i32imm:$val), MiscFrm, NoItinerary, "bkpt", "\t$val",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM]> {
- let Inst{27-20} = 0b00010010;
- let Inst{7-4} = 0b0111;
-}
-
-// Change Processor State is a system instruction -- for disassembly only.
-// The singleton $opt operand contains the following information:
-// opt{4-0} = mode from Inst{4-0}
-// opt{5} = changemode from Inst{17}
-// opt{8-6} = AIF from Inst{8-6}
-// opt{10-9} = imod from Inst{19-18} with 0b10 as enable and 0b11 as disable
-def CPS : AXI<(outs),(ins i32imm:$opt), MiscFrm, NoItinerary, "cps${opt:cps}",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM]> {
- let Inst{31-28} = 0b1111;
- let Inst{27-20} = 0b00010000;
- let Inst{16} = 0;
- let Inst{5} = 0;
-}
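Packing the singleton $opt operand the way the comment above lays it out, assuming the usual A/I/F ordering of the AIF field (bit 8 = A, bit 7 = I, bit 6 = F); this only illustrates the field layout and is not code from the backend:

#include <cstdint>
#include <cstdio>

// opt{4-0}=mode, opt{5}=changemode, opt{8-6}=AIF, opt{10-9}=imod.
static uint32_t packCPSOpt(unsigned mode, bool changemode, unsigned aif,
                           unsigned imod) {
  return (mode & 0x1fu) | ((unsigned)changemode << 5) |
         ((aif & 0x7u) << 6) | ((imod & 0x3u) << 9);
}

int main() {
  // e.g. "cpsid if": imod = 0b11 (disable), I and F set, no mode change.
  std::printf("0x%03x\n", packCPSOpt(0, false, 0x3, 0x3));  // 0x6c0
}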
-
-// Preload hints the memory system about possible future data/instruction accesses.
-// These are for disassembly only.
-multiclass APreLoad<bit data, bit read, string opc> {
-
- def i : AXI<(outs), (ins GPR:$base, i32imm:$imm), MiscFrm, NoItinerary,
- !strconcat(opc, "\t[$base, $imm]"), []> {
- let Inst{31-26} = 0b111101;
- let Inst{25} = 0; // 0 for immediate form
- let Inst{24} = data;
- let Inst{22} = read;
- let Inst{21-20} = 0b01;
- }
-
- def r : AXI<(outs), (ins addrmode2:$addr), MiscFrm, NoItinerary,
- !strconcat(opc, "\t$addr"), []> {
- let Inst{31-26} = 0b111101;
- let Inst{25} = 1; // 1 for register form
- let Inst{24} = data;
- let Inst{22} = read;
- let Inst{21-20} = 0b01;
- let Inst{4} = 0;
- }
-}
-
-defm PLD : APreLoad<1, 1, "pld">;
-defm PLDW : APreLoad<1, 0, "pldw">;
-defm PLI : APreLoad<0, 1, "pli">;
-
-def SETENDBE : AXI<(outs),(ins), MiscFrm, NoItinerary, "setend\tbe",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM]> {
- let Inst{31-28} = 0b1111;
- let Inst{27-20} = 0b00010000;
- let Inst{16} = 1;
- let Inst{9} = 1;
- let Inst{7-4} = 0b0000;
-}
-
-def SETENDLE : AXI<(outs),(ins), MiscFrm, NoItinerary, "setend\tle",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM]> {
- let Inst{31-28} = 0b1111;
- let Inst{27-20} = 0b00010000;
- let Inst{16} = 1;
- let Inst{9} = 0;
- let Inst{7-4} = 0b0000;
-}
-
-def DBG : AI<(outs), (ins i32imm:$opt), MiscFrm, NoItinerary, "dbg", "\t$opt",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV7]> {
- let Inst{27-16} = 0b001100100000;
- let Inst{7-4} = 0b1111;
-}
-
-// A5.4 Permanently UNDEFINED instructions.
-def TRAP : AI<(outs), (ins), MiscFrm, NoItinerary, "trap", "",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM]> {
- let Inst{27-25} = 0b011;
- let Inst{24-20} = 0b11111;
- let Inst{7-5} = 0b111;
- let Inst{4} = 0b1;
-}
-
-// Address computation and loads and stores in PIC mode.
-let isNotDuplicable = 1 in {
-def PICADD : AXI1<0b0100, (outs GPR:$dst), (ins GPR:$a, pclabel:$cp, pred:$p),
- Pseudo, IIC_iALUr, "\n$cp:\n\tadd$p\t$dst, pc, $a",
- [(set GPR:$dst, (ARMpic_add GPR:$a, imm:$cp))]>;
-
-let AddedComplexity = 10 in {
-def PICLDR : AXI2ldw<(outs GPR:$dst), (ins addrmodepc:$addr, pred:$p),
- Pseudo, IIC_iLoadr, "\n${addr:label}:\n\tldr$p\t$dst, $addr",
- [(set GPR:$dst, (load addrmodepc:$addr))]>;
-
-def PICLDRH : AXI3ldh<(outs GPR:$dst), (ins addrmodepc:$addr, pred:$p),
- Pseudo, IIC_iLoadr, "\n${addr:label}:\n\tldrh${p}\t$dst, $addr",
- [(set GPR:$dst, (zextloadi16 addrmodepc:$addr))]>;
-
-def PICLDRB : AXI2ldb<(outs GPR:$dst), (ins addrmodepc:$addr, pred:$p),
- Pseudo, IIC_iLoadr, "\n${addr:label}:\n\tldrb${p}\t$dst, $addr",
- [(set GPR:$dst, (zextloadi8 addrmodepc:$addr))]>;
-
-def PICLDRSH : AXI3ldsh<(outs GPR:$dst), (ins addrmodepc:$addr, pred:$p),
- Pseudo, IIC_iLoadr, "\n${addr:label}:\n\tldrsh${p}\t$dst, $addr",
- [(set GPR:$dst, (sextloadi16 addrmodepc:$addr))]>;
-
-def PICLDRSB : AXI3ldsb<(outs GPR:$dst), (ins addrmodepc:$addr, pred:$p),
- Pseudo, IIC_iLoadr, "\n${addr:label}:\n\tldrsb${p}\t$dst, $addr",
- [(set GPR:$dst, (sextloadi8 addrmodepc:$addr))]>;
-}
-let AddedComplexity = 10 in {
-def PICSTR : AXI2stw<(outs), (ins GPR:$src, addrmodepc:$addr, pred:$p),
- Pseudo, IIC_iStorer, "\n${addr:label}:\n\tstr$p\t$src, $addr",
- [(store GPR:$src, addrmodepc:$addr)]>;
-
-def PICSTRH : AXI3sth<(outs), (ins GPR:$src, addrmodepc:$addr, pred:$p),
- Pseudo, IIC_iStorer, "\n${addr:label}:\n\tstrh${p}\t$src, $addr",
- [(truncstorei16 GPR:$src, addrmodepc:$addr)]>;
-
-def PICSTRB : AXI2stb<(outs), (ins GPR:$src, addrmodepc:$addr, pred:$p),
- Pseudo, IIC_iStorer, "\n${addr:label}:\n\tstrb${p}\t$src, $addr",
- [(truncstorei8 GPR:$src, addrmodepc:$addr)]>;
-}
-} // isNotDuplicable = 1
-
-
-// LEApcrel - Load a pc-relative address into a register without offending the
-// assembler.
-def LEApcrel : AXI1<0x0, (outs GPR:$dst), (ins i32imm:$label, pred:$p),
- Pseudo, IIC_iALUi,
- !strconcat(!strconcat(".set ${:private}PCRELV${:uid}, ($label-(",
- "${:private}PCRELL${:uid}+8))\n"),
- !strconcat("${:private}PCRELL${:uid}:\n\t",
- "add$p\t$dst, pc, #${:private}PCRELV${:uid}")),
- []>;
-
-def LEApcrelJT : AXI1<0x0, (outs GPR:$dst),
- (ins i32imm:$label, nohash_imm:$id, pred:$p),
- Pseudo, IIC_iALUi,
- !strconcat(!strconcat(".set ${:private}PCRELV${:uid}, "
- "(${label}_${id}-(",
- "${:private}PCRELL${:uid}+8))\n"),
- !strconcat("${:private}PCRELL${:uid}:\n\t",
- "add$p\t$dst, pc, #${:private}PCRELV${:uid}")),
- []> {
- let Inst{25} = 1;
-}
-
-//===----------------------------------------------------------------------===//
-// Control Flow Instructions.
-//
-
-let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
- // ARMV4T and above
- def BX_RET : AI<(outs), (ins), BrMiscFrm, IIC_Br,
- "bx", "\tlr", [(ARMretflag)]>,
- Requires<[IsARM, HasV4T]> {
- let Inst{3-0} = 0b1110;
- let Inst{7-4} = 0b0001;
- let Inst{19-8} = 0b111111111111;
- let Inst{27-20} = 0b00010010;
- }
-
- // ARMV4 only
- def MOVPCLR : AI<(outs), (ins), BrMiscFrm, IIC_Br,
- "mov", "\tpc, lr", [(ARMretflag)]>,
- Requires<[IsARM, NoV4T]> {
- let Inst{11-0} = 0b000000001110;
- let Inst{15-12} = 0b1111;
- let Inst{19-16} = 0b0000;
- let Inst{27-20} = 0b00011010;
- }
-}
-
-// Indirect branches
-let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
- // ARMV4T and above
- def BRIND : AXI<(outs), (ins GPR:$dst), BrMiscFrm, IIC_Br, "bx\t$dst",
- [(brind GPR:$dst)]>,
- Requires<[IsARM, HasV4T]> {
- let Inst{7-4} = 0b0001;
- let Inst{19-8} = 0b111111111111;
- let Inst{27-20} = 0b00010010;
- let Inst{31-28} = 0b1110;
- }
-
- // ARMV4 only
- def MOVPCRX : AXI<(outs), (ins GPR:$dst), BrMiscFrm, IIC_Br, "mov\tpc, $dst",
- [(brind GPR:$dst)]>,
- Requires<[IsARM, NoV4T]> {
- let Inst{11-4} = 0b00000000;
- let Inst{15-12} = 0b1111;
- let Inst{19-16} = 0b0000;
- let Inst{27-20} = 0b00011010;
- let Inst{31-28} = 0b1110;
- }
-}
-
-// FIXME: remove when we have a way to mark an MI with these properties.
-// FIXME: Should pc be an implicit operand like PICADD, etc?
-let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1,
- hasExtraDefRegAllocReq = 1 in
- def LDM_RET : AXI4ld<(outs),
- (ins addrmode4:$addr, pred:$p, reglist:$wb, variable_ops),
- LdStMulFrm, IIC_Br, "ldm${addr:submode}${p}\t$addr, $wb",
- []>;
-
-// On non-Darwin platforms R9 is callee-saved.
-let isCall = 1,
- Defs = [R0, R1, R2, R3, R12, LR,
- D0, D1, D2, D3, D4, D5, D6, D7,
- D16, D17, D18, D19, D20, D21, D22, D23,
- D24, D25, D26, D27, D28, D29, D30, D31, CPSR, FPSCR] in {
- def BL : ABXI<0b1011, (outs), (ins i32imm:$func, variable_ops),
- IIC_Br, "bl\t${func:call}",
- [(ARMcall tglobaladdr:$func)]>,
- Requires<[IsARM, IsNotDarwin]> {
- let Inst{31-28} = 0b1110;
- }
-
- def BL_pred : ABI<0b1011, (outs), (ins i32imm:$func, variable_ops),
- IIC_Br, "bl", "\t${func:call}",
- [(ARMcall_pred tglobaladdr:$func)]>,
- Requires<[IsARM, IsNotDarwin]>;
-
- // ARMv5T and above
- def BLX : AXI<(outs), (ins GPR:$func, variable_ops), BrMiscFrm,
- IIC_Br, "blx\t$func",
- [(ARMcall GPR:$func)]>,
- Requires<[IsARM, HasV5T, IsNotDarwin]> {
- let Inst{7-4} = 0b0011;
- let Inst{19-8} = 0b111111111111;
- let Inst{27-20} = 0b00010010;
- }
-
- // ARMv4T
- // Note: Restrict $func to the tGPR regclass to prevent it being in LR.
- def BX : ABXIx2<(outs), (ins tGPR:$func, variable_ops),
- IIC_Br, "mov\tlr, pc\n\tbx\t$func",
- [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsARM, HasV4T, IsNotDarwin]> {
- let Inst{7-4} = 0b0001;
- let Inst{19-8} = 0b111111111111;
- let Inst{27-20} = 0b00010010;
- }
-
- // ARMv4
- def BMOVPCRX : ABXIx2<(outs), (ins tGPR:$func, variable_ops),
- IIC_Br, "mov\tlr, pc\n\tmov\tpc, $func",
- [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsARM, NoV4T, IsNotDarwin]> {
- let Inst{11-4} = 0b00000000;
- let Inst{15-12} = 0b1111;
- let Inst{19-16} = 0b0000;
- let Inst{27-20} = 0b00011010;
- }
-}
-
-// On Darwin R9 is call-clobbered.
-let isCall = 1,
- Defs = [R0, R1, R2, R3, R9, R12, LR,
- D0, D1, D2, D3, D4, D5, D6, D7,
- D16, D17, D18, D19, D20, D21, D22, D23,
- D24, D25, D26, D27, D28, D29, D30, D31, CPSR, FPSCR] in {
- def BLr9 : ABXI<0b1011, (outs), (ins i32imm:$func, variable_ops),
- IIC_Br, "bl\t${func:call}",
- [(ARMcall tglobaladdr:$func)]>, Requires<[IsARM, IsDarwin]> {
- let Inst{31-28} = 0b1110;
- }
-
- def BLr9_pred : ABI<0b1011, (outs), (ins i32imm:$func, variable_ops),
- IIC_Br, "bl", "\t${func:call}",
- [(ARMcall_pred tglobaladdr:$func)]>,
- Requires<[IsARM, IsDarwin]>;
-
- // ARMv5T and above
- def BLXr9 : AXI<(outs), (ins GPR:$func, variable_ops), BrMiscFrm,
- IIC_Br, "blx\t$func",
- [(ARMcall GPR:$func)]>, Requires<[IsARM, HasV5T, IsDarwin]> {
- let Inst{7-4} = 0b0011;
- let Inst{19-8} = 0b111111111111;
- let Inst{27-20} = 0b00010010;
- }
-
- // ARMv4T
- // Note: Restrict $func to the tGPR regclass to prevent it being in LR.
- def BXr9 : ABXIx2<(outs), (ins tGPR:$func, variable_ops),
- IIC_Br, "mov\tlr, pc\n\tbx\t$func",
- [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsARM, HasV4T, IsDarwin]> {
- let Inst{7-4} = 0b0001;
- let Inst{19-8} = 0b111111111111;
- let Inst{27-20} = 0b00010010;
- }
-
- // ARMv4
- def BMOVPCRXr9 : ABXIx2<(outs), (ins tGPR:$func, variable_ops),
- IIC_Br, "mov\tlr, pc\n\tmov\tpc, $func",
- [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsARM, NoV4T, IsDarwin]> {
- let Inst{11-4} = 0b00000000;
- let Inst{15-12} = 0b1111;
- let Inst{19-16} = 0b0000;
- let Inst{27-20} = 0b00011010;
- }
-}
-
-let isBranch = 1, isTerminator = 1 in {
- // B is "predicable" since it can be xformed into a Bcc.
- let isBarrier = 1 in {
- let isPredicable = 1 in
- def B : ABXI<0b1010, (outs), (ins brtarget:$target), IIC_Br,
- "b\t$target", [(br bb:$target)]>;
-
- let isNotDuplicable = 1, isIndirectBranch = 1 in {
- def BR_JTr : JTI<(outs), (ins GPR:$target, jtblock_operand:$jt, i32imm:$id),
- IIC_Br, "mov\tpc, $target \n$jt",
- [(ARMbrjt GPR:$target, tjumptable:$jt, imm:$id)]> {
- let Inst{11-4} = 0b00000000;
- let Inst{15-12} = 0b1111;
- let Inst{20} = 0; // S Bit
- let Inst{24-21} = 0b1101;
- let Inst{27-25} = 0b000;
- }
- def BR_JTm : JTI<(outs),
- (ins addrmode2:$target, jtblock_operand:$jt, i32imm:$id),
- IIC_Br, "ldr\tpc, $target \n$jt",
- [(ARMbrjt (i32 (load addrmode2:$target)), tjumptable:$jt,
- imm:$id)]> {
- let Inst{15-12} = 0b1111;
- let Inst{20} = 1; // L bit
- let Inst{21} = 0; // W bit
- let Inst{22} = 0; // B bit
- let Inst{24} = 1; // P bit
- let Inst{27-25} = 0b011;
- }
- def BR_JTadd : JTI<(outs),
- (ins GPR:$target, GPR:$idx, jtblock_operand:$jt, i32imm:$id),
- IIC_Br, "add\tpc, $target, $idx \n$jt",
- [(ARMbrjt (add GPR:$target, GPR:$idx), tjumptable:$jt,
- imm:$id)]> {
- let Inst{15-12} = 0b1111;
- let Inst{20} = 0; // S bit
- let Inst{24-21} = 0b0100;
- let Inst{27-25} = 0b000;
- }
- } // isNotDuplicable = 1, isIndirectBranch = 1
- } // isBarrier = 1
-
- // FIXME: should be able to write a pattern for ARMBrcond, but can't use
- // a two-value operand where a dag node expects two operands. :(
- def Bcc : ABI<0b1010, (outs), (ins brtarget:$target),
- IIC_Br, "b", "\t$target",
- [/*(ARMbrcond bb:$target, imm:$cc, CCR:$ccr)*/]>;
-}
-
-// Branch and Exchange Jazelle -- for disassembly only
-def BXJ : ABI<0b0001, (outs), (ins GPR:$func), NoItinerary, "bxj", "\t$func",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0010;
- //let Inst{19-8} = 0xfff;
- let Inst{7-4} = 0b0010;
-}
-
-// Secure Monitor Call is a system instruction -- for disassembly only
-def SMC : ABI<0b0001, (outs), (ins i32imm:$opt), NoItinerary, "smc", "\t$opt",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0110;
- let Inst{7-4} = 0b0111;
-}
-
-// Supervisor Call (Software Interrupt) -- for disassembly only
-let isCall = 1 in {
-def SVC : ABI<0b1111, (outs), (ins i32imm:$svc), IIC_Br, "svc", "\t$svc",
- [/* For disassembly only; pattern left blank */]>;
-}
-
-// Store Return State is a system instruction -- for disassembly only
-def SRSW : ABXI<{1,0,0,?}, (outs), (ins addrmode4:$addr, i32imm:$mode),
- NoItinerary, "srs${addr:submode}\tsp!, $mode",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-28} = 0b1111;
- let Inst{22-20} = 0b110; // W = 1
-}
-
-def SRS : ABXI<{1,0,0,?}, (outs), (ins addrmode4:$addr, i32imm:$mode),
- NoItinerary, "srs${addr:submode}\tsp, $mode",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-28} = 0b1111;
- let Inst{22-20} = 0b100; // W = 0
-}
-
-// Return From Exception is a system instruction -- for disassembly only
-def RFEW : ABXI<{1,0,0,?}, (outs), (ins addrmode4:$addr, GPR:$base),
- NoItinerary, "rfe${addr:submode}\t$base!",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-28} = 0b1111;
- let Inst{22-20} = 0b011; // W = 1
-}
-
-def RFE : ABXI<{1,0,0,?}, (outs), (ins addrmode4:$addr, GPR:$base),
- NoItinerary, "rfe${addr:submode}\t$base",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-28} = 0b1111;
- let Inst{22-20} = 0b001; // W = 0
-}
-
-//===----------------------------------------------------------------------===//
-// Load / store Instructions.
-//
-
-// Load
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def LDR : AI2ldw<(outs GPR:$dst), (ins addrmode2:$addr), LdFrm, IIC_iLoadr,
- "ldr", "\t$dst, $addr",
- [(set GPR:$dst, (load addrmode2:$addr))]>;
-
-// Special LDR for loads from non-pc-relative constpools.
-let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1 in
-def LDRcp : AI2ldw<(outs GPR:$dst), (ins addrmode2:$addr), LdFrm, IIC_iLoadr,
- "ldr", "\t$dst, $addr", []>;
-
-// Loads with zero extension
-def LDRH : AI3ldh<(outs GPR:$dst), (ins addrmode3:$addr), LdMiscFrm,
- IIC_iLoadr, "ldrh", "\t$dst, $addr",
- [(set GPR:$dst, (zextloadi16 addrmode3:$addr))]>;
-
-def LDRB : AI2ldb<(outs GPR:$dst), (ins addrmode2:$addr), LdFrm,
- IIC_iLoadr, "ldrb", "\t$dst, $addr",
- [(set GPR:$dst, (zextloadi8 addrmode2:$addr))]>;
-
-// Loads with sign extension
-def LDRSH : AI3ldsh<(outs GPR:$dst), (ins addrmode3:$addr), LdMiscFrm,
- IIC_iLoadr, "ldrsh", "\t$dst, $addr",
- [(set GPR:$dst, (sextloadi16 addrmode3:$addr))]>;
-
-def LDRSB : AI3ldsb<(outs GPR:$dst), (ins addrmode3:$addr), LdMiscFrm,
- IIC_iLoadr, "ldrsb", "\t$dst, $addr",
- [(set GPR:$dst, (sextloadi8 addrmode3:$addr))]>;
-
-let mayLoad = 1, hasExtraDefRegAllocReq = 1 in {
-// Load doubleword
-def LDRD : AI3ldd<(outs GPR:$dst1, GPR:$dst2), (ins addrmode3:$addr), LdMiscFrm,
- IIC_iLoadr, "ldrd", "\t$dst1, $addr",
- []>, Requires<[IsARM, HasV5TE]>;
-
-// Indexed loads
-def LDR_PRE : AI2ldwpr<(outs GPR:$dst, GPR:$base_wb),
- (ins addrmode2:$addr), LdFrm, IIC_iLoadru,
- "ldr", "\t$dst, $addr!", "$addr.base = $base_wb", []>;
-
-def LDR_POST : AI2ldwpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base, am2offset:$offset), LdFrm, IIC_iLoadru,
- "ldr", "\t$dst, [$base], $offset", "$base = $base_wb", []>;
-
-def LDRH_PRE : AI3ldhpr<(outs GPR:$dst, GPR:$base_wb),
- (ins addrmode3:$addr), LdMiscFrm, IIC_iLoadru,
- "ldrh", "\t$dst, $addr!", "$addr.base = $base_wb", []>;
-
-def LDRH_POST : AI3ldhpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base,am3offset:$offset), LdMiscFrm, IIC_iLoadru,
- "ldrh", "\t$dst, [$base], $offset", "$base = $base_wb", []>;
-
-def LDRB_PRE : AI2ldbpr<(outs GPR:$dst, GPR:$base_wb),
- (ins addrmode2:$addr), LdFrm, IIC_iLoadru,
- "ldrb", "\t$dst, $addr!", "$addr.base = $base_wb", []>;
-
-def LDRB_POST : AI2ldbpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base,am2offset:$offset), LdFrm, IIC_iLoadru,
- "ldrb", "\t$dst, [$base], $offset", "$base = $base_wb", []>;
-
-def LDRSH_PRE : AI3ldshpr<(outs GPR:$dst, GPR:$base_wb),
- (ins addrmode3:$addr), LdMiscFrm, IIC_iLoadru,
- "ldrsh", "\t$dst, $addr!", "$addr.base = $base_wb", []>;
-
-def LDRSH_POST: AI3ldshpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base,am3offset:$offset), LdMiscFrm, IIC_iLoadru,
- "ldrsh", "\t$dst, [$base], $offset", "$base = $base_wb", []>;
-
-def LDRSB_PRE : AI3ldsbpr<(outs GPR:$dst, GPR:$base_wb),
- (ins addrmode3:$addr), LdMiscFrm, IIC_iLoadru,
- "ldrsb", "\t$dst, $addr!", "$addr.base = $base_wb", []>;
-
-def LDRSB_POST: AI3ldsbpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base,am3offset:$offset), LdMiscFrm, IIC_iLoadru,
- "ldrsb", "\t$dst, [$base], $offset", "$base = $base_wb", []>;
-
-// For disassembly only
-def LDRD_PRE : AI3lddpr<(outs GPR:$dst1, GPR:$dst2, GPR:$base_wb),
- (ins addrmode3:$addr), LdMiscFrm, IIC_iLoadr,
- "ldrd", "\t$dst1, $dst2, $addr!", "$addr.base = $base_wb", []>,
- Requires<[IsARM, HasV5TE]>;
-
-// For disassembly only
-def LDRD_POST : AI3lddpo<(outs GPR:$dst1, GPR:$dst2, GPR:$base_wb),
- (ins GPR:$base,am3offset:$offset), LdMiscFrm, IIC_iLoadr,
- "ldrd", "\t$dst1, $dst2, [$base], $offset", "$base = $base_wb", []>,
- Requires<[IsARM, HasV5TE]>;
-
-}
-
-// LDRT, LDRBT, LDRSBT, LDRHT, LDRSHT are for disassembly only.
-
-def LDRT : AI2ldwpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base, am2offset:$offset), LdFrm, IIC_iLoadru,
- "ldrt", "\t$dst, [$base], $offset", "$base = $base_wb", []> {
- let Inst{21} = 1; // overwrite
-}
-
-def LDRBT : AI2ldbpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base,am2offset:$offset), LdFrm, IIC_iLoadru,
- "ldrbt", "\t$dst, [$base], $offset", "$base = $base_wb", []> {
- let Inst{21} = 1; // overwrite
-}
-
-def LDRSBT : AI3ldsbpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base,am2offset:$offset), LdMiscFrm, IIC_iLoadru,
- "ldrsbt", "\t$dst, [$base], $offset", "$base = $base_wb", []> {
- let Inst{21} = 1; // overwrite
-}
-
-def LDRHT : AI3ldhpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base, am3offset:$offset), LdMiscFrm, IIC_iLoadru,
- "ldrht", "\t$dst, [$base], $offset", "$base = $base_wb", []> {
- let Inst{21} = 1; // overwrite
-}
-
-def LDRSHT : AI3ldshpo<(outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base,am3offset:$offset), LdMiscFrm, IIC_iLoadru,
- "ldrsht", "\t$dst, [$base], $offset", "$base = $base_wb", []> {
- let Inst{21} = 1; // overwrite
-}
-
-// Store
-def STR : AI2stw<(outs), (ins GPR:$src, addrmode2:$addr), StFrm, IIC_iStorer,
- "str", "\t$src, $addr",
- [(store GPR:$src, addrmode2:$addr)]>;
-
-// Stores with truncate
-def STRH : AI3sth<(outs), (ins GPR:$src, addrmode3:$addr), StMiscFrm,
- IIC_iStorer, "strh", "\t$src, $addr",
- [(truncstorei16 GPR:$src, addrmode3:$addr)]>;
-
-def STRB : AI2stb<(outs), (ins GPR:$src, addrmode2:$addr), StFrm, IIC_iStorer,
- "strb", "\t$src, $addr",
- [(truncstorei8 GPR:$src, addrmode2:$addr)]>;
-
-// Store doubleword
-let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
-def STRD : AI3std<(outs), (ins GPR:$src1, GPR:$src2, addrmode3:$addr),
- StMiscFrm, IIC_iStorer,
- "strd", "\t$src1, $addr", []>, Requires<[IsARM, HasV5TE]>;
-
-// Indexed stores
-def STR_PRE : AI2stwpr<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base, am2offset:$offset),
- StFrm, IIC_iStoreru,
- "str", "\t$src, [$base, $offset]!", "$base = $base_wb",
- [(set GPR:$base_wb,
- (pre_store GPR:$src, GPR:$base, am2offset:$offset))]>;
-
-def STR_POST : AI2stwpo<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base,am2offset:$offset),
- StFrm, IIC_iStoreru,
- "str", "\t$src, [$base], $offset", "$base = $base_wb",
- [(set GPR:$base_wb,
- (post_store GPR:$src, GPR:$base, am2offset:$offset))]>;
-
-def STRH_PRE : AI3sthpr<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base,am3offset:$offset),
- StMiscFrm, IIC_iStoreru,
- "strh", "\t$src, [$base, $offset]!", "$base = $base_wb",
- [(set GPR:$base_wb,
- (pre_truncsti16 GPR:$src, GPR:$base,am3offset:$offset))]>;
-
-def STRH_POST: AI3sthpo<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base,am3offset:$offset),
- StMiscFrm, IIC_iStoreru,
- "strh", "\t$src, [$base], $offset", "$base = $base_wb",
- [(set GPR:$base_wb, (post_truncsti16 GPR:$src,
- GPR:$base, am3offset:$offset))]>;
-
-def STRB_PRE : AI2stbpr<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base,am2offset:$offset),
- StFrm, IIC_iStoreru,
- "strb", "\t$src, [$base, $offset]!", "$base = $base_wb",
- [(set GPR:$base_wb, (pre_truncsti8 GPR:$src,
- GPR:$base, am2offset:$offset))]>;
-
-def STRB_POST: AI2stbpo<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base,am2offset:$offset),
- StFrm, IIC_iStoreru,
- "strb", "\t$src, [$base], $offset", "$base = $base_wb",
- [(set GPR:$base_wb, (post_truncsti8 GPR:$src,
- GPR:$base, am2offset:$offset))]>;
-
-// For disassembly only
-def STRD_PRE : AI3stdpr<(outs GPR:$base_wb),
- (ins GPR:$src1, GPR:$src2, GPR:$base, am3offset:$offset),
- StMiscFrm, IIC_iStoreru,
- "strd", "\t$src1, $src2, [$base, $offset]!",
- "$base = $base_wb", []>;
-
-// For disassembly only
-def STRD_POST: AI3stdpo<(outs GPR:$base_wb),
- (ins GPR:$src1, GPR:$src2, GPR:$base, am3offset:$offset),
- StMiscFrm, IIC_iStoreru,
- "strd", "\t$src1, $src2, [$base], $offset",
- "$base = $base_wb", []>;
-
-// STRT, STRBT, and STRHT are for disassembly only.
-
-def STRT : AI2stwpo<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base,am2offset:$offset),
- StFrm, IIC_iStoreru,
- "strt", "\t$src, [$base], $offset", "$base = $base_wb",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{21} = 1; // overwrite
-}
-
-def STRBT : AI2stbpo<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base,am2offset:$offset),
- StFrm, IIC_iStoreru,
- "strbt", "\t$src, [$base], $offset", "$base = $base_wb",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{21} = 1; // overwrite
-}
-
-def STRHT: AI3sthpo<(outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base,am3offset:$offset),
- StMiscFrm, IIC_iStoreru,
- "strht", "\t$src, [$base], $offset", "$base = $base_wb",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{21} = 1; // overwrite
-}
-
-//===----------------------------------------------------------------------===//
-// Load / store multiple Instructions.
-//
-
-let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
-def LDM : AXI4ld<(outs),
- (ins addrmode4:$addr, pred:$p, reglist:$wb, variable_ops),
- LdStMulFrm, IIC_iLoadm, "ldm${addr:submode}${p}\t$addr, $wb",
- []>;
-
-let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
-def STM : AXI4st<(outs),
- (ins addrmode4:$addr, pred:$p, reglist:$wb, variable_ops),
- LdStMulFrm, IIC_iStorem, "stm${addr:submode}${p}\t$addr, $wb",
- []>;
-
-//===----------------------------------------------------------------------===//
-// Move Instructions.
-//
-
-let neverHasSideEffects = 1 in
-def MOVr : AsI1<0b1101, (outs GPR:$dst), (ins GPR:$src), DPFrm, IIC_iMOVr,
- "mov", "\t$dst, $src", []>, UnaryDP {
- let Inst{11-4} = 0b00000000;
- let Inst{25} = 0;
-}
-
-def MOVs : AsI1<0b1101, (outs GPR:$dst), (ins so_reg:$src),
- DPSoRegFrm, IIC_iMOVsr,
- "mov", "\t$dst, $src", [(set GPR:$dst, so_reg:$src)]>, UnaryDP {
- let Inst{25} = 0;
-}
-
-let isReMaterializable = 1, isAsCheapAsAMove = 1 in
-def MOVi : AsI1<0b1101, (outs GPR:$dst), (ins so_imm:$src), DPFrm, IIC_iMOVi,
- "mov", "\t$dst, $src", [(set GPR:$dst, so_imm:$src)]>, UnaryDP {
- let Inst{25} = 1;
-}
-
-let isReMaterializable = 1, isAsCheapAsAMove = 1 in
-def MOVi16 : AI1<0b1000, (outs GPR:$dst), (ins i32imm:$src),
- DPFrm, IIC_iMOVi,
- "movw", "\t$dst, $src",
- [(set GPR:$dst, imm0_65535:$src)]>,
- Requires<[IsARM, HasV6T2]>, UnaryDP {
- let Inst{20} = 0;
- let Inst{25} = 1;
-}
-
-let Constraints = "$src = $dst" in
-def MOVTi16 : AI1<0b1010, (outs GPR:$dst), (ins GPR:$src, i32imm:$imm),
- DPFrm, IIC_iMOVi,
- "movt", "\t$dst, $imm",
- [(set GPR:$dst,
- (or (and GPR:$src, 0xffff),
- lo16AllZero:$imm))]>, UnaryDP,
- Requires<[IsARM, HasV6T2]> {
- let Inst{20} = 0;
- let Inst{25} = 1;
-}
-
-def : ARMPat<(or GPR:$src, 0xffff0000), (MOVTi16 GPR:$src, 0xffff)>,
- Requires<[IsARM, HasV6T2]>;
-
-let Uses = [CPSR] in
-def MOVrx : AsI1<0b1101, (outs GPR:$dst), (ins GPR:$src), Pseudo, IIC_iMOVsi,
- "mov", "\t$dst, $src, rrx",
- [(set GPR:$dst, (ARMrrx GPR:$src))]>, UnaryDP;
-
-// These aren't really mov instructions, but we have to define them this way
-// due to flag operands.
-
-let Defs = [CPSR] in {
-def MOVsrl_flag : AI1<0b1101, (outs GPR:$dst), (ins GPR:$src), Pseudo,
- IIC_iMOVsi, "movs", "\t$dst, $src, lsr #1",
- [(set GPR:$dst, (ARMsrl_flag GPR:$src))]>, UnaryDP;
-def MOVsra_flag : AI1<0b1101, (outs GPR:$dst), (ins GPR:$src), Pseudo,
- IIC_iMOVsi, "movs", "\t$dst, $src, asr #1",
- [(set GPR:$dst, (ARMsra_flag GPR:$src))]>, UnaryDP;
-}
-
-//===----------------------------------------------------------------------===//
-// Extend Instructions.
-//
-
-// Sign extenders
-
-defm SXTB : AI_unary_rrot<0b01101010,
- "sxtb", UnOpFrag<(sext_inreg node:$Src, i8)>>;
-defm SXTH : AI_unary_rrot<0b01101011,
- "sxth", UnOpFrag<(sext_inreg node:$Src, i16)>>;
-
-defm SXTAB : AI_bin_rrot<0b01101010,
- "sxtab", BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS, i8))>>;
-defm SXTAH : AI_bin_rrot<0b01101011,
- "sxtah", BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS,i16))>>;
-
-// For disassembly only
-defm SXTB16 : AI_unary_rrot_np<0b01101000, "sxtb16">;
-
-// For disassembly only
-defm SXTAB16 : AI_bin_rrot_np<0b01101000, "sxtab16">;
-
-// Zero extenders
-
-let AddedComplexity = 16 in {
-defm UXTB : AI_unary_rrot<0b01101110,
- "uxtb" , UnOpFrag<(and node:$Src, 0x000000FF)>>;
-defm UXTH : AI_unary_rrot<0b01101111,
- "uxth" , UnOpFrag<(and node:$Src, 0x0000FFFF)>>;
-defm UXTB16 : AI_unary_rrot<0b01101100,
- "uxtb16", UnOpFrag<(and node:$Src, 0x00FF00FF)>>;
-
-def : ARMV6Pat<(and (shl GPR:$Src, (i32 8)), 0xFF00FF),
- (UXTB16r_rot GPR:$Src, 24)>;
-def : ARMV6Pat<(and (srl GPR:$Src, (i32 8)), 0xFF00FF),
- (UXTB16r_rot GPR:$Src, 8)>;
-
-defm UXTAB : AI_bin_rrot<0b01101110, "uxtab",
- BinOpFrag<(add node:$LHS, (and node:$RHS, 0x00FF))>>;
-defm UXTAH : AI_bin_rrot<0b01101111, "uxtah",
- BinOpFrag<(add node:$LHS, (and node:$RHS, 0xFFFF))>>;
-}
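The two ARMV6Pat entries above rely on a small identity: once the 0xFF00FF mask is applied, a right shift by 8 and a rotate right by 8 agree (and, likewise, a left shift by 8 pairs with ror #24), so the and/shift trees can be matched to uxtb16 with a rotation. A quick C++ check of that equivalence (names mine):

#include <cstdint>
#include <cstdio>

static uint32_t rotr32(uint32_t v, unsigned amt) {
  amt &= 31;
  return amt ? (v >> amt) | (v << (32 - amt)) : v;
}

// uxtb16 dst, src, ror #8: rotate, then keep bytes 0 and 2 zero-extended.
static uint32_t uxtb16_ror8(uint32_t x) { return rotr32(x, 8) & 0x00ff00ffu; }

int main() {
  uint32_t x = 0x11223344u;
  std::printf("0x%08x 0x%08x\n", uxtb16_ror8(x), (x >> 8) & 0x00ff00ffu);
  // both print 0x00110033: the byte the rotate brings in is masked away
}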
-
-// This isn't safe in general: the add is two 16-bit units, not a 32-bit add.
-// For disassembly only
-defm UXTAB16 : AI_bin_rrot_np<0b01101100, "uxtab16">;
-
-
-def SBFX : I<(outs GPR:$dst),
- (ins GPR:$src, imm0_31:$lsb, imm0_31:$width),
- AddrMode1, Size4Bytes, IndexModeNone, DPFrm, IIC_iALUi,
- "sbfx", "\t$dst, $src, $lsb, $width", "", []>,
- Requires<[IsARM, HasV6T2]> {
- let Inst{27-21} = 0b0111101;
- let Inst{6-4} = 0b101;
-}
-
-def UBFX : I<(outs GPR:$dst),
- (ins GPR:$src, imm0_31:$lsb, imm0_31:$width),
- AddrMode1, Size4Bytes, IndexModeNone, DPFrm, IIC_iALUi,
- "ubfx", "\t$dst, $src, $lsb, $width", "", []>,
- Requires<[IsARM, HasV6T2]> {
- let Inst{27-21} = 0b0111111;
- let Inst{6-4} = 0b101;
-}
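ubfx/sbfx extract $width bits starting at $lsb and zero- or sign-extend the field. A small C++ model of that arithmetic (helper names mine; width is assumed to be at least 1):

#include <cstdint>
#include <cstdio>

// ubfx dst, src, #lsb, #width: the field zero-extended.
static uint32_t ubfx(uint32_t src, unsigned lsb, unsigned width) {
  uint32_t mask = width < 32 ? (1u << width) - 1u : 0xffffffffu;
  return (src >> lsb) & mask;
}

// sbfx: the same field, sign-extended from its top bit.
static int32_t sbfx(uint32_t src, unsigned lsb, unsigned width) {
  uint32_t field = ubfx(src, lsb, width);
  uint32_t sign = 1u << (width - 1);
  return (int32_t)((field ^ sign) - sign);
}

int main() {
  std::printf("%u %d\n", ubfx(0x0000f000u, 12, 4),   // 15
                         sbfx(0x0000f000u, 12, 4));  // -1
}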
-
-//===----------------------------------------------------------------------===//
-// Arithmetic Instructions.
-//
-
-defm ADD : AsI1_bin_irs<0b0100, "add",
- BinOpFrag<(add node:$LHS, node:$RHS)>, 1>;
-defm SUB : AsI1_bin_irs<0b0010, "sub",
- BinOpFrag<(sub node:$LHS, node:$RHS)>>;
-
-// ADD and SUB with 's' bit set.
-defm ADDS : AI1_bin_s_irs<0b0100, "adds",
- BinOpFrag<(addc node:$LHS, node:$RHS)>, 1>;
-defm SUBS : AI1_bin_s_irs<0b0010, "subs",
- BinOpFrag<(subc node:$LHS, node:$RHS)>>;
-
-defm ADC : AI1_adde_sube_irs<0b0101, "adc",
- BinOpFrag<(adde_dead_carry node:$LHS, node:$RHS)>, 1>;
-defm SBC : AI1_adde_sube_irs<0b0110, "sbc",
- BinOpFrag<(sube_dead_carry node:$LHS, node:$RHS)>>;
-defm ADCS : AI1_adde_sube_s_irs<0b0101, "adcs",
- BinOpFrag<(adde_live_carry node:$LHS, node:$RHS)>, 1>;
-defm SBCS : AI1_adde_sube_s_irs<0b0110, "sbcs",
- BinOpFrag<(sube_live_carry node:$LHS, node:$RHS) >>;
-
-// These don't define reg/reg forms, because they are handled above.
-def RSBri : AsI1<0b0011, (outs GPR:$dst), (ins GPR:$a, so_imm:$b), DPFrm,
- IIC_iALUi, "rsb", "\t$dst, $a, $b",
- [(set GPR:$dst, (sub so_imm:$b, GPR:$a))]> {
- let Inst{25} = 1;
-}
-
-def RSBrs : AsI1<0b0011, (outs GPR:$dst), (ins GPR:$a, so_reg:$b), DPSoRegFrm,
- IIC_iALUsr, "rsb", "\t$dst, $a, $b",
- [(set GPR:$dst, (sub so_reg:$b, GPR:$a))]> {
- let Inst{25} = 0;
-}
-
-// RSB with 's' bit set.
-let Defs = [CPSR] in {
-def RSBSri : AI1<0b0011, (outs GPR:$dst), (ins GPR:$a, so_imm:$b), DPFrm,
- IIC_iALUi, "rsbs", "\t$dst, $a, $b",
- [(set GPR:$dst, (subc so_imm:$b, GPR:$a))]> {
- let Inst{20} = 1;
- let Inst{25} = 1;
-}
-def RSBSrs : AI1<0b0011, (outs GPR:$dst), (ins GPR:$a, so_reg:$b), DPSoRegFrm,
- IIC_iALUsr, "rsbs", "\t$dst, $a, $b",
- [(set GPR:$dst, (subc so_reg:$b, GPR:$a))]> {
- let Inst{20} = 1;
- let Inst{25} = 0;
-}
-}
-
-let Uses = [CPSR] in {
-def RSCri : AsI1<0b0111, (outs GPR:$dst), (ins GPR:$a, so_imm:$b),
- DPFrm, IIC_iALUi, "rsc", "\t$dst, $a, $b",
- [(set GPR:$dst, (sube_dead_carry so_imm:$b, GPR:$a))]>,
- Requires<[IsARM]> {
- let Inst{25} = 1;
-}
-def RSCrs : AsI1<0b0111, (outs GPR:$dst), (ins GPR:$a, so_reg:$b),
- DPSoRegFrm, IIC_iALUsr, "rsc", "\t$dst, $a, $b",
- [(set GPR:$dst, (sube_dead_carry so_reg:$b, GPR:$a))]>,
- Requires<[IsARM]> {
- let Inst{25} = 0;
-}
-}
-
-// FIXME: Allow these to be predicated.
-let Defs = [CPSR], Uses = [CPSR] in {
-def RSCSri : AXI1<0b0111, (outs GPR:$dst), (ins GPR:$a, so_imm:$b),
- DPFrm, IIC_iALUi, "rscs\t$dst, $a, $b",
- [(set GPR:$dst, (sube_dead_carry so_imm:$b, GPR:$a))]>,
- Requires<[IsARM]> {
- let Inst{20} = 1;
- let Inst{25} = 1;
-}
-def RSCSrs : AXI1<0b0111, (outs GPR:$dst), (ins GPR:$a, so_reg:$b),
- DPSoRegFrm, IIC_iALUsr, "rscs\t$dst, $a, $b",
- [(set GPR:$dst, (sube_dead_carry so_reg:$b, GPR:$a))]>,
- Requires<[IsARM]> {
- let Inst{20} = 1;
- let Inst{25} = 0;
-}
-}
-
-// (sub X, imm) gets canonicalized to (add X, -imm). Match this form.
-def : ARMPat<(add GPR:$src, so_imm_neg:$imm),
- (SUBri GPR:$src, so_imm_neg:$imm)>;
-
-//def : ARMPat<(addc GPR:$src, so_imm_neg:$imm),
-// (SUBSri GPR:$src, so_imm_neg:$imm)>;
-//def : ARMPat<(adde GPR:$src, so_imm_neg:$imm),
-// (SBCri GPR:$src, so_imm_neg:$imm)>;
-
-// Note: These are implemented in C++ code because they have to generate
-// ADD/SUBrs instructions, which use a complex pattern that an xform function
-// cannot produce.
-// (mul X, 2^n+1) -> (add (X << n), X)
-// (mul X, 2^n-1) -> (rsb X, (X << n))
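For reference, the rewrites spelled out in the comment, checked numerically (rsb reverses its operands, so (rsb X, Y) is Y - X):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t x = 123u;
  // (mul X, 2^n+1) -> (add (X << n), X): x*9 with n = 3
  std::printf("%u %u\n", x * 9u, (x << 3) + x);    // 1107 1107
  // (mul X, 2^n-1) -> (rsb X, (X << n)), i.e. (X << n) - X: x*7 with n = 3
  std::printf("%u %u\n", x * 7u, (x << 3) - x);    // 861 861
}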
-
-// ARM Arithmetic Instruction -- for disassembly only
-// GPR:$dst = GPR:$a op GPR:$b
-class AAI<bits<8> op27_20, bits<4> op7_4, string opc>
- : AI<(outs GPR:$dst), (ins GPR:$a, GPR:$b), DPFrm, IIC_iALUr,
- opc, "\t$dst, $a, $b",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{27-20} = op27_20;
- let Inst{7-4} = op7_4;
-}
-
-// Saturating add/subtract -- for disassembly only
-
-def QADD : AAI<0b00010000, 0b0101, "qadd">;
-def QADD16 : AAI<0b01100010, 0b0001, "qadd16">;
-def QADD8 : AAI<0b01100010, 0b1001, "qadd8">;
-def QASX : AAI<0b01100010, 0b0011, "qasx">;
-def QDADD : AAI<0b00010100, 0b0101, "qdadd">;
-def QDSUB : AAI<0b00010110, 0b0101, "qdsub">;
-def QSAX : AAI<0b01100010, 0b0101, "qsax">;
-def QSUB : AAI<0b00010010, 0b0101, "qsub">;
-def QSUB16 : AAI<0b01100010, 0b0111, "qsub16">;
-def QSUB8 : AAI<0b01100010, 0b1111, "qsub8">;
-def UQADD16 : AAI<0b01100110, 0b0001, "uqadd16">;
-def UQADD8 : AAI<0b01100110, 0b1001, "uqadd8">;
-def UQASX : AAI<0b01100110, 0b0011, "uqasx">;
-def UQSAX : AAI<0b01100110, 0b0101, "uqsax">;
-def UQSUB16 : AAI<0b01100110, 0b0111, "uqsub16">;
-def UQSUB8 : AAI<0b01100110, 0b1111, "uqsub8">;
-
-// Signed/Unsigned add/subtract -- for disassembly only
-
-def SASX : AAI<0b01100001, 0b0011, "sasx">;
-def SADD16 : AAI<0b01100001, 0b0001, "sadd16">;
-def SADD8 : AAI<0b01100001, 0b1001, "sadd8">;
-def SSAX : AAI<0b01100001, 0b0101, "ssax">;
-def SSUB16 : AAI<0b01100001, 0b0111, "ssub16">;
-def SSUB8 : AAI<0b01100001, 0b1111, "ssub8">;
-def UASX : AAI<0b01100101, 0b0011, "uasx">;
-def UADD16 : AAI<0b01100101, 0b0001, "uadd16">;
-def UADD8 : AAI<0b01100101, 0b1001, "uadd8">;
-def USAX : AAI<0b01100101, 0b0101, "usax">;
-def USUB16 : AAI<0b01100101, 0b0111, "usub16">;
-def USUB8 : AAI<0b01100101, 0b1111, "usub8">;
-
-// Signed/Unsigned halving add/subtract -- for disassembly only
-
-def SHASX : AAI<0b01100011, 0b0011, "shasx">;
-def SHADD16 : AAI<0b01100011, 0b0001, "shadd16">;
-def SHADD8 : AAI<0b01100011, 0b1001, "shadd8">;
-def SHSAX : AAI<0b01100011, 0b0101, "shsax">;
-def SHSUB16 : AAI<0b01100011, 0b0111, "shsub16">;
-def SHSUB8 : AAI<0b01100011, 0b1111, "shsub8">;
-def UHASX : AAI<0b01100111, 0b0011, "uhasx">;
-def UHADD16 : AAI<0b01100111, 0b0001, "uhadd16">;
-def UHADD8 : AAI<0b01100111, 0b1001, "uhadd8">;
-def UHSAX : AAI<0b01100111, 0b0101, "uhsax">;
-def UHSUB16 : AAI<0b01100111, 0b0111, "uhsub16">;
-def UHSUB8 : AAI<0b01100111, 0b1111, "uhsub8">;
-
-// Unsigned Sum of Absolute Differences [and Accumulate] -- for disassembly only
-
-def USAD8 : AI<(outs GPR:$dst), (ins GPR:$a, GPR:$b),
- MulFrm /* for convenience */, NoItinerary, "usad8",
- "\t$dst, $a, $b", []>,
- Requires<[IsARM, HasV6]> {
- let Inst{27-20} = 0b01111000;
- let Inst{15-12} = 0b1111;
- let Inst{7-4} = 0b0001;
-}
-def USADA8 : AI<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- MulFrm /* for convenience */, NoItinerary, "usada8",
- "\t$dst, $a, $b, $acc", []>,
- Requires<[IsARM, HasV6]> {
- let Inst{27-20} = 0b01111000;
- let Inst{7-4} = 0b0001;
-}
-
-// Signed/Unsigned saturate -- for disassembly only
-
-def SSATlsl : AI<(outs GPR:$dst), (ins i32imm:$bit_pos, GPR:$a, i32imm:$shamt),
- DPFrm, NoItinerary, "ssat", "\t$dst, $bit_pos, $a, lsl $shamt",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{27-21} = 0b0110101;
- let Inst{6-4} = 0b001;
-}
-
-def SSATasr : AI<(outs GPR:$dst), (ins i32imm:$bit_pos, GPR:$a, i32imm:$shamt),
- DPFrm, NoItinerary, "ssat", "\t$dst, $bit_pos, $a, asr $shamt",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{27-21} = 0b0110101;
- let Inst{6-4} = 0b101;
-}
-
-def SSAT16 : AI<(outs GPR:$dst), (ins i32imm:$bit_pos, GPR:$a), DPFrm,
- NoItinerary, "ssat16", "\t$dst, $bit_pos, $a",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{27-20} = 0b01101010;
- let Inst{7-4} = 0b0011;
-}
-
-def USATlsl : AI<(outs GPR:$dst), (ins i32imm:$bit_pos, GPR:$a, i32imm:$shamt),
- DPFrm, NoItinerary, "usat", "\t$dst, $bit_pos, $a, lsl $shamt",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{27-21} = 0b0110111;
- let Inst{6-4} = 0b001;
-}
-
-def USATasr : AI<(outs GPR:$dst), (ins i32imm:$bit_pos, GPR:$a, i32imm:$shamt),
- DPFrm, NoItinerary, "usat", "\t$dst, $bit_pos, $a, asr $shamt",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{27-21} = 0b0110111;
- let Inst{6-4} = 0b101;
-}
-
-def USAT16 : AI<(outs GPR:$dst), (ins i32imm:$bit_pos, GPR:$a), DPFrm,
- NoItinerary, "usat16", "\t$dst, $bit_pos, $a",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{27-20} = 0b01101110;
- let Inst{7-4} = 0b0011;
-}
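The underlying arithmetic (after the optional lsl/asr shown in the operand strings) is ordinary signed/unsigned saturation to an n-bit range. The sketch below models only that arithmetic for 1 <= n <= 31, not the operand encoding the disassembler deals with:

#include <cstdint>
#include <cstdio>

static int32_t ssatN(int32_t v, unsigned n) {   // clamp to [-2^(n-1), 2^(n-1)-1]
  int32_t hi = (1 << (n - 1)) - 1, lo = -hi - 1;
  return v > hi ? hi : (v < lo ? lo : v);
}

static uint32_t usatN(int32_t v, unsigned n) {  // clamp to [0, 2^n - 1]
  int64_t hi = ((int64_t)1 << n) - 1;
  return v < 0 ? 0u : (v > hi ? (uint32_t)hi : (uint32_t)v);
}

int main() {
  std::printf("%d %d %u\n", ssatN(1000, 8), ssatN(-1000, 8), usatN(-5, 8));
  // 127 -128 0
}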
-
-//===----------------------------------------------------------------------===//
-// Bitwise Instructions.
-//
-
-defm AND : AsI1_bin_irs<0b0000, "and",
- BinOpFrag<(and node:$LHS, node:$RHS)>, 1>;
-defm ORR : AsI1_bin_irs<0b1100, "orr",
- BinOpFrag<(or node:$LHS, node:$RHS)>, 1>;
-defm EOR : AsI1_bin_irs<0b0001, "eor",
- BinOpFrag<(xor node:$LHS, node:$RHS)>, 1>;
-defm BIC : AsI1_bin_irs<0b1110, "bic",
- BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
-
-def BFC : I<(outs GPR:$dst), (ins GPR:$src, bf_inv_mask_imm:$imm),
- AddrMode1, Size4Bytes, IndexModeNone, DPFrm, IIC_iUNAsi,
- "bfc", "\t$dst, $imm", "$src = $dst",
- [(set GPR:$dst, (and GPR:$src, bf_inv_mask_imm:$imm))]>,
- Requires<[IsARM, HasV6T2]> {
- let Inst{27-21} = 0b0111110;
- let Inst{6-0} = 0b0011111;
-}
-
-// A8.6.18 BFI - Bitfield insert (Encoding A1)
-// Added for disassembler with the pattern field purposely left blank.
-def BFI : I<(outs GPR:$dst), (ins GPR:$src, bf_inv_mask_imm:$imm),
- AddrMode1, Size4Bytes, IndexModeNone, DPFrm, IIC_iUNAsi,
- "bfi", "\t$dst, $src, $imm", "",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6T2]> {
- let Inst{27-21} = 0b0111110;
- let Inst{6-4} = 0b001; // Rn: Inst{3-0} != 15
-}
-
-def MVNr : AsI1<0b1111, (outs GPR:$dst), (ins GPR:$src), DPFrm, IIC_iMOVr,
- "mvn", "\t$dst, $src",
- [(set GPR:$dst, (not GPR:$src))]>, UnaryDP {
- let Inst{25} = 0;
- let Inst{11-4} = 0b00000000;
-}
-def MVNs : AsI1<0b1111, (outs GPR:$dst), (ins so_reg:$src), DPSoRegFrm,
- IIC_iMOVsr, "mvn", "\t$dst, $src",
- [(set GPR:$dst, (not so_reg:$src))]>, UnaryDP {
- let Inst{25} = 0;
-}
-let isReMaterializable = 1, isAsCheapAsAMove = 1 in
-def MVNi : AsI1<0b1111, (outs GPR:$dst), (ins so_imm:$imm), DPFrm,
- IIC_iMOVi, "mvn", "\t$dst, $imm",
- [(set GPR:$dst, so_imm_not:$imm)]>,UnaryDP {
- let Inst{25} = 1;
-}
-
-def : ARMPat<(and GPR:$src, so_imm_not:$imm),
- (BICri GPR:$src, so_imm_not:$imm)>;
-
-//===----------------------------------------------------------------------===//
-// Multiply Instructions.
-//
-
-let isCommutable = 1 in
-def MUL : AsMul1I<0b0000000, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL32, "mul", "\t$dst, $a, $b",
- [(set GPR:$dst, (mul GPR:$a, GPR:$b))]>;
-
-def MLA : AsMul1I<0b0000001, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c),
- IIC_iMAC32, "mla", "\t$dst, $a, $b, $c",
- [(set GPR:$dst, (add (mul GPR:$a, GPR:$b), GPR:$c))]>;
-
-def MLS : AMul1I<0b0000011, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c),
- IIC_iMAC32, "mls", "\t$dst, $a, $b, $c",
- [(set GPR:$dst, (sub GPR:$c, (mul GPR:$a, GPR:$b)))]>,
- Requires<[IsARM, HasV6T2]>;
-
-// Extra precision multiplies with low / high results
-let neverHasSideEffects = 1 in {
-let isCommutable = 1 in {
-def SMULL : AsMul1I<0b0000110, (outs GPR:$ldst, GPR:$hdst),
- (ins GPR:$a, GPR:$b), IIC_iMUL64,
- "smull", "\t$ldst, $hdst, $a, $b", []>;
-
-def UMULL : AsMul1I<0b0000100, (outs GPR:$ldst, GPR:$hdst),
- (ins GPR:$a, GPR:$b), IIC_iMUL64,
- "umull", "\t$ldst, $hdst, $a, $b", []>;
-}
-
-// Multiply + accumulate
-def SMLAL : AsMul1I<0b0000111, (outs GPR:$ldst, GPR:$hdst),
- (ins GPR:$a, GPR:$b), IIC_iMAC64,
- "smlal", "\t$ldst, $hdst, $a, $b", []>;
-
-def UMLAL : AsMul1I<0b0000101, (outs GPR:$ldst, GPR:$hdst),
- (ins GPR:$a, GPR:$b), IIC_iMAC64,
- "umlal", "\t$ldst, $hdst, $a, $b", []>;
-
-def UMAAL : AMul1I <0b0000010, (outs GPR:$ldst, GPR:$hdst),
- (ins GPR:$a, GPR:$b), IIC_iMAC64,
- "umaal", "\t$ldst, $hdst, $a, $b", []>,
- Requires<[IsARM, HasV6]>;
-} // neverHasSideEffects
-
-// Most significant word multiply
-def SMMUL : AMul2I <0b0111010, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL32, "smmul", "\t$dst, $a, $b",
- [(set GPR:$dst, (mulhs GPR:$a, GPR:$b))]>,
- Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b0001;
- let Inst{15-12} = 0b1111;
-}
-
-def SMMULR : AMul2I <0b0111010, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL32, "smmulr", "\t$dst, $a, $b",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b0011; // R = 1
- let Inst{15-12} = 0b1111;
-}
-
-def SMMLA : AMul2I <0b0111010, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c),
- IIC_iMAC32, "smmla", "\t$dst, $a, $b, $c",
- [(set GPR:$dst, (add (mulhs GPR:$a, GPR:$b), GPR:$c))]>,
- Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b0001;
-}
-
-def SMMLAR : AMul2I <0b0111010, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c),
- IIC_iMAC32, "smmlar", "\t$dst, $a, $b, $c",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b0011; // R = 1
-}
-
-def SMMLS : AMul2I <0b0111010, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c),
- IIC_iMAC32, "smmls", "\t$dst, $a, $b, $c",
- [(set GPR:$dst, (sub GPR:$c, (mulhs GPR:$a, GPR:$b)))]>,
- Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b1101;
-}
-
-def SMMLSR : AMul2I <0b0111010, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c),
- IIC_iMAC32, "smmlsr", "\t$dst, $a, $b, $c",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b1111; // R = 1
-}
-
-multiclass AI_smul<string opc, PatFrag opnode> {
- def BB : AMulxyI<0b0001011, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL32, !strconcat(opc, "bb"), "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode (sext_inreg GPR:$a, i16),
- (sext_inreg GPR:$b, i16)))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 0;
- let Inst{6} = 0;
- }
-
- def BT : AMulxyI<0b0001011, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL32, !strconcat(opc, "bt"), "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode (sext_inreg GPR:$a, i16),
- (sra GPR:$b, (i32 16))))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 0;
- let Inst{6} = 1;
- }
-
- def TB : AMulxyI<0b0001011, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL32, !strconcat(opc, "tb"), "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode (sra GPR:$a, (i32 16)),
- (sext_inreg GPR:$b, i16)))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 1;
- let Inst{6} = 0;
- }
-
- def TT : AMulxyI<0b0001011, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL32, !strconcat(opc, "tt"), "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode (sra GPR:$a, (i32 16)),
- (sra GPR:$b, (i32 16))))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 1;
- let Inst{6} = 1;
- }
-
- def WB : AMulxyI<0b0001001, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL16, !strconcat(opc, "wb"), "\t$dst, $a, $b",
- [(set GPR:$dst, (sra (opnode GPR:$a,
- (sext_inreg GPR:$b, i16)), (i32 16)))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 1;
- let Inst{6} = 0;
- }
-
- def WT : AMulxyI<0b0001001, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMUL16, !strconcat(opc, "wt"), "\t$dst, $a, $b",
- [(set GPR:$dst, (sra (opnode GPR:$a,
- (sra GPR:$b, (i32 16))), (i32 16)))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 1;
- let Inst{6} = 1;
- }
-}
-
-
-multiclass AI_smla<string opc, PatFrag opnode> {
- def BB : AMulxyI<0b0001000, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- IIC_iMAC16, !strconcat(opc, "bb"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc,
- (opnode (sext_inreg GPR:$a, i16),
- (sext_inreg GPR:$b, i16))))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 0;
- let Inst{6} = 0;
- }
-
- def BT : AMulxyI<0b0001000, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- IIC_iMAC16, !strconcat(opc, "bt"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc, (opnode (sext_inreg GPR:$a, i16),
- (sra GPR:$b, (i32 16)))))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 0;
- let Inst{6} = 1;
- }
-
- def TB : AMulxyI<0b0001000, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- IIC_iMAC16, !strconcat(opc, "tb"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc, (opnode (sra GPR:$a, (i32 16)),
- (sext_inreg GPR:$b, i16))))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 1;
- let Inst{6} = 0;
- }
-
- def TT : AMulxyI<0b0001000, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- IIC_iMAC16, !strconcat(opc, "tt"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc, (opnode (sra GPR:$a, (i32 16)),
- (sra GPR:$b, (i32 16)))))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 1;
- let Inst{6} = 1;
- }
-
- def WB : AMulxyI<0b0001001, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- IIC_iMAC16, !strconcat(opc, "wb"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc, (sra (opnode GPR:$a,
- (sext_inreg GPR:$b, i16)), (i32 16))))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 0;
- let Inst{6} = 0;
- }
-
- def WT : AMulxyI<0b0001001, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- IIC_iMAC16, !strconcat(opc, "wt"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc, (sra (opnode GPR:$a,
- (sra GPR:$b, (i32 16))), (i32 16))))]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 0;
- let Inst{6} = 1;
- }
-}
-
-defm SMUL : AI_smul<"smul", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
-defm SMLA : AI_smla<"smla", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
-
-// Halfword multiply accumulate long: SMLAL<x><y> -- for disassembly only
-def SMLALBB : AMulxyI<0b0001010,(outs GPR:$ldst,GPR:$hdst),(ins GPR:$a,GPR:$b),
- IIC_iMAC64, "smlalbb", "\t$ldst, $hdst, $a, $b",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 0;
- let Inst{6} = 0;
-}
-
-def SMLALBT : AMulxyI<0b0001010,(outs GPR:$ldst,GPR:$hdst),(ins GPR:$a,GPR:$b),
- IIC_iMAC64, "smlalbt", "\t$ldst, $hdst, $a, $b",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 0;
- let Inst{6} = 1;
-}
-
-def SMLALTB : AMulxyI<0b0001010,(outs GPR:$ldst,GPR:$hdst),(ins GPR:$a,GPR:$b),
- IIC_iMAC64, "smlaltb", "\t$ldst, $hdst, $a, $b",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 1;
- let Inst{6} = 0;
-}
-
-def SMLALTT : AMulxyI<0b0001010,(outs GPR:$ldst,GPR:$hdst),(ins GPR:$a,GPR:$b),
- IIC_iMAC64, "smlaltt", "\t$ldst, $hdst, $a, $b",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV5TE]> {
- let Inst{5} = 1;
- let Inst{6} = 1;
-}
-
-// Helper class for AI_smld -- for disassembly only
-class AMulDualI<bit long, bit sub, bit swap, dag oops, dag iops,
- InstrItinClass itin, string opc, string asm>
- : AI<oops, iops, MulFrm, itin, opc, asm, []>, Requires<[IsARM, HasV6]> {
- let Inst{4} = 1;
- let Inst{5} = swap;
- let Inst{6} = sub;
- let Inst{7} = 0;
- let Inst{21-20} = 0b00;
- let Inst{22} = long;
- let Inst{27-23} = 0b01110;
-}
-
-multiclass AI_smld<bit sub, string opc> {
-
- def D : AMulDualI<0, sub, 0, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- NoItinerary, !strconcat(opc, "d"), "\t$dst, $a, $b, $acc">;
-
- def DX : AMulDualI<0, sub, 1, (outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc),
- NoItinerary, !strconcat(opc, "dx"), "\t$dst, $a, $b, $acc">;
-
- def LD : AMulDualI<1, sub, 0, (outs GPR:$ldst,GPR:$hdst), (ins GPR:$a,GPR:$b),
- NoItinerary, !strconcat(opc, "ld"), "\t$ldst, $hdst, $a, $b">;
-
- def LDX : AMulDualI<1, sub, 1, (outs GPR:$ldst,GPR:$hdst),(ins GPR:$a,GPR:$b),
- NoItinerary, !strconcat(opc, "ldx"),"\t$ldst, $hdst, $a, $b">;
-
-}
-
-defm SMLA : AI_smld<0, "smla">;
-defm SMLS : AI_smld<1, "smls">;
-
-multiclass AI_sdml<bit sub, string opc> {
-
- def D : AMulDualI<0, sub, 0, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- NoItinerary, !strconcat(opc, "d"), "\t$dst, $a, $b"> {
- let Inst{15-12} = 0b1111;
- }
-
- def DX : AMulDualI<0, sub, 1, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- NoItinerary, !strconcat(opc, "dx"), "\t$dst, $a, $b"> {
- let Inst{15-12} = 0b1111;
- }
-
-}
-
-defm SMUA : AI_sdml<0, "smua">;
-defm SMUS : AI_sdml<1, "smus">;
-
-//===----------------------------------------------------------------------===//
-// Misc. Arithmetic Instructions.
-//
-
-def CLZ : AMiscA1I<0b000010110, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- "clz", "\t$dst, $src",
- [(set GPR:$dst, (ctlz GPR:$src))]>, Requires<[IsARM, HasV5T]> {
- let Inst{7-4} = 0b0001;
- let Inst{11-8} = 0b1111;
- let Inst{19-16} = 0b1111;
-}
-
-def RBIT : AMiscA1I<0b01101111, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- "rbit", "\t$dst, $src",
- [(set GPR:$dst, (ARMrbit GPR:$src))]>,
- Requires<[IsARM, HasV6T2]> {
- let Inst{7-4} = 0b0011;
- let Inst{11-8} = 0b1111;
- let Inst{19-16} = 0b1111;
-}
-
-def REV : AMiscA1I<0b01101011, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- "rev", "\t$dst, $src",
- [(set GPR:$dst, (bswap GPR:$src))]>, Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b0011;
- let Inst{11-8} = 0b1111;
- let Inst{19-16} = 0b1111;
-}
-
-def REV16 : AMiscA1I<0b01101011, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- "rev16", "\t$dst, $src",
- [(set GPR:$dst,
- (or (and (srl GPR:$src, (i32 8)), 0xFF),
- (or (and (shl GPR:$src, (i32 8)), 0xFF00),
- (or (and (srl GPR:$src, (i32 8)), 0xFF0000),
- (and (shl GPR:$src, (i32 8)), 0xFF000000)))))]>,
- Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b1011;
- let Inst{11-8} = 0b1111;
- let Inst{19-16} = 0b1111;
-}
-
-def REVSH : AMiscA1I<0b01101111, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- "revsh", "\t$dst, $src",
- [(set GPR:$dst,
- (sext_inreg
- (or (srl (and GPR:$src, 0xFF00), (i32 8)),
- (shl GPR:$src, (i32 8))), i16))]>,
- Requires<[IsARM, HasV6]> {
- let Inst{7-4} = 0b1011;
- let Inst{11-8} = 0b1111;
- let Inst{19-16} = 0b1111;
-}
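For reference, a minimal C sketch of the bit rearrangements spelled out by the REV16 and REVSH patterns above (illustrative only):

#include <stdint.h>

/* rev16: swap the two bytes within each halfword, as the nested
   or/and/shl/srl pattern above describes. */
static uint32_t rev16(uint32_t x) {
    return ((x & 0x00FF00FFu) << 8) | ((x & 0xFF00FF00u) >> 8);
}

/* revsh: byte-swap the low halfword, then sign-extend it to 32 bits. */
static int32_t revsh(uint32_t x) {
    uint16_t h = (uint16_t)(((x & 0x00FFu) << 8) | ((x >> 8) & 0x00FFu));
    return (int32_t)(int16_t)h;
}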
-
-def PKHBT : AMiscA1I<0b01101000, (outs GPR:$dst),
- (ins GPR:$src1, GPR:$src2, i32imm:$shamt),
- IIC_iALUsi, "pkhbt", "\t$dst, $src1, $src2, lsl $shamt",
- [(set GPR:$dst, (or (and GPR:$src1, 0xFFFF),
- (and (shl GPR:$src2, (i32 imm:$shamt)),
- 0xFFFF0000)))]>,
- Requires<[IsARM, HasV6]> {
- let Inst{6-4} = 0b001;
-}
-
-// Alternate cases for PKHBT where identities eliminate some nodes.
-def : ARMV6Pat<(or (and GPR:$src1, 0xFFFF), (and GPR:$src2, 0xFFFF0000)),
- (PKHBT GPR:$src1, GPR:$src2, 0)>;
-def : ARMV6Pat<(or (and GPR:$src1, 0xFFFF), (shl GPR:$src2, imm16_31:$shamt)),
- (PKHBT GPR:$src1, GPR:$src2, imm16_31:$shamt)>;
-
-
-def PKHTB : AMiscA1I<0b01101000, (outs GPR:$dst),
- (ins GPR:$src1, GPR:$src2, i32imm:$shamt),
- IIC_iALUsi, "pkhtb", "\t$dst, $src1, $src2, asr $shamt",
- [(set GPR:$dst, (or (and GPR:$src1, 0xFFFF0000),
- (and (sra GPR:$src2, imm16_31:$shamt),
- 0xFFFF)))]>, Requires<[IsARM, HasV6]> {
- let Inst{6-4} = 0b101;
-}
-
-// Alternate cases for PKHTB where identities eliminate some nodes. Note that
-// a shift amount of 0 is *not legal* here; it is PKHBT instead.
-def : ARMV6Pat<(or (and GPR:$src1, 0xFFFF0000), (srl GPR:$src2, (i32 16))),
- (PKHTB GPR:$src1, GPR:$src2, 16)>;
-def : ARMV6Pat<(or (and GPR:$src1, 0xFFFF0000),
- (and (srl GPR:$src2, imm1_15:$shamt), 0xFFFF)),
- (PKHTB GPR:$src1, GPR:$src2, imm1_15:$shamt)>;
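For reference, a minimal C sketch of the halfword packing described by PKHBT/PKHTB and the alternate patterns above (illustrative only):

#include <stdint.h>

/* pkhbt: bottom halfword of a, top halfword of (b << sh). */
static uint32_t pkhbt(uint32_t a, uint32_t b, unsigned sh) {
    return (a & 0x0000FFFFu) | ((b << sh) & 0xFFFF0000u);
}

/* pkhtb: top halfword of a, bottom halfword of (b asr sh); a shift of 0 is
   encoded as pkhbt instead, as noted above.  Assumes >> on a signed int is
   an arithmetic shift, as on GCC/Clang. */
static uint32_t pkhtb(uint32_t a, int32_t b, unsigned sh) {
    return (a & 0xFFFF0000u) | ((uint32_t)(b >> sh) & 0x0000FFFFu);
}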
-
-//===----------------------------------------------------------------------===//
-// Comparison Instructions...
-//
-
-defm CMP : AI1_cmp_irs<0b1010, "cmp",
- BinOpFrag<(ARMcmp node:$LHS, node:$RHS)>>;
-//FIXME: Disable CMN, as CCodes are backwards from compare expectations
-// Compare-to-zero still works out, just not the relationals
-//defm CMN : AI1_cmp_irs<0b1011, "cmn",
-// BinOpFrag<(ARMcmp node:$LHS,(ineg node:$RHS))>>;
-
-// Note that TST/TEQ don't set all the same flags that CMP does!
-defm TST : AI1_cmp_irs<0b1000, "tst",
- BinOpFrag<(ARMcmpZ (and node:$LHS, node:$RHS), 0)>, 1>;
-defm TEQ : AI1_cmp_irs<0b1001, "teq",
- BinOpFrag<(ARMcmpZ (xor node:$LHS, node:$RHS), 0)>, 1>;
-
-defm CMPz : AI1_cmp_irs<0b1010, "cmp",
- BinOpFrag<(ARMcmpZ node:$LHS, node:$RHS)>>;
-defm CMNz : AI1_cmp_irs<0b1011, "cmn",
- BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>>;
-
-//def : ARMPat<(ARMcmp GPR:$src, so_imm_neg:$imm),
-// (CMNri GPR:$src, so_imm_neg:$imm)>;
-
-def : ARMPat<(ARMcmpZ GPR:$src, so_imm_neg:$imm),
- (CMNzri GPR:$src, so_imm_neg:$imm)>;
-
-
-// Conditional moves
-// FIXME: should be able to write a pattern for ARMcmov, but can't use
-// a two-value operand where a dag node expects two operands. :(
-def MOVCCr : AI1<0b1101, (outs GPR:$dst), (ins GPR:$false, GPR:$true), DPFrm,
- IIC_iCMOVr, "mov", "\t$dst, $true",
- [/*(set GPR:$dst, (ARMcmov GPR:$false, GPR:$true, imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $dst">, UnaryDP {
- let Inst{11-4} = 0b00000000;
- let Inst{25} = 0;
-}
-
-def MOVCCs : AI1<0b1101, (outs GPR:$dst),
- (ins GPR:$false, so_reg:$true), DPSoRegFrm, IIC_iCMOVsr,
- "mov", "\t$dst, $true",
- [/*(set GPR:$dst, (ARMcmov GPR:$false, so_reg:$true, imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $dst">, UnaryDP {
- let Inst{25} = 0;
-}
-
-def MOVCCi : AI1<0b1101, (outs GPR:$dst),
- (ins GPR:$false, so_imm:$true), DPFrm, IIC_iCMOVi,
- "mov", "\t$dst, $true",
- [/*(set GPR:$dst, (ARMcmov GPR:$false, so_imm:$true, imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $dst">, UnaryDP {
- let Inst{25} = 1;
-}
-
-//===----------------------------------------------------------------------===//
-// Atomic operations intrinsics
-//
-
-// memory barriers protect the atomic sequences
-let hasSideEffects = 1 in {
-def Int_MemBarrierV7 : AInoP<(outs), (ins),
- Pseudo, NoItinerary,
- "dmb", "",
- [(ARMMemBarrierV7)]>,
- Requires<[IsARM, HasV7]> {
- let Inst{31-4} = 0xf57ff05;
- // FIXME: add support for options other than a full system DMB
- // See DMB disassembly-only variants below.
- let Inst{3-0} = 0b1111;
-}
-
-def Int_SyncBarrierV7 : AInoP<(outs), (ins),
- Pseudo, NoItinerary,
- "dsb", "",
- [(ARMSyncBarrierV7)]>,
- Requires<[IsARM, HasV7]> {
- let Inst{31-4} = 0xf57ff04;
- // FIXME: add support for options other than a full system DSB
- // See DSB disassembly-only variants below.
- let Inst{3-0} = 0b1111;
-}
-
-def Int_MemBarrierV6 : AInoP<(outs), (ins GPR:$zero),
- Pseudo, NoItinerary,
- "mcr", "\tp15, 0, $zero, c7, c10, 5",
- [(ARMMemBarrierV6 GPR:$zero)]>,
- Requires<[IsARM, HasV6]> {
- // FIXME: add support for options other than a full system DMB
- // FIXME: add encoding
-}
-
-def Int_SyncBarrierV6 : AInoP<(outs), (ins GPR:$zero),
- Pseudo, NoItinerary,
- "mcr", "\tp15, 0, $zero, c7, c10, 4",
- [(ARMSyncBarrierV6 GPR:$zero)]>,
- Requires<[IsARM, HasV6]> {
- // FIXME: add support for options other than a full system DSB
- // FIXME: add encoding
-}
-}
-
-// Helper class for multiclass MemB -- for disassembly only
-class AMBI<string opc, string asm>
- : AInoP<(outs), (ins), MiscFrm, NoItinerary, opc, asm,
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV7]> {
- let Inst{31-20} = 0xf57;
-}
-
-multiclass MemB<bits<4> op7_4, string opc> {
-
- def st : AMBI<opc, "\tst"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b1110;
- }
-
- def ish : AMBI<opc, "\tish"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b1011;
- }
-
- def ishst : AMBI<opc, "\tishst"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b1010;
- }
-
- def nsh : AMBI<opc, "\tnsh"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b0111;
- }
-
- def nshst : AMBI<opc, "\tnshst"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b0110;
- }
-
- def osh : AMBI<opc, "\tosh"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b0011;
- }
-
- def oshst : AMBI<opc, "\toshst"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b0010;
- }
-}
-
-// These DMB variants are for disassembly only.
-defm DMB : MemB<0b0101, "dmb">;
-
-// These DSB variants are for disassembly only.
-defm DSB : MemB<0b0100, "dsb">;
-
-// ISB has only the full system option -- for disassembly only
-def ISBsy : AMBI<"isb", ""> {
- let Inst{7-4} = 0b0110;
- let Inst{3-0} = 0b1111;
-}
-
-let usesCustomInserter = 1 in {
- let Uses = [CPSR] in {
- def ATOMIC_LOAD_ADD_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_ADD_I8 PSEUDO!",
- [(set GPR:$dst, (atomic_load_add_8 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_SUB_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_SUB_I8 PSEUDO!",
- [(set GPR:$dst, (atomic_load_sub_8 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_AND_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_AND_I8 PSEUDO!",
- [(set GPR:$dst, (atomic_load_and_8 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_OR_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_OR_I8 PSEUDO!",
- [(set GPR:$dst, (atomic_load_or_8 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_XOR_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_XOR_I8 PSEUDO!",
- [(set GPR:$dst, (atomic_load_xor_8 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_NAND_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_NAND_I8 PSEUDO!",
- [(set GPR:$dst, (atomic_load_nand_8 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_ADD_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_ADD_I16 PSEUDO!",
- [(set GPR:$dst, (atomic_load_add_16 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_SUB_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_SUB_I16 PSEUDO!",
- [(set GPR:$dst, (atomic_load_sub_16 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_AND_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_AND_I16 PSEUDO!",
- [(set GPR:$dst, (atomic_load_and_16 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_OR_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_OR_I16 PSEUDO!",
- [(set GPR:$dst, (atomic_load_or_16 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_XOR_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_XOR_I16 PSEUDO!",
- [(set GPR:$dst, (atomic_load_xor_16 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_NAND_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_NAND_I16 PSEUDO!",
- [(set GPR:$dst, (atomic_load_nand_16 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_ADD_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_ADD_I32 PSEUDO!",
- [(set GPR:$dst, (atomic_load_add_32 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_SUB_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_SUB_I32 PSEUDO!",
- [(set GPR:$dst, (atomic_load_sub_32 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_AND_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_AND_I32 PSEUDO!",
- [(set GPR:$dst, (atomic_load_and_32 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_OR_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_OR_I32 PSEUDO!",
- [(set GPR:$dst, (atomic_load_or_32 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_XOR_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_XOR_I32 PSEUDO!",
- [(set GPR:$dst, (atomic_load_xor_32 GPR:$ptr, GPR:$incr))]>;
- def ATOMIC_LOAD_NAND_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$incr), NoItinerary,
- "${:comment} ATOMIC_LOAD_NAND_I32 PSEUDO!",
- [(set GPR:$dst, (atomic_load_nand_32 GPR:$ptr, GPR:$incr))]>;
-
- def ATOMIC_SWAP_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$new), NoItinerary,
- "${:comment} ATOMIC_SWAP_I8 PSEUDO!",
- [(set GPR:$dst, (atomic_swap_8 GPR:$ptr, GPR:$new))]>;
- def ATOMIC_SWAP_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$new), NoItinerary,
- "${:comment} ATOMIC_SWAP_I16 PSEUDO!",
- [(set GPR:$dst, (atomic_swap_16 GPR:$ptr, GPR:$new))]>;
- def ATOMIC_SWAP_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$new), NoItinerary,
- "${:comment} ATOMIC_SWAP_I32 PSEUDO!",
- [(set GPR:$dst, (atomic_swap_32 GPR:$ptr, GPR:$new))]>;
-
- def ATOMIC_CMP_SWAP_I8 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$old, GPR:$new), NoItinerary,
- "${:comment} ATOMIC_CMP_SWAP_I8 PSEUDO!",
- [(set GPR:$dst, (atomic_cmp_swap_8 GPR:$ptr, GPR:$old, GPR:$new))]>;
- def ATOMIC_CMP_SWAP_I16 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$old, GPR:$new), NoItinerary,
- "${:comment} ATOMIC_CMP_SWAP_I16 PSEUDO!",
- [(set GPR:$dst, (atomic_cmp_swap_16 GPR:$ptr, GPR:$old, GPR:$new))]>;
- def ATOMIC_CMP_SWAP_I32 : PseudoInst<
- (outs GPR:$dst), (ins GPR:$ptr, GPR:$old, GPR:$new), NoItinerary,
- "${:comment} ATOMIC_CMP_SWAP_I32 PSEUDO!",
- [(set GPR:$dst, (atomic_cmp_swap_32 GPR:$ptr, GPR:$old, GPR:$new))]>;
-}
-}
-
-let mayLoad = 1 in {
-def LDREXB : AIldrex<0b10, (outs GPR:$dest), (ins GPR:$ptr), NoItinerary,
- "ldrexb", "\t$dest, [$ptr]",
- []>;
-def LDREXH : AIldrex<0b11, (outs GPR:$dest), (ins GPR:$ptr), NoItinerary,
- "ldrexh", "\t$dest, [$ptr]",
- []>;
-def LDREX : AIldrex<0b00, (outs GPR:$dest), (ins GPR:$ptr), NoItinerary,
- "ldrex", "\t$dest, [$ptr]",
- []>;
-def LDREXD : AIldrex<0b01, (outs GPR:$dest, GPR:$dest2), (ins GPR:$ptr),
- NoItinerary,
- "ldrexd", "\t$dest, $dest2, [$ptr]",
- []>;
-}
-
-let mayStore = 1, Constraints = "@earlyclobber $success" in {
-def STREXB : AIstrex<0b10, (outs GPR:$success), (ins GPR:$src, GPR:$ptr),
- NoItinerary,
- "strexb", "\t$success, $src, [$ptr]",
- []>;
-def STREXH : AIstrex<0b11, (outs GPR:$success), (ins GPR:$src, GPR:$ptr),
- NoItinerary,
- "strexh", "\t$success, $src, [$ptr]",
- []>;
-def STREX : AIstrex<0b00, (outs GPR:$success), (ins GPR:$src, GPR:$ptr),
- NoItinerary,
- "strex", "\t$success, $src, [$ptr]",
- []>;
-def STREXD : AIstrex<0b01, (outs GPR:$success),
- (ins GPR:$src, GPR:$src2, GPR:$ptr),
- NoItinerary,
- "strexd", "\t$success, $src, $src2, [$ptr]",
- []>;
-}
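For reference, the classic ARMv6 load-exclusive/store-exclusive retry loop that LDREX/STREX exist to build; a hedged sketch using GCC inline assembly, not the backend's own expansion of the ATOMIC_* pseudos above:

/* Atomic fetch-add returning the new value; retries until strex reports
   that no other observer touched the location in between. */
static int atomic_add_return(int *ptr, int incr)
{
    int result, tmp;
    __asm__ __volatile__(
        "1: ldrex   %0, [%2]\n"       /* load-exclusive the old value      */
        "   add     %0, %0, %3\n"     /* compute the new value             */
        "   strex   %1, %0, [%2]\n"   /* try to store; %1 == 0 on success  */
        "   teq     %1, #0\n"
        "   bne     1b"               /* exclusivity lost: retry           */
        : "=&r" (result), "=&r" (tmp)
        : "r" (ptr), "Ir" (incr)
        : "cc", "memory");
    return result;
}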
-
-// Clear-Exclusive is for disassembly only.
-def CLREX : AXI<(outs), (ins), MiscFrm, NoItinerary, "clrex",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV7]> {
- let Inst{31-20} = 0xf57;
- let Inst{7-4} = 0b0001;
-}
-
-// SWP/SWPB are deprecated in V6/V7 and for disassembly only.
-let mayLoad = 1 in {
-def SWP : AI<(outs GPR:$dst), (ins GPR:$src, GPR:$ptr), LdStExFrm, NoItinerary,
- "swp", "\t$dst, $src, [$ptr]",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{27-23} = 0b00010;
- let Inst{22} = 0; // B = 0
- let Inst{21-20} = 0b00;
- let Inst{7-4} = 0b1001;
-}
-
-def SWPB : AI<(outs GPR:$dst), (ins GPR:$src, GPR:$ptr), LdStExFrm, NoItinerary,
- "swpb", "\t$dst, $src, [$ptr]",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{27-23} = 0b00010;
- let Inst{22} = 1; // B = 1
- let Inst{21-20} = 0b00;
- let Inst{7-4} = 0b1001;
-}
-}
-
-//===----------------------------------------------------------------------===//
-// TLS Instructions
-//
-
-// __aeabi_read_tp preserves the registers r1-r3.
-let isCall = 1,
- Defs = [R0, R12, LR, CPSR] in {
- def TPsoft : ABXI<0b1011, (outs), (ins), IIC_Br,
- "bl\t__aeabi_read_tp",
- [(set R0, ARMthread_pointer)]>;
-}
-
-//===----------------------------------------------------------------------===//
-// SJLJ Exception handling intrinsics
-// eh_sjlj_setjmp() is an instruction sequence to store the return
-// address and save #0 in R0 for the non-longjmp case.
-// Since by its nature we may be coming from some other function to get
-// here, and we're using the stack frame for the containing function to
-// save/restore registers, we can't keep anything live in regs across
-// the eh_sjlj_setjmp(), else it will almost certainly have been tromped upon
-// when we get here from a longjmp(). We force everything out of registers
-// except for our own input by listing the relevant registers in Defs. By
-// doing so, we also cause the prologue/epilogue code to actively preserve
-// all of the callee-saved registers, which is exactly what we want.
-// A constant value is passed in $val, and we use the location as a scratch.
-let Defs =
- [ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, D0,
- D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15,
- D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30,
- D31 ] in {
- def Int_eh_sjlj_setjmp : XI<(outs), (ins GPR:$src, GPR:$val),
- AddrModeNone, SizeSpecial, IndexModeNone,
- Pseudo, NoItinerary,
- "str\tsp, [$src, #+8] @ eh_setjmp begin\n\t"
- "add\t$val, pc, #8\n\t"
- "str\t$val, [$src, #+4]\n\t"
- "mov\tr0, #0\n\t"
- "add\tpc, pc, #0\n\t"
- "mov\tr0, #1 @ eh_setjmp end", "",
- [(set R0, (ARMeh_sjlj_setjmp GPR:$src, GPR:$val))]>;
-}
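For reference, the buffer slots the inline sequence above touches, read straight off its asm string (illustrative only):

#include <stdint.h>

/* Layout implied by "str sp, [$src, #+8]" and "str $val, [$src, #+4]":
   slot 1 holds the resume address (the trailing "mov r0, #1", reached only
   via longjmp), slot 2 holds the stack pointer at the setjmp point.  The
   fall-through path skips that mov via "add pc, pc, #0", so a direct return
   leaves r0 == 0 and a return via longjmp leaves r0 == 1. */
struct eh_sjlj_slots {
    uint32_t unused;     /* [$src, #+0]: not written by this sequence */
    uint32_t resume_pc;  /* [$src, #+4] */
    uint32_t saved_sp;   /* [$src, #+8] */
};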
-
-//===----------------------------------------------------------------------===//
-// Non-Instruction Patterns
-//
-
-// Large immediate handling.
-
-// Two piece so_imms.
-let isReMaterializable = 1 in
-def MOVi2pieces : AI1x2<(outs GPR:$dst), (ins so_imm2part:$src),
- Pseudo, IIC_iMOVi,
- "mov", "\t$dst, $src",
- [(set GPR:$dst, so_imm2part:$src)]>,
- Requires<[IsARM, NoV6T2]>;
-
-def : ARMPat<(or GPR:$LHS, so_imm2part:$RHS),
- (ORRri (ORRri GPR:$LHS, (so_imm2part_1 imm:$RHS)),
- (so_imm2part_2 imm:$RHS))>;
-def : ARMPat<(xor GPR:$LHS, so_imm2part:$RHS),
- (EORri (EORri GPR:$LHS, (so_imm2part_1 imm:$RHS)),
- (so_imm2part_2 imm:$RHS))>;
-def : ARMPat<(add GPR:$LHS, so_imm2part:$RHS),
- (ADDri (ADDri GPR:$LHS, (so_imm2part_1 imm:$RHS)),
- (so_imm2part_2 imm:$RHS))>;
-def : ARMPat<(add GPR:$LHS, so_neg_imm2part:$RHS),
- (SUBri (SUBri GPR:$LHS, (so_neg_imm2part_1 imm:$RHS)),
- (so_neg_imm2part_2 imm:$RHS))>;
-
-// 32-bit immediate using movw + movt.
-// This is a single pseudo instruction; the benefit is that it can be remat'd
-// as a single unit instead of having to handle reg inputs.
-// FIXME: Remove this when we can do generalized remat.
-let isReMaterializable = 1 in
-def MOVi32imm : AI1x2<(outs GPR:$dst), (ins i32imm:$src), Pseudo, IIC_iMOVi,
- "movw", "\t$dst, ${src:lo16}\n\tmovt${p}\t$dst, ${src:hi16}",
- [(set GPR:$dst, (i32 imm:$src))]>,
- Requires<[IsARM, HasV6T2]>;
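For reference, a minimal C sketch of the two ways a 32-bit constant is materialized above: split into two shifter-operand pieces (pre-v6t2), or into the lo16/hi16 halves written by the movw/movt pair that MOVi32imm prints (illustrative only):

#include <stdint.h>

/* v6t2 path: movw writes the low halfword and clears the rest,
   movt then writes the high halfword. */
static void split_movw_movt(uint32_t imm, uint16_t *lo16, uint16_t *hi16)
{
    *lo16 = (uint16_t)(imm & 0xFFFFu);  /* movw dst, #lo16 */
    *hi16 = (uint16_t)(imm >> 16);      /* movt dst, #hi16 */
}

/* Pre-v6t2 path (conceptually): imm == part1 | part2, where each part is an
   encodable so_imm applied with its own instruction, e.g.
   orr dst, src, #part1 ; orr dst, dst, #part2 -- matching the nested
   ORRri/EORri/ADDri/SUBri patterns above. */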
-
-// ConstantPool, GlobalAddress, and JumpTable
-def : ARMPat<(ARMWrapper tglobaladdr :$dst), (LEApcrel tglobaladdr :$dst)>,
- Requires<[IsARM, DontUseMovt]>;
-def : ARMPat<(ARMWrapper tconstpool :$dst), (LEApcrel tconstpool :$dst)>;
-def : ARMPat<(ARMWrapper tglobaladdr :$dst), (MOVi32imm tglobaladdr :$dst)>,
- Requires<[IsARM, UseMovt]>;
-def : ARMPat<(ARMWrapperJT tjumptable:$dst, imm:$id),
- (LEApcrelJT tjumptable:$dst, imm:$id)>;
-
-// TODO: add,sub,and, 3-instr forms?
-
-
-// Direct calls
-def : ARMPat<(ARMcall texternalsym:$func), (BL texternalsym:$func)>,
- Requires<[IsARM, IsNotDarwin]>;
-def : ARMPat<(ARMcall texternalsym:$func), (BLr9 texternalsym:$func)>,
- Requires<[IsARM, IsDarwin]>;
-
-// zextload i1 -> zextload i8
-def : ARMPat<(zextloadi1 addrmode2:$addr), (LDRB addrmode2:$addr)>;
-
-// extload -> zextload
-def : ARMPat<(extloadi1 addrmode2:$addr), (LDRB addrmode2:$addr)>;
-def : ARMPat<(extloadi8 addrmode2:$addr), (LDRB addrmode2:$addr)>;
-def : ARMPat<(extloadi16 addrmode3:$addr), (LDRH addrmode3:$addr)>;
-
-def : ARMPat<(extloadi8 addrmodepc:$addr), (PICLDRB addrmodepc:$addr)>;
-def : ARMPat<(extloadi16 addrmodepc:$addr), (PICLDRH addrmodepc:$addr)>;
-
-// smul* and smla*
-def : ARMV5TEPat<(mul (sra (shl GPR:$a, (i32 16)), (i32 16)),
- (sra (shl GPR:$b, (i32 16)), (i32 16))),
- (SMULBB GPR:$a, GPR:$b)>;
-def : ARMV5TEPat<(mul sext_16_node:$a, sext_16_node:$b),
- (SMULBB GPR:$a, GPR:$b)>;
-def : ARMV5TEPat<(mul (sra (shl GPR:$a, (i32 16)), (i32 16)),
- (sra GPR:$b, (i32 16))),
- (SMULBT GPR:$a, GPR:$b)>;
-def : ARMV5TEPat<(mul sext_16_node:$a, (sra GPR:$b, (i32 16))),
- (SMULBT GPR:$a, GPR:$b)>;
-def : ARMV5TEPat<(mul (sra GPR:$a, (i32 16)),
- (sra (shl GPR:$b, (i32 16)), (i32 16))),
- (SMULTB GPR:$a, GPR:$b)>;
-def : ARMV5TEPat<(mul (sra GPR:$a, (i32 16)), sext_16_node:$b),
- (SMULTB GPR:$a, GPR:$b)>;
-def : ARMV5TEPat<(sra (mul GPR:$a, (sra (shl GPR:$b, (i32 16)), (i32 16))),
- (i32 16)),
- (SMULWB GPR:$a, GPR:$b)>;
-def : ARMV5TEPat<(sra (mul GPR:$a, sext_16_node:$b), (i32 16)),
- (SMULWB GPR:$a, GPR:$b)>;
-
-def : ARMV5TEPat<(add GPR:$acc,
- (mul (sra (shl GPR:$a, (i32 16)), (i32 16)),
- (sra (shl GPR:$b, (i32 16)), (i32 16)))),
- (SMLABB GPR:$a, GPR:$b, GPR:$acc)>;
-def : ARMV5TEPat<(add GPR:$acc,
- (mul sext_16_node:$a, sext_16_node:$b)),
- (SMLABB GPR:$a, GPR:$b, GPR:$acc)>;
-def : ARMV5TEPat<(add GPR:$acc,
- (mul (sra (shl GPR:$a, (i32 16)), (i32 16)),
- (sra GPR:$b, (i32 16)))),
- (SMLABT GPR:$a, GPR:$b, GPR:$acc)>;
-def : ARMV5TEPat<(add GPR:$acc,
- (mul sext_16_node:$a, (sra GPR:$b, (i32 16)))),
- (SMLABT GPR:$a, GPR:$b, GPR:$acc)>;
-def : ARMV5TEPat<(add GPR:$acc,
- (mul (sra GPR:$a, (i32 16)),
- (sra (shl GPR:$b, (i32 16)), (i32 16)))),
- (SMLATB GPR:$a, GPR:$b, GPR:$acc)>;
-def : ARMV5TEPat<(add GPR:$acc,
- (mul (sra GPR:$a, (i32 16)), sext_16_node:$b)),
- (SMLATB GPR:$a, GPR:$b, GPR:$acc)>;
-def : ARMV5TEPat<(add GPR:$acc,
- (sra (mul GPR:$a, (sra (shl GPR:$b, (i32 16)), (i32 16))),
- (i32 16))),
- (SMLAWB GPR:$a, GPR:$b, GPR:$acc)>;
-def : ARMV5TEPat<(add GPR:$acc,
- (sra (mul GPR:$a, sext_16_node:$b), (i32 16))),
- (SMLAWB GPR:$a, GPR:$b, GPR:$acc)>;
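For reference, a minimal C sketch of the scalar arithmetic the smul*/smla* patterns above recognize, where (sra (shl x, 16), 16) is simply the sign-extended low halfword (illustrative only):

#include <stdint.h>

/* smulbb: multiply the sign-extended low halfwords. */
static int32_t smulbb(int32_t a, int32_t b) {
    return (int32_t)(int16_t)a * (int32_t)(int16_t)b;
}

/* smlabb: the same product accumulated into acc. */
static int32_t smlabb(int32_t a, int32_t b, int32_t acc) {
    return acc + (int32_t)(int16_t)a * (int32_t)(int16_t)b;
}

/* smulwb: 32 x 16 multiply keeping the upper 32 bits of the 48-bit result,
   i.e. (sra (mul a, sext16(b)), 16). */
static int32_t smulwb(int32_t a, int32_t b) {
    return (int32_t)(((int64_t)a * (int16_t)b) >> 16);
}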
-
-//===----------------------------------------------------------------------===//
-// Thumb Support
-//
-
-include "ARMInstrThumb.td"
-
-//===----------------------------------------------------------------------===//
-// Thumb2 Support
-//
-
-include "ARMInstrThumb2.td"
-
-//===----------------------------------------------------------------------===//
-// Floating Point Support
-//
-
-include "ARMInstrVFP.td"
-
-//===----------------------------------------------------------------------===//
-// Advanced SIMD (NEON) Support
-//
-
-include "ARMInstrNEON.td"
-
-//===----------------------------------------------------------------------===//
-// Coprocessor Instructions. For disassembly only.
-//
-
-def CDP : ABI<0b1110, (outs), (ins nohash_imm:$cop, i32imm:$opc1,
- nohash_imm:$CRd, nohash_imm:$CRn, nohash_imm:$CRm, i32imm:$opc2),
- NoItinerary, "cdp", "\tp$cop, $opc1, cr$CRd, cr$CRn, cr$CRm, $opc2",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{4} = 0;
-}
-
-def CDP2 : ABXI<0b1110, (outs), (ins nohash_imm:$cop, i32imm:$opc1,
- nohash_imm:$CRd, nohash_imm:$CRn, nohash_imm:$CRm, i32imm:$opc2),
- NoItinerary, "cdp2\tp$cop, $opc1, cr$CRd, cr$CRn, cr$CRm, $opc2",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-28} = 0b1111;
- let Inst{4} = 0;
-}
-
-class ACI<dag oops, dag iops, string opc, string asm>
- : I<oops, iops, AddrModeNone, Size4Bytes, IndexModeNone, BrFrm, NoItinerary,
- opc, asm, "", [/* For disassembly only; pattern left blank */]> {
- let Inst{27-25} = 0b110;
-}
-
-multiclass LdStCop<bits<4> op31_28, bit load, string opc> {
-
- def _OFFSET : ACI<(outs),
- (ins nohash_imm:$cop, nohash_imm:$CRd, addrmode2:$addr),
- opc, "\tp$cop, cr$CRd, $addr"> {
- let Inst{31-28} = op31_28;
- let Inst{24} = 1; // P = 1
- let Inst{21} = 0; // W = 0
- let Inst{22} = 0; // D = 0
- let Inst{20} = load;
- }
-
- def _PRE : ACI<(outs),
- (ins nohash_imm:$cop, nohash_imm:$CRd, addrmode2:$addr),
- opc, "\tp$cop, cr$CRd, $addr!"> {
- let Inst{31-28} = op31_28;
- let Inst{24} = 1; // P = 1
- let Inst{21} = 1; // W = 1
- let Inst{22} = 0; // D = 0
- let Inst{20} = load;
- }
-
- def _POST : ACI<(outs),
- (ins nohash_imm:$cop, nohash_imm:$CRd, GPR:$base, am2offset:$offset),
- opc, "\tp$cop, cr$CRd, [$base], $offset"> {
- let Inst{31-28} = op31_28;
- let Inst{24} = 0; // P = 0
- let Inst{21} = 1; // W = 1
- let Inst{22} = 0; // D = 0
- let Inst{20} = load;
- }
-
- def _OPTION : ACI<(outs),
- (ins nohash_imm:$cop, nohash_imm:$CRd, GPR:$base, i32imm:$option),
- opc, "\tp$cop, cr$CRd, [$base], $option"> {
- let Inst{31-28} = op31_28;
- let Inst{24} = 0; // P = 0
- let Inst{23} = 1; // U = 1
- let Inst{21} = 0; // W = 0
- let Inst{22} = 0; // D = 0
- let Inst{20} = load;
- }
-
- def L_OFFSET : ACI<(outs),
- (ins nohash_imm:$cop, nohash_imm:$CRd, addrmode2:$addr),
- opc, "l\tp$cop, cr$CRd, $addr"> {
- let Inst{31-28} = op31_28;
- let Inst{24} = 1; // P = 1
- let Inst{21} = 0; // W = 0
- let Inst{22} = 1; // D = 1
- let Inst{20} = load;
- }
-
- def L_PRE : ACI<(outs),
- (ins nohash_imm:$cop, nohash_imm:$CRd, addrmode2:$addr),
- opc, "l\tp$cop, cr$CRd, $addr!"> {
- let Inst{31-28} = op31_28;
- let Inst{24} = 1; // P = 1
- let Inst{21} = 1; // W = 1
- let Inst{22} = 1; // D = 1
- let Inst{20} = load;
- }
-
- def L_POST : ACI<(outs),
- (ins nohash_imm:$cop, nohash_imm:$CRd, GPR:$base, am2offset:$offset),
- opc, "l\tp$cop, cr$CRd, [$base], $offset"> {
- let Inst{31-28} = op31_28;
- let Inst{24} = 0; // P = 0
- let Inst{21} = 1; // W = 1
- let Inst{22} = 1; // D = 1
- let Inst{20} = load;
- }
-
- def L_OPTION : ACI<(outs),
- (ins nohash_imm:$cop, nohash_imm:$CRd, GPR:$base, nohash_imm:$option),
- opc, "l\tp$cop, cr$CRd, [$base], $option"> {
- let Inst{31-28} = op31_28;
- let Inst{24} = 0; // P = 0
- let Inst{23} = 1; // U = 1
- let Inst{21} = 0; // W = 0
- let Inst{22} = 1; // D = 1
- let Inst{20} = load;
- }
-}
-
-defm LDC : LdStCop<{?,?,?,?}, 1, "ldc">;
-defm LDC2 : LdStCop<0b1111, 1, "ldc2">;
-defm STC : LdStCop<{?,?,?,?}, 0, "stc">;
-defm STC2 : LdStCop<0b1111, 0, "stc2">;
-
-def MCR : ABI<0b1110, (outs), (ins nohash_imm:$cop, i32imm:$opc1,
- GPR:$Rt, nohash_imm:$CRn, nohash_imm:$CRm, i32imm:$opc2),
- NoItinerary, "mcr", "\tp$cop, $opc1, $Rt, cr$CRn, cr$CRm, $opc2",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{20} = 0;
- let Inst{4} = 1;
-}
-
-def MCR2 : ABXI<0b1110, (outs), (ins nohash_imm:$cop, i32imm:$opc1,
- GPR:$Rt, nohash_imm:$CRn, nohash_imm:$CRm, i32imm:$opc2),
- NoItinerary, "mcr2\tp$cop, $opc1, $Rt, cr$CRn, cr$CRm, $opc2",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-28} = 0b1111;
- let Inst{20} = 0;
- let Inst{4} = 1;
-}
-
-def MRC : ABI<0b1110, (outs), (ins nohash_imm:$cop, i32imm:$opc1,
- GPR:$Rt, nohash_imm:$CRn, nohash_imm:$CRm, i32imm:$opc2),
- NoItinerary, "mrc", "\tp$cop, $opc1, $Rt, cr$CRn, cr$CRm, $opc2",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{20} = 1;
- let Inst{4} = 1;
-}
-
-def MRC2 : ABXI<0b1110, (outs), (ins nohash_imm:$cop, i32imm:$opc1,
- GPR:$Rt, nohash_imm:$CRn, nohash_imm:$CRm, i32imm:$opc2),
- NoItinerary, "mrc2\tp$cop, $opc1, $Rt, cr$CRn, cr$CRm, $opc2",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-28} = 0b1111;
- let Inst{20} = 1;
- let Inst{4} = 1;
-}
-
-def MCRR : ABI<0b1100, (outs), (ins nohash_imm:$cop, i32imm:$opc,
- GPR:$Rt, GPR:$Rt2, nohash_imm:$CRm),
- NoItinerary, "mcrr", "\tp$cop, $opc, $Rt, $Rt2, cr$CRm",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0100;
-}
-
-def MCRR2 : ABXI<0b1100, (outs), (ins nohash_imm:$cop, i32imm:$opc,
- GPR:$Rt, GPR:$Rt2, nohash_imm:$CRm),
- NoItinerary, "mcrr2\tp$cop, $opc, $Rt, $Rt2, cr$CRm",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-28} = 0b1111;
- let Inst{23-20} = 0b0100;
-}
-
-def MRRC : ABI<0b1100, (outs), (ins nohash_imm:$cop, i32imm:$opc,
- GPR:$Rt, GPR:$Rt2, nohash_imm:$CRm),
- NoItinerary, "mrrc", "\tp$cop, $opc, $Rt, $Rt2, cr$CRm",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0101;
-}
-
-def MRRC2 : ABXI<0b1100, (outs), (ins nohash_imm:$cop, i32imm:$opc,
- GPR:$Rt, GPR:$Rt2, nohash_imm:$CRm),
- NoItinerary, "mrrc2\tp$cop, $opc, $Rt, $Rt2, cr$CRm",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-28} = 0b1111;
- let Inst{23-20} = 0b0101;
-}
-
-//===----------------------------------------------------------------------===//
-// Move between special register and ARM core register -- for disassembly only
-//
-
-def MRS : ABI<0b0001,(outs GPR:$dst),(ins), NoItinerary, "mrs", "\t$dst, cpsr",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0000;
- let Inst{7-4} = 0b0000;
-}
-
-def MRSsys : ABI<0b0001,(outs GPR:$dst),(ins), NoItinerary,"mrs","\t$dst, spsr",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0100;
- let Inst{7-4} = 0b0000;
-}
-
-// FIXME: mask is ignored for the time being.
-def MSR : ABI<0b0001,(outs),(ins GPR:$src), NoItinerary, "msr", "\tcpsr, $src",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0010;
- let Inst{7-4} = 0b0000;
-}
-
-// FIXME: mask is ignored for the time being.
-def MSRi : ABI<0b0011,(outs),(ins so_imm:$a), NoItinerary, "msr", "\tcpsr, $a",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0010;
- let Inst{7-4} = 0b0000;
-}
-
-// FIXME: mask is ignored for the time being.
-def MSRsys : ABI<0b0001,(outs),(ins GPR:$src),NoItinerary,"msr","\tspsr, $src",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0110;
- let Inst{7-4} = 0b0000;
-}
-
-// FIXME: mask is ignored for the time being.
-def MSRsysi : ABI<0b0011,(outs),(ins so_imm:$a),NoItinerary,"msr","\tspsr, $a",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{23-20} = 0b0110;
- let Inst{7-4} = 0b0000;
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMInstrNEON.td b/libclamav/c++/llvm/lib/Target/ARM/ARMInstrNEON.td
deleted file mode 100644
index 3aa0810..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ /dev/null
@@ -1,3264 +0,0 @@
-//===- ARMInstrNEON.td - NEON support for ARM -----------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the ARM NEON instruction set.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// NEON-specific DAG Nodes.
-//===----------------------------------------------------------------------===//
-
-def SDTARMVCMP : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<1, 2>]>;
-
-def NEONvceq : SDNode<"ARMISD::VCEQ", SDTARMVCMP>;
-def NEONvcge : SDNode<"ARMISD::VCGE", SDTARMVCMP>;
-def NEONvcgeu : SDNode<"ARMISD::VCGEU", SDTARMVCMP>;
-def NEONvcgt : SDNode<"ARMISD::VCGT", SDTARMVCMP>;
-def NEONvcgtu : SDNode<"ARMISD::VCGTU", SDTARMVCMP>;
-def NEONvtst : SDNode<"ARMISD::VTST", SDTARMVCMP>;
-
-// Types for vector shift by immediates. The "SHX" version is for long and
-// narrow operations where the source and destination vectors have different
-// types. The "SHINS" version is for shift and insert operations.
-def SDTARMVSH : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
- SDTCisVT<2, i32>]>;
-def SDTARMVSHX : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
- SDTCisVT<2, i32>]>;
-def SDTARMVSHINS : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
- SDTCisSameAs<0, 2>, SDTCisVT<3, i32>]>;
-
-def NEONvshl : SDNode<"ARMISD::VSHL", SDTARMVSH>;
-def NEONvshrs : SDNode<"ARMISD::VSHRs", SDTARMVSH>;
-def NEONvshru : SDNode<"ARMISD::VSHRu", SDTARMVSH>;
-def NEONvshlls : SDNode<"ARMISD::VSHLLs", SDTARMVSHX>;
-def NEONvshllu : SDNode<"ARMISD::VSHLLu", SDTARMVSHX>;
-def NEONvshlli : SDNode<"ARMISD::VSHLLi", SDTARMVSHX>;
-def NEONvshrn : SDNode<"ARMISD::VSHRN", SDTARMVSHX>;
-
-def NEONvrshrs : SDNode<"ARMISD::VRSHRs", SDTARMVSH>;
-def NEONvrshru : SDNode<"ARMISD::VRSHRu", SDTARMVSH>;
-def NEONvrshrn : SDNode<"ARMISD::VRSHRN", SDTARMVSHX>;
-
-def NEONvqshls : SDNode<"ARMISD::VQSHLs", SDTARMVSH>;
-def NEONvqshlu : SDNode<"ARMISD::VQSHLu", SDTARMVSH>;
-def NEONvqshlsu : SDNode<"ARMISD::VQSHLsu", SDTARMVSH>;
-def NEONvqshrns : SDNode<"ARMISD::VQSHRNs", SDTARMVSHX>;
-def NEONvqshrnu : SDNode<"ARMISD::VQSHRNu", SDTARMVSHX>;
-def NEONvqshrnsu : SDNode<"ARMISD::VQSHRNsu", SDTARMVSHX>;
-
-def NEONvqrshrns : SDNode<"ARMISD::VQRSHRNs", SDTARMVSHX>;
-def NEONvqrshrnu : SDNode<"ARMISD::VQRSHRNu", SDTARMVSHX>;
-def NEONvqrshrnsu : SDNode<"ARMISD::VQRSHRNsu", SDTARMVSHX>;
-
-def NEONvsli : SDNode<"ARMISD::VSLI", SDTARMVSHINS>;
-def NEONvsri : SDNode<"ARMISD::VSRI", SDTARMVSHINS>;
-
-def SDTARMVGETLN : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisInt<1>,
- SDTCisVT<2, i32>]>;
-def NEONvgetlaneu : SDNode<"ARMISD::VGETLANEu", SDTARMVGETLN>;
-def NEONvgetlanes : SDNode<"ARMISD::VGETLANEs", SDTARMVGETLN>;
-
-def NEONvdup : SDNode<"ARMISD::VDUP", SDTypeProfile<1, 1, [SDTCisVec<0>]>>;
-
-// VDUPLANE can produce a quad-register result from a double-register source,
-// so the result is not constrained to match the source.
-def NEONvduplane : SDNode<"ARMISD::VDUPLANE",
- SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
- SDTCisVT<2, i32>]>>;
-
-def SDTARMVEXT : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
- SDTCisSameAs<0, 2>, SDTCisVT<3, i32>]>;
-def NEONvext : SDNode<"ARMISD::VEXT", SDTARMVEXT>;
-
-def SDTARMVSHUF : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0, 1>]>;
-def NEONvrev64 : SDNode<"ARMISD::VREV64", SDTARMVSHUF>;
-def NEONvrev32 : SDNode<"ARMISD::VREV32", SDTARMVSHUF>;
-def NEONvrev16 : SDNode<"ARMISD::VREV16", SDTARMVSHUF>;
-
-def SDTARMVSHUF2 : SDTypeProfile<2, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
- SDTCisSameAs<0, 2>,
- SDTCisSameAs<0, 3>]>;
-def NEONzip : SDNode<"ARMISD::VZIP", SDTARMVSHUF2>;
-def NEONuzp : SDNode<"ARMISD::VUZP", SDTARMVSHUF2>;
-def NEONtrn : SDNode<"ARMISD::VTRN", SDTARMVSHUF2>;
-
-def SDTARMFMAX : SDTypeProfile<1, 2, [SDTCisVT<0, f32>, SDTCisSameAs<0, 1>,
- SDTCisSameAs<0, 2>]>;
-def NEONfmax : SDNode<"ARMISD::FMAX", SDTARMFMAX>;
-def NEONfmin : SDNode<"ARMISD::FMIN", SDTARMFMAX>;
-
-//===----------------------------------------------------------------------===//
-// NEON operand definitions
-//===----------------------------------------------------------------------===//
-
-// addrmode_neonldstm := reg
-//
-/* TODO: Take advantage of vldm.
-def addrmode_neonldstm : Operand<i32>,
- ComplexPattern<i32, 2, "SelectAddrModeNeonLdStM", []> {
- let PrintMethod = "printAddrNeonLdStMOperand";
- let MIOperandInfo = (ops GPR, i32imm);
-}
-*/
-
-def h8imm : Operand<i8> {
- let PrintMethod = "printHex8ImmOperand";
-}
-def h16imm : Operand<i16> {
- let PrintMethod = "printHex16ImmOperand";
-}
-def h32imm : Operand<i32> {
- let PrintMethod = "printHex32ImmOperand";
-}
-def h64imm : Operand<i64> {
- let PrintMethod = "printHex64ImmOperand";
-}
-
-//===----------------------------------------------------------------------===//
-// NEON load / store instructions
-//===----------------------------------------------------------------------===//
-
-/* TODO: Take advantage of vldm.
-let mayLoad = 1, hasExtraDefRegAllocReq = 1 in {
-def VLDMD : NI<(outs),
- (ins addrmode_neonldstm:$addr, reglist:$dst1, variable_ops),
- IIC_fpLoadm, "vldm", "${addr:submode} ${addr:base}, $dst1", []> {
- let Inst{27-25} = 0b110;
- let Inst{20} = 1;
- let Inst{11-9} = 0b101;
-}
-
-def VLDMS : NI<(outs),
- (ins addrmode_neonldstm:$addr, reglist:$dst1, variable_ops),
- IIC_fpLoadm, "vldm", "${addr:submode} ${addr:base}, $dst1", []> {
- let Inst{27-25} = 0b110;
- let Inst{20} = 1;
- let Inst{11-9} = 0b101;
-}
-}
-*/
-
-// Use vldmia to load a Q register as a D register pair.
-def VLDRQ : NI4<(outs QPR:$dst), (ins addrmode4:$addr), IIC_fpLoadm,
- "vldmia", "$addr, ${dst:dregpair}",
- [(set QPR:$dst, (v2f64 (load addrmode4:$addr)))]> {
- let Inst{27-25} = 0b110;
- let Inst{24} = 0; // P bit
- let Inst{23} = 1; // U bit
- let Inst{20} = 1;
- let Inst{11-8} = 0b1011;
-}
-
-// Use vstmia to store a Q register as a D register pair.
-def VSTRQ : NI4<(outs), (ins QPR:$src, addrmode4:$addr), IIC_fpStorem,
- "vstmia", "$addr, ${src:dregpair}",
- [(store (v2f64 QPR:$src), addrmode4:$addr)]> {
- let Inst{27-25} = 0b110;
- let Inst{24} = 0; // P bit
- let Inst{23} = 1; // U bit
- let Inst{20} = 0;
- let Inst{11-8} = 0b1011;
-}
-
-// VLD1 : Vector Load (multiple single elements)
-class VLD1D<bits<4> op7_4, string OpcodeStr, string Dt,
- ValueType Ty, Intrinsic IntOp>
- : NLdSt<0,0b10,0b0111,op7_4, (outs DPR:$dst), (ins addrmode6:$addr), IIC_VLD1,
- OpcodeStr, Dt, "\\{$dst\\}, $addr", "",
- [(set DPR:$dst, (Ty (IntOp addrmode6:$addr)))]>;
-class VLD1Q<bits<4> op7_4, string OpcodeStr, string Dt,
- ValueType Ty, Intrinsic IntOp>
- : NLdSt<0,0b10,0b1010,op7_4, (outs QPR:$dst), (ins addrmode6:$addr), IIC_VLD1,
- OpcodeStr, Dt, "${dst:dregpair}, $addr", "",
- [(set QPR:$dst, (Ty (IntOp addrmode6:$addr)))]>;
-
-def VLD1d8 : VLD1D<0b0000, "vld1", "8", v8i8, int_arm_neon_vld1>;
-def VLD1d16 : VLD1D<0b0100, "vld1", "16", v4i16, int_arm_neon_vld1>;
-def VLD1d32 : VLD1D<0b1000, "vld1", "32", v2i32, int_arm_neon_vld1>;
-def VLD1df : VLD1D<0b1000, "vld1", "32", v2f32, int_arm_neon_vld1>;
-def VLD1d64 : VLD1D<0b1100, "vld1", "64", v1i64, int_arm_neon_vld1>;
-
-def VLD1q8 : VLD1Q<0b0000, "vld1", "8", v16i8, int_arm_neon_vld1>;
-def VLD1q16 : VLD1Q<0b0100, "vld1", "16", v8i16, int_arm_neon_vld1>;
-def VLD1q32 : VLD1Q<0b1000, "vld1", "32", v4i32, int_arm_neon_vld1>;
-def VLD1qf : VLD1Q<0b1000, "vld1", "32", v4f32, int_arm_neon_vld1>;
-def VLD1q64 : VLD1Q<0b1100, "vld1", "64", v2i64, int_arm_neon_vld1>;
-
-// These (dreg triple/quadruple) are for disassembly only.
-class VLD1D3<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0, 0b10, 0b0110, op7_4, (outs DPR:$dst1, DPR:$dst2, DPR:$dst3),
- (ins addrmode6:$addr), IIC_VLD1, OpcodeStr, Dt,
- "\\{$dst1, $dst2, $dst3\\}, $addr", "",
- [/* For disassembly only; pattern left blank */]>;
-class VLD1D4<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0,0b10,0b0010,op7_4,(outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4),
- (ins addrmode6:$addr), IIC_VLD1, OpcodeStr, Dt,
- "\\{$dst1, $dst2, $dst3, $dst4\\}, $addr", "",
- [/* For disassembly only; pattern left blank */]>;
-
-def VLD1d8T : VLD1D3<0b0000, "vld1", "8">;
-def VLD1d16T : VLD1D3<0b0100, "vld1", "16">;
-def VLD1d32T : VLD1D3<0b1000, "vld1", "32">;
-//def VLD1d64T : VLD1D3<0b1100, "vld1", "64">;
-
-def VLD1d8Q : VLD1D4<0b0000, "vld1", "8">;
-def VLD1d16Q : VLD1D4<0b0100, "vld1", "16">;
-def VLD1d32Q : VLD1D4<0b1000, "vld1", "32">;
-//def VLD1d64Q : VLD1D4<0b1100, "vld1", "64">;
-
-
-let mayLoad = 1, hasExtraDefRegAllocReq = 1 in {
-
-// VLD2 : Vector Load (multiple 2-element structures)
-class VLD2D<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0,0b10,0b1000,op7_4, (outs DPR:$dst1, DPR:$dst2),
- (ins addrmode6:$addr), IIC_VLD2,
- OpcodeStr, Dt, "\\{$dst1, $dst2\\}, $addr", "", []>;
-class VLD2Q<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0,0b10,0b0011,op7_4,
- (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4),
- (ins addrmode6:$addr), IIC_VLD2,
- OpcodeStr, Dt, "\\{$dst1, $dst2, $dst3, $dst4\\}, $addr",
- "", []>;
-
-def VLD2d8 : VLD2D<0b0000, "vld2", "8">;
-def VLD2d16 : VLD2D<0b0100, "vld2", "16">;
-def VLD2d32 : VLD2D<0b1000, "vld2", "32">;
-def VLD2d64 : NLdSt<0,0b10,0b1010,0b1100, (outs DPR:$dst1, DPR:$dst2),
- (ins addrmode6:$addr), IIC_VLD1,
- "vld1", "64", "\\{$dst1, $dst2\\}, $addr", "", []>;
-
-def VLD2q8 : VLD2Q<0b0000, "vld2", "8">;
-def VLD2q16 : VLD2Q<0b0100, "vld2", "16">;
-def VLD2q32 : VLD2Q<0b1000, "vld2", "32">;
-
-// These (double-spaced dreg pair) are for disassembly only.
-class VLD2Ddbl<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0,0b10,0b1001,op7_4, (outs DPR:$dst1, DPR:$dst2),
- (ins addrmode6:$addr), IIC_VLD2,
- OpcodeStr, Dt, "\\{$dst1, $dst2\\}, $addr", "", []>;
-
-def VLD2d8D : VLD2Ddbl<0b0000, "vld2", "8">;
-def VLD2d16D : VLD2Ddbl<0b0100, "vld2", "16">;
-def VLD2d32D : VLD2Ddbl<0b1000, "vld2", "32">;
-
-// VLD3 : Vector Load (multiple 3-element structures)
-class VLD3D<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0,0b10,0b0100,op7_4, (outs DPR:$dst1, DPR:$dst2, DPR:$dst3),
- (ins addrmode6:$addr), IIC_VLD3,
- OpcodeStr, Dt, "\\{$dst1, $dst2, $dst3\\}, $addr", "", []>;
-class VLD3WB<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0,0b10,0b0101,op7_4, (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, GPR:$wb),
- (ins addrmode6:$addr), IIC_VLD3,
- OpcodeStr, Dt, "\\{$dst1, $dst2, $dst3\\}, $addr",
- "$addr.addr = $wb", []>;
-
-def VLD3d8 : VLD3D<0b0000, "vld3", "8">;
-def VLD3d16 : VLD3D<0b0100, "vld3", "16">;
-def VLD3d32 : VLD3D<0b1000, "vld3", "32">;
-def VLD3d64 : NLdSt<0,0b10,0b0110,0b1100,
- (outs DPR:$dst1, DPR:$dst2, DPR:$dst3),
- (ins addrmode6:$addr), IIC_VLD1,
- "vld1", "64", "\\{$dst1, $dst2, $dst3\\}, $addr", "", []>;
-
-// vld3 to double-spaced even registers.
-def VLD3q8a : VLD3WB<0b0000, "vld3", "8">;
-def VLD3q16a : VLD3WB<0b0100, "vld3", "16">;
-def VLD3q32a : VLD3WB<0b1000, "vld3", "32">;
-
-// vld3 to double-spaced odd registers.
-def VLD3q8b : VLD3WB<0b0000, "vld3", "8">;
-def VLD3q16b : VLD3WB<0b0100, "vld3", "16">;
-def VLD3q32b : VLD3WB<0b1000, "vld3", "32">;
-
-// VLD4 : Vector Load (multiple 4-element structures)
-class VLD4D<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0,0b10,0b0000,op7_4,
- (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4),
- (ins addrmode6:$addr), IIC_VLD4,
- OpcodeStr, Dt, "\\{$dst1, $dst2, $dst3, $dst4\\}, $addr",
- "", []>;
-class VLD4WB<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0,0b10,0b0001,op7_4,
- (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
- (ins addrmode6:$addr), IIC_VLD4,
- OpcodeStr, Dt, "\\{$dst1, $dst2, $dst3, $dst4\\}, $addr",
- "$addr.addr = $wb", []>;
-
-def VLD4d8 : VLD4D<0b0000, "vld4", "8">;
-def VLD4d16 : VLD4D<0b0100, "vld4", "16">;
-def VLD4d32 : VLD4D<0b1000, "vld4", "32">;
-def VLD4d64 : NLdSt<0,0b10,0b0010,0b1100,
- (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4),
- (ins addrmode6:$addr), IIC_VLD1,
- "vld1", "64", "\\{$dst1, $dst2, $dst3, $dst4\\}, $addr",
- "", []>;
-
-// vld4 to double-spaced even registers.
-def VLD4q8a : VLD4WB<0b0000, "vld4", "8">;
-def VLD4q16a : VLD4WB<0b0100, "vld4", "16">;
-def VLD4q32a : VLD4WB<0b1000, "vld4", "32">;
-
-// vld4 to double-spaced odd registers.
-def VLD4q8b : VLD4WB<0b0000, "vld4", "8">;
-def VLD4q16b : VLD4WB<0b0100, "vld4", "16">;
-def VLD4q32b : VLD4WB<0b1000, "vld4", "32">;
-
-// VLD1LN : Vector Load (single element to one lane)
-// FIXME: Not yet implemented.
-
-// VLD2LN : Vector Load (single 2-element structure to one lane)
-class VLD2LN<bits<4> op11_8, string OpcodeStr, string Dt>
- : NLdSt<1,0b10,op11_8,{?,?,?,?}, (outs DPR:$dst1, DPR:$dst2),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, nohash_imm:$lane),
- IIC_VLD2, OpcodeStr, Dt, "\\{$dst1[$lane], $dst2[$lane]\\}, $addr",
- "$src1 = $dst1, $src2 = $dst2", []>;
-
-// vld2 to single-spaced registers.
-def VLD2LNd8 : VLD2LN<0b0001, "vld2", "8">;
-def VLD2LNd16 : VLD2LN<0b0101, "vld2", "16"> { let Inst{5} = 0; }
-def VLD2LNd32 : VLD2LN<0b1001, "vld2", "32"> { let Inst{6} = 0; }
-
-// vld2 to double-spaced even registers.
-def VLD2LNq16a: VLD2LN<0b0101, "vld2", "16"> { let Inst{5} = 1; }
-def VLD2LNq32a: VLD2LN<0b1001, "vld2", "32"> { let Inst{6} = 1; }
-
-// vld2 to double-spaced odd registers.
-def VLD2LNq16b: VLD2LN<0b0101, "vld2", "16"> { let Inst{5} = 1; }
-def VLD2LNq32b: VLD2LN<0b1001, "vld2", "32"> { let Inst{6} = 1; }
-
-// VLD3LN : Vector Load (single 3-element structure to one lane)
-class VLD3LN<bits<4> op11_8, string OpcodeStr, string Dt>
- : NLdSt<1,0b10,op11_8,{?,?,?,?}, (outs DPR:$dst1, DPR:$dst2, DPR:$dst3),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3,
- nohash_imm:$lane), IIC_VLD3, OpcodeStr, Dt,
- "\\{$dst1[$lane], $dst2[$lane], $dst3[$lane]\\}, $addr",
- "$src1 = $dst1, $src2 = $dst2, $src3 = $dst3", []>;
-
-// vld3 to single-spaced registers.
-def VLD3LNd8 : VLD3LN<0b0010, "vld3", "8"> { let Inst{4} = 0; }
-def VLD3LNd16 : VLD3LN<0b0110, "vld3", "16"> { let Inst{5-4} = 0b00; }
-def VLD3LNd32 : VLD3LN<0b1010, "vld3", "32"> { let Inst{6-4} = 0b000; }
-
-// vld3 to double-spaced even registers.
-def VLD3LNq16a: VLD3LN<0b0110, "vld3", "16"> { let Inst{5-4} = 0b10; }
-def VLD3LNq32a: VLD3LN<0b1010, "vld3", "32"> { let Inst{6-4} = 0b100; }
-
-// vld3 to double-spaced odd registers.
-def VLD3LNq16b: VLD3LN<0b0110, "vld3", "16"> { let Inst{5-4} = 0b10; }
-def VLD3LNq32b: VLD3LN<0b1010, "vld3", "32"> { let Inst{6-4} = 0b100; }
-
-// VLD4LN : Vector Load (single 4-element structure to one lane)
-class VLD4LN<bits<4> op11_8, string OpcodeStr, string Dt>
- : NLdSt<1,0b10,op11_8,{?,?,?,?},
- (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4,
- nohash_imm:$lane), IIC_VLD4, OpcodeStr, Dt,
- "\\{$dst1[$lane], $dst2[$lane], $dst3[$lane], $dst4[$lane]\\}, $addr",
- "$src1 = $dst1, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4", []>;
-
-// vld4 to single-spaced registers.
-def VLD4LNd8 : VLD4LN<0b0011, "vld4", "8">;
-def VLD4LNd16 : VLD4LN<0b0111, "vld4", "16"> { let Inst{5} = 0; }
-def VLD4LNd32 : VLD4LN<0b1011, "vld4", "32"> { let Inst{6} = 0; }
-
-// vld4 to double-spaced even registers.
-def VLD4LNq16a: VLD4LN<0b0111, "vld4", "16"> { let Inst{5} = 1; }
-def VLD4LNq32a: VLD4LN<0b1011, "vld4", "32"> { let Inst{6} = 1; }
-
-// vld4 to double-spaced odd registers.
-def VLD4LNq16b: VLD4LN<0b0111, "vld4", "16"> { let Inst{5} = 1; }
-def VLD4LNq32b: VLD4LN<0b1011, "vld4", "32"> { let Inst{6} = 1; }
-
-// VLD1DUP : Vector Load (single element to all lanes)
-// VLD2DUP : Vector Load (single 2-element structure to all lanes)
-// VLD3DUP : Vector Load (single 3-element structure to all lanes)
-// VLD4DUP : Vector Load (single 4-element structure to all lanes)
-// FIXME: Not yet implemented.
-} // mayLoad = 1, hasExtraDefRegAllocReq = 1
-
-// VST1 : Vector Store (multiple single elements)
-class VST1D<bits<4> op7_4, string OpcodeStr, string Dt,
- ValueType Ty, Intrinsic IntOp>
- : NLdSt<0,0b00,0b0111,op7_4, (outs), (ins addrmode6:$addr, DPR:$src), IIC_VST,
- OpcodeStr, Dt, "\\{$src\\}, $addr", "",
- [(IntOp addrmode6:$addr, (Ty DPR:$src))]>;
-class VST1Q<bits<4> op7_4, string OpcodeStr, string Dt,
- ValueType Ty, Intrinsic IntOp>
- : NLdSt<0,0b00,0b1010,op7_4, (outs), (ins addrmode6:$addr, QPR:$src), IIC_VST,
- OpcodeStr, Dt, "${src:dregpair}, $addr", "",
- [(IntOp addrmode6:$addr, (Ty QPR:$src))]>;
-
-let hasExtraSrcRegAllocReq = 1 in {
-def VST1d8 : VST1D<0b0000, "vst1", "8", v8i8, int_arm_neon_vst1>;
-def VST1d16 : VST1D<0b0100, "vst1", "16", v4i16, int_arm_neon_vst1>;
-def VST1d32 : VST1D<0b1000, "vst1", "32", v2i32, int_arm_neon_vst1>;
-def VST1df : VST1D<0b1000, "vst1", "32", v2f32, int_arm_neon_vst1>;
-def VST1d64 : VST1D<0b1100, "vst1", "64", v1i64, int_arm_neon_vst1>;
-
-def VST1q8 : VST1Q<0b0000, "vst1", "8", v16i8, int_arm_neon_vst1>;
-def VST1q16 : VST1Q<0b0100, "vst1", "16", v8i16, int_arm_neon_vst1>;
-def VST1q32 : VST1Q<0b1000, "vst1", "32", v4i32, int_arm_neon_vst1>;
-def VST1qf : VST1Q<0b1000, "vst1", "32", v4f32, int_arm_neon_vst1>;
-def VST1q64 : VST1Q<0b1100, "vst1", "64", v2i64, int_arm_neon_vst1>;
-} // hasExtraSrcRegAllocReq
-
-// These (dreg triple/quadruple) are for disassembly only.
-class VST1D3<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0, 0b00, 0b0110, op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3), IIC_VST,
- OpcodeStr, Dt,
- "\\{$src1, $src2, $src3\\}, $addr", "",
- [/* For disassembly only; pattern left blank */]>;
-class VST1D4<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0, 0b00, 0b0010, op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4),
- IIC_VST, OpcodeStr, Dt,
- "\\{$src1, $src2, $src3, $src4\\}, $addr", "",
- [/* For disassembly only; pattern left blank */]>;
-
-def VST1d8T : VST1D3<0b0000, "vst1", "8">;
-def VST1d16T : VST1D3<0b0100, "vst1", "16">;
-def VST1d32T : VST1D3<0b1000, "vst1", "32">;
-//def VST1d64T : VST1D3<0b1100, "vst1", "64">;
-
-def VST1d8Q : VST1D4<0b0000, "vst1", "8">;
-def VST1d16Q : VST1D4<0b0100, "vst1", "16">;
-def VST1d32Q : VST1D4<0b1000, "vst1", "32">;
-//def VST1d64Q : VST1D4<0b1100, "vst1", "64">;
-
-
-let mayStore = 1, hasExtraSrcRegAllocReq = 1 in {
-
-// VST2 : Vector Store (multiple 2-element structures)
-class VST2D<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0,0b00,0b1000,op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2), IIC_VST,
- OpcodeStr, Dt, "\\{$src1, $src2\\}, $addr", "", []>;
-class VST2Q<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0,0b00,0b0011,op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4),
- IIC_VST, OpcodeStr, Dt, "\\{$src1, $src2, $src3, $src4\\}, $addr",
- "", []>;
-
-def VST2d8 : VST2D<0b0000, "vst2", "8">;
-def VST2d16 : VST2D<0b0100, "vst2", "16">;
-def VST2d32 : VST2D<0b1000, "vst2", "32">;
-def VST2d64 : NLdSt<0,0b00,0b1010,0b1100, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2), IIC_VST,
- "vst1", "64", "\\{$src1, $src2\\}, $addr", "", []>;
-
-def VST2q8 : VST2Q<0b0000, "vst2", "8">;
-def VST2q16 : VST2Q<0b0100, "vst2", "16">;
-def VST2q32 : VST2Q<0b1000, "vst2", "32">;
-
-// These (double-spaced dreg pair) are for disassembly only.
-class VST2Ddbl<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0, 0b00, 0b1001, op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2), IIC_VST,
- OpcodeStr, Dt, "\\{$src1, $src2\\}, $addr", "", []>;
-
-def VST2d8D : VST2Ddbl<0b0000, "vst2", "8">;
-def VST2d16D : VST2Ddbl<0b0100, "vst2", "16">;
-def VST2d32D : VST2Ddbl<0b1000, "vst2", "32">;
-
-// VST3 : Vector Store (multiple 3-element structures)
-class VST3D<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0,0b00,0b0100,op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3), IIC_VST,
- OpcodeStr, Dt, "\\{$src1, $src2, $src3\\}, $addr", "", []>;
-class VST3WB<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0,0b00,0b0101,op7_4, (outs GPR:$wb),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3), IIC_VST,
- OpcodeStr, Dt, "\\{$src1, $src2, $src3\\}, $addr",
- "$addr.addr = $wb", []>;
-
-def VST3d8 : VST3D<0b0000, "vst3", "8">;
-def VST3d16 : VST3D<0b0100, "vst3", "16">;
-def VST3d32 : VST3D<0b1000, "vst3", "32">;
-def VST3d64 : NLdSt<0,0b00,0b0110,0b1100, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3),
- IIC_VST,
- "vst1", "64", "\\{$src1, $src2, $src3\\}, $addr", "", []>;
-
-// vst3 to double-spaced even registers.
-def VST3q8a : VST3WB<0b0000, "vst3", "8">;
-def VST3q16a : VST3WB<0b0100, "vst3", "16">;
-def VST3q32a : VST3WB<0b1000, "vst3", "32">;
-
-// vst3 to double-spaced odd registers.
-def VST3q8b : VST3WB<0b0000, "vst3", "8">;
-def VST3q16b : VST3WB<0b0100, "vst3", "16">;
-def VST3q32b : VST3WB<0b1000, "vst3", "32">;
-
-// VST4 : Vector Store (multiple 4-element structures)
-class VST4D<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0,0b00,0b0000,op7_4, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4),
- IIC_VST, OpcodeStr, Dt, "\\{$src1, $src2, $src3, $src4\\}, $addr",
- "", []>;
-class VST4WB<bits<4> op7_4, string OpcodeStr, string Dt>
- : NLdSt<0,0b00,0b0001,op7_4, (outs GPR:$wb),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4),
- IIC_VST, OpcodeStr, Dt, "\\{$src1, $src2, $src3, $src4\\}, $addr",
- "$addr.addr = $wb", []>;
-
-def VST4d8 : VST4D<0b0000, "vst4", "8">;
-def VST4d16 : VST4D<0b0100, "vst4", "16">;
-def VST4d32 : VST4D<0b1000, "vst4", "32">;
-def VST4d64 : NLdSt<0,0b00,0b0010,0b1100, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3,
- DPR:$src4), IIC_VST,
- "vst1", "64", "\\{$src1, $src2, $src3, $src4\\}, $addr",
- "", []>;
-
-// vst4 to double-spaced even registers.
-def VST4q8a : VST4WB<0b0000, "vst4", "8">;
-def VST4q16a : VST4WB<0b0100, "vst4", "16">;
-def VST4q32a : VST4WB<0b1000, "vst4", "32">;
-
-// vst4 to double-spaced odd registers.
-def VST4q8b : VST4WB<0b0000, "vst4", "8">;
-def VST4q16b : VST4WB<0b0100, "vst4", "16">;
-def VST4q32b : VST4WB<0b1000, "vst4", "32">;
-
-// VST1LN : Vector Store (single element from one lane)
-// FIXME: Not yet implemented.
-
-// VST2LN : Vector Store (single 2-element structure from one lane)
-class VST2LN<bits<4> op11_8, string OpcodeStr, string Dt>
- : NLdSt<1,0b00,op11_8,{?,?,?,?}, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, nohash_imm:$lane),
- IIC_VST, OpcodeStr, Dt, "\\{$src1[$lane], $src2[$lane]\\}, $addr",
- "", []>;
-
-// vst2 to single-spaced registers.
-def VST2LNd8 : VST2LN<0b0001, "vst2", "8">;
-def VST2LNd16 : VST2LN<0b0101, "vst2", "16"> { let Inst{5} = 0; }
-def VST2LNd32 : VST2LN<0b1001, "vst2", "32"> { let Inst{6} = 0; }
-
-// vst2 to double-spaced even registers.
-def VST2LNq16a: VST2LN<0b0101, "vst2", "16"> { let Inst{5} = 1; }
-def VST2LNq32a: VST2LN<0b1001, "vst2", "32"> { let Inst{6} = 1; }
-
-// vst2 to double-spaced odd registers.
-def VST2LNq16b: VST2LN<0b0101, "vst2", "16"> { let Inst{5} = 1; }
-def VST2LNq32b: VST2LN<0b1001, "vst2", "32"> { let Inst{6} = 1; }
-
-// VST3LN : Vector Store (single 3-element structure from one lane)
-class VST3LN<bits<4> op11_8, string OpcodeStr, string Dt>
- : NLdSt<1,0b00,op11_8,{?,?,?,?}, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3,
- nohash_imm:$lane), IIC_VST, OpcodeStr, Dt,
- "\\{$src1[$lane], $src2[$lane], $src3[$lane]\\}, $addr", "", []>;
-
-// vst3 to single-spaced registers.
-def VST3LNd8 : VST3LN<0b0010, "vst3", "8"> { let Inst{4} = 0; }
-def VST3LNd16 : VST3LN<0b0110, "vst3", "16"> { let Inst{5-4} = 0b00; }
-def VST3LNd32 : VST3LN<0b1010, "vst3", "32"> { let Inst{6-4} = 0b000; }
-
-// vst3 to double-spaced even registers.
-def VST3LNq16a: VST3LN<0b0110, "vst3", "16"> { let Inst{5-4} = 0b10; }
-def VST3LNq32a: VST3LN<0b1010, "vst3", "32"> { let Inst{6-4} = 0b100; }
-
-// vst3 to double-spaced odd registers.
-def VST3LNq16b: VST3LN<0b0110, "vst3", "16"> { let Inst{5-4} = 0b10; }
-def VST3LNq32b: VST3LN<0b1010, "vst3", "32"> { let Inst{6-4} = 0b100; }
-
-// VST4LN : Vector Store (single 4-element structure from one lane)
-class VST4LN<bits<4> op11_8, string OpcodeStr, string Dt>
- : NLdSt<1,0b00,op11_8,{?,?,?,?}, (outs),
- (ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4,
- nohash_imm:$lane), IIC_VST, OpcodeStr, Dt,
- "\\{$src1[$lane], $src2[$lane], $src3[$lane], $src4[$lane]\\}, $addr",
- "", []>;
-
-// vst4 to single-spaced registers.
-def VST4LNd8 : VST4LN<0b0011, "vst4", "8">;
-def VST4LNd16 : VST4LN<0b0111, "vst4", "16"> { let Inst{5} = 0; }
-def VST4LNd32 : VST4LN<0b1011, "vst4", "32"> { let Inst{6} = 0; }
-
-// vst4 to double-spaced even registers.
-def VST4LNq16a: VST4LN<0b0111, "vst4", "16"> { let Inst{5} = 1; }
-def VST4LNq32a: VST4LN<0b1011, "vst4", "32"> { let Inst{6} = 1; }
-
-// vst4 to double-spaced odd registers.
-def VST4LNq16b: VST4LN<0b0111, "vst4", "16"> { let Inst{5} = 1; }
-def VST4LNq32b: VST4LN<0b1011, "vst4", "32"> { let Inst{6} = 1; }
-
-} // mayStore = 1, hasExtraSrcRegAllocReq = 1
-
-
-//===----------------------------------------------------------------------===//
-// NEON pattern fragments
-//===----------------------------------------------------------------------===//
-
-// Extract D sub-registers of Q registers.
-// (arm_dsubreg_0 is 5; arm_dsubreg_1 is 6)
-def DSubReg_i8_reg : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(5 + N->getZExtValue() / 8, MVT::i32);
-}]>;
-def DSubReg_i16_reg : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(5 + N->getZExtValue() / 4, MVT::i32);
-}]>;
-def DSubReg_i32_reg : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(5 + N->getZExtValue() / 2, MVT::i32);
-}]>;
-def DSubReg_f64_reg : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(5 + N->getZExtValue(), MVT::i32);
-}]>;
-def DSubReg_f64_other_reg : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(5 + (1 - N->getZExtValue()), MVT::i32);
-}]>;
-
-// Extract S sub-registers of Q/D registers.
-// (arm_ssubreg_0 is 1; arm_ssubreg_1 is 2; etc.)
-def SSubReg_f32_reg : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(1 + N->getZExtValue(), MVT::i32);
-}]>;
-
-// Translate lane numbers from Q registers to D subregs.
-def SubReg_i8_lane : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getZExtValue() & 7, MVT::i32);
-}]>;
-def SubReg_i16_lane : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getZExtValue() & 3, MVT::i32);
-}]>;
-def SubReg_i32_lane : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(N->getZExtValue() & 1, MVT::i32);
-}]>;
-
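The DSubReg_* and SubReg_*_lane transforms above all implement one piece of arithmetic: given a lane of a 128-bit Q register, find the 64-bit D sub-register that holds it and the lane within that sub-register. The lane-indexed multiply patterns further down (VMULsl, VQDMULH, VQRDMULH) rely on exactly this split. Below is a minimal standalone sketch of that arithmetic in plain C++ rather than TableGen, assuming only the sub-register numbering stated in the comments (arm_dsubreg_0 = 5, arm_dsubreg_1 = 6); it is an illustration, not backend code.

  #include <cassert>

  // D sub-register index that holds Q-register lane `lane` for elements of
  // width `elemBits` (8, 16 or 32) -- mirrors DSubReg_i8_reg/_i16_reg/_i32_reg.
  static unsigned dsubRegForLane(unsigned lane, unsigned elemBits) {
    unsigned lanesPerD = 64 / elemBits;  // 8, 4 or 2 lanes fit in one D register
    return 5 + lane / lanesPerD;         // 5 == arm_dsubreg_0, 6 == arm_dsubreg_1
  }

  // Lane number within that D sub-register -- mirrors SubReg_i8/i16/i32_lane.
  static unsigned laneInDsub(unsigned lane, unsigned elemBits) {
    unsigned lanesPerD = 64 / elemBits;
    return lane & (lanesPerD - 1);
  }

  int main() {
    // Lane 5 of a v8i16 Q register lives in the odd D half (index 6), at lane 1.
    assert(dsubRegForLane(5, 16) == 6);
    assert(laneInDsub(5, 16) == 1);
    return 0;
  }
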
-//===----------------------------------------------------------------------===//
-// Instruction Classes
-//===----------------------------------------------------------------------===//
-
-// Basic 2-register operations: single-, double- and quad-register.
-class N2VS<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
- bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
- string Dt, ValueType ResTy, ValueType OpTy, SDNode OpNode>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4,
- (outs DPR_VFP2:$dst), (ins DPR_VFP2:$src),
- IIC_VUNAD, OpcodeStr, Dt, "$dst, $src", "", []>;
-class N2VD<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
- bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
- string Dt, ValueType ResTy, ValueType OpTy, SDNode OpNode>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$dst),
- (ins DPR:$src), IIC_VUNAD, OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (ResTy (OpNode (OpTy DPR:$src))))]>;
-class N2VQ<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
- bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
- string Dt, ValueType ResTy, ValueType OpTy, SDNode OpNode>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$dst),
- (ins QPR:$src), IIC_VUNAQ, OpcodeStr, Dt, "$dst, $src", "",
- [(set QPR:$dst, (ResTy (OpNode (OpTy QPR:$src))))]>;
-
-// Basic 2-register intrinsics, both double- and quad-register.
-class N2VDInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
- bits<2> op17_16, bits<5> op11_7, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$dst),
- (ins DPR:$src), itin, OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src))))]>;
-class N2VQInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
- bits<2> op17_16, bits<5> op11_7, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$dst),
- (ins QPR:$src), itin, OpcodeStr, Dt, "$dst, $src", "",
- [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src))))]>;
-
-// Narrow 2-register intrinsics.
-class N2VNInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
- bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType TyD, ValueType TyQ, Intrinsic IntOp>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs DPR:$dst),
- (ins QPR:$src), itin, OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (TyD (IntOp (TyQ QPR:$src))))]>;
-
-// Long 2-register intrinsics (currently only used for VMOVL).
-class N2VLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
- bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType TyQ, ValueType TyD, Intrinsic IntOp>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs QPR:$dst),
- (ins DPR:$src), itin, OpcodeStr, Dt, "$dst, $src", "",
- [(set QPR:$dst, (TyQ (IntOp (TyD DPR:$src))))]>;
-
-// 2-register shuffles (VTRN/VZIP/VUZP), both double- and quad-register.
-class N2VDShuffle<bits<2> op19_18, bits<5> op11_7, string OpcodeStr, string Dt>
- : N2V<0b11, 0b11, op19_18, 0b10, op11_7, 0, 0, (outs DPR:$dst1, DPR:$dst2),
- (ins DPR:$src1, DPR:$src2), IIC_VPERMD,
- OpcodeStr, Dt, "$dst1, $dst2",
- "$src1 = $dst1, $src2 = $dst2", []>;
-class N2VQShuffle<bits<2> op19_18, bits<5> op11_7,
- InstrItinClass itin, string OpcodeStr, string Dt>
- : N2V<0b11, 0b11, op19_18, 0b10, op11_7, 1, 0, (outs QPR:$dst1, QPR:$dst2),
- (ins QPR:$src1, QPR:$src2), itin, OpcodeStr, Dt, "$dst1, $dst2",
- "$src1 = $dst1, $src2 = $dst2", []>;
-
-// Basic 3-register operations: single-, double- and quad-register.
-class N3VS<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
- SDNode OpNode, bit Commutable>
- : N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR_VFP2:$dst), (ins DPR_VFP2:$src1, DPR_VFP2:$src2), IIC_VBIND,
- OpcodeStr, Dt, "$dst, $src1, $src2", "", []> {
- let isCommutable = Commutable;
-}
-
-class N3VD<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, SDNode OpNode, bit Commutable>
- : N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2), itin,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set DPR:$dst, (ResTy (OpNode (OpTy DPR:$src1), (OpTy DPR:$src2))))]> {
- let isCommutable = Commutable;
-}
-// Same as N3VD but no data type.
-class N3VDX<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr,
- ValueType ResTy, ValueType OpTy,
- SDNode OpNode, bit Commutable>
- : N3VX<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2), itin,
- OpcodeStr, "$dst, $src1, $src2", "",
- [(set DPR:$dst, (ResTy (OpNode (OpTy DPR:$src1), (OpTy DPR:$src2))))]>{
- let isCommutable = Commutable;
-}
-class N3VDSL<bits<2> op21_20, bits<4> op11_8,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType Ty, SDNode ShOp>
- : N3V<0, 1, op21_20, op11_8, 1, 0,
- (outs DPR:$dst), (ins DPR:$src1, DPR_VFP2:$src2, nohash_imm:$lane),
- itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (Ty DPR:$dst),
- (Ty (ShOp (Ty DPR:$src1),
- (Ty (NEONvduplane (Ty DPR_VFP2:$src2), imm:$lane)))))]>{
- let isCommutable = 0;
-}
-class N3VDSL16<bits<2> op21_20, bits<4> op11_8,
- string OpcodeStr, string Dt, ValueType Ty, SDNode ShOp>
- : N3V<0, 1, op21_20, op11_8, 1, 0,
- (outs DPR:$dst), (ins DPR:$src1, DPR_8:$src2, nohash_imm:$lane),
- IIC_VMULi16D, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (Ty DPR:$dst),
- (Ty (ShOp (Ty DPR:$src1),
- (Ty (NEONvduplane (Ty DPR_8:$src2), imm:$lane)))))]> {
- let isCommutable = 0;
-}
-
-class N3VQ<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, SDNode OpNode, bit Commutable>
- : N3V<op24, op23, op21_20, op11_8, 1, op4,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2), itin,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set QPR:$dst, (ResTy (OpNode (OpTy QPR:$src1), (OpTy QPR:$src2))))]> {
- let isCommutable = Commutable;
-}
-class N3VQX<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr,
- ValueType ResTy, ValueType OpTy, SDNode OpNode, bit Commutable>
- : N3VX<op24, op23, op21_20, op11_8, 1, op4,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2), itin,
- OpcodeStr, "$dst, $src1, $src2", "",
- [(set QPR:$dst, (ResTy (OpNode (OpTy QPR:$src1), (OpTy QPR:$src2))))]>{
- let isCommutable = Commutable;
-}
-class N3VQSL<bits<2> op21_20, bits<4> op11_8,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, SDNode ShOp>
- : N3V<1, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst), (ins QPR:$src1, DPR_VFP2:$src2, nohash_imm:$lane),
- itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (ResTy QPR:$dst),
- (ResTy (ShOp (ResTy QPR:$src1),
- (ResTy (NEONvduplane (OpTy DPR_VFP2:$src2),
- imm:$lane)))))]> {
- let isCommutable = 0;
-}
-class N3VQSL16<bits<2> op21_20, bits<4> op11_8, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, SDNode ShOp>
- : N3V<1, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst), (ins QPR:$src1, DPR_8:$src2, nohash_imm:$lane),
- IIC_VMULi16Q, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (ResTy QPR:$dst),
- (ResTy (ShOp (ResTy QPR:$src1),
- (ResTy (NEONvduplane (OpTy DPR_8:$src2),
- imm:$lane)))))]> {
- let isCommutable = 0;
-}
-
-// Basic 3-register intrinsics, both double- and quad-register.
-class N3VDInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp, bit Commutable>
- : N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2), itin,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src1), (OpTy DPR:$src2))))]> {
- let isCommutable = Commutable;
-}
-class N3VDIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
- string OpcodeStr, string Dt, ValueType Ty, Intrinsic IntOp>
- : N3V<0, 1, op21_20, op11_8, 1, 0,
- (outs DPR:$dst), (ins DPR:$src1, DPR_VFP2:$src2, nohash_imm:$lane),
- itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (Ty DPR:$dst),
- (Ty (IntOp (Ty DPR:$src1),
- (Ty (NEONvduplane (Ty DPR_VFP2:$src2),
- imm:$lane)))))]> {
- let isCommutable = 0;
-}
-class N3VDIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
- string OpcodeStr, string Dt, ValueType Ty, Intrinsic IntOp>
- : N3V<0, 1, op21_20, op11_8, 1, 0,
- (outs DPR:$dst), (ins DPR:$src1, DPR_8:$src2, nohash_imm:$lane),
- itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (Ty DPR:$dst),
- (Ty (IntOp (Ty DPR:$src1),
- (Ty (NEONvduplane (Ty DPR_8:$src2),
- imm:$lane)))))]> {
- let isCommutable = 0;
-}
-
-class N3VQInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp, bit Commutable>
- : N3V<op24, op23, op21_20, op11_8, 1, op4,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2), itin,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src1), (OpTy QPR:$src2))))]> {
- let isCommutable = Commutable;
-}
-class N3VQIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
- string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N3V<1, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst), (ins QPR:$src1, DPR_VFP2:$src2, nohash_imm:$lane),
- itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (ResTy QPR:$dst),
- (ResTy (IntOp (ResTy QPR:$src1),
- (ResTy (NEONvduplane (OpTy DPR_VFP2:$src2),
- imm:$lane)))))]> {
- let isCommutable = 0;
-}
-class N3VQIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
- string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N3V<1, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst), (ins QPR:$src1, DPR_8:$src2, nohash_imm:$lane),
- itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (ResTy QPR:$dst),
- (ResTy (IntOp (ResTy QPR:$src1),
- (ResTy (NEONvduplane (OpTy DPR_8:$src2),
- imm:$lane)))))]> {
- let isCommutable = 0;
-}
-
-// Multiply-Add/Sub operations: single-, double- and quad-register.
-class N3VSMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType Ty, SDNode MulOp, SDNode OpNode>
- : N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR_VFP2:$dst),
- (ins DPR_VFP2:$src1, DPR_VFP2:$src2, DPR_VFP2:$src3), itin,
- OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst", []>;
-
-class N3VDMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType Ty, SDNode MulOp, SDNode OpNode>
- : N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, DPR:$src3), itin,
- OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst",
- [(set DPR:$dst, (Ty (OpNode DPR:$src1,
- (Ty (MulOp DPR:$src2, DPR:$src3)))))]>;
-class N3VDMulOpSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
- string OpcodeStr, string Dt,
- ValueType Ty, SDNode MulOp, SDNode ShOp>
- : N3V<0, 1, op21_20, op11_8, 1, 0,
- (outs DPR:$dst),
- (ins DPR:$src1, DPR:$src2, DPR_VFP2:$src3, nohash_imm:$lane), itin,
- OpcodeStr, Dt, "$dst, $src2, $src3[$lane]", "$src1 = $dst",
- [(set (Ty DPR:$dst),
- (Ty (ShOp (Ty DPR:$src1),
- (Ty (MulOp DPR:$src2,
- (Ty (NEONvduplane (Ty DPR_VFP2:$src3),
- imm:$lane)))))))]>;
-class N3VDMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
- string OpcodeStr, string Dt,
- ValueType Ty, SDNode MulOp, SDNode ShOp>
- : N3V<0, 1, op21_20, op11_8, 1, 0,
- (outs DPR:$dst),
- (ins DPR:$src1, DPR:$src2, DPR_8:$src3, nohash_imm:$lane), itin,
- OpcodeStr, Dt, "$dst, $src2, $src3[$lane]", "$src1 = $dst",
- [(set (Ty DPR:$dst),
- (Ty (ShOp (Ty DPR:$src1),
- (Ty (MulOp DPR:$src2,
- (Ty (NEONvduplane (Ty DPR_8:$src3),
- imm:$lane)))))))]>;
-
-class N3VQMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt, ValueType Ty,
- SDNode MulOp, SDNode OpNode>
- : N3V<op24, op23, op21_20, op11_8, 1, op4,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, QPR:$src3), itin,
- OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst",
- [(set QPR:$dst, (Ty (OpNode QPR:$src1,
- (Ty (MulOp QPR:$src2, QPR:$src3)))))]>;
-class N3VQMulOpSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
- string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
- SDNode MulOp, SDNode ShOp>
- : N3V<1, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst),
- (ins QPR:$src1, QPR:$src2, DPR_VFP2:$src3, nohash_imm:$lane), itin,
- OpcodeStr, Dt, "$dst, $src2, $src3[$lane]", "$src1 = $dst",
- [(set (ResTy QPR:$dst),
- (ResTy (ShOp (ResTy QPR:$src1),
- (ResTy (MulOp QPR:$src2,
- (ResTy (NEONvduplane (OpTy DPR_VFP2:$src3),
- imm:$lane)))))))]>;
-class N3VQMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
- string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy,
- SDNode MulOp, SDNode ShOp>
- : N3V<1, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst),
- (ins QPR:$src1, QPR:$src2, DPR_8:$src3, nohash_imm:$lane), itin,
- OpcodeStr, Dt, "$dst, $src2, $src3[$lane]", "$src1 = $dst",
- [(set (ResTy QPR:$dst),
- (ResTy (ShOp (ResTy QPR:$src1),
- (ResTy (MulOp QPR:$src2,
- (ResTy (NEONvduplane (OpTy DPR_8:$src3),
- imm:$lane)))))))]>;
-
-// Neon 3-argument intrinsics, both double- and quad-register.
-// The destination register is also used as the first source operand register.
-class N3VDInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, DPR:$src3), itin,
- OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst",
- [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src1),
- (OpTy DPR:$src2), (OpTy DPR:$src3))))]>;
-class N3VQInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N3V<op24, op23, op21_20, op11_8, 1, op4,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, QPR:$src3), itin,
- OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst",
- [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src1),
- (OpTy QPR:$src2), (OpTy QPR:$src3))))]>;
-
-// Neon Long 3-argument intrinsic. The destination register is
-// a quad-register and is also used as the first source operand register.
-class N3VLInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType TyQ, ValueType TyD, Intrinsic IntOp>
- : N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs QPR:$dst), (ins QPR:$src1, DPR:$src2, DPR:$src3), itin,
- OpcodeStr, Dt, "$dst, $src2, $src3", "$src1 = $dst",
- [(set QPR:$dst,
- (TyQ (IntOp (TyQ QPR:$src1), (TyD DPR:$src2), (TyD DPR:$src3))))]>;
-class N3VLInt3SL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
- string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N3V<op24, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst),
- (ins QPR:$src1, DPR:$src2, DPR_VFP2:$src3, nohash_imm:$lane), itin,
- OpcodeStr, Dt, "$dst, $src2, $src3[$lane]", "$src1 = $dst",
- [(set (ResTy QPR:$dst),
- (ResTy (IntOp (ResTy QPR:$src1),
- (OpTy DPR:$src2),
- (OpTy (NEONvduplane (OpTy DPR_VFP2:$src3),
- imm:$lane)))))]>;
-class N3VLInt3SL16<bit op24, bits<2> op21_20, bits<4> op11_8,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N3V<op24, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst),
- (ins QPR:$src1, DPR:$src2, DPR_8:$src3, nohash_imm:$lane), itin,
- OpcodeStr, Dt, "$dst, $src2, $src3[$lane]", "$src1 = $dst",
- [(set (ResTy QPR:$dst),
- (ResTy (IntOp (ResTy QPR:$src1),
- (OpTy DPR:$src2),
- (OpTy (NEONvduplane (OpTy DPR_8:$src3),
- imm:$lane)))))]>;
-
-// Narrowing 3-register intrinsics.
-class N3VNInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- string OpcodeStr, string Dt, ValueType TyD, ValueType TyQ,
- Intrinsic IntOp, bit Commutable>
- : N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs DPR:$dst), (ins QPR:$src1, QPR:$src2), IIC_VBINi4D,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set DPR:$dst, (TyD (IntOp (TyQ QPR:$src1), (TyQ QPR:$src2))))]> {
- let isCommutable = Commutable;
-}
-
-// Long 3-register intrinsics.
-class N3VLInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType TyQ, ValueType TyD, Intrinsic IntOp, bit Commutable>
- : N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs QPR:$dst), (ins DPR:$src1, DPR:$src2), itin,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set QPR:$dst, (TyQ (IntOp (TyD DPR:$src1), (TyD DPR:$src2))))]> {
- let isCommutable = Commutable;
-}
-class N3VLIntSL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
- string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N3V<op24, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst), (ins DPR:$src1, DPR_VFP2:$src2, nohash_imm:$lane),
- itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (ResTy QPR:$dst),
- (ResTy (IntOp (OpTy DPR:$src1),
- (OpTy (NEONvduplane (OpTy DPR_VFP2:$src2),
- imm:$lane)))))]>;
-class N3VLIntSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N3V<op24, 1, op21_20, op11_8, 1, 0,
- (outs QPR:$dst), (ins DPR:$src1, DPR_8:$src2, nohash_imm:$lane),
- itin, OpcodeStr, Dt, "$dst, $src1, $src2[$lane]", "",
- [(set (ResTy QPR:$dst),
- (ResTy (IntOp (OpTy DPR:$src1),
- (OpTy (NEONvduplane (OpTy DPR_8:$src2),
- imm:$lane)))))]>;
-
-// Wide 3-register intrinsics.
-class N3VWInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
- string OpcodeStr, string Dt, ValueType TyQ, ValueType TyD,
- Intrinsic IntOp, bit Commutable>
- : N3V<op24, op23, op21_20, op11_8, 0, op4,
- (outs QPR:$dst), (ins QPR:$src1, DPR:$src2), IIC_VSUBiD,
- OpcodeStr, Dt, "$dst, $src1, $src2", "",
- [(set QPR:$dst, (TyQ (IntOp (TyQ QPR:$src1), (TyD DPR:$src2))))]> {
- let isCommutable = Commutable;
-}
-
-// Pairwise long 2-register intrinsics, both double- and quad-register.
-class N2VDPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
- bits<2> op17_16, bits<5> op11_7, bit op4,
- string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$dst),
- (ins DPR:$src), IIC_VSHLiD, OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src))))]>;
-class N2VQPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
- bits<2> op17_16, bits<5> op11_7, bit op4,
- string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$dst),
- (ins QPR:$src), IIC_VSHLiD, OpcodeStr, Dt, "$dst, $src", "",
- [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src))))]>;
-
-// Pairwise long 2-register accumulate intrinsics,
-// both double- and quad-register.
-// The destination register is also used as the first source operand register.
-class N2VDPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
- bits<2> op17_16, bits<5> op11_7, bit op4,
- string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2), IIC_VPALiD,
- OpcodeStr, Dt, "$dst, $src2", "$src1 = $dst",
- [(set DPR:$dst, (ResTy (IntOp (ResTy DPR:$src1), (OpTy DPR:$src2))))]>;
-class N2VQPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
- bits<2> op17_16, bits<5> op11_7, bit op4,
- string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
- : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2), IIC_VPALiQ,
- OpcodeStr, Dt, "$dst, $src2", "$src1 = $dst",
- [(set QPR:$dst, (ResTy (IntOp (ResTy QPR:$src1), (OpTy QPR:$src2))))]>;
-
-// Shift by immediate,
-// both double- and quad-register.
-class N2VDSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType Ty, SDNode OpNode>
- : N2VImm<op24, op23, op11_8, op7, 0, op4,
- (outs DPR:$dst), (ins DPR:$src, i32imm:$SIMM), itin,
- OpcodeStr, Dt, "$dst, $src, $SIMM", "",
- [(set DPR:$dst, (Ty (OpNode (Ty DPR:$src), (i32 imm:$SIMM))))]>;
-class N2VQSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType Ty, SDNode OpNode>
- : N2VImm<op24, op23, op11_8, op7, 1, op4,
- (outs QPR:$dst), (ins QPR:$src, i32imm:$SIMM), itin,
- OpcodeStr, Dt, "$dst, $src, $SIMM", "",
- [(set QPR:$dst, (Ty (OpNode (Ty QPR:$src), (i32 imm:$SIMM))))]>;
-
-// Long shift by immediate.
-class N2VLSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
- string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, SDNode OpNode>
- : N2VImm<op24, op23, op11_8, op7, op6, op4,
- (outs QPR:$dst), (ins DPR:$src, i32imm:$SIMM), IIC_VSHLiD,
- OpcodeStr, Dt, "$dst, $src, $SIMM", "",
- [(set QPR:$dst, (ResTy (OpNode (OpTy DPR:$src),
- (i32 imm:$SIMM))))]>;
-
-// Narrow shift by immediate.
-class N2VNSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, SDNode OpNode>
- : N2VImm<op24, op23, op11_8, op7, op6, op4,
- (outs DPR:$dst), (ins QPR:$src, i32imm:$SIMM), itin,
- OpcodeStr, Dt, "$dst, $src, $SIMM", "",
- [(set DPR:$dst, (ResTy (OpNode (OpTy QPR:$src),
- (i32 imm:$SIMM))))]>;
-
-// Shift right by immediate and accumulate,
-// both double- and quad-register.
-class N2VDShAdd<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
- string OpcodeStr, string Dt, ValueType Ty, SDNode ShOp>
- : N2VImm<op24, op23, op11_8, op7, 0, op4, (outs DPR:$dst),
- (ins DPR:$src1, DPR:$src2, i32imm:$SIMM), IIC_VPALiD,
- OpcodeStr, Dt, "$dst, $src2, $SIMM", "$src1 = $dst",
- [(set DPR:$dst, (Ty (add DPR:$src1,
- (Ty (ShOp DPR:$src2, (i32 imm:$SIMM))))))]>;
-class N2VQShAdd<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
- string OpcodeStr, string Dt, ValueType Ty, SDNode ShOp>
- : N2VImm<op24, op23, op11_8, op7, 1, op4, (outs QPR:$dst),
- (ins QPR:$src1, QPR:$src2, i32imm:$SIMM), IIC_VPALiD,
- OpcodeStr, Dt, "$dst, $src2, $SIMM", "$src1 = $dst",
- [(set QPR:$dst, (Ty (add QPR:$src1,
- (Ty (ShOp QPR:$src2, (i32 imm:$SIMM))))))]>;
-
-// Shift by immediate and insert,
-// both double- and quad-register.
-class N2VDShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
- string OpcodeStr, string Dt, ValueType Ty, SDNode ShOp>
- : N2VImm<op24, op23, op11_8, op7, 0, op4, (outs DPR:$dst),
- (ins DPR:$src1, DPR:$src2, i32imm:$SIMM), IIC_VSHLiD,
- OpcodeStr, Dt, "$dst, $src2, $SIMM", "$src1 = $dst",
- [(set DPR:$dst, (Ty (ShOp DPR:$src1, DPR:$src2, (i32 imm:$SIMM))))]>;
-class N2VQShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
- string OpcodeStr, string Dt, ValueType Ty, SDNode ShOp>
- : N2VImm<op24, op23, op11_8, op7, 1, op4, (outs QPR:$dst),
- (ins QPR:$src1, QPR:$src2, i32imm:$SIMM), IIC_VSHLiQ,
- OpcodeStr, Dt, "$dst, $src2, $SIMM", "$src1 = $dst",
- [(set QPR:$dst, (Ty (ShOp QPR:$src1, QPR:$src2, (i32 imm:$SIMM))))]>;
-
-// Convert, with fractional bits immediate,
-// both double- and quad-register.
-class N2VCvtD<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
- string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
- Intrinsic IntOp>
- : N2VImm<op24, op23, op11_8, op7, 0, op4,
- (outs DPR:$dst), (ins DPR:$src, i32imm:$SIMM), IIC_VUNAD,
- OpcodeStr, Dt, "$dst, $src, $SIMM", "",
- [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src), (i32 imm:$SIMM))))]>;
-class N2VCvtQ<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
- string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
- Intrinsic IntOp>
- : N2VImm<op24, op23, op11_8, op7, 1, op4,
- (outs QPR:$dst), (ins QPR:$src, i32imm:$SIMM), IIC_VUNAQ,
- OpcodeStr, Dt, "$dst, $src, $SIMM", "",
- [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src), (i32 imm:$SIMM))))]>;
-
-//===----------------------------------------------------------------------===//
-// Multiclasses
-//===----------------------------------------------------------------------===//
-
-// Abbreviations used in multiclass suffixes:
-// Q = quarter int (8 bit) elements
-// H = half int (16 bit) elements
-// S = single int (32 bit) elements
-// D = double int (64 bit) elements
-
-// Neon 2-register vector operations -- for disassembly only.
-
-// First with only element sizes of 8, 16 and 32 bits:
-multiclass N2V_QHS_cmp<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
- bits<5> op11_7, bit op4, string opc, string Dt,
- string asm> {
- // 64-bit vector types.
- def v8i8 : N2V<op24_23, op21_20, 0b00, op17_16, op11_7, 0, op4,
- (outs DPR:$dst), (ins DPR:$src), NoItinerary,
- opc, !strconcat(Dt, "8"), asm, "", []>;
- def v4i16 : N2V<op24_23, op21_20, 0b01, op17_16, op11_7, 0, op4,
- (outs DPR:$dst), (ins DPR:$src), NoItinerary,
- opc, !strconcat(Dt, "16"), asm, "", []>;
- def v2i32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 0, op4,
- (outs DPR:$dst), (ins DPR:$src), NoItinerary,
- opc, !strconcat(Dt, "32"), asm, "", []>;
- def v2f32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 0, op4,
- (outs DPR:$dst), (ins DPR:$src), NoItinerary,
- opc, "f32", asm, "", []> {
- let Inst{10} = 1; // overwrite F = 1
- }
-
- // 128-bit vector types.
- def v16i8 : N2V<op24_23, op21_20, 0b00, op17_16, op11_7, 1, op4,
- (outs QPR:$dst), (ins QPR:$src), NoItinerary,
- opc, !strconcat(Dt, "8"), asm, "", []>;
- def v8i16 : N2V<op24_23, op21_20, 0b01, op17_16, op11_7, 1, op4,
- (outs QPR:$dst), (ins QPR:$src), NoItinerary,
- opc, !strconcat(Dt, "16"), asm, "", []>;
- def v4i32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 1, op4,
- (outs QPR:$dst), (ins QPR:$src), NoItinerary,
- opc, !strconcat(Dt, "32"), asm, "", []>;
- def v4f32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 1, op4,
- (outs QPR:$dst), (ins QPR:$src), NoItinerary,
- opc, "f32", asm, "", []> {
- let Inst{10} = 1; // overwrite F = 1
- }
-}
-
-// Neon 3-register vector operations.
-
-// First with only element sizes of 8, 16 and 32 bits:
-multiclass N3V_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
- InstrItinClass itinD16, InstrItinClass itinD32,
- InstrItinClass itinQ16, InstrItinClass itinQ32,
- string OpcodeStr, string Dt,
- SDNode OpNode, bit Commutable = 0> {
- // 64-bit vector types.
- def v8i8 : N3VD<op24, op23, 0b00, op11_8, op4, itinD16,
- OpcodeStr, !strconcat(Dt, "8"),
- v8i8, v8i8, OpNode, Commutable>;
- def v4i16 : N3VD<op24, op23, 0b01, op11_8, op4, itinD16,
- OpcodeStr, !strconcat(Dt, "16"),
- v4i16, v4i16, OpNode, Commutable>;
- def v2i32 : N3VD<op24, op23, 0b10, op11_8, op4, itinD32,
- OpcodeStr, !strconcat(Dt, "32"),
- v2i32, v2i32, OpNode, Commutable>;
-
- // 128-bit vector types.
- def v16i8 : N3VQ<op24, op23, 0b00, op11_8, op4, itinQ16,
- OpcodeStr, !strconcat(Dt, "8"),
- v16i8, v16i8, OpNode, Commutable>;
- def v8i16 : N3VQ<op24, op23, 0b01, op11_8, op4, itinQ16,
- OpcodeStr, !strconcat(Dt, "16"),
- v8i16, v8i16, OpNode, Commutable>;
- def v4i32 : N3VQ<op24, op23, 0b10, op11_8, op4, itinQ32,
- OpcodeStr, !strconcat(Dt, "32"),
- v4i32, v4i32, OpNode, Commutable>;
-}
-
-multiclass N3VSL_HS<bits<4> op11_8, string OpcodeStr, string Dt, SDNode ShOp> {
- def v4i16 : N3VDSL16<0b01, op11_8, OpcodeStr, !strconcat(Dt, "16"),
- v4i16, ShOp>;
- def v2i32 : N3VDSL<0b10, op11_8, IIC_VMULi32D, OpcodeStr, !strconcat(Dt,"32"),
- v2i32, ShOp>;
- def v8i16 : N3VQSL16<0b01, op11_8, OpcodeStr, !strconcat(Dt, "16"),
- v8i16, v4i16, ShOp>;
- def v4i32 : N3VQSL<0b10, op11_8, IIC_VMULi32Q, OpcodeStr, !strconcat(Dt,"32"),
- v4i32, v2i32, ShOp>;
-}
-
-// ....then also with element size 64 bits:
-multiclass N3V_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
- InstrItinClass itinD, InstrItinClass itinQ,
- string OpcodeStr, string Dt,
- SDNode OpNode, bit Commutable = 0>
- : N3V_QHS<op24, op23, op11_8, op4, itinD, itinD, itinQ, itinQ,
- OpcodeStr, Dt, OpNode, Commutable> {
- def v1i64 : N3VD<op24, op23, 0b11, op11_8, op4, itinD,
- OpcodeStr, !strconcat(Dt, "64"),
- v1i64, v1i64, OpNode, Commutable>;
- def v2i64 : N3VQ<op24, op23, 0b11, op11_8, op4, itinQ,
- OpcodeStr, !strconcat(Dt, "64"),
- v2i64, v2i64, OpNode, Commutable>;
-}
-
-
-// Neon Narrowing 2-register vector intrinsics,
-// source operand element sizes of 16, 32 and 64 bits:
-multiclass N2VNInt_HSD<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
- bits<5> op11_7, bit op6, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- Intrinsic IntOp> {
- def v8i8 : N2VNInt<op24_23, op21_20, 0b00, op17_16, op11_7, op6, op4,
- itin, OpcodeStr, !strconcat(Dt, "16"),
- v8i8, v8i16, IntOp>;
- def v4i16 : N2VNInt<op24_23, op21_20, 0b01, op17_16, op11_7, op6, op4,
- itin, OpcodeStr, !strconcat(Dt, "32"),
- v4i16, v4i32, IntOp>;
- def v2i32 : N2VNInt<op24_23, op21_20, 0b10, op17_16, op11_7, op6, op4,
- itin, OpcodeStr, !strconcat(Dt, "64"),
- v2i32, v2i64, IntOp>;
-}
-
-
-// Neon Lengthening 2-register vector intrinsic (currently specific to VMOVL).
-// source operand element sizes of 8, 16 and 32 bits:
-multiclass N2VLInt_QHS<bits<2> op24_23, bits<5> op11_7, bit op6, bit op4,
- string OpcodeStr, string Dt, Intrinsic IntOp> {
- def v8i16 : N2VLInt<op24_23, 0b00, 0b10, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
- OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, IntOp>;
- def v4i32 : N2VLInt<op24_23, 0b01, 0b00, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
- OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, IntOp>;
- def v2i64 : N2VLInt<op24_23, 0b10, 0b00, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
- OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, IntOp>;
-}
-
-
-// Neon 3-register vector intrinsics.
-
-// First with only element sizes of 16 and 32 bits:
-multiclass N3VInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
- InstrItinClass itinD16, InstrItinClass itinD32,
- InstrItinClass itinQ16, InstrItinClass itinQ32,
- string OpcodeStr, string Dt,
- Intrinsic IntOp, bit Commutable = 0> {
- // 64-bit vector types.
- def v4i16 : N3VDInt<op24, op23, 0b01, op11_8, op4, itinD16,
- OpcodeStr, !strconcat(Dt, "16"),
- v4i16, v4i16, IntOp, Commutable>;
- def v2i32 : N3VDInt<op24, op23, 0b10, op11_8, op4, itinD32,
- OpcodeStr, !strconcat(Dt, "32"),
- v2i32, v2i32, IntOp, Commutable>;
-
- // 128-bit vector types.
- def v8i16 : N3VQInt<op24, op23, 0b01, op11_8, op4, itinQ16,
- OpcodeStr, !strconcat(Dt, "16"),
- v8i16, v8i16, IntOp, Commutable>;
- def v4i32 : N3VQInt<op24, op23, 0b10, op11_8, op4, itinQ32,
- OpcodeStr, !strconcat(Dt, "32"),
- v4i32, v4i32, IntOp, Commutable>;
-}
-
-multiclass N3VIntSL_HS<bits<4> op11_8,
- InstrItinClass itinD16, InstrItinClass itinD32,
- InstrItinClass itinQ16, InstrItinClass itinQ32,
- string OpcodeStr, string Dt, Intrinsic IntOp> {
- def v4i16 : N3VDIntSL16<0b01, op11_8, itinD16,
- OpcodeStr, !strconcat(Dt, "16"), v4i16, IntOp>;
- def v2i32 : N3VDIntSL<0b10, op11_8, itinD32,
- OpcodeStr, !strconcat(Dt, "32"), v2i32, IntOp>;
- def v8i16 : N3VQIntSL16<0b01, op11_8, itinQ16,
- OpcodeStr, !strconcat(Dt, "16"), v8i16, v4i16, IntOp>;
- def v4i32 : N3VQIntSL<0b10, op11_8, itinQ32,
- OpcodeStr, !strconcat(Dt, "32"), v4i32, v2i32, IntOp>;
-}
-
-// ....then also with element size of 8 bits:
-multiclass N3VInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
- InstrItinClass itinD16, InstrItinClass itinD32,
- InstrItinClass itinQ16, InstrItinClass itinQ32,
- string OpcodeStr, string Dt,
- Intrinsic IntOp, bit Commutable = 0>
- : N3VInt_HS<op24, op23, op11_8, op4, itinD16, itinD32, itinQ16, itinQ32,
- OpcodeStr, Dt, IntOp, Commutable> {
- def v8i8 : N3VDInt<op24, op23, 0b00, op11_8, op4, itinD16,
- OpcodeStr, !strconcat(Dt, "8"),
- v8i8, v8i8, IntOp, Commutable>;
- def v16i8 : N3VQInt<op24, op23, 0b00, op11_8, op4, itinQ16,
- OpcodeStr, !strconcat(Dt, "8"),
- v16i8, v16i8, IntOp, Commutable>;
-}
-
-// ....then also with element size of 64 bits:
-multiclass N3VInt_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
- InstrItinClass itinD16, InstrItinClass itinD32,
- InstrItinClass itinQ16, InstrItinClass itinQ32,
- string OpcodeStr, string Dt,
- Intrinsic IntOp, bit Commutable = 0>
- : N3VInt_QHS<op24, op23, op11_8, op4, itinD16, itinD32, itinQ16, itinQ32,
- OpcodeStr, Dt, IntOp, Commutable> {
- def v1i64 : N3VDInt<op24, op23, 0b11, op11_8, op4, itinD32,
- OpcodeStr, !strconcat(Dt, "64"),
- v1i64, v1i64, IntOp, Commutable>;
- def v2i64 : N3VQInt<op24, op23, 0b11, op11_8, op4, itinQ32,
- OpcodeStr, !strconcat(Dt, "64"),
- v2i64, v2i64, IntOp, Commutable>;
-}
-
-
-// Neon Narrowing 3-register vector intrinsics,
-// source operand element sizes of 16, 32 and 64 bits:
-multiclass N3VNInt_HSD<bit op24, bit op23, bits<4> op11_8, bit op4,
- string OpcodeStr, string Dt,
- Intrinsic IntOp, bit Commutable = 0> {
- def v8i8 : N3VNInt<op24, op23, 0b00, op11_8, op4,
- OpcodeStr, !strconcat(Dt, "16"),
- v8i8, v8i16, IntOp, Commutable>;
- def v4i16 : N3VNInt<op24, op23, 0b01, op11_8, op4,
- OpcodeStr, !strconcat(Dt, "32"),
- v4i16, v4i32, IntOp, Commutable>;
- def v2i32 : N3VNInt<op24, op23, 0b10, op11_8, op4,
- OpcodeStr, !strconcat(Dt, "64"),
- v2i32, v2i64, IntOp, Commutable>;
-}
-
-
-// Neon Long 3-register vector intrinsics.
-
-// First with only element sizes of 16 and 32 bits:
-multiclass N3VLInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- Intrinsic IntOp, bit Commutable = 0> {
- def v4i32 : N3VLInt<op24, op23, 0b01, op11_8, op4, itin,
- OpcodeStr, !strconcat(Dt, "16"),
- v4i32, v4i16, IntOp, Commutable>;
- def v2i64 : N3VLInt<op24, op23, 0b10, op11_8, op4, itin,
- OpcodeStr, !strconcat(Dt, "32"),
- v2i64, v2i32, IntOp, Commutable>;
-}
-
-multiclass N3VLIntSL_HS<bit op24, bits<4> op11_8,
- InstrItinClass itin, string OpcodeStr, string Dt,
- Intrinsic IntOp> {
- def v4i16 : N3VLIntSL16<op24, 0b01, op11_8, itin,
- OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, IntOp>;
- def v2i32 : N3VLIntSL<op24, 0b10, op11_8, itin,
- OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, IntOp>;
-}
-
-// ....then also with element size of 8 bits:
-multiclass N3VLInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- Intrinsic IntOp, bit Commutable = 0>
- : N3VLInt_HS<op24, op23, op11_8, op4, itin, OpcodeStr, Dt,
- IntOp, Commutable> {
- def v8i16 : N3VLInt<op24, op23, 0b00, op11_8, op4, itin,
- OpcodeStr, !strconcat(Dt, "8"),
- v8i16, v8i8, IntOp, Commutable>;
-}
-
-
-// Neon Wide 3-register vector intrinsics,
-// source operand element sizes of 8, 16 and 32 bits:
-multiclass N3VWInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
- string OpcodeStr, string Dt,
- Intrinsic IntOp, bit Commutable = 0> {
- def v8i16 : N3VWInt<op24, op23, 0b00, op11_8, op4,
- OpcodeStr, !strconcat(Dt, "8"),
- v8i16, v8i8, IntOp, Commutable>;
- def v4i32 : N3VWInt<op24, op23, 0b01, op11_8, op4,
- OpcodeStr, !strconcat(Dt, "16"),
- v4i32, v4i16, IntOp, Commutable>;
- def v2i64 : N3VWInt<op24, op23, 0b10, op11_8, op4,
- OpcodeStr, !strconcat(Dt, "32"),
- v2i64, v2i32, IntOp, Commutable>;
-}
-
-
-// Neon Multiply-Op vector operations,
-// element sizes of 8, 16 and 32 bits:
-multiclass N3VMulOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
- InstrItinClass itinD16, InstrItinClass itinD32,
- InstrItinClass itinQ16, InstrItinClass itinQ32,
- string OpcodeStr, string Dt, SDNode OpNode> {
- // 64-bit vector types.
- def v8i8 : N3VDMulOp<op24, op23, 0b00, op11_8, op4, itinD16,
- OpcodeStr, !strconcat(Dt, "8"), v8i8, mul, OpNode>;
- def v4i16 : N3VDMulOp<op24, op23, 0b01, op11_8, op4, itinD16,
- OpcodeStr, !strconcat(Dt, "16"), v4i16, mul, OpNode>;
- def v2i32 : N3VDMulOp<op24, op23, 0b10, op11_8, op4, itinD32,
- OpcodeStr, !strconcat(Dt, "32"), v2i32, mul, OpNode>;
-
- // 128-bit vector types.
- def v16i8 : N3VQMulOp<op24, op23, 0b00, op11_8, op4, itinQ16,
- OpcodeStr, !strconcat(Dt, "8"), v16i8, mul, OpNode>;
- def v8i16 : N3VQMulOp<op24, op23, 0b01, op11_8, op4, itinQ16,
- OpcodeStr, !strconcat(Dt, "16"), v8i16, mul, OpNode>;
- def v4i32 : N3VQMulOp<op24, op23, 0b10, op11_8, op4, itinQ32,
- OpcodeStr, !strconcat(Dt, "32"), v4i32, mul, OpNode>;
-}
-
-multiclass N3VMulOpSL_HS<bits<4> op11_8,
- InstrItinClass itinD16, InstrItinClass itinD32,
- InstrItinClass itinQ16, InstrItinClass itinQ32,
- string OpcodeStr, string Dt, SDNode ShOp> {
- def v4i16 : N3VDMulOpSL16<0b01, op11_8, itinD16,
- OpcodeStr, !strconcat(Dt, "16"), v4i16, mul, ShOp>;
- def v2i32 : N3VDMulOpSL<0b10, op11_8, itinD32,
- OpcodeStr, !strconcat(Dt, "32"), v2i32, mul, ShOp>;
- def v8i16 : N3VQMulOpSL16<0b01, op11_8, itinQ16,
- OpcodeStr, !strconcat(Dt, "16"), v8i16, v4i16,
- mul, ShOp>;
- def v4i32 : N3VQMulOpSL<0b10, op11_8, itinQ32,
- OpcodeStr, !strconcat(Dt, "32"), v4i32, v2i32,
- mul, ShOp>;
-}
-
-// Neon 3-argument intrinsics,
-// element sizes of 8, 16 and 32 bits:
-multiclass N3VInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
- string OpcodeStr, string Dt, Intrinsic IntOp> {
- // 64-bit vector types.
- def v8i8 : N3VDInt3<op24, op23, 0b00, op11_8, op4, IIC_VMACi16D,
- OpcodeStr, !strconcat(Dt, "8"), v8i8, v8i8, IntOp>;
- def v4i16 : N3VDInt3<op24, op23, 0b01, op11_8, op4, IIC_VMACi16D,
- OpcodeStr, !strconcat(Dt, "16"), v4i16, v4i16, IntOp>;
- def v2i32 : N3VDInt3<op24, op23, 0b10, op11_8, op4, IIC_VMACi32D,
- OpcodeStr, !strconcat(Dt, "32"), v2i32, v2i32, IntOp>;
-
- // 128-bit vector types.
- def v16i8 : N3VQInt3<op24, op23, 0b00, op11_8, op4, IIC_VMACi16Q,
- OpcodeStr, !strconcat(Dt, "8"), v16i8, v16i8, IntOp>;
- def v8i16 : N3VQInt3<op24, op23, 0b01, op11_8, op4, IIC_VMACi16Q,
- OpcodeStr, !strconcat(Dt, "16"), v8i16, v8i16, IntOp>;
- def v4i32 : N3VQInt3<op24, op23, 0b10, op11_8, op4, IIC_VMACi32Q,
- OpcodeStr, !strconcat(Dt, "32"), v4i32, v4i32, IntOp>;
-}
-
-
-// Neon Long 3-argument intrinsics.
-
-// First with only element sizes of 16 and 32 bits:
-multiclass N3VLInt3_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
- string OpcodeStr, string Dt, Intrinsic IntOp> {
- def v4i32 : N3VLInt3<op24, op23, 0b01, op11_8, op4, IIC_VMACi16D,
- OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, IntOp>;
- def v2i64 : N3VLInt3<op24, op23, 0b10, op11_8, op4, IIC_VMACi16D,
- OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, IntOp>;
-}
-
-multiclass N3VLInt3SL_HS<bit op24, bits<4> op11_8,
- string OpcodeStr, string Dt, Intrinsic IntOp> {
- def v4i16 : N3VLInt3SL16<op24, 0b01, op11_8, IIC_VMACi16D,
- OpcodeStr, !strconcat(Dt,"16"), v4i32, v4i16, IntOp>;
- def v2i32 : N3VLInt3SL<op24, 0b10, op11_8, IIC_VMACi32D,
- OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, IntOp>;
-}
-
-// ....then also with element size of 8 bits:
-multiclass N3VLInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
- string OpcodeStr, string Dt, Intrinsic IntOp>
- : N3VLInt3_HS<op24, op23, op11_8, op4, OpcodeStr, Dt, IntOp> {
- def v8i16 : N3VLInt3<op24, op23, 0b00, op11_8, op4, IIC_VMACi16D,
- OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, IntOp>;
-}
-
-
-// Neon 2-register vector intrinsics,
-// element sizes of 8, 16 and 32 bits:
-multiclass N2VInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
- bits<5> op11_7, bit op4,
- InstrItinClass itinD, InstrItinClass itinQ,
- string OpcodeStr, string Dt, Intrinsic IntOp> {
- // 64-bit vector types.
- def v8i8 : N2VDInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
- itinD, OpcodeStr, !strconcat(Dt, "8"), v8i8, v8i8, IntOp>;
- def v4i16 : N2VDInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
- itinD, OpcodeStr, !strconcat(Dt, "16"),v4i16,v4i16,IntOp>;
- def v2i32 : N2VDInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
- itinD, OpcodeStr, !strconcat(Dt, "32"),v2i32,v2i32,IntOp>;
-
- // 128-bit vector types.
- def v16i8 : N2VQInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
- itinQ, OpcodeStr, !strconcat(Dt, "8"), v16i8,v16i8,IntOp>;
- def v8i16 : N2VQInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
- itinQ, OpcodeStr, !strconcat(Dt, "16"),v8i16,v8i16,IntOp>;
- def v4i32 : N2VQInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
- itinQ, OpcodeStr, !strconcat(Dt, "32"),v4i32,v4i32,IntOp>;
-}
-
-
-// Neon Pairwise long 2-register intrinsics,
-// element sizes of 8, 16 and 32 bits:
-multiclass N2VPLInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
- bits<5> op11_7, bit op4,
- string OpcodeStr, string Dt, Intrinsic IntOp> {
- // 64-bit vector types.
- def v8i8 : N2VDPLInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
- OpcodeStr, !strconcat(Dt, "8"), v4i16, v8i8, IntOp>;
- def v4i16 : N2VDPLInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
- OpcodeStr, !strconcat(Dt, "16"), v2i32, v4i16, IntOp>;
- def v2i32 : N2VDPLInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
- OpcodeStr, !strconcat(Dt, "32"), v1i64, v2i32, IntOp>;
-
- // 128-bit vector types.
- def v16i8 : N2VQPLInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
- OpcodeStr, !strconcat(Dt, "8"), v8i16, v16i8, IntOp>;
- def v8i16 : N2VQPLInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
- OpcodeStr, !strconcat(Dt, "16"), v4i32, v8i16, IntOp>;
- def v4i32 : N2VQPLInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
- OpcodeStr, !strconcat(Dt, "32"), v2i64, v4i32, IntOp>;
-}
-
-
-// Neon Pairwise long 2-register accumulate intrinsics,
-// element sizes of 8, 16 and 32 bits:
-multiclass N2VPLInt2_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
- bits<5> op11_7, bit op4,
- string OpcodeStr, string Dt, Intrinsic IntOp> {
- // 64-bit vector types.
- def v8i8 : N2VDPLInt2<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
- OpcodeStr, !strconcat(Dt, "8"), v4i16, v8i8, IntOp>;
- def v4i16 : N2VDPLInt2<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
- OpcodeStr, !strconcat(Dt, "16"), v2i32, v4i16, IntOp>;
- def v2i32 : N2VDPLInt2<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
- OpcodeStr, !strconcat(Dt, "32"), v1i64, v2i32, IntOp>;
-
- // 128-bit vector types.
- def v16i8 : N2VQPLInt2<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
- OpcodeStr, !strconcat(Dt, "8"), v8i16, v16i8, IntOp>;
- def v8i16 : N2VQPLInt2<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
- OpcodeStr, !strconcat(Dt, "16"), v4i32, v8i16, IntOp>;
- def v4i32 : N2VQPLInt2<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
- OpcodeStr, !strconcat(Dt, "32"), v2i64, v4i32, IntOp>;
-}
-
-
-// Neon 2-register vector shift by immediate,
-// element sizes of 8, 16, 32 and 64 bits:
-multiclass N2VSh_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
- InstrItinClass itin, string OpcodeStr, string Dt,
- SDNode OpNode> {
- // 64-bit vector types.
- def v8i8 : N2VDSh<op24, op23, op11_8, 0, op4, itin,
- OpcodeStr, !strconcat(Dt, "8"), v8i8, OpNode> {
- let Inst{21-19} = 0b001; // imm6 = 001xxx
- }
- def v4i16 : N2VDSh<op24, op23, op11_8, 0, op4, itin,
- OpcodeStr, !strconcat(Dt, "16"), v4i16, OpNode> {
- let Inst{21-20} = 0b01; // imm6 = 01xxxx
- }
- def v2i32 : N2VDSh<op24, op23, op11_8, 0, op4, itin,
- OpcodeStr, !strconcat(Dt, "32"), v2i32, OpNode> {
- let Inst{21} = 0b1; // imm6 = 1xxxxx
- }
- def v1i64 : N2VDSh<op24, op23, op11_8, 1, op4, itin,
- OpcodeStr, !strconcat(Dt, "64"), v1i64, OpNode>;
- // imm6 = xxxxxx
-
- // 128-bit vector types.
- def v16i8 : N2VQSh<op24, op23, op11_8, 0, op4, itin,
- OpcodeStr, !strconcat(Dt, "8"), v16i8, OpNode> {
- let Inst{21-19} = 0b001; // imm6 = 001xxx
- }
- def v8i16 : N2VQSh<op24, op23, op11_8, 0, op4, itin,
- OpcodeStr, !strconcat(Dt, "16"), v8i16, OpNode> {
- let Inst{21-20} = 0b01; // imm6 = 01xxxx
- }
- def v4i32 : N2VQSh<op24, op23, op11_8, 0, op4, itin,
- OpcodeStr, !strconcat(Dt, "32"), v4i32, OpNode> {
- let Inst{21} = 0b1; // imm6 = 1xxxxx
- }
- def v2i64 : N2VQSh<op24, op23, op11_8, 1, op4, itin,
- OpcodeStr, !strconcat(Dt, "64"), v2i64, OpNode>;
- // imm6 = xxxxxx
-}
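
The Inst{21-19}/Inst{21-20}/Inst{21} overrides in this and the following shift multiclasses encode the element size in the leading bits of the 6-bit immediate field, exactly as the "imm6 = 001xxx / 01xxxx / 1xxxxx / xxxxxx" comments indicate, while the 64-bit variants set the extra op7 bit instead. A small decoding sketch in plain C++, assuming op7 corresponds to the architectural L bit:

  #include <cassert>

  // Recover the element size from the L bit and the imm6 field of a NEON
  // shift-by-immediate encoding, following the size-marker comments above.
  unsigned shiftImmElemBits(bool lBit, unsigned imm6) {
    if (lBit)                  return 64;  // imm6 = xxxxxx
    if (imm6 & 0x20)           return 32;  // imm6 = 1xxxxx
    if ((imm6 & 0x30) == 0x10) return 16;  // imm6 = 01xxxx
    return 8;                              // imm6 = 001xxx
  }

  int main() {
    assert(shiftImmElemBits(false, 0x08) == 8);   // imm6 = 001000
    assert(shiftImmElemBits(false, 0x3f) == 32);  // imm6 = 111111
    assert(shiftImmElemBits(true,  0x01) == 64);
    return 0;
  }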
-
-
-// Neon Shift-Accumulate vector operations,
-// element sizes of 8, 16, 32 and 64 bits:
-multiclass N2VShAdd_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
- string OpcodeStr, string Dt, SDNode ShOp> {
- // 64-bit vector types.
- def v8i8 : N2VDShAdd<op24, op23, op11_8, 0, op4,
- OpcodeStr, !strconcat(Dt, "8"), v8i8, ShOp> {
- let Inst{21-19} = 0b001; // imm6 = 001xxx
- }
- def v4i16 : N2VDShAdd<op24, op23, op11_8, 0, op4,
- OpcodeStr, !strconcat(Dt, "16"), v4i16, ShOp> {
- let Inst{21-20} = 0b01; // imm6 = 01xxxx
- }
- def v2i32 : N2VDShAdd<op24, op23, op11_8, 0, op4,
- OpcodeStr, !strconcat(Dt, "32"), v2i32, ShOp> {
- let Inst{21} = 0b1; // imm6 = 1xxxxx
- }
- def v1i64 : N2VDShAdd<op24, op23, op11_8, 1, op4,
- OpcodeStr, !strconcat(Dt, "64"), v1i64, ShOp>;
- // imm6 = xxxxxx
-
- // 128-bit vector types.
- def v16i8 : N2VQShAdd<op24, op23, op11_8, 0, op4,
- OpcodeStr, !strconcat(Dt, "8"), v16i8, ShOp> {
- let Inst{21-19} = 0b001; // imm6 = 001xxx
- }
- def v8i16 : N2VQShAdd<op24, op23, op11_8, 0, op4,
- OpcodeStr, !strconcat(Dt, "16"), v8i16, ShOp> {
- let Inst{21-20} = 0b01; // imm6 = 01xxxx
- }
- def v4i32 : N2VQShAdd<op24, op23, op11_8, 0, op4,
- OpcodeStr, !strconcat(Dt, "32"), v4i32, ShOp> {
- let Inst{21} = 0b1; // imm6 = 1xxxxx
- }
- def v2i64 : N2VQShAdd<op24, op23, op11_8, 1, op4,
- OpcodeStr, !strconcat(Dt, "64"), v2i64, ShOp>;
- // imm6 = xxxxxx
-}
-
-
-// Neon Shift-Insert vector operations,
-// element sizes of 8, 16, 32 and 64 bits:
-multiclass N2VShIns_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
- string OpcodeStr, SDNode ShOp> {
- // 64-bit vector types.
- def v8i8 : N2VDShIns<op24, op23, op11_8, 0, op4,
- OpcodeStr, "8", v8i8, ShOp> {
- let Inst{21-19} = 0b001; // imm6 = 001xxx
- }
- def v4i16 : N2VDShIns<op24, op23, op11_8, 0, op4,
- OpcodeStr, "16", v4i16, ShOp> {
- let Inst{21-20} = 0b01; // imm6 = 01xxxx
- }
- def v2i32 : N2VDShIns<op24, op23, op11_8, 0, op4,
- OpcodeStr, "32", v2i32, ShOp> {
- let Inst{21} = 0b1; // imm6 = 1xxxxx
- }
- def v1i64 : N2VDShIns<op24, op23, op11_8, 1, op4,
- OpcodeStr, "64", v1i64, ShOp>;
- // imm6 = xxxxxx
-
- // 128-bit vector types.
- def v16i8 : N2VQShIns<op24, op23, op11_8, 0, op4,
- OpcodeStr, "8", v16i8, ShOp> {
- let Inst{21-19} = 0b001; // imm6 = 001xxx
- }
- def v8i16 : N2VQShIns<op24, op23, op11_8, 0, op4,
- OpcodeStr, "16", v8i16, ShOp> {
- let Inst{21-20} = 0b01; // imm6 = 01xxxx
- }
- def v4i32 : N2VQShIns<op24, op23, op11_8, 0, op4,
- OpcodeStr, "32", v4i32, ShOp> {
- let Inst{21} = 0b1; // imm6 = 1xxxxx
- }
- def v2i64 : N2VQShIns<op24, op23, op11_8, 1, op4,
- OpcodeStr, "64", v2i64, ShOp>;
- // imm6 = xxxxxx
-}
-
-// Neon Shift Long operations,
-// element sizes of 8, 16, 32 bits:
-multiclass N2VLSh_QHS<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
- bit op4, string OpcodeStr, string Dt, SDNode OpNode> {
- def v8i16 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
- OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, OpNode> {
- let Inst{21-19} = 0b001; // imm6 = 001xxx
- }
- def v4i32 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
- OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, OpNode> {
- let Inst{21-20} = 0b01; // imm6 = 01xxxx
- }
- def v2i64 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
- OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, OpNode> {
- let Inst{21} = 0b1; // imm6 = 1xxxxx
- }
-}
-
-// Neon Shift Narrow operations,
-// element sizes of 16, 32, 64 bits:
-multiclass N2VNSh_HSD<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
- bit op4, InstrItinClass itin, string OpcodeStr, string Dt,
- SDNode OpNode> {
- def v8i8 : N2VNSh<op24, op23, op11_8, op7, op6, op4, itin,
- OpcodeStr, !strconcat(Dt, "16"), v8i8, v8i16, OpNode> {
- let Inst{21-19} = 0b001; // imm6 = 001xxx
- }
- def v4i16 : N2VNSh<op24, op23, op11_8, op7, op6, op4, itin,
- OpcodeStr, !strconcat(Dt, "32"), v4i16, v4i32, OpNode> {
- let Inst{21-20} = 0b01; // imm6 = 01xxxx
- }
- def v2i32 : N2VNSh<op24, op23, op11_8, op7, op6, op4, itin,
- OpcodeStr, !strconcat(Dt, "64"), v2i32, v2i64, OpNode> {
- let Inst{21} = 0b1; // imm6 = 1xxxxx
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Instruction Definitions.
-//===----------------------------------------------------------------------===//
-
-// Vector Add Operations.
-
-// VADD : Vector Add (integer and floating-point)
-defm VADD : N3V_QHSD<0, 0, 0b1000, 0, IIC_VBINiD, IIC_VBINiQ, "vadd", "i",
- add, 1>;
-def VADDfd : N3VD<0, 0, 0b00, 0b1101, 0, IIC_VBIND, "vadd", "f32",
- v2f32, v2f32, fadd, 1>;
-def VADDfq : N3VQ<0, 0, 0b00, 0b1101, 0, IIC_VBINQ, "vadd", "f32",
- v4f32, v4f32, fadd, 1>;
-// VADDL : Vector Add Long (Q = D + D)
-defm VADDLs : N3VLInt_QHS<0,1,0b0000,0, IIC_VSHLiD, "vaddl", "s",
- int_arm_neon_vaddls, 1>;
-defm VADDLu : N3VLInt_QHS<1,1,0b0000,0, IIC_VSHLiD, "vaddl", "u",
- int_arm_neon_vaddlu, 1>;
-// VADDW : Vector Add Wide (Q = Q + D)
-defm VADDWs : N3VWInt_QHS<0,1,0b0001,0, "vaddw", "s", int_arm_neon_vaddws, 0>;
-defm VADDWu : N3VWInt_QHS<1,1,0b0001,0, "vaddw", "u", int_arm_neon_vaddwu, 0>;
-// VHADD : Vector Halving Add
-defm VHADDs : N3VInt_QHS<0,0,0b0000,0, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
- IIC_VBINi4Q, "vhadd", "s", int_arm_neon_vhadds, 1>;
-defm VHADDu : N3VInt_QHS<1,0,0b0000,0, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
- IIC_VBINi4Q, "vhadd", "u", int_arm_neon_vhaddu, 1>;
-// VRHADD : Vector Rounding Halving Add
-defm VRHADDs : N3VInt_QHS<0,0,0b0001,0, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
- IIC_VBINi4Q, "vrhadd", "s", int_arm_neon_vrhadds, 1>;
-defm VRHADDu : N3VInt_QHS<1,0,0b0001,0, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
- IIC_VBINi4Q, "vrhadd", "u", int_arm_neon_vrhaddu, 1>;
-// VQADD : Vector Saturating Add
-defm VQADDs : N3VInt_QHSD<0,0,0b0000,1, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
- IIC_VBINi4Q, "vqadd", "s", int_arm_neon_vqadds, 1>;
-defm VQADDu : N3VInt_QHSD<1,0,0b0000,1, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
- IIC_VBINi4Q, "vqadd", "u", int_arm_neon_vqaddu, 1>;
-// VADDHN : Vector Add and Narrow Returning High Half (D = Q + Q)
-defm VADDHN : N3VNInt_HSD<0,1,0b0100,0, "vaddhn", "i",
- int_arm_neon_vaddhn, 1>;
-// VRADDHN : Vector Rounding Add and Narrow Returning High Half (D = Q + Q)
-defm VRADDHN : N3VNInt_HSD<1,1,0b0100,0, "vraddhn", "i",
- int_arm_neon_vraddhn, 1>;
-
-// Vector Multiply Operations.
-
-// VMUL : Vector Multiply (integer, polynomial and floating-point)
-defm VMUL : N3V_QHS<0, 0, 0b1001, 1, IIC_VMULi16D, IIC_VMULi32D,
- IIC_VMULi16Q, IIC_VMULi32Q, "vmul", "i", mul, 1>;
-def VMULpd : N3VDInt<1, 0, 0b00, 0b1001, 1, IIC_VMULi16D, "vmul", "p8",
- v8i8, v8i8, int_arm_neon_vmulp, 1>;
-def VMULpq : N3VQInt<1, 0, 0b00, 0b1001, 1, IIC_VMULi16Q, "vmul", "p8",
- v16i8, v16i8, int_arm_neon_vmulp, 1>;
-def VMULfd : N3VD<1, 0, 0b00, 0b1101, 1, IIC_VBIND, "vmul", "f32",
- v2f32, v2f32, fmul, 1>;
-def VMULfq : N3VQ<1, 0, 0b00, 0b1101, 1, IIC_VBINQ, "vmul", "f32",
- v4f32, v4f32, fmul, 1>;
-defm VMULsl : N3VSL_HS<0b1000, "vmul", "i", mul>;
-def VMULslfd : N3VDSL<0b10, 0b1001, IIC_VBIND, "vmul", "f32", v2f32, fmul>;
-def VMULslfq : N3VQSL<0b10, 0b1001, IIC_VBINQ, "vmul", "f32", v4f32,
- v2f32, fmul>;
-
-def : Pat<(v8i16 (mul (v8i16 QPR:$src1),
- (v8i16 (NEONvduplane (v8i16 QPR:$src2), imm:$lane)))),
- (v8i16 (VMULslv8i16 (v8i16 QPR:$src1),
- (v4i16 (EXTRACT_SUBREG QPR:$src2,
- (DSubReg_i16_reg imm:$lane))),
- (SubReg_i16_lane imm:$lane)))>;
-def : Pat<(v4i32 (mul (v4i32 QPR:$src1),
- (v4i32 (NEONvduplane (v4i32 QPR:$src2), imm:$lane)))),
- (v4i32 (VMULslv4i32 (v4i32 QPR:$src1),
- (v2i32 (EXTRACT_SUBREG QPR:$src2,
- (DSubReg_i32_reg imm:$lane))),
- (SubReg_i32_lane imm:$lane)))>;
-def : Pat<(v4f32 (fmul (v4f32 QPR:$src1),
- (v4f32 (NEONvduplane (v4f32 QPR:$src2), imm:$lane)))),
- (v4f32 (VMULslfq (v4f32 QPR:$src1),
- (v2f32 (EXTRACT_SUBREG QPR:$src2,
- (DSubReg_i32_reg imm:$lane))),
- (SubReg_i32_lane imm:$lane)))>;
-
-// VQDMULH : Vector Saturating Doubling Multiply Returning High Half
-defm VQDMULH : N3VInt_HS<0, 0, 0b1011, 0, IIC_VMULi16D, IIC_VMULi32D,
- IIC_VMULi16Q, IIC_VMULi32Q,
- "vqdmulh", "s", int_arm_neon_vqdmulh, 1>;
-defm VQDMULHsl: N3VIntSL_HS<0b1100, IIC_VMULi16D, IIC_VMULi32D,
- IIC_VMULi16Q, IIC_VMULi32Q,
- "vqdmulh", "s", int_arm_neon_vqdmulh>;
-def : Pat<(v8i16 (int_arm_neon_vqdmulh (v8i16 QPR:$src1),
- (v8i16 (NEONvduplane (v8i16 QPR:$src2),
- imm:$lane)))),
- (v8i16 (VQDMULHslv8i16 (v8i16 QPR:$src1),
- (v4i16 (EXTRACT_SUBREG QPR:$src2,
- (DSubReg_i16_reg imm:$lane))),
- (SubReg_i16_lane imm:$lane)))>;
-def : Pat<(v4i32 (int_arm_neon_vqdmulh (v4i32 QPR:$src1),
- (v4i32 (NEONvduplane (v4i32 QPR:$src2),
- imm:$lane)))),
- (v4i32 (VQDMULHslv4i32 (v4i32 QPR:$src1),
- (v2i32 (EXTRACT_SUBREG QPR:$src2,
- (DSubReg_i32_reg imm:$lane))),
- (SubReg_i32_lane imm:$lane)))>;
-
-// VQRDMULH : Vector Rounding Saturating Doubling Multiply Returning High Half
-defm VQRDMULH : N3VInt_HS<1, 0, 0b1011, 0, IIC_VMULi16D, IIC_VMULi32D,
- IIC_VMULi16Q, IIC_VMULi32Q,
- "vqrdmulh", "s", int_arm_neon_vqrdmulh, 1>;
-defm VQRDMULHsl : N3VIntSL_HS<0b1101, IIC_VMULi16D, IIC_VMULi32D,
- IIC_VMULi16Q, IIC_VMULi32Q,
- "vqrdmulh", "s", int_arm_neon_vqrdmulh>;
-def : Pat<(v8i16 (int_arm_neon_vqrdmulh (v8i16 QPR:$src1),
- (v8i16 (NEONvduplane (v8i16 QPR:$src2),
- imm:$lane)))),
- (v8i16 (VQRDMULHslv8i16 (v8i16 QPR:$src1),
- (v4i16 (EXTRACT_SUBREG QPR:$src2,
- (DSubReg_i16_reg imm:$lane))),
- (SubReg_i16_lane imm:$lane)))>;
-def : Pat<(v4i32 (int_arm_neon_vqrdmulh (v4i32 QPR:$src1),
- (v4i32 (NEONvduplane (v4i32 QPR:$src2),
- imm:$lane)))),
- (v4i32 (VQRDMULHslv4i32 (v4i32 QPR:$src1),
- (v2i32 (EXTRACT_SUBREG QPR:$src2,
- (DSubReg_i32_reg imm:$lane))),
- (SubReg_i32_lane imm:$lane)))>;
-
-// VMULL : Vector Multiply Long (integer and polynomial) (Q = D * D)
-defm VMULLs : N3VLInt_QHS<0,1,0b1100,0, IIC_VMULi16D, "vmull", "s",
- int_arm_neon_vmulls, 1>;
-defm VMULLu : N3VLInt_QHS<1,1,0b1100,0, IIC_VMULi16D, "vmull", "u",
- int_arm_neon_vmullu, 1>;
-def VMULLp : N3VLInt<0, 1, 0b00, 0b1110, 0, IIC_VMULi16D, "vmull", "p8",
- v8i16, v8i8, int_arm_neon_vmullp, 1>;
-defm VMULLsls : N3VLIntSL_HS<0, 0b1010, IIC_VMULi16D, "vmull", "s",
- int_arm_neon_vmulls>;
-defm VMULLslu : N3VLIntSL_HS<1, 0b1010, IIC_VMULi16D, "vmull", "u",
- int_arm_neon_vmullu>;
-
-// VQDMULL : Vector Saturating Doubling Multiply Long (Q = D * D)
-defm VQDMULL : N3VLInt_HS<0,1,0b1101,0, IIC_VMULi16D, "vqdmull", "s",
- int_arm_neon_vqdmull, 1>;
-defm VQDMULLsl: N3VLIntSL_HS<0, 0b1011, IIC_VMULi16D, "vqdmull", "s",
- int_arm_neon_vqdmull>;
-
-// Vector Multiply-Accumulate and Multiply-Subtract Operations.
-
-// VMLA : Vector Multiply Accumulate (integer and floating-point)
-defm VMLA : N3VMulOp_QHS<0, 0, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
- IIC_VMACi16Q, IIC_VMACi32Q, "vmla", "i", add>;
-def VMLAfd : N3VDMulOp<0, 0, 0b00, 0b1101, 1, IIC_VMACD, "vmla", "f32",
- v2f32, fmul, fadd>;
-def VMLAfq : N3VQMulOp<0, 0, 0b00, 0b1101, 1, IIC_VMACQ, "vmla", "f32",
- v4f32, fmul, fadd>;
-defm VMLAsl : N3VMulOpSL_HS<0b0000, IIC_VMACi16D, IIC_VMACi32D,
- IIC_VMACi16Q, IIC_VMACi32Q, "vmla", "i", add>;
-def VMLAslfd : N3VDMulOpSL<0b10, 0b0001, IIC_VMACD, "vmla", "f32",
- v2f32, fmul, fadd>;
-def VMLAslfq : N3VQMulOpSL<0b10, 0b0001, IIC_VMACQ, "vmla", "f32",
- v4f32, v2f32, fmul, fadd>;
-
-def : Pat<(v8i16 (add (v8i16 QPR:$src1),
- (mul (v8i16 QPR:$src2),
- (v8i16 (NEONvduplane (v8i16 QPR:$src3), imm:$lane))))),
- (v8i16 (VMLAslv8i16 (v8i16 QPR:$src1), (v8i16 QPR:$src2),
- (v4i16 (EXTRACT_SUBREG QPR:$src3,
- (DSubReg_i16_reg imm:$lane))),
- (SubReg_i16_lane imm:$lane)))>;
-
-def : Pat<(v4i32 (add (v4i32 QPR:$src1),
- (mul (v4i32 QPR:$src2),
- (v4i32 (NEONvduplane (v4i32 QPR:$src3), imm:$lane))))),
- (v4i32 (VMLAslv4i32 (v4i32 QPR:$src1), (v4i32 QPR:$src2),
- (v2i32 (EXTRACT_SUBREG QPR:$src3,
- (DSubReg_i32_reg imm:$lane))),
- (SubReg_i32_lane imm:$lane)))>;
-
-def : Pat<(v4f32 (fadd (v4f32 QPR:$src1),
- (fmul (v4f32 QPR:$src2),
- (v4f32 (NEONvduplane (v4f32 QPR:$src3), imm:$lane))))),
- (v4f32 (VMLAslfq (v4f32 QPR:$src1),
- (v4f32 QPR:$src2),
- (v2f32 (EXTRACT_SUBREG QPR:$src3,
- (DSubReg_i32_reg imm:$lane))),
- (SubReg_i32_lane imm:$lane)))>;
-
-// VMLAL : Vector Multiply Accumulate Long (Q += D * D)
-defm VMLALs : N3VLInt3_QHS<0,1,0b1000,0, "vmlal", "s", int_arm_neon_vmlals>;
-defm VMLALu : N3VLInt3_QHS<1,1,0b1000,0, "vmlal", "u", int_arm_neon_vmlalu>;
-
-defm VMLALsls : N3VLInt3SL_HS<0, 0b0010, "vmlal", "s", int_arm_neon_vmlals>;
-defm VMLALslu : N3VLInt3SL_HS<1, 0b0010, "vmlal", "u", int_arm_neon_vmlalu>;
-
-// VQDMLAL : Vector Saturating Doubling Multiply Accumulate Long (Q += D * D)
-defm VQDMLAL : N3VLInt3_HS<0, 1, 0b1001, 0, "vqdmlal", "s",
- int_arm_neon_vqdmlal>;
-defm VQDMLALsl: N3VLInt3SL_HS<0, 0b0011, "vqdmlal", "s", int_arm_neon_vqdmlal>;
-
-// VMLS : Vector Multiply Subtract (integer and floating-point)
-defm VMLS : N3VMulOp_QHS<1, 0, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
- IIC_VMACi16Q, IIC_VMACi32Q, "vmls", "i", sub>;
-def VMLSfd : N3VDMulOp<0, 0, 0b10, 0b1101, 1, IIC_VMACD, "vmls", "f32",
- v2f32, fmul, fsub>;
-def VMLSfq : N3VQMulOp<0, 0, 0b10, 0b1101, 1, IIC_VMACQ, "vmls", "f32",
- v4f32, fmul, fsub>;
-defm VMLSsl : N3VMulOpSL_HS<0b0100, IIC_VMACi16D, IIC_VMACi32D,
- IIC_VMACi16Q, IIC_VMACi32Q, "vmls", "i", sub>;
-def VMLSslfd : N3VDMulOpSL<0b10, 0b0101, IIC_VMACD, "vmls", "f32",
- v2f32, fmul, fsub>;
-def VMLSslfq : N3VQMulOpSL<0b10, 0b0101, IIC_VMACQ, "vmls", "f32",
- v4f32, v2f32, fmul, fsub>;
-
-def : Pat<(v8i16 (sub (v8i16 QPR:$src1),
- (mul (v8i16 QPR:$src2),
- (v8i16 (NEONvduplane (v8i16 QPR:$src3), imm:$lane))))),
- (v8i16 (VMLSslv8i16 (v8i16 QPR:$src1), (v8i16 QPR:$src2),
- (v4i16 (EXTRACT_SUBREG QPR:$src3,
- (DSubReg_i16_reg imm:$lane))),
- (SubReg_i16_lane imm:$lane)))>;
-
-def : Pat<(v4i32 (sub (v4i32 QPR:$src1),
- (mul (v4i32 QPR:$src2),
- (v4i32 (NEONvduplane (v4i32 QPR:$src3), imm:$lane))))),
- (v4i32 (VMLSslv4i32 (v4i32 QPR:$src1), (v4i32 QPR:$src2),
- (v2i32 (EXTRACT_SUBREG QPR:$src3,
- (DSubReg_i32_reg imm:$lane))),
- (SubReg_i32_lane imm:$lane)))>;
-
-def : Pat<(v4f32 (fsub (v4f32 QPR:$src1),
- (fmul (v4f32 QPR:$src2),
- (v4f32 (NEONvduplane (v4f32 QPR:$src3), imm:$lane))))),
- (v4f32 (VMLSslfq (v4f32 QPR:$src1), (v4f32 QPR:$src2),
- (v2f32 (EXTRACT_SUBREG QPR:$src3,
- (DSubReg_i32_reg imm:$lane))),
- (SubReg_i32_lane imm:$lane)))>;
-
-// VMLSL : Vector Multiply Subtract Long (Q -= D * D)
-defm VMLSLs : N3VLInt3_QHS<0,1,0b1010,0, "vmlsl", "s", int_arm_neon_vmlsls>;
-defm VMLSLu : N3VLInt3_QHS<1,1,0b1010,0, "vmlsl", "u", int_arm_neon_vmlslu>;
-
-defm VMLSLsls : N3VLInt3SL_HS<0, 0b0110, "vmlsl", "s", int_arm_neon_vmlsls>;
-defm VMLSLslu : N3VLInt3SL_HS<1, 0b0110, "vmlsl", "u", int_arm_neon_vmlslu>;
-
-// VQDMLSL : Vector Saturating Doubling Multiply Subtract Long (Q -= D * D)
-defm VQDMLSL : N3VLInt3_HS<0, 1, 0b1011, 0, "vqdmlsl", "s",
- int_arm_neon_vqdmlsl>;
-defm VQDMLSLsl: N3VLInt3SL_HS<0, 0b111, "vqdmlsl", "s", int_arm_neon_vqdmlsl>;
-
-// Vector Subtract Operations.
-
-// VSUB : Vector Subtract (integer and floating-point)
-defm VSUB : N3V_QHSD<1, 0, 0b1000, 0, IIC_VSUBiD, IIC_VSUBiQ,
- "vsub", "i", sub, 0>;
-def VSUBfd : N3VD<0, 0, 0b10, 0b1101, 0, IIC_VBIND, "vsub", "f32",
- v2f32, v2f32, fsub, 0>;
-def VSUBfq : N3VQ<0, 0, 0b10, 0b1101, 0, IIC_VBINQ, "vsub", "f32",
- v4f32, v4f32, fsub, 0>;
-// VSUBL : Vector Subtract Long (Q = D - D)
-defm VSUBLs : N3VLInt_QHS<0,1,0b0010,0, IIC_VSHLiD, "vsubl", "s",
- int_arm_neon_vsubls, 1>;
-defm VSUBLu : N3VLInt_QHS<1,1,0b0010,0, IIC_VSHLiD, "vsubl", "u",
- int_arm_neon_vsublu, 1>;
-// VSUBW : Vector Subtract Wide (Q = Q - D)
-defm VSUBWs : N3VWInt_QHS<0,1,0b0011,0, "vsubw", "s", int_arm_neon_vsubws, 0>;
-defm VSUBWu : N3VWInt_QHS<1,1,0b0011,0, "vsubw", "u", int_arm_neon_vsubwu, 0>;
-// VHSUB : Vector Halving Subtract
-defm VHSUBs : N3VInt_QHS<0, 0, 0b0010, 0, IIC_VBINi4D, IIC_VBINi4D,
- IIC_VBINi4Q, IIC_VBINi4Q,
- "vhsub", "s", int_arm_neon_vhsubs, 0>;
-defm VHSUBu : N3VInt_QHS<1, 0, 0b0010, 0, IIC_VBINi4D, IIC_VBINi4D,
- IIC_VBINi4Q, IIC_VBINi4Q,
- "vhsub", "u", int_arm_neon_vhsubu, 0>;
-// VQSUB : Vector Saturating Subtract
-defm VQSUBs : N3VInt_QHSD<0, 0, 0b0010, 1, IIC_VBINi4D, IIC_VBINi4D,
- IIC_VBINi4Q, IIC_VBINi4Q,
- "vqsub", "s", int_arm_neon_vqsubs, 0>;
-defm VQSUBu : N3VInt_QHSD<1, 0, 0b0010, 1, IIC_VBINi4D, IIC_VBINi4D,
- IIC_VBINi4Q, IIC_VBINi4Q,
- "vqsub", "u", int_arm_neon_vqsubu, 0>;
-// VSUBHN : Vector Subtract and Narrow Returning High Half (D = Q - Q)
-defm VSUBHN : N3VNInt_HSD<0,1,0b0110,0, "vsubhn", "i",
- int_arm_neon_vsubhn, 0>;
-// VRSUBHN : Vector Rounding Subtract and Narrow Returning High Half (D=Q-Q)
-defm VRSUBHN : N3VNInt_HSD<1,1,0b0110,0, "vrsubhn", "i",
- int_arm_neon_vrsubhn, 0>;
-
-// Vector Comparisons.
-
-// VCEQ : Vector Compare Equal
-defm VCEQ : N3V_QHS<1, 0, 0b1000, 1, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
- IIC_VBINi4Q, "vceq", "i", NEONvceq, 1>;
-def VCEQfd : N3VD<0,0,0b00,0b1110,0, IIC_VBIND, "vceq", "f32", v2i32, v2f32,
- NEONvceq, 1>;
-def VCEQfq : N3VQ<0,0,0b00,0b1110,0, IIC_VBINQ, "vceq", "f32", v4i32, v4f32,
- NEONvceq, 1>;
-// For disassembly only.
-defm VCEQz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00010, 0, "vceq", "i",
- "$dst, $src, #0">;
-
-// VCGE : Vector Compare Greater Than or Equal
-defm VCGEs : N3V_QHS<0, 0, 0b0011, 1, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
- IIC_VBINi4Q, "vcge", "s", NEONvcge, 0>;
-defm VCGEu : N3V_QHS<1, 0, 0b0011, 1, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
- IIC_VBINi4Q, "vcge", "u", NEONvcgeu, 0>;
-def VCGEfd : N3VD<1,0,0b00,0b1110,0, IIC_VBIND, "vcge", "f32",
- v2i32, v2f32, NEONvcge, 0>;
-def VCGEfq : N3VQ<1,0,0b00,0b1110,0, IIC_VBINQ, "vcge", "f32", v4i32, v4f32,
- NEONvcge, 0>;
-// For disassembly only.
-defm VCGEz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00001, 0, "vcge", "s",
- "$dst, $src, #0">;
-// For disassembly only.
-defm VCLEz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00011, 0, "vcle", "s",
- "$dst, $src, #0">;
-
-// VCGT : Vector Compare Greater Than
-defm VCGTs : N3V_QHS<0, 0, 0b0011, 0, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
- IIC_VBINi4Q, "vcgt", "s", NEONvcgt, 0>;
-defm VCGTu : N3V_QHS<1, 0, 0b0011, 0, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
- IIC_VBINi4Q, "vcgt", "u", NEONvcgtu, 0>;
-def VCGTfd : N3VD<1,0,0b10,0b1110,0, IIC_VBIND, "vcgt", "f32", v2i32, v2f32,
- NEONvcgt, 0>;
-def VCGTfq : N3VQ<1,0,0b10,0b1110,0, IIC_VBINQ, "vcgt", "f32", v4i32, v4f32,
- NEONvcgt, 0>;
-// For disassembly only.
-defm VCGTz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00000, 0, "vcgt", "s",
- "$dst, $src, #0">;
-// For disassembly only.
-defm VCLTz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00100, 0, "vclt", "s",
- "$dst, $src, #0">;
-
-// VACGE : Vector Absolute Compare Greater Than or Equal (aka VCAGE)
-def VACGEd : N3VDInt<1, 0, 0b00, 0b1110, 1, IIC_VBIND, "vacge", "f32",
- v2i32, v2f32, int_arm_neon_vacged, 0>;
-def VACGEq : N3VQInt<1, 0, 0b00, 0b1110, 1, IIC_VBINQ, "vacge", "f32",
- v4i32, v4f32, int_arm_neon_vacgeq, 0>;
-// VACGT : Vector Absolute Compare Greater Than (aka VCAGT)
-def VACGTd : N3VDInt<1, 0, 0b10, 0b1110, 1, IIC_VBIND, "vacgt", "f32",
- v2i32, v2f32, int_arm_neon_vacgtd, 0>;
-def VACGTq : N3VQInt<1, 0, 0b10, 0b1110, 1, IIC_VBINQ, "vacgt", "f32",
- v4i32, v4f32, int_arm_neon_vacgtq, 0>;
-// VTST : Vector Test Bits
-defm VTST : N3V_QHS<0, 0, 0b1000, 1, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
- IIC_VBINi4Q, "vtst", "", NEONvtst, 1>;
-
-// Vector Bitwise Operations.
-
-// VAND : Vector Bitwise AND
-def VANDd : N3VDX<0, 0, 0b00, 0b0001, 1, IIC_VBINiD, "vand",
- v2i32, v2i32, and, 1>;
-def VANDq : N3VQX<0, 0, 0b00, 0b0001, 1, IIC_VBINiQ, "vand",
- v4i32, v4i32, and, 1>;
-
-// VEOR : Vector Bitwise Exclusive OR
-def VEORd : N3VDX<1, 0, 0b00, 0b0001, 1, IIC_VBINiD, "veor",
- v2i32, v2i32, xor, 1>;
-def VEORq : N3VQX<1, 0, 0b00, 0b0001, 1, IIC_VBINiQ, "veor",
- v4i32, v4i32, xor, 1>;
-
-// VORR : Vector Bitwise OR
-def VORRd : N3VDX<0, 0, 0b10, 0b0001, 1, IIC_VBINiD, "vorr",
- v2i32, v2i32, or, 1>;
-def VORRq : N3VQX<0, 0, 0b10, 0b0001, 1, IIC_VBINiQ, "vorr",
- v4i32, v4i32, or, 1>;
-
-// VBIC : Vector Bitwise Bit Clear (AND NOT)
-def VBICd : N3VX<0, 0, 0b01, 0b0001, 0, 1, (outs DPR:$dst),
- (ins DPR:$src1, DPR:$src2), IIC_VBINiD,
- "vbic", "$dst, $src1, $src2", "",
- [(set DPR:$dst, (v2i32 (and DPR:$src1,
- (vnot_conv DPR:$src2))))]>;
-def VBICq : N3VX<0, 0, 0b01, 0b0001, 1, 1, (outs QPR:$dst),
- (ins QPR:$src1, QPR:$src2), IIC_VBINiQ,
- "vbic", "$dst, $src1, $src2", "",
- [(set QPR:$dst, (v4i32 (and QPR:$src1,
- (vnot_conv QPR:$src2))))]>;
-
-// VORN : Vector Bitwise OR NOT
-def VORNd : N3VX<0, 0, 0b11, 0b0001, 0, 1, (outs DPR:$dst),
- (ins DPR:$src1, DPR:$src2), IIC_VBINiD,
- "vorn", "$dst, $src1, $src2", "",
- [(set DPR:$dst, (v2i32 (or DPR:$src1,
- (vnot_conv DPR:$src2))))]>;
-def VORNq : N3VX<0, 0, 0b11, 0b0001, 1, 1, (outs QPR:$dst),
- (ins QPR:$src1, QPR:$src2), IIC_VBINiQ,
- "vorn", "$dst, $src1, $src2", "",
- [(set QPR:$dst, (v4i32 (or QPR:$src1,
- (vnot_conv QPR:$src2))))]>;
-
-// VMVN : Vector Bitwise NOT
-def VMVNd : N2VX<0b11, 0b11, 0b00, 0b00, 0b01011, 0, 0,
- (outs DPR:$dst), (ins DPR:$src), IIC_VSHLiD,
- "vmvn", "$dst, $src", "",
- [(set DPR:$dst, (v2i32 (vnot DPR:$src)))]>;
-def VMVNq : N2VX<0b11, 0b11, 0b00, 0b00, 0b01011, 1, 0,
- (outs QPR:$dst), (ins QPR:$src), IIC_VSHLiD,
- "vmvn", "$dst, $src", "",
- [(set QPR:$dst, (v4i32 (vnot QPR:$src)))]>;
-def : Pat<(v2i32 (vnot_conv DPR:$src)), (VMVNd DPR:$src)>;
-def : Pat<(v4i32 (vnot_conv QPR:$src)), (VMVNq QPR:$src)>;
-
-// VBSL : Vector Bitwise Select
-def VBSLd : N3VX<1, 0, 0b01, 0b0001, 0, 1, (outs DPR:$dst),
- (ins DPR:$src1, DPR:$src2, DPR:$src3), IIC_VCNTiD,
- "vbsl", "$dst, $src2, $src3", "$src1 = $dst",
- [(set DPR:$dst,
- (v2i32 (or (and DPR:$src2, DPR:$src1),
- (and DPR:$src3, (vnot_conv DPR:$src1)))))]>;
-def VBSLq : N3VX<1, 0, 0b01, 0b0001, 1, 1, (outs QPR:$dst),
- (ins QPR:$src1, QPR:$src2, QPR:$src3), IIC_VCNTiQ,
- "vbsl", "$dst, $src2, $src3", "$src1 = $dst",
- [(set QPR:$dst,
- (v4i32 (or (and QPR:$src2, QPR:$src1),
- (and QPR:$src3, (vnot_conv QPR:$src1)))))]>;
-
-// VBIF : Vector Bitwise Insert if False
-// like VBSL but with: "vbif $dst, $src3, $src1", "$src2 = $dst",
-def VBIFd : N3VX<1, 0, 0b11, 0b0001, 0, 1,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, DPR:$src3),
- IIC_VBINiD, "vbif", "$dst, $src2, $src3", "$src1 = $dst",
- [/* For disassembly only; pattern left blank */]>;
-def VBIFq : N3VX<1, 0, 0b11, 0b0001, 1, 1,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, QPR:$src3),
- IIC_VBINiQ, "vbif", "$dst, $src2, $src3", "$src1 = $dst",
- [/* For disassembly only; pattern left blank */]>;
-
-// VBIT : Vector Bitwise Insert if True
-// like VBSL but with: "vbit $dst, $src2, $src1", "$src3 = $dst",
-def VBITd : N3VX<1, 0, 0b10, 0b0001, 0, 1,
- (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, DPR:$src3),
- IIC_VBINiD, "vbit", "$dst, $src2, $src3", "$src1 = $dst",
- [/* For disassembly only; pattern left blank */]>;
-def VBITq : N3VX<1, 0, 0b10, 0b0001, 1, 1,
- (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, QPR:$src3),
- IIC_VBINiQ, "vbit", "$dst, $src2, $src3", "$src1 = $dst",
- [/* For disassembly only; pattern left blank */]>;
-
-// VBIT/VBIF are not yet implemented. The TwoAddress pass will not go looking
-// for equivalent operations with different register constraints; it just
-// inserts copies.
-
-// Vector Absolute Differences.
-
-// VABD : Vector Absolute Difference
-defm VABDs : N3VInt_QHS<0, 0, 0b0111, 0, IIC_VBINi4D, IIC_VBINi4D,
- IIC_VBINi4Q, IIC_VBINi4Q,
- "vabd", "s", int_arm_neon_vabds, 0>;
-defm VABDu : N3VInt_QHS<1, 0, 0b0111, 0, IIC_VBINi4D, IIC_VBINi4D,
- IIC_VBINi4Q, IIC_VBINi4Q,
- "vabd", "u", int_arm_neon_vabdu, 0>;
-def VABDfd : N3VDInt<1, 0, 0b10, 0b1101, 0, IIC_VBIND,
- "vabd", "f32", v2f32, v2f32, int_arm_neon_vabds, 0>;
-def VABDfq : N3VQInt<1, 0, 0b10, 0b1101, 0, IIC_VBINQ,
- "vabd", "f32", v4f32, v4f32, int_arm_neon_vabds, 0>;
-
-// VABDL : Vector Absolute Difference Long (Q = | D - D |)
-defm VABDLs : N3VLInt_QHS<0,1,0b0111,0, IIC_VBINi4Q,
- "vabdl", "s", int_arm_neon_vabdls, 0>;
-defm VABDLu : N3VLInt_QHS<1,1,0b0111,0, IIC_VBINi4Q,
- "vabdl", "u", int_arm_neon_vabdlu, 0>;
-
-// VABA : Vector Absolute Difference and Accumulate
-defm VABAs : N3VInt3_QHS<0,0,0b0111,1, "vaba", "s", int_arm_neon_vabas>;
-defm VABAu : N3VInt3_QHS<1,0,0b0111,1, "vaba", "u", int_arm_neon_vabau>;
-
-// VABAL : Vector Absolute Difference and Accumulate Long (Q += | D - D |)
-defm VABALs : N3VLInt3_QHS<0,1,0b0101,0, "vabal", "s", int_arm_neon_vabals>;
-defm VABALu : N3VLInt3_QHS<1,1,0b0101,0, "vabal", "u", int_arm_neon_vabalu>;
-
-// Vector Maximum and Minimum.
-
-// VMAX : Vector Maximum
-defm VMAXs : N3VInt_QHS<0,0,0b0110,0, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
- IIC_VBINi4Q, "vmax", "s", int_arm_neon_vmaxs, 1>;
-defm VMAXu : N3VInt_QHS<1,0,0b0110,0, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
- IIC_VBINi4Q, "vmax", "u", int_arm_neon_vmaxu, 1>;
-def VMAXfd : N3VDInt<0, 0, 0b00, 0b1111, 0, IIC_VBIND, "vmax", "f32",
- v2f32, v2f32, int_arm_neon_vmaxs, 1>;
-def VMAXfq : N3VQInt<0, 0, 0b00, 0b1111, 0, IIC_VBINQ, "vmax", "f32",
- v4f32, v4f32, int_arm_neon_vmaxs, 1>;
-
-// VMIN : Vector Minimum
-defm VMINs : N3VInt_QHS<0,0,0b0110,1, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
- IIC_VBINi4Q, "vmin", "s", int_arm_neon_vmins, 1>;
-defm VMINu : N3VInt_QHS<1,0,0b0110,1, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
- IIC_VBINi4Q, "vmin", "u", int_arm_neon_vminu, 1>;
-def VMINfd : N3VDInt<0, 0, 0b10, 0b1111, 0, IIC_VBIND, "vmin", "f32",
- v2f32, v2f32, int_arm_neon_vmins, 1>;
-def VMINfq : N3VQInt<0, 0, 0b10, 0b1111, 0, IIC_VBINQ, "vmin", "f32",
- v4f32, v4f32, int_arm_neon_vmins, 1>;
-
-// Vector Pairwise Operations.
-
-// VPADD : Vector Pairwise Add
-def VPADDi8 : N3VDInt<0, 0, 0b00, 0b1011, 1, IIC_VBINiD, "vpadd", "i8",
- v8i8, v8i8, int_arm_neon_vpadd, 0>;
-def VPADDi16 : N3VDInt<0, 0, 0b01, 0b1011, 1, IIC_VBINiD, "vpadd", "i16",
- v4i16, v4i16, int_arm_neon_vpadd, 0>;
-def VPADDi32 : N3VDInt<0, 0, 0b10, 0b1011, 1, IIC_VBINiD, "vpadd", "i32",
- v2i32, v2i32, int_arm_neon_vpadd, 0>;
-def VPADDf : N3VDInt<1, 0, 0b00, 0b1101, 0, IIC_VBIND, "vpadd", "f32",
- v2f32, v2f32, int_arm_neon_vpadd, 0>;
-
-// VPADDL : Vector Pairwise Add Long
-defm VPADDLs : N2VPLInt_QHS<0b11, 0b11, 0b00, 0b00100, 0, "vpaddl", "s",
- int_arm_neon_vpaddls>;
-defm VPADDLu : N2VPLInt_QHS<0b11, 0b11, 0b00, 0b00101, 0, "vpaddl", "u",
- int_arm_neon_vpaddlu>;
-
-// VPADAL : Vector Pairwise Add and Accumulate Long
-defm VPADALs : N2VPLInt2_QHS<0b11, 0b11, 0b00, 0b01100, 0, "vpadal", "s",
- int_arm_neon_vpadals>;
-defm VPADALu : N2VPLInt2_QHS<0b11, 0b11, 0b00, 0b01101, 0, "vpadal", "u",
- int_arm_neon_vpadalu>;
-
-// VPMAX : Vector Pairwise Maximum
-def VPMAXs8 : N3VDInt<0, 0, 0b00, 0b1010, 0, IIC_VBINi4D, "vpmax", "s8",
- v8i8, v8i8, int_arm_neon_vpmaxs, 0>;
-def VPMAXs16 : N3VDInt<0, 0, 0b01, 0b1010, 0, IIC_VBINi4D, "vpmax", "s16",
- v4i16, v4i16, int_arm_neon_vpmaxs, 0>;
-def VPMAXs32 : N3VDInt<0, 0, 0b10, 0b1010, 0, IIC_VBINi4D, "vpmax", "s32",
- v2i32, v2i32, int_arm_neon_vpmaxs, 0>;
-def VPMAXu8 : N3VDInt<1, 0, 0b00, 0b1010, 0, IIC_VBINi4D, "vpmax", "u8",
- v8i8, v8i8, int_arm_neon_vpmaxu, 0>;
-def VPMAXu16 : N3VDInt<1, 0, 0b01, 0b1010, 0, IIC_VBINi4D, "vpmax", "u16",
- v4i16, v4i16, int_arm_neon_vpmaxu, 0>;
-def VPMAXu32 : N3VDInt<1, 0, 0b10, 0b1010, 0, IIC_VBINi4D, "vpmax", "u32",
- v2i32, v2i32, int_arm_neon_vpmaxu, 0>;
-def VPMAXf : N3VDInt<1, 0, 0b00, 0b1111, 0, IIC_VBINi4D, "vpmax", "f32",
- v2f32, v2f32, int_arm_neon_vpmaxs, 0>;
-
-// VPMIN : Vector Pairwise Minimum
-def VPMINs8 : N3VDInt<0, 0, 0b00, 0b1010, 1, IIC_VBINi4D, "vpmin", "s8",
- v8i8, v8i8, int_arm_neon_vpmins, 0>;
-def VPMINs16 : N3VDInt<0, 0, 0b01, 0b1010, 1, IIC_VBINi4D, "vpmin", "s16",
- v4i16, v4i16, int_arm_neon_vpmins, 0>;
-def VPMINs32 : N3VDInt<0, 0, 0b10, 0b1010, 1, IIC_VBINi4D, "vpmin", "s32",
- v2i32, v2i32, int_arm_neon_vpmins, 0>;
-def VPMINu8 : N3VDInt<1, 0, 0b00, 0b1010, 1, IIC_VBINi4D, "vpmin", "u8",
- v8i8, v8i8, int_arm_neon_vpminu, 0>;
-def VPMINu16 : N3VDInt<1, 0, 0b01, 0b1010, 1, IIC_VBINi4D, "vpmin", "u16",
- v4i16, v4i16, int_arm_neon_vpminu, 0>;
-def VPMINu32 : N3VDInt<1, 0, 0b10, 0b1010, 1, IIC_VBINi4D, "vpmin", "u32",
- v2i32, v2i32, int_arm_neon_vpminu, 0>;
-def VPMINf : N3VDInt<1, 0, 0b10, 0b1111, 0, IIC_VBINi4D, "vpmin", "f32",
- v2f32, v2f32, int_arm_neon_vpmins, 0>;
-
-// Vector Reciprocal and Reciprocal Square Root Estimate and Step.
-
-// VRECPE : Vector Reciprocal Estimate
-def VRECPEd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0,
- IIC_VUNAD, "vrecpe", "u32",
- v2i32, v2i32, int_arm_neon_vrecpe>;
-def VRECPEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0,
- IIC_VUNAQ, "vrecpe", "u32",
- v4i32, v4i32, int_arm_neon_vrecpe>;
-def VRECPEfd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01010, 0,
- IIC_VUNAD, "vrecpe", "f32",
- v2f32, v2f32, int_arm_neon_vrecpe>;
-def VRECPEfq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01010, 0,
- IIC_VUNAQ, "vrecpe", "f32",
- v4f32, v4f32, int_arm_neon_vrecpe>;
-
-// VRECPS : Vector Reciprocal Step
-def VRECPSfd : N3VDInt<0, 0, 0b00, 0b1111, 1,
- IIC_VRECSD, "vrecps", "f32",
- v2f32, v2f32, int_arm_neon_vrecps, 1>;
-def VRECPSfq : N3VQInt<0, 0, 0b00, 0b1111, 1,
- IIC_VRECSQ, "vrecps", "f32",
- v4f32, v4f32, int_arm_neon_vrecps, 1>;
-
-// VRSQRTE : Vector Reciprocal Square Root Estimate
-def VRSQRTEd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01001, 0,
- IIC_VUNAD, "vrsqrte", "u32",
- v2i32, v2i32, int_arm_neon_vrsqrte>;
-def VRSQRTEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01001, 0,
- IIC_VUNAQ, "vrsqrte", "u32",
- v4i32, v4i32, int_arm_neon_vrsqrte>;
-def VRSQRTEfd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0,
- IIC_VUNAD, "vrsqrte", "f32",
- v2f32, v2f32, int_arm_neon_vrsqrte>;
-def VRSQRTEfq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0,
- IIC_VUNAQ, "vrsqrte", "f32",
- v4f32, v4f32, int_arm_neon_vrsqrte>;
-
-// VRSQRTS : Vector Reciprocal Square Root Step
-def VRSQRTSfd : N3VDInt<0, 0, 0b10, 0b1111, 1,
- IIC_VRECSD, "vrsqrts", "f32",
- v2f32, v2f32, int_arm_neon_vrsqrts, 1>;
-def VRSQRTSfq : N3VQInt<0, 0, 0b10, 0b1111, 1,
- IIC_VRECSQ, "vrsqrts", "f32",
- v4f32, v4f32, int_arm_neon_vrsqrts, 1>;
-
-// Vector Shifts.
-
-// VSHL : Vector Shift
-defm VSHLs : N3VInt_QHSD<0, 0, 0b0100, 0, IIC_VSHLiD, IIC_VSHLiD, IIC_VSHLiQ,
- IIC_VSHLiQ, "vshl", "s", int_arm_neon_vshifts, 0>;
-defm VSHLu : N3VInt_QHSD<1, 0, 0b0100, 0, IIC_VSHLiD, IIC_VSHLiD, IIC_VSHLiQ,
- IIC_VSHLiQ, "vshl", "u", int_arm_neon_vshiftu, 0>;
-// VSHL : Vector Shift Left (Immediate)
-defm VSHLi : N2VSh_QHSD<0, 1, 0b0101, 1, IIC_VSHLiD, "vshl", "i", NEONvshl>;
-// VSHR : Vector Shift Right (Immediate)
-defm VSHRs : N2VSh_QHSD<0, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "s", NEONvshrs>;
-defm VSHRu : N2VSh_QHSD<1, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "u", NEONvshru>;
-
-// VSHLL : Vector Shift Left Long
-defm VSHLLs : N2VLSh_QHS<0, 1, 0b1010, 0, 0, 1, "vshll", "s", NEONvshlls>;
-defm VSHLLu : N2VLSh_QHS<1, 1, 0b1010, 0, 0, 1, "vshll", "u", NEONvshllu>;
-
-// VSHLL : Vector Shift Left Long (with maximum shift count)
-class N2VLShMax<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
- bit op6, bit op4, string OpcodeStr, string Dt, ValueType ResTy,
- ValueType OpTy, SDNode OpNode>
- : N2VLSh<op24, op23, op11_8, op7, op6, op4, OpcodeStr, Dt,
- ResTy, OpTy, OpNode> {
- let Inst{21-16} = op21_16;
-}
-def VSHLLi8 : N2VLShMax<1, 1, 0b110010, 0b0011, 0, 0, 0, "vshll", "i8",
- v8i16, v8i8, NEONvshlli>;
-def VSHLLi16 : N2VLShMax<1, 1, 0b110110, 0b0011, 0, 0, 0, "vshll", "i16",
- v4i32, v4i16, NEONvshlli>;
-def VSHLLi32 : N2VLShMax<1, 1, 0b111010, 0b0011, 0, 0, 0, "vshll", "i32",
- v2i64, v2i32, NEONvshlli>;
-
-// VSHRN : Vector Shift Right and Narrow
-defm VSHRN : N2VNSh_HSD<0,1,0b1000,0,0,1, IIC_VSHLiD, "vshrn", "i",
- NEONvshrn>;
-
-// VRSHL : Vector Rounding Shift
-defm VRSHLs : N3VInt_QHSD<0,0,0b0101,0, IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
- IIC_VSHLi4Q, "vrshl", "s", int_arm_neon_vrshifts,0>;
-defm VRSHLu : N3VInt_QHSD<1,0,0b0101,0, IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
- IIC_VSHLi4Q, "vrshl", "u", int_arm_neon_vrshiftu,0>;
-// VRSHR : Vector Rounding Shift Right
-defm VRSHRs : N2VSh_QHSD<0,1,0b0010,1, IIC_VSHLi4D, "vrshr", "s", NEONvrshrs>;
-defm VRSHRu : N2VSh_QHSD<1,1,0b0010,1, IIC_VSHLi4D, "vrshr", "u", NEONvrshru>;
-
-// VRSHRN : Vector Rounding Shift Right and Narrow
-defm VRSHRN : N2VNSh_HSD<0, 1, 0b1000, 0, 1, 1, IIC_VSHLi4D, "vrshrn", "i",
- NEONvrshrn>;
-
-// VQSHL : Vector Saturating Shift
-defm VQSHLs : N3VInt_QHSD<0,0,0b0100,1, IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
- IIC_VSHLi4Q, "vqshl", "s", int_arm_neon_vqshifts,0>;
-defm VQSHLu : N3VInt_QHSD<1,0,0b0100,1, IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
- IIC_VSHLi4Q, "vqshl", "u", int_arm_neon_vqshiftu,0>;
-// VQSHL : Vector Saturating Shift Left (Immediate)
-defm VQSHLsi : N2VSh_QHSD<0,1,0b0111,1, IIC_VSHLi4D, "vqshl", "s", NEONvqshls>;
-defm VQSHLui : N2VSh_QHSD<1,1,0b0111,1, IIC_VSHLi4D, "vqshl", "u", NEONvqshlu>;
-// VQSHLU : Vector Saturating Shift Left (Immediate, Unsigned)
-defm VQSHLsu : N2VSh_QHSD<1,1,0b0110,1, IIC_VSHLi4D, "vqshlu","s",NEONvqshlsu>;
-
-// VQSHRN : Vector Saturating Shift Right and Narrow
-defm VQSHRNs : N2VNSh_HSD<0, 1, 0b1001, 0, 0, 1, IIC_VSHLi4D, "vqshrn", "s",
- NEONvqshrns>;
-defm VQSHRNu : N2VNSh_HSD<1, 1, 0b1001, 0, 0, 1, IIC_VSHLi4D, "vqshrn", "u",
- NEONvqshrnu>;
-
-// VQSHRUN : Vector Saturating Shift Right and Narrow (Unsigned)
-defm VQSHRUN : N2VNSh_HSD<1, 1, 0b1000, 0, 0, 1, IIC_VSHLi4D, "vqshrun", "s",
- NEONvqshrnsu>;
-
-// VQRSHL : Vector Saturating Rounding Shift
-defm VQRSHLs : N3VInt_QHSD<0,0,0b0101,1, IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
- IIC_VSHLi4Q, "vqrshl", "s",
- int_arm_neon_vqrshifts, 0>;
-defm VQRSHLu : N3VInt_QHSD<1,0,0b0101,1, IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
- IIC_VSHLi4Q, "vqrshl", "u",
- int_arm_neon_vqrshiftu, 0>;
-
-// VQRSHRN : Vector Saturating Rounding Shift Right and Narrow
-defm VQRSHRNs : N2VNSh_HSD<0, 1, 0b1001, 0, 1, 1, IIC_VSHLi4D, "vqrshrn", "s",
- NEONvqrshrns>;
-defm VQRSHRNu : N2VNSh_HSD<1, 1, 0b1001, 0, 1, 1, IIC_VSHLi4D, "vqrshrn", "u",
- NEONvqrshrnu>;
-
-// VQRSHRUN : Vector Saturating Rounding Shift Right and Narrow (Unsigned)
-defm VQRSHRUN : N2VNSh_HSD<1, 1, 0b1000, 0, 1, 1, IIC_VSHLi4D, "vqrshrun", "s",
- NEONvqrshrnsu>;
-
-// VSRA : Vector Shift Right and Accumulate
-defm VSRAs : N2VShAdd_QHSD<0, 1, 0b0001, 1, "vsra", "s", NEONvshrs>;
-defm VSRAu : N2VShAdd_QHSD<1, 1, 0b0001, 1, "vsra", "u", NEONvshru>;
-// VRSRA : Vector Rounding Shift Right and Accumulate
-defm VRSRAs : N2VShAdd_QHSD<0, 1, 0b0011, 1, "vrsra", "s", NEONvrshrs>;
-defm VRSRAu : N2VShAdd_QHSD<1, 1, 0b0011, 1, "vrsra", "u", NEONvrshru>;
-
-// VSLI : Vector Shift Left and Insert
-defm VSLI : N2VShIns_QHSD<1, 1, 0b0101, 1, "vsli", NEONvsli>;
-// VSRI : Vector Shift Right and Insert
-defm VSRI : N2VShIns_QHSD<1, 1, 0b0100, 1, "vsri", NEONvsri>;
-
-// Vector Absolute and Saturating Absolute.
-
-// VABS : Vector Absolute Value
-defm VABS : N2VInt_QHS<0b11, 0b11, 0b01, 0b00110, 0,
- IIC_VUNAiD, IIC_VUNAiQ, "vabs", "s",
- int_arm_neon_vabs>;
-def VABSfd : N2VDInt<0b11, 0b11, 0b10, 0b01, 0b01110, 0,
- IIC_VUNAD, "vabs", "f32",
- v2f32, v2f32, int_arm_neon_vabs>;
-def VABSfq : N2VQInt<0b11, 0b11, 0b10, 0b01, 0b01110, 0,
- IIC_VUNAQ, "vabs", "f32",
- v4f32, v4f32, int_arm_neon_vabs>;
-
-// VQABS : Vector Saturating Absolute Value
-defm VQABS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01110, 0,
- IIC_VQUNAiD, IIC_VQUNAiQ, "vqabs", "s",
- int_arm_neon_vqabs>;
-
-// Vector Negate.
-
-def vneg : PatFrag<(ops node:$in), (sub immAllZerosV, node:$in)>;
-def vneg_conv : PatFrag<(ops node:$in), (sub immAllZerosV_bc, node:$in)>;
-
-class VNEGD<bits<2> size, string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, size, 0b01, 0b00111, 0, 0, (outs DPR:$dst), (ins DPR:$src),
- IIC_VSHLiD, OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (Ty (vneg DPR:$src)))]>;
-class VNEGQ<bits<2> size, string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, size, 0b01, 0b00111, 1, 0, (outs QPR:$dst), (ins QPR:$src),
- IIC_VSHLiD, OpcodeStr, Dt, "$dst, $src", "",
- [(set QPR:$dst, (Ty (vneg QPR:$src)))]>;
-
-// VNEG : Vector Negate
-def VNEGs8d : VNEGD<0b00, "vneg", "s8", v8i8>;
-def VNEGs16d : VNEGD<0b01, "vneg", "s16", v4i16>;
-def VNEGs32d : VNEGD<0b10, "vneg", "s32", v2i32>;
-def VNEGs8q : VNEGQ<0b00, "vneg", "s8", v16i8>;
-def VNEGs16q : VNEGQ<0b01, "vneg", "s16", v8i16>;
-def VNEGs32q : VNEGQ<0b10, "vneg", "s32", v4i32>;
-
-// VNEG : Vector Negate (floating-point)
-def VNEGfd : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 0, 0,
- (outs DPR:$dst), (ins DPR:$src), IIC_VUNAD,
- "vneg", "f32", "$dst, $src", "",
- [(set DPR:$dst, (v2f32 (fneg DPR:$src)))]>;
-def VNEGf32q : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 1, 0,
- (outs QPR:$dst), (ins QPR:$src), IIC_VUNAQ,
- "vneg", "f32", "$dst, $src", "",
- [(set QPR:$dst, (v4f32 (fneg QPR:$src)))]>;
-
-def : Pat<(v8i8 (vneg_conv DPR:$src)), (VNEGs8d DPR:$src)>;
-def : Pat<(v4i16 (vneg_conv DPR:$src)), (VNEGs16d DPR:$src)>;
-def : Pat<(v2i32 (vneg_conv DPR:$src)), (VNEGs32d DPR:$src)>;
-def : Pat<(v16i8 (vneg_conv QPR:$src)), (VNEGs8q QPR:$src)>;
-def : Pat<(v8i16 (vneg_conv QPR:$src)), (VNEGs16q QPR:$src)>;
-def : Pat<(v4i32 (vneg_conv QPR:$src)), (VNEGs32q QPR:$src)>;
-
-// VQNEG : Vector Saturating Negate
-defm VQNEG : N2VInt_QHS<0b11, 0b11, 0b00, 0b01111, 0,
- IIC_VQUNAiD, IIC_VQUNAiQ, "vqneg", "s",
- int_arm_neon_vqneg>;
-
-// Vector Bit Counting Operations.
-
-// VCLS : Vector Count Leading Sign Bits
-defm VCLS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01000, 0,
- IIC_VCNTiD, IIC_VCNTiQ, "vcls", "s",
- int_arm_neon_vcls>;
-// VCLZ : Vector Count Leading Zeros
-defm VCLZ : N2VInt_QHS<0b11, 0b11, 0b00, 0b01001, 0,
- IIC_VCNTiD, IIC_VCNTiQ, "vclz", "i",
- int_arm_neon_vclz>;
-// VCNT : Vector Count One Bits
-def VCNTd : N2VDInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0,
- IIC_VCNTiD, "vcnt", "8",
- v8i8, v8i8, int_arm_neon_vcnt>;
-def VCNTq : N2VQInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0,
- IIC_VCNTiQ, "vcnt", "8",
- v16i8, v16i8, int_arm_neon_vcnt>;
-
-// Vector Swap -- for disassembly only.
-def VSWPd : N2VX<0b11, 0b11, 0b00, 0b10, 0b00000, 0, 0,
- (outs DPR:$dst), (ins DPR:$src), NoItinerary,
- "vswp", "$dst, $src", "", []>;
-def VSWPq : N2VX<0b11, 0b11, 0b00, 0b10, 0b00000, 1, 0,
- (outs QPR:$dst), (ins QPR:$src), NoItinerary,
- "vswp", "$dst, $src", "", []>;
-
-// Vector Move Operations.
-
-// VMOV : Vector Move (Register)
-
-def VMOVDneon: N3VX<0, 0, 0b10, 0b0001, 0, 1, (outs DPR:$dst), (ins DPR:$src),
- IIC_VMOVD, "vmov", "$dst, $src", "", []>;
-def VMOVQ : N3VX<0, 0, 0b10, 0b0001, 1, 1, (outs QPR:$dst), (ins QPR:$src),
- IIC_VMOVD, "vmov", "$dst, $src", "", []>;
-
-// VMOV : Vector Move (Immediate)
-
-// VMOV_get_imm8 xform function: convert build_vector to VMOV.i8 imm.
-def VMOV_get_imm8 : SDNodeXForm<build_vector, [{
- return ARM::getVMOVImm(N, 1, *CurDAG);
-}]>;
-def vmovImm8 : PatLeaf<(build_vector), [{
- return ARM::getVMOVImm(N, 1, *CurDAG).getNode() != 0;
-}], VMOV_get_imm8>;
-
-// VMOV_get_imm16 xform function: convert build_vector to VMOV.i16 imm.
-def VMOV_get_imm16 : SDNodeXForm<build_vector, [{
- return ARM::getVMOVImm(N, 2, *CurDAG);
-}]>;
-def vmovImm16 : PatLeaf<(build_vector), [{
- return ARM::getVMOVImm(N, 2, *CurDAG).getNode() != 0;
-}], VMOV_get_imm16>;
-
-// VMOV_get_imm32 xform function: convert build_vector to VMOV.i32 imm.
-def VMOV_get_imm32 : SDNodeXForm<build_vector, [{
- return ARM::getVMOVImm(N, 4, *CurDAG);
-}]>;
-def vmovImm32 : PatLeaf<(build_vector), [{
- return ARM::getVMOVImm(N, 4, *CurDAG).getNode() != 0;
-}], VMOV_get_imm32>;
-
-// VMOV_get_imm64 xform function: convert build_vector to VMOV.i64 imm.
-def VMOV_get_imm64 : SDNodeXForm<build_vector, [{
- return ARM::getVMOVImm(N, 8, *CurDAG);
-}]>;
-def vmovImm64 : PatLeaf<(build_vector), [{
- return ARM::getVMOVImm(N, 8, *CurDAG).getNode() != 0;
-}], VMOV_get_imm64>;
-
-// Note: Some of the cmode bits in the following VMOV instructions need to
-// be encoded based on the immed values.
-
-def VMOVv8i8 : N1ModImm<1, 0b000, 0b1110, 0, 0, 0, 1, (outs DPR:$dst),
- (ins h8imm:$SIMM), IIC_VMOVImm,
- "vmov", "i8", "$dst, $SIMM", "",
- [(set DPR:$dst, (v8i8 vmovImm8:$SIMM))]>;
-def VMOVv16i8 : N1ModImm<1, 0b000, 0b1110, 0, 1, 0, 1, (outs QPR:$dst),
- (ins h8imm:$SIMM), IIC_VMOVImm,
- "vmov", "i8", "$dst, $SIMM", "",
- [(set QPR:$dst, (v16i8 vmovImm8:$SIMM))]>;
-
-def VMOVv4i16 : N1ModImm<1, 0b000, {1,0,?,?}, 0, 0, {?}, 1, (outs DPR:$dst),
- (ins h16imm:$SIMM), IIC_VMOVImm,
- "vmov", "i16", "$dst, $SIMM", "",
- [(set DPR:$dst, (v4i16 vmovImm16:$SIMM))]>;
-def VMOVv8i16 : N1ModImm<1, 0b000, {1,0,?,?}, 0, 1, {?}, 1, (outs QPR:$dst),
- (ins h16imm:$SIMM), IIC_VMOVImm,
- "vmov", "i16", "$dst, $SIMM", "",
- [(set QPR:$dst, (v8i16 vmovImm16:$SIMM))]>;
-
-def VMOVv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, {?}, 1, (outs DPR:$dst),
- (ins h32imm:$SIMM), IIC_VMOVImm,
- "vmov", "i32", "$dst, $SIMM", "",
- [(set DPR:$dst, (v2i32 vmovImm32:$SIMM))]>;
-def VMOVv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, {?}, 1, (outs QPR:$dst),
- (ins h32imm:$SIMM), IIC_VMOVImm,
- "vmov", "i32", "$dst, $SIMM", "",
- [(set QPR:$dst, (v4i32 vmovImm32:$SIMM))]>;
-
-def VMOVv1i64 : N1ModImm<1, 0b000, 0b1110, 0, 0, 1, 1, (outs DPR:$dst),
- (ins h64imm:$SIMM), IIC_VMOVImm,
- "vmov", "i64", "$dst, $SIMM", "",
- [(set DPR:$dst, (v1i64 vmovImm64:$SIMM))]>;
-def VMOVv2i64 : N1ModImm<1, 0b000, 0b1110, 0, 1, 1, 1, (outs QPR:$dst),
- (ins h64imm:$SIMM), IIC_VMOVImm,
- "vmov", "i64", "$dst, $SIMM", "",
- [(set QPR:$dst, (v2i64 vmovImm64:$SIMM))]>;
-
-// VMOV : Vector Get Lane (move scalar to ARM core register)
-
-def VGETLNs8 : NVGetLane<{1,1,1,0,0,1,?,1}, 0b1011, {?,?},
- (outs GPR:$dst), (ins DPR:$src, nohash_imm:$lane),
- IIC_VMOVSI, "vmov", "s8", "$dst, $src[$lane]",
- [(set GPR:$dst, (NEONvgetlanes (v8i8 DPR:$src),
- imm:$lane))]>;
-def VGETLNs16 : NVGetLane<{1,1,1,0,0,0,?,1}, 0b1011, {?,1},
- (outs GPR:$dst), (ins DPR:$src, nohash_imm:$lane),
- IIC_VMOVSI, "vmov", "s16", "$dst, $src[$lane]",
- [(set GPR:$dst, (NEONvgetlanes (v4i16 DPR:$src),
- imm:$lane))]>;
-def VGETLNu8 : NVGetLane<{1,1,1,0,1,1,?,1}, 0b1011, {?,?},
- (outs GPR:$dst), (ins DPR:$src, nohash_imm:$lane),
- IIC_VMOVSI, "vmov", "u8", "$dst, $src[$lane]",
- [(set GPR:$dst, (NEONvgetlaneu (v8i8 DPR:$src),
- imm:$lane))]>;
-def VGETLNu16 : NVGetLane<{1,1,1,0,1,0,?,1}, 0b1011, {?,1},
- (outs GPR:$dst), (ins DPR:$src, nohash_imm:$lane),
- IIC_VMOVSI, "vmov", "u16", "$dst, $src[$lane]",
- [(set GPR:$dst, (NEONvgetlaneu (v4i16 DPR:$src),
- imm:$lane))]>;
-def VGETLNi32 : NVGetLane<{1,1,1,0,0,0,?,1}, 0b1011, 0b00,
- (outs GPR:$dst), (ins DPR:$src, nohash_imm:$lane),
- IIC_VMOVSI, "vmov", "32", "$dst, $src[$lane]",
- [(set GPR:$dst, (extractelt (v2i32 DPR:$src),
- imm:$lane))]>;
-// def VGETLNf32: see FMRDH and FMRDL in ARMInstrVFP.td
-def : Pat<(NEONvgetlanes (v16i8 QPR:$src), imm:$lane),
- (VGETLNs8 (v8i8 (EXTRACT_SUBREG QPR:$src,
- (DSubReg_i8_reg imm:$lane))),
- (SubReg_i8_lane imm:$lane))>;
-def : Pat<(NEONvgetlanes (v8i16 QPR:$src), imm:$lane),
- (VGETLNs16 (v4i16 (EXTRACT_SUBREG QPR:$src,
- (DSubReg_i16_reg imm:$lane))),
- (SubReg_i16_lane imm:$lane))>;
-def : Pat<(NEONvgetlaneu (v16i8 QPR:$src), imm:$lane),
- (VGETLNu8 (v8i8 (EXTRACT_SUBREG QPR:$src,
- (DSubReg_i8_reg imm:$lane))),
- (SubReg_i8_lane imm:$lane))>;
-def : Pat<(NEONvgetlaneu (v8i16 QPR:$src), imm:$lane),
- (VGETLNu16 (v4i16 (EXTRACT_SUBREG QPR:$src,
- (DSubReg_i16_reg imm:$lane))),
- (SubReg_i16_lane imm:$lane))>;
-def : Pat<(extractelt (v4i32 QPR:$src), imm:$lane),
- (VGETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src,
- (DSubReg_i32_reg imm:$lane))),
- (SubReg_i32_lane imm:$lane))>;
-def : Pat<(extractelt (v2f32 DPR:$src1), imm:$src2),
- (EXTRACT_SUBREG (v2f32 (COPY_TO_REGCLASS (v2f32 DPR:$src1),DPR_VFP2)),
- (SSubReg_f32_reg imm:$src2))>;
-def : Pat<(extractelt (v4f32 QPR:$src1), imm:$src2),
- (EXTRACT_SUBREG (v4f32 (COPY_TO_REGCLASS (v4f32 QPR:$src1),QPR_VFP2)),
- (SSubReg_f32_reg imm:$src2))>;
-//def : Pat<(extractelt (v2i64 QPR:$src1), imm:$src2),
-// (EXTRACT_SUBREG QPR:$src1, (DSubReg_f64_reg imm:$src2))>;
-def : Pat<(extractelt (v2f64 QPR:$src1), imm:$src2),
- (EXTRACT_SUBREG QPR:$src1, (DSubReg_f64_reg imm:$src2))>;
-
-
-// VMOV : Vector Set Lane (move ARM core register to scalar)
-
-let Constraints = "$src1 = $dst" in {
-def VSETLNi8 : NVSetLane<{1,1,1,0,0,1,?,0}, 0b1011, {?,?}, (outs DPR:$dst),
- (ins DPR:$src1, GPR:$src2, nohash_imm:$lane),
- IIC_VMOVISL, "vmov", "8", "$dst[$lane], $src2",
- [(set DPR:$dst, (vector_insert (v8i8 DPR:$src1),
- GPR:$src2, imm:$lane))]>;
-def VSETLNi16 : NVSetLane<{1,1,1,0,0,0,?,0}, 0b1011, {?,1}, (outs DPR:$dst),
- (ins DPR:$src1, GPR:$src2, nohash_imm:$lane),
- IIC_VMOVISL, "vmov", "16", "$dst[$lane], $src2",
- [(set DPR:$dst, (vector_insert (v4i16 DPR:$src1),
- GPR:$src2, imm:$lane))]>;
-def VSETLNi32 : NVSetLane<{1,1,1,0,0,0,?,0}, 0b1011, 0b00, (outs DPR:$dst),
- (ins DPR:$src1, GPR:$src2, nohash_imm:$lane),
- IIC_VMOVISL, "vmov", "32", "$dst[$lane], $src2",
- [(set DPR:$dst, (insertelt (v2i32 DPR:$src1),
- GPR:$src2, imm:$lane))]>;
-}
-def : Pat<(vector_insert (v16i8 QPR:$src1), GPR:$src2, imm:$lane),
- (v16i8 (INSERT_SUBREG QPR:$src1,
- (VSETLNi8 (v8i8 (EXTRACT_SUBREG QPR:$src1,
- (DSubReg_i8_reg imm:$lane))),
- GPR:$src2, (SubReg_i8_lane imm:$lane)),
- (DSubReg_i8_reg imm:$lane)))>;
-def : Pat<(vector_insert (v8i16 QPR:$src1), GPR:$src2, imm:$lane),
- (v8i16 (INSERT_SUBREG QPR:$src1,
- (VSETLNi16 (v4i16 (EXTRACT_SUBREG QPR:$src1,
- (DSubReg_i16_reg imm:$lane))),
- GPR:$src2, (SubReg_i16_lane imm:$lane)),
- (DSubReg_i16_reg imm:$lane)))>;
-def : Pat<(insertelt (v4i32 QPR:$src1), GPR:$src2, imm:$lane),
- (v4i32 (INSERT_SUBREG QPR:$src1,
- (VSETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src1,
- (DSubReg_i32_reg imm:$lane))),
- GPR:$src2, (SubReg_i32_lane imm:$lane)),
- (DSubReg_i32_reg imm:$lane)))>;
-
-def : Pat<(v2f32 (insertelt DPR:$src1, SPR:$src2, imm:$src3)),
- (INSERT_SUBREG (v2f32 (COPY_TO_REGCLASS DPR:$src1, DPR_VFP2)),
- SPR:$src2, (SSubReg_f32_reg imm:$src3))>;
-def : Pat<(v4f32 (insertelt QPR:$src1, SPR:$src2, imm:$src3)),
- (INSERT_SUBREG (v4f32 (COPY_TO_REGCLASS QPR:$src1, QPR_VFP2)),
- SPR:$src2, (SSubReg_f32_reg imm:$src3))>;
-
-//def : Pat<(v2i64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
-// (INSERT_SUBREG QPR:$src1, DPR:$src2, (DSubReg_f64_reg imm:$src3))>;
-def : Pat<(v2f64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
- (INSERT_SUBREG QPR:$src1, DPR:$src2, (DSubReg_f64_reg imm:$src3))>;
-
-def : Pat<(v2f32 (scalar_to_vector SPR:$src)),
- (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), SPR:$src, arm_ssubreg_0)>;
-def : Pat<(v2f64 (scalar_to_vector DPR:$src)),
- (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), DPR:$src, arm_dsubreg_0)>;
-def : Pat<(v4f32 (scalar_to_vector SPR:$src)),
- (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), SPR:$src, arm_ssubreg_0)>;
-
-def : Pat<(v8i8 (scalar_to_vector GPR:$src)),
- (VSETLNi8 (v8i8 (IMPLICIT_DEF)), GPR:$src, (i32 0))>;
-def : Pat<(v4i16 (scalar_to_vector GPR:$src)),
- (VSETLNi16 (v4i16 (IMPLICIT_DEF)), GPR:$src, (i32 0))>;
-def : Pat<(v2i32 (scalar_to_vector GPR:$src)),
- (VSETLNi32 (v2i32 (IMPLICIT_DEF)), GPR:$src, (i32 0))>;
-
-def : Pat<(v16i8 (scalar_to_vector GPR:$src)),
- (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
- (VSETLNi8 (v8i8 (IMPLICIT_DEF)), GPR:$src, (i32 0)),
- arm_dsubreg_0)>;
-def : Pat<(v8i16 (scalar_to_vector GPR:$src)),
- (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
- (VSETLNi16 (v4i16 (IMPLICIT_DEF)), GPR:$src, (i32 0)),
- arm_dsubreg_0)>;
-def : Pat<(v4i32 (scalar_to_vector GPR:$src)),
- (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
- (VSETLNi32 (v2i32 (IMPLICIT_DEF)), GPR:$src, (i32 0)),
- arm_dsubreg_0)>;
-
-// VDUP : Vector Duplicate (from ARM core register to all elements)
-
-class VDUPD<bits<8> opcod1, bits<2> opcod3, string Dt, ValueType Ty>
- : NVDup<opcod1, 0b1011, opcod3, (outs DPR:$dst), (ins GPR:$src),
- IIC_VMOVIS, "vdup", Dt, "$dst, $src",
- [(set DPR:$dst, (Ty (NEONvdup (i32 GPR:$src))))]>;
-class VDUPQ<bits<8> opcod1, bits<2> opcod3, string Dt, ValueType Ty>
- : NVDup<opcod1, 0b1011, opcod3, (outs QPR:$dst), (ins GPR:$src),
- IIC_VMOVIS, "vdup", Dt, "$dst, $src",
- [(set QPR:$dst, (Ty (NEONvdup (i32 GPR:$src))))]>;
-
-def VDUP8d : VDUPD<0b11101100, 0b00, "8", v8i8>;
-def VDUP16d : VDUPD<0b11101000, 0b01, "16", v4i16>;
-def VDUP32d : VDUPD<0b11101000, 0b00, "32", v2i32>;
-def VDUP8q : VDUPQ<0b11101110, 0b00, "8", v16i8>;
-def VDUP16q : VDUPQ<0b11101010, 0b01, "16", v8i16>;
-def VDUP32q : VDUPQ<0b11101010, 0b00, "32", v4i32>;
-
-def VDUPfd : NVDup<0b11101000, 0b1011, 0b00, (outs DPR:$dst), (ins GPR:$src),
- IIC_VMOVIS, "vdup", "32", "$dst, $src",
- [(set DPR:$dst, (v2f32 (NEONvdup
- (f32 (bitconvert GPR:$src)))))]>;
-def VDUPfq : NVDup<0b11101010, 0b1011, 0b00, (outs QPR:$dst), (ins GPR:$src),
- IIC_VMOVIS, "vdup", "32", "$dst, $src",
- [(set QPR:$dst, (v4f32 (NEONvdup
- (f32 (bitconvert GPR:$src)))))]>;
-
-// VDUP : Vector Duplicate Lane (from scalar to all elements)
-
-class VDUPLND<bits<2> op19_18, bits<2> op17_16,
- string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, op19_18, op17_16, 0b11000, 0, 0,
- (outs DPR:$dst), (ins DPR:$src, nohash_imm:$lane), IIC_VMOVD,
- OpcodeStr, Dt, "$dst, $src[$lane]", "",
- [(set DPR:$dst, (Ty (NEONvduplane (Ty DPR:$src), imm:$lane)))]>;
-
-class VDUPLNQ<bits<2> op19_18, bits<2> op17_16, string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy>
- : N2V<0b11, 0b11, op19_18, op17_16, 0b11000, 1, 0,
- (outs QPR:$dst), (ins DPR:$src, nohash_imm:$lane), IIC_VMOVD,
- OpcodeStr, Dt, "$dst, $src[$lane]", "",
- [(set QPR:$dst, (ResTy (NEONvduplane (OpTy DPR:$src), imm:$lane)))]>;
-
-// Inst{19-16} is partially specified depending on the element size.
-
-def VDUPLN8d : VDUPLND<{?,?}, {?,1}, "vdup", "8", v8i8>;
-def VDUPLN16d : VDUPLND<{?,?}, {1,0}, "vdup", "16", v4i16>;
-def VDUPLN32d : VDUPLND<{?,1}, {0,0}, "vdup", "32", v2i32>;
-def VDUPLNfd : VDUPLND<{?,1}, {0,0}, "vdup", "32", v2f32>;
-def VDUPLN8q : VDUPLNQ<{?,?}, {?,1}, "vdup", "8", v16i8, v8i8>;
-def VDUPLN16q : VDUPLNQ<{?,?}, {1,0}, "vdup", "16", v8i16, v4i16>;
-def VDUPLN32q : VDUPLNQ<{?,1}, {0,0}, "vdup", "32", v4i32, v2i32>;
-def VDUPLNfq : VDUPLNQ<{?,1}, {0,0}, "vdup", "32", v4f32, v2f32>;
-
-def : Pat<(v16i8 (NEONvduplane (v16i8 QPR:$src), imm:$lane)),
- (v16i8 (VDUPLN8q (v8i8 (EXTRACT_SUBREG QPR:$src,
- (DSubReg_i8_reg imm:$lane))),
- (SubReg_i8_lane imm:$lane)))>;
-def : Pat<(v8i16 (NEONvduplane (v8i16 QPR:$src), imm:$lane)),
- (v8i16 (VDUPLN16q (v4i16 (EXTRACT_SUBREG QPR:$src,
- (DSubReg_i16_reg imm:$lane))),
- (SubReg_i16_lane imm:$lane)))>;
-def : Pat<(v4i32 (NEONvduplane (v4i32 QPR:$src), imm:$lane)),
- (v4i32 (VDUPLN32q (v2i32 (EXTRACT_SUBREG QPR:$src,
- (DSubReg_i32_reg imm:$lane))),
- (SubReg_i32_lane imm:$lane)))>;
-def : Pat<(v4f32 (NEONvduplane (v4f32 QPR:$src), imm:$lane)),
- (v4f32 (VDUPLNfq (v2f32 (EXTRACT_SUBREG QPR:$src,
- (DSubReg_i32_reg imm:$lane))),
- (SubReg_i32_lane imm:$lane)))>;
-
-def VDUPfdf : N2V<0b11, 0b11, {?,1}, {0,0}, 0b11000, 0, 0,
- (outs DPR:$dst), (ins SPR:$src),
- IIC_VMOVD, "vdup", "32", "$dst, ${src:lane}", "",
- [(set DPR:$dst, (v2f32 (NEONvdup (f32 SPR:$src))))]>;
-
-def VDUPfqf : N2V<0b11, 0b11, {?,1}, {0,0}, 0b11000, 1, 0,
- (outs QPR:$dst), (ins SPR:$src),
- IIC_VMOVD, "vdup", "32", "$dst, ${src:lane}", "",
- [(set QPR:$dst, (v4f32 (NEONvdup (f32 SPR:$src))))]>;
-
-def : Pat<(v2i64 (NEONvduplane (v2i64 QPR:$src), imm:$lane)),
- (INSERT_SUBREG QPR:$src,
- (i64 (EXTRACT_SUBREG QPR:$src,
- (DSubReg_f64_reg imm:$lane))),
- (DSubReg_f64_other_reg imm:$lane))>;
-def : Pat<(v2f64 (NEONvduplane (v2f64 QPR:$src), imm:$lane)),
- (INSERT_SUBREG QPR:$src,
- (f64 (EXTRACT_SUBREG QPR:$src,
- (DSubReg_f64_reg imm:$lane))),
- (DSubReg_f64_other_reg imm:$lane))>;
-
-// VMOVN : Vector Narrowing Move
-defm VMOVN : N2VNInt_HSD<0b11,0b11,0b10,0b00100,0,0, IIC_VMOVD,
- "vmovn", "i", int_arm_neon_vmovn>;
-// VQMOVN : Vector Saturating Narrowing Move
-defm VQMOVNs : N2VNInt_HSD<0b11,0b11,0b10,0b00101,0,0, IIC_VQUNAiD,
- "vqmovn", "s", int_arm_neon_vqmovns>;
-defm VQMOVNu : N2VNInt_HSD<0b11,0b11,0b10,0b00101,1,0, IIC_VQUNAiD,
- "vqmovn", "u", int_arm_neon_vqmovnu>;
-defm VQMOVNsu : N2VNInt_HSD<0b11,0b11,0b10,0b00100,1,0, IIC_VQUNAiD,
- "vqmovun", "s", int_arm_neon_vqmovnsu>;
-// VMOVL : Vector Lengthening Move
-defm VMOVLs : N2VLInt_QHS<0b01,0b10100,0,1, "vmovl", "s",
- int_arm_neon_vmovls>;
-defm VMOVLu : N2VLInt_QHS<0b11,0b10100,0,1, "vmovl", "u",
- int_arm_neon_vmovlu>;
-
-// Vector Conversions.
-
-// VCVT : Vector Convert Between Floating-Point and Integers
-def VCVTf2sd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt", "s32.f32",
- v2i32, v2f32, fp_to_sint>;
-def VCVTf2ud : N2VD<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt", "u32.f32",
- v2i32, v2f32, fp_to_uint>;
-def VCVTs2fd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt", "f32.s32",
- v2f32, v2i32, sint_to_fp>;
-def VCVTu2fd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32",
- v2f32, v2i32, uint_to_fp>;
-
-def VCVTf2sq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt", "s32.f32",
- v4i32, v4f32, fp_to_sint>;
-def VCVTf2uq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt", "u32.f32",
- v4i32, v4f32, fp_to_uint>;
-def VCVTs2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt", "f32.s32",
- v4f32, v4i32, sint_to_fp>;
-def VCVTu2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32",
- v4f32, v4i32, uint_to_fp>;
-
-// VCVT : Vector Convert Between Floating-Point and Fixed-Point.
-def VCVTf2xsd : N2VCvtD<0, 1, 0b1111, 0, 1, "vcvt", "s32.f32",
- v2i32, v2f32, int_arm_neon_vcvtfp2fxs>;
-def VCVTf2xud : N2VCvtD<1, 1, 0b1111, 0, 1, "vcvt", "u32.f32",
- v2i32, v2f32, int_arm_neon_vcvtfp2fxu>;
-def VCVTxs2fd : N2VCvtD<0, 1, 0b1110, 0, 1, "vcvt", "f32.s32",
- v2f32, v2i32, int_arm_neon_vcvtfxs2fp>;
-def VCVTxu2fd : N2VCvtD<1, 1, 0b1110, 0, 1, "vcvt", "f32.u32",
- v2f32, v2i32, int_arm_neon_vcvtfxu2fp>;
-
-def VCVTf2xsq : N2VCvtQ<0, 1, 0b1111, 0, 1, "vcvt", "s32.f32",
- v4i32, v4f32, int_arm_neon_vcvtfp2fxs>;
-def VCVTf2xuq : N2VCvtQ<1, 1, 0b1111, 0, 1, "vcvt", "u32.f32",
- v4i32, v4f32, int_arm_neon_vcvtfp2fxu>;
-def VCVTxs2fq : N2VCvtQ<0, 1, 0b1110, 0, 1, "vcvt", "f32.s32",
- v4f32, v4i32, int_arm_neon_vcvtfxs2fp>;
-def VCVTxu2fq : N2VCvtQ<1, 1, 0b1110, 0, 1, "vcvt", "f32.u32",
- v4f32, v4i32, int_arm_neon_vcvtfxu2fp>;
-
-// Vector Reverse.
-
-// VREV64 : Vector Reverse elements within 64-bit doublewords
-
-class VREV64D<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, op19_18, 0b00, 0b00000, 0, 0, (outs DPR:$dst),
- (ins DPR:$src), IIC_VMOVD,
- OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (Ty (NEONvrev64 (Ty DPR:$src))))]>;
-class VREV64Q<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, op19_18, 0b00, 0b00000, 1, 0, (outs QPR:$dst),
- (ins QPR:$src), IIC_VMOVD,
- OpcodeStr, Dt, "$dst, $src", "",
- [(set QPR:$dst, (Ty (NEONvrev64 (Ty QPR:$src))))]>;
-
-def VREV64d8 : VREV64D<0b00, "vrev64", "8", v8i8>;
-def VREV64d16 : VREV64D<0b01, "vrev64", "16", v4i16>;
-def VREV64d32 : VREV64D<0b10, "vrev64", "32", v2i32>;
-def VREV64df : VREV64D<0b10, "vrev64", "32", v2f32>;
-
-def VREV64q8 : VREV64Q<0b00, "vrev64", "8", v16i8>;
-def VREV64q16 : VREV64Q<0b01, "vrev64", "16", v8i16>;
-def VREV64q32 : VREV64Q<0b10, "vrev64", "32", v4i32>;
-def VREV64qf : VREV64Q<0b10, "vrev64", "32", v4f32>;
-
-// VREV32 : Vector Reverse elements within 32-bit words
-
-class VREV32D<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, op19_18, 0b00, 0b00001, 0, 0, (outs DPR:$dst),
- (ins DPR:$src), IIC_VMOVD,
- OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (Ty (NEONvrev32 (Ty DPR:$src))))]>;
-class VREV32Q<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, op19_18, 0b00, 0b00001, 1, 0, (outs QPR:$dst),
- (ins QPR:$src), IIC_VMOVD,
- OpcodeStr, Dt, "$dst, $src", "",
- [(set QPR:$dst, (Ty (NEONvrev32 (Ty QPR:$src))))]>;
-
-def VREV32d8 : VREV32D<0b00, "vrev32", "8", v8i8>;
-def VREV32d16 : VREV32D<0b01, "vrev32", "16", v4i16>;
-
-def VREV32q8 : VREV32Q<0b00, "vrev32", "8", v16i8>;
-def VREV32q16 : VREV32Q<0b01, "vrev32", "16", v8i16>;
-
-// VREV16 : Vector Reverse elements within 16-bit halfwords
-
-class VREV16D<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, op19_18, 0b00, 0b00010, 0, 0, (outs DPR:$dst),
- (ins DPR:$src), IIC_VMOVD,
- OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (Ty (NEONvrev16 (Ty DPR:$src))))]>;
-class VREV16Q<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
- : N2V<0b11, 0b11, op19_18, 0b00, 0b00010, 1, 0, (outs QPR:$dst),
- (ins QPR:$src), IIC_VMOVD,
- OpcodeStr, Dt, "$dst, $src", "",
- [(set QPR:$dst, (Ty (NEONvrev16 (Ty QPR:$src))))]>;
-
-def VREV16d8 : VREV16D<0b00, "vrev16", "8", v8i8>;
-def VREV16q8 : VREV16Q<0b00, "vrev16", "8", v16i8>;
-
-// Other Vector Shuffles.
-
-// VEXT : Vector Extract
-
-class VEXTd<string OpcodeStr, string Dt, ValueType Ty>
- : N3V<0,1,0b11,{?,?,?,?},0,0, (outs DPR:$dst),
- (ins DPR:$lhs, DPR:$rhs, i32imm:$index), IIC_VEXTD,
- OpcodeStr, Dt, "$dst, $lhs, $rhs, $index", "",
- [(set DPR:$dst, (Ty (NEONvext (Ty DPR:$lhs),
- (Ty DPR:$rhs), imm:$index)))]>;
-
-class VEXTq<string OpcodeStr, string Dt, ValueType Ty>
- : N3V<0,1,0b11,{?,?,?,?},1,0, (outs QPR:$dst),
- (ins QPR:$lhs, QPR:$rhs, i32imm:$index), IIC_VEXTQ,
- OpcodeStr, Dt, "$dst, $lhs, $rhs, $index", "",
- [(set QPR:$dst, (Ty (NEONvext (Ty QPR:$lhs),
- (Ty QPR:$rhs), imm:$index)))]>;
-
-def VEXTd8 : VEXTd<"vext", "8", v8i8>;
-def VEXTd16 : VEXTd<"vext", "16", v4i16>;
-def VEXTd32 : VEXTd<"vext", "32", v2i32>;
-def VEXTdf : VEXTd<"vext", "32", v2f32>;
-
-def VEXTq8 : VEXTq<"vext", "8", v16i8>;
-def VEXTq16 : VEXTq<"vext", "16", v8i16>;
-def VEXTq32 : VEXTq<"vext", "32", v4i32>;
-def VEXTqf : VEXTq<"vext", "32", v4f32>;
-
-// VTRN : Vector Transpose
-
-def VTRNd8 : N2VDShuffle<0b00, 0b00001, "vtrn", "8">;
-def VTRNd16 : N2VDShuffle<0b01, 0b00001, "vtrn", "16">;
-def VTRNd32 : N2VDShuffle<0b10, 0b00001, "vtrn", "32">;
-
-def VTRNq8 : N2VQShuffle<0b00, 0b00001, IIC_VPERMQ, "vtrn", "8">;
-def VTRNq16 : N2VQShuffle<0b01, 0b00001, IIC_VPERMQ, "vtrn", "16">;
-def VTRNq32 : N2VQShuffle<0b10, 0b00001, IIC_VPERMQ, "vtrn", "32">;
-
-// VUZP : Vector Unzip (Deinterleave)
-
-def VUZPd8 : N2VDShuffle<0b00, 0b00010, "vuzp", "8">;
-def VUZPd16 : N2VDShuffle<0b01, 0b00010, "vuzp", "16">;
-def VUZPd32 : N2VDShuffle<0b10, 0b00010, "vuzp", "32">;
-
-def VUZPq8 : N2VQShuffle<0b00, 0b00010, IIC_VPERMQ3, "vuzp", "8">;
-def VUZPq16 : N2VQShuffle<0b01, 0b00010, IIC_VPERMQ3, "vuzp", "16">;
-def VUZPq32 : N2VQShuffle<0b10, 0b00010, IIC_VPERMQ3, "vuzp", "32">;
-
-// VZIP : Vector Zip (Interleave)
-
-def VZIPd8 : N2VDShuffle<0b00, 0b00011, "vzip", "8">;
-def VZIPd16 : N2VDShuffle<0b01, 0b00011, "vzip", "16">;
-def VZIPd32 : N2VDShuffle<0b10, 0b00011, "vzip", "32">;
-
-def VZIPq8 : N2VQShuffle<0b00, 0b00011, IIC_VPERMQ3, "vzip", "8">;
-def VZIPq16 : N2VQShuffle<0b01, 0b00011, IIC_VPERMQ3, "vzip", "16">;
-def VZIPq32 : N2VQShuffle<0b10, 0b00011, IIC_VPERMQ3, "vzip", "32">;
-
-// Vector Table Lookup and Table Extension.
-
-// VTBL : Vector Table Lookup
-def VTBL1
- : N3V<1,1,0b11,0b1000,0,0, (outs DPR:$dst),
- (ins DPR:$tbl1, DPR:$src), IIC_VTB1,
- "vtbl", "8", "$dst, \\{$tbl1\\}, $src", "",
- [(set DPR:$dst, (v8i8 (int_arm_neon_vtbl1 DPR:$tbl1, DPR:$src)))]>;
-let hasExtraSrcRegAllocReq = 1 in {
-def VTBL2
- : N3V<1,1,0b11,0b1001,0,0, (outs DPR:$dst),
- (ins DPR:$tbl1, DPR:$tbl2, DPR:$src), IIC_VTB2,
- "vtbl", "8", "$dst, \\{$tbl1, $tbl2\\}, $src", "",
- [(set DPR:$dst, (v8i8 (int_arm_neon_vtbl2
- DPR:$tbl1, DPR:$tbl2, DPR:$src)))]>;
-def VTBL3
- : N3V<1,1,0b11,0b1010,0,0, (outs DPR:$dst),
- (ins DPR:$tbl1, DPR:$tbl2, DPR:$tbl3, DPR:$src), IIC_VTB3,
- "vtbl", "8", "$dst, \\{$tbl1, $tbl2, $tbl3\\}, $src", "",
- [(set DPR:$dst, (v8i8 (int_arm_neon_vtbl3
- DPR:$tbl1, DPR:$tbl2, DPR:$tbl3, DPR:$src)))]>;
-def VTBL4
- : N3V<1,1,0b11,0b1011,0,0, (outs DPR:$dst),
- (ins DPR:$tbl1, DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$src), IIC_VTB4,
- "vtbl", "8", "$dst, \\{$tbl1, $tbl2, $tbl3, $tbl4\\}, $src", "",
- [(set DPR:$dst, (v8i8 (int_arm_neon_vtbl4 DPR:$tbl1, DPR:$tbl2,
- DPR:$tbl3, DPR:$tbl4, DPR:$src)))]>;
-} // hasExtraSrcRegAllocReq = 1
-
-// VTBX : Vector Table Extension
-def VTBX1
- : N3V<1,1,0b11,0b1000,1,0, (outs DPR:$dst),
- (ins DPR:$orig, DPR:$tbl1, DPR:$src), IIC_VTBX1,
- "vtbx", "8", "$dst, \\{$tbl1\\}, $src", "$orig = $dst",
- [(set DPR:$dst, (v8i8 (int_arm_neon_vtbx1
- DPR:$orig, DPR:$tbl1, DPR:$src)))]>;
-let hasExtraSrcRegAllocReq = 1 in {
-def VTBX2
- : N3V<1,1,0b11,0b1001,1,0, (outs DPR:$dst),
- (ins DPR:$orig, DPR:$tbl1, DPR:$tbl2, DPR:$src), IIC_VTBX2,
- "vtbx", "8", "$dst, \\{$tbl1, $tbl2\\}, $src", "$orig = $dst",
- [(set DPR:$dst, (v8i8 (int_arm_neon_vtbx2
- DPR:$orig, DPR:$tbl1, DPR:$tbl2, DPR:$src)))]>;
-def VTBX3
- : N3V<1,1,0b11,0b1010,1,0, (outs DPR:$dst),
- (ins DPR:$orig, DPR:$tbl1, DPR:$tbl2, DPR:$tbl3, DPR:$src), IIC_VTBX3,
- "vtbx", "8", "$dst, \\{$tbl1, $tbl2, $tbl3\\}, $src", "$orig = $dst",
- [(set DPR:$dst, (v8i8 (int_arm_neon_vtbx3 DPR:$orig, DPR:$tbl1,
- DPR:$tbl2, DPR:$tbl3, DPR:$src)))]>;
-def VTBX4
- : N3V<1,1,0b11,0b1011,1,0, (outs DPR:$dst), (ins DPR:$orig, DPR:$tbl1,
- DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$src), IIC_VTBX4,
- "vtbx", "8", "$dst, \\{$tbl1, $tbl2, $tbl3, $tbl4\\}, $src",
- "$orig = $dst",
- [(set DPR:$dst, (v8i8 (int_arm_neon_vtbx4 DPR:$orig, DPR:$tbl1,
- DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$src)))]>;
-} // hasExtraSrcRegAllocReq = 1
-
-//===----------------------------------------------------------------------===//
-// NEON instructions for single-precision FP math
-//===----------------------------------------------------------------------===//
-
-class N2VSPat<SDNode OpNode, ValueType ResTy, ValueType OpTy, NeonI Inst>
- : NEONFPPat<(ResTy (OpNode SPR:$a)),
- (EXTRACT_SUBREG (Inst (INSERT_SUBREG (OpTy (IMPLICIT_DEF)),
- SPR:$a, arm_ssubreg_0)),
- arm_ssubreg_0)>;
-
-class N3VSPat<SDNode OpNode, NeonI Inst>
- : NEONFPPat<(f32 (OpNode SPR:$a, SPR:$b)),
- (EXTRACT_SUBREG (Inst (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
- SPR:$a, arm_ssubreg_0),
- (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
- SPR:$b, arm_ssubreg_0)),
- arm_ssubreg_0)>;
-
-class N3VSMulOpPat<SDNode MulNode, SDNode OpNode, NeonI Inst>
- : NEONFPPat<(f32 (OpNode SPR:$acc, (f32 (MulNode SPR:$a, SPR:$b)))),
- (EXTRACT_SUBREG (Inst (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
- SPR:$acc, arm_ssubreg_0),
- (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
- SPR:$a, arm_ssubreg_0),
- (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
- SPR:$b, arm_ssubreg_0)),
- arm_ssubreg_0)>;
-
-// These need separate instructions because they must use the DPR_VFP2 register
-// class, which has SPR sub-registers.
-
-// Vector Add Operations used for single-precision FP
-let neverHasSideEffects = 1 in
-def VADDfd_sfp : N3VS<0,0,0b00,0b1101,0, "vadd", "f32", v2f32, v2f32, fadd, 1>;
-def : N3VSPat<fadd, VADDfd_sfp>;
-
-// Vector Sub Operations used for single-precision FP
-let neverHasSideEffects = 1 in
-def VSUBfd_sfp : N3VS<0,0,0b10,0b1101,0, "vsub", "f32", v2f32, v2f32, fsub, 0>;
-def : N3VSPat<fsub, VSUBfd_sfp>;
-
-// Vector Multiply Operations used for single-precision FP
-let neverHasSideEffects = 1 in
-def VMULfd_sfp : N3VS<1,0,0b00,0b1101,1, "vmul", "f32", v2f32, v2f32, fmul, 1>;
-def : N3VSPat<fmul, VMULfd_sfp>;
-
-// Vector Multiply-Accumulate/Subtract used for single-precision FP
-// vml[as].f32 can cause 4-8 cycle stalls in following ASIMD instructions, so
-// we want to avoid them for now. e.g., alternating vmla/vadd instructions.
-
-//let neverHasSideEffects = 1 in
-//def VMLAfd_sfp : N3VSMulOp<0,0,0b00,0b1101,1, IIC_VMACD, "vmla", "f32",
-// v2f32, fmul, fadd>;
-//def : N3VSMulOpPat<fmul, fadd, VMLAfd_sfp>;
-
-//let neverHasSideEffects = 1 in
-//def VMLSfd_sfp : N3VSMulOp<0,0,0b10,0b1101,1, IIC_VMACD, "vmls", "f32",
-// v2f32, fmul, fsub>;
-//def : N3VSMulOpPat<fmul, fsub, VMLSfd_sfp>;
-
-// Vector Absolute used for single-precision FP
-let neverHasSideEffects = 1 in
-def VABSfd_sfp : N2V<0b11, 0b11, 0b10, 0b01, 0b01110, 0, 0,
- (outs DPR_VFP2:$dst), (ins DPR_VFP2:$src), IIC_VUNAD,
- "vabs", "f32", "$dst, $src", "", []>;
-def : N2VSPat<fabs, f32, v2f32, VABSfd_sfp>;
-
-// Vector Negate used for single-precision FP
-let neverHasSideEffects = 1 in
-def VNEGfd_sfp : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 0, 0,
- (outs DPR_VFP2:$dst), (ins DPR_VFP2:$src), IIC_VUNAD,
- "vneg", "f32", "$dst, $src", "", []>;
-def : N2VSPat<fneg, f32, v2f32, VNEGfd_sfp>;
-
-// Vector Maximum used for single-precision FP
-let neverHasSideEffects = 1 in
-def VMAXfd_sfp : N3V<0, 0, 0b00, 0b1111, 0, 0, (outs DPR_VFP2:$dst),
- (ins DPR_VFP2:$src1, DPR_VFP2:$src2), IIC_VBIND,
- "vmax", "f32", "$dst, $src1, $src2", "", []>;
-def : N3VSPat<NEONfmax, VMAXfd_sfp>;
-
-// Vector Minimum used for single-precision FP
-let neverHasSideEffects = 1 in
-def VMINfd_sfp : N3V<0, 0, 0b00, 0b1111, 0, 0, (outs DPR_VFP2:$dst),
- (ins DPR_VFP2:$src1, DPR_VFP2:$src2), IIC_VBIND,
- "vmin", "f32", "$dst, $src1, $src2", "", []>;
-def : N3VSPat<NEONfmin, VMINfd_sfp>;
-
-// Vector Convert between single-precision FP and integer
-let neverHasSideEffects = 1 in
-def VCVTf2sd_sfp : N2VS<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt", "s32.f32",
- v2i32, v2f32, fp_to_sint>;
-def : N2VSPat<arm_ftosi, f32, v2f32, VCVTf2sd_sfp>;
-
-let neverHasSideEffects = 1 in
-def VCVTf2ud_sfp : N2VS<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt", "u32.f32",
- v2i32, v2f32, fp_to_uint>;
-def : N2VSPat<arm_ftoui, f32, v2f32, VCVTf2ud_sfp>;
-
-let neverHasSideEffects = 1 in
-def VCVTs2fd_sfp : N2VS<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt", "f32.s32",
- v2f32, v2i32, sint_to_fp>;
-def : N2VSPat<arm_sitof, f32, v2i32, VCVTs2fd_sfp>;
-
-let neverHasSideEffects = 1 in
-def VCVTu2fd_sfp : N2VS<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32",
- v2f32, v2i32, uint_to_fp>;
-def : N2VSPat<arm_uitof, f32, v2i32, VCVTu2fd_sfp>;
-
-//===----------------------------------------------------------------------===//
-// Non-Instruction Patterns
-//===----------------------------------------------------------------------===//
-
-// bit_convert
-def : Pat<(v1i64 (bitconvert (v2i32 DPR:$src))), (v1i64 DPR:$src)>;
-def : Pat<(v1i64 (bitconvert (v4i16 DPR:$src))), (v1i64 DPR:$src)>;
-def : Pat<(v1i64 (bitconvert (v8i8 DPR:$src))), (v1i64 DPR:$src)>;
-def : Pat<(v1i64 (bitconvert (f64 DPR:$src))), (v1i64 DPR:$src)>;
-def : Pat<(v1i64 (bitconvert (v2f32 DPR:$src))), (v1i64 DPR:$src)>;
-def : Pat<(v2i32 (bitconvert (v1i64 DPR:$src))), (v2i32 DPR:$src)>;
-def : Pat<(v2i32 (bitconvert (v4i16 DPR:$src))), (v2i32 DPR:$src)>;
-def : Pat<(v2i32 (bitconvert (v8i8 DPR:$src))), (v2i32 DPR:$src)>;
-def : Pat<(v2i32 (bitconvert (f64 DPR:$src))), (v2i32 DPR:$src)>;
-def : Pat<(v2i32 (bitconvert (v2f32 DPR:$src))), (v2i32 DPR:$src)>;
-def : Pat<(v4i16 (bitconvert (v1i64 DPR:$src))), (v4i16 DPR:$src)>;
-def : Pat<(v4i16 (bitconvert (v2i32 DPR:$src))), (v4i16 DPR:$src)>;
-def : Pat<(v4i16 (bitconvert (v8i8 DPR:$src))), (v4i16 DPR:$src)>;
-def : Pat<(v4i16 (bitconvert (f64 DPR:$src))), (v4i16 DPR:$src)>;
-def : Pat<(v4i16 (bitconvert (v2f32 DPR:$src))), (v4i16 DPR:$src)>;
-def : Pat<(v8i8 (bitconvert (v1i64 DPR:$src))), (v8i8 DPR:$src)>;
-def : Pat<(v8i8 (bitconvert (v2i32 DPR:$src))), (v8i8 DPR:$src)>;
-def : Pat<(v8i8 (bitconvert (v4i16 DPR:$src))), (v8i8 DPR:$src)>;
-def : Pat<(v8i8 (bitconvert (f64 DPR:$src))), (v8i8 DPR:$src)>;
-def : Pat<(v8i8 (bitconvert (v2f32 DPR:$src))), (v8i8 DPR:$src)>;
-def : Pat<(f64 (bitconvert (v1i64 DPR:$src))), (f64 DPR:$src)>;
-def : Pat<(f64 (bitconvert (v2i32 DPR:$src))), (f64 DPR:$src)>;
-def : Pat<(f64 (bitconvert (v4i16 DPR:$src))), (f64 DPR:$src)>;
-def : Pat<(f64 (bitconvert (v8i8 DPR:$src))), (f64 DPR:$src)>;
-def : Pat<(f64 (bitconvert (v2f32 DPR:$src))), (f64 DPR:$src)>;
-def : Pat<(v2f32 (bitconvert (f64 DPR:$src))), (v2f32 DPR:$src)>;
-def : Pat<(v2f32 (bitconvert (v1i64 DPR:$src))), (v2f32 DPR:$src)>;
-def : Pat<(v2f32 (bitconvert (v2i32 DPR:$src))), (v2f32 DPR:$src)>;
-def : Pat<(v2f32 (bitconvert (v4i16 DPR:$src))), (v2f32 DPR:$src)>;
-def : Pat<(v2f32 (bitconvert (v8i8 DPR:$src))), (v2f32 DPR:$src)>;
-
-def : Pat<(v2i64 (bitconvert (v4i32 QPR:$src))), (v2i64 QPR:$src)>;
-def : Pat<(v2i64 (bitconvert (v8i16 QPR:$src))), (v2i64 QPR:$src)>;
-def : Pat<(v2i64 (bitconvert (v16i8 QPR:$src))), (v2i64 QPR:$src)>;
-def : Pat<(v2i64 (bitconvert (v2f64 QPR:$src))), (v2i64 QPR:$src)>;
-def : Pat<(v2i64 (bitconvert (v4f32 QPR:$src))), (v2i64 QPR:$src)>;
-def : Pat<(v4i32 (bitconvert (v2i64 QPR:$src))), (v4i32 QPR:$src)>;
-def : Pat<(v4i32 (bitconvert (v8i16 QPR:$src))), (v4i32 QPR:$src)>;
-def : Pat<(v4i32 (bitconvert (v16i8 QPR:$src))), (v4i32 QPR:$src)>;
-def : Pat<(v4i32 (bitconvert (v2f64 QPR:$src))), (v4i32 QPR:$src)>;
-def : Pat<(v4i32 (bitconvert (v4f32 QPR:$src))), (v4i32 QPR:$src)>;
-def : Pat<(v8i16 (bitconvert (v2i64 QPR:$src))), (v8i16 QPR:$src)>;
-def : Pat<(v8i16 (bitconvert (v4i32 QPR:$src))), (v8i16 QPR:$src)>;
-def : Pat<(v8i16 (bitconvert (v16i8 QPR:$src))), (v8i16 QPR:$src)>;
-def : Pat<(v8i16 (bitconvert (v2f64 QPR:$src))), (v8i16 QPR:$src)>;
-def : Pat<(v8i16 (bitconvert (v4f32 QPR:$src))), (v8i16 QPR:$src)>;
-def : Pat<(v16i8 (bitconvert (v2i64 QPR:$src))), (v16i8 QPR:$src)>;
-def : Pat<(v16i8 (bitconvert (v4i32 QPR:$src))), (v16i8 QPR:$src)>;
-def : Pat<(v16i8 (bitconvert (v8i16 QPR:$src))), (v16i8 QPR:$src)>;
-def : Pat<(v16i8 (bitconvert (v2f64 QPR:$src))), (v16i8 QPR:$src)>;
-def : Pat<(v16i8 (bitconvert (v4f32 QPR:$src))), (v16i8 QPR:$src)>;
-def : Pat<(v4f32 (bitconvert (v2i64 QPR:$src))), (v4f32 QPR:$src)>;
-def : Pat<(v4f32 (bitconvert (v4i32 QPR:$src))), (v4f32 QPR:$src)>;
-def : Pat<(v4f32 (bitconvert (v8i16 QPR:$src))), (v4f32 QPR:$src)>;
-def : Pat<(v4f32 (bitconvert (v16i8 QPR:$src))), (v4f32 QPR:$src)>;
-def : Pat<(v4f32 (bitconvert (v2f64 QPR:$src))), (v4f32 QPR:$src)>;
-def : Pat<(v2f64 (bitconvert (v2i64 QPR:$src))), (v2f64 QPR:$src)>;
-def : Pat<(v2f64 (bitconvert (v4i32 QPR:$src))), (v2f64 QPR:$src)>;
-def : Pat<(v2f64 (bitconvert (v8i16 QPR:$src))), (v2f64 QPR:$src)>;
-def : Pat<(v2f64 (bitconvert (v16i8 QPR:$src))), (v2f64 QPR:$src)>;
-def : Pat<(v2f64 (bitconvert (v4f32 QPR:$src))), (v2f64 QPR:$src)>;
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMInstrThumb.td b/libclamav/c++/llvm/lib/Target/ARM/ARMInstrThumb.td
deleted file mode 100644
index 786dd65..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMInstrThumb.td
+++ /dev/null
@@ -1,1007 +0,0 @@
-//===- ARMInstrThumb.td - Thumb support for ARM ---------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the Thumb instruction set.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Thumb specific DAG Nodes.
-//
-
-def ARMtcall : SDNode<"ARMISD::tCALL", SDT_ARMcall,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
-
-def imm_neg_XFORM : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(-(int)N->getZExtValue(), MVT::i32);
-}]>;
-def imm_comp_XFORM : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), MVT::i32);
-}]>;
-
-
-/// imm0_7 predicate - True if the 32-bit immediate is in the range [0,7].
-def imm0_7 : PatLeaf<(i32 imm), [{
- return (uint32_t)N->getZExtValue() < 8;
-}]>;
-def imm0_7_neg : PatLeaf<(i32 imm), [{
- return (uint32_t)-N->getZExtValue() < 8;
-}], imm_neg_XFORM>;
-
-def imm0_255 : PatLeaf<(i32 imm), [{
- return (uint32_t)N->getZExtValue() < 256;
-}]>;
-def imm0_255_comp : PatLeaf<(i32 imm), [{
- return ~((uint32_t)N->getZExtValue()) < 256;
-}]>;
-
-def imm8_255 : PatLeaf<(i32 imm), [{
- return (uint32_t)N->getZExtValue() >= 8 && (uint32_t)N->getZExtValue() < 256;
-}]>;
-def imm8_255_neg : PatLeaf<(i32 imm), [{
- unsigned Val = -N->getZExtValue();
- return Val >= 8 && Val < 256;
-}], imm_neg_XFORM>;
-
-// Break imm's up into two pieces: an immediate + a left shift.
-// This uses thumb_immshifted to match and thumb_immshifted_val and
-// thumb_immshifted_shamt to get the val/shift pieces.
-def thumb_immshifted : PatLeaf<(imm), [{
- return ARM_AM::isThumbImmShiftedVal((unsigned)N->getZExtValue());
-}]>;
-
-def thumb_immshifted_val : SDNodeXForm<imm, [{
- unsigned V = ARM_AM::getThumbImmNonShiftedVal((unsigned)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-}]>;
-
-def thumb_immshifted_shamt : SDNodeXForm<imm, [{
- unsigned V = ARM_AM::getThumbImmValShift((unsigned)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-}]>;
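
The three helpers above split a constant into an 8-bit piece plus a left-shift amount so it can be materialized as tMOVi8 followed by tLSLri (see the "Two piece imms" pattern near the end of this file). A minimal C++ sketch of that idea, not the actual ARM_AM implementation:

    #include <cstdint>

    // Illustrative only: find a shift such that Imm == (8-bit value) << Shamt.
    // The real ARM_AM::isThumbImmShiftedVal / getThumbImmValShift may differ.
    static bool splitThumbShiftedImm(uint32_t Imm, uint32_t &Val, unsigned &Shamt) {
      for (Shamt = 0; Shamt < 25; ++Shamt) {
        if ((Imm & ~(0xFFu << Shamt)) == 0) { // only 8 contiguous bits populated
          Val = Imm >> Shamt;                 // the piece that fits in tMOVi8
          return true;                        // emit: movs rD, #Val ; lsls rD, #Shamt
        }
      }
      return false;                           // not expressible this way
    }
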
-
-// Scaled 4 immediate.
-def t_imm_s4 : Operand<i32> {
- let PrintMethod = "printThumbS4ImmOperand";
-}
-
-// Define Thumb specific addressing modes.
-
-// t_addrmode_rr := reg + reg
-//
-def t_addrmode_rr : Operand<i32>,
- ComplexPattern<i32, 2, "SelectThumbAddrModeRR", []> {
- let PrintMethod = "printThumbAddrModeRROperand";
- let MIOperandInfo = (ops tGPR:$base, tGPR:$offsreg);
-}
-
-// t_addrmode_s4 := reg + reg
-// reg + imm5 * 4
-//
-def t_addrmode_s4 : Operand<i32>,
- ComplexPattern<i32, 3, "SelectThumbAddrModeS4", []> {
- let PrintMethod = "printThumbAddrModeS4Operand";
- let MIOperandInfo = (ops tGPR:$base, i32imm:$offsimm, tGPR:$offsreg);
-}
-
-// t_addrmode_s2 := reg + reg
-// reg + imm5 * 2
-//
-def t_addrmode_s2 : Operand<i32>,
- ComplexPattern<i32, 3, "SelectThumbAddrModeS2", []> {
- let PrintMethod = "printThumbAddrModeS2Operand";
- let MIOperandInfo = (ops tGPR:$base, i32imm:$offsimm, tGPR:$offsreg);
-}
-
-// t_addrmode_s1 := reg + reg
-// reg + imm5
-//
-def t_addrmode_s1 : Operand<i32>,
- ComplexPattern<i32, 3, "SelectThumbAddrModeS1", []> {
- let PrintMethod = "printThumbAddrModeS1Operand";
- let MIOperandInfo = (ops tGPR:$base, i32imm:$offsimm, tGPR:$offsreg);
-}
-
-// t_addrmode_sp := sp + imm8 * 4
-//
-def t_addrmode_sp : Operand<i32>,
- ComplexPattern<i32, 2, "SelectThumbAddrModeSP", []> {
- let PrintMethod = "printThumbAddrModeSPOperand";
- let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm);
-}
-
-//===----------------------------------------------------------------------===//
-// Miscellaneous Instructions.
-//
-
-// FIXME: Marking these as hasSideEffects is necessary to prevent machine DCE
-// from removing one half of the matched pairs. That breaks PEI, which assumes
-// these will always be in pairs, and asserts if it finds otherwise. Better way?
-let Defs = [SP], Uses = [SP], hasSideEffects = 1 in {
-def tADJCALLSTACKUP :
-PseudoInst<(outs), (ins i32imm:$amt1, i32imm:$amt2), NoItinerary,
- "@ tADJCALLSTACKUP $amt1",
- [(ARMcallseq_end imm:$amt1, imm:$amt2)]>, Requires<[IsThumb1Only]>;
-
-def tADJCALLSTACKDOWN :
-PseudoInst<(outs), (ins i32imm:$amt), NoItinerary,
- "@ tADJCALLSTACKDOWN $amt",
- [(ARMcallseq_start imm:$amt)]>, Requires<[IsThumb1Only]>;
-}
-
-def tNOP : T1pI<(outs), (ins), NoItinerary, "nop", "",
- [/* For disassembly only; pattern left blank */]>,
- T1Encoding<0b101111> {
- let Inst{9-8} = 0b11;
- let Inst{7-0} = 0b00000000;
-}
-
-def tYIELD : T1pI<(outs), (ins), NoItinerary, "yield", "",
- [/* For disassembly only; pattern left blank */]>,
- T1Encoding<0b101111> {
- let Inst{9-8} = 0b11;
- let Inst{7-0} = 0b00010000;
-}
-
-def tWFE : T1pI<(outs), (ins), NoItinerary, "wfe", "",
- [/* For disassembly only; pattern left blank */]>,
- T1Encoding<0b101111> {
- let Inst{9-8} = 0b11;
- let Inst{7-0} = 0b00100000;
-}
-
-def tWFI : T1pI<(outs), (ins), NoItinerary, "wfi", "",
- [/* For disassembly only; pattern left blank */]>,
- T1Encoding<0b101111> {
- let Inst{9-8} = 0b11;
- let Inst{7-0} = 0b00110000;
-}
-
-def tSEV : T1pI<(outs), (ins), NoItinerary, "sev", "",
- [/* For disassembly only; pattern left blank */]>,
- T1Encoding<0b101111> {
- let Inst{9-8} = 0b11;
- let Inst{7-0} = 0b01000000;
-}
-
-def tSETENDBE : T1I<(outs), (ins), NoItinerary, "setend\tbe",
- [/* For disassembly only; pattern left blank */]>,
- T1Encoding<0b101101> {
- let Inst{9-5} = 0b10010;
- let Inst{3} = 1;
-}
-
-def tSETENDLE : T1I<(outs), (ins), NoItinerary, "setend\tle",
- [/* For disassembly only; pattern left blank */]>,
- T1Encoding<0b101101> {
- let Inst{9-5} = 0b10010;
- let Inst{3} = 0;
-}
-
-// The i32imm operand $val can be used by a debugger to store more information
-// about the breakpoint.
-def tBKPT : T1I<(outs), (ins i32imm:$val), NoItinerary, "bkpt\t$val",
- [/* For disassembly only; pattern left blank */]>,
- T1Encoding<0b101111> {
- let Inst{9-8} = 0b10;
-}
-
-// Change Processor State is a system instruction -- for disassembly only.
-// The singleton $opt operand contains the following information:
-// opt{4-0} = mode ==> don't care
-// opt{5} = changemode ==> 0 (false for 16-bit Thumb instr)
-// opt{8-6} = AIF from Inst{2-0}
-// opt{10-9} = 1:imod from Inst{4} with 0b10 as enable and 0b11 as disable
-//
-// The opt{4-0} and opt{5} sub-fields are to accommodate 32-bit Thumb and ARM
-// CPS which has more options.
-def tCPS : T1I<(outs), (ins i32imm:$opt), NoItinerary, "cps${opt:cps}",
- [/* For disassembly only; pattern left blank */]>,
- T1Misc<0b0110011>;
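
A small C++ sketch of unpacking the singleton $opt operand into the sub-fields described in the comment above (illustrative only; the operand printer in LLVM does its own decoding):

    #include <cstdint>

    // Field layout as documented in the comment above (illustrative only).
    struct CPSFields {
      uint32_t Mode;       // opt{4-0}  - don't care for the 16-bit encoding
      bool     ChangeMode; // opt{5}    - 0 for the 16-bit Thumb instruction
      uint32_t AIF;        // opt{8-6}  - A/I/F bits, placed in Inst{2-0}
      uint32_t IMod;       // opt{10-9} - 0b10 = interrupt enable, 0b11 = disable
    };

    static CPSFields decodeCPSOpt(uint32_t Opt) {
      CPSFields F;
      F.Mode       = Opt & 0x1Fu;
      F.ChangeMode = ((Opt >> 5) & 1u) != 0;
      F.AIF        = (Opt >> 6) & 0x7u;
      F.IMod       = (Opt >> 9) & 0x3u;
      return F;
    }
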
-
-// For both thumb1 and thumb2.
-let isNotDuplicable = 1 in
-def tPICADD : TIt<(outs GPR:$dst), (ins GPR:$lhs, pclabel:$cp), IIC_iALUr,
- "\n$cp:\n\tadd\t$dst, pc",
- [(set GPR:$dst, (ARMpic_add GPR:$lhs, imm:$cp))]>,
- T1Special<{0,0,?,?}> {
- let Inst{6-3} = 0b1111; // A8.6.6 Rm = pc
-}
-
-// PC relative add.
-def tADDrPCi : T1I<(outs tGPR:$dst), (ins t_imm_s4:$rhs), IIC_iALUi,
- "add\t$dst, pc, $rhs", []>,
- T1Encoding<{1,0,1,0,0,?}>; // A6.2 & A8.6.10
-
-// ADD rd, sp, #imm8
-def tADDrSPi : T1I<(outs tGPR:$dst), (ins GPR:$sp, t_imm_s4:$rhs), IIC_iALUi,
- "add\t$dst, $sp, $rhs", []>,
- T1Encoding<{1,0,1,0,1,?}>; // A6.2 & A8.6.8
-
-// ADD sp, sp, #imm7
-def tADDspi : TIt<(outs GPR:$dst), (ins GPR:$lhs, t_imm_s4:$rhs), IIC_iALUi,
- "add\t$dst, $rhs", []>,
- T1Misc<{0,0,0,0,0,?,?}>; // A6.2.5 & A8.6.8
-
-// SUB sp, sp, #imm7
-def tSUBspi : TIt<(outs GPR:$dst), (ins GPR:$lhs, t_imm_s4:$rhs), IIC_iALUi,
- "sub\t$dst, $rhs", []>,
- T1Misc<{0,0,0,0,1,?,?}>; // A6.2.5 & A8.6.215
-
-// ADD rm, sp
-def tADDrSP : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr,
- "add\t$dst, $rhs", []>,
- T1Special<{0,0,?,?}> {
- let Inst{6-3} = 0b1101; // A8.6.9 Encoding T1
-}
-
-// ADD sp, rm
-def tADDspr : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr,
- "add\t$dst, $rhs", []>,
- T1Special<{0,0,?,?}> {
- // A8.6.9 Encoding T2
- let Inst{7} = 1;
- let Inst{2-0} = 0b101;
-}
-
-// Pseudo instruction that will expand into a tSUBspi + a copy.
-let usesCustomInserter = 1 in { // Expanded after instruction selection.
-def tSUBspi_ : PseudoInst<(outs GPR:$dst), (ins GPR:$lhs, t_imm_s4:$rhs),
- NoItinerary, "@ sub\t$dst, $rhs", []>;
-
-def tADDspr_ : PseudoInst<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
- NoItinerary, "@ add\t$dst, $rhs", []>;
-
-let Defs = [CPSR] in
-def tANDsp : PseudoInst<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
- NoItinerary, "@ and\t$dst, $rhs", []>;
-} // usesCustomInserter
-
-//===----------------------------------------------------------------------===//
-// Control Flow Instructions.
-//
-
-let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
- def tBX_RET : TI<(outs), (ins), IIC_Br, "bx\tlr", [(ARMretflag)]>,
- T1Special<{1,1,0,?}> { // A6.2.3 & A8.6.25
- let Inst{6-3} = 0b1110; // Rm = lr
- }
- // Alternative return instruction used by vararg functions.
- def tBX_RET_vararg : TI<(outs), (ins tGPR:$target), IIC_Br, "bx\t$target",[]>,
- T1Special<{1,1,0,?}>; // A6.2.3 & A8.6.25
-}
-
-// Indirect branches
-let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
- def tBRIND : TI<(outs), (ins GPR:$dst), IIC_Br, "mov\tpc, $dst",
- [(brind GPR:$dst)]>,
- T1Special<{1,0,1,?}> {
- // <Rd> = Inst{7:2-0} = pc
- let Inst{2-0} = 0b111;
- }
-}
-
-// FIXME: remove when we have a way to mark an MI with these properties.
-let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1,
- hasExtraDefRegAllocReq = 1 in
-def tPOP_RET : T1I<(outs), (ins pred:$p, reglist:$wb, variable_ops), IIC_Br,
- "pop${p}\t$wb", []>,
- T1Misc<{1,1,0,?,?,?,?}>;
-
-let isCall = 1,
- Defs = [R0, R1, R2, R3, R12, LR,
- D0, D1, D2, D3, D4, D5, D6, D7,
- D16, D17, D18, D19, D20, D21, D22, D23,
- D24, D25, D26, D27, D28, D29, D30, D31, CPSR, FPSCR] in {
- // Also used for Thumb2
- def tBL : TIx2<0b11110, 0b11, 1,
- (outs), (ins i32imm:$func, variable_ops), IIC_Br,
- "bl\t${func:call}",
- [(ARMtcall tglobaladdr:$func)]>,
- Requires<[IsThumb, IsNotDarwin]>;
-
- // ARMv5T and above, also used for Thumb2
- def tBLXi : TIx2<0b11110, 0b11, 0,
- (outs), (ins i32imm:$func, variable_ops), IIC_Br,
- "blx\t${func:call}",
- [(ARMcall tglobaladdr:$func)]>,
- Requires<[IsThumb, HasV5T, IsNotDarwin]>;
-
- // Also used for Thumb2
- def tBLXr : TI<(outs), (ins GPR:$func, variable_ops), IIC_Br,
- "blx\t$func",
- [(ARMtcall GPR:$func)]>,
- Requires<[IsThumb, HasV5T, IsNotDarwin]>,
- T1Special<{1,1,1,?}>; // A6.2.3 & A8.6.24;
-
- // ARMv4T
- def tBX : TIx2<{?,?,?,?,?}, {?,?}, ?,
- (outs), (ins tGPR:$func, variable_ops), IIC_Br,
- "mov\tlr, pc\n\tbx\t$func",
- [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsThumb1Only, IsNotDarwin]>;
-}
-
-// On Darwin R9 is call-clobbered.
-let isCall = 1,
- Defs = [R0, R1, R2, R3, R9, R12, LR,
- D0, D1, D2, D3, D4, D5, D6, D7,
- D16, D17, D18, D19, D20, D21, D22, D23,
- D24, D25, D26, D27, D28, D29, D30, D31, CPSR, FPSCR] in {
- // Also used for Thumb2
- def tBLr9 : TIx2<0b11110, 0b11, 1,
- (outs), (ins i32imm:$func, variable_ops), IIC_Br,
- "bl\t${func:call}",
- [(ARMtcall tglobaladdr:$func)]>,
- Requires<[IsThumb, IsDarwin]>;
-
- // ARMv5T and above, also used for Thumb2
- def tBLXi_r9 : TIx2<0b11110, 0b11, 0,
- (outs), (ins i32imm:$func, variable_ops), IIC_Br,
- "blx\t${func:call}",
- [(ARMcall tglobaladdr:$func)]>,
- Requires<[IsThumb, HasV5T, IsDarwin]>;
-
- // Also used for Thumb2
- def tBLXr_r9 : TI<(outs), (ins GPR:$func, variable_ops), IIC_Br,
- "blx\t$func",
- [(ARMtcall GPR:$func)]>,
- Requires<[IsThumb, HasV5T, IsDarwin]>,
- T1Special<{1,1,1,?}>; // A6.2.3 & A8.6.24
-
- // ARMv4T
- def tBXr9 : TIx2<{?,?,?,?,?}, {?,?}, ?,
- (outs), (ins tGPR:$func, variable_ops), IIC_Br,
- "mov\tlr, pc\n\tbx\t$func",
- [(ARMcall_nolink tGPR:$func)]>,
- Requires<[IsThumb1Only, IsDarwin]>;
-}
-
-let isBranch = 1, isTerminator = 1 in {
- let isBarrier = 1 in {
- let isPredicable = 1 in
- def tB : T1I<(outs), (ins brtarget:$target), IIC_Br,
- "b\t$target", [(br bb:$target)]>,
- T1Encoding<{1,1,1,0,0,?}>;
-
- // Far jump
- let Defs = [LR] in
- def tBfar : TIx2<0b11110, 0b11, 1, (outs), (ins brtarget:$target), IIC_Br,
- "bl\t$target\t@ far jump",[]>;
-
- def tBR_JTr : T1JTI<(outs),
- (ins tGPR:$target, jtblock_operand:$jt, i32imm:$id),
- IIC_Br, "mov\tpc, $target\n\t.align\t2\n$jt",
- [(ARMbrjt tGPR:$target, tjumptable:$jt, imm:$id)]>,
- Encoding16 {
- let Inst{15-7} = 0b010001101;
- let Inst{2-0} = 0b111;
- }
- }
-}
-
-// FIXME: should be able to write a pattern for ARMBrcond, but can't use
-// a two-value operand where a dag node expects two operands. :(
-let isBranch = 1, isTerminator = 1 in
- def tBcc : T1I<(outs), (ins brtarget:$target, pred:$cc), IIC_Br,
- "b$cc\t$target",
- [/*(ARMbrcond bb:$target, imm:$cc)*/]>,
- T1Encoding<{1,1,0,1,?,?}>;
-
-// Compare and branch on zero / non-zero
-let isBranch = 1, isTerminator = 1 in {
- def tCBZ : T1I<(outs), (ins tGPR:$cmp, brtarget:$target), IIC_Br,
- "cbz\t$cmp, $target", []>,
- T1Misc<{0,0,?,1,?,?,?}>;
-
- def tCBNZ : T1I<(outs), (ins tGPR:$cmp, brtarget:$target), IIC_Br,
- "cbnz\t$cmp, $target", []>,
- T1Misc<{1,0,?,1,?,?,?}>;
-}
-
-// A8.6.218 Supervisor Call (Software Interrupt) -- for disassembly only
-// A8.6.16 B: Encoding T1
-// If Inst{11-8} == 0b1111 then SEE SVC
-let isCall = 1 in {
-def tSVC : T1pI<(outs), (ins i32imm:$svc), IIC_Br, "svc", "\t$svc", []>,
- Encoding16 {
- let Inst{15-12} = 0b1101;
- let Inst{11-8} = 0b1111;
-}
-}
-
-// A8.6.16 B: Encoding T1 -- for disassembly only
-// If Inst{11-8} == 0b1110 then UNDEFINED
-def tTRAP : T1I<(outs), (ins), IIC_Br, "trap", []>, Encoding16 {
- let Inst{15-12} = 0b1101;
- let Inst{11-8} = 0b1110;
-}
-
-//===----------------------------------------------------------------------===//
-// Load Store Instructions.
-//
-
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def tLDR : T1pI4<(outs tGPR:$dst), (ins t_addrmode_s4:$addr), IIC_iLoadr,
- "ldr", "\t$dst, $addr",
- [(set tGPR:$dst, (load t_addrmode_s4:$addr))]>,
- T1LdSt<0b100>;
-def tLDRi: T1pI4<(outs tGPR:$dst), (ins t_addrmode_s4:$addr), IIC_iLoadr,
- "ldr", "\t$dst, $addr",
- []>,
- T1LdSt4Imm<{1,?,?}>;
-
-def tLDRB : T1pI1<(outs tGPR:$dst), (ins t_addrmode_s1:$addr), IIC_iLoadr,
- "ldrb", "\t$dst, $addr",
- [(set tGPR:$dst, (zextloadi8 t_addrmode_s1:$addr))]>,
- T1LdSt<0b110>;
-def tLDRBi: T1pI1<(outs tGPR:$dst), (ins t_addrmode_s1:$addr), IIC_iLoadr,
- "ldrb", "\t$dst, $addr",
- []>,
- T1LdSt1Imm<{1,?,?}>;
-
-def tLDRH : T1pI2<(outs tGPR:$dst), (ins t_addrmode_s2:$addr), IIC_iLoadr,
- "ldrh", "\t$dst, $addr",
- [(set tGPR:$dst, (zextloadi16 t_addrmode_s2:$addr))]>,
- T1LdSt<0b101>;
-def tLDRHi: T1pI2<(outs tGPR:$dst), (ins t_addrmode_s2:$addr), IIC_iLoadr,
- "ldrh", "\t$dst, $addr",
- []>,
- T1LdSt2Imm<{1,?,?}>;
-
-let AddedComplexity = 10 in
-def tLDRSB : T1pI1<(outs tGPR:$dst), (ins t_addrmode_rr:$addr), IIC_iLoadr,
- "ldrsb", "\t$dst, $addr",
- [(set tGPR:$dst, (sextloadi8 t_addrmode_rr:$addr))]>,
- T1LdSt<0b011>;
-
-let AddedComplexity = 10 in
-def tLDRSH : T1pI2<(outs tGPR:$dst), (ins t_addrmode_rr:$addr), IIC_iLoadr,
- "ldrsh", "\t$dst, $addr",
- [(set tGPR:$dst, (sextloadi16 t_addrmode_rr:$addr))]>,
- T1LdSt<0b111>;
-
-let canFoldAsLoad = 1 in
-def tLDRspi : T1pIs<(outs tGPR:$dst), (ins t_addrmode_sp:$addr), IIC_iLoadi,
- "ldr", "\t$dst, $addr",
- [(set tGPR:$dst, (load t_addrmode_sp:$addr))]>,
- T1LdStSP<{1,?,?}>;
-
-// Special instruction for restore. It cannot clobber the condition register
-// when it's expanded by eliminateCallFramePseudoInstr().
-let canFoldAsLoad = 1, mayLoad = 1 in
-def tRestore : T1pIs<(outs tGPR:$dst), (ins t_addrmode_sp:$addr), IIC_iLoadi,
- "ldr", "\t$dst, $addr", []>,
- T1LdStSP<{1,?,?}>;
-
-// Load tconstpool
-// FIXME: Use ldr.n to work around a Darwin assembler bug.
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def tLDRpci : T1pIs<(outs tGPR:$dst), (ins i32imm:$addr), IIC_iLoadi,
- "ldr", ".n\t$dst, $addr",
- [(set tGPR:$dst, (load (ARMWrapper tconstpool:$addr)))]>,
- T1Encoding<{0,1,0,0,1,?}>; // A6.2 & A8.6.59
-
-// Special LDR for loads from non-pc-relative constpools.
-let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1 in
-def tLDRcp : T1pIs<(outs tGPR:$dst), (ins i32imm:$addr), IIC_iLoadi,
- "ldr", "\t$dst, $addr", []>,
- T1LdStSP<{1,?,?}>;
-
-def tSTR : T1pI4<(outs), (ins tGPR:$src, t_addrmode_s4:$addr), IIC_iStorer,
- "str", "\t$src, $addr",
- [(store tGPR:$src, t_addrmode_s4:$addr)]>,
- T1LdSt<0b000>;
-def tSTRi: T1pI4<(outs), (ins tGPR:$src, t_addrmode_s4:$addr), IIC_iStorer,
- "str", "\t$src, $addr",
- []>,
- T1LdSt4Imm<{0,?,?}>;
-
-def tSTRB : T1pI1<(outs), (ins tGPR:$src, t_addrmode_s1:$addr), IIC_iStorer,
- "strb", "\t$src, $addr",
- [(truncstorei8 tGPR:$src, t_addrmode_s1:$addr)]>,
- T1LdSt<0b010>;
-def tSTRBi: T1pI1<(outs), (ins tGPR:$src, t_addrmode_s1:$addr), IIC_iStorer,
- "strb", "\t$src, $addr",
- []>,
- T1LdSt1Imm<{0,?,?}>;
-
-def tSTRH : T1pI2<(outs), (ins tGPR:$src, t_addrmode_s2:$addr), IIC_iStorer,
- "strh", "\t$src, $addr",
- [(truncstorei16 tGPR:$src, t_addrmode_s2:$addr)]>,
- T1LdSt<0b001>;
-def tSTRHi: T1pI2<(outs), (ins tGPR:$src, t_addrmode_s2:$addr), IIC_iStorer,
- "strh", "\t$src, $addr",
- []>,
- T1LdSt2Imm<{0,?,?}>;
-
-def tSTRspi : T1pIs<(outs), (ins tGPR:$src, t_addrmode_sp:$addr), IIC_iStorei,
- "str", "\t$src, $addr",
- [(store tGPR:$src, t_addrmode_sp:$addr)]>,
- T1LdStSP<{0,?,?}>;
-
-let mayStore = 1 in {
-// Special instruction for spill. It cannot clobber the condition register
-// when it's expanded by eliminateCallFramePseudoInstr().
-def tSpill : T1pIs<(outs), (ins tGPR:$src, t_addrmode_sp:$addr), IIC_iStorei,
- "str", "\t$src, $addr", []>,
- T1LdStSP<{0,?,?}>;
-}
-
-//===----------------------------------------------------------------------===//
-// Load / store multiple Instructions.
-//
-
-// These require the base address to be written back or to be one of the loaded regs.
-let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
-def tLDM : T1I<(outs),
- (ins addrmode4:$addr, pred:$p, reglist:$wb, variable_ops),
- IIC_iLoadm,
- "ldm${addr:submode}${p}\t$addr, $wb", []>,
- T1Encoding<{1,1,0,0,1,?}>; // A6.2 & A8.6.53
-
-let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
-def tSTM : T1I<(outs),
- (ins addrmode4:$addr, pred:$p, reglist:$wb, variable_ops),
- IIC_iStorem,
- "stm${addr:submode}${p}\t$addr, $wb", []>,
- T1Encoding<{1,1,0,0,0,?}>; // A6.2 & A8.6.189
-
-let mayLoad = 1, Uses = [SP], Defs = [SP], hasExtraDefRegAllocReq = 1 in
-def tPOP : T1I<(outs), (ins pred:$p, reglist:$wb, variable_ops), IIC_Br,
- "pop${p}\t$wb", []>,
- T1Misc<{1,1,0,?,?,?,?}>;
-
-let mayStore = 1, Uses = [SP], Defs = [SP], hasExtraSrcRegAllocReq = 1 in
-def tPUSH : T1I<(outs), (ins pred:$p, reglist:$wb, variable_ops), IIC_Br,
- "push${p}\t$wb", []>,
- T1Misc<{0,1,0,?,?,?,?}>;
-
-//===----------------------------------------------------------------------===//
-// Arithmetic Instructions.
-//
-
-// Add with carry register
-let isCommutable = 1, Uses = [CPSR] in
-def tADC : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr,
- "adc", "\t$dst, $rhs",
- [(set tGPR:$dst, (adde tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b0101>;
-
-// Add immediate
-def tADDi3 : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iALUi,
- "add", "\t$dst, $lhs, $rhs",
- [(set tGPR:$dst, (add tGPR:$lhs, imm0_7:$rhs))]>,
- T1General<0b01110>;
-
-def tADDi8 : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iALUi,
- "add", "\t$dst, $rhs",
- [(set tGPR:$dst, (add tGPR:$lhs, imm8_255:$rhs))]>,
- T1General<{1,1,0,?,?}>;
-
-// Add register
-let isCommutable = 1 in
-def tADDrr : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr,
- "add", "\t$dst, $lhs, $rhs",
- [(set tGPR:$dst, (add tGPR:$lhs, tGPR:$rhs))]>,
- T1General<0b01100>;
-
-let neverHasSideEffects = 1 in
-def tADDhirr : T1pIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr,
- "add", "\t$dst, $rhs", []>,
- T1Special<{0,0,?,?}>;
-
-// And register
-let isCommutable = 1 in
-def tAND : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr,
- "and", "\t$dst, $rhs",
- [(set tGPR:$dst, (and tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b0000>;
-
-// ASR immediate
-def tASRri : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iMOVsi,
- "asr", "\t$dst, $lhs, $rhs",
- [(set tGPR:$dst, (sra tGPR:$lhs, (i32 imm:$rhs)))]>,
- T1General<{0,1,0,?,?}>;
-
-// ASR register
-def tASRrr : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iMOVsr,
- "asr", "\t$dst, $rhs",
- [(set tGPR:$dst, (sra tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b0100>;
-
-// BIC register
-def tBIC : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr,
- "bic", "\t$dst, $rhs",
- [(set tGPR:$dst, (and tGPR:$lhs, (not tGPR:$rhs)))]>,
- T1DataProcessing<0b1110>;
-
-// CMN register
-let Defs = [CPSR] in {
-//FIXME: Disable CMN, as CCodes are backwards from compare expectations
-// Compare-to-zero still works out, just not the relationals
-//def tCMN : T1pI<(outs), (ins tGPR:$lhs, tGPR:$rhs), IIC_iCMPr,
-// "cmn", "\t$lhs, $rhs",
-// [(ARMcmp tGPR:$lhs, (ineg tGPR:$rhs))]>,
-// T1DataProcessing<0b1011>;
-def tCMNz : T1pI<(outs), (ins tGPR:$lhs, tGPR:$rhs), IIC_iCMPr,
- "cmn", "\t$lhs, $rhs",
- [(ARMcmpZ tGPR:$lhs, (ineg tGPR:$rhs))]>,
- T1DataProcessing<0b1011>;
-}
-
-// CMP immediate
-let Defs = [CPSR] in {
-def tCMPi8 : T1pI<(outs), (ins tGPR:$lhs, i32imm:$rhs), IIC_iCMPi,
- "cmp", "\t$lhs, $rhs",
- [(ARMcmp tGPR:$lhs, imm0_255:$rhs)]>,
- T1General<{1,0,1,?,?}>;
-def tCMPzi8 : T1pI<(outs), (ins tGPR:$lhs, i32imm:$rhs), IIC_iCMPi,
- "cmp", "\t$lhs, $rhs",
- [(ARMcmpZ tGPR:$lhs, imm0_255:$rhs)]>,
- T1General<{1,0,1,?,?}>;
-}
-
-// CMP register
-let Defs = [CPSR] in {
-def tCMPr : T1pI<(outs), (ins tGPR:$lhs, tGPR:$rhs), IIC_iCMPr,
- "cmp", "\t$lhs, $rhs",
- [(ARMcmp tGPR:$lhs, tGPR:$rhs)]>,
- T1DataProcessing<0b1010>;
-def tCMPzr : T1pI<(outs), (ins tGPR:$lhs, tGPR:$rhs), IIC_iCMPr,
- "cmp", "\t$lhs, $rhs",
- [(ARMcmpZ tGPR:$lhs, tGPR:$rhs)]>,
- T1DataProcessing<0b1010>;
-
-def tCMPhir : T1pI<(outs), (ins GPR:$lhs, GPR:$rhs), IIC_iCMPr,
- "cmp", "\t$lhs, $rhs", []>,
- T1Special<{0,1,?,?}>;
-def tCMPzhir : T1pI<(outs), (ins GPR:$lhs, GPR:$rhs), IIC_iCMPr,
- "cmp", "\t$lhs, $rhs", []>,
- T1Special<{0,1,?,?}>;
-}
-
-
-// XOR register
-let isCommutable = 1 in
-def tEOR : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr,
- "eor", "\t$dst, $rhs",
- [(set tGPR:$dst, (xor tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b0001>;
-
-// LSL immediate
-def tLSLri : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iMOVsi,
- "lsl", "\t$dst, $lhs, $rhs",
- [(set tGPR:$dst, (shl tGPR:$lhs, (i32 imm:$rhs)))]>,
- T1General<{0,0,0,?,?}>;
-
-// LSL register
-def tLSLrr : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iMOVsr,
- "lsl", "\t$dst, $rhs",
- [(set tGPR:$dst, (shl tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b0010>;
-
-// LSR immediate
-def tLSRri : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iMOVsi,
- "lsr", "\t$dst, $lhs, $rhs",
- [(set tGPR:$dst, (srl tGPR:$lhs, (i32 imm:$rhs)))]>,
- T1General<{0,0,1,?,?}>;
-
-// LSR register
-def tLSRrr : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iMOVsr,
- "lsr", "\t$dst, $rhs",
- [(set tGPR:$dst, (srl tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b0011>;
-
-// move register
-def tMOVi8 : T1sI<(outs tGPR:$dst), (ins i32imm:$src), IIC_iMOVi,
- "mov", "\t$dst, $src",
- [(set tGPR:$dst, imm0_255:$src)]>,
- T1General<{1,0,0,?,?}>;
-
-// TODO: A7-73: MOV(2) - mov setting flag.
-
-
-let neverHasSideEffects = 1 in {
-// FIXME: Make this predicable.
-def tMOVr : T1I<(outs tGPR:$dst), (ins tGPR:$src), IIC_iMOVr,
- "mov\t$dst, $src", []>,
- T1Special<0b1000>;
-let Defs = [CPSR] in
-def tMOVSr : T1I<(outs tGPR:$dst), (ins tGPR:$src), IIC_iMOVr,
- "movs\t$dst, $src", []>, Encoding16 {
- let Inst{15-6} = 0b0000000000;
-}
-
-// FIXME: Make these predicable.
-def tMOVgpr2tgpr : T1I<(outs tGPR:$dst), (ins GPR:$src), IIC_iMOVr,
- "mov\t$dst, $src", []>,
- T1Special<{1,0,0,?}>;
-def tMOVtgpr2gpr : T1I<(outs GPR:$dst), (ins tGPR:$src), IIC_iMOVr,
- "mov\t$dst, $src", []>,
- T1Special<{1,0,?,0}>;
-def tMOVgpr2gpr : T1I<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVr,
- "mov\t$dst, $src", []>,
- T1Special<{1,0,?,?}>;
-} // neverHasSideEffects
-
-// multiply register
-let isCommutable = 1 in
-def tMUL : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iMUL32,
- "mul", "\t$dst, $rhs, $dst", /* A8.6.105 MUL Encoding T1 */
- [(set tGPR:$dst, (mul tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b1101>;
-
-// move inverse register
-def tMVN : T1sI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iMOVr,
- "mvn", "\t$dst, $src",
- [(set tGPR:$dst, (not tGPR:$src))]>,
- T1DataProcessing<0b1111>;
-
-// bitwise or register
-let isCommutable = 1 in
-def tORR : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr,
- "orr", "\t$dst, $rhs",
- [(set tGPR:$dst, (or tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b1100>;
-
-// swaps
-def tREV : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr,
- "rev", "\t$dst, $src",
- [(set tGPR:$dst, (bswap tGPR:$src))]>,
- Requires<[IsThumb1Only, HasV6]>,
- T1Misc<{1,0,1,0,0,0,?}>;
-
-def tREV16 : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr,
- "rev16", "\t$dst, $src",
- [(set tGPR:$dst,
- (or (and (srl tGPR:$src, (i32 8)), 0xFF),
- (or (and (shl tGPR:$src, (i32 8)), 0xFF00),
- (or (and (srl tGPR:$src, (i32 8)), 0xFF0000),
- (and (shl tGPR:$src, (i32 8)), 0xFF000000)))))]>,
- Requires<[IsThumb1Only, HasV6]>,
- T1Misc<{1,0,1,0,0,1,?}>;
-
-def tREVSH : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr,
- "revsh", "\t$dst, $src",
- [(set tGPR:$dst,
- (sext_inreg
- (or (srl (and tGPR:$src, 0xFF00), (i32 8)),
- (shl tGPR:$src, (i32 8))), i16))]>,
- Requires<[IsThumb1Only, HasV6]>,
- T1Misc<{1,0,1,0,1,1,?}>;
-
-// rotate right register
-def tROR : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iMOVsr,
- "ror", "\t$dst, $rhs",
- [(set tGPR:$dst, (rotr tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b0111>;
-
-// negate register
-def tRSB : T1sI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iALUi,
- "rsb", "\t$dst, $src, #0",
- [(set tGPR:$dst, (ineg tGPR:$src))]>,
- T1DataProcessing<0b1001>;
-
-// Subtract with carry register
-let Uses = [CPSR] in
-def tSBC : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr,
- "sbc", "\t$dst, $rhs",
- [(set tGPR:$dst, (sube tGPR:$lhs, tGPR:$rhs))]>,
- T1DataProcessing<0b0110>;
-
-// Subtract immediate
-def tSUBi3 : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iALUi,
- "sub", "\t$dst, $lhs, $rhs",
- [(set tGPR:$dst, (add tGPR:$lhs, imm0_7_neg:$rhs))]>,
- T1General<0b01111>;
-
-def tSUBi8 : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iALUi,
- "sub", "\t$dst, $rhs",
- [(set tGPR:$dst, (add tGPR:$lhs, imm8_255_neg:$rhs))]>,
- T1General<{1,1,1,?,?}>;
-
-// subtract register
-def tSUBrr : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr,
- "sub", "\t$dst, $lhs, $rhs",
- [(set tGPR:$dst, (sub tGPR:$lhs, tGPR:$rhs))]>,
- T1General<0b01101>;
-
-// TODO: A7-96: STMIA - store multiple.
-
-// sign-extend byte
-def tSXTB : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr,
- "sxtb", "\t$dst, $src",
- [(set tGPR:$dst, (sext_inreg tGPR:$src, i8))]>,
- Requires<[IsThumb1Only, HasV6]>,
- T1Misc<{0,0,1,0,0,1,?}>;
-
-// sign-extend short
-def tSXTH : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr,
- "sxth", "\t$dst, $src",
- [(set tGPR:$dst, (sext_inreg tGPR:$src, i16))]>,
- Requires<[IsThumb1Only, HasV6]>,
- T1Misc<{0,0,1,0,0,0,?}>;
-
-// test
-let isCommutable = 1, Defs = [CPSR] in
-def tTST : T1pI<(outs), (ins tGPR:$lhs, tGPR:$rhs), IIC_iCMPr,
- "tst", "\t$lhs, $rhs",
- [(ARMcmpZ (and tGPR:$lhs, tGPR:$rhs), 0)]>,
- T1DataProcessing<0b1000>;
-
-// zero-extend byte
-def tUXTB : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr,
- "uxtb", "\t$dst, $src",
- [(set tGPR:$dst, (and tGPR:$src, 0xFF))]>,
- Requires<[IsThumb1Only, HasV6]>,
- T1Misc<{0,0,1,0,1,1,?}>;
-
-// zero-extend short
-def tUXTH : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr,
- "uxth", "\t$dst, $src",
- [(set tGPR:$dst, (and tGPR:$src, 0xFFFF))]>,
- Requires<[IsThumb1Only, HasV6]>,
- T1Misc<{0,0,1,0,1,0,?}>;
-
-
-// Conditional move tMOVCCr - Used to implement the Thumb SELECT_CC operation.
-// Expanded after instruction selection into a branch sequence.
-let usesCustomInserter = 1 in // Expanded after instruction selection.
- def tMOVCCr_pseudo :
- PseudoInst<(outs tGPR:$dst), (ins tGPR:$false, tGPR:$true, pred:$cc),
- NoItinerary, "@ tMOVCCr $cc",
- [/*(set tGPR:$dst, (ARMcmov tGPR:$false, tGPR:$true, imm:$cc))*/]>;
-
-
-// 16-bit movcc in IT blocks for Thumb2.
-def tMOVCCr : T1pIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iCMOVr,
- "mov", "\t$dst, $rhs", []>,
- T1Special<{1,0,?,?}>;
-
-def tMOVCCi : T1pIt<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iCMOVi,
- "mov", "\t$dst, $rhs", []>,
- T1General<{1,0,0,?,?}>;
-
-// tLEApcrel - Load a pc-relative address into a register without offending the
-// assembler.
-def tLEApcrel : T1I<(outs tGPR:$dst), (ins i32imm:$label, pred:$p), IIC_iALUi,
- "adr$p\t$dst, #$label", []>,
- T1Encoding<{1,0,1,0,0,?}>; // A6.2 & A8.6.10
-
-def tLEApcrelJT : T1I<(outs tGPR:$dst),
- (ins i32imm:$label, nohash_imm:$id, pred:$p),
- IIC_iALUi, "adr$p\t$dst, #${label}_${id}", []>,
- T1Encoding<{1,0,1,0,0,?}>; // A6.2 & A8.6.10
-
-//===----------------------------------------------------------------------===//
-// TLS Instructions
-//
-
-// __aeabi_read_tp preserves the registers r1-r3.
-let isCall = 1,
- Defs = [R0, LR] in {
- def tTPsoft : TIx2<0b11110, 0b11, 1, (outs), (ins), IIC_Br,
- "bl\t__aeabi_read_tp",
- [(set R0, ARMthread_pointer)]>;
-}
-
-// SJLJ Exception handling intrinsics
-// eh_sjlj_setjmp() is an instruction sequence to store the return
-// address and save #0 in R0 for the non-longjmp case.
-// Since by its nature we may be coming from some other function to get
-// here, and we're using the stack frame for the containing function to
-// save/restore registers, we can't keep anything live in regs across
-// the eh_sjlj_setjmp(), else it will almost certainly have been tromped upon
-// when we get here from a longjmp(). We force everything out of registers
-// except for our own input by listing the relevant registers in Defs. By
-// doing so, we also cause the prologue/epilogue code to actively preserve
-// all of the callee-saved registers, which is exactly what we want.
-// The current SP is passed in $val, and we reuse the reg as a scratch.
-let Defs =
- [ R0, R1, R2, R3, R4, R5, R6, R7, R12 ] in {
- def tInt_eh_sjlj_setjmp : ThumbXI<(outs),(ins tGPR:$src, tGPR:$val),
- AddrModeNone, SizeSpecial, NoItinerary,
- "str\t$val, [$src, #8]\t@ begin eh.setjmp\n"
- "\tmov\t$val, pc\n"
- "\tadds\t$val, #9\n"
- "\tstr\t$val, [$src, #4]\n"
- "\tmovs\tr0, #0\n"
- "\tb\t1f\n"
- "\tmovs\tr0, #1\t@ end eh.setjmp\n"
- "1:", "",
- [(set R0, (ARMeh_sjlj_setjmp tGPR:$src, tGPR:$val))]>;
-}
-//===----------------------------------------------------------------------===//
-// Non-Instruction Patterns
-//
-
-// Add with carry
-def : T1Pat<(addc tGPR:$lhs, imm0_7:$rhs),
- (tADDi3 tGPR:$lhs, imm0_7:$rhs)>;
-def : T1Pat<(addc tGPR:$lhs, imm8_255:$rhs),
- (tADDi8 tGPR:$lhs, imm8_255:$rhs)>;
-def : T1Pat<(addc tGPR:$lhs, tGPR:$rhs),
- (tADDrr tGPR:$lhs, tGPR:$rhs)>;
-
-// Subtract with carry
-def : T1Pat<(addc tGPR:$lhs, imm0_7_neg:$rhs),
- (tSUBi3 tGPR:$lhs, imm0_7_neg:$rhs)>;
-def : T1Pat<(addc tGPR:$lhs, imm8_255_neg:$rhs),
- (tSUBi8 tGPR:$lhs, imm8_255_neg:$rhs)>;
-def : T1Pat<(subc tGPR:$lhs, tGPR:$rhs),
- (tSUBrr tGPR:$lhs, tGPR:$rhs)>;
-
-// ConstantPool, GlobalAddress
-def : T1Pat<(ARMWrapper tglobaladdr :$dst), (tLEApcrel tglobaladdr :$dst)>;
-def : T1Pat<(ARMWrapper tconstpool :$dst), (tLEApcrel tconstpool :$dst)>;
-
-// JumpTable
-def : T1Pat<(ARMWrapperJT tjumptable:$dst, imm:$id),
- (tLEApcrelJT tjumptable:$dst, imm:$id)>;
-
-// Direct calls
-def : T1Pat<(ARMtcall texternalsym:$func), (tBL texternalsym:$func)>,
- Requires<[IsThumb, IsNotDarwin]>;
-def : T1Pat<(ARMtcall texternalsym:$func), (tBLr9 texternalsym:$func)>,
- Requires<[IsThumb, IsDarwin]>;
-
-def : Tv5Pat<(ARMcall texternalsym:$func), (tBLXi texternalsym:$func)>,
- Requires<[IsThumb, HasV5T, IsNotDarwin]>;
-def : Tv5Pat<(ARMcall texternalsym:$func), (tBLXi_r9 texternalsym:$func)>,
- Requires<[IsThumb, HasV5T, IsDarwin]>;
-
-// Indirect calls to ARM routines
-def : Tv5Pat<(ARMcall GPR:$dst), (tBLXr GPR:$dst)>,
- Requires<[IsThumb, HasV5T, IsNotDarwin]>;
-def : Tv5Pat<(ARMcall GPR:$dst), (tBLXr_r9 GPR:$dst)>,
- Requires<[IsThumb, HasV5T, IsDarwin]>;
-
-// zextload i1 -> zextload i8
-def : T1Pat<(zextloadi1 t_addrmode_s1:$addr),
- (tLDRB t_addrmode_s1:$addr)>;
-
-// extload -> zextload
-def : T1Pat<(extloadi1 t_addrmode_s1:$addr), (tLDRB t_addrmode_s1:$addr)>;
-def : T1Pat<(extloadi8 t_addrmode_s1:$addr), (tLDRB t_addrmode_s1:$addr)>;
-def : T1Pat<(extloadi16 t_addrmode_s2:$addr), (tLDRH t_addrmode_s2:$addr)>;
-
-// If it's impossible to use [r,r] address mode for sextload, select to
-// ldr{b|h} + sxt{b|h} instead.
-def : T1Pat<(sextloadi8 t_addrmode_s1:$addr),
- (tSXTB (tLDRB t_addrmode_s1:$addr))>,
- Requires<[IsThumb1Only, HasV6]>;
-def : T1Pat<(sextloadi16 t_addrmode_s2:$addr),
- (tSXTH (tLDRH t_addrmode_s2:$addr))>,
- Requires<[IsThumb1Only, HasV6]>;
-
-def : T1Pat<(sextloadi8 t_addrmode_s1:$addr),
- (tASRri (tLSLri (tLDRB t_addrmode_s1:$addr), 24), 24)>;
-def : T1Pat<(sextloadi16 t_addrmode_s1:$addr),
- (tASRri (tLSLri (tLDRH t_addrmode_s1:$addr), 16), 16)>;
-
-// Large immediate handling.
-
-// Two piece imms.
-def : T1Pat<(i32 thumb_immshifted:$src),
- (tLSLri (tMOVi8 (thumb_immshifted_val imm:$src)),
- (thumb_immshifted_shamt imm:$src))>;
-
-def : T1Pat<(i32 imm0_255_comp:$src),
- (tMVN (tMOVi8 (imm_comp_XFORM imm:$src)))>;
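
Taken together, the patterns above give a simple materialization order for 32-bit constants in Thumb-1: a plain tMOVi8 when the value fits in 8 bits, tMOVi8 + tLSLri for a shifted 8-bit piece, tMOVi8 + tMVN when the complement fits in 8 bits, and otherwise a constant-pool load (tLDRpci). A hedged C++ sketch of that decision order (helper names are illustrative, not LLVM APIs):

    #include <cstdint>

    enum class ThumbImmStrategy { MovI8, MovLsl, MovMvn, ConstantPool };

    // Illustrative only: mirrors the pattern order above, not the selector itself.
    static ThumbImmStrategy pickThumbImmStrategy(uint32_t V) {
      if (V <= 0xFF)
        return ThumbImmStrategy::MovI8;           // handled by tMOVi8 directly
      for (unsigned S = 1; S < 25; ++S)           // thumb_immshifted case
        if ((V & ~(0xFFu << S)) == 0)
          return ThumbImmStrategy::MovLsl;        // tMOVi8 + tLSLri
      if (~V <= 0xFFu)
        return ThumbImmStrategy::MovMvn;          // imm0_255_comp: tMOVi8 + tMVN
      return ThumbImmStrategy::ConstantPool;      // fall back to a tLDRpci load
    }
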
-
-// Pseudo instruction that combines ldr from constpool and add pc. This should
-// be expanded into two instructions late to allow if-conversion and
-// scheduling.
-let isReMaterializable = 1 in
-def tLDRpci_pic : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr, pclabel:$cp),
- NoItinerary, "@ ldr.n\t$dst, $addr\n$cp:\n\tadd\t$dst, pc",
- [(set GPR:$dst, (ARMpic_add (load (ARMWrapper tconstpool:$addr)),
- imm:$cp))]>,
- Requires<[IsThumb1Only]>;
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMInstrThumb2.td b/libclamav/c++/llvm/lib/Target/ARM/ARMInstrThumb2.td
deleted file mode 100644
index 6241766..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ /dev/null
@@ -1,2662 +0,0 @@
-//===- ARMInstrThumb2.td - Thumb2 support for ARM -------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the Thumb2 instruction set.
-//
-//===----------------------------------------------------------------------===//
-
-// IT block predicate field
-def it_pred : Operand<i32> {
- let PrintMethod = "printMandatoryPredicateOperand";
-}
-
-// IT block condition mask
-def it_mask : Operand<i32> {
- let PrintMethod = "printThumbITMask";
-}
-
-// Table branch address
-def tb_addrmode : Operand<i32> {
- let PrintMethod = "printTBAddrMode";
-}
-
-// Shifted operands. No register controlled shifts for Thumb2.
-// Note: We do not support rrx shifted operands yet.
-def t2_so_reg : Operand<i32>, // reg imm
- ComplexPattern<i32, 2, "SelectT2ShifterOperandReg",
- [shl,srl,sra,rotr]> {
- let PrintMethod = "printT2SOOperand";
- let MIOperandInfo = (ops GPR, i32imm);
-}
-
-// t2_so_imm_not_XFORM - Return the complement of a t2_so_imm value
-def t2_so_imm_not_XFORM : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), MVT::i32);
-}]>;
-
-// t2_so_imm_neg_XFORM - Return the negation of a t2_so_imm value
-def t2_so_imm_neg_XFORM : SDNodeXForm<imm, [{
- return CurDAG->getTargetConstant(-((int)N->getZExtValue()), MVT::i32);
-}]>;
-
-// t2_so_imm - Match a 32-bit immediate operand, which is an
-// 8-bit immediate rotated by an arbitrary number of bits, or an 8-bit
-// immediate splatted into multiple bytes of the word. t2_so_imm values are
-// represented in the imm field in the same 12-bit form that they are encoded
-// into t2_so_imm instructions: the 8-bit immediate is the least significant
-// bits [bits 0-7], the 4-bit shift/splat amount is the next 4 bits [bits 8-11].
-def t2_so_imm : Operand<i32>,
- PatLeaf<(imm), [{
- return ARM_AM::getT2SOImmVal((uint32_t)N->getZExtValue()) != -1;
-}]>;
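
The comment above describes the Thumb-2 "modified immediate" form matched by t2_so_imm: an 8-bit value either splatted across the word or rotated into position. A rough C++ sketch of one way to test whether a 32-bit constant fits that form, assuming the usual ARMv7 encoding rules (rotations of 8-31 with the top bit of the byte set for the non-splat case); the real check is ARM_AM::getT2SOImmVal and may differ in detail:

    #include <cstdint>

    // Illustrative only: is V representable as a Thumb-2 modified immediate?
    static bool isT2ModifiedImm(uint32_t V) {
      if (V <= 0xFF) return true;                        // plain 8-bit value
      uint32_t B0 = V & 0xFF, B1 = (V >> 8) & 0xFF;
      if (V == (B0 | (B0 << 16)))        return true;    // 0x00XY00XY splat
      if (V == ((B1 << 8) | (B1 << 24))) return true;    // 0xXY00XY00 splat
      if (V == B0 * 0x01010101u)         return true;    // 0xXYXYXYXY splat
      for (unsigned Rot = 8; Rot < 32; ++Rot) {          // undo a rotate-right
        uint32_t Unrot = (V << Rot) | (V >> (32 - Rot));
        if (Unrot <= 0xFF && (Unrot & 0x80)) return true;
      }
      return false;
    }
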
-
-// t2_so_imm_not - Match an immediate that is a complement
-// of a t2_so_imm.
-def t2_so_imm_not : Operand<i32>,
- PatLeaf<(imm), [{
- return ARM_AM::getT2SOImmVal(~((uint32_t)N->getZExtValue())) != -1;
-}], t2_so_imm_not_XFORM>;
-
-// t2_so_imm_neg - Match an immediate that is a negation of a t2_so_imm.
-def t2_so_imm_neg : Operand<i32>,
- PatLeaf<(imm), [{
- return ARM_AM::getT2SOImmVal(-((int)N->getZExtValue())) != -1;
-}], t2_so_imm_neg_XFORM>;
-
-// Break t2_so_imm's up into two pieces. This handles immediates with up to 16
-// bits set in them. This uses t2_so_imm2part to match and t2_so_imm2part_[12]
-// to get the first/second pieces.
-def t2_so_imm2part : Operand<i32>,
- PatLeaf<(imm), [{
- return ARM_AM::isT2SOImmTwoPartVal((unsigned)N->getZExtValue());
- }]> {
-}
-
-def t2_so_imm2part_1 : SDNodeXForm<imm, [{
- unsigned V = ARM_AM::getT2SOImmTwoPartFirst((unsigned)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-}]>;
-
-def t2_so_imm2part_2 : SDNodeXForm<imm, [{
- unsigned V = ARM_AM::getT2SOImmTwoPartSecond((unsigned)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-}]>;
-
-def t2_so_neg_imm2part : Operand<i32>, PatLeaf<(imm), [{
- return ARM_AM::isT2SOImmTwoPartVal(-(int)N->getZExtValue());
- }]> {
-}
-
-def t2_so_neg_imm2part_1 : SDNodeXForm<imm, [{
- unsigned V = ARM_AM::getT2SOImmTwoPartFirst(-(int)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-}]>;
-
-def t2_so_neg_imm2part_2 : SDNodeXForm<imm, [{
- unsigned V = ARM_AM::getT2SOImmTwoPartSecond(-(int)N->getZExtValue());
- return CurDAG->getTargetConstant(V, MVT::i32);
-}]>;
-
-/// imm1_31 predicate - True if the 32-bit immediate is in the range [1,31].
-def imm1_31 : PatLeaf<(i32 imm), [{
- return (int32_t)N->getZExtValue() >= 1 && (int32_t)N->getZExtValue() < 32;
-}]>;
-
-/// imm0_4095 predicate - True if the 32-bit immediate is in the range [0,4095].
-def imm0_4095 : Operand<i32>,
- PatLeaf<(i32 imm), [{
- return (uint32_t)N->getZExtValue() < 4096;
-}]>;
-
-def imm0_4095_neg : PatLeaf<(i32 imm), [{
- return (uint32_t)(-N->getZExtValue()) < 4096;
-}], imm_neg_XFORM>;
-
-def imm0_255_neg : PatLeaf<(i32 imm), [{
- return (uint32_t)(-N->getZExtValue()) < 255;
-}], imm_neg_XFORM>;
-
-// Define Thumb2 specific addressing modes.
-
-// t2addrmode_imm12 := reg + imm12
-def t2addrmode_imm12 : Operand<i32>,
- ComplexPattern<i32, 2, "SelectT2AddrModeImm12", []> {
- let PrintMethod = "printT2AddrModeImm12Operand";
- let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm);
-}
-
-// t2addrmode_imm8 := reg +/- imm8
-def t2addrmode_imm8 : Operand<i32>,
- ComplexPattern<i32, 2, "SelectT2AddrModeImm8", []> {
- let PrintMethod = "printT2AddrModeImm8Operand";
- let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm);
-}
-
-def t2am_imm8_offset : Operand<i32>,
- ComplexPattern<i32, 1, "SelectT2AddrModeImm8Offset", []>{
- let PrintMethod = "printT2AddrModeImm8OffsetOperand";
-}
-
-// t2addrmode_imm8s4 := reg +/- (imm8 << 2)
-def t2addrmode_imm8s4 : Operand<i32>,
- ComplexPattern<i32, 2, "SelectT2AddrModeImm8s4", []> {
- let PrintMethod = "printT2AddrModeImm8s4Operand";
- let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm);
-}
-
-// t2addrmode_so_reg := reg + (reg << imm2)
-def t2addrmode_so_reg : Operand<i32>,
- ComplexPattern<i32, 3, "SelectT2AddrModeSoReg", []> {
- let PrintMethod = "printT2AddrModeSoRegOperand";
- let MIOperandInfo = (ops GPR:$base, GPR:$offsreg, i32imm:$offsimm);
-}
-
-
-//===----------------------------------------------------------------------===//
-// Multiclass helpers...
-//
-
-/// T2I_un_irs - Defines a set of (op reg, {so_imm|r|so_reg}) patterns for a
-/// unary operation that produces a value. These are predicable and can be
-/// changed to modify CPSR.
-multiclass T2I_un_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Cheap = 0, bit ReMat = 0> {
- // shifted imm
- def i : T2sI<(outs GPR:$dst), (ins t2_so_imm:$src), IIC_iMOVi,
- opc, "\t$dst, $src",
- [(set GPR:$dst, (opnode t2_so_imm:$src))]> {
- let isAsCheapAsAMove = Cheap;
- let isReMaterializable = ReMat;
- let Inst{31-27} = 0b11110;
- let Inst{25} = 0;
- let Inst{24-21} = opcod;
- let Inst{20} = ?; // The S bit.
- let Inst{19-16} = 0b1111; // Rn
- let Inst{15} = 0;
- }
- // register
- def r : T2I<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVr,
- opc, ".w\t$dst, $src",
- [(set GPR:$dst, (opnode GPR:$src))]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = opcod;
- let Inst{20} = ?; // The S bit.
- let Inst{19-16} = 0b1111; // Rn
- let Inst{14-12} = 0b000; // imm3
- let Inst{7-6} = 0b00; // imm2
- let Inst{5-4} = 0b00; // type
- }
- // shifted register
- def s : T2I<(outs GPR:$dst), (ins t2_so_reg:$src), IIC_iMOVsi,
- opc, ".w\t$dst, $src",
- [(set GPR:$dst, (opnode t2_so_reg:$src))]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = opcod;
- let Inst{20} = ?; // The S bit.
- let Inst{19-16} = 0b1111; // Rn
- }
-}
-
-/// T2I_bin_irs - Defines a set of (op reg, {so_imm|r|so_reg}) patterns for a
-/// binary operation that produces a value. These are predicable and can be
-/// changed to modify CPSR.
-multiclass T2I_bin_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Commutable = 0, string wide =""> {
- // shifted imm
- def ri : T2sI<(outs GPR:$dst), (ins GPR:$lhs, t2_so_imm:$rhs), IIC_iALUi,
- opc, "\t$dst, $lhs, $rhs",
- [(set GPR:$dst, (opnode GPR:$lhs, t2_so_imm:$rhs))]> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 0;
- let Inst{24-21} = opcod;
- let Inst{20} = ?; // The S bit.
- let Inst{15} = 0;
- }
- // register
- def rr : T2sI<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr,
- opc, !strconcat(wide, "\t$dst, $lhs, $rhs"),
- [(set GPR:$dst, (opnode GPR:$lhs, GPR:$rhs))]> {
- let isCommutable = Commutable;
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = opcod;
- let Inst{20} = ?; // The S bit.
- let Inst{14-12} = 0b000; // imm3
- let Inst{7-6} = 0b00; // imm2
- let Inst{5-4} = 0b00; // type
- }
- // shifted register
- def rs : T2sI<(outs GPR:$dst), (ins GPR:$lhs, t2_so_reg:$rhs), IIC_iALUsi,
- opc, !strconcat(wide, "\t$dst, $lhs, $rhs"),
- [(set GPR:$dst, (opnode GPR:$lhs, t2_so_reg:$rhs))]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = opcod;
- let Inst{20} = ?; // The S bit.
- }
-}
-
-/// T2I_bin_w_irs - Same as T2I_bin_irs except these operations need
-// the ".w" prefix to indicate that they are wide.
-multiclass T2I_bin_w_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Commutable = 0> :
- T2I_bin_irs<opcod, opc, opnode, Commutable, ".w">;
-
-/// T2I_rbin_is - Same as T2I_bin_irs except the order of operands is
-/// reversed. It doesn't define the 'rr' form since it's handled by its
-/// T2I_bin_irs counterpart.
-multiclass T2I_rbin_is<bits<4> opcod, string opc, PatFrag opnode> {
- // shifted imm
- def ri : T2I<(outs GPR:$dst), (ins GPR:$rhs, t2_so_imm:$lhs), IIC_iALUi,
- opc, ".w\t$dst, $rhs, $lhs",
- [(set GPR:$dst, (opnode t2_so_imm:$lhs, GPR:$rhs))]> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 0;
- let Inst{24-21} = opcod;
- let Inst{20} = 0; // The S bit.
- let Inst{15} = 0;
- }
- // shifted register
- def rs : T2I<(outs GPR:$dst), (ins GPR:$rhs, t2_so_reg:$lhs), IIC_iALUsi,
- opc, "\t$dst, $rhs, $lhs",
- [(set GPR:$dst, (opnode t2_so_reg:$lhs, GPR:$rhs))]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = opcod;
- let Inst{20} = 0; // The S bit.
- }
-}
-
-/// T2I_bin_s_irs - Similar to T2I_bin_irs except it sets the 's' bit so the
-/// instruction modifies the CPSR register.
-let Defs = [CPSR] in {
-multiclass T2I_bin_s_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Commutable = 0> {
- // shifted imm
- def ri : T2I<(outs GPR:$dst), (ins GPR:$lhs, t2_so_imm:$rhs), IIC_iALUi,
- !strconcat(opc, "s"), ".w\t$dst, $lhs, $rhs",
- [(set GPR:$dst, (opnode GPR:$lhs, t2_so_imm:$rhs))]> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 0;
- let Inst{24-21} = opcod;
- let Inst{20} = 1; // The S bit.
- let Inst{15} = 0;
- }
- // register
- def rr : T2I<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr,
- !strconcat(opc, "s"), ".w\t$dst, $lhs, $rhs",
- [(set GPR:$dst, (opnode GPR:$lhs, GPR:$rhs))]> {
- let isCommutable = Commutable;
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = opcod;
- let Inst{20} = 1; // The S bit.
- let Inst{14-12} = 0b000; // imm3
- let Inst{7-6} = 0b00; // imm2
- let Inst{5-4} = 0b00; // type
- }
- // shifted register
- def rs : T2I<(outs GPR:$dst), (ins GPR:$lhs, t2_so_reg:$rhs), IIC_iALUsi,
- !strconcat(opc, "s"), ".w\t$dst, $lhs, $rhs",
- [(set GPR:$dst, (opnode GPR:$lhs, t2_so_reg:$rhs))]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = opcod;
- let Inst{20} = 1; // The S bit.
- }
-}
-}
-
-/// T2I_bin_ii12rs - Defines a set of (op reg, {so_imm|imm0_4095|r|so_reg})
-/// patterns for a binary operation that produces a value.
-multiclass T2I_bin_ii12rs<bits<3> op23_21, string opc, PatFrag opnode,
- bit Commutable = 0> {
- // shifted imm
- def ri : T2sI<(outs GPR:$dst), (ins GPR:$lhs, t2_so_imm:$rhs), IIC_iALUi,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set GPR:$dst, (opnode GPR:$lhs, t2_so_imm:$rhs))]> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 0;
- let Inst{24} = 1;
- let Inst{23-21} = op23_21;
- let Inst{20} = 0; // The S bit.
- let Inst{15} = 0;
- }
- // 12-bit imm
- def ri12 : T2sI<(outs GPR:$dst), (ins GPR:$lhs, imm0_4095:$rhs), IIC_iALUi,
- !strconcat(opc, "w"), "\t$dst, $lhs, $rhs",
- [(set GPR:$dst, (opnode GPR:$lhs, imm0_4095:$rhs))]> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 1;
- let Inst{24} = 0;
- let Inst{23-21} = op23_21;
- let Inst{20} = 0; // The S bit.
- let Inst{15} = 0;
- }
- // register
- def rr : T2sI<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set GPR:$dst, (opnode GPR:$lhs, GPR:$rhs))]> {
- let isCommutable = Commutable;
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24} = 1;
- let Inst{23-21} = op23_21;
- let Inst{20} = 0; // The S bit.
- let Inst{14-12} = 0b000; // imm3
- let Inst{7-6} = 0b00; // imm2
- let Inst{5-4} = 0b00; // type
- }
- // shifted register
- def rs : T2sI<(outs GPR:$dst), (ins GPR:$lhs, t2_so_reg:$rhs), IIC_iALUsi,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set GPR:$dst, (opnode GPR:$lhs, t2_so_reg:$rhs))]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24} = 1;
- let Inst{23-21} = op23_21;
- let Inst{20} = 0; // The S bit.
- }
-}
-
-/// T2I_adde_sube_irs - Defines a set of (op reg, {so_imm|r|so_reg}) patterns
-/// for a binary operation that produces a value and uses the carry
-/// bit. It's not predicable.
-let Uses = [CPSR] in {
-multiclass T2I_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Commutable = 0> {
- // shifted imm
- def ri : T2sI<(outs GPR:$dst), (ins GPR:$lhs, t2_so_imm:$rhs), IIC_iALUi,
- opc, "\t$dst, $lhs, $rhs",
- [(set GPR:$dst, (opnode GPR:$lhs, t2_so_imm:$rhs))]>,
- Requires<[IsThumb2]> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 0;
- let Inst{24-21} = opcod;
- let Inst{20} = 0; // The S bit.
- let Inst{15} = 0;
- }
- // register
- def rr : T2sI<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set GPR:$dst, (opnode GPR:$lhs, GPR:$rhs))]>,
- Requires<[IsThumb2]> {
- let isCommutable = Commutable;
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = opcod;
- let Inst{20} = 0; // The S bit.
- let Inst{14-12} = 0b000; // imm3
- let Inst{7-6} = 0b00; // imm2
- let Inst{5-4} = 0b00; // type
- }
- // shifted register
- def rs : T2sI<(outs GPR:$dst), (ins GPR:$lhs, t2_so_reg:$rhs), IIC_iALUsi,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set GPR:$dst, (opnode GPR:$lhs, t2_so_reg:$rhs))]>,
- Requires<[IsThumb2]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = opcod;
- let Inst{20} = 0; // The S bit.
- }
-}
-
-// Carry setting variants
-let Defs = [CPSR] in {
-multiclass T2I_adde_sube_s_irs<bits<4> opcod, string opc, PatFrag opnode,
- bit Commutable = 0> {
- // shifted imm
- def ri : T2sI<(outs GPR:$dst), (ins GPR:$lhs, t2_so_imm:$rhs), IIC_iALUi,
- opc, "\t$dst, $lhs, $rhs",
- [(set GPR:$dst, (opnode GPR:$lhs, t2_so_imm:$rhs))]>,
- Requires<[IsThumb2]> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 0;
- let Inst{24-21} = opcod;
- let Inst{20} = 1; // The S bit.
- let Inst{15} = 0;
- }
- // register
- def rr : T2sI<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set GPR:$dst, (opnode GPR:$lhs, GPR:$rhs))]>,
- Requires<[IsThumb2]> {
- let isCommutable = Commutable;
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = opcod;
- let Inst{20} = 1; // The S bit.
- let Inst{14-12} = 0b000; // imm3
- let Inst{7-6} = 0b00; // imm2
- let Inst{5-4} = 0b00; // type
- }
- // shifted register
- def rs : T2sI<(outs GPR:$dst), (ins GPR:$lhs, t2_so_reg:$rhs), IIC_iALUsi,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set GPR:$dst, (opnode GPR:$lhs, t2_so_reg:$rhs))]>,
- Requires<[IsThumb2]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = opcod;
- let Inst{20} = 1; // The S bit.
- }
-}
-}
-}
-
-/// T2I_rbin_s_is - Same as T2I_rbin_is except sets 's' bit.
-let Defs = [CPSR] in {
-multiclass T2I_rbin_s_is<bits<4> opcod, string opc, PatFrag opnode> {
- // shifted imm
- def ri : T2XI<(outs GPR:$dst), (ins GPR:$rhs, t2_so_imm:$lhs, cc_out:$s),
- IIC_iALUi,
- !strconcat(opc, "${s}.w\t$dst, $rhs, $lhs"),
- [(set GPR:$dst, (opnode t2_so_imm:$lhs, GPR:$rhs))]> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 0;
- let Inst{24-21} = opcod;
- let Inst{20} = 1; // The S bit.
- let Inst{15} = 0;
- }
- // shifted register
- def rs : T2XI<(outs GPR:$dst), (ins GPR:$rhs, t2_so_reg:$lhs, cc_out:$s),
- IIC_iALUsi,
- !strconcat(opc, "${s}\t$dst, $rhs, $lhs"),
- [(set GPR:$dst, (opnode t2_so_reg:$lhs, GPR:$rhs))]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = opcod;
- let Inst{20} = 1; // The S bit.
- }
-}
-}
-
-/// T2I_sh_ir - Defines a set of (op reg, {so_imm|r}) patterns for a shift /
-/// rotate operation that produces a value.
-multiclass T2I_sh_ir<bits<2> opcod, string opc, PatFrag opnode> {
- // 5-bit imm
- def ri : T2sI<(outs GPR:$dst), (ins GPR:$lhs, i32imm:$rhs), IIC_iMOVsi,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set GPR:$dst, (opnode GPR:$lhs, imm1_31:$rhs))]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-21} = 0b010010;
- let Inst{19-16} = 0b1111; // Rn
- let Inst{5-4} = opcod;
- }
- // register
- def rr : T2sI<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iMOVsr,
- opc, ".w\t$dst, $lhs, $rhs",
- [(set GPR:$dst, (opnode GPR:$lhs, GPR:$rhs))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0100;
- let Inst{22-21} = opcod;
- let Inst{15-12} = 0b1111;
- let Inst{7-4} = 0b0000;
- }
-}
-
-/// T2I_cmp_irs - Defines a set of (op r, {so_imm|r|so_reg}) cmp / test
-/// patterns. Similar to T2I_bin_irs except the instruction does not produce
-/// an explicit result, only implicitly sets CPSR.
-let Defs = [CPSR] in {
-multiclass T2I_cmp_irs<bits<4> opcod, string opc, PatFrag opnode> {
- // shifted imm
- def ri : T2I<(outs), (ins GPR:$lhs, t2_so_imm:$rhs), IIC_iCMPi,
- opc, ".w\t$lhs, $rhs",
- [(opnode GPR:$lhs, t2_so_imm:$rhs)]> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 0;
- let Inst{24-21} = opcod;
- let Inst{20} = 1; // The S bit.
- let Inst{15} = 0;
- let Inst{11-8} = 0b1111; // Rd
- }
- // register
- def rr : T2I<(outs), (ins GPR:$lhs, GPR:$rhs), IIC_iCMPr,
- opc, ".w\t$lhs, $rhs",
- [(opnode GPR:$lhs, GPR:$rhs)]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = opcod;
- let Inst{20} = 1; // The S bit.
- let Inst{14-12} = 0b000; // imm3
- let Inst{11-8} = 0b1111; // Rd
- let Inst{7-6} = 0b00; // imm2
- let Inst{5-4} = 0b00; // type
- }
- // shifted register
- def rs : T2I<(outs), (ins GPR:$lhs, t2_so_reg:$rhs), IIC_iCMPsi,
- opc, ".w\t$lhs, $rhs",
- [(opnode GPR:$lhs, t2_so_reg:$rhs)]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = opcod;
- let Inst{20} = 1; // The S bit.
- let Inst{11-8} = 0b1111; // Rd
- }
-}
-}
-
-/// T2I_ld - Defines a set of (op r, {imm12|imm8|so_reg}) load patterns.
-multiclass T2I_ld<bit signed, bits<2> opcod, string opc, PatFrag opnode> {
- def i12 : T2Ii12<(outs GPR:$dst), (ins t2addrmode_imm12:$addr), IIC_iLoadi,
- opc, ".w\t$dst, $addr",
- [(set GPR:$dst, (opnode t2addrmode_imm12:$addr))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-25} = 0b00;
- let Inst{24} = signed;
- let Inst{23} = 1;
- let Inst{22-21} = opcod;
- let Inst{20} = 1; // load
- }
- def i8 : T2Ii8 <(outs GPR:$dst), (ins t2addrmode_imm8:$addr), IIC_iLoadi,
- opc, "\t$dst, $addr",
- [(set GPR:$dst, (opnode t2addrmode_imm8:$addr))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-25} = 0b00;
- let Inst{24} = signed;
- let Inst{23} = 0;
- let Inst{22-21} = opcod;
- let Inst{20} = 1; // load
- let Inst{11} = 1;
- // Offset: index==TRUE, wback==FALSE
- let Inst{10} = 1; // The P bit.
- let Inst{8} = 0; // The W bit.
- }
- def s : T2Iso <(outs GPR:$dst), (ins t2addrmode_so_reg:$addr), IIC_iLoadr,
- opc, ".w\t$dst, $addr",
- [(set GPR:$dst, (opnode t2addrmode_so_reg:$addr))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-25} = 0b00;
- let Inst{24} = signed;
- let Inst{23} = 0;
- let Inst{22-21} = opcod;
- let Inst{20} = 1; // load
- let Inst{11-6} = 0b000000;
- }
- def pci : T2Ipc <(outs GPR:$dst), (ins i32imm:$addr), IIC_iLoadi,
- opc, ".w\t$dst, $addr",
- [(set GPR:$dst, (opnode (ARMWrapper tconstpool:$addr)))]> {
- let isReMaterializable = 1;
- let Inst{31-27} = 0b11111;
- let Inst{26-25} = 0b00;
- let Inst{24} = signed;
- let Inst{23} = ?; // add = (U == '1')
- let Inst{22-21} = opcod;
- let Inst{20} = 1; // load
- let Inst{19-16} = 0b1111; // Rn
- }
-}
-
-/// T2I_st - Defines a set of (op r, {imm12|imm8|so_reg}) store patterns.
-multiclass T2I_st<bits<2> opcod, string opc, PatFrag opnode> {
- def i12 : T2Ii12<(outs), (ins GPR:$src, t2addrmode_imm12:$addr), IIC_iStorei,
- opc, ".w\t$src, $addr",
- [(opnode GPR:$src, t2addrmode_imm12:$addr)]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0001;
- let Inst{22-21} = opcod;
- let Inst{20} = 0; // !load
- }
- def i8 : T2Ii8 <(outs), (ins GPR:$src, t2addrmode_imm8:$addr), IIC_iStorei,
- opc, "\t$src, $addr",
- [(opnode GPR:$src, t2addrmode_imm8:$addr)]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0000;
- let Inst{22-21} = opcod;
- let Inst{20} = 0; // !load
- let Inst{11} = 1;
- // Offset: index==TRUE, wback==FALSE
- let Inst{10} = 1; // The P bit.
- let Inst{8} = 0; // The W bit.
- }
- def s : T2Iso <(outs), (ins GPR:$src, t2addrmode_so_reg:$addr), IIC_iStorer,
- opc, ".w\t$src, $addr",
- [(opnode GPR:$src, t2addrmode_so_reg:$addr)]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0000;
- let Inst{22-21} = opcod;
- let Inst{20} = 0; // !load
- let Inst{11-6} = 0b000000;
- }
-}
-
-/// T2I_unary_rrot - A unary operation with two forms: one whose operand is a
-/// register and one whose operand is a register rotated by 8/16/24.
-multiclass T2I_unary_rrot<bits<3> opcod, string opc, PatFrag opnode> {
- def r : T2I<(outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- opc, ".w\t$dst, $src",
- [(set GPR:$dst, (opnode GPR:$src))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0100;
- let Inst{22-20} = opcod;
- let Inst{19-16} = 0b1111; // Rn
- let Inst{15-12} = 0b1111;
- let Inst{7} = 1;
- let Inst{5-4} = 0b00; // rotate
- }
- def r_rot : T2I<(outs GPR:$dst), (ins GPR:$src, i32imm:$rot), IIC_iUNAsi,
- opc, ".w\t$dst, $src, ror $rot",
- [(set GPR:$dst, (opnode (rotr GPR:$src, rot_imm:$rot)))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0100;
- let Inst{22-20} = opcod;
- let Inst{19-16} = 0b1111; // Rn
- let Inst{15-12} = 0b1111;
- let Inst{7} = 1;
- let Inst{5-4} = {?,?}; // rotate
- }
-}
-
-// SXTB16 and UXTB16 do not need the .w qualifier.
-multiclass T2I_unary_rrot_nw<bits<3> opcod, string opc, PatFrag opnode> {
- def r : T2I<(outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- opc, "\t$dst, $src",
- [(set GPR:$dst, (opnode GPR:$src))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0100;
- let Inst{22-20} = opcod;
- let Inst{19-16} = 0b1111; // Rn
- let Inst{15-12} = 0b1111;
- let Inst{7} = 1;
- let Inst{5-4} = 0b00; // rotate
- }
- def r_rot : T2I<(outs GPR:$dst), (ins GPR:$src, i32imm:$rot), IIC_iUNAsi,
- opc, "\t$dst, $src, ror $rot",
- [(set GPR:$dst, (opnode (rotr GPR:$src, rot_imm:$rot)))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0100;
- let Inst{22-20} = opcod;
- let Inst{19-16} = 0b1111; // Rn
- let Inst{15-12} = 0b1111;
- let Inst{7} = 1;
- let Inst{5-4} = {?,?}; // rotate
- }
-}
-
-// DO variant - disassembly only, no pattern
-
-multiclass T2I_unary_rrot_DO<bits<3> opcod, string opc> {
- def r : T2I<(outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- opc, "\t$dst, $src", []> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0100;
- let Inst{22-20} = opcod;
- let Inst{19-16} = 0b1111; // Rn
- let Inst{15-12} = 0b1111;
- let Inst{7} = 1;
- let Inst{5-4} = 0b00; // rotate
- }
- def r_rot : T2I<(outs GPR:$dst), (ins GPR:$src, i32imm:$rot), IIC_iUNAsi,
- opc, "\t$dst, $src, ror $rot", []> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0100;
- let Inst{22-20} = opcod;
- let Inst{19-16} = 0b1111; // Rn
- let Inst{15-12} = 0b1111;
- let Inst{7} = 1;
- let Inst{5-4} = {?,?}; // rotate
- }
-}
-
-/// T2I_bin_rrot - A binary operation with two forms: one whose operand is a
-/// register and one whose operand is a register rotated by 8/16/24.
-multiclass T2I_bin_rrot<bits<3> opcod, string opc, PatFrag opnode> {
- def rr : T2I<(outs GPR:$dst), (ins GPR:$LHS, GPR:$RHS), IIC_iALUr,
- opc, "\t$dst, $LHS, $RHS",
- [(set GPR:$dst, (opnode GPR:$LHS, GPR:$RHS))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0100;
- let Inst{22-20} = opcod;
- let Inst{15-12} = 0b1111;
- let Inst{7} = 1;
- let Inst{5-4} = 0b00; // rotate
- }
- def rr_rot : T2I<(outs GPR:$dst), (ins GPR:$LHS, GPR:$RHS, i32imm:$rot),
- IIC_iALUsr, opc, "\t$dst, $LHS, $RHS, ror $rot",
- [(set GPR:$dst, (opnode GPR:$LHS,
- (rotr GPR:$RHS, rot_imm:$rot)))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0100;
- let Inst{22-20} = opcod;
- let Inst{15-12} = 0b1111;
- let Inst{7} = 1;
- let Inst{5-4} = {?,?}; // rotate
- }
-}
-
-// DO variant - disassembly only, no pattern
-
-multiclass T2I_bin_rrot_DO<bits<3> opcod, string opc> {
- def rr : T2I<(outs GPR:$dst), (ins GPR:$LHS, GPR:$RHS), IIC_iALUr,
- opc, "\t$dst, $LHS, $RHS", []> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0100;
- let Inst{22-20} = opcod;
- let Inst{15-12} = 0b1111;
- let Inst{7} = 1;
- let Inst{5-4} = 0b00; // rotate
- }
- def rr_rot : T2I<(outs GPR:$dst), (ins GPR:$LHS, GPR:$RHS, i32imm:$rot),
- IIC_iALUsr, opc, "\t$dst, $LHS, $RHS, ror $rot", []> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0100;
- let Inst{22-20} = opcod;
- let Inst{15-12} = 0b1111;
- let Inst{7} = 1;
- let Inst{5-4} = {?,?}; // rotate
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Instructions
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Miscellaneous Instructions.
-//
-
-// LEApcrel - Load a pc-relative address into a register without offending the
-// assembler.
-def t2LEApcrel : T2XI<(outs GPR:$dst), (ins i32imm:$label, pred:$p), IIC_iALUi,
- "adr$p.w\t$dst, #$label", []> {
- let Inst{31-27} = 0b11110;
- let Inst{25-24} = 0b10;
- // Inst{23:21} = '11' (add = FALSE) or '00' (add = TRUE)
- let Inst{22} = 0;
- let Inst{20} = 0;
- let Inst{19-16} = 0b1111; // Rn
- let Inst{15} = 0;
-}
-def t2LEApcrelJT : T2XI<(outs GPR:$dst),
- (ins i32imm:$label, nohash_imm:$id, pred:$p), IIC_iALUi,
- "adr$p.w\t$dst, #${label}_${id}", []> {
- let Inst{31-27} = 0b11110;
- let Inst{25-24} = 0b10;
- // Inst{23:21} = '11' (add = FALSE) or '00' (add = TRUE)
- let Inst{22} = 0;
- let Inst{20} = 0;
- let Inst{19-16} = 0b1111; // Rn
- let Inst{15} = 0;
-}
-
-// ADD r, sp, {so_imm|i12}
-def t2ADDrSPi : T2sI<(outs GPR:$dst), (ins GPR:$sp, t2_so_imm:$imm),
- IIC_iALUi, "add", ".w\t$dst, $sp, $imm", []> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 0;
- let Inst{24-21} = 0b1000;
- let Inst{20} = ?; // The S bit.
- let Inst{19-16} = 0b1101; // Rn = sp
- let Inst{15} = 0;
-}
-def t2ADDrSPi12 : T2I<(outs GPR:$dst), (ins GPR:$sp, imm0_4095:$imm),
- IIC_iALUi, "addw", "\t$dst, $sp, $imm", []> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 1;
- let Inst{24-21} = 0b0000;
- let Inst{20} = 0; // The S bit.
- let Inst{19-16} = 0b1101; // Rn = sp
- let Inst{15} = 0;
-}
-
-// ADD r, sp, so_reg
-def t2ADDrSPs : T2sI<(outs GPR:$dst), (ins GPR:$sp, t2_so_reg:$rhs),
- IIC_iALUsi, "add", ".w\t$dst, $sp, $rhs", []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = 0b1000;
- let Inst{20} = ?; // The S bit.
- let Inst{19-16} = 0b1101; // Rn = sp
- let Inst{15} = 0;
-}
-
-// SUB r, sp, {so_imm|i12}
-def t2SUBrSPi : T2sI<(outs GPR:$dst), (ins GPR:$sp, t2_so_imm:$imm),
- IIC_iALUi, "sub", ".w\t$dst, $sp, $imm", []> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 0;
- let Inst{24-21} = 0b1101;
- let Inst{20} = ?; // The S bit.
- let Inst{19-16} = 0b1101; // Rn = sp
- let Inst{15} = 0;
-}
-def t2SUBrSPi12 : T2I<(outs GPR:$dst), (ins GPR:$sp, imm0_4095:$imm),
- IIC_iALUi, "subw", "\t$dst, $sp, $imm", []> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 1;
- let Inst{24-21} = 0b0101;
- let Inst{20} = 0; // The S bit.
- let Inst{19-16} = 0b1101; // Rn = sp
- let Inst{15} = 0;
-}
-
-// SUB r, sp, so_reg
-def t2SUBrSPs : T2sI<(outs GPR:$dst), (ins GPR:$sp, t2_so_reg:$rhs),
- IIC_iALUsi,
- "sub", "\t$dst, $sp, $rhs", []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = 0b1101;
- let Inst{20} = ?; // The S bit.
- let Inst{19-16} = 0b1101; // Rn = sp
- let Inst{15} = 0;
-}
-
-// Signed and unsigned division, for disassembly only
-def t2SDIV : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iALUi,
- "sdiv", "\t$dst, $a, $b", []> {
- let Inst{31-27} = 0b11111;
- let Inst{26-21} = 0b011100;
- let Inst{20} = 0b1;
- let Inst{15-12} = 0b1111;
- let Inst{7-4} = 0b1111;
-}
-
-def t2UDIV : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iALUi,
- "udiv", "\t$dst, $a, $b", []> {
- let Inst{31-27} = 0b11111;
- let Inst{26-21} = 0b011101;
- let Inst{20} = 0b1;
- let Inst{15-12} = 0b1111;
- let Inst{7-4} = 0b1111;
-}
-
-// Pseudo instruction that will expand into a t2SUBrSPi + a copy.
-let usesCustomInserter = 1 in { // Expanded after instruction selection.
-def t2SUBrSPi_ : PseudoInst<(outs GPR:$dst), (ins GPR:$sp, t2_so_imm:$imm),
- NoItinerary, "@ sub.w\t$dst, $sp, $imm", []>;
-def t2SUBrSPi12_ : PseudoInst<(outs GPR:$dst), (ins GPR:$sp, imm0_4095:$imm),
- NoItinerary, "@ subw\t$dst, $sp, $imm", []>;
-def t2SUBrSPs_ : PseudoInst<(outs GPR:$dst), (ins GPR:$sp, t2_so_reg:$rhs),
- NoItinerary, "@ sub\t$dst, $sp, $rhs", []>;
-} // usesCustomInserter
-
-
-//===----------------------------------------------------------------------===//
-// Load / store Instructions.
-//
-
-// Load
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-defm t2LDR : T2I_ld<0, 0b10, "ldr", UnOpFrag<(load node:$Src)>>;
-
-// Loads with zero extension
-defm t2LDRH : T2I_ld<0, 0b01, "ldrh", UnOpFrag<(zextloadi16 node:$Src)>>;
-defm t2LDRB : T2I_ld<0, 0b00, "ldrb", UnOpFrag<(zextloadi8 node:$Src)>>;
-
-// Loads with sign extension
-defm t2LDRSH : T2I_ld<1, 0b01, "ldrsh", UnOpFrag<(sextloadi16 node:$Src)>>;
-defm t2LDRSB : T2I_ld<1, 0b00, "ldrsb", UnOpFrag<(sextloadi8 node:$Src)>>;
-
-let mayLoad = 1, hasExtraDefRegAllocReq = 1 in {
-// Load doubleword
-def t2LDRDi8 : T2Ii8s4<1, 0, 1, (outs GPR:$dst1, GPR:$dst2),
- (ins t2addrmode_imm8s4:$addr),
- IIC_iLoadi, "ldrd", "\t$dst1, $addr", []>;
-def t2LDRDpci : T2Ii8s4<?, ?, 1, (outs GPR:$dst1, GPR:$dst2),
- (ins i32imm:$addr), IIC_iLoadi,
- "ldrd", "\t$dst1, $addr", []> {
- let Inst{19-16} = 0b1111; // Rn
-}
-}
-
-// zextload i1 -> zextload i8
-def : T2Pat<(zextloadi1 t2addrmode_imm12:$addr),
- (t2LDRBi12 t2addrmode_imm12:$addr)>;
-def : T2Pat<(zextloadi1 t2addrmode_imm8:$addr),
- (t2LDRBi8 t2addrmode_imm8:$addr)>;
-def : T2Pat<(zextloadi1 t2addrmode_so_reg:$addr),
- (t2LDRBs t2addrmode_so_reg:$addr)>;
-def : T2Pat<(zextloadi1 (ARMWrapper tconstpool:$addr)),
- (t2LDRBpci tconstpool:$addr)>;
-
-// extload -> zextload
-// FIXME: Reduce the number of patterns by legalizing extload to zextload
-// earlier?
-def : T2Pat<(extloadi1 t2addrmode_imm12:$addr),
- (t2LDRBi12 t2addrmode_imm12:$addr)>;
-def : T2Pat<(extloadi1 t2addrmode_imm8:$addr),
- (t2LDRBi8 t2addrmode_imm8:$addr)>;
-def : T2Pat<(extloadi1 t2addrmode_so_reg:$addr),
- (t2LDRBs t2addrmode_so_reg:$addr)>;
-def : T2Pat<(extloadi1 (ARMWrapper tconstpool:$addr)),
- (t2LDRBpci tconstpool:$addr)>;
-
-def : T2Pat<(extloadi8 t2addrmode_imm12:$addr),
- (t2LDRBi12 t2addrmode_imm12:$addr)>;
-def : T2Pat<(extloadi8 t2addrmode_imm8:$addr),
- (t2LDRBi8 t2addrmode_imm8:$addr)>;
-def : T2Pat<(extloadi8 t2addrmode_so_reg:$addr),
- (t2LDRBs t2addrmode_so_reg:$addr)>;
-def : T2Pat<(extloadi8 (ARMWrapper tconstpool:$addr)),
- (t2LDRBpci tconstpool:$addr)>;
-
-def : T2Pat<(extloadi16 t2addrmode_imm12:$addr),
- (t2LDRHi12 t2addrmode_imm12:$addr)>;
-def : T2Pat<(extloadi16 t2addrmode_imm8:$addr),
- (t2LDRHi8 t2addrmode_imm8:$addr)>;
-def : T2Pat<(extloadi16 t2addrmode_so_reg:$addr),
- (t2LDRHs t2addrmode_so_reg:$addr)>;
-def : T2Pat<(extloadi16 (ARMWrapper tconstpool:$addr)),
- (t2LDRHpci tconstpool:$addr)>;
-
-// Indexed loads
-let mayLoad = 1 in {
-def t2LDR_PRE : T2Iidxldst<0, 0b10, 1, 1, (outs GPR:$dst, GPR:$base_wb),
- (ins t2addrmode_imm8:$addr),
- AddrModeT2_i8, IndexModePre, IIC_iLoadiu,
- "ldr", "\t$dst, $addr!", "$addr.base = $base_wb",
- []>;
-
-def t2LDR_POST : T2Iidxldst<0, 0b10, 1, 0, (outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePost, IIC_iLoadiu,
- "ldr", "\t$dst, [$base], $offset", "$base = $base_wb",
- []>;
-
-def t2LDRB_PRE : T2Iidxldst<0, 0b00, 1, 1, (outs GPR:$dst, GPR:$base_wb),
- (ins t2addrmode_imm8:$addr),
- AddrModeT2_i8, IndexModePre, IIC_iLoadiu,
- "ldrb", "\t$dst, $addr!", "$addr.base = $base_wb",
- []>;
-def t2LDRB_POST : T2Iidxldst<0, 0b00, 1, 0, (outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePost, IIC_iLoadiu,
- "ldrb", "\t$dst, [$base], $offset", "$base = $base_wb",
- []>;
-
-def t2LDRH_PRE : T2Iidxldst<0, 0b01, 1, 1, (outs GPR:$dst, GPR:$base_wb),
- (ins t2addrmode_imm8:$addr),
- AddrModeT2_i8, IndexModePre, IIC_iLoadiu,
- "ldrh", "\t$dst, $addr!", "$addr.base = $base_wb",
- []>;
-def t2LDRH_POST : T2Iidxldst<0, 0b01, 1, 0, (outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePost, IIC_iLoadiu,
- "ldrh", "\t$dst, [$base], $offset", "$base = $base_wb",
- []>;
-
-def t2LDRSB_PRE : T2Iidxldst<1, 0b00, 1, 1, (outs GPR:$dst, GPR:$base_wb),
- (ins t2addrmode_imm8:$addr),
- AddrModeT2_i8, IndexModePre, IIC_iLoadiu,
- "ldrsb", "\t$dst, $addr!", "$addr.base = $base_wb",
- []>;
-def t2LDRSB_POST : T2Iidxldst<1, 0b00, 1, 0, (outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePost, IIC_iLoadiu,
- "ldrsb", "\t$dst, [$base], $offset", "$base = $base_wb",
- []>;
-
-def t2LDRSH_PRE : T2Iidxldst<1, 0b01, 1, 1, (outs GPR:$dst, GPR:$base_wb),
- (ins t2addrmode_imm8:$addr),
- AddrModeT2_i8, IndexModePre, IIC_iLoadiu,
- "ldrsh", "\t$dst, $addr!", "$addr.base = $base_wb",
- []>;
-def t2LDRSH_POST : T2Iidxldst<1, 0b01, 1, 0, (outs GPR:$dst, GPR:$base_wb),
- (ins GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePost, IIC_iLoadiu,
- "ldrsh", "\t$dst, [$base], $offset", "$base = $base_wb",
- []>;
-}
-
-// LDRT, LDRBT, LDRHT, LDRSBT, LDRSHT all have offset mode (PUW=0b110) and are
-// for disassembly only.
-// Ref: A8.6.57 LDR (immediate, Thumb) Encoding T4
-class T2IldT<bit signed, bits<2> type, string opc>
- : T2Ii8<(outs GPR:$dst), (ins t2addrmode_imm8:$addr), IIC_iLoadi, opc,
- "\t$dst, $addr", []> {
- let Inst{31-27} = 0b11111;
- let Inst{26-25} = 0b00;
- let Inst{24} = signed;
- let Inst{23} = 0;
- let Inst{22-21} = type;
- let Inst{20} = 1; // load
- let Inst{11} = 1;
- let Inst{10-8} = 0b110; // PUW.
-}
-
-def t2LDRT : T2IldT<0, 0b10, "ldrt">;
-def t2LDRBT : T2IldT<0, 0b00, "ldrbt">;
-def t2LDRHT : T2IldT<0, 0b01, "ldrht">;
-def t2LDRSBT : T2IldT<1, 0b00, "ldrsbt">;
-def t2LDRSHT : T2IldT<1, 0b01, "ldrsht">;
-
-// Store
-defm t2STR :T2I_st<0b10,"str", BinOpFrag<(store node:$LHS, node:$RHS)>>;
-defm t2STRB:T2I_st<0b00,"strb",BinOpFrag<(truncstorei8 node:$LHS, node:$RHS)>>;
-defm t2STRH:T2I_st<0b01,"strh",BinOpFrag<(truncstorei16 node:$LHS, node:$RHS)>>;
-
-// Store doubleword
-let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
-def t2STRDi8 : T2Ii8s4<1, 0, 0, (outs),
- (ins GPR:$src1, GPR:$src2, t2addrmode_imm8s4:$addr),
- IIC_iStorer, "strd", "\t$src1, $addr", []>;
-
-// Indexed stores
-def t2STR_PRE : T2Iidxldst<0, 0b10, 0, 1, (outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePre, IIC_iStoreiu,
- "str", "\t$src, [$base, $offset]!", "$base = $base_wb",
- [(set GPR:$base_wb,
- (pre_store GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>;
-
-def t2STR_POST : T2Iidxldst<0, 0b10, 0, 0, (outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePost, IIC_iStoreiu,
- "str", "\t$src, [$base], $offset", "$base = $base_wb",
- [(set GPR:$base_wb,
- (post_store GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>;
-
-def t2STRH_PRE : T2Iidxldst<0, 0b01, 0, 1, (outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePre, IIC_iStoreiu,
- "strh", "\t$src, [$base, $offset]!", "$base = $base_wb",
- [(set GPR:$base_wb,
- (pre_truncsti16 GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>;
-
-def t2STRH_POST : T2Iidxldst<0, 0b01, 0, 0, (outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePost, IIC_iStoreiu,
- "strh", "\t$src, [$base], $offset", "$base = $base_wb",
- [(set GPR:$base_wb,
- (post_truncsti16 GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>;
-
-def t2STRB_PRE : T2Iidxldst<0, 0b00, 0, 1, (outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePre, IIC_iStoreiu,
- "strb", "\t$src, [$base, $offset]!", "$base = $base_wb",
- [(set GPR:$base_wb,
- (pre_truncsti8 GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>;
-
-def t2STRB_POST : T2Iidxldst<0, 0b00, 0, 0, (outs GPR:$base_wb),
- (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset),
- AddrModeT2_i8, IndexModePost, IIC_iStoreiu,
- "strb", "\t$src, [$base], $offset", "$base = $base_wb",
- [(set GPR:$base_wb,
- (post_truncsti8 GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>;
-
-// STRT, STRBT, STRHT all have offset mode (PUW=0b110) and are for disassembly
-// only.
-// Ref: A8.6.193 STR (immediate, Thumb) Encoding T4
-class T2IstT<bits<2> type, string opc>
- : T2Ii8<(outs GPR:$src), (ins t2addrmode_imm8:$addr), IIC_iStorei, opc,
- "\t$src, $addr", []> {
- let Inst{31-27} = 0b11111;
- let Inst{26-25} = 0b00;
- let Inst{24} = 0; // not signed
- let Inst{23} = 0;
- let Inst{22-21} = type;
- let Inst{20} = 0; // store
- let Inst{11} = 1;
- let Inst{10-8} = 0b110; // PUW
-}
-
-def t2STRT : T2IstT<0b10, "strt">;
-def t2STRBT : T2IstT<0b00, "strbt">;
-def t2STRHT : T2IstT<0b01, "strht">;
-
-// FIXME: ldrd / strd pre / post variants
-
-// T2Ipl (Preload Data/Instruction) signals possible future data/instruction
-// accesses to the memory system. These are for disassembly only.
-multiclass T2Ipl<bit instr, bit write, string opc> {
-
- def i12 : T2I<(outs), (ins t2addrmode_imm12:$addr), IIC_iLoadi, opc,
- "\t$addr", []> {
- let Inst{31-25} = 0b1111100;
- let Inst{24} = instr;
- let Inst{23} = 1; // U = 1
- let Inst{22} = 0;
- let Inst{21} = write;
- let Inst{20} = 1;
- let Inst{15-12} = 0b1111;
- }
-
- def i8 : T2I<(outs), (ins t2addrmode_imm8:$addr), IIC_iLoadi, opc,
- "\t$addr", []> {
- let Inst{31-25} = 0b1111100;
- let Inst{24} = instr;
- let Inst{23} = 0; // U = 0
- let Inst{22} = 0;
- let Inst{21} = write;
- let Inst{20} = 1;
- let Inst{15-12} = 0b1111;
- let Inst{11-8} = 0b1100;
- }
-
- // A8.6.118 #0 and #-0 differs. Translates -0 to -1, -1 to -2, ..., etc.
- def pci : T2I<(outs), (ins GPR:$base, i32imm:$imm), IIC_iLoadi, opc,
- "\t[pc, ${imm:negzero}]", []> {
- let Inst{31-25} = 0b1111100;
- let Inst{24} = instr;
- let Inst{23} = ?; // add = (U == 1)
- let Inst{22} = 0;
- let Inst{21} = write;
- let Inst{20} = 1;
- let Inst{19-16} = 0b1111; // Rn = 0b1111
- let Inst{15-12} = 0b1111;
- }
-
- def r : T2I<(outs), (ins GPR:$base, GPR:$a), IIC_iLoadi, opc,
- "\t[$base, $a]", []> {
- let Inst{31-25} = 0b1111100;
- let Inst{24} = instr;
- let Inst{23} = 0; // add = TRUE for T1
- let Inst{22} = 0;
- let Inst{21} = write;
- let Inst{20} = 1;
- let Inst{15-12} = 0b1111;
-    let Inst{11-6} = 0b000000;
- let Inst{5-4} = 0b00; // no shift is applied
- }
-
- def s : T2I<(outs), (ins GPR:$base, GPR:$a, i32imm:$shamt), IIC_iLoadi, opc,
- "\t[$base, $a, lsl $shamt]", []> {
- let Inst{31-25} = 0b1111100;
- let Inst{24} = instr;
- let Inst{23} = 0; // add = TRUE for T1
- let Inst{22} = 0;
- let Inst{21} = write;
- let Inst{20} = 1;
- let Inst{15-12} = 0b1111;
-    let Inst{11-6} = 0b000000;
- }
-}
-
-defm t2PLD : T2Ipl<0, 0, "pld">;
-defm t2PLDW : T2Ipl<0, 1, "pldw">;
-defm t2PLI : T2Ipl<1, 0, "pli">;
-
-//===----------------------------------------------------------------------===//
-// Load / store multiple Instructions.
-//
-
-let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
-def t2LDM : T2XI<(outs),
- (ins addrmode4:$addr, pred:$p, reglist:$wb, variable_ops),
- IIC_iLoadm, "ldm${addr:submode}${p}${addr:wide}\t$addr, $wb", []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b00;
- let Inst{24-23} = {?, ?}; // IA: '01', DB: '10'
- let Inst{22} = 0;
- let Inst{21} = ?; // The W bit.
- let Inst{20} = 1; // Load
-}
-
-let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
-def t2STM : T2XI<(outs),
- (ins addrmode4:$addr, pred:$p, reglist:$wb, variable_ops),
- IIC_iStorem, "stm${addr:submode}${p}${addr:wide}\t$addr, $wb", []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b00;
- let Inst{24-23} = {?, ?}; // IA: '01', DB: '10'
- let Inst{22} = 0;
- let Inst{21} = ?; // The W bit.
- let Inst{20} = 0; // Store
-}
-
-//===----------------------------------------------------------------------===//
-// Move Instructions.
-//
-
-let neverHasSideEffects = 1 in
-def t2MOVr : T2sI<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVr,
- "mov", ".w\t$dst, $src", []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = 0b0010;
- let Inst{20} = ?; // The S bit.
- let Inst{19-16} = 0b1111; // Rn
- let Inst{14-12} = 0b000;
- let Inst{7-4} = 0b0000;
-}
-
-// AddedComplexity to ensure isel tries t2MOVi before t2MOVi16.
-let isReMaterializable = 1, isAsCheapAsAMove = 1, AddedComplexity = 1 in
-def t2MOVi : T2sI<(outs GPR:$dst), (ins t2_so_imm:$src), IIC_iMOVi,
- "mov", ".w\t$dst, $src",
- [(set GPR:$dst, t2_so_imm:$src)]> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 0;
- let Inst{24-21} = 0b0010;
- let Inst{20} = ?; // The S bit.
- let Inst{19-16} = 0b1111; // Rn
- let Inst{15} = 0;
-}
-
-let isReMaterializable = 1, isAsCheapAsAMove = 1 in
-def t2MOVi16 : T2I<(outs GPR:$dst), (ins i32imm:$src), IIC_iMOVi,
- "movw", "\t$dst, $src",
- [(set GPR:$dst, imm0_65535:$src)]> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 1;
- let Inst{24-21} = 0b0010;
- let Inst{20} = 0; // The S bit.
- let Inst{15} = 0;
-}
-
-let Constraints = "$src = $dst" in
-def t2MOVTi16 : T2I<(outs GPR:$dst), (ins GPR:$src, i32imm:$imm), IIC_iMOVi,
- "movt", "\t$dst, $imm",
- [(set GPR:$dst,
- (or (and GPR:$src, 0xffff), lo16AllZero:$imm))]> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 1;
- let Inst{24-21} = 0b0110;
- let Inst{20} = 0; // The S bit.
- let Inst{15} = 0;
-}
-
-def : T2Pat<(or GPR:$src, 0xffff0000), (t2MOVTi16 GPR:$src, 0xffff)>;
-
-//===----------------------------------------------------------------------===//
-// Extend Instructions.
-//
-
-// Sign extenders
-
-defm t2SXTB : T2I_unary_rrot<0b100, "sxtb",
- UnOpFrag<(sext_inreg node:$Src, i8)>>;
-defm t2SXTH : T2I_unary_rrot<0b000, "sxth",
- UnOpFrag<(sext_inreg node:$Src, i16)>>;
-defm t2SXTB16 : T2I_unary_rrot_DO<0b010, "sxtb16">;
-
-defm t2SXTAB : T2I_bin_rrot<0b100, "sxtab",
- BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS, i8))>>;
-defm t2SXTAH : T2I_bin_rrot<0b000, "sxtah",
- BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS,i16))>>;
-defm t2SXTAB16 : T2I_bin_rrot_DO<0b010, "sxtab16">;
-
-// TODO: SXT(A){B|H}16 - done for disassembly only
-
-// Zero extenders
-
-let AddedComplexity = 16 in {
-defm t2UXTB : T2I_unary_rrot<0b101, "uxtb",
- UnOpFrag<(and node:$Src, 0x000000FF)>>;
-defm t2UXTH : T2I_unary_rrot<0b001, "uxth",
- UnOpFrag<(and node:$Src, 0x0000FFFF)>>;
-defm t2UXTB16 : T2I_unary_rrot_nw<0b011, "uxtb16",
- UnOpFrag<(and node:$Src, 0x00FF00FF)>>;
-
-def : T2Pat<(and (shl GPR:$Src, (i32 8)), 0xFF00FF),
- (t2UXTB16r_rot GPR:$Src, 24)>;
-def : T2Pat<(and (srl GPR:$Src, (i32 8)), 0xFF00FF),
- (t2UXTB16r_rot GPR:$Src, 8)>;
-
-defm t2UXTAB : T2I_bin_rrot<0b101, "uxtab",
- BinOpFrag<(add node:$LHS, (and node:$RHS, 0x00FF))>>;
-defm t2UXTAH : T2I_bin_rrot<0b001, "uxtah",
- BinOpFrag<(add node:$LHS, (and node:$RHS, 0xFFFF))>>;
-defm t2UXTAB16 : T2I_bin_rrot_DO<0b011, "uxtab16">;
-}
-
-//===----------------------------------------------------------------------===//
-// Arithmetic Instructions.
-//
-
-defm t2ADD : T2I_bin_ii12rs<0b000, "add",
- BinOpFrag<(add node:$LHS, node:$RHS)>, 1>;
-defm t2SUB : T2I_bin_ii12rs<0b101, "sub",
- BinOpFrag<(sub node:$LHS, node:$RHS)>>;
-
-// ADD and SUB with 's' bit set. No 12-bit immediate (T4) variants.
-defm t2ADDS : T2I_bin_s_irs <0b1000, "add",
- BinOpFrag<(addc node:$LHS, node:$RHS)>, 1>;
-defm t2SUBS : T2I_bin_s_irs <0b1101, "sub",
- BinOpFrag<(subc node:$LHS, node:$RHS)>>;
-
-defm t2ADC : T2I_adde_sube_irs<0b1010, "adc",
- BinOpFrag<(adde_dead_carry node:$LHS, node:$RHS)>, 1>;
-defm t2SBC : T2I_adde_sube_irs<0b1011, "sbc",
- BinOpFrag<(sube_dead_carry node:$LHS, node:$RHS)>>;
-defm t2ADCS : T2I_adde_sube_s_irs<0b1010, "adc",
- BinOpFrag<(adde_live_carry node:$LHS, node:$RHS)>, 1>;
-defm t2SBCS : T2I_adde_sube_s_irs<0b1011, "sbc",
- BinOpFrag<(sube_live_carry node:$LHS, node:$RHS)>>;
-
-// RSB
-defm t2RSB : T2I_rbin_is <0b1110, "rsb",
- BinOpFrag<(sub node:$LHS, node:$RHS)>>;
-defm t2RSBS : T2I_rbin_s_is <0b1110, "rsb",
- BinOpFrag<(subc node:$LHS, node:$RHS)>>;
-
-// (sub X, imm) gets canonicalized to (add X, -imm). Match this form.
-let AddedComplexity = 1 in
-def : T2Pat<(add GPR:$src, imm0_255_neg:$imm),
- (t2SUBri GPR:$src, imm0_255_neg:$imm)>;
-def : T2Pat<(add GPR:$src, t2_so_imm_neg:$imm),
- (t2SUBri GPR:$src, t2_so_imm_neg:$imm)>;
-def : T2Pat<(add GPR:$src, imm0_4095_neg:$imm),
- (t2SUBri12 GPR:$src, imm0_4095_neg:$imm)>;
-
-// Select Bytes -- for disassembly only
-
-def t2SEL : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), NoItinerary, "sel",
- "\t$dst, $a, $b", []> {
- let Inst{31-27} = 0b11111;
- let Inst{26-24} = 0b010;
- let Inst{23} = 0b1;
- let Inst{22-20} = 0b010;
- let Inst{15-12} = 0b1111;
- let Inst{7} = 0b1;
- let Inst{6-4} = 0b000;
-}
-
-// A6.3.13, A6.3.14, A6.3.15 Parallel addition and subtraction (signed/unsigned)
-// And Miscellaneous operations -- for disassembly only
-class T2I_pam<bits<3> op22_20, bits<4> op7_4, string opc>
- : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), NoItinerary, opc,
- "\t$dst, $a, $b", [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0101;
- let Inst{22-20} = op22_20;
- let Inst{15-12} = 0b1111;
- let Inst{7-4} = op7_4;
-}
-
-// Saturating add/subtract -- for disassembly only
-
-def t2QADD : T2I_pam<0b000, 0b1000, "qadd">;
-def t2QADD16 : T2I_pam<0b001, 0b0001, "qadd16">;
-def t2QADD8 : T2I_pam<0b000, 0b0001, "qadd8">;
-def t2QASX : T2I_pam<0b010, 0b0001, "qasx">;
-def t2QDADD : T2I_pam<0b000, 0b1001, "qdadd">;
-def t2QDSUB : T2I_pam<0b000, 0b1011, "qdsub">;
-def t2QSAX : T2I_pam<0b110, 0b0001, "qsax">;
-def t2QSUB : T2I_pam<0b000, 0b1010, "qsub">;
-def t2QSUB16 : T2I_pam<0b101, 0b0001, "qsub16">;
-def t2QSUB8 : T2I_pam<0b100, 0b0001, "qsub8">;
-def t2UQADD16 : T2I_pam<0b001, 0b0101, "uqadd16">;
-def t2UQADD8 : T2I_pam<0b000, 0b0101, "uqadd8">;
-def t2UQASX : T2I_pam<0b010, 0b0101, "uqasx">;
-def t2UQSAX : T2I_pam<0b110, 0b0101, "uqsax">;
-def t2UQSUB16 : T2I_pam<0b101, 0b0101, "uqsub16">;
-def t2UQSUB8 : T2I_pam<0b100, 0b0101, "uqsub8">;
-
-// Signed/Unsigned add/subtract -- for disassembly only
-
-def t2SASX : T2I_pam<0b010, 0b0000, "sasx">;
-def t2SADD16 : T2I_pam<0b001, 0b0000, "sadd16">;
-def t2SADD8 : T2I_pam<0b000, 0b0000, "sadd8">;
-def t2SSAX : T2I_pam<0b110, 0b0000, "ssax">;
-def t2SSUB16 : T2I_pam<0b101, 0b0000, "ssub16">;
-def t2SSUB8 : T2I_pam<0b100, 0b0000, "ssub8">;
-def t2UASX : T2I_pam<0b010, 0b0100, "uasx">;
-def t2UADD16 : T2I_pam<0b001, 0b0100, "uadd16">;
-def t2UADD8 : T2I_pam<0b000, 0b0100, "uadd8">;
-def t2USAX : T2I_pam<0b110, 0b0100, "usax">;
-def t2USUB16 : T2I_pam<0b101, 0b0100, "usub16">;
-def t2USUB8 : T2I_pam<0b100, 0b0100, "usub8">;
-
-// Signed/Unsigned halving add/subtract -- for disassembly only
-
-def t2SHASX : T2I_pam<0b010, 0b0010, "shasx">;
-def t2SHADD16 : T2I_pam<0b001, 0b0010, "shadd16">;
-def t2SHADD8 : T2I_pam<0b000, 0b0010, "shadd8">;
-def t2SHSAX : T2I_pam<0b110, 0b0010, "shsax">;
-def t2SHSUB16 : T2I_pam<0b101, 0b0010, "shsub16">;
-def t2SHSUB8 : T2I_pam<0b100, 0b0010, "shsub8">;
-def t2UHASX : T2I_pam<0b010, 0b0110, "uhasx">;
-def t2UHADD16 : T2I_pam<0b001, 0b0110, "uhadd16">;
-def t2UHADD8 : T2I_pam<0b000, 0b0110, "uhadd8">;
-def t2UHSAX : T2I_pam<0b110, 0b0110, "uhsax">;
-def t2UHSUB16 : T2I_pam<0b101, 0b0110, "uhsub16">;
-def t2UHSUB8 : T2I_pam<0b100, 0b0110, "uhsub8">;
-
-// Unsigned Sum of Absolute Differences [and Accumulate] -- for disassembly only
-
-def t2USAD8 : T2I_mac<0, 0b111, 0b0000, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- NoItinerary, "usad8", "\t$dst, $a, $b", []> {
- let Inst{15-12} = 0b1111;
-}
-def t2USADA8 : T2I_mac<0, 0b111, 0b0000, (outs GPR:$dst),
- (ins GPR:$a, GPR:$b, GPR:$acc), NoItinerary, "usada8",
- "\t$dst, $a, $b, $acc", []>;
-
-// Signed/Unsigned saturate -- for disassembly only
-
-def t2SSATlsl : T2I<(outs GPR:$dst), (ins i32imm:$bit_pos,GPR:$a,i32imm:$shamt),
- NoItinerary, "ssat", "\t$dst, $bit_pos, $a, lsl $shamt",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{25-22} = 0b1100;
- let Inst{20} = 0;
- let Inst{15} = 0;
- let Inst{21} = 0; // sh = '0'
-}
-
-def t2SSATasr : T2I<(outs GPR:$dst), (ins i32imm:$bit_pos,GPR:$a,i32imm:$shamt),
- NoItinerary, "ssat", "\t$dst, $bit_pos, $a, asr $shamt",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{25-22} = 0b1100;
- let Inst{20} = 0;
- let Inst{15} = 0;
- let Inst{21} = 1; // sh = '1'
-}
-
-def t2SSAT16 : T2I<(outs GPR:$dst), (ins i32imm:$bit_pos, GPR:$a), NoItinerary,
- "ssat16", "\t$dst, $bit_pos, $a",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{25-22} = 0b1100;
- let Inst{20} = 0;
- let Inst{15} = 0;
- let Inst{21} = 1; // sh = '1'
- let Inst{14-12} = 0b000; // imm3 = '000'
- let Inst{7-6} = 0b00; // imm2 = '00'
-}
-
-def t2USATlsl : T2I<(outs GPR:$dst), (ins i32imm:$bit_pos,GPR:$a,i32imm:$shamt),
- NoItinerary, "usat", "\t$dst, $bit_pos, $a, lsl $shamt",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{25-22} = 0b1110;
- let Inst{20} = 0;
- let Inst{15} = 0;
- let Inst{21} = 0; // sh = '0'
-}
-
-def t2USATasr : T2I<(outs GPR:$dst), (ins i32imm:$bit_pos,GPR:$a,i32imm:$shamt),
- NoItinerary, "usat", "\t$dst, $bit_pos, $a, asr $shamt",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{25-22} = 0b1110;
- let Inst{20} = 0;
- let Inst{15} = 0;
- let Inst{21} = 1; // sh = '1'
-}
-
-def t2USAT16 : T2I<(outs GPR:$dst), (ins i32imm:$bit_pos, GPR:$a), NoItinerary,
- "usat16", "\t$dst, $bit_pos, $a",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{25-22} = 0b1110;
- let Inst{20} = 0;
- let Inst{15} = 0;
- let Inst{21} = 1; // sh = '1'
- let Inst{14-12} = 0b000; // imm3 = '000'
- let Inst{7-6} = 0b00; // imm2 = '00'
-}
-
-//===----------------------------------------------------------------------===//
-// Shift and rotate Instructions.
-//
-
-defm t2LSL : T2I_sh_ir<0b00, "lsl", BinOpFrag<(shl node:$LHS, node:$RHS)>>;
-defm t2LSR : T2I_sh_ir<0b01, "lsr", BinOpFrag<(srl node:$LHS, node:$RHS)>>;
-defm t2ASR : T2I_sh_ir<0b10, "asr", BinOpFrag<(sra node:$LHS, node:$RHS)>>;
-defm t2ROR : T2I_sh_ir<0b11, "ror", BinOpFrag<(rotr node:$LHS, node:$RHS)>>;
-
-let Uses = [CPSR] in {
-def t2MOVrx : T2sI<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVsi,
- "rrx", "\t$dst, $src",
- [(set GPR:$dst, (ARMrrx GPR:$src))]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = 0b0010;
- let Inst{20} = ?; // The S bit.
- let Inst{19-16} = 0b1111; // Rn
- let Inst{14-12} = 0b000;
- let Inst{7-4} = 0b0011;
-}
-}
-
-let Defs = [CPSR] in {
-def t2MOVsrl_flag : T2XI<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVsi,
- "lsrs.w\t$dst, $src, #1",
- [(set GPR:$dst, (ARMsrl_flag GPR:$src))]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = 0b0010;
- let Inst{20} = 1; // The S bit.
- let Inst{19-16} = 0b1111; // Rn
- let Inst{5-4} = 0b01; // Shift type.
- // Shift amount = Inst{14-12:7-6} = 1.
- let Inst{14-12} = 0b000;
- let Inst{7-6} = 0b01;
-}
-def t2MOVsra_flag : T2XI<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVsi,
- "asrs.w\t$dst, $src, #1",
- [(set GPR:$dst, (ARMsra_flag GPR:$src))]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = 0b0010;
- let Inst{20} = 1; // The S bit.
- let Inst{19-16} = 0b1111; // Rn
- let Inst{5-4} = 0b10; // Shift type.
- // Shift amount = Inst{14-12:7-6} = 1.
- let Inst{14-12} = 0b000;
- let Inst{7-6} = 0b01;
-}
-}
-
-//===----------------------------------------------------------------------===//
-// Bitwise Instructions.
-//
-
-defm t2AND : T2I_bin_w_irs<0b0000, "and",
- BinOpFrag<(and node:$LHS, node:$RHS)>, 1>;
-defm t2ORR : T2I_bin_w_irs<0b0010, "orr",
- BinOpFrag<(or node:$LHS, node:$RHS)>, 1>;
-defm t2EOR : T2I_bin_w_irs<0b0100, "eor",
- BinOpFrag<(xor node:$LHS, node:$RHS)>, 1>;
-
-defm t2BIC : T2I_bin_w_irs<0b0001, "bic",
- BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
-
-let Constraints = "$src = $dst" in
-def t2BFC : T2I<(outs GPR:$dst), (ins GPR:$src, bf_inv_mask_imm:$imm),
- IIC_iUNAsi, "bfc", "\t$dst, $imm",
- [(set GPR:$dst, (and GPR:$src, bf_inv_mask_imm:$imm))]> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 1;
- let Inst{24-20} = 0b10110;
- let Inst{19-16} = 0b1111; // Rn
- let Inst{15} = 0;
-}
-
-def t2SBFX : T2I<(outs GPR:$dst), (ins GPR:$src, imm0_31:$lsb, imm0_31:$width),
- IIC_iALUi, "sbfx", "\t$dst, $src, $lsb, $width", []> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 1;
- let Inst{24-20} = 0b10100;
- let Inst{15} = 0;
-}
-
-def t2UBFX : T2I<(outs GPR:$dst), (ins GPR:$src, imm0_31:$lsb, imm0_31:$width),
- IIC_iALUi, "ubfx", "\t$dst, $src, $lsb, $width", []> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 1;
- let Inst{24-20} = 0b11100;
- let Inst{15} = 0;
-}
-
-// A8.6.18 BFI - Bitfield insert (Encoding T1)
-// Added for disassembler with the pattern field purposely left blank.
-// FIXME: Utilize this instruction in codegen.
-def t2BFI : T2I<(outs GPR:$dst), (ins GPR:$src, imm0_31:$lsb, imm0_31:$width),
- IIC_iALUi, "bfi", "\t$dst, $src, $lsb, $width", []> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 1;
- let Inst{24-20} = 0b10110;
- let Inst{15} = 0;
-}
-
-defm t2ORN : T2I_bin_irs<0b0011, "orn", BinOpFrag<(or node:$LHS,
- (not node:$RHS))>>;
-
-// Prefer t2MVN over t2EORri ra, rb, -1 because mvn has a 16-bit version
-let AddedComplexity = 1 in
-defm t2MVN : T2I_un_irs <0b0011, "mvn", UnOpFrag<(not node:$Src)>, 1, 1>;
-
-
-def : T2Pat<(and GPR:$src, t2_so_imm_not:$imm),
- (t2BICri GPR:$src, t2_so_imm_not:$imm)>;
-
-// FIXME: Disable this pattern on Darwin to work around an assembler bug.
-def : T2Pat<(or GPR:$src, t2_so_imm_not:$imm),
- (t2ORNri GPR:$src, t2_so_imm_not:$imm)>,
- Requires<[IsThumb2]>;
-
-def : T2Pat<(t2_so_imm_not:$src),
- (t2MVNi t2_so_imm_not:$src)>;
-
-//===----------------------------------------------------------------------===//
-// Multiply Instructions.
-//
-let isCommutable = 1 in
-def t2MUL: T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iMUL32,
- "mul", "\t$dst, $a, $b",
- [(set GPR:$dst, (mul GPR:$a, GPR:$b))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b000;
- let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate)
- let Inst{7-4} = 0b0000; // Multiply
-}
-
-def t2MLA: T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c), IIC_iMAC32,
- "mla", "\t$dst, $a, $b, $c",
- [(set GPR:$dst, (add (mul GPR:$a, GPR:$b), GPR:$c))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b000;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
- let Inst{7-4} = 0b0000; // Multiply
-}
-
-def t2MLS: T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c), IIC_iMAC32,
- "mls", "\t$dst, $a, $b, $c",
- [(set GPR:$dst, (sub GPR:$c, (mul GPR:$a, GPR:$b)))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b000;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
- let Inst{7-4} = 0b0001; // Multiply and Subtract
-}
-
-// Extra precision multiplies with low / high results
-let neverHasSideEffects = 1 in {
-let isCommutable = 1 in {
-def t2SMULL : T2I<(outs GPR:$ldst, GPR:$hdst), (ins GPR:$a, GPR:$b), IIC_iMUL64,
- "smull", "\t$ldst, $hdst, $a, $b", []> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0111;
- let Inst{22-20} = 0b000;
- let Inst{7-4} = 0b0000;
-}
-
-def t2UMULL : T2I<(outs GPR:$ldst, GPR:$hdst), (ins GPR:$a, GPR:$b), IIC_iMUL64,
- "umull", "\t$ldst, $hdst, $a, $b", []> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0111;
- let Inst{22-20} = 0b010;
- let Inst{7-4} = 0b0000;
-}
-} // isCommutable
-
-// Multiply + accumulate
-def t2SMLAL : T2I<(outs GPR:$ldst, GPR:$hdst), (ins GPR:$a, GPR:$b), IIC_iMAC64,
- "smlal", "\t$ldst, $hdst, $a, $b", []>{
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0111;
- let Inst{22-20} = 0b100;
- let Inst{7-4} = 0b0000;
-}
-
-def t2UMLAL : T2I<(outs GPR:$ldst, GPR:$hdst), (ins GPR:$a, GPR:$b), IIC_iMAC64,
- "umlal", "\t$ldst, $hdst, $a, $b", []>{
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0111;
- let Inst{22-20} = 0b110;
- let Inst{7-4} = 0b0000;
-}
-
-def t2UMAAL : T2I<(outs GPR:$ldst, GPR:$hdst), (ins GPR:$a, GPR:$b), IIC_iMAC64,
- "umaal", "\t$ldst, $hdst, $a, $b", []>{
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0111;
- let Inst{22-20} = 0b110;
- let Inst{7-4} = 0b0110;
-}
-} // neverHasSideEffects
-
-// Rounding variants of the below included for disassembly only
-
-// Most significant word multiply
-def t2SMMUL : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iMUL32,
- "smmul", "\t$dst, $a, $b",
- [(set GPR:$dst, (mulhs GPR:$a, GPR:$b))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b101;
- let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate)
- let Inst{7-4} = 0b0000; // No Rounding (Inst{4} = 0)
-}
-
-def t2SMMULR : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iMUL32,
- "smmulr", "\t$dst, $a, $b", []> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b101;
- let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate)
- let Inst{7-4} = 0b0001; // Rounding (Inst{4} = 1)
-}
-
-def t2SMMLA : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c), IIC_iMAC32,
- "smmla", "\t$dst, $a, $b, $c",
- [(set GPR:$dst, (add (mulhs GPR:$a, GPR:$b), GPR:$c))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b101;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
- let Inst{7-4} = 0b0000; // No Rounding (Inst{4} = 0)
-}
-
-def t2SMMLAR : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c), IIC_iMAC32,
- "smmlar", "\t$dst, $a, $b, $c", []> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b101;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
- let Inst{7-4} = 0b0001; // Rounding (Inst{4} = 1)
-}
-
-def t2SMMLS : T2I <(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c), IIC_iMAC32,
- "smmls", "\t$dst, $a, $b, $c",
- [(set GPR:$dst, (sub GPR:$c, (mulhs GPR:$a, GPR:$b)))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b110;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
- let Inst{7-4} = 0b0000; // No Rounding (Inst{4} = 0)
-}
-
-def t2SMMLSR : T2I <(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c), IIC_iMAC32,
- "smmlsr", "\t$dst, $a, $b, $c", []> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b110;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
- let Inst{7-4} = 0b0001; // Rounding (Inst{4} = 1)
-}
-
-multiclass T2I_smul<string opc, PatFrag opnode> {
- def BB : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iMUL32,
- !strconcat(opc, "bb"), "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode (sext_inreg GPR:$a, i16),
- (sext_inreg GPR:$b, i16)))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b001;
- let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate)
- let Inst{7-6} = 0b00;
- let Inst{5-4} = 0b00;
- }
-
- def BT : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iMUL32,
- !strconcat(opc, "bt"), "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode (sext_inreg GPR:$a, i16),
- (sra GPR:$b, (i32 16))))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b001;
- let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate)
- let Inst{7-6} = 0b00;
- let Inst{5-4} = 0b01;
- }
-
- def TB : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iMUL32,
- !strconcat(opc, "tb"), "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode (sra GPR:$a, (i32 16)),
- (sext_inreg GPR:$b, i16)))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b001;
- let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate)
- let Inst{7-6} = 0b00;
- let Inst{5-4} = 0b10;
- }
-
- def TT : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iMUL32,
- !strconcat(opc, "tt"), "\t$dst, $a, $b",
- [(set GPR:$dst, (opnode (sra GPR:$a, (i32 16)),
- (sra GPR:$b, (i32 16))))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b001;
- let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate)
- let Inst{7-6} = 0b00;
- let Inst{5-4} = 0b11;
- }
-
- def WB : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iMUL16,
- !strconcat(opc, "wb"), "\t$dst, $a, $b",
- [(set GPR:$dst, (sra (opnode GPR:$a,
- (sext_inreg GPR:$b, i16)), (i32 16)))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b011;
- let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate)
- let Inst{7-6} = 0b00;
- let Inst{5-4} = 0b00;
- }
-
- def WT : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iMUL16,
- !strconcat(opc, "wt"), "\t$dst, $a, $b",
- [(set GPR:$dst, (sra (opnode GPR:$a,
- (sra GPR:$b, (i32 16))), (i32 16)))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b011;
- let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate)
- let Inst{7-6} = 0b00;
- let Inst{5-4} = 0b01;
- }
-}
-
-
-multiclass T2I_smla<string opc, PatFrag opnode> {
- def BB : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc), IIC_iMAC16,
- !strconcat(opc, "bb"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc,
- (opnode (sext_inreg GPR:$a, i16),
- (sext_inreg GPR:$b, i16))))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b001;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
- let Inst{7-6} = 0b00;
- let Inst{5-4} = 0b00;
- }
-
- def BT : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc), IIC_iMAC16,
- !strconcat(opc, "bt"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc, (opnode (sext_inreg GPR:$a, i16),
- (sra GPR:$b, (i32 16)))))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b001;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
- let Inst{7-6} = 0b00;
- let Inst{5-4} = 0b01;
- }
-
- def TB : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc), IIC_iMAC16,
- !strconcat(opc, "tb"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc, (opnode (sra GPR:$a, (i32 16)),
- (sext_inreg GPR:$b, i16))))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b001;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
- let Inst{7-6} = 0b00;
- let Inst{5-4} = 0b10;
- }
-
- def TT : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc), IIC_iMAC16,
- !strconcat(opc, "tt"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc, (opnode (sra GPR:$a, (i32 16)),
- (sra GPR:$b, (i32 16)))))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b001;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
- let Inst{7-6} = 0b00;
- let Inst{5-4} = 0b11;
- }
-
- def WB : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc), IIC_iMAC16,
- !strconcat(opc, "wb"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc, (sra (opnode GPR:$a,
- (sext_inreg GPR:$b, i16)), (i32 16))))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b011;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
- let Inst{7-6} = 0b00;
- let Inst{5-4} = 0b00;
- }
-
- def WT : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc), IIC_iMAC16,
- !strconcat(opc, "wt"), "\t$dst, $a, $b, $acc",
- [(set GPR:$dst, (add GPR:$acc, (sra (opnode GPR:$a,
- (sra GPR:$b, (i32 16))), (i32 16))))]> {
- let Inst{31-27} = 0b11111;
- let Inst{26-23} = 0b0110;
- let Inst{22-20} = 0b011;
- let Inst{15-12} = {?, ?, ?, ?}; // Ra
- let Inst{7-6} = 0b00;
- let Inst{5-4} = 0b01;
- }
-}
-
-defm t2SMUL : T2I_smul<"smul", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
-defm t2SMLA : T2I_smla<"smla", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
-
-// Halfword multiply accumulate long: SMLAL<x><y> -- for disassembly only
-def t2SMLALBB : T2I_mac<1, 0b100, 0b1000, (outs GPR:$ldst,GPR:$hdst),
- (ins GPR:$a,GPR:$b), IIC_iMAC64, "smlalbb", "\t$ldst, $hdst, $a, $b",
- [/* For disassembly only; pattern left blank */]>;
-def t2SMLALBT : T2I_mac<1, 0b100, 0b1001, (outs GPR:$ldst,GPR:$hdst),
- (ins GPR:$a,GPR:$b), IIC_iMAC64, "smlalbt", "\t$ldst, $hdst, $a, $b",
- [/* For disassembly only; pattern left blank */]>;
-def t2SMLALTB : T2I_mac<1, 0b100, 0b1010, (outs GPR:$ldst,GPR:$hdst),
- (ins GPR:$a,GPR:$b), IIC_iMAC64, "smlaltb", "\t$ldst, $hdst, $a, $b",
- [/* For disassembly only; pattern left blank */]>;
-def t2SMLALTT : T2I_mac<1, 0b100, 0b1011, (outs GPR:$ldst,GPR:$hdst),
- (ins GPR:$a,GPR:$b), IIC_iMAC64, "smlaltt", "\t$ldst, $hdst, $a, $b",
- [/* For disassembly only; pattern left blank */]>;
-
-// Dual halfword multiply: SMUAD, SMUSD, SMLAD, SMLSD, SMLALD, SMLSLD
-// These are for disassembly only.
-
-def t2SMUAD : T2I_mac<0, 0b010, 0b0000, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMAC32, "smuad", "\t$dst, $a, $b", []> {
- let Inst{15-12} = 0b1111;
-}
-def t2SMUADX : T2I_mac<0, 0b010, 0b0001, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMAC32, "smuadx", "\t$dst, $a, $b", []> {
- let Inst{15-12} = 0b1111;
-}
-def t2SMUSD : T2I_mac<0, 0b100, 0b0000, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMAC32, "smusd", "\t$dst, $a, $b", []> {
- let Inst{15-12} = 0b1111;
-}
-def t2SMUSDX : T2I_mac<0, 0b100, 0b0001, (outs GPR:$dst), (ins GPR:$a, GPR:$b),
- IIC_iMAC32, "smusdx", "\t$dst, $a, $b", []> {
- let Inst{15-12} = 0b1111;
-}
-def t2SMLAD : T2I_mac<0, 0b010, 0b0000, (outs GPR:$dst),
- (ins GPR:$a, GPR:$b, GPR:$acc), IIC_iMAC32, "smlad",
- "\t$dst, $a, $b, $acc", []>;
-def t2SMLADX : T2I_mac<0, 0b010, 0b0001, (outs GPR:$dst),
- (ins GPR:$a, GPR:$b, GPR:$acc), IIC_iMAC32, "smladx",
- "\t$dst, $a, $b, $acc", []>;
-def t2SMLSD : T2I_mac<0, 0b100, 0b0000, (outs GPR:$dst),
- (ins GPR:$a, GPR:$b, GPR:$acc), IIC_iMAC32, "smlsd",
- "\t$dst, $a, $b, $acc", []>;
-def t2SMLSDX : T2I_mac<0, 0b100, 0b0001, (outs GPR:$dst),
- (ins GPR:$a, GPR:$b, GPR:$acc), IIC_iMAC32, "smlsdx",
- "\t$dst, $a, $b, $acc", []>;
-def t2SMLALD : T2I_mac<1, 0b100, 0b1100, (outs GPR:$ldst,GPR:$hdst),
- (ins GPR:$a,GPR:$b), IIC_iMAC64, "smlald",
- "\t$ldst, $hdst, $a, $b", []>;
-def t2SMLALDX : T2I_mac<1, 0b100, 0b1101, (outs GPR:$ldst,GPR:$hdst),
- (ins GPR:$a,GPR:$b), IIC_iMAC64, "smlaldx",
- "\t$ldst, $hdst, $a, $b", []>;
-def t2SMLSLD : T2I_mac<1, 0b101, 0b1100, (outs GPR:$ldst,GPR:$hdst),
- (ins GPR:$a,GPR:$b), IIC_iMAC64, "smlsld",
- "\t$ldst, $hdst, $a, $b", []>;
-def t2SMLSLDX : T2I_mac<1, 0b101, 0b1101, (outs GPR:$ldst,GPR:$hdst),
- (ins GPR:$a,GPR:$b), IIC_iMAC64, "smlsldx",
- "\t$ldst, $hdst, $a, $b", []>;
-
-//===----------------------------------------------------------------------===//
-// Misc. Arithmetic Instructions.
-//
-
-class T2I_misc<bits<2> op1, bits<2> op2, dag oops, dag iops,
- InstrItinClass itin, string opc, string asm, list<dag> pattern>
- : T2I<oops, iops, itin, opc, asm, pattern> {
- let Inst{31-27} = 0b11111;
- let Inst{26-22} = 0b01010;
- let Inst{21-20} = op1;
- let Inst{15-12} = 0b1111;
- let Inst{7-6} = 0b10;
- let Inst{5-4} = op2;
-}
-
-def t2CLZ : T2I_misc<0b11, 0b00, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- "clz", "\t$dst, $src", [(set GPR:$dst, (ctlz GPR:$src))]>;
-
-def t2RBIT : T2I_misc<0b01, 0b10, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- "rbit", "\t$dst, $src",
- [(set GPR:$dst, (ARMrbit GPR:$src))]>;
-
-def t2REV : T2I_misc<0b01, 0b00, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- "rev", ".w\t$dst, $src", [(set GPR:$dst, (bswap GPR:$src))]>;
-
-def t2REV16 : T2I_misc<0b01, 0b01, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- "rev16", ".w\t$dst, $src",
- [(set GPR:$dst,
- (or (and (srl GPR:$src, (i32 8)), 0xFF),
- (or (and (shl GPR:$src, (i32 8)), 0xFF00),
- (or (and (srl GPR:$src, (i32 8)), 0xFF0000),
- (and (shl GPR:$src, (i32 8)), 0xFF000000)))))]>;
-
-def t2REVSH : T2I_misc<0b01, 0b11, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
- "revsh", ".w\t$dst, $src",
- [(set GPR:$dst,
- (sext_inreg
- (or (srl (and GPR:$src, 0xFF00), (i32 8)),
- (shl GPR:$src, (i32 8))), i16))]>;
-
-def t2PKHBT : T2I<(outs GPR:$dst), (ins GPR:$src1, GPR:$src2, i32imm:$shamt),
- IIC_iALUsi, "pkhbt", "\t$dst, $src1, $src2, lsl $shamt",
- [(set GPR:$dst, (or (and GPR:$src1, 0xFFFF),
- (and (shl GPR:$src2, (i32 imm:$shamt)),
- 0xFFFF0000)))]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-20} = 0b01100;
- let Inst{5} = 0; // BT form
- let Inst{4} = 0;
-}
-
-// Alternate cases for PKHBT where identities eliminate some nodes.
-def : T2Pat<(or (and GPR:$src1, 0xFFFF), (and GPR:$src2, 0xFFFF0000)),
- (t2PKHBT GPR:$src1, GPR:$src2, 0)>;
-def : T2Pat<(or (and GPR:$src1, 0xFFFF), (shl GPR:$src2, imm16_31:$shamt)),
- (t2PKHBT GPR:$src1, GPR:$src2, imm16_31:$shamt)>;
-
-def t2PKHTB : T2I<(outs GPR:$dst), (ins GPR:$src1, GPR:$src2, i32imm:$shamt),
- IIC_iALUsi, "pkhtb", "\t$dst, $src1, $src2, asr $shamt",
- [(set GPR:$dst, (or (and GPR:$src1, 0xFFFF0000),
- (and (sra GPR:$src2, imm16_31:$shamt),
- 0xFFFF)))]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-20} = 0b01100;
- let Inst{5} = 1; // TB form
- let Inst{4} = 0;
-}
-
-// Alternate cases for PKHTB where identities eliminate some nodes. Note that
-// a shift amount of 0 is *not legal* here; it is PKHBT instead.
-def : T2Pat<(or (and GPR:$src1, 0xFFFF0000), (srl GPR:$src2, (i32 16))),
- (t2PKHTB GPR:$src1, GPR:$src2, 16)>;
-def : T2Pat<(or (and GPR:$src1, 0xFFFF0000),
- (and (srl GPR:$src2, imm1_15:$shamt), 0xFFFF)),
- (t2PKHTB GPR:$src1, GPR:$src2, imm1_15:$shamt)>;
-
-//===----------------------------------------------------------------------===//
-// Comparison Instructions...
-//
-
-defm t2CMP : T2I_cmp_irs<0b1101, "cmp",
- BinOpFrag<(ARMcmp node:$LHS, node:$RHS)>>;
-defm t2CMPz : T2I_cmp_irs<0b1101, "cmp",
- BinOpFrag<(ARMcmpZ node:$LHS, node:$RHS)>>;
-
-//FIXME: Disable CMN, as CCodes are backwards from compare expectations
-// Compare-to-zero still works out, just not the relationals
-//defm t2CMN : T2I_cmp_irs<0b1000, "cmn",
-// BinOpFrag<(ARMcmp node:$LHS,(ineg node:$RHS))>>;
-defm t2CMNz : T2I_cmp_irs<0b1000, "cmn",
- BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>>;
-
-//def : T2Pat<(ARMcmp GPR:$src, t2_so_imm_neg:$imm),
-// (t2CMNri GPR:$src, t2_so_imm_neg:$imm)>;
-
-def : T2Pat<(ARMcmpZ GPR:$src, t2_so_imm_neg:$imm),
- (t2CMNzri GPR:$src, t2_so_imm_neg:$imm)>;
-
-defm t2TST : T2I_cmp_irs<0b0000, "tst",
- BinOpFrag<(ARMcmpZ (and node:$LHS, node:$RHS), 0)>>;
-defm t2TEQ : T2I_cmp_irs<0b0100, "teq",
- BinOpFrag<(ARMcmpZ (xor node:$LHS, node:$RHS), 0)>>;
-
-// A8.6.27 CBNZ, CBZ - Compare and branch on (non)zero.
-// Short range conditional branch. Looks awesome for loops. Need to figure
-// out how to use this one.
-
-
-// Conditional moves
-// FIXME: should be able to write a pattern for ARMcmov, but can't use
-// a two-value operand where a dag node expects two operands. :(
-def t2MOVCCr : T2I<(outs GPR:$dst), (ins GPR:$false, GPR:$true), IIC_iCMOVr,
- "mov", ".w\t$dst, $true",
- [/*(set GPR:$dst, (ARMcmov GPR:$false, GPR:$true, imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $dst"> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = 0b0010;
- let Inst{20} = 0; // The S bit.
- let Inst{19-16} = 0b1111; // Rn
- let Inst{14-12} = 0b000;
- let Inst{7-4} = 0b0000;
-}
-
-def t2MOVCCi : T2I<(outs GPR:$dst), (ins GPR:$false, t2_so_imm:$true),
- IIC_iCMOVi, "mov", ".w\t$dst, $true",
-[/*(set GPR:$dst, (ARMcmov GPR:$false, t2_so_imm:$true, imm:$cc, CCR:$ccr))*/]>,
- RegConstraint<"$false = $dst"> {
- let Inst{31-27} = 0b11110;
- let Inst{25} = 0;
- let Inst{24-21} = 0b0010;
- let Inst{20} = 0; // The S bit.
- let Inst{19-16} = 0b1111; // Rn
- let Inst{15} = 0;
-}
-
-class T2I_movcc_sh<bits<2> opcod, dag oops, dag iops, InstrItinClass itin,
- string opc, string asm, list<dag> pattern>
- : T2I<oops, iops, itin, opc, asm, pattern> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b01;
- let Inst{24-21} = 0b0010;
- let Inst{20} = 0; // The S bit.
- let Inst{19-16} = 0b1111; // Rn
- let Inst{5-4} = opcod; // Shift type.
-}
-def t2MOVCClsl : T2I_movcc_sh<0b00, (outs GPR:$dst),
- (ins GPR:$false, GPR:$true, i32imm:$rhs),
- IIC_iCMOVsi, "lsl", ".w\t$dst, $true, $rhs", []>,
- RegConstraint<"$false = $dst">;
-def t2MOVCClsr : T2I_movcc_sh<0b01, (outs GPR:$dst),
- (ins GPR:$false, GPR:$true, i32imm:$rhs),
- IIC_iCMOVsi, "lsr", ".w\t$dst, $true, $rhs", []>,
- RegConstraint<"$false = $dst">;
-def t2MOVCCasr : T2I_movcc_sh<0b10, (outs GPR:$dst),
- (ins GPR:$false, GPR:$true, i32imm:$rhs),
- IIC_iCMOVsi, "asr", ".w\t$dst, $true, $rhs", []>,
- RegConstraint<"$false = $dst">;
-def t2MOVCCror : T2I_movcc_sh<0b11, (outs GPR:$dst),
- (ins GPR:$false, GPR:$true, i32imm:$rhs),
- IIC_iCMOVsi, "ror", ".w\t$dst, $true, $rhs", []>,
- RegConstraint<"$false = $dst">;
-
-//===----------------------------------------------------------------------===//
-// Atomic operations intrinsics
-//
-
-// memory barriers protect the atomic sequences
-let hasSideEffects = 1 in {
-def t2Int_MemBarrierV7 : AInoP<(outs), (ins),
- Pseudo, NoItinerary,
- "dmb", "",
- [(ARMMemBarrierV7)]>,
- Requires<[IsThumb2]> {
- let Inst{31-4} = 0xF3BF8F5;
- // FIXME: add support for options other than a full system DMB
- let Inst{3-0} = 0b1111;
-}
-
-def t2Int_SyncBarrierV7 : AInoP<(outs), (ins),
- Pseudo, NoItinerary,
- "dsb", "",
- [(ARMSyncBarrierV7)]>,
- Requires<[IsThumb2]> {
- let Inst{31-4} = 0xF3BF8F4;
- // FIXME: add support for options other than a full system DSB
- let Inst{3-0} = 0b1111;
-}
-}
-
-// Helper class for multiclass T2MemB -- for disassembly only
-class T2I_memb<string opc, string asm>
- : T2I<(outs), (ins), NoItinerary, opc, asm,
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsThumb2, HasV7]> {
- let Inst{31-20} = 0xf3b;
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
-}
-
-multiclass T2MemB<bits<4> op7_4, string opc> {
-
- def st : T2I_memb<opc, "\tst"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b1110;
- }
-
- def ish : T2I_memb<opc, "\tish"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b1011;
- }
-
- def ishst : T2I_memb<opc, "\tishst"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b1010;
- }
-
- def nsh : T2I_memb<opc, "\tnsh"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b0111;
- }
-
- def nshst : T2I_memb<opc, "\tnshst"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b0110;
- }
-
- def osh : T2I_memb<opc, "\tosh"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b0011;
- }
-
- def oshst : T2I_memb<opc, "\toshst"> {
- let Inst{7-4} = op7_4;
- let Inst{3-0} = 0b0010;
- }
-}
-
-// These DMB variants are for disassembly only.
-defm t2DMB : T2MemB<0b0101, "dmb">;
-
-// These DSB variants are for disassembly only.
-defm t2DSB : T2MemB<0b0100, "dsb">;
-
-// ISB has only full system option -- for disassembly only
-def t2ISBsy : T2I_memb<"isb", ""> {
- let Inst{7-4} = 0b0110;
- let Inst{3-0} = 0b1111;
-}
-
-class T2I_ldrex<bits<2> opcod, dag oops, dag iops, AddrMode am, SizeFlagVal sz,
- InstrItinClass itin, string opc, string asm, string cstr,
- list<dag> pattern, bits<4> rt2 = 0b1111>
- : Thumb2I<oops, iops, am, sz, itin, opc, asm, cstr, pattern> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0001101;
- let Inst{11-8} = rt2;
- let Inst{7-6} = 0b01;
- let Inst{5-4} = opcod;
- let Inst{3-0} = 0b1111;
-}
-class T2I_strex<bits<2> opcod, dag oops, dag iops, AddrMode am, SizeFlagVal sz,
- InstrItinClass itin, string opc, string asm, string cstr,
- list<dag> pattern, bits<4> rt2 = 0b1111>
- : Thumb2I<oops, iops, am, sz, itin, opc, asm, cstr, pattern> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0001100;
- let Inst{11-8} = rt2;
- let Inst{7-6} = 0b01;
- let Inst{5-4} = opcod;
-}
-
-let mayLoad = 1 in {
-def t2LDREXB : T2I_ldrex<0b00, (outs GPR:$dest), (ins GPR:$ptr), AddrModeNone,
- Size4Bytes, NoItinerary, "ldrexb", "\t$dest, [$ptr]",
- "", []>;
-def t2LDREXH : T2I_ldrex<0b01, (outs GPR:$dest), (ins GPR:$ptr), AddrModeNone,
- Size4Bytes, NoItinerary, "ldrexh", "\t$dest, [$ptr]",
- "", []>;
-def t2LDREX : Thumb2I<(outs GPR:$dest), (ins GPR:$ptr), AddrModeNone,
- Size4Bytes, NoItinerary,
- "ldrex", "\t$dest, [$ptr]", "",
- []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0000101;
- let Inst{11-8} = 0b1111;
- let Inst{7-0} = 0b00000000; // imm8 = 0
-}
-def t2LDREXD : T2I_ldrex<0b11, (outs GPR:$dest, GPR:$dest2), (ins GPR:$ptr),
- AddrModeNone, Size4Bytes, NoItinerary,
- "ldrexd", "\t$dest, $dest2, [$ptr]", "",
- [], {?, ?, ?, ?}>;
-}
-
-let mayStore = 1, Constraints = "@earlyclobber $success" in {
-def t2STREXB : T2I_strex<0b00, (outs GPR:$success), (ins GPR:$src, GPR:$ptr),
- AddrModeNone, Size4Bytes, NoItinerary,
- "strexb", "\t$success, $src, [$ptr]", "", []>;
-def t2STREXH : T2I_strex<0b01, (outs GPR:$success), (ins GPR:$src, GPR:$ptr),
- AddrModeNone, Size4Bytes, NoItinerary,
- "strexh", "\t$success, $src, [$ptr]", "", []>;
-def t2STREX : Thumb2I<(outs GPR:$success), (ins GPR:$src, GPR:$ptr),
- AddrModeNone, Size4Bytes, NoItinerary,
- "strex", "\t$success, $src, [$ptr]", "",
- []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0000100;
- let Inst{7-0} = 0b00000000; // imm8 = 0
-}
-def t2STREXD : T2I_strex<0b11, (outs GPR:$success),
- (ins GPR:$src, GPR:$src2, GPR:$ptr),
- AddrModeNone, Size4Bytes, NoItinerary,
- "strexd", "\t$success, $src, $src2, [$ptr]", "", [],
- {?, ?, ?, ?}>;
-}
-
-// Clear-Exclusive is for disassembly only.
-def t2CLREX : T2I<(outs), (ins), NoItinerary, "clrex", "",
- [/* For disassembly only; pattern left blank */]>,
- Requires<[IsARM, HasV7]> {
- let Inst{31-20} = 0xf3b;
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
- let Inst{7-4} = 0b0010;
-}
-
-//===----------------------------------------------------------------------===//
-// TLS Instructions
-//
-
-// __aeabi_read_tp preserves the registers r1-r3.
-let isCall = 1,
- Defs = [R0, R12, LR, CPSR] in {
- def t2TPsoft : T2XI<(outs), (ins), IIC_Br,
- "bl\t__aeabi_read_tp",
- [(set R0, ARMthread_pointer)]> {
- let Inst{31-27} = 0b11110;
- let Inst{15-14} = 0b11;
- let Inst{12} = 1;
- }
-}
-
-//===----------------------------------------------------------------------===//
-// SJLJ Exception handling intrinsics
-// eh_sjlj_setjmp() is an instruction sequence to store the return
-// address and save #0 in R0 for the non-longjmp case.
-// Since by its nature we may be coming from some other function to get
-// here, and we're using the stack frame for the containing function to
-// save/restore registers, we can't keep anything live in regs across
-// the eh_sjlj_setjmp(), else it will almost certainly have been tromped upon
-// when we get here from a longjmp(). We force everything out of registers
-// except for our own input by listing the relevant registers in Defs. By
-// doing so, we also cause the prologue/epilogue code to actively preserve
-// all of the callee-saved registers, which is exactly what we want.
-// The current SP is passed in $val, and we reuse the reg as a scratch.
-let Defs =
- [ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, D0,
- D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15,
- D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30,
- D31 ] in {
- def t2Int_eh_sjlj_setjmp : Thumb2XI<(outs), (ins GPR:$src, tGPR:$val),
- AddrModeNone, SizeSpecial, NoItinerary,
- "str\t$val, [$src, #8]\t@ begin eh.setjmp\n"
- "\tmov\t$val, pc\n"
- "\tadds\t$val, #9\n"
- "\tstr\t$val, [$src, #4]\n"
- "\tmovs\tr0, #0\n"
- "\tb\t1f\n"
- "\tmovs\tr0, #1\t@ end eh.setjmp\n"
- "1:", "",
- [(set R0, (ARMeh_sjlj_setjmp GPR:$src, tGPR:$val))]>;
-}
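The comment block above describes the setjmp-style contract this pseudo implements: 0 in R0 on the direct path, non-zero when re-entered via longjmp. As a reminder of that contract only, a self-contained ISO C++ example (not ClamAV or LLVM code):

#include <csetjmp>
#include <cstdio>

static std::jmp_buf Buf;

static void bounce() {
  std::longjmp(Buf, 1);    // control resumes at setjmp, which then yields 1
}

int main() {
  if (setjmp(Buf) == 0) {  // direct return: 0 (the "movs r0, #0" path above)
    std::puts("first pass");
    bounce();
  } else {                 // re-entered via longjmp: non-zero ("movs r0, #1")
    std::puts("resumed after longjmp");
  }
  return 0;
}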
-
-
-
-//===----------------------------------------------------------------------===//
-// Control-Flow Instructions
-//
-
-// FIXME: remove when we have a way to mark an MI with these properties.
-// FIXME: $dst1 should be a def. But the extra ops must be in the end of the
-// operand list.
-// FIXME: Should pc be an implicit operand like PICADD, etc?
-let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1,
- hasExtraDefRegAllocReq = 1 in
- def t2LDM_RET : T2XI<(outs),
- (ins addrmode4:$addr, pred:$p, reglist:$wb, variable_ops),
- IIC_Br, "ldm${addr:submode}${p}${addr:wide}\t$addr, $wb",
- []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-25} = 0b00;
- let Inst{24-23} = {?, ?}; // IA: '01', DB: '10'
- let Inst{22} = 0;
- let Inst{21} = ?; // The W bit.
- let Inst{20} = 1; // Load
-}
-
-let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
-let isPredicable = 1 in
-def t2B : T2XI<(outs), (ins brtarget:$target), IIC_Br,
- "b.w\t$target",
- [(br bb:$target)]> {
- let Inst{31-27} = 0b11110;
- let Inst{15-14} = 0b10;
- let Inst{12} = 1;
-}
-
-let isNotDuplicable = 1, isIndirectBranch = 1 in {
-def t2BR_JT :
- T2JTI<(outs),
- (ins GPR:$target, GPR:$index, jt2block_operand:$jt, i32imm:$id),
- IIC_Br, "mov\tpc, $target\n$jt",
- [(ARMbr2jt GPR:$target, GPR:$index, tjumptable:$jt, imm:$id)]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0100100;
- let Inst{19-16} = 0b1111;
- let Inst{14-12} = 0b000;
- let Inst{11-8} = 0b1111; // Rd = pc
- let Inst{7-4} = 0b0000;
-}
-
-// FIXME: Add a non-pc based case that can be predicated.
-def t2TBB :
- T2JTI<(outs),
- (ins tb_addrmode:$index, jt2block_operand:$jt, i32imm:$id),
- IIC_Br, "tbb\t$index\n$jt", []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0001101;
- let Inst{19-16} = 0b1111; // Rn = pc (table follows this instruction)
- let Inst{15-8} = 0b11110000;
- let Inst{7-4} = 0b0000; // B form
-}
-
-def t2TBH :
- T2JTI<(outs),
- (ins tb_addrmode:$index, jt2block_operand:$jt, i32imm:$id),
- IIC_Br, "tbh\t$index\n$jt", []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0001101;
- let Inst{19-16} = 0b1111; // Rn = pc (table follows this instruction)
- let Inst{15-8} = 0b11110000;
- let Inst{7-4} = 0b0001; // H form
-}
-
-// Generic versions of the above two instructions, for disassembly only
-
-def t2TBBgen : T2I<(outs), (ins GPR:$a, GPR:$b), IIC_Br,
- "tbb", "\t[$a, $b]", []>{
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0001101;
- let Inst{15-8} = 0b11110000;
- let Inst{7-4} = 0b0000; // B form
-}
-
-def t2TBHgen : T2I<(outs), (ins GPR:$a, GPR:$b), IIC_Br,
- "tbh", "\t[$a, $b, lsl #1]", []> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0001101;
- let Inst{15-8} = 0b11110000;
- let Inst{7-4} = 0b0001; // H form
-}
-} // isNotDuplicable, isIndirectBranch
-
-} // isBranch, isTerminator, isBarrier
-
-// FIXME: should be able to write a pattern for ARMBrcond, but can't use
-// a two-value operand where a dag node expects two operands. :(
-let isBranch = 1, isTerminator = 1 in
-def t2Bcc : T2I<(outs), (ins brtarget:$target), IIC_Br,
- "b", ".w\t$target",
- [/*(ARMbrcond bb:$target, imm:$cc)*/]> {
- let Inst{31-27} = 0b11110;
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
-}
-
-
-// IT block
-def t2IT : Thumb2XI<(outs), (ins it_pred:$cc, it_mask:$mask),
- AddrModeNone, Size2Bytes, IIC_iALUx,
- "it$mask\t$cc", "", []> {
- // 16-bit instruction.
- let Inst{31-16} = 0x0000;
- let Inst{15-8} = 0b10111111;
-}
-
-// Branch and Exchange Jazelle -- for disassembly only
-// Rm = Inst{19-16}
-def t2BXJ : T2I<(outs), (ins GPR:$func), NoItinerary, "bxj", "\t$func",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{26} = 0;
- let Inst{25-20} = 0b111100;
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
-}
-
-// Change Processor State is a system instruction -- for disassembly only.
-// The singleton $opt operand contains the following information:
-// opt{4-0} = mode from Inst{4-0}
-// opt{5} = changemode from Inst{17}
-// opt{8-6} = AIF from Inst{8-6}
-// opt{10-9} = imod from Inst{19-18} with 0b10 as enable and 0b11 as disable
-def t2CPS : T2XI<(outs),(ins i32imm:$opt), NoItinerary, "cps${opt:cps}",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{26} = 0;
- let Inst{25-20} = 0b111010;
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
-}
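The bit layout listed in the comment above can be made concrete with a small C++ decoder; the struct and function below are hypothetical helpers written only to illustrate that layout, not code from this tree.

#include <cstdint>

struct CPSOperand {
  unsigned mode;        // opt{4-0}
  bool     changemode;  // opt{5}
  unsigned aif;         // opt{8-6}
  unsigned imod;        // opt{10-9}: 0b10 = interrupt enable, 0b11 = disable
};

CPSOperand decodeCPSOperand(uint32_t opt) {
  CPSOperand Op;
  Op.mode       =  opt        & 0x1F;
  Op.changemode = (opt >> 5)  & 0x1;
  Op.aif        = (opt >> 6)  & 0x7;
  Op.imod       = (opt >> 9)  & 0x3;
  return Op;
}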
-
-// A6.3.4 Branches and miscellaneous control
-// Table A6-14 Change Processor State, and hint instructions
-// Helper class for disassembly only.
-class T2I_hint<bits<8> op7_0, string opc, string asm>
- : T2I<(outs), (ins), NoItinerary, opc, asm,
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-20} = 0xf3a;
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
- let Inst{10-8} = 0b000;
- let Inst{7-0} = op7_0;
-}
-
-def t2NOP : T2I_hint<0b00000000, "nop", ".w">;
-def t2YIELD : T2I_hint<0b00000001, "yield", ".w">;
-def t2WFE : T2I_hint<0b00000010, "wfe", ".w">;
-def t2WFI : T2I_hint<0b00000011, "wfi", ".w">;
-def t2SEV : T2I_hint<0b00000100, "sev", ".w">;
-
-def t2DBG : T2I<(outs),(ins i32imm:$opt), NoItinerary, "dbg", "\t$opt",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-20} = 0xf3a;
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
- let Inst{10-8} = 0b000;
- let Inst{7-4} = 0b1111;
-}
-
-// Secure Monitor Call is a system instruction -- for disassembly only
-// Option = Inst{19-16}
-def t2SMC : T2I<(outs), (ins i32imm:$opt), NoItinerary, "smc", "\t$opt",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{26-20} = 0b1111111;
- let Inst{15-12} = 0b1000;
-}
-
-// Store Return State is a system instruction -- for disassembly only
-def t2SRSDBW : T2I<(outs),(ins i32imm:$mode),NoItinerary,"srsdb","\tsp!, $mode",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0000010; // W = 1
-}
-
-def t2SRSDB : T2I<(outs),(ins i32imm:$mode),NoItinerary,"srsdb","\tsp, $mode",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0000000; // W = 0
-}
-
-def t2SRSIAW : T2I<(outs),(ins i32imm:$mode),NoItinerary,"srsia","\tsp!, $mode",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0011010; // W = 1
-}
-
-def t2SRSIA : T2I<(outs), (ins i32imm:$mode),NoItinerary,"srsia","\tsp, $mode",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0011000; // W = 0
-}
-
-// Return From Exception is a system instruction -- for disassembly only
-def t2RFEDBW : T2I<(outs), (ins GPR:$base), NoItinerary, "rfedb", "\t$base!",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0000011; // W = 1
-}
-
-def t2RFEDB : T2I<(outs), (ins GPR:$base), NoItinerary, "rfedb", "\t$base",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0000001; // W = 0
-}
-
-def t2RFEIAW : T2I<(outs), (ins GPR:$base), NoItinerary, "rfeia", "\t$base!",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0011011; // W = 1
-}
-
-def t2RFEIA : T2I<(outs), (ins GPR:$base), NoItinerary, "rfeia", "\t$base",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11101;
- let Inst{26-20} = 0b0011001; // W = 0
-}
-
-//===----------------------------------------------------------------------===//
-// Non-Instruction Patterns
-//
-
-// Two piece so_imms.
-def : T2Pat<(or GPR:$LHS, t2_so_imm2part:$RHS),
- (t2ORRri (t2ORRri GPR:$LHS, (t2_so_imm2part_1 imm:$RHS)),
- (t2_so_imm2part_2 imm:$RHS))>;
-def : T2Pat<(xor GPR:$LHS, t2_so_imm2part:$RHS),
- (t2EORri (t2EORri GPR:$LHS, (t2_so_imm2part_1 imm:$RHS)),
- (t2_so_imm2part_2 imm:$RHS))>;
-def : T2Pat<(add GPR:$LHS, t2_so_imm2part:$RHS),
- (t2ADDri (t2ADDri GPR:$LHS, (t2_so_imm2part_1 imm:$RHS)),
- (t2_so_imm2part_2 imm:$RHS))>;
-def : T2Pat<(add GPR:$LHS, t2_so_neg_imm2part:$RHS),
- (t2SUBri (t2SUBri GPR:$LHS, (t2_so_neg_imm2part_1 imm:$RHS)),
- (t2_so_neg_imm2part_2 imm:$RHS))>;
-
-// 32-bit immediate using movw + movt.
-// This is a single pseudo instruction to make it re-materializable. Remove
-// when we can do generalized remat.
-let isReMaterializable = 1 in
-def t2MOVi32imm : T2Ix2<(outs GPR:$dst), (ins i32imm:$src), IIC_iMOVi,
- "movw", "\t$dst, ${src:lo16}\n\tmovt${p}\t$dst, ${src:hi16}",
- [(set GPR:$dst, (i32 imm:$src))]>;
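To make the lo16/hi16 split concrete, a short worked example (the constant 0x12345678 is arbitrary; this is illustrative C++, not part of the pseudo expansion itself):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t imm  = 0x12345678;     // arbitrary 32-bit constant
  uint16_t lo16 = imm & 0xFFFF;   // movw dst, #0x5678 (writes and zero-extends the low half)
  uint16_t hi16 = imm >> 16;      // movt dst, #0x1234 (writes the top halfword, keeps the low)
  std::printf("movw #0x%04X, movt #0x%04X\n", lo16, hi16);
  return 0;
}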
-
-// ConstantPool, GlobalAddress, and JumpTable
-def : T2Pat<(ARMWrapper tglobaladdr :$dst), (t2LEApcrel tglobaladdr :$dst)>,
- Requires<[IsThumb2, DontUseMovt]>;
-def : T2Pat<(ARMWrapper tconstpool :$dst), (t2LEApcrel tconstpool :$dst)>;
-def : T2Pat<(ARMWrapper tglobaladdr :$dst), (t2MOVi32imm tglobaladdr :$dst)>,
- Requires<[IsThumb2, UseMovt]>;
-
-def : T2Pat<(ARMWrapperJT tjumptable:$dst, imm:$id),
- (t2LEApcrelJT tjumptable:$dst, imm:$id)>;
-
-// Pseudo instruction that combines ldr from constpool and add pc. This should
-// be expanded into two instructions late to allow if-conversion and
-// scheduling.
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def t2LDRpci_pic : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr, pclabel:$cp),
- NoItinerary, "@ ldr.w\t$dst, $addr\n$cp:\n\tadd\t$dst, pc",
- [(set GPR:$dst, (ARMpic_add (load (ARMWrapper tconstpool:$addr)),
- imm:$cp))]>,
- Requires<[IsThumb2]>;
-
-//===----------------------------------------------------------------------===//
-// Move between special register and ARM core register -- for disassembly only
-//
-
-// Rd = Instr{11-8}
-def t2MRS : T2I<(outs GPR:$dst), (ins), NoItinerary, "mrs", "\t$dst, cpsr",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{26} = 0;
- let Inst{25-21} = 0b11111;
- let Inst{20} = 0; // The R bit.
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
-}
-
-// Rd = Instr{11-8}
-def t2MRSsys : T2I<(outs GPR:$dst), (ins), NoItinerary, "mrs", "\t$dst, spsr",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{26} = 0;
- let Inst{25-21} = 0b11111;
- let Inst{20} = 1; // The R bit.
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
-}
-
-// FIXME: mask is ignored for the time being.
-// Rn = Inst{19-16}
-def t2MSR : T2I<(outs), (ins GPR:$src), NoItinerary, "msr", "\tcpsr, $src",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{26} = 0;
- let Inst{25-21} = 0b11100;
- let Inst{20} = 0; // The R bit.
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
-}
-
-// FIXME: mask is ignored for the time being.
-// Rn = Inst{19-16}
-def t2MSRsys : T2I<(outs), (ins GPR:$src), NoItinerary, "msr", "\tspsr, $src",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{31-27} = 0b11110;
- let Inst{26} = 0;
- let Inst{25-21} = 0b11100;
- let Inst{20} = 1; // The R bit.
- let Inst{15-14} = 0b10;
- let Inst{12} = 0;
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMInstrVFP.td b/libclamav/c++/llvm/lib/Target/ARM/ARMInstrVFP.td
deleted file mode 100644
index 7c117ed..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ /dev/null
@@ -1,663 +0,0 @@
-//===- ARMInstrVFP.td - VFP support for ARM -------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the ARM VFP instruction set.
-//
-//===----------------------------------------------------------------------===//
-
-def SDT_FTOI :
-SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
-def SDT_ITOF :
-SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
-def SDT_CMPFP0 :
-SDTypeProfile<0, 1, [SDTCisFP<0>]>;
-def SDT_VMOVDRR :
-SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
- SDTCisSameAs<1, 2>]>;
-
-def arm_ftoui : SDNode<"ARMISD::FTOUI", SDT_FTOI>;
-def arm_ftosi : SDNode<"ARMISD::FTOSI", SDT_FTOI>;
-def arm_sitof : SDNode<"ARMISD::SITOF", SDT_ITOF>;
-def arm_uitof : SDNode<"ARMISD::UITOF", SDT_ITOF>;
-def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInFlag,SDNPOutFlag]>;
-def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMCmp, [SDNPOutFlag]>;
-def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0",SDT_CMPFP0, [SDNPOutFlag]>;
-def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
-
-//===----------------------------------------------------------------------===//
-// Operand Definitions.
-//
-
-
-def vfp_f32imm : Operand<f32>,
- PatLeaf<(f32 fpimm), [{
- return ARM::getVFPf32Imm(N->getValueAPF()) != -1;
- }]> {
- let PrintMethod = "printVFPf32ImmOperand";
-}
-
-def vfp_f64imm : Operand<f64>,
- PatLeaf<(f64 fpimm), [{
- return ARM::getVFPf64Imm(N->getValueAPF()) != -1;
- }]> {
- let PrintMethod = "printVFPf64ImmOperand";
-}
-
-
-//===----------------------------------------------------------------------===//
-// Load / store Instructions.
-//
-
-let canFoldAsLoad = 1, isReMaterializable = 1 in {
-def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$dst), (ins addrmode5:$addr),
- IIC_fpLoad64, "vldr", ".64\t$dst, $addr",
- [(set DPR:$dst, (load addrmode5:$addr))]>;
-
-def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$dst), (ins addrmode5:$addr),
- IIC_fpLoad32, "vldr", ".32\t$dst, $addr",
- [(set SPR:$dst, (load addrmode5:$addr))]>;
-} // canFoldAsLoad
-
-def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$src, addrmode5:$addr),
- IIC_fpStore64, "vstr", ".64\t$src, $addr",
- [(store DPR:$src, addrmode5:$addr)]>;
-
-def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$src, addrmode5:$addr),
- IIC_fpStore32, "vstr", ".32\t$src, $addr",
- [(store SPR:$src, addrmode5:$addr)]>;
-
-//===----------------------------------------------------------------------===//
-// Load / store multiple Instructions.
-//
-
-let mayLoad = 1, hasExtraDefRegAllocReq = 1 in {
-def VLDMD : AXDI5<(outs), (ins addrmode5:$addr, pred:$p, reglist:$wb,
- variable_ops), IIC_fpLoadm,
- "vldm${addr:submode}${p}\t${addr:base}, $wb",
- []> {
- let Inst{20} = 1;
-}
-
-def VLDMS : AXSI5<(outs), (ins addrmode5:$addr, pred:$p, reglist:$wb,
- variable_ops), IIC_fpLoadm,
- "vldm${addr:submode}${p}\t${addr:base}, $wb",
- []> {
- let Inst{20} = 1;
-}
-} // mayLoad, hasExtraDefRegAllocReq
-
-let mayStore = 1, hasExtraSrcRegAllocReq = 1 in {
-def VSTMD : AXDI5<(outs), (ins addrmode5:$addr, pred:$p, reglist:$wb,
- variable_ops), IIC_fpStorem,
- "vstm${addr:submode}${p}\t${addr:base}, $wb",
- []> {
- let Inst{20} = 0;
-}
-
-def VSTMS : AXSI5<(outs), (ins addrmode5:$addr, pred:$p, reglist:$wb,
- variable_ops), IIC_fpStorem,
- "vstm${addr:submode}${p}\t${addr:base}, $wb",
- []> {
- let Inst{20} = 0;
-}
-} // mayStore, hasExtraSrcRegAllocReq
-
-// FLDMX, FSTMX - mixing S/D registers for pre-armv6 cores
-
-//===----------------------------------------------------------------------===//
-// FP Binary Operations.
-//
-
-def VADDD : ADbI<0b11100, 0b11, 0, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
- IIC_fpALU64, "vadd", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fadd DPR:$a, DPR:$b))]>;
-
-def VADDS : ASbIn<0b11100, 0b11, 0, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
- IIC_fpALU32, "vadd", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fadd SPR:$a, SPR:$b))]>;
-
-// These are encoded as unary instructions.
-let Defs = [FPSCR] in {
-def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs), (ins DPR:$a, DPR:$b),
- IIC_fpCMP64, "vcmpe", ".f64\t$a, $b",
- [(arm_cmpfp DPR:$a, DPR:$b)]>;
-
-def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0, (outs), (ins DPR:$a, DPR:$b),
- IIC_fpCMP64, "vcmp", ".f64\t$a, $b",
- [/* For disassembly only; pattern left blank */]>;
-
-def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs), (ins SPR:$a, SPR:$b),
- IIC_fpCMP32, "vcmpe", ".f32\t$a, $b",
- [(arm_cmpfp SPR:$a, SPR:$b)]>;
-
-def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0, (outs), (ins SPR:$a, SPR:$b),
- IIC_fpCMP32, "vcmp", ".f32\t$a, $b",
- [/* For disassembly only; pattern left blank */]>;
-}
-
-def VDIVD : ADbI<0b11101, 0b00, 0, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
- IIC_fpDIV64, "vdiv", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fdiv DPR:$a, DPR:$b))]>;
-
-def VDIVS : ASbI<0b11101, 0b00, 0, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
- IIC_fpDIV32, "vdiv", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fdiv SPR:$a, SPR:$b))]>;
-
-def VMULD : ADbI<0b11100, 0b10, 0, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
- IIC_fpMUL64, "vmul", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fmul DPR:$a, DPR:$b))]>;
-
-def VMULS : ASbIn<0b11100, 0b10, 0, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
- IIC_fpMUL32, "vmul", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fmul SPR:$a, SPR:$b))]>;
-
-def VNMULD : ADbI<0b11100, 0b10, 1, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
- IIC_fpMUL64, "vnmul", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fneg (fmul DPR:$a, DPR:$b)))]>;
-
-def VNMULS : ASbI<0b11100, 0b10, 1, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
- IIC_fpMUL32, "vnmul", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fneg (fmul SPR:$a, SPR:$b)))]>;
-
-// Match reassociated forms only if not sign dependent rounding.
-def : Pat<(fmul (fneg DPR:$a), DPR:$b),
- (VNMULD DPR:$a, DPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
-def : Pat<(fmul (fneg SPR:$a), SPR:$b),
- (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
-
-
-def VSUBD : ADbI<0b11100, 0b11, 1, 0, (outs DPR:$dst), (ins DPR:$a, DPR:$b),
- IIC_fpALU64, "vsub", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fsub DPR:$a, DPR:$b))]>;
-
-def VSUBS : ASbIn<0b11100, 0b11, 1, 0, (outs SPR:$dst), (ins SPR:$a, SPR:$b),
- IIC_fpALU32, "vsub", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fsub SPR:$a, SPR:$b))]>;
-
-//===----------------------------------------------------------------------===//
-// FP Unary Operations.
-//
-
-def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0, (outs DPR:$dst), (ins DPR:$a),
- IIC_fpUNA64, "vabs", ".f64\t$dst, $a",
- [(set DPR:$dst, (fabs DPR:$a))]>;
-
-def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,(outs SPR:$dst), (ins SPR:$a),
- IIC_fpUNA32, "vabs", ".f32\t$dst, $a",
- [(set SPR:$dst, (fabs SPR:$a))]>;
-
-let Defs = [FPSCR] in {
-def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins DPR:$a),
- IIC_fpCMP64, "vcmpe", ".f64\t$a, #0",
- [(arm_cmpfp0 DPR:$a)]>;
-
-def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0, (outs), (ins DPR:$a),
- IIC_fpCMP64, "vcmp", ".f64\t$a, #0",
- [/* For disassembly only; pattern left blank */]>;
-
-def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins SPR:$a),
- IIC_fpCMP32, "vcmpe", ".f32\t$a, #0",
- [(arm_cmpfp0 SPR:$a)]>;
-
-def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0, (outs), (ins SPR:$a),
- IIC_fpCMP32, "vcmp", ".f32\t$a, #0",
- [/* For disassembly only; pattern left blank */]>;
-}
-
-def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0, (outs DPR:$dst), (ins SPR:$a),
- IIC_fpCVTDS, "vcvt", ".f64.f32\t$dst, $a",
- [(set DPR:$dst, (fextend SPR:$a))]>;
-
-// Special case encoding: bits 11-8 are 0b1011.
-def VCVTSD : VFPAI<(outs SPR:$dst), (ins DPR:$a), VFPUnaryFrm,
- IIC_fpCVTSD, "vcvt", ".f32.f64\t$dst, $a",
- [(set SPR:$dst, (fround DPR:$a))]> {
- let Inst{27-23} = 0b11101;
- let Inst{21-16} = 0b110111;
- let Inst{11-8} = 0b1011;
- let Inst{7-6} = 0b11;
- let Inst{4} = 0;
-}
-
-// Between half-precision and single-precision. For disassembly only.
-
-def VCVTBSH : ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
- /* FIXME */ IIC_fpCVTDS, "vcvtb", ".f32.f16\t$dst, $a",
- [/* For disassembly only; pattern left blank */]>;
-
-def VCVTBHS : ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
- /* FIXME */ IIC_fpCVTDS, "vcvtb", ".f16.f32\t$dst, $a",
- [/* For disassembly only; pattern left blank */]>;
-
-def VCVTTSH : ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
- /* FIXME */ IIC_fpCVTDS, "vcvtt", ".f32.f16\t$dst, $a",
- [/* For disassembly only; pattern left blank */]>;
-
-def VCVTTHS : ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
- /* FIXME */ IIC_fpCVTDS, "vcvtt", ".f16.f32\t$dst, $a",
- [/* For disassembly only; pattern left blank */]>;
-
-let neverHasSideEffects = 1 in {
-def VMOVD: ADuI<0b11101, 0b11, 0b0000, 0b01, 0, (outs DPR:$dst), (ins DPR:$a),
- IIC_fpUNA64, "vmov", ".f64\t$dst, $a", []>;
-
-def VMOVS: ASuI<0b11101, 0b11, 0b0000, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
- IIC_fpUNA32, "vmov", ".f32\t$dst, $a", []>;
-} // neverHasSideEffects
-
-def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0, (outs DPR:$dst), (ins DPR:$a),
- IIC_fpUNA64, "vneg", ".f64\t$dst, $a",
- [(set DPR:$dst, (fneg DPR:$a))]>;
-
-def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,(outs SPR:$dst), (ins SPR:$a),
- IIC_fpUNA32, "vneg", ".f32\t$dst, $a",
- [(set SPR:$dst, (fneg SPR:$a))]>;
-
-def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0, (outs DPR:$dst), (ins DPR:$a),
- IIC_fpSQRT64, "vsqrt", ".f64\t$dst, $a",
- [(set DPR:$dst, (fsqrt DPR:$a))]>;
-
-def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
- IIC_fpSQRT32, "vsqrt", ".f32\t$dst, $a",
- [(set SPR:$dst, (fsqrt SPR:$a))]>;
-
-//===----------------------------------------------------------------------===//
-// FP <-> GPR Copies. Int <-> FP Conversions.
-//
-
-def VMOVRS : AVConv2I<0b11100001, 0b1010, (outs GPR:$dst), (ins SPR:$src),
- IIC_VMOVSI, "vmov", "\t$dst, $src",
- [(set GPR:$dst, (bitconvert SPR:$src))]>;
-
-def VMOVSR : AVConv4I<0b11100000, 0b1010, (outs SPR:$dst), (ins GPR:$src),
- IIC_VMOVIS, "vmov", "\t$dst, $src",
- [(set SPR:$dst, (bitconvert GPR:$src))]>;
-
-def VMOVRRD : AVConv3I<0b11000101, 0b1011,
- (outs GPR:$wb, GPR:$dst2), (ins DPR:$src),
- IIC_VMOVDI, "vmov", "\t$wb, $dst2, $src",
- [/* FIXME: Can't write pattern for multiple result instr*/]> {
- let Inst{7-6} = 0b00;
-}
-
-def VMOVRRS : AVConv3I<0b11000101, 0b1010,
- (outs GPR:$wb, GPR:$dst2), (ins SPR:$src1, SPR:$src2),
- IIC_VMOVDI, "vmov", "\t$wb, $dst2, $src1, $src2",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{7-6} = 0b00;
-}
-
-// FMDHR: GPR -> SPR
-// FMDLR: GPR -> SPR
-
-def VMOVDRR : AVConv5I<0b11000100, 0b1011,
- (outs DPR:$dst), (ins GPR:$src1, GPR:$src2),
- IIC_VMOVID, "vmov", "\t$dst, $src1, $src2",
- [(set DPR:$dst, (arm_fmdrr GPR:$src1, GPR:$src2))]> {
- let Inst{7-6} = 0b00;
-}
-
-def VMOVSRR : AVConv5I<0b11000100, 0b1010,
- (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
- IIC_VMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{7-6} = 0b00;
-}
-
-// FMRDH: SPR -> GPR
-// FMRDL: SPR -> GPR
-// FMRRS: SPR -> GPR
-// FMRX : SPR system reg -> GPR
-
-// FMSRR: GPR -> SPR
-
-// FMXR: GPR -> VFP system reg
-
-
-// Int to FP:
-
-def VSITOD : AVConv1I<0b11101, 0b11, 0b1000, 0b1011,
- (outs DPR:$dst), (ins SPR:$a),
- IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a",
- [(set DPR:$dst, (arm_sitof SPR:$a))]> {
- let Inst{7} = 1; // s32
-}
-
-def VSITOS : AVConv1In<0b11101, 0b11, 0b1000, 0b1010,
- (outs SPR:$dst),(ins SPR:$a),
- IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a",
- [(set SPR:$dst, (arm_sitof SPR:$a))]> {
- let Inst{7} = 1; // s32
-}
-
-def VUITOD : AVConv1I<0b11101, 0b11, 0b1000, 0b1011,
- (outs DPR:$dst), (ins SPR:$a),
- IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a",
- [(set DPR:$dst, (arm_uitof SPR:$a))]> {
- let Inst{7} = 0; // u32
-}
-
-def VUITOS : AVConv1In<0b11101, 0b11, 0b1000, 0b1010,
- (outs SPR:$dst), (ins SPR:$a),
- IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a",
- [(set SPR:$dst, (arm_uitof SPR:$a))]> {
- let Inst{7} = 0; // u32
-}
-
-// FP to Int:
-// Always set Z bit in the instruction, i.e. "round towards zero" variants.
-
-def VTOSIZD : AVConv1I<0b11101, 0b11, 0b1101, 0b1011,
- (outs SPR:$dst), (ins DPR:$a),
- IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a",
- [(set SPR:$dst, (arm_ftosi DPR:$a))]> {
- let Inst{7} = 1; // Z bit
-}
-
-def VTOSIZS : AVConv1In<0b11101, 0b11, 0b1101, 0b1010,
- (outs SPR:$dst), (ins SPR:$a),
- IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a",
- [(set SPR:$dst, (arm_ftosi SPR:$a))]> {
- let Inst{7} = 1; // Z bit
-}
-
-def VTOUIZD : AVConv1I<0b11101, 0b11, 0b1100, 0b1011,
- (outs SPR:$dst), (ins DPR:$a),
- IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a",
- [(set SPR:$dst, (arm_ftoui DPR:$a))]> {
- let Inst{7} = 1; // Z bit
-}
-
-def VTOUIZS : AVConv1In<0b11101, 0b11, 0b1100, 0b1010,
- (outs SPR:$dst), (ins SPR:$a),
- IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a",
- [(set SPR:$dst, (arm_ftoui SPR:$a))]> {
- let Inst{7} = 1; // Z bit
-}
-
-// And the Z bit '0' variants, i.e. use the rounding mode specified by FPSCR.
-// For disassembly only.
-
-def VTOSIRD : AVConv1I<0b11101, 0b11, 0b1101, 0b1011,
- (outs SPR:$dst), (ins DPR:$a),
- IIC_fpCVTDI, "vcvtr", ".s32.f64\t$dst, $a",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{7} = 0; // Z bit
-}
-
-def VTOSIRS : AVConv1In<0b11101, 0b11, 0b1101, 0b1010,
- (outs SPR:$dst), (ins SPR:$a),
- IIC_fpCVTSI, "vcvtr", ".s32.f32\t$dst, $a",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{7} = 0; // Z bit
-}
-
-def VTOUIRD : AVConv1I<0b11101, 0b11, 0b1100, 0b1011,
- (outs SPR:$dst), (ins DPR:$a),
- IIC_fpCVTDI, "vcvtr", ".u32.f64\t$dst, $a",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{7} = 0; // Z bit
-}
-
-def VTOUIRS : AVConv1In<0b11101, 0b11, 0b1100, 0b1010,
- (outs SPR:$dst), (ins SPR:$a),
- IIC_fpCVTSI, "vcvtr", ".u32.f32\t$dst, $a",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{7} = 0; // Z bit
-}
-
-// Convert between floating-point and fixed-point
-// Data type for fixed-point naming convention:
-// S16 (U=0, sx=0) -> SH
-// U16 (U=1, sx=0) -> UH
-// S32 (U=0, sx=1) -> SL
-// U32 (U=1, sx=1) -> UL
-
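As a numeric illustration of the fbits scaling used by these fixed-point conversions (the value and fbits are chosen arbitrarily; plain C++, not VFP code):

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  float    value = 1.75f;
  unsigned fbits = 8;   // 8 fraction bits, as in "vcvt.s16.f32 s0, s0, #8"
  // FP -> fixed: scale by 2^fbits and round; fixed -> FP: divide by 2^fbits.
  int16_t fixed = (int16_t)std::lround(value * std::ldexp(1.0f, (int)fbits)); // 1.75 * 256 = 448
  float   back  = (float)fixed / std::ldexp(1.0f, (int)fbits);                // 448 / 256 = 1.75
  std::printf("fixed = %d, back = %g\n", fixed, back);
  return 0;
}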
-let Constraints = "$a = $dst" in {
-
-// FP to Fixed-Point:
-
-def VTOSHS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 0,
- (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
- IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
-
-def VTOUHS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 0,
- (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
- IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
-
-def VTOSLS : AVConv1XI<0b11101, 0b11, 0b1110, 0b1010, 1,
- (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
- IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
-
-def VTOULS : AVConv1XI<0b11101, 0b11, 0b1111, 0b1010, 1,
- (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
- IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
-
-def VTOSHD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 0,
- (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
- IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
-
-def VTOUHD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 0,
- (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
- IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
-
-def VTOSLD : AVConv1XI<0b11101, 0b11, 0b1110, 0b1011, 1,
- (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
- IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
-
-def VTOULD : AVConv1XI<0b11101, 0b11, 0b1111, 0b1011, 1,
- (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
- IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
-
-// Fixed-Point to FP:
-
-def VSHTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 0,
- (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
- IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
-
-def VUHTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 0,
- (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
- IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
-
-def VSLTOS : AVConv1XI<0b11101, 0b11, 0b1010, 0b1010, 1,
- (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
- IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
-
-def VULTOS : AVConv1XI<0b11101, 0b11, 0b1011, 0b1010, 1,
- (outs SPR:$dst), (ins SPR:$a, i32imm:$fbits),
- IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
-
-def VSHTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 0,
- (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
- IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
-
-def VUHTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 0,
- (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
- IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
-
-def VSLTOD : AVConv1XI<0b11101, 0b11, 0b1010, 0b1011, 1,
- (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
- IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
-
-def VULTOD : AVConv1XI<0b11101, 0b11, 0b1011, 0b1011, 1,
- (outs DPR:$dst), (ins DPR:$a, i32imm:$fbits),
- IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits",
- [/* For disassembly only; pattern left blank */]>;
-
-} // End of 'let Constraints = "$a = $dst" in'
-
-//===----------------------------------------------------------------------===//
-// FP FMA Operations.
-//
-
-def VMLAD : ADbI<0b11100, 0b00, 0, 0,
- (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
- IIC_fpMAC64, "vmla", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fadd (fmul DPR:$a, DPR:$b), DPR:$dstin))]>,
- RegConstraint<"$dstin = $dst">;
-
-def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
- (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
- IIC_fpMAC32, "vmla", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fadd (fmul SPR:$a, SPR:$b), SPR:$dstin))]>,
- RegConstraint<"$dstin = $dst">;
-
-def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
- (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
- IIC_fpMAC64, "vnmls", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fsub (fmul DPR:$a, DPR:$b), DPR:$dstin))]>,
- RegConstraint<"$dstin = $dst">;
-
-def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
- (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
- IIC_fpMAC32, "vnmls", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fsub (fmul SPR:$a, SPR:$b), SPR:$dstin))]>,
- RegConstraint<"$dstin = $dst">;
-
-def VMLSD : ADbI<0b11100, 0b00, 1, 0,
- (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
- IIC_fpMAC64, "vmls", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fadd (fneg (fmul DPR:$a, DPR:$b)), DPR:$dstin))]>,
- RegConstraint<"$dstin = $dst">;
-
-def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
- (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
- IIC_fpMAC32, "vmls", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fadd (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin))]>,
- RegConstraint<"$dstin = $dst">;
-
-def : Pat<(fsub DPR:$dstin, (fmul DPR:$a, DPR:$b)),
- (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>, Requires<[DontUseNEONForFP]>;
-def : Pat<(fsub SPR:$dstin, (fmul SPR:$a, SPR:$b)),
- (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>, Requires<[DontUseNEONForFP]>;
-
-def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
- (outs DPR:$dst), (ins DPR:$dstin, DPR:$a, DPR:$b),
- IIC_fpMAC64, "vnmla", ".f64\t$dst, $a, $b",
- [(set DPR:$dst, (fsub (fneg (fmul DPR:$a, DPR:$b)), DPR:$dstin))]>,
- RegConstraint<"$dstin = $dst">;
-
-def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
- (outs SPR:$dst), (ins SPR:$dstin, SPR:$a, SPR:$b),
- IIC_fpMAC32, "vnmla", ".f32\t$dst, $a, $b",
- [(set SPR:$dst, (fsub (fneg (fmul SPR:$a, SPR:$b)), SPR:$dstin))]>,
- RegConstraint<"$dstin = $dst">;
-
-//===----------------------------------------------------------------------===//
-// FP Conditional moves.
-//
-
-def VMOVDcc : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
- (outs DPR:$dst), (ins DPR:$false, DPR:$true),
- IIC_fpUNA64, "vmov", ".f64\t$dst, $true",
- [/*(set DPR:$dst, (ARMcmov DPR:$false, DPR:$true, imm:$cc))*/]>,
- RegConstraint<"$false = $dst">;
-
-def VMOVScc : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
- (outs SPR:$dst), (ins SPR:$false, SPR:$true),
- IIC_fpUNA32, "vmov", ".f32\t$dst, $true",
- [/*(set SPR:$dst, (ARMcmov SPR:$false, SPR:$true, imm:$cc))*/]>,
- RegConstraint<"$false = $dst">;
-
-def VNEGDcc : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
- (outs DPR:$dst), (ins DPR:$false, DPR:$true),
- IIC_fpUNA64, "vneg", ".f64\t$dst, $true",
- [/*(set DPR:$dst, (ARMcneg DPR:$false, DPR:$true, imm:$cc))*/]>,
- RegConstraint<"$false = $dst">;
-
-def VNEGScc : ASuI<0b11101, 0b11, 0b0001, 0b01, 0,
- (outs SPR:$dst), (ins SPR:$false, SPR:$true),
- IIC_fpUNA32, "vneg", ".f32\t$dst, $true",
- [/*(set SPR:$dst, (ARMcneg SPR:$false, SPR:$true, imm:$cc))*/]>,
- RegConstraint<"$false = $dst">;
-
-
-//===----------------------------------------------------------------------===//
-// Misc.
-//
-
-// APSR is the application level alias of CPSR. This instruction copies the
-// FPSCR N, Z, C, V flags to APSR.
-let Defs = [CPSR], Uses = [FPSCR] in
-def FMSTAT : VFPAI<(outs), (ins), VFPMiscFrm, IIC_fpSTAT, "vmrs",
- "\tapsr_nzcv, fpscr",
- [(arm_fmstat)]> {
- let Inst{27-20} = 0b11101111;
- let Inst{19-16} = 0b0001;
- let Inst{15-12} = 0b1111;
- let Inst{11-8} = 0b1010;
- let Inst{7} = 0;
- let Inst{4} = 1;
-}
-
-// FPSCR <-> GPR (for disassembly only)
-
-let Uses = [FPSCR] in {
-def VMRS : VFPAI<(outs GPR:$dst), (ins), VFPMiscFrm, IIC_fpSTAT, "vmrs",
- "\t$dst, fpscr",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{27-20} = 0b11101111;
- let Inst{19-16} = 0b0001;
- let Inst{11-8} = 0b1010;
- let Inst{7} = 0;
- let Inst{4} = 1;
-}
-}
-
-let Defs = [FPSCR] in {
-def VMSR : VFPAI<(outs), (ins GPR:$src), VFPMiscFrm, IIC_fpSTAT, "vmsr",
- "\tfpscr, $src",
- [/* For disassembly only; pattern left blank */]> {
- let Inst{27-20} = 0b11101110;
- let Inst{19-16} = 0b0001;
- let Inst{11-8} = 0b1010;
- let Inst{7} = 0;
- let Inst{4} = 1;
-}
-}
-
-// Materialize FP immediates. VFP3 only.
-let isReMaterializable = 1 in {
-def FCONSTD : VFPAI<(outs DPR:$dst), (ins vfp_f64imm:$imm),
- VFPMiscFrm, IIC_VMOVImm,
- "vmov", ".f64\t$dst, $imm",
- [(set DPR:$dst, vfp_f64imm:$imm)]>, Requires<[HasVFP3]> {
- let Inst{27-23} = 0b11101;
- let Inst{21-20} = 0b11;
- let Inst{11-9} = 0b101;
- let Inst{8} = 1;
- let Inst{7-4} = 0b0000;
-}
-
-def FCONSTS : VFPAI<(outs SPR:$dst), (ins vfp_f32imm:$imm),
- VFPMiscFrm, IIC_VMOVImm,
- "vmov", ".f32\t$dst, $imm",
- [(set SPR:$dst, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
- let Inst{27-23} = 0b11101;
- let Inst{21-20} = 0b11;
- let Inst{11-9} = 0b101;
- let Inst{8} = 0;
- let Inst{7-4} = 0b0000;
-}
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMJITInfo.cpp b/libclamav/c++/llvm/lib/Target/ARM/ARMJITInfo.cpp
deleted file mode 100644
index 8c0b720..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMJITInfo.cpp
+++ /dev/null
@@ -1,323 +0,0 @@
-//===-- ARMJITInfo.cpp - Implement the JIT interfaces for the ARM target --===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the JIT interfaces for the ARM target.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "jit"
-#include "ARMJITInfo.h"
-#include "ARMInstrInfo.h"
-#include "ARMConstantPoolValue.h"
-#include "ARMRelocations.h"
-#include "ARMSubtarget.h"
-#include "llvm/Function.h"
-#include "llvm/CodeGen/JITCodeEmitter.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Memory.h"
-#include <cstdlib>
-using namespace llvm;
-
-void ARMJITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
- llvm_report_error("ARMJITInfo::replaceMachineCodeForFunction");
-}
-
-/// JITCompilerFunction - This contains the address of the JIT function used to
-/// compile a function lazily.
-static TargetJITInfo::JITCompilerFn JITCompilerFunction;
-
-// Get the ASMPREFIX for the current host. This is often '_'.
-#ifndef __USER_LABEL_PREFIX__
-#define __USER_LABEL_PREFIX__
-#endif
-#define GETASMPREFIX2(X) #X
-#define GETASMPREFIX(X) GETASMPREFIX2(X)
-#define ASMPREFIX GETASMPREFIX(__USER_LABEL_PREFIX__)
-
-// CompilationCallback stub - We can't use a C function with inline assembly in
-// it, because the prolog/epilog inserted by GCC won't work for us (we need
-// to preserve more context and manipulate the stack directly). Instead, we
-// write our own wrapper, which does things our way, so we have complete
-// control over register saving and restoring.
-extern "C" {
-#if defined(__arm__)
- void ARMCompilationCallback();
- asm(
- ".text\n"
- ".align 2\n"
- ".globl " ASMPREFIX "ARMCompilationCallback\n"
- ASMPREFIX "ARMCompilationCallback:\n"
- // Save caller saved registers since they may contain stuff
- // for the real target function right now. We have to act as if this
- // whole compilation callback doesn't exist as far as the caller is
- // concerned, so we can't just preserve the callee saved regs.
- "stmdb sp!, {r0, r1, r2, r3, lr}\n"
-#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
- "fstmfdd sp!, {d0, d1, d2, d3, d4, d5, d6, d7}\n"
-#endif
- // The LR contains the address of the stub function on entry.
- // pass it as the argument to the C part of the callback
- "mov r0, lr\n"
- "sub sp, sp, #4\n"
- // Call the C portion of the callback
- "bl " ASMPREFIX "ARMCompilationCallbackC\n"
- "add sp, sp, #4\n"
- // Restoring the LR to the return address of the function that invoked
- // the stub and de-allocating the stack space for it requires us to
- // swap the two saved LR values on the stack, as they're backwards
- // for what we need since the pop instruction has a pre-determined
- // order for the registers.
- // +--------+
- // 0 | LR | Original return address
- // +--------+
- // 1 | LR | Stub address (start of stub)
- // 2-5 | R3..R0 | Saved registers (we need to preserve all regs)
- // 6-20 | D0..D7 | Saved VFP registers
- // +--------+
- //
-#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
- // Restore VFP caller-saved registers.
- "fldmfdd sp!, {d0, d1, d2, d3, d4, d5, d6, d7}\n"
-#endif
- //
- // We need to exchange the values in slots 0 and 1 so we can
- // return to the address in slot 1 with the address in slot 0
- // restored to the LR.
- "ldr r0, [sp,#20]\n"
- "ldr r1, [sp,#16]\n"
- "str r1, [sp,#20]\n"
- "str r0, [sp,#16]\n"
- // Return to the (newly modified) stub to invoke the real function.
- // The above twiddling of the saved return addresses allows us to
- // deallocate everything, including the LR the stub saved, all in one
- // pop instruction.
- "ldmia sp!, {r0, r1, r2, r3, lr, pc}\n"
- );
-#else // Not an ARM host
- void ARMCompilationCallback() {
- llvm_unreachable("Cannot call ARMCompilationCallback() on a non-ARM arch!");
- }
-#endif
-}
-
-/// ARMCompilationCallbackC - This is the target-specific function invoked
-/// by the function stub when we did not know the real target of a call.
-/// This function must locate the start of the stub or call site and pass
-/// it into the JIT compiler function.
-extern "C" void ARMCompilationCallbackC(intptr_t StubAddr) {
- // Get the address of the compiled code for this function.
- intptr_t NewVal = (intptr_t)JITCompilerFunction((void*)StubAddr);
-
- // Rewrite the call target... so that we don't end up here every time we
- // execute the call. We're replacing the first two instructions of the
- // stub with:
- // ldr pc, [pc,#-4]
- // <addr>
- if (!sys::Memory::setRangeWritable((void*)StubAddr, 8)) {
- llvm_unreachable("ERROR: Unable to mark stub writable");
- }
- *(intptr_t *)StubAddr = 0xe51ff004; // ldr pc, [pc, #-4]
- *(intptr_t *)(StubAddr+4) = NewVal;
- if (!sys::Memory::setRangeExecutable((void*)StubAddr, 8)) {
- llvm_unreachable("ERROR: Unable to mark stub executable");
- }
-}
-
-TargetJITInfo::LazyResolverFn
-ARMJITInfo::getLazyResolverFunction(JITCompilerFn F) {
- JITCompilerFunction = F;
- return ARMCompilationCallback;
-}
-
-void *ARMJITInfo::emitGlobalValueIndirectSym(const GlobalValue *GV, void *Ptr,
- JITCodeEmitter &JCE) {
- uint8_t Buffer[4];
- uint8_t *Cur = Buffer;
- MachineCodeEmitter::emitWordLEInto(Cur, (intptr_t)Ptr);
- void *PtrAddr = JCE.allocIndirectGV(
- GV, Buffer, sizeof(Buffer), /*Alignment=*/4);
- addIndirectSymAddr(Ptr, (intptr_t)PtrAddr);
- return PtrAddr;
-}
-
-TargetJITInfo::StubLayout ARMJITInfo::getStubLayout() {
- // The stub contains up to 3 4-byte instructions, aligned at 4 bytes, and a
- // 4-byte address. See emitFunctionStub for details.
- StubLayout Result = {16, 4};
- return Result;
-}
-
-void *ARMJITInfo::emitFunctionStub(const Function* F, void *Fn,
- JITCodeEmitter &JCE) {
- void *Addr;
- // If this is just a call to an external function, emit a branch instead of a
- // call. The code is the same except for one bit of the last instruction.
- if (Fn != (void*)(intptr_t)ARMCompilationCallback) {
- // Branch to the corresponding function addr.
- if (IsPIC) {
- // The stub is 16 bytes in size and 4-byte aligned.
- intptr_t LazyPtr = getIndirectSymAddr(Fn);
- if (!LazyPtr) {
- // In PIC mode, the function stub is loading a lazy-ptr.
- LazyPtr= (intptr_t)emitGlobalValueIndirectSym((GlobalValue*)F, Fn, JCE);
- DEBUG(if (F)
- errs() << "JIT: Indirect symbol emitted at [" << LazyPtr
- << "] for GV '" << F->getName() << "'\n";
- else
- errs() << "JIT: Stub emitted at [" << LazyPtr
- << "] for external function at '" << Fn << "'\n");
- }
- JCE.emitAlignment(4);
- Addr = (void*)JCE.getCurrentPCValue();
- if (!sys::Memory::setRangeWritable(Addr, 16)) {
- llvm_unreachable("ERROR: Unable to mark stub writable");
- }
- JCE.emitWordLE(0xe59fc004); // ldr ip, [pc, #+4]
- JCE.emitWordLE(0xe08fc00c); // L_func$scv: add ip, pc, ip
- JCE.emitWordLE(0xe59cf000); // ldr pc, [ip]
- JCE.emitWordLE(LazyPtr - (intptr_t(Addr)+4+8)); // func - (L_func$scv+8)
- sys::Memory::InvalidateInstructionCache(Addr, 16);
- if (!sys::Memory::setRangeExecutable(Addr, 16)) {
- llvm_unreachable("ERROR: Unable to mark stub executable");
- }
- } else {
- // The stub is 8 bytes in size and 4-byte aligned.
- JCE.emitAlignment(4);
- Addr = (void*)JCE.getCurrentPCValue();
- if (!sys::Memory::setRangeWritable(Addr, 8)) {
- llvm_unreachable("ERROR: Unable to mark stub writable");
- }
- JCE.emitWordLE(0xe51ff004); // ldr pc, [pc, #-4]
- JCE.emitWordLE((intptr_t)Fn); // addr of function
- sys::Memory::InvalidateInstructionCache(Addr, 8);
- if (!sys::Memory::setRangeExecutable(Addr, 8)) {
- llvm_unreachable("ERROR: Unable to mark stub executable");
- }
- }
- } else {
- // The compilation callback will overwrite the first two words of this
- // stub with indirect branch instructions targeting the compiled code.
- // This stub sets the return address to restart the stub, so that
- // the new branch will be invoked when we come back.
- //
- // Branch and link to the compilation callback.
- // The stub is 16 bytes in size and 4-byte aligned.
- JCE.emitAlignment(4);
- Addr = (void*)JCE.getCurrentPCValue();
- if (!sys::Memory::setRangeWritable(Addr, 16)) {
- llvm_unreachable("ERROR: Unable to mark stub writable");
- }
- // Save LR so the callback can determine which stub called it.
- // The compilation callback is responsible for popping this prior
- // to returning.
- JCE.emitWordLE(0xe92d4000); // push {lr}
- // Set the return address to go back to the start of this stub.
- JCE.emitWordLE(0xe24fe00c); // sub lr, pc, #12
- // Invoke the compilation callback.
- JCE.emitWordLE(0xe51ff004); // ldr pc, [pc, #-4]
- // The address of the compilation callback.
- JCE.emitWordLE((intptr_t)ARMCompilationCallback);
- sys::Memory::InvalidateInstructionCache(Addr, 16);
- if (!sys::Memory::setRangeExecutable(Addr, 16)) {
- llvm_unreachable("ERROR: Unable to mark stub executable");
- }
- }
-
- return Addr;
-}
-
-intptr_t ARMJITInfo::resolveRelocDestAddr(MachineRelocation *MR) const {
- ARM::RelocationType RT = (ARM::RelocationType)MR->getRelocationType();
- switch (RT) {
- default:
- return (intptr_t)(MR->getResultPointer());
- case ARM::reloc_arm_pic_jt:
- // Destination address - jump table base.
- return (intptr_t)(MR->getResultPointer()) - MR->getConstantVal();
- case ARM::reloc_arm_jt_base:
- // Jump table base address.
- return getJumpTableBaseAddr(MR->getJumpTableIndex());
- case ARM::reloc_arm_cp_entry:
- case ARM::reloc_arm_vfp_cp_entry:
- // Constant pool entry address.
- return getConstantPoolEntryAddr(MR->getConstantPoolIndex());
- case ARM::reloc_arm_machine_cp_entry: {
- ARMConstantPoolValue *ACPV = (ARMConstantPoolValue*)MR->getConstantVal();
- assert((!ACPV->hasModifier() && !ACPV->mustAddCurrentAddress()) &&
- "Can't handle this machine constant pool entry yet!");
- intptr_t Addr = (intptr_t)(MR->getResultPointer());
- Addr -= getPCLabelAddr(ACPV->getLabelId()) + ACPV->getPCAdjustment();
- return Addr;
- }
- }
-}
-
-/// relocate - Before the JIT can run a block of code that has been emitted,
-/// it must rewrite the code to contain the actual addresses of any
-/// referenced global symbols.
-void ARMJITInfo::relocate(void *Function, MachineRelocation *MR,
- unsigned NumRelocs, unsigned char* GOTBase) {
- for (unsigned i = 0; i != NumRelocs; ++i, ++MR) {
- void *RelocPos = (char*)Function + MR->getMachineCodeOffset();
- intptr_t ResultPtr = resolveRelocDestAddr(MR);
- switch ((ARM::RelocationType)MR->getRelocationType()) {
- case ARM::reloc_arm_cp_entry:
- case ARM::reloc_arm_vfp_cp_entry:
- case ARM::reloc_arm_relative: {
- // It is necessary to calculate the correct PC relative value. We
- // subtract the base addr from the target addr to form a byte offset.
- ResultPtr = ResultPtr - (intptr_t)RelocPos - 8;
- // If the result is positive, set bit U(23) to 1.
- if (ResultPtr >= 0)
- *((intptr_t*)RelocPos) |= 1 << ARMII::U_BitShift;
- else {
- // Otherwise, obtain the absolute value and set bit U(23) to 0.
- *((intptr_t*)RelocPos) &= ~(1 << ARMII::U_BitShift);
- ResultPtr = - ResultPtr;
- }
- // Set the immed value calculated.
- // VFP immediate offset is multiplied by 4.
- if (MR->getRelocationType() == ARM::reloc_arm_vfp_cp_entry)
- ResultPtr = ResultPtr >> 2;
- *((intptr_t*)RelocPos) |= ResultPtr;
- // Set register Rn to PC.
- *((intptr_t*)RelocPos) |=
- ARMRegisterInfo::getRegisterNumbering(ARM::PC) << ARMII::RegRnShift;
- break;
- }
- case ARM::reloc_arm_pic_jt:
- case ARM::reloc_arm_machine_cp_entry:
- case ARM::reloc_arm_absolute: {
- // These addresses have already been resolved.
- *((intptr_t*)RelocPos) |= (intptr_t)ResultPtr;
- break;
- }
- case ARM::reloc_arm_branch: {
- // It is necessary to calculate the correct value of the signed_immed_24
- // field. We subtract the base addr from the target addr to form a
- // byte offset, which must be within the range -33554432 to +33554428.
- // Then, we set the signed_immed_24 field of the instruction to bits
- // [25:2] of the byte offset. More details in the ARM ARM, p. A4-11.
- ResultPtr = ResultPtr - (intptr_t)RelocPos - 8;
- ResultPtr = (ResultPtr & 0x03FFFFFC) >> 2;
- assert(ResultPtr >= -33554432 && ResultPtr <= 33554428);
- *((intptr_t*)RelocPos) |= ResultPtr;
- break;
- }
- case ARM::reloc_arm_jt_base: {
- // JT base - (instruction addr + 8)
- ResultPtr = ResultPtr - (intptr_t)RelocPos - 8;
- *((intptr_t*)RelocPos) |= ResultPtr;
- break;
- }
- }
- }
-}
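A worked numeric example of the reloc_arm_branch arithmetic implemented above, with made-up addresses (illustrative C++ only):

#include <cstdint>
#include <cstdio>

int main() {
  intptr_t RelocPos = 0x8000;                 // address of the branch being fixed up
  intptr_t Target   = 0x8040;                 // resolved destination address
  intptr_t Offset   = Target - RelocPos - 8;  // ARM reads PC as instruction address + 8
  uint32_t imm24    = (uint32_t)(Offset & 0x03FFFFFC) >> 2;  // bits [25:2] of the byte offset
  std::printf("signed_immed_24 = 0x%06X\n", imm24);          // prints 0x00000E
  return 0;
}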
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMJITInfo.h b/libclamav/c++/llvm/lib/Target/ARM/ARMJITInfo.h
deleted file mode 100644
index ff332b7..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMJITInfo.h
+++ /dev/null
@@ -1,182 +0,0 @@
-//===- ARMJITInfo.h - ARM implementation of the JIT interface --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declaration of the ARMJITInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARMJITINFO_H
-#define ARMJITINFO_H
-
-#include "ARMMachineFunctionInfo.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineJumpTableInfo.h"
-#include "llvm/Target/TargetJITInfo.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallVector.h"
-
-namespace llvm {
- class ARMTargetMachine;
-
- class ARMJITInfo : public TargetJITInfo {
- // ConstPoolId2AddrMap - A map from constant pool ids to the corresponding
- // CONSTPOOL_ENTRY addresses.
- SmallVector<intptr_t, 16> ConstPoolId2AddrMap;
-
- // JumpTableId2AddrMap - A map from inline jumptable ids to the
- // corresponding inline jump table bases.
- SmallVector<intptr_t, 16> JumpTableId2AddrMap;
-
- // PCLabelMap - A map from PC labels to addresses.
- DenseMap<unsigned, intptr_t> PCLabelMap;
-
- // Sym2IndirectSymMap - A map from symbol (GlobalValue and ExternalSymbol)
- // addresses to their indirect symbol addresses.
- DenseMap<void*, intptr_t> Sym2IndirectSymMap;
-
- // IsPIC - True if the relocation model is PIC. This is used to determine
- // how to codegen function stubs.
- bool IsPIC;
-
- public:
- explicit ARMJITInfo() : IsPIC(false) { useGOT = false; }
-
- /// replaceMachineCodeForFunction - Make it so that calling the function
- /// whose machine code is at OLD turns into a call to NEW, perhaps by
- /// overwriting OLD with a branch to NEW. This is used for self-modifying
- /// code.
- ///
- virtual void replaceMachineCodeForFunction(void *Old, void *New);
-
- /// emitGlobalValueIndirectSym - Use the specified JITCodeEmitter object
- /// to emit an indirect symbol which contains the address of the specified
- /// ptr.
- virtual void *emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr,
- JITCodeEmitter &JCE);
-
- // getStubLayout - Returns the size and alignment of the largest call stub
- // on ARM.
- virtual StubLayout getStubLayout();
-
- /// emitFunctionStub - Use the specified JITCodeEmitter object to emit a
- /// small native function that simply calls the function at the specified
- /// address.
- virtual void *emitFunctionStub(const Function* F, void *Fn,
- JITCodeEmitter &JCE);
-
- /// getLazyResolverFunction - Expose the lazy resolver to the JIT.
- virtual LazyResolverFn getLazyResolverFunction(JITCompilerFn);
-
- /// relocate - Before the JIT can run a block of code that has been emitted,
- /// it must rewrite the code to contain the actual addresses of any
- /// referenced global symbols.
- virtual void relocate(void *Function, MachineRelocation *MR,
- unsigned NumRelocs, unsigned char* GOTBase);
-
- /// hasCustomConstantPool - Allows a target to specify that constant
- /// pool address resolution is handled by the target.
- virtual bool hasCustomConstantPool() const { return true; }
-
- /// hasCustomJumpTables - Allows a target to specify that jumptables
- /// are emitted by the target.
- virtual bool hasCustomJumpTables() const { return true; }
-
- /// allocateSeparateGVMemory - If true, globals should be placed in
- /// separately allocated heap memory rather than in the same
- /// code memory allocated by JITCodeEmitter.
- virtual bool allocateSeparateGVMemory() const {
-#ifdef __APPLE__
- return true;
-#else
- return false;
-#endif
- }
-
-    /// Initialize - Initialize internal state for the function being JITted.
- /// Resize constant pool ids to CONSTPOOL_ENTRY addresses map; resize
- /// jump table ids to jump table bases map; remember if codegen relocation
- /// model is PIC.
- void Initialize(const MachineFunction &MF, bool isPIC) {
- const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- ConstPoolId2AddrMap.resize(AFI->getNumConstPoolEntries());
- JumpTableId2AddrMap.resize(AFI->getNumJumpTables());
- IsPIC = isPIC;
- }
-
- /// getConstantPoolEntryAddr - The ARM target puts all constant
- /// pool entries into constant islands. This returns the address of the
- /// constant pool entry of the specified index.
- intptr_t getConstantPoolEntryAddr(unsigned CPI) const {
- assert(CPI < ConstPoolId2AddrMap.size());
- return ConstPoolId2AddrMap[CPI];
- }
-
- /// addConstantPoolEntryAddr - Map a Constant Pool Index to the address
- /// where its associated value is stored. When relocations are processed,
- /// this value will be used to resolve references to the constant.
- void addConstantPoolEntryAddr(unsigned CPI, intptr_t Addr) {
- assert(CPI < ConstPoolId2AddrMap.size());
- ConstPoolId2AddrMap[CPI] = Addr;
- }
-
-    /// getJumpTableBaseAddr - The ARM target inlines all jump tables within
-    /// the text section of the function. This returns the address of the base of
- /// the jump table of the specified index.
- intptr_t getJumpTableBaseAddr(unsigned JTI) const {
- assert(JTI < JumpTableId2AddrMap.size());
- return JumpTableId2AddrMap[JTI];
- }
-
- /// addJumpTableBaseAddr - Map a jump table index to the address where
- /// the corresponding inline jump table is emitted. When relocations are
- /// processed, this value will be used to resolve references to the
- /// jump table.
- void addJumpTableBaseAddr(unsigned JTI, intptr_t Addr) {
- assert(JTI < JumpTableId2AddrMap.size());
- JumpTableId2AddrMap[JTI] = Addr;
- }
-
- /// getPCLabelAddr - Retrieve the address of the PC label of the specified id.
- intptr_t getPCLabelAddr(unsigned Id) const {
- DenseMap<unsigned, intptr_t>::const_iterator I = PCLabelMap.find(Id);
- assert(I != PCLabelMap.end());
- return I->second;
- }
-
- /// addPCLabelAddr - Remember the address of the specified PC label.
- void addPCLabelAddr(unsigned Id, intptr_t Addr) {
- PCLabelMap.insert(std::make_pair(Id, Addr));
- }
-
- /// getIndirectSymAddr - Retrieve the address of the indirect symbol of the
- /// specified symbol located at address. Returns 0 if the indirect symbol
- /// has not been emitted.
- intptr_t getIndirectSymAddr(void *Addr) const {
- DenseMap<void*,intptr_t>::const_iterator I= Sym2IndirectSymMap.find(Addr);
- if (I != Sym2IndirectSymMap.end())
- return I->second;
- return 0;
- }
-
- /// addIndirectSymAddr - Add a mapping from address of an emitted symbol to
- /// its indirect symbol address.
- void addIndirectSymAddr(void *SymAddr, intptr_t IndSymAddr) {
- Sym2IndirectSymMap.insert(std::make_pair(SymAddr, IndSymAddr));
- }
-
- private:
- /// resolveRelocDestAddr - Resolve the resulting address of the relocation
-    /// if it's not already resolved. Constant pool entries must be resolved by
-    /// the ARM target.
- intptr_t resolveRelocDestAddr(MachineRelocation *MR) const;
- };
-}
-
-#endif
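Most of the deleted header is bookkeeping: Initialize() sizes the constant-pool and jump-table maps from the per-function entry counts, and the matching add/get pairs are bounds-checked array accesses used later by the relocation code. A stripped-down model of that pattern, using std::vector in place of SmallVector (illustrative only, not taken from the patch):

  #include <cassert>
  #include <cstdint>
  #include <vector>

  // Minimal stand-in for ConstPoolId2AddrMap: sized once per function,
  // filled in as constant islands are emitted, queried while relocating.
  class ConstPoolAddrMap {
    std::vector<intptr_t> Addrs;
  public:
    void initialize(unsigned NumEntries) { Addrs.assign(NumEntries, 0); }
    void setEntryAddr(unsigned CPI, intptr_t Addr) {
      assert(CPI < Addrs.size() && "constant pool index out of range");
      Addrs[CPI] = Addr;
    }
    intptr_t getEntryAddr(unsigned CPI) const {
      assert(CPI < Addrs.size() && "constant pool index out of range");
      return Addrs[CPI];
    }
  };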
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/libclamav/c++/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
deleted file mode 100644
index 19f1e3b..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ /dev/null
@@ -1,1636 +0,0 @@
-//===-- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass ----*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a pass that performs load / store related peephole
-// optimizations. This pass should be run after register allocation.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "arm-ldst-opt"
-#include "ARM.h"
-#include "ARMAddressingModes.h"
-#include "ARMBaseInstrInfo.h"
-#include "ARMMachineFunctionInfo.h"
-#include "ARMRegisterInfo.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/Statistic.h"
-using namespace llvm;
-
-STATISTIC(NumLDMGened , "Number of ldm instructions generated");
-STATISTIC(NumSTMGened , "Number of stm instructions generated");
-STATISTIC(NumVLDMGened, "Number of vldm instructions generated");
-STATISTIC(NumVSTMGened, "Number of vstm instructions generated");
-STATISTIC(NumLdStMoved, "Number of load / store instructions moved");
-STATISTIC(NumLDRDFormed,"Number of ldrd created before allocation");
-STATISTIC(NumSTRDFormed,"Number of strd created before allocation");
-STATISTIC(NumLDRD2LDM, "Number of ldrd instructions turned back into ldm");
-STATISTIC(NumSTRD2STM, "Number of strd instructions turned back into stm");
-STATISTIC(NumLDRD2LDR, "Number of ldrd instructions turned back into ldr's");
-STATISTIC(NumSTRD2STR, "Number of strd instructions turned back into str's");
-
-/// ARMLoadStoreOpt - Post-register allocation pass that combines
-/// load / store instructions to form ldm / stm instructions.
-
-namespace {
- struct ARMLoadStoreOpt : public MachineFunctionPass {
- static char ID;
- ARMLoadStoreOpt() : MachineFunctionPass(&ID) {}
-
- const TargetInstrInfo *TII;
- const TargetRegisterInfo *TRI;
- ARMFunctionInfo *AFI;
- RegScavenger *RS;
- bool isThumb2;
-
- virtual bool runOnMachineFunction(MachineFunction &Fn);
-
- virtual const char *getPassName() const {
- return "ARM load / store optimization pass";
- }
-
- private:
- struct MemOpQueueEntry {
- int Offset;
- unsigned Position;
- MachineBasicBlock::iterator MBBI;
- bool Merged;
- MemOpQueueEntry(int o, int p, MachineBasicBlock::iterator i)
- : Offset(o), Position(p), MBBI(i), Merged(false) {}
- };
- typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;
- typedef MemOpQueue::iterator MemOpQueueIter;
-
- bool MergeOps(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
- int Offset, unsigned Base, bool BaseKill, int Opcode,
- ARMCC::CondCodes Pred, unsigned PredReg, unsigned Scratch,
- DebugLoc dl, SmallVector<std::pair<unsigned, bool>, 8> &Regs);
- void MergeOpsUpdate(MachineBasicBlock &MBB,
- MemOpQueue &MemOps,
- unsigned memOpsBegin,
- unsigned memOpsEnd,
- unsigned insertAfter,
- int Offset,
- unsigned Base,
- bool BaseKill,
- int Opcode,
- ARMCC::CondCodes Pred,
- unsigned PredReg,
- unsigned Scratch,
- DebugLoc dl,
- SmallVector<MachineBasicBlock::iterator, 4> &Merges);
- void MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, unsigned Base,
- int Opcode, unsigned Size,
- ARMCC::CondCodes Pred, unsigned PredReg,
- unsigned Scratch, MemOpQueue &MemOps,
- SmallVector<MachineBasicBlock::iterator, 4> &Merges);
-
- void AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps);
- bool FixInvalidRegPairOp(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI);
- bool MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- const TargetInstrInfo *TII,
- bool &Advance,
- MachineBasicBlock::iterator &I);
- bool MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- bool &Advance,
- MachineBasicBlock::iterator &I);
- bool LoadStoreMultipleOpti(MachineBasicBlock &MBB);
- bool MergeReturnIntoLDM(MachineBasicBlock &MBB);
- };
- char ARMLoadStoreOpt::ID = 0;
-}
-
-static int getLoadStoreMultipleOpcode(int Opcode) {
- switch (Opcode) {
- case ARM::LDR:
- NumLDMGened++;
- return ARM::LDM;
- case ARM::STR:
- NumSTMGened++;
- return ARM::STM;
- case ARM::t2LDRi8:
- case ARM::t2LDRi12:
- NumLDMGened++;
- return ARM::t2LDM;
- case ARM::t2STRi8:
- case ARM::t2STRi12:
- NumSTMGened++;
- return ARM::t2STM;
- case ARM::VLDRS:
- NumVLDMGened++;
- return ARM::VLDMS;
- case ARM::VSTRS:
- NumVSTMGened++;
- return ARM::VSTMS;
- case ARM::VLDRD:
- NumVLDMGened++;
- return ARM::VLDMD;
- case ARM::VSTRD:
- NumVSTMGened++;
- return ARM::VSTMD;
- default: llvm_unreachable("Unhandled opcode!");
- }
- return 0;
-}
-
-static bool isT2i32Load(unsigned Opc) {
- return Opc == ARM::t2LDRi12 || Opc == ARM::t2LDRi8;
-}
-
-static bool isi32Load(unsigned Opc) {
- return Opc == ARM::LDR || isT2i32Load(Opc);
-}
-
-static bool isT2i32Store(unsigned Opc) {
- return Opc == ARM::t2STRi12 || Opc == ARM::t2STRi8;
-}
-
-static bool isi32Store(unsigned Opc) {
- return Opc == ARM::STR || isT2i32Store(Opc);
-}
-
-/// MergeOps - Create and insert a LDM or STM with Base as base register and
-/// registers in Regs as the register operands that would be loaded / stored.
-/// It returns true if the transformation is done.
-bool
-ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- int Offset, unsigned Base, bool BaseKill,
- int Opcode, ARMCC::CondCodes Pred,
- unsigned PredReg, unsigned Scratch, DebugLoc dl,
- SmallVector<std::pair<unsigned, bool>, 8> &Regs) {
- // Only a single register to load / store. Don't bother.
- unsigned NumRegs = Regs.size();
- if (NumRegs <= 1)
- return false;
-
- ARM_AM::AMSubMode Mode = ARM_AM::ia;
- bool isAM4 = isi32Load(Opcode) || isi32Store(Opcode);
- if (isAM4 && Offset == 4) {
- if (isThumb2)
- // Thumb2 does not support ldmib / stmib.
- return false;
- Mode = ARM_AM::ib;
- } else if (isAM4 && Offset == -4 * (int)NumRegs + 4) {
- if (isThumb2)
- // Thumb2 does not support ldmda / stmda.
- return false;
- Mode = ARM_AM::da;
- } else if (isAM4 && Offset == -4 * (int)NumRegs) {
- Mode = ARM_AM::db;
- } else if (Offset != 0) {
- // If starting offset isn't zero, insert a MI to materialize a new base.
- // But only do so if it is cost effective, i.e. merging more than two
- // loads / stores.
- if (NumRegs <= 2)
- return false;
-
- unsigned NewBase;
- if (isi32Load(Opcode))
-      // If it is a load, then just use one of the destination registers
-      // as the new base.
- NewBase = Regs[NumRegs-1].first;
- else {
-      // Use the scratch register as the new base.
- NewBase = Scratch;
- if (NewBase == 0)
- return false;
- }
- int BaseOpc = !isThumb2
- ? ARM::ADDri
- : ((Base == ARM::SP) ? ARM::t2ADDrSPi : ARM::t2ADDri);
- if (Offset < 0) {
- BaseOpc = !isThumb2
- ? ARM::SUBri
- : ((Base == ARM::SP) ? ARM::t2SUBrSPi : ARM::t2SUBri);
- Offset = - Offset;
- }
- int ImmedOffset = isThumb2
- ? ARM_AM::getT2SOImmVal(Offset) : ARM_AM::getSOImmVal(Offset);
- if (ImmedOffset == -1)
- // FIXME: Try t2ADDri12 or t2SUBri12?
- return false; // Probably not worth it then.
-
- BuildMI(MBB, MBBI, dl, TII->get(BaseOpc), NewBase)
- .addReg(Base, getKillRegState(BaseKill)).addImm(Offset)
- .addImm(Pred).addReg(PredReg).addReg(0);
- Base = NewBase;
-    BaseKill = true; // New base is always killed right after its use.
- }
-
- bool isDPR = Opcode == ARM::VLDRD || Opcode == ARM::VSTRD;
- bool isDef = isi32Load(Opcode) || Opcode == ARM::VLDRS || Opcode == ARM::VLDRD;
- Opcode = getLoadStoreMultipleOpcode(Opcode);
- MachineInstrBuilder MIB = (isAM4)
- ? BuildMI(MBB, MBBI, dl, TII->get(Opcode))
- .addReg(Base, getKillRegState(BaseKill))
- .addImm(ARM_AM::getAM4ModeImm(Mode)).addImm(Pred).addReg(PredReg)
- : BuildMI(MBB, MBBI, dl, TII->get(Opcode))
- .addReg(Base, getKillRegState(BaseKill))
- .addImm(ARM_AM::getAM5Opc(Mode, false, isDPR ? NumRegs<<1 : NumRegs))
- .addImm(Pred).addReg(PredReg);
- MIB.addReg(0); // Add optional writeback (0 for now).
- for (unsigned i = 0; i != NumRegs; ++i)
- MIB = MIB.addReg(Regs[i].first, getDefRegState(isDef)
- | getKillRegState(Regs[i].second));
-
- return true;
-}
-
-// MergeOpsUpdate - call MergeOps and update MemOps and merges accordingly on
-// success.
-void ARMLoadStoreOpt::
-MergeOpsUpdate(MachineBasicBlock &MBB,
- MemOpQueue &memOps,
- unsigned memOpsBegin,
- unsigned memOpsEnd,
- unsigned insertAfter,
- int Offset,
- unsigned Base,
- bool BaseKill,
- int Opcode,
- ARMCC::CondCodes Pred,
- unsigned PredReg,
- unsigned Scratch,
- DebugLoc dl,
- SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
- // First calculate which of the registers should be killed by the merged
- // instruction.
- SmallVector<std::pair<unsigned, bool>, 8> Regs;
- const unsigned insertPos = memOps[insertAfter].Position;
- for (unsigned i = memOpsBegin; i < memOpsEnd; ++i) {
- const MachineOperand &MO = memOps[i].MBBI->getOperand(0);
- unsigned Reg = MO.getReg();
- bool isKill = MO.isKill();
-
- // If we are inserting the merged operation after an unmerged operation that
- // uses the same register, make sure to transfer any kill flag.
- for (unsigned j = memOpsEnd, e = memOps.size(); !isKill && j != e; ++j)
- if (memOps[j].Position<insertPos) {
- const MachineOperand &MOJ = memOps[j].MBBI->getOperand(0);
- if (MOJ.getReg() == Reg && MOJ.isKill())
- isKill = true;
- }
-
- Regs.push_back(std::make_pair(Reg, isKill));
- }
-
- // Try to do the merge.
- MachineBasicBlock::iterator Loc = memOps[insertAfter].MBBI;
- Loc++;
- if (!MergeOps(MBB, Loc, Offset, Base, BaseKill, Opcode,
- Pred, PredReg, Scratch, dl, Regs))
- return;
-
- // Merge succeeded, update records.
- Merges.push_back(prior(Loc));
- for (unsigned i = memOpsBegin; i < memOpsEnd; ++i) {
- // Remove kill flags from any unmerged memops that come before insertPos.
- if (Regs[i-memOpsBegin].second)
- for (unsigned j = memOpsEnd, e = memOps.size(); j != e; ++j)
- if (memOps[j].Position<insertPos) {
- MachineOperand &MOJ = memOps[j].MBBI->getOperand(0);
- if (MOJ.getReg() == Regs[i-memOpsBegin].first && MOJ.isKill())
- MOJ.setIsKill(false);
- }
- MBB.erase(memOps[i].MBBI);
- memOps[i].Merged = true;
- }
-}
-
-/// MergeLDR_STR - Merge a number of load / store instructions into one or more
-/// load / store multiple instructions.
-void
-ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
- unsigned Base, int Opcode, unsigned Size,
- ARMCC::CondCodes Pred, unsigned PredReg,
- unsigned Scratch, MemOpQueue &MemOps,
- SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
- bool isAM4 = isi32Load(Opcode) || isi32Store(Opcode);
- int Offset = MemOps[SIndex].Offset;
- int SOffset = Offset;
- unsigned insertAfter = SIndex;
- MachineBasicBlock::iterator Loc = MemOps[SIndex].MBBI;
- DebugLoc dl = Loc->getDebugLoc();
- const MachineOperand &PMO = Loc->getOperand(0);
- unsigned PReg = PMO.getReg();
- unsigned PRegNum = PMO.isUndef() ? UINT_MAX
- : ARMRegisterInfo::getRegisterNumbering(PReg);
-
- for (unsigned i = SIndex+1, e = MemOps.size(); i != e; ++i) {
- int NewOffset = MemOps[i].Offset;
- const MachineOperand &MO = MemOps[i].MBBI->getOperand(0);
- unsigned Reg = MO.getReg();
- unsigned RegNum = MO.isUndef() ? UINT_MAX
- : ARMRegisterInfo::getRegisterNumbering(Reg);
- // AM4 - register numbers in ascending order.
- // AM5 - consecutive register numbers in ascending order.
- if (Reg != ARM::SP &&
- NewOffset == Offset + (int)Size &&
- ((isAM4 && RegNum > PRegNum) || RegNum == PRegNum+1)) {
- Offset += Size;
- PRegNum = RegNum;
- } else {
-      // Can't merge this in. Try merging the earlier ones first.
- MergeOpsUpdate(MBB, MemOps, SIndex, i, insertAfter, SOffset,
- Base, false, Opcode, Pred, PredReg, Scratch, dl, Merges);
- MergeLDR_STR(MBB, i, Base, Opcode, Size, Pred, PredReg, Scratch,
- MemOps, Merges);
- return;
- }
-
- if (MemOps[i].Position > MemOps[insertAfter].Position)
- insertAfter = i;
- }
-
- bool BaseKill = Loc->findRegisterUseOperandIdx(Base, true) != -1;
- MergeOpsUpdate(MBB, MemOps, SIndex, MemOps.size(), insertAfter, SOffset,
- Base, BaseKill, Opcode, Pred, PredReg, Scratch, dl, Merges);
- return;
-}
-
-static inline bool isMatchingDecrement(MachineInstr *MI, unsigned Base,
- unsigned Bytes, unsigned Limit,
- ARMCC::CondCodes Pred, unsigned PredReg){
- unsigned MyPredReg = 0;
- if (!MI)
- return false;
- if (MI->getOpcode() != ARM::t2SUBri &&
- MI->getOpcode() != ARM::t2SUBrSPi &&
- MI->getOpcode() != ARM::t2SUBrSPi12 &&
- MI->getOpcode() != ARM::tSUBspi &&
- MI->getOpcode() != ARM::SUBri)
- return false;
-
- // Make sure the offset fits in 8 bits.
- if (Bytes <= 0 || (Limit && Bytes >= Limit))
- return false;
-
- unsigned Scale = (MI->getOpcode() == ARM::tSUBspi) ? 4 : 1; // FIXME
- return (MI->getOperand(0).getReg() == Base &&
- MI->getOperand(1).getReg() == Base &&
- (MI->getOperand(2).getImm()*Scale) == Bytes &&
- llvm::getInstrPredicate(MI, MyPredReg) == Pred &&
- MyPredReg == PredReg);
-}
-
-static inline bool isMatchingIncrement(MachineInstr *MI, unsigned Base,
- unsigned Bytes, unsigned Limit,
- ARMCC::CondCodes Pred, unsigned PredReg){
- unsigned MyPredReg = 0;
- if (!MI)
- return false;
- if (MI->getOpcode() != ARM::t2ADDri &&
- MI->getOpcode() != ARM::t2ADDrSPi &&
- MI->getOpcode() != ARM::t2ADDrSPi12 &&
- MI->getOpcode() != ARM::tADDspi &&
- MI->getOpcode() != ARM::ADDri)
- return false;
-
- if (Bytes <= 0 || (Limit && Bytes >= Limit))
- // Make sure the offset fits in 8 bits.
- return false;
-
- unsigned Scale = (MI->getOpcode() == ARM::tADDspi) ? 4 : 1; // FIXME
- return (MI->getOperand(0).getReg() == Base &&
- MI->getOperand(1).getReg() == Base &&
- (MI->getOperand(2).getImm()*Scale) == Bytes &&
- llvm::getInstrPredicate(MI, MyPredReg) == Pred &&
- MyPredReg == PredReg);
-}
-
-static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
- switch (MI->getOpcode()) {
- default: return 0;
- case ARM::LDR:
- case ARM::STR:
- case ARM::t2LDRi8:
- case ARM::t2LDRi12:
- case ARM::t2STRi8:
- case ARM::t2STRi12:
- case ARM::VLDRS:
- case ARM::VSTRS:
- return 4;
- case ARM::VLDRD:
- case ARM::VSTRD:
- return 8;
- case ARM::LDM:
- case ARM::STM:
- case ARM::t2LDM:
- case ARM::t2STM:
- return (MI->getNumOperands() - 5) * 4;
- case ARM::VLDMS:
- case ARM::VSTMS:
- case ARM::VLDMD:
- case ARM::VSTMD:
- return ARM_AM::getAM5Offset(MI->getOperand(1).getImm()) * 4;
- }
-}
-
-/// MergeBaseUpdateLSMultiple - Fold preceding/trailing inc/dec of base
-/// register into the LDM/STM/VLDM{D|S}/VSTM{D|S} op when possible:
-///
-/// stmia rn, <ra, rb, rc>
-/// rn := rn + 4 * 3;
-/// =>
-/// stmia rn!, <ra, rb, rc>
-///
-/// rn := rn - 4 * 3;
-/// ldmia rn, <ra, rb, rc>
-/// =>
-/// ldmdb rn!, <ra, rb, rc>
-bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- bool &Advance,
- MachineBasicBlock::iterator &I) {
- MachineInstr *MI = MBBI;
- unsigned Base = MI->getOperand(0).getReg();
- unsigned Bytes = getLSMultipleTransferSize(MI);
- unsigned PredReg = 0;
- ARMCC::CondCodes Pred = llvm::getInstrPredicate(MI, PredReg);
- int Opcode = MI->getOpcode();
- bool isAM4 = Opcode == ARM::LDM || Opcode == ARM::t2LDM ||
- Opcode == ARM::STM || Opcode == ARM::t2STM;
-
- if (isAM4) {
- if (ARM_AM::getAM4WBFlag(MI->getOperand(1).getImm()))
- return false;
-
- // Can't use the updating AM4 sub-mode if the base register is also a dest
- // register. e.g. ldmdb r0!, {r0, r1, r2}. The behavior is undefined.
- for (unsigned i = 3, e = MI->getNumOperands(); i != e; ++i) {
- if (MI->getOperand(i).getReg() == Base)
- return false;
- }
-
- ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MI->getOperand(1).getImm());
- if (MBBI != MBB.begin()) {
- MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
- if (Mode == ARM_AM::ia &&
- isMatchingDecrement(PrevMBBI, Base, Bytes, 0, Pred, PredReg)) {
- MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::db, true));
- MI->getOperand(4).setReg(Base);
- MI->getOperand(4).setIsDef();
- MBB.erase(PrevMBBI);
- return true;
- } else if (Mode == ARM_AM::ib &&
- isMatchingDecrement(PrevMBBI, Base, Bytes, 0, Pred, PredReg)) {
- MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::da, true));
- MI->getOperand(4).setReg(Base); // WB to base
- MI->getOperand(4).setIsDef();
- MBB.erase(PrevMBBI);
- return true;
- }
- }
-
- if (MBBI != MBB.end()) {
- MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
- if ((Mode == ARM_AM::ia || Mode == ARM_AM::ib) &&
- isMatchingIncrement(NextMBBI, Base, Bytes, 0, Pred, PredReg)) {
- MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
- MI->getOperand(4).setReg(Base); // WB to base
- MI->getOperand(4).setIsDef();
- if (NextMBBI == I) {
- Advance = true;
- ++I;
- }
- MBB.erase(NextMBBI);
- return true;
- } else if ((Mode == ARM_AM::da || Mode == ARM_AM::db) &&
- isMatchingDecrement(NextMBBI, Base, Bytes, 0, Pred, PredReg)) {
- MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
- MI->getOperand(4).setReg(Base); // WB to base
- MI->getOperand(4).setIsDef();
- if (NextMBBI == I) {
- Advance = true;
- ++I;
- }
- MBB.erase(NextMBBI);
- return true;
- }
- }
- } else {
- // VLDM{D|S}, VSTM{D|S} addressing mode 5 ops.
- if (ARM_AM::getAM5WBFlag(MI->getOperand(1).getImm()))
- return false;
-
- ARM_AM::AMSubMode Mode = ARM_AM::getAM5SubMode(MI->getOperand(1).getImm());
- unsigned Offset = ARM_AM::getAM5Offset(MI->getOperand(1).getImm());
- if (MBBI != MBB.begin()) {
- MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
- if (Mode == ARM_AM::ia &&
- isMatchingDecrement(PrevMBBI, Base, Bytes, 0, Pred, PredReg)) {
- MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::db, true, Offset));
- MI->getOperand(4).setReg(Base); // WB to base
- MI->getOperand(4).setIsDef();
- MBB.erase(PrevMBBI);
- return true;
- }
- }
-
- if (MBBI != MBB.end()) {
- MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
- if (Mode == ARM_AM::ia &&
- isMatchingIncrement(NextMBBI, Base, Bytes, 0, Pred, PredReg)) {
- MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::ia, true, Offset));
- MI->getOperand(4).setReg(Base); // WB to base
- MI->getOperand(4).setIsDef();
- if (NextMBBI == I) {
- Advance = true;
- ++I;
- }
- MBB.erase(NextMBBI);
- }
- return true;
- }
- }
-
- return false;
-}
-
-static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc) {
- switch (Opc) {
- case ARM::LDR: return ARM::LDR_PRE;
- case ARM::STR: return ARM::STR_PRE;
- case ARM::VLDRS: return ARM::VLDMS;
- case ARM::VLDRD: return ARM::VLDMD;
- case ARM::VSTRS: return ARM::VSTMS;
- case ARM::VSTRD: return ARM::VSTMD;
- case ARM::t2LDRi8:
- case ARM::t2LDRi12:
- return ARM::t2LDR_PRE;
- case ARM::t2STRi8:
- case ARM::t2STRi12:
- return ARM::t2STR_PRE;
- default: llvm_unreachable("Unhandled opcode!");
- }
- return 0;
-}
-
-static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc) {
- switch (Opc) {
- case ARM::LDR: return ARM::LDR_POST;
- case ARM::STR: return ARM::STR_POST;
- case ARM::VLDRS: return ARM::VLDMS;
- case ARM::VLDRD: return ARM::VLDMD;
- case ARM::VSTRS: return ARM::VSTMS;
- case ARM::VSTRD: return ARM::VSTMD;
- case ARM::t2LDRi8:
- case ARM::t2LDRi12:
- return ARM::t2LDR_POST;
- case ARM::t2STRi8:
- case ARM::t2STRi12:
- return ARM::t2STR_POST;
- default: llvm_unreachable("Unhandled opcode!");
- }
- return 0;
-}
-
-/// MergeBaseUpdateLoadStore - Fold preceding/trailing inc/dec of base
-/// register into the LDR/STR/FLD{D|S}/FST{D|S} op when possible:
-bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- const TargetInstrInfo *TII,
- bool &Advance,
- MachineBasicBlock::iterator &I) {
- MachineInstr *MI = MBBI;
- unsigned Base = MI->getOperand(1).getReg();
- bool BaseKill = MI->getOperand(1).isKill();
- unsigned Bytes = getLSMultipleTransferSize(MI);
- int Opcode = MI->getOpcode();
- DebugLoc dl = MI->getDebugLoc();
- bool isAM5 = Opcode == ARM::VLDRD || Opcode == ARM::VLDRS ||
- Opcode == ARM::VSTRD || Opcode == ARM::VSTRS;
- bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
- if (isAM2 && ARM_AM::getAM2Offset(MI->getOperand(3).getImm()) != 0)
- return false;
- else if (isAM5 && ARM_AM::getAM5Offset(MI->getOperand(2).getImm()) != 0)
- return false;
- else if (isT2i32Load(Opcode) || isT2i32Store(Opcode))
- if (MI->getOperand(2).getImm() != 0)
- return false;
-
- bool isLd = isi32Load(Opcode) || Opcode == ARM::VLDRS || Opcode == ARM::VLDRD;
- // Can't do the merge if the destination register is the same as the would-be
- // writeback register.
- if (isLd && MI->getOperand(0).getReg() == Base)
- return false;
-
- unsigned PredReg = 0;
- ARMCC::CondCodes Pred = llvm::getInstrPredicate(MI, PredReg);
- bool DoMerge = false;
- ARM_AM::AddrOpc AddSub = ARM_AM::add;
- unsigned NewOpc = 0;
- // AM2 - 12 bits, thumb2 - 8 bits.
- unsigned Limit = isAM5 ? 0 : (isAM2 ? 0x1000 : 0x100);
- if (MBBI != MBB.begin()) {
- MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
- if (isMatchingDecrement(PrevMBBI, Base, Bytes, Limit, Pred, PredReg)) {
- DoMerge = true;
- AddSub = ARM_AM::sub;
- NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
- } else if (!isAM5 &&
- isMatchingIncrement(PrevMBBI, Base, Bytes, Limit,Pred,PredReg)) {
- DoMerge = true;
- NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
- }
- if (DoMerge)
- MBB.erase(PrevMBBI);
- }
-
- if (!DoMerge && MBBI != MBB.end()) {
- MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
- if (!isAM5 &&
- isMatchingDecrement(NextMBBI, Base, Bytes, Limit, Pred, PredReg)) {
- DoMerge = true;
- AddSub = ARM_AM::sub;
- NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
- } else if (isMatchingIncrement(NextMBBI, Base, Bytes, Limit,Pred,PredReg)) {
- DoMerge = true;
- NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
- }
- if (DoMerge) {
- if (NextMBBI == I) {
- Advance = true;
- ++I;
- }
- MBB.erase(NextMBBI);
- }
- }
-
- if (!DoMerge)
- return false;
-
- bool isDPR = NewOpc == ARM::VLDMD || NewOpc == ARM::VSTMD;
- unsigned Offset = 0;
- if (isAM5)
- Offset = ARM_AM::getAM5Opc((AddSub == ARM_AM::sub)
- ? ARM_AM::db
- : ARM_AM::ia, true, (isDPR ? 2 : 1));
- else if (isAM2)
- Offset = ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift);
- else
- Offset = AddSub == ARM_AM::sub ? -Bytes : Bytes;
- if (isLd) {
- if (isAM5)
- // VLDMS, VLDMD
- BuildMI(MBB, MBBI, dl, TII->get(NewOpc))
- .addReg(Base, getKillRegState(BaseKill))
- .addImm(Offset).addImm(Pred).addReg(PredReg)
- .addReg(Base, getDefRegState(true)) // WB base register
- .addReg(MI->getOperand(0).getReg(), RegState::Define);
- else if (isAM2)
- // LDR_PRE, LDR_POST,
- BuildMI(MBB, MBBI, dl, TII->get(NewOpc), MI->getOperand(0).getReg())
- .addReg(Base, RegState::Define)
- .addReg(Base).addReg(0).addImm(Offset).addImm(Pred).addReg(PredReg);
- else
- // t2LDR_PRE, t2LDR_POST
- BuildMI(MBB, MBBI, dl, TII->get(NewOpc), MI->getOperand(0).getReg())
- .addReg(Base, RegState::Define)
- .addReg(Base).addImm(Offset).addImm(Pred).addReg(PredReg);
- } else {
- MachineOperand &MO = MI->getOperand(0);
- if (isAM5)
- // VSTMS, VSTMD
- BuildMI(MBB, MBBI, dl, TII->get(NewOpc)).addReg(Base).addImm(Offset)
- .addImm(Pred).addReg(PredReg)
- .addReg(Base, getDefRegState(true)) // WB base register
- .addReg(MO.getReg(), getKillRegState(MO.isKill()));
- else if (isAM2)
- // STR_PRE, STR_POST
- BuildMI(MBB, MBBI, dl, TII->get(NewOpc), Base)
- .addReg(MO.getReg(), getKillRegState(MO.isKill()))
- .addReg(Base).addReg(0).addImm(Offset).addImm(Pred).addReg(PredReg);
- else
- // t2STR_PRE, t2STR_POST
- BuildMI(MBB, MBBI, dl, TII->get(NewOpc), Base)
- .addReg(MO.getReg(), getKillRegState(MO.isKill()))
- .addReg(Base).addImm(Offset).addImm(Pred).addReg(PredReg);
- }
- MBB.erase(MBBI);
-
- return true;
-}
-
-/// isMemoryOp - Returns true if the instruction is a memory operation (one that
-/// this pass is capable of operating on).
-static bool isMemoryOp(const MachineInstr *MI) {
- if (MI->hasOneMemOperand()) {
- const MachineMemOperand *MMO = *MI->memoperands_begin();
-
- // Don't touch volatile memory accesses - we may be changing their order.
- if (MMO->isVolatile())
- return false;
-
- // Unaligned ldr/str is emulated by some kernels, but unaligned ldm/stm is
- // not.
- if (MMO->getAlignment() < 4)
- return false;
- }
-
- // str <undef> could probably be eliminated entirely, but for now we just want
- // to avoid making a mess of it.
- // FIXME: Use str <undef> as a wildcard to enable better stm folding.
- if (MI->getNumOperands() > 0 && MI->getOperand(0).isReg() &&
- MI->getOperand(0).isUndef())
- return false;
-
- // Likewise don't mess with references to undefined addresses.
- if (MI->getNumOperands() > 1 && MI->getOperand(1).isReg() &&
- MI->getOperand(1).isUndef())
- return false;
-
- int Opcode = MI->getOpcode();
- switch (Opcode) {
- default: break;
- case ARM::LDR:
- case ARM::STR:
- return MI->getOperand(1).isReg() && MI->getOperand(2).getReg() == 0;
- case ARM::VLDRS:
- case ARM::VSTRS:
- return MI->getOperand(1).isReg();
- case ARM::VLDRD:
- case ARM::VSTRD:
- return MI->getOperand(1).isReg();
- case ARM::t2LDRi8:
- case ARM::t2LDRi12:
- case ARM::t2STRi8:
- case ARM::t2STRi12:
- return MI->getOperand(1).isReg();
- }
- return false;
-}
-
-/// AdvanceRS - Advance register scavenger to just before the earliest memory
-/// op that is being merged.
-void ARMLoadStoreOpt::AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps) {
- MachineBasicBlock::iterator Loc = MemOps[0].MBBI;
- unsigned Position = MemOps[0].Position;
- for (unsigned i = 1, e = MemOps.size(); i != e; ++i) {
- if (MemOps[i].Position < Position) {
- Position = MemOps[i].Position;
- Loc = MemOps[i].MBBI;
- }
- }
-
- if (Loc != MBB.begin())
- RS->forward(prior(Loc));
-}
-
-static int getMemoryOpOffset(const MachineInstr *MI) {
- int Opcode = MI->getOpcode();
- bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
- bool isAM3 = Opcode == ARM::LDRD || Opcode == ARM::STRD;
- unsigned NumOperands = MI->getDesc().getNumOperands();
- unsigned OffField = MI->getOperand(NumOperands-3).getImm();
-
- if (Opcode == ARM::t2LDRi12 || Opcode == ARM::t2LDRi8 ||
- Opcode == ARM::t2STRi12 || Opcode == ARM::t2STRi8 ||
- Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8)
- return OffField;
-
- int Offset = isAM2
- ? ARM_AM::getAM2Offset(OffField)
- : (isAM3 ? ARM_AM::getAM3Offset(OffField)
- : ARM_AM::getAM5Offset(OffField) * 4);
- if (isAM2) {
- if (ARM_AM::getAM2Op(OffField) == ARM_AM::sub)
- Offset = -Offset;
- } else if (isAM3) {
- if (ARM_AM::getAM3Op(OffField) == ARM_AM::sub)
- Offset = -Offset;
- } else {
- if (ARM_AM::getAM5Op(OffField) == ARM_AM::sub)
- Offset = -Offset;
- }
- return Offset;
-}
-
-static void InsertLDR_STR(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- int OffImm, bool isDef,
- DebugLoc dl, unsigned NewOpc,
- unsigned Reg, bool RegDeadKill, bool RegUndef,
- unsigned BaseReg, bool BaseKill, bool BaseUndef,
- unsigned OffReg, bool OffKill, bool OffUndef,
- ARMCC::CondCodes Pred, unsigned PredReg,
- const TargetInstrInfo *TII, bool isT2) {
- int Offset = OffImm;
- if (!isT2) {
- if (OffImm < 0)
- Offset = ARM_AM::getAM2Opc(ARM_AM::sub, -OffImm, ARM_AM::no_shift);
- else
- Offset = ARM_AM::getAM2Opc(ARM_AM::add, OffImm, ARM_AM::no_shift);
- }
- if (isDef) {
- MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
- TII->get(NewOpc))
- .addReg(Reg, getDefRegState(true) | getDeadRegState(RegDeadKill))
- .addReg(BaseReg, getKillRegState(BaseKill)|getUndefRegState(BaseUndef));
- if (!isT2)
- MIB.addReg(OffReg, getKillRegState(OffKill)|getUndefRegState(OffUndef));
- MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
- } else {
- MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
- TII->get(NewOpc))
- .addReg(Reg, getKillRegState(RegDeadKill) | getUndefRegState(RegUndef))
- .addReg(BaseReg, getKillRegState(BaseKill)|getUndefRegState(BaseUndef));
- if (!isT2)
- MIB.addReg(OffReg, getKillRegState(OffKill)|getUndefRegState(OffUndef));
- MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
- }
-}
-
-bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI) {
- MachineInstr *MI = &*MBBI;
- unsigned Opcode = MI->getOpcode();
- if (Opcode == ARM::LDRD || Opcode == ARM::STRD ||
- Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8) {
- unsigned EvenReg = MI->getOperand(0).getReg();
- unsigned OddReg = MI->getOperand(1).getReg();
- unsigned EvenRegNum = TRI->getDwarfRegNum(EvenReg, false);
- unsigned OddRegNum = TRI->getDwarfRegNum(OddReg, false);
- if ((EvenRegNum & 1) == 0 && (EvenRegNum + 1) == OddRegNum)
- return false;
-
- bool isT2 = Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8;
- bool isLd = Opcode == ARM::LDRD || Opcode == ARM::t2LDRDi8;
- bool EvenDeadKill = isLd ?
- MI->getOperand(0).isDead() : MI->getOperand(0).isKill();
- bool EvenUndef = MI->getOperand(0).isUndef();
- bool OddDeadKill = isLd ?
- MI->getOperand(1).isDead() : MI->getOperand(1).isKill();
- bool OddUndef = MI->getOperand(1).isUndef();
- const MachineOperand &BaseOp = MI->getOperand(2);
- unsigned BaseReg = BaseOp.getReg();
- bool BaseKill = BaseOp.isKill();
- bool BaseUndef = BaseOp.isUndef();
- unsigned OffReg = isT2 ? 0 : MI->getOperand(3).getReg();
- bool OffKill = isT2 ? false : MI->getOperand(3).isKill();
- bool OffUndef = isT2 ? false : MI->getOperand(3).isUndef();
- int OffImm = getMemoryOpOffset(MI);
- unsigned PredReg = 0;
- ARMCC::CondCodes Pred = llvm::getInstrPredicate(MI, PredReg);
-
- if (OddRegNum > EvenRegNum && OffReg == 0 && OffImm == 0) {
-      // Ascending register numbers and no offset. It's safe to change it to an
-      // ldm or stm.
- unsigned NewOpc = (isLd)
- ? (isT2 ? ARM::t2LDM : ARM::LDM)
- : (isT2 ? ARM::t2STM : ARM::STM);
- if (isLd) {
- BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
- .addReg(BaseReg, getKillRegState(BaseKill))
- .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
- .addImm(Pred).addReg(PredReg)
- .addReg(0)
- .addReg(EvenReg, getDefRegState(isLd) | getDeadRegState(EvenDeadKill))
- .addReg(OddReg, getDefRegState(isLd) | getDeadRegState(OddDeadKill));
- ++NumLDRD2LDM;
- } else {
- BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
- .addReg(BaseReg, getKillRegState(BaseKill))
- .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
- .addImm(Pred).addReg(PredReg)
- .addReg(0)
- .addReg(EvenReg,
- getKillRegState(EvenDeadKill) | getUndefRegState(EvenUndef))
- .addReg(OddReg,
- getKillRegState(OddDeadKill) | getUndefRegState(OddUndef));
- ++NumSTRD2STM;
- }
- } else {
- // Split into two instructions.
- assert((!isT2 || !OffReg) &&
- "Thumb2 ldrd / strd does not encode offset register!");
- unsigned NewOpc = (isLd)
- ? (isT2 ? (OffImm < 0 ? ARM::t2LDRi8 : ARM::t2LDRi12) : ARM::LDR)
- : (isT2 ? (OffImm < 0 ? ARM::t2STRi8 : ARM::t2STRi12) : ARM::STR);
- DebugLoc dl = MBBI->getDebugLoc();
-      // If this is a load and the base register is killed, it may have been
-      // re-defined by the load; make sure the first load does not clobber it.
- if (isLd &&
- (BaseKill || OffKill) &&
- (TRI->regsOverlap(EvenReg, BaseReg) ||
- (OffReg && TRI->regsOverlap(EvenReg, OffReg)))) {
- assert(!TRI->regsOverlap(OddReg, BaseReg) &&
- (!OffReg || !TRI->regsOverlap(OddReg, OffReg)));
- InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc,
- OddReg, OddDeadKill, false,
- BaseReg, false, BaseUndef, OffReg, false, OffUndef,
- Pred, PredReg, TII, isT2);
- InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
- EvenReg, EvenDeadKill, false,
- BaseReg, BaseKill, BaseUndef, OffReg, OffKill, OffUndef,
- Pred, PredReg, TII, isT2);
- } else {
- if (OddReg == EvenReg && EvenDeadKill) {
- // If the two source operands are the same, the kill marker is probably
- // on the first one. e.g.
- // t2STRDi8 %R5<kill>, %R5, %R9<kill>, 0, 14, %reg0
- EvenDeadKill = false;
- OddDeadKill = true;
- }
- InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
- EvenReg, EvenDeadKill, EvenUndef,
- BaseReg, false, BaseUndef, OffReg, false, OffUndef,
- Pred, PredReg, TII, isT2);
- InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc,
- OddReg, OddDeadKill, OddUndef,
- BaseReg, BaseKill, BaseUndef, OffReg, OffKill, OffUndef,
- Pred, PredReg, TII, isT2);
- }
- if (isLd)
- ++NumLDRD2LDR;
- else
- ++NumSTRD2STR;
- }
-
- MBBI = prior(MBBI);
- MBB.erase(MI);
- }
- return false;
-}
-
-/// LoadStoreMultipleOpti - An optimization pass to turn multiple LDR / STR
-/// ops of the same base and incrementing offset into LDM / STM ops.
-bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
- unsigned NumMerges = 0;
- unsigned NumMemOps = 0;
- MemOpQueue MemOps;
- unsigned CurrBase = 0;
- int CurrOpc = -1;
- unsigned CurrSize = 0;
- ARMCC::CondCodes CurrPred = ARMCC::AL;
- unsigned CurrPredReg = 0;
- unsigned Position = 0;
- SmallVector<MachineBasicBlock::iterator,4> Merges;
-
- RS->enterBasicBlock(&MBB);
- MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
- while (MBBI != E) {
- if (FixInvalidRegPairOp(MBB, MBBI))
- continue;
-
- bool Advance = false;
- bool TryMerge = false;
- bool Clobber = false;
-
- bool isMemOp = isMemoryOp(MBBI);
- if (isMemOp) {
- int Opcode = MBBI->getOpcode();
- unsigned Size = getLSMultipleTransferSize(MBBI);
- unsigned Base = MBBI->getOperand(1).getReg();
- unsigned PredReg = 0;
- ARMCC::CondCodes Pred = llvm::getInstrPredicate(MBBI, PredReg);
- int Offset = getMemoryOpOffset(MBBI);
- // Watch out for:
- // r4 := ldr [r5]
- // r5 := ldr [r5, #4]
- // r6 := ldr [r5, #8]
- //
- // The second ldr has effectively broken the chain even though it
- // looks like the later ldr(s) use the same base register. Try to
- // merge the ldr's so far, including this one. But don't try to
- // combine the following ldr(s).
- Clobber = (isi32Load(Opcode) && Base == MBBI->getOperand(0).getReg());
- if (CurrBase == 0 && !Clobber) {
- // Start of a new chain.
- CurrBase = Base;
- CurrOpc = Opcode;
- CurrSize = Size;
- CurrPred = Pred;
- CurrPredReg = PredReg;
- MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
- NumMemOps++;
- Advance = true;
- } else {
- if (Clobber) {
- TryMerge = true;
- Advance = true;
- }
-
- if (CurrOpc == Opcode && CurrBase == Base && CurrPred == Pred) {
- // No need to match PredReg.
- // Continue adding to the queue.
- if (Offset > MemOps.back().Offset) {
- MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
- NumMemOps++;
- Advance = true;
- } else {
- for (MemOpQueueIter I = MemOps.begin(), E = MemOps.end();
- I != E; ++I) {
- if (Offset < I->Offset) {
- MemOps.insert(I, MemOpQueueEntry(Offset, Position, MBBI));
- NumMemOps++;
- Advance = true;
- break;
- } else if (Offset == I->Offset) {
- // Collision! This can't be merged!
- break;
- }
- }
- }
- }
- }
- }
-
- if (Advance) {
- ++Position;
- ++MBBI;
- if (MBBI == E)
-        // Reached the end of the block; try merging the memory instructions.
- TryMerge = true;
- } else
- TryMerge = true;
-
- if (TryMerge) {
- if (NumMemOps > 1) {
- // Try to find a free register to use as a new base in case it's needed.
- // First advance to the instruction just before the start of the chain.
- AdvanceRS(MBB, MemOps);
- // Find a scratch register.
- unsigned Scratch = RS->FindUnusedReg(ARM::GPRRegisterClass);
- // Process the load / store instructions.
- RS->forward(prior(MBBI));
-
- // Merge ops.
- Merges.clear();
- MergeLDR_STR(MBB, 0, CurrBase, CurrOpc, CurrSize,
- CurrPred, CurrPredReg, Scratch, MemOps, Merges);
-
-          // Try folding preceding/trailing base inc/dec into the generated
- // LDM/STM ops.
- for (unsigned i = 0, e = Merges.size(); i < e; ++i)
- if (MergeBaseUpdateLSMultiple(MBB, Merges[i], Advance, MBBI))
- ++NumMerges;
- NumMerges += Merges.size();
-
-          // Try folding preceding/trailing base inc/dec into those loads/stores
-          // that were not merged to form LDM/STM ops.
- for (unsigned i = 0; i != NumMemOps; ++i)
- if (!MemOps[i].Merged)
- if (MergeBaseUpdateLoadStore(MBB, MemOps[i].MBBI, TII,Advance,MBBI))
- ++NumMerges;
-
- // RS may be pointing to an instruction that's deleted.
- RS->skipTo(prior(MBBI));
- } else if (NumMemOps == 1) {
-        // Try folding preceding/trailing base inc/dec into the single
- // load/store.
- if (MergeBaseUpdateLoadStore(MBB, MemOps[0].MBBI, TII, Advance, MBBI)) {
- ++NumMerges;
- RS->forward(prior(MBBI));
- }
- }
-
- CurrBase = 0;
- CurrOpc = -1;
- CurrSize = 0;
- CurrPred = ARMCC::AL;
- CurrPredReg = 0;
- if (NumMemOps) {
- MemOps.clear();
- NumMemOps = 0;
- }
-
- // If iterator hasn't been advanced and this is not a memory op, skip it.
- // It can't start a new chain anyway.
- if (!Advance && !isMemOp && MBBI != E) {
- ++Position;
- ++MBBI;
- }
- }
- }
- return NumMerges > 0;
-}
-
-namespace {
- struct OffsetCompare {
- bool operator()(const MachineInstr *LHS, const MachineInstr *RHS) const {
- int LOffset = getMemoryOpOffset(LHS);
- int ROffset = getMemoryOpOffset(RHS);
- assert(LHS == RHS || LOffset != ROffset);
- return LOffset > ROffset;
- }
- };
-}
-
-/// MergeReturnIntoLDM - If this is an exit BB, try merging the return op
-/// (bx lr) into the preceding stack restore so it directly restores the value
-/// of LR into pc.
-/// ldmfd sp!, {r7, lr}
-/// bx lr
-/// =>
-/// ldmfd sp!, {r7, pc}
-bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
- if (MBB.empty()) return false;
-
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- if (MBBI != MBB.begin() &&
- (MBBI->getOpcode() == ARM::BX_RET || MBBI->getOpcode() == ARM::tBX_RET)) {
- MachineInstr *PrevMI = prior(MBBI);
- if (PrevMI->getOpcode() == ARM::LDM || PrevMI->getOpcode() == ARM::t2LDM) {
- MachineOperand &MO = PrevMI->getOperand(PrevMI->getNumOperands()-1);
- if (MO.getReg() != ARM::LR)
- return false;
- unsigned NewOpc = isThumb2 ? ARM::t2LDM_RET : ARM::LDM_RET;
- PrevMI->setDesc(TII->get(NewOpc));
- MO.setReg(ARM::PC);
- MBB.erase(MBBI);
- return true;
- }
- }
- return false;
-}
-
-bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
- const TargetMachine &TM = Fn.getTarget();
- AFI = Fn.getInfo<ARMFunctionInfo>();
- TII = TM.getInstrInfo();
- TRI = TM.getRegisterInfo();
- RS = new RegScavenger();
- isThumb2 = AFI->isThumb2Function();
-
- bool Modified = false;
- for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
- ++MFI) {
- MachineBasicBlock &MBB = *MFI;
- Modified |= LoadStoreMultipleOpti(MBB);
- Modified |= MergeReturnIntoLDM(MBB);
- }
-
- delete RS;
- return Modified;
-}
-
-
-/// ARMPreAllocLoadStoreOpt - Pre-register allocation pass that moves
-/// loads / stores from consecutive locations closer together to make it more
-/// likely they will be combined later.
-
-namespace {
- struct ARMPreAllocLoadStoreOpt : public MachineFunctionPass{
- static char ID;
- ARMPreAllocLoadStoreOpt() : MachineFunctionPass(&ID) {}
-
- const TargetData *TD;
- const TargetInstrInfo *TII;
- const TargetRegisterInfo *TRI;
- const ARMSubtarget *STI;
- MachineRegisterInfo *MRI;
- MachineFunction *MF;
-
- virtual bool runOnMachineFunction(MachineFunction &Fn);
-
- virtual const char *getPassName() const {
- return "ARM pre- register allocation load / store optimization pass";
- }
-
- private:
- bool CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1, DebugLoc &dl,
- unsigned &NewOpc, unsigned &EvenReg,
- unsigned &OddReg, unsigned &BaseReg,
- unsigned &OffReg, int &Offset,
- unsigned &PredReg, ARMCC::CondCodes &Pred,
- bool &isT2);
- bool RescheduleOps(MachineBasicBlock *MBB,
- SmallVector<MachineInstr*, 4> &Ops,
- unsigned Base, bool isLd,
- DenseMap<MachineInstr*, unsigned> &MI2LocMap);
- bool RescheduleLoadStoreInstrs(MachineBasicBlock *MBB);
- };
- char ARMPreAllocLoadStoreOpt::ID = 0;
-}
-
-bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
- TD = Fn.getTarget().getTargetData();
- TII = Fn.getTarget().getInstrInfo();
- TRI = Fn.getTarget().getRegisterInfo();
- STI = &Fn.getTarget().getSubtarget<ARMSubtarget>();
- MRI = &Fn.getRegInfo();
- MF = &Fn;
-
- bool Modified = false;
- for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
- ++MFI)
- Modified |= RescheduleLoadStoreInstrs(MFI);
-
- return Modified;
-}
-
-static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
- MachineBasicBlock::iterator I,
- MachineBasicBlock::iterator E,
- SmallPtrSet<MachineInstr*, 4> &MemOps,
- SmallSet<unsigned, 4> &MemRegs,
- const TargetRegisterInfo *TRI) {
- // Are there stores / loads / calls between them?
- // FIXME: This is overly conservative. We should make use of alias information
- // some day.
- SmallSet<unsigned, 4> AddedRegPressure;
- while (++I != E) {
- if (MemOps.count(&*I))
- continue;
- const TargetInstrDesc &TID = I->getDesc();
- if (TID.isCall() || TID.isTerminator() || TID.hasUnmodeledSideEffects())
- return false;
- if (isLd && TID.mayStore())
- return false;
- if (!isLd) {
- if (TID.mayLoad())
- return false;
- // It's not safe to move the first 'str' down.
- // str r1, [r0]
- // strh r5, [r0]
- // str r4, [r0, #+4]
- if (TID.mayStore())
- return false;
- }
- for (unsigned j = 0, NumOps = I->getNumOperands(); j != NumOps; ++j) {
- MachineOperand &MO = I->getOperand(j);
- if (!MO.isReg())
- continue;
- unsigned Reg = MO.getReg();
- if (MO.isDef() && TRI->regsOverlap(Reg, Base))
- return false;
- if (Reg != Base && !MemRegs.count(Reg))
- AddedRegPressure.insert(Reg);
- }
- }
-
- // Estimate register pressure increase due to the transformation.
- if (MemRegs.size() <= 4)
-    // OK if we are moving a small number of instructions.
- return true;
- return AddedRegPressure.size() <= MemRegs.size() * 2;
-}
-
-bool
-ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
- DebugLoc &dl,
- unsigned &NewOpc, unsigned &EvenReg,
- unsigned &OddReg, unsigned &BaseReg,
- unsigned &OffReg, int &Offset,
- unsigned &PredReg,
- ARMCC::CondCodes &Pred,
- bool &isT2) {
- // Make sure we're allowed to generate LDRD/STRD.
- if (!STI->hasV5TEOps())
- return false;
-
- // FIXME: VLDRS / VSTRS -> VLDRD / VSTRD
- unsigned Scale = 1;
- unsigned Opcode = Op0->getOpcode();
- if (Opcode == ARM::LDR)
- NewOpc = ARM::LDRD;
- else if (Opcode == ARM::STR)
- NewOpc = ARM::STRD;
- else if (Opcode == ARM::t2LDRi8 || Opcode == ARM::t2LDRi12) {
- NewOpc = ARM::t2LDRDi8;
- Scale = 4;
- isT2 = true;
- } else if (Opcode == ARM::t2STRi8 || Opcode == ARM::t2STRi12) {
- NewOpc = ARM::t2STRDi8;
- Scale = 4;
- isT2 = true;
- } else
- return false;
-
- // Make sure the offset registers match.
- if (!isT2 &&
- (Op0->getOperand(2).getReg() != Op1->getOperand(2).getReg()))
- return false;
-
-  // Make sure the base address satisfies the i64 ld / st alignment requirement.
- if (!Op0->hasOneMemOperand() ||
- !(*Op0->memoperands_begin())->getValue() ||
- (*Op0->memoperands_begin())->isVolatile())
- return false;
-
- unsigned Align = (*Op0->memoperands_begin())->getAlignment();
- Function *Func = MF->getFunction();
- unsigned ReqAlign = STI->hasV6Ops()
- ? TD->getPrefTypeAlignment(Type::getInt64Ty(Func->getContext()))
- : 8; // Pre-v6 need 8-byte align
- if (Align < ReqAlign)
- return false;
-
- // Then make sure the immediate offset fits.
- int OffImm = getMemoryOpOffset(Op0);
- if (isT2) {
- if (OffImm < 0) {
- if (OffImm < -255)
- // Can't fall back to t2LDRi8 / t2STRi8.
- return false;
- } else {
- int Limit = (1 << 8) * Scale;
- if (OffImm >= Limit || (OffImm & (Scale-1)))
- return false;
- }
- Offset = OffImm;
- } else {
- ARM_AM::AddrOpc AddSub = ARM_AM::add;
- if (OffImm < 0) {
- AddSub = ARM_AM::sub;
- OffImm = - OffImm;
- }
- int Limit = (1 << 8) * Scale;
- if (OffImm >= Limit || (OffImm & (Scale-1)))
- return false;
- Offset = ARM_AM::getAM3Opc(AddSub, OffImm);
- }
- EvenReg = Op0->getOperand(0).getReg();
- OddReg = Op1->getOperand(0).getReg();
- if (EvenReg == OddReg)
- return false;
- BaseReg = Op0->getOperand(1).getReg();
- if (!isT2)
- OffReg = Op0->getOperand(2).getReg();
- Pred = llvm::getInstrPredicate(Op0, PredReg);
- dl = Op0->getDebugLoc();
- return true;
-}
-
-bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
- SmallVector<MachineInstr*, 4> &Ops,
- unsigned Base, bool isLd,
- DenseMap<MachineInstr*, unsigned> &MI2LocMap) {
- bool RetVal = false;
-
- // Sort by offset (in reverse order).
- std::sort(Ops.begin(), Ops.end(), OffsetCompare());
-
- // The loads / stores of the same base are in order. Scan them from first to
-  // last and check for the following:
- // 1. Any def of base.
- // 2. Any gaps.
- while (Ops.size() > 1) {
- unsigned FirstLoc = ~0U;
- unsigned LastLoc = 0;
- MachineInstr *FirstOp = 0;
- MachineInstr *LastOp = 0;
- int LastOffset = 0;
- unsigned LastOpcode = 0;
- unsigned LastBytes = 0;
- unsigned NumMove = 0;
- for (int i = Ops.size() - 1; i >= 0; --i) {
- MachineInstr *Op = Ops[i];
- unsigned Loc = MI2LocMap[Op];
- if (Loc <= FirstLoc) {
- FirstLoc = Loc;
- FirstOp = Op;
- }
- if (Loc >= LastLoc) {
- LastLoc = Loc;
- LastOp = Op;
- }
-
- unsigned Opcode = Op->getOpcode();
- if (LastOpcode && Opcode != LastOpcode)
- break;
-
- int Offset = getMemoryOpOffset(Op);
- unsigned Bytes = getLSMultipleTransferSize(Op);
- if (LastBytes) {
- if (Bytes != LastBytes || Offset != (LastOffset + (int)Bytes))
- break;
- }
- LastOffset = Offset;
- LastBytes = Bytes;
- LastOpcode = Opcode;
- if (++NumMove == 8) // FIXME: Tune this limit.
- break;
- }
-
- if (NumMove <= 1)
- Ops.pop_back();
- else {
- SmallPtrSet<MachineInstr*, 4> MemOps;
- SmallSet<unsigned, 4> MemRegs;
- for (int i = NumMove-1; i >= 0; --i) {
- MemOps.insert(Ops[i]);
- MemRegs.insert(Ops[i]->getOperand(0).getReg());
- }
-
-        // Be conservative: if the instructions are too far apart, don't
- // move them. We want to limit the increase of register pressure.
- bool DoMove = (LastLoc - FirstLoc) <= NumMove*4; // FIXME: Tune this.
- if (DoMove)
- DoMove = IsSafeAndProfitableToMove(isLd, Base, FirstOp, LastOp,
- MemOps, MemRegs, TRI);
- if (!DoMove) {
- for (unsigned i = 0; i != NumMove; ++i)
- Ops.pop_back();
- } else {
- // This is the new location for the loads / stores.
- MachineBasicBlock::iterator InsertPos = isLd ? FirstOp : LastOp;
- while (InsertPos != MBB->end() && MemOps.count(InsertPos))
- ++InsertPos;
-
- // If we are moving a pair of loads / stores, see if it makes sense
- // to try to allocate a pair of registers that can form register pairs.
- MachineInstr *Op0 = Ops.back();
- MachineInstr *Op1 = Ops[Ops.size()-2];
- unsigned EvenReg = 0, OddReg = 0;
- unsigned BaseReg = 0, OffReg = 0, PredReg = 0;
- ARMCC::CondCodes Pred = ARMCC::AL;
- bool isT2 = false;
- unsigned NewOpc = 0;
- int Offset = 0;
- DebugLoc dl;
- if (NumMove == 2 && CanFormLdStDWord(Op0, Op1, dl, NewOpc,
- EvenReg, OddReg, BaseReg, OffReg,
- Offset, PredReg, Pred, isT2)) {
- Ops.pop_back();
- Ops.pop_back();
-
- // Form the pair instruction.
- if (isLd) {
- MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos,
- dl, TII->get(NewOpc))
- .addReg(EvenReg, RegState::Define)
- .addReg(OddReg, RegState::Define)
- .addReg(BaseReg);
- if (!isT2)
- MIB.addReg(OffReg);
- MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
- ++NumLDRDFormed;
- } else {
- MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos,
- dl, TII->get(NewOpc))
- .addReg(EvenReg)
- .addReg(OddReg)
- .addReg(BaseReg);
- if (!isT2)
- MIB.addReg(OffReg);
- MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
- ++NumSTRDFormed;
- }
- MBB->erase(Op0);
- MBB->erase(Op1);
-
- // Add register allocation hints to form register pairs.
- MRI->setRegAllocationHint(EvenReg, ARMRI::RegPairEven, OddReg);
- MRI->setRegAllocationHint(OddReg, ARMRI::RegPairOdd, EvenReg);
- } else {
- for (unsigned i = 0; i != NumMove; ++i) {
- MachineInstr *Op = Ops.back();
- Ops.pop_back();
- MBB->splice(InsertPos, MBB, Op);
- }
- }
-
- NumLdStMoved += NumMove;
- RetVal = true;
- }
- }
- }
-
- return RetVal;
-}
-
-bool
-ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
- bool RetVal = false;
-
- DenseMap<MachineInstr*, unsigned> MI2LocMap;
- DenseMap<unsigned, SmallVector<MachineInstr*, 4> > Base2LdsMap;
- DenseMap<unsigned, SmallVector<MachineInstr*, 4> > Base2StsMap;
- SmallVector<unsigned, 4> LdBases;
- SmallVector<unsigned, 4> StBases;
-
- unsigned Loc = 0;
- MachineBasicBlock::iterator MBBI = MBB->begin();
- MachineBasicBlock::iterator E = MBB->end();
- while (MBBI != E) {
- for (; MBBI != E; ++MBBI) {
- MachineInstr *MI = MBBI;
- const TargetInstrDesc &TID = MI->getDesc();
- if (TID.isCall() || TID.isTerminator()) {
- // Stop at barriers.
- ++MBBI;
- break;
- }
-
- MI2LocMap[MI] = Loc++;
- if (!isMemoryOp(MI))
- continue;
- unsigned PredReg = 0;
- if (llvm::getInstrPredicate(MI, PredReg) != ARMCC::AL)
- continue;
-
- int Opc = MI->getOpcode();
- bool isLd = isi32Load(Opc) || Opc == ARM::VLDRS || Opc == ARM::VLDRD;
- unsigned Base = MI->getOperand(1).getReg();
- int Offset = getMemoryOpOffset(MI);
-
- bool StopHere = false;
- if (isLd) {
- DenseMap<unsigned, SmallVector<MachineInstr*, 4> >::iterator BI =
- Base2LdsMap.find(Base);
- if (BI != Base2LdsMap.end()) {
- for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
- if (Offset == getMemoryOpOffset(BI->second[i])) {
- StopHere = true;
- break;
- }
- }
- if (!StopHere)
- BI->second.push_back(MI);
- } else {
- SmallVector<MachineInstr*, 4> MIs;
- MIs.push_back(MI);
- Base2LdsMap[Base] = MIs;
- LdBases.push_back(Base);
- }
- } else {
- DenseMap<unsigned, SmallVector<MachineInstr*, 4> >::iterator BI =
- Base2StsMap.find(Base);
- if (BI != Base2StsMap.end()) {
- for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
- if (Offset == getMemoryOpOffset(BI->second[i])) {
- StopHere = true;
- break;
- }
- }
- if (!StopHere)
- BI->second.push_back(MI);
- } else {
- SmallVector<MachineInstr*, 4> MIs;
- MIs.push_back(MI);
- Base2StsMap[Base] = MIs;
- StBases.push_back(Base);
- }
- }
-
- if (StopHere) {
- // Found a duplicate (a base+offset combination that's seen earlier).
- // Backtrack.
- --Loc;
- break;
- }
- }
-
- // Re-schedule loads.
- for (unsigned i = 0, e = LdBases.size(); i != e; ++i) {
- unsigned Base = LdBases[i];
- SmallVector<MachineInstr*, 4> &Lds = Base2LdsMap[Base];
- if (Lds.size() > 1)
- RetVal |= RescheduleOps(MBB, Lds, Base, true, MI2LocMap);
- }
-
- // Re-schedule stores.
- for (unsigned i = 0, e = StBases.size(); i != e; ++i) {
- unsigned Base = StBases[i];
- SmallVector<MachineInstr*, 4> &Sts = Base2StsMap[Base];
- if (Sts.size() > 1)
- RetVal |= RescheduleOps(MBB, Sts, Base, false, MI2LocMap);
- }
-
- if (MBBI != E) {
- Base2LdsMap.clear();
- Base2StsMap.clear();
- LdBases.clear();
- StBases.clear();
- }
- }
-
- return RetVal;
-}
-
-
-/// createARMLoadStoreOptimizationPass - returns an instance of the load / store
-/// optimization pass.
-FunctionPass *llvm::createARMLoadStoreOptimizationPass(bool PreAlloc) {
- if (PreAlloc)
- return new ARMPreAllocLoadStoreOpt();
- return new ARMLoadStoreOpt();
-}
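RescheduleLoadStoreInstrs() above walks a basic block, buckets loads and stores by base register, and cuts a batch as soon as a base+offset pair repeats, so only genuinely distinct accesses to the same base get rescheduled together. A minimal, self-contained sketch of that grouping step follows; MemOp, groupByBase and the plain std::map/std::set containers here are illustrative stand-ins, not the pass's MachineInstr/DenseMap code.

#include <iostream>
#include <map>
#include <set>
#include <utility>
#include <vector>

// Illustrative stand-in for a machine memory op: base register + immediate offset.
struct MemOp { unsigned Base; int Offset; };

// Sketch of the grouping step: walk the ops in order, bucket them by base
// register, and close the current batch as soon as a (base, offset) pair
// repeats, since a duplicate means two accesses hit exactly the same slot.
std::vector<std::vector<MemOp>> groupByBase(const std::vector<MemOp> &Ops) {
  std::vector<std::vector<MemOp>> Batches;
  std::map<unsigned, std::vector<MemOp>> Base2Ops;
  std::set<std::pair<unsigned, int>> Seen;
  auto flush = [&] {
    for (auto &KV : Base2Ops)
      if (KV.second.size() > 1)   // only groups of 2+ ops are worth rescheduling
        Batches.push_back(KV.second);
    Base2Ops.clear();
    Seen.clear();
  };
  for (const MemOp &Op : Ops) {
    if (!Seen.insert({Op.Base, Op.Offset}).second) {
      flush();                     // duplicate base+offset: close the batch
      Seen.insert({Op.Base, Op.Offset});
    }
    Base2Ops[Op.Base].push_back(Op);
  }
  flush();
  return Batches;
}

int main() {
  std::vector<MemOp> Ops = {{1, 0}, {1, 4}, {2, 0}, {1, 0}, {1, 4}};
  for (auto &B : groupByBase(Ops))
    std::cout << "batch of " << B.size() << " ops on base r" << B[0].Base << "\n";
}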
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMMCAsmInfo.cpp b/libclamav/c++/llvm/lib/Target/ARM/ARMMCAsmInfo.cpp
deleted file mode 100644
index ccd6add..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMMCAsmInfo.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-//===-- ARMMCAsmInfo.cpp - ARM asm properties -------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declarations of the ARMMCAsmInfo properties.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARMMCAsmInfo.h"
-using namespace llvm;
-
-static const char *const arm_asm_table[] = {
- "{r0}", "r0",
- "{r1}", "r1",
- "{r2}", "r2",
- "{r3}", "r3",
- "{r4}", "r4",
- "{r5}", "r5",
- "{r6}", "r6",
- "{r7}", "r7",
- "{r8}", "r8",
- "{r9}", "r9",
- "{r10}", "r10",
- "{r11}", "r11",
- "{r12}", "r12",
- "{r13}", "r13",
- "{r14}", "r14",
- "{lr}", "lr",
- "{sp}", "sp",
- "{ip}", "ip",
- "{fp}", "fp",
- "{sl}", "sl",
- "{memory}", "memory",
- "{cc}", "cc",
- 0,0
-};
-
-ARMMCAsmInfoDarwin::ARMMCAsmInfoDarwin() {
- AsmTransCBE = arm_asm_table;
- Data64bitsDirective = 0;
- CommentString = "@";
- SupportsDebugInformation = true;
-
- // Exceptions handling
- ExceptionsType = ExceptionHandling::SjLj;
- AbsoluteEHSectionOffsets = false;
-}
-
-ARMELFMCAsmInfo::ARMELFMCAsmInfo() {
- // ".comm align is in bytes but .align is pow-2."
- AlignmentIsInBytes = false;
-
- Data64bitsDirective = 0;
- CommentString = "@";
-
- HasLEB128 = true;
- AbsoluteDebugSectionOffsets = true;
- PrivateGlobalPrefix = ".L";
- WeakRefDirective = "\t.weak\t";
- HasLCOMMDirective = true;
-
- DwarfRequiresFrameSection = false;
-
- SupportsDebugInformation = true;
-}
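The arm_asm_table removed above is a flat list of (alias, replacement) string pairs terminated by a null pair, which ARMMCAsmInfoDarwin wires into AsmTransCBE so inline-asm names like "{r0}" translate to "r0". A small sketch of a lookup over that table layout, assuming a hypothetical lookupAsmAlias() helper and a trimmed-down excerpt of the table:

#include <cstring>
#include <iostream>

// Same layout as the deleted arm_asm_table: flat (alias, replacement) string
// pairs, terminated by a null pair. Contents here are only a small excerpt.
static const char *const asm_table[] = {
  "{r0}", "r0",
  "{lr}", "lr",
  "{cc}", "cc",
  nullptr, nullptr
};

// Hypothetical helper: walk the pairs and return the replacement, or the
// input unchanged if no alias matches.
const char *lookupAsmAlias(const char *name) {
  for (unsigned i = 0; asm_table[i]; i += 2)
    if (std::strcmp(asm_table[i], name) == 0)
      return asm_table[i + 1];
  return name;
}

int main() {
  std::cout << lookupAsmAlias("{lr}") << "\n";  // prints "lr"
  std::cout << lookupAsmAlias("sp") << "\n";    // no alias: prints "sp"
}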
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMMCAsmInfo.h b/libclamav/c++/llvm/lib/Target/ARM/ARMMCAsmInfo.h
deleted file mode 100644
index 90f7822..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMMCAsmInfo.h
+++ /dev/null
@@ -1,31 +0,0 @@
-//=====-- ARMMCAsmInfo.h - ARM asm properties -------------*- C++ -*--====//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the declaration of the ARMMCAsmInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ARMTARGETASMINFO_H
-#define LLVM_ARMTARGETASMINFO_H
-
-#include "llvm/MC/MCAsmInfoDarwin.h"
-
-namespace llvm {
-
- struct ARMMCAsmInfoDarwin : public MCAsmInfoDarwin {
- explicit ARMMCAsmInfoDarwin();
- };
-
- struct ARMELFMCAsmInfo : public MCAsmInfo {
- explicit ARMELFMCAsmInfo();
- };
-
-} // namespace llvm
-
-#endif
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h b/libclamav/c++/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
deleted file mode 100644
index c998ede..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
+++ /dev/null
@@ -1,229 +0,0 @@
-//====- ARMMachineFunctionInfo.h - ARM machine function info ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares ARM-specific per-machine-function information.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARMMACHINEFUNCTIONINFO_H
-#define ARMMACHINEFUNCTIONINFO_H
-
-#include "ARMSubtarget.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/ADT/BitVector.h"
-
-namespace llvm {
-
-/// ARMFunctionInfo - This class is derived from MachineFunctionInfo and
-/// contains private ARM target-specific information for each MachineFunction.
-class ARMFunctionInfo : public MachineFunctionInfo {
-
- /// isThumb - True if this function is compiled under Thumb mode.
- /// Used to initialize Align, so it must precede it.
- bool isThumb;
-
- /// hasThumb2 - True if the target architecture supports Thumb2. Do not use
- /// this to determine whether the function is compiled in Thumb mode; for
- /// that, use 'isThumb'.
- bool hasThumb2;
-
- /// VarArgsRegSaveSize - Size of the register save area for vararg functions.
- ///
- unsigned VarArgsRegSaveSize;
-
- /// HasStackFrame - True if this function has a stack frame. Set by
- /// processFunctionBeforeCalleeSavedScan().
- bool HasStackFrame;
-
- /// LRSpilledForFarJump - True if the LR register has been spilled to
- /// enable a far jump.
- bool LRSpilledForFarJump;
-
- /// FramePtrSpillOffset - If HasStackFrame, this records the frame pointer
- /// spill stack offset.
- unsigned FramePtrSpillOffset;
-
- /// GPRCS1Offset, GPRCS2Offset, DPRCSOffset - Starting offsets of the
- /// callee-saved register spill areas. For Mac OS X:
- ///
- /// GPR callee-saved (1) : r4, r5, r6, r7, lr
- /// --------------------------------------------
- /// GPR callee-saved (2) : r8, r10, r11
- /// --------------------------------------------
- /// DPR callee-saved : d8 - d15
- unsigned GPRCS1Offset;
- unsigned GPRCS2Offset;
- unsigned DPRCSOffset;
-
- /// GPRCS1Size, GPRCS2Size, DPRCSSize - Sizes of the callee-saved register
- /// spill areas.
- unsigned GPRCS1Size;
- unsigned GPRCS2Size;
- unsigned DPRCSSize;
-
- /// GPRCS1Frames, GPRCS2Frames, DPRCSFrames - Keeps track of frame indices
- /// which belong to these spill areas.
- BitVector GPRCS1Frames;
- BitVector GPRCS2Frames;
- BitVector DPRCSFrames;
-
- /// SpilledCSRegs - A BitVector mask of all spilled callee-saved registers.
- ///
- BitVector SpilledCSRegs;
-
- /// JumpTableUId - Unique id for jumptables.
- ///
- unsigned JumpTableUId;
-
- unsigned ConstPoolEntryUId;
-
-public:
- ARMFunctionInfo() :
- isThumb(false),
- hasThumb2(false),
- VarArgsRegSaveSize(0), HasStackFrame(false),
- LRSpilledForFarJump(false),
- FramePtrSpillOffset(0), GPRCS1Offset(0), GPRCS2Offset(0), DPRCSOffset(0),
- GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0),
- GPRCS1Frames(0), GPRCS2Frames(0), DPRCSFrames(0),
- JumpTableUId(0), ConstPoolEntryUId(0) {}
-
- explicit ARMFunctionInfo(MachineFunction &MF) :
- isThumb(MF.getTarget().getSubtarget<ARMSubtarget>().isThumb()),
- hasThumb2(MF.getTarget().getSubtarget<ARMSubtarget>().hasThumb2()),
- VarArgsRegSaveSize(0), HasStackFrame(false),
- LRSpilledForFarJump(false),
- FramePtrSpillOffset(0), GPRCS1Offset(0), GPRCS2Offset(0), DPRCSOffset(0),
- GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0),
- GPRCS1Frames(32), GPRCS2Frames(32), DPRCSFrames(32),
- SpilledCSRegs(MF.getTarget().getRegisterInfo()->getNumRegs()),
- JumpTableUId(0), ConstPoolEntryUId(0) {}
-
- bool isThumbFunction() const { return isThumb; }
- bool isThumb1OnlyFunction() const { return isThumb && !hasThumb2; }
- bool isThumb2Function() const { return isThumb && hasThumb2; }
-
- unsigned getVarArgsRegSaveSize() const { return VarArgsRegSaveSize; }
- void setVarArgsRegSaveSize(unsigned s) { VarArgsRegSaveSize = s; }
-
- bool hasStackFrame() const { return HasStackFrame; }
- void setHasStackFrame(bool s) { HasStackFrame = s; }
-
- bool isLRSpilledForFarJump() const { return LRSpilledForFarJump; }
- void setLRIsSpilledForFarJump(bool s) { LRSpilledForFarJump = s; }
-
- unsigned getFramePtrSpillOffset() const { return FramePtrSpillOffset; }
- void setFramePtrSpillOffset(unsigned o) { FramePtrSpillOffset = o; }
-
- unsigned getGPRCalleeSavedArea1Offset() const { return GPRCS1Offset; }
- unsigned getGPRCalleeSavedArea2Offset() const { return GPRCS2Offset; }
- unsigned getDPRCalleeSavedAreaOffset() const { return DPRCSOffset; }
-
- void setGPRCalleeSavedArea1Offset(unsigned o) { GPRCS1Offset = o; }
- void setGPRCalleeSavedArea2Offset(unsigned o) { GPRCS2Offset = o; }
- void setDPRCalleeSavedAreaOffset(unsigned o) { DPRCSOffset = o; }
-
- unsigned getGPRCalleeSavedArea1Size() const { return GPRCS1Size; }
- unsigned getGPRCalleeSavedArea2Size() const { return GPRCS2Size; }
- unsigned getDPRCalleeSavedAreaSize() const { return DPRCSSize; }
-
- void setGPRCalleeSavedArea1Size(unsigned s) { GPRCS1Size = s; }
- void setGPRCalleeSavedArea2Size(unsigned s) { GPRCS2Size = s; }
- void setDPRCalleeSavedAreaSize(unsigned s) { DPRCSSize = s; }
-
- bool isGPRCalleeSavedArea1Frame(int fi) const {
- if (fi < 0 || fi >= (int)GPRCS1Frames.size())
- return false;
- return GPRCS1Frames[fi];
- }
- bool isGPRCalleeSavedArea2Frame(int fi) const {
- if (fi < 0 || fi >= (int)GPRCS2Frames.size())
- return false;
- return GPRCS2Frames[fi];
- }
- bool isDPRCalleeSavedAreaFrame(int fi) const {
- if (fi < 0 || fi >= (int)DPRCSFrames.size())
- return false;
- return DPRCSFrames[fi];
- }
-
- void addGPRCalleeSavedArea1Frame(int fi) {
- if (fi >= 0) {
- int Size = GPRCS1Frames.size();
- if (fi >= Size) {
- Size *= 2;
- if (fi >= Size)
- Size = fi+1;
- GPRCS1Frames.resize(Size);
- }
- GPRCS1Frames[fi] = true;
- }
- }
- void addGPRCalleeSavedArea2Frame(int fi) {
- if (fi >= 0) {
- int Size = GPRCS2Frames.size();
- if (fi >= Size) {
- Size *= 2;
- if (fi >= Size)
- Size = fi+1;
- GPRCS2Frames.resize(Size);
- }
- GPRCS2Frames[fi] = true;
- }
- }
- void addDPRCalleeSavedAreaFrame(int fi) {
- if (fi >= 0) {
- int Size = DPRCSFrames.size();
- if (fi >= Size) {
- Size *= 2;
- if (fi >= Size)
- Size = fi+1;
- DPRCSFrames.resize(Size);
- }
- DPRCSFrames[fi] = true;
- }
- }
-
- void setCSRegisterIsSpilled(unsigned Reg) {
- SpilledCSRegs.set(Reg);
- }
-
- bool isCSRegisterSpilled(unsigned Reg) const {
- return SpilledCSRegs[Reg];
- }
-
- const BitVector &getSpilledCSRegisters() const {
- return SpilledCSRegs;
- }
-
- unsigned createJumpTableUId() {
- return JumpTableUId++;
- }
-
- unsigned getNumJumpTables() const {
- return JumpTableUId;
- }
-
- void initConstPoolEntryUId(unsigned UId) {
- ConstPoolEntryUId = UId;
- }
-
- unsigned getNumConstPoolEntries() const {
- return ConstPoolEntryUId;
- }
-
- unsigned createConstPoolEntryUId() {
- return ConstPoolEntryUId++;
- }
-};
-} // End llvm namespace
-
-#endif // ARMMACHINEFUNCTIONINFO_H
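The add*CalleeSavedArea*Frame() helpers in the header above grow their BitVector by doubling (or jump straight to fi+1 when doubling is still too small) before setting bit fi, and the is*Frame() queries bounds-check before reading. A self-contained sketch of that pattern, assuming a hypothetical FrameBitSet class with std::vector<bool> standing in for llvm::BitVector:

#include <iostream>
#include <vector>

// Stand-in for the BitVector-backed spill-area bookkeeping: markFrame()
// mirrors addGPRCalleeSavedArea1Frame() (grow by doubling, or directly to
// fi+1 if doubling is still too small), and isFrame() mirrors the
// bounds-checked isGPRCalleeSavedArea1Frame() query.
class FrameBitSet {
  std::vector<bool> Bits;
public:
  explicit FrameBitSet(unsigned N) : Bits(N, false) {}

  void markFrame(int fi) {
    if (fi < 0)
      return;
    int Size = static_cast<int>(Bits.size());
    if (fi >= Size) {
      Size *= 2;
      if (fi >= Size)
        Size = fi + 1;
      Bits.resize(Size, false);
    }
    Bits[fi] = true;
  }

  bool isFrame(int fi) const {
    if (fi < 0 || fi >= static_cast<int>(Bits.size()))
      return false;
    return Bits[fi];
  }
};

int main() {
  FrameBitSet CS(32);      // same initial capacity as GPRCS1Frames(32)
  CS.markFrame(3);
  CS.markFrame(100);       // forces a resize past the doubled size
  std::cout << CS.isFrame(3) << " " << CS.isFrame(100) << " "
            << CS.isFrame(7) << "\n";  // prints: 1 1 0
}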
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMPerfectShuffle.h b/libclamav/c++/llvm/lib/Target/ARM/ARMPerfectShuffle.h
deleted file mode 100644
index 5ff7c38..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMPerfectShuffle.h
+++ /dev/null
@@ -1,6586 +0,0 @@
-//===-- ARMPerfectShuffle.h - NEON Perfect Shuffle Table ------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file, which was autogenerated by llvm-PerfectShuffle, contains data
-// for the optimal way to build a perfect shuffle using NEON instructions.
-//
-//===----------------------------------------------------------------------===//
-
-// 31 entries have cost 0
-// 242 entries have cost 1
-// 1447 entries have cost 2
-// 3602 entries have cost 3
-// 1237 entries have cost 4
-// 2 entries have cost 5
-
-// This table is 6561*4 = 26244 bytes in size.
-static const unsigned PerfectShuffleTable[6561+1] = {
- 135053414U, // <0,0,0,0>: Cost 1 vdup0 LHS
- 1543503974U, // <0,0,0,1>: Cost 2 vext2 <0,0,0,0>, LHS
- 2618572962U, // <0,0,0,2>: Cost 3 vext2 <0,2,0,0>, <0,2,0,0>
- 2568054923U, // <0,0,0,3>: Cost 3 vext1 <3,0,0,0>, <3,0,0,0>
- 1476398390U, // <0,0,0,4>: Cost 2 vext1 <0,0,0,0>, RHS
- 2550140624U, // <0,0,0,5>: Cost 3 vext1 <0,0,0,0>, <5,1,7,3>
- 2550141434U, // <0,0,0,6>: Cost 3 vext1 <0,0,0,0>, <6,2,7,3>
- 2591945711U, // <0,0,0,7>: Cost 3 vext1 <7,0,0,0>, <7,0,0,0>
- 135053414U, // <0,0,0,u>: Cost 1 vdup0 LHS
- 2886516736U, // <0,0,1,0>: Cost 3 vzipl LHS, <0,0,0,0>
- 1812775014U, // <0,0,1,1>: Cost 2 vzipl LHS, LHS
- 1618133094U, // <0,0,1,2>: Cost 2 vext3 <1,2,3,0>, LHS
- 2625209292U, // <0,0,1,3>: Cost 3 vext2 <1,3,0,0>, <1,3,0,0>
- 2886558034U, // <0,0,1,4>: Cost 3 vzipl LHS, <0,4,1,5>
- 2617246864U, // <0,0,1,5>: Cost 3 vext2 <0,0,0,0>, <1,5,3,7>
- 3659723031U, // <0,0,1,6>: Cost 4 vext1 <6,0,0,1>, <6,0,0,1>
- 2591953904U, // <0,0,1,7>: Cost 3 vext1 <7,0,0,1>, <7,0,0,1>
- 1812775581U, // <0,0,1,u>: Cost 2 vzipl LHS, LHS
- 3020734464U, // <0,0,2,0>: Cost 3 vtrnl LHS, <0,0,0,0>
- 3020734474U, // <0,0,2,1>: Cost 3 vtrnl LHS, <0,0,1,1>
- 1946992742U, // <0,0,2,2>: Cost 2 vtrnl LHS, LHS
- 2631181989U, // <0,0,2,3>: Cost 3 vext2 <2,3,0,0>, <2,3,0,0>
- 3020734668U, // <0,0,2,4>: Cost 3 vtrnl LHS, <0,2,4,6>
- 3826550569U, // <0,0,2,5>: Cost 4 vuzpl <0,2,0,2>, <2,4,5,6>
- 2617247674U, // <0,0,2,6>: Cost 3 vext2 <0,0,0,0>, <2,6,3,7>
- 2591962097U, // <0,0,2,7>: Cost 3 vext1 <7,0,0,2>, <7,0,0,2>
- 1946992796U, // <0,0,2,u>: Cost 2 vtrnl LHS, LHS
- 2635163787U, // <0,0,3,0>: Cost 3 vext2 <3,0,0,0>, <3,0,0,0>
- 2686419196U, // <0,0,3,1>: Cost 3 vext3 <0,3,1,0>, <0,3,1,0>
- 2686492933U, // <0,0,3,2>: Cost 3 vext3 <0,3,2,0>, <0,3,2,0>
- 2617248156U, // <0,0,3,3>: Cost 3 vext2 <0,0,0,0>, <3,3,3,3>
- 2617248258U, // <0,0,3,4>: Cost 3 vext2 <0,0,0,0>, <3,4,5,6>
- 3826551298U, // <0,0,3,5>: Cost 4 vuzpl <0,2,0,2>, <3,4,5,6>
- 3690990200U, // <0,0,3,6>: Cost 4 vext2 <0,0,0,0>, <3,6,0,7>
- 3713551042U, // <0,0,3,7>: Cost 4 vext2 <3,7,0,0>, <3,7,0,0>
- 2635163787U, // <0,0,3,u>: Cost 3 vext2 <3,0,0,0>, <3,0,0,0>
- 2617248658U, // <0,0,4,0>: Cost 3 vext2 <0,0,0,0>, <4,0,5,1>
- 2888450150U, // <0,0,4,1>: Cost 3 vzipl <0,4,1,5>, LHS
- 3021570150U, // <0,0,4,2>: Cost 3 vtrnl <0,2,4,6>, LHS
- 3641829519U, // <0,0,4,3>: Cost 4 vext1 <3,0,0,4>, <3,0,0,4>
- 3021570252U, // <0,0,4,4>: Cost 3 vtrnl <0,2,4,6>, <0,2,4,6>
- 1543507254U, // <0,0,4,5>: Cost 2 vext2 <0,0,0,0>, RHS
- 2752810294U, // <0,0,4,6>: Cost 3 vuzpl <0,2,0,2>, RHS
- 3786998152U, // <0,0,4,7>: Cost 4 vext3 <4,7,5,0>, <0,4,7,5>
- 1543507497U, // <0,0,4,u>: Cost 2 vext2 <0,0,0,0>, RHS
- 2684354972U, // <0,0,5,0>: Cost 3 vext3 <0,0,0,0>, <0,5,0,7>
- 2617249488U, // <0,0,5,1>: Cost 3 vext2 <0,0,0,0>, <5,1,7,3>
- 3765617070U, // <0,0,5,2>: Cost 4 vext3 <1,2,3,0>, <0,5,2,7>
- 3635865780U, // <0,0,5,3>: Cost 4 vext1 <2,0,0,5>, <3,0,4,5>
- 2617249734U, // <0,0,5,4>: Cost 3 vext2 <0,0,0,0>, <5,4,7,6>
- 2617249796U, // <0,0,5,5>: Cost 3 vext2 <0,0,0,0>, <5,5,5,5>
- 2718712274U, // <0,0,5,6>: Cost 3 vext3 <5,6,7,0>, <0,5,6,7>
- 2617249960U, // <0,0,5,7>: Cost 3 vext2 <0,0,0,0>, <5,7,5,7>
- 2720039396U, // <0,0,5,u>: Cost 3 vext3 <5,u,7,0>, <0,5,u,7>
- 2684355053U, // <0,0,6,0>: Cost 3 vext3 <0,0,0,0>, <0,6,0,7>
- 3963609190U, // <0,0,6,1>: Cost 4 vzipl <0,6,2,7>, LHS
- 2617250298U, // <0,0,6,2>: Cost 3 vext2 <0,0,0,0>, <6,2,7,3>
- 3796435464U, // <0,0,6,3>: Cost 4 vext3 <6,3,7,0>, <0,6,3,7>
- 3659762998U, // <0,0,6,4>: Cost 4 vext1 <6,0,0,6>, RHS
- 3659763810U, // <0,0,6,5>: Cost 4 vext1 <6,0,0,6>, <5,6,7,0>
- 2617250616U, // <0,0,6,6>: Cost 3 vext2 <0,0,0,0>, <6,6,6,6>
- 2657727309U, // <0,0,6,7>: Cost 3 vext2 <6,7,0,0>, <6,7,0,0>
- 2658390942U, // <0,0,6,u>: Cost 3 vext2 <6,u,0,0>, <6,u,0,0>
- 2659054575U, // <0,0,7,0>: Cost 3 vext2 <7,0,0,0>, <7,0,0,0>
- 3635880854U, // <0,0,7,1>: Cost 4 vext1 <2,0,0,7>, <1,2,3,0>
- 3635881401U, // <0,0,7,2>: Cost 4 vext1 <2,0,0,7>, <2,0,0,7>
- 3734787298U, // <0,0,7,3>: Cost 4 vext2 <7,3,0,0>, <7,3,0,0>
- 2617251174U, // <0,0,7,4>: Cost 3 vext2 <0,0,0,0>, <7,4,5,6>
- 3659772002U, // <0,0,7,5>: Cost 4 vext1 <6,0,0,7>, <5,6,7,0>
- 3659772189U, // <0,0,7,6>: Cost 4 vext1 <6,0,0,7>, <6,0,0,7>
- 2617251436U, // <0,0,7,7>: Cost 3 vext2 <0,0,0,0>, <7,7,7,7>
- 2659054575U, // <0,0,7,u>: Cost 3 vext2 <7,0,0,0>, <7,0,0,0>
- 135053414U, // <0,0,u,0>: Cost 1 vdup0 LHS
- 1817419878U, // <0,0,u,1>: Cost 2 vzipl LHS, LHS
- 1947435110U, // <0,0,u,2>: Cost 2 vtrnl LHS, LHS
- 2568120467U, // <0,0,u,3>: Cost 3 vext1 <3,0,0,u>, <3,0,0,u>
- 1476463926U, // <0,0,u,4>: Cost 2 vext1 <0,0,0,u>, RHS
- 1543510170U, // <0,0,u,5>: Cost 2 vext2 <0,0,0,0>, RHS
- 2752813210U, // <0,0,u,6>: Cost 3 vuzpl <0,2,0,2>, RHS
- 2592011255U, // <0,0,u,7>: Cost 3 vext1 <7,0,0,u>, <7,0,0,u>
- 135053414U, // <0,0,u,u>: Cost 1 vdup0 LHS
- 2618581002U, // <0,1,0,0>: Cost 3 vext2 <0,2,0,1>, <0,0,1,1>
- 1557446758U, // <0,1,0,1>: Cost 2 vext2 <2,3,0,1>, LHS
- 2618581155U, // <0,1,0,2>: Cost 3 vext2 <0,2,0,1>, <0,2,0,1>
- 2690548468U, // <0,1,0,3>: Cost 3 vext3 <1,0,3,0>, <1,0,3,0>
- 2626543954U, // <0,1,0,4>: Cost 3 vext2 <1,5,0,1>, <0,4,1,5>
- 4094985216U, // <0,1,0,5>: Cost 4 vtrnl <0,2,0,2>, <1,3,5,7>
- 2592019278U, // <0,1,0,6>: Cost 3 vext1 <7,0,1,0>, <6,7,0,1>
- 2592019448U, // <0,1,0,7>: Cost 3 vext1 <7,0,1,0>, <7,0,1,0>
- 1557447325U, // <0,1,0,u>: Cost 2 vext2 <2,3,0,1>, LHS
- 1476476938U, // <0,1,1,0>: Cost 2 vext1 <0,0,1,1>, <0,0,1,1>
- 2886517556U, // <0,1,1,1>: Cost 3 vzipl LHS, <1,1,1,1>
- 2886517654U, // <0,1,1,2>: Cost 3 vzipl LHS, <1,2,3,0>
- 2886517720U, // <0,1,1,3>: Cost 3 vzipl LHS, <1,3,1,3>
- 1476480310U, // <0,1,1,4>: Cost 2 vext1 <0,0,1,1>, RHS
- 2886558864U, // <0,1,1,5>: Cost 3 vzipl LHS, <1,5,3,7>
- 2550223354U, // <0,1,1,6>: Cost 3 vext1 <0,0,1,1>, <6,2,7,3>
- 2550223856U, // <0,1,1,7>: Cost 3 vext1 <0,0,1,1>, <7,0,0,1>
- 1476482862U, // <0,1,1,u>: Cost 2 vext1 <0,0,1,1>, LHS
- 1494401126U, // <0,1,2,0>: Cost 2 vext1 <3,0,1,2>, LHS
- 3020735284U, // <0,1,2,1>: Cost 3 vtrnl LHS, <1,1,1,1>
- 2562172349U, // <0,1,2,2>: Cost 3 vext1 <2,0,1,2>, <2,0,1,2>
- 835584U, // <0,1,2,3>: Cost 0 copy LHS
- 1494404406U, // <0,1,2,4>: Cost 2 vext1 <3,0,1,2>, RHS
- 3020735488U, // <0,1,2,5>: Cost 3 vtrnl LHS, <1,3,5,7>
- 2631190458U, // <0,1,2,6>: Cost 3 vext2 <2,3,0,1>, <2,6,3,7>
- 1518294010U, // <0,1,2,7>: Cost 2 vext1 <7,0,1,2>, <7,0,1,2>
- 835584U, // <0,1,2,u>: Cost 0 copy LHS
- 2692318156U, // <0,1,3,0>: Cost 3 vext3 <1,3,0,0>, <1,3,0,0>
- 2691875800U, // <0,1,3,1>: Cost 3 vext3 <1,2,3,0>, <1,3,1,3>
- 2691875806U, // <0,1,3,2>: Cost 3 vext3 <1,2,3,0>, <1,3,2,0>
- 2692539367U, // <0,1,3,3>: Cost 3 vext3 <1,3,3,0>, <1,3,3,0>
- 2562182454U, // <0,1,3,4>: Cost 3 vext1 <2,0,1,3>, RHS
- 2691875840U, // <0,1,3,5>: Cost 3 vext3 <1,2,3,0>, <1,3,5,7>
- 2692760578U, // <0,1,3,6>: Cost 3 vext3 <1,3,6,0>, <1,3,6,0>
- 2639817411U, // <0,1,3,7>: Cost 3 vext2 <3,7,0,1>, <3,7,0,1>
- 2691875863U, // <0,1,3,u>: Cost 3 vext3 <1,2,3,0>, <1,3,u,3>
- 2568159334U, // <0,1,4,0>: Cost 3 vext1 <3,0,1,4>, LHS
- 4095312692U, // <0,1,4,1>: Cost 4 vtrnl <0,2,4,6>, <1,1,1,1>
- 2568160934U, // <0,1,4,2>: Cost 3 vext1 <3,0,1,4>, <2,3,0,1>
- 2568161432U, // <0,1,4,3>: Cost 3 vext1 <3,0,1,4>, <3,0,1,4>
- 2568162614U, // <0,1,4,4>: Cost 3 vext1 <3,0,1,4>, RHS
- 1557450038U, // <0,1,4,5>: Cost 2 vext2 <2,3,0,1>, RHS
- 2754235702U, // <0,1,4,6>: Cost 3 vuzpl <0,4,1,5>, RHS
- 2592052220U, // <0,1,4,7>: Cost 3 vext1 <7,0,1,4>, <7,0,1,4>
- 1557450281U, // <0,1,4,u>: Cost 2 vext2 <2,3,0,1>, RHS
- 3765617775U, // <0,1,5,0>: Cost 4 vext3 <1,2,3,0>, <1,5,0,1>
- 2647781007U, // <0,1,5,1>: Cost 3 vext2 <5,1,0,1>, <5,1,0,1>
- 3704934138U, // <0,1,5,2>: Cost 4 vext2 <2,3,0,1>, <5,2,3,0>
- 2691875984U, // <0,1,5,3>: Cost 3 vext3 <1,2,3,0>, <1,5,3,7>
- 2657734598U, // <0,1,5,4>: Cost 3 vext2 <6,7,0,1>, <5,4,7,6>
- 2650435539U, // <0,1,5,5>: Cost 3 vext2 <5,5,0,1>, <5,5,0,1>
- 2651099172U, // <0,1,5,6>: Cost 3 vext2 <5,6,0,1>, <5,6,0,1>
- 2651762805U, // <0,1,5,7>: Cost 3 vext2 <5,7,0,1>, <5,7,0,1>
- 2691876029U, // <0,1,5,u>: Cost 3 vext3 <1,2,3,0>, <1,5,u,7>
- 2592063590U, // <0,1,6,0>: Cost 3 vext1 <7,0,1,6>, LHS
- 3765617871U, // <0,1,6,1>: Cost 4 vext3 <1,2,3,0>, <1,6,1,7>
- 2654417337U, // <0,1,6,2>: Cost 3 vext2 <6,2,0,1>, <6,2,0,1>
- 3765617889U, // <0,1,6,3>: Cost 4 vext3 <1,2,3,0>, <1,6,3,7>
- 2592066870U, // <0,1,6,4>: Cost 3 vext1 <7,0,1,6>, RHS
- 3765617907U, // <0,1,6,5>: Cost 4 vext3 <1,2,3,0>, <1,6,5,7>
- 2657071869U, // <0,1,6,6>: Cost 3 vext2 <6,6,0,1>, <6,6,0,1>
- 1583993678U, // <0,1,6,7>: Cost 2 vext2 <6,7,0,1>, <6,7,0,1>
- 1584657311U, // <0,1,6,u>: Cost 2 vext2 <6,u,0,1>, <6,u,0,1>
- 2657735672U, // <0,1,7,0>: Cost 3 vext2 <6,7,0,1>, <7,0,1,0>
- 2657735808U, // <0,1,7,1>: Cost 3 vext2 <6,7,0,1>, <7,1,7,1>
- 2631193772U, // <0,1,7,2>: Cost 3 vext2 <2,3,0,1>, <7,2,3,0>
- 2661053667U, // <0,1,7,3>: Cost 3 vext2 <7,3,0,1>, <7,3,0,1>
- 2657736038U, // <0,1,7,4>: Cost 3 vext2 <6,7,0,1>, <7,4,5,6>
- 3721524621U, // <0,1,7,5>: Cost 4 vext2 <5,1,0,1>, <7,5,1,0>
- 2657736158U, // <0,1,7,6>: Cost 3 vext2 <6,7,0,1>, <7,6,1,0>
- 2657736300U, // <0,1,7,7>: Cost 3 vext2 <6,7,0,1>, <7,7,7,7>
- 2657736322U, // <0,1,7,u>: Cost 3 vext2 <6,7,0,1>, <7,u,1,2>
- 1494450278U, // <0,1,u,0>: Cost 2 vext1 <3,0,1,u>, LHS
- 1557452590U, // <0,1,u,1>: Cost 2 vext2 <2,3,0,1>, LHS
- 2754238254U, // <0,1,u,2>: Cost 3 vuzpl <0,4,1,5>, LHS
- 835584U, // <0,1,u,3>: Cost 0 copy LHS
- 1494453558U, // <0,1,u,4>: Cost 2 vext1 <3,0,1,u>, RHS
- 1557452954U, // <0,1,u,5>: Cost 2 vext2 <2,3,0,1>, RHS
- 2754238618U, // <0,1,u,6>: Cost 3 vuzpl <0,4,1,5>, RHS
- 1518343168U, // <0,1,u,7>: Cost 2 vext1 <7,0,1,u>, <7,0,1,u>
- 835584U, // <0,1,u,u>: Cost 0 copy LHS
- 2752299008U, // <0,2,0,0>: Cost 3 vuzpl LHS, <0,0,0,0>
- 1544847462U, // <0,2,0,1>: Cost 2 vext2 <0,2,0,2>, LHS
- 1678557286U, // <0,2,0,2>: Cost 2 vuzpl LHS, LHS
- 2696521165U, // <0,2,0,3>: Cost 3 vext3 <2,0,3,0>, <2,0,3,0>
- 2752340172U, // <0,2,0,4>: Cost 3 vuzpl LHS, <0,2,4,6>
- 2691876326U, // <0,2,0,5>: Cost 3 vext3 <1,2,3,0>, <2,0,5,7>
- 2618589695U, // <0,2,0,6>: Cost 3 vext2 <0,2,0,2>, <0,6,2,7>
- 2592093185U, // <0,2,0,7>: Cost 3 vext1 <7,0,2,0>, <7,0,2,0>
- 1678557340U, // <0,2,0,u>: Cost 2 vuzpl LHS, LHS
- 2618589942U, // <0,2,1,0>: Cost 3 vext2 <0,2,0,2>, <1,0,3,2>
- 2752299828U, // <0,2,1,1>: Cost 3 vuzpl LHS, <1,1,1,1>
- 2886518376U, // <0,2,1,2>: Cost 3 vzipl LHS, <2,2,2,2>
- 2752299766U, // <0,2,1,3>: Cost 3 vuzpl LHS, <1,0,3,2>
- 2550295862U, // <0,2,1,4>: Cost 3 vext1 <0,0,2,1>, RHS
- 2752340992U, // <0,2,1,5>: Cost 3 vuzpl LHS, <1,3,5,7>
- 2886559674U, // <0,2,1,6>: Cost 3 vzipl LHS, <2,6,3,7>
- 3934208106U, // <0,2,1,7>: Cost 4 vuzpr <7,0,1,2>, <0,1,2,7>
- 2752340771U, // <0,2,1,u>: Cost 3 vuzpl LHS, <1,0,u,2>
- 1476558868U, // <0,2,2,0>: Cost 2 vext1 <0,0,2,2>, <0,0,2,2>
- 2226628029U, // <0,2,2,1>: Cost 3 vrev <2,0,1,2>
- 2752300648U, // <0,2,2,2>: Cost 3 vuzpl LHS, <2,2,2,2>
- 3020736114U, // <0,2,2,3>: Cost 3 vtrnl LHS, <2,2,3,3>
- 1476562230U, // <0,2,2,4>: Cost 2 vext1 <0,0,2,2>, RHS
- 2550304464U, // <0,2,2,5>: Cost 3 vext1 <0,0,2,2>, <5,1,7,3>
- 2618591162U, // <0,2,2,6>: Cost 3 vext2 <0,2,0,2>, <2,6,3,7>
- 2550305777U, // <0,2,2,7>: Cost 3 vext1 <0,0,2,2>, <7,0,0,2>
- 1476564782U, // <0,2,2,u>: Cost 2 vext1 <0,0,2,2>, LHS
- 2618591382U, // <0,2,3,0>: Cost 3 vext2 <0,2,0,2>, <3,0,1,2>
- 2752301206U, // <0,2,3,1>: Cost 3 vuzpl LHS, <3,0,1,2>
- 3826043121U, // <0,2,3,2>: Cost 4 vuzpl LHS, <3,1,2,3>
- 2752301468U, // <0,2,3,3>: Cost 3 vuzpl LHS, <3,3,3,3>
- 2618591746U, // <0,2,3,4>: Cost 3 vext2 <0,2,0,2>, <3,4,5,6>
- 2752301570U, // <0,2,3,5>: Cost 3 vuzpl LHS, <3,4,5,6>
- 3830688102U, // <0,2,3,6>: Cost 4 vuzpl LHS, <3,2,6,3>
- 2698807012U, // <0,2,3,7>: Cost 3 vext3 <2,3,7,0>, <2,3,7,0>
- 2752301269U, // <0,2,3,u>: Cost 3 vuzpl LHS, <3,0,u,2>
- 2562261094U, // <0,2,4,0>: Cost 3 vext1 <2,0,2,4>, LHS
- 4095313828U, // <0,2,4,1>: Cost 4 vtrnl <0,2,4,6>, <2,6,1,3>
- 2226718152U, // <0,2,4,2>: Cost 3 vrev <2,0,2,4>
- 2568235169U, // <0,2,4,3>: Cost 3 vext1 <3,0,2,4>, <3,0,2,4>
- 2562264374U, // <0,2,4,4>: Cost 3 vext1 <2,0,2,4>, RHS
- 1544850742U, // <0,2,4,5>: Cost 2 vext2 <0,2,0,2>, RHS
- 1678560566U, // <0,2,4,6>: Cost 2 vuzpl LHS, RHS
- 2592125957U, // <0,2,4,7>: Cost 3 vext1 <7,0,2,4>, <7,0,2,4>
- 1678560584U, // <0,2,4,u>: Cost 2 vuzpl LHS, RHS
- 2691876686U, // <0,2,5,0>: Cost 3 vext3 <1,2,3,0>, <2,5,0,7>
- 2618592976U, // <0,2,5,1>: Cost 3 vext2 <0,2,0,2>, <5,1,7,3>
- 3765618528U, // <0,2,5,2>: Cost 4 vext3 <1,2,3,0>, <2,5,2,7>
- 3765618536U, // <0,2,5,3>: Cost 4 vext3 <1,2,3,0>, <2,5,3,6>
- 2618593222U, // <0,2,5,4>: Cost 3 vext2 <0,2,0,2>, <5,4,7,6>
- 2752303108U, // <0,2,5,5>: Cost 3 vuzpl LHS, <5,5,5,5>
- 2618593378U, // <0,2,5,6>: Cost 3 vext2 <0,2,0,2>, <5,6,7,0>
- 2824785206U, // <0,2,5,7>: Cost 3 vuzpr <1,0,3,2>, RHS
- 2824785207U, // <0,2,5,u>: Cost 3 vuzpr <1,0,3,2>, RHS
- 2752303950U, // <0,2,6,0>: Cost 3 vuzpl LHS, <6,7,0,1>
- 3830690081U, // <0,2,6,1>: Cost 4 vuzpl LHS, <6,0,1,2>
- 2618593786U, // <0,2,6,2>: Cost 3 vext2 <0,2,0,2>, <6,2,7,3>
- 2691876794U, // <0,2,6,3>: Cost 3 vext3 <1,2,3,0>, <2,6,3,7>
- 2752303990U, // <0,2,6,4>: Cost 3 vuzpl LHS, <6,7,4,5>
- 3830690445U, // <0,2,6,5>: Cost 4 vuzpl LHS, <6,4,5,6>
- 2752303928U, // <0,2,6,6>: Cost 3 vuzpl LHS, <6,6,6,6>
- 2657743695U, // <0,2,6,7>: Cost 3 vext2 <6,7,0,2>, <6,7,0,2>
- 2691876839U, // <0,2,6,u>: Cost 3 vext3 <1,2,3,0>, <2,6,u,7>
- 2659070961U, // <0,2,7,0>: Cost 3 vext2 <7,0,0,2>, <7,0,0,2>
- 2659734594U, // <0,2,7,1>: Cost 3 vext2 <7,1,0,2>, <7,1,0,2>
- 3734140051U, // <0,2,7,2>: Cost 4 vext2 <7,2,0,2>, <7,2,0,2>
- 2701166596U, // <0,2,7,3>: Cost 3 vext3 <2,7,3,0>, <2,7,3,0>
- 2662389094U, // <0,2,7,4>: Cost 3 vext2 <7,5,0,2>, <7,4,5,6>
- 2662389126U, // <0,2,7,5>: Cost 3 vext2 <7,5,0,2>, <7,5,0,2>
- 3736794583U, // <0,2,7,6>: Cost 4 vext2 <7,6,0,2>, <7,6,0,2>
- 2752304748U, // <0,2,7,7>: Cost 3 vuzpl LHS, <7,7,7,7>
- 2659070961U, // <0,2,7,u>: Cost 3 vext2 <7,0,0,2>, <7,0,0,2>
- 1476608026U, // <0,2,u,0>: Cost 2 vext1 <0,0,2,u>, <0,0,2,u>
- 1544853294U, // <0,2,u,1>: Cost 2 vext2 <0,2,0,2>, LHS
- 1678563118U, // <0,2,u,2>: Cost 2 vuzpl LHS, LHS
- 3021178482U, // <0,2,u,3>: Cost 3 vtrnl LHS, <2,2,3,3>
- 1476611382U, // <0,2,u,4>: Cost 2 vext1 <0,0,2,u>, RHS
- 1544853658U, // <0,2,u,5>: Cost 2 vext2 <0,2,0,2>, RHS
- 1678563482U, // <0,2,u,6>: Cost 2 vuzpl LHS, RHS
- 2824785449U, // <0,2,u,7>: Cost 3 vuzpr <1,0,3,2>, RHS
- 1678563172U, // <0,2,u,u>: Cost 2 vuzpl LHS, LHS
- 2556329984U, // <0,3,0,0>: Cost 3 vext1 <1,0,3,0>, <0,0,0,0>
- 2686421142U, // <0,3,0,1>: Cost 3 vext3 <0,3,1,0>, <3,0,1,2>
- 2562303437U, // <0,3,0,2>: Cost 3 vext1 <2,0,3,0>, <2,0,3,0>
- 4094986652U, // <0,3,0,3>: Cost 4 vtrnl <0,2,0,2>, <3,3,3,3>
- 2556333366U, // <0,3,0,4>: Cost 3 vext1 <1,0,3,0>, RHS
- 4094986754U, // <0,3,0,5>: Cost 4 vtrnl <0,2,0,2>, <3,4,5,6>
- 3798796488U, // <0,3,0,6>: Cost 4 vext3 <6,7,3,0>, <3,0,6,7>
- 3776530634U, // <0,3,0,7>: Cost 4 vext3 <3,0,7,0>, <3,0,7,0>
- 2556335918U, // <0,3,0,u>: Cost 3 vext1 <1,0,3,0>, LHS
- 2886518934U, // <0,3,1,0>: Cost 3 vzipl LHS, <3,0,1,2>
- 2556338933U, // <0,3,1,1>: Cost 3 vext1 <1,0,3,1>, <1,0,3,1>
- 2691877105U, // <0,3,1,2>: Cost 3 vext3 <1,2,3,0>, <3,1,2,3>
- 2886519196U, // <0,3,1,3>: Cost 3 vzipl LHS, <3,3,3,3>
- 2886519298U, // <0,3,1,4>: Cost 3 vzipl LHS, <3,4,5,6>
- 4095740418U, // <0,3,1,5>: Cost 4 vtrnl <0,3,1,4>, <3,4,5,6>
- 3659944242U, // <0,3,1,6>: Cost 4 vext1 <6,0,3,1>, <6,0,3,1>
- 3769600286U, // <0,3,1,7>: Cost 4 vext3 <1,u,3,0>, <3,1,7,3>
- 2886519582U, // <0,3,1,u>: Cost 3 vzipl LHS, <3,u,1,2>
- 1482604646U, // <0,3,2,0>: Cost 2 vext1 <1,0,3,2>, LHS
- 1482605302U, // <0,3,2,1>: Cost 2 vext1 <1,0,3,2>, <1,0,3,2>
- 2556348008U, // <0,3,2,2>: Cost 3 vext1 <1,0,3,2>, <2,2,2,2>
- 3020736924U, // <0,3,2,3>: Cost 3 vtrnl LHS, <3,3,3,3>
- 1482607926U, // <0,3,2,4>: Cost 2 vext1 <1,0,3,2>, RHS
- 3020737026U, // <0,3,2,5>: Cost 3 vtrnl LHS, <3,4,5,6>
- 2598154746U, // <0,3,2,6>: Cost 3 vext1 <u,0,3,2>, <6,2,7,3>
- 2598155258U, // <0,3,2,7>: Cost 3 vext1 <u,0,3,2>, <7,0,1,2>
- 1482610478U, // <0,3,2,u>: Cost 2 vext1 <1,0,3,2>, LHS
- 3692341398U, // <0,3,3,0>: Cost 4 vext2 <0,2,0,3>, <3,0,1,2>
- 2635851999U, // <0,3,3,1>: Cost 3 vext2 <3,1,0,3>, <3,1,0,3>
- 3636069840U, // <0,3,3,2>: Cost 4 vext1 <2,0,3,3>, <2,0,3,3>
- 2691877276U, // <0,3,3,3>: Cost 3 vext3 <1,2,3,0>, <3,3,3,3>
- 3961522690U, // <0,3,3,4>: Cost 4 vzipl <0,3,1,4>, <3,4,5,6>
- 3826797058U, // <0,3,3,5>: Cost 4 vuzpl <0,2,3,5>, <3,4,5,6>
- 3703622282U, // <0,3,3,6>: Cost 4 vext2 <2,1,0,3>, <3,6,2,7>
- 3769600452U, // <0,3,3,7>: Cost 4 vext3 <1,u,3,0>, <3,3,7,7>
- 2640497430U, // <0,3,3,u>: Cost 3 vext2 <3,u,0,3>, <3,u,0,3>
- 3962194070U, // <0,3,4,0>: Cost 4 vzipl <0,4,1,5>, <3,0,1,2>
- 2232617112U, // <0,3,4,1>: Cost 3 vrev <3,0,1,4>
- 2232690849U, // <0,3,4,2>: Cost 3 vrev <3,0,2,4>
- 4095314332U, // <0,3,4,3>: Cost 4 vtrnl <0,2,4,6>, <3,3,3,3>
- 3962194434U, // <0,3,4,4>: Cost 4 vzipl <0,4,1,5>, <3,4,5,6>
- 2691877378U, // <0,3,4,5>: Cost 3 vext3 <1,2,3,0>, <3,4,5,6>
- 3826765110U, // <0,3,4,6>: Cost 4 vuzpl <0,2,3,1>, RHS
- 3665941518U, // <0,3,4,7>: Cost 4 vext1 <7,0,3,4>, <7,0,3,4>
- 2691877405U, // <0,3,4,u>: Cost 3 vext3 <1,2,3,0>, <3,4,u,6>
- 3630112870U, // <0,3,5,0>: Cost 4 vext1 <1,0,3,5>, LHS
- 3630113526U, // <0,3,5,1>: Cost 4 vext1 <1,0,3,5>, <1,0,3,2>
- 4035199734U, // <0,3,5,2>: Cost 4 vzipr <1,4,0,5>, <1,0,3,2>
- 3769600578U, // <0,3,5,3>: Cost 4 vext3 <1,u,3,0>, <3,5,3,7>
- 2232846516U, // <0,3,5,4>: Cost 3 vrev <3,0,4,5>
- 3779037780U, // <0,3,5,5>: Cost 4 vext3 <3,4,5,0>, <3,5,5,7>
- 2718714461U, // <0,3,5,6>: Cost 3 vext3 <5,6,7,0>, <3,5,6,7>
- 2706106975U, // <0,3,5,7>: Cost 3 vext3 <3,5,7,0>, <3,5,7,0>
- 2233141464U, // <0,3,5,u>: Cost 3 vrev <3,0,u,5>
- 2691877496U, // <0,3,6,0>: Cost 3 vext3 <1,2,3,0>, <3,6,0,7>
- 3727511914U, // <0,3,6,1>: Cost 4 vext2 <6,1,0,3>, <6,1,0,3>
- 3765619338U, // <0,3,6,2>: Cost 4 vext3 <1,2,3,0>, <3,6,2,7>
- 3765619347U, // <0,3,6,3>: Cost 4 vext3 <1,2,3,0>, <3,6,3,7>
- 3765987996U, // <0,3,6,4>: Cost 4 vext3 <1,2,u,0>, <3,6,4,7>
- 3306670270U, // <0,3,6,5>: Cost 4 vrev <3,0,5,6>
- 3792456365U, // <0,3,6,6>: Cost 4 vext3 <5,6,7,0>, <3,6,6,6>
- 2706770608U, // <0,3,6,7>: Cost 3 vext3 <3,6,7,0>, <3,6,7,0>
- 2706844345U, // <0,3,6,u>: Cost 3 vext3 <3,6,u,0>, <3,6,u,0>
- 3769600707U, // <0,3,7,0>: Cost 4 vext3 <1,u,3,0>, <3,7,0,1>
- 2659742787U, // <0,3,7,1>: Cost 3 vext2 <7,1,0,3>, <7,1,0,3>
- 3636102612U, // <0,3,7,2>: Cost 4 vext1 <2,0,3,7>, <2,0,3,7>
- 3769600740U, // <0,3,7,3>: Cost 4 vext3 <1,u,3,0>, <3,7,3,7>
- 3769600747U, // <0,3,7,4>: Cost 4 vext3 <1,u,3,0>, <3,7,4,5>
- 3769600758U, // <0,3,7,5>: Cost 4 vext3 <1,u,3,0>, <3,7,5,7>
- 3659993400U, // <0,3,7,6>: Cost 4 vext1 <6,0,3,7>, <6,0,3,7>
- 3781176065U, // <0,3,7,7>: Cost 4 vext3 <3,7,7,0>, <3,7,7,0>
- 2664388218U, // <0,3,7,u>: Cost 3 vext2 <7,u,0,3>, <7,u,0,3>
- 1482653798U, // <0,3,u,0>: Cost 2 vext1 <1,0,3,u>, LHS
- 1482654460U, // <0,3,u,1>: Cost 2 vext1 <1,0,3,u>, <1,0,3,u>
- 2556397160U, // <0,3,u,2>: Cost 3 vext1 <1,0,3,u>, <2,2,2,2>
- 3021179292U, // <0,3,u,3>: Cost 3 vtrnl LHS, <3,3,3,3>
- 1482657078U, // <0,3,u,4>: Cost 2 vext1 <1,0,3,u>, RHS
- 3021179394U, // <0,3,u,5>: Cost 3 vtrnl LHS, <3,4,5,6>
- 2598203898U, // <0,3,u,6>: Cost 3 vext1 <u,0,3,u>, <6,2,7,3>
- 2708097874U, // <0,3,u,7>: Cost 3 vext3 <3,u,7,0>, <3,u,7,0>
- 1482659630U, // <0,3,u,u>: Cost 2 vext1 <1,0,3,u>, LHS
- 2617278468U, // <0,4,0,0>: Cost 3 vext2 <0,0,0,4>, <0,0,0,4>
- 2618605670U, // <0,4,0,1>: Cost 3 vext2 <0,2,0,4>, LHS
- 2618605734U, // <0,4,0,2>: Cost 3 vext2 <0,2,0,4>, <0,2,0,4>
- 3642091695U, // <0,4,0,3>: Cost 4 vext1 <3,0,4,0>, <3,0,4,0>
- 2753134796U, // <0,4,0,4>: Cost 3 vuzpl <0,2,4,6>, <0,2,4,6>
- 2718714770U, // <0,4,0,5>: Cost 3 vext3 <5,6,7,0>, <4,0,5,1>
- 3021245750U, // <0,4,0,6>: Cost 3 vtrnl <0,2,0,2>, RHS
- 3665982483U, // <0,4,0,7>: Cost 4 vext1 <7,0,4,0>, <7,0,4,0>
- 3021245768U, // <0,4,0,u>: Cost 3 vtrnl <0,2,0,2>, RHS
- 2568355942U, // <0,4,1,0>: Cost 3 vext1 <3,0,4,1>, LHS
- 3692348212U, // <0,4,1,1>: Cost 4 vext2 <0,2,0,4>, <1,1,1,1>
- 3692348310U, // <0,4,1,2>: Cost 4 vext2 <0,2,0,4>, <1,2,3,0>
- 2568358064U, // <0,4,1,3>: Cost 3 vext1 <3,0,4,1>, <3,0,4,1>
- 2568359222U, // <0,4,1,4>: Cost 3 vext1 <3,0,4,1>, RHS
- 1812778294U, // <0,4,1,5>: Cost 2 vzipl LHS, RHS
- 3022671158U, // <0,4,1,6>: Cost 3 vtrnl <0,4,1,5>, RHS
- 2592248852U, // <0,4,1,7>: Cost 3 vext1 <7,0,4,1>, <7,0,4,1>
- 1812778537U, // <0,4,1,u>: Cost 2 vzipl LHS, RHS
- 2568364134U, // <0,4,2,0>: Cost 3 vext1 <3,0,4,2>, LHS
- 2238573423U, // <0,4,2,1>: Cost 3 vrev <4,0,1,2>
- 3692349032U, // <0,4,2,2>: Cost 4 vext2 <0,2,0,4>, <2,2,2,2>
- 2631214761U, // <0,4,2,3>: Cost 3 vext2 <2,3,0,4>, <2,3,0,4>
- 2568367414U, // <0,4,2,4>: Cost 3 vext1 <3,0,4,2>, RHS
- 2887028022U, // <0,4,2,5>: Cost 3 vzipl <0,2,0,2>, RHS
- 1946996022U, // <0,4,2,6>: Cost 2 vtrnl LHS, RHS
- 2592257045U, // <0,4,2,7>: Cost 3 vext1 <7,0,4,2>, <7,0,4,2>
- 1946996040U, // <0,4,2,u>: Cost 2 vtrnl LHS, RHS
- 3692349590U, // <0,4,3,0>: Cost 4 vext2 <0,2,0,4>, <3,0,1,2>
- 3826878614U, // <0,4,3,1>: Cost 4 vuzpl <0,2,4,6>, <3,0,1,2>
- 3826878625U, // <0,4,3,2>: Cost 4 vuzpl <0,2,4,6>, <3,0,2,4>
- 3692349852U, // <0,4,3,3>: Cost 4 vext2 <0,2,0,4>, <3,3,3,3>
- 3692349954U, // <0,4,3,4>: Cost 4 vext2 <0,2,0,4>, <3,4,5,6>
- 3826878978U, // <0,4,3,5>: Cost 4 vuzpl <0,2,4,6>, <3,4,5,6>
- 4095200566U, // <0,4,3,6>: Cost 4 vtrnl <0,2,3,1>, RHS
- 3713583814U, // <0,4,3,7>: Cost 4 vext2 <3,7,0,4>, <3,7,0,4>
- 3692350238U, // <0,4,3,u>: Cost 4 vext2 <0,2,0,4>, <3,u,1,2>
- 2550464552U, // <0,4,4,0>: Cost 3 vext1 <0,0,4,4>, <0,0,4,4>
- 3962194914U, // <0,4,4,1>: Cost 4 vzipl <0,4,1,5>, <4,1,5,0>
- 3693677631U, // <0,4,4,2>: Cost 4 vext2 <0,4,0,4>, <4,2,6,3>
- 3642124467U, // <0,4,4,3>: Cost 4 vext1 <3,0,4,4>, <3,0,4,4>
- 2718715088U, // <0,4,4,4>: Cost 3 vext3 <5,6,7,0>, <4,4,4,4>
- 2618608950U, // <0,4,4,5>: Cost 3 vext2 <0,2,0,4>, RHS
- 2753137974U, // <0,4,4,6>: Cost 3 vuzpl <0,2,4,6>, RHS
- 3666015255U, // <0,4,4,7>: Cost 4 vext1 <7,0,4,4>, <7,0,4,4>
- 2618609193U, // <0,4,4,u>: Cost 3 vext2 <0,2,0,4>, RHS
- 2568388710U, // <0,4,5,0>: Cost 3 vext1 <3,0,4,5>, LHS
- 2568389526U, // <0,4,5,1>: Cost 3 vext1 <3,0,4,5>, <1,2,3,0>
- 3636159963U, // <0,4,5,2>: Cost 4 vext1 <2,0,4,5>, <2,0,4,5>
- 2568390836U, // <0,4,5,3>: Cost 3 vext1 <3,0,4,5>, <3,0,4,5>
- 2568391990U, // <0,4,5,4>: Cost 3 vext1 <3,0,4,5>, RHS
- 2718715180U, // <0,4,5,5>: Cost 3 vext3 <5,6,7,0>, <4,5,5,6>
- 1618136374U, // <0,4,5,6>: Cost 2 vext3 <1,2,3,0>, RHS
- 2592281624U, // <0,4,5,7>: Cost 3 vext1 <7,0,4,5>, <7,0,4,5>
- 1618136392U, // <0,4,5,u>: Cost 2 vext3 <1,2,3,0>, RHS
- 2550480938U, // <0,4,6,0>: Cost 3 vext1 <0,0,4,6>, <0,0,4,6>
- 3826880801U, // <0,4,6,1>: Cost 4 vuzpl <0,2,4,6>, <6,0,1,2>
- 2562426332U, // <0,4,6,2>: Cost 3 vext1 <2,0,4,6>, <2,0,4,6>
- 3786190181U, // <0,4,6,3>: Cost 4 vext3 <4,6,3,0>, <4,6,3,0>
- 2718715252U, // <0,4,6,4>: Cost 3 vext3 <5,6,7,0>, <4,6,4,6>
- 3826881165U, // <0,4,6,5>: Cost 4 vuzpl <0,2,4,6>, <6,4,5,6>
- 2712669568U, // <0,4,6,6>: Cost 3 vext3 <4,6,6,0>, <4,6,6,0>
- 2657760081U, // <0,4,6,7>: Cost 3 vext2 <6,7,0,4>, <6,7,0,4>
- 2718715284U, // <0,4,6,u>: Cost 3 vext3 <5,6,7,0>, <4,6,u,2>
- 3654090854U, // <0,4,7,0>: Cost 4 vext1 <5,0,4,7>, LHS
- 3934229326U, // <0,4,7,1>: Cost 4 vuzpr <7,0,1,4>, <6,7,0,1>
- 3734156437U, // <0,4,7,2>: Cost 4 vext2 <7,2,0,4>, <7,2,0,4>
- 3734820070U, // <0,4,7,3>: Cost 4 vext2 <7,3,0,4>, <7,3,0,4>
- 3654094134U, // <0,4,7,4>: Cost 4 vext1 <5,0,4,7>, RHS
- 2713259464U, // <0,4,7,5>: Cost 3 vext3 <4,7,5,0>, <4,7,5,0>
- 2713333201U, // <0,4,7,6>: Cost 3 vext3 <4,7,6,0>, <4,7,6,0>
- 3654095866U, // <0,4,7,7>: Cost 4 vext1 <5,0,4,7>, <7,0,1,2>
- 2713259464U, // <0,4,7,u>: Cost 3 vext3 <4,7,5,0>, <4,7,5,0>
- 2568413286U, // <0,4,u,0>: Cost 3 vext1 <3,0,4,u>, LHS
- 2618611502U, // <0,4,u,1>: Cost 3 vext2 <0,2,0,4>, LHS
- 2753140526U, // <0,4,u,2>: Cost 3 vuzpl <0,2,4,6>, LHS
- 2568415415U, // <0,4,u,3>: Cost 3 vext1 <3,0,4,u>, <3,0,4,u>
- 2568416566U, // <0,4,u,4>: Cost 3 vext1 <3,0,4,u>, RHS
- 1817423158U, // <0,4,u,5>: Cost 2 vzipl LHS, RHS
- 1947438390U, // <0,4,u,6>: Cost 2 vtrnl LHS, RHS
- 2592306203U, // <0,4,u,7>: Cost 3 vext1 <7,0,4,u>, <7,0,4,u>
- 1947438408U, // <0,4,u,u>: Cost 2 vtrnl LHS, RHS
- 3630219264U, // <0,5,0,0>: Cost 4 vext1 <1,0,5,0>, <0,0,0,0>
- 2625912934U, // <0,5,0,1>: Cost 3 vext2 <1,4,0,5>, LHS
- 3692355748U, // <0,5,0,2>: Cost 4 vext2 <0,2,0,5>, <0,2,0,2>
- 3693019384U, // <0,5,0,3>: Cost 4 vext2 <0,3,0,5>, <0,3,0,5>
- 3630222646U, // <0,5,0,4>: Cost 4 vext1 <1,0,5,0>, RHS
- 3699655062U, // <0,5,0,5>: Cost 4 vext2 <1,4,0,5>, <0,5,0,1>
- 2718715508U, // <0,5,0,6>: Cost 3 vext3 <5,6,7,0>, <5,0,6,1>
- 3087011126U, // <0,5,0,7>: Cost 3 vtrnr <0,0,0,0>, RHS
- 2625913501U, // <0,5,0,u>: Cost 3 vext2 <1,4,0,5>, LHS
- 1500659814U, // <0,5,1,0>: Cost 2 vext1 <4,0,5,1>, LHS
- 2886520528U, // <0,5,1,1>: Cost 3 vzipl LHS, <5,1,7,3>
- 2574403176U, // <0,5,1,2>: Cost 3 vext1 <4,0,5,1>, <2,2,2,2>
- 2574403734U, // <0,5,1,3>: Cost 3 vext1 <4,0,5,1>, <3,0,1,2>
- 1500662674U, // <0,5,1,4>: Cost 2 vext1 <4,0,5,1>, <4,0,5,1>
- 2886520836U, // <0,5,1,5>: Cost 3 vzipl LHS, <5,5,5,5>
- 2886520930U, // <0,5,1,6>: Cost 3 vzipl LHS, <5,6,7,0>
- 2718715600U, // <0,5,1,7>: Cost 3 vext3 <5,6,7,0>, <5,1,7,3>
- 1500665646U, // <0,5,1,u>: Cost 2 vext1 <4,0,5,1>, LHS
- 2556493926U, // <0,5,2,0>: Cost 3 vext1 <1,0,5,2>, LHS
- 2244546120U, // <0,5,2,1>: Cost 3 vrev <5,0,1,2>
- 3692357256U, // <0,5,2,2>: Cost 4 vext2 <0,2,0,5>, <2,2,5,7>
- 2568439994U, // <0,5,2,3>: Cost 3 vext1 <3,0,5,2>, <3,0,5,2>
- 2556497206U, // <0,5,2,4>: Cost 3 vext1 <1,0,5,2>, RHS
- 3020738564U, // <0,5,2,5>: Cost 3 vtrnl LHS, <5,5,5,5>
- 4027877161U, // <0,5,2,6>: Cost 4 vzipr <0,2,0,2>, <2,4,5,6>
- 3093220662U, // <0,5,2,7>: Cost 3 vtrnr <1,0,3,2>, RHS
- 3093220663U, // <0,5,2,u>: Cost 3 vtrnr <1,0,3,2>, RHS
- 3699656854U, // <0,5,3,0>: Cost 4 vext2 <1,4,0,5>, <3,0,1,2>
- 3699656927U, // <0,5,3,1>: Cost 4 vext2 <1,4,0,5>, <3,1,0,3>
- 3699657006U, // <0,5,3,2>: Cost 4 vext2 <1,4,0,5>, <3,2,0,1>
- 3699657116U, // <0,5,3,3>: Cost 4 vext2 <1,4,0,5>, <3,3,3,3>
- 2637859284U, // <0,5,3,4>: Cost 3 vext2 <3,4,0,5>, <3,4,0,5>
- 3790319453U, // <0,5,3,5>: Cost 4 vext3 <5,3,5,0>, <5,3,5,0>
- 3699657354U, // <0,5,3,6>: Cost 4 vext2 <1,4,0,5>, <3,6,2,7>
- 2716725103U, // <0,5,3,7>: Cost 3 vext3 <5,3,7,0>, <5,3,7,0>
- 2716798840U, // <0,5,3,u>: Cost 3 vext3 <5,3,u,0>, <5,3,u,0>
- 2661747602U, // <0,5,4,0>: Cost 3 vext2 <7,4,0,5>, <4,0,5,1>
- 3630252810U, // <0,5,4,1>: Cost 4 vext1 <1,0,5,4>, <1,0,5,4>
- 3636225507U, // <0,5,4,2>: Cost 4 vext1 <2,0,5,4>, <2,0,5,4>
- 3716910172U, // <0,5,4,3>: Cost 4 vext2 <4,3,0,5>, <4,3,0,5>
- 3962195892U, // <0,5,4,4>: Cost 4 vzipl <0,4,1,5>, <5,4,5,6>
- 2625916214U, // <0,5,4,5>: Cost 3 vext2 <1,4,0,5>, RHS
- 3718901071U, // <0,5,4,6>: Cost 4 vext2 <4,6,0,5>, <4,6,0,5>
- 2718715846U, // <0,5,4,7>: Cost 3 vext3 <5,6,7,0>, <5,4,7,6>
- 2625916457U, // <0,5,4,u>: Cost 3 vext2 <1,4,0,5>, RHS
- 3791278034U, // <0,5,5,0>: Cost 4 vext3 <5,5,0,0>, <5,5,0,0>
- 3791351771U, // <0,5,5,1>: Cost 4 vext3 <5,5,1,0>, <5,5,1,0>
- 3318386260U, // <0,5,5,2>: Cost 4 vrev <5,0,2,5>
- 3791499245U, // <0,5,5,3>: Cost 4 vext3 <5,5,3,0>, <5,5,3,0>
- 3318533734U, // <0,5,5,4>: Cost 4 vrev <5,0,4,5>
- 2718715908U, // <0,5,5,5>: Cost 3 vext3 <5,6,7,0>, <5,5,5,5>
- 2657767522U, // <0,5,5,6>: Cost 3 vext2 <6,7,0,5>, <5,6,7,0>
- 2718715928U, // <0,5,5,7>: Cost 3 vext3 <5,6,7,0>, <5,5,7,7>
- 2718715937U, // <0,5,5,u>: Cost 3 vext3 <5,6,7,0>, <5,5,u,7>
- 2592358502U, // <0,5,6,0>: Cost 3 vext1 <7,0,5,6>, LHS
- 3792015404U, // <0,5,6,1>: Cost 4 vext3 <5,6,1,0>, <5,6,1,0>
- 3731509754U, // <0,5,6,2>: Cost 4 vext2 <6,7,0,5>, <6,2,7,3>
- 3785748546U, // <0,5,6,3>: Cost 4 vext3 <4,5,6,0>, <5,6,3,4>
- 2592361782U, // <0,5,6,4>: Cost 3 vext1 <7,0,5,6>, RHS
- 2592362594U, // <0,5,6,5>: Cost 3 vext1 <7,0,5,6>, <5,6,7,0>
- 3785748576U, // <0,5,6,6>: Cost 4 vext3 <4,5,6,0>, <5,6,6,7>
- 1644974178U, // <0,5,6,7>: Cost 2 vext3 <5,6,7,0>, <5,6,7,0>
- 1645047915U, // <0,5,6,u>: Cost 2 vext3 <5,6,u,0>, <5,6,u,0>
- 2562506854U, // <0,5,7,0>: Cost 3 vext1 <2,0,5,7>, LHS
- 2562507670U, // <0,5,7,1>: Cost 3 vext1 <2,0,5,7>, <1,2,3,0>
- 2562508262U, // <0,5,7,2>: Cost 3 vext1 <2,0,5,7>, <2,0,5,7>
- 3636250774U, // <0,5,7,3>: Cost 4 vext1 <2,0,5,7>, <3,0,1,2>
- 2562510134U, // <0,5,7,4>: Cost 3 vext1 <2,0,5,7>, RHS
- 2718716072U, // <0,5,7,5>: Cost 3 vext3 <5,6,7,0>, <5,7,5,7>
- 2718716074U, // <0,5,7,6>: Cost 3 vext3 <5,6,7,0>, <5,7,6,0>
- 2719379635U, // <0,5,7,7>: Cost 3 vext3 <5,7,7,0>, <5,7,7,0>
- 2562512686U, // <0,5,7,u>: Cost 3 vext1 <2,0,5,7>, LHS
- 1500717158U, // <0,5,u,0>: Cost 2 vext1 <4,0,5,u>, LHS
- 2625918766U, // <0,5,u,1>: Cost 3 vext2 <1,4,0,5>, LHS
- 2719674583U, // <0,5,u,2>: Cost 3 vext3 <5,u,2,0>, <5,u,2,0>
- 2568489152U, // <0,5,u,3>: Cost 3 vext1 <3,0,5,u>, <3,0,5,u>
- 1500720025U, // <0,5,u,4>: Cost 2 vext1 <4,0,5,u>, <4,0,5,u>
- 2625919130U, // <0,5,u,5>: Cost 3 vext2 <1,4,0,5>, RHS
- 2586407243U, // <0,5,u,6>: Cost 3 vext1 <6,0,5,u>, <6,0,5,u>
- 1646301444U, // <0,5,u,7>: Cost 2 vext3 <5,u,7,0>, <5,u,7,0>
- 1646375181U, // <0,5,u,u>: Cost 2 vext3 <5,u,u,0>, <5,u,u,0>
- 2586411110U, // <0,6,0,0>: Cost 3 vext1 <6,0,6,0>, LHS
- 2619949158U, // <0,6,0,1>: Cost 3 vext2 <0,4,0,6>, LHS
- 2619949220U, // <0,6,0,2>: Cost 3 vext2 <0,4,0,6>, <0,2,0,2>
- 3785748789U, // <0,6,0,3>: Cost 4 vext3 <4,5,6,0>, <6,0,3,4>
- 2619949386U, // <0,6,0,4>: Cost 3 vext2 <0,4,0,6>, <0,4,0,6>
- 2586415202U, // <0,6,0,5>: Cost 3 vext1 <6,0,6,0>, <5,6,7,0>
- 2586415436U, // <0,6,0,6>: Cost 3 vext1 <6,0,6,0>, <6,0,6,0>
- 2952793398U, // <0,6,0,7>: Cost 3 vzipr <0,0,0,0>, RHS
- 2619949725U, // <0,6,0,u>: Cost 3 vext2 <0,4,0,6>, LHS
- 2562531430U, // <0,6,1,0>: Cost 3 vext1 <2,0,6,1>, LHS
- 3693691700U, // <0,6,1,1>: Cost 4 vext2 <0,4,0,6>, <1,1,1,1>
- 2886521338U, // <0,6,1,2>: Cost 3 vzipl LHS, <6,2,7,3>
- 3693691864U, // <0,6,1,3>: Cost 4 vext2 <0,4,0,6>, <1,3,1,3>
- 2562534710U, // <0,6,1,4>: Cost 3 vext1 <2,0,6,1>, RHS
- 2580450932U, // <0,6,1,5>: Cost 3 vext1 <5,0,6,1>, <5,0,6,1>
- 2886521656U, // <0,6,1,6>: Cost 3 vzipl LHS, <6,6,6,6>
- 2966736182U, // <0,6,1,7>: Cost 3 vzipr <2,3,0,1>, RHS
- 2966736183U, // <0,6,1,u>: Cost 3 vzipr <2,3,0,1>, RHS
- 1500741734U, // <0,6,2,0>: Cost 2 vext1 <4,0,6,2>, LHS
- 2250518817U, // <0,6,2,1>: Cost 3 vrev <6,0,1,2>
- 2574485096U, // <0,6,2,2>: Cost 3 vext1 <4,0,6,2>, <2,2,2,2>
- 2631894694U, // <0,6,2,3>: Cost 3 vext2 <2,4,0,6>, <2,3,0,1>
- 1500744604U, // <0,6,2,4>: Cost 2 vext1 <4,0,6,2>, <4,0,6,2>
- 2574487248U, // <0,6,2,5>: Cost 3 vext1 <4,0,6,2>, <5,1,7,3>
- 3020739384U, // <0,6,2,6>: Cost 3 vtrnl LHS, <6,6,6,6>
- 2954136886U, // <0,6,2,7>: Cost 3 vzipr <0,2,0,2>, RHS
- 1500747566U, // <0,6,2,u>: Cost 2 vext1 <4,0,6,2>, LHS
- 3693693078U, // <0,6,3,0>: Cost 4 vext2 <0,4,0,6>, <3,0,1,2>
- 3705637136U, // <0,6,3,1>: Cost 4 vext2 <2,4,0,6>, <3,1,5,7>
- 3705637192U, // <0,6,3,2>: Cost 4 vext2 <2,4,0,6>, <3,2,3,0>
- 3693693340U, // <0,6,3,3>: Cost 4 vext2 <0,4,0,6>, <3,3,3,3>
- 2637867477U, // <0,6,3,4>: Cost 3 vext2 <3,4,0,6>, <3,4,0,6>
- 3705637424U, // <0,6,3,5>: Cost 4 vext2 <2,4,0,6>, <3,5,1,7>
- 3666154056U, // <0,6,3,6>: Cost 4 vext1 <7,0,6,3>, <6,3,7,0>
- 2722697800U, // <0,6,3,7>: Cost 3 vext3 <6,3,7,0>, <6,3,7,0>
- 2722771537U, // <0,6,3,u>: Cost 3 vext3 <6,3,u,0>, <6,3,u,0>
- 2562556006U, // <0,6,4,0>: Cost 3 vext1 <2,0,6,4>, LHS
- 4095316257U, // <0,6,4,1>: Cost 4 vtrnl <0,2,4,6>, <6,0,1,2>
- 2562557420U, // <0,6,4,2>: Cost 3 vext1 <2,0,6,4>, <2,0,6,4>
- 3636299926U, // <0,6,4,3>: Cost 4 vext1 <2,0,6,4>, <3,0,1,2>
- 2562559286U, // <0,6,4,4>: Cost 3 vext1 <2,0,6,4>, RHS
- 2619952438U, // <0,6,4,5>: Cost 3 vext2 <0,4,0,6>, RHS
- 2723287696U, // <0,6,4,6>: Cost 3 vext3 <6,4,6,0>, <6,4,6,0>
- 4027895094U, // <0,6,4,7>: Cost 4 vzipr <0,2,0,4>, RHS
- 2619952681U, // <0,6,4,u>: Cost 3 vext2 <0,4,0,6>, RHS
- 2718716594U, // <0,6,5,0>: Cost 3 vext3 <5,6,7,0>, <6,5,0,7>
- 3648250774U, // <0,6,5,1>: Cost 4 vext1 <4,0,6,5>, <1,2,3,0>
- 3792458436U, // <0,6,5,2>: Cost 4 vext3 <5,6,7,0>, <6,5,2,7>
- 3705638767U, // <0,6,5,3>: Cost 5 vext2 <2,4,0,6>, <5,3,7,0>
- 3648252831U, // <0,6,5,4>: Cost 4 vext1 <4,0,6,5>, <4,0,6,5>
- 3797619416U, // <0,6,5,5>: Cost 4 vext3 <6,5,5,0>, <6,5,5,0>
- 3792458472U, // <0,6,5,6>: Cost 4 vext3 <5,6,7,0>, <6,5,6,7>
- 4035202358U, // <0,6,5,7>: Cost 4 vzipr <1,4,0,5>, RHS
- 2718716594U, // <0,6,5,u>: Cost 3 vext3 <5,6,7,0>, <6,5,0,7>
- 3786412796U, // <0,6,6,0>: Cost 4 vext3 <4,6,6,0>, <6,6,0,0>
- 3792458504U, // <0,6,6,1>: Cost 4 vext3 <5,6,7,0>, <6,6,1,3>
- 3728200126U, // <0,6,6,2>: Cost 4 vext2 <6,2,0,6>, <6,2,0,6>
- 3798135575U, // <0,6,6,3>: Cost 4 vext3 <6,6,3,0>, <6,6,3,0>
- 3786412836U, // <0,6,6,4>: Cost 4 vext3 <4,6,6,0>, <6,6,4,4>
- 3792458543U, // <0,6,6,5>: Cost 4 vext3 <5,6,7,0>, <6,6,5,6>
- 2718716728U, // <0,6,6,6>: Cost 3 vext3 <5,6,7,0>, <6,6,6,6>
- 2718716738U, // <0,6,6,7>: Cost 3 vext3 <5,6,7,0>, <6,6,7,7>
- 2718716747U, // <0,6,6,u>: Cost 3 vext3 <5,6,7,0>, <6,6,u,7>
- 2718716750U, // <0,6,7,0>: Cost 3 vext3 <5,6,7,0>, <6,7,0,1>
- 2724909910U, // <0,6,7,1>: Cost 3 vext3 <6,7,1,0>, <6,7,1,0>
- 3636323823U, // <0,6,7,2>: Cost 4 vext1 <2,0,6,7>, <2,0,6,7>
- 2725057384U, // <0,6,7,3>: Cost 3 vext3 <6,7,3,0>, <6,7,3,0>
- 2718716790U, // <0,6,7,4>: Cost 3 vext3 <5,6,7,0>, <6,7,4,5>
- 2718716800U, // <0,6,7,5>: Cost 3 vext3 <5,6,7,0>, <6,7,5,6>
- 3792458629U, // <0,6,7,6>: Cost 4 vext3 <5,6,7,0>, <6,7,6,2>
- 2725352332U, // <0,6,7,7>: Cost 3 vext3 <6,7,7,0>, <6,7,7,0>
- 2718716822U, // <0,6,7,u>: Cost 3 vext3 <5,6,7,0>, <6,7,u,1>
- 1500790886U, // <0,6,u,0>: Cost 2 vext1 <4,0,6,u>, LHS
- 2619954990U, // <0,6,u,1>: Cost 3 vext2 <0,4,0,6>, LHS
- 2562590192U, // <0,6,u,2>: Cost 3 vext1 <2,0,6,u>, <2,0,6,u>
- 2725721017U, // <0,6,u,3>: Cost 3 vext3 <6,u,3,0>, <6,u,3,0>
- 1500793762U, // <0,6,u,4>: Cost 2 vext1 <4,0,6,u>, <4,0,6,u>
- 2619955354U, // <0,6,u,5>: Cost 3 vext2 <0,4,0,6>, RHS
- 2725942228U, // <0,6,u,6>: Cost 3 vext3 <6,u,6,0>, <6,u,6,0>
- 2954186038U, // <0,6,u,7>: Cost 3 vzipr <0,2,0,u>, RHS
- 1500796718U, // <0,6,u,u>: Cost 2 vext1 <4,0,6,u>, LHS
- 2256401391U, // <0,7,0,0>: Cost 3 vrev <7,0,0,0>
- 2632564838U, // <0,7,0,1>: Cost 3 vext2 <2,5,0,7>, LHS
- 2256548865U, // <0,7,0,2>: Cost 3 vrev <7,0,2,0>
- 3700998396U, // <0,7,0,3>: Cost 4 vext2 <1,6,0,7>, <0,3,1,0>
- 2718716952U, // <0,7,0,4>: Cost 3 vext3 <5,6,7,0>, <7,0,4,5>
- 2718716962U, // <0,7,0,5>: Cost 3 vext3 <5,6,7,0>, <7,0,5,6>
- 2621284845U, // <0,7,0,6>: Cost 3 vext2 <0,6,0,7>, <0,6,0,7>
- 3904685542U, // <0,7,0,7>: Cost 4 vuzpr <2,0,5,7>, <2,0,5,7>
- 2632565405U, // <0,7,0,u>: Cost 3 vext2 <2,5,0,7>, LHS
- 2256409584U, // <0,7,1,0>: Cost 3 vrev <7,0,0,1>
- 3706307380U, // <0,7,1,1>: Cost 4 vext2 <2,5,0,7>, <1,1,1,1>
- 2632565654U, // <0,7,1,2>: Cost 3 vext2 <2,5,0,7>, <1,2,3,0>
- 3769603168U, // <0,7,1,3>: Cost 4 vext3 <1,u,3,0>, <7,1,3,5>
- 2256704532U, // <0,7,1,4>: Cost 3 vrev <7,0,4,1>
- 3769603184U, // <0,7,1,5>: Cost 4 vext3 <1,u,3,0>, <7,1,5,3>
- 3700999366U, // <0,7,1,6>: Cost 4 vext2 <1,6,0,7>, <1,6,0,7>
- 2886522476U, // <0,7,1,7>: Cost 3 vzipl LHS, <7,7,7,7>
- 2256999480U, // <0,7,1,u>: Cost 3 vrev <7,0,u,1>
- 2586501222U, // <0,7,2,0>: Cost 3 vext1 <6,0,7,2>, LHS
- 1182749690U, // <0,7,2,1>: Cost 2 vrev <7,0,1,2>
- 3636356595U, // <0,7,2,2>: Cost 4 vext1 <2,0,7,2>, <2,0,7,2>
- 2727711916U, // <0,7,2,3>: Cost 3 vext3 <7,2,3,0>, <7,2,3,0>
- 2586504502U, // <0,7,2,4>: Cost 3 vext1 <6,0,7,2>, RHS
- 2632566606U, // <0,7,2,5>: Cost 3 vext2 <2,5,0,7>, <2,5,0,7>
- 2586505559U, // <0,7,2,6>: Cost 3 vext1 <6,0,7,2>, <6,0,7,2>
- 3020740204U, // <0,7,2,7>: Cost 3 vtrnl LHS, <7,7,7,7>
- 1183265849U, // <0,7,2,u>: Cost 2 vrev <7,0,u,2>
- 3701000342U, // <0,7,3,0>: Cost 4 vext2 <1,6,0,7>, <3,0,1,2>
- 3706308849U, // <0,7,3,1>: Cost 4 vext2 <2,5,0,7>, <3,1,2,3>
- 3330315268U, // <0,7,3,2>: Cost 4 vrev <7,0,2,3>
- 3706309020U, // <0,7,3,3>: Cost 4 vext2 <2,5,0,7>, <3,3,3,3>
- 3706309122U, // <0,7,3,4>: Cost 4 vext2 <2,5,0,7>, <3,4,5,6>
- 3712281127U, // <0,7,3,5>: Cost 4 vext2 <3,5,0,7>, <3,5,0,7>
- 2639202936U, // <0,7,3,6>: Cost 3 vext2 <3,6,0,7>, <3,6,0,7>
- 3802412321U, // <0,7,3,7>: Cost 4 vext3 <7,3,7,0>, <7,3,7,0>
- 2640530202U, // <0,7,3,u>: Cost 3 vext2 <3,u,0,7>, <3,u,0,7>
- 3654287462U, // <0,7,4,0>: Cost 4 vext1 <5,0,7,4>, LHS
- 2256507900U, // <0,7,4,1>: Cost 3 vrev <7,0,1,4>
- 2256581637U, // <0,7,4,2>: Cost 3 vrev <7,0,2,4>
- 3660262008U, // <0,7,4,3>: Cost 4 vext1 <6,0,7,4>, <3,6,0,7>
- 3786413405U, // <0,7,4,4>: Cost 4 vext3 <4,6,6,0>, <7,4,4,6>
- 2632568118U, // <0,7,4,5>: Cost 3 vext2 <2,5,0,7>, RHS
- 3718917457U, // <0,7,4,6>: Cost 4 vext2 <4,6,0,7>, <4,6,0,7>
- 3787003255U, // <0,7,4,7>: Cost 4 vext3 <4,7,5,0>, <7,4,7,5>
- 2632568361U, // <0,7,4,u>: Cost 3 vext2 <2,5,0,7>, RHS
- 3706310268U, // <0,7,5,0>: Cost 4 vext2 <2,5,0,7>, <5,0,7,0>
- 3792459156U, // <0,7,5,1>: Cost 4 vext3 <5,6,7,0>, <7,5,1,7>
- 3330331654U, // <0,7,5,2>: Cost 4 vrev <7,0,2,5>
- 3722899255U, // <0,7,5,3>: Cost 4 vext2 <5,3,0,7>, <5,3,0,7>
- 2256737304U, // <0,7,5,4>: Cost 3 vrev <7,0,4,5>
- 3724226521U, // <0,7,5,5>: Cost 4 vext2 <5,5,0,7>, <5,5,0,7>
- 2718717377U, // <0,7,5,6>: Cost 3 vext3 <5,6,7,0>, <7,5,6,7>
- 2729997763U, // <0,7,5,7>: Cost 3 vext3 <7,5,7,0>, <7,5,7,0>
- 2720044499U, // <0,7,5,u>: Cost 3 vext3 <5,u,7,0>, <7,5,u,7>
- 3712946517U, // <0,7,6,0>: Cost 4 vext2 <3,6,0,7>, <6,0,7,0>
- 2256524286U, // <0,7,6,1>: Cost 3 vrev <7,0,1,6>
- 3792459246U, // <0,7,6,2>: Cost 4 vext3 <5,6,7,0>, <7,6,2,7>
- 3796440567U, // <0,7,6,3>: Cost 4 vext3 <6,3,7,0>, <7,6,3,7>
- 3654307126U, // <0,7,6,4>: Cost 4 vext1 <5,0,7,6>, RHS
- 2656457394U, // <0,7,6,5>: Cost 3 vext2 <6,5,0,7>, <6,5,0,7>
- 3792459281U, // <0,7,6,6>: Cost 4 vext3 <5,6,7,0>, <7,6,6,6>
- 2730661396U, // <0,7,6,7>: Cost 3 vext3 <7,6,7,0>, <7,6,7,0>
- 2658448293U, // <0,7,6,u>: Cost 3 vext2 <6,u,0,7>, <6,u,0,7>
- 3787003431U, // <0,7,7,0>: Cost 4 vext3 <4,7,5,0>, <7,7,0,1>
- 3654312854U, // <0,7,7,1>: Cost 4 vext1 <5,0,7,7>, <1,2,3,0>
- 3654313446U, // <0,7,7,2>: Cost 4 vext1 <5,0,7,7>, <2,0,5,7>
- 3804771905U, // <0,7,7,3>: Cost 4 vext3 <7,7,3,0>, <7,7,3,0>
- 3654315318U, // <0,7,7,4>: Cost 4 vext1 <5,0,7,7>, RHS
- 3654315651U, // <0,7,7,5>: Cost 4 vext1 <5,0,7,7>, <5,0,7,7>
- 3660288348U, // <0,7,7,6>: Cost 4 vext1 <6,0,7,7>, <6,0,7,7>
- 2718717548U, // <0,7,7,7>: Cost 3 vext3 <5,6,7,0>, <7,7,7,7>
- 2664420990U, // <0,7,7,u>: Cost 3 vext2 <7,u,0,7>, <7,u,0,7>
- 2256466935U, // <0,7,u,0>: Cost 3 vrev <7,0,0,u>
- 1182798848U, // <0,7,u,1>: Cost 2 vrev <7,0,1,u>
- 2256614409U, // <0,7,u,2>: Cost 3 vrev <7,0,2,u>
- 2731693714U, // <0,7,u,3>: Cost 3 vext3 <7,u,3,0>, <7,u,3,0>
- 2256761883U, // <0,7,u,4>: Cost 3 vrev <7,0,4,u>
- 2632571034U, // <0,7,u,5>: Cost 3 vext2 <2,5,0,7>, RHS
- 2669066421U, // <0,7,u,6>: Cost 3 vext2 <u,6,0,7>, <u,6,0,7>
- 2731988662U, // <0,7,u,7>: Cost 3 vext3 <7,u,7,0>, <7,u,7,0>
- 1183315007U, // <0,7,u,u>: Cost 2 vrev <7,0,u,u>
- 135053414U, // <0,u,0,0>: Cost 1 vdup0 LHS
- 1544896614U, // <0,u,0,1>: Cost 2 vext2 <0,2,0,u>, LHS
- 1678999654U, // <0,u,0,2>: Cost 2 vuzpl LHS, LHS
- 2691880677U, // <0,u,0,3>: Cost 3 vext3 <1,2,3,0>, <u,0,3,2>
- 1476988214U, // <0,u,0,4>: Cost 2 vext1 <0,0,u,0>, RHS
- 2718791419U, // <0,u,0,5>: Cost 3 vext3 <5,6,u,0>, <u,0,5,6>
- 3021248666U, // <0,u,0,6>: Cost 3 vtrnl <0,2,0,2>, RHS
- 2592535607U, // <0,u,0,7>: Cost 3 vext1 <7,0,u,0>, <7,0,u,0>
- 135053414U, // <0,u,0,u>: Cost 1 vdup0 LHS
- 1476993097U, // <0,u,1,0>: Cost 2 vext1 <0,0,u,1>, <0,0,u,1>
- 1812780846U, // <0,u,1,1>: Cost 2 vzipl LHS, LHS
- 1618138926U, // <0,u,1,2>: Cost 2 vext3 <1,2,3,0>, LHS
- 2752742134U, // <0,u,1,3>: Cost 3 vuzpl LHS, <1,0,3,2>
- 1476996406U, // <0,u,1,4>: Cost 2 vext1 <0,0,u,1>, RHS
- 1812781210U, // <0,u,1,5>: Cost 2 vzipl LHS, RHS
- 2887006416U, // <0,u,1,6>: Cost 3 vzipl LHS, <u,6,3,7>
- 2966736200U, // <0,u,1,7>: Cost 3 vzipr <2,3,0,1>, RHS
- 1812781413U, // <0,u,1,u>: Cost 2 vzipl LHS, LHS
- 1482973286U, // <0,u,2,0>: Cost 2 vext1 <1,0,u,2>, LHS
- 1482973987U, // <0,u,2,1>: Cost 2 vext1 <1,0,u,2>, <1,0,u,2>
- 1946998574U, // <0,u,2,2>: Cost 2 vtrnl LHS, LHS
- 835584U, // <0,u,2,3>: Cost 0 copy LHS
- 1482976566U, // <0,u,2,4>: Cost 2 vext1 <1,0,u,2>, RHS
- 3020781631U, // <0,u,2,5>: Cost 3 vtrnl LHS, <u,4,5,6>
- 1946998938U, // <0,u,2,6>: Cost 2 vtrnl LHS, RHS
- 1518810169U, // <0,u,2,7>: Cost 2 vext1 <7,0,u,2>, <7,0,u,2>
- 835584U, // <0,u,2,u>: Cost 0 copy LHS
- 2618640534U, // <0,u,3,0>: Cost 3 vext2 <0,2,0,u>, <3,0,1,2>
- 2752743574U, // <0,u,3,1>: Cost 3 vuzpl LHS, <3,0,1,2>
- 2636556597U, // <0,u,3,2>: Cost 3 vext2 <3,2,0,u>, <3,2,0,u>
- 2752743836U, // <0,u,3,3>: Cost 3 vuzpl LHS, <3,3,3,3>
- 2618640898U, // <0,u,3,4>: Cost 3 vext2 <0,2,0,u>, <3,4,5,6>
- 2752743938U, // <0,u,3,5>: Cost 3 vuzpl LHS, <3,4,5,6>
- 2639202936U, // <0,u,3,6>: Cost 3 vext2 <3,6,0,7>, <3,6,0,7>
- 2639874762U, // <0,u,3,7>: Cost 3 vext2 <3,7,0,u>, <3,7,0,u>
- 2752743637U, // <0,u,3,u>: Cost 3 vuzpl LHS, <3,0,u,2>
- 2562703462U, // <0,u,4,0>: Cost 3 vext1 <2,0,u,4>, LHS
- 2888455982U, // <0,u,4,1>: Cost 3 vzipl <0,4,1,5>, LHS
- 3021575982U, // <0,u,4,2>: Cost 3 vtrnl <0,2,4,6>, LHS
- 2568677591U, // <0,u,4,3>: Cost 3 vext1 <3,0,u,4>, <3,0,u,4>
- 2562706742U, // <0,u,4,4>: Cost 3 vext1 <2,0,u,4>, RHS
- 1544899894U, // <0,u,4,5>: Cost 2 vext2 <0,2,0,u>, RHS
- 1679002934U, // <0,u,4,6>: Cost 2 vuzpl LHS, RHS
- 2718718033U, // <0,u,4,7>: Cost 3 vext3 <5,6,7,0>, <u,4,7,6>
- 1679002952U, // <0,u,4,u>: Cost 2 vuzpl LHS, RHS
- 2568683622U, // <0,u,5,0>: Cost 3 vext1 <3,0,u,5>, LHS
- 2568684438U, // <0,u,5,1>: Cost 3 vext1 <3,0,u,5>, <1,2,3,0>
- 3765622902U, // <0,u,5,2>: Cost 4 vext3 <1,2,3,0>, <u,5,2,7>
- 2691881087U, // <0,u,5,3>: Cost 3 vext3 <1,2,3,0>, <u,5,3,7>
- 2568686902U, // <0,u,5,4>: Cost 3 vext1 <3,0,u,5>, RHS
- 2650492890U, // <0,u,5,5>: Cost 3 vext2 <5,5,0,u>, <5,5,0,u>
- 1618139290U, // <0,u,5,6>: Cost 2 vext3 <1,2,3,0>, RHS
- 2824834358U, // <0,u,5,7>: Cost 3 vuzpr <1,0,3,u>, RHS
- 1618139308U, // <0,u,5,u>: Cost 2 vext3 <1,2,3,0>, RHS
- 2592579686U, // <0,u,6,0>: Cost 3 vext1 <7,0,u,6>, LHS
- 2262496983U, // <0,u,6,1>: Cost 3 vrev <u,0,1,6>
- 2654474688U, // <0,u,6,2>: Cost 3 vext2 <6,2,0,u>, <6,2,0,u>
- 2691881168U, // <0,u,6,3>: Cost 3 vext3 <1,2,3,0>, <u,6,3,7>
- 2592582966U, // <0,u,6,4>: Cost 3 vext1 <7,0,u,6>, RHS
- 2656465587U, // <0,u,6,5>: Cost 3 vext2 <6,5,0,u>, <6,5,0,u>
- 2657129220U, // <0,u,6,6>: Cost 3 vext2 <6,6,0,u>, <6,6,0,u>
- 1584051029U, // <0,u,6,7>: Cost 2 vext2 <6,7,0,u>, <6,7,0,u>
- 1584714662U, // <0,u,6,u>: Cost 2 vext2 <6,u,0,u>, <6,u,0,u>
- 2562728038U, // <0,u,7,0>: Cost 3 vext1 <2,0,u,7>, LHS
- 2562728854U, // <0,u,7,1>: Cost 3 vext1 <2,0,u,7>, <1,2,3,0>
- 2562729473U, // <0,u,7,2>: Cost 3 vext1 <2,0,u,7>, <2,0,u,7>
- 2661111018U, // <0,u,7,3>: Cost 3 vext2 <7,3,0,u>, <7,3,0,u>
- 2562731318U, // <0,u,7,4>: Cost 3 vext1 <2,0,u,7>, RHS
- 2718718258U, // <0,u,7,5>: Cost 3 vext3 <5,6,7,0>, <u,7,5,6>
- 2586620261U, // <0,u,7,6>: Cost 3 vext1 <6,0,u,7>, <6,0,u,7>
- 2657793644U, // <0,u,7,7>: Cost 3 vext2 <6,7,0,u>, <7,7,7,7>
- 2562733870U, // <0,u,7,u>: Cost 3 vext1 <2,0,u,7>, LHS
- 135053414U, // <0,u,u,0>: Cost 1 vdup0 LHS
- 1544902446U, // <0,u,u,1>: Cost 2 vext2 <0,2,0,u>, LHS
- 1679005486U, // <0,u,u,2>: Cost 2 vuzpl LHS, LHS
- 835584U, // <0,u,u,3>: Cost 0 copy LHS
- 1483025718U, // <0,u,u,4>: Cost 2 vext1 <1,0,u,u>, RHS
- 1544902810U, // <0,u,u,5>: Cost 2 vext2 <0,2,0,u>, RHS
- 1679005850U, // <0,u,u,6>: Cost 2 vuzpl LHS, RHS
- 1518859327U, // <0,u,u,7>: Cost 2 vext1 <7,0,u,u>, <7,0,u,u>
- 835584U, // <0,u,u,u>: Cost 0 copy LHS
- 2689744896U, // <1,0,0,0>: Cost 3 vext3 <0,u,1,1>, <0,0,0,0>
- 1610694666U, // <1,0,0,1>: Cost 2 vext3 <0,0,1,1>, <0,0,1,1>
- 2689744916U, // <1,0,0,2>: Cost 3 vext3 <0,u,1,1>, <0,0,2,2>
- 2619310332U, // <1,0,0,3>: Cost 3 vext2 <0,3,1,0>, <0,3,1,0>
- 2684657701U, // <1,0,0,4>: Cost 3 vext3 <0,0,4,1>, <0,0,4,1>
- 2620637598U, // <1,0,0,5>: Cost 3 vext2 <0,5,1,0>, <0,5,1,0>
- 3708977654U, // <1,0,0,6>: Cost 4 vext2 <3,0,1,0>, <0,6,1,7>
- 3666351168U, // <1,0,0,7>: Cost 4 vext1 <7,1,0,0>, <7,1,0,0>
- 1611210825U, // <1,0,0,u>: Cost 2 vext3 <0,0,u,1>, <0,0,u,1>
- 2556780646U, // <1,0,1,0>: Cost 3 vext1 <1,1,0,1>, LHS
- 2556781355U, // <1,0,1,1>: Cost 3 vext1 <1,1,0,1>, <1,1,0,1>
- 1616003174U, // <1,0,1,2>: Cost 2 vext3 <0,u,1,1>, LHS
- 3693052888U, // <1,0,1,3>: Cost 4 vext2 <0,3,1,0>, <1,3,1,3>
- 2556783926U, // <1,0,1,4>: Cost 3 vext1 <1,1,0,1>, RHS
- 2580672143U, // <1,0,1,5>: Cost 3 vext1 <5,1,0,1>, <5,1,0,1>
- 2724839566U, // <1,0,1,6>: Cost 3 vext3 <6,7,0,1>, <0,1,6,7>
- 3654415354U, // <1,0,1,7>: Cost 4 vext1 <5,1,0,1>, <7,0,1,2>
- 1616003228U, // <1,0,1,u>: Cost 2 vext3 <0,u,1,1>, LHS
- 2685690019U, // <1,0,2,0>: Cost 3 vext3 <0,2,0,1>, <0,2,0,1>
- 2685763756U, // <1,0,2,1>: Cost 3 vext3 <0,2,1,1>, <0,2,1,1>
- 2698297524U, // <1,0,2,2>: Cost 3 vext3 <2,3,0,1>, <0,2,2,0>
- 2685911230U, // <1,0,2,3>: Cost 3 vext3 <0,2,3,1>, <0,2,3,1>
- 2689745100U, // <1,0,2,4>: Cost 3 vext3 <0,u,1,1>, <0,2,4,6>
- 3764814038U, // <1,0,2,5>: Cost 4 vext3 <1,1,1,1>, <0,2,5,7>
- 2724839640U, // <1,0,2,6>: Cost 3 vext3 <6,7,0,1>, <0,2,6,0>
- 2592625658U, // <1,0,2,7>: Cost 3 vext1 <7,1,0,2>, <7,0,1,2>
- 2686279915U, // <1,0,2,u>: Cost 3 vext3 <0,2,u,1>, <0,2,u,1>
- 3087843328U, // <1,0,3,0>: Cost 3 vtrnr LHS, <0,0,0,0>
- 3087843338U, // <1,0,3,1>: Cost 3 vtrnr LHS, <0,0,1,1>
- 67944550U, // <1,0,3,2>: Cost 1 vrev LHS
- 2568743135U, // <1,0,3,3>: Cost 3 vext1 <3,1,0,3>, <3,1,0,3>
- 2562772278U, // <1,0,3,4>: Cost 3 vext1 <2,1,0,3>, RHS
- 4099850454U, // <1,0,3,5>: Cost 4 vtrnl <1,0,3,2>, <0,2,5,7>
- 3704998538U, // <1,0,3,6>: Cost 4 vext2 <2,3,1,0>, <3,6,2,7>
- 2592633923U, // <1,0,3,7>: Cost 3 vext1 <7,1,0,3>, <7,1,0,3>
- 68386972U, // <1,0,3,u>: Cost 1 vrev LHS
- 2620640146U, // <1,0,4,0>: Cost 3 vext2 <0,5,1,0>, <4,0,5,1>
- 2689745234U, // <1,0,4,1>: Cost 3 vext3 <0,u,1,1>, <0,4,1,5>
- 2689745244U, // <1,0,4,2>: Cost 3 vext3 <0,u,1,1>, <0,4,2,6>
- 3760980320U, // <1,0,4,3>: Cost 4 vext3 <0,4,3,1>, <0,4,3,1>
- 3761054057U, // <1,0,4,4>: Cost 4 vext3 <0,4,4,1>, <0,4,4,1>
- 2619313462U, // <1,0,4,5>: Cost 3 vext2 <0,3,1,0>, RHS
- 3761201531U, // <1,0,4,6>: Cost 4 vext3 <0,4,6,1>, <0,4,6,1>
- 3666383940U, // <1,0,4,7>: Cost 4 vext1 <7,1,0,4>, <7,1,0,4>
- 2619313705U, // <1,0,4,u>: Cost 3 vext2 <0,3,1,0>, RHS
- 4029300736U, // <1,0,5,0>: Cost 4 vzipr <0,4,1,5>, <0,0,0,0>
- 2895249510U, // <1,0,5,1>: Cost 3 vzipl <1,5,3,7>, LHS
- 3028287590U, // <1,0,5,2>: Cost 3 vtrnl <1,3,5,7>, LHS
- 3642501345U, // <1,0,5,3>: Cost 4 vext1 <3,1,0,5>, <3,1,0,5>
- 2215592058U, // <1,0,5,4>: Cost 3 vrev <0,1,4,5>
- 3724242907U, // <1,0,5,5>: Cost 4 vext2 <5,5,1,0>, <5,5,1,0>
- 3724906540U, // <1,0,5,6>: Cost 4 vext2 <5,6,1,0>, <5,6,1,0>
- 3911118134U, // <1,0,5,7>: Cost 4 vuzpr <3,1,3,0>, RHS
- 3028287644U, // <1,0,5,u>: Cost 3 vtrnl <1,3,5,7>, LHS
- 3762086375U, // <1,0,6,0>: Cost 4 vext3 <0,6,0,1>, <0,6,0,1>
- 2698297846U, // <1,0,6,1>: Cost 3 vext3 <2,3,0,1>, <0,6,1,7>
- 3760022015U, // <1,0,6,2>: Cost 4 vext3 <0,2,u,1>, <0,6,2,7>
- 3642509538U, // <1,0,6,3>: Cost 4 vext1 <3,1,0,6>, <3,1,0,6>
- 3762381323U, // <1,0,6,4>: Cost 4 vext3 <0,6,4,1>, <0,6,4,1>
- 3730215604U, // <1,0,6,5>: Cost 4 vext2 <6,5,1,0>, <6,5,1,0>
- 3730879237U, // <1,0,6,6>: Cost 4 vext2 <6,6,1,0>, <6,6,1,0>
- 2657801046U, // <1,0,6,7>: Cost 3 vext2 <6,7,1,0>, <6,7,1,0>
- 2658464679U, // <1,0,6,u>: Cost 3 vext2 <6,u,1,0>, <6,u,1,0>
- 2659128312U, // <1,0,7,0>: Cost 3 vext2 <7,0,1,0>, <7,0,1,0>
- 4047898278U, // <1,0,7,1>: Cost 4 vzipr <3,5,1,7>, <2,3,0,1>
- 2215460970U, // <1,0,7,2>: Cost 3 vrev <0,1,2,7>
- 3734861035U, // <1,0,7,3>: Cost 4 vext2 <7,3,1,0>, <7,3,1,0>
- 3731543398U, // <1,0,7,4>: Cost 4 vext2 <6,7,1,0>, <7,4,5,6>
- 3736188301U, // <1,0,7,5>: Cost 4 vext2 <7,5,1,0>, <7,5,1,0>
- 2663110110U, // <1,0,7,6>: Cost 3 vext2 <7,6,1,0>, <7,6,1,0>
- 3731543660U, // <1,0,7,7>: Cost 4 vext2 <6,7,1,0>, <7,7,7,7>
- 2664437376U, // <1,0,7,u>: Cost 3 vext2 <7,u,1,0>, <7,u,1,0>
- 3087884288U, // <1,0,u,0>: Cost 3 vtrnr LHS, <0,0,0,0>
- 1616003730U, // <1,0,u,1>: Cost 2 vext3 <0,u,1,1>, <0,u,1,1>
- 67985515U, // <1,0,u,2>: Cost 1 vrev LHS
- 2689893028U, // <1,0,u,3>: Cost 3 vext3 <0,u,3,1>, <0,u,3,1>
- 2689745586U, // <1,0,u,4>: Cost 3 vext3 <0,u,1,1>, <0,u,4,6>
- 2619316378U, // <1,0,u,5>: Cost 3 vext2 <0,3,1,0>, RHS
- 2669082807U, // <1,0,u,6>: Cost 3 vext2 <u,6,1,0>, <u,6,1,0>
- 2592674888U, // <1,0,u,7>: Cost 3 vext1 <7,1,0,u>, <7,1,0,u>
- 68427937U, // <1,0,u,u>: Cost 1 vrev LHS
- 1543585802U, // <1,1,0,0>: Cost 2 vext2 <0,0,1,1>, <0,0,1,1>
- 1548894310U, // <1,1,0,1>: Cost 2 vext2 <0,u,1,1>, LHS
- 2618654892U, // <1,1,0,2>: Cost 3 vext2 <0,2,1,1>, <0,2,1,1>
- 2689745654U, // <1,1,0,3>: Cost 3 vext3 <0,u,1,1>, <1,0,3,2>
- 2622636370U, // <1,1,0,4>: Cost 3 vext2 <0,u,1,1>, <0,4,1,5>
- 2620645791U, // <1,1,0,5>: Cost 3 vext2 <0,5,1,1>, <0,5,1,1>
- 3696378367U, // <1,1,0,6>: Cost 4 vext2 <0,u,1,1>, <0,6,2,7>
- 3666424905U, // <1,1,0,7>: Cost 4 vext1 <7,1,1,0>, <7,1,1,0>
- 1548894866U, // <1,1,0,u>: Cost 2 vext2 <0,u,1,1>, <0,u,1,1>
- 1483112550U, // <1,1,1,0>: Cost 2 vext1 <1,1,1,1>, LHS
- 202162278U, // <1,1,1,1>: Cost 1 vdup1 LHS
- 2622636950U, // <1,1,1,2>: Cost 3 vext2 <0,u,1,1>, <1,2,3,0>
- 2622637016U, // <1,1,1,3>: Cost 3 vext2 <0,u,1,1>, <1,3,1,3>
- 1483115830U, // <1,1,1,4>: Cost 2 vext1 <1,1,1,1>, RHS
- 2622637200U, // <1,1,1,5>: Cost 3 vext2 <0,u,1,1>, <1,5,3,7>
- 2622637263U, // <1,1,1,6>: Cost 3 vext2 <0,u,1,1>, <1,6,1,7>
- 2592691274U, // <1,1,1,7>: Cost 3 vext1 <7,1,1,1>, <7,1,1,1>
- 202162278U, // <1,1,1,u>: Cost 1 vdup1 LHS
- 2550890588U, // <1,1,2,0>: Cost 3 vext1 <0,1,1,2>, <0,1,1,2>
- 2617329183U, // <1,1,2,1>: Cost 3 vext2 <0,0,1,1>, <2,1,3,1>
- 2622637672U, // <1,1,2,2>: Cost 3 vext2 <0,u,1,1>, <2,2,2,2>
- 2622637734U, // <1,1,2,3>: Cost 3 vext2 <0,u,1,1>, <2,3,0,1>
- 2550893878U, // <1,1,2,4>: Cost 3 vext1 <0,1,1,2>, RHS
- 3696379744U, // <1,1,2,5>: Cost 4 vext2 <0,u,1,1>, <2,5,2,7>
- 2622638010U, // <1,1,2,6>: Cost 3 vext2 <0,u,1,1>, <2,6,3,7>
- 3804554170U, // <1,1,2,7>: Cost 4 vext3 <7,7,0,1>, <1,2,7,0>
- 2622638139U, // <1,1,2,u>: Cost 3 vext2 <0,u,1,1>, <2,u,0,1>
- 2622638230U, // <1,1,3,0>: Cost 3 vext2 <0,u,1,1>, <3,0,1,2>
- 3087844148U, // <1,1,3,1>: Cost 3 vtrnr LHS, <1,1,1,1>
- 4161585244U, // <1,1,3,2>: Cost 4 vtrnr LHS, <0,1,1,2>
- 2014101606U, // <1,1,3,3>: Cost 2 vtrnr LHS, LHS
- 2622638594U, // <1,1,3,4>: Cost 3 vext2 <0,u,1,1>, <3,4,5,6>
- 2689745920U, // <1,1,3,5>: Cost 3 vext3 <0,u,1,1>, <1,3,5,7>
- 3763487753U, // <1,1,3,6>: Cost 4 vext3 <0,u,1,1>, <1,3,6,7>
- 2592707660U, // <1,1,3,7>: Cost 3 vext1 <7,1,1,3>, <7,1,1,3>
- 2014101611U, // <1,1,3,u>: Cost 2 vtrnr LHS, LHS
- 2556878950U, // <1,1,4,0>: Cost 3 vext1 <1,1,1,4>, LHS
- 2221335351U, // <1,1,4,1>: Cost 3 vrev <1,1,1,4>
- 3696380988U, // <1,1,4,2>: Cost 4 vext2 <0,u,1,1>, <4,2,6,0>
- 3763487805U, // <1,1,4,3>: Cost 4 vext3 <0,u,1,1>, <1,4,3,5>
- 2556882230U, // <1,1,4,4>: Cost 3 vext1 <1,1,1,4>, RHS
- 1548897590U, // <1,1,4,5>: Cost 2 vext2 <0,u,1,1>, RHS
- 2758184246U, // <1,1,4,6>: Cost 3 vuzpl <1,1,1,1>, RHS
- 3666457677U, // <1,1,4,7>: Cost 4 vext1 <7,1,1,4>, <7,1,1,4>
- 1548897833U, // <1,1,4,u>: Cost 2 vext2 <0,u,1,1>, RHS
- 2693653615U, // <1,1,5,0>: Cost 3 vext3 <1,5,0,1>, <1,5,0,1>
- 2617331408U, // <1,1,5,1>: Cost 3 vext2 <0,0,1,1>, <5,1,7,3>
- 4029302934U, // <1,1,5,2>: Cost 4 vzipr <0,4,1,5>, <3,0,1,2>
- 2689746064U, // <1,1,5,3>: Cost 3 vext3 <0,u,1,1>, <1,5,3,7>
- 2221564755U, // <1,1,5,4>: Cost 3 vrev <1,1,4,5>
- 2955559250U, // <1,1,5,5>: Cost 3 vzipr <0,4,1,5>, <0,4,1,5>
- 2617331810U, // <1,1,5,6>: Cost 3 vext2 <0,0,1,1>, <5,6,7,0>
- 2825293110U, // <1,1,5,7>: Cost 3 vuzpr <1,1,1,1>, RHS
- 2689746109U, // <1,1,5,u>: Cost 3 vext3 <0,u,1,1>, <1,5,u,7>
- 3696382241U, // <1,1,6,0>: Cost 4 vext2 <0,u,1,1>, <6,0,1,2>
- 2689746127U, // <1,1,6,1>: Cost 3 vext3 <0,u,1,1>, <1,6,1,7>
- 2617332218U, // <1,1,6,2>: Cost 3 vext2 <0,0,1,1>, <6,2,7,3>
- 3763487969U, // <1,1,6,3>: Cost 4 vext3 <0,u,1,1>, <1,6,3,7>
- 3696382605U, // <1,1,6,4>: Cost 4 vext2 <0,u,1,1>, <6,4,5,6>
- 4029309266U, // <1,1,6,5>: Cost 4 vzipr <0,4,1,6>, <0,4,1,5>
- 2617332536U, // <1,1,6,6>: Cost 3 vext2 <0,0,1,1>, <6,6,6,6>
- 2724840702U, // <1,1,6,7>: Cost 3 vext3 <6,7,0,1>, <1,6,7,0>
- 2725504263U, // <1,1,6,u>: Cost 3 vext3 <6,u,0,1>, <1,6,u,0>
- 2617332720U, // <1,1,7,0>: Cost 3 vext2 <0,0,1,1>, <7,0,0,1>
- 2659800138U, // <1,1,7,1>: Cost 3 vext2 <7,1,1,1>, <7,1,1,1>
- 3691074717U, // <1,1,7,2>: Cost 4 vext2 <0,0,1,1>, <7,2,1,3>
- 4167811174U, // <1,1,7,3>: Cost 4 vtrnr <1,1,5,7>, LHS
- 2617333094U, // <1,1,7,4>: Cost 3 vext2 <0,0,1,1>, <7,4,5,6>
- 3295396702U, // <1,1,7,5>: Cost 4 vrev <1,1,5,7>
- 3803891014U, // <1,1,7,6>: Cost 4 vext3 <7,6,0,1>, <1,7,6,0>
- 2617333356U, // <1,1,7,7>: Cost 3 vext2 <0,0,1,1>, <7,7,7,7>
- 2659800138U, // <1,1,7,u>: Cost 3 vext2 <7,1,1,1>, <7,1,1,1>
- 1483112550U, // <1,1,u,0>: Cost 2 vext1 <1,1,1,1>, LHS
- 202162278U, // <1,1,u,1>: Cost 1 vdup1 LHS
- 2622642056U, // <1,1,u,2>: Cost 3 vext2 <0,u,1,1>, <u,2,3,3>
- 2014142566U, // <1,1,u,3>: Cost 2 vtrnr LHS, LHS
- 1483115830U, // <1,1,u,4>: Cost 2 vext1 <1,1,1,1>, RHS
- 1548900506U, // <1,1,u,5>: Cost 2 vext2 <0,u,1,1>, RHS
- 2622642384U, // <1,1,u,6>: Cost 3 vext2 <0,u,1,1>, <u,6,3,7>
- 2825293353U, // <1,1,u,7>: Cost 3 vuzpr <1,1,1,1>, RHS
- 202162278U, // <1,1,u,u>: Cost 1 vdup1 LHS
- 2635251712U, // <1,2,0,0>: Cost 3 vext2 <3,0,1,2>, <0,0,0,0>
- 1561509990U, // <1,2,0,1>: Cost 2 vext2 <3,0,1,2>, LHS
- 2618663085U, // <1,2,0,2>: Cost 3 vext2 <0,2,1,2>, <0,2,1,2>
- 2696529358U, // <1,2,0,3>: Cost 3 vext3 <2,0,3,1>, <2,0,3,1>
- 2635252050U, // <1,2,0,4>: Cost 3 vext2 <3,0,1,2>, <0,4,1,5>
- 3769533926U, // <1,2,0,5>: Cost 4 vext3 <1,u,2,1>, <2,0,5,7>
- 2621317617U, // <1,2,0,6>: Cost 3 vext2 <0,6,1,2>, <0,6,1,2>
- 2659140170U, // <1,2,0,7>: Cost 3 vext2 <7,0,1,2>, <0,7,2,1>
- 1561510557U, // <1,2,0,u>: Cost 2 vext2 <3,0,1,2>, LHS
- 2623308516U, // <1,2,1,0>: Cost 3 vext2 <1,0,1,2>, <1,0,1,2>
- 2635252532U, // <1,2,1,1>: Cost 3 vext2 <3,0,1,2>, <1,1,1,1>
- 2631271318U, // <1,2,1,2>: Cost 3 vext2 <2,3,1,2>, <1,2,3,0>
- 2958180454U, // <1,2,1,3>: Cost 3 vzipr <0,u,1,1>, LHS
- 2550959414U, // <1,2,1,4>: Cost 3 vext1 <0,1,2,1>, RHS
- 2635252880U, // <1,2,1,5>: Cost 3 vext2 <3,0,1,2>, <1,5,3,7>
- 2635252952U, // <1,2,1,6>: Cost 3 vext2 <3,0,1,2>, <1,6,2,7>
- 3732882731U, // <1,2,1,7>: Cost 4 vext2 <7,0,1,2>, <1,7,3,0>
- 2958180459U, // <1,2,1,u>: Cost 3 vzipr <0,u,1,1>, LHS
- 2629281213U, // <1,2,2,0>: Cost 3 vext2 <2,0,1,2>, <2,0,1,2>
- 2635253280U, // <1,2,2,1>: Cost 3 vext2 <3,0,1,2>, <2,1,3,2>
- 2618664552U, // <1,2,2,2>: Cost 3 vext2 <0,2,1,2>, <2,2,2,2>
- 2689746546U, // <1,2,2,3>: Cost 3 vext3 <0,u,1,1>, <2,2,3,3>
- 3764815485U, // <1,2,2,4>: Cost 4 vext3 <1,1,1,1>, <2,2,4,5>
- 3760023176U, // <1,2,2,5>: Cost 4 vext3 <0,2,u,1>, <2,2,5,7>
- 2635253690U, // <1,2,2,6>: Cost 3 vext2 <3,0,1,2>, <2,6,3,7>
- 2659141610U, // <1,2,2,7>: Cost 3 vext2 <7,0,1,2>, <2,7,0,1>
- 2689746591U, // <1,2,2,u>: Cost 3 vext3 <0,u,1,1>, <2,2,u,3>
- 403488870U, // <1,2,3,0>: Cost 1 vext1 LHS, LHS
- 1477231350U, // <1,2,3,1>: Cost 2 vext1 LHS, <1,0,3,2>
- 1477232232U, // <1,2,3,2>: Cost 2 vext1 LHS, <2,2,2,2>
- 1477233052U, // <1,2,3,3>: Cost 2 vext1 LHS, <3,3,3,3>
- 403492150U, // <1,2,3,4>: Cost 1 vext1 LHS, RHS
- 1525010128U, // <1,2,3,5>: Cost 2 vext1 LHS, <5,1,7,3>
- 1525010938U, // <1,2,3,6>: Cost 2 vext1 LHS, <6,2,7,3>
- 1525011450U, // <1,2,3,7>: Cost 2 vext1 LHS, <7,0,1,2>
- 403494702U, // <1,2,3,u>: Cost 1 vext1 LHS, LHS
- 2641226607U, // <1,2,4,0>: Cost 3 vext2 <4,0,1,2>, <4,0,1,2>
- 3624723446U, // <1,2,4,1>: Cost 4 vext1 <0,1,2,4>, <1,3,4,6>
- 3301123609U, // <1,2,4,2>: Cost 4 vrev <2,1,2,4>
- 2598759198U, // <1,2,4,3>: Cost 3 vext1 <u,1,2,4>, <3,u,1,2>
- 2659142864U, // <1,2,4,4>: Cost 3 vext2 <7,0,1,2>, <4,4,4,4>
- 1561513270U, // <1,2,4,5>: Cost 2 vext2 <3,0,1,2>, RHS
- 2659143028U, // <1,2,4,6>: Cost 3 vext2 <7,0,1,2>, <4,6,4,6>
- 2659143112U, // <1,2,4,7>: Cost 3 vext2 <7,0,1,2>, <4,7,5,0>
- 1561513513U, // <1,2,4,u>: Cost 2 vext2 <3,0,1,2>, RHS
- 2550988902U, // <1,2,5,0>: Cost 3 vext1 <0,1,2,5>, LHS
- 2550989824U, // <1,2,5,1>: Cost 3 vext1 <0,1,2,5>, <1,3,5,7>
- 3624732264U, // <1,2,5,2>: Cost 4 vext1 <0,1,2,5>, <2,2,2,2>
- 2955559014U, // <1,2,5,3>: Cost 3 vzipr <0,4,1,5>, LHS
- 2550992182U, // <1,2,5,4>: Cost 3 vext1 <0,1,2,5>, RHS
- 2659143684U, // <1,2,5,5>: Cost 3 vext2 <7,0,1,2>, <5,5,5,5>
- 2659143778U, // <1,2,5,6>: Cost 3 vext2 <7,0,1,2>, <5,6,7,0>
- 2659143848U, // <1,2,5,7>: Cost 3 vext2 <7,0,1,2>, <5,7,5,7>
- 2550994734U, // <1,2,5,u>: Cost 3 vext1 <0,1,2,5>, LHS
- 2700289945U, // <1,2,6,0>: Cost 3 vext3 <2,6,0,1>, <2,6,0,1>
- 2635256232U, // <1,2,6,1>: Cost 3 vext2 <3,0,1,2>, <6,1,7,2>
- 2659144186U, // <1,2,6,2>: Cost 3 vext2 <7,0,1,2>, <6,2,7,3>
- 2689746874U, // <1,2,6,3>: Cost 3 vext3 <0,u,1,1>, <2,6,3,7>
- 3763488705U, // <1,2,6,4>: Cost 4 vext3 <0,u,1,1>, <2,6,4,5>
- 3763488716U, // <1,2,6,5>: Cost 4 vext3 <0,u,1,1>, <2,6,5,7>
- 2659144504U, // <1,2,6,6>: Cost 3 vext2 <7,0,1,2>, <6,6,6,6>
- 2657817432U, // <1,2,6,7>: Cost 3 vext2 <6,7,1,2>, <6,7,1,2>
- 2689746919U, // <1,2,6,u>: Cost 3 vext3 <0,u,1,1>, <2,6,u,7>
- 1585402874U, // <1,2,7,0>: Cost 2 vext2 <7,0,1,2>, <7,0,1,2>
- 2659144770U, // <1,2,7,1>: Cost 3 vext2 <7,0,1,2>, <7,1,0,2>
- 3708998858U, // <1,2,7,2>: Cost 4 vext2 <3,0,1,2>, <7,2,6,3>
- 2635257059U, // <1,2,7,3>: Cost 3 vext2 <3,0,1,2>, <7,3,0,1>
- 2659145062U, // <1,2,7,4>: Cost 3 vext2 <7,0,1,2>, <7,4,5,6>
- 3732886916U, // <1,2,7,5>: Cost 4 vext2 <7,0,1,2>, <7,5,0,0>
- 3732886998U, // <1,2,7,6>: Cost 4 vext2 <7,0,1,2>, <7,6,0,1>
- 2659145255U, // <1,2,7,7>: Cost 3 vext2 <7,0,1,2>, <7,7,0,1>
- 1590711938U, // <1,2,7,u>: Cost 2 vext2 <7,u,1,2>, <7,u,1,2>
- 403529835U, // <1,2,u,0>: Cost 1 vext1 LHS, LHS
- 1477272310U, // <1,2,u,1>: Cost 2 vext1 LHS, <1,0,3,2>
- 1477273192U, // <1,2,u,2>: Cost 2 vext1 LHS, <2,2,2,2>
- 1477273750U, // <1,2,u,3>: Cost 2 vext1 LHS, <3,0,1,2>
- 403533110U, // <1,2,u,4>: Cost 1 vext1 LHS, RHS
- 1561516186U, // <1,2,u,5>: Cost 2 vext2 <3,0,1,2>, RHS
- 1525051898U, // <1,2,u,6>: Cost 2 vext1 LHS, <6,2,7,3>
- 1525052410U, // <1,2,u,7>: Cost 2 vext1 LHS, <7,0,1,2>
- 403535662U, // <1,2,u,u>: Cost 1 vext1 LHS, LHS
- 2819407872U, // <1,3,0,0>: Cost 3 vuzpr LHS, <0,0,0,0>
- 1551564902U, // <1,3,0,1>: Cost 2 vext2 <1,3,1,3>, LHS
- 2819408630U, // <1,3,0,2>: Cost 3 vuzpr LHS, <1,0,3,2>
- 2619334911U, // <1,3,0,3>: Cost 3 vext2 <0,3,1,3>, <0,3,1,3>
- 2625306962U, // <1,3,0,4>: Cost 3 vext2 <1,3,1,3>, <0,4,1,5>
- 3832725879U, // <1,3,0,5>: Cost 4 vuzpl <1,2,3,0>, <0,4,5,6>
- 3699048959U, // <1,3,0,6>: Cost 4 vext2 <1,3,1,3>, <0,6,2,7>
- 3776538827U, // <1,3,0,7>: Cost 4 vext3 <3,0,7,1>, <3,0,7,1>
- 1551565469U, // <1,3,0,u>: Cost 2 vext2 <1,3,1,3>, LHS
- 2618671862U, // <1,3,1,0>: Cost 3 vext2 <0,2,1,3>, <1,0,3,2>
- 2819408692U, // <1,3,1,1>: Cost 3 vuzpr LHS, <1,1,1,1>
- 2624643975U, // <1,3,1,2>: Cost 3 vext2 <1,2,1,3>, <1,2,1,3>
- 1745666150U, // <1,3,1,3>: Cost 2 vuzpr LHS, LHS
- 2557005110U, // <1,3,1,4>: Cost 3 vext1 <1,1,3,1>, RHS
- 2625307792U, // <1,3,1,5>: Cost 3 vext2 <1,3,1,3>, <1,5,3,7>
- 3698386127U, // <1,3,1,6>: Cost 4 vext2 <1,2,1,3>, <1,6,1,7>
- 2592838748U, // <1,3,1,7>: Cost 3 vext1 <7,1,3,1>, <7,1,3,1>
- 1745666155U, // <1,3,1,u>: Cost 2 vuzpr LHS, LHS
- 2819408790U, // <1,3,2,0>: Cost 3 vuzpr LHS, <1,2,3,0>
- 2625308193U, // <1,3,2,1>: Cost 3 vext2 <1,3,1,3>, <2,1,3,3>
- 2819408036U, // <1,3,2,2>: Cost 3 vuzpr LHS, <0,2,0,2>
- 2819851890U, // <1,3,2,3>: Cost 3 vuzpr LHS, <2,2,3,3>
- 2819408794U, // <1,3,2,4>: Cost 3 vuzpr LHS, <1,2,3,4>
- 3893149890U, // <1,3,2,5>: Cost 4 vuzpr LHS, <0,2,3,5>
- 2819408076U, // <1,3,2,6>: Cost 3 vuzpr LHS, <0,2,4,6>
- 3772041583U, // <1,3,2,7>: Cost 4 vext3 <2,3,0,1>, <3,2,7,3>
- 2819408042U, // <1,3,2,u>: Cost 3 vuzpr LHS, <0,2,0,u>
- 1483276390U, // <1,3,3,0>: Cost 2 vext1 <1,1,3,3>, LHS
- 1483277128U, // <1,3,3,1>: Cost 2 vext1 <1,1,3,3>, <1,1,3,3>
- 2557019752U, // <1,3,3,2>: Cost 3 vext1 <1,1,3,3>, <2,2,2,2>
- 2819408856U, // <1,3,3,3>: Cost 3 vuzpr LHS, <1,3,1,3>
- 1483279670U, // <1,3,3,4>: Cost 2 vext1 <1,1,3,3>, RHS
- 2819409614U, // <1,3,3,5>: Cost 3 vuzpr LHS, <2,3,4,5>
- 2598826490U, // <1,3,3,6>: Cost 3 vext1 <u,1,3,3>, <6,2,7,3>
- 3087844352U, // <1,3,3,7>: Cost 3 vtrnr LHS, <1,3,5,7>
- 1483282222U, // <1,3,3,u>: Cost 2 vext1 <1,1,3,3>, LHS
- 2568970342U, // <1,3,4,0>: Cost 3 vext1 <3,1,3,4>, LHS
- 2568971224U, // <1,3,4,1>: Cost 3 vext1 <3,1,3,4>, <1,3,1,3>
- 3832761290U, // <1,3,4,2>: Cost 4 vuzpl <1,2,3,4>, <4,1,2,3>
- 2233428219U, // <1,3,4,3>: Cost 3 vrev <3,1,3,4>
- 2568973622U, // <1,3,4,4>: Cost 3 vext1 <3,1,3,4>, RHS
- 1551568182U, // <1,3,4,5>: Cost 2 vext2 <1,3,1,3>, RHS
- 2819410434U, // <1,3,4,6>: Cost 3 vuzpr LHS, <3,4,5,6>
- 3666605151U, // <1,3,4,7>: Cost 4 vext1 <7,1,3,4>, <7,1,3,4>
- 1551568425U, // <1,3,4,u>: Cost 2 vext2 <1,3,1,3>, RHS
- 2563006566U, // <1,3,5,0>: Cost 3 vext1 <2,1,3,5>, LHS
- 2568979456U, // <1,3,5,1>: Cost 3 vext1 <3,1,3,5>, <1,3,5,7>
- 2563008035U, // <1,3,5,2>: Cost 3 vext1 <2,1,3,5>, <2,1,3,5>
- 2233436412U, // <1,3,5,3>: Cost 3 vrev <3,1,3,5>
- 2563009846U, // <1,3,5,4>: Cost 3 vext1 <2,1,3,5>, RHS
- 2867187716U, // <1,3,5,5>: Cost 3 vuzpr LHS, <5,5,5,5>
- 2655834214U, // <1,3,5,6>: Cost 3 vext2 <6,4,1,3>, <5,6,7,4>
- 1745669430U, // <1,3,5,7>: Cost 2 vuzpr LHS, RHS
- 1745669431U, // <1,3,5,u>: Cost 2 vuzpr LHS, RHS
- 2867187810U, // <1,3,6,0>: Cost 3 vuzpr LHS, <5,6,7,0>
- 3699052931U, // <1,3,6,1>: Cost 4 vext2 <1,3,1,3>, <6,1,3,1>
- 2654507460U, // <1,3,6,2>: Cost 3 vext2 <6,2,1,3>, <6,2,1,3>
- 3766291091U, // <1,3,6,3>: Cost 4 vext3 <1,3,3,1>, <3,6,3,7>
- 2655834726U, // <1,3,6,4>: Cost 3 vext2 <6,4,1,3>, <6,4,1,3>
- 3923384562U, // <1,3,6,5>: Cost 4 vuzpr <5,1,7,3>, <u,6,7,5>
- 2657161992U, // <1,3,6,6>: Cost 3 vext2 <6,6,1,3>, <6,6,1,3>
- 2819852218U, // <1,3,6,7>: Cost 3 vuzpr LHS, <2,6,3,7>
- 2819852219U, // <1,3,6,u>: Cost 3 vuzpr LHS, <2,6,3,u>
- 2706926275U, // <1,3,7,0>: Cost 3 vext3 <3,7,0,1>, <3,7,0,1>
- 2659816524U, // <1,3,7,1>: Cost 3 vext2 <7,1,1,3>, <7,1,1,3>
- 3636766245U, // <1,3,7,2>: Cost 4 vext1 <2,1,3,7>, <2,1,3,7>
- 2867187903U, // <1,3,7,3>: Cost 3 vuzpr LHS, <5,7,u,3>
- 2625312102U, // <1,3,7,4>: Cost 3 vext2 <1,3,1,3>, <7,4,5,6>
- 2867188598U, // <1,3,7,5>: Cost 3 vuzpr LHS, <6,7,4,5>
- 3728250344U, // <1,3,7,6>: Cost 4 vext2 <6,2,1,3>, <7,6,2,1>
- 2867187880U, // <1,3,7,7>: Cost 3 vuzpr LHS, <5,7,5,7>
- 2707516171U, // <1,3,7,u>: Cost 3 vext3 <3,7,u,1>, <3,7,u,1>
- 1483317350U, // <1,3,u,0>: Cost 2 vext1 <1,1,3,u>, LHS
- 1483318093U, // <1,3,u,1>: Cost 2 vext1 <1,1,3,u>, <1,1,3,u>
- 2819410718U, // <1,3,u,2>: Cost 3 vuzpr LHS, <3,u,1,2>
- 1745666717U, // <1,3,u,3>: Cost 2 vuzpr LHS, LHS
- 1483320630U, // <1,3,u,4>: Cost 2 vext1 <1,1,3,u>, RHS
- 1551571098U, // <1,3,u,5>: Cost 2 vext2 <1,3,1,3>, RHS
- 2819410758U, // <1,3,u,6>: Cost 3 vuzpr LHS, <3,u,5,6>
- 1745669673U, // <1,3,u,7>: Cost 2 vuzpr LHS, RHS
- 1745666722U, // <1,3,u,u>: Cost 2 vuzpr LHS, LHS
- 2617352205U, // <1,4,0,0>: Cost 3 vext2 <0,0,1,4>, <0,0,1,4>
- 2619342950U, // <1,4,0,1>: Cost 3 vext2 <0,3,1,4>, LHS
- 3692421295U, // <1,4,0,2>: Cost 4 vext2 <0,2,1,4>, <0,2,1,4>
- 2619343104U, // <1,4,0,3>: Cost 3 vext2 <0,3,1,4>, <0,3,1,4>
- 2617352530U, // <1,4,0,4>: Cost 3 vext2 <0,0,1,4>, <0,4,1,5>
- 1634880402U, // <1,4,0,5>: Cost 2 vext3 <4,0,5,1>, <4,0,5,1>
- 2713930652U, // <1,4,0,6>: Cost 3 vext3 <4,u,5,1>, <4,0,6,2>
- 3732898396U, // <1,4,0,7>: Cost 4 vext2 <7,0,1,4>, <0,7,4,1>
- 1635101613U, // <1,4,0,u>: Cost 2 vext3 <4,0,u,1>, <4,0,u,1>
- 3693085430U, // <1,4,1,0>: Cost 4 vext2 <0,3,1,4>, <1,0,3,2>
- 2623988535U, // <1,4,1,1>: Cost 3 vext2 <1,1,1,4>, <1,1,1,4>
- 3693085590U, // <1,4,1,2>: Cost 4 vext2 <0,3,1,4>, <1,2,3,0>
- 3692422134U, // <1,4,1,3>: Cost 4 vext2 <0,2,1,4>, <1,3,4,6>
- 3693085726U, // <1,4,1,4>: Cost 4 vext2 <0,3,1,4>, <1,4,0,1>
- 2892401974U, // <1,4,1,5>: Cost 3 vzipl <1,1,1,1>, RHS
- 3026619702U, // <1,4,1,6>: Cost 3 vtrnl <1,1,1,1>, RHS
- 3800206324U, // <1,4,1,7>: Cost 4 vext3 <7,0,4,1>, <4,1,7,0>
- 2892402217U, // <1,4,1,u>: Cost 3 vzipl <1,1,1,1>, RHS
- 3966978927U, // <1,4,2,0>: Cost 4 vzipl <1,2,3,4>, <4,0,1,2>
- 3966979018U, // <1,4,2,1>: Cost 4 vzipl <1,2,3,4>, <4,1,2,3>
- 3693086312U, // <1,4,2,2>: Cost 4 vext2 <0,3,1,4>, <2,2,2,2>
- 2635269798U, // <1,4,2,3>: Cost 3 vext2 <3,0,1,4>, <2,3,0,1>
- 3966979280U, // <1,4,2,4>: Cost 4 vzipl <1,2,3,4>, <4,4,4,4>
- 2893204790U, // <1,4,2,5>: Cost 3 vzipl <1,2,3,0>, RHS
- 3693086650U, // <1,4,2,6>: Cost 4 vext2 <0,3,1,4>, <2,6,3,7>
- 3666662502U, // <1,4,2,7>: Cost 4 vext1 <7,1,4,2>, <7,1,4,2>
- 2893205033U, // <1,4,2,u>: Cost 3 vzipl <1,2,3,0>, RHS
- 2563063910U, // <1,4,3,0>: Cost 3 vext1 <2,1,4,3>, LHS
- 2563064730U, // <1,4,3,1>: Cost 3 vext1 <2,1,4,3>, <1,2,3,4>
- 2563065386U, // <1,4,3,2>: Cost 3 vext1 <2,1,4,3>, <2,1,4,3>
- 3693087132U, // <1,4,3,3>: Cost 4 vext2 <0,3,1,4>, <3,3,3,3>
- 2619345410U, // <1,4,3,4>: Cost 3 vext2 <0,3,1,4>, <3,4,5,6>
- 3087843666U, // <1,4,3,5>: Cost 3 vtrnr LHS, <0,4,1,5>
- 3087843676U, // <1,4,3,6>: Cost 3 vtrnr LHS, <0,4,2,6>
- 3666670695U, // <1,4,3,7>: Cost 4 vext1 <7,1,4,3>, <7,1,4,3>
- 3087843669U, // <1,4,3,u>: Cost 3 vtrnr LHS, <0,4,1,u>
- 2620672914U, // <1,4,4,0>: Cost 3 vext2 <0,5,1,4>, <4,0,5,1>
- 3630842706U, // <1,4,4,1>: Cost 4 vext1 <1,1,4,4>, <1,1,4,4>
- 3313069003U, // <1,4,4,2>: Cost 4 vrev <4,1,2,4>
- 3642788100U, // <1,4,4,3>: Cost 4 vext1 <3,1,4,4>, <3,1,4,4>
- 2713930960U, // <1,4,4,4>: Cost 3 vext3 <4,u,5,1>, <4,4,4,4>
- 2619346230U, // <1,4,4,5>: Cost 3 vext2 <0,3,1,4>, RHS
- 2713930980U, // <1,4,4,6>: Cost 3 vext3 <4,u,5,1>, <4,4,6,6>
- 3736882642U, // <1,4,4,7>: Cost 4 vext2 <7,6,1,4>, <4,7,6,1>
- 2619346473U, // <1,4,4,u>: Cost 3 vext2 <0,3,1,4>, RHS
- 2557108326U, // <1,4,5,0>: Cost 3 vext1 <1,1,4,5>, LHS
- 2557109075U, // <1,4,5,1>: Cost 3 vext1 <1,1,4,5>, <1,1,4,5>
- 2598913774U, // <1,4,5,2>: Cost 3 vext1 <u,1,4,5>, <2,3,u,1>
- 3630852246U, // <1,4,5,3>: Cost 4 vext1 <1,1,4,5>, <3,0,1,2>
- 2557111606U, // <1,4,5,4>: Cost 3 vext1 <1,1,4,5>, RHS
- 2895252790U, // <1,4,5,5>: Cost 3 vzipl <1,5,3,7>, RHS
- 1616006454U, // <1,4,5,6>: Cost 2 vext3 <0,u,1,1>, RHS
- 3899059510U, // <1,4,5,7>: Cost 4 vuzpr <1,1,1,4>, RHS
- 1616006472U, // <1,4,5,u>: Cost 2 vext3 <0,u,1,1>, RHS
- 2557116518U, // <1,4,6,0>: Cost 3 vext1 <1,1,4,6>, LHS
- 2557117236U, // <1,4,6,1>: Cost 3 vext1 <1,1,4,6>, <1,1,1,1>
- 3630859880U, // <1,4,6,2>: Cost 4 vext1 <1,1,4,6>, <2,2,2,2>
- 2569062550U, // <1,4,6,3>: Cost 3 vext1 <3,1,4,6>, <3,0,1,2>
- 2557119798U, // <1,4,6,4>: Cost 3 vext1 <1,1,4,6>, RHS
- 3763490174U, // <1,4,6,5>: Cost 4 vext3 <0,u,1,1>, <4,6,5,7>
- 3763490183U, // <1,4,6,6>: Cost 4 vext3 <0,u,1,1>, <4,6,6,7>
- 2712751498U, // <1,4,6,7>: Cost 3 vext3 <4,6,7,1>, <4,6,7,1>
- 2557122350U, // <1,4,6,u>: Cost 3 vext1 <1,1,4,6>, LHS
- 2659161084U, // <1,4,7,0>: Cost 3 vext2 <7,0,1,4>, <7,0,1,4>
- 3732903040U, // <1,4,7,1>: Cost 4 vext2 <7,0,1,4>, <7,1,7,1>
- 3734230174U, // <1,4,7,2>: Cost 4 vext2 <7,2,1,4>, <7,2,1,4>
- 3734893807U, // <1,4,7,3>: Cost 4 vext2 <7,3,1,4>, <7,3,1,4>
- 3660729654U, // <1,4,7,4>: Cost 4 vext1 <6,1,4,7>, RHS
- 3786493384U, // <1,4,7,5>: Cost 4 vext3 <4,6,7,1>, <4,7,5,0>
- 2713341394U, // <1,4,7,6>: Cost 3 vext3 <4,7,6,1>, <4,7,6,1>
- 3660731386U, // <1,4,7,7>: Cost 4 vext1 <6,1,4,7>, <7,0,1,2>
- 2664470148U, // <1,4,7,u>: Cost 3 vext2 <7,u,1,4>, <7,u,1,4>
- 2557132902U, // <1,4,u,0>: Cost 3 vext1 <1,1,4,u>, LHS
- 2619348782U, // <1,4,u,1>: Cost 3 vext2 <0,3,1,4>, LHS
- 2563106351U, // <1,4,u,2>: Cost 3 vext1 <2,1,4,u>, <2,1,4,u>
- 2713783816U, // <1,4,u,3>: Cost 3 vext3 <4,u,3,1>, <4,u,3,1>
- 2622666815U, // <1,4,u,4>: Cost 3 vext2 <0,u,1,4>, <u,4,5,6>
- 1640189466U, // <1,4,u,5>: Cost 2 vext3 <4,u,5,1>, <4,u,5,1>
- 1616006697U, // <1,4,u,6>: Cost 2 vext3 <0,u,1,1>, RHS
- 2712751498U, // <1,4,u,7>: Cost 3 vext3 <4,6,7,1>, <4,6,7,1>
- 1616006715U, // <1,4,u,u>: Cost 2 vext3 <0,u,1,1>, RHS
- 2620014592U, // <1,5,0,0>: Cost 3 vext2 <0,4,1,5>, <0,0,0,0>
- 1546272870U, // <1,5,0,1>: Cost 2 vext2 <0,4,1,5>, LHS
- 2618687664U, // <1,5,0,2>: Cost 3 vext2 <0,2,1,5>, <0,2,1,5>
- 3693093120U, // <1,5,0,3>: Cost 4 vext2 <0,3,1,5>, <0,3,1,4>
- 1546273106U, // <1,5,0,4>: Cost 2 vext2 <0,4,1,5>, <0,4,1,5>
- 2620678563U, // <1,5,0,5>: Cost 3 vext2 <0,5,1,5>, <0,5,1,5>
- 2714668660U, // <1,5,0,6>: Cost 3 vext3 <5,0,6,1>, <5,0,6,1>
- 3772042877U, // <1,5,0,7>: Cost 4 vext3 <2,3,0,1>, <5,0,7,1>
- 1546273437U, // <1,5,0,u>: Cost 2 vext2 <0,4,1,5>, LHS
- 2620015350U, // <1,5,1,0>: Cost 3 vext2 <0,4,1,5>, <1,0,3,2>
- 2620015412U, // <1,5,1,1>: Cost 3 vext2 <0,4,1,5>, <1,1,1,1>
- 2620015510U, // <1,5,1,2>: Cost 3 vext2 <0,4,1,5>, <1,2,3,0>
- 2618688512U, // <1,5,1,3>: Cost 3 vext2 <0,2,1,5>, <1,3,5,7>
- 2620015677U, // <1,5,1,4>: Cost 3 vext2 <0,4,1,5>, <1,4,3,5>
- 2620015727U, // <1,5,1,5>: Cost 3 vext2 <0,4,1,5>, <1,5,0,1>
- 2620015859U, // <1,5,1,6>: Cost 3 vext2 <0,4,1,5>, <1,6,5,7>
- 3093728566U, // <1,5,1,7>: Cost 3 vtrnr <1,1,1,1>, RHS
- 2620015981U, // <1,5,1,u>: Cost 3 vext2 <0,4,1,5>, <1,u,1,3>
- 3692430816U, // <1,5,2,0>: Cost 4 vext2 <0,2,1,5>, <2,0,5,1>
- 2620016163U, // <1,5,2,1>: Cost 3 vext2 <0,4,1,5>, <2,1,3,5>
- 2620016232U, // <1,5,2,2>: Cost 3 vext2 <0,4,1,5>, <2,2,2,2>
- 2620016294U, // <1,5,2,3>: Cost 3 vext2 <0,4,1,5>, <2,3,0,1>
- 3693758221U, // <1,5,2,4>: Cost 4 vext2 <0,4,1,5>, <2,4,2,5>
- 3692431209U, // <1,5,2,5>: Cost 4 vext2 <0,2,1,5>, <2,5,3,7>
- 2620016570U, // <1,5,2,6>: Cost 3 vext2 <0,4,1,5>, <2,6,3,7>
- 4173598006U, // <1,5,2,7>: Cost 4 vtrnr <2,1,3,2>, RHS
- 2620016699U, // <1,5,2,u>: Cost 3 vext2 <0,4,1,5>, <2,u,0,1>
- 2620016790U, // <1,5,3,0>: Cost 3 vext2 <0,4,1,5>, <3,0,1,2>
- 2569110672U, // <1,5,3,1>: Cost 3 vext1 <3,1,5,3>, <1,5,3,7>
- 3693758785U, // <1,5,3,2>: Cost 4 vext2 <0,4,1,5>, <3,2,2,2>
- 2620017052U, // <1,5,3,3>: Cost 3 vext2 <0,4,1,5>, <3,3,3,3>
- 2620017154U, // <1,5,3,4>: Cost 3 vext2 <0,4,1,5>, <3,4,5,6>
- 3135623172U, // <1,5,3,5>: Cost 3 vtrnr LHS, <5,5,5,5>
- 4161587048U, // <1,5,3,6>: Cost 4 vtrnr LHS, <2,5,3,6>
- 2014104886U, // <1,5,3,7>: Cost 2 vtrnr LHS, RHS
- 2014104887U, // <1,5,3,u>: Cost 2 vtrnr LHS, RHS
- 2620017554U, // <1,5,4,0>: Cost 3 vext2 <0,4,1,5>, <4,0,5,1>
- 2620017634U, // <1,5,4,1>: Cost 3 vext2 <0,4,1,5>, <4,1,5,0>
- 3693759551U, // <1,5,4,2>: Cost 4 vext2 <0,4,1,5>, <4,2,6,3>
- 3642861837U, // <1,5,4,3>: Cost 4 vext1 <3,1,5,4>, <3,1,5,4>
- 2575092710U, // <1,5,4,4>: Cost 3 vext1 <4,1,5,4>, <4,1,5,4>
- 1546276150U, // <1,5,4,5>: Cost 2 vext2 <0,4,1,5>, RHS
- 2759855414U, // <1,5,4,6>: Cost 3 vuzpl <1,3,5,7>, RHS
- 2713931718U, // <1,5,4,7>: Cost 3 vext3 <4,u,5,1>, <5,4,7,6>
- 1546276393U, // <1,5,4,u>: Cost 2 vext2 <0,4,1,5>, RHS
- 2557182054U, // <1,5,5,0>: Cost 3 vext1 <1,1,5,5>, LHS
- 2557182812U, // <1,5,5,1>: Cost 3 vext1 <1,1,5,5>, <1,1,5,5>
- 3630925347U, // <1,5,5,2>: Cost 4 vext1 <1,1,5,5>, <2,1,3,5>
- 4029301675U, // <1,5,5,3>: Cost 4 vzipr <0,4,1,5>, <1,2,5,3>
- 2557185334U, // <1,5,5,4>: Cost 3 vext1 <1,1,5,5>, RHS
- 2713931780U, // <1,5,5,5>: Cost 3 vext3 <4,u,5,1>, <5,5,5,5>
- 2667794530U, // <1,5,5,6>: Cost 3 vext2 <u,4,1,5>, <5,6,7,0>
- 2713931800U, // <1,5,5,7>: Cost 3 vext3 <4,u,5,1>, <5,5,7,7>
- 2557187886U, // <1,5,5,u>: Cost 3 vext1 <1,1,5,5>, LHS
- 2718208036U, // <1,5,6,0>: Cost 3 vext3 <5,6,0,1>, <5,6,0,1>
- 2620019115U, // <1,5,6,1>: Cost 3 vext2 <0,4,1,5>, <6,1,7,5>
- 2667794938U, // <1,5,6,2>: Cost 3 vext2 <u,4,1,5>, <6,2,7,3>
- 3787673666U, // <1,5,6,3>: Cost 4 vext3 <4,u,5,1>, <5,6,3,4>
- 3693761165U, // <1,5,6,4>: Cost 4 vext2 <0,4,1,5>, <6,4,5,6>
- 3319279297U, // <1,5,6,5>: Cost 4 vrev <5,1,5,6>
- 2667795256U, // <1,5,6,6>: Cost 3 vext2 <u,4,1,5>, <6,6,6,6>
- 2713931874U, // <1,5,6,7>: Cost 3 vext3 <4,u,5,1>, <5,6,7,0>
- 2713931883U, // <1,5,6,u>: Cost 3 vext3 <4,u,5,1>, <5,6,u,0>
- 2557198438U, // <1,5,7,0>: Cost 3 vext1 <1,1,5,7>, LHS
- 2557199156U, // <1,5,7,1>: Cost 3 vext1 <1,1,5,7>, <1,1,1,1>
- 2569143974U, // <1,5,7,2>: Cost 3 vext1 <3,1,5,7>, <2,3,0,1>
- 2569144592U, // <1,5,7,3>: Cost 3 vext1 <3,1,5,7>, <3,1,5,7>
- 2557201718U, // <1,5,7,4>: Cost 3 vext1 <1,1,5,7>, RHS
- 2713931944U, // <1,5,7,5>: Cost 3 vext3 <4,u,5,1>, <5,7,5,7>
- 3787673770U, // <1,5,7,6>: Cost 4 vext3 <4,u,5,1>, <5,7,6,0>
- 2719387828U, // <1,5,7,7>: Cost 3 vext3 <5,7,7,1>, <5,7,7,1>
- 2557204270U, // <1,5,7,u>: Cost 3 vext1 <1,1,5,7>, LHS
- 2620020435U, // <1,5,u,0>: Cost 3 vext2 <0,4,1,5>, <u,0,1,2>
- 1546278702U, // <1,5,u,1>: Cost 2 vext2 <0,4,1,5>, LHS
- 2620020616U, // <1,5,u,2>: Cost 3 vext2 <0,4,1,5>, <u,2,3,3>
- 2620020668U, // <1,5,u,3>: Cost 3 vext2 <0,4,1,5>, <u,3,0,1>
- 1594054682U, // <1,5,u,4>: Cost 2 vext2 <u,4,1,5>, <u,4,1,5>
- 1546279066U, // <1,5,u,5>: Cost 2 vext2 <0,4,1,5>, RHS
- 2620020944U, // <1,5,u,6>: Cost 3 vext2 <0,4,1,5>, <u,6,3,7>
- 2014145846U, // <1,5,u,7>: Cost 2 vtrnr LHS, RHS
- 2014145847U, // <1,5,u,u>: Cost 2 vtrnr LHS, RHS
- 3692437504U, // <1,6,0,0>: Cost 4 vext2 <0,2,1,6>, <0,0,0,0>
- 2618695782U, // <1,6,0,1>: Cost 3 vext2 <0,2,1,6>, LHS
- 2618695857U, // <1,6,0,2>: Cost 3 vext2 <0,2,1,6>, <0,2,1,6>
- 3794161970U, // <1,6,0,3>: Cost 4 vext3 <6,0,3,1>, <6,0,3,1>
- 2620023122U, // <1,6,0,4>: Cost 3 vext2 <0,4,1,6>, <0,4,1,5>
- 2620686756U, // <1,6,0,5>: Cost 3 vext2 <0,5,1,6>, <0,5,1,6>
- 2621350389U, // <1,6,0,6>: Cost 3 vext2 <0,6,1,6>, <0,6,1,6>
- 4028599606U, // <1,6,0,7>: Cost 4 vzipr <0,3,1,0>, RHS
- 2618696349U, // <1,6,0,u>: Cost 3 vext2 <0,2,1,6>, LHS
- 3692438262U, // <1,6,1,0>: Cost 4 vext2 <0,2,1,6>, <1,0,3,2>
- 2625995572U, // <1,6,1,1>: Cost 3 vext2 <1,4,1,6>, <1,1,1,1>
- 3692438422U, // <1,6,1,2>: Cost 4 vext2 <0,2,1,6>, <1,2,3,0>
- 3692438488U, // <1,6,1,3>: Cost 4 vext2 <0,2,1,6>, <1,3,1,3>
- 2625995820U, // <1,6,1,4>: Cost 3 vext2 <1,4,1,6>, <1,4,1,6>
- 3692438672U, // <1,6,1,5>: Cost 4 vext2 <0,2,1,6>, <1,5,3,7>
- 3692438720U, // <1,6,1,6>: Cost 4 vext2 <0,2,1,6>, <1,6,0,1>
- 2958183734U, // <1,6,1,7>: Cost 3 vzipr <0,u,1,1>, RHS
- 2958183735U, // <1,6,1,u>: Cost 3 vzipr <0,u,1,1>, RHS
- 2721526201U, // <1,6,2,0>: Cost 3 vext3 <6,2,0,1>, <6,2,0,1>
- 3692439097U, // <1,6,2,1>: Cost 4 vext2 <0,2,1,6>, <2,1,6,0>
- 3692439144U, // <1,6,2,2>: Cost 4 vext2 <0,2,1,6>, <2,2,2,2>
- 3692439206U, // <1,6,2,3>: Cost 4 vext2 <0,2,1,6>, <2,3,0,1>
- 3636948278U, // <1,6,2,4>: Cost 4 vext1 <2,1,6,2>, RHS
- 3787674092U, // <1,6,2,5>: Cost 4 vext3 <4,u,5,1>, <6,2,5,7>
- 2618697658U, // <1,6,2,6>: Cost 3 vext2 <0,2,1,6>, <2,6,3,7>
- 2970799414U, // <1,6,2,7>: Cost 3 vzipr <3,0,1,2>, RHS
- 2970799415U, // <1,6,2,u>: Cost 3 vzipr <3,0,1,2>, RHS
- 2563211366U, // <1,6,3,0>: Cost 3 vext1 <2,1,6,3>, LHS
- 3699738854U, // <1,6,3,1>: Cost 4 vext2 <1,4,1,6>, <3,1,1,1>
- 2563212860U, // <1,6,3,2>: Cost 3 vext1 <2,1,6,3>, <2,1,6,3>
- 3692439964U, // <1,6,3,3>: Cost 4 vext2 <0,2,1,6>, <3,3,3,3>
- 2563214646U, // <1,6,3,4>: Cost 3 vext1 <2,1,6,3>, RHS
- 4191820018U, // <1,6,3,5>: Cost 4 vtrnr <5,1,7,3>, <u,6,7,5>
- 2587103648U, // <1,6,3,6>: Cost 3 vext1 <6,1,6,3>, <6,1,6,3>
- 3087845306U, // <1,6,3,7>: Cost 3 vtrnr LHS, <2,6,3,7>
- 3087845307U, // <1,6,3,u>: Cost 3 vtrnr LHS, <2,6,3,u>
- 3693767570U, // <1,6,4,0>: Cost 4 vext2 <0,4,1,6>, <4,0,5,1>
- 3693767650U, // <1,6,4,1>: Cost 4 vext2 <0,4,1,6>, <4,1,5,0>
- 3636962877U, // <1,6,4,2>: Cost 4 vext1 <2,1,6,4>, <2,1,6,4>
- 3325088134U, // <1,6,4,3>: Cost 4 vrev <6,1,3,4>
- 3693767898U, // <1,6,4,4>: Cost 4 vext2 <0,4,1,6>, <4,4,5,5>
- 2618699062U, // <1,6,4,5>: Cost 3 vext2 <0,2,1,6>, RHS
- 3833670966U, // <1,6,4,6>: Cost 4 vuzpl <1,3,6,7>, RHS
- 4028632374U, // <1,6,4,7>: Cost 4 vzipr <0,3,1,4>, RHS
- 2618699305U, // <1,6,4,u>: Cost 3 vext2 <0,2,1,6>, RHS
- 3693768264U, // <1,6,5,0>: Cost 4 vext2 <0,4,1,6>, <5,0,1,2>
- 3630998373U, // <1,6,5,1>: Cost 4 vext1 <1,1,6,5>, <1,1,6,5>
- 3636971070U, // <1,6,5,2>: Cost 4 vext1 <2,1,6,5>, <2,1,6,5>
- 3642943767U, // <1,6,5,3>: Cost 4 vext1 <3,1,6,5>, <3,1,6,5>
- 3693768628U, // <1,6,5,4>: Cost 4 vext2 <0,4,1,6>, <5,4,5,6>
- 3732918276U, // <1,6,5,5>: Cost 4 vext2 <7,0,1,6>, <5,5,5,5>
- 2620690530U, // <1,6,5,6>: Cost 3 vext2 <0,5,1,6>, <5,6,7,0>
- 2955562294U, // <1,6,5,7>: Cost 3 vzipr <0,4,1,5>, RHS
- 2955562295U, // <1,6,5,u>: Cost 3 vzipr <0,4,1,5>, RHS
- 2724180733U, // <1,6,6,0>: Cost 3 vext3 <6,6,0,1>, <6,6,0,1>
- 3631006566U, // <1,6,6,1>: Cost 4 vext1 <1,1,6,6>, <1,1,6,6>
- 3631007674U, // <1,6,6,2>: Cost 4 vext1 <1,1,6,6>, <2,6,3,7>
- 3692442184U, // <1,6,6,3>: Cost 4 vext2 <0,2,1,6>, <6,3,7,0>
- 3631009078U, // <1,6,6,4>: Cost 4 vext1 <1,1,6,6>, RHS
- 3787674416U, // <1,6,6,5>: Cost 4 vext3 <4,u,5,1>, <6,6,5,7>
- 2713932600U, // <1,6,6,6>: Cost 3 vext3 <4,u,5,1>, <6,6,6,6>
- 2713932610U, // <1,6,6,7>: Cost 3 vext3 <4,u,5,1>, <6,6,7,7>
- 2713932619U, // <1,6,6,u>: Cost 3 vext3 <4,u,5,1>, <6,6,u,7>
- 1651102542U, // <1,6,7,0>: Cost 2 vext3 <6,7,0,1>, <6,7,0,1>
- 2724918103U, // <1,6,7,1>: Cost 3 vext3 <6,7,1,1>, <6,7,1,1>
- 2698302306U, // <1,6,7,2>: Cost 3 vext3 <2,3,0,1>, <6,7,2,3>
- 3642960153U, // <1,6,7,3>: Cost 4 vext1 <3,1,6,7>, <3,1,6,7>
- 2713932662U, // <1,6,7,4>: Cost 3 vext3 <4,u,5,1>, <6,7,4,5>
- 2725213051U, // <1,6,7,5>: Cost 3 vext3 <6,7,5,1>, <6,7,5,1>
- 2724844426U, // <1,6,7,6>: Cost 3 vext3 <6,7,0,1>, <6,7,6,7>
- 4035956022U, // <1,6,7,7>: Cost 4 vzipr <1,5,1,7>, RHS
- 1651692438U, // <1,6,7,u>: Cost 2 vext3 <6,7,u,1>, <6,7,u,1>
- 1651766175U, // <1,6,u,0>: Cost 2 vext3 <6,u,0,1>, <6,u,0,1>
- 2618701614U, // <1,6,u,1>: Cost 3 vext2 <0,2,1,6>, LHS
- 3135663508U, // <1,6,u,2>: Cost 3 vtrnr LHS, <4,6,u,2>
- 3692443580U, // <1,6,u,3>: Cost 4 vext2 <0,2,1,6>, <u,3,0,1>
- 2713932743U, // <1,6,u,4>: Cost 3 vext3 <4,u,5,1>, <6,u,4,5>
- 2618701978U, // <1,6,u,5>: Cost 3 vext2 <0,2,1,6>, RHS
- 2622683344U, // <1,6,u,6>: Cost 3 vext2 <0,u,1,6>, <u,6,3,7>
- 3087886266U, // <1,6,u,7>: Cost 3 vtrnr LHS, <2,6,3,7>
- 1652356071U, // <1,6,u,u>: Cost 2 vext3 <6,u,u,1>, <6,u,u,1>
- 2726171632U, // <1,7,0,0>: Cost 3 vext3 <7,0,0,1>, <7,0,0,1>
- 2626666598U, // <1,7,0,1>: Cost 3 vext2 <1,5,1,7>, LHS
- 3695100067U, // <1,7,0,2>: Cost 4 vext2 <0,6,1,7>, <0,2,0,1>
- 3707044102U, // <1,7,0,3>: Cost 4 vext2 <2,6,1,7>, <0,3,2,1>
- 2726466580U, // <1,7,0,4>: Cost 3 vext3 <7,0,4,1>, <7,0,4,1>
- 3654921933U, // <1,7,0,5>: Cost 4 vext1 <5,1,7,0>, <5,1,7,0>
- 2621358582U, // <1,7,0,6>: Cost 3 vext2 <0,6,1,7>, <0,6,1,7>
- 2622022215U, // <1,7,0,7>: Cost 3 vext2 <0,7,1,7>, <0,7,1,7>
- 2626667165U, // <1,7,0,u>: Cost 3 vext2 <1,5,1,7>, LHS
- 2593128550U, // <1,7,1,0>: Cost 3 vext1 <7,1,7,1>, LHS
- 2626667316U, // <1,7,1,1>: Cost 3 vext2 <1,5,1,7>, <1,1,1,1>
- 3700409238U, // <1,7,1,2>: Cost 4 vext2 <1,5,1,7>, <1,2,3,0>
- 2257294428U, // <1,7,1,3>: Cost 3 vrev <7,1,3,1>
- 2593131830U, // <1,7,1,4>: Cost 3 vext1 <7,1,7,1>, RHS
- 2626667646U, // <1,7,1,5>: Cost 3 vext2 <1,5,1,7>, <1,5,1,7>
- 2627331279U, // <1,7,1,6>: Cost 3 vext2 <1,6,1,7>, <1,6,1,7>
- 2593133696U, // <1,7,1,7>: Cost 3 vext1 <7,1,7,1>, <7,1,7,1>
- 2628658545U, // <1,7,1,u>: Cost 3 vext2 <1,u,1,7>, <1,u,1,7>
- 2587164774U, // <1,7,2,0>: Cost 3 vext1 <6,1,7,2>, LHS
- 3701073445U, // <1,7,2,1>: Cost 4 vext2 <1,6,1,7>, <2,1,3,7>
- 3700409960U, // <1,7,2,2>: Cost 4 vext2 <1,5,1,7>, <2,2,2,2>
- 2638612134U, // <1,7,2,3>: Cost 3 vext2 <3,5,1,7>, <2,3,0,1>
- 2587168054U, // <1,7,2,4>: Cost 3 vext1 <6,1,7,2>, RHS
- 3706382167U, // <1,7,2,5>: Cost 4 vext2 <2,5,1,7>, <2,5,1,7>
- 2587169192U, // <1,7,2,6>: Cost 3 vext1 <6,1,7,2>, <6,1,7,2>
- 3660911610U, // <1,7,2,7>: Cost 4 vext1 <6,1,7,2>, <7,0,1,2>
- 2587170606U, // <1,7,2,u>: Cost 3 vext1 <6,1,7,2>, LHS
- 1507459174U, // <1,7,3,0>: Cost 2 vext1 <5,1,7,3>, LHS
- 2569257984U, // <1,7,3,1>: Cost 3 vext1 <3,1,7,3>, <1,3,5,7>
- 2581202536U, // <1,7,3,2>: Cost 3 vext1 <5,1,7,3>, <2,2,2,2>
- 2569259294U, // <1,7,3,3>: Cost 3 vext1 <3,1,7,3>, <3,1,7,3>
- 1507462454U, // <1,7,3,4>: Cost 2 vext1 <5,1,7,3>, RHS
- 1507462864U, // <1,7,3,5>: Cost 2 vext1 <5,1,7,3>, <5,1,7,3>
- 2581205498U, // <1,7,3,6>: Cost 3 vext1 <5,1,7,3>, <6,2,7,3>
- 2581206010U, // <1,7,3,7>: Cost 3 vext1 <5,1,7,3>, <7,0,1,2>
- 1507465006U, // <1,7,3,u>: Cost 2 vext1 <5,1,7,3>, LHS
- 2728826164U, // <1,7,4,0>: Cost 3 vext3 <7,4,0,1>, <7,4,0,1>
- 3654951732U, // <1,7,4,1>: Cost 4 vext1 <5,1,7,4>, <1,1,1,1>
- 3330987094U, // <1,7,4,2>: Cost 4 vrev <7,1,2,4>
- 3331060831U, // <1,7,4,3>: Cost 4 vrev <7,1,3,4>
- 3787674971U, // <1,7,4,4>: Cost 4 vext3 <4,u,5,1>, <7,4,4,4>
- 2626669878U, // <1,7,4,5>: Cost 3 vext2 <1,5,1,7>, RHS
- 3785979241U, // <1,7,4,6>: Cost 4 vext3 <4,6,0,1>, <7,4,6,0>
- 3787085176U, // <1,7,4,7>: Cost 4 vext3 <4,7,6,1>, <7,4,7,6>
- 2626670121U, // <1,7,4,u>: Cost 3 vext2 <1,5,1,7>, RHS
- 2569273446U, // <1,7,5,0>: Cost 3 vext1 <3,1,7,5>, LHS
- 2569274368U, // <1,7,5,1>: Cost 3 vext1 <3,1,7,5>, <1,3,5,7>
- 3643016808U, // <1,7,5,2>: Cost 4 vext1 <3,1,7,5>, <2,2,2,2>
- 2569275680U, // <1,7,5,3>: Cost 3 vext1 <3,1,7,5>, <3,1,7,5>
- 2569276726U, // <1,7,5,4>: Cost 3 vext1 <3,1,7,5>, RHS
- 4102034790U, // <1,7,5,5>: Cost 4 vtrnl <1,3,5,7>, <7,4,5,6>
- 2651222067U, // <1,7,5,6>: Cost 3 vext2 <5,6,1,7>, <5,6,1,7>
- 3899378998U, // <1,7,5,7>: Cost 4 vuzpr <1,1,5,7>, RHS
- 2569279278U, // <1,7,5,u>: Cost 3 vext1 <3,1,7,5>, LHS
- 2730153430U, // <1,7,6,0>: Cost 3 vext3 <7,6,0,1>, <7,6,0,1>
- 2724845022U, // <1,7,6,1>: Cost 3 vext3 <6,7,0,1>, <7,6,1,0>
- 3643025338U, // <1,7,6,2>: Cost 4 vext1 <3,1,7,6>, <2,6,3,7>
- 3643025697U, // <1,7,6,3>: Cost 4 vext1 <3,1,7,6>, <3,1,7,6>
- 3643026742U, // <1,7,6,4>: Cost 4 vext1 <3,1,7,6>, RHS
- 3654971091U, // <1,7,6,5>: Cost 4 vext1 <5,1,7,6>, <5,1,7,6>
- 3787675153U, // <1,7,6,6>: Cost 4 vext3 <4,u,5,1>, <7,6,6,6>
- 2724845076U, // <1,7,6,7>: Cost 3 vext3 <6,7,0,1>, <7,6,7,0>
- 2725508637U, // <1,7,6,u>: Cost 3 vext3 <6,u,0,1>, <7,6,u,0>
- 2730817063U, // <1,7,7,0>: Cost 3 vext3 <7,7,0,1>, <7,7,0,1>
- 3631088436U, // <1,7,7,1>: Cost 4 vext1 <1,1,7,7>, <1,1,1,1>
- 3660949158U, // <1,7,7,2>: Cost 4 vext1 <6,1,7,7>, <2,3,0,1>
- 3801904705U, // <1,7,7,3>: Cost 4 vext3 <7,3,0,1>, <7,7,3,0>
- 3631090998U, // <1,7,7,4>: Cost 4 vext1 <1,1,7,7>, RHS
- 2662503828U, // <1,7,7,5>: Cost 3 vext2 <7,5,1,7>, <7,5,1,7>
- 3660951981U, // <1,7,7,6>: Cost 4 vext1 <6,1,7,7>, <6,1,7,7>
- 2713933420U, // <1,7,7,7>: Cost 3 vext3 <4,u,5,1>, <7,7,7,7>
- 2731406959U, // <1,7,7,u>: Cost 3 vext3 <7,7,u,1>, <7,7,u,1>
- 1507500134U, // <1,7,u,0>: Cost 2 vext1 <5,1,7,u>, LHS
- 2626672430U, // <1,7,u,1>: Cost 3 vext2 <1,5,1,7>, LHS
- 2581243496U, // <1,7,u,2>: Cost 3 vext1 <5,1,7,u>, <2,2,2,2>
- 2569300259U, // <1,7,u,3>: Cost 3 vext1 <3,1,7,u>, <3,1,7,u>
- 1507503414U, // <1,7,u,4>: Cost 2 vext1 <5,1,7,u>, RHS
- 1507503829U, // <1,7,u,5>: Cost 2 vext1 <5,1,7,u>, <5,1,7,u>
- 2581246458U, // <1,7,u,6>: Cost 3 vext1 <5,1,7,u>, <6,2,7,3>
- 2581246970U, // <1,7,u,7>: Cost 3 vext1 <5,1,7,u>, <7,0,1,2>
- 1507505966U, // <1,7,u,u>: Cost 2 vext1 <5,1,7,u>, LHS
- 1543643153U, // <1,u,0,0>: Cost 2 vext2 <0,0,1,u>, <0,0,1,u>
- 1546297446U, // <1,u,0,1>: Cost 2 vext2 <0,4,1,u>, LHS
- 2819448852U, // <1,u,0,2>: Cost 3 vuzpr LHS, <0,0,2,2>
- 2619375876U, // <1,u,0,3>: Cost 3 vext2 <0,3,1,u>, <0,3,1,u>
- 1546297685U, // <1,u,0,4>: Cost 2 vext2 <0,4,1,u>, <0,4,1,u>
- 1658771190U, // <1,u,0,5>: Cost 2 vext3 <u,0,5,1>, <u,0,5,1>
- 2736789248U, // <1,u,0,6>: Cost 3 vext3 <u,7,0,1>, <u,0,6,2>
- 2659189376U, // <1,u,0,7>: Cost 3 vext2 <7,0,1,u>, <0,7,u,1>
- 1546298013U, // <1,u,0,u>: Cost 2 vext2 <0,4,1,u>, LHS
- 1483112550U, // <1,u,1,0>: Cost 2 vext1 <1,1,1,1>, LHS
- 202162278U, // <1,u,1,1>: Cost 1 vdup1 LHS
- 1616009006U, // <1,u,1,2>: Cost 2 vext3 <0,u,1,1>, LHS
- 1745707110U, // <1,u,1,3>: Cost 2 vuzpr LHS, LHS
- 1483115830U, // <1,u,1,4>: Cost 2 vext1 <1,1,1,1>, RHS
- 2620040336U, // <1,u,1,5>: Cost 3 vext2 <0,4,1,u>, <1,5,3,7>
- 3026622618U, // <1,u,1,6>: Cost 3 vtrnl <1,1,1,1>, RHS
- 2958183752U, // <1,u,1,7>: Cost 3 vzipr <0,u,1,1>, RHS
- 202162278U, // <1,u,1,u>: Cost 1 vdup1 LHS
- 2819449750U, // <1,u,2,0>: Cost 3 vuzpr LHS, <1,2,3,0>
- 2893207342U, // <1,u,2,1>: Cost 3 vzipl <1,2,3,0>, LHS
- 2819448996U, // <1,u,2,2>: Cost 3 vuzpr LHS, <0,2,0,2>
- 2819450482U, // <1,u,2,3>: Cost 3 vuzpr LHS, <2,2,3,3>
- 2819449754U, // <1,u,2,4>: Cost 3 vuzpr LHS, <1,2,3,4>
- 2893207706U, // <1,u,2,5>: Cost 3 vzipl <1,2,3,0>, RHS
- 2819449036U, // <1,u,2,6>: Cost 3 vuzpr LHS, <0,2,4,6>
- 2970799432U, // <1,u,2,7>: Cost 3 vzipr <3,0,1,2>, RHS
- 2819449002U, // <1,u,2,u>: Cost 3 vuzpr LHS, <0,2,0,u>
- 403931292U, // <1,u,3,0>: Cost 1 vext1 LHS, LHS
- 1477673718U, // <1,u,3,1>: Cost 2 vext1 LHS, <1,0,3,2>
- 115726126U, // <1,u,3,2>: Cost 1 vrev LHS
- 2014102173U, // <1,u,3,3>: Cost 2 vtrnr LHS, LHS
- 403934518U, // <1,u,3,4>: Cost 1 vext1 LHS, RHS
- 1507536601U, // <1,u,3,5>: Cost 2 vext1 <5,1,u,3>, <5,1,u,3>
- 1525453306U, // <1,u,3,6>: Cost 2 vext1 LHS, <6,2,7,3>
- 2014105129U, // <1,u,3,7>: Cost 2 vtrnr LHS, RHS
- 403937070U, // <1,u,3,u>: Cost 1 vext1 LHS, LHS
- 2620042157U, // <1,u,4,0>: Cost 3 vext2 <0,4,1,u>, <4,0,u,1>
- 2620042237U, // <1,u,4,1>: Cost 3 vext2 <0,4,1,u>, <4,1,u,0>
- 2263217967U, // <1,u,4,2>: Cost 3 vrev <u,1,2,4>
- 2569341224U, // <1,u,4,3>: Cost 3 vext1 <3,1,u,4>, <3,1,u,4>
- 2569342262U, // <1,u,4,4>: Cost 3 vext1 <3,1,u,4>, RHS
- 1546300726U, // <1,u,4,5>: Cost 2 vext2 <0,4,1,u>, RHS
- 2819449180U, // <1,u,4,6>: Cost 3 vuzpr LHS, <0,4,2,6>
- 2724845649U, // <1,u,4,7>: Cost 3 vext3 <6,7,0,1>, <u,4,7,6>
- 1546300969U, // <1,u,4,u>: Cost 2 vext2 <0,4,1,u>, RHS
- 2551431270U, // <1,u,5,0>: Cost 3 vext1 <0,1,u,5>, LHS
- 2551432192U, // <1,u,5,1>: Cost 3 vext1 <0,1,u,5>, <1,3,5,7>
- 3028293422U, // <1,u,5,2>: Cost 3 vtrnl <1,3,5,7>, LHS
- 2955559068U, // <1,u,5,3>: Cost 3 vzipr <0,4,1,5>, LHS
- 2551434550U, // <1,u,5,4>: Cost 3 vext1 <0,1,u,5>, RHS
- 2895255706U, // <1,u,5,5>: Cost 3 vzipl <1,5,3,7>, RHS
- 1616009370U, // <1,u,5,6>: Cost 2 vext3 <0,u,1,1>, RHS
- 1745710390U, // <1,u,5,7>: Cost 2 vuzpr LHS, RHS
- 1745710391U, // <1,u,5,u>: Cost 2 vuzpr LHS, RHS
- 2653221159U, // <1,u,6,0>: Cost 3 vext2 <6,0,1,u>, <6,0,1,u>
- 2725509303U, // <1,u,6,1>: Cost 3 vext3 <6,u,0,1>, <u,6,1,0>
- 2659193338U, // <1,u,6,2>: Cost 3 vext2 <7,0,1,u>, <6,2,7,3>
- 2689751248U, // <1,u,6,3>: Cost 3 vext3 <0,u,1,1>, <u,6,3,7>
- 2867228774U, // <1,u,6,4>: Cost 3 vuzpr LHS, <5,6,7,4>
- 3764820194U, // <1,u,6,5>: Cost 4 vext3 <1,1,1,1>, <u,6,5,7>
- 2657202957U, // <1,u,6,6>: Cost 3 vext2 <6,6,1,u>, <6,6,1,u>
- 2819450810U, // <1,u,6,7>: Cost 3 vuzpr LHS, <2,6,3,7>
- 2819450811U, // <1,u,6,u>: Cost 3 vuzpr LHS, <2,6,3,u>
- 1585452032U, // <1,u,7,0>: Cost 2 vext2 <7,0,1,u>, <7,0,1,u>
- 2557420340U, // <1,u,7,1>: Cost 3 vext1 <1,1,u,7>, <1,1,1,1>
- 2569365158U, // <1,u,7,2>: Cost 3 vext1 <3,1,u,7>, <2,3,0,1>
- 2569365803U, // <1,u,7,3>: Cost 3 vext1 <3,1,u,7>, <3,1,u,7>
- 2557422902U, // <1,u,7,4>: Cost 3 vext1 <1,1,u,7>, RHS
- 2662512021U, // <1,u,7,5>: Cost 3 vext2 <7,5,1,u>, <7,5,1,u>
- 2724845884U, // <1,u,7,6>: Cost 3 vext3 <6,7,0,1>, <u,7,6,7>
- 2659194476U, // <1,u,7,7>: Cost 3 vext2 <7,0,1,u>, <7,7,7,7>
- 1590761096U, // <1,u,7,u>: Cost 2 vext2 <7,u,1,u>, <7,u,1,u>
- 403972257U, // <1,u,u,0>: Cost 1 vext1 LHS, LHS
- 202162278U, // <1,u,u,1>: Cost 1 vdup1 LHS
- 115767091U, // <1,u,u,2>: Cost 1 vrev LHS
- 1745707677U, // <1,u,u,3>: Cost 2 vuzpr LHS, LHS
- 403975478U, // <1,u,u,4>: Cost 1 vext1 LHS, RHS
- 1546303642U, // <1,u,u,5>: Cost 2 vext2 <0,4,1,u>, RHS
- 1616009613U, // <1,u,u,6>: Cost 2 vext3 <0,u,1,1>, RHS
- 1745710633U, // <1,u,u,7>: Cost 2 vuzpr LHS, RHS
- 403978030U, // <1,u,u,u>: Cost 1 vext1 LHS, LHS
- 2551463936U, // <2,0,0,0>: Cost 3 vext1 <0,2,0,0>, <0,0,0,0>
- 2685698058U, // <2,0,0,1>: Cost 3 vext3 <0,2,0,2>, <0,0,1,1>
- 1610776596U, // <2,0,0,2>: Cost 2 vext3 <0,0,2,2>, <0,0,2,2>
- 2619384069U, // <2,0,0,3>: Cost 3 vext2 <0,3,2,0>, <0,3,2,0>
- 2551467318U, // <2,0,0,4>: Cost 3 vext1 <0,2,0,0>, RHS
- 3899836596U, // <2,0,0,5>: Cost 4 vuzpr <1,2,3,0>, <3,0,4,5>
- 2621374968U, // <2,0,0,6>: Cost 3 vext2 <0,6,2,0>, <0,6,2,0>
- 4168271334U, // <2,0,0,7>: Cost 4 vtrnr <1,2,3,0>, <2,0,5,7>
- 1611219018U, // <2,0,0,u>: Cost 2 vext3 <0,0,u,2>, <0,0,u,2>
- 2551472138U, // <2,0,1,0>: Cost 3 vext1 <0,2,0,1>, <0,0,1,1>
- 2690564186U, // <2,0,1,1>: Cost 3 vext3 <1,0,3,2>, <0,1,1,0>
- 1611956326U, // <2,0,1,2>: Cost 2 vext3 <0,2,0,2>, LHS
- 2826092646U, // <2,0,1,3>: Cost 3 vuzpr <1,2,3,0>, LHS
- 2551475510U, // <2,0,1,4>: Cost 3 vext1 <0,2,0,1>, RHS
- 3692463248U, // <2,0,1,5>: Cost 4 vext2 <0,2,2,0>, <1,5,3,7>
- 2587308473U, // <2,0,1,6>: Cost 3 vext1 <6,2,0,1>, <6,2,0,1>
- 3661050874U, // <2,0,1,7>: Cost 4 vext1 <6,2,0,1>, <7,0,1,2>
- 1611956380U, // <2,0,1,u>: Cost 2 vext3 <0,2,0,2>, LHS
- 1477738598U, // <2,0,2,0>: Cost 2 vext1 <0,2,0,2>, LHS
- 2551481078U, // <2,0,2,1>: Cost 3 vext1 <0,2,0,2>, <1,0,3,2>
- 2551481796U, // <2,0,2,2>: Cost 3 vext1 <0,2,0,2>, <2,0,2,0>
- 2551482518U, // <2,0,2,3>: Cost 3 vext1 <0,2,0,2>, <3,0,1,2>
- 1477741878U, // <2,0,2,4>: Cost 2 vext1 <0,2,0,2>, RHS
- 2551484112U, // <2,0,2,5>: Cost 3 vext1 <0,2,0,2>, <5,1,7,3>
- 2551484759U, // <2,0,2,6>: Cost 3 vext1 <0,2,0,2>, <6,0,7,2>
- 2551485434U, // <2,0,2,7>: Cost 3 vext1 <0,2,0,2>, <7,0,1,2>
- 1477744430U, // <2,0,2,u>: Cost 2 vext1 <0,2,0,2>, LHS
- 2953625600U, // <2,0,3,0>: Cost 3 vzipr LHS, <0,0,0,0>
- 2953627302U, // <2,0,3,1>: Cost 3 vzipr LHS, <2,3,0,1>
- 2953625764U, // <2,0,3,2>: Cost 3 vzipr LHS, <0,2,0,2>
- 4027369695U, // <2,0,3,3>: Cost 4 vzipr LHS, <3,1,0,3>
- 3625233718U, // <2,0,3,4>: Cost 4 vext1 <0,2,0,3>, RHS
- 3899836110U, // <2,0,3,5>: Cost 4 vuzpr <1,2,3,0>, <2,3,4,5>
- 4032012618U, // <2,0,3,6>: Cost 4 vzipr LHS, <0,4,0,6>
- 3899835392U, // <2,0,3,7>: Cost 4 vuzpr <1,2,3,0>, <1,3,5,7>
- 2953625770U, // <2,0,3,u>: Cost 3 vzipr LHS, <0,2,0,u>
- 2551496806U, // <2,0,4,0>: Cost 3 vext1 <0,2,0,4>, LHS
- 2685698386U, // <2,0,4,1>: Cost 3 vext3 <0,2,0,2>, <0,4,1,5>
- 2685698396U, // <2,0,4,2>: Cost 3 vext3 <0,2,0,2>, <0,4,2,6>
- 3625240726U, // <2,0,4,3>: Cost 4 vext1 <0,2,0,4>, <3,0,1,2>
- 2551500086U, // <2,0,4,4>: Cost 3 vext1 <0,2,0,4>, RHS
- 2618723638U, // <2,0,4,5>: Cost 3 vext2 <0,2,2,0>, RHS
- 2765409590U, // <2,0,4,6>: Cost 3 vuzpl <2,3,0,1>, RHS
- 3799990664U, // <2,0,4,7>: Cost 4 vext3 <7,0,1,2>, <0,4,7,5>
- 2685698450U, // <2,0,4,u>: Cost 3 vext3 <0,2,0,2>, <0,4,u,6>
- 3625246822U, // <2,0,5,0>: Cost 4 vext1 <0,2,0,5>, LHS
- 3289776304U, // <2,0,5,1>: Cost 4 vrev <0,2,1,5>
- 2690564526U, // <2,0,5,2>: Cost 3 vext3 <1,0,3,2>, <0,5,2,7>
- 3289923778U, // <2,0,5,3>: Cost 4 vrev <0,2,3,5>
- 2216255691U, // <2,0,5,4>: Cost 3 vrev <0,2,4,5>
- 3726307332U, // <2,0,5,5>: Cost 4 vext2 <5,u,2,0>, <5,5,5,5>
- 3726307426U, // <2,0,5,6>: Cost 4 vext2 <5,u,2,0>, <5,6,7,0>
- 2826095926U, // <2,0,5,7>: Cost 3 vuzpr <1,2,3,0>, RHS
- 2216550639U, // <2,0,5,u>: Cost 3 vrev <0,2,u,5>
- 4162420736U, // <2,0,6,0>: Cost 4 vtrnr <0,2,4,6>, <0,0,0,0>
- 2901885030U, // <2,0,6,1>: Cost 3 vzipl <2,6,3,7>, LHS
- 2685698559U, // <2,0,6,2>: Cost 3 vext3 <0,2,0,2>, <0,6,2,7>
- 3643173171U, // <2,0,6,3>: Cost 4 vext1 <3,2,0,6>, <3,2,0,6>
- 2216263884U, // <2,0,6,4>: Cost 3 vrev <0,2,4,6>
- 3730289341U, // <2,0,6,5>: Cost 4 vext2 <6,5,2,0>, <6,5,2,0>
- 3726308152U, // <2,0,6,6>: Cost 4 vext2 <5,u,2,0>, <6,6,6,6>
- 3899836346U, // <2,0,6,7>: Cost 4 vuzpr <1,2,3,0>, <2,6,3,7>
- 2216558832U, // <2,0,6,u>: Cost 3 vrev <0,2,u,6>
- 2659202049U, // <2,0,7,0>: Cost 3 vext2 <7,0,2,0>, <7,0,2,0>
- 3726308437U, // <2,0,7,1>: Cost 4 vext2 <5,u,2,0>, <7,1,2,3>
- 2726249034U, // <2,0,7,2>: Cost 3 vext3 <7,0,1,2>, <0,7,2,1>
- 3734934772U, // <2,0,7,3>: Cost 4 vext2 <7,3,2,0>, <7,3,2,0>
- 3726308710U, // <2,0,7,4>: Cost 4 vext2 <5,u,2,0>, <7,4,5,6>
- 3726308814U, // <2,0,7,5>: Cost 4 vext2 <5,u,2,0>, <7,5,u,2>
- 3736925671U, // <2,0,7,6>: Cost 4 vext2 <7,6,2,0>, <7,6,2,0>
- 3726308972U, // <2,0,7,7>: Cost 4 vext2 <5,u,2,0>, <7,7,7,7>
- 2659202049U, // <2,0,7,u>: Cost 3 vext2 <7,0,2,0>, <7,0,2,0>
- 1477787750U, // <2,0,u,0>: Cost 2 vext1 <0,2,0,u>, LHS
- 2953668262U, // <2,0,u,1>: Cost 3 vzipr LHS, <2,3,0,1>
- 1611956893U, // <2,0,u,2>: Cost 2 vext3 <0,2,0,2>, LHS
- 2551531670U, // <2,0,u,3>: Cost 3 vext1 <0,2,0,u>, <3,0,1,2>
- 1477791030U, // <2,0,u,4>: Cost 2 vext1 <0,2,0,u>, RHS
- 2618726554U, // <2,0,u,5>: Cost 3 vext2 <0,2,2,0>, RHS
- 2765412506U, // <2,0,u,6>: Cost 3 vuzpl <2,3,0,1>, RHS
- 2826096169U, // <2,0,u,7>: Cost 3 vuzpr <1,2,3,0>, RHS
- 1611956947U, // <2,0,u,u>: Cost 2 vext3 <0,2,0,2>, LHS
- 2569453670U, // <2,1,0,0>: Cost 3 vext1 <3,2,1,0>, LHS
- 2619392102U, // <2,1,0,1>: Cost 3 vext2 <0,3,2,1>, LHS
- 3759440619U, // <2,1,0,2>: Cost 4 vext3 <0,2,0,2>, <1,0,2,0>
- 1616823030U, // <2,1,0,3>: Cost 2 vext3 <1,0,3,2>, <1,0,3,2>
- 2569456950U, // <2,1,0,4>: Cost 3 vext1 <3,2,1,0>, RHS
- 2690712328U, // <2,1,0,5>: Cost 3 vext3 <1,0,5,2>, <1,0,5,2>
- 3661115841U, // <2,1,0,6>: Cost 4 vext1 <6,2,1,0>, <6,2,1,0>
- 2622046794U, // <2,1,0,7>: Cost 3 vext2 <0,7,2,1>, <0,7,2,1>
- 1617191715U, // <2,1,0,u>: Cost 2 vext3 <1,0,u,2>, <1,0,u,2>
- 2551545958U, // <2,1,1,0>: Cost 3 vext1 <0,2,1,1>, LHS
- 2685698868U, // <2,1,1,1>: Cost 3 vext3 <0,2,0,2>, <1,1,1,1>
- 2628682646U, // <2,1,1,2>: Cost 3 vext2 <1,u,2,1>, <1,2,3,0>
- 2685698888U, // <2,1,1,3>: Cost 3 vext3 <0,2,0,2>, <1,1,3,3>
- 2551549238U, // <2,1,1,4>: Cost 3 vext1 <0,2,1,1>, RHS
- 3693134992U, // <2,1,1,5>: Cost 4 vext2 <0,3,2,1>, <1,5,3,7>
- 3661124034U, // <2,1,1,6>: Cost 4 vext1 <6,2,1,1>, <6,2,1,1>
- 3625292794U, // <2,1,1,7>: Cost 4 vext1 <0,2,1,1>, <7,0,1,2>
- 2685698933U, // <2,1,1,u>: Cost 3 vext3 <0,2,0,2>, <1,1,u,3>
- 2551554150U, // <2,1,2,0>: Cost 3 vext1 <0,2,1,2>, LHS
- 3893649571U, // <2,1,2,1>: Cost 4 vuzpr <0,2,0,1>, <0,2,0,1>
- 2551555688U, // <2,1,2,2>: Cost 3 vext1 <0,2,1,2>, <2,2,2,2>
- 2685698966U, // <2,1,2,3>: Cost 3 vext3 <0,2,0,2>, <1,2,3,0>
- 2551557430U, // <2,1,2,4>: Cost 3 vext1 <0,2,1,2>, RHS
- 3763422123U, // <2,1,2,5>: Cost 4 vext3 <0,u,0,2>, <1,2,5,3>
- 3693135802U, // <2,1,2,6>: Cost 4 vext2 <0,3,2,1>, <2,6,3,7>
- 2726249402U, // <2,1,2,7>: Cost 3 vext3 <7,0,1,2>, <1,2,7,0>
- 2685699011U, // <2,1,2,u>: Cost 3 vext3 <0,2,0,2>, <1,2,u,0>
- 2551562342U, // <2,1,3,0>: Cost 3 vext1 <0,2,1,3>, LHS
- 2953625610U, // <2,1,3,1>: Cost 3 vzipr LHS, <0,0,1,1>
- 2953627798U, // <2,1,3,2>: Cost 3 vzipr LHS, <3,0,1,2>
- 2953626584U, // <2,1,3,3>: Cost 3 vzipr LHS, <1,3,1,3>
- 2551565622U, // <2,1,3,4>: Cost 3 vext1 <0,2,1,3>, RHS
- 2953625938U, // <2,1,3,5>: Cost 3 vzipr LHS, <0,4,1,5>
- 2587398596U, // <2,1,3,6>: Cost 3 vext1 <6,2,1,3>, <6,2,1,3>
- 4032013519U, // <2,1,3,7>: Cost 4 vzipr LHS, <1,6,1,7>
- 2953625617U, // <2,1,3,u>: Cost 3 vzipr LHS, <0,0,1,u>
- 2690565154U, // <2,1,4,0>: Cost 3 vext3 <1,0,3,2>, <1,4,0,5>
- 3625313270U, // <2,1,4,1>: Cost 4 vext1 <0,2,1,4>, <1,3,4,6>
- 3771532340U, // <2,1,4,2>: Cost 4 vext3 <2,2,2,2>, <1,4,2,5>
- 1148404634U, // <2,1,4,3>: Cost 2 vrev <1,2,3,4>
- 3625315638U, // <2,1,4,4>: Cost 4 vext1 <0,2,1,4>, RHS
- 2619395382U, // <2,1,4,5>: Cost 3 vext2 <0,3,2,1>, RHS
- 3837242678U, // <2,1,4,6>: Cost 4 vuzpl <2,0,1,2>, RHS
- 3799991394U, // <2,1,4,7>: Cost 4 vext3 <7,0,1,2>, <1,4,7,6>
- 1148773319U, // <2,1,4,u>: Cost 2 vrev <1,2,u,4>
- 2551578726U, // <2,1,5,0>: Cost 3 vext1 <0,2,1,5>, LHS
- 2551579648U, // <2,1,5,1>: Cost 3 vext1 <0,2,1,5>, <1,3,5,7>
- 3625321952U, // <2,1,5,2>: Cost 4 vext1 <0,2,1,5>, <2,0,5,1>
- 2685699216U, // <2,1,5,3>: Cost 3 vext3 <0,2,0,2>, <1,5,3,7>
- 2551582006U, // <2,1,5,4>: Cost 3 vext1 <0,2,1,5>, RHS
- 3740913668U, // <2,1,5,5>: Cost 4 vext2 <u,3,2,1>, <5,5,5,5>
- 3661156806U, // <2,1,5,6>: Cost 4 vext1 <6,2,1,5>, <6,2,1,5>
- 3893652790U, // <2,1,5,7>: Cost 4 vuzpr <0,2,0,1>, RHS
- 2685699261U, // <2,1,5,u>: Cost 3 vext3 <0,2,0,2>, <1,5,u,7>
- 2551586918U, // <2,1,6,0>: Cost 3 vext1 <0,2,1,6>, LHS
- 3625329398U, // <2,1,6,1>: Cost 4 vext1 <0,2,1,6>, <1,0,3,2>
- 2551588794U, // <2,1,6,2>: Cost 3 vext1 <0,2,1,6>, <2,6,3,7>
- 3088679014U, // <2,1,6,3>: Cost 3 vtrnr <0,2,4,6>, LHS
- 2551590198U, // <2,1,6,4>: Cost 3 vext1 <0,2,1,6>, RHS
- 4029382994U, // <2,1,6,5>: Cost 4 vzipr <0,4,2,6>, <0,4,1,5>
- 3625333560U, // <2,1,6,6>: Cost 4 vext1 <0,2,1,6>, <6,6,6,6>
- 3731624800U, // <2,1,6,7>: Cost 4 vext2 <6,7,2,1>, <6,7,2,1>
- 2551592750U, // <2,1,6,u>: Cost 3 vext1 <0,2,1,6>, LHS
- 2622051322U, // <2,1,7,0>: Cost 3 vext2 <0,7,2,1>, <7,0,1,2>
- 3733615699U, // <2,1,7,1>: Cost 4 vext2 <7,1,2,1>, <7,1,2,1>
- 3795125538U, // <2,1,7,2>: Cost 4 vext3 <6,1,7,2>, <1,7,2,0>
- 2222171037U, // <2,1,7,3>: Cost 3 vrev <1,2,3,7>
- 3740915046U, // <2,1,7,4>: Cost 4 vext2 <u,3,2,1>, <7,4,5,6>
- 3296060335U, // <2,1,7,5>: Cost 4 vrev <1,2,5,7>
- 3736933864U, // <2,1,7,6>: Cost 4 vext2 <7,6,2,1>, <7,6,2,1>
- 3805300055U, // <2,1,7,7>: Cost 4 vext3 <7,u,1,2>, <1,7,7,u>
- 2669827714U, // <2,1,7,u>: Cost 3 vext2 <u,7,2,1>, <7,u,1,2>
- 2551603302U, // <2,1,u,0>: Cost 3 vext1 <0,2,1,u>, LHS
- 2953666570U, // <2,1,u,1>: Cost 3 vzipr LHS, <0,0,1,1>
- 2953668758U, // <2,1,u,2>: Cost 3 vzipr LHS, <3,0,1,2>
- 1148437406U, // <2,1,u,3>: Cost 2 vrev <1,2,3,u>
- 2551606582U, // <2,1,u,4>: Cost 3 vext1 <0,2,1,u>, RHS
- 2953666898U, // <2,1,u,5>: Cost 3 vzipr LHS, <0,4,1,5>
- 2587398596U, // <2,1,u,6>: Cost 3 vext1 <6,2,1,3>, <6,2,1,3>
- 2669828370U, // <2,1,u,7>: Cost 3 vext2 <u,7,2,1>, <u,7,2,1>
- 1148806091U, // <2,1,u,u>: Cost 2 vrev <1,2,u,u>
- 1543667732U, // <2,2,0,0>: Cost 2 vext2 <0,0,2,2>, <0,0,2,2>
- 1548976230U, // <2,2,0,1>: Cost 2 vext2 <0,u,2,2>, LHS
- 2685699524U, // <2,2,0,2>: Cost 3 vext3 <0,2,0,2>, <2,0,2,0>
- 2685699535U, // <2,2,0,3>: Cost 3 vext3 <0,2,0,2>, <2,0,3,2>
- 2551614774U, // <2,2,0,4>: Cost 3 vext1 <0,2,2,0>, RHS
- 3704422830U, // <2,2,0,5>: Cost 4 vext2 <2,2,2,2>, <0,5,2,7>
- 3893657642U, // <2,2,0,6>: Cost 4 vuzpr <0,2,0,2>, <0,0,4,6>
- 3770574323U, // <2,2,0,7>: Cost 4 vext3 <2,0,7,2>, <2,0,7,2>
- 1548976796U, // <2,2,0,u>: Cost 2 vext2 <0,u,2,2>, <0,u,2,2>
- 2622718710U, // <2,2,1,0>: Cost 3 vext2 <0,u,2,2>, <1,0,3,2>
- 2622718772U, // <2,2,1,1>: Cost 3 vext2 <0,u,2,2>, <1,1,1,1>
- 2622718870U, // <2,2,1,2>: Cost 3 vext2 <0,u,2,2>, <1,2,3,0>
- 2819915878U, // <2,2,1,3>: Cost 3 vuzpr <0,2,0,2>, LHS
- 3625364790U, // <2,2,1,4>: Cost 4 vext1 <0,2,2,1>, RHS
- 2622719120U, // <2,2,1,5>: Cost 3 vext2 <0,u,2,2>, <1,5,3,7>
- 3760031292U, // <2,2,1,6>: Cost 4 vext3 <0,2,u,2>, <2,1,6,3>
- 3667170468U, // <2,2,1,7>: Cost 4 vext1 <7,2,2,1>, <7,2,2,1>
- 2819915883U, // <2,2,1,u>: Cost 3 vuzpr <0,2,0,2>, LHS
- 1489829990U, // <2,2,2,0>: Cost 2 vext1 <2,2,2,2>, LHS
- 2563572470U, // <2,2,2,1>: Cost 3 vext1 <2,2,2,2>, <1,0,3,2>
- 269271142U, // <2,2,2,2>: Cost 1 vdup2 LHS
- 2685699698U, // <2,2,2,3>: Cost 3 vext3 <0,2,0,2>, <2,2,3,3>
- 1489833270U, // <2,2,2,4>: Cost 2 vext1 <2,2,2,2>, RHS
- 2685699720U, // <2,2,2,5>: Cost 3 vext3 <0,2,0,2>, <2,2,5,7>
- 2622719930U, // <2,2,2,6>: Cost 3 vext2 <0,u,2,2>, <2,6,3,7>
- 2593436837U, // <2,2,2,7>: Cost 3 vext1 <7,2,2,2>, <7,2,2,2>
- 269271142U, // <2,2,2,u>: Cost 1 vdup2 LHS
- 2685699750U, // <2,2,3,0>: Cost 3 vext3 <0,2,0,2>, <2,3,0,1>
- 2690565806U, // <2,2,3,1>: Cost 3 vext3 <1,0,3,2>, <2,3,1,0>
- 2953627240U, // <2,2,3,2>: Cost 3 vzipr LHS, <2,2,2,2>
- 1879883878U, // <2,2,3,3>: Cost 2 vzipr LHS, LHS
- 2685699790U, // <2,2,3,4>: Cost 3 vext3 <0,2,0,2>, <2,3,4,5>
- 3893659342U, // <2,2,3,5>: Cost 4 vuzpr <0,2,0,2>, <2,3,4,5>
- 2958270812U, // <2,2,3,6>: Cost 3 vzipr LHS, <0,4,2,6>
- 2593445030U, // <2,2,3,7>: Cost 3 vext1 <7,2,2,3>, <7,2,2,3>
- 1879883883U, // <2,2,3,u>: Cost 2 vzipr LHS, LHS
- 2551644262U, // <2,2,4,0>: Cost 3 vext1 <0,2,2,4>, LHS
- 3625386742U, // <2,2,4,1>: Cost 4 vext1 <0,2,2,4>, <1,0,3,2>
- 2551645902U, // <2,2,4,2>: Cost 3 vext1 <0,2,2,4>, <2,3,4,5>
- 3759441686U, // <2,2,4,3>: Cost 4 vext3 <0,2,0,2>, <2,4,3,5>
- 2551647542U, // <2,2,4,4>: Cost 3 vext1 <0,2,2,4>, RHS
- 1548979510U, // <2,2,4,5>: Cost 2 vext2 <0,u,2,2>, RHS
- 2764901686U, // <2,2,4,6>: Cost 3 vuzpl <2,2,2,2>, RHS
- 3667195047U, // <2,2,4,7>: Cost 4 vext1 <7,2,2,4>, <7,2,2,4>
- 1548979753U, // <2,2,4,u>: Cost 2 vext2 <0,u,2,2>, RHS
- 3696463432U, // <2,2,5,0>: Cost 4 vext2 <0,u,2,2>, <5,0,1,2>
- 2617413328U, // <2,2,5,1>: Cost 3 vext2 <0,0,2,2>, <5,1,7,3>
- 2685699936U, // <2,2,5,2>: Cost 3 vext3 <0,2,0,2>, <2,5,2,7>
- 4027383910U, // <2,2,5,3>: Cost 4 vzipr <0,1,2,5>, LHS
- 2228201085U, // <2,2,5,4>: Cost 3 vrev <2,2,4,5>
- 2617413636U, // <2,2,5,5>: Cost 3 vext2 <0,0,2,2>, <5,5,5,5>
- 2617413730U, // <2,2,5,6>: Cost 3 vext2 <0,0,2,2>, <5,6,7,0>
- 2819919158U, // <2,2,5,7>: Cost 3 vuzpr <0,2,0,2>, RHS
- 2819919159U, // <2,2,5,u>: Cost 3 vuzpr <0,2,0,2>, RHS
- 3625402554U, // <2,2,6,0>: Cost 4 vext1 <0,2,2,6>, <0,2,2,6>
- 3760031652U, // <2,2,6,1>: Cost 4 vext3 <0,2,u,2>, <2,6,1,3>
- 2617414138U, // <2,2,6,2>: Cost 3 vext2 <0,0,2,2>, <6,2,7,3>
- 2685700026U, // <2,2,6,3>: Cost 3 vext3 <0,2,0,2>, <2,6,3,7>
- 3625405750U, // <2,2,6,4>: Cost 4 vext1 <0,2,2,6>, RHS
- 3760031692U, // <2,2,6,5>: Cost 4 vext3 <0,2,u,2>, <2,6,5,7>
- 3088679116U, // <2,2,6,6>: Cost 3 vtrnr <0,2,4,6>, <0,2,4,6>
- 2657891169U, // <2,2,6,7>: Cost 3 vext2 <6,7,2,2>, <6,7,2,2>
- 2685700071U, // <2,2,6,u>: Cost 3 vext3 <0,2,0,2>, <2,6,u,7>
- 2726250474U, // <2,2,7,0>: Cost 3 vext3 <7,0,1,2>, <2,7,0,1>
- 3704427616U, // <2,2,7,1>: Cost 4 vext2 <2,2,2,2>, <7,1,3,5>
- 2660545701U, // <2,2,7,2>: Cost 3 vext2 <7,2,2,2>, <7,2,2,2>
- 4030718054U, // <2,2,7,3>: Cost 4 vzipr <0,6,2,7>, LHS
- 2617415014U, // <2,2,7,4>: Cost 3 vext2 <0,0,2,2>, <7,4,5,6>
- 3302033032U, // <2,2,7,5>: Cost 4 vrev <2,2,5,7>
- 3661246929U, // <2,2,7,6>: Cost 4 vext1 <6,2,2,7>, <6,2,2,7>
- 2617415276U, // <2,2,7,7>: Cost 3 vext2 <0,0,2,2>, <7,7,7,7>
- 2731558962U, // <2,2,7,u>: Cost 3 vext3 <7,u,1,2>, <2,7,u,1>
- 1489829990U, // <2,2,u,0>: Cost 2 vext1 <2,2,2,2>, LHS
- 1548982062U, // <2,2,u,1>: Cost 2 vext2 <0,u,2,2>, LHS
- 269271142U, // <2,2,u,2>: Cost 1 vdup2 LHS
- 1879924838U, // <2,2,u,3>: Cost 2 vzipr LHS, LHS
- 1489833270U, // <2,2,u,4>: Cost 2 vext1 <2,2,2,2>, RHS
- 1548982426U, // <2,2,u,5>: Cost 2 vext2 <0,u,2,2>, RHS
- 2953666908U, // <2,2,u,6>: Cost 3 vzipr LHS, <0,4,2,6>
- 2819919401U, // <2,2,u,7>: Cost 3 vuzpr <0,2,0,2>, RHS
- 269271142U, // <2,2,u,u>: Cost 1 vdup2 LHS
- 1544339456U, // <2,3,0,0>: Cost 2 vext2 LHS, <0,0,0,0>
- 470597734U, // <2,3,0,1>: Cost 1 vext2 LHS, LHS
- 1548984484U, // <2,3,0,2>: Cost 2 vext2 LHS, <0,2,0,2>
- 2619408648U, // <2,3,0,3>: Cost 3 vext2 <0,3,2,3>, <0,3,2,3>
- 1548984658U, // <2,3,0,4>: Cost 2 vext2 LHS, <0,4,1,5>
- 2665857454U, // <2,3,0,5>: Cost 3 vext2 LHS, <0,5,2,7>
- 2622726655U, // <2,3,0,6>: Cost 3 vext2 LHS, <0,6,2,7>
- 2593494188U, // <2,3,0,7>: Cost 3 vext1 <7,2,3,0>, <7,2,3,0>
- 470598301U, // <2,3,0,u>: Cost 1 vext2 LHS, LHS
- 1544340214U, // <2,3,1,0>: Cost 2 vext2 LHS, <1,0,3,2>
- 1544340276U, // <2,3,1,1>: Cost 2 vext2 LHS, <1,1,1,1>
- 1544340374U, // <2,3,1,2>: Cost 2 vext2 LHS, <1,2,3,0>
- 1548985304U, // <2,3,1,3>: Cost 2 vext2 LHS, <1,3,1,3>
- 2551696694U, // <2,3,1,4>: Cost 3 vext1 <0,2,3,1>, RHS
- 1548985488U, // <2,3,1,5>: Cost 2 vext2 LHS, <1,5,3,7>
- 2622727375U, // <2,3,1,6>: Cost 3 vext2 LHS, <1,6,1,7>
- 2665858347U, // <2,3,1,7>: Cost 3 vext2 LHS, <1,7,3,0>
- 1548985709U, // <2,3,1,u>: Cost 2 vext2 LHS, <1,u,1,3>
- 2622727613U, // <2,3,2,0>: Cost 3 vext2 LHS, <2,0,1,2>
- 2622727711U, // <2,3,2,1>: Cost 3 vext2 LHS, <2,1,3,1>
- 1544341096U, // <2,3,2,2>: Cost 2 vext2 LHS, <2,2,2,2>
- 1544341158U, // <2,3,2,3>: Cost 2 vext2 LHS, <2,3,0,1>
- 2622727958U, // <2,3,2,4>: Cost 3 vext2 LHS, <2,4,3,5>
- 2622728032U, // <2,3,2,5>: Cost 3 vext2 LHS, <2,5,2,7>
- 1548986298U, // <2,3,2,6>: Cost 2 vext2 LHS, <2,6,3,7>
- 2665859050U, // <2,3,2,7>: Cost 3 vext2 LHS, <2,7,0,1>
- 1548986427U, // <2,3,2,u>: Cost 2 vext2 LHS, <2,u,0,1>
- 1548986518U, // <2,3,3,0>: Cost 2 vext2 LHS, <3,0,1,2>
- 2622728415U, // <2,3,3,1>: Cost 3 vext2 LHS, <3,1,0,3>
- 1489913458U, // <2,3,3,2>: Cost 2 vext1 <2,2,3,3>, <2,2,3,3>
- 1544341916U, // <2,3,3,3>: Cost 2 vext2 LHS, <3,3,3,3>
- 1548986882U, // <2,3,3,4>: Cost 2 vext2 LHS, <3,4,5,6>
- 2665859632U, // <2,3,3,5>: Cost 3 vext2 LHS, <3,5,1,7>
- 2234304870U, // <2,3,3,6>: Cost 3 vrev <3,2,6,3>
- 2958271632U, // <2,3,3,7>: Cost 3 vzipr LHS, <1,5,3,7>
- 1548987166U, // <2,3,3,u>: Cost 2 vext2 LHS, <3,u,1,2>
- 1483948134U, // <2,3,4,0>: Cost 2 vext1 <1,2,3,4>, LHS
- 1483948954U, // <2,3,4,1>: Cost 2 vext1 <1,2,3,4>, <1,2,3,4>
- 2622729276U, // <2,3,4,2>: Cost 3 vext2 LHS, <4,2,6,0>
- 2557692054U, // <2,3,4,3>: Cost 3 vext1 <1,2,3,4>, <3,0,1,2>
- 1483951414U, // <2,3,4,4>: Cost 2 vext1 <1,2,3,4>, RHS
- 470601014U, // <2,3,4,5>: Cost 1 vext2 LHS, RHS
- 1592118644U, // <2,3,4,6>: Cost 2 vext2 LHS, <4,6,4,6>
- 2593526960U, // <2,3,4,7>: Cost 3 vext1 <7,2,3,4>, <7,2,3,4>
- 470601257U, // <2,3,4,u>: Cost 1 vext2 LHS, RHS
- 2551726182U, // <2,3,5,0>: Cost 3 vext1 <0,2,3,5>, LHS
- 1592118992U, // <2,3,5,1>: Cost 2 vext2 LHS, <5,1,7,3>
- 2665860862U, // <2,3,5,2>: Cost 3 vext2 LHS, <5,2,3,4>
- 2551728642U, // <2,3,5,3>: Cost 3 vext1 <0,2,3,5>, <3,4,5,6>
- 1592119238U, // <2,3,5,4>: Cost 2 vext2 LHS, <5,4,7,6>
- 1592119300U, // <2,3,5,5>: Cost 2 vext2 LHS, <5,5,5,5>
- 1592119394U, // <2,3,5,6>: Cost 2 vext2 LHS, <5,6,7,0>
- 1592119464U, // <2,3,5,7>: Cost 2 vext2 LHS, <5,7,5,7>
- 1592119545U, // <2,3,5,u>: Cost 2 vext2 LHS, <5,u,5,7>
- 2622730529U, // <2,3,6,0>: Cost 3 vext2 LHS, <6,0,1,2>
- 2557707164U, // <2,3,6,1>: Cost 3 vext1 <1,2,3,6>, <1,2,3,6>
- 1592119802U, // <2,3,6,2>: Cost 2 vext2 LHS, <6,2,7,3>
- 2665861682U, // <2,3,6,3>: Cost 3 vext2 LHS, <6,3,4,5>
- 2622730893U, // <2,3,6,4>: Cost 3 vext2 LHS, <6,4,5,6>
- 2665861810U, // <2,3,6,5>: Cost 3 vext2 LHS, <6,5,0,7>
- 1592120120U, // <2,3,6,6>: Cost 2 vext2 LHS, <6,6,6,6>
- 1592120142U, // <2,3,6,7>: Cost 2 vext2 LHS, <6,7,0,1>
- 1592120223U, // <2,3,6,u>: Cost 2 vext2 LHS, <6,u,0,1>
- 1592120314U, // <2,3,7,0>: Cost 2 vext2 LHS, <7,0,1,2>
- 2659890261U, // <2,3,7,1>: Cost 3 vext2 <7,1,2,3>, <7,1,2,3>
- 2660553894U, // <2,3,7,2>: Cost 3 vext2 <7,2,2,3>, <7,2,2,3>
- 2665862371U, // <2,3,7,3>: Cost 3 vext2 LHS, <7,3,0,1>
- 1592120678U, // <2,3,7,4>: Cost 2 vext2 LHS, <7,4,5,6>
- 2665862534U, // <2,3,7,5>: Cost 3 vext2 LHS, <7,5,0,2>
- 2665862614U, // <2,3,7,6>: Cost 3 vext2 LHS, <7,6,0,1>
- 1592120940U, // <2,3,7,7>: Cost 2 vext2 LHS, <7,7,7,7>
- 1592120962U, // <2,3,7,u>: Cost 2 vext2 LHS, <7,u,1,2>
- 1548990163U, // <2,3,u,0>: Cost 2 vext2 LHS, <u,0,1,2>
- 470603566U, // <2,3,u,1>: Cost 1 vext2 LHS, LHS
- 1548990341U, // <2,3,u,2>: Cost 2 vext2 LHS, <u,2,3,0>
- 1548990396U, // <2,3,u,3>: Cost 2 vext2 LHS, <u,3,0,1>
- 1548990527U, // <2,3,u,4>: Cost 2 vext2 LHS, <u,4,5,6>
- 470603930U, // <2,3,u,5>: Cost 1 vext2 LHS, RHS
- 1548990672U, // <2,3,u,6>: Cost 2 vext2 LHS, <u,6,3,7>
- 1592121600U, // <2,3,u,7>: Cost 2 vext2 LHS, <u,7,0,1>
- 470604133U, // <2,3,u,u>: Cost 1 vext2 LHS, LHS
- 2617425942U, // <2,4,0,0>: Cost 3 vext2 <0,0,2,4>, <0,0,2,4>
- 2618753126U, // <2,4,0,1>: Cost 3 vext2 <0,2,2,4>, LHS
- 2618753208U, // <2,4,0,2>: Cost 3 vext2 <0,2,2,4>, <0,2,2,4>
- 2619416841U, // <2,4,0,3>: Cost 3 vext2 <0,3,2,4>, <0,3,2,4>
- 2587593628U, // <2,4,0,4>: Cost 3 vext1 <6,2,4,0>, <4,0,6,2>
- 2712832914U, // <2,4,0,5>: Cost 3 vext3 <4,6,u,2>, <4,0,5,1>
- 1634962332U, // <2,4,0,6>: Cost 2 vext3 <4,0,6,2>, <4,0,6,2>
- 3799993252U, // <2,4,0,7>: Cost 4 vext3 <7,0,1,2>, <4,0,7,1>
- 1634962332U, // <2,4,0,u>: Cost 2 vext3 <4,0,6,2>, <4,0,6,2>
- 2619417334U, // <2,4,1,0>: Cost 3 vext2 <0,3,2,4>, <1,0,3,2>
- 3692495668U, // <2,4,1,1>: Cost 4 vext2 <0,2,2,4>, <1,1,1,1>
- 2625389466U, // <2,4,1,2>: Cost 3 vext2 <1,3,2,4>, <1,2,3,4>
- 2826125414U, // <2,4,1,3>: Cost 3 vuzpr <1,2,3,4>, LHS
- 3699794995U, // <2,4,1,4>: Cost 4 vext2 <1,4,2,4>, <1,4,2,4>
- 3692496016U, // <2,4,1,5>: Cost 4 vext2 <0,2,2,4>, <1,5,3,7>
- 3763424238U, // <2,4,1,6>: Cost 4 vext3 <0,u,0,2>, <4,1,6,3>
- 3667317942U, // <2,4,1,7>: Cost 4 vext1 <7,2,4,1>, <7,2,4,1>
- 2826125419U, // <2,4,1,u>: Cost 3 vuzpr <1,2,3,4>, LHS
- 2629371336U, // <2,4,2,0>: Cost 3 vext2 <2,0,2,4>, <2,0,2,4>
- 3699131946U, // <2,4,2,1>: Cost 4 vext2 <1,3,2,4>, <2,1,4,3>
- 2630698602U, // <2,4,2,2>: Cost 3 vext2 <2,2,2,4>, <2,2,2,4>
- 2618754766U, // <2,4,2,3>: Cost 3 vext2 <0,2,2,4>, <2,3,4,5>
- 2826126234U, // <2,4,2,4>: Cost 3 vuzpr <1,2,3,4>, <1,2,3,4>
- 2899119414U, // <2,4,2,5>: Cost 3 vzipl <2,2,2,2>, RHS
- 3033337142U, // <2,4,2,6>: Cost 3 vtrnl <2,2,2,2>, RHS
- 3800214597U, // <2,4,2,7>: Cost 4 vext3 <7,0,4,2>, <4,2,7,0>
- 2899119657U, // <2,4,2,u>: Cost 3 vzipl <2,2,2,2>, RHS
- 2635344033U, // <2,4,3,0>: Cost 3 vext2 <3,0,2,4>, <3,0,2,4>
- 4032012325U, // <2,4,3,1>: Cost 4 vzipr LHS, <0,0,4,1>
- 3692497228U, // <2,4,3,2>: Cost 4 vext2 <0,2,2,4>, <3,2,3,4>
- 3692497308U, // <2,4,3,3>: Cost 4 vext2 <0,2,2,4>, <3,3,3,3>
- 3001404624U, // <2,4,3,4>: Cost 3 vzipr LHS, <4,4,4,4>
- 2953627342U, // <2,4,3,5>: Cost 3 vzipr LHS, <2,3,4,5>
- 2953625804U, // <2,4,3,6>: Cost 3 vzipr LHS, <0,2,4,6>
- 3899868160U, // <2,4,3,7>: Cost 4 vuzpr <1,2,3,4>, <1,3,5,7>
- 2953625806U, // <2,4,3,u>: Cost 3 vzipr LHS, <0,2,4,u>
- 2710916266U, // <2,4,4,0>: Cost 3 vext3 <4,4,0,2>, <4,4,0,2>
- 3899869648U, // <2,4,4,1>: Cost 4 vuzpr <1,2,3,4>, <3,4,0,1>
- 3899869658U, // <2,4,4,2>: Cost 4 vuzpr <1,2,3,4>, <3,4,1,2>
- 3899868930U, // <2,4,4,3>: Cost 4 vuzpr <1,2,3,4>, <2,4,1,3>
- 2712833232U, // <2,4,4,4>: Cost 3 vext3 <4,6,u,2>, <4,4,4,4>
- 2618756406U, // <2,4,4,5>: Cost 3 vext2 <0,2,2,4>, RHS
- 2765737270U, // <2,4,4,6>: Cost 3 vuzpl <2,3,4,5>, RHS
- 4168304426U, // <2,4,4,7>: Cost 4 vtrnr <1,2,3,4>, <2,4,5,7>
- 2618756649U, // <2,4,4,u>: Cost 3 vext2 <0,2,2,4>, RHS
- 2551800011U, // <2,4,5,0>: Cost 3 vext1 <0,2,4,5>, <0,2,4,5>
- 2569716470U, // <2,4,5,1>: Cost 3 vext1 <3,2,4,5>, <1,0,3,2>
- 2563745405U, // <2,4,5,2>: Cost 3 vext1 <2,2,4,5>, <2,2,4,5>
- 2569718102U, // <2,4,5,3>: Cost 3 vext1 <3,2,4,5>, <3,2,4,5>
- 2551803190U, // <2,4,5,4>: Cost 3 vext1 <0,2,4,5>, RHS
- 3625545732U, // <2,4,5,5>: Cost 4 vext1 <0,2,4,5>, <5,5,5,5>
- 1611959606U, // <2,4,5,6>: Cost 2 vext3 <0,2,0,2>, RHS
- 2826128694U, // <2,4,5,7>: Cost 3 vuzpr <1,2,3,4>, RHS
- 1611959624U, // <2,4,5,u>: Cost 2 vext3 <0,2,0,2>, RHS
- 1478066278U, // <2,4,6,0>: Cost 2 vext1 <0,2,4,6>, LHS
- 2551808758U, // <2,4,6,1>: Cost 3 vext1 <0,2,4,6>, <1,0,3,2>
- 2551809516U, // <2,4,6,2>: Cost 3 vext1 <0,2,4,6>, <2,0,6,4>
- 2551810198U, // <2,4,6,3>: Cost 3 vext1 <0,2,4,6>, <3,0,1,2>
- 1478069558U, // <2,4,6,4>: Cost 2 vext1 <0,2,4,6>, RHS
- 2901888310U, // <2,4,6,5>: Cost 3 vzipl <2,6,3,7>, RHS
- 2551812920U, // <2,4,6,6>: Cost 3 vext1 <0,2,4,6>, <6,6,6,6>
- 2726251914U, // <2,4,6,7>: Cost 3 vext3 <7,0,1,2>, <4,6,7,1>
- 1478072110U, // <2,4,6,u>: Cost 2 vext1 <0,2,4,6>, LHS
- 2659234821U, // <2,4,7,0>: Cost 3 vext2 <7,0,2,4>, <7,0,2,4>
- 3786722726U, // <2,4,7,1>: Cost 4 vext3 <4,7,1,2>, <4,7,1,2>
- 3734303911U, // <2,4,7,2>: Cost 4 vext2 <7,2,2,4>, <7,2,2,4>
- 3734967544U, // <2,4,7,3>: Cost 4 vext2 <7,3,2,4>, <7,3,2,4>
- 3727005030U, // <2,4,7,4>: Cost 4 vext2 <6,0,2,4>, <7,4,5,6>
- 2726251976U, // <2,4,7,5>: Cost 3 vext3 <7,0,1,2>, <4,7,5,0>
- 2726251986U, // <2,4,7,6>: Cost 3 vext3 <7,0,1,2>, <4,7,6,1>
- 3727005292U, // <2,4,7,7>: Cost 4 vext2 <6,0,2,4>, <7,7,7,7>
- 2659234821U, // <2,4,7,u>: Cost 3 vext2 <7,0,2,4>, <7,0,2,4>
- 1478082662U, // <2,4,u,0>: Cost 2 vext1 <0,2,4,u>, LHS
- 2618758958U, // <2,4,u,1>: Cost 3 vext2 <0,2,2,4>, LHS
- 2551826024U, // <2,4,u,2>: Cost 3 vext1 <0,2,4,u>, <2,2,2,2>
- 2551826582U, // <2,4,u,3>: Cost 3 vext1 <0,2,4,u>, <3,0,1,2>
- 1478085942U, // <2,4,u,4>: Cost 2 vext1 <0,2,4,u>, RHS
- 2953668302U, // <2,4,u,5>: Cost 3 vzipr LHS, <2,3,4,5>
- 1611959849U, // <2,4,u,6>: Cost 2 vext3 <0,2,0,2>, RHS
- 2826128937U, // <2,4,u,7>: Cost 3 vuzpr <1,2,3,4>, RHS
- 1611959867U, // <2,4,u,u>: Cost 2 vext3 <0,2,0,2>, RHS
- 3691839488U, // <2,5,0,0>: Cost 4 vext2 <0,1,2,5>, <0,0,0,0>
- 2618097766U, // <2,5,0,1>: Cost 3 vext2 <0,1,2,5>, LHS
- 2620088484U, // <2,5,0,2>: Cost 3 vext2 <0,4,2,5>, <0,2,0,2>
- 2619425034U, // <2,5,0,3>: Cost 3 vext2 <0,3,2,5>, <0,3,2,5>
- 2620088667U, // <2,5,0,4>: Cost 3 vext2 <0,4,2,5>, <0,4,2,5>
- 2620752300U, // <2,5,0,5>: Cost 3 vext2 <0,5,2,5>, <0,5,2,5>
- 3693830655U, // <2,5,0,6>: Cost 4 vext2 <0,4,2,5>, <0,6,2,7>
- 3094531382U, // <2,5,0,7>: Cost 3 vtrnr <1,2,3,0>, RHS
- 2618098333U, // <2,5,0,u>: Cost 3 vext2 <0,1,2,5>, LHS
- 3691840246U, // <2,5,1,0>: Cost 4 vext2 <0,1,2,5>, <1,0,3,2>
- 3691840308U, // <2,5,1,1>: Cost 4 vext2 <0,1,2,5>, <1,1,1,1>
- 2626061206U, // <2,5,1,2>: Cost 3 vext2 <1,4,2,5>, <1,2,3,0>
- 2618098688U, // <2,5,1,3>: Cost 3 vext2 <0,1,2,5>, <1,3,5,7>
- 2626061364U, // <2,5,1,4>: Cost 3 vext2 <1,4,2,5>, <1,4,2,5>
- 3691840656U, // <2,5,1,5>: Cost 4 vext2 <0,1,2,5>, <1,5,3,7>
- 3789082310U, // <2,5,1,6>: Cost 4 vext3 <5,1,6,2>, <5,1,6,2>
- 2712833744U, // <2,5,1,7>: Cost 3 vext3 <4,6,u,2>, <5,1,7,3>
- 2628715896U, // <2,5,1,u>: Cost 3 vext2 <1,u,2,5>, <1,u,2,5>
- 3693831613U, // <2,5,2,0>: Cost 4 vext2 <0,4,2,5>, <2,0,1,2>
- 4026698642U, // <2,5,2,1>: Cost 4 vzipr <0,0,2,2>, <4,0,5,1>
- 2632033896U, // <2,5,2,2>: Cost 3 vext2 <2,4,2,5>, <2,2,2,2>
- 3691841190U, // <2,5,2,3>: Cost 4 vext2 <0,1,2,5>, <2,3,0,1>
- 2632034061U, // <2,5,2,4>: Cost 3 vext2 <2,4,2,5>, <2,4,2,5>
- 3691841352U, // <2,5,2,5>: Cost 4 vext2 <0,1,2,5>, <2,5,0,1>
- 3691841466U, // <2,5,2,6>: Cost 4 vext2 <0,1,2,5>, <2,6,3,7>
- 3088354614U, // <2,5,2,7>: Cost 3 vtrnr <0,2,0,2>, RHS
- 3088354615U, // <2,5,2,u>: Cost 3 vtrnr <0,2,0,2>, RHS
- 2557829222U, // <2,5,3,0>: Cost 3 vext1 <1,2,5,3>, LHS
- 2557830059U, // <2,5,3,1>: Cost 3 vext1 <1,2,5,3>, <1,2,5,3>
- 2575746766U, // <2,5,3,2>: Cost 3 vext1 <4,2,5,3>, <2,3,4,5>
- 3691841948U, // <2,5,3,3>: Cost 4 vext2 <0,1,2,5>, <3,3,3,3>
- 2619427330U, // <2,5,3,4>: Cost 3 vext2 <0,3,2,5>, <3,4,5,6>
- 2581720847U, // <2,5,3,5>: Cost 3 vext1 <5,2,5,3>, <5,2,5,3>
- 2953628162U, // <2,5,3,6>: Cost 3 vzipr LHS, <3,4,5,6>
- 2953626624U, // <2,5,3,7>: Cost 3 vzipr LHS, <1,3,5,7>
- 2953626625U, // <2,5,3,u>: Cost 3 vzipr LHS, <1,3,5,u>
- 2569781350U, // <2,5,4,0>: Cost 3 vext1 <3,2,5,4>, LHS
- 3631580076U, // <2,5,4,1>: Cost 4 vext1 <1,2,5,4>, <1,2,5,4>
- 2569782990U, // <2,5,4,2>: Cost 3 vext1 <3,2,5,4>, <2,3,4,5>
- 2569783646U, // <2,5,4,3>: Cost 3 vext1 <3,2,5,4>, <3,2,5,4>
- 2569784630U, // <2,5,4,4>: Cost 3 vext1 <3,2,5,4>, RHS
- 2618101046U, // <2,5,4,5>: Cost 3 vext2 <0,1,2,5>, RHS
- 3893905922U, // <2,5,4,6>: Cost 4 vuzpr <0,2,3,5>, <3,4,5,6>
- 3094564150U, // <2,5,4,7>: Cost 3 vtrnr <1,2,3,4>, RHS
- 2618101289U, // <2,5,4,u>: Cost 3 vext2 <0,1,2,5>, RHS
- 2551873638U, // <2,5,5,0>: Cost 3 vext1 <0,2,5,5>, LHS
- 3637560320U, // <2,5,5,1>: Cost 4 vext1 <2,2,5,5>, <1,3,5,7>
- 3637560966U, // <2,5,5,2>: Cost 4 vext1 <2,2,5,5>, <2,2,5,5>
- 3723030343U, // <2,5,5,3>: Cost 4 vext2 <5,3,2,5>, <5,3,2,5>
- 2551876918U, // <2,5,5,4>: Cost 3 vext1 <0,2,5,5>, RHS
- 2712834052U, // <2,5,5,5>: Cost 3 vext3 <4,6,u,2>, <5,5,5,5>
- 4028713474U, // <2,5,5,6>: Cost 4 vzipr <0,3,2,5>, <3,4,5,6>
- 2712834072U, // <2,5,5,7>: Cost 3 vext3 <4,6,u,2>, <5,5,7,7>
- 2712834081U, // <2,5,5,u>: Cost 3 vext3 <4,6,u,2>, <5,5,u,7>
- 2575769702U, // <2,5,6,0>: Cost 3 vext1 <4,2,5,6>, LHS
- 3631596462U, // <2,5,6,1>: Cost 4 vext1 <1,2,5,6>, <1,2,5,6>
- 2655924730U, // <2,5,6,2>: Cost 3 vext2 <6,4,2,5>, <6,2,7,3>
- 3643541856U, // <2,5,6,3>: Cost 4 vext1 <3,2,5,6>, <3,2,5,6>
- 2655924849U, // <2,5,6,4>: Cost 3 vext2 <6,4,2,5>, <6,4,2,5>
- 3787755607U, // <2,5,6,5>: Cost 4 vext3 <4,u,6,2>, <5,6,5,7>
- 4029385218U, // <2,5,6,6>: Cost 4 vzipr <0,4,2,6>, <3,4,5,6>
- 3088682294U, // <2,5,6,7>: Cost 3 vtrnr <0,2,4,6>, RHS
- 3088682295U, // <2,5,6,u>: Cost 3 vtrnr <0,2,4,6>, RHS
- 2563833958U, // <2,5,7,0>: Cost 3 vext1 <2,2,5,7>, LHS
- 2551890678U, // <2,5,7,1>: Cost 3 vext1 <0,2,5,7>, <1,0,3,2>
- 2563835528U, // <2,5,7,2>: Cost 3 vext1 <2,2,5,7>, <2,2,5,7>
- 3637577878U, // <2,5,7,3>: Cost 4 vext1 <2,2,5,7>, <3,0,1,2>
- 2563837238U, // <2,5,7,4>: Cost 3 vext1 <2,2,5,7>, RHS
- 2712834216U, // <2,5,7,5>: Cost 3 vext3 <4,6,u,2>, <5,7,5,7>
- 2712834220U, // <2,5,7,6>: Cost 3 vext3 <4,6,u,2>, <5,7,6,2>
- 4174449974U, // <2,5,7,7>: Cost 4 vtrnr <2,2,5,7>, RHS
- 2563839790U, // <2,5,7,u>: Cost 3 vext1 <2,2,5,7>, LHS
- 2563842150U, // <2,5,u,0>: Cost 3 vext1 <2,2,5,u>, LHS
- 2618103598U, // <2,5,u,1>: Cost 3 vext2 <0,1,2,5>, LHS
- 2563843721U, // <2,5,u,2>: Cost 3 vext1 <2,2,5,u>, <2,2,5,u>
- 2569816418U, // <2,5,u,3>: Cost 3 vext1 <3,2,5,u>, <3,2,5,u>
- 2622748735U, // <2,5,u,4>: Cost 3 vext2 <0,u,2,5>, <u,4,5,6>
- 2618103962U, // <2,5,u,5>: Cost 3 vext2 <0,1,2,5>, RHS
- 2953669122U, // <2,5,u,6>: Cost 3 vzipr LHS, <3,4,5,6>
- 2953667584U, // <2,5,u,7>: Cost 3 vzipr LHS, <1,3,5,7>
- 2618104165U, // <2,5,u,u>: Cost 3 vext2 <0,1,2,5>, LHS
- 2620096512U, // <2,6,0,0>: Cost 3 vext2 <0,4,2,6>, <0,0,0,0>
- 1546354790U, // <2,6,0,1>: Cost 2 vext2 <0,4,2,6>, LHS
- 2620096676U, // <2,6,0,2>: Cost 3 vext2 <0,4,2,6>, <0,2,0,2>
- 3693838588U, // <2,6,0,3>: Cost 4 vext2 <0,4,2,6>, <0,3,1,0>
- 1546355036U, // <2,6,0,4>: Cost 2 vext2 <0,4,2,6>, <0,4,2,6>
- 3694502317U, // <2,6,0,5>: Cost 4 vext2 <0,5,2,6>, <0,5,2,6>
- 2551911246U, // <2,6,0,6>: Cost 3 vext1 <0,2,6,0>, <6,7,0,1>
- 2720723287U, // <2,6,0,7>: Cost 3 vext3 <6,0,7,2>, <6,0,7,2>
- 1546355357U, // <2,6,0,u>: Cost 2 vext2 <0,4,2,6>, LHS
- 2620097270U, // <2,6,1,0>: Cost 3 vext2 <0,4,2,6>, <1,0,3,2>
- 2620097332U, // <2,6,1,1>: Cost 3 vext2 <0,4,2,6>, <1,1,1,1>
- 2620097430U, // <2,6,1,2>: Cost 3 vext2 <0,4,2,6>, <1,2,3,0>
- 2820243558U, // <2,6,1,3>: Cost 3 vuzpr <0,2,4,6>, LHS
- 2620097598U, // <2,6,1,4>: Cost 3 vext2 <0,4,2,6>, <1,4,3,6>
- 2620097680U, // <2,6,1,5>: Cost 3 vext2 <0,4,2,6>, <1,5,3,7>
- 3693839585U, // <2,6,1,6>: Cost 4 vext2 <0,4,2,6>, <1,6,3,7>
- 2721386920U, // <2,6,1,7>: Cost 3 vext3 <6,1,7,2>, <6,1,7,2>
- 2820243563U, // <2,6,1,u>: Cost 3 vuzpr <0,2,4,6>, LHS
- 2714014137U, // <2,6,2,0>: Cost 3 vext3 <4,u,6,2>, <6,2,0,1>
- 2712834500U, // <2,6,2,1>: Cost 3 vext3 <4,6,u,2>, <6,2,1,3>
- 2620098152U, // <2,6,2,2>: Cost 3 vext2 <0,4,2,6>, <2,2,2,2>
- 2620098214U, // <2,6,2,3>: Cost 3 vext2 <0,4,2,6>, <2,3,0,1>
- 2632042254U, // <2,6,2,4>: Cost 3 vext2 <2,4,2,6>, <2,4,2,6>
- 2712834540U, // <2,6,2,5>: Cost 3 vext3 <4,6,u,2>, <6,2,5,7>
- 2820243660U, // <2,6,2,6>: Cost 3 vuzpr <0,2,4,6>, <0,2,4,6>
- 2958265654U, // <2,6,2,7>: Cost 3 vzipr <0,u,2,2>, RHS
- 2620098619U, // <2,6,2,u>: Cost 3 vext2 <0,4,2,6>, <2,u,0,1>
- 2620098710U, // <2,6,3,0>: Cost 3 vext2 <0,4,2,6>, <3,0,1,2>
- 3893986982U, // <2,6,3,1>: Cost 4 vuzpr <0,2,4,6>, <2,3,0,1>
- 2569848762U, // <2,6,3,2>: Cost 3 vext1 <3,2,6,3>, <2,6,3,7>
- 2620098972U, // <2,6,3,3>: Cost 3 vext2 <0,4,2,6>, <3,3,3,3>
- 2620099074U, // <2,6,3,4>: Cost 3 vext2 <0,4,2,6>, <3,4,5,6>
- 3893987022U, // <2,6,3,5>: Cost 4 vuzpr <0,2,4,6>, <2,3,4,5>
- 3001404644U, // <2,6,3,6>: Cost 3 vzipr LHS, <4,4,6,6>
- 1879887158U, // <2,6,3,7>: Cost 2 vzipr LHS, RHS
- 1879887159U, // <2,6,3,u>: Cost 2 vzipr LHS, RHS
- 2620099484U, // <2,6,4,0>: Cost 3 vext2 <0,4,2,6>, <4,0,6,2>
- 2620099566U, // <2,6,4,1>: Cost 3 vext2 <0,4,2,6>, <4,1,6,3>
- 2620099644U, // <2,6,4,2>: Cost 3 vext2 <0,4,2,6>, <4,2,6,0>
- 3643599207U, // <2,6,4,3>: Cost 4 vext1 <3,2,6,4>, <3,2,6,4>
- 2575830080U, // <2,6,4,4>: Cost 3 vext1 <4,2,6,4>, <4,2,6,4>
- 1546358070U, // <2,6,4,5>: Cost 2 vext2 <0,4,2,6>, RHS
- 2667875700U, // <2,6,4,6>: Cost 3 vext2 <u,4,2,6>, <4,6,4,6>
- 4028042550U, // <2,6,4,7>: Cost 4 vzipr <0,2,2,4>, RHS
- 1546358313U, // <2,6,4,u>: Cost 2 vext2 <0,4,2,6>, RHS
- 3693841992U, // <2,6,5,0>: Cost 4 vext2 <0,4,2,6>, <5,0,1,2>
- 2667876048U, // <2,6,5,1>: Cost 3 vext2 <u,4,2,6>, <5,1,7,3>
- 2712834756U, // <2,6,5,2>: Cost 3 vext3 <4,6,u,2>, <6,5,2,7>
- 3643607400U, // <2,6,5,3>: Cost 4 vext1 <3,2,6,5>, <3,2,6,5>
- 2252091873U, // <2,6,5,4>: Cost 3 vrev <6,2,4,5>
- 2667876356U, // <2,6,5,5>: Cost 3 vext2 <u,4,2,6>, <5,5,5,5>
- 2667876450U, // <2,6,5,6>: Cost 3 vext2 <u,4,2,6>, <5,6,7,0>
- 2820246838U, // <2,6,5,7>: Cost 3 vuzpr <0,2,4,6>, RHS
- 2820246839U, // <2,6,5,u>: Cost 3 vuzpr <0,2,4,6>, RHS
- 2563899494U, // <2,6,6,0>: Cost 3 vext1 <2,2,6,6>, LHS
- 3893988683U, // <2,6,6,1>: Cost 4 vuzpr <0,2,4,6>, <4,6,0,1>
- 2563901072U, // <2,6,6,2>: Cost 3 vext1 <2,2,6,6>, <2,2,6,6>
- 3893987236U, // <2,6,6,3>: Cost 4 vuzpr <0,2,4,6>, <2,6,1,3>
- 2563902774U, // <2,6,6,4>: Cost 3 vext1 <2,2,6,6>, RHS
- 3893988723U, // <2,6,6,5>: Cost 4 vuzpr <0,2,4,6>, <4,6,4,5>
- 2712834872U, // <2,6,6,6>: Cost 3 vext3 <4,6,u,2>, <6,6,6,6>
- 2955644214U, // <2,6,6,7>: Cost 3 vzipr <0,4,2,6>, RHS
- 2955644215U, // <2,6,6,u>: Cost 3 vzipr <0,4,2,6>, RHS
- 2712834894U, // <2,6,7,0>: Cost 3 vext3 <4,6,u,2>, <6,7,0,1>
- 2724926296U, // <2,6,7,1>: Cost 3 vext3 <6,7,1,2>, <6,7,1,2>
- 2725000033U, // <2,6,7,2>: Cost 3 vext3 <6,7,2,2>, <6,7,2,2>
- 2702365544U, // <2,6,7,3>: Cost 3 vext3 <3,0,1,2>, <6,7,3,0>
- 2712834934U, // <2,6,7,4>: Cost 3 vext3 <4,6,u,2>, <6,7,4,5>
- 3776107393U, // <2,6,7,5>: Cost 4 vext3 <3,0,1,2>, <6,7,5,7>
- 2725294981U, // <2,6,7,6>: Cost 3 vext3 <6,7,6,2>, <6,7,6,2>
- 2726253452U, // <2,6,7,7>: Cost 3 vext3 <7,0,1,2>, <6,7,7,0>
- 2712834966U, // <2,6,7,u>: Cost 3 vext3 <4,6,u,2>, <6,7,u,1>
- 2620102355U, // <2,6,u,0>: Cost 3 vext2 <0,4,2,6>, <u,0,1,2>
- 1546360622U, // <2,6,u,1>: Cost 2 vext2 <0,4,2,6>, LHS
- 2620102536U, // <2,6,u,2>: Cost 3 vext2 <0,4,2,6>, <u,2,3,3>
- 2820244125U, // <2,6,u,3>: Cost 3 vuzpr <0,2,4,6>, LHS
- 1594136612U, // <2,6,u,4>: Cost 2 vext2 <u,4,2,6>, <u,4,2,6>
- 1546360986U, // <2,6,u,5>: Cost 2 vext2 <0,4,2,6>, RHS
- 2620102864U, // <2,6,u,6>: Cost 3 vext2 <0,4,2,6>, <u,6,3,7>
- 1879928118U, // <2,6,u,7>: Cost 2 vzipr LHS, RHS
- 1879928119U, // <2,6,u,u>: Cost 2 vzipr LHS, RHS
- 2726179825U, // <2,7,0,0>: Cost 3 vext3 <7,0,0,2>, <7,0,0,2>
- 1652511738U, // <2,7,0,1>: Cost 2 vext3 <7,0,1,2>, <7,0,1,2>
- 2621431972U, // <2,7,0,2>: Cost 3 vext2 <0,6,2,7>, <0,2,0,2>
- 2257949868U, // <2,7,0,3>: Cost 3 vrev <7,2,3,0>
- 2726474773U, // <2,7,0,4>: Cost 3 vext3 <7,0,4,2>, <7,0,4,2>
- 2620768686U, // <2,7,0,5>: Cost 3 vext2 <0,5,2,7>, <0,5,2,7>
- 2621432319U, // <2,7,0,6>: Cost 3 vext2 <0,6,2,7>, <0,6,2,7>
- 2599760953U, // <2,7,0,7>: Cost 3 vext1 <u,2,7,0>, <7,0,u,2>
- 1653027897U, // <2,7,0,u>: Cost 2 vext3 <7,0,u,2>, <7,0,u,2>
- 2639348470U, // <2,7,1,0>: Cost 3 vext2 <3,6,2,7>, <1,0,3,2>
- 3695174452U, // <2,7,1,1>: Cost 4 vext2 <0,6,2,7>, <1,1,1,1>
- 3695174550U, // <2,7,1,2>: Cost 4 vext2 <0,6,2,7>, <1,2,3,0>
- 3694511104U, // <2,7,1,3>: Cost 4 vext2 <0,5,2,7>, <1,3,5,7>
- 3713090594U, // <2,7,1,4>: Cost 4 vext2 <3,6,2,7>, <1,4,0,5>
- 3693184144U, // <2,7,1,5>: Cost 4 vext2 <0,3,2,7>, <1,5,3,7>
- 2627405016U, // <2,7,1,6>: Cost 3 vext2 <1,6,2,7>, <1,6,2,7>
- 3799995519U, // <2,7,1,7>: Cost 4 vext3 <7,0,1,2>, <7,1,7,0>
- 2639348470U, // <2,7,1,u>: Cost 3 vext2 <3,6,2,7>, <1,0,3,2>
- 3695175101U, // <2,7,2,0>: Cost 4 vext2 <0,6,2,7>, <2,0,1,2>
- 3643655168U, // <2,7,2,1>: Cost 4 vext1 <3,2,7,2>, <1,3,5,7>
- 2257892517U, // <2,7,2,2>: Cost 3 vrev <7,2,2,2>
- 3695175334U, // <2,7,2,3>: Cost 4 vext2 <0,6,2,7>, <2,3,0,1>
- 3695175465U, // <2,7,2,4>: Cost 4 vext2 <0,6,2,7>, <2,4,5,6>
- 2632714080U, // <2,7,2,5>: Cost 3 vext2 <2,5,2,7>, <2,5,2,7>
- 2633377713U, // <2,7,2,6>: Cost 3 vext2 <2,6,2,7>, <2,6,2,7>
- 3695175658U, // <2,7,2,7>: Cost 4 vext2 <0,6,2,7>, <2,7,0,1>
- 2634704979U, // <2,7,2,u>: Cost 3 vext2 <2,u,2,7>, <2,u,2,7>
- 1514094694U, // <2,7,3,0>: Cost 2 vext1 <6,2,7,3>, LHS
- 2569921680U, // <2,7,3,1>: Cost 3 vext1 <3,2,7,3>, <1,5,3,7>
- 2587838056U, // <2,7,3,2>: Cost 3 vext1 <6,2,7,3>, <2,2,2,2>
- 2569922927U, // <2,7,3,3>: Cost 3 vext1 <3,2,7,3>, <3,2,7,3>
- 1514097974U, // <2,7,3,4>: Cost 2 vext1 <6,2,7,3>, RHS
- 2581868321U, // <2,7,3,5>: Cost 3 vext1 <5,2,7,3>, <5,2,7,3>
- 1514099194U, // <2,7,3,6>: Cost 2 vext1 <6,2,7,3>, <6,2,7,3>
- 2587841530U, // <2,7,3,7>: Cost 3 vext1 <6,2,7,3>, <7,0,1,2>
- 1514100526U, // <2,7,3,u>: Cost 2 vext1 <6,2,7,3>, LHS
- 2708706617U, // <2,7,4,0>: Cost 3 vext3 <4,0,6,2>, <7,4,0,6>
- 3649643418U, // <2,7,4,1>: Cost 4 vext1 <4,2,7,4>, <1,2,3,4>
- 3649644330U, // <2,7,4,2>: Cost 4 vext1 <4,2,7,4>, <2,4,5,7>
- 2257982640U, // <2,7,4,3>: Cost 3 vrev <7,2,3,4>
- 3649645641U, // <2,7,4,4>: Cost 4 vext1 <4,2,7,4>, <4,2,7,4>
- 2621435190U, // <2,7,4,5>: Cost 3 vext2 <0,6,2,7>, RHS
- 2712835441U, // <2,7,4,6>: Cost 3 vext3 <4,6,u,2>, <7,4,6,u>
- 3799995762U, // <2,7,4,7>: Cost 4 vext3 <7,0,1,2>, <7,4,7,0>
- 2621435433U, // <2,7,4,u>: Cost 3 vext2 <0,6,2,7>, RHS
- 2729497990U, // <2,7,5,0>: Cost 3 vext3 <7,5,0,2>, <7,5,0,2>
- 3643679744U, // <2,7,5,1>: Cost 4 vext1 <3,2,7,5>, <1,3,5,7>
- 3637708424U, // <2,7,5,2>: Cost 4 vext1 <2,2,7,5>, <2,2,5,7>
- 3643681137U, // <2,7,5,3>: Cost 4 vext1 <3,2,7,5>, <3,2,7,5>
- 2599800118U, // <2,7,5,4>: Cost 3 vext1 <u,2,7,5>, RHS
- 3786577334U, // <2,7,5,5>: Cost 4 vext3 <4,6,u,2>, <7,5,5,5>
- 3786577345U, // <2,7,5,6>: Cost 4 vext3 <4,6,u,2>, <7,5,6,7>
- 2599802214U, // <2,7,5,7>: Cost 3 vext1 <u,2,7,5>, <7,4,5,6>
- 2599802670U, // <2,7,5,u>: Cost 3 vext1 <u,2,7,5>, LHS
- 2581889126U, // <2,7,6,0>: Cost 3 vext1 <5,2,7,6>, LHS
- 3643687936U, // <2,7,6,1>: Cost 4 vext1 <3,2,7,6>, <1,3,5,7>
- 2663240186U, // <2,7,6,2>: Cost 3 vext2 <7,6,2,7>, <6,2,7,3>
- 3643689330U, // <2,7,6,3>: Cost 4 vext1 <3,2,7,6>, <3,2,7,6>
- 2581892406U, // <2,7,6,4>: Cost 3 vext1 <5,2,7,6>, RHS
- 2581892900U, // <2,7,6,5>: Cost 3 vext1 <5,2,7,6>, <5,2,7,6>
- 2587865597U, // <2,7,6,6>: Cost 3 vext1 <6,2,7,6>, <6,2,7,6>
- 3786577428U, // <2,7,6,7>: Cost 4 vext3 <4,6,u,2>, <7,6,7,0>
- 2581894958U, // <2,7,6,u>: Cost 3 vext1 <5,2,7,6>, LHS
- 2726254119U, // <2,7,7,0>: Cost 3 vext3 <7,0,1,2>, <7,7,0,1>
- 3804640817U, // <2,7,7,1>: Cost 4 vext3 <7,7,1,2>, <7,7,1,2>
- 3637724826U, // <2,7,7,2>: Cost 4 vext1 <2,2,7,7>, <2,2,7,7>
- 3734992123U, // <2,7,7,3>: Cost 4 vext2 <7,3,2,7>, <7,3,2,7>
- 2552040758U, // <2,7,7,4>: Cost 3 vext1 <0,2,7,7>, RHS
- 3799995992U, // <2,7,7,5>: Cost 4 vext3 <7,0,1,2>, <7,7,5,5>
- 2663241198U, // <2,7,7,6>: Cost 3 vext2 <7,6,2,7>, <7,6,2,7>
- 2712835692U, // <2,7,7,7>: Cost 3 vext3 <4,6,u,2>, <7,7,7,7>
- 2731562607U, // <2,7,7,u>: Cost 3 vext3 <7,u,1,2>, <7,7,u,1>
- 1514135654U, // <2,7,u,0>: Cost 2 vext1 <6,2,7,u>, LHS
- 1657820802U, // <2,7,u,1>: Cost 2 vext3 <7,u,1,2>, <7,u,1,2>
- 2587879016U, // <2,7,u,2>: Cost 3 vext1 <6,2,7,u>, <2,2,2,2>
- 2569963892U, // <2,7,u,3>: Cost 3 vext1 <3,2,7,u>, <3,2,7,u>
- 1514138934U, // <2,7,u,4>: Cost 2 vext1 <6,2,7,u>, RHS
- 2621438106U, // <2,7,u,5>: Cost 3 vext2 <0,6,2,7>, RHS
- 1514140159U, // <2,7,u,6>: Cost 2 vext1 <6,2,7,u>, <6,2,7,u>
- 2587882490U, // <2,7,u,7>: Cost 3 vext1 <6,2,7,u>, <7,0,1,2>
- 1514141486U, // <2,7,u,u>: Cost 2 vext1 <6,2,7,u>, LHS
- 1544380416U, // <2,u,0,0>: Cost 2 vext2 LHS, <0,0,0,0>
- 470638699U, // <2,u,0,1>: Cost 1 vext2 LHS, LHS
- 1544380580U, // <2,u,0,2>: Cost 2 vext2 LHS, <0,2,0,2>
- 1658631909U, // <2,u,0,3>: Cost 2 vext3 <u,0,3,2>, <u,0,3,2>
- 1544380754U, // <2,u,0,4>: Cost 2 vext2 LHS, <0,4,1,5>
- 2665898414U, // <2,u,0,5>: Cost 3 vext2 LHS, <0,5,2,7>
- 1658853120U, // <2,u,0,6>: Cost 2 vext3 <u,0,6,2>, <u,0,6,2>
- 3094531625U, // <2,u,0,7>: Cost 3 vtrnr <1,2,3,0>, RHS
- 470639261U, // <2,u,0,u>: Cost 1 vext2 LHS, LHS
- 1544381174U, // <2,u,1,0>: Cost 2 vext2 LHS, <1,0,3,2>
- 1544381236U, // <2,u,1,1>: Cost 2 vext2 LHS, <1,1,1,1>
- 1544381334U, // <2,u,1,2>: Cost 2 vext2 LHS, <1,2,3,0>
- 1544381400U, // <2,u,1,3>: Cost 2 vext2 LHS, <1,3,1,3>
- 2618123325U, // <2,u,1,4>: Cost 3 vext2 LHS, <1,4,3,5>
- 1544381584U, // <2,u,1,5>: Cost 2 vext2 LHS, <1,5,3,7>
- 2618123489U, // <2,u,1,6>: Cost 3 vext2 LHS, <1,6,3,7>
- 2726254427U, // <2,u,1,7>: Cost 3 vext3 <7,0,1,2>, <u,1,7,3>
- 1544381823U, // <2,u,1,u>: Cost 2 vext2 LHS, <1,u,3,3>
- 1478328422U, // <2,u,2,0>: Cost 2 vext1 <0,2,u,2>, LHS
- 2618123807U, // <2,u,2,1>: Cost 3 vext2 LHS, <2,1,3,1>
- 269271142U, // <2,u,2,2>: Cost 1 vdup2 LHS
- 1544382118U, // <2,u,2,3>: Cost 2 vext2 LHS, <2,3,0,1>
- 1478331702U, // <2,u,2,4>: Cost 2 vext1 <0,2,u,2>, RHS
- 2618124136U, // <2,u,2,5>: Cost 3 vext2 LHS, <2,5,3,6>
- 1544382394U, // <2,u,2,6>: Cost 2 vext2 LHS, <2,6,3,7>
- 3088354857U, // <2,u,2,7>: Cost 3 vtrnr <0,2,0,2>, RHS
- 269271142U, // <2,u,2,u>: Cost 1 vdup2 LHS
- 1544382614U, // <2,u,3,0>: Cost 2 vext2 LHS, <3,0,1,2>
- 2953627374U, // <2,u,3,1>: Cost 3 vzipr LHS, <2,3,u,1>
- 1490282143U, // <2,u,3,2>: Cost 2 vext1 <2,2,u,3>, <2,2,u,3>
- 1879883932U, // <2,u,3,3>: Cost 2 vzipr LHS, LHS
- 1544382978U, // <2,u,3,4>: Cost 2 vext2 LHS, <3,4,5,6>
- 2953627378U, // <2,u,3,5>: Cost 3 vzipr LHS, <2,3,u,5>
- 1514172931U, // <2,u,3,6>: Cost 2 vext1 <6,2,u,3>, <6,2,u,3>
- 1879887176U, // <2,u,3,7>: Cost 2 vzipr LHS, RHS
- 1879883937U, // <2,u,3,u>: Cost 2 vzipr LHS, LHS
- 1484316774U, // <2,u,4,0>: Cost 2 vext1 <1,2,u,4>, LHS
- 1484317639U, // <2,u,4,1>: Cost 2 vext1 <1,2,u,4>, <1,2,u,4>
- 2552088270U, // <2,u,4,2>: Cost 3 vext1 <0,2,u,4>, <2,3,4,5>
- 1190213513U, // <2,u,4,3>: Cost 2 vrev <u,2,3,4>
- 1484320054U, // <2,u,4,4>: Cost 2 vext1 <1,2,u,4>, RHS
- 470641974U, // <2,u,4,5>: Cost 1 vext2 LHS, RHS
- 1592159604U, // <2,u,4,6>: Cost 2 vext2 LHS, <4,6,4,6>
- 3094564393U, // <2,u,4,7>: Cost 3 vtrnr <1,2,3,4>, RHS
- 470642217U, // <2,u,4,u>: Cost 1 vext2 LHS, RHS
- 2552094959U, // <2,u,5,0>: Cost 3 vext1 <0,2,u,5>, <0,2,u,5>
- 1592159952U, // <2,u,5,1>: Cost 2 vext2 LHS, <5,1,7,3>
- 2564040353U, // <2,u,5,2>: Cost 3 vext1 <2,2,u,5>, <2,2,u,5>
- 2690275455U, // <2,u,5,3>: Cost 3 vext3 <0,u,u,2>, <u,5,3,7>
- 1592160198U, // <2,u,5,4>: Cost 2 vext2 LHS, <5,4,7,6>
- 1592160260U, // <2,u,5,5>: Cost 2 vext2 LHS, <5,5,5,5>
- 1611962522U, // <2,u,5,6>: Cost 2 vext3 <0,2,0,2>, RHS
- 1592160424U, // <2,u,5,7>: Cost 2 vext2 LHS, <5,7,5,7>
- 1611962540U, // <2,u,5,u>: Cost 2 vext3 <0,2,0,2>, RHS
- 1478361190U, // <2,u,6,0>: Cost 2 vext1 <0,2,u,6>, LHS
- 2552103670U, // <2,u,6,1>: Cost 3 vext1 <0,2,u,6>, <1,0,3,2>
- 1592160762U, // <2,u,6,2>: Cost 2 vext2 LHS, <6,2,7,3>
- 2685704400U, // <2,u,6,3>: Cost 3 vext3 <0,2,0,2>, <u,6,3,7>
- 1478364470U, // <2,u,6,4>: Cost 2 vext1 <0,2,u,6>, RHS
- 2901891226U, // <2,u,6,5>: Cost 3 vzipl <2,6,3,7>, RHS
- 1592161080U, // <2,u,6,6>: Cost 2 vext2 LHS, <6,6,6,6>
- 1592161102U, // <2,u,6,7>: Cost 2 vext2 LHS, <6,7,0,1>
- 1478367022U, // <2,u,6,u>: Cost 2 vext1 <0,2,u,6>, LHS
- 1592161274U, // <2,u,7,0>: Cost 2 vext2 LHS, <7,0,1,2>
- 2659931226U, // <2,u,7,1>: Cost 3 vext2 <7,1,2,u>, <7,1,2,u>
- 2564056739U, // <2,u,7,2>: Cost 3 vext1 <2,2,u,7>, <2,2,u,7>
- 2665903331U, // <2,u,7,3>: Cost 3 vext2 LHS, <7,3,0,1>
- 1592161638U, // <2,u,7,4>: Cost 2 vext2 LHS, <7,4,5,6>
- 2665903494U, // <2,u,7,5>: Cost 3 vext2 LHS, <7,5,0,2>
- 2587947527U, // <2,u,7,6>: Cost 3 vext1 <6,2,u,7>, <6,2,u,7>
- 1592161900U, // <2,u,7,7>: Cost 2 vext2 LHS, <7,7,7,7>
- 1592161922U, // <2,u,7,u>: Cost 2 vext2 LHS, <7,u,1,2>
- 1478377574U, // <2,u,u,0>: Cost 2 vext1 <0,2,u,u>, LHS
- 470644526U, // <2,u,u,1>: Cost 1 vext2 LHS, LHS
- 269271142U, // <2,u,u,2>: Cost 1 vdup2 LHS
- 1879924892U, // <2,u,u,3>: Cost 2 vzipr LHS, LHS
- 1478380854U, // <2,u,u,4>: Cost 2 vext1 <0,2,u,u>, RHS
- 470644890U, // <2,u,u,5>: Cost 1 vext2 LHS, RHS
- 1611962765U, // <2,u,u,6>: Cost 2 vext3 <0,2,0,2>, RHS
- 1879928136U, // <2,u,u,7>: Cost 2 vzipr LHS, RHS
- 470645093U, // <2,u,u,u>: Cost 1 vext2 LHS, LHS
- 1611448320U, // <3,0,0,0>: Cost 2 vext3 LHS, <0,0,0,0>
- 1611890698U, // <3,0,0,1>: Cost 2 vext3 LHS, <0,0,1,1>
- 1611890708U, // <3,0,0,2>: Cost 2 vext3 LHS, <0,0,2,2>
- 3763576860U, // <3,0,0,3>: Cost 4 vext3 LHS, <0,0,3,1>
- 2689835045U, // <3,0,0,4>: Cost 3 vext3 LHS, <0,0,4,1>
- 3698508206U, // <3,0,0,5>: Cost 4 vext2 <1,2,3,0>, <0,5,2,7>
- 3763576887U, // <3,0,0,6>: Cost 4 vext3 LHS, <0,0,6,1>
- 3667678434U, // <3,0,0,7>: Cost 4 vext1 <7,3,0,0>, <7,3,0,0>
- 1616093258U, // <3,0,0,u>: Cost 2 vext3 LHS, <0,0,u,2>
- 1490337894U, // <3,0,1,0>: Cost 2 vext1 <2,3,0,1>, LHS
- 2685632602U, // <3,0,1,1>: Cost 3 vext3 LHS, <0,1,1,0>
- 537706598U, // <3,0,1,2>: Cost 1 vext3 LHS, LHS
- 2624766936U, // <3,0,1,3>: Cost 3 vext2 <1,2,3,0>, <1,3,1,3>
- 1490341174U, // <3,0,1,4>: Cost 2 vext1 <2,3,0,1>, RHS
- 2624767120U, // <3,0,1,5>: Cost 3 vext2 <1,2,3,0>, <1,5,3,7>
- 2732966030U, // <3,0,1,6>: Cost 3 vext3 LHS, <0,1,6,7>
- 2593944803U, // <3,0,1,7>: Cost 3 vext1 <7,3,0,1>, <7,3,0,1>
- 537706652U, // <3,0,1,u>: Cost 1 vext3 LHS, LHS
- 1611890852U, // <3,0,2,0>: Cost 2 vext3 LHS, <0,2,0,2>
- 2685632684U, // <3,0,2,1>: Cost 3 vext3 LHS, <0,2,1,1>
- 2685632692U, // <3,0,2,2>: Cost 3 vext3 LHS, <0,2,2,0>
- 2685632702U, // <3,0,2,3>: Cost 3 vext3 LHS, <0,2,3,1>
- 1611890892U, // <3,0,2,4>: Cost 2 vext3 LHS, <0,2,4,6>
- 2732966102U, // <3,0,2,5>: Cost 3 vext3 LHS, <0,2,5,7>
- 2624767930U, // <3,0,2,6>: Cost 3 vext2 <1,2,3,0>, <2,6,3,7>
- 2685632744U, // <3,0,2,7>: Cost 3 vext3 LHS, <0,2,7,7>
- 1611890924U, // <3,0,2,u>: Cost 2 vext3 LHS, <0,2,u,2>
- 2624768150U, // <3,0,3,0>: Cost 3 vext2 <1,2,3,0>, <3,0,1,2>
- 2685632764U, // <3,0,3,1>: Cost 3 vext3 LHS, <0,3,1,0>
- 2685632774U, // <3,0,3,2>: Cost 3 vext3 LHS, <0,3,2,1>
- 2624768412U, // <3,0,3,3>: Cost 3 vext2 <1,2,3,0>, <3,3,3,3>
- 2624768514U, // <3,0,3,4>: Cost 3 vext2 <1,2,3,0>, <3,4,5,6>
- 3702491714U, // <3,0,3,5>: Cost 4 vext2 <1,u,3,0>, <3,5,3,7>
- 2624768632U, // <3,0,3,6>: Cost 3 vext2 <1,2,3,0>, <3,6,0,7>
- 3702491843U, // <3,0,3,7>: Cost 4 vext2 <1,u,3,0>, <3,7,0,1>
- 2686959934U, // <3,0,3,u>: Cost 3 vext3 <0,3,u,3>, <0,3,u,3>
- 2689835336U, // <3,0,4,0>: Cost 3 vext3 LHS, <0,4,0,4>
- 1611891026U, // <3,0,4,1>: Cost 2 vext3 LHS, <0,4,1,5>
- 1611891036U, // <3,0,4,2>: Cost 2 vext3 LHS, <0,4,2,6>
- 3763577184U, // <3,0,4,3>: Cost 4 vext3 LHS, <0,4,3,1>
- 2689835374U, // <3,0,4,4>: Cost 3 vext3 LHS, <0,4,4,6>
- 1551027510U, // <3,0,4,5>: Cost 2 vext2 <1,2,3,0>, RHS
- 2666573172U, // <3,0,4,6>: Cost 3 vext2 <u,2,3,0>, <4,6,4,6>
- 3667711206U, // <3,0,4,7>: Cost 4 vext1 <7,3,0,4>, <7,3,0,4>
- 1616093586U, // <3,0,4,u>: Cost 2 vext3 LHS, <0,4,u,6>
- 2685190556U, // <3,0,5,0>: Cost 3 vext3 LHS, <0,5,0,7>
- 2666573520U, // <3,0,5,1>: Cost 3 vext2 <u,2,3,0>, <5,1,7,3>
- 3040886886U, // <3,0,5,2>: Cost 3 vtrnl <3,4,5,6>, LHS
- 3625912834U, // <3,0,5,3>: Cost 4 vext1 <0,3,0,5>, <3,4,5,6>
- 2666573766U, // <3,0,5,4>: Cost 3 vext2 <u,2,3,0>, <5,4,7,6>
- 2666573828U, // <3,0,5,5>: Cost 3 vext2 <u,2,3,0>, <5,5,5,5>
- 2732966354U, // <3,0,5,6>: Cost 3 vext3 LHS, <0,5,6,7>
- 2666573992U, // <3,0,5,7>: Cost 3 vext2 <u,2,3,0>, <5,7,5,7>
- 3040886940U, // <3,0,5,u>: Cost 3 vtrnl <3,4,5,6>, LHS
- 2685190637U, // <3,0,6,0>: Cost 3 vext3 LHS, <0,6,0,7>
- 2732966390U, // <3,0,6,1>: Cost 3 vext3 LHS, <0,6,1,7>
- 2689835519U, // <3,0,6,2>: Cost 3 vext3 LHS, <0,6,2,7>
- 3667724438U, // <3,0,6,3>: Cost 4 vext1 <7,3,0,6>, <3,0,1,2>
- 3763577355U, // <3,0,6,4>: Cost 4 vext3 LHS, <0,6,4,1>
- 3806708243U, // <3,0,6,5>: Cost 4 vext3 LHS, <0,6,5,0>
- 2666574648U, // <3,0,6,6>: Cost 3 vext2 <u,2,3,0>, <6,6,6,6>
- 2657948520U, // <3,0,6,7>: Cost 3 vext2 <6,7,3,0>, <6,7,3,0>
- 2689835573U, // <3,0,6,u>: Cost 3 vext3 LHS, <0,6,u,7>
- 2666574842U, // <3,0,7,0>: Cost 3 vext2 <u,2,3,0>, <7,0,1,2>
- 2685633095U, // <3,0,7,1>: Cost 3 vext3 LHS, <0,7,1,7>
- 2660603052U, // <3,0,7,2>: Cost 3 vext2 <7,2,3,0>, <7,2,3,0>
- 3643844997U, // <3,0,7,3>: Cost 4 vext1 <3,3,0,7>, <3,3,0,7>
- 2666575206U, // <3,0,7,4>: Cost 3 vext2 <u,2,3,0>, <7,4,5,6>
- 3655790391U, // <3,0,7,5>: Cost 4 vext1 <5,3,0,7>, <5,3,0,7>
- 3731690968U, // <3,0,7,6>: Cost 4 vext2 <6,7,3,0>, <7,6,0,3>
- 2666575468U, // <3,0,7,7>: Cost 3 vext2 <u,2,3,0>, <7,7,7,7>
- 2664584850U, // <3,0,7,u>: Cost 3 vext2 <7,u,3,0>, <7,u,3,0>
- 1616093834U, // <3,0,u,0>: Cost 2 vext3 LHS, <0,u,0,2>
- 1611891346U, // <3,0,u,1>: Cost 2 vext3 LHS, <0,u,1,1>
- 537707165U, // <3,0,u,2>: Cost 1 vext3 LHS, LHS
- 2689835684U, // <3,0,u,3>: Cost 3 vext3 LHS, <0,u,3,1>
- 1616093874U, // <3,0,u,4>: Cost 2 vext3 LHS, <0,u,4,6>
- 1551030426U, // <3,0,u,5>: Cost 2 vext2 <1,2,3,0>, RHS
- 2624772304U, // <3,0,u,6>: Cost 3 vext2 <1,2,3,0>, <u,6,3,7>
- 2594002154U, // <3,0,u,7>: Cost 3 vext1 <7,3,0,u>, <7,3,0,u>
- 537707219U, // <3,0,u,u>: Cost 1 vext3 LHS, LHS
- 2552201318U, // <3,1,0,0>: Cost 3 vext1 <0,3,1,0>, LHS
- 2618802278U, // <3,1,0,1>: Cost 3 vext2 <0,2,3,1>, LHS
- 2618802366U, // <3,1,0,2>: Cost 3 vext2 <0,2,3,1>, <0,2,3,1>
- 1611449078U, // <3,1,0,3>: Cost 2 vext3 LHS, <1,0,3,2>
- 2552204598U, // <3,1,0,4>: Cost 3 vext1 <0,3,1,0>, RHS
- 2732966663U, // <3,1,0,5>: Cost 3 vext3 LHS, <1,0,5,1>
- 3906258396U, // <3,1,0,6>: Cost 4 vuzpr <2,3,0,1>, <2,0,4,6>
- 3667752171U, // <3,1,0,7>: Cost 4 vext1 <7,3,1,0>, <7,3,1,0>
- 1611891491U, // <3,1,0,u>: Cost 2 vext3 LHS, <1,0,u,2>
- 2689835819U, // <3,1,1,0>: Cost 3 vext3 LHS, <1,1,0,1>
- 1611449140U, // <3,1,1,1>: Cost 2 vext3 LHS, <1,1,1,1>
- 2624775063U, // <3,1,1,2>: Cost 3 vext2 <1,2,3,1>, <1,2,3,1>
- 1611891528U, // <3,1,1,3>: Cost 2 vext3 LHS, <1,1,3,3>
- 2689835859U, // <3,1,1,4>: Cost 3 vext3 LHS, <1,1,4,5>
- 2689835868U, // <3,1,1,5>: Cost 3 vext3 LHS, <1,1,5,5>
- 3763577701U, // <3,1,1,6>: Cost 4 vext3 LHS, <1,1,6,5>
- 3765273452U, // <3,1,1,7>: Cost 4 vext3 <1,1,7,3>, <1,1,7,3>
- 1611891573U, // <3,1,1,u>: Cost 2 vext3 LHS, <1,1,u,3>
- 2629420494U, // <3,1,2,0>: Cost 3 vext2 <2,0,3,1>, <2,0,3,1>
- 2689835911U, // <3,1,2,1>: Cost 3 vext3 LHS, <1,2,1,3>
- 2564163248U, // <3,1,2,2>: Cost 3 vext1 <2,3,1,2>, <2,3,1,2>
- 1611449238U, // <3,1,2,3>: Cost 2 vext3 LHS, <1,2,3,0>
- 2564164918U, // <3,1,2,4>: Cost 3 vext1 <2,3,1,2>, RHS
- 2689835947U, // <3,1,2,5>: Cost 3 vext3 LHS, <1,2,5,3>
- 3692545978U, // <3,1,2,6>: Cost 4 vext2 <0,2,3,1>, <2,6,3,7>
- 2732966842U, // <3,1,2,7>: Cost 3 vext3 LHS, <1,2,7,0>
- 1611891651U, // <3,1,2,u>: Cost 2 vext3 LHS, <1,2,u,0>
- 1484456038U, // <3,1,3,0>: Cost 2 vext1 <1,3,1,3>, LHS
- 1611891672U, // <3,1,3,1>: Cost 2 vext3 LHS, <1,3,1,3>
- 2685633502U, // <3,1,3,2>: Cost 3 vext3 LHS, <1,3,2,0>
- 2685633512U, // <3,1,3,3>: Cost 3 vext3 LHS, <1,3,3,1>
- 1484459318U, // <3,1,3,4>: Cost 2 vext1 <1,3,1,3>, RHS
- 1611891712U, // <3,1,3,5>: Cost 2 vext3 LHS, <1,3,5,7>
- 2689836041U, // <3,1,3,6>: Cost 3 vext3 LHS, <1,3,6,7>
- 2733409294U, // <3,1,3,7>: Cost 3 vext3 LHS, <1,3,7,3>
- 1611891735U, // <3,1,3,u>: Cost 2 vext3 LHS, <1,3,u,3>
- 2552234086U, // <3,1,4,0>: Cost 3 vext1 <0,3,1,4>, LHS
- 2732966955U, // <3,1,4,1>: Cost 3 vext3 LHS, <1,4,1,5>
- 2732966964U, // <3,1,4,2>: Cost 3 vext3 LHS, <1,4,2,5>
- 2685633597U, // <3,1,4,3>: Cost 3 vext3 LHS, <1,4,3,5>
- 2552237366U, // <3,1,4,4>: Cost 3 vext1 <0,3,1,4>, RHS
- 2618805558U, // <3,1,4,5>: Cost 3 vext2 <0,2,3,1>, RHS
- 2769472822U, // <3,1,4,6>: Cost 3 vuzpl <3,0,1,2>, RHS
- 3667784943U, // <3,1,4,7>: Cost 4 vext1 <7,3,1,4>, <7,3,1,4>
- 2685633642U, // <3,1,4,u>: Cost 3 vext3 LHS, <1,4,u,5>
- 2689836143U, // <3,1,5,0>: Cost 3 vext3 LHS, <1,5,0,1>
- 2564187280U, // <3,1,5,1>: Cost 3 vext1 <2,3,1,5>, <1,5,3,7>
- 2564187827U, // <3,1,5,2>: Cost 3 vext1 <2,3,1,5>, <2,3,1,5>
- 1611891856U, // <3,1,5,3>: Cost 2 vext3 LHS, <1,5,3,7>
- 2689836183U, // <3,1,5,4>: Cost 3 vext3 LHS, <1,5,4,5>
- 3759375522U, // <3,1,5,5>: Cost 4 vext3 LHS, <1,5,5,7>
- 3720417378U, // <3,1,5,6>: Cost 4 vext2 <4,u,3,1>, <5,6,7,0>
- 2832518454U, // <3,1,5,7>: Cost 3 vuzpr <2,3,0,1>, RHS
- 1611891901U, // <3,1,5,u>: Cost 2 vext3 LHS, <1,5,u,7>
- 3763578048U, // <3,1,6,0>: Cost 4 vext3 LHS, <1,6,0,1>
- 2689836239U, // <3,1,6,1>: Cost 3 vext3 LHS, <1,6,1,7>
- 2732967128U, // <3,1,6,2>: Cost 3 vext3 LHS, <1,6,2,7>
- 2685633761U, // <3,1,6,3>: Cost 3 vext3 LHS, <1,6,3,7>
- 3763578088U, // <3,1,6,4>: Cost 4 vext3 LHS, <1,6,4,5>
- 2689836275U, // <3,1,6,5>: Cost 3 vext3 LHS, <1,6,5,7>
- 3763578108U, // <3,1,6,6>: Cost 4 vext3 LHS, <1,6,6,7>
- 2732967166U, // <3,1,6,7>: Cost 3 vext3 LHS, <1,6,7,0>
- 2685633806U, // <3,1,6,u>: Cost 3 vext3 LHS, <1,6,u,7>
- 3631972454U, // <3,1,7,0>: Cost 4 vext1 <1,3,1,7>, LHS
- 2659947612U, // <3,1,7,1>: Cost 3 vext2 <7,1,3,1>, <7,1,3,1>
- 4036102294U, // <3,1,7,2>: Cost 4 vzipr <1,5,3,7>, <3,0,1,2>
- 3095396454U, // <3,1,7,3>: Cost 3 vtrnr <1,3,5,7>, LHS
- 3631975734U, // <3,1,7,4>: Cost 4 vext1 <1,3,1,7>, RHS
- 2222982144U, // <3,1,7,5>: Cost 3 vrev <1,3,5,7>
- 3296797705U, // <3,1,7,6>: Cost 4 vrev <1,3,6,7>
- 3720418924U, // <3,1,7,7>: Cost 4 vext2 <4,u,3,1>, <7,7,7,7>
- 3095396459U, // <3,1,7,u>: Cost 3 vtrnr <1,3,5,7>, LHS
- 1484496998U, // <3,1,u,0>: Cost 2 vext1 <1,3,1,u>, LHS
- 1611892077U, // <3,1,u,1>: Cost 2 vext3 LHS, <1,u,1,3>
- 2685633907U, // <3,1,u,2>: Cost 3 vext3 LHS, <1,u,2,0>
- 1611892092U, // <3,1,u,3>: Cost 2 vext3 LHS, <1,u,3,0>
- 1484500278U, // <3,1,u,4>: Cost 2 vext1 <1,3,1,u>, RHS
- 1611892117U, // <3,1,u,5>: Cost 2 vext3 LHS, <1,u,5,7>
- 2685633950U, // <3,1,u,6>: Cost 3 vext3 LHS, <1,u,6,7>
- 2832518697U, // <3,1,u,7>: Cost 3 vuzpr <2,3,0,1>, RHS
- 1611892140U, // <3,1,u,u>: Cost 2 vext3 LHS, <1,u,u,3>
- 2623455232U, // <3,2,0,0>: Cost 3 vext2 <1,0,3,2>, <0,0,0,0>
- 1549713510U, // <3,2,0,1>: Cost 2 vext2 <1,0,3,2>, LHS
- 2689836484U, // <3,2,0,2>: Cost 3 vext3 LHS, <2,0,2,0>
- 2685633997U, // <3,2,0,3>: Cost 3 vext3 LHS, <2,0,3,0>
- 2623455570U, // <3,2,0,4>: Cost 3 vext2 <1,0,3,2>, <0,4,1,5>
- 2732967398U, // <3,2,0,5>: Cost 3 vext3 LHS, <2,0,5,7>
- 2689836524U, // <3,2,0,6>: Cost 3 vext3 LHS, <2,0,6,4>
- 2229044964U, // <3,2,0,7>: Cost 3 vrev <2,3,7,0>
- 1549714077U, // <3,2,0,u>: Cost 2 vext2 <1,0,3,2>, LHS
- 1549714166U, // <3,2,1,0>: Cost 2 vext2 <1,0,3,2>, <1,0,3,2>
- 2623456052U, // <3,2,1,1>: Cost 3 vext2 <1,0,3,2>, <1,1,1,1>
- 2623456150U, // <3,2,1,2>: Cost 3 vext2 <1,0,3,2>, <1,2,3,0>
- 2685634079U, // <3,2,1,3>: Cost 3 vext3 LHS, <2,1,3,1>
- 2552286518U, // <3,2,1,4>: Cost 3 vext1 <0,3,2,1>, RHS
- 2623456400U, // <3,2,1,5>: Cost 3 vext2 <1,0,3,2>, <1,5,3,7>
- 2689836604U, // <3,2,1,6>: Cost 3 vext3 LHS, <2,1,6,3>
- 3667834101U, // <3,2,1,7>: Cost 4 vext1 <7,3,2,1>, <7,3,2,1>
- 1155385070U, // <3,2,1,u>: Cost 2 vrev <2,3,u,1>
- 2689836629U, // <3,2,2,0>: Cost 3 vext3 LHS, <2,2,0,1>
- 2689836640U, // <3,2,2,1>: Cost 3 vext3 LHS, <2,2,1,3>
- 1611449960U, // <3,2,2,2>: Cost 2 vext3 LHS, <2,2,2,2>
- 1611892338U, // <3,2,2,3>: Cost 2 vext3 LHS, <2,2,3,3>
- 2689836669U, // <3,2,2,4>: Cost 3 vext3 LHS, <2,2,4,5>
- 2689836680U, // <3,2,2,5>: Cost 3 vext3 LHS, <2,2,5,7>
- 2689836688U, // <3,2,2,6>: Cost 3 vext3 LHS, <2,2,6,6>
- 3763578518U, // <3,2,2,7>: Cost 4 vext3 LHS, <2,2,7,3>
- 1611892383U, // <3,2,2,u>: Cost 2 vext3 LHS, <2,2,u,3>
- 1611450022U, // <3,2,3,0>: Cost 2 vext3 LHS, <2,3,0,1>
- 2685191854U, // <3,2,3,1>: Cost 3 vext3 LHS, <2,3,1,0>
- 2685191865U, // <3,2,3,2>: Cost 3 vext3 LHS, <2,3,2,2>
- 2685191875U, // <3,2,3,3>: Cost 3 vext3 LHS, <2,3,3,3>
- 1611450062U, // <3,2,3,4>: Cost 2 vext3 LHS, <2,3,4,5>
- 2732967635U, // <3,2,3,5>: Cost 3 vext3 LHS, <2,3,5,1>
- 2732967645U, // <3,2,3,6>: Cost 3 vext3 LHS, <2,3,6,2>
- 2732967652U, // <3,2,3,7>: Cost 3 vext3 LHS, <2,3,7,0>
- 1611450094U, // <3,2,3,u>: Cost 2 vext3 LHS, <2,3,u,1>
- 2558279782U, // <3,2,4,0>: Cost 3 vext1 <1,3,2,4>, LHS
- 2558280602U, // <3,2,4,1>: Cost 3 vext1 <1,3,2,4>, <1,2,3,4>
- 2732967692U, // <3,2,4,2>: Cost 3 vext3 LHS, <2,4,2,4>
- 2685634326U, // <3,2,4,3>: Cost 3 vext3 LHS, <2,4,3,5>
- 2558283062U, // <3,2,4,4>: Cost 3 vext1 <1,3,2,4>, RHS
- 1549716790U, // <3,2,4,5>: Cost 2 vext2 <1,0,3,2>, RHS
- 2689836844U, // <3,2,4,6>: Cost 3 vext3 LHS, <2,4,6,0>
- 2229077736U, // <3,2,4,7>: Cost 3 vrev <2,3,7,4>
- 1549717033U, // <3,2,4,u>: Cost 2 vext2 <1,0,3,2>, RHS
- 2552316006U, // <3,2,5,0>: Cost 3 vext1 <0,3,2,5>, LHS
- 2228643507U, // <3,2,5,1>: Cost 3 vrev <2,3,1,5>
- 2689836896U, // <3,2,5,2>: Cost 3 vext3 LHS, <2,5,2,7>
- 2685634408U, // <3,2,5,3>: Cost 3 vext3 LHS, <2,5,3,6>
- 1155122894U, // <3,2,5,4>: Cost 2 vrev <2,3,4,5>
- 2665263108U, // <3,2,5,5>: Cost 3 vext2 <u,0,3,2>, <5,5,5,5>
- 2689836932U, // <3,2,5,6>: Cost 3 vext3 LHS, <2,5,6,7>
- 2665263272U, // <3,2,5,7>: Cost 3 vext2 <u,0,3,2>, <5,7,5,7>
- 1155417842U, // <3,2,5,u>: Cost 2 vrev <2,3,u,5>
- 2689836953U, // <3,2,6,0>: Cost 3 vext3 LHS, <2,6,0,1>
- 2689836964U, // <3,2,6,1>: Cost 3 vext3 LHS, <2,6,1,3>
- 2689836976U, // <3,2,6,2>: Cost 3 vext3 LHS, <2,6,2,6>
- 1611892666U, // <3,2,6,3>: Cost 2 vext3 LHS, <2,6,3,7>
- 2689836993U, // <3,2,6,4>: Cost 3 vext3 LHS, <2,6,4,5>
- 2689837004U, // <3,2,6,5>: Cost 3 vext3 LHS, <2,6,5,7>
- 2689837013U, // <3,2,6,6>: Cost 3 vext3 LHS, <2,6,6,7>
- 2665263950U, // <3,2,6,7>: Cost 3 vext2 <u,0,3,2>, <6,7,0,1>
- 1611892711U, // <3,2,6,u>: Cost 2 vext3 LHS, <2,6,u,7>
- 2665264122U, // <3,2,7,0>: Cost 3 vext2 <u,0,3,2>, <7,0,1,2>
- 2623460419U, // <3,2,7,1>: Cost 3 vext2 <1,0,3,2>, <7,1,0,3>
- 4169138340U, // <3,2,7,2>: Cost 4 vtrnr <1,3,5,7>, <0,2,0,2>
- 2962358374U, // <3,2,7,3>: Cost 3 vzipr <1,5,3,7>, LHS
- 2665264486U, // <3,2,7,4>: Cost 3 vext2 <u,0,3,2>, <7,4,5,6>
- 2228954841U, // <3,2,7,5>: Cost 3 vrev <2,3,5,7>
- 2229028578U, // <3,2,7,6>: Cost 3 vrev <2,3,6,7>
- 2665264748U, // <3,2,7,7>: Cost 3 vext2 <u,0,3,2>, <7,7,7,7>
- 2962358379U, // <3,2,7,u>: Cost 3 vzipr <1,5,3,7>, LHS
- 1611892795U, // <3,2,u,0>: Cost 2 vext3 LHS, <2,u,0,1>
- 1549719342U, // <3,2,u,1>: Cost 2 vext2 <1,0,3,2>, LHS
- 1611449960U, // <3,2,u,2>: Cost 2 vext3 LHS, <2,2,2,2>
- 1611892824U, // <3,2,u,3>: Cost 2 vext3 LHS, <2,u,3,3>
- 1611892835U, // <3,2,u,4>: Cost 2 vext3 LHS, <2,u,4,5>
- 1549719706U, // <3,2,u,5>: Cost 2 vext2 <1,0,3,2>, RHS
- 2689837168U, // <3,2,u,6>: Cost 3 vext3 LHS, <2,u,6,0>
- 2665265408U, // <3,2,u,7>: Cost 3 vext2 <u,0,3,2>, <u,7,0,1>
- 1611892867U, // <3,2,u,u>: Cost 2 vext3 LHS, <2,u,u,1>
- 2685192331U, // <3,3,0,0>: Cost 3 vext3 LHS, <3,0,0,0>
- 1611450518U, // <3,3,0,1>: Cost 2 vext3 LHS, <3,0,1,2>
- 2685634717U, // <3,3,0,2>: Cost 3 vext3 LHS, <3,0,2,0>
- 2564294806U, // <3,3,0,3>: Cost 3 vext1 <2,3,3,0>, <3,0,1,2>
- 2685634736U, // <3,3,0,4>: Cost 3 vext3 LHS, <3,0,4,1>
- 2732968122U, // <3,3,0,5>: Cost 3 vext3 LHS, <3,0,5,2>
- 3763579075U, // <3,3,0,6>: Cost 4 vext3 LHS, <3,0,6,2>
- 4034053264U, // <3,3,0,7>: Cost 4 vzipr <1,2,3,0>, <1,5,3,7>
- 1611450581U, // <3,3,0,u>: Cost 2 vext3 LHS, <3,0,u,2>
- 2685192415U, // <3,3,1,0>: Cost 3 vext3 LHS, <3,1,0,3>
- 1550385992U, // <3,3,1,1>: Cost 2 vext2 <1,1,3,3>, <1,1,3,3>
- 2685192433U, // <3,3,1,2>: Cost 3 vext3 LHS, <3,1,2,3>
- 2685634808U, // <3,3,1,3>: Cost 3 vext3 LHS, <3,1,3,1>
- 2558332214U, // <3,3,1,4>: Cost 3 vext1 <1,3,3,1>, RHS
- 2685634828U, // <3,3,1,5>: Cost 3 vext3 LHS, <3,1,5,3>
- 3759376661U, // <3,3,1,6>: Cost 4 vext3 LHS, <3,1,6,3>
- 2703477022U, // <3,3,1,7>: Cost 3 vext3 <3,1,7,3>, <3,1,7,3>
- 1555031423U, // <3,3,1,u>: Cost 2 vext2 <1,u,3,3>, <1,u,3,3>
- 2564309094U, // <3,3,2,0>: Cost 3 vext1 <2,3,3,2>, LHS
- 2630100513U, // <3,3,2,1>: Cost 3 vext2 <2,1,3,3>, <2,1,3,3>
- 1557022322U, // <3,3,2,2>: Cost 2 vext2 <2,2,3,3>, <2,2,3,3>
- 2685192520U, // <3,3,2,3>: Cost 3 vext3 LHS, <3,2,3,0>
- 2564312374U, // <3,3,2,4>: Cost 3 vext1 <2,3,3,2>, RHS
- 2732968286U, // <3,3,2,5>: Cost 3 vext3 LHS, <3,2,5,4>
- 2685634918U, // <3,3,2,6>: Cost 3 vext3 LHS, <3,2,6,3>
- 2704140655U, // <3,3,2,7>: Cost 3 vext3 <3,2,7,3>, <3,2,7,3>
- 1561004120U, // <3,3,2,u>: Cost 2 vext2 <2,u,3,3>, <2,u,3,3>
- 1496547430U, // <3,3,3,0>: Cost 2 vext1 <3,3,3,3>, LHS
- 2624129256U, // <3,3,3,1>: Cost 3 vext2 <1,1,3,3>, <3,1,1,3>
- 2630764866U, // <3,3,3,2>: Cost 3 vext2 <2,2,3,3>, <3,2,2,3>
- 336380006U, // <3,3,3,3>: Cost 1 vdup3 LHS
- 1496550710U, // <3,3,3,4>: Cost 2 vext1 <3,3,3,3>, RHS
- 2732968368U, // <3,3,3,5>: Cost 3 vext3 LHS, <3,3,5,5>
- 2624129683U, // <3,3,3,6>: Cost 3 vext2 <1,1,3,3>, <3,6,3,7>
- 2594182400U, // <3,3,3,7>: Cost 3 vext1 <7,3,3,3>, <7,3,3,3>
- 336380006U, // <3,3,3,u>: Cost 1 vdup3 LHS
- 2558353510U, // <3,3,4,0>: Cost 3 vext1 <1,3,3,4>, LHS
- 2558354411U, // <3,3,4,1>: Cost 3 vext1 <1,3,3,4>, <1,3,3,4>
- 2564327108U, // <3,3,4,2>: Cost 3 vext1 <2,3,3,4>, <2,3,3,4>
- 2564327938U, // <3,3,4,3>: Cost 3 vext1 <2,3,3,4>, <3,4,5,6>
- 2960343962U, // <3,3,4,4>: Cost 3 vzipr <1,2,3,4>, <1,2,3,4>
- 1611893250U, // <3,3,4,5>: Cost 2 vext3 LHS, <3,4,5,6>
- 2771619126U, // <3,3,4,6>: Cost 3 vuzpl <3,3,3,3>, RHS
- 4034086032U, // <3,3,4,7>: Cost 4 vzipr <1,2,3,4>, <1,5,3,7>
- 1611893277U, // <3,3,4,u>: Cost 2 vext3 LHS, <3,4,u,6>
- 2558361702U, // <3,3,5,0>: Cost 3 vext1 <1,3,3,5>, LHS
- 2558362604U, // <3,3,5,1>: Cost 3 vext1 <1,3,3,5>, <1,3,3,5>
- 2558363342U, // <3,3,5,2>: Cost 3 vext1 <1,3,3,5>, <2,3,4,5>
- 2732968512U, // <3,3,5,3>: Cost 3 vext3 LHS, <3,5,3,5>
- 2558364982U, // <3,3,5,4>: Cost 3 vext1 <1,3,3,5>, RHS
- 3101279950U, // <3,3,5,5>: Cost 3 vtrnr <2,3,4,5>, <2,3,4,5>
- 2665934946U, // <3,3,5,6>: Cost 3 vext2 <u,1,3,3>, <5,6,7,0>
- 2826636598U, // <3,3,5,7>: Cost 3 vuzpr <1,3,1,3>, RHS
- 2826636599U, // <3,3,5,u>: Cost 3 vuzpr <1,3,1,3>, RHS
- 2732968568U, // <3,3,6,0>: Cost 3 vext3 LHS, <3,6,0,7>
- 3763579521U, // <3,3,6,1>: Cost 4 vext3 LHS, <3,6,1,7>
- 2732968586U, // <3,3,6,2>: Cost 3 vext3 LHS, <3,6,2,7>
- 2732968595U, // <3,3,6,3>: Cost 3 vext3 LHS, <3,6,3,7>
- 2732968604U, // <3,3,6,4>: Cost 3 vext3 LHS, <3,6,4,7>
- 3763579557U, // <3,3,6,5>: Cost 4 vext3 LHS, <3,6,5,7>
- 2732968621U, // <3,3,6,6>: Cost 3 vext3 LHS, <3,6,6,6>
- 2657973099U, // <3,3,6,7>: Cost 3 vext2 <6,7,3,3>, <6,7,3,3>
- 2658636732U, // <3,3,6,u>: Cost 3 vext2 <6,u,3,3>, <6,u,3,3>
- 2558378086U, // <3,3,7,0>: Cost 3 vext1 <1,3,3,7>, LHS
- 2558378990U, // <3,3,7,1>: Cost 3 vext1 <1,3,3,7>, <1,3,3,7>
- 2564351687U, // <3,3,7,2>: Cost 3 vext1 <2,3,3,7>, <2,3,3,7>
- 2661291264U, // <3,3,7,3>: Cost 3 vext2 <7,3,3,3>, <7,3,3,3>
- 2558381366U, // <3,3,7,4>: Cost 3 vext1 <1,3,3,7>, RHS
- 2732968694U, // <3,3,7,5>: Cost 3 vext3 LHS, <3,7,5,7>
- 3781126907U, // <3,3,7,6>: Cost 4 vext3 <3,7,6,3>, <3,7,6,3>
- 3095397376U, // <3,3,7,7>: Cost 3 vtrnr <1,3,5,7>, <1,3,5,7>
- 2558383918U, // <3,3,7,u>: Cost 3 vext1 <1,3,3,7>, LHS
- 1496547430U, // <3,3,u,0>: Cost 2 vext1 <3,3,3,3>, LHS
- 1611893534U, // <3,3,u,1>: Cost 2 vext3 LHS, <3,u,1,2>
- 1592858504U, // <3,3,u,2>: Cost 2 vext2 <u,2,3,3>, <u,2,3,3>
- 336380006U, // <3,3,u,3>: Cost 1 vdup3 LHS
- 1496550710U, // <3,3,u,4>: Cost 2 vext1 <3,3,3,3>, RHS
- 1611893574U, // <3,3,u,5>: Cost 2 vext3 LHS, <3,u,5,6>
- 2690280268U, // <3,3,u,6>: Cost 3 vext3 LHS, <3,u,6,3>
- 2826636841U, // <3,3,u,7>: Cost 3 vuzpr <1,3,1,3>, RHS
- 336380006U, // <3,3,u,u>: Cost 1 vdup3 LHS
- 2624798720U, // <3,4,0,0>: Cost 3 vext2 <1,2,3,4>, <0,0,0,0>
- 1551056998U, // <3,4,0,1>: Cost 2 vext2 <1,2,3,4>, LHS
- 2624798884U, // <3,4,0,2>: Cost 3 vext2 <1,2,3,4>, <0,2,0,2>
- 3693232384U, // <3,4,0,3>: Cost 4 vext2 <0,3,3,4>, <0,3,1,4>
- 2624799058U, // <3,4,0,4>: Cost 3 vext2 <1,2,3,4>, <0,4,1,5>
- 1659227026U, // <3,4,0,5>: Cost 2 vext3 LHS, <4,0,5,1>
- 1659227036U, // <3,4,0,6>: Cost 2 vext3 LHS, <4,0,6,2>
- 3667973382U, // <3,4,0,7>: Cost 4 vext1 <7,3,4,0>, <7,3,4,0>
- 1551057565U, // <3,4,0,u>: Cost 2 vext2 <1,2,3,4>, LHS
- 2624799478U, // <3,4,1,0>: Cost 3 vext2 <1,2,3,4>, <1,0,3,2>
- 2624799540U, // <3,4,1,1>: Cost 3 vext2 <1,2,3,4>, <1,1,1,1>
- 1551057818U, // <3,4,1,2>: Cost 2 vext2 <1,2,3,4>, <1,2,3,4>
- 2624799704U, // <3,4,1,3>: Cost 3 vext2 <1,2,3,4>, <1,3,1,3>
- 2564377910U, // <3,4,1,4>: Cost 3 vext1 <2,3,4,1>, RHS
- 2689838050U, // <3,4,1,5>: Cost 3 vext3 LHS, <4,1,5,0>
- 2689838062U, // <3,4,1,6>: Cost 3 vext3 LHS, <4,1,6,3>
- 2628117807U, // <3,4,1,7>: Cost 3 vext2 <1,7,3,4>, <1,7,3,4>
- 1555039616U, // <3,4,1,u>: Cost 2 vext2 <1,u,3,4>, <1,u,3,4>
- 3626180710U, // <3,4,2,0>: Cost 4 vext1 <0,3,4,2>, LHS
- 2624800298U, // <3,4,2,1>: Cost 3 vext2 <1,2,3,4>, <2,1,4,3>
- 2624800360U, // <3,4,2,2>: Cost 3 vext2 <1,2,3,4>, <2,2,2,2>
- 2624800422U, // <3,4,2,3>: Cost 3 vext2 <1,2,3,4>, <2,3,0,1>
- 2624800514U, // <3,4,2,4>: Cost 3 vext2 <1,2,3,4>, <2,4,1,3>
- 2709965878U, // <3,4,2,5>: Cost 3 vext3 <4,2,5,3>, <4,2,5,3>
- 2689838140U, // <3,4,2,6>: Cost 3 vext3 LHS, <4,2,6,0>
- 2634090504U, // <3,4,2,7>: Cost 3 vext2 <2,7,3,4>, <2,7,3,4>
- 2689838158U, // <3,4,2,u>: Cost 3 vext3 LHS, <4,2,u,0>
- 2624800918U, // <3,4,3,0>: Cost 3 vext2 <1,2,3,4>, <3,0,1,2>
- 2636081403U, // <3,4,3,1>: Cost 3 vext2 <3,1,3,4>, <3,1,3,4>
- 2636745036U, // <3,4,3,2>: Cost 3 vext2 <3,2,3,4>, <3,2,3,4>
- 2624801180U, // <3,4,3,3>: Cost 3 vext2 <1,2,3,4>, <3,3,3,3>
- 2624801232U, // <3,4,3,4>: Cost 3 vext2 <1,2,3,4>, <3,4,0,1>
- 2905836854U, // <3,4,3,5>: Cost 3 vzipl <3,3,3,3>, RHS
- 3040054582U, // <3,4,3,6>: Cost 3 vtrnl <3,3,3,3>, RHS
- 3702524611U, // <3,4,3,7>: Cost 4 vext2 <1,u,3,4>, <3,7,0,1>
- 2624801566U, // <3,4,3,u>: Cost 3 vext2 <1,2,3,4>, <3,u,1,2>
- 2564399206U, // <3,4,4,0>: Cost 3 vext1 <2,3,4,4>, LHS
- 2564400026U, // <3,4,4,1>: Cost 3 vext1 <2,3,4,4>, <1,2,3,4>
- 2564400845U, // <3,4,4,2>: Cost 3 vext1 <2,3,4,4>, <2,3,4,4>
- 2570373542U, // <3,4,4,3>: Cost 3 vext1 <3,3,4,4>, <3,3,4,4>
- 1659227344U, // <3,4,4,4>: Cost 2 vext3 LHS, <4,4,4,4>
- 1551060278U, // <3,4,4,5>: Cost 2 vext2 <1,2,3,4>, RHS
- 1659227364U, // <3,4,4,6>: Cost 2 vext3 LHS, <4,4,6,6>
- 3668006154U, // <3,4,4,7>: Cost 4 vext1 <7,3,4,4>, <7,3,4,4>
- 1551060521U, // <3,4,4,u>: Cost 2 vext2 <1,2,3,4>, RHS
- 1490665574U, // <3,4,5,0>: Cost 2 vext1 <2,3,4,5>, LHS
- 2689838341U, // <3,4,5,1>: Cost 3 vext3 LHS, <4,5,1,3>
- 1490667214U, // <3,4,5,2>: Cost 2 vext1 <2,3,4,5>, <2,3,4,5>
- 2564409494U, // <3,4,5,3>: Cost 3 vext1 <2,3,4,5>, <3,0,1,2>
- 1490668854U, // <3,4,5,4>: Cost 2 vext1 <2,3,4,5>, RHS
- 2689838381U, // <3,4,5,5>: Cost 3 vext3 LHS, <4,5,5,7>
- 537709878U, // <3,4,5,6>: Cost 1 vext3 LHS, RHS
- 2594272523U, // <3,4,5,7>: Cost 3 vext1 <7,3,4,5>, <7,3,4,5>
- 537709896U, // <3,4,5,u>: Cost 1 vext3 LHS, RHS
- 2689838411U, // <3,4,6,0>: Cost 3 vext3 LHS, <4,6,0,1>
- 2558444534U, // <3,4,6,1>: Cost 3 vext1 <1,3,4,6>, <1,3,4,6>
- 2666607098U, // <3,4,6,2>: Cost 3 vext2 <u,2,3,4>, <6,2,7,3>
- 2558446082U, // <3,4,6,3>: Cost 3 vext1 <1,3,4,6>, <3,4,5,6>
- 1659227508U, // <3,4,6,4>: Cost 2 vext3 LHS, <4,6,4,6>
- 2689838462U, // <3,4,6,5>: Cost 3 vext3 LHS, <4,6,5,7>
- 2689838471U, // <3,4,6,6>: Cost 3 vext3 LHS, <4,6,6,7>
- 2657981292U, // <3,4,6,7>: Cost 3 vext2 <6,7,3,4>, <6,7,3,4>
- 1659227540U, // <3,4,6,u>: Cost 2 vext3 LHS, <4,6,u,2>
- 2666607610U, // <3,4,7,0>: Cost 3 vext2 <u,2,3,4>, <7,0,1,2>
- 3702527072U, // <3,4,7,1>: Cost 4 vext2 <1,u,3,4>, <7,1,3,5>
- 2660635824U, // <3,4,7,2>: Cost 3 vext2 <7,2,3,4>, <7,2,3,4>
- 3644139945U, // <3,4,7,3>: Cost 4 vext1 <3,3,4,7>, <3,3,4,7>
- 2666607974U, // <3,4,7,4>: Cost 3 vext2 <u,2,3,4>, <7,4,5,6>
- 2732969416U, // <3,4,7,5>: Cost 3 vext3 LHS, <4,7,5,0>
- 2732969425U, // <3,4,7,6>: Cost 3 vext3 LHS, <4,7,6,0>
- 2666608236U, // <3,4,7,7>: Cost 3 vext2 <u,2,3,4>, <7,7,7,7>
- 2664617622U, // <3,4,7,u>: Cost 3 vext2 <7,u,3,4>, <7,u,3,4>
- 1490690150U, // <3,4,u,0>: Cost 2 vext1 <2,3,4,u>, LHS
- 1551062830U, // <3,4,u,1>: Cost 2 vext2 <1,2,3,4>, LHS
- 1490691793U, // <3,4,u,2>: Cost 2 vext1 <2,3,4,u>, <2,3,4,u>
- 2624804796U, // <3,4,u,3>: Cost 3 vext2 <1,2,3,4>, <u,3,0,1>
- 1490693430U, // <3,4,u,4>: Cost 2 vext1 <2,3,4,u>, RHS
- 1551063194U, // <3,4,u,5>: Cost 2 vext2 <1,2,3,4>, RHS
- 537710121U, // <3,4,u,6>: Cost 1 vext3 LHS, RHS
- 2594297102U, // <3,4,u,7>: Cost 3 vext1 <7,3,4,u>, <7,3,4,u>
- 537710139U, // <3,4,u,u>: Cost 1 vext3 LHS, RHS
- 3692576768U, // <3,5,0,0>: Cost 4 vext2 <0,2,3,5>, <0,0,0,0>
- 2618835046U, // <3,5,0,1>: Cost 3 vext2 <0,2,3,5>, LHS
- 2618835138U, // <3,5,0,2>: Cost 3 vext2 <0,2,3,5>, <0,2,3,5>
- 3692577024U, // <3,5,0,3>: Cost 4 vext2 <0,2,3,5>, <0,3,1,4>
- 2689838690U, // <3,5,0,4>: Cost 3 vext3 LHS, <5,0,4,1>
- 2732969579U, // <3,5,0,5>: Cost 3 vext3 LHS, <5,0,5,1>
- 2732969588U, // <3,5,0,6>: Cost 3 vext3 LHS, <5,0,6,1>
- 2246963055U, // <3,5,0,7>: Cost 3 vrev <5,3,7,0>
- 2618835613U, // <3,5,0,u>: Cost 3 vext2 <0,2,3,5>, LHS
- 2594308198U, // <3,5,1,0>: Cost 3 vext1 <7,3,5,1>, LHS
- 3692577588U, // <3,5,1,1>: Cost 4 vext2 <0,2,3,5>, <1,1,1,1>
- 2624807835U, // <3,5,1,2>: Cost 3 vext2 <1,2,3,5>, <1,2,3,5>
- 2625471468U, // <3,5,1,3>: Cost 3 vext2 <1,3,3,5>, <1,3,3,5>
- 2626135101U, // <3,5,1,4>: Cost 3 vext2 <1,4,3,5>, <1,4,3,5>
- 2594311888U, // <3,5,1,5>: Cost 3 vext1 <7,3,5,1>, <5,1,7,3>
- 3699877107U, // <3,5,1,6>: Cost 4 vext2 <1,4,3,5>, <1,6,5,7>
- 1641680592U, // <3,5,1,7>: Cost 2 vext3 <5,1,7,3>, <5,1,7,3>
- 1641754329U, // <3,5,1,u>: Cost 2 vext3 <5,1,u,3>, <5,1,u,3>
- 3692578274U, // <3,5,2,0>: Cost 4 vext2 <0,2,3,5>, <2,0,5,3>
- 2630116899U, // <3,5,2,1>: Cost 3 vext2 <2,1,3,5>, <2,1,3,5>
- 3692578408U, // <3,5,2,2>: Cost 4 vext2 <0,2,3,5>, <2,2,2,2>
- 2625472206U, // <3,5,2,3>: Cost 3 vext2 <1,3,3,5>, <2,3,4,5>
- 2632107798U, // <3,5,2,4>: Cost 3 vext2 <2,4,3,5>, <2,4,3,5>
- 2715938575U, // <3,5,2,5>: Cost 3 vext3 <5,2,5,3>, <5,2,5,3>
- 3692578746U, // <3,5,2,6>: Cost 4 vext2 <0,2,3,5>, <2,6,3,7>
- 2716086049U, // <3,5,2,7>: Cost 3 vext3 <5,2,7,3>, <5,2,7,3>
- 2634762330U, // <3,5,2,u>: Cost 3 vext2 <2,u,3,5>, <2,u,3,5>
- 3692578966U, // <3,5,3,0>: Cost 4 vext2 <0,2,3,5>, <3,0,1,2>
- 2636089596U, // <3,5,3,1>: Cost 3 vext2 <3,1,3,5>, <3,1,3,5>
- 3699214668U, // <3,5,3,2>: Cost 4 vext2 <1,3,3,5>, <3,2,3,4>
- 2638080412U, // <3,5,3,3>: Cost 3 vext2 <3,4,3,5>, <3,3,3,3>
- 2618837506U, // <3,5,3,4>: Cost 3 vext2 <0,2,3,5>, <3,4,5,6>
- 2832844494U, // <3,5,3,5>: Cost 3 vuzpr <2,3,4,5>, <2,3,4,5>
- 4033415682U, // <3,5,3,6>: Cost 4 vzipr <1,1,3,3>, <3,4,5,6>
- 3095072054U, // <3,5,3,7>: Cost 3 vtrnr <1,3,1,3>, RHS
- 3095072055U, // <3,5,3,u>: Cost 3 vtrnr <1,3,1,3>, RHS
- 2600304742U, // <3,5,4,0>: Cost 3 vext1 <u,3,5,4>, LHS
- 3763580815U, // <3,5,4,1>: Cost 4 vext3 LHS, <5,4,1,5>
- 2564474582U, // <3,5,4,2>: Cost 3 vext1 <2,3,5,4>, <2,3,5,4>
- 3699879044U, // <3,5,4,3>: Cost 4 vext2 <1,4,3,5>, <4,3,5,0>
- 2600308022U, // <3,5,4,4>: Cost 3 vext1 <u,3,5,4>, RHS
- 2618838326U, // <3,5,4,5>: Cost 3 vext2 <0,2,3,5>, RHS
- 2772454710U, // <3,5,4,6>: Cost 3 vuzpl <3,4,5,6>, RHS
- 1659228102U, // <3,5,4,7>: Cost 2 vext3 LHS, <5,4,7,6>
- 1659228111U, // <3,5,4,u>: Cost 2 vext3 LHS, <5,4,u,6>
- 2570453094U, // <3,5,5,0>: Cost 3 vext1 <3,3,5,5>, LHS
- 2624810704U, // <3,5,5,1>: Cost 3 vext2 <1,2,3,5>, <5,1,7,3>
- 2570454734U, // <3,5,5,2>: Cost 3 vext1 <3,3,5,5>, <2,3,4,5>
- 2570455472U, // <3,5,5,3>: Cost 3 vext1 <3,3,5,5>, <3,3,5,5>
- 2570456374U, // <3,5,5,4>: Cost 3 vext1 <3,3,5,5>, RHS
- 1659228164U, // <3,5,5,5>: Cost 2 vext3 LHS, <5,5,5,5>
- 2732969998U, // <3,5,5,6>: Cost 3 vext3 LHS, <5,5,6,6>
- 1659228184U, // <3,5,5,7>: Cost 2 vext3 LHS, <5,5,7,7>
- 1659228193U, // <3,5,5,u>: Cost 2 vext3 LHS, <5,5,u,7>
- 2732970020U, // <3,5,6,0>: Cost 3 vext3 LHS, <5,6,0,1>
- 2732970035U, // <3,5,6,1>: Cost 3 vext3 LHS, <5,6,1,7>
- 2564490968U, // <3,5,6,2>: Cost 3 vext1 <2,3,5,6>, <2,3,5,6>
- 2732970050U, // <3,5,6,3>: Cost 3 vext3 LHS, <5,6,3,4>
- 2732970060U, // <3,5,6,4>: Cost 3 vext3 LHS, <5,6,4,5>
- 2732970071U, // <3,5,6,5>: Cost 3 vext3 LHS, <5,6,5,7>
- 2732970080U, // <3,5,6,6>: Cost 3 vext3 LHS, <5,6,6,7>
- 1659228258U, // <3,5,6,7>: Cost 2 vext3 LHS, <5,6,7,0>
- 1659228267U, // <3,5,6,u>: Cost 2 vext3 LHS, <5,6,u,0>
- 1484783718U, // <3,5,7,0>: Cost 2 vext1 <1,3,5,7>, LHS
- 1484784640U, // <3,5,7,1>: Cost 2 vext1 <1,3,5,7>, <1,3,5,7>
- 2558527080U, // <3,5,7,2>: Cost 3 vext1 <1,3,5,7>, <2,2,2,2>
- 2558527638U, // <3,5,7,3>: Cost 3 vext1 <1,3,5,7>, <3,0,1,2>
- 1484786998U, // <3,5,7,4>: Cost 2 vext1 <1,3,5,7>, RHS
- 1659228328U, // <3,5,7,5>: Cost 2 vext3 LHS, <5,7,5,7>
- 2732970154U, // <3,5,7,6>: Cost 3 vext3 LHS, <5,7,6,0>
- 2558531180U, // <3,5,7,7>: Cost 3 vext1 <1,3,5,7>, <7,7,7,7>
- 1484789550U, // <3,5,7,u>: Cost 2 vext1 <1,3,5,7>, LHS
- 1484791910U, // <3,5,u,0>: Cost 2 vext1 <1,3,5,u>, LHS
- 1484792833U, // <3,5,u,1>: Cost 2 vext1 <1,3,5,u>, <1,3,5,u>
- 2558535272U, // <3,5,u,2>: Cost 3 vext1 <1,3,5,u>, <2,2,2,2>
- 2558535830U, // <3,5,u,3>: Cost 3 vext1 <1,3,5,u>, <3,0,1,2>
- 1484795190U, // <3,5,u,4>: Cost 2 vext1 <1,3,5,u>, RHS
- 1659228409U, // <3,5,u,5>: Cost 2 vext3 LHS, <5,u,5,7>
- 2772457626U, // <3,5,u,6>: Cost 3 vuzpl <3,4,5,6>, RHS
- 1646326023U, // <3,5,u,7>: Cost 2 vext3 <5,u,7,3>, <5,u,7,3>
- 1484797742U, // <3,5,u,u>: Cost 2 vext1 <1,3,5,u>, LHS
- 2558541926U, // <3,6,0,0>: Cost 3 vext1 <1,3,6,0>, LHS
- 2689839393U, // <3,6,0,1>: Cost 3 vext3 LHS, <6,0,1,2>
- 2689839404U, // <3,6,0,2>: Cost 3 vext3 LHS, <6,0,2,4>
- 3706519808U, // <3,6,0,3>: Cost 4 vext2 <2,5,3,6>, <0,3,1,4>
- 2689839420U, // <3,6,0,4>: Cost 3 vext3 LHS, <6,0,4,2>
- 2732970314U, // <3,6,0,5>: Cost 3 vext3 LHS, <6,0,5,7>
- 2732970316U, // <3,6,0,6>: Cost 3 vext3 LHS, <6,0,6,0>
- 2960313654U, // <3,6,0,7>: Cost 3 vzipr <1,2,3,0>, RHS
- 2689839456U, // <3,6,0,u>: Cost 3 vext3 LHS, <6,0,u,2>
- 3763581290U, // <3,6,1,0>: Cost 4 vext3 LHS, <6,1,0,3>
- 3763581297U, // <3,6,1,1>: Cost 4 vext3 LHS, <6,1,1,1>
- 2624816028U, // <3,6,1,2>: Cost 3 vext2 <1,2,3,6>, <1,2,3,6>
- 3763581315U, // <3,6,1,3>: Cost 4 vext3 LHS, <6,1,3,1>
- 2626143294U, // <3,6,1,4>: Cost 3 vext2 <1,4,3,6>, <1,4,3,6>
- 3763581335U, // <3,6,1,5>: Cost 4 vext3 LHS, <6,1,5,3>
- 2721321376U, // <3,6,1,6>: Cost 3 vext3 <6,1,6,3>, <6,1,6,3>
- 2721395113U, // <3,6,1,7>: Cost 3 vext3 <6,1,7,3>, <6,1,7,3>
- 2628797826U, // <3,6,1,u>: Cost 3 vext2 <1,u,3,6>, <1,u,3,6>
- 2594390118U, // <3,6,2,0>: Cost 3 vext1 <7,3,6,2>, LHS
- 2721616324U, // <3,6,2,1>: Cost 3 vext3 <6,2,1,3>, <6,2,1,3>
- 2630788725U, // <3,6,2,2>: Cost 3 vext2 <2,2,3,6>, <2,2,3,6>
- 3763581395U, // <3,6,2,3>: Cost 4 vext3 LHS, <6,2,3,0>
- 2632115991U, // <3,6,2,4>: Cost 3 vext2 <2,4,3,6>, <2,4,3,6>
- 2632779624U, // <3,6,2,5>: Cost 3 vext2 <2,5,3,6>, <2,5,3,6>
- 2594394618U, // <3,6,2,6>: Cost 3 vext1 <7,3,6,2>, <6,2,7,3>
- 1648316922U, // <3,6,2,7>: Cost 2 vext3 <6,2,7,3>, <6,2,7,3>
- 1648390659U, // <3,6,2,u>: Cost 2 vext3 <6,2,u,3>, <6,2,u,3>
- 3693914262U, // <3,6,3,0>: Cost 4 vext2 <0,4,3,6>, <3,0,1,2>
- 3638281176U, // <3,6,3,1>: Cost 4 vext1 <2,3,6,3>, <1,3,1,3>
- 3696568678U, // <3,6,3,2>: Cost 4 vext2 <0,u,3,6>, <3,2,6,3>
- 2638088604U, // <3,6,3,3>: Cost 3 vext2 <3,4,3,6>, <3,3,3,3>
- 2632780290U, // <3,6,3,4>: Cost 3 vext2 <2,5,3,6>, <3,4,5,6>
- 3712494145U, // <3,6,3,5>: Cost 4 vext2 <3,5,3,6>, <3,5,3,6>
- 3698559612U, // <3,6,3,6>: Cost 4 vext2 <1,2,3,6>, <3,6,1,2>
- 2959674678U, // <3,6,3,7>: Cost 3 vzipr <1,1,3,3>, RHS
- 2959674679U, // <3,6,3,u>: Cost 3 vzipr <1,1,3,3>, RHS
- 3763581536U, // <3,6,4,0>: Cost 4 vext3 LHS, <6,4,0,6>
- 2722943590U, // <3,6,4,1>: Cost 3 vext3 <6,4,1,3>, <6,4,1,3>
- 2732970609U, // <3,6,4,2>: Cost 3 vext3 LHS, <6,4,2,5>
- 3698560147U, // <3,6,4,3>: Cost 4 vext2 <1,2,3,6>, <4,3,6,6>
- 2732970628U, // <3,6,4,4>: Cost 3 vext3 LHS, <6,4,4,6>
- 2689839757U, // <3,6,4,5>: Cost 3 vext3 LHS, <6,4,5,6>
- 2732970640U, // <3,6,4,6>: Cost 3 vext3 LHS, <6,4,6,0>
- 2960346422U, // <3,6,4,7>: Cost 3 vzipr <1,2,3,4>, RHS
- 2689839784U, // <3,6,4,u>: Cost 3 vext3 LHS, <6,4,u,6>
- 2576498790U, // <3,6,5,0>: Cost 3 vext1 <4,3,6,5>, LHS
- 3650241270U, // <3,6,5,1>: Cost 4 vext1 <4,3,6,5>, <1,0,3,2>
- 2732970692U, // <3,6,5,2>: Cost 3 vext3 LHS, <6,5,2,7>
- 2576501250U, // <3,6,5,3>: Cost 3 vext1 <4,3,6,5>, <3,4,5,6>
- 2576501906U, // <3,6,5,4>: Cost 3 vext1 <4,3,6,5>, <4,3,6,5>
- 3650244622U, // <3,6,5,5>: Cost 4 vext1 <4,3,6,5>, <5,5,6,6>
- 4114633528U, // <3,6,5,6>: Cost 4 vtrnl <3,4,5,6>, <6,6,6,6>
- 2732970735U, // <3,6,5,7>: Cost 3 vext3 LHS, <6,5,7,5>
- 2576504622U, // <3,6,5,u>: Cost 3 vext1 <4,3,6,5>, LHS
- 2732970749U, // <3,6,6,0>: Cost 3 vext3 LHS, <6,6,0,1>
- 2724270856U, // <3,6,6,1>: Cost 3 vext3 <6,6,1,3>, <6,6,1,3>
- 2624819706U, // <3,6,6,2>: Cost 3 vext2 <1,2,3,6>, <6,2,7,3>
- 3656223234U, // <3,6,6,3>: Cost 4 vext1 <5,3,6,6>, <3,4,5,6>
- 2732970788U, // <3,6,6,4>: Cost 3 vext3 LHS, <6,6,4,4>
- 2732970800U, // <3,6,6,5>: Cost 3 vext3 LHS, <6,6,5,7>
- 1659228984U, // <3,6,6,6>: Cost 2 vext3 LHS, <6,6,6,6>
- 1659228994U, // <3,6,6,7>: Cost 2 vext3 LHS, <6,6,7,7>
- 1659229003U, // <3,6,6,u>: Cost 2 vext3 LHS, <6,6,u,7>
- 1659229006U, // <3,6,7,0>: Cost 2 vext3 LHS, <6,7,0,1>
- 2558600201U, // <3,6,7,1>: Cost 3 vext1 <1,3,6,7>, <1,3,6,7>
- 2558601146U, // <3,6,7,2>: Cost 3 vext1 <1,3,6,7>, <2,6,3,7>
- 2725081963U, // <3,6,7,3>: Cost 3 vext3 <6,7,3,3>, <6,7,3,3>
- 1659229046U, // <3,6,7,4>: Cost 2 vext3 LHS, <6,7,4,5>
- 2715423611U, // <3,6,7,5>: Cost 3 vext3 <5,1,7,3>, <6,7,5,1>
- 2722059141U, // <3,6,7,6>: Cost 3 vext3 <6,2,7,3>, <6,7,6,2>
- 2962361654U, // <3,6,7,7>: Cost 3 vzipr <1,5,3,7>, RHS
- 1659229078U, // <3,6,7,u>: Cost 2 vext3 LHS, <6,7,u,1>
- 1659229087U, // <3,6,u,0>: Cost 2 vext3 LHS, <6,u,0,1>
- 2689840041U, // <3,6,u,1>: Cost 3 vext3 LHS, <6,u,1,2>
- 2558609339U, // <3,6,u,2>: Cost 3 vext1 <1,3,6,u>, <2,6,3,u>
- 2576525853U, // <3,6,u,3>: Cost 3 vext1 <4,3,6,u>, <3,4,u,6>
- 1659229127U, // <3,6,u,4>: Cost 2 vext3 LHS, <6,u,4,5>
- 2689840081U, // <3,6,u,5>: Cost 3 vext3 LHS, <6,u,5,6>
- 1659228984U, // <3,6,u,6>: Cost 2 vext3 LHS, <6,6,6,6>
- 1652298720U, // <3,6,u,7>: Cost 2 vext3 <6,u,7,3>, <6,u,7,3>
- 1659229159U, // <3,6,u,u>: Cost 2 vext3 LHS, <6,u,u,1>
- 2626813952U, // <3,7,0,0>: Cost 3 vext2 <1,5,3,7>, <0,0,0,0>
- 1553072230U, // <3,7,0,1>: Cost 2 vext2 <1,5,3,7>, LHS
- 2626814116U, // <3,7,0,2>: Cost 3 vext2 <1,5,3,7>, <0,2,0,2>
- 3700556028U, // <3,7,0,3>: Cost 4 vext2 <1,5,3,7>, <0,3,1,0>
- 2626814290U, // <3,7,0,4>: Cost 3 vext2 <1,5,3,7>, <0,4,1,5>
- 2582507375U, // <3,7,0,5>: Cost 3 vext1 <5,3,7,0>, <5,3,7,0>
- 2588480072U, // <3,7,0,6>: Cost 3 vext1 <6,3,7,0>, <6,3,7,0>
- 2732971055U, // <3,7,0,7>: Cost 3 vext3 LHS, <7,0,7,1>
- 1553072797U, // <3,7,0,u>: Cost 2 vext2 <1,5,3,7>, LHS
- 2626814710U, // <3,7,1,0>: Cost 3 vext2 <1,5,3,7>, <1,0,3,2>
- 2626814772U, // <3,7,1,1>: Cost 3 vext2 <1,5,3,7>, <1,1,1,1>
- 2626814870U, // <3,7,1,2>: Cost 3 vext2 <1,5,3,7>, <1,2,3,0>
- 2625487854U, // <3,7,1,3>: Cost 3 vext2 <1,3,3,7>, <1,3,3,7>
- 2582514998U, // <3,7,1,4>: Cost 3 vext1 <5,3,7,1>, RHS
- 1553073296U, // <3,7,1,5>: Cost 2 vext2 <1,5,3,7>, <1,5,3,7>
- 2627478753U, // <3,7,1,6>: Cost 3 vext2 <1,6,3,7>, <1,6,3,7>
- 2727367810U, // <3,7,1,7>: Cost 3 vext3 <7,1,7,3>, <7,1,7,3>
- 1555064195U, // <3,7,1,u>: Cost 2 vext2 <1,u,3,7>, <1,u,3,7>
- 2588491878U, // <3,7,2,0>: Cost 3 vext1 <6,3,7,2>, LHS
- 3700557318U, // <3,7,2,1>: Cost 4 vext2 <1,5,3,7>, <2,1,0,3>
- 2626815592U, // <3,7,2,2>: Cost 3 vext2 <1,5,3,7>, <2,2,2,2>
- 2626815654U, // <3,7,2,3>: Cost 3 vext2 <1,5,3,7>, <2,3,0,1>
- 2588495158U, // <3,7,2,4>: Cost 3 vext1 <6,3,7,2>, RHS
- 2632787817U, // <3,7,2,5>: Cost 3 vext2 <2,5,3,7>, <2,5,3,7>
- 1559709626U, // <3,7,2,6>: Cost 2 vext2 <2,6,3,7>, <2,6,3,7>
- 2728031443U, // <3,7,2,7>: Cost 3 vext3 <7,2,7,3>, <7,2,7,3>
- 1561036892U, // <3,7,2,u>: Cost 2 vext2 <2,u,3,7>, <2,u,3,7>
- 2626816150U, // <3,7,3,0>: Cost 3 vext2 <1,5,3,7>, <3,0,1,2>
- 2626816268U, // <3,7,3,1>: Cost 3 vext2 <1,5,3,7>, <3,1,5,3>
- 2633451878U, // <3,7,3,2>: Cost 3 vext2 <2,6,3,7>, <3,2,6,3>
- 2626816412U, // <3,7,3,3>: Cost 3 vext2 <1,5,3,7>, <3,3,3,3>
- 2626816514U, // <3,7,3,4>: Cost 3 vext2 <1,5,3,7>, <3,4,5,6>
- 2638760514U, // <3,7,3,5>: Cost 3 vext2 <3,5,3,7>, <3,5,3,7>
- 2639424147U, // <3,7,3,6>: Cost 3 vext2 <3,6,3,7>, <3,6,3,7>
- 2826961920U, // <3,7,3,7>: Cost 3 vuzpr <1,3,5,7>, <1,3,5,7>
- 2626816798U, // <3,7,3,u>: Cost 3 vext2 <1,5,3,7>, <3,u,1,2>
- 2582536294U, // <3,7,4,0>: Cost 3 vext1 <5,3,7,4>, LHS
- 2582537360U, // <3,7,4,1>: Cost 3 vext1 <5,3,7,4>, <1,5,3,7>
- 2588510138U, // <3,7,4,2>: Cost 3 vext1 <6,3,7,4>, <2,6,3,7>
- 3700558996U, // <3,7,4,3>: Cost 4 vext2 <1,5,3,7>, <4,3,6,7>
- 2582539574U, // <3,7,4,4>: Cost 3 vext1 <5,3,7,4>, RHS
- 1553075510U, // <3,7,4,5>: Cost 2 vext2 <1,5,3,7>, RHS
- 2588512844U, // <3,7,4,6>: Cost 3 vext1 <6,3,7,4>, <6,3,7,4>
- 2564625766U, // <3,7,4,7>: Cost 3 vext1 <2,3,7,4>, <7,4,5,6>
- 1553075753U, // <3,7,4,u>: Cost 2 vext2 <1,5,3,7>, RHS
- 2732971398U, // <3,7,5,0>: Cost 3 vext3 LHS, <7,5,0,2>
- 2626817744U, // <3,7,5,1>: Cost 3 vext2 <1,5,3,7>, <5,1,7,3>
- 3700559649U, // <3,7,5,2>: Cost 4 vext2 <1,5,3,7>, <5,2,7,3>
- 2626817903U, // <3,7,5,3>: Cost 3 vext2 <1,5,3,7>, <5,3,7,0>
- 2258728203U, // <3,7,5,4>: Cost 3 vrev <7,3,4,5>
- 2732971446U, // <3,7,5,5>: Cost 3 vext3 LHS, <7,5,5,5>
- 2732971457U, // <3,7,5,6>: Cost 3 vext3 LHS, <7,5,6,7>
- 2826964278U, // <3,7,5,7>: Cost 3 vuzpr <1,3,5,7>, RHS
- 2826964279U, // <3,7,5,u>: Cost 3 vuzpr <1,3,5,7>, RHS
- 2732971478U, // <3,7,6,0>: Cost 3 vext3 LHS, <7,6,0,1>
- 2732971486U, // <3,7,6,1>: Cost 3 vext3 LHS, <7,6,1,0>
- 2633454074U, // <3,7,6,2>: Cost 3 vext2 <2,6,3,7>, <6,2,7,3>
- 2633454152U, // <3,7,6,3>: Cost 3 vext2 <2,6,3,7>, <6,3,7,0>
- 2732971518U, // <3,7,6,4>: Cost 3 vext3 LHS, <7,6,4,5>
- 2732971526U, // <3,7,6,5>: Cost 3 vext3 LHS, <7,6,5,4>
- 2732971537U, // <3,7,6,6>: Cost 3 vext3 LHS, <7,6,6,6>
- 2732971540U, // <3,7,6,7>: Cost 3 vext3 LHS, <7,6,7,0>
- 2726041124U, // <3,7,6,u>: Cost 3 vext3 <6,u,7,3>, <7,6,u,7>
- 2570616934U, // <3,7,7,0>: Cost 3 vext1 <3,3,7,7>, LHS
- 2570617856U, // <3,7,7,1>: Cost 3 vext1 <3,3,7,7>, <1,3,5,7>
- 2564646635U, // <3,7,7,2>: Cost 3 vext1 <2,3,7,7>, <2,3,7,7>
- 2570619332U, // <3,7,7,3>: Cost 3 vext1 <3,3,7,7>, <3,3,7,7>
- 2570620214U, // <3,7,7,4>: Cost 3 vext1 <3,3,7,7>, RHS
- 2582564726U, // <3,7,7,5>: Cost 3 vext1 <5,3,7,7>, <5,3,7,7>
- 2588537423U, // <3,7,7,6>: Cost 3 vext1 <6,3,7,7>, <6,3,7,7>
- 1659229804U, // <3,7,7,7>: Cost 2 vext3 LHS, <7,7,7,7>
- 1659229804U, // <3,7,7,u>: Cost 2 vext3 LHS, <7,7,7,7>
- 2626819795U, // <3,7,u,0>: Cost 3 vext2 <1,5,3,7>, <u,0,1,2>
- 1553078062U, // <3,7,u,1>: Cost 2 vext2 <1,5,3,7>, LHS
- 2626819973U, // <3,7,u,2>: Cost 3 vext2 <1,5,3,7>, <u,2,3,0>
- 2826961565U, // <3,7,u,3>: Cost 3 vuzpr <1,3,5,7>, LHS
- 2626820159U, // <3,7,u,4>: Cost 3 vext2 <1,5,3,7>, <u,4,5,6>
- 1553078426U, // <3,7,u,5>: Cost 2 vext2 <1,5,3,7>, RHS
- 1595545808U, // <3,7,u,6>: Cost 2 vext2 <u,6,3,7>, <u,6,3,7>
- 1659229804U, // <3,7,u,7>: Cost 2 vext3 LHS, <7,7,7,7>
- 1553078629U, // <3,7,u,u>: Cost 2 vext2 <1,5,3,7>, LHS
- 1611448320U, // <3,u,0,0>: Cost 2 vext3 LHS, <0,0,0,0>
- 1611896531U, // <3,u,0,1>: Cost 2 vext3 LHS, <u,0,1,2>
- 1659672284U, // <3,u,0,2>: Cost 2 vext3 LHS, <u,0,2,2>
- 1616099045U, // <3,u,0,3>: Cost 2 vext3 LHS, <u,0,3,2>
- 2685638381U, // <3,u,0,4>: Cost 3 vext3 LHS, <u,0,4,1>
- 1663874806U, // <3,u,0,5>: Cost 2 vext3 LHS, <u,0,5,1>
- 1663874816U, // <3,u,0,6>: Cost 2 vext3 LHS, <u,0,6,2>
- 2960313672U, // <3,u,0,7>: Cost 3 vzipr <1,2,3,0>, RHS
- 1611896594U, // <3,u,0,u>: Cost 2 vext3 LHS, <u,0,u,2>
- 1549763324U, // <3,u,1,0>: Cost 2 vext2 <1,0,3,u>, <1,0,3,u>
- 1550426957U, // <3,u,1,1>: Cost 2 vext2 <1,1,3,u>, <1,1,3,u>
- 537712430U, // <3,u,1,2>: Cost 1 vext3 LHS, LHS
- 1616541495U, // <3,u,1,3>: Cost 2 vext3 LHS, <u,1,3,3>
- 1490930998U, // <3,u,1,4>: Cost 2 vext1 <2,3,u,1>, RHS
- 1553081489U, // <3,u,1,5>: Cost 2 vext2 <1,5,3,u>, <1,5,3,u>
- 2627486946U, // <3,u,1,6>: Cost 3 vext2 <1,6,3,u>, <1,6,3,u>
- 1659230043U, // <3,u,1,7>: Cost 2 vext3 LHS, <u,1,7,3>
- 537712484U, // <3,u,1,u>: Cost 1 vext3 LHS, LHS
- 1611890852U, // <3,u,2,0>: Cost 2 vext3 LHS, <0,2,0,2>
- 2624833102U, // <3,u,2,1>: Cost 3 vext2 <1,2,3,u>, <2,1,u,3>
- 1557063287U, // <3,u,2,2>: Cost 2 vext2 <2,2,3,u>, <2,2,3,u>
- 1616099205U, // <3,u,2,3>: Cost 2 vext3 LHS, <u,2,3,0>
- 1611890892U, // <3,u,2,4>: Cost 2 vext3 LHS, <0,2,4,6>
- 2689841054U, // <3,u,2,5>: Cost 3 vext3 LHS, <u,2,5,7>
- 1559717819U, // <3,u,2,6>: Cost 2 vext2 <2,6,3,u>, <2,6,3,u>
- 1659230124U, // <3,u,2,7>: Cost 2 vext3 LHS, <u,2,7,3>
- 1616541618U, // <3,u,2,u>: Cost 2 vext3 LHS, <u,2,u,0>
- 1611896764U, // <3,u,3,0>: Cost 2 vext3 LHS, <u,3,0,1>
- 1484973079U, // <3,u,3,1>: Cost 2 vext1 <1,3,u,3>, <1,3,u,3>
- 2685638607U, // <3,u,3,2>: Cost 3 vext3 LHS, <u,3,2,2>
- 336380006U, // <3,u,3,3>: Cost 1 vdup3 LHS
- 1611896804U, // <3,u,3,4>: Cost 2 vext3 LHS, <u,3,4,5>
- 1616541679U, // <3,u,3,5>: Cost 2 vext3 LHS, <u,3,5,7>
- 2690283512U, // <3,u,3,6>: Cost 3 vext3 LHS, <u,3,6,7>
- 2959674696U, // <3,u,3,7>: Cost 3 vzipr <1,1,3,3>, RHS
- 336380006U, // <3,u,3,u>: Cost 1 vdup3 LHS
- 2558722150U, // <3,u,4,0>: Cost 3 vext1 <1,3,u,4>, LHS
- 1659672602U, // <3,u,4,1>: Cost 2 vext3 LHS, <u,4,1,5>
- 1659672612U, // <3,u,4,2>: Cost 2 vext3 LHS, <u,4,2,6>
- 2689841196U, // <3,u,4,3>: Cost 3 vext3 LHS, <u,4,3,5>
- 1659227344U, // <3,u,4,4>: Cost 2 vext3 LHS, <4,4,4,4>
- 1611896895U, // <3,u,4,5>: Cost 2 vext3 LHS, <u,4,5,6>
- 1663875144U, // <3,u,4,6>: Cost 2 vext3 LHS, <u,4,6,6>
- 1659230289U, // <3,u,4,7>: Cost 2 vext3 LHS, <u,4,7,6>
- 1611896922U, // <3,u,4,u>: Cost 2 vext3 LHS, <u,4,u,6>
- 1490960486U, // <3,u,5,0>: Cost 2 vext1 <2,3,u,5>, LHS
- 2689841261U, // <3,u,5,1>: Cost 3 vext3 LHS, <u,5,1,7>
- 1490962162U, // <3,u,5,2>: Cost 2 vext1 <2,3,u,5>, <2,3,u,5>
- 1616541823U, // <3,u,5,3>: Cost 2 vext3 LHS, <u,5,3,7>
- 1490963766U, // <3,u,5,4>: Cost 2 vext1 <2,3,u,5>, RHS
- 1659228164U, // <3,u,5,5>: Cost 2 vext3 LHS, <5,5,5,5>
- 537712794U, // <3,u,5,6>: Cost 1 vext3 LHS, RHS
- 1659230371U, // <3,u,5,7>: Cost 2 vext3 LHS, <u,5,7,7>
- 537712812U, // <3,u,5,u>: Cost 1 vext3 LHS, RHS
- 2689841327U, // <3,u,6,0>: Cost 3 vext3 LHS, <u,6,0,1>
- 2558739482U, // <3,u,6,1>: Cost 3 vext1 <1,3,u,6>, <1,3,u,6>
- 2689841351U, // <3,u,6,2>: Cost 3 vext3 LHS, <u,6,2,7>
- 1616099536U, // <3,u,6,3>: Cost 2 vext3 LHS, <u,6,3,7>
- 1659227508U, // <3,u,6,4>: Cost 2 vext3 LHS, <4,6,4,6>
- 2690283746U, // <3,u,6,5>: Cost 3 vext3 LHS, <u,6,5,7>
- 1659228984U, // <3,u,6,6>: Cost 2 vext3 LHS, <6,6,6,6>
- 1659230445U, // <3,u,6,7>: Cost 2 vext3 LHS, <u,6,7,0>
- 1616099581U, // <3,u,6,u>: Cost 2 vext3 LHS, <u,6,u,7>
- 1485004902U, // <3,u,7,0>: Cost 2 vext1 <1,3,u,7>, LHS
- 1485005851U, // <3,u,7,1>: Cost 2 vext1 <1,3,u,7>, <1,3,u,7>
- 2558748264U, // <3,u,7,2>: Cost 3 vext1 <1,3,u,7>, <2,2,2,2>
- 3095397021U, // <3,u,7,3>: Cost 3 vtrnr <1,3,5,7>, LHS
- 1485008182U, // <3,u,7,4>: Cost 2 vext1 <1,3,u,7>, RHS
- 1659228328U, // <3,u,7,5>: Cost 2 vext3 LHS, <5,7,5,7>
- 2722060599U, // <3,u,7,6>: Cost 3 vext3 <6,2,7,3>, <u,7,6,2>
- 1659229804U, // <3,u,7,7>: Cost 2 vext3 LHS, <7,7,7,7>
- 1485010734U, // <3,u,7,u>: Cost 2 vext1 <1,3,u,7>, LHS
- 1616099665U, // <3,u,u,0>: Cost 2 vext3 LHS, <u,u,0,1>
- 1611897179U, // <3,u,u,1>: Cost 2 vext3 LHS, <u,u,1,2>
- 537712997U, // <3,u,u,2>: Cost 1 vext3 LHS, LHS
- 336380006U, // <3,u,u,3>: Cost 1 vdup3 LHS
- 1616099705U, // <3,u,u,4>: Cost 2 vext3 LHS, <u,u,4,5>
- 1611897219U, // <3,u,u,5>: Cost 2 vext3 LHS, <u,u,5,6>
- 537713037U, // <3,u,u,6>: Cost 1 vext3 LHS, RHS
- 1659230607U, // <3,u,u,7>: Cost 2 vext3 LHS, <u,u,7,0>
- 537713051U, // <3,u,u,u>: Cost 1 vext3 LHS, LHS
- 2691907584U, // <4,0,0,0>: Cost 3 vext3 <1,2,3,4>, <0,0,0,0>
- 2691907594U, // <4,0,0,1>: Cost 3 vext3 <1,2,3,4>, <0,0,1,1>
- 2691907604U, // <4,0,0,2>: Cost 3 vext3 <1,2,3,4>, <0,0,2,2>
- 3709862144U, // <4,0,0,3>: Cost 4 vext2 <3,1,4,0>, <0,3,1,4>
- 2684682280U, // <4,0,0,4>: Cost 3 vext3 <0,0,4,4>, <0,0,4,4>
- 3694600633U, // <4,0,0,5>: Cost 4 vext2 <0,5,4,0>, <0,5,4,0>
- 3291431290U, // <4,0,0,6>: Cost 4 vrev <0,4,6,0>
- 3668342067U, // <4,0,0,7>: Cost 4 vext1 <7,4,0,0>, <7,4,0,0>
- 2691907657U, // <4,0,0,u>: Cost 3 vext3 <1,2,3,4>, <0,0,u,1>
- 2570715238U, // <4,0,1,0>: Cost 3 vext1 <3,4,0,1>, LHS
- 2570716058U, // <4,0,1,1>: Cost 3 vext1 <3,4,0,1>, <1,2,3,4>
- 1618165862U, // <4,0,1,2>: Cost 2 vext3 <1,2,3,4>, LHS
- 2570717648U, // <4,0,1,3>: Cost 3 vext1 <3,4,0,1>, <3,4,0,1>
- 2570718518U, // <4,0,1,4>: Cost 3 vext1 <3,4,0,1>, RHS
- 2594607206U, // <4,0,1,5>: Cost 3 vext1 <7,4,0,1>, <5,6,7,4>
- 3662377563U, // <4,0,1,6>: Cost 4 vext1 <6,4,0,1>, <6,4,0,1>
- 2594608436U, // <4,0,1,7>: Cost 3 vext1 <7,4,0,1>, <7,4,0,1>
- 1618165916U, // <4,0,1,u>: Cost 2 vext3 <1,2,3,4>, LHS
- 2685714598U, // <4,0,2,0>: Cost 3 vext3 <0,2,0,4>, <0,2,0,4>
- 3759530159U, // <4,0,2,1>: Cost 4 vext3 <0,2,1,4>, <0,2,1,4>
- 2685862072U, // <4,0,2,2>: Cost 3 vext3 <0,2,2,4>, <0,2,2,4>
- 2631476937U, // <4,0,2,3>: Cost 3 vext2 <2,3,4,0>, <2,3,4,0>
- 2685714636U, // <4,0,2,4>: Cost 3 vext3 <0,2,0,4>, <0,2,4,6>
- 3765649622U, // <4,0,2,5>: Cost 4 vext3 <1,2,3,4>, <0,2,5,7>
- 2686157020U, // <4,0,2,6>: Cost 3 vext3 <0,2,6,4>, <0,2,6,4>
- 3668358453U, // <4,0,2,7>: Cost 4 vext1 <7,4,0,2>, <7,4,0,2>
- 2686304494U, // <4,0,2,u>: Cost 3 vext3 <0,2,u,4>, <0,2,u,4>
- 3632529510U, // <4,0,3,0>: Cost 4 vext1 <1,4,0,3>, LHS
- 2686451968U, // <4,0,3,1>: Cost 3 vext3 <0,3,1,4>, <0,3,1,4>
- 2686525705U, // <4,0,3,2>: Cost 3 vext3 <0,3,2,4>, <0,3,2,4>
- 3760341266U, // <4,0,3,3>: Cost 4 vext3 <0,3,3,4>, <0,3,3,4>
- 3632532790U, // <4,0,3,4>: Cost 4 vext1 <1,4,0,3>, RHS
- 3913254606U, // <4,0,3,5>: Cost 4 vuzpr <3,4,5,0>, <2,3,4,5>
- 3705219740U, // <4,0,3,6>: Cost 4 vext2 <2,3,4,0>, <3,6,4,7>
- 3713845990U, // <4,0,3,7>: Cost 4 vext2 <3,7,4,0>, <3,7,4,0>
- 2686451968U, // <4,0,3,u>: Cost 3 vext3 <0,3,1,4>, <0,3,1,4>
- 2552823910U, // <4,0,4,0>: Cost 3 vext1 <0,4,0,4>, LHS
- 2691907922U, // <4,0,4,1>: Cost 3 vext3 <1,2,3,4>, <0,4,1,5>
- 2691907932U, // <4,0,4,2>: Cost 3 vext3 <1,2,3,4>, <0,4,2,6>
- 3626567830U, // <4,0,4,3>: Cost 4 vext1 <0,4,0,4>, <3,0,1,2>
- 2552827190U, // <4,0,4,4>: Cost 3 vext1 <0,4,0,4>, RHS
- 2631478582U, // <4,0,4,5>: Cost 3 vext2 <2,3,4,0>, RHS
- 3626570017U, // <4,0,4,6>: Cost 4 vext1 <0,4,0,4>, <6,0,1,2>
- 3668374839U, // <4,0,4,7>: Cost 4 vext1 <7,4,0,4>, <7,4,0,4>
- 2552829742U, // <4,0,4,u>: Cost 3 vext1 <0,4,0,4>, LHS
- 2558804070U, // <4,0,5,0>: Cost 3 vext1 <1,4,0,5>, LHS
- 1839644774U, // <4,0,5,1>: Cost 2 vzipl RHS, LHS
- 2913386660U, // <4,0,5,2>: Cost 3 vzipl RHS, <0,2,0,2>
- 2570750420U, // <4,0,5,3>: Cost 3 vext1 <3,4,0,5>, <3,4,0,5>
- 2558807350U, // <4,0,5,4>: Cost 3 vext1 <1,4,0,5>, RHS
- 3987128750U, // <4,0,5,5>: Cost 4 vzipl RHS, <0,5,2,7>
- 3987128822U, // <4,0,5,6>: Cost 4 vzipl RHS, <0,6,1,7>
- 2594641208U, // <4,0,5,7>: Cost 3 vext1 <7,4,0,5>, <7,4,0,5>
- 1839645341U, // <4,0,5,u>: Cost 2 vzipl RHS, LHS
- 2552840294U, // <4,0,6,0>: Cost 3 vext1 <0,4,0,6>, LHS
- 3047604234U, // <4,0,6,1>: Cost 3 vtrnl RHS, <0,0,1,1>
- 1973862502U, // <4,0,6,2>: Cost 2 vtrnl RHS, LHS
- 2570758613U, // <4,0,6,3>: Cost 3 vext1 <3,4,0,6>, <3,4,0,6>
- 2552843574U, // <4,0,6,4>: Cost 3 vext1 <0,4,0,6>, RHS
- 2217664887U, // <4,0,6,5>: Cost 3 vrev <0,4,5,6>
- 3662418528U, // <4,0,6,6>: Cost 4 vext1 <6,4,0,6>, <6,4,0,6>
- 2658022257U, // <4,0,6,7>: Cost 3 vext2 <6,7,4,0>, <6,7,4,0>
- 1973862556U, // <4,0,6,u>: Cost 2 vtrnl RHS, LHS
- 3731764218U, // <4,0,7,0>: Cost 4 vext2 <6,7,4,0>, <7,0,1,2>
- 3988324454U, // <4,0,7,1>: Cost 4 vzipl <4,7,5,0>, LHS
- 4122034278U, // <4,0,7,2>: Cost 4 vtrnl <4,6,7,1>, LHS
- 3735082246U, // <4,0,7,3>: Cost 4 vext2 <7,3,4,0>, <7,3,4,0>
- 3731764536U, // <4,0,7,4>: Cost 4 vext2 <6,7,4,0>, <7,4,0,5>
- 3937145718U, // <4,0,7,5>: Cost 4 vuzpr <7,4,5,0>, <6,7,4,5>
- 3737073145U, // <4,0,7,6>: Cost 4 vext2 <7,6,4,0>, <7,6,4,0>
- 3731764844U, // <4,0,7,7>: Cost 4 vext2 <6,7,4,0>, <7,7,7,7>
- 4122034332U, // <4,0,7,u>: Cost 4 vtrnl <4,6,7,1>, LHS
- 2552856678U, // <4,0,u,0>: Cost 3 vext1 <0,4,0,u>, LHS
- 1841635430U, // <4,0,u,1>: Cost 2 vzipl RHS, LHS
- 1618166429U, // <4,0,u,2>: Cost 2 vext3 <1,2,3,4>, LHS
- 2570774999U, // <4,0,u,3>: Cost 3 vext1 <3,4,0,u>, <3,4,0,u>
- 2552859958U, // <4,0,u,4>: Cost 3 vext1 <0,4,0,u>, RHS
- 2631481498U, // <4,0,u,5>: Cost 3 vext2 <2,3,4,0>, RHS
- 2686157020U, // <4,0,u,6>: Cost 3 vext3 <0,2,6,4>, <0,2,6,4>
- 2594665787U, // <4,0,u,7>: Cost 3 vext1 <7,4,0,u>, <7,4,0,u>
- 1618166483U, // <4,0,u,u>: Cost 2 vext3 <1,2,3,4>, LHS
- 2617548837U, // <4,1,0,0>: Cost 3 vext2 <0,0,4,1>, <0,0,4,1>
- 2622857318U, // <4,1,0,1>: Cost 3 vext2 <0,u,4,1>, LHS
- 3693281484U, // <4,1,0,2>: Cost 4 vext2 <0,3,4,1>, <0,2,4,6>
- 2691908342U, // <4,1,0,3>: Cost 3 vext3 <1,2,3,4>, <1,0,3,2>
- 2622857554U, // <4,1,0,4>: Cost 3 vext2 <0,u,4,1>, <0,4,1,5>
- 3764470538U, // <4,1,0,5>: Cost 4 vext3 <1,0,5,4>, <1,0,5,4>
- 3695272459U, // <4,1,0,6>: Cost 4 vext2 <0,6,4,1>, <0,6,4,1>
- 3733094980U, // <4,1,0,7>: Cost 4 vext2 <7,0,4,1>, <0,7,1,4>
- 2622857885U, // <4,1,0,u>: Cost 3 vext2 <0,u,4,1>, LHS
- 3696599798U, // <4,1,1,0>: Cost 4 vext2 <0,u,4,1>, <1,0,3,2>
- 2691097399U, // <4,1,1,1>: Cost 3 vext3 <1,1,1,4>, <1,1,1,4>
- 2631484314U, // <4,1,1,2>: Cost 3 vext2 <2,3,4,1>, <1,2,3,4>
- 2691908424U, // <4,1,1,3>: Cost 3 vext3 <1,2,3,4>, <1,1,3,3>
- 3696600125U, // <4,1,1,4>: Cost 4 vext2 <0,u,4,1>, <1,4,3,5>
- 3696600175U, // <4,1,1,5>: Cost 4 vext2 <0,u,4,1>, <1,5,0,1>
- 3696600307U, // <4,1,1,6>: Cost 4 vext2 <0,u,4,1>, <1,6,5,7>
- 3668423997U, // <4,1,1,7>: Cost 4 vext1 <7,4,1,1>, <7,4,1,1>
- 2691908469U, // <4,1,1,u>: Cost 3 vext3 <1,2,3,4>, <1,1,u,3>
- 2570797158U, // <4,1,2,0>: Cost 3 vext1 <3,4,1,2>, LHS
- 2570797978U, // <4,1,2,1>: Cost 3 vext1 <3,4,1,2>, <1,2,3,4>
- 3696600680U, // <4,1,2,2>: Cost 4 vext2 <0,u,4,1>, <2,2,2,2>
- 1618166682U, // <4,1,2,3>: Cost 2 vext3 <1,2,3,4>, <1,2,3,4>
- 2570800438U, // <4,1,2,4>: Cost 3 vext1 <3,4,1,2>, RHS
- 3765650347U, // <4,1,2,5>: Cost 4 vext3 <1,2,3,4>, <1,2,5,3>
- 3696601018U, // <4,1,2,6>: Cost 4 vext2 <0,u,4,1>, <2,6,3,7>
- 3668432190U, // <4,1,2,7>: Cost 4 vext1 <7,4,1,2>, <7,4,1,2>
- 1618535367U, // <4,1,2,u>: Cost 2 vext3 <1,2,u,4>, <1,2,u,4>
- 2564833382U, // <4,1,3,0>: Cost 3 vext1 <2,4,1,3>, LHS
- 2691908568U, // <4,1,3,1>: Cost 3 vext3 <1,2,3,4>, <1,3,1,3>
- 2691908578U, // <4,1,3,2>: Cost 3 vext3 <1,2,3,4>, <1,3,2,4>
- 2692572139U, // <4,1,3,3>: Cost 3 vext3 <1,3,3,4>, <1,3,3,4>
- 2564836662U, // <4,1,3,4>: Cost 3 vext1 <2,4,1,3>, RHS
- 2691908608U, // <4,1,3,5>: Cost 3 vext3 <1,2,3,4>, <1,3,5,7>
- 2588725862U, // <4,1,3,6>: Cost 3 vext1 <6,4,1,3>, <6,4,1,3>
- 3662468090U, // <4,1,3,7>: Cost 4 vext1 <6,4,1,3>, <7,0,1,2>
- 2691908631U, // <4,1,3,u>: Cost 3 vext3 <1,2,3,4>, <1,3,u,3>
- 3760194590U, // <4,1,4,0>: Cost 4 vext3 <0,3,1,4>, <1,4,0,1>
- 3693947874U, // <4,1,4,1>: Cost 4 vext2 <0,4,4,1>, <4,1,5,0>
- 3765650484U, // <4,1,4,2>: Cost 4 vext3 <1,2,3,4>, <1,4,2,5>
- 3113877606U, // <4,1,4,3>: Cost 3 vtrnr <4,4,4,4>, LHS
- 3760194630U, // <4,1,4,4>: Cost 4 vext3 <0,3,1,4>, <1,4,4,5>
- 2622860598U, // <4,1,4,5>: Cost 3 vext2 <0,u,4,1>, RHS
- 3297436759U, // <4,1,4,6>: Cost 4 vrev <1,4,6,4>
- 3800007772U, // <4,1,4,7>: Cost 4 vext3 <7,0,1,4>, <1,4,7,0>
- 2622860841U, // <4,1,4,u>: Cost 3 vext2 <0,u,4,1>, RHS
- 1479164006U, // <4,1,5,0>: Cost 2 vext1 <0,4,1,5>, LHS
- 2552906486U, // <4,1,5,1>: Cost 3 vext1 <0,4,1,5>, <1,0,3,2>
- 2552907299U, // <4,1,5,2>: Cost 3 vext1 <0,4,1,5>, <2,1,3,5>
- 2552907926U, // <4,1,5,3>: Cost 3 vext1 <0,4,1,5>, <3,0,1,2>
- 1479167286U, // <4,1,5,4>: Cost 2 vext1 <0,4,1,5>, RHS
- 2913387664U, // <4,1,5,5>: Cost 3 vzipl RHS, <1,5,3,7>
- 2600686074U, // <4,1,5,6>: Cost 3 vext1 <u,4,1,5>, <6,2,7,3>
- 2600686586U, // <4,1,5,7>: Cost 3 vext1 <u,4,1,5>, <7,0,1,2>
- 1479169838U, // <4,1,5,u>: Cost 2 vext1 <0,4,1,5>, LHS
- 2552914022U, // <4,1,6,0>: Cost 3 vext1 <0,4,1,6>, LHS
- 2558886708U, // <4,1,6,1>: Cost 3 vext1 <1,4,1,6>, <1,1,1,1>
- 4028205206U, // <4,1,6,2>: Cost 4 vzipr <0,2,4,6>, <3,0,1,2>
- 3089858662U, // <4,1,6,3>: Cost 3 vtrnr <0,4,2,6>, LHS
- 2552917302U, // <4,1,6,4>: Cost 3 vext1 <0,4,1,6>, RHS
- 2223637584U, // <4,1,6,5>: Cost 3 vrev <1,4,5,6>
- 4121347081U, // <4,1,6,6>: Cost 4 vtrnl RHS, <1,3,6,7>
- 3721155406U, // <4,1,6,7>: Cost 4 vext2 <5,0,4,1>, <6,7,0,1>
- 2552919854U, // <4,1,6,u>: Cost 3 vext1 <0,4,1,6>, LHS
- 2659357716U, // <4,1,7,0>: Cost 3 vext2 <7,0,4,1>, <7,0,4,1>
- 3733763173U, // <4,1,7,1>: Cost 4 vext2 <7,1,4,1>, <7,1,4,1>
- 3734426806U, // <4,1,7,2>: Cost 4 vext2 <7,2,4,1>, <7,2,4,1>
- 2695226671U, // <4,1,7,3>: Cost 3 vext3 <1,7,3,4>, <1,7,3,4>
- 3721155942U, // <4,1,7,4>: Cost 4 vext2 <5,0,4,1>, <7,4,5,6>
- 3721155976U, // <4,1,7,5>: Cost 4 vext2 <5,0,4,1>, <7,5,0,4>
- 3662500458U, // <4,1,7,6>: Cost 4 vext1 <6,4,1,7>, <6,4,1,7>
- 3721156204U, // <4,1,7,7>: Cost 4 vext2 <5,0,4,1>, <7,7,7,7>
- 2659357716U, // <4,1,7,u>: Cost 3 vext2 <7,0,4,1>, <7,0,4,1>
- 1479188582U, // <4,1,u,0>: Cost 2 vext1 <0,4,1,u>, LHS
- 2552931062U, // <4,1,u,1>: Cost 3 vext1 <0,4,1,u>, <1,0,3,2>
- 2552931944U, // <4,1,u,2>: Cost 3 vext1 <0,4,1,u>, <2,2,2,2>
- 1622148480U, // <4,1,u,3>: Cost 2 vext3 <1,u,3,4>, <1,u,3,4>
- 1479191862U, // <4,1,u,4>: Cost 2 vext1 <0,4,1,u>, RHS
- 2622863514U, // <4,1,u,5>: Cost 3 vext2 <0,u,4,1>, RHS
- 2588725862U, // <4,1,u,6>: Cost 3 vext1 <6,4,1,3>, <6,4,1,3>
- 2600686586U, // <4,1,u,7>: Cost 3 vext1 <u,4,1,5>, <7,0,1,2>
- 1479194414U, // <4,1,u,u>: Cost 2 vext1 <0,4,1,u>, LHS
- 2617557030U, // <4,2,0,0>: Cost 3 vext2 <0,0,4,2>, <0,0,4,2>
- 2622865510U, // <4,2,0,1>: Cost 3 vext2 <0,u,4,2>, LHS
- 2622865612U, // <4,2,0,2>: Cost 3 vext2 <0,u,4,2>, <0,2,4,6>
- 3693289753U, // <4,2,0,3>: Cost 4 vext2 <0,3,4,2>, <0,3,4,2>
- 2635473244U, // <4,2,0,4>: Cost 3 vext2 <3,0,4,2>, <0,4,2,6>
- 3765650918U, // <4,2,0,5>: Cost 4 vext3 <1,2,3,4>, <2,0,5,7>
- 2696775148U, // <4,2,0,6>: Cost 3 vext3 <2,0,6,4>, <2,0,6,4>
- 3695944285U, // <4,2,0,7>: Cost 4 vext2 <0,7,4,2>, <0,7,4,2>
- 2622866077U, // <4,2,0,u>: Cost 3 vext2 <0,u,4,2>, LHS
- 3696607990U, // <4,2,1,0>: Cost 4 vext2 <0,u,4,2>, <1,0,3,2>
- 3696608052U, // <4,2,1,1>: Cost 4 vext2 <0,u,4,2>, <1,1,1,1>
- 3696608150U, // <4,2,1,2>: Cost 4 vext2 <0,u,4,2>, <1,2,3,0>
- 3895574630U, // <4,2,1,3>: Cost 4 vuzpr <0,4,u,2>, LHS
- 2691909162U, // <4,2,1,4>: Cost 3 vext3 <1,2,3,4>, <2,1,4,3>
- 3696608400U, // <4,2,1,5>: Cost 4 vext2 <0,u,4,2>, <1,5,3,7>
- 3760784956U, // <4,2,1,6>: Cost 4 vext3 <0,4,0,4>, <2,1,6,3>
- 3773908549U, // <4,2,1,7>: Cost 5 vext3 <2,5,7,4>, <2,1,7,3>
- 2691909162U, // <4,2,1,u>: Cost 3 vext3 <1,2,3,4>, <2,1,4,3>
- 3696608748U, // <4,2,2,0>: Cost 4 vext2 <0,u,4,2>, <2,0,6,4>
- 3696608828U, // <4,2,2,1>: Cost 4 vext2 <0,u,4,2>, <2,1,6,3>
- 2691909224U, // <4,2,2,2>: Cost 3 vext3 <1,2,3,4>, <2,2,2,2>
- 2691909234U, // <4,2,2,3>: Cost 3 vext3 <1,2,3,4>, <2,2,3,3>
- 3759605368U, // <4,2,2,4>: Cost 4 vext3 <0,2,2,4>, <2,2,4,0>
- 3696609156U, // <4,2,2,5>: Cost 4 vext2 <0,u,4,2>, <2,5,6,7>
- 3760785040U, // <4,2,2,6>: Cost 4 vext3 <0,4,0,4>, <2,2,6,6>
- 3668505927U, // <4,2,2,7>: Cost 4 vext1 <7,4,2,2>, <7,4,2,2>
- 2691909279U, // <4,2,2,u>: Cost 3 vext3 <1,2,3,4>, <2,2,u,3>
- 2691909286U, // <4,2,3,0>: Cost 3 vext3 <1,2,3,4>, <2,3,0,1>
- 3764840111U, // <4,2,3,1>: Cost 4 vext3 <1,1,1,4>, <2,3,1,1>
- 3765651129U, // <4,2,3,2>: Cost 4 vext3 <1,2,3,4>, <2,3,2,2>
- 2698544836U, // <4,2,3,3>: Cost 3 vext3 <2,3,3,4>, <2,3,3,4>
- 2685863630U, // <4,2,3,4>: Cost 3 vext3 <0,2,2,4>, <2,3,4,5>
- 2698692310U, // <4,2,3,5>: Cost 3 vext3 <2,3,5,4>, <2,3,5,4>
- 3772507871U, // <4,2,3,6>: Cost 4 vext3 <2,3,6,4>, <2,3,6,4>
- 2698839784U, // <4,2,3,7>: Cost 3 vext3 <2,3,7,4>, <2,3,7,4>
- 2691909358U, // <4,2,3,u>: Cost 3 vext3 <1,2,3,4>, <2,3,u,1>
- 2564915302U, // <4,2,4,0>: Cost 3 vext1 <2,4,2,4>, LHS
- 2564916122U, // <4,2,4,1>: Cost 3 vext1 <2,4,2,4>, <1,2,3,4>
- 2564917004U, // <4,2,4,2>: Cost 3 vext1 <2,4,2,4>, <2,4,2,4>
- 2699208469U, // <4,2,4,3>: Cost 3 vext3 <2,4,3,4>, <2,4,3,4>
- 2564918582U, // <4,2,4,4>: Cost 3 vext1 <2,4,2,4>, RHS
- 2622868790U, // <4,2,4,5>: Cost 3 vext2 <0,u,4,2>, RHS
- 2229667632U, // <4,2,4,6>: Cost 3 vrev <2,4,6,4>
- 3800082229U, // <4,2,4,7>: Cost 4 vext3 <7,0,2,4>, <2,4,7,0>
- 2622869033U, // <4,2,4,u>: Cost 3 vext2 <0,u,4,2>, RHS
- 2552979558U, // <4,2,5,0>: Cost 3 vext1 <0,4,2,5>, LHS
- 2558952342U, // <4,2,5,1>: Cost 3 vext1 <1,4,2,5>, <1,2,3,0>
- 2564925032U, // <4,2,5,2>: Cost 3 vext1 <2,4,2,5>, <2,2,2,2>
- 2967060582U, // <4,2,5,3>: Cost 3 vzipr <2,3,4,5>, LHS
- 2552982838U, // <4,2,5,4>: Cost 3 vext1 <0,4,2,5>, RHS
- 3987130190U, // <4,2,5,5>: Cost 4 vzipl RHS, <2,5,0,7>
- 2913388474U, // <4,2,5,6>: Cost 3 vzipl RHS, <2,6,3,7>
- 3895577910U, // <4,2,5,7>: Cost 4 vuzpr <0,4,u,2>, RHS
- 2552985390U, // <4,2,5,u>: Cost 3 vext1 <0,4,2,5>, LHS
- 1479245926U, // <4,2,6,0>: Cost 2 vext1 <0,4,2,6>, LHS
- 2552988406U, // <4,2,6,1>: Cost 3 vext1 <0,4,2,6>, <1,0,3,2>
- 2552989288U, // <4,2,6,2>: Cost 3 vext1 <0,4,2,6>, <2,2,2,2>
- 2954461286U, // <4,2,6,3>: Cost 3 vzipr <0,2,4,6>, LHS
- 1479249206U, // <4,2,6,4>: Cost 2 vext1 <0,4,2,6>, RHS
- 2229610281U, // <4,2,6,5>: Cost 3 vrev <2,4,5,6>
- 2600767994U, // <4,2,6,6>: Cost 3 vext1 <u,4,2,6>, <6,2,7,3>
- 2600768506U, // <4,2,6,7>: Cost 3 vext1 <u,4,2,6>, <7,0,1,2>
- 1479251758U, // <4,2,6,u>: Cost 2 vext1 <0,4,2,6>, LHS
- 2659365909U, // <4,2,7,0>: Cost 3 vext2 <7,0,4,2>, <7,0,4,2>
- 3733771366U, // <4,2,7,1>: Cost 4 vext2 <7,1,4,2>, <7,1,4,2>
- 3734434999U, // <4,2,7,2>: Cost 4 vext2 <7,2,4,2>, <7,2,4,2>
- 2701199368U, // <4,2,7,3>: Cost 3 vext3 <2,7,3,4>, <2,7,3,4>
- 4175774618U, // <4,2,7,4>: Cost 4 vtrnr <2,4,5,7>, <1,2,3,4>
- 3303360298U, // <4,2,7,5>: Cost 4 vrev <2,4,5,7>
- 3727136217U, // <4,2,7,6>: Cost 4 vext2 <6,0,4,2>, <7,6,0,4>
- 3727136364U, // <4,2,7,7>: Cost 4 vext2 <6,0,4,2>, <7,7,7,7>
- 2659365909U, // <4,2,7,u>: Cost 3 vext2 <7,0,4,2>, <7,0,4,2>
- 1479262310U, // <4,2,u,0>: Cost 2 vext1 <0,4,2,u>, LHS
- 2553004790U, // <4,2,u,1>: Cost 3 vext1 <0,4,2,u>, <1,0,3,2>
- 2553005672U, // <4,2,u,2>: Cost 3 vext1 <0,4,2,u>, <2,2,2,2>
- 2954477670U, // <4,2,u,3>: Cost 3 vzipr <0,2,4,u>, LHS
- 1479265590U, // <4,2,u,4>: Cost 2 vext1 <0,4,2,u>, RHS
- 2622871706U, // <4,2,u,5>: Cost 3 vext2 <0,u,4,2>, RHS
- 2229700404U, // <4,2,u,6>: Cost 3 vrev <2,4,6,u>
- 2600784890U, // <4,2,u,7>: Cost 3 vext1 <u,4,2,u>, <7,0,1,2>
- 1479268142U, // <4,2,u,u>: Cost 2 vext1 <0,4,2,u>, LHS
- 3765651595U, // <4,3,0,0>: Cost 4 vext3 <1,2,3,4>, <3,0,0,0>
- 2691909782U, // <4,3,0,1>: Cost 3 vext3 <1,2,3,4>, <3,0,1,2>
- 2702452897U, // <4,3,0,2>: Cost 3 vext3 <3,0,2,4>, <3,0,2,4>
- 3693297946U, // <4,3,0,3>: Cost 4 vext2 <0,3,4,3>, <0,3,4,3>
- 3760711856U, // <4,3,0,4>: Cost 4 vext3 <0,3,u,4>, <3,0,4,1>
- 2235533820U, // <4,3,0,5>: Cost 3 vrev <3,4,5,0>
- 3309349381U, // <4,3,0,6>: Cost 4 vrev <3,4,6,0>
- 3668563278U, // <4,3,0,7>: Cost 4 vext1 <7,4,3,0>, <7,4,3,0>
- 2691909845U, // <4,3,0,u>: Cost 3 vext3 <1,2,3,4>, <3,0,u,2>
- 2235173328U, // <4,3,1,0>: Cost 3 vrev <3,4,0,1>
- 3764840678U, // <4,3,1,1>: Cost 4 vext3 <1,1,1,4>, <3,1,1,1>
- 2630173594U, // <4,3,1,2>: Cost 3 vext2 <2,1,4,3>, <1,2,3,4>
- 2703190267U, // <4,3,1,3>: Cost 3 vext3 <3,1,3,4>, <3,1,3,4>
- 3760195840U, // <4,3,1,4>: Cost 4 vext3 <0,3,1,4>, <3,1,4,0>
- 3765651724U, // <4,3,1,5>: Cost 4 vext3 <1,2,3,4>, <3,1,5,3>
- 3309357574U, // <4,3,1,6>: Cost 4 vrev <3,4,6,1>
- 3769633054U, // <4,3,1,7>: Cost 4 vext3 <1,u,3,4>, <3,1,7,3>
- 2703558952U, // <4,3,1,u>: Cost 3 vext3 <3,1,u,4>, <3,1,u,4>
- 3626770534U, // <4,3,2,0>: Cost 4 vext1 <0,4,3,2>, LHS
- 2630174250U, // <4,3,2,1>: Cost 3 vext2 <2,1,4,3>, <2,1,4,3>
- 3765651777U, // <4,3,2,2>: Cost 4 vext3 <1,2,3,4>, <3,2,2,2>
- 2703853900U, // <4,3,2,3>: Cost 3 vext3 <3,2,3,4>, <3,2,3,4>
- 3626773814U, // <4,3,2,4>: Cost 4 vext1 <0,4,3,2>, RHS
- 2704001374U, // <4,3,2,5>: Cost 3 vext3 <3,2,5,4>, <3,2,5,4>
- 3765651814U, // <4,3,2,6>: Cost 4 vext3 <1,2,3,4>, <3,2,6,3>
- 3769633135U, // <4,3,2,7>: Cost 4 vext3 <1,u,3,4>, <3,2,7,3>
- 2634819681U, // <4,3,2,u>: Cost 3 vext2 <2,u,4,3>, <2,u,4,3>
- 3765651839U, // <4,3,3,0>: Cost 4 vext3 <1,2,3,4>, <3,3,0,1>
- 3765651848U, // <4,3,3,1>: Cost 4 vext3 <1,2,3,4>, <3,3,1,1>
- 3710552404U, // <4,3,3,2>: Cost 4 vext2 <3,2,4,3>, <3,2,4,3>
- 2691910044U, // <4,3,3,3>: Cost 3 vext3 <1,2,3,4>, <3,3,3,3>
- 2704591270U, // <4,3,3,4>: Cost 3 vext3 <3,3,4,4>, <3,3,4,4>
- 3769633202U, // <4,3,3,5>: Cost 4 vext3 <1,u,3,4>, <3,3,5,7>
- 3703917212U, // <4,3,3,6>: Cost 4 vext2 <2,1,4,3>, <3,6,4,7>
- 3769633220U, // <4,3,3,7>: Cost 4 vext3 <1,u,3,4>, <3,3,7,7>
- 2691910044U, // <4,3,3,u>: Cost 3 vext3 <1,2,3,4>, <3,3,3,3>
- 2691910096U, // <4,3,4,0>: Cost 3 vext3 <1,2,3,4>, <3,4,0,1>
- 2691910106U, // <4,3,4,1>: Cost 3 vext3 <1,2,3,4>, <3,4,1,2>
- 2564990741U, // <4,3,4,2>: Cost 3 vext1 <2,4,3,4>, <2,4,3,4>
- 3765651946U, // <4,3,4,3>: Cost 4 vext3 <1,2,3,4>, <3,4,3,0>
- 2691910136U, // <4,3,4,4>: Cost 3 vext3 <1,2,3,4>, <3,4,4,5>
- 2686454274U, // <4,3,4,5>: Cost 3 vext3 <0,3,1,4>, <3,4,5,6>
- 2235640329U, // <4,3,4,6>: Cost 3 vrev <3,4,6,4>
- 3801483792U, // <4,3,4,7>: Cost 4 vext3 <7,2,3,4>, <3,4,7,2>
- 2691910168U, // <4,3,4,u>: Cost 3 vext3 <1,2,3,4>, <3,4,u,1>
- 2559025254U, // <4,3,5,0>: Cost 3 vext1 <1,4,3,5>, LHS
- 2559026237U, // <4,3,5,1>: Cost 3 vext1 <1,4,3,5>, <1,4,3,5>
- 2564998862U, // <4,3,5,2>: Cost 3 vext1 <2,4,3,5>, <2,3,4,5>
- 2570971548U, // <4,3,5,3>: Cost 3 vext1 <3,4,3,5>, <3,3,3,3>
- 2559028534U, // <4,3,5,4>: Cost 3 vext1 <1,4,3,5>, RHS
- 4163519477U, // <4,3,5,5>: Cost 4 vtrnr <0,4,1,5>, <1,3,4,5>
- 3309390346U, // <4,3,5,6>: Cost 4 vrev <3,4,6,5>
- 2706139747U, // <4,3,5,7>: Cost 3 vext3 <3,5,7,4>, <3,5,7,4>
- 2559031086U, // <4,3,5,u>: Cost 3 vext1 <1,4,3,5>, LHS
- 2559033446U, // <4,3,6,0>: Cost 3 vext1 <1,4,3,6>, LHS
- 2559034430U, // <4,3,6,1>: Cost 3 vext1 <1,4,3,6>, <1,4,3,6>
- 2565007127U, // <4,3,6,2>: Cost 3 vext1 <2,4,3,6>, <2,4,3,6>
- 2570979740U, // <4,3,6,3>: Cost 3 vext1 <3,4,3,6>, <3,3,3,3>
- 2559036726U, // <4,3,6,4>: Cost 3 vext1 <1,4,3,6>, RHS
- 1161841154U, // <4,3,6,5>: Cost 2 vrev <3,4,5,6>
- 4028203932U, // <4,3,6,6>: Cost 4 vzipr <0,2,4,6>, <1,2,3,6>
- 2706803380U, // <4,3,6,7>: Cost 3 vext3 <3,6,7,4>, <3,6,7,4>
- 1162062365U, // <4,3,6,u>: Cost 2 vrev <3,4,u,6>
- 3769633475U, // <4,3,7,0>: Cost 4 vext3 <1,u,3,4>, <3,7,0,1>
- 3769633488U, // <4,3,7,1>: Cost 4 vext3 <1,u,3,4>, <3,7,1,5>
- 3638757144U, // <4,3,7,2>: Cost 4 vext1 <2,4,3,7>, <2,4,3,7>
- 3769633508U, // <4,3,7,3>: Cost 4 vext3 <1,u,3,4>, <3,7,3,7>
- 3769633515U, // <4,3,7,4>: Cost 4 vext3 <1,u,3,4>, <3,7,4,5>
- 3769633526U, // <4,3,7,5>: Cost 4 vext3 <1,u,3,4>, <3,7,5,7>
- 3662647932U, // <4,3,7,6>: Cost 4 vext1 <6,4,3,7>, <6,4,3,7>
- 3781208837U, // <4,3,7,7>: Cost 4 vext3 <3,7,7,4>, <3,7,7,4>
- 3769633547U, // <4,3,7,u>: Cost 4 vext3 <1,u,3,4>, <3,7,u,1>
- 2559049830U, // <4,3,u,0>: Cost 3 vext1 <1,4,3,u>, LHS
- 2691910430U, // <4,3,u,1>: Cost 3 vext3 <1,2,3,4>, <3,u,1,2>
- 2565023513U, // <4,3,u,2>: Cost 3 vext1 <2,4,3,u>, <2,4,3,u>
- 2707835698U, // <4,3,u,3>: Cost 3 vext3 <3,u,3,4>, <3,u,3,4>
- 2559053110U, // <4,3,u,4>: Cost 3 vext1 <1,4,3,u>, RHS
- 1161857540U, // <4,3,u,5>: Cost 2 vrev <3,4,5,u>
- 2235673101U, // <4,3,u,6>: Cost 3 vrev <3,4,6,u>
- 2708130646U, // <4,3,u,7>: Cost 3 vext3 <3,u,7,4>, <3,u,7,4>
- 1162078751U, // <4,3,u,u>: Cost 2 vrev <3,4,u,u>
- 2617573416U, // <4,4,0,0>: Cost 3 vext2 <0,0,4,4>, <0,0,4,4>
- 1570373734U, // <4,4,0,1>: Cost 2 vext2 <4,4,4,4>, LHS
- 2779676774U, // <4,4,0,2>: Cost 3 vuzpl <4,6,4,6>, LHS
- 3760196480U, // <4,4,0,3>: Cost 4 vext3 <0,3,1,4>, <4,0,3,1>
- 2576977100U, // <4,4,0,4>: Cost 3 vext1 <4,4,4,0>, <4,4,4,0>
- 2718747538U, // <4,4,0,5>: Cost 3 vext3 <5,6,7,4>, <4,0,5,1>
- 2718747548U, // <4,4,0,6>: Cost 3 vext3 <5,6,7,4>, <4,0,6,2>
- 3668637015U, // <4,4,0,7>: Cost 4 vext1 <7,4,4,0>, <7,4,4,0>
- 1570374301U, // <4,4,0,u>: Cost 2 vext2 <4,4,4,4>, LHS
- 2644116214U, // <4,4,1,0>: Cost 3 vext2 <4,4,4,4>, <1,0,3,2>
- 2644116276U, // <4,4,1,1>: Cost 3 vext2 <4,4,4,4>, <1,1,1,1>
- 2691910602U, // <4,4,1,2>: Cost 3 vext3 <1,2,3,4>, <4,1,2,3>
- 2644116440U, // <4,4,1,3>: Cost 3 vext2 <4,4,4,4>, <1,3,1,3>
- 2711227356U, // <4,4,1,4>: Cost 3 vext3 <4,4,4,4>, <4,1,4,3>
- 2709310438U, // <4,4,1,5>: Cost 3 vext3 <4,1,5,4>, <4,1,5,4>
- 3765652462U, // <4,4,1,6>: Cost 4 vext3 <1,2,3,4>, <4,1,6,3>
- 3768970231U, // <4,4,1,7>: Cost 4 vext3 <1,7,3,4>, <4,1,7,3>
- 2695891968U, // <4,4,1,u>: Cost 3 vext3 <1,u,3,4>, <4,1,u,3>
- 3703260634U, // <4,4,2,0>: Cost 4 vext2 <2,0,4,4>, <2,0,4,4>
- 3765652499U, // <4,4,2,1>: Cost 4 vext3 <1,2,3,4>, <4,2,1,4>
- 2644117096U, // <4,4,2,2>: Cost 3 vext2 <4,4,4,4>, <2,2,2,2>
- 2631509709U, // <4,4,2,3>: Cost 3 vext2 <2,3,4,4>, <2,3,4,4>
- 2644117269U, // <4,4,2,4>: Cost 3 vext2 <4,4,4,4>, <2,4,3,4>
- 3705251698U, // <4,4,2,5>: Cost 4 vext2 <2,3,4,4>, <2,5,4,7>
- 2710047808U, // <4,4,2,6>: Cost 3 vext3 <4,2,6,4>, <4,2,6,4>
- 3783863369U, // <4,4,2,7>: Cost 4 vext3 <4,2,7,4>, <4,2,7,4>
- 2634827874U, // <4,4,2,u>: Cost 3 vext2 <2,u,4,4>, <2,u,4,4>
- 2644117654U, // <4,4,3,0>: Cost 3 vext2 <4,4,4,4>, <3,0,1,2>
- 3638797210U, // <4,4,3,1>: Cost 4 vext1 <2,4,4,3>, <1,2,3,4>
- 3638798082U, // <4,4,3,2>: Cost 4 vext1 <2,4,4,3>, <2,4,1,3>
- 2637482406U, // <4,4,3,3>: Cost 3 vext2 <3,3,4,4>, <3,3,4,4>
- 2638146039U, // <4,4,3,4>: Cost 3 vext2 <3,4,4,4>, <3,4,4,4>
- 3913287374U, // <4,4,3,5>: Cost 4 vuzpr <3,4,5,4>, <2,3,4,5>
- 3765652625U, // <4,4,3,6>: Cost 4 vext3 <1,2,3,4>, <4,3,6,4>
- 3713878762U, // <4,4,3,7>: Cost 4 vext2 <3,7,4,4>, <3,7,4,4>
- 2637482406U, // <4,4,3,u>: Cost 3 vext2 <3,3,4,4>, <3,3,4,4>
- 1503264870U, // <4,4,4,0>: Cost 2 vext1 <4,4,4,4>, LHS
- 2577007514U, // <4,4,4,1>: Cost 3 vext1 <4,4,4,4>, <1,2,3,4>
- 2577008232U, // <4,4,4,2>: Cost 3 vext1 <4,4,4,4>, <2,2,2,2>
- 2571037175U, // <4,4,4,3>: Cost 3 vext1 <3,4,4,4>, <3,4,4,4>
- 161926454U, // <4,4,4,4>: Cost 1 vdup0 RHS
- 1570377014U, // <4,4,4,5>: Cost 2 vext2 <4,4,4,4>, RHS
- 2779680054U, // <4,4,4,6>: Cost 3 vuzpl <4,6,4,6>, RHS
- 2594927963U, // <4,4,4,7>: Cost 3 vext1 <7,4,4,4>, <7,4,4,4>
- 161926454U, // <4,4,4,u>: Cost 1 vdup0 RHS
- 2571042918U, // <4,4,5,0>: Cost 3 vext1 <3,4,4,5>, LHS
- 2571043738U, // <4,4,5,1>: Cost 3 vext1 <3,4,4,5>, <1,2,3,4>
- 3638814495U, // <4,4,5,2>: Cost 4 vext1 <2,4,4,5>, <2,4,4,5>
- 2571045368U, // <4,4,5,3>: Cost 3 vext1 <3,4,4,5>, <3,4,4,5>
- 2571046198U, // <4,4,5,4>: Cost 3 vext1 <3,4,4,5>, RHS
- 1839648054U, // <4,4,5,5>: Cost 2 vzipl RHS, RHS
- 1618169142U, // <4,4,5,6>: Cost 2 vext3 <1,2,3,4>, RHS
- 2594936156U, // <4,4,5,7>: Cost 3 vext1 <7,4,4,5>, <7,4,4,5>
- 1618169160U, // <4,4,5,u>: Cost 2 vext3 <1,2,3,4>, RHS
- 2553135206U, // <4,4,6,0>: Cost 3 vext1 <0,4,4,6>, LHS
- 3626877686U, // <4,4,6,1>: Cost 4 vext1 <0,4,4,6>, <1,0,3,2>
- 2565080782U, // <4,4,6,2>: Cost 3 vext1 <2,4,4,6>, <2,3,4,5>
- 2571053561U, // <4,4,6,3>: Cost 3 vext1 <3,4,4,6>, <3,4,4,6>
- 2553138486U, // <4,4,6,4>: Cost 3 vext1 <0,4,4,6>, RHS
- 2241555675U, // <4,4,6,5>: Cost 3 vrev <4,4,5,6>
- 1973865782U, // <4,4,6,6>: Cost 2 vtrnl RHS, RHS
- 2658055029U, // <4,4,6,7>: Cost 3 vext2 <6,7,4,4>, <6,7,4,4>
- 1973865800U, // <4,4,6,u>: Cost 2 vtrnl RHS, RHS
- 2644120570U, // <4,4,7,0>: Cost 3 vext2 <4,4,4,4>, <7,0,1,2>
- 3638829978U, // <4,4,7,1>: Cost 4 vext1 <2,4,4,7>, <1,2,3,4>
- 3638830881U, // <4,4,7,2>: Cost 4 vext1 <2,4,4,7>, <2,4,4,7>
- 3735115018U, // <4,4,7,3>: Cost 4 vext2 <7,3,4,4>, <7,3,4,4>
- 2662036827U, // <4,4,7,4>: Cost 3 vext2 <7,4,4,4>, <7,4,4,4>
- 2713292236U, // <4,4,7,5>: Cost 3 vext3 <4,7,5,4>, <4,7,5,4>
- 2713365973U, // <4,4,7,6>: Cost 3 vext3 <4,7,6,4>, <4,7,6,4>
- 2644121196U, // <4,4,7,7>: Cost 3 vext2 <4,4,4,4>, <7,7,7,7>
- 2662036827U, // <4,4,7,u>: Cost 3 vext2 <7,4,4,4>, <7,4,4,4>
- 1503297638U, // <4,4,u,0>: Cost 2 vext1 <4,4,4,u>, LHS
- 1570379566U, // <4,4,u,1>: Cost 2 vext2 <4,4,4,4>, LHS
- 2779682606U, // <4,4,u,2>: Cost 3 vuzpl <4,6,4,6>, LHS
- 2571069947U, // <4,4,u,3>: Cost 3 vext1 <3,4,4,u>, <3,4,4,u>
- 161926454U, // <4,4,u,4>: Cost 1 vdup0 RHS
- 1841638710U, // <4,4,u,5>: Cost 2 vzipl RHS, RHS
- 1618169385U, // <4,4,u,6>: Cost 2 vext3 <1,2,3,4>, RHS
- 2594960735U, // <4,4,u,7>: Cost 3 vext1 <7,4,4,u>, <7,4,4,u>
- 161926454U, // <4,4,u,u>: Cost 1 vdup0 RHS
- 2631516160U, // <4,5,0,0>: Cost 3 vext2 <2,3,4,5>, <0,0,0,0>
- 1557774438U, // <4,5,0,1>: Cost 2 vext2 <2,3,4,5>, LHS
- 2618908875U, // <4,5,0,2>: Cost 3 vext2 <0,2,4,5>, <0,2,4,5>
- 2571078140U, // <4,5,0,3>: Cost 3 vext1 <3,4,5,0>, <3,4,5,0>
- 2626871634U, // <4,5,0,4>: Cost 3 vext2 <1,5,4,5>, <0,4,1,5>
- 3705258414U, // <4,5,0,5>: Cost 4 vext2 <2,3,4,5>, <0,5,2,7>
- 2594968438U, // <4,5,0,6>: Cost 3 vext1 <7,4,5,0>, <6,7,4,5>
- 2594968928U, // <4,5,0,7>: Cost 3 vext1 <7,4,5,0>, <7,4,5,0>
- 1557775005U, // <4,5,0,u>: Cost 2 vext2 <2,3,4,5>, LHS
- 2631516918U, // <4,5,1,0>: Cost 3 vext2 <2,3,4,5>, <1,0,3,2>
- 2624217939U, // <4,5,1,1>: Cost 3 vext2 <1,1,4,5>, <1,1,4,5>
- 2631517078U, // <4,5,1,2>: Cost 3 vext2 <2,3,4,5>, <1,2,3,0>
- 2821341286U, // <4,5,1,3>: Cost 3 vuzpr <0,4,1,5>, LHS
- 3895086054U, // <4,5,1,4>: Cost 4 vuzpr <0,4,1,5>, <4,1,5,4>
- 2626872471U, // <4,5,1,5>: Cost 3 vext2 <1,5,4,5>, <1,5,4,5>
- 3895083131U, // <4,5,1,6>: Cost 4 vuzpr <0,4,1,5>, <0,1,4,6>
- 2718748368U, // <4,5,1,7>: Cost 3 vext3 <5,6,7,4>, <5,1,7,3>
- 2821341291U, // <4,5,1,u>: Cost 3 vuzpr <0,4,1,5>, LHS
- 2571092070U, // <4,5,2,0>: Cost 3 vext1 <3,4,5,2>, LHS
- 3699287585U, // <4,5,2,1>: Cost 4 vext2 <1,3,4,5>, <2,1,3,3>
- 2630854269U, // <4,5,2,2>: Cost 3 vext2 <2,2,4,5>, <2,2,4,5>
- 1557776078U, // <4,5,2,3>: Cost 2 vext2 <2,3,4,5>, <2,3,4,5>
- 2631517974U, // <4,5,2,4>: Cost 3 vext2 <2,3,4,5>, <2,4,3,5>
- 3692652384U, // <4,5,2,5>: Cost 4 vext2 <0,2,4,5>, <2,5,2,7>
- 2631518138U, // <4,5,2,6>: Cost 3 vext2 <2,3,4,5>, <2,6,3,7>
- 4164013366U, // <4,5,2,7>: Cost 4 vtrnr <0,4,u,2>, RHS
- 1561094243U, // <4,5,2,u>: Cost 2 vext2 <2,u,4,5>, <2,u,4,5>
- 2631518358U, // <4,5,3,0>: Cost 3 vext2 <2,3,4,5>, <3,0,1,2>
- 3895084710U, // <4,5,3,1>: Cost 4 vuzpr <0,4,1,5>, <2,3,0,1>
- 2631518540U, // <4,5,3,2>: Cost 3 vext2 <2,3,4,5>, <3,2,3,4>
- 2631518620U, // <4,5,3,3>: Cost 3 vext2 <2,3,4,5>, <3,3,3,3>
- 2631518716U, // <4,5,3,4>: Cost 3 vext2 <2,3,4,5>, <3,4,5,0>
- 2631518784U, // <4,5,3,5>: Cost 3 vext2 <2,3,4,5>, <3,5,3,5>
- 2658060980U, // <4,5,3,6>: Cost 3 vext2 <6,7,4,5>, <3,6,7,4>
- 2640145131U, // <4,5,3,7>: Cost 3 vext2 <3,7,4,5>, <3,7,4,5>
- 2631519006U, // <4,5,3,u>: Cost 3 vext2 <2,3,4,5>, <3,u,1,2>
- 2571108454U, // <4,5,4,0>: Cost 3 vext1 <3,4,5,4>, LHS
- 3632907342U, // <4,5,4,1>: Cost 4 vext1 <1,4,5,4>, <1,4,5,4>
- 2571110094U, // <4,5,4,2>: Cost 3 vext1 <3,4,5,4>, <2,3,4,5>
- 2571110912U, // <4,5,4,3>: Cost 3 vext1 <3,4,5,4>, <3,4,5,4>
- 2571111734U, // <4,5,4,4>: Cost 3 vext1 <3,4,5,4>, RHS
- 1557777718U, // <4,5,4,5>: Cost 2 vext2 <2,3,4,5>, RHS
- 2645454195U, // <4,5,4,6>: Cost 3 vext2 <4,6,4,5>, <4,6,4,5>
- 2718748614U, // <4,5,4,7>: Cost 3 vext3 <5,6,7,4>, <5,4,7,6>
- 1557777961U, // <4,5,4,u>: Cost 2 vext2 <2,3,4,5>, RHS
- 1503346790U, // <4,5,5,0>: Cost 2 vext1 <4,4,5,5>, LHS
- 2913398480U, // <4,5,5,1>: Cost 3 vzipl RHS, <5,1,7,3>
- 2631519998U, // <4,5,5,2>: Cost 3 vext2 <2,3,4,5>, <5,2,3,4>
- 2577090710U, // <4,5,5,3>: Cost 3 vext1 <4,4,5,5>, <3,0,1,2>
- 1503349978U, // <4,5,5,4>: Cost 2 vext1 <4,4,5,5>, <4,4,5,5>
- 2631520260U, // <4,5,5,5>: Cost 3 vext2 <2,3,4,5>, <5,5,5,5>
- 2913390690U, // <4,5,5,6>: Cost 3 vzipl RHS, <5,6,7,0>
- 2821344566U, // <4,5,5,7>: Cost 3 vuzpr <0,4,1,5>, RHS
- 1503352622U, // <4,5,5,u>: Cost 2 vext1 <4,4,5,5>, LHS
- 1497383014U, // <4,5,6,0>: Cost 2 vext1 <3,4,5,6>, LHS
- 2559181904U, // <4,5,6,1>: Cost 3 vext1 <1,4,5,6>, <1,4,5,6>
- 2565154601U, // <4,5,6,2>: Cost 3 vext1 <2,4,5,6>, <2,4,5,6>
- 1497385474U, // <4,5,6,3>: Cost 2 vext1 <3,4,5,6>, <3,4,5,6>
- 1497386294U, // <4,5,6,4>: Cost 2 vext1 <3,4,5,6>, RHS
- 3047608324U, // <4,5,6,5>: Cost 3 vtrnl RHS, <5,5,5,5>
- 2571129656U, // <4,5,6,6>: Cost 3 vext1 <3,4,5,6>, <6,6,6,6>
- 27705344U, // <4,5,6,7>: Cost 0 copy RHS
- 27705344U, // <4,5,6,u>: Cost 0 copy RHS
- 2565161062U, // <4,5,7,0>: Cost 3 vext1 <2,4,5,7>, LHS
- 2565161882U, // <4,5,7,1>: Cost 3 vext1 <2,4,5,7>, <1,2,3,4>
- 2565162794U, // <4,5,7,2>: Cost 3 vext1 <2,4,5,7>, <2,4,5,7>
- 2661381387U, // <4,5,7,3>: Cost 3 vext2 <7,3,4,5>, <7,3,4,5>
- 2565164342U, // <4,5,7,4>: Cost 3 vext1 <2,4,5,7>, RHS
- 2718748840U, // <4,5,7,5>: Cost 3 vext3 <5,6,7,4>, <5,7,5,7>
- 2718748846U, // <4,5,7,6>: Cost 3 vext3 <5,6,7,4>, <5,7,6,4>
- 2719412407U, // <4,5,7,7>: Cost 3 vext3 <5,7,7,4>, <5,7,7,4>
- 2565166894U, // <4,5,7,u>: Cost 3 vext1 <2,4,5,7>, LHS
- 1497399398U, // <4,5,u,0>: Cost 2 vext1 <3,4,5,u>, LHS
- 1557780270U, // <4,5,u,1>: Cost 2 vext2 <2,3,4,5>, LHS
- 2631522181U, // <4,5,u,2>: Cost 3 vext2 <2,3,4,5>, <u,2,3,0>
- 1497401860U, // <4,5,u,3>: Cost 2 vext1 <3,4,5,u>, <3,4,5,u>
- 1497402678U, // <4,5,u,4>: Cost 2 vext1 <3,4,5,u>, RHS
- 1557780634U, // <4,5,u,5>: Cost 2 vext2 <2,3,4,5>, RHS
- 2631522512U, // <4,5,u,6>: Cost 3 vext2 <2,3,4,5>, <u,6,3,7>
- 27705344U, // <4,5,u,7>: Cost 0 copy RHS
- 27705344U, // <4,5,u,u>: Cost 0 copy RHS
- 2618916864U, // <4,6,0,0>: Cost 3 vext2 <0,2,4,6>, <0,0,0,0>
- 1545175142U, // <4,6,0,1>: Cost 2 vext2 <0,2,4,6>, LHS
- 1545175244U, // <4,6,0,2>: Cost 2 vext2 <0,2,4,6>, <0,2,4,6>
- 3692658940U, // <4,6,0,3>: Cost 4 vext2 <0,2,4,6>, <0,3,1,0>
- 2618917202U, // <4,6,0,4>: Cost 3 vext2 <0,2,4,6>, <0,4,1,5>
- 3852910806U, // <4,6,0,5>: Cost 4 vuzpl RHS, <0,2,5,7>
- 2253525648U, // <4,6,0,6>: Cost 3 vrev <6,4,6,0>
- 4040764726U, // <4,6,0,7>: Cost 4 vzipr <2,3,4,0>, RHS
- 1545175709U, // <4,6,0,u>: Cost 2 vext2 <0,2,4,6>, LHS
- 2618917622U, // <4,6,1,0>: Cost 3 vext2 <0,2,4,6>, <1,0,3,2>
- 2618917684U, // <4,6,1,1>: Cost 3 vext2 <0,2,4,6>, <1,1,1,1>
- 2618917782U, // <4,6,1,2>: Cost 3 vext2 <0,2,4,6>, <1,2,3,0>
- 2618917848U, // <4,6,1,3>: Cost 3 vext2 <0,2,4,6>, <1,3,1,3>
- 3692659773U, // <4,6,1,4>: Cost 4 vext2 <0,2,4,6>, <1,4,3,5>
- 2618918032U, // <4,6,1,5>: Cost 3 vext2 <0,2,4,6>, <1,5,3,7>
- 3692659937U, // <4,6,1,6>: Cost 4 vext2 <0,2,4,6>, <1,6,3,7>
- 4032146742U, // <4,6,1,7>: Cost 4 vzipr <0,u,4,1>, RHS
- 2618918253U, // <4,6,1,u>: Cost 3 vext2 <0,2,4,6>, <1,u,1,3>
- 2618918380U, // <4,6,2,0>: Cost 3 vext2 <0,2,4,6>, <2,0,6,4>
- 2618918460U, // <4,6,2,1>: Cost 3 vext2 <0,2,4,6>, <2,1,6,3>
- 2618918504U, // <4,6,2,2>: Cost 3 vext2 <0,2,4,6>, <2,2,2,2>
- 2618918566U, // <4,6,2,3>: Cost 3 vext2 <0,2,4,6>, <2,3,0,1>
- 2618918679U, // <4,6,2,4>: Cost 3 vext2 <0,2,4,6>, <2,4,3,6>
- 2618918788U, // <4,6,2,5>: Cost 3 vext2 <0,2,4,6>, <2,5,6,7>
- 2618918842U, // <4,6,2,6>: Cost 3 vext2 <0,2,4,6>, <2,6,3,7>
- 2718749178U, // <4,6,2,7>: Cost 3 vext3 <5,6,7,4>, <6,2,7,3>
- 2618918971U, // <4,6,2,u>: Cost 3 vext2 <0,2,4,6>, <2,u,0,1>
- 2618919062U, // <4,6,3,0>: Cost 3 vext2 <0,2,4,6>, <3,0,1,2>
- 2636171526U, // <4,6,3,1>: Cost 3 vext2 <3,1,4,6>, <3,1,4,6>
- 3692661057U, // <4,6,3,2>: Cost 4 vext2 <0,2,4,6>, <3,2,2,2>
- 2618919324U, // <4,6,3,3>: Cost 3 vext2 <0,2,4,6>, <3,3,3,3>
- 2618919426U, // <4,6,3,4>: Cost 3 vext2 <0,2,4,6>, <3,4,5,6>
- 2638826058U, // <4,6,3,5>: Cost 3 vext2 <3,5,4,6>, <3,5,4,6>
- 3913303030U, // <4,6,3,6>: Cost 4 vuzpr <3,4,5,6>, <1,3,4,6>
- 2722730572U, // <4,6,3,7>: Cost 3 vext3 <6,3,7,4>, <6,3,7,4>
- 2618919710U, // <4,6,3,u>: Cost 3 vext2 <0,2,4,6>, <3,u,1,2>
- 2565210214U, // <4,6,4,0>: Cost 3 vext1 <2,4,6,4>, LHS
- 2718749286U, // <4,6,4,1>: Cost 3 vext3 <5,6,7,4>, <6,4,1,3>
- 2565211952U, // <4,6,4,2>: Cost 3 vext1 <2,4,6,4>, <2,4,6,4>
- 2571184649U, // <4,6,4,3>: Cost 3 vext1 <3,4,6,4>, <3,4,6,4>
- 2565213494U, // <4,6,4,4>: Cost 3 vext1 <2,4,6,4>, RHS
- 1545178422U, // <4,6,4,5>: Cost 2 vext2 <0,2,4,6>, RHS
- 1705430326U, // <4,6,4,6>: Cost 2 vuzpl RHS, RHS
- 2595075437U, // <4,6,4,7>: Cost 3 vext1 <7,4,6,4>, <7,4,6,4>
- 1545178665U, // <4,6,4,u>: Cost 2 vext2 <0,2,4,6>, RHS
- 2565218406U, // <4,6,5,0>: Cost 3 vext1 <2,4,6,5>, LHS
- 2645462736U, // <4,6,5,1>: Cost 3 vext2 <4,6,4,6>, <5,1,7,3>
- 2913399290U, // <4,6,5,2>: Cost 3 vzipl RHS, <6,2,7,3>
- 3913305394U, // <4,6,5,3>: Cost 4 vuzpr <3,4,5,6>, <4,5,6,3>
- 2645462982U, // <4,6,5,4>: Cost 3 vext2 <4,6,4,6>, <5,4,7,6>
- 2779172868U, // <4,6,5,5>: Cost 3 vuzpl RHS, <5,5,5,5>
- 2913391416U, // <4,6,5,6>: Cost 3 vzipl RHS, <6,6,6,6>
- 2821426486U, // <4,6,5,7>: Cost 3 vuzpr <0,4,2,6>, RHS
- 2821426487U, // <4,6,5,u>: Cost 3 vuzpr <0,4,2,6>, RHS
- 1503428710U, // <4,6,6,0>: Cost 2 vext1 <4,4,6,6>, LHS
- 2577171190U, // <4,6,6,1>: Cost 3 vext1 <4,4,6,6>, <1,0,3,2>
- 2645463546U, // <4,6,6,2>: Cost 3 vext2 <4,6,4,6>, <6,2,7,3>
- 2577172630U, // <4,6,6,3>: Cost 3 vext1 <4,4,6,6>, <3,0,1,2>
- 1503431908U, // <4,6,6,4>: Cost 2 vext1 <4,4,6,6>, <4,4,6,6>
- 2253501069U, // <4,6,6,5>: Cost 3 vrev <6,4,5,6>
- 2618921784U, // <4,6,6,6>: Cost 3 vext2 <0,2,4,6>, <6,6,6,6>
- 2954464566U, // <4,6,6,7>: Cost 3 vzipr <0,2,4,6>, RHS
- 1503434542U, // <4,6,6,u>: Cost 2 vext1 <4,4,6,6>, LHS
- 2645464058U, // <4,6,7,0>: Cost 3 vext2 <4,6,4,6>, <7,0,1,2>
- 2779173882U, // <4,6,7,1>: Cost 3 vuzpl RHS, <7,0,1,2>
- 3638978355U, // <4,6,7,2>: Cost 4 vext1 <2,4,6,7>, <2,4,6,7>
- 2725090156U, // <4,6,7,3>: Cost 3 vext3 <6,7,3,4>, <6,7,3,4>
- 2645464422U, // <4,6,7,4>: Cost 3 vext2 <4,6,4,6>, <7,4,5,6>
- 2779174246U, // <4,6,7,5>: Cost 3 vuzpl RHS, <7,4,5,6>
- 3852915914U, // <4,6,7,6>: Cost 4 vuzpl RHS, <7,2,6,3>
- 2779174508U, // <4,6,7,7>: Cost 3 vuzpl RHS, <7,7,7,7>
- 2779173945U, // <4,6,7,u>: Cost 3 vuzpl RHS, <7,0,u,2>
- 1503445094U, // <4,6,u,0>: Cost 2 vext1 <4,4,6,u>, LHS
- 1545180974U, // <4,6,u,1>: Cost 2 vext2 <0,2,4,6>, LHS
- 1705432878U, // <4,6,u,2>: Cost 2 vuzpl RHS, LHS
- 2618922940U, // <4,6,u,3>: Cost 3 vext2 <0,2,4,6>, <u,3,0,1>
- 1503448294U, // <4,6,u,4>: Cost 2 vext1 <4,4,6,u>, <4,4,6,u>
- 1545181338U, // <4,6,u,5>: Cost 2 vext2 <0,2,4,6>, RHS
- 1705433242U, // <4,6,u,6>: Cost 2 vuzpl RHS, RHS
- 2954480950U, // <4,6,u,7>: Cost 3 vzipr <0,2,4,u>, RHS
- 1545181541U, // <4,6,u,u>: Cost 2 vext2 <0,2,4,6>, LHS
- 3706601472U, // <4,7,0,0>: Cost 4 vext2 <2,5,4,7>, <0,0,0,0>
- 2632859750U, // <4,7,0,1>: Cost 3 vext2 <2,5,4,7>, LHS
- 2726343685U, // <4,7,0,2>: Cost 3 vext3 <7,0,2,4>, <7,0,2,4>
- 3701293312U, // <4,7,0,3>: Cost 4 vext2 <1,6,4,7>, <0,3,1,4>
- 3706601810U, // <4,7,0,4>: Cost 4 vext2 <2,5,4,7>, <0,4,1,5>
- 2259424608U, // <4,7,0,5>: Cost 3 vrev <7,4,5,0>
- 3695321617U, // <4,7,0,6>: Cost 4 vext2 <0,6,4,7>, <0,6,4,7>
- 3800454194U, // <4,7,0,7>: Cost 4 vext3 <7,0,7,4>, <7,0,7,4>
- 2632860317U, // <4,7,0,u>: Cost 3 vext2 <2,5,4,7>, LHS
- 2259064116U, // <4,7,1,0>: Cost 3 vrev <7,4,0,1>
- 3700630324U, // <4,7,1,1>: Cost 4 vext2 <1,5,4,7>, <1,1,1,1>
- 2632860570U, // <4,7,1,2>: Cost 3 vext2 <2,5,4,7>, <1,2,3,4>
- 3769635936U, // <4,7,1,3>: Cost 4 vext3 <1,u,3,4>, <7,1,3,5>
- 3656920374U, // <4,7,1,4>: Cost 4 vext1 <5,4,7,1>, RHS
- 3700630681U, // <4,7,1,5>: Cost 4 vext2 <1,5,4,7>, <1,5,4,7>
- 3701294314U, // <4,7,1,6>: Cost 4 vext2 <1,6,4,7>, <1,6,4,7>
- 3793818754U, // <4,7,1,7>: Cost 4 vext3 <5,u,7,4>, <7,1,7,3>
- 2259654012U, // <4,7,1,u>: Cost 3 vrev <7,4,u,1>
- 3656925286U, // <4,7,2,0>: Cost 4 vext1 <5,4,7,2>, LHS
- 3706603050U, // <4,7,2,1>: Cost 4 vext2 <2,5,4,7>, <2,1,4,3>
- 3706603112U, // <4,7,2,2>: Cost 4 vext2 <2,5,4,7>, <2,2,2,2>
- 2727744688U, // <4,7,2,3>: Cost 3 vext3 <7,2,3,4>, <7,2,3,4>
- 3705939745U, // <4,7,2,4>: Cost 4 vext2 <2,4,4,7>, <2,4,4,7>
- 2632861554U, // <4,7,2,5>: Cost 3 vext2 <2,5,4,7>, <2,5,4,7>
- 3706603450U, // <4,7,2,6>: Cost 4 vext2 <2,5,4,7>, <2,6,3,7>
- 3792491731U, // <4,7,2,7>: Cost 4 vext3 <5,6,7,4>, <7,2,7,3>
- 2634852453U, // <4,7,2,u>: Cost 3 vext2 <2,u,4,7>, <2,u,4,7>
- 3706603670U, // <4,7,3,0>: Cost 4 vext2 <2,5,4,7>, <3,0,1,2>
- 3662906266U, // <4,7,3,1>: Cost 4 vext1 <6,4,7,3>, <1,2,3,4>
- 3725183326U, // <4,7,3,2>: Cost 4 vext2 <5,6,4,7>, <3,2,5,4>
- 3706603932U, // <4,7,3,3>: Cost 4 vext2 <2,5,4,7>, <3,3,3,3>
- 3701295618U, // <4,7,3,4>: Cost 4 vext2 <1,6,4,7>, <3,4,5,6>
- 2638834251U, // <4,7,3,5>: Cost 3 vext2 <3,5,4,7>, <3,5,4,7>
- 2639497884U, // <4,7,3,6>: Cost 3 vext2 <3,6,4,7>, <3,6,4,7>
- 3802445093U, // <4,7,3,7>: Cost 4 vext3 <7,3,7,4>, <7,3,7,4>
- 2640825150U, // <4,7,3,u>: Cost 3 vext2 <3,u,4,7>, <3,u,4,7>
- 2718750004U, // <4,7,4,0>: Cost 3 vext3 <5,6,7,4>, <7,4,0,1>
- 3706604490U, // <4,7,4,1>: Cost 4 vext2 <2,5,4,7>, <4,1,2,3>
- 3656943474U, // <4,7,4,2>: Cost 4 vext1 <5,4,7,4>, <2,5,4,7>
- 3779884371U, // <4,7,4,3>: Cost 4 vext3 <3,5,7,4>, <7,4,3,5>
- 2259383643U, // <4,7,4,4>: Cost 3 vrev <7,4,4,4>
- 2632863030U, // <4,7,4,5>: Cost 3 vext2 <2,5,4,7>, RHS
- 2259531117U, // <4,7,4,6>: Cost 3 vrev <7,4,6,4>
- 3907340074U, // <4,7,4,7>: Cost 4 vuzpr <2,4,5,7>, <2,4,5,7>
- 2632863273U, // <4,7,4,u>: Cost 3 vext2 <2,5,4,7>, RHS
- 2913391610U, // <4,7,5,0>: Cost 3 vzipl RHS, <7,0,1,2>
- 3645006848U, // <4,7,5,1>: Cost 4 vext1 <3,4,7,5>, <1,3,5,7>
- 2589181646U, // <4,7,5,2>: Cost 3 vext1 <6,4,7,5>, <2,3,4,5>
- 3645008403U, // <4,7,5,3>: Cost 4 vext1 <3,4,7,5>, <3,4,7,5>
- 2913391974U, // <4,7,5,4>: Cost 3 vzipl RHS, <7,4,5,6>
- 2583211973U, // <4,7,5,5>: Cost 3 vext1 <5,4,7,5>, <5,4,7,5>
- 2589184670U, // <4,7,5,6>: Cost 3 vext1 <6,4,7,5>, <6,4,7,5>
- 2913392236U, // <4,7,5,7>: Cost 3 vzipl RHS, <7,7,7,7>
- 2913392258U, // <4,7,5,u>: Cost 3 vzipl RHS, <7,u,1,2>
- 1509474406U, // <4,7,6,0>: Cost 2 vext1 <5,4,7,6>, LHS
- 3047609338U, // <4,7,6,1>: Cost 3 vtrnl RHS, <7,0,1,2>
- 2583217768U, // <4,7,6,2>: Cost 3 vext1 <5,4,7,6>, <2,2,2,2>
- 2583218326U, // <4,7,6,3>: Cost 3 vext1 <5,4,7,6>, <3,0,1,2>
- 1509477686U, // <4,7,6,4>: Cost 2 vext1 <5,4,7,6>, RHS
- 1509478342U, // <4,7,6,5>: Cost 2 vext1 <5,4,7,6>, <5,4,7,6>
- 2583220730U, // <4,7,6,6>: Cost 3 vext1 <5,4,7,6>, <6,2,7,3>
- 3047609964U, // <4,7,6,7>: Cost 3 vtrnl RHS, <7,7,7,7>
- 1509480238U, // <4,7,6,u>: Cost 2 vext1 <5,4,7,6>, LHS
- 3650994278U, // <4,7,7,0>: Cost 4 vext1 <4,4,7,7>, LHS
- 3650995098U, // <4,7,7,1>: Cost 4 vext1 <4,4,7,7>, <1,2,3,4>
- 3650996010U, // <4,7,7,2>: Cost 4 vext1 <4,4,7,7>, <2,4,5,7>
- 3804804677U, // <4,7,7,3>: Cost 4 vext3 <7,7,3,4>, <7,7,3,4>
- 3650997486U, // <4,7,7,4>: Cost 4 vext1 <4,4,7,7>, <4,4,7,7>
- 2662725039U, // <4,7,7,5>: Cost 3 vext2 <7,5,4,7>, <7,5,4,7>
- 3662942880U, // <4,7,7,6>: Cost 4 vext1 <6,4,7,7>, <6,4,7,7>
- 2718750316U, // <4,7,7,7>: Cost 3 vext3 <5,6,7,4>, <7,7,7,7>
- 2664715938U, // <4,7,7,u>: Cost 3 vext2 <7,u,4,7>, <7,u,4,7>
- 1509490790U, // <4,7,u,0>: Cost 2 vext1 <5,4,7,u>, LHS
- 2632865582U, // <4,7,u,1>: Cost 3 vext2 <2,5,4,7>, LHS
- 2583234152U, // <4,7,u,2>: Cost 3 vext1 <5,4,7,u>, <2,2,2,2>
- 2583234710U, // <4,7,u,3>: Cost 3 vext1 <5,4,7,u>, <3,0,1,2>
- 1509494070U, // <4,7,u,4>: Cost 2 vext1 <5,4,7,u>, RHS
- 1509494728U, // <4,7,u,5>: Cost 2 vext1 <5,4,7,u>, <5,4,7,u>
- 2583237114U, // <4,7,u,6>: Cost 3 vext1 <5,4,7,u>, <6,2,7,3>
- 3047757420U, // <4,7,u,7>: Cost 3 vtrnl RHS, <7,7,7,7>
- 1509496622U, // <4,7,u,u>: Cost 2 vext1 <5,4,7,u>, LHS
- 2618933248U, // <4,u,0,0>: Cost 3 vext2 <0,2,4,u>, <0,0,0,0>
- 1545191526U, // <4,u,0,1>: Cost 2 vext2 <0,2,4,u>, LHS
- 1545191630U, // <4,u,0,2>: Cost 2 vext2 <0,2,4,u>, <0,2,4,u>
- 2691913445U, // <4,u,0,3>: Cost 3 vext3 <1,2,3,4>, <u,0,3,2>
- 2618933586U, // <4,u,0,4>: Cost 3 vext2 <0,2,4,u>, <0,4,1,5>
- 2265397305U, // <4,u,0,5>: Cost 3 vrev <u,4,5,0>
- 2595189625U, // <4,u,0,6>: Cost 3 vext1 <7,4,u,0>, <6,7,4,u>
- 2595190139U, // <4,u,0,7>: Cost 3 vext1 <7,4,u,0>, <7,4,u,0>
- 1545192093U, // <4,u,0,u>: Cost 2 vext2 <0,2,4,u>, LHS
- 2618934006U, // <4,u,1,0>: Cost 3 vext2 <0,2,4,u>, <1,0,3,2>
- 2618934068U, // <4,u,1,1>: Cost 3 vext2 <0,2,4,u>, <1,1,1,1>
- 1618171694U, // <4,u,1,2>: Cost 2 vext3 <1,2,3,4>, LHS
- 2618934232U, // <4,u,1,3>: Cost 3 vext2 <0,2,4,u>, <1,3,1,3>
- 2695894848U, // <4,u,1,4>: Cost 3 vext3 <1,u,3,4>, <u,1,4,3>
- 2618934416U, // <4,u,1,5>: Cost 3 vext2 <0,2,4,u>, <1,5,3,7>
- 3692676321U, // <4,u,1,6>: Cost 4 vext2 <0,2,4,u>, <1,6,3,7>
- 2718750555U, // <4,u,1,7>: Cost 3 vext3 <5,6,7,4>, <u,1,7,3>
- 1618171748U, // <4,u,1,u>: Cost 2 vext3 <1,2,3,4>, LHS
- 2553397350U, // <4,u,2,0>: Cost 3 vext1 <0,4,u,2>, LHS
- 2630215215U, // <4,u,2,1>: Cost 3 vext2 <2,1,4,u>, <2,1,4,u>
- 2618934888U, // <4,u,2,2>: Cost 3 vext2 <0,2,4,u>, <2,2,2,2>
- 1557800657U, // <4,u,2,3>: Cost 2 vext2 <2,3,4,u>, <2,3,4,u>
- 2618935065U, // <4,u,2,4>: Cost 3 vext2 <0,2,4,u>, <2,4,3,u>
- 2733864859U, // <4,u,2,5>: Cost 3 vext3 <u,2,5,4>, <u,2,5,4>
- 2618935226U, // <4,u,2,6>: Cost 3 vext2 <0,2,4,u>, <2,6,3,7>
- 2718750636U, // <4,u,2,7>: Cost 3 vext3 <5,6,7,4>, <u,2,7,3>
- 1561118822U, // <4,u,2,u>: Cost 2 vext2 <2,u,4,u>, <2,u,4,u>
- 2618935446U, // <4,u,3,0>: Cost 3 vext2 <0,2,4,u>, <3,0,1,2>
- 2779318422U, // <4,u,3,1>: Cost 3 vuzpl RHS, <3,0,1,2>
- 2636851545U, // <4,u,3,2>: Cost 3 vext2 <3,2,4,u>, <3,2,4,u>
- 2618935708U, // <4,u,3,3>: Cost 3 vext2 <0,2,4,u>, <3,3,3,3>
- 2618935810U, // <4,u,3,4>: Cost 3 vext2 <0,2,4,u>, <3,4,5,6>
- 2691913711U, // <4,u,3,5>: Cost 3 vext3 <1,2,3,4>, <u,3,5,7>
- 2588725862U, // <4,u,3,6>: Cost 3 vext1 <6,4,1,3>, <6,4,1,3>
- 2640169710U, // <4,u,3,7>: Cost 3 vext2 <3,7,4,u>, <3,7,4,u>
- 2618936094U, // <4,u,3,u>: Cost 3 vext2 <0,2,4,u>, <3,u,1,2>
- 1503559782U, // <4,u,4,0>: Cost 2 vext1 <4,4,u,4>, LHS
- 2692282391U, // <4,u,4,1>: Cost 3 vext3 <1,2,u,4>, <u,4,1,2>
- 2565359426U, // <4,u,4,2>: Cost 3 vext1 <2,4,u,4>, <2,4,u,4>
- 2571332123U, // <4,u,4,3>: Cost 3 vext1 <3,4,u,4>, <3,4,u,4>
- 161926454U, // <4,u,4,4>: Cost 1 vdup0 RHS
- 1545194806U, // <4,u,4,5>: Cost 2 vext2 <0,2,4,u>, RHS
- 1705577782U, // <4,u,4,6>: Cost 2 vuzpl RHS, RHS
- 2718750801U, // <4,u,4,7>: Cost 3 vext3 <5,6,7,4>, <u,4,7,6>
- 161926454U, // <4,u,4,u>: Cost 1 vdup0 RHS
- 1479164006U, // <4,u,5,0>: Cost 2 vext1 <0,4,1,5>, LHS
- 1839650606U, // <4,u,5,1>: Cost 2 vzipl RHS, LHS
- 2565367502U, // <4,u,5,2>: Cost 3 vext1 <2,4,u,5>, <2,3,4,5>
- 3089777309U, // <4,u,5,3>: Cost 3 vtrnr <0,4,1,5>, LHS
- 1479167286U, // <4,u,5,4>: Cost 2 vext1 <0,4,1,5>, RHS
- 1839650970U, // <4,u,5,5>: Cost 2 vzipl RHS, RHS
- 1618172058U, // <4,u,5,6>: Cost 2 vext3 <1,2,3,4>, RHS
- 3089780265U, // <4,u,5,7>: Cost 3 vtrnr <0,4,1,5>, RHS
- 1618172076U, // <4,u,5,u>: Cost 2 vext3 <1,2,3,4>, RHS
- 1479688294U, // <4,u,6,0>: Cost 2 vext1 <0,4,u,6>, LHS
- 2553430774U, // <4,u,6,1>: Cost 3 vext1 <0,4,u,6>, <1,0,3,2>
- 1973868334U, // <4,u,6,2>: Cost 2 vtrnl RHS, LHS
- 1497606685U, // <4,u,6,3>: Cost 2 vext1 <3,4,u,6>, <3,4,u,6>
- 1479691574U, // <4,u,6,4>: Cost 2 vext1 <0,4,u,6>, RHS
- 1509552079U, // <4,u,6,5>: Cost 2 vext1 <5,4,u,6>, <5,4,u,6>
- 1973868698U, // <4,u,6,6>: Cost 2 vtrnl RHS, RHS
- 27705344U, // <4,u,6,7>: Cost 0 copy RHS
- 27705344U, // <4,u,6,u>: Cost 0 copy RHS
- 2565382246U, // <4,u,7,0>: Cost 3 vext1 <2,4,u,7>, LHS
- 2565383066U, // <4,u,7,1>: Cost 3 vext1 <2,4,u,7>, <1,2,3,4>
- 2565384005U, // <4,u,7,2>: Cost 3 vext1 <2,4,u,7>, <2,4,u,7>
- 2661405966U, // <4,u,7,3>: Cost 3 vext2 <7,3,4,u>, <7,3,4,u>
- 2565385526U, // <4,u,7,4>: Cost 3 vext1 <2,4,u,7>, RHS
- 2779321702U, // <4,u,7,5>: Cost 3 vuzpl RHS, <7,4,5,6>
- 2589274793U, // <4,u,7,6>: Cost 3 vext1 <6,4,u,7>, <6,4,u,7>
- 2779321964U, // <4,u,7,7>: Cost 3 vuzpl RHS, <7,7,7,7>
- 2565388078U, // <4,u,7,u>: Cost 3 vext1 <2,4,u,7>, LHS
- 1479704678U, // <4,u,u,0>: Cost 2 vext1 <0,4,u,u>, LHS
- 1545197358U, // <4,u,u,1>: Cost 2 vext2 <0,2,4,u>, LHS
- 1618172261U, // <4,u,u,2>: Cost 2 vext3 <1,2,3,4>, LHS
- 1497623071U, // <4,u,u,3>: Cost 2 vext1 <3,4,u,u>, <3,4,u,u>
- 161926454U, // <4,u,u,4>: Cost 1 vdup0 RHS
- 1545197722U, // <4,u,u,5>: Cost 2 vext2 <0,2,4,u>, RHS
- 1618172301U, // <4,u,u,6>: Cost 2 vext3 <1,2,3,4>, RHS
- 27705344U, // <4,u,u,7>: Cost 0 copy RHS
- 27705344U, // <4,u,u,u>: Cost 0 copy RHS
- 2687123456U, // <5,0,0,0>: Cost 3 vext3 <0,4,1,5>, <0,0,0,0>
- 2687123466U, // <5,0,0,1>: Cost 3 vext3 <0,4,1,5>, <0,0,1,1>
- 2687123476U, // <5,0,0,2>: Cost 3 vext3 <0,4,1,5>, <0,0,2,2>
- 3710599434U, // <5,0,0,3>: Cost 4 vext2 <3,2,5,0>, <0,3,2,5>
- 2642166098U, // <5,0,0,4>: Cost 3 vext2 <4,1,5,0>, <0,4,1,5>
- 3657060306U, // <5,0,0,5>: Cost 4 vext1 <5,5,0,0>, <5,5,0,0>
- 3292094923U, // <5,0,0,6>: Cost 4 vrev <0,5,6,0>
- 3669005700U, // <5,0,0,7>: Cost 4 vext1 <7,5,0,0>, <7,5,0,0>
- 2687123530U, // <5,0,0,u>: Cost 3 vext3 <0,4,1,5>, <0,0,u,2>
- 2559434854U, // <5,0,1,0>: Cost 3 vext1 <1,5,0,1>, LHS
- 2559435887U, // <5,0,1,1>: Cost 3 vext1 <1,5,0,1>, <1,5,0,1>
- 1613381734U, // <5,0,1,2>: Cost 2 vext3 <0,4,1,5>, LHS
- 3698656256U, // <5,0,1,3>: Cost 4 vext2 <1,2,5,0>, <1,3,5,7>
- 2559438134U, // <5,0,1,4>: Cost 3 vext1 <1,5,0,1>, RHS
- 2583326675U, // <5,0,1,5>: Cost 3 vext1 <5,5,0,1>, <5,5,0,1>
- 3715908851U, // <5,0,1,6>: Cost 4 vext2 <4,1,5,0>, <1,6,5,7>
- 3657069562U, // <5,0,1,7>: Cost 4 vext1 <5,5,0,1>, <7,0,1,2>
- 1613381788U, // <5,0,1,u>: Cost 2 vext3 <0,4,1,5>, LHS
- 2686017700U, // <5,0,2,0>: Cost 3 vext3 <0,2,4,5>, <0,2,0,2>
- 2685796528U, // <5,0,2,1>: Cost 3 vext3 <0,2,1,5>, <0,2,1,5>
- 2698625208U, // <5,0,2,2>: Cost 3 vext3 <2,3,4,5>, <0,2,2,4>
- 2685944002U, // <5,0,2,3>: Cost 3 vext3 <0,2,3,5>, <0,2,3,5>
- 2686017739U, // <5,0,2,4>: Cost 3 vext3 <0,2,4,5>, <0,2,4,5>
- 2686091476U, // <5,0,2,5>: Cost 3 vext3 <0,2,5,5>, <0,2,5,5>
- 2725167324U, // <5,0,2,6>: Cost 3 vext3 <6,7,4,5>, <0,2,6,4>
- 2595280230U, // <5,0,2,7>: Cost 3 vext1 <7,5,0,2>, <7,4,5,6>
- 2686312687U, // <5,0,2,u>: Cost 3 vext3 <0,2,u,5>, <0,2,u,5>
- 3760128248U, // <5,0,3,0>: Cost 4 vext3 <0,3,0,5>, <0,3,0,5>
- 3759685888U, // <5,0,3,1>: Cost 4 vext3 <0,2,3,5>, <0,3,1,4>
- 2686533898U, // <5,0,3,2>: Cost 3 vext3 <0,3,2,5>, <0,3,2,5>
- 3760349459U, // <5,0,3,3>: Cost 4 vext3 <0,3,3,5>, <0,3,3,5>
- 2638187004U, // <5,0,3,4>: Cost 3 vext2 <3,4,5,0>, <3,4,5,0>
- 3776348452U, // <5,0,3,5>: Cost 4 vext3 <3,0,4,5>, <0,3,5,4>
- 3713256094U, // <5,0,3,6>: Cost 4 vext2 <3,6,5,0>, <3,6,5,0>
- 3914064896U, // <5,0,3,7>: Cost 4 vuzpr <3,5,7,0>, <1,3,5,7>
- 2686976320U, // <5,0,3,u>: Cost 3 vext3 <0,3,u,5>, <0,3,u,5>
- 2559459430U, // <5,0,4,0>: Cost 3 vext1 <1,5,0,4>, LHS
- 1613381970U, // <5,0,4,1>: Cost 2 vext3 <0,4,1,5>, <0,4,1,5>
- 2687123804U, // <5,0,4,2>: Cost 3 vext3 <0,4,1,5>, <0,4,2,6>
- 3761013092U, // <5,0,4,3>: Cost 4 vext3 <0,4,3,5>, <0,4,3,5>
- 2559462710U, // <5,0,4,4>: Cost 3 vext1 <1,5,0,4>, RHS
- 2638187830U, // <5,0,4,5>: Cost 3 vext2 <3,4,5,0>, RHS
- 3761234303U, // <5,0,4,6>: Cost 4 vext3 <0,4,6,5>, <0,4,6,5>
- 2646150600U, // <5,0,4,7>: Cost 3 vext2 <4,7,5,0>, <4,7,5,0>
- 1613381970U, // <5,0,4,u>: Cost 2 vext3 <0,4,1,5>, <0,4,1,5>
- 3766763926U, // <5,0,5,0>: Cost 4 vext3 <1,4,0,5>, <0,5,0,1>
- 2919268454U, // <5,0,5,1>: Cost 3 vzipl <5,5,5,5>, LHS
- 3053486182U, // <5,0,5,2>: Cost 3 vtrnl <5,5,5,5>, LHS
- 3723210589U, // <5,0,5,3>: Cost 4 vext2 <5,3,5,0>, <5,3,5,0>
- 3766763966U, // <5,0,5,4>: Cost 4 vext3 <1,4,0,5>, <0,5,4,5>
- 2650796031U, // <5,0,5,5>: Cost 3 vext2 <5,5,5,0>, <5,5,5,0>
- 3719893090U, // <5,0,5,6>: Cost 4 vext2 <4,7,5,0>, <5,6,7,0>
- 3914067254U, // <5,0,5,7>: Cost 4 vuzpr <3,5,7,0>, RHS
- 2919269021U, // <5,0,5,u>: Cost 3 vzipl <5,5,5,5>, LHS
- 4047519744U, // <5,0,6,0>: Cost 4 vzipr <3,4,5,6>, <0,0,0,0>
- 2920038502U, // <5,0,6,1>: Cost 3 vzipl <5,6,7,0>, LHS
- 3759759871U, // <5,0,6,2>: Cost 4 vext3 <0,2,4,5>, <0,6,2,7>
- 3645164070U, // <5,0,6,3>: Cost 4 vext1 <3,5,0,6>, <3,5,0,6>
- 3762414095U, // <5,0,6,4>: Cost 4 vext3 <0,6,4,5>, <0,6,4,5>
- 3993780690U, // <5,0,6,5>: Cost 4 vzipl <5,6,7,0>, <0,5,6,7>
- 3719893816U, // <5,0,6,6>: Cost 4 vext2 <4,7,5,0>, <6,6,6,6>
- 2662077302U, // <5,0,6,7>: Cost 3 vext2 <7,4,5,0>, <6,7,4,5>
- 2920039069U, // <5,0,6,u>: Cost 3 vzipl <5,6,7,0>, LHS
- 2565455974U, // <5,0,7,0>: Cost 3 vext1 <2,5,0,7>, LHS
- 2565456790U, // <5,0,7,1>: Cost 3 vext1 <2,5,0,7>, <1,2,3,0>
- 2565457742U, // <5,0,7,2>: Cost 3 vext1 <2,5,0,7>, <2,5,0,7>
- 3639199894U, // <5,0,7,3>: Cost 4 vext1 <2,5,0,7>, <3,0,1,2>
- 2565459254U, // <5,0,7,4>: Cost 3 vext1 <2,5,0,7>, RHS
- 2589347938U, // <5,0,7,5>: Cost 3 vext1 <6,5,0,7>, <5,6,7,0>
- 2589348530U, // <5,0,7,6>: Cost 3 vext1 <6,5,0,7>, <6,5,0,7>
- 4188456422U, // <5,0,7,7>: Cost 4 vtrnr RHS, <2,0,5,7>
- 2565461806U, // <5,0,7,u>: Cost 3 vext1 <2,5,0,7>, LHS
- 2687124106U, // <5,0,u,0>: Cost 3 vext3 <0,4,1,5>, <0,u,0,2>
- 1616036502U, // <5,0,u,1>: Cost 2 vext3 <0,u,1,5>, <0,u,1,5>
- 1613382301U, // <5,0,u,2>: Cost 2 vext3 <0,4,1,5>, LHS
- 2689925800U, // <5,0,u,3>: Cost 3 vext3 <0,u,3,5>, <0,u,3,5>
- 2687124146U, // <5,0,u,4>: Cost 3 vext3 <0,4,1,5>, <0,u,4,6>
- 2638190746U, // <5,0,u,5>: Cost 3 vext2 <3,4,5,0>, RHS
- 2589356723U, // <5,0,u,6>: Cost 3 vext1 <6,5,0,u>, <6,5,0,u>
- 2595280230U, // <5,0,u,7>: Cost 3 vext1 <7,5,0,2>, <7,4,5,6>
- 1613382355U, // <5,0,u,u>: Cost 2 vext3 <0,4,1,5>, LHS
- 2646818816U, // <5,1,0,0>: Cost 3 vext2 <4,u,5,1>, <0,0,0,0>
- 1573077094U, // <5,1,0,1>: Cost 2 vext2 <4,u,5,1>, LHS
- 2646818980U, // <5,1,0,2>: Cost 3 vext2 <4,u,5,1>, <0,2,0,2>
- 2687124214U, // <5,1,0,3>: Cost 3 vext3 <0,4,1,5>, <1,0,3,2>
- 2641510738U, // <5,1,0,4>: Cost 3 vext2 <4,0,5,1>, <0,4,1,5>
- 2641510814U, // <5,1,0,5>: Cost 3 vext2 <4,0,5,1>, <0,5,1,0>
- 3720561142U, // <5,1,0,6>: Cost 4 vext2 <4,u,5,1>, <0,6,1,7>
- 3298141357U, // <5,1,0,7>: Cost 4 vrev <1,5,7,0>
- 1573077661U, // <5,1,0,u>: Cost 2 vext2 <4,u,5,1>, LHS
- 2223891567U, // <5,1,1,0>: Cost 3 vrev <1,5,0,1>
- 2687124276U, // <5,1,1,1>: Cost 3 vext3 <0,4,1,5>, <1,1,1,1>
- 2646819734U, // <5,1,1,2>: Cost 3 vext2 <4,u,5,1>, <1,2,3,0>
- 2687124296U, // <5,1,1,3>: Cost 3 vext3 <0,4,1,5>, <1,1,3,3>
- 2691326803U, // <5,1,1,4>: Cost 3 vext3 <1,1,4,5>, <1,1,4,5>
- 2691400540U, // <5,1,1,5>: Cost 3 vext3 <1,1,5,5>, <1,1,5,5>
- 3765216101U, // <5,1,1,6>: Cost 4 vext3 <1,1,6,5>, <1,1,6,5>
- 3765289838U, // <5,1,1,7>: Cost 4 vext3 <1,1,7,5>, <1,1,7,5>
- 2687124341U, // <5,1,1,u>: Cost 3 vext3 <0,4,1,5>, <1,1,u,3>
- 3297641584U, // <5,1,2,0>: Cost 4 vrev <1,5,0,2>
- 3763520391U, // <5,1,2,1>: Cost 4 vext3 <0,u,1,5>, <1,2,1,3>
- 2646820456U, // <5,1,2,2>: Cost 3 vext2 <4,u,5,1>, <2,2,2,2>
- 2687124374U, // <5,1,2,3>: Cost 3 vext3 <0,4,1,5>, <1,2,3,0>
- 2691990436U, // <5,1,2,4>: Cost 3 vext3 <1,2,4,5>, <1,2,4,5>
- 2687124395U, // <5,1,2,5>: Cost 3 vext3 <0,4,1,5>, <1,2,5,3>
- 2646820794U, // <5,1,2,6>: Cost 3 vext2 <4,u,5,1>, <2,6,3,7>
- 3808199610U, // <5,1,2,7>: Cost 4 vext3 <u,3,4,5>, <1,2,7,0>
- 2687124419U, // <5,1,2,u>: Cost 3 vext3 <0,4,1,5>, <1,2,u,0>
- 2577440870U, // <5,1,3,0>: Cost 3 vext1 <4,5,1,3>, LHS
- 2687124440U, // <5,1,3,1>: Cost 3 vext3 <0,4,1,5>, <1,3,1,3>
- 3759686627U, // <5,1,3,2>: Cost 4 vext3 <0,2,3,5>, <1,3,2,5>
- 2692580332U, // <5,1,3,3>: Cost 3 vext3 <1,3,3,5>, <1,3,3,5>
- 2687124469U, // <5,1,3,4>: Cost 3 vext3 <0,4,1,5>, <1,3,4,5>
- 2685207552U, // <5,1,3,5>: Cost 3 vext3 <0,1,2,5>, <1,3,5,7>
- 3760866313U, // <5,1,3,6>: Cost 4 vext3 <0,4,1,5>, <1,3,6,7>
- 2692875280U, // <5,1,3,7>: Cost 3 vext3 <1,3,7,5>, <1,3,7,5>
- 2687124503U, // <5,1,3,u>: Cost 3 vext3 <0,4,1,5>, <1,3,u,3>
- 1567771538U, // <5,1,4,0>: Cost 2 vext2 <4,0,5,1>, <4,0,5,1>
- 2693096491U, // <5,1,4,1>: Cost 3 vext3 <1,4,1,5>, <1,4,1,5>
- 2693170228U, // <5,1,4,2>: Cost 3 vext3 <1,4,2,5>, <1,4,2,5>
- 2687124541U, // <5,1,4,3>: Cost 3 vext3 <0,4,1,5>, <1,4,3,5>
- 2646822096U, // <5,1,4,4>: Cost 3 vext2 <4,u,5,1>, <4,4,4,4>
- 1573080374U, // <5,1,4,5>: Cost 2 vext2 <4,u,5,1>, RHS
- 2646822260U, // <5,1,4,6>: Cost 3 vext2 <4,u,5,1>, <4,6,4,6>
- 3298174129U, // <5,1,4,7>: Cost 4 vrev <1,5,7,4>
- 1573080602U, // <5,1,4,u>: Cost 2 vext2 <4,u,5,1>, <4,u,5,1>
- 2687124591U, // <5,1,5,0>: Cost 3 vext3 <0,4,1,5>, <1,5,0,1>
- 2646822543U, // <5,1,5,1>: Cost 3 vext2 <4,u,5,1>, <5,1,0,1>
- 3760866433U, // <5,1,5,2>: Cost 4 vext3 <0,4,1,5>, <1,5,2,1>
- 2687124624U, // <5,1,5,3>: Cost 3 vext3 <0,4,1,5>, <1,5,3,7>
- 2687124631U, // <5,1,5,4>: Cost 3 vext3 <0,4,1,5>, <1,5,4,5>
- 2646822916U, // <5,1,5,5>: Cost 3 vext2 <4,u,5,1>, <5,5,5,5>
- 2646823010U, // <5,1,5,6>: Cost 3 vext2 <4,u,5,1>, <5,6,7,0>
- 2646823080U, // <5,1,5,7>: Cost 3 vext2 <4,u,5,1>, <5,7,5,7>
- 2687124663U, // <5,1,5,u>: Cost 3 vext3 <0,4,1,5>, <1,5,u,1>
- 2553577574U, // <5,1,6,0>: Cost 3 vext1 <0,5,1,6>, LHS
- 3763520719U, // <5,1,6,1>: Cost 4 vext3 <0,u,1,5>, <1,6,1,7>
- 2646823418U, // <5,1,6,2>: Cost 3 vext2 <4,u,5,1>, <6,2,7,3>
- 3760866529U, // <5,1,6,3>: Cost 4 vext3 <0,4,1,5>, <1,6,3,7>
- 2553580854U, // <5,1,6,4>: Cost 3 vext1 <0,5,1,6>, RHS
- 2687124723U, // <5,1,6,5>: Cost 3 vext3 <0,4,1,5>, <1,6,5,7>
- 2646823736U, // <5,1,6,6>: Cost 3 vext2 <4,u,5,1>, <6,6,6,6>
- 2646823758U, // <5,1,6,7>: Cost 3 vext2 <4,u,5,1>, <6,7,0,1>
- 2646823839U, // <5,1,6,u>: Cost 3 vext2 <4,u,5,1>, <6,u,0,1>
- 2559557734U, // <5,1,7,0>: Cost 3 vext1 <1,5,1,7>, LHS
- 2559558452U, // <5,1,7,1>: Cost 3 vext1 <1,5,1,7>, <1,1,1,1>
- 2571503270U, // <5,1,7,2>: Cost 3 vext1 <3,5,1,7>, <2,3,0,1>
- 2040971366U, // <5,1,7,3>: Cost 2 vtrnr RHS, LHS
- 2559561014U, // <5,1,7,4>: Cost 3 vext1 <1,5,1,7>, RHS
- 2595393232U, // <5,1,7,5>: Cost 3 vext1 <7,5,1,7>, <5,1,7,3>
- 4188455035U, // <5,1,7,6>: Cost 4 vtrnr RHS, <0,1,4,6>
- 2646824556U, // <5,1,7,7>: Cost 3 vext2 <4,u,5,1>, <7,7,7,7>
- 2040971371U, // <5,1,7,u>: Cost 2 vtrnr RHS, LHS
- 1591662326U, // <5,1,u,0>: Cost 2 vext2 <u,0,5,1>, <u,0,5,1>
- 1573082926U, // <5,1,u,1>: Cost 2 vext2 <4,u,5,1>, LHS
- 2695824760U, // <5,1,u,2>: Cost 3 vext3 <1,u,2,5>, <1,u,2,5>
- 2040979558U, // <5,1,u,3>: Cost 2 vtrnr RHS, LHS
- 2687124874U, // <5,1,u,4>: Cost 3 vext3 <0,4,1,5>, <1,u,4,5>
- 1573083290U, // <5,1,u,5>: Cost 2 vext2 <4,u,5,1>, RHS
- 2646825168U, // <5,1,u,6>: Cost 3 vext2 <4,u,5,1>, <u,6,3,7>
- 2646825216U, // <5,1,u,7>: Cost 3 vext2 <4,u,5,1>, <u,7,0,1>
- 2040979563U, // <5,1,u,u>: Cost 2 vtrnr RHS, LHS
- 3702652928U, // <5,2,0,0>: Cost 4 vext2 <1,u,5,2>, <0,0,0,0>
- 2628911206U, // <5,2,0,1>: Cost 3 vext2 <1,u,5,2>, LHS
- 2641518756U, // <5,2,0,2>: Cost 3 vext2 <4,0,5,2>, <0,2,0,2>
- 3759760847U, // <5,2,0,3>: Cost 4 vext3 <0,2,4,5>, <2,0,3,2>
- 3760866775U, // <5,2,0,4>: Cost 4 vext3 <0,4,1,5>, <2,0,4,1>
- 3759539680U, // <5,2,0,5>: Cost 4 vext3 <0,2,1,5>, <2,0,5,1>
- 3760866796U, // <5,2,0,6>: Cost 4 vext3 <0,4,1,5>, <2,0,6,4>
- 3304114054U, // <5,2,0,7>: Cost 4 vrev <2,5,7,0>
- 2628911773U, // <5,2,0,u>: Cost 3 vext2 <1,u,5,2>, LHS
- 2623603464U, // <5,2,1,0>: Cost 3 vext2 <1,0,5,2>, <1,0,5,2>
- 3698008921U, // <5,2,1,1>: Cost 4 vext2 <1,1,5,2>, <1,1,5,2>
- 3633325603U, // <5,2,1,2>: Cost 4 vext1 <1,5,2,1>, <2,1,3,5>
- 2687125027U, // <5,2,1,3>: Cost 3 vext3 <0,4,1,5>, <2,1,3,5>
- 3633327414U, // <5,2,1,4>: Cost 4 vext1 <1,5,2,1>, RHS
- 3759539760U, // <5,2,1,5>: Cost 4 vext3 <0,2,1,5>, <2,1,5,0>
- 3760866876U, // <5,2,1,6>: Cost 4 vext3 <0,4,1,5>, <2,1,6,3>
- 3304122247U, // <5,2,1,7>: Cost 4 vrev <2,5,7,1>
- 2687125072U, // <5,2,1,u>: Cost 3 vext3 <0,4,1,5>, <2,1,u,5>
- 3633332326U, // <5,2,2,0>: Cost 4 vext1 <1,5,2,2>, LHS
- 3759760992U, // <5,2,2,1>: Cost 4 vext3 <0,2,4,5>, <2,2,1,3>
- 2687125096U, // <5,2,2,2>: Cost 3 vext3 <0,4,1,5>, <2,2,2,2>
- 2687125106U, // <5,2,2,3>: Cost 3 vext3 <0,4,1,5>, <2,2,3,3>
- 2697963133U, // <5,2,2,4>: Cost 3 vext3 <2,2,4,5>, <2,2,4,5>
- 3759466120U, // <5,2,2,5>: Cost 4 vext3 <0,2,0,5>, <2,2,5,7>
- 3760866960U, // <5,2,2,6>: Cost 4 vext3 <0,4,1,5>, <2,2,6,6>
- 3771926168U, // <5,2,2,7>: Cost 4 vext3 <2,2,7,5>, <2,2,7,5>
- 2687125151U, // <5,2,2,u>: Cost 3 vext3 <0,4,1,5>, <2,2,u,3>
- 2687125158U, // <5,2,3,0>: Cost 3 vext3 <0,4,1,5>, <2,3,0,1>
- 2698405555U, // <5,2,3,1>: Cost 3 vext3 <2,3,1,5>, <2,3,1,5>
- 2577516238U, // <5,2,3,2>: Cost 3 vext1 <4,5,2,3>, <2,3,4,5>
- 3759687365U, // <5,2,3,3>: Cost 4 vext3 <0,2,3,5>, <2,3,3,5>
- 1624884942U, // <5,2,3,4>: Cost 2 vext3 <2,3,4,5>, <2,3,4,5>
- 2698700503U, // <5,2,3,5>: Cost 3 vext3 <2,3,5,5>, <2,3,5,5>
- 3772368608U, // <5,2,3,6>: Cost 4 vext3 <2,3,4,5>, <2,3,6,5>
- 3702655716U, // <5,2,3,7>: Cost 4 vext2 <1,u,5,2>, <3,7,3,7>
- 1625179890U, // <5,2,3,u>: Cost 2 vext3 <2,3,u,5>, <2,3,u,5>
- 2641521555U, // <5,2,4,0>: Cost 3 vext2 <4,0,5,2>, <4,0,5,2>
- 3772368642U, // <5,2,4,1>: Cost 4 vext3 <2,3,4,5>, <2,4,1,3>
- 2699142925U, // <5,2,4,2>: Cost 3 vext3 <2,4,2,5>, <2,4,2,5>
- 2698626838U, // <5,2,4,3>: Cost 3 vext3 <2,3,4,5>, <2,4,3,5>
- 2698626848U, // <5,2,4,4>: Cost 3 vext3 <2,3,4,5>, <2,4,4,6>
- 2628914486U, // <5,2,4,5>: Cost 3 vext2 <1,u,5,2>, RHS
- 2645503353U, // <5,2,4,6>: Cost 3 vext2 <4,6,5,2>, <4,6,5,2>
- 3304146826U, // <5,2,4,7>: Cost 4 vrev <2,5,7,4>
- 2628914729U, // <5,2,4,u>: Cost 3 vext2 <1,u,5,2>, RHS
- 2553643110U, // <5,2,5,0>: Cost 3 vext1 <0,5,2,5>, LHS
- 3758950227U, // <5,2,5,1>: Cost 4 vext3 <0,1,2,5>, <2,5,1,3>
- 3759761248U, // <5,2,5,2>: Cost 4 vext3 <0,2,4,5>, <2,5,2,7>
- 2982396006U, // <5,2,5,3>: Cost 3 vzipr <4,u,5,5>, LHS
- 2553646390U, // <5,2,5,4>: Cost 3 vext1 <0,5,2,5>, RHS
- 2553647108U, // <5,2,5,5>: Cost 3 vext1 <0,5,2,5>, <5,5,5,5>
- 3760867204U, // <5,2,5,6>: Cost 4 vext3 <0,4,1,5>, <2,5,6,7>
- 3702657141U, // <5,2,5,7>: Cost 4 vext2 <1,u,5,2>, <5,7,0,1>
- 2982396011U, // <5,2,5,u>: Cost 3 vzipr <4,u,5,5>, LHS
- 3627393126U, // <5,2,6,0>: Cost 4 vext1 <0,5,2,6>, LHS
- 3760867236U, // <5,2,6,1>: Cost 4 vext3 <0,4,1,5>, <2,6,1,3>
- 2645504506U, // <5,2,6,2>: Cost 3 vext2 <4,6,5,2>, <6,2,7,3>
- 2687125434U, // <5,2,6,3>: Cost 3 vext3 <0,4,1,5>, <2,6,3,7>
- 2700617665U, // <5,2,6,4>: Cost 3 vext3 <2,6,4,5>, <2,6,4,5>
- 3760867276U, // <5,2,6,5>: Cost 4 vext3 <0,4,1,5>, <2,6,5,7>
- 3763521493U, // <5,2,6,6>: Cost 4 vext3 <0,u,1,5>, <2,6,6,7>
- 3719246670U, // <5,2,6,7>: Cost 4 vext2 <4,6,5,2>, <6,7,0,1>
- 2687125479U, // <5,2,6,u>: Cost 3 vext3 <0,4,1,5>, <2,6,u,7>
- 2565603430U, // <5,2,7,0>: Cost 3 vext1 <2,5,2,7>, LHS
- 2553660150U, // <5,2,7,1>: Cost 3 vext1 <0,5,2,7>, <1,0,3,2>
- 2565605216U, // <5,2,7,2>: Cost 3 vext1 <2,5,2,7>, <2,5,2,7>
- 2961178726U, // <5,2,7,3>: Cost 3 vzipr <1,3,5,7>, LHS
- 2565606710U, // <5,2,7,4>: Cost 3 vext1 <2,5,2,7>, RHS
- 4034920552U, // <5,2,7,5>: Cost 4 vzipr <1,3,5,7>, <0,1,2,5>
- 3114713292U, // <5,2,7,6>: Cost 3 vtrnr RHS, <0,2,4,6>
- 3702658668U, // <5,2,7,7>: Cost 4 vext2 <1,u,5,2>, <7,7,7,7>
- 2961178731U, // <5,2,7,u>: Cost 3 vzipr <1,3,5,7>, LHS
- 2687125563U, // <5,2,u,0>: Cost 3 vext3 <0,4,1,5>, <2,u,0,1>
- 2628917038U, // <5,2,u,1>: Cost 3 vext2 <1,u,5,2>, LHS
- 2565613409U, // <5,2,u,2>: Cost 3 vext1 <2,5,2,u>, <2,5,2,u>
- 2687125592U, // <5,2,u,3>: Cost 3 vext3 <0,4,1,5>, <2,u,3,3>
- 1628203107U, // <5,2,u,4>: Cost 2 vext3 <2,u,4,5>, <2,u,4,5>
- 2628917402U, // <5,2,u,5>: Cost 3 vext2 <1,u,5,2>, RHS
- 2702092405U, // <5,2,u,6>: Cost 3 vext3 <2,u,6,5>, <2,u,6,5>
- 3304179598U, // <5,2,u,7>: Cost 4 vrev <2,5,7,u>
- 1628498055U, // <5,2,u,u>: Cost 2 vext3 <2,u,u,5>, <2,u,u,5>
- 3760867467U, // <5,3,0,0>: Cost 4 vext3 <0,4,1,5>, <3,0,0,0>
- 2687125654U, // <5,3,0,1>: Cost 3 vext3 <0,4,1,5>, <3,0,1,2>
- 3759761565U, // <5,3,0,2>: Cost 4 vext3 <0,2,4,5>, <3,0,2,0>
- 3633391766U, // <5,3,0,3>: Cost 4 vext1 <1,5,3,0>, <3,0,1,2>
- 2687125680U, // <5,3,0,4>: Cost 3 vext3 <0,4,1,5>, <3,0,4,1>
- 3760277690U, // <5,3,0,5>: Cost 4 vext3 <0,3,2,5>, <3,0,5,2>
- 3310013014U, // <5,3,0,6>: Cost 4 vrev <3,5,6,0>
- 2236344927U, // <5,3,0,7>: Cost 3 vrev <3,5,7,0>
- 2687125717U, // <5,3,0,u>: Cost 3 vext3 <0,4,1,5>, <3,0,u,2>
- 3760867551U, // <5,3,1,0>: Cost 4 vext3 <0,4,1,5>, <3,1,0,3>
- 3760867558U, // <5,3,1,1>: Cost 4 vext3 <0,4,1,5>, <3,1,1,1>
- 2624938923U, // <5,3,1,2>: Cost 3 vext2 <1,2,5,3>, <1,2,5,3>
- 2703198460U, // <5,3,1,3>: Cost 3 vext3 <3,1,3,5>, <3,1,3,5>
- 3760867587U, // <5,3,1,4>: Cost 4 vext3 <0,4,1,5>, <3,1,4,3>
- 2636219536U, // <5,3,1,5>: Cost 3 vext2 <3,1,5,3>, <1,5,3,7>
- 3698681075U, // <5,3,1,6>: Cost 4 vext2 <1,2,5,3>, <1,6,5,7>
- 2703493408U, // <5,3,1,7>: Cost 3 vext3 <3,1,7,5>, <3,1,7,5>
- 2628920721U, // <5,3,1,u>: Cost 3 vext2 <1,u,5,3>, <1,u,5,3>
- 3766765870U, // <5,3,2,0>: Cost 4 vext3 <1,4,0,5>, <3,2,0,1>
- 3698681379U, // <5,3,2,1>: Cost 4 vext2 <1,2,5,3>, <2,1,3,5>
- 3760867649U, // <5,3,2,2>: Cost 4 vext3 <0,4,1,5>, <3,2,2,2>
- 2698627404U, // <5,3,2,3>: Cost 3 vext3 <2,3,4,5>, <3,2,3,4>
- 2703935830U, // <5,3,2,4>: Cost 3 vext3 <3,2,4,5>, <3,2,4,5>
- 2698627422U, // <5,3,2,5>: Cost 3 vext3 <2,3,4,5>, <3,2,5,4>
- 3760867686U, // <5,3,2,6>: Cost 4 vext3 <0,4,1,5>, <3,2,6,3>
- 3769788783U, // <5,3,2,7>: Cost 4 vext3 <1,u,5,5>, <3,2,7,3>
- 2701945209U, // <5,3,2,u>: Cost 3 vext3 <2,u,4,5>, <3,2,u,4>
- 3760867711U, // <5,3,3,0>: Cost 4 vext3 <0,4,1,5>, <3,3,0,1>
- 2636220684U, // <5,3,3,1>: Cost 3 vext2 <3,1,5,3>, <3,1,5,3>
- 3772369298U, // <5,3,3,2>: Cost 4 vext3 <2,3,4,5>, <3,3,2,2>
- 2687125916U, // <5,3,3,3>: Cost 3 vext3 <0,4,1,5>, <3,3,3,3>
- 2704599463U, // <5,3,3,4>: Cost 3 vext3 <3,3,4,5>, <3,3,4,5>
- 2704673200U, // <5,3,3,5>: Cost 3 vext3 <3,3,5,5>, <3,3,5,5>
- 3709962935U, // <5,3,3,6>: Cost 4 vext2 <3,1,5,3>, <3,6,7,7>
- 3772369346U, // <5,3,3,7>: Cost 4 vext3 <2,3,4,5>, <3,3,7,5>
- 2704894411U, // <5,3,3,u>: Cost 3 vext3 <3,3,u,5>, <3,3,u,5>
- 2704968148U, // <5,3,4,0>: Cost 3 vext3 <3,4,0,5>, <3,4,0,5>
- 3698682850U, // <5,3,4,1>: Cost 4 vext2 <1,2,5,3>, <4,1,5,0>
- 2642857014U, // <5,3,4,2>: Cost 3 vext2 <4,2,5,3>, <4,2,5,3>
- 2705189359U, // <5,3,4,3>: Cost 3 vext3 <3,4,3,5>, <3,4,3,5>
- 2705263096U, // <5,3,4,4>: Cost 3 vext3 <3,4,4,5>, <3,4,4,5>
- 2685946370U, // <5,3,4,5>: Cost 3 vext3 <0,2,3,5>, <3,4,5,6>
- 3779152394U, // <5,3,4,6>: Cost 4 vext3 <3,4,6,5>, <3,4,6,5>
- 2236377699U, // <5,3,4,7>: Cost 3 vrev <3,5,7,4>
- 2687126045U, // <5,3,4,u>: Cost 3 vext3 <0,4,1,5>, <3,4,u,6>
- 2571632742U, // <5,3,5,0>: Cost 3 vext1 <3,5,3,5>, LHS
- 2559689870U, // <5,3,5,1>: Cost 3 vext1 <1,5,3,5>, <1,5,3,5>
- 2571634382U, // <5,3,5,2>: Cost 3 vext1 <3,5,3,5>, <2,3,4,5>
- 2571635264U, // <5,3,5,3>: Cost 3 vext1 <3,5,3,5>, <3,5,3,5>
- 2571636022U, // <5,3,5,4>: Cost 3 vext1 <3,5,3,5>, RHS
- 2559692804U, // <5,3,5,5>: Cost 3 vext1 <1,5,3,5>, <5,5,5,5>
- 3720581218U, // <5,3,5,6>: Cost 4 vext2 <4,u,5,3>, <5,6,7,0>
- 2236385892U, // <5,3,5,7>: Cost 3 vrev <3,5,7,5>
- 2571638574U, // <5,3,5,u>: Cost 3 vext1 <3,5,3,5>, LHS
- 2565668966U, // <5,3,6,0>: Cost 3 vext1 <2,5,3,6>, LHS
- 3633439887U, // <5,3,6,1>: Cost 4 vext1 <1,5,3,6>, <1,5,3,6>
- 2565670760U, // <5,3,6,2>: Cost 3 vext1 <2,5,3,6>, <2,5,3,6>
- 2565671426U, // <5,3,6,3>: Cost 3 vext1 <2,5,3,6>, <3,4,5,6>
- 2565672246U, // <5,3,6,4>: Cost 3 vext1 <2,5,3,6>, RHS
- 3639414630U, // <5,3,6,5>: Cost 4 vext1 <2,5,3,6>, <5,3,6,0>
- 4047521640U, // <5,3,6,6>: Cost 4 vzipr <3,4,5,6>, <2,5,3,6>
- 2725169844U, // <5,3,6,7>: Cost 3 vext3 <6,7,4,5>, <3,6,7,4>
- 2565674798U, // <5,3,6,u>: Cost 3 vext1 <2,5,3,6>, LHS
- 1485963366U, // <5,3,7,0>: Cost 2 vext1 <1,5,3,7>, LHS
- 1485964432U, // <5,3,7,1>: Cost 2 vext1 <1,5,3,7>, <1,5,3,7>
- 2559706728U, // <5,3,7,2>: Cost 3 vext1 <1,5,3,7>, <2,2,2,2>
- 2559707286U, // <5,3,7,3>: Cost 3 vext1 <1,5,3,7>, <3,0,1,2>
- 1485966646U, // <5,3,7,4>: Cost 2 vext1 <1,5,3,7>, RHS
- 2559708880U, // <5,3,7,5>: Cost 3 vext1 <1,5,3,7>, <5,1,7,3>
- 2601513466U, // <5,3,7,6>: Cost 3 vext1 <u,5,3,7>, <6,2,7,3>
- 3114714112U, // <5,3,7,7>: Cost 3 vtrnr RHS, <1,3,5,7>
- 1485969198U, // <5,3,7,u>: Cost 2 vext1 <1,5,3,7>, LHS
- 1485971558U, // <5,3,u,0>: Cost 2 vext1 <1,5,3,u>, LHS
- 1485972625U, // <5,3,u,1>: Cost 2 vext1 <1,5,3,u>, <1,5,3,u>
- 2559714920U, // <5,3,u,2>: Cost 3 vext1 <1,5,3,u>, <2,2,2,2>
- 2559715478U, // <5,3,u,3>: Cost 3 vext1 <1,5,3,u>, <3,0,1,2>
- 1485974838U, // <5,3,u,4>: Cost 2 vext1 <1,5,3,u>, RHS
- 2687126342U, // <5,3,u,5>: Cost 3 vext3 <0,4,1,5>, <3,u,5,6>
- 2601521658U, // <5,3,u,6>: Cost 3 vext1 <u,5,3,u>, <6,2,7,3>
- 2236410471U, // <5,3,u,7>: Cost 3 vrev <3,5,7,u>
- 1485977390U, // <5,3,u,u>: Cost 2 vext1 <1,5,3,u>, LHS
- 3627491430U, // <5,4,0,0>: Cost 4 vext1 <0,5,4,0>, LHS
- 2636890214U, // <5,4,0,1>: Cost 3 vext2 <3,2,5,4>, LHS
- 3703333028U, // <5,4,0,2>: Cost 4 vext2 <2,0,5,4>, <0,2,0,2>
- 3782249348U, // <5,4,0,3>: Cost 4 vext3 <4,0,3,5>, <4,0,3,5>
- 2642198866U, // <5,4,0,4>: Cost 3 vext2 <4,1,5,4>, <0,4,1,5>
- 2687126418U, // <5,4,0,5>: Cost 3 vext3 <0,4,1,5>, <4,0,5,1>
- 2242243887U, // <5,4,0,6>: Cost 3 vrev <4,5,6,0>
- 3316059448U, // <5,4,0,7>: Cost 4 vrev <4,5,7,0>
- 2636890781U, // <5,4,0,u>: Cost 3 vext2 <3,2,5,4>, LHS
- 2241809658U, // <5,4,1,0>: Cost 3 vrev <4,5,0,1>
- 3698025307U, // <5,4,1,1>: Cost 4 vext2 <1,1,5,4>, <1,1,5,4>
- 3698688940U, // <5,4,1,2>: Cost 4 vext2 <1,2,5,4>, <1,2,5,4>
- 3698689024U, // <5,4,1,3>: Cost 4 vext2 <1,2,5,4>, <1,3,5,7>
- 3700016206U, // <5,4,1,4>: Cost 4 vext2 <1,4,5,4>, <1,4,5,4>
- 2687126498U, // <5,4,1,5>: Cost 3 vext3 <0,4,1,5>, <4,1,5,0>
- 3760868336U, // <5,4,1,6>: Cost 4 vext3 <0,4,1,5>, <4,1,6,5>
- 3316067641U, // <5,4,1,7>: Cost 4 vrev <4,5,7,1>
- 2242399554U, // <5,4,1,u>: Cost 3 vrev <4,5,u,1>
- 3703334371U, // <5,4,2,0>: Cost 4 vext2 <2,0,5,4>, <2,0,5,4>
- 3703998004U, // <5,4,2,1>: Cost 4 vext2 <2,1,5,4>, <2,1,5,4>
- 3704661637U, // <5,4,2,2>: Cost 4 vext2 <2,2,5,4>, <2,2,5,4>
- 2636891854U, // <5,4,2,3>: Cost 3 vext2 <3,2,5,4>, <2,3,4,5>
- 3705988903U, // <5,4,2,4>: Cost 4 vext2 <2,4,5,4>, <2,4,5,4>
- 2698628150U, // <5,4,2,5>: Cost 3 vext3 <2,3,4,5>, <4,2,5,3>
- 3760868415U, // <5,4,2,6>: Cost 4 vext3 <0,4,1,5>, <4,2,6,3>
- 3783871562U, // <5,4,2,7>: Cost 4 vext3 <4,2,7,5>, <4,2,7,5>
- 2666752099U, // <5,4,2,u>: Cost 3 vext2 <u,2,5,4>, <2,u,4,5>
- 3639459942U, // <5,4,3,0>: Cost 4 vext1 <2,5,4,3>, LHS
- 3709970701U, // <5,4,3,1>: Cost 4 vext2 <3,1,5,4>, <3,1,5,4>
- 2636892510U, // <5,4,3,2>: Cost 3 vext2 <3,2,5,4>, <3,2,5,4>
- 3710634396U, // <5,4,3,3>: Cost 4 vext2 <3,2,5,4>, <3,3,3,3>
- 2638219776U, // <5,4,3,4>: Cost 3 vext2 <3,4,5,4>, <3,4,5,4>
- 3766987908U, // <5,4,3,5>: Cost 4 vext3 <1,4,3,5>, <4,3,5,0>
- 2710719634U, // <5,4,3,6>: Cost 3 vext3 <4,3,6,5>, <4,3,6,5>
- 3914097664U, // <5,4,3,7>: Cost 4 vuzpr <3,5,7,4>, <1,3,5,7>
- 2640874308U, // <5,4,3,u>: Cost 3 vext2 <3,u,5,4>, <3,u,5,4>
- 2583642214U, // <5,4,4,0>: Cost 3 vext1 <5,5,4,4>, LHS
- 2642201574U, // <5,4,4,1>: Cost 3 vext2 <4,1,5,4>, <4,1,5,4>
- 3710635062U, // <5,4,4,2>: Cost 4 vext2 <3,2,5,4>, <4,2,5,3>
- 3717270664U, // <5,4,4,3>: Cost 4 vext2 <4,3,5,4>, <4,3,5,4>
- 2713963728U, // <5,4,4,4>: Cost 3 vext3 <4,u,5,5>, <4,4,4,4>
- 1637567706U, // <5,4,4,5>: Cost 2 vext3 <4,4,5,5>, <4,4,5,5>
- 2242276659U, // <5,4,4,6>: Cost 3 vrev <4,5,6,4>
- 2646183372U, // <5,4,4,7>: Cost 3 vext2 <4,7,5,4>, <4,7,5,4>
- 1637788917U, // <5,4,4,u>: Cost 2 vext3 <4,4,u,5>, <4,4,u,5>
- 2559762534U, // <5,4,5,0>: Cost 3 vext1 <1,5,4,5>, LHS
- 2559763607U, // <5,4,5,1>: Cost 3 vext1 <1,5,4,5>, <1,5,4,5>
- 2698628366U, // <5,4,5,2>: Cost 3 vext3 <2,3,4,5>, <4,5,2,3>
- 3633506454U, // <5,4,5,3>: Cost 4 vext1 <1,5,4,5>, <3,0,1,2>
- 2559765814U, // <5,4,5,4>: Cost 3 vext1 <1,5,4,5>, RHS
- 2583654395U, // <5,4,5,5>: Cost 3 vext1 <5,5,4,5>, <5,5,4,5>
- 1613385014U, // <5,4,5,6>: Cost 2 vext3 <0,4,1,5>, RHS
- 3901639990U, // <5,4,5,7>: Cost 4 vuzpr <1,5,0,4>, RHS
- 1613385032U, // <5,4,5,u>: Cost 2 vext3 <0,4,1,5>, RHS
- 2559770726U, // <5,4,6,0>: Cost 3 vext1 <1,5,4,6>, LHS
- 2559771648U, // <5,4,6,1>: Cost 3 vext1 <1,5,4,6>, <1,3,5,7>
- 3633514088U, // <5,4,6,2>: Cost 4 vext1 <1,5,4,6>, <2,2,2,2>
- 2571717122U, // <5,4,6,3>: Cost 3 vext1 <3,5,4,6>, <3,4,5,6>
- 2559774006U, // <5,4,6,4>: Cost 3 vext1 <1,5,4,6>, RHS
- 2712636796U, // <5,4,6,5>: Cost 3 vext3 <4,6,5,5>, <4,6,5,5>
- 3760868743U, // <5,4,6,6>: Cost 4 vext3 <0,4,1,5>, <4,6,6,7>
- 2712784270U, // <5,4,6,7>: Cost 3 vext3 <4,6,7,5>, <4,6,7,5>
- 2559776558U, // <5,4,6,u>: Cost 3 vext1 <1,5,4,6>, LHS
- 2565750886U, // <5,4,7,0>: Cost 3 vext1 <2,5,4,7>, LHS
- 2565751706U, // <5,4,7,1>: Cost 3 vext1 <2,5,4,7>, <1,2,3,4>
- 2565752690U, // <5,4,7,2>: Cost 3 vext1 <2,5,4,7>, <2,5,4,7>
- 2571725387U, // <5,4,7,3>: Cost 3 vext1 <3,5,4,7>, <3,5,4,7>
- 2565754166U, // <5,4,7,4>: Cost 3 vext1 <2,5,4,7>, RHS
- 3114713426U, // <5,4,7,5>: Cost 3 vtrnr RHS, <0,4,1,5>
- 94817590U, // <5,4,7,6>: Cost 1 vrev RHS
- 2595616175U, // <5,4,7,7>: Cost 3 vext1 <7,5,4,7>, <7,5,4,7>
- 94965064U, // <5,4,7,u>: Cost 1 vrev RHS
- 2559787110U, // <5,4,u,0>: Cost 3 vext1 <1,5,4,u>, LHS
- 2559788186U, // <5,4,u,1>: Cost 3 vext1 <1,5,4,u>, <1,5,4,u>
- 2242014483U, // <5,4,u,2>: Cost 3 vrev <4,5,2,u>
- 2667419628U, // <5,4,u,3>: Cost 3 vext2 <u,3,5,4>, <u,3,5,4>
- 2559790390U, // <5,4,u,4>: Cost 3 vext1 <1,5,4,u>, RHS
- 1640222238U, // <5,4,u,5>: Cost 2 vext3 <4,u,5,5>, <4,u,5,5>
- 94825783U, // <5,4,u,6>: Cost 1 vrev RHS
- 2714111536U, // <5,4,u,7>: Cost 3 vext3 <4,u,7,5>, <4,u,7,5>
- 94973257U, // <5,4,u,u>: Cost 1 vrev RHS
- 2646851584U, // <5,5,0,0>: Cost 3 vext2 <4,u,5,5>, <0,0,0,0>
- 1573109862U, // <5,5,0,1>: Cost 2 vext2 <4,u,5,5>, LHS
- 2646851748U, // <5,5,0,2>: Cost 3 vext2 <4,u,5,5>, <0,2,0,2>
- 3760279130U, // <5,5,0,3>: Cost 4 vext3 <0,3,2,5>, <5,0,3,2>
- 2687127138U, // <5,5,0,4>: Cost 3 vext3 <0,4,1,5>, <5,0,4,1>
- 2248142847U, // <5,5,0,5>: Cost 3 vrev <5,5,5,0>
- 3720593910U, // <5,5,0,6>: Cost 4 vext2 <4,u,5,5>, <0,6,1,7>
- 4182502710U, // <5,5,0,7>: Cost 4 vtrnr <3,5,7,0>, RHS
- 1573110429U, // <5,5,0,u>: Cost 2 vext2 <4,u,5,5>, LHS
- 2646852342U, // <5,5,1,0>: Cost 3 vext2 <4,u,5,5>, <1,0,3,2>
- 2624291676U, // <5,5,1,1>: Cost 3 vext2 <1,1,5,5>, <1,1,5,5>
- 2646852502U, // <5,5,1,2>: Cost 3 vext2 <4,u,5,5>, <1,2,3,0>
- 2646852568U, // <5,5,1,3>: Cost 3 vext2 <4,u,5,5>, <1,3,1,3>
- 2715217591U, // <5,5,1,4>: Cost 3 vext3 <5,1,4,5>, <5,1,4,5>
- 2628936848U, // <5,5,1,5>: Cost 3 vext2 <1,u,5,5>, <1,5,3,7>
- 3698033907U, // <5,5,1,6>: Cost 4 vext2 <1,1,5,5>, <1,6,5,7>
- 2713964240U, // <5,5,1,7>: Cost 3 vext3 <4,u,5,5>, <5,1,7,3>
- 2628937107U, // <5,5,1,u>: Cost 3 vext2 <1,u,5,5>, <1,u,5,5>
- 3645497446U, // <5,5,2,0>: Cost 4 vext1 <3,5,5,2>, LHS
- 3760869099U, // <5,5,2,1>: Cost 4 vext3 <0,4,1,5>, <5,2,1,3>
- 2646853224U, // <5,5,2,2>: Cost 3 vext2 <4,u,5,5>, <2,2,2,2>
- 2698628862U, // <5,5,2,3>: Cost 3 vext3 <2,3,4,5>, <5,2,3,4>
- 3772370694U, // <5,5,2,4>: Cost 4 vext3 <2,3,4,5>, <5,2,4,3>
- 2713964303U, // <5,5,2,5>: Cost 3 vext3 <4,u,5,5>, <5,2,5,3>
- 2646853562U, // <5,5,2,6>: Cost 3 vext2 <4,u,5,5>, <2,6,3,7>
- 4038198272U, // <5,5,2,7>: Cost 4 vzipr <1,u,5,2>, <1,3,5,7>
- 2701946667U, // <5,5,2,u>: Cost 3 vext3 <2,u,4,5>, <5,2,u,4>
- 2646853782U, // <5,5,3,0>: Cost 3 vext2 <4,u,5,5>, <3,0,1,2>
- 3698034922U, // <5,5,3,1>: Cost 4 vext2 <1,1,5,5>, <3,1,1,5>
- 3702679919U, // <5,5,3,2>: Cost 4 vext2 <1,u,5,5>, <3,2,7,3>
- 2637564336U, // <5,5,3,3>: Cost 3 vext2 <3,3,5,5>, <3,3,5,5>
- 2646854146U, // <5,5,3,4>: Cost 3 vext2 <4,u,5,5>, <3,4,5,6>
- 2638891602U, // <5,5,3,5>: Cost 3 vext2 <3,5,5,5>, <3,5,5,5>
- 3702680247U, // <5,5,3,6>: Cost 4 vext2 <1,u,5,5>, <3,6,7,7>
- 3702680259U, // <5,5,3,7>: Cost 4 vext2 <1,u,5,5>, <3,7,0,1>
- 2646854430U, // <5,5,3,u>: Cost 3 vext2 <4,u,5,5>, <3,u,1,2>
- 2646854546U, // <5,5,4,0>: Cost 3 vext2 <4,u,5,5>, <4,0,5,1>
- 2642209767U, // <5,5,4,1>: Cost 3 vext2 <4,1,5,5>, <4,1,5,5>
- 3711306806U, // <5,5,4,2>: Cost 4 vext2 <3,3,5,5>, <4,2,5,3>
- 3645516369U, // <5,5,4,3>: Cost 4 vext1 <3,5,5,4>, <3,5,5,4>
- 1570458842U, // <5,5,4,4>: Cost 2 vext2 <4,4,5,5>, <4,4,5,5>
- 1573113142U, // <5,5,4,5>: Cost 2 vext2 <4,u,5,5>, RHS
- 2645527932U, // <5,5,4,6>: Cost 3 vext2 <4,6,5,5>, <4,6,5,5>
- 2713964486U, // <5,5,4,7>: Cost 3 vext3 <4,u,5,5>, <5,4,7,6>
- 1573113374U, // <5,5,4,u>: Cost 2 vext2 <4,u,5,5>, <4,u,5,5>
- 1509982310U, // <5,5,5,0>: Cost 2 vext1 <5,5,5,5>, LHS
- 2646855376U, // <5,5,5,1>: Cost 3 vext2 <4,u,5,5>, <5,1,7,3>
- 2583725672U, // <5,5,5,2>: Cost 3 vext1 <5,5,5,5>, <2,2,2,2>
- 2583726230U, // <5,5,5,3>: Cost 3 vext1 <5,5,5,5>, <3,0,1,2>
- 1509985590U, // <5,5,5,4>: Cost 2 vext1 <5,5,5,5>, RHS
- 229035318U, // <5,5,5,5>: Cost 1 vdup1 RHS
- 2646855778U, // <5,5,5,6>: Cost 3 vext2 <4,u,5,5>, <5,6,7,0>
- 2646855848U, // <5,5,5,7>: Cost 3 vext2 <4,u,5,5>, <5,7,5,7>
- 229035318U, // <5,5,5,u>: Cost 1 vdup1 RHS
- 2577760358U, // <5,5,6,0>: Cost 3 vext1 <4,5,5,6>, LHS
- 3633587361U, // <5,5,6,1>: Cost 4 vext1 <1,5,5,6>, <1,5,5,6>
- 2646856186U, // <5,5,6,2>: Cost 3 vext2 <4,u,5,5>, <6,2,7,3>
- 3633588738U, // <5,5,6,3>: Cost 4 vext1 <1,5,5,6>, <3,4,5,6>
- 2718535756U, // <5,5,6,4>: Cost 3 vext3 <5,6,4,5>, <5,6,4,5>
- 2644202223U, // <5,5,6,5>: Cost 3 vext2 <4,4,5,5>, <6,5,7,5>
- 2973780482U, // <5,5,6,6>: Cost 3 vzipr <3,4,5,6>, <3,4,5,6>
- 2646856526U, // <5,5,6,7>: Cost 3 vext2 <4,u,5,5>, <6,7,0,1>
- 2646856607U, // <5,5,6,u>: Cost 3 vext2 <4,u,5,5>, <6,u,0,1>
- 2571796582U, // <5,5,7,0>: Cost 3 vext1 <3,5,5,7>, LHS
- 3633595392U, // <5,5,7,1>: Cost 4 vext1 <1,5,5,7>, <1,3,5,7>
- 2571798222U, // <5,5,7,2>: Cost 3 vext1 <3,5,5,7>, <2,3,4,5>
- 2571799124U, // <5,5,7,3>: Cost 3 vext1 <3,5,5,7>, <3,5,5,7>
- 2571799862U, // <5,5,7,4>: Cost 3 vext1 <3,5,5,7>, RHS
- 3114717188U, // <5,5,7,5>: Cost 3 vtrnr RHS, <5,5,5,5>
- 4034923010U, // <5,5,7,6>: Cost 4 vzipr <1,3,5,7>, <3,4,5,6>
- 2040974646U, // <5,5,7,7>: Cost 2 vtrnr RHS, RHS
- 2040974647U, // <5,5,7,u>: Cost 2 vtrnr RHS, RHS
- 1509982310U, // <5,5,u,0>: Cost 2 vext1 <5,5,5,5>, LHS
- 1573115694U, // <5,5,u,1>: Cost 2 vext2 <4,u,5,5>, LHS
- 2571806414U, // <5,5,u,2>: Cost 3 vext1 <3,5,5,u>, <2,3,4,5>
- 2571807317U, // <5,5,u,3>: Cost 3 vext1 <3,5,5,u>, <3,5,5,u>
- 1509985590U, // <5,5,u,4>: Cost 2 vext1 <5,5,5,5>, RHS
- 229035318U, // <5,5,u,5>: Cost 1 vdup1 RHS
- 2646857936U, // <5,5,u,6>: Cost 3 vext2 <4,u,5,5>, <u,6,3,7>
- 2040982838U, // <5,5,u,7>: Cost 2 vtrnr RHS, RHS
- 229035318U, // <5,5,u,u>: Cost 1 vdup1 RHS
- 2638233600U, // <5,6,0,0>: Cost 3 vext2 <3,4,5,6>, <0,0,0,0>
- 1564491878U, // <5,6,0,1>: Cost 2 vext2 <3,4,5,6>, LHS
- 2632261796U, // <5,6,0,2>: Cost 3 vext2 <2,4,5,6>, <0,2,0,2>
- 2638233856U, // <5,6,0,3>: Cost 3 vext2 <3,4,5,6>, <0,3,1,4>
- 2638233938U, // <5,6,0,4>: Cost 3 vext2 <3,4,5,6>, <0,4,1,5>
- 3706003885U, // <5,6,0,5>: Cost 4 vext2 <2,4,5,6>, <0,5,2,6>
- 3706003967U, // <5,6,0,6>: Cost 4 vext2 <2,4,5,6>, <0,6,2,7>
- 4047473974U, // <5,6,0,7>: Cost 4 vzipr <3,4,5,0>, RHS
- 1564492445U, // <5,6,0,u>: Cost 2 vext2 <3,4,5,6>, LHS
- 2638234358U, // <5,6,1,0>: Cost 3 vext2 <3,4,5,6>, <1,0,3,2>
- 2638234420U, // <5,6,1,1>: Cost 3 vext2 <3,4,5,6>, <1,1,1,1>
- 2638234518U, // <5,6,1,2>: Cost 3 vext2 <3,4,5,6>, <1,2,3,0>
- 2638234584U, // <5,6,1,3>: Cost 3 vext2 <3,4,5,6>, <1,3,1,3>
- 2626290768U, // <5,6,1,4>: Cost 3 vext2 <1,4,5,6>, <1,4,5,6>
- 2638234768U, // <5,6,1,5>: Cost 3 vext2 <3,4,5,6>, <1,5,3,7>
- 3700032719U, // <5,6,1,6>: Cost 4 vext2 <1,4,5,6>, <1,6,1,7>
- 2982366518U, // <5,6,1,7>: Cost 3 vzipr <4,u,5,1>, RHS
- 2628945300U, // <5,6,1,u>: Cost 3 vext2 <1,u,5,6>, <1,u,5,6>
- 3706004925U, // <5,6,2,0>: Cost 4 vext2 <2,4,5,6>, <2,0,1,2>
- 3711976966U, // <5,6,2,1>: Cost 4 vext2 <3,4,5,6>, <2,1,0,3>
- 2638235240U, // <5,6,2,2>: Cost 3 vext2 <3,4,5,6>, <2,2,2,2>
- 2638235302U, // <5,6,2,3>: Cost 3 vext2 <3,4,5,6>, <2,3,0,1>
- 2632263465U, // <5,6,2,4>: Cost 3 vext2 <2,4,5,6>, <2,4,5,6>
- 2638235496U, // <5,6,2,5>: Cost 3 vext2 <3,4,5,6>, <2,5,3,6>
- 2638235578U, // <5,6,2,6>: Cost 3 vext2 <3,4,5,6>, <2,6,3,7>
- 2713965050U, // <5,6,2,7>: Cost 3 vext3 <4,u,5,5>, <6,2,7,3>
- 2634917997U, // <5,6,2,u>: Cost 3 vext2 <2,u,5,6>, <2,u,5,6>
- 2638235798U, // <5,6,3,0>: Cost 3 vext2 <3,4,5,6>, <3,0,1,2>
- 3711977695U, // <5,6,3,1>: Cost 4 vext2 <3,4,5,6>, <3,1,0,3>
- 3710650720U, // <5,6,3,2>: Cost 4 vext2 <3,2,5,6>, <3,2,5,6>
- 2638236060U, // <5,6,3,3>: Cost 3 vext2 <3,4,5,6>, <3,3,3,3>
- 1564494338U, // <5,6,3,4>: Cost 2 vext2 <3,4,5,6>, <3,4,5,6>
- 2638236234U, // <5,6,3,5>: Cost 3 vext2 <3,4,5,6>, <3,5,4,6>
- 3711978104U, // <5,6,3,6>: Cost 4 vext2 <3,4,5,6>, <3,6,0,7>
- 4034227510U, // <5,6,3,7>: Cost 4 vzipr <1,2,5,3>, RHS
- 1567148870U, // <5,6,3,u>: Cost 2 vext2 <3,u,5,6>, <3,u,5,6>
- 2577817702U, // <5,6,4,0>: Cost 3 vext1 <4,5,6,4>, LHS
- 3700034544U, // <5,6,4,1>: Cost 4 vext2 <1,4,5,6>, <4,1,6,5>
- 2723033713U, // <5,6,4,2>: Cost 3 vext3 <6,4,2,5>, <6,4,2,5>
- 2638236818U, // <5,6,4,3>: Cost 3 vext2 <3,4,5,6>, <4,3,6,5>
- 2644208859U, // <5,6,4,4>: Cost 3 vext2 <4,4,5,6>, <4,4,5,6>
- 1564495158U, // <5,6,4,5>: Cost 2 vext2 <3,4,5,6>, RHS
- 2645536125U, // <5,6,4,6>: Cost 3 vext2 <4,6,5,6>, <4,6,5,6>
- 2723402398U, // <5,6,4,7>: Cost 3 vext3 <6,4,7,5>, <6,4,7,5>
- 1564495401U, // <5,6,4,u>: Cost 2 vext2 <3,4,5,6>, RHS
- 2577825894U, // <5,6,5,0>: Cost 3 vext1 <4,5,6,5>, LHS
- 2662125264U, // <5,6,5,1>: Cost 3 vext2 <7,4,5,6>, <5,1,7,3>
- 3775836867U, // <5,6,5,2>: Cost 4 vext3 <2,u,6,5>, <6,5,2,6>
- 3711979343U, // <5,6,5,3>: Cost 4 vext2 <3,4,5,6>, <5,3,3,4>
- 2650181556U, // <5,6,5,4>: Cost 3 vext2 <5,4,5,6>, <5,4,5,6>
- 2662125572U, // <5,6,5,5>: Cost 3 vext2 <7,4,5,6>, <5,5,5,5>
- 2638237732U, // <5,6,5,6>: Cost 3 vext2 <3,4,5,6>, <5,6,0,1>
- 2982399286U, // <5,6,5,7>: Cost 3 vzipr <4,u,5,5>, RHS
- 2982399287U, // <5,6,5,u>: Cost 3 vzipr <4,u,5,5>, RHS
- 2583806054U, // <5,6,6,0>: Cost 3 vext1 <5,5,6,6>, LHS
- 3711979910U, // <5,6,6,1>: Cost 4 vext2 <3,4,5,6>, <6,1,3,4>
- 2662126074U, // <5,6,6,2>: Cost 3 vext2 <7,4,5,6>, <6,2,7,3>
- 2583808514U, // <5,6,6,3>: Cost 3 vext1 <5,5,6,6>, <3,4,5,6>
- 2583809334U, // <5,6,6,4>: Cost 3 vext1 <5,5,6,6>, RHS
- 2583810062U, // <5,6,6,5>: Cost 3 vext1 <5,5,6,6>, <5,5,6,6>
- 2638238520U, // <5,6,6,6>: Cost 3 vext2 <3,4,5,6>, <6,6,6,6>
- 2973781302U, // <5,6,6,7>: Cost 3 vzipr <3,4,5,6>, RHS
- 2973781303U, // <5,6,6,u>: Cost 3 vzipr <3,4,5,6>, RHS
- 430358630U, // <5,6,7,0>: Cost 1 vext1 RHS, LHS
- 1504101110U, // <5,6,7,1>: Cost 2 vext1 RHS, <1,0,3,2>
- 1504101992U, // <5,6,7,2>: Cost 2 vext1 RHS, <2,2,2,2>
- 1504102550U, // <5,6,7,3>: Cost 2 vext1 RHS, <3,0,1,2>
- 430361910U, // <5,6,7,4>: Cost 1 vext1 RHS, RHS
- 1504104390U, // <5,6,7,5>: Cost 2 vext1 RHS, <5,4,7,6>
- 1504105272U, // <5,6,7,6>: Cost 2 vext1 RHS, <6,6,6,6>
- 1504106092U, // <5,6,7,7>: Cost 2 vext1 RHS, <7,7,7,7>
- 430364462U, // <5,6,7,u>: Cost 1 vext1 RHS, LHS
- 430366822U, // <5,6,u,0>: Cost 1 vext1 RHS, LHS
- 1564497710U, // <5,6,u,1>: Cost 2 vext2 <3,4,5,6>, LHS
- 1504110184U, // <5,6,u,2>: Cost 2 vext1 RHS, <2,2,2,2>
- 1504110742U, // <5,6,u,3>: Cost 2 vext1 RHS, <3,0,1,2>
- 430370103U, // <5,6,u,4>: Cost 1 vext1 RHS, RHS
- 1564498074U, // <5,6,u,5>: Cost 2 vext2 <3,4,5,6>, RHS
- 1504113146U, // <5,6,u,6>: Cost 2 vext1 RHS, <6,2,7,3>
- 1504113658U, // <5,6,u,7>: Cost 2 vext1 RHS, <7,0,1,2>
- 430372654U, // <5,6,u,u>: Cost 1 vext1 RHS, LHS
- 2625634304U, // <5,7,0,0>: Cost 3 vext2 <1,3,5,7>, <0,0,0,0>
- 1551892582U, // <5,7,0,1>: Cost 2 vext2 <1,3,5,7>, LHS
- 2625634468U, // <5,7,0,2>: Cost 3 vext2 <1,3,5,7>, <0,2,0,2>
- 2571889247U, // <5,7,0,3>: Cost 3 vext1 <3,5,7,0>, <3,5,7,0>
- 2625634642U, // <5,7,0,4>: Cost 3 vext2 <1,3,5,7>, <0,4,1,5>
- 2595778728U, // <5,7,0,5>: Cost 3 vext1 <7,5,7,0>, <5,7,5,7>
- 3699376639U, // <5,7,0,6>: Cost 4 vext2 <1,3,5,7>, <0,6,2,7>
- 2260235715U, // <5,7,0,7>: Cost 3 vrev <7,5,7,0>
- 1551893149U, // <5,7,0,u>: Cost 2 vext2 <1,3,5,7>, LHS
- 2625635062U, // <5,7,1,0>: Cost 3 vext2 <1,3,5,7>, <1,0,3,2>
- 2624308020U, // <5,7,1,1>: Cost 3 vext2 <1,1,5,7>, <1,1,1,1>
- 2625635222U, // <5,7,1,2>: Cost 3 vext2 <1,3,5,7>, <1,2,3,0>
- 1551893504U, // <5,7,1,3>: Cost 2 vext2 <1,3,5,7>, <1,3,5,7>
- 2571898166U, // <5,7,1,4>: Cost 3 vext1 <3,5,7,1>, RHS
- 2625635472U, // <5,7,1,5>: Cost 3 vext2 <1,3,5,7>, <1,5,3,7>
- 2627626227U, // <5,7,1,6>: Cost 3 vext2 <1,6,5,7>, <1,6,5,7>
- 3702031684U, // <5,7,1,7>: Cost 4 vext2 <1,7,5,7>, <1,7,5,7>
- 1555211669U, // <5,7,1,u>: Cost 2 vext2 <1,u,5,7>, <1,u,5,7>
- 2629617126U, // <5,7,2,0>: Cost 3 vext2 <2,0,5,7>, <2,0,5,7>
- 3699377670U, // <5,7,2,1>: Cost 4 vext2 <1,3,5,7>, <2,1,0,3>
- 2625635944U, // <5,7,2,2>: Cost 3 vext2 <1,3,5,7>, <2,2,2,2>
- 2625636006U, // <5,7,2,3>: Cost 3 vext2 <1,3,5,7>, <2,3,0,1>
- 2632271658U, // <5,7,2,4>: Cost 3 vext2 <2,4,5,7>, <2,4,5,7>
- 2625636201U, // <5,7,2,5>: Cost 3 vext2 <1,3,5,7>, <2,5,3,7>
- 2625636282U, // <5,7,2,6>: Cost 3 vext2 <1,3,5,7>, <2,6,3,7>
- 3708004381U, // <5,7,2,7>: Cost 4 vext2 <2,7,5,7>, <2,7,5,7>
- 2625636411U, // <5,7,2,u>: Cost 3 vext2 <1,3,5,7>, <2,u,0,1>
- 2625636502U, // <5,7,3,0>: Cost 3 vext2 <1,3,5,7>, <3,0,1,2>
- 2625636604U, // <5,7,3,1>: Cost 3 vext2 <1,3,5,7>, <3,1,3,5>
- 3699378478U, // <5,7,3,2>: Cost 4 vext2 <1,3,5,7>, <3,2,0,1>
- 2625636764U, // <5,7,3,3>: Cost 3 vext2 <1,3,5,7>, <3,3,3,3>
- 2625636866U, // <5,7,3,4>: Cost 3 vext2 <1,3,5,7>, <3,4,5,6>
- 2625636959U, // <5,7,3,5>: Cost 3 vext2 <1,3,5,7>, <3,5,7,0>
- 3699378808U, // <5,7,3,6>: Cost 4 vext2 <1,3,5,7>, <3,6,0,7>
- 2640235254U, // <5,7,3,7>: Cost 3 vext2 <3,7,5,7>, <3,7,5,7>
- 2625637150U, // <5,7,3,u>: Cost 3 vext2 <1,3,5,7>, <3,u,1,2>
- 2571919462U, // <5,7,4,0>: Cost 3 vext1 <3,5,7,4>, LHS
- 2571920384U, // <5,7,4,1>: Cost 3 vext1 <3,5,7,4>, <1,3,5,7>
- 3699379260U, // <5,7,4,2>: Cost 4 vext2 <1,3,5,7>, <4,2,6,0>
- 2571922019U, // <5,7,4,3>: Cost 3 vext1 <3,5,7,4>, <3,5,7,4>
- 2571922742U, // <5,7,4,4>: Cost 3 vext1 <3,5,7,4>, RHS
- 1551895862U, // <5,7,4,5>: Cost 2 vext2 <1,3,5,7>, RHS
- 2846277980U, // <5,7,4,6>: Cost 3 vuzpr RHS, <0,4,2,6>
- 2646207951U, // <5,7,4,7>: Cost 3 vext2 <4,7,5,7>, <4,7,5,7>
- 1551896105U, // <5,7,4,u>: Cost 2 vext2 <1,3,5,7>, RHS
- 2583871590U, // <5,7,5,0>: Cost 3 vext1 <5,5,7,5>, LHS
- 2652180176U, // <5,7,5,1>: Cost 3 vext2 <5,7,5,7>, <5,1,7,3>
- 2625638177U, // <5,7,5,2>: Cost 3 vext2 <1,3,5,7>, <5,2,7,3>
- 2625638262U, // <5,7,5,3>: Cost 3 vext2 <1,3,5,7>, <5,3,7,7>
- 2583874870U, // <5,7,5,4>: Cost 3 vext1 <5,5,7,5>, RHS
- 2846281732U, // <5,7,5,5>: Cost 3 vuzpr RHS, <5,5,5,5>
- 2651517015U, // <5,7,5,6>: Cost 3 vext2 <5,6,5,7>, <5,6,5,7>
- 1772539190U, // <5,7,5,7>: Cost 2 vuzpr RHS, RHS
- 1772539191U, // <5,7,5,u>: Cost 2 vuzpr RHS, RHS
- 2846281826U, // <5,7,6,0>: Cost 3 vuzpr RHS, <5,6,7,0>
- 3699380615U, // <5,7,6,1>: Cost 4 vext2 <1,3,5,7>, <6,1,3,5>
- 2846281108U, // <5,7,6,2>: Cost 3 vuzpr RHS, <4,6,u,2>
- 2589854210U, // <5,7,6,3>: Cost 3 vext1 <6,5,7,6>, <3,4,5,6>
- 2846281830U, // <5,7,6,4>: Cost 3 vuzpr RHS, <5,6,7,4>
- 2725467658U, // <5,7,6,5>: Cost 3 vext3 <6,7,u,5>, <7,6,5,u>
- 2846281076U, // <5,7,6,6>: Cost 3 vuzpr RHS, <4,6,4,6>
- 2846279610U, // <5,7,6,7>: Cost 3 vuzpr RHS, <2,6,3,7>
- 2846279611U, // <5,7,6,u>: Cost 3 vuzpr RHS, <2,6,3,u>
- 1510146150U, // <5,7,7,0>: Cost 2 vext1 <5,5,7,7>, LHS
- 2846282574U, // <5,7,7,1>: Cost 3 vuzpr RHS, <6,7,0,1>
- 2583889512U, // <5,7,7,2>: Cost 3 vext1 <5,5,7,7>, <2,2,2,2>
- 2846281919U, // <5,7,7,3>: Cost 3 vuzpr RHS, <5,7,u,3>
- 1510149430U, // <5,7,7,4>: Cost 2 vext1 <5,5,7,7>, RHS
- 1510150168U, // <5,7,7,5>: Cost 2 vext1 <5,5,7,7>, <5,5,7,7>
- 2583892474U, // <5,7,7,6>: Cost 3 vext1 <5,5,7,7>, <6,2,7,3>
- 2625640044U, // <5,7,7,7>: Cost 3 vext2 <1,3,5,7>, <7,7,7,7>
- 1510151982U, // <5,7,7,u>: Cost 2 vext1 <5,5,7,7>, LHS
- 1510154342U, // <5,7,u,0>: Cost 2 vext1 <5,5,7,u>, LHS
- 1551898414U, // <5,7,u,1>: Cost 2 vext2 <1,3,5,7>, LHS
- 2625640325U, // <5,7,u,2>: Cost 3 vext2 <1,3,5,7>, <u,2,3,0>
- 1772536477U, // <5,7,u,3>: Cost 2 vuzpr RHS, LHS
- 1510157622U, // <5,7,u,4>: Cost 2 vext1 <5,5,7,u>, RHS
- 1551898778U, // <5,7,u,5>: Cost 2 vext2 <1,3,5,7>, RHS
- 2625640656U, // <5,7,u,6>: Cost 3 vext2 <1,3,5,7>, <u,6,3,7>
- 1772539433U, // <5,7,u,7>: Cost 2 vuzpr RHS, RHS
- 1551898981U, // <5,7,u,u>: Cost 2 vext2 <1,3,5,7>, LHS
- 2625642496U, // <5,u,0,0>: Cost 3 vext2 <1,3,5,u>, <0,0,0,0>
- 1551900774U, // <5,u,0,1>: Cost 2 vext2 <1,3,5,u>, LHS
- 2625642660U, // <5,u,0,2>: Cost 3 vext2 <1,3,5,u>, <0,2,0,2>
- 2698630885U, // <5,u,0,3>: Cost 3 vext3 <2,3,4,5>, <u,0,3,2>
- 2687129325U, // <5,u,0,4>: Cost 3 vext3 <0,4,1,5>, <u,0,4,1>
- 2689783542U, // <5,u,0,5>: Cost 3 vext3 <0,u,1,5>, <u,0,5,1>
- 2266134675U, // <5,u,0,6>: Cost 3 vrev <u,5,6,0>
- 2595853772U, // <5,u,0,7>: Cost 3 vext1 <7,5,u,0>, <7,5,u,0>
- 1551901341U, // <5,u,0,u>: Cost 2 vext2 <1,3,5,u>, LHS
- 2625643254U, // <5,u,1,0>: Cost 3 vext2 <1,3,5,u>, <1,0,3,2>
- 2625643316U, // <5,u,1,1>: Cost 3 vext2 <1,3,5,u>, <1,1,1,1>
- 1613387566U, // <5,u,1,2>: Cost 2 vext3 <0,4,1,5>, LHS
- 1551901697U, // <5,u,1,3>: Cost 2 vext2 <1,3,5,u>, <1,3,5,u>
- 2626307154U, // <5,u,1,4>: Cost 3 vext2 <1,4,5,u>, <1,4,5,u>
- 2689783622U, // <5,u,1,5>: Cost 3 vext3 <0,u,1,5>, <u,1,5,0>
- 2627634420U, // <5,u,1,6>: Cost 3 vext2 <1,6,5,u>, <1,6,5,u>
- 2982366536U, // <5,u,1,7>: Cost 3 vzipr <4,u,5,1>, RHS
- 1613387620U, // <5,u,1,u>: Cost 2 vext3 <0,4,1,5>, LHS
- 2846286742U, // <5,u,2,0>: Cost 3 vuzpr RHS, <1,2,3,0>
- 2685796528U, // <5,u,2,1>: Cost 3 vext3 <0,2,1,5>, <0,2,1,5>
- 2625644136U, // <5,u,2,2>: Cost 3 vext2 <1,3,5,u>, <2,2,2,2>
- 2687129480U, // <5,u,2,3>: Cost 3 vext3 <0,4,1,5>, <u,2,3,3>
- 2632279851U, // <5,u,2,4>: Cost 3 vext2 <2,4,5,u>, <2,4,5,u>
- 2625644394U, // <5,u,2,5>: Cost 3 vext2 <1,3,5,u>, <2,5,3,u>
- 2625644474U, // <5,u,2,6>: Cost 3 vext2 <1,3,5,u>, <2,6,3,7>
- 2713966508U, // <5,u,2,7>: Cost 3 vext3 <4,u,5,5>, <u,2,7,3>
- 2625644603U, // <5,u,2,u>: Cost 3 vext2 <1,3,5,u>, <2,u,0,1>
- 2687129532U, // <5,u,3,0>: Cost 3 vext3 <0,4,1,5>, <u,3,0,1>
- 2636261649U, // <5,u,3,1>: Cost 3 vext2 <3,1,5,u>, <3,1,5,u>
- 2636925282U, // <5,u,3,2>: Cost 3 vext2 <3,2,5,u>, <3,2,5,u>
- 2625644956U, // <5,u,3,3>: Cost 3 vext2 <1,3,5,u>, <3,3,3,3>
- 1564510724U, // <5,u,3,4>: Cost 2 vext2 <3,4,5,u>, <3,4,5,u>
- 2625645160U, // <5,u,3,5>: Cost 3 vext2 <1,3,5,u>, <3,5,u,0>
- 2734610422U, // <5,u,3,6>: Cost 3 vext3 <u,3,6,5>, <u,3,6,5>
- 2640243447U, // <5,u,3,7>: Cost 3 vext2 <3,7,5,u>, <3,7,5,u>
- 1567165256U, // <5,u,3,u>: Cost 2 vext2 <3,u,5,u>, <3,u,5,u>
- 1567828889U, // <5,u,4,0>: Cost 2 vext2 <4,0,5,u>, <4,0,5,u>
- 1661163546U, // <5,u,4,1>: Cost 2 vext3 <u,4,1,5>, <u,4,1,5>
- 2734463012U, // <5,u,4,2>: Cost 3 vext3 <u,3,4,5>, <u,4,2,6>
- 2698631212U, // <5,u,4,3>: Cost 3 vext3 <2,3,4,5>, <u,4,3,5>
- 1570458842U, // <5,u,4,4>: Cost 2 vext2 <4,4,5,5>, <4,4,5,5>
- 1551904054U, // <5,u,4,5>: Cost 2 vext2 <1,3,5,u>, RHS
- 2846286172U, // <5,u,4,6>: Cost 3 vuzpr RHS, <0,4,2,6>
- 2646216144U, // <5,u,4,7>: Cost 3 vext2 <4,7,5,u>, <4,7,5,u>
- 1551904297U, // <5,u,4,u>: Cost 2 vext2 <1,3,5,u>, RHS
- 1509982310U, // <5,u,5,0>: Cost 2 vext1 <5,5,5,5>, LHS
- 2560058555U, // <5,u,5,1>: Cost 3 vext1 <1,5,u,5>, <1,5,u,5>
- 2698926194U, // <5,u,5,2>: Cost 3 vext3 <2,3,u,5>, <u,5,2,3>
- 2698631295U, // <5,u,5,3>: Cost 3 vext3 <2,3,4,5>, <u,5,3,7>
- 1509985590U, // <5,u,5,4>: Cost 2 vext1 <5,5,5,5>, RHS
- 229035318U, // <5,u,5,5>: Cost 1 vdup1 RHS
- 1613387930U, // <5,u,5,6>: Cost 2 vext3 <0,4,1,5>, RHS
- 1772547382U, // <5,u,5,7>: Cost 2 vuzpr RHS, RHS
- 229035318U, // <5,u,5,u>: Cost 1 vdup1 RHS
- 2566037606U, // <5,u,6,0>: Cost 3 vext1 <2,5,u,6>, LHS
- 2920044334U, // <5,u,6,1>: Cost 3 vzipl <5,6,7,0>, LHS
- 2566039445U, // <5,u,6,2>: Cost 3 vext1 <2,5,u,6>, <2,5,u,6>
- 2687129808U, // <5,u,6,3>: Cost 3 vext3 <0,4,1,5>, <u,6,3,7>
- 2566040886U, // <5,u,6,4>: Cost 3 vext1 <2,5,u,6>, RHS
- 2920044698U, // <5,u,6,5>: Cost 3 vzipl <5,6,7,0>, RHS
- 2846289268U, // <5,u,6,6>: Cost 3 vuzpr RHS, <4,6,4,6>
- 2973781320U, // <5,u,6,7>: Cost 3 vzipr <3,4,5,6>, RHS
- 2687129853U, // <5,u,6,u>: Cost 3 vext3 <0,4,1,5>, <u,6,u,7>
- 430506086U, // <5,u,7,0>: Cost 1 vext1 RHS, LHS
- 1486333117U, // <5,u,7,1>: Cost 2 vext1 <1,5,u,7>, <1,5,u,7>
- 1504249448U, // <5,u,7,2>: Cost 2 vext1 RHS, <2,2,2,2>
- 2040971933U, // <5,u,7,3>: Cost 2 vtrnr RHS, LHS
- 430509384U, // <5,u,7,4>: Cost 1 vext1 RHS, RHS
- 1504251600U, // <5,u,7,5>: Cost 2 vext1 RHS, <5,1,7,3>
- 118708378U, // <5,u,7,6>: Cost 1 vrev RHS
- 2040974889U, // <5,u,7,7>: Cost 2 vtrnr RHS, RHS
- 430511918U, // <5,u,7,u>: Cost 1 vext1 RHS, LHS
- 430514278U, // <5,u,u,0>: Cost 1 vext1 RHS, LHS
- 1551906606U, // <5,u,u,1>: Cost 2 vext2 <1,3,5,u>, LHS
- 1613388133U, // <5,u,u,2>: Cost 2 vext3 <0,4,1,5>, LHS
- 1772544669U, // <5,u,u,3>: Cost 2 vuzpr RHS, LHS
- 430517577U, // <5,u,u,4>: Cost 1 vext1 RHS, RHS
- 229035318U, // <5,u,u,5>: Cost 1 vdup1 RHS
- 118716571U, // <5,u,u,6>: Cost 1 vrev RHS
- 1772547625U, // <5,u,u,7>: Cost 2 vuzpr RHS, RHS
- 430520110U, // <5,u,u,u>: Cost 1 vext1 RHS, LHS
- 2686025728U, // <6,0,0,0>: Cost 3 vext3 <0,2,4,6>, <0,0,0,0>
- 2686025738U, // <6,0,0,1>: Cost 3 vext3 <0,2,4,6>, <0,0,1,1>
- 2686025748U, // <6,0,0,2>: Cost 3 vext3 <0,2,4,6>, <0,0,2,2>
- 3779084320U, // <6,0,0,3>: Cost 4 vext3 <3,4,5,6>, <0,0,3,5>
- 2642903388U, // <6,0,0,4>: Cost 3 vext2 <4,2,6,0>, <0,4,2,6>
- 3657723939U, // <6,0,0,5>: Cost 4 vext1 <5,6,0,0>, <5,6,0,0>
- 3926676514U, // <6,0,0,6>: Cost 4 vuzpr <5,6,7,0>, <7,0,5,6>
- 3926675786U, // <6,0,0,7>: Cost 4 vuzpr <5,6,7,0>, <6,0,5,7>
- 2686025802U, // <6,0,0,u>: Cost 3 vext3 <0,2,4,6>, <0,0,u,2>
- 2566070374U, // <6,0,1,0>: Cost 3 vext1 <2,6,0,1>, LHS
- 3759767642U, // <6,0,1,1>: Cost 4 vext3 <0,2,4,6>, <0,1,1,0>
- 1612284006U, // <6,0,1,2>: Cost 2 vext3 <0,2,4,6>, LHS
- 2583988738U, // <6,0,1,3>: Cost 3 vext1 <5,6,0,1>, <3,4,5,6>
- 2566073654U, // <6,0,1,4>: Cost 3 vext1 <2,6,0,1>, RHS
- 2583990308U, // <6,0,1,5>: Cost 3 vext1 <5,6,0,1>, <5,6,0,1>
- 2589963005U, // <6,0,1,6>: Cost 3 vext1 <6,6,0,1>, <6,6,0,1>
- 2595935702U, // <6,0,1,7>: Cost 3 vext1 <7,6,0,1>, <7,6,0,1>
- 1612284060U, // <6,0,1,u>: Cost 2 vext3 <0,2,4,6>, LHS
- 2686025892U, // <6,0,2,0>: Cost 3 vext3 <0,2,4,6>, <0,2,0,2>
- 2685804721U, // <6,0,2,1>: Cost 3 vext3 <0,2,1,6>, <0,2,1,6>
- 3759620282U, // <6,0,2,2>: Cost 4 vext3 <0,2,2,6>, <0,2,2,6>
- 2705342658U, // <6,0,2,3>: Cost 3 vext3 <3,4,5,6>, <0,2,3,5>
- 1612284108U, // <6,0,2,4>: Cost 2 vext3 <0,2,4,6>, <0,2,4,6>
- 3706029956U, // <6,0,2,5>: Cost 4 vext2 <2,4,6,0>, <2,5,6,7>
- 2686173406U, // <6,0,2,6>: Cost 3 vext3 <0,2,6,6>, <0,2,6,6>
- 3651769338U, // <6,0,2,7>: Cost 4 vext1 <4,6,0,2>, <7,0,1,2>
- 1612579056U, // <6,0,2,u>: Cost 2 vext3 <0,2,u,6>, <0,2,u,6>
- 3706030230U, // <6,0,3,0>: Cost 4 vext2 <2,4,6,0>, <3,0,1,2>
- 2705342720U, // <6,0,3,1>: Cost 3 vext3 <3,4,5,6>, <0,3,1,4>
- 2705342730U, // <6,0,3,2>: Cost 3 vext3 <3,4,5,6>, <0,3,2,5>
- 3706030492U, // <6,0,3,3>: Cost 4 vext2 <2,4,6,0>, <3,3,3,3>
- 2644896258U, // <6,0,3,4>: Cost 3 vext2 <4,5,6,0>, <3,4,5,6>
- 3718638154U, // <6,0,3,5>: Cost 4 vext2 <4,5,6,0>, <3,5,4,6>
- 3729918619U, // <6,0,3,6>: Cost 4 vext2 <6,4,6,0>, <3,6,4,6>
- 3926672384U, // <6,0,3,7>: Cost 4 vuzpr <5,6,7,0>, <1,3,5,7>
- 2705342784U, // <6,0,3,u>: Cost 3 vext3 <3,4,5,6>, <0,3,u,5>
- 2687058250U, // <6,0,4,0>: Cost 3 vext3 <0,4,0,6>, <0,4,0,6>
- 2686026066U, // <6,0,4,1>: Cost 3 vext3 <0,2,4,6>, <0,4,1,5>
- 1613463900U, // <6,0,4,2>: Cost 2 vext3 <0,4,2,6>, <0,4,2,6>
- 3761021285U, // <6,0,4,3>: Cost 4 vext3 <0,4,3,6>, <0,4,3,6>
- 2687353198U, // <6,0,4,4>: Cost 3 vext3 <0,4,4,6>, <0,4,4,6>
- 2632289590U, // <6,0,4,5>: Cost 3 vext2 <2,4,6,0>, RHS
- 2645560704U, // <6,0,4,6>: Cost 3 vext2 <4,6,6,0>, <4,6,6,0>
- 2646224337U, // <6,0,4,7>: Cost 3 vext2 <4,7,6,0>, <4,7,6,0>
- 1613906322U, // <6,0,4,u>: Cost 2 vext3 <0,4,u,6>, <0,4,u,6>
- 3651788902U, // <6,0,5,0>: Cost 4 vext1 <4,6,0,5>, LHS
- 2687795620U, // <6,0,5,1>: Cost 3 vext3 <0,5,1,6>, <0,5,1,6>
- 3761611181U, // <6,0,5,2>: Cost 4 vext3 <0,5,2,6>, <0,5,2,6>
- 3723284326U, // <6,0,5,3>: Cost 4 vext2 <5,3,6,0>, <5,3,6,0>
- 2646224838U, // <6,0,5,4>: Cost 3 vext2 <4,7,6,0>, <5,4,7,6>
- 3718639630U, // <6,0,5,5>: Cost 4 vext2 <4,5,6,0>, <5,5,6,6>
- 2652196962U, // <6,0,5,6>: Cost 3 vext2 <5,7,6,0>, <5,6,7,0>
- 2852932918U, // <6,0,5,7>: Cost 3 vuzpr <5,6,7,0>, RHS
- 2852932919U, // <6,0,5,u>: Cost 3 vuzpr <5,6,7,0>, RHS
- 2852933730U, // <6,0,6,0>: Cost 3 vuzpr <5,6,7,0>, <5,6,7,0>
- 2925985894U, // <6,0,6,1>: Cost 3 vzipl <6,6,6,6>, LHS
- 3060203622U, // <6,0,6,2>: Cost 3 vtrnl <6,6,6,6>, LHS
- 3718640178U, // <6,0,6,3>: Cost 4 vext2 <4,5,6,0>, <6,3,4,5>
- 2656178832U, // <6,0,6,4>: Cost 3 vext2 <6,4,6,0>, <6,4,6,0>
- 3725939378U, // <6,0,6,5>: Cost 4 vext2 <5,7,6,0>, <6,5,0,7>
- 2657506098U, // <6,0,6,6>: Cost 3 vext2 <6,6,6,0>, <6,6,6,0>
- 2619020110U, // <6,0,6,7>: Cost 3 vext2 <0,2,6,0>, <6,7,0,1>
- 2925986461U, // <6,0,6,u>: Cost 3 vzipl <6,6,6,6>, LHS
- 2572091494U, // <6,0,7,0>: Cost 3 vext1 <3,6,0,7>, LHS
- 2572092310U, // <6,0,7,1>: Cost 3 vext1 <3,6,0,7>, <1,2,3,0>
- 2980495524U, // <6,0,7,2>: Cost 3 vzipr RHS, <0,2,0,2>
- 2572094072U, // <6,0,7,3>: Cost 3 vext1 <3,6,0,7>, <3,6,0,7>
- 2572094774U, // <6,0,7,4>: Cost 3 vext1 <3,6,0,7>, RHS
- 4054238242U, // <6,0,7,5>: Cost 4 vzipr RHS, <1,4,0,5>
- 3645837653U, // <6,0,7,6>: Cost 4 vext1 <3,6,0,7>, <6,0,7,0>
- 4054239054U, // <6,0,7,7>: Cost 4 vzipr RHS, <2,5,0,7>
- 2572097326U, // <6,0,7,u>: Cost 3 vext1 <3,6,0,7>, LHS
- 2686026378U, // <6,0,u,0>: Cost 3 vext3 <0,2,4,6>, <0,u,0,2>
- 2686026386U, // <6,0,u,1>: Cost 3 vext3 <0,2,4,6>, <0,u,1,1>
- 1612284573U, // <6,0,u,2>: Cost 2 vext3 <0,2,4,6>, LHS
- 2705343144U, // <6,0,u,3>: Cost 3 vext3 <3,4,5,6>, <0,u,3,5>
- 1616265906U, // <6,0,u,4>: Cost 2 vext3 <0,u,4,6>, <0,u,4,6>
- 2632292506U, // <6,0,u,5>: Cost 3 vext2 <2,4,6,0>, RHS
- 2590020356U, // <6,0,u,6>: Cost 3 vext1 <6,6,0,u>, <6,6,0,u>
- 2852933161U, // <6,0,u,7>: Cost 3 vuzpr <5,6,7,0>, RHS
- 1612284627U, // <6,0,u,u>: Cost 2 vext3 <0,2,4,6>, LHS
- 2595995750U, // <6,1,0,0>: Cost 3 vext1 <7,6,1,0>, LHS
- 2646229094U, // <6,1,0,1>: Cost 3 vext2 <4,7,6,1>, LHS
- 3694092492U, // <6,1,0,2>: Cost 4 vext2 <0,4,6,1>, <0,2,4,6>
- 2686026486U, // <6,1,0,3>: Cost 3 vext3 <0,2,4,6>, <1,0,3,2>
- 2595999030U, // <6,1,0,4>: Cost 3 vext1 <7,6,1,0>, RHS
- 3767730952U, // <6,1,0,5>: Cost 4 vext3 <1,5,4,6>, <1,0,5,2>
- 2596000590U, // <6,1,0,6>: Cost 3 vext1 <7,6,1,0>, <6,7,0,1>
- 2596001246U, // <6,1,0,7>: Cost 3 vext1 <7,6,1,0>, <7,6,1,0>
- 2686026531U, // <6,1,0,u>: Cost 3 vext3 <0,2,4,6>, <1,0,u,2>
- 3763602219U, // <6,1,1,0>: Cost 4 vext3 <0,u,2,6>, <1,1,0,1>
- 2686026548U, // <6,1,1,1>: Cost 3 vext3 <0,2,4,6>, <1,1,1,1>
- 3764929346U, // <6,1,1,2>: Cost 4 vext3 <1,1,2,6>, <1,1,2,6>
- 2686026568U, // <6,1,1,3>: Cost 3 vext3 <0,2,4,6>, <1,1,3,3>
- 2691334996U, // <6,1,1,4>: Cost 3 vext3 <1,1,4,6>, <1,1,4,6>
- 3760874332U, // <6,1,1,5>: Cost 4 vext3 <0,4,1,6>, <1,1,5,5>
- 3765224294U, // <6,1,1,6>: Cost 4 vext3 <1,1,6,6>, <1,1,6,6>
- 3669751263U, // <6,1,1,7>: Cost 4 vext1 <7,6,1,1>, <7,6,1,1>
- 2686026613U, // <6,1,1,u>: Cost 3 vext3 <0,2,4,6>, <1,1,u,3>
- 2554208358U, // <6,1,2,0>: Cost 3 vext1 <0,6,1,2>, LHS
- 3763602311U, // <6,1,2,1>: Cost 4 vext3 <0,u,2,6>, <1,2,1,3>
- 3639895971U, // <6,1,2,2>: Cost 4 vext1 <2,6,1,2>, <2,6,1,2>
- 2686026646U, // <6,1,2,3>: Cost 3 vext3 <0,2,4,6>, <1,2,3,0>
- 2554211638U, // <6,1,2,4>: Cost 3 vext1 <0,6,1,2>, RHS
- 3760874411U, // <6,1,2,5>: Cost 4 vext3 <0,4,1,6>, <1,2,5,3>
- 2554212858U, // <6,1,2,6>: Cost 3 vext1 <0,6,1,2>, <6,2,7,3>
- 3802973114U, // <6,1,2,7>: Cost 4 vext3 <7,4,5,6>, <1,2,7,0>
- 2686026691U, // <6,1,2,u>: Cost 3 vext3 <0,2,4,6>, <1,2,u,0>
- 2566160486U, // <6,1,3,0>: Cost 3 vext1 <2,6,1,3>, LHS
- 2686026712U, // <6,1,3,1>: Cost 3 vext3 <0,2,4,6>, <1,3,1,3>
- 2686026724U, // <6,1,3,2>: Cost 3 vext3 <0,2,4,6>, <1,3,2,6>
- 3759768552U, // <6,1,3,3>: Cost 4 vext3 <0,2,4,6>, <1,3,3,1>
- 2692662262U, // <6,1,3,4>: Cost 3 vext3 <1,3,4,6>, <1,3,4,6>
- 2686026752U, // <6,1,3,5>: Cost 3 vext3 <0,2,4,6>, <1,3,5,7>
- 2590053128U, // <6,1,3,6>: Cost 3 vext1 <6,6,1,3>, <6,6,1,3>
- 3663795194U, // <6,1,3,7>: Cost 4 vext1 <6,6,1,3>, <7,0,1,2>
- 2686026775U, // <6,1,3,u>: Cost 3 vext3 <0,2,4,6>, <1,3,u,3>
- 2641587099U, // <6,1,4,0>: Cost 3 vext2 <4,0,6,1>, <4,0,6,1>
- 2693104684U, // <6,1,4,1>: Cost 3 vext3 <1,4,1,6>, <1,4,1,6>
- 3639912357U, // <6,1,4,2>: Cost 4 vext1 <2,6,1,4>, <2,6,1,4>
- 2687206462U, // <6,1,4,3>: Cost 3 vext3 <0,4,2,6>, <1,4,3,6>
- 3633941814U, // <6,1,4,4>: Cost 4 vext1 <1,6,1,4>, RHS
- 2693399632U, // <6,1,4,5>: Cost 3 vext3 <1,4,5,6>, <1,4,5,6>
- 3765077075U, // <6,1,4,6>: Cost 4 vext3 <1,1,4,6>, <1,4,6,0>
- 2646232530U, // <6,1,4,7>: Cost 3 vext2 <4,7,6,1>, <4,7,6,1>
- 2687206507U, // <6,1,4,u>: Cost 3 vext3 <0,4,2,6>, <1,4,u,6>
- 2647559796U, // <6,1,5,0>: Cost 3 vext2 <5,0,6,1>, <5,0,6,1>
- 3765077118U, // <6,1,5,1>: Cost 4 vext3 <1,1,4,6>, <1,5,1,7>
- 3767583878U, // <6,1,5,2>: Cost 4 vext3 <1,5,2,6>, <1,5,2,6>
- 2686026896U, // <6,1,5,3>: Cost 3 vext3 <0,2,4,6>, <1,5,3,7>
- 2693989528U, // <6,1,5,4>: Cost 3 vext3 <1,5,4,6>, <1,5,4,6>
- 3767805089U, // <6,1,5,5>: Cost 4 vext3 <1,5,5,6>, <1,5,5,6>
- 2652868706U, // <6,1,5,6>: Cost 3 vext2 <5,u,6,1>, <5,6,7,0>
- 3908250934U, // <6,1,5,7>: Cost 4 vuzpr <2,6,0,1>, RHS
- 2686026941U, // <6,1,5,u>: Cost 3 vext3 <0,2,4,6>, <1,5,u,7>
- 2554241126U, // <6,1,6,0>: Cost 3 vext1 <0,6,1,6>, LHS
- 3763602639U, // <6,1,6,1>: Cost 4 vext3 <0,u,2,6>, <1,6,1,7>
- 3759547607U, // <6,1,6,2>: Cost 4 vext3 <0,2,1,6>, <1,6,2,6>
- 3115221094U, // <6,1,6,3>: Cost 3 vtrnr <4,6,4,6>, LHS
- 2554244406U, // <6,1,6,4>: Cost 3 vext1 <0,6,1,6>, RHS
- 3760874739U, // <6,1,6,5>: Cost 4 vext3 <0,4,1,6>, <1,6,5,7>
- 2554245944U, // <6,1,6,6>: Cost 3 vext1 <0,6,1,6>, <6,6,6,6>
- 3719975758U, // <6,1,6,7>: Cost 4 vext2 <4,7,6,1>, <6,7,0,1>
- 3115221099U, // <6,1,6,u>: Cost 3 vtrnr <4,6,4,6>, LHS
- 2560221286U, // <6,1,7,0>: Cost 3 vext1 <1,6,1,7>, LHS
- 2560222415U, // <6,1,7,1>: Cost 3 vext1 <1,6,1,7>, <1,6,1,7>
- 2980497558U, // <6,1,7,2>: Cost 3 vzipr RHS, <3,0,1,2>
- 3103211622U, // <6,1,7,3>: Cost 3 vtrnr <2,6,3,7>, LHS
- 2560224566U, // <6,1,7,4>: Cost 3 vext1 <1,6,1,7>, RHS
- 2980495698U, // <6,1,7,5>: Cost 3 vzipr RHS, <0,4,1,5>
- 3633967526U, // <6,1,7,6>: Cost 4 vext1 <1,6,1,7>, <6,1,7,0>
- 4054237686U, // <6,1,7,7>: Cost 4 vzipr RHS, <0,6,1,7>
- 2560227118U, // <6,1,7,u>: Cost 3 vext1 <1,6,1,7>, LHS
- 2560229478U, // <6,1,u,0>: Cost 3 vext1 <1,6,1,u>, LHS
- 2686027117U, // <6,1,u,1>: Cost 3 vext3 <0,2,4,6>, <1,u,1,3>
- 2686027129U, // <6,1,u,2>: Cost 3 vext3 <0,2,4,6>, <1,u,2,6>
- 2686027132U, // <6,1,u,3>: Cost 3 vext3 <0,2,4,6>, <1,u,3,0>
- 2687206795U, // <6,1,u,4>: Cost 3 vext3 <0,4,2,6>, <1,u,4,6>
- 2686027157U, // <6,1,u,5>: Cost 3 vext3 <0,2,4,6>, <1,u,5,7>
- 2590094093U, // <6,1,u,6>: Cost 3 vext1 <6,6,1,u>, <6,6,1,u>
- 2596066790U, // <6,1,u,7>: Cost 3 vext1 <7,6,1,u>, <7,6,1,u>
- 2686027177U, // <6,1,u,u>: Cost 3 vext3 <0,2,4,6>, <1,u,u,0>
- 2646900736U, // <6,2,0,0>: Cost 3 vext2 <4,u,6,2>, <0,0,0,0>
- 1573159014U, // <6,2,0,1>: Cost 2 vext2 <4,u,6,2>, LHS
- 2646900900U, // <6,2,0,2>: Cost 3 vext2 <4,u,6,2>, <0,2,0,2>
- 3759769037U, // <6,2,0,3>: Cost 4 vext3 <0,2,4,6>, <2,0,3,0>
- 2641592668U, // <6,2,0,4>: Cost 3 vext2 <4,0,6,2>, <0,4,2,6>
- 3779085794U, // <6,2,0,5>: Cost 4 vext3 <3,4,5,6>, <2,0,5,3>
- 2686027244U, // <6,2,0,6>: Cost 3 vext3 <0,2,4,6>, <2,0,6,4>
- 3669816807U, // <6,2,0,7>: Cost 4 vext1 <7,6,2,0>, <7,6,2,0>
- 1573159581U, // <6,2,0,u>: Cost 2 vext2 <4,u,6,2>, LHS
- 2230527897U, // <6,2,1,0>: Cost 3 vrev <2,6,0,1>
- 2646901556U, // <6,2,1,1>: Cost 3 vext2 <4,u,6,2>, <1,1,1,1>
- 2646901654U, // <6,2,1,2>: Cost 3 vext2 <4,u,6,2>, <1,2,3,0>
- 2847047782U, // <6,2,1,3>: Cost 3 vuzpr <4,6,u,2>, LHS
- 3771049517U, // <6,2,1,4>: Cost 4 vext3 <2,1,4,6>, <2,1,4,6>
- 2646901904U, // <6,2,1,5>: Cost 3 vext2 <4,u,6,2>, <1,5,3,7>
- 2686027324U, // <6,2,1,6>: Cost 3 vext3 <0,2,4,6>, <2,1,6,3>
- 3669825000U, // <6,2,1,7>: Cost 4 vext1 <7,6,2,1>, <7,6,2,1>
- 2231117793U, // <6,2,1,u>: Cost 3 vrev <2,6,u,1>
- 3763603029U, // <6,2,2,0>: Cost 4 vext3 <0,u,2,6>, <2,2,0,1>
- 3759769184U, // <6,2,2,1>: Cost 4 vext3 <0,2,4,6>, <2,2,1,3>
- 2686027368U, // <6,2,2,2>: Cost 3 vext3 <0,2,4,6>, <2,2,2,2>
- 2686027378U, // <6,2,2,3>: Cost 3 vext3 <0,2,4,6>, <2,2,3,3>
- 2697971326U, // <6,2,2,4>: Cost 3 vext3 <2,2,4,6>, <2,2,4,6>
- 3759769224U, // <6,2,2,5>: Cost 4 vext3 <0,2,4,6>, <2,2,5,7>
- 2698118800U, // <6,2,2,6>: Cost 3 vext3 <2,2,6,6>, <2,2,6,6>
- 3920794092U, // <6,2,2,7>: Cost 4 vuzpr <4,6,u,2>, <6,2,5,7>
- 2686027423U, // <6,2,2,u>: Cost 3 vext3 <0,2,4,6>, <2,2,u,3>
- 2686027430U, // <6,2,3,0>: Cost 3 vext3 <0,2,4,6>, <2,3,0,1>
- 3759769262U, // <6,2,3,1>: Cost 4 vext3 <0,2,4,6>, <2,3,1,0>
- 2698487485U, // <6,2,3,2>: Cost 3 vext3 <2,3,2,6>, <2,3,2,6>
- 2705344196U, // <6,2,3,3>: Cost 3 vext3 <3,4,5,6>, <2,3,3,4>
- 2686027470U, // <6,2,3,4>: Cost 3 vext3 <0,2,4,6>, <2,3,4,5>
- 2698708696U, // <6,2,3,5>: Cost 3 vext3 <2,3,5,6>, <2,3,5,6>
- 2724660961U, // <6,2,3,6>: Cost 3 vext3 <6,6,6,6>, <2,3,6,6>
- 2729232104U, // <6,2,3,7>: Cost 3 vext3 <7,4,5,6>, <2,3,7,4>
- 2686027502U, // <6,2,3,u>: Cost 3 vext3 <0,2,4,6>, <2,3,u,1>
- 1567853468U, // <6,2,4,0>: Cost 2 vext2 <4,0,6,2>, <4,0,6,2>
- 3759769351U, // <6,2,4,1>: Cost 4 vext3 <0,2,4,6>, <2,4,1,u>
- 2699151118U, // <6,2,4,2>: Cost 3 vext3 <2,4,2,6>, <2,4,2,6>
- 2686027543U, // <6,2,4,3>: Cost 3 vext3 <0,2,4,6>, <2,4,3,6>
- 2699298592U, // <6,2,4,4>: Cost 3 vext3 <2,4,4,6>, <2,4,4,6>
- 1573162294U, // <6,2,4,5>: Cost 2 vext2 <4,u,6,2>, RHS
- 2686027564U, // <6,2,4,6>: Cost 3 vext3 <0,2,4,6>, <2,4,6,0>
- 3719982547U, // <6,2,4,7>: Cost 4 vext2 <4,7,6,2>, <4,7,6,2>
- 1573162532U, // <6,2,4,u>: Cost 2 vext2 <4,u,6,2>, <4,u,6,2>
- 3779086154U, // <6,2,5,0>: Cost 4 vext3 <3,4,5,6>, <2,5,0,3>
- 2646904528U, // <6,2,5,1>: Cost 3 vext2 <4,u,6,2>, <5,1,7,3>
- 3759769440U, // <6,2,5,2>: Cost 4 vext3 <0,2,4,6>, <2,5,2,7>
- 2699888488U, // <6,2,5,3>: Cost 3 vext3 <2,5,3,6>, <2,5,3,6>
- 2230855617U, // <6,2,5,4>: Cost 3 vrev <2,6,4,5>
- 2646904836U, // <6,2,5,5>: Cost 3 vext2 <4,u,6,2>, <5,5,5,5>
- 2646904930U, // <6,2,5,6>: Cost 3 vext2 <4,u,6,2>, <5,6,7,0>
- 2847051062U, // <6,2,5,7>: Cost 3 vuzpr <4,6,u,2>, RHS
- 2700257173U, // <6,2,5,u>: Cost 3 vext3 <2,5,u,6>, <2,5,u,6>
- 2687207321U, // <6,2,6,0>: Cost 3 vext3 <0,4,2,6>, <2,6,0,1>
- 2686027684U, // <6,2,6,1>: Cost 3 vext3 <0,2,4,6>, <2,6,1,3>
- 2566260656U, // <6,2,6,2>: Cost 3 vext1 <2,6,2,6>, <2,6,2,6>
- 2685806522U, // <6,2,6,3>: Cost 3 vext3 <0,2,1,6>, <2,6,3,7>
- 2687207361U, // <6,2,6,4>: Cost 3 vext3 <0,4,2,6>, <2,6,4,5>
- 2686027724U, // <6,2,6,5>: Cost 3 vext3 <0,2,4,6>, <2,6,5,7>
- 2646905656U, // <6,2,6,6>: Cost 3 vext2 <4,u,6,2>, <6,6,6,6>
- 2646905678U, // <6,2,6,7>: Cost 3 vext2 <4,u,6,2>, <6,7,0,1>
- 2686027751U, // <6,2,6,u>: Cost 3 vext3 <0,2,4,6>, <2,6,u,7>
- 2554323046U, // <6,2,7,0>: Cost 3 vext1 <0,6,2,7>, LHS
- 2572239606U, // <6,2,7,1>: Cost 3 vext1 <3,6,2,7>, <1,0,3,2>
- 2566268849U, // <6,2,7,2>: Cost 3 vext1 <2,6,2,7>, <2,6,2,7>
- 1906753638U, // <6,2,7,3>: Cost 2 vzipr RHS, LHS
- 2554326326U, // <6,2,7,4>: Cost 3 vext1 <0,6,2,7>, RHS
- 3304687564U, // <6,2,7,5>: Cost 4 vrev <2,6,5,7>
- 2980495708U, // <6,2,7,6>: Cost 3 vzipr RHS, <0,4,2,6>
- 2646906476U, // <6,2,7,7>: Cost 3 vext2 <4,u,6,2>, <7,7,7,7>
- 1906753643U, // <6,2,7,u>: Cost 2 vzipr RHS, LHS
- 1591744256U, // <6,2,u,0>: Cost 2 vext2 <u,0,6,2>, <u,0,6,2>
- 1573164846U, // <6,2,u,1>: Cost 2 vext2 <4,u,6,2>, LHS
- 2701805650U, // <6,2,u,2>: Cost 3 vext3 <2,u,2,6>, <2,u,2,6>
- 1906761830U, // <6,2,u,3>: Cost 2 vzipr RHS, LHS
- 2686027875U, // <6,2,u,4>: Cost 3 vext3 <0,2,4,6>, <2,u,4,5>
- 1573165210U, // <6,2,u,5>: Cost 2 vext2 <4,u,6,2>, RHS
- 2686322800U, // <6,2,u,6>: Cost 3 vext3 <0,2,u,6>, <2,u,6,0>
- 2847051305U, // <6,2,u,7>: Cost 3 vuzpr <4,6,u,2>, RHS
- 1906761835U, // <6,2,u,u>: Cost 2 vzipr RHS, LHS
- 3759769739U, // <6,3,0,0>: Cost 4 vext3 <0,2,4,6>, <3,0,0,0>
- 2686027926U, // <6,3,0,1>: Cost 3 vext3 <0,2,4,6>, <3,0,1,2>
- 2686027937U, // <6,3,0,2>: Cost 3 vext3 <0,2,4,6>, <3,0,2,4>
- 3640027286U, // <6,3,0,3>: Cost 4 vext1 <2,6,3,0>, <3,0,1,2>
- 2687207601U, // <6,3,0,4>: Cost 3 vext3 <0,4,2,6>, <3,0,4,2>
- 2705344698U, // <6,3,0,5>: Cost 3 vext3 <3,4,5,6>, <3,0,5,2>
- 3663917847U, // <6,3,0,6>: Cost 4 vext1 <6,6,3,0>, <6,6,3,0>
- 2237008560U, // <6,3,0,7>: Cost 3 vrev <3,6,7,0>
- 2686027989U, // <6,3,0,u>: Cost 3 vext3 <0,2,4,6>, <3,0,u,2>
- 3759769823U, // <6,3,1,0>: Cost 4 vext3 <0,2,4,6>, <3,1,0,3>
- 3759769830U, // <6,3,1,1>: Cost 4 vext3 <0,2,4,6>, <3,1,1,1>
- 3759769841U, // <6,3,1,2>: Cost 4 vext3 <0,2,4,6>, <3,1,2,3>
- 3759769848U, // <6,3,1,3>: Cost 4 vext3 <0,2,4,6>, <3,1,3,1>
- 2703280390U, // <6,3,1,4>: Cost 3 vext3 <3,1,4,6>, <3,1,4,6>
- 3759769868U, // <6,3,1,5>: Cost 4 vext3 <0,2,4,6>, <3,1,5,3>
- 3704063194U, // <6,3,1,6>: Cost 4 vext2 <2,1,6,3>, <1,6,3,0>
- 3767732510U, // <6,3,1,7>: Cost 4 vext3 <1,5,4,6>, <3,1,7,3>
- 2703280390U, // <6,3,1,u>: Cost 3 vext3 <3,1,4,6>, <3,1,4,6>
- 3704063468U, // <6,3,2,0>: Cost 4 vext2 <2,1,6,3>, <2,0,6,4>
- 2630321724U, // <6,3,2,1>: Cost 3 vext2 <2,1,6,3>, <2,1,6,3>
- 3759769921U, // <6,3,2,2>: Cost 4 vext3 <0,2,4,6>, <3,2,2,2>
- 3759769928U, // <6,3,2,3>: Cost 4 vext3 <0,2,4,6>, <3,2,3,0>
- 3704063767U, // <6,3,2,4>: Cost 4 vext2 <2,1,6,3>, <2,4,3,6>
- 3704063876U, // <6,3,2,5>: Cost 4 vext2 <2,1,6,3>, <2,5,6,7>
- 2636957626U, // <6,3,2,6>: Cost 3 vext2 <3,2,6,3>, <2,6,3,7>
- 3777907058U, // <6,3,2,7>: Cost 4 vext3 <3,2,7,6>, <3,2,7,6>
- 2630321724U, // <6,3,2,u>: Cost 3 vext2 <2,1,6,3>, <2,1,6,3>
- 3759769983U, // <6,3,3,0>: Cost 4 vext3 <0,2,4,6>, <3,3,0,1>
- 3710036245U, // <6,3,3,1>: Cost 4 vext2 <3,1,6,3>, <3,1,6,3>
- 2636958054U, // <6,3,3,2>: Cost 3 vext2 <3,2,6,3>, <3,2,6,3>
- 2686028188U, // <6,3,3,3>: Cost 3 vext3 <0,2,4,6>, <3,3,3,3>
- 2704607656U, // <6,3,3,4>: Cost 3 vext3 <3,3,4,6>, <3,3,4,6>
- 3773041072U, // <6,3,3,5>: Cost 4 vext3 <2,4,4,6>, <3,3,5,5>
- 3711363731U, // <6,3,3,6>: Cost 4 vext2 <3,3,6,3>, <3,6,3,7>
- 3767732676U, // <6,3,3,7>: Cost 4 vext3 <1,5,4,6>, <3,3,7,7>
- 2707999179U, // <6,3,3,u>: Cost 3 vext3 <3,u,5,6>, <3,3,u,5>
- 2584232038U, // <6,3,4,0>: Cost 3 vext1 <5,6,3,4>, LHS
- 2642267118U, // <6,3,4,1>: Cost 3 vext2 <4,1,6,3>, <4,1,6,3>
- 2642930751U, // <6,3,4,2>: Cost 3 vext2 <4,2,6,3>, <4,2,6,3>
- 2705197552U, // <6,3,4,3>: Cost 3 vext3 <3,4,3,6>, <3,4,3,6>
- 2584235318U, // <6,3,4,4>: Cost 3 vext1 <5,6,3,4>, RHS
- 1631603202U, // <6,3,4,5>: Cost 2 vext3 <3,4,5,6>, <3,4,5,6>
- 2654211444U, // <6,3,4,6>: Cost 3 vext2 <6,1,6,3>, <4,6,4,6>
- 2237041332U, // <6,3,4,7>: Cost 3 vrev <3,6,7,4>
- 1631824413U, // <6,3,4,u>: Cost 2 vext3 <3,4,u,6>, <3,4,u,6>
- 3640066150U, // <6,3,5,0>: Cost 4 vext1 <2,6,3,5>, LHS
- 3772746288U, // <6,3,5,1>: Cost 4 vext3 <2,4,0,6>, <3,5,1,7>
- 3640067790U, // <6,3,5,2>: Cost 4 vext1 <2,6,3,5>, <2,3,4,5>
- 3773041216U, // <6,3,5,3>: Cost 4 vext3 <2,4,4,6>, <3,5,3,5>
- 2705934922U, // <6,3,5,4>: Cost 3 vext3 <3,5,4,6>, <3,5,4,6>
- 3773041236U, // <6,3,5,5>: Cost 4 vext3 <2,4,4,6>, <3,5,5,7>
- 3779086940U, // <6,3,5,6>: Cost 4 vext3 <3,4,5,6>, <3,5,6,6>
- 3767732831U, // <6,3,5,7>: Cost 4 vext3 <1,5,4,6>, <3,5,7,0>
- 2706229870U, // <6,3,5,u>: Cost 3 vext3 <3,5,u,6>, <3,5,u,6>
- 2602164326U, // <6,3,6,0>: Cost 3 vext1 <u,6,3,6>, LHS
- 2654212512U, // <6,3,6,1>: Cost 3 vext2 <6,1,6,3>, <6,1,6,3>
- 2566334393U, // <6,3,6,2>: Cost 3 vext1 <2,6,3,6>, <2,6,3,6>
- 3704066588U, // <6,3,6,3>: Cost 4 vext2 <2,1,6,3>, <6,3,2,1>
- 2602167524U, // <6,3,6,4>: Cost 3 vext1 <u,6,3,6>, <4,4,6,6>
- 3710702321U, // <6,3,6,5>: Cost 4 vext2 <3,2,6,3>, <6,5,7,7>
- 2724661933U, // <6,3,6,6>: Cost 3 vext3 <6,6,6,6>, <3,6,6,6>
- 3710702465U, // <6,3,6,7>: Cost 4 vext2 <3,2,6,3>, <6,7,5,7>
- 2602170158U, // <6,3,6,u>: Cost 3 vext1 <u,6,3,6>, LHS
- 1492598886U, // <6,3,7,0>: Cost 2 vext1 <2,6,3,7>, LHS
- 2560369889U, // <6,3,7,1>: Cost 3 vext1 <1,6,3,7>, <1,6,3,7>
- 1492600762U, // <6,3,7,2>: Cost 2 vext1 <2,6,3,7>, <2,6,3,7>
- 2566342806U, // <6,3,7,3>: Cost 3 vext1 <2,6,3,7>, <3,0,1,2>
- 1492602166U, // <6,3,7,4>: Cost 2 vext1 <2,6,3,7>, RHS
- 2602176208U, // <6,3,7,5>: Cost 3 vext1 <u,6,3,7>, <5,1,7,3>
- 2566345210U, // <6,3,7,6>: Cost 3 vext1 <2,6,3,7>, <6,2,7,3>
- 2980496528U, // <6,3,7,7>: Cost 3 vzipr RHS, <1,5,3,7>
- 1492604718U, // <6,3,7,u>: Cost 2 vext1 <2,6,3,7>, LHS
- 1492607078U, // <6,3,u,0>: Cost 2 vext1 <2,6,3,u>, LHS
- 2686028574U, // <6,3,u,1>: Cost 3 vext3 <0,2,4,6>, <3,u,1,2>
- 1492608955U, // <6,3,u,2>: Cost 2 vext1 <2,6,3,u>, <2,6,3,u>
- 2566350998U, // <6,3,u,3>: Cost 3 vext1 <2,6,3,u>, <3,0,1,2>
- 1492610358U, // <6,3,u,4>: Cost 2 vext1 <2,6,3,u>, RHS
- 1634257734U, // <6,3,u,5>: Cost 2 vext3 <3,u,5,6>, <3,u,5,6>
- 2566353489U, // <6,3,u,6>: Cost 3 vext1 <2,6,3,u>, <6,3,u,0>
- 2980504720U, // <6,3,u,7>: Cost 3 vzipr RHS, <1,5,3,7>
- 1492612910U, // <6,3,u,u>: Cost 2 vext1 <2,6,3,u>, LHS
- 3703406592U, // <6,4,0,0>: Cost 4 vext2 <2,0,6,4>, <0,0,0,0>
- 2629664870U, // <6,4,0,1>: Cost 3 vext2 <2,0,6,4>, LHS
- 2629664972U, // <6,4,0,2>: Cost 3 vext2 <2,0,6,4>, <0,2,4,6>
- 3779087232U, // <6,4,0,3>: Cost 4 vext3 <3,4,5,6>, <4,0,3,1>
- 2642936156U, // <6,4,0,4>: Cost 3 vext2 <4,2,6,4>, <0,4,2,6>
- 2712570770U, // <6,4,0,5>: Cost 3 vext3 <4,6,4,6>, <4,0,5,1>
- 2687208348U, // <6,4,0,6>: Cost 3 vext3 <0,4,2,6>, <4,0,6,2>
- 3316723081U, // <6,4,0,7>: Cost 4 vrev <4,6,7,0>
- 2629665437U, // <6,4,0,u>: Cost 3 vext2 <2,0,6,4>, LHS
- 2242473291U, // <6,4,1,0>: Cost 3 vrev <4,6,0,1>
- 3700089652U, // <6,4,1,1>: Cost 4 vext2 <1,4,6,4>, <1,1,1,1>
- 3703407510U, // <6,4,1,2>: Cost 4 vext2 <2,0,6,4>, <1,2,3,0>
- 2852962406U, // <6,4,1,3>: Cost 3 vuzpr <5,6,7,4>, LHS
- 3628166454U, // <6,4,1,4>: Cost 4 vext1 <0,6,4,1>, RHS
- 3760876514U, // <6,4,1,5>: Cost 4 vext3 <0,4,1,6>, <4,1,5,0>
- 2687208430U, // <6,4,1,6>: Cost 3 vext3 <0,4,2,6>, <4,1,6,3>
- 3316731274U, // <6,4,1,7>: Cost 4 vrev <4,6,7,1>
- 2243063187U, // <6,4,1,u>: Cost 3 vrev <4,6,u,1>
- 2629666284U, // <6,4,2,0>: Cost 3 vext2 <2,0,6,4>, <2,0,6,4>
- 3703408188U, // <6,4,2,1>: Cost 4 vext2 <2,0,6,4>, <2,1,6,3>
- 3703408232U, // <6,4,2,2>: Cost 4 vext2 <2,0,6,4>, <2,2,2,2>
- 3703408294U, // <6,4,2,3>: Cost 4 vext2 <2,0,6,4>, <2,3,0,1>
- 2632320816U, // <6,4,2,4>: Cost 3 vext2 <2,4,6,4>, <2,4,6,4>
- 2923384118U, // <6,4,2,5>: Cost 3 vzipl <6,2,7,3>, RHS
- 2687208508U, // <6,4,2,6>: Cost 3 vext3 <0,4,2,6>, <4,2,6,0>
- 3760950341U, // <6,4,2,7>: Cost 4 vext3 <0,4,2,6>, <4,2,7,0>
- 2634975348U, // <6,4,2,u>: Cost 3 vext2 <2,u,6,4>, <2,u,6,4>
- 3703408790U, // <6,4,3,0>: Cost 4 vext2 <2,0,6,4>, <3,0,1,2>
- 3316305238U, // <6,4,3,1>: Cost 4 vrev <4,6,1,3>
- 3703408947U, // <6,4,3,2>: Cost 4 vext2 <2,0,6,4>, <3,2,0,6>
- 3703409052U, // <6,4,3,3>: Cost 4 vext2 <2,0,6,4>, <3,3,3,3>
- 2644929026U, // <6,4,3,4>: Cost 3 vext2 <4,5,6,4>, <3,4,5,6>
- 3718670922U, // <6,4,3,5>: Cost 4 vext2 <4,5,6,4>, <3,5,4,6>
- 2705345682U, // <6,4,3,6>: Cost 3 vext3 <3,4,5,6>, <4,3,6,5>
- 3926705152U, // <6,4,3,7>: Cost 4 vuzpr <5,6,7,4>, <1,3,5,7>
- 2668817222U, // <6,4,3,u>: Cost 3 vext2 <u,5,6,4>, <3,u,5,6>
- 2590277734U, // <6,4,4,0>: Cost 3 vext1 <6,6,4,4>, LHS
- 3716017135U, // <6,4,4,1>: Cost 4 vext2 <4,1,6,4>, <4,1,6,4>
- 2642938944U, // <6,4,4,2>: Cost 3 vext2 <4,2,6,4>, <4,2,6,4>
- 3717344401U, // <6,4,4,3>: Cost 4 vext2 <4,3,6,4>, <4,3,6,4>
- 2712571088U, // <6,4,4,4>: Cost 3 vext3 <4,6,4,6>, <4,4,4,4>
- 2629668150U, // <6,4,4,5>: Cost 3 vext2 <2,0,6,4>, RHS
- 1637649636U, // <6,4,4,6>: Cost 2 vext3 <4,4,6,6>, <4,4,6,6>
- 2646257109U, // <6,4,4,7>: Cost 3 vext2 <4,7,6,4>, <4,7,6,4>
- 1637649636U, // <6,4,4,u>: Cost 2 vext3 <4,4,6,6>, <4,4,6,6>
- 2566398054U, // <6,4,5,0>: Cost 3 vext1 <2,6,4,5>, LHS
- 3760876805U, // <6,4,5,1>: Cost 4 vext3 <0,4,1,6>, <4,5,1,3>
- 2566399937U, // <6,4,5,2>: Cost 3 vext1 <2,6,4,5>, <2,6,4,5>
- 2584316418U, // <6,4,5,3>: Cost 3 vext1 <5,6,4,5>, <3,4,5,6>
- 2566401334U, // <6,4,5,4>: Cost 3 vext1 <2,6,4,5>, RHS
- 2584318028U, // <6,4,5,5>: Cost 3 vext1 <5,6,4,5>, <5,6,4,5>
- 1612287286U, // <6,4,5,6>: Cost 2 vext3 <0,2,4,6>, RHS
- 2852965686U, // <6,4,5,7>: Cost 3 vuzpr <5,6,7,4>, RHS
- 1612287304U, // <6,4,5,u>: Cost 2 vext3 <0,2,4,6>, RHS
- 1504608358U, // <6,4,6,0>: Cost 2 vext1 <4,6,4,6>, LHS
- 2578350838U, // <6,4,6,1>: Cost 3 vext1 <4,6,4,6>, <1,0,3,2>
- 2578351720U, // <6,4,6,2>: Cost 3 vext1 <4,6,4,6>, <2,2,2,2>
- 2578352278U, // <6,4,6,3>: Cost 3 vext1 <4,6,4,6>, <3,0,1,2>
- 1504611638U, // <6,4,6,4>: Cost 2 vext1 <4,6,4,6>, RHS
- 2578353872U, // <6,4,6,5>: Cost 3 vext1 <4,6,4,6>, <5,1,7,3>
- 2578354682U, // <6,4,6,6>: Cost 3 vext1 <4,6,4,6>, <6,2,7,3>
- 2578355194U, // <6,4,6,7>: Cost 3 vext1 <4,6,4,6>, <7,0,1,2>
- 1504614190U, // <6,4,6,u>: Cost 2 vext1 <4,6,4,6>, LHS
- 2572386406U, // <6,4,7,0>: Cost 3 vext1 <3,6,4,7>, LHS
- 2572387226U, // <6,4,7,1>: Cost 3 vext1 <3,6,4,7>, <1,2,3,4>
- 3640157902U, // <6,4,7,2>: Cost 4 vext1 <2,6,4,7>, <2,3,4,5>
- 2572389020U, // <6,4,7,3>: Cost 3 vext1 <3,6,4,7>, <3,6,4,7>
- 2572389686U, // <6,4,7,4>: Cost 3 vext1 <3,6,4,7>, RHS
- 2980497102U, // <6,4,7,5>: Cost 3 vzipr RHS, <2,3,4,5>
- 2980495564U, // <6,4,7,6>: Cost 3 vzipr RHS, <0,2,4,6>
- 4054239090U, // <6,4,7,7>: Cost 4 vzipr RHS, <2,5,4,7>
- 2572392238U, // <6,4,7,u>: Cost 3 vext1 <3,6,4,7>, LHS
- 1504608358U, // <6,4,u,0>: Cost 2 vext1 <4,6,4,6>, LHS
- 2629670702U, // <6,4,u,1>: Cost 3 vext2 <2,0,6,4>, LHS
- 2566424516U, // <6,4,u,2>: Cost 3 vext1 <2,6,4,u>, <2,6,4,u>
- 2584340994U, // <6,4,u,3>: Cost 3 vext1 <5,6,4,u>, <3,4,5,6>
- 1640156694U, // <6,4,u,4>: Cost 2 vext3 <4,u,4,6>, <4,u,4,6>
- 2629671066U, // <6,4,u,5>: Cost 3 vext2 <2,0,6,4>, RHS
- 1612287529U, // <6,4,u,6>: Cost 2 vext3 <0,2,4,6>, RHS
- 2852965929U, // <6,4,u,7>: Cost 3 vuzpr <5,6,7,4>, RHS
- 1612287547U, // <6,4,u,u>: Cost 2 vext3 <0,2,4,6>, RHS
- 3708723200U, // <6,5,0,0>: Cost 4 vext2 <2,u,6,5>, <0,0,0,0>
- 2634981478U, // <6,5,0,1>: Cost 3 vext2 <2,u,6,5>, LHS
- 3694125260U, // <6,5,0,2>: Cost 4 vext2 <0,4,6,5>, <0,2,4,6>
- 3779087962U, // <6,5,0,3>: Cost 4 vext3 <3,4,5,6>, <5,0,3,2>
- 3760877154U, // <6,5,0,4>: Cost 4 vext3 <0,4,1,6>, <5,0,4,1>
- 4195110916U, // <6,5,0,5>: Cost 4 vtrnr <5,6,7,0>, <5,5,5,5>
- 3696779775U, // <6,5,0,6>: Cost 4 vext2 <0,u,6,5>, <0,6,2,7>
- 1175212130U, // <6,5,0,7>: Cost 2 vrev <5,6,7,0>
- 1175285867U, // <6,5,0,u>: Cost 2 vrev <5,6,u,0>
- 2248445988U, // <6,5,1,0>: Cost 3 vrev <5,6,0,1>
- 3698107237U, // <6,5,1,1>: Cost 4 vext2 <1,1,6,5>, <1,1,6,5>
- 3708724118U, // <6,5,1,2>: Cost 4 vext2 <2,u,6,5>, <1,2,3,0>
- 3908575334U, // <6,5,1,3>: Cost 4 vuzpr <2,6,4,5>, LHS
- 3716023376U, // <6,5,1,4>: Cost 4 vext2 <4,1,6,5>, <1,4,5,6>
- 3708724368U, // <6,5,1,5>: Cost 4 vext2 <2,u,6,5>, <1,5,3,7>
- 3767733960U, // <6,5,1,6>: Cost 4 vext3 <1,5,4,6>, <5,1,6,4>
- 2712571600U, // <6,5,1,7>: Cost 3 vext3 <4,6,4,6>, <5,1,7,3>
- 2712571609U, // <6,5,1,u>: Cost 3 vext3 <4,6,4,6>, <5,1,u,3>
- 2578391142U, // <6,5,2,0>: Cost 3 vext1 <4,6,5,2>, LHS
- 3704079934U, // <6,5,2,1>: Cost 4 vext2 <2,1,6,5>, <2,1,6,5>
- 3708724840U, // <6,5,2,2>: Cost 4 vext2 <2,u,6,5>, <2,2,2,2>
- 3705407182U, // <6,5,2,3>: Cost 4 vext2 <2,3,6,5>, <2,3,4,5>
- 2578394422U, // <6,5,2,4>: Cost 3 vext1 <4,6,5,2>, RHS
- 3717351272U, // <6,5,2,5>: Cost 4 vext2 <4,3,6,5>, <2,5,3,6>
- 2634983354U, // <6,5,2,6>: Cost 3 vext2 <2,u,6,5>, <2,6,3,7>
- 3115486518U, // <6,5,2,7>: Cost 3 vtrnr <4,6,u,2>, RHS
- 2634983541U, // <6,5,2,u>: Cost 3 vext2 <2,u,6,5>, <2,u,6,5>
- 3708725398U, // <6,5,3,0>: Cost 4 vext2 <2,u,6,5>, <3,0,1,2>
- 3710052631U, // <6,5,3,1>: Cost 4 vext2 <3,1,6,5>, <3,1,6,5>
- 3708725606U, // <6,5,3,2>: Cost 4 vext2 <2,u,6,5>, <3,2,6,3>
- 3708725660U, // <6,5,3,3>: Cost 4 vext2 <2,u,6,5>, <3,3,3,3>
- 2643610114U, // <6,5,3,4>: Cost 3 vext2 <4,3,6,5>, <3,4,5,6>
- 3717352010U, // <6,5,3,5>: Cost 4 vext2 <4,3,6,5>, <3,5,4,6>
- 3773632358U, // <6,5,3,6>: Cost 4 vext3 <2,5,3,6>, <5,3,6,0>
- 2248978533U, // <6,5,3,7>: Cost 3 vrev <5,6,7,3>
- 2249052270U, // <6,5,3,u>: Cost 3 vrev <5,6,u,3>
- 2596323430U, // <6,5,4,0>: Cost 3 vext1 <7,6,5,4>, LHS
- 3716025328U, // <6,5,4,1>: Cost 4 vext2 <4,1,6,5>, <4,1,6,5>
- 3716688961U, // <6,5,4,2>: Cost 4 vext2 <4,2,6,5>, <4,2,6,5>
- 2643610770U, // <6,5,4,3>: Cost 3 vext2 <4,3,6,5>, <4,3,6,5>
- 2596326710U, // <6,5,4,4>: Cost 3 vext1 <7,6,5,4>, RHS
- 2634984758U, // <6,5,4,5>: Cost 3 vext2 <2,u,6,5>, RHS
- 3767734199U, // <6,5,4,6>: Cost 4 vext3 <1,5,4,6>, <5,4,6,0>
- 1643696070U, // <6,5,4,7>: Cost 2 vext3 <5,4,7,6>, <5,4,7,6>
- 1643769807U, // <6,5,4,u>: Cost 2 vext3 <5,4,u,6>, <5,4,u,6>
- 2578415718U, // <6,5,5,0>: Cost 3 vext1 <4,6,5,5>, LHS
- 3652158198U, // <6,5,5,1>: Cost 4 vext1 <4,6,5,5>, <1,0,3,2>
- 3652159080U, // <6,5,5,2>: Cost 4 vext1 <4,6,5,5>, <2,2,2,2>
- 3652159638U, // <6,5,5,3>: Cost 4 vext1 <4,6,5,5>, <3,0,1,2>
- 2578418998U, // <6,5,5,4>: Cost 3 vext1 <4,6,5,5>, RHS
- 2712571908U, // <6,5,5,5>: Cost 3 vext3 <4,6,4,6>, <5,5,5,5>
- 2718027790U, // <6,5,5,6>: Cost 3 vext3 <5,5,6,6>, <5,5,6,6>
- 2712571928U, // <6,5,5,7>: Cost 3 vext3 <4,6,4,6>, <5,5,7,7>
- 2712571937U, // <6,5,5,u>: Cost 3 vext3 <4,6,4,6>, <5,5,u,7>
- 2705346596U, // <6,5,6,0>: Cost 3 vext3 <3,4,5,6>, <5,6,0,1>
- 3767144496U, // <6,5,6,1>: Cost 4 vext3 <1,4,5,6>, <5,6,1,4>
- 3773116473U, // <6,5,6,2>: Cost 4 vext3 <2,4,5,6>, <5,6,2,4>
- 2705346626U, // <6,5,6,3>: Cost 3 vext3 <3,4,5,6>, <5,6,3,4>
- 2705346636U, // <6,5,6,4>: Cost 3 vext3 <3,4,5,6>, <5,6,4,5>
- 3908577217U, // <6,5,6,5>: Cost 4 vuzpr <2,6,4,5>, <2,6,4,5>
- 2578428728U, // <6,5,6,6>: Cost 3 vext1 <4,6,5,6>, <6,6,6,6>
- 2712572002U, // <6,5,6,7>: Cost 3 vext3 <4,6,4,6>, <5,6,7,0>
- 2705346668U, // <6,5,6,u>: Cost 3 vext3 <3,4,5,6>, <5,6,u,1>
- 2560516198U, // <6,5,7,0>: Cost 3 vext1 <1,6,5,7>, LHS
- 2560517363U, // <6,5,7,1>: Cost 3 vext1 <1,6,5,7>, <1,6,5,7>
- 2566490060U, // <6,5,7,2>: Cost 3 vext1 <2,6,5,7>, <2,6,5,7>
- 3634260118U, // <6,5,7,3>: Cost 4 vext1 <1,6,5,7>, <3,0,1,2>
- 2560519478U, // <6,5,7,4>: Cost 3 vext1 <1,6,5,7>, RHS
- 2980498650U, // <6,5,7,5>: Cost 3 vzipr RHS, <4,4,5,5>
- 2980497922U, // <6,5,7,6>: Cost 3 vzipr RHS, <3,4,5,6>
- 3103214902U, // <6,5,7,7>: Cost 3 vtrnr <2,6,3,7>, RHS
- 2560522030U, // <6,5,7,u>: Cost 3 vext1 <1,6,5,7>, LHS
- 2560524390U, // <6,5,u,0>: Cost 3 vext1 <1,6,5,u>, LHS
- 2560525556U, // <6,5,u,1>: Cost 3 vext1 <1,6,5,u>, <1,6,5,u>
- 2566498253U, // <6,5,u,2>: Cost 3 vext1 <2,6,5,u>, <2,6,5,u>
- 2646931439U, // <6,5,u,3>: Cost 3 vext2 <4,u,6,5>, <u,3,5,7>
- 2560527670U, // <6,5,u,4>: Cost 3 vext1 <1,6,5,u>, RHS
- 2634987674U, // <6,5,u,5>: Cost 3 vext2 <2,u,6,5>, RHS
- 2980506114U, // <6,5,u,6>: Cost 3 vzipr RHS, <3,4,5,6>
- 1175277674U, // <6,5,u,7>: Cost 2 vrev <5,6,7,u>
- 1175351411U, // <6,5,u,u>: Cost 2 vrev <5,6,u,u>
- 2578448486U, // <6,6,0,0>: Cost 3 vext1 <4,6,6,0>, LHS
- 1573191782U, // <6,6,0,1>: Cost 2 vext2 <4,u,6,6>, LHS
- 2686030124U, // <6,6,0,2>: Cost 3 vext3 <0,2,4,6>, <6,0,2,4>
- 3779088690U, // <6,6,0,3>: Cost 4 vext3 <3,4,5,6>, <6,0,3,1>
- 2687209788U, // <6,6,0,4>: Cost 3 vext3 <0,4,2,6>, <6,0,4,2>
- 3652194000U, // <6,6,0,5>: Cost 4 vext1 <4,6,6,0>, <5,1,7,3>
- 2254852914U, // <6,6,0,6>: Cost 3 vrev <6,6,6,0>
- 4041575734U, // <6,6,0,7>: Cost 4 vzipr <2,4,6,0>, RHS
- 1573192349U, // <6,6,0,u>: Cost 2 vext2 <4,u,6,6>, LHS
- 2646934262U, // <6,6,1,0>: Cost 3 vext2 <4,u,6,6>, <1,0,3,2>
- 2646934324U, // <6,6,1,1>: Cost 3 vext2 <4,u,6,6>, <1,1,1,1>
- 2646934422U, // <6,6,1,2>: Cost 3 vext2 <4,u,6,6>, <1,2,3,0>
- 2846785638U, // <6,6,1,3>: Cost 3 vuzpr <4,6,4,6>, LHS
- 3760951694U, // <6,6,1,4>: Cost 4 vext3 <0,4,2,6>, <6,1,4,3>
- 2646934672U, // <6,6,1,5>: Cost 3 vext2 <4,u,6,6>, <1,5,3,7>
- 2712572320U, // <6,6,1,6>: Cost 3 vext3 <4,6,4,6>, <6,1,6,3>
- 3775549865U, // <6,6,1,7>: Cost 4 vext3 <2,u,2,6>, <6,1,7,3>
- 2846785643U, // <6,6,1,u>: Cost 3 vuzpr <4,6,4,6>, LHS
- 3759772094U, // <6,6,2,0>: Cost 4 vext3 <0,2,4,6>, <6,2,0,6>
- 3704751676U, // <6,6,2,1>: Cost 4 vext2 <2,2,6,6>, <2,1,6,3>
- 2631009936U, // <6,6,2,2>: Cost 3 vext2 <2,2,6,6>, <2,2,6,6>
- 2646935206U, // <6,6,2,3>: Cost 3 vext2 <4,u,6,6>, <2,3,0,1>
- 3759772127U, // <6,6,2,4>: Cost 4 vext3 <0,2,4,6>, <6,2,4,3>
- 3704752004U, // <6,6,2,5>: Cost 4 vext2 <2,2,6,6>, <2,5,6,7>
- 2646935482U, // <6,6,2,6>: Cost 3 vext2 <4,u,6,6>, <2,6,3,7>
- 2712572410U, // <6,6,2,7>: Cost 3 vext3 <4,6,4,6>, <6,2,7,3>
- 2712572419U, // <6,6,2,u>: Cost 3 vext3 <4,6,4,6>, <6,2,u,3>
- 2646935702U, // <6,6,3,0>: Cost 3 vext2 <4,u,6,6>, <3,0,1,2>
- 3777024534U, // <6,6,3,1>: Cost 4 vext3 <3,1,4,6>, <6,3,1,4>
- 3704752453U, // <6,6,3,2>: Cost 4 vext2 <2,2,6,6>, <3,2,2,6>
- 2646935964U, // <6,6,3,3>: Cost 3 vext2 <4,u,6,6>, <3,3,3,3>
- 2705347122U, // <6,6,3,4>: Cost 3 vext3 <3,4,5,6>, <6,3,4,5>
- 3779678778U, // <6,6,3,5>: Cost 4 vext3 <3,5,4,6>, <6,3,5,4>
- 2657553069U, // <6,6,3,6>: Cost 3 vext2 <6,6,6,6>, <3,6,6,6>
- 4039609654U, // <6,6,3,7>: Cost 4 vzipr <2,1,6,3>, RHS
- 2708001366U, // <6,6,3,u>: Cost 3 vext3 <3,u,5,6>, <6,3,u,5>
- 2578481254U, // <6,6,4,0>: Cost 3 vext1 <4,6,6,4>, LHS
- 3652223734U, // <6,6,4,1>: Cost 4 vext1 <4,6,6,4>, <1,0,3,2>
- 3760951922U, // <6,6,4,2>: Cost 4 vext3 <0,4,2,6>, <6,4,2,6>
- 3779089019U, // <6,6,4,3>: Cost 4 vext3 <3,4,5,6>, <6,4,3,6>
- 1570540772U, // <6,6,4,4>: Cost 2 vext2 <4,4,6,6>, <4,4,6,6>
- 1573195062U, // <6,6,4,5>: Cost 2 vext2 <4,u,6,6>, RHS
- 2712572560U, // <6,6,4,6>: Cost 3 vext3 <4,6,4,6>, <6,4,6,0>
- 2723410591U, // <6,6,4,7>: Cost 3 vext3 <6,4,7,6>, <6,4,7,6>
- 1573195304U, // <6,6,4,u>: Cost 2 vext2 <4,u,6,6>, <4,u,6,6>
- 3640287334U, // <6,6,5,0>: Cost 4 vext1 <2,6,6,5>, LHS
- 2646937296U, // <6,6,5,1>: Cost 3 vext2 <4,u,6,6>, <5,1,7,3>
- 3640289235U, // <6,6,5,2>: Cost 4 vext1 <2,6,6,5>, <2,6,6,5>
- 3720679279U, // <6,6,5,3>: Cost 4 vext2 <4,u,6,6>, <5,3,7,0>
- 2646937542U, // <6,6,5,4>: Cost 3 vext2 <4,u,6,6>, <5,4,7,6>
- 2646937604U, // <6,6,5,5>: Cost 3 vext2 <4,u,6,6>, <5,5,5,5>
- 2646937698U, // <6,6,5,6>: Cost 3 vext2 <4,u,6,6>, <5,6,7,0>
- 2846788918U, // <6,6,5,7>: Cost 3 vuzpr <4,6,4,6>, RHS
- 2846788919U, // <6,6,5,u>: Cost 3 vuzpr <4,6,4,6>, RHS
- 1516699750U, // <6,6,6,0>: Cost 2 vext1 <6,6,6,6>, LHS
- 2590442230U, // <6,6,6,1>: Cost 3 vext1 <6,6,6,6>, <1,0,3,2>
- 2646938106U, // <6,6,6,2>: Cost 3 vext2 <4,u,6,6>, <6,2,7,3>
- 2590443670U, // <6,6,6,3>: Cost 3 vext1 <6,6,6,6>, <3,0,1,2>
- 1516703030U, // <6,6,6,4>: Cost 2 vext1 <6,6,6,6>, RHS
- 2590445264U, // <6,6,6,5>: Cost 3 vext1 <6,6,6,6>, <5,1,7,3>
- 296144182U, // <6,6,6,6>: Cost 1 vdup2 RHS
- 2712572738U, // <6,6,6,7>: Cost 3 vext3 <4,6,4,6>, <6,6,7,7>
- 296144182U, // <6,6,6,u>: Cost 1 vdup2 RHS
- 2566561894U, // <6,6,7,0>: Cost 3 vext1 <2,6,6,7>, LHS
- 3634332924U, // <6,6,7,1>: Cost 4 vext1 <1,6,6,7>, <1,6,6,7>
- 2566563797U, // <6,6,7,2>: Cost 3 vext1 <2,6,6,7>, <2,6,6,7>
- 2584480258U, // <6,6,7,3>: Cost 3 vext1 <5,6,6,7>, <3,4,5,6>
- 2566565174U, // <6,6,7,4>: Cost 3 vext1 <2,6,6,7>, RHS
- 2717438846U, // <6,6,7,5>: Cost 3 vext3 <5,4,7,6>, <6,7,5,4>
- 2980500280U, // <6,6,7,6>: Cost 3 vzipr RHS, <6,6,6,6>
- 1906756918U, // <6,6,7,7>: Cost 2 vzipr RHS, RHS
- 1906756919U, // <6,6,7,u>: Cost 2 vzipr RHS, RHS
- 1516699750U, // <6,6,u,0>: Cost 2 vext1 <6,6,6,6>, LHS
- 1573197614U, // <6,6,u,1>: Cost 2 vext2 <4,u,6,6>, LHS
- 2566571990U, // <6,6,u,2>: Cost 3 vext1 <2,6,6,u>, <2,6,6,u>
- 2846786205U, // <6,6,u,3>: Cost 3 vuzpr <4,6,4,6>, LHS
- 1516703030U, // <6,6,u,4>: Cost 2 vext1 <6,6,6,6>, RHS
- 1573197978U, // <6,6,u,5>: Cost 2 vext2 <4,u,6,6>, RHS
- 296144182U, // <6,6,u,6>: Cost 1 vdup2 RHS
- 1906765110U, // <6,6,u,7>: Cost 2 vzipr RHS, RHS
- 296144182U, // <6,6,u,u>: Cost 1 vdup2 RHS
- 1571209216U, // <6,7,0,0>: Cost 2 vext2 RHS, <0,0,0,0>
- 497467494U, // <6,7,0,1>: Cost 1 vext2 RHS, LHS
- 1571209380U, // <6,7,0,2>: Cost 2 vext2 RHS, <0,2,0,2>
- 2644951292U, // <6,7,0,3>: Cost 3 vext2 RHS, <0,3,1,0>
- 1571209554U, // <6,7,0,4>: Cost 2 vext2 RHS, <0,4,1,5>
- 1510756450U, // <6,7,0,5>: Cost 2 vext1 <5,6,7,0>, <5,6,7,0>
- 2644951542U, // <6,7,0,6>: Cost 3 vext2 RHS, <0,6,1,7>
- 2584499194U, // <6,7,0,7>: Cost 3 vext1 <5,6,7,0>, <7,0,1,2>
- 497468061U, // <6,7,0,u>: Cost 1 vext2 RHS, LHS
- 1571209974U, // <6,7,1,0>: Cost 2 vext2 RHS, <1,0,3,2>
- 1571210036U, // <6,7,1,1>: Cost 2 vext2 RHS, <1,1,1,1>
- 1571210134U, // <6,7,1,2>: Cost 2 vext2 RHS, <1,2,3,0>
- 1571210200U, // <6,7,1,3>: Cost 2 vext2 RHS, <1,3,1,3>
- 2644952098U, // <6,7,1,4>: Cost 3 vext2 RHS, <1,4,0,5>
- 1571210384U, // <6,7,1,5>: Cost 2 vext2 RHS, <1,5,3,7>
- 2644952271U, // <6,7,1,6>: Cost 3 vext2 RHS, <1,6,1,7>
- 2578535418U, // <6,7,1,7>: Cost 3 vext1 <4,6,7,1>, <7,0,1,2>
- 1571210605U, // <6,7,1,u>: Cost 2 vext2 RHS, <1,u,1,3>
- 2644952509U, // <6,7,2,0>: Cost 3 vext2 RHS, <2,0,1,2>
- 2644952582U, // <6,7,2,1>: Cost 3 vext2 RHS, <2,1,0,3>
- 1571210856U, // <6,7,2,2>: Cost 2 vext2 RHS, <2,2,2,2>
- 1571210918U, // <6,7,2,3>: Cost 2 vext2 RHS, <2,3,0,1>
- 2644952828U, // <6,7,2,4>: Cost 3 vext2 RHS, <2,4,0,6>
- 2633009028U, // <6,7,2,5>: Cost 3 vext2 <2,5,6,7>, <2,5,6,7>
- 1571211194U, // <6,7,2,6>: Cost 2 vext2 RHS, <2,6,3,7>
- 2668840938U, // <6,7,2,7>: Cost 3 vext2 RHS, <2,7,0,1>
- 1571211323U, // <6,7,2,u>: Cost 2 vext2 RHS, <2,u,0,1>
- 1571211414U, // <6,7,3,0>: Cost 2 vext2 RHS, <3,0,1,2>
- 2644953311U, // <6,7,3,1>: Cost 3 vext2 RHS, <3,1,0,3>
- 2644953390U, // <6,7,3,2>: Cost 3 vext2 RHS, <3,2,0,1>
- 1571211676U, // <6,7,3,3>: Cost 2 vext2 RHS, <3,3,3,3>
- 1571211778U, // <6,7,3,4>: Cost 2 vext2 RHS, <3,4,5,6>
- 2644953648U, // <6,7,3,5>: Cost 3 vext2 RHS, <3,5,1,7>
- 2644953720U, // <6,7,3,6>: Cost 3 vext2 RHS, <3,6,0,7>
- 2644953795U, // <6,7,3,7>: Cost 3 vext2 RHS, <3,7,0,1>
- 1571212062U, // <6,7,3,u>: Cost 2 vext2 RHS, <3,u,1,2>
- 1573202834U, // <6,7,4,0>: Cost 2 vext2 RHS, <4,0,5,1>
- 2644954058U, // <6,7,4,1>: Cost 3 vext2 RHS, <4,1,2,3>
- 2644954166U, // <6,7,4,2>: Cost 3 vext2 RHS, <4,2,5,3>
- 2644954258U, // <6,7,4,3>: Cost 3 vext2 RHS, <4,3,6,5>
- 1571212496U, // <6,7,4,4>: Cost 2 vext2 RHS, <4,4,4,4>
- 497470774U, // <6,7,4,5>: Cost 1 vext2 RHS, RHS
- 1573203316U, // <6,7,4,6>: Cost 2 vext2 RHS, <4,6,4,6>
- 2646281688U, // <6,7,4,7>: Cost 3 vext2 <4,7,6,7>, <4,7,6,7>
- 497471017U, // <6,7,4,u>: Cost 1 vext2 RHS, RHS
- 2644954696U, // <6,7,5,0>: Cost 3 vext2 RHS, <5,0,1,2>
- 1573203664U, // <6,7,5,1>: Cost 2 vext2 RHS, <5,1,7,3>
- 2644954878U, // <6,7,5,2>: Cost 3 vext2 RHS, <5,2,3,4>
- 2644954991U, // <6,7,5,3>: Cost 3 vext2 RHS, <5,3,7,0>
- 1571213254U, // <6,7,5,4>: Cost 2 vext2 RHS, <5,4,7,6>
- 1571213316U, // <6,7,5,5>: Cost 2 vext2 RHS, <5,5,5,5>
- 1571213410U, // <6,7,5,6>: Cost 2 vext2 RHS, <5,6,7,0>
- 1573204136U, // <6,7,5,7>: Cost 2 vext2 RHS, <5,7,5,7>
- 1573204217U, // <6,7,5,u>: Cost 2 vext2 RHS, <5,u,5,7>
- 2644955425U, // <6,7,6,0>: Cost 3 vext2 RHS, <6,0,1,2>
- 2644955561U, // <6,7,6,1>: Cost 3 vext2 RHS, <6,1,7,3>
- 1573204474U, // <6,7,6,2>: Cost 2 vext2 RHS, <6,2,7,3>
- 2644955698U, // <6,7,6,3>: Cost 3 vext2 RHS, <6,3,4,5>
- 2644955789U, // <6,7,6,4>: Cost 3 vext2 RHS, <6,4,5,6>
- 2644955889U, // <6,7,6,5>: Cost 3 vext2 RHS, <6,5,7,7>
- 1571214136U, // <6,7,6,6>: Cost 2 vext2 RHS, <6,6,6,6>
- 1571214158U, // <6,7,6,7>: Cost 2 vext2 RHS, <6,7,0,1>
- 1573204895U, // <6,7,6,u>: Cost 2 vext2 RHS, <6,u,0,1>
- 1573204986U, // <6,7,7,0>: Cost 2 vext2 RHS, <7,0,1,2>
- 2572608656U, // <6,7,7,1>: Cost 3 vext1 <3,6,7,7>, <1,5,3,7>
- 2644956362U, // <6,7,7,2>: Cost 3 vext2 RHS, <7,2,6,3>
- 2572610231U, // <6,7,7,3>: Cost 3 vext1 <3,6,7,7>, <3,6,7,7>
- 1573205350U, // <6,7,7,4>: Cost 2 vext2 RHS, <7,4,5,6>
- 2646947220U, // <6,7,7,5>: Cost 3 vext2 RHS, <7,5,1,7>
- 1516786498U, // <6,7,7,6>: Cost 2 vext1 <6,6,7,7>, <6,6,7,7>
- 1571214956U, // <6,7,7,7>: Cost 2 vext2 RHS, <7,7,7,7>
- 1573205634U, // <6,7,7,u>: Cost 2 vext2 RHS, <7,u,1,2>
- 1571215059U, // <6,7,u,0>: Cost 2 vext2 RHS, <u,0,1,2>
- 497473326U, // <6,7,u,1>: Cost 1 vext2 RHS, LHS
- 1571215237U, // <6,7,u,2>: Cost 2 vext2 RHS, <u,2,3,0>
- 1571215292U, // <6,7,u,3>: Cost 2 vext2 RHS, <u,3,0,1>
- 1571215423U, // <6,7,u,4>: Cost 2 vext2 RHS, <u,4,5,6>
- 497473690U, // <6,7,u,5>: Cost 1 vext2 RHS, RHS
- 1571215568U, // <6,7,u,6>: Cost 2 vext2 RHS, <u,6,3,7>
- 1573206272U, // <6,7,u,7>: Cost 2 vext2 RHS, <u,7,0,1>
- 497473893U, // <6,7,u,u>: Cost 1 vext2 RHS, LHS
- 1571217408U, // <6,u,0,0>: Cost 2 vext2 RHS, <0,0,0,0>
- 497475686U, // <6,u,0,1>: Cost 1 vext2 RHS, LHS
- 1571217572U, // <6,u,0,2>: Cost 2 vext2 RHS, <0,2,0,2>
- 2689865445U, // <6,u,0,3>: Cost 3 vext3 <0,u,2,6>, <u,0,3,2>
- 1571217746U, // <6,u,0,4>: Cost 2 vext2 RHS, <0,4,1,5>
- 1510830187U, // <6,u,0,5>: Cost 2 vext1 <5,6,u,0>, <5,6,u,0>
- 2644959734U, // <6,u,0,6>: Cost 3 vext2 RHS, <0,6,1,7>
- 1193130221U, // <6,u,0,7>: Cost 2 vrev <u,6,7,0>
- 497476253U, // <6,u,0,u>: Cost 1 vext2 RHS, LHS
- 1571218166U, // <6,u,1,0>: Cost 2 vext2 RHS, <1,0,3,2>
- 1571218228U, // <6,u,1,1>: Cost 2 vext2 RHS, <1,1,1,1>
- 1612289838U, // <6,u,1,2>: Cost 2 vext3 <0,2,4,6>, LHS
- 1571218392U, // <6,u,1,3>: Cost 2 vext2 RHS, <1,3,1,3>
- 2566663478U, // <6,u,1,4>: Cost 3 vext1 <2,6,u,1>, RHS
- 1571218576U, // <6,u,1,5>: Cost 2 vext2 RHS, <1,5,3,7>
- 2644960463U, // <6,u,1,6>: Cost 3 vext2 RHS, <1,6,1,7>
- 2717439835U, // <6,u,1,7>: Cost 3 vext3 <5,4,7,6>, <u,1,7,3>
- 1612289892U, // <6,u,1,u>: Cost 2 vext3 <0,2,4,6>, LHS
- 1504870502U, // <6,u,2,0>: Cost 2 vext1 <4,6,u,2>, LHS
- 2644960774U, // <6,u,2,1>: Cost 3 vext2 RHS, <2,1,0,3>
- 1571219048U, // <6,u,2,2>: Cost 2 vext2 RHS, <2,2,2,2>
- 1571219110U, // <6,u,2,3>: Cost 2 vext2 RHS, <2,3,0,1>
- 1504873782U, // <6,u,2,4>: Cost 2 vext1 <4,6,u,2>, RHS
- 2633017221U, // <6,u,2,5>: Cost 3 vext2 <2,5,6,u>, <2,5,6,u>
- 1571219386U, // <6,u,2,6>: Cost 2 vext2 RHS, <2,6,3,7>
- 2712573868U, // <6,u,2,7>: Cost 3 vext3 <4,6,4,6>, <u,2,7,3>
- 1571219515U, // <6,u,2,u>: Cost 2 vext2 RHS, <2,u,0,1>
- 1571219606U, // <6,u,3,0>: Cost 2 vext2 RHS, <3,0,1,2>
- 2644961503U, // <6,u,3,1>: Cost 3 vext2 RHS, <3,1,0,3>
- 2566678499U, // <6,u,3,2>: Cost 3 vext1 <2,6,u,3>, <2,6,u,3>
- 1571219868U, // <6,u,3,3>: Cost 2 vext2 RHS, <3,3,3,3>
- 1571219970U, // <6,u,3,4>: Cost 2 vext2 RHS, <3,4,5,6>
- 2689865711U, // <6,u,3,5>: Cost 3 vext3 <0,u,2,6>, <u,3,5,7>
- 2708002806U, // <6,u,3,6>: Cost 3 vext3 <3,u,5,6>, <u,3,6,5>
- 2644961987U, // <6,u,3,7>: Cost 3 vext2 RHS, <3,7,0,1>
- 1571220254U, // <6,u,3,u>: Cost 2 vext2 RHS, <3,u,1,2>
- 1571220370U, // <6,u,4,0>: Cost 2 vext2 RHS, <4,0,5,1>
- 2644962250U, // <6,u,4,1>: Cost 3 vext2 RHS, <4,1,2,3>
- 1661245476U, // <6,u,4,2>: Cost 2 vext3 <u,4,2,6>, <u,4,2,6>
- 2686031917U, // <6,u,4,3>: Cost 3 vext3 <0,2,4,6>, <u,4,3,6>
- 1571220688U, // <6,u,4,4>: Cost 2 vext2 RHS, <4,4,4,4>
- 497478967U, // <6,u,4,5>: Cost 1 vext2 RHS, RHS
- 1571220852U, // <6,u,4,6>: Cost 2 vext2 RHS, <4,6,4,6>
- 1661614161U, // <6,u,4,7>: Cost 2 vext3 <u,4,7,6>, <u,4,7,6>
- 497479209U, // <6,u,4,u>: Cost 1 vext2 RHS, RHS
- 2566692966U, // <6,u,5,0>: Cost 3 vext1 <2,6,u,5>, LHS
- 1571221200U, // <6,u,5,1>: Cost 2 vext2 RHS, <5,1,7,3>
- 2566694885U, // <6,u,5,2>: Cost 3 vext1 <2,6,u,5>, <2,6,u,5>
- 2689865855U, // <6,u,5,3>: Cost 3 vext3 <0,u,2,6>, <u,5,3,7>
- 1571221446U, // <6,u,5,4>: Cost 2 vext2 RHS, <5,4,7,6>
- 1571221508U, // <6,u,5,5>: Cost 2 vext2 RHS, <5,5,5,5>
- 1612290202U, // <6,u,5,6>: Cost 2 vext3 <0,2,4,6>, RHS
- 1571221672U, // <6,u,5,7>: Cost 2 vext2 RHS, <5,7,5,7>
- 1612290220U, // <6,u,5,u>: Cost 2 vext3 <0,2,4,6>, RHS
- 1504903270U, // <6,u,6,0>: Cost 2 vext1 <4,6,u,6>, LHS
- 2644963752U, // <6,u,6,1>: Cost 3 vext2 RHS, <6,1,7,2>
- 1571222010U, // <6,u,6,2>: Cost 2 vext2 RHS, <6,2,7,3>
- 2686032080U, // <6,u,6,3>: Cost 3 vext3 <0,2,4,6>, <u,6,3,7>
- 1504906550U, // <6,u,6,4>: Cost 2 vext1 <4,6,u,6>, RHS
- 2644964079U, // <6,u,6,5>: Cost 3 vext2 RHS, <6,5,7,5>
- 296144182U, // <6,u,6,6>: Cost 1 vdup2 RHS
- 1571222350U, // <6,u,6,7>: Cost 2 vext2 RHS, <6,7,0,1>
- 296144182U, // <6,u,6,u>: Cost 1 vdup2 RHS
- 1492967526U, // <6,u,7,0>: Cost 2 vext1 <2,6,u,7>, LHS
- 2560738574U, // <6,u,7,1>: Cost 3 vext1 <1,6,u,7>, <1,6,u,7>
- 1492969447U, // <6,u,7,2>: Cost 2 vext1 <2,6,u,7>, <2,6,u,7>
- 1906753692U, // <6,u,7,3>: Cost 2 vzipr RHS, LHS
- 1492970806U, // <6,u,7,4>: Cost 2 vext1 <2,6,u,7>, RHS
- 2980495761U, // <6,u,7,5>: Cost 3 vzipr RHS, <0,4,u,5>
- 1516860235U, // <6,u,7,6>: Cost 2 vext1 <6,6,u,7>, <6,6,u,7>
- 1906756936U, // <6,u,7,7>: Cost 2 vzipr RHS, RHS
- 1492973358U, // <6,u,7,u>: Cost 2 vext1 <2,6,u,7>, LHS
- 1492975718U, // <6,u,u,0>: Cost 2 vext1 <2,6,u,u>, LHS
- 497481518U, // <6,u,u,1>: Cost 1 vext2 RHS, LHS
- 1612290405U, // <6,u,u,2>: Cost 2 vext3 <0,2,4,6>, LHS
- 1571223484U, // <6,u,u,3>: Cost 2 vext2 RHS, <u,3,0,1>
- 1492978998U, // <6,u,u,4>: Cost 2 vext1 <2,6,u,u>, RHS
- 497481882U, // <6,u,u,5>: Cost 1 vext2 RHS, RHS
- 296144182U, // <6,u,u,6>: Cost 1 vdup2 RHS
- 1906765128U, // <6,u,u,7>: Cost 2 vzipr RHS, RHS
- 497482085U, // <6,u,u,u>: Cost 1 vext2 RHS, LHS
- 1638318080U, // <7,0,0,0>: Cost 2 vext3 RHS, <0,0,0,0>
- 1638318090U, // <7,0,0,1>: Cost 2 vext3 RHS, <0,0,1,1>
- 1638318100U, // <7,0,0,2>: Cost 2 vext3 RHS, <0,0,2,2>
- 3646442178U, // <7,0,0,3>: Cost 4 vext1 <3,7,0,0>, <3,7,0,0>
- 2712059941U, // <7,0,0,4>: Cost 3 vext3 RHS, <0,0,4,1>
- 2651603364U, // <7,0,0,5>: Cost 3 vext2 <5,6,7,0>, <0,5,1,6>
- 2590618445U, // <7,0,0,6>: Cost 3 vext1 <6,7,0,0>, <6,7,0,0>
- 3785801798U, // <7,0,0,7>: Cost 4 vext3 RHS, <0,0,7,7>
- 1638318153U, // <7,0,0,u>: Cost 2 vext3 RHS, <0,0,u,1>
- 1516879974U, // <7,0,1,0>: Cost 2 vext1 <6,7,0,1>, LHS
- 2693922911U, // <7,0,1,1>: Cost 3 vext3 <1,5,3,7>, <0,1,1,5>
- 564576358U, // <7,0,1,2>: Cost 1 vext3 RHS, LHS
- 2638996480U, // <7,0,1,3>: Cost 3 vext2 <3,5,7,0>, <1,3,5,7>
- 1516883254U, // <7,0,1,4>: Cost 2 vext1 <6,7,0,1>, RHS
- 2649613456U, // <7,0,1,5>: Cost 3 vext2 <5,3,7,0>, <1,5,3,7>
- 1516884814U, // <7,0,1,6>: Cost 2 vext1 <6,7,0,1>, <6,7,0,1>
- 2590626808U, // <7,0,1,7>: Cost 3 vext1 <6,7,0,1>, <7,0,1,0>
- 564576412U, // <7,0,1,u>: Cost 1 vext3 RHS, LHS
- 1638318244U, // <7,0,2,0>: Cost 2 vext3 RHS, <0,2,0,2>
- 2692743344U, // <7,0,2,1>: Cost 3 vext3 <1,3,5,7>, <0,2,1,5>
- 2712060084U, // <7,0,2,2>: Cost 3 vext3 RHS, <0,2,2,0>
- 2712060094U, // <7,0,2,3>: Cost 3 vext3 RHS, <0,2,3,1>
- 1638318284U, // <7,0,2,4>: Cost 2 vext3 RHS, <0,2,4,6>
- 2712060118U, // <7,0,2,5>: Cost 3 vext3 RHS, <0,2,5,7>
- 2651604922U, // <7,0,2,6>: Cost 3 vext2 <5,6,7,0>, <2,6,3,7>
- 2686255336U, // <7,0,2,7>: Cost 3 vext3 <0,2,7,7>, <0,2,7,7>
- 1638318316U, // <7,0,2,u>: Cost 2 vext3 RHS, <0,2,u,2>
- 2651605142U, // <7,0,3,0>: Cost 3 vext2 <5,6,7,0>, <3,0,1,2>
- 2712060156U, // <7,0,3,1>: Cost 3 vext3 RHS, <0,3,1,0>
- 2712060165U, // <7,0,3,2>: Cost 3 vext3 RHS, <0,3,2,0>
- 2651605404U, // <7,0,3,3>: Cost 3 vext2 <5,6,7,0>, <3,3,3,3>
- 2651605506U, // <7,0,3,4>: Cost 3 vext2 <5,6,7,0>, <3,4,5,6>
- 2638998111U, // <7,0,3,5>: Cost 3 vext2 <3,5,7,0>, <3,5,7,0>
- 2639661744U, // <7,0,3,6>: Cost 3 vext2 <3,6,7,0>, <3,6,7,0>
- 3712740068U, // <7,0,3,7>: Cost 4 vext2 <3,5,7,0>, <3,7,3,7>
- 2640989010U, // <7,0,3,u>: Cost 3 vext2 <3,u,7,0>, <3,u,7,0>
- 2712060232U, // <7,0,4,0>: Cost 3 vext3 RHS, <0,4,0,4>
- 1638318418U, // <7,0,4,1>: Cost 2 vext3 RHS, <0,4,1,5>
- 1638318428U, // <7,0,4,2>: Cost 2 vext3 RHS, <0,4,2,6>
- 3646474950U, // <7,0,4,3>: Cost 4 vext1 <3,7,0,4>, <3,7,0,4>
- 2712060270U, // <7,0,4,4>: Cost 3 vext3 RHS, <0,4,4,6>
- 1577864502U, // <7,0,4,5>: Cost 2 vext2 <5,6,7,0>, RHS
- 2651606388U, // <7,0,4,6>: Cost 3 vext2 <5,6,7,0>, <4,6,4,6>
- 3787792776U, // <7,0,4,7>: Cost 4 vext3 RHS, <0,4,7,5>
- 1638318481U, // <7,0,4,u>: Cost 2 vext3 RHS, <0,4,u,5>
- 2590654566U, // <7,0,5,0>: Cost 3 vext1 <6,7,0,5>, LHS
- 2651606736U, // <7,0,5,1>: Cost 3 vext2 <5,6,7,0>, <5,1,7,3>
- 2712060334U, // <7,0,5,2>: Cost 3 vext3 RHS, <0,5,2,7>
- 2649616239U, // <7,0,5,3>: Cost 3 vext2 <5,3,7,0>, <5,3,7,0>
- 2651606982U, // <7,0,5,4>: Cost 3 vext2 <5,6,7,0>, <5,4,7,6>
- 2651607044U, // <7,0,5,5>: Cost 3 vext2 <5,6,7,0>, <5,5,5,5>
- 1577865314U, // <7,0,5,6>: Cost 2 vext2 <5,6,7,0>, <5,6,7,0>
- 2651607208U, // <7,0,5,7>: Cost 3 vext2 <5,6,7,0>, <5,7,5,7>
- 1579192580U, // <7,0,5,u>: Cost 2 vext2 <5,u,7,0>, <5,u,7,0>
- 2688393709U, // <7,0,6,0>: Cost 3 vext3 <0,6,0,7>, <0,6,0,7>
- 2712060406U, // <7,0,6,1>: Cost 3 vext3 RHS, <0,6,1,7>
- 2688541183U, // <7,0,6,2>: Cost 3 vext3 <0,6,2,7>, <0,6,2,7>
- 2655588936U, // <7,0,6,3>: Cost 3 vext2 <6,3,7,0>, <6,3,7,0>
- 3762430481U, // <7,0,6,4>: Cost 4 vext3 <0,6,4,7>, <0,6,4,7>
- 2651607730U, // <7,0,6,5>: Cost 3 vext2 <5,6,7,0>, <6,5,0,7>
- 2651607864U, // <7,0,6,6>: Cost 3 vext2 <5,6,7,0>, <6,6,6,6>
- 2651607886U, // <7,0,6,7>: Cost 3 vext2 <5,6,7,0>, <6,7,0,1>
- 2688983605U, // <7,0,6,u>: Cost 3 vext3 <0,6,u,7>, <0,6,u,7>
- 2651608058U, // <7,0,7,0>: Cost 3 vext2 <5,6,7,0>, <7,0,1,2>
- 2932703334U, // <7,0,7,1>: Cost 3 vzipl <7,7,7,7>, LHS
- 3066921062U, // <7,0,7,2>: Cost 3 vtrnl <7,7,7,7>, LHS
- 3712742678U, // <7,0,7,3>: Cost 4 vext2 <3,5,7,0>, <7,3,5,7>
- 2651608422U, // <7,0,7,4>: Cost 3 vext2 <5,6,7,0>, <7,4,5,6>
- 2651608513U, // <7,0,7,5>: Cost 3 vext2 <5,6,7,0>, <7,5,6,7>
- 2663552532U, // <7,0,7,6>: Cost 3 vext2 <7,6,7,0>, <7,6,7,0>
- 2651608684U, // <7,0,7,7>: Cost 3 vext2 <5,6,7,0>, <7,7,7,7>
- 2651608706U, // <7,0,7,u>: Cost 3 vext2 <5,6,7,0>, <7,u,1,2>
- 1638318730U, // <7,0,u,0>: Cost 2 vext3 RHS, <0,u,0,2>
- 1638318738U, // <7,0,u,1>: Cost 2 vext3 RHS, <0,u,1,1>
- 564576925U, // <7,0,u,2>: Cost 1 vext3 RHS, LHS
- 2572765898U, // <7,0,u,3>: Cost 3 vext1 <3,7,0,u>, <3,7,0,u>
- 1638318770U, // <7,0,u,4>: Cost 2 vext3 RHS, <0,u,4,6>
- 1577867418U, // <7,0,u,5>: Cost 2 vext2 <5,6,7,0>, RHS
- 1516942165U, // <7,0,u,6>: Cost 2 vext1 <6,7,0,u>, <6,7,0,u>
- 2651609344U, // <7,0,u,7>: Cost 3 vext2 <5,6,7,0>, <u,7,0,1>
- 564576979U, // <7,0,u,u>: Cost 1 vext3 RHS, LHS
- 2590687334U, // <7,1,0,0>: Cost 3 vext1 <6,7,1,0>, LHS
- 2639003750U, // <7,1,0,1>: Cost 3 vext2 <3,5,7,1>, LHS
- 2793357414U, // <7,1,0,2>: Cost 3 vuzpl <7,0,1,2>, LHS
- 1638318838U, // <7,1,0,3>: Cost 2 vext3 RHS, <1,0,3,2>
- 2590690614U, // <7,1,0,4>: Cost 3 vext1 <6,7,1,0>, RHS
- 2712060679U, // <7,1,0,5>: Cost 3 vext3 RHS, <1,0,5,1>
- 2590692182U, // <7,1,0,6>: Cost 3 vext1 <6,7,1,0>, <6,7,1,0>
- 3785802521U, // <7,1,0,7>: Cost 4 vext3 RHS, <1,0,7,1>
- 1638318883U, // <7,1,0,u>: Cost 2 vext3 RHS, <1,0,u,2>
- 2712060715U, // <7,1,1,0>: Cost 3 vext3 RHS, <1,1,0,1>
- 1638318900U, // <7,1,1,1>: Cost 2 vext3 RHS, <1,1,1,1>
- 3774300994U, // <7,1,1,2>: Cost 4 vext3 <2,6,3,7>, <1,1,2,6>
- 1638318920U, // <7,1,1,3>: Cost 2 vext3 RHS, <1,1,3,3>
- 2712060755U, // <7,1,1,4>: Cost 3 vext3 RHS, <1,1,4,5>
- 2691416926U, // <7,1,1,5>: Cost 3 vext3 <1,1,5,7>, <1,1,5,7>
- 2590700375U, // <7,1,1,6>: Cost 3 vext1 <6,7,1,1>, <6,7,1,1>
- 3765158766U, // <7,1,1,7>: Cost 4 vext3 <1,1,5,7>, <1,1,7,5>
- 1638318965U, // <7,1,1,u>: Cost 2 vext3 RHS, <1,1,u,3>
- 2712060796U, // <7,1,2,0>: Cost 3 vext3 RHS, <1,2,0,1>
- 2712060807U, // <7,1,2,1>: Cost 3 vext3 RHS, <1,2,1,3>
- 3712747112U, // <7,1,2,2>: Cost 4 vext2 <3,5,7,1>, <2,2,2,2>
- 1638318998U, // <7,1,2,3>: Cost 2 vext3 RHS, <1,2,3,0>
- 2712060836U, // <7,1,2,4>: Cost 3 vext3 RHS, <1,2,4,5>
- 2712060843U, // <7,1,2,5>: Cost 3 vext3 RHS, <1,2,5,3>
- 2590708568U, // <7,1,2,6>: Cost 3 vext1 <6,7,1,2>, <6,7,1,2>
- 2735948730U, // <7,1,2,7>: Cost 3 vext3 RHS, <1,2,7,0>
- 1638319043U, // <7,1,2,u>: Cost 2 vext3 RHS, <1,2,u,0>
- 2712060876U, // <7,1,3,0>: Cost 3 vext3 RHS, <1,3,0,0>
- 1638319064U, // <7,1,3,1>: Cost 2 vext3 RHS, <1,3,1,3>
- 2712060894U, // <7,1,3,2>: Cost 3 vext3 RHS, <1,3,2,0>
- 2692596718U, // <7,1,3,3>: Cost 3 vext3 <1,3,3,7>, <1,3,3,7>
- 2712060917U, // <7,1,3,4>: Cost 3 vext3 RHS, <1,3,4,5>
- 1619002368U, // <7,1,3,5>: Cost 2 vext3 <1,3,5,7>, <1,3,5,7>
- 2692817929U, // <7,1,3,6>: Cost 3 vext3 <1,3,6,7>, <1,3,6,7>
- 2735948814U, // <7,1,3,7>: Cost 3 vext3 RHS, <1,3,7,3>
- 1619223579U, // <7,1,3,u>: Cost 2 vext3 <1,3,u,7>, <1,3,u,7>
- 2712060962U, // <7,1,4,0>: Cost 3 vext3 RHS, <1,4,0,5>
- 2712060971U, // <7,1,4,1>: Cost 3 vext3 RHS, <1,4,1,5>
- 2712060980U, // <7,1,4,2>: Cost 3 vext3 RHS, <1,4,2,5>
- 2712060989U, // <7,1,4,3>: Cost 3 vext3 RHS, <1,4,3,5>
- 3785802822U, // <7,1,4,4>: Cost 4 vext3 RHS, <1,4,4,5>
- 2639007030U, // <7,1,4,5>: Cost 3 vext2 <3,5,7,1>, RHS
- 2645642634U, // <7,1,4,6>: Cost 3 vext2 <4,6,7,1>, <4,6,7,1>
- 3719384520U, // <7,1,4,7>: Cost 4 vext2 <4,6,7,1>, <4,7,5,0>
- 2639007273U, // <7,1,4,u>: Cost 3 vext2 <3,5,7,1>, RHS
- 2572812390U, // <7,1,5,0>: Cost 3 vext1 <3,7,1,5>, LHS
- 2693776510U, // <7,1,5,1>: Cost 3 vext3 <1,5,1,7>, <1,5,1,7>
- 3774301318U, // <7,1,5,2>: Cost 4 vext3 <2,6,3,7>, <1,5,2,6>
- 1620182160U, // <7,1,5,3>: Cost 2 vext3 <1,5,3,7>, <1,5,3,7>
- 2572815670U, // <7,1,5,4>: Cost 3 vext1 <3,7,1,5>, RHS
- 3766486178U, // <7,1,5,5>: Cost 4 vext3 <1,3,5,7>, <1,5,5,7>
- 2651615331U, // <7,1,5,6>: Cost 3 vext2 <5,6,7,1>, <5,6,7,1>
- 2652278964U, // <7,1,5,7>: Cost 3 vext2 <5,7,7,1>, <5,7,7,1>
- 1620550845U, // <7,1,5,u>: Cost 2 vext3 <1,5,u,7>, <1,5,u,7>
- 3768108230U, // <7,1,6,0>: Cost 4 vext3 <1,6,0,7>, <1,6,0,7>
- 2694440143U, // <7,1,6,1>: Cost 3 vext3 <1,6,1,7>, <1,6,1,7>
- 2712061144U, // <7,1,6,2>: Cost 3 vext3 RHS, <1,6,2,7>
- 2694587617U, // <7,1,6,3>: Cost 3 vext3 <1,6,3,7>, <1,6,3,7>
- 3768403178U, // <7,1,6,4>: Cost 4 vext3 <1,6,4,7>, <1,6,4,7>
- 2694735091U, // <7,1,6,5>: Cost 3 vext3 <1,6,5,7>, <1,6,5,7>
- 3768550652U, // <7,1,6,6>: Cost 4 vext3 <1,6,6,7>, <1,6,6,7>
- 2652279630U, // <7,1,6,7>: Cost 3 vext2 <5,7,7,1>, <6,7,0,1>
- 2694956302U, // <7,1,6,u>: Cost 3 vext3 <1,6,u,7>, <1,6,u,7>
- 2645644282U, // <7,1,7,0>: Cost 3 vext2 <4,6,7,1>, <7,0,1,2>
- 2859062094U, // <7,1,7,1>: Cost 3 vuzpr <6,7,0,1>, <6,7,0,1>
- 3779462437U, // <7,1,7,2>: Cost 4 vext3 <3,5,1,7>, <1,7,2,3>
- 3121938534U, // <7,1,7,3>: Cost 3 vtrnr <5,7,5,7>, LHS
- 2554916150U, // <7,1,7,4>: Cost 3 vext1 <0,7,1,7>, RHS
- 3769140548U, // <7,1,7,5>: Cost 4 vext3 <1,7,5,7>, <1,7,5,7>
- 3726022164U, // <7,1,7,6>: Cost 4 vext2 <5,7,7,1>, <7,6,7,0>
- 2554918508U, // <7,1,7,7>: Cost 3 vext1 <0,7,1,7>, <7,7,7,7>
- 3121938539U, // <7,1,7,u>: Cost 3 vtrnr <5,7,5,7>, LHS
- 2572836966U, // <7,1,u,0>: Cost 3 vext1 <3,7,1,u>, LHS
- 1638319469U, // <7,1,u,1>: Cost 2 vext3 RHS, <1,u,1,3>
- 2712061299U, // <7,1,u,2>: Cost 3 vext3 RHS, <1,u,2,0>
- 1622173059U, // <7,1,u,3>: Cost 2 vext3 <1,u,3,7>, <1,u,3,7>
- 2572840246U, // <7,1,u,4>: Cost 3 vext1 <3,7,1,u>, RHS
- 1622320533U, // <7,1,u,5>: Cost 2 vext3 <1,u,5,7>, <1,u,5,7>
- 2696136094U, // <7,1,u,6>: Cost 3 vext3 <1,u,6,7>, <1,u,6,7>
- 2859060777U, // <7,1,u,7>: Cost 3 vuzpr <6,7,0,1>, RHS
- 1622541744U, // <7,1,u,u>: Cost 2 vext3 <1,u,u,7>, <1,u,u,7>
- 2712061364U, // <7,2,0,0>: Cost 3 vext3 RHS, <2,0,0,2>
- 2712061373U, // <7,2,0,1>: Cost 3 vext3 RHS, <2,0,1,2>
- 2712061380U, // <7,2,0,2>: Cost 3 vext3 RHS, <2,0,2,0>
- 2712061389U, // <7,2,0,3>: Cost 3 vext3 RHS, <2,0,3,0>
- 2712061404U, // <7,2,0,4>: Cost 3 vext3 RHS, <2,0,4,6>
- 2696725990U, // <7,2,0,5>: Cost 3 vext3 <2,0,5,7>, <2,0,5,7>
- 2712061417U, // <7,2,0,6>: Cost 3 vext3 RHS, <2,0,6,1>
- 3785803251U, // <7,2,0,7>: Cost 4 vext3 RHS, <2,0,7,2>
- 2696947201U, // <7,2,0,u>: Cost 3 vext3 <2,0,u,7>, <2,0,u,7>
- 2712061446U, // <7,2,1,0>: Cost 3 vext3 RHS, <2,1,0,3>
- 3785803276U, // <7,2,1,1>: Cost 4 vext3 RHS, <2,1,1,0>
- 3785803285U, // <7,2,1,2>: Cost 4 vext3 RHS, <2,1,2,0>
- 2712061471U, // <7,2,1,3>: Cost 3 vext3 RHS, <2,1,3,1>
- 2712061482U, // <7,2,1,4>: Cost 3 vext3 RHS, <2,1,4,3>
- 3766486576U, // <7,2,1,5>: Cost 4 vext3 <1,3,5,7>, <2,1,5,0>
- 2712061500U, // <7,2,1,6>: Cost 3 vext3 RHS, <2,1,6,3>
- 2602718850U, // <7,2,1,7>: Cost 3 vext1 <u,7,2,1>, <7,u,1,2>
- 2712061516U, // <7,2,1,u>: Cost 3 vext3 RHS, <2,1,u,1>
- 2712061525U, // <7,2,2,0>: Cost 3 vext3 RHS, <2,2,0,1>
- 2712061536U, // <7,2,2,1>: Cost 3 vext3 RHS, <2,2,1,3>
- 1638319720U, // <7,2,2,2>: Cost 2 vext3 RHS, <2,2,2,2>
- 1638319730U, // <7,2,2,3>: Cost 2 vext3 RHS, <2,2,3,3>
- 2712061565U, // <7,2,2,4>: Cost 3 vext3 RHS, <2,2,4,5>
- 2698053256U, // <7,2,2,5>: Cost 3 vext3 <2,2,5,7>, <2,2,5,7>
- 2712061584U, // <7,2,2,6>: Cost 3 vext3 RHS, <2,2,6,6>
- 3771795096U, // <7,2,2,7>: Cost 4 vext3 <2,2,5,7>, <2,2,7,5>
- 1638319775U, // <7,2,2,u>: Cost 2 vext3 RHS, <2,2,u,3>
- 1638319782U, // <7,2,3,0>: Cost 2 vext3 RHS, <2,3,0,1>
- 2693924531U, // <7,2,3,1>: Cost 3 vext3 <1,5,3,7>, <2,3,1,5>
- 2700560061U, // <7,2,3,2>: Cost 3 vext3 <2,6,3,7>, <2,3,2,6>
- 2693924551U, // <7,2,3,3>: Cost 3 vext3 <1,5,3,7>, <2,3,3,7>
- 1638319822U, // <7,2,3,4>: Cost 2 vext3 RHS, <2,3,4,5>
- 2698716889U, // <7,2,3,5>: Cost 3 vext3 <2,3,5,7>, <2,3,5,7>
- 2712061665U, // <7,2,3,6>: Cost 3 vext3 RHS, <2,3,6,6>
- 2735949540U, // <7,2,3,7>: Cost 3 vext3 RHS, <2,3,7,0>
- 1638319854U, // <7,2,3,u>: Cost 2 vext3 RHS, <2,3,u,1>
- 2712061692U, // <7,2,4,0>: Cost 3 vext3 RHS, <2,4,0,6>
- 2712061698U, // <7,2,4,1>: Cost 3 vext3 RHS, <2,4,1,3>
- 2712061708U, // <7,2,4,2>: Cost 3 vext3 RHS, <2,4,2,4>
- 2712061718U, // <7,2,4,3>: Cost 3 vext3 RHS, <2,4,3,5>
- 2712061728U, // <7,2,4,4>: Cost 3 vext3 RHS, <2,4,4,6>
- 2699380522U, // <7,2,4,5>: Cost 3 vext3 <2,4,5,7>, <2,4,5,7>
- 2712061740U, // <7,2,4,6>: Cost 3 vext3 RHS, <2,4,6,0>
- 3809691445U, // <7,2,4,7>: Cost 4 vext3 RHS, <2,4,7,0>
- 2699601733U, // <7,2,4,u>: Cost 3 vext3 <2,4,u,7>, <2,4,u,7>
- 2699675470U, // <7,2,5,0>: Cost 3 vext3 <2,5,0,7>, <2,5,0,7>
- 3766486867U, // <7,2,5,1>: Cost 4 vext3 <1,3,5,7>, <2,5,1,3>
- 2699822944U, // <7,2,5,2>: Cost 3 vext3 <2,5,2,7>, <2,5,2,7>
- 2692745065U, // <7,2,5,3>: Cost 3 vext3 <1,3,5,7>, <2,5,3,7>
- 2699970418U, // <7,2,5,4>: Cost 3 vext3 <2,5,4,7>, <2,5,4,7>
- 3766486907U, // <7,2,5,5>: Cost 4 vext3 <1,3,5,7>, <2,5,5,7>
- 2700117892U, // <7,2,5,6>: Cost 3 vext3 <2,5,6,7>, <2,5,6,7>
- 3771795334U, // <7,2,5,7>: Cost 4 vext3 <2,2,5,7>, <2,5,7,0>
- 2692745110U, // <7,2,5,u>: Cost 3 vext3 <1,3,5,7>, <2,5,u,7>
- 2572894310U, // <7,2,6,0>: Cost 3 vext1 <3,7,2,6>, LHS
- 2712061860U, // <7,2,6,1>: Cost 3 vext3 RHS, <2,6,1,3>
- 2700486577U, // <7,2,6,2>: Cost 3 vext3 <2,6,2,7>, <2,6,2,7>
- 1626818490U, // <7,2,6,3>: Cost 2 vext3 <2,6,3,7>, <2,6,3,7>
- 2572897590U, // <7,2,6,4>: Cost 3 vext1 <3,7,2,6>, RHS
- 2700707788U, // <7,2,6,5>: Cost 3 vext3 <2,6,5,7>, <2,6,5,7>
- 2700781525U, // <7,2,6,6>: Cost 3 vext3 <2,6,6,7>, <2,6,6,7>
- 3774597086U, // <7,2,6,7>: Cost 4 vext3 <2,6,7,7>, <2,6,7,7>
- 1627187175U, // <7,2,6,u>: Cost 2 vext3 <2,6,u,7>, <2,6,u,7>
- 2735949802U, // <7,2,7,0>: Cost 3 vext3 RHS, <2,7,0,1>
- 3780200434U, // <7,2,7,1>: Cost 4 vext3 <3,6,2,7>, <2,7,1,0>
- 3773564928U, // <7,2,7,2>: Cost 4 vext3 <2,5,2,7>, <2,7,2,5>
- 2986541158U, // <7,2,7,3>: Cost 3 vzipr <5,5,7,7>, LHS
- 2554989878U, // <7,2,7,4>: Cost 3 vext1 <0,7,2,7>, RHS
- 3775113245U, // <7,2,7,5>: Cost 4 vext3 <2,7,5,7>, <2,7,5,7>
- 4060283228U, // <7,2,7,6>: Cost 4 vzipr <5,5,7,7>, <0,4,2,6>
- 2554992236U, // <7,2,7,7>: Cost 3 vext1 <0,7,2,7>, <7,7,7,7>
- 2986541163U, // <7,2,7,u>: Cost 3 vzipr <5,5,7,7>, LHS
- 1638320187U, // <7,2,u,0>: Cost 2 vext3 RHS, <2,u,0,1>
- 2693924936U, // <7,2,u,1>: Cost 3 vext3 <1,5,3,7>, <2,u,1,5>
- 1638319720U, // <7,2,u,2>: Cost 2 vext3 RHS, <2,2,2,2>
- 1628145756U, // <7,2,u,3>: Cost 2 vext3 <2,u,3,7>, <2,u,3,7>
- 1638320227U, // <7,2,u,4>: Cost 2 vext3 RHS, <2,u,4,5>
- 2702035054U, // <7,2,u,5>: Cost 3 vext3 <2,u,5,7>, <2,u,5,7>
- 2702108791U, // <7,2,u,6>: Cost 3 vext3 <2,u,6,7>, <2,u,6,7>
- 2735949945U, // <7,2,u,7>: Cost 3 vext3 RHS, <2,u,7,0>
- 1628514441U, // <7,2,u,u>: Cost 2 vext3 <2,u,u,7>, <2,u,u,7>
- 2712062091U, // <7,3,0,0>: Cost 3 vext3 RHS, <3,0,0,0>
- 1638320278U, // <7,3,0,1>: Cost 2 vext3 RHS, <3,0,1,2>
- 2712062109U, // <7,3,0,2>: Cost 3 vext3 RHS, <3,0,2,0>
- 2590836886U, // <7,3,0,3>: Cost 3 vext1 <6,7,3,0>, <3,0,1,2>
- 2712062128U, // <7,3,0,4>: Cost 3 vext3 RHS, <3,0,4,1>
- 2712062138U, // <7,3,0,5>: Cost 3 vext3 RHS, <3,0,5,2>
- 2590839656U, // <7,3,0,6>: Cost 3 vext1 <6,7,3,0>, <6,7,3,0>
- 3311414017U, // <7,3,0,7>: Cost 4 vrev <3,7,7,0>
- 1638320341U, // <7,3,0,u>: Cost 2 vext3 RHS, <3,0,u,2>
- 2237164227U, // <7,3,1,0>: Cost 3 vrev <3,7,0,1>
- 2712062182U, // <7,3,1,1>: Cost 3 vext3 RHS, <3,1,1,1>
- 2712062193U, // <7,3,1,2>: Cost 3 vext3 RHS, <3,1,2,3>
- 2692745468U, // <7,3,1,3>: Cost 3 vext3 <1,3,5,7>, <3,1,3,5>
- 2712062214U, // <7,3,1,4>: Cost 3 vext3 RHS, <3,1,4,6>
- 2693925132U, // <7,3,1,5>: Cost 3 vext3 <1,5,3,7>, <3,1,5,3>
- 3768183059U, // <7,3,1,6>: Cost 4 vext3 <1,6,1,7>, <3,1,6,1>
- 2692745504U, // <7,3,1,7>: Cost 3 vext3 <1,3,5,7>, <3,1,7,5>
- 2696063273U, // <7,3,1,u>: Cost 3 vext3 <1,u,5,7>, <3,1,u,5>
- 2712062254U, // <7,3,2,0>: Cost 3 vext3 RHS, <3,2,0,1>
- 2712062262U, // <7,3,2,1>: Cost 3 vext3 RHS, <3,2,1,0>
- 2712062273U, // <7,3,2,2>: Cost 3 vext3 RHS, <3,2,2,2>
- 2712062280U, // <7,3,2,3>: Cost 3 vext3 RHS, <3,2,3,0>
- 2712062294U, // <7,3,2,4>: Cost 3 vext3 RHS, <3,2,4,5>
- 2712062302U, // <7,3,2,5>: Cost 3 vext3 RHS, <3,2,5,4>
- 2700560742U, // <7,3,2,6>: Cost 3 vext3 <2,6,3,7>, <3,2,6,3>
- 2712062319U, // <7,3,2,7>: Cost 3 vext3 RHS, <3,2,7,3>
- 2712062325U, // <7,3,2,u>: Cost 3 vext3 RHS, <3,2,u,0>
- 2712062335U, // <7,3,3,0>: Cost 3 vext3 RHS, <3,3,0,1>
- 2636368158U, // <7,3,3,1>: Cost 3 vext2 <3,1,7,3>, <3,1,7,3>
- 2637031791U, // <7,3,3,2>: Cost 3 vext2 <3,2,7,3>, <3,2,7,3>
- 1638320540U, // <7,3,3,3>: Cost 2 vext3 RHS, <3,3,3,3>
- 2712062374U, // <7,3,3,4>: Cost 3 vext3 RHS, <3,3,4,4>
- 2704689586U, // <7,3,3,5>: Cost 3 vext3 <3,3,5,7>, <3,3,5,7>
- 2590864235U, // <7,3,3,6>: Cost 3 vext1 <6,7,3,3>, <6,7,3,3>
- 2704837060U, // <7,3,3,7>: Cost 3 vext3 <3,3,7,7>, <3,3,7,7>
- 1638320540U, // <7,3,3,u>: Cost 2 vext3 RHS, <3,3,3,3>
- 2712062416U, // <7,3,4,0>: Cost 3 vext3 RHS, <3,4,0,1>
- 2712062426U, // <7,3,4,1>: Cost 3 vext3 RHS, <3,4,1,2>
- 2566981640U, // <7,3,4,2>: Cost 3 vext1 <2,7,3,4>, <2,7,3,4>
- 2712062447U, // <7,3,4,3>: Cost 3 vext3 RHS, <3,4,3,5>
- 2712062456U, // <7,3,4,4>: Cost 3 vext3 RHS, <3,4,4,5>
- 1638320642U, // <7,3,4,5>: Cost 2 vext3 RHS, <3,4,5,6>
- 2648313204U, // <7,3,4,6>: Cost 3 vext2 <5,1,7,3>, <4,6,4,6>
- 3311446789U, // <7,3,4,7>: Cost 4 vrev <3,7,7,4>
- 1638320669U, // <7,3,4,u>: Cost 2 vext3 RHS, <3,4,u,6>
- 2602819686U, // <7,3,5,0>: Cost 3 vext1 <u,7,3,5>, LHS
- 1574571728U, // <7,3,5,1>: Cost 2 vext2 <5,1,7,3>, <5,1,7,3>
- 2648977185U, // <7,3,5,2>: Cost 3 vext2 <5,2,7,3>, <5,2,7,3>
- 2705869378U, // <7,3,5,3>: Cost 3 vext3 <3,5,3,7>, <3,5,3,7>
- 2237491947U, // <7,3,5,4>: Cost 3 vrev <3,7,4,5>
- 2706016852U, // <7,3,5,5>: Cost 3 vext3 <3,5,5,7>, <3,5,5,7>
- 2648313954U, // <7,3,5,6>: Cost 3 vext2 <5,1,7,3>, <5,6,7,0>
- 2692745823U, // <7,3,5,7>: Cost 3 vext3 <1,3,5,7>, <3,5,7,0>
- 1579217159U, // <7,3,5,u>: Cost 2 vext2 <5,u,7,3>, <5,u,7,3>
- 2706311800U, // <7,3,6,0>: Cost 3 vext3 <3,6,0,7>, <3,6,0,7>
- 2654286249U, // <7,3,6,1>: Cost 3 vext2 <6,1,7,3>, <6,1,7,3>
- 1581208058U, // <7,3,6,2>: Cost 2 vext2 <6,2,7,3>, <6,2,7,3>
- 2706533011U, // <7,3,6,3>: Cost 3 vext3 <3,6,3,7>, <3,6,3,7>
- 2706606748U, // <7,3,6,4>: Cost 3 vext3 <3,6,4,7>, <3,6,4,7>
- 3780422309U, // <7,3,6,5>: Cost 4 vext3 <3,6,5,7>, <3,6,5,7>
- 2712062637U, // <7,3,6,6>: Cost 3 vext3 RHS, <3,6,6,6>
- 2706827959U, // <7,3,6,7>: Cost 3 vext3 <3,6,7,7>, <3,6,7,7>
- 1585189856U, // <7,3,6,u>: Cost 2 vext2 <6,u,7,3>, <6,u,7,3>
- 2693925571U, // <7,3,7,0>: Cost 3 vext3 <1,5,3,7>, <3,7,0,1>
- 2693925584U, // <7,3,7,1>: Cost 3 vext3 <1,5,3,7>, <3,7,1,5>
- 2700561114U, // <7,3,7,2>: Cost 3 vext3 <2,6,3,7>, <3,7,2,6>
- 2572978916U, // <7,3,7,3>: Cost 3 vext1 <3,7,3,7>, <3,7,3,7>
- 2693925611U, // <7,3,7,4>: Cost 3 vext3 <1,5,3,7>, <3,7,4,5>
- 2707344118U, // <7,3,7,5>: Cost 3 vext3 <3,7,5,7>, <3,7,5,7>
- 2654950894U, // <7,3,7,6>: Cost 3 vext2 <6,2,7,3>, <7,6,2,7>
- 2648315500U, // <7,3,7,7>: Cost 3 vext2 <5,1,7,3>, <7,7,7,7>
- 2693925643U, // <7,3,7,u>: Cost 3 vext3 <1,5,3,7>, <3,7,u,1>
- 2237221578U, // <7,3,u,0>: Cost 3 vrev <3,7,0,u>
- 1638320926U, // <7,3,u,1>: Cost 2 vext3 RHS, <3,u,1,2>
- 1593153452U, // <7,3,u,2>: Cost 2 vext2 <u,2,7,3>, <u,2,7,3>
- 1638320540U, // <7,3,u,3>: Cost 2 vext3 RHS, <3,3,3,3>
- 2237516526U, // <7,3,u,4>: Cost 3 vrev <3,7,4,u>
- 1638320966U, // <7,3,u,5>: Cost 2 vext3 RHS, <3,u,5,6>
- 2712062796U, // <7,3,u,6>: Cost 3 vext3 RHS, <3,u,6,3>
- 2692967250U, // <7,3,u,7>: Cost 3 vext3 <1,3,u,7>, <3,u,7,0>
- 1638320989U, // <7,3,u,u>: Cost 2 vext3 RHS, <3,u,u,2>
- 2651635712U, // <7,4,0,0>: Cost 3 vext2 <5,6,7,4>, <0,0,0,0>
- 1577893990U, // <7,4,0,1>: Cost 2 vext2 <5,6,7,4>, LHS
- 2651635876U, // <7,4,0,2>: Cost 3 vext2 <5,6,7,4>, <0,2,0,2>
- 3785804672U, // <7,4,0,3>: Cost 4 vext3 RHS, <4,0,3,1>
- 2651636050U, // <7,4,0,4>: Cost 3 vext2 <5,6,7,4>, <0,4,1,5>
- 1638468498U, // <7,4,0,5>: Cost 2 vext3 RHS, <4,0,5,1>
- 1638468508U, // <7,4,0,6>: Cost 2 vext3 RHS, <4,0,6,2>
- 3787795364U, // <7,4,0,7>: Cost 4 vext3 RHS, <4,0,7,1>
- 1640459181U, // <7,4,0,u>: Cost 2 vext3 RHS, <4,0,u,1>
- 2651636470U, // <7,4,1,0>: Cost 3 vext2 <5,6,7,4>, <1,0,3,2>
- 2651636532U, // <7,4,1,1>: Cost 3 vext2 <5,6,7,4>, <1,1,1,1>
- 2712062922U, // <7,4,1,2>: Cost 3 vext3 RHS, <4,1,2,3>
- 2639029248U, // <7,4,1,3>: Cost 3 vext2 <3,5,7,4>, <1,3,5,7>
- 2712062940U, // <7,4,1,4>: Cost 3 vext3 RHS, <4,1,4,3>
- 2712062946U, // <7,4,1,5>: Cost 3 vext3 RHS, <4,1,5,0>
- 2712062958U, // <7,4,1,6>: Cost 3 vext3 RHS, <4,1,6,3>
- 3785804791U, // <7,4,1,7>: Cost 4 vext3 RHS, <4,1,7,3>
- 2712062973U, // <7,4,1,u>: Cost 3 vext3 RHS, <4,1,u,0>
- 3785804807U, // <7,4,2,0>: Cost 4 vext3 RHS, <4,2,0,1>
- 3785804818U, // <7,4,2,1>: Cost 4 vext3 RHS, <4,2,1,3>
- 2651637352U, // <7,4,2,2>: Cost 3 vext2 <5,6,7,4>, <2,2,2,2>
- 2651637414U, // <7,4,2,3>: Cost 3 vext2 <5,6,7,4>, <2,3,0,1>
- 3716753194U, // <7,4,2,4>: Cost 4 vext2 <4,2,7,4>, <2,4,5,7>
- 2712063030U, // <7,4,2,5>: Cost 3 vext3 RHS, <4,2,5,3>
- 2712063036U, // <7,4,2,6>: Cost 3 vext3 RHS, <4,2,6,0>
- 3773123658U, // <7,4,2,7>: Cost 4 vext3 <2,4,5,7>, <4,2,7,5>
- 2712063054U, // <7,4,2,u>: Cost 3 vext3 RHS, <4,2,u,0>
- 2651637910U, // <7,4,3,0>: Cost 3 vext2 <5,6,7,4>, <3,0,1,2>
- 3712772348U, // <7,4,3,1>: Cost 4 vext2 <3,5,7,4>, <3,1,3,5>
- 3785804906U, // <7,4,3,2>: Cost 4 vext3 RHS, <4,3,2,1>
- 2651638172U, // <7,4,3,3>: Cost 3 vext2 <5,6,7,4>, <3,3,3,3>
- 2651638274U, // <7,4,3,4>: Cost 3 vext2 <5,6,7,4>, <3,4,5,6>
- 2639030883U, // <7,4,3,5>: Cost 3 vext2 <3,5,7,4>, <3,5,7,4>
- 2712063122U, // <7,4,3,6>: Cost 3 vext3 RHS, <4,3,6,5>
- 3712772836U, // <7,4,3,7>: Cost 4 vext2 <3,5,7,4>, <3,7,3,7>
- 2641021782U, // <7,4,3,u>: Cost 3 vext2 <3,u,7,4>, <3,u,7,4>
- 2714053802U, // <7,4,4,0>: Cost 3 vext3 RHS, <4,4,0,2>
- 3785804978U, // <7,4,4,1>: Cost 4 vext3 RHS, <4,4,1,1>
- 3716754505U, // <7,4,4,2>: Cost 4 vext2 <4,2,7,4>, <4,2,7,4>
- 3785804998U, // <7,4,4,3>: Cost 4 vext3 RHS, <4,4,3,3>
- 1638321360U, // <7,4,4,4>: Cost 2 vext3 RHS, <4,4,4,4>
- 1638468826U, // <7,4,4,5>: Cost 2 vext3 RHS, <4,4,5,5>
- 1638468836U, // <7,4,4,6>: Cost 2 vext3 RHS, <4,4,6,6>
- 3785215214U, // <7,4,4,7>: Cost 4 vext3 <4,4,7,7>, <4,4,7,7>
- 1640459509U, // <7,4,4,u>: Cost 2 vext3 RHS, <4,4,u,5>
- 1517207654U, // <7,4,5,0>: Cost 2 vext1 <6,7,4,5>, LHS
- 2573034640U, // <7,4,5,1>: Cost 3 vext1 <3,7,4,5>, <1,5,3,7>
- 2712063246U, // <7,4,5,2>: Cost 3 vext3 RHS, <4,5,2,3>
- 2573036267U, // <7,4,5,3>: Cost 3 vext1 <3,7,4,5>, <3,7,4,5>
- 1517210934U, // <7,4,5,4>: Cost 2 vext1 <6,7,4,5>, RHS
- 2711989549U, // <7,4,5,5>: Cost 3 vext3 <4,5,5,7>, <4,5,5,7>
- 564579638U, // <7,4,5,6>: Cost 1 vext3 RHS, RHS
- 2651639976U, // <7,4,5,7>: Cost 3 vext2 <5,6,7,4>, <5,7,5,7>
- 564579656U, // <7,4,5,u>: Cost 1 vext3 RHS, RHS
- 2712063307U, // <7,4,6,0>: Cost 3 vext3 RHS, <4,6,0,1>
- 3767668056U, // <7,4,6,1>: Cost 4 vext3 <1,5,3,7>, <4,6,1,5>
- 2651640314U, // <7,4,6,2>: Cost 3 vext2 <5,6,7,4>, <6,2,7,3>
- 2655621708U, // <7,4,6,3>: Cost 3 vext2 <6,3,7,4>, <6,3,7,4>
- 1638468980U, // <7,4,6,4>: Cost 2 vext3 RHS, <4,6,4,6>
- 2712063358U, // <7,4,6,5>: Cost 3 vext3 RHS, <4,6,5,7>
- 2712063367U, // <7,4,6,6>: Cost 3 vext3 RHS, <4,6,6,7>
- 2712210826U, // <7,4,6,7>: Cost 3 vext3 RHS, <4,6,7,1>
- 1638469012U, // <7,4,6,u>: Cost 2 vext3 RHS, <4,6,u,2>
- 2651640826U, // <7,4,7,0>: Cost 3 vext2 <5,6,7,4>, <7,0,1,2>
- 3773713830U, // <7,4,7,1>: Cost 4 vext3 <2,5,4,7>, <4,7,1,2>
- 3773713842U, // <7,4,7,2>: Cost 4 vext3 <2,5,4,7>, <4,7,2,5>
- 3780349372U, // <7,4,7,3>: Cost 4 vext3 <3,6,4,7>, <4,7,3,6>
- 2651641140U, // <7,4,7,4>: Cost 3 vext2 <5,6,7,4>, <7,4,0,1>
- 2712210888U, // <7,4,7,5>: Cost 3 vext3 RHS, <4,7,5,0>
- 2712210898U, // <7,4,7,6>: Cost 3 vext3 RHS, <4,7,6,1>
- 2651641452U, // <7,4,7,7>: Cost 3 vext2 <5,6,7,4>, <7,7,7,7>
- 2713538026U, // <7,4,7,u>: Cost 3 vext3 <4,7,u,7>, <4,7,u,7>
- 1517232230U, // <7,4,u,0>: Cost 2 vext1 <6,7,4,u>, LHS
- 1577899822U, // <7,4,u,1>: Cost 2 vext2 <5,6,7,4>, LHS
- 2712063489U, // <7,4,u,2>: Cost 3 vext3 RHS, <4,u,2,3>
- 2573060846U, // <7,4,u,3>: Cost 3 vext1 <3,7,4,u>, <3,7,4,u>
- 1640312342U, // <7,4,u,4>: Cost 2 vext3 RHS, <4,u,4,6>
- 1638469146U, // <7,4,u,5>: Cost 2 vext3 RHS, <4,u,5,1>
- 564579881U, // <7,4,u,6>: Cost 1 vext3 RHS, RHS
- 2714054192U, // <7,4,u,7>: Cost 3 vext3 RHS, <4,u,7,5>
- 564579899U, // <7,4,u,u>: Cost 1 vext3 RHS, RHS
- 2579038310U, // <7,5,0,0>: Cost 3 vext1 <4,7,5,0>, LHS
- 2636382310U, // <7,5,0,1>: Cost 3 vext2 <3,1,7,5>, LHS
- 2796339302U, // <7,5,0,2>: Cost 3 vuzpl <7,4,5,6>, LHS
- 3646810719U, // <7,5,0,3>: Cost 4 vext1 <3,7,5,0>, <3,5,7,0>
- 2712063586U, // <7,5,0,4>: Cost 3 vext3 RHS, <5,0,4,1>
- 2735951467U, // <7,5,0,5>: Cost 3 vext3 RHS, <5,0,5,1>
- 2735951476U, // <7,5,0,6>: Cost 3 vext3 RHS, <5,0,6,1>
- 2579043322U, // <7,5,0,7>: Cost 3 vext1 <4,7,5,0>, <7,0,1,2>
- 2636382877U, // <7,5,0,u>: Cost 3 vext2 <3,1,7,5>, LHS
- 2712211087U, // <7,5,1,0>: Cost 3 vext3 RHS, <5,1,0,1>
- 3698180916U, // <7,5,1,1>: Cost 4 vext2 <1,1,7,5>, <1,1,1,1>
- 3710124950U, // <7,5,1,2>: Cost 4 vext2 <3,1,7,5>, <1,2,3,0>
- 2636383232U, // <7,5,1,3>: Cost 3 vext2 <3,1,7,5>, <1,3,5,7>
- 2712211127U, // <7,5,1,4>: Cost 3 vext3 RHS, <5,1,4,5>
- 2590994128U, // <7,5,1,5>: Cost 3 vext1 <6,7,5,1>, <5,1,7,3>
- 2590995323U, // <7,5,1,6>: Cost 3 vext1 <6,7,5,1>, <6,7,5,1>
- 1638469328U, // <7,5,1,7>: Cost 2 vext3 RHS, <5,1,7,3>
- 1638469337U, // <7,5,1,u>: Cost 2 vext3 RHS, <5,1,u,3>
- 3785805536U, // <7,5,2,0>: Cost 4 vext3 RHS, <5,2,0,1>
- 3785805544U, // <7,5,2,1>: Cost 4 vext3 RHS, <5,2,1,0>
- 3704817288U, // <7,5,2,2>: Cost 4 vext2 <2,2,7,5>, <2,2,5,7>
- 2712063742U, // <7,5,2,3>: Cost 3 vext3 RHS, <5,2,3,4>
- 3716761386U, // <7,5,2,4>: Cost 4 vext2 <4,2,7,5>, <2,4,5,7>
- 2714054415U, // <7,5,2,5>: Cost 3 vext3 RHS, <5,2,5,3>
- 3774304024U, // <7,5,2,6>: Cost 4 vext3 <2,6,3,7>, <5,2,6,3>
- 2712063777U, // <7,5,2,7>: Cost 3 vext3 RHS, <5,2,7,3>
- 2712063787U, // <7,5,2,u>: Cost 3 vext3 RHS, <5,2,u,4>
- 3634888806U, // <7,5,3,0>: Cost 4 vext1 <1,7,5,3>, LHS
- 2636384544U, // <7,5,3,1>: Cost 3 vext2 <3,1,7,5>, <3,1,7,5>
- 3710790001U, // <7,5,3,2>: Cost 4 vext2 <3,2,7,5>, <3,2,7,5>
- 3710126492U, // <7,5,3,3>: Cost 4 vext2 <3,1,7,5>, <3,3,3,3>
- 3634892086U, // <7,5,3,4>: Cost 4 vext1 <1,7,5,3>, RHS
- 2639039076U, // <7,5,3,5>: Cost 3 vext2 <3,5,7,5>, <3,5,7,5>
- 3713444533U, // <7,5,3,6>: Cost 4 vext2 <3,6,7,5>, <3,6,7,5>
- 2693926767U, // <7,5,3,7>: Cost 3 vext3 <1,5,3,7>, <5,3,7,0>
- 2712063864U, // <7,5,3,u>: Cost 3 vext3 RHS, <5,3,u,0>
- 2579071078U, // <7,5,4,0>: Cost 3 vext1 <4,7,5,4>, LHS
- 3646841856U, // <7,5,4,1>: Cost 4 vext1 <3,7,5,4>, <1,3,5,7>
- 3716762698U, // <7,5,4,2>: Cost 4 vext2 <4,2,7,5>, <4,2,7,5>
- 3646843491U, // <7,5,4,3>: Cost 4 vext1 <3,7,5,4>, <3,5,7,4>
- 2579074358U, // <7,5,4,4>: Cost 3 vext1 <4,7,5,4>, RHS
- 2636385590U, // <7,5,4,5>: Cost 3 vext2 <3,1,7,5>, RHS
- 2645675406U, // <7,5,4,6>: Cost 3 vext2 <4,6,7,5>, <4,6,7,5>
- 1638322118U, // <7,5,4,7>: Cost 2 vext3 RHS, <5,4,7,6>
- 1638469583U, // <7,5,4,u>: Cost 2 vext3 RHS, <5,4,u,6>
- 2714054611U, // <7,5,5,0>: Cost 3 vext3 RHS, <5,5,0,1>
- 2652974800U, // <7,5,5,1>: Cost 3 vext2 <5,u,7,5>, <5,1,7,3>
- 3710127905U, // <7,5,5,2>: Cost 4 vext2 <3,1,7,5>, <5,2,7,3>
- 3785805808U, // <7,5,5,3>: Cost 4 vext3 RHS, <5,5,3,3>
- 2712211450U, // <7,5,5,4>: Cost 3 vext3 RHS, <5,5,4,4>
- 1638322180U, // <7,5,5,5>: Cost 2 vext3 RHS, <5,5,5,5>
- 2712064014U, // <7,5,5,6>: Cost 3 vext3 RHS, <5,5,6,6>
- 1638469656U, // <7,5,5,7>: Cost 2 vext3 RHS, <5,5,7,7>
- 1638469665U, // <7,5,5,u>: Cost 2 vext3 RHS, <5,5,u,7>
- 2712064036U, // <7,5,6,0>: Cost 3 vext3 RHS, <5,6,0,1>
- 2714054707U, // <7,5,6,1>: Cost 3 vext3 RHS, <5,6,1,7>
- 3785805879U, // <7,5,6,2>: Cost 4 vext3 RHS, <5,6,2,2>
- 2712064066U, // <7,5,6,3>: Cost 3 vext3 RHS, <5,6,3,4>
- 2712064076U, // <7,5,6,4>: Cost 3 vext3 RHS, <5,6,4,5>
- 2714054743U, // <7,5,6,5>: Cost 3 vext3 RHS, <5,6,5,7>
- 2712064096U, // <7,5,6,6>: Cost 3 vext3 RHS, <5,6,6,7>
- 1638322274U, // <7,5,6,7>: Cost 2 vext3 RHS, <5,6,7,0>
- 1638469739U, // <7,5,6,u>: Cost 2 vext3 RHS, <5,6,u,0>
- 1511325798U, // <7,5,7,0>: Cost 2 vext1 <5,7,5,7>, LHS
- 2692747392U, // <7,5,7,1>: Cost 3 vext3 <1,3,5,7>, <5,7,1,3>
- 2585069160U, // <7,5,7,2>: Cost 3 vext1 <5,7,5,7>, <2,2,2,2>
- 2573126390U, // <7,5,7,3>: Cost 3 vext1 <3,7,5,7>, <3,7,5,7>
- 1511329078U, // <7,5,7,4>: Cost 2 vext1 <5,7,5,7>, RHS
- 1638469800U, // <7,5,7,5>: Cost 2 vext3 RHS, <5,7,5,7>
- 2712211626U, // <7,5,7,6>: Cost 3 vext3 RHS, <5,7,6,0>
- 2712211636U, // <7,5,7,7>: Cost 3 vext3 RHS, <5,7,7,1>
- 1638469823U, // <7,5,7,u>: Cost 2 vext3 RHS, <5,7,u,3>
- 1511333990U, // <7,5,u,0>: Cost 2 vext1 <5,7,5,u>, LHS
- 2636388142U, // <7,5,u,1>: Cost 3 vext2 <3,1,7,5>, LHS
- 2712211671U, // <7,5,u,2>: Cost 3 vext3 RHS, <5,u,2,0>
- 2573134583U, // <7,5,u,3>: Cost 3 vext1 <3,7,5,u>, <3,7,5,u>
- 1511337270U, // <7,5,u,4>: Cost 2 vext1 <5,7,5,u>, RHS
- 1638469881U, // <7,5,u,5>: Cost 2 vext3 RHS, <5,u,5,7>
- 2712064258U, // <7,5,u,6>: Cost 3 vext3 RHS, <5,u,6,7>
- 1638469892U, // <7,5,u,7>: Cost 2 vext3 RHS, <5,u,7,0>
- 1638469904U, // <7,5,u,u>: Cost 2 vext3 RHS, <5,u,u,3>
- 2650324992U, // <7,6,0,0>: Cost 3 vext2 <5,4,7,6>, <0,0,0,0>
- 1576583270U, // <7,6,0,1>: Cost 2 vext2 <5,4,7,6>, LHS
- 2712064300U, // <7,6,0,2>: Cost 3 vext3 RHS, <6,0,2,4>
- 2255295336U, // <7,6,0,3>: Cost 3 vrev <6,7,3,0>
- 2712064316U, // <7,6,0,4>: Cost 3 vext3 RHS, <6,0,4,2>
- 2585088098U, // <7,6,0,5>: Cost 3 vext1 <5,7,6,0>, <5,6,7,0>
- 2735952204U, // <7,6,0,6>: Cost 3 vext3 RHS, <6,0,6,0>
- 2712211799U, // <7,6,0,7>: Cost 3 vext3 RHS, <6,0,7,2>
- 1576583837U, // <7,6,0,u>: Cost 2 vext2 <5,4,7,6>, LHS
- 1181340494U, // <7,6,1,0>: Cost 2 vrev <6,7,0,1>
- 2650325812U, // <7,6,1,1>: Cost 3 vext2 <5,4,7,6>, <1,1,1,1>
- 2650325910U, // <7,6,1,2>: Cost 3 vext2 <5,4,7,6>, <1,2,3,0>
- 2650325976U, // <7,6,1,3>: Cost 3 vext2 <5,4,7,6>, <1,3,1,3>
- 2579123510U, // <7,6,1,4>: Cost 3 vext1 <4,7,6,1>, RHS
- 2650326160U, // <7,6,1,5>: Cost 3 vext2 <5,4,7,6>, <1,5,3,7>
- 2714055072U, // <7,6,1,6>: Cost 3 vext3 RHS, <6,1,6,3>
- 2712064425U, // <7,6,1,7>: Cost 3 vext3 RHS, <6,1,7,3>
- 1181930390U, // <7,6,1,u>: Cost 2 vrev <6,7,u,1>
- 2712211897U, // <7,6,2,0>: Cost 3 vext3 RHS, <6,2,0,1>
- 2714055108U, // <7,6,2,1>: Cost 3 vext3 RHS, <6,2,1,3>
- 2650326632U, // <7,6,2,2>: Cost 3 vext2 <5,4,7,6>, <2,2,2,2>
- 2650326694U, // <7,6,2,3>: Cost 3 vext2 <5,4,7,6>, <2,3,0,1>
- 2714055137U, // <7,6,2,4>: Cost 3 vext3 RHS, <6,2,4,5>
- 2714055148U, // <7,6,2,5>: Cost 3 vext3 RHS, <6,2,5,7>
- 2650326970U, // <7,6,2,6>: Cost 3 vext2 <5,4,7,6>, <2,6,3,7>
- 1638470138U, // <7,6,2,7>: Cost 2 vext3 RHS, <6,2,7,3>
- 1638470147U, // <7,6,2,u>: Cost 2 vext3 RHS, <6,2,u,3>
- 2650327190U, // <7,6,3,0>: Cost 3 vext2 <5,4,7,6>, <3,0,1,2>
- 2255172441U, // <7,6,3,1>: Cost 3 vrev <6,7,1,3>
- 2255246178U, // <7,6,3,2>: Cost 3 vrev <6,7,2,3>
- 2650327452U, // <7,6,3,3>: Cost 3 vext2 <5,4,7,6>, <3,3,3,3>
- 2712064562U, // <7,6,3,4>: Cost 3 vext3 RHS, <6,3,4,5>
- 2650327627U, // <7,6,3,5>: Cost 3 vext2 <5,4,7,6>, <3,5,4,7>
- 3713452726U, // <7,6,3,6>: Cost 4 vext2 <3,6,7,6>, <3,6,7,6>
- 2700563016U, // <7,6,3,7>: Cost 3 vext3 <2,6,3,7>, <6,3,7,0>
- 2712064593U, // <7,6,3,u>: Cost 3 vext3 RHS, <6,3,u,0>
- 2650327954U, // <7,6,4,0>: Cost 3 vext2 <5,4,7,6>, <4,0,5,1>
- 2735952486U, // <7,6,4,1>: Cost 3 vext3 RHS, <6,4,1,3>
- 2735952497U, // <7,6,4,2>: Cost 3 vext3 RHS, <6,4,2,5>
- 2255328108U, // <7,6,4,3>: Cost 3 vrev <6,7,3,4>
- 2712212100U, // <7,6,4,4>: Cost 3 vext3 RHS, <6,4,4,6>
- 1576586550U, // <7,6,4,5>: Cost 2 vext2 <5,4,7,6>, RHS
- 2714055312U, // <7,6,4,6>: Cost 3 vext3 RHS, <6,4,6,0>
- 2712212126U, // <7,6,4,7>: Cost 3 vext3 RHS, <6,4,7,5>
- 1576586793U, // <7,6,4,u>: Cost 2 vext2 <5,4,7,6>, RHS
- 2579152998U, // <7,6,5,0>: Cost 3 vext1 <4,7,6,5>, LHS
- 2650328784U, // <7,6,5,1>: Cost 3 vext2 <5,4,7,6>, <5,1,7,3>
- 2714055364U, // <7,6,5,2>: Cost 3 vext3 RHS, <6,5,2,7>
- 3785806538U, // <7,6,5,3>: Cost 4 vext3 RHS, <6,5,3,4>
- 1576587206U, // <7,6,5,4>: Cost 2 vext2 <5,4,7,6>, <5,4,7,6>
- 2650329092U, // <7,6,5,5>: Cost 3 vext2 <5,4,7,6>, <5,5,5,5>
- 2650329186U, // <7,6,5,6>: Cost 3 vext2 <5,4,7,6>, <5,6,7,0>
- 2712064753U, // <7,6,5,7>: Cost 3 vext3 RHS, <6,5,7,7>
- 1181963162U, // <7,6,5,u>: Cost 2 vrev <6,7,u,5>
- 2714055421U, // <7,6,6,0>: Cost 3 vext3 RHS, <6,6,0,1>
- 2714055432U, // <7,6,6,1>: Cost 3 vext3 RHS, <6,6,1,3>
- 2650329594U, // <7,6,6,2>: Cost 3 vext2 <5,4,7,6>, <6,2,7,3>
- 3785806619U, // <7,6,6,3>: Cost 4 vext3 RHS, <6,6,3,4>
- 2712212260U, // <7,6,6,4>: Cost 3 vext3 RHS, <6,6,4,4>
- 2714055472U, // <7,6,6,5>: Cost 3 vext3 RHS, <6,6,5,7>
- 1638323000U, // <7,6,6,6>: Cost 2 vext3 RHS, <6,6,6,6>
- 1638470466U, // <7,6,6,7>: Cost 2 vext3 RHS, <6,6,7,7>
- 1638470475U, // <7,6,6,u>: Cost 2 vext3 RHS, <6,6,u,7>
- 1638323022U, // <7,6,7,0>: Cost 2 vext3 RHS, <6,7,0,1>
- 2712064854U, // <7,6,7,1>: Cost 3 vext3 RHS, <6,7,1,0>
- 2712064865U, // <7,6,7,2>: Cost 3 vext3 RHS, <6,7,2,2>
- 2712064872U, // <7,6,7,3>: Cost 3 vext3 RHS, <6,7,3,0>
- 1638323062U, // <7,6,7,4>: Cost 2 vext3 RHS, <6,7,4,5>
- 2712064894U, // <7,6,7,5>: Cost 3 vext3 RHS, <6,7,5,4>
- 2712064905U, // <7,6,7,6>: Cost 3 vext3 RHS, <6,7,6,6>
- 2712064915U, // <7,6,7,7>: Cost 3 vext3 RHS, <6,7,7,7>
- 1638323094U, // <7,6,7,u>: Cost 2 vext3 RHS, <6,7,u,1>
- 1638470559U, // <7,6,u,0>: Cost 2 vext3 RHS, <6,u,0,1>
- 1576589102U, // <7,6,u,1>: Cost 2 vext2 <5,4,7,6>, LHS
- 2712212402U, // <7,6,u,2>: Cost 3 vext3 RHS, <6,u,2,2>
- 2712212409U, // <7,6,u,3>: Cost 3 vext3 RHS, <6,u,3,0>
- 1638470599U, // <7,6,u,4>: Cost 2 vext3 RHS, <6,u,4,5>
- 1576589466U, // <7,6,u,5>: Cost 2 vext2 <5,4,7,6>, RHS
- 1638323000U, // <7,6,u,6>: Cost 2 vext3 RHS, <6,6,6,6>
- 1638470624U, // <7,6,u,7>: Cost 2 vext3 RHS, <6,u,7,3>
- 1638470631U, // <7,6,u,u>: Cost 2 vext3 RHS, <6,u,u,1>
- 2712065007U, // <7,7,0,0>: Cost 3 vext3 RHS, <7,0,0,0>
- 1638323194U, // <7,7,0,1>: Cost 2 vext3 RHS, <7,0,1,2>
- 2712065025U, // <7,7,0,2>: Cost 3 vext3 RHS, <7,0,2,0>
- 3646958337U, // <7,7,0,3>: Cost 4 vext1 <3,7,7,0>, <3,7,7,0>
- 2712065044U, // <7,7,0,4>: Cost 3 vext3 RHS, <7,0,4,1>
- 2585161907U, // <7,7,0,5>: Cost 3 vext1 <5,7,7,0>, <5,7,7,0>
- 2591134604U, // <7,7,0,6>: Cost 3 vext1 <6,7,7,0>, <6,7,7,0>
- 2591134714U, // <7,7,0,7>: Cost 3 vext1 <6,7,7,0>, <7,0,1,2>
- 1638323257U, // <7,7,0,u>: Cost 2 vext3 RHS, <7,0,u,2>
- 2712065091U, // <7,7,1,0>: Cost 3 vext3 RHS, <7,1,0,3>
- 2712065098U, // <7,7,1,1>: Cost 3 vext3 RHS, <7,1,1,1>
- 2712065109U, // <7,7,1,2>: Cost 3 vext3 RHS, <7,1,2,3>
- 2692748384U, // <7,7,1,3>: Cost 3 vext3 <1,3,5,7>, <7,1,3,5>
- 2585169206U, // <7,7,1,4>: Cost 3 vext1 <5,7,7,1>, RHS
- 2693928048U, // <7,7,1,5>: Cost 3 vext3 <1,5,3,7>, <7,1,5,3>
- 2585170766U, // <7,7,1,6>: Cost 3 vext1 <5,7,7,1>, <6,7,0,1>
- 2735953024U, // <7,7,1,7>: Cost 3 vext3 RHS, <7,1,7,1>
- 2695918731U, // <7,7,1,u>: Cost 3 vext3 <1,u,3,7>, <7,1,u,3>
- 3770471574U, // <7,7,2,0>: Cost 4 vext3 <2,0,5,7>, <7,2,0,5>
- 3785807002U, // <7,7,2,1>: Cost 4 vext3 RHS, <7,2,1,0>
- 2712065189U, // <7,7,2,2>: Cost 3 vext3 RHS, <7,2,2,2>
- 2712065196U, // <7,7,2,3>: Cost 3 vext3 RHS, <7,2,3,0>
- 3773125818U, // <7,7,2,4>: Cost 4 vext3 <2,4,5,7>, <7,2,4,5>
- 3766490305U, // <7,7,2,5>: Cost 4 vext3 <1,3,5,7>, <7,2,5,3>
- 2700563658U, // <7,7,2,6>: Cost 3 vext3 <2,6,3,7>, <7,2,6,3>
- 2735953107U, // <7,7,2,7>: Cost 3 vext3 RHS, <7,2,7,3>
- 2701890780U, // <7,7,2,u>: Cost 3 vext3 <2,u,3,7>, <7,2,u,3>
- 2712065251U, // <7,7,3,0>: Cost 3 vext3 RHS, <7,3,0,1>
- 3766490350U, // <7,7,3,1>: Cost 4 vext3 <1,3,5,7>, <7,3,1,3>
- 3774305530U, // <7,7,3,2>: Cost 4 vext3 <2,6,3,7>, <7,3,2,6>
- 2637728196U, // <7,7,3,3>: Cost 3 vext2 <3,3,7,7>, <3,3,7,7>
- 2712065291U, // <7,7,3,4>: Cost 3 vext3 RHS, <7,3,4,5>
- 2585186486U, // <7,7,3,5>: Cost 3 vext1 <5,7,7,3>, <5,7,7,3>
- 2639719095U, // <7,7,3,6>: Cost 3 vext2 <3,6,7,7>, <3,6,7,7>
- 2640382728U, // <7,7,3,7>: Cost 3 vext2 <3,7,7,7>, <3,7,7,7>
- 2641046361U, // <7,7,3,u>: Cost 3 vext2 <3,u,7,7>, <3,u,7,7>
- 2712212792U, // <7,7,4,0>: Cost 3 vext3 RHS, <7,4,0,5>
- 3646989312U, // <7,7,4,1>: Cost 4 vext1 <3,7,7,4>, <1,3,5,7>
- 3785807176U, // <7,7,4,2>: Cost 4 vext3 RHS, <7,4,2,3>
- 3646991109U, // <7,7,4,3>: Cost 4 vext1 <3,7,7,4>, <3,7,7,4>
- 2712065371U, // <7,7,4,4>: Cost 3 vext3 RHS, <7,4,4,4>
- 1638323558U, // <7,7,4,5>: Cost 2 vext3 RHS, <7,4,5,6>
- 2712212845U, // <7,7,4,6>: Cost 3 vext3 RHS, <7,4,6,4>
- 2591167846U, // <7,7,4,7>: Cost 3 vext1 <6,7,7,4>, <7,4,5,6>
- 1638323585U, // <7,7,4,u>: Cost 2 vext3 RHS, <7,4,u,6>
- 2585198694U, // <7,7,5,0>: Cost 3 vext1 <5,7,7,5>, LHS
- 2712212884U, // <7,7,5,1>: Cost 3 vext3 RHS, <7,5,1,7>
- 3711471393U, // <7,7,5,2>: Cost 4 vext2 <3,3,7,7>, <5,2,7,3>
- 2649673590U, // <7,7,5,3>: Cost 3 vext2 <5,3,7,7>, <5,3,7,7>
- 2712065455U, // <7,7,5,4>: Cost 3 vext3 RHS, <7,5,4,7>
- 1577259032U, // <7,7,5,5>: Cost 2 vext2 <5,5,7,7>, <5,5,7,7>
- 2712065473U, // <7,7,5,6>: Cost 3 vext3 RHS, <7,5,6,7>
- 2712212936U, // <7,7,5,7>: Cost 3 vext3 RHS, <7,5,7,5>
- 1579249931U, // <7,7,5,u>: Cost 2 vext2 <5,u,7,7>, <5,u,7,7>
- 2591178854U, // <7,7,6,0>: Cost 3 vext1 <6,7,7,6>, LHS
- 2735953374U, // <7,7,6,1>: Cost 3 vext3 RHS, <7,6,1,0>
- 2712212974U, // <7,7,6,2>: Cost 3 vext3 RHS, <7,6,2,7>
- 2655646287U, // <7,7,6,3>: Cost 3 vext2 <6,3,7,7>, <6,3,7,7>
- 2591182134U, // <7,7,6,4>: Cost 3 vext1 <6,7,7,6>, RHS
- 2656973553U, // <7,7,6,5>: Cost 3 vext2 <6,5,7,7>, <6,5,7,7>
- 1583895362U, // <7,7,6,6>: Cost 2 vext2 <6,6,7,7>, <6,6,7,7>
- 2712065556U, // <7,7,6,7>: Cost 3 vext3 RHS, <7,6,7,0>
- 1585222628U, // <7,7,6,u>: Cost 2 vext2 <6,u,7,7>, <6,u,7,7>
- 1523417190U, // <7,7,7,0>: Cost 2 vext1 <7,7,7,7>, LHS
- 2597159670U, // <7,7,7,1>: Cost 3 vext1 <7,7,7,7>, <1,0,3,2>
- 2597160552U, // <7,7,7,2>: Cost 3 vext1 <7,7,7,7>, <2,2,2,2>
- 2597161110U, // <7,7,7,3>: Cost 3 vext1 <7,7,7,7>, <3,0,1,2>
- 1523420470U, // <7,7,7,4>: Cost 2 vext1 <7,7,7,7>, RHS
- 2651002296U, // <7,7,7,5>: Cost 3 vext2 <5,5,7,7>, <7,5,5,7>
- 2657637906U, // <7,7,7,6>: Cost 3 vext2 <6,6,7,7>, <7,6,6,7>
- 363253046U, // <7,7,7,7>: Cost 1 vdup3 RHS
- 363253046U, // <7,7,7,u>: Cost 1 vdup3 RHS
- 1523417190U, // <7,7,u,0>: Cost 2 vext1 <7,7,7,7>, LHS
- 1638471298U, // <7,7,u,1>: Cost 2 vext3 RHS, <7,u,1,2>
- 2712213132U, // <7,7,u,2>: Cost 3 vext3 RHS, <7,u,2,3>
- 2712213138U, // <7,7,u,3>: Cost 3 vext3 RHS, <7,u,3,0>
- 1523420470U, // <7,7,u,4>: Cost 2 vext1 <7,7,7,7>, RHS
- 1638471338U, // <7,7,u,5>: Cost 2 vext3 RHS, <7,u,5,6>
- 1595840756U, // <7,7,u,6>: Cost 2 vext2 <u,6,7,7>, <u,6,7,7>
- 363253046U, // <7,7,u,7>: Cost 1 vdup3 RHS
- 363253046U, // <7,7,u,u>: Cost 1 vdup3 RHS
- 1638318080U, // <7,u,0,0>: Cost 2 vext3 RHS, <0,0,0,0>
- 1638323923U, // <7,u,0,1>: Cost 2 vext3 RHS, <u,0,1,2>
- 1662211804U, // <7,u,0,2>: Cost 2 vext3 RHS, <u,0,2,2>
- 1638323941U, // <7,u,0,3>: Cost 2 vext3 RHS, <u,0,3,2>
- 2712065773U, // <7,u,0,4>: Cost 3 vext3 RHS, <u,0,4,1>
- 1662359286U, // <7,u,0,5>: Cost 2 vext3 RHS, <u,0,5,1>
- 1662359296U, // <7,u,0,6>: Cost 2 vext3 RHS, <u,0,6,2>
- 2987150664U, // <7,u,0,7>: Cost 3 vzipr <5,6,7,0>, RHS
- 1638323986U, // <7,u,0,u>: Cost 2 vext3 RHS, <u,0,u,2>
- 1517469798U, // <7,u,1,0>: Cost 2 vext1 <6,7,u,1>, LHS
- 1638318900U, // <7,u,1,1>: Cost 2 vext3 RHS, <1,1,1,1>
- 564582190U, // <7,u,1,2>: Cost 1 vext3 RHS, LHS
- 1638324023U, // <7,u,1,3>: Cost 2 vext3 RHS, <u,1,3,3>
- 1517473078U, // <7,u,1,4>: Cost 2 vext1 <6,7,u,1>, RHS
- 2693928777U, // <7,u,1,5>: Cost 3 vext3 <1,5,3,7>, <u,1,5,3>
- 1517474710U, // <7,u,1,6>: Cost 2 vext1 <6,7,u,1>, <6,7,u,1>
- 1640462171U, // <7,u,1,7>: Cost 2 vext3 RHS, <u,1,7,3>
- 564582244U, // <7,u,1,u>: Cost 1 vext3 RHS, LHS
- 1638318244U, // <7,u,2,0>: Cost 2 vext3 RHS, <0,2,0,2>
- 2712065907U, // <7,u,2,1>: Cost 3 vext3 RHS, <u,2,1,0>
- 1638319720U, // <7,u,2,2>: Cost 2 vext3 RHS, <2,2,2,2>
- 1638324101U, // <7,u,2,3>: Cost 2 vext3 RHS, <u,2,3,0>
- 1638318284U, // <7,u,2,4>: Cost 2 vext3 RHS, <0,2,4,6>
- 2712065947U, // <7,u,2,5>: Cost 3 vext3 RHS, <u,2,5,4>
- 2700564387U, // <7,u,2,6>: Cost 3 vext3 <2,6,3,7>, <u,2,6,3>
- 1640314796U, // <7,u,2,7>: Cost 2 vext3 RHS, <u,2,7,3>
- 1638324146U, // <7,u,2,u>: Cost 2 vext3 RHS, <u,2,u,0>
- 1638324156U, // <7,u,3,0>: Cost 2 vext3 RHS, <u,3,0,1>
- 1638319064U, // <7,u,3,1>: Cost 2 vext3 RHS, <1,3,1,3>
- 2700564435U, // <7,u,3,2>: Cost 3 vext3 <2,6,3,7>, <u,3,2,6>
- 1638320540U, // <7,u,3,3>: Cost 2 vext3 RHS, <3,3,3,3>
- 1638324196U, // <7,u,3,4>: Cost 2 vext3 RHS, <u,3,4,5>
- 1638324207U, // <7,u,3,5>: Cost 2 vext3 RHS, <u,3,5,7>
- 2700564472U, // <7,u,3,6>: Cost 3 vext3 <2,6,3,7>, <u,3,6,7>
- 2695919610U, // <7,u,3,7>: Cost 3 vext3 <1,u,3,7>, <u,3,7,0>
- 1638324228U, // <7,u,3,u>: Cost 2 vext3 RHS, <u,3,u,1>
- 2712066061U, // <7,u,4,0>: Cost 3 vext3 RHS, <u,4,0,1>
- 1662212122U, // <7,u,4,1>: Cost 2 vext3 RHS, <u,4,1,5>
- 1662212132U, // <7,u,4,2>: Cost 2 vext3 RHS, <u,4,2,6>
- 2712066092U, // <7,u,4,3>: Cost 3 vext3 RHS, <u,4,3,5>
- 1638321360U, // <7,u,4,4>: Cost 2 vext3 RHS, <4,4,4,4>
- 1638324287U, // <7,u,4,5>: Cost 2 vext3 RHS, <u,4,5,6>
- 1662359624U, // <7,u,4,6>: Cost 2 vext3 RHS, <u,4,6,6>
- 1640314961U, // <7,u,4,7>: Cost 2 vext3 RHS, <u,4,7,6>
- 1638324314U, // <7,u,4,u>: Cost 2 vext3 RHS, <u,4,u,6>
- 1517502566U, // <7,u,5,0>: Cost 2 vext1 <6,7,u,5>, LHS
- 1574612693U, // <7,u,5,1>: Cost 2 vext2 <5,1,7,u>, <5,1,7,u>
- 2712066162U, // <7,u,5,2>: Cost 3 vext3 RHS, <u,5,2,3>
- 1638324351U, // <7,u,5,3>: Cost 2 vext3 RHS, <u,5,3,7>
- 1576603592U, // <7,u,5,4>: Cost 2 vext2 <5,4,7,u>, <5,4,7,u>
- 1577267225U, // <7,u,5,5>: Cost 2 vext2 <5,5,7,u>, <5,5,7,u>
- 564582554U, // <7,u,5,6>: Cost 1 vext3 RHS, RHS
- 1640462499U, // <7,u,5,7>: Cost 2 vext3 RHS, <u,5,7,7>
- 564582572U, // <7,u,5,u>: Cost 1 vext3 RHS, RHS
- 2712066223U, // <7,u,6,0>: Cost 3 vext3 RHS, <u,6,0,1>
- 2712066238U, // <7,u,6,1>: Cost 3 vext3 RHS, <u,6,1,7>
- 1581249023U, // <7,u,6,2>: Cost 2 vext2 <6,2,7,u>, <6,2,7,u>
- 1638324432U, // <7,u,6,3>: Cost 2 vext3 RHS, <u,6,3,7>
- 1638468980U, // <7,u,6,4>: Cost 2 vext3 RHS, <4,6,4,6>
- 2712066274U, // <7,u,6,5>: Cost 3 vext3 RHS, <u,6,5,7>
- 1583903555U, // <7,u,6,6>: Cost 2 vext2 <6,6,7,u>, <6,6,7,u>
- 1640315117U, // <7,u,6,7>: Cost 2 vext3 RHS, <u,6,7,0>
- 1638324477U, // <7,u,6,u>: Cost 2 vext3 RHS, <u,6,u,7>
- 1638471936U, // <7,u,7,0>: Cost 2 vext3 RHS, <u,7,0,1>
- 2692970763U, // <7,u,7,1>: Cost 3 vext3 <1,3,u,7>, <u,7,1,3>
- 2700933399U, // <7,u,7,2>: Cost 3 vext3 <2,6,u,7>, <u,7,2,6>
- 2573347601U, // <7,u,7,3>: Cost 3 vext1 <3,7,u,7>, <3,7,u,7>
- 1638471976U, // <7,u,7,4>: Cost 2 vext3 RHS, <u,7,4,5>
- 1511551171U, // <7,u,7,5>: Cost 2 vext1 <5,7,u,7>, <5,7,u,7>
- 2712213815U, // <7,u,7,6>: Cost 3 vext3 RHS, <u,7,6,2>
- 363253046U, // <7,u,7,7>: Cost 1 vdup3 RHS
- 363253046U, // <7,u,7,u>: Cost 1 vdup3 RHS
- 1638324561U, // <7,u,u,0>: Cost 2 vext3 RHS, <u,u,0,1>
- 1638324571U, // <7,u,u,1>: Cost 2 vext3 RHS, <u,u,1,2>
- 564582757U, // <7,u,u,2>: Cost 1 vext3 RHS, LHS
- 1638324587U, // <7,u,u,3>: Cost 2 vext3 RHS, <u,u,3,0>
- 1638324601U, // <7,u,u,4>: Cost 2 vext3 RHS, <u,u,4,5>
- 1638324611U, // <7,u,u,5>: Cost 2 vext3 RHS, <u,u,5,6>
- 564582797U, // <7,u,u,6>: Cost 1 vext3 RHS, RHS
- 363253046U, // <7,u,u,7>: Cost 1 vdup3 RHS
- 564582811U, // <7,u,u,u>: Cost 1 vext3 RHS, LHS
- 135053414U, // <u,0,0,0>: Cost 1 vdup0 LHS
- 1611489290U, // <u,0,0,1>: Cost 2 vext3 LHS, <0,0,1,1>
- 1611489300U, // <u,0,0,2>: Cost 2 vext3 LHS, <0,0,2,2>
- 2568054923U, // <u,0,0,3>: Cost 3 vext1 <3,0,0,0>, <3,0,0,0>
- 1481706806U, // <u,0,0,4>: Cost 2 vext1 <0,u,0,0>, RHS
- 2555449040U, // <u,0,0,5>: Cost 3 vext1 <0,u,0,0>, <5,1,7,3>
- 2591282078U, // <u,0,0,6>: Cost 3 vext1 <6,u,0,0>, <6,u,0,0>
- 2591945711U, // <u,0,0,7>: Cost 3 vext1 <7,0,0,0>, <7,0,0,0>
- 135053414U, // <u,0,0,u>: Cost 1 vdup0 LHS
- 1493655654U, // <u,0,1,0>: Cost 2 vext1 <2,u,0,1>, LHS
- 1860550758U, // <u,0,1,1>: Cost 2 vzipl LHS, LHS
- 537747563U, // <u,0,1,2>: Cost 1 vext3 LHS, LHS
- 2625135576U, // <u,0,1,3>: Cost 3 vext2 <1,2,u,0>, <1,3,1,3>
- 1493658934U, // <u,0,1,4>: Cost 2 vext1 <2,u,0,1>, RHS
- 2625135760U, // <u,0,1,5>: Cost 3 vext2 <1,2,u,0>, <1,5,3,7>
- 1517548447U, // <u,0,1,6>: Cost 2 vext1 <6,u,0,1>, <6,u,0,1>
- 2591290362U, // <u,0,1,7>: Cost 3 vext1 <6,u,0,1>, <7,0,1,2>
- 537747612U, // <u,0,1,u>: Cost 1 vext3 LHS, LHS
- 1611489444U, // <u,0,2,0>: Cost 2 vext3 LHS, <0,2,0,2>
- 2685231276U, // <u,0,2,1>: Cost 3 vext3 LHS, <0,2,1,1>
- 1994768486U, // <u,0,2,2>: Cost 2 vtrnl LHS, LHS
- 2685231294U, // <u,0,2,3>: Cost 3 vext3 LHS, <0,2,3,1>
- 1611489484U, // <u,0,2,4>: Cost 2 vext3 LHS, <0,2,4,6>
- 2712068310U, // <u,0,2,5>: Cost 3 vext3 RHS, <0,2,5,7>
- 2625136570U, // <u,0,2,6>: Cost 3 vext2 <1,2,u,0>, <2,6,3,7>
- 2591962097U, // <u,0,2,7>: Cost 3 vext1 <7,0,0,2>, <7,0,0,2>
- 1611489516U, // <u,0,2,u>: Cost 2 vext3 LHS, <0,2,u,2>
- 2954067968U, // <u,0,3,0>: Cost 3 vzipr LHS, <0,0,0,0>
- 2685231356U, // <u,0,3,1>: Cost 3 vext3 LHS, <0,3,1,0>
- 72589981U, // <u,0,3,2>: Cost 1 vrev LHS
- 2625137052U, // <u,0,3,3>: Cost 3 vext2 <1,2,u,0>, <3,3,3,3>
- 2625137154U, // <u,0,3,4>: Cost 3 vext2 <1,2,u,0>, <3,4,5,6>
- 2639071848U, // <u,0,3,5>: Cost 3 vext2 <3,5,u,0>, <3,5,u,0>
- 2639735481U, // <u,0,3,6>: Cost 3 vext2 <3,6,u,0>, <3,6,u,0>
- 2597279354U, // <u,0,3,7>: Cost 3 vext1 <7,u,0,3>, <7,u,0,3>
- 73032403U, // <u,0,3,u>: Cost 1 vrev LHS
- 2687074636U, // <u,0,4,0>: Cost 3 vext3 <0,4,0,u>, <0,4,0,u>
- 1611489618U, // <u,0,4,1>: Cost 2 vext3 LHS, <0,4,1,5>
- 1611489628U, // <u,0,4,2>: Cost 2 vext3 LHS, <0,4,2,6>
- 3629222038U, // <u,0,4,3>: Cost 4 vext1 <0,u,0,4>, <3,0,1,2>
- 2555481398U, // <u,0,4,4>: Cost 3 vext1 <0,u,0,4>, RHS
- 1551396150U, // <u,0,4,5>: Cost 2 vext2 <1,2,u,0>, RHS
- 2651680116U, // <u,0,4,6>: Cost 3 vext2 <5,6,u,0>, <4,6,4,6>
- 2646150600U, // <u,0,4,7>: Cost 3 vext2 <4,7,5,0>, <4,7,5,0>
- 1611932050U, // <u,0,4,u>: Cost 2 vext3 LHS, <0,4,u,6>
- 2561458278U, // <u,0,5,0>: Cost 3 vext1 <1,u,0,5>, LHS
- 1863532646U, // <u,0,5,1>: Cost 2 vzipl RHS, LHS
- 2712068526U, // <u,0,5,2>: Cost 3 vext3 RHS, <0,5,2,7>
- 2649689976U, // <u,0,5,3>: Cost 3 vext2 <5,3,u,0>, <5,3,u,0>
- 2220237489U, // <u,0,5,4>: Cost 3 vrev <0,u,4,5>
- 2651680772U, // <u,0,5,5>: Cost 3 vext2 <5,6,u,0>, <5,5,5,5>
- 1577939051U, // <u,0,5,6>: Cost 2 vext2 <5,6,u,0>, <5,6,u,0>
- 2830077238U, // <u,0,5,7>: Cost 3 vuzpr <1,u,3,0>, RHS
- 1579266317U, // <u,0,5,u>: Cost 2 vext2 <5,u,u,0>, <5,u,u,0>
- 2555494502U, // <u,0,6,0>: Cost 3 vext1 <0,u,0,6>, LHS
- 2712068598U, // <u,0,6,1>: Cost 3 vext3 RHS, <0,6,1,7>
- 1997750374U, // <u,0,6,2>: Cost 2 vtrnl RHS, LHS
- 2655662673U, // <u,0,6,3>: Cost 3 vext2 <6,3,u,0>, <6,3,u,0>
- 2555497782U, // <u,0,6,4>: Cost 3 vext1 <0,u,0,6>, RHS
- 2651681459U, // <u,0,6,5>: Cost 3 vext2 <5,6,u,0>, <6,5,0,u>
- 2651681592U, // <u,0,6,6>: Cost 3 vext2 <5,6,u,0>, <6,6,6,6>
- 2651681614U, // <u,0,6,7>: Cost 3 vext2 <5,6,u,0>, <6,7,0,1>
- 1997750428U, // <u,0,6,u>: Cost 2 vtrnl RHS, LHS
- 2567446630U, // <u,0,7,0>: Cost 3 vext1 <2,u,0,7>, LHS
- 2567447446U, // <u,0,7,1>: Cost 3 vext1 <2,u,0,7>, <1,2,3,0>
- 2567448641U, // <u,0,7,2>: Cost 3 vext1 <2,u,0,7>, <2,u,0,7>
- 2573421338U, // <u,0,7,3>: Cost 3 vext1 <3,u,0,7>, <3,u,0,7>
- 2567449910U, // <u,0,7,4>: Cost 3 vext1 <2,u,0,7>, RHS
- 2651682242U, // <u,0,7,5>: Cost 3 vext2 <5,6,u,0>, <7,5,6,u>
- 2591339429U, // <u,0,7,6>: Cost 3 vext1 <6,u,0,7>, <6,u,0,7>
- 2651682412U, // <u,0,7,7>: Cost 3 vext2 <5,6,u,0>, <7,7,7,7>
- 2567452462U, // <u,0,7,u>: Cost 3 vext1 <2,u,0,7>, LHS
- 135053414U, // <u,0,u,0>: Cost 1 vdup0 LHS
- 1611489938U, // <u,0,u,1>: Cost 2 vext3 LHS, <0,u,1,1>
- 537748125U, // <u,0,u,2>: Cost 1 vext3 LHS, LHS
- 2685674148U, // <u,0,u,3>: Cost 3 vext3 LHS, <0,u,3,1>
- 1611932338U, // <u,0,u,4>: Cost 2 vext3 LHS, <0,u,4,6>
- 1551399066U, // <u,0,u,5>: Cost 2 vext2 <1,2,u,0>, RHS
- 1517605798U, // <u,0,u,6>: Cost 2 vext1 <6,u,0,u>, <6,u,0,u>
- 2830077481U, // <u,0,u,7>: Cost 3 vuzpr <1,u,3,0>, RHS
- 537748179U, // <u,0,u,u>: Cost 1 vext3 LHS, LHS
- 1544101961U, // <u,1,0,0>: Cost 2 vext2 <0,0,u,1>, <0,0,u,1>
- 1558036582U, // <u,1,0,1>: Cost 2 vext2 <2,3,u,1>, LHS
- 2619171051U, // <u,1,0,2>: Cost 3 vext2 <0,2,u,1>, <0,2,u,1>
- 1611490038U, // <u,1,0,3>: Cost 2 vext3 LHS, <1,0,3,2>
- 2555522358U, // <u,1,0,4>: Cost 3 vext1 <0,u,1,0>, RHS
- 2712068871U, // <u,1,0,5>: Cost 3 vext3 RHS, <1,0,5,1>
- 2591355815U, // <u,1,0,6>: Cost 3 vext1 <6,u,1,0>, <6,u,1,0>
- 2597328512U, // <u,1,0,7>: Cost 3 vext1 <7,u,1,0>, <7,u,1,0>
- 1611490083U, // <u,1,0,u>: Cost 2 vext3 LHS, <1,0,u,2>
- 1481785446U, // <u,1,1,0>: Cost 2 vext1 <0,u,1,1>, LHS
- 202162278U, // <u,1,1,1>: Cost 1 vdup1 LHS
- 2555528808U, // <u,1,1,2>: Cost 3 vext1 <0,u,1,1>, <2,2,2,2>
- 1611490120U, // <u,1,1,3>: Cost 2 vext3 LHS, <1,1,3,3>
- 1481788726U, // <u,1,1,4>: Cost 2 vext1 <0,u,1,1>, RHS
- 2689876828U, // <u,1,1,5>: Cost 3 vext3 LHS, <1,1,5,5>
- 2591364008U, // <u,1,1,6>: Cost 3 vext1 <6,u,1,1>, <6,u,1,1>
- 2592691274U, // <u,1,1,7>: Cost 3 vext1 <7,1,1,1>, <7,1,1,1>
- 202162278U, // <u,1,1,u>: Cost 1 vdup1 LHS
- 1499709542U, // <u,1,2,0>: Cost 2 vext1 <3,u,1,2>, LHS
- 2689876871U, // <u,1,2,1>: Cost 3 vext3 LHS, <1,2,1,3>
- 2631116445U, // <u,1,2,2>: Cost 3 vext2 <2,2,u,1>, <2,2,u,1>
- 835584U, // <u,1,2,3>: Cost 0 copy LHS
- 1499712822U, // <u,1,2,4>: Cost 2 vext1 <3,u,1,2>, RHS
- 2689876907U, // <u,1,2,5>: Cost 3 vext3 LHS, <1,2,5,3>
- 2631780282U, // <u,1,2,6>: Cost 3 vext2 <2,3,u,1>, <2,6,3,7>
- 1523603074U, // <u,1,2,7>: Cost 2 vext1 <7,u,1,2>, <7,u,1,2>
- 835584U, // <u,1,2,u>: Cost 0 copy LHS
- 1487773798U, // <u,1,3,0>: Cost 2 vext1 <1,u,1,3>, LHS
- 1611490264U, // <u,1,3,1>: Cost 2 vext3 LHS, <1,3,1,3>
- 2685232094U, // <u,1,3,2>: Cost 3 vext3 LHS, <1,3,2,0>
- 2018746470U, // <u,1,3,3>: Cost 2 vtrnr LHS, LHS
- 1487777078U, // <u,1,3,4>: Cost 2 vext1 <1,u,1,3>, RHS
- 1611490304U, // <u,1,3,5>: Cost 2 vext3 LHS, <1,3,5,7>
- 2685674505U, // <u,1,3,6>: Cost 3 vext3 LHS, <1,3,6,7>
- 2640407307U, // <u,1,3,7>: Cost 3 vext2 <3,7,u,1>, <3,7,u,1>
- 1611490327U, // <u,1,3,u>: Cost 2 vext3 LHS, <1,3,u,3>
- 1567992749U, // <u,1,4,0>: Cost 2 vext2 <4,0,u,1>, <4,0,u,1>
- 2693121070U, // <u,1,4,1>: Cost 3 vext3 <1,4,1,u>, <1,4,1,u>
- 2693194807U, // <u,1,4,2>: Cost 3 vext3 <1,4,2,u>, <1,4,2,u>
- 1152386432U, // <u,1,4,3>: Cost 2 vrev <1,u,3,4>
- 2555555126U, // <u,1,4,4>: Cost 3 vext1 <0,u,1,4>, RHS
- 1558039862U, // <u,1,4,5>: Cost 2 vext2 <2,3,u,1>, RHS
- 2645716371U, // <u,1,4,6>: Cost 3 vext2 <4,6,u,1>, <4,6,u,1>
- 2597361284U, // <u,1,4,7>: Cost 3 vext1 <7,u,1,4>, <7,u,1,4>
- 1152755117U, // <u,1,4,u>: Cost 2 vrev <1,u,u,4>
- 1481818214U, // <u,1,5,0>: Cost 2 vext1 <0,u,1,5>, LHS
- 2555560694U, // <u,1,5,1>: Cost 3 vext1 <0,u,1,5>, <1,0,3,2>
- 2555561576U, // <u,1,5,2>: Cost 3 vext1 <0,u,1,5>, <2,2,2,2>
- 1611490448U, // <u,1,5,3>: Cost 2 vext3 LHS, <1,5,3,7>
- 1481821494U, // <u,1,5,4>: Cost 2 vext1 <0,u,1,5>, RHS
- 2651025435U, // <u,1,5,5>: Cost 3 vext2 <5,5,u,1>, <5,5,u,1>
- 2651689068U, // <u,1,5,6>: Cost 3 vext2 <5,6,u,1>, <5,6,u,1>
- 2823966006U, // <u,1,5,7>: Cost 3 vuzpr <0,u,1,1>, RHS
- 1611932861U, // <u,1,5,u>: Cost 2 vext3 LHS, <1,5,u,7>
- 2555568230U, // <u,1,6,0>: Cost 3 vext1 <0,u,1,6>, LHS
- 2689877199U, // <u,1,6,1>: Cost 3 vext3 LHS, <1,6,1,7>
- 2712069336U, // <u,1,6,2>: Cost 3 vext3 RHS, <1,6,2,7>
- 2685232353U, // <u,1,6,3>: Cost 3 vext3 LHS, <1,6,3,7>
- 2555571510U, // <u,1,6,4>: Cost 3 vext1 <0,u,1,6>, RHS
- 2689877235U, // <u,1,6,5>: Cost 3 vext3 LHS, <1,6,5,7>
- 2657661765U, // <u,1,6,6>: Cost 3 vext2 <6,6,u,1>, <6,6,u,1>
- 1584583574U, // <u,1,6,7>: Cost 2 vext2 <6,7,u,1>, <6,7,u,1>
- 1585247207U, // <u,1,6,u>: Cost 2 vext2 <6,u,u,1>, <6,u,u,1>
- 2561548390U, // <u,1,7,0>: Cost 3 vext1 <1,u,1,7>, LHS
- 2561549681U, // <u,1,7,1>: Cost 3 vext1 <1,u,1,7>, <1,u,1,7>
- 2573493926U, // <u,1,7,2>: Cost 3 vext1 <3,u,1,7>, <2,3,0,1>
- 2042962022U, // <u,1,7,3>: Cost 2 vtrnr RHS, LHS
- 2561551670U, // <u,1,7,4>: Cost 3 vext1 <1,u,1,7>, RHS
- 2226300309U, // <u,1,7,5>: Cost 3 vrev <1,u,5,7>
- 2658325990U, // <u,1,7,6>: Cost 3 vext2 <6,7,u,1>, <7,6,1,u>
- 2658326124U, // <u,1,7,7>: Cost 3 vext2 <6,7,u,1>, <7,7,7,7>
- 2042962027U, // <u,1,7,u>: Cost 2 vtrnr RHS, LHS
- 1481842790U, // <u,1,u,0>: Cost 2 vext1 <0,u,1,u>, LHS
- 202162278U, // <u,1,u,1>: Cost 1 vdup1 LHS
- 2685674867U, // <u,1,u,2>: Cost 3 vext3 LHS, <1,u,2,0>
- 835584U, // <u,1,u,3>: Cost 0 copy LHS
- 1481846070U, // <u,1,u,4>: Cost 2 vext1 <0,u,1,u>, RHS
- 1611933077U, // <u,1,u,5>: Cost 2 vext3 LHS, <1,u,5,7>
- 2685674910U, // <u,1,u,6>: Cost 3 vext3 LHS, <1,u,6,7>
- 1523652232U, // <u,1,u,7>: Cost 2 vext1 <7,u,1,u>, <7,u,1,u>
- 835584U, // <u,1,u,u>: Cost 0 copy LHS
- 1544110154U, // <u,2,0,0>: Cost 2 vext2 <0,0,u,2>, <0,0,u,2>
- 1545437286U, // <u,2,0,1>: Cost 2 vext2 <0,2,u,2>, LHS
- 1545437420U, // <u,2,0,2>: Cost 2 vext2 <0,2,u,2>, <0,2,u,2>
- 2685232589U, // <u,2,0,3>: Cost 3 vext3 LHS, <2,0,3,0>
- 2619179346U, // <u,2,0,4>: Cost 3 vext2 <0,2,u,2>, <0,4,1,5>
- 2712069606U, // <u,2,0,5>: Cost 3 vext3 RHS, <2,0,5,7>
- 2689877484U, // <u,2,0,6>: Cost 3 vext3 LHS, <2,0,6,4>
- 2659656273U, // <u,2,0,7>: Cost 3 vext2 <7,0,u,2>, <0,7,2,u>
- 1545437853U, // <u,2,0,u>: Cost 2 vext2 <0,2,u,2>, LHS
- 1550082851U, // <u,2,1,0>: Cost 2 vext2 <1,0,u,2>, <1,0,u,2>
- 2619179828U, // <u,2,1,1>: Cost 3 vext2 <0,2,u,2>, <1,1,1,1>
- 2619179926U, // <u,2,1,2>: Cost 3 vext2 <0,2,u,2>, <1,2,3,0>
- 2685232671U, // <u,2,1,3>: Cost 3 vext3 LHS, <2,1,3,1>
- 2555604278U, // <u,2,1,4>: Cost 3 vext1 <0,u,2,1>, RHS
- 2619180176U, // <u,2,1,5>: Cost 3 vext2 <0,2,u,2>, <1,5,3,7>
- 2689877564U, // <u,2,1,6>: Cost 3 vext3 LHS, <2,1,6,3>
- 2602718850U, // <u,2,1,7>: Cost 3 vext1 <u,7,2,1>, <7,u,1,2>
- 1158703235U, // <u,2,1,u>: Cost 2 vrev <2,u,u,1>
- 1481867366U, // <u,2,2,0>: Cost 2 vext1 <0,u,2,2>, LHS
- 2555609846U, // <u,2,2,1>: Cost 3 vext1 <0,u,2,2>, <1,0,3,2>
- 269271142U, // <u,2,2,2>: Cost 1 vdup2 LHS
- 1611490930U, // <u,2,2,3>: Cost 2 vext3 LHS, <2,2,3,3>
- 1481870646U, // <u,2,2,4>: Cost 2 vext1 <0,u,2,2>, RHS
- 2689877640U, // <u,2,2,5>: Cost 3 vext3 LHS, <2,2,5,7>
- 2619180986U, // <u,2,2,6>: Cost 3 vext2 <0,2,u,2>, <2,6,3,7>
- 2593436837U, // <u,2,2,7>: Cost 3 vext1 <7,2,2,2>, <7,2,2,2>
- 269271142U, // <u,2,2,u>: Cost 1 vdup2 LHS
- 408134301U, // <u,2,3,0>: Cost 1 vext1 LHS, LHS
- 1481876214U, // <u,2,3,1>: Cost 2 vext1 LHS, <1,0,3,2>
- 1481877096U, // <u,2,3,2>: Cost 2 vext1 LHS, <2,2,2,2>
- 1880326246U, // <u,2,3,3>: Cost 2 vzipr LHS, LHS
- 408137014U, // <u,2,3,4>: Cost 1 vext1 LHS, RHS
- 1529654992U, // <u,2,3,5>: Cost 2 vext1 LHS, <5,1,7,3>
- 1529655802U, // <u,2,3,6>: Cost 2 vext1 LHS, <6,2,7,3>
- 1529656314U, // <u,2,3,7>: Cost 2 vext1 LHS, <7,0,1,2>
- 408139566U, // <u,2,3,u>: Cost 1 vext1 LHS, LHS
- 1567853468U, // <u,2,4,0>: Cost 2 vext2 <4,0,6,2>, <4,0,6,2>
- 2561598362U, // <u,2,4,1>: Cost 3 vext1 <1,u,2,4>, <1,2,3,4>
- 2555627214U, // <u,2,4,2>: Cost 3 vext1 <0,u,2,4>, <2,3,4,5>
- 2685232918U, // <u,2,4,3>: Cost 3 vext3 LHS, <2,4,3,5>
- 2555628854U, // <u,2,4,4>: Cost 3 vext1 <0,u,2,4>, RHS
- 1545440566U, // <u,2,4,5>: Cost 2 vext2 <0,2,u,2>, RHS
- 1571982740U, // <u,2,4,6>: Cost 2 vext2 <4,6,u,2>, <4,6,u,2>
- 2592125957U, // <u,2,4,7>: Cost 3 vext1 <7,0,2,4>, <7,0,2,4>
- 1545440809U, // <u,2,4,u>: Cost 2 vext2 <0,2,u,2>, RHS
- 2555633766U, // <u,2,5,0>: Cost 3 vext1 <0,u,2,5>, LHS
- 2561606550U, // <u,2,5,1>: Cost 3 vext1 <1,u,2,5>, <1,2,3,0>
- 2689877856U, // <u,2,5,2>: Cost 3 vext3 LHS, <2,5,2,7>
- 2685233000U, // <u,2,5,3>: Cost 3 vext3 LHS, <2,5,3,6>
- 1158441059U, // <u,2,5,4>: Cost 2 vrev <2,u,4,5>
- 2645725188U, // <u,2,5,5>: Cost 3 vext2 <4,6,u,2>, <5,5,5,5>
- 2689877892U, // <u,2,5,6>: Cost 3 vext3 LHS, <2,5,6,7>
- 2823900470U, // <u,2,5,7>: Cost 3 vuzpr <0,u,0,2>, RHS
- 1158736007U, // <u,2,5,u>: Cost 2 vrev <2,u,u,5>
- 1481900134U, // <u,2,6,0>: Cost 2 vext1 <0,u,2,6>, LHS
- 2555642614U, // <u,2,6,1>: Cost 3 vext1 <0,u,2,6>, <1,0,3,2>
- 2555643496U, // <u,2,6,2>: Cost 3 vext1 <0,u,2,6>, <2,2,2,2>
- 1611491258U, // <u,2,6,3>: Cost 2 vext3 LHS, <2,6,3,7>
- 1481903414U, // <u,2,6,4>: Cost 2 vext1 <0,u,2,6>, RHS
- 2689877964U, // <u,2,6,5>: Cost 3 vext3 LHS, <2,6,5,7>
- 2689877973U, // <u,2,6,6>: Cost 3 vext3 LHS, <2,6,6,7>
- 2645726030U, // <u,2,6,7>: Cost 3 vext2 <4,6,u,2>, <6,7,0,1>
- 1611933671U, // <u,2,6,u>: Cost 2 vext3 LHS, <2,6,u,7>
- 1585919033U, // <u,2,7,0>: Cost 2 vext2 <7,0,u,2>, <7,0,u,2>
- 2573566710U, // <u,2,7,1>: Cost 3 vext1 <3,u,2,7>, <1,0,3,2>
- 2567596115U, // <u,2,7,2>: Cost 3 vext1 <2,u,2,7>, <2,u,2,7>
- 1906901094U, // <u,2,7,3>: Cost 2 vzipr RHS, LHS
- 2555653430U, // <u,2,7,4>: Cost 3 vext1 <0,u,2,7>, RHS
- 2800080230U, // <u,2,7,5>: Cost 3 vuzpl LHS, <7,4,5,6>
- 2980643164U, // <u,2,7,6>: Cost 3 vzipr RHS, <0,4,2,6>
- 2645726828U, // <u,2,7,7>: Cost 3 vext2 <4,6,u,2>, <7,7,7,7>
- 1906901099U, // <u,2,7,u>: Cost 2 vzipr RHS, LHS
- 408175266U, // <u,2,u,0>: Cost 1 vext1 LHS, LHS
- 1545443118U, // <u,2,u,1>: Cost 2 vext2 <0,2,u,2>, LHS
- 269271142U, // <u,2,u,2>: Cost 1 vdup2 LHS
- 1611491416U, // <u,2,u,3>: Cost 2 vext3 LHS, <2,u,3,3>
- 408177974U, // <u,2,u,4>: Cost 1 vext1 LHS, RHS
- 1545443482U, // <u,2,u,5>: Cost 2 vext2 <0,2,u,2>, RHS
- 1726339226U, // <u,2,u,6>: Cost 2 vuzpl LHS, RHS
- 1529697274U, // <u,2,u,7>: Cost 2 vext1 LHS, <7,0,1,2>
- 408180526U, // <u,2,u,u>: Cost 1 vext1 LHS, LHS
- 1544781824U, // <u,3,0,0>: Cost 2 vext2 LHS, <0,0,0,0>
- 471040156U, // <u,3,0,1>: Cost 1 vext2 LHS, LHS
- 1544781988U, // <u,3,0,2>: Cost 2 vext2 LHS, <0,2,0,2>
- 2618523900U, // <u,3,0,3>: Cost 3 vext2 LHS, <0,3,1,0>
- 1544782162U, // <u,3,0,4>: Cost 2 vext2 LHS, <0,4,1,5>
- 2238188352U, // <u,3,0,5>: Cost 3 vrev <3,u,5,0>
- 2623169023U, // <u,3,0,6>: Cost 3 vext2 LHS, <0,6,2,7>
- 2238335826U, // <u,3,0,7>: Cost 3 vrev <3,u,7,0>
- 471040669U, // <u,3,0,u>: Cost 1 vext2 LHS, LHS
- 1544782582U, // <u,3,1,0>: Cost 2 vext2 LHS, <1,0,3,2>
- 1544782644U, // <u,3,1,1>: Cost 2 vext2 LHS, <1,1,1,1>
- 1544782742U, // <u,3,1,2>: Cost 2 vext2 LHS, <1,2,3,0>
- 1544782808U, // <u,3,1,3>: Cost 2 vext2 LHS, <1,3,1,3>
- 2618524733U, // <u,3,1,4>: Cost 3 vext2 LHS, <1,4,3,5>
- 1544782992U, // <u,3,1,5>: Cost 2 vext2 LHS, <1,5,3,7>
- 2618524897U, // <u,3,1,6>: Cost 3 vext2 LHS, <1,6,3,7>
- 2703517987U, // <u,3,1,7>: Cost 3 vext3 <3,1,7,u>, <3,1,7,u>
- 1544783213U, // <u,3,1,u>: Cost 2 vext2 LHS, <1,u,1,3>
- 1529716838U, // <u,3,2,0>: Cost 2 vext1 <u,u,3,2>, LHS
- 1164167966U, // <u,3,2,1>: Cost 2 vrev <3,u,1,2>
- 1544783464U, // <u,3,2,2>: Cost 2 vext2 LHS, <2,2,2,2>
- 1544783526U, // <u,3,2,3>: Cost 2 vext2 LHS, <2,3,0,1>
- 1529720118U, // <u,3,2,4>: Cost 2 vext1 <u,u,3,2>, RHS
- 2618525544U, // <u,3,2,5>: Cost 3 vext2 LHS, <2,5,3,6>
- 1544783802U, // <u,3,2,6>: Cost 2 vext2 LHS, <2,6,3,7>
- 2704181620U, // <u,3,2,7>: Cost 3 vext3 <3,2,7,u>, <3,2,7,u>
- 1544783931U, // <u,3,2,u>: Cost 2 vext2 LHS, <2,u,0,1>
- 1544784022U, // <u,3,3,0>: Cost 2 vext2 LHS, <3,0,1,2>
- 1487922559U, // <u,3,3,1>: Cost 2 vext1 <1,u,3,3>, <1,u,3,3>
- 1493895256U, // <u,3,3,2>: Cost 2 vext1 <2,u,3,3>, <2,u,3,3>
- 336380006U, // <u,3,3,3>: Cost 1 vdup3 LHS
- 1544784386U, // <u,3,3,4>: Cost 2 vext2 LHS, <3,4,5,6>
- 2824054478U, // <u,3,3,5>: Cost 3 vuzpr LHS, <2,3,4,5>
- 2238286668U, // <u,3,3,6>: Cost 3 vrev <3,u,6,3>
- 2954069136U, // <u,3,3,7>: Cost 3 vzipr LHS, <1,5,3,7>
- 336380006U, // <u,3,3,u>: Cost 1 vdup3 LHS
- 1487929446U, // <u,3,4,0>: Cost 2 vext1 <1,u,3,4>, LHS
- 1487930752U, // <u,3,4,1>: Cost 2 vext1 <1,u,3,4>, <1,u,3,4>
- 2623171644U, // <u,3,4,2>: Cost 3 vext2 LHS, <4,2,6,0>
- 2561673366U, // <u,3,4,3>: Cost 3 vext1 <1,u,3,4>, <3,0,1,2>
- 1487932726U, // <u,3,4,4>: Cost 2 vext1 <1,u,3,4>, RHS
- 471043382U, // <u,3,4,5>: Cost 1 vext2 LHS, RHS
- 1592561012U, // <u,3,4,6>: Cost 2 vext2 LHS, <4,6,4,6>
- 2238368598U, // <u,3,4,7>: Cost 3 vrev <3,u,7,4>
- 471043625U, // <u,3,4,u>: Cost 1 vext2 LHS, RHS
- 2555707494U, // <u,3,5,0>: Cost 3 vext1 <0,u,3,5>, LHS
- 1574645465U, // <u,3,5,1>: Cost 2 vext2 <5,1,u,3>, <5,1,u,3>
- 2567653106U, // <u,3,5,2>: Cost 3 vext1 <2,u,3,5>, <2,3,u,5>
- 2555709954U, // <u,3,5,3>: Cost 3 vext1 <0,u,3,5>, <3,4,5,6>
- 1592561606U, // <u,3,5,4>: Cost 2 vext2 LHS, <5,4,7,6>
- 1592561668U, // <u,3,5,5>: Cost 2 vext2 LHS, <5,5,5,5>
- 1592561762U, // <u,3,5,6>: Cost 2 vext2 LHS, <5,6,7,0>
- 1750314294U, // <u,3,5,7>: Cost 2 vuzpr LHS, RHS
- 1750314295U, // <u,3,5,u>: Cost 2 vuzpr LHS, RHS
- 2623172897U, // <u,3,6,0>: Cost 3 vext2 LHS, <6,0,1,2>
- 2561688962U, // <u,3,6,1>: Cost 3 vext1 <1,u,3,6>, <1,u,3,6>
- 1581281795U, // <u,3,6,2>: Cost 2 vext2 <6,2,u,3>, <6,2,u,3>
- 2706541204U, // <u,3,6,3>: Cost 3 vext3 <3,6,3,u>, <3,6,3,u>
- 2623173261U, // <u,3,6,4>: Cost 3 vext2 LHS, <6,4,5,6>
- 1164495686U, // <u,3,6,5>: Cost 2 vrev <3,u,5,6>
- 1592562488U, // <u,3,6,6>: Cost 2 vext2 LHS, <6,6,6,6>
- 1592562510U, // <u,3,6,7>: Cost 2 vext2 LHS, <6,7,0,1>
- 1164716897U, // <u,3,6,u>: Cost 2 vrev <3,u,u,6>
- 1487954022U, // <u,3,7,0>: Cost 2 vext1 <1,u,3,7>, LHS
- 1487955331U, // <u,3,7,1>: Cost 2 vext1 <1,u,3,7>, <1,u,3,7>
- 1493928028U, // <u,3,7,2>: Cost 2 vext1 <2,u,3,7>, <2,u,3,7>
- 2561697942U, // <u,3,7,3>: Cost 3 vext1 <1,u,3,7>, <3,0,1,2>
- 1487957302U, // <u,3,7,4>: Cost 2 vext1 <1,u,3,7>, RHS
- 2707352311U, // <u,3,7,5>: Cost 3 vext3 <3,7,5,u>, <3,7,5,u>
- 2655024623U, // <u,3,7,6>: Cost 3 vext2 <6,2,u,3>, <7,6,2,u>
- 1592563308U, // <u,3,7,7>: Cost 2 vext2 LHS, <7,7,7,7>
- 1487959854U, // <u,3,7,u>: Cost 2 vext1 <1,u,3,7>, LHS
- 1544787667U, // <u,3,u,0>: Cost 2 vext2 LHS, <u,0,1,2>
- 471045934U, // <u,3,u,1>: Cost 1 vext2 LHS, LHS
- 1549432709U, // <u,3,u,2>: Cost 2 vext2 LHS, <u,2,3,0>
- 336380006U, // <u,3,u,3>: Cost 1 vdup3 LHS
- 1544788031U, // <u,3,u,4>: Cost 2 vext2 LHS, <u,4,5,6>
- 471046298U, // <u,3,u,5>: Cost 1 vext2 LHS, RHS
- 1549433040U, // <u,3,u,6>: Cost 2 vext2 LHS, <u,6,3,7>
- 1750314537U, // <u,3,u,7>: Cost 2 vuzpr LHS, RHS
- 471046501U, // <u,3,u,u>: Cost 1 vext2 LHS, LHS
- 2625167360U, // <u,4,0,0>: Cost 3 vext2 <1,2,u,4>, <0,0,0,0>
- 1551425638U, // <u,4,0,1>: Cost 2 vext2 <1,2,u,4>, LHS
- 2619195630U, // <u,4,0,2>: Cost 3 vext2 <0,2,u,4>, <0,2,u,4>
- 2619343104U, // <u,4,0,3>: Cost 3 vext2 <0,3,1,4>, <0,3,1,4>
- 2625167698U, // <u,4,0,4>: Cost 3 vext2 <1,2,u,4>, <0,4,1,5>
- 1638329234U, // <u,4,0,5>: Cost 2 vext3 RHS, <4,0,5,1>
- 1638329244U, // <u,4,0,6>: Cost 2 vext3 RHS, <4,0,6,2>
- 3787803556U, // <u,4,0,7>: Cost 4 vext3 RHS, <4,0,7,1>
- 1551426205U, // <u,4,0,u>: Cost 2 vext2 <1,2,u,4>, LHS
- 2555748454U, // <u,4,1,0>: Cost 3 vext1 <0,u,4,1>, LHS
- 2625168180U, // <u,4,1,1>: Cost 3 vext2 <1,2,u,4>, <1,1,1,1>
- 1551426503U, // <u,4,1,2>: Cost 2 vext2 <1,2,u,4>, <1,2,u,4>
- 2625168344U, // <u,4,1,3>: Cost 3 vext2 <1,2,u,4>, <1,3,1,3>
- 2555751734U, // <u,4,1,4>: Cost 3 vext1 <0,u,4,1>, RHS
- 1860554038U, // <u,4,1,5>: Cost 2 vzipl LHS, RHS
- 2689879022U, // <u,4,1,6>: Cost 3 vext3 LHS, <4,1,6,3>
- 2592248852U, // <u,4,1,7>: Cost 3 vext1 <7,0,4,1>, <7,0,4,1>
- 1555408301U, // <u,4,1,u>: Cost 2 vext2 <1,u,u,4>, <1,u,u,4>
- 2555756646U, // <u,4,2,0>: Cost 3 vext1 <0,u,4,2>, LHS
- 2625168943U, // <u,4,2,1>: Cost 3 vext2 <1,2,u,4>, <2,1,4,u>
- 2625169000U, // <u,4,2,2>: Cost 3 vext2 <1,2,u,4>, <2,2,2,2>
- 2619197134U, // <u,4,2,3>: Cost 3 vext2 <0,2,u,4>, <2,3,4,5>
- 2555759926U, // <u,4,2,4>: Cost 3 vext1 <0,u,4,2>, RHS
- 2712071222U, // <u,4,2,5>: Cost 3 vext3 RHS, <4,2,5,3>
- 1994771766U, // <u,4,2,6>: Cost 2 vtrnl LHS, RHS
- 2592257045U, // <u,4,2,7>: Cost 3 vext1 <7,0,4,2>, <7,0,4,2>
- 1994771784U, // <u,4,2,u>: Cost 2 vtrnl LHS, RHS
- 2625169558U, // <u,4,3,0>: Cost 3 vext2 <1,2,u,4>, <3,0,1,2>
- 2567709594U, // <u,4,3,1>: Cost 3 vext1 <2,u,4,3>, <1,2,3,4>
- 2567710817U, // <u,4,3,2>: Cost 3 vext1 <2,u,4,3>, <2,u,4,3>
- 2625169820U, // <u,4,3,3>: Cost 3 vext2 <1,2,u,4>, <3,3,3,3>
- 2625169922U, // <u,4,3,4>: Cost 3 vext2 <1,2,u,4>, <3,4,5,6>
- 2954069710U, // <u,4,3,5>: Cost 3 vzipr LHS, <2,3,4,5>
- 2954068172U, // <u,4,3,6>: Cost 3 vzipr LHS, <0,2,4,6>
- 3903849472U, // <u,4,3,7>: Cost 4 vuzpr <1,u,3,4>, <1,3,5,7>
- 2954068174U, // <u,4,3,u>: Cost 3 vzipr LHS, <0,2,4,u>
- 1505919078U, // <u,4,4,0>: Cost 2 vext1 <4,u,4,4>, LHS
- 2567717831U, // <u,4,4,1>: Cost 3 vext1 <2,u,4,4>, <1,2,u,4>
- 2567719010U, // <u,4,4,2>: Cost 3 vext1 <2,u,4,4>, <2,u,4,4>
- 2570373542U, // <u,4,4,3>: Cost 3 vext1 <3,3,4,4>, <3,3,4,4>
- 161926454U, // <u,4,4,4>: Cost 1 vdup0 RHS
- 1551428918U, // <u,4,4,5>: Cost 2 vext2 <1,2,u,4>, RHS
- 1638329572U, // <u,4,4,6>: Cost 2 vext3 RHS, <4,4,6,6>
- 2594927963U, // <u,4,4,7>: Cost 3 vext1 <7,4,4,4>, <7,4,4,4>
- 161926454U, // <u,4,4,u>: Cost 1 vdup0 RHS
- 1493983334U, // <u,4,5,0>: Cost 2 vext1 <2,u,4,5>, LHS
- 2689879301U, // <u,4,5,1>: Cost 3 vext3 LHS, <4,5,1,3>
- 1493985379U, // <u,4,5,2>: Cost 2 vext1 <2,u,4,5>, <2,u,4,5>
- 2567727254U, // <u,4,5,3>: Cost 3 vext1 <2,u,4,5>, <3,0,1,2>
- 1493986614U, // <u,4,5,4>: Cost 2 vext1 <2,u,4,5>, RHS
- 1863535926U, // <u,4,5,5>: Cost 2 vzipl RHS, RHS
- 537750838U, // <u,4,5,6>: Cost 1 vext3 LHS, RHS
- 2830110006U, // <u,4,5,7>: Cost 3 vuzpr <1,u,3,4>, RHS
- 537750856U, // <u,4,5,u>: Cost 1 vext3 LHS, RHS
- 1482047590U, // <u,4,6,0>: Cost 2 vext1 <0,u,4,6>, LHS
- 2555790070U, // <u,4,6,1>: Cost 3 vext1 <0,u,4,6>, <1,0,3,2>
- 2555790952U, // <u,4,6,2>: Cost 3 vext1 <0,u,4,6>, <2,2,2,2>
- 2555791510U, // <u,4,6,3>: Cost 3 vext1 <0,u,4,6>, <3,0,1,2>
- 1482050870U, // <u,4,6,4>: Cost 2 vext1 <0,u,4,6>, RHS
- 2689879422U, // <u,4,6,5>: Cost 3 vext3 LHS, <4,6,5,7>
- 1997753654U, // <u,4,6,6>: Cost 2 vtrnl RHS, RHS
- 2712071562U, // <u,4,6,7>: Cost 3 vext3 RHS, <4,6,7,1>
- 1482053422U, // <u,4,6,u>: Cost 2 vext1 <0,u,4,6>, LHS
- 2567741542U, // <u,4,7,0>: Cost 3 vext1 <2,u,4,7>, LHS
- 2567742362U, // <u,4,7,1>: Cost 3 vext1 <2,u,4,7>, <1,2,3,4>
- 2567743589U, // <u,4,7,2>: Cost 3 vext1 <2,u,4,7>, <2,u,4,7>
- 2573716286U, // <u,4,7,3>: Cost 3 vext1 <3,u,4,7>, <3,u,4,7>
- 2567744822U, // <u,4,7,4>: Cost 3 vext1 <2,u,4,7>, RHS
- 2712071624U, // <u,4,7,5>: Cost 3 vext3 RHS, <4,7,5,0>
- 96808489U, // <u,4,7,6>: Cost 1 vrev RHS
- 2651715180U, // <u,4,7,7>: Cost 3 vext2 <5,6,u,4>, <7,7,7,7>
- 96955963U, // <u,4,7,u>: Cost 1 vrev RHS
- 1482063974U, // <u,4,u,0>: Cost 2 vext1 <0,u,4,u>, LHS
- 1551431470U, // <u,4,u,1>: Cost 2 vext2 <1,2,u,4>, LHS
- 1494009958U, // <u,4,u,2>: Cost 2 vext1 <2,u,4,u>, <2,u,4,u>
- 2555807894U, // <u,4,u,3>: Cost 3 vext1 <0,u,4,u>, <3,0,1,2>
- 161926454U, // <u,4,u,4>: Cost 1 vdup0 RHS
- 1551431834U, // <u,4,u,5>: Cost 2 vext2 <1,2,u,4>, RHS
- 537751081U, // <u,4,u,6>: Cost 1 vext3 LHS, RHS
- 2830110249U, // <u,4,u,7>: Cost 3 vuzpr <1,u,3,4>, RHS
- 537751099U, // <u,4,u,u>: Cost 1 vext3 LHS, RHS
- 2631811072U, // <u,5,0,0>: Cost 3 vext2 <2,3,u,5>, <0,0,0,0>
- 1558069350U, // <u,5,0,1>: Cost 2 vext2 <2,3,u,5>, LHS
- 2619203823U, // <u,5,0,2>: Cost 3 vext2 <0,2,u,5>, <0,2,u,5>
- 2619867456U, // <u,5,0,3>: Cost 3 vext2 <0,3,u,5>, <0,3,u,5>
- 1546273106U, // <u,5,0,4>: Cost 2 vext2 <0,4,1,5>, <0,4,1,5>
- 2733010539U, // <u,5,0,5>: Cost 3 vext3 LHS, <5,0,5,1>
- 2597622682U, // <u,5,0,6>: Cost 3 vext1 <7,u,5,0>, <6,7,u,5>
- 1176539396U, // <u,5,0,7>: Cost 2 vrev <5,u,7,0>
- 1558069917U, // <u,5,0,u>: Cost 2 vext2 <2,3,u,5>, LHS
- 1505968230U, // <u,5,1,0>: Cost 2 vext1 <4,u,5,1>, LHS
- 2624512887U, // <u,5,1,1>: Cost 3 vext2 <1,1,u,5>, <1,1,u,5>
- 2631811990U, // <u,5,1,2>: Cost 3 vext2 <2,3,u,5>, <1,2,3,0>
- 2618541056U, // <u,5,1,3>: Cost 3 vext2 <0,1,u,5>, <1,3,5,7>
- 1505971510U, // <u,5,1,4>: Cost 2 vext1 <4,u,5,1>, RHS
- 2627167419U, // <u,5,1,5>: Cost 3 vext2 <1,5,u,5>, <1,5,u,5>
- 2579714554U, // <u,5,1,6>: Cost 3 vext1 <4,u,5,1>, <6,2,7,3>
- 1638330064U, // <u,5,1,7>: Cost 2 vext3 RHS, <5,1,7,3>
- 1638477529U, // <u,5,1,u>: Cost 2 vext3 RHS, <5,1,u,3>
- 2561802342U, // <u,5,2,0>: Cost 3 vext1 <1,u,5,2>, LHS
- 2561803264U, // <u,5,2,1>: Cost 3 vext1 <1,u,5,2>, <1,3,5,7>
- 2631149217U, // <u,5,2,2>: Cost 3 vext2 <2,2,u,5>, <2,2,u,5>
- 1558071026U, // <u,5,2,3>: Cost 2 vext2 <2,3,u,5>, <2,3,u,5>
- 2561805622U, // <u,5,2,4>: Cost 3 vext1 <1,u,5,2>, RHS
- 2714062607U, // <u,5,2,5>: Cost 3 vext3 RHS, <5,2,5,3>
- 2631813050U, // <u,5,2,6>: Cost 3 vext2 <2,3,u,5>, <2,6,3,7>
- 3092335926U, // <u,5,2,7>: Cost 3 vtrnr <0,u,0,2>, RHS
- 1561389191U, // <u,5,2,u>: Cost 2 vext2 <2,u,u,5>, <2,u,u,5>
- 2561810534U, // <u,5,3,0>: Cost 3 vext1 <1,u,5,3>, LHS
- 2561811857U, // <u,5,3,1>: Cost 3 vext1 <1,u,5,3>, <1,u,5,3>
- 2631813474U, // <u,5,3,2>: Cost 3 vext2 <2,3,u,5>, <3,2,5,u>
- 2631813532U, // <u,5,3,3>: Cost 3 vext2 <2,3,u,5>, <3,3,3,3>
- 2619869698U, // <u,5,3,4>: Cost 3 vext2 <0,3,u,5>, <3,4,5,6>
- 3001847002U, // <u,5,3,5>: Cost 3 vzipr LHS, <4,4,5,5>
- 2954070530U, // <u,5,3,6>: Cost 3 vzipr LHS, <3,4,5,6>
- 2018749750U, // <u,5,3,7>: Cost 2 vtrnr LHS, RHS
- 2018749751U, // <u,5,3,u>: Cost 2 vtrnr LHS, RHS
- 2573762662U, // <u,5,4,0>: Cost 3 vext1 <3,u,5,4>, LHS
- 2620017634U, // <u,5,4,1>: Cost 3 vext2 <0,4,1,5>, <4,1,5,0>
- 2573764338U, // <u,5,4,2>: Cost 3 vext1 <3,u,5,4>, <2,3,u,5>
- 2573765444U, // <u,5,4,3>: Cost 3 vext1 <3,u,5,4>, <3,u,5,4>
- 1570680053U, // <u,5,4,4>: Cost 2 vext2 <4,4,u,5>, <4,4,u,5>
- 1558072630U, // <u,5,4,5>: Cost 2 vext2 <2,3,u,5>, RHS
- 2645749143U, // <u,5,4,6>: Cost 3 vext2 <4,6,u,5>, <4,6,u,5>
- 1638330310U, // <u,5,4,7>: Cost 2 vext3 RHS, <5,4,7,6>
- 1558072873U, // <u,5,4,u>: Cost 2 vext2 <2,3,u,5>, RHS
- 1506000998U, // <u,5,5,0>: Cost 2 vext1 <4,u,5,5>, LHS
- 2561827984U, // <u,5,5,1>: Cost 3 vext1 <1,u,5,5>, <1,5,3,7>
- 2579744360U, // <u,5,5,2>: Cost 3 vext1 <4,u,5,5>, <2,2,2,2>
- 2579744918U, // <u,5,5,3>: Cost 3 vext1 <4,u,5,5>, <3,0,1,2>
- 1506004278U, // <u,5,5,4>: Cost 2 vext1 <4,u,5,5>, RHS
- 229035318U, // <u,5,5,5>: Cost 1 vdup1 RHS
- 2712072206U, // <u,5,5,6>: Cost 3 vext3 RHS, <5,5,6,6>
- 1638330392U, // <u,5,5,7>: Cost 2 vext3 RHS, <5,5,7,7>
- 229035318U, // <u,5,5,u>: Cost 1 vdup1 RHS
- 1500037222U, // <u,5,6,0>: Cost 2 vext1 <3,u,5,6>, LHS
- 2561836436U, // <u,5,6,1>: Cost 3 vext1 <1,u,5,6>, <1,u,5,6>
- 2567809133U, // <u,5,6,2>: Cost 3 vext1 <2,u,5,6>, <2,u,5,6>
- 1500040006U, // <u,5,6,3>: Cost 2 vext1 <3,u,5,6>, <3,u,5,6>
- 1500040502U, // <u,5,6,4>: Cost 2 vext1 <3,u,5,6>, RHS
- 2714062935U, // <u,5,6,5>: Cost 3 vext3 RHS, <5,6,5,7>
- 2712072288U, // <u,5,6,6>: Cost 3 vext3 RHS, <5,6,6,7>
- 27705344U, // <u,5,6,7>: Cost 0 copy RHS
- 27705344U, // <u,5,6,u>: Cost 0 copy RHS
- 1488101478U, // <u,5,7,0>: Cost 2 vext1 <1,u,5,7>, LHS
- 1488102805U, // <u,5,7,1>: Cost 2 vext1 <1,u,5,7>, <1,u,5,7>
- 2561844840U, // <u,5,7,2>: Cost 3 vext1 <1,u,5,7>, <2,2,2,2>
- 2561845398U, // <u,5,7,3>: Cost 3 vext1 <1,u,5,7>, <3,0,1,2>
- 1488104758U, // <u,5,7,4>: Cost 2 vext1 <1,u,5,7>, RHS
- 1638330536U, // <u,5,7,5>: Cost 2 vext3 RHS, <5,7,5,7>
- 2712072362U, // <u,5,7,6>: Cost 3 vext3 RHS, <5,7,6,0>
- 2042965302U, // <u,5,7,7>: Cost 2 vtrnr RHS, RHS
- 1488107310U, // <u,5,7,u>: Cost 2 vext1 <1,u,5,7>, LHS
- 1488109670U, // <u,5,u,0>: Cost 2 vext1 <1,u,5,u>, LHS
- 1488110998U, // <u,5,u,1>: Cost 2 vext1 <1,u,5,u>, <1,u,5,u>
- 2561853032U, // <u,5,u,2>: Cost 3 vext1 <1,u,5,u>, <2,2,2,2>
- 1500056392U, // <u,5,u,3>: Cost 2 vext1 <3,u,5,u>, <3,u,5,u>
- 1488112950U, // <u,5,u,4>: Cost 2 vext1 <1,u,5,u>, RHS
- 229035318U, // <u,5,u,5>: Cost 1 vdup1 RHS
- 2954111490U, // <u,5,u,6>: Cost 3 vzipr LHS, <3,4,5,6>
- 27705344U, // <u,5,u,7>: Cost 0 copy RHS
- 27705344U, // <u,5,u,u>: Cost 0 copy RHS
- 2619211776U, // <u,6,0,0>: Cost 3 vext2 <0,2,u,6>, <0,0,0,0>
- 1545470054U, // <u,6,0,1>: Cost 2 vext2 <0,2,u,6>, LHS
- 1545470192U, // <u,6,0,2>: Cost 2 vext2 <0,2,u,6>, <0,2,u,6>
- 2255958969U, // <u,6,0,3>: Cost 3 vrev <6,u,3,0>
- 1546797458U, // <u,6,0,4>: Cost 2 vext2 <0,4,u,6>, <0,4,u,6>
- 2720624971U, // <u,6,0,5>: Cost 3 vext3 <6,0,5,u>, <6,0,5,u>
- 2256180180U, // <u,6,0,6>: Cost 3 vrev <6,u,6,0>
- 2960682294U, // <u,6,0,7>: Cost 3 vzipr <1,2,u,0>, RHS
- 1545470621U, // <u,6,0,u>: Cost 2 vext2 <0,2,u,6>, LHS
- 1182004127U, // <u,6,1,0>: Cost 2 vrev <6,u,0,1>
- 2619212596U, // <u,6,1,1>: Cost 3 vext2 <0,2,u,6>, <1,1,1,1>
- 2619212694U, // <u,6,1,2>: Cost 3 vext2 <0,2,u,6>, <1,2,3,0>
- 2619212760U, // <u,6,1,3>: Cost 3 vext2 <0,2,u,6>, <1,3,1,3>
- 2626511979U, // <u,6,1,4>: Cost 3 vext2 <1,4,u,6>, <1,4,u,6>
- 2619212944U, // <u,6,1,5>: Cost 3 vext2 <0,2,u,6>, <1,5,3,7>
- 2714063264U, // <u,6,1,6>: Cost 3 vext3 RHS, <6,1,6,3>
- 2967326006U, // <u,6,1,7>: Cost 3 vzipr <2,3,u,1>, RHS
- 1182594023U, // <u,6,1,u>: Cost 2 vrev <6,u,u,1>
- 1506050150U, // <u,6,2,0>: Cost 2 vext1 <4,u,6,2>, LHS
- 2579792630U, // <u,6,2,1>: Cost 3 vext1 <4,u,6,2>, <1,0,3,2>
- 2619213416U, // <u,6,2,2>: Cost 3 vext2 <0,2,u,6>, <2,2,2,2>
- 2619213478U, // <u,6,2,3>: Cost 3 vext2 <0,2,u,6>, <2,3,0,1>
- 1506053430U, // <u,6,2,4>: Cost 2 vext1 <4,u,6,2>, RHS
- 2633148309U, // <u,6,2,5>: Cost 3 vext2 <2,5,u,6>, <2,5,u,6>
- 2619213754U, // <u,6,2,6>: Cost 3 vext2 <0,2,u,6>, <2,6,3,7>
- 1638330874U, // <u,6,2,7>: Cost 2 vext3 RHS, <6,2,7,3>
- 1638478339U, // <u,6,2,u>: Cost 2 vext3 RHS, <6,2,u,3>
- 2619213974U, // <u,6,3,0>: Cost 3 vext2 <0,2,u,6>, <3,0,1,2>
- 2255836074U, // <u,6,3,1>: Cost 3 vrev <6,u,1,3>
- 2255909811U, // <u,6,3,2>: Cost 3 vrev <6,u,2,3>
- 2619214236U, // <u,6,3,3>: Cost 3 vext2 <0,2,u,6>, <3,3,3,3>
- 1564715549U, // <u,6,3,4>: Cost 2 vext2 <3,4,u,6>, <3,4,u,6>
- 2639121006U, // <u,6,3,5>: Cost 3 vext2 <3,5,u,6>, <3,5,u,6>
- 3001847012U, // <u,6,3,6>: Cost 3 vzipr LHS, <4,4,6,6>
- 1880329526U, // <u,6,3,7>: Cost 2 vzipr LHS, RHS
- 1880329527U, // <u,6,3,u>: Cost 2 vzipr LHS, RHS
- 2567864422U, // <u,6,4,0>: Cost 3 vext1 <2,u,6,4>, LHS
- 2733011558U, // <u,6,4,1>: Cost 3 vext3 LHS, <6,4,1,3>
- 2567866484U, // <u,6,4,2>: Cost 3 vext1 <2,u,6,4>, <2,u,6,4>
- 2638458005U, // <u,6,4,3>: Cost 3 vext2 <3,4,u,6>, <4,3,6,u>
- 1570540772U, // <u,6,4,4>: Cost 2 vext2 <4,4,6,6>, <4,4,6,6>
- 1545473334U, // <u,6,4,5>: Cost 2 vext2 <0,2,u,6>, RHS
- 1572015512U, // <u,6,4,6>: Cost 2 vext2 <4,6,u,6>, <4,6,u,6>
- 2960715062U, // <u,6,4,7>: Cost 3 vzipr <1,2,u,4>, RHS
- 1545473577U, // <u,6,4,u>: Cost 2 vext2 <0,2,u,6>, RHS
- 2567872614U, // <u,6,5,0>: Cost 3 vext1 <2,u,6,5>, LHS
- 2645757648U, // <u,6,5,1>: Cost 3 vext2 <4,6,u,6>, <5,1,7,3>
- 2567874490U, // <u,6,5,2>: Cost 3 vext1 <2,u,6,5>, <2,6,3,7>
- 2576501250U, // <u,6,5,3>: Cost 3 vext1 <4,3,6,5>, <3,4,5,6>
- 1576660943U, // <u,6,5,4>: Cost 2 vext2 <5,4,u,6>, <5,4,u,6>
- 2645757956U, // <u,6,5,5>: Cost 3 vext2 <4,6,u,6>, <5,5,5,5>
- 2645758050U, // <u,6,5,6>: Cost 3 vext2 <4,6,u,6>, <5,6,7,0>
- 2824080694U, // <u,6,5,7>: Cost 3 vuzpr <0,u,2,6>, RHS
- 1182626795U, // <u,6,5,u>: Cost 2 vrev <6,u,u,5>
- 1506082918U, // <u,6,6,0>: Cost 2 vext1 <4,u,6,6>, LHS
- 2579825398U, // <u,6,6,1>: Cost 3 vext1 <4,u,6,6>, <1,0,3,2>
- 2645758458U, // <u,6,6,2>: Cost 3 vext2 <4,6,u,6>, <6,2,7,3>
- 2579826838U, // <u,6,6,3>: Cost 3 vext1 <4,u,6,6>, <3,0,1,2>
- 1506086198U, // <u,6,6,4>: Cost 2 vext1 <4,u,6,6>, RHS
- 2579828432U, // <u,6,6,5>: Cost 3 vext1 <4,u,6,6>, <5,1,7,3>
- 296144182U, // <u,6,6,6>: Cost 1 vdup2 RHS
- 1638331202U, // <u,6,6,7>: Cost 2 vext3 RHS, <6,6,7,7>
- 296144182U, // <u,6,6,u>: Cost 1 vdup2 RHS
- 432349286U, // <u,6,7,0>: Cost 1 vext1 RHS, LHS
- 1506091766U, // <u,6,7,1>: Cost 2 vext1 RHS, <1,0,3,2>
- 1506092648U, // <u,6,7,2>: Cost 2 vext1 RHS, <2,2,2,2>
- 1506093206U, // <u,6,7,3>: Cost 2 vext1 RHS, <3,0,1,2>
- 432352809U, // <u,6,7,4>: Cost 1 vext1 RHS, RHS
- 1506094800U, // <u,6,7,5>: Cost 2 vext1 RHS, <5,1,7,3>
- 1506095610U, // <u,6,7,6>: Cost 2 vext1 RHS, <6,2,7,3>
- 1906904374U, // <u,6,7,7>: Cost 2 vzipr RHS, RHS
- 432355118U, // <u,6,7,u>: Cost 1 vext1 RHS, LHS
- 432357478U, // <u,6,u,0>: Cost 1 vext1 RHS, LHS
- 1545475886U, // <u,6,u,1>: Cost 2 vext2 <0,2,u,6>, LHS
- 1506100840U, // <u,6,u,2>: Cost 2 vext1 RHS, <2,2,2,2>
- 1506101398U, // <u,6,u,3>: Cost 2 vext1 RHS, <3,0,1,2>
- 432361002U, // <u,6,u,4>: Cost 1 vext1 RHS, RHS
- 1545476250U, // <u,6,u,5>: Cost 2 vext2 <0,2,u,6>, RHS
- 296144182U, // <u,6,u,6>: Cost 1 vdup2 RHS
- 1880370486U, // <u,6,u,7>: Cost 2 vzipr LHS, RHS
- 432363310U, // <u,6,u,u>: Cost 1 vext1 RHS, LHS
- 1571356672U, // <u,7,0,0>: Cost 2 vext2 RHS, <0,0,0,0>
- 497614950U, // <u,7,0,1>: Cost 1 vext2 RHS, LHS
- 1571356836U, // <u,7,0,2>: Cost 2 vext2 RHS, <0,2,0,2>
- 2573880146U, // <u,7,0,3>: Cost 3 vext1 <3,u,7,0>, <3,u,7,0>
- 1571357010U, // <u,7,0,4>: Cost 2 vext2 RHS, <0,4,1,5>
- 1512083716U, // <u,7,0,5>: Cost 2 vext1 <5,u,7,0>, <5,u,7,0>
- 2621874741U, // <u,7,0,6>: Cost 3 vext2 <0,6,u,7>, <0,6,u,7>
- 2585826298U, // <u,7,0,7>: Cost 3 vext1 <5,u,7,0>, <7,0,1,2>
- 497615517U, // <u,7,0,u>: Cost 1 vext2 RHS, LHS
- 1571357430U, // <u,7,1,0>: Cost 2 vext2 RHS, <1,0,3,2>
- 1571357492U, // <u,7,1,1>: Cost 2 vext2 RHS, <1,1,1,1>
- 1571357590U, // <u,7,1,2>: Cost 2 vext2 RHS, <1,2,3,0>
- 1552114715U, // <u,7,1,3>: Cost 2 vext2 <1,3,u,7>, <1,3,u,7>
- 2573888822U, // <u,7,1,4>: Cost 3 vext1 <3,u,7,1>, RHS
- 1553441981U, // <u,7,1,5>: Cost 2 vext2 <1,5,u,7>, <1,5,u,7>
- 2627847438U, // <u,7,1,6>: Cost 3 vext2 <1,6,u,7>, <1,6,u,7>
- 2727408775U, // <u,7,1,7>: Cost 3 vext3 <7,1,7,u>, <7,1,7,u>
- 1555432880U, // <u,7,1,u>: Cost 2 vext2 <1,u,u,7>, <1,u,u,7>
- 2629838337U, // <u,7,2,0>: Cost 3 vext2 <2,0,u,7>, <2,0,u,7>
- 1188058754U, // <u,7,2,1>: Cost 2 vrev <7,u,1,2>
- 1571358312U, // <u,7,2,2>: Cost 2 vext2 RHS, <2,2,2,2>
- 1571358374U, // <u,7,2,3>: Cost 2 vext2 RHS, <2,3,0,1>
- 2632492869U, // <u,7,2,4>: Cost 3 vext2 <2,4,u,7>, <2,4,u,7>
- 2633156502U, // <u,7,2,5>: Cost 3 vext2 <2,5,u,7>, <2,5,u,7>
- 1560078311U, // <u,7,2,6>: Cost 2 vext2 <2,6,u,7>, <2,6,u,7>
- 2728072408U, // <u,7,2,7>: Cost 3 vext3 <7,2,7,u>, <7,2,7,u>
- 1561405577U, // <u,7,2,u>: Cost 2 vext2 <2,u,u,7>, <2,u,u,7>
- 1571358870U, // <u,7,3,0>: Cost 2 vext2 RHS, <3,0,1,2>
- 2627184913U, // <u,7,3,1>: Cost 3 vext2 <1,5,u,7>, <3,1,5,u>
- 2633820523U, // <u,7,3,2>: Cost 3 vext2 <2,6,u,7>, <3,2,6,u>
- 1571359132U, // <u,7,3,3>: Cost 2 vext2 RHS, <3,3,3,3>
- 1571359234U, // <u,7,3,4>: Cost 2 vext2 RHS, <3,4,5,6>
- 1512108295U, // <u,7,3,5>: Cost 2 vext1 <5,u,7,3>, <5,u,7,3>
- 1518080992U, // <u,7,3,6>: Cost 2 vext1 <6,u,7,3>, <6,u,7,3>
- 2640456465U, // <u,7,3,7>: Cost 3 vext2 <3,7,u,7>, <3,7,u,7>
- 1571359518U, // <u,7,3,u>: Cost 2 vext2 RHS, <3,u,1,2>
- 1571359634U, // <u,7,4,0>: Cost 2 vext2 RHS, <4,0,5,1>
- 2573911067U, // <u,7,4,1>: Cost 3 vext1 <3,u,7,4>, <1,3,u,7>
- 2645101622U, // <u,7,4,2>: Cost 3 vext2 RHS, <4,2,5,3>
- 2573912918U, // <u,7,4,3>: Cost 3 vext1 <3,u,7,4>, <3,u,7,4>
- 1571359952U, // <u,7,4,4>: Cost 2 vext2 RHS, <4,4,4,4>
- 497618248U, // <u,7,4,5>: Cost 1 vext2 RHS, RHS
- 1571360116U, // <u,7,4,6>: Cost 2 vext2 RHS, <4,6,4,6>
- 2645102024U, // <u,7,4,7>: Cost 3 vext2 RHS, <4,7,5,0>
- 497618473U, // <u,7,4,u>: Cost 1 vext2 RHS, RHS
- 2645102152U, // <u,7,5,0>: Cost 3 vext2 RHS, <5,0,1,2>
- 1571360464U, // <u,7,5,1>: Cost 2 vext2 RHS, <5,1,7,3>
- 2645102334U, // <u,7,5,2>: Cost 3 vext2 RHS, <5,2,3,4>
- 2645102447U, // <u,7,5,3>: Cost 3 vext2 RHS, <5,3,7,0>
- 1571360710U, // <u,7,5,4>: Cost 2 vext2 RHS, <5,4,7,6>
- 1571360772U, // <u,7,5,5>: Cost 2 vext2 RHS, <5,5,5,5>
- 1571360866U, // <u,7,5,6>: Cost 2 vext2 RHS, <5,6,7,0>
- 1571360936U, // <u,7,5,7>: Cost 2 vext2 RHS, <5,7,5,7>
- 1571361017U, // <u,7,5,u>: Cost 2 vext2 RHS, <5,u,5,7>
- 1530044518U, // <u,7,6,0>: Cost 2 vext1 <u,u,7,6>, LHS
- 2645103016U, // <u,7,6,1>: Cost 3 vext2 RHS, <6,1,7,2>
- 1571361274U, // <u,7,6,2>: Cost 2 vext2 RHS, <6,2,7,3>
- 2645103154U, // <u,7,6,3>: Cost 3 vext2 RHS, <6,3,4,5>
- 1530047798U, // <u,7,6,4>: Cost 2 vext1 <u,u,7,6>, RHS
- 1188386474U, // <u,7,6,5>: Cost 2 vrev <7,u,5,6>
- 1571361592U, // <u,7,6,6>: Cost 2 vext2 RHS, <6,6,6,6>
- 1571361614U, // <u,7,6,7>: Cost 2 vext2 RHS, <6,7,0,1>
- 1571361695U, // <u,7,6,u>: Cost 2 vext2 RHS, <6,u,0,1>
- 1571361786U, // <u,7,7,0>: Cost 2 vext2 RHS, <7,0,1,2>
- 2573935616U, // <u,7,7,1>: Cost 3 vext1 <3,u,7,7>, <1,3,5,7>
- 2645103781U, // <u,7,7,2>: Cost 3 vext2 RHS, <7,2,2,2>
- 2573937497U, // <u,7,7,3>: Cost 3 vext1 <3,u,7,7>, <3,u,7,7>
- 1571362150U, // <u,7,7,4>: Cost 2 vext2 RHS, <7,4,5,6>
- 1512141067U, // <u,7,7,5>: Cost 2 vext1 <5,u,7,7>, <5,u,7,7>
- 1518113764U, // <u,7,7,6>: Cost 2 vext1 <6,u,7,7>, <6,u,7,7>
- 363253046U, // <u,7,7,7>: Cost 1 vdup3 RHS
- 363253046U, // <u,7,7,u>: Cost 1 vdup3 RHS
- 1571362515U, // <u,7,u,0>: Cost 2 vext2 RHS, <u,0,1,2>
- 497620782U, // <u,7,u,1>: Cost 1 vext2 RHS, LHS
- 1571362693U, // <u,7,u,2>: Cost 2 vext2 RHS, <u,2,3,0>
- 1571362748U, // <u,7,u,3>: Cost 2 vext2 RHS, <u,3,0,1>
- 1571362879U, // <u,7,u,4>: Cost 2 vext2 RHS, <u,4,5,6>
- 497621146U, // <u,7,u,5>: Cost 1 vext2 RHS, RHS
- 1571363024U, // <u,7,u,6>: Cost 2 vext2 RHS, <u,6,3,7>
- 363253046U, // <u,7,u,7>: Cost 1 vdup3 RHS
- 497621349U, // <u,7,u,u>: Cost 1 vext2 RHS, LHS
- 135053414U, // <u,u,0,0>: Cost 1 vdup0 LHS
- 471081121U, // <u,u,0,1>: Cost 1 vext2 LHS, LHS
- 1544822948U, // <u,u,0,2>: Cost 2 vext2 LHS, <0,2,0,2>
- 1616140005U, // <u,u,0,3>: Cost 2 vext3 LHS, <u,0,3,2>
- 1544823122U, // <u,u,0,4>: Cost 2 vext2 LHS, <0,4,1,5>
- 1512157453U, // <u,u,0,5>: Cost 2 vext1 <5,u,u,0>, <5,u,u,0>
- 1662220032U, // <u,u,0,6>: Cost 2 vext3 RHS, <u,0,6,2>
- 1194457487U, // <u,u,0,7>: Cost 2 vrev <u,u,7,0>
- 471081629U, // <u,u,0,u>: Cost 1 vext2 LHS, LHS
- 1544823542U, // <u,u,1,0>: Cost 2 vext2 LHS, <1,0,3,2>
- 202162278U, // <u,u,1,1>: Cost 1 vdup1 LHS
- 537753390U, // <u,u,1,2>: Cost 1 vext3 LHS, LHS
- 1544823768U, // <u,u,1,3>: Cost 2 vext2 LHS, <1,3,1,3>
- 1494248758U, // <u,u,1,4>: Cost 2 vext1 <2,u,u,1>, RHS
- 1544823952U, // <u,u,1,5>: Cost 2 vext2 LHS, <1,5,3,7>
- 1518138343U, // <u,u,1,6>: Cost 2 vext1 <6,u,u,1>, <6,u,u,1>
- 1640322907U, // <u,u,1,7>: Cost 2 vext3 RHS, <u,1,7,3>
- 537753444U, // <u,u,1,u>: Cost 1 vext3 LHS, LHS
- 1482309734U, // <u,u,2,0>: Cost 2 vext1 <0,u,u,2>, LHS
- 1194031451U, // <u,u,2,1>: Cost 2 vrev <u,u,1,2>
- 269271142U, // <u,u,2,2>: Cost 1 vdup2 LHS
- 835584U, // <u,u,2,3>: Cost 0 copy LHS
- 1482313014U, // <u,u,2,4>: Cost 2 vext1 <0,u,u,2>, RHS
- 2618566504U, // <u,u,2,5>: Cost 3 vext2 LHS, <2,5,3,6>
- 1544824762U, // <u,u,2,6>: Cost 2 vext2 LHS, <2,6,3,7>
- 1638479788U, // <u,u,2,7>: Cost 2 vext3 RHS, <u,2,7,3>
- 835584U, // <u,u,2,u>: Cost 0 copy LHS
- 408576723U, // <u,u,3,0>: Cost 1 vext1 LHS, LHS
- 1482318582U, // <u,u,3,1>: Cost 2 vext1 LHS, <1,0,3,2>
- 120371557U, // <u,u,3,2>: Cost 1 vrev LHS
- 336380006U, // <u,u,3,3>: Cost 1 vdup3 LHS
- 408579382U, // <u,u,3,4>: Cost 1 vext1 LHS, RHS
- 1616140271U, // <u,u,3,5>: Cost 2 vext3 LHS, <u,3,5,7>
- 1530098170U, // <u,u,3,6>: Cost 2 vext1 LHS, <6,2,7,3>
- 1880329544U, // <u,u,3,7>: Cost 2 vzipr LHS, RHS
- 408581934U, // <u,u,3,u>: Cost 1 vext1 LHS, LHS
- 1488298086U, // <u,u,4,0>: Cost 2 vext1 <1,u,u,4>, LHS
- 1488299437U, // <u,u,4,1>: Cost 2 vext1 <1,u,u,4>, <1,u,u,4>
- 1659271204U, // <u,u,4,2>: Cost 2 vext3 LHS, <u,4,2,6>
- 1194195311U, // <u,u,4,3>: Cost 2 vrev <u,u,3,4>
- 161926454U, // <u,u,4,4>: Cost 1 vdup0 RHS
- 471084342U, // <u,u,4,5>: Cost 1 vext2 LHS, RHS
- 1571368308U, // <u,u,4,6>: Cost 2 vext2 RHS, <4,6,4,6>
- 1640323153U, // <u,u,4,7>: Cost 2 vext3 RHS, <u,4,7,6>
- 471084585U, // <u,u,4,u>: Cost 1 vext2 LHS, RHS
- 1494278246U, // <u,u,5,0>: Cost 2 vext1 <2,u,u,5>, LHS
- 1571368656U, // <u,u,5,1>: Cost 2 vext2 RHS, <5,1,7,3>
- 1494280327U, // <u,u,5,2>: Cost 2 vext1 <2,u,u,5>, <2,u,u,5>
- 1616140415U, // <u,u,5,3>: Cost 2 vext3 LHS, <u,5,3,7>
- 1494281526U, // <u,u,5,4>: Cost 2 vext1 <2,u,u,5>, RHS
- 229035318U, // <u,u,5,5>: Cost 1 vdup1 RHS
- 537753754U, // <u,u,5,6>: Cost 1 vext3 LHS, RHS
- 1750355254U, // <u,u,5,7>: Cost 2 vuzpr LHS, RHS
- 537753772U, // <u,u,5,u>: Cost 1 vext3 LHS, RHS
- 1482342502U, // <u,u,6,0>: Cost 2 vext1 <0,u,u,6>, LHS
- 2556084982U, // <u,u,6,1>: Cost 3 vext1 <0,u,u,6>, <1,0,3,2>
- 1571369466U, // <u,u,6,2>: Cost 2 vext2 RHS, <6,2,7,3>
- 1611938000U, // <u,u,6,3>: Cost 2 vext3 LHS, <u,6,3,7>
- 1482345782U, // <u,u,6,4>: Cost 2 vext1 <0,u,u,6>, RHS
- 1194359171U, // <u,u,6,5>: Cost 2 vrev <u,u,5,6>
- 296144182U, // <u,u,6,6>: Cost 1 vdup2 RHS
- 27705344U, // <u,u,6,7>: Cost 0 copy RHS
- 27705344U, // <u,u,6,u>: Cost 0 copy RHS
- 432496742U, // <u,u,7,0>: Cost 1 vext1 RHS, LHS
- 1488324016U, // <u,u,7,1>: Cost 2 vext1 <1,u,u,7>, <1,u,u,7>
- 1494296713U, // <u,u,7,2>: Cost 2 vext1 <2,u,u,7>, <2,u,u,7>
- 1906901148U, // <u,u,7,3>: Cost 2 vzipr RHS, LHS
- 432500283U, // <u,u,7,4>: Cost 1 vext1 RHS, RHS
- 1506242256U, // <u,u,7,5>: Cost 2 vext1 RHS, <5,1,7,3>
- 120699277U, // <u,u,7,6>: Cost 1 vrev RHS
- 363253046U, // <u,u,7,7>: Cost 1 vdup3 RHS
- 432502574U, // <u,u,7,u>: Cost 1 vext1 RHS, LHS
- 408617688U, // <u,u,u,0>: Cost 1 vext1 LHS, LHS
- 471086894U, // <u,u,u,1>: Cost 1 vext2 LHS, LHS
- 537753957U, // <u,u,u,2>: Cost 1 vext3 LHS, LHS
- 835584U, // <u,u,u,3>: Cost 0 copy LHS
- 408620342U, // <u,u,u,4>: Cost 1 vext1 LHS, RHS
- 471087258U, // <u,u,u,5>: Cost 1 vext2 LHS, RHS
- 537753997U, // <u,u,u,6>: Cost 1 vext3 LHS, RHS
- 27705344U, // <u,u,u,7>: Cost 0 copy RHS
- 835584U, // <u,u,u,u>: Cost 0 copy LHS
- 0
-};
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMRegisterInfo.cpp b/libclamav/c++/llvm/lib/Target/ARM/ARMRegisterInfo.cpp
deleted file mode 100644
index d5bc3f6..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMRegisterInfo.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-//===- ARMRegisterInfo.cpp - ARM Register Information -----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the ARM implementation of the TargetRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARM.h"
-#include "ARMAddressingModes.h"
-#include "ARMBaseInstrInfo.h"
-#include "ARMInstrInfo.h"
-#include "ARMMachineFunctionInfo.h"
-#include "ARMRegisterInfo.h"
-#include "ARMSubtarget.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineLocation.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/RegisterScavenging.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/TargetFrameInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/SmallVector.h"
-using namespace llvm;
-
-ARMRegisterInfo::ARMRegisterInfo(const ARMBaseInstrInfo &tii,
- const ARMSubtarget &sti)
- : ARMBaseRegisterInfo(tii, sti) {
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMRegisterInfo.h b/libclamav/c++/llvm/lib/Target/ARM/ARMRegisterInfo.h
deleted file mode 100644
index 041afd0..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMRegisterInfo.h
+++ /dev/null
@@ -1,43 +0,0 @@
-//===- ARMRegisterInfo.h - ARM Register Information Impl --------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the ARM implementation of the TargetRegisterInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARMREGISTERINFO_H
-#define ARMREGISTERINFO_H
-
-#include "ARM.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "ARMBaseRegisterInfo.h"
-
-namespace llvm {
- class ARMSubtarget;
- class ARMBaseInstrInfo;
- class Type;
-
-namespace ARM {
- /// SubregIndex - The index of various subregister classes. Note that
- /// these indices must be kept in sync with the class indices in the
- /// ARMRegisterInfo.td file.
- enum SubregIndex {
- SSUBREG_0 = 1, SSUBREG_1 = 2, SSUBREG_2 = 3, SSUBREG_3 = 4,
- DSUBREG_0 = 5, DSUBREG_1 = 6
- };
-}
-
-struct ARMRegisterInfo : public ARMBaseRegisterInfo {
-public:
- ARMRegisterInfo(const ARMBaseInstrInfo &tii, const ARMSubtarget &STI);
-};
-
-} // end namespace llvm
-
-#endif
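
As an illustrative aside on the SubregIndex values above: for the D registers, indices 1 and 2 select the even and odd S register that a D register overlaps (Dn aliases S2n and S2n+1, per the SubRegSet definitions in ARMRegisterInfo.td below). A minimal standalone C++ sketch of that mapping follows; the helper name is hypothetical, and only D0-D15 have S subregisters.

#include <cstdio>
#include <string>

// Sketch: map a D register number and an S-subregister index (1 or 2,
// i.e. SSUBREG_0/SSUBREG_1) to the S register it aliases.
// Dn overlaps S(2n) and S(2n+1); D16-D31 have no S subregisters.
static std::string sSubRegOfD(unsigned dNum, unsigned ssubIdx) {
  if (dNum > 15 || ssubIdx < 1 || ssubIdx > 2)
    return "<none>";
  return "s" + std::to_string(2 * dNum + (ssubIdx - 1));
}

int main() {
  std::printf("%s %s\n", sSubRegOfD(5, 1).c_str(),   // "s10"
                         sSubRegOfD(5, 2).c_str());  // "s11"
}
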
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMRegisterInfo.td b/libclamav/c++/llvm/lib/Target/ARM/ARMRegisterInfo.td
deleted file mode 100644
index 0d4200c..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMRegisterInfo.td
+++ /dev/null
@@ -1,410 +0,0 @@
-//===- ARMRegisterInfo.td - ARM Register defs -------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Declarations that describe the ARM register file
-//===----------------------------------------------------------------------===//
-
-// Registers are identified with 4-bit ID numbers.
-class ARMReg<bits<4> num, string n, list<Register> subregs = []> : Register<n> {
- field bits<4> Num;
- let Namespace = "ARM";
- let SubRegs = subregs;
-}
-
-class ARMFReg<bits<6> num, string n> : Register<n> {
- field bits<6> Num;
- let Namespace = "ARM";
-}
-
-// Integer registers
-def R0 : ARMReg< 0, "r0">, DwarfRegNum<[0]>;
-def R1 : ARMReg< 1, "r1">, DwarfRegNum<[1]>;
-def R2 : ARMReg< 2, "r2">, DwarfRegNum<[2]>;
-def R3 : ARMReg< 3, "r3">, DwarfRegNum<[3]>;
-def R4 : ARMReg< 4, "r4">, DwarfRegNum<[4]>;
-def R5 : ARMReg< 5, "r5">, DwarfRegNum<[5]>;
-def R6 : ARMReg< 6, "r6">, DwarfRegNum<[6]>;
-def R7 : ARMReg< 7, "r7">, DwarfRegNum<[7]>;
-def R8 : ARMReg< 8, "r8">, DwarfRegNum<[8]>;
-def R9 : ARMReg< 9, "r9">, DwarfRegNum<[9]>;
-def R10 : ARMReg<10, "r10">, DwarfRegNum<[10]>;
-def R11 : ARMReg<11, "r11">, DwarfRegNum<[11]>;
-def R12 : ARMReg<12, "r12">, DwarfRegNum<[12]>;
-def SP : ARMReg<13, "sp">, DwarfRegNum<[13]>;
-def LR : ARMReg<14, "lr">, DwarfRegNum<[14]>;
-def PC : ARMReg<15, "pc">, DwarfRegNum<[15]>;
-
-// Float registers
-def S0 : ARMFReg< 0, "s0">; def S1 : ARMFReg< 1, "s1">;
-def S2 : ARMFReg< 2, "s2">; def S3 : ARMFReg< 3, "s3">;
-def S4 : ARMFReg< 4, "s4">; def S5 : ARMFReg< 5, "s5">;
-def S6 : ARMFReg< 6, "s6">; def S7 : ARMFReg< 7, "s7">;
-def S8 : ARMFReg< 8, "s8">; def S9 : ARMFReg< 9, "s9">;
-def S10 : ARMFReg<10, "s10">; def S11 : ARMFReg<11, "s11">;
-def S12 : ARMFReg<12, "s12">; def S13 : ARMFReg<13, "s13">;
-def S14 : ARMFReg<14, "s14">; def S15 : ARMFReg<15, "s15">;
-def S16 : ARMFReg<16, "s16">; def S17 : ARMFReg<17, "s17">;
-def S18 : ARMFReg<18, "s18">; def S19 : ARMFReg<19, "s19">;
-def S20 : ARMFReg<20, "s20">; def S21 : ARMFReg<21, "s21">;
-def S22 : ARMFReg<22, "s22">; def S23 : ARMFReg<23, "s23">;
-def S24 : ARMFReg<24, "s24">; def S25 : ARMFReg<25, "s25">;
-def S26 : ARMFReg<26, "s26">; def S27 : ARMFReg<27, "s27">;
-def S28 : ARMFReg<28, "s28">; def S29 : ARMFReg<29, "s29">;
-def S30 : ARMFReg<30, "s30">; def S31 : ARMFReg<31, "s31">;
-def SDummy : ARMFReg<63, "sINVALID">;
-
-// Aliases of the S* registers used to hold 64-bit fp values (doubles)
-def D0 : ARMReg< 0, "d0", [S0, S1]>;
-def D1 : ARMReg< 1, "d1", [S2, S3]>;
-def D2 : ARMReg< 2, "d2", [S4, S5]>;
-def D3 : ARMReg< 3, "d3", [S6, S7]>;
-def D4 : ARMReg< 4, "d4", [S8, S9]>;
-def D5 : ARMReg< 5, "d5", [S10, S11]>;
-def D6 : ARMReg< 6, "d6", [S12, S13]>;
-def D7 : ARMReg< 7, "d7", [S14, S15]>;
-def D8 : ARMReg< 8, "d8", [S16, S17]>;
-def D9 : ARMReg< 9, "d9", [S18, S19]>;
-def D10 : ARMReg<10, "d10", [S20, S21]>;
-def D11 : ARMReg<11, "d11", [S22, S23]>;
-def D12 : ARMReg<12, "d12", [S24, S25]>;
-def D13 : ARMReg<13, "d13", [S26, S27]>;
-def D14 : ARMReg<14, "d14", [S28, S29]>;
-def D15 : ARMReg<15, "d15", [S30, S31]>;
-
-// VFP3 defines 16 additional double registers
-def D16 : ARMFReg<16, "d16">; def D17 : ARMFReg<17, "d17">;
-def D18 : ARMFReg<18, "d18">; def D19 : ARMFReg<19, "d19">;
-def D20 : ARMFReg<20, "d20">; def D21 : ARMFReg<21, "d21">;
-def D22 : ARMFReg<22, "d22">; def D23 : ARMFReg<23, "d23">;
-def D24 : ARMFReg<24, "d24">; def D25 : ARMFReg<25, "d25">;
-def D26 : ARMFReg<26, "d26">; def D27 : ARMFReg<27, "d27">;
-def D28 : ARMFReg<28, "d28">; def D29 : ARMFReg<29, "d29">;
-def D30 : ARMFReg<30, "d30">; def D31 : ARMFReg<31, "d31">;
-
-// Advanced SIMD (NEON) defines 16 quad-word aliases
-def Q0 : ARMReg< 0, "q0", [D0, D1]>;
-def Q1 : ARMReg< 1, "q1", [D2, D3]>;
-def Q2 : ARMReg< 2, "q2", [D4, D5]>;
-def Q3 : ARMReg< 3, "q3", [D6, D7]>;
-def Q4 : ARMReg< 4, "q4", [D8, D9]>;
-def Q5 : ARMReg< 5, "q5", [D10, D11]>;
-def Q6 : ARMReg< 6, "q6", [D12, D13]>;
-def Q7 : ARMReg< 7, "q7", [D14, D15]>;
-def Q8 : ARMReg< 8, "q8", [D16, D17]>;
-def Q9 : ARMReg< 9, "q9", [D18, D19]>;
-def Q10 : ARMReg<10, "q10", [D20, D21]>;
-def Q11 : ARMReg<11, "q11", [D22, D23]>;
-def Q12 : ARMReg<12, "q12", [D24, D25]>;
-def Q13 : ARMReg<13, "q13", [D26, D27]>;
-def Q14 : ARMReg<14, "q14", [D28, D29]>;
-def Q15 : ARMReg<15, "q15", [D30, D31]>;
-
-// Current Program Status Register.
-def CPSR : ARMReg<0, "cpsr">;
-
-def FPSCR : ARMReg<1, "fpscr">;
-
-// Register classes.
-//
-// pc == Program Counter
-// lr == Link Register
-// sp == Stack Pointer
-// r12 == ip (scratch)
-// r7 == Frame Pointer (thumb-style backtraces)
-// r9 == May be reserved as Thread Register
-// r11 == Frame Pointer (arm-style backtraces)
-// r10 == Stack Limit
-//
-def GPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
- R7, R8, R9, R10, R11, R12,
- SP, LR, PC]> {
- let MethodProtos = [{
- iterator allocation_order_begin(const MachineFunction &MF) const;
- iterator allocation_order_end(const MachineFunction &MF) const;
- }];
- let MethodBodies = [{
- // FP is R11, R9 is available.
- static const unsigned ARM_GPR_AO_1[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3,
- ARM::R12,ARM::LR,
- ARM::R4, ARM::R5, ARM::R6, ARM::R7,
- ARM::R8, ARM::R9, ARM::R10,
- ARM::R11 };
- // FP is R11, R9 is not available.
- static const unsigned ARM_GPR_AO_2[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3,
- ARM::R12,ARM::LR,
- ARM::R4, ARM::R5, ARM::R6, ARM::R7,
- ARM::R8, ARM::R10,
- ARM::R11 };
- // FP is R7, R9 is available as non-callee-saved register.
- // This is used by Darwin.
- static const unsigned ARM_GPR_AO_3[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3,
- ARM::R9, ARM::R12,ARM::LR,
- ARM::R4, ARM::R5, ARM::R6,
- ARM::R8, ARM::R10,ARM::R11,ARM::R7 };
- // FP is R7, R9 is not available.
- static const unsigned ARM_GPR_AO_4[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3,
- ARM::R12,ARM::LR,
- ARM::R4, ARM::R5, ARM::R6,
- ARM::R8, ARM::R10,ARM::R11,
- ARM::R7 };
- // FP is R7, R9 is available as callee-saved register.
- // This is used by non-Darwin platforms in Thumb mode.
- static const unsigned ARM_GPR_AO_5[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3,
- ARM::R12,ARM::LR,
- ARM::R4, ARM::R5, ARM::R6,
- ARM::R8, ARM::R9, ARM::R10,ARM::R11,ARM::R7 };
-
- // For Thumb1 mode, we don't want to allocate hi regs at all, as we
- // don't know how to spill them. If we make our prologue/epilogue code
- // smarter at some point, we can go back to using the above allocation
- // orders for the Thumb1 instructions that know how to use hi regs.
- static const unsigned THUMB_GPR_AO[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3,
- ARM::R4, ARM::R5, ARM::R6, ARM::R7 };
-
- GPRClass::iterator
- GPRClass::allocation_order_begin(const MachineFunction &MF) const {
- const TargetMachine &TM = MF.getTarget();
- const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
- if (Subtarget.isThumb1Only())
- return THUMB_GPR_AO;
- if (Subtarget.isTargetDarwin()) {
- if (Subtarget.isR9Reserved())
- return ARM_GPR_AO_4;
- else
- return ARM_GPR_AO_3;
- } else {
- if (Subtarget.isR9Reserved())
- return ARM_GPR_AO_2;
- else if (Subtarget.isThumb())
- return ARM_GPR_AO_5;
- else
- return ARM_GPR_AO_1;
- }
- }
-
- GPRClass::iterator
- GPRClass::allocation_order_end(const MachineFunction &MF) const {
- const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
- GPRClass::iterator I;
-
- if (Subtarget.isThumb1Only()) {
- I = THUMB_GPR_AO + (sizeof(THUMB_GPR_AO)/sizeof(unsigned));
- // Mac OS X requires FP not to be clobbered for backtracing purposes.
- return (Subtarget.isTargetDarwin() || RI->hasFP(MF)) ? I-1 : I;
- }
-
- if (Subtarget.isTargetDarwin()) {
- if (Subtarget.isR9Reserved())
- I = ARM_GPR_AO_4 + (sizeof(ARM_GPR_AO_4)/sizeof(unsigned));
- else
- I = ARM_GPR_AO_3 + (sizeof(ARM_GPR_AO_3)/sizeof(unsigned));
- } else {
- if (Subtarget.isR9Reserved())
- I = ARM_GPR_AO_2 + (sizeof(ARM_GPR_AO_2)/sizeof(unsigned));
- else if (Subtarget.isThumb())
- I = ARM_GPR_AO_5 + (sizeof(ARM_GPR_AO_5)/sizeof(unsigned));
- else
- I = ARM_GPR_AO_1 + (sizeof(ARM_GPR_AO_1)/sizeof(unsigned));
- }
-
- // Mac OS X requires FP not to be clobbered for backtracing purposes.
- return (Subtarget.isTargetDarwin() || RI->hasFP(MF)) ? I-1 : I;
- }
- }];
-}
-
-// Thumb registers are normally R0-R7. Some instructions can still use
-// the general GPR register class above (e.g. MOV).
-def tGPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6, R7]> {
- let MethodProtos = [{
- iterator allocation_order_begin(const MachineFunction &MF) const;
- iterator allocation_order_end(const MachineFunction &MF) const;
- }];
- let MethodBodies = [{
- static const unsigned THUMB_tGPR_AO[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3,
- ARM::R4, ARM::R5, ARM::R6, ARM::R7 };
-
- // FP is R7, only low registers available.
- tGPRClass::iterator
- tGPRClass::allocation_order_begin(const MachineFunction &MF) const {
- return THUMB_tGPR_AO;
- }
-
- tGPRClass::iterator
- tGPRClass::allocation_order_end(const MachineFunction &MF) const {
- const TargetMachine &TM = MF.getTarget();
- const TargetRegisterInfo *RI = TM.getRegisterInfo();
- const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
- tGPRClass::iterator I =
- THUMB_tGPR_AO + (sizeof(THUMB_tGPR_AO)/sizeof(unsigned));
- // Mac OS X requires FP not to be clobbered for backtracing purposes.
- return (Subtarget.isTargetDarwin() || RI->hasFP(MF)) ? I-1 : I;
- }
- }];
-}
-
-// Scalar single-precision floating point register class.
-def SPR : RegisterClass<"ARM", [f32], 32, [S0, S1, S2, S3, S4, S5, S6, S7, S8,
- S9, S10, S11, S12, S13, S14, S15, S16, S17, S18, S19, S20, S21, S22,
- S23, S24, S25, S26, S27, S28, S29, S30, S31]>;
-
-// Subset of SPR which can be used as a source of NEON scalars for 16-bit
-// operations
-def SPR_8 : RegisterClass<"ARM", [f32], 32,
- [S0, S1, S2, S3, S4, S5, S6, S7,
- S8, S9, S10, S11, S12, S13, S14, S15]>;
-
-// Dummy f32 regclass to represent impossible subreg indices.
-def SPR_INVALID : RegisterClass<"ARM", [f32], 32, [SDummy]> {
- let CopyCost = -1;
-}
-
-// Scalar double precision floating point / generic 64-bit vector register
-// class.
-// ARM requires only word alignment for doubles, though double-word
-// alignment performs better.
-def DPR : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
- [D0, D1, D2, D3, D4, D5, D6, D7,
- D8, D9, D10, D11, D12, D13, D14, D15,
- D16, D17, D18, D19, D20, D21, D22, D23,
- D24, D25, D26, D27, D28, D29, D30, D31]> {
- let SubRegClassList = [SPR_INVALID, SPR_INVALID];
- let MethodProtos = [{
- iterator allocation_order_begin(const MachineFunction &MF) const;
- iterator allocation_order_end(const MachineFunction &MF) const;
- }];
- let MethodBodies = [{
- // VFP2
- static const unsigned ARM_DPR_VFP2[] = {
- ARM::D0, ARM::D1, ARM::D2, ARM::D3,
- ARM::D4, ARM::D5, ARM::D6, ARM::D7,
- ARM::D8, ARM::D9, ARM::D10, ARM::D11,
- ARM::D12, ARM::D13, ARM::D14, ARM::D15 };
- // VFP3
- static const unsigned ARM_DPR_VFP3[] = {
- ARM::D0, ARM::D1, ARM::D2, ARM::D3,
- ARM::D4, ARM::D5, ARM::D6, ARM::D7,
- ARM::D8, ARM::D9, ARM::D10, ARM::D11,
- ARM::D12, ARM::D13, ARM::D14, ARM::D15,
- ARM::D16, ARM::D17, ARM::D18, ARM::D19,
- ARM::D20, ARM::D21, ARM::D22, ARM::D23,
- ARM::D24, ARM::D25, ARM::D26, ARM::D27,
- ARM::D28, ARM::D29, ARM::D30, ARM::D31 };
- DPRClass::iterator
- DPRClass::allocation_order_begin(const MachineFunction &MF) const {
- const TargetMachine &TM = MF.getTarget();
- const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
- if (Subtarget.hasVFP3())
- return ARM_DPR_VFP3;
- return ARM_DPR_VFP2;
- }
-
- DPRClass::iterator
- DPRClass::allocation_order_end(const MachineFunction &MF) const {
- const TargetMachine &TM = MF.getTarget();
- const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
- if (Subtarget.hasVFP3())
- return ARM_DPR_VFP3 + (sizeof(ARM_DPR_VFP3)/sizeof(unsigned));
- else
- return ARM_DPR_VFP2 + (sizeof(ARM_DPR_VFP2)/sizeof(unsigned));
- }
- }];
-}
-
-// Subset of DPR registers that are accessible with VFP2 (and so also have
-// 32-bit SPR subregs).
-def DPR_VFP2 : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
- [D0, D1, D2, D3, D4, D5, D6, D7,
- D8, D9, D10, D11, D12, D13, D14, D15]> {
- let SubRegClassList = [SPR, SPR];
-}
-
-// Subset of DPR which can be used as a source of NEON scalars for 16-bit
-// operations
-def DPR_8 : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
- [D0, D1, D2, D3, D4, D5, D6, D7]> {
- let SubRegClassList = [SPR_8, SPR_8];
-}
-
-// Generic 128-bit vector register class.
-def QPR : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], 128,
- [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7,
- Q8, Q9, Q10, Q11, Q12, Q13, Q14, Q15]> {
- let SubRegClassList = [SPR_INVALID, SPR_INVALID, SPR_INVALID, SPR_INVALID,
- DPR, DPR];
-}
-
-// Subset of QPR that have 32-bit SPR subregs.
-def QPR_VFP2 : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- 128,
- [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]> {
- let SubRegClassList = [SPR, SPR, SPR, SPR, DPR_VFP2, DPR_VFP2];
-}
-
-// Subset of QPR that have DPR_8 and SPR_8 subregs.
-def QPR_8 : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
- 128,
- [Q0, Q1, Q2, Q3]> {
- let SubRegClassList = [SPR_8, SPR_8, SPR_8, SPR_8, DPR_8, DPR_8];
-}
-
-// Condition code registers.
-def CCR : RegisterClass<"ARM", [i32], 32, [CPSR]>;
-
-//===----------------------------------------------------------------------===//
-// Subregister Set Definitions... now that we have all of the pieces, define the
-// sub registers for each register.
-//
-
-def arm_ssubreg_0 : PatLeaf<(i32 1)>;
-def arm_ssubreg_1 : PatLeaf<(i32 2)>;
-def arm_ssubreg_2 : PatLeaf<(i32 3)>;
-def arm_ssubreg_3 : PatLeaf<(i32 4)>;
-def arm_dsubreg_0 : PatLeaf<(i32 5)>;
-def arm_dsubreg_1 : PatLeaf<(i32 6)>;
-
-// S sub-registers of D registers.
-def : SubRegSet<1, [D0, D1, D2, D3, D4, D5, D6, D7,
- D8, D9, D10, D11, D12, D13, D14, D15],
- [S0, S2, S4, S6, S8, S10, S12, S14,
- S16, S18, S20, S22, S24, S26, S28, S30]>;
-def : SubRegSet<2, [D0, D1, D2, D3, D4, D5, D6, D7,
- D8, D9, D10, D11, D12, D13, D14, D15],
- [S1, S3, S5, S7, S9, S11, S13, S15,
- S17, S19, S21, S23, S25, S27, S29, S31]>;
-
-// S sub-registers of Q registers.
-def : SubRegSet<1, [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7],
- [S0, S4, S8, S12, S16, S20, S24, S28]>;
-def : SubRegSet<2, [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7],
- [S1, S5, S9, S13, S17, S21, S25, S29]>;
-def : SubRegSet<3, [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7],
- [S2, S6, S10, S14, S18, S22, S26, S30]>;
-def : SubRegSet<4, [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7],
- [S3, S7, S11, S15, S19, S23, S27, S31]>;
-
-// D sub-registers of Q registers.
-def : SubRegSet<5, [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7,
- Q8, Q9, Q10, Q11, Q12, Q13, Q14, Q15],
- [D0, D2, D4, D6, D8, D10, D12, D14,
- D16, D18, D20, D22, D24, D26, D28, D30]>;
-def : SubRegSet<6, [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7,
- Q8, Q9, Q10, Q11, Q12, Q13, Q14, Q15],
- [D1, D3, D5, D7, D9, D11, D13, D15,
- D17, D19, D21, D23, D25, D27, D29, D31]>;
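
As a side note on the GPR allocation-order bodies above: they amount to choosing one of five fixed register sequences from a few subtarget facts (Thumb1, Darwin, R9 reserved, Thumb) and then dropping the frame pointer, which every order lists last, whenever it must stay intact. Below is a standalone C++ sketch of that selection; the flag and helper names are hypothetical stand-ins for the ARMSubtarget/TargetRegisterInfo queries, not LLVM's API.

#include <cstddef>
#include <cstdio>
#include <utility>

// Hypothetical stand-ins for the ARMSubtarget/TargetRegisterInfo queries.
struct SubtargetFlags {
  bool thumb1Only;  // Thumb1: low registers only
  bool darwin;      // Darwin: FP is r7, r9 may be reserved
  bool r9Reserved;  // r9 reserved (e.g. as thread register)
  bool thumb;       // Thumb on non-Darwin: r9 is callee-saved
  bool needsFP;     // frame pointer must not be clobbered
};

// Return the chosen order and how many of its entries are allocatable,
// mirroring allocation_order_begin()/allocation_order_end() above.
template <std::size_t N>
static std::pair<const unsigned *, std::size_t>
pick(const unsigned (&order)[N], bool dropFP) {
  return { order, dropFP ? N - 1 : N };  // the frame pointer is always last
}

std::pair<const unsigned *, std::size_t> gprOrder(const SubtargetFlags &st) {
  enum { R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, SP, LR };
  static const unsigned AO_1[]  = { R0,R1,R2,R3,R12,LR,R4,R5,R6,R7,R8,R9,R10,R11 };
  static const unsigned AO_2[]  = { R0,R1,R2,R3,R12,LR,R4,R5,R6,R7,R8,R10,R11 };
  static const unsigned AO_3[]  = { R0,R1,R2,R3,R9,R12,LR,R4,R5,R6,R8,R10,R11,R7 };
  static const unsigned AO_4[]  = { R0,R1,R2,R3,R12,LR,R4,R5,R6,R8,R10,R11,R7 };
  static const unsigned AO_5[]  = { R0,R1,R2,R3,R12,LR,R4,R5,R6,R8,R9,R10,R11,R7 };
  static const unsigned THUMB[] = { R0,R1,R2,R3,R4,R5,R6,R7 };

  bool dropFP = st.darwin || st.needsFP;   // keep FP out of the allocator
  if (st.thumb1Only)
    return pick(THUMB, dropFP);
  if (st.darwin)
    return st.r9Reserved ? pick(AO_4, dropFP) : pick(AO_3, dropFP);
  if (st.r9Reserved)
    return pick(AO_2, dropFP);
  if (st.thumb)
    return pick(AO_5, dropFP);
  return pick(AO_1, dropFP);
}

int main() {
  SubtargetFlags darwinWithFP = { false, true, false, true, true };
  std::printf("%zu allocatable GPRs\n", gprOrder(darwinWithFP).second);
}
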
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMRelocations.h b/libclamav/c++/llvm/lib/Target/ARM/ARMRelocations.h
deleted file mode 100644
index 2cc2950..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMRelocations.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//===- ARMRelocations.h - ARM Code Relocations ------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the ARM target-specific relocation types.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARMRELOCATIONS_H
-#define ARMRELOCATIONS_H
-
-#include "llvm/CodeGen/MachineRelocation.h"
-
-namespace llvm {
- namespace ARM {
- enum RelocationType {
- // reloc_arm_absolute - Absolute relocation, just add the relocated value
- // to the value already in memory.
- reloc_arm_absolute,
-
- // reloc_arm_relative - PC relative relocation, add the relocated value to
- // the value already in memory, after we adjust it for where the PC is.
- reloc_arm_relative,
-
- // reloc_arm_cp_entry - PC relative relocation for constpool_entries whose
- // addresses are kept locally in a map.
- reloc_arm_cp_entry,
-
- // reloc_arm_vfp_cp_entry - Same as reloc_arm_cp_entry except the offset
- // should be divided by 4.
- reloc_arm_vfp_cp_entry,
-
- // reloc_arm_machine_cp_entry - Relocation of an ARM machine constantpool
- // entry.
- reloc_arm_machine_cp_entry,
-
- // reloc_arm_jt_base - PC relative relocation for jump tables whose
- // addresses are kept locally in a map.
- reloc_arm_jt_base,
-
- // reloc_arm_pic_jt - PIC jump table entry relocation: dest bb - jt base.
- reloc_arm_pic_jt,
-
- // reloc_arm_branch - Branch address relocation.
- reloc_arm_branch
- };
- }
-}
-
-#endif
-
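
Schematically, the two simplest relocation kinds above resolve as in the sketch below. This is only an illustration of the arithmetic the comments describe; the function names and the pcBias parameter are hypothetical, and the real resolver handles the target-specific encoding and PC adjustment.

#include <cstdint>

// reloc_arm_absolute: add the resolved value to what is already in memory.
static void applyAbsolute(uint32_t *site, uint32_t value) {
  *site += value;
}

// reloc_arm_relative: the same, after adjusting for where the PC is.
static void applyPCRelative(uint32_t *site, uint32_t value,
                            uint32_t pc, uint32_t pcBias) {
  *site += value - (pc + pcBias);
}

int main() {
  uint32_t word = 0;
  applyAbsolute(&word, 0x1000);
  applyPCRelative(&word, 0x2000, 0x1800, 8);
  return static_cast<int>(word != 0);
}
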
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMSchedule.td b/libclamav/c++/llvm/lib/Target/ARM/ARMSchedule.td
deleted file mode 100644
index fc4c5f5..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMSchedule.td
+++ /dev/null
@@ -1,160 +0,0 @@
-//===- ARMSchedule.td - ARM Scheduling Definitions ---------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// Functional units across ARM processors
-//
-def FU_Issue : FuncUnit; // issue
-def FU_Pipe0 : FuncUnit; // pipeline 0
-def FU_Pipe1 : FuncUnit; // pipeline 1
-def FU_LdSt0 : FuncUnit; // pipeline 0 load/store
-def FU_LdSt1 : FuncUnit; // pipeline 1 load/store
-def FU_NPipe : FuncUnit; // NEON ALU/MUL pipe
-def FU_NLSPipe : FuncUnit; // NEON LS pipe
-
-//===----------------------------------------------------------------------===//
-// Instruction Itinerary classes used for ARM
-//
-def IIC_iALUx : InstrItinClass;
-def IIC_iALUi : InstrItinClass;
-def IIC_iALUr : InstrItinClass;
-def IIC_iALUsi : InstrItinClass;
-def IIC_iALUsr : InstrItinClass;
-def IIC_iUNAr : InstrItinClass;
-def IIC_iUNAsi : InstrItinClass;
-def IIC_iUNAsr : InstrItinClass;
-def IIC_iCMPi : InstrItinClass;
-def IIC_iCMPr : InstrItinClass;
-def IIC_iCMPsi : InstrItinClass;
-def IIC_iCMPsr : InstrItinClass;
-def IIC_iMOVi : InstrItinClass;
-def IIC_iMOVr : InstrItinClass;
-def IIC_iMOVsi : InstrItinClass;
-def IIC_iMOVsr : InstrItinClass;
-def IIC_iCMOVi : InstrItinClass;
-def IIC_iCMOVr : InstrItinClass;
-def IIC_iCMOVsi : InstrItinClass;
-def IIC_iCMOVsr : InstrItinClass;
-def IIC_iMUL16 : InstrItinClass;
-def IIC_iMAC16 : InstrItinClass;
-def IIC_iMUL32 : InstrItinClass;
-def IIC_iMAC32 : InstrItinClass;
-def IIC_iMUL64 : InstrItinClass;
-def IIC_iMAC64 : InstrItinClass;
-def IIC_iLoadi : InstrItinClass;
-def IIC_iLoadr : InstrItinClass;
-def IIC_iLoadsi : InstrItinClass;
-def IIC_iLoadiu : InstrItinClass;
-def IIC_iLoadru : InstrItinClass;
-def IIC_iLoadsiu : InstrItinClass;
-def IIC_iLoadm : InstrItinClass;
-def IIC_iStorei : InstrItinClass;
-def IIC_iStorer : InstrItinClass;
-def IIC_iStoresi : InstrItinClass;
-def IIC_iStoreiu : InstrItinClass;
-def IIC_iStoreru : InstrItinClass;
-def IIC_iStoresiu : InstrItinClass;
-def IIC_iStorem : InstrItinClass;
-def IIC_Br : InstrItinClass;
-def IIC_fpSTAT : InstrItinClass;
-def IIC_fpUNA32 : InstrItinClass;
-def IIC_fpUNA64 : InstrItinClass;
-def IIC_fpCMP32 : InstrItinClass;
-def IIC_fpCMP64 : InstrItinClass;
-def IIC_fpCVTSD : InstrItinClass;
-def IIC_fpCVTDS : InstrItinClass;
-def IIC_fpCVTIS : InstrItinClass;
-def IIC_fpCVTID : InstrItinClass;
-def IIC_fpCVTSI : InstrItinClass;
-def IIC_fpCVTDI : InstrItinClass;
-def IIC_fpALU32 : InstrItinClass;
-def IIC_fpALU64 : InstrItinClass;
-def IIC_fpMUL32 : InstrItinClass;
-def IIC_fpMUL64 : InstrItinClass;
-def IIC_fpMAC32 : InstrItinClass;
-def IIC_fpMAC64 : InstrItinClass;
-def IIC_fpDIV32 : InstrItinClass;
-def IIC_fpDIV64 : InstrItinClass;
-def IIC_fpSQRT32 : InstrItinClass;
-def IIC_fpSQRT64 : InstrItinClass;
-def IIC_fpLoad32 : InstrItinClass;
-def IIC_fpLoad64 : InstrItinClass;
-def IIC_fpLoadm : InstrItinClass;
-def IIC_fpStore32 : InstrItinClass;
-def IIC_fpStore64 : InstrItinClass;
-def IIC_fpStorem : InstrItinClass;
-def IIC_VLD1 : InstrItinClass;
-def IIC_VLD2 : InstrItinClass;
-def IIC_VLD3 : InstrItinClass;
-def IIC_VLD4 : InstrItinClass;
-def IIC_VST : InstrItinClass;
-def IIC_VUNAD : InstrItinClass;
-def IIC_VUNAQ : InstrItinClass;
-def IIC_VBIND : InstrItinClass;
-def IIC_VBINQ : InstrItinClass;
-def IIC_VMOVImm : InstrItinClass;
-def IIC_VMOVD : InstrItinClass;
-def IIC_VMOVQ : InstrItinClass;
-def IIC_VMOVIS : InstrItinClass;
-def IIC_VMOVID : InstrItinClass;
-def IIC_VMOVISL : InstrItinClass;
-def IIC_VMOVSI : InstrItinClass;
-def IIC_VMOVDI : InstrItinClass;
-def IIC_VPERMD : InstrItinClass;
-def IIC_VPERMQ : InstrItinClass;
-def IIC_VPERMQ3 : InstrItinClass;
-def IIC_VMACD : InstrItinClass;
-def IIC_VMACQ : InstrItinClass;
-def IIC_VRECSD : InstrItinClass;
-def IIC_VRECSQ : InstrItinClass;
-def IIC_VCNTiD : InstrItinClass;
-def IIC_VCNTiQ : InstrItinClass;
-def IIC_VUNAiD : InstrItinClass;
-def IIC_VUNAiQ : InstrItinClass;
-def IIC_VQUNAiD : InstrItinClass;
-def IIC_VQUNAiQ : InstrItinClass;
-def IIC_VBINiD : InstrItinClass;
-def IIC_VBINiQ : InstrItinClass;
-def IIC_VSUBiD : InstrItinClass;
-def IIC_VSUBiQ : InstrItinClass;
-def IIC_VBINi4D : InstrItinClass;
-def IIC_VBINi4Q : InstrItinClass;
-def IIC_VSHLiD : InstrItinClass;
-def IIC_VSHLiQ : InstrItinClass;
-def IIC_VSHLi4D : InstrItinClass;
-def IIC_VSHLi4Q : InstrItinClass;
-def IIC_VPALiD : InstrItinClass;
-def IIC_VPALiQ : InstrItinClass;
-def IIC_VMULi16D : InstrItinClass;
-def IIC_VMULi32D : InstrItinClass;
-def IIC_VMULi16Q : InstrItinClass;
-def IIC_VMULi32Q : InstrItinClass;
-def IIC_VMACi16D : InstrItinClass;
-def IIC_VMACi32D : InstrItinClass;
-def IIC_VMACi16Q : InstrItinClass;
-def IIC_VMACi32Q : InstrItinClass;
-def IIC_VEXTD : InstrItinClass;
-def IIC_VEXTQ : InstrItinClass;
-def IIC_VTB1 : InstrItinClass;
-def IIC_VTB2 : InstrItinClass;
-def IIC_VTB3 : InstrItinClass;
-def IIC_VTB4 : InstrItinClass;
-def IIC_VTBX1 : InstrItinClass;
-def IIC_VTBX2 : InstrItinClass;
-def IIC_VTBX3 : InstrItinClass;
-def IIC_VTBX4 : InstrItinClass;
-
-//===----------------------------------------------------------------------===//
-// Processor instruction itineraries.
-
-def GenericItineraries : ProcessorItineraries<[]>;
-
-
-include "ARMScheduleV6.td"
-include "ARMScheduleV7.td"
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMScheduleV6.td b/libclamav/c++/llvm/lib/Target/ARM/ARMScheduleV6.td
deleted file mode 100644
index 0fef466..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMScheduleV6.td
+++ /dev/null
@@ -1,200 +0,0 @@
-//===- ARMScheduleV6.td - ARM v6 Scheduling Definitions ----*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the itinerary class data for the ARM v6 processors.
-//
-//===----------------------------------------------------------------------===//
-
-// Model based on ARM1176
-//
-// Scheduling information derived from "ARM1176JZF-S Technical Reference Manual".
-//
-def ARMV6Itineraries : ProcessorItineraries<[
- //
- // No operand cycles
- InstrItinData<IIC_iALUx , [InstrStage<1, [FU_Pipe0]>]>,
- //
- // Binary Instructions that produce a result
- InstrItinData<IIC_iALUi , [InstrStage<1, [FU_Pipe0]>], [2, 2]>,
- InstrItinData<IIC_iALUr , [InstrStage<1, [FU_Pipe0]>], [2, 2, 2]>,
- InstrItinData<IIC_iALUsi , [InstrStage<1, [FU_Pipe0]>], [2, 2, 1]>,
- InstrItinData<IIC_iALUsr , [InstrStage<2, [FU_Pipe0]>], [3, 3, 2, 1]>,
- //
- // Unary Instructions that produce a result
- InstrItinData<IIC_iUNAr , [InstrStage<1, [FU_Pipe0]>], [2, 2]>,
- InstrItinData<IIC_iUNAsi , [InstrStage<1, [FU_Pipe0]>], [2, 1]>,
- InstrItinData<IIC_iUNAsr , [InstrStage<2, [FU_Pipe0]>], [3, 2, 1]>,
- //
- // Compare instructions
- InstrItinData<IIC_iCMPi , [InstrStage<1, [FU_Pipe0]>], [2]>,
- InstrItinData<IIC_iCMPr , [InstrStage<1, [FU_Pipe0]>], [2, 2]>,
- InstrItinData<IIC_iCMPsi , [InstrStage<1, [FU_Pipe0]>], [2, 1]>,
- InstrItinData<IIC_iCMPsr , [InstrStage<2, [FU_Pipe0]>], [3, 2, 1]>,
- //
- // Move instructions, unconditional
- InstrItinData<IIC_iMOVi , [InstrStage<1, [FU_Pipe0]>], [2]>,
- InstrItinData<IIC_iMOVr , [InstrStage<1, [FU_Pipe0]>], [2, 2]>,
- InstrItinData<IIC_iMOVsi , [InstrStage<1, [FU_Pipe0]>], [2, 1]>,
- InstrItinData<IIC_iMOVsr , [InstrStage<2, [FU_Pipe0]>], [3, 2, 1]>,
- //
- // Move instructions, conditional
- InstrItinData<IIC_iCMOVi , [InstrStage<1, [FU_Pipe0]>], [3]>,
- InstrItinData<IIC_iCMOVr , [InstrStage<1, [FU_Pipe0]>], [3, 2]>,
- InstrItinData<IIC_iCMOVsi , [InstrStage<1, [FU_Pipe0]>], [3, 1]>,
- InstrItinData<IIC_iCMOVsr , [InstrStage<1, [FU_Pipe0]>], [4, 2, 1]>,
-
- // Integer multiply pipeline
- //
- InstrItinData<IIC_iMUL16 , [InstrStage<1, [FU_Pipe0]>], [4, 1, 1]>,
- InstrItinData<IIC_iMAC16 , [InstrStage<1, [FU_Pipe0]>], [4, 1, 1, 2]>,
- InstrItinData<IIC_iMUL32 , [InstrStage<2, [FU_Pipe0]>], [5, 1, 1]>,
- InstrItinData<IIC_iMAC32 , [InstrStage<2, [FU_Pipe0]>], [5, 1, 1, 2]>,
- InstrItinData<IIC_iMUL64 , [InstrStage<3, [FU_Pipe0]>], [6, 1, 1]>,
- InstrItinData<IIC_iMAC64 , [InstrStage<3, [FU_Pipe0]>], [6, 1, 1, 2]>,
-
- // Integer load pipeline
- //
- // Immediate offset
- InstrItinData<IIC_iLoadi , [InstrStage<1, [FU_Pipe0]>], [4, 1]>,
- //
- // Register offset
- InstrItinData<IIC_iLoadr , [InstrStage<1, [FU_Pipe0]>], [4, 1, 1]>,
- //
- // Scaled register offset, issues over 2 cycles
- InstrItinData<IIC_iLoadsi , [InstrStage<2, [FU_Pipe0]>], [5, 2, 1]>,
- //
- // Immediate offset with update
- InstrItinData<IIC_iLoadiu , [InstrStage<1, [FU_Pipe0]>], [4, 2, 1]>,
- //
- // Register offset with update
- InstrItinData<IIC_iLoadru , [InstrStage<1, [FU_Pipe0]>], [4, 2, 1, 1]>,
- //
- // Scaled register offset with update, issues over 2 cycles
- InstrItinData<IIC_iLoadsiu , [InstrStage<2, [FU_Pipe0]>], [5, 2, 2, 1]>,
-
- //
- // Load multiple
- InstrItinData<IIC_iLoadm , [InstrStage<3, [FU_Pipe0]>]>,
-
- // Integer store pipeline
- //
- // Immediate offset
- InstrItinData<IIC_iStorei , [InstrStage<1, [FU_Pipe0]>], [2, 1]>,
- //
- // Register offset
- InstrItinData<IIC_iStorer , [InstrStage<1, [FU_Pipe0]>], [2, 1, 1]>,
-
- //
- // Scaled register offset, issues over 2 cycles
- InstrItinData<IIC_iStoresi , [InstrStage<2, [FU_Pipe0]>], [2, 2, 1]>,
- //
- // Immediate offset with update
- InstrItinData<IIC_iStoreiu , [InstrStage<1, [FU_Pipe0]>], [2, 2, 1]>,
- //
- // Register offset with update
- InstrItinData<IIC_iStoreru , [InstrStage<1, [FU_Pipe0]>], [2, 2, 1, 1]>,
- //
- // Scaled register offset with update, issues over 2 cycles
- InstrItinData<IIC_iStoresiu, [InstrStage<2, [FU_Pipe0]>], [2, 2, 2, 1]>,
- //
- // Store multiple
- InstrItinData<IIC_iStorem , [InstrStage<3, [FU_Pipe0]>]>,
-
- // Branch
- //
- // no delay slots, so the latency of a branch is unimportant
- InstrItinData<IIC_Br , [InstrStage<1, [FU_Pipe0]>]>,
-
- // VFP
- // Issue through integer pipeline, and execute in NEON unit. We assume
- // RunFast mode so that NFP pipeline is used for single-precision when
- // possible.
- //
- // FP Special Register to Integer Register File Move
- InstrItinData<IIC_fpSTAT , [InstrStage<1, [FU_Pipe0]>], [3]>,
- //
- // Single-precision FP Unary
- InstrItinData<IIC_fpUNA32 , [InstrStage<1, [FU_Pipe0]>], [5, 2]>,
- //
- // Double-precision FP Unary
- InstrItinData<IIC_fpUNA64 , [InstrStage<1, [FU_Pipe0]>], [5, 2]>,
- //
- // Single-precision FP Compare
- InstrItinData<IIC_fpCMP32 , [InstrStage<1, [FU_Pipe0]>], [2, 2]>,
- //
- // Double-precision FP Compare
- InstrItinData<IIC_fpCMP64 , [InstrStage<1, [FU_Pipe0]>], [2, 2]>,
- //
- // Single to Double FP Convert
- InstrItinData<IIC_fpCVTSD , [InstrStage<1, [FU_Pipe0]>], [5, 2]>,
- //
- // Double to Single FP Convert
- InstrItinData<IIC_fpCVTDS , [InstrStage<1, [FU_Pipe0]>], [5, 2]>,
- //
- // Single-Precision FP to Integer Convert
- InstrItinData<IIC_fpCVTSI , [InstrStage<1, [FU_Pipe0]>], [9, 2]>,
- //
- // Double-Precision FP to Integer Convert
- InstrItinData<IIC_fpCVTDI , [InstrStage<1, [FU_Pipe0]>], [9, 2]>,
- //
- // Integer to Single-Precision FP Convert
- InstrItinData<IIC_fpCVTIS , [InstrStage<1, [FU_Pipe0]>], [9, 2]>,
- //
- // Integer to Double-Precision FP Convert
- InstrItinData<IIC_fpCVTID , [InstrStage<1, [FU_Pipe0]>], [9, 2]>,
- //
- // Single-precision FP ALU
- InstrItinData<IIC_fpALU32 , [InstrStage<1, [FU_Pipe0]>], [9, 2, 2]>,
- //
- // Double-precision FP ALU
- InstrItinData<IIC_fpALU64 , [InstrStage<1, [FU_Pipe0]>], [9, 2, 2]>,
- //
- // Single-precision FP Multiply
- InstrItinData<IIC_fpMUL32 , [InstrStage<1, [FU_Pipe0]>], [9, 2, 2]>,
- //
- // Double-precision FP Multiply
- InstrItinData<IIC_fpMUL64 , [InstrStage<2, [FU_Pipe0]>], [9, 2, 2]>,
- //
- // Single-precision FP MAC
- InstrItinData<IIC_fpMAC32 , [InstrStage<1, [FU_Pipe0]>], [9, 2, 2, 2]>,
- //
- // Double-precision FP MAC
- InstrItinData<IIC_fpMAC64 , [InstrStage<2, [FU_Pipe0]>], [9, 2, 2, 2]>,
- //
- // Single-precision FP DIV
- InstrItinData<IIC_fpDIV32 , [InstrStage<15, [FU_Pipe0]>], [20, 2, 2]>,
- //
- // Double-precision FP DIV
- InstrItinData<IIC_fpDIV64 , [InstrStage<29, [FU_Pipe0]>], [34, 2, 2]>,
- //
- // Single-precision FP SQRT
- InstrItinData<IIC_fpSQRT32 , [InstrStage<15, [FU_Pipe0]>], [20, 2, 2]>,
- //
- // Double-precision FP SQRT
- InstrItinData<IIC_fpSQRT64 , [InstrStage<29, [FU_Pipe0]>], [34, 2, 2]>,
- //
- // Single-precision FP Load
- InstrItinData<IIC_fpLoad32 , [InstrStage<1, [FU_Pipe0]>], [5, 2, 2]>,
- //
- // Double-precision FP Load
- InstrItinData<IIC_fpLoad64 , [InstrStage<1, [FU_Pipe0]>], [5, 2, 2]>,
- //
- // FP Load Multiple
- InstrItinData<IIC_fpLoadm , [InstrStage<3, [FU_Pipe0]>]>,
- //
- // Single-precision FP Store
- InstrItinData<IIC_fpStore32 , [InstrStage<1, [FU_Pipe0]>], [2, 2, 2]>,
- //
- // Double-precision FP Store
- // use FU_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpStore64 , [InstrStage<1, [FU_Pipe0]>], [2, 2, 2]>,
- //
- // FP Store Multiple
- InstrItinData<IIC_fpStorem , [InstrStage<3, [FU_Pipe0]>]>
-]>;
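
Reading the ARMV6Itineraries entries above: the first bracketed list gives the pipeline stages (cycle counts on functional units) and the second gives per-operand cycle numbers, with results typically listed before sources, so a scheduler can derive def-to-use latencies from the difference between those numbers. Below is a rough standalone C++ model of one entry; the type names and the latency arithmetic are simplifications for illustration, not LLVM's exact classes or formula.

#include <cstdio>
#include <vector>

// Rough standalone model of one InstrItinData entry.
struct Stage     { unsigned cycles; const char *unit; };
struct ItinEntry {
  std::vector<Stage>    stages;         // first bracketed list
  std::vector<unsigned> operandCycles;  // second list: defs, then uses
};

// Cycles the instruction keeps its functional units busy.
unsigned issueCycles(const ItinEntry &e) {
  unsigned n = 0;
  for (const Stage &s : e.stages)
    n += s.cycles;
  return n;
}

// Simplified def-to-use latency estimate: the cycle the producer writes its
// result minus the cycle the consumer reads that operand, floored at 1.
unsigned defUseLatency(const ItinEntry &prod, unsigned defIdx,
                       const ItinEntry &cons, unsigned useIdx) {
  unsigned defCycle = prod.operandCycles.at(defIdx);
  unsigned useCycle = cons.operandCycles.at(useIdx);
  return defCycle > useCycle ? defCycle - useCycle : 1;
}

int main() {
  // IIC_iALUr above: one cycle on FU_Pipe0, operand cycles [2, 2, 2].
  ItinEntry alu{ { {1, "FU_Pipe0"} }, { 2, 2, 2 } };
  std::printf("busy %u cycle(s), def->use latency %u\n",
              issueCycles(alu), defUseLatency(alu, 0, alu, 1));
}
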
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMScheduleV7.td b/libclamav/c++/llvm/lib/Target/ARM/ARMScheduleV7.td
deleted file mode 100644
index bbbf413..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMScheduleV7.td
+++ /dev/null
@@ -1,587 +0,0 @@
-//===- ARMScheduleV7.td - ARM v7 Scheduling Definitions ----*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the itinerary class data for the ARM v7 processors.
-//
-//===----------------------------------------------------------------------===//
-
-//
-// Scheduling information derived from "Cortex-A8 Technical Reference Manual".
-//
-// Dual issue pipeline represented by FU_Pipe0 | FU_Pipe1
-//
-def CortexA8Itineraries : ProcessorItineraries<[
-
- // Two fully-pipelined integer ALU pipelines
- //
- // No operand cycles
- InstrItinData<IIC_iALUx , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>]>,
- //
- // Binary Instructions that produce a result
- InstrItinData<IIC_iALUi , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [2, 2]>,
- InstrItinData<IIC_iALUr , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [2, 2, 2]>,
- InstrItinData<IIC_iALUsi , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [2, 2, 1]>,
- InstrItinData<IIC_iALUsr , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [2, 2, 1, 1]>,
- //
- // Unary Instructions that produce a result
- InstrItinData<IIC_iUNAr , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [2, 2]>,
- InstrItinData<IIC_iUNAsi , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [2, 1]>,
- InstrItinData<IIC_iUNAsr , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [2, 1, 1]>,
- //
- // Compare instructions
- InstrItinData<IIC_iCMPi , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [2]>,
- InstrItinData<IIC_iCMPr , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [2, 2]>,
- InstrItinData<IIC_iCMPsi , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [2, 1]>,
- InstrItinData<IIC_iCMPsr , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [2, 1, 1]>,
- //
- // Move instructions, unconditional
- InstrItinData<IIC_iMOVi , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [1]>,
- InstrItinData<IIC_iMOVr , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [1, 1]>,
- InstrItinData<IIC_iMOVsi , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [1, 1]>,
- InstrItinData<IIC_iMOVsr , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [1, 1, 1]>,
- //
- // Move instructions, conditional
- InstrItinData<IIC_iCMOVi , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [2]>,
- InstrItinData<IIC_iCMOVr , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [2, 1]>,
- InstrItinData<IIC_iCMOVsi , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [2, 1]>,
- InstrItinData<IIC_iCMOVsr , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>], [2, 1, 1]>,
-
- // Integer multiply pipeline
- // Result written in E5, but that is relative to the last cycle of a multi-cycle operation,
- // so we use 6 for those cases
- //
- InstrItinData<IIC_iMUL16 , [InstrStage<1, [FU_Pipe0]>], [5, 1, 1]>,
- InstrItinData<IIC_iMAC16 , [InstrStage<1, [FU_Pipe1], 0>,
- InstrStage<2, [FU_Pipe0]>], [6, 1, 1, 4]>,
- InstrItinData<IIC_iMUL32 , [InstrStage<1, [FU_Pipe1], 0>,
- InstrStage<2, [FU_Pipe0]>], [6, 1, 1]>,
- InstrItinData<IIC_iMAC32 , [InstrStage<1, [FU_Pipe1], 0>,
- InstrStage<2, [FU_Pipe0]>], [6, 1, 1, 4]>,
- InstrItinData<IIC_iMUL64 , [InstrStage<2, [FU_Pipe1], 0>,
- InstrStage<3, [FU_Pipe0]>], [6, 6, 1, 1]>,
- InstrItinData<IIC_iMAC64 , [InstrStage<2, [FU_Pipe1], 0>,
- InstrStage<3, [FU_Pipe0]>], [6, 6, 1, 1]>,
-
- // Integer load pipeline
- //
- // loads have an extra cycle of latency, but are fully pipelined
- // use FU_Issue to enforce the 1 load/store per cycle limit
- //
- // Immediate offset
- InstrItinData<IIC_iLoadi , [InstrStage<1, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0]>], [3, 1]>,
- //
- // Register offset
- InstrItinData<IIC_iLoadr , [InstrStage<1, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0]>], [3, 1, 1]>,
- //
- // Scaled register offset, issues over 2 cycles
- InstrItinData<IIC_iLoadsi , [InstrStage<2, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0], 0>,
- InstrStage<1, [FU_Pipe1]>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0]>], [4, 1, 1]>,
- //
- // Immediate offset with update
- InstrItinData<IIC_iLoadiu , [InstrStage<1, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0]>], [3, 2, 1]>,
- //
- // Register offset with update
- InstrItinData<IIC_iLoadru , [InstrStage<1, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0]>], [3, 2, 1, 1]>,
- //
- // Scaled register offset with update, issues over 2 cycles
- InstrItinData<IIC_iLoadsiu , [InstrStage<2, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0], 0>,
- InstrStage<1, [FU_Pipe1]>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0]>], [4, 3, 1, 1]>,
- //
- // Load multiple
- InstrItinData<IIC_iLoadm , [InstrStage<2, [FU_Issue], 0>,
- InstrStage<2, [FU_Pipe0], 0>,
- InstrStage<2, [FU_Pipe1]>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0]>]>,
-
- // Integer store pipeline
- //
- // use FU_Issue to enforce the 1 load/store per cycle limit
- //
- // Immediate offset
- InstrItinData<IIC_iStorei , [InstrStage<1, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0]>], [3, 1]>,
- //
- // Register offset
- InstrItinData<IIC_iStorer , [InstrStage<1, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0]>], [3, 1, 1]>,
- //
- // Scaled register offset, issues over 2 cycles
- InstrItinData<IIC_iStoresi , [InstrStage<2, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0], 0>,
- InstrStage<1, [FU_Pipe1]>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0]>], [3, 1, 1]>,
- //
- // Immediate offset with update
- InstrItinData<IIC_iStoreiu , [InstrStage<1, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0]>], [2, 3, 1]>,
- //
- // Register offset with update
- InstrItinData<IIC_iStoreru , [InstrStage<1, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0]>], [2, 3, 1, 1]>,
- //
- // Scaled register offset with update, issues over 2 cycles
- InstrItinData<IIC_iStoresiu, [InstrStage<2, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0], 0>,
- InstrStage<1, [FU_Pipe1]>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0]>], [3, 3, 1, 1]>,
- //
- // Store multiple
- InstrItinData<IIC_iStorem , [InstrStage<2, [FU_Issue], 0>,
- InstrStage<2, [FU_Pipe0], 0>,
- InstrStage<2, [FU_Pipe1]>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0]>]>,
-
- // Branch
- //
- // no delay slots, so the latency of a branch is unimportant
- InstrItinData<IIC_Br , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>]>,
-
- // VFP
- // Issue through integer pipeline, and execute in NEON unit. We assume
- // RunFast mode so that NFP pipeline is used for single-precision when
- // possible.
- //
- // FP Special Register to Integer Register File Move
- InstrItinData<IIC_fpSTAT , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NLSPipe]>]>,
- //
- // Single-precision FP Unary
- InstrItinData<IIC_fpUNA32 , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [7, 1]>,
- //
- // Double-precision FP Unary
- InstrItinData<IIC_fpUNA64 , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<4, [FU_NPipe], 0>,
- InstrStage<4, [FU_NLSPipe]>], [4, 1]>,
- //
- // Single-precision FP Compare
- InstrItinData<IIC_fpCMP32 , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [1, 1]>,
- //
- // Double-precision FP Compare
- InstrItinData<IIC_fpCMP64 , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<4, [FU_NPipe], 0>,
- InstrStage<4, [FU_NLSPipe]>], [4, 1]>,
- //
- // Single to Double FP Convert
- InstrItinData<IIC_fpCVTSD , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<7, [FU_NPipe], 0>,
- InstrStage<7, [FU_NLSPipe]>], [7, 1]>,
- //
- // Double to Single FP Convert
- InstrItinData<IIC_fpCVTDS , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<5, [FU_NPipe], 0>,
- InstrStage<5, [FU_NLSPipe]>], [5, 1]>,
- //
- // Single-Precision FP to Integer Convert
- InstrItinData<IIC_fpCVTSI , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [7, 1]>,
- //
- // Double-Precision FP to Integer Convert
- InstrItinData<IIC_fpCVTDI , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<8, [FU_NPipe], 0>,
- InstrStage<8, [FU_NLSPipe]>], [8, 1]>,
- //
- // Integer to Single-Precision FP Convert
- InstrItinData<IIC_fpCVTIS , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [7, 1]>,
- //
- // Integer to Double-Precision FP Convert
- InstrItinData<IIC_fpCVTID , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<8, [FU_NPipe], 0>,
- InstrStage<8, [FU_NLSPipe]>], [8, 1]>,
- //
- // Single-precision FP ALU
- InstrItinData<IIC_fpALU32 , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [7, 1, 1]>,
- //
- // Double-precision FP ALU
- InstrItinData<IIC_fpALU64 , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<9, [FU_NPipe], 0>,
- InstrStage<9, [FU_NLSPipe]>], [9, 1, 1]>,
- //
- // Single-precision FP Multiply
- InstrItinData<IIC_fpMUL32 , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [7, 1, 1]>,
- //
- // Double-precision FP Multiply
- InstrItinData<IIC_fpMUL64 , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<11, [FU_NPipe], 0>,
- InstrStage<11, [FU_NLSPipe]>], [11, 1, 1]>,
- //
- // Single-precision FP MAC
- InstrItinData<IIC_fpMAC32 , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [7, 2, 1, 1]>,
- //
- // Double-precision FP MAC
- InstrItinData<IIC_fpMAC64 , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<19, [FU_NPipe], 0>,
- InstrStage<19, [FU_NLSPipe]>], [19, 2, 1, 1]>,
- //
- // Single-precision FP DIV
- InstrItinData<IIC_fpDIV32 , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<20, [FU_NPipe], 0>,
- InstrStage<20, [FU_NLSPipe]>], [20, 1, 1]>,
- //
- // Double-precision FP DIV
- InstrItinData<IIC_fpDIV64 , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<29, [FU_NPipe], 0>,
- InstrStage<29, [FU_NLSPipe]>], [29, 1, 1]>,
- //
- // Single-precision FP SQRT
- InstrItinData<IIC_fpSQRT32, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<19, [FU_NPipe], 0>,
- InstrStage<19, [FU_NLSPipe]>], [19, 1]>,
- //
- // Double-precision FP SQRT
- InstrItinData<IIC_fpSQRT64, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<29, [FU_NPipe], 0>,
- InstrStage<29, [FU_NLSPipe]>], [29, 1]>,
- //
- // Single-precision FP Load
- // use FU_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpLoad32, [InstrStage<1, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0], 0>,
- InstrStage<1, [FU_NLSPipe]>]>,
- //
- // Double-precision FP Load
- // use FU_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpLoad64, [InstrStage<2, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0], 0>,
- InstrStage<1, [FU_Pipe1]>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0], 0>,
- InstrStage<1, [FU_NLSPipe]>]>,
- //
- // FP Load Multiple
- // use FU_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpLoadm, [InstrStage<3, [FU_Issue], 0>,
- InstrStage<2, [FU_Pipe0], 0>,
- InstrStage<2, [FU_Pipe1]>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0], 0>,
- InstrStage<1, [FU_NLSPipe]>]>,
- //
- // Single-precision FP Store
- // use FU_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpStore32,[InstrStage<1, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0], 0>,
- InstrStage<1, [FU_NLSPipe]>]>,
- //
- // Double-precision FP Store
- // use FU_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpStore64,[InstrStage<2, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0], 0>,
- InstrStage<1, [FU_Pipe1]>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0], 0>,
- InstrStage<1, [FU_NLSPipe]>]>,
- //
- // FP Store Multiple
- // use FU_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpStorem, [InstrStage<3, [FU_Issue], 0>,
- InstrStage<2, [FU_Pipe0], 0>,
- InstrStage<2, [FU_Pipe1]>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0], 0>,
- InstrStage<1, [FU_NLSPipe]>]>,
-
- // NEON
- // Issue through integer pipeline, and execute in NEON unit.
- //
- // VLD1
- InstrItinData<IIC_VLD1, [InstrStage<1, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0], 0>,
- InstrStage<1, [FU_NLSPipe]>]>,
- //
- // VLD2
- InstrItinData<IIC_VLD2, [InstrStage<1, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0], 0>,
- InstrStage<1, [FU_NLSPipe]>], [2, 2, 1]>,
- //
- // VLD3
- InstrItinData<IIC_VLD3, [InstrStage<1, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0], 0>,
- InstrStage<1, [FU_NLSPipe]>], [2, 2, 2, 1]>,
- //
- // VLD4
- InstrItinData<IIC_VLD4, [InstrStage<1, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0], 0>,
- InstrStage<1, [FU_NLSPipe]>], [2, 2, 2, 2, 1]>,
- //
- // VST
- InstrItinData<IIC_VST, [InstrStage<1, [FU_Issue], 0>,
- InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_LdSt0], 0>,
- InstrStage<1, [FU_NLSPipe]>]>,
- //
- // Double-register FP Unary
- InstrItinData<IIC_VUNAD, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [5, 2]>,
- //
- // Quad-register FP Unary
- // Result written in N5, but that is relative to the last cycle of a multi-cycle operation,
- // so we use 6 for those cases
- InstrItinData<IIC_VUNAQ, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NPipe]>], [6, 2]>,
- //
- // Double-register FP Binary
- InstrItinData<IIC_VBIND, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [5, 2, 2]>,
- //
- // Quad-register FP Binary
- // Result written in N5, but that is relative to the last cycle of a multi-cycle operation,
- // so we use 6 for those cases
- InstrItinData<IIC_VBINQ, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NPipe]>], [6, 2, 2]>,
- //
- // Move Immediate
- InstrItinData<IIC_VMOVImm, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [3]>,
- //
- // Double-register Permute Move
- InstrItinData<IIC_VMOVD, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NLSPipe]>], [2, 1]>,
- //
- // Quad-register Permute Move
- // Result written in N2, but that is relative to the last cycle of a multi-cycle operation,
- // so we use 3 for those cases
- InstrItinData<IIC_VMOVQ, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NLSPipe]>], [3, 1]>,
- //
- // Integer to Single-precision Move
- InstrItinData<IIC_VMOVIS , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NLSPipe]>], [2, 1]>,
- //
- // Integer to Double-precision Move
- InstrItinData<IIC_VMOVID , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NLSPipe]>], [2, 1, 1]>,
- //
- // Single-precision to Integer Move
- InstrItinData<IIC_VMOVSI , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NLSPipe]>], [20, 1]>,
- //
- // Double-precision to Integer Move
- InstrItinData<IIC_VMOVDI , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NLSPipe]>], [20, 20, 1]>,
- //
- // Integer to Lane Move
- InstrItinData<IIC_VMOVISL , [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NLSPipe]>], [3, 1, 1]>,
- //
- // Double-register Permute
- InstrItinData<IIC_VPERMD, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NLSPipe]>], [2, 2, 1, 1]>,
- //
- // Quad-register Permute
- // Result written in N2, but that is relative to the last cycle of a multi-cycle operation,
- // so we use 3 for those cases
- InstrItinData<IIC_VPERMQ, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NLSPipe]>], [3, 3, 1, 1]>,
- //
- // Quad-register Permute (3 cycle issue)
- // Result written in N2, but that is relative to the last cycle of a multi-cycle operation,
- // so we use 4 for those cases
- InstrItinData<IIC_VPERMQ3, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NLSPipe]>,
- InstrStage<1, [FU_NPipe], 0>,
- InstrStage<2, [FU_NLSPipe]>], [4, 4, 1, 1]>,
- //
- // Double-register FP Multiple-Accumulate
- InstrItinData<IIC_VMACD, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [9, 2, 2, 3]>,
- //
- // Quad-register FP Multiple-Accumulate
- // Result written in N9, but that is relative to the last cycle of a multi-cycle operation,
- // so we use 10 for those cases
- InstrItinData<IIC_VMACQ, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NPipe]>], [10, 2, 2, 3]>,
- //
- // Double-register Reciprocal Step
- InstrItinData<IIC_VRECSD, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [9, 2, 2]>,
- //
- // Quad-register Reciprocal Step
- InstrItinData<IIC_VRECSQ, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NPipe]>], [10, 2, 2]>,
- //
- // Double-register Integer Count
- InstrItinData<IIC_VCNTiD, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [3, 2, 2]>,
- //
- // Quad-register Integer Count
- // Result written in N3, but that is relative to the last cycle of a multi-cycle operation,
- // so we use 4 for those cases
- InstrItinData<IIC_VCNTiQ, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NPipe]>], [4, 2, 2]>,
- //
- // Double-register Integer Unary
- InstrItinData<IIC_VUNAiD, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [4, 2]>,
- //
- // Quad-register Integer Unary
- InstrItinData<IIC_VUNAiQ, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [4, 2]>,
- //
- // Double-register Integer Q-Unary
- InstrItinData<IIC_VQUNAiD, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [4, 1]>,
- //
- // Quad-register Integer Q-Unary
- InstrItinData<IIC_VQUNAiQ, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [4, 1]>,
- //
- // Double-register Integer Binary
- InstrItinData<IIC_VBINiD, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [3, 2, 2]>,
- //
- // Quad-register Integer Binary
- InstrItinData<IIC_VBINiQ, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [3, 2, 2]>,
- //
- // Double-register Integer Binary (4 cycle)
- InstrItinData<IIC_VBINi4D, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [4, 2, 1]>,
- //
- // Quad-register Integer Binary (4 cycle)
- InstrItinData<IIC_VBINi4Q, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [4, 2, 1]>,
- //
- // Double-register Integer Subtract
- InstrItinData<IIC_VSUBiD, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [3, 2, 1]>,
- //
- // Quad-register Integer Subtract
- InstrItinData<IIC_VSUBiQ, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [3, 2, 1]>,
- //
- // Double-register Integer Shift
- InstrItinData<IIC_VSHLiD, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [3, 1, 1]>,
- //
- // Quad-register Integer Shift
- InstrItinData<IIC_VSHLiQ, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NPipe]>], [4, 1, 1]>,
- //
- // Double-register Integer Shift (4 cycle)
- InstrItinData<IIC_VSHLi4D, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [4, 1, 1]>,
- //
- // Quad-register Integer Shift (4 cycle)
- InstrItinData<IIC_VSHLi4Q, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NPipe]>], [5, 1, 1]>,
- //
- // Double-register Integer Pair Add Long
- InstrItinData<IIC_VPALiD, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [6, 3, 2, 1]>,
- //
- // Quad-register Integer Pair Add Long
- InstrItinData<IIC_VPALiQ, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NPipe]>], [7, 3, 2, 1]>,
- //
- // Double-register Integer Multiply (.8, .16)
- InstrItinData<IIC_VMULi16D, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [6, 2, 2]>,
- //
- // Double-register Integer Multiply (.32)
- InstrItinData<IIC_VMULi32D, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NPipe]>], [7, 2, 1]>,
- //
- // Quad-register Integer Multiply (.8, .16)
- InstrItinData<IIC_VMULi16Q, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NPipe]>], [7, 2, 2]>,
- //
- // Quad-register Integer Multiply (.32)
- InstrItinData<IIC_VMULi32Q, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>,
- InstrStage<2, [FU_NLSPipe], 0>,
- InstrStage<3, [FU_NPipe]>], [9, 2, 1]>,
- //
- // Double-register Integer Multiply-Accumulate (.8, .16)
- InstrItinData<IIC_VMACi16D, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>], [6, 2, 2, 3]>,
- //
- // Double-register Integer Multiply-Accumulate (.32)
- InstrItinData<IIC_VMACi32D, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NPipe]>], [7, 2, 1, 3]>,
- //
- // Quad-register Integer Multiply-Accumulate (.8, .16)
- InstrItinData<IIC_VMACi16Q, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NPipe]>], [7, 2, 2, 3]>,
- //
- // Quad-register Integer Multiply-Accumulate (.32)
- InstrItinData<IIC_VMACi32Q, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NPipe]>,
- InstrStage<2, [FU_NLSPipe], 0>,
- InstrStage<3, [FU_NPipe]>], [9, 2, 1, 3]>,
- //
- // Double-register VEXT
- InstrItinData<IIC_VEXTD, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NLSPipe]>], [2, 1, 1]>,
- //
- // Quad-register VEXT
- InstrItinData<IIC_VEXTQ, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NLSPipe]>], [3, 1, 1]>,
- //
- // VTB
- InstrItinData<IIC_VTB1, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NLSPipe]>], [3, 2, 1]>,
- InstrItinData<IIC_VTB2, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NLSPipe]>], [3, 2, 2, 1]>,
- InstrItinData<IIC_VTB3, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NLSPipe]>,
- InstrStage<1, [FU_NPipe], 0>,
- InstrStage<2, [FU_NLSPipe]>], [4, 2, 2, 3, 1]>,
- InstrItinData<IIC_VTB4, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NLSPipe]>,
- InstrStage<1, [FU_NPipe], 0>,
- InstrStage<2, [FU_NLSPipe]>], [4, 2, 2, 3, 3, 1]>,
- //
- // VTBX
- InstrItinData<IIC_VTBX1, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NLSPipe]>], [3, 1, 2, 1]>,
- InstrItinData<IIC_VTBX2, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<2, [FU_NLSPipe]>], [3, 1, 2, 2, 1]>,
- InstrItinData<IIC_VTBX3, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NLSPipe]>,
- InstrStage<1, [FU_NPipe], 0>,
- InstrStage<2, [FU_NLSPipe]>], [4, 1, 2, 2, 3, 1]>,
- InstrItinData<IIC_VTBX4, [InstrStage<1, [FU_Pipe0, FU_Pipe1]>,
- InstrStage<1, [FU_NLSPipe]>,
- InstrStage<1, [FU_NPipe], 0>,
- InstrStage<2, [FU_NLSPipe]>], [4, 1, 2, 2, 3, 3, 1]>
-]>;
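
The NEON itinerary records removed above pair each instruction class with its issue and execute stages (FU_Pipe0/FU_Pipe1 for issue, FU_NPipe/FU_NLSPipe for execution) and a trailing list of operand cycles, where the first number is the cycle in which the result is written. Below is a minimal standalone sketch of how such a table can be queried for result latency; the types and values (the "Sketch" names) are invented for illustration and are not LLVM's InstrItineraryData API.

  #include <cstdio>
  #include <vector>

  // Invented stand-ins for the TableGen records above: an entry holds the
  // cycle count of each pipeline stage plus the per-operand cycle list
  // (e.g. [4, 1, 1] for IIC_VSHLiQ: result in N4, sources read in cycle 1).
  struct StageSketch { unsigned Cycles; };
  struct ItinEntrySketch {
    std::vector<StageSketch> Stages;
    std::vector<unsigned> OperandCycles;
  };

  // Result latency is the first operand-cycle value, falling back to 1.
  unsigned resultLatency(const ItinEntrySketch &E) {
    return E.OperandCycles.empty() ? 1 : E.OperandCycles[0];
  }

  int main() {
    ItinEntrySketch VSHLiQ{{{1}, {2}}, {4, 1, 1}}; // 1-cycle issue + 2 cycles in FU_NPipe
    std::printf("IIC_VSHLiQ result latency: %u\n", resultLatency(VSHLiQ));
    return 0;
  }
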
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMSubtarget.cpp b/libclamav/c++/llvm/lib/Target/ARM/ARMSubtarget.cpp
deleted file mode 100644
index 622034b..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMSubtarget.cpp
+++ /dev/null
@@ -1,185 +0,0 @@
-//===-- ARMSubtarget.cpp - ARM Subtarget Information ------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the ARM specific subclass of TargetSubtarget.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARMSubtarget.h"
-#include "ARMGenSubtarget.inc"
-#include "llvm/GlobalValue.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/ADT/SmallVector.h"
-using namespace llvm;
-
-static cl::opt<bool>
-ReserveR9("arm-reserve-r9", cl::Hidden,
- cl::desc("Reserve R9, making it unavailable as GPR"));
-static cl::opt<bool>
-UseNEONFP("arm-use-neon-fp",
- cl::desc("Use NEON for single-precision FP"),
- cl::init(false), cl::Hidden);
-
-static cl::opt<bool>
-UseMOVT("arm-use-movt",
- cl::init(true), cl::Hidden);
-
-ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &FS,
- bool isT)
- : ARMArchVersion(V4)
- , ARMFPUType(None)
- , UseNEONForSinglePrecisionFP(UseNEONFP)
- , IsThumb(isT)
- , ThumbMode(Thumb1)
- , PostRAScheduler(false)
- , IsR9Reserved(ReserveR9)
- , UseMovt(UseMOVT)
- , stackAlignment(4)
- , CPUString("generic")
- , TargetType(isELF) // Default to ELF unless otherwise specified.
- , TargetABI(ARM_ABI_APCS) {
- // default to soft float ABI
- if (FloatABIType == FloatABI::Default)
- FloatABIType = FloatABI::Soft;
-
- // Determine default and user specified characteristics
-
- // Parse features string.
- CPUString = ParseSubtargetFeatures(FS, CPUString);
-
- // When no arch is specified either by CPU or by attributes, make the default
- // ARMv4T.
- if (CPUString == "generic" && (FS.empty() || FS == "generic"))
- ARMArchVersion = V4T;
-
- // Set the boolean corresponding to the current target triple, or the default
- // if one cannot be determined, to true.
- unsigned Len = TT.length();
- unsigned Idx = 0;
-
- if (Len >= 5 && TT.substr(0, 4) == "armv")
- Idx = 4;
- else if (Len >= 6 && TT.substr(0, 5) == "thumb") {
- IsThumb = true;
- if (Len >= 7 && TT[5] == 'v')
- Idx = 6;
- }
- if (Idx) {
- unsigned SubVer = TT[Idx];
- if (SubVer >= '7' && SubVer <= '9') {
- ARMArchVersion = V7A;
- } else if (SubVer == '6') {
- ARMArchVersion = V6;
- if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == '2')
- ARMArchVersion = V6T2;
- } else if (SubVer == '5') {
- ARMArchVersion = V5T;
- if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == 'e')
- ARMArchVersion = V5TE;
- } else if (SubVer == '4') {
- if (Len >= Idx+2 && TT[Idx+1] == 't')
- ARMArchVersion = V4T;
- else
- ARMArchVersion = V4;
- }
- }
-
- // Thumb2 implies at least V6T2.
- if (ARMArchVersion >= V6T2)
- ThumbMode = Thumb2;
- else if (ThumbMode >= Thumb2)
- ARMArchVersion = V6T2;
-
- if (Len >= 10) {
- if (TT.find("-darwin") != std::string::npos)
- // arm-darwin
- TargetType = isDarwin;
- }
-
- if (TT.find("eabi") != std::string::npos)
- TargetABI = ARM_ABI_AAPCS;
-
- if (isAAPCS_ABI())
- stackAlignment = 8;
-
- if (isTargetDarwin())
- IsR9Reserved = ReserveR9 | (ARMArchVersion < V6);
-
- if (!isThumb() || hasThumb2())
- PostRAScheduler = true;
-
- // Set CPU specific features.
- if (CPUString == "cortex-a8") {
- // On Cortex-a8, it's faster to perform some single-precision FP
- // operations with NEON instructions.
- if (UseNEONFP.getPosition() == 0)
- UseNEONForSinglePrecisionFP = true;
- }
-}
-
-/// GVIsIndirectSymbol - true if the GV will be accessed via an indirect symbol.
-bool
-ARMSubtarget::GVIsIndirectSymbol(GlobalValue *GV, Reloc::Model RelocM) const {
- if (RelocM == Reloc::Static)
- return false;
-
- // Materializable GVs (in JIT lazy compilation mode) do not require an extra
- // load from stub.
- bool isDecl = GV->isDeclaration() && !GV->isMaterializable();
-
- if (!isTargetDarwin()) {
- // An extra load is needed for all externally visible symbols.
- if (GV->hasLocalLinkage() || GV->hasHiddenVisibility())
- return false;
- return true;
- } else {
- if (RelocM == Reloc::PIC_) {
- // If this is a strong reference to a definition, it is definitely not
- // through a stub.
- if (!isDecl && !GV->isWeakForLinker())
- return false;
-
- // Unless we have a symbol with hidden visibility, we have to go through a
- // normal $non_lazy_ptr stub because this symbol might be resolved late.
- if (!GV->hasHiddenVisibility()) // Non-hidden $non_lazy_ptr reference.
- return true;
-
- // If symbol visibility is hidden, we have a stub for common symbol
- // references and external declarations.
- if (isDecl || GV->hasCommonLinkage())
- // Hidden $non_lazy_ptr reference.
- return true;
-
- return false;
- } else {
- // If this is a strong reference to a definition, it is definitely not
- // through a stub.
- if (!isDecl && !GV->isWeakForLinker())
- return false;
-
- // Unless we have a symbol with hidden visibility, we have to go through a
- // normal $non_lazy_ptr stub because this symbol might be resolved late.
- if (!GV->hasHiddenVisibility()) // Non-hidden $non_lazy_ptr reference.
- return true;
- }
- }
-
- return false;
-}
-
-bool ARMSubtarget::enablePostRAScheduler(
- CodeGenOpt::Level OptLevel,
- TargetSubtarget::AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const {
- Mode = TargetSubtarget::ANTIDEP_CRITICAL;
- CriticalPathRCs.clear();
- CriticalPathRCs.push_back(&ARM::GPRRegClass);
- return PostRAScheduler && OptLevel >= CodeGenOpt::Default;
-}
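
The constructor removed above derives the architecture version by scanning the target triple (for example "armv7-..." or "thumbv6t2-..."). The following self-contained sketch reproduces that scan under the assumption that only the prefixes handled above matter; the enum and function names are illustrative, not LLVM's.

  #include <cstdio>
  #include <string>

  enum ArchSketch { V4, V4T, V5T, V5TE, V6, V6T2, V7A };

  ArchSketch parseArchFromTriple(const std::string &TT) {
    ArchSketch Arch = V4;
    size_t Idx = 0;
    if (TT.compare(0, 4, "armv") == 0)
      Idx = 4;
    else if (TT.compare(0, 6, "thumbv") == 0)
      Idx = 6;
    if (Idx == 0 || Idx >= TT.size())
      return Arch;
    char SubVer = TT[Idx];
    if (SubVer >= '7' && SubVer <= '9')
      Arch = V7A;
    else if (SubVer == '6')
      Arch = (TT.compare(Idx + 1, 2, "t2") == 0) ? V6T2 : V6;
    else if (SubVer == '5')
      Arch = (TT.compare(Idx + 1, 2, "te") == 0) ? V5TE : V5T;
    else if (SubVer == '4')
      Arch = (TT.compare(Idx + 1, 1, "t") == 0) ? V4T : V4;
    return Arch;
  }

  int main() {
    std::printf("%d %d\n", parseArchFromTriple("armv7-eabi"),
                parseArchFromTriple("thumbv6t2-elf"));
    return 0;
  }
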
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMSubtarget.h b/libclamav/c++/llvm/lib/Target/ARM/ARMSubtarget.h
deleted file mode 100644
index 6980851..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMSubtarget.h
+++ /dev/null
@@ -1,156 +0,0 @@
-//=====---- ARMSubtarget.h - Define Subtarget for the ARM -----*- C++ -*--====//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the ARM specific subclass of TargetSubtarget.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARMSUBTARGET_H
-#define ARMSUBTARGET_H
-
-#include "llvm/Target/TargetInstrItineraries.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetSubtarget.h"
-#include "ARMBaseRegisterInfo.h"
-#include <string>
-
-namespace llvm {
-class GlobalValue;
-
-class ARMSubtarget : public TargetSubtarget {
-protected:
- enum ARMArchEnum {
- V4, V4T, V5T, V5TE, V6, V6T2, V7A
- };
-
- enum ARMFPEnum {
- None, VFPv2, VFPv3, NEON
- };
-
- enum ThumbTypeEnum {
- Thumb1,
- Thumb2
- };
-
- /// ARMArchVersion - ARM architecture version: V4, V4T (base), V5T, V5TE,
- /// V6, V6T2, V7A.
- ARMArchEnum ARMArchVersion;
-
- /// ARMFPUType - Floating Point Unit type.
- ARMFPEnum ARMFPUType;
-
- /// UseNEONForSinglePrecisionFP - True if the NEONFP attribute has been
- /// specified. Use the method useNEONForSinglePrecisionFP() to
- /// determine if NEON should actually be used.
- bool UseNEONForSinglePrecisionFP;
-
- /// IsThumb - True if we are in thumb mode, false if in ARM mode.
- bool IsThumb;
-
- /// ThumbMode - Indicates supported Thumb version.
- ThumbTypeEnum ThumbMode;
-
- /// PostRAScheduler - True if using post-register-allocation scheduler.
- bool PostRAScheduler;
-
- /// IsR9Reserved - True if R9 is not available as a general purpose register.
- bool IsR9Reserved;
-
- /// UseMovt - True if MOVT / MOVW pairs are used for materialization of 32-bit
- /// imms (including global addresses).
- bool UseMovt;
-
- /// stackAlignment - The minimum alignment known to hold of the stack frame on
- /// entry to the function and which must be maintained by every function.
- unsigned stackAlignment;
-
- /// CPUString - String name of used CPU.
- std::string CPUString;
-
- /// Selected instruction itineraries (one entry per itinerary class.)
- InstrItineraryData InstrItins;
-
- public:
- enum {
- isELF, isDarwin
- } TargetType;
-
- enum {
- ARM_ABI_APCS,
- ARM_ABI_AAPCS // ARM EABI
- } TargetABI;
-
- /// This constructor initializes the data members to match that
- /// of the specified triple.
- ///
- ARMSubtarget(const std::string &TT, const std::string &FS, bool isThumb);
-
- /// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size
- /// that still makes it profitable to inline the call.
- unsigned getMaxInlineSizeThreshold() const {
- // FIXME: For now, we don't lower memcpy's to loads / stores for Thumb.
- // Change this once Thumb ldmia / stmia support is added.
- return isThumb() ? 0 : 64;
- }
- /// ParseSubtargetFeatures - Parses features string setting specified
- /// subtarget options. Definition of function is auto generated by tblgen.
- std::string ParseSubtargetFeatures(const std::string &FS,
- const std::string &CPU);
-
- bool hasV4TOps() const { return ARMArchVersion >= V4T; }
- bool hasV5TOps() const { return ARMArchVersion >= V5T; }
- bool hasV5TEOps() const { return ARMArchVersion >= V5TE; }
- bool hasV6Ops() const { return ARMArchVersion >= V6; }
- bool hasV6T2Ops() const { return ARMArchVersion >= V6T2; }
- bool hasV7Ops() const { return ARMArchVersion >= V7A; }
-
- bool hasVFP2() const { return ARMFPUType >= VFPv2; }
- bool hasVFP3() const { return ARMFPUType >= VFPv3; }
- bool hasNEON() const { return ARMFPUType >= NEON; }
- bool useNEONForSinglePrecisionFP() const {
- return hasNEON() && UseNEONForSinglePrecisionFP; }
-
- bool isTargetDarwin() const { return TargetType == isDarwin; }
- bool isTargetELF() const { return TargetType == isELF; }
-
- bool isAPCS_ABI() const { return TargetABI == ARM_ABI_APCS; }
- bool isAAPCS_ABI() const { return TargetABI == ARM_ABI_AAPCS; }
-
- bool isThumb() const { return IsThumb; }
- bool isThumb1Only() const { return IsThumb && (ThumbMode == Thumb1); }
- bool isThumb2() const { return IsThumb && (ThumbMode == Thumb2); }
- bool hasThumb2() const { return ThumbMode >= Thumb2; }
-
- bool isR9Reserved() const { return IsR9Reserved; }
-
- bool useMovt() const { return UseMovt && hasV6T2Ops(); }
-
- const std::string & getCPUString() const { return CPUString; }
-
- /// enablePostRAScheduler - True at 'More' optimization.
- bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
- TargetSubtarget::AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const;
-
- /// getInstrItins - Return the instruction itineraries based on subtarget
- /// selection.
- const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
-
- /// getStackAlignment - Returns the minimum alignment known to hold of the
- /// stack frame on entry to the function and which must be maintained by every
- /// function for this subtarget.
- unsigned getStackAlignment() const { return stackAlignment; }
-
- /// GVIsIndirectSymbol - true if the GV will be accessed via an indirect
- /// symbol.
- bool GVIsIndirectSymbol(GlobalValue *GV, Reloc::Model RelocM) const;
-};
-} // End llvm namespace
-
-#endif // ARMSUBTARGET_H
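
The hasV*Ops(), hasVFP*() and hasNEON() predicates in the header above all rely on the same idiom: the architecture and FPU enums are declared in increasing order of capability, so "supports at least X" reduces to an ordered comparison. A compact sketch of that pattern, with invented names, is:

  #include <cassert>

  enum ArchSketch { V4, V4T, V5T, V5TE, V6, V6T2, V7A };

  struct SubtargetSketch {
    ArchSketch Arch;
    bool hasV4TOps() const { return Arch >= V4T; }
    bool hasV6T2Ops() const { return Arch >= V6T2; }
    bool hasV7Ops() const { return Arch >= V7A; }
  };

  int main() {
    SubtargetSketch S{V6T2};
    assert(S.hasV4TOps() && S.hasV6T2Ops() && !S.hasV7Ops());
    return 0;
  }
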
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/libclamav/c++/llvm/lib/Target/ARM/ARMTargetMachine.cpp
deleted file mode 100644
index 7233f5c..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-//===-- ARMTargetMachine.cpp - Define TargetMachine for ARM ---------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARMTargetMachine.h"
-#include "ARMMCAsmInfo.h"
-#include "ARMFrameInfo.h"
-#include "ARM.h"
-#include "llvm/PassManager.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetRegistry.h"
-using namespace llvm;
-
-static const MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT) {
- Triple TheTriple(TT);
- switch (TheTriple.getOS()) {
- case Triple::Darwin:
- return new ARMMCAsmInfoDarwin();
- default:
- return new ARMELFMCAsmInfo();
- }
-}
-
-
-extern "C" void LLVMInitializeARMTarget() {
- // Register the target.
- RegisterTargetMachine<ARMTargetMachine> X(TheARMTarget);
- RegisterTargetMachine<ThumbTargetMachine> Y(TheThumbTarget);
-
- // Register the target asm info.
- RegisterAsmInfoFn A(TheARMTarget, createMCAsmInfo);
- RegisterAsmInfoFn B(TheThumbTarget, createMCAsmInfo);
-}
-
-/// TargetMachine ctor - Create an ARM architecture model.
-///
-ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T,
- const std::string &TT,
- const std::string &FS,
- bool isThumb)
- : LLVMTargetMachine(T, TT),
- Subtarget(TT, FS, isThumb),
- FrameInfo(Subtarget),
- JITInfo(),
- InstrItins(Subtarget.getInstrItineraryData()) {
- DefRelocModel = getRelocationModel();
-}
-
-ARMTargetMachine::ARMTargetMachine(const Target &T, const std::string &TT,
- const std::string &FS)
- : ARMBaseTargetMachine(T, TT, FS, false), InstrInfo(Subtarget),
- DataLayout(Subtarget.isAPCS_ABI() ?
- std::string("e-p:32:32-f64:32:32-i64:32:32-n32") :
- std::string("e-p:32:32-f64:64:64-i64:64:64-n32")),
- TLInfo(*this) {
-}
-
-ThumbTargetMachine::ThumbTargetMachine(const Target &T, const std::string &TT,
- const std::string &FS)
- : ARMBaseTargetMachine(T, TT, FS, true),
- InstrInfo(Subtarget.hasThumb2()
- ? ((ARMBaseInstrInfo*)new Thumb2InstrInfo(Subtarget))
- : ((ARMBaseInstrInfo*)new Thumb1InstrInfo(Subtarget))),
- DataLayout(Subtarget.isAPCS_ABI() ?
- std::string("e-p:32:32-f64:32:32-i64:32:32-"
- "i16:16:32-i8:8:32-i1:8:32-a:0:32-n32") :
- std::string("e-p:32:32-f64:64:64-i64:64:64-"
- "i16:16:32-i8:8:32-i1:8:32-a:0:32-n32")),
- TLInfo(*this) {
-}
-
-
-
-// Pass Pipeline Configuration
-bool ARMBaseTargetMachine::addInstSelector(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- PM.add(createARMISelDag(*this, OptLevel));
- return false;
-}
-
-bool ARMBaseTargetMachine::addPreRegAlloc(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- if (Subtarget.hasNEON())
- PM.add(createNEONPreAllocPass());
-
- // FIXME: temporarily disabling load / store optimization pass for Thumb1.
- if (OptLevel != CodeGenOpt::None && !Subtarget.isThumb1Only())
- PM.add(createARMLoadStoreOptimizationPass(true));
- return true;
-}
-
-bool ARMBaseTargetMachine::addPreSched2(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- // FIXME: temporarily disabling load / store optimization pass for Thumb1.
- if (OptLevel != CodeGenOpt::None && !Subtarget.isThumb1Only())
- PM.add(createARMLoadStoreOptimizationPass());
-
- // Expand some pseudo instructions into multiple instructions to allow
- // proper scheduling.
- PM.add(createARMExpandPseudoPass());
-
- return true;
-}
-
-bool ARMBaseTargetMachine::addPreEmitPass(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- // FIXME: temporarily disabling load / store optimization pass for Thumb1.
- if (OptLevel != CodeGenOpt::None) {
- if (!Subtarget.isThumb1Only())
- PM.add(createIfConverterPass());
- if (Subtarget.hasNEON())
- PM.add(createNEONMoveFixPass());
- }
-
- if (Subtarget.isThumb2()) {
- PM.add(createThumb2ITBlockPass());
- PM.add(createThumb2SizeReductionPass());
- }
-
- PM.add(createARMConstantIslandPass());
- return true;
-}
-
-bool ARMBaseTargetMachine::addCodeEmitter(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel,
- JITCodeEmitter &JCE) {
- // FIXME: Move this to TargetJITInfo!
- if (DefRelocModel == Reloc::Default)
- setRelocationModel(Reloc::Static);
-
- // Machine code emitter pass for ARM.
- PM.add(createARMJITCodeEmitterPass(*this, JCE));
- return false;
-}
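
The addPreRegAlloc, addPreSched2 and addPreEmitPass overrides deleted above build the codegen pipeline conditionally: NEON-only passes are added only when the subtarget has NEON, and the load/store optimizer and if-converter are skipped for Thumb1 or at -O0. The sketch below models that conditional assembly with a plain list of pass names; it is an illustration, not the PassManager interface.

  #include <cstdio>
  #include <string>
  #include <vector>

  struct PipelineSketch {
    bool HasNEON = true;
    bool Thumb1Only = false;
    bool Optimize = true;
    std::vector<std::string> Passes;

    void addPreRegAlloc() {
      if (HasNEON) Passes.push_back("neon-prealloc");
      if (Optimize && !Thumb1Only) Passes.push_back("arm-ldst-opt (pre-alloc)");
    }
    void addPreEmit() {
      if (Optimize && !Thumb1Only) Passes.push_back("if-converter");
      Passes.push_back("arm-constant-islands");
    }
  };

  int main() {
    PipelineSketch P;
    P.addPreRegAlloc();
    P.addPreEmit();
    for (const auto &Name : P.Passes) std::printf("%s\n", Name.c_str());
    return 0;
  }
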
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMTargetMachine.h b/libclamav/c++/llvm/lib/Target/ARM/ARMTargetMachine.h
deleted file mode 100644
index 88e67e3..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMTargetMachine.h
+++ /dev/null
@@ -1,109 +0,0 @@
-//===-- ARMTargetMachine.h - Define TargetMachine for ARM -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the ARM specific subclass of TargetMachine.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARMTARGETMACHINE_H
-#define ARMTARGETMACHINE_H
-
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetData.h"
-#include "ARMInstrInfo.h"
-#include "ARMFrameInfo.h"
-#include "ARMJITInfo.h"
-#include "ARMSubtarget.h"
-#include "ARMISelLowering.h"
-#include "Thumb1InstrInfo.h"
-#include "Thumb2InstrInfo.h"
-
-namespace llvm {
-
-class ARMBaseTargetMachine : public LLVMTargetMachine {
-protected:
- ARMSubtarget Subtarget;
-
-private:
- ARMFrameInfo FrameInfo;
- ARMJITInfo JITInfo;
- InstrItineraryData InstrItins;
- Reloc::Model DefRelocModel; // Reloc model before it's overridden.
-
-public:
- ARMBaseTargetMachine(const Target &T, const std::string &TT,
- const std::string &FS, bool isThumb);
-
- virtual const ARMFrameInfo *getFrameInfo() const { return &FrameInfo; }
- virtual ARMJITInfo *getJITInfo() { return &JITInfo; }
- virtual const ARMSubtarget *getSubtargetImpl() const { return &Subtarget; }
- virtual const InstrItineraryData getInstrItineraryData() const {
- return InstrItins;
- }
-
- // Pass Pipeline Configuration
- virtual bool addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addPreRegAlloc(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addPreSched2(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
- virtual bool addCodeEmitter(PassManagerBase &PM, CodeGenOpt::Level OptLevel,
- JITCodeEmitter &MCE);
-};
-
-/// ARMTargetMachine - ARM target machine.
-///
-class ARMTargetMachine : public ARMBaseTargetMachine {
- ARMInstrInfo InstrInfo;
- const TargetData DataLayout; // Calculates type size & alignment
- ARMTargetLowering TLInfo;
-public:
- ARMTargetMachine(const Target &T, const std::string &TT,
- const std::string &FS);
-
- virtual const ARMRegisterInfo *getRegisterInfo() const {
- return &InstrInfo.getRegisterInfo();
- }
-
- virtual ARMTargetLowering *getTargetLowering() const {
- return const_cast<ARMTargetLowering*>(&TLInfo);
- }
-
- virtual const ARMInstrInfo *getInstrInfo() const { return &InstrInfo; }
- virtual const TargetData *getTargetData() const { return &DataLayout; }
-};
-
-/// ThumbTargetMachine - Thumb target machine.
-/// Due to the way architectures are handled, this represents both
-/// Thumb-1 and Thumb-2.
-///
-class ThumbTargetMachine : public ARMBaseTargetMachine {
- ARMBaseInstrInfo *InstrInfo; // either Thumb1InstrInfo or Thumb2InstrInfo
- const TargetData DataLayout; // Calculates type size & alignment
- ARMTargetLowering TLInfo;
-public:
- ThumbTargetMachine(const Target &T, const std::string &TT,
- const std::string &FS);
-
- /// returns either Thumb1RegisterInfo or Thumb2RegisterInfo
- virtual const ARMBaseRegisterInfo *getRegisterInfo() const {
- return &InstrInfo->getRegisterInfo();
- }
-
- virtual ARMTargetLowering *getTargetLowering() const {
- return const_cast<ARMTargetLowering*>(&TLInfo);
- }
-
- /// returns either Thumb1InstrInfo or Thumb2InstrInfo
- virtual const ARMBaseInstrInfo *getInstrInfo() const { return InstrInfo; }
- virtual const TargetData *getTargetData() const { return &DataLayout; }
-};
-
-} // end namespace llvm
-
-#endif
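
ARMTargetMachine embeds an ARMInstrInfo directly, while ThumbTargetMachine (also deleted above) picks Thumb1InstrInfo or Thumb2InstrInfo at construction time through a base-class pointer. A minimal sketch of that construction-time selection, using invented stand-in classes rather than the real LLVM types, could look like:

  #include <cstdio>
  #include <memory>

  struct BaseInstrInfoSketch {
    virtual const char *name() const = 0;
    virtual ~BaseInstrInfoSketch() = default;
  };
  struct Thumb1Sketch : BaseInstrInfoSketch {
    const char *name() const override { return "Thumb1InstrInfo"; }
  };
  struct Thumb2Sketch : BaseInstrInfoSketch {
    const char *name() const override { return "Thumb2InstrInfo"; }
  };

  std::unique_ptr<BaseInstrInfoSketch> makeThumbInstrInfo(bool HasThumb2) {
    if (HasThumb2)
      return std::make_unique<Thumb2Sketch>();
    return std::make_unique<Thumb1Sketch>();
  }

  int main() {
    std::printf("%s\n", makeThumbInstrInfo(true)->name());
    return 0;
  }
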
diff --git a/libclamav/c++/llvm/lib/Target/ARM/ARMTargetObjectFile.h b/libclamav/c++/llvm/lib/Target/ARM/ARMTargetObjectFile.h
deleted file mode 100644
index a488c0a..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/ARMTargetObjectFile.h
+++ /dev/null
@@ -1,39 +0,0 @@
-//===-- llvm/Target/ARMTargetObjectFile.h - ARM Object Info -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_ARM_TARGETOBJECTFILE_H
-#define LLVM_TARGET_ARM_TARGETOBJECTFILE_H
-
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/MC/MCSectionELF.h"
-
-namespace llvm {
-
- class ARMElfTargetObjectFile : public TargetLoweringObjectFileELF {
- public:
- ARMElfTargetObjectFile() : TargetLoweringObjectFileELF() {}
-
- void Initialize(MCContext &Ctx, const TargetMachine &TM) {
- TargetLoweringObjectFileELF::Initialize(Ctx, TM);
-
- if (TM.getSubtarget<ARMSubtarget>().isAAPCS_ABI()) {
- StaticCtorSection =
- getELFSection(".init_array", MCSectionELF::SHT_INIT_ARRAY,
- MCSectionELF::SHF_WRITE | MCSectionELF::SHF_ALLOC,
- SectionKind::getDataRel());
- StaticDtorSection =
- getELFSection(".fini_array", MCSectionELF::SHT_FINI_ARRAY,
- MCSectionELF::SHF_WRITE | MCSectionELF::SHF_ALLOC,
- SectionKind::getDataRel());
- }
- }
- };
-} // end namespace llvm
-
-#endif
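
The Initialize() override removed above switches the static constructor/destructor sections to .init_array/.fini_array when the AAPCS ABI is in use, and otherwise leaves the base-class defaults alone. A tiny sketch of that decision, assuming the usual .ctors/.dtors ELF defaults, is:

  #include <cstdio>

  struct CtorSections { const char *Ctors; const char *Dtors; };

  CtorSections pickCtorSections(bool IsAAPCS) {
    if (IsAAPCS)
      return {".init_array", ".fini_array"};
    return {".ctors", ".dtors"};   // assumed generic ELF defaults
  }

  int main() {
    CtorSections S = pickCtorSections(true);
    std::printf("%s %s\n", S.Ctors, S.Dtors);
    return 0;
  }
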
diff --git a/libclamav/c++/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/libclamav/c++/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
deleted file mode 100644
index 89c7769..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ /dev/null
@@ -1,739 +0,0 @@
-//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARM.h"
-#include "llvm/MC/MCParser/MCAsmLexer.h"
-#include "llvm/MC/MCParser/MCAsmParser.h"
-#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/Target/TargetRegistry.h"
-#include "llvm/Target/TargetAsmParser.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/Twine.h"
-using namespace llvm;
-
-namespace {
-struct ARMOperand;
-
-// The shift types for register controlled shifts in arm memory addressing
-enum ShiftType {
- Lsl,
- Lsr,
- Asr,
- Ror,
- Rrx
-};
-
-class ARMAsmParser : public TargetAsmParser {
- MCAsmParser &Parser;
-
-private:
- MCAsmParser &getParser() const { return Parser; }
-
- MCAsmLexer &getLexer() const { return Parser.getLexer(); }
-
- void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
-
- bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
-
- bool MaybeParseRegister(ARMOperand &Op, bool ParseWriteBack);
-
- bool ParseRegisterList(ARMOperand &Op);
-
- bool ParseMemory(ARMOperand &Op);
-
- bool ParseMemoryOffsetReg(bool &Negative,
- bool &OffsetRegShifted,
- enum ShiftType &ShiftType,
- const MCExpr *&ShiftAmount,
- const MCExpr *&Offset,
- bool &OffsetIsReg,
- int &OffsetRegNum);
-
- bool ParseShift(enum ShiftType &St, const MCExpr *&ShiftAmount);
-
- bool ParseOperand(ARMOperand &Op);
-
- bool ParseDirectiveWord(unsigned Size, SMLoc L);
-
- bool ParseDirectiveThumb(SMLoc L);
-
- bool ParseDirectiveThumbFunc(SMLoc L);
-
- bool ParseDirectiveCode(SMLoc L);
-
- bool ParseDirectiveSyntax(SMLoc L);
-
- // TODO - For now, hacked versions of the next two are in this file to
- // allow some parser testing until the table gen versions are implemented.
-
- /// @name Auto-generated Match Functions
- /// {
- bool MatchInstruction(const SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCInst &Inst);
-
- /// MatchRegisterName - Match the given string to a register name and return
- /// its register number, or -1 if there is no match. To allow return values
- /// to be used directly in register lists, arm registers have values between
- /// 0 and 15.
- int MatchRegisterName(const StringRef &Name);
-
- /// }
-
-
-public:
- ARMAsmParser(const Target &T, MCAsmParser &_Parser)
- : TargetAsmParser(T), Parser(_Parser) {}
-
- virtual bool ParseInstruction(const StringRef &Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- virtual bool ParseDirective(AsmToken DirectiveID);
-};
-
-/// ARMOperand - Instances of this class represent a parsed ARM machine
-/// instruction operand.
-struct ARMOperand : public MCParsedAsmOperand {
- enum {
- Token,
- Register,
- Immediate,
- Memory
- } Kind;
-
-
- union {
- struct {
- const char *Data;
- unsigned Length;
- } Tok;
-
- struct {
- unsigned RegNum;
- bool Writeback;
- } Reg;
-
- struct {
- const MCExpr *Val;
- } Imm;
-
- // This is for all forms of ARM address expressions
- struct {
- unsigned BaseRegNum;
- unsigned OffsetRegNum; // used when OffsetIsReg is true
- const MCExpr *Offset; // used when OffsetIsReg is false
- const MCExpr *ShiftAmount; // used when OffsetRegShifted is true
- enum ShiftType ShiftType; // used when OffsetRegShifted is true
- unsigned
- OffsetRegShifted : 1, // only used when OffsetIsReg is true
- Preindexed : 1,
- Postindexed : 1,
- OffsetIsReg : 1,
- Negative : 1, // only used when OffsetIsReg is true
- Writeback : 1;
- } Mem;
-
- };
-
- StringRef getToken() const {
- assert(Kind == Token && "Invalid access!");
- return StringRef(Tok.Data, Tok.Length);
- }
-
- unsigned getReg() const {
- assert(Kind == Register && "Invalid access!");
- return Reg.RegNum;
- }
-
- const MCExpr *getImm() const {
- assert(Kind == Immediate && "Invalid access!");
- return Imm.Val;
- }
-
- bool isToken() const {return Kind == Token; }
-
- bool isReg() const { return Kind == Register; }
-
- void addRegOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateReg(getReg()));
- }
-
- static ARMOperand CreateToken(StringRef Str) {
- ARMOperand Res;
- Res.Kind = Token;
- Res.Tok.Data = Str.data();
- Res.Tok.Length = Str.size();
- return Res;
- }
-
- static ARMOperand CreateReg(unsigned RegNum, bool Writeback) {
- ARMOperand Res;
- Res.Kind = Register;
- Res.Reg.RegNum = RegNum;
- Res.Reg.Writeback = Writeback;
- return Res;
- }
-
- static ARMOperand CreateImm(const MCExpr *Val) {
- ARMOperand Res;
- Res.Kind = Immediate;
- Res.Imm.Val = Val;
- return Res;
- }
-
- static ARMOperand CreateMem(unsigned BaseRegNum, bool OffsetIsReg,
- const MCExpr *Offset, unsigned OffsetRegNum,
- bool OffsetRegShifted, enum ShiftType ShiftType,
- const MCExpr *ShiftAmount, bool Preindexed,
- bool Postindexed, bool Negative, bool Writeback) {
- ARMOperand Res;
- Res.Kind = Memory;
- Res.Mem.BaseRegNum = BaseRegNum;
- Res.Mem.OffsetIsReg = OffsetIsReg;
- Res.Mem.Offset = Offset;
- Res.Mem.OffsetRegNum = OffsetRegNum;
- Res.Mem.OffsetRegShifted = OffsetRegShifted;
- Res.Mem.ShiftType = ShiftType;
- Res.Mem.ShiftAmount = ShiftAmount;
- Res.Mem.Preindexed = Preindexed;
- Res.Mem.Postindexed = Postindexed;
- Res.Mem.Negative = Negative;
- Res.Mem.Writeback = Writeback;
- return Res;
- }
-};
-
-} // end anonymous namespace.
-
-/// Try to parse a register name. The token must be an Identifier when called,
-/// and if it is a register name a Reg operand is created, the token is eaten
-/// and false is returned. Else true is returned and no token is eaten.
-/// TODO this is likely to change to allow different register types and or to
-/// parse for a specific register type.
-bool ARMAsmParser::MaybeParseRegister(ARMOperand &Op, bool ParseWriteBack) {
- const AsmToken &Tok = Parser.getTok();
- assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
-
- // FIXME: Validate register for the current architecture; we have to do
- // validation later, so maybe there is no need for this here.
- int RegNum;
-
- RegNum = MatchRegisterName(Tok.getString());
- if (RegNum == -1)
- return true;
- Parser.Lex(); // Eat identifier token.
-
- bool Writeback = false;
- if (ParseWriteBack) {
- const AsmToken &ExclaimTok = Parser.getTok();
- if (ExclaimTok.is(AsmToken::Exclaim)) {
- Writeback = true;
- Parser.Lex(); // Eat exclaim token
- }
- }
-
- Op = ARMOperand::CreateReg(RegNum, Writeback);
-
- return false;
-}
-
-/// Parse a register list; return false on success, otherwise report an error
-/// and return true. The first token must be a '{' when called.
-bool ARMAsmParser::ParseRegisterList(ARMOperand &Op) {
- assert(Parser.getTok().is(AsmToken::LCurly) &&
- "Token is not an Left Curly Brace");
- Parser.Lex(); // Eat left curly brace token.
-
- const AsmToken &RegTok = Parser.getTok();
- SMLoc RegLoc = RegTok.getLoc();
- if (RegTok.isNot(AsmToken::Identifier))
- return Error(RegLoc, "register expected");
- int RegNum = MatchRegisterName(RegTok.getString());
- if (RegNum == -1)
- return Error(RegLoc, "register expected");
- Parser.Lex(); // Eat identifier token.
- unsigned RegList = 1 << RegNum;
-
- int HighRegNum = RegNum;
- // TODO ranges like "{Rn-Rm}"
- while (Parser.getTok().is(AsmToken::Comma)) {
- Parser.Lex(); // Eat comma token.
-
- const AsmToken &RegTok = Parser.getTok();
- SMLoc RegLoc = RegTok.getLoc();
- if (RegTok.isNot(AsmToken::Identifier))
- return Error(RegLoc, "register expected");
- int RegNum = MatchRegisterName(RegTok.getString());
- if (RegNum == -1)
- return Error(RegLoc, "register expected");
-
- if (RegList & (1 << RegNum))
- Warning(RegLoc, "register duplicated in register list");
- else if (RegNum <= HighRegNum)
- Warning(RegLoc, "register not in ascending order in register list");
- RegList |= 1 << RegNum;
- HighRegNum = RegNum;
-
- Parser.Lex(); // Eat identifier token.
- }
- const AsmToken &RCurlyTok = Parser.getTok();
- if (RCurlyTok.isNot(AsmToken::RCurly))
- return Error(RCurlyTok.getLoc(), "'}' expected");
- Parser.Lex(); // Eat right curly brace token.
-
- return false;
-}
-
-/// Parse an ARM memory expression; return false on success, otherwise report
-/// an error and return true. The first token must be a '[' when called.
-/// TODO Only pre-indexed and post-indexed addressing are handled so far;
-/// unindexed with option, etc. are still to do.
-bool ARMAsmParser::ParseMemory(ARMOperand &Op) {
- assert(Parser.getTok().is(AsmToken::LBrac) &&
- "Token is not an Left Bracket");
- Parser.Lex(); // Eat left bracket token.
-
- const AsmToken &BaseRegTok = Parser.getTok();
- if (BaseRegTok.isNot(AsmToken::Identifier))
- return Error(BaseRegTok.getLoc(), "register expected");
- if (MaybeParseRegister(Op, false))
- return Error(BaseRegTok.getLoc(), "register expected");
- int BaseRegNum = Op.getReg();
-
- bool Preindexed = false;
- bool Postindexed = false;
- bool OffsetIsReg = false;
- bool Negative = false;
- bool Writeback = false;
-
- // First look for preindexed address forms, that is after the "[Rn" we now
- // have to see if the next token is a comma.
- const AsmToken &Tok = Parser.getTok();
- if (Tok.is(AsmToken::Comma)) {
- Preindexed = true;
- Parser.Lex(); // Eat comma token.
- int OffsetRegNum;
- bool OffsetRegShifted;
- enum ShiftType ShiftType;
- const MCExpr *ShiftAmount;
- const MCExpr *Offset;
- if(ParseMemoryOffsetReg(Negative, OffsetRegShifted, ShiftType, ShiftAmount,
- Offset, OffsetIsReg, OffsetRegNum))
- return true;
- const AsmToken &RBracTok = Parser.getTok();
- if (RBracTok.isNot(AsmToken::RBrac))
- return Error(RBracTok.getLoc(), "']' expected");
- Parser.Lex(); // Eat right bracket token.
-
- const AsmToken &ExclaimTok = Parser.getTok();
- if (ExclaimTok.is(AsmToken::Exclaim)) {
- Writeback = true;
- Parser.Lex(); // Eat exclaim token
- }
- Op = ARMOperand::CreateMem(BaseRegNum, OffsetIsReg, Offset, OffsetRegNum,
- OffsetRegShifted, ShiftType, ShiftAmount,
- Preindexed, Postindexed, Negative, Writeback);
- return false;
- }
- // The "[Rn" we have so far was not followed by a comma.
- else if (Tok.is(AsmToken::RBrac)) {
- // This is a post-indexed addressing form, that is, a ']' follows after
- // the "[Rn".
- Postindexed = true;
- Writeback = true;
- Parser.Lex(); // Eat right bracket token.
-
- int OffsetRegNum = 0;
- bool OffsetRegShifted = false;
- enum ShiftType ShiftType;
- const MCExpr *ShiftAmount;
- const MCExpr *Offset;
-
- const AsmToken &NextTok = Parser.getTok();
- if (NextTok.isNot(AsmToken::EndOfStatement)) {
- if (NextTok.isNot(AsmToken::Comma))
- return Error(NextTok.getLoc(), "',' expected");
- Parser.Lex(); // Eat comma token.
- if(ParseMemoryOffsetReg(Negative, OffsetRegShifted, ShiftType,
- ShiftAmount, Offset, OffsetIsReg, OffsetRegNum))
- return true;
- }
-
- Op = ARMOperand::CreateMem(BaseRegNum, OffsetIsReg, Offset, OffsetRegNum,
- OffsetRegShifted, ShiftType, ShiftAmount,
- Preindexed, Postindexed, Negative, Writeback);
- return false;
- }
-
- return true;
-}
-
-/// Parse the offset of a memory operand after we have seen "[Rn," or "[Rn],"
- /// we will parse the following (where +/- means that a plus or minus is
-/// optional):
-/// +/-Rm
-/// +/-Rm, shift
-/// #offset
- /// we return false on success, otherwise we report an error and return true.
-bool ARMAsmParser::ParseMemoryOffsetReg(bool &Negative,
- bool &OffsetRegShifted,
- enum ShiftType &ShiftType,
- const MCExpr *&ShiftAmount,
- const MCExpr *&Offset,
- bool &OffsetIsReg,
- int &OffsetRegNum) {
- ARMOperand Op;
- Negative = false;
- OffsetRegShifted = false;
- OffsetIsReg = false;
- OffsetRegNum = -1;
- const AsmToken &NextTok = Parser.getTok();
- if (NextTok.is(AsmToken::Plus))
- Parser.Lex(); // Eat plus token.
- else if (NextTok.is(AsmToken::Minus)) {
- Negative = true;
- Parser.Lex(); // Eat minus token
- }
- // See if there is a register following the "[Rn," or "[Rn]," we have so far.
- const AsmToken &OffsetRegTok = Parser.getTok();
- if (OffsetRegTok.is(AsmToken::Identifier)) {
- OffsetIsReg = !MaybeParseRegister(Op, false);
- if (OffsetIsReg)
- OffsetRegNum = Op.getReg();
- }
- // If we parsed a register as the offset then there can be a shift after that.
- if (OffsetRegNum != -1) {
- // Look for a comma then a shift
- const AsmToken &Tok = Parser.getTok();
- if (Tok.is(AsmToken::Comma)) {
- Parser.Lex(); // Eat comma token.
-
- const AsmToken &Tok = Parser.getTok();
- if (ParseShift(ShiftType, ShiftAmount))
- return Error(Tok.getLoc(), "shift expected");
- OffsetRegShifted = true;
- }
- }
- else { // the "[Rn," or "[Rn,]" we have so far was not followed by "Rm"
- // Look for #offset following the "[Rn," or "[Rn],"
- const AsmToken &HashTok = Parser.getTok();
- if (HashTok.isNot(AsmToken::Hash))
- return Error(HashTok.getLoc(), "'#' expected");
- Parser.Lex(); // Eat hash token.
-
- if (getParser().ParseExpression(Offset))
- return true;
- }
- return false;
-}
-
-/// ParseShift as one of these two:
-/// ( lsl | lsr | asr | ror ) , # shift_amount
-/// rrx
-/// and returns true if it parses a shift otherwise it returns false.
-bool ARMAsmParser::ParseShift(ShiftType &St, const MCExpr *&ShiftAmount) {
- const AsmToken &Tok = Parser.getTok();
- if (Tok.isNot(AsmToken::Identifier))
- return true;
- const StringRef &ShiftName = Tok.getString();
- if (ShiftName == "lsl" || ShiftName == "LSL")
- St = Lsl;
- else if (ShiftName == "lsr" || ShiftName == "LSR")
- St = Lsr;
- else if (ShiftName == "asr" || ShiftName == "ASR")
- St = Asr;
- else if (ShiftName == "ror" || ShiftName == "ROR")
- St = Ror;
- else if (ShiftName == "rrx" || ShiftName == "RRX")
- St = Rrx;
- else
- return true;
- Parser.Lex(); // Eat shift type token.
-
- // Rrx stands alone.
- if (St == Rrx)
- return false;
-
- // Otherwise, there must be a '#' and a shift amount.
- const AsmToken &HashTok = Parser.getTok();
- if (HashTok.isNot(AsmToken::Hash))
- return Error(HashTok.getLoc(), "'#' expected");
- Parser.Lex(); // Eat hash token.
-
- if (getParser().ParseExpression(ShiftAmount))
- return true;
-
- return false;
-}
-
-/// A hack to allow some testing, to be replaced by a real table gen version.
-int ARMAsmParser::MatchRegisterName(const StringRef &Name) {
- if (Name == "r0" || Name == "R0")
- return 0;
- else if (Name == "r1" || Name == "R1")
- return 1;
- else if (Name == "r2" || Name == "R2")
- return 2;
- else if (Name == "r3" || Name == "R3")
- return 3;
- else if (Name == "r3" || Name == "R3")
- return 3;
- else if (Name == "r4" || Name == "R4")
- return 4;
- else if (Name == "r5" || Name == "R5")
- return 5;
- else if (Name == "r6" || Name == "R6")
- return 6;
- else if (Name == "r7" || Name == "R7")
- return 7;
- else if (Name == "r8" || Name == "R8")
- return 8;
- else if (Name == "r9" || Name == "R9")
- return 9;
- else if (Name == "r10" || Name == "R10")
- return 10;
- else if (Name == "r11" || Name == "R11" || Name == "fp")
- return 11;
- else if (Name == "r12" || Name == "R12" || Name == "ip")
- return 12;
- else if (Name == "r13" || Name == "R13" || Name == "sp")
- return 13;
- else if (Name == "r14" || Name == "R14" || Name == "lr")
- return 14;
- else if (Name == "r15" || Name == "R15" || Name == "pc")
- return 15;
- return -1;
-}
-
-/// A hack to allow some testing, to be replaced by a real table gen version.
-bool ARMAsmParser::
-MatchInstruction(const SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCInst &Inst) {
- ARMOperand &Op0 = *(ARMOperand*)Operands[0];
- assert(Op0.Kind == ARMOperand::Token && "First operand not a Token");
- const StringRef &Mnemonic = Op0.getToken();
- if (Mnemonic == "add" ||
- Mnemonic == "stmfd" ||
- Mnemonic == "str" ||
- Mnemonic == "ldmfd" ||
- Mnemonic == "ldr" ||
- Mnemonic == "mov" ||
- Mnemonic == "sub" ||
- Mnemonic == "bl" ||
- Mnemonic == "push" ||
- Mnemonic == "blx" ||
- Mnemonic == "pop") {
- // Hard-coded to a valid instruction, till we have a real matcher.
- Inst = MCInst();
- Inst.setOpcode(ARM::MOVr);
- Inst.addOperand(MCOperand::CreateReg(2));
- Inst.addOperand(MCOperand::CreateReg(2));
- Inst.addOperand(MCOperand::CreateImm(0));
- Inst.addOperand(MCOperand::CreateImm(0));
- Inst.addOperand(MCOperand::CreateReg(0));
- return false;
- }
-
- return true;
-}
-
- /// Parse an ARM instruction operand. For now this parses the operand regardless
-/// of the mnemonic.
-bool ARMAsmParser::ParseOperand(ARMOperand &Op) {
- switch (getLexer().getKind()) {
- case AsmToken::Identifier:
- if (!MaybeParseRegister(Op, true))
- return false;
- // This was not a register so parse other operands that start with an
- // identifier (like labels) as expressions and create them as immediates.
- const MCExpr *IdVal;
- if (getParser().ParseExpression(IdVal))
- return true;
- Op = ARMOperand::CreateImm(IdVal);
- return false;
- case AsmToken::LBrac:
- return ParseMemory(Op);
- case AsmToken::LCurly:
- return ParseRegisterList(Op);
- case AsmToken::Hash:
- // #42 -> immediate.
- // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
- Parser.Lex();
- const MCExpr *ImmVal;
- if (getParser().ParseExpression(ImmVal))
- return true;
- Op = ARMOperand::CreateImm(ImmVal);
- return false;
- default:
- return Error(Parser.getTok().getLoc(), "unexpected token in operand");
- }
-}
-
- /// Parse an ARM instruction mnemonic followed by its operands.
-bool ARMAsmParser::ParseInstruction(const StringRef &Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- Operands.push_back(new ARMOperand(ARMOperand::CreateToken(Name)));
-
- SMLoc Loc = Parser.getTok().getLoc();
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
-
- // Read the first operand.
- ARMOperand Op;
- if (ParseOperand(Op)) return true;
- Operands.push_back(new ARMOperand(Op));
-
- while (getLexer().is(AsmToken::Comma)) {
- Parser.Lex(); // Eat the comma.
-
- // Parse and remember the operand.
- if (ParseOperand(Op)) return true;
- Operands.push_back(new ARMOperand(Op));
- }
- }
- return false;
-}
-
- /// ParseDirective parses the ARM-specific directives.
-bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
- StringRef IDVal = DirectiveID.getIdentifier();
- if (IDVal == ".word")
- return ParseDirectiveWord(4, DirectiveID.getLoc());
- else if (IDVal == ".thumb")
- return ParseDirectiveThumb(DirectiveID.getLoc());
- else if (IDVal == ".thumb_func")
- return ParseDirectiveThumbFunc(DirectiveID.getLoc());
- else if (IDVal == ".code")
- return ParseDirectiveCode(DirectiveID.getLoc());
- else if (IDVal == ".syntax")
- return ParseDirectiveSyntax(DirectiveID.getLoc());
- return true;
-}
-
-/// ParseDirectiveWord
-/// ::= .word [ expression (, expression)* ]
-bool ARMAsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
- for (;;) {
- const MCExpr *Value;
- if (getParser().ParseExpression(Value))
- return true;
-
- getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
-
- if (getLexer().is(AsmToken::EndOfStatement))
- break;
-
- // FIXME: Improve diagnostic.
- if (getLexer().isNot(AsmToken::Comma))
- return Error(L, "unexpected token in directive");
- Parser.Lex();
- }
- }
-
- Parser.Lex();
- return false;
-}
-
-/// ParseDirectiveThumb
-/// ::= .thumb
-bool ARMAsmParser::ParseDirectiveThumb(SMLoc L) {
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return Error(L, "unexpected token in directive");
- Parser.Lex();
-
- // TODO: set thumb mode
- // TODO: tell the MC streamer the mode
- // getParser().getStreamer().Emit???();
- return false;
-}
-
-/// ParseDirectiveThumbFunc
- /// ::= .thumb_func symbol_name
-bool ARMAsmParser::ParseDirectiveThumbFunc(SMLoc L) {
- const AsmToken &Tok = Parser.getTok();
- if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
- return Error(L, "unexpected token in .syntax directive");
- StringRef ATTRIBUTE_UNUSED SymbolName = Parser.getTok().getIdentifier();
- Parser.Lex(); // Consume the identifier token.
-
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return Error(L, "unexpected token in directive");
- Parser.Lex();
-
- // TODO: mark symbol as a thumb symbol
- // getParser().getStreamer().Emit???();
- return false;
-}
-
-/// ParseDirectiveSyntax
-/// ::= .syntax unified | divided
-bool ARMAsmParser::ParseDirectiveSyntax(SMLoc L) {
- const AsmToken &Tok = Parser.getTok();
- if (Tok.isNot(AsmToken::Identifier))
- return Error(L, "unexpected token in .syntax directive");
- const StringRef &Mode = Tok.getString();
- bool unified_syntax;
- if (Mode == "unified" || Mode == "UNIFIED") {
- Parser.Lex();
- unified_syntax = true;
- }
- else if (Mode == "divided" || Mode == "DIVIDED") {
- Parser.Lex();
- unified_syntax = false;
- }
- else
- return Error(L, "unrecognized syntax mode in .syntax directive");
-
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return Error(Parser.getTok().getLoc(), "unexpected token in directive");
- Parser.Lex();
-
- // TODO tell the MC streamer the mode
- // getParser().getStreamer().Emit???();
- return false;
-}
-
-/// ParseDirectiveCode
-/// ::= .code 16 | 32
-bool ARMAsmParser::ParseDirectiveCode(SMLoc L) {
- const AsmToken &Tok = Parser.getTok();
- if (Tok.isNot(AsmToken::Integer))
- return Error(L, "unexpected token in .code directive");
- int64_t Val = Parser.getTok().getIntVal();
- bool thumb_mode;
- if (Val == 16) {
- Parser.Lex();
- thumb_mode = true;
- }
- else if (Val == 32) {
- Parser.Lex();
- thumb_mode = false;
- }
- else
- return Error(L, "invalid operand to .code directive");
-
- if (getLexer().isNot(AsmToken::EndOfStatement))
- return Error(Parser.getTok().getLoc(), "unexpected token in directive");
- Parser.Lex();
-
- // TODO tell the MC streamer the mode
- // getParser().getStreamer().Emit???();
- return false;
-}
-
-/// Force static initialization.
-extern "C" void LLVMInitializeARMAsmParser() {
- RegisterAsmParser<ARMAsmParser> X(TheARMTarget);
- RegisterAsmParser<ARMAsmParser> Y(TheThumbTarget);
-}
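
MatchRegisterName() above is an explicit chain of string comparisons covering r0..r15 plus the fp/ip/sp/lr/pc aliases, intended as a stop-gap until a table-generated matcher exists. A compact standalone sketch of the same mapping, with an invented function name, is:

  #include <cctype>
  #include <cstdio>
  #include <string>

  // Maps "r0".."r15" (any case) and the common aliases to register numbers,
  // returning -1 on no match, mirroring the hand-written matcher above.
  int matchRegisterNameSketch(std::string Name) {
    for (char &C : Name) C = static_cast<char>(std::tolower((unsigned char)C));
    if (Name == "fp") return 11;
    if (Name == "ip") return 12;
    if (Name == "sp") return 13;
    if (Name == "lr") return 14;
    if (Name == "pc") return 15;
    if (Name.size() >= 2 && Name[0] == 'r') {
      int N = 0;
      for (size_t I = 1; I < Name.size(); ++I) {
        if (!std::isdigit((unsigned char)Name[I])) return -1;
        N = N * 10 + (Name[I] - '0');
      }
      if (N <= 15) return N;
    }
    return -1;
  }

  int main() {
    std::printf("%d %d %d\n", matchRegisterNameSketch("R7"),
                matchRegisterNameSketch("sp"), matchRegisterNameSketch("r16"));
    return 0;
  }
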
diff --git a/libclamav/c++/llvm/lib/Target/ARM/AsmParser/CMakeLists.txt b/libclamav/c++/llvm/lib/Target/ARM/AsmParser/CMakeLists.txt
deleted file mode 100644
index 308c6cf..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/AsmParser/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
-
-add_llvm_library(LLVMARMAsmParser
- ARMAsmParser.cpp
- )
-
diff --git a/libclamav/c++/llvm/lib/Target/ARM/AsmParser/Makefile b/libclamav/c++/llvm/lib/Target/ARM/AsmParser/Makefile
deleted file mode 100644
index 97e5612..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/AsmParser/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/ARM/AsmParser/Makefile -------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMARMAsmParser
-
-# Hack: we need to include 'main' ARM target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp b/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp
deleted file mode 100644
index d6d595c..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp
+++ /dev/null
@@ -1,1303 +0,0 @@
-//===-- ARMAsmPrinter.cpp - Print machine code to an ARM .s file ----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a printer that converts from our internal representation
-// of machine-dependent LLVM code to GAS-format ARM assembly language.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "asm-printer"
-#include "ARM.h"
-#include "ARMBuildAttrs.h"
-#include "ARMAddressingModes.h"
-#include "ARMConstantPoolValue.h"
-#include "ARMInstPrinter.h"
-#include "ARMMachineFunctionInfo.h"
-#include "ARMMCInstLower.h"
-#include "ARMTargetMachine.h"
-#include "llvm/Constants.h"
-#include "llvm/Module.h"
-#include "llvm/Type.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/DwarfWriter.h"
-#include "llvm/CodeGen/MachineModuleInfoImpls.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineJumpTableInfo.h"
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCSectionMachO.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetRegistry.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/StringSet.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/Support/MathExtras.h"
-#include <cctype>
-using namespace llvm;
-
-static cl::opt<bool>
-EnableMCInst("enable-arm-mcinst-printer", cl::Hidden,
- cl::desc("enable experimental asmprinter gunk in the arm backend"));
-
-namespace {
- class ARMAsmPrinter : public AsmPrinter {
-
- /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
- /// make the right decision when printing asm code for different targets.
- const ARMSubtarget *Subtarget;
-
- /// AFI - Keep a pointer to ARMFunctionInfo for the current
- /// MachineFunction.
- ARMFunctionInfo *AFI;
-
- /// MCP - Keep a pointer to constantpool entries of the current
- /// MachineFunction.
- const MachineConstantPool *MCP;
-
- public:
- explicit ARMAsmPrinter(formatted_raw_ostream &O, TargetMachine &TM,
- MCContext &Ctx, MCStreamer &Streamer,
- const MCAsmInfo *T)
- : AsmPrinter(O, TM, Ctx, Streamer, T), AFI(NULL), MCP(NULL) {
- Subtarget = &TM.getSubtarget<ARMSubtarget>();
- }
-
- virtual const char *getPassName() const {
- return "ARM Assembly Printer";
- }
-
- void printInstructionThroughMCStreamer(const MachineInstr *MI);
-
-
- void printOperand(const MachineInstr *MI, int OpNum,
- const char *Modifier = 0);
- void printSOImmOperand(const MachineInstr *MI, int OpNum);
- void printSOImm2PartOperand(const MachineInstr *MI, int OpNum);
- void printSORegOperand(const MachineInstr *MI, int OpNum);
- void printAddrMode2Operand(const MachineInstr *MI, int OpNum);
- void printAddrMode2OffsetOperand(const MachineInstr *MI, int OpNum);
- void printAddrMode3Operand(const MachineInstr *MI, int OpNum);
- void printAddrMode3OffsetOperand(const MachineInstr *MI, int OpNum);
- void printAddrMode4Operand(const MachineInstr *MI, int OpNum,
- const char *Modifier = 0);
- void printAddrMode5Operand(const MachineInstr *MI, int OpNum,
- const char *Modifier = 0);
- void printAddrMode6Operand(const MachineInstr *MI, int OpNum);
- void printAddrModePCOperand(const MachineInstr *MI, int OpNum,
- const char *Modifier = 0);
- void printBitfieldInvMaskImmOperand (const MachineInstr *MI, int OpNum);
-
- void printThumbS4ImmOperand(const MachineInstr *MI, int OpNum);
- void printThumbITMask(const MachineInstr *MI, int OpNum);
- void printThumbAddrModeRROperand(const MachineInstr *MI, int OpNum);
- void printThumbAddrModeRI5Operand(const MachineInstr *MI, int OpNum,
- unsigned Scale);
- void printThumbAddrModeS1Operand(const MachineInstr *MI, int OpNum);
- void printThumbAddrModeS2Operand(const MachineInstr *MI, int OpNum);
- void printThumbAddrModeS4Operand(const MachineInstr *MI, int OpNum);
- void printThumbAddrModeSPOperand(const MachineInstr *MI, int OpNum);
-
- void printT2SOOperand(const MachineInstr *MI, int OpNum);
- void printT2AddrModeImm12Operand(const MachineInstr *MI, int OpNum);
- void printT2AddrModeImm8Operand(const MachineInstr *MI, int OpNum);
- void printT2AddrModeImm8s4Operand(const MachineInstr *MI, int OpNum);
- void printT2AddrModeImm8OffsetOperand(const MachineInstr *MI, int OpNum);
- void printT2AddrModeSoRegOperand(const MachineInstr *MI, int OpNum);
-
- void printPredicateOperand(const MachineInstr *MI, int OpNum);
- void printMandatoryPredicateOperand(const MachineInstr *MI, int OpNum);
- void printSBitModifierOperand(const MachineInstr *MI, int OpNum);
- void printPCLabel(const MachineInstr *MI, int OpNum);
- void printRegisterList(const MachineInstr *MI, int OpNum);
- void printCPInstOperand(const MachineInstr *MI, int OpNum,
- const char *Modifier);
- void printJTBlockOperand(const MachineInstr *MI, int OpNum);
- void printJT2BlockOperand(const MachineInstr *MI, int OpNum);
- void printTBAddrMode(const MachineInstr *MI, int OpNum);
- void printNoHashImmediate(const MachineInstr *MI, int OpNum);
- void printVFPf32ImmOperand(const MachineInstr *MI, int OpNum);
- void printVFPf64ImmOperand(const MachineInstr *MI, int OpNum);
-
- void printHex8ImmOperand(const MachineInstr *MI, int OpNum) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm() & 0xff);
- }
- void printHex16ImmOperand(const MachineInstr *MI, int OpNum) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm() & 0xffff);
- }
- void printHex32ImmOperand(const MachineInstr *MI, int OpNum) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm() & 0xffffffff);
- }
- void printHex64ImmOperand(const MachineInstr *MI, int OpNum) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm());
- }
-
- virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
- unsigned AsmVariant, const char *ExtraCode);
- virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
- unsigned AsmVariant,
- const char *ExtraCode);
-
- void printInstruction(const MachineInstr *MI); // autogenerated.
- static const char *getRegisterName(unsigned RegNo);
-
- virtual void EmitInstruction(const MachineInstr *MI);
- bool runOnMachineFunction(MachineFunction &F);
-
- virtual void EmitConstantPool() {} // we emit constant pools ourselves!
- virtual void EmitFunctionEntryLabel();
- void EmitStartOfAsmFile(Module &M);
- void EmitEndOfAsmFile(Module &M);
-
- MCSymbol *GetARMSetPICJumpTableLabel2(unsigned uid, unsigned uid2,
- const MachineBasicBlock *MBB) const;
- MCSymbol *GetARMJTIPICJumpTableLabel2(unsigned uid, unsigned uid2) const;
-
- /// EmitMachineConstantPoolValue - Print a machine constantpool value to
- /// the .s file.
- virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
- switch (TM.getTargetData()->getTypeAllocSize(MCPV->getType())) {
- case 1: O << MAI->getData8bitsDirective(0); break;
- case 2: O << MAI->getData16bitsDirective(0); break;
- case 4: O << MAI->getData32bitsDirective(0); break;
- default: assert(0 && "Unknown CPV size");
- }
-
- ARMConstantPoolValue *ACPV = static_cast<ARMConstantPoolValue*>(MCPV);
- SmallString<128> TmpNameStr;
-
- if (ACPV->isLSDA()) {
- raw_svector_ostream(TmpNameStr) << MAI->getPrivateGlobalPrefix() <<
- "_LSDA_" << getFunctionNumber();
- O << TmpNameStr.str();
- } else if (ACPV->isBlockAddress()) {
- O << GetBlockAddressSymbol(ACPV->getBlockAddress())->getName();
- } else if (ACPV->isGlobalValue()) {
- GlobalValue *GV = ACPV->getGV();
- bool isIndirect = Subtarget->isTargetDarwin() &&
- Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel());
- if (!isIndirect)
- O << *GetGlobalValueSymbol(GV);
- else {
- // FIXME: Remove this when Darwin transition to @GOT like syntax.
- MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
- O << *Sym;
-
- MachineModuleInfoMachO &MMIMachO =
- MMI->getObjFileInfo<MachineModuleInfoMachO>();
- MCSymbol *&StubSym =
- GV->hasHiddenVisibility() ? MMIMachO.getHiddenGVStubEntry(Sym) :
- MMIMachO.getGVStubEntry(Sym);
- if (StubSym == 0)
- StubSym = GetGlobalValueSymbol(GV);
- }
- } else {
- assert(ACPV->isExtSymbol() && "unrecognized constant pool value");
- O << *GetExternalSymbolSymbol(ACPV->getSymbol());
- }
-
- if (ACPV->hasModifier()) O << "(" << ACPV->getModifier() << ")";
- if (ACPV->getPCAdjustment() != 0) {
- O << "-(" << MAI->getPrivateGlobalPrefix() << "PC"
- << getFunctionNumber() << "_" << ACPV->getLabelId()
- << "+" << (unsigned)ACPV->getPCAdjustment();
- if (ACPV->mustAddCurrentAddress())
- O << "-.";
- O << ')';
- }
- OutStreamer.AddBlankLine();
- }
-
- void getAnalysisUsage(AnalysisUsage &AU) const {
- AsmPrinter::getAnalysisUsage(AU);
- AU.setPreservesAll();
- AU.addRequired<MachineModuleInfo>();
- AU.addRequired<DwarfWriter>();
- }
- };
-} // end of anonymous namespace
-
-#include "ARMGenAsmWriter.inc"
-
-void ARMAsmPrinter::EmitFunctionEntryLabel() {
- if (AFI->isThumbFunction()) {
- O << "\t.code\t16\n";
- O << "\t.thumb_func";
- if (Subtarget->isTargetDarwin())
- O << '\t' << *CurrentFnSym;
- O << '\n';
- }
-
- OutStreamer.EmitLabel(CurrentFnSym);
-}
-
-/// runOnMachineFunction - This uses the printInstruction()
-/// method to print assembly for each instruction.
-///
-bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
- AFI = MF.getInfo<ARMFunctionInfo>();
- MCP = MF.getConstantPool();
-
- return AsmPrinter::runOnMachineFunction(MF);
-}
-
-void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
- const char *Modifier) {
- const MachineOperand &MO = MI->getOperand(OpNum);
- unsigned TF = MO.getTargetFlags();
-
- switch (MO.getType()) {
- default:
- assert(0 && "<unknown operand type>");
- case MachineOperand::MO_Register: {
- unsigned Reg = MO.getReg();
- assert(TargetRegisterInfo::isPhysicalRegister(Reg));
- if (Modifier && strcmp(Modifier, "dregpair") == 0) {
- unsigned DRegLo = TRI->getSubReg(Reg, 5); // arm_dsubreg_0
- unsigned DRegHi = TRI->getSubReg(Reg, 6); // arm_dsubreg_1
- O << '{'
- << getRegisterName(DRegLo) << ',' << getRegisterName(DRegHi)
- << '}';
- } else if (Modifier && strcmp(Modifier, "lane") == 0) {
- unsigned RegNum = ARMRegisterInfo::getRegisterNumbering(Reg);
- unsigned DReg = TRI->getMatchingSuperReg(Reg, RegNum & 1 ? 2 : 1,
- &ARM::DPR_VFP2RegClass);
- O << getRegisterName(DReg) << '[' << (RegNum & 1) << ']';
- } else {
- assert(!MO.getSubReg() && "Subregs should be eliminated!");
- O << getRegisterName(Reg);
- }
- break;
- }
- case MachineOperand::MO_Immediate: {
- int64_t Imm = MO.getImm();
- O << '#';
- if ((Modifier && strcmp(Modifier, "lo16") == 0) ||
- (TF & ARMII::MO_LO16))
- O << ":lower16:";
- else if ((Modifier && strcmp(Modifier, "hi16") == 0) ||
- (TF & ARMII::MO_HI16))
- O << ":upper16:";
- O << Imm;
- break;
- }
- case MachineOperand::MO_MachineBasicBlock:
- O << *MO.getMBB()->getSymbol(OutContext);
- return;
- case MachineOperand::MO_GlobalAddress: {
- bool isCallOp = Modifier && !strcmp(Modifier, "call");
- GlobalValue *GV = MO.getGlobal();
-
- if ((Modifier && strcmp(Modifier, "lo16") == 0) ||
- (TF & ARMII::MO_LO16))
- O << ":lower16:";
- else if ((Modifier && strcmp(Modifier, "hi16") == 0) ||
- (TF & ARMII::MO_HI16))
- O << ":upper16:";
- O << *GetGlobalValueSymbol(GV);
-
- printOffset(MO.getOffset());
-
- if (isCallOp && Subtarget->isTargetELF() &&
- TM.getRelocationModel() == Reloc::PIC_)
- O << "(PLT)";
- break;
- }
- case MachineOperand::MO_ExternalSymbol: {
- bool isCallOp = Modifier && !strcmp(Modifier, "call");
- O << *GetExternalSymbolSymbol(MO.getSymbolName());
-
- if (isCallOp && Subtarget->isTargetELF() &&
- TM.getRelocationModel() == Reloc::PIC_)
- O << "(PLT)";
- break;
- }
- case MachineOperand::MO_ConstantPoolIndex:
- O << *GetCPISymbol(MO.getIndex());
- break;
- case MachineOperand::MO_JumpTableIndex:
- O << *GetJTISymbol(MO.getIndex());
- break;
- }
-}
-
-static void printSOImm(formatted_raw_ostream &O, int64_t V, bool VerboseAsm,
- const MCAsmInfo *MAI) {
- // Break it up into two parts that make up a shifter immediate.
- V = ARM_AM::getSOImmVal(V);
- assert(V != -1 && "Not a valid so_imm value!");
-
- unsigned Imm = ARM_AM::getSOImmValImm(V);
- unsigned Rot = ARM_AM::getSOImmValRot(V);
-
- // Print low-level immediate formation info, per
- // A5.1.3: "Data-processing operands - Immediate".
- if (Rot) {
- O << "#" << Imm << ", " << Rot;
- // Pretty printed version.
- if (VerboseAsm) {
- O.PadToColumn(MAI->getCommentColumn());
- O << MAI->getCommentString() << ' ';
- O << (int)ARM_AM::rotr32(Imm, Rot);
- }
- } else {
- O << "#" << Imm;
- }
-}
-
-/// printSOImmOperand - SOImm is a 4-bit rotate amount in bits 8-11 with an
-/// 8-bit immediate in bits 0-7.
-void ARMAsmPrinter::printSOImmOperand(const MachineInstr *MI, int OpNum) {
- const MachineOperand &MO = MI->getOperand(OpNum);
- assert(MO.isImm() && "Not a valid so_imm value!");
- printSOImm(O, MO.getImm(), VerboseAsm, MAI);
-}
-
-/// printSOImm2PartOperand - SOImm is broken into two pieces using a 'mov'
-/// followed by an 'orr' to materialize.
-void ARMAsmPrinter::printSOImm2PartOperand(const MachineInstr *MI, int OpNum) {
- const MachineOperand &MO = MI->getOperand(OpNum);
- assert(MO.isImm() && "Not a valid so_imm value!");
- unsigned V1 = ARM_AM::getSOImmTwoPartFirst(MO.getImm());
- unsigned V2 = ARM_AM::getSOImmTwoPartSecond(MO.getImm());
- printSOImm(O, V1, VerboseAsm, MAI);
- O << "\n\torr";
- printPredicateOperand(MI, 2);
- O << "\t";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 0);
- O << ", ";
- printSOImm(O, V2, VerboseAsm, MAI);
-}
-
-// so_reg is a 4-operand unit corresponding to register forms of the A5.1
-// "Addressing Mode 1 - Data-processing operands" forms. This includes:
-// REG 0 0 - e.g. R5
-// REG REG 0,SH_OPC - e.g. R5, ROR R3
-// REG 0 IMM,SH_OPC - e.g. R5, LSL #3
-void ARMAsmPrinter::printSORegOperand(const MachineInstr *MI, int Op) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
- const MachineOperand &MO3 = MI->getOperand(Op+2);
-
- O << getRegisterName(MO1.getReg());
-
- // Print the shift opc.
- O << ", "
- << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(MO3.getImm()))
- << " ";
-
- if (MO2.getReg()) {
- O << getRegisterName(MO2.getReg());
- assert(ARM_AM::getSORegOffset(MO3.getImm()) == 0);
- } else {
- O << "#" << ARM_AM::getSORegOffset(MO3.getImm());
- }
-}
-
-void ARMAsmPrinter::printAddrMode2Operand(const MachineInstr *MI, int Op) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
- const MachineOperand &MO3 = MI->getOperand(Op+2);
-
- if (!MO1.isReg()) { // FIXME: This is for CP entries, but isn't right.
- printOperand(MI, Op);
- return;
- }
-
- O << "[" << getRegisterName(MO1.getReg());
-
- if (!MO2.getReg()) {
- if (ARM_AM::getAM2Offset(MO3.getImm())) // Don't print +0.
- O << ", #"
- << (char)ARM_AM::getAM2Op(MO3.getImm())
- << ARM_AM::getAM2Offset(MO3.getImm());
- O << "]";
- return;
- }
-
- O << ", "
- << (char)ARM_AM::getAM2Op(MO3.getImm())
- << getRegisterName(MO2.getReg());
-
- if (unsigned ShImm = ARM_AM::getAM2Offset(MO3.getImm()))
- O << ", "
- << ARM_AM::getShiftOpcStr(ARM_AM::getAM2ShiftOpc(MO3.getImm()))
- << " #" << ShImm;
- O << "]";
-}
-
-void ARMAsmPrinter::printAddrMode2OffsetOperand(const MachineInstr *MI, int Op){
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
-
- if (!MO1.getReg()) {
- unsigned ImmOffs = ARM_AM::getAM2Offset(MO2.getImm());
- assert(ImmOffs && "Malformed indexed load / store!");
- O << "#"
- << (char)ARM_AM::getAM2Op(MO2.getImm())
- << ImmOffs;
- return;
- }
-
- O << (char)ARM_AM::getAM2Op(MO2.getImm())
- << getRegisterName(MO1.getReg());
-
- if (unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm()))
- O << ", "
- << ARM_AM::getShiftOpcStr(ARM_AM::getAM2ShiftOpc(MO2.getImm()))
- << " #" << ShImm;
-}
-
-void ARMAsmPrinter::printAddrMode3Operand(const MachineInstr *MI, int Op) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
- const MachineOperand &MO3 = MI->getOperand(Op+2);
-
- assert(TargetRegisterInfo::isPhysicalRegister(MO1.getReg()));
- O << "[" << getRegisterName(MO1.getReg());
-
- if (MO2.getReg()) {
- O << ", "
- << (char)ARM_AM::getAM3Op(MO3.getImm())
- << getRegisterName(MO2.getReg())
- << "]";
- return;
- }
-
- if (unsigned ImmOffs = ARM_AM::getAM3Offset(MO3.getImm()))
- O << ", #"
- << (char)ARM_AM::getAM3Op(MO3.getImm())
- << ImmOffs;
- O << "]";
-}
-
-void ARMAsmPrinter::printAddrMode3OffsetOperand(const MachineInstr *MI, int Op){
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
-
- if (MO1.getReg()) {
- O << (char)ARM_AM::getAM3Op(MO2.getImm())
- << getRegisterName(MO1.getReg());
- return;
- }
-
- unsigned ImmOffs = ARM_AM::getAM3Offset(MO2.getImm());
- assert(ImmOffs && "Malformed indexed load / store!");
- O << "#"
- << (char)ARM_AM::getAM3Op(MO2.getImm())
- << ImmOffs;
-}
-
-void ARMAsmPrinter::printAddrMode4Operand(const MachineInstr *MI, int Op,
- const char *Modifier) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
- ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MO2.getImm());
- if (Modifier && strcmp(Modifier, "submode") == 0) {
- if (MO1.getReg() == ARM::SP) {
- // FIXME
- bool isLDM = (MI->getOpcode() == ARM::LDM ||
- MI->getOpcode() == ARM::LDM_RET ||
- MI->getOpcode() == ARM::t2LDM ||
- MI->getOpcode() == ARM::t2LDM_RET);
- O << ARM_AM::getAMSubModeAltStr(Mode, isLDM);
- } else
- O << ARM_AM::getAMSubModeStr(Mode);
- } else if (Modifier && strcmp(Modifier, "wide") == 0) {
- ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MO2.getImm());
- if (Mode == ARM_AM::ia)
- O << ".w";
- } else {
- printOperand(MI, Op);
- if (ARM_AM::getAM4WBFlag(MO2.getImm()))
- O << "!";
- }
-}
-
-void ARMAsmPrinter::printAddrMode5Operand(const MachineInstr *MI, int Op,
- const char *Modifier) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
-
- if (!MO1.isReg()) { // FIXME: This is for CP entries, but isn't right.
- printOperand(MI, Op);
- return;
- }
-
- assert(TargetRegisterInfo::isPhysicalRegister(MO1.getReg()));
-
- if (Modifier && strcmp(Modifier, "submode") == 0) {
- ARM_AM::AMSubMode Mode = ARM_AM::getAM5SubMode(MO2.getImm());
- O << ARM_AM::getAMSubModeStr(Mode);
- return;
- } else if (Modifier && strcmp(Modifier, "base") == 0) {
- // Used for FSTM{D|S} and FLDM{D|S} operations.
- O << getRegisterName(MO1.getReg());
- if (ARM_AM::getAM5WBFlag(MO2.getImm()))
- O << "!";
- return;
- }
-
- O << "[" << getRegisterName(MO1.getReg());
-
- if (unsigned ImmOffs = ARM_AM::getAM5Offset(MO2.getImm())) {
- O << ", #"
- << (char)ARM_AM::getAM5Op(MO2.getImm())
- << ImmOffs*4;
- }
- O << "]";
-}
-
-void ARMAsmPrinter::printAddrMode6Operand(const MachineInstr *MI, int Op) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
- const MachineOperand &MO3 = MI->getOperand(Op+2);
- const MachineOperand &MO4 = MI->getOperand(Op+3);
-
- O << "[" << getRegisterName(MO1.getReg());
- if (MO4.getImm()) {
- // FIXME: Both darwin as and GNU as violate ARM docs here.
- O << ", :" << MO4.getImm();
- }
- O << "]";
-
- if (ARM_AM::getAM6WBFlag(MO3.getImm())) {
- if (MO2.getReg() == 0)
- O << "!";
- else
- O << ", " << getRegisterName(MO2.getReg());
- }
-}
-
-void ARMAsmPrinter::printAddrModePCOperand(const MachineInstr *MI, int Op,
- const char *Modifier) {
- if (Modifier && strcmp(Modifier, "label") == 0) {
- printPCLabel(MI, Op+1);
- return;
- }
-
- const MachineOperand &MO1 = MI->getOperand(Op);
- assert(TargetRegisterInfo::isPhysicalRegister(MO1.getReg()));
- O << "[pc, +" << getRegisterName(MO1.getReg()) << "]";
-}
-
-void
-ARMAsmPrinter::printBitfieldInvMaskImmOperand(const MachineInstr *MI, int Op) {
- const MachineOperand &MO = MI->getOperand(Op);
- uint32_t v = ~MO.getImm();
- int32_t lsb = CountTrailingZeros_32(v);
- int32_t width = (32 - CountLeadingZeros_32 (v)) - lsb;
- assert(MO.isImm() && "Not a valid bf_inv_mask_imm value!");
- O << "#" << lsb << ", #" << width;
-}
-
-//===--------------------------------------------------------------------===//
-
-void ARMAsmPrinter::printThumbS4ImmOperand(const MachineInstr *MI, int Op) {
- O << "#" << MI->getOperand(Op).getImm() * 4;
-}
-
-void
-ARMAsmPrinter::printThumbITMask(const MachineInstr *MI, int Op) {
- // (3 - the number of trailing zeros) is the number of then / else clauses.
- unsigned Mask = MI->getOperand(Op).getImm();
- unsigned NumTZ = CountTrailingZeros_32(Mask);
- assert(NumTZ <= 3 && "Invalid IT mask!");
- for (unsigned Pos = 3, e = NumTZ; Pos > e; --Pos) {
- bool T = (Mask & (1 << Pos)) == 0;
- if (T)
- O << 't';
- else
- O << 'e';
- }
-}
-
-void
-ARMAsmPrinter::printThumbAddrModeRROperand(const MachineInstr *MI, int Op) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
- O << "[" << getRegisterName(MO1.getReg());
- O << ", " << getRegisterName(MO2.getReg()) << "]";
-}
-
-void
-ARMAsmPrinter::printThumbAddrModeRI5Operand(const MachineInstr *MI, int Op,
- unsigned Scale) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
- const MachineOperand &MO3 = MI->getOperand(Op+2);
-
- if (!MO1.isReg()) { // FIXME: This is for CP entries, but isn't right.
- printOperand(MI, Op);
- return;
- }
-
- O << "[" << getRegisterName(MO1.getReg());
- if (MO3.getReg())
- O << ", " << getRegisterName(MO3.getReg());
- else if (unsigned ImmOffs = MO2.getImm())
- O << ", #+" << ImmOffs * Scale;
- O << "]";
-}
-
-void
-ARMAsmPrinter::printThumbAddrModeS1Operand(const MachineInstr *MI, int Op) {
- printThumbAddrModeRI5Operand(MI, Op, 1);
-}
-void
-ARMAsmPrinter::printThumbAddrModeS2Operand(const MachineInstr *MI, int Op) {
- printThumbAddrModeRI5Operand(MI, Op, 2);
-}
-void
-ARMAsmPrinter::printThumbAddrModeS4Operand(const MachineInstr *MI, int Op) {
- printThumbAddrModeRI5Operand(MI, Op, 4);
-}
-
-void ARMAsmPrinter::printThumbAddrModeSPOperand(const MachineInstr *MI,int Op) {
- const MachineOperand &MO1 = MI->getOperand(Op);
- const MachineOperand &MO2 = MI->getOperand(Op+1);
- O << "[" << getRegisterName(MO1.getReg());
- if (unsigned ImmOffs = MO2.getImm())
- O << ", #+" << ImmOffs*4;
- O << "]";
-}
-
-//===--------------------------------------------------------------------===//
-
-// For constant shifts, t2_so_reg is a 2-operand unit corresponding to the
-// Thumb2 register-with-shift forms.
-// REG 0 0 - e.g. R5
-// REG IMM, SH_OPC - e.g. R5, LSL #3
-void ARMAsmPrinter::printT2SOOperand(const MachineInstr *MI, int OpNum) {
- const MachineOperand &MO1 = MI->getOperand(OpNum);
- const MachineOperand &MO2 = MI->getOperand(OpNum+1);
-
- unsigned Reg = MO1.getReg();
- assert(TargetRegisterInfo::isPhysicalRegister(Reg));
- O << getRegisterName(Reg);
-
- // Print the shift opc.
- O << ", "
- << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(MO2.getImm()))
- << " ";
-
- assert(MO2.isImm() && "Not a valid t2_so_reg value!");
- O << "#" << ARM_AM::getSORegOffset(MO2.getImm());
-}
-
-void ARMAsmPrinter::printT2AddrModeImm12Operand(const MachineInstr *MI,
- int OpNum) {
- const MachineOperand &MO1 = MI->getOperand(OpNum);
- const MachineOperand &MO2 = MI->getOperand(OpNum+1);
-
- O << "[" << getRegisterName(MO1.getReg());
-
- unsigned OffImm = MO2.getImm();
- if (OffImm) // Don't print +0.
- O << ", #+" << OffImm;
- O << "]";
-}
-
-void ARMAsmPrinter::printT2AddrModeImm8Operand(const MachineInstr *MI,
- int OpNum) {
- const MachineOperand &MO1 = MI->getOperand(OpNum);
- const MachineOperand &MO2 = MI->getOperand(OpNum+1);
-
- O << "[" << getRegisterName(MO1.getReg());
-
- int32_t OffImm = (int32_t)MO2.getImm();
- // Don't print +0.
- if (OffImm < 0)
- O << ", #-" << -OffImm;
- else if (OffImm > 0)
- O << ", #+" << OffImm;
- O << "]";
-}
-
-void ARMAsmPrinter::printT2AddrModeImm8s4Operand(const MachineInstr *MI,
- int OpNum) {
- const MachineOperand &MO1 = MI->getOperand(OpNum);
- const MachineOperand &MO2 = MI->getOperand(OpNum+1);
-
- O << "[" << getRegisterName(MO1.getReg());
-
- int32_t OffImm = (int32_t)MO2.getImm() / 4;
- // Don't print +0.
- if (OffImm < 0)
- O << ", #-" << -OffImm * 4;
- else if (OffImm > 0)
- O << ", #+" << OffImm * 4;
- O << "]";
-}
-
-void ARMAsmPrinter::printT2AddrModeImm8OffsetOperand(const MachineInstr *MI,
- int OpNum) {
- const MachineOperand &MO1 = MI->getOperand(OpNum);
- int32_t OffImm = (int32_t)MO1.getImm();
- // Don't print +0.
- if (OffImm < 0)
- O << "#-" << -OffImm;
- else if (OffImm > 0)
- O << "#+" << OffImm;
-}
-
-void ARMAsmPrinter::printT2AddrModeSoRegOperand(const MachineInstr *MI,
- int OpNum) {
- const MachineOperand &MO1 = MI->getOperand(OpNum);
- const MachineOperand &MO2 = MI->getOperand(OpNum+1);
- const MachineOperand &MO3 = MI->getOperand(OpNum+2);
-
- O << "[" << getRegisterName(MO1.getReg());
-
- assert(MO2.getReg() && "Invalid so_reg load / store address!");
- O << ", " << getRegisterName(MO2.getReg());
-
- unsigned ShAmt = MO3.getImm();
- if (ShAmt) {
- assert(ShAmt <= 3 && "Not a valid Thumb2 addressing mode!");
- O << ", lsl #" << ShAmt;
- }
- O << "]";
-}
-
-
-//===--------------------------------------------------------------------===//
-
-void ARMAsmPrinter::printPredicateOperand(const MachineInstr *MI, int OpNum) {
- ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(OpNum).getImm();
- if (CC != ARMCC::AL)
- O << ARMCondCodeToString(CC);
-}
-
-void ARMAsmPrinter::printMandatoryPredicateOperand(const MachineInstr *MI,
- int OpNum) {
- ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(OpNum).getImm();
- O << ARMCondCodeToString(CC);
-}
-
-void ARMAsmPrinter::printSBitModifierOperand(const MachineInstr *MI, int OpNum){
- unsigned Reg = MI->getOperand(OpNum).getReg();
- if (Reg) {
- assert(Reg == ARM::CPSR && "Expect ARM CPSR register!");
- O << 's';
- }
-}
-
-void ARMAsmPrinter::printPCLabel(const MachineInstr *MI, int OpNum) {
- int Id = (int)MI->getOperand(OpNum).getImm();
- O << MAI->getPrivateGlobalPrefix()
- << "PC" << getFunctionNumber() << "_" << Id;
-}
-
-void ARMAsmPrinter::printRegisterList(const MachineInstr *MI, int OpNum) {
- O << "{";
- // Always skip the first operand, it's the optional (and implicit) writeback.
- for (unsigned i = OpNum+1, e = MI->getNumOperands(); i != e; ++i) {
- if (MI->getOperand(i).isImplicit())
- continue;
- if ((int)i != OpNum+1) O << ", ";
- printOperand(MI, i);
- }
- O << "}";
-}
-
-void ARMAsmPrinter::printCPInstOperand(const MachineInstr *MI, int OpNum,
- const char *Modifier) {
- assert(Modifier && "This operand only works with a modifier!");
- // There are two aspects to a CONSTANTPOOL_ENTRY operand, the label and the
- // data itself.
- if (!strcmp(Modifier, "label")) {
- unsigned ID = MI->getOperand(OpNum).getImm();
- OutStreamer.EmitLabel(GetCPISymbol(ID));
- } else {
- assert(!strcmp(Modifier, "cpentry") && "Unknown modifier for CPE");
- unsigned CPI = MI->getOperand(OpNum).getIndex();
-
- const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
-
- if (MCPE.isMachineConstantPoolEntry()) {
- EmitMachineConstantPoolValue(MCPE.Val.MachineCPVal);
- } else {
- EmitGlobalConstant(MCPE.Val.ConstVal);
- }
- }
-}
-
-MCSymbol *ARMAsmPrinter::
-GetARMSetPICJumpTableLabel2(unsigned uid, unsigned uid2,
- const MachineBasicBlock *MBB) const {
- SmallString<60> Name;
- raw_svector_ostream(Name) << MAI->getPrivateGlobalPrefix()
- << getFunctionNumber() << '_' << uid << '_' << uid2
- << "_set_" << MBB->getNumber();
- return OutContext.GetOrCreateSymbol(Name.str());
-}
-
-MCSymbol *ARMAsmPrinter::
-GetARMJTIPICJumpTableLabel2(unsigned uid, unsigned uid2) const {
- SmallString<60> Name;
- raw_svector_ostream(Name) << MAI->getPrivateGlobalPrefix() << "JTI"
- << getFunctionNumber() << '_' << uid << '_' << uid2;
- return OutContext.GetOrCreateSymbol(Name.str());
-}
-
-void ARMAsmPrinter::printJTBlockOperand(const MachineInstr *MI, int OpNum) {
- assert(!Subtarget->isThumb2() && "Thumb2 should use double-jump jumptables!");
-
- const MachineOperand &MO1 = MI->getOperand(OpNum);
- const MachineOperand &MO2 = MI->getOperand(OpNum+1); // Unique Id
-
- unsigned JTI = MO1.getIndex();
- MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel2(JTI, MO2.getImm());
- OutStreamer.EmitLabel(JTISymbol);
-
- const char *JTEntryDirective = MAI->getData32bitsDirective();
-
- const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
- const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
- const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
- bool UseSet= MAI->hasSetDirective() && TM.getRelocationModel() == Reloc::PIC_;
- SmallPtrSet<MachineBasicBlock*, 8> JTSets;
- for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
- MachineBasicBlock *MBB = JTBBs[i];
- bool isNew = JTSets.insert(MBB);
-
- if (UseSet && isNew) {
- O << "\t.set\t"
- << *GetARMSetPICJumpTableLabel2(JTI, MO2.getImm(), MBB) << ','
- << *MBB->getSymbol(OutContext) << '-' << *JTISymbol << '\n';
- }
-
- O << JTEntryDirective << ' ';
- if (UseSet)
- O << *GetARMSetPICJumpTableLabel2(JTI, MO2.getImm(), MBB);
- else if (TM.getRelocationModel() == Reloc::PIC_)
- O << *MBB->getSymbol(OutContext) << '-' << *JTISymbol;
- else
- O << *MBB->getSymbol(OutContext);
-
- if (i != e-1)
- O << '\n';
- }
-}
-
-void ARMAsmPrinter::printJT2BlockOperand(const MachineInstr *MI, int OpNum) {
- const MachineOperand &MO1 = MI->getOperand(OpNum);
- const MachineOperand &MO2 = MI->getOperand(OpNum+1); // Unique Id
- unsigned JTI = MO1.getIndex();
-
- MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel2(JTI, MO2.getImm());
- OutStreamer.EmitLabel(JTISymbol);
-
- const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
- const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
- const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
- bool ByteOffset = false, HalfWordOffset = false;
- if (MI->getOpcode() == ARM::t2TBB)
- ByteOffset = true;
- else if (MI->getOpcode() == ARM::t2TBH)
- HalfWordOffset = true;
-
- for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
- MachineBasicBlock *MBB = JTBBs[i];
- if (ByteOffset)
- O << MAI->getData8bitsDirective();
- else if (HalfWordOffset)
- O << MAI->getData16bitsDirective();
-
- if (ByteOffset || HalfWordOffset)
- O << '(' << *MBB->getSymbol(OutContext) << "-" << *JTISymbol << ")/2";
- else
- O << "\tb.w " << *MBB->getSymbol(OutContext);
-
- if (i != e-1)
- O << '\n';
- }
-
- // Make sure the instruction that follows TBB is 2-byte aligned.
- // FIXME: Constant island pass should insert an "ALIGN" instruction instead.
- if (ByteOffset && (JTBBs.size() & 1)) {
- O << '\n';
- EmitAlignment(1);
- }
-}
-
-void ARMAsmPrinter::printTBAddrMode(const MachineInstr *MI, int OpNum) {
- O << "[pc, " << getRegisterName(MI->getOperand(OpNum).getReg());
- if (MI->getOpcode() == ARM::t2TBH)
- O << ", lsl #1";
- O << ']';
-}
-
-void ARMAsmPrinter::printNoHashImmediate(const MachineInstr *MI, int OpNum) {
- O << MI->getOperand(OpNum).getImm();
-}
-
-void ARMAsmPrinter::printVFPf32ImmOperand(const MachineInstr *MI, int OpNum) {
- const ConstantFP *FP = MI->getOperand(OpNum).getFPImm();
- O << '#' << FP->getValueAPF().convertToFloat();
- if (VerboseAsm) {
- O.PadToColumn(MAI->getCommentColumn());
- O << MAI->getCommentString() << ' ';
- WriteAsOperand(O, FP, /*PrintType=*/false);
- }
-}
-
-void ARMAsmPrinter::printVFPf64ImmOperand(const MachineInstr *MI, int OpNum) {
- const ConstantFP *FP = MI->getOperand(OpNum).getFPImm();
- O << '#' << FP->getValueAPF().convertToDouble();
- if (VerboseAsm) {
- O.PadToColumn(MAI->getCommentColumn());
- O << MAI->getCommentString() << ' ';
- WriteAsOperand(O, FP, /*PrintType=*/false);
- }
-}
-
-bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
- unsigned AsmVariant, const char *ExtraCode){
- // Does this asm operand have a single letter operand modifier?
- if (ExtraCode && ExtraCode[0]) {
- if (ExtraCode[1] != 0) return true; // Unknown modifier.
-
- switch (ExtraCode[0]) {
- default: return true; // Unknown modifier.
- case 'a': // Print as a memory address.
- if (MI->getOperand(OpNum).isReg()) {
- O << "[" << getRegisterName(MI->getOperand(OpNum).getReg()) << "]";
- return false;
- }
- // Fallthrough
- case 'c': // Don't print "#" before an immediate operand.
- if (!MI->getOperand(OpNum).isImm())
- return true;
- printNoHashImmediate(MI, OpNum);
- return false;
- case 'P': // Print a VFP double precision register.
- case 'q': // Print a NEON quad precision register.
- printOperand(MI, OpNum);
- return false;
- case 'Q':
- if (TM.getTargetData()->isLittleEndian())
- break;
- // Fallthrough
- case 'R':
- if (TM.getTargetData()->isBigEndian())
- break;
- // Fallthrough
- case 'H': // Write second word of DI / DF reference.
- // Verify that this operand has two consecutive registers.
- if (!MI->getOperand(OpNum).isReg() ||
- OpNum+1 == MI->getNumOperands() ||
- !MI->getOperand(OpNum+1).isReg())
- return true;
- ++OpNum; // Return the high-part.
- }
- }
-
- printOperand(MI, OpNum);
- return false;
-}
-
-bool ARMAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
- unsigned OpNum, unsigned AsmVariant,
- const char *ExtraCode) {
- if (ExtraCode && ExtraCode[0])
- return true; // Unknown modifier.
-
- const MachineOperand &MO = MI->getOperand(OpNum);
- assert(MO.isReg() && "unexpected inline asm memory operand");
- O << "[" << getRegisterName(MO.getReg()) << "]";
- return false;
-}
-
-void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
- if (EnableMCInst) {
- printInstructionThroughMCStreamer(MI);
- } else {
- int Opc = MI->getOpcode();
- if (Opc == ARM::CONSTPOOL_ENTRY)
- EmitAlignment(2);
-
- printInstruction(MI);
- OutStreamer.AddBlankLine();
- }
-}
-
-void ARMAsmPrinter::EmitStartOfAsmFile(Module &M) {
- if (Subtarget->isTargetDarwin()) {
- Reloc::Model RelocM = TM.getRelocationModel();
- if (RelocM == Reloc::PIC_ || RelocM == Reloc::DynamicNoPIC) {
- // Declare all the text sections up front (before the DWARF sections
- // emitted by AsmPrinter::doInitialization) so the assembler will keep
- // them together at the beginning of the object file. This helps
- // avoid out-of-range branches that are due to a fundamental limitation of
- // the way symbol offsets are encoded with the current Darwin ARM
- // relocations.
- TargetLoweringObjectFileMachO &TLOFMacho =
- static_cast<TargetLoweringObjectFileMachO &>(getObjFileLowering());
- OutStreamer.SwitchSection(TLOFMacho.getTextSection());
- OutStreamer.SwitchSection(TLOFMacho.getTextCoalSection());
- OutStreamer.SwitchSection(TLOFMacho.getConstTextCoalSection());
- if (RelocM == Reloc::DynamicNoPIC) {
- const MCSection *sect =
- TLOFMacho.getMachOSection("__TEXT", "__symbol_stub4",
- MCSectionMachO::S_SYMBOL_STUBS,
- 12, SectionKind::getText());
- OutStreamer.SwitchSection(sect);
- } else {
- const MCSection *sect =
- TLOFMacho.getMachOSection("__TEXT", "__picsymbolstub4",
- MCSectionMachO::S_SYMBOL_STUBS,
- 16, SectionKind::getText());
- OutStreamer.SwitchSection(sect);
- }
- }
- }
-
- // Use unified assembler syntax.
- O << "\t.syntax unified\n";
-
- // Emit ARM Build Attributes
- if (Subtarget->isTargetELF()) {
- // CPU Type
- std::string CPUString = Subtarget->getCPUString();
- if (CPUString != "generic")
- O << "\t.cpu " << CPUString << '\n';
-
- // FIXME: Emit FPU type
- if (Subtarget->hasVFP2())
- O << "\t.eabi_attribute " << ARMBuildAttrs::VFP_arch << ", 2\n";
-
- // Signal various FP modes.
- if (!UnsafeFPMath)
- O << "\t.eabi_attribute " << ARMBuildAttrs::ABI_FP_denormal << ", 1\n"
- << "\t.eabi_attribute " << ARMBuildAttrs::ABI_FP_exceptions << ", 1\n";
-
- if (FiniteOnlyFPMath())
- O << "\t.eabi_attribute " << ARMBuildAttrs::ABI_FP_number_model << ", 1\n";
- else
- O << "\t.eabi_attribute " << ARMBuildAttrs::ABI_FP_number_model << ", 3\n";
-
- // 8-byte alignment stuff.
- O << "\t.eabi_attribute " << ARMBuildAttrs::ABI_align8_needed << ", 1\n"
- << "\t.eabi_attribute " << ARMBuildAttrs::ABI_align8_preserved << ", 1\n";
-
- // Hard float. Use both S and D registers and conform to AAPCS-VFP.
- if (Subtarget->isAAPCS_ABI() && FloatABIType == FloatABI::Hard)
- O << "\t.eabi_attribute " << ARMBuildAttrs::ABI_HardFP_use << ", 3\n"
- << "\t.eabi_attribute " << ARMBuildAttrs::ABI_VFP_args << ", 1\n";
-
- // FIXME: Should we signal R9 usage?
- }
-}
-
-
-void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
- if (Subtarget->isTargetDarwin()) {
- // All darwin targets use mach-o.
- TargetLoweringObjectFileMachO &TLOFMacho =
- static_cast<TargetLoweringObjectFileMachO &>(getObjFileLowering());
- MachineModuleInfoMachO &MMIMacho =
- MMI->getObjFileInfo<MachineModuleInfoMachO>();
-
- O << '\n';
-
- // Output non-lazy-pointers for external and common global variables.
- MachineModuleInfoMachO::SymbolListTy Stubs = MMIMacho.GetGVStubList();
-
- if (!Stubs.empty()) {
- // Switch to the ".non_lazy_symbol_pointer" section.
- OutStreamer.SwitchSection(TLOFMacho.getNonLazySymbolPointerSection());
- EmitAlignment(2);
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- O << *Stubs[i].first << ":\n\t.indirect_symbol ";
- O << *Stubs[i].second << "\n\t.long\t0\n";
- }
- }
-
- Stubs = MMIMacho.GetHiddenGVStubList();
- if (!Stubs.empty()) {
- OutStreamer.SwitchSection(getObjFileLowering().getDataSection());
- EmitAlignment(2);
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i)
- O << *Stubs[i].first << ":\n\t.long " << *Stubs[i].second << "\n";
- }
-
- // Funny Darwin hack: This flag tells the linker that no global symbols
- // contain code that falls through to other global symbols (e.g. the obvious
- // implementation of multiple entry points). If this doesn't occur, the
- // linker can safely perform dead code stripping. Since LLVM never
- // generates code that does this, it is always safe to set.
- OutStreamer.EmitAssemblerFlag(MCAF_SubsectionsViaSymbols);
- }
-}
-
-//===----------------------------------------------------------------------===//
-
-void ARMAsmPrinter::printInstructionThroughMCStreamer(const MachineInstr *MI) {
- ARMMCInstLower MCInstLowering(OutContext, *Mang, *this);
- switch (MI->getOpcode()) {
- case ARM::t2MOVi32imm:
- assert(0 && "Should be lowered by thumb2it pass");
- default: break;
- case ARM::PICADD: { // FIXME: Remove asm string from td file.
- // This is a pseudo op for a label + instruction sequence, which looks like:
- // LPC0:
- // add r0, pc, r0
- // This adds the address of LPC0 to r0.
-
- // Emit the label.
- // FIXME: MOVE TO SHARED PLACE.
- unsigned Id = (unsigned)MI->getOperand(2).getImm();
- const char *Prefix = MAI->getPrivateGlobalPrefix();
- MCSymbol *Label = OutContext.GetOrCreateSymbol(Twine(Prefix)
- + "PC" + Twine(getFunctionNumber()) + "_" + Twine(Id));
- OutStreamer.EmitLabel(Label);
-
-
- // Form and emit the add.
- MCInst AddInst;
- AddInst.setOpcode(ARM::ADDrr);
- AddInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
- AddInst.addOperand(MCOperand::CreateReg(ARM::PC));
- AddInst.addOperand(MCOperand::CreateReg(MI->getOperand(1).getReg()));
- OutStreamer.EmitInstruction(AddInst);
- return;
- }
- case ARM::CONSTPOOL_ENTRY: { // FIXME: Remove asm string from td file.
- /// CONSTPOOL_ENTRY - This instruction represents a floating constant-pool
- /// entry in the function. The first operand is the ID# for this instruction,
- /// the second is the index into the MachineConstantPool for this entry, and
- /// the third is the size in bytes of this constant pool entry.
- unsigned LabelId = (unsigned)MI->getOperand(0).getImm();
- unsigned CPIdx = (unsigned)MI->getOperand(1).getIndex();
-
- EmitAlignment(2);
- OutStreamer.EmitLabel(GetCPISymbol(LabelId));
-
- const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPIdx];
- if (MCPE.isMachineConstantPoolEntry())
- EmitMachineConstantPoolValue(MCPE.Val.MachineCPVal);
- else
- EmitGlobalConstant(MCPE.Val.ConstVal);
-
- return;
- }
- case ARM::MOVi2pieces: { // FIXME: Remove asmstring from td file.
- // This is a hack that lowers to a two-instruction sequence.
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned ImmVal = (unsigned)MI->getOperand(1).getImm();
-
- unsigned SOImmValV1 = ARM_AM::getSOImmTwoPartFirst(ImmVal);
- unsigned SOImmValV2 = ARM_AM::getSOImmTwoPartSecond(ImmVal);
-
- {
- MCInst TmpInst;
- TmpInst.setOpcode(ARM::MOVi);
- TmpInst.addOperand(MCOperand::CreateReg(DstReg));
- TmpInst.addOperand(MCOperand::CreateImm(SOImmValV1));
-
- // Predicate.
- TmpInst.addOperand(MCOperand::CreateImm(MI->getOperand(2).getImm()));
- TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(3).getReg()));
-
- TmpInst.addOperand(MCOperand::CreateReg(0)); // cc_out
- OutStreamer.EmitInstruction(TmpInst);
- }
-
- {
- MCInst TmpInst;
- TmpInst.setOpcode(ARM::ORRri);
- TmpInst.addOperand(MCOperand::CreateReg(DstReg)); // dstreg
- TmpInst.addOperand(MCOperand::CreateReg(DstReg)); // inreg
- TmpInst.addOperand(MCOperand::CreateImm(SOImmValV2)); // so_imm
- // Predicate.
- TmpInst.addOperand(MCOperand::CreateImm(MI->getOperand(2).getImm()));
- TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(3).getReg()));
-
- TmpInst.addOperand(MCOperand::CreateReg(0)); // cc_out
- OutStreamer.EmitInstruction(TmpInst);
- }
- return;
- }
- case ARM::MOVi32imm: { // FIXME: Remove asmstring from td file.
- // This is a hack that lowers to a two-instruction sequence.
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned ImmVal = (unsigned)MI->getOperand(1).getImm();
-
- {
- MCInst TmpInst;
- TmpInst.setOpcode(ARM::MOVi16);
- TmpInst.addOperand(MCOperand::CreateReg(DstReg)); // dstreg
- TmpInst.addOperand(MCOperand::CreateImm(ImmVal & 65535)); // lower16(imm)
-
- // Predicate.
- TmpInst.addOperand(MCOperand::CreateImm(MI->getOperand(2).getImm()));
- TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(3).getReg()));
-
- OutStreamer.EmitInstruction(TmpInst);
- }
-
- {
- MCInst TmpInst;
- TmpInst.setOpcode(ARM::MOVTi16);
- TmpInst.addOperand(MCOperand::CreateReg(DstReg)); // dstreg
- TmpInst.addOperand(MCOperand::CreateReg(DstReg)); // srcreg
- TmpInst.addOperand(MCOperand::CreateImm(ImmVal >> 16)); // upper16(imm)
-
- // Predicate.
- TmpInst.addOperand(MCOperand::CreateImm(MI->getOperand(2).getImm()));
- TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(3).getReg()));
-
- OutStreamer.EmitInstruction(TmpInst);
- }
-
- return;
- }
- }
-
- MCInst TmpInst;
- MCInstLowering.Lower(MI, TmpInst);
- OutStreamer.EmitInstruction(TmpInst);
-}
-
-//===----------------------------------------------------------------------===//
-// Target Registry Stuff
-//===----------------------------------------------------------------------===//
-
-static MCInstPrinter *createARMMCInstPrinter(const Target &T,
- unsigned SyntaxVariant,
- const MCAsmInfo &MAI,
- raw_ostream &O) {
- if (SyntaxVariant == 0)
- return new ARMInstPrinter(O, MAI, false);
- return 0;
-}
-
-// Force static initialization.
-extern "C" void LLVMInitializeARMAsmPrinter() {
- RegisterAsmPrinter<ARMAsmPrinter> X(TheARMTarget);
- RegisterAsmPrinter<ARMAsmPrinter> Y(TheThumbTarget);
-
- TargetRegistry::RegisterMCInstPrinter(TheARMTarget, createARMMCInstPrinter);
- TargetRegistry::RegisterMCInstPrinter(TheThumbTarget, createARMMCInstPrinter);
-}
-
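The so_imm printing removed above (printSOImm and its callers) follows the A5.1.3 encoding: an 8-bit immediate rotated right by twice a 4-bit rotate field. A minimal standalone sketch of that decoding, with rotr32 and decodeSOImm as illustrative stand-ins for the ARM_AM helpers:

#include <cstdint>

// Rotate a 32-bit value right by Amt bits (what ARM_AM::rotr32 computes).
static uint32_t rotr32(uint32_t Val, unsigned Amt) {
  Amt &= 31;
  return Amt ? (Val >> Amt) | (Val << (32 - Amt)) : Val;
}

// Decode a 12-bit so_imm: the 8-bit immediate sits in bits 0-7 and the 4-bit
// rotate field in bits 8-11; the rotation applied is twice that field, so
// e.g. 0x2FF decodes to rotr32(0xFF, 4) = 0xF000000F.
static uint32_t decodeSOImm(unsigned SOImm) {
  uint32_t Imm = SOImm & 0xFF;
  unsigned Rot = (SOImm >> 8) & 0xF;
  return rotr32(Imm, 2 * Rot);
}
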
diff --git a/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp b/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp
deleted file mode 100644
index a2084b0..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp
+++ /dev/null
@@ -1,362 +0,0 @@
-//===-- ARMInstPrinter.cpp - Convert ARM MCInst to assembly syntax --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class prints an ARM MCInst to a .s file.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "asm-printer"
-#include "ARM.h" // FIXME: FACTOR ENUMS BETTER.
-#include "ARMInstPrinter.h"
-#include "ARMAddressingModes.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-// Include the auto-generated portion of the assembly writer.
-#define MachineInstr MCInst
-#define ARMAsmPrinter ARMInstPrinter // FIXME: REMOVE.
-#include "ARMGenAsmWriter.inc"
-#undef MachineInstr
-#undef ARMAsmPrinter
-
-void ARMInstPrinter::printInst(const MCInst *MI) { printInstruction(MI); }
-
-void ARMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
- const char *Modifier) {
- const MCOperand &Op = MI->getOperand(OpNo);
- if (Op.isReg()) {
- unsigned Reg = Op.getReg();
- if (Modifier && strcmp(Modifier, "dregpair") == 0) {
- // FIXME: Breaks e.g. ARM/vmul.ll.
- assert(0);
- /*
- unsigned DRegLo = TRI->getSubReg(Reg, 5); // arm_dsubreg_0
- unsigned DRegHi = TRI->getSubReg(Reg, 6); // arm_dsubreg_1
- O << '{'
- << getRegisterName(DRegLo) << ',' << getRegisterName(DRegHi)
- << '}';*/
- } else if (Modifier && strcmp(Modifier, "lane") == 0) {
- assert(0);
- /*
- unsigned RegNum = ARMRegisterInfo::getRegisterNumbering(Reg);
- unsigned DReg = TRI->getMatchingSuperReg(Reg, RegNum & 1 ? 2 : 1,
- &ARM::DPR_VFP2RegClass);
- O << getRegisterName(DReg) << '[' << (RegNum & 1) << ']';
- */
- } else {
- O << getRegisterName(Reg);
- }
- } else if (Op.isImm()) {
- assert((Modifier == 0 || Modifier[0] == 0) && "No modifiers supported");
- O << '#' << Op.getImm();
- } else {
- assert((Modifier == 0 || Modifier[0] == 0) && "No modifiers supported");
- assert(Op.isExpr() && "unknown operand kind in printOperand");
- O << *Op.getExpr();
- }
-}
-
-static void printSOImm(raw_ostream &O, int64_t V, bool VerboseAsm,
- const MCAsmInfo *MAI) {
- // Break it up into two parts that make up a shifter immediate.
- V = ARM_AM::getSOImmVal(V);
- assert(V != -1 && "Not a valid so_imm value!");
-
- unsigned Imm = ARM_AM::getSOImmValImm(V);
- unsigned Rot = ARM_AM::getSOImmValRot(V);
-
- // Print low-level immediate formation info, per
- // A5.1.3: "Data-processing operands - Immediate".
- if (Rot) {
- O << "#" << Imm << ", " << Rot;
- // Pretty printed version.
- if (VerboseAsm)
- O << ' ' << MAI->getCommentString()
- << ' ' << (int)ARM_AM::rotr32(Imm, Rot);
- } else {
- O << "#" << Imm;
- }
-}
-
-
-/// printSOImmOperand - SOImm is a 4-bit rotate amount in bits 8-11 with an
-/// 8-bit immediate in bits 0-7.
-void ARMInstPrinter::printSOImmOperand(const MCInst *MI, unsigned OpNum) {
- const MCOperand &MO = MI->getOperand(OpNum);
- assert(MO.isImm() && "Not a valid so_imm value!");
- printSOImm(O, MO.getImm(), VerboseAsm, &MAI);
-}
-
-/// printSOImm2PartOperand - SOImm is broken into two pieces using a 'mov'
-/// followed by an 'orr' to materialize.
-void ARMInstPrinter::printSOImm2PartOperand(const MCInst *MI, unsigned OpNum) {
- // FIXME: REMOVE this method.
- abort();
-}
-
-// so_reg is a 4-operand unit corresponding to register forms of the A5.1
-// "Addressing Mode 1 - Data-processing operands" forms. This includes:
-// REG 0 0 - e.g. R5
-// REG REG 0,SH_OPC - e.g. R5, ROR R3
-// REG 0 IMM,SH_OPC - e.g. R5, LSL #3
-void ARMInstPrinter::printSORegOperand(const MCInst *MI, unsigned OpNum) {
- const MCOperand &MO1 = MI->getOperand(OpNum);
- const MCOperand &MO2 = MI->getOperand(OpNum+1);
- const MCOperand &MO3 = MI->getOperand(OpNum+2);
-
- O << getRegisterName(MO1.getReg());
-
- // Print the shift opc.
- O << ", "
- << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(MO3.getImm()))
- << ' ';
-
- if (MO2.getReg()) {
- O << getRegisterName(MO2.getReg());
- assert(ARM_AM::getSORegOffset(MO3.getImm()) == 0);
- } else {
- O << "#" << ARM_AM::getSORegOffset(MO3.getImm());
- }
-}
-
-
-void ARMInstPrinter::printAddrMode2Operand(const MCInst *MI, unsigned Op) {
- const MCOperand &MO1 = MI->getOperand(Op);
- const MCOperand &MO2 = MI->getOperand(Op+1);
- const MCOperand &MO3 = MI->getOperand(Op+2);
-
- if (!MO1.isReg()) { // FIXME: This is for CP entries, but isn't right.
- printOperand(MI, Op);
- return;
- }
-
- O << "[" << getRegisterName(MO1.getReg());
-
- if (!MO2.getReg()) {
- if (ARM_AM::getAM2Offset(MO3.getImm())) // Don't print +0.
- O << ", #"
- << (char)ARM_AM::getAM2Op(MO3.getImm())
- << ARM_AM::getAM2Offset(MO3.getImm());
- O << "]";
- return;
- }
-
- O << ", "
- << (char)ARM_AM::getAM2Op(MO3.getImm())
- << getRegisterName(MO2.getReg());
-
- if (unsigned ShImm = ARM_AM::getAM2Offset(MO3.getImm()))
- O << ", "
- << ARM_AM::getShiftOpcStr(ARM_AM::getAM2ShiftOpc(MO3.getImm()))
- << " #" << ShImm;
- O << "]";
-}
-
-void ARMInstPrinter::printAddrMode2OffsetOperand(const MCInst *MI,
- unsigned OpNum) {
- const MCOperand &MO1 = MI->getOperand(OpNum);
- const MCOperand &MO2 = MI->getOperand(OpNum+1);
-
- if (!MO1.getReg()) {
- unsigned ImmOffs = ARM_AM::getAM2Offset(MO2.getImm());
- assert(ImmOffs && "Malformed indexed load / store!");
- O << '#' << (char)ARM_AM::getAM2Op(MO2.getImm()) << ImmOffs;
- return;
- }
-
- O << (char)ARM_AM::getAM2Op(MO2.getImm()) << getRegisterName(MO1.getReg());
-
- if (unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm()))
- O << ", "
- << ARM_AM::getShiftOpcStr(ARM_AM::getAM2ShiftOpc(MO2.getImm()))
- << " #" << ShImm;
-}
-
-void ARMInstPrinter::printAddrMode3Operand(const MCInst *MI, unsigned OpNum) {
- const MCOperand &MO1 = MI->getOperand(OpNum);
- const MCOperand &MO2 = MI->getOperand(OpNum+1);
- const MCOperand &MO3 = MI->getOperand(OpNum+2);
-
- O << '[' << getRegisterName(MO1.getReg());
-
- if (MO2.getReg()) {
- O << ", " << (char)ARM_AM::getAM3Op(MO3.getImm())
- << getRegisterName(MO2.getReg()) << ']';
- return;
- }
-
- if (unsigned ImmOffs = ARM_AM::getAM3Offset(MO3.getImm()))
- O << ", #"
- << (char)ARM_AM::getAM3Op(MO3.getImm())
- << ImmOffs;
- O << ']';
-}
-
-void ARMInstPrinter::printAddrMode3OffsetOperand(const MCInst *MI,
- unsigned OpNum) {
- const MCOperand &MO1 = MI->getOperand(OpNum);
- const MCOperand &MO2 = MI->getOperand(OpNum+1);
-
- if (MO1.getReg()) {
- O << (char)ARM_AM::getAM3Op(MO2.getImm())
- << getRegisterName(MO1.getReg());
- return;
- }
-
- unsigned ImmOffs = ARM_AM::getAM3Offset(MO2.getImm());
- assert(ImmOffs && "Malformed indexed load / store!");
- O << "#"
- << (char)ARM_AM::getAM3Op(MO2.getImm())
- << ImmOffs;
-}
-
-
-void ARMInstPrinter::printAddrMode4Operand(const MCInst *MI, unsigned OpNum,
- const char *Modifier) {
- const MCOperand &MO1 = MI->getOperand(OpNum);
- const MCOperand &MO2 = MI->getOperand(OpNum+1);
- ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MO2.getImm());
- if (Modifier && strcmp(Modifier, "submode") == 0) {
- if (MO1.getReg() == ARM::SP) {
- // FIXME
- bool isLDM = (MI->getOpcode() == ARM::LDM ||
- MI->getOpcode() == ARM::LDM_RET ||
- MI->getOpcode() == ARM::t2LDM ||
- MI->getOpcode() == ARM::t2LDM_RET);
- O << ARM_AM::getAMSubModeAltStr(Mode, isLDM);
- } else
- O << ARM_AM::getAMSubModeStr(Mode);
- } else if (Modifier && strcmp(Modifier, "wide") == 0) {
- ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MO2.getImm());
- if (Mode == ARM_AM::ia)
- O << ".w";
- } else {
- printOperand(MI, OpNum);
- if (ARM_AM::getAM4WBFlag(MO2.getImm()))
- O << "!";
- }
-}
-
-void ARMInstPrinter::printAddrMode5Operand(const MCInst *MI, unsigned OpNum,
- const char *Modifier) {
- const MCOperand &MO1 = MI->getOperand(OpNum);
- const MCOperand &MO2 = MI->getOperand(OpNum+1);
-
- if (!MO1.isReg()) { // FIXME: This is for CP entries, but isn't right.
- printOperand(MI, OpNum);
- return;
- }
-
- if (Modifier && strcmp(Modifier, "submode") == 0) {
- ARM_AM::AMSubMode Mode = ARM_AM::getAM5SubMode(MO2.getImm());
- O << ARM_AM::getAMSubModeStr(Mode);
- return;
- } else if (Modifier && strcmp(Modifier, "base") == 0) {
- // Used for FSTM{D|S} and FLDM{D|S} operations.
- O << getRegisterName(MO1.getReg());
- if (ARM_AM::getAM5WBFlag(MO2.getImm()))
- O << "!";
- return;
- }
-
- O << "[" << getRegisterName(MO1.getReg());
-
- if (unsigned ImmOffs = ARM_AM::getAM5Offset(MO2.getImm())) {
- O << ", #"
- << (char)ARM_AM::getAM5Op(MO2.getImm())
- << ImmOffs*4;
- }
- O << "]";
-}
-
-void ARMInstPrinter::printAddrMode6Operand(const MCInst *MI, unsigned OpNum) {
- const MCOperand &MO1 = MI->getOperand(OpNum);
- const MCOperand &MO2 = MI->getOperand(OpNum+1);
- const MCOperand &MO3 = MI->getOperand(OpNum+2);
-
- // FIXME: No support yet for specifying alignment.
- O << '[' << getRegisterName(MO1.getReg()) << ']';
-
- if (ARM_AM::getAM6WBFlag(MO3.getImm())) {
- if (MO2.getReg() == 0)
- O << '!';
- else
- O << ", " << getRegisterName(MO2.getReg());
- }
-}
-
-void ARMInstPrinter::printAddrModePCOperand(const MCInst *MI, unsigned OpNum,
- const char *Modifier) {
- assert(0 && "FIXME: Implement printAddrModePCOperand");
-}
-
-void ARMInstPrinter::printBitfieldInvMaskImmOperand (const MCInst *MI,
- unsigned OpNum) {
- const MCOperand &MO = MI->getOperand(OpNum);
- uint32_t v = ~MO.getImm();
- int32_t lsb = CountTrailingZeros_32(v);
- int32_t width = (32 - CountLeadingZeros_32 (v)) - lsb;
- assert(MO.isImm() && "Not a valid bf_inv_mask_imm value!");
- O << '#' << lsb << ", #" << width;
-}
-
-void ARMInstPrinter::printRegisterList(const MCInst *MI, unsigned OpNum) {
- O << "{";
- // Always skip the first operand, it's the optional (and implicit) writeback.
- for (unsigned i = OpNum+1, e = MI->getNumOperands(); i != e; ++i) {
- if (i != OpNum+1) O << ", ";
- O << getRegisterName(MI->getOperand(i).getReg());
- }
- O << "}";
-}
-
-void ARMInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNum) {
- ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(OpNum).getImm();
- if (CC != ARMCC::AL)
- O << ARMCondCodeToString(CC);
-}
-
-void ARMInstPrinter::printMandatoryPredicateOperand(const MCInst *MI,
- unsigned OpNum) {
- ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(OpNum).getImm();
- O << ARMCondCodeToString(CC);
-}
-
-void ARMInstPrinter::printSBitModifierOperand(const MCInst *MI, unsigned OpNum){
- if (MI->getOperand(OpNum).getReg()) {
- assert(MI->getOperand(OpNum).getReg() == ARM::CPSR &&
- "Expect ARM CPSR register!");
- O << 's';
- }
-}
-
-
-
-void ARMInstPrinter::printCPInstOperand(const MCInst *MI, unsigned OpNum,
- const char *Modifier) {
- // FIXME: remove this.
- abort();
-}
-
-void ARMInstPrinter::printNoHashImmediate(const MCInst *MI, unsigned OpNum) {
- O << MI->getOperand(OpNum).getImm();
-}
-
-
-void ARMInstPrinter::printPCLabel(const MCInst *MI, unsigned OpNum) {
- // FIXME: remove this.
- abort();
-}
-
-void ARMInstPrinter::printThumbS4ImmOperand(const MCInst *MI, unsigned OpNum) {
- O << "#" << MI->getOperand(OpNum).getImm() * 4;
-}
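
printBitfieldInvMaskImmOperand above recovers the #lsb, #width pair for BFC/BFI from an inverted field mask. A rough equivalent, using compiler builtins in place of the LLVM CountTrailingZeros_32/CountLeadingZeros_32 helpers (bitfieldFromInvMask is an illustrative name):

#include <cstdint>

// Mask has every bit set except the field being cleared or inserted, so a
// valid bf_inv_mask_imm always gives a non-zero ~Mask.
static void bitfieldFromInvMask(uint32_t Mask, int &Lsb, int &Width) {
  uint32_t V = ~Mask;                     // the bits of the field itself
  Lsb = __builtin_ctz(V);                 // index of the lowest field bit
  Width = (32 - __builtin_clz(V)) - Lsb;  // highest field bit + 1, minus lsb
}

// Example: clearing bits 8-11 (Mask = 0xFFFFF0FF) gives Lsb = 8, Width = 4,
// i.e. the "#8, #4" the printer emits.
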
diff --git a/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h b/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h
deleted file mode 100644
index b7964c9..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h
+++ /dev/null
@@ -1,97 +0,0 @@
-//===-- ARMInstPrinter.h - Convert ARM MCInst to assembly syntax ----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class prints an ARM MCInst to a .s file.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARMINSTPRINTER_H
-#define ARMINSTPRINTER_H
-
-#include "llvm/MC/MCInstPrinter.h"
-
-namespace llvm {
- class MCOperand;
-
-class ARMInstPrinter : public MCInstPrinter {
- bool VerboseAsm;
-public:
- ARMInstPrinter(raw_ostream &O, const MCAsmInfo &MAI, bool verboseAsm)
- : MCInstPrinter(O, MAI), VerboseAsm(verboseAsm) {}
-
- virtual void printInst(const MCInst *MI);
-
- // Autogenerated by tblgen.
- void printInstruction(const MCInst *MI);
- static const char *getRegisterName(unsigned RegNo);
-
-
- void printOperand(const MCInst *MI, unsigned OpNo,
- const char *Modifier = 0);
-
- void printSOImmOperand(const MCInst *MI, unsigned OpNum);
- void printSOImm2PartOperand(const MCInst *MI, unsigned OpNum);
-
- void printSORegOperand(const MCInst *MI, unsigned OpNum);
- void printAddrMode2Operand(const MCInst *MI, unsigned OpNum);
- void printAddrMode2OffsetOperand(const MCInst *MI, unsigned OpNum);
- void printAddrMode3Operand(const MCInst *MI, unsigned OpNum);
- void printAddrMode3OffsetOperand(const MCInst *MI, unsigned OpNum);
- void printAddrMode4Operand(const MCInst *MI, unsigned OpNum,
- const char *Modifier = 0);
- void printAddrMode5Operand(const MCInst *MI, unsigned OpNum,
- const char *Modifier = 0);
- void printAddrMode6Operand(const MCInst *MI, unsigned OpNum);
- void printAddrModePCOperand(const MCInst *MI, unsigned OpNum,
- const char *Modifier = 0);
-
- void printBitfieldInvMaskImmOperand(const MCInst *MI, unsigned OpNum);
-
- void printThumbS4ImmOperand(const MCInst *MI, unsigned OpNum);
- void printThumbITMask(const MCInst *MI, unsigned OpNum) {}
- void printThumbAddrModeRROperand(const MCInst *MI, unsigned OpNum) {}
- void printThumbAddrModeRI5Operand(const MCInst *MI, unsigned OpNum,
- unsigned Scale) {}
- void printThumbAddrModeS1Operand(const MCInst *MI, unsigned OpNum) {}
- void printThumbAddrModeS2Operand(const MCInst *MI, unsigned OpNum) {}
- void printThumbAddrModeS4Operand(const MCInst *MI, unsigned OpNum) {}
- void printThumbAddrModeSPOperand(const MCInst *MI, unsigned OpNum) {}
-
- void printT2SOOperand(const MCInst *MI, unsigned OpNum) {}
- void printT2AddrModeImm12Operand(const MCInst *MI, unsigned OpNum) {}
- void printT2AddrModeImm8Operand(const MCInst *MI, unsigned OpNum) {}
- void printT2AddrModeImm8s4Operand(const MCInst *MI, unsigned OpNum) {}
- void printT2AddrModeImm8OffsetOperand(const MCInst *MI, unsigned OpNum) {}
- void printT2AddrModeSoRegOperand(const MCInst *MI, unsigned OpNum) {}
-
- void printPredicateOperand(const MCInst *MI, unsigned OpNum);
- void printMandatoryPredicateOperand(const MCInst *MI, unsigned OpNum);
- void printSBitModifierOperand(const MCInst *MI, unsigned OpNum);
- void printRegisterList(const MCInst *MI, unsigned OpNum);
- void printCPInstOperand(const MCInst *MI, unsigned OpNum,
- const char *Modifier);
- void printJTBlockOperand(const MCInst *MI, unsigned OpNum) {}
- void printJT2BlockOperand(const MCInst *MI, unsigned OpNum) {}
- void printTBAddrMode(const MCInst *MI, unsigned OpNum) {}
- void printNoHashImmediate(const MCInst *MI, unsigned OpNum);
- void printVFPf32ImmOperand(const MCInst *MI, int OpNum) {}
- void printVFPf64ImmOperand(const MCInst *MI, int OpNum) {}
- void printHex8ImmOperand(const MCInst *MI, int OpNum) {}
- void printHex16ImmOperand(const MCInst *MI, int OpNum) {}
- void printHex32ImmOperand(const MCInst *MI, int OpNum) {}
- void printHex64ImmOperand(const MCInst *MI, int OpNum) {}
-
- void printPCLabel(const MCInst *MI, unsigned OpNum);
- // FIXME: Implement.
- void PrintSpecial(const MCInst *MI, const char *Kind) {}
-};
-
-}
-
-#endif
diff --git a/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/ARMMCInstLower.cpp b/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/ARMMCInstLower.cpp
deleted file mode 100644
index 1b2dd48..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/ARMMCInstLower.cpp
+++ /dev/null
@@ -1,161 +0,0 @@
-//===-- ARMMCInstLower.cpp - Convert ARM MachineInstr to an MCInst --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains code to lower ARM MachineInstrs to their corresponding
-// MCInst records.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARMMCInstLower.h"
-//#include "llvm/CodeGen/MachineModuleInfoImpls.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-//#include "llvm/MC/MCStreamer.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/SmallString.h"
-using namespace llvm;
-
-
-#if 0
-const ARMSubtarget &ARMMCInstLower::getSubtarget() const {
- return AsmPrinter.getSubtarget();
-}
-
-MachineModuleInfoMachO &ARMMCInstLower::getMachOMMI() const {
- assert(getSubtarget().isTargetDarwin() &&"Can only get MachO info on darwin");
- return AsmPrinter.MMI->getObjFileInfo<MachineModuleInfoMachO>();
-}
-#endif
-
-MCSymbol *ARMMCInstLower::
-GetGlobalAddressSymbol(const MachineOperand &MO) const {
- // FIXME: HANDLE PLT references how??
- switch (MO.getTargetFlags()) {
- default: assert(0 && "Unknown target flag on GV operand");
- case 0: break;
- }
-
- return Printer.GetGlobalValueSymbol(MO.getGlobal());
-}
-
-MCSymbol *ARMMCInstLower::
-GetExternalSymbolSymbol(const MachineOperand &MO) const {
- // FIXME: HANDLE PLT references how??
- switch (MO.getTargetFlags()) {
- default: assert(0 && "Unknown target flag on GV operand");
- case 0: break;
- }
-
- return Printer.GetExternalSymbolSymbol(MO.getSymbolName());
-}
-
-
-
-MCSymbol *ARMMCInstLower::
-GetJumpTableSymbol(const MachineOperand &MO) const {
- SmallString<256> Name;
- raw_svector_ostream(Name) << Printer.MAI->getPrivateGlobalPrefix() << "JTI"
- << Printer.getFunctionNumber() << '_' << MO.getIndex();
-
-#if 0
- switch (MO.getTargetFlags()) {
- default: llvm_unreachable("Unknown target flag on GV operand");
- }
-#endif
-
- // Create a symbol for the name.
- return Ctx.GetOrCreateSymbol(Name.str());
-}
-
-MCSymbol *ARMMCInstLower::
-GetConstantPoolIndexSymbol(const MachineOperand &MO) const {
- SmallString<256> Name;
- raw_svector_ostream(Name) << Printer.MAI->getPrivateGlobalPrefix() << "CPI"
- << Printer.getFunctionNumber() << '_' << MO.getIndex();
-
-#if 0
- switch (MO.getTargetFlags()) {
- default: llvm_unreachable("Unknown target flag on GV operand");
- }
-#endif
-
- // Create a symbol for the name.
- return Ctx.GetOrCreateSymbol(Name.str());
-}
-
-MCOperand ARMMCInstLower::
-LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const {
- // FIXME: We would like an efficient form for this, so we don't have to do a
- // lot of extra uniquing.
- const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, Ctx);
-
-#if 0
- switch (MO.getTargetFlags()) {
- default: llvm_unreachable("Unknown target flag on GV operand");
- }
-#endif
-
- if (!MO.isJTI() && MO.getOffset())
- Expr = MCBinaryExpr::CreateAdd(Expr,
- MCConstantExpr::Create(MO.getOffset(), Ctx),
- Ctx);
- return MCOperand::CreateExpr(Expr);
-}
-
-
-void ARMMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
- OutMI.setOpcode(MI->getOpcode());
-
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
-
- MCOperand MCOp;
- switch (MO.getType()) {
- default:
- MI->dump();
- assert(0 && "unknown operand type");
- case MachineOperand::MO_Register:
- // Ignore all implicit register operands.
- if (MO.isImplicit()) continue;
- assert(!MO.getSubReg() && "Subregs should be eliminated!");
- MCOp = MCOperand::CreateReg(MO.getReg());
- break;
- case MachineOperand::MO_Immediate:
- MCOp = MCOperand::CreateImm(MO.getImm());
- break;
- case MachineOperand::MO_MachineBasicBlock:
- MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
- MO.getMBB()->getSymbol(Ctx), Ctx));
- break;
- case MachineOperand::MO_GlobalAddress:
- MCOp = LowerSymbolOperand(MO, GetGlobalAddressSymbol(MO));
- break;
- case MachineOperand::MO_ExternalSymbol:
- MCOp = LowerSymbolOperand(MO, GetExternalSymbolSymbol(MO));
- break;
- case MachineOperand::MO_JumpTableIndex:
- MCOp = LowerSymbolOperand(MO, GetJumpTableSymbol(MO));
- break;
- case MachineOperand::MO_ConstantPoolIndex:
- MCOp = LowerSymbolOperand(MO, GetConstantPoolIndexSymbol(MO));
- break;
- case MachineOperand::MO_BlockAddress:
- MCOp = LowerSymbolOperand(MO, Printer.GetBlockAddressSymbol(
- MO.getBlockAddress()));
- break;
- }
-
- OutMI.addOperand(MCOp);
- }
-
-}
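
LowerSymbolOperand above wraps a symbol in an MCSymbolRefExpr and, when the operand carries an offset, folds it in with an MCBinaryExpr add. The same pattern in isolation, using only MC APIs already referenced in this file (makeSymbolPlusOffset is an illustrative name):

#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
using namespace llvm;

// Build an MCOperand for "Sym + Offset", as the deleted lowering does for
// global-address operands that carry a non-zero offset.
static MCOperand makeSymbolPlusOffset(MCSymbol *Sym, int64_t Offset,
                                      MCContext &Ctx) {
  const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, Ctx);
  if (Offset)
    Expr = MCBinaryExpr::CreateAdd(Expr, MCConstantExpr::Create(Offset, Ctx),
                                   Ctx);
  return MCOperand::CreateExpr(Expr);
}
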
diff --git a/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/ARMMCInstLower.h b/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/ARMMCInstLower.h
deleted file mode 100644
index 383d30d..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/ARMMCInstLower.h
+++ /dev/null
@@ -1,56 +0,0 @@
-//===-- ARMMCInstLower.h - Lower MachineInstr to MCInst -------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef ARM_MCINSTLOWER_H
-#define ARM_MCINSTLOWER_H
-
-#include "llvm/Support/Compiler.h"
-
-namespace llvm {
- class AsmPrinter;
- class MCAsmInfo;
- class MCContext;
- class MCInst;
- class MCOperand;
- class MCSymbol;
- class MachineInstr;
- class MachineModuleInfoMachO;
- class MachineOperand;
- class Mangler;
- //class ARMSubtarget;
-
-/// ARMMCInstLower - This class is used to lower a MachineInstr into an MCInst.
-class VISIBILITY_HIDDEN ARMMCInstLower {
- MCContext &Ctx;
- Mangler &Mang;
- AsmPrinter &Printer;
-
- //const ARMSubtarget &getSubtarget() const;
-public:
- ARMMCInstLower(MCContext &ctx, Mangler &mang, AsmPrinter &printer)
- : Ctx(ctx), Mang(mang), Printer(printer) {}
-
- void Lower(const MachineInstr *MI, MCInst &OutMI) const;
-
- //MCSymbol *GetPICBaseSymbol() const;
- MCSymbol *GetGlobalAddressSymbol(const MachineOperand &MO) const;
- MCSymbol *GetExternalSymbolSymbol(const MachineOperand &MO) const;
- MCSymbol *GetJumpTableSymbol(const MachineOperand &MO) const;
- MCSymbol *GetConstantPoolIndexSymbol(const MachineOperand &MO) const;
- MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
-
-/*
-private:
- MachineModuleInfoMachO &getMachOMMI() const;
- */
-};
-
-}
-
-#endif
diff --git a/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/CMakeLists.txt b/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/CMakeLists.txt
deleted file mode 100644
index 4e299f8..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/CMakeLists.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
-
-add_llvm_library(LLVMARMAsmPrinter
- ARMAsmPrinter.cpp
- ARMInstPrinter.cpp
- ARMMCInstLower.cpp
- )
-add_dependencies(LLVMARMAsmPrinter ARMCodeGenTable_gen)
diff --git a/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/Makefile b/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/Makefile
deleted file mode 100644
index 208becc..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/AsmPrinter/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/ARM/AsmPrinter/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMARMAsmPrinter
-
-# Hack: we need to include 'main' arm target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/lib/Target/ARM/CMakeLists.txt b/libclamav/c++/llvm/lib/Target/ARM/CMakeLists.txt
deleted file mode 100644
index 964551f..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/CMakeLists.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-set(LLVM_TARGET_DEFINITIONS ARM.td)
-
-tablegen(ARMGenRegisterInfo.h.inc -gen-register-desc-header)
-tablegen(ARMGenRegisterNames.inc -gen-register-enums)
-tablegen(ARMGenRegisterInfo.inc -gen-register-desc)
-tablegen(ARMGenInstrNames.inc -gen-instr-enums)
-tablegen(ARMGenInstrInfo.inc -gen-instr-desc)
-tablegen(ARMGenCodeEmitter.inc -gen-emitter)
-tablegen(ARMGenAsmWriter.inc -gen-asm-writer)
-tablegen(ARMGenDAGISel.inc -gen-dag-isel)
-tablegen(ARMGenCallingConv.inc -gen-callingconv)
-tablegen(ARMGenSubtarget.inc -gen-subtarget)
-
-add_llvm_target(ARMCodeGen
- ARMBaseInstrInfo.cpp
- ARMBaseRegisterInfo.cpp
- ARMCodeEmitter.cpp
- ARMConstantIslandPass.cpp
- ARMConstantPoolValue.cpp
- ARMExpandPseudoInsts.cpp
- ARMISelDAGToDAG.cpp
- ARMISelLowering.cpp
- ARMInstrInfo.cpp
- ARMJITInfo.cpp
- ARMLoadStoreOptimizer.cpp
- ARMMCAsmInfo.cpp
- ARMRegisterInfo.cpp
- ARMSubtarget.cpp
- ARMTargetMachine.cpp
- NEONMoveFix.cpp
- NEONPreAllocPass.cpp
- Thumb1InstrInfo.cpp
- Thumb1RegisterInfo.cpp
- Thumb2ITBlockPass.cpp
- Thumb2InstrInfo.cpp
- Thumb2RegisterInfo.cpp
- Thumb2SizeReduction.cpp
- )
-
-target_link_libraries (LLVMARMCodeGen LLVMSelectionDAG)
diff --git a/libclamav/c++/llvm/lib/Target/ARM/Makefile b/libclamav/c++/llvm/lib/Target/ARM/Makefile
deleted file mode 100644
index a8dd38c..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-##===- lib/Target/ARM/Makefile -----------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMARMCodeGen
-TARGET = ARM
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = ARMGenRegisterInfo.h.inc ARMGenRegisterNames.inc \
- ARMGenRegisterInfo.inc ARMGenInstrNames.inc \
- ARMGenInstrInfo.inc ARMGenAsmWriter.inc \
- ARMGenDAGISel.inc ARMGenSubtarget.inc \
- ARMGenCodeEmitter.inc ARMGenCallingConv.inc
-
-DIRS = AsmPrinter AsmParser TargetInfo
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/lib/Target/ARM/NEONMoveFix.cpp b/libclamav/c++/llvm/lib/Target/ARM/NEONMoveFix.cpp
deleted file mode 100644
index 3c0414d..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/NEONMoveFix.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-//===-- NEONMoveFix.cpp - Convert vfp reg-reg moves into neon ---*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "neon-mov-fix"
-#include "ARM.h"
-#include "ARMMachineFunctionInfo.h"
-#include "ARMInstrInfo.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-STATISTIC(NumVMovs, "Number of reg-reg moves converted");
-
-namespace {
- struct NEONMoveFixPass : public MachineFunctionPass {
- static char ID;
- NEONMoveFixPass() : MachineFunctionPass(&ID) {}
-
- virtual bool runOnMachineFunction(MachineFunction &Fn);
-
- virtual const char *getPassName() const {
- return "NEON reg-reg move conversion";
- }
-
- private:
- const TargetRegisterInfo *TRI;
- const ARMBaseInstrInfo *TII;
-
- typedef DenseMap<unsigned, const MachineInstr*> RegMap;
-
- bool InsertMoves(MachineBasicBlock &MBB);
- };
- char NEONMoveFixPass::ID = 0;
-}
-
-bool NEONMoveFixPass::InsertMoves(MachineBasicBlock &MBB) {
- RegMap Defs;
- bool Modified = false;
-
- // Walk over MBB tracking the def points of the registers.
- MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
- MachineBasicBlock::iterator NextMII;
- for (; MII != E; MII = NextMII) {
- NextMII = llvm::next(MII);
- MachineInstr *MI = &*MII;
-
- if (MI->getOpcode() == ARM::VMOVD &&
- !TII->isPredicated(MI)) {
- unsigned SrcReg = MI->getOperand(1).getReg();
- // If we do not find an instruction defining the reg, this means the
- // register should be live-in for this BB. It's always better to use
- // NEON reg-reg moves.
- unsigned Domain = ARMII::DomainNEON;
- RegMap::iterator DefMI = Defs.find(SrcReg);
- if (DefMI != Defs.end()) {
- Domain = DefMI->second->getDesc().TSFlags & ARMII::DomainMask;
- // Instructions in general domain are subreg accesses.
- // Map them to NEON reg-reg moves.
- if (Domain == ARMII::DomainGeneral)
- Domain = ARMII::DomainNEON;
- }
-
- if (Domain & ARMII::DomainNEON) {
- // Convert VMOVD to VMOVDneon
- unsigned DestReg = MI->getOperand(0).getReg();
-
- DEBUG({errs() << "vmov convert: "; MI->dump();});
-
- // It's safe to ignore imp-defs / imp-uses here, since:
- // - We're running late, no intelligent codegen passes should be run
- // afterwards
- // - The imp-defs / imp-uses are superregs only, we don't care about
- // them.
- AddDefaultPred(BuildMI(MBB, *MI, MI->getDebugLoc(),
- TII->get(ARM::VMOVDneon), DestReg).addReg(SrcReg));
- MBB.erase(MI);
- MachineBasicBlock::iterator I = prior(NextMII);
- MI = &*I;
-
- DEBUG({errs() << " into: "; MI->dump();});
-
- Modified = true;
- ++NumVMovs;
- } else {
- assert((Domain & ARMII::DomainVFP) && "Invalid domain!");
- // Do nothing.
- }
- }
-
- // Update def information.
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand& MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isDef())
- continue;
- unsigned MOReg = MO.getReg();
-
- Defs[MOReg] = MI;
- // Catch subregs as well.
- for (const unsigned *R = TRI->getSubRegisters(MOReg); *R; ++R)
- Defs[*R] = MI;
- }
- }
-
- return Modified;
-}
-
-bool NEONMoveFixPass::runOnMachineFunction(MachineFunction &Fn) {
- ARMFunctionInfo *AFI = Fn.getInfo<ARMFunctionInfo>();
- const TargetMachine &TM = Fn.getTarget();
-
- if (AFI->isThumbFunction())
- return false;
-
- TRI = TM.getRegisterInfo();
- TII = static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo());
-
- bool Modified = false;
- for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
- ++MFI) {
- MachineBasicBlock &MBB = *MFI;
- Modified |= InsertMoves(MBB);
- }
-
- return Modified;
-}
-
-/// createNEONMoveFixPass - Returns an instance of the NEON reg-reg moves fix
-/// pass.
-FunctionPass *llvm::createNEONMoveFixPass() {
- return new NEONMoveFixPass();
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/NEONPreAllocPass.cpp b/libclamav/c++/llvm/lib/Target/ARM/NEONPreAllocPass.cpp
deleted file mode 100644
index d9942c8..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/NEONPreAllocPass.cpp
+++ /dev/null
@@ -1,394 +0,0 @@
-//===-- NEONPreAllocPass.cpp - Allocate adjacent NEON registers--*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "neon-prealloc"
-#include "ARM.h"
-#include "ARMInstrInfo.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-using namespace llvm;
-
-namespace {
- class NEONPreAllocPass : public MachineFunctionPass {
- const TargetInstrInfo *TII;
-
- public:
- static char ID;
- NEONPreAllocPass() : MachineFunctionPass(&ID) {}
-
- virtual bool runOnMachineFunction(MachineFunction &MF);
-
- virtual const char *getPassName() const {
- return "NEON register pre-allocation pass";
- }
-
- private:
- bool PreAllocNEONRegisters(MachineBasicBlock &MBB);
- };
-
- char NEONPreAllocPass::ID = 0;
-}
-
-static bool isNEONMultiRegOp(int Opcode, unsigned &FirstOpnd, unsigned &NumRegs,
- unsigned &Offset, unsigned &Stride) {
- // Default to unit stride with no offset.
- Stride = 1;
- Offset = 0;
-
- switch (Opcode) {
- default:
- break;
-
- case ARM::VLD2d8:
- case ARM::VLD2d16:
- case ARM::VLD2d32:
- case ARM::VLD2d64:
- case ARM::VLD2LNd8:
- case ARM::VLD2LNd16:
- case ARM::VLD2LNd32:
- FirstOpnd = 0;
- NumRegs = 2;
- return true;
-
- case ARM::VLD2q8:
- case ARM::VLD2q16:
- case ARM::VLD2q32:
- FirstOpnd = 0;
- NumRegs = 4;
- return true;
-
- case ARM::VLD2LNq16a:
- case ARM::VLD2LNq32a:
- FirstOpnd = 0;
- NumRegs = 2;
- Offset = 0;
- Stride = 2;
- return true;
-
- case ARM::VLD2LNq16b:
- case ARM::VLD2LNq32b:
- FirstOpnd = 0;
- NumRegs = 2;
- Offset = 1;
- Stride = 2;
- return true;
-
- case ARM::VLD3d8:
- case ARM::VLD3d16:
- case ARM::VLD3d32:
- case ARM::VLD3d64:
- case ARM::VLD3LNd8:
- case ARM::VLD3LNd16:
- case ARM::VLD3LNd32:
- FirstOpnd = 0;
- NumRegs = 3;
- return true;
-
- case ARM::VLD3q8a:
- case ARM::VLD3q16a:
- case ARM::VLD3q32a:
- FirstOpnd = 0;
- NumRegs = 3;
- Offset = 0;
- Stride = 2;
- return true;
-
- case ARM::VLD3q8b:
- case ARM::VLD3q16b:
- case ARM::VLD3q32b:
- FirstOpnd = 0;
- NumRegs = 3;
- Offset = 1;
- Stride = 2;
- return true;
-
- case ARM::VLD3LNq16a:
- case ARM::VLD3LNq32a:
- FirstOpnd = 0;
- NumRegs = 3;
- Offset = 0;
- Stride = 2;
- return true;
-
- case ARM::VLD3LNq16b:
- case ARM::VLD3LNq32b:
- FirstOpnd = 0;
- NumRegs = 3;
- Offset = 1;
- Stride = 2;
- return true;
-
- case ARM::VLD4d8:
- case ARM::VLD4d16:
- case ARM::VLD4d32:
- case ARM::VLD4d64:
- case ARM::VLD4LNd8:
- case ARM::VLD4LNd16:
- case ARM::VLD4LNd32:
- FirstOpnd = 0;
- NumRegs = 4;
- return true;
-
- case ARM::VLD4q8a:
- case ARM::VLD4q16a:
- case ARM::VLD4q32a:
- FirstOpnd = 0;
- NumRegs = 4;
- Offset = 0;
- Stride = 2;
- return true;
-
- case ARM::VLD4q8b:
- case ARM::VLD4q16b:
- case ARM::VLD4q32b:
- FirstOpnd = 0;
- NumRegs = 4;
- Offset = 1;
- Stride = 2;
- return true;
-
- case ARM::VLD4LNq16a:
- case ARM::VLD4LNq32a:
- FirstOpnd = 0;
- NumRegs = 4;
- Offset = 0;
- Stride = 2;
- return true;
-
- case ARM::VLD4LNq16b:
- case ARM::VLD4LNq32b:
- FirstOpnd = 0;
- NumRegs = 4;
- Offset = 1;
- Stride = 2;
- return true;
-
- case ARM::VST2d8:
- case ARM::VST2d16:
- case ARM::VST2d32:
- case ARM::VST2d64:
- case ARM::VST2LNd8:
- case ARM::VST2LNd16:
- case ARM::VST2LNd32:
- FirstOpnd = 4;
- NumRegs = 2;
- return true;
-
- case ARM::VST2q8:
- case ARM::VST2q16:
- case ARM::VST2q32:
- FirstOpnd = 4;
- NumRegs = 4;
- return true;
-
- case ARM::VST2LNq16a:
- case ARM::VST2LNq32a:
- FirstOpnd = 4;
- NumRegs = 2;
- Offset = 0;
- Stride = 2;
- return true;
-
- case ARM::VST2LNq16b:
- case ARM::VST2LNq32b:
- FirstOpnd = 4;
- NumRegs = 2;
- Offset = 1;
- Stride = 2;
- return true;
-
- case ARM::VST3d8:
- case ARM::VST3d16:
- case ARM::VST3d32:
- case ARM::VST3d64:
- case ARM::VST3LNd8:
- case ARM::VST3LNd16:
- case ARM::VST3LNd32:
- FirstOpnd = 4;
- NumRegs = 3;
- return true;
-
- case ARM::VST3q8a:
- case ARM::VST3q16a:
- case ARM::VST3q32a:
- FirstOpnd = 5;
- NumRegs = 3;
- Offset = 0;
- Stride = 2;
- return true;
-
- case ARM::VST3q8b:
- case ARM::VST3q16b:
- case ARM::VST3q32b:
- FirstOpnd = 5;
- NumRegs = 3;
- Offset = 1;
- Stride = 2;
- return true;
-
- case ARM::VST3LNq16a:
- case ARM::VST3LNq32a:
- FirstOpnd = 4;
- NumRegs = 3;
- Offset = 0;
- Stride = 2;
- return true;
-
- case ARM::VST3LNq16b:
- case ARM::VST3LNq32b:
- FirstOpnd = 4;
- NumRegs = 3;
- Offset = 1;
- Stride = 2;
- return true;
-
- case ARM::VST4d8:
- case ARM::VST4d16:
- case ARM::VST4d32:
- case ARM::VST4d64:
- case ARM::VST4LNd8:
- case ARM::VST4LNd16:
- case ARM::VST4LNd32:
- FirstOpnd = 4;
- NumRegs = 4;
- return true;
-
- case ARM::VST4q8a:
- case ARM::VST4q16a:
- case ARM::VST4q32a:
- FirstOpnd = 5;
- NumRegs = 4;
- Offset = 0;
- Stride = 2;
- return true;
-
- case ARM::VST4q8b:
- case ARM::VST4q16b:
- case ARM::VST4q32b:
- FirstOpnd = 5;
- NumRegs = 4;
- Offset = 1;
- Stride = 2;
- return true;
-
- case ARM::VST4LNq16a:
- case ARM::VST4LNq32a:
- FirstOpnd = 4;
- NumRegs = 4;
- Offset = 0;
- Stride = 2;
- return true;
-
- case ARM::VST4LNq16b:
- case ARM::VST4LNq32b:
- FirstOpnd = 4;
- NumRegs = 4;
- Offset = 1;
- Stride = 2;
- return true;
-
- case ARM::VTBL2:
- FirstOpnd = 1;
- NumRegs = 2;
- return true;
-
- case ARM::VTBL3:
- FirstOpnd = 1;
- NumRegs = 3;
- return true;
-
- case ARM::VTBL4:
- FirstOpnd = 1;
- NumRegs = 4;
- return true;
-
- case ARM::VTBX2:
- FirstOpnd = 2;
- NumRegs = 2;
- return true;
-
- case ARM::VTBX3:
- FirstOpnd = 2;
- NumRegs = 3;
- return true;
-
- case ARM::VTBX4:
- FirstOpnd = 2;
- NumRegs = 4;
- return true;
- }
-
- return false;
-}
-
-bool NEONPreAllocPass::PreAllocNEONRegisters(MachineBasicBlock &MBB) {
- bool Modified = false;
-
- MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
- for (; MBBI != E; ++MBBI) {
- MachineInstr *MI = &*MBBI;
- unsigned FirstOpnd, NumRegs, Offset, Stride;
- if (!isNEONMultiRegOp(MI->getOpcode(), FirstOpnd, NumRegs, Offset, Stride))
- continue;
-
- MachineBasicBlock::iterator NextI = llvm::next(MBBI);
- for (unsigned R = 0; R < NumRegs; ++R) {
- MachineOperand &MO = MI->getOperand(FirstOpnd + R);
- assert(MO.isReg() && MO.getSubReg() == 0 && "unexpected operand");
- unsigned VirtReg = MO.getReg();
- assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
- "expected a virtual register");
-
- // For now, just assign a fixed set of adjacent registers.
- // This leaves plenty of room for future improvements.
- static const unsigned NEONDRegs[] = {
- ARM::D0, ARM::D1, ARM::D2, ARM::D3,
- ARM::D4, ARM::D5, ARM::D6, ARM::D7
- };
- MO.setReg(NEONDRegs[Offset + R * Stride]);
-
- if (MO.isUse()) {
- // Insert a copy from VirtReg.
- TII->copyRegToReg(MBB, MBBI, MO.getReg(), VirtReg,
- ARM::DPRRegisterClass, ARM::DPRRegisterClass);
- if (MO.isKill()) {
- MachineInstr *CopyMI = prior(MBBI);
- CopyMI->findRegisterUseOperand(VirtReg)->setIsKill();
- }
- MO.setIsKill();
- } else if (MO.isDef() && !MO.isDead()) {
- // Add a copy to VirtReg.
- TII->copyRegToReg(MBB, NextI, VirtReg, MO.getReg(),
- ARM::DPRRegisterClass, ARM::DPRRegisterClass);
- }
- }
- }
-
- return Modified;
-}
-
-bool NEONPreAllocPass::runOnMachineFunction(MachineFunction &MF) {
- TII = MF.getTarget().getInstrInfo();
-
- bool Modified = false;
- for (MachineFunction::iterator MFI = MF.begin(), E = MF.end(); MFI != E;
- ++MFI) {
- MachineBasicBlock &MBB = *MFI;
- Modified |= PreAllocNEONRegisters(MBB);
- }
-
- return Modified;
-}
-
-/// createNEONPreAllocPass - returns an instance of the NEON register
-/// pre-allocation pass.
-FunctionPass *llvm::createNEONPreAllocPass() {
- return new NEONPreAllocPass();
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/README-Thumb.txt b/libclamav/c++/llvm/lib/Target/ARM/README-Thumb.txt
deleted file mode 100644
index 6b605bb..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/README-Thumb.txt
+++ /dev/null
@@ -1,248 +0,0 @@
-//===---------------------------------------------------------------------===//
-// Random ideas for the ARM backend (Thumb specific).
-//===---------------------------------------------------------------------===//
-
-* Add support for compiling functions in both ARM and Thumb mode, then taking
- the smallest.
-
-* Add support for compiling individual basic blocks in thumb mode, when in a
- larger ARM function. This can be used for presumed cold code, like paths
- to abort (failure path of asserts), EH handling code, etc.
-
-* Thumb doesn't have normal pre/post increment addressing modes, but you can
- load/store 32-bit integers with pre/postinc by using load/store multiple
- instrs with a single register.
-
-* Make better use of high registers r8, r10, r11, r12 (ip). Some variants of add
- and cmp instructions can use high registers. Also, we can use them as
- temporaries to spill values into.
-
-* In thumb mode, short, byte, and bool preferred alignments are currently set
- to 4 to accommodate an ISA restriction (i.e. add sp, #imm requires imm to be a
- multiple of 4).
-
-//===---------------------------------------------------------------------===//
-
-Potential jumptable improvements:
-
-* If we know the function size is less than (1 << 16) * 2 bytes, we can use 16-bit
- jumptable entries (e.g. (L1 - L2) >> 1). Or even smaller entries if the
- function is even smaller. This also applies to ARM.
-
-* Thumb jumptable codegen can improve given some help from the assembler. This
- is what we generate right now:
-
- .set PCRELV0, (LJTI1_0_0-(LPCRELL0+4))
-LPCRELL0:
- mov r1, #PCRELV0
- add r1, pc
- ldr r0, [r0, r1]
- mov pc, r0
- .align 2
-LJTI1_0_0:
- .long LBB1_3
- ...
-
-Note there is another pc relative add that we can take advantage of.
- add r1, pc, #imm_8 * 4
-
-We should be able to generate:
-
-LPCRELL0:
- add r1, LJTI1_0_0
- ldr r0, [r0, r1]
- mov pc, r0
- .align 2
-LJTI1_0_0:
- .long LBB1_3
-
-if the assembler can translate the add to:
- add r1, pc, #((LJTI1_0_0-(LPCRELL0+4))&0xfffffffc)
-
-Note the assembler also does something similar for constpool loads:
-LPCRELL0:
- ldr r0, LCPI1_0
-=>
- ldr r0, pc, #((LCPI1_0-(LPCRELL0+4))&0xfffffffc)
-
-
-//===---------------------------------------------------------------------===//
-
-We compile the following:
-
-define i16 @func_entry_2E_ce(i32 %i) {
- switch i32 %i, label %bb12.exitStub [
- i32 0, label %bb4.exitStub
- i32 1, label %bb9.exitStub
- i32 2, label %bb4.exitStub
- i32 3, label %bb4.exitStub
- i32 7, label %bb9.exitStub
- i32 8, label %bb.exitStub
- i32 9, label %bb9.exitStub
- ]
-
-bb12.exitStub:
- ret i16 0
-
-bb4.exitStub:
- ret i16 1
-
-bb9.exitStub:
- ret i16 2
-
-bb.exitStub:
- ret i16 3
-}
-
-into:
-
-_func_entry_2E_ce:
- mov r2, #1
- lsl r2, r0
- cmp r0, #9
- bhi LBB1_4 @bb12.exitStub
-LBB1_1: @newFuncRoot
- mov r1, #13
- tst r2, r1
- bne LBB1_5 @bb4.exitStub
-LBB1_2: @newFuncRoot
- ldr r1, LCPI1_0
- tst r2, r1
- bne LBB1_6 @bb9.exitStub
-LBB1_3: @newFuncRoot
- mov r1, #1
- lsl r1, r1, #8
- tst r2, r1
- bne LBB1_7 @bb.exitStub
-LBB1_4: @bb12.exitStub
- mov r0, #0
- bx lr
-LBB1_5: @bb4.exitStub
- mov r0, #1
- bx lr
-LBB1_6: @bb9.exitStub
- mov r0, #2
- bx lr
-LBB1_7: @bb.exitStub
- mov r0, #3
- bx lr
-LBB1_8:
- .align 2
-LCPI1_0:
- .long 642
-
-
-gcc compiles to:
-
- cmp r0, #9
- @ lr needed for prologue
- bhi L2
- ldr r3, L11
- mov r2, #1
- mov r1, r2, asl r0
- ands r0, r3, r2, asl r0
- movne r0, #2
- bxne lr
- tst r1, #13
- beq L9
-L3:
- mov r0, r2
- bx lr
-L9:
- tst r1, #256
- movne r0, #3
- bxne lr
-L2:
- mov r0, #0
- bx lr
-L12:
- .align 2
-L11:
- .long 642
-
-
-GCC is doing a couple of clever things here:
- 1. It is predicating one of the returns. This isn't a clear win though: in
- cases where that return isn't taken, it is replacing one condbranch with
- two 'ne' predicated instructions.
- 2. It is sinking the shift of "1 << i" into the tst, and using ands instead of
- tst. This will probably require whole function isel.
- 3. GCC emits:
- tst r1, #256
- we emit:
- mov r1, #1
- lsl r1, r1, #8
- tst r2, r1
-
-
-//===---------------------------------------------------------------------===//
-
-When spilling in thumb mode and the sp offset is too large to fit in the ldr /
-str offset field, we load the offset from a constpool entry and add it to sp:
-
-ldr r2, LCPI
-add r2, sp
-ldr r2, [r2]
-
-These instructions preserve the condition code which is important if the spill
-is between a cmp and a bcc instruction. However, we can use the (potentially)
-cheaper sequence if we know it's ok to clobber the condition register.
-
-add r2, sp, #255 * 4
-add r2, #132
-ldr r2, [r2, #7 * 4]
-
-This is especially bad when dynamic alloca is used. All fixed size stack
-objects are referenced off the frame pointer with negative offsets. See
-oggenc for an example.
-
-
-//===---------------------------------------------------------------------===//
-
-Poor codegen in test/CodeGen/ARM/select.ll f7:
-
- ldr r5, LCPI1_0
-LPC0:
- add r5, pc
- ldr r6, LCPI1_1
- ldr r2, LCPI1_2
- mov r3, r6
- mov lr, pc
- bx r5
-
-//===---------------------------------------------------------------------===//
-
-Make the register allocator / spiller smarter so we can re-materialize "mov r, imm",
-etc. Almost all Thumb instructions clobber the condition code.
-
-//===---------------------------------------------------------------------===//
-
-Add ldmia, stmia support.
-
-//===---------------------------------------------------------------------===//
-
-Thumb load / store address mode offsets are scaled. The values kept in the
-instruction operands are pre-scale values. This probably ought to be changed
-to avoid extra work when we convert Thumb2 instructions to Thumb1 instructions.
-
-//===---------------------------------------------------------------------===//
-
-We need to make (some of the) Thumb1 instructions predicable. That will allow
-shrinking of predicated Thumb2 instructions. To allow this, we need to be able
-to toggle the 's' bit since they do not set CPSR when they are inside IT blocks.
-
-//===---------------------------------------------------------------------===//
-
-Make use of hi register variants of cmp: tCMPhir / tCMPZhir.
-
-//===---------------------------------------------------------------------===//
-
-Thumb1 immediate fields sometimes keep pre-scaled values. See
-Thumb1RegisterInfo::eliminateFrameIndex. This is inconsistent with ARM and
-Thumb2.
-
-//===---------------------------------------------------------------------===//
-
-Rather than having tBR_JTr print a ".align 2" and having the constant island
-pass pad it, add a target-specific ALIGN instruction instead. That way,
-GetInstSizeInBytes won't have to over-estimate. It can also be used by the loop alignment pass.
diff --git a/libclamav/c++/llvm/lib/Target/ARM/README-Thumb2.txt b/libclamav/c++/llvm/lib/Target/ARM/README-Thumb2.txt
deleted file mode 100644
index e7c2552..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/README-Thumb2.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-//===---------------------------------------------------------------------===//
-// Random ideas for the ARM backend (Thumb2 specific).
-//===---------------------------------------------------------------------===//
-
-Make sure jumptable destinations are below the jumptable in order to make use
-of tbb / tbh.
diff --git a/libclamav/c++/llvm/lib/Target/ARM/README.txt b/libclamav/c++/llvm/lib/Target/ARM/README.txt
deleted file mode 100644
index 57b65cf..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/README.txt
+++ /dev/null
@@ -1,589 +0,0 @@
-//===---------------------------------------------------------------------===//
-// Random ideas for the ARM backend.
-//===---------------------------------------------------------------------===//
-
-Reimplement 'select' in terms of 'SEL'.
-
-* We would really like to support UXTAB16, but we need to prove that the
- add doesn't need to overflow between the two 16-bit chunks.
-
-* Implement pre/post increment support. (e.g. PR935)
-* Implement smarter constant generation for binops with large immediates.
-
-A few ARMv6T2 ops should be pattern matched: BFI, SBFX, and UBFX
-
-//===---------------------------------------------------------------------===//
-
-Crazy idea: Consider code that uses lots of 8-bit or 16-bit values. By the
-time regalloc happens, these values are now in a 32-bit register, usually with
-the top-bits known to be sign or zero extended. If spilled, we should be able
-to spill these to an 8-bit or 16-bit stack slot, zero or sign extending as part
-of the reload.
-
-Doing this reduces the size of the stack frame (important for thumb etc), and
-also increases the likelihood that we will be able to reload multiple values
-from the stack with a single load.
-
-//===---------------------------------------------------------------------===//
-
-The constant island pass is in good shape. Some cleanups might be desirable,
-but there is unlikely to be much improvement in the generated code.
-
-1. There may be some advantage to trying to be smarter about the initial
-placement, rather than putting everything at the end.
-
-2. There might be some compile-time efficiency to be had by representing
-consecutive islands as a single block rather than multiple blocks.
-
-3. Use a priority queue to sort constant pool users in inverse order of
- position so we always process the one closest to the end of the function
- first. This may simplify CreateNewWater.
-
-//===---------------------------------------------------------------------===//
-
-Eliminate copysign custom expansion. We are still generating crappy code with
-default expansion + if-conversion.
-
-//===---------------------------------------------------------------------===//
-
-Eliminate one instruction from:
-
-define i32 @_Z6slow4bii(i32 %x, i32 %y) {
- %tmp = icmp sgt i32 %x, %y
- %retval = select i1 %tmp, i32 %x, i32 %y
- ret i32 %retval
-}
-
-__Z6slow4bii:
- cmp r0, r1
- movgt r1, r0
- mov r0, r1
- bx lr
-=>
-
-__Z6slow4bii:
- cmp r0, r1
- movle r0, r1
- bx lr
-
-//===---------------------------------------------------------------------===//
-
-Implement long long "X-3" with instructions that fold the immediate in. These
-were disabled due to badness with the ARM carry flag on subtracts.
-
-//===---------------------------------------------------------------------===//
-
-More load / store optimizations:
-1) Better representation for block transfer? This is from Olden/power:
-
- fldd d0, [r4]
- fstd d0, [r4, #+32]
- fldd d0, [r4, #+8]
- fstd d0, [r4, #+40]
- fldd d0, [r4, #+16]
- fstd d0, [r4, #+48]
- fldd d0, [r4, #+24]
- fstd d0, [r4, #+56]
-
-If we can spare the registers, it would be better to use fldm and fstm here.
-Need major register allocator enhancement though.
-
-2) Can we recognize the relative position of constantpool entries? i.e. Treat
-
- ldr r0, LCPI17_3
- ldr r1, LCPI17_4
- ldr r2, LCPI17_5
-
- as
- ldr r0, LCPI17
- ldr r1, LCPI17+4
- ldr r2, LCPI17+8
-
- Then the ldr's can be combined into a single ldm. See Olden/power.
-
-Note for ARM v4 gcc uses ldmia to load a pair of 32-bit values to represent a
-double 64-bit FP constant:
-
- adr r0, L6
- ldmia r0, {r0-r1}
-
- .align 2
-L6:
- .long -858993459
- .long 1074318540
-
-3) struct copies appear to be done field by field
-instead of by words, at least sometimes:
-
-struct foo { int x; short s; char c1; char c2; };
-void cpy(struct foo*a, struct foo*b) { *a = *b; }
-
-llvm code (-O2)
- ldrb r3, [r1, #+6]
- ldr r2, [r1]
- ldrb r12, [r1, #+7]
- ldrh r1, [r1, #+4]
- str r2, [r0]
- strh r1, [r0, #+4]
- strb r3, [r0, #+6]
- strb r12, [r0, #+7]
-gcc code (-O2)
- ldmia r1, {r1-r2}
- stmia r0, {r1-r2}
-
-In this benchmark poor handling of aggregate copies has shown up as
-having a large effect on size, and possibly speed as well (we don't have
-a good way to measure on ARM).
-
-//===---------------------------------------------------------------------===//
-
-* Consider this silly example:
-
-double bar(double x) {
- double r = foo(3.1);
- return x+r;
-}
-
-_bar:
- stmfd sp!, {r4, r5, r7, lr}
- add r7, sp, #8
- mov r4, r0
- mov r5, r1
- fldd d0, LCPI1_0
- fmrrd r0, r1, d0
- bl _foo
- fmdrr d0, r4, r5
- fmsr s2, r0
- fsitod d1, s2
- faddd d0, d1, d0
- fmrrd r0, r1, d0
- ldmfd sp!, {r4, r5, r7, pc}
-
-Ignore the prologue and epilogue stuff for a second. Note
- mov r4, r0
- mov r5, r1
-the copies to callee-save registers and the fact that they are only used by the
-fmdrr instruction. It would have been better had the fmdrr been scheduled
-before the call and placed the result in a callee-save DPR register. The two
-mov ops would not have been necessary.
-
-//===---------------------------------------------------------------------===//
-
-Calling convention related stuff:
-
-* gcc's parameter passing implementation is terrible and we suffer as a result:
-
-e.g.
-struct s {
- double d1;
- int s1;
-};
-
-void foo(struct s S) {
- printf("%g, %d\n", S.d1, S.s1);
-}
-
-'S' is passed via registers r0, r1, r2. But gcc stores them to the stack, and
-then reloads them to r1, r2, and r3 before issuing the call (r0 contains the
-address of the format string):
-
- stmfd sp!, {r7, lr}
- add r7, sp, #0
- sub sp, sp, #12
- stmia sp, {r0, r1, r2}
- ldmia sp, {r1-r2}
- ldr r0, L5
- ldr r3, [sp, #8]
-L2:
- add r0, pc, r0
- bl L_printf$stub
-
-Instead of a stmia, ldmia, and a ldr, wouldn't it be better to do three moves?
-
-* Returning an aggregate type is even worse:
-
-e.g.
-struct s foo(void) {
- struct s S = {1.1, 2};
- return S;
-}
-
- mov ip, r0
- ldr r0, L5
- sub sp, sp, #12
-L2:
- add r0, pc, r0
- @ lr needed for prologue
- ldmia r0, {r0, r1, r2}
- stmia sp, {r0, r1, r2}
- stmia ip, {r0, r1, r2}
- mov r0, ip
- add sp, sp, #12
- bx lr
-
-r0 (and later ip) is the hidden parameter from the caller to store the value in. The
-first ldmia loads the constants into r0, r1, r2. The last stmia stores r0, r1,
-r2 into the address passed in. However, there is one additional stmia that
-stores r0, r1, and r2 to some stack location. The store is dead.
-
-The llvm-gcc generated code looks like this:
-
-csretcc void %foo(%struct.s* %agg.result) {
-entry:
- %S = alloca %struct.s, align 4 ; <%struct.s*> [#uses=1]
- %memtmp = alloca %struct.s ; <%struct.s*> [#uses=1]
- cast %struct.s* %S to sbyte* ; <sbyte*>:0 [#uses=2]
- call void %llvm.memcpy.i32( sbyte* %0, sbyte* cast ({ double, int }* %C.0.904 to sbyte*), uint 12, uint 4 )
- cast %struct.s* %agg.result to sbyte* ; <sbyte*>:1 [#uses=2]
- call void %llvm.memcpy.i32( sbyte* %1, sbyte* %0, uint 12, uint 0 )
- cast %struct.s* %memtmp to sbyte* ; <sbyte*>:2 [#uses=1]
- call void %llvm.memcpy.i32( sbyte* %2, sbyte* %1, uint 12, uint 0 )
- ret void
-}
-
-llc ends up issuing two memcpy's (the first memcpy becomes 3 loads from
-constantpool). Perhaps we should 1) fix llvm-gcc so the memcpy is translated
-into a number of loads and stores, or 2) custom lower memcpy (of small size) to
-be ldmia / stmia. I think option 2 is better but the current register
-allocator cannot allocate a chunk of registers at a time.
-
-A feasible temporary solution is to use specific physical registers at
-lowering time for small (<= 4 words?) transfer sizes.
-
-* ARM CSRet calling convention requires the hidden argument to be returned by
-the callee.
-
-//===---------------------------------------------------------------------===//
-
-We can definitely do a better job on BB placement to eliminate some branches.
-It's very common to see llvm-generated assembly code that looks like this:
-
-LBB3:
- ...
-LBB4:
-...
- beq LBB3
- b LBB2
-
-If BB4 is the only predecessor of BB3, then we can emit BB3 after BB4. We can
-then eliminate the beq and turn the unconditional branch to LBB2 into a bne.
-
-See McCat/18-imp/ComputeBoundingBoxes for an example.
-
-//===---------------------------------------------------------------------===//
-
-Pre-/post- indexed load / stores:
-
-1) We should not make the pre/post- indexed load/store transform if the base ptr
-is guaranteed to be live beyond the load/store. This can happen if the base
-ptr is live out of the block in which we are performing the optimization, e.g.
-
-mov r1, r2
-ldr r3, [r1], #4
-...
-
-vs.
-
-ldr r3, [r2]
-add r1, r2, #4
-...
-
-In most cases, this is just a wasted optimization. However, sometimes it can
-negatively impact performance because two-address code is more restrictive
-when it comes to scheduling.
-
-Unfortunately, liveout information is currently unavailable during DAG combine
-time.
-
-2) Consider splitting an indexed load / store into a pair of add/sub + load/store
- to solve #1 (in TwoAddressInstructionPass.cpp).
-
-3) Enhance LSR to generate more opportunities for indexed ops.
-
-4) Once we add support for multiple result patterns, write indexed load
- patterns instead of C++ instruction selection code.
-
-5) Use VLDM / VSTM to emulate indexed FP load / store.
-
-//===---------------------------------------------------------------------===//
-
-Implement support for some more tricky ways to materialize immediates. For
-example, to get 0xffff8000, we can use:
-
-mov r9, #&3f8000
-sub r9, r9, #&400000
-
-//===---------------------------------------------------------------------===//
-
-We sometimes generate multiple add / sub instructions to update sp in the
-prologue and epilogue if the inc / dec value is too large to fit in a single
-immediate operand. In some cases, it might be better to load the value from a
-constantpool instead.
-
-//===---------------------------------------------------------------------===//
-
-GCC generates significantly better code for this function.
-
-int foo(int StackPtr, unsigned char *Line, unsigned char *Stack, int LineLen) {
- int i = 0;
-
- if (StackPtr != 0) {
- while (StackPtr != 0 && i < (((LineLen) < (32768))? (LineLen) : (32768)))
- Line[i++] = Stack[--StackPtr];
- if (LineLen > 32768)
- {
- while (StackPtr != 0 && i < LineLen)
- {
- i++;
- --StackPtr;
- }
- }
- }
- return StackPtr;
-}
-
-//===---------------------------------------------------------------------===//
-
-This should compile to the mlas instruction:
-int mlas(int x, int y, int z) { return ((x * y + z) < 0) ? 7 : 13; }
-
-//===---------------------------------------------------------------------===//
-
-At some point, we should triage these to see if they still apply to us:
-
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19598
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=18560
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=27016
-
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11831
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11826
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11825
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11824
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11823
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11820
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=10982
-
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=10242
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=9831
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=9760
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=9759
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=9703
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=9702
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=9663
-
-http://www.inf.u-szeged.hu/gcc-arm/
-http://citeseer.ist.psu.edu/debus04linktime.html
-
-//===---------------------------------------------------------------------===//
-
-gcc generates smaller code for this function at -O2 or -Os:
-
-void foo(signed char* p) {
- if (*p == 3)
- bar();
- else if (*p == 4)
- baz();
- else if (*p == 5)
- quux();
-}
-
-llvm decides it's a good idea to turn the repeated if...else into a
-binary tree, as if it were a switch; the resulting code requires one fewer
-compare-and-branch when *p<=2 or *p==5, the same number if *p==4
-or *p>6, and one more if *p==3. So it should be a speed win
-(on balance). However, the revised code is larger, with 4 conditional
-branches instead of 3.
-
-More seriously, there is a byte->word extend before
-each comparison, where there should be only one, and the condition codes
-are not remembered when the same two values are compared twice.
-
-//===---------------------------------------------------------------------===//
-
-More LSR enhancements possible:
-
-1. Teach LSR about pre- and post- indexed ops to allow the iv increment to be
-   merged into a load / store.
-2. Allow iv reuse even when a type conversion is required. For example, i8
-   and i32 load / store addressing modes are identical (see the sketch below).
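A minimal sketch of what item 2 would enable (illustrative only, not code from
this repository): a single induction variable serves both the byte loads and
the word stores, despite the implicit i8 -> i32 conversion.

  // Hypothetical example: LSR could reuse one iv 'i' for both the i8 loads
  // from 'src' and the i32 stores to 'dst'.
  void widen(const unsigned char *src, int *dst, int n) {
    for (int i = 0; i < n; ++i)
      dst[i] = src[i];   // i8 load and i32 store share the same iv
  }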
-
-
-//===---------------------------------------------------------------------===//
-
-This:
-
-int foo(int a, int b, int c, int d) {
- long long acc = (long long)a * (long long)b;
- acc += (long long)c * (long long)d;
- return (int)(acc >> 32);
-}
-
-Should compile to use SMLAL (Signed Multiply Accumulate Long) which multiplies
-two signed 32-bit values to produce a 64-bit value, and accumulates this with
-a 64-bit value.
-
-We currently get this with both v4 and v6:
-
-_foo:
- smull r1, r0, r1, r0
- smull r3, r2, r3, r2
- adds r3, r3, r1
- adc r0, r2, r0
- bx lr
-
-//===---------------------------------------------------------------------===//
-
-This:
- #include <algorithm>
- std::pair<unsigned, bool> full_add(unsigned a, unsigned b)
- { return std::make_pair(a + b, a + b < a); }
- bool no_overflow(unsigned a, unsigned b)
- { return !full_add(a, b).second; }
-
-Should compile to:
-
-_Z8full_addjj:
- adds r2, r1, r2
- movcc r1, #0
- movcs r1, #1
- str r2, [r0, #0]
- strb r1, [r0, #4]
- mov pc, lr
-
-_Z11no_overflowjj:
- cmn r0, r1
- movcs r0, #0
- movcc r0, #1
- mov pc, lr
-
-not:
-
-__Z8full_addjj:
- add r3, r2, r1
- str r3, [r0]
- mov r2, #1
- mov r12, #0
- cmp r3, r1
- movlo r12, r2
- str r12, [r0, #+4]
- bx lr
-__Z11no_overflowjj:
- add r3, r1, r0
- mov r2, #1
- mov r1, #0
- cmp r3, r0
- movhs r1, r2
- mov r0, r1
- bx lr
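As an illustrative aside (not part of the original note), the carry-out can
also be made explicit at the source level; this sketch assumes a GCC/Clang
compiler for the __builtin_uadd_overflow builtin.

  #include <utility>

  // Sketch only: expose the carry-out of a + b directly, giving the backend a
  // clean shot at the adds/movcs or cmn/movcc sequences shown above.
  std::pair<unsigned, bool> full_add_builtin(unsigned a, unsigned b) {
    unsigned sum;
    bool carry = __builtin_uadd_overflow(a, b, &sum);  // carry-out of the add
    return std::make_pair(sum, carry);
  }

  bool no_overflow_builtin(unsigned a, unsigned b) {
    return !full_add_builtin(a, b).second;
  }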
-
-//===---------------------------------------------------------------------===//
-
-Some of the NEON intrinsics may be appropriate for more general use, either
-as target-independent intrinsics or perhaps elsewhere in the ARM backend.
-Some of them may also be lowered to target-independent SDNodes, and perhaps
-some new SDNodes could be added.
-
-For example, maximum, minimum, and absolute value operations are well-defined
-and standard operations, both for vector and scalar types.
-
-The current NEON-specific intrinsics for count leading zeros and count one
-bits could perhaps be replaced by the target-independent ctlz and ctpop
-intrinsics. It may also make sense to add a target-independent "ctls"
-intrinsic for "count leading sign bits". Likewise, the backend could use
-the target-independent SDNodes for these operations.
-
-ARMv6 has scalar saturating and halving adds and subtracts. The same
-intrinsics could possibly be used for both NEON's vector implementations of
-those operations and the ARMv6 scalar versions.
-
-//===---------------------------------------------------------------------===//
-
-ARM::MOVCCr is commutable (by flipping the condition). But we need to implement
-ARMInstrInfo::commuteInstruction() to support it.
-
-//===---------------------------------------------------------------------===//
-
-Split out LDR (literal) from the normal ARM LDR instruction. Also consider
-splitting LDR into imm12 and so_reg forms. This allows us to clean up some
-code, e.g. ARMLoadStoreOptimizer does not need to look at LDR (literal) and
-LDR (so_reg) while ARMConstantIslandPass only needs to worry about LDR (literal).
-
-//===---------------------------------------------------------------------===//
-
-The constant island pass should make use of the full range of SoImm values for LEApcrel.
-Be careful though as the last attempt caused infinite looping on lencod.
-
-//===---------------------------------------------------------------------===//
-
-Predication issue. This function:
-
-extern unsigned array[ 128 ];
-int foo( int x ) {
- int y;
- y = array[ x & 127 ];
- if ( x & 128 )
- y = 123456789 & ( y >> 2 );
- else
- y = 123456789 & y;
- return y;
-}
-
-compiles to:
-
-_foo:
- and r1, r0, #127
- ldr r2, LCPI1_0
- ldr r2, [r2]
- ldr r1, [r2, +r1, lsl #2]
- mov r2, r1, lsr #2
- tst r0, #128
- moveq r2, r1
- ldr r0, LCPI1_1
- and r0, r2, r0
- bx lr
-
-It would be better to do something like this, to fold the shift into the
-conditional move:
-
- and r1, r0, #127
- ldr r2, LCPI1_0
- ldr r2, [r2]
- ldr r1, [r2, +r1, lsl #2]
- tst r0, #128
- movne r1, r1, lsr #2
- ldr r0, LCPI1_1
- and r0, r1, r0
- bx lr
-
-it saves an instruction and a register.
-
-//===---------------------------------------------------------------------===//
-
-It might be profitable to cse MOVi16 if there are lots of 32-bit immediates
-with the same bottom half.
-
-//===---------------------------------------------------------------------===//
-
-Robert Muth started working on an alternate jump table implementation that
-does not put the tables in-line in the text. This is more like the llvm
-default jump table implementation. This might be useful sometime. Several
-revisions of patches are on the mailing list, beginning at:
-http://lists.cs.uiuc.edu/pipermail/llvmdev/2009-June/022763.html
-
-//===---------------------------------------------------------------------===//
-
-Make use of the "rbit" instruction.
-
-//===---------------------------------------------------------------------===//
-
-Take a look at test/CodeGen/Thumb2/machine-licm.ll. ARM should be taught how
-to licm and cse the unnecessary load from cp#1.
-
-//===---------------------------------------------------------------------===//
-
-The CMN instruction sets the flags like an ADD instruction, while CMP sets
-them like a subtract. Therefore to be able to use CMN for comparisons other
-than the Z bit, we'll need additional logic to reverse the conditionals
-associated with the comparison. Perhaps a pseudo-instruction for the comparison,
-with a post-codegen pass to clean up and handle the condition codes?
-See PR5694 for testcase.
diff --git a/libclamav/c++/llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp b/libclamav/c++/llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp
deleted file mode 100644
index 163a0a9..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/TargetInfo/ARMTargetInfo.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-//===-- ARMTargetInfo.cpp - ARM Target Implementation ---------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARM.h"
-#include "llvm/Module.h"
-#include "llvm/Target/TargetRegistry.h"
-using namespace llvm;
-
-Target llvm::TheARMTarget, llvm::TheThumbTarget;
-
-extern "C" void LLVMInitializeARMTargetInfo() {
- RegisterTarget<Triple::arm, /*HasJIT=*/true>
- X(TheARMTarget, "arm", "ARM");
-
- RegisterTarget<Triple::thumb, /*HasJIT=*/true>
- Y(TheThumbTarget, "thumb", "Thumb");
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/TargetInfo/CMakeLists.txt b/libclamav/c++/llvm/lib/Target/ARM/TargetInfo/CMakeLists.txt
deleted file mode 100644
index 3910bb0..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/TargetInfo/CMakeLists.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
-
-add_llvm_library(LLVMARMInfo
- ARMTargetInfo.cpp
- )
-
-add_dependencies(LLVMARMInfo ARMCodeGenTable_gen)
diff --git a/libclamav/c++/llvm/lib/Target/ARM/TargetInfo/Makefile b/libclamav/c++/llvm/lib/Target/ARM/TargetInfo/Makefile
deleted file mode 100644
index 6292ab1..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/TargetInfo/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/ARM/TargetInfo/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMARMInfo
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp b/libclamav/c++/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
deleted file mode 100644
index 7f42c82..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
+++ /dev/null
@@ -1,250 +0,0 @@
-//===- Thumb1InstrInfo.cpp - Thumb-1 Instruction Information ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Thumb-1 implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Thumb1InstrInfo.h"
-#include "ARM.h"
-#include "ARMGenInstrInfo.inc"
-#include "ARMMachineFunctionInfo.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineMemOperand.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/ADT/SmallVector.h"
-#include "Thumb1InstrInfo.h"
-
-using namespace llvm;
-
-Thumb1InstrInfo::Thumb1InstrInfo(const ARMSubtarget &STI)
- : ARMBaseInstrInfo(STI), RI(*this, STI) {
-}
-
-unsigned Thumb1InstrInfo::getUnindexedOpcode(unsigned Opc) const {
- return 0;
-}
-
-bool Thumb1InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC) const {
- DebugLoc DL = DebugLoc::getUnknownLoc();
- if (I != MBB.end()) DL = I->getDebugLoc();
-
- if (DestRC == ARM::GPRRegisterClass) {
- if (SrcRC == ARM::GPRRegisterClass) {
- BuildMI(MBB, I, DL, get(ARM::tMOVgpr2gpr), DestReg).addReg(SrcReg);
- return true;
- } else if (SrcRC == ARM::tGPRRegisterClass) {
- BuildMI(MBB, I, DL, get(ARM::tMOVtgpr2gpr), DestReg).addReg(SrcReg);
- return true;
- }
- } else if (DestRC == ARM::tGPRRegisterClass) {
- if (SrcRC == ARM::GPRRegisterClass) {
- BuildMI(MBB, I, DL, get(ARM::tMOVgpr2tgpr), DestReg).addReg(SrcReg);
- return true;
- } else if (SrcRC == ARM::tGPRRegisterClass) {
- BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg).addReg(SrcReg);
- return true;
- }
- }
-
- return false;
-}
-
-bool Thumb1InstrInfo::
-canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const {
- if (Ops.size() != 1) return false;
-
- unsigned OpNum = Ops[0];
- unsigned Opc = MI->getOpcode();
- switch (Opc) {
- default: break;
- case ARM::tMOVr:
- case ARM::tMOVtgpr2gpr:
- case ARM::tMOVgpr2tgpr:
- case ARM::tMOVgpr2gpr: {
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
- !isARMLowRegister(SrcReg))
- // tSpill cannot take a high register operand.
- return false;
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(DstReg) &&
- !isARMLowRegister(DstReg))
- // tRestore cannot target a high register operand.
- return false;
- }
- return true;
- }
- }
-
- return false;
-}
-
-void Thumb1InstrInfo::
-storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned SrcReg, bool isKill, int FI,
- const TargetRegisterClass *RC) const {
- DebugLoc DL = DebugLoc::getUnknownLoc();
- if (I != MBB.end()) DL = I->getDebugLoc();
-
- assert((RC == ARM::tGPRRegisterClass ||
- (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
- isARMLowRegister(SrcReg))) && "Unknown regclass!");
-
- if (RC == ARM::tGPRRegisterClass ||
- (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
- isARMLowRegister(SrcReg))) {
- MachineFunction &MF = *MBB.getParent();
- MachineFrameInfo &MFI = *MF.getFrameInfo();
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
- MachineMemOperand::MOStore, 0,
- MFI.getObjectSize(FI),
- MFI.getObjectAlignment(FI));
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::tSpill))
- .addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
- }
-}
-
-void Thumb1InstrInfo::
-loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned DestReg, int FI,
- const TargetRegisterClass *RC) const {
- DebugLoc DL = DebugLoc::getUnknownLoc();
- if (I != MBB.end()) DL = I->getDebugLoc();
-
- assert((RC == ARM::tGPRRegisterClass ||
- (TargetRegisterInfo::isPhysicalRegister(DestReg) &&
- isARMLowRegister(DestReg))) && "Unknown regclass!");
-
- if (RC == ARM::tGPRRegisterClass ||
- (TargetRegisterInfo::isPhysicalRegister(DestReg) &&
- isARMLowRegister(DestReg))) {
- MachineFunction &MF = *MBB.getParent();
- MachineFrameInfo &MFI = *MF.getFrameInfo();
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
- MachineMemOperand::MOLoad, 0,
- MFI.getObjectSize(FI),
- MFI.getObjectAlignment(FI));
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::tRestore), DestReg)
- .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
- }
-}
-
-bool Thumb1InstrInfo::
-spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI) const {
- if (CSI.empty())
- return false;
-
- DebugLoc DL = DebugLoc::getUnknownLoc();
- if (MI != MBB.end()) DL = MI->getDebugLoc();
-
- MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, get(ARM::tPUSH));
- AddDefaultPred(MIB);
- MIB.addReg(0); // No write back.
- for (unsigned i = CSI.size(); i != 0; --i) {
- unsigned Reg = CSI[i-1].getReg();
- // Add the callee-saved register as live-in. It's killed at the spill.
- MBB.addLiveIn(Reg);
- MIB.addReg(Reg, RegState::Kill);
- }
- return true;
-}
-
-bool Thumb1InstrInfo::
-restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI) const {
- MachineFunction &MF = *MBB.getParent();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- if (CSI.empty())
- return false;
-
- bool isVarArg = AFI->getVarArgsRegSaveSize() > 0;
- DebugLoc DL = MI->getDebugLoc();
- MachineInstrBuilder MIB = BuildMI(MF, DL, get(ARM::tPOP));
- AddDefaultPred(MIB);
- MIB.addReg(0); // No write back.
-
- bool NumRegs = false;
- for (unsigned i = CSI.size(); i != 0; --i) {
- unsigned Reg = CSI[i-1].getReg();
- if (Reg == ARM::LR) {
- // Special epilogue for vararg functions. See emitEpilogue
- if (isVarArg)
- continue;
- Reg = ARM::PC;
- (*MIB).setDesc(get(ARM::tPOP_RET));
- MI = MBB.erase(MI);
- }
- MIB.addReg(Reg, getDefRegState(true));
- NumRegs = true;
- }
-
- // It's illegal to emit pop instruction without operands.
- if (NumRegs)
- MBB.insert(MI, &*MIB);
-
- return true;
-}
-
-MachineInstr *Thumb1InstrInfo::
-foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops, int FI) const {
- if (Ops.size() != 1) return NULL;
-
- unsigned OpNum = Ops[0];
- unsigned Opc = MI->getOpcode();
- MachineInstr *NewMI = NULL;
- switch (Opc) {
- default: break;
- case ARM::tMOVr:
- case ARM::tMOVtgpr2gpr:
- case ARM::tMOVgpr2tgpr:
- case ARM::tMOVgpr2gpr: {
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- bool isKill = MI->getOperand(1).isKill();
- if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
- !isARMLowRegister(SrcReg))
- // tSpill cannot take a high register operand.
- break;
- NewMI = AddDefaultPred(BuildMI(MF, MI->getDebugLoc(), get(ARM::tSpill))
- .addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FI).addImm(0));
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(DstReg) &&
- !isARMLowRegister(DstReg))
- // tRestore cannot target a high register operand.
- break;
- bool isDead = MI->getOperand(0).isDead();
- NewMI = AddDefaultPred(BuildMI(MF, MI->getDebugLoc(), get(ARM::tRestore))
- .addReg(DstReg,
- RegState::Define | getDeadRegState(isDead))
- .addFrameIndex(FI).addImm(0));
- }
- break;
- }
- }
-
- return NewMI;
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/Thumb1InstrInfo.h b/libclamav/c++/llvm/lib/Target/ARM/Thumb1InstrInfo.h
deleted file mode 100644
index 516ddf1..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/Thumb1InstrInfo.h
+++ /dev/null
@@ -1,79 +0,0 @@
-//===- Thumb1InstrInfo.h - Thumb-1 Instruction Information ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Thumb-1 implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef THUMB1INSTRUCTIONINFO_H
-#define THUMB1INSTRUCTIONINFO_H
-
-#include "llvm/Target/TargetInstrInfo.h"
-#include "ARM.h"
-#include "ARMInstrInfo.h"
-#include "Thumb1RegisterInfo.h"
-
-namespace llvm {
- class ARMSubtarget;
-
-class Thumb1InstrInfo : public ARMBaseInstrInfo {
- Thumb1RegisterInfo RI;
-public:
- explicit Thumb1InstrInfo(const ARMSubtarget &STI);
-
- // Return the non-pre/post incrementing version of 'Opc'. Return 0
- // if there is no such opcode.
- unsigned getUnindexedOpcode(unsigned Opc) const;
-
- /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
- /// such, whenever a client has an instance of instruction info, it should
- /// always be able to get register info as well (through this method).
- ///
- const Thumb1RegisterInfo &getRegisterInfo() const { return RI; }
-
- bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI) const;
- bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI) const;
-
- bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC) const;
- void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC) const;
-
- void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC) const;
-
- bool canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const;
-
- MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
-
- MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const {
- return 0;
- }
-};
-}
-
-#endif // THUMB1INSTRUCTIONINFO_H
diff --git a/libclamav/c++/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp b/libclamav/c++/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
deleted file mode 100644
index 163d1e9..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
+++ /dev/null
@@ -1,864 +0,0 @@
-//===- Thumb1RegisterInfo.cpp - Thumb-1 Register Information ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Thumb-1 implementation of the TargetRegisterInfo
-// class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARM.h"
-#include "ARMAddressingModes.h"
-#include "ARMBaseInstrInfo.h"
-#include "ARMMachineFunctionInfo.h"
-#include "ARMSubtarget.h"
-#include "Thumb1InstrInfo.h"
-#include "Thumb1RegisterInfo.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineLocation.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetFrameInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-Thumb1RegisterInfo::Thumb1RegisterInfo(const ARMBaseInstrInfo &tii,
- const ARMSubtarget &sti)
- : ARMBaseRegisterInfo(tii, sti) {
-}
-
-/// emitLoadConstPool - Emits a load from constpool to materialize the
-/// specified immediate.
-void Thumb1RegisterInfo::emitLoadConstPool(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- DebugLoc dl,
- unsigned DestReg, unsigned SubIdx,
- int Val,
- ARMCC::CondCodes Pred,
- unsigned PredReg) const {
- MachineFunction &MF = *MBB.getParent();
- MachineConstantPool *ConstantPool = MF.getConstantPool();
- Constant *C = ConstantInt::get(
- Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
- unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
-
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRcp))
- .addReg(DestReg, getDefRegState(true), SubIdx)
- .addConstantPoolIndex(Idx).addImm(Pred).addReg(PredReg);
-}
-
-const TargetRegisterClass*
-Thumb1RegisterInfo::getPhysicalRegisterRegClass(unsigned Reg, EVT VT) const {
- if (isARMLowRegister(Reg))
- return ARM::tGPRRegisterClass;
- switch (Reg) {
- default:
- break;
- case ARM::R8: case ARM::R9: case ARM::R10: case ARM::R11:
- case ARM::R12: case ARM::SP: case ARM::LR: case ARM::PC:
- return ARM::GPRRegisterClass;
- }
-
- return TargetRegisterInfo::getPhysicalRegisterRegClass(Reg, VT);
-}
-
-bool Thumb1RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
- const MachineFrameInfo *FFI = MF.getFrameInfo();
- unsigned CFSize = FFI->getMaxCallFrameSize();
- // It's not always a good idea to include the call frame as part of the
- // stack frame. ARM (especially Thumb) has only a small immediate offset
- // range for addressing the stack frame, so a large call frame can cause
- // poor codegen and may even make it impossible to scavenge a register.
- if (CFSize >= ((1 << 8) - 1) * 4 / 2) // Half of imm8 * 4
- return false;
-
- return !MF.getFrameInfo()->hasVarSizedObjects();
-}
-
-
-/// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize
-/// a destreg = basereg + immediate in Thumb code. Materialize the immediate
-/// in a register using mov / mvn sequences or load the immediate from a
-/// constpool entry.
-static
-void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- unsigned DestReg, unsigned BaseReg,
- int NumBytes, bool CanChangeCC,
- const TargetInstrInfo &TII,
- const Thumb1RegisterInfo& MRI,
- DebugLoc dl) {
- MachineFunction &MF = *MBB.getParent();
- bool isHigh = !isARMLowRegister(DestReg) ||
- (BaseReg != 0 && !isARMLowRegister(BaseReg));
- bool isSub = false;
- // Subtract doesn't have a high register version. Load the negative value
- // if either the base or dest register is a high register. Also, do not
- // issue sub as part of the sequence if the condition register is to be
- // preserved.
- if (NumBytes < 0 && !isHigh && CanChangeCC) {
- isSub = true;
- NumBytes = -NumBytes;
- }
- unsigned LdReg = DestReg;
- if (DestReg == ARM::SP) {
- assert(BaseReg == ARM::SP && "Unexpected!");
- LdReg = MF.getRegInfo().createVirtualRegister(ARM::tGPRRegisterClass);
- }
-
- if (NumBytes <= 255 && NumBytes >= 0)
- AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg))
- .addImm(NumBytes);
- else if (NumBytes < 0 && NumBytes >= -255) {
- AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg))
- .addImm(NumBytes);
- AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tRSB), LdReg))
- .addReg(LdReg, RegState::Kill);
- } else
- MRI.emitLoadConstPool(MBB, MBBI, dl, LdReg, 0, NumBytes);
-
- // Emit add / sub.
- int Opc = (isSub) ? ARM::tSUBrr : (isHigh ? ARM::tADDhirr : ARM::tADDrr);
- MachineInstrBuilder MIB =
- BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg);
- if (Opc != ARM::tADDhirr)
- MIB = AddDefaultT1CC(MIB);
- if (DestReg == ARM::SP || isSub)
- MIB.addReg(BaseReg).addReg(LdReg, RegState::Kill);
- else
- MIB.addReg(LdReg).addReg(BaseReg, RegState::Kill);
- AddDefaultPred(MIB);
-}
-
-/// calcNumMI - Returns the number of instructions required to materialize
-/// the specific add / sub r, c instruction.
-static unsigned calcNumMI(int Opc, int ExtraOpc, unsigned Bytes,
- unsigned NumBits, unsigned Scale) {
- unsigned NumMIs = 0;
- unsigned Chunk = ((1 << NumBits) - 1) * Scale;
-
- if (Opc == ARM::tADDrSPi) {
- unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
- Bytes -= ThisVal;
- NumMIs++;
- NumBits = 8;
- Scale = 1; // Followed by a number of tADDi8.
- Chunk = ((1 << NumBits) - 1) * Scale;
- }
-
- NumMIs += Bytes / Chunk;
- if ((Bytes % Chunk) != 0)
- NumMIs++;
- if (ExtraOpc)
- NumMIs++;
- return NumMIs;
-}
-
-/// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
-/// a destreg = basereg + immediate in Thumb code.
-static
-void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- unsigned DestReg, unsigned BaseReg,
- int NumBytes, const TargetInstrInfo &TII,
- const Thumb1RegisterInfo& MRI,
- DebugLoc dl) {
- bool isSub = NumBytes < 0;
- unsigned Bytes = (unsigned)NumBytes;
- if (isSub) Bytes = -NumBytes;
- bool isMul4 = (Bytes & 3) == 0;
- bool isTwoAddr = false;
- bool DstNotEqBase = false;
- unsigned NumBits = 1;
- unsigned Scale = 1;
- int Opc = 0;
- int ExtraOpc = 0;
- bool NeedCC = false;
- bool NeedPred = false;
-
- if (DestReg == BaseReg && BaseReg == ARM::SP) {
- assert(isMul4 && "Thumb sp inc / dec size must be multiple of 4!");
- NumBits = 7;
- Scale = 4;
- Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
- isTwoAddr = true;
- } else if (!isSub && BaseReg == ARM::SP) {
- // r1 = add sp, 403
- // =>
- // r1 = add sp, 100 * 4
- // r1 = add r1, 3
- if (!isMul4) {
- Bytes &= ~3;
- ExtraOpc = ARM::tADDi3;
- }
- NumBits = 8;
- Scale = 4;
- Opc = ARM::tADDrSPi;
- } else {
- // sp = sub sp, c
- // r1 = sub sp, c
- // r8 = sub sp, c
- if (DestReg != BaseReg)
- DstNotEqBase = true;
- NumBits = 8;
- if (DestReg == ARM::SP) {
- Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
- assert(isMul4 && "Thumb sp inc / dec size must be multiple of 4!");
- NumBits = 7;
- Scale = 4;
- } else {
- Opc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
- NumBits = 8;
- NeedPred = NeedCC = true;
- }
- isTwoAddr = true;
- }
-
- unsigned NumMIs = calcNumMI(Opc, ExtraOpc, Bytes, NumBits, Scale);
- unsigned Threshold = (DestReg == ARM::SP) ? 3 : 2;
- if (NumMIs > Threshold) {
- // This will expand into too many instructions. Load the immediate from a
- // constpool entry.
- emitThumbRegPlusImmInReg(MBB, MBBI, DestReg, BaseReg, NumBytes, true, TII,
- MRI, dl);
- return;
- }
-
- if (DstNotEqBase) {
- if (isARMLowRegister(DestReg) && isARMLowRegister(BaseReg)) {
- // If both are low registers, emit DestReg = add BaseReg, min(Imm, 7)
- unsigned Chunk = (1 << 3) - 1;
- unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
- Bytes -= ThisVal;
- const TargetInstrDesc &TID = TII.get(isSub ? ARM::tSUBi3 : ARM::tADDi3);
- const MachineInstrBuilder MIB =
- AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TID, DestReg));
- AddDefaultPred(MIB.addReg(BaseReg, RegState::Kill).addImm(ThisVal));
- } else {
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
- .addReg(BaseReg, RegState::Kill);
- }
- BaseReg = DestReg;
- }
-
- unsigned Chunk = ((1 << NumBits) - 1) * Scale;
- while (Bytes) {
- unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
- Bytes -= ThisVal;
- ThisVal /= Scale;
- // Build the new tADD / tSUB.
- if (isTwoAddr) {
- MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg);
- if (NeedCC)
- MIB = AddDefaultT1CC(MIB);
- MIB .addReg(DestReg).addImm(ThisVal);
- if (NeedPred)
- MIB = AddDefaultPred(MIB);
- }
- else {
- bool isKill = BaseReg != ARM::SP;
- MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg);
- if (NeedCC)
- MIB = AddDefaultT1CC(MIB);
- MIB.addReg(BaseReg, getKillRegState(isKill)).addImm(ThisVal);
- if (NeedPred)
- MIB = AddDefaultPred(MIB);
- BaseReg = DestReg;
-
- if (Opc == ARM::tADDrSPi) {
- // r4 = add sp, imm
- // r4 = add r4, imm
- // ...
- NumBits = 8;
- Scale = 1;
- Chunk = ((1 << NumBits) - 1) * Scale;
- Opc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
- NeedPred = NeedCC = isTwoAddr = true;
- }
- }
- }
-
- if (ExtraOpc) {
- const TargetInstrDesc &TID = TII.get(ExtraOpc);
- AddDefaultPred(AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TID, DestReg))
- .addReg(DestReg, RegState::Kill)
- .addImm(((unsigned)NumBytes) & 3));
- }
-}
-
-static void emitSPUpdate(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- const TargetInstrInfo &TII, DebugLoc dl,
- const Thumb1RegisterInfo &MRI,
- int NumBytes) {
- emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes, TII,
- MRI, dl);
-}
-
-void Thumb1RegisterInfo::
-eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const {
- if (!hasReservedCallFrame(MF)) {
- // If we have alloca, convert as follows:
- // ADJCALLSTACKDOWN -> sub, sp, sp, amount
- // ADJCALLSTACKUP -> add, sp, sp, amount
- MachineInstr *Old = I;
- DebugLoc dl = Old->getDebugLoc();
- unsigned Amount = Old->getOperand(0).getImm();
- if (Amount != 0) {
- // We need to keep the stack aligned properly. To do this, we round the
- // amount of space needed for the outgoing arguments up to the next
- // alignment boundary.
- unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
- Amount = (Amount+Align-1)/Align*Align;
-
- // Replace the pseudo instruction with a new instruction...
- unsigned Opc = Old->getOpcode();
- if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
- emitSPUpdate(MBB, I, TII, dl, *this, -Amount);
- } else {
- assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
- emitSPUpdate(MBB, I, TII, dl, *this, Amount);
- }
- }
- }
- MBB.erase(I);
-}
-
-/// emitThumbConstant - Emit a series of instructions to materialize a
-/// constant.
-static void emitThumbConstant(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- unsigned DestReg, int Imm,
- const TargetInstrInfo &TII,
- const Thumb1RegisterInfo& MRI,
- DebugLoc dl) {
- bool isSub = Imm < 0;
- if (isSub) Imm = -Imm;
-
- int Chunk = (1 << 8) - 1;
- int ThisVal = (Imm > Chunk) ? Chunk : Imm;
- Imm -= ThisVal;
- AddDefaultPred(AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8),
- DestReg))
- .addImm(ThisVal));
- if (Imm > 0)
- emitThumbRegPlusImmediate(MBB, MBBI, DestReg, DestReg, Imm, TII, MRI, dl);
- if (isSub) {
- const TargetInstrDesc &TID = TII.get(ARM::tRSB);
- AddDefaultPred(AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TID, DestReg))
- .addReg(DestReg, RegState::Kill));
- }
-}
-
-static void removeOperands(MachineInstr &MI, unsigned i) {
- unsigned Op = i;
- for (unsigned e = MI.getNumOperands(); i != e; ++i)
- MI.RemoveOperand(Op);
-}
-
-int Thumb1RegisterInfo::
-rewriteFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
- unsigned FrameReg, int Offset,
- unsigned MOVOpc, unsigned ADDriOpc, unsigned SUBriOpc) const
-{
- // If/when eliminateFrameIndex() conforms with the ARMBaseRegisterInfo
- // version, the Thumb1-specific parts can be pulled out here.
- return 0;
-}
-
-/// saveScavengerRegister - Spill the register so it can be used by the
-/// register scavenger. Return true.
-bool
-Thumb1RegisterInfo::saveScavengerRegister(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- MachineBasicBlock::iterator &UseMI,
- const TargetRegisterClass *RC,
- unsigned Reg) const {
- // Thumb1 can't use the emergency spill slot on the stack because
- // ldr/str immediate offsets must be positive, and if we're referencing
- // off the frame pointer (if, for example, there are alloca() calls in
- // the function), the offset will be negative. Use R12 instead since that's
- // a call-clobbered register that we know won't be used in Thumb1 mode.
- DebugLoc DL = DebugLoc::getUnknownLoc();
- BuildMI(MBB, I, DL, TII.get(ARM::tMOVtgpr2gpr)).
- addReg(ARM::R12, RegState::Define).addReg(Reg, RegState::Kill);
-
- // The UseMI is where we would like to restore the register. If there's
- // interference with R12 before then, however, we'll need to restore it
- // before that instead and adjust the UseMI.
- bool done = false;
- for (MachineBasicBlock::iterator II = I; !done && II != UseMI ; ++II) {
- // If this instruction affects R12, adjust our restore point.
- for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = II->getOperand(i);
- if (!MO.isReg() || MO.isUndef() || !MO.getReg() ||
- TargetRegisterInfo::isVirtualRegister(MO.getReg()))
- continue;
- if (MO.getReg() == ARM::R12) {
- UseMI = II;
- done = true;
- break;
- }
- }
- }
- // Restore the register from R12
- BuildMI(MBB, UseMI, DL, TII.get(ARM::tMOVgpr2tgpr)).
- addReg(Reg, RegState::Define).addReg(ARM::R12, RegState::Kill);
-
- return true;
-}
-
-unsigned
-Thumb1RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj, int *Value,
- RegScavenger *RS) const{
- unsigned VReg = 0;
- unsigned i = 0;
- MachineInstr &MI = *II;
- MachineBasicBlock &MBB = *MI.getParent();
- MachineFunction &MF = *MBB.getParent();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- DebugLoc dl = MI.getDebugLoc();
-
- while (!MI.getOperand(i).isFI()) {
- ++i;
- assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
- }
-
- unsigned FrameReg = ARM::SP;
- int FrameIndex = MI.getOperand(i).getIndex();
- int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
- MF.getFrameInfo()->getStackSize() + SPAdj;
-
- if (AFI->isGPRCalleeSavedArea1Frame(FrameIndex))
- Offset -= AFI->getGPRCalleeSavedArea1Offset();
- else if (AFI->isGPRCalleeSavedArea2Frame(FrameIndex))
- Offset -= AFI->getGPRCalleeSavedArea2Offset();
- else if (MF.getFrameInfo()->hasVarSizedObjects()) {
- assert(SPAdj == 0 && hasFP(MF) && "Unexpected");
- // There are alloca()'s in this function, must reference off the frame
- // pointer instead.
- FrameReg = getFrameRegister(MF);
- Offset -= AFI->getFramePtrSpillOffset();
- }
-
- unsigned Opcode = MI.getOpcode();
- const TargetInstrDesc &Desc = MI.getDesc();
- unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
-
- if (Opcode == ARM::tADDrSPi) {
- Offset += MI.getOperand(i+1).getImm();
-
- // Can't use tADDrSPi if it's based off the frame pointer.
- unsigned NumBits = 0;
- unsigned Scale = 1;
- if (FrameReg != ARM::SP) {
- Opcode = ARM::tADDi3;
- MI.setDesc(TII.get(Opcode));
- NumBits = 3;
- } else {
- NumBits = 8;
- Scale = 4;
- assert((Offset & 3) == 0 &&
- "Thumb add/sub sp, #imm immediate must be multiple of 4!");
- }
-
- unsigned PredReg;
- if (Offset == 0 && getInstrPredicate(&MI, PredReg) == ARMCC::AL) {
- // Turn it into a move.
- MI.setDesc(TII.get(ARM::tMOVgpr2tgpr));
- MI.getOperand(i).ChangeToRegister(FrameReg, false);
- // Remove offset and remaining explicit predicate operands.
- do MI.RemoveOperand(i+1);
- while (MI.getNumOperands() > i+1 &&
- (!MI.getOperand(i+1).isReg() || !MI.getOperand(i+1).isImm()));
- return 0;
- }
-
- // Common case: small offset, fits into instruction.
- unsigned Mask = (1 << NumBits) - 1;
- if (((Offset / Scale) & ~Mask) == 0) {
- // Replace the FrameIndex with sp / fp
- if (Opcode == ARM::tADDi3) {
- removeOperands(MI, i);
- MachineInstrBuilder MIB(&MI);
- AddDefaultPred(AddDefaultT1CC(MIB).addReg(FrameReg)
- .addImm(Offset / Scale));
- } else {
- MI.getOperand(i).ChangeToRegister(FrameReg, false);
- MI.getOperand(i+1).ChangeToImmediate(Offset / Scale);
- }
- return 0;
- }
-
- unsigned DestReg = MI.getOperand(0).getReg();
- unsigned Bytes = (Offset > 0) ? Offset : -Offset;
- unsigned NumMIs = calcNumMI(Opcode, 0, Bytes, NumBits, Scale);
- // MI would expand into a large number of instructions. Don't try to
- // simplify the immediate.
- if (NumMIs > 2) {
- emitThumbRegPlusImmediate(MBB, II, DestReg, FrameReg, Offset, TII,
- *this, dl);
- MBB.erase(II);
- return 0;
- }
-
- if (Offset > 0) {
- // Translate r0 = add sp, imm to
- // r0 = add sp, 255*4
- // r0 = add r0, (imm - 255*4)
- if (Opcode == ARM::tADDi3) {
- removeOperands(MI, i);
- MachineInstrBuilder MIB(&MI);
- AddDefaultPred(AddDefaultT1CC(MIB).addReg(FrameReg).addImm(Mask));
- } else {
- MI.getOperand(i).ChangeToRegister(FrameReg, false);
- MI.getOperand(i+1).ChangeToImmediate(Mask);
- }
- Offset = (Offset - Mask * Scale);
- MachineBasicBlock::iterator NII = llvm::next(II);
- emitThumbRegPlusImmediate(MBB, NII, DestReg, DestReg, Offset, TII,
- *this, dl);
- } else {
- // Translate r0 = add sp, -imm to
- // r0 = -imm (this is then translated into a series of instructions)
- // r0 = add r0, sp
- emitThumbConstant(MBB, II, DestReg, Offset, TII, *this, dl);
-
- MI.setDesc(TII.get(ARM::tADDhirr));
- MI.getOperand(i).ChangeToRegister(DestReg, false, false, true);
- MI.getOperand(i+1).ChangeToRegister(FrameReg, false);
- if (Opcode == ARM::tADDi3) {
- MachineInstrBuilder MIB(&MI);
- AddDefaultPred(MIB);
- }
- }
- return 0;
- } else {
- unsigned ImmIdx = 0;
- int InstrOffs = 0;
- unsigned NumBits = 0;
- unsigned Scale = 1;
- switch (AddrMode) {
- case ARMII::AddrModeT1_s: {
- ImmIdx = i+1;
- InstrOffs = MI.getOperand(ImmIdx).getImm();
- NumBits = (FrameReg == ARM::SP) ? 8 : 5;
- Scale = 4;
- break;
- }
- default:
- llvm_unreachable("Unsupported addressing mode!");
- break;
- }
-
- Offset += InstrOffs * Scale;
- assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
-
- // Common case: small offset, fits into instruction.
- MachineOperand &ImmOp = MI.getOperand(ImmIdx);
- int ImmedOffset = Offset / Scale;
- unsigned Mask = (1 << NumBits) - 1;
- if ((unsigned)Offset <= Mask * Scale) {
- // Replace the FrameIndex with sp
- MI.getOperand(i).ChangeToRegister(FrameReg, false);
- ImmOp.ChangeToImmediate(ImmedOffset);
- return 0;
- }
-
- bool isThumbSpillRestore = Opcode == ARM::tRestore || Opcode == ARM::tSpill;
- if (AddrMode == ARMII::AddrModeT1_s) {
- // Thumb tLDRspi, tSTRspi. These will change to instructions that use
- // a different base register.
- NumBits = 5;
- Mask = (1 << NumBits) - 1;
- }
- // If this is a thumb spill / restore, we will be using a constpool load to
- // materialize the offset.
- if (AddrMode == ARMII::AddrModeT1_s && isThumbSpillRestore)
- ImmOp.ChangeToImmediate(0);
- else {
- // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
- ImmedOffset = ImmedOffset & Mask;
- ImmOp.ChangeToImmediate(ImmedOffset);
- Offset &= ~(Mask*Scale);
- }
- }
-
- // If we get here, the immediate doesn't fit into the instruction. We folded
- // as much as possible above; handle the rest by providing a register that is
- // SP+LargeImm.
- assert(Offset && "This code isn't needed if offset already handled!");
-
- // Remove predicate first.
- int PIdx = MI.findFirstPredOperandIdx();
- if (PIdx != -1)
- removeOperands(MI, PIdx);
-
- if (Desc.mayLoad()) {
- // Use the destination register to materialize sp + offset.
- unsigned TmpReg = MI.getOperand(0).getReg();
- bool UseRR = false;
- if (Opcode == ARM::tRestore) {
- if (FrameReg == ARM::SP)
- emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,
- Offset, false, TII, *this, dl);
- else {
- emitLoadConstPool(MBB, II, dl, TmpReg, 0, Offset);
- UseRR = true;
- }
- } else {
- emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII,
- *this, dl);
- }
-
- MI.setDesc(TII.get(ARM::tLDR));
- MI.getOperand(i).ChangeToRegister(TmpReg, false, false, true);
- if (UseRR)
- // Use [reg, reg] addrmode.
- MI.addOperand(MachineOperand::CreateReg(FrameReg, false));
- else // tLDR has an extra register operand.
- MI.addOperand(MachineOperand::CreateReg(0, false));
- } else if (Desc.mayStore()) {
- VReg = MF.getRegInfo().createVirtualRegister(ARM::tGPRRegisterClass);
- assert (Value && "Frame index virtual allocated, but Value arg is NULL!");
- *Value = Offset;
- bool UseRR = false;
-
- if (Opcode == ARM::tSpill) {
- if (FrameReg == ARM::SP)
- emitThumbRegPlusImmInReg(MBB, II, VReg, FrameReg,
- Offset, false, TII, *this, dl);
- else {
- emitLoadConstPool(MBB, II, dl, VReg, 0, Offset);
- UseRR = true;
- }
- } else
- emitThumbRegPlusImmediate(MBB, II, VReg, FrameReg, Offset, TII,
- *this, dl);
- MI.setDesc(TII.get(ARM::tSTR));
- MI.getOperand(i).ChangeToRegister(VReg, false, false, true);
- if (UseRR) // Use [reg, reg] addrmode.
- MI.addOperand(MachineOperand::CreateReg(FrameReg, false));
- else // tSTR has an extra register operand.
- MI.addOperand(MachineOperand::CreateReg(0, false));
- } else
- assert(false && "Unexpected opcode!");
-
- // Add predicate back if it's needed.
- if (MI.getDesc().isPredicable()) {
- MachineInstrBuilder MIB(&MI);
- AddDefaultPred(MIB);
- }
- return VReg;
-}
-
-void Thumb1RegisterInfo::emitPrologue(MachineFunction &MF) const {
- MachineBasicBlock &MBB = MF.front();
- MachineBasicBlock::iterator MBBI = MBB.begin();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
- unsigned NumBytes = MFI->getStackSize();
- const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- DebugLoc dl = (MBBI != MBB.end() ?
- MBBI->getDebugLoc() : DebugLoc::getUnknownLoc());
-
- // Thumb add/sub sp, imm8 instructions implicitly multiply the offset by 4.
- NumBytes = (NumBytes + 3) & ~3;
- MFI->setStackSize(NumBytes);
-
- // Determine the size of each callee-save spill area and record which frame
- // index belongs to which callee-save spill area.
- unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
- int FramePtrSpillFI = 0;
-
- if (VARegSaveSize)
- emitSPUpdate(MBB, MBBI, TII, dl, *this, -VARegSaveSize);
-
- if (!AFI->hasStackFrame()) {
- if (NumBytes != 0)
- emitSPUpdate(MBB, MBBI, TII, dl, *this, -NumBytes);
- return;
- }
-
- for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned Reg = CSI[i].getReg();
- int FI = CSI[i].getFrameIdx();
- switch (Reg) {
- case ARM::R4:
- case ARM::R5:
- case ARM::R6:
- case ARM::R7:
- case ARM::LR:
- if (Reg == FramePtr)
- FramePtrSpillFI = FI;
- AFI->addGPRCalleeSavedArea1Frame(FI);
- GPRCS1Size += 4;
- break;
- case ARM::R8:
- case ARM::R9:
- case ARM::R10:
- case ARM::R11:
- if (Reg == FramePtr)
- FramePtrSpillFI = FI;
- if (STI.isTargetDarwin()) {
- AFI->addGPRCalleeSavedArea2Frame(FI);
- GPRCS2Size += 4;
- } else {
- AFI->addGPRCalleeSavedArea1Frame(FI);
- GPRCS1Size += 4;
- }
- break;
- default:
- AFI->addDPRCalleeSavedAreaFrame(FI);
- DPRCSSize += 8;
- }
- }
-
- if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tPUSH) {
- ++MBBI;
- if (MBBI != MBB.end())
- dl = MBBI->getDebugLoc();
- }
-
- // Darwin ABI requires FP to point to the stack slot that contains the
- // previous FP.
- if (STI.isTargetDarwin() || hasFP(MF)) {
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tADDrSPi), FramePtr)
- .addFrameIndex(FramePtrSpillFI).addImm(0);
- }
-
- // Determine starting offsets of spill areas.
- unsigned DPRCSOffset = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
- unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
- unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
- AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) + NumBytes);
- AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
- AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
- AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);
-
- NumBytes = DPRCSOffset;
- if (NumBytes) {
- // Insert it after all the callee-save spills.
- emitSPUpdate(MBB, MBBI, TII, dl, *this, -NumBytes);
- }
-
- if (STI.isTargetELF() && hasFP(MF)) {
- MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
- AFI->getFramePtrSpillOffset());
- }
-
- AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
- AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
- AFI->setDPRCalleeSavedAreaSize(DPRCSSize);
-}
-
-static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
- for (unsigned i = 0; CSRegs[i]; ++i)
- if (Reg == CSRegs[i])
- return true;
- return false;
-}
-
-static bool isCSRestore(MachineInstr *MI, const unsigned *CSRegs) {
- if (MI->getOpcode() == ARM::tRestore &&
- MI->getOperand(1).isFI() &&
- isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs))
- return true;
- else if (MI->getOpcode() == ARM::tPOP) {
- // The first three operands are predicates and such. The last two are
- // imp-def and imp-use of SP. Check everything in between.
- for (int i = 3, e = MI->getNumOperands() - 2; i != e; ++i)
- if (!isCalleeSavedRegister(MI->getOperand(i).getReg(), CSRegs))
- return false;
- return true;
- }
- return false;
-}
-
-void Thumb1RegisterInfo::emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const {
- MachineBasicBlock::iterator MBBI = prior(MBB.end());
- assert((MBBI->getOpcode() == ARM::tBX_RET ||
- MBBI->getOpcode() == ARM::tPOP_RET) &&
- "Can only insert epilog into returning blocks");
- DebugLoc dl = MBBI->getDebugLoc();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
- unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
- int NumBytes = (int)MFI->getStackSize();
- const unsigned *CSRegs = getCalleeSavedRegs();
-
- if (!AFI->hasStackFrame()) {
- if (NumBytes != 0)
- emitSPUpdate(MBB, MBBI, TII, dl, *this, NumBytes);
- } else {
- // Unwind MBBI to point to first LDR / VLDRD.
- if (MBBI != MBB.begin()) {
- do
- --MBBI;
- while (MBBI != MBB.begin() && isCSRestore(MBBI, CSRegs));
- if (!isCSRestore(MBBI, CSRegs))
- ++MBBI;
- }
-
- // Move SP to start of FP callee save spill area.
- NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
- AFI->getGPRCalleeSavedArea2Size() +
- AFI->getDPRCalleeSavedAreaSize());
-
- if (hasFP(MF)) {
- NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
- // Reset SP based on the frame pointer only if the stack frame extends beyond
- // the frame pointer stack slot, or the target is ELF and the function has an FP.
- if (NumBytes)
- emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, FramePtr, -NumBytes,
- TII, *this, dl);
- else
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP)
- .addReg(FramePtr);
- } else {
- if (MBBI->getOpcode() == ARM::tBX_RET &&
- &MBB.front() != MBBI &&
- prior(MBBI)->getOpcode() == ARM::tPOP) {
- MachineBasicBlock::iterator PMBBI = prior(MBBI);
- emitSPUpdate(MBB, PMBBI, TII, dl, *this, NumBytes);
- } else
- emitSPUpdate(MBB, MBBI, TII, dl, *this, NumBytes);
- }
- }
-
- if (VARegSaveSize) {
- // Move back past the callee-saved register restoration
- while (MBBI != MBB.end() && isCSRestore(MBBI, CSRegs))
- ++MBBI;
- // Epilogue for vararg functions: pop LR to R3 and branch off it.
- AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tPOP)))
- .addReg(0) // No write back.
- .addReg(ARM::R3, RegState::Define);
-
- emitSPUpdate(MBB, MBBI, TII, dl, *this, VARegSaveSize);
-
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tBX_RET_vararg))
- .addReg(ARM::R3, RegState::Kill);
- // erase the old tBX_RET instruction
- MBB.erase(MBBI);
- }
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/Thumb1RegisterInfo.h b/libclamav/c++/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
deleted file mode 100644
index 37ad388..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
+++ /dev/null
@@ -1,70 +0,0 @@
-//===- Thumb1RegisterInfo.h - Thumb-1 Register Information Impl -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Thumb-1 implementation of the TargetRegisterInfo
-// class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef THUMB1REGISTERINFO_H
-#define THUMB1REGISTERINFO_H
-
-#include "ARM.h"
-#include "ARMRegisterInfo.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-
-namespace llvm {
- class ARMSubtarget;
- class ARMBaseInstrInfo;
- class Type;
-
-struct Thumb1RegisterInfo : public ARMBaseRegisterInfo {
-public:
- Thumb1RegisterInfo(const ARMBaseInstrInfo &tii, const ARMSubtarget &STI);
-
- /// emitLoadConstPool - Emits a load from constpool to materialize the
- /// specified immediate.
- void emitLoadConstPool(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- DebugLoc dl,
- unsigned DestReg, unsigned SubIdx, int Val,
- ARMCC::CondCodes Pred = ARMCC::AL,
- unsigned PredReg = 0) const;
-
- /// Code Generation virtual methods...
- const TargetRegisterClass *
- getPhysicalRegisterRegClass(unsigned Reg, EVT VT = MVT::Other) const;
-
- bool hasReservedCallFrame(MachineFunction &MF) const;
-
- void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
-
- // rewrite MI to access 'Offset' bytes from the FP. Return the offset that
- // could not be handled directly in MI.
- int rewriteFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
- unsigned FrameReg, int Offset,
- unsigned MOVOpc, unsigned ADDriOpc, unsigned SUBriOpc) const;
-
- bool saveScavengerRegister(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- MachineBasicBlock::iterator &UseMI,
- const TargetRegisterClass *RC,
- unsigned Reg) const;
- unsigned eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj, int *Value = NULL,
- RegScavenger *RS = NULL) const;
-
- void emitPrologue(MachineFunction &MF) const;
- void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
-};
-}
-
-#endif // THUMB1REGISTERINFO_H
diff --git a/libclamav/c++/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp b/libclamav/c++/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
deleted file mode 100644
index f5ba155..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
-//===-- Thumb2ITBlockPass.cpp - Insert Thumb IT blocks ----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "thumb2-it"
-#include "ARM.h"
-#include "ARMMachineFunctionInfo.h"
-#include "Thumb2InstrInfo.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/ADT/Statistic.h"
-using namespace llvm;
-
-STATISTIC(NumITs, "Number of IT blocks inserted");
-
-namespace {
- struct Thumb2ITBlockPass : public MachineFunctionPass {
- static char ID;
- Thumb2ITBlockPass() : MachineFunctionPass(&ID) {}
-
- const Thumb2InstrInfo *TII;
- ARMFunctionInfo *AFI;
-
- virtual bool runOnMachineFunction(MachineFunction &Fn);
-
- virtual const char *getPassName() const {
- return "Thumb IT blocks insertion pass";
- }
-
- private:
- bool InsertITBlocks(MachineBasicBlock &MBB);
- };
- char Thumb2ITBlockPass::ID = 0;
-}
-
-static ARMCC::CondCodes getPredicate(const MachineInstr *MI, unsigned &PredReg){
- unsigned Opc = MI->getOpcode();
- if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
- return ARMCC::AL;
- return llvm::getInstrPredicate(MI, PredReg);
-}
-
-bool Thumb2ITBlockPass::InsertITBlocks(MachineBasicBlock &MBB) {
- bool Modified = false;
-
- MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
- while (MBBI != E) {
- MachineInstr *MI = &*MBBI;
- DebugLoc dl = MI->getDebugLoc();
- unsigned PredReg = 0;
- ARMCC::CondCodes CC = getPredicate(MI, PredReg);
-
- if (CC == ARMCC::AL) {
- ++MBBI;
- continue;
- }
-
- // Insert an IT instruction.
- MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII->get(ARM::t2IT))
- .addImm(CC);
- ++MBBI;
-
- // Finalize IT mask.
- ARMCC::CondCodes OCC = ARMCC::getOppositeCondition(CC);
- unsigned Mask = 0, Pos = 3;
- // Branches, including tricky ones like LDM_RET, need to end an IT
- // block so check the instruction we just put in the block.
- while (MBBI != E && Pos &&
- (!MI->getDesc().isBranch() && !MI->getDesc().isReturn())) {
- MachineInstr *NMI = &*MBBI;
- MI = NMI;
- DebugLoc ndl = NMI->getDebugLoc();
- unsigned NPredReg = 0;
- ARMCC::CondCodes NCC = getPredicate(NMI, NPredReg);
- if (NCC == OCC) {
- Mask |= (1 << Pos);
- } else if (NCC != CC)
- break;
- --Pos;
- ++MBBI;
- }
- Mask |= (1 << Pos);
- MIB.addImm(Mask);
- Modified = true;
- ++NumITs;
- }
-
- return Modified;
-}
-
-bool Thumb2ITBlockPass::runOnMachineFunction(MachineFunction &Fn) {
- const TargetMachine &TM = Fn.getTarget();
- AFI = Fn.getInfo<ARMFunctionInfo>();
- TII = static_cast<const Thumb2InstrInfo*>(TM.getInstrInfo());
-
- if (!AFI->isThumbFunction())
- return false;
-
- bool Modified = false;
- for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
- ++MFI) {
- MachineBasicBlock &MBB = *MFI;
- Modified |= InsertITBlocks(MBB);
- }
-
- return Modified;
-}
-
-/// createThumb2ITBlockPass - Returns an instance of the Thumb2 IT blocks
-/// insertion pass.
-FunctionPass *llvm::createThumb2ITBlockPass() {
- return new Thumb2ITBlockPass();
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp b/libclamav/c++/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
deleted file mode 100644
index 20f13f1..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
+++ /dev/null
@@ -1,482 +0,0 @@
-//===- Thumb2InstrInfo.cpp - Thumb-2 Instruction Information ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Thumb-2 implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "Thumb2InstrInfo.h"
-#include "ARM.h"
-#include "ARMConstantPoolValue.h"
-#include "ARMAddressingModes.h"
-#include "ARMGenInstrInfo.inc"
-#include "ARMMachineFunctionInfo.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineMemOperand.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/ADT/SmallVector.h"
-#include "Thumb2InstrInfo.h"
-
-using namespace llvm;
-
-Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
- : ARMBaseInstrInfo(STI), RI(*this, STI) {
-}
-
-unsigned Thumb2InstrInfo::getUnindexedOpcode(unsigned Opc) const {
- // FIXME
- return 0;
-}
-
-bool
-Thumb2InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC) const {
- DebugLoc DL = DebugLoc::getUnknownLoc();
- if (I != MBB.end()) DL = I->getDebugLoc();
-
- if (DestRC == ARM::GPRRegisterClass &&
- SrcRC == ARM::GPRRegisterClass) {
- BuildMI(MBB, I, DL, get(ARM::tMOVgpr2gpr), DestReg).addReg(SrcReg);
- return true;
- } else if (DestRC == ARM::GPRRegisterClass &&
- SrcRC == ARM::tGPRRegisterClass) {
- BuildMI(MBB, I, DL, get(ARM::tMOVtgpr2gpr), DestReg).addReg(SrcReg);
- return true;
- } else if (DestRC == ARM::tGPRRegisterClass &&
- SrcRC == ARM::GPRRegisterClass) {
- BuildMI(MBB, I, DL, get(ARM::tMOVgpr2tgpr), DestReg).addReg(SrcReg);
- return true;
- }
-
- // Handle SPR, DPR, and QPR copies.
- return ARMBaseInstrInfo::copyRegToReg(MBB, I, DestReg, SrcReg, DestRC, SrcRC);
-}
-
-void Thumb2InstrInfo::
-storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned SrcReg, bool isKill, int FI,
- const TargetRegisterClass *RC) const {
- DebugLoc DL = DebugLoc::getUnknownLoc();
- if (I != MBB.end()) DL = I->getDebugLoc();
-
- if (RC == ARM::GPRRegisterClass) {
- MachineFunction &MF = *MBB.getParent();
- MachineFrameInfo &MFI = *MF.getFrameInfo();
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
- MachineMemOperand::MOStore, 0,
- MFI.getObjectSize(FI),
- MFI.getObjectAlignment(FI));
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::t2STRi12))
- .addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
- return;
- }
-
- ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC);
-}
-
-void Thumb2InstrInfo::
-loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned DestReg, int FI,
- const TargetRegisterClass *RC) const {
- DebugLoc DL = DebugLoc::getUnknownLoc();
- if (I != MBB.end()) DL = I->getDebugLoc();
-
- if (RC == ARM::GPRRegisterClass) {
- MachineFunction &MF = *MBB.getParent();
- MachineFrameInfo &MFI = *MF.getFrameInfo();
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
- MachineMemOperand::MOLoad, 0,
- MFI.getObjectSize(FI),
- MFI.getObjectAlignment(FI));
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
- .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
- return;
- }
-
- ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC);
-}
-
-void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI, DebugLoc dl,
- unsigned DestReg, unsigned BaseReg, int NumBytes,
- ARMCC::CondCodes Pred, unsigned PredReg,
- const ARMBaseInstrInfo &TII) {
- bool isSub = NumBytes < 0;
- if (isSub) NumBytes = -NumBytes;
-
- // If profitable, use a movw or movt to materialize the offset.
- // FIXME: Use the scavenger to grab a scratch register.
- if (DestReg != ARM::SP && DestReg != BaseReg &&
- NumBytes >= 4096 &&
- ARM_AM::getT2SOImmVal(NumBytes) == -1) {
- bool Fits = false;
- if (NumBytes < 65536) {
- // Use a movw to materialize the 16-bit constant.
- BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), DestReg)
- .addImm(NumBytes)
- .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
- Fits = true;
- } else if ((NumBytes & 0xffff) == 0) {
- // Use a movt to materialize the 32-bit constant.
- BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVTi16), DestReg)
- .addReg(DestReg)
- .addImm(NumBytes >> 16)
- .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
- Fits = true;
- }
-
- if (Fits) {
- if (isSub) {
- BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), DestReg)
- .addReg(BaseReg, RegState::Kill)
- .addReg(DestReg, RegState::Kill)
- .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
- } else {
- BuildMI(MBB, MBBI, dl, TII.get(ARM::t2ADDrr), DestReg)
- .addReg(DestReg, RegState::Kill)
- .addReg(BaseReg, RegState::Kill)
- .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
- }
- return;
- }
- }
-
- while (NumBytes) {
- unsigned ThisVal = NumBytes;
- unsigned Opc = 0;
- if (DestReg == ARM::SP && BaseReg != ARM::SP) {
- // mov sp, rn. Note t2MOVr cannot be used.
- BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr),DestReg).addReg(BaseReg);
- BaseReg = ARM::SP;
- continue;
- }
-
- if (BaseReg == ARM::SP) {
- // sub sp, sp, #imm7
- if (DestReg == ARM::SP && (ThisVal < ((1 << 7)-1) * 4)) {
- assert((ThisVal & 3) == 0 && "Stack update is not multiple of 4?");
- Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
- // FIXME: Fix Thumb1 immediate encoding.
- BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
- .addReg(BaseReg).addImm(ThisVal/4);
- NumBytes = 0;
- continue;
- }
-
- // sub rd, sp, so_imm
- Opc = isSub ? ARM::t2SUBrSPi : ARM::t2ADDrSPi;
- if (ARM_AM::getT2SOImmVal(NumBytes) != -1) {
- NumBytes = 0;
- } else {
- // FIXME: Move this to ARMAddressingModes.h?
- unsigned RotAmt = CountLeadingZeros_32(ThisVal);
- ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
- NumBytes &= ~ThisVal;
- assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
- "Bit extraction didn't work?");
- }
- } else {
- assert(DestReg != ARM::SP && BaseReg != ARM::SP);
- Opc = isSub ? ARM::t2SUBri : ARM::t2ADDri;
- if (ARM_AM::getT2SOImmVal(NumBytes) != -1) {
- NumBytes = 0;
- } else if (ThisVal < 4096) {
- Opc = isSub ? ARM::t2SUBri12 : ARM::t2ADDri12;
- NumBytes = 0;
- } else {
- // FIXME: Move this to ARMAddressingModes.h?
- unsigned RotAmt = CountLeadingZeros_32(ThisVal);
- ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
- NumBytes &= ~ThisVal;
- assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
- "Bit extraction didn't work?");
- }
- }
-
- // Build the new ADD / SUB.
- AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
- .addReg(BaseReg, RegState::Kill)
- .addImm(ThisVal)));
-
- BaseReg = DestReg;
- }
-}
-
-static unsigned
-negativeOffsetOpcode(unsigned opcode)
-{
- switch (opcode) {
- case ARM::t2LDRi12: return ARM::t2LDRi8;
- case ARM::t2LDRHi12: return ARM::t2LDRHi8;
- case ARM::t2LDRBi12: return ARM::t2LDRBi8;
- case ARM::t2LDRSHi12: return ARM::t2LDRSHi8;
- case ARM::t2LDRSBi12: return ARM::t2LDRSBi8;
- case ARM::t2STRi12: return ARM::t2STRi8;
- case ARM::t2STRBi12: return ARM::t2STRBi8;
- case ARM::t2STRHi12: return ARM::t2STRHi8;
-
- case ARM::t2LDRi8:
- case ARM::t2LDRHi8:
- case ARM::t2LDRBi8:
- case ARM::t2LDRSHi8:
- case ARM::t2LDRSBi8:
- case ARM::t2STRi8:
- case ARM::t2STRBi8:
- case ARM::t2STRHi8:
- return opcode;
-
- default:
- break;
- }
-
- return 0;
-}
-
-static unsigned
-positiveOffsetOpcode(unsigned opcode)
-{
- switch (opcode) {
- case ARM::t2LDRi8: return ARM::t2LDRi12;
- case ARM::t2LDRHi8: return ARM::t2LDRHi12;
- case ARM::t2LDRBi8: return ARM::t2LDRBi12;
- case ARM::t2LDRSHi8: return ARM::t2LDRSHi12;
- case ARM::t2LDRSBi8: return ARM::t2LDRSBi12;
- case ARM::t2STRi8: return ARM::t2STRi12;
- case ARM::t2STRBi8: return ARM::t2STRBi12;
- case ARM::t2STRHi8: return ARM::t2STRHi12;
-
- case ARM::t2LDRi12:
- case ARM::t2LDRHi12:
- case ARM::t2LDRBi12:
- case ARM::t2LDRSHi12:
- case ARM::t2LDRSBi12:
- case ARM::t2STRi12:
- case ARM::t2STRBi12:
- case ARM::t2STRHi12:
- return opcode;
-
- default:
- break;
- }
-
- return 0;
-}
-
-static unsigned
-immediateOffsetOpcode(unsigned opcode)
-{
- switch (opcode) {
- case ARM::t2LDRs: return ARM::t2LDRi12;
- case ARM::t2LDRHs: return ARM::t2LDRHi12;
- case ARM::t2LDRBs: return ARM::t2LDRBi12;
- case ARM::t2LDRSHs: return ARM::t2LDRSHi12;
- case ARM::t2LDRSBs: return ARM::t2LDRSBi12;
- case ARM::t2STRs: return ARM::t2STRi12;
- case ARM::t2STRBs: return ARM::t2STRBi12;
- case ARM::t2STRHs: return ARM::t2STRHi12;
-
- case ARM::t2LDRi12:
- case ARM::t2LDRHi12:
- case ARM::t2LDRBi12:
- case ARM::t2LDRSHi12:
- case ARM::t2LDRSBi12:
- case ARM::t2STRi12:
- case ARM::t2STRBi12:
- case ARM::t2STRHi12:
- case ARM::t2LDRi8:
- case ARM::t2LDRHi8:
- case ARM::t2LDRBi8:
- case ARM::t2LDRSHi8:
- case ARM::t2LDRSBi8:
- case ARM::t2STRi8:
- case ARM::t2STRBi8:
- case ARM::t2STRHi8:
- return opcode;
-
- default:
- break;
- }
-
- return 0;
-}
-
-bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
- unsigned FrameReg, int &Offset,
- const ARMBaseInstrInfo &TII) {
- unsigned Opcode = MI.getOpcode();
- const TargetInstrDesc &Desc = MI.getDesc();
- unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
- bool isSub = false;
-
- // Memory operands in inline assembly always use AddrModeT2_i12.
- if (Opcode == ARM::INLINEASM)
- AddrMode = ARMII::AddrModeT2_i12; // FIXME. mode for thumb2?
-
- if (Opcode == ARM::t2ADDri || Opcode == ARM::t2ADDri12) {
- Offset += MI.getOperand(FrameRegIdx+1).getImm();
-
- bool isSP = FrameReg == ARM::SP;
- unsigned PredReg;
- if (Offset == 0 && getInstrPredicate(&MI, PredReg) == ARMCC::AL) {
- // Turn it into a move.
- MI.setDesc(TII.get(ARM::tMOVgpr2gpr));
- MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
- // Remove offset and remaining explicit predicate operands.
- do MI.RemoveOperand(FrameRegIdx+1);
- while (MI.getNumOperands() > FrameRegIdx+1 &&
- (!MI.getOperand(FrameRegIdx+1).isReg() ||
- !MI.getOperand(FrameRegIdx+1).isImm()));
- return true;
- }
-
- if (Offset < 0) {
- Offset = -Offset;
- isSub = true;
- MI.setDesc(TII.get(isSP ? ARM::t2SUBrSPi : ARM::t2SUBri));
- } else {
- MI.setDesc(TII.get(isSP ? ARM::t2ADDrSPi : ARM::t2ADDri));
- }
-
- // Common case: small offset, fits into instruction.
- if (ARM_AM::getT2SOImmVal(Offset) != -1) {
- MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
- MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
- Offset = 0;
- return true;
- }
- // Another common case: imm12.
- if (Offset < 4096) {
- unsigned NewOpc = isSP
- ? (isSub ? ARM::t2SUBrSPi12 : ARM::t2ADDrSPi12)
- : (isSub ? ARM::t2SUBri12 : ARM::t2ADDri12);
- MI.setDesc(TII.get(NewOpc));
- MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
- MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
- Offset = 0;
- return true;
- }
-
- // Otherwise, extract 8 adjacent bits from the immediate into this
- // t2ADDri/t2SUBri.
- unsigned RotAmt = CountLeadingZeros_32(Offset);
- unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xff000000U, RotAmt);
-
- // We will handle these bits from offset, clear them.
- Offset &= ~ThisImmVal;
-
- assert(ARM_AM::getT2SOImmVal(ThisImmVal) != -1 &&
- "Bit extraction didn't work?");
- MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
- } else {
-
- // AddrMode4 and AddrMode6 cannot handle any offset.
- if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
- return false;
-
- // AddrModeT2_so cannot handle any offset. If there is no offset
- // register then we change to an immediate version.
- unsigned NewOpc = Opcode;
- if (AddrMode == ARMII::AddrModeT2_so) {
- unsigned OffsetReg = MI.getOperand(FrameRegIdx+1).getReg();
- if (OffsetReg != 0) {
- MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
- return Offset == 0;
- }
-
- MI.RemoveOperand(FrameRegIdx+1);
- MI.getOperand(FrameRegIdx+1).ChangeToImmediate(0);
- NewOpc = immediateOffsetOpcode(Opcode);
- AddrMode = ARMII::AddrModeT2_i12;
- }
-
- unsigned NumBits = 0;
- unsigned Scale = 1;
- if (AddrMode == ARMII::AddrModeT2_i8 || AddrMode == ARMII::AddrModeT2_i12) {
- // i8 supports only negative offsets and i12 supports only positive ones,
- // so convert Opcode to the appropriate instruction based on the sign
- // of Offset.
- Offset += MI.getOperand(FrameRegIdx+1).getImm();
- if (Offset < 0) {
- NewOpc = negativeOffsetOpcode(Opcode);
- NumBits = 8;
- isSub = true;
- Offset = -Offset;
- } else {
- NewOpc = positiveOffsetOpcode(Opcode);
- NumBits = 12;
- }
- } else if (AddrMode == ARMII::AddrMode5) {
- // VFP address mode.
- const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
- int InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
- if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
- InstrOffs *= -1;
- NumBits = 8;
- Scale = 4;
- Offset += InstrOffs * 4;
- assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
- if (Offset < 0) {
- Offset = -Offset;
- isSub = true;
- }
- } else {
- llvm_unreachable("Unsupported addressing mode!");
- }
-
- if (NewOpc != Opcode)
- MI.setDesc(TII.get(NewOpc));
-
- MachineOperand &ImmOp = MI.getOperand(FrameRegIdx+1);
-
- // Attempt to fold address computation
- // Common case: small offset, fits into instruction.
- int ImmedOffset = Offset / Scale;
- unsigned Mask = (1 << NumBits) - 1;
- if ((unsigned)Offset <= Mask * Scale) {
- // Replace the FrameIndex with fp/sp
- MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
- if (isSub) {
- if (AddrMode == ARMII::AddrMode5)
- // FIXME: Not consistent.
- ImmedOffset |= 1 << NumBits;
- else
- ImmedOffset = -ImmedOffset;
- }
- ImmOp.ChangeToImmediate(ImmedOffset);
- Offset = 0;
- return true;
- }
-
- // Otherwise, offset doesn't fit. Pull in what we can to simplify
- ImmedOffset = ImmedOffset & Mask;
- if (isSub) {
- if (AddrMode == ARMII::AddrMode5)
- // FIXME: Not consistent.
- ImmedOffset |= 1 << NumBits;
- else {
- ImmedOffset = -ImmedOffset;
- if (ImmedOffset == 0)
- // Change the opcode back if the encoded offset is zero.
- MI.setDesc(TII.get(positiveOffsetOpcode(NewOpc)));
- }
- }
- ImmOp.ChangeToImmediate(ImmedOffset);
- Offset &= ~(Mask*Scale);
- }
-
- Offset = (isSub) ? -Offset : Offset;
- return Offset == 0;
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/Thumb2InstrInfo.h b/libclamav/c++/llvm/lib/Target/ARM/Thumb2InstrInfo.h
deleted file mode 100644
index a0f89a6..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/Thumb2InstrInfo.h
+++ /dev/null
@@ -1,58 +0,0 @@
-//===- Thumb2InstrInfo.h - Thumb-2 Instruction Information ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Thumb-2 implementation of the TargetInstrInfo class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef THUMB2INSTRUCTIONINFO_H
-#define THUMB2INSTRUCTIONINFO_H
-
-#include "llvm/Target/TargetInstrInfo.h"
-#include "ARM.h"
-#include "ARMInstrInfo.h"
-#include "Thumb2RegisterInfo.h"
-
-namespace llvm {
- class ARMSubtarget;
-
-class Thumb2InstrInfo : public ARMBaseInstrInfo {
- Thumb2RegisterInfo RI;
-public:
- explicit Thumb2InstrInfo(const ARMSubtarget &STI);
-
- // Return the non-pre/post incrementing version of 'Opc'. Return 0
- // if there is no such opcode.
- unsigned getUnindexedOpcode(unsigned Opc) const;
-
- bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC) const;
-
- void storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC) const;
-
- void loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC) const;
-
- /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
- /// such, whenever a client has an instance of instruction info, it should
- /// always be able to get register info as well (through this method).
- ///
- const Thumb2RegisterInfo &getRegisterInfo() const { return RI; }
-};
-}
-
-#endif // THUMB2INSTRUCTIONINFO_H
diff --git a/libclamav/c++/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp b/libclamav/c++/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp
deleted file mode 100644
index f24d3e2..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/Thumb2RegisterInfo.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-//===- Thumb2RegisterInfo.cpp - Thumb-2 Register Information ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Thumb-2 implementation of the TargetRegisterInfo
-// class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "ARM.h"
-#include "ARMAddressingModes.h"
-#include "ARMBaseInstrInfo.h"
-#include "ARMMachineFunctionInfo.h"
-#include "ARMSubtarget.h"
-#include "Thumb2InstrInfo.h"
-#include "Thumb2RegisterInfo.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineLocation.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetFrameInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/ErrorHandling.h"
-using namespace llvm;
-
-Thumb2RegisterInfo::Thumb2RegisterInfo(const ARMBaseInstrInfo &tii,
- const ARMSubtarget &sti)
- : ARMBaseRegisterInfo(tii, sti) {
-}
-
-/// emitLoadConstPool - Emits a load from constpool to materialize the
-/// specified immediate.
-void Thumb2RegisterInfo::emitLoadConstPool(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- DebugLoc dl,
- unsigned DestReg, unsigned SubIdx,
- int Val,
- ARMCC::CondCodes Pred,
- unsigned PredReg) const {
- MachineFunction &MF = *MBB.getParent();
- MachineConstantPool *ConstantPool = MF.getConstantPool();
- Constant *C = ConstantInt::get(
- Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
- unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
-
- BuildMI(MBB, MBBI, dl, TII.get(ARM::t2LDRpci))
- .addReg(DestReg, getDefRegState(true), SubIdx)
- .addConstantPoolIndex(Idx).addImm((int64_t)ARMCC::AL).addReg(0);
-}
diff --git a/libclamav/c++/llvm/lib/Target/ARM/Thumb2RegisterInfo.h b/libclamav/c++/llvm/lib/Target/ARM/Thumb2RegisterInfo.h
deleted file mode 100644
index b3cf2e5..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/Thumb2RegisterInfo.h
+++ /dev/null
@@ -1,42 +0,0 @@
-//===- Thumb2RegisterInfo.h - Thumb-2 Register Information Impl -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the Thumb-2 implementation of the TargetRegisterInfo
-// class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef THUMB2REGISTERINFO_H
-#define THUMB2REGISTERINFO_H
-
-#include "ARM.h"
-#include "ARMRegisterInfo.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-
-namespace llvm {
- class ARMSubtarget;
- class ARMBaseInstrInfo;
- class Type;
-
-struct Thumb2RegisterInfo : public ARMBaseRegisterInfo {
-public:
- Thumb2RegisterInfo(const ARMBaseInstrInfo &tii, const ARMSubtarget &STI);
-
- /// emitLoadConstPool - Emits a load from constpool to materialize the
- /// specified immediate.
- void emitLoadConstPool(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- DebugLoc dl,
- unsigned DestReg, unsigned SubIdx, int Val,
- ARMCC::CondCodes Pred = ARMCC::AL,
- unsigned PredReg = 0) const;
-};
-}
-
-#endif // THUMB2REGISTERINFO_H
diff --git a/libclamav/c++/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/libclamav/c++/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
deleted file mode 100644
index 5086eff..0000000
--- a/libclamav/c++/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ /dev/null
@@ -1,709 +0,0 @@
-//===-- Thumb2SizeReduction.cpp - Thumb2 code size reduction pass -*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "t2-reduce-size"
-#include "ARM.h"
-#include "ARMAddressingModes.h"
-#include "ARMBaseRegisterInfo.h"
-#include "ARMBaseInstrInfo.h"
-#include "Thumb2InstrInfo.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Statistic.h"
-using namespace llvm;
-
-STATISTIC(NumNarrows, "Number of 32-bit instrs reduced to 16-bit ones");
-STATISTIC(Num2Addrs, "Number of 32-bit instrs reduced to 2addr 16-bit ones");
-STATISTIC(NumLdSts, "Number of 32-bit load / store reduced to 16-bit ones");
-
-static cl::opt<int> ReduceLimit("t2-reduce-limit",
- cl::init(-1), cl::Hidden);
-static cl::opt<int> ReduceLimit2Addr("t2-reduce-limit2",
- cl::init(-1), cl::Hidden);
-static cl::opt<int> ReduceLimitLdSt("t2-reduce-limit3",
- cl::init(-1), cl::Hidden);
-
-namespace {
- /// ReduceTable - A static table with information on mapping from wide
- /// opcodes to narrow
- struct ReduceEntry {
- unsigned WideOpc; // Wide opcode
- unsigned NarrowOpc1; // Narrow opcode to transform to
- unsigned NarrowOpc2; // Narrow opcode when it's two-address
- uint8_t Imm1Limit; // Limit of immediate field (bits)
- uint8_t Imm2Limit; // Limit of immediate field when it's two-address
- unsigned LowRegs1 : 1; // Only possible if low-registers are used
- unsigned LowRegs2 : 1; // Only possible if low-registers are used (2addr)
- unsigned PredCC1 : 2; // 0 - If predicated, cc is on and vice versa.
- // 1 - No cc field.
- // 2 - Always set CPSR.
- unsigned PredCC2 : 2;
- unsigned Special : 1; // Needs to be dealt with specially
- };
-
- static const ReduceEntry ReduceTable[] = {
- // Wide, Narrow1, Narrow2, imm1,imm2, lo1, lo2, P/C, S
- { ARM::t2ADCrr, 0, ARM::tADC, 0, 0, 0, 1, 0,0, 0 },
- { ARM::t2ADDri, ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 0,0, 0 },
- { ARM::t2ADDrr, ARM::tADDrr, ARM::tADDhirr, 0, 0, 1, 0, 0,1, 0 },
- // Note: immediate scale is 4.
- { ARM::t2ADDrSPi,ARM::tADDrSPi,0, 8, 0, 1, 0, 1,0, 0 },
- { ARM::t2ADDSri,ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 2,2, 1 },
- { ARM::t2ADDSrr,ARM::tADDrr, 0, 0, 0, 1, 0, 2,0, 1 },
- { ARM::t2ANDrr, 0, ARM::tAND, 0, 0, 0, 1, 0,0, 0 },
- { ARM::t2ASRri, ARM::tASRri, 0, 5, 0, 1, 0, 0,0, 0 },
- { ARM::t2ASRrr, 0, ARM::tASRrr, 0, 0, 0, 1, 0,0, 0 },
- { ARM::t2BICrr, 0, ARM::tBIC, 0, 0, 0, 1, 0,0, 0 },
- //FIXME: Disable CMN, as CCodes are backwards from compare expectations
- //{ ARM::t2CMNrr, ARM::tCMN, 0, 0, 0, 1, 0, 2,0, 0 },
- { ARM::t2CMPri, ARM::tCMPi8, 0, 8, 0, 1, 0, 2,0, 0 },
- { ARM::t2CMPrr, ARM::tCMPhir, 0, 0, 0, 0, 0, 2,0, 0 },
- { ARM::t2CMPzri,ARM::tCMPzi8, 0, 8, 0, 1, 0, 2,0, 0 },
- { ARM::t2CMPzrr,ARM::tCMPzhir,0, 0, 0, 0, 0, 2,0, 0 },
- { ARM::t2EORrr, 0, ARM::tEOR, 0, 0, 0, 1, 0,0, 0 },
- // FIXME: adr.n immediate offset must be multiple of 4.
- //{ ARM::t2LEApcrelJT,ARM::tLEApcrelJT, 0, 0, 0, 1, 0, 1,0, 0 },
- { ARM::t2LSLri, ARM::tLSLri, 0, 5, 0, 1, 0, 0,0, 0 },
- { ARM::t2LSLrr, 0, ARM::tLSLrr, 0, 0, 0, 1, 0,0, 0 },
- { ARM::t2LSRri, ARM::tLSRri, 0, 5, 0, 1, 0, 0,0, 0 },
- { ARM::t2LSRrr, 0, ARM::tLSRrr, 0, 0, 0, 1, 0,0, 0 },
- { ARM::t2MOVi, ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 0 },
- { ARM::t2MOVi16,ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 1 },
- // FIXME: Do we need the 16-bit 'S' variant?
- { ARM::t2MOVr,ARM::tMOVgpr2gpr,0, 0, 0, 0, 0, 1,0, 0 },
- { ARM::t2MOVCCr,0, ARM::tMOVCCr, 0, 0, 0, 0, 0,1, 0 },
- { ARM::t2MOVCCi,0, ARM::tMOVCCi, 0, 8, 0, 1, 0,1, 0 },
- { ARM::t2MUL, 0, ARM::tMUL, 0, 0, 0, 1, 0,0, 0 },
- { ARM::t2MVNr, ARM::tMVN, 0, 0, 0, 1, 0, 0,0, 0 },
- { ARM::t2ORRrr, 0, ARM::tORR, 0, 0, 0, 1, 0,0, 0 },
- { ARM::t2REV, ARM::tREV, 0, 0, 0, 1, 0, 1,0, 0 },
- { ARM::t2REV16, ARM::tREV16, 0, 0, 0, 1, 0, 1,0, 0 },
- { ARM::t2REVSH, ARM::tREVSH, 0, 0, 0, 1, 0, 1,0, 0 },
- { ARM::t2RORrr, 0, ARM::tROR, 0, 0, 0, 1, 0,0, 0 },
- { ARM::t2RSBri, ARM::tRSB, 0, 0, 0, 1, 0, 0,0, 1 },
- { ARM::t2RSBSri,ARM::tRSB, 0, 0, 0, 1, 0, 2,0, 1 },
- { ARM::t2SBCrr, 0, ARM::tSBC, 0, 0, 0, 1, 0,0, 0 },
- { ARM::t2SUBri, ARM::tSUBi3, ARM::tSUBi8, 3, 8, 1, 1, 0,0, 0 },
- { ARM::t2SUBrr, ARM::tSUBrr, 0, 0, 0, 1, 0, 0,0, 0 },
- { ARM::t2SUBSri,ARM::tSUBi3, ARM::tSUBi8, 3, 8, 1, 1, 2,2, 0 },
- { ARM::t2SUBSrr,ARM::tSUBrr, 0, 0, 0, 1, 0, 2,0, 0 },
- { ARM::t2SXTBr, ARM::tSXTB, 0, 0, 0, 1, 0, 1,0, 0 },
- { ARM::t2SXTHr, ARM::tSXTH, 0, 0, 0, 1, 0, 1,0, 0 },
- { ARM::t2TSTrr, ARM::tTST, 0, 0, 0, 1, 0, 2,0, 0 },
- { ARM::t2UXTBr, ARM::tUXTB, 0, 0, 0, 1, 0, 1,0, 0 },
- { ARM::t2UXTHr, ARM::tUXTH, 0, 0, 0, 1, 0, 1,0, 0 },
-
- // FIXME: Clean this up after splitting each Thumb load / store opcode
- // into multiple ones.
- { ARM::t2LDRi12,ARM::tLDR, ARM::tLDRspi, 5, 8, 1, 0, 0,0, 1 },
- { ARM::t2LDRs, ARM::tLDR, 0, 0, 0, 1, 0, 0,0, 1 },
- { ARM::t2LDRBi12,ARM::tLDRB, 0, 5, 0, 1, 0, 0,0, 1 },
- { ARM::t2LDRBs, ARM::tLDRB, 0, 0, 0, 1, 0, 0,0, 1 },
- { ARM::t2LDRHi12,ARM::tLDRH, 0, 5, 0, 1, 0, 0,0, 1 },
- { ARM::t2LDRHs, ARM::tLDRH, 0, 0, 0, 1, 0, 0,0, 1 },
- { ARM::t2LDRSBs,ARM::tLDRSB, 0, 0, 0, 1, 0, 0,0, 1 },
- { ARM::t2LDRSHs,ARM::tLDRSH, 0, 0, 0, 1, 0, 0,0, 1 },
- { ARM::t2STRi12,ARM::tSTR, ARM::tSTRspi, 5, 8, 1, 0, 0,0, 1 },
- { ARM::t2STRs, ARM::tSTR, 0, 0, 0, 1, 0, 0,0, 1 },
- { ARM::t2STRBi12,ARM::tSTRB, 0, 5, 0, 1, 0, 0,0, 1 },
- { ARM::t2STRBs, ARM::tSTRB, 0, 0, 0, 1, 0, 0,0, 1 },
- { ARM::t2STRHi12,ARM::tSTRH, 0, 5, 0, 1, 0, 0,0, 1 },
- { ARM::t2STRHs, ARM::tSTRH, 0, 0, 0, 1, 0, 0,0, 1 },
-
- { ARM::t2LDM_RET,0, ARM::tPOP_RET, 0, 0, 1, 1, 1,1, 1 },
- { ARM::t2LDM, ARM::tLDM, ARM::tPOP, 0, 0, 1, 1, 1,1, 1 },
- { ARM::t2STM, ARM::tSTM, ARM::tPUSH, 0, 0, 1, 1, 1,1, 1 },
- };
-
- class Thumb2SizeReduce : public MachineFunctionPass {
- public:
- static char ID;
- Thumb2SizeReduce();
-
- const Thumb2InstrInfo *TII;
-
- virtual bool runOnMachineFunction(MachineFunction &MF);
-
- virtual const char *getPassName() const {
- return "Thumb2 instruction size reduction pass";
- }
-
- private:
- /// ReduceOpcodeMap - Maps wide opcode to index of entry in ReduceTable.
- DenseMap<unsigned, unsigned> ReduceOpcodeMap;
-
- bool VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
- bool is2Addr, ARMCC::CondCodes Pred,
- bool LiveCPSR, bool &HasCC, bool &CCDead);
-
- bool ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
- const ReduceEntry &Entry);
-
- bool ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
- const ReduceEntry &Entry, bool LiveCPSR);
-
- /// ReduceTo2Addr - Reduce a 32-bit instruction to a 16-bit two-address
- /// instruction.
- bool ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
- const ReduceEntry &Entry,
- bool LiveCPSR);
-
- /// ReduceToNarrow - Reduce a 32-bit instruction to a 16-bit
- /// non-two-address instruction.
- bool ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
- const ReduceEntry &Entry,
- bool LiveCPSR);
-
- /// ReduceMBB - Reduce width of instructions in the specified basic block.
- bool ReduceMBB(MachineBasicBlock &MBB);
- };
- char Thumb2SizeReduce::ID = 0;
-}
-
-Thumb2SizeReduce::Thumb2SizeReduce() : MachineFunctionPass(&ID) {
- for (unsigned i = 0, e = array_lengthof(ReduceTable); i != e; ++i) {
- unsigned FromOpc = ReduceTable[i].WideOpc;
- if (!ReduceOpcodeMap.insert(std::make_pair(FromOpc, i)).second)
- assert(false && "Duplicated entries?");
- }
-}
-
-static bool HasImplicitCPSRDef(const TargetInstrDesc &TID) {
- for (const unsigned *Regs = TID.ImplicitDefs; *Regs; ++Regs)
- if (*Regs == ARM::CPSR)
- return true;
- return false;
-}
-
-bool
-Thumb2SizeReduce::VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
- bool is2Addr, ARMCC::CondCodes Pred,
- bool LiveCPSR, bool &HasCC, bool &CCDead) {
- if ((is2Addr && Entry.PredCC2 == 0) ||
- (!is2Addr && Entry.PredCC1 == 0)) {
- if (Pred == ARMCC::AL) {
- // Not predicated, must set CPSR.
- if (!HasCC) {
- // Original instruction was not setting CPSR, but CPSR is not
- // currently live anyway. It's ok to set it. The CPSR def is
- // dead though.
- if (!LiveCPSR) {
- HasCC = true;
- CCDead = true;
- return true;
- }
- return false;
- }
- } else {
- // Predicated, must not set CPSR.
- if (HasCC)
- return false;
- }
- } else if ((is2Addr && Entry.PredCC2 == 2) ||
- (!is2Addr && Entry.PredCC1 == 2)) {
- /// Old opcode has an optional def of CPSR.
- if (HasCC)
- return true;
- // If the old opcode does not have an implicit CPSR def, then it's not ok,
- // since the new opcode's CPSR def is not meant to be thrown away. e.g. CMP.
- if (!HasImplicitCPSRDef(MI->getDesc()))
- return false;
- HasCC = true;
- } else {
- // 16-bit instruction does not set CPSR.
- if (HasCC)
- return false;
- }
-
- return true;
-}
-
-static bool VerifyLowRegs(MachineInstr *MI) {
- unsigned Opc = MI->getOpcode();
- bool isPCOk = (Opc == ARM::t2LDM_RET) || (Opc == ARM::t2LDM);
- bool isLROk = (Opc == ARM::t2STM);
- bool isSPOk = isPCOk || isLROk || (Opc == ARM::t2ADDrSPi);
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || MO.isImplicit())
- continue;
- unsigned Reg = MO.getReg();
- if (Reg == 0 || Reg == ARM::CPSR)
- continue;
- if (isPCOk && Reg == ARM::PC)
- continue;
- if (isLROk && Reg == ARM::LR)
- continue;
- if (Reg == ARM::SP) {
- if (isSPOk)
- continue;
- if (i == 1 && (Opc == ARM::t2LDRi12 || Opc == ARM::t2STRi12))
- // Special case for these ldr / str with sp as base register.
- continue;
- }
- if (!isARMLowRegister(Reg))
- return false;
- }
- return true;
-}
-
-bool
-Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
- const ReduceEntry &Entry) {
- if (ReduceLimitLdSt != -1 && ((int)NumLdSts >= ReduceLimitLdSt))
- return false;
-
- unsigned Scale = 1;
- bool HasImmOffset = false;
- bool HasShift = false;
- bool HasOffReg = true;
- bool isLdStMul = false;
- unsigned Opc = Entry.NarrowOpc1;
- unsigned OpNum = 3; // First 'rest' of operands.
- uint8_t ImmLimit = Entry.Imm1Limit;
- switch (Entry.WideOpc) {
- default:
- llvm_unreachable("Unexpected Thumb2 load / store opcode!");
- case ARM::t2LDRi12:
- case ARM::t2STRi12: {
- unsigned BaseReg = MI->getOperand(1).getReg();
- if (BaseReg == ARM::SP) {
- Opc = Entry.NarrowOpc2;
- ImmLimit = Entry.Imm2Limit;
- HasOffReg = false;
- }
- Scale = 4;
- HasImmOffset = true;
- break;
- }
- case ARM::t2LDRBi12:
- case ARM::t2STRBi12:
- HasImmOffset = true;
- break;
- case ARM::t2LDRHi12:
- case ARM::t2STRHi12:
- Scale = 2;
- HasImmOffset = true;
- break;
- case ARM::t2LDRs:
- case ARM::t2LDRBs:
- case ARM::t2LDRHs:
- case ARM::t2LDRSBs:
- case ARM::t2LDRSHs:
- case ARM::t2STRs:
- case ARM::t2STRBs:
- case ARM::t2STRHs:
- HasShift = true;
- OpNum = 4;
- break;
- case ARM::t2LDM_RET:
- case ARM::t2LDM:
- case ARM::t2STM: {
- OpNum = 0;
- unsigned BaseReg = MI->getOperand(0).getReg();
- unsigned Mode = MI->getOperand(1).getImm();
- if (BaseReg == ARM::SP && ARM_AM::getAM4WBFlag(Mode)) {
- Opc = Entry.NarrowOpc2;
- OpNum = 2;
- } else if (Entry.WideOpc == ARM::t2LDM_RET ||
- !isARMLowRegister(BaseReg) ||
- !ARM_AM::getAM4WBFlag(Mode) ||
- ARM_AM::getAM4SubMode(Mode) != ARM_AM::ia) {
- return false;
- }
- isLdStMul = true;
- break;
- }
- }
-
- unsigned OffsetReg = 0;
- bool OffsetKill = false;
- if (HasShift) {
- OffsetReg = MI->getOperand(2).getReg();
- OffsetKill = MI->getOperand(2).isKill();
- if (MI->getOperand(3).getImm())
- // Thumb1 addressing mode doesn't support shift.
- return false;
- }
-
- unsigned OffsetImm = 0;
- if (HasImmOffset) {
- OffsetImm = MI->getOperand(2).getImm();
- unsigned MaxOffset = ((1 << ImmLimit) - 1) * Scale;
- if ((OffsetImm & (Scale-1)) || OffsetImm > MaxOffset)
- // Make sure the immediate field fits.
- return false;
- }
-
- // Add the 16-bit load / store instruction.
- // FIXME: Thumb1 addressing mode encodes both immediate and register offset.
- DebugLoc dl = MI->getDebugLoc();
- MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, TII->get(Opc));
- if (!isLdStMul) {
- MIB.addOperand(MI->getOperand(0)).addOperand(MI->getOperand(1));
- if (Opc != ARM::tLDRSB && Opc != ARM::tLDRSH) {
- // tLDRSB and tLDRSH do not have an immediate offset field. On the other
- // hand, they must have an offset register.
- // FIXME: Remove this special case.
- MIB.addImm(OffsetImm/Scale);
- }
- assert((!HasShift || OffsetReg) && "Invalid so_reg load / store address!");
-
- if (HasOffReg)
- MIB.addReg(OffsetReg, getKillRegState(OffsetKill));
- }
-
- // Transfer the rest of operands.
- for (unsigned e = MI->getNumOperands(); OpNum != e; ++OpNum)
- MIB.addOperand(MI->getOperand(OpNum));
-
- // Transfer memoperands.
- (*MIB).setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
-
- DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);
-
- MBB.erase(MI);
- ++NumLdSts;
- return true;
-}
-
-bool
-Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
- const ReduceEntry &Entry,
- bool LiveCPSR) {
- if (Entry.LowRegs1 && !VerifyLowRegs(MI))
- return false;
-
- const TargetInstrDesc &TID = MI->getDesc();
- if (TID.mayLoad() || TID.mayStore())
- return ReduceLoadStore(MBB, MI, Entry);
-
- unsigned Opc = MI->getOpcode();
- switch (Opc) {
- default: break;
- case ARM::t2ADDSri:
- case ARM::t2ADDSrr: {
- unsigned PredReg = 0;
- if (getInstrPredicate(MI, PredReg) == ARMCC::AL) {
- switch (Opc) {
- default: break;
- case ARM::t2ADDSri: {
- if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR))
- return true;
- // fallthrough
- }
- case ARM::t2ADDSrr:
- return ReduceToNarrow(MBB, MI, Entry, LiveCPSR);
- }
- }
- break;
- }
- case ARM::t2RSBri:
- case ARM::t2RSBSri:
- if (MI->getOperand(2).getImm() == 0)
- return ReduceToNarrow(MBB, MI, Entry, LiveCPSR);
- break;
- case ARM::t2MOVi16:
- // Can convert only 'pure' immediate operands, not immediates obtained as
- // globals' addresses.
- if (MI->getOperand(1).isImm())
- return ReduceToNarrow(MBB, MI, Entry, LiveCPSR);
- break;
- }
- return false;
-}
-
-bool
-Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
- const ReduceEntry &Entry,
- bool LiveCPSR) {
-
- if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
- return false;
-
- const TargetInstrDesc &TID = MI->getDesc();
- unsigned Reg0 = MI->getOperand(0).getReg();
- unsigned Reg1 = MI->getOperand(1).getReg();
- if (Reg0 != Reg1)
- return false;
- if (Entry.LowRegs2 && !isARMLowRegister(Reg0))
- return false;
- if (Entry.Imm2Limit) {
- unsigned Imm = MI->getOperand(2).getImm();
- unsigned Limit = (1 << Entry.Imm2Limit) - 1;
- if (Imm > Limit)
- return false;
- } else {
- unsigned Reg2 = MI->getOperand(2).getReg();
- if (Entry.LowRegs2 && !isARMLowRegister(Reg2))
- return false;
- }
-
- // Check if it's possible / necessary to transfer the predicate.
- const TargetInstrDesc &NewTID = TII->get(Entry.NarrowOpc2);
- unsigned PredReg = 0;
- ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
- bool SkipPred = false;
- if (Pred != ARMCC::AL) {
- if (!NewTID.isPredicable())
- // Can't transfer predicate, fail.
- return false;
- } else {
- SkipPred = !NewTID.isPredicable();
- }
-
- bool HasCC = false;
- bool CCDead = false;
- if (TID.hasOptionalDef()) {
- unsigned NumOps = TID.getNumOperands();
- HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
- if (HasCC && MI->getOperand(NumOps-1).isDead())
- CCDead = true;
- }
- if (!VerifyPredAndCC(MI, Entry, true, Pred, LiveCPSR, HasCC, CCDead))
- return false;
-
- // Add the 16-bit instruction.
- DebugLoc dl = MI->getDebugLoc();
- MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, NewTID);
- MIB.addOperand(MI->getOperand(0));
- if (NewTID.hasOptionalDef()) {
- if (HasCC)
- AddDefaultT1CC(MIB, CCDead);
- else
- AddNoT1CC(MIB);
- }
-
- // Transfer the rest of operands.
- unsigned NumOps = TID.getNumOperands();
- for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
- if (i < NumOps && TID.OpInfo[i].isOptionalDef())
- continue;
- if (SkipPred && TID.OpInfo[i].isPredicate())
- continue;
- MIB.addOperand(MI->getOperand(i));
- }
-
- DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);
-
- MBB.erase(MI);
- ++Num2Addrs;
- return true;
-}
-
-bool
-Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
- const ReduceEntry &Entry,
- bool LiveCPSR) {
- if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
- return false;
-
- unsigned Limit = ~0U;
- unsigned Scale = (Entry.WideOpc == ARM::t2ADDrSPi) ? 4 : 1;
- if (Entry.Imm1Limit)
- Limit = ((1 << Entry.Imm1Limit) - 1) * Scale;
-
- const TargetInstrDesc &TID = MI->getDesc();
- for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i) {
- if (TID.OpInfo[i].isPredicate())
- continue;
- const MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg()) {
- unsigned Reg = MO.getReg();
- if (!Reg || Reg == ARM::CPSR)
- continue;
- if (Entry.WideOpc == ARM::t2ADDrSPi && Reg == ARM::SP)
- continue;
- if (Entry.LowRegs1 && !isARMLowRegister(Reg))
- return false;
- } else if (MO.isImm() &&
- !TID.OpInfo[i].isPredicate()) {
- if (((unsigned)MO.getImm()) > Limit || (MO.getImm() & (Scale-1)) != 0)
- return false;
- }
- }
-
- // Check if it's possible / necessary to transfer the predicate.
- const TargetInstrDesc &NewTID = TII->get(Entry.NarrowOpc1);
- unsigned PredReg = 0;
- ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
- bool SkipPred = false;
- if (Pred != ARMCC::AL) {
- if (!NewTID.isPredicable())
- // Can't transfer predicate, fail.
- return false;
- } else {
- SkipPred = !NewTID.isPredicable();
- }
-
- bool HasCC = false;
- bool CCDead = false;
- if (TID.hasOptionalDef()) {
- unsigned NumOps = TID.getNumOperands();
- HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
- if (HasCC && MI->getOperand(NumOps-1).isDead())
- CCDead = true;
- }
- if (!VerifyPredAndCC(MI, Entry, false, Pred, LiveCPSR, HasCC, CCDead))
- return false;
-
- // Add the 16-bit instruction.
- DebugLoc dl = MI->getDebugLoc();
- MachineInstrBuilder MIB = BuildMI(MBB, *MI, dl, NewTID);
- MIB.addOperand(MI->getOperand(0));
- if (NewTID.hasOptionalDef()) {
- if (HasCC)
- AddDefaultT1CC(MIB, CCDead);
- else
- AddNoT1CC(MIB);
- }
-
- // Transfer the rest of operands.
- unsigned NumOps = TID.getNumOperands();
- for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
- if (i < NumOps && TID.OpInfo[i].isOptionalDef())
- continue;
- if ((TID.getOpcode() == ARM::t2RSBSri ||
- TID.getOpcode() == ARM::t2RSBri) && i == 2)
- // Skip the zero immediate operand, it's now implicit.
- continue;
- bool isPred = (i < NumOps && TID.OpInfo[i].isPredicate());
- if (SkipPred && isPred)
- continue;
- const MachineOperand &MO = MI->getOperand(i);
- if (Scale > 1 && !isPred && MO.isImm())
- MIB.addImm(MO.getImm() / Scale);
- else {
- if (MO.isReg() && MO.isImplicit() && MO.getReg() == ARM::CPSR)
- // Skip implicit def of CPSR. Either it's modeled as an optional
- // def now or it's already an implicit def on the new instruction.
- continue;
- MIB.addOperand(MO);
- }
- }
- if (!TID.isPredicable() && NewTID.isPredicable())
- AddDefaultPred(MIB);
-
- DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);
-
- MBB.erase(MI);
- ++NumNarrows;
- return true;
-}
-
-static bool UpdateCPSRDef(MachineInstr &MI, bool LiveCPSR) {
- bool HasDef = false;
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || MO.isUndef() || MO.isUse())
- continue;
- if (MO.getReg() != ARM::CPSR)
- continue;
- if (!MO.isDead())
- HasDef = true;
- }
-
- return HasDef || LiveCPSR;
-}
-
-static bool UpdateCPSRUse(MachineInstr &MI, bool LiveCPSR) {
- for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI.getOperand(i);
- if (!MO.isReg() || MO.isUndef() || MO.isDef())
- continue;
- if (MO.getReg() != ARM::CPSR)
- continue;
- assert(LiveCPSR && "CPSR liveness tracking is wrong!");
- if (MO.isKill()) {
- LiveCPSR = false;
- break;
- }
- }
-
- return LiveCPSR;
-}
-
-bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
- bool Modified = false;
-
- bool LiveCPSR = false;
- // Yes, CPSR could be livein.
- for (MachineBasicBlock::const_livein_iterator I = MBB.livein_begin(),
- E = MBB.livein_end(); I != E; ++I) {
- if (*I == ARM::CPSR) {
- LiveCPSR = true;
- break;
- }
- }
-
- MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
- MachineBasicBlock::iterator NextMII;
- for (; MII != E; MII = NextMII) {
- NextMII = llvm::next(MII);
-
- MachineInstr *MI = &*MII;
- LiveCPSR = UpdateCPSRUse(*MI, LiveCPSR);
-
- unsigned Opcode = MI->getOpcode();
- DenseMap<unsigned, unsigned>::iterator OPI = ReduceOpcodeMap.find(Opcode);
- if (OPI != ReduceOpcodeMap.end()) {
- const ReduceEntry &Entry = ReduceTable[OPI->second];
- // Ignore "special" cases for now.
- if (Entry.Special) {
- if (ReduceSpecial(MBB, MI, Entry, LiveCPSR)) {
- Modified = true;
- MachineBasicBlock::iterator I = prior(NextMII);
- MI = &*I;
- }
- goto ProcessNext;
- }
-
- // Try to transform to a 16-bit two-address instruction.
- if (Entry.NarrowOpc2 && ReduceTo2Addr(MBB, MI, Entry, LiveCPSR)) {
- Modified = true;
- MachineBasicBlock::iterator I = prior(NextMII);
- MI = &*I;
- goto ProcessNext;
- }
-
- // Try to transform to a 16-bit non-two-address instruction.
- if (Entry.NarrowOpc1 && ReduceToNarrow(MBB, MI, Entry, LiveCPSR)) {
- Modified = true;
- MachineBasicBlock::iterator I = prior(NextMII);
- MI = &*I;
- }
- }
-
- ProcessNext:
- LiveCPSR = UpdateCPSRDef(*MI, LiveCPSR);
- }
-
- return Modified;
-}
-
-bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
- const TargetMachine &TM = MF.getTarget();
- TII = static_cast<const Thumb2InstrInfo*>(TM.getInstrInfo());
-
- bool Modified = false;
- for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
- Modified |= ReduceMBB(*I);
- return Modified;
-}
-
-/// createThumb2SizeReductionPass - Returns an instance of the Thumb2 size
-/// reduction pass.
-FunctionPass *llvm::createThumb2SizeReductionPass() {
- return new Thumb2SizeReduce();
-}
diff --git a/libclamav/c++/llvm/lib/Target/Mangler.cpp b/libclamav/c++/llvm/lib/Target/Mangler.cpp
index ef6defc..49efe75 100644
--- a/libclamav/c++/llvm/lib/Target/Mangler.cpp
+++ b/libclamav/c++/llvm/lib/Target/Mangler.cpp
@@ -12,17 +12,22 @@
//===----------------------------------------------------------------------===//
#include "llvm/Target/Mangler.h"
-#include "llvm/GlobalValue.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/Target/TargetData.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
using namespace llvm;
-static bool isAcceptableChar(char C) {
+static bool isAcceptableChar(char C, bool AllowPeriod) {
if ((C < 'a' || C > 'z') &&
(C < 'A' || C > 'Z') &&
(C < '0' || C > '9') &&
- C != '_' && C != '$' && C != '.' && C != '@')
+ C != '_' && C != '$' && C != '@' &&
+ !(AllowPeriod && C == '.'))
return false;
return true;
}
@@ -50,8 +55,9 @@ static bool NameNeedsEscaping(StringRef Str, const MCAsmInfo &MAI) {
// If any of the characters in the string is an unacceptable character, force
// quotes.
+ bool AllowPeriod = MAI.doesAllowPeriodsInName();
for (unsigned i = 0, e = Str.size(); i != e; ++i)
- if (!isAcceptableChar(Str[i]))
+ if (!isAcceptableChar(Str[i], AllowPeriod))
return true;
return false;
}
@@ -59,17 +65,17 @@ static bool NameNeedsEscaping(StringRef Str, const MCAsmInfo &MAI) {
/// appendMangledName - Add the specified string in mangled form if it uses
/// any unusual characters.
static void appendMangledName(SmallVectorImpl<char> &OutName, StringRef Str,
- const MCAsmInfo *MAI) {
+ const MCAsmInfo &MAI) {
// The first character is not allowed to be a number unless the target
// explicitly allows it.
- if ((MAI == 0 || !MAI->doesAllowNameToStartWithDigit()) &&
- Str[0] >= '0' && Str[0] <= '9') {
+ if (!MAI.doesAllowNameToStartWithDigit() && Str[0] >= '0' && Str[0] <= '9') {
MangleLetter(OutName, Str[0]);
Str = Str.substr(1);
}
-
+
+ bool AllowPeriod = MAI.doesAllowPeriodsInName();
for (unsigned i = 0, e = Str.size(); i != e; ++i) {
- if (!isAcceptableChar(Str[i]))
+ if (!isAcceptableChar(Str[i], AllowPeriod))
MangleLetter(OutName, Str[i]);
else
OutName.push_back(Str[i]);
@@ -100,6 +106,8 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
StringRef Name = GVName.toStringRef(TmpData);
assert(!Name.empty() && "getNameWithPrefix requires non-empty name");
+ const MCAsmInfo &MAI = Context.getAsmInfo();
+
// If the global name is not led with \1, add the appropriate prefixes.
if (Name[0] == '\1') {
Name = Name.substr(1);
@@ -134,7 +142,7 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
// On systems that do not allow quoted names, we need to mangle most
// strange characters.
if (!MAI.doesAllowQuotesInName())
- return appendMangledName(OutName, Name, &MAI);
+ return appendMangledName(OutName, Name, MAI);
// Okay, the system allows quoted strings. We can quote most anything, the
// only characters that need escaping are " and \n.
@@ -142,6 +150,26 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
return appendMangledQuotedName(OutName, Name);
}
+/// AddFastCallStdCallSuffix - Microsoft fastcall and stdcall functions require
+/// a suffix on their name indicating the number of words of arguments they
+/// take.
+static void AddFastCallStdCallSuffix(SmallVectorImpl<char> &OutName,
+ const Function *F, const TargetData &TD) {
+ // Calculate arguments size total.
+ unsigned ArgWords = 0;
+ for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+ AI != AE; ++AI) {
+ const Type *Ty = AI->getType();
+ // 'Dereference' type in case of byval parameter attribute
+ if (AI->hasByValAttr())
+ Ty = cast<PointerType>(Ty)->getElementType();
+ // Size should be aligned to DWORD boundary
+ ArgWords += ((TD.getTypeAllocSize(Ty) + 3)/4)*4;
+ }
+
+ raw_svector_ostream(OutName) << '@' << ArgWords;
+}
+
/// getNameWithPrefix - Fill OutName with the name of the appropriate prefix
/// and the specified global variable's name. If the global variable doesn't
@@ -152,20 +180,48 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
ManglerPrefixTy PrefixTy = Mangler::Default;
if (GV->hasPrivateLinkage() || isImplicitlyPrivate)
PrefixTy = Mangler::Private;
- else if (GV->hasLinkerPrivateLinkage())
+ else if (GV->hasLinkerPrivateLinkage() || GV->hasLinkerPrivateWeakLinkage() ||
+ GV->hasLinkerPrivateWeakDefAutoLinkage())
PrefixTy = Mangler::LinkerPrivate;
// If this global has a name, handle it simply.
- if (GV->hasName())
- return getNameWithPrefix(OutName, GV->getName(), PrefixTy);
+ if (GV->hasName()) {
+ getNameWithPrefix(OutName, GV->getName(), PrefixTy);
+ } else {
+ // Get the ID for the global, assigning a new one if we haven't got one
+ // already.
+ unsigned &ID = AnonGlobalIDs[GV];
+ if (ID == 0) ID = NextAnonGlobalID++;
- // Get the ID for the global, assigning a new one if we haven't got one
- // already.
- unsigned &ID = AnonGlobalIDs[GV];
- if (ID == 0) ID = NextAnonGlobalID++;
+ // Must mangle the global into a unique ID.
+ getNameWithPrefix(OutName, "__unnamed_" + Twine(ID), PrefixTy);
+ }
- // Must mangle the global into a unique ID.
- getNameWithPrefix(OutName, "__unnamed_" + Twine(ID), PrefixTy);
+ // If we are supposed to add a microsoft-style suffix for stdcall/fastcall,
+ // add it.
+ if (Context.getAsmInfo().hasMicrosoftFastStdCallMangling()) {
+ if (const Function *F = dyn_cast<Function>(GV)) {
+ CallingConv::ID CC = F->getCallingConv();
+
+ // fastcall functions need to start with @.
+ // FIXME: This logic seems unlikely to be right.
+ if (CC == CallingConv::X86_FastCall) {
+ if (OutName[0] == '_')
+ OutName[0] = '@';
+ else
+ OutName.insert(OutName.begin(), '@');
+ }
+
+ // fastcall and stdcall functions usually need @42 at the end to specify
+ // the argument info.
+ const FunctionType *FT = F->getFunctionType();
+ if ((CC == CallingConv::X86_FastCall || CC == CallingConv::X86_StdCall) &&
+ // "Pure" variadic functions do not receive @0 suffix.
+ (!FT->isVarArg() || FT->getNumParams() == 0 ||
+ (FT->getNumParams() == 1 && F->hasStructRetAttr())))
+ AddFastCallStdCallSuffix(OutName, F, TD);
+ }
+ }
}
/// getNameWithPrefix - Fill OutName with the name of the appropriate prefix
@@ -177,3 +233,13 @@ std::string Mangler::getNameWithPrefix(const GlobalValue *GV,
getNameWithPrefix(Buf, GV, isImplicitlyPrivate);
return std::string(Buf.begin(), Buf.end());
}
+
+/// getSymbol - Return the MCSymbol for the specified global value. This
+/// symbol is the main label that is the address of the global.
+MCSymbol *Mangler::getSymbol(const GlobalValue *GV) {
+ SmallString<60> NameStr;
+ getNameWithPrefix(NameStr, GV, false);
+ return Context.GetOrCreateSymbol(NameStr.str());
+}
+
+
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/AsmPrinter/CMakeLists.txt b/libclamav/c++/llvm/lib/Target/PowerPC/AsmPrinter/CMakeLists.txt
deleted file mode 100644
index 236b264..0000000
--- a/libclamav/c++/llvm/lib/Target/PowerPC/AsmPrinter/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
-
-add_llvm_library(LLVMPowerPCAsmPrinter
- PPCAsmPrinter.cpp
- )
-add_dependencies(LLVMPowerPCAsmPrinter PowerPCCodeGenTable_gen)
\ No newline at end of file
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/AsmPrinter/Makefile b/libclamav/c++/llvm/lib/Target/PowerPC/AsmPrinter/Makefile
deleted file mode 100644
index 269ef92..0000000
--- a/libclamav/c++/llvm/lib/Target/PowerPC/AsmPrinter/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/PowerPC/AsmPrinter/Makefile --------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMPowerPCAsmPrinter
-
-# Hack: we need to include 'main' PowerPC target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/AsmPrinter/PPCAsmPrinter.cpp b/libclamav/c++/llvm/lib/Target/PowerPC/AsmPrinter/PPCAsmPrinter.cpp
deleted file mode 100644
index ac901d0..0000000
--- a/libclamav/c++/llvm/lib/Target/PowerPC/AsmPrinter/PPCAsmPrinter.cpp
+++ /dev/null
@@ -1,848 +0,0 @@
-//===-- PPCAsmPrinter.cpp - Print machine instrs to PowerPC assembly --------=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a printer that converts from our internal representation
-// of machine-dependent LLVM code to PowerPC assembly language. This printer is
-// the output mechanism used by `llc'.
-//
-// Documentation at http://developer.apple.com/documentation/DeveloperTools/
-// Reference/Assembler/ASMIntroduction/chapter_1_section_1.html
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "asmprinter"
-#include "PPC.h"
-#include "PPCPredicates.h"
-#include "PPCTargetMachine.h"
-#include "PPCSubtarget.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Module.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/DwarfWriter.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineModuleInfoImpls.h"
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCSectionMachO.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetRegistry.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/StringSet.h"
-#include "llvm/ADT/SmallString.h"
-using namespace llvm;
-
-namespace {
- class PPCAsmPrinter : public AsmPrinter {
- protected:
- DenseMap<const MCSymbol*, const MCSymbol*> TOC;
- const PPCSubtarget &Subtarget;
- uint64_t LabelID;
- public:
- explicit PPCAsmPrinter(formatted_raw_ostream &O, TargetMachine &TM,
- MCContext &Ctx, MCStreamer &Streamer,
- const MCAsmInfo *T)
- : AsmPrinter(O, TM, Ctx, Streamer, T),
- Subtarget(TM.getSubtarget<PPCSubtarget>()), LabelID(0) {}
-
- virtual const char *getPassName() const {
- return "PowerPC Assembly Printer";
- }
-
- PPCTargetMachine &getTM() {
- return static_cast<PPCTargetMachine&>(TM);
- }
-
- unsigned enumRegToMachineReg(unsigned enumReg) {
- switch (enumReg) {
- default: llvm_unreachable("Unhandled register!");
- case PPC::CR0: return 0;
- case PPC::CR1: return 1;
- case PPC::CR2: return 2;
- case PPC::CR3: return 3;
- case PPC::CR4: return 4;
- case PPC::CR5: return 5;
- case PPC::CR6: return 6;
- case PPC::CR7: return 7;
- }
- llvm_unreachable(0);
- }
-
- /// printInstruction - This method is automatically generated by tablegen
- /// from the instruction set description. This method returns true if the
- /// machine instruction was sufficiently described to print it, otherwise it
- /// returns false.
- void printInstruction(const MachineInstr *MI);
- static const char *getRegisterName(unsigned RegNo);
-
-
- virtual void EmitInstruction(const MachineInstr *MI);
- void printOp(const MachineOperand &MO);
-
- /// stripRegisterPrefix - This method strips the character prefix from a
- /// register name so that only the number is left. Used for Linux asm.
- const char *stripRegisterPrefix(const char *RegName) {
- switch (RegName[0]) {
- case 'r':
- case 'f':
- case 'v': return RegName + 1;
- case 'c': if (RegName[1] == 'r') return RegName + 2;
- }
-
- return RegName;
- }
-
- /// printRegister - Print register according to target requirements.
- ///
- void printRegister(const MachineOperand &MO, bool R0AsZero) {
- unsigned RegNo = MO.getReg();
- assert(TargetRegisterInfo::isPhysicalRegister(RegNo) && "Not physreg??");
-
- // If we should use 0 for R0.
- if (R0AsZero && RegNo == PPC::R0) {
- O << "0";
- return;
- }
-
- const char *RegName = getRegisterName(RegNo);
- // Linux assembler (Others?) does not take register mnemonics.
- // FIXME - What about special registers used in mfspr/mtspr?
- if (!Subtarget.isDarwin()) RegName = stripRegisterPrefix(RegName);
- O << RegName;
- }
-
- void printOperand(const MachineInstr *MI, unsigned OpNo) {
- const MachineOperand &MO = MI->getOperand(OpNo);
- if (MO.isReg()) {
- printRegister(MO, false);
- } else if (MO.isImm()) {
- O << MO.getImm();
- } else {
- printOp(MO);
- }
- }
-
- bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode);
- bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode);
-
-
- void printS5ImmOperand(const MachineInstr *MI, unsigned OpNo) {
- char value = MI->getOperand(OpNo).getImm();
- value = (value << (32-5)) >> (32-5);
- O << (int)value;
- }
- void printU5ImmOperand(const MachineInstr *MI, unsigned OpNo) {
- unsigned char value = MI->getOperand(OpNo).getImm();
- assert(value <= 31 && "Invalid u5imm argument!");
- O << (unsigned int)value;
- }
- void printU6ImmOperand(const MachineInstr *MI, unsigned OpNo) {
- unsigned char value = MI->getOperand(OpNo).getImm();
- assert(value <= 63 && "Invalid u6imm argument!");
- O << (unsigned int)value;
- }
- void printS16ImmOperand(const MachineInstr *MI, unsigned OpNo) {
- O << (short)MI->getOperand(OpNo).getImm();
- }
- void printU16ImmOperand(const MachineInstr *MI, unsigned OpNo) {
- O << (unsigned short)MI->getOperand(OpNo).getImm();
- }
- void printS16X4ImmOperand(const MachineInstr *MI, unsigned OpNo) {
- if (MI->getOperand(OpNo).isImm()) {
- O << (short)(MI->getOperand(OpNo).getImm()*4);
- } else {
- O << "lo16(";
- printOp(MI->getOperand(OpNo));
- if (TM.getRelocationModel() == Reloc::PIC_)
- O << "-\"L" << getFunctionNumber() << "$pb\")";
- else
- O << ')';
- }
- }
- void printBranchOperand(const MachineInstr *MI, unsigned OpNo) {
- // Branches can take an immediate operand. This is used by the branch
- // selection pass to print $+8, an eight byte displacement from the PC.
- if (MI->getOperand(OpNo).isImm()) {
- O << "$+" << MI->getOperand(OpNo).getImm()*4;
- } else {
- printOp(MI->getOperand(OpNo));
- }
- }
- void printCallOperand(const MachineInstr *MI, unsigned OpNo) {
- const MachineOperand &MO = MI->getOperand(OpNo);
- if (TM.getRelocationModel() != Reloc::Static) {
- if (MO.getType() == MachineOperand::MO_GlobalAddress) {
- GlobalValue *GV = MO.getGlobal();
- if (GV->isDeclaration() || GV->isWeakForLinker()) {
- // Dynamically-resolved functions need a stub for the function.
- MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$stub");
- MCSymbol *&StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
- if (StubSym == 0)
- StubSym = GetGlobalValueSymbol(GV);
- O << *Sym;
- return;
- }
- }
- if (MO.getType() == MachineOperand::MO_ExternalSymbol) {
- SmallString<128> TempNameStr;
- TempNameStr += StringRef(MO.getSymbolName());
- TempNameStr += StringRef("$stub");
-
- MCSymbol *Sym = GetExternalSymbolSymbol(TempNameStr.str());
- MCSymbol *&StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
- if (StubSym == 0)
- StubSym = GetExternalSymbolSymbol(MO.getSymbolName());
- O << *Sym;
- return;
- }
- }
-
- printOp(MI->getOperand(OpNo));
- }
- void printAbsAddrOperand(const MachineInstr *MI, unsigned OpNo) {
- O << (int)MI->getOperand(OpNo).getImm()*4;
- }
- void printPICLabel(const MachineInstr *MI, unsigned OpNo) {
- O << "\"L" << getFunctionNumber() << "$pb\"\n";
- O << "\"L" << getFunctionNumber() << "$pb\":";
- }
- void printSymbolHi(const MachineInstr *MI, unsigned OpNo) {
- if (MI->getOperand(OpNo).isImm()) {
- printS16ImmOperand(MI, OpNo);
- } else {
- if (Subtarget.isDarwin()) O << "ha16(";
- printOp(MI->getOperand(OpNo));
- if (TM.getRelocationModel() == Reloc::PIC_)
- O << "-\"L" << getFunctionNumber() << "$pb\"";
- if (Subtarget.isDarwin())
- O << ')';
- else
- O << "@ha";
- }
- }
- void printSymbolLo(const MachineInstr *MI, unsigned OpNo) {
- if (MI->getOperand(OpNo).isImm()) {
- printS16ImmOperand(MI, OpNo);
- } else {
- if (Subtarget.isDarwin()) O << "lo16(";
- printOp(MI->getOperand(OpNo));
- if (TM.getRelocationModel() == Reloc::PIC_)
- O << "-\"L" << getFunctionNumber() << "$pb\"";
- if (Subtarget.isDarwin())
- O << ')';
- else
- O << "@l";
- }
- }
- void printcrbitm(const MachineInstr *MI, unsigned OpNo) {
- unsigned CCReg = MI->getOperand(OpNo).getReg();
- unsigned RegNo = enumRegToMachineReg(CCReg);
- O << (0x80 >> RegNo);
- }
- // The new addressing mode printers.
- void printMemRegImm(const MachineInstr *MI, unsigned OpNo) {
- printSymbolLo(MI, OpNo);
- O << '(';
- if (MI->getOperand(OpNo+1).isReg() &&
- MI->getOperand(OpNo+1).getReg() == PPC::R0)
- O << "0";
- else
- printOperand(MI, OpNo+1);
- O << ')';
- }
- void printMemRegImmShifted(const MachineInstr *MI, unsigned OpNo) {
- if (MI->getOperand(OpNo).isImm())
- printS16X4ImmOperand(MI, OpNo);
- else
- printSymbolLo(MI, OpNo);
- O << '(';
- if (MI->getOperand(OpNo+1).isReg() &&
- MI->getOperand(OpNo+1).getReg() == PPC::R0)
- O << "0";
- else
- printOperand(MI, OpNo+1);
- O << ')';
- }
-
- void printMemRegReg(const MachineInstr *MI, unsigned OpNo) {
- // When used as the base register, r0 reads constant zero rather than
- // the value contained in the register. For this reason, the darwin
- // assembler requires that we print r0 as 0 (no r) when used as the base.
- const MachineOperand &MO = MI->getOperand(OpNo);
- printRegister(MO, true);
- O << ", ";
- printOperand(MI, OpNo+1);
- }
-
- void printTOCEntryLabel(const MachineInstr *MI, unsigned OpNo) {
- const MachineOperand &MO = MI->getOperand(OpNo);
-
- assert(MO.getType() == MachineOperand::MO_GlobalAddress);
-
- const MCSymbol *Sym = GetGlobalValueSymbol(MO.getGlobal());
-
- // Map symbol -> label of TOC entry.
- const MCSymbol *&TOCEntry = TOC[Sym];
- if (TOCEntry == 0)
- TOCEntry = OutContext.
- GetOrCreateSymbol(StringRef(MAI->getPrivateGlobalPrefix()) + "C" +
- Twine(LabelID++));
-
- O << *TOCEntry << "@toc";
- }
-
- void printPredicateOperand(const MachineInstr *MI, unsigned OpNo,
- const char *Modifier);
- };
-
- /// PPCLinuxAsmPrinter - PowerPC assembly printer, customized for Linux
- class PPCLinuxAsmPrinter : public PPCAsmPrinter {
- public:
- explicit PPCLinuxAsmPrinter(formatted_raw_ostream &O, TargetMachine &TM,
- MCContext &Ctx, MCStreamer &Streamer,
- const MCAsmInfo *T)
- : PPCAsmPrinter(O, TM, Ctx, Streamer, T) {}
-
- virtual const char *getPassName() const {
- return "Linux PPC Assembly Printer";
- }
-
- bool doFinalization(Module &M);
-
- virtual void EmitFunctionEntryLabel();
-
- void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- AU.addRequired<MachineModuleInfo>();
- AU.addRequired<DwarfWriter>();
- PPCAsmPrinter::getAnalysisUsage(AU);
- }
- };
-
- /// PPCDarwinAsmPrinter - PowerPC assembly printer, customized for Darwin/Mac
- /// OS X
- class PPCDarwinAsmPrinter : public PPCAsmPrinter {
- formatted_raw_ostream &OS;
- public:
- explicit PPCDarwinAsmPrinter(formatted_raw_ostream &O, TargetMachine &TM,
- MCContext &Ctx, MCStreamer &Streamer,
- const MCAsmInfo *T)
- : PPCAsmPrinter(O, TM, Ctx, Streamer, T), OS(O) {}
-
- virtual const char *getPassName() const {
- return "Darwin PPC Assembly Printer";
- }
-
- bool doFinalization(Module &M);
- void EmitStartOfAsmFile(Module &M);
-
- void EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs);
-
- void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- AU.addRequired<MachineModuleInfo>();
- AU.addRequired<DwarfWriter>();
- PPCAsmPrinter::getAnalysisUsage(AU);
- }
- };
-} // end of anonymous namespace
-
-// Include the auto-generated portion of the assembly writer
-#include "PPCGenAsmWriter.inc"
-
-void PPCAsmPrinter::printOp(const MachineOperand &MO) {
- switch (MO.getType()) {
- case MachineOperand::MO_Immediate:
- llvm_unreachable("printOp() does not handle immediate values");
-
- case MachineOperand::MO_MachineBasicBlock:
- O << *MO.getMBB()->getSymbol(OutContext);
- return;
- case MachineOperand::MO_JumpTableIndex:
- O << MAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber()
- << '_' << MO.getIndex();
- // FIXME: PIC relocation model
- return;
- case MachineOperand::MO_ConstantPoolIndex:
- O << MAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber()
- << '_' << MO.getIndex();
- return;
- case MachineOperand::MO_BlockAddress:
- O << *GetBlockAddressSymbol(MO.getBlockAddress());
- return;
- case MachineOperand::MO_ExternalSymbol: {
- // Computing the address of an external symbol, not calling it.
- if (TM.getRelocationModel() == Reloc::Static) {
- O << *GetExternalSymbolSymbol(MO.getSymbolName());
- return;
- }
-
- MCSymbol *NLPSym =
- OutContext.GetOrCreateSymbol(StringRef(MAI->getGlobalPrefix())+
- MO.getSymbolName()+"$non_lazy_ptr");
- MCSymbol *&StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getGVStubEntry(NLPSym);
- if (StubSym == 0)
- StubSym = GetExternalSymbolSymbol(MO.getSymbolName());
-
- O << *NLPSym;
- return;
- }
- case MachineOperand::MO_GlobalAddress: {
- // Computing the address of a global symbol, not calling it.
- GlobalValue *GV = MO.getGlobal();
- MCSymbol *SymToPrint;
-
- // External or weakly linked global variables need non-lazily-resolved stubs
- if (TM.getRelocationModel() != Reloc::Static &&
- (GV->isDeclaration() || GV->isWeakForLinker())) {
- if (!GV->hasHiddenVisibility()) {
- SymToPrint = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
- MCSymbol *&StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getGVStubEntry(SymToPrint);
- if (StubSym == 0)
- StubSym = GetGlobalValueSymbol(GV);
- } else if (GV->isDeclaration() || GV->hasCommonLinkage() ||
- GV->hasAvailableExternallyLinkage()) {
- SymToPrint = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
-
- MCSymbol *&StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().
- getHiddenGVStubEntry(SymToPrint);
- if (StubSym == 0)
- StubSym = GetGlobalValueSymbol(GV);
- } else {
- SymToPrint = GetGlobalValueSymbol(GV);
- }
- } else {
- SymToPrint = GetGlobalValueSymbol(GV);
- }
-
- O << *SymToPrint;
-
- printOffset(MO.getOffset());
- return;
- }
-
- default:
- O << "<unknown operand type: " << MO.getType() << ">";
- return;
- }
-}
-
-/// PrintAsmOperand - Print out an operand for an inline asm expression.
-///
-bool PPCAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant,
- const char *ExtraCode) {
- // Does this asm operand have a single letter operand modifier?
- if (ExtraCode && ExtraCode[0]) {
- if (ExtraCode[1] != 0) return true; // Unknown modifier.
-
- switch (ExtraCode[0]) {
- default: return true; // Unknown modifier.
- case 'c': // Don't print "$" before a global var name or constant.
- // PPC never has a prefix.
- printOperand(MI, OpNo);
- return false;
- case 'L': // Write second word of DImode reference.
- // Verify that this operand has two consecutive registers.
- if (!MI->getOperand(OpNo).isReg() ||
- OpNo+1 == MI->getNumOperands() ||
- !MI->getOperand(OpNo+1).isReg())
- return true;
- ++OpNo; // Return the high-part.
- break;
- case 'I':
- // Write 'i' if an integer constant, otherwise nothing. Used to print
- // addi vs add, etc.
- if (MI->getOperand(OpNo).isImm())
- O << "i";
- return false;
- }
- }
-
- printOperand(MI, OpNo);
- return false;
-}
-
-// At the moment, all inline asm memory operands are a single register.
-// In any case, the output of this routine should always be just one
-// assembler operand.
-
-bool PPCAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant,
- const char *ExtraCode) {
- if (ExtraCode && ExtraCode[0])
- return true; // Unknown modifier.
- assert (MI->getOperand(OpNo).isReg());
- O << "0(";
- printOperand(MI, OpNo);
- O << ")";
- return false;
-}
-
-void PPCAsmPrinter::printPredicateOperand(const MachineInstr *MI, unsigned OpNo,
- const char *Modifier) {
- assert(Modifier && "Must specify 'cc' or 'reg' as predicate op modifier!");
- unsigned Code = MI->getOperand(OpNo).getImm();
- if (!strcmp(Modifier, "cc")) {
- switch ((PPC::Predicate)Code) {
- case PPC::PRED_ALWAYS: return; // Don't print anything for always.
- case PPC::PRED_LT: O << "lt"; return;
- case PPC::PRED_LE: O << "le"; return;
- case PPC::PRED_EQ: O << "eq"; return;
- case PPC::PRED_GE: O << "ge"; return;
- case PPC::PRED_GT: O << "gt"; return;
- case PPC::PRED_NE: O << "ne"; return;
- case PPC::PRED_UN: O << "un"; return;
- case PPC::PRED_NU: O << "nu"; return;
- }
-
- } else {
- assert(!strcmp(Modifier, "reg") &&
- "Need to specify 'cc' or 'reg' as predicate op modifier!");
- // Don't print the register for 'always'.
- if (Code == PPC::PRED_ALWAYS) return;
- printOperand(MI, OpNo+1);
- }
-}
-
-
-/// EmitInstruction -- Print out a single PowerPC MI in Darwin syntax to
-/// the current output stream.
-///
-void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
- // Check for slwi/srwi mnemonics.
- if (MI->getOpcode() == PPC::RLWINM) {
- unsigned char SH = MI->getOperand(2).getImm();
- unsigned char MB = MI->getOperand(3).getImm();
- unsigned char ME = MI->getOperand(4).getImm();
- bool useSubstituteMnemonic = false;
- if (SH <= 31 && MB == 0 && ME == (31-SH)) {
- O << "\tslwi "; useSubstituteMnemonic = true;
- }
- if (SH <= 31 && MB == (32-SH) && ME == 31) {
- O << "\tsrwi "; useSubstituteMnemonic = true;
- SH = 32-SH;
- }
- if (useSubstituteMnemonic) {
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- O << ", " << (unsigned int)SH;
- OutStreamer.AddBlankLine();
- return;
- }
- }
-
- if ((MI->getOpcode() == PPC::OR || MI->getOpcode() == PPC::OR8) &&
- MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
- O << "\tmr ";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- OutStreamer.AddBlankLine();
- return;
- }
-
- if (MI->getOpcode() == PPC::RLDICR) {
- unsigned char SH = MI->getOperand(2).getImm();
- unsigned char ME = MI->getOperand(3).getImm();
- // rldicr RA, RS, SH, 63-SH == sldi RA, RS, SH
- if (63-SH == ME) {
- O << "\tsldi ";
- printOperand(MI, 0);
- O << ", ";
- printOperand(MI, 1);
- O << ", " << (unsigned int)SH;
- OutStreamer.AddBlankLine();
- return;
- }
- }
-
- printInstruction(MI);
- OutStreamer.AddBlankLine();
-}
-
-void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() {
- if (!Subtarget.isPPC64()) // linux/ppc32 - Normal entry label.
- return AsmPrinter::EmitFunctionEntryLabel();
-
- // Emit an official procedure descriptor.
- // FIXME 64-bit SVR4: Use MCSection here!
- O << "\t.section\t\".opd\",\"aw\"\n";
- O << "\t.align 3\n";
- OutStreamer.EmitLabel(CurrentFnSym);
- O << "\t.quad .L." << *CurrentFnSym << ",.TOC.@tocbase\n";
- O << "\t.previous\n";
- O << ".L." << *CurrentFnSym << ":\n";
-}
-
-
-bool PPCLinuxAsmPrinter::doFinalization(Module &M) {
- const TargetData *TD = TM.getTargetData();
-
- bool isPPC64 = TD->getPointerSizeInBits() == 64;
-
- if (isPPC64 && !TOC.empty()) {
- // FIXME 64-bit SVR4: Use MCSection here?
- O << "\t.section\t\".toc\",\"aw\"\n";
-
- // FIXME: This is nondeterministic!
- for (DenseMap<const MCSymbol*, const MCSymbol*>::iterator I = TOC.begin(),
- E = TOC.end(); I != E; ++I) {
- O << *I->second << ":\n";
- O << "\t.tc " << *I->first << "[TC]," << *I->first << '\n';
- }
- }
-
- return AsmPrinter::doFinalization(M);
-}
-
-void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) {
- static const char *const CPUDirectives[] = {
- "",
- "ppc",
- "ppc601",
- "ppc602",
- "ppc603",
- "ppc7400",
- "ppc750",
- "ppc970",
- "ppc64"
- };
-
- unsigned Directive = Subtarget.getDarwinDirective();
- if (Subtarget.isGigaProcessor() && Directive < PPC::DIR_970)
- Directive = PPC::DIR_970;
- if (Subtarget.hasAltivec() && Directive < PPC::DIR_7400)
- Directive = PPC::DIR_7400;
- if (Subtarget.isPPC64() && Directive < PPC::DIR_970)
- Directive = PPC::DIR_64;
- assert(Directive <= PPC::DIR_64 && "Directive out of range.");
- O << "\t.machine " << CPUDirectives[Directive] << '\n';
-
- // Prime text sections so they are adjacent. This reduces the likelihood a
- // large data or debug section causes a branch to exceed 16M limit.
- TargetLoweringObjectFileMachO &TLOFMacho =
- static_cast<TargetLoweringObjectFileMachO &>(getObjFileLowering());
- OutStreamer.SwitchSection(TLOFMacho.getTextCoalSection());
- if (TM.getRelocationModel() == Reloc::PIC_) {
- OutStreamer.SwitchSection(
- TLOFMacho.getMachOSection("__TEXT", "__picsymbolstub1",
- MCSectionMachO::S_SYMBOL_STUBS |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
- 32, SectionKind::getText()));
- } else if (TM.getRelocationModel() == Reloc::DynamicNoPIC) {
- OutStreamer.SwitchSection(
- TLOFMacho.getMachOSection("__TEXT","__symbol_stub1",
- MCSectionMachO::S_SYMBOL_STUBS |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
- 16, SectionKind::getText()));
- }
- OutStreamer.SwitchSection(getObjFileLowering().getTextSection());
-}
-
-static const MCSymbol *GetLazyPtr(const MCSymbol *Sym, MCContext &Ctx) {
- // Remove $stub suffix, add $lazy_ptr.
- SmallString<128> TmpStr(Sym->getName().begin(), Sym->getName().end()-5);
- TmpStr += "$lazy_ptr";
- return Ctx.GetOrCreateSymbol(TmpStr.str());
-}
-
-static const MCSymbol *GetAnonSym(const MCSymbol *Sym, MCContext &Ctx) {
- // Add $tmp suffix to $stub, yielding $stub$tmp.
- SmallString<128> TmpStr(Sym->getName().begin(), Sym->getName().end());
- TmpStr += "$tmp";
- return Ctx.GetOrCreateSymbol(TmpStr.str());
-}
-
-void PPCDarwinAsmPrinter::
-EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
- bool isPPC64 = TM.getTargetData()->getPointerSizeInBits() == 64;
-
- TargetLoweringObjectFileMachO &TLOFMacho =
- static_cast<TargetLoweringObjectFileMachO &>(getObjFileLowering());
-
- // .lazy_symbol_pointer
- const MCSection *LSPSection = TLOFMacho.getLazySymbolPointerSection();
-
- // Output stubs for dynamically-linked functions
- if (TM.getRelocationModel() == Reloc::PIC_) {
- const MCSection *StubSection =
- TLOFMacho.getMachOSection("__TEXT", "__picsymbolstub1",
- MCSectionMachO::S_SYMBOL_STUBS |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
- 32, SectionKind::getText());
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- OutStreamer.SwitchSection(StubSection);
- EmitAlignment(4);
-
- const MCSymbol *Stub = Stubs[i].first;
- const MCSymbol *RawSym = Stubs[i].second;
- const MCSymbol *LazyPtr = GetLazyPtr(Stub, OutContext);
- const MCSymbol *AnonSymbol = GetAnonSym(Stub, OutContext);
-
- O << *Stub << ":\n";
- O << "\t.indirect_symbol " << *RawSym << '\n';
- O << "\tmflr r0\n";
- O << "\tbcl 20,31," << *AnonSymbol << '\n';
- O << *AnonSymbol << ":\n";
- O << "\tmflr r11\n";
- O << "\taddis r11,r11,ha16(" << *LazyPtr << '-' << *AnonSymbol
- << ")\n";
- O << "\tmtlr r0\n";
- O << (isPPC64 ? "\tldu" : "\tlwzu") << " r12,lo16(" << *LazyPtr
- << '-' << *AnonSymbol << ")(r11)\n";
- O << "\tmtctr r12\n";
- O << "\tbctr\n";
-
- OutStreamer.SwitchSection(LSPSection);
- O << *LazyPtr << ":\n";
- O << "\t.indirect_symbol " << *RawSym << '\n';
- O << (isPPC64 ? "\t.quad" : "\t.long") << " dyld_stub_binding_helper\n";
- }
- O << '\n';
- return;
- }
-
- const MCSection *StubSection =
- TLOFMacho.getMachOSection("__TEXT","__symbol_stub1",
- MCSectionMachO::S_SYMBOL_STUBS |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
- 16, SectionKind::getText());
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- const MCSymbol *Stub = Stubs[i].first;
- const MCSymbol *RawSym = Stubs[i].second;
- const MCSymbol *LazyPtr = GetLazyPtr(Stub, OutContext);
-
- OutStreamer.SwitchSection(StubSection);
- EmitAlignment(4);
- O << *Stub << ":\n";
- O << "\t.indirect_symbol " << *RawSym << '\n';
- O << "\tlis r11,ha16(" << *LazyPtr << ")\n";
- O << (isPPC64 ? "\tldu" : "\tlwzu") << " r12,lo16(" << *LazyPtr
- << ")(r11)\n";
- O << "\tmtctr r12\n";
- O << "\tbctr\n";
- OutStreamer.SwitchSection(LSPSection);
- O << *LazyPtr << ":\n";
- O << "\t.indirect_symbol " << *RawSym << '\n';
- O << (isPPC64 ? "\t.quad" : "\t.long") << " dyld_stub_binding_helper\n";
- }
-
- O << '\n';
-}
-
-
-bool PPCDarwinAsmPrinter::doFinalization(Module &M) {
- bool isPPC64 = TM.getTargetData()->getPointerSizeInBits() == 64;
-
- // Darwin/PPC always uses mach-o.
- TargetLoweringObjectFileMachO &TLOFMacho =
- static_cast<TargetLoweringObjectFileMachO &>(getObjFileLowering());
- MachineModuleInfoMachO &MMIMacho =
- MMI->getObjFileInfo<MachineModuleInfoMachO>();
-
- MachineModuleInfoMachO::SymbolListTy Stubs = MMIMacho.GetFnStubList();
- if (!Stubs.empty())
- EmitFunctionStubs(Stubs);
-
- if (MAI->doesSupportExceptionHandling() && MMI) {
- // Add the (possibly multiple) personalities to the set of global values.
- // Only referenced functions get into the Personalities list.
- const std::vector<Function *> &Personalities = MMI->getPersonalities();
- for (std::vector<Function *>::const_iterator I = Personalities.begin(),
- E = Personalities.end(); I != E; ++I) {
- if (*I) {
- MCSymbol *NLPSym = GetSymbolWithGlobalValueBase(*I, "$non_lazy_ptr");
- MCSymbol *&StubSym = MMIMacho.getGVStubEntry(NLPSym);
- StubSym = GetGlobalValueSymbol(*I);
- }
- }
- }
-
- // Output stubs for dynamically-linked functions.
- Stubs = MMIMacho.GetGVStubList();
-
- // Output macho stubs for external and common global variables.
- if (!Stubs.empty()) {
- // Switch with ".non_lazy_symbol_pointer" directive.
- OutStreamer.SwitchSection(TLOFMacho.getNonLazySymbolPointerSection());
- EmitAlignment(isPPC64 ? 3 : 2);
-
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- O << *Stubs[i].first << ":\n";
- O << "\t.indirect_symbol " << *Stubs[i].second << '\n';
- O << (isPPC64 ? "\t.quad\t0\n" : "\t.long\t0\n");
- }
- }
-
- Stubs = MMIMacho.GetHiddenGVStubList();
- if (!Stubs.empty()) {
- OutStreamer.SwitchSection(getObjFileLowering().getDataSection());
- EmitAlignment(isPPC64 ? 3 : 2);
-
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- O << *Stubs[i].first << ":\n";
- O << (isPPC64 ? "\t.quad\t" : "\t.long\t") << *Stubs[i].second << '\n';
- }
- }
-
- // Funny Darwin hack: This flag tells the linker that no global symbols
- // contain code that falls through to other global symbols (e.g. the obvious
- // implementation of multiple entry points). If this doesn't occur, the
- // linker can safely perform dead code stripping. Since LLVM never generates
- // code that does this, it is always safe to set.
- OutStreamer.EmitAssemblerFlag(MCAF_SubsectionsViaSymbols);
-
- return AsmPrinter::doFinalization(M);
-}
-
-
-
-/// createPPCAsmPrinterPass - Returns a pass that prints the PPC assembly code
-/// for a MachineFunction to the given output stream, in a format that the
-/// Darwin assembler can deal with.
-///
-static AsmPrinter *createPPCAsmPrinterPass(formatted_raw_ostream &o,
- TargetMachine &tm,
- MCContext &Ctx, MCStreamer &Streamer,
- const MCAsmInfo *tai) {
- const PPCSubtarget *Subtarget = &tm.getSubtarget<PPCSubtarget>();
-
- if (Subtarget->isDarwin())
- return new PPCDarwinAsmPrinter(o, tm, Ctx, Streamer, tai);
- return new PPCLinuxAsmPrinter(o, tm, Ctx, Streamer, tai);
-}
-
-// Force static initialization.
-extern "C" void LLVMInitializePowerPCAsmPrinter() {
- TargetRegistry::RegisterAsmPrinter(ThePPC32Target, createPPCAsmPrinterPass);
- TargetRegistry::RegisterAsmPrinter(ThePPC64Target, createPPCAsmPrinterPass);
-}
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/CMakeLists.txt b/libclamav/c++/llvm/lib/Target/PowerPC/CMakeLists.txt
index c997c5c..7ffc5eb 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/CMakeLists.txt
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/CMakeLists.txt
@@ -24,6 +24,7 @@ add_llvm_target(PowerPCCodeGen
PPCRegisterInfo.cpp
PPCSubtarget.cpp
PPCTargetMachine.cpp
+ PPCSelectionDAGInfo.cpp
)
target_link_libraries (LLVMPowerPCCodeGen LLVMSelectionDAG)
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPC.td b/libclamav/c++/llvm/lib/Target/PowerPC/PPC.td
index 08f5bb4..27644b2 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPC.td
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPC.td
@@ -96,14 +96,6 @@ def : Processor<"ppc64", G5Itineraries,
include "PPCCallingConv.td"
def PPCInstrInfo : InstrInfo {
- // Define how we want to layout our TargetSpecific information field... This
- // should be kept up-to-date with the fields in the PPCInstrInfo.h file.
- let TSFlagsFields = ["PPC970_First",
- "PPC970_Single",
- "PPC970_Cracked",
- "PPC970_Unit"];
- let TSFlagsShifts = [0, 1, 2, 3];
-
let isLittleEndianEncoding = 1;
}
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp b/libclamav/c++/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
index a752421..e161d23 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
@@ -10,7 +10,7 @@
// This file contains a pass that scans a machine function to determine which
// conditional branches need more than 16 bits of displacement to reach their
// target basic block. It does this in two passes; a calculation of basic block
-// positions pass, and a branch psuedo op to machine branch opcode pass. This
+// positions pass, and a branch pseudo op to machine branch opcode pass. This
// pass should be run last, just before the assembly printer.
//
//===----------------------------------------------------------------------===//
@@ -31,7 +31,7 @@ STATISTIC(NumExpanded, "Number of branches expanded to long format");
namespace {
struct PPCBSel : public MachineFunctionPass {
static char ID;
- PPCBSel() : MachineFunctionPass(&ID) {}
+ PPCBSel() : MachineFunctionPass(ID) {}
/// BlockSizes - The sizes of the basic blocks in the function.
std::vector<unsigned> BlockSizes;
@@ -53,7 +53,8 @@ FunctionPass *llvm::createPPCBranchSelectionPass() {
}
bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
- const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo();
+ const PPCInstrInfo *TII =
+ static_cast<const PPCInstrInfo*>(Fn.getTarget().getInstrInfo());
// Give the blocks of the function a dense, in-order, numbering.
Fn.RenumberBlocks();
BlockSizes.resize(Fn.getNumBlockIDs());
@@ -130,7 +131,7 @@ bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
}
// If this branch is in range, ignore it.
- if (isInt16(BranchSize)) {
+ if (isInt<16>(BranchSize)) {
MBBStartOffset += 4;
continue;
}
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCCallingConv.td b/libclamav/c++/llvm/lib/Target/PowerPC/PPCCallingConv.td
index 155fba2..441db94 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCCallingConv.td
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCCallingConv.td
@@ -1,4 +1,4 @@
-//===- PPCCallingConv.td - Calling Conventions for PowerPC ------*- C++ -*-===//
+//===- PPCCallingConv.td - Calling Conventions for PowerPC -*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp b/libclamav/c++/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp
index 327470d..df9ab52 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCCodeEmitter.cpp
@@ -30,6 +30,7 @@ namespace {
class PPCCodeEmitter : public MachineFunctionPass {
TargetMachine &TM;
JITCodeEmitter &MCE;
+ MachineModuleInfo *MMI;
void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineModuleInfo>();
@@ -44,7 +45,7 @@ namespace {
public:
PPCCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce)
- : MachineFunctionPass(&ID), TM(tm), MCE(mce) {}
+ : MachineFunctionPass(ID), TM(tm), MCE(mce) {}
/// getBinaryCodeForInstr - This function, generated by the
/// CodeEmitterGenerator using TableGen, produces the binary encoding for
@@ -87,7 +88,8 @@ bool PPCCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
MF.getTarget().getRelocationModel() != Reloc::Static) &&
"JIT relocation model must be set to static or default!");
- MCE.setModuleInfo(&getAnalysis<MachineModuleInfo>());
+ MMI = &getAnalysis<MachineModuleInfo>();
+ MCE.setModuleInfo(MMI);
do {
MovePCtoLROffset = 0;
MCE.startFunction(MF);
@@ -108,9 +110,9 @@ void PPCCodeEmitter::emitBasicBlock(MachineBasicBlock &MBB) {
default:
MCE.emitWordBE(getBinaryCodeForInstr(MI));
break;
- case TargetOpcode::DBG_LABEL:
+ case TargetOpcode::PROLOG_LABEL:
case TargetOpcode::EH_LABEL:
- MCE.emitLabel(MI.getOperand(0).getImm());
+ MCE.emitLabel(MI.getOperand(0).getMCSymbol());
break;
case TargetOpcode::IMPLICIT_DEF:
case TargetOpcode::KILL:
@@ -200,7 +202,7 @@ unsigned PPCCodeEmitter::getMachineOpValue(const MachineInstr &MI,
MachineRelocation R;
if (MO.isGlobal()) {
R = MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
- MO.getGlobal(), 0,
+ const_cast<GlobalValue *>(MO.getGlobal()), 0,
isa<Function>(MO.getGlobal()));
} else if (MO.isSymbol()) {
R = MachineRelocation::getExtSym(MCE.getCurrentPCOffset(),
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp b/libclamav/c++/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
index 66dfd4b..db11fde 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
@@ -78,7 +78,7 @@ PPCHazardRecognizer970::GetInstrType(unsigned Opcode,
isLoad = TID.mayLoad();
isStore = TID.mayStore();
- unsigned TSFlags = TID.TSFlags;
+ uint64_t TSFlags = TID.TSFlags;
isFirst = TSFlags & PPCII::PPC970_First;
isSingle = TSFlags & PPCII::PPC970_Single;
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/libclamav/c++/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 9d79c0d..00eebb8 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -16,7 +16,6 @@
#include "PPC.h"
#include "PPCPredicates.h"
#include "PPCTargetMachine.h"
-#include "PPCISelLowering.h"
#include "PPCHazardRecognizers.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -41,8 +40,8 @@ namespace {
/// instructions for SelectionDAG operations.
///
class PPCDAGToDAGISel : public SelectionDAGISel {
- PPCTargetMachine &TM;
- PPCTargetLowering &PPCLowering;
+ const PPCTargetMachine &TM;
+ const PPCTargetLowering &PPCLowering;
const PPCSubtarget &PPCSubTarget;
unsigned GlobalBaseReg;
public:
@@ -215,7 +214,7 @@ void PPCDAGToDAGISel::InsertVRSaveCode(MachineFunction &Fn) {
const TargetInstrInfo &TII = *TM.getInstrInfo();
MachineBasicBlock &EntryBB = *Fn.begin();
- DebugLoc dl = DebugLoc::getUnknownLoc();
+ DebugLoc dl;
// Emit the following code into the entry block:
// InVRSAVE = MFVRSAVE
// UpdatedVRSAVE = UPDATE_VRSAVE InVRSAVE
@@ -253,7 +252,7 @@ SDNode *PPCDAGToDAGISel::getGlobalBaseReg() {
// Insert the set of GlobalBaseReg into the first MBB of the function
MachineBasicBlock &FirstMBB = MF->front();
MachineBasicBlock::iterator MBBI = FirstMBB.begin();
- DebugLoc dl = DebugLoc::getUnknownLoc();
+ DebugLoc dl;
if (PPCLowering.getPointerTy() == MVT::i32) {
GlobalBaseReg = RegInfo->createVirtualRegister(PPC::GPRCRegisterClass);
@@ -470,11 +469,11 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
if (CC == ISD::SETEQ || CC == ISD::SETNE) {
if (isInt32Immediate(RHS, Imm)) {
// SETEQ/SETNE comparison with 16-bit immediate, fold it.
- if (isUInt16(Imm))
+ if (isUInt<16>(Imm))
return SDValue(CurDAG->getMachineNode(PPC::CMPLWI, dl, MVT::i32, LHS,
getI32Imm(Imm & 0xFFFF)), 0);
// If this is a 16-bit signed immediate, fold it.
- if (isInt16((int)Imm))
+ if (isInt<16>((int)Imm))
return SDValue(CurDAG->getMachineNode(PPC::CMPWI, dl, MVT::i32, LHS,
getI32Imm(Imm & 0xFFFF)), 0);
@@ -494,7 +493,7 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
}
Opc = PPC::CMPLW;
} else if (ISD::isUnsignedIntSetCC(CC)) {
- if (isInt32Immediate(RHS, Imm) && isUInt16(Imm))
+ if (isInt32Immediate(RHS, Imm) && isUInt<16>(Imm))
return SDValue(CurDAG->getMachineNode(PPC::CMPLWI, dl, MVT::i32, LHS,
getI32Imm(Imm & 0xFFFF)), 0);
Opc = PPC::CMPLW;
@@ -511,11 +510,11 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
if (CC == ISD::SETEQ || CC == ISD::SETNE) {
if (isInt64Immediate(RHS.getNode(), Imm)) {
// SETEQ/SETNE comparison with 16-bit immediate, fold it.
- if (isUInt16(Imm))
+ if (isUInt<16>(Imm))
return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, LHS,
getI32Imm(Imm & 0xFFFF)), 0);
// If this is a 16-bit signed immediate, fold it.
- if (isInt16(Imm))
+ if (isInt<16>(Imm))
return SDValue(CurDAG->getMachineNode(PPC::CMPDI, dl, MVT::i64, LHS,
getI32Imm(Imm & 0xFFFF)), 0);
@@ -528,7 +527,7 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
// xoris r0,r3,0x1234
// cmpldi cr0,r0,0x5678
// beq cr0,L6
- if (isUInt32(Imm)) {
+ if (isUInt<32>(Imm)) {
SDValue Xor(CurDAG->getMachineNode(PPC::XORIS8, dl, MVT::i64, LHS,
getI64Imm(Imm >> 16)), 0);
return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, Xor,
@@ -537,7 +536,7 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
}
Opc = PPC::CMPLD;
} else if (ISD::isUnsignedIntSetCC(CC)) {
- if (isInt64Immediate(RHS.getNode(), Imm) && isUInt16(Imm))
+ if (isInt64Immediate(RHS.getNode(), Imm) && isUInt<16>(Imm))
return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, LHS,
getI64Imm(Imm & 0xFFFF)), 0);
Opc = PPC::CMPLD;
@@ -713,8 +712,9 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
if (PPCSubTarget.isGigaProcessor() && OtherCondIdx == -1)
IntCR = SDValue(CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, CR7Reg,
CCReg), 0);
- else
- IntCR = SDValue(CurDAG->getMachineNode(PPC::MFCR, dl, MVT::i32, CCReg), 0);
+ else
+ IntCR = SDValue(CurDAG->getMachineNode(PPC::MFCRpseud, dl, MVT::i32,
+ CR7Reg, CCReg), 0);
SDValue Ops[] = { IntCR, getI32Imm((32-(3-Idx)) & 31),
getI32Imm(31), getI32Imm(31) };
@@ -761,12 +761,12 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
unsigned Shift = 0;
// If it can't be represented as a 32 bit value.
- if (!isInt32(Imm)) {
+ if (!isInt<32>(Imm)) {
Shift = CountTrailingZeros_64(Imm);
int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;
// If the shifted value fits 32 bits.
- if (isInt32(ImmSh)) {
+ if (isInt<32>(ImmSh)) {
// Go with the shifted value.
Imm = ImmSh;
} else {
@@ -785,7 +785,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
unsigned Hi = (Imm >> 16) & 0xFFFF;
// Simple value.
- if (isInt16(Imm)) {
+ if (isInt<16>(Imm)) {
// Just the Lo bits.
Result = CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64, getI32Imm(Lo));
} else if (Lo) {
@@ -849,7 +849,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
return CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32,
N->getOperand(0), InFlag);
else
- return CurDAG->getMachineNode(PPC::MFCR, dl, MVT::i32, InFlag);
+ return CurDAG->getMachineNode(PPC::MFCRpseud, dl, MVT::i32,
+ N->getOperand(0), InFlag);
}
case ISD::SDIV: {
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/libclamav/c++/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index efb0e21..14d1b15 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -13,9 +13,9 @@
#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
+#include "PPCPerfectShuffle.h"
#include "PPCPredicates.h"
#include "PPCTargetMachine.h"
-#include "PPCPerfectShuffle.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
@@ -60,10 +60,10 @@ cl::desc("enable preincrement load/store generation on PPC (experimental)"),
static TargetLoweringObjectFile *CreateTLOF(const PPCTargetMachine &TM) {
if (TM.getSubtargetImpl()->isDarwin())
return new TargetLoweringObjectFileMachO();
+
return new TargetLoweringObjectFileELF();
}
-
PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
: TargetLowering(TM, CreateTLOF(TM)), PPCSubTarget(*TM.getSubtargetImpl()) {
@@ -397,7 +397,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(const Type *Ty) const {
- TargetMachine &TM = getTargetMachine();
+ const TargetMachine &TM = getTargetMachine();
// Darwin passes everything on 4 byte boundary.
if (TM.getSubtarget<PPCSubtarget>().isDarwin())
return 4;
@@ -476,7 +476,7 @@ static bool isFloatingPointZero(SDValue Op) {
else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
// Maybe this has already been legalized into the constant pool?
if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
- if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
return CFP->getValueAPF().isZero();
}
return false;
@@ -1095,10 +1095,10 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
//===----------------------------------------------------------------------===//
SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType();
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
- Constant *C = CP->getConstVal();
+ const Constant *C = CP->getConstVal();
SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
SDValue Zero = DAG.getConstant(0, PtrVT);
// FIXME there isn't really any debug info here
@@ -1122,14 +1122,14 @@ SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
// With PIC, the first instruction is actually "GR+hi(&G)".
Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
DAG.getNode(PPCISD::GlobalBaseReg,
- DebugLoc::getUnknownLoc(), PtrVT), Hi);
+ DebugLoc(), PtrVT), Hi);
}
Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
return Lo;
}
-SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
+SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType();
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
@@ -1155,7 +1155,7 @@ SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
// With PIC, the first instruction is actually "GR+hi(&G)".
Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
DAG.getNode(PPCISD::GlobalBaseReg,
- DebugLoc::getUnknownLoc(), PtrVT), Hi);
+ DebugLoc(), PtrVT), Hi);
}
Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
@@ -1163,16 +1163,17 @@ SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
}
SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
llvm_unreachable("TLS not implemented for PPC.");
return SDValue(); // Not reached
}
-SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) {
+SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
+ SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType();
DebugLoc DL = Op.getDebugLoc();
- BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
+ const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
SDValue TgtBA = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true);
SDValue Zero = DAG.getConstant(0, PtrVT);
SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, TgtBA, Zero);
@@ -1192,21 +1193,21 @@ SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) {
// With PIC, the first instruction is actually "GR+hi(&G)".
Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
DAG.getNode(PPCISD::GlobalBaseReg,
- DebugLoc::getUnknownLoc(), PtrVT), Hi);
+ DebugLoc(), PtrVT), Hi);
}
return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}
SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType();
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
- GlobalValue *GV = GSDN->getGlobal();
- SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
- SDValue Zero = DAG.getConstant(0, PtrVT);
// FIXME there isn't really any debug info here
DebugLoc dl = GSDN->getDebugLoc();
+ const GlobalValue *GV = GSDN->getGlobal();
+ SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GSDN->getOffset());
+ SDValue Zero = DAG.getConstant(0, PtrVT);
const TargetMachine &TM = DAG.getTarget();
@@ -1233,7 +1234,7 @@ SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
// With PIC, the first instruction is actually "GR+hi(&G)".
Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
DAG.getNode(PPCISD::GlobalBaseReg,
- DebugLoc::getUnknownLoc(), PtrVT), Hi);
+ DebugLoc(), PtrVT), Hi);
}
Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
@@ -1247,7 +1248,7 @@ SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
false, false, 0);
}
-SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) {
+SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
DebugLoc dl = Op.getDebugLoc();
@@ -1291,17 +1292,14 @@ SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) {
}
SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
- int VarArgsFrameIndex,
- int VarArgsStackOffset,
- unsigned VarArgsNumGPR,
- unsigned VarArgsNumFPR,
- const PPCSubtarget &Subtarget) {
+ const PPCSubtarget &Subtarget) const {
llvm_unreachable("VAARG not yet implemented for the SVR4 ABI!");
return SDValue(); // Not reached
}
-SDValue PPCTargetLowering::LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
+SDValue PPCTargetLowering::LowerTRAMPOLINE(SDValue Op,
+ SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
SDValue Trmp = Op.getOperand(1); // trampoline
SDValue FPtr = Op.getOperand(2); // nested function
@@ -1343,18 +1341,17 @@ SDValue PPCTargetLowering::LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
}
SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
- int VarArgsFrameIndex,
- int VarArgsStackOffset,
- unsigned VarArgsNumGPR,
- unsigned VarArgsNumFPR,
- const PPCSubtarget &Subtarget) {
+ const PPCSubtarget &Subtarget) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+
DebugLoc dl = Op.getDebugLoc();
if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
- SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
+ SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0,
false, false, 0);
@@ -1385,14 +1382,16 @@ SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
// } va_list[1];
- SDValue ArgGPR = DAG.getConstant(VarArgsNumGPR, MVT::i32);
- SDValue ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i32);
+ SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), MVT::i32);
+ SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), MVT::i32);
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
- SDValue StackOffsetFI = DAG.getFrameIndex(VarArgsStackOffset, PtrVT);
- SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
+ SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
+ PtrVT);
+ SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
+ PtrVT);
uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT);
@@ -1525,7 +1524,8 @@ PPCTargetLowering::LowerFormalArguments(SDValue Chain,
const SmallVectorImpl<ISD::InputArg>
&Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) {
+ SmallVectorImpl<SDValue> &InVals)
+ const {
if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) {
return LowerFormalArguments_SVR4(Chain, CallConv, isVarArg, Ins,
dl, DAG, InVals);
@@ -1542,7 +1542,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
const SmallVectorImpl<ISD::InputArg>
&Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) {
+ SmallVectorImpl<SDValue> &InVals) const {
// 32-bit SVR4 ABI Stack Frame Layout:
// +-----------------------------------+
@@ -1575,6 +1575,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
+ PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Potential tail calls could cause overwriting of argument stack slots.
@@ -1630,7 +1631,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8;
int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(),
- isImmutable, false);
+ isImmutable);
// Create load nodes to retrieve arguments from the stack.
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
@@ -1688,24 +1689,26 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
};
const unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
- VarArgsNumGPR = CCInfo.getFirstUnallocated(GPArgRegs, NumGPArgRegs);
- VarArgsNumFPR = CCInfo.getFirstUnallocated(FPArgRegs, NumFPArgRegs);
+ FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs,
+ NumGPArgRegs));
+ FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs,
+ NumFPArgRegs));
// Make room for NumGPArgRegs and NumFPArgRegs.
int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
NumFPArgRegs * EVT(MVT::f64).getSizeInBits()/8;
- VarArgsStackOffset = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
- CCInfo.getNextStackOffset(),
- true, false);
+ FuncInfo->setVarArgsStackOffset(
+ MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
+ CCInfo.getNextStackOffset(), true));
- VarArgsFrameIndex = MFI->CreateStackObject(Depth, 8, false);
- SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
+ FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false));
+ SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
// The fixed integer arguments of a variadic function are
// stored to the VarArgsFrameIndex on the stack.
unsigned GPRIndex = 0;
- for (; GPRIndex != VarArgsNumGPR; ++GPRIndex) {
+ for (; GPRIndex != FuncInfo->getVarArgsNumGPR(); ++GPRIndex) {
SDValue Val = DAG.getRegister(GPArgRegs[GPRIndex], PtrVT);
SDValue Store = DAG.getStore(Chain, dl, Val, FIN, NULL, 0,
false, false, 0);
@@ -1736,7 +1739,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
// The double arguments are stored to the VarArgsFrameIndex
// on the stack.
unsigned FPRIndex = 0;
- for (FPRIndex = 0; FPRIndex != VarArgsNumFPR; ++FPRIndex) {
+ for (FPRIndex = 0; FPRIndex != FuncInfo->getVarArgsNumFPR(); ++FPRIndex) {
SDValue Val = DAG.getRegister(FPArgRegs[FPRIndex], MVT::f64);
SDValue Store = DAG.getStore(Chain, dl, Val, FIN, NULL, 0,
false, false, 0);
@@ -1775,11 +1778,12 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
const SmallVectorImpl<ISD::InputArg>
&Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) {
+ SmallVectorImpl<SDValue> &InVals) const {
// TODO: add description of PPC stack frame format, or at least some docs.
//
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
+ PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = PtrVT == MVT::i64;
@@ -1906,7 +1910,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
CurArgOffset = CurArgOffset + (4 - ObjSize);
}
// The value of the object is its address.
- int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true, false);
+ int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
InVals.push_back(FIN);
if (ObjSize==1 || ObjSize==2) {
@@ -1931,7 +1935,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
// the object.
if (GPR_idx != Num_GPR_Regs) {
unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
- int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true, false);
+ int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0,
@@ -2057,7 +2061,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
if (needsLoad) {
int FI = MFI->CreateFixedObject(ObjSize,
CurArgOffset + (ArgSize - ObjSize),
- isImmutable, false);
+ isImmutable);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0,
false, false, 0);
@@ -2090,9 +2094,10 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
if (isVarArg) {
int Depth = ArgOffset;
- VarArgsFrameIndex = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
- Depth, true, false);
- SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
+ FuncInfo->setVarArgsFrameIndex(
+ MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
+ Depth, true));
+ SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
// If this function is vararg, store any remaining integer argument regs
// to their spots on the stack so that they may be loaded by deferencing the
@@ -2131,6 +2136,7 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
unsigned CC,
const SmallVectorImpl<ISD::OutputArg>
&Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
unsigned &nAltivecParamsAtEnd) {
// Count how many bytes are to be pushed on the stack, including the linkage
// area, and parameter passing area. We start with 24/48 bytes, which is
@@ -2147,9 +2153,9 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
// 16-byte aligned.
nAltivecParamsAtEnd = 0;
for (unsigned i = 0; i != NumOps; ++i) {
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
- EVT ArgVT = Arg.getValueType();
+ EVT ArgVT = Outs[i].VT;
// Varargs Altivec parameters are padded to a 16 byte boundary.
if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) {
@@ -2308,8 +2314,7 @@ static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
int NewRetAddrLoc = SPDiff + PPCFrameInfo::getReturnSaveOffset(isPPC64,
isDarwinABI);
int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
- NewRetAddrLoc,
- true, false);
+ NewRetAddrLoc, true);
EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
@@ -2322,7 +2327,7 @@ static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
int NewFPLoc =
SPDiff + PPCFrameInfo::getFramePointerSaveOffset(isPPC64, isDarwinABI);
int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc,
- true, false);
+ true);
SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
PseudoSourceValue::getFixedStack(NewFPIdx), 0,
@@ -2340,7 +2345,7 @@ CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) {
int Offset = ArgOffset + SPDiff;
uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
- int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true,false);
+ int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
SDValue FIN = DAG.getFrameIndex(FI, VT);
TailCallArgumentInfo Info;
@@ -2359,7 +2364,7 @@ SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
SDValue &LROpOut,
SDValue &FPOpOut,
bool isDarwinABI,
- DebugLoc dl) {
+ DebugLoc dl) const {
if (SPDiff) {
// Load the LR and FP stack slot for later adjusting.
EVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
@@ -2392,7 +2397,7 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
DebugLoc dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
- false, NULL, 0, NULL, 0);
+ false, false, NULL, 0, NULL, 0);
}
/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
@@ -2461,6 +2466,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
unsigned CallOpc = isSVR4ABI ? PPCISD::CALL_SVR4 : PPCISD::CALL_Darwin;
+
bool needIndirectCall = true;
if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
// If this is an absolute destination address, use the munged value.
@@ -2475,14 +2481,15 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
// node so that legalize doesn't hack it.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(),
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
Callee.getValueType());
needIndirectCall = false;
- } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ }
+ }
+ if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
Callee = DAG.getTargetExternalSymbol(S->getSymbol(),
Callee.getValueType());
needIndirectCall = false;
- }
}
if (needIndirectCall) {
// Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
@@ -2594,7 +2601,7 @@ PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) {
+ SmallVectorImpl<SDValue> &InVals) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCRetInfo(CallConv, isVarArg, getTargetMachine(),
@@ -2625,7 +2632,7 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
SDValue &Callee,
int SPDiff, unsigned NumBytes,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SmallVectorImpl<SDValue> &InVals) {
+ SmallVectorImpl<SDValue> &InVals) const {
std::vector<EVT> NodeTys;
SmallVector<SDValue, 8> Ops;
unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff,
@@ -2711,20 +2718,21 @@ PPCTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) {
+ SmallVectorImpl<SDValue> &InVals) const {
if (isTailCall)
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
Ins, DAG);
if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) {
return LowerCall_SVR4(Chain, Callee, CallConv, isVarArg,
- isTailCall, Outs, Ins,
+ isTailCall, Outs, OutVals, Ins,
dl, DAG, InVals);
} else {
return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
- isTailCall, Outs, Ins,
+ isTailCall, Outs, OutVals, Ins,
dl, DAG, InVals);
}
}
@@ -2734,16 +2742,16 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) {
+ SmallVectorImpl<SDValue> &InVals) const {
// See PPCTargetLowering::LowerFormalArguments_SVR4() for a description
// of the 32-bit SVR4 ABI stack frame layout.
assert((CallConv == CallingConv::C ||
CallConv == CallingConv::Fast) && "Unknown calling convention!");
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
unsigned PtrByteSize = 4;
MachineFunction &MF = DAG.getMachineFunction();
@@ -2775,7 +2783,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
unsigned NumArgs = Outs.size();
for (unsigned i = 0; i != NumArgs; ++i) {
- EVT ArgVT = Outs[i].Val.getValueType();
+ EVT ArgVT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
bool Result;
@@ -2844,7 +2852,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
i != e;
++i) {
CCValAssign &VA = ArgLocs[i];
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
if (Flags.isByVal()) {
@@ -2940,9 +2948,10 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) {
+ SmallVectorImpl<SDValue> &InVals) const {
unsigned NumOps = Outs.size();
@@ -2967,7 +2976,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// prereserved space for [SP][CR][LR][3 x unused].
unsigned NumBytes =
CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isVarArg, CallConv,
- Outs,
+ Outs, OutVals,
nAltivecParamsAtEnd);
// Calculate by how many bytes the stack has to be adjusted in case of tail
@@ -3031,7 +3040,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
SmallVector<SDValue, 8> MemOpChains;
for (unsigned i = 0; i != NumOps; ++i) {
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
// PtrOff will be used to store the current argument to the stack if a
@@ -3057,7 +3066,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// Everything else is passed left-justified.
EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
if (GPR_idx != NumGPRs) {
- SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
+ SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, PtrVT, dl, Chain, Arg,
NULL, 0, VT, false, false, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
@@ -3234,8 +3243,8 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
ArgOffset = ((ArgOffset+15)/16)*16;
ArgOffset += 12*16;
for (unsigned i = 0; i != NumOps; ++i) {
- SDValue Arg = Outs[i].Val;
- EVT ArgType = Arg.getValueType();
+ SDValue Arg = OutVals[i];
+ EVT ArgType = Outs[i].VT;
if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
if (++j > NumVRs) {
@@ -3270,6 +3279,16 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
false, false, 0);
}
+ // On Darwin, R12 must contain the address of an indirect callee. This does
+ // not mean the MTCTR instruction must use R12; it's easier to model this as
+ // an extra parameter, so do that.
+ if (!isTailCall &&
+ !dyn_cast<GlobalAddressSDNode>(Callee) &&
+ !dyn_cast<ExternalSymbolSDNode>(Callee) &&
+ !isBLACompatibleAddress(Callee, DAG))
+ RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
+ PPC::R12), Callee));
+
// Build a sequence of copy-to-reg nodes chained together with token chain
// and flag operands which copy the outgoing args into the appropriate regs.
SDValue InFlag;
@@ -3293,7 +3312,8 @@ SDValue
PPCTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
- DebugLoc dl, SelectionDAG &DAG) {
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc dl, SelectionDAG &DAG) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
@@ -3314,7 +3334,7 @@ PPCTargetLowering::LowerReturn(SDValue Chain,
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
- Outs[i].Val, Flag);
+ OutVals[i], Flag);
Flag = Chain.getValue(1);
}
@@ -3325,7 +3345,7 @@ PPCTargetLowering::LowerReturn(SDValue Chain,
}
SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
- const PPCSubtarget &Subtarget) {
+ const PPCSubtarget &Subtarget) const {
// When we pop the dynamic allocation we need to restore the SP link.
DebugLoc dl = Op.getDebugLoc();
@@ -3372,8 +3392,7 @@ PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
// Find out what the fix offset of the frame pointer save area.
int LROffset = PPCFrameInfo::getReturnSaveOffset(isPPC64, isDarwinABI);
// Allocate the frame index for frame pointer save area.
- RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset,
- true, false);
+ RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, true);
// Save the result.
FI->setReturnAddrSaveIndex(RASI);
}
@@ -3399,8 +3418,7 @@ PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
isDarwinABI);
// Allocate the frame index for frame pointer save area.
- FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset,
- true, false);
+ FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
// Save the result.
FI->setFramePointerSaveIndex(FPSI);
}
@@ -3409,7 +3427,7 @@ PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG,
- const PPCSubtarget &Subtarget) {
+ const PPCSubtarget &Subtarget) const {
// Get the inputs.
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
@@ -3430,7 +3448,7 @@ SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
/// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
/// possible.
-SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
+SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
// Not FP? Not a fsel.
if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
!Op.getOperand(2).getValueType().isFloatingPoint())
@@ -3504,7 +3522,7 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
// FIXME: Split this code up when LegalizeDAGTypes lands.
SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
- DebugLoc dl) {
+ DebugLoc dl) const {
assert(Op.getOperand(0).getValueType().isFloatingPoint());
SDValue Src = Op.getOperand(0);
if (Src.getValueType() == MVT::f32)
@@ -3539,7 +3557,8 @@ SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
false, false, 0);
}
-SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
+SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op,
+ SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
// Don't handle ppc_fp128 here; let it be lowered to a libcall.
if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
@@ -3588,7 +3607,8 @@ SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
return FP;
}
-SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) {
+SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
+ SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
/*
The rounding mode is in bits 30:31 of FPSR, and has the following
@@ -3651,7 +3671,7 @@ SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) {
ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
}
-SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) {
+SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
unsigned BitWidth = VT.getSizeInBits();
DebugLoc dl = Op.getDebugLoc();
@@ -3680,7 +3700,7 @@ SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) {
return DAG.getMergeValues(OutOps, 2, dl);
}
-SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) {
+SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
unsigned BitWidth = VT.getSizeInBits();
@@ -3709,7 +3729,7 @@ SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) {
return DAG.getMergeValues(OutOps, 2, dl);
}
-SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) {
+SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
EVT VT = Op.getValueType();
unsigned BitWidth = VT.getSizeInBits();
@@ -3810,7 +3830,8 @@ static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
// selects to a single instruction, return Op. Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
-SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
+SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
+ SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
@@ -3934,17 +3955,17 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
}
// t = vsplti c, result = vsldoi t, t, 1
- if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) {
+ if (SextVal == ((i << 8) | (i < 0 ? 0xFF : 0))) {
SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl);
}
// t = vsplti c, result = vsldoi t, t, 2
- if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) {
+ if (SextVal == ((i << 16) | (i < 0 ? 0xFFFF : 0))) {
SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl);
}
// t = vsplti c, result = vsldoi t, t, 3
- if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) {
+ if (SextVal == ((i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl);
}
@@ -4052,7 +4073,7 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
/// return the code it can be lowered into. Worst case, it can always be
/// lowered into a vperm.
SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
@@ -4218,7 +4239,7 @@ static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc,
/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
/// lower, do it, otherwise return null.
SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
// If this is a lowered altivec predicate compare, CompareOpc is set to the
// opcode number of the comparison.
DebugLoc dl = Op.getDebugLoc();
@@ -4230,8 +4251,8 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
// If this is a non-dot comparison, make the VCMP node and we are done.
if (!isDot) {
SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
- Op.getOperand(1), Op.getOperand(2),
- DAG.getConstant(CompareOpc, MVT::i32));
+ Op.getOperand(1), Op.getOperand(2),
+ DAG.getConstant(CompareOpc, MVT::i32));
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Tmp);
}
@@ -4286,12 +4307,12 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
}
SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
// Create a stack slot that is 16-byte aligned.
MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
int FrameIdx = FrameInfo->CreateStackObject(16, 16, false);
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ EVT PtrVT = getPointerTy();
SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
// Store the input value into Value#0 of the stack slot.
@@ -4303,7 +4324,7 @@ SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
false, false, 0);
}
-SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) {
+SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
if (Op.getValueType() == MVT::v4i32) {
SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
@@ -4364,7 +4385,7 @@ SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) {
/// LowerOperation - Provide custom lowering hooks for some operations.
///
-SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
+SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: llvm_unreachable("Wasn't expecting to be able to lower this!");
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
@@ -4375,12 +4396,10 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
case ISD::SETCC: return LowerSETCC(Op, DAG);
case ISD::TRAMPOLINE: return LowerTRAMPOLINE(Op, DAG);
case ISD::VASTART:
- return LowerVASTART(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset,
- VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget);
+ return LowerVASTART(Op, DAG, PPCSubTarget);
case ISD::VAARG:
- return LowerVAARG(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset,
- VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget);
+ return LowerVAARG(Op, DAG, PPCSubTarget);
case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG, PPCSubTarget);
case ISD::DYNAMIC_STACKALLOC:
@@ -4414,7 +4433,7 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
DebugLoc dl = N->getDebugLoc();
switch (N->getOpcode()) {
default:
@@ -4513,7 +4532,10 @@ PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
F->insert(It, loopMBB);
F->insert(It, exitMBB);
- exitMBB->transferSuccessors(BB);
+ exitMBB->splice(exitMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ exitMBB->transferSuccessorsAndUpdatePHIs(BB);
MachineRegisterInfo &RegInfo = F->getRegInfo();
unsigned TmpReg = (!BinOpcode) ? incr :
@@ -4578,7 +4600,10 @@ PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
F->insert(It, loopMBB);
F->insert(It, exitMBB);
- exitMBB->transferSuccessors(BB);
+ exitMBB->splice(exitMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ exitMBB->transferSuccessorsAndUpdatePHIs(BB);
MachineRegisterInfo &RegInfo = F->getRegInfo();
const TargetRegisterClass *RC =
@@ -4679,8 +4704,7 @@ PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB,
- DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const {
+ MachineBasicBlock *BB) const {
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
// To "insert" these instructions we actually have to insert their
@@ -4712,26 +4736,22 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
unsigned SelectPred = MI->getOperand(4).getImm();
DebugLoc dl = MI->getDebugLoc();
- BuildMI(BB, dl, TII->get(PPC::BCC))
- .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
F->insert(It, copy0MBB);
F->insert(It, sinkMBB);
- // Update machine-CFG edges by first adding all successors of the current
- // block to the new block which will contain the Phi node for the select.
- // Also inform sdisel of the edge changes.
- for (MachineBasicBlock::succ_iterator I = BB->succ_begin(),
- E = BB->succ_end(); I != E; ++I) {
- EM->insert(std::make_pair(*I, sinkMBB));
- sinkMBB->addSuccessor(*I);
- }
- // Next, remove all successors of the current block, and add the true
- // and fallthrough blocks as its successors.
- while (!BB->succ_empty())
- BB->removeSuccessor(BB->succ_begin());
+
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
+
// Next, add the true and fallthrough blocks as its successors.
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
+ BuildMI(BB, dl, TII->get(PPC::BCC))
+ .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
+
// copy0MBB:
// %FalseValue = ...
// # fallthrough to sinkMBB
@@ -4744,7 +4764,8 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = sinkMBB;
- BuildMI(BB, dl, TII->get(PPC::PHI), MI->getOperand(0).getReg())
+ BuildMI(*BB, BB->begin(), dl,
+ TII->get(PPC::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
}
@@ -4830,7 +4851,10 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
F->insert(It, loop2MBB);
F->insert(It, midMBB);
F->insert(It, exitMBB);
- exitMBB->transferSuccessors(BB);
+ exitMBB->splice(exitMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// thisMBB:
// ...
@@ -4898,7 +4922,10 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
F->insert(It, loop2MBB);
F->insert(It, midMBB);
F->insert(It, exitMBB);
- exitMBB->transferSuccessors(BB);
+ exitMBB->splice(exitMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ exitMBB->transferSuccessorsAndUpdatePHIs(BB);
MachineRegisterInfo &RegInfo = F->getRegInfo();
const TargetRegisterClass *RC =
@@ -5024,7 +5051,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
llvm_unreachable("Unexpected instr type to insert");
}
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -5034,26 +5061,26 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
- TargetMachine &TM = getTargetMachine();
+ const TargetMachine &TM = getTargetMachine();
SelectionDAG &DAG = DCI.DAG;
DebugLoc dl = N->getDebugLoc();
switch (N->getOpcode()) {
default: break;
case PPCISD::SHL:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
- if (C->getZExtValue() == 0) // 0 << V -> 0.
+ if (C->isNullValue()) // 0 << V -> 0.
return N->getOperand(0);
}
break;
case PPCISD::SRL:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
- if (C->getZExtValue() == 0) // 0 >>u V -> 0.
+ if (C->isNullValue()) // 0 >>u V -> 0.
return N->getOperand(0);
}
break;
case PPCISD::SRA:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
- if (C->getZExtValue() == 0 || // 0 >>s V -> 0.
+ if (C->isNullValue() || // 0 >>s V -> 0.
C->isAllOnesValue()) // -1 >>s V -> -1.
return N->getOperand(0);
}
@@ -5379,11 +5406,8 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
-/// vector. If it is invalid, don't add anything to Ops. If hasMemory is true
-/// it means one of the asm constraint of the inline asm instruction being
-/// processed is 'm'.
+/// vector. If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, char Letter,
- bool hasMemory,
std::vector<SDValue>&Ops,
SelectionDAG &DAG) const {
SDValue Result(0,0);
@@ -5442,7 +5466,7 @@ void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, char Letter,
}
// Handle standard constraint letters.
- TargetLowering::LowerAsmOperandForConstraint(Op, Letter, hasMemory, Ops, DAG);
+ TargetLowering::LowerAsmOperandForConstraint(Op, Letter, Ops, DAG);
}
// isLegalAddressingMode - Return true if the addressing mode represented
@@ -5493,46 +5517,62 @@ bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
return false;
}
-SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) {
+SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MFI->setReturnAddressIsTaken(true);
+
DebugLoc dl = Op.getDebugLoc();
- // Depths > 0 not supported yet!
- if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
- return SDValue();
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
- MachineFunction &MF = DAG.getMachineFunction();
+ // Make sure the function does not optimize away the store of the RA to
+ // the stack.
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+ FuncInfo->setLRStoreRequired();
+ bool isPPC64 = PPCSubTarget.isPPC64();
+ bool isDarwinABI = PPCSubTarget.isDarwinABI();
+
+ if (Depth > 0) {
+ SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
+ SDValue Offset =
+
+ DAG.getConstant(PPCFrameInfo::getReturnSaveOffset(isPPC64, isDarwinABI),
+ isPPC64? MVT::i64 : MVT::i32);
+ return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
+ DAG.getNode(ISD::ADD, dl, getPointerTy(),
+ FrameAddr, Offset),
+ NULL, 0, false, false, 0);
+ }
// Just load the return address off the stack.
SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
-
- // Make sure the function really does not optimize away the store of the RA
- // to the stack.
- FuncInfo->setLRStoreRequired();
- return DAG.getLoad(getPointerTy(), dl,
- DAG.getEntryNode(), RetAddrFI, NULL, 0,
- false, false, 0);
+ return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
+ RetAddrFI, NULL, 0, false, false, 0);
}
-SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
+SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
+ SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
- // Depths > 0 not supported yet!
- if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
- return SDValue();
+ unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = PtrVT == MVT::i64;
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
- bool is31 = (NoFramePointerElim || MFI->hasVarSizedObjects())
- && MFI->getStackSize();
-
- if (isPPC64)
- return DAG.getCopyFromReg(DAG.getEntryNode(), dl, is31 ? PPC::X31 : PPC::X1,
- MVT::i64);
- else
- return DAG.getCopyFromReg(DAG.getEntryNode(), dl, is31 ? PPC::R31 : PPC::R1,
- MVT::i32);
+ MFI->setFrameAddressIsTaken(true);
+ bool is31 = (DisableFramePointerElim(MF) || MFI->hasVarSizedObjects()) &&
+ MFI->getStackSize() &&
+ !MF.getFunction()->hasFnAttr(Attribute::Naked);
+ unsigned FrameReg = isPPC64 ? (is31 ? PPC::X31 : PPC::X1) :
+ (is31 ? PPC::R31 : PPC::R1);
+ SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
+ PtrVT);
+ while (Depth--)
+ FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
+ FrameAddr, NULL, 0, false, false, 0);
+ return FrameAddr;
}
bool
@@ -5541,9 +5581,23 @@ PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
return false;
}
-EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size, unsigned Align,
- bool isSrcConst, bool isSrcStr,
- SelectionDAG &DAG) const {
+/// getOptimalMemOpType - Returns the target specific optimal type for load
+/// and store operations as a result of memset, memcpy, and memmove
+/// lowering. If DstAlign is zero that means it's safe to destination
+/// alignment can satisfy any constraint. Similarly if SrcAlign is zero it
+/// means there isn't a need to check it against alignment requirement,
+/// probably because the source does not need to be loaded. If
+/// 'NonScalarIntSafe' is true, that means it's safe to return a
+/// non-scalar-integer type, e.g. empty string source, constant, or loaded
+/// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
+/// constant so it does not need to be loaded.
+/// It returns EVT::Other if the type should be determined using generic
+/// target-independent logic.
+EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
+ unsigned DstAlign, unsigned SrcAlign,
+ bool NonScalarIntSafe,
+ bool MemcpyStrSrc,
+ MachineFunction &MF) const {
if (this->PPCSubTarget.isPPC64()) {
return MVT::i64;
} else {
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCISelLowering.h b/libclamav/c++/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 9c390ac..700816f 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -111,9 +111,10 @@ namespace llvm {
/// Return with a flag operand, matched by 'blr'
RET_FLAG,
- /// R32 = MFCR(CRREG, INFLAG) - Represents the MFCR/MFOCRF instructions.
- /// This copies the bits corresponding to the specified CRREG into the
- /// resultant GPR. Bits corresponding to other CR regs are undefined.
+ /// R32 = MFCR(CRREG, INFLAG) - Represents the MFCRpseud/MFOCRF
+ /// instructions. This copies the bits corresponding to the specified
+ /// CRREG into the resultant GPR. Bits corresponding to other CR regs
+ /// are undefined.
MFCR,
/// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
@@ -233,14 +234,8 @@ namespace llvm {
}
class PPCTargetLowering : public TargetLowering {
- int VarArgsFrameIndex; // FrameIndex for start of varargs area.
- int VarArgsStackOffset; // StackOffset for start of stack
- // arguments.
- unsigned VarArgsNumGPR; // Index of the first unused integer
- // register for parameter passing.
- unsigned VarArgsNumFPR; // Index of the first unused double
- // register for parameter passing.
const PPCSubtarget &PPCSubTarget;
+
public:
explicit PPCTargetLowering(PPCTargetMachine &TM);
@@ -285,13 +280,13 @@ namespace llvm {
/// LowerOperation - Provide custom lowering hooks for some operations.
///
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);
+ virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
///
virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG);
+ SelectionDAG &DAG) const;
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
@@ -302,9 +297,9 @@ namespace llvm {
const SelectionDAG &DAG,
unsigned Depth = 0) const;
- virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB,
- DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const;
+ virtual MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *MBB) const;
MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
MachineBasicBlock *MBB, bool is64Bit,
unsigned BinOpcode) const;
@@ -323,12 +318,9 @@ namespace llvm {
unsigned getByValTypeAlignment(const Type *Ty) const;
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
- /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
- /// true it means one of the asm constraint of the inline asm instruction
- /// being processed is 'm'.
+ /// vector. If it is invalid, don't add anything to Ops.
virtual void LowerAsmOperandForConstraint(SDValue Op,
char ConstraintLetter,
- bool hasMemory,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const;
@@ -347,9 +339,22 @@ namespace llvm {
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
- virtual EVT getOptimalMemOpType(uint64_t Size, unsigned Align,
- bool isSrcConst, bool isSrcStr,
- SelectionDAG &DAG) const;
+    /// getOptimalMemOpType - Returns the target-specific optimal type for load
+    /// and store operations as a result of memset, memcpy, and memmove
+    /// lowering. If DstAlign is zero, it is safe to assume that the destination
+    /// alignment can satisfy any constraint. Similarly, if SrcAlign is zero it
+    /// means there is no need to check it against the alignment requirement,
+    /// probably because the source does not need to be loaded. If
+    /// 'NonScalarIntSafe' is true, that means it's safe to return a
+    /// non-scalar-integer type, e.g. an empty string source, a constant, or a
+    /// value loaded from memory. 'MemcpyStrSrc' indicates whether the memcpy
+    /// source is constant so it does not need to be loaded.
+    /// It returns EVT::Other if the type should be determined using generic
+    /// target-independent logic.
+ virtual EVT
+ getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
+ bool NonScalarIntSafe, bool MemcpyStrSrc,
+ MachineFunction &MF) const;
/// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *F) const;
@@ -371,46 +376,43 @@ namespace llvm {
SDValue &LROpOut,
SDValue &FPOpOut,
bool isDarwinABI,
- DebugLoc dl);
-
- SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG);
- SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG);
- SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG);
- SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG);
- SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG);
- SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
- SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG);
- SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG);
- SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG);
+ DebugLoc dl) const;
+
+ SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
- int VarArgsFrameIndex, int VarArgsStackOffset,
- unsigned VarArgsNumGPR, unsigned VarArgsNumFPR,
- const PPCSubtarget &Subtarget);
- SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG, int VarArgsFrameIndex,
- int VarArgsStackOffset, unsigned VarArgsNumGPR,
- unsigned VarArgsNumFPR, const PPCSubtarget &Subtarget);
+ const PPCSubtarget &Subtarget) const;
+ SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG,
+ const PPCSubtarget &Subtarget) const;
SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
- const PPCSubtarget &Subtarget);
+ const PPCSubtarget &Subtarget) const;
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
- const PPCSubtarget &Subtarget);
- SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG);
- SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, DebugLoc dl);
- SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG);
- SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG);
- SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG);
- SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG);
- SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG);
- SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG);
- SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG);
- SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG);
- SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG);
- SDValue LowerMUL(SDValue Op, SelectionDAG &DAG);
+ const PPCSubtarget &Subtarget) const;
+ SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, DebugLoc dl) const;
+ SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals);
+ SmallVectorImpl<SDValue> &InVals) const;
SDValue FinishCall(CallingConv::ID CallConv, DebugLoc dl, bool isTailCall,
bool isVarArg,
SelectionDAG &DAG,
@@ -420,56 +422,60 @@ namespace llvm {
SDValue &Callee,
int SPDiff, unsigned NumBytes,
const SmallVectorImpl<ISD::InputArg> &Ins,
- SmallVectorImpl<SDValue> &InVals);
+ SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals);
+ SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals);
+ SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
- DebugLoc dl, SelectionDAG &DAG);
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc dl, SelectionDAG &DAG) const;
SDValue
LowerFormalArguments_Darwin(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals);
+ SmallVectorImpl<SDValue> &InVals) const;
SDValue
LowerFormalArguments_SVR4(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals);
+ SmallVectorImpl<SDValue> &InVals) const;
SDValue
LowerCall_Darwin(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals);
+ SmallVectorImpl<SDValue> &InVals) const;
SDValue
LowerCall_SVR4(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals);
+ SmallVectorImpl<SDValue> &InVals) const;
};
}
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrAltivec.td b/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
index 3f4d329..256370f 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -15,6 +15,10 @@
// Altivec transformation functions and pattern fragments.
//
+// Since we canonicalize buildvectors to v16i8, all vnots' "-1" operands will be
+// of that type.
+def vnot_ppc : PatFrag<(ops node:$in),
+ (xor node:$in, (bitconvert (v16i8 immAllOnesV)))>;
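The fragment spells out the usual identity: a vector NOT is an XOR with an all-ones vector, and since build_vectors are canonicalized to v16i8 the all-ones constant is matched through a bitconvert of sixteen 0xFF bytes. The same identity in plain C++ (illustrative only):

    #include <array>
    #include <cstdint>

    std::array<uint8_t, 16> vnot(std::array<uint8_t, 16> v) {
        for (uint8_t &b : v)
            b ^= 0xFF;   // xor with the all-ones (immAllOnesV) byte pattern
        return v;
    }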
def vpkuhum_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
@@ -35,33 +39,33 @@ def vpkuwum_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
def vmrglb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
+ (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, false);
}]>;
def vmrglh_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
+ (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, false);
}]>;
def vmrglw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
+ (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, false);
}]>;
def vmrghb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
+ (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, false);
}]>;
def vmrghh_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
+ (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, false);
}]>;
def vmrghw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
+ (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, false);
}]>;
def vmrglb_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
+ (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, true);
}]>;
def vmrglh_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
@@ -321,7 +325,8 @@ def VAND : VXForm_1<1028, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
[(set VRRC:$vD, (and (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VANDC : VXForm_1<1092, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
"vandc $vD, $vA, $vB", VecFP,
- [(set VRRC:$vD, (and (v4i32 VRRC:$vA), (vnot VRRC:$vB)))]>;
+ [(set VRRC:$vD, (and (v4i32 VRRC:$vA),
+ (vnot_ppc VRRC:$vB)))]>;
def VCFSX : VXForm_1<842, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB),
"vcfsx $vD, $vB, $UIMM", VecFP,
@@ -435,7 +440,8 @@ def VSUM4UBS: VX1_Int<1544, "vsum4ubs", int_ppc_altivec_vsum4ubs>;
def VNOR : VXForm_1<1284, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
"vnor $vD, $vA, $vB", VecFP,
- [(set VRRC:$vD, (vnot (or (v4i32 VRRC:$vA), VRRC:$vB)))]>;
+ [(set VRRC:$vD, (vnot_ppc (or (v4i32 VRRC:$vA),
+ VRRC:$vB)))]>;
def VOR : VXForm_1<1156, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
"vor $vD, $vA, $vB", VecFP,
[(set VRRC:$vD, (or (v4i32 VRRC:$vA), VRRC:$vB))]>;
@@ -640,12 +646,11 @@ def:Pat<(vmrghw_unary_shuffle (v16i8 VRRC:$vA), undef),
(VMRGHW VRRC:$vA, VRRC:$vA)>;
// Logical Operations
-def : Pat<(v4i32 (vnot VRRC:$vA)), (VNOR VRRC:$vA, VRRC:$vA)>;
-def : Pat<(v4i32 (vnot_conv VRRC:$vA)), (VNOR VRRC:$vA, VRRC:$vA)>;
+def : Pat<(v4i32 (vnot_ppc VRRC:$vA)), (VNOR VRRC:$vA, VRRC:$vA)>;
-def : Pat<(v4i32 (vnot_conv (or VRRC:$A, VRRC:$B))),
+def : Pat<(v4i32 (vnot_ppc (or VRRC:$A, VRRC:$B))),
(VNOR VRRC:$A, VRRC:$B)>;
-def : Pat<(v4i32 (and VRRC:$A, (vnot_conv VRRC:$B))),
+def : Pat<(v4i32 (and VRRC:$A, (vnot_ppc VRRC:$B))),
(VANDC VRRC:$A, VRRC:$B)>;
def : Pat<(fmul VRRC:$vA, VRRC:$vB),
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrFormats.td b/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrFormats.td
index 54cebcd..4357bdc 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrFormats.td
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrFormats.td
@@ -23,13 +23,18 @@ class I<bits<6> opcode, dag OOL, dag IOL, string asmstr, InstrItinClass itin>
let InOperandList = IOL;
let AsmString = asmstr;
let Itinerary = itin;
-
- /// These fields correspond to the fields in PPCInstrInfo.h. Any changes to
- /// these must be reflected there! See comments there for what these are.
+
bits<1> PPC970_First = 0;
bits<1> PPC970_Single = 0;
bits<1> PPC970_Cracked = 0;
bits<3> PPC970_Unit = 0;
+
+ /// These fields correspond to the fields in PPCInstrInfo.h. Any changes to
+ /// these must be reflected there! See comments there for what these are.
+ let TSFlags{0} = PPC970_First;
+ let TSFlags{1} = PPC970_Single;
+ let TSFlags{2} = PPC970_Cracked;
+ let TSFlags{5-3} = PPC970_Unit;
}
class PPC970_DGroup_First { bits<1> PPC970_First = 1; }
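The new let-bindings pack the four PPC970 scheduling fields into the generic TSFlags word so that PPCInstrInfo.h can read them back by bit position. A rough C++ analogue of the assumed layout (bit 0 = First, bit 1 = Single, bit 2 = Cracked, bits 5-3 = Unit):

    #include <cstdint>

    constexpr uint32_t packPPC970Flags(bool First, bool Single, bool Cracked,
                                       unsigned Unit /* 3 bits */) {
        return  (uint32_t)First         |   // TSFlags{0}
               ((uint32_t)Single  << 1) |   // TSFlags{1}
               ((uint32_t)Cracked << 2) |   // TSFlags{2}
               ((Unit & 0x7u)     << 3);    // TSFlags{5-3}
    }

    static_assert(packPPC970Flags(true, false, false, 2) == 0x11,
                  "First lands in bit 0, Unit in bits 5-3");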
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 9895bea..c17108f 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -18,82 +18,27 @@
#include "PPCGenInstrInfo.inc"
#include "PPCTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/MC/MCAsmInfo.h"
-using namespace llvm;
+namespace llvm {
extern cl::opt<bool> EnablePPC32RS; // FIXME (64-bit): See PPCRegisterInfo.cpp.
extern cl::opt<bool> EnablePPC64RS; // FIXME (64-bit): See PPCRegisterInfo.cpp.
+}
+
+using namespace llvm;
PPCInstrInfo::PPCInstrInfo(PPCTargetMachine &tm)
: TargetInstrInfoImpl(PPCInsts, array_lengthof(PPCInsts)), TM(tm),
RI(*TM.getSubtargetImpl(), *this) {}
-bool PPCInstrInfo::isMoveInstr(const MachineInstr& MI,
- unsigned& sourceReg,
- unsigned& destReg,
- unsigned& sourceSubIdx,
- unsigned& destSubIdx) const {
- sourceSubIdx = destSubIdx = 0; // No sub-registers.
-
- unsigned oc = MI.getOpcode();
- if (oc == PPC::OR || oc == PPC::OR8 || oc == PPC::VOR ||
- oc == PPC::OR4To8 || oc == PPC::OR8To4) { // or r1, r2, r2
- assert(MI.getNumOperands() >= 3 &&
- MI.getOperand(0).isReg() &&
- MI.getOperand(1).isReg() &&
- MI.getOperand(2).isReg() &&
- "invalid PPC OR instruction!");
- if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
- sourceReg = MI.getOperand(1).getReg();
- destReg = MI.getOperand(0).getReg();
- return true;
- }
- } else if (oc == PPC::ADDI) { // addi r1, r2, 0
- assert(MI.getNumOperands() >= 3 &&
- MI.getOperand(0).isReg() &&
- MI.getOperand(2).isImm() &&
- "invalid PPC ADDI instruction!");
- if (MI.getOperand(1).isReg() && MI.getOperand(2).getImm() == 0) {
- sourceReg = MI.getOperand(1).getReg();
- destReg = MI.getOperand(0).getReg();
- return true;
- }
- } else if (oc == PPC::ORI) { // ori r1, r2, 0
- assert(MI.getNumOperands() >= 3 &&
- MI.getOperand(0).isReg() &&
- MI.getOperand(1).isReg() &&
- MI.getOperand(2).isImm() &&
- "invalid PPC ORI instruction!");
- if (MI.getOperand(2).getImm() == 0) {
- sourceReg = MI.getOperand(1).getReg();
- destReg = MI.getOperand(0).getReg();
- return true;
- }
- } else if (oc == PPC::FMR || oc == PPC::FMRSD) { // fmr r1, r2
- assert(MI.getNumOperands() >= 2 &&
- MI.getOperand(0).isReg() &&
- MI.getOperand(1).isReg() &&
- "invalid PPC FMR instruction");
- sourceReg = MI.getOperand(1).getReg();
- destReg = MI.getOperand(0).getReg();
- return true;
- } else if (oc == PPC::MCRF) { // mcrf cr1, cr2
- assert(MI.getNumOperands() >= 2 &&
- MI.getOperand(0).isReg() &&
- MI.getOperand(1).isReg() &&
- "invalid PPC MCRF instruction");
- sourceReg = MI.getOperand(1).getReg();
- destReg = MI.getOperand(0).getReg();
- return true;
- }
- return false;
-}
-
unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
@@ -199,9 +144,7 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const {
- DebugLoc DL = DebugLoc::getUnknownLoc();
- if (MI != MBB.end()) DL = MI->getDebugLoc();
-
+ DebugLoc DL;
BuildMI(MBB, MI, DL, get(PPC::NOP));
}
@@ -213,7 +156,15 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
bool AllowModify) const {
// If the block has no terminators, it just falls into the block after it.
MachineBasicBlock::iterator I = MBB.end();
- if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
+ if (I == MBB.begin())
+ return false;
+ --I;
+ while (I->isDebugValue()) {
+ if (I == MBB.begin())
+ return false;
+ --I;
+ }
+ if (!isUnpredicatedTerminator(I))
return false;
// Get the last instruction in the block.
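The added loop just makes the backwards scan for a terminator transparent to DBG_VALUE instructions, so the presence of debug info cannot change branch analysis (the same guard is added to RemoveBranch below). The pattern in self-contained form, with illustrative names rather than LLVM API:

    #include <vector>

    struct Instr { bool isDebugValue; int opcode; };

    // Return the last non-debug instruction, or nullptr if there is none.
    const Instr *lastRealInstr(const std::vector<Instr> &block) {
        for (auto it = block.rbegin(); it != block.rend(); ++it)
            if (!it->isDebugValue)
                return &*it;
        return nullptr;
    }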
@@ -281,6 +232,11 @@ unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator I = MBB.end();
if (I == MBB.begin()) return 0;
--I;
+ while (I->isDebugValue()) {
+ if (I == MBB.begin())
+ return 0;
+ --I;
+ }
if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC)
return 0;
@@ -302,9 +258,8 @@ unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
unsigned
PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc argument
- DebugLoc dl = DebugLoc::getUnknownLoc();
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 2 || Cond.size() == 0) &&
@@ -313,52 +268,46 @@ PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
// One-way branch.
if (FBB == 0) {
if (Cond.empty()) // Unconditional branch
- BuildMI(&MBB, dl, get(PPC::B)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(PPC::B)).addMBB(TBB);
else // Conditional branch
- BuildMI(&MBB, dl, get(PPC::BCC))
+ BuildMI(&MBB, DL, get(PPC::BCC))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
return 1;
}
// Two-way Conditional Branch.
- BuildMI(&MBB, dl, get(PPC::BCC))
+ BuildMI(&MBB, DL, get(PPC::BCC))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
- BuildMI(&MBB, dl, get(PPC::B)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(PPC::B)).addMBB(FBB);
return 2;
}
-bool PPCInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC) const {
- if (DestRC != SrcRC) {
- // Not yet supported!
- return false;
- }
-
- DebugLoc DL = DebugLoc::getUnknownLoc();
- if (MI != MBB.end()) DL = MI->getDebugLoc();
-
- if (DestRC == PPC::GPRCRegisterClass) {
- BuildMI(MBB, MI, DL, get(PPC::OR), DestReg).addReg(SrcReg).addReg(SrcReg);
- } else if (DestRC == PPC::G8RCRegisterClass) {
- BuildMI(MBB, MI, DL, get(PPC::OR8), DestReg).addReg(SrcReg).addReg(SrcReg);
- } else if (DestRC == PPC::F4RCRegisterClass ||
- DestRC == PPC::F8RCRegisterClass) {
- BuildMI(MBB, MI, DL, get(PPC::FMR), DestReg).addReg(SrcReg);
- } else if (DestRC == PPC::CRRCRegisterClass) {
- BuildMI(MBB, MI, DL, get(PPC::MCRF), DestReg).addReg(SrcReg);
- } else if (DestRC == PPC::VRRCRegisterClass) {
- BuildMI(MBB, MI, DL, get(PPC::VOR), DestReg).addReg(SrcReg).addReg(SrcReg);
- } else if (DestRC == PPC::CRBITRCRegisterClass) {
- BuildMI(MBB, MI, DL, get(PPC::CROR), DestReg).addReg(SrcReg).addReg(SrcReg);
- } else {
- // Attempt to copy register that is not GPR or FPR
- return false;
- }
-
- return true;
+void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ unsigned Opc;
+ if (PPC::GPRCRegClass.contains(DestReg, SrcReg))
+ Opc = PPC::OR;
+ else if (PPC::G8RCRegClass.contains(DestReg, SrcReg))
+ Opc = PPC::OR8;
+ else if (PPC::F4RCRegClass.contains(DestReg, SrcReg))
+ Opc = PPC::FMR;
+ else if (PPC::CRRCRegClass.contains(DestReg, SrcReg))
+ Opc = PPC::MCRF;
+ else if (PPC::VRRCRegClass.contains(DestReg, SrcReg))
+ Opc = PPC::VOR;
+ else if (PPC::CRBITRCRegClass.contains(DestReg, SrcReg))
+ Opc = PPC::CROR;
+ else
+ llvm_unreachable("Impossible reg-to-reg copy");
+
+ const TargetInstrDesc &TID = get(Opc);
+ if (TID.getNumOperands() == 3)
+ BuildMI(MBB, I, DL, TID, DestReg)
+ .addReg(SrcReg).addReg(SrcReg, getKillRegState(KillSrc));
+ else
+ BuildMI(MBB, I, DL, TID, DestReg).addReg(SrcReg, getKillRegState(KillSrc));
}
bool
@@ -367,7 +316,7 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
int FrameIdx,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const{
- DebugLoc DL = DebugLoc::getUnknownLoc();
+ DebugLoc DL;
if (RC == PPC::GPRCRegisterClass) {
if (SrcReg != PPC::LR) {
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STW))
@@ -430,7 +379,8 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
// issue a MFCR to save all of the CRBits.
unsigned ScratchReg = TM.getSubtargetImpl()->isDarwinABI() ?
PPC::R2 : PPC::R0;
- NewMIs.push_back(BuildMI(MF, DL, get(PPC::MFCR), ScratchReg));
+ NewMIs.push_back(BuildMI(MF, DL, get(PPC::MFCRpseud), ScratchReg)
+ .addReg(SrcReg, getKillRegState(isKill)));
// If the saved register wasn't CR0, shift the bits left so that they are
// in CR0's slot.
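The "shift the bits left" step refers to the word mfcr produces: all eight 4-bit CR fields packed into a GPR with CR0 in the most significant nibble. Rotating left by 4 times the field number moves the spilled field into CR0's slot before the word is stored, which is what the rlwinm built by the surrounding code does. A worked example in plain C++ (field numbering assumed to be the standard mfcr layout):

    #include <cstdint>

    uint32_t rotl32(uint32_t v, unsigned n) {
        n &= 31;
        return n ? (v << n) | (v >> (32 - n)) : v;
    }

    // crField is 0..7; mirrors "rlwinm rA, rA, 4*crField, 0, 31".
    uint32_t moveCRFieldToCR0Slot(uint32_t mfcrValue, unsigned crField) {
        return rotl32(mfcrValue, 4 * crField);
    }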
@@ -504,7 +454,8 @@ void
PPCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIdx,
- const TargetRegisterClass *RC) const {
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
MachineFunction &MF = *MBB.getParent();
SmallVector<MachineInstr*, 4> NewMIs;
@@ -515,6 +466,14 @@ PPCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
MBB.insert(MI, NewMIs[i]);
+
+ const MachineFrameInfo &MFI = *MF.getFrameInfo();
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIdx),
+ MachineMemOperand::MOStore, /*Offset=*/0,
+ MFI.getObjectSize(FrameIdx),
+ MFI.getObjectAlignment(FrameIdx));
+ NewMIs.back()->addMemOperand(MF, MMO);
}
void
@@ -619,131 +578,35 @@ void
PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIdx,
- const TargetRegisterClass *RC) const {
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
MachineFunction &MF = *MBB.getParent();
SmallVector<MachineInstr*, 4> NewMIs;
- DebugLoc DL = DebugLoc::getUnknownLoc();
+ DebugLoc DL;
if (MI != MBB.end()) DL = MI->getDebugLoc();
LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs);
for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
MBB.insert(MI, NewMIs[i]);
-}
-/// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
-/// copy instructions, turning them into load/store instructions.
-MachineInstr *PPCInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const {
- if (Ops.size() != 1) return NULL;
-
- // Make sure this is a reg-reg copy. Note that we can't handle MCRF, because
- // it takes more than one instruction to store it.
- unsigned Opc = MI->getOpcode();
- unsigned OpNum = Ops[0];
-
- MachineInstr *NewMI = NULL;
- if ((Opc == PPC::OR &&
- MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
- if (OpNum == 0) { // move -> store
- unsigned InReg = MI->getOperand(1).getReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(), get(PPC::STW))
- .addReg(InReg,
- getKillRegState(isKill) |
- getUndefRegState(isUndef)),
- FrameIndex);
- } else { // move -> load
- unsigned OutReg = MI->getOperand(0).getReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(), get(PPC::LWZ))
- .addReg(OutReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef)),
- FrameIndex);
- }
- } else if ((Opc == PPC::OR8 &&
- MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
- if (OpNum == 0) { // move -> store
- unsigned InReg = MI->getOperand(1).getReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(), get(PPC::STD))
- .addReg(InReg,
- getKillRegState(isKill) |
- getUndefRegState(isUndef)),
- FrameIndex);
- } else { // move -> load
- unsigned OutReg = MI->getOperand(0).getReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(), get(PPC::LD))
- .addReg(OutReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef)),
- FrameIndex);
- }
- } else if (Opc == PPC::FMR || Opc == PPC::FMRSD) {
- // The register may be F4RC or F8RC, and that determines the memory op.
- unsigned OrigReg = MI->getOperand(OpNum).getReg();
- // We cannot tell the register class from a physreg alone.
- if (TargetRegisterInfo::isPhysicalRegister(OrigReg))
- return NULL;
- const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(OrigReg);
- const bool is64 = RC == PPC::F8RCRegisterClass;
-
- if (OpNum == 0) { // move -> store
- unsigned InReg = MI->getOperand(1).getReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(),
- get(is64 ? PPC::STFD : PPC::STFS))
- .addReg(InReg,
- getKillRegState(isKill) |
- getUndefRegState(isUndef)),
- FrameIndex);
- } else { // move -> load
- unsigned OutReg = MI->getOperand(0).getReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(),
- get(is64 ? PPC::LFD : PPC::LFS))
- .addReg(OutReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef)),
- FrameIndex);
- }
- }
-
- return NewMI;
+ const MachineFrameInfo &MFI = *MF.getFrameInfo();
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIdx),
+ MachineMemOperand::MOLoad, /*Offset=*/0,
+ MFI.getObjectSize(FrameIdx),
+ MFI.getObjectAlignment(FrameIdx));
+ NewMIs.back()->addMemOperand(MF, MMO);
}
-bool PPCInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const {
- if (Ops.size() != 1) return false;
-
- // Make sure this is a reg-reg copy. Note that we can't handle MCRF, because
- // it takes more than one instruction to store it.
- unsigned Opc = MI->getOpcode();
-
- if ((Opc == PPC::OR &&
- MI->getOperand(1).getReg() == MI->getOperand(2).getReg()))
- return true;
- else if ((Opc == PPC::OR8 &&
- MI->getOperand(1).getReg() == MI->getOperand(2).getReg()))
- return true;
- else if (Opc == PPC::FMR || Opc == PPC::FMRSD)
- return true;
-
- return false;
+MachineInstr*
+PPCInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
+ int FrameIx, uint64_t Offset,
+ const MDNode *MDPtr,
+ DebugLoc DL) const {
+ MachineInstrBuilder MIB = BuildMI(MF, DL, get(PPC::DBG_VALUE));
+ addFrameReference(MIB, FrameIx, 0, false).addImm(Offset).addMetadata(MDPtr);
+ return &*MIB;
}
-
bool PPCInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
assert(Cond.size() == 2 && "Invalid PPC branch opcode!");
@@ -762,9 +625,10 @@ unsigned PPCInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
const char *AsmStr = MI->getOperand(0).getSymbolName();
return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
}
- case PPC::DBG_LABEL:
+ case PPC::PROLOG_LABEL:
case PPC::EH_LABEL:
case PPC::GC_LABEL:
+ case PPC::DBG_VALUE:
return 0;
default:
return 4; // PowerPC instructions are all 4 bytes
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrInfo.h b/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrInfo.h
index 57facac..fc7b7b3 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrInfo.h
@@ -82,12 +82,6 @@ public:
///
virtual const PPCRegisterInfo &getRegisterInfo() const { return RI; }
- /// Return true if the instruction is a register to register move and return
- /// the source and dest operands and their sub-register indices by reference.
- virtual bool isMoveInstr(const MachineInstr &MI,
- unsigned &SrcReg, unsigned &DstReg,
- unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
-
unsigned isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
unsigned isStoreToStackSlot(const MachineInstr *MI,
@@ -109,40 +103,31 @@ public:
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC) const;
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const;
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC) const;
-
- /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
- /// copy instructions, turning them into load/store instructions.
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
-
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const {
- return 0;
- }
-
- virtual bool canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const;
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const;
+ virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
+ int FrameIx,
+ uint64_t Offset,
+ const MDNode *MDPtr,
+ DebugLoc DL) const;
+
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index 845cd8f..eb100ec 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -111,9 +111,11 @@ def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_PPCCallSeqEnd,
def SDT_PPCCall : SDTypeProfile<0, -1, [SDTCisInt<0>]>;
def PPCcall_Darwin : SDNode<"PPCISD::CALL_Darwin", SDT_PPCCall,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag,
+ SDNPVariadic]>;
def PPCcall_SVR4 : SDNode<"PPCISD::CALL_SVR4", SDT_PPCCall,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag,
+ SDNPVariadic]>;
def PPCnop : SDNode<"PPCISD::NOP", SDT_PPCnop, [SDNPInFlag, SDNPOutFlag]>;
def PPCload : SDNode<"PPCISD::LOAD", SDTypeProfile<1, 1, []>,
[SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
@@ -124,16 +126,18 @@ def PPCtoc_restore : SDNode<"PPCISD::TOC_RESTORE", SDTypeProfile<0, 0, []>,
def PPCmtctr : SDNode<"PPCISD::MTCTR", SDT_PPCCall,
[SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
def PPCbctrl_Darwin : SDNode<"PPCISD::BCTRL_Darwin", SDTNone,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag,
+ SDNPVariadic]>;
def PPCbctrl_SVR4 : SDNode<"PPCISD::BCTRL_SVR4", SDTNone,
- [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag,
+ SDNPVariadic]>;
def retflag : SDNode<"PPCISD::RET_FLAG", SDTNone,
- [SDNPHasChain, SDNPOptInFlag]>;
+ [SDNPHasChain, SDNPOptInFlag, SDNPVariadic]>;
def PPCtc_return : SDNode<"PPCISD::TC_RETURN", SDT_PPCTC_ret,
- [SDNPHasChain, SDNPOptInFlag]>;
+ [SDNPHasChain, SDNPOptInFlag, SDNPVariadic]>;
def PPCvcmp : SDNode<"PPCISD::VCMP" , SDT_PPCvcmp, []>;
def PPCvcmp_o : SDNode<"PPCISD::VCMPo", SDT_PPCvcmp, [SDNPOutFlag]>;
@@ -658,7 +662,7 @@ def STWCX : XForm_1<31, 150, (outs), (ins GPRC:$rS, memrr:$dst),
[(PPCstcx GPRC:$rS, xoaddr:$dst)]>,
isDOT;
-let isBarrier = 1, hasCtrlDep = 1 in
+let isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in
def TRAP : XForm_24<31, 4, (outs), (ins), "trap", LdStGeneral, [(trap)]>;
//===----------------------------------------------------------------------===//
@@ -858,7 +862,6 @@ def STFDX : XForm_28<31, 727, (outs), (ins F8RC:$frS, memrr:$dst),
[(store F8RC:$frS, xaddr:$dst)]>;
}
-let isBarrier = 1 in
def SYNC : XForm_24_sync<31, 598, (outs), (ins),
"sync", LdStSync,
[(int_ppc_sync)]>;
@@ -1019,9 +1022,7 @@ let Uses = [RM] in {
}
}
-/// FMR is split into 2 versions, one for 4/8 byte FP, and one for extending.
-///
-/// Note that these are defined as pseudo-ops on the PPC970 because they are
+/// Note that FMR is defined as a pseudo-op on the PPC970 because FMRs are
/// often coalesced away and we don't want the dispatch group builder to think
/// that they will fill slots (which could cause the load of a LSU reject to
/// sneak into a d-group with a store).
@@ -1029,10 +1030,6 @@ def FMR : XForm_26<63, 72, (outs F4RC:$frD), (ins F4RC:$frB),
"fmr $frD, $frB", FPGeneral,
[]>, // (set F4RC:$frD, F4RC:$frB)
PPC970_Unit_Pseudo;
-def FMRSD : XForm_26<63, 72, (outs F8RC:$frD), (ins F4RC:$frB),
- "fmr $frD, $frB", FPGeneral,
- [(set F8RC:$frD, (fextend F4RC:$frB))]>,
- PPC970_Unit_Pseudo;
let PPC970_Unit = 3 in { // FPU Operations.
// These are artificially split into two different forms, for 4/8 byte FP.
@@ -1114,14 +1111,17 @@ def MFVRSAVE : XFXForm_1_ext<31, 339, 256, (outs GPRC:$rT), (ins),
def MTCRF : XFXForm_5<31, 144, (outs), (ins crbitm:$FXM, GPRC:$rS),
"mtcrf $FXM, $rS", BrMCRX>,
PPC970_MicroCode, PPC970_Unit_CRU;
-// FIXME: this Uses all the CR registers. Marking it as such is
-// necessary for DeadMachineInstructionElim to do the right thing.
-// However, marking it also exposes PR 2964, and causes crashes in
-// the Local RA because it doesn't like this sequence:
+
+// This is a pseudo for MFCR, which implicitly uses all 8 of its subregisters;
+// declaring that here gives the local register allocator problems with this:
// vreg = MCRF CR0
// MFCR <kill of whatever preg got assigned to vreg>
-// For now DeadMachineInstructionElim is turned off, so don't do the marking.
-def MFCR : XFXForm_3<31, 19, (outs GPRC:$rT), (ins), "mfcr $rT", SprMFCR>,
+// while not declaring it breaks DeadMachineInstructionElimination.
+// As it turns out, in all cases where we currently use this,
+// we're only interested in one subregister of it. Represent this in the
+// instruction to keep the register allocator from becoming confused.
+def MFCRpseud: XFXForm_3<31, 19, (outs GPRC:$rT), (ins crbitm:$FXM),
+ "mfcr $rT ${:comment} $FXM", SprMFCR>,
PPC970_MicroCode, PPC970_Unit_CRU;
def MFOCRF: XFXForm_5a<31, 19, (outs GPRC:$rT), (ins crbitm:$FXM),
"mfcr $rT, $FXM", SprMFCR>,
@@ -1470,10 +1470,13 @@ def : Pat<(extloadi16 iaddr:$src),
(LHZ iaddr:$src)>;
def : Pat<(extloadi16 xaddr:$src),
(LHZX xaddr:$src)>;
-def : Pat<(extloadf32 iaddr:$src),
- (FMRSD (LFS iaddr:$src))>;
-def : Pat<(extloadf32 xaddr:$src),
- (FMRSD (LFSX xaddr:$src))>;
+def : Pat<(f64 (extloadf32 iaddr:$src)),
+ (COPY_TO_REGCLASS (LFS iaddr:$src), F8RC)>;
+def : Pat<(f64 (extloadf32 xaddr:$src)),
+ (COPY_TO_REGCLASS (LFSX xaddr:$src), F8RC)>;
+
+def : Pat<(f64 (fextend F4RC:$src)),
+ (COPY_TO_REGCLASS F4RC:$src, F8RC)>;
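These patterns can use a bare COPY_TO_REGCLASS because PPC floating-point registers hold single-precision values in double-precision format, so widening f32 to f64 needs no arithmetic, only a register-class change. Source-level view of the conversion this makes free (the "no instruction" claim is the assumption being illustrated):

    // On PPC this should lower to at most a register copy: the f32 value
    // already sits in a 64-bit FPR in double format.
    double widen(float f) {
        return static_cast<double>(f);
    }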
// Memory barriers
def : Pat<(membarrier (i32 imm /*ll*/),
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCMCAsmInfo.cpp b/libclamav/c++/llvm/lib/Target/PowerPC/PPCMCAsmInfo.cpp
index b37aee8..3644c79 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCMCAsmInfo.cpp
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCMCAsmInfo.cpp
@@ -38,7 +38,6 @@ PPCLinuxMCAsmInfo::PPCLinuxMCAsmInfo(bool is64Bit) {
UsesELFSectionDirectiveForBSS = true;
// Debug Information
- AbsoluteDebugSectionOffsets = true;
SupportsDebugInformation = true;
PCSymbol = ".";
@@ -49,7 +48,6 @@ PPCLinuxMCAsmInfo::PPCLinuxMCAsmInfo(bool is64Bit) {
// Exceptions handling
if (!is64Bit)
ExceptionsType = ExceptionHandling::Dwarf;
- AbsoluteEHSectionOffsets = false;
ZeroDirective = "\t.space\t";
Data64bitsDirective = is64Bit ? "\t.quad\t" : 0;
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.h b/libclamav/c++/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.h
index b359dd3..e2649c8 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.h
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCMachineFunctionInfo.h
@@ -58,6 +58,18 @@ private:
/// how the caller's stack pointer should be calculated (epilog/dynamicalloc).
bool HasFastCall;
+ /// VarArgsFrameIndex - FrameIndex for start of varargs area.
+ int VarArgsFrameIndex;
+ /// VarArgsStackOffset - StackOffset for start of stack
+ /// arguments.
+ int VarArgsStackOffset;
+ /// VarArgsNumGPR - Index of the first unused integer
+ /// register for parameter passing.
+ unsigned VarArgsNumGPR;
+ /// VarArgsNumFPR - Index of the first unused double
+ /// register for parameter passing.
+ unsigned VarArgsNumFPR;
+
public:
explicit PPCFunctionInfo(MachineFunction &MF)
: FramePointerSaveIndex(0),
@@ -66,7 +78,11 @@ public:
LRStoreRequired(false),
MinReservedArea(0),
TailCallSPDelta(0),
- HasFastCall(false) {}
+ HasFastCall(false),
+ VarArgsFrameIndex(0),
+ VarArgsStackOffset(0),
+ VarArgsNumGPR(0),
+ VarArgsNumFPR(0) {}
int getFramePointerSaveIndex() const { return FramePointerSaveIndex; }
void setFramePointerSaveIndex(int Idx) { FramePointerSaveIndex = Idx; }
@@ -96,6 +112,18 @@ public:
void setHasFastCall() { HasFastCall = true; }
bool hasFastCall() const { return HasFastCall;}
+
+ int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
+ void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; }
+
+ int getVarArgsStackOffset() const { return VarArgsStackOffset; }
+ void setVarArgsStackOffset(int Offset) { VarArgsStackOffset = Offset; }
+
+ unsigned getVarArgsNumGPR() const { return VarArgsNumGPR; }
+ void setVarArgsNumGPR(unsigned Num) { VarArgsNumGPR = Num; }
+
+ unsigned getVarArgsNumFPR() const { return VarArgsNumFPR; }
+ void setVarArgsNumFPR(unsigned Num) { VarArgsNumFPR = Num; }
};
} // end of namespace llvm
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/libclamav/c++/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 0b509ac..653e143 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -43,7 +43,6 @@
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include <cstdlib>
-using namespace llvm;
// FIXME This disables some code that aligns the stack to a boundary
// bigger than the default (16 bytes on Darwin) when there is a stack local
@@ -56,14 +55,19 @@ using namespace llvm;
#define ALIGN_STACK 0
// FIXME (64-bit): Eventually enable by default.
+namespace llvm {
cl::opt<bool> EnablePPC32RS("enable-ppc32-regscavenger",
- cl::init(false),
- cl::desc("Enable PPC32 register scavenger"),
- cl::Hidden);
+ cl::init(false),
+ cl::desc("Enable PPC32 register scavenger"),
+ cl::Hidden);
cl::opt<bool> EnablePPC64RS("enable-ppc64-regscavenger",
- cl::init(false),
- cl::desc("Enable PPC64 register scavenger"),
- cl::Hidden);
+ cl::init(false),
+ cl::desc("Enable PPC64 register scavenger"),
+ cl::Hidden);
+}
+
+using namespace llvm;
+
#define EnableRegisterScavenging \
((EnablePPC32RS && !Subtarget.isPPC64()) || \
(EnablePPC64RS && Subtarget.isPPC64()))
@@ -265,147 +269,16 @@ PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return Subtarget.isPPC64() ? SVR4_64_CalleeSavedRegs : SVR4_CalleeSavedRegs;
}
-const TargetRegisterClass* const*
-PPCRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
- // 32-bit Darwin calling convention.
- static const TargetRegisterClass * const Darwin32_CalleeSavedRegClasses[] = {
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
-
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,
-
- &PPC::CRRCRegClass,&PPC::CRRCRegClass,&PPC::CRRCRegClass,
-
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
-
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
-
- &PPC::GPRCRegClass, 0
- };
-
- // 32-bit SVR4 calling convention.
- static const TargetRegisterClass * const SVR4_CalleeSavedRegClasses[] = {
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
-
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,
-
- &PPC::CRRCRegClass,&PPC::CRRCRegClass,&PPC::CRRCRegClass,
-
- &PPC::VRSAVERCRegClass,
-
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
-
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
-
- 0
- };
-
- // 64-bit Darwin calling convention.
- static const TargetRegisterClass * const Darwin64_CalleeSavedRegClasses[] = {
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
-
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,
-
- &PPC::CRRCRegClass,&PPC::CRRCRegClass,&PPC::CRRCRegClass,
-
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
-
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
-
- &PPC::G8RCRegClass, 0
- };
-
- // 64-bit SVR4 calling convention.
- static const TargetRegisterClass * const SVR4_64_CalleeSavedRegClasses[] = {
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
-
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,
-
- &PPC::CRRCRegClass,&PPC::CRRCRegClass,&PPC::CRRCRegClass,
-
- &PPC::VRSAVERCRegClass,
-
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
-
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
-
- 0
- };
-
- if (Subtarget.isDarwinABI())
- return Subtarget.isPPC64() ? Darwin64_CalleeSavedRegClasses :
- Darwin32_CalleeSavedRegClasses;
-
- return Subtarget.isPPC64() ? SVR4_64_CalleeSavedRegClasses
- : SVR4_CalleeSavedRegClasses;
-}
-
// needsFP - Return true if the specified function should have a dedicated frame
// pointer register. This is true if the function has variable sized allocas or
// if frame pointer elimination is disabled.
//
static bool needsFP(const MachineFunction &MF) {
const MachineFrameInfo *MFI = MF.getFrameInfo();
- return NoFramePointerElim || MFI->hasVarSizedObjects() ||
+ // Naked functions have no stack frame pushed, so we don't have a frame pointer.
+ if (MF.getFunction()->hasFnAttr(Attribute::Naked))
+ return false;
+ return DisableFramePointerElim(MF) || MFI->hasVarSizedObjects() ||
(GuaranteedTailCallOpt && MF.getInfo<PPCFunctionInfo>()->hasFastCall());
}
@@ -512,7 +385,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineInstr *MI = I;
DebugLoc dl = MI->getDebugLoc();
- if (isInt16(CalleeAmt)) {
+ if (isInt<16>(CalleeAmt)) {
BuildMI(MBB, I, dl, TII.get(ADDIInstr), StackReg).addReg(StackReg).
addImm(CalleeAmt);
} else {
@@ -576,8 +449,8 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II,
// Get stack alignments.
unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
unsigned MaxAlign = MFI->getMaxAlignment();
- assert(MaxAlign <= TargetAlign &&
- "Dynamic alloca with large aligns not supported");
+ if (MaxAlign > TargetAlign)
+ report_fatal_error("Dynamic alloca with large aligns not supported");
// Determine the previous frame's address. If FrameSize can't be
// represented as 16 bits or we need special alignment, then we load the
@@ -596,7 +469,7 @@ void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II,
else
Reg = PPC::R0;
- if (MaxAlign < TargetAlign && isInt16(FrameSize)) {
+ if (MaxAlign < TargetAlign && isInt<16>(FrameSize)) {
BuildMI(MBB, II, dl, TII.get(PPC::ADDI), Reg)
.addReg(PPC::R31)
.addImm(FrameSize);
@@ -682,19 +555,15 @@ void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II,
const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
const TargetRegisterClass *RC = Subtarget.isPPC64() ? G8RC : GPRC;
unsigned Reg = findScratchRegister(II, RS, RC, SPAdj);
+ unsigned SrcReg = MI.getOperand(0).getReg();
// We need to store the CR in the low 4-bits of the saved value. First, issue
- // an MFCR to save all of the CRBits. Add an implicit kill of the CR.
- if (!MI.getOperand(0).isKill())
- BuildMI(MBB, II, dl, TII.get(PPC::MFCR), Reg);
- else
- // Implicitly kill the CR register.
- BuildMI(MBB, II, dl, TII.get(PPC::MFCR), Reg)
- .addReg(MI.getOperand(0).getReg(), RegState::ImplicitKill);
+  // an MFCRpseud to save all of the CRBits and, if needed, kill the SrcReg.
+ BuildMI(MBB, II, dl, TII.get(PPC::MFCRpseud), Reg)
+ .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill()));
// If the saved register wasn't CR0, shift the bits left so that they are in
// CR0's slot.
- unsigned SrcReg = MI.getOperand(0).getReg();
if (SrcReg != PPC::CR0)
// rlwinm rA, rA, ShiftBits, 0, 31.
BuildMI(MBB, II, dl, TII.get(PPC::RLWINM), Reg)
@@ -711,10 +580,9 @@ void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II,
MBB.erase(II);
}
-unsigned
+void
PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj, int *Value,
- RegScavenger *RS) const {
+ int SPAdj, RegScavenger *RS) const {
assert(SPAdj == 0 && "Unexpected");
// Get the instruction.
@@ -753,14 +621,14 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
if (FPSI && FrameIndex == FPSI &&
(OpC == PPC::DYNALLOC || OpC == PPC::DYNALLOC8)) {
lowerDynamicAlloc(II, SPAdj, RS);
- return 0;
+ return;
}
// Special case for pseudo-op SPILL_CR.
if (EnableRegisterScavenging) // FIXME (64-bit): Enable by default.
if (OpC == PPC::SPILL_CR) {
lowerCRSpilling(II, FrameIndex, SPAdj, RS);
- return 0;
+ return;
}
// Replace the FrameIndex with base register with GPR1 (SP) or GPR31 (FP).
@@ -790,7 +658,10 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// If we're not using a Frame Pointer that has been set to the value of the
// SP before having the stack size subtracted from it, then add the stack size
// to Offset to get the correct offset.
- Offset += MFI->getStackSize();
+ // Naked functions have stack size 0, although getStackSize may not reflect that
+ // because we didn't call all the pieces that compute it for naked functions.
+ if (!MF.getFunction()->hasFnAttr(Attribute::Naked))
+ Offset += MFI->getStackSize();
// If we can, encode the offset directly into the instruction. If this is a
// normal PPC "ri" instruction, any 16-bit value can be safely encoded. If
@@ -798,11 +669,11 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// clear can be encoded. This is extremely uncommon, because normally you
// only "std" to a stack slot that is at least 4-byte aligned, but it can
// happen in invalid code.
- if (isInt16(Offset) && (!isIXAddr || (Offset & 3) == 0)) {
+ if (isInt<16>(Offset) && (!isIXAddr || (Offset & 3) == 0)) {
if (isIXAddr)
Offset >>= 2; // The actual encoded value has the low two bits zero.
MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
- return 0;
+ return;
}
// The offset doesn't fit into a single register, scavenge one to build the
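A concrete reading of the encoding rule above: a normal D-form "ri" instruction accepts any signed 16-bit offset, while a DS-form instruction (the isIXAddr case, e.g. ld/std) additionally needs the low two bits clear and encodes Offset >> 2. A small self-contained check, names illustrative:

    #include <cstdint>

    // True if Offset can be folded straight into the instruction; otherwise
    // the caller must build the offset in a scavenged register.
    bool canEncodeOffset(int64_t Offset, bool isDSForm) {
        if (Offset < INT16_MIN || Offset > INT16_MAX)
            return false;
        if (isDSForm && (Offset & 3) != 0)
            return false;          // DS-form needs the low two bits clear
        return true;               // DS-form then encodes Offset >> 2
    }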
@@ -838,11 +709,10 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
} else {
OperandBase = OffsetOperandNo;
}
-
+
unsigned StackReg = MI.getOperand(FIOperandNo).getReg();
MI.getOperand(OperandBase).ChangeToRegister(StackReg, false);
MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false);
- return 0;
}
/// VRRegNo - Map from a numbered VR register to its enum value.
@@ -999,7 +869,7 @@ void PPCRegisterInfo::determineFrameLayout(MachineFunction &MF) const {
if (!DisableRedZone &&
FrameSize <= 224 && // Fits in red zone.
!MFI->hasVarSizedObjects() && // No dynamic alloca.
- !MFI->hasCalls() && // No calls.
+ !MFI->adjustsStack() && // No calls.
(!ALIGN_STACK || MaxAlign <= TargetAlign)) { // No special alignment.
// No need for frame
MFI->setStackSize(0);
@@ -1054,8 +924,7 @@ PPCRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(isPPC64,
isDarwinABI);
// Allocate the frame index for frame pointer save area.
- FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset,
- true, false);
+ FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
// Save the result.
FI->setFramePointerSaveIndex(FPSI);
}
@@ -1063,8 +932,7 @@ PPCRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// Reserve stack space to move the linkage area to in case of a tail call.
int TCSPDelta = 0;
if (GuaranteedTailCallOpt && (TCSPDelta = FI->getTailCallSPDelta()) < 0) {
- MF.getFrameInfo()->CreateFixedObject(-1 * TCSPDelta, TCSPDelta,
- true, false);
+ MF.getFrameInfo()->CreateFixedObject(-1 * TCSPDelta, TCSPDelta, true);
}
// Reserve a slot closest to SP or frame pointer if we have a dynalloc or
@@ -1121,9 +989,7 @@ PPCRegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- const TargetRegisterClass *RC = CSI[i].getRegClass();
-
- if (RC == PPC::GPRCRegisterClass) {
+ if (PPC::GPRCRegisterClass->contains(Reg)) {
HasGPSaveArea = true;
GPRegs.push_back(CSI[i]);
@@ -1131,7 +997,7 @@ PPCRegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
if (Reg < MinGPR) {
MinGPR = Reg;
}
- } else if (RC == PPC::G8RCRegisterClass) {
+ } else if (PPC::G8RCRegisterClass->contains(Reg)) {
HasG8SaveArea = true;
G8Regs.push_back(CSI[i]);
@@ -1139,7 +1005,7 @@ PPCRegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
if (Reg < MinG8R) {
MinG8R = Reg;
}
- } else if (RC == PPC::F8RCRegisterClass) {
+ } else if (PPC::F8RCRegisterClass->contains(Reg)) {
HasFPSaveArea = true;
FPRegs.push_back(CSI[i]);
@@ -1148,12 +1014,12 @@ PPCRegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
MinFPR = Reg;
}
// FIXME SVR4: Disable CR save area for now.
- } else if ( RC == PPC::CRBITRCRegisterClass
- || RC == PPC::CRRCRegisterClass) {
+ } else if (PPC::CRBITRCRegisterClass->contains(Reg)
+ || PPC::CRRCRegisterClass->contains(Reg)) {
// HasCRSaveArea = true;
- } else if (RC == PPC::VRSAVERCRegisterClass) {
+ } else if (PPC::VRSAVERCRegisterClass->contains(Reg)) {
HasVRSAVESaveArea = true;
- } else if (RC == PPC::VRRCRegisterClass) {
+ } else if (PPC::VRRCRegisterClass->contains(Reg)) {
HasVRSaveArea = true;
VRegs.push_back(CSI[i]);
@@ -1234,9 +1100,10 @@ PPCRegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
// which have the CR/CRBIT register class?
// Adjust the frame index of the CR spill slot.
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- const TargetRegisterClass *RC = CSI[i].getRegClass();
+ unsigned Reg = CSI[i].getReg();
- if (RC == PPC::CRBITRCRegisterClass || RC == PPC::CRRCRegisterClass) {
+ if (PPC::CRBITRCRegisterClass->contains(Reg) ||
+ PPC::CRRCRegisterClass->contains(Reg)) {
int FI = CSI[i].getFrameIdx();
FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
@@ -1251,9 +1118,9 @@ PPCRegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
// which have the VRSAVE register class?
// Adjust the frame index of the VRSAVE spill slot.
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- const TargetRegisterClass *RC = CSI[i].getRegClass();
+ unsigned Reg = CSI[i].getReg();
- if (RC == PPC::VRSAVERCRegisterClass) {
+ if (PPC::VRSAVERCRegisterClass->contains(Reg)) {
int FI = CSI[i].getFrameIdx();
FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
@@ -1280,14 +1147,14 @@ PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo *MFI = MF.getFrameInfo();
- MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
- DebugLoc dl = DebugLoc::getUnknownLoc();
- bool needsFrameMoves = (MMI && MMI->hasDebugInfo()) ||
+ MachineModuleInfo &MMI = MF.getMMI();
+ DebugLoc dl;
+ bool needsFrameMoves = MMI.hasDebugInfo() ||
!MF.getFunction()->doesNotThrow() ||
UnwindTablesMandatory;
// Prepare for frame info.
- unsigned FrameLabelId = 0;
+ MCSymbol *FrameLabel = 0;
// Scan the prolog, looking for an UPDATE_VRSAVE instruction. If we find it,
// process it.
@@ -1375,8 +1242,9 @@ PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
if (!isPPC64) {
// PPC32.
if (ALIGN_STACK && MaxAlign > TargetAlign) {
- assert(isPowerOf2_32(MaxAlign)&&isInt16(MaxAlign)&&"Invalid alignment!");
- assert(isInt16(NegFrameSize) && "Unhandled stack size and alignment!");
+ assert(isPowerOf2_32(MaxAlign) && isInt<16>(MaxAlign) &&
+ "Invalid alignment!");
+ assert(isInt<16>(NegFrameSize) && "Unhandled stack size and alignment!");
BuildMI(MBB, MBBI, dl, TII.get(PPC::RLWINM), PPC::R0)
.addReg(PPC::R1)
@@ -1390,7 +1258,7 @@ PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
.addReg(PPC::R1)
.addReg(PPC::R1)
.addReg(PPC::R0);
- } else if (isInt16(NegFrameSize)) {
+ } else if (isInt<16>(NegFrameSize)) {
BuildMI(MBB, MBBI, dl, TII.get(PPC::STWU), PPC::R1)
.addReg(PPC::R1)
.addImm(NegFrameSize)
@@ -1408,8 +1276,9 @@ PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
}
} else { // PPC64.
if (ALIGN_STACK && MaxAlign > TargetAlign) {
- assert(isPowerOf2_32(MaxAlign)&&isInt16(MaxAlign)&&"Invalid alignment!");
- assert(isInt16(NegFrameSize) && "Unhandled stack size and alignment!");
+ assert(isPowerOf2_32(MaxAlign) && isInt<16>(MaxAlign) &&
+ "Invalid alignment!");
+ assert(isInt<16>(NegFrameSize) && "Unhandled stack size and alignment!");
BuildMI(MBB, MBBI, dl, TII.get(PPC::RLDICL), PPC::X0)
.addReg(PPC::X1)
@@ -1422,7 +1291,7 @@ PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
.addReg(PPC::X1)
.addReg(PPC::X1)
.addReg(PPC::X0);
- } else if (isInt16(NegFrameSize)) {
+ } else if (isInt<16>(NegFrameSize)) {
BuildMI(MBB, MBBI, dl, TII.get(PPC::STDU), PPC::X1)
.addReg(PPC::X1)
.addImm(NegFrameSize / 4)
@@ -1440,39 +1309,39 @@ PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
}
}
- std::vector<MachineMove> &Moves = MMI->getFrameMoves();
+ std::vector<MachineMove> &Moves = MMI.getFrameMoves();
// Add the "machine moves" for the instructions we generated above, but in
// reverse order.
if (needsFrameMoves) {
// Mark effective beginning of when frame pointer becomes valid.
- FrameLabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, dl, TII.get(PPC::DBG_LABEL)).addImm(FrameLabelId);
+ FrameLabel = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::PROLOG_LABEL)).addSym(FrameLabel);
// Show update of SP.
if (NegFrameSize) {
MachineLocation SPDst(MachineLocation::VirtualFP);
MachineLocation SPSrc(MachineLocation::VirtualFP, NegFrameSize);
- Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
+ Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
} else {
MachineLocation SP(isPPC64 ? PPC::X31 : PPC::R31);
- Moves.push_back(MachineMove(FrameLabelId, SP, SP));
+ Moves.push_back(MachineMove(FrameLabel, SP, SP));
}
if (HasFP) {
MachineLocation FPDst(MachineLocation::VirtualFP, FPOffset);
MachineLocation FPSrc(isPPC64 ? PPC::X31 : PPC::R31);
- Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
+ Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
}
if (MustSaveLR) {
MachineLocation LRDst(MachineLocation::VirtualFP, LROffset);
MachineLocation LRSrc(isPPC64 ? PPC::LR8 : PPC::LR);
- Moves.push_back(MachineMove(FrameLabelId, LRDst, LRSrc));
+ Moves.push_back(MachineMove(FrameLabel, LRDst, LRSrc));
}
}
- unsigned ReadyLabelId = 0;
+ MCSymbol *ReadyLabel = 0;
// If there is a frame pointer, copy R1 into R31
if (HasFP) {
@@ -1487,20 +1356,20 @@ PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
}
if (needsFrameMoves) {
- ReadyLabelId = MMI->NextLabelID();
+ ReadyLabel = MMI.getContext().CreateTempSymbol();
// Mark effective beginning of when frame pointer is ready.
- BuildMI(MBB, MBBI, dl, TII.get(PPC::DBG_LABEL)).addImm(ReadyLabelId);
+ BuildMI(MBB, MBBI, dl, TII.get(PPC::PROLOG_LABEL)).addSym(ReadyLabel);
MachineLocation FPDst(HasFP ? (isPPC64 ? PPC::X31 : PPC::R31) :
(isPPC64 ? PPC::X1 : PPC::R1));
MachineLocation FPSrc(MachineLocation::VirtualFP);
- Moves.push_back(MachineMove(ReadyLabelId, FPDst, FPSrc));
+ Moves.push_back(MachineMove(ReadyLabel, FPDst, FPSrc));
}
}
if (needsFrameMoves) {
- unsigned LabelId = HasFP ? ReadyLabelId : FrameLabelId;
+ MCSymbol *Label = HasFP ? ReadyLabel : FrameLabel;
// Add callee saved registers to move list.
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
@@ -1510,7 +1379,7 @@ PPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
if (Reg == PPC::LR || Reg == PPC::LR8 || Reg == PPC::RM) continue;
MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
MachineLocation CSSrc(Reg);
- Moves.push_back(MachineMove(LabelId, CSDst, CSSrc));
+ Moves.push_back(MachineMove(Label, CSDst, CSSrc));
}
}
}
@@ -1519,7 +1388,7 @@ void PPCRegisterInfo::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = prior(MBB.end());
unsigned RetOpcode = MBBI->getOpcode();
- DebugLoc dl = DebugLoc::getUnknownLoc();
+ DebugLoc dl;
assert( (RetOpcode == PPC::BLR ||
RetOpcode == PPC::TCRETURNri ||
@@ -1591,7 +1460,7 @@ void PPCRegisterInfo::emitEpilogue(MachineFunction &MF,
// enabled (=> hasFastCall()==true) the fastcc call might contain a tail
// call which invalidates the stack pointer value in SP(0). So we use the
// value of R31 in this case.
- if (FI->hasFastCall() && isInt16(FrameSize)) {
+ if (FI->hasFastCall() && isInt<16>(FrameSize)) {
assert(hasFP(MF) && "Expecting a valid the frame pointer.");
BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI), PPC::R1)
.addReg(PPC::R31).addImm(FrameSize);
@@ -1605,7 +1474,7 @@ void PPCRegisterInfo::emitEpilogue(MachineFunction &MF,
.addReg(PPC::R1)
.addReg(PPC::R31)
.addReg(PPC::R0);
- } else if (isInt16(FrameSize) &&
+ } else if (isInt<16>(FrameSize) &&
(!ALIGN_STACK || TargetAlign >= MaxAlign) &&
!MFI->hasVarSizedObjects()) {
BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI), PPC::R1)
@@ -1615,7 +1484,7 @@ void PPCRegisterInfo::emitEpilogue(MachineFunction &MF,
.addImm(0).addReg(PPC::R1);
}
} else {
- if (FI->hasFastCall() && isInt16(FrameSize)) {
+ if (FI->hasFastCall() && isInt<16>(FrameSize)) {
assert(hasFP(MF) && "Expecting a valid the frame pointer.");
BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI8), PPC::X1)
.addReg(PPC::X31).addImm(FrameSize);
@@ -1629,7 +1498,7 @@ void PPCRegisterInfo::emitEpilogue(MachineFunction &MF,
.addReg(PPC::X1)
.addReg(PPC::X31)
.addReg(PPC::X0);
- } else if (isInt16(FrameSize) && TargetAlign >= MaxAlign &&
+ } else if (isInt<16>(FrameSize) && TargetAlign >= MaxAlign &&
!MFI->hasVarSizedObjects()) {
BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI8), PPC::X1)
.addReg(PPC::X1).addImm(FrameSize);
@@ -1678,7 +1547,7 @@ void PPCRegisterInfo::emitEpilogue(MachineFunction &MF,
unsigned LISInstr = isPPC64 ? PPC::LIS8 : PPC::LIS;
unsigned ORIInstr = isPPC64 ? PPC::ORI8 : PPC::ORI;
- if (CallerAllocatedAmt && isInt16(CallerAllocatedAmt)) {
+ if (CallerAllocatedAmt && isInt<16>(CallerAllocatedAmt)) {
BuildMI(MBB, MBBI, dl, TII.get(ADDIInstr), StackReg)
.addReg(StackReg).addImm(CallerAllocatedAmt);
} else {
@@ -1754,4 +1623,3 @@ int PPCRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
}
#include "PPCGenRegisterInfo.inc"
-
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCRegisterInfo.h b/libclamav/c++/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
index 3aeed80..890b24b 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
@@ -42,9 +42,6 @@ public:
/// Code Generation virtual methods...
const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
- const TargetRegisterClass* const*
- getCalleeSavedRegClasses(const MachineFunction *MF = 0) const;
-
BitVector getReservedRegs(const MachineFunction &MF) const;
/// targetHandlesStackFrameRounding - Returns true if the target is
@@ -66,9 +63,8 @@ public:
int SPAdj, RegScavenger *RS) const;
void lowerCRSpilling(MachineBasicBlock::iterator II, unsigned FrameIndex,
int SPAdj, RegScavenger *RS) const;
- unsigned eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj, int *Value = NULL,
- RegScavenger *RS = NULL) const;
+ void eliminateFrameIndex(MachineBasicBlock::iterator II,
+ int SPAdj, RegScavenger *RS = NULL) const;
/// determineFrameLayout - Determine the size of the frame and maximum call
/// frame size.
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCRegisterInfo.td b/libclamav/c++/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
index 1cb7340..8604f54 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
@@ -10,6 +10,15 @@
//
//===----------------------------------------------------------------------===//
+let Namespace = "PPC" in {
+def sub_lt : SubRegIndex;
+def sub_gt : SubRegIndex;
+def sub_eq : SubRegIndex;
+def sub_un : SubRegIndex;
+def sub_32 : SubRegIndex;
+}
+
+
class PPCReg<string n> : Register<n> {
let Namespace = "PPC";
}
@@ -25,6 +34,7 @@ class GPR<bits<5> num, string n> : PPCReg<n> {
class GP8<GPR SubReg, string n> : PPCReg<n> {
field bits<5> Num = SubReg.Num;
let SubRegs = [SubReg];
+ let SubRegIndices = [sub_32];
}
// SPR - One of the 32-bit special-purpose registers
@@ -225,6 +235,7 @@ def CR7EQ : CRBIT<30, "30">, DwarfRegNum<[0]>;
def CR7UN : CRBIT<31, "31">, DwarfRegNum<[0]>;
// Condition registers
+let SubRegIndices = [sub_lt, sub_gt, sub_eq, sub_un] in {
def CR0 : CR<0, "cr0", [CR0LT, CR0GT, CR0EQ, CR0UN]>, DwarfRegNum<[68]>;
def CR1 : CR<1, "cr1", [CR1LT, CR1GT, CR1EQ, CR1UN]>, DwarfRegNum<[69]>;
def CR2 : CR<2, "cr2", [CR2LT, CR2GT, CR2EQ, CR2UN]>, DwarfRegNum<[70]>;
@@ -233,15 +244,7 @@ def CR4 : CR<4, "cr4", [CR4LT, CR4GT, CR4EQ, CR4UN]>, DwarfRegNum<[72]>;
def CR5 : CR<5, "cr5", [CR5LT, CR5GT, CR5EQ, CR5UN]>, DwarfRegNum<[73]>;
def CR6 : CR<6, "cr6", [CR6LT, CR6GT, CR6EQ, CR6UN]>, DwarfRegNum<[74]>;
def CR7 : CR<7, "cr7", [CR7LT, CR7GT, CR7EQ, CR7UN]>, DwarfRegNum<[75]>;
-
-def : SubRegSet<1, [CR0, CR1, CR2, CR3, CR4, CR5, CR6, CR7],
- [CR0LT, CR1LT, CR2LT, CR3LT, CR4LT, CR5LT, CR6LT, CR7LT]>;
-def : SubRegSet<2, [CR0, CR1, CR2, CR3, CR4, CR5, CR6, CR7],
- [CR0GT, CR1GT, CR2GT, CR3GT, CR4GT, CR5GT, CR6GT, CR7GT]>;
-def : SubRegSet<3, [CR0, CR1, CR2, CR3, CR4, CR5, CR6, CR7],
- [CR0EQ, CR1EQ, CR2EQ, CR3EQ, CR4EQ, CR5EQ, CR6EQ, CR7EQ]>;
-def : SubRegSet<4, [CR0, CR1, CR2, CR3, CR4, CR5, CR6, CR7],
- [CR0UN, CR1UN, CR2UN, CR3UN, CR4UN, CR5UN, CR6UN, CR7UN]>;
+}
// Link register
def LR : SPR<8, "lr">, DwarfRegNum<[65]>;
@@ -372,7 +375,7 @@ def CRBITRC : RegisterClass<"PPC", [i32], 32,
def CRRC : RegisterClass<"PPC", [i32], 32, [CR0, CR1, CR5, CR6, CR7, CR2,
CR3, CR4]>
{
- let SubRegClassList = [CRBITRC, CRBITRC, CRBITRC, CRBITRC];
+ let SubRegClasses = [(CRBITRC sub_lt, sub_gt, sub_eq, sub_un)];
}
def CTRRC : RegisterClass<"PPC", [i32], 32, [CTR]>;
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCSchedule.td b/libclamav/c++/llvm/lib/Target/PowerPC/PPCSchedule.td
index d589414..9664f14 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCSchedule.td
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCSchedule.td
@@ -15,8 +15,6 @@ def SLU : FuncUnit; // Store/load unit
def SRU : FuncUnit; // special register unit
def IU1 : FuncUnit; // integer unit 1 (simple)
def IU2 : FuncUnit; // integer unit 2 (complex)
-def IU3 : FuncUnit; // integer unit 3 (7450 simple)
-def IU4 : FuncUnit; // integer unit 4 (7450 simple)
def FPU1 : FuncUnit; // floating point unit 1
def FPU2 : FuncUnit; // floating point unit 2
def VPU : FuncUnit; // vector permutation unit
@@ -24,7 +22,6 @@ def VIU1 : FuncUnit; // vector integer unit 1 (simple)
def VIU2 : FuncUnit; // vector integer unit 2 (complex)
def VFPU : FuncUnit; // vector floating point unit
-
//===----------------------------------------------------------------------===//
// Instruction Itinerary classes used for PowerPC
//
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCScheduleG3.td b/libclamav/c++/llvm/lib/Target/PowerPC/PPCScheduleG3.td
index f72194d..7344763 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCScheduleG3.td
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCScheduleG3.td
@@ -12,7 +12,8 @@
//===----------------------------------------------------------------------===//
-def G3Itineraries : ProcessorItineraries<[
+def G3Itineraries : ProcessorItineraries<
+ [IU1, IU2, FPU1, BPU, SRU, SLU], [
InstrItinData<IntGeneral , [InstrStage<1, [IU1, IU2]>]>,
InstrItinData<IntCompare , [InstrStage<1, [IU1, IU2]>]>,
InstrItinData<IntDivW , [InstrStage<19, [IU1]>]>,
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCScheduleG4.td b/libclamav/c++/llvm/lib/Target/PowerPC/PPCScheduleG4.td
index 92ed20f..7efc693 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCScheduleG4.td
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCScheduleG4.td
@@ -11,7 +11,8 @@
//
//===----------------------------------------------------------------------===//
-def G4Itineraries : ProcessorItineraries<[
+def G4Itineraries : ProcessorItineraries<
+ [IU1, IU2, SLU, SRU, BPU, FPU1, VIU1, VIU2, VPU, VFPU], [
InstrItinData<IntGeneral , [InstrStage<1, [IU1, IU2]>]>,
InstrItinData<IntCompare , [InstrStage<1, [IU1, IU2]>]>,
InstrItinData<IntDivW , [InstrStage<19, [IU1]>]>,
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td b/libclamav/c++/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
index 7474ba4..15056c0 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCScheduleG4Plus.td
@@ -11,7 +11,11 @@
//
//===----------------------------------------------------------------------===//
-def G4PlusItineraries : ProcessorItineraries<[
+def IU3 : FuncUnit; // integer unit 3 (7450 simple)
+def IU4 : FuncUnit; // integer unit 4 (7450 simple)
+
+def G4PlusItineraries : ProcessorItineraries<
+ [IU1, IU2, IU3, IU4, BPU, SLU, FPU1, VFPU, VIU1, VIU2, VPU], [
InstrItinData<IntGeneral , [InstrStage<1, [IU1, IU2, IU3, IU4]>]>,
InstrItinData<IntCompare , [InstrStage<1, [IU1, IU2, IU3, IU4]>]>,
InstrItinData<IntDivW , [InstrStage<23, [IU2]>]>,
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCScheduleG5.td b/libclamav/c++/llvm/lib/Target/PowerPC/PPCScheduleG5.td
index d282147..2dffc48 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCScheduleG5.td
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCScheduleG5.td
@@ -11,7 +11,8 @@
//
//===----------------------------------------------------------------------===//
-def G5Itineraries : ProcessorItineraries<[
+def G5Itineraries : ProcessorItineraries<
+ [IU1, IU2, SLU, BPU, FPU1, FPU2, VFPU, VIU1, VIU2, VPU], [
InstrItinData<IntGeneral , [InstrStage<2, [IU1, IU2]>]>,
InstrItinData<IntCompare , [InstrStage<3, [IU1, IU2]>]>,
InstrItinData<IntDivD , [InstrStage<68, [IU1]>]>,
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp b/libclamav/c++/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp
new file mode 100644
index 0000000..d4258b4
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.cpp
@@ -0,0 +1,23 @@
+//===-- PPCSelectionDAGInfo.cpp - PowerPC SelectionDAG Info ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PPCSelectionDAGInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "powerpc-selectiondag-info"
+#include "PPCTargetMachine.h"
+using namespace llvm;
+
+PPCSelectionDAGInfo::PPCSelectionDAGInfo(const PPCTargetMachine &TM)
+ : TargetSelectionDAGInfo(TM) {
+}
+
+PPCSelectionDAGInfo::~PPCSelectionDAGInfo() {
+}
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.h b/libclamav/c++/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.h
new file mode 100644
index 0000000..341b69c
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCSelectionDAGInfo.h
@@ -0,0 +1,31 @@
+//===-- PPCSelectionDAGInfo.h - PowerPC SelectionDAG Info -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PowerPC subclass for TargetSelectionDAGInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef POWERPCCSELECTIONDAGINFO_H
+#define POWERPCCSELECTIONDAGINFO_H
+
+#include "llvm/Target/TargetSelectionDAGInfo.h"
+
+namespace llvm {
+
+class PPCTargetMachine;
+
+class PPCSelectionDAGInfo : public TargetSelectionDAGInfo {
+public:
+ explicit PPCSelectionDAGInfo(const PPCTargetMachine &TM);
+ ~PPCSelectionDAGInfo();
+};
+
+}
+
+#endif
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp b/libclamav/c++/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
index cac6962..10cd10b 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -20,7 +20,7 @@
#include "llvm/Support/FormattedStream.h"
using namespace llvm;
-static const MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT) {
+static MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT) {
Triple TheTriple(TT);
bool isPPC64 = TheTriple.getArch() == Triple::ppc64;
if (TheTriple.getOS() == Triple::Darwin)
@@ -44,7 +44,8 @@ PPCTargetMachine::PPCTargetMachine(const Target &T, const std::string &TT,
: LLVMTargetMachine(T, TT),
Subtarget(TT, FS, is64Bit),
DataLayout(Subtarget.getTargetDataString()), InstrInfo(*this),
- FrameInfo(*this, is64Bit), JITInfo(*this, is64Bit), TLInfo(*this),
+ FrameInfo(*this, is64Bit), JITInfo(*this, is64Bit),
+ TLInfo(*this), TSInfo(*this),
InstrItins(Subtarget.getInstrItineraryData()) {
if (getRelocationModel() == Reloc::Default) {
diff --git a/libclamav/c++/llvm/lib/Target/PowerPC/PPCTargetMachine.h b/libclamav/c++/llvm/lib/Target/PowerPC/PPCTargetMachine.h
index ac9ae2b..626ddbb 100644
--- a/libclamav/c++/llvm/lib/Target/PowerPC/PPCTargetMachine.h
+++ b/libclamav/c++/llvm/lib/Target/PowerPC/PPCTargetMachine.h
@@ -19,6 +19,7 @@
#include "PPCJITInfo.h"
#include "PPCInstrInfo.h"
#include "PPCISelLowering.h"
+#include "PPCSelectionDAGInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetData.h"
@@ -35,6 +36,7 @@ class PPCTargetMachine : public LLVMTargetMachine {
PPCFrameInfo FrameInfo;
PPCJITInfo JITInfo;
PPCTargetLowering TLInfo;
+ PPCSelectionDAGInfo TSInfo;
InstrItineraryData InstrItins;
public:
@@ -44,8 +46,11 @@ public:
virtual const PPCInstrInfo *getInstrInfo() const { return &InstrInfo; }
virtual const PPCFrameInfo *getFrameInfo() const { return &FrameInfo; }
virtual PPCJITInfo *getJITInfo() { return &JITInfo; }
- virtual PPCTargetLowering *getTargetLowering() const {
- return const_cast<PPCTargetLowering*>(&TLInfo);
+ virtual const PPCTargetLowering *getTargetLowering() const {
+ return &TLInfo;
+ }
+ virtual const PPCSelectionDAGInfo* getSelectionDAGInfo() const {
+ return &TSInfo;
}
virtual const PPCRegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
diff --git a/libclamav/c++/llvm/lib/Target/README.txt b/libclamav/c++/llvm/lib/Target/README.txt
index 4fd46a8..4faf8bc 100644
--- a/libclamav/c++/llvm/lib/Target/README.txt
+++ b/libclamav/c++/llvm/lib/Target/README.txt
@@ -263,19 +263,6 @@ if anyone cared enough about sincos.
//===---------------------------------------------------------------------===//
-Turn this into a single byte store with no load (the other 3 bytes are
-unmodified):
-
-define void @test(i32* %P) {
- %tmp = load i32* %P
- %tmp14 = or i32 %tmp, 3305111552
- %tmp15 = and i32 %tmp14, 3321888767
- store i32 %tmp15, i32* %P
- ret void
-}
-
-//===---------------------------------------------------------------------===//
-
quantum_sigma_x in 462.libquantum contains the following loop:
for(i=0; i<reg->size; i++)
@@ -313,6 +300,14 @@ unsigned long reverse(unsigned v) {
return v ^ (t >> 8);
}
+Neither is this (very standard idiom):
+
+int f(int n)
+{
+ return (((n) << 24) | (((n) & 0xff00) << 8)
+ | (((n) >> 8) & 0xff00) | ((n) >> 24));
+}
+
//===---------------------------------------------------------------------===//
[LOOP RECOGNITION]
@@ -911,17 +906,6 @@ The expression should optimize to something like
//===---------------------------------------------------------------------===//
-From GCC Bug 3756:
-int
-pn (int n)
-{
- return (n >= 0 ? 1 : -1);
-}
-Should combine to (n >> 31) | 1. Currently not optimized with "clang
--emit-llvm-bc | opt -std-compile-opts | llc".
-
-//===---------------------------------------------------------------------===//
-
void a(int variable)
{
if (variable == 4 || variable == 6)
@@ -1452,33 +1436,6 @@ This pattern repeats several times, basically doing:
//===---------------------------------------------------------------------===//
-186.crafty contains this interesting pattern:
-
-%77 = call i8* @strstr(i8* getelementptr ([6 x i8]* @"\01LC5", i32 0, i32 0),
- i8* %30)
-%phitmp648 = icmp eq i8* %77, getelementptr ([6 x i8]* @"\01LC5", i32 0, i32 0)
-br i1 %phitmp648, label %bb70, label %bb76
-
-bb70: ; preds = %OptionMatch.exit91, %bb69
- %78 = call i32 @strlen(i8* %30) nounwind readonly align 1 ; <i32> [#uses=1]
-
-This is basically:
- cststr = "abcdef";
- if (strstr(cststr, P) == cststr) {
- x = strlen(P);
- ...
-
-The strstr call would be significantly cheaper written as:
-
-cststr = "abcdef";
-if (memcmp(P, str, strlen(P)))
- x = strlen(P);
-
-This is memcmp+strlen instead of strstr. This also makes the strlen fully
-redundant.
-
-//===---------------------------------------------------------------------===//
-
186.crafty also contains this code:
%1906 = call i32 @strlen(i8* getelementptr ([32 x i8]* @pgn_event, i32 0,i32 0))
@@ -1819,3 +1776,164 @@ int test2(int mainType, int subType) {
}
//===---------------------------------------------------------------------===//
+
+The following test case (from PR6576):
+
+define i32 @mul(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %cond1 = icmp eq i32 %b, 0 ; <i1> [#uses=1]
+ br i1 %cond1, label %exit, label %bb.nph
+bb.nph: ; preds = %entry
+ %tmp = mul i32 %b, %a ; <i32> [#uses=1]
+ ret i32 %tmp
+exit: ; preds = %entry
+ ret i32 0
+}
+
+could be reduced to:
+
+define i32 @mul(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %tmp = mul i32 %b, %a
+ ret i32 %tmp
+}
+
+//===---------------------------------------------------------------------===//
+
+We should use DSE + llvm.lifetime.end to delete dead vtable pointer updates.
+See GCC PR34949
+
+Another interesting case is that something related could be used for variables
+that go const after their ctor has finished. In these cases, globalopt (which
+can statically run the constructor) could mark the global const (so it gets put
+in the readonly section). A testcase would be:
+
+#include <complex>
+using namespace std;
+const complex<char> should_be_in_rodata (42,-42);
+complex<char> should_be_in_data (42,-42);
+complex<char> should_be_in_bss;
+
+Where we currently evaluate the ctors but the globals don't become const because
+the optimizer doesn't know they "become const" after the ctor is done. See
+GCC PR4131 for more examples.
+
+//===---------------------------------------------------------------------===//
+
+In this code:
+
+long foo(long x) {
+ return x > 1 ? x : 1;
+}
+
+LLVM emits a comparison with 1 instead of 0. 0 would be equivalent
+and cheaper on most targets.
+
+LLVM prefers comparisons with zero over non-zero in general, but in this
+case it choses instead to keep the max operation obvious.
+
+//===---------------------------------------------------------------------===//
+
+Take the following testcase on x86-64 (similar testcases exist for all targets
+with addc/adde):
+
+define void @a(i64* nocapture %s, i64* nocapture %t, i64 %a, i64 %b,
+i64 %c) nounwind {
+entry:
+ %0 = zext i64 %a to i128 ; <i128> [#uses=1]
+ %1 = zext i64 %b to i128 ; <i128> [#uses=1]
+ %2 = add i128 %1, %0 ; <i128> [#uses=2]
+ %3 = zext i64 %c to i128 ; <i128> [#uses=1]
+ %4 = shl i128 %3, 64 ; <i128> [#uses=1]
+ %5 = add i128 %4, %2 ; <i128> [#uses=1]
+ %6 = lshr i128 %5, 64 ; <i128> [#uses=1]
+ %7 = trunc i128 %6 to i64 ; <i64> [#uses=1]
+ store i64 %7, i64* %s, align 8
+ %8 = trunc i128 %2 to i64 ; <i64> [#uses=1]
+ store i64 %8, i64* %t, align 8
+ ret void
+}
+
+Generated code:
+ addq %rcx, %rdx
+ movl $0, %eax
+ adcq $0, %rax
+ addq %r8, %rax
+ movq %rax, (%rdi)
+ movq %rdx, (%rsi)
+ ret
+
+Expected code:
+ addq %rcx, %rdx
+ adcq $0, %r8
+ movq %r8, (%rdi)
+ movq %rdx, (%rsi)
+ ret
+
+The generated SelectionDAG has an ADD of an ADDE, where both operands of the
+ADDE are zero. Replacing one of the operands of the ADDE with the other operand
+of the ADD, and replacing the ADD with the ADDE, should give the desired result.
+
+(That said, we are doing a lot better than gcc on this testcase. :) )
+
+//===---------------------------------------------------------------------===//
+
+Switch lowering generates less than ideal code for the following switch:
+define void @a(i32 %x) nounwind {
+entry:
+ switch i32 %x, label %if.end [
+ i32 0, label %if.then
+ i32 1, label %if.then
+ i32 2, label %if.then
+ i32 3, label %if.then
+ i32 5, label %if.then
+ ]
+if.then:
+ tail call void @foo() nounwind
+ ret void
+if.end:
+ ret void
+}
+declare void @foo()
+
+Generated code on x86-64 (other platforms give similar results):
+a:
+ cmpl $5, %edi
+ ja .LBB0_2
+ movl %edi, %eax
+ movl $47, %ecx
+ btq %rax, %rcx
+ jb .LBB0_3
+.LBB0_2:
+ ret
+.LBB0_3:
+ jmp foo # TAILCALL
+
+The movl+movl+btq+jb could be simplified to a cmpl+jne.
+
+Or, if we wanted to be really clever, we could simplify the whole thing to
+something like the following, which eliminates a branch:
+ xorl $1, %edi
+ cmpl $4, %edi
+ ja .LBB0_2
+ ret
+.LBB0_2:
+ jmp foo # TAILCALL
+//===---------------------------------------------------------------------===//
+Given a branch where the two target blocks are identical ("ret i32 %b" in
+both), simplifycfg will simplify them away. But not so for a switch statement:
+
+define i32 @f(i32 %a, i32 %b) nounwind readnone {
+entry:
+ switch i32 %a, label %bb3 [
+ i32 4, label %bb
+ i32 6, label %bb
+ ]
+
+bb: ; preds = %entry, %entry
+ ret i32 %b
+
+bb3: ; preds = %entry
+ ret i32 %b
+}
+//===---------------------------------------------------------------------===//
diff --git a/libclamav/c++/llvm/lib/Target/SubtargetFeature.cpp b/libclamav/c++/llvm/lib/Target/SubtargetFeature.cpp
index 2094cc9..b35190a 100644
--- a/libclamav/c++/llvm/lib/Target/SubtargetFeature.cpp
+++ b/libclamav/c++/llvm/lib/Target/SubtargetFeature.cpp
@@ -359,29 +359,25 @@ void SubtargetFeatures::dump() const {
print(dbgs());
}
-/// getDefaultSubtargetFeatures - Return a string listing
-/// the features associated with the target triple.
+/// getDefaultSubtargetFeatures - Return a string listing the features
+/// associated with the target triple.
///
/// FIXME: This is an inelegant way of specifying the features of a
/// subtarget. It would be better if we could encode this information
/// into the IR. See <rdar://5972456>.
///
-std::string SubtargetFeatures::getDefaultSubtargetFeatures(
- const Triple& Triple) {
- switch (Triple.getVendor()) {
- case Triple::Apple:
- switch (Triple.getArch()) {
- case Triple::ppc: // powerpc-apple-*
- return std::string("altivec");
- case Triple::ppc64: // powerpc64-apple-*
- return std::string("64bit,altivec");
- default:
- break;
+void SubtargetFeatures::getDefaultSubtargetFeatures(const std::string &CPU,
+ const Triple& Triple) {
+ setCPU(CPU);
+
+ if (Triple.getVendor() == Triple::Apple) {
+ if (Triple.getArch() == Triple::ppc) {
+ // powerpc-apple-*
+ AddFeature("altivec");
+ } else if (Triple.getArch() == Triple::ppc64) {
+ // powerpc64-apple-*
+ AddFeature("64bit");
+ AddFeature("altivec");
}
- break;
- default:
- break;
- }
-
- return std::string("");
+ }
}
diff --git a/libclamav/c++/llvm/lib/Target/TargetData.cpp b/libclamav/c++/llvm/lib/Target/TargetData.cpp
index 9a16808..f35c96d 100644
--- a/libclamav/c++/llvm/lib/Target/TargetData.cpp
+++ b/libclamav/c++/llvm/lib/Target/TargetData.cpp
@@ -34,8 +34,7 @@ using namespace llvm;
// Handle the Pass registration stuff necessary to use TargetData's.
// Register the default SparcV9 implementation...
-static RegisterPass<TargetData> X("targetdata", "Target Data Layout", false,
- true);
+INITIALIZE_PASS(TargetData, "targetdata", "Target Data Layout", false, true);
char TargetData::ID = 0;
//===----------------------------------------------------------------------===//
@@ -98,8 +97,8 @@ unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const {
//===----------------------------------------------------------------------===//
TargetAlignElem
-TargetAlignElem::get(AlignTypeEnum align_type, unsigned char abi_align,
- unsigned char pref_align, uint32_t bit_width) {
+TargetAlignElem::get(AlignTypeEnum align_type, unsigned abi_align,
+ unsigned pref_align, uint32_t bit_width) {
assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
TargetAlignElem retval;
retval.AlignType = align_type;
@@ -197,10 +196,10 @@ void TargetData::init(StringRef Desc) {
}
unsigned Size = getInt(Specifier.substr(1));
Split = Token.split(':');
- unsigned char ABIAlign = getInt(Split.first) / 8;
+ unsigned ABIAlign = getInt(Split.first) / 8;
Split = Split.second.split(':');
- unsigned char PrefAlign = getInt(Split.first) / 8;
+ unsigned PrefAlign = getInt(Split.first) / 8;
if (PrefAlign == 0)
PrefAlign = ABIAlign;
setAlignment(AlignType, ABIAlign, PrefAlign, Size);
@@ -227,19 +226,19 @@ void TargetData::init(StringRef Desc) {
///
/// @note This has to exist, because this is a pass, but it should never be
/// used.
-TargetData::TargetData() : ImmutablePass(&ID) {
- llvm_report_error("Bad TargetData ctor used. "
+TargetData::TargetData() : ImmutablePass(ID) {
+ report_fatal_error("Bad TargetData ctor used. "
"Tool did not specify a TargetData to use?");
}
TargetData::TargetData(const Module *M)
- : ImmutablePass(&ID) {
+ : ImmutablePass(ID) {
init(M->getDataLayout());
}
void
-TargetData::setAlignment(AlignTypeEnum align_type, unsigned char abi_align,
- unsigned char pref_align, uint32_t bit_width) {
+TargetData::setAlignment(AlignTypeEnum align_type, unsigned abi_align,
+ unsigned pref_align, uint32_t bit_width) {
assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
for (unsigned i = 0, e = Alignments.size(); i != e; ++i) {
if (Alignments[i].AlignType == align_type &&
@@ -269,18 +268,8 @@ unsigned TargetData::getAlignmentInfo(AlignTypeEnum AlignType,
return ABIInfo ? Alignments[i].ABIAlign : Alignments[i].PrefAlign;
// The best match so far depends on what we're looking for.
- if (AlignType == VECTOR_ALIGN && Alignments[i].AlignType == VECTOR_ALIGN) {
- // If this is a specification for a smaller vector type, we will fall back
- // to it. This happens because <128 x double> can be implemented in terms
- // of 64 <2 x double>.
- if (Alignments[i].TypeBitWidth < BitWidth) {
- // Verify that we pick the biggest of the fallbacks.
- if (BestMatchIdx == -1 ||
- Alignments[BestMatchIdx].TypeBitWidth < Alignments[i].TypeBitWidth)
- BestMatchIdx = i;
- }
- } else if (AlignType == INTEGER_ALIGN &&
- Alignments[i].AlignType == INTEGER_ALIGN) {
+ if (AlignType == INTEGER_ALIGN &&
+ Alignments[i].AlignType == INTEGER_ALIGN) {
// The "best match" for integers is the smallest size that is larger than
// the BitWidth requested.
if (Alignments[i].TypeBitWidth > BitWidth && (BestMatchIdx == -1 ||
@@ -303,10 +292,15 @@ unsigned TargetData::getAlignmentInfo(AlignTypeEnum AlignType,
} else {
assert(AlignType == VECTOR_ALIGN && "Unknown alignment type!");
- // If we didn't find a vector size that is smaller or equal to this type,
- // then we will end up scalarizing this to its element type. Just return
- // the alignment of the element.
- return getAlignment(cast<VectorType>(Ty)->getElementType(), ABIInfo);
+ // By default, use natural alignment for vector types. This is consistent
+ // with what clang and llvm-gcc do.
+ unsigned Align = getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
+ Align *= cast<VectorType>(Ty)->getNumElements();
+ // If the alignment is not a power of 2, round up to the next power of 2.
+ // This happens for non-power-of-2 length vectors.
+ if (Align & (Align-1))
+ Align = llvm::NextPowerOf2(Align);
+ return Align;
}
}
@@ -492,7 +486,7 @@ uint64_t TargetData::getTypeSizeInBits(const Type *Ty) const {
Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref
== false) for the requested type \a Ty.
*/
-unsigned char TargetData::getAlignment(const Type *Ty, bool abi_or_pref) const {
+unsigned TargetData::getAlignment(const Type *Ty, bool abi_or_pref) const {
int AlignType = -1;
assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
@@ -514,7 +508,7 @@ unsigned char TargetData::getAlignment(const Type *Ty, bool abi_or_pref) const {
// Get the layout annotation... which is lazily created on demand.
const StructLayout *Layout = getStructLayout(cast<StructType>(Ty));
unsigned Align = getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty);
- return std::max(Align, (unsigned)Layout->getAlignment());
+ return std::max(Align, Layout->getAlignment());
}
case Type::IntegerTyID:
case Type::VoidTyID:
@@ -541,18 +535,18 @@ unsigned char TargetData::getAlignment(const Type *Ty, bool abi_or_pref) const {
abi_or_pref, Ty);
}
-unsigned char TargetData::getABITypeAlignment(const Type *Ty) const {
+unsigned TargetData::getABITypeAlignment(const Type *Ty) const {
return getAlignment(Ty, true);
}
/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
/// an integer type of the specified bitwidth.
-unsigned char TargetData::getABIIntegerTypeAlignment(unsigned BitWidth) const {
+unsigned TargetData::getABIIntegerTypeAlignment(unsigned BitWidth) const {
return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, 0);
}
-unsigned char TargetData::getCallFrameTypeAlignment(const Type *Ty) const {
+unsigned TargetData::getCallFrameTypeAlignment(const Type *Ty) const {
for (unsigned i = 0, e = Alignments.size(); i != e; ++i)
if (Alignments[i].AlignType == STACK_ALIGN)
return Alignments[i].ABIAlign;
@@ -560,12 +554,12 @@ unsigned char TargetData::getCallFrameTypeAlignment(const Type *Ty) const {
return getABITypeAlignment(Ty);
}
-unsigned char TargetData::getPrefTypeAlignment(const Type *Ty) const {
+unsigned TargetData::getPrefTypeAlignment(const Type *Ty) const {
return getAlignment(Ty, false);
}
-unsigned char TargetData::getPreferredTypeAlignmentShift(const Type *Ty) const {
- unsigned Align = (unsigned) getPrefTypeAlignment(Ty);
+unsigned TargetData::getPreferredTypeAlignmentShift(const Type *Ty) const {
+ unsigned Align = getPrefTypeAlignment(Ty);
assert(!(Align & (Align-1)) && "Alignment is not a power of two!");
return Log2_32(Align);
}
@@ -605,8 +599,8 @@ uint64_t TargetData::getIndexedOffset(const Type *ptrTy, Value* const* Indices,
Ty = cast<SequentialType>(Ty)->getElementType();
// Get the array index and the size of each array element.
- int64_t arrayIdx = cast<ConstantInt>(Indices[CurIDX])->getSExtValue();
- Result += arrayIdx * (int64_t)getTypeAllocSize(Ty);
+ if (int64_t arrayIdx = cast<ConstantInt>(Indices[CurIDX])->getSExtValue())
+ Result += (uint64_t)arrayIdx * getTypeAllocSize(Ty);
}
}
diff --git a/libclamav/c++/llvm/lib/Target/TargetInstrInfo.cpp b/libclamav/c++/llvm/lib/Target/TargetInstrInfo.cpp
index 094a57e..c099a7e 100644
--- a/libclamav/c++/llvm/lib/Target/TargetInstrInfo.cpp
+++ b/libclamav/c++/llvm/lib/Target/TargetInstrInfo.cpp
@@ -28,6 +28,10 @@ const TargetRegisterClass *
TargetOperandInfo::getRegClass(const TargetRegisterInfo *TRI) const {
if (isLookupPtrRegClass())
return TRI->getPointerRegClass(RegClass);
+ // Instructions like INSERT_SUBREG do not have fixed register classes.
+ if (RegClass < 0)
+ return 0;
+ // Otherwise just look it up normally.
return TRI->getRegClass(RegClass);
}
diff --git a/libclamav/c++/llvm/lib/Target/TargetLoweringObjectFile.cpp b/libclamav/c++/llvm/lib/Target/TargetLoweringObjectFile.cpp
index 82619c7..dd7b532 100644
--- a/libclamav/c++/llvm/lib/Target/TargetLoweringObjectFile.cpp
+++ b/libclamav/c++/llvm/lib/Target/TargetLoweringObjectFile.cpp
@@ -19,6 +19,7 @@
#include "llvm/GlobalVariable.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h"
@@ -56,6 +57,10 @@ TargetLoweringObjectFile::TargetLoweringObjectFile() : Ctx(0) {
DwarfARangesSection = 0;
DwarfRangesSection = 0;
DwarfMacroInfoSection = 0;
+
+ IsFunctionEHSymbolGlobal = false;
+ IsFunctionEHFrameSymbolPrivate = true;
+ SupportsWeakOmittedEHFrame = true;
}
TargetLoweringObjectFile::~TargetLoweringObjectFile() {
@@ -96,7 +101,7 @@ static bool IsNullTerminatedString(const Constant *C) {
ConstantInt *Null =
dyn_cast<ConstantInt>(CVA->getOperand(ATy->getNumElements()-1));
- if (Null == 0 || Null->getZExtValue() != 0)
+ if (Null == 0 || !Null->isZero())
return false; // Not null terminated.
// Verify that the null doesn't occur anywhere else in the string.
@@ -286,40 +291,38 @@ TargetLoweringObjectFile::getSectionForConstant(SectionKind Kind) const {
return DataSection;
}
-/// getSymbolForDwarfGlobalReference - Return an MCExpr to use for a
+/// getExprForDwarfGlobalReference - Return an MCExpr to use for a
/// reference to the specified global variable from exception
/// handling information.
const MCExpr *TargetLoweringObjectFile::
-getSymbolForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding) const {
- // FIXME: Use GetGlobalValueSymbol.
- SmallString<128> Name;
- Mang->getNameWithPrefix(Name, GV, false);
- const MCSymbol *Sym = getContext().GetOrCreateSymbol(Name.str());
-
- return getSymbolForDwarfReference(Sym, MMI, Encoding);
+getExprForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
+ MachineModuleInfo *MMI, unsigned Encoding,
+ MCStreamer &Streamer) const {
+ const MCSymbol *Sym = Mang->getSymbol(GV);
+ return getExprForDwarfReference(Sym, Mang, MMI, Encoding, Streamer);
}
const MCExpr *TargetLoweringObjectFile::
-getSymbolForDwarfReference(const MCSymbol *Sym, MachineModuleInfo *MMI,
- unsigned Encoding) const {
+getExprForDwarfReference(const MCSymbol *Sym, Mangler *Mang,
+ MachineModuleInfo *MMI, unsigned Encoding,
+ MCStreamer &Streamer) const {
const MCExpr *Res = MCSymbolRefExpr::Create(Sym, getContext());
switch (Encoding & 0xF0) {
default:
- llvm_report_error("We do not support this DWARF encoding yet!");
- break;
+ report_fatal_error("We do not support this DWARF encoding yet!");
case dwarf::DW_EH_PE_absptr:
// Do nothing special
- break;
- case dwarf::DW_EH_PE_pcrel:
- // FIXME: PCSymbol
- const MCExpr *PC = MCSymbolRefExpr::Create(".", getContext());
- Res = MCBinaryExpr::CreateSub(Res, PC, getContext());
- break;
+ return Res;
+ case dwarf::DW_EH_PE_pcrel: {
+ // Emit a label to the streamer for the current position. This gives us
+ // .-foo addressing.
+ MCSymbol *PCSym = getContext().CreateTempSymbol();
+ Streamer.EmitLabel(PCSym);
+ const MCExpr *PC = MCSymbolRefExpr::Create(PCSym, getContext());
+ return MCBinaryExpr::CreateSub(Res, PC, getContext());
+ }
}
-
- return Res;
}
unsigned TargetLoweringObjectFile::getPersonalityEncoding() const {
diff --git a/libclamav/c++/llvm/lib/Target/TargetMachine.cpp b/libclamav/c++/llvm/lib/Target/TargetMachine.cpp
index 88871e3..705b1c0 100644
--- a/libclamav/c++/llvm/lib/Target/TargetMachine.cpp
+++ b/libclamav/c++/llvm/lib/Target/TargetMachine.cpp
@@ -11,6 +11,8 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
@@ -25,16 +27,17 @@ namespace llvm {
bool LessPreciseFPMADOption;
bool PrintMachineCode;
bool NoFramePointerElim;
+ bool NoFramePointerElimNonLeaf;
bool NoExcessFPPrecision;
bool UnsafeFPMath;
- bool FiniteOnlyFPMathOption;
+ bool NoInfsFPMath;
+ bool NoNaNsFPMath;
bool HonorSignDependentRoundingFPMathOption;
bool UseSoftFloat;
FloatABI::ABIType FloatABIType;
bool NoImplicitFloat;
bool NoZerosInBSS;
- bool DwarfExceptionHandling;
- bool SjLjExceptionHandling;
+ bool JITExceptionHandling;
bool JITEmitDebugInfo;
bool JITEmitDebugInfoToDisk;
bool UnwindTablesMandatory;
@@ -58,6 +61,11 @@ DisableFPElim("disable-fp-elim",
cl::location(NoFramePointerElim),
cl::init(false));
static cl::opt<bool, true>
+DisableFPElimNonLeaf("disable-non-leaf-fp-elim",
+ cl::desc("Disable frame pointer elimination optimization for non-leaf funcs"),
+ cl::location(NoFramePointerElimNonLeaf),
+ cl::init(false));
+static cl::opt<bool, true>
DisableExcessPrecision("disable-excess-fp-precision",
cl::desc("Disable optimizations that may increase FP precision"),
cl::location(NoExcessFPPrecision),
@@ -73,9 +81,14 @@ EnableUnsafeFPMath("enable-unsafe-fp-math",
cl::location(UnsafeFPMath),
cl::init(false));
static cl::opt<bool, true>
-EnableFiniteOnlyFPMath("enable-finite-only-fp-math",
- cl::desc("Enable optimizations that assumes non- NaNs / +-Infs"),
- cl::location(FiniteOnlyFPMathOption),
+EnableNoInfsFPMath("enable-no-infs-fp-math",
+ cl::desc("Enable FP math optimizations that assume no +-Infs"),
+ cl::location(NoInfsFPMath),
+ cl::init(false));
+static cl::opt<bool, true>
+EnableNoNaNsFPMath("enable-no-nans-fp-math",
+ cl::desc("Enable FP math optimizations that assume no NaNs"),
+ cl::location(NoNaNsFPMath),
cl::init(false));
static cl::opt<bool, true>
EnableHonorSignDependentRoundingFPMath("enable-sign-dependent-rounding-fp-math",
@@ -107,14 +120,9 @@ DontPlaceZerosInBSS("nozero-initialized-in-bss",
cl::location(NoZerosInBSS),
cl::init(false));
static cl::opt<bool, true>
-EnableDwarfExceptionHandling("enable-eh",
- cl::desc("Emit DWARF exception handling (default if target supports)"),
- cl::location(DwarfExceptionHandling),
- cl::init(false));
-static cl::opt<bool, true>
-EnableSjLjExceptionHandling("enable-sjlj-eh",
- cl::desc("Emit SJLJ exception handling (default if target supports)"),
- cl::location(SjLjExceptionHandling),
+EnableJITExceptionHandling("jit-enable-eh",
+ cl::desc("Emit exception handling information"),
+ cl::location(JITExceptionHandling),
cl::init(false));
// In debug builds, make this default to true.
#ifdef NDEBUG
@@ -197,13 +205,21 @@ EnableStrongPHIElim(cl::Hidden, "strong-phi-elim",
cl::desc("Use strong PHI elimination."),
cl::location(StrongPHIElim),
cl::init(false));
-
+static cl::opt<bool>
+DataSections("fdata-sections",
+ cl::desc("Emit data into separate sections"),
+ cl::init(false));
+static cl::opt<bool>
+FunctionSections("ffunction-sections",
+ cl::desc("Emit functions into separate sections"),
+ cl::init(false));
//---------------------------------------------------------------------------
// TargetMachine Class
//
TargetMachine::TargetMachine(const Target &T)
- : TheTarget(T), AsmInfo(0) {
+ : TheTarget(T), AsmInfo(0),
+ MCRelaxAll(false) {
// Typically it will be subtargets that will adjust FloatABIType from Default
// to Soft or Hard.
if (UseSoftFloat)
@@ -244,19 +260,42 @@ void TargetMachine::setAsmVerbosityDefault(bool V) {
AsmVerbosityDefault = V;
}
+bool TargetMachine::getFunctionSections() {
+ return FunctionSections;
+}
+
+bool TargetMachine::getDataSections() {
+ return DataSections;
+}
+
+void TargetMachine::setFunctionSections(bool V) {
+ FunctionSections = V;
+}
+
+void TargetMachine::setDataSections(bool V) {
+ DataSections = V;
+}
+
namespace llvm {
+ /// DisableFramePointerElim - This returns true if frame pointer elimination
+ /// optimization should be disabled for the given machine function.
+ bool DisableFramePointerElim(const MachineFunction &MF) {
+ // Check to see if we should eliminate non-leaf frame pointers and then
+ // check to see if we should eliminate all frame pointers.
+ if (NoFramePointerElimNonLeaf && !NoFramePointerElim) {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ return MFI->hasCalls();
+ }
+
+ return NoFramePointerElim;
+ }
+
/// LessPreciseFPMAD - This flag return true when -enable-fp-mad option
/// is specified on the command line. When this flag is off(default), the
/// code generator is not allowed to generate mad (multiply add) if the
/// result is "less precise" than doing those operations individually.
bool LessPreciseFPMAD() { return UnsafeFPMath || LessPreciseFPMADOption; }
- /// FiniteOnlyFPMath - This returns true when the -enable-finite-only-fp-math
- /// option is specified on the command line. If this returns false (default),
- /// the code generator is not allowed to assume that FP arithmetic arguments
- /// and results are never NaNs or +-Infs.
- bool FiniteOnlyFPMath() { return UnsafeFPMath || FiniteOnlyFPMathOption; }
-
/// HonorSignDependentRoundingFPMath - Return true if the codegen must assume
/// that the rounding mode of the FPU can change from its default.
bool HonorSignDependentRoundingFPMath() {
diff --git a/libclamav/c++/llvm/lib/Target/TargetRegisterInfo.cpp b/libclamav/c++/llvm/lib/Target/TargetRegisterInfo.cpp
index 52983ff..55f222c 100644
--- a/libclamav/c++/llvm/lib/Target/TargetRegisterInfo.cpp
+++ b/libclamav/c++/llvm/lib/Target/TargetRegisterInfo.cpp
@@ -22,14 +22,14 @@ using namespace llvm;
TargetRegisterInfo::TargetRegisterInfo(const TargetRegisterDesc *D, unsigned NR,
regclass_iterator RCB, regclass_iterator RCE,
+ const char *const *subregindexnames,
int CFSO, int CFDO,
const unsigned* subregs, const unsigned subregsize,
- const unsigned* superregs, const unsigned superregsize,
const unsigned* aliases, const unsigned aliasessize)
: SubregHash(subregs), SubregHashSize(subregsize),
- SuperregHash(superregs), SuperregHashSize(superregsize),
AliasesHash(aliases), AliasesHashSize(aliasessize),
- Desc(D), NumRegs(NR), RegClassBegin(RCB), RegClassEnd(RCE) {
+ Desc(D), SubRegIndexNames(subregindexnames), NumRegs(NR),
+ RegClassBegin(RCB), RegClassEnd(RCE) {
assert(NumRegs < FirstVirtualRegister &&
"Target has too many physical registers!");
@@ -39,20 +39,20 @@ TargetRegisterInfo::TargetRegisterInfo(const TargetRegisterDesc *D, unsigned NR,
TargetRegisterInfo::~TargetRegisterInfo() {}
-/// getPhysicalRegisterRegClass - Returns the Register Class of a physical
-/// register of the given type. If type is EVT::Other, then just return any
-/// register class the register belongs to.
+/// getMinimalPhysRegClass - Returns the Register Class of a physical
+/// register of the given type, picking the most sub register class of
+/// the right type that contains this physreg.
const TargetRegisterClass *
-TargetRegisterInfo::getPhysicalRegisterRegClass(unsigned reg, EVT VT) const {
+TargetRegisterInfo::getMinimalPhysRegClass(unsigned reg, EVT VT) const {
assert(isPhysicalRegister(reg) && "reg must be a physical register");
- // Pick the most super register class of the right type that contains
+ // Pick the most sub register class of the right type that contains
// this physreg.
const TargetRegisterClass* BestRC = 0;
for (regclass_iterator I = regclass_begin(), E = regclass_end(); I != E; ++I){
const TargetRegisterClass* RC = *I;
if ((VT == MVT::Other || RC->hasType(VT)) && RC->contains(reg) &&
- (!BestRC || BestRC->hasSuperClass(RC)))
+ (!BestRC || BestRC->hasSubClass(RC)))
BestRC = RC;
}
@@ -63,7 +63,7 @@ TargetRegisterInfo::getPhysicalRegisterRegClass(unsigned reg, EVT VT) const {
/// getAllocatableSetForRC - Toggle the bits that represent allocatable
/// registers for the specific register class.
static void getAllocatableSetForRC(const MachineFunction &MF,
- const TargetRegisterClass *RC, BitVector &R){
+ const TargetRegisterClass *RC, BitVector &R){
for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
E = RC->allocation_order_end(MF); I != E; ++I)
R.set(*I);
@@ -74,12 +74,16 @@ BitVector TargetRegisterInfo::getAllocatableSet(const MachineFunction &MF,
BitVector Allocatable(NumRegs);
if (RC) {
getAllocatableSetForRC(MF, RC, Allocatable);
- return Allocatable;
+ } else {
+ for (TargetRegisterInfo::regclass_iterator I = regclass_begin(),
+ E = regclass_end(); I != E; ++I)
+ getAllocatableSetForRC(MF, *I, Allocatable);
}
- for (TargetRegisterInfo::regclass_iterator I = regclass_begin(),
- E = regclass_end(); I != E; ++I)
- getAllocatableSetForRC(MF, *I, Allocatable);
+ // Mask out the reserved registers
+ BitVector Reserved = getReservedRegs(MF);
+ Allocatable ^= Reserved & Allocatable;
+
return Allocatable;
}
diff --git a/libclamav/c++/llvm/lib/Target/X86/AsmParser/CMakeLists.txt b/libclamav/c++/llvm/lib/Target/X86/AsmParser/CMakeLists.txt
deleted file mode 100644
index 40dbdd7..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/AsmParser/CMakeLists.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
-
-add_llvm_library(LLVMX86AsmParser
- X86AsmLexer.cpp
- X86AsmParser.cpp
- )
-add_dependencies(LLVMX86AsmParser X86CodeGenTable_gen)
diff --git a/libclamav/c++/llvm/lib/Target/X86/AsmParser/Makefile b/libclamav/c++/llvm/lib/Target/X86/AsmParser/Makefile
deleted file mode 100644
index 25fb0a2..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/AsmParser/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/X86/AsmParser/Makefile -------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMX86AsmParser
-
-# Hack: we need to include 'main' x86 target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp b/libclamav/c++/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp
deleted file mode 100644
index a58f58e..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp
+++ /dev/null
@@ -1,147 +0,0 @@
-//===-- X86AsmLexer.cpp - Tokenize X86 assembly to AsmTokens --------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/Target/TargetAsmLexer.h"
-#include "llvm/Target/TargetRegistry.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCParser/MCAsmLexer.h"
-#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
-#include "X86.h"
-
-using namespace llvm;
-
-namespace {
-
-class X86AsmLexer : public TargetAsmLexer {
- const MCAsmInfo &AsmInfo;
-
- bool tentativeIsValid;
- AsmToken tentativeToken;
-
- const AsmToken &lexTentative() {
- tentativeToken = getLexer()->Lex();
- tentativeIsValid = true;
- return tentativeToken;
- }
-
- const AsmToken &lexDefinite() {
- if(tentativeIsValid) {
- tentativeIsValid = false;
- return tentativeToken;
- }
- else {
- return getLexer()->Lex();
- }
- }
-
- AsmToken LexTokenATT();
- AsmToken LexTokenIntel();
-protected:
- AsmToken LexToken() {
- if (!Lexer) {
- SetError(SMLoc(), "No MCAsmLexer installed");
- return AsmToken(AsmToken::Error, "", 0);
- }
-
- switch (AsmInfo.getAssemblerDialect()) {
- default:
- SetError(SMLoc(), "Unhandled dialect");
- return AsmToken(AsmToken::Error, "", 0);
- case 0:
- return LexTokenATT();
- case 1:
- return LexTokenIntel();
- }
- }
-public:
- X86AsmLexer(const Target &T, const MCAsmInfo &MAI)
- : TargetAsmLexer(T), AsmInfo(MAI), tentativeIsValid(false) {
- }
-};
-
-}
-
-static unsigned MatchRegisterName(StringRef Name);
-
-AsmToken X86AsmLexer::LexTokenATT() {
- const AsmToken lexedToken = lexDefinite();
-
- switch (lexedToken.getKind()) {
- default:
- return AsmToken(lexedToken);
- case AsmToken::Error:
- SetError(Lexer->getErrLoc(), Lexer->getErr());
- return AsmToken(lexedToken);
- case AsmToken::Percent:
- {
- const AsmToken &nextToken = lexTentative();
- if (nextToken.getKind() == AsmToken::Identifier) {
- unsigned regID = MatchRegisterName(nextToken.getString());
-
- if (regID) {
- lexDefinite();
-
- StringRef regStr(lexedToken.getString().data(),
- lexedToken.getString().size() +
- nextToken.getString().size());
-
- return AsmToken(AsmToken::Register,
- regStr,
- static_cast<int64_t>(regID));
- }
- else {
- return AsmToken(lexedToken);
- }
- }
- else {
- return AsmToken(lexedToken);
- }
- }
- }
-}
-
-AsmToken X86AsmLexer::LexTokenIntel() {
- const AsmToken &lexedToken = lexDefinite();
-
- switch(lexedToken.getKind()) {
- default:
- return AsmToken(lexedToken);
- case AsmToken::Error:
- SetError(Lexer->getErrLoc(), Lexer->getErr());
- return AsmToken(lexedToken);
- case AsmToken::Identifier:
- {
- std::string upperCase = lexedToken.getString().str();
- std::string lowerCase = LowercaseString(upperCase);
- StringRef lowerRef(lowerCase);
-
- unsigned regID = MatchRegisterName(lowerRef);
-
- if (regID) {
- return AsmToken(AsmToken::Register,
- lexedToken.getString(),
- static_cast<int64_t>(regID));
- }
- else {
- return AsmToken(lexedToken);
- }
- }
- }
-}
-
-extern "C" void LLVMInitializeX86AsmLexer() {
- RegisterAsmLexer<X86AsmLexer> X(TheX86_32Target);
- RegisterAsmLexer<X86AsmLexer> Y(TheX86_64Target);
-}
-
-#define REGISTERS_ONLY
-#include "X86GenAsmMatcher.inc"
-#undef REGISTERS_ONLY
diff --git a/libclamav/c++/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/libclamav/c++/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
deleted file mode 100644
index 84d7bb7..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ /dev/null
@@ -1,589 +0,0 @@
-//===-- X86AsmParser.cpp - Parse X86 assembly to MCInst instructions ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Target/TargetAsmParser.h"
-#include "X86.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Twine.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCParser/MCAsmLexer.h"
-#include "llvm/MC/MCParser/MCAsmParser.h"
-#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/Target/TargetRegistry.h"
-#include "llvm/Target/TargetAsmParser.h"
-using namespace llvm;
-
-namespace {
-struct X86Operand;
-
-class X86ATTAsmParser : public TargetAsmParser {
- MCAsmParser &Parser;
-
-private:
- MCAsmParser &getParser() const { return Parser; }
-
- MCAsmLexer &getLexer() const { return Parser.getLexer(); }
-
- void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
-
- bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
-
- bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
-
- X86Operand *ParseOperand();
- X86Operand *ParseMemOperand();
-
- bool ParseDirectiveWord(unsigned Size, SMLoc L);
-
- /// @name Auto-generated Match Functions
- /// {
-
- bool MatchInstruction(const SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCInst &Inst);
-
- /// }
-
-public:
- X86ATTAsmParser(const Target &T, MCAsmParser &_Parser)
- : TargetAsmParser(T), Parser(_Parser) {}
-
- virtual bool ParseInstruction(const StringRef &Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands);
-
- virtual bool ParseDirective(AsmToken DirectiveID);
-};
-
-} // end anonymous namespace
-
-/// @name Auto-generated Match Functions
-/// {
-
-static unsigned MatchRegisterName(StringRef Name);
-
-/// }
-
-namespace {
-
-/// X86Operand - Instances of this class represent a parsed X86 machine
-/// instruction.
-struct X86Operand : public MCParsedAsmOperand {
- enum KindTy {
- Token,
- Register,
- Immediate,
- Memory
- } Kind;
-
- SMLoc StartLoc, EndLoc;
-
- union {
- struct {
- const char *Data;
- unsigned Length;
- } Tok;
-
- struct {
- unsigned RegNo;
- } Reg;
-
- struct {
- const MCExpr *Val;
- } Imm;
-
- struct {
- unsigned SegReg;
- const MCExpr *Disp;
- unsigned BaseReg;
- unsigned IndexReg;
- unsigned Scale;
- } Mem;
- };
-
- X86Operand(KindTy K, SMLoc Start, SMLoc End)
- : Kind(K), StartLoc(Start), EndLoc(End) {}
-
- /// getStartLoc - Get the location of the first token of this operand.
- SMLoc getStartLoc() const { return StartLoc; }
- /// getEndLoc - Get the location of the last token of this operand.
- SMLoc getEndLoc() const { return EndLoc; }
-
- StringRef getToken() const {
- assert(Kind == Token && "Invalid access!");
- return StringRef(Tok.Data, Tok.Length);
- }
-
- unsigned getReg() const {
- assert(Kind == Register && "Invalid access!");
- return Reg.RegNo;
- }
-
- const MCExpr *getImm() const {
- assert(Kind == Immediate && "Invalid access!");
- return Imm.Val;
- }
-
- const MCExpr *getMemDisp() const {
- assert(Kind == Memory && "Invalid access!");
- return Mem.Disp;
- }
- unsigned getMemSegReg() const {
- assert(Kind == Memory && "Invalid access!");
- return Mem.SegReg;
- }
- unsigned getMemBaseReg() const {
- assert(Kind == Memory && "Invalid access!");
- return Mem.BaseReg;
- }
- unsigned getMemIndexReg() const {
- assert(Kind == Memory && "Invalid access!");
- return Mem.IndexReg;
- }
- unsigned getMemScale() const {
- assert(Kind == Memory && "Invalid access!");
- return Mem.Scale;
- }
-
- bool isToken() const {return Kind == Token; }
-
- bool isImm() const { return Kind == Immediate; }
-
- bool isImmSExt8() const {
- // Accept immediates which fit in 8 bits when sign extended, and
- // non-absolute immediates.
- if (!isImm())
- return false;
-
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
- int64_t Value = CE->getValue();
- return Value == (int64_t) (int8_t) Value;
- }
-
- return true;
- }
-
- bool isMem() const { return Kind == Memory; }
-
- bool isAbsMem() const {
- return Kind == Memory && !getMemSegReg() && !getMemBaseReg() &&
- !getMemIndexReg() && getMemScale() == 1;
- }
-
- bool isNoSegMem() const {
- return Kind == Memory && !getMemSegReg();
- }
-
- bool isReg() const { return Kind == Register; }
-
- void addExpr(MCInst &Inst, const MCExpr *Expr) const {
- // Add as immediates when possible.
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
- Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
- else
- Inst.addOperand(MCOperand::CreateExpr(Expr));
- }
-
- void addRegOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateReg(getReg()));
- }
-
- void addImmOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addImmSExt8Operands(MCInst &Inst, unsigned N) const {
- // FIXME: Support user customization of the render method.
- assert(N == 1 && "Invalid number of operands!");
- addExpr(Inst, getImm());
- }
-
- void addMemOperands(MCInst &Inst, unsigned N) const {
- assert((N == 5) && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateReg(getMemBaseReg()));
- Inst.addOperand(MCOperand::CreateImm(getMemScale()));
- Inst.addOperand(MCOperand::CreateReg(getMemIndexReg()));
- addExpr(Inst, getMemDisp());
- Inst.addOperand(MCOperand::CreateReg(getMemSegReg()));
- }
-
- void addAbsMemOperands(MCInst &Inst, unsigned N) const {
- assert((N == 1) && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateExpr(getMemDisp()));
- }
-
- void addNoSegMemOperands(MCInst &Inst, unsigned N) const {
- assert((N == 4) && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateReg(getMemBaseReg()));
- Inst.addOperand(MCOperand::CreateImm(getMemScale()));
- Inst.addOperand(MCOperand::CreateReg(getMemIndexReg()));
- addExpr(Inst, getMemDisp());
- }
-
- static X86Operand *CreateToken(StringRef Str, SMLoc Loc) {
- X86Operand *Res = new X86Operand(Token, Loc, Loc);
- Res->Tok.Data = Str.data();
- Res->Tok.Length = Str.size();
- return Res;
- }
-
- static X86Operand *CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc) {
- X86Operand *Res = new X86Operand(Register, StartLoc, EndLoc);
- Res->Reg.RegNo = RegNo;
- return Res;
- }
-
- static X86Operand *CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc){
- X86Operand *Res = new X86Operand(Immediate, StartLoc, EndLoc);
- Res->Imm.Val = Val;
- return Res;
- }
-
- /// Create an absolute memory operand.
- static X86Operand *CreateMem(const MCExpr *Disp, SMLoc StartLoc,
- SMLoc EndLoc) {
- X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc);
- Res->Mem.SegReg = 0;
- Res->Mem.Disp = Disp;
- Res->Mem.BaseReg = 0;
- Res->Mem.IndexReg = 0;
- Res->Mem.Scale = 1;
- return Res;
- }
-
- /// Create a generalized memory operand.
- static X86Operand *CreateMem(unsigned SegReg, const MCExpr *Disp,
- unsigned BaseReg, unsigned IndexReg,
- unsigned Scale, SMLoc StartLoc, SMLoc EndLoc) {
- // We should never just have a displacement; that should be parsed as an
- // absolute memory operand.
- assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!");
-
- // The scale should always be one of {1,2,4,8}.
- assert(((Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8)) &&
- "Invalid scale!");
- X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc);
- Res->Mem.SegReg = SegReg;
- Res->Mem.Disp = Disp;
- Res->Mem.BaseReg = BaseReg;
- Res->Mem.IndexReg = IndexReg;
- Res->Mem.Scale = Scale;
- return Res;
- }
-};
-
-} // end anonymous namespace.
-
-
-bool X86ATTAsmParser::ParseRegister(unsigned &RegNo,
- SMLoc &StartLoc, SMLoc &EndLoc) {
- RegNo = 0;
- const AsmToken &TokPercent = Parser.getTok();
- assert(TokPercent.is(AsmToken::Percent) && "Invalid token kind!");
- StartLoc = TokPercent.getLoc();
- Parser.Lex(); // Eat percent token.
-
- const AsmToken &Tok = Parser.getTok();
- if (Tok.isNot(AsmToken::Identifier))
- return Error(Tok.getLoc(), "invalid register name");
-
- // FIXME: Validate register for the current architecture; we have to do
- // validation later, so maybe there is no need for this here.
- RegNo = MatchRegisterName(Tok.getString());
-
- // Parse %st(1) and "%st" as "%st(0)"
- if (RegNo == 0 && Tok.getString() == "st") {
- RegNo = X86::ST0;
- EndLoc = Tok.getLoc();
- Parser.Lex(); // Eat 'st'
-
- // Check to see if we have '(4)' after %st.
- if (getLexer().isNot(AsmToken::LParen))
- return false;
- // Lex the paren.
- getParser().Lex();
-
- const AsmToken &IntTok = Parser.getTok();
- if (IntTok.isNot(AsmToken::Integer))
- return Error(IntTok.getLoc(), "expected stack index");
- switch (IntTok.getIntVal()) {
- case 0: RegNo = X86::ST0; break;
- case 1: RegNo = X86::ST1; break;
- case 2: RegNo = X86::ST2; break;
- case 3: RegNo = X86::ST3; break;
- case 4: RegNo = X86::ST4; break;
- case 5: RegNo = X86::ST5; break;
- case 6: RegNo = X86::ST6; break;
- case 7: RegNo = X86::ST7; break;
- default: return Error(IntTok.getLoc(), "invalid stack index");
- }
-
- if (getParser().Lex().isNot(AsmToken::RParen))
- return Error(Parser.getTok().getLoc(), "expected ')'");
-
- EndLoc = Tok.getLoc();
- Parser.Lex(); // Eat ')'
- return false;
- }
-
- if (RegNo == 0)
- return Error(Tok.getLoc(), "invalid register name");
-
- EndLoc = Tok.getLoc();
- Parser.Lex(); // Eat identifier token.
- return false;
-}
-
-X86Operand *X86ATTAsmParser::ParseOperand() {
- switch (getLexer().getKind()) {
- default:
- return ParseMemOperand();
- case AsmToken::Percent: {
- // FIXME: if a segment register, this could either be just the seg reg, or
- // the start of a memory operand.
- unsigned RegNo;
- SMLoc Start, End;
- if (ParseRegister(RegNo, Start, End)) return 0;
- return X86Operand::CreateReg(RegNo, Start, End);
- }
- case AsmToken::Dollar: {
- // $42 -> immediate.
- SMLoc Start = Parser.getTok().getLoc(), End;
- Parser.Lex();
- const MCExpr *Val;
- if (getParser().ParseExpression(Val, End))
- return 0;
- return X86Operand::CreateImm(Val, Start, End);
- }
- }
-}
-
-/// ParseMemOperand: segment: disp(basereg, indexreg, scale)
-X86Operand *X86ATTAsmParser::ParseMemOperand() {
- SMLoc MemStart = Parser.getTok().getLoc();
-
- // FIXME: If SegReg ':' (e.g. %gs:), eat and remember.
- unsigned SegReg = 0;
-
- // We have to disambiguate a parenthesized expression "(4+5)" from the start
- // of a memory operand with a missing displacement "(%ebx)" or "(,%eax)". The
- // only way to do this without lookahead is to eat the '(' and see what is
- // after it.
- const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext());
- if (getLexer().isNot(AsmToken::LParen)) {
- SMLoc ExprEnd;
- if (getParser().ParseExpression(Disp, ExprEnd)) return 0;
-
- // After parsing the base expression we could either have a parenthesized
- // memory address or not. If not, return now. If so, eat the (.
- if (getLexer().isNot(AsmToken::LParen)) {
- // Unless we have a segment register, treat this as an immediate.
- if (SegReg == 0)
- return X86Operand::CreateMem(Disp, MemStart, ExprEnd);
- return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, MemStart, ExprEnd);
- }
-
- // Eat the '('.
- Parser.Lex();
- } else {
- // Okay, we have a '('. We don't know if this is an expression or not,
- // so we have to eat the '(' to see beyond it.
- SMLoc LParenLoc = Parser.getTok().getLoc();
- Parser.Lex(); // Eat the '('.
-
- if (getLexer().is(AsmToken::Percent) || getLexer().is(AsmToken::Comma)) {
- // Nothing to do here, fall into the code below with the '(' part of the
- // memory operand consumed.
- } else {
- SMLoc ExprEnd;
-
- // It must be a parenthesized expression; parse it now.
- if (getParser().ParseParenExpression(Disp, ExprEnd))
- return 0;
-
- // After parsing the base expression we could either have a parenthesized
- // memory address or not. If not, return now. If so, eat the (.
- if (getLexer().isNot(AsmToken::LParen)) {
- // Unless we have a segment register, treat this as an immediate.
- if (SegReg == 0)
- return X86Operand::CreateMem(Disp, LParenLoc, ExprEnd);
- return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, MemStart, ExprEnd);
- }
-
- // Eat the '('.
- Parser.Lex();
- }
- }
-
- // If we reached here, then we just ate the ( of the memory operand. Process
- // the rest of the memory operand.
- unsigned BaseReg = 0, IndexReg = 0, Scale = 1;
-
- if (getLexer().is(AsmToken::Percent)) {
- SMLoc L;
- if (ParseRegister(BaseReg, L, L)) return 0;
- }
-
- if (getLexer().is(AsmToken::Comma)) {
- Parser.Lex(); // Eat the comma.
-
- // Following the comma we should have either an index register, or a scale
- // value. We don't support the latter form, but we want to parse it
- // correctly.
- //
- // Note that even though it would be completely consistent to support syntax
- // like "1(%eax,,1)", the assembler doesn't.
- if (getLexer().is(AsmToken::Percent)) {
- SMLoc L;
- if (ParseRegister(IndexReg, L, L)) return 0;
-
- if (getLexer().isNot(AsmToken::RParen)) {
- // Parse the scale amount:
- // ::= ',' [scale-expression]
- if (getLexer().isNot(AsmToken::Comma)) {
- Error(Parser.getTok().getLoc(),
- "expected comma in scale expression");
- return 0;
- }
- Parser.Lex(); // Eat the comma.
-
- if (getLexer().isNot(AsmToken::RParen)) {
- SMLoc Loc = Parser.getTok().getLoc();
-
- int64_t ScaleVal;
- if (getParser().ParseAbsoluteExpression(ScaleVal))
- return 0;
-
- // Validate the scale amount.
- if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 && ScaleVal != 8){
- Error(Loc, "scale factor in address must be 1, 2, 4 or 8");
- return 0;
- }
- Scale = (unsigned)ScaleVal;
- }
- }
- } else if (getLexer().isNot(AsmToken::RParen)) {
- // Otherwise we have the unsupported form of a scale amount without an
- // index.
- SMLoc Loc = Parser.getTok().getLoc();
-
- int64_t Value;
- if (getParser().ParseAbsoluteExpression(Value))
- return 0;
-
- Error(Loc, "cannot have scale factor without index register");
- return 0;
- }
- }
-
- // Ok, we've eaten the memory operand, verify we have a ')' and eat it too.
- if (getLexer().isNot(AsmToken::RParen)) {
- Error(Parser.getTok().getLoc(), "unexpected token in memory operand");
- return 0;
- }
- SMLoc MemEnd = Parser.getTok().getLoc();
- Parser.Lex(); // Eat the ')'.
-
- return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale,
- MemStart, MemEnd);
-}
-
-bool X86ATTAsmParser::
-ParseInstruction(const StringRef &Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // FIXME: Hack to recognize "sal..." and "rep..." for now. We need a way to
- // represent alternative syntaxes in the .td file, without requiring
- // instruction duplication.
- StringRef PatchedName = StringSwitch<StringRef>(Name)
- .Case("sal", "shl")
- .Case("salb", "shlb")
- .Case("sall", "shll")
- .Case("salq", "shlq")
- .Case("salw", "shlw")
- .Case("repe", "rep")
- .Case("repz", "rep")
- .Case("repnz", "repne")
- .Default(Name);
- Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc));
-
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
-
- // Parse '*' modifier.
- if (getLexer().is(AsmToken::Star)) {
- SMLoc Loc = Parser.getTok().getLoc();
- Operands.push_back(X86Operand::CreateToken("*", Loc));
- Parser.Lex(); // Eat the star.
- }
-
- // Read the first operand.
- if (X86Operand *Op = ParseOperand())
- Operands.push_back(Op);
- else
- return true;
-
- while (getLexer().is(AsmToken::Comma)) {
- Parser.Lex(); // Eat the comma.
-
- // Parse and remember the operand.
- if (X86Operand *Op = ParseOperand())
- Operands.push_back(Op);
- else
- return true;
- }
- }
-
- return false;
-}
-
-bool X86ATTAsmParser::ParseDirective(AsmToken DirectiveID) {
- StringRef IDVal = DirectiveID.getIdentifier();
- if (IDVal == ".word")
- return ParseDirectiveWord(2, DirectiveID.getLoc());
- return true;
-}
-
-/// ParseDirectiveWord
-/// ::= .word [ expression (, expression)* ]
-bool X86ATTAsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
- for (;;) {
- const MCExpr *Value;
- if (getParser().ParseExpression(Value))
- return true;
-
- getParser().getStreamer().EmitValue(Value, Size, 0 /*addrspace*/);
-
- if (getLexer().is(AsmToken::EndOfStatement))
- break;
-
- // FIXME: Improve diagnostic.
- if (getLexer().isNot(AsmToken::Comma))
- return Error(L, "unexpected token in directive");
- Parser.Lex();
- }
- }
-
- Parser.Lex();
- return false;
-}
-
-extern "C" void LLVMInitializeX86AsmLexer();
-
-// Force static initialization.
-extern "C" void LLVMInitializeX86AsmParser() {
- RegisterAsmParser<X86ATTAsmParser> X(TheX86_32Target);
- RegisterAsmParser<X86ATTAsmParser> Y(TheX86_64Target);
- LLVMInitializeX86AsmLexer();
-}
-
-#include "X86GenAsmMatcher.inc"
diff --git a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/CMakeLists.txt b/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/CMakeLists.txt
deleted file mode 100644
index b70a587..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
-
-add_llvm_library(LLVMX86AsmPrinter
- X86ATTInstPrinter.cpp
- X86AsmPrinter.cpp
- X86IntelInstPrinter.cpp
- X86MCInstLower.cpp
- )
-add_dependencies(LLVMX86AsmPrinter X86CodeGenTable_gen)
diff --git a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/Makefile b/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/Makefile
deleted file mode 100644
index 2368761..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Target/X86/AsmPrinter/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMX86AsmPrinter
-
-# Hack: we need to include 'main' x86 target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp b/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
deleted file mode 100644
index 734a545..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
+++ /dev/null
@@ -1,121 +0,0 @@
-//===-- X86ATTInstPrinter.cpp - AT&T assembly instruction printing --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file includes code for rendering MCInst instances as AT&T-style
-// assembly.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "asm-printer"
-#include "X86ATTInstPrinter.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/Format.h"
-#include "llvm/Support/FormattedStream.h"
-#include "X86GenInstrNames.inc"
-using namespace llvm;
-
-// Include the auto-generated portion of the assembly writer.
-#define MachineInstr MCInst
-#define GET_INSTRUCTION_NAME
-#include "X86GenAsmWriter.inc"
-#undef MachineInstr
-
-void X86ATTInstPrinter::printInst(const MCInst *MI) { printInstruction(MI); }
-StringRef X86ATTInstPrinter::getOpcodeName(unsigned Opcode) const {
- return getInstructionName(Opcode);
-}
-
-
-void X86ATTInstPrinter::printSSECC(const MCInst *MI, unsigned Op) {
- switch (MI->getOperand(Op).getImm()) {
- default: llvm_unreachable("Invalid ssecc argument!");
- case 0: O << "eq"; break;
- case 1: O << "lt"; break;
- case 2: O << "le"; break;
- case 3: O << "unord"; break;
- case 4: O << "neq"; break;
- case 5: O << "nlt"; break;
- case 6: O << "nle"; break;
- case 7: O << "ord"; break;
- }
-}
-
-/// print_pcrel_imm - This is used to print an immediate value that ends up
-/// being encoded as a pc-relative value (e.g. for jumps and calls). These
-/// print slightly differently than normal immediates. For example, a $ is not
-/// emitted.
-void X86ATTInstPrinter::print_pcrel_imm(const MCInst *MI, unsigned OpNo) {
- const MCOperand &Op = MI->getOperand(OpNo);
- if (Op.isImm())
- // Print this as a signed 32-bit value.
- O << (int)Op.getImm();
- else {
- assert(Op.isExpr() && "unknown pcrel immediate operand");
- O << *Op.getExpr();
- }
-}
-
-void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo) {
-
- const MCOperand &Op = MI->getOperand(OpNo);
- if (Op.isReg()) {
- O << '%' << getRegisterName(Op.getReg());
- } else if (Op.isImm()) {
- O << '$' << Op.getImm();
-
- if (CommentStream && (Op.getImm() > 255 || Op.getImm() < -256))
- *CommentStream << format("imm = 0x%llX\n", (long long)Op.getImm());
-
- } else {
- assert(Op.isExpr() && "unknown operand kind in printOperand");
- O << '$' << *Op.getExpr();
- }
-}
-
-void X86ATTInstPrinter::printLeaMemReference(const MCInst *MI, unsigned Op) {
- const MCOperand &BaseReg = MI->getOperand(Op);
- const MCOperand &IndexReg = MI->getOperand(Op+2);
- const MCOperand &DispSpec = MI->getOperand(Op+3);
-
- if (DispSpec.isImm()) {
- int64_t DispVal = DispSpec.getImm();
- if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg()))
- O << DispVal;
- } else {
- assert(DispSpec.isExpr() && "non-immediate displacement for LEA?");
- O << *DispSpec.getExpr();
- }
-
- if (IndexReg.getReg() || BaseReg.getReg()) {
- O << '(';
- if (BaseReg.getReg())
- printOperand(MI, Op);
-
- if (IndexReg.getReg()) {
- O << ',';
- printOperand(MI, Op+2);
- unsigned ScaleVal = MI->getOperand(Op+1).getImm();
- if (ScaleVal != 1)
- O << ',' << ScaleVal;
- }
- O << ')';
- }
-}
-
-void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op) {
- // If this has a segment register, print it.
- if (MI->getOperand(Op+4).getReg()) {
- printOperand(MI, Op+4);
- O << ':';
- }
- printLeaMemReference(MI, Op);
-}
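
printLeaMemReference() above emits the AT&T disp(base,index,scale) form, dropping a zero displacement when registers are present and omitting the scale when it is 1. The same formatting rules appear in the self-contained sketch below; formatATTMem and the plain-string register names are purely illustrative, not the MCInst API.

    // AT&T memory-reference formatting, e.g. "8(%ebx,%esi,4)".
    #include <sstream>
    #include <string>

    std::string formatATTMem(const std::string &Base, const std::string &Index,
                             unsigned Scale, long long Disp) {
      std::ostringstream O;
      bool HasParen = !Base.empty() || !Index.empty();
      if (Disp != 0 || !HasParen)      // omit a zero displacement before "("
        O << Disp;
      if (HasParen) {
        O << '(';
        if (!Base.empty())
          O << '%' << Base;
        if (!Index.empty()) {
          O << ",%" << Index;
          if (Scale != 1)
            O << ',' << Scale;
        }
        O << ')';
      }
      return O.str();
    }

    // formatATTMem("ebx", "esi", 4, 8)  ->  "8(%ebx,%esi,4)"
    // formatATTMem("",    "",    1, 16) ->  "16"
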
diff --git a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h b/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h
deleted file mode 100644
index d109a07..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h
+++ /dev/null
@@ -1,86 +0,0 @@
-//===-- X86ATTInstPrinter.h - Convert X86 MCInst to assembly syntax -------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class prints an X86 MCInst to AT&T style .s file syntax.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef X86_ATT_INST_PRINTER_H
-#define X86_ATT_INST_PRINTER_H
-
-#include "llvm/MC/MCInstPrinter.h"
-
-namespace llvm {
- class MCOperand;
-
-class X86ATTInstPrinter : public MCInstPrinter {
-public:
- X86ATTInstPrinter(raw_ostream &O, const MCAsmInfo &MAI)
- : MCInstPrinter(O, MAI) {}
-
-
- virtual void printInst(const MCInst *MI);
- virtual StringRef getOpcodeName(unsigned Opcode) const;
-
- // Autogenerated by tblgen.
- void printInstruction(const MCInst *MI);
- static const char *getRegisterName(unsigned RegNo);
- static const char *getInstructionName(unsigned Opcode);
-
- void printOperand(const MCInst *MI, unsigned OpNo);
- void printMemReference(const MCInst *MI, unsigned Op);
- void printLeaMemReference(const MCInst *MI, unsigned Op);
- void printSSECC(const MCInst *MI, unsigned Op);
- void print_pcrel_imm(const MCInst *MI, unsigned OpNo);
-
- void printopaquemem(const MCInst *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
-
- void printi8mem(const MCInst *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printi16mem(const MCInst *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printi32mem(const MCInst *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printi64mem(const MCInst *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printi128mem(const MCInst *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printf32mem(const MCInst *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printf64mem(const MCInst *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printf80mem(const MCInst *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printf128mem(const MCInst *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printlea32mem(const MCInst *MI, unsigned OpNo) {
- printLeaMemReference(MI, OpNo);
- }
- void printlea64mem(const MCInst *MI, unsigned OpNo) {
- printLeaMemReference(MI, OpNo);
- }
- void printlea64_32mem(const MCInst *MI, unsigned OpNo) {
- printLeaMemReference(MI, OpNo);
- }
-};
-
-}
-
-#endif
diff --git a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp b/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp
deleted file mode 100644
index 8cab24c..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp
+++ /dev/null
@@ -1,653 +0,0 @@
-//===-- X86AsmPrinter.cpp - Convert X86 LLVM code to AT&T assembly --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains a printer that converts from our internal representation
-// of machine-dependent LLVM code to X86 machine code.
-//
-//===----------------------------------------------------------------------===//
-
-#include "X86AsmPrinter.h"
-#include "X86ATTInstPrinter.h"
-#include "X86IntelInstPrinter.h"
-#include "X86MCInstLower.h"
-#include "X86.h"
-#include "X86COFF.h"
-#include "X86COFFMachineModuleInfo.h"
-#include "X86MachineFunctionInfo.h"
-#include "X86TargetMachine.h"
-#include "llvm/CallingConv.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Module.h"
-#include "llvm/Type.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCSectionMachO.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/CodeGen/MachineJumpTableInfo.h"
-#include "llvm/CodeGen/MachineModuleInfoImpls.h"
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetRegistry.h"
-#include "llvm/ADT/SmallString.h"
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// Primitive Helper Functions.
-//===----------------------------------------------------------------------===//
-
-void X86AsmPrinter::PrintPICBaseSymbol() const {
- const TargetLowering *TLI = TM.getTargetLowering();
- O << *static_cast<const X86TargetLowering*>(TLI)->getPICBaseSymbol(MF,
- OutContext);
-}
-
-MCSymbol *X86AsmPrinter::GetGlobalValueSymbol(const GlobalValue *GV) const {
- SmallString<60> NameStr;
- Mang->getNameWithPrefix(NameStr, GV, false);
- MCSymbol *Symb = OutContext.GetOrCreateSymbol(NameStr.str());
-
- if (Subtarget->isTargetCygMing()) {
- X86COFFMachineModuleInfo &COFFMMI =
- MMI->getObjFileInfo<X86COFFMachineModuleInfo>();
- COFFMMI.DecorateCygMingName(Symb, OutContext, GV, *TM.getTargetData());
-
- // Save function name for later type emission.
- if (const Function *F = dyn_cast<Function>(GV))
- if (F->isDeclaration())
- COFFMMI.addExternalFunction(Symb->getName());
-
- }
-
- return Symb;
-}
-
-/// runOnMachineFunction - Emit the function body.
-///
-bool X86AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
- SetupMachineFunction(MF);
-
- if (Subtarget->isTargetCOFF()) {
- const Function *F = MF.getFunction();
- O << "\t.def\t " << *CurrentFnSym << ";\t.scl\t" <<
- (F->hasInternalLinkage() ? COFF::C_STAT : COFF::C_EXT)
- << ";\t.type\t" << (COFF::DT_FCN << COFF::N_BTSHFT)
- << ";\t.endef\n";
- }
-
- // Have common code print out the function header with linkage info etc.
- EmitFunctionHeader();
-
- // Emit the rest of the function body.
- EmitFunctionBody();
-
- // We didn't modify anything.
- return false;
-}
-
-/// printSymbolOperand - Print a raw symbol reference operand. This handles
-/// jump tables, constant pools, global address and external symbols, all of
-/// which print to a label with various suffixes for relocation types etc.
-void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO) {
- switch (MO.getType()) {
- default: llvm_unreachable("unknown symbol type!");
- case MachineOperand::MO_JumpTableIndex:
- O << *GetJTISymbol(MO.getIndex());
- break;
- case MachineOperand::MO_ConstantPoolIndex:
- O << *GetCPISymbol(MO.getIndex());
- printOffset(MO.getOffset());
- break;
- case MachineOperand::MO_GlobalAddress: {
- const GlobalValue *GV = MO.getGlobal();
-
- MCSymbol *GVSym;
- if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB)
- GVSym = GetSymbolWithGlobalValueBase(GV, "$stub");
- else if (MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
- MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE ||
- MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE)
- GVSym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
- else
- GVSym = GetGlobalValueSymbol(GV);
-
- // Handle dllimport linkage.
- if (MO.getTargetFlags() == X86II::MO_DLLIMPORT)
- GVSym = OutContext.GetOrCreateSymbol(Twine("__imp_") + GVSym->getName());
-
- if (MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
- MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE) {
- MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
-
- MCSymbol *&StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getGVStubEntry(Sym);
- if (StubSym == 0)
- StubSym = GetGlobalValueSymbol(GV);
-
- } else if (MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE){
- MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
- MCSymbol *&StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getHiddenGVStubEntry(Sym);
- if (StubSym == 0)
- StubSym = GetGlobalValueSymbol(GV);
- } else if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB) {
- MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$stub");
- MCSymbol *&StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
- if (StubSym == 0)
- StubSym = GetGlobalValueSymbol(GV);
- }
-
- // If the name begins with a dollar-sign, enclose it in parens. We do this
- // to avoid having it look like an integer immediate to the assembler.
- if (GVSym->getName()[0] != '$')
- O << *GVSym;
- else
- O << '(' << *GVSym << ')';
- printOffset(MO.getOffset());
- break;
- }
- case MachineOperand::MO_ExternalSymbol: {
- const MCSymbol *SymToPrint;
- if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB) {
- SmallString<128> TempNameStr;
- TempNameStr += StringRef(MO.getSymbolName());
- TempNameStr += StringRef("$stub");
-
- MCSymbol *Sym = GetExternalSymbolSymbol(TempNameStr.str());
- MCSymbol *&StubSym =
- MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
- if (StubSym == 0) {
- TempNameStr.erase(TempNameStr.end()-5, TempNameStr.end());
- StubSym = OutContext.GetOrCreateSymbol(TempNameStr.str());
- }
- SymToPrint = StubSym;
- } else {
- SymToPrint = GetExternalSymbolSymbol(MO.getSymbolName());
- }
-
- // If the name begins with a dollar-sign, enclose it in parens. We do this
- // to avoid having it look like an integer immediate to the assembler.
- if (SymToPrint->getName()[0] != '$')
- O << *SymToPrint;
- else
- O << '(' << *SymToPrint << ')';
- break;
- }
- }
-
- switch (MO.getTargetFlags()) {
- default:
- llvm_unreachable("Unknown target flag on GV operand");
- case X86II::MO_NO_FLAG: // No flag.
- break;
- case X86II::MO_DARWIN_NONLAZY:
- case X86II::MO_DLLIMPORT:
- case X86II::MO_DARWIN_STUB:
- // These affect the name of the symbol, not any suffix.
- break;
- case X86II::MO_GOT_ABSOLUTE_ADDRESS:
- O << " + [.-";
- PrintPICBaseSymbol();
- O << ']';
- break;
- case X86II::MO_PIC_BASE_OFFSET:
- case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
- case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
- O << '-';
- PrintPICBaseSymbol();
- break;
- case X86II::MO_TLSGD: O << "@TLSGD"; break;
- case X86II::MO_GOTTPOFF: O << "@GOTTPOFF"; break;
- case X86II::MO_INDNTPOFF: O << "@INDNTPOFF"; break;
- case X86II::MO_TPOFF: O << "@TPOFF"; break;
- case X86II::MO_NTPOFF: O << "@NTPOFF"; break;
- case X86II::MO_GOTPCREL: O << "@GOTPCREL"; break;
- case X86II::MO_GOT: O << "@GOT"; break;
- case X86II::MO_GOTOFF: O << "@GOTOFF"; break;
- case X86II::MO_PLT: O << "@PLT"; break;
- }
-}
-
-/// print_pcrel_imm - This is used to print an immediate value that ends up
-/// being encoded as a pc-relative value. These print slightly differently, for
-/// example, a $ is not emitted.
-void X86AsmPrinter::print_pcrel_imm(const MachineInstr *MI, unsigned OpNo) {
- const MachineOperand &MO = MI->getOperand(OpNo);
- switch (MO.getType()) {
- default: llvm_unreachable("Unknown pcrel immediate operand");
- case MachineOperand::MO_Immediate:
- O << MO.getImm();
- return;
- case MachineOperand::MO_MachineBasicBlock:
- O << *MO.getMBB()->getSymbol(OutContext);
- return;
- case MachineOperand::MO_GlobalAddress:
- case MachineOperand::MO_ExternalSymbol:
- printSymbolOperand(MO);
- return;
- }
-}
-
-
-void X86AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
- const char *Modifier) {
- const MachineOperand &MO = MI->getOperand(OpNo);
- switch (MO.getType()) {
- default: llvm_unreachable("unknown operand type!");
- case MachineOperand::MO_Register: {
- O << '%';
- unsigned Reg = MO.getReg();
- if (Modifier && strncmp(Modifier, "subreg", strlen("subreg")) == 0) {
- EVT VT = (strcmp(Modifier+6,"64") == 0) ?
- MVT::i64 : ((strcmp(Modifier+6, "32") == 0) ? MVT::i32 :
- ((strcmp(Modifier+6,"16") == 0) ? MVT::i16 : MVT::i8));
- Reg = getX86SubSuperRegister(Reg, VT);
- }
- O << X86ATTInstPrinter::getRegisterName(Reg);
- return;
- }
-
- case MachineOperand::MO_Immediate:
- O << '$' << MO.getImm();
- return;
-
- case MachineOperand::MO_JumpTableIndex:
- case MachineOperand::MO_ConstantPoolIndex:
- case MachineOperand::MO_GlobalAddress:
- case MachineOperand::MO_ExternalSymbol: {
- O << '$';
- printSymbolOperand(MO);
- break;
- }
- }
-}
-
-void X86AsmPrinter::printSSECC(const MachineInstr *MI, unsigned Op) {
- unsigned char value = MI->getOperand(Op).getImm();
- assert(value <= 7 && "Invalid ssecc argument!");
- switch (value) {
- case 0: O << "eq"; break;
- case 1: O << "lt"; break;
- case 2: O << "le"; break;
- case 3: O << "unord"; break;
- case 4: O << "neq"; break;
- case 5: O << "nlt"; break;
- case 6: O << "nle"; break;
- case 7: O << "ord"; break;
- }
-}
-
-void X86AsmPrinter::printLeaMemReference(const MachineInstr *MI, unsigned Op,
- const char *Modifier) {
- const MachineOperand &BaseReg = MI->getOperand(Op);
- const MachineOperand &IndexReg = MI->getOperand(Op+2);
- const MachineOperand &DispSpec = MI->getOperand(Op+3);
-
- // If we really don't want to print out (rip), don't.
- bool HasBaseReg = BaseReg.getReg() != 0;
- if (HasBaseReg && Modifier && !strcmp(Modifier, "no-rip") &&
- BaseReg.getReg() == X86::RIP)
- HasBaseReg = false;
-
- // HasParenPart - True if we will print out the () part of the mem ref.
- bool HasParenPart = IndexReg.getReg() || HasBaseReg;
-
- if (DispSpec.isImm()) {
- int DispVal = DispSpec.getImm();
- if (DispVal || !HasParenPart)
- O << DispVal;
- } else {
- assert(DispSpec.isGlobal() || DispSpec.isCPI() ||
- DispSpec.isJTI() || DispSpec.isSymbol());
- printSymbolOperand(MI->getOperand(Op+3));
- }
-
- if (HasParenPart) {
- assert(IndexReg.getReg() != X86::ESP &&
- "X86 doesn't allow scaling by ESP");
-
- O << '(';
- if (HasBaseReg)
- printOperand(MI, Op, Modifier);
-
- if (IndexReg.getReg()) {
- O << ',';
- printOperand(MI, Op+2, Modifier);
- unsigned ScaleVal = MI->getOperand(Op+1).getImm();
- if (ScaleVal != 1)
- O << ',' << ScaleVal;
- }
- O << ')';
- }
-}
-
-void X86AsmPrinter::printMemReference(const MachineInstr *MI, unsigned Op,
- const char *Modifier) {
- assert(isMem(MI, Op) && "Invalid memory reference!");
- const MachineOperand &Segment = MI->getOperand(Op+4);
- if (Segment.getReg()) {
- printOperand(MI, Op+4, Modifier);
- O << ':';
- }
- printLeaMemReference(MI, Op, Modifier);
-}
-
-void X86AsmPrinter::printPICLabel(const MachineInstr *MI, unsigned Op) {
- PrintPICBaseSymbol();
- O << '\n';
- PrintPICBaseSymbol();
- O << ':';
-}
-
-bool X86AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode) {
- unsigned Reg = MO.getReg();
- switch (Mode) {
- default: return true; // Unknown mode.
- case 'b': // Print QImode register
- Reg = getX86SubSuperRegister(Reg, MVT::i8);
- break;
- case 'h': // Print QImode high register
- Reg = getX86SubSuperRegister(Reg, MVT::i8, true);
- break;
- case 'w': // Print HImode register
- Reg = getX86SubSuperRegister(Reg, MVT::i16);
- break;
- case 'k': // Print SImode register
- Reg = getX86SubSuperRegister(Reg, MVT::i32);
- break;
- case 'q': // Print DImode register
- Reg = getX86SubSuperRegister(Reg, MVT::i64);
- break;
- }
-
- O << '%' << X86ATTInstPrinter::getRegisterName(Reg);
- return false;
-}
-
-/// PrintAsmOperand - Print out an operand for an inline asm expression.
-///
-bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant,
- const char *ExtraCode) {
- // Does this asm operand have a single letter operand modifier?
- if (ExtraCode && ExtraCode[0]) {
- if (ExtraCode[1] != 0) return true; // Unknown modifier.
-
- const MachineOperand &MO = MI->getOperand(OpNo);
-
- switch (ExtraCode[0]) {
- default: return true; // Unknown modifier.
- case 'a': // This is an address. Currently only 'i' and 'r' are expected.
- if (MO.isImm()) {
- O << MO.getImm();
- return false;
- }
- if (MO.isGlobal() || MO.isCPI() || MO.isJTI() || MO.isSymbol()) {
- printSymbolOperand(MO);
- return false;
- }
- if (MO.isReg()) {
- O << '(';
- printOperand(MI, OpNo);
- O << ')';
- return false;
- }
- return true;
-
- case 'c': // Don't print "$" before a global var name or constant.
- if (MO.isImm())
- O << MO.getImm();
- else if (MO.isGlobal() || MO.isCPI() || MO.isJTI() || MO.isSymbol())
- printSymbolOperand(MO);
- else
- printOperand(MI, OpNo);
- return false;
-
- case 'A': // Print '*' before a register (it must be a register)
- if (MO.isReg()) {
- O << '*';
- printOperand(MI, OpNo);
- return false;
- }
- return true;
-
- case 'b': // Print QImode register
- case 'h': // Print QImode high register
- case 'w': // Print HImode register
- case 'k': // Print SImode register
- case 'q': // Print DImode register
- if (MO.isReg())
- return printAsmMRegister(MO, ExtraCode[0]);
- printOperand(MI, OpNo);
- return false;
-
- case 'P': // This is the operand of a call, treat specially.
- print_pcrel_imm(MI, OpNo);
- return false;
-
- case 'n': // Negate the immediate or print a '-' before the operand.
- // Note: this is a temporary solution. It should be handled target
- // independently as part of the 'MC' work.
- if (MO.isImm()) {
- O << -MO.getImm();
- return false;
- }
- O << '-';
- }
- }
-
- printOperand(MI, OpNo);
- return false;
-}
-
-bool X86AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
- unsigned OpNo, unsigned AsmVariant,
- const char *ExtraCode) {
- if (ExtraCode && ExtraCode[0]) {
- if (ExtraCode[1] != 0) return true; // Unknown modifier.
-
- switch (ExtraCode[0]) {
- default: return true; // Unknown modifier.
- case 'b': // Print QImode register
- case 'h': // Print QImode high register
- case 'w': // Print HImode register
- case 'k': // Print SImode register
- case 'q': // Print DImode register
- // These only apply to registers, ignore on mem.
- break;
- case 'P': // Don't print @PLT, but do print as memory.
- printMemReference(MI, OpNo, "no-rip");
- return false;
- }
- }
- printMemReference(MI, OpNo);
- return false;
-}
-
-
-void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
- if (Subtarget->isTargetDarwin()) {
- // All darwin targets use mach-o.
- TargetLoweringObjectFileMachO &TLOFMacho =
- static_cast<TargetLoweringObjectFileMachO &>(getObjFileLowering());
-
- MachineModuleInfoMachO &MMIMacho =
- MMI->getObjFileInfo<MachineModuleInfoMachO>();
-
- // Output stubs for dynamically-linked functions.
- MachineModuleInfoMachO::SymbolListTy Stubs;
-
- Stubs = MMIMacho.GetFnStubList();
- if (!Stubs.empty()) {
- const MCSection *TheSection =
- TLOFMacho.getMachOSection("__IMPORT", "__jump_table",
- MCSectionMachO::S_SYMBOL_STUBS |
- MCSectionMachO::S_ATTR_SELF_MODIFYING_CODE |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
- 5, SectionKind::getMetadata());
- OutStreamer.SwitchSection(TheSection);
-
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- // L_foo$stub:
- OutStreamer.EmitLabel(Stubs[i].first);
- // .indirect_symbol _foo
- OutStreamer.EmitSymbolAttribute(Stubs[i].second, MCSA_IndirectSymbol);
- // hlt; hlt; hlt; hlt; hlt hlt = 0xf4 = -12.
- const char HltInsts[] = { -12, -12, -12, -12, -12 };
- OutStreamer.EmitBytes(StringRef(HltInsts, 5), 0/*addrspace*/);
- }
-
- Stubs.clear();
- OutStreamer.AddBlankLine();
- }
-
- // Output stubs for external and common global variables.
- Stubs = MMIMacho.GetGVStubList();
- if (!Stubs.empty()) {
- const MCSection *TheSection =
- TLOFMacho.getMachOSection("__IMPORT", "__pointers",
- MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS,
- SectionKind::getMetadata());
- OutStreamer.SwitchSection(TheSection);
-
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- // L_foo$non_lazy_ptr:
- OutStreamer.EmitLabel(Stubs[i].first);
- // .indirect_symbol _foo
- OutStreamer.EmitSymbolAttribute(Stubs[i].second, MCSA_IndirectSymbol);
- // .long 0
- OutStreamer.EmitIntValue(0, 4/*size*/, 0/*addrspace*/);
- }
- Stubs.clear();
- OutStreamer.AddBlankLine();
- }
-
- Stubs = MMIMacho.GetHiddenGVStubList();
- if (!Stubs.empty()) {
- OutStreamer.SwitchSection(getObjFileLowering().getDataSection());
- EmitAlignment(2);
-
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
- // L_foo$non_lazy_ptr:
- OutStreamer.EmitLabel(Stubs[i].first);
- // .long _foo
- OutStreamer.EmitValue(MCSymbolRefExpr::Create(Stubs[i].second,
- OutContext),
- 4/*size*/, 0/*addrspace*/);
- }
- Stubs.clear();
- OutStreamer.AddBlankLine();
- }
-
- // Funny Darwin hack: This flag tells the linker that no global symbols
- // contain code that falls through to other global symbols (e.g. the obvious
- // implementation of multiple entry points). If this doesn't occur, the
- // linker can safely perform dead code stripping. Since LLVM never
- // generates code that does this, it is always safe to set.
- OutStreamer.EmitAssemblerFlag(MCAF_SubsectionsViaSymbols);
- }
-
- if (Subtarget->isTargetCOFF()) {
- X86COFFMachineModuleInfo &COFFMMI =
- MMI->getObjFileInfo<X86COFFMachineModuleInfo>();
-
- // Emit type information for external functions
- for (X86COFFMachineModuleInfo::stub_iterator I = COFFMMI.stub_begin(),
- E = COFFMMI.stub_end(); I != E; ++I) {
- O << "\t.def\t " << I->getKeyData()
- << ";\t.scl\t" << COFF::C_EXT
- << ";\t.type\t" << (COFF::DT_FCN << COFF::N_BTSHFT)
- << ";\t.endef\n";
- }
-
- if (Subtarget->isTargetCygMing()) {
- // Necessary for dllexport support
- std::vector<const MCSymbol*> DLLExportedFns, DLLExportedGlobals;
-
- TargetLoweringObjectFileCOFF &TLOFCOFF =
- static_cast<TargetLoweringObjectFileCOFF&>(getObjFileLowering());
-
- for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I)
- if (I->hasDLLExportLinkage()) {
- MCSymbol *Sym = GetGlobalValueSymbol(I);
- DLLExportedFns.push_back(Sym);
- }
-
- for (Module::const_global_iterator I = M.global_begin(),
- E = M.global_end(); I != E; ++I)
- if (I->hasDLLExportLinkage())
- DLLExportedGlobals.push_back(GetGlobalValueSymbol(I));
-
- // Output linker support code for dllexported globals on windows.
- if (!DLLExportedGlobals.empty() || !DLLExportedFns.empty()) {
- OutStreamer.SwitchSection(TLOFCOFF.getCOFFSection(".section .drectve",
- true,
- SectionKind::getMetadata()));
- for (unsigned i = 0, e = DLLExportedGlobals.size(); i != e; ++i)
- O << "\t.ascii \" -export:" << *DLLExportedGlobals[i] << ",data\"\n";
-
- for (unsigned i = 0, e = DLLExportedFns.size(); i != e; ++i)
- O << "\t.ascii \" -export:" << *DLLExportedFns[i] << "\"\n";
- }
- }
- }
-
- if (Subtarget->isTargetELF()) {
- TargetLoweringObjectFileELF &TLOFELF =
- static_cast<TargetLoweringObjectFileELF &>(getObjFileLowering());
-
- MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
-
- // Output stubs for external and common global variables.
- MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
- if (!Stubs.empty()) {
- OutStreamer.SwitchSection(TLOFELF.getDataRelSection());
- const TargetData *TD = TM.getTargetData();
-
- for (unsigned i = 0, e = Stubs.size(); i != e; ++i)
- O << *Stubs[i].first << ":\n"
- << (TD->getPointerSize() == 8 ?
- MAI->getData64bitsDirective() : MAI->getData32bitsDirective())
- << *Stubs[i].second << '\n';
-
- Stubs.clear();
- }
- }
-}
-
-
-//===----------------------------------------------------------------------===//
-// Target Registry Stuff
-//===----------------------------------------------------------------------===//
-
-static MCInstPrinter *createX86MCInstPrinter(const Target &T,
- unsigned SyntaxVariant,
- const MCAsmInfo &MAI,
- raw_ostream &O) {
- if (SyntaxVariant == 0)
- return new X86ATTInstPrinter(O, MAI);
- if (SyntaxVariant == 1)
- return new X86IntelInstPrinter(O, MAI);
- return 0;
-}
-
-// Force static initialization.
-extern "C" void LLVMInitializeX86AsmPrinter() {
- RegisterAsmPrinter<X86AsmPrinter> X(TheX86_32Target);
- RegisterAsmPrinter<X86AsmPrinter> Y(TheX86_64Target);
-
- TargetRegistry::RegisterMCInstPrinter(TheX86_32Target,createX86MCInstPrinter);
- TargetRegistry::RegisterMCInstPrinter(TheX86_64Target,createX86MCInstPrinter);
-}
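
printSymbolOperand() above appends a relocation suffix (@GOT, @PLT, @TPOFF, ...) selected from the operand's target flag after printing the symbol itself. A stripped-down sketch of that selection is given below, using an illustrative RelocKind enum in place of the X86II flag values.

    // Relocation-suffix selection, e.g. "printf" -> "printf@PLT".
    #include <string>

    enum class RelocKind { None, GOT, GOTOFF, GOTPCREL, PLT, TLSGD, TPOFF };

    std::string relocSuffix(RelocKind K) {
      switch (K) {
      case RelocKind::GOT:      return "@GOT";
      case RelocKind::GOTOFF:   return "@GOTOFF";
      case RelocKind::GOTPCREL: return "@GOTPCREL";
      case RelocKind::PLT:      return "@PLT";
      case RelocKind::TLSGD:    return "@TLSGD";
      case RelocKind::TPOFF:    return "@TPOFF";
      case RelocKind::None:     return "";
      }
      return "";
    }
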
diff --git a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.h b/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.h
deleted file mode 100644
index 039214a..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.h
+++ /dev/null
@@ -1,134 +0,0 @@
-//===-- X86AsmPrinter.h - Convert X86 LLVM code to assembly -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// AT&T assembly code printer class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef X86ASMPRINTER_H
-#define X86ASMPRINTER_H
-
-#include "../X86.h"
-#include "../X86MachineFunctionInfo.h"
-#include "../X86TargetMachine.h"
-#include "llvm/ADT/StringSet.h"
-#include "llvm/CodeGen/AsmPrinter.h"
-#include "llvm/CodeGen/DwarfWriter.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/Support/Compiler.h"
-
-namespace llvm {
-
-class MachineJumpTableInfo;
-class MCContext;
-class MCInst;
-class MCStreamer;
-class MCSymbol;
-
-class VISIBILITY_HIDDEN X86AsmPrinter : public AsmPrinter {
- const X86Subtarget *Subtarget;
- public:
- explicit X86AsmPrinter(formatted_raw_ostream &O, TargetMachine &TM,
- MCContext &Ctx, MCStreamer &Streamer,
- const MCAsmInfo *T)
- : AsmPrinter(O, TM, Ctx, Streamer, T) {
- Subtarget = &TM.getSubtarget<X86Subtarget>();
- }
-
- virtual const char *getPassName() const {
- return "X86 AT&T-Style Assembly Printer";
- }
-
- const X86Subtarget &getSubtarget() const { return *Subtarget; }
-
- void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- AU.addRequired<MachineModuleInfo>();
- AU.addRequired<DwarfWriter>();
- AsmPrinter::getAnalysisUsage(AU);
- }
-
-
- virtual void EmitEndOfAsmFile(Module &M);
-
- virtual void EmitInstruction(const MachineInstr *MI);
-
- void printSymbolOperand(const MachineOperand &MO);
- virtual MCSymbol *GetGlobalValueSymbol(const GlobalValue *GV) const;
-
- // These methods are used by the tablegen'erated instruction printer.
- void printOperand(const MachineInstr *MI, unsigned OpNo,
- const char *Modifier = 0);
- void print_pcrel_imm(const MachineInstr *MI, unsigned OpNo);
-
- void printopaquemem(const MachineInstr *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
-
- void printi8mem(const MachineInstr *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printi16mem(const MachineInstr *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printi32mem(const MachineInstr *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printi64mem(const MachineInstr *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printi128mem(const MachineInstr *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printf32mem(const MachineInstr *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printf64mem(const MachineInstr *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printf80mem(const MachineInstr *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printf128mem(const MachineInstr *MI, unsigned OpNo) {
- printMemReference(MI, OpNo);
- }
- void printlea32mem(const MachineInstr *MI, unsigned OpNo) {
- printLeaMemReference(MI, OpNo);
- }
- void printlea64mem(const MachineInstr *MI, unsigned OpNo) {
- printLeaMemReference(MI, OpNo);
- }
- void printlea64_32mem(const MachineInstr *MI, unsigned OpNo) {
- printLeaMemReference(MI, OpNo, "subreg64");
- }
-
- bool printAsmMRegister(const MachineOperand &MO, char Mode);
- bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode);
- bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode);
-
- void printMachineInstruction(const MachineInstr *MI);
- void printSSECC(const MachineInstr *MI, unsigned Op);
- void printMemReference(const MachineInstr *MI, unsigned Op,
- const char *Modifier=NULL);
- void printLeaMemReference(const MachineInstr *MI, unsigned Op,
- const char *Modifier=NULL);
-
- void printPICLabel(const MachineInstr *MI, unsigned Op);
-
- void PrintPICBaseSymbol() const;
-
- bool runOnMachineFunction(MachineFunction &F);
-};
-
-} // end namespace llvm
-
-#endif
diff --git a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp b/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp
deleted file mode 100644
index 610beb5..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp
+++ /dev/null
@@ -1,134 +0,0 @@
-//===-- X86IntelInstPrinter.cpp - Intel assembly instruction printing -----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file includes code for rendering MCInst instances as Intel-style
-// assembly.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "asm-printer"
-#include "X86IntelInstPrinter.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/FormattedStream.h"
-#include "X86GenInstrNames.inc"
-using namespace llvm;
-
-// Include the auto-generated portion of the assembly writer.
-#define MachineInstr MCInst
-#define GET_INSTRUCTION_NAME
-#include "X86GenAsmWriter1.inc"
-#undef MachineInstr
-
-void X86IntelInstPrinter::printInst(const MCInst *MI) { printInstruction(MI); }
-StringRef X86IntelInstPrinter::getOpcodeName(unsigned Opcode) const {
- return getInstructionName(Opcode);
-}
-
-void X86IntelInstPrinter::printSSECC(const MCInst *MI, unsigned Op) {
- switch (MI->getOperand(Op).getImm()) {
- default: llvm_unreachable("Invalid ssecc argument!");
- case 0: O << "eq"; break;
- case 1: O << "lt"; break;
- case 2: O << "le"; break;
- case 3: O << "unord"; break;
- case 4: O << "neq"; break;
- case 5: O << "nlt"; break;
- case 6: O << "nle"; break;
- case 7: O << "ord"; break;
- }
-}
-
-/// print_pcrel_imm - This is used to print an immediate value that ends up
-/// being encoded as a pc-relative value.
-void X86IntelInstPrinter::print_pcrel_imm(const MCInst *MI, unsigned OpNo) {
- const MCOperand &Op = MI->getOperand(OpNo);
- if (Op.isImm())
- O << Op.getImm();
- else {
- assert(Op.isExpr() && "unknown pcrel immediate operand");
- O << *Op.getExpr();
- }
-}
-
-static void PrintRegName(raw_ostream &O, StringRef RegName) {
- for (unsigned i = 0, e = RegName.size(); i != e; ++i)
- O << (char)toupper(RegName[i]);
-}
-
-void X86IntelInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
- const char *Modifier) {
- assert(Modifier == 0 && "Modifiers should not be used");
-
- const MCOperand &Op = MI->getOperand(OpNo);
- if (Op.isReg()) {
- PrintRegName(O, getRegisterName(Op.getReg()));
- } else if (Op.isImm()) {
- O << Op.getImm();
- } else {
- assert(Op.isExpr() && "unknown operand kind in printOperand");
- O << *Op.getExpr();
- }
-}
-
-void X86IntelInstPrinter::printLeaMemReference(const MCInst *MI, unsigned Op) {
- const MCOperand &BaseReg = MI->getOperand(Op);
- unsigned ScaleVal = MI->getOperand(Op+1).getImm();
- const MCOperand &IndexReg = MI->getOperand(Op+2);
- const MCOperand &DispSpec = MI->getOperand(Op+3);
-
- O << '[';
-
- bool NeedPlus = false;
- if (BaseReg.getReg()) {
- printOperand(MI, Op);
- NeedPlus = true;
- }
-
- if (IndexReg.getReg()) {
- if (NeedPlus) O << " + ";
- if (ScaleVal != 1)
- O << ScaleVal << '*';
- printOperand(MI, Op+2);
- NeedPlus = true;
- }
-
-
- if (!DispSpec.isImm()) {
- if (NeedPlus) O << " + ";
- assert(DispSpec.isExpr() && "non-immediate displacement for LEA?");
- O << *DispSpec.getExpr();
- } else {
- int64_t DispVal = DispSpec.getImm();
- if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg())) {
- if (NeedPlus) {
- if (DispVal > 0)
- O << " + ";
- else {
- O << " - ";
- DispVal = -DispVal;
- }
- }
- O << DispVal;
- }
- }
-
- O << ']';
-}
-
-void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op) {
- // If this has a segment register, print it.
- if (MI->getOperand(Op+4).getReg()) {
- printOperand(MI, Op+4);
- O << ':';
- }
- printLeaMemReference(MI, Op);
-}
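
The Intel printer's printLeaMemReference() above builds a bracketed [base + scale*index + disp] expression, turning a negative displacement into an explicit " - " term when something precedes it. An equivalent standalone sketch follows; formatIntelMem and the placeholder register strings are illustrative, not the MCInst API.

    // Intel-syntax memory-reference formatting, e.g. "[EBX + 4*ESI + 8]".
    #include <sstream>
    #include <string>

    std::string formatIntelMem(const std::string &Base, const std::string &Index,
                               unsigned Scale, long long Disp) {
      std::ostringstream O;
      O << '[';
      bool NeedPlus = false;
      if (!Base.empty()) {
        O << Base;
        NeedPlus = true;
      }
      if (!Index.empty()) {
        if (NeedPlus)
          O << " + ";
        if (Scale != 1)
          O << Scale << '*';
        O << Index;
        NeedPlus = true;
      }
      if (Disp != 0 || !NeedPlus) {
        if (NeedPlus) {
          if (Disp >= 0) {
            O << " + ";
          } else {
            O << " - ";
            Disp = -Disp;
          }
        }
        O << Disp;
      }
      O << ']';
      return O.str();
    }

    // formatIntelMem("EBX", "ESI", 4, 8)   ->  "[EBX + 4*ESI + 8]"
    // formatIntelMem("EAX", "",    1, -16) ->  "[EAX - 16]"
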
diff --git a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h b/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h
deleted file mode 100644
index 545bf84..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h
+++ /dev/null
@@ -1,101 +0,0 @@
-//===-- X86IntelInstPrinter.h - Convert X86 MCInst to assembly syntax -----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This class prints an X86 MCInst to Intel style .s file syntax.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef X86_INTEL_INST_PRINTER_H
-#define X86_INTEL_INST_PRINTER_H
-
-#include "llvm/MC/MCInstPrinter.h"
-#include "llvm/Support/raw_ostream.h"
-
-namespace llvm {
- class MCOperand;
-
-class X86IntelInstPrinter : public MCInstPrinter {
-public:
- X86IntelInstPrinter(raw_ostream &O, const MCAsmInfo &MAI)
- : MCInstPrinter(O, MAI) {}
-
- virtual void printInst(const MCInst *MI);
- virtual StringRef getOpcodeName(unsigned Opcode) const;
-
- // Autogenerated by tblgen.
- void printInstruction(const MCInst *MI);
- static const char *getRegisterName(unsigned RegNo);
- static const char *getInstructionName(unsigned Opcode);
-
-
- void printOperand(const MCInst *MI, unsigned OpNo,
- const char *Modifier = 0);
- void printMemReference(const MCInst *MI, unsigned Op);
- void printLeaMemReference(const MCInst *MI, unsigned Op);
- void printSSECC(const MCInst *MI, unsigned Op);
- void print_pcrel_imm(const MCInst *MI, unsigned OpNo);
-
- void printopaquemem(const MCInst *MI, unsigned OpNo) {
- O << "OPAQUE PTR ";
- printMemReference(MI, OpNo);
- }
-
- void printi8mem(const MCInst *MI, unsigned OpNo) {
- O << "BYTE PTR ";
- printMemReference(MI, OpNo);
- }
- void printi16mem(const MCInst *MI, unsigned OpNo) {
- O << "WORD PTR ";
- printMemReference(MI, OpNo);
- }
- void printi32mem(const MCInst *MI, unsigned OpNo) {
- O << "DWORD PTR ";
- printMemReference(MI, OpNo);
- }
- void printi64mem(const MCInst *MI, unsigned OpNo) {
- O << "QWORD PTR ";
- printMemReference(MI, OpNo);
- }
- void printi128mem(const MCInst *MI, unsigned OpNo) {
- O << "XMMWORD PTR ";
- printMemReference(MI, OpNo);
- }
- void printf32mem(const MCInst *MI, unsigned OpNo) {
- O << "DWORD PTR ";
- printMemReference(MI, OpNo);
- }
- void printf64mem(const MCInst *MI, unsigned OpNo) {
- O << "QWORD PTR ";
- printMemReference(MI, OpNo);
- }
- void printf80mem(const MCInst *MI, unsigned OpNo) {
- O << "XWORD PTR ";
- printMemReference(MI, OpNo);
- }
- void printf128mem(const MCInst *MI, unsigned OpNo) {
- O << "XMMWORD PTR ";
- printMemReference(MI, OpNo);
- }
- void printlea32mem(const MCInst *MI, unsigned OpNo) {
- O << "DWORD PTR ";
- printLeaMemReference(MI, OpNo);
- }
- void printlea64mem(const MCInst *MI, unsigned OpNo) {
- O << "QWORD PTR ";
- printLeaMemReference(MI, OpNo);
- }
- void printlea64_32mem(const MCInst *MI, unsigned OpNo) {
- O << "QWORD PTR ";
- printLeaMemReference(MI, OpNo);
- }
-};
-
-}
-
-#endif
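
The per-type print helpers in this header differ only in the Intel pointer keyword they emit before the memory reference. Roughly, the keyword tracks the operand width, as in this simplified sketch (a plain function, not the tblgen-driven printer; the f80 case uses XWORD PTR and the lea variants reuse DWORD/QWORD PTR as in the methods above):

#include <cstdio>

// Rough width-to-keyword mapping mirrored from the header above; the real
// printer selects the keyword per operand type rather than per bit width.
static const char *intelPtrKeyword(unsigned bits) {
  switch (bits) {
  case 8:   return "BYTE PTR";
  case 16:  return "WORD PTR";
  case 32:  return "DWORD PTR";    // also used for f32 and lea32 operands
  case 64:  return "QWORD PTR";    // also used for f64 and lea64 operands
  case 80:  return "XWORD PTR";
  case 128: return "XMMWORD PTR";  // i128 and f128 operands
  default:  return "OPAQUE PTR";
  }
}

int main() {
  std::printf("%s [esp + 8]\n", intelPtrKeyword(32)); // DWORD PTR [esp + 8]
}
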
diff --git a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp b/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp
deleted file mode 100644
index fa8d13d..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp
+++ /dev/null
@@ -1,427 +0,0 @@
-//===-- X86MCInstLower.cpp - Convert X86 MachineInstr to an MCInst --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains code to lower X86 MachineInstrs to their corresponding
-// MCInst records.
-//
-//===----------------------------------------------------------------------===//
-
-#include "X86MCInstLower.h"
-#include "X86AsmPrinter.h"
-#include "X86COFFMachineModuleInfo.h"
-#include "X86MCAsmInfo.h"
-#include "X86MCTargetExpr.h"
-#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/CodeGen/MachineModuleInfoImpls.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/Target/Mangler.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/Type.h"
-using namespace llvm;
-
-
-const X86Subtarget &X86MCInstLower::getSubtarget() const {
- return AsmPrinter.getSubtarget();
-}
-
-MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
- assert(getSubtarget().isTargetDarwin() &&"Can only get MachO info on darwin");
- return AsmPrinter.MMI->getObjFileInfo<MachineModuleInfoMachO>();
-}
-
-
-MCSymbol *X86MCInstLower::GetPICBaseSymbol() const {
- const TargetLowering *TLI = AsmPrinter.TM.getTargetLowering();
- return static_cast<const X86TargetLowering*>(TLI)->
- getPICBaseSymbol(AsmPrinter.MF, Ctx);
-}
-
-/// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol
-/// operand to an MCSymbol.
-MCSymbol *X86MCInstLower::
-GetSymbolFromOperand(const MachineOperand &MO) const {
- assert((MO.isGlobal() || MO.isSymbol()) && "Isn't a symbol reference");
-
- SmallString<128> Name;
-
- if (MO.isGlobal()) {
- bool isImplicitlyPrivate = false;
- if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB ||
- MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
- MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE ||
- MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE)
- isImplicitlyPrivate = true;
-
- const GlobalValue *GV = MO.getGlobal();
- Mang->getNameWithPrefix(Name, GV, isImplicitlyPrivate);
-
- if (getSubtarget().isTargetCygMing()) {
- X86COFFMachineModuleInfo &COFFMMI =
- AsmPrinter.MMI->getObjFileInfo<X86COFFMachineModuleInfo>();
- COFFMMI.DecorateCygMingName(Name, GV, *AsmPrinter.TM.getTargetData());
- }
- } else {
- assert(MO.isSymbol());
- Name += AsmPrinter.MAI->getGlobalPrefix();
- Name += MO.getSymbolName();
- }
-
-  // If the target flags on the operand change the name of the symbol, do that
- // before we return the symbol.
- switch (MO.getTargetFlags()) {
- default: break;
- case X86II::MO_DLLIMPORT: {
- // Handle dllimport linkage.
- const char *Prefix = "__imp_";
- Name.insert(Name.begin(), Prefix, Prefix+strlen(Prefix));
- break;
- }
- case X86II::MO_DARWIN_NONLAZY:
- case X86II::MO_DARWIN_NONLAZY_PIC_BASE: {
- Name += "$non_lazy_ptr";
- MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
-
- MCSymbol *&StubSym = getMachOMMI().getGVStubEntry(Sym);
- if (StubSym == 0) {
- assert(MO.isGlobal() && "Extern symbol not handled yet");
- StubSym = AsmPrinter.GetGlobalValueSymbol(MO.getGlobal());
- }
- return Sym;
- }
- case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: {
- Name += "$non_lazy_ptr";
- MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
- MCSymbol *&StubSym = getMachOMMI().getHiddenGVStubEntry(Sym);
- if (StubSym == 0) {
- assert(MO.isGlobal() && "Extern symbol not handled yet");
- StubSym = AsmPrinter.GetGlobalValueSymbol(MO.getGlobal());
- }
- return Sym;
- }
- case X86II::MO_DARWIN_STUB: {
- Name += "$stub";
- MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
- MCSymbol *&StubSym = getMachOMMI().getFnStubEntry(Sym);
- if (StubSym)
- return Sym;
-
- if (MO.isGlobal()) {
- StubSym = AsmPrinter.GetGlobalValueSymbol(MO.getGlobal());
- } else {
- Name.erase(Name.end()-5, Name.end());
- StubSym = Ctx.GetOrCreateSymbol(Name.str());
- }
- return Sym;
- }
- }
-
- return Ctx.GetOrCreateSymbol(Name.str());
-}
-
-MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
- MCSymbol *Sym) const {
- // FIXME: We would like an efficient form for this, so we don't have to do a
- // lot of extra uniquing.
- const MCExpr *Expr = 0;
- X86MCTargetExpr::VariantKind RefKind = X86MCTargetExpr::Invalid;
-
- switch (MO.getTargetFlags()) {
- default: llvm_unreachable("Unknown target flag on GV operand");
- case X86II::MO_NO_FLAG: // No flag.
- // These affect the name of the symbol, not any suffix.
- case X86II::MO_DARWIN_NONLAZY:
- case X86II::MO_DLLIMPORT:
- case X86II::MO_DARWIN_STUB:
- break;
-
- case X86II::MO_TLSGD: RefKind = X86MCTargetExpr::TLSGD; break;
- case X86II::MO_GOTTPOFF: RefKind = X86MCTargetExpr::GOTTPOFF; break;
- case X86II::MO_INDNTPOFF: RefKind = X86MCTargetExpr::INDNTPOFF; break;
- case X86II::MO_TPOFF: RefKind = X86MCTargetExpr::TPOFF; break;
- case X86II::MO_NTPOFF: RefKind = X86MCTargetExpr::NTPOFF; break;
- case X86II::MO_GOTPCREL: RefKind = X86MCTargetExpr::GOTPCREL; break;
- case X86II::MO_GOT: RefKind = X86MCTargetExpr::GOT; break;
- case X86II::MO_GOTOFF: RefKind = X86MCTargetExpr::GOTOFF; break;
- case X86II::MO_PLT: RefKind = X86MCTargetExpr::PLT; break;
- case X86II::MO_PIC_BASE_OFFSET:
- case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
- case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
- Expr = MCSymbolRefExpr::Create(Sym, Ctx);
- // Subtract the pic base.
- Expr = MCBinaryExpr::CreateSub(Expr,
- MCSymbolRefExpr::Create(GetPICBaseSymbol(), Ctx),
- Ctx);
- break;
- }
-
- if (Expr == 0) {
- if (RefKind == X86MCTargetExpr::Invalid)
- Expr = MCSymbolRefExpr::Create(Sym, Ctx);
- else
- Expr = X86MCTargetExpr::Create(Sym, RefKind, Ctx);
- }
-
- if (!MO.isJTI() && MO.getOffset())
- Expr = MCBinaryExpr::CreateAdd(Expr,
- MCConstantExpr::Create(MO.getOffset(), Ctx),
- Ctx);
- return MCOperand::CreateExpr(Expr);
-}
-
-
-
-static void lower_subreg32(MCInst *MI, unsigned OpNo) {
- // Convert registers in the addr mode according to subreg32.
- unsigned Reg = MI->getOperand(OpNo).getReg();
- if (Reg != 0)
- MI->getOperand(OpNo).setReg(getX86SubSuperRegister(Reg, MVT::i32));
-}
-
-static void lower_lea64_32mem(MCInst *MI, unsigned OpNo) {
- // Convert registers in the addr mode according to subreg64.
- for (unsigned i = 0; i != 4; ++i) {
- if (!MI->getOperand(OpNo+i).isReg()) continue;
-
- unsigned Reg = MI->getOperand(OpNo+i).getReg();
- if (Reg == 0) continue;
-
- MI->getOperand(OpNo+i).setReg(getX86SubSuperRegister(Reg, MVT::i64));
- }
-}
-
-/// LowerSubReg32_Op0 - Things like MOVZX16rr8 -> MOVZX32rr8.
-static void LowerSubReg32_Op0(MCInst &OutMI, unsigned NewOpc) {
- OutMI.setOpcode(NewOpc);
- lower_subreg32(&OutMI, 0);
-}
-/// LowerUnaryToTwoAddr - R = setb -> R = sbb R, R
-static void LowerUnaryToTwoAddr(MCInst &OutMI, unsigned NewOpc) {
- OutMI.setOpcode(NewOpc);
- OutMI.addOperand(OutMI.getOperand(0));
- OutMI.addOperand(OutMI.getOperand(0));
-}
-
-
-void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
- OutMI.setOpcode(MI->getOpcode());
-
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
-
- MCOperand MCOp;
- switch (MO.getType()) {
- default:
- MI->dump();
- llvm_unreachable("unknown operand type");
- case MachineOperand::MO_Register:
- // Ignore all implicit register operands.
- if (MO.isImplicit()) continue;
- MCOp = MCOperand::CreateReg(MO.getReg());
- break;
- case MachineOperand::MO_Immediate:
- MCOp = MCOperand::CreateImm(MO.getImm());
- break;
- case MachineOperand::MO_MachineBasicBlock:
- MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
- MO.getMBB()->getSymbol(Ctx), Ctx));
- break;
- case MachineOperand::MO_GlobalAddress:
- MCOp = LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
- break;
- case MachineOperand::MO_ExternalSymbol:
- MCOp = LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
- break;
- case MachineOperand::MO_JumpTableIndex:
- MCOp = LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
- break;
- case MachineOperand::MO_ConstantPoolIndex:
- MCOp = LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
- break;
- case MachineOperand::MO_BlockAddress:
- MCOp = LowerSymbolOperand(MO,
- AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
- break;
- }
-
- OutMI.addOperand(MCOp);
- }
-
- // Handle a few special cases to eliminate operand modifiers.
- switch (OutMI.getOpcode()) {
- case X86::LEA64_32r: // Handle 'subreg rewriting' for the lea64_32mem operand.
- lower_lea64_32mem(&OutMI, 1);
- break;
- case X86::MOVZX16rr8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rr8); break;
- case X86::MOVZX16rm8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rm8); break;
- case X86::MOVSX16rr8: LowerSubReg32_Op0(OutMI, X86::MOVSX32rr8); break;
- case X86::MOVSX16rm8: LowerSubReg32_Op0(OutMI, X86::MOVSX32rm8); break;
- case X86::MOVZX64rr32: LowerSubReg32_Op0(OutMI, X86::MOV32rr); break;
- case X86::MOVZX64rm32: LowerSubReg32_Op0(OutMI, X86::MOV32rm); break;
- case X86::MOV64ri64i32: LowerSubReg32_Op0(OutMI, X86::MOV32ri); break;
- case X86::MOVZX64rr8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rr8); break;
- case X86::MOVZX64rm8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rm8); break;
- case X86::MOVZX64rr16: LowerSubReg32_Op0(OutMI, X86::MOVZX32rr16); break;
- case X86::MOVZX64rm16: LowerSubReg32_Op0(OutMI, X86::MOVZX32rm16); break;
- case X86::SETB_C8r: LowerUnaryToTwoAddr(OutMI, X86::SBB8rr); break;
- case X86::SETB_C16r: LowerUnaryToTwoAddr(OutMI, X86::SBB16rr); break;
- case X86::SETB_C32r: LowerUnaryToTwoAddr(OutMI, X86::SBB32rr); break;
- case X86::SETB_C64r: LowerUnaryToTwoAddr(OutMI, X86::SBB64rr); break;
- case X86::MOV8r0: LowerUnaryToTwoAddr(OutMI, X86::XOR8rr); break;
- case X86::MOV32r0: LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); break;
- case X86::MMX_V_SET0: LowerUnaryToTwoAddr(OutMI, X86::MMX_PXORrr); break;
- case X86::MMX_V_SETALLONES:
- LowerUnaryToTwoAddr(OutMI, X86::MMX_PCMPEQDrr); break;
- case X86::FsFLD0SS: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
- case X86::FsFLD0SD: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
- case X86::V_SET0: LowerUnaryToTwoAddr(OutMI, X86::XORPSrr); break;
- case X86::V_SETALLONES: LowerUnaryToTwoAddr(OutMI, X86::PCMPEQDrr); break;
-
- case X86::MOV16r0:
- LowerSubReg32_Op0(OutMI, X86::MOV32r0); // MOV16r0 -> MOV32r0
- LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
- break;
- case X86::MOV64r0:
- LowerSubReg32_Op0(OutMI, X86::MOV32r0); // MOV64r0 -> MOV32r0
- LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
- break;
- }
-}
-
-
-
-void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
- X86MCInstLower MCInstLowering(OutContext, Mang, *this);
- switch (MI->getOpcode()) {
- case TargetOpcode::DBG_VALUE: {
- // FIXME: if this is implemented for another target before it goes
- // away completely, the common part should be moved into AsmPrinter.
- if (!VerboseAsm)
- return;
- O << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
- unsigned NOps = MI->getNumOperands();
-    // cast away const; DIVariable etc. do not take const operands for some reason.
- DIVariable V((MDNode*)(MI->getOperand(NOps-1).getMetadata()));
- O << V.getName();
- O << " <- ";
- if (NOps==3) {
- // Register or immediate value. Register 0 means undef.
- assert(MI->getOperand(0).getType()==MachineOperand::MO_Register ||
- MI->getOperand(0).getType()==MachineOperand::MO_Immediate ||
- MI->getOperand(0).getType()==MachineOperand::MO_FPImmediate);
- if (MI->getOperand(0).getType()==MachineOperand::MO_Register &&
- MI->getOperand(0).getReg()==0) {
- // Suppress offset in this case, it is not meaningful.
- O << "undef";
- OutStreamer.AddBlankLine();
- return;
- } else if (MI->getOperand(0).getType()==MachineOperand::MO_FPImmediate) {
- // This is more naturally done in printOperand, but since the only use
- // of such an operand is in this comment and that is temporary (and it's
- // ugly), we prefer to keep this localized.
- // The include of Type.h may be removable when this code is.
- if (MI->getOperand(0).getFPImm()->getType()->isFloatTy() ||
- MI->getOperand(0).getFPImm()->getType()->isDoubleTy())
- MI->getOperand(0).print(O, &TM);
- else {
- // There is no good way to print long double. Convert a copy to
- // double. Ah well, it's only a comment.
- bool ignored;
- APFloat APF = APFloat(MI->getOperand(0).getFPImm()->getValueAPF());
- APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven,
- &ignored);
- O << "(long double) " << APF.convertToDouble();
- }
- } else
- printOperand(MI, 0);
- } else {
- // Frame address. Currently handles register +- offset only.
- assert(MI->getOperand(0).getType()==MachineOperand::MO_Register);
- assert(MI->getOperand(3).getType()==MachineOperand::MO_Immediate);
- O << '['; printOperand(MI, 0); O << '+'; printOperand(MI, 3); O << ']';
- }
- O << "+";
- printOperand(MI, NOps-2);
- OutStreamer.AddBlankLine();
- return;
- }
- case X86::MOVPC32r: {
- MCInst TmpInst;
-    // This is a pseudo op for a two-instruction sequence with a label, which
- // looks like:
- // call "L1$pb"
- // "L1$pb":
- // popl %esi
-
- // Emit the call.
- MCSymbol *PICBase = MCInstLowering.GetPICBaseSymbol();
- TmpInst.setOpcode(X86::CALLpcrel32);
- // FIXME: We would like an efficient form for this, so we don't have to do a
- // lot of extra uniquing.
- TmpInst.addOperand(MCOperand::CreateExpr(MCSymbolRefExpr::Create(PICBase,
- OutContext)));
- OutStreamer.EmitInstruction(TmpInst);
-
- // Emit the label.
- OutStreamer.EmitLabel(PICBase);
-
- // popl $reg
- TmpInst.setOpcode(X86::POP32r);
- TmpInst.getOperand(0) = MCOperand::CreateReg(MI->getOperand(0).getReg());
- OutStreamer.EmitInstruction(TmpInst);
- return;
- }
-
- case X86::ADD32ri: {
- // Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
- if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
- break;
-
- // Okay, we have something like:
- // EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)
-
- // For this, we want to print something like:
- // MYGLOBAL + (. - PICBASE)
- // However, we can't generate a ".", so just emit a new label here and refer
- // to it. We know that this operand flag occurs at most once per function.
- const char *Prefix = MAI->getPrivateGlobalPrefix();
- MCSymbol *DotSym = OutContext.GetOrCreateSymbol(Twine(Prefix)+"picbaseref"+
- Twine(getFunctionNumber()));
- OutStreamer.EmitLabel(DotSym);
-
- // Now that we have emitted the label, lower the complex operand expression.
- MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));
-
- const MCExpr *DotExpr = MCSymbolRefExpr::Create(DotSym, OutContext);
- const MCExpr *PICBase =
- MCSymbolRefExpr::Create(MCInstLowering.GetPICBaseSymbol(), OutContext);
- DotExpr = MCBinaryExpr::CreateSub(DotExpr, PICBase, OutContext);
-
- DotExpr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(OpSym,OutContext),
- DotExpr, OutContext);
-
- MCInst TmpInst;
- TmpInst.setOpcode(X86::ADD32ri);
- TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
- TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(1).getReg()));
- TmpInst.addOperand(MCOperand::CreateExpr(DotExpr));
- OutStreamer.EmitInstruction(TmpInst);
- return;
- }
- }
-
- MCInst TmpInst;
- MCInstLowering.Lower(MI, TmpInst);
-
- OutStreamer.EmitInstruction(TmpInst);
-}
-
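
One pattern in the removed lowering code worth spelling out is LowerUnaryToTwoAddr: a one-operand pseudo such as MOV32r0 or SETB_C32r is turned into a real two-address instruction (XOR32rr, SBB32rr) by reusing its destination register as both sources. A toy sketch of that rewrite, using an invented ToyInst record instead of the LLVM MCInst API:

#include <cstdio>
#include <string>
#include <vector>

// Stand-in for an MCInst: an opcode name plus named register operands
// (purely illustrative).
struct ToyInst {
  std::string Opcode;
  std::vector<std::string> Ops;
};

// Mirror of LowerUnaryToTwoAddr: switch the opcode and append operand 0 twice,
// so the result carries destination, source1 and source2 (all the same register).
static void lowerUnaryToTwoAddr(ToyInst &I, const std::string &NewOpc) {
  const std::string Dst = I.Ops[0];
  I.Opcode = NewOpc;
  I.Ops.push_back(Dst);
  I.Ops.push_back(Dst);
}

int main() {
  ToyInst I{"MOV32r0", {"eax"}};      // pseudo: eax = 0
  lowerUnaryToTwoAddr(I, "XOR32rr");  // lowered form of "xor eax, eax"
  std::printf("%s", I.Opcode.c_str());
  for (const auto &Op : I.Ops)
    std::printf(" %s", Op.c_str());
  std::printf("\n");                  // prints: XOR32rr eax eax eax
}

The same trick gives the carry-materializing SETB_C*r pseudos their sbb reg, reg form, which is why the switch above routes every SETB_C variant and the MOV*r0/V_SET0 idioms through this helper.
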
diff --git a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.h b/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.h
deleted file mode 100644
index ebd23f6..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.h
+++ /dev/null
@@ -1,51 +0,0 @@
-//===-- X86MCInstLower.h - Lower MachineInstr to MCInst -------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef X86_MCINSTLOWER_H
-#define X86_MCINSTLOWER_H
-
-#include "llvm/Support/Compiler.h"
-
-namespace llvm {
- class MCContext;
- class MCInst;
- class MCOperand;
- class MCSymbol;
- class MachineInstr;
- class MachineModuleInfoMachO;
- class MachineOperand;
- class Mangler;
- class X86AsmPrinter;
- class X86Subtarget;
-
-/// X86MCInstLower - This class is used to lower a MachineInstr into an MCInst.
-class VISIBILITY_HIDDEN X86MCInstLower {
- MCContext &Ctx;
- Mangler *Mang;
- X86AsmPrinter &AsmPrinter;
-
- const X86Subtarget &getSubtarget() const;
-public:
- X86MCInstLower(MCContext &ctx, Mangler *mang, X86AsmPrinter &asmprinter)
- : Ctx(ctx), Mang(mang), AsmPrinter(asmprinter) {}
-
- void Lower(const MachineInstr *MI, MCInst &OutMI) const;
-
- MCSymbol *GetPICBaseSymbol() const;
-
- MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
- MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
-
-private:
- MachineModuleInfoMachO &getMachOMMI() const;
-};
-
-}
-
-#endif
diff --git a/libclamav/c++/llvm/lib/Target/X86/CMakeLists.txt b/libclamav/c++/llvm/lib/Target/X86/CMakeLists.txt
index eed3b45..e9399f5 100644
--- a/libclamav/c++/llvm/lib/Target/X86/CMakeLists.txt
+++ b/libclamav/c++/llvm/lib/Target/X86/CMakeLists.txt
@@ -13,33 +13,40 @@ tablegen(X86GenDAGISel.inc -gen-dag-isel)
tablegen(X86GenFastISel.inc -gen-fast-isel)
tablegen(X86GenCallingConv.inc -gen-callingconv)
tablegen(X86GenSubtarget.inc -gen-subtarget)
+tablegen(X86GenEDInfo.inc -gen-enhanced-disassembly-info)
set(sources
+ SSEDomainFix.cpp
X86AsmBackend.cpp
- X86CodeEmitter.cpp
+ X86AsmPrinter.cpp
X86COFFMachineModuleInfo.cpp
+ X86CodeEmitter.cpp
X86ELFWriterInfo.cpp
+ X86FastISel.cpp
X86FloatingPoint.cpp
- X86FloatingPointRegKill.cpp
X86ISelDAGToDAG.cpp
X86ISelLowering.cpp
X86InstrInfo.cpp
X86JITInfo.cpp
X86MCAsmInfo.cpp
X86MCCodeEmitter.cpp
- X86MCTargetExpr.cpp
+ X86MCInstLower.cpp
X86RegisterInfo.cpp
+ X86SelectionDAGInfo.cpp
X86Subtarget.cpp
X86TargetMachine.cpp
X86TargetObjectFile.cpp
- X86FastISel.cpp
)
if( CMAKE_CL_64 )
enable_language(ASM_MASM)
- set(sources ${sources} X86CompilationCallback_Win64.asm)
+ ADD_CUSTOM_COMMAND(
+ OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/X86CompilationCallback_Win64.obj
+ COMMAND ${CMAKE_ASM_MASM_COMPILER} /Fo ${CMAKE_CURRENT_BINARY_DIR}/X86CompilationCallback_Win64.obj /c ${CMAKE_CURRENT_SOURCE_DIR}/X86CompilationCallback_Win64.asm
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/X86CompilationCallback_Win64.asm
+ )
+ set(sources ${sources} ${CMAKE_CURRENT_BINARY_DIR}/X86CompilationCallback_Win64.obj)
endif()
add_llvm_target(X86CodeGen ${sources})
-target_link_libraries (LLVMX86CodeGen LLVMSelectionDAG)
diff --git a/libclamav/c++/llvm/lib/Target/X86/Disassembler/CMakeLists.txt b/libclamav/c++/llvm/lib/Target/X86/Disassembler/CMakeLists.txt
deleted file mode 100644
index 2a83a9c..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/Disassembler/CMakeLists.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. )
-
-add_llvm_library(LLVMX86Disassembler
- X86Disassembler.cpp
- X86DisassemblerDecoder.c
- )
-add_dependencies(LLVMX86Disassembler X86CodeGenTable_gen)
diff --git a/libclamav/c++/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp b/libclamav/c++/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
deleted file mode 100644
index a316860..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ /dev/null
@@ -1,476 +0,0 @@
-//===- X86Disassembler.cpp - Disassembler for x86 and x86_64 ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is part of the X86 Disassembler.
-// It contains code to translate the data produced by the decoder into
-// MCInsts.
-// Documentation for the disassembler can be found in X86Disassembler.h.
-//
-//===----------------------------------------------------------------------===//
-
-#include "X86Disassembler.h"
-#include "X86DisassemblerDecoder.h"
-
-#include "llvm/MC/MCDisassembler.h"
-#include "llvm/MC/MCDisassembler.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/Target/TargetRegistry.h"
-#include "llvm/Support/MemoryObject.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-
-#include "X86GenRegisterNames.inc"
-
-using namespace llvm;
-using namespace llvm::X86Disassembler;
-
-namespace llvm {
-
-// Fill-ins to make the compiler happy. These constants are never actually
-// assigned; they are just filler to make an automatically-generated switch
-// statement work.
-namespace X86 {
- enum {
- BX_SI = 500,
- BX_DI = 501,
- BP_SI = 502,
- BP_DI = 503,
- sib = 504,
- sib64 = 505
- };
-}
-
-extern Target TheX86_32Target, TheX86_64Target;
-
-}
-
-static void translateInstruction(MCInst &target,
- InternalInstruction &source);
-
-X86GenericDisassembler::X86GenericDisassembler(DisassemblerMode mode) :
- MCDisassembler(),
- fMode(mode) {
-}
-
-X86GenericDisassembler::~X86GenericDisassembler() {
-}
-
-/// regionReader - a callback function that wraps the readByte method from
-/// MemoryObject.
-///
-/// @param arg - The generic callback parameter. In this case, this should
-/// be a pointer to a MemoryObject.
-/// @param byte - A pointer to the byte to be read.
-/// @param address - The address to be read.
-static int regionReader(void* arg, uint8_t* byte, uint64_t address) {
- MemoryObject* region = static_cast<MemoryObject*>(arg);
- return region->readByte(address, byte);
-}
-
-/// logger - a callback function that wraps the operator<< method from
-/// raw_ostream.
-///
-/// @param arg - The generic callback parameter. This should be a pointer
-/// to a raw_ostream.
-/// @param log - A string to be logged. logger() adds a newline.
-static void logger(void* arg, const char* log) {
- if (!arg)
- return;
-
- raw_ostream &vStream = *(static_cast<raw_ostream*>(arg));
- vStream << log << "\n";
-}
-
-//
-// Public interface for the disassembler
-//
-
-bool X86GenericDisassembler::getInstruction(MCInst &instr,
- uint64_t &size,
-                                            const MemoryObject &region,
- uint64_t address,
- raw_ostream &vStream) const {
- InternalInstruction internalInstr;
-
- int ret = decodeInstruction(&internalInstr,
- regionReader,
-                              (void*)&region,
- logger,
- (void*)&vStream,
- address,
- fMode);
-
- if(ret) {
- size = internalInstr.readerCursor - address;
- return false;
- }
- else {
- size = internalInstr.length;
- translateInstruction(instr, internalInstr);
- return true;
- }
-}
-
-//
-// Private code that translates from struct InternalInstructions to MCInsts.
-//
-
-/// translateRegister - Translates an internal register to the appropriate LLVM
-/// register, and appends it as an operand to an MCInst.
-///
-/// @param mcInst - The MCInst to append to.
-/// @param reg - The Reg to append.
-static void translateRegister(MCInst &mcInst, Reg reg) {
-#define ENTRY(x) X86::x,
- uint8_t llvmRegnums[] = {
- ALL_REGS
- 0
- };
-#undef ENTRY
-
- uint8_t llvmRegnum = llvmRegnums[reg];
- mcInst.addOperand(MCOperand::CreateReg(llvmRegnum));
-}
-
-/// translateImmediate - Appends an immediate operand to an MCInst.
-///
-/// @param mcInst - The MCInst to append to.
-/// @param immediate - The immediate value to append.
-static void translateImmediate(MCInst &mcInst, uint64_t immediate) {
- mcInst.addOperand(MCOperand::CreateImm(immediate));
-}
-
-/// translateRMRegister - Translates a register stored in the R/M field of the
-/// ModR/M byte to its LLVM equivalent and appends it to an MCInst.
-/// @param mcInst - The MCInst to append to.
-/// @param insn - The internal instruction to extract the R/M field
-/// from.
-static void translateRMRegister(MCInst &mcInst,
- InternalInstruction &insn) {
- assert(insn.eaBase != EA_BASE_sib && insn.eaBase != EA_BASE_sib64 &&
- "A R/M register operand may not have a SIB byte");
-
- switch (insn.eaBase) {
- case EA_BASE_NONE:
- llvm_unreachable("EA_BASE_NONE for ModR/M base");
- break;
-#define ENTRY(x) case EA_BASE_##x:
- ALL_EA_BASES
-#undef ENTRY
- llvm_unreachable("A R/M register operand may not have a base; "
- "the operand must be a register.");
- break;
-#define ENTRY(x) \
- case EA_REG_##x: \
- mcInst.addOperand(MCOperand::CreateReg(X86::x)); break;
- ALL_REGS
-#undef ENTRY
- default:
- llvm_unreachable("Unexpected EA base register");
- }
-}
-
-/// translateRMMemory - Translates a memory operand stored in the Mod and R/M
-/// fields of an internal instruction (and possibly its SIB byte) to a memory
-/// operand in LLVM's format, and appends it to an MCInst.
-///
-/// @param mcInst - The MCInst to append to.
-/// @param insn - The instruction to extract Mod, R/M, and SIB fields
-/// from.
-/// @param sr - Whether or not to emit the segment register. The
-/// LEA instruction does not expect a segment-register
-/// operand.
-static void translateRMMemory(MCInst &mcInst,
- InternalInstruction &insn,
- bool sr) {
- // Addresses in an MCInst are represented as five operands:
- // 1. basereg (register) The R/M base, or (if there is a SIB) the
- // SIB base
- // 2. scaleamount (immediate) 1, or (if there is a SIB) the specified
- // scale amount
- // 3. indexreg (register) x86_registerNONE, or (if there is a SIB)
- // the index (which is multiplied by the
- // scale amount)
- // 4. displacement (immediate) 0, or the displacement if there is one
- // 5. segmentreg (register) x86_registerNONE for now, but could be set
- // if we have segment overrides
-
- MCOperand baseReg;
- MCOperand scaleAmount;
- MCOperand indexReg;
- MCOperand displacement;
- MCOperand segmentReg;
-
- if (insn.eaBase == EA_BASE_sib || insn.eaBase == EA_BASE_sib64) {
- if (insn.sibBase != SIB_BASE_NONE) {
- switch (insn.sibBase) {
- default:
- llvm_unreachable("Unexpected sibBase");
-#define ENTRY(x) \
- case SIB_BASE_##x: \
- baseReg = MCOperand::CreateReg(X86::x); break;
- ALL_SIB_BASES
-#undef ENTRY
- }
- } else {
- baseReg = MCOperand::CreateReg(0);
- }
-
- if (insn.sibIndex != SIB_INDEX_NONE) {
- switch (insn.sibIndex) {
- default:
- llvm_unreachable("Unexpected sibIndex");
-#define ENTRY(x) \
- case SIB_INDEX_##x: \
- indexReg = MCOperand::CreateReg(X86::x); break;
- EA_BASES_32BIT
- EA_BASES_64BIT
-#undef ENTRY
- }
- } else {
- indexReg = MCOperand::CreateReg(0);
- }
-
- scaleAmount = MCOperand::CreateImm(insn.sibScale);
- } else {
- switch (insn.eaBase) {
- case EA_BASE_NONE:
- assert(insn.eaDisplacement != EA_DISP_NONE &&
- "EA_BASE_NONE and EA_DISP_NONE for ModR/M base");
-
- if (insn.mode == MODE_64BIT)
- baseReg = MCOperand::CreateReg(X86::RIP); // Section 2.2.1.6
- else
- baseReg = MCOperand::CreateReg(0);
-
- indexReg = MCOperand::CreateReg(0);
- break;
- case EA_BASE_BX_SI:
- baseReg = MCOperand::CreateReg(X86::BX);
- indexReg = MCOperand::CreateReg(X86::SI);
- break;
- case EA_BASE_BX_DI:
- baseReg = MCOperand::CreateReg(X86::BX);
- indexReg = MCOperand::CreateReg(X86::DI);
- break;
- case EA_BASE_BP_SI:
- baseReg = MCOperand::CreateReg(X86::BP);
- indexReg = MCOperand::CreateReg(X86::SI);
- break;
- case EA_BASE_BP_DI:
- baseReg = MCOperand::CreateReg(X86::BP);
- indexReg = MCOperand::CreateReg(X86::DI);
- break;
- default:
- indexReg = MCOperand::CreateReg(0);
- switch (insn.eaBase) {
- default:
- llvm_unreachable("Unexpected eaBase");
- break;
- // Here, we will use the fill-ins defined above. However,
- // BX_SI, BX_DI, BP_SI, and BP_DI are all handled above and
- // sib and sib64 were handled in the top-level if, so they're only
- // placeholders to keep the compiler happy.
-#define ENTRY(x) \
- case EA_BASE_##x: \
- baseReg = MCOperand::CreateReg(X86::x); break;
- ALL_EA_BASES
-#undef ENTRY
-#define ENTRY(x) case EA_REG_##x:
- ALL_REGS
-#undef ENTRY
- llvm_unreachable("A R/M memory operand may not be a register; "
- "the base field must be a base.");
- break;
- }
- }
-
- scaleAmount = MCOperand::CreateImm(1);
- }
-
- displacement = MCOperand::CreateImm(insn.displacement);
-
- static const uint8_t segmentRegnums[SEG_OVERRIDE_max] = {
- 0, // SEG_OVERRIDE_NONE
- X86::CS,
- X86::SS,
- X86::DS,
- X86::ES,
- X86::FS,
- X86::GS
- };
-
- segmentReg = MCOperand::CreateReg(segmentRegnums[insn.segmentOverride]);
-
- mcInst.addOperand(baseReg);
- mcInst.addOperand(scaleAmount);
- mcInst.addOperand(indexReg);
- mcInst.addOperand(displacement);
-
- if (sr)
- mcInst.addOperand(segmentReg);
-}
-
-/// translateRM - Translates an operand stored in the R/M (and possibly SIB)
-/// byte of an instruction to LLVM form, and appends it to an MCInst.
-///
-/// @param mcInst - The MCInst to append to.
-/// @param operand - The operand, as stored in the descriptor table.
-/// @param insn - The instruction to extract Mod, R/M, and SIB fields
-/// from.
-static void translateRM(MCInst &mcInst,
- OperandSpecifier &operand,
- InternalInstruction &insn) {
- switch (operand.type) {
- default:
- llvm_unreachable("Unexpected type for a R/M operand");
- case TYPE_R8:
- case TYPE_R16:
- case TYPE_R32:
- case TYPE_R64:
- case TYPE_Rv:
- case TYPE_MM:
- case TYPE_MM32:
- case TYPE_MM64:
- case TYPE_XMM:
- case TYPE_XMM32:
- case TYPE_XMM64:
- case TYPE_XMM128:
- case TYPE_DEBUGREG:
- case TYPE_CR32:
- case TYPE_CR64:
- translateRMRegister(mcInst, insn);
- break;
- case TYPE_M:
- case TYPE_M8:
- case TYPE_M16:
- case TYPE_M32:
- case TYPE_M64:
- case TYPE_M128:
- case TYPE_M512:
- case TYPE_Mv:
- case TYPE_M32FP:
- case TYPE_M64FP:
- case TYPE_M80FP:
- case TYPE_M16INT:
- case TYPE_M32INT:
- case TYPE_M64INT:
- case TYPE_M1616:
- case TYPE_M1632:
- case TYPE_M1664:
- translateRMMemory(mcInst, insn, true);
- break;
- case TYPE_LEA:
- translateRMMemory(mcInst, insn, false);
- break;
- }
-}
-
-/// translateFPRegister - Translates a stack position on the FPU stack to its
-/// LLVM form, and appends it to an MCInst.
-///
-/// @param mcInst - The MCInst to append to.
-/// @param stackPos - The stack position to translate.
-static void translateFPRegister(MCInst &mcInst,
- uint8_t stackPos) {
- assert(stackPos < 8 && "Invalid FP stack position");
-
- mcInst.addOperand(MCOperand::CreateReg(X86::ST0 + stackPos));
-}
-
-/// translateOperand - Translates an operand stored in an internal instruction
-/// to LLVM's format and appends it to an MCInst.
-///
-/// @param mcInst - The MCInst to append to.
-/// @param operand - The operand, as stored in the descriptor table.
-/// @param insn - The internal instruction.
-static void translateOperand(MCInst &mcInst,
- OperandSpecifier &operand,
- InternalInstruction &insn) {
- switch (operand.encoding) {
- default:
- llvm_unreachable("Unhandled operand encoding during translation");
- case ENCODING_REG:
- translateRegister(mcInst, insn.reg);
- break;
- case ENCODING_RM:
- translateRM(mcInst, operand, insn);
- break;
- case ENCODING_CB:
- case ENCODING_CW:
- case ENCODING_CD:
- case ENCODING_CP:
- case ENCODING_CO:
- case ENCODING_CT:
- llvm_unreachable("Translation of code offsets isn't supported.");
- case ENCODING_IB:
- case ENCODING_IW:
- case ENCODING_ID:
- case ENCODING_IO:
- case ENCODING_Iv:
- case ENCODING_Ia:
- translateImmediate(mcInst,
- insn.immediates[insn.numImmediatesTranslated++]);
- break;
- case ENCODING_RB:
- case ENCODING_RW:
- case ENCODING_RD:
- case ENCODING_RO:
- translateRegister(mcInst, insn.opcodeRegister);
- break;
- case ENCODING_I:
- translateFPRegister(mcInst, insn.opcodeModifier);
- break;
- case ENCODING_Rv:
- translateRegister(mcInst, insn.opcodeRegister);
- break;
- case ENCODING_DUP:
- translateOperand(mcInst,
- insn.spec->operands[operand.type - TYPE_DUP0],
- insn);
- break;
- }
-}
-
-/// translateInstruction - Translates an internal instruction and all its
-/// operands to an MCInst.
-///
-/// @param mcInst - The MCInst to populate with the instruction's data.
-/// @param insn - The internal instruction.
-static void translateInstruction(MCInst &mcInst,
- InternalInstruction &insn) {
- assert(insn.spec);
-
- mcInst.setOpcode(insn.instructionID);
-
- int index;
-
- insn.numImmediatesTranslated = 0;
-
- for (index = 0; index < X86_MAX_OPERANDS; ++index) {
- if (insn.spec->operands[index].encoding != ENCODING_NONE)
- translateOperand(mcInst, insn.spec->operands[index], insn);
- }
-}
-
-static const MCDisassembler *createX86_32Disassembler(const Target &T) {
- return new X86Disassembler::X86_32Disassembler;
-}
-
-static const MCDisassembler *createX86_64Disassembler(const Target &T) {
- return new X86Disassembler::X86_64Disassembler;
-}
-
-extern "C" void LLVMInitializeX86Disassembler() {
- // Register the disassembler.
- TargetRegistry::RegisterMCDisassembler(TheX86_32Target,
- createX86_32Disassembler);
- TargetRegistry::RegisterMCDisassembler(TheX86_64Target,
- createX86_64Disassembler);
-}
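
The comment in translateRMMemory above is the key to reading the disassembled operands: every x86 memory reference is flattened into five consecutive MCInst operands (base register, scale, index register, displacement, segment register), with register number 0 meaning "none" and the segment operand dropped for LEA. A small illustration of what that tuple looks like for two concrete addresses, using an invented struct and register numbers (not the LLVM register enums):

#include <cstdint>
#include <cstdio>

// Illustrative mirror of the five-operand memory form the translator emits;
// 0 stands for "no register", as in the code above.
struct MemOperands {
  unsigned BaseReg;
  int64_t  ScaleAmount;
  unsigned IndexReg;
  int64_t  Displacement;
  unsigned SegmentReg;
};

int main() {
  // Hypothetical register numbers, for illustration only.
  enum { NoReg = 0, EBX = 1, ECX = 2, RIP = 3, FS = 4 };

  // mov eax, [ebx + 4*ecx + 16]   ->  base=EBX, scale=4, index=ECX, disp=16, seg=none
  const MemOperands A = {EBX, 4, ECX, 16, NoReg};
  // fs:[rip + 0x20] in 64-bit mode -> base=RIP, scale=1, index=none, disp=0x20, seg=FS
  const MemOperands B = {RIP, 1, NoReg, 0x20, FS};

  std::printf("A: base=%u scale=%lld index=%u disp=%lld seg=%u\n",
              A.BaseReg, (long long)A.ScaleAmount, A.IndexReg,
              (long long)A.Displacement, A.SegmentReg);
  std::printf("B: base=%u scale=%lld index=%u disp=%lld seg=%u\n",
              B.BaseReg, (long long)B.ScaleAmount, B.IndexReg,
              (long long)B.Displacement, B.SegmentReg);
}
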
diff --git a/libclamav/c++/llvm/lib/Target/X86/Disassembler/X86Disassembler.h b/libclamav/c++/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
deleted file mode 100644
index 0e6e0b0..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/Disassembler/X86Disassembler.h
+++ /dev/null
@@ -1,150 +0,0 @@
-//===- X86Disassembler.h - Disassembler for x86 and x86_64 ------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// The X86 disassembler is a table-driven disassembler for the 16-, 32-, and
-// 64-bit X86 instruction sets. The main decode sequence for an assembly
-// instruction in this disassembler is:
-//
-// 1. Read the prefix bytes and determine the attributes of the instruction.
-// These attributes, recorded in enum attributeBits
-// (X86DisassemblerDecoderCommon.h), form a bitmask. The table CONTEXTS_SYM
-// provides a mapping from bitmasks to contexts, which are represented by
-// enum InstructionContext (ibid.).
-//
-// 2. Read the opcode, and determine what kind of opcode it is. The
-// disassembler distinguishes four kinds of opcodes, which are enumerated in
-// OpcodeType (X86DisassemblerDecoderCommon.h): one-byte (0xnn), two-byte
-// (0x0f 0xnn), three-byte-38 (0x0f 0x38 0xnn), or three-byte-3a
-// (0x0f 0x3a 0xnn). Mandatory prefixes are treated as part of the context.
-//
-// 3. Depending on the opcode type, look in one of four ClassDecision structures
-// (X86DisassemblerDecoderCommon.h). Use the opcode class to determine which
-//    OpcodeDecision (ibid.) to look the opcode up in. Look up the opcode to get
-// a ModRMDecision (ibid.).
-//
-// 4. Some instructions, such as escape opcodes or extended opcodes, or even
-// instructions that have ModRM*Reg / ModRM*Mem forms in LLVM, need the
-// ModR/M byte to complete decode. The ModRMDecision's type is an entry from
-// ModRMDecisionType (X86DisassemblerDecoderCommon.h) that indicates if the
-// ModR/M byte is required and how to interpret it.
-//
-// 5. After resolving the ModRMDecision, the disassembler has a unique ID
-// of type InstrUID (X86DisassemblerDecoderCommon.h). Looking this ID up in
-// INSTRUCTIONS_SYM yields the name of the instruction and the encodings and
-// meanings of its operands.
-//
-// 6. For each operand, its encoding is an entry from OperandEncoding
-// (X86DisassemblerDecoderCommon.h) and its type is an entry from
-// OperandType (ibid.). The encoding indicates how to read it from the
-// instruction; the type indicates how to interpret the value once it has
-// been read. For example, a register operand could be stored in the R/M
-// field of the ModR/M byte, the REG field of the ModR/M byte, or added to
-//    the main opcode. This is orthogonal to its meaning (a GPR or an XMM
-// register, for instance). Given this information, the operands can be
-// extracted and interpreted.
-//
-// 7. As the last step, the disassembler translates the instruction information
-// and operands into a format understandable by the client - in this case, an
-// MCInst for use by the MC infrastructure.
-//
-// The disassembler is broken broadly into two parts: the table emitter that
-// emits the instruction decode tables discussed above during compilation, and
-// the disassembler itself. The table emitter is documented in more detail in
-// utils/TableGen/X86DisassemblerEmitter.h.
-//
-// X86Disassembler.h contains the public interface for the disassembler,
-// adhering to the MCDisassembler interface.
-// X86Disassembler.cpp contains the code responsible for step 7, and for
-// invoking the decoder to execute steps 1-6.
-// X86DisassemblerDecoderCommon.h contains the definitions needed by both the
-// table emitter and the disassembler.
-// X86DisassemblerDecoder.h contains the public interface of the decoder,
-// factored out into C for possible use by other projects.
-// X86DisassemblerDecoder.c contains the source code of the decoder, which is
-// responsible for steps 1-6.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef X86DISASSEMBLER_H
-#define X86DISASSEMBLER_H
-
-#define INSTRUCTION_SPECIFIER_FIELDS \
- const char* name;
-
-#define INSTRUCTION_IDS \
- InstrUID* instructionIDs;
-
-#include "X86DisassemblerDecoderCommon.h"
-
-#undef INSTRUCTION_SPECIFIER_FIELDS
-#undef INSTRUCTION_IDS
-
-#include "llvm/MC/MCDisassembler.h"
-
-struct InternalInstruction;
-
-namespace llvm {
-
-class MCInst;
-class MemoryObject;
-class raw_ostream;
-
-namespace X86Disassembler {
-
-/// X86GenericDisassembler - Generic disassembler for all X86 platforms.
-/// All that each platform class should have to do is subclass the constructor
-/// and provide a different disassemblerMode value.
-class X86GenericDisassembler : public MCDisassembler {
-protected:
- /// Constructor - Initializes the disassembler.
- ///
- /// @param mode - The X86 architecture mode to decode for.
- X86GenericDisassembler(DisassemblerMode mode);
-public:
- ~X86GenericDisassembler();
-
- /// getInstruction - See MCDisassembler.
- bool getInstruction(MCInst &instr,
- uint64_t &size,
-                      const MemoryObject &region,
- uint64_t address,
- raw_ostream &vStream) const;
-private:
- DisassemblerMode fMode;
-};
-
-/// X86_16Disassembler - 16-bit X86 disassembler.
-class X86_16Disassembler : public X86GenericDisassembler {
-public:
- X86_16Disassembler() :
- X86GenericDisassembler(MODE_16BIT) {
- }
-};
-
-/// X86_32Disassembler - 32-bit X86 disassembler.
-class X86_32Disassembler : public X86GenericDisassembler {
-public:
- X86_32Disassembler() :
- X86GenericDisassembler(MODE_32BIT) {
- }
-};
-
-/// X86_64Disassembler - 64-bit X86 disassembler.
-class X86_64Disassembler : public X86GenericDisassembler {
-public:
- X86_64Disassembler() :
- X86GenericDisassembler(MODE_64BIT) {
- }
-};
-
-} // namespace X86Disassembler
-
-} // namespace llvm
-
-#endif
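
The seven-step walk-through in the removed header boils down to a chain of table lookups: the prefix attributes pick an instruction context, the (context, opcode) pair picks a ModR/M decision, and that decision plus the ModR/M byte yield the final instruction ID. A deliberately tiny, self-contained sketch of that chain with invented tables and IDs (the real data lives in the generated CONTEXTS_SYM, ONEBYTE_SYM and INSTRUCTIONS_SYM tables):

#include <cstdint>
#include <cstdio>

// Toy stand-ins for the generated decode tables; contexts, sizes and IDs are invented.
enum ToyContext { CTX_DEFAULT = 0, CTX_OPSIZE = 1, CTX_MAX = 2 };
enum ToyModRMType { TOY_MODRM_ONEENTRY, TOY_MODRM_SPLITRM };

struct ToyModRMDecision {
  ToyModRMType Type;
  uint16_t IDs[2];  // [0] = memory form, [1] = register form
};

// One opcode-indexed table per context (one-byte opcodes only in this sketch).
static ToyModRMDecision Tables[CTX_MAX][256];

static uint16_t toyDecode(uint8_t attrMask, uint8_t opcode, uint8_t modRM) {
  // Step 1: attributes select a context (the real code consults CONTEXTS_SYM).
  ToyContext Ctx = (attrMask & 1) ? CTX_OPSIZE : CTX_DEFAULT;
  // Steps 2-3: the opcode selects a ModR/M decision within that context.
  const ToyModRMDecision &D = Tables[Ctx][opcode];
  // Steps 4-5: consult the ModR/M byte only if the decision requires it.
  if (D.Type == TOY_MODRM_ONEENTRY)
    return D.IDs[0];
  return ((modRM >> 6) == 0x3) ? D.IDs[1] : D.IDs[0];  // mod == 0b11: register form
}

int main() {
  // Invent two IDs for opcode 0x89 (MOV r/m32, r32): 100 = memory form, 101 = register form.
  Tables[CTX_DEFAULT][0x89] = ToyModRMDecision{TOY_MODRM_SPLITRM, {100, 101}};
  std::printf("%u\n", (unsigned)toyDecode(0, 0x89, 0xD8)); // mod=11 -> 101 (register form)
  std::printf("%u\n", (unsigned)toyDecode(0, 0x89, 0x18)); // mod=00 -> 100 (memory form)
}

The decoder implementation removed in the next hunk (X86DisassemblerDecoder.c) follows exactly this shape, with extra layers for the two- and three-byte opcode maps and the MODRM_FULL case that indexes the table by the whole ModR/M byte.
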
diff --git a/libclamav/c++/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c b/libclamav/c++/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
deleted file mode 100644
index a0a04ba..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
+++ /dev/null
@@ -1,1365 +0,0 @@
-/*===- X86DisassemblerDecoder.c - Disassembler decoder -------------*- C -*-==*
- *
- * The LLVM Compiler Infrastructure
- *
- * This file is distributed under the University of Illinois Open Source
- * License. See LICENSE.TXT for details.
- *
- *===----------------------------------------------------------------------===*
- *
- * This file is part of the X86 Disassembler.
- * It contains the implementation of the instruction decoder.
- * Documentation for the disassembler can be found in X86Disassembler.h.
- *
- *===----------------------------------------------------------------------===*/
-
-#include <assert.h> /* for assert() */
-#include <stdarg.h> /* for va_*() */
-#include <stdio.h> /* for vsnprintf() */
-#include <stdlib.h> /* for exit() */
-#include <string.h> /* for memset() */
-
-#include "X86DisassemblerDecoder.h"
-
-#include "X86GenDisassemblerTables.inc"
-
-#define TRUE 1
-#define FALSE 0
-
-#ifdef __GNUC__
-#define NORETURN __attribute__((noreturn))
-#else
-#define NORETURN
-#endif
-
-#define unreachable(s) \
- do { \
- fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, s); \
- exit(-1); \
- } while (0);
-
-/*
- * contextForAttrs - Client for the instruction context table. Takes a set of
- * attributes and returns the appropriate decode context.
- *
- * @param attrMask - Attributes, from the enumeration attributeBits.
- * @return - The InstructionContext to use when looking up an
- *                   instruction with these attributes.
- */
-static InstructionContext contextForAttrs(uint8_t attrMask) {
- return CONTEXTS_SYM[attrMask];
-}
-
-/*
- * modRMRequired - Reads the appropriate instruction table to determine whether
- * the ModR/M byte is required to decode a particular instruction.
- *
- * @param type - The opcode type (i.e., how many bytes it has).
- * @param insnContext - The context for the instruction, as returned by
- * contextForAttrs.
- * @param opcode - The last byte of the instruction's opcode, not counting
- * ModR/M extensions and escapes.
- * @return - TRUE if the ModR/M byte is required, FALSE otherwise.
- */
-static int modRMRequired(OpcodeType type,
- InstructionContext insnContext,
- uint8_t opcode) {
- const struct ContextDecision* decision = 0;
-
- switch (type) {
- case ONEBYTE:
- decision = &ONEBYTE_SYM;
- break;
- case TWOBYTE:
- decision = &TWOBYTE_SYM;
- break;
- case THREEBYTE_38:
- decision = &THREEBYTE38_SYM;
- break;
- case THREEBYTE_3A:
- decision = &THREEBYTE3A_SYM;
- break;
- }
-
- return decision->opcodeDecisions[insnContext].modRMDecisions[opcode].
- modrm_type != MODRM_ONEENTRY;
-
- unreachable("Unknown opcode type");
- return 0;
-}
-
-/*
- * decode - Reads the appropriate instruction table to obtain the unique ID of
- * an instruction.
- *
- * @param type - See modRMRequired().
- * @param insnContext - See modRMRequired().
- * @param opcode - See modRMRequired().
- * @param modRM - The ModR/M byte if required, or any value if not.
- */
-static InstrUID decode(OpcodeType type,
- InstructionContext insnContext,
- uint8_t opcode,
- uint8_t modRM) {
- struct ModRMDecision* dec;
-
- switch (type) {
- default:
- unreachable("Unknown opcode type");
- case ONEBYTE:
- dec = &ONEBYTE_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode];
- break;
- case TWOBYTE:
- dec = &TWOBYTE_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode];
- break;
- case THREEBYTE_38:
- dec = &THREEBYTE38_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode];
- break;
- case THREEBYTE_3A:
- dec = &THREEBYTE3A_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode];
- break;
- }
-
- switch (dec->modrm_type) {
- default:
- unreachable("Corrupt table! Unknown modrm_type");
- case MODRM_ONEENTRY:
- return dec->instructionIDs[0];
- case MODRM_SPLITRM:
- if (modFromModRM(modRM) == 0x3)
- return dec->instructionIDs[1];
- else
- return dec->instructionIDs[0];
- case MODRM_FULL:
- return dec->instructionIDs[modRM];
- }
-
- return 0;
-}
-
-/*
- * specifierForUID - Given a UID, returns the name and operand specification for
- * that instruction.
- *
- * @param uid - The unique ID for the instruction. This should be returned by
- * decode(); specifierForUID will not check bounds.
- * @return - A pointer to the specification for that instruction.
- */
-static struct InstructionSpecifier* specifierForUID(InstrUID uid) {
- return &INSTRUCTIONS_SYM[uid];
-}
-
-/*
- * consumeByte - Uses the reader function provided by the user to consume one
- * byte from the instruction's memory and advance the cursor.
- *
- * @param insn - The instruction with the reader function to use. The cursor
- * for this instruction is advanced.
- * @param byte - A pointer to a pre-allocated memory buffer to be populated
- * with the data read.
- * @return - 0 if the read was successful; nonzero otherwise.
- */
-static int consumeByte(struct InternalInstruction* insn, uint8_t* byte) {
- int ret = insn->reader(insn->readerArg, byte, insn->readerCursor);
-
- if (!ret)
- ++(insn->readerCursor);
-
- return ret;
-}
-
-/*
- * lookAtByte - Like consumeByte, but does not advance the cursor.
- *
- * @param insn - See consumeByte().
- * @param byte - See consumeByte().
- * @return - See consumeByte().
- */
-static int lookAtByte(struct InternalInstruction* insn, uint8_t* byte) {
- return insn->reader(insn->readerArg, byte, insn->readerCursor);
-}
-
-static void unconsumeByte(struct InternalInstruction* insn) {
- insn->readerCursor--;
-}
-
-#define CONSUME_FUNC(name, type) \
- static int name(struct InternalInstruction* insn, type* ptr) { \
- type combined = 0; \
- unsigned offset; \
- for (offset = 0; offset < sizeof(type); ++offset) { \
- uint8_t byte; \
- int ret = insn->reader(insn->readerArg, \
- &byte, \
- insn->readerCursor + offset); \
- if (ret) \
- return ret; \
- combined = combined | ((type)byte << ((type)offset * 8)); \
- } \
- *ptr = combined; \
- insn->readerCursor += sizeof(type); \
- return 0; \
- }
-
-/*
- * consume* - Use the reader function provided by the user to consume data
- * values of various sizes from the instruction's memory and advance the
- * cursor appropriately. These readers perform endian conversion.
- *
- * @param insn - See consumeByte().
- * @param ptr   - A pointer to a pre-allocated memory buffer of appropriate size to
- * be populated with the data read.
- * @return - See consumeByte().
- */
-CONSUME_FUNC(consumeInt8, int8_t)
-CONSUME_FUNC(consumeInt16, int16_t)
-CONSUME_FUNC(consumeInt32, int32_t)
-CONSUME_FUNC(consumeUInt16, uint16_t)
-CONSUME_FUNC(consumeUInt32, uint32_t)
-CONSUME_FUNC(consumeUInt64, uint64_t)
-
-/*
- * dbgprintf - Uses the logging function provided by the user to log a single
- * message, typically without a carriage-return.
- *
- * @param insn - The instruction containing the logging function.
- * @param format - See printf().
- * @param ... - See printf().
- */
-static void dbgprintf(struct InternalInstruction* insn,
- const char* format,
- ...) {
- char buffer[256];
- va_list ap;
-
- if (!insn->dlog)
- return;
-
- va_start(ap, format);
- (void)vsnprintf(buffer, sizeof(buffer), format, ap);
- va_end(ap);
-
- insn->dlog(insn->dlogArg, buffer);
-
- return;
-}
-
-/*
- * setPrefixPresent - Marks that a particular prefix is present at a particular
- * location.
- *
- * @param insn - The instruction to be marked as having the prefix.
- * @param prefix - The prefix that is present.
- * @param location - The location where the prefix is located (in the address
- * space of the instruction's reader).
- */
-static void setPrefixPresent(struct InternalInstruction* insn,
- uint8_t prefix,
- uint64_t location)
-{
- insn->prefixPresent[prefix] = 1;
- insn->prefixLocations[prefix] = location;
-}
-
-/*
- * isPrefixAtLocation - Queries an instruction to determine whether a prefix is
- * present at a given location.
- *
- * @param insn - The instruction to be queried.
- * @param prefix - The prefix.
- * @param location - The location to query.
- * @return - Whether the prefix is at that location.
- */
-static BOOL isPrefixAtLocation(struct InternalInstruction* insn,
- uint8_t prefix,
- uint64_t location)
-{
- if (insn->prefixPresent[prefix] == 1 &&
- insn->prefixLocations[prefix] == location)
- return TRUE;
- else
- return FALSE;
-}
-
-/*
- * readPrefixes - Consumes all of an instruction's prefix bytes, and marks the
- * instruction as having them. Also sets the instruction's default operand,
- * address, and other relevant data sizes to report operands correctly.
- *
- * @param insn - The instruction whose prefixes are to be read.
- * @return - 0 if the instruction could be read until the end of the prefix
- * bytes, and no prefixes conflicted; nonzero otherwise.
- */
-static int readPrefixes(struct InternalInstruction* insn) {
- BOOL isPrefix = TRUE;
- BOOL prefixGroups[4] = { FALSE };
- uint64_t prefixLocation;
- uint8_t byte;
-
- BOOL hasAdSize = FALSE;
- BOOL hasOpSize = FALSE;
-
- dbgprintf(insn, "readPrefixes()");
-
- while (isPrefix) {
- prefixLocation = insn->readerCursor;
-
- if (consumeByte(insn, &byte))
- return -1;
-
- switch (byte) {
- case 0xf0: /* LOCK */
- case 0xf2: /* REPNE/REPNZ */
- case 0xf3: /* REP or REPE/REPZ */
- if (prefixGroups[0])
- dbgprintf(insn, "Redundant Group 1 prefix");
- prefixGroups[0] = TRUE;
- setPrefixPresent(insn, byte, prefixLocation);
- break;
- case 0x2e: /* CS segment override -OR- Branch not taken */
- case 0x36: /* SS segment override -OR- Branch taken */
- case 0x3e: /* DS segment override */
- case 0x26: /* ES segment override */
- case 0x64: /* FS segment override */
- case 0x65: /* GS segment override */
- switch (byte) {
- case 0x2e:
- insn->segmentOverride = SEG_OVERRIDE_CS;
- break;
- case 0x36:
- insn->segmentOverride = SEG_OVERRIDE_SS;
- break;
- case 0x3e:
- insn->segmentOverride = SEG_OVERRIDE_DS;
- break;
- case 0x26:
- insn->segmentOverride = SEG_OVERRIDE_ES;
- break;
- case 0x64:
- insn->segmentOverride = SEG_OVERRIDE_FS;
- break;
- case 0x65:
- insn->segmentOverride = SEG_OVERRIDE_GS;
- break;
- default:
- unreachable("Unhandled override");
- }
- if (prefixGroups[1])
- dbgprintf(insn, "Redundant Group 2 prefix");
- prefixGroups[1] = TRUE;
- setPrefixPresent(insn, byte, prefixLocation);
- break;
- case 0x66: /* Operand-size override */
- if (prefixGroups[2])
- dbgprintf(insn, "Redundant Group 3 prefix");
- prefixGroups[2] = TRUE;
- hasOpSize = TRUE;
- setPrefixPresent(insn, byte, prefixLocation);
- break;
- case 0x67: /* Address-size override */
- if (prefixGroups[3])
- dbgprintf(insn, "Redundant Group 4 prefix");
- prefixGroups[3] = TRUE;
- hasAdSize = TRUE;
- setPrefixPresent(insn, byte, prefixLocation);
- break;
- default: /* Not a prefix byte */
- isPrefix = FALSE;
- break;
- }
-
- if (isPrefix)
- dbgprintf(insn, "Found prefix 0x%hhx", byte);
- }
-
- if (insn->mode == MODE_64BIT) {
- if ((byte & 0xf0) == 0x40) {
- uint8_t opcodeByte;
-
- if(lookAtByte(insn, &opcodeByte) || ((opcodeByte & 0xf0) == 0x40)) {
- dbgprintf(insn, "Redundant REX prefix");
- return -1;
- }
-
- insn->rexPrefix = byte;
- insn->necessaryPrefixLocation = insn->readerCursor - 2;
-
- dbgprintf(insn, "Found REX prefix 0x%hhx", byte);
- } else {
- unconsumeByte(insn);
- insn->necessaryPrefixLocation = insn->readerCursor - 1;
- }
- } else {
- unconsumeByte(insn);
- }
-
- if (insn->mode == MODE_16BIT) {
- insn->registerSize = (hasOpSize ? 4 : 2);
- insn->addressSize = (hasAdSize ? 4 : 2);
- insn->displacementSize = (hasAdSize ? 4 : 2);
- insn->immediateSize = (hasOpSize ? 4 : 2);
- } else if (insn->mode == MODE_32BIT) {
- insn->registerSize = (hasOpSize ? 2 : 4);
- insn->addressSize = (hasAdSize ? 2 : 4);
- insn->displacementSize = (hasAdSize ? 2 : 4);
-    insn->immediateSize = (hasOpSize ? 2 : 4);
- } else if (insn->mode == MODE_64BIT) {
- if (insn->rexPrefix && wFromREX(insn->rexPrefix)) {
- insn->registerSize = 8;
- insn->addressSize = (hasAdSize ? 4 : 8);
- insn->displacementSize = 4;
- insn->immediateSize = 4;
- } else if (insn->rexPrefix) {
- insn->registerSize = (hasOpSize ? 2 : 4);
- insn->addressSize = (hasAdSize ? 4 : 8);
- insn->displacementSize = (hasOpSize ? 2 : 4);
- insn->immediateSize = (hasOpSize ? 2 : 4);
- } else {
- insn->registerSize = (hasOpSize ? 2 : 4);
- insn->addressSize = (hasAdSize ? 4 : 8);
- insn->displacementSize = (hasOpSize ? 2 : 4);
- insn->immediateSize = (hasOpSize ? 2 : 4);
- }
- }
-
- return 0;
-}
-
-/*
- * readOpcode - Reads the opcode (excepting the ModR/M byte in the case of
- * extended or escape opcodes).
- *
- * @param insn - The instruction whose opcode is to be read.
- * @return - 0 if the opcode could be read successfully; nonzero otherwise.
- */
-static int readOpcode(struct InternalInstruction* insn) {
- /* Determine the length of the primary opcode */
-
- uint8_t current;
-
- dbgprintf(insn, "readOpcode()");
-
- insn->opcodeType = ONEBYTE;
-  if (consumeByte(insn, &current))
- return -1;
-
- if (current == 0x0f) {
- dbgprintf(insn, "Found a two-byte escape prefix (0x%hhx)", current);
-
- insn->twoByteEscape = current;
-
-    if (consumeByte(insn, &current))
- return -1;
-
- if (current == 0x38) {
- dbgprintf(insn, "Found a three-byte escape prefix (0x%hhx)", current);
-
- insn->threeByteEscape = current;
-
-      if (consumeByte(insn, &current))
- return -1;
-
- insn->opcodeType = THREEBYTE_38;
- } else if (current == 0x3a) {
- dbgprintf(insn, "Found a three-byte escape prefix (0x%hhx)", current);
-
- insn->threeByteEscape = current;
-
-      if (consumeByte(insn, &current))
- return -1;
-
- insn->opcodeType = THREEBYTE_3A;
- } else {
- dbgprintf(insn, "Didn't find a three-byte escape prefix");
-
- insn->opcodeType = TWOBYTE;
- }
- }
-
- /*
- * At this point we have consumed the full opcode.
- * Anything we consume from here on must be unconsumed.
- */
-
- insn->opcode = current;
-
- return 0;
-}
-
-static int readModRM(struct InternalInstruction* insn);
-
-/*
- * getIDWithAttrMask - Determines the ID of an instruction, consuming
- * the ModR/M byte as appropriate for extended and escape opcodes,
- * and using a supplied attribute mask.
- *
- * @param instructionID - A pointer whose target is filled in with the ID of the
- * instruction.
- * @param insn - The instruction whose ID is to be determined.
- * @param attrMask - The attribute mask to search.
- * @return - 0 if the ModR/M could be read when needed or was not
- * needed; nonzero otherwise.
- */
-static int getIDWithAttrMask(uint16_t* instructionID,
- struct InternalInstruction* insn,
- uint8_t attrMask) {
- BOOL hasModRMExtension;
-
- uint8_t instructionClass;
-
- instructionClass = contextForAttrs(attrMask);
-
- hasModRMExtension = modRMRequired(insn->opcodeType,
- instructionClass,
- insn->opcode);
-
- if (hasModRMExtension) {
- readModRM(insn);
-
- *instructionID = decode(insn->opcodeType,
- instructionClass,
- insn->opcode,
- insn->modRM);
- } else {
- *instructionID = decode(insn->opcodeType,
- instructionClass,
- insn->opcode,
- 0);
- }
-
- return 0;
-}
-
-/*
- * is16BitEquivalent - Determines whether two instruction names refer to
- * equivalent instructions but one is 16-bit whereas the other is not.
- *
- * @param orig - The instruction that is not 16-bit
- * @param equiv - The instruction that is 16-bit
- */
-static BOOL is16BitEquvalent(const char* orig, const char* equiv) {
- off_t i;
-
- for(i = 0;; i++) {
- if(orig[i] == '\0' && equiv[i] == '\0')
- return TRUE;
- if(orig[i] == '\0' || equiv[i] == '\0')
- return FALSE;
- if(orig[i] != equiv[i]) {
- if((orig[i] == 'Q' || orig[i] == 'L') && equiv[i] == 'W')
- continue;
- if((orig[i] == '6' || orig[i] == '3') && equiv[i] == '1')
- continue;
- if((orig[i] == '4' || orig[i] == '2') && equiv[i] == '6')
- continue;
- return FALSE;
- }
- }
-}
-
-/*
- * is64BitEquivalent - Determines whether two instruction names refer to
- * equivalent instructions but one is 64-bit whereas the other is not.
- *
- * @param orig - The instruction that is not 64-bit
- * @param equiv - The instruction that is 64-bit
- */
-static BOOL is64BitEquivalent(const char* orig, const char* equiv) {
- off_t i;
-
- for(i = 0;; i++) {
- if(orig[i] == '\0' && equiv[i] == '\0')
- return TRUE;
- if(orig[i] == '\0' || equiv[i] == '\0')
- return FALSE;
- if(orig[i] != equiv[i]) {
- if((orig[i] == 'W' || orig[i] == 'L') && equiv[i] == 'Q')
- continue;
- if((orig[i] == '1' || orig[i] == '3') && equiv[i] == '6')
- continue;
- if((orig[i] == '6' || orig[i] == '2') && equiv[i] == '4')
- continue;
- return FALSE;
- }
- }
-}
-
-
-/*
- * getID - Determines the ID of an instruction, consuming the ModR/M byte as
- * appropriate for extended and escape opcodes. Determines the attributes and
- * context for the instruction before doing so.
- *
- * @param insn - The instruction whose ID is to be determined.
- * @return - 0 if the ModR/M could be read when needed or was not needed;
- * nonzero otherwise.
- */
-static int getID(struct InternalInstruction* insn) {
- uint8_t attrMask;
- uint16_t instructionID;
-
- dbgprintf(insn, "getID()");
-
- attrMask = ATTR_NONE;
-
- if (insn->mode == MODE_64BIT)
- attrMask |= ATTR_64BIT;
-
- if (insn->rexPrefix & 0x08)
- attrMask |= ATTR_REXW;
-
- if (isPrefixAtLocation(insn, 0x66, insn->necessaryPrefixLocation))
- attrMask |= ATTR_OPSIZE;
- else if (isPrefixAtLocation(insn, 0xf3, insn->necessaryPrefixLocation))
- attrMask |= ATTR_XS;
- else if (isPrefixAtLocation(insn, 0xf2, insn->necessaryPrefixLocation))
- attrMask |= ATTR_XD;
-
- if(getIDWithAttrMask(&instructionID, insn, attrMask))
- return -1;
-
- /* The following clauses compensate for limitations of the tables. */
-
- if ((attrMask & ATTR_XD) && (attrMask & ATTR_REXW)) {
- /*
- * Although for SSE instructions it is usually necessary to treat REX.W+F2
- * as F2 for decode (in the absence of a 64BIT_REXW_XD category) there is
- * an occasional instruction where F2 is incidental and REX.W is the more
- * significant. If the decoded instruction is 32-bit and adding REX.W
- * instead of F2 changes a 32 to a 64, we adopt the new encoding.
- */
-
- struct InstructionSpecifier* spec;
- uint16_t instructionIDWithREXw;
- struct InstructionSpecifier* specWithREXw;
-
- spec = specifierForUID(instructionID);
-
- if (getIDWithAttrMask(&instructionIDWithREXw,
- insn,
- attrMask & (~ATTR_XD))) {
- /*
- * Decoding with REX.w would yield nothing; give up and return original
- * decode.
- */
-
- insn->instructionID = instructionID;
- insn->spec = spec;
- return 0;
- }
-
- specWithREXw = specifierForUID(instructionIDWithREXw);
-
- if (is64BitEquivalent(spec->name, specWithREXw->name)) {
- insn->instructionID = instructionIDWithREXw;
- insn->spec = specWithREXw;
- } else {
- insn->instructionID = instructionID;
- insn->spec = spec;
- }
- return 0;
- }
-
- if (insn->prefixPresent[0x66] && !(attrMask & ATTR_OPSIZE)) {
- /*
- * The instruction tables make no distinction between instructions that
- * allow OpSize anywhere (i.e., 16-bit operations) and those that need it in a
- * particular spot (i.e., many MMX operations). In general we're
- * conservative, but in the specific case where OpSize is present but not
- * in the right place we check if there's a 16-bit operation.
- */
-
- struct InstructionSpecifier* spec;
- uint16_t instructionIDWithOpsize;
- struct InstructionSpecifier* specWithOpsize;
-
- spec = specifierForUID(instructionID);
-
- if (getIDWithAttrMask(&instructionIDWithOpsize,
- insn,
- attrMask | ATTR_OPSIZE)) {
- /*
- * ModRM required with OpSize but not present; give up and return version
- * without OpSize set
- */
-
- insn->instructionID = instructionID;
- insn->spec = spec;
- return 0;
- }
-
- specWithOpsize = specifierForUID(instructionIDWithOpsize);
-
- if (is16BitEquivalent(spec->name, specWithOpsize->name)) {
- insn->instructionID = instructionIDWithOpsize;
- insn->spec = specWithOpsize;
- } else {
- insn->instructionID = instructionID;
- insn->spec = spec;
- }
- return 0;
- }
-
- insn->instructionID = instructionID;
- insn->spec = specifierForUID(insn->instructionID);
-
- return 0;
-}
-
-/*
- * readSIB - Consumes the SIB byte to determine addressing information for an
- * instruction.
- *
- * @param insn - The instruction whose SIB byte is to be read.
- * @return - 0 if the SIB byte was successfully read; nonzero otherwise.
- */
-static int readSIB(struct InternalInstruction* insn) {
- SIBIndex sibIndexBase = 0;
- SIBBase sibBaseBase = 0;
- uint8_t index, base;
-
- dbgprintf(insn, "readSIB()");
-
- if (insn->consumedSIB)
- return 0;
-
- insn->consumedSIB = TRUE;
-
- switch (insn->addressSize) {
- case 2:
- dbgprintf(insn, "SIB-based addressing doesn't work in 16-bit mode");
- return -1;
- break;
- case 4:
- sibIndexBase = SIB_INDEX_EAX;
- sibBaseBase = SIB_BASE_EAX;
- break;
- case 8:
- sibIndexBase = SIB_INDEX_RAX;
- sibBaseBase = SIB_BASE_RAX;
- break;
- }
-
- if (consumeByte(insn, &insn->sib))
- return -1;
-
- index = indexFromSIB(insn->sib) | (xFromREX(insn->rexPrefix) << 3);
-
- switch (index) {
- case 0x4:
- insn->sibIndex = SIB_INDEX_NONE;
- break;
- default:
- insn->sibIndex = (EABase)(sibIndexBase + index);
- if (insn->sibIndex == SIB_INDEX_sib ||
- insn->sibIndex == SIB_INDEX_sib64)
- insn->sibIndex = SIB_INDEX_NONE;
- break;
- }
-
- switch (scaleFromSIB(insn->sib)) {
- case 0:
- insn->sibScale = 1;
- break;
- case 1:
- insn->sibScale = 2;
- break;
- case 2:
- insn->sibScale = 4;
- break;
- case 3:
- insn->sibScale = 8;
- break;
- }
-
- base = baseFromSIB(insn->sib) | (bFromREX(insn->rexPrefix) << 3);
-
- switch (base) {
- case 0x5:
- switch (modFromModRM(insn->modRM)) {
- case 0x0:
- insn->eaDisplacement = EA_DISP_32;
- insn->sibBase = SIB_BASE_NONE;
- break;
- case 0x1:
- insn->eaDisplacement = EA_DISP_8;
- insn->sibBase = (insn->addressSize == 4 ?
- SIB_BASE_EBP : SIB_BASE_RBP);
- break;
- case 0x2:
- insn->eaDisplacement = EA_DISP_32;
- insn->sibBase = (insn->addressSize == 4 ?
- SIB_BASE_EBP : SIB_BASE_RBP);
- break;
- case 0x3:
- unreachable("Cannot have Mod = 0b11 and a SIB byte");
- }
- break;
- default:
- insn->sibBase = (EABase)(sibBaseBase + base);
- break;
- }
-
- return 0;
-}
-
-/*
- * readDisplacement - Consumes the displacement of an instruction.
- *
- * @param insn - The instruction whose displacement is to be read.
- * @return - 0 if the displacement byte was successfully read; nonzero
- * otherwise.
- */
-static int readDisplacement(struct InternalInstruction* insn) {
- int8_t d8;
- int16_t d16;
- int32_t d32;
-
- dbgprintf(insn, "readDisplacement()");
-
- if (insn->consumedDisplacement)
- return 0;
-
- insn->consumedDisplacement = TRUE;
-
- switch (insn->eaDisplacement) {
- case EA_DISP_NONE:
- insn->consumedDisplacement = FALSE;
- break;
- case EA_DISP_8:
- if (consumeInt8(insn, &d8))
- return -1;
- insn->displacement = d8;
- break;
- case EA_DISP_16:
- if (consumeInt16(insn, &d16))
- return -1;
- insn->displacement = d16;
- break;
- case EA_DISP_32:
- if (consumeInt32(insn, &d32))
- return -1;
- insn->displacement = d32;
- break;
- }
-
- insn->consumedDisplacement = TRUE;
- return 0;
-}
-
-/*
- * readModRM - Consumes all addressing information (ModR/M byte, SIB byte, and
- * displacement) for an instruction and interprets it.
- *
- * @param insn - The instruction whose addressing information is to be read.
- * @return - 0 if the information was successfully read; nonzero otherwise.
- */
-static int readModRM(struct InternalInstruction* insn) {
- uint8_t mod, rm, reg;
-
- dbgprintf(insn, "readModRM()");
-
- if (insn->consumedModRM)
- return 0;
-
- consumeByte(insn, &insn->modRM);
- insn->consumedModRM = TRUE;
-
- mod = modFromModRM(insn->modRM);
- rm = rmFromModRM(insn->modRM);
- reg = regFromModRM(insn->modRM);
-
- /*
- * This goes by insn->registerSize to pick the correct register, which messes
- * up if we're using (say) XMM or 8-bit register operands. That gets fixed in
- * fixupReg().
- */
- switch (insn->registerSize) {
- case 2:
- insn->regBase = MODRM_REG_AX;
- insn->eaRegBase = EA_REG_AX;
- break;
- case 4:
- insn->regBase = MODRM_REG_EAX;
- insn->eaRegBase = EA_REG_EAX;
- break;
- case 8:
- insn->regBase = MODRM_REG_RAX;
- insn->eaRegBase = EA_REG_RAX;
- break;
- }
-
- reg |= rFromREX(insn->rexPrefix) << 3;
- rm |= bFromREX(insn->rexPrefix) << 3;
-
- insn->reg = (Reg)(insn->regBase + reg);
-
- switch (insn->addressSize) {
- case 2:
- insn->eaBaseBase = EA_BASE_BX_SI;
-
- switch (mod) {
- case 0x0:
- if (rm == 0x6) {
- insn->eaBase = EA_BASE_NONE;
- insn->eaDisplacement = EA_DISP_16;
- if(readDisplacement(insn))
- return -1;
- } else {
- insn->eaBase = (EABase)(insn->eaBaseBase + rm);
- insn->eaDisplacement = EA_DISP_NONE;
- }
- break;
- case 0x1:
- insn->eaBase = (EABase)(insn->eaBaseBase + rm);
- insn->eaDisplacement = EA_DISP_8;
- if(readDisplacement(insn))
- return -1;
- break;
- case 0x2:
- insn->eaBase = (EABase)(insn->eaBaseBase + rm);
- insn->eaDisplacement = EA_DISP_16;
- if(readDisplacement(insn))
- return -1;
- break;
- case 0x3:
- insn->eaBase = (EABase)(insn->eaRegBase + rm);
- if(readDisplacement(insn))
- return -1;
- break;
- }
- break;
- case 4:
- case 8:
- insn->eaBaseBase = (insn->addressSize == 4 ? EA_BASE_EAX : EA_BASE_RAX);
-
- switch (mod) {
- case 0x0:
- insn->eaDisplacement = EA_DISP_NONE; /* readSIB may override this */
- switch (rm) {
- case 0x4:
- case 0xc: /* in case REXW.b is set */
- insn->eaBase = (insn->addressSize == 4 ?
- EA_BASE_sib : EA_BASE_sib64);
- readSIB(insn);
- if(readDisplacement(insn))
- return -1;
- break;
- case 0x5:
- insn->eaBase = EA_BASE_NONE;
- insn->eaDisplacement = EA_DISP_32;
- if(readDisplacement(insn))
- return -1;
- break;
- default:
- insn->eaBase = (EABase)(insn->eaBaseBase + rm);
- break;
- }
- break;
- case 0x1:
- case 0x2:
- insn->eaDisplacement = (mod == 0x1 ? EA_DISP_8 : EA_DISP_32);
- switch (rm) {
- case 0x4:
- case 0xc: /* in case REXW.b is set */
- insn->eaBase = EA_BASE_sib;
- readSIB(insn);
- if(readDisplacement(insn))
- return -1;
- break;
- default:
- insn->eaBase = (EABase)(insn->eaBaseBase + rm);
- if(readDisplacement(insn))
- return -1;
- break;
- }
- break;
- case 0x3:
- insn->eaDisplacement = EA_DISP_NONE;
- insn->eaBase = (EABase)(insn->eaRegBase + rm);
- break;
- }
- break;
- } /* switch (insn->addressSize) */
-
- return 0;
-}
-
-#define GENERIC_FIXUP_FUNC(name, base, prefix) \
- static uint8_t name(struct InternalInstruction *insn, \
- OperandType type, \
- uint8_t index, \
- uint8_t *valid) { \
- *valid = 1; \
- switch (type) { \
- default: \
- unreachable("Unhandled register type"); \
- case TYPE_Rv: \
- return base + index; \
- case TYPE_R8: \
- if(insn->rexPrefix && \
- index >= 4 && index <= 7) { \
- return prefix##_SPL + (index - 4); \
- } else { \
- return prefix##_AL + index; \
- } \
- case TYPE_R16: \
- return prefix##_AX + index; \
- case TYPE_R32: \
- return prefix##_EAX + index; \
- case TYPE_R64: \
- return prefix##_RAX + index; \
- case TYPE_XMM128: \
- case TYPE_XMM64: \
- case TYPE_XMM32: \
- case TYPE_XMM: \
- return prefix##_XMM0 + index; \
- case TYPE_MM64: \
- case TYPE_MM32: \
- case TYPE_MM: \
- if(index > 7) \
- *valid = 0; \
- return prefix##_MM0 + index; \
- case TYPE_SEGMENTREG: \
- if(index > 5) \
- *valid = 0; \
- return prefix##_ES + index; \
- case TYPE_DEBUGREG: \
- if(index > 7) \
- *valid = 0; \
- return prefix##_DR0 + index; \
- case TYPE_CR32: \
- if(index > 7) \
- *valid = 0; \
- return prefix##_ECR0 + index; \
- case TYPE_CR64: \
- if(index > 8) \
- *valid = 0; \
- return prefix##_RCR0 + index; \
- } \
- }
-
-/*
- * fixup*Value - Consults an operand type to determine the meaning of the
- * reg or R/M field. If the operand is an XMM operand, for example, the
- * register is XMM0 rather than AX, which is what readModRM() would otherwise
- * report.
- *
- * @param insn - The instruction containing the operand.
- * @param type - The operand type.
- * @param index - The existing value of the field as reported by readModRM().
- * @param valid - The address of a uint8_t. The target is set to 1 if the
- * field is valid for the register class; 0 if not.
- */
-GENERIC_FIXUP_FUNC(fixupRegValue, insn->regBase, MODRM_REG)
-GENERIC_FIXUP_FUNC(fixupRMValue, insn->eaRegBase, EA_REG)
-
-/*
- * fixupReg - Consults an operand specifier to determine which of the
- * fixup*Value functions to use in correcting readModRM()'s interpretation.
- *
- * @param insn - See fixup*Value().
- * @param op - The operand specifier.
- * @return - 0 if fixup was successful; -1 if the register returned was
- * invalid for its class.
- */
-static int fixupReg(struct InternalInstruction *insn,
- struct OperandSpecifier *op) {
- uint8_t valid;
-
- dbgprintf(insn, "fixupReg()");
-
- switch ((OperandEncoding)op->encoding) {
- default:
- unreachable("Expected a REG or R/M encoding in fixupReg");
- case ENCODING_REG:
- insn->reg = (Reg)fixupRegValue(insn,
- (OperandType)op->type,
- insn->reg - insn->regBase,
- &valid);
- if (!valid)
- return -1;
- break;
- case ENCODING_RM:
- if (insn->eaBase >= insn->eaRegBase) {
- insn->eaBase = (EABase)fixupRMValue(insn,
- (OperandType)op->type,
- insn->eaBase - insn->eaRegBase,
- &valid);
- if (!valid)
- return -1;
- }
- break;
- }
-
- return 0;
-}
-
-/*
- * readOpcodeModifier - Reads an operand from the opcode field of an
- * instruction. Handles AddRegFrm instructions.
- *
- * @param insn - The instruction whose opcode field is to be read.
- * (For escape opcodes the modifier is read from the ModR/M byte instead;
- * see MODIFIER_MODRM below.)
- */
-static void readOpcodeModifier(struct InternalInstruction* insn) {
- dbgprintf(insn, "readOpcodeModifier()");
-
- if (insn->consumedOpcodeModifier)
- return;
-
- insn->consumedOpcodeModifier = TRUE;
-
- switch(insn->spec->modifierType) {
- default:
- unreachable("Unknown modifier type.");
- case MODIFIER_NONE:
- unreachable("No modifier but an operand expects one.");
- case MODIFIER_OPCODE:
- insn->opcodeModifier = insn->opcode - insn->spec->modifierBase;
- break;
- case MODIFIER_MODRM:
- insn->opcodeModifier = insn->modRM - insn->spec->modifierBase;
- break;
- }
-}
-
-/*
- * readOpcodeRegister - Reads an operand from the opcode field of an
- * instruction and interprets it appropriately given the operand width.
- * Handles AddRegFrm instructions.
- *
- * @param insn - See readOpcodeModifier().
- * @param size - The width (in bytes) of the register being specified.
- * 1 means AL and friends, 2 means AX, 4 means EAX, and 8 means
- * RAX.
- */
-static void readOpcodeRegister(struct InternalInstruction* insn, uint8_t size) {
- dbgprintf(insn, "readOpcodeRegister()");
-
- readOpcodeModifier(insn);
-
- if (size == 0)
- size = insn->registerSize;
-
- switch (size) {
- case 1:
- insn->opcodeRegister = (Reg)(MODRM_REG_AL + ((bFromREX(insn->rexPrefix) << 3)
- | insn->opcodeModifier));
- if(insn->rexPrefix &&
- insn->opcodeRegister >= MODRM_REG_AL + 0x4 &&
- insn->opcodeRegister < MODRM_REG_AL + 0x8) {
- insn->opcodeRegister = (Reg)(MODRM_REG_SPL
- + (insn->opcodeRegister - MODRM_REG_AL - 4));
- }
-
- break;
- case 2:
- insn->opcodeRegister = (Reg)(MODRM_REG_AX
- + ((bFromREX(insn->rexPrefix) << 3)
- | insn->opcodeModifier));
- break;
- case 4:
- insn->opcodeRegister = (Reg)(MODRM_REG_EAX
- + ((bFromREX(insn->rexPrefix) << 3)
- | insn->opcodeModifier));
- break;
- case 8:
- insn->opcodeRegister = (Reg)(MODRM_REG_RAX
- + ((bFromREX(insn->rexPrefix) << 3)
- | insn->opcodeModifier));
- break;
- }
-}
-
-/*
- * readImmediate - Consumes an immediate operand from an instruction, given the
- * desired operand size.
- *
- * @param insn - The instruction whose operand is to be read.
- * @param size - The width (in bytes) of the operand.
- * @return - 0 if the immediate was successfully consumed; nonzero
- * otherwise.
- */
-static int readImmediate(struct InternalInstruction* insn, uint8_t size) {
- uint8_t imm8;
- uint16_t imm16;
- uint32_t imm32;
- uint64_t imm64;
-
- dbgprintf(insn, "readImmediate()");
-
- if (insn->numImmediatesConsumed == 2)
- unreachable("Already consumed two immediates");
-
- if (size == 0)
- size = insn->immediateSize;
- else
- insn->immediateSize = size;
-
- switch (size) {
- case 1:
- if (consumeByte(insn, &imm8))
- return -1;
- insn->immediates[insn->numImmediatesConsumed] = imm8;
- break;
- case 2:
- if (consumeUInt16(insn, &imm16))
- return -1;
- insn->immediates[insn->numImmediatesConsumed] = imm16;
- break;
- case 4:
- if (consumeUInt32(insn, &imm32))
- return -1;
- insn->immediates[insn->numImmediatesConsumed] = imm32;
- break;
- case 8:
- if (consumeUInt64(insn, &imm64))
- return -1;
- insn->immediates[insn->numImmediatesConsumed] = imm64;
- break;
- }
-
- insn->numImmediatesConsumed++;
-
- return 0;
-}
-
-/*
- * readOperands - Consults the specifier for an instruction and consumes all
- * operands for that instruction, interpreting them as it goes.
- *
- * @param insn - The instruction whose operands are to be read and interpreted.
- * @return - 0 if all operands could be read; nonzero otherwise.
- */
-static int readOperands(struct InternalInstruction* insn) {
- int index;
-
- dbgprintf(insn, "readOperands()");
-
- for (index = 0; index < X86_MAX_OPERANDS; ++index) {
- switch (insn->spec->operands[index].encoding) {
- case ENCODING_NONE:
- break;
- case ENCODING_REG:
- case ENCODING_RM:
- if (readModRM(insn))
- return -1;
- if (fixupReg(insn, &insn->spec->operands[index]))
- return -1;
- break;
- case ENCODING_CB:
- case ENCODING_CW:
- case ENCODING_CD:
- case ENCODING_CP:
- case ENCODING_CO:
- case ENCODING_CT:
- dbgprintf(insn, "We currently don't hande code-offset encodings");
- return -1;
- case ENCODING_IB:
- if (readImmediate(insn, 1))
- return -1;
- break;
- case ENCODING_IW:
- if (readImmediate(insn, 2))
- return -1;
- break;
- case ENCODING_ID:
- if (readImmediate(insn, 4))
- return -1;
- break;
- case ENCODING_IO:
- if (readImmediate(insn, 8))
- return -1;
- break;
- case ENCODING_Iv:
- readImmediate(insn, insn->immediateSize);
- break;
- case ENCODING_Ia:
- readImmediate(insn, insn->addressSize);
- break;
- case ENCODING_RB:
- readOpcodeRegister(insn, 1);
- break;
- case ENCODING_RW:
- readOpcodeRegister(insn, 2);
- break;
- case ENCODING_RD:
- readOpcodeRegister(insn, 4);
- break;
- case ENCODING_RO:
- readOpcodeRegister(insn, 8);
- break;
- case ENCODING_Rv:
- readOpcodeRegister(insn, 0);
- break;
- case ENCODING_I:
- readOpcodeModifier(insn);
- break;
- case ENCODING_DUP:
- break;
- default:
- dbgprintf(insn, "Encountered an operand with an unknown encoding.");
- return -1;
- }
- }
-
- return 0;
-}
-
-/*
- * decodeInstruction - Reads and interprets a full instruction provided by the
- * user.
- *
- * @param insn - A pointer to the instruction to be populated. Must be
- * pre-allocated.
- * @param reader - The function to be used to read the instruction's bytes.
- * @param readerArg - A generic argument to be passed to the reader to store
- * any internal state.
- * @param logger - If non-NULL, the function to be used to write log messages
- * and warnings.
- * @param loggerArg - A generic argument to be passed to the logger to store
- * any internal state.
- * @param startLoc - The address (in the reader's address space) of the first
- * byte in the instruction.
- * @param mode - The mode (real mode, IA-32e, or IA-32e in 64-bit mode) to
- * decode the instruction in.
- * @return - 0 if the instruction's memory could be read; nonzero if
- * not.
- */
-int decodeInstruction(struct InternalInstruction* insn,
- byteReader_t reader,
- void* readerArg,
- dlog_t logger,
- void* loggerArg,
- uint64_t startLoc,
- DisassemblerMode mode) {
- memset(insn, 0, sizeof(struct InternalInstruction));
-
- insn->reader = reader;
- insn->readerArg = readerArg;
- insn->dlog = logger;
- insn->dlogArg = loggerArg;
- insn->startLocation = startLoc;
- insn->readerCursor = startLoc;
- insn->mode = mode;
- insn->numImmediatesConsumed = 0;
-
- if (readPrefixes(insn) ||
- readOpcode(insn) ||
- getID(insn) ||
- insn->instructionID == 0 ||
- readOperands(insn))
- return -1;
-
- insn->length = insn->readerCursor - insn->startLocation;
-
- dbgprintf(insn, "Read from 0x%llx to 0x%llx: length %llu",
- startLoc, insn->readerCursor, insn->length);
-
- if (insn->length > 15)
- dbgprintf(insn, "Instruction exceeds 15-byte limit");
-
- return 0;
-}
diff --git a/libclamav/c++/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/libclamav/c++/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
deleted file mode 100644
index c03c07a..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
+++ /dev/null
@@ -1,515 +0,0 @@
-/*===- X86DisassemblerDecoderInternal.h - Disassembler decoder -----*- C -*-==*
- *
- * The LLVM Compiler Infrastructure
- *
- * This file is distributed under the University of Illinois Open Source
- * License. See LICENSE.TXT for details.
- *
- *===----------------------------------------------------------------------===*
- *
- * This file is part of the X86 Disassembler.
- * It contains the public interface of the instruction decoder.
- * Documentation for the disassembler can be found in X86Disassembler.h.
- *
- *===----------------------------------------------------------------------===*/
-
-#ifndef X86DISASSEMBLERDECODER_H
-#define X86DISASSEMBLERDECODER_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define INSTRUCTION_SPECIFIER_FIELDS \
- const char* name;
-
-#define INSTRUCTION_IDS \
- InstrUID* instructionIDs;
-
-#include "X86DisassemblerDecoderCommon.h"
-
-#undef INSTRUCTION_SPECIFIER_FIELDS
-#undef INSTRUCTION_IDS
-
-/*
- * Accessor functions for various fields of an Intel instruction
- */
-#define modFromModRM(modRM) ((modRM & 0xc0) >> 6)
-#define regFromModRM(modRM) ((modRM & 0x38) >> 3)
-#define rmFromModRM(modRM) (modRM & 0x7)
-#define scaleFromSIB(sib) ((sib & 0xc0) >> 6)
-#define indexFromSIB(sib) ((sib & 0x38) >> 3)
-#define baseFromSIB(sib) (sib & 0x7)
-#define wFromREX(rex) ((rex & 0x8) >> 3)
-#define rFromREX(rex) ((rex & 0x4) >> 2)
-#define xFromREX(rex) ((rex & 0x2) >> 1)
-#define bFromREX(rex) (rex & 0x1)
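
As a quick illustration of the bit layout these accessors assume, here is a
standalone check with one concrete ModR/M byte, SIB byte, and REX prefix; the
macro bodies are repeated so the snippet compiles on its own.

  #include <assert.h>
  #include <stdint.h>

  #define modFromModRM(modRM)  ((modRM & 0xc0) >> 6)
  #define regFromModRM(modRM)  ((modRM & 0x38) >> 3)
  #define rmFromModRM(modRM)   (modRM & 0x7)
  #define scaleFromSIB(sib)    ((sib & 0xc0) >> 6)
  #define indexFromSIB(sib)    ((sib & 0x38) >> 3)
  #define baseFromSIB(sib)     (sib & 0x7)
  #define bFromREX(rex)        (rex & 0x1)

  int main(void) {
    uint8_t modRM = 0x9c;              /* 10 011 100b                        */
    assert(modFromModRM(modRM) == 2);  /* mod = 10b: disp32 follows          */
    assert(regFromModRM(modRM) == 3);  /* reg = 011b                         */
    assert(rmFromModRM(modRM)  == 4);  /* rm  = 100b: a SIB byte follows     */

    uint8_t sib = 0xd8;                /* 11 011 000b                        */
    assert(scaleFromSIB(sib) == 3);    /* scale factor is 1 << 3 = 8         */
    assert(indexFromSIB(sib) == 3);
    assert(baseFromSIB(sib)  == 0);

    assert(bFromREX(0x49) == 1);       /* REX.B extends rm/base/opcode reg   */
    return 0;
  }
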
-
-/*
- * These enums represent Intel registers for use by the decoder.
- */
-
-#define REGS_8BIT \
- ENTRY(AL) \
- ENTRY(CL) \
- ENTRY(DL) \
- ENTRY(BL) \
- ENTRY(AH) \
- ENTRY(CH) \
- ENTRY(DH) \
- ENTRY(BH) \
- ENTRY(R8B) \
- ENTRY(R9B) \
- ENTRY(R10B) \
- ENTRY(R11B) \
- ENTRY(R12B) \
- ENTRY(R13B) \
- ENTRY(R14B) \
- ENTRY(R15B) \
- ENTRY(SPL) \
- ENTRY(BPL) \
- ENTRY(SIL) \
- ENTRY(DIL)
-
-#define EA_BASES_16BIT \
- ENTRY(BX_SI) \
- ENTRY(BX_DI) \
- ENTRY(BP_SI) \
- ENTRY(BP_DI) \
- ENTRY(SI) \
- ENTRY(DI) \
- ENTRY(BP) \
- ENTRY(BX) \
- ENTRY(R8W) \
- ENTRY(R9W) \
- ENTRY(R10W) \
- ENTRY(R11W) \
- ENTRY(R12W) \
- ENTRY(R13W) \
- ENTRY(R14W) \
- ENTRY(R15W)
-
-#define REGS_16BIT \
- ENTRY(AX) \
- ENTRY(CX) \
- ENTRY(DX) \
- ENTRY(BX) \
- ENTRY(SP) \
- ENTRY(BP) \
- ENTRY(SI) \
- ENTRY(DI) \
- ENTRY(R8W) \
- ENTRY(R9W) \
- ENTRY(R10W) \
- ENTRY(R11W) \
- ENTRY(R12W) \
- ENTRY(R13W) \
- ENTRY(R14W) \
- ENTRY(R15W)
-
-#define EA_BASES_32BIT \
- ENTRY(EAX) \
- ENTRY(ECX) \
- ENTRY(EDX) \
- ENTRY(EBX) \
- ENTRY(sib) \
- ENTRY(EBP) \
- ENTRY(ESI) \
- ENTRY(EDI) \
- ENTRY(R8D) \
- ENTRY(R9D) \
- ENTRY(R10D) \
- ENTRY(R11D) \
- ENTRY(R12D) \
- ENTRY(R13D) \
- ENTRY(R14D) \
- ENTRY(R15D)
-
-#define REGS_32BIT \
- ENTRY(EAX) \
- ENTRY(ECX) \
- ENTRY(EDX) \
- ENTRY(EBX) \
- ENTRY(ESP) \
- ENTRY(EBP) \
- ENTRY(ESI) \
- ENTRY(EDI) \
- ENTRY(R8D) \
- ENTRY(R9D) \
- ENTRY(R10D) \
- ENTRY(R11D) \
- ENTRY(R12D) \
- ENTRY(R13D) \
- ENTRY(R14D) \
- ENTRY(R15D)
-
-#define EA_BASES_64BIT \
- ENTRY(RAX) \
- ENTRY(RCX) \
- ENTRY(RDX) \
- ENTRY(RBX) \
- ENTRY(sib64) \
- ENTRY(RBP) \
- ENTRY(RSI) \
- ENTRY(RDI) \
- ENTRY(R8) \
- ENTRY(R9) \
- ENTRY(R10) \
- ENTRY(R11) \
- ENTRY(R12) \
- ENTRY(R13) \
- ENTRY(R14) \
- ENTRY(R15)
-
-#define REGS_64BIT \
- ENTRY(RAX) \
- ENTRY(RCX) \
- ENTRY(RDX) \
- ENTRY(RBX) \
- ENTRY(RSP) \
- ENTRY(RBP) \
- ENTRY(RSI) \
- ENTRY(RDI) \
- ENTRY(R8) \
- ENTRY(R9) \
- ENTRY(R10) \
- ENTRY(R11) \
- ENTRY(R12) \
- ENTRY(R13) \
- ENTRY(R14) \
- ENTRY(R15)
-
-#define REGS_MMX \
- ENTRY(MM0) \
- ENTRY(MM1) \
- ENTRY(MM2) \
- ENTRY(MM3) \
- ENTRY(MM4) \
- ENTRY(MM5) \
- ENTRY(MM6) \
- ENTRY(MM7)
-
-#define REGS_XMM \
- ENTRY(XMM0) \
- ENTRY(XMM1) \
- ENTRY(XMM2) \
- ENTRY(XMM3) \
- ENTRY(XMM4) \
- ENTRY(XMM5) \
- ENTRY(XMM6) \
- ENTRY(XMM7) \
- ENTRY(XMM8) \
- ENTRY(XMM9) \
- ENTRY(XMM10) \
- ENTRY(XMM11) \
- ENTRY(XMM12) \
- ENTRY(XMM13) \
- ENTRY(XMM14) \
- ENTRY(XMM15)
-
-#define REGS_SEGMENT \
- ENTRY(ES) \
- ENTRY(CS) \
- ENTRY(SS) \
- ENTRY(DS) \
- ENTRY(FS) \
- ENTRY(GS)
-
-#define REGS_DEBUG \
- ENTRY(DR0) \
- ENTRY(DR1) \
- ENTRY(DR2) \
- ENTRY(DR3) \
- ENTRY(DR4) \
- ENTRY(DR5) \
- ENTRY(DR6) \
- ENTRY(DR7)
-
-#define REGS_CONTROL_32BIT \
- ENTRY(ECR0) \
- ENTRY(ECR1) \
- ENTRY(ECR2) \
- ENTRY(ECR3) \
- ENTRY(ECR4) \
- ENTRY(ECR5) \
- ENTRY(ECR6) \
- ENTRY(ECR7)
-
-#define REGS_CONTROL_64BIT \
- ENTRY(RCR0) \
- ENTRY(RCR1) \
- ENTRY(RCR2) \
- ENTRY(RCR3) \
- ENTRY(RCR4) \
- ENTRY(RCR5) \
- ENTRY(RCR6) \
- ENTRY(RCR7) \
- ENTRY(RCR8)
-
-#define ALL_EA_BASES \
- EA_BASES_16BIT \
- EA_BASES_32BIT \
- EA_BASES_64BIT
-
-#define ALL_SIB_BASES \
- REGS_32BIT \
- REGS_64BIT
-
-#define ALL_REGS \
- REGS_8BIT \
- REGS_16BIT \
- REGS_32BIT \
- REGS_64BIT \
- REGS_MMX \
- REGS_XMM \
- REGS_SEGMENT \
- REGS_DEBUG \
- REGS_CONTROL_32BIT \
- REGS_CONTROL_64BIT \
- ENTRY(RIP)
-
-/*
- * EABase - All possible values of the base field for effective-address
- * computations, a.k.a. the Mod and R/M fields of the ModR/M byte. We
- * distinguish between bases (EA_BASE_*) and registers that just happen to be
- * referred to when Mod == 0b11 (EA_REG_*).
- */
-typedef enum {
- EA_BASE_NONE,
-#define ENTRY(x) EA_BASE_##x,
- ALL_EA_BASES
-#undef ENTRY
-#define ENTRY(x) EA_REG_##x,
- ALL_REGS
-#undef ENTRY
- EA_max
-} EABase;
-
-/*
- * SIBIndex - All possible values of the SIB index field.
- * Borrows entries from ALL_EA_BASES with the special case that
- * sib is synonymous with NONE.
- */
-typedef enum {
- SIB_INDEX_NONE,
-#define ENTRY(x) SIB_INDEX_##x,
- ALL_EA_BASES
-#undef ENTRY
- SIB_INDEX_max
-} SIBIndex;
-
-/*
- * SIBBase - All possible values of the SIB base field.
- */
-typedef enum {
- SIB_BASE_NONE,
-#define ENTRY(x) SIB_BASE_##x,
- ALL_SIB_BASES
-#undef ENTRY
- SIB_BASE_max
-} SIBBase;
-
-/*
- * EADisplacement - Possible displacement types for effective-address
- * computations.
- */
-typedef enum {
- EA_DISP_NONE,
- EA_DISP_8,
- EA_DISP_16,
- EA_DISP_32
-} EADisplacement;
-
-/*
- * Reg - All possible values of the reg field in the ModR/M byte.
- */
-typedef enum {
-#define ENTRY(x) MODRM_REG_##x,
- ALL_REGS
-#undef ENTRY
- MODRM_REG_max
-} Reg;
-
-/*
- * SegmentOverride - All possible segment overrides.
- */
-typedef enum {
- SEG_OVERRIDE_NONE,
- SEG_OVERRIDE_CS,
- SEG_OVERRIDE_SS,
- SEG_OVERRIDE_DS,
- SEG_OVERRIDE_ES,
- SEG_OVERRIDE_FS,
- SEG_OVERRIDE_GS,
- SEG_OVERRIDE_max
-} SegmentOverride;
-
-typedef uint8_t BOOL;
-
-/*
- * byteReader_t - Type for the byte reader that the consumer must provide to
- * the decoder. Reads a single byte from the instruction's address space.
- * @param arg - A baton that the consumer can associate with any internal
- * state that it needs.
- * @param byte - A pointer to a single byte in memory that should be set to
- * contain the value at address.
- * @param address - The address in the instruction's address space that should
- * be read from.
- * @return - -1 if the byte cannot be read for any reason; 0 otherwise.
- */
-typedef int (*byteReader_t)(void* arg, uint8_t* byte, uint64_t address);
-
-/*
- * dlog_t - Type for the logging function that the consumer can provide to
- * get debugging output from the decoder.
- * @param arg - A baton that the consumer can associate with any internal
- * state that it needs.
- * @param log - A string that contains the message. Will be reused after
- * the logger returns.
- */
-typedef void (*dlog_t)(void* arg, const char *log);
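
For illustration, a minimal consumer-side sketch of these two callbacks; the
struct and function names below are hypothetical and not part of this header.

  #include <stdint.h>
  #include <stdio.h>

  /* A bounds-checked reader over a byte buffer that starts at 'base' in the
     instruction's address space. */
  struct byteBuffer {
    const uint8_t *bytes;
    uint64_t base;
    uint64_t size;
  };

  static int bufferReader(void *arg, uint8_t *byte, uint64_t address) {
    const struct byteBuffer *buf = (const struct byteBuffer *)arg;
    if (address < buf->base || address - buf->base >= buf->size)
      return -1;                        /* out of range: decoding fails */
    *byte = buf->bytes[address - buf->base];
    return 0;
  }

  /* A logger that forwards decoder messages to stderr. */
  static void stderrLogger(void *arg, const char *log) {
    (void)arg;
    fprintf(stderr, "decoder: %s\n", log);
  }
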
-
-/*
- * The x86 internal instruction, which is produced by the decoder.
- */
-struct InternalInstruction {
- /* Reader interface (C) */
- byteReader_t reader;
- /* Opaque value passed to the reader */
- void* readerArg;
- /* The address of the next byte to read via the reader */
- uint64_t readerCursor;
-
- /* Logger interface (C) */
- dlog_t dlog;
- /* Opaque value passed to the logger */
- void* dlogArg;
-
- /* General instruction information */
-
- /* The mode to disassemble for (64-bit, protected, real) */
- DisassemblerMode mode;
- /* The start of the instruction, usable with the reader */
- uint64_t startLocation;
- /* The length of the instruction, in bytes */
- size_t length;
-
- /* Prefix state */
-
- /* 1 if the prefix byte corresponding to the entry is present; 0 if not */
- uint8_t prefixPresent[0x100];
- /* contains the location (for use with the reader) of the prefix byte */
- uint64_t prefixLocations[0x100];
- /* The value of the REX prefix, if present */
- uint8_t rexPrefix;
- /* The location of the REX prefix */
- uint64_t rexLocation;
- /* The location where a mandatory prefix would have to be (i.e., right before
- the opcode, or right before the REX prefix if one is present) */
- uint64_t necessaryPrefixLocation;
- /* The segment override type */
- SegmentOverride segmentOverride;
-
- /* Sizes of various critical pieces of data */
- uint8_t registerSize;
- uint8_t addressSize;
- uint8_t displacementSize;
- uint8_t immediateSize;
-
- /* opcode state */
-
- /* The value of the two-byte escape prefix (usually 0x0f) */
- uint8_t twoByteEscape;
- /* The value of the three-byte escape prefix (usually 0x38 or 0x3a) */
- uint8_t threeByteEscape;
- /* The last byte of the opcode, not counting any ModR/M extension */
- uint8_t opcode;
- /* The ModR/M byte of the instruction, if it is an opcode extension */
- uint8_t modRMExtension;
-
- /* decode state */
-
- /* The type of opcode, used for indexing into the array of decode tables */
- OpcodeType opcodeType;
- /* The instruction ID, extracted from the decode table */
- uint16_t instructionID;
- /* The specifier for the instruction, from the instruction info table */
- struct InstructionSpecifier* spec;
-
- /* state for additional bytes, consumed during operand decode. Pattern:
- consumed___ indicates that the byte was already consumed and does not
- need to be consumed again */
-
- /* The ModR/M byte, which contains most register operands and some portion of
- all memory operands */
- BOOL consumedModRM;
- uint8_t modRM;
-
- /* The SIB byte, used for more complex 32- or 64-bit memory operands */
- BOOL consumedSIB;
- uint8_t sib;
-
- /* The displacement, used for memory operands */
- BOOL consumedDisplacement;
- int32_t displacement;
-
- /* Immediates. There can be two in some cases */
- uint8_t numImmediatesConsumed;
- uint8_t numImmediatesTranslated;
- uint64_t immediates[2];
-
- /* A register or immediate operand encoded into the opcode */
- BOOL consumedOpcodeModifier;
- uint8_t opcodeModifier;
- Reg opcodeRegister;
-
- /* Portions of the ModR/M byte */
-
- /* These fields determine the allowable values for the ModR/M fields, which
- depend on operand and address widths */
- EABase eaBaseBase;
- EABase eaRegBase;
- Reg regBase;
-
- /* The Mod and R/M fields can encode a base for an effective address, or a
- register. These are separated into two fields here */
- EABase eaBase;
- EADisplacement eaDisplacement;
- /* The reg field always encodes a register */
- Reg reg;
-
- /* SIB state */
- SIBIndex sibIndex;
- uint8_t sibScale;
- SIBBase sibBase;
-};
-
-/* decodeInstruction - Decode one instruction and store the decoding results in
- * a buffer provided by the consumer.
- * @param insn - The buffer to store the instruction in. Allocated by the
- * consumer.
- * @param reader - The byteReader_t for the bytes to be read.
- * @param readerArg - An argument to pass to the reader for storing context
- * specific to the consumer. May be NULL.
- * @param logger - The dlog_t to be used in printing status messages from the
- * disassembler. May be NULL.
- * @param loggerArg - An argument to pass to the logger for storing context
- * specific to the logger. May be NULL.
- * @param startLoc - The address (in the reader's address space) of the first
- * byte in the instruction.
- * @param mode - The mode (16-bit, 32-bit, 64-bit) to decode in.
- * @return - Nonzero if there was an error during decode, 0 otherwise.
- */
-int decodeInstruction(struct InternalInstruction* insn,
- byteReader_t reader,
- void* readerArg,
- dlog_t logger,
- void* loggerArg,
- uint64_t startLoc,
- DisassemblerMode mode);
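
A hypothetical usage sketch: decode one 64-bit instruction from a small
in-memory buffer. It assumes this header and the generated decoder tables are
linked in; passing NULL for the logger simply disables debug output.

  #include <stdint.h>
  #include <stdio.h>
  #include "X86DisassemblerDecoder.h"

  static const uint8_t kBytes[] = { 0x48, 0x89, 0xd8 };   /* mov %rbx, %rax */

  static int readByte(void *arg, uint8_t *byte, uint64_t address) {
    (void)arg;
    if (address >= sizeof(kBytes))
      return -1;
    *byte = kBytes[address];
    return 0;
  }

  int disassembleOne(void) {
    struct InternalInstruction insn;
    if (decodeInstruction(&insn, readByte, NULL, NULL, NULL,
                          /* startLoc */ 0, MODE_64BIT))
      return -1;                                 /* could not decode */
    printf("decoded %u byte(s), instruction ID %u\n",
           (unsigned)insn.length, (unsigned)insn.instructionID);
    return 0;
  }
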
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/libclamav/c++/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h b/libclamav/c++/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
deleted file mode 100644
index c213f89..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h
+++ /dev/null
@@ -1,355 +0,0 @@
-/*===- X86DisassemblerDecoderCommon.h - Disassembler decoder -------*- C -*-==*
- *
- * The LLVM Compiler Infrastructure
- *
- * This file is distributed under the University of Illinois Open Source
- * License. See LICENSE.TXT for details.
- *
- *===----------------------------------------------------------------------===*
- *
- * This file is part of the X86 Disassembler.
- * It contains common definitions used by both the disassembler and the table
- * generator.
- * Documentation for the disassembler can be found in X86Disassembler.h.
- *
- *===----------------------------------------------------------------------===*/
-
-/*
- * This header file provides those definitions that need to be shared between
- * the decoder and the table generator in a C-friendly manner.
- */
-
-#ifndef X86DISASSEMBLERDECODERCOMMON_H
-#define X86DISASSEMBLERDECODERCOMMON_H
-
-#include "llvm/System/DataTypes.h"
-
-#define INSTRUCTIONS_SYM x86DisassemblerInstrSpecifiers
-#define CONTEXTS_SYM x86DisassemblerContexts
-#define ONEBYTE_SYM x86DisassemblerOneByteOpcodes
-#define TWOBYTE_SYM x86DisassemblerTwoByteOpcodes
-#define THREEBYTE38_SYM x86DisassemblerThreeByte38Opcodes
-#define THREEBYTE3A_SYM x86DisassemblerThreeByte3AOpcodes
-
-#define INSTRUCTIONS_STR "x86DisassemblerInstrSpecifiers"
-#define CONTEXTS_STR "x86DisassemblerContexts"
-#define ONEBYTE_STR "x86DisassemblerOneByteOpcodes"
-#define TWOBYTE_STR "x86DisassemblerTwoByteOpcodes"
-#define THREEBYTE38_STR "x86DisassemblerThreeByte38Opcodes"
-#define THREEBYTE3A_STR "x86DisassemblerThreeByte3AOpcodes"
-
-/*
- * Attributes of an instruction that must be known before the opcode can be
- * processed correctly. Most of these indicate the presence of particular
- * prefixes, but ATTR_64BIT is simply an attribute of the decoding context.
- */
-#define ATTRIBUTE_BITS \
- ENUM_ENTRY(ATTR_NONE, 0x00) \
- ENUM_ENTRY(ATTR_64BIT, 0x01) \
- ENUM_ENTRY(ATTR_XS, 0x02) \
- ENUM_ENTRY(ATTR_XD, 0x04) \
- ENUM_ENTRY(ATTR_REXW, 0x08) \
- ENUM_ENTRY(ATTR_OPSIZE, 0x10)
-
-#define ENUM_ENTRY(n, v) n = v,
-enum attributeBits {
- ATTRIBUTE_BITS
- ATTR_max
-};
-#undef ENUM_ENTRY
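
To make the flag arithmetic concrete, here is a small standalone example of
assembling a mask the way getID() in X86DisassemblerDecoder.c does, for an
OPSIZE-prefixed SSE instruction decoded in 64-bit mode; the prefix bookkeeping
variables are made up for the example.

  #include <assert.h>

  enum { /* same values as ATTRIBUTE_BITS above */
    ATTR_NONE = 0x00, ATTR_64BIT = 0x01, ATTR_XS = 0x02,
    ATTR_XD = 0x04, ATTR_REXW = 0x08, ATTR_OPSIZE = 0x10
  };

  int main(void) {
    /* e.g. "66 0f 58 c1" (addpd %xmm1, %xmm0) in 64-bit mode, with no REX
       prefix and no repeat prefixes */
    int mode64 = 1, rexPrefix = 0x00, has66 = 1, hasF3 = 0, hasF2 = 0;
    int attrMask = ATTR_NONE;

    if (mode64)             attrMask |= ATTR_64BIT;
    if (rexPrefix & 0x08)   attrMask |= ATTR_REXW;
    if (has66)              attrMask |= ATTR_OPSIZE;
    else if (hasF3)         attrMask |= ATTR_XS;
    else if (hasF2)         attrMask |= ATTR_XD;

    assert(attrMask == (ATTR_64BIT | ATTR_OPSIZE));   /* 0x11 */
    return 0;
  }
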
-
-/*
- * Combinations of the above attributes that are relevant to instruction
- * decode. Although other combinations are possible, they can be reduced to
- * these without affecting the ultimately decoded instruction.
- */
-
-/* Class name Rank Rationale for rank assignment */
-#define INSTRUCTION_CONTEXTS \
- ENUM_ENTRY(IC, 0, "says nothing about the instruction") \
- ENUM_ENTRY(IC_64BIT, 1, "says the instruction applies in " \
- "64-bit mode but no more") \
- ENUM_ENTRY(IC_OPSIZE, 3, "requires an OPSIZE prefix, so " \
- "operands change width") \
- ENUM_ENTRY(IC_XD, 2, "may say something about the opcode " \
- "but not the operands") \
- ENUM_ENTRY(IC_XS, 2, "may say something about the opcode " \
- "but not the operands") \
- ENUM_ENTRY(IC_64BIT_REXW, 4, "requires a REX.W prefix, so operands "\
- "change width; overrides IC_OPSIZE") \
- ENUM_ENTRY(IC_64BIT_OPSIZE, 3, "Just as meaningful as IC_OPSIZE") \
- ENUM_ENTRY(IC_64BIT_XD, 5, "XD instructions are SSE; REX.W is " \
- "secondary") \
- ENUM_ENTRY(IC_64BIT_XS, 5, "Just as meaningful as IC_64BIT_XD") \
- ENUM_ENTRY(IC_64BIT_REXW_XS, 6, "OPSIZE could mean a different " \
- "opcode") \
- ENUM_ENTRY(IC_64BIT_REXW_XD, 6, "Just as meaningful as " \
- "IC_64BIT_REXW_XS") \
- ENUM_ENTRY(IC_64BIT_REXW_OPSIZE, 7, "The Dynamic Duo! Prefer over all " \
- "else because this changes most " \
- "operands' meaning")
-
-#define ENUM_ENTRY(n, r, d) n,
-typedef enum {
- INSTRUCTION_CONTEXTS
- IC_max
-} InstructionContext;
-#undef ENUM_ENTRY
-
-/*
- * Opcode types, which determine which decode table to use, both in the Intel
- * manual and also for the decoder.
- */
-typedef enum {
- ONEBYTE = 0,
- TWOBYTE = 1,
- THREEBYTE_38 = 2,
- THREEBYTE_3A = 3
-} OpcodeType;
-
-/*
- * The following structs are used for the hierarchical decode table. After
- * determining the instruction's class (i.e., which IC_* constant applies to
- * it), the decoder reads the opcode. Some instructions require specific
- * values of the ModR/M byte, so the ModR/M byte indexes into the final table.
- *
- * If a ModR/M byte is not required, "required" is left unset, and the values
- * for each instructionID are identical.
- */
-
-typedef uint16_t InstrUID;
-
-/*
- * ModRMDecisionType - describes the type of ModR/M decision, allowing the
- * consumer to determine the number of entries in it.
- *
- * MODRM_ONEENTRY - No matter what the value of the ModR/M byte is, the decoded
- * instruction is the same.
- * MODRM_SPLITRM - If the ModR/M byte is between 0x00 and 0xbf, the opcode
- * corresponds to one instruction; otherwise, it corresponds to
- * a different instruction.
- * MODRM_FULL - Potentially, each value of the ModR/M byte could correspond
- * to a different instruction.
- */
-
-#define MODRMTYPES \
- ENUM_ENTRY(MODRM_ONEENTRY) \
- ENUM_ENTRY(MODRM_SPLITRM) \
- ENUM_ENTRY(MODRM_FULL)
-
-#define ENUM_ENTRY(n) n,
-typedef enum {
- MODRMTYPES
- MODRM_max
-} ModRMDecisionType;
-#undef ENUM_ENTRY
-
-/*
- * ModRMDecision - Specifies whether a ModR/M byte is needed and (if so) which
- * instruction each possible value of the ModR/M byte corresponds to. Once
- * this information is known, we have narrowed down to a single instruction.
- */
-struct ModRMDecision {
- uint8_t modrm_type;
-
- /* The macro below must be defined wherever this file is included. */
- INSTRUCTION_IDS
-};
-
-/*
- * OpcodeDecision - Specifies which set of ModR/M->instruction tables to look at
- * given a particular opcode.
- */
-struct OpcodeDecision {
- struct ModRMDecision modRMDecisions[256];
-};
-
-/*
- * ContextDecision - Specifies which opcode->instruction tables to look at given
- * a particular context (set of attributes). Since there are many possible
- * contexts, the decoder first uses CONTEXTS_SYM to determine which context
- * applies given a specific set of attributes. Hence there are only IC_max
- * entries in this table, rather than 2^(ATTR_max).
- */
-struct ContextDecision {
- struct OpcodeDecision opcodeDecisions[IC_max];
-};
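
Taken together, these structs describe a three-level lookup: attribute mask to
context, context plus opcode to a ModRMDecision, and (if needed) ModR/M byte to
an InstrUID. The sketch below approximates the decode() helper in
X86DisassemblerDecoder.c, assuming CONTEXTS_SYM and a generated ContextDecision
table are available and that instructionIDs is the field supplied by the
INSTRUCTION_IDS macro; bounds checking is omitted.

  /* Sketch only: attribute mask -> InstructionContext -> opcode -> ModR/M. */
  static InstrUID lookupUID(const struct ContextDecision *opcodeTable,
                            uint8_t attrMask, uint8_t opcode, uint8_t modRM) {
    InstructionContext ctx = (InstructionContext)CONTEXTS_SYM[attrMask];
    const struct ModRMDecision *dec =
        &opcodeTable->opcodeDecisions[ctx].modRMDecisions[opcode];

    switch (dec->modrm_type) {
    case MODRM_ONEENTRY:                  /* ModR/M value is irrelevant     */
      return dec->instructionIDs[0];
    case MODRM_SPLITRM:                   /* register vs. memory forms only */
      return dec->instructionIDs[modRM >= 0xc0 ? 1 : 0];
    case MODRM_FULL:                      /* one entry per ModR/M value     */
      return dec->instructionIDs[modRM];
    default:
      return 0;
    }
  }
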
-
-/*
- * Physical encodings of instruction operands.
- */
-
-#define ENCODINGS \
- ENUM_ENTRY(ENCODING_NONE, "") \
- ENUM_ENTRY(ENCODING_REG, "Register operand in ModR/M byte.") \
- ENUM_ENTRY(ENCODING_RM, "R/M operand in ModR/M byte.") \
- ENUM_ENTRY(ENCODING_CB, "1-byte code offset (possible new CS value)") \
- ENUM_ENTRY(ENCODING_CW, "2-byte") \
- ENUM_ENTRY(ENCODING_CD, "4-byte") \
- ENUM_ENTRY(ENCODING_CP, "6-byte") \
- ENUM_ENTRY(ENCODING_CO, "8-byte") \
- ENUM_ENTRY(ENCODING_CT, "10-byte") \
- ENUM_ENTRY(ENCODING_IB, "1-byte immediate") \
- ENUM_ENTRY(ENCODING_IW, "2-byte") \
- ENUM_ENTRY(ENCODING_ID, "4-byte") \
- ENUM_ENTRY(ENCODING_IO, "8-byte") \
- ENUM_ENTRY(ENCODING_RB, "(AL..DIL, R8L..R15L) Register code added to " \
- "the opcode byte") \
- ENUM_ENTRY(ENCODING_RW, "(AX..DI, R8W..R15W)") \
- ENUM_ENTRY(ENCODING_RD, "(EAX..EDI, R8D..R15D)") \
- ENUM_ENTRY(ENCODING_RO, "(RAX..RDI, R8..R15)") \
- ENUM_ENTRY(ENCODING_I, "Position on floating-point stack added to the " \
- "opcode byte") \
- \
- ENUM_ENTRY(ENCODING_Iv, "Immediate of operand size") \
- ENUM_ENTRY(ENCODING_Ia, "Immediate of address size") \
- ENUM_ENTRY(ENCODING_Rv, "Register code of operand size added to the " \
- "opcode byte") \
- ENUM_ENTRY(ENCODING_DUP, "Duplicate of another operand; ID is encoded " \
- "in type")
-
-#define ENUM_ENTRY(n, d) n,
- typedef enum {
- ENCODINGS
- ENCODING_max
- } OperandEncoding;
-#undef ENUM_ENTRY
-
-/*
- * Semantic interpretations of instruction operands.
- */
-
-#define TYPES \
- ENUM_ENTRY(TYPE_NONE, "") \
- ENUM_ENTRY(TYPE_REL8, "1-byte immediate address") \
- ENUM_ENTRY(TYPE_REL16, "2-byte") \
- ENUM_ENTRY(TYPE_REL32, "4-byte") \
- ENUM_ENTRY(TYPE_REL64, "8-byte") \
- ENUM_ENTRY(TYPE_PTR1616, "2+2-byte segment+offset address") \
- ENUM_ENTRY(TYPE_PTR1632, "2+4-byte") \
- ENUM_ENTRY(TYPE_PTR1664, "2+8-byte") \
- ENUM_ENTRY(TYPE_R8, "1-byte register operand") \
- ENUM_ENTRY(TYPE_R16, "2-byte") \
- ENUM_ENTRY(TYPE_R32, "4-byte") \
- ENUM_ENTRY(TYPE_R64, "8-byte") \
- ENUM_ENTRY(TYPE_IMM8, "1-byte immediate operand") \
- ENUM_ENTRY(TYPE_IMM16, "2-byte") \
- ENUM_ENTRY(TYPE_IMM32, "4-byte") \
- ENUM_ENTRY(TYPE_IMM64, "8-byte") \
- ENUM_ENTRY(TYPE_RM8, "1-byte register or memory operand") \
- ENUM_ENTRY(TYPE_RM16, "2-byte") \
- ENUM_ENTRY(TYPE_RM32, "4-byte") \
- ENUM_ENTRY(TYPE_RM64, "8-byte") \
- ENUM_ENTRY(TYPE_M, "Memory operand") \
- ENUM_ENTRY(TYPE_M8, "1-byte") \
- ENUM_ENTRY(TYPE_M16, "2-byte") \
- ENUM_ENTRY(TYPE_M32, "4-byte") \
- ENUM_ENTRY(TYPE_M64, "8-byte") \
- ENUM_ENTRY(TYPE_LEA, "Effective address") \
- ENUM_ENTRY(TYPE_M128, "16-byte (SSE/SSE2)") \
- ENUM_ENTRY(TYPE_M1616, "2+2-byte segment+offset address") \
- ENUM_ENTRY(TYPE_M1632, "2+4-byte") \
- ENUM_ENTRY(TYPE_M1664, "2+8-byte") \
- ENUM_ENTRY(TYPE_M16_32, "2+4-byte two-part memory operand (LIDT, LGDT)") \
- ENUM_ENTRY(TYPE_M16_16, "2+2-byte (BOUND)") \
- ENUM_ENTRY(TYPE_M32_32, "4+4-byte (BOUND)") \
- ENUM_ENTRY(TYPE_M16_64, "2+8-byte (LIDT, LGDT)") \
- ENUM_ENTRY(TYPE_MOFFS8, "1-byte memory offset (relative to segment " \
- "base)") \
- ENUM_ENTRY(TYPE_MOFFS16, "2-byte") \
- ENUM_ENTRY(TYPE_MOFFS32, "4-byte") \
- ENUM_ENTRY(TYPE_MOFFS64, "8-byte") \
- ENUM_ENTRY(TYPE_SREG, "Byte with single bit set: 0 = ES, 1 = CS, " \
- "2 = SS, 3 = DS, 4 = FS, 5 = GS") \
- ENUM_ENTRY(TYPE_M32FP, "32-bit IEEE 754 memory floating-point operand") \
- ENUM_ENTRY(TYPE_M64FP, "64-bit") \
- ENUM_ENTRY(TYPE_M80FP, "80-bit extended") \
- ENUM_ENTRY(TYPE_M16INT, "2-byte memory integer operand for use in " \
- "floating-point instructions") \
- ENUM_ENTRY(TYPE_M32INT, "4-byte") \
- ENUM_ENTRY(TYPE_M64INT, "8-byte") \
- ENUM_ENTRY(TYPE_ST, "Position on the floating-point stack") \
- ENUM_ENTRY(TYPE_MM, "MMX register operand") \
- ENUM_ENTRY(TYPE_MM32, "4-byte MMX register or memory operand") \
- ENUM_ENTRY(TYPE_MM64, "8-byte") \
- ENUM_ENTRY(TYPE_XMM, "XMM register operand") \
- ENUM_ENTRY(TYPE_XMM32, "4-byte XMM register or memory operand") \
- ENUM_ENTRY(TYPE_XMM64, "8-byte") \
- ENUM_ENTRY(TYPE_XMM128, "16-byte") \
- ENUM_ENTRY(TYPE_XMM0, "Implicit use of XMM0") \
- ENUM_ENTRY(TYPE_SEGMENTREG, "Segment register operand") \
- ENUM_ENTRY(TYPE_DEBUGREG, "Debug register operand") \
- ENUM_ENTRY(TYPE_CR32, "4-byte control register operand") \
- ENUM_ENTRY(TYPE_CR64, "8-byte") \
- \
- ENUM_ENTRY(TYPE_Mv, "Memory operand of operand size") \
- ENUM_ENTRY(TYPE_Rv, "Register operand of operand size") \
- ENUM_ENTRY(TYPE_IMMv, "Immediate operand of operand size") \
- ENUM_ENTRY(TYPE_RELv, "Immediate address of operand size") \
- ENUM_ENTRY(TYPE_DUP0, "Duplicate of operand 0") \
- ENUM_ENTRY(TYPE_DUP1, "operand 1") \
- ENUM_ENTRY(TYPE_DUP2, "operand 2") \
- ENUM_ENTRY(TYPE_DUP3, "operand 3") \
- ENUM_ENTRY(TYPE_DUP4, "operand 4") \
- ENUM_ENTRY(TYPE_M512, "512-bit FPU/MMX/XMM/MXCSR state")
-
-#define ENUM_ENTRY(n, d) n,
-typedef enum {
- TYPES
- TYPE_max
-} OperandType;
-#undef ENUM_ENTRY
-
-/*
- * OperandSpecifier - The specification for how to extract and interpret one
- * operand.
- */
-struct OperandSpecifier {
- OperandEncoding encoding;
- OperandType type;
-};
-
-/*
- * Indicates where the opcode modifier (if any) is to be found. Extended
- * opcodes with AddRegFrm have the opcode modifier in the ModR/M byte.
- */
-
-#define MODIFIER_TYPES \
- ENUM_ENTRY(MODIFIER_NONE) \
- ENUM_ENTRY(MODIFIER_OPCODE) \
- ENUM_ENTRY(MODIFIER_MODRM)
-
-#define ENUM_ENTRY(n) n,
-typedef enum {
- MODIFIER_TYPES
- MODIFIER_max
-} ModifierType;
-#undef ENUM_ENTRY
-
-#define X86_MAX_OPERANDS 5
-
-/*
- * The specification for how to extract and interpret a full instruction and
- * its operands.
- */
-struct InstructionSpecifier {
- ModifierType modifierType;
- uint8_t modifierBase;
- struct OperandSpecifier operands[X86_MAX_OPERANDS];
-
- /* The macro below must be defined wherever this file is included. */
- INSTRUCTION_SPECIFIER_FIELDS
-};
-
-/*
- * Decoding mode for the Intel disassembler. 16-bit, 32-bit, and 64-bit mode
- * are supported, and represent real mode, IA-32e, and IA-32e in 64-bit mode,
- * respectively.
- */
-typedef enum {
- MODE_16BIT,
- MODE_32BIT,
- MODE_64BIT
-} DisassemblerMode;
-
-#endif
diff --git a/libclamav/c++/llvm/lib/Target/X86/README-FPStack.txt b/libclamav/c++/llvm/lib/Target/X86/README-FPStack.txt
index be28e8b..39efd2d 100644
--- a/libclamav/c++/llvm/lib/Target/X86/README-FPStack.txt
+++ b/libclamav/c++/llvm/lib/Target/X86/README-FPStack.txt
@@ -27,8 +27,8 @@ def FpIADD32m : FpI<(ops RFP:$dst, RFP:$src1, i32mem:$src2), OneArgFPRW,
//===---------------------------------------------------------------------===//
-The FP stackifier needs to be global. Also, it should handle simple permutates
-to reduce number of shuffle instructions, e.g. turning:
+The FP stackifier should handle simple permutations to reduce the number of
+shuffle instructions, e.g. turning:
fld P -> fld Q
fld Q fld P
diff --git a/libclamav/c++/llvm/lib/Target/X86/README-SSE.txt b/libclamav/c++/llvm/lib/Target/X86/README-SSE.txt
index e5f84e8..f96b22f 100644
--- a/libclamav/c++/llvm/lib/Target/X86/README-SSE.txt
+++ b/libclamav/c++/llvm/lib/Target/X86/README-SSE.txt
@@ -2,8 +2,46 @@
// Random ideas for the X86 backend: SSE-specific stuff.
//===---------------------------------------------------------------------===//
-- Consider eliminating the unaligned SSE load intrinsics, replacing them with
- unaligned LLVM load instructions.
+//===---------------------------------------------------------------------===//
+
+SSE Variable shift can be custom lowered to something like this, which uses a
+small table + unaligned load + shuffle instead of going through memory.
+
+__m128i_shift_right:
+ .byte 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+ .byte -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
+
+...
+__m128i shift_right(__m128i value, unsigned long offset) {
+ return _mm_shuffle_epi8(value,
+ _mm_loadu_si128((__m128 *) (___m128i_shift_right + offset)));
+}
+
+//===---------------------------------------------------------------------===//
+
+SSE has instructions for doing operations on complex numbers; we should
+pattern match them. Compiling this:
+
+_Complex float f32(_Complex float A, _Complex float B) {
+ return A+B;
+}
+
+into:
+
+_f32:
+ movdqa %xmm0, %xmm2
+ addss %xmm1, %xmm2
+ pshufd $16, %xmm2, %xmm2
+ pshufd $1, %xmm1, %xmm1
+ pshufd $1, %xmm0, %xmm0
+ addss %xmm1, %xmm0
+ pshufd $16, %xmm0, %xmm1
+ movdqa %xmm2, %xmm0
+ unpcklps %xmm1, %xmm0
+ ret
+
+seems silly.
+
//===---------------------------------------------------------------------===//
@@ -36,62 +74,6 @@ The pattern isel got this one right.
//===---------------------------------------------------------------------===//
-SSE doesn't have [mem] op= reg instructions. If we have an SSE instruction
-like this:
-
- X += y
-
-and the register allocator decides to spill X, it is cheaper to emit this as:
-
-Y += [xslot]
-store Y -> [xslot]
-
-than as:
-
-tmp = [xslot]
-tmp += y
-store tmp -> [xslot]
-
-..and this uses one fewer register (so this should be done at load folding
-time, not at spiller time). *Note* however that this can only be done
-if Y is dead. Here's a testcase:
-
-@.str_3 = external global [15 x i8]
-declare void @printf(i32, ...)
-define void @main() {
-build_tree.exit:
- br label %no_exit.i7
-
-no_exit.i7: ; preds = %no_exit.i7, %build_tree.exit
- %tmp.0.1.0.i9 = phi double [ 0.000000e+00, %build_tree.exit ],
- [ %tmp.34.i18, %no_exit.i7 ]
- %tmp.0.0.0.i10 = phi double [ 0.000000e+00, %build_tree.exit ],
- [ %tmp.28.i16, %no_exit.i7 ]
- %tmp.28.i16 = fadd double %tmp.0.0.0.i10, 0.000000e+00
- %tmp.34.i18 = fadd double %tmp.0.1.0.i9, 0.000000e+00
- br i1 false, label %Compute_Tree.exit23, label %no_exit.i7
-
-Compute_Tree.exit23: ; preds = %no_exit.i7
- tail call void (i32, ...)* @printf( i32 0 )
- store double %tmp.34.i18, double* null
- ret void
-}
-
-We currently emit:
-
-.BBmain_1:
- xorpd %XMM1, %XMM1
- addsd %XMM0, %XMM1
-*** movsd %XMM2, QWORD PTR [%ESP + 8]
-*** addsd %XMM2, %XMM1
-*** movsd QWORD PTR [%ESP + 8], %XMM2
- jmp .BBmain_1 # no_exit.i7
-
-This is a bugpoint reduced testcase, which is why the testcase doesn't make
-much sense (e.g. it's an infinite loop). :)
-
-//===---------------------------------------------------------------------===//
-
SSE should implement 'select_cc' using 'emulated conditional moves' that use
pcmp/pand/pandn/por to do a selection instead of a conditional branch:
@@ -122,12 +104,6 @@ LBB_X_2:
//===---------------------------------------------------------------------===//
-It's not clear whether we should use pxor or xorps / xorpd to clear XMM
-registers. The choice may depend on subtarget information. We should do some
-more experiments on different x86 machines.
-
-//===---------------------------------------------------------------------===//
-
Lower memcpy / memset to a series of SSE 128 bit move instructions when it's
feasible.
@@ -151,45 +127,6 @@ Perhaps use pxor / xorp* to clear a XMM register first?
//===---------------------------------------------------------------------===//
-How to decide when to use the "floating point version" of logical ops? Here are
-some code fragments:
-
- movaps LCPI5_5, %xmm2
- divps %xmm1, %xmm2
- mulps %xmm2, %xmm3
- mulps 8656(%ecx), %xmm3
- addps 8672(%ecx), %xmm3
- andps LCPI5_6, %xmm2
- andps LCPI5_1, %xmm3
- por %xmm2, %xmm3
- movdqa %xmm3, (%edi)
-
- movaps LCPI5_5, %xmm1
- divps %xmm0, %xmm1
- mulps %xmm1, %xmm3
- mulps 8656(%ecx), %xmm3
- addps 8672(%ecx), %xmm3
- andps LCPI5_6, %xmm1
- andps LCPI5_1, %xmm3
- orps %xmm1, %xmm3
- movaps %xmm3, 112(%esp)
- movaps %xmm3, (%ebx)
-
-Due to some minor source change, the latter case ended up using orps and movaps
-instead of por and movdqa. Does it matter?
-
-//===---------------------------------------------------------------------===//
-
-X86RegisterInfo::copyRegToReg() returns X86::MOVAPSrr for VR128. Is it possible
-to choose between movaps, movapd, and movdqa based on types of source and
-destination?
-
-How about andps, andpd, and pand? Do we really care about the type of the packed
-elements? If not, why not always use the "ps" variants which are likely to be
-shorter.
-
-//===---------------------------------------------------------------------===//
-
External test Nurbs exposed some problems. Look for
__ZN15Nurbs_SSE_Cubic17TessellateSurfaceE, bb cond_next140. This is what icc
emits:
@@ -278,41 +215,6 @@ It also exposes some other problems. See MOV32ri -3 and the spills.
//===---------------------------------------------------------------------===//
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=25500
-
-LLVM is producing bad code.
-
-LBB_main_4: # cond_true44
- addps %xmm1, %xmm2
- subps %xmm3, %xmm2
- movaps (%ecx), %xmm4
- movaps %xmm2, %xmm1
- addps %xmm4, %xmm1
- addl $16, %ecx
- incl %edx
- cmpl $262144, %edx
- movaps %xmm3, %xmm2
- movaps %xmm4, %xmm3
- jne LBB_main_4 # cond_true44
-
-There are two problems. 1) No need for two loop induction variables. We can
-compare against 262144 * 16. 2) Known register coalescer issue. We should
-be able to eliminate one of the movaps:
-
- addps %xmm2, %xmm1 <=== Commute!
- subps %xmm3, %xmm1
- movaps (%ecx), %xmm4
- movaps %xmm1, %xmm1 <=== Eliminate!
- addps %xmm4, %xmm1
- addl $16, %ecx
- incl %edx
- cmpl $262144, %edx
- movaps %xmm3, %xmm2
- movaps %xmm4, %xmm3
- jne LBB_main_4 # cond_true44
-
-//===---------------------------------------------------------------------===//
-
Consider:
__m128 test(float a) {
@@ -382,22 +284,6 @@ elements are fixed zeros.
//===---------------------------------------------------------------------===//
-__m128d test1( __m128d A, __m128d B) {
- return _mm_shuffle_pd(A, B, 0x3);
-}
-
-compiles to
-
-shufpd $3, %xmm1, %xmm0
-
-Perhaps it's better to use unpckhpd instead?
-
-unpckhpd %xmm1, %xmm0
-
-Don't know if unpckhpd is faster. But it is shorter.
-
-//===---------------------------------------------------------------------===//
-
This code generates ugly code, probably due to costs being off or something:
define void @test(float* %P, <4 x float>* %P2 ) {
@@ -549,6 +435,7 @@ entry:
%tmp20 = tail call i64 @ccoshf( float %tmp6, float %z.0 ) nounwind readonly
ret i64 %tmp20
}
+declare i64 @ccoshf(float %z.0, float %z.1) nounwind readonly
This currently compiles to:
@@ -987,3 +874,34 @@ This would be better kept in the SSE unit by treating XMM0 as a 4xfloat and
doing a shuffle from v[1] to v[0] then a float store.
//===---------------------------------------------------------------------===//
+
+On SSE4 machines, we compile this code:
+
+define <2 x float> @test2(<2 x float> %Q, <2 x float> %R,
+ <2 x float> *%P) nounwind {
+ %Z = fadd <2 x float> %Q, %R
+
+ store <2 x float> %Z, <2 x float> *%P
+ ret <2 x float> %Z
+}
+
+into:
+
+_test2: ## @test2
+## BB#0:
+ insertps $0, %xmm2, %xmm2
+ insertps $16, %xmm3, %xmm2
+ insertps $0, %xmm0, %xmm3
+ insertps $16, %xmm1, %xmm3
+ addps %xmm2, %xmm3
+ movq %xmm3, (%rdi)
+ movaps %xmm3, %xmm0
+ pshufd $1, %xmm3, %xmm1
+ ## kill: XMM1<def> XMM1<kill>
+ ret
+
+The insertps's of $0 are pointless complex copies.
+
+//===---------------------------------------------------------------------===//
+
+
diff --git a/libclamav/c++/llvm/lib/Target/X86/README-X86-64.txt b/libclamav/c++/llvm/lib/Target/X86/README-X86-64.txt
index e8f7c5d..78c4dc0 100644
--- a/libclamav/c++/llvm/lib/Target/X86/README-X86-64.txt
+++ b/libclamav/c++/llvm/lib/Target/X86/README-X86-64.txt
@@ -1,27 +1,5 @@
//===- README_X86_64.txt - Notes for X86-64 code gen ----------------------===//
-Implement different PIC models? Right now we only support Mac OS X with small
-PIC code model.
-
-//===---------------------------------------------------------------------===//
-
-For this:
-
-extern void xx(void);
-void bar(void) {
- xx();
-}
-
-gcc compiles to:
-
-.globl _bar
-_bar:
- jmp _xx
-
-We need to do the tailcall optimization as well.
-
-//===---------------------------------------------------------------------===//
-
AMD64 Optimization Manual 8.2 has some nice information about optimizing integer
multiplication by a constant. How much of it applies to Intel's X86-64
implementation? There are definite trade-offs to consider: latency vs. register
@@ -96,123 +74,14 @@ gcc:
movq %rax, (%rdx)
ret
-//===---------------------------------------------------------------------===//
-
-Vararg function prologue can be further optimized. Currently all XMM registers
-are stored into register save area. Most of them can be eliminated since the
-upper bound of the number of XMM registers used are passed in %al. gcc produces
-something like the following:
-
- movzbl %al, %edx
- leaq 0(,%rdx,4), %rax
- leaq 4+L2(%rip), %rdx
- leaq 239(%rsp), %rax
- jmp *%rdx
- movaps %xmm7, -15(%rax)
- movaps %xmm6, -31(%rax)
- movaps %xmm5, -47(%rax)
- movaps %xmm4, -63(%rax)
- movaps %xmm3, -79(%rax)
- movaps %xmm2, -95(%rax)
- movaps %xmm1, -111(%rax)
- movaps %xmm0, -127(%rax)
-L2:
-
-It jumps over the movaps that do not need to be stored. Hard to see this being
-significant as it added 5 instructions (including an indirect branch) to avoid
-executing 0 to 8 stores in the function prologue.
-
-Perhaps we can optimize for the common case where no XMM registers are used for
-parameter passing, i.e. if %al == 0, jump over all stores. Or in the case of a
-leaf function where we can determine that no XMM input parameter is needed, avoid
-emitting the stores at all.
-
-//===---------------------------------------------------------------------===//
+And the codegen is even worse for the following
+(from http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33103):
+ void fill1(char *s, int a)
+ {
+ __builtin_memset(s, a, 15);
+ }
-AMD64 has a complex calling convention for aggregate passing by value:
-
-1. If the size of an object is larger than two eightbytes, or in C++, is a non-
- POD structure or union type, or contains unaligned fields, it has class
- MEMORY.
-2. Both eightbytes get initialized to class NO_CLASS.
-3. Each field of an object is classified recursively so that always two fields
- are considered. The resulting class is calculated according to the classes
- of the fields in the eightbyte:
- (a) If both classes are equal, this is the resulting class.
- (b) If one of the classes is NO_CLASS, the resulting class is the other
- class.
- (c) If one of the classes is MEMORY, the result is the MEMORY class.
- (d) If one of the classes is INTEGER, the result is the INTEGER.
- (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, MEMORY is used as
- class.
- (f) Otherwise class SSE is used.
-4. Then a post merger cleanup is done:
- (a) If one of the classes is MEMORY, the whole argument is passed in memory.
- (b) If SSEUP is not preceded by SSE, it is converted to SSE.
-
-Currently llvm frontend does not handle this correctly.
-
-Problem 1:
- typedef struct { int i; double d; } QuadWordS;
-It is currently passed in two i64 integer registers. However, gcc compiled
-callee expects the second element 'd' to be passed in XMM0.
-
-Problem 2:
- typedef struct { int32_t i; float j; double d; } QuadWordS;
-The size of the first two fields == i64 so they will be combined and passed in
-an integer register RDI. The third field is still passed in XMM0.
-
-Problem 3:
- typedef struct { int64_t i; int8_t j; int64_t d; } S;
- void test(S s)
-The size of this aggregate is greater than two i64 so it should be passed in
-memory. Currently llvm breaks this down and passes it in three integer
-registers.
-
-Problem 4:
-Taking problem 3 one step further, where a function expects an aggregate value
-in memory followed by more parameter(s) passed in register(s).
- void test(S s, int b)
-
-LLVM IR does not allow parameter passing by aggregates, therefore it must break
-the aggregate value (in problems 3 and 4) into a number of scalar values:
- void %test(long %s.i, byte %s.j, long %s.d);
-
-However, if the backend were to lower this code literally it would pass the 3
-values in integer registers. To force it be passed in memory, the frontend
-should change the function signature to:
- void %test(long %undef1, long %undef2, long %undef3, long %undef4,
- long %undef5, long %undef6,
- long %s.i, byte %s.j, long %s.d);
-And the callee would look something like this:
- call void %test( undef, undef, undef, undef, undef, undef,
- %tmp.s.i, %tmp.s.j, %tmp.s.d );
-The first 6 undef parameters would exhaust the 6 integer registers used for
-parameter passing. The following three integer values would then be forced into
-memory.
-
-For problem 4, the parameter 'd' would be moved to the front of the parameter
-list so it will be passed in register:
- void %test(int %d,
- long %undef1, long %undef2, long %undef3, long %undef4,
- long %undef5, long %undef6,
- long %s.i, byte %s.j, long %s.d);
-
-//===---------------------------------------------------------------------===//
-
-Right now the asm printer assumes GlobalAddress are accessed via RIP relative
-addressing. Therefore, it is not possible to generate this:
- movabsq $__ZTV10polynomialIdE+16, %rax
-
-That is ok for now since we currently only support small model. So the above
-is selected as
- leaq __ZTV10polynomialIdE+16(%rip), %rax
-
-This is probably slightly slower but is much shorter than movabsq. However, if
-we were to support medium or larger code models, we need to use the movabs
-instruction. We should probably introduce something like AbsoluteAddress to
-distinguish it from GlobalAddress so the asm printer and JIT code emitter can
-do the right thing.
+For this version, we duplicate the computation of the constant to store.
//===---------------------------------------------------------------------===//
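The "computation of the constant" above is the byte-splat of the fill value; a
minimal host-side sketch of that splat, with an illustrative helper name not
taken from the backend:

  #include <cstdint>
  #include <cstring>

  // Splat the low byte of 'a' across a 64-bit word, as a memset lowering
  // would, then cover the 15 bytes with two overlapping 8-byte stores;
  // computing the splat once is enough for every piece of the fill.
  static void fill1_sketch(char *s, int a) {
    uint64_t splat = 0x0101010101010101ULL * (uint8_t)a;
    std::memcpy(s, &splat, 8);      // bytes 0..7
    std::memcpy(s + 7, &splat, 8);  // bytes 7..14
  }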
@@ -298,3 +167,107 @@ be able to recognize the zero extend. This could also presumably be implemented
if we have whole-function selectiondags.
//===---------------------------------------------------------------------===//
+
+Take the following C code
+(from http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43640):
+
+struct u1
+{
+ float x;
+ float y;
+};
+
+float foo(struct u1 u)
+{
+ return u.x + u.y;
+}
+
+Optimizes to the following IR:
+define float @foo(double %u.0) nounwind readnone {
+entry:
+ %tmp8 = bitcast double %u.0 to i64 ; <i64> [#uses=2]
+ %tmp6 = trunc i64 %tmp8 to i32 ; <i32> [#uses=1]
+ %tmp7 = bitcast i32 %tmp6 to float ; <float> [#uses=1]
+ %tmp2 = lshr i64 %tmp8, 32 ; <i64> [#uses=1]
+ %tmp3 = trunc i64 %tmp2 to i32 ; <i32> [#uses=1]
+ %tmp4 = bitcast i32 %tmp3 to float ; <float> [#uses=1]
+ %0 = fadd float %tmp7, %tmp4 ; <float> [#uses=1]
+ ret float %0
+}
+
+And current llvm-gcc/clang output:
+ movd %xmm0, %rax
+ movd %eax, %xmm1
+ shrq $32, %rax
+ movd %eax, %xmm0
+ addss %xmm1, %xmm0
+ ret
+
+We really shouldn't move the floats to RAX, only to immediately move them
+straight back to the XMM registers.
+
+There really isn't any good way to handle this purely in IR optimizers; it
+could possibly be handled by changing the output of the frontend, though. It
+would also be feasible to add an x86-specific DAGCombine to optimize the
+bitcast+trunc+(lshr+)bitcast combination.
+
+//===---------------------------------------------------------------------===//
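For reference, the bitcast/trunc/lshr sequence in the IR above is just the
following bit extraction, sketched host-side under the assumption of a
little-endian layout (the function name is illustrative):

  #include <cstdint>
  #include <cstring>

  // Reinterpret the 8 bytes holding {x, y} and pull the two 32-bit float
  // patterns out of the low and high halves; ideally the backend would do
  // this without bouncing the values through a general-purpose register.
  static float foo_bits(double u0) {
    uint64_t bits;
    std::memcpy(&bits, &u0, sizeof bits);
    uint32_t lo = (uint32_t)bits;
    uint32_t hi = (uint32_t)(bits >> 32);
    float x, y;
    std::memcpy(&x, &lo, sizeof x);
    std::memcpy(&y, &hi, sizeof y);
    return x + y;
  }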
+
+Take the following code
+(from http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34653):
+extern unsigned long table[];
+unsigned long foo(unsigned char *p) {
+ unsigned long tag = *p;
+ return table[tag >> 4] + table[tag & 0xf];
+}
+
+Current code generated:
+ movzbl (%rdi), %eax
+ movq %rax, %rcx
+ andq $240, %rcx
+ shrq %rcx
+ andq $15, %rax
+ movq table(,%rax,8), %rax
+ addq table(%rcx), %rax
+ ret
+
+Issues:
+1. First movq should be movl; saves a byte.
+2. Both andq's should be andl; saves another two bytes. I think this was
+ implemented at one point, but subsequently regressed.
+3. shrq should be shrl; saves another byte.
+4. The first andq can be completely eliminated by using a slightly more
+ expensive addressing mode.
+
+//===---------------------------------------------------------------------===//
+
+Consider the following (contrived testcase, but contains common factors):
+
+#include <stdarg.h>
+int test(int x, ...) {
+ int sum, i;
+ va_list l;
+ va_start(l, x);
+ for (i = 0; i < x; i++)
+ sum += va_arg(l, int);
+ va_end(l);
+ return sum;
+}
+
+Testcase given in C because fixing it will likely involve changing the IR
+generated for it. The primary issue with the result is that it doesn't do any
+of the optimizations which are possible if we know the address of a va_list
+in the current function is never taken:
+1. We shouldn't spill the XMM registers because we only call va_arg with "int".
+2. It would be nice if we could scalarrepl the va_list.
+3. Probably overkill, but it'd be cool if we could peel off the first five
+iterations of the loop.
+
+Other optimizations involving functions which use va_arg on floats which don't
+have the address of a va_list taken:
+1. Conversely to the above, we shouldn't spill general registers if we only
+ call va_arg on "double".
+2. If we know nothing more than 64 bits wide is read from the XMM registers,
+ we can change the spilling code to reduce the amount of stack used by half.
+
+//===---------------------------------------------------------------------===//
diff --git a/libclamav/c++/llvm/lib/Target/X86/README.txt b/libclamav/c++/llvm/lib/Target/X86/README.txt
index d4545a6..a305ae6 100644
--- a/libclamav/c++/llvm/lib/Target/X86/README.txt
+++ b/libclamav/c++/llvm/lib/Target/X86/README.txt
@@ -1103,57 +1103,6 @@ be folded into: shl [mem], 1
//===---------------------------------------------------------------------===//
-This testcase misses a read/modify/write opportunity (from PR1425):
-
-void vertical_decompose97iH1(int *b0, int *b1, int *b2, int width){
- int i;
- for(i=0; i<width; i++)
- b1[i] += (1*(b0[i] + b2[i])+0)>>0;
-}
-
-We compile it down to:
-
-LBB1_2: # bb
- movl (%esi,%edi,4), %ebx
- addl (%ecx,%edi,4), %ebx
- addl (%edx,%edi,4), %ebx
- movl %ebx, (%ecx,%edi,4)
- incl %edi
- cmpl %eax, %edi
- jne LBB1_2 # bb
-
-the inner loop should add to the memory location (%ecx,%edi,4), saving
-a mov. Something like:
-
- movl (%esi,%edi,4), %ebx
- addl (%edx,%edi,4), %ebx
- addl %ebx, (%ecx,%edi,4)
-
-Here is another interesting example:
-
-void vertical_compose97iH1(int *b0, int *b1, int *b2, int width){
- int i;
- for(i=0; i<width; i++)
- b1[i] -= (1*(b0[i] + b2[i])+0)>>0;
-}
-
-We miss the r/m/w opportunity here by using 2 subs instead of an add+sub[mem]:
-
-LBB9_2: # bb
- movl (%ecx,%edi,4), %ebx
- subl (%esi,%edi,4), %ebx
- subl (%edx,%edi,4), %ebx
- movl %ebx, (%ecx,%edi,4)
- incl %edi
- cmpl %eax, %edi
- jne LBB9_2 # bb
-
-Additionally, LSR should rewrite the exit condition of these loops to use
-a stride-4 IV, which would allow all the scales in the loop to go away.
-This would result in smaller code and more efficient microops.
-
-//===---------------------------------------------------------------------===//
-
In SSE mode, we turn abs and neg into a load from the constant pool plus a xor
or and instruction, for example:
@@ -1186,13 +1135,6 @@ void test(double *P) {
//===---------------------------------------------------------------------===//
-handling llvm.memory.barrier on pre SSE2 cpus
-
-should generate:
-lock ; mov %esp, %esp
-
-//===---------------------------------------------------------------------===//
-
The generated code on x86 for checking for signed overflow on a multiply the
obvious way is much longer than it needs to be.
@@ -1301,15 +1243,8 @@ FirstOnet:
xorl %eax, %eax
ret
-There are a few possible improvements here:
-1. We should be able to eliminate the dead load into %ecx
-2. We could change the "movl 8(%esp), %eax" into
- "movzwl 10(%esp), %eax"; this lets us change the cmpl
- into a testl, which is shorter, and eliminate the shift.
-
-We could also in theory eliminate the branch by using a conditional
-for the address of the load, but that seems unlikely to be worthwhile
-in general.
+We could change the "movl 8(%esp), %eax" into "movzwl 10(%esp), %eax"; this
+lets us change the cmpl into a testl, which is shorter, and eliminate the shift.
//===---------------------------------------------------------------------===//
@@ -1331,22 +1266,23 @@ bb7: ; preds = %entry
to:
-_foo:
+foo: # @foo
+# BB#0: # %entry
+ movl 4(%esp), %ecx
cmpb $0, 16(%esp)
- movl 12(%esp), %ecx
+ je .LBB0_2
+# BB#1: # %bb
movl 8(%esp), %eax
- movl 4(%esp), %edx
- je LBB1_2 # bb7
-LBB1_1: # bb
- addl %edx, %eax
+ addl %ecx, %eax
ret
-LBB1_2: # bb7
- movl %edx, %eax
- subl %ecx, %eax
+.LBB0_2: # %bb7
+ movl 12(%esp), %edx
+ movl %ecx, %eax
+ subl %edx, %eax
ret
-The coalescer could coalesce "edx" with "eax" to avoid the movl in LBB1_2
-if it commuted the addl in LBB1_1.
+There's an obviously unnecessary movl in .LBB0_2, and we could eliminate a
+couple more movls by putting 4(%esp) into %eax instead of %ecx.
//===---------------------------------------------------------------------===//
@@ -1396,8 +1332,7 @@ Also check why xmm7 is not used at all in the function.
//===---------------------------------------------------------------------===//
-Legalize loses track of the fact that bools are always zero extended when in
-memory. This causes us to compile abort_gzip (from 164.gzip) from:
+Take the following:
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin8"
@@ -1416,16 +1351,15 @@ bb4.i: ; preds = %entry
}
declare void @exit(i32) noreturn nounwind
-into:
-
-_abort_gzip:
+This compiles into:
+_abort_gzip: ## @abort_gzip
+## BB#0: ## %entry
subl $12, %esp
movb _in_exit.4870.b, %al
- notb %al
- testb $1, %al
- jne LBB1_2 ## bb4.i
-LBB1_1: ## bb.i
- ...
+ cmpb $1, %al
+ jne LBB0_2
+
+We somehow miss folding the movb into the cmpb.
//===---------------------------------------------------------------------===//
@@ -1929,3 +1863,100 @@ The code produced by gcc is 3 bytes shorter. This sort of construct often
shows up with bitfields.
//===---------------------------------------------------------------------===//
+
+Take the following C code:
+int f(int a, int b) { return (unsigned char)a == (unsigned char)b; }
+
+We generate the following IR with clang:
+define i32 @f(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %tmp = xor i32 %b, %a ; <i32> [#uses=1]
+ %tmp6 = and i32 %tmp, 255 ; <i32> [#uses=1]
+ %cmp = icmp eq i32 %tmp6, 0 ; <i1> [#uses=1]
+ %conv5 = zext i1 %cmp to i32 ; <i32> [#uses=1]
+ ret i32 %conv5
+}
+
+And the following x86 code:
+ xorl %esi, %edi
+ testb $-1, %dil
+ sete %al
+ movzbl %al, %eax
+ ret
+
+A cmpb instead of the xorl+testb would be one instruction shorter.
+
+//===---------------------------------------------------------------------===//
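The equivalence behind that suggestion is easy to check: the low bytes compare
equal exactly when the low byte of the xor is zero. A small self-contained
check (not generated code):

  #include <cassert>

  static int f_ref(int a, int b) { return (unsigned char)a == (unsigned char)b; }
  static int f_xor(int a, int b) { return ((a ^ b) & 0xff) == 0; }

  int main() {
    for (int a = -256; a < 256; ++a)
      for (int b = -256; b < 256; ++b)
        assert(f_ref(a, b) == f_xor(a, b));
    return 0;
  }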
+
+Given the following C code:
+int f(int a, int b) { return (signed char)a == (signed char)b; }
+
+We generate the following IR with clang:
+define i32 @f(i32 %a, i32 %b) nounwind readnone {
+entry:
+ %sext = shl i32 %a, 24 ; <i32> [#uses=1]
+ %conv1 = ashr i32 %sext, 24 ; <i32> [#uses=1]
+ %sext6 = shl i32 %b, 24 ; <i32> [#uses=1]
+ %conv4 = ashr i32 %sext6, 24 ; <i32> [#uses=1]
+ %cmp = icmp eq i32 %conv1, %conv4 ; <i1> [#uses=1]
+ %conv5 = zext i1 %cmp to i32 ; <i32> [#uses=1]
+ ret i32 %conv5
+}
+
+And the following x86 code:
+ movsbl %sil, %eax
+ movsbl %dil, %ecx
+ cmpl %eax, %ecx
+ sete %al
+ movzbl %al, %eax
+ ret
+
+
+It should be possible to eliminate the sign extensions.
+
+//===---------------------------------------------------------------------===//
+
+LLVM misses a load+store narrowing opportunity in this code:
+
+%struct.bf = type { i64, i16, i16, i32 }
+
+ at bfi = external global %struct.bf* ; <%struct.bf**> [#uses=2]
+
+define void @t1() nounwind ssp {
+entry:
+ %0 = load %struct.bf** @bfi, align 8 ; <%struct.bf*> [#uses=1]
+ %1 = getelementptr %struct.bf* %0, i64 0, i32 1 ; <i16*> [#uses=1]
+ %2 = bitcast i16* %1 to i32* ; <i32*> [#uses=2]
+ %3 = load i32* %2, align 1 ; <i32> [#uses=1]
+ %4 = and i32 %3, -65537 ; <i32> [#uses=1]
+ store i32 %4, i32* %2, align 1
+ %5 = load %struct.bf** @bfi, align 8 ; <%struct.bf*> [#uses=1]
+ %6 = getelementptr %struct.bf* %5, i64 0, i32 1 ; <i16*> [#uses=1]
+ %7 = bitcast i16* %6 to i32* ; <i32*> [#uses=2]
+ %8 = load i32* %7, align 1 ; <i32> [#uses=1]
+ %9 = and i32 %8, -131073 ; <i32> [#uses=1]
+ store i32 %9, i32* %7, align 1
+ ret void
+}
+
+LLVM currently emits this:
+
+ movq bfi(%rip), %rax
+ andl $-65537, 8(%rax)
+ movq bfi(%rip), %rax
+ andl $-131073, 8(%rax)
+ ret
+
+It could narrow the loads and stores to emit this:
+
+ movq bfi(%rip), %rax
+ andb $-2, 10(%rax)
+ movq bfi(%rip), %rax
+ andb $-3, 10(%rax)
+ ret
+
+The trouble is that there is a TokenFactor between the store and the
+load, making it non-trivial to determine if there's anything between
+the load and the store which would prohibit narrowing.
+
+//===---------------------------------------------------------------------===//
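The narrowing above is plain bit arithmetic: clearing bit 16 (mask -65537) or
bit 17 (mask -131073) of the 32-bit word at offset 8 only ever touches the
byte at offset 10, which is exactly what the andb forms exploit. A quick
host-side check, valid on a little-endian target:

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  int main() {
    for (uint32_t b = 0; b < 256; ++b) {
      uint32_t word = 0x01010101u * b;     // arbitrary 32-bit contents
      uint32_t wide = word & 0xFFFEFFFFu;  // andl $-65537
      uint8_t bytes[4];
      std::memcpy(bytes, &word, sizeof word);
      bytes[2] &= 0xFE;                    // andb $-2 on the byte at +2
      uint32_t narrow;
      std::memcpy(&narrow, bytes, sizeof narrow);
      assert(wide == narrow);
    }
    return 0;
  }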
diff --git a/libclamav/c++/llvm/lib/Target/X86/SSEDomainFix.cpp b/libclamav/c++/llvm/lib/Target/X86/SSEDomainFix.cpp
new file mode 100644
index 0000000..13680c5
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Target/X86/SSEDomainFix.cpp
@@ -0,0 +1,506 @@
+//===- SSEDomainFix.cpp - Use proper int/float domain for SSE ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the SSEDomainFix pass.
+//
+// Some SSE instructions like mov, and, or, xor are available in different
+// variants for different operand types. These variant instructions are
+// equivalent, but on Nehalem and newer cpus there is extra latency
+// transferring data between integer and floating point domains.
+//
+// This pass changes the variant instructions to minimize domain crossings.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "sse-domain-fix"
+#include "X86InstrInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+/// A DomainValue is a bit like LiveIntervals' ValNo, but it also keeps track
+/// of execution domains.
+///
+/// An open DomainValue represents a set of instructions that can still switch
+/// execution domain. Multiple registers may refer to the same open
+/// DomainValue - they will eventually be collapsed to the same execution
+/// domain.
+///
+/// A collapsed DomainValue represents a single register that has been forced
+/// into one or more execution domains. There is a separate collapsed
+/// DomainValue for each register, but it may contain multiple execution
+/// domains. A register value is initially created in a single execution
+/// domain, but if we were forced to pay the penalty of a domain crossing, we
+/// keep track of the fact that the register is now available in multiple
+/// domains.
+namespace {
+struct DomainValue {
+ // Basic reference counting.
+ unsigned Refs;
+
+ // Bitmask of available domains. For an open DomainValue, it is the still
+ // possible domains for collapsing. For a collapsed DomainValue it is the
+ // domains where the register is available for free.
+ unsigned AvailableDomains;
+
+ // Position of the last defining instruction.
+ unsigned Dist;
+
+ // Twiddleable instructions using or defining these registers.
+ SmallVector<MachineInstr*, 8> Instrs;
+
+ // A collapsed DomainValue has no instructions to twiddle - it simply keeps
+ // track of the domains where the registers are already available.
+ bool isCollapsed() const { return Instrs.empty(); }
+
+ // Is domain available?
+ bool hasDomain(unsigned domain) const {
+ return AvailableDomains & (1u << domain);
+ }
+
+ // Mark domain as available.
+ void addDomain(unsigned domain) {
+ AvailableDomains |= 1u << domain;
+ }
+
+ // Restrict to a single domain available.
+ void setSingleDomain(unsigned domain) {
+ AvailableDomains = 1u << domain;
+ }
+
+ // Return bitmask of domains that are available and in mask.
+ unsigned getCommonDomains(unsigned mask) const {
+ return AvailableDomains & mask;
+ }
+
+ // First domain available.
+ unsigned getFirstDomain() const {
+ return CountTrailingZeros_32(AvailableDomains);
+ }
+
+ DomainValue() { clear(); }
+
+ void clear() {
+ Refs = AvailableDomains = Dist = 0;
+ Instrs.clear();
+ }
+};
+}
+
+static const unsigned NumRegs = 16;
+
+namespace {
+class SSEDomainFixPass : public MachineFunctionPass {
+ static char ID;
+ SpecificBumpPtrAllocator<DomainValue> Allocator;
+ SmallVector<DomainValue*,16> Avail;
+
+ MachineFunction *MF;
+ const X86InstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ MachineBasicBlock *MBB;
+ DomainValue **LiveRegs;
+ typedef DenseMap<MachineBasicBlock*,DomainValue**> LiveOutMap;
+ LiveOutMap LiveOuts;
+ unsigned Distance;
+
+public:
+ SSEDomainFixPass() : MachineFunctionPass(ID) {}
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ virtual const char *getPassName() const {
+ return "SSE execution domain fixup";
+ }
+
+private:
+ // Register mapping.
+ int RegIndex(unsigned Reg);
+
+ // DomainValue allocation.
+ DomainValue *Alloc(int domain = -1);
+ void Recycle(DomainValue*);
+
+ // LiveRegs manipulations.
+ void SetLiveReg(int rx, DomainValue *DV);
+ void Kill(int rx);
+ void Force(int rx, unsigned domain);
+ void Collapse(DomainValue *dv, unsigned domain);
+ bool Merge(DomainValue *A, DomainValue *B);
+
+ void enterBasicBlock();
+ void visitGenericInstr(MachineInstr*);
+ void visitSoftInstr(MachineInstr*, unsigned mask);
+ void visitHardInstr(MachineInstr*, unsigned domain);
+};
+}
+
+char SSEDomainFixPass::ID = 0;
+
+/// Translate TRI register number to an index into our smaller tables of
+/// interesting registers. Return -1 for boring registers.
+int SSEDomainFixPass::RegIndex(unsigned reg) {
+ assert(X86::XMM15 == X86::XMM0+NumRegs-1 && "Unexpected sort");
+ reg -= X86::XMM0;
+ return reg < NumRegs ? (int) reg : -1;
+}
+
+DomainValue *SSEDomainFixPass::Alloc(int domain) {
+ DomainValue *dv = Avail.empty() ?
+ new(Allocator.Allocate()) DomainValue :
+ Avail.pop_back_val();
+ dv->Dist = Distance;
+ if (domain >= 0)
+ dv->addDomain(domain);
+ return dv;
+}
+
+void SSEDomainFixPass::Recycle(DomainValue *dv) {
+ assert(dv && "Cannot recycle NULL");
+ dv->clear();
+ Avail.push_back(dv);
+}
+
+/// Set LiveRegs[rx] = dv, updating reference counts.
+void SSEDomainFixPass::SetLiveReg(int rx, DomainValue *dv) {
+ assert(unsigned(rx) < NumRegs && "Invalid index");
+ if (!LiveRegs) {
+ LiveRegs = new DomainValue*[NumRegs];
+ std::fill(LiveRegs, LiveRegs+NumRegs, (DomainValue*)0);
+ }
+
+ if (LiveRegs[rx] == dv)
+ return;
+ if (LiveRegs[rx]) {
+ assert(LiveRegs[rx]->Refs && "Bad refcount");
+ if (--LiveRegs[rx]->Refs == 0) Recycle(LiveRegs[rx]);
+ }
+ LiveRegs[rx] = dv;
+ if (dv) ++dv->Refs;
+}
+
+// Kill register rx, recycle or collapse any DomainValue.
+void SSEDomainFixPass::Kill(int rx) {
+ assert(unsigned(rx) < NumRegs && "Invalid index");
+ if (!LiveRegs || !LiveRegs[rx]) return;
+
+ // Before killing the last reference to an open DomainValue, collapse it to
+ // the first available domain.
+ if (LiveRegs[rx]->Refs == 1 && !LiveRegs[rx]->isCollapsed())
+ Collapse(LiveRegs[rx], LiveRegs[rx]->getFirstDomain());
+ else
+ SetLiveReg(rx, 0);
+}
+
+/// Force register rx into domain.
+void SSEDomainFixPass::Force(int rx, unsigned domain) {
+ assert(unsigned(rx) < NumRegs && "Invalid index");
+ DomainValue *dv;
+ if (LiveRegs && (dv = LiveRegs[rx])) {
+ if (dv->isCollapsed())
+ dv->addDomain(domain);
+ else if (dv->hasDomain(domain))
+ Collapse(dv, domain);
+ else {
+ // This is an incompatible open DomainValue. Collapse it to whatever and force
+ // the new value into domain. This costs a domain crossing.
+ Collapse(dv, dv->getFirstDomain());
+ assert(LiveRegs[rx] && "Not live after collapse?");
+ LiveRegs[rx]->addDomain(domain);
+ }
+ } else {
+ // Set up basic collapsed DomainValue.
+ SetLiveReg(rx, Alloc(domain));
+ }
+}
+
+/// Collapse open DomainValue into given domain. If there are multiple
+/// registers using dv, they each get a unique collapsed DomainValue.
+void SSEDomainFixPass::Collapse(DomainValue *dv, unsigned domain) {
+ assert(dv->hasDomain(domain) && "Cannot collapse");
+
+ // Collapse all the instructions.
+ while (!dv->Instrs.empty())
+ TII->SetSSEDomain(dv->Instrs.pop_back_val(), domain);
+ dv->setSingleDomain(domain);
+
+ // If there are multiple users, give them new, unique DomainValues.
+ if (LiveRegs && dv->Refs > 1)
+ for (unsigned rx = 0; rx != NumRegs; ++rx)
+ if (LiveRegs[rx] == dv)
+ SetLiveReg(rx, Alloc(domain));
+}
+
+/// Merge - All instructions and registers in B are moved to A, and B is
+/// released.
+bool SSEDomainFixPass::Merge(DomainValue *A, DomainValue *B) {
+ assert(!A->isCollapsed() && "Cannot merge into collapsed");
+ assert(!B->isCollapsed() && "Cannot merge from collapsed");
+ if (A == B)
+ return true;
+ // Restrict to the domains that A and B have in common.
+ unsigned common = A->getCommonDomains(B->AvailableDomains);
+ if (!common)
+ return false;
+ A->AvailableDomains = common;
+ A->Dist = std::max(A->Dist, B->Dist);
+ A->Instrs.append(B->Instrs.begin(), B->Instrs.end());
+ for (unsigned rx = 0; rx != NumRegs; ++rx)
+ if (LiveRegs[rx] == B)
+ SetLiveReg(rx, A);
+ return true;
+}
+
+void SSEDomainFixPass::enterBasicBlock() {
+ // Try to coalesce live-out registers from predecessors.
+ for (MachineBasicBlock::livein_iterator i = MBB->livein_begin(),
+ e = MBB->livein_end(); i != e; ++i) {
+ int rx = RegIndex(*i);
+ if (rx < 0) continue;
+ for (MachineBasicBlock::const_pred_iterator pi = MBB->pred_begin(),
+ pe = MBB->pred_end(); pi != pe; ++pi) {
+ LiveOutMap::const_iterator fi = LiveOuts.find(*pi);
+ if (fi == LiveOuts.end()) continue;
+ DomainValue *pdv = fi->second[rx];
+ if (!pdv) continue;
+ if (!LiveRegs || !LiveRegs[rx]) {
+ SetLiveReg(rx, pdv);
+ continue;
+ }
+
+ // We have a live DomainValue from more than one predecessor.
+ if (LiveRegs[rx]->isCollapsed()) {
+ // We are already collapsed, but predecessor is not. Force him.
+ unsigned domain = LiveRegs[rx]->getFirstDomain();
+ if (!pdv->isCollapsed() && pdv->hasDomain(domain))
+ Collapse(pdv, domain);
+ continue;
+ }
+
+ // Currently open, merge in predecessor.
+ if (!pdv->isCollapsed())
+ Merge(LiveRegs[rx], pdv);
+ else
+ Force(rx, pdv->getFirstDomain());
+ }
+ }
+}
+
+// A hard instruction only works in one domain. All input registers will be
+// forced into that domain.
+void SSEDomainFixPass::visitHardInstr(MachineInstr *mi, unsigned domain) {
+ // Collapse all uses.
+ for (unsigned i = mi->getDesc().getNumDefs(),
+ e = mi->getDesc().getNumOperands(); i != e; ++i) {
+ MachineOperand &mo = mi->getOperand(i);
+ if (!mo.isReg()) continue;
+ int rx = RegIndex(mo.getReg());
+ if (rx < 0) continue;
+ Force(rx, domain);
+ }
+
+ // Kill all defs and force them.
+ for (unsigned i = 0, e = mi->getDesc().getNumDefs(); i != e; ++i) {
+ MachineOperand &mo = mi->getOperand(i);
+ if (!mo.isReg()) continue;
+ int rx = RegIndex(mo.getReg());
+ if (rx < 0) continue;
+ Kill(rx);
+ Force(rx, domain);
+ }
+}
+
+// A soft instruction can be changed to work in other domains given by mask.
+void SSEDomainFixPass::visitSoftInstr(MachineInstr *mi, unsigned mask) {
+ // Bitmask of available domains for this instruction after taking collapsed
+ // operands into account.
+ unsigned available = mask;
+
+ // Scan the explicit use operands for incoming domains.
+ SmallVector<int, 4> used;
+ if (LiveRegs)
+ for (unsigned i = mi->getDesc().getNumDefs(),
+ e = mi->getDesc().getNumOperands(); i != e; ++i) {
+ MachineOperand &mo = mi->getOperand(i);
+ if (!mo.isReg()) continue;
+ int rx = RegIndex(mo.getReg());
+ if (rx < 0) continue;
+ if (DomainValue *dv = LiveRegs[rx]) {
+ // Bitmask of domains that dv and available have in common.
+ unsigned common = dv->getCommonDomains(available);
+ // Is it possible to use this collapsed register for free?
+ if (dv->isCollapsed()) {
+ // Restrict available domains to the ones in common with the operand.
+ // If there are no common domains, we must pay the cross-domain
+ // penalty for this operand.
+ if (common) available = common;
+ } else if (common)
+ // Open DomainValue is compatible, save it for merging.
+ used.push_back(rx);
+ else
+ // Open DomainValue is not compatible with instruction. It is useless
+ // now.
+ Kill(rx);
+ }
+ }
+
+ // If the collapsed operands force a single domain, propagate the collapse.
+ if (isPowerOf2_32(available)) {
+ unsigned domain = CountTrailingZeros_32(available);
+ TII->SetSSEDomain(mi, domain);
+ visitHardInstr(mi, domain);
+ return;
+ }
+
+ // Kill off any remaining uses that don't match available, and build a list of
+ // incoming DomainValues that we want to merge.
+ SmallVector<DomainValue*,4> doms;
+ for (SmallVector<int, 4>::iterator i=used.begin(), e=used.end(); i!=e; ++i) {
+ int rx = *i;
+ DomainValue *dv = LiveRegs[rx];
+ // This useless DomainValue could have been missed above.
+ if (!dv->getCommonDomains(available)) {
+ Kill(*i);
+ continue;
+ }
+ // sorted, uniqued insert.
+ bool inserted = false;
+ for (SmallVector<DomainValue*,4>::iterator i = doms.begin(), e = doms.end();
+ i != e && !inserted; ++i) {
+ if (dv == *i)
+ inserted = true;
+ else if (dv->Dist < (*i)->Dist) {
+ inserted = true;
+ doms.insert(i, dv);
+ }
+ }
+ if (!inserted)
+ doms.push_back(dv);
+ }
+
+ // doms are now sorted in order of appearance. Try to merge them all, giving
+ // priority to the latest ones.
+ DomainValue *dv = 0;
+ while (!doms.empty()) {
+ if (!dv) {
+ dv = doms.pop_back_val();
+ continue;
+ }
+
+ DomainValue *latest = doms.pop_back_val();
+ if (Merge(dv, latest)) continue;
+
+ // If latest didn't merge, it is useless now. Kill all registers using it.
+ for (SmallVector<int,4>::iterator i=used.begin(), e=used.end(); i != e; ++i)
+ if (LiveRegs[*i] == latest)
+ Kill(*i);
+ }
+
+ // dv is the DomainValue we are going to use for this instruction.
+ if (!dv)
+ dv = Alloc();
+ dv->Dist = Distance;
+ dv->AvailableDomains = available;
+ dv->Instrs.push_back(mi);
+
+ // Finally set all defs and non-collapsed uses to dv.
+ for (unsigned i = 0, e = mi->getDesc().getNumOperands(); i != e; ++i) {
+ MachineOperand &mo = mi->getOperand(i);
+ if (!mo.isReg()) continue;
+ int rx = RegIndex(mo.getReg());
+ if (rx < 0) continue;
+ if (!LiveRegs || !LiveRegs[rx] || (mo.isDef() && LiveRegs[rx]!=dv)) {
+ Kill(rx);
+ SetLiveReg(rx, dv);
+ }
+ }
+}
+
+void SSEDomainFixPass::visitGenericInstr(MachineInstr *mi) {
+ // Process explicit defs, kill any XMM registers redefined.
+ for (unsigned i = 0, e = mi->getDesc().getNumDefs(); i != e; ++i) {
+ MachineOperand &mo = mi->getOperand(i);
+ if (!mo.isReg()) continue;
+ int rx = RegIndex(mo.getReg());
+ if (rx < 0) continue;
+ Kill(rx);
+ }
+}
+
+bool SSEDomainFixPass::runOnMachineFunction(MachineFunction &mf) {
+ MF = &mf;
+ TII = static_cast<const X86InstrInfo*>(MF->getTarget().getInstrInfo());
+ TRI = MF->getTarget().getRegisterInfo();
+ MBB = 0;
+ LiveRegs = 0;
+ Distance = 0;
+ assert(NumRegs == X86::VR128RegClass.getNumRegs() && "Bad regclass");
+
+ // If no XMM registers are used in the function, we can skip it completely.
+ bool anyregs = false;
+ for (TargetRegisterClass::const_iterator I = X86::VR128RegClass.begin(),
+ E = X86::VR128RegClass.end(); I != E; ++I)
+ if (MF->getRegInfo().isPhysRegUsed(*I)) {
+ anyregs = true;
+ break;
+ }
+ if (!anyregs) return false;
+
+ MachineBasicBlock *Entry = MF->begin();
+ SmallPtrSet<MachineBasicBlock*, 16> Visited;
+ for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*, 16> >
+ DFI = df_ext_begin(Entry, Visited), DFE = df_ext_end(Entry, Visited);
+ DFI != DFE; ++DFI) {
+ MBB = *DFI;
+ enterBasicBlock();
+ for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
+ ++I) {
+ MachineInstr *mi = I;
+ if (mi->isDebugValue()) continue;
+ ++Distance;
+ std::pair<uint16_t, uint16_t> domp = TII->GetSSEDomain(mi);
+ if (domp.first)
+ if (domp.second)
+ visitSoftInstr(mi, domp.second);
+ else
+ visitHardInstr(mi, domp.first);
+ else if (LiveRegs)
+ visitGenericInstr(mi);
+ }
+
+ // Save live registers at end of MBB - used by enterBasicBlock().
+ if (LiveRegs)
+ LiveOuts.insert(std::make_pair(MBB, LiveRegs));
+ LiveRegs = 0;
+ }
+
+ // Clear the LiveOuts vectors. Should we also collapse any remaining
+ // DomainValues?
+ for (LiveOutMap::const_iterator i = LiveOuts.begin(), e = LiveOuts.end();
+ i != e; ++i)
+ delete[] i->second;
+ LiveOuts.clear();
+ Avail.clear();
+ Allocator.DestroyAll();
+
+ return false;
+}
+
+FunctionPass *llvm::createSSEDomainFixPass() {
+ return new SSEDomainFixPass();
+}
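For context, the "variants" the pass header refers to are encodings like the
three forms of a 128-bit bitwise AND, one per execution domain. A minimal
intrinsics illustration (not part of the pass itself):

  #include <emmintrin.h>  // SSE2 intrinsics

  // The same bitwise AND exists as andps (single-FP domain), andpd
  // (double-FP domain) and pand (integer domain); SSEDomainFix rewrites such
  // "soft" instructions so values do not cross domains needlessly.
  __m128  and_float(__m128 a, __m128 b)    { return _mm_and_ps(a, b); }
  __m128d and_double(__m128d a, __m128d b) { return _mm_and_pd(a, b); }
  __m128i and_int(__m128i a, __m128i b)    { return _mm_and_si128(a, b); }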
diff --git a/libclamav/c++/llvm/lib/Target/X86/TargetInfo/Makefile b/libclamav/c++/llvm/lib/Target/X86/TargetInfo/Makefile
index 9858e6a..ee91982 100644
--- a/libclamav/c++/llvm/lib/Target/X86/TargetInfo/Makefile
+++ b/libclamav/c++/llvm/lib/Target/X86/TargetInfo/Makefile
@@ -11,6 +11,6 @@ LEVEL = ../../../..
LIBRARYNAME = LLVMX86Info
# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86.h b/libclamav/c++/llvm/lib/Target/X86/X86.h
index 924424d..27e8850 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86.h
+++ b/libclamav/c++/llvm/lib/Target/X86/X86.h
@@ -20,9 +20,7 @@
namespace llvm {
class FunctionPass;
-class MCContext;
class JITCodeEmitter;
-class MCAssembler;
class MCCodeEmitter;
class MCContext;
class MachineCodeEmitter;
@@ -37,16 +35,19 @@ class formatted_raw_ostream;
FunctionPass *createX86ISelDag(X86TargetMachine &TM,
CodeGenOpt::Level OptLevel);
+/// createGlobalBaseRegPass - This pass initializes a global base
+/// register for PIC on x86-32.
+FunctionPass* createGlobalBaseRegPass();
+
/// createX86FloatingPointStackifierPass - This function returns a pass which
/// converts floating point register references and pseudo instructions into
/// floating point stack references and physical instructions.
///
FunctionPass *createX86FloatingPointStackifierPass();
-/// createX87FPRegKillInserterPass - This function returns a pass which
-/// inserts FP_REG_KILL instructions where needed.
-///
-FunctionPass *createX87FPRegKillInserterPass();
+/// createSSEDomainFixPass - This pass twiddles SSE opcodes to prevent domain
+/// crossings.
+FunctionPass *createSSEDomainFixPass();
/// createX86CodeEmitterPass - Return a pass that emits the collected X86 code
/// to the specified MCE object.
@@ -58,8 +59,8 @@ MCCodeEmitter *createX86_32MCCodeEmitter(const Target &, TargetMachine &TM,
MCCodeEmitter *createX86_64MCCodeEmitter(const Target &, TargetMachine &TM,
MCContext &Ctx);
-TargetAsmBackend *createX86_32AsmBackend(const Target &, MCAssembler &);
-TargetAsmBackend *createX86_64AsmBackend(const Target &, MCAssembler &);
+TargetAsmBackend *createX86_32AsmBackend(const Target &, const std::string &);
+TargetAsmBackend *createX86_64AsmBackend(const Target &, const std::string &);
/// createX86EmitCodeToMemory - Returns a pass that converts a register
/// allocated function into raw machine code in a dynamically
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86.td b/libclamav/c++/llvm/lib/Target/X86/X86.td
index 7919559..a19f1ac 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86.td
+++ b/libclamav/c++/llvm/lib/Target/X86/X86.td
@@ -55,14 +55,20 @@ def Feature3DNowA : SubtargetFeature<"3dnowa", "X863DNowLevel", "ThreeDNowA",
// feature, because SSE2 can be disabled (e.g. for compiling OS kernels)
// without disabling 64-bit mode.
def Feature64Bit : SubtargetFeature<"64bit", "HasX86_64", "true",
- "Support 64-bit instructions">;
+ "Support 64-bit instructions",
+ [FeatureCMOV]>;
def FeatureSlowBTMem : SubtargetFeature<"slow-bt-mem", "IsBTMemSlow", "true",
"Bit testing of memory is slow">;
+def FeatureFastUAMem : SubtargetFeature<"fast-unaligned-mem",
+ "IsUAMemFast", "true",
+ "Fast unaligned memory access">;
def FeatureSSE4A : SubtargetFeature<"sse4a", "HasSSE4A", "true",
"Support SSE 4a instructions">;
def FeatureAVX : SubtargetFeature<"avx", "HasAVX", "true",
"Enable AVX instructions">;
+def FeatureCLMUL : SubtargetFeature<"clmul", "HasCLMUL", "true",
+ "Enable carry-less multiplication instructions">;
def FeatureFMA3 : SubtargetFeature<"fma3", "HasFMA3", "true",
"Enable three-operand fused multiple-add">;
def FeatureFMA4 : SubtargetFeature<"fma4", "HasFMA4", "true",
@@ -70,6 +76,8 @@ def FeatureFMA4 : SubtargetFeature<"fma4", "HasFMA4", "true",
def FeatureVectorUAMem : SubtargetFeature<"vector-unaligned-mem",
"HasVectorUAMem", "true",
"Allow unaligned memory operands on vector/SIMD instructions">;
+def FeatureAES : SubtargetFeature<"aes", "HasAES", "true",
+ "Enable AES instructions">;
//===----------------------------------------------------------------------===//
// X86 processors supported.
@@ -97,9 +105,17 @@ def : Proc<"nocona", [FeatureSSE3, Feature64Bit, FeatureSlowBTMem]>;
def : Proc<"core2", [FeatureSSSE3, Feature64Bit, FeatureSlowBTMem]>;
def : Proc<"penryn", [FeatureSSE41, Feature64Bit, FeatureSlowBTMem]>;
def : Proc<"atom", [FeatureSSE3, Feature64Bit, FeatureSlowBTMem]>;
-def : Proc<"corei7", [FeatureSSE42, Feature64Bit, FeatureSlowBTMem]>;
-def : Proc<"nehalem", [FeatureSSE42, Feature64Bit, FeatureSlowBTMem]>;
+// "Arrandale" along with corei3 and corei5
+def : Proc<"corei7", [FeatureSSE42, Feature64Bit, FeatureSlowBTMem,
+ FeatureFastUAMem, FeatureAES]>;
+def : Proc<"nehalem", [FeatureSSE42, Feature64Bit, FeatureSlowBTMem,
+ FeatureFastUAMem]>;
+// Westmere is a similar machine to nehalem with some additional features.
+// Westmere is the corei3/i5/i7 path from nehalem to sandybridge
+def : Proc<"westmere", [FeatureSSE42, Feature64Bit, FeatureSlowBTMem,
+ FeatureFastUAMem, FeatureAES]>;
// Sandy Bridge does not have FMA
+// FIXME: Wikipedia says it does... it should have AES as well.
def : Proc<"sandybridge", [FeatureSSE42, FeatureAVX, Feature64Bit]>;
def : Proc<"k6", [FeatureMMX]>;
@@ -150,31 +166,7 @@ include "X86RegisterInfo.td"
include "X86InstrInfo.td"
-def X86InstrInfo : InstrInfo {
-
- // Define how we want to layout our TargetSpecific information field... This
- // should be kept up-to-date with the fields in the X86InstrInfo.h file.
- let TSFlagsFields = ["FormBits",
- "hasOpSizePrefix",
- "hasAdSizePrefix",
- "Prefix",
- "hasREX_WPrefix",
- "ImmTypeBits",
- "FPFormBits",
- "hasLockPrefix",
- "SegOvrBits",
- "Opcode"];
- let TSFlagsShifts = [0,
- 6,
- 7,
- 8,
- 12,
- 13,
- 16,
- 19,
- 20,
- 24];
-}
+def X86InstrInfo : InstrInfo;
//===----------------------------------------------------------------------===//
// Calling Conventions
@@ -189,7 +181,7 @@ include "X86CallingConv.td"
// Currently the X86 assembly parser only supports ATT syntax.
def ATTAsmParser : AsmParser {
- string AsmParserClassName = "ATTAsmParser";
+ string AsmParserClassName = "ATTAsmParser";
int Variant = 0;
// Discard comments in assembly strings.
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86AsmBackend.cpp b/libclamav/c++/llvm/lib/Target/X86/X86AsmBackend.cpp
index e6654ef..69dc967 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86AsmBackend.cpp
+++ b/libclamav/c++/llvm/lib/Target/X86/X86AsmBackend.cpp
@@ -9,26 +9,335 @@
#include "llvm/Target/TargetAsmBackend.h"
#include "X86.h"
+#include "X86FixupKinds.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/ELFObjectWriter.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSectionCOFF.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MachObjectWriter.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegistry.h"
#include "llvm/Target/TargetAsmBackend.h"
using namespace llvm;
-namespace {
+static unsigned getFixupKindLog2Size(unsigned Kind) {
+ switch (Kind) {
+ default: assert(0 && "invalid fixup kind!");
+ case X86::reloc_pcrel_1byte:
+ case FK_Data_1: return 0;
+ case X86::reloc_pcrel_2byte:
+ case FK_Data_2: return 1;
+ case X86::reloc_pcrel_4byte:
+ case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_movq_load:
+ case FK_Data_4: return 2;
+ case FK_Data_8: return 3;
+ }
+}
+
+namespace {
class X86AsmBackend : public TargetAsmBackend {
public:
- X86AsmBackend(const Target &T, MCAssembler &A)
+ X86AsmBackend(const Target &T)
: TargetAsmBackend(T) {}
+
+ void ApplyFixup(const MCFixup &Fixup, MCDataFragment &DF,
+ uint64_t Value) const {
+ unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());
+
+ assert(Fixup.getOffset() + Size <= DF.getContents().size() &&
+ "Invalid fixup offset!");
+ for (unsigned i = 0; i != Size; ++i)
+ DF.getContents()[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
+ }
+
+ bool MayNeedRelaxation(const MCInst &Inst) const;
+
+ void RelaxInstruction(const MCInst &Inst, MCInst &Res) const;
+
+ bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const;
};
+} // end anonymous namespace
+
+static unsigned getRelaxedOpcode(unsigned Op) {
+ switch (Op) {
+ default:
+ return Op;
+ case X86::JAE_1: return X86::JAE_4;
+ case X86::JA_1: return X86::JA_4;
+ case X86::JBE_1: return X86::JBE_4;
+ case X86::JB_1: return X86::JB_4;
+ case X86::JE_1: return X86::JE_4;
+ case X86::JGE_1: return X86::JGE_4;
+ case X86::JG_1: return X86::JG_4;
+ case X86::JLE_1: return X86::JLE_4;
+ case X86::JL_1: return X86::JL_4;
+ case X86::JMP_1: return X86::JMP_4;
+ case X86::JNE_1: return X86::JNE_4;
+ case X86::JNO_1: return X86::JNO_4;
+ case X86::JNP_1: return X86::JNP_4;
+ case X86::JNS_1: return X86::JNS_4;
+ case X86::JO_1: return X86::JO_4;
+ case X86::JP_1: return X86::JP_4;
+ case X86::JS_1: return X86::JS_4;
+ }
}
+bool X86AsmBackend::MayNeedRelaxation(const MCInst &Inst) const {
+ // Check if this instruction is ever relaxable.
+ if (getRelaxedOpcode(Inst.getOpcode()) == Inst.getOpcode())
+ return false;
+
+ // If so, just assume it can be relaxed. Once we support relaxing more complex
+ // instructions we should check that the instruction actually has symbolic
+ // operands before doing this, but we need to be careful about things like
+ // PCrel.
+ return true;
+}
+
+// FIXME: Can tblgen help at all here to verify there aren't other instructions
+// we can relax?
+void X86AsmBackend::RelaxInstruction(const MCInst &Inst, MCInst &Res) const {
+ // The only relaxation X86 does is from a 1-byte pcrel to a 4-byte pcrel.
+ unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());
+
+ if (RelaxedOp == Inst.getOpcode()) {
+ SmallString<256> Tmp;
+ raw_svector_ostream OS(Tmp);
+ Inst.dump_pretty(OS);
+ OS << "\n";
+ report_fatal_error("unexpected instruction to relax: " + OS.str());
+ }
+
+ Res = Inst;
+ Res.setOpcode(RelaxedOp);
+}
+
+/// WriteNopData - Write nops covering \arg Count bytes to the output file:
+/// an optimal multi-byte sequence for the first 15 bytes, then single-byte
+/// nops for the remainder. Returns true on success.
+///
+/// FIXME this is X86 32-bit specific and should move to a better place.
+bool X86AsmBackend::WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
+ static const uint8_t Nops[16][16] = {
+ // nop
+ {0x90},
+ // xchg %ax,%ax
+ {0x66, 0x90},
+ // nopl (%[re]ax)
+ {0x0f, 0x1f, 0x00},
+ // nopl 0(%[re]ax)
+ {0x0f, 0x1f, 0x40, 0x00},
+ // nopl 0(%[re]ax,%[re]ax,1)
+ {0x0f, 0x1f, 0x44, 0x00, 0x00},
+ // nopw 0(%[re]ax,%[re]ax,1)
+ {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
+ // nopl 0L(%[re]ax)
+ {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
+ // nopl 0L(%[re]ax,%[re]ax,1)
+ {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
+ // nopw 0L(%[re]ax,%[re]ax,1)
+ {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
+ // nopw %cs:0L(%[re]ax,%[re]ax,1)
+ {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
+ // nopl 0(%[re]ax,%[re]ax,1)
+ // nopw 0(%[re]ax,%[re]ax,1)
+ {0x0f, 0x1f, 0x44, 0x00, 0x00,
+ 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
+ // nopw 0(%[re]ax,%[re]ax,1)
+ // nopw 0(%[re]ax,%[re]ax,1)
+ {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00,
+ 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
+ // nopw 0(%[re]ax,%[re]ax,1)
+ // nopl 0L(%[re]ax)
+ {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00,
+ 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
+ // nopl 0L(%[re]ax)
+ // nopl 0L(%[re]ax)
+ {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
+ // nopl 0L(%[re]ax)
+ // nopl 0L(%[re]ax,%[re]ax,1)
+ {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}
+ };
+
+ // Write an optimal sequence for the first 15 bytes.
+ uint64_t OptimalCount = (Count < 16) ? Count : 15;
+ for (uint64_t i = 0, e = OptimalCount; i != e; i++)
+ OW->Write8(Nops[OptimalCount - 1][i]);
+
+ // Finish with single byte nops.
+ for (uint64_t i = OptimalCount, e = Count; i != e; ++i)
+ OW->Write8(0x90);
+
+ return true;
+}
+
+/* *** */
+
+namespace {
+class ELFX86AsmBackend : public X86AsmBackend {
+public:
+ ELFX86AsmBackend(const Target &T)
+ : X86AsmBackend(T) {
+ HasAbsolutizedSet = true;
+ HasScatteredSymbols = true;
+ }
+
+ bool isVirtualSection(const MCSection &Section) const {
+ const MCSectionELF &SE = static_cast<const MCSectionELF&>(Section);
+ return SE.getType() == MCSectionELF::SHT_NOBITS;
+ }
+};
+
+class ELFX86_32AsmBackend : public ELFX86AsmBackend {
+public:
+ ELFX86_32AsmBackend(const Target &T)
+ : ELFX86AsmBackend(T) {}
+
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ return new ELFObjectWriter(OS, /*Is64Bit=*/false,
+ /*IsLittleEndian=*/true,
+ /*HasRelocationAddend=*/false);
+ }
+};
+
+class ELFX86_64AsmBackend : public ELFX86AsmBackend {
+public:
+ ELFX86_64AsmBackend(const Target &T)
+ : ELFX86AsmBackend(T) {}
+
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ return new ELFObjectWriter(OS, /*Is64Bit=*/true,
+ /*IsLittleEndian=*/true,
+ /*HasRelocationAddend=*/true);
+ }
+};
+
+class WindowsX86AsmBackend : public X86AsmBackend {
+ bool Is64Bit;
+public:
+ WindowsX86AsmBackend(const Target &T, bool is64Bit)
+ : X86AsmBackend(T)
+ , Is64Bit(is64Bit) {
+ HasScatteredSymbols = true;
+ }
+
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ return createWinCOFFObjectWriter(OS, Is64Bit);
+ }
+
+ bool isVirtualSection(const MCSection &Section) const {
+ const MCSectionCOFF &SE = static_cast<const MCSectionCOFF&>(Section);
+ return SE.getCharacteristics() & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA;
+ }
+};
+
+class DarwinX86AsmBackend : public X86AsmBackend {
+public:
+ DarwinX86AsmBackend(const Target &T)
+ : X86AsmBackend(T) {
+ HasAbsolutizedSet = true;
+ HasScatteredSymbols = true;
+ }
+
+ bool isVirtualSection(const MCSection &Section) const {
+ const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
+ return (SMO.getType() == MCSectionMachO::S_ZEROFILL ||
+ SMO.getType() == MCSectionMachO::S_GB_ZEROFILL ||
+ SMO.getType() == MCSectionMachO::S_THREAD_LOCAL_ZEROFILL);
+ }
+};
+
+class DarwinX86_32AsmBackend : public DarwinX86AsmBackend {
+public:
+ DarwinX86_32AsmBackend(const Target &T)
+ : DarwinX86AsmBackend(T) {}
+
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ return new MachObjectWriter(OS, /*Is64Bit=*/false);
+ }
+};
+
+class DarwinX86_64AsmBackend : public DarwinX86AsmBackend {
+public:
+ DarwinX86_64AsmBackend(const Target &T)
+ : DarwinX86AsmBackend(T) {
+ HasReliableSymbolDifference = true;
+ }
+
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
+ return new MachObjectWriter(OS, /*Is64Bit=*/true);
+ }
+
+ virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
+ // Temporary labels in the string literals sections require symbols. The
+ // issue is that the x86_64 relocation format does not allow symbol +
+ // offset, and so the linker does not have enough information to resolve the
+ // access to the appropriate atom unless an external relocation is used. For
+ // non-cstring sections, we expect the compiler to use a non-temporary label
+ // for anything that could have an addend pointing outside the symbol.
+ //
+ // See <rdar://problem/4765733>.
+ const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
+ return SMO.getType() == MCSectionMachO::S_CSTRING_LITERALS;
+ }
+
+ virtual bool isSectionAtomizable(const MCSection &Section) const {
+ const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
+ // Fixed sized data sections are uniqued, they cannot be diced into atoms.
+ switch (SMO.getType()) {
+ default:
+ return true;
+
+ case MCSectionMachO::S_4BYTE_LITERALS:
+ case MCSectionMachO::S_8BYTE_LITERALS:
+ case MCSectionMachO::S_16BYTE_LITERALS:
+ case MCSectionMachO::S_LITERAL_POINTERS:
+ case MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS:
+ case MCSectionMachO::S_LAZY_SYMBOL_POINTERS:
+ case MCSectionMachO::S_MOD_INIT_FUNC_POINTERS:
+ case MCSectionMachO::S_MOD_TERM_FUNC_POINTERS:
+ case MCSectionMachO::S_INTERPOSING:
+ return false;
+ }
+ }
+};
+
+} // end anonymous namespace
+
TargetAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
- MCAssembler &A) {
- return new X86AsmBackend(T, A);
+ const std::string &TT) {
+ switch (Triple(TT).getOS()) {
+ case Triple::Darwin:
+ return new DarwinX86_32AsmBackend(T);
+ case Triple::MinGW32:
+ case Triple::Cygwin:
+ case Triple::Win32:
+ return new WindowsX86AsmBackend(T, false);
+ default:
+ return new ELFX86_32AsmBackend(T);
+ }
}
TargetAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
- MCAssembler &A) {
- return new X86AsmBackend(T, A);
+ const std::string &TT) {
+ switch (Triple(TT).getOS()) {
+ case Triple::Darwin:
+ return new DarwinX86_64AsmBackend(T);
+ case Triple::MinGW64:
+ case Triple::Cygwin:
+ case Triple::Win32:
+ return new WindowsX86AsmBackend(T, true);
+ default:
+ return new ELFX86_64AsmBackend(T);
+ }
}
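The relaxation above only ever widens the 1-byte pcrel branch forms to their
4-byte counterparts, and the reason is reach: an 8-bit displacement covers
only [-128, 127] bytes. A trivial sketch of that check (helper name is
illustrative, not an MC API):

  #include <cstdint>

  // A Jcc/JMP with a rel8 displacement can only reach targets within
  // [-128, 127] bytes of the end of the instruction; anything farther needs
  // the rel32 form, which is what RelaxInstruction switches to.
  static bool fitsInRel8(int64_t displacement) {
    return displacement >= INT8_MIN && displacement <= INT8_MAX;
  }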
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86AsmPrinter.cpp b/libclamav/c++/llvm/lib/Target/X86/X86AsmPrinter.cpp
new file mode 100644
index 0000000..20110ad
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -0,0 +1,727 @@
+//===-- X86AsmPrinter.cpp - Convert X86 LLVM code to AT&T assembly --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a printer that converts from our internal representation
+// of machine-dependent LLVM code to X86 machine code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86AsmPrinter.h"
+#include "AsmPrinter/X86ATTInstPrinter.h"
+#include "AsmPrinter/X86IntelInstPrinter.h"
+#include "X86MCInstLower.h"
+#include "X86.h"
+#include "X86COFFMachineModuleInfo.h"
+#include "X86MachineFunctionInfo.h"
+#include "X86TargetMachine.h"
+#include "llvm/CallingConv.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/Type.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Assembly/Writer.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/Support/COFF.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Target/Mangler.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRegistry.h"
+#include "llvm/ADT/SmallString.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Primitive Helper Functions.
+//===----------------------------------------------------------------------===//
+
+void X86AsmPrinter::PrintPICBaseSymbol(raw_ostream &O) const {
+ const TargetLowering *TLI = TM.getTargetLowering();
+ O << *static_cast<const X86TargetLowering*>(TLI)->getPICBaseSymbol(MF,
+ OutContext);
+}
+
+/// runOnMachineFunction - Emit the function body.
+///
+bool X86AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
+ SetupMachineFunction(MF);
+
+ if (Subtarget->isTargetCOFF()) {
+ bool Intrn = MF.getFunction()->hasInternalLinkage();
+ OutStreamer.BeginCOFFSymbolDef(CurrentFnSym);
+ OutStreamer.EmitCOFFSymbolStorageClass(Intrn ? COFF::IMAGE_SYM_CLASS_STATIC
+ : COFF::IMAGE_SYM_CLASS_EXTERNAL);
+ OutStreamer.EmitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
+ << COFF::SCT_COMPLEX_TYPE_SHIFT);
+ OutStreamer.EndCOFFSymbolDef();
+ }
+
+ // Have common code print out the function header with linkage info etc.
+ EmitFunctionHeader();
+
+ // Emit the rest of the function body.
+ EmitFunctionBody();
+
+ // We didn't modify anything.
+ return false;
+}
+
+/// printSymbolOperand - Print a raw symbol reference operand. This handles
+/// jump tables, constant pools, global address and external symbols, all of
+/// which print to a label with various suffixes for relocation types etc.
+void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
+ raw_ostream &O) {
+ switch (MO.getType()) {
+ default: llvm_unreachable("unknown symbol type!");
+ case MachineOperand::MO_JumpTableIndex:
+ O << *GetJTISymbol(MO.getIndex());
+ break;
+ case MachineOperand::MO_ConstantPoolIndex:
+ O << *GetCPISymbol(MO.getIndex());
+ printOffset(MO.getOffset(), O);
+ break;
+ case MachineOperand::MO_GlobalAddress: {
+ const GlobalValue *GV = MO.getGlobal();
+
+ MCSymbol *GVSym;
+ if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB)
+ GVSym = GetSymbolWithGlobalValueBase(GV, "$stub");
+ else if (MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
+ MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE ||
+ MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE)
+ GVSym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
+ else
+ GVSym = Mang->getSymbol(GV);
+
+ // Handle dllimport linkage.
+ if (MO.getTargetFlags() == X86II::MO_DLLIMPORT)
+ GVSym = OutContext.GetOrCreateSymbol(Twine("__imp_") + GVSym->getName());
+
+ if (MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
+ MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE) {
+ MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
+ MachineModuleInfoImpl::StubValueTy &StubSym =
+ MMI->getObjFileInfo<MachineModuleInfoMachO>().getGVStubEntry(Sym);
+ if (StubSym.getPointer() == 0)
+ StubSym = MachineModuleInfoImpl::
+ StubValueTy(Mang->getSymbol(GV), !GV->hasInternalLinkage());
+ } else if (MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE){
+ MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
+ MachineModuleInfoImpl::StubValueTy &StubSym =
+ MMI->getObjFileInfo<MachineModuleInfoMachO>().getHiddenGVStubEntry(Sym);
+ if (StubSym.getPointer() == 0)
+ StubSym = MachineModuleInfoImpl::
+ StubValueTy(Mang->getSymbol(GV), !GV->hasInternalLinkage());
+ } else if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB) {
+ MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$stub");
+ MachineModuleInfoImpl::StubValueTy &StubSym =
+ MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
+ if (StubSym.getPointer() == 0)
+ StubSym = MachineModuleInfoImpl::
+ StubValueTy(Mang->getSymbol(GV), !GV->hasInternalLinkage());
+ }
+
+ // If the name begins with a dollar-sign, enclose it in parens. We do this
+ // to avoid having it look like an integer immediate to the assembler.
+ if (GVSym->getName()[0] != '$')
+ O << *GVSym;
+ else
+ O << '(' << *GVSym << ')';
+ printOffset(MO.getOffset(), O);
+ break;
+ }
+ case MachineOperand::MO_ExternalSymbol: {
+ const MCSymbol *SymToPrint;
+ if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB) {
+ SmallString<128> TempNameStr;
+ TempNameStr += StringRef(MO.getSymbolName());
+ TempNameStr += StringRef("$stub");
+
+ MCSymbol *Sym = GetExternalSymbolSymbol(TempNameStr.str());
+ MachineModuleInfoImpl::StubValueTy &StubSym =
+ MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
+ if (StubSym.getPointer() == 0) {
+ TempNameStr.erase(TempNameStr.end()-5, TempNameStr.end());
+ StubSym = MachineModuleInfoImpl::
+ StubValueTy(OutContext.GetOrCreateSymbol(TempNameStr.str()),
+ true);
+ }
+ SymToPrint = StubSym.getPointer();
+ } else {
+ SymToPrint = GetExternalSymbolSymbol(MO.getSymbolName());
+ }
+
+ // If the name begins with a dollar-sign, enclose it in parens. We do this
+ // to avoid having it look like an integer immediate to the assembler.
+ if (SymToPrint->getName()[0] != '$')
+ O << *SymToPrint;
+ else
+ O << '(' << *SymToPrint << ')';
+ break;
+ }
+ }
+
+ switch (MO.getTargetFlags()) {
+ default:
+ llvm_unreachable("Unknown target flag on GV operand");
+ case X86II::MO_NO_FLAG: // No flag.
+ break;
+ case X86II::MO_DARWIN_NONLAZY:
+ case X86II::MO_DLLIMPORT:
+ case X86II::MO_DARWIN_STUB:
+ // These affect the name of the symbol, not any suffix.
+ break;
+ case X86II::MO_GOT_ABSOLUTE_ADDRESS:
+ O << " + [.-";
+ PrintPICBaseSymbol(O);
+ O << ']';
+ break;
+ case X86II::MO_PIC_BASE_OFFSET:
+ case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
+ case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
+ O << '-';
+ PrintPICBaseSymbol(O);
+ break;
+ case X86II::MO_TLSGD: O << "@TLSGD"; break;
+ case X86II::MO_GOTTPOFF: O << "@GOTTPOFF"; break;
+ case X86II::MO_INDNTPOFF: O << "@INDNTPOFF"; break;
+ case X86II::MO_TPOFF: O << "@TPOFF"; break;
+ case X86II::MO_NTPOFF: O << "@NTPOFF"; break;
+ case X86II::MO_GOTPCREL: O << "@GOTPCREL"; break;
+ case X86II::MO_GOT: O << "@GOT"; break;
+ case X86II::MO_GOTOFF: O << "@GOTOFF"; break;
+ case X86II::MO_PLT: O << "@PLT"; break;
+ case X86II::MO_TLVP: O << "@TLVP"; break;
+ case X86II::MO_TLVP_PIC_BASE:
+ O << "@TLVP" << '-';
+ PrintPICBaseSymbol(O);
+ break;
+ }
+}
+
+/// print_pcrel_imm - This is used to print an immediate value that ends up
+/// being encoded as a pc-relative value. These print slightly differently, for
+/// example, a $ is not emitted.
+void X86AsmPrinter::print_pcrel_imm(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+ switch (MO.getType()) {
+ default: llvm_unreachable("Unknown pcrel immediate operand");
+ case MachineOperand::MO_Register:
+ // pc-relativeness was handled when computing the value in the reg.
+ printOperand(MI, OpNo, O);
+ return;
+ case MachineOperand::MO_Immediate:
+ O << MO.getImm();
+ return;
+ case MachineOperand::MO_MachineBasicBlock:
+ O << *MO.getMBB()->getSymbol();
+ return;
+ case MachineOperand::MO_GlobalAddress:
+ case MachineOperand::MO_ExternalSymbol:
+ printSymbolOperand(MO, O);
+ return;
+ }
+}
+
+
+void X86AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
+ raw_ostream &O, const char *Modifier) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+ switch (MO.getType()) {
+ default: llvm_unreachable("unknown operand type!");
+ case MachineOperand::MO_Register: {
+ O << '%';
+ unsigned Reg = MO.getReg();
+ if (Modifier && strncmp(Modifier, "subreg", strlen("subreg")) == 0) {
+ EVT VT = (strcmp(Modifier+6,"64") == 0) ?
+ MVT::i64 : ((strcmp(Modifier+6, "32") == 0) ? MVT::i32 :
+ ((strcmp(Modifier+6,"16") == 0) ? MVT::i16 : MVT::i8));
+ Reg = getX86SubSuperRegister(Reg, VT);
+ }
+ O << X86ATTInstPrinter::getRegisterName(Reg);
+ return;
+ }
+
+ case MachineOperand::MO_Immediate:
+ O << '$' << MO.getImm();
+ return;
+
+ case MachineOperand::MO_JumpTableIndex:
+ case MachineOperand::MO_ConstantPoolIndex:
+ case MachineOperand::MO_GlobalAddress:
+ case MachineOperand::MO_ExternalSymbol: {
+ O << '$';
+ printSymbolOperand(MO, O);
+ break;
+ }
+ }
+}
+
+void X86AsmPrinter::printSSECC(const MachineInstr *MI, unsigned Op,
+ raw_ostream &O) {
+ unsigned char value = MI->getOperand(Op).getImm();
+ assert(value <= 7 && "Invalid ssecc argument!");
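+ // These are the SSE/SSE2 compare predicate suffixes; the instruction
+ // patterns splice them into mnemonics such as "cmpltss" (predicate 1).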
+ switch (value) {
+ case 0: O << "eq"; break;
+ case 1: O << "lt"; break;
+ case 2: O << "le"; break;
+ case 3: O << "unord"; break;
+ case 4: O << "neq"; break;
+ case 5: O << "nlt"; break;
+ case 6: O << "nle"; break;
+ case 7: O << "ord"; break;
+ }
+}
+
+void X86AsmPrinter::printLeaMemReference(const MachineInstr *MI, unsigned Op,
+ raw_ostream &O, const char *Modifier) {
+ const MachineOperand &BaseReg = MI->getOperand(Op);
+ const MachineOperand &IndexReg = MI->getOperand(Op+2);
+ const MachineOperand &DispSpec = MI->getOperand(Op+3);
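+ // AT&T memory operands have the form disp(base,index,scale); for example
+ // disp=-8, base=%rbp, index=%rcx, scale=4 prints as "-8(%rbp,%rcx,4)".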
+
+ // If we really don't want to print out (rip), don't.
+ bool HasBaseReg = BaseReg.getReg() != 0;
+ if (HasBaseReg && Modifier && !strcmp(Modifier, "no-rip") &&
+ BaseReg.getReg() == X86::RIP)
+ HasBaseReg = false;
+
+ // HasParenPart - True if we will print out the () part of the mem ref.
+ bool HasParenPart = IndexReg.getReg() || HasBaseReg;
+
+ if (DispSpec.isImm()) {
+ int DispVal = DispSpec.getImm();
+ if (DispVal || !HasParenPart)
+ O << DispVal;
+ } else {
+ assert(DispSpec.isGlobal() || DispSpec.isCPI() ||
+ DispSpec.isJTI() || DispSpec.isSymbol());
+ printSymbolOperand(MI->getOperand(Op+3), O);
+ }
+
+ if (HasParenPart) {
+ assert(IndexReg.getReg() != X86::ESP &&
+ "X86 doesn't allow scaling by ESP");
+
+ O << '(';
+ if (HasBaseReg)
+ printOperand(MI, Op, O, Modifier);
+
+ if (IndexReg.getReg()) {
+ O << ',';
+ printOperand(MI, Op+2, O, Modifier);
+ unsigned ScaleVal = MI->getOperand(Op+1).getImm();
+ if (ScaleVal != 1)
+ O << ',' << ScaleVal;
+ }
+ O << ')';
+ }
+}
+
+void X86AsmPrinter::printMemReference(const MachineInstr *MI, unsigned Op,
+ raw_ostream &O, const char *Modifier) {
+ assert(isMem(MI, Op) && "Invalid memory reference!");
+ const MachineOperand &Segment = MI->getOperand(Op+4);
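+ // A non-zero segment register becomes an explicit "%fs:"/"%gs:"-style prefix.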
+ if (Segment.getReg()) {
+ printOperand(MI, Op+4, O, Modifier);
+ O << ':';
+ }
+ printLeaMemReference(MI, Op, O, Modifier);
+}
+
+void X86AsmPrinter::printPICLabel(const MachineInstr *MI, unsigned Op,
+ raw_ostream &O) {
+ PrintPICBaseSymbol(O);
+ O << '\n';
+ PrintPICBaseSymbol(O);
+ O << ':';
+}
+
+bool X86AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
+ raw_ostream &O) {
+ unsigned Reg = MO.getReg();
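+ // The mode letters mirror GCC's x86 inline-asm operand modifiers; e.g. for
+ // %rax, 'b' selects %al, 'h' %ah, 'w' %ax, 'k' %eax and 'q' %rax.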
+ switch (Mode) {
+ default: return true; // Unknown mode.
+ case 'b': // Print QImode register
+ Reg = getX86SubSuperRegister(Reg, MVT::i8);
+ break;
+ case 'h': // Print QImode high register
+ Reg = getX86SubSuperRegister(Reg, MVT::i8, true);
+ break;
+ case 'w': // Print HImode register
+ Reg = getX86SubSuperRegister(Reg, MVT::i16);
+ break;
+ case 'k': // Print SImode register
+ Reg = getX86SubSuperRegister(Reg, MVT::i32);
+ break;
+ case 'q': // Print DImode register
+ Reg = getX86SubSuperRegister(Reg, MVT::i64);
+ break;
+ }
+
+ O << '%' << X86ATTInstPrinter::getRegisterName(Reg);
+ return false;
+}
+
+/// PrintAsmOperand - Print out an operand for an inline asm expression.
+///
+bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant,
+ const char *ExtraCode, raw_ostream &O) {
+ // Does this asm operand have a single letter operand modifier?
+ if (ExtraCode && ExtraCode[0]) {
+ if (ExtraCode[1] != 0) return true; // Unknown modifier.
+
+ const MachineOperand &MO = MI->getOperand(OpNo);
+
+ switch (ExtraCode[0]) {
+ default: return true; // Unknown modifier.
+ case 'a': // This is an address. Currently only 'i' and 'r' are expected.
+ if (MO.isImm()) {
+ O << MO.getImm();
+ return false;
+ }
+ if (MO.isGlobal() || MO.isCPI() || MO.isJTI() || MO.isSymbol()) {
+ printSymbolOperand(MO, O);
+ if (Subtarget->isPICStyleRIPRel())
+ O << "(%rip)";
+ return false;
+ }
+ if (MO.isReg()) {
+ O << '(';
+ printOperand(MI, OpNo, O);
+ O << ')';
+ return false;
+ }
+ return true;
+
+ case 'c': // Don't print "$" before a global var name or constant.
+ if (MO.isImm())
+ O << MO.getImm();
+ else if (MO.isGlobal() || MO.isCPI() || MO.isJTI() || MO.isSymbol())
+ printSymbolOperand(MO, O);
+ else
+ printOperand(MI, OpNo, O);
+ return false;
+
+ case 'A': // Print '*' before a register (it must be a register)
+ if (MO.isReg()) {
+ O << '*';
+ printOperand(MI, OpNo, O);
+ return false;
+ }
+ return true;
+
+ case 'b': // Print QImode register
+ case 'h': // Print QImode high register
+ case 'w': // Print HImode register
+ case 'k': // Print SImode register
+ case 'q': // Print DImode register
+ if (MO.isReg())
+ return printAsmMRegister(MO, ExtraCode[0], O);
+ printOperand(MI, OpNo, O);
+ return false;
+
+ case 'P': // This is the operand of a call, treat specially.
+ print_pcrel_imm(MI, OpNo, O);
+ return false;
+
+ case 'n': // Negate the immediate or print a '-' before the operand.
+ // Note: this is a temporary solution. It should be handled target
+ // independently as part of the 'MC' work.
+ if (MO.isImm()) {
+ O << -MO.getImm();
+ return false;
+ }
+ O << '-';
+ }
+ }
+
+ printOperand(MI, OpNo, O);
+ return false;
+}
+
+bool X86AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
+ unsigned OpNo, unsigned AsmVariant,
+ const char *ExtraCode,
+ raw_ostream &O) {
+ if (ExtraCode && ExtraCode[0]) {
+ if (ExtraCode[1] != 0) return true; // Unknown modifier.
+
+ switch (ExtraCode[0]) {
+ default: return true; // Unknown modifier.
+ case 'b': // Print QImode register
+ case 'h': // Print QImode high register
+ case 'w': // Print HImode register
+ case 'k': // Print SImode register
+ case 'q': // Print DImode register
+ // These only apply to registers, ignore on mem.
+ break;
+ case 'P': // Don't print @PLT, but do print as memory.
+ printMemReference(MI, OpNo, O, "no-rip");
+ return false;
+ }
+ }
+ printMemReference(MI, OpNo, O);
+ return false;
+}
+
+void X86AsmPrinter::EmitStartOfAsmFile(Module &M) {
+ if (Subtarget->isTargetDarwin())
+ OutStreamer.SwitchSection(getObjFileLowering().getTextSection());
+}
+
+
+void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
+ if (Subtarget->isTargetDarwin()) {
+ // All darwin targets use mach-o.
+ MachineModuleInfoMachO &MMIMacho =
+ MMI->getObjFileInfo<MachineModuleInfoMachO>();
+
+ // Output stubs for dynamically-linked functions.
+ MachineModuleInfoMachO::SymbolListTy Stubs;
+
+ Stubs = MMIMacho.GetFnStubList();
+ if (!Stubs.empty()) {
+ const MCSection *TheSection =
+ OutContext.getMachOSection("__IMPORT", "__jump_table",
+ MCSectionMachO::S_SYMBOL_STUBS |
+ MCSectionMachO::S_ATTR_SELF_MODIFYING_CODE |
+ MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ 5, SectionKind::getMetadata());
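+ // The trailing 5 is the Mach-O "reserved2" field, i.e. the size in bytes
+ // of each stub; it matches the five hlt bytes emitted per entry below.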
+ OutStreamer.SwitchSection(TheSection);
+
+ for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
+ // L_foo$stub:
+ OutStreamer.EmitLabel(Stubs[i].first);
+ // .indirect_symbol _foo
+ OutStreamer.EmitSymbolAttribute(Stubs[i].second.getPointer(),
+ MCSA_IndirectSymbol);
+ // hlt; hlt; hlt; hlt; hlt (each hlt is 0xf4, i.e. -12 as a signed char).
+ const char HltInsts[] = { -12, -12, -12, -12, -12 };
+ OutStreamer.EmitBytes(StringRef(HltInsts, 5), 0/*addrspace*/);
+ }
+
+ Stubs.clear();
+ OutStreamer.AddBlankLine();
+ }
+
+ // Output stubs for external and common global variables.
+ Stubs = MMIMacho.GetGVStubList();
+ if (!Stubs.empty()) {
+ const MCSection *TheSection =
+ OutContext.getMachOSection("__IMPORT", "__pointers",
+ MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS,
+ SectionKind::getMetadata());
+ OutStreamer.SwitchSection(TheSection);
+
+ for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
+ // L_foo$non_lazy_ptr:
+ OutStreamer.EmitLabel(Stubs[i].first);
+ // .indirect_symbol _foo
+ MachineModuleInfoImpl::StubValueTy &MCSym = Stubs[i].second;
+ OutStreamer.EmitSymbolAttribute(MCSym.getPointer(),
+ MCSA_IndirectSymbol);
+ // .long 0
+ if (MCSym.getInt())
+ // External to current translation unit.
+ OutStreamer.EmitIntValue(0, 4/*size*/, 0/*addrspace*/);
+ else
+ // Internal to current translation unit.
+ //
+ // When we place the LSDA into the TEXT section, the type info
+ // pointers need to be indirect and pc-rel. We accomplish this by
+ // using NLPs. However, sometimes the types are local to the file. So
+ // we need to fill in the value for the NLP in those cases.
+ OutStreamer.EmitValue(MCSymbolRefExpr::Create(MCSym.getPointer(),
+ OutContext),
+ 4/*size*/, 0/*addrspace*/);
+ }
+ Stubs.clear();
+ OutStreamer.AddBlankLine();
+ }
+
+ Stubs = MMIMacho.GetHiddenGVStubList();
+ if (!Stubs.empty()) {
+ OutStreamer.SwitchSection(getObjFileLowering().getDataSection());
+ EmitAlignment(2);
+
+ for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
+ // L_foo$non_lazy_ptr:
+ OutStreamer.EmitLabel(Stubs[i].first);
+ // .long _foo
+ OutStreamer.EmitValue(MCSymbolRefExpr::
+ Create(Stubs[i].second.getPointer(),
+ OutContext),
+ 4/*size*/, 0/*addrspace*/);
+ }
+ Stubs.clear();
+ OutStreamer.AddBlankLine();
+ }
+
+ // Funny Darwin hack: This flag tells the linker that no global symbols
+ // contain code that falls through to other global symbols (e.g. the obvious
+ // implementation of multiple entry points). If this doesn't occur, the
+ // linker can safely perform dead code stripping. Since LLVM never
+ // generates code that does this, it is always safe to set.
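+ // (On the assembler level this is the ".subsections_via_symbols" directive.)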
+ OutStreamer.EmitAssemblerFlag(MCAF_SubsectionsViaSymbols);
+ }
+
+ if (Subtarget->isTargetCOFF()) {
+ X86COFFMachineModuleInfo &COFFMMI =
+ MMI->getObjFileInfo<X86COFFMachineModuleInfo>();
+
+ // Emit type information for external functions
+ typedef X86COFFMachineModuleInfo::externals_iterator externals_iterator;
+ for (externals_iterator I = COFFMMI.externals_begin(),
+ E = COFFMMI.externals_end();
+ I != E; ++I) {
+ OutStreamer.BeginCOFFSymbolDef(CurrentFnSym);
+ OutStreamer.EmitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
+ OutStreamer.EmitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
+ << COFF::SCT_COMPLEX_TYPE_SHIFT);
+ OutStreamer.EndCOFFSymbolDef();
+ }
+
+ // Necessary for dllexport support
+ std::vector<const MCSymbol*> DLLExportedFns, DLLExportedGlobals;
+
+ const TargetLoweringObjectFileCOFF &TLOFCOFF =
+ static_cast<const TargetLoweringObjectFileCOFF&>(getObjFileLowering());
+
+ for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I)
+ if (I->hasDLLExportLinkage())
+ DLLExportedFns.push_back(Mang->getSymbol(I));
+
+ for (Module::const_global_iterator I = M.global_begin(),
+ E = M.global_end(); I != E; ++I)
+ if (I->hasDLLExportLinkage())
+ DLLExportedGlobals.push_back(Mang->getSymbol(I));
+
+ // Output linker support code for dllexported globals on windows.
+ if (!DLLExportedGlobals.empty() || !DLLExportedFns.empty()) {
+ OutStreamer.SwitchSection(TLOFCOFF.getDrectveSection());
+ SmallString<128> name;
+ for (unsigned i = 0, e = DLLExportedGlobals.size(); i != e; ++i) {
+ if (Subtarget->isTargetWindows())
+ name = " /EXPORT:";
+ else
+ name = " -export:";
+ name += DLLExportedGlobals[i]->getName();
+ if (Subtarget->isTargetWindows())
+ name += ",DATA";
+ else
+ name += ",data";
+ OutStreamer.EmitBytes(name, 0);
+ }
+
+ for (unsigned i = 0, e = DLLExportedFns.size(); i != e; ++i) {
+ if (Subtarget->isTargetWindows())
+ name = " /EXPORT:";
+ else
+ name = " -export:";
+ name += DLLExportedFns[i]->getName();
+ OutStreamer.EmitBytes(name, 0);
+ }
+ }
+ }
+
+ if (Subtarget->isTargetELF()) {
+ const TargetLoweringObjectFileELF &TLOFELF =
+ static_cast<const TargetLoweringObjectFileELF &>(getObjFileLowering());
+
+ MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
+
+ // Output stubs for external and common global variables.
+ MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
+ if (!Stubs.empty()) {
+ OutStreamer.SwitchSection(TLOFELF.getDataRelSection());
+ const TargetData *TD = TM.getTargetData();
+
+ for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
+ OutStreamer.EmitLabel(Stubs[i].first);
+ OutStreamer.EmitSymbolValue(Stubs[i].second.getPointer(),
+ TD->getPointerSize(), 0);
+ }
+ Stubs.clear();
+ }
+ }
+}
+
+MachineLocation
+X86AsmPrinter::getDebugValueLocation(const MachineInstr *MI) const {
+ MachineLocation Location;
+ assert (MI->getNumOperands() == 7 && "Invalid no. of machine operands!");
+ // Frame address. Currently handles register +- offset only.
+
+ if (MI->getOperand(0).isReg() && MI->getOperand(3).isImm())
+ Location.set(MI->getOperand(0).getReg(), MI->getOperand(3).getImm());
+ else {
+ DEBUG(dbgs() << "DBG_VALUE instruction ignored! " << *MI << "\n");
+ }
+ return Location;
+}
+
+void X86AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
+ raw_ostream &O) {
+ // Only the target-dependent form of DBG_VALUE should get here.
+ // Referencing the offset and metadata as NOps-2 and NOps-1 is
+ // probably portable to other targets; frame pointer location is not.
+ unsigned NOps = MI->getNumOperands();
+ assert(NOps==7);
+ O << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
+ // cast away const; DIVariable etc. do not take const operands for some reason.
+ DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps-1).getMetadata()));
+ if (V.getContext().isSubprogram())
+ O << DISubprogram(V.getContext()).getDisplayName() << ":";
+ O << V.getName();
+ O << " <- ";
+ // Frame address. Currently handles register +- offset only.
+ O << '[';
+ if (MI->getOperand(0).isReg() && MI->getOperand(0).getReg())
+ printOperand(MI, 0, O);
+ else
+ O << "undef";
+ O << '+'; printOperand(MI, 3, O);
+ O << ']';
+ O << "+";
+ printOperand(MI, NOps-2, O);
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// Target Registry Stuff
+//===----------------------------------------------------------------------===//
+
+static MCInstPrinter *createX86MCInstPrinter(const Target &T,
+ unsigned SyntaxVariant,
+ const MCAsmInfo &MAI) {
+ if (SyntaxVariant == 0)
+ return new X86ATTInstPrinter(MAI);
+ if (SyntaxVariant == 1)
+ return new X86IntelInstPrinter(MAI);
+ return 0;
+}
+
+// Force static initialization.
+extern "C" void LLVMInitializeX86AsmPrinter() {
+ RegisterAsmPrinter<X86AsmPrinter> X(TheX86_32Target);
+ RegisterAsmPrinter<X86AsmPrinter> Y(TheX86_64Target);
+
+ TargetRegistry::RegisterMCInstPrinter(TheX86_32Target,createX86MCInstPrinter);
+ TargetRegistry::RegisterMCInstPrinter(TheX86_64Target,createX86MCInstPrinter);
+}
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86AsmPrinter.h b/libclamav/c++/llvm/lib/Target/X86/X86AsmPrinter.h
new file mode 100644
index 0000000..e61be66
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Target/X86/X86AsmPrinter.h
@@ -0,0 +1,89 @@
+//===-- X86AsmPrinter.h - Convert X86 LLVM code to assembly -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// AT&T assembly code printer class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef X86ASMPRINTER_H
+#define X86ASMPRINTER_H
+
+#include "X86.h"
+#include "X86MachineFunctionInfo.h"
+#include "X86TargetMachine.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+
+class MachineJumpTableInfo;
+class MCContext;
+class MCInst;
+class MCStreamer;
+class MCSymbol;
+
+class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
+ const X86Subtarget *Subtarget;
+ public:
+ explicit X86AsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
+ : AsmPrinter(TM, Streamer) {
+ Subtarget = &TM.getSubtarget<X86Subtarget>();
+ }
+
+ virtual const char *getPassName() const {
+ return "X86 AT&T-Style Assembly Printer";
+ }
+
+ const X86Subtarget &getSubtarget() const { return *Subtarget; }
+
+ virtual void EmitStartOfAsmFile(Module &M);
+
+ virtual void EmitEndOfAsmFile(Module &M);
+
+ virtual void EmitInstruction(const MachineInstr *MI);
+
+ void printSymbolOperand(const MachineOperand &MO, raw_ostream &O);
+
+ // These methods are used by the tablegen'erated instruction printer.
+ void printOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O,
+ const char *Modifier = 0);
+ void print_pcrel_imm(const MachineInstr *MI, unsigned OpNo, raw_ostream &O);
+
+ bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
+ bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS);
+ bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS);
+
+ void printMachineInstruction(const MachineInstr *MI);
+ void printSSECC(const MachineInstr *MI, unsigned Op, raw_ostream &O);
+ void printMemReference(const MachineInstr *MI, unsigned Op, raw_ostream &O,
+ const char *Modifier=NULL);
+ void printLeaMemReference(const MachineInstr *MI, unsigned Op, raw_ostream &O,
+ const char *Modifier=NULL);
+
+ void printPICLabel(const MachineInstr *MI, unsigned Op, raw_ostream &O);
+
+ void PrintPICBaseSymbol(raw_ostream &O) const;
+
+ bool runOnMachineFunction(MachineFunction &F);
+
+ void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
+
+ MachineLocation getDebugValueLocation(const MachineInstr *MI) const;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86COFF.h b/libclamav/c++/llvm/lib/Target/X86/X86COFF.h
deleted file mode 100644
index 0a8e4e6..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/X86COFF.h
+++ /dev/null
@@ -1,95 +0,0 @@
-//===--- X86COFF.h - Some definitions from COFF documentations ------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file just defines some symbols found in COFF documentation. They are
-// used to emit function type information for COFF targets (Cygwin/Mingw32).
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef X86COFF_H
-#define X86COFF_H
-
-namespace COFF
-{
-/// Storage class tells where and what the symbol represents
-enum StorageClass {
- C_EFCN = -1, ///< Physical end of function
- C_NULL = 0, ///< No symbol
- C_AUTO = 1, ///< External definition
- C_EXT = 2, ///< External symbol
- C_STAT = 3, ///< Static
- C_REG = 4, ///< Register variable
- C_EXTDEF = 5, ///< External definition
- C_LABEL = 6, ///< Label
- C_ULABEL = 7, ///< Undefined label
- C_MOS = 8, ///< Member of structure
- C_ARG = 9, ///< Function argument
- C_STRTAG = 10, ///< Structure tag
- C_MOU = 11, ///< Member of union
- C_UNTAG = 12, ///< Union tag
- C_TPDEF = 13, ///< Type definition
- C_USTATIC = 14, ///< Undefined static
- C_ENTAG = 15, ///< Enumeration tag
- C_MOE = 16, ///< Member of enumeration
- C_REGPARM = 17, ///< Register parameter
- C_FIELD = 18, ///< Bit field
-
- C_BLOCK = 100, ///< ".bb" or ".eb" - beginning or end of block
- C_FCN = 101, ///< ".bf" or ".ef" - beginning or end of function
- C_EOS = 102, ///< End of structure
- C_FILE = 103, ///< File name
- C_LINE = 104, ///< Line number, reformatted as symbol
- C_ALIAS = 105, ///< Duplicate tag
- C_HIDDEN = 106 ///< External symbol in dmert public lib
-};
-
-/// The type of the symbol. This is made up of a base type and a derived type.
-/// For example, pointer to int is "pointer to T" and "int"
-enum SymbolType {
- T_NULL = 0, ///< No type info
- T_ARG = 1, ///< Void function argument (only used by compiler)
- T_VOID = 1, ///< The same as above. Just named differently in some specs.
- T_CHAR = 2, ///< Character
- T_SHORT = 3, ///< Short integer
- T_INT = 4, ///< Integer
- T_LONG = 5, ///< Long integer
- T_FLOAT = 6, ///< Floating point
- T_DOUBLE = 7, ///< Double word
- T_STRUCT = 8, ///< Structure
- T_UNION = 9, ///< Union
- T_ENUM = 10, ///< Enumeration
- T_MOE = 11, ///< Member of enumeration
- T_UCHAR = 12, ///< Unsigned character
- T_USHORT = 13, ///< Unsigned short
- T_UINT = 14, ///< Unsigned integer
- T_ULONG = 15 ///< Unsigned long
-};
-
-/// Derived type of symbol
-enum SymbolDerivedType {
- DT_NON = 0, ///< No derived type
- DT_PTR = 1, ///< Pointer to T
- DT_FCN = 2, ///< Function returning T
- DT_ARY = 3 ///< Array of T
-};
-
-/// Masks for extracting parts of type
-enum SymbolTypeMasks {
- N_BTMASK = 017, ///< Mask for base type
- N_TMASK = 060 ///< Mask for derived type
-};
-
-/// Offsets of parts of type
-enum Shifts {
- N_BTSHFT = 4 ///< Type is formed as (base + derived << N_BTSHIFT)
-};
-
-}
-
-#endif // X86COFF_H
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp b/libclamav/c++/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp
index ab67acb..4326814 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp
+++ b/libclamav/c++/llvm/lib/Target/X86/X86COFFMachineModuleInfo.cpp
@@ -12,80 +12,9 @@
//===----------------------------------------------------------------------===//
#include "X86COFFMachineModuleInfo.h"
-#include "X86MachineFunctionInfo.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-X86COFFMachineModuleInfo::X86COFFMachineModuleInfo(const MachineModuleInfo &) {
-}
-X86COFFMachineModuleInfo::~X86COFFMachineModuleInfo() {
-}
-
-void X86COFFMachineModuleInfo::addExternalFunction(const StringRef& Name) {
- CygMingStubs.insert(Name);
-}
-
-/// DecorateCygMingName - Apply various name decorations if the function uses
-/// stdcall or fastcall calling convention.
-void X86COFFMachineModuleInfo::DecorateCygMingName(SmallVectorImpl<char> &Name,
- const GlobalValue *GV,
- const TargetData &TD) {
- const Function *F = dyn_cast<Function>(GV);
- if (!F) return;
-
- // We don't want to decorate non-stdcall or non-fastcall functions right now
- CallingConv::ID CC = F->getCallingConv();
- if (CC != CallingConv::X86_StdCall && CC != CallingConv::X86_FastCall)
- return;
-
- unsigned ArgWords = 0;
- DenseMap<const Function*, unsigned>::const_iterator item = FnArgWords.find(F);
- if (item == FnArgWords.end()) {
- // Calculate arguments sizes
- for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
- AI != AE; ++AI) {
- const Type* Ty = AI->getType();
- // 'Dereference' type in case of byval parameter attribute
- if (AI->hasByValAttr())
- Ty = cast<PointerType>(Ty)->getElementType();
-
- // Size should be aligned to DWORD boundary
- ArgWords += ((TD.getTypeAllocSize(Ty) + 3)/4)*4;
- }
-
- FnArgWords[F] = ArgWords;
- } else
- ArgWords = item->second;
-
- const FunctionType *FT = F->getFunctionType();
- // "Pure" variadic functions do not receive @0 suffix.
- if (!FT->isVarArg() || FT->getNumParams() == 0 ||
- (FT->getNumParams() == 1 && F->hasStructRetAttr()))
- raw_svector_ostream(Name) << '@' << ArgWords;
-
- if (CC == CallingConv::X86_FastCall) {
- if (Name[0] == '_')
- Name[0] = '@';
- else
- Name.insert(Name.begin(), '@');
- }
+X86COFFMachineModuleInfo::~X86COFFMachineModuleInfo() {
}
-/// DecorateCygMingName - Query FunctionInfoMap and use this information for
-/// various name decorations for Cygwin and MingW.
-void X86COFFMachineModuleInfo::DecorateCygMingName(MCSymbol *&Name,
- MCContext &Ctx,
- const GlobalValue *GV,
- const TargetData &TD) {
- SmallString<128> NameStr(Name->getName().begin(), Name->getName().end());
- DecorateCygMingName(NameStr, GV, TD);
-
- Name = Ctx.GetOrCreateSymbol(NameStr.str());
-}
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h b/libclamav/c++/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h
index 9de3dcd..98ab2a6 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h
+++ b/libclamav/c++/llvm/lib/Target/X86/X86COFFMachineModuleInfo.h
@@ -15,7 +15,7 @@
#define X86COFF_MACHINEMODULEINFO_H
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/DenseSet.h"
#include "X86MachineFunctionInfo.h"
namespace llvm {
@@ -25,21 +25,18 @@ namespace llvm {
/// X86COFFMachineModuleInfo - This is a MachineModuleInfoImpl implementation
/// for X86 COFF targets.
class X86COFFMachineModuleInfo : public MachineModuleInfoImpl {
- StringSet<> CygMingStubs;
- DenseMap<const Function*, unsigned> FnArgWords;
+ DenseSet<MCSymbol const *> Externals;
public:
- X86COFFMachineModuleInfo(const MachineModuleInfo &);
- ~X86COFFMachineModuleInfo();
-
- void DecorateCygMingName(MCSymbol* &Name, MCContext &Ctx,
- const GlobalValue *GV, const TargetData &TD);
- void DecorateCygMingName(SmallVectorImpl<char> &Name, const GlobalValue *GV,
- const TargetData &TD);
-
- void addExternalFunction(const StringRef& Name);
- typedef StringSet<>::const_iterator stub_iterator;
- stub_iterator stub_begin() const { return CygMingStubs.begin(); }
- stub_iterator stub_end() const { return CygMingStubs.end(); }
+ X86COFFMachineModuleInfo(const MachineModuleInfo &) {}
+ virtual ~X86COFFMachineModuleInfo();
+
+ void addExternalFunction(MCSymbol* Symbol) {
+ Externals.insert(Symbol);
+ }
+
+ typedef DenseSet<MCSymbol const *>::const_iterator externals_iterator;
+ externals_iterator externals_begin() const { return Externals.begin(); }
+ externals_iterator externals_end() const { return Externals.end(); }
};
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86CallingConv.td b/libclamav/c++/llvm/lib/Target/X86/X86CallingConv.td
index fd15efd..e3409ef 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86CallingConv.td
+++ b/libclamav/c++/llvm/lib/Target/X86/X86CallingConv.td
@@ -33,16 +33,22 @@ def RetCC_X86Common : CallingConv<[
CCIfType<[i16], CCAssignToReg<[AX, DX]>>,
CCIfType<[i32], CCAssignToReg<[EAX, EDX]>>,
CCIfType<[i64], CCAssignToReg<[RAX, RDX]>>,
-
- // Vector types are returned in XMM0 and XMM1, when they fit. XMMM2 and XMM3
+
+ // Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
// can only be used by ABI non-compliant code. If the target doesn't have XMM
// registers, it won't have vector types.
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
+ // 256-bit vectors are returned in YMM0 and YMM1, when they fit. YMM2 and YMM3
+ // can only be used by ABI non-compliant code. This vector type is only
+ // supported while using the AVX target feature.
+ CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
+ CCIfSubtarget<"hasAVX()", CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>>,
+
// MMX vector types are always returned in MM0. If the target doesn't have
// MM0, it doesn't support these vector types.
- CCIfType<[v8i8, v4i16, v2i32, v1i64, v2f32], CCAssignToReg<[MM0]>>,
+ CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToReg<[MM0]>>,
// Long double types are always returned in ST0 (even with SSE).
CCIfType<[f80], CCAssignToReg<[ST0, ST1]>>
@@ -89,7 +95,7 @@ def RetCC_X86_64_C : CallingConv<[
// returned in RAX. This disagrees with ABI documentation but is bug
// compatible with gcc.
CCIfType<[v1i64], CCAssignToReg<[RAX]>>,
- CCIfType<[v8i8, v4i16, v2i32, v2f32], CCAssignToReg<[XMM0, XMM1]>>,
+ CCIfType<[v8i8, v4i16, v2i32], CCAssignToReg<[XMM0, XMM1]>>,
CCDelegateTo<RetCC_X86Common>
]>;
@@ -155,7 +161,7 @@ def CC_X86_64_C : CallingConv<[
// The first 8 MMX (except for v1i64) vector arguments are passed in XMM
// registers on Darwin.
- CCIfType<[v8i8, v4i16, v2i32, v2f32],
+ CCIfType<[v8i8, v4i16, v2i32],
CCIfSubtarget<"isTargetDarwin()",
CCIfSubtarget<"hasSSE2()",
CCPromoteToType<v2i64>>>>,
@@ -164,11 +170,16 @@ def CC_X86_64_C : CallingConv<[
CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
CCIfSubtarget<"hasSSE1()",
CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,
-
+
+ // The first 8 256-bit vector arguments are passed in YMM registers.
+ CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
+ CCIfSubtarget<"hasAVX()",
+ CCAssignToReg<[YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7]>>>,
+
// Integer/FP values get stored in stack slots that are 8 bytes in size and
// 8-byte aligned if there are no more registers to hold them.
CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,
-
+
// Long doubles get stack slots whose size and alignment depends on the
// subtarget.
CCIfType<[f80], CCAssignToStack<0, 0>>,
@@ -176,8 +187,12 @@ def CC_X86_64_C : CallingConv<[
// Vectors get 16-byte stack slots that are 16-byte aligned.
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
+ // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
+ CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
+ CCAssignToStack<32, 32>>,
+
// __m64 vectors get 8-byte stack slots that are 8-byte aligned.
- CCIfType<[v8i8, v4i16, v2i32, v1i64, v2f32], CCAssignToStack<8, 8>>
+ CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 8>>
]>;
// Calling convention used on Win64
@@ -195,7 +210,7 @@ def CC_X86_Win64_C : CallingConv<[
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,
// The first 4 MMX vector arguments are passed in GPRs.
- CCIfType<[v8i8, v4i16, v2i32, v1i64, v2f32],
+ CCIfType<[v8i8, v4i16, v2i32, v1i64],
CCBitConvertToType<i64>>,
// The first 4 integer arguments are passed in integer registers.
@@ -254,7 +269,7 @@ def CC_X86_32_Common : CallingConv<[
// The first 3 __m64 (except for v1i64) vector arguments are passed in mmx
// registers if the call is not a vararg call.
- CCIfNotVarArg<CCIfType<[v8i8, v4i16, v2i32, v2f32],
+ CCIfNotVarArg<CCIfType<[v8i8, v4i16, v2i32],
CCAssignToReg<[MM0, MM1, MM2]>>>,
// Integer/Float values get stored in stack slots that are 4 bytes in
@@ -271,9 +286,18 @@ def CC_X86_32_Common : CallingConv<[
CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>,
+ // The first 4 AVX 256-bit vector arguments are passed in YMM registers.
+ CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
+ CCIfSubtarget<"hasAVX()",
+ CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,
+
// Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
+ // 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned.
+ CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
+ CCAssignToStack<32, 32>>,
+
// __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
// passed in the parameter area.
CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 4>>]>;
@@ -307,6 +331,20 @@ def CC_X86_32_FastCall : CallingConv<[
CCDelegateTo<CC_X86_32_Common>
]>;
+def CC_X86_32_ThisCall : CallingConv<[
+ // Promote i8/i16 arguments to i32.
+ CCIfType<[i8, i16], CCPromoteToType<i32>>,
+
+ // The 'nest' parameter, if any, is passed in EAX.
+ CCIfNest<CCAssignToReg<[EAX]>>,
+
+ // The first integer argument is passed in ECX
+ CCIfType<[i32], CCAssignToReg<[ECX]>>,
+
+ // Otherwise, same as everything else.
+ CCDelegateTo<CC_X86_32_Common>
+]>;
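+// (X86_ThisCall models the MSVC C++ member-function convention, where the
+// first integer argument is normally the 'this' pointer and lands in ECX.)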
+
def CC_X86_32_FastCC : CallingConv<[
// Handles byval parameters. Note that we can't rely on the delegation
// to CC_X86_32_Common for this because that happens after code that
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86CodeEmitter.cpp b/libclamav/c++/llvm/lib/Target/X86/X86CodeEmitter.cpp
index 8deadf6..824021c 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86CodeEmitter.cpp
+++ b/libclamav/c++/llvm/lib/Target/X86/X86CodeEmitter.cpp
@@ -46,18 +46,19 @@ namespace {
const TargetData *TD;
X86TargetMachine &TM;
CodeEmitter &MCE;
+ MachineModuleInfo *MMI;
intptr_t PICBaseOffset;
bool Is64BitMode;
bool IsPIC;
public:
static char ID;
explicit Emitter(X86TargetMachine &tm, CodeEmitter &mce)
- : MachineFunctionPass(&ID), II(0), TD(0), TM(tm),
+ : MachineFunctionPass(ID), II(0), TD(0), TM(tm),
MCE(mce), PICBaseOffset(0), Is64BitMode(false),
IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
Emitter(X86TargetMachine &tm, CodeEmitter &mce,
const X86InstrInfo &ii, const TargetData &td, bool is64)
- : MachineFunctionPass(&ID), II(&ii), TD(&td), TM(tm),
+ : MachineFunctionPass(ID), II(&ii), TD(&td), TM(tm),
MCE(mce), PICBaseOffset(0), Is64BitMode(is64),
IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
@@ -78,7 +79,7 @@ namespace {
private:
void emitPCRelativeBlockAddress(MachineBasicBlock *MBB);
- void emitGlobalAddress(GlobalValue *GV, unsigned Reloc,
+ void emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
intptr_t Disp = 0, intptr_t PCAdj = 0,
bool Indirect = false);
void emitExternalSymbolAddress(const char *ES, unsigned Reloc);
@@ -115,8 +116,8 @@ FunctionPass *llvm::createX86JITCodeEmitterPass(X86TargetMachine &TM,
template<class CodeEmitter>
bool Emitter<CodeEmitter>::runOnMachineFunction(MachineFunction &MF) {
-
- MCE.setModuleInfo(&getAnalysis<MachineModuleInfo>());
+ MMI = &getAnalysis<MachineModuleInfo>();
+ MCE.setModuleInfo(MMI);
II = TM.getInstrInfo();
TD = TM.getTargetData();
@@ -137,7 +138,7 @@ bool Emitter<CodeEmitter>::runOnMachineFunction(MachineFunction &MF) {
// MOVPC32r is basically a call plus a pop instruction.
if (Desc.getOpcode() == X86::MOVPC32r)
emitInstruction(*I, &II->get(X86::POP32r));
- NumEmitted++; // Keep track of the # of mi's emitted
+ ++NumEmitted; // Keep track of the # of mi's emitted
}
}
} while (MCE.finishFunction(MF));
@@ -145,6 +146,103 @@ bool Emitter<CodeEmitter>::runOnMachineFunction(MachineFunction &MF) {
return false;
}
+/// determineREX - Determine if the MachineInstr has to be encoded with an X86-64
+/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
+/// size, and 3) use of X86-64 extended registers.
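+/// The result uses the standard REX layout (0100WRXB): bit 3 = W (64-bit
+/// operand size), bit 2 = R (extends ModRM.reg), bit 1 = X (extends
+/// SIB.index), bit 0 = B (extends ModRM.rm or SIB.base); the caller ORs the
+/// returned value with 0x40 before emitting it.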
+static unsigned determineREX(const MachineInstr &MI) {
+ unsigned REX = 0;
+ const TargetInstrDesc &Desc = MI.getDesc();
+
+ // Pseudo instructions do not need REX prefix byte.
+ if ((Desc.TSFlags & X86II::FormMask) == X86II::Pseudo)
+ return 0;
+ if (Desc.TSFlags & X86II::REX_W)
+ REX |= 1 << 3;
+
+ unsigned NumOps = Desc.getNumOperands();
+ if (NumOps) {
+ bool isTwoAddr = NumOps > 1 &&
+ Desc.getOperandConstraint(1, TOI::TIED_TO) != -1;
+
+ // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
+ unsigned i = isTwoAddr ? 1 : 0;
+ for (unsigned e = NumOps; i != e; ++i) {
+ const MachineOperand& MO = MI.getOperand(i);
+ if (MO.isReg()) {
+ unsigned Reg = MO.getReg();
+ if (X86InstrInfo::isX86_64NonExtLowByteReg(Reg))
+ REX |= 0x40;
+ }
+ }
+
+ switch (Desc.TSFlags & X86II::FormMask) {
+ case X86II::MRMInitReg:
+ if (X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0)))
+ REX |= (1 << 0) | (1 << 2);
+ break;
+ case X86II::MRMSrcReg: {
+ if (X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0)))
+ REX |= 1 << 2;
+ i = isTwoAddr ? 2 : 1;
+ for (unsigned e = NumOps; i != e; ++i) {
+ const MachineOperand& MO = MI.getOperand(i);
+ if (X86InstrInfo::isX86_64ExtendedReg(MO))
+ REX |= 1 << 0;
+ }
+ break;
+ }
+ case X86II::MRMSrcMem: {
+ if (X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0)))
+ REX |= 1 << 2;
+ unsigned Bit = 0;
+ i = isTwoAddr ? 2 : 1;
+ for (; i != NumOps; ++i) {
+ const MachineOperand& MO = MI.getOperand(i);
+ if (MO.isReg()) {
+ if (X86InstrInfo::isX86_64ExtendedReg(MO))
+ REX |= 1 << Bit;
+ Bit++;
+ }
+ }
+ break;
+ }
+ case X86II::MRM0m: case X86II::MRM1m:
+ case X86II::MRM2m: case X86II::MRM3m:
+ case X86II::MRM4m: case X86II::MRM5m:
+ case X86II::MRM6m: case X86II::MRM7m:
+ case X86II::MRMDestMem: {
+ unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands);
+ i = isTwoAddr ? 1 : 0;
+ if (NumOps > e && X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(e)))
+ REX |= 1 << 2;
+ unsigned Bit = 0;
+ for (; i != e; ++i) {
+ const MachineOperand& MO = MI.getOperand(i);
+ if (MO.isReg()) {
+ if (X86InstrInfo::isX86_64ExtendedReg(MO))
+ REX |= 1 << Bit;
+ Bit++;
+ }
+ }
+ break;
+ }
+ default: {
+ if (X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0)))
+ REX |= 1 << 0;
+ i = isTwoAddr ? 2 : 1;
+ for (unsigned e = NumOps; i != e; ++i) {
+ const MachineOperand& MO = MI.getOperand(i);
+ if (X86InstrInfo::isX86_64ExtendedReg(MO))
+ REX |= 1 << 2;
+ }
+ break;
+ }
+ }
+ }
+ return REX;
+}
+
+
/// emitPCRelativeBlockAddress - This method keeps track of the information
/// necessary to resolve the address of this block later and emits a dummy
/// value.
@@ -162,7 +260,8 @@ void Emitter<CodeEmitter>::emitPCRelativeBlockAddress(MachineBasicBlock *MBB) {
/// this is part of a "take the address of a global" instruction.
///
template<class CodeEmitter>
-void Emitter<CodeEmitter>::emitGlobalAddress(GlobalValue *GV, unsigned Reloc,
+void Emitter<CodeEmitter>::emitGlobalAddress(const GlobalValue *GV,
+ unsigned Reloc,
intptr_t Disp /* = 0 */,
intptr_t PCAdj /* = 0 */,
bool Indirect /* = false */) {
@@ -173,9 +272,10 @@ void Emitter<CodeEmitter>::emitGlobalAddress(GlobalValue *GV, unsigned Reloc,
RelocCST = PCAdj;
MachineRelocation MR = Indirect
? MachineRelocation::getIndirectSymbol(MCE.getCurrentPCOffset(), Reloc,
- GV, RelocCST, false)
+ const_cast<GlobalValue *>(GV),
+ RelocCST, false)
: MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
- GV, RelocCST, false);
+ const_cast<GlobalValue *>(GV), RelocCST, false);
MCE.addRelocation(MR);
// The relocated value will be added to the displacement
if (Reloc == X86::reloc_absolute_dword)
@@ -377,6 +477,16 @@ void Emitter<CodeEmitter>::emitMemModRMByte(const MachineInstr &MI,
const MachineOperand &IndexReg = MI.getOperand(Op+2);
unsigned BaseReg = Base.getReg();
+
+ // Handle %rip relative addressing.
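+ // In 64-bit mode ModRM mod=00 with r/m=101 encodes disp32(%rip), so no SIB
+ // byte is needed for this form.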
+ if (BaseReg == X86::RIP ||
+ (Is64BitMode && DispForReloc)) { // [disp32+RIP] in X86-64 mode
+ assert(IndexReg.getReg() == 0 && Is64BitMode &&
+ "Invalid rip-relative address");
+ MCE.emitByte(ModRMByte(0, RegOpcodeField, 5));
+ emitDisplacementField(DispForReloc, DispVal, PCAdj, true);
+ return;
+ }
// Indicate that the displacement will use an pcrel or absolute reference
// by default. MCEs able to resolve addresses on-the-fly use pcrel by default
@@ -444,7 +554,7 @@ void Emitter<CodeEmitter>::emitMemModRMByte(const MachineInstr &MI,
// Emit the normal disp32 encoding.
MCE.emitByte(ModRMByte(2, RegOpcodeField, 4));
ForceDisp32 = true;
- } else if (DispVal == 0 && getX86RegNum(BaseReg) != N86::EBP) {
+ } else if (DispVal == 0 && BaseRegNo != N86::EBP) {
// Emit no displacement ModR/M byte
MCE.emitByte(ModRMByte(0, RegOpcodeField, 4));
} else if (isDisp8(DispVal)) {
@@ -556,7 +666,7 @@ void Emitter<CodeEmitter>::emitInstruction(const MachineInstr &MI,
// Handle REX prefix.
if (Is64BitMode) {
- if (unsigned REX = X86InstrInfo::determineREX(MI))
+ if (unsigned REX = determineREX(MI))
MCE.emitByte(0x40 | REX);
}
@@ -592,23 +702,29 @@ void Emitter<CodeEmitter>::emitInstruction(const MachineInstr &MI,
// base address.
switch (Opcode) {
default:
- llvm_unreachable("psuedo instructions should be removed before code"
+ llvm_unreachable("pseudo instructions should be removed before code"
" emission");
break;
+ // Do nothing for Int_MemBarrier - it's just a comment. Emit a debug
+ // message to make it slightly easier to spot in the output.
+ case X86::Int_MemBarrier:
+ DEBUG(dbgs() << "#MEMBARRIER\n");
+ break;
+
case TargetOpcode::INLINEASM:
// We allow inline assembler nodes with empty bodies - they can
// implicitly define registers, which is ok for JIT.
if (MI.getOperand(0).getSymbolName()[0])
- llvm_report_error("JIT does not support inline asm!");
+ report_fatal_error("JIT does not support inline asm!");
break;
- case TargetOpcode::DBG_LABEL:
- case TargetOpcode::EH_LABEL:
+ case TargetOpcode::PROLOG_LABEL:
case TargetOpcode::GC_LABEL:
- MCE.emitLabel(MI.getOperand(0).getImm());
+ case TargetOpcode::EH_LABEL:
+ MCE.emitLabel(MI.getOperand(0).getMCSymbol());
break;
+
case TargetOpcode::IMPLICIT_DEF:
case TargetOpcode::KILL:
- case X86::FP_REG_KILL:
break;
case X86::MOVPC32r: {
// This emits the "call" portion of this pseudo instruction.
@@ -660,7 +776,8 @@ void Emitter<CodeEmitter>::emitInstruction(const MachineInstr &MI,
}
assert(MO.isImm() && "Unknown RawFrm operand!");
- if (Opcode == X86::CALLpcrel32 || Opcode == X86::CALL64pcrel32) {
+ if (Opcode == X86::CALLpcrel32 || Opcode == X86::CALL64pcrel32 ||
+ Opcode == X86::WINCALL64pcrel32) {
// Fix up immediate operand for pc relative calls.
intptr_t Imm = (intptr_t)MO.getImm();
Imm = Imm - MCE.getCurrentPCValue() - 4;
@@ -716,9 +833,9 @@ void Emitter<CodeEmitter>::emitInstruction(const MachineInstr &MI,
case X86II::MRMDestMem: {
MCE.emitByte(BaseOpcode);
emitMemModRMByte(MI, CurOp,
- getX86RegNum(MI.getOperand(CurOp + X86AddrNumOperands)
+ getX86RegNum(MI.getOperand(CurOp + X86::AddrNumOperands)
.getReg()));
- CurOp += X86AddrNumOperands + 1;
+ CurOp += X86::AddrNumOperands + 1;
if (CurOp != NumOps)
emitConstant(MI.getOperand(CurOp++).getImm(),
X86II::getSizeOfImm(Desc->TSFlags));
@@ -736,13 +853,7 @@ void Emitter<CodeEmitter>::emitInstruction(const MachineInstr &MI,
break;
case X86II::MRMSrcMem: {
- // FIXME: Maybe lea should have its own form?
- int AddrOperands;
- if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
- Opcode == X86::LEA16r || Opcode == X86::LEA32r)
- AddrOperands = X86AddrNumOperands - 1; // No segment register
- else
- AddrOperands = X86AddrNumOperands;
+ int AddrOperands = X86::AddrNumOperands;
intptr_t PCAdj = (CurOp + AddrOperands + 1 != NumOps) ?
X86II::getSizeOfImm(Desc->TSFlags) : 0;
@@ -796,14 +907,14 @@ void Emitter<CodeEmitter>::emitInstruction(const MachineInstr &MI,
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m: {
- intptr_t PCAdj = (CurOp + X86AddrNumOperands != NumOps) ?
- (MI.getOperand(CurOp+X86AddrNumOperands).isImm() ?
+ intptr_t PCAdj = (CurOp + X86::AddrNumOperands != NumOps) ?
+ (MI.getOperand(CurOp+X86::AddrNumOperands).isImm() ?
X86II::getSizeOfImm(Desc->TSFlags) : 4) : 0;
MCE.emitByte(BaseOpcode);
emitMemModRMByte(MI, CurOp, (Desc->TSFlags & X86II::FormMask)-X86II::MRM0m,
PCAdj);
- CurOp += X86AddrNumOperands;
+ CurOp += X86::AddrNumOperands;
if (CurOp == NumOps)
break;
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86ELFWriterInfo.cpp b/libclamav/c++/llvm/lib/Target/X86/X86ELFWriterInfo.cpp
index 1597d2b..f84995d 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86ELFWriterInfo.cpp
+++ b/libclamav/c++/llvm/lib/Target/X86/X86ELFWriterInfo.cpp
@@ -26,7 +26,6 @@ using namespace llvm;
X86ELFWriterInfo::X86ELFWriterInfo(TargetMachine &TM)
: TargetELFWriterInfo(TM) {
- bool is64Bit = TM.getTargetData()->getPointerSizeInBits() == 64;
EMachine = is64Bit ? EM_X86_64 : EM_386;
}
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86FastISel.cpp b/libclamav/c++/llvm/lib/Target/X86/X86FastISel.cpp
index 27807f2..0c70eec 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/libclamav/c++/llvm/lib/Target/X86/X86FastISel.cpp
@@ -15,7 +15,6 @@
#include "X86.h"
#include "X86InstrBuilder.h"
-#include "X86ISelLowering.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
@@ -24,7 +23,9 @@
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -53,37 +54,23 @@ class X86FastISel : public FastISel {
bool X86ScalarSSEf32;
public:
- explicit X86FastISel(MachineFunction &mf,
- MachineModuleInfo *mmi,
- DwarfWriter *dw,
- DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
- DenseMap<const AllocaInst *, int> &am
-#ifndef NDEBUG
- , SmallSet<Instruction*, 8> &cil
-#endif
- )
- : FastISel(mf, mmi, dw, vm, bm, am
-#ifndef NDEBUG
- , cil
-#endif
- ) {
+ explicit X86FastISel(FunctionLoweringInfo &funcInfo) : FastISel(funcInfo) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
X86ScalarSSEf64 = Subtarget->hasSSE2();
X86ScalarSSEf32 = Subtarget->hasSSE1();
}
- virtual bool TargetSelectInstruction(Instruction *I);
+ virtual bool TargetSelectInstruction(const Instruction *I);
#include "X86GenFastISel.inc"
private:
- bool X86FastEmitCompare(Value *LHS, Value *RHS, EVT VT);
+ bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT);
bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, unsigned &RR);
- bool X86FastEmitStore(EVT VT, Value *Val,
+ bool X86FastEmitStore(EVT VT, const Value *Val,
const X86AddressMode &AM);
bool X86FastEmitStore(EVT VT, unsigned Val,
const X86AddressMode &AM);
@@ -91,34 +78,37 @@ private:
bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
unsigned &ResultReg);
- bool X86SelectAddress(Value *V, X86AddressMode &AM);
- bool X86SelectCallAddress(Value *V, X86AddressMode &AM);
+ bool X86SelectAddress(const Value *V, X86AddressMode &AM);
+ bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);
- bool X86SelectLoad(Instruction *I);
+ bool X86SelectLoad(const Instruction *I);
- bool X86SelectStore(Instruction *I);
+ bool X86SelectStore(const Instruction *I);
- bool X86SelectCmp(Instruction *I);
+ bool X86SelectRet(const Instruction *I);
- bool X86SelectZExt(Instruction *I);
+ bool X86SelectCmp(const Instruction *I);
- bool X86SelectBranch(Instruction *I);
+ bool X86SelectZExt(const Instruction *I);
- bool X86SelectShift(Instruction *I);
+ bool X86SelectBranch(const Instruction *I);
- bool X86SelectSelect(Instruction *I);
+ bool X86SelectShift(const Instruction *I);
- bool X86SelectTrunc(Instruction *I);
+ bool X86SelectSelect(const Instruction *I);
+
+ bool X86SelectTrunc(const Instruction *I);
- bool X86SelectFPExt(Instruction *I);
- bool X86SelectFPTrunc(Instruction *I);
+ bool X86SelectFPExt(const Instruction *I);
+ bool X86SelectFPTrunc(const Instruction *I);
- bool X86SelectExtractValue(Instruction *I);
+ bool X86SelectExtractValue(const Instruction *I);
- bool X86VisitIntrinsicCall(IntrinsicInst &I);
- bool X86SelectCall(Instruction *I);
+ bool X86VisitIntrinsicCall(const IntrinsicInst &I);
+ bool X86SelectCall(const Instruction *I);
CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isTailCall = false);
+ CCAssignFn *CCAssignFnForRet(CallingConv::ID CC, bool isTailCall = false);
const X86InstrInfo *getInstrInfo() const {
return getTargetMachine()->getInstrInfo();
@@ -127,9 +117,9 @@ private:
return static_cast<const X86TargetMachine *>(&TM);
}
- unsigned TargetMaterializeConstant(Constant *C);
+ unsigned TargetMaterializeConstant(const Constant *C);
- unsigned TargetMaterializeAlloca(AllocaInst *C);
+ unsigned TargetMaterializeAlloca(const AllocaInst *C);
/// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
/// computed in an SSE register, not on the X87 floating point stack.
@@ -182,6 +172,8 @@ CCAssignFn *X86FastISel::CCAssignFnForCall(CallingConv::ID CC,
if (CC == CallingConv::X86_FastCall)
return CC_X86_32_FastCall;
+ else if (CC == CallingConv::X86_ThisCall)
+ return CC_X86_32_ThisCall;
else if (CC == CallingConv::Fast)
return CC_X86_32_FastCC;
else if (CC == CallingConv::GHC)
@@ -190,6 +182,20 @@ CCAssignFn *X86FastISel::CCAssignFnForCall(CallingConv::ID CC,
return CC_X86_32_C;
}
+/// CCAssignFnForRet - Selects the correct CCAssignFn for a given calling
+/// convention.
+CCAssignFn *X86FastISel::CCAssignFnForRet(CallingConv::ID CC,
+ bool isTailCall) {
+ if (Subtarget->is64Bit()) {
+ if (Subtarget->isTargetWin64())
+ return RetCC_X86_Win64_C;
+ else
+ return RetCC_X86_64_C;
+ }
+
+ return RetCC_X86_32_C;
+}
+
/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
/// Return true and the result register by reference if it is possible.
@@ -242,7 +248,8 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
}
ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(Opc), ResultReg), AM);
return true;
}
@@ -261,7 +268,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
case MVT::i1: {
// Mask out all but lowest bit.
unsigned AndResult = createResultReg(X86::GR8RegisterClass);
- BuildMI(MBB, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::AND8ri), AndResult).addReg(Val).addImm(1);
Val = AndResult;
}
@@ -278,18 +285,19 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
break;
}
- addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM).addReg(Val);
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(Opc)), AM).addReg(Val);
return true;
}
-bool X86FastISel::X86FastEmitStore(EVT VT, Value *Val,
+bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
const X86AddressMode &AM) {
// Handle 'null' like i32/i64 0.
if (isa<ConstantPointerNull>(Val))
Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext()));
// If this is a store of a simple constant, fold the constant into the store.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
unsigned Opc = 0;
bool Signed = true;
switch (VT.getSimpleVT().SimpleTy) {
@@ -306,8 +314,9 @@ bool X86FastISel::X86FastEmitStore(EVT VT, Value *Val,
}
if (Opc) {
- addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM)
- .addImm(Signed ? CI->getSExtValue() :
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(Opc)), AM)
+ .addImm(Signed ? (uint64_t) CI->getSExtValue() :
CI->getZExtValue());
return true;
}
@@ -326,7 +335,8 @@ bool X86FastISel::X86FastEmitStore(EVT VT, Value *Val,
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
unsigned Src, EVT SrcVT,
unsigned &ResultReg) {
- unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);
+ unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
+ Src, /*TODO: Kill=*/false);
if (RR != 0) {
ResultReg = RR;
@@ -337,17 +347,29 @@ bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
/// X86SelectAddress - Attempt to fill in an address from the given value.
///
-bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
- User *U = NULL;
+bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
+ const User *U = NULL;
unsigned Opcode = Instruction::UserOp1;
- if (Instruction *I = dyn_cast<Instruction>(V)) {
+ if (const Instruction *I = dyn_cast<Instruction>(V)) {
+ // Don't walk into other basic blocks; it's possible we haven't
+ // visited them yet, so the instructions may not yet be assigned
+ // virtual registers.
+ if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
+ return false;
+
Opcode = I->getOpcode();
U = I;
- } else if (ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
+ } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
Opcode = C->getOpcode();
U = C;
}
+ if (const PointerType *Ty = dyn_cast<PointerType>(V->getType()))
+ if (Ty->getAddressSpace() > 255)
+ // Fast instruction selection doesn't support the special
+ // address spaces.
+ return false;
+
switch (Opcode) {
default: break;
case Instruction::BitCast:
@@ -369,8 +391,9 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
case Instruction::Alloca: {
// Do static allocas.
const AllocaInst *A = cast<AllocaInst>(V);
- DenseMap<const AllocaInst*, int>::iterator SI = StaticAllocaMap.find(A);
- if (SI != StaticAllocaMap.end()) {
+ DenseMap<const AllocaInst*, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(A);
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
AM.BaseType = X86AddressMode::FrameIndexBase;
AM.Base.FrameIndex = SI->second;
return true;
@@ -380,10 +403,10 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
case Instruction::Add: {
// Adds of constants are common and easy enough.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
// They have to fit in the 32-bit signed displacement field though.
- if (isInt32(Disp)) {
+ if (isInt<32>(Disp)) {
AM.Disp = (uint32_t)Disp;
return X86SelectAddress(U->getOperand(0), AM);
}
@@ -401,33 +424,46 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
gep_type_iterator GTI = gep_type_begin(U);
// Iterate through the indices, folding what we can. Constants can be
// folded, and one dynamic index can be handled, if the scale is supported.
- for (User::op_iterator i = U->op_begin() + 1, e = U->op_end();
+ for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
i != e; ++i, ++GTI) {
- Value *Op = *i;
+ const Value *Op = *i;
if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
const StructLayout *SL = TD.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
Disp += SL->getElementOffset(Idx);
} else {
uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
- // Constant-offset addressing.
- Disp += CI->getSExtValue() * S;
- } else if (IndexReg == 0 &&
- (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
- (S == 1 || S == 2 || S == 4 || S == 8)) {
- // Scaled-index addressing.
- Scale = S;
- IndexReg = getRegForGEPIndex(Op);
- if (IndexReg == 0)
- return false;
- } else
- // Unsupported.
- goto unsupported_gep;
+ SmallVector<const Value *, 4> Worklist;
+ Worklist.push_back(Op);
+ do {
+ Op = Worklist.pop_back_val();
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+ // Constant-offset addressing.
+ Disp += CI->getSExtValue() * S;
+ } else if (isa<AddOperator>(Op) &&
+ isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
+ // An add with a constant operand. Fold the constant.
+ ConstantInt *CI =
+ cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
+ Disp += CI->getSExtValue() * S;
+ // Add the other operand back to the work list.
+ Worklist.push_back(cast<AddOperator>(Op)->getOperand(0));
+ } else if (IndexReg == 0 &&
+ (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
+ (S == 1 || S == 2 || S == 4 || S == 8)) {
+ // Scaled-index addressing.
+ Scale = S;
+ IndexReg = getRegForGEPIndex(Op).first;
+ if (IndexReg == 0)
+ return false;
+ } else
+ // Unsupported.
+ goto unsupported_gep;
+ } while (!Worklist.empty());
}
}
// Check for displacement overflow.
- if (!isInt32(Disp))
+ if (!isInt<32>(Disp))
break;
// Ok, the GEP indices were covered by constant-offset and scaled-index
// addressing. Update the address state and move on to examining the base.
@@ -448,7 +484,7 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
}
// Handle constant address.
- if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
// Can't handle alternate code models yet.
if (TM.getCodeModel() != CodeModel::Small)
return false;
@@ -459,7 +495,7 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
return false;
// Can't handle TLS yet.
- if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
+ if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
if (GVar->isThreadLocal())
return false;
@@ -472,7 +508,7 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
// If this reference is relative to the pic base, set it now.
if (isGlobalRelativeToPICBase(GVFlags)) {
// FIXME: How do we know Base.Reg is free??
- AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(&MF);
+ AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
}
// Unless the ABI requires an extra load, return a direct reference to
@@ -503,6 +539,9 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
StubAM.GV = GV;
StubAM.GVOpFlags = GVFlags;
+ // Prepare for inserting code in the local-value area.
+ SavePoint SaveInsertPt = enterLocalValueArea();
+
if (TLI.getPointerTy() == MVT::i64) {
Opc = X86::MOV64rm;
RC = X86::GR64RegisterClass;
@@ -515,8 +554,13 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
}
LoadReg = createResultReg(RC);
- addFullAddress(BuildMI(MBB, DL, TII.get(Opc), LoadReg), StubAM);
-
+ MachineInstrBuilder LoadMI =
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), LoadReg);
+ addFullAddress(LoadMI, StubAM);
+
+ // Ok, back to normal mode.
+ leaveLocalValueArea(SaveInsertPt);
+
// Prevent loading GV stub multiple times in same MBB.
LocalValueMap[V] = LoadReg;
}
@@ -546,13 +590,13 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
/// X86SelectCallAddress - Attempt to fill in an address from the given value.
///
-bool X86FastISel::X86SelectCallAddress(Value *V, X86AddressMode &AM) {
- User *U = NULL;
+bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
+ const User *U = NULL;
unsigned Opcode = Instruction::UserOp1;
- if (Instruction *I = dyn_cast<Instruction>(V)) {
+ if (const Instruction *I = dyn_cast<Instruction>(V)) {
Opcode = I->getOpcode();
U = I;
- } else if (ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
+ } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
Opcode = C->getOpcode();
U = C;
}
@@ -577,7 +621,7 @@ bool X86FastISel::X86SelectCallAddress(Value *V, X86AddressMode &AM) {
}
// Handle constant address.
- if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
// Can't handle alternate code models yet.
if (TM.getCodeModel() != CodeModel::Small)
return false;
@@ -588,7 +632,7 @@ bool X86FastISel::X86SelectCallAddress(Value *V, X86AddressMode &AM) {
return false;
// Can't handle TLS or DLLImport.
- if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
+ if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
if (GVar->isThreadLocal() || GVar->hasDLLImportLinkage())
return false;
@@ -629,7 +673,7 @@ bool X86FastISel::X86SelectCallAddress(Value *V, X86AddressMode &AM) {
/// X86SelectStore - Select and emit code to implement store instructions.
-bool X86FastISel::X86SelectStore(Instruction* I) {
+bool X86FastISel::X86SelectStore(const Instruction *I) {
EVT VT;
if (!isTypeLegal(I->getOperand(0)->getType(), VT, /*AllowI1=*/true))
return false;
@@ -641,9 +685,96 @@ bool X86FastISel::X86SelectStore(Instruction* I) {
return X86FastEmitStore(VT, I->getOperand(0), AM);
}
+/// X86SelectRet - Select and emit code to implement ret instructions.
+bool X86FastISel::X86SelectRet(const Instruction *I) {
+ const ReturnInst *Ret = cast<ReturnInst>(I);
+ const Function &F = *I->getParent()->getParent();
+
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ CallingConv::ID CC = F.getCallingConv();
+ if (CC != CallingConv::C &&
+ CC != CallingConv::Fast &&
+ CC != CallingConv::X86_FastCall)
+ return false;
+
+ if (Subtarget->isTargetWin64())
+ return false;
+
+ // Don't handle popping bytes on return for now.
+ if (FuncInfo.MF->getInfo<X86MachineFunctionInfo>()
+ ->getBytesToPopOnReturn() != 0)
+ return false;
+
+ // fastcc with -tailcallopt is intended to provide a guaranteed
+ // tail call optimization. Fastisel doesn't know how to do that.
+ if (CC == CallingConv::Fast && GuaranteedTailCallOpt)
+ return false;
+
+ // Let SDISel handle vararg functions.
+ if (F.isVarArg())
+ return false;
+
+ if (Ret->getNumOperands() > 0) {
+ SmallVector<ISD::OutputArg, 4> Outs;
+ GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
+ Outs, TLI);
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ValLocs;
+ CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
+ CCInfo.AnalyzeReturn(Outs, CCAssignFnForRet(CC));
+
+ const Value *RV = Ret->getOperand(0);
+ unsigned Reg = getRegForValue(RV);
+ if (Reg == 0)
+ return false;
+
+ // Only handle a single return value for now.
+ if (ValLocs.size() != 1)
+ return false;
+
+ CCValAssign &VA = ValLocs[0];
+
+ // Don't bother handling odd stuff for now.
+ if (VA.getLocInfo() != CCValAssign::Full)
+ return false;
+ // Only handle register returns for now.
+ if (!VA.isRegLoc())
+ return false;
+ // TODO: For now, don't try to handle cases where getLocInfo()
+ // says Full but the types don't match.
+ if (VA.getValVT() != TLI.getValueType(RV->getType()))
+ return false;
+
+ // The calling-convention tables for x87 returns don't tell
+ // the whole story.
+ if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1)
+ return false;
+
+ // Make the copy.
+ unsigned SrcReg = Reg + VA.getValNo();
+ unsigned DstReg = VA.getLocReg();
+ const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
+ // Avoid a cross-class copy. This is very unlikely.
+ if (!SrcRC->contains(DstReg))
+ return false;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ DstReg).addReg(SrcReg);
+
+ // Mark the register as live out of the function.
+ MRI.addLiveOut(VA.getLocReg());
+ }
+
+ // Now emit the RET.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET));
+ return true;
+}
+
/// X86SelectLoad - Select and emit code to implement load instructions.
///
-bool X86FastISel::X86SelectLoad(Instruction *I) {
+bool X86FastISel::X86SelectLoad(const Instruction *I) {
EVT VT;
if (!isTypeLegal(I->getType(), VT, /*AllowI1=*/true))
return false;
@@ -660,22 +791,22 @@ bool X86FastISel::X86SelectLoad(Instruction *I) {
return false;
}
-static unsigned X86ChooseCmpOpcode(EVT VT) {
+static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
switch (VT.getSimpleVT().SimpleTy) {
default: return 0;
case MVT::i8: return X86::CMP8rr;
case MVT::i16: return X86::CMP16rr;
case MVT::i32: return X86::CMP32rr;
case MVT::i64: return X86::CMP64rr;
- case MVT::f32: return X86::UCOMISSrr;
- case MVT::f64: return X86::UCOMISDrr;
+ case MVT::f32: return Subtarget->hasSSE1() ? X86::UCOMISSrr : 0;
+ case MVT::f64: return Subtarget->hasSSE2() ? X86::UCOMISDrr : 0;
}
}
/// X86ChooseCmpImmediateOpcode - If we have a comparison with RHS as the RHS
/// of the comparison, return an opcode that works for the compare (e.g.
/// CMP32ri) otherwise return 0.
-static unsigned X86ChooseCmpImmediateOpcode(EVT VT, ConstantInt *RHSC) {
+static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
switch (VT.getSimpleVT().SimpleTy) {
// Otherwise, we can't fold the immediate into this comparison.
default: return 0;
@@ -691,7 +822,8 @@ static unsigned X86ChooseCmpImmediateOpcode(EVT VT, ConstantInt *RHSC) {
}
}
-bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, EVT VT) {
+bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
+ EVT VT) {
unsigned Op0Reg = getRegForValue(Op0);
if (Op0Reg == 0) return false;
@@ -702,26 +834,29 @@ bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, EVT VT) {
// We have two options: compare with register or immediate. If the RHS of
// the compare is an immediate that we can fold into this compare, use
// CMPri, otherwise use CMPrr.
- if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
+ if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
- BuildMI(MBB, DL, TII.get(CompareImmOpc)).addReg(Op0Reg)
- .addImm(Op1C->getSExtValue());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareImmOpc))
+ .addReg(Op0Reg)
+ .addImm(Op1C->getSExtValue());
return true;
}
}
- unsigned CompareOpc = X86ChooseCmpOpcode(VT);
+ unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);
if (CompareOpc == 0) return false;
unsigned Op1Reg = getRegForValue(Op1);
if (Op1Reg == 0) return false;
- BuildMI(MBB, DL, TII.get(CompareOpc)).addReg(Op0Reg).addReg(Op1Reg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc))
+ .addReg(Op0Reg)
+ .addReg(Op1Reg);
return true;
}
-bool X86FastISel::X86SelectCmp(Instruction *I) {
- CmpInst *CI = cast<CmpInst>(I);
+bool X86FastISel::X86SelectCmp(const Instruction *I) {
+ const CmpInst *CI = cast<CmpInst>(I);
EVT VT;
if (!isTypeLegal(I->getOperand(0)->getType(), VT))
@@ -737,9 +872,10 @@ bool X86FastISel::X86SelectCmp(Instruction *I) {
unsigned EReg = createResultReg(&X86::GR8RegClass);
unsigned NPReg = createResultReg(&X86::GR8RegClass);
- BuildMI(MBB, DL, TII.get(X86::SETEr), EReg);
- BuildMI(MBB, DL, TII.get(X86::SETNPr), NPReg);
- BuildMI(MBB, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::SETNPr), NPReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
UpdateValueMap(I, ResultReg);
return true;
@@ -750,9 +886,13 @@ bool X86FastISel::X86SelectCmp(Instruction *I) {
unsigned NEReg = createResultReg(&X86::GR8RegClass);
unsigned PReg = createResultReg(&X86::GR8RegClass);
- BuildMI(MBB, DL, TII.get(X86::SETNEr), NEReg);
- BuildMI(MBB, DL, TII.get(X86::SETPr), PReg);
- BuildMI(MBB, DL, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::SETNEr), NEReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::SETPr), PReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::OR8rr), ResultReg)
+ .addReg(PReg).addReg(NEReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -783,7 +923,7 @@ bool X86FastISel::X86SelectCmp(Instruction *I) {
return false;
}
- Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
+ const Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
if (SwapArgs)
std::swap(Op0, Op1);
@@ -791,19 +931,19 @@ bool X86FastISel::X86SelectCmp(Instruction *I) {
if (!X86FastEmitCompare(Op0, Op1, VT))
return false;
- BuildMI(MBB, DL, TII.get(SetCCOpc), ResultReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg);
UpdateValueMap(I, ResultReg);
return true;
}
-bool X86FastISel::X86SelectZExt(Instruction *I) {
+bool X86FastISel::X86SelectZExt(const Instruction *I) {
// Handle zero-extension from i1 to i8, which is common.
if (I->getType()->isIntegerTy(8) &&
I->getOperand(0)->getType()->isIntegerTy(1)) {
unsigned ResultReg = getRegForValue(I->getOperand(0));
if (ResultReg == 0) return false;
// Set the high bits to zero.
- ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg);
+ ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false);
if (ResultReg == 0) return false;
UpdateValueMap(I, ResultReg);
return true;
@@ -813,21 +953,23 @@ bool X86FastISel::X86SelectZExt(Instruction *I) {
}
-bool X86FastISel::X86SelectBranch(Instruction *I) {
+bool X86FastISel::X86SelectBranch(const Instruction *I) {
// Unconditional branches are selected by tablegen-generated code.
// Handle a conditional branch.
- BranchInst *BI = cast<BranchInst>(I);
- MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)];
- MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)];
-
- // Fold the common case of a conditional branch with a comparison.
- if (CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
- if (CI->hasOneUse()) {
+ const BranchInst *BI = cast<BranchInst>(I);
+ MachineBasicBlock *TrueMBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
+ MachineBasicBlock *FalseMBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
+
+ // Fold the common case of a conditional branch with a comparison
+ // in the same block (values defined on other blocks may not have
+ // initialized registers).
+ if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
+ if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
EVT VT = TLI.getValueType(CI->getOperand(0)->getType());
// Try to take advantage of fallthrough opportunities.
CmpInst::Predicate Predicate = CI->getPredicate();
- if (MBB->isLayoutSuccessor(TrueMBB)) {
+ if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
std::swap(TrueMBB, FalseMBB);
Predicate = CmpInst::getInversePredicate(Predicate);
}
@@ -868,7 +1010,7 @@ bool X86FastISel::X86SelectBranch(Instruction *I) {
return false;
}
- Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
+ const Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
if (SwapArgs)
std::swap(Op0, Op1);
@@ -876,16 +1018,18 @@ bool X86FastISel::X86SelectBranch(Instruction *I) {
if (!X86FastEmitCompare(Op0, Op1, VT))
return false;
- BuildMI(MBB, DL, TII.get(BranchOpc)).addMBB(TrueMBB);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc))
+ .addMBB(TrueMBB);
if (Predicate == CmpInst::FCMP_UNE) {
// X86 requires a second branch to handle UNE (and OEQ,
// which is mapped to UNE above).
- BuildMI(MBB, DL, TII.get(X86::JP_4)).addMBB(TrueMBB);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JP_4))
+ .addMBB(TrueMBB);
}
- FastEmitBranch(FalseMBB);
- MBB->addSuccessor(TrueMBB);
+ FastEmitBranch(FalseMBB, DL);
+ FuncInfo.MBB->addSuccessor(TrueMBB);
return true;
}
} else if (ExtractValueInst *EI =
@@ -903,21 +1047,21 @@ bool X86FastISel::X86SelectBranch(Instruction *I) {
// looking for the SETO/SETB instruction. If an instruction modifies the
// EFLAGS register before we reach the SETO/SETB instruction, then we can't
// convert the branch into a JO/JB instruction.
- if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(EI->getAggregateOperand())){
+ if (const IntrinsicInst *CI =
+ dyn_cast<IntrinsicInst>(EI->getAggregateOperand())){
if (CI->getIntrinsicID() == Intrinsic::sadd_with_overflow ||
CI->getIntrinsicID() == Intrinsic::uadd_with_overflow) {
const MachineInstr *SetMI = 0;
- unsigned Reg = lookUpRegForValue(EI);
+ unsigned Reg = getRegForValue(EI);
for (MachineBasicBlock::const_reverse_iterator
- RI = MBB->rbegin(), RE = MBB->rend(); RI != RE; ++RI) {
+ RI = FuncInfo.MBB->rbegin(), RE = FuncInfo.MBB->rend();
+ RI != RE; ++RI) {
const MachineInstr &MI = *RI;
- if (MI.modifiesRegister(Reg)) {
- unsigned Src, Dst, SrcSR, DstSR;
-
- if (getInstrInfo()->isMoveInstr(MI, Src, Dst, SrcSR, DstSR)) {
- Reg = Src;
+ if (MI.definesRegister(Reg)) {
+ if (MI.isCopy()) {
+ Reg = MI.getOperand(1).getReg();
continue;
}
@@ -935,11 +1079,11 @@ bool X86FastISel::X86SelectBranch(Instruction *I) {
unsigned OpCode = SetMI->getOpcode();
if (OpCode == X86::SETOr || OpCode == X86::SETBr) {
- BuildMI(MBB, DL, TII.get(OpCode == X86::SETOr ?
- X86::JO_4 : X86::JB_4))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(OpCode == X86::SETOr ? X86::JO_4 : X86::JB_4))
.addMBB(TrueMBB);
- FastEmitBranch(FalseMBB);
- MBB->addSuccessor(TrueMBB);
+ FastEmitBranch(FalseMBB, DL);
+ FuncInfo.MBB->addSuccessor(TrueMBB);
return true;
}
}
@@ -951,14 +1095,16 @@ bool X86FastISel::X86SelectBranch(Instruction *I) {
unsigned OpReg = getRegForValue(BI->getCondition());
if (OpReg == 0) return false;
- BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
- BuildMI(MBB, DL, TII.get(X86::JNE_4)).addMBB(TrueMBB);
- FastEmitBranch(FalseMBB);
- MBB->addSuccessor(TrueMBB);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
+ .addReg(OpReg).addReg(OpReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JNE_4))
+ .addMBB(TrueMBB);
+ FastEmitBranch(FalseMBB, DL);
+ FuncInfo.MBB->addSuccessor(TrueMBB);
return true;
}
-bool X86FastISel::X86SelectShift(Instruction *I) {
+bool X86FastISel::X86SelectShift(const Instruction *I) {
unsigned CReg = 0, OpReg = 0, OpImm = 0;
const TargetRegisterClass *RC = NULL;
if (I->getType()->isIntegerTy(8)) {
@@ -1009,9 +1155,9 @@ bool X86FastISel::X86SelectShift(Instruction *I) {
if (Op0Reg == 0) return false;
// Fold immediate in shl(x,3).
- if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
unsigned ResultReg = createResultReg(RC);
- BuildMI(MBB, DL, TII.get(OpImm),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpImm),
ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff);
UpdateValueMap(I, ResultReg);
return true;
@@ -1019,22 +1165,24 @@ bool X86FastISel::X86SelectShift(Instruction *I) {
unsigned Op1Reg = getRegForValue(I->getOperand(1));
if (Op1Reg == 0) return false;
- TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ CReg).addReg(Op1Reg);
// The shift instruction uses X86::CL. If we defined a super-register
- // of X86::CL, emit an EXTRACT_SUBREG to precisely describe what
- // we're doing here.
+ // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
if (CReg != X86::CL)
- BuildMI(MBB, DL, TII.get(TargetOpcode::EXTRACT_SUBREG), X86::CL)
- .addReg(CReg).addImm(X86::SUBREG_8BIT);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TargetOpcode::KILL), X86::CL)
+ .addReg(CReg, RegState::Kill);
unsigned ResultReg = createResultReg(RC);
- BuildMI(MBB, DL, TII.get(OpReg), ResultReg).addReg(Op0Reg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpReg), ResultReg)
+ .addReg(Op0Reg);
UpdateValueMap(I, ResultReg);
return true;
}
-bool X86FastISel::X86SelectSelect(Instruction *I) {
+bool X86FastISel::X86SelectSelect(const Instruction *I) {
EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
return false;
@@ -1061,23 +1209,27 @@ bool X86FastISel::X86SelectSelect(Instruction *I) {
unsigned Op2Reg = getRegForValue(I->getOperand(2));
if (Op2Reg == 0) return false;
- BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
+ .addReg(Op0Reg).addReg(Op0Reg);
unsigned ResultReg = createResultReg(RC);
- BuildMI(MBB, DL, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ .addReg(Op1Reg).addReg(Op2Reg);
UpdateValueMap(I, ResultReg);
return true;
}
-bool X86FastISel::X86SelectFPExt(Instruction *I) {
+bool X86FastISel::X86SelectFPExt(const Instruction *I) {
// fpext from float to double.
if (Subtarget->hasSSE2() &&
I->getType()->isDoubleTy()) {
- Value *V = I->getOperand(0);
+ const Value *V = I->getOperand(0);
if (V->getType()->isFloatTy()) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
- BuildMI(MBB, DL, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::CVTSS2SDrr), ResultReg)
+ .addReg(OpReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1086,15 +1238,17 @@ bool X86FastISel::X86SelectFPExt(Instruction *I) {
return false;
}
-bool X86FastISel::X86SelectFPTrunc(Instruction *I) {
+bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
if (Subtarget->hasSSE2()) {
if (I->getType()->isFloatTy()) {
- Value *V = I->getOperand(0);
+ const Value *V = I->getOperand(0);
if (V->getType()->isDoubleTy()) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
- BuildMI(MBB, DL, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::CVTSD2SSrr), ResultReg)
+ .addReg(OpReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1104,7 +1258,7 @@ bool X86FastISel::X86SelectFPTrunc(Instruction *I) {
return false;
}
-bool X86FastISel::X86SelectTrunc(Instruction *I) {
+bool X86FastISel::X86SelectTrunc(const Instruction *I) {
if (Subtarget->is64Bit())
// All other cases should be handled by the tblgen generated code.
return false;
@@ -1125,15 +1279,16 @@ bool X86FastISel::X86SelectTrunc(Instruction *I) {
return false;
// First issue a copy to GR16_ABCD or GR32_ABCD.
- unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16rr : X86::MOV32rr;
const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
unsigned CopyReg = createResultReg(CopyRC);
- BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ CopyReg).addReg(InputReg);
// Then issue an extract_subreg.
unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
- CopyReg, X86::SUBREG_8BIT);
+ CopyReg, /*Kill=*/true,
+ X86::sub_8bit);
if (!ResultReg)
return false;
@@ -1141,33 +1296,76 @@ bool X86FastISel::X86SelectTrunc(Instruction *I) {
return true;
}
-bool X86FastISel::X86SelectExtractValue(Instruction *I) {
- ExtractValueInst *EI = cast<ExtractValueInst>(I);
- Value *Agg = EI->getAggregateOperand();
+bool X86FastISel::X86SelectExtractValue(const Instruction *I) {
+ const ExtractValueInst *EI = cast<ExtractValueInst>(I);
+ const Value *Agg = EI->getAggregateOperand();
- if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(Agg)) {
+ if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(Agg)) {
switch (CI->getIntrinsicID()) {
default: break;
case Intrinsic::sadd_with_overflow:
- case Intrinsic::uadd_with_overflow:
+ case Intrinsic::uadd_with_overflow: {
// Cheat a little. We know that the registers for "add" and "seto" are
// allocated sequentially. However, we only keep track of the register
// for "add" in the value map. Use extractvalue's index to get the
// correct register for "seto".
- UpdateValueMap(I, lookUpRegForValue(Agg) + *EI->idx_begin());
+ unsigned OpReg = getRegForValue(Agg);
+ if (OpReg == 0)
+ return false;
+ UpdateValueMap(I, OpReg + *EI->idx_begin());
return true;
}
+ }
}
return false;
}
-bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
+bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
// FIXME: Handle more intrinsics.
switch (I.getIntrinsicID()) {
default: return false;
+ case Intrinsic::stackprotector: {
+ // Emit inline code to store the stack guard onto the stack.
+ EVT PtrTy = TLI.getPointerTy();
+
+ const Value *Op1 = I.getArgOperand(0); // The guard's value.
+ const AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
+
+ // Grab the frame index.
+ X86AddressMode AM;
+ if (!X86SelectAddress(Slot, AM)) return false;
+
+ if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;
+
+ return true;
+ }
+ case Intrinsic::objectsize: {
+ ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
+ const Type *Ty = I.getCalledFunction()->getReturnType();
+
+ assert(CI && "Non-constant type in Intrinsic::objectsize?");
+
+ EVT VT;
+ if (!isTypeLegal(Ty, VT))
+ return false;
+
+ unsigned OpC = 0;
+ if (VT == MVT::i32)
+ OpC = X86::MOV32ri;
+ else if (VT == MVT::i64)
+ OpC = X86::MOV64ri;
+ else
+ return false;
+
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg).
+ addImm(CI->isZero() ? -1ULL : 0);
+ UpdateValueMap(&I, ResultReg);
+ return true;
+ }
case Intrinsic::dbg_declare: {
- DbgDeclareInst *DI = cast<DbgDeclareInst>(&I);
+ const DbgDeclareInst *DI = cast<DbgDeclareInst>(&I);
X86AddressMode AM;
assert(DI->getAddress() && "Null address should be checked earlier!");
if (!X86SelectAddress(DI->getAddress(), AM))
@@ -1175,12 +1373,12 @@ bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
// FIXME may need to add RegState::Debug to any registers produced,
// although ESP/EBP should be the only ones at the moment.
- addFullAddress(BuildMI(MBB, DL, II), AM).addImm(0).
- addMetadata(DI->getVariable());
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II), AM).
+ addImm(0).addMetadata(DI->getVariable());
return true;
}
case Intrinsic::trap: {
- BuildMI(MBB, DL, TII.get(X86::TRAP));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TRAP));
return true;
}
case Intrinsic::sadd_with_overflow:
@@ -1198,8 +1396,8 @@ bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
if (!isTypeLegal(RetTy, VT))
return false;
- Value *Op1 = I.getOperand(1);
- Value *Op2 = I.getOperand(2);
+ const Value *Op1 = I.getArgOperand(0);
+ const Value *Op2 = I.getArgOperand(1);
unsigned Reg1 = getRegForValue(Op1);
unsigned Reg2 = getRegForValue(Op2);
@@ -1216,7 +1414,8 @@ bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
return false;
unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(MBB, DL, TII.get(OpC), ResultReg).addReg(Reg1).addReg(Reg2);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg)
+ .addReg(Reg1).addReg(Reg2);
unsigned DestReg1 = UpdateValueMap(&I, ResultReg);
// If the add with overflow is an intra-block value then we just want to
@@ -1234,26 +1433,26 @@ bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
unsigned Opc = X86::SETBr;
if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
Opc = X86::SETOr;
- BuildMI(MBB, DL, TII.get(Opc), ResultReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg);
return true;
}
}
}
-bool X86FastISel::X86SelectCall(Instruction *I) {
- CallInst *CI = cast<CallInst>(I);
- Value *Callee = I->getOperand(0);
+bool X86FastISel::X86SelectCall(const Instruction *I) {
+ const CallInst *CI = cast<CallInst>(I);
+ const Value *Callee = CI->getCalledValue();
// Can't handle inline asm yet.
if (isa<InlineAsm>(Callee))
return false;
// Handle intrinsic calls.
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI))
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI))
return X86VisitIntrinsicCall(*II);
// Handle only C and fastcc calling conventions for now.
- CallSite CS(CI);
+ ImmutableCallSite CS(CI);
CallingConv::ID CC = CS.getCallingConv();
if (CC != CallingConv::C &&
CC != CallingConv::Fast &&
@@ -1271,6 +1470,10 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
if (FTy->isVarArg())
return false;
+ // Fast-isel doesn't know about callee-pop yet.
+ if (Subtarget->IsCalleePop(FTy->isVarArg(), CC))
+ return false;
+
// Handle *simple* calls for now.
const Type *RetTy = CS.getType();
EVT RetVT;
@@ -1285,7 +1488,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
if (!X86SelectCallAddress(Callee, CalleeAM))
return false;
unsigned CalleeOp = 0;
- GlobalValue *GV = 0;
+ const GlobalValue *GV = 0;
if (CalleeAM.GV != 0) {
GV = CalleeAM.GV;
} else if (CalleeAM.Base.Reg != 0) {
@@ -1301,7 +1504,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
}
// Deal with call operands first.
- SmallVector<Value*, 8> ArgVals;
+ SmallVector<const Value *, 8> ArgVals;
SmallVector<unsigned, 8> Args;
SmallVector<EVT, 8> ArgVTs;
SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
@@ -1309,7 +1512,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
ArgVals.reserve(CS.arg_size());
ArgVTs.reserve(CS.arg_size());
ArgFlags.reserve(CS.arg_size());
- for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
+ for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i) {
unsigned Arg = getRegForValue(*i);
if (Arg == 0)
@@ -1344,6 +1547,12 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, false, TM, ArgLocs, I->getParent()->getContext());
+
+ // Allocate shadow area for Win64
+ if (Subtarget->isTargetWin64()) {
+ CCInfo.AllocateStack(32, 8);
+ }
+
CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC));
// Get a count of how many bytes are to be pushed on the stack.
@@ -1351,7 +1560,8 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
// Issue CALLSEQ_START
unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
- BuildMI(MBB, DL, TII.get(AdjStackDown)).addImm(NumBytes);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackDown))
+ .addImm(NumBytes);
// Process argument: walk the register/memloc assignments, inserting
// copies / loads.
@@ -1397,7 +1607,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
}
case CCValAssign::BCvt: {
unsigned BC = FastEmit_r(ArgVT.getSimpleVT(), VA.getLocVT().getSimpleVT(),
- ISD::BIT_CONVERT, Arg);
+ ISD::BIT_CONVERT, Arg, /*TODO: Kill=*/false);
assert(BC != 0 && "Failed to emit a bitcast!");
Arg = BC;
ArgVT = VA.getLocVT();
@@ -1406,18 +1616,15 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
}
if (VA.isRegLoc()) {
- TargetRegisterClass* RC = TLI.getRegClassFor(ArgVT);
- bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), VA.getLocReg(),
- Arg, RC, RC);
- assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
- Emitted = true;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ VA.getLocReg()).addReg(Arg);
RegArgs.push_back(VA.getLocReg());
} else {
unsigned LocMemOffset = VA.getLocMemOffset();
X86AddressMode AM;
AM.Base.Reg = StackPtr;
AM.Disp = LocMemOffset;
- Value *ArgVal = ArgVals[VA.getValNo()];
+ const Value *ArgVal = ArgVals[VA.getValNo()];
// If this is a really simple value, emit this with the Value* version of
// X86FastEmitStore. If it isn't simple, we don't want to do this, as it
@@ -1432,25 +1639,35 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
// ELF / PIC requires GOT in the EBX register before function calls via PLT
// GOT pointer.
if (Subtarget->isPICStyleGOT()) {
- TargetRegisterClass *RC = X86::GR32RegisterClass;
- unsigned Base = getInstrInfo()->getGlobalBaseReg(&MF);
- bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), X86::EBX, Base, RC, RC);
- assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
- Emitted = true;
+ unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ X86::EBX).addReg(Base);
}
// Issue the call.
MachineInstrBuilder MIB;
if (CalleeOp) {
// Register-indirect call.
- unsigned CallOpc = Subtarget->is64Bit() ? X86::CALL64r : X86::CALL32r;
- MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addReg(CalleeOp);
+ unsigned CallOpc;
+ if (Subtarget->isTargetWin64())
+ CallOpc = X86::WINCALL64r;
+ else if (Subtarget->is64Bit())
+ CallOpc = X86::CALL64r;
+ else
+ CallOpc = X86::CALL32r;
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
+ .addReg(CalleeOp);
} else {
// Direct call.
assert(GV && "Not a direct call");
- unsigned CallOpc =
- Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
+ unsigned CallOpc;
+ if (Subtarget->isTargetWin64())
+ CallOpc = X86::WINCALL64pcrel32;
+ else if (Subtarget->is64Bit())
+ CallOpc = X86::CALL64pcrel32;
+ else
+ CallOpc = X86::CALLpcrel32;
// See if we need any target-specific flags on the GV operand.
unsigned char OpFlags = 0;
@@ -1473,7 +1690,8 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
}
- MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addGlobalAddress(GV, 0, OpFlags);
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
+ .addGlobalAddress(GV, 0, OpFlags);
}
// Add an implicit use GOT pointer in EBX.
@@ -1486,9 +1704,11 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
// Issue CALLSEQ_END
unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
- BuildMI(MBB, DL, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
+ .addImm(NumBytes).addImm(0);
// Now handle call return value (if any).
+ SmallVector<unsigned, 4> UsedRegs;
if (RetVT.getSimpleVT().SimpleTy != MVT::isVoid) {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CC, false, TM, RVLocs, I->getParent()->getContext());
@@ -1498,7 +1718,6 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
EVT CopyVT = RVLocs[0].getValVT();
TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
- TargetRegisterClass *SrcRC = DstRC;
// If this is a call to a function that returns an fp value on the x87 fp
// stack, but where we prefer to use the value in xmm registers, copy it
@@ -1507,15 +1726,14 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
RVLocs[0].getLocReg() == X86::ST1) &&
isScalarFPTypeInSSEReg(RVLocs[0].getValVT())) {
CopyVT = MVT::f80;
- SrcRC = X86::RSTRegisterClass;
DstRC = X86::RFP80RegisterClass;
}
unsigned ResultReg = createResultReg(DstRC);
- bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- RVLocs[0].getLocReg(), DstRC, SrcRC);
- assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
- Emitted = true;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(RVLocs[0].getLocReg());
+ UsedRegs.push_back(RVLocs[0].getLocReg());
+
if (CopyVT != RVLocs[0].getValVT()) {
// Round the F80 the right size, which also moves to the appropriate xmm
// register. This is accomplished by storing the F80 value in memory and
@@ -1524,18 +1742,21 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
unsigned MemSize = ResVT.getSizeInBits()/8;
int FI = MFI.CreateStackObject(MemSize, MemSize, false);
- addFrameReference(BuildMI(MBB, DL, TII.get(Opc)), FI).addReg(ResultReg);
+ addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc)), FI)
+ .addReg(ResultReg);
DstRC = ResVT == MVT::f32
? X86::FR32RegisterClass : X86::FR64RegisterClass;
Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
ResultReg = createResultReg(DstRC);
- addFrameReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), FI);
+ addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg), FI);
}
if (AndToI1) {
// Mask out all but lowest bit for some call which produces an i1.
unsigned AndResult = createResultReg(X86::GR8RegisterClass);
- BuildMI(MBB, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
ResultReg = AndResult;
}
@@ -1543,18 +1764,23 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
UpdateValueMap(I, ResultReg);
}
+ // Set all unused physreg defs as dead.
+ static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
+
return true;
}
bool
-X86FastISel::TargetSelectInstruction(Instruction *I) {
+X86FastISel::TargetSelectInstruction(const Instruction *I) {
switch (I->getOpcode()) {
default: break;
case Instruction::Load:
return X86SelectLoad(I);
case Instruction::Store:
return X86SelectStore(I);
+ case Instruction::Ret:
+ return X86SelectRet(I);
case Instruction::ICmp:
case Instruction::FCmp:
return X86SelectCmp(I);
@@ -1596,7 +1822,7 @@ X86FastISel::TargetSelectInstruction(Instruction *I) {
return false;
}
-unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
+unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
EVT VT;
if (!isTypeLegal(C->getType(), VT))
return false;
@@ -1655,7 +1881,8 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
else
Opc = X86::LEA64r;
unsigned ResultReg = createResultReg(RC);
- addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg), AM);
return ResultReg;
}
return 0;
@@ -1673,10 +1900,10 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
unsigned char OpFlag = 0;
if (Subtarget->isPICStyleStubPIC()) { // Not dynamic-no-pic
OpFlag = X86II::MO_PIC_BASE_OFFSET;
- PICBase = getInstrInfo()->getGlobalBaseReg(&MF);
+ PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
} else if (Subtarget->isPICStyleGOT()) {
OpFlag = X86II::MO_GOTOFF;
- PICBase = getInstrInfo()->getGlobalBaseReg(&MF);
+ PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
} else if (Subtarget->isPICStyleRIPRel() &&
TM.getCodeModel() == CodeModel::Small) {
PICBase = X86::RIP;
@@ -1685,13 +1912,14 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
// Create the load from the constant pool.
unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
unsigned ResultReg = createResultReg(RC);
- addConstantPoolReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg),
+ addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg),
MCPOffset, PICBase, OpFlag);
return ResultReg;
}
-unsigned X86FastISel::TargetMaterializeAlloca(AllocaInst *C) {
+unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
// Fail on dynamic allocas. At this point, getRegForValue has already
// checked its CSE maps, so if we're here trying to handle a dynamic
// alloca, we're not going to succeed. X86SelectAddress has a
@@ -1699,7 +1927,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(AllocaInst *C) {
// various places, but TargetMaterializeAlloca also needs a check
// in order to avoid recursion between getRegForValue,
// X86SelectAddress, and TargetMaterializeAlloca.
- if (!StaticAllocaMap.count(C))
+ if (!FuncInfo.StaticAllocaMap.count(C))
return 0;
X86AddressMode AM;
@@ -1708,25 +1936,13 @@ unsigned X86FastISel::TargetMaterializeAlloca(AllocaInst *C) {
unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
unsigned ResultReg = createResultReg(RC);
- addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg), AM);
return ResultReg;
}
namespace llvm {
- llvm::FastISel *X86::createFastISel(MachineFunction &mf,
- MachineModuleInfo *mmi,
- DwarfWriter *dw,
- DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
- DenseMap<const AllocaInst *, int> &am
-#ifndef NDEBUG
- , SmallSet<Instruction*, 8> &cil
-#endif
- ) {
- return new X86FastISel(mf, mmi, dw, vm, bm, am
-#ifndef NDEBUG
- , cil
-#endif
- );
+ llvm::FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo) {
+ return new X86FastISel(funcInfo);
}
}
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86FixupKinds.h b/libclamav/c++/llvm/lib/Target/X86/X86FixupKinds.h
index c8dac3c..96e0aae 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86FixupKinds.h
+++ b/libclamav/c++/llvm/lib/Target/X86/X86FixupKinds.h
@@ -17,7 +17,9 @@ namespace X86 {
enum Fixups {
reloc_pcrel_4byte = FirstTargetFixupKind, // 32-bit pcrel, e.g. a branch.
reloc_pcrel_1byte, // 8-bit pcrel, e.g. branch_1
- reloc_riprel_4byte // 32-bit rip-relative
+ reloc_pcrel_2byte, // 16-bit pcrel, e.g. callw
+ reloc_riprel_4byte, // 32-bit rip-relative
+ reloc_riprel_4byte_movq_load // 32-bit rip-relative in movq
};
}
}
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86FloatingPoint.cpp b/libclamav/c++/llvm/lib/Target/X86/X86FloatingPoint.cpp
index 6d6fe77..e6ebf66 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86FloatingPoint.cpp
+++ b/libclamav/c++/llvm/lib/Target/X86/X86FloatingPoint.cpp
@@ -8,23 +8,18 @@
//===----------------------------------------------------------------------===//
//
// This file defines the pass which converts floating point instructions from
-// virtual registers into register stack instructions. This pass uses live
+// pseudo registers into register stack instructions. This pass uses live
// variable information to indicate where the FPn registers are used and their
// lifetimes.
//
-// This pass is hampered by the lack of decent CFG manipulation routines for
-// machine code. In particular, this wants to be able to split critical edges
-// as necessary, traverse the machine basic block CFG in depth-first order, and
-// allow there to be multiple machine basic blocks for each LLVM basicblock
-// (needed for critical edge splitting).
+// The x87 hardware tracks liveness of the stack registers, so it is necessary
+// to implement exact liveness tracking between basic blocks. The CFG edges are
+// partitioned into bundles where the same FP registers must be live in
+// identical stack positions. Instructions are inserted at the end of each basic
+// block to rearrange the live registers to match the outgoing bundle.
//
-// In particular, this pass currently barfs on critical edges. Because of this,
-// it requires the instruction selector to insert FP_REG_KILL instructions on
-// the exits of any basic block that has critical edges going from it, or which
-// branch to a critical basic block.
-//
-// FIXME: this is not implemented yet. The stackifier pass only works on local
-// basic blocks.
+// This approach avoids splitting critical edges at the potential cost of more
+// live register shuffling instructions when critical edges are present.
//
//===----------------------------------------------------------------------===//
@@ -32,6 +27,7 @@
#include "X86.h"
#include "X86InstrInfo.h"
#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
@@ -54,7 +50,12 @@ STATISTIC(NumFP , "Number of floating point instructions");
namespace {
struct FPS : public MachineFunctionPass {
static char ID;
- FPS() : MachineFunctionPass(&ID) {}
+ FPS() : MachineFunctionPass(ID) {
+ // This is really only to keep valgrind quiet.
+ // The logic in isLive() is too much for it.
+ memset(Stack, 0, sizeof(Stack));
+ memset(RegMap, 0, sizeof(RegMap));
+ }
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
@@ -69,11 +70,71 @@ namespace {
private:
const TargetInstrInfo *TII; // Machine instruction info.
+
+ // Two CFG edges are related if they leave the same block, or enter the same
+ // block. The transitive closure of an edge under this relation is a
+ // LiveBundle. It represents a set of CFG edges where the live FP stack
+ // registers must be allocated identically in the x87 stack.
+ //
+ // A LiveBundle is usually all the edges leaving a block, or all the edges
+ // entering a block, but it can contain more edges if critical edges are
+ // present.
+ //
+ // The set of live FP registers in a LiveBundle is calculated by bundleCFG,
+ // but the exact mapping of FP registers to stack slots is fixed later.
+ struct LiveBundle {
+ // Bit mask of live FP registers. Bit 0 = FP0, bit 1 = FP1, &c.
+ unsigned Mask;
+
+ // Number of pre-assigned live registers in FixStack. This is 0 when the
+ // stack order has not yet been fixed.
+ unsigned FixCount;
+
+ // Assigned stack order for live-in registers.
+ // FixStack[i] == getStackEntry(i) for all i < FixCount.
+ unsigned char FixStack[8];
+
+ LiveBundle(unsigned m = 0) : Mask(m), FixCount(0) {}
+
+ // Have the live registers been assigned a stack order yet?
+ bool isFixed() const { return !Mask || FixCount; }
+ };
+
+ // Numbered LiveBundle structs. LiveBundles[0] is used for all CFG edges
+ // with no live FP registers.
+ SmallVector<LiveBundle, 8> LiveBundles;
+
+ // Map each MBB in the current function to an (ingoing, outgoing) index into
+ // LiveBundles. Blocks with no FP registers live in or out map to (0, 0)
+ // and are not actually stored in the map.
+ DenseMap<MachineBasicBlock*, std::pair<unsigned, unsigned> > BlockBundle;
+
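
The "transitive closure of an edge" relation described above is easiest to picture as a union-find over each block's incoming and outgoing side; the patch itself computes the same partition with the PropUp/PropDown work sets in bundleCFG below. A rough sketch under that reading (toy types and names, not the patch's code):

    #include <numeric>
    #include <vector>

    struct Edge { unsigned Pred, Succ; };   // CFG edge Pred -> Succ

    // Node 2*BB is BB's outgoing side, node 2*BB+1 its incoming side. Every
    // edge joins its predecessor's out-side with its successor's in-side, so
    // the connected components correspond to the LiveBundles described above.
    struct EdgeBundles {
      std::vector<unsigned> Parent;
      explicit EdgeBundles(unsigned NumBlocks) : Parent(2 * NumBlocks) {
        std::iota(Parent.begin(), Parent.end(), 0u);
      }
      unsigned find(unsigned X) {
        if (Parent[X] != X)
          Parent[X] = find(Parent[X]);      // path compression
        return Parent[X];
      }
      void addEdge(Edge E) { Parent[find(2 * E.Pred)] = find(2 * E.Succ + 1); }
      unsigned outBundle(unsigned BB) { return find(2 * BB); }
      unsigned inBundle(unsigned BB)  { return find(2 * BB + 1); }
    };

Critical edges are what glue several blocks' sides into one component, which is why a single bundle can end up covering more than one block's live-in set.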
+ // Return a bitmask of FP registers in block's live-in list.
+ unsigned calcLiveInMask(MachineBasicBlock *MBB) {
+ unsigned Mask = 0;
+ for (MachineBasicBlock::livein_iterator I = MBB->livein_begin(),
+ E = MBB->livein_end(); I != E; ++I) {
+ unsigned Reg = *I - X86::FP0;
+ if (Reg < 8)
+ Mask |= 1 << Reg;
+ }
+ return Mask;
+ }
+
+ // Partition all the CFG edges into LiveBundles.
+ void bundleCFG(MachineFunction &MF);
+
MachineBasicBlock *MBB; // Current basic block
unsigned Stack[8]; // FP<n> Registers in each stack slot...
unsigned RegMap[8]; // Track which stack slot contains each register
unsigned StackTop; // The current top of the FP stack.
+ // Set up our stack model to match the incoming registers to MBB.
+ void setupBlockStack();
+
+ // Shuffle live registers to match the expectations of successor blocks.
+ void finishBlockStack();
+
void dumpStack() const {
dbgs() << "Stack contents:";
for (unsigned i = 0; i != StackTop; ++i) {
@@ -82,27 +143,36 @@ namespace {
}
dbgs() << "\n";
}
- private:
- /// isStackEmpty - Return true if the FP stack is empty.
- bool isStackEmpty() const {
- return StackTop == 0;
- }
-
- // getSlot - Return the stack slot number a particular register number is
- // in.
+
+ /// getSlot - Return the stack slot number a particular register number is
+ /// in.
unsigned getSlot(unsigned RegNo) const {
assert(RegNo < 8 && "Regno out of range!");
return RegMap[RegNo];
}
- // getStackEntry - Return the X86::FP<n> register in register ST(i).
+ /// isLive - Is RegNo currently live in the stack?
+ bool isLive(unsigned RegNo) const {
+ unsigned Slot = getSlot(RegNo);
+ return Slot < StackTop && Stack[Slot] == RegNo;
+ }
+
+ /// getScratchReg - Return an FP register that is not currently in use.
+ unsigned getScratchReg() {
+ for (int i = 7; i >= 0; --i)
+ if (!isLive(i))
+ return i;
+ llvm_unreachable("Ran out of scratch FP registers");
+ }
+
+ /// getStackEntry - Return the X86::FP<n> register in register ST(i).
unsigned getStackEntry(unsigned STi) const {
assert(STi < StackTop && "Access past stack top!");
return Stack[StackTop-1-STi];
}
- // getSTReg - Return the X86::ST(i) register which contains the specified
- // FP<RegNo> register.
+ /// getSTReg - Return the X86::ST(i) register which contains the specified
+ /// FP<RegNo> register.
unsigned getSTReg(unsigned RegNo) const {
return StackTop - 1 - getSlot(RegNo) + llvm::X86::ST0;
}
@@ -117,10 +187,9 @@ namespace {
bool isAtTop(unsigned RegNo) const { return getSlot(RegNo) == StackTop-1; }
void moveToTop(unsigned RegNo, MachineBasicBlock::iterator I) {
- MachineInstr *MI = I;
- DebugLoc dl = MI->getDebugLoc();
+ DebugLoc dl = I == MBB->end() ? DebugLoc() : I->getDebugLoc();
if (isAtTop(RegNo)) return;
-
+
unsigned STReg = getSTReg(RegNo);
unsigned RegOnTop = getStackEntry(0);
@@ -133,28 +202,41 @@ namespace {
// Emit an fxch to update the runtime processors version of the state.
BuildMI(*MBB, I, dl, TII->get(X86::XCH_F)).addReg(STReg);
- NumFXCH++;
+ ++NumFXCH;
}
void duplicateToTop(unsigned RegNo, unsigned AsReg, MachineInstr *I) {
- DebugLoc dl = I->getDebugLoc();
+ DebugLoc dl = I == MBB->end() ? DebugLoc() : I->getDebugLoc();
unsigned STReg = getSTReg(RegNo);
pushReg(AsReg); // New register on top of stack
BuildMI(*MBB, I, dl, TII->get(X86::LD_Frr)).addReg(STReg);
}
- // popStackAfter - Pop the current value off of the top of the FP stack
- // after the specified instruction.
+ /// popStackAfter - Pop the current value off of the top of the FP stack
+ /// after the specified instruction.
void popStackAfter(MachineBasicBlock::iterator &I);
- // freeStackSlotAfter - Free the specified register from the register stack,
- // so that it is no longer in a register. If the register is currently at
- // the top of the stack, we just pop the current instruction, otherwise we
- // store the current top-of-stack into the specified slot, then pop the top
- // of stack.
+ /// freeStackSlotAfter - Free the specified register from the register
+ /// stack, so that it is no longer in a register. If the register is
+ /// currently at the top of the stack, we just pop the current instruction,
+ /// otherwise we store the current top-of-stack into the specified slot,
+ /// then pop the top of stack.
void freeStackSlotAfter(MachineBasicBlock::iterator &I, unsigned Reg);
+ /// freeStackSlotBefore - Just the pop, no folding. Return the inserted
+ /// instruction.
+ MachineBasicBlock::iterator
+ freeStackSlotBefore(MachineBasicBlock::iterator I, unsigned FPRegNo);
+
+ /// Adjust the live registers to be the set in Mask.
+ void adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I);
+
+ /// Shuffle the top FixCount stack entries such that FP reg FixStack[0] is
+ /// st(0), FP reg FixStack[1] is st(1) etc.
+ void shuffleStackTop(const unsigned char *FixStack, unsigned FixCount,
+ MachineBasicBlock::iterator I);
+
bool processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB);
void handleZeroArgFP(MachineBasicBlock::iterator &I);
@@ -164,6 +246,8 @@ namespace {
void handleCompareFP(MachineBasicBlock::iterator &I);
void handleCondMovFP(MachineBasicBlock::iterator &I);
void handleSpecialFP(MachineBasicBlock::iterator &I);
+
+ bool translateCopy(MachineInstr*);
};
char FPS::ID = 0;
}
@@ -179,7 +263,6 @@ static unsigned getFPReg(const MachineOperand &MO) {
return Reg - X86::FP0;
}
-
/// runOnMachineFunction - Loop over all of the basic blocks, transforming FP
/// register references into FP stack references.
///
@@ -199,6 +282,10 @@ bool FPS::runOnMachineFunction(MachineFunction &MF) {
if (!FPIsUsed) return false;
TII = MF.getTarget().getInstrInfo();
+
+ // Prepare cross-MBB liveness.
+ bundleCFG(MF);
+
StackTop = 0;
// Process the function in depth first order so that we process at least one
@@ -213,16 +300,111 @@ bool FPS::runOnMachineFunction(MachineFunction &MF) {
Changed |= processBasicBlock(MF, **I);
// Process any unreachable blocks in arbitrary order now.
- if (MF.size() == Processed.size())
- return Changed;
+ if (MF.size() != Processed.size())
+ for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)
+ if (Processed.insert(BB))
+ Changed |= processBasicBlock(MF, *BB);
+
+ BlockBundle.clear();
+ LiveBundles.clear();
- for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)
- if (Processed.insert(BB))
- Changed |= processBasicBlock(MF, *BB);
-
return Changed;
}
+/// bundleCFG - Scan all the basic blocks to determine consistent live-in and
+/// live-out sets for the FP registers. Consistent means that the set of
+/// registers live-out from a block is identical to the live-in set of all
+/// successors. This is not enforced by the normal live-in lists since
+/// registers may be implicitly defined, or not used by all successors.
+void FPS::bundleCFG(MachineFunction &MF) {
+ assert(LiveBundles.empty() && "Stale data in LiveBundles");
+ assert(BlockBundle.empty() && "Stale data in BlockBundle");
+ SmallPtrSet<MachineBasicBlock*, 8> PropDown, PropUp;
+
+ // LiveBundle[0] is the empty live-in set.
+ LiveBundles.resize(1);
+
+ // First gather the actual live-in masks for all MBBs.
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
+ MachineBasicBlock *MBB = I;
+ const unsigned Mask = calcLiveInMask(MBB);
+ if (!Mask)
+ continue;
+ // Ingoing bundle index.
+ unsigned &Idx = BlockBundle[MBB].first;
+ // Already assigned an ingoing bundle?
+ if (Idx)
+ continue;
+ // Allocate a new LiveBundle struct for this block's live-ins.
+ const unsigned BundleIdx = Idx = LiveBundles.size();
+ DEBUG(dbgs() << "Creating LB#" << BundleIdx << ": in:BB#"
+ << MBB->getNumber());
+ LiveBundles.push_back(Mask);
+ LiveBundle &Bundle = LiveBundles.back();
+
+ // Make sure all predecessors have the same live-out set.
+ PropUp.insert(MBB);
+
+ // Keep pushing liveness up and down the CFG until convergence.
+ // Only critical edges cause iteration here, but when they do, multiple
+ // blocks can be assigned to the same LiveBundle index.
+ do {
+ // Assign BundleIdx as liveout from predecessors in PropUp.
+ for (SmallPtrSet<MachineBasicBlock*, 16>::iterator I = PropUp.begin(),
+ E = PropUp.end(); I != E; ++I) {
+ MachineBasicBlock *MBB = *I;
+ for (MachineBasicBlock::const_pred_iterator LinkI = MBB->pred_begin(),
+ LinkE = MBB->pred_end(); LinkI != LinkE; ++LinkI) {
+ MachineBasicBlock *PredMBB = *LinkI;
+ // PredMBB's liveout bundle should be set to BundleIdx.
+ unsigned &Idx = BlockBundle[PredMBB].second;
+ if (Idx) {
+ assert(Idx == BundleIdx && "Inconsistent CFG");
+ continue;
+ }
+ Idx = BundleIdx;
+ DEBUG(dbgs() << " out:BB#" << PredMBB->getNumber());
+ // Propagate to siblings.
+ if (PredMBB->succ_size() > 1)
+ PropDown.insert(PredMBB);
+ }
+ }
+ PropUp.clear();
+
+ // Assign BundleIdx as livein to successors in PropDown.
+ for (SmallPtrSet<MachineBasicBlock*, 16>::iterator I = PropDown.begin(),
+ E = PropDown.end(); I != E; ++I) {
+ MachineBasicBlock *MBB = *I;
+ for (MachineBasicBlock::const_succ_iterator LinkI = MBB->succ_begin(),
+ LinkE = MBB->succ_end(); LinkI != LinkE; ++LinkI) {
+ MachineBasicBlock *SuccMBB = *LinkI;
+ // SuccMBB's livein bundle should be set to BundleIdx.
+ unsigned &Idx = BlockBundle[SuccMBB].first;
+ if (Idx) {
+ assert(Idx == BundleIdx && "Inconsistent CFG");
+ continue;
+ }
+ Idx = BundleIdx;
+ DEBUG(dbgs() << " in:BB#" << SuccMBB->getNumber());
+ // Propagate to siblings.
+ if (SuccMBB->pred_size() > 1)
+ PropUp.insert(SuccMBB);
+ // Also accumulate the bundle liveness mask from the liveins here.
+ Bundle.Mask |= calcLiveInMask(SuccMBB);
+ }
+ }
+ PropDown.clear();
+ } while (!PropUp.empty());
+ DEBUG({
+ dbgs() << " live:";
+ for (unsigned i = 0; i < 8; ++i)
+ if (Bundle.Mask & (1<<i))
+ dbgs() << " %FP" << i;
+ dbgs() << '\n';
+ });
+ }
+}
+
/// processBasicBlock - Loop over all of the instructions in the basic block,
/// transforming FP instructions into their stack form.
///
@@ -230,14 +412,19 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
bool Changed = false;
MBB = &BB;
+ setupBlockStack();
+
for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I) {
MachineInstr *MI = I;
- unsigned Flags = MI->getDesc().TSFlags;
-
+ uint64_t Flags = MI->getDesc().TSFlags;
+
unsigned FPInstClass = Flags & X86II::FPTypeMask;
if (MI->isInlineAsm())
FPInstClass = X86II::SpecialFP;
-
+
+ if (MI->isCopy() && translateCopy(MI))
+ FPInstClass = X86II::SpecialFP;
+
if (FPInstClass == X86II::NotFP)
continue; // Efficiently ignore non-fp insts!
@@ -297,10 +484,82 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
Changed = true;
}
- assert(isStackEmpty() && "Stack not empty at end of basic block?");
+ finishBlockStack();
+
return Changed;
}
+/// setupBlockStack - Use the BlockBundle map to set up our model of the stack
+/// to match predecessors' live out stack.
+void FPS::setupBlockStack() {
+ DEBUG(dbgs() << "\nSetting up live-ins for BB#" << MBB->getNumber()
+ << " derived from " << MBB->getName() << ".\n");
+ StackTop = 0;
+ const LiveBundle &Bundle = LiveBundles[BlockBundle.lookup(MBB).first];
+
+ if (!Bundle.Mask) {
+ DEBUG(dbgs() << "Block has no FP live-ins.\n");
+ return;
+ }
+
+ // Depth-first iteration should ensure that we always have an assigned stack.
+ assert(Bundle.isFixed() && "Reached block before any predecessors");
+
+ // Push the fixed live-in registers.
+ for (unsigned i = Bundle.FixCount; i > 0; --i) {
+ MBB->addLiveIn(X86::ST0+i-1);
+ DEBUG(dbgs() << "Live-in st(" << (i-1) << "): %FP"
+ << unsigned(Bundle.FixStack[i-1]) << '\n');
+ pushReg(Bundle.FixStack[i-1]);
+ }
+
+ // Kill off unwanted live-ins. This can happen with a critical edge.
+ // FIXME: We could keep these live registers around as zombies. They may need
+ // to be revived at the end of a short block. It might save a few instrs.
+ adjustLiveRegs(calcLiveInMask(MBB), MBB->begin());
+ DEBUG(MBB->dump());
+}
+
+/// finishBlockStack - Revive live-outs that are implicitly defined out of
+/// MBB. Shuffle live registers to match the expected fixed stack of any
+/// predecessors, and ensure that all predecessors are expecting the same
+/// stack.
+void FPS::finishBlockStack() {
+ // The RET handling below takes care of return blocks for us.
+ if (MBB->succ_empty())
+ return;
+
+ DEBUG(dbgs() << "Setting up live-outs for BB#" << MBB->getNumber()
+ << " derived from " << MBB->getName() << ".\n");
+
+ unsigned BundleIdx = BlockBundle.lookup(MBB).second;
+ LiveBundle &Bundle = LiveBundles[BundleIdx];
+
+ // We may need to kill and define some registers to match successors.
+ // FIXME: This can probably be combined with the shuffle below.
+ MachineBasicBlock::iterator Term = MBB->getFirstTerminator();
+ adjustLiveRegs(Bundle.Mask, Term);
+
+ if (!Bundle.Mask) {
+ DEBUG(dbgs() << "No live-outs.\n");
+ return;
+ }
+
+ // Has the stack order been fixed yet?
+ DEBUG(dbgs() << "LB#" << BundleIdx << ": ");
+ if (Bundle.isFixed()) {
+ DEBUG(dbgs() << "Shuffling stack to match.\n");
+ shuffleStackTop(Bundle.FixStack, Bundle.FixCount, Term);
+ } else {
+ // Not fixed yet, we get to choose.
+ DEBUG(dbgs() << "Fixing stack order now.\n");
+ Bundle.FixCount = StackTop;
+ for (unsigned i = 0; i < StackTop; ++i)
+ Bundle.FixStack[i] = getStackEntry(i);
+ }
+}
+
+
//===----------------------------------------------------------------------===//
// Efficient Lookup Table Support
//===----------------------------------------------------------------------===//
@@ -313,7 +572,7 @@ namespace {
friend bool operator<(const TableEntry &TE, unsigned V) {
return TE.from < V;
}
- friend bool operator<(unsigned V, const TableEntry &TE) {
+ friend bool ATTRIBUTE_USED operator<(unsigned V, const TableEntry &TE) {
return V < TE.from;
}
};
@@ -592,6 +851,13 @@ void FPS::freeStackSlotAfter(MachineBasicBlock::iterator &I, unsigned FPRegNo) {
// Otherwise, store the top of stack into the dead slot, killing the operand
// without having to add in an explicit xchg then pop.
//
+ I = freeStackSlotBefore(++I, FPRegNo);
+}
+
+/// freeStackSlotBefore - Free the specified register without trying any
+/// folding.
+MachineBasicBlock::iterator
+FPS::freeStackSlotBefore(MachineBasicBlock::iterator I, unsigned FPRegNo) {
unsigned STReg = getSTReg(FPRegNo);
unsigned OldSlot = getSlot(FPRegNo);
unsigned TopReg = Stack[StackTop-1];
@@ -599,9 +865,90 @@ void FPS::freeStackSlotAfter(MachineBasicBlock::iterator &I, unsigned FPRegNo) {
RegMap[TopReg] = OldSlot;
RegMap[FPRegNo] = ~0;
Stack[--StackTop] = ~0;
- MachineInstr *MI = I;
- DebugLoc dl = MI->getDebugLoc();
- I = BuildMI(*MBB, ++I, dl, TII->get(X86::ST_FPrr)).addReg(STReg);
+ return BuildMI(*MBB, I, DebugLoc(), TII->get(X86::ST_FPrr)).addReg(STReg);
+}
+
+/// adjustLiveRegs - Kill and revive registers such that exactly the FP
+/// registers with a bit in Mask are live.
+void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) {
+ unsigned Defs = Mask;
+ unsigned Kills = 0;
+ for (unsigned i = 0; i < StackTop; ++i) {
+ unsigned RegNo = Stack[i];
+ if (!(Defs & (1 << RegNo)))
+ // This register is live, but we don't want it.
+ Kills |= (1 << RegNo);
+ else
+ // We don't need to imp-def this live register.
+ Defs &= ~(1 << RegNo);
+ }
+ assert((Kills & Defs) == 0 && "Register needs killing and def'ing?");
+
+ // Produce implicit-defs for free by using killed registers.
+ while (Kills && Defs) {
+ unsigned KReg = CountTrailingZeros_32(Kills);
+ unsigned DReg = CountTrailingZeros_32(Defs);
+ DEBUG(dbgs() << "Renaming %FP" << KReg << " as imp %FP" << DReg << "\n");
+ std::swap(Stack[getSlot(KReg)], Stack[getSlot(DReg)]);
+ std::swap(RegMap[KReg], RegMap[DReg]);
+ Kills &= ~(1 << KReg);
+ Defs &= ~(1 << DReg);
+ }
+
+ // Kill registers by popping.
+ if (Kills && I != MBB->begin()) {
+ MachineBasicBlock::iterator I2 = llvm::prior(I);
+ for (;;) {
+ unsigned KReg = getStackEntry(0);
+ if (!(Kills & (1 << KReg)))
+ break;
+ DEBUG(dbgs() << "Popping %FP" << KReg << "\n");
+ popStackAfter(I2);
+ Kills &= ~(1 << KReg);
+ }
+ }
+
+ // Manually kill the rest.
+ while (Kills) {
+ unsigned KReg = CountTrailingZeros_32(Kills);
+ DEBUG(dbgs() << "Killing %FP" << KReg << "\n");
+ freeStackSlotBefore(I, KReg);
+ Kills &= ~(1 << KReg);
+ }
+
+ // Load zeros for all the imp-defs.
+ while(Defs) {
+ unsigned DReg = CountTrailingZeros_32(Defs);
+ DEBUG(dbgs() << "Defining %FP" << DReg << " as 0\n");
+ BuildMI(*MBB, I, DebugLoc(), TII->get(X86::LD_F0));
+ pushReg(DReg);
+ Defs &= ~(1 << DReg);
+ }
+
+ // Now we should have the correct registers live.
+ DEBUG(dumpStack());
+ assert(StackTop == CountPopulation_32(Mask) && "Live count mismatch");
+}
+
+/// shuffleStackTop - emit fxch instructions before I to shuffle the top
+/// FixCount entries into the order given by FixStack.
+/// FIXME: Is there a better algorithm than insertion sort?
+void FPS::shuffleStackTop(const unsigned char *FixStack,
+ unsigned FixCount,
+ MachineBasicBlock::iterator I) {
+ // Move items into place, starting from the desired stack bottom.
+ while (FixCount--) {
+ // Old register at position FixCount.
+ unsigned OldReg = getStackEntry(FixCount);
+ // Desired register at position FixCount.
+ unsigned Reg = FixStack[FixCount];
+ if (Reg == OldReg)
+ continue;
+ // (Reg st0) (OldReg st0) = (Reg OldReg st0)
+ moveToTop(Reg, I);
+ moveToTop(OldReg, I);
+ }
+ DEBUG(dumpStack());
}
@@ -628,7 +975,7 @@ void FPS::handleZeroArgFP(MachineBasicBlock::iterator &I) {
void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
MachineInstr *MI = I;
unsigned NumOps = MI->getDesc().getNumOperands();
- assert((NumOps == X86AddrNumOperands + 1 || NumOps == 1) &&
+ assert((NumOps == X86::AddrNumOperands + 1 || NumOps == 1) &&
"Can only handle fst* & ftst instructions!");
// Is this the last use of the source register?
@@ -655,7 +1002,7 @@ void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
MI->getOpcode() == X86::ISTT_Fp32m80 ||
MI->getOpcode() == X86::ISTT_Fp64m80 ||
MI->getOpcode() == X86::ST_FpP80m)) {
- duplicateToTop(Reg, 7 /*temp register*/, I);
+ duplicateToTop(Reg, getScratchReg(), I);
} else {
moveToTop(Reg, I); // Move to the top of the stack...
}
@@ -1001,15 +1348,16 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
case X86::FpSET_ST0_32:
case X86::FpSET_ST0_64:
case X86::FpSET_ST0_80: {
+ // FpSET_ST0_80 is generated by copyRegToReg for setting up inline asm
+ // arguments that use an st constraint. We expect a sequence of
+ // instructions: Fp_SET_ST0 Fp_SET_ST1? INLINEASM
unsigned Op0 = getFPReg(MI->getOperand(0));
- // FpSET_ST0_80 is generated by copyRegToReg for both function return
- // and inline assembly with the "st" constrain. In the latter case,
- // it is possible for ST(0) to be alive after this instruction.
if (!MI->killsRegister(X86::FP0 + Op0)) {
- // Duplicate Op0
- duplicateToTop(0, 7 /*temp register*/, I);
+ // Duplicate Op0 into a temporary on the stack top.
+ duplicateToTop(Op0, getScratchReg(), I);
} else {
+ // Op0 is killed, so just swap it into position.
moveToTop(Op0, I);
}
--StackTop; // "Forget" we have something on the top of stack!
@@ -1017,17 +1365,28 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
}
case X86::FpSET_ST1_32:
case X86::FpSET_ST1_64:
- case X86::FpSET_ST1_80:
- // StackTop can be 1 if a FpSET_ST0_* was before this. Exchange them.
- if (StackTop == 1) {
- BuildMI(*MBB, I, dl, TII->get(X86::XCH_F)).addReg(X86::ST1);
- NumFXCH++;
- StackTop = 0;
- break;
+ case X86::FpSET_ST1_80: {
+ // Set up st(1) for inline asm. We are assuming that st(0) has already been
+ // set up by FpSET_ST0, and our StackTop is off by one because of it.
+ unsigned Op0 = getFPReg(MI->getOperand(0));
+ // Restore the actual StackTop from before Fp_SET_ST0.
+ // Note we can't handle Fp_SET_ST1 without a preceding Fp_SET_ST0, and we
+ // are not enforcing the constraint.
+ ++StackTop;
+ unsigned RegOnTop = getStackEntry(0); // This reg must remain in st(0).
+ if (!MI->killsRegister(X86::FP0 + Op0)) {
+ duplicateToTop(Op0, getScratchReg(), I);
+ moveToTop(RegOnTop, I);
+ } else if (getSTReg(Op0) != X86::ST1) {
+ // We have the wrong value at st(1). Shuffle! Untested!
+ moveToTop(getStackEntry(1), I);
+ moveToTop(Op0, I);
+ moveToTop(RegOnTop, I);
}
- assert(StackTop == 2 && "Stack should have two element on it to return!");
- --StackTop; // "Forget" we have something on the top of stack!
+ assert(StackTop >= 2 && "Too few live registers");
+ StackTop -= 2; // "Forget" both st(0) and st(1).
break;
+ }
case X86::MOV_Fp3232:
case X86::MOV_Fp3264:
case X86::MOV_Fp6432:
@@ -1041,32 +1400,6 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
unsigned SrcReg = getFPReg(MO1);
const MachineOperand &MO0 = MI->getOperand(0);
- // These can be created due to inline asm. Two address pass can introduce
- // copies from RFP registers to virtual registers.
- if (MO0.getReg() == X86::ST0 && SrcReg == 0) {
- assert(MO1.isKill());
- // Treat %ST0<def> = MOV_Fp8080 %FP0<kill>
- // like FpSET_ST0_80 %FP0<kill>, %ST0<imp-def>
- assert((StackTop == 1 || StackTop == 2)
- && "Stack should have one or two element on it to return!");
- --StackTop; // "Forget" we have something on the top of stack!
- break;
- } else if (MO0.getReg() == X86::ST1 && SrcReg == 1) {
- assert(MO1.isKill());
- // Treat %ST1<def> = MOV_Fp8080 %FP1<kill>
- // like FpSET_ST1_80 %FP0<kill>, %ST1<imp-def>
- // StackTop can be 1 if a FpSET_ST0_* was before this. Exchange them.
- if (StackTop == 1) {
- BuildMI(*MBB, I, dl, TII->get(X86::XCH_F)).addReg(X86::ST1);
- NumFXCH++;
- StackTop = 0;
- break;
- }
- assert(StackTop == 2 && "Stack should have two element on it to return!");
- --StackTop; // "Forget" we have something on the top of stack!
- break;
- }
-
unsigned DestReg = getFPReg(MO0);
if (MI->killsRegister(X86::FP0+SrcReg)) {
// If the input operand is killed, we can just change the owner of the
@@ -1088,8 +1421,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
// 'f' constraint. These should be turned into the current ST(x) register
// in the machine instr. Also, any kills should be explicitly popped after
// the inline asm.
- unsigned Kills[7];
- unsigned NumKills = 0;
+ unsigned Kills = 0;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &Op = MI->getOperand(i);
if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
@@ -1103,7 +1435,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
// asm. We just remember it for now, and pop them all off at the end in
// a batch.
if (Op.isKill())
- Kills[NumKills++] = FPReg;
+ Kills |= 1U << FPReg;
}
// If this asm kills any FP registers (is the last use of them) we must
@@ -1114,9 +1446,11 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
// Note: this might be a non-optimal pop sequence. We might be able to do
// better by trying to pop in stack order or something.
MachineBasicBlock::iterator InsertPt = MI;
- while (NumKills)
- freeStackSlotAfter(InsertPt, Kills[--NumKills]);
-
+ while (Kills) {
+ unsigned FPReg = CountTrailingZeros_32(Kills);
+ freeStackSlotAfter(InsertPt, FPReg);
+ Kills &= ~(1U << FPReg);
+ }
// Don't delete the inline asm!
return;
}
@@ -1125,11 +1459,11 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
case X86::RETI:
// If RET has an FP register use operand, pass the first one in ST(0) and
// the second one in ST(1).
- if (isStackEmpty()) return; // Quick check to see if any are possible.
-
+
// Find the register operands.
unsigned FirstFPRegOp = ~0U, SecondFPRegOp = ~0U;
-
+ unsigned LiveMask = 0;
+
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &Op = MI->getOperand(i);
if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6)
@@ -1148,12 +1482,18 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
assert(SecondFPRegOp == ~0U && "More than two fp operands!");
SecondFPRegOp = getFPReg(Op);
}
+ LiveMask |= (1 << getFPReg(Op));
// Remove the operand so that later passes don't see it.
MI->RemoveOperand(i);
--i, --e;
}
-
+
+ // We may have been carrying spurious live-ins, so make sure only the returned
+ // registers are left live.
+ adjustLiveRegs(LiveMask, MI);
+ if (!LiveMask) return; // Quick check to see if any are possible.
+
// There are only four possibilities here:
// 1) we are returning a single FP value. In this case, it has to be in
// ST(0) already, so just declare success by removing the value from the
@@ -1179,7 +1519,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
// Duplicate the TOS so that we return it twice. Just pick some other FPx
// register to hold it.
- unsigned NewReg = (FirstFPRegOp+1)%7;
+ unsigned NewReg = getScratchReg();
duplicateToTop(FirstFPRegOp, NewReg, MI);
FirstFPRegOp = NewReg;
}
@@ -1203,5 +1543,42 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
}
I = MBB->erase(I); // Remove the pseudo instruction
- --I;
+
+ // We want to leave I pointing to the previous instruction, but what if we
+ // just erased the first instruction?
+ if (I == MBB->begin()) {
+ DEBUG(dbgs() << "Inserting dummy KILL\n");
+ I = BuildMI(*MBB, I, DebugLoc(), TII->get(TargetOpcode::KILL));
+ } else
+ --I;
+}
+
+// Translate a COPY instruction to a pseudo-op that handleSpecialFP understands.
+bool FPS::translateCopy(MachineInstr *MI) {
+ unsigned DstReg = MI->getOperand(0).getReg();
+ unsigned SrcReg = MI->getOperand(1).getReg();
+
+ if (DstReg == X86::ST0) {
+ MI->setDesc(TII->get(X86::FpSET_ST0_80));
+ MI->RemoveOperand(0);
+ return true;
+ }
+ if (DstReg == X86::ST1) {
+ MI->setDesc(TII->get(X86::FpSET_ST1_80));
+ MI->RemoveOperand(0);
+ return true;
+ }
+ if (SrcReg == X86::ST0) {
+ MI->setDesc(TII->get(X86::FpGET_ST0_80));
+ return true;
+ }
+ if (SrcReg == X86::ST1) {
+ MI->setDesc(TII->get(X86::FpGET_ST1_80));
+ return true;
+ }
+ if (X86::RFP80RegClass.contains(DstReg, SrcReg)) {
+ MI->setDesc(TII->get(X86::MOV_Fp8080));
+ return true;
+ }
+ return false;
}
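
The rewritten stackifier above keeps pending kills and implicit defs in bit masks and always services the lowest set bit first via CountTrailingZeros_32/CountPopulation_32. As a rough, self-contained sketch of that bit-iteration pattern (plain C++, not part of this patch; __builtin_ctz stands in for LLVM's helper and the register numbers are made up):

#include <cstdint>
#include <cstdio>

int main() {
  // Pretend FP registers 2, 3 and 5 still need killing, the way adjustLiveRegs
  // would record them in its Kills mask.
  uint32_t Kills = (1u << 2) | (1u << 3) | (1u << 5);
  while (Kills) {
    unsigned KReg = __builtin_ctz(Kills); // lowest pending register first
    std::printf("killing %%FP%u\n", KReg);
    Kills &= ~(1u << KReg);               // retire it and continue
  }
  return 0;
}
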
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86FloatingPointRegKill.cpp b/libclamav/c++/llvm/lib/Target/X86/X86FloatingPointRegKill.cpp
deleted file mode 100644
index 6a117dd..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/X86FloatingPointRegKill.cpp
+++ /dev/null
@@ -1,140 +0,0 @@
-//===-- X86FloatingPoint.cpp - FP_REG_KILL inserter -----------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the pass which inserts FP_REG_KILL instructions.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "x86-codegen"
-#include "X86.h"
-#include "X86InstrInfo.h"
-#include "X86Subtarget.h"
-#include "llvm/Instructions.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/CFG.h"
-#include "llvm/ADT/Statistic.h"
-using namespace llvm;
-
-STATISTIC(NumFPKill, "Number of FP_REG_KILL instructions added");
-
-namespace {
- struct FPRegKiller : public MachineFunctionPass {
- static char ID;
- FPRegKiller() : MachineFunctionPass(&ID) {}
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- AU.addPreservedID(MachineLoopInfoID);
- AU.addPreservedID(MachineDominatorsID);
- MachineFunctionPass::getAnalysisUsage(AU);
- }
-
- virtual bool runOnMachineFunction(MachineFunction &MF);
-
- virtual const char *getPassName() const { return "X86 FP_REG_KILL inserter"; }
- };
- char FPRegKiller::ID = 0;
-}
-
-FunctionPass *llvm::createX87FPRegKillInserterPass() { return new FPRegKiller(); }
-
-bool FPRegKiller::runOnMachineFunction(MachineFunction &MF) {
- // If we are emitting FP stack code, scan the basic block to determine if this
- // block defines any FP values. If so, put an FP_REG_KILL instruction before
- // the terminator of the block.
-
- // Note that FP stack instructions are used in all modes for long double,
- // so we always need to do this check.
- // Also note that it's possible for an FP stack register to be live across
- // an instruction that produces multiple basic blocks (SSE CMOV) so we
- // must check all the generated basic blocks.
-
- // Scan all of the machine instructions in these MBBs, checking for FP
- // stores. (RFP32 and RFP64 will not exist in SSE mode, but RFP80 might.)
-
- // Fast-path: If nothing is using the x87 registers, we don't need to do
- // any scanning.
- MachineRegisterInfo &MRI = MF.getRegInfo();
- if (MRI.getRegClassVirtRegs(X86::RFP80RegisterClass).empty() &&
- MRI.getRegClassVirtRegs(X86::RFP64RegisterClass).empty() &&
- MRI.getRegClassVirtRegs(X86::RFP32RegisterClass).empty())
- return false;
-
- bool Changed = false;
- const X86Subtarget &Subtarget = MF.getTarget().getSubtarget<X86Subtarget>();
- MachineFunction::iterator MBBI = MF.begin();
- MachineFunction::iterator EndMBB = MF.end();
- for (; MBBI != EndMBB; ++MBBI) {
- MachineBasicBlock *MBB = MBBI;
-
- // If this block returns, ignore it. We don't want to insert an FP_REG_KILL
- // before the return.
- if (!MBB->empty()) {
- MachineBasicBlock::iterator EndI = MBB->end();
- --EndI;
- if (EndI->getDesc().isReturn())
- continue;
- }
-
- bool ContainsFPCode = false;
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
- !ContainsFPCode && I != E; ++I) {
- if (I->getNumOperands() != 0 && I->getOperand(0).isReg()) {
- const TargetRegisterClass *clas;
- for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
- if (I->getOperand(op).isReg() && I->getOperand(op).isDef() &&
- TargetRegisterInfo::isVirtualRegister(I->getOperand(op).getReg()) &&
- ((clas = MRI.getRegClass(I->getOperand(op).getReg())) ==
- X86::RFP32RegisterClass ||
- clas == X86::RFP64RegisterClass ||
- clas == X86::RFP80RegisterClass)) {
- ContainsFPCode = true;
- break;
- }
- }
- }
- }
- // Check PHI nodes in successor blocks. These PHI's will be lowered to have
- // a copy of the input value in this block. In SSE mode, we only care about
- // 80-bit values.
- if (!ContainsFPCode) {
- // Final check, check LLVM BB's that are successors to the LLVM BB
- // corresponding to BB for FP PHI nodes.
- const BasicBlock *LLVMBB = MBB->getBasicBlock();
- const PHINode *PN;
- for (succ_const_iterator SI = succ_begin(LLVMBB), E = succ_end(LLVMBB);
- !ContainsFPCode && SI != E; ++SI) {
- for (BasicBlock::const_iterator II = SI->begin();
- (PN = dyn_cast<PHINode>(II)); ++II) {
- if (PN->getType()==Type::getX86_FP80Ty(LLVMBB->getContext()) ||
- (!Subtarget.hasSSE1() && PN->getType()->isFloatingPointTy()) ||
- (!Subtarget.hasSSE2() &&
- PN->getType()==Type::getDoubleTy(LLVMBB->getContext()))) {
- ContainsFPCode = true;
- break;
- }
- }
- }
- }
- // Finally, if we found any FP code, emit the FP_REG_KILL instruction.
- if (ContainsFPCode) {
- BuildMI(*MBB, MBBI->getFirstTerminator(), DebugLoc::getUnknownLoc(),
- MF.getTarget().getInstrInfo()->get(X86::FP_REG_KILL));
- ++NumFPKill;
- Changed = true;
- }
- }
-
- return Changed;
-}
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/libclamav/c++/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 3fad8ad..c523441 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/libclamav/c++/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -15,12 +15,10 @@
#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
-#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
-#include "llvm/GlobalValue.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
@@ -57,25 +55,24 @@ namespace {
FrameIndexBase
} BaseType;
- struct { // This is really a union, discriminated by BaseType!
- SDValue Reg;
- int FrameIndex;
- } Base;
+ // This is really a union, discriminated by BaseType!
+ SDValue Base_Reg;
+ int Base_FrameIndex;
unsigned Scale;
SDValue IndexReg;
int32_t Disp;
SDValue Segment;
- GlobalValue *GV;
- Constant *CP;
- BlockAddress *BlockAddr;
+ const GlobalValue *GV;
+ const Constant *CP;
+ const BlockAddress *BlockAddr;
const char *ES;
int JT;
unsigned Align; // CP alignment.
unsigned char SymbolFlags; // X86II::MO_*
X86ISelAddressMode()
- : BaseType(RegBase), Scale(1), IndexReg(), Disp(0),
+ : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
SymbolFlags(X86II::MO_NO_FLAG) {
}
@@ -85,7 +82,7 @@ namespace {
}
bool hasBaseOrIndexReg() const {
- return IndexReg.getNode() != 0 || Base.Reg.getNode() != 0;
+ return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
}
/// isRIPRelative - Return true if this addressing mode is already RIP
@@ -93,24 +90,24 @@ namespace {
bool isRIPRelative() const {
if (BaseType != RegBase) return false;
if (RegisterSDNode *RegNode =
- dyn_cast_or_null<RegisterSDNode>(Base.Reg.getNode()))
+ dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
return RegNode->getReg() == X86::RIP;
return false;
}
void setBaseReg(SDValue Reg) {
BaseType = RegBase;
- Base.Reg = Reg;
+ Base_Reg = Reg;
}
void dump() {
dbgs() << "X86ISelAddressMode " << this << '\n';
- dbgs() << "Base.Reg ";
- if (Base.Reg.getNode() != 0)
- Base.Reg.getNode()->dump();
+ dbgs() << "Base_Reg ";
+ if (Base_Reg.getNode() != 0)
+ Base_Reg.getNode()->dump();
else
dbgs() << "nul";
- dbgs() << " Base.FrameIndex " << Base.FrameIndex << '\n'
+ dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
<< " Scale" << Scale << '\n'
<< "IndexReg ";
if (IndexReg.getNode() != 0)
@@ -147,7 +144,7 @@ namespace {
class X86DAGToDAGISel : public SelectionDAGISel {
/// X86Lowering - This object fully describes how to lower LLVM code to an
/// X86-specific SelectionDAG.
- X86TargetLowering &X86Lowering;
+ const X86TargetLowering &X86Lowering;
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
@@ -168,12 +165,23 @@ namespace {
return "X86 DAG->DAG Instruction Selection";
}
- virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);
+ virtual void EmitFunctionEntryCode();
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;
virtual void PreprocessISelDAG();
+ inline bool immSext8(SDNode *N) const {
+ return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
+ }
+
+ // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
+ // sign extended field.
+ inline bool i64immSExt32(SDNode *N) const {
+ uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
+ return (int64_t)v == (int32_t)v;
+ }
+
// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"
@@ -193,9 +201,11 @@ namespace {
SDValue &Scale, SDValue &Index, SDValue &Disp,
SDValue &Segment);
bool SelectLEAAddr(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &Scale, SDValue &Index, SDValue &Disp);
+ SDValue &Scale, SDValue &Index, SDValue &Disp,
+ SDValue &Segment);
bool SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &Scale, SDValue &Index, SDValue &Disp);
+ SDValue &Scale, SDValue &Index, SDValue &Disp,
+ SDValue &Segment);
bool SelectScalarSSELoad(SDNode *Root, SDValue N,
SDValue &Base, SDValue &Scale,
SDValue &Index, SDValue &Disp,
@@ -219,14 +229,15 @@ namespace {
SDValue &Scale, SDValue &Index,
SDValue &Disp, SDValue &Segment) {
Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
- CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
- AM.Base.Reg;
+ CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI.getPointerTy()) :
+ AM.Base_Reg;
Scale = getI8Imm(AM.Scale);
Index = AM.IndexReg;
// These are 32-bit even in 64-bit mode since RIP relative offset
// is 32-bit.
if (AM.GV)
- Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp,
+ Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(),
+ MVT::i32, AM.Disp,
AM.SymbolFlags);
else if (AM.CP)
Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
@@ -349,17 +360,17 @@ X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
return true;
}
-/// MoveBelowCallSeqStart - Replace CALLSEQ_START operand with load's chain
-/// operand and move load below the call's chain operand.
-static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
- SDValue Call, SDValue CallSeqStart) {
+/// MoveBelowOrigChain - Replace the original chain operand of the call with
+/// load's chain operand and move load below the call's chain operand.
+static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
+ SDValue Call, SDValue OrigChain) {
SmallVector<SDValue, 8> Ops;
- SDValue Chain = CallSeqStart.getOperand(0);
+ SDValue Chain = OrigChain.getOperand(0);
if (Chain.getNode() == Load.getNode())
Ops.push_back(Load.getOperand(0));
else {
assert(Chain.getOpcode() == ISD::TokenFactor &&
- "Unexpected CallSeqStart chain operand");
+ "Unexpected chain operand");
for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
if (Chain.getOperand(i).getNode() == Load.getNode())
Ops.push_back(Load.getOperand(0));
@@ -371,22 +382,24 @@ static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
Ops.clear();
Ops.push_back(NewChain);
}
- for (unsigned i = 1, e = CallSeqStart.getNumOperands(); i != e; ++i)
- Ops.push_back(CallSeqStart.getOperand(i));
- CurDAG->UpdateNodeOperands(CallSeqStart, &Ops[0], Ops.size());
- CurDAG->UpdateNodeOperands(Load, Call.getOperand(0),
+ for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
+ Ops.push_back(OrigChain.getOperand(i));
+ CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
+ CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
Load.getOperand(1), Load.getOperand(2));
Ops.clear();
Ops.push_back(SDValue(Load.getNode(), 1));
for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
Ops.push_back(Call.getOperand(i));
- CurDAG->UpdateNodeOperands(Call, &Ops[0], Ops.size());
+ CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], Ops.size());
}
/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
-static bool isCalleeLoad(SDValue Callee, SDValue &Chain) {
+/// In the case of a tail call, there isn't a callseq node between the call
+/// chain and the load.
+static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
return false;
LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
@@ -397,12 +410,14 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain) {
return false;
// Now let's find the callseq_start.
- while (Chain.getOpcode() != ISD::CALLSEQ_START) {
+ while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
if (!Chain.hasOneUse())
return false;
Chain = Chain.getOperand(0);
}
-
+
+ if (!Chain.getNumOperands())
+ return false;
if (Chain.getOperand(0).getNode() == Callee.getNode())
return true;
if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
@@ -420,7 +435,9 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
E = CurDAG->allnodes_end(); I != E; ) {
SDNode *N = I++; // Preincrement iterator to avoid invalidation issues.
- if (OptLevel != CodeGenOpt::None && N->getOpcode() == X86ISD::CALL) {
+ if (OptLevel != CodeGenOpt::None &&
+ (N->getOpcode() == X86ISD::CALL ||
+ N->getOpcode() == X86ISD::TC_RETURN)) {
/// Also try moving call address load from outside callseq_start to just
/// before the call to allow it to be folded.
///
@@ -440,11 +457,12 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
/// \ /
/// \ /
/// [CALL]
+ bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
SDValue Chain = N->getOperand(0);
SDValue Load = N->getOperand(1);
- if (!isCalleeLoad(Load, Chain))
+ if (!isCalleeLoad(Load, Chain, HasCallSeq))
continue;
- MoveBelowCallSeqStart(CurDAG, Load, SDValue(N, 0), Chain);
+ MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
++NumLoadMoved;
continue;
}
@@ -495,7 +513,7 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
N->getOperand(0),
MemTmp, NULL, 0, MemVT,
false, false, 0);
- SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
+ SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, DstVT, dl, Store, MemTmp,
NULL, 0, MemVT, false, false, 0);
// We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
@@ -519,15 +537,15 @@ void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
MachineFrameInfo *MFI) {
const TargetInstrInfo *TII = TM.getInstrInfo();
if (Subtarget->isTargetCygMing())
- BuildMI(BB, DebugLoc::getUnknownLoc(),
+ BuildMI(BB, DebugLoc(),
TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
}
-void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
+void X86DAGToDAGISel::EmitFunctionEntryCode() {
// If this is main, emit special code for main.
- MachineBasicBlock *BB = MF.begin();
- if (Fn.hasExternalLinkage() && Fn.getName() == "main")
- EmitSpecialCodeForMain(BB, MF.getFrameInfo());
+ if (const Function *Fn = MF->getFunction())
+ if (Fn->hasExternalLinkage() && Fn->getName() == "main")
+ EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
}
@@ -651,8 +669,8 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
// a smaller encoding and avoids a scaled-index.
if (AM.Scale == 2 &&
AM.BaseType == X86ISelAddressMode::RegBase &&
- AM.Base.Reg.getNode() == 0) {
- AM.Base.Reg = AM.IndexReg;
+ AM.Base_Reg.getNode() == 0) {
+ AM.Base_Reg = AM.IndexReg;
AM.Scale = 1;
}
@@ -663,15 +681,34 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
Subtarget->is64Bit() &&
AM.Scale == 1 &&
AM.BaseType == X86ISelAddressMode::RegBase &&
- AM.Base.Reg.getNode() == 0 &&
+ AM.Base_Reg.getNode() == 0 &&
AM.IndexReg.getNode() == 0 &&
AM.SymbolFlags == X86II::MO_NO_FLAG &&
AM.hasSymbolicDisplacement())
- AM.Base.Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
+ AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
return false;
}
+/// isLogicallyAddWithConstant - Return true if this node is semantically an
+/// add of a value with a constantint.
+static bool isLogicallyAddWithConstant(SDValue V, SelectionDAG *CurDAG) {
+ // Check for (add x, Cst)
+ if (V->getOpcode() == ISD::ADD)
+ return isa<ConstantSDNode>(V->getOperand(1));
+
+ // Check for (or x, Cst), where Cst & x == 0.
+ if (V->getOpcode() != ISD::OR ||
+ !isa<ConstantSDNode>(V->getOperand(1)))
+ return false;
+
+ // Handle "X | C" as "X + C" iff X is known to have C bits clear.
+ ConstantSDNode *CN = cast<ConstantSDNode>(V->getOperand(1));
+
+ // Check to see if the LHS & C is zero.
+ return CurDAG->MaskedValueIsZero(V->getOperand(0), CN->getAPIntValue());
+}
+
bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
unsigned Depth) {
bool is64Bit = Subtarget->is64Bit();
@@ -737,9 +774,9 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
case ISD::FrameIndex:
if (AM.BaseType == X86ISelAddressMode::RegBase
- && AM.Base.Reg.getNode() == 0) {
+ && AM.Base_Reg.getNode() == 0) {
AM.BaseType = X86ISelAddressMode::FrameIndexBase;
- AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
+ AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
return false;
}
break;
@@ -762,8 +799,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
// Okay, we know that we have a scale by now. However, if the scaled
// value is an add of something and a constant, we can fold the
// constant into the disp field here.
- if (ShVal.getNode()->getOpcode() == ISD::ADD &&
- isa<ConstantSDNode>(ShVal.getNode()->getOperand(1))) {
+ if (isLogicallyAddWithConstant(ShVal, CurDAG)) {
AM.IndexReg = ShVal.getNode()->getOperand(0);
ConstantSDNode *AddVal =
cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
@@ -791,7 +827,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
case X86ISD::MUL_IMM:
// X*[3,5,9] -> X+X*[2,4,8]
if (AM.BaseType == X86ISelAddressMode::RegBase &&
- AM.Base.Reg.getNode() == 0 &&
+ AM.Base_Reg.getNode() == 0 &&
AM.IndexReg.getNode() == 0) {
if (ConstantSDNode
*CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
@@ -822,7 +858,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
Reg = N.getNode()->getOperand(0);
}
- AM.IndexReg = AM.Base.Reg = Reg;
+ AM.IndexReg = AM.Base_Reg = Reg;
return false;
}
}
@@ -836,6 +872,10 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
// other uses, since it avoids a two-address sub instruction, however
// it costs an additional mov if the index register has other uses.
+ // Add an artificial use to this node so that we can keep track of
+ // it if it gets CSE'd with a different node.
+ HandleSDNode Handle(N);
+
// Test if the LHS of the sub can be folded.
X86ISelAddressMode Backup = AM;
if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
@@ -847,8 +887,9 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
AM = Backup;
break;
}
+
int Cost = 0;
- SDValue RHS = N.getNode()->getOperand(1);
+ SDValue RHS = Handle.getValue().getNode()->getOperand(1);
// If the RHS involves a register with multiple uses, this
// transformation incurs an extra mov, due to the neg instruction
// clobbering its operand.
@@ -862,8 +903,8 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
// If the base is a register with multiple uses, this
// transformation may save a mov.
if ((AM.BaseType == X86ISelAddressMode::RegBase &&
- AM.Base.Reg.getNode() &&
- !AM.Base.Reg.getNode()->hasOneUse()) ||
+ AM.Base_Reg.getNode() &&
+ !AM.Base_Reg.getNode()->hasOneUse()) ||
AM.BaseType == X86ISelAddressMode::FrameIndexBase)
--Cost;
// If the folded LHS was interesting, this transformation saves
@@ -899,24 +940,36 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
}
case ISD::ADD: {
+ // Add an artificial use to this node so that we can keep track of
+ // it if it gets CSE'd with a different node.
+ HandleSDNode Handle(N);
+ SDValue LHS = Handle.getValue().getNode()->getOperand(0);
+ SDValue RHS = Handle.getValue().getNode()->getOperand(1);
+
X86ISelAddressMode Backup = AM;
- if (!MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1) &&
- !MatchAddressRecursively(N.getNode()->getOperand(1), AM, Depth+1))
+ if (!MatchAddressRecursively(LHS, AM, Depth+1) &&
+ !MatchAddressRecursively(RHS, AM, Depth+1))
return false;
AM = Backup;
- if (!MatchAddressRecursively(N.getNode()->getOperand(1), AM, Depth+1) &&
- !MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1))
+ LHS = Handle.getValue().getNode()->getOperand(0);
+ RHS = Handle.getValue().getNode()->getOperand(1);
+
+ // Try again after commuting the operands.
+ if (!MatchAddressRecursively(RHS, AM, Depth+1) &&
+ !MatchAddressRecursively(LHS, AM, Depth+1))
return false;
AM = Backup;
+ LHS = Handle.getValue().getNode()->getOperand(0);
+ RHS = Handle.getValue().getNode()->getOperand(1);
// If we couldn't fold both operands into the address at the same time,
// see if we can just put each operand into a register and fold at least
// the add.
if (AM.BaseType == X86ISelAddressMode::RegBase &&
- !AM.Base.Reg.getNode() &&
+ !AM.Base_Reg.getNode() &&
!AM.IndexReg.getNode()) {
- AM.Base.Reg = N.getNode()->getOperand(0);
- AM.IndexReg = N.getNode()->getOperand(1);
+ AM.Base_Reg = LHS;
+ AM.IndexReg = RHS;
AM.Scale = 1;
return false;
}
@@ -925,9 +978,11 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
case ISD::OR:
// Handle "X | C" as "X + C" iff X is known to have C bits clear.
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
+ if (isLogicallyAddWithConstant(N, CurDAG)) {
X86ISelAddressMode Backup = AM;
+ ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));
uint64_t Offset = CN->getSExtValue();
+
// Start with the LHS as an addr mode.
if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
// Address could not have picked a GV address for the displacement.
@@ -935,9 +990,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
// On x86-64, the resultant disp must fit in 32-bits.
(!is64Bit ||
X86::isOffsetSuitableForCodeModel(AM.Disp + Offset, M,
- AM.hasSymbolicDisplacement())) &&
- // Check to see if the LHS & C is zero.
- CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
+ AM.hasSymbolicDisplacement()))) {
AM.Disp += Offset;
return false;
}
@@ -1074,7 +1127,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
// Is the base register already occupied?
- if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.getNode()) {
+ if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
// If so, check to see if the scale index register is set.
if (AM.IndexReg.getNode() == 0) {
AM.IndexReg = N;
@@ -1088,7 +1141,7 @@ bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
// Default, generate it as a register.
AM.BaseType = X86ISelAddressMode::RegBase;
- AM.Base.Reg = N;
+ AM.Base_Reg = N;
return false;
}
@@ -1104,8 +1157,8 @@ bool X86DAGToDAGISel::SelectAddr(SDNode *Op, SDValue N, SDValue &Base,
EVT VT = N.getValueType();
if (AM.BaseType == X86ISelAddressMode::RegBase) {
- if (!AM.Base.Reg.getNode())
- AM.Base.Reg = CurDAG->getRegister(0, VT);
+ if (!AM.Base_Reg.getNode())
+ AM.Base_Reg = CurDAG->getRegister(0, VT);
}
if (!AM.IndexReg.getNode())
@@ -1132,7 +1185,7 @@ bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
PatternNodeWithChain.hasOneUse() &&
IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
- IsLegalToFold(N.getOperand(0), N.getNode(), Root)) {
+ IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
if (!SelectAddr(Root, LD->getBasePtr(), Base, Scale, Index, Disp,Segment))
return false;
@@ -1149,7 +1202,7 @@ bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
N.getOperand(0).getOperand(0).hasOneUse() &&
IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
- IsLegalToFold(N.getOperand(0), N.getNode(), Root)) {
+ IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
// Okay, this is a zero extending load. Fold it.
LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
if (!SelectAddr(Root, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
@@ -1165,7 +1218,8 @@ bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
/// mode it matches can be cost effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDNode *Op, SDValue N,
SDValue &Base, SDValue &Scale,
- SDValue &Index, SDValue &Disp) {
+ SDValue &Index, SDValue &Disp,
+ SDValue &Segment) {
X86ISelAddressMode AM;
// Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
@@ -1181,10 +1235,10 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDNode *Op, SDValue N,
EVT VT = N.getValueType();
unsigned Complexity = 0;
if (AM.BaseType == X86ISelAddressMode::RegBase)
- if (AM.Base.Reg.getNode())
+ if (AM.Base_Reg.getNode())
Complexity = 1;
else
- AM.Base.Reg = CurDAG->getRegister(0, VT);
+ AM.Base_Reg = CurDAG->getRegister(0, VT);
else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
Complexity = 4;
@@ -1212,14 +1266,13 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDNode *Op, SDValue N,
Complexity += 2;
}
- if (AM.Disp && (AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
+ if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
Complexity++;
// If it isn't worth using an LEA, reject it.
if (Complexity <= 2)
return false;
- SDValue Segment;
getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
return true;
}
@@ -1227,14 +1280,14 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDNode *Op, SDValue N,
/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index,
- SDValue &Disp) {
+ SDValue &Disp, SDValue &Segment) {
assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
-
+
X86ISelAddressMode AM;
AM.GV = GA->getGlobal();
AM.Disp += GA->getOffset();
- AM.Base.Reg = CurDAG->getRegister(0, N.getValueType());
+ AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
AM.SymbolFlags = GA->getTargetFlags();
if (N.getValueType() == MVT::i32) {
@@ -1244,7 +1297,6 @@ bool X86DAGToDAGISel::SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
}
- SDValue Segment;
getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
return true;
}
@@ -1256,7 +1308,7 @@ bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
SDValue &Segment) {
if (!ISD::isNON_EXTLoad(N.getNode()) ||
!IsProfitableToFold(N, P, P) ||
- !IsLegalToFold(N, P, P))
+ !IsLegalToFold(N, P, P, OptLevel))
return false;
return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp, Segment);
@@ -1271,13 +1323,6 @@ SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}
-static SDNode *FindCallStartFromCall(SDNode *Node) {
- if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
- assert(Node->getOperand(0).getValueType() == MVT::Other &&
- "Node doesn't have a token chain argument!");
- return FindCallStartFromCall(Node->getOperand(0).getNode());
-}
-
SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
SDValue Chain = Node->getOperand(0);
SDValue In1 = Node->getOperand(1);
@@ -1362,7 +1407,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
Opc = X86::LOCK_DEC16m;
else if (isSub) {
if (isCN) {
- if (Predicate_immSext8(Val.getNode()))
+ if (immSext8(Val.getNode()))
Opc = X86::LOCK_SUB16mi8;
else
Opc = X86::LOCK_SUB16mi;
@@ -1370,7 +1415,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
Opc = X86::LOCK_SUB16mr;
} else {
if (isCN) {
- if (Predicate_immSext8(Val.getNode()))
+ if (immSext8(Val.getNode()))
Opc = X86::LOCK_ADD16mi8;
else
Opc = X86::LOCK_ADD16mi;
@@ -1385,7 +1430,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
Opc = X86::LOCK_DEC32m;
else if (isSub) {
if (isCN) {
- if (Predicate_immSext8(Val.getNode()))
+ if (immSext8(Val.getNode()))
Opc = X86::LOCK_SUB32mi8;
else
Opc = X86::LOCK_SUB32mi;
@@ -1393,7 +1438,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
Opc = X86::LOCK_SUB32mr;
} else {
if (isCN) {
- if (Predicate_immSext8(Val.getNode()))
+ if (immSext8(Val.getNode()))
Opc = X86::LOCK_ADD32mi8;
else
Opc = X86::LOCK_ADD32mi;
@@ -1409,17 +1454,17 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
else if (isSub) {
Opc = X86::LOCK_SUB64mr;
if (isCN) {
- if (Predicate_immSext8(Val.getNode()))
+ if (immSext8(Val.getNode()))
Opc = X86::LOCK_SUB64mi8;
- else if (Predicate_i64immSExt32(Val.getNode()))
+ else if (i64immSExt32(Val.getNode()))
Opc = X86::LOCK_SUB64mi32;
}
} else {
Opc = X86::LOCK_ADD64mr;
if (isCN) {
- if (Predicate_immSext8(Val.getNode()))
+ if (immSext8(Val.getNode()))
Opc = X86::LOCK_ADD64mi8;
- else if (Predicate_i64immSExt32(Val.getNode()))
+ else if (i64immSExt32(Val.getNode()))
Opc = X86::LOCK_ADD64mi32;
}
}
@@ -1607,6 +1652,26 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
}
+ // Prevent use of AH in a REX instruction by referencing AX instead.
+ if (HiReg == X86::AH && Subtarget->is64Bit() &&
+ !SDValue(Node, 1).use_empty()) {
+ SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
+ X86::AX, MVT::i16, InFlag);
+ InFlag = Result.getValue(2);
+ // Get the low part if needed. Don't use getCopyFromReg for aliasing
+ // registers.
+ if (!SDValue(Node, 0).use_empty())
+ ReplaceUses(SDValue(Node, 1),
+ CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
+
+ // Shift AX down 8 bits.
+ Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
+ Result,
+ CurDAG->getTargetConstant(8, MVT::i8)), 0);
+ // Then truncate it down to i8.
+ ReplaceUses(SDValue(Node, 1),
+ CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
+ }
// Copy the low half of the result, if it is needed.
if (!SDValue(Node, 0).use_empty()) {
SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
@@ -1617,24 +1682,9 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
}
// Copy the high half of the result, if it is needed.
if (!SDValue(Node, 1).use_empty()) {
- SDValue Result;
- if (HiReg == X86::AH && Subtarget->is64Bit()) {
- // Prevent use of AH in a REX instruction by referencing AX instead.
- // Shift it down 8 bits.
- Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- X86::AX, MVT::i16, InFlag);
- InFlag = Result.getValue(2);
- Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
- Result,
- CurDAG->getTargetConstant(8, MVT::i8)), 0);
- // Then truncate it down to i8.
- Result = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
- MVT::i8, Result);
- } else {
- Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- HiReg, NVT, InFlag);
- InFlag = Result.getValue(2);
- }
+ SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
+ HiReg, NVT, InFlag);
+ InFlag = Result.getValue(2);
ReplaceUses(SDValue(Node, 1), Result);
DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
}
@@ -1747,6 +1797,29 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
}
+ // Prevent use of AH in a REX instruction by referencing AX instead.
+ // Shift it down 8 bits.
+ if (HiReg == X86::AH && Subtarget->is64Bit() &&
+ !SDValue(Node, 1).use_empty()) {
+ SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
+ X86::AX, MVT::i16, InFlag);
+ InFlag = Result.getValue(2);
+
+ // If we also need AL (the quotient), get it by extracting a subreg from
+ // Result. The fast register allocator does not like multiple CopyFromReg
+ // nodes using aliasing registers.
+ if (!SDValue(Node, 0).use_empty())
+ ReplaceUses(SDValue(Node, 0),
+ CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
+
+ // Shift AX right by 8 bits instead of using AH.
+ Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
+ Result,
+ CurDAG->getTargetConstant(8, MVT::i8)),
+ 0);
+ ReplaceUses(SDValue(Node, 1),
+ CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
+ }
// Copy the division (low) result, if it is needed.
if (!SDValue(Node, 0).use_empty()) {
SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
@@ -1757,25 +1830,9 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
}
// Copy the remainder (high) result, if it is needed.
if (!SDValue(Node, 1).use_empty()) {
- SDValue Result;
- if (HiReg == X86::AH && Subtarget->is64Bit()) {
- // Prevent use of AH in a REX instruction by referencing AX instead.
- // Shift it down 8 bits.
- Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- X86::AX, MVT::i16, InFlag);
- InFlag = Result.getValue(2);
- Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
- Result,
- CurDAG->getTargetConstant(8, MVT::i8)),
- 0);
- // Then truncate it down to i8.
- Result = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
- MVT::i8, Result);
- } else {
- Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- HiReg, NVT, InFlag);
- InFlag = Result.getValue(2);
- }
+ SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
+ HiReg, NVT, InFlag);
+ InFlag = Result.getValue(2);
ReplaceUses(SDValue(Node, 1), Result);
DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
}
@@ -1788,6 +1845,10 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
// Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
// use a smaller encoding.
+ if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
+ HasNoSignedComparisonUses(Node))
+ // Look past the truncate if CMP is the only use of it.
+ N0 = N0.getOperand(0);
if (N0.getNode()->getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
N0.getValueType() != MVT::i8 &&
X86::isZeroNode(N1)) {
@@ -1815,7 +1876,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
}
// Extract the l-register.
- SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
+ SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
MVT::i8, Reg);
// Emit a testb.
@@ -1844,7 +1905,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
Reg.getValueType(), Reg, RC), 0);
// Extract the h-register.
- SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT_HI, dl,
+ SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
MVT::i8, Reg);
// Emit a testb. No special NOREX tricks are needed since there's
@@ -1862,7 +1923,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue Reg = N0.getNode()->getOperand(0);
// Extract the 16-bit subregister.
- SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_16BIT, dl,
+ SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
MVT::i16, Reg);
// Emit a testw.
@@ -1878,7 +1939,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue Reg = N0.getNode()->getOperand(0);
// Extract the 32-bit subregister.
- SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_32BIT, dl,
+ SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
MVT::i32, Reg);
// Emit a testl.
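
The isLogicallyAddWithConstant helper introduced above folds "X | C" as "X + C" only when MaskedValueIsZero proves that X and C share no set bits, which is what makes the constant safe to move into the displacement field. A minimal standalone illustration of that identity (plain C++, not part of this patch; the values are arbitrary):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 0xABCD0000u; // low 16 bits known to be clear
  uint32_t C = 0x00000FFCu; // constant that only touches those clear bits
  assert((X & C) == 0);     // the MaskedValueIsZero precondition
  assert((X | C) == X + C); // so OR and ADD agree, and C can live in the disp
  return 0;
}
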
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86ISelLowering.cpp b/libclamav/c++/llvm/lib/Target/X86/X86ISelLowering.cpp
index c864c02..a6db979 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/libclamav/c++/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -16,7 +16,7 @@
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
-#include "X86MCTargetExpr.h"
+#include "X86ShuffleDecode.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/CallingConv.h"
@@ -37,6 +37,7 @@
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
@@ -45,44 +46,36 @@
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+using namespace dwarf;
STATISTIC(NumTailCalls, "Number of tail calls");
static cl::opt<bool>
DisableMMX("disable-mmx", cl::Hidden, cl::desc("Disable use of MMX"));
-// Disable16Bit - 16-bit operations typically have a larger encoding than
-// corresponding 32-bit instructions, and 16-bit code is slow on some
-// processors. This is an experimental flag to disable 16-bit operations
-// (which forces them to be Legalized to 32-bit operations).
-static cl::opt<bool>
-Disable16Bit("disable-16bit", cl::Hidden,
- cl::desc("Disable use of 16-bit instructions"));
-
// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
SDValue V2);
static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
- switch (TM.getSubtarget<X86Subtarget>().TargetType) {
- default: llvm_unreachable("unknown subtarget type");
- case X86Subtarget::isDarwin:
- if (TM.getSubtarget<X86Subtarget>().is64Bit())
- return new X8664_MachoTargetObjectFile();
+
+ bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
+
+ if (TM.getSubtarget<X86Subtarget>().isTargetDarwin()) {
+ if (is64Bit) return new X8664_MachoTargetObjectFile();
return new TargetLoweringObjectFileMachO();
- case X86Subtarget::isELF:
- if (TM.getSubtarget<X86Subtarget>().is64Bit())
- return new X8664_ELFTargetObjectFile(TM);
+ } else if (TM.getSubtarget<X86Subtarget>().isTargetELF() ){
+ if (is64Bit) return new X8664_ELFTargetObjectFile(TM);
return new X8632_ELFTargetObjectFile(TM);
- case X86Subtarget::isMingw:
- case X86Subtarget::isCygwin:
- case X86Subtarget::isWindows:
+ } else if (TM.getSubtarget<X86Subtarget>().isTargetCOFF()) {
return new TargetLoweringObjectFileCOFF();
- }
+ }
+ llvm_unreachable("unknown subtarget type");
}
X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
@@ -100,7 +93,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// X86 is weird, it always uses i8 for shift amounts and setcc results.
setShiftAmountType(MVT::i8);
setBooleanContents(ZeroOrOneBooleanContent);
- setSchedulingPreference(SchedulingForRegPressure);
+ setSchedulingPreference(Sched::RegPressure);
setStackPointerRegisterToSaveRestore(X86StackPtr);
if (Subtarget->isTargetDarwin()) {
@@ -118,8 +111,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// Set up the register classes.
addRegisterClass(MVT::i8, X86::GR8RegisterClass);
- if (!Disable16Bit)
- addRegisterClass(MVT::i16, X86::GR16RegisterClass);
+ addRegisterClass(MVT::i16, X86::GR16RegisterClass);
addRegisterClass(MVT::i32, X86::GR32RegisterClass);
if (Subtarget->is64Bit())
addRegisterClass(MVT::i64, X86::GR64RegisterClass);
@@ -128,11 +120,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// We don't accept any truncstore of integer registers.
setTruncStoreAction(MVT::i64, MVT::i32, Expand);
- if (!Disable16Bit)
- setTruncStoreAction(MVT::i64, MVT::i16, Expand);
+ setTruncStoreAction(MVT::i64, MVT::i16, Expand);
setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
- if (!Disable16Bit)
- setTruncStoreAction(MVT::i32, MVT::i16, Expand);
+ setTruncStoreAction(MVT::i32, MVT::i16, Expand);
setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
setTruncStoreAction(MVT::i16, MVT::i8, Expand);
@@ -154,13 +144,12 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Expand);
} else if (!UseSoftFloat) {
- if (X86ScalarSSEf64) {
- // We have an impenetrably clever algorithm for ui64->double only.
- setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
- }
+ // We have an algorithm for SSE2->double, and we turn this into a
+ // 64-bit FILD followed by conditional FADD for other targets.
+ setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
// We have an algorithm for SSE2, and we turn this into a 64-bit
// FILD for other targets.
- setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
+ setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
}
// Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
@@ -224,9 +213,17 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
}
// TODO: when we have SSE, these could be more efficient, by using movd/movq.
- if (!X86ScalarSSEf64) {
+ if (!X86ScalarSSEf64) {
setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand);
setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand);
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::BIT_CONVERT , MVT::f64 , Expand);
+ // Without SSE, i64->f64 goes through memory; i64->MMX is Legal.
+ if (Subtarget->hasMMX() && !DisableMMX)
+ setOperationAction(ISD::BIT_CONVERT , MVT::i64 , Custom);
+ else
+ setOperationAction(ISD::BIT_CONVERT , MVT::i64 , Expand);
+ }
}
// Scalar integer divide and remainder are lowered to use operations that
@@ -283,13 +280,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::CTTZ , MVT::i8 , Custom);
setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
- if (Disable16Bit) {
- setOperationAction(ISD::CTTZ , MVT::i16 , Expand);
- setOperationAction(ISD::CTLZ , MVT::i16 , Expand);
- } else {
- setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
- setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
- }
+ setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
+ setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
setOperationAction(ISD::CTLZ , MVT::i32 , Custom);
@@ -306,19 +298,13 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::SELECT , MVT::i1 , Promote);
// X86 wants to expand cmov itself.
setOperationAction(ISD::SELECT , MVT::i8 , Custom);
- if (Disable16Bit)
- setOperationAction(ISD::SELECT , MVT::i16 , Expand);
- else
- setOperationAction(ISD::SELECT , MVT::i16 , Custom);
+ setOperationAction(ISD::SELECT , MVT::i16 , Custom);
setOperationAction(ISD::SELECT , MVT::i32 , Custom);
setOperationAction(ISD::SELECT , MVT::f32 , Custom);
setOperationAction(ISD::SELECT , MVT::f64 , Custom);
setOperationAction(ISD::SELECT , MVT::f80 , Custom);
setOperationAction(ISD::SETCC , MVT::i8 , Custom);
- if (Disable16Bit)
- setOperationAction(ISD::SETCC , MVT::i16 , Expand);
- else
- setOperationAction(ISD::SETCC , MVT::i16 , Custom);
+ setOperationAction(ISD::SETCC , MVT::i16 , Custom);
setOperationAction(ISD::SETCC , MVT::i32 , Custom);
setOperationAction(ISD::SETCC , MVT::f32 , Custom);
setOperationAction(ISD::SETCC , MVT::f64 , Custom);
@@ -358,8 +344,15 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
if (Subtarget->hasSSE1())
setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
- if (!Subtarget->hasSSE2())
- setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand);
+ // We may not have a libcall for MEMBARRIER so we should lower this.
+ setOperationAction(ISD::MEMBARRIER , MVT::Other, Custom);
+
+ // On X86 and X86-64, atomic operations are lowered to locked instructions.
+ // Locked instructions, in turn, have implicit fence semantics (all memory
+ // operations are flushed before issuing the locked instruction, and they
+ // are not buffered), so we can fold away the common pattern of
+ // fence-atomic-fence.
+ setShouldFoldAtomicFences(true);
// Expand certain atomics
setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Custom);
@@ -621,11 +614,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// FIXME: In order to prevent SSE instructions being expanded to MMX ones
// with -msoft-float, disable use of MMX as well.
if (!UseSoftFloat && !DisableMMX && Subtarget->hasMMX()) {
- addRegisterClass(MVT::v8i8, X86::VR64RegisterClass);
- addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
- addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
- addRegisterClass(MVT::v2f32, X86::VR64RegisterClass);
- addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);
+ addRegisterClass(MVT::v8i8, X86::VR64RegisterClass, false);
+ addRegisterClass(MVT::v4i16, X86::VR64RegisterClass, false);
+ addRegisterClass(MVT::v2i32, X86::VR64RegisterClass, false);
+
+ addRegisterClass(MVT::v1i64, X86::VR64RegisterClass, false);
setOperationAction(ISD::ADD, MVT::v8i8, Legal);
setOperationAction(ISD::ADD, MVT::v4i16, Legal);
@@ -670,14 +663,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
- setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
- AddPromotedToType (ISD::LOAD, MVT::v2f32, MVT::v1i64);
setOperationAction(ISD::LOAD, MVT::v1i64, Legal);
setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
@@ -685,7 +675,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
- setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f32, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
@@ -699,6 +688,13 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::VSETCC, MVT::v8i8, Custom);
setOperationAction(ISD::VSETCC, MVT::v4i16, Custom);
setOperationAction(ISD::VSETCC, MVT::v2i32, Custom);
+
+ if (!X86ScalarSSEf64 && Subtarget->is64Bit()) {
+ setOperationAction(ISD::BIT_CONVERT, MVT::v8i8, Custom);
+ setOperationAction(ISD::BIT_CONVERT, MVT::v4i16, Custom);
+ setOperationAction(ISD::BIT_CONVERT, MVT::v2i32, Custom);
+ setOperationAction(ISD::BIT_CONVERT, MVT::v1i64, Custom);
+ }
}
if (!UseSoftFloat && Subtarget->hasSSE1()) {
@@ -797,9 +793,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
EVT VT = SVT;
// Do not attempt to promote non-128-bit vectors
- if (!VT.is128BitVector()) {
+ if (!VT.is128BitVector())
continue;
- }
+
setOperationAction(ISD::AND, SVT, Promote);
AddPromotedToType (ISD::AND, SVT, MVT::v2i64);
setOperationAction(ISD::OR, SVT, Promote);
@@ -829,9 +825,24 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
}
if (Subtarget->hasSSE41()) {
+ setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
+ setOperationAction(ISD::FCEIL, MVT::f32, Legal);
+ setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
+ setOperationAction(ISD::FRINT, MVT::f32, Legal);
+ setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
+ setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
+ setOperationAction(ISD::FCEIL, MVT::f64, Legal);
+ setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
+ setOperationAction(ISD::FRINT, MVT::f64, Legal);
+ setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
+
// FIXME: Do we need to handle scalar-to-vector here?
setOperationAction(ISD::MUL, MVT::v4i32, Legal);
+ // Can turn SHL into an integer multiply.
+ setOperationAction(ISD::SHL, MVT::v4i32, Custom);
+ setOperationAction(ISD::SHL, MVT::v16i8, Custom);
+
// i8 and i16 vectors are custom, because the source register and source
// memory operand types are not the same width. f32 vectors are
// custom since the immediate controlling the insert encodes additional
@@ -861,6 +872,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
addRegisterClass(MVT::v4f64, X86::VR256RegisterClass);
addRegisterClass(MVT::v8i32, X86::VR256RegisterClass);
addRegisterClass(MVT::v4i64, X86::VR256RegisterClass);
+ addRegisterClass(MVT::v32i8, X86::VR256RegisterClass);
setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
setOperationAction(ISD::LOAD, MVT::v8i32, Legal);
@@ -872,7 +884,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
- //setOperationAction(ISD::BUILD_VECTOR, MVT::v8f32, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v8f32, Custom);
//setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Custom);
//setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8f32, Custom);
//setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
@@ -969,15 +981,24 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// Add/Sub/Mul with overflow operations are custom lowered.
setOperationAction(ISD::SADDO, MVT::i32, Custom);
- setOperationAction(ISD::SADDO, MVT::i64, Custom);
setOperationAction(ISD::UADDO, MVT::i32, Custom);
- setOperationAction(ISD::UADDO, MVT::i64, Custom);
setOperationAction(ISD::SSUBO, MVT::i32, Custom);
- setOperationAction(ISD::SSUBO, MVT::i64, Custom);
setOperationAction(ISD::USUBO, MVT::i32, Custom);
- setOperationAction(ISD::USUBO, MVT::i64, Custom);
setOperationAction(ISD::SMULO, MVT::i32, Custom);
- setOperationAction(ISD::SMULO, MVT::i64, Custom);
+
+ // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
+ // handle type legalization for these operations here.
+ //
+ // FIXME: We really should do custom legalization for addition and
+ // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
+ // than generic legalization for 64-bit multiplication-with-overflow, though.
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::SADDO, MVT::i64, Custom);
+ setOperationAction(ISD::UADDO, MVT::i64, Custom);
+ setOperationAction(ISD::SSUBO, MVT::i64, Custom);
+ setOperationAction(ISD::USUBO, MVT::i64, Custom);
+ setOperationAction(ISD::SMULO, MVT::i64, Custom);
+ }
if (!Subtarget->is64Bit()) {
// These libcalls are not available in 32-bit.
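
As a rough illustration of the operations being guarded here (the helper name and the __builtin_add_overflow builtin are assumptions about a GCC/Clang-style compiler, not part of the patch): a checked 64-bit signed addition is what becomes ISD::SADDO on i64, which on x86-64 can be lowered to an add plus a test of the overflow flag, while on x86-32 the i64 operands must first be type-legalized.

    // Sketch: 64-bit signed addition with overflow detection.
    #include <cstdint>
    #include <cstdio>

    // Returns true if a + b overflows; otherwise stores the sum in out.
    bool add_checked(int64_t a, int64_t b, int64_t &out) {
      return __builtin_add_overflow(a, b, &out);
    }

    int main() {
      int64_t r = 0;
      bool ovf = add_checked(INT64_MAX, 1, r);
      printf("overflow=%d\n", ovf ? 1 : 0);   // prints overflow=1
      return 0;
    }
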
@@ -988,6 +1009,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
+ setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
setTargetDAGCombine(ISD::BUILD_VECTOR);
setTargetDAGCombine(ISD::SELECT);
setTargetDAGCombine(ISD::SHL);
@@ -995,7 +1017,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setTargetDAGCombine(ISD::SRL);
setTargetDAGCombine(ISD::OR);
setTargetDAGCombine(ISD::STORE);
- setTargetDAGCombine(ISD::MEMBARRIER);
setTargetDAGCombine(ISD::ZERO_EXTEND);
if (Subtarget->is64Bit())
setTargetDAGCombine(ISD::MUL);
@@ -1005,7 +1026,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// FIXME: These should be based on subtarget info. Plus, the values should
// be smaller when we are in optimizing for size mode.
maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
- maxStoresPerMemcpy = 16; // For @llvm.memcpy -> sequence of stores
+ maxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
maxStoresPerMemmove = 3; // For @llvm.memmove -> sequence of stores
setPrefLoopAlignment(16);
benefitFromCodePlacementOpt = true;
@@ -1064,22 +1085,45 @@ unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
-/// lowering. It returns MVT::iAny if SelectionDAG should be responsible for
-/// determining it.
+/// lowering. If DstAlign is zero, the destination alignment can satisfy any
+/// constraint. Similarly, if SrcAlign is zero there is no need to check it
+/// against an alignment requirement, probably because the source does not
+/// need to be loaded. If 'NonScalarIntSafe' is true, it is safe to return a
+/// non-scalar-integer type, e.g. when the source is an empty string, a
+/// constant, or loaded from memory. 'MemcpyStrSrc' indicates whether the
+/// memcpy source is constant so it does not need to be loaded.
+/// It returns EVT::Other if the type should be determined using generic
+/// target-independent logic.
EVT
-X86TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned Align,
- bool isSrcConst, bool isSrcStr,
- SelectionDAG &DAG) const {
+X86TargetLowering::getOptimalMemOpType(uint64_t Size,
+ unsigned DstAlign, unsigned SrcAlign,
+ bool NonScalarIntSafe,
+ bool MemcpyStrSrc,
+ MachineFunction &MF) const {
// FIXME: This turns off use of xmm stores for memset/memcpy on targets like
// linux. This is because the stack realignment code can't handle certain
// cases like PR2962. This should be removed when PR2962 is fixed.
- const Function *F = DAG.getMachineFunction().getFunction();
- bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat);
- if (!NoImplicitFloatOps && Subtarget->getStackAlignment() >= 16) {
- if ((isSrcConst || isSrcStr) && Subtarget->hasSSE2() && Size >= 16)
- return MVT::v4i32;
- if ((isSrcConst || isSrcStr) && Subtarget->hasSSE1() && Size >= 16)
- return MVT::v4f32;
+ const Function *F = MF.getFunction();
+ if (NonScalarIntSafe &&
+ !F->hasFnAttr(Attribute::NoImplicitFloat)) {
+ if (Size >= 16 &&
+ (Subtarget->isUnalignedMemAccessFast() ||
+ ((DstAlign == 0 || DstAlign >= 16) &&
+ (SrcAlign == 0 || SrcAlign >= 16))) &&
+ Subtarget->getStackAlignment() >= 16) {
+ if (Subtarget->hasSSE2())
+ return MVT::v4i32;
+ if (Subtarget->hasSSE1())
+ return MVT::v4f32;
+ } else if (!MemcpyStrSrc && Size >= 8 &&
+ !Subtarget->is64Bit() &&
+ Subtarget->getStackAlignment() >= 8 &&
+ Subtarget->hasSSE2()) {
+ // Do not use f64 to lower memcpy if source is string constant. It's
+ // better to use i32 to avoid the loads.
+ return MVT::f64;
+ }
}
if (Subtarget->is64Bit() && Size >= 8)
return MVT::i64;
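
A small sketch of the kind of copy the hook above is deciding about; the struct and function names are invented for the example. With SSE2, sufficient alignment and a 16-byte-aligned stack, the hook returns MVT::v4i32, so a copy like this can be emitted as a couple of 16-byte vector loads and stores rather than a call to memcpy.

    // Sketch: a fixed-size, 16-byte-aligned, 32-byte copy.
    #include <cstring>
    #include <cstdio>

    struct alignas(16) Block { char bytes[32]; };

    void copy_block(Block &dst, const Block &src) {
      std::memcpy(&dst, &src, sizeof(Block));   // candidate for v4i32 lowering
    }

    int main() {
      Block a = {{"hello"}};
      Block b = {};
      copy_block(b, a);
      printf("%s\n", b.bytes);
      return 0;
    }
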
@@ -1118,8 +1162,8 @@ X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
Subtarget->isPICStyleGOT());
// In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
// entries.
- return X86MCTargetExpr::Create(MBB->getSymbol(Ctx),
- X86MCTargetExpr::GOTOFF, Ctx);
+ return MCSymbolRefExpr::Create(MBB->getSymbol(),
+ MCSymbolRefExpr::VK_GOTOFF, Ctx);
}
/// getPICJumpTableRelocaBase - Returns relocation base for the given PIC
@@ -1129,8 +1173,7 @@ SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
if (!Subtarget->is64Bit())
// This doesn't have DebugLoc associated with it, but is not really the
// same as a Register.
- return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc::getUnknownLoc(),
- getPointerTy());
+ return DAG.getNode(X86ISD::GlobalBaseReg, DebugLoc(), getPointerTy());
return Table;
}
@@ -1153,6 +1196,71 @@ unsigned X86TargetLowering::getFunctionAlignment(const Function *F) const {
return F->hasFnAttr(Attribute::OptimizeForSize) ? 0 : 4;
}
+std::pair<const TargetRegisterClass*, uint8_t>
+X86TargetLowering::findRepresentativeClass(EVT VT) const{
+ const TargetRegisterClass *RRC = 0;
+ uint8_t Cost = 1;
+ switch (VT.getSimpleVT().SimpleTy) {
+ default:
+ return TargetLowering::findRepresentativeClass(VT);
+ case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
+ RRC = (Subtarget->is64Bit()
+ ? X86::GR64RegisterClass : X86::GR32RegisterClass);
+ break;
+ case MVT::v8i8: case MVT::v4i16:
+ case MVT::v2i32: case MVT::v1i64:
+ RRC = X86::VR64RegisterClass;
+ break;
+ case MVT::f32: case MVT::f64:
+ case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
+ case MVT::v4f32: case MVT::v2f64:
+ case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
+ case MVT::v4f64:
+ RRC = X86::VR128RegisterClass;
+ break;
+ }
+ return std::make_pair(RRC, Cost);
+}
+
+unsigned
+X86TargetLowering::getRegPressureLimit(const TargetRegisterClass *RC,
+ MachineFunction &MF) const {
+ unsigned FPDiff = RegInfo->hasFP(MF) ? 1 : 0;
+ switch (RC->getID()) {
+ default:
+ return 0;
+ case X86::GR32RegClassID:
+ return 4 - FPDiff;
+ case X86::GR64RegClassID:
+ return 8 - FPDiff;
+ case X86::VR128RegClassID:
+ return Subtarget->is64Bit() ? 10 : 4;
+ case X86::VR64RegClassID:
+ return 4;
+ }
+}
+
+bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
+ unsigned &Offset) const {
+ if (!Subtarget->isTargetLinux())
+ return false;
+
+ if (Subtarget->is64Bit()) {
+ // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:0x28.
+ Offset = 0x28;
+ if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
+ AddressSpace = 256;
+ else
+ AddressSpace = 257;
+ } else {
+ // %gs:0x14 on i386
+ Offset = 0x14;
+ AddressSpace = 256;
+ }
+ return true;
+}
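
For reference, a sketch of what those address-space/offset pairs correspond to on Linux with glibc (the reader function is made up; inline asm is GCC/Clang syntax): address space 256 is %gs, 257 is %fs, and the stack-protector cookie lives at %fs:0x28 on x86-64 and %gs:0x14 on i386.

    // Sketch: read the stack-protector cookie from its TLS slot.
    #include <cstdint>
    #include <cstdio>

    static uintptr_t read_stack_guard() {
    #if defined(__x86_64__)
      uintptr_t v;
      asm("movq %%fs:0x28, %0" : "=r"(v));   // matches Offset = 0x28 above
      return v;
    #elif defined(__i386__)
      uintptr_t v;
      asm("movl %%gs:0x14, %0" : "=r"(v));   // matches Offset = 0x14 above
      return v;
    #else
      return 0;                              // not applicable elsewhere
    #endif
    }

    int main() {
      printf("stack guard: %#zx\n", (size_t)read_stack_guard());
      return 0;
    }
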
+
+
//===----------------------------------------------------------------------===//
// Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//
@@ -1161,20 +1269,22 @@ unsigned X86TargetLowering::getFunctionAlignment(const Function *F) const {
bool
X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
- SelectionDAG &DAG) {
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
- RVLocs, *DAG.getContext());
- return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_X86);
+ RVLocs, Context);
+ return CCInfo.CheckReturn(Outs, RetCC_X86);
}
SDValue
X86TargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
- DebugLoc dl, SelectionDAG &DAG) {
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc dl, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
@@ -1192,13 +1302,27 @@ X86TargetLowering::LowerReturn(SDValue Chain,
SmallVector<SDValue, 6> RetOps;
RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
// Operand #1 = Bytes To Pop
- RetOps.push_back(DAG.getTargetConstant(getBytesToPopOnReturn(), MVT::i16));
+ RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
+ MVT::i16));
// Copy the result values into the output registers.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
- SDValue ValToCopy = Outs[i].Val;
+ SDValue ValToCopy = OutVals[i];
+ EVT ValVT = ValToCopy.getValueType();
+
+ // If this is x86-64, and we disabled SSE, we can't return FP values
+ if ((ValVT == MVT::f32 || ValVT == MVT::f64) &&
+ (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
+ report_fatal_error("SSE register return with SSE disabled");
+ }
+ // Likewise we can't return F64 values with SSE1 only. gcc does so, but
+ // llvm-gcc has never done it right and no one has noticed, so this
+ // should be OK for now.
+ if (ValVT == MVT::f64 &&
+ (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
+ report_fatal_error("SSE2 register return with SSE2 disabled");
// Returns in ST0/ST1 are handled specially: these are pushed as operands to
// the RET instruction and handled by the FP Stackifier.
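
To illustrate the error path added above (the compiler flag is mentioned as an assumption about a GCC/Clang-style driver, not taken from the patch): on x86-64 the SysV ABI returns float and double in XMM0, so building something like the following with SSE turned off, as kernel-style code does with -mno-sse, runs into the "SSE register return with SSE disabled" error.

    // Sketch: a double return value, which the ABI wants in XMM0.
    double half(double x) {
      return x * 0.5;
    }

    int main() {
      return half(4.0) == 2.0 ? 0 : 1;
    }
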
@@ -1216,14 +1340,20 @@ X86TargetLowering::LowerReturn(SDValue Chain,
// 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
// which is returned in RAX / RDX.
if (Subtarget->is64Bit()) {
- EVT ValVT = ValToCopy.getValueType();
if (ValVT.isVector() && ValVT.getSizeInBits() == 64) {
ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, ValToCopy);
- if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1)
- ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, ValToCopy);
+ if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
+ ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
+ ValToCopy);
+
+ // If we don't have SSE2 available, convert to v4f32 so the generated
+ // register is legal.
+ if (!Subtarget->hasSSE2())
+ ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,ValToCopy);
+ }
}
}
-
+
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
Flag = Chain.getValue(1);
}
@@ -1237,10 +1367,8 @@ X86TargetLowering::LowerReturn(SDValue Chain,
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
unsigned Reg = FuncInfo->getSRetReturnReg();
- if (!Reg) {
- Reg = MRI.createVirtualRegister(getRegClassFor(MVT::i64));
- FuncInfo->setSRetReturnReg(Reg);
- }
+ assert(Reg &&
+ "SRetReturnReg should have been set in LowerFormalArguments().");
SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
Chain = DAG.getCopyToReg(Chain, dl, X86::RAX, Val, Flag);
@@ -1268,7 +1396,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) {
+ SmallVectorImpl<SDValue> &InVals) const {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
@@ -1285,20 +1413,37 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// If this is x86-64, and we disabled SSE, we can't return FP values
if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
- llvm_report_error("SSE register return with SSE disabled");
+ report_fatal_error("SSE register return with SSE disabled");
}
+ SDValue Val;
+
// If this is a call to a function that returns an fp value on the floating
- // point stack, but where we prefer to use the value in xmm registers, copy
- // it out as F80 and use a truncate to move it from fp stack reg to xmm reg.
- if ((VA.getLocReg() == X86::ST0 ||
- VA.getLocReg() == X86::ST1) &&
- isScalarFPTypeInSSEReg(VA.getValVT())) {
- CopyVT = MVT::f80;
- }
+ // point stack, we must guarantee that the value is popped from the stack, so
+ // a CopyFromReg is not good enough - the copy instruction may be eliminated
+ // if the return value is not used. We use the FpGET_ST0 instructions
+ // instead.
+ if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) {
+ // If we prefer to use the value in xmm registers, copy it out as f80 and
+ // use a truncate to move it from fp stack reg to xmm reg.
+ if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80;
+ bool isST0 = VA.getLocReg() == X86::ST0;
+ unsigned Opc = 0;
+ if (CopyVT == MVT::f32) Opc = isST0 ? X86::FpGET_ST0_32:X86::FpGET_ST1_32;
+ if (CopyVT == MVT::f64) Opc = isST0 ? X86::FpGET_ST0_64:X86::FpGET_ST1_64;
+ if (CopyVT == MVT::f80) Opc = isST0 ? X86::FpGET_ST0_80:X86::FpGET_ST1_80;
+ SDValue Ops[] = { Chain, InFlag };
+ Chain = SDValue(DAG.getMachineNode(Opc, dl, CopyVT, MVT::Other, MVT::Flag,
+ Ops, 2), 1);
+ Val = Chain.getValue(0);
- SDValue Val;
- if (Is64Bit && CopyVT.isVector() && CopyVT.getSizeInBits() == 64) {
+ // Round the f80 to the right size, which also moves it to the appropriate
+ // xmm register.
+ if (CopyVT != VA.getValVT())
+ Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
+ // This truncation won't change the value.
+ DAG.getIntPtrConstant(1));
+ } else if (Is64Bit && CopyVT.isVector() && CopyVT.getSizeInBits() == 64) {
// For x86-64, MMX values are returned in XMM0 / XMM1 except for v1i64.
if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
@@ -1318,15 +1463,6 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
Val = Chain.getValue(0);
}
InFlag = Chain.getValue(2);
-
- if (CopyVT != VA.getValVT()) {
- // Round the F80 the right size, which also moves to the appropriate xmm
- // register.
- Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
- // This truncation won't change the value.
- DAG.getIntPtrConstant(1));
- }
-
InVals.push_back(Val);
}
@@ -1363,26 +1499,6 @@ ArgsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
return Ins[0].Flags.isSRet();
}
-/// IsCalleePop - Determines whether the callee is required to pop its
-/// own arguments. Callee pop is necessary to support tail calls.
-bool X86TargetLowering::IsCalleePop(bool IsVarArg, CallingConv::ID CallingConv){
- if (IsVarArg)
- return false;
-
- switch (CallingConv) {
- default:
- return false;
- case CallingConv::X86_StdCall:
- return !Subtarget->is64Bit();
- case CallingConv::X86_FastCall:
- return !Subtarget->is64Bit();
- case CallingConv::Fast:
- return GuaranteedTailCallOpt;
- case CallingConv::GHC:
- return GuaranteedTailCallOpt;
- }
-}
-
/// CCAssignFnForNode - Selects the correct CCAssignFn for the
/// given CallingConvention value.
CCAssignFn *X86TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {
@@ -1397,6 +1513,8 @@ CCAssignFn *X86TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {
if (CC == CallingConv::X86_FastCall)
return CC_X86_32_FastCall;
+ else if (CC == CallingConv::X86_ThisCall)
+ return CC_X86_32_ThisCall;
else if (CC == CallingConv::Fast)
return CC_X86_32_FastCC;
else if (CC == CallingConv::GHC)
@@ -1415,7 +1533,8 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
DebugLoc dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
- /*AlwaysInline=*/true, NULL, 0, NULL, 0);
+ /*isVolatile*/false, /*AlwaysInline=*/true,
+ NULL, 0, NULL, 0);
}
/// IsTailCallConvention - Return true if the calling convention is one that
@@ -1437,7 +1556,7 @@ X86TargetLowering::LowerMemArgument(SDValue Chain,
DebugLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
MachineFrameInfo *MFI,
- unsigned i) {
+ unsigned i) const {
// Create the nodes corresponding to a load from this parameter slot.
ISD::ArgFlagsTy Flags = Ins[i].Flags;
bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv);
@@ -1457,11 +1576,11 @@ X86TargetLowering::LowerMemArgument(SDValue Chain,
// could be overwritten by lowering of arguments in case of a tail call.
if (Flags.isByVal()) {
int FI = MFI->CreateFixedObject(Flags.getByValSize(),
- VA.getLocMemOffset(), isImmutable, false);
+ VA.getLocMemOffset(), isImmutable);
return DAG.getFrameIndex(FI, getPointerTy());
} else {
int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
- VA.getLocMemOffset(), isImmutable, false);
+ VA.getLocMemOffset(), isImmutable);
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
return DAG.getLoad(ValVT, dl, Chain, FIN,
PseudoSourceValue::getFixedStack(FI), 0,
@@ -1476,7 +1595,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl,
SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) {
+ SmallVectorImpl<SDValue> &InVals)
+ const {
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
@@ -1520,6 +1640,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
RC = X86::FR32RegisterClass;
else if (RegVT == MVT::f64)
RC = X86::FR64RegisterClass;
+ else if (RegVT.isVector() && RegVT.getSizeInBits() == 256)
+ RC = X86::VR256RegisterClass;
else if (RegVT.isVector() && RegVT.getSizeInBits() == 128)
RC = X86::VR128RegisterClass;
else if (RegVT.isVector() && RegVT.getSizeInBits() == 64)
@@ -1586,8 +1708,9 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
// If the function takes variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start.
if (isVarArg) {
- if (Is64Bit || CallConv != CallingConv::X86_FastCall) {
- VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize, true, false);
+ if (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
+ CallConv != CallingConv::X86_ThisCall)) {
+ FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,true));
}
if (Is64Bit) {
unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0;
@@ -1635,16 +1758,17 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
// For X86-64, if there are vararg parameters that are passed via
// registers, then we must store them to their spots on the stack so they
// may be loaded by dereferencing the result of va_next.
- VarArgsGPOffset = NumIntRegs * 8;
- VarArgsFPOffset = TotalNumIntRegs * 8 + NumXMMRegs * 16;
- RegSaveFrameIndex = MFI->CreateStackObject(TotalNumIntRegs * 8 +
- TotalNumXMMRegs * 16, 16,
- false);
+ FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
+ FuncInfo->setVarArgsFPOffset(TotalNumIntRegs * 8 + NumXMMRegs * 16);
+ FuncInfo->setRegSaveFrameIndex(
+ MFI->CreateStackObject(TotalNumIntRegs * 8 + TotalNumXMMRegs * 16, 16,
+ false));
// Store the integer parameter registers.
SmallVector<SDValue, 8> MemOps;
- SDValue RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
- unsigned Offset = VarArgsGPOffset;
+ SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
+ getPointerTy());
+ unsigned Offset = FuncInfo->getVarArgsGPOffset();
for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) {
SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
DAG.getIntPtrConstant(Offset));
@@ -1653,7 +1777,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN,
- PseudoSourceValue::getFixedStack(RegSaveFrameIndex),
+ PseudoSourceValue::getFixedStack(
+ FuncInfo->getRegSaveFrameIndex()),
Offset, false, false, 0);
MemOps.push_back(Store);
Offset += 8;
@@ -1668,8 +1793,10 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
SDValue ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8);
SaveXMMOps.push_back(ALVal);
- SaveXMMOps.push_back(DAG.getIntPtrConstant(RegSaveFrameIndex));
- SaveXMMOps.push_back(DAG.getIntPtrConstant(VarArgsFPOffset));
+ SaveXMMOps.push_back(DAG.getIntPtrConstant(
+ FuncInfo->getRegSaveFrameIndex()));
+ SaveXMMOps.push_back(DAG.getIntPtrConstant(
+ FuncInfo->getVarArgsFPOffset()));
for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) {
unsigned VReg = MF.addLiveIn(XMMArgRegs[NumXMMRegs],
@@ -1689,23 +1816,24 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
}
// Some CCs need callee pop.
- if (IsCalleePop(isVarArg, CallConv)) {
- BytesToPopOnReturn = StackSize; // Callee pops everything.
+ if (Subtarget->IsCalleePop(isVarArg, CallConv)) {
+ FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
} else {
- BytesToPopOnReturn = 0; // Callee pops nothing.
+ FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
// If this is an sret function, the return should pop the hidden pointer.
if (!Is64Bit && !IsTailCallConvention(CallConv) && ArgsAreStructReturn(Ins))
- BytesToPopOnReturn = 4;
+ FuncInfo->setBytesToPopOnReturn(4);
}
if (!Is64Bit) {
- RegSaveFrameIndex = 0xAAAAAAA; // RegSaveFrameIndex is X86-64 only.
- if (CallConv == CallingConv::X86_FastCall)
- VarArgsFrameIndex = 0xAAAAAAA; // fastcc functions can't have varargs.
+ // RegSaveFrameIndex is X86-64 only.
+ FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
+ if (CallConv == CallingConv::X86_FastCall ||
+ CallConv == CallingConv::X86_ThisCall)
+ // fastcc functions can't have varargs.
+ FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
}
- FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);
-
return Chain;
}
@@ -1714,7 +1842,7 @@ X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
SDValue StackPtr, SDValue Arg,
DebugLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
- ISD::ArgFlagsTy Flags) {
+ ISD::ArgFlagsTy Flags) const {
const unsigned FirstStackArgOffset = (Subtarget->isTargetWin64() ? 32 : 0);
unsigned LocMemOffset = FirstStackArgOffset + VA.getLocMemOffset();
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
@@ -1733,7 +1861,7 @@ SDValue
X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
SDValue &OutRetAddr, SDValue Chain,
bool IsTailCall, bool Is64Bit,
- int FPDiff, DebugLoc dl) {
+ int FPDiff, DebugLoc dl) const {
// Adjust the Return address stack slot.
EVT VT = getPointerTy();
OutRetAddr = getReturnAddressFrameIndex(DAG);
@@ -1754,7 +1882,7 @@ EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF,
// Calculate the new stack slot for the return address.
int SlotSize = Is64Bit ? 8 : 4;
int NewReturnAddrFI =
- MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false, false);
+ MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false);
EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
@@ -1768,9 +1896,10 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) {
+ SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
bool Is64Bit = Subtarget->is64Bit();
bool IsStructRet = CallIsStructReturn(Outs);
@@ -1780,7 +1909,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Check if it's really possible to do a tail call.
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
- Outs, Ins, DAG);
+ Outs, OutVals, Ins, DAG);
// Sibcalls are automatically detected tailcalls which do not require
// ABI changes.
@@ -1840,7 +1969,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
EVT RegVT = VA.getLocVT();
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
bool isByVal = Flags.isByVal();
@@ -1880,6 +2009,19 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (VA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+ if (isVarArg && Subtarget->isTargetWin64()) {
+ // Win64 ABI requires argument XMM reg to be copied to the corresponding
+ // shadow reg if callee is a varargs function.
+ unsigned ShadowReg = 0;
+ switch (VA.getLocReg()) {
+ case X86::XMM0: ShadowReg = X86::RCX; break;
+ case X86::XMM1: ShadowReg = X86::RDX; break;
+ case X86::XMM2: ShadowReg = X86::R8; break;
+ case X86::XMM3: ShadowReg = X86::R9; break;
+ }
+ if (ShadowReg)
+ RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
+ }
} else if (!IsSibcall && (!isTailCall || isByVal)) {
assert(VA.isMemLoc());
if (StackPtr.getNode() == 0)
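
A sketch of the Win64 rule implemented just above: when calling a varargs function, a floating-point argument is passed in an XMM register and also copied into the matching integer shadow register (RCX/RDX/R8/R9) so the callee's va_arg code can find it; the call below would exercise the XMM1/RDX pairing on that target.

    // Sketch: a varargs call with a floating-point argument.
    #include <cstdio>

    int main() {
      // On Win64 the double occupies the second parameter slot: XMM1 for the
      // value, with a copy in RDX as the shadow register.
      printf("pi ~ %f\n", 3.14159);
      return 0;
    }
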
@@ -1911,8 +2053,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (!isTailCall) {
Chain = DAG.getCopyToReg(Chain, dl, X86::EBX,
DAG.getNode(X86ISD::GlobalBaseReg,
- DebugLoc::getUnknownLoc(),
- getPointerTy()),
+ DebugLoc(), getPointerTy()),
InFlag);
InFlag = Chain.getValue(1);
} else {
@@ -1934,7 +2075,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
}
}
- if (Is64Bit && isVarArg) {
+ if (Is64Bit && isVarArg && !Subtarget->isTargetWin64()) {
// From AMD64 ABI document:
// For calls that may call functions that use varargs or stdargs
// (prototype-less calls or calls to functions containing ellipsis (...) in
@@ -1943,7 +2084,6 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// the number of registers, but must be an upper bound on the number of SSE
// registers used and is in the range 0 - 8 inclusive.
- // FIXME: Verify this on Win64
// Count the number of XMM registers allocated.
static const unsigned XMMArgRegs[] = {
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
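
A minimal sketch of the %al convention described in the comment above (the variadic helper is invented for the example): before a call to a varargs function, the SysV AMD64 ABI has the caller put an upper bound on the number of vector registers used for arguments into %al; passing one double, as below, typically sets %al to 1.

    // Sketch: a variadic callee consuming doubles via va_arg.
    #include <cstdarg>
    #include <cstdio>

    double sum_doubles(int n, ...) {
      va_list ap;
      va_start(ap, n);
      double s = 0.0;
      for (int i = 0; i < n; ++i)
        s += va_arg(ap, double);
      va_end(ap);
      return s;
    }

    int main() {
      printf("%f\n", sum_doubles(1, 2.5));   // caller sets %al before the call
      return 0;
    }
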
@@ -1980,12 +2120,12 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (VA.isRegLoc())
continue;
assert(VA.isMemLoc());
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
// Create frame index.
int32_t Offset = VA.getLocMemOffset()+FPDiff;
uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
- FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true, false);
+ FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
FIN = DAG.getFrameIndex(FI, getPointerTy());
if (Flags.isByVal()) {
@@ -2026,7 +2166,6 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
FPDiff, dl);
}
- bool WasGlobalOrExternal = false;
if (getTargetMachine().getCodeModel() == CodeModel::Large) {
assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
// In the 64-bit large code model, we have to make all calls
@@ -2034,14 +2173,13 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// pc-relative offset may not be large enough to hold the whole
// address.
} else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- WasGlobalOrExternal = true;
// If the callee is a GlobalAddress node (quite common, every direct call
// is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
// it.
// We should use extra load for direct calls to dllimported functions in
// non-JIT mode.
- GlobalValue *GV = G->getGlobal();
+ const GlobalValue *GV = G->getGlobal();
if (!GV->hasDLLImportLinkage()) {
unsigned char OpFlags = 0;
@@ -2062,11 +2200,10 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
OpFlags = X86II::MO_DARWIN_STUB;
}
- Callee = DAG.getTargetGlobalAddress(GV, getPointerTy(),
+ Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
G->getOffset(), OpFlags);
}
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- WasGlobalOrExternal = true;
unsigned char OpFlags = 0;
// On ELF targets, in either X86-64 or X86-32 mode, direct calls to external
@@ -2086,18 +2223,6 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
OpFlags);
}
- if (isTailCall && !WasGlobalOrExternal) {
- // Force the address into a (call preserved) caller-saved register since
- // tailcall must happen after callee-saved registers are poped.
- // FIXME: Give it a special register class that contains caller-saved
- // register instead?
- unsigned TCReg = Is64Bit ? X86::R11 : X86::EAX;
- Chain = DAG.getCopyToReg(Chain, dl,
- DAG.getRegister(TCReg, getPointerTy()),
- Callee,InFlag);
- Callee = DAG.getRegister(TCReg, getPointerTy());
- }
-
// Returns a chain & a flag for retval copy to use.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
SmallVector<SDValue, 8> Ops;
@@ -2124,33 +2249,20 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (!isTailCall && Subtarget->isPICStyleGOT())
Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));
- // Add an implicit use of AL for x86 vararg functions.
- if (Is64Bit && isVarArg)
+ // Add an implicit use of AL for non-Windows x86 64-bit vararg functions.
+ if (Is64Bit && isVarArg && !Subtarget->isTargetWin64())
Ops.push_back(DAG.getRegister(X86::AL, MVT::i8));
if (InFlag.getNode())
Ops.push_back(InFlag);
if (isTailCall) {
- // If this is the first return lowered for this function, add the regs
- // to the liveout set for the function.
- if (MF.getRegInfo().liveout_empty()) {
- SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
- *DAG.getContext());
- CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
- for (unsigned i = 0; i != RVLocs.size(); ++i)
- if (RVLocs[i].isRegLoc())
- MF.getRegInfo().addLiveOut(RVLocs[i].getLocReg());
- }
-
- assert(((Callee.getOpcode() == ISD::Register &&
- (cast<RegisterSDNode>(Callee)->getReg() == X86::EAX ||
- cast<RegisterSDNode>(Callee)->getReg() == X86::R11)) ||
- Callee.getOpcode() == ISD::TargetExternalSymbol ||
- Callee.getOpcode() == ISD::TargetGlobalAddress) &&
- "Expecting a global address, external symbol, or scratch register");
-
+ // We used to do:
+ //// If this is the first return lowered for this function, add the regs
+ //// to the liveout set for the function.
+ // This isn't right, although it's probably harmless on x86; liveouts
+ // should be computed from returns not tail calls. Consider a void
+ // function making a tail call to a function returning int.
return DAG.getNode(X86ISD::TC_RETURN, dl,
NodeTys, &Ops[0], Ops.size());
}
@@ -2160,7 +2272,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Create the CALLSEQ_END node.
unsigned NumBytesForCalleeToPush;
- if (IsCalleePop(isVarArg, CallConv))
+ if (Subtarget->IsCalleePop(isVarArg, CallConv))
NumBytesForCalleeToPush = NumBytes; // Callee pops everything
else if (!Is64Bit && !IsTailCallConvention(CallConv) && IsStructRet)
// If this is a call to a struct-return function, the callee
@@ -2220,8 +2332,9 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
/// GetAlignedArgumentStackSize - Align the stack size, e.g. to 16n + 12,
/// to satisfy a 16 byte alignment requirement.
-unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
- SelectionDAG& DAG) {
+unsigned
+X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
+ SelectionDAG& DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
const TargetMachine &TM = MF.getTarget();
const TargetFrameInfo &TFI = *TM.getFrameInfo();
@@ -2300,6 +2413,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
bool isCalleeStructRet,
bool isCallerStructRet,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const {
if (!IsTailCallConvention(CalleeCC) &&
@@ -2309,23 +2423,26 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// If -tailcallopt is specified, make fastcc functions tail-callable.
const MachineFunction &MF = DAG.getMachineFunction();
const Function *CallerF = DAG.getMachineFunction().getFunction();
+ CallingConv::ID CallerCC = CallerF->getCallingConv();
+ bool CCMatch = CallerCC == CalleeCC;
+
if (GuaranteedTailCallOpt) {
- if (IsTailCallConvention(CalleeCC) &&
- CallerF->getCallingConv() == CalleeCC)
+ if (IsTailCallConvention(CalleeCC) && CCMatch)
return true;
return false;
}
- // Look for obvious safe cases to perform tail call optimization that does not
- // requite ABI changes. This is what gcc calls sibcall.
+ // Look for obvious safe cases to perform tail call optimization that do not
+ // require ABI changes. This is what gcc calls sibcall.
// Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
// emit a special epilogue.
if (RegInfo->needsStackRealignment(MF))
return false;
- // Do not sibcall optimize vararg calls for now.
- if (isVarArg)
+ // Do not sibcall optimize vararg calls unless the call site is not passing
+ // any arguments.
+ if (isVarArg && !Outs.empty())
return false;
// Also avoid sibcall optimization if either caller or callee uses struct
@@ -2348,13 +2465,43 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
CCState CCInfo(CalleeCC, false, getTargetMachine(),
RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
- for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
CCValAssign &VA = RVLocs[i];
if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1)
return false;
}
}
+ // If the calling conventions do not match, then we'd better make sure the
+ // results are returned in the same way as what the caller expects.
+ if (!CCMatch) {
+ SmallVector<CCValAssign, 16> RVLocs1;
+ CCState CCInfo1(CalleeCC, false, getTargetMachine(),
+ RVLocs1, *DAG.getContext());
+ CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
+
+ SmallVector<CCValAssign, 16> RVLocs2;
+ CCState CCInfo2(CallerCC, false, getTargetMachine(),
+ RVLocs2, *DAG.getContext());
+ CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
+
+ if (RVLocs1.size() != RVLocs2.size())
+ return false;
+ for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
+ if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
+ return false;
+ if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
+ return false;
+ if (RVLocs1[i].isRegLoc()) {
+ if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
+ return false;
+ } else {
+ if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
+ return false;
+ }
+ }
+ }
+
// If the callee takes no arguments then go on to check the results of the
// call.
if (!Outs.empty()) {
@@ -2380,8 +2527,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
((X86TargetMachine&)getTargetMachine()).getInstrInfo();
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
- EVT RegVT = VA.getLocVT();
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
if (VA.getLocInfo() == CCValAssign::Indirect)
return false;
@@ -2392,26 +2538,38 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
}
}
}
+
+ // If the tailcall address may be in a register, then make sure it's
+ // possible to register allocate for it. In 32-bit, the call address can
+ // only target EAX, EDX, or ECX since the tail call must be scheduled after
+ // callee-saved registers are restored. These happen to be the same
+ // registers used to pass 'inreg' arguments so watch out for those.
+ if (!Subtarget->is64Bit() &&
+ !isa<GlobalAddressSDNode>(Callee) &&
+ !isa<ExternalSymbolSDNode>(Callee)) {
+ unsigned NumInRegs = 0;
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ if (!VA.isRegLoc())
+ continue;
+ unsigned Reg = VA.getLocReg();
+ switch (Reg) {
+ default: break;
+ case X86::EAX: case X86::EDX: case X86::ECX:
+ if (++NumInRegs == 3)
+ return false;
+ break;
+ }
+ }
+ }
}
return true;
}
FastISel *
-X86TargetLowering::createFastISel(MachineFunction &mf, MachineModuleInfo *mmo,
- DwarfWriter *dw,
- DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock*, MachineBasicBlock*> &bm,
- DenseMap<const AllocaInst *, int> &am
-#ifndef NDEBUG
- , SmallSet<Instruction*, 8> &cil
-#endif
- ) {
- return X86::createFastISel(mf, mmo, dw, vm, bm, am
-#ifndef NDEBUG
- , cil
-#endif
- );
+X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
+ return X86::createFastISel(funcInfo);
}
@@ -2419,8 +2577,114 @@ X86TargetLowering::createFastISel(MachineFunction &mf, MachineModuleInfo *mmo,
// Other Lowering Hooks
//===----------------------------------------------------------------------===//
+static bool MayFoldLoad(SDValue Op) {
+ return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
+}
+
+static bool MayFoldIntoStore(SDValue Op) {
+ return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
+}
+
+static bool isTargetShuffle(unsigned Opcode) {
+ switch(Opcode) {
+ default: return false;
+ case X86ISD::PSHUFD:
+ case X86ISD::PSHUFHW:
+ case X86ISD::PSHUFLW:
+ case X86ISD::SHUFPD:
+ case X86ISD::SHUFPS:
+ case X86ISD::MOVLHPS:
+ case X86ISD::MOVLHPD:
+ case X86ISD::MOVHLPS:
+ case X86ISD::MOVLPS:
+ case X86ISD::MOVLPD:
+ case X86ISD::MOVSHDUP:
+ case X86ISD::MOVSLDUP:
+ case X86ISD::MOVSS:
+ case X86ISD::MOVSD:
+ case X86ISD::UNPCKLPS:
+ case X86ISD::UNPCKLPD:
+ case X86ISD::PUNPCKLWD:
+ case X86ISD::PUNPCKLBW:
+ case X86ISD::PUNPCKLDQ:
+ case X86ISD::PUNPCKLQDQ:
+ case X86ISD::UNPCKHPS:
+ case X86ISD::UNPCKHPD:
+ case X86ISD::PUNPCKHWD:
+ case X86ISD::PUNPCKHBW:
+ case X86ISD::PUNPCKHDQ:
+ case X86ISD::PUNPCKHQDQ:
+ return true;
+ }
+ return false;
+}
+
+static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
+ SDValue V1, SelectionDAG &DAG) {
+ switch(Opc) {
+ default: llvm_unreachable("Unknown x86 shuffle node");
+ case X86ISD::MOVSHDUP:
+ case X86ISD::MOVSLDUP:
+ return DAG.getNode(Opc, dl, VT, V1);
+ }
+
+ return SDValue();
+}
+
+static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
+ SDValue V1, unsigned TargetMask, SelectionDAG &DAG) {
+ switch(Opc) {
+ default: llvm_unreachable("Unknown x86 shuffle node");
+ case X86ISD::PSHUFD:
+ case X86ISD::PSHUFHW:
+ case X86ISD::PSHUFLW:
+ return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
+ }
+
+ return SDValue();
+}
+
+static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
+ SDValue V1, SDValue V2, unsigned TargetMask, SelectionDAG &DAG) {
+ switch(Opc) {
+ default: llvm_unreachable("Unknown x86 shuffle node");
+ case X86ISD::SHUFPD:
+ case X86ISD::SHUFPS:
+ return DAG.getNode(Opc, dl, VT, V1, V2,
+ DAG.getConstant(TargetMask, MVT::i8));
+ }
+ return SDValue();
+}
+
+static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT,
+ SDValue V1, SDValue V2, SelectionDAG &DAG) {
+ switch(Opc) {
+ default: llvm_unreachable("Unknown x86 shuffle node");
+ case X86ISD::MOVLHPS:
+ case X86ISD::MOVLHPD:
+ case X86ISD::MOVHLPS:
+ case X86ISD::MOVLPS:
+ case X86ISD::MOVLPD:
+ case X86ISD::MOVSS:
+ case X86ISD::MOVSD:
+ case X86ISD::UNPCKLPS:
+ case X86ISD::UNPCKLPD:
+ case X86ISD::PUNPCKLWD:
+ case X86ISD::PUNPCKLBW:
+ case X86ISD::PUNPCKLDQ:
+ case X86ISD::PUNPCKLQDQ:
+ case X86ISD::UNPCKHPS:
+ case X86ISD::UNPCKHPD:
+ case X86ISD::PUNPCKHWD:
+ case X86ISD::PUNPCKHBW:
+ case X86ISD::PUNPCKHDQ:
+ case X86ISD::PUNPCKHQDQ:
+ return DAG.getNode(Opc, dl, VT, V1, V2);
+ }
+ return SDValue();
+}
-SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
+SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
int ReturnAddrIndex = FuncInfo->getRAIndex();
@@ -2429,7 +2693,7 @@ SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
// Set up a frame object for the return address.
uint64_t SlotSize = TD->getPointerSize();
ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize,
- false, false);
+ false);
FuncInfo->setRAIndex(ReturnAddrIndex);
}
@@ -2440,7 +2704,7 @@ SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
bool hasSymbolicDisplacement) {
// Offset should fit into 32 bit immediate field.
- if (!isInt32(Offset))
+ if (!isInt<32>(Offset))
return false;
// If we don't have a symbolic displacement - we don't have any extra
@@ -3128,7 +3392,7 @@ unsigned X86::getShufflePALIGNRImmediate(SDNode *N) {
/// constant +0.0.
bool X86::isZeroNode(SDValue Elt) {
return ((isa<ConstantSDNode>(Elt) &&
- cast<ConstantSDNode>(Elt)->getZExtValue() == 0) ||
+ cast<ConstantSDNode>(Elt)->isNullValue()) ||
(isa<ConstantFPSDNode>(Elt) &&
cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
}
@@ -3273,18 +3537,27 @@ static SDValue getZeroVector(EVT VT, bool HasSSE2, SelectionDAG &DAG,
DebugLoc dl) {
assert(VT.isVector() && "Expected a vector type");
- // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest
- // type. This ensures they get CSE'd.
+ // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted
+ // to their dest type. This ensures they get CSE'd.
SDValue Vec;
if (VT.getSizeInBits() == 64) { // MMX
SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
- } else if (HasSSE2) { // SSE2
- SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
- } else { // SSE1
+ } else if (VT.getSizeInBits() == 128) {
+ if (HasSSE2) { // SSE2
+ SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
+ Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
+ } else { // SSE1
+ SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
+ Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
+ }
+ } else if (VT.getSizeInBits() == 256) { // AVX
+ // 256-bit logic and arithmetic instructions in AVX are
+ // all floating-point, no support for integer ops. Default
+ // to emitting fp zeroed vectors then.
SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
+ SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
+ Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8);
}
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
}
@@ -3298,9 +3571,9 @@ static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
// type. This ensures they get CSE'd.
SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
SDValue Vec;
- if (VT.getSizeInBits() == 64) // MMX
+ if (VT.getSizeInBits() == 64) // MMX
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
- else // SSE
+ else // SSE
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
}
@@ -3365,9 +3638,8 @@ static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
}
-/// PromoteSplat - Promote a splat of v4f32, v8i16 or v16i8 to v4i32.
-static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG,
- bool HasSSE2) {
+/// PromoteSplat - Promote a splat of v4i32, v8i16 or v16i8 to v4f32.
+static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
if (SV->getValueType(0).getVectorNumElements() <= 4)
return SDValue(SV, 0);
@@ -3414,73 +3686,260 @@ static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]);
}
-/// getNumOfConsecutiveZeros - Return the number of elements in a result of
-/// a shuffle that is zero.
-static
-unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp, int NumElems,
- bool Low, SelectionDAG &DAG) {
- unsigned NumZeros = 0;
- for (int i = 0; i < NumElems; ++i) {
- unsigned Index = Low ? i : NumElems-i-1;
- int Idx = SVOp->getMaskElt(Index);
- if (Idx < 0) {
- ++NumZeros;
- continue;
- }
- SDValue Elt = DAG.getShuffleScalarElt(SVOp, Index);
- if (Elt.getNode() && X86::isZeroNode(Elt))
- ++NumZeros;
- else
+/// getShuffleScalarElt - Returns the scalar element that will make up the ith
+/// element of the result of the vector shuffle.
+SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG,
+ unsigned Depth) {
+ if (Depth == 6)
+ return SDValue(); // Limit search depth.
+
+ SDValue V = SDValue(N, 0);
+ EVT VT = V.getValueType();
+ unsigned Opcode = V.getOpcode();
+
+ // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
+ if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
+ Index = SV->getMaskElt(Index);
+
+ if (Index < 0)
+ return DAG.getUNDEF(VT.getVectorElementType());
+
+ int NumElems = VT.getVectorNumElements();
+ SDValue NewV = (Index < NumElems) ? SV->getOperand(0) : SV->getOperand(1);
+ return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG, Depth+1);
+ }
+
+ // Recurse into target specific vector shuffles to find scalars.
+ if (isTargetShuffle(Opcode)) {
+ int NumElems = VT.getVectorNumElements();
+ SmallVector<unsigned, 16> ShuffleMask;
+ SDValue ImmN;
+
+ switch(Opcode) {
+ case X86ISD::SHUFPS:
+ case X86ISD::SHUFPD:
+ ImmN = N->getOperand(N->getNumOperands()-1);
+ DecodeSHUFPSMask(NumElems,
+ cast<ConstantSDNode>(ImmN)->getZExtValue(),
+ ShuffleMask);
+ break;
+ case X86ISD::PUNPCKHBW:
+ case X86ISD::PUNPCKHWD:
+ case X86ISD::PUNPCKHDQ:
+ case X86ISD::PUNPCKHQDQ:
+ DecodePUNPCKHMask(NumElems, ShuffleMask);
break;
+ case X86ISD::UNPCKHPS:
+ case X86ISD::UNPCKHPD:
+ DecodeUNPCKHPMask(NumElems, ShuffleMask);
+ break;
+ case X86ISD::PUNPCKLBW:
+ case X86ISD::PUNPCKLWD:
+ case X86ISD::PUNPCKLDQ:
+ case X86ISD::PUNPCKLQDQ:
+ DecodePUNPCKLMask(NumElems, ShuffleMask);
+ break;
+ case X86ISD::UNPCKLPS:
+ case X86ISD::UNPCKLPD:
+ DecodeUNPCKLPMask(NumElems, ShuffleMask);
+ break;
+ case X86ISD::MOVHLPS:
+ DecodeMOVHLPSMask(NumElems, ShuffleMask);
+ break;
+ case X86ISD::MOVLHPS:
+ DecodeMOVLHPSMask(NumElems, ShuffleMask);
+ break;
+ case X86ISD::PSHUFD:
+ ImmN = N->getOperand(N->getNumOperands()-1);
+ DecodePSHUFMask(NumElems,
+ cast<ConstantSDNode>(ImmN)->getZExtValue(),
+ ShuffleMask);
+ break;
+ case X86ISD::PSHUFHW:
+ ImmN = N->getOperand(N->getNumOperands()-1);
+ DecodePSHUFHWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(),
+ ShuffleMask);
+ break;
+ case X86ISD::PSHUFLW:
+ ImmN = N->getOperand(N->getNumOperands()-1);
+ DecodePSHUFLWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(),
+ ShuffleMask);
+ break;
+ case X86ISD::MOVSS:
+ case X86ISD::MOVSD: {
+ // Index 0 always comes from the first element of the second source,
+ // which is why MOVSS and MOVSD are used in the first place. The other
+ // elements come from the other positions of the first source vector.
+ unsigned OpNum = (Index == 0) ? 1 : 0;
+ return getShuffleScalarElt(V.getOperand(OpNum).getNode(), Index, DAG,
+ Depth+1);
+ }
+ default:
+ assert(0 && "not implemented for target shuffle node");
+ return SDValue();
+ }
+
+ Index = ShuffleMask[Index];
+ if (Index < 0)
+ return DAG.getUNDEF(VT.getVectorElementType());
+
+ SDValue NewV = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1);
+ return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG,
+ Depth+1);
}
- return NumZeros;
-}
-/// isVectorShift - Returns true if the shuffle can be implemented as a
-/// logical left or right shift of a vector.
-/// FIXME: split into pslldqi, psrldqi, palignr variants.
-static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
- bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
- int NumElems = SVOp->getValueType(0).getVectorNumElements();
+ // Actual nodes that may contain scalar elements
+ if (Opcode == ISD::BIT_CONVERT) {
+ V = V.getOperand(0);
+ EVT SrcVT = V.getValueType();
+ unsigned NumElems = VT.getVectorNumElements();
- isLeft = true;
- unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, true, DAG);
- if (!NumZeros) {
- isLeft = false;
- NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, false, DAG);
- if (!NumZeros)
- return false;
+ if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
+ return SDValue();
}
+
+ if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
+ return (Index == 0) ? V.getOperand(0)
+ : DAG.getUNDEF(VT.getVectorElementType());
+
+ if (V.getOpcode() == ISD::BUILD_VECTOR)
+ return V.getOperand(Index);
+
+ return SDValue();
+}
+
+/// getNumOfConsecutiveZeros - Return the number of elements of a vector
+/// shuffle operation which are consecutively zero. The search can start
+/// in two different directions, from left or right.
+static
+unsigned getNumOfConsecutiveZeros(SDNode *N, int NumElems,
+ bool ZerosFromLeft, SelectionDAG &DAG) {
+ int i = 0;
+
+ while (i < NumElems) {
+ unsigned Index = ZerosFromLeft ? i : NumElems-i-1;
+ SDValue Elt = getShuffleScalarElt(N, Index, DAG, 0);
+ if (!(Elt.getNode() &&
+ (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt))))
+ break;
+ ++i;
+ }
+
+ return i;
+}
+
+/// isShuffleMaskConsecutive - Check if the shuffle mask indices from MaskI to
+/// MaskE correspond consecutively to elements from one of the vector operands,
+/// starting from its index OpIdx. Also sets OpNum to the source vector operand.
+static
+bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp, int MaskI, int MaskE,
+ int OpIdx, int NumElems, unsigned &OpNum) {
bool SeenV1 = false;
bool SeenV2 = false;
- for (int i = NumZeros; i < NumElems; ++i) {
- int Val = isLeft ? (i - NumZeros) : i;
- int Idx = SVOp->getMaskElt(isLeft ? i : (i - NumZeros));
+
+ for (int i = MaskI; i <= MaskE; ++i, ++OpIdx) {
+ int Idx = SVOp->getMaskElt(i);
+ // Ignore undef indices
if (Idx < 0)
continue;
+
if (Idx < NumElems)
SeenV1 = true;
- else {
- Idx -= NumElems;
+ else
SeenV2 = true;
- }
- if (Idx != Val)
+
+ // Only accept consecutive elements from the same vector
+ if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
return false;
}
- if (SeenV1 && SeenV2)
+
+ OpNum = SeenV1 ? 0 : 1;
+ return true;
+}
+
+/// isVectorShiftRight - Returns true if the shuffle can be implemented as a
+/// logical right shift of a vector.
+static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
+ bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
+ unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
+ unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
+ false /* check zeros from right */, DAG);
+ unsigned OpSrc;
+
+ if (!NumZeros)
return false;
- ShVal = SeenV1 ? SVOp->getOperand(0) : SVOp->getOperand(1);
+ // Considering the elements in the mask that are not consecutive zeros,
+ // check if they consecutively come from only one of the source vectors.
+ //
+ // V1 = {X, A, B, C} 0
+ // \ \ \ /
+ // vector_shuffle V1, V2 <1, 2, 3, X>
+ //
+ if (!isShuffleMaskConsecutive(SVOp,
+ 0, // Mask Start Index
+ NumElems-NumZeros-1, // Mask End Index
+ NumZeros, // Where to start looking in the src vector
+ NumElems, // Number of elements in vector
+ OpSrc)) // Which source operand ?
+ return false;
+
+ isLeft = false;
ShAmt = NumZeros;
+ ShVal = SVOp->getOperand(OpSrc);
return true;
}
+/// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
+/// logical left shift of a vector.
+static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
+ bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
+ unsigned NumElems = SVOp->getValueType(0).getVectorNumElements();
+ unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems,
+ true /* check zeros from left */, DAG);
+ unsigned OpSrc;
+
+ if (!NumZeros)
+ return false;
+
+ // Considering the elements in the mask that are not consecutive zeros,
+ // check if they consecutively come from only one of the source vectors.
+ //
+ // 0 { A, B, X, X } = V2
+ // / \ / /
+ // vector_shuffle V1, V2 <X, X, 4, 5>
+ //
+ if (!isShuffleMaskConsecutive(SVOp,
+ NumZeros, // Mask Start Index
+ NumElems-1, // Mask End Index
+ 0, // Where to start looking in the src vector
+ NumElems, // Number of elements in vector
+ OpSrc)) // Which source operand ?
+ return false;
+
+ isLeft = true;
+ ShAmt = NumZeros;
+ ShVal = SVOp->getOperand(OpSrc);
+ return true;
+}
+
+/// isVectorShift - Returns true if the shuffle can be implemented as a
+/// logical left or right shift of a vector.
+static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
+ bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
+ if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
+ isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
+ return true;
+
+ return false;
+}
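+
+// For illustration: a v4i32 shuffle of V1 with an all-zeros V2 and mask
+// <4, 0, 1, 2> is recognized by the helpers above as a logical left shift of
+// V1 by one element (isLeft == true, ShAmt == 1), which the caller can then
+// lower with getVShift to a single vector shift instead of a general shuffle.
+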
/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
///
static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
- SelectionDAG &DAG, TargetLowering &TLI) {
+ SelectionDAG &DAG,
+ const TargetLowering &TLI) {
if (NumNonZero > 8)
return SDValue();
@@ -3525,8 +3984,9 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
///
static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
- unsigned NumNonZero, unsigned NumZero,
- SelectionDAG &DAG, TargetLowering &TLI) {
+ unsigned NumNonZero, unsigned NumZero,
+ SelectionDAG &DAG,
+ const TargetLowering &TLI) {
if (NumNonZero > 4)
return SDValue();
@@ -3568,7 +4028,7 @@ static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
SDValue
X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
// Check if the scalar load can be widened into a vector load. And if
// the address is "base + cst" see if the cst can be "absorbed" into
@@ -3636,12 +4096,79 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
return SDValue();
}
+/// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a
+/// vector of type 'VT', see if the elements can be replaced by a single large
+/// load which has the same value as a build_vector whose operands are 'elts'.
+///
+/// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
+///
+/// FIXME: we'd also like to handle the case where the last elements are zero
+/// rather than undef via VZEXT_LOAD, but we do not detect that case today.
+/// There's even a handy isZeroNode for that purpose.
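+///
+/// Similarly, <load i32 *a, load i32 *a+4, load i32 *a+8, load i32 *a+12> is
+/// replaced by a single wide load of *a covering the whole vector.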
+static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
+ DebugLoc &dl, SelectionDAG &DAG) {
+ EVT EltVT = VT.getVectorElementType();
+ unsigned NumElems = Elts.size();
+
+ LoadSDNode *LDBase = NULL;
+ unsigned LastLoadedElt = -1U;
+
+ // For each element in the initializer, see if we've found a load or an undef.
+ // If we don't find an initial load element, or later load elements are
+ // non-consecutive, bail out.
+ for (unsigned i = 0; i < NumElems; ++i) {
+ SDValue Elt = Elts[i];
+
+ if (!Elt.getNode() ||
+ (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
+ return SDValue();
+ if (!LDBase) {
+ if (Elt.getNode()->getOpcode() == ISD::UNDEF)
+ return SDValue();
+ LDBase = cast<LoadSDNode>(Elt.getNode());
+ LastLoadedElt = i;
+ continue;
+ }
+ if (Elt.getOpcode() == ISD::UNDEF)
+ continue;
+
+ LoadSDNode *LD = cast<LoadSDNode>(Elt);
+ if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i))
+ return SDValue();
+ LastLoadedElt = i;
+ }
+
+ // If we have found an entire vector of loads and undefs, then return a large
+ // load of the entire vector width starting at the base pointer. If we found
+ // consecutive loads for the low half, generate a vzext_load node.
+ if (LastLoadedElt == NumElems - 1) {
+ if (DAG.InferPtrAlignment(LDBase->getBasePtr()) >= 16)
+ return DAG.getLoad(VT, dl, LDBase->getChain(), LDBase->getBasePtr(),
+ LDBase->getSrcValue(), LDBase->getSrcValueOffset(),
+ LDBase->isVolatile(), LDBase->isNonTemporal(), 0);
+ return DAG.getLoad(VT, dl, LDBase->getChain(), LDBase->getBasePtr(),
+ LDBase->getSrcValue(), LDBase->getSrcValueOffset(),
+ LDBase->isVolatile(), LDBase->isNonTemporal(),
+ LDBase->getAlignment());
+ } else if (NumElems == 4 && LastLoadedElt == 1) {
+ SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
+ SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
+ SDValue ResNode = DAG.getNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, VT, ResNode);
+ }
+ return SDValue();
+}
+
SDValue
-X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
+X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
- // All zero's are handled with pxor, all one's are handled with pcmpeqd.
- if (ISD::isBuildVectorAllZeros(Op.getNode())
- || ISD::isBuildVectorAllOnes(Op.getNode())) {
+ // All-zeros vectors are handled with pxor in SSE2 and above, xorps in SSE1.
+ // All-ones vectors are handled with pcmpeqd. In AVX, zeros are handled with
+ // vpxor in 128-bit and xor{pd,ps} in 256-bit, but no 256-bit version of
+ // pcmpeqd is present, so AllOnes is ignored.
+ if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
+ (Op.getValueType().getSizeInBits() != 256 &&
+ ISD::isBuildVectorAllOnes(Op.getNode()))) {
// Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to
// 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are
// eliminated on x86-32 hosts.
@@ -3679,10 +4206,9 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
}
}
- if (NumNonZero == 0) {
- // All undef vector. Return an UNDEF. All zero vectors were handled above.
+ // All undef vector. Return an UNDEF. All zero vectors were handled above.
+ if (NumNonZero == 0)
return DAG.getUNDEF(VT);
- }
// Special case for single non-zero, non-undef, element.
if (NumNonZero == 1) {
@@ -3820,7 +4346,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
if (EVTBits == 16 && NumElems == 8) {
SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
- *this);
+ *this);
if (V.getNode()) return V;
}
@@ -3864,42 +4390,69 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
}
- if (Values.size() > 2) {
- // If we have SSE 4.1, Expand into a number of inserts unless the number of
- // values to be inserted is equal to the number of elements, in which case
- // use the unpack code below in the hopes of matching the consecutive elts
- // load merge pattern for shuffles.
- // FIXME: We could probably just check that here directly.
- if (Values.size() < NumElems && VT.getSizeInBits() == 128 &&
- getSubtarget()->hasSSE41()) {
- V[0] = DAG.getUNDEF(VT);
- for (unsigned i = 0; i < NumElems; ++i)
- if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
- V[0] = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V[0],
+ if (Values.size() > 1 && VT.getSizeInBits() == 128) {
+ // Check for a build vector of consecutive loads.
+ for (unsigned i = 0; i < NumElems; ++i)
+ V[i] = Op.getOperand(i);
+
+ // Check for elements which are consecutive loads.
+ SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG);
+ if (LD.getNode())
+ return LD;
+
+ // For SSE 4.1, use insertps to put the high elements into the low element.
+ if (getSubtarget()->hasSSE41()) {
+ SDValue Result;
+ if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
+ Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
+ else
+ Result = DAG.getUNDEF(VT);
+
+ for (unsigned i = 1; i < NumElems; ++i) {
+ if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
+ Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
Op.getOperand(i), DAG.getIntPtrConstant(i));
- return V[0];
+ }
+ return Result;
}
- // Expand into a number of unpckl*.
- // e.g. for v4f32
+
+ // Otherwise, expand into a number of unpckl*, start by extending each of
+ // our (non-undef) elements to the full vector width with the element in the
+ // bottom slot of the vector (which generates no code for SSE).
+ for (unsigned i = 0; i < NumElems; ++i) {
+ if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
+ V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
+ else
+ V[i] = DAG.getUNDEF(VT);
+ }
+
+ // Next, we iteratively mix elements, e.g. for v4f32:
// Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
// : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
// Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
- for (unsigned i = 0; i < NumElems; ++i)
- V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
- NumElems >>= 1;
- while (NumElems != 0) {
- for (unsigned i = 0; i < NumElems; ++i)
- V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + NumElems]);
- NumElems >>= 1;
+ unsigned EltStride = NumElems >> 1;
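+ // For example, with NumElems == 4 the loop below runs twice: first with
+ // EltStride == 2 (combining V[0]/V[2] and V[1]/V[3]), then with
+ // EltStride == 1 (combining the two partial results into the final vector,
+ // which is returned as V[0]).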
+ while (EltStride != 0) {
+ for (unsigned i = 0; i < EltStride; ++i) {
+ // If V[i+EltStride] is undef and this is the first round of mixing,
+ // then it is safe to just drop this shuffle: V[i] is already in the
+ // right place, the one element (since it's the first round) being
+ // inserted as undef can be dropped. This isn't safe for successive
+ // rounds because they will permute elements within both vectors.
+ if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
+ EltStride == NumElems/2)
+ continue;
+
+ V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
+ }
+ EltStride >>= 1;
}
return V[0];
}
-
return SDValue();
}
SDValue
-X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
+X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
// We support concatenate two MMX registers and place them in a MMX
// register. This is better than doing a stack convert.
DebugLoc dl = Op.getDebugLoc();
@@ -3930,9 +4483,10 @@ X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
// 2. [ssse3] 1 x pshufb
// 3. [ssse3] 2 x pshufb + 1 x por
// 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
-static
-SDValue LowerVECTOR_SHUFFLEv8i16(ShuffleVectorSDNode *SVOp,
- SelectionDAG &DAG, X86TargetLowering &TLI) {
+SDValue
+X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
+ SelectionDAG &DAG) const {
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
DebugLoc dl = SVOp->getDebugLoc();
@@ -3983,7 +4537,7 @@ SDValue LowerVECTOR_SHUFFLEv8i16(ShuffleVectorSDNode *SVOp,
// quads, disable the next transformation since it does not help SSSE3.
bool V1Used = InputQuads[0] || InputQuads[1];
bool V2Used = InputQuads[2] || InputQuads[3];
- if (TLI.getSubtarget()->hasSSSE3()) {
+ if (Subtarget->hasSSSE3()) {
if (InputQuads.count() == 2 && V1Used && V2Used) {
BestLoQuad = InputQuads.find_first();
BestHiQuad = InputQuads.find_next(BestLoQuad);
@@ -4042,15 +4596,21 @@ SDValue LowerVECTOR_SHUFFLEv8i16(ShuffleVectorSDNode *SVOp,
// If we've eliminated the use of V2, and the new mask is a pshuflw or
// pshufhw, that's as cheap as it gets. Return the new shuffle.
if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
- return DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
+ unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
+ unsigned TargetMask = 0;
+ NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
+ TargetMask = pshufhw ? X86::getShufflePSHUFHWImmediate(NewV.getNode()):
+ X86::getShufflePSHUFLWImmediate(NewV.getNode());
+ V1 = NewV.getOperand(0);
+ return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
}
}
// If we have SSSE3, and all words of the result are from 1 input vector,
// case 2 is generated, otherwise case 3 is generated. If no SSSE3
// is present, fall back to case 4.
- if (TLI.getSubtarget()->hasSSSE3()) {
+ if (Subtarget->hasSSSE3()) {
SmallVector<SDValue,16> pshufbMask;
// If we have elements from both input vectors, set the high bit of the
@@ -4117,6 +4677,12 @@ SDValue LowerVECTOR_SHUFFLEv8i16(ShuffleVectorSDNode *SVOp,
MaskV.push_back(i);
NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
&MaskV[0]);
+
+ if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3())
+ NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
+ NewV.getOperand(0),
+ X86::getShufflePSHUFLWImmediate(NewV.getNode()),
+ DAG);
}
// If BestHi >= 0, generate a pshufhw to put the high elements in order,
@@ -4139,6 +4705,12 @@ SDValue LowerVECTOR_SHUFFLEv8i16(ShuffleVectorSDNode *SVOp,
}
NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
&MaskV[0]);
+
+ if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3())
+ NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
+ NewV.getOperand(0),
+ X86::getShufflePSHUFHWImmediate(NewV.getNode()),
+ DAG);
}
// In case BestHi & BestLo were both -1, which means each quadword has a word
@@ -4175,7 +4747,8 @@ SDValue LowerVECTOR_SHUFFLEv8i16(ShuffleVectorSDNode *SVOp,
// 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
static
SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
- SelectionDAG &DAG, X86TargetLowering &TLI) {
+ SelectionDAG &DAG,
+ const X86TargetLowering &TLI) {
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
DebugLoc dl = SVOp->getDebugLoc();
@@ -4314,21 +4887,20 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
}
/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
-/// ones, or rewriting v4i32 / v2f32 as 2 wide ones if possible. This can be
+/// ones, or rewriting v4i32 / v2i32 as 2 wide ones if possible. This can be
/// done when every pair / quad of shuffle mask elements point to elements in
/// the right sequence. e.g.
/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15>
static
SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
SelectionDAG &DAG,
- TargetLowering &TLI, DebugLoc dl) {
+ const TargetLowering &TLI, DebugLoc dl) {
EVT VT = SVOp->getValueType(0);
SDValue V1 = SVOp->getOperand(0);
SDValue V2 = SVOp->getOperand(1);
unsigned NumElems = VT.getVectorNumElements();
unsigned NewWidth = (NumElems == 4) ? 2 : 4;
- EVT MaskVT = MVT::getIntVectorWithNumElements(NewWidth);
- EVT MaskEltVT = MaskVT.getVectorElementType();
+ EVT MaskVT = (NewWidth == 4) ? MVT::v4i16 : MVT::v2i32;
EVT NewVT = MaskVT;
switch (VT.getSimpleVT().SimpleTy) {
default: assert(false && "Unexpected!");
@@ -4552,8 +5124,131 @@ LowerVECTOR_SHUFFLE_4wide(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
}
+static bool MayFoldVectorLoad(SDValue V) {
+ if (V.hasOneUse() && V.getOpcode() == ISD::BIT_CONVERT)
+ V = V.getOperand(0);
+ if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
+ V = V.getOperand(0);
+ if (MayFoldLoad(V))
+ return true;
+ return false;
+}
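+// Note: MayFoldVectorLoad looks through a single-use bit_convert and/or
+// scalar_to_vector wrapper, so a load hidden behind those nodes can still be
+// considered for folding.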
+
+static
+SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG,
+ bool HasSSE2) {
+ SDValue V1 = Op.getOperand(0);
+ SDValue V2 = Op.getOperand(1);
+ EVT VT = Op.getValueType();
+
+ assert(VT != MVT::v2i64 && "unsupported shuffle type");
+
+ if (HasSSE2 && VT == MVT::v2f64)
+ return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
+
+ // v4f32 or v4i32
+ return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V2, DAG);
+}
+
+static
+SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) {
+ SDValue V1 = Op.getOperand(0);
+ SDValue V2 = Op.getOperand(1);
+ EVT VT = Op.getValueType();
+
+ assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
+ "unsupported shuffle type");
+
+ if (V2.getOpcode() == ISD::UNDEF)
+ V2 = V1;
+
+ // v4i32 or v4f32
+ return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
+}
+
+static
+SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
+ SDValue V1 = Op.getOperand(0);
+ SDValue V2 = Op.getOperand(1);
+ EVT VT = Op.getValueType();
+ unsigned NumElems = VT.getVectorNumElements();
+
+ // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
+ // operand of these instructions is only memory, so check if there's a
+ // potential load folding here, otherwise use SHUFPS or MOVSD to match the
+ // same masks.
+ bool CanFoldLoad = false;
+
+ // Trivial case, when V2 comes from a load.
+ if (MayFoldVectorLoad(V2))
+ CanFoldLoad = true;
+
+ // When V1 is a load, it can be folded later into a store in isel, example:
+ // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
+ // turns into:
+ // (MOVLPSmr addr:$src1, VR128:$src2)
+ // So, recognize this potential and also use MOVLPS or MOVLPD
+ if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
+ CanFoldLoad = true;
+
+ if (CanFoldLoad) {
+ if (HasSSE2 && NumElems == 2)
+ return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
+
+ if (NumElems == 4)
+ return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
+ }
+
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+ // movl and movlp will both match v2i64, but v2i64 is never matched by movl
+ // earlier because we keep it strict, to avoid interfering with the movlp
+ // load folding logic (see the code above this getMOVLP call). So match it
+ // here instead; this is ugly, but it will stay this way until all shuffle
+ // matching is moved to x86 specific nodes. Note that for the 1st condition
+ // all types are matched with movsd.
+ if ((HasSSE2 && NumElems == 2) || !X86::isMOVLMask(SVOp))
+ return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
+ else if (HasSSE2)
+ return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
+
+
+ assert(VT != MVT::v4i32 && "unsupported shuffle type");
+
+ // Invert the operand order and use SHUFPS to match it.
+ return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V2, V1,
+ X86::getShuffleSHUFImmediate(SVOp), DAG);
+}
+
+static inline unsigned getUNPCKLOpcode(EVT VT) {
+ switch(VT.getSimpleVT().SimpleTy) {
+ case MVT::v4i32: return X86ISD::PUNPCKLDQ;
+ case MVT::v2i64: return X86ISD::PUNPCKLQDQ;
+ case MVT::v4f32: return X86ISD::UNPCKLPS;
+ case MVT::v2f64: return X86ISD::UNPCKLPD;
+ case MVT::v16i8: return X86ISD::PUNPCKLBW;
+ case MVT::v8i16: return X86ISD::PUNPCKLWD;
+ default:
+ llvm_unreachable("Unknow type for unpckl");
+ }
+ return 0;
+}
+
+static inline unsigned getUNPCKHOpcode(EVT VT) {
+ switch(VT.getSimpleVT().SimpleTy) {
+ case MVT::v4i32: return X86ISD::PUNPCKHDQ;
+ case MVT::v2i64: return X86ISD::PUNPCKHQDQ;
+ case MVT::v4f32: return X86ISD::UNPCKHPS;
+ case MVT::v2f64: return X86ISD::UNPCKHPD;
+ case MVT::v16i8: return X86ISD::PUNPCKHBW;
+ case MVT::v8i16: return X86ISD::PUNPCKHWD;
+ default:
+ llvm_unreachable("Unknow type for unpckh");
+ }
+ return 0;
+}
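+// The two helpers above simply map a 128-bit vector type to the matching
+// target unpack node, e.g. getUNPCKLOpcode(MVT::v4f32) yields
+// X86ISD::UNPCKLPS and getUNPCKHOpcode(MVT::v2i64) yields X86ISD::PUNPCKHQDQ.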
+
SDValue
-X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
+X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
@@ -4565,6 +5260,10 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
bool V1IsSplat = false;
bool V2IsSplat = false;
+ bool HasSSE2 = Subtarget->hasSSE2() || Subtarget->hasAVX();
+ bool HasSSE3 = Subtarget->hasSSE3() || Subtarget->hasAVX();
+ MachineFunction &MF = DAG.getMachineFunction();
+ bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
if (isZeroShuffle(SVOp))
return getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl);
@@ -4573,7 +5272,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
if (SVOp->isSplat()) {
if (isMMX || NumElems < 4)
return Op;
- return PromoteSplat(SVOp, DAG, Subtarget->hasSSE2());
+ return PromoteSplat(SVOp, DAG);
}
// If the shuffle can be profitably rewritten as a narrower shuffle, then
@@ -4601,8 +5300,35 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
}
}
- if (X86::isPSHUFDMask(SVOp))
- return Op;
+ // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
+ // unpckh_undef). Only use pshufd if speed is more important than size.
+ if (OptForSize && X86::isUNPCKL_v_undef_Mask(SVOp))
+ if (VT != MVT::v2i64 && VT != MVT::v2f64)
+ return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V1, DAG);
+ if (OptForSize && X86::isUNPCKH_v_undef_Mask(SVOp))
+ if (VT != MVT::v2i64 && VT != MVT::v2f64)
+ return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG);
+
+ if (X86::isPSHUFDMask(SVOp)) {
+ // The mask accepted by the if above can actually be selected into several
+ // different instructions during isel, not only pshufd as its name suggests;
+ // sad but true. Emulate that behavior for now...
+ if (X86::isMOVDDUPMask(SVOp) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
+ return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
+
+ unsigned TargetMask = X86::getShuffleSHUFImmediate(SVOp);
+
+ if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
+ return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
+
+ if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
+ return getTargetShuffleNode(X86ISD::SHUFPD, dl, VT, V1, V1,
+ TargetMask, DAG);
+
+ if (VT == MVT::v4f32)
+ return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V1, V1,
+ TargetMask, DAG);
+ }
// Check if this can be converted into a logical shift.
bool isLeft = false;
@@ -4623,17 +5349,32 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
return V2;
if (ISD::isBuildVectorAllZeros(V1.getNode()))
return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
- if (!isMMX)
- return Op;
+ if (!isMMX && !X86::isMOVLPMask(SVOp)) {
+ if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
+ return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
+
+ if (VT == MVT::v4i32 || VT == MVT::v4f32)
+ return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
+ }
}
// FIXME: fold these into legal mask.
- if (!isMMX && (X86::isMOVSHDUPMask(SVOp) ||
- X86::isMOVSLDUPMask(SVOp) ||
- X86::isMOVHLPSMask(SVOp) ||
- X86::isMOVLHPSMask(SVOp) ||
- X86::isMOVLPMask(SVOp)))
- return Op;
+ if (!isMMX) {
+ if (X86::isMOVLHPSMask(SVOp) && !X86::isUNPCKLMask(SVOp))
+ return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
+
+ if (X86::isMOVHLPSMask(SVOp))
+ return getMOVHighToLow(Op, dl, DAG);
+
+ if (X86::isMOVSHDUPMask(SVOp) && HasSSE3 && V2IsUndef && NumElems == 4)
+ return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
+
+ if (X86::isMOVSLDUPMask(SVOp) && HasSSE3 && V2IsUndef && NumElems == 4)
+ return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
+
+ if (X86::isMOVLPMask(SVOp))
+ return getMOVLP(Op, dl, DAG, HasSSE2);
+ }
if (ShouldXformToMOVHLPS(SVOp) ||
ShouldXformToMOVLP(V1.getNode(), V2.getNode(), SVOp))
@@ -4673,11 +5414,13 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
return getMOVL(DAG, dl, VT, V2, V1);
}
- if (X86::isUNPCKL_v_undef_Mask(SVOp) ||
- X86::isUNPCKH_v_undef_Mask(SVOp) ||
- X86::isUNPCKLMask(SVOp) ||
- X86::isUNPCKHMask(SVOp))
- return Op;
+ if (X86::isUNPCKL_v_undef_Mask(SVOp) || X86::isUNPCKLMask(SVOp))
+ return (isMMX) ?
+ Op : getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V2, DAG);
+
+ if (X86::isUNPCKH_v_undef_Mask(SVOp) || X86::isUNPCKHMask(SVOp))
+ return (isMMX) ?
+ Op : getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V2, DAG);
if (V2IsSplat) {
// Normalize mask so all entries that point to V2 points to its first
@@ -4699,11 +5442,14 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
// FIXME: this seems wrong.
SDValue NewOp = CommuteVectorShuffle(SVOp, DAG);
ShuffleVectorSDNode *NewSVOp = cast<ShuffleVectorSDNode>(NewOp);
- if (X86::isUNPCKL_v_undef_Mask(NewSVOp) ||
- X86::isUNPCKH_v_undef_Mask(NewSVOp) ||
- X86::isUNPCKLMask(NewSVOp) ||
- X86::isUNPCKHMask(NewSVOp))
- return NewOp;
+
+ if (X86::isUNPCKL_v_undef_Mask(NewSVOp) || X86::isUNPCKLMask(NewSVOp))
+ return (isMMX) ?
+ NewOp : getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V2, V1, DAG);
+
+ if (X86::isUNPCKH_v_undef_Mask(NewSVOp) || X86::isUNPCKHMask(NewSVOp))
+ return (isMMX) ?
+ NewOp : getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V2, V1, DAG);
}
// FIXME: for mmx, bitcast v2i32 to v4i16 for shuffle.
@@ -4712,15 +5458,45 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
if (!isMMX && V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(SVOp))
return CommuteVectorShuffle(SVOp, DAG);
- // Check for legal shuffle and return?
- SmallVector<int, 16> PermMask;
- SVOp->getMask(PermMask);
- if (isShuffleMaskLegal(PermMask, VT))
+ // The checks below are all present in isShuffleMaskLegal, but they are
+ // inlined here right now to enable us to directly emit target-specific
+ // nodes; they will be removed one by one until none of them returns Op
+ // anymore.
+ SmallVector<int, 16> M;
+ SVOp->getMask(M);
+
+ // Very little shuffling can be done for 64-bit vectors right now.
+ if (VT.getSizeInBits() == 64)
+ return isPALIGNRMask(M, VT, Subtarget->hasSSSE3()) ? Op : SDValue();
+
+ // FIXME: pshufb, blends, shifts.
+ if (VT.getVectorNumElements() == 2 ||
+ ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
+ isPALIGNRMask(M, VT, Subtarget->hasSSSE3()))
return Op;
+ if (isPSHUFHWMask(M, VT))
+ return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
+ X86::getShufflePSHUFHWImmediate(SVOp),
+ DAG);
+
+ if (isPSHUFLWMask(M, VT))
+ return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
+ X86::getShufflePSHUFLWImmediate(SVOp),
+ DAG);
+
+ if (isSHUFPMask(M, VT)) {
+ unsigned TargetMask = X86::getShuffleSHUFImmediate(SVOp);
+ if (VT == MVT::v4f32 || VT == MVT::v4i32)
+ return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V1, V2,
+ TargetMask, DAG);
+ if (VT == MVT::v2f64 || VT == MVT::v2i64)
+ return getTargetShuffleNode(X86ISD::SHUFPD, dl, VT, V1, V2,
+ TargetMask, DAG);
+ }
+
// Handle v8i16 specifically since SSE can do byte extraction and insertion.
if (VT == MVT::v8i16) {
- SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(SVOp, DAG, *this);
+ SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, DAG);
if (NewOp.getNode())
return NewOp;
}
@@ -4740,7 +5516,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
if (VT.getSizeInBits() == 8) {
@@ -4794,7 +5570,8 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
SDValue
-X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
+X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
+ SelectionDAG &DAG) const {
if (!isa<ConstantSDNode>(Op.getOperand(1)))
return SDValue();
@@ -4858,7 +5635,8 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
}
SDValue
-X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG){
+X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op,
+ SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
EVT EltVT = VT.getVectorElementType();
DebugLoc dl = Op.getDebugLoc();
@@ -4907,7 +5685,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG){
}
SDValue
-X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
+X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
EVT EltVT = VT.getVectorElementType();
@@ -4936,15 +5714,11 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
}
SDValue
-X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
+X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
- if (Op.getValueType() == MVT::v2f32)
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f32,
- DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i32,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32,
- Op.getOperand(0))));
-
- if (Op.getValueType() == MVT::v1i64 && Op.getOperand(0).getValueType() == MVT::i64)
+
+ if (Op.getValueType() == MVT::v1i64 &&
+ Op.getOperand(0).getValueType() == MVT::i64)
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
@@ -4967,7 +5741,7 @@ X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
// be used to form addressing mode. These wrapped nodes will be selected
// into MOV32ri.
SDValue
-X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
+X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
@@ -4993,14 +5767,14 @@ X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
if (OpFlag) {
Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg,
- DebugLoc::getUnknownLoc(), getPointerTy()),
+ DebugLoc(), getPointerTy()),
Result);
}
return Result;
}
-SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
@@ -5026,7 +5800,7 @@ SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
if (OpFlag) {
Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg,
- DebugLoc::getUnknownLoc(), getPointerTy()),
+ DebugLoc(), getPointerTy()),
Result);
}
@@ -5034,7 +5808,7 @@ SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
}
SDValue
-X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) {
+X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
@@ -5062,8 +5836,7 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) {
!Subtarget->is64Bit()) {
Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg,
- DebugLoc::getUnknownLoc(),
- getPointerTy()),
+ DebugLoc(), getPointerTy()),
Result);
}
@@ -5071,12 +5844,12 @@ X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) {
}
SDValue
-X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) {
+X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
// Create the TargetBlockAddressAddress node.
unsigned char OpFlags =
Subtarget->ClassifyBlockAddressReference();
CodeModel::Model M = getTargetMachine().getCodeModel();
- BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
+ const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
DebugLoc dl = Op.getDebugLoc();
SDValue Result = DAG.getBlockAddress(BA, getPointerTy(),
/*isTarget=*/true, OpFlags);
@@ -5110,10 +5883,10 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
if (OpFlags == X86II::MO_NO_FLAG &&
X86::isOffsetSuitableForCodeModel(Offset, M)) {
// A direct static reference to a global.
- Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), Offset);
+ Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
Offset = 0;
} else {
- Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), 0, OpFlags);
+ Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
}
if (Subtarget->isPICStyleRIPRel() &&
@@ -5145,7 +5918,7 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
}
SDValue
-X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) {
+X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
return LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG);
@@ -5158,7 +5931,7 @@ GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
DebugLoc dl = GA->getDebugLoc();
- SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
+ SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
GA->getValueType(0),
GA->getOffset(),
OperandFlags);
@@ -5171,7 +5944,7 @@ GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
}
// TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
- MFI->setHasCalls(true);
+ MFI->setAdjustsStack(true);
SDValue Flag = Chain.getValue(1);
return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
@@ -5185,8 +5958,7 @@ LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
DebugLoc dl = GA->getDebugLoc(); // ? function entry point might be better
SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
DAG.getNode(X86ISD::GlobalBaseReg,
- DebugLoc::getUnknownLoc(),
- PtrVT), InFlag);
+ DebugLoc(), PtrVT), InFlag);
InFlag = Chain.getValue(1);
return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
@@ -5208,7 +5980,7 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
DebugLoc dl = GA->getDebugLoc();
// Get the Thread Pointer
SDValue Base = DAG.getNode(X86ISD::SegmentBaseAddress,
- DebugLoc::getUnknownLoc(), PtrVT,
+ DebugLoc(), PtrVT,
DAG.getRegister(is64Bit? X86::FS : X86::GS,
MVT::i32));
@@ -5232,7 +6004,8 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
// emit "addl x at ntpoff,%eax" (local exec) or "addl x at indntpoff,%eax" (initial
// exec)
- SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
+ SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
+ GA->getValueType(0),
GA->getOffset(), OperandFlags);
SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
@@ -5246,34 +6019,79 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
}
SDValue
-X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) {
- // TODO: implement the "local dynamic" model
- // TODO: implement the "initial exec"model for pic executables
- assert(Subtarget->isTargetELF() &&
- "TLS not implemented for non-ELF targets");
+X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
+
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = GA->getGlobal();
- // If GV is an alias then use the aliasee for determining
- // thread-localness.
- if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
- GV = GA->resolveAliasedGlobal(false);
-
- TLSModel::Model model = getTLSModel(GV,
- getTargetMachine().getRelocationModel());
-
- switch (model) {
- case TLSModel::GeneralDynamic:
- case TLSModel::LocalDynamic: // not implemented
- if (Subtarget->is64Bit())
- return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
- return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
+ if (Subtarget->isTargetELF()) {
+ // TODO: implement the "local dynamic" model
+ // TODO: implement the "initial exec"model for pic executables
+
+ // If GV is an alias then use the aliasee for determining
+ // thread-localness.
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
+ GV = GA->resolveAliasedGlobal(false);
+
+ TLSModel::Model model
+ = getTLSModel(GV, getTargetMachine().getRelocationModel());
+
+ switch (model) {
+ case TLSModel::GeneralDynamic:
+ case TLSModel::LocalDynamic: // not implemented
+ if (Subtarget->is64Bit())
+ return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
+ return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
+
+ case TLSModel::InitialExec:
+ case TLSModel::LocalExec:
+ return LowerToTLSExecModel(GA, DAG, getPointerTy(), model,
+ Subtarget->is64Bit());
+ }
+ } else if (Subtarget->isTargetDarwin()) {
+ // Darwin only has one model of TLS. Lower to that.
+ unsigned char OpFlag = 0;
+ unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
+ X86ISD::WrapperRIP : X86ISD::Wrapper;
+
+ // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
+ // global base reg.
+ bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) &&
+ !Subtarget->is64Bit();
+ if (PIC32)
+ OpFlag = X86II::MO_TLVP_PIC_BASE;
+ else
+ OpFlag = X86II::MO_TLVP;
+ DebugLoc DL = Op.getDebugLoc();
+ SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
+ getPointerTy(),
+ GA->getOffset(), OpFlag);
+ SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
+
+ // With PIC32, the address is actually $g + Offset.
+ if (PIC32)
+ Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
+ DAG.getNode(X86ISD::GlobalBaseReg,
+ DebugLoc(), getPointerTy()),
+ Offset);
+
+ // Lowering the TLSCALL machine node will make sure everything is in the
+ // right location.
+ SDValue Args[] = { Offset };
+ SDValue Chain = DAG.getNode(X86ISD::TLSCALL, DL, MVT::Other, Args, 1);
+
+ // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI->setAdjustsStack(true);
- case TLSModel::InitialExec:
- case TLSModel::LocalExec:
- return LowerToTLSExecModel(GA, DAG, getPointerTy(), model,
- Subtarget->is64Bit());
+ // And our return value (tls address) is in the standard call return value
+ // location.
+ unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
+ return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
}
+
+ assert(false &&
+ "TLS not implemented for this target.");
llvm_unreachable("Unreachable");
return SDValue();
@@ -5282,7 +6100,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) {
/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and
/// take a 2 x i32 value to shift plus a shift amount.
-SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const {
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
EVT VT = Op.getValueType();
unsigned VTBits = VT.getSizeInBits();
@@ -5326,7 +6144,8 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) {
return DAG.getMergeValues(Ops, 2, dl);
}
-SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
+ SelectionDAG &DAG) const {
EVT SrcVT = Op.getOperand(0).getValueType();
if (SrcVT.isVector()) {
@@ -5361,8 +6180,8 @@ SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
}
SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
- SDValue StackSlot,
- SelectionDAG &DAG) {
+ SDValue StackSlot,
+ SelectionDAG &DAG) const {
// Build the FILD
DebugLoc dl = Op.getDebugLoc();
SDVTList Tys;
@@ -5399,7 +6218,8 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
}
// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
-SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
+ SelectionDAG &DAG) const {
// This algorithm is not obvious. Here it is in C code, more or less:
/*
double uint64_to_double( uint32_t hi, uint32_t lo ) {
@@ -5483,7 +6303,8 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) {
}
// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
-SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
+ SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
// FP constant to bias correct the final result.
SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
@@ -5528,43 +6349,81 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) {
return Sub;
}
-SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
+ SelectionDAG &DAG) const {
SDValue N0 = Op.getOperand(0);
DebugLoc dl = Op.getDebugLoc();
- // Now not UINT_TO_FP is legal (it's marked custom), dag combiner won't
+ // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
// optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
// the optimization here.
if (DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
EVT SrcVT = N0.getValueType();
- if (SrcVT == MVT::i64) {
- // We only handle SSE2 f64 target here; caller can expand the rest.
- if (Op.getValueType() != MVT::f64 || !X86ScalarSSEf64)
- return SDValue();
-
+ EVT DstVT = Op.getValueType();
+ if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
return LowerUINT_TO_FP_i64(Op, DAG);
- } else if (SrcVT == MVT::i32 && X86ScalarSSEf64) {
+ else if (SrcVT == MVT::i32 && X86ScalarSSEf64)
return LowerUINT_TO_FP_i32(Op, DAG);
- }
-
- assert(SrcVT == MVT::i32 && "Unknown UINT_TO_FP to lower!");
// Make a 64-bit buffer, and use it to build an FILD.
SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
- SDValue WordOff = DAG.getConstant(4, getPointerTy());
- SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
- getPointerTy(), StackSlot, WordOff);
- SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
+ if (SrcVT == MVT::i32) {
+ SDValue WordOff = DAG.getConstant(4, getPointerTy());
+ SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
+ getPointerTy(), StackSlot, WordOff);
+ SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
+ StackSlot, NULL, 0, false, false, 0);
+ SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
+ OffsetSlot, NULL, 0, false, false, 0);
+ SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
+ return Fild;
+ }
+
+ assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
+ SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
StackSlot, NULL, 0, false, false, 0);
- SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
- OffsetSlot, NULL, 0, false, false, 0);
- return BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
+ // For i64 source, we need to add the appropriate power of 2 if the input
+ // was negative. This is the same as the optimization in
+ // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
+ // we must be careful to do the computation in x87 extended precision, not
+ // in SSE. (The generic code can't know it's OK to do this, or how to.)
+ SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
+ SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
+ SDValue Fild = DAG.getNode(X86ISD::FILD, dl, Tys, Ops, 3);
+
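+ // 0x5F800000 is the IEEE-754 single-precision encoding of 2^64 (exponent
+ // 191, zero mantissa); it is the value that has to be added back when FILD
+ // interpreted the i64 bit pattern as a negative signed integer.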
+ APInt FF(32, 0x5F800000ULL);
+
+ // Check whether the sign bit is set.
+ SDValue SignSet = DAG.getSetCC(dl, getSetCCResultType(MVT::i64),
+ Op.getOperand(0), DAG.getConstant(0, MVT::i64),
+ ISD::SETLT);
+
+ // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
+ SDValue FudgePtr = DAG.getConstantPool(
+ ConstantInt::get(*DAG.getContext(), FF.zext(64)),
+ getPointerTy());
+
+ // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
+ SDValue Zero = DAG.getIntPtrConstant(0);
+ SDValue Four = DAG.getIntPtrConstant(4);
+ SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
+ Zero, Four);
+ FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
+
+ // Load the value out, extending it from f32 to f80.
+ // FIXME: Avoid the extend by constructing the right constant pool?
+ SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, MVT::f80, dl, DAG.getEntryNode(),
+ FudgePtr, PseudoSourceValue::getConstantPool(),
+ 0, MVT::f32, false, false, 4);
+ // Extend everything to 80 bits to force it to be done on x87.
+ SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
+ return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
}
std::pair<SDValue,SDValue> X86TargetLowering::
-FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) {
+FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) const {
DebugLoc dl = Op.getDebugLoc();
EVT DstTy = Op.getValueType();
@@ -5626,7 +6485,8 @@ FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) {
return std::make_pair(FIST, StackSlot);
}
-SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
+ SelectionDAG &DAG) const {
if (Op.getValueType().isVector()) {
if (Op.getValueType() == MVT::v2i32 &&
Op.getOperand(0).getValueType() == MVT::v2f64) {
@@ -5645,7 +6505,8 @@ SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
FIST, StackSlot, NULL, 0, false, false, 0);
}
-SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
+ SelectionDAG &DAG) const {
std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG, false);
SDValue FIST = Vals.first, StackSlot = Vals.second;
assert(FIST.getNode() && "Unexpected failure");
@@ -5655,7 +6516,8 @@ SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) {
FIST, StackSlot, NULL, 0, false, false, 0);
}
-SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerFABS(SDValue Op,
+ SelectionDAG &DAG) const {
LLVMContext *Context = DAG.getContext();
DebugLoc dl = Op.getDebugLoc();
EVT VT = Op.getValueType();
@@ -5682,7 +6544,7 @@ SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask);
}
-SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const {
LLVMContext *Context = DAG.getContext();
DebugLoc dl = Op.getDebugLoc();
EVT VT = Op.getValueType();
@@ -5717,7 +6579,7 @@ SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) {
}
}
-SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
LLVMContext *Context = DAG.getContext();
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
@@ -5793,7 +6655,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
// CF and OF aren't always set the way we want. Determine which
@@ -5801,6 +6663,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
bool NeedCF = false;
bool NeedOF = false;
switch (X86CC) {
+ default: break;
case X86::COND_A: case X86::COND_AE:
case X86::COND_B: case X86::COND_BE:
NeedCF = true;
@@ -5810,121 +6673,135 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
case X86::COND_O: case X86::COND_NO:
NeedOF = true;
break;
- default: break;
}
// See if we can use the EFLAGS value from the operand instead of
// doing a separate TEST. TEST always sets OF and CF to 0, so unless
// we prove that the arithmetic won't overflow, we can't use OF or CF.
- if (Op.getResNo() == 0 && !NeedOF && !NeedCF) {
- unsigned Opcode = 0;
- unsigned NumOperands = 0;
- switch (Op.getNode()->getOpcode()) {
- case ISD::ADD:
- // Due to an isel shortcoming, be conservative if this add is likely to
- // be selected as part of a load-modify-store instruction. When the root
- // node in a match is a store, isel doesn't know how to remap non-chain
- // non-flag uses of other nodes in the match, such as the ADD in this
- // case. This leads to the ADD being left around and reselected, with
- // the result being two adds in the output.
- for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
+ if (Op.getResNo() != 0 || NeedOF || NeedCF)
+ // Emit a CMP with 0, which is the TEST pattern.
+ return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
+ DAG.getConstant(0, Op.getValueType()));
+
+ unsigned Opcode = 0;
+ unsigned NumOperands = 0;
+ switch (Op.getNode()->getOpcode()) {
+ case ISD::ADD:
+ // Due to an isel shortcoming, be conservative if this add is likely to be
+ // selected as part of a load-modify-store instruction. When the root node
+ // in a match is a store, isel doesn't know how to remap non-chain non-flag
+ // uses of other nodes in the match, such as the ADD in this case. This
+ // leads to the ADD being left around and reselected, with the result being
+ // two adds in the output. Alas, even if none of our users are stores, that
+ // doesn't prove we're O.K. Ergo, if we have any parents that aren't
+ // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
+ // climbing the DAG back to the root, and it doesn't seem to be worth the
+ // effort.
+ for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
UE = Op.getNode()->use_end(); UI != UE; ++UI)
- if (UI->getOpcode() == ISD::STORE)
- goto default_case;
- if (ConstantSDNode *C =
- dyn_cast<ConstantSDNode>(Op.getNode()->getOperand(1))) {
- // An add of one will be selected as an INC.
- if (C->getAPIntValue() == 1) {
- Opcode = X86ISD::INC;
- NumOperands = 1;
- break;
- }
- // An add of negative one (subtract of one) will be selected as a DEC.
- if (C->getAPIntValue().isAllOnesValue()) {
- Opcode = X86ISD::DEC;
- NumOperands = 1;
- break;
- }
+ if (UI->getOpcode() != ISD::CopyToReg && UI->getOpcode() != ISD::SETCC)
+ goto default_case;
+
+ if (ConstantSDNode *C =
+ dyn_cast<ConstantSDNode>(Op.getNode()->getOperand(1))) {
+ // An add of one will be selected as an INC.
+ if (C->getAPIntValue() == 1) {
+ Opcode = X86ISD::INC;
+ NumOperands = 1;
+ break;
}
- // Otherwise use a regular EFLAGS-setting add.
- Opcode = X86ISD::ADD;
- NumOperands = 2;
- break;
- case ISD::AND: {
- // If the primary and result isn't used, don't bother using X86ISD::AND,
- // because a TEST instruction will be better.
- bool NonFlagUse = false;
- for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
- UE = Op.getNode()->use_end(); UI != UE; ++UI) {
- SDNode *User = *UI;
- unsigned UOpNo = UI.getOperandNo();
- if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
- // Look pass truncate.
- UOpNo = User->use_begin().getOperandNo();
- User = *User->use_begin();
- }
- if (User->getOpcode() != ISD::BRCOND &&
- User->getOpcode() != ISD::SETCC &&
- (User->getOpcode() != ISD::SELECT || UOpNo != 0)) {
- NonFlagUse = true;
- break;
- }
+
+ // An add of negative one (subtract of one) will be selected as a DEC.
+ if (C->getAPIntValue().isAllOnesValue()) {
+ Opcode = X86ISD::DEC;
+ NumOperands = 1;
+ break;
+ }
+ }
+
+ // Otherwise use a regular EFLAGS-setting add.
+ Opcode = X86ISD::ADD;
+ NumOperands = 2;
+ break;
+ case ISD::AND: {
+ // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
+ // because a TEST instruction will be better.
+ bool NonFlagUse = false;
+ for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
+ UE = Op.getNode()->use_end(); UI != UE; ++UI) {
+ SDNode *User = *UI;
+ unsigned UOpNo = UI.getOperandNo();
+ if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
+ // Look past the truncate.
+ UOpNo = User->use_begin().getOperandNo();
+ User = *User->use_begin();
}
- if (!NonFlagUse)
+
+ if (User->getOpcode() != ISD::BRCOND &&
+ User->getOpcode() != ISD::SETCC &&
+ (User->getOpcode() != ISD::SELECT || UOpNo != 0)) {
+ NonFlagUse = true;
break;
+ }
}
+
+ if (!NonFlagUse)
+ break;
+ }
// FALL THROUGH
- case ISD::SUB:
- case ISD::OR:
- case ISD::XOR:
- // Due to the ISEL shortcoming noted above, be conservative if this op is
- // likely to be selected as part of a load-modify-store instruction.
- for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
+ case ISD::SUB:
+ case ISD::OR:
+ case ISD::XOR:
+ // Due to the ISEL shortcoming noted above, be conservative if this op is
+ // likely to be selected as part of a load-modify-store instruction.
+ for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
UE = Op.getNode()->use_end(); UI != UE; ++UI)
- if (UI->getOpcode() == ISD::STORE)
- goto default_case;
- // Otherwise use a regular EFLAGS-setting instruction.
- switch (Op.getNode()->getOpcode()) {
- case ISD::SUB: Opcode = X86ISD::SUB; break;
- case ISD::OR: Opcode = X86ISD::OR; break;
- case ISD::XOR: Opcode = X86ISD::XOR; break;
- case ISD::AND: Opcode = X86ISD::AND; break;
- default: llvm_unreachable("unexpected operator!");
- }
- NumOperands = 2;
- break;
- case X86ISD::ADD:
- case X86ISD::SUB:
- case X86ISD::INC:
- case X86ISD::DEC:
- case X86ISD::OR:
- case X86ISD::XOR:
- case X86ISD::AND:
- return SDValue(Op.getNode(), 1);
- default:
- default_case:
- break;
- }
- if (Opcode != 0) {
- SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
- SmallVector<SDValue, 4> Ops;
- for (unsigned i = 0; i != NumOperands; ++i)
- Ops.push_back(Op.getOperand(i));
- SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands);
- DAG.ReplaceAllUsesWith(Op, New);
- return SDValue(New.getNode(), 1);
+ if (UI->getOpcode() == ISD::STORE)
+ goto default_case;
+
+ // Otherwise use a regular EFLAGS-setting instruction.
+ switch (Op.getNode()->getOpcode()) {
+ default: llvm_unreachable("unexpected operator!");
+ case ISD::SUB: Opcode = X86ISD::SUB; break;
+ case ISD::OR: Opcode = X86ISD::OR; break;
+ case ISD::XOR: Opcode = X86ISD::XOR; break;
+ case ISD::AND: Opcode = X86ISD::AND; break;
}
+
+ NumOperands = 2;
+ break;
+ case X86ISD::ADD:
+ case X86ISD::SUB:
+ case X86ISD::INC:
+ case X86ISD::DEC:
+ case X86ISD::OR:
+ case X86ISD::XOR:
+ case X86ISD::AND:
+ return SDValue(Op.getNode(), 1);
+ default:
+ default_case:
+ break;
}
- // Otherwise just emit a CMP with 0, which is the TEST pattern.
- return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
- DAG.getConstant(0, Op.getValueType()));
+ if (Opcode == 0)
+ // Emit a CMP with 0, which is the TEST pattern.
+ return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
+ DAG.getConstant(0, Op.getValueType()));
+
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
+ SmallVector<SDValue, 4> Ops;
+ for (unsigned i = 0; i != NumOperands; ++i)
+ Ops.push_back(Op.getOperand(i));
+
+ SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands);
+ DAG.ReplaceAllUsesWith(Op, New);
+ return SDValue(New.getNode(), 1);
}
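+// For instance, when EmitTest above is handed (add x, 1) whose only users are
+// CopyToReg or SETCC nodes, it rewrites the node to X86ISD::INC and returns
+// its EFLAGS result (value #1) directly, instead of emitting a separate TEST.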
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1))
if (C->getAPIntValue() == 0)
return EmitTest(Op0, X86CC, DAG);
@@ -5935,8 +6812,8 @@ SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
/// if it's possible.
-static SDValue LowerToBT(SDValue And, ISD::CondCode CC,
- DebugLoc dl, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
+ DebugLoc dl, SelectionDAG &DAG) const {
SDValue Op0 = And.getOperand(0);
SDValue Op1 = And.getOperand(1);
if (Op0.getOpcode() == ISD::TRUNCATE)
@@ -5945,15 +6822,21 @@ static SDValue LowerToBT(SDValue And, ISD::CondCode CC,
Op1 = Op1.getOperand(0);
SDValue LHS, RHS;
- if (Op1.getOpcode() == ISD::SHL) {
- if (ConstantSDNode *And10C = dyn_cast<ConstantSDNode>(Op1.getOperand(0)))
- if (And10C->getZExtValue() == 1) {
- LHS = Op0;
- RHS = Op1.getOperand(1);
- }
- } else if (Op0.getOpcode() == ISD::SHL) {
+ if (Op1.getOpcode() == ISD::SHL)
+ std::swap(Op0, Op1);
+ if (Op0.getOpcode() == ISD::SHL) {
if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
if (And00C->getZExtValue() == 1) {
+ // If we looked past a truncate, check that it's only truncating away
+ // known zeros.
+ unsigned BitWidth = Op0.getValueSizeInBits();
+ unsigned AndBitWidth = And.getValueSizeInBits();
+ if (BitWidth > AndBitWidth) {
+ APInt Mask = APInt::getAllOnesValue(BitWidth), Zeros, Ones;
+ DAG.ComputeMaskedBits(Op0, Mask, Zeros, Ones);
+ if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
+ return SDValue();
+ }
LHS = Op1;
RHS = Op0.getOperand(1);
}
@@ -5967,11 +6850,13 @@ static SDValue LowerToBT(SDValue And, ISD::CondCode CC,
}
if (LHS.getNode()) {
- // If LHS is i8, promote it to i16 with any_extend. There is no i8 BT
+ // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
// instruction. Since the shift amount is in-range-or-undefined, we know
- // that doing a bittest on the i16 value is ok. We extend to i32 because
+ // that doing a bittest on the i32 value is ok. We extend to i32 because
// the encoding for the i16 version is larger than the i32 version.
- if (LHS.getValueType() == MVT::i8)
+ // Also promote i16 to i32 for performance / code size reasons.
+ if (LHS.getValueType() == MVT::i8 ||
+ LHS.getValueType() == MVT::i16)
LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
// If the operand types disagree, extend the shift amount to match. Since
@@ -5988,7 +6873,7 @@ static SDValue LowerToBT(SDValue And, ISD::CondCode CC,
return SDValue();
}
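For context on LowerToBT: the pattern being matched is an 'and' with a single shifted bit, compared against zero. A hedged source-level example of code that fits this shape (function and parameter names are assumptions for illustration):

    // Hypothetical example: an and of (1 << Index) compared against zero, which
    // this lowering selects as BT + SETcc rather than SHL + AND + CMP. The i8/i16
    // operand is widened to i32 because the 32-bit BT encoding is the smallest.
    bool bit_is_set(unsigned Value, unsigned Index) {
      return (Value & (1u << Index)) != 0;
    }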
-SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
@@ -6002,7 +6887,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) {
if (Op0.getOpcode() == ISD::AND &&
Op0.hasOneUse() &&
Op1.getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(Op1)->getZExtValue() == 0 &&
+ cast<ConstantSDNode>(Op1)->isNullValue() &&
(CC == ISD::SETEQ || CC == ISD::SETNE)) {
SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
if (NewSetCC.getNode())
@@ -6024,7 +6909,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) {
DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1));
}
- bool isFP = Op.getOperand(1).getValueType().isFloatingPoint();
+ bool isFP = Op1.getValueType().isFloatingPoint();
unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
if (X86CC == X86::COND_INVALID)
return SDValue();
@@ -6042,7 +6927,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) {
DAG.getConstant(X86CC, MVT::i8), Cond);
}
-SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) const {
SDValue Cond;
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
@@ -6178,7 +7063,7 @@ static bool isX86LogicalCmp(SDValue Op) {
return false;
}
-SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
bool addTest = true;
SDValue Cond = Op.getOperand(0);
DebugLoc dl = Op.getDebugLoc();
@@ -6205,7 +7090,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) {
N2C && N2C->isNullValue() &&
RHSC && RHSC->isNullValue()) {
SDValue CmpOp0 = Cmp.getOperand(0);
- Cmp = DAG.getNode(X86ISD::CMP, dl, CmpOp0.getValueType(),
+ Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
return DAG.getNode(X86ISD::SETCC_CARRY, dl, Op.getValueType(),
DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
@@ -6298,7 +7183,7 @@ static bool isXor1OfSetCC(SDValue Op) {
return false;
}
-SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
bool addTest = true;
SDValue Chain = Op.getOperand(0);
SDValue Cond = Op.getOperand(1);
@@ -6382,15 +7267,16 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) {
(X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
CCode = X86::GetOppositeBranchCondition(CCode);
CC = DAG.getConstant(CCode, MVT::i8);
- SDValue User = SDValue(*Op.getNode()->use_begin(), 0);
+ SDNode *User = *Op.getNode()->use_begin();
// Look for an unconditional branch following this conditional branch.
// We need this because we need to reverse the successors in order
// to implement FCMP_OEQ.
- if (User.getOpcode() == ISD::BR) {
- SDValue FalseBB = User.getOperand(1);
- SDValue NewBR =
- DAG.UpdateNodeOperands(User, User.getOperand(0), Dest);
+ if (User->getOpcode() == ISD::BR) {
+ SDValue FalseBB = User->getOperand(1);
+ SDNode *NewBR =
+ DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
assert(NewBR == User);
+ (void)NewBR;
Dest = FalseBB;
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
@@ -6450,7 +7336,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) {
// correct sequence.
SDValue
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
assert(Subtarget->isTargetCygMing() &&
"This should be used only on Cygwin/Mingw targets");
DebugLoc dl = Op.getDebugLoc();
@@ -6462,7 +7348,6 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SDValue Flag;
- EVT IntPtr = getPointerTy();
EVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
Chain = DAG.getCopyToReg(Chain, dl, X86::EAX, Size, Flag);
@@ -6479,226 +7364,18 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
return DAG.getMergeValues(Ops1, 2, dl);
}
-SDValue
-X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- const Value *DstSV,
- uint64_t DstSVOff) {
- ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
-
- // If not DWORD aligned or size is more than the threshold, call the library.
- // The libc version is likely to be faster for these cases. It can use the
- // address value and run time information about the CPU.
- if ((Align & 3) != 0 ||
- !ConstantSize ||
- ConstantSize->getZExtValue() >
- getSubtarget()->getMaxInlineSizeThreshold()) {
- SDValue InFlag(0, 0);
-
- // Check to see if there is a specialized entry-point for memory zeroing.
- ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src);
-
- if (const char *bzeroEntry = V &&
- V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
- EVT IntPtr = getPointerTy();
- const Type *IntPtrTy = TD->getIntPtrType(*DAG.getContext());
- TargetLowering::ArgListTy Args;
- TargetLowering::ArgListEntry Entry;
- Entry.Node = Dst;
- Entry.Ty = IntPtrTy;
- Args.push_back(Entry);
- Entry.Node = Size;
- Args.push_back(Entry);
- std::pair<SDValue,SDValue> CallResult =
- LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()),
- false, false, false, false,
- 0, CallingConv::C, false, /*isReturnValueUsed=*/false,
- DAG.getExternalSymbol(bzeroEntry, IntPtr), Args, DAG, dl);
- return CallResult.second;
- }
-
- // Otherwise have the target-independent code call memset.
- return SDValue();
- }
-
- uint64_t SizeVal = ConstantSize->getZExtValue();
- SDValue InFlag(0, 0);
- EVT AVT;
- SDValue Count;
- ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Src);
- unsigned BytesLeft = 0;
- bool TwoRepStos = false;
- if (ValC) {
- unsigned ValReg;
- uint64_t Val = ValC->getZExtValue() & 255;
-
- // If the value is a constant, then we can potentially use larger sets.
- switch (Align & 3) {
- case 2: // WORD aligned
- AVT = MVT::i16;
- ValReg = X86::AX;
- Val = (Val << 8) | Val;
- break;
- case 0: // DWORD aligned
- AVT = MVT::i32;
- ValReg = X86::EAX;
- Val = (Val << 8) | Val;
- Val = (Val << 16) | Val;
- if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) { // QWORD aligned
- AVT = MVT::i64;
- ValReg = X86::RAX;
- Val = (Val << 32) | Val;
- }
- break;
- default: // Byte aligned
- AVT = MVT::i8;
- ValReg = X86::AL;
- Count = DAG.getIntPtrConstant(SizeVal);
- break;
- }
-
- if (AVT.bitsGT(MVT::i8)) {
- unsigned UBytes = AVT.getSizeInBits() / 8;
- Count = DAG.getIntPtrConstant(SizeVal / UBytes);
- BytesLeft = SizeVal % UBytes;
- }
-
- Chain = DAG.getCopyToReg(Chain, dl, ValReg, DAG.getConstant(Val, AVT),
- InFlag);
- InFlag = Chain.getValue(1);
- } else {
- AVT = MVT::i8;
- Count = DAG.getIntPtrConstant(SizeVal);
- Chain = DAG.getCopyToReg(Chain, dl, X86::AL, Src, InFlag);
- InFlag = Chain.getValue(1);
- }
-
- Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RCX :
- X86::ECX,
- Count, InFlag);
- InFlag = Chain.getValue(1);
- Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RDI :
- X86::EDI,
- Dst, InFlag);
- InFlag = Chain.getValue(1);
-
- SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
- SDValue Ops[] = { Chain, DAG.getValueType(AVT), InFlag };
- Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops, array_lengthof(Ops));
-
- if (TwoRepStos) {
- InFlag = Chain.getValue(1);
- Count = Size;
- EVT CVT = Count.getValueType();
- SDValue Left = DAG.getNode(ISD::AND, dl, CVT, Count,
- DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
- Chain = DAG.getCopyToReg(Chain, dl, (CVT == MVT::i64) ? X86::RCX :
- X86::ECX,
- Left, InFlag);
- InFlag = Chain.getValue(1);
- Tys = DAG.getVTList(MVT::Other, MVT::Flag);
- SDValue Ops[] = { Chain, DAG.getValueType(MVT::i8), InFlag };
- Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops, array_lengthof(Ops));
- } else if (BytesLeft) {
- // Handle the last 1 - 7 bytes.
- unsigned Offset = SizeVal - BytesLeft;
- EVT AddrVT = Dst.getValueType();
- EVT SizeVT = Size.getValueType();
-
- Chain = DAG.getMemset(Chain, dl,
- DAG.getNode(ISD::ADD, dl, AddrVT, Dst,
- DAG.getConstant(Offset, AddrVT)),
- Src,
- DAG.getConstant(BytesLeft, SizeVT),
- Align, DstSV, DstSVOff + Offset);
- }
-
- // TODO: Use a Tokenfactor, as in memcpy, instead of a single chain.
- return Chain;
-}
-
-SDValue
-X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
- SDValue Chain, SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- bool AlwaysInline,
- const Value *DstSV, uint64_t DstSVOff,
- const Value *SrcSV, uint64_t SrcSVOff) {
- // This requires the copy size to be a constant, preferably
- // within a subtarget-specific limit.
- ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
- if (!ConstantSize)
- return SDValue();
- uint64_t SizeVal = ConstantSize->getZExtValue();
- if (!AlwaysInline && SizeVal > getSubtarget()->getMaxInlineSizeThreshold())
- return SDValue();
-
- /// If not DWORD aligned, call the library.
- if ((Align & 3) != 0)
- return SDValue();
-
- // DWORD aligned
- EVT AVT = MVT::i32;
- if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) // QWORD aligned
- AVT = MVT::i64;
-
- unsigned UBytes = AVT.getSizeInBits() / 8;
- unsigned CountVal = SizeVal / UBytes;
- SDValue Count = DAG.getIntPtrConstant(CountVal);
- unsigned BytesLeft = SizeVal % UBytes;
-
- SDValue InFlag(0, 0);
- Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RCX :
- X86::ECX,
- Count, InFlag);
- InFlag = Chain.getValue(1);
- Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RDI :
- X86::EDI,
- Dst, InFlag);
- InFlag = Chain.getValue(1);
- Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RSI :
- X86::ESI,
- Src, InFlag);
- InFlag = Chain.getValue(1);
-
- SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
- SDValue Ops[] = { Chain, DAG.getValueType(AVT), InFlag };
- SDValue RepMovs = DAG.getNode(X86ISD::REP_MOVS, dl, Tys, Ops,
- array_lengthof(Ops));
-
- SmallVector<SDValue, 4> Results;
- Results.push_back(RepMovs);
- if (BytesLeft) {
- // Handle the last 1 - 7 bytes.
- unsigned Offset = SizeVal - BytesLeft;
- EVT DstVT = Dst.getValueType();
- EVT SrcVT = Src.getValueType();
- EVT SizeVT = Size.getValueType();
- Results.push_back(DAG.getMemcpy(Chain, dl,
- DAG.getNode(ISD::ADD, dl, DstVT, Dst,
- DAG.getConstant(Offset, DstVT)),
- DAG.getNode(ISD::ADD, dl, SrcVT, Src,
- DAG.getConstant(Offset, SrcVT)),
- DAG.getConstant(BytesLeft, SizeVT),
- Align, AlwaysInline,
- DstSV, DstSVOff + Offset,
- SrcSV, SrcSVOff + Offset));
- }
-
- return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &Results[0], Results.size());
-}
+SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
-SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) {
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
DebugLoc dl = Op.getDebugLoc();
if (!Subtarget->is64Bit()) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
- SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
+ SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
+ getPointerTy());
return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0,
false, false, 0);
}
@@ -6712,7 +7389,8 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) {
SDValue FIN = Op.getOperand(1);
// Store gp_offset
SDValue Store = DAG.getStore(Op.getOperand(0), dl,
- DAG.getConstant(VarArgsGPOffset, MVT::i32),
+ DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
+ MVT::i32),
FIN, SV, 0, false, false, 0);
MemOps.push_back(Store);
@@ -6720,41 +7398,41 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) {
FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(),
FIN, DAG.getIntPtrConstant(4));
Store = DAG.getStore(Op.getOperand(0), dl,
- DAG.getConstant(VarArgsFPOffset, MVT::i32),
- FIN, SV, 0, false, false, 0);
+ DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
+ MVT::i32),
+ FIN, SV, 4, false, false, 0);
MemOps.push_back(Store);
// Store ptr to overflow_arg_area
FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(),
FIN, DAG.getIntPtrConstant(4));
- SDValue OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
- Store = DAG.getStore(Op.getOperand(0), dl, OVFIN, FIN, SV, 0,
+ SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
+ getPointerTy());
+ Store = DAG.getStore(Op.getOperand(0), dl, OVFIN, FIN, SV, 8,
false, false, 0);
MemOps.push_back(Store);
// Store ptr to reg_save_area.
FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(),
FIN, DAG.getIntPtrConstant(8));
- SDValue RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
- Store = DAG.getStore(Op.getOperand(0), dl, RSFIN, FIN, SV, 0,
+ SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
+ getPointerTy());
+ Store = DAG.getStore(Op.getOperand(0), dl, RSFIN, FIN, SV, 16,
false, false, 0);
MemOps.push_back(Store);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOps[0], MemOps.size());
}
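The 0/4/8/16 store offsets used above correspond to the System V x86-64 va_list record. A sketch of that layout for reference (field names are conventional, not taken from this patch):

    // Sketch of the va_list record LowerVASTART initialises; the offsets match
    // the four stores above.
    struct X86_64VAList {
      unsigned GPOffset;          // offset 0:  next general-purpose register slot
      unsigned FPOffset;          // offset 4:  next XMM register slot
      void    *OverflowArgArea;   // offset 8:  stack area for overflow arguments
      void    *RegSaveArea;       // offset 16: register save area
    };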
-SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
// X86-64 va_list is a struct { i32, i32, i8*, i8* }.
assert(Subtarget->is64Bit() && "This code only handles 64-bit va_arg!");
- SDValue Chain = Op.getOperand(0);
- SDValue SrcPtr = Op.getOperand(1);
- SDValue SrcSV = Op.getOperand(2);
- llvm_report_error("VAArgInst is not yet implemented for x86-64!");
+ report_fatal_error("VAArgInst is not yet implemented for x86-64!");
return SDValue();
}
-SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
// X86-64 va_list is a struct { i32, i32, i8*, i8* }.
assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
SDValue Chain = Op.getOperand(0);
@@ -6765,12 +7443,12 @@ SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
return DAG.getMemcpy(Chain, dl, DstPtr, SrcPtr,
- DAG.getIntPtrConstant(24), 8, false,
- DstSV, 0, SrcSV, 0);
+ DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
+ false, DstSV, 0, SrcSV, 0);
}
SDValue
-X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
+X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
switch (IntNo) {
@@ -6875,24 +7553,58 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
DAG.getConstant(X86CC, MVT::i8), Cond);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
- // ptest intrinsics. The intrinsic these come from are designed to return
- // an integer value, not just an instruction so lower it to the ptest
- // pattern and a setcc for the result.
+ // ptest and testp intrinsics. The intrinsics these come from are designed to
+ // return an integer value, not just an instruction, so lower it to the ptest
+ // or testp pattern and a setcc for the result.
case Intrinsic::x86_sse41_ptestz:
case Intrinsic::x86_sse41_ptestc:
- case Intrinsic::x86_sse41_ptestnzc:{
+ case Intrinsic::x86_sse41_ptestnzc:
+ case Intrinsic::x86_avx_ptestz_256:
+ case Intrinsic::x86_avx_ptestc_256:
+ case Intrinsic::x86_avx_ptestnzc_256:
+ case Intrinsic::x86_avx_vtestz_ps:
+ case Intrinsic::x86_avx_vtestc_ps:
+ case Intrinsic::x86_avx_vtestnzc_ps:
+ case Intrinsic::x86_avx_vtestz_pd:
+ case Intrinsic::x86_avx_vtestc_pd:
+ case Intrinsic::x86_avx_vtestnzc_pd:
+ case Intrinsic::x86_avx_vtestz_ps_256:
+ case Intrinsic::x86_avx_vtestc_ps_256:
+ case Intrinsic::x86_avx_vtestnzc_ps_256:
+ case Intrinsic::x86_avx_vtestz_pd_256:
+ case Intrinsic::x86_avx_vtestc_pd_256:
+ case Intrinsic::x86_avx_vtestnzc_pd_256: {
+ bool IsTestPacked = false;
unsigned X86CC = 0;
switch (IntNo) {
default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
+ case Intrinsic::x86_avx_vtestz_ps:
+ case Intrinsic::x86_avx_vtestz_pd:
+ case Intrinsic::x86_avx_vtestz_ps_256:
+ case Intrinsic::x86_avx_vtestz_pd_256:
+ IsTestPacked = true; // Fallthrough
case Intrinsic::x86_sse41_ptestz:
+ case Intrinsic::x86_avx_ptestz_256:
// ZF = 1
X86CC = X86::COND_E;
break;
+ case Intrinsic::x86_avx_vtestc_ps:
+ case Intrinsic::x86_avx_vtestc_pd:
+ case Intrinsic::x86_avx_vtestc_ps_256:
+ case Intrinsic::x86_avx_vtestc_pd_256:
+ IsTestPacked = true; // Fallthrough
case Intrinsic::x86_sse41_ptestc:
+ case Intrinsic::x86_avx_ptestc_256:
// CF = 1
X86CC = X86::COND_B;
break;
+ case Intrinsic::x86_avx_vtestnzc_ps:
+ case Intrinsic::x86_avx_vtestnzc_pd:
+ case Intrinsic::x86_avx_vtestnzc_ps_256:
+ case Intrinsic::x86_avx_vtestnzc_pd_256:
+ IsTestPacked = true; // Fallthrough
case Intrinsic::x86_sse41_ptestnzc:
+ case Intrinsic::x86_avx_ptestnzc_256:
// ZF and CF = 0
X86CC = X86::COND_A;
break;
@@ -6900,7 +7612,8 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
- SDValue Test = DAG.getNode(X86ISD::PTEST, dl, MVT::i32, LHS, RHS);
+ unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
+ SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
SDValue CC = DAG.getConstant(X86CC, MVT::i8);
SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
@@ -7011,7 +7724,11 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
}
}
-SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI->setReturnAddressIsTaken(true);
+
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
DebugLoc dl = Op.getDebugLoc();
@@ -7032,9 +7749,10 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) {
RetAddrFI, NULL, 0, false, false, 0);
}
-SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
MFI->setFrameAddressIsTaken(true);
+
EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
@@ -7047,24 +7765,24 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
}
SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
return DAG.getIntPtrConstant(2*TD->getPointerSize());
}
-SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
-{
+SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
SDValue Chain = Op.getOperand(0);
SDValue Offset = Op.getOperand(1);
SDValue Handler = Op.getOperand(2);
DebugLoc dl = Op.getDebugLoc();
- SDValue Frame = DAG.getRegister(Subtarget->is64Bit() ? X86::RBP : X86::EBP,
- getPointerTy());
+ SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
+ Subtarget->is64Bit() ? X86::RBP : X86::EBP,
+ getPointerTy());
unsigned StoreAddrReg = (Subtarget->is64Bit() ? X86::RCX : X86::ECX);
- SDValue StoreAddr = DAG.getNode(ISD::SUB, dl, getPointerTy(), Frame,
- DAG.getIntPtrConstant(-TD->getPointerSize()));
+ SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame,
+ DAG.getIntPtrConstant(TD->getPointerSize()));
StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset);
Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, NULL, 0, false, false, 0);
Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
@@ -7076,7 +7794,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
}
SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
SDValue Root = Op.getOperand(0);
SDValue Trmp = Op.getOperand(1); // trampoline
SDValue FPtr = Op.getOperand(2); // nested function
@@ -7167,12 +7885,14 @@ SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op,
InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
if (InRegCount > 2) {
- llvm_report_error("Nest register in use - reduce number of inreg parameters!");
+ report_fatal_error("Nest register in use - reduce number of inreg"
+ " parameters!");
}
}
break;
}
case CallingConv::X86_FastCall:
+ case CallingConv::X86_ThisCall:
case CallingConv::Fast:
// Pass 'nest' parameter in EAX.
// Must be kept in sync with X86CallingConv.td
@@ -7216,7 +7936,8 @@ SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op,
}
}
-SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
+ SelectionDAG &DAG) const {
/*
The rounding mode is in bits 11:10 of FPSR, and has the following
settings:
@@ -7278,7 +7999,7 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) {
ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
}
-SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
EVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
@@ -7312,7 +8033,7 @@ SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
return Op;
}
-SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
EVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
@@ -7342,7 +8063,7 @@ SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
return Op;
}
-SDValue X86TargetLowering::LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
assert(VT == MVT::v2i64 && "Only know how to lower V2I64 multiply");
DebugLoc dl = Op.getDebugLoc();
@@ -7386,8 +8107,88 @@ SDValue X86TargetLowering::LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) {
return Res;
}
+SDValue X86TargetLowering::LowerSHL(SDValue Op, SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+ DebugLoc dl = Op.getDebugLoc();
+ SDValue R = Op.getOperand(0);
+
+ LLVMContext *Context = DAG.getContext();
+
+ assert(Subtarget->hasSSE41() && "Cannot lower SHL without SSE4.1 or later");
+
+ if (VT == MVT::v4i32) {
+ Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32),
+ Op.getOperand(1), DAG.getConstant(23, MVT::i32));
-SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) {
+ ConstantInt *CI = ConstantInt::get(*Context, APInt(32, 0x3f800000U));
+
+ std::vector<Constant*> CV(4, CI);
+ Constant *C = ConstantVector::get(CV);
+ SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
+ SDValue Addend = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
+ PseudoSourceValue::getConstantPool(), 0,
+ false, false, 16);
+
+ Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend);
+ Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, Op);
+ Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
+ return DAG.getNode(ISD::MUL, dl, VT, Op, R);
+ }
+ if (VT == MVT::v16i8) {
+ // a = a << 5;
+ Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32),
+ Op.getOperand(1), DAG.getConstant(5, MVT::i32));
+
+ ConstantInt *CM1 = ConstantInt::get(*Context, APInt(8, 15));
+ ConstantInt *CM2 = ConstantInt::get(*Context, APInt(8, 63));
+
+ std::vector<Constant*> CVM1(16, CM1);
+ std::vector<Constant*> CVM2(16, CM2);
+ Constant *C = ConstantVector::get(CVM1);
+ SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
+ SDValue M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
+ PseudoSourceValue::getConstantPool(), 0,
+ false, false, 16);
+
+ // r = pblendv(r, psllw(r & (char16)15, 4), a);
+ M = DAG.getNode(ISD::AND, dl, VT, R, M);
+ M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M,
+ DAG.getConstant(4, MVT::i32));
+ R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ DAG.getConstant(Intrinsic::x86_sse41_pblendvb, MVT::i32),
+ R, M, Op);
+ // a += a
+ Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
+
+ C = ConstantVector::get(CVM2);
+ CPIdx = DAG.getConstantPool(C, getPointerTy(), 16);
+ M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
+ PseudoSourceValue::getConstantPool(), 0, false, false, 16);
+
+ // r = pblendv(r, psllw(r & (char16)63, 2), a);
+ M = DAG.getNode(ISD::AND, dl, VT, R, M);
+ M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M,
+ DAG.getConstant(2, MVT::i32));
+ R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ DAG.getConstant(Intrinsic::x86_sse41_pblendvb, MVT::i32),
+ R, M, Op);
+ // a += a
+ Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
+
+ // return pblendv(r, r+r, a);
+ R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
+ DAG.getConstant(Intrinsic::x86_sse41_pblendvb, MVT::i32),
+ R, DAG.getNode(ISD::ADD, dl, VT, R, R), Op);
+ return R;
+ }
+ return SDValue();
+}
+
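A brief note on the v4i32 path of LowerSHL above: it computes x << s as x * 2^s, building 2^s per lane by shifting the shift amount into the float exponent field and adding the bias of 1.0f (0x3f800000). A hedged scalar C++ model of the same arithmetic (not the vector lowering itself; assumes s < 32):

    #include <cstdint>
    #include <cstring>

    // Hypothetical scalar analogue of the v4i32 SHL trick.
    uint32_t shl_via_float_exponent(uint32_t x, uint32_t s) {
      uint32_t bits = (s << 23) + 0x3f800000u;  // pslli_d by 23 plus the 1.0f addend
      float pow2;
      std::memcpy(&pow2, &bits, sizeof pow2);   // the BIT_CONVERT to v4f32
      return x * static_cast<uint32_t>(pow2);   // the FP_TO_SINT and final MUL
    }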
+SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
// Lower the "add/sub/mul with overflow" instruction into a regular ins plus
// a "setcc" instruction that checks the overflow flag. The "brcond" lowering
// looks for this combo and may remove the "setcc" instruction if the "setcc"
@@ -7455,7 +8256,51 @@ SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) {
return Sum;
}
-SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{
+ DebugLoc dl = Op.getDebugLoc();
+
+ if (!Subtarget->hasSSE2()) {
+ SDValue Chain = Op.getOperand(0);
+ SDValue Zero = DAG.getConstant(0,
+ Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
+ SDValue Ops[] = {
+ DAG.getRegister(X86::ESP, MVT::i32), // Base
+ DAG.getTargetConstant(1, MVT::i8), // Scale
+ DAG.getRegister(0, MVT::i32), // Index
+ DAG.getTargetConstant(0, MVT::i32), // Disp
+ DAG.getRegister(0, MVT::i32), // Segment.
+ Zero,
+ Chain
+ };
+ SDNode *Res =
+ DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops,
+ array_lengthof(Ops));
+ return SDValue(Res, 0);
+ }
+
+ unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
+ if (!isDev)
+ return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
+
+ unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+ unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
+ unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
+
+ // def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
+ if (!Op1 && !Op2 && !Op3 && Op4)
+ return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0));
+
+ // def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
+ if (Op1 && !Op2 && !Op3 && !Op4)
+ return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0));
+
+ // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)),
+ // (MFENCE)>;
+ return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
+}
+
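To illustrate what the new LowerMEMBARRIER handles: a full sequentially consistent fence in user code is the kind of operation that ends up here, emitted as MFENCE when SSE2 is available and as a locked OR to the stack otherwise. A hedged C++ counterpart (an analogy only, not the lowering itself):

    #include <atomic>

    // Hypothetical source-level counterpart of a full memory barrier.
    void full_fence() {
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }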
+SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
EVT T = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
unsigned Reg = 0;
@@ -7486,7 +8331,7 @@ SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) {
}
SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
assert(Subtarget->is64Bit() && "Result not type legalized?");
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
SDValue TheChain = Op.getOperand(0);
@@ -7504,7 +8349,28 @@ SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
return DAG.getMergeValues(Ops, 2, dl);
}
-SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerBIT_CONVERT(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT SrcVT = Op.getOperand(0).getValueType();
+ EVT DstVT = Op.getValueType();
+ assert((Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
+ Subtarget->hasMMX() && !DisableMMX) &&
+ "Unexpected custom BIT_CONVERT");
+ assert((DstVT == MVT::i64 ||
+ (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
+ "Unexpected custom BIT_CONVERT");
+ // i64 <=> MMX conversions are Legal.
+ if (SrcVT==MVT::i64 && DstVT.isVector())
+ return Op;
+ if (DstVT==MVT::i64 && SrcVT.isVector())
+ return Op;
+ // MMX <=> MMX conversions are Legal.
+ if (SrcVT.isVector() && DstVT.isVector())
+ return Op;
+ // All other conversions need to be expanded.
+ return SDValue();
+}
+SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const {
SDNode *Node = Op.getNode();
DebugLoc dl = Node->getDebugLoc();
EVT T = Node->getValueType(0);
@@ -7520,9 +8386,10 @@ SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
/// LowerOperation - Provide custom lowering hooks for some operations.
///
-SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
+SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: llvm_unreachable("Should not custom lower this!");
+ case ISD::MEMBARRIER: return LowerMEMBARRIER(Op,DAG);
case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG);
case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
@@ -7566,6 +8433,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
case ISD::CTLZ: return LowerCTLZ(Op, DAG);
case ISD::CTTZ: return LowerCTTZ(Op, DAG);
case ISD::MUL: return LowerMUL_V2I64(Op, DAG);
+ case ISD::SHL: return LowerSHL(Op, DAG);
case ISD::SADDO:
case ISD::UADDO:
case ISD::SSUBO:
@@ -7573,12 +8441,13 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
case ISD::SMULO:
case ISD::UMULO: return LowerXALUO(Op, DAG);
case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG);
+ case ISD::BIT_CONVERT: return LowerBIT_CONVERT(Op, DAG);
}
}
void X86TargetLowering::
ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG, unsigned NewOp) {
+ SelectionDAG &DAG, unsigned NewOp) const {
EVT T = Node->getValueType(0);
DebugLoc dl = Node->getDebugLoc();
assert (T == MVT::i64 && "Only know how to expand i64 atomics");
@@ -7603,7 +8472,7 @@ ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results,
/// with a new node built out of custom code.
void X86TargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) const {
DebugLoc dl = N->getDebugLoc();
switch (N->getOpcode()) {
default:
@@ -7739,6 +8608,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
case X86ISD::FRCP: return "X86ISD::FRCP";
case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
+ case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
case X86ISD::SegmentBaseAddress: return "X86ISD::SegmentBaseAddress";
case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
@@ -7776,6 +8646,40 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::AND: return "X86ISD::AND";
case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
case X86ISD::PTEST: return "X86ISD::PTEST";
+ case X86ISD::TESTP: return "X86ISD::TESTP";
+ case X86ISD::PALIGN: return "X86ISD::PALIGN";
+ case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
+ case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
+ case X86ISD::PSHUFHW_LD: return "X86ISD::PSHUFHW_LD";
+ case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
+ case X86ISD::PSHUFLW_LD: return "X86ISD::PSHUFLW_LD";
+ case X86ISD::SHUFPS: return "X86ISD::SHUFPS";
+ case X86ISD::SHUFPD: return "X86ISD::SHUFPD";
+ case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
+ case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
+ case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
+ case X86ISD::MOVHLPD: return "X86ISD::MOVHLPD";
+ case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
+ case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
+ case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
+ case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
+ case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
+ case X86ISD::MOVSHDUP_LD: return "X86ISD::MOVSHDUP_LD";
+ case X86ISD::MOVSLDUP_LD: return "X86ISD::MOVSLDUP_LD";
+ case X86ISD::MOVSD: return "X86ISD::MOVSD";
+ case X86ISD::MOVSS: return "X86ISD::MOVSS";
+ case X86ISD::UNPCKLPS: return "X86ISD::UNPCKLPS";
+ case X86ISD::UNPCKLPD: return "X86ISD::UNPCKLPD";
+ case X86ISD::UNPCKHPS: return "X86ISD::UNPCKHPS";
+ case X86ISD::UNPCKHPD: return "X86ISD::UNPCKHPD";
+ case X86ISD::PUNPCKLBW: return "X86ISD::PUNPCKLBW";
+ case X86ISD::PUNPCKLWD: return "X86ISD::PUNPCKLWD";
+ case X86ISD::PUNPCKLDQ: return "X86ISD::PUNPCKLDQ";
+ case X86ISD::PUNPCKLQDQ: return "X86ISD::PUNPCKLQDQ";
+ case X86ISD::PUNPCKHBW: return "X86ISD::PUNPCKHBW";
+ case X86ISD::PUNPCKHWD: return "X86ISD::PUNPCKHWD";
+ case X86ISD::PUNPCKHDQ: return "X86ISD::PUNPCKHDQ";
+ case X86ISD::PUNPCKHQDQ: return "X86ISD::PUNPCKHQDQ";
case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
case X86ISD::MINGW_ALLOCA: return "X86ISD::MINGW_ALLOCA";
}
@@ -7787,6 +8691,7 @@ bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
const Type *Ty) const {
// X86 supports extremely general addressing modes.
CodeModel::Model M = getTargetMachine().getCodeModel();
+ Reloc::Model R = getTargetMachine().getRelocationModel();
// X86 allows a sign-extended 32-bit immediate field as a displacement.
if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL))
@@ -7806,7 +8711,8 @@ bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
return false;
// If lower 4G is not available, then we must use rip-relative addressing.
- if (Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
+ if ((M != CodeModel::Small || R != Reloc::Static) &&
+ Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
return false;
}
@@ -7876,9 +8782,9 @@ bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
bool
X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
EVT VT) const {
- // Only do shuffles on 128-bit vector types for now.
+ // Very little shuffling can be done for 64-bit vectors right now.
if (VT.getSizeInBits() == 64)
- return false;
+ return isPALIGNRMask(M, VT, Subtarget->hasSSSE3());
// FIXME: pshufb, blends, shifts.
return (VT.getVectorNumElements() == 2 ||
@@ -7923,7 +8829,6 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
unsigned immOpc,
unsigned LoadOpc,
unsigned CXchgOpc,
- unsigned copyOpc,
unsigned notOpc,
unsigned EAXreg,
TargetRegisterClass *RC,
@@ -7950,8 +8855,11 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
F->insert(MBBIter, newMBB);
F->insert(MBBIter, nextMBB);
- // Move all successors to thisMBB to nextMBB
- nextMBB->transferSuccessors(thisMBB);
+ // Transfer the remainder of thisMBB and its successor edges to nextMBB.
+ nextMBB->splice(nextMBB->begin(), thisMBB,
+ llvm::next(MachineBasicBlock::iterator(bInstr)),
+ thisMBB->end());
+ nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
// Update thisMBB to fall through to newMBB
thisMBB->addSuccessor(newMBB);
@@ -7961,17 +8869,17 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
newMBB->addSuccessor(newMBB);
// Insert instructions into newMBB based on incoming instruction
- assert(bInstr->getNumOperands() < X86AddrNumOperands + 4 &&
+ assert(bInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
"unexpected number of operands");
DebugLoc dl = bInstr->getDebugLoc();
MachineOperand& destOper = bInstr->getOperand(0);
- MachineOperand* argOpers[2 + X86AddrNumOperands];
+ MachineOperand* argOpers[2 + X86::AddrNumOperands];
int numArgs = bInstr->getNumOperands() - 1;
for (int i=0; i < numArgs; ++i)
argOpers[i] = &bInstr->getOperand(i+1);
// x86 address has 4 operands: base, index, scale, and displacement
- int lastAddrIndx = X86AddrNumOperands - 1; // [0,3]
+ int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3]
int valArgIndx = lastAddrIndx + 1;
unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
@@ -7997,7 +8905,7 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
MIB.addReg(tt);
(*MIB).addOperand(*argOpers[valArgIndx]);
- MIB = BuildMI(newMBB, dl, TII->get(copyOpc), EAXreg);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), EAXreg);
MIB.addReg(t1);
MIB = BuildMI(newMBB, dl, TII->get(CXchgOpc));
@@ -8008,13 +8916,13 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
(*MIB).setMemRefs(bInstr->memoperands_begin(),
bInstr->memoperands_end());
- MIB = BuildMI(newMBB, dl, TII->get(copyOpc), destOper.getReg());
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg());
MIB.addReg(EAXreg);
// insert branch
BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);
- F->DeleteMachineInstr(bInstr); // The pseudo instruction is gone now.
+ bInstr->eraseFromParent(); // The pseudo instruction is gone now.
return nextMBB;
}
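The machine blocks built here form a classic compare-and-swap retry loop: load the old value, apply the operation, attempt LCMPXCHG, and branch back (JNE) until it succeeds. A hedged C++ sketch of the same loop shape, with std::atomic standing in for the raw cmpxchg (names are illustrative):

    #include <atomic>

    // Hypothetical sketch of the loop shape emitted for e.g. ATOMAND32.
    unsigned atomic_and_fetch_old(std::atomic<unsigned> &Mem, unsigned Val) {
      unsigned Old = Mem.load();                            // the initial MOV32rm
      while (!Mem.compare_exchange_weak(Old, Old & Val)) {  // the LCMPXCHG32
        // On failure, Old is refreshed; this is the JNE_4 back-edge to newMBB.
      }
      return Old;                                           // the COPY out of EAX
    }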
@@ -8044,7 +8952,6 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
const TargetRegisterClass *RC = X86::GR32RegisterClass;
const unsigned LoadOpc = X86::MOV32rm;
- const unsigned copyOpc = X86::MOV32rr;
const unsigned NotOpc = X86::NOT32r;
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
const BasicBlock *LLVM_BB = MBB->getBasicBlock();
@@ -8059,8 +8966,11 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
F->insert(MBBIter, newMBB);
F->insert(MBBIter, nextMBB);
- // Move all successors to thisMBB to nextMBB
- nextMBB->transferSuccessors(thisMBB);
+ // Transfer the remainder of thisMBB and its successor edges to nextMBB.
+ nextMBB->splice(nextMBB->begin(), thisMBB,
+ llvm::next(MachineBasicBlock::iterator(bInstr)),
+ thisMBB->end());
+ nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
// Update thisMBB to fall through to newMBB
thisMBB->addSuccessor(newMBB);
@@ -8072,16 +8982,22 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
DebugLoc dl = bInstr->getDebugLoc();
// Insert instructions into newMBB based on incoming instruction
// There are 8 "real" operands plus 9 implicit def/uses, ignored here.
- assert(bInstr->getNumOperands() < X86AddrNumOperands + 14 &&
+ assert(bInstr->getNumOperands() < X86::AddrNumOperands + 14 &&
"unexpected number of operands");
MachineOperand& dest1Oper = bInstr->getOperand(0);
MachineOperand& dest2Oper = bInstr->getOperand(1);
- MachineOperand* argOpers[2 + X86AddrNumOperands];
- for (int i=0; i < 2 + X86AddrNumOperands; ++i)
+ MachineOperand* argOpers[2 + X86::AddrNumOperands];
+ for (int i=0; i < 2 + X86::AddrNumOperands; ++i) {
argOpers[i] = &bInstr->getOperand(i+2);
+ // We use some of the operands multiple times, so conservatively just
+ // clear any kill flags that might be present.
+ if (argOpers[i]->isReg() && argOpers[i]->isUse())
+ argOpers[i]->setIsKill(false);
+ }
+
// x86 address has 5 operands: base, index, scale, displacement, and segment.
- int lastAddrIndx = X86AddrNumOperands - 1; // [0,3]
+ int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3]
unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
MachineInstrBuilder MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t1);
@@ -8145,14 +9061,14 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
MIB.addReg(t2);
(*MIB).addOperand(*argOpers[valArgIndx + 1]);
- MIB = BuildMI(newMBB, dl, TII->get(copyOpc), X86::EAX);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX);
MIB.addReg(t1);
- MIB = BuildMI(newMBB, dl, TII->get(copyOpc), X86::EDX);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EDX);
MIB.addReg(t2);
- MIB = BuildMI(newMBB, dl, TII->get(copyOpc), X86::EBX);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EBX);
MIB.addReg(t5);
- MIB = BuildMI(newMBB, dl, TII->get(copyOpc), X86::ECX);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::ECX);
MIB.addReg(t6);
MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG8B));
@@ -8163,15 +9079,15 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
(*MIB).setMemRefs(bInstr->memoperands_begin(),
bInstr->memoperands_end());
- MIB = BuildMI(newMBB, dl, TII->get(copyOpc), t3);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t3);
MIB.addReg(X86::EAX);
- MIB = BuildMI(newMBB, dl, TII->get(copyOpc), t4);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t4);
MIB.addReg(X86::EDX);
// insert branch
BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);
- F->DeleteMachineInstr(bInstr); // The pseudo instruction is gone now.
+ bInstr->eraseFromParent(); // The pseudo instruction is gone now.
return nextMBB;
}
@@ -8205,8 +9121,11 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
F->insert(MBBIter, newMBB);
F->insert(MBBIter, nextMBB);
- // Move all successors of thisMBB to nextMBB
- nextMBB->transferSuccessors(thisMBB);
+ // Transfer the remainder of thisMBB and its successor edges to nextMBB.
+ nextMBB->splice(nextMBB->begin(), thisMBB,
+ llvm::next(MachineBasicBlock::iterator(mInstr)),
+ thisMBB->end());
+ nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
// Update thisMBB to fall through to newMBB
thisMBB->addSuccessor(newMBB);
@@ -8217,16 +9136,16 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
DebugLoc dl = mInstr->getDebugLoc();
// Insert instructions into newMBB based on incoming instruction
- assert(mInstr->getNumOperands() < X86AddrNumOperands + 4 &&
+ assert(mInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
"unexpected number of operands");
MachineOperand& destOper = mInstr->getOperand(0);
- MachineOperand* argOpers[2 + X86AddrNumOperands];
+ MachineOperand* argOpers[2 + X86::AddrNumOperands];
int numArgs = mInstr->getNumOperands() - 1;
for (int i=0; i < numArgs; ++i)
argOpers[i] = &mInstr->getOperand(i+1);
// x86 address has 4 operands: base, index, scale, and displacement
- int lastAddrIndx = X86AddrNumOperands - 1; // [0,3]
+ int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3]
int valArgIndx = lastAddrIndx + 1;
unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
@@ -8241,12 +9160,12 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
if (argOpers[valArgIndx]->isReg())
- MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t2);
else
MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2);
(*MIB).addOperand(*argOpers[valArgIndx]);
- MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), X86::EAX);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX);
MIB.addReg(t1);
MIB = BuildMI(newMBB, dl, TII->get(X86::CMP32rr));
@@ -8268,31 +9187,42 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
(*MIB).setMemRefs(mInstr->memoperands_begin(),
mInstr->memoperands_end());
- MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), destOper.getReg());
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg());
MIB.addReg(X86::EAX);
// insert branch
BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);
- F->DeleteMachineInstr(mInstr); // The pseudo instruction is gone now.
+ mInstr->eraseFromParent(); // The pseudo instruction is gone now.
return nextMBB;
}
// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
-// all of this code can be replaced with that in the .td file.
+// or XMM0_V32I8 in AVX, all of this code can be replaced with that
+// in the .td file.
MachineBasicBlock *
X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB,
unsigned numArgs, bool memArg) const {
- MachineFunction *F = BB->getParent();
+ assert((Subtarget->hasSSE42() || Subtarget->hasAVX()) &&
+ "Target must have SSE4.2 or AVX features enabled");
+
DebugLoc dl = MI->getDebugLoc();
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
unsigned Opc;
- if (memArg)
- Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm;
- else
- Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr;
+
+ if (!Subtarget->hasAVX()) {
+ if (memArg)
+ Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm;
+ else
+ Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr;
+ } else {
+ if (memArg)
+ Opc = numArgs == 3 ? X86::VPCMPISTRM128rm : X86::VPCMPESTRM128rm;
+ else
+ Opc = numArgs == 3 ? X86::VPCMPISTRM128rr : X86::VPCMPESTRM128rr;
+ }
MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(Opc));
@@ -8306,7 +9236,7 @@ X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB,
BuildMI(BB, dl, TII->get(X86::MOVAPSrr), MI->getOperand(0).getReg())
.addReg(X86::XMM0);
- F->DeleteMachineInstr(MI);
+ MI->eraseFromParent();
return BB;
}
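For reference, the pseudos expanded by EmitPCMP come from the SSE4.2 string-compare intrinsics whose mask-producing forms map to PCMPISTRM/PCMPESTRM (and their VEX-encoded VPCMP* variants under AVX). A hedged user-level example (intrinsic and flag names from the standard SSE4.2 headers; the mode is chosen purely for illustration):

    #include <nmmintrin.h>

    // Hypothetical example: byte-wise equality mask via the implicit-length
    // string compare, which is selected as PCMPISTRM128 (VPCMPISTRM128 on AVX).
    __m128i equal_each_mask(__m128i a, __m128i b) {
      return _mm_cmpistrm(a, b, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH);
    }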
@@ -8335,9 +9265,12 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
F->insert(MBBIter, XMMSaveMBB);
F->insert(MBBIter, EndMBB);
- // Set up the CFG.
- // Move any original successors of MBB to the end block.
- EndMBB->transferSuccessors(MBB);
+ // Transfer the remainder of MBB and its successor edges to EndMBB.
+ EndMBB->splice(EndMBB->begin(), MBB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ MBB->end());
+ EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
+
// The original block will now fall through to the XMM save block.
MBB->addSuccessor(XMMSaveMBB);
// The XMMSaveMBB will fall through to the end block.
@@ -8376,15 +9309,14 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
.addMemOperand(MMO);
}
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return EndMBB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
- MachineBasicBlock *BB,
- DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const {
+ MachineBasicBlock *BB) const {
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
@@ -8406,79 +9338,140 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
MachineFunction *F = BB->getParent();
MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
- unsigned Opc =
- X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
- BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
F->insert(It, copy0MBB);
F->insert(It, sinkMBB);
- // Update machine-CFG edges by first adding all successors of the current
- // block to the new block which will contain the Phi node for the select.
- // Also inform sdisel of the edge changes.
- for (MachineBasicBlock::succ_iterator I = BB->succ_begin(),
- E = BB->succ_end(); I != E; ++I) {
- EM->insert(std::make_pair(*I, sinkMBB));
- sinkMBB->addSuccessor(*I);
- }
- // Next, remove all successors of the current block, and add the true
- // and fallthrough blocks as its successors.
- while (!BB->succ_empty())
- BB->removeSuccessor(BB->succ_begin());
+
+ // If the EFLAGS register isn't dead in the terminator, then claim that it's
+ // live into the sink and copy blocks.
+ const MachineFunction *MF = BB->getParent();
+ const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
+ BitVector ReservedRegs = TRI->getReservedRegs(*MF);
+
+ for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
+ const MachineOperand &MO = MI->getOperand(I);
+ if (!MO.isReg() || !MO.isUse() || MO.isKill()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg != X86::EFLAGS) continue;
+ copy0MBB->addLiveIn(Reg);
+ sinkMBB->addLiveIn(Reg);
+ }
+
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
+
// Add the true and fallthrough blocks as its successors.
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
+ // Create the conditional branch instruction.
+ unsigned Opc =
+ X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
+ BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
+
// copy0MBB:
// %FalseValue = ...
// # fallthrough to sinkMBB
- BB = copy0MBB;
-
- // Update machine-CFG edges
- BB->addSuccessor(sinkMBB);
+ copy0MBB->addSuccessor(sinkMBB);
// sinkMBB:
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
- BB = sinkMBB;
- BuildMI(BB, DL, TII->get(X86::PHI), MI->getOperand(0).getReg())
+ BuildMI(*sinkMBB, sinkMBB->begin(), DL,
+ TII->get(X86::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
- return BB;
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return sinkMBB;
}
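The rewritten EmitLoweredSelect still produces the standard select diamond: the original block branches over copy0MBB, copy0MBB falls through, and sinkMBB joins the two values with a PHI. A hedged source-level equivalent of what a CMOV_* pseudo represents (purely illustrative):

    // Hypothetical source whose select, in the absence of a usable CMOV, is
    // expanded into the branch-plus-PHI diamond built above.
    int select_value(bool Cond, int TrueValue, int FalseValue) {
      if (Cond)
        return TrueValue;    // thisMBB branches around copy0MBB
      return FalseValue;     // copy0MBB falls through to the PHI in sinkMBB
    }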
MachineBasicBlock *
X86TargetLowering::EmitLoweredMingwAlloca(MachineInstr *MI,
- MachineBasicBlock *BB,
- DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const {
+ MachineBasicBlock *BB) const {
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
- MachineFunction *F = BB->getParent();
// The lowering is pretty easy: we're just emitting the call to _alloca. The
// non-trivial part is impdef of ESP.
// FIXME: The code should be tweaked as soon as we'll try to do codegen for
// mingw-w64.
- BuildMI(BB, DL, TII->get(X86::CALLpcrel32))
+ BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32))
.addExternalSymbol("_alloca")
.addReg(X86::EAX, RegState::Implicit)
.addReg(X86::ESP, RegState::Implicit)
.addReg(X86::EAX, RegState::Define | RegState::Implicit)
- .addReg(X86::ESP, RegState::Define | RegState::Implicit);
+ .addReg(X86::ESP, RegState::Define | RegState::Implicit)
+ .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+MachineBasicBlock *
+X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+ // This is pretty easy. We're taking the value that we received from
+ // our load from the relocation, sticking it in either RDI (x86-64)
+ // or EAX and doing an indirect call. The return value will then
+ // be in the normal return register.
+ const X86InstrInfo *TII
+ = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo());
+ DebugLoc DL = MI->getDebugLoc();
+ MachineFunction *F = BB->getParent();
+ bool IsWin64 = Subtarget->isTargetWin64();
+
+ assert(MI->getOperand(3).isGlobal() && "This should be a global");
+
+ if (Subtarget->is64Bit()) {
+ MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
+ TII->get(X86::MOV64rm), X86::RDI)
+ .addReg(X86::RIP)
+ .addImm(0).addReg(0)
+ .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
+ MI->getOperand(3).getTargetFlags())
+ .addReg(0);
+ MIB = BuildMI(*BB, MI, DL, TII->get(IsWin64 ? X86::WINCALL64m : X86::CALL64m));
+ addDirectMem(MIB, X86::RDI);
+ } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
+ MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
+ TII->get(X86::MOV32rm), X86::EAX)
+ .addReg(0)
+ .addImm(0).addReg(0)
+ .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
+ MI->getOperand(3).getTargetFlags())
+ .addReg(0);
+ MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
+ addDirectMem(MIB, X86::EAX);
+ } else {
+ MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
+ TII->get(X86::MOV32rm), X86::EAX)
+ .addReg(TII->getGlobalBaseReg(F))
+ .addImm(0).addReg(0)
+ .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
+ MI->getOperand(3).getTargetFlags())
+ .addReg(0);
+ MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
+ addDirectMem(MIB, X86::EAX);
+ }
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
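For readers new to the TLSCall lowering added here: the pseudo represents an access to a thread-local variable on targets that use descriptor calls, so the expansion loads the descriptor from the relocation into RDI/EAX and calls through it. A hedged source-level trigger (illustrative only; the exact lowering depends on target and relocation model):

    // Hypothetical example: access to a thread-local variable, whose address is
    // obtained via the descriptor call emitted above.
    __thread int Counter;

    int bump_counter() {
      return ++Counter;
    }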
MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB,
- DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const {
+ MachineBasicBlock *BB) const {
switch (MI->getOpcode()) {
default: assert(false && "Unexpected instr type to insert");
case X86::MINGW_ALLOCA:
- return EmitLoweredMingwAlloca(MI, BB, EM);
+ return EmitLoweredMingwAlloca(MI, BB);
+ case X86::TLSCall_32:
+ case X86::TLSCall_64:
+ return EmitLoweredTLSCall(MI, BB);
case X86::CMOV_GR8:
case X86::CMOV_V1I64:
case X86::CMOV_FR32:
@@ -8486,7 +9479,12 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::CMOV_V4F32:
case X86::CMOV_V2F64:
case X86::CMOV_V2I64:
- return EmitLoweredSelect(MI, BB, EM);
+ case X86::CMOV_GR16:
+ case X86::CMOV_GR32:
+ case X86::CMOV_RFP32:
+ case X86::CMOV_RFP64:
+ case X86::CMOV_RFP80:
+ return EmitLoweredSelect(MI, BB);
case X86::FP32_TO_INT16_IN_MEM:
case X86::FP32_TO_INT32_IN_MEM:
@@ -8504,23 +9502,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// mode when truncating to an integer value.
MachineFunction *F = BB->getParent();
int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
- addFrameReference(BuildMI(BB, DL, TII->get(X86::FNSTCW16m)), CWFrameIdx);
+ addFrameReference(BuildMI(*BB, MI, DL,
+ TII->get(X86::FNSTCW16m)), CWFrameIdx);
// Load the old value of the high byte of the control word...
unsigned OldCW =
F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass);
- addFrameReference(BuildMI(BB, DL, TII->get(X86::MOV16rm), OldCW),
+ addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
CWFrameIdx);
// Set the high part to be round to zero...
- addFrameReference(BuildMI(BB, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
+ addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
.addImm(0xC7F);
// Reload the modified control word now...
- addFrameReference(BuildMI(BB, DL, TII->get(X86::FLDCW16m)), CWFrameIdx);
+ addFrameReference(BuildMI(*BB, MI, DL,
+ TII->get(X86::FLDCW16m)), CWFrameIdx);
// Restore the memory image of control word to original value
- addFrameReference(BuildMI(BB, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
+ addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
.addReg(OldCW);
// Get the X86 opcode to use.
@@ -8559,48 +9559,53 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
} else {
AM.Disp = Op.getImm();
}
- addFullAddress(BuildMI(BB, DL, TII->get(Opc)), AM)
- .addReg(MI->getOperand(X86AddrNumOperands).getReg());
+ addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
+ .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
// Reload the original control word now.
- addFrameReference(BuildMI(BB, DL, TII->get(X86::FLDCW16m)), CWFrameIdx);
+ addFrameReference(BuildMI(*BB, MI, DL,
+ TII->get(X86::FLDCW16m)), CWFrameIdx);
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
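The control-word dance above exists because the x87 store honours the current rounding mode while the operation being lowered needs truncation toward zero. A hedged C++ analogue using the standard floating-point environment (an analogy, not the lowering itself):

    #include <cfenv>
    #include <cmath>

    // Hypothetical analogue of FP*_TO_INT*_IN_MEM: force round-toward-zero
    // (the 0xC7F store), perform the rounding-mode-dependent conversion
    // (the x87 store), then restore the saved control word (the final FLDCW).
    long truncate_like_fistp(double d) {
      const int Saved = std::fegetround();
      std::fesetround(FE_TOWARDZERO);
      long Result = std::lrint(d);
      std::fesetround(Saved);
      return Result;
    }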
// String/text processing lowering.
case X86::PCMPISTRM128REG:
+ case X86::VPCMPISTRM128REG:
return EmitPCMP(MI, BB, 3, false /* in-mem */);
case X86::PCMPISTRM128MEM:
+ case X86::VPCMPISTRM128MEM:
return EmitPCMP(MI, BB, 3, true /* in-mem */);
case X86::PCMPESTRM128REG:
+ case X86::VPCMPESTRM128REG:
return EmitPCMP(MI, BB, 5, false /* in mem */);
case X86::PCMPESTRM128MEM:
+ case X86::VPCMPESTRM128MEM:
return EmitPCMP(MI, BB, 5, true /* in mem */);
// Atomic Lowering.
case X86::ATOMAND32:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
X86::AND32ri, X86::MOV32rm,
- X86::LCMPXCHG32, X86::MOV32rr,
+ X86::LCMPXCHG32,
X86::NOT32r, X86::EAX,
X86::GR32RegisterClass);
case X86::ATOMOR32:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr,
X86::OR32ri, X86::MOV32rm,
- X86::LCMPXCHG32, X86::MOV32rr,
+ X86::LCMPXCHG32,
X86::NOT32r, X86::EAX,
X86::GR32RegisterClass);
case X86::ATOMXOR32:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr,
X86::XOR32ri, X86::MOV32rm,
- X86::LCMPXCHG32, X86::MOV32rr,
+ X86::LCMPXCHG32,
X86::NOT32r, X86::EAX,
X86::GR32RegisterClass);
case X86::ATOMNAND32:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
X86::AND32ri, X86::MOV32rm,
- X86::LCMPXCHG32, X86::MOV32rr,
+ X86::LCMPXCHG32,
X86::NOT32r, X86::EAX,
X86::GR32RegisterClass, true);
case X86::ATOMMIN32:
@@ -8615,25 +9620,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::ATOMAND16:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
X86::AND16ri, X86::MOV16rm,
- X86::LCMPXCHG16, X86::MOV16rr,
+ X86::LCMPXCHG16,
X86::NOT16r, X86::AX,
X86::GR16RegisterClass);
case X86::ATOMOR16:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr,
X86::OR16ri, X86::MOV16rm,
- X86::LCMPXCHG16, X86::MOV16rr,
+ X86::LCMPXCHG16,
X86::NOT16r, X86::AX,
X86::GR16RegisterClass);
case X86::ATOMXOR16:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr,
X86::XOR16ri, X86::MOV16rm,
- X86::LCMPXCHG16, X86::MOV16rr,
+ X86::LCMPXCHG16,
X86::NOT16r, X86::AX,
X86::GR16RegisterClass);
case X86::ATOMNAND16:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
X86::AND16ri, X86::MOV16rm,
- X86::LCMPXCHG16, X86::MOV16rr,
+ X86::LCMPXCHG16,
X86::NOT16r, X86::AX,
X86::GR16RegisterClass, true);
case X86::ATOMMIN16:
@@ -8648,25 +9653,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::ATOMAND8:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
X86::AND8ri, X86::MOV8rm,
- X86::LCMPXCHG8, X86::MOV8rr,
+ X86::LCMPXCHG8,
X86::NOT8r, X86::AL,
X86::GR8RegisterClass);
case X86::ATOMOR8:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr,
X86::OR8ri, X86::MOV8rm,
- X86::LCMPXCHG8, X86::MOV8rr,
+ X86::LCMPXCHG8,
X86::NOT8r, X86::AL,
X86::GR8RegisterClass);
case X86::ATOMXOR8:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr,
X86::XOR8ri, X86::MOV8rm,
- X86::LCMPXCHG8, X86::MOV8rr,
+ X86::LCMPXCHG8,
X86::NOT8r, X86::AL,
X86::GR8RegisterClass);
case X86::ATOMNAND8:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
X86::AND8ri, X86::MOV8rm,
- X86::LCMPXCHG8, X86::MOV8rr,
+ X86::LCMPXCHG8,
X86::NOT8r, X86::AL,
X86::GR8RegisterClass, true);
// FIXME: There are no CMOV8 instructions; MIN/MAX need some other way.
@@ -8674,25 +9679,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::ATOMAND64:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
X86::AND64ri32, X86::MOV64rm,
- X86::LCMPXCHG64, X86::MOV64rr,
+ X86::LCMPXCHG64,
X86::NOT64r, X86::RAX,
X86::GR64RegisterClass);
case X86::ATOMOR64:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr,
X86::OR64ri32, X86::MOV64rm,
- X86::LCMPXCHG64, X86::MOV64rr,
+ X86::LCMPXCHG64,
X86::NOT64r, X86::RAX,
X86::GR64RegisterClass);
case X86::ATOMXOR64:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr,
X86::XOR64ri32, X86::MOV64rm,
- X86::LCMPXCHG64, X86::MOV64rr,
+ X86::LCMPXCHG64,
X86::NOT64r, X86::RAX,
X86::GR64RegisterClass);
case X86::ATOMNAND64:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
X86::AND64ri32, X86::MOV64rm,
- X86::LCMPXCHG64, X86::MOV64rr,
+ X86::LCMPXCHG64,
X86::NOT64r, X86::RAX,
X86::GR64RegisterClass, true);
case X86::ATOMMIN64:
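
EmitAtomicBitwiseWithCustomInserter expands each ATOM* pseudo above into a load / bitwise-op / LCMPXCHG retry loop; the explicit MOV copy opcode argument is dropped from these calls to match the updated helper signature in X86ISelLowering.h further down. A standalone C++11 sketch of that compare-exchange loop shape, using std::atomic instead of the custom inserter (returning the pre-update value, as the atomic RMW nodes do):

#include <atomic>
#include <cstdint>
#include <cstdio>

// Sketch of the expansion pattern: read the old value, compute the bitwise
// result, then try a compare-exchange; on failure, retry with the freshly
// observed value (what LCMPXCHG leaves behind in EAX/RAX).
uint32_t atomic_nand32(std::atomic<uint32_t>& v, uint32_t mask) {
  uint32_t old = v.load(std::memory_order_relaxed);
  while (!v.compare_exchange_weak(old, ~(old & mask),
                                  std::memory_order_seq_cst,
                                  std::memory_order_relaxed)) {
    // 'old' has been updated to the current value; loop and retry.
  }
  return old;  // value before the update
}

int main() {
  std::atomic<uint32_t> v{0xFFFF00FFu};
  uint32_t before = atomic_nand32(v, 0x0000FFFFu);
  std::printf("before=%08x after=%08x\n", before, v.load());
  return 0;
}
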
@@ -8789,7 +9794,8 @@ void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
bool X86TargetLowering::isGAPlusOffset(SDNode *N,
- GlobalValue* &GA, int64_t &Offset) const{
+ const GlobalValue* &GA,
+ int64_t &Offset) const {
if (N->getOpcode() == X86ISD::Wrapper) {
if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
@@ -8800,82 +9806,104 @@ bool X86TargetLowering::isGAPlusOffset(SDNode *N,
return TargetLowering::isGAPlusOffset(N, GA, Offset);
}
-static bool EltsFromConsecutiveLoads(ShuffleVectorSDNode *N, unsigned NumElems,
- EVT EltVT, LoadSDNode *&LDBase,
- unsigned &LastLoadedElt,
- SelectionDAG &DAG, MachineFrameInfo *MFI,
- const TargetLowering &TLI) {
- LDBase = NULL;
- LastLoadedElt = -1U;
- for (unsigned i = 0; i < NumElems; ++i) {
- if (N->getMaskElt(i) < 0) {
- if (!LDBase)
- return false;
- continue;
- }
-
- SDValue Elt = DAG.getShuffleScalarElt(N, i);
- if (!Elt.getNode() ||
- (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
- return false;
- if (!LDBase) {
- if (Elt.getNode()->getOpcode() == ISD::UNDEF)
- return false;
- LDBase = cast<LoadSDNode>(Elt.getNode());
- LastLoadedElt = i;
- continue;
- }
- if (Elt.getOpcode() == ISD::UNDEF)
- continue;
-
- LoadSDNode *LD = cast<LoadSDNode>(Elt);
- if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i))
- return false;
- LastLoadedElt = i;
- }
- return true;
-}
-
/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
/// if the load addresses are consecutive, non-overlapping, and in the right
-/// order. In the case of v2i64, it will see if it can rewrite the
-/// shuffle to be an appropriate build vector so it can take advantage of
-// performBuildVectorCombine.
+/// order.
static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
const TargetLowering &TLI) {
DebugLoc dl = N->getDebugLoc();
EVT VT = N->getValueType(0);
- EVT EltVT = VT.getVectorElementType();
- ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
- unsigned NumElems = VT.getVectorNumElements();
if (VT.getSizeInBits() != 128)
return SDValue();
- // Try to combine a vector_shuffle into a 128-bit load.
- MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
- LoadSDNode *LD = NULL;
- unsigned LastLoadedElt;
- if (!EltsFromConsecutiveLoads(SVN, NumElems, EltVT, LD, LastLoadedElt, DAG,
- MFI, TLI))
+ SmallVector<SDValue, 16> Elts;
+ for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
+ Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
+
+ return EltsFromConsecutiveLoads(VT, Elts, dl, DAG);
+}
+
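
PerformShuffleCombine now just collects each shuffled scalar via getShuffleScalarElt and lets EltsFromConsecutiveLoads decide whether the whole 128-bit value can be fetched with one load. The equivalence it relies on, shown as a plain C++ sketch with no LLVM types (assuming the element loads are adjacent, non-overlapping and in order):

#include <cstring>
#include <cstdio>

// Gathering four adjacent 4-byte elements one at a time yields the same bytes
// as a single 16-byte load from the base address; that is the rewrite the
// combine performs when the element loads are provably consecutive.
struct Vec4 { float e[4]; };

Vec4 gather_elementwise(const float* base) {
  Vec4 v;
  for (int i = 0; i < 4; ++i)
    v.e[i] = base[i];               // four scalar loads: base+0, +4, +8, +12
  return v;
}

Vec4 load_wide(const float* base) {
  Vec4 v;
  std::memcpy(&v, base, sizeof v);  // one 128-bit-sized load
  return v;
}

int main() {
  float data[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  Vec4 a = gather_elementwise(data);
  Vec4 b = load_wide(data);
  std::printf("%d\n", std::memcmp(&a, &b, sizeof a) == 0);  // prints 1
  return 0;
}
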
+/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
+/// generation and convert it from being a bunch of shuffles and extracts
+/// to a simple store and scalar loads to extract the elements.
+static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
+ const TargetLowering &TLI) {
+ SDValue InputVector = N->getOperand(0);
+
+ // Only operate on vectors of 4 elements, where the alternative shuffling
+ // gets to be more expensive.
+ if (InputVector.getValueType() != MVT::v4i32)
return SDValue();
- if (LastLoadedElt == NumElems - 1) {
- if (DAG.InferPtrAlignment(LD->getBasePtr()) >= 16)
- return DAG.getLoad(VT, dl, LD->getChain(), LD->getBasePtr(),
- LD->getSrcValue(), LD->getSrcValueOffset(),
- LD->isVolatile(), LD->isNonTemporal(), 0);
- return DAG.getLoad(VT, dl, LD->getChain(), LD->getBasePtr(),
- LD->getSrcValue(), LD->getSrcValueOffset(),
- LD->isVolatile(), LD->isNonTemporal(),
- LD->getAlignment());
- } else if (NumElems == 4 && LastLoadedElt == 1) {
- SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
- SDValue Ops[] = { LD->getChain(), LD->getBasePtr() };
- SDValue ResNode = DAG.getNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2);
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, ResNode);
+ // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
+ // single use which is a sign-extend or zero-extend, and all elements are
+ // used.
+ SmallVector<SDNode *, 4> Uses;
+ unsigned ExtractedElements = 0;
+ for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
+ UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
+ if (UI.getUse().getResNo() != InputVector.getResNo())
+ return SDValue();
+
+ SDNode *Extract = *UI;
+ if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+ return SDValue();
+
+ if (Extract->getValueType(0) != MVT::i32)
+ return SDValue();
+ if (!Extract->hasOneUse())
+ return SDValue();
+ if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
+ Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
+ return SDValue();
+ if (!isa<ConstantSDNode>(Extract->getOperand(1)))
+ return SDValue();
+
+ // Record which element was extracted.
+ ExtractedElements |=
+ 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
+
+ Uses.push_back(Extract);
+ }
+
+ // If not all the elements were used, this may not be worthwhile.
+ if (ExtractedElements != 15)
+ return SDValue();
+
+ // Ok, we've now decided to do the transformation.
+ DebugLoc dl = InputVector.getDebugLoc();
+
+ // Store the value to a temporary stack slot.
+ SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
+ SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr, NULL,
+ 0, false, false, 0);
+
+ // Replace each use (extract) with a load of the appropriate element.
+ for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
+ UE = Uses.end(); UI != UE; ++UI) {
+ SDNode *Extract = *UI;
+
+ // Compute the element's address.
+ SDValue Idx = Extract->getOperand(1);
+ unsigned EltSize =
+ InputVector.getValueType().getVectorElementType().getSizeInBits()/8;
+ uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue();
+ SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
+
+ SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(),
+ OffsetVal, StackPtr);
+
+ // Load the scalar.
+ SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch,
+ ScalarAddr, NULL, 0, false, false, 0);
+
+ // Replace the extract with the load.
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar);
}
+
+ // The replacement was made in place; don't return anything.
return SDValue();
}
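
The new PerformEXTRACT_VECTOR_ELTCombine only fires when every lane of a v4i32 is extracted (the bitmask reaches 15) and each extract feeds a single sign- or zero-extend; it then stores the vector to a stack temporary once and turns each extract into a scalar load at base + index * element size. A hedged sketch of that shape in plain C++, with an array standing in for the stack slot and the DAG nodes:

#include <cstdint>
#include <cstring>
#include <cstdio>

// Instead of shuffling the vector to pull out each lane, spill it once and
// read the lanes back as scalar loads at offset = index * sizeof(element).
struct V4i32 { int32_t lane[4]; };

int64_t sum_sign_extended(const V4i32& v) {
  alignas(16) unsigned char slot[sizeof(V4i32)];  // the "stack temporary"
  std::memcpy(slot, &v, sizeof v);                // one store of the vector

  int64_t sum = 0;
  for (unsigned i = 0; i < 4; ++i) {
    int32_t elt;
    std::memcpy(&elt, slot + i * sizeof(int32_t), sizeof elt);  // scalar load
    sum += static_cast<int64_t>(elt);             // the sign-extend use
  }
  return sum;
}

int main() {
  V4i32 v = {{1, -2, 3, -4}};
  std::printf("%lld\n", static_cast<long long>(sum_sign_extended(v)));  // -2
  return 0;
}
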
@@ -8907,8 +9935,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// Converting this to a min would handle NaNs incorrectly, and swapping
// the operands would cause it to handle comparisons between positive
// and negative zero incorrectly.
- if (!FiniteOnlyFPMath() &&
- (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) {
+ if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
if (!UnsafeFPMath &&
!(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
break;
@@ -8946,8 +9973,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// Converting this to a max would handle NaNs incorrectly, and swapping
// the operands would cause it to handle comparisons between positive
// and negative zero incorrectly.
- if (!FiniteOnlyFPMath() &&
- (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) {
+ if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
if (!UnsafeFPMath &&
!(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
break;
@@ -8976,8 +10002,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// cause it to handle NaNs incorrectly.
if (!UnsafeFPMath &&
!(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
- if (!FiniteOnlyFPMath() &&
- (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
+ if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
break;
std::swap(LHS, RHS);
}
@@ -9002,8 +10027,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
case ISD::SETULT:
// Converting this to a max would handle NaNs incorrectly.
- if (!FiniteOnlyFPMath() &&
- (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
+ if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
break;
Opcode = X86ISD::FMAX;
break;
@@ -9013,8 +10037,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// cause it to handle NaNs incorrectly.
if (!UnsafeFPMath &&
!DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
- if (!FiniteOnlyFPMath() &&
- (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
+ if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
break;
std::swap(LHS, RHS);
}
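
The hunks above drop the FiniteOnlyFPMath() guard, so the NaN check now always runs before a compare-plus-select is rewritten into FMIN/FMAX. The caution exists because the SSE scalar min/max instructions are not commutative once NaNs (or mixed-sign zeros) are involved: by the commonly documented rule they simply return the second source operand in those cases. A small standalone C++ illustration, with the instruction behaviour modeled by hand rather than executed:

#include <cmath>
#include <cstdio>
#include <limits>

// Model of the scalar SSE min rule: the comparison is false for NaN, so the
// second operand is returned. Swapping operands therefore changes the result
// when one of them is NaN, which a plain select would never do.
static float minss_model(float a, float b) {
  return (a < b) ? a : b;
}

int main() {
  const float nan = std::numeric_limits<float>::quiet_NaN();
  float x = nan, y = 1.0f;

  float via_select = (x < y) ? x : y;    // 1.0: the select keeps y
  float one_order  = minss_model(x, y);  // 1.0 in this operand order...
  float swapped    = minss_model(y, x);  // ...but NaN once operands swap
  std::printf("%f %f %f\n", via_select, one_order, swapped);
  return 0;
}
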
@@ -9451,9 +10474,13 @@ static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
}
static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget *Subtarget) {
+ if (DCI.isBeforeLegalizeOps())
+ return SDValue();
+
EVT VT = N->getValueType(0);
- if (VT != MVT::i64 || !Subtarget->is64Bit())
+ if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
return SDValue();
// fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
@@ -9463,6 +10490,8 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
std::swap(N0, N1);
if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
return SDValue();
+ if (!N0.hasOneUse() || !N1.hasOneUse())
+ return SDValue();
SDValue ShAmt0 = N0.getOperand(1);
if (ShAmt0.getValueType() != MVT::i8)
@@ -9485,11 +10514,14 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
std::swap(ShAmt0, ShAmt1);
}
+ unsigned Bits = VT.getSizeInBits();
if (ShAmt1.getOpcode() == ISD::SUB) {
SDValue Sum = ShAmt1.getOperand(0);
if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
- if (SumC->getSExtValue() == 64 &&
- ShAmt1.getOperand(1) == ShAmt0)
+ SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
+ if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
+ ShAmt1Op1 = ShAmt1Op1.getOperand(0);
+ if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
return DAG.getNode(Opc, DL, VT,
Op0, Op1,
DAG.getNode(ISD::TRUNCATE, DL,
@@ -9498,7 +10530,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
} else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
if (ShAmt0C &&
- ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == 64)
+ ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
return DAG.getNode(Opc, DL, VT,
N0.getOperand(0), N1.getOperand(0),
DAG.getNode(ISD::TRUNCATE, DL,
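
PerformOrCombine now matches the double-shift pattern at 16, 32 and 64 bits instead of only i64, folding (x << c) | (y >> (Bits - c)) into SHLD/SHRD even when the shift amount reaches it through a TRUNCATE or as a pair of constants summing to the bit width. A standalone check of the underlying identity for 32 bits (valid for 0 < c < 32, since c of 0 or 32 would make one of the C++ shifts undefined):

#include <cstdint>
#include <cstdio>

// For 0 < c < 32, (x << c) | (y >> (32 - c)) selects a 32-bit window of the
// 64-bit concatenation x:y, which is exactly what SHLD computes.
static uint32_t shld32(uint32_t x, uint32_t y, unsigned c) {
  return (x << c) | (y >> (32u - c));
}

int main() {
  const uint32_t x = 0x12345678u, y = 0x9ABCDEF0u;
  for (unsigned c = 1; c < 32; ++c) {
    const uint64_t concat = (uint64_t(x) << 32) | y;     // x:y
    const uint32_t ref = uint32_t(concat >> (32u - c));  // 32-bit window
    if (shld32(x, y, c) != ref) {
      std::printf("mismatch at c=%u\n", c);              // never fires
      return 1;
    }
  }
  std::printf("ok\n");
  return 0;
}
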
@@ -9662,8 +10694,9 @@ static SDValue PerformBTCombine(SDNode *N,
unsigned BitWidth = Op1.getValueSizeInBits();
APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
APInt KnownZero, KnownOne;
- TargetLowering::TargetLoweringOpt TLO(DAG);
- TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
+ !DCI.isBeforeLegalizeOps());
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
DCI.CommitTargetLoweringOpt(TLO);
@@ -9684,58 +10717,6 @@ static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
-// On X86 and X86-64, atomic operations are lowered to locked instructions.
-// Locked instructions, in turn, have implicit fence semantics (all memory
-// operations are flushed before issuing the locked instruction, and the
-// are not buffered), so we can fold away the common pattern of
-// fence-atomic-fence.
-static SDValue PerformMEMBARRIERCombine(SDNode* N, SelectionDAG &DAG) {
- SDValue atomic = N->getOperand(0);
- switch (atomic.getOpcode()) {
- case ISD::ATOMIC_CMP_SWAP:
- case ISD::ATOMIC_SWAP:
- case ISD::ATOMIC_LOAD_ADD:
- case ISD::ATOMIC_LOAD_SUB:
- case ISD::ATOMIC_LOAD_AND:
- case ISD::ATOMIC_LOAD_OR:
- case ISD::ATOMIC_LOAD_XOR:
- case ISD::ATOMIC_LOAD_NAND:
- case ISD::ATOMIC_LOAD_MIN:
- case ISD::ATOMIC_LOAD_MAX:
- case ISD::ATOMIC_LOAD_UMIN:
- case ISD::ATOMIC_LOAD_UMAX:
- break;
- default:
- return SDValue();
- }
-
- SDValue fence = atomic.getOperand(0);
- if (fence.getOpcode() != ISD::MEMBARRIER)
- return SDValue();
-
- switch (atomic.getOpcode()) {
- case ISD::ATOMIC_CMP_SWAP:
- return DAG.UpdateNodeOperands(atomic, fence.getOperand(0),
- atomic.getOperand(1), atomic.getOperand(2),
- atomic.getOperand(3));
- case ISD::ATOMIC_SWAP:
- case ISD::ATOMIC_LOAD_ADD:
- case ISD::ATOMIC_LOAD_SUB:
- case ISD::ATOMIC_LOAD_AND:
- case ISD::ATOMIC_LOAD_OR:
- case ISD::ATOMIC_LOAD_XOR:
- case ISD::ATOMIC_LOAD_NAND:
- case ISD::ATOMIC_LOAD_MIN:
- case ISD::ATOMIC_LOAD_MAX:
- case ISD::ATOMIC_LOAD_UMIN:
- case ISD::ATOMIC_LOAD_UMAX:
- return DAG.UpdateNodeOperands(atomic, fence.getOperand(0),
- atomic.getOperand(1), atomic.getOperand(2));
- default:
- return SDValue();
- }
-}
-
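
The deleted PerformMEMBARRIERCombine folded a fence that immediately preceded an atomic RMW, relying on the fact spelled out in its comment: x86 locked instructions already behave as full barriers. Barrier lowering moves to the new MEMBARRIER/MFENCE/SFENCE/LFENCE node kinds added in X86ISelLowering.h later in this patch. A short C++11 illustration of the property, hedged as typical codegen behaviour rather than a guarantee of any particular compiler:

#include <atomic>
#include <cstdio>

// On x86/x86-64 a locked RMW (e.g. LOCK XADD) is already a full memory
// barrier, so a sequentially consistent fetch_add normally compiles to the
// locked instruction alone, with no separate MFENCE next to it; that is the
// fence-atomic-fence pattern the removed combine used to collapse.
std::atomic<long> counter{0};

long bump() {
  return counter.fetch_add(1, std::memory_order_seq_cst);
}

int main() {
  std::printf("%ld\n", bump());  // prints 0; counter is now 1
  return 0;
}
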
static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG) {
// (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
// (and (i32 x86isd::setcc_carry), 1)
@@ -9767,27 +10748,146 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
SelectionDAG &DAG = DCI.DAG;
switch (N->getOpcode()) {
default: break;
- case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, *this);
+ case ISD::EXTRACT_VECTOR_ELT:
+ return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, *this);
case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget);
case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI);
case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
case ISD::SHL:
case ISD::SRA:
case ISD::SRL: return PerformShiftCombine(N, DAG, Subtarget);
- case ISD::OR: return PerformOrCombine(N, DAG, Subtarget);
+ case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
case X86ISD::FXOR:
case X86ISD::FOR: return PerformFORCombine(N, DAG);
case X86ISD::FAND: return PerformFANDCombine(N, DAG);
case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
- case ISD::MEMBARRIER: return PerformMEMBARRIERCombine(N, DAG);
case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG);
+ case X86ISD::SHUFPS: // Handle all target specific shuffles
+ case X86ISD::SHUFPD:
+ case X86ISD::PUNPCKHBW:
+ case X86ISD::PUNPCKHWD:
+ case X86ISD::PUNPCKHDQ:
+ case X86ISD::PUNPCKHQDQ:
+ case X86ISD::UNPCKHPS:
+ case X86ISD::UNPCKHPD:
+ case X86ISD::PUNPCKLBW:
+ case X86ISD::PUNPCKLWD:
+ case X86ISD::PUNPCKLDQ:
+ case X86ISD::PUNPCKLQDQ:
+ case X86ISD::UNPCKLPS:
+ case X86ISD::UNPCKLPD:
+ case X86ISD::MOVHLPS:
+ case X86ISD::MOVLHPS:
+ case X86ISD::PSHUFD:
+ case X86ISD::PSHUFHW:
+ case X86ISD::PSHUFLW:
+ case X86ISD::MOVSS:
+ case X86ISD::MOVSD:
+ case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, *this);
}
return SDValue();
}
+/// isTypeDesirableForOp - Return true if the target has native support for
+/// the specified value type and it is 'desirable' to use the type for the
+/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
+/// instruction encodings are longer and some i16 instructions are slow.
+bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
+ if (!isTypeLegal(VT))
+ return false;
+ if (VT != MVT::i16)
+ return true;
+
+ switch (Opc) {
+ default:
+ return true;
+ case ISD::LOAD:
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND:
+ case ISD::ANY_EXTEND:
+ case ISD::SHL:
+ case ISD::SRL:
+ case ISD::SUB:
+ case ISD::ADD:
+ case ISD::MUL:
+ case ISD::AND:
+ case ISD::OR:
+ case ISD::XOR:
+ return false;
+ }
+}
+
+/// IsDesirableToPromoteOp - This method queries the target whether it is
+/// beneficial for the DAG combiner to promote the specified node. If true, it
+/// should return the desired promotion type by reference.
+bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
+ EVT VT = Op.getValueType();
+ if (VT != MVT::i16)
+ return false;
+
+ bool Promote = false;
+ bool Commute = false;
+ switch (Op.getOpcode()) {
+ default: break;
+ case ISD::LOAD: {
+ LoadSDNode *LD = cast<LoadSDNode>(Op);
+ // If the non-extending load has a single use and it's not live out, then it
+ // might be folded.
+ if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
+ Op.hasOneUse()*/) {
+ for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
+ UE = Op.getNode()->use_end(); UI != UE; ++UI) {
+ // The only case where we'd want to promote LOAD (rather than it being
+ // promoted as an operand) is when its only use is liveout.
+ if (UI->getOpcode() != ISD::CopyToReg)
+ return false;
+ }
+ }
+ Promote = true;
+ break;
+ }
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND:
+ case ISD::ANY_EXTEND:
+ Promote = true;
+ break;
+ case ISD::SHL:
+ case ISD::SRL: {
+ SDValue N0 = Op.getOperand(0);
+ // Look out for (store (shl (load), x)).
+ if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
+ return false;
+ Promote = true;
+ break;
+ }
+ case ISD::ADD:
+ case ISD::MUL:
+ case ISD::AND:
+ case ISD::OR:
+ case ISD::XOR:
+ Commute = true;
+ // fallthrough
+ case ISD::SUB: {
+ SDValue N0 = Op.getOperand(0);
+ SDValue N1 = Op.getOperand(1);
+ if (!Commute && MayFoldLoad(N1))
+ return false;
+ // Avoid disabling potential load folding opportunities.
+ if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
+ return false;
+ if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
+ return false;
+ Promote = true;
+ }
+ }
+
+ PVT = MVT::i32;
+ return Promote;
+}
+
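
Together, isTypeDesirableForOp and IsDesirableToPromoteOp steer the combiner away from i16 arithmetic (the encodings carry an operand-size prefix and some i16 instructions are slow), promoting to i32 unless that would spoil a load-folding opportunity. The promotion is sound because wrapping 16-bit arithmetic done in 32 bits and truncated back produces the same low 16 bits; a minimal standalone check of that claim for addition:

#include <cstdint>
#include <cstdio>

// Doing the arithmetic in 32 bits and truncating afterwards matches the
// 16-bit result for the wrapping ops the hook promotes (add, sub, mul,
// and, or, xor, and shifts by less than 16).
static uint16_t add16(uint16_t a, uint16_t b) {
  return static_cast<uint16_t>(a + b);
}

static uint16_t add16_promoted(uint16_t a, uint16_t b) {
  uint32_t wide = uint32_t(a) + uint32_t(b);   // the promoted i32 add
  return static_cast<uint16_t>(wide);          // truncate back to i16
}

int main() {
  const uint16_t samples[] = {0u, 1u, 0x7FFFu, 0x8000u, 0xFFFFu, 0x1234u};
  for (uint16_t a : samples)
    for (uint16_t b : samples)
      if (add16(a, b) != add16_promoted(a, b)) {  // never fires
        std::printf("mismatch\n");
        return 1;
      }
  std::printf("ok\n");
  return 0;
}
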
//===----------------------------------------------------------------------===//
// X86 Inline Assembly Support
//===----------------------------------------------------------------------===//
@@ -9799,8 +10899,8 @@ static bool LowerToBSwap(CallInst *CI) {
// so don't worry about this.
// Verify this is a simple bswap.
- if (CI->getNumOperands() != 2 ||
- CI->getType() != CI->getOperand(1)->getType() ||
+ if (CI->getNumArgOperands() != 1 ||
+ CI->getType() != CI->getArgOperand(0)->getType() ||
!CI->getType()->isIntegerTy())
return false;
@@ -9813,7 +10913,7 @@ static bool LowerToBSwap(CallInst *CI) {
Module *M = CI->getParent()->getParent()->getParent();
Constant *Int = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
- Value *Op = CI->getOperand(1);
+ Value *Op = CI->getArgOperand(0);
Op = CallInst::Create(Int, Op, CI->getName(), CI);
CI->replaceAllUsesWith(Op);
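
LowerToBSwap now validates the inline-asm pattern through getNumArgOperands()/getArgOperand(0) before substituting a call to the llvm.bswap intrinsic. For reference, what that intrinsic computes for a 32-bit value, written as a plain C++ sketch with no compiler builtins assumed:

#include <cstdint>
#include <cstdio>

// Reference byte swap: the value llvm.bswap.i32 (and the BSWAP instruction
// it lowers to) produces for a 32-bit input.
static uint32_t bswap32(uint32_t v) {
  return ((v & 0x000000FFu) << 24) |
         ((v & 0x0000FF00u) <<  8) |
         ((v & 0x00FF0000u) >>  8) |
         ((v & 0xFF000000u) >> 24);
}

int main() {
  std::printf("%08x\n", bswap32(0x12345678u));  // prints 78563412
  return 0;
}
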
@@ -9946,7 +11046,6 @@ LowerXConstraint(EVT ConstraintVT) const {
/// vector. If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
char Constraint,
- bool hasMemory,
std::vector<SDValue>&Ops,
SelectionDAG &DAG) const {
SDValue Result(0, 0);
@@ -9988,9 +11087,8 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
case 'e': {
// 32-bit signed value
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
- const ConstantInt *CI = C->getConstantIntValue();
- if (CI->isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
- C->getSExtValue())) {
+ if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
+ C->getSExtValue())) {
// Widen to 64 bits here to get it sign extended.
Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
break;
@@ -10003,9 +11101,8 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
case 'Z': {
// 32-bit unsigned value
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
- const ConstantInt *CI = C->getConstantIntValue();
- if (CI->isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
- C->getZExtValue())) {
+ if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
+ C->getZExtValue())) {
Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
break;
}
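
The 'e' and 'Z' constraint handlers switch to the static ConstantInt::isValueValidForType, but the tests themselves are unchanged: 'e' accepts a value representable as a sign-extended 32-bit immediate (the same test the i64immSExt32 pattern fragment in X86Instr64bit.td encodes) and 'Z' a zero-extended one. A standalone sketch of those two range checks:

#include <cstdint>
#include <cstdio>

// 'e': the 64-bit value must survive a round trip through a sign-extended
// 32-bit immediate. 'Z': through a zero-extended one.
static bool fits_sext32(int64_t v)  { return v == int64_t(int32_t(v)); }
static bool fits_zext32(uint64_t v) { return v == uint64_t(uint32_t(v)); }

int main() {
  std::printf("%d %d\n", fits_sext32(-1), fits_zext32(0xFFFFFFFFull));     // 1 1
  std::printf("%d %d\n", fits_sext32(1LL << 31), fits_zext32(1ULL << 32)); // 0 0
  return 0;
}
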
@@ -10022,6 +11119,12 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
break;
}
+ // In any sort of PIC mode, addresses need to be computed at runtime by
+ // adding in a register or some sort of table lookup. These can't
+ // be used as immediates.
+ if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
+ return;
+
// If we are in non-pic codegen mode, we allow the address of a global (with
// an optional displacement) to be used with 'i'.
GlobalAddressSDNode *GA = 0;
@@ -10050,18 +11153,15 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
return;
}
- GlobalValue *GV = GA->getGlobal();
+ const GlobalValue *GV = GA->getGlobal();
// If we require an extra load to get this address, as in PIC mode, we
// can't accept it.
if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV,
getTargetMachine())))
return;
- if (hasMemory)
- Op = LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG);
- else
- Op = DAG.getTargetGlobalAddress(GV, GA->getValueType(0), Offset);
- Result = Op;
+ Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
+ GA->getValueType(0), Offset);
break;
}
}
@@ -10070,8 +11170,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
Ops.push_back(Result);
return;
}
- return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, hasMemory,
- Ops, DAG);
+ return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
std::vector<unsigned> X86TargetLowering::
@@ -10313,41 +11412,3 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
return Res;
}
-
-//===----------------------------------------------------------------------===//
-// X86 Widen vector type
-//===----------------------------------------------------------------------===//
-
-/// getWidenVectorType: given a vector type, returns the type to widen
-/// to (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
-/// If there is no vector type that we want to widen to, returns MVT::Other
-/// When and where to widen is target dependent based on the cost of
-/// scalarizing vs using the wider vector type.
-
-EVT X86TargetLowering::getWidenVectorType(EVT VT) const {
- assert(VT.isVector());
- if (isTypeLegal(VT))
- return VT;
-
- // TODO: In computeRegisterProperty, we can compute the list of legal vector
- // type based on element type. This would speed up our search (though
- // it may not be worth it since the size of the list is relatively
- // small).
- EVT EltVT = VT.getVectorElementType();
- unsigned NElts = VT.getVectorNumElements();
-
- // On X86, it make sense to widen any vector wider than 1
- if (NElts <= 1)
- return MVT::Other;
-
- for (unsigned nVT = MVT::FIRST_VECTOR_VALUETYPE;
- nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
- EVT SVT = (MVT::SimpleValueType)nVT;
-
- if (isTypeLegal(SVT) &&
- SVT.getVectorElementType() == EltVT &&
- SVT.getVectorNumElements() > NElts)
- return SVT;
- }
- return MVT::Other;
-}
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86ISelLowering.h b/libclamav/c++/llvm/lib/Target/X86/X86ISelLowering.h
index 5936975..d2d9b28 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/libclamav/c++/llvm/lib/Target/X86/X86ISelLowering.h
@@ -196,6 +196,10 @@ namespace llvm {
// TLSADDR - Thread Local Storage.
TLSADDR,
+
+ // TLSCALL - Thread Local Storage. Used when calling an OS-provided
+ // thunk at the address obtained from an earlier relocation.
+ TLSCALL,
// SegmentBaseAddress - The address segment:0
SegmentBaseAddress,
@@ -244,6 +248,44 @@ namespace llvm {
// PTEST - Vector bitwise comparisons
PTEST,
+ // TESTP - Vector packed fp sign bitwise comparisons
+ TESTP,
+
+ // Several flavors of instructions with vector shuffle behaviors.
+ PALIGN,
+ PSHUFD,
+ PSHUFHW,
+ PSHUFLW,
+ PSHUFHW_LD,
+ PSHUFLW_LD,
+ SHUFPD,
+ SHUFPS,
+ MOVDDUP,
+ MOVSHDUP,
+ MOVSLDUP,
+ MOVSHDUP_LD,
+ MOVSLDUP_LD,
+ MOVLHPS,
+ MOVLHPD,
+ MOVHLPS,
+ MOVHLPD,
+ MOVLPS,
+ MOVLPD,
+ MOVSD,
+ MOVSS,
+ UNPCKLPS,
+ UNPCKLPD,
+ UNPCKHPS,
+ UNPCKHPD,
+ PUNPCKLBW,
+ PUNPCKLWD,
+ PUNPCKLDQ,
+ PUNPCKLQDQ,
+ PUNPCKHBW,
+ PUNPCKHWD,
+ PUNPCKHDQ,
+ PUNPCKHQDQ,
+
// VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
// according to %al. An operator is needed so that this can be expanded
// with control flow.
@@ -261,7 +303,13 @@ namespace llvm {
ATOMXOR64_DAG,
ATOMAND64_DAG,
ATOMNAND64_DAG,
- ATOMSWAP64_DAG
+ ATOMSWAP64_DAG,
+
+ // Memory barrier
+ MEMBARRIER,
+ MFENCE,
+ SFENCE,
+ LFENCE
// WARNING: Do not add anything in the end unless you want the node to
// have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
@@ -374,12 +422,6 @@ namespace llvm {
//===--------------------------------------------------------------------===//
// X86TargetLowering - X86 Implementation of the TargetLowering interface
class X86TargetLowering : public TargetLowering {
- int VarArgsFrameIndex; // FrameIndex for start of varargs area.
- int RegSaveFrameIndex; // X86-64 vararg func register save area.
- unsigned VarArgsGPOffset; // X86-64 vararg func int reg offset.
- unsigned VarArgsFPOffset; // X86-64 vararg func fp reg offset.
- int BytesToPopOnReturn; // Number of arg bytes ret should pop.
-
public:
explicit X86TargetLowering(X86TargetMachine &TM);
@@ -401,11 +443,6 @@ namespace llvm {
getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
unsigned JTI, MCContext &Ctx) const;
- // Return the number of bytes that a function should pop when it returns (in
- // addition to the space used by the return address).
- //
- unsigned getBytesToPopOnReturn() const { return BytesToPopOnReturn; }
-
/// getStackPtrReg - Return the stack pointer register we are using: either
/// ESP or RSP.
unsigned getStackPtrReg() const { return X86StackPtr; }
@@ -418,11 +455,20 @@ namespace llvm {
/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
- /// lowering. It returns EVT::iAny if SelectionDAG should be responsible for
- /// determining it.
- virtual EVT getOptimalMemOpType(uint64_t Size, unsigned Align,
- bool isSrcConst, bool isSrcStr,
- SelectionDAG &DAG) const;
+ /// lowering. If DstAlign is zero, that means the destination
+ /// alignment can satisfy any constraint. Similarly, if SrcAlign is zero it
+ /// means there is no need to check it against an alignment requirement,
+ /// probably because the source does not need to be loaded. If
+ /// 'NonScalarIntSafe' is true, that means it's safe to return a
+ /// non-scalar-integer type, e.g. empty string source, constant, or loaded
+ /// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
+ /// constant so it does not need to be loaded.
+ /// It returns EVT::Other if the type should be determined using generic
+ /// target-independent logic.
+ virtual EVT
+ getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
+ bool NonScalarIntSafe, bool MemcpyStrSrc,
+ MachineFunction &MF) const;
/// allowsUnalignedMemoryAccesses - Returns true if the target allows
/// unaligned memory accesses of the specified type.
@@ -432,20 +478,32 @@ namespace llvm {
/// LowerOperation - Provide custom lowering hooks for some operations.
///
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);
+ virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
///
virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG);
+ SelectionDAG &DAG) const;
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
- virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB,
- DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const;
+ /// isTypeDesirableForOp - Return true if the target has native support for
+ /// the specified value type and it is 'desirable' to use the type for the
+ /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
+ /// instruction encodings are longer and some i16 instructions are slow.
+ virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;
+
+ /// IsDesirableToPromoteOp - Return true if it is profitable for the DAG
+ /// combiner to promote the specified node; if so, the desired promotion
+ /// type is returned by reference. On x86, i16 ops are promoted to i32
+ /// because i16 instruction encodings are longer and some are slow.
+ virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;
+
+ virtual MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *MBB) const;
/// getTargetNodeName - This method returns the name of a target specific
@@ -466,9 +524,9 @@ namespace llvm {
unsigned Depth = 0) const;
virtual bool
- isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) const;
+ isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
- SDValue getReturnAddressFrameIndex(SelectionDAG &DAG);
+ SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
virtual bool ExpandInlineAsm(CallInst *CI) const;
@@ -486,7 +544,6 @@ namespace llvm {
/// being processed is 'm'.
virtual void LowerAsmOperandForConstraint(SDValue Op,
char ConstraintLetter,
- bool hasMemory,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const;
@@ -553,7 +610,7 @@ namespace llvm {
return !X86ScalarSSEf64 || VT == MVT::f80;
}
- virtual const X86Subtarget* getSubtarget() {
+ const X86Subtarget* getSubtarget() const {
return Subtarget;
}
@@ -564,29 +621,26 @@ namespace llvm {
(VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1
}
- /// getWidenVectorType: given a vector type, returns the type to widen
- /// to (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
- /// If there is no vector type that we want to widen to, returns EVT::Other
- /// When and were to widen is target dependent based on the cost of
- /// scalarizing vs using the wider vector type.
- virtual EVT getWidenVectorType(EVT VT) const;
-
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
- virtual FastISel *
- createFastISel(MachineFunction &mf,
- MachineModuleInfo *mmi, DwarfWriter *dw,
- DenseMap<const Value *, unsigned> &,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &,
- DenseMap<const AllocaInst *, int> &
-#ifndef NDEBUG
- , SmallSet<Instruction*, 8> &
-#endif
- );
+ virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;
/// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *F) const;
+ unsigned getRegPressureLimit(const TargetRegisterClass *RC,
+ MachineFunction &MF) const;
+
+ /// getStackCookieLocation - Return true if the target stores stack
+ /// protector cookies at a fixed offset in some non-standard address
+ /// space, and populates the address space and offset as
+ /// appropriate.
+ virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const;
+
+ protected:
+ std::pair<const TargetRegisterClass*, uint8_t>
+ findRepresentativeClass(EVT VT) const;
+
private:
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
@@ -617,17 +671,17 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals);
+ SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerMemArgument(SDValue Chain,
CallingConv::ID CallConv,
const SmallVectorImpl<ISD::InputArg> &ArgInfo,
DebugLoc dl, SelectionDAG &DAG,
const CCValAssign &VA, MachineFrameInfo *MFI,
- unsigned i);
+ unsigned i) const;
SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
DebugLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
- ISD::ArgFlagsTy Flags);
+ ISD::ArgFlagsTy Flags) const;
// Call lowering helpers.
@@ -640,123 +694,121 @@ namespace llvm {
bool isCalleeStructRet,
bool isCallerStructRet,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const;
- bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv);
+ bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
SDValue Chain, bool IsTailCall, bool Is64Bit,
- int FPDiff, DebugLoc dl);
+ int FPDiff, DebugLoc dl) const;
CCAssignFn *CCAssignFnForNode(CallingConv::ID CallConv) const;
- unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG &DAG);
+ unsigned GetAlignedArgumentStackSize(unsigned StackSize,
+ SelectionDAG &DAG) const;
std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
- bool isSigned);
+ bool isSigned) const;
SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
- SelectionDAG &DAG);
- SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG);
- SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG);
- SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG);
- SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG);
- SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG);
- SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG);
- SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG);
- SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG);
- SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG);
- SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG);
+ SelectionDAG &DAG) const;
+ SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
int64_t Offset, SelectionDAG &DAG) const;
- SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG);
- SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
- SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG);
- SDValue LowerShift(SDValue Op, SelectionDAG &DAG);
+ SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
- SelectionDAG &DAG);
- SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG);
- SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG);
- SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG);
- SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG);
- SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG);
- SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG);
- SDValue LowerFABS(SDValue Op, SelectionDAG &DAG);
- SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG);
- SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG);
- SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG);
- SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG);
- SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG);
- SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG);
- SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG);
- SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG);
- SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG);
- SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG);
- SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG);
- SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG);
- SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG);
- SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG);
- SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG);
- SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG);
- SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG);
- SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG);
- SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG);
- SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG);
- SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG);
- SDValue LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG);
- SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG);
-
- SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG);
- SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG);
- SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG);
+ SelectionDAG &DAG) const;
+ SDValue LowerBIT_CONVERT(SDValue op, SelectionDAG &DAG) const;
+ SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerToBT(SDValue And, ISD::CondCode CC,
+ DebugLoc dl, SelectionDAG &DAG) const;
+ SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerSHL(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const;
+
+ // Utility functions to help LowerVECTOR_SHUFFLE
+ SDValue LowerVECTOR_SHUFFLEv8i16(SDValue Op, SelectionDAG &DAG) const;
virtual SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals);
+ SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals);
+ SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
- DebugLoc dl, SelectionDAG &DAG);
+ const SmallVectorImpl<SDValue> &OutVals,
+ DebugLoc dl, SelectionDAG &DAG) const;
virtual bool
CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
- SelectionDAG &DAG);
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const;
void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG, unsigned NewOp);
-
- SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- const Value *DstSV, uint64_t DstSVOff);
- SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
- SDValue Chain,
- SDValue Dst, SDValue Src,
- SDValue Size, unsigned Align,
- bool AlwaysInline,
- const Value *DstSV, uint64_t DstSVOff,
- const Value *SrcSV, uint64_t SrcSVOff);
-
+ SelectionDAG &DAG, unsigned NewOp) const;
+
/// Utility function to emit string processing sse4.2 instructions
/// that return in xmm0.
/// This takes the instruction to expand, the associated machine basic
/// block, the number of args, and whether or not the second arg is
/// in memory or not.
MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB,
- unsigned argNum, bool inMem) const;
+ unsigned argNum, bool inMem) const;
/// Utility function to emit atomic bitwise operations (and, or, xor).
/// It takes the bitwise instruction to expand, the associated machine basic
@@ -768,7 +820,6 @@ namespace llvm {
unsigned immOpc,
unsigned loadOpc,
unsigned cxchgOpc,
- unsigned copyOpc,
unsigned notOpc,
unsigned EAXreg,
TargetRegisterClass *RC,
@@ -796,33 +847,26 @@ namespace llvm {
MachineBasicBlock *BB) const;
MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
- MachineBasicBlock *BB,
- DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const;
+ MachineBasicBlock *BB) const;
MachineBasicBlock *EmitLoweredMingwAlloca(MachineInstr *MI,
- MachineBasicBlock *BB,
- DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const;
+ MachineBasicBlock *BB) const;
+
+ MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent, for use with the given x86 condition code.
- SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG);
+ SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent, for use with the given x86 condition code.
SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
- SelectionDAG &DAG);
+ SelectionDAG &DAG) const;
};
namespace X86 {
- FastISel *createFastISel(MachineFunction &mf,
- MachineModuleInfo *mmi, DwarfWriter *dw,
- DenseMap<const Value *, unsigned> &,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &,
- DenseMap<const AllocaInst *, int> &
-#ifndef NDEBUG
- , SmallSet<Instruction*, 8> &
-#endif
- );
+ FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
}
}
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86Instr64bit.td b/libclamav/c++/llvm/lib/Target/X86/X86Instr64bit.td
index 8462255..0884b61 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86Instr64bit.td
+++ b/libclamav/c++/llvm/lib/Target/X86/X86Instr64bit.td
@@ -18,43 +18,50 @@
//
// 64-bits but only 32 bits are significant.
-def i64i32imm : Operand<i64>;
+def i64i32imm : Operand<i64> {
+ let ParserMatchClass = ImmSExti64i32AsmOperand;
+}
// 64-bits but only 32 bits are significant, and those bits are treated as being
// pc relative.
def i64i32imm_pcrel : Operand<i64> {
let PrintMethod = "print_pcrel_imm";
+ let ParserMatchClass = X86AbsMemAsmOperand;
}
// 64-bits but only 8 bits are significant.
def i64i8imm : Operand<i64> {
- let ParserMatchClass = ImmSExt8AsmOperand;
+ let ParserMatchClass = ImmSExti64i8AsmOperand;
}
-def lea64mem : Operand<i64> {
- let PrintMethod = "printlea64mem";
- let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm);
+def lea64_32mem : Operand<i32> {
+ let PrintMethod = "printi32mem";
+ let AsmOperandLowerMethod = "lower_lea64_32mem";
+ let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm, i8imm);
let ParserMatchClass = X86MemAsmOperand;
}
-def lea64_32mem : Operand<i32> {
- let PrintMethod = "printlea64_32mem";
- let AsmOperandLowerMethod = "lower_lea64_32mem";
- let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm);
+
+// Special i64mem for addresses of load folding tail calls. These are not
+// allowed to use callee-saved registers since they must be scheduled
+// after callee-saved registers are popped.
+def i64mem_TC : Operand<i64> {
+ let PrintMethod = "printi64mem";
+ let MIOperandInfo = (ops GR64_TC, i8imm, GR64_TC, i32imm, i8imm);
let ParserMatchClass = X86MemAsmOperand;
}
//===----------------------------------------------------------------------===//
// Complex Pattern Definitions.
//
-def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
+def lea64addr : ComplexPattern<i64, 5, "SelectLEAAddr",
[add, sub, mul, X86mul_imm, shl, or, frameindex,
X86WrapperRIP], []>;
-def tls64addr : ComplexPattern<i64, 4, "SelectTLSADDRAddr",
+def tls64addr : ComplexPattern<i64, 5, "SelectTLSADDRAddr",
[tglobaltlsaddr], []>;
-
+
//===----------------------------------------------------------------------===//
// Pattern fragments.
//
@@ -66,11 +73,7 @@ def GetLo32XForm : SDNodeXForm<imm, [{
return getI32Imm((unsigned)N->getZExtValue());
}]>;
-def i64immSExt32 : PatLeaf<(i64 imm), [{
- // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
- // sign extended field.
- return (int64_t)N->getZExtValue() == (int32_t)N->getZExtValue();
-}]>;
+def i64immSExt32 : PatLeaf<(i64 imm), [{ return i64immSExt32(N); }]>;
def i64immZExt32 : PatLeaf<(i64 imm), [{
@@ -134,7 +137,7 @@ let isCall = 1 in
// NOTE: this pattern doesn't match "X86call imm", because we do not know
// that the offset between an arbitrary immediate and the call will fit in
// the 32-bit pcrel field that we have.
- def CALL64pcrel32 : Ii32<0xE8, RawFrm,
+ def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
(outs), (ins i64i32imm_pcrel:$dst, variable_ops),
"call{q}\t$dst", []>,
Requires<[In64BitMode, NotWin64]>;
@@ -151,7 +154,7 @@ let isCall = 1 in
// FIXME: We need to teach codegen about single list of call-clobbered
// registers.
-let isCall = 1 in
+let isCall = 1, isCodeGenOnly = 1 in
// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead. Uses for argument
@@ -161,7 +164,7 @@ let isCall = 1 in
MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, EFLAGS],
Uses = [RSP] in {
- def WINCALL64pcrel32 : I<0xE8, RawFrm,
+ def WINCALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
(outs), (ins i64i32imm_pcrel:$dst, variable_ops),
"call\t$dst", []>,
Requires<[IsWin64]>;
@@ -175,32 +178,44 @@ let isCall = 1 in
}
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
-def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset,
- variable_ops),
- "#TC_RETURN $dst $offset",
- []>;
-
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
-def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset,
- variable_ops),
- "#TC_RETURN $dst $offset",
- []>;
-
-
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
- def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst, variable_ops),
- "jmp{q}\t{*}$dst # TAILCALL",
- []>;
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
+ isCodeGenOnly = 1 in
+ let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
+ FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
+ MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
+ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
+ XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
+ Uses = [RSP] in {
+ def TCRETURNdi64 : I<0, Pseudo, (outs),
+ (ins i64i32imm_pcrel:$dst, i32imm:$offset, variable_ops),
+ "#TC_RETURN $dst $offset", []>;
+ def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64_TC:$dst, i32imm:$offset,
+ variable_ops),
+ "#TC_RETURN $dst $offset", []>;
+ let mayLoad = 1 in
+ def TCRETURNmi64 : I<0, Pseudo, (outs),
+ (ins i64mem_TC:$dst, i32imm:$offset, variable_ops),
+ "#TC_RETURN $dst $offset", []>;
+
+ def TAILJMPd64 : Ii32PCRel<0xE9, RawFrm, (outs),
+ (ins i64i32imm_pcrel:$dst, variable_ops),
+ "jmp\t$dst # TAILCALL", []>;
+ def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64_TC:$dst, variable_ops),
+ "jmp{q}\t{*}$dst # TAILCALL", []>;
+
+ let mayLoad = 1 in
+ def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst, variable_ops),
+ "jmp{q}\t{*}$dst # TAILCALL", []>;
+}
// Branches
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst),
"jmp{q}\t$dst", []>;
def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst",
- [(brind GR64:$dst)]>;
+ [(brind GR64:$dst)]>, Requires<[In64BitMode]>;
def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
- [(brind (loadi64 addr:$dst))]>;
+ [(brind (loadi64 addr:$dst))]>, Requires<[In64BitMode]>;
def FARJMP64 : RI<0xFF, MRM5m, (outs), (ins opaque80mem:$dst),
"ljmp{q}\t{*}$dst", []>;
}
@@ -222,12 +237,13 @@ def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;
+let mayLoad = 1 in
def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;
let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in
def LEAVE64 : I<0xC9, RawFrm,
- (outs), (ins), "leave", []>;
+ (outs), (ins), "leave", []>, Requires<[In64BitMode]>;
let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in {
let mayLoad = 1 in {
def POP64r : I<0x58, AddRegFrm,
@@ -248,14 +264,16 @@ def PUSH64i8 : Ii8<0x6a, RawFrm, (outs), (ins i8imm:$imm),
"push{q}\t$imm", []>;
def PUSH64i16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
"push{q}\t$imm", []>;
-def PUSH64i32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
+def PUSH64i32 : Ii32<0x68, RawFrm, (outs), (ins i64i32imm:$imm),
"push{q}\t$imm", []>;
}
-let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1 in
-def POPFQ : I<0x9D, RawFrm, (outs), (ins), "popf{q}", []>, REX_W;
-let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1 in
-def PUSHFQ64 : I<0x9C, RawFrm, (outs), (ins), "pushf{q}", []>;
+let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1, neverHasSideEffects=1 in
+def POPF64 : I<0x9D, RawFrm, (outs), (ins), "popfq", []>,
+ Requires<[In64BitMode]>;
+let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in
+def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", []>,
+ Requires<[In64BitMode]>;
def LEA64_32r : I<0x8D, MRMSrcMem,
(outs GR32:$dst), (ins lea64_32mem:$src),
@@ -263,11 +281,11 @@ def LEA64_32r : I<0x8D, MRMSrcMem,
[(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;
let isReMaterializable = 1 in
-def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
+def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"lea{q}\t{$src|$dst}, {$dst|$src}",
[(set GR64:$dst, lea64addr:$src)]>;
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
"bswap{q}\t$dst",
[(set GR64:$dst, (bswap GR64:$src))]>, TB;
@@ -276,36 +294,40 @@ def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
let Defs = [EFLAGS] in {
def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"bsf{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (X86bsf GR64:$src)), (implicit EFLAGS)]>, TB;
+ [(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))]>, TB;
def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"bsf{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (X86bsf (loadi64 addr:$src))),
- (implicit EFLAGS)]>, TB;
+ [(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))]>, TB;
def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"bsr{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (X86bsr GR64:$src)), (implicit EFLAGS)]>, TB;
+ [(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))]>, TB;
def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"bsr{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (X86bsr (loadi64 addr:$src))),
- (implicit EFLAGS)]>, TB;
+ [(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))]>, TB;
} // Defs = [EFLAGS]
// Repeat string ops
-let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI] in
+let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in
def REP_MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "{rep;movsq|rep movsq}",
[(X86rep_movs i64)]>, REP;
-let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI] in
+let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI], isCodeGenOnly = 1 in
def REP_STOSQ : RI<0xAB, RawFrm, (outs), (ins), "{rep;stosq|rep stosq}",
[(X86rep_stos i64)]>, REP;
-def SCAS64 : RI<0xAF, RawFrm, (outs), (ins), "scas{q}", []>;
+let Defs = [EDI,ESI], Uses = [EDI,ESI,EFLAGS] in
+def MOVSQ : RI<0xA5, RawFrm, (outs), (ins), "movsq", []>;
+
+let Defs = [RCX,RDI], Uses = [RAX,RCX,RDI,EFLAGS] in
+def STOSQ : RI<0xAB, RawFrm, (outs), (ins), "stosq", []>;
+
+def SCAS64 : RI<0xAF, RawFrm, (outs), (ins), "scasq", []>;
-def CMPS64 : RI<0xA7, RawFrm, (outs), (ins), "cmps{q}", []>;
+def CMPS64 : RI<0xA7, RawFrm, (outs), (ins), "cmpsq", []>;
// Fast system-call instructions
def SYSEXIT64 : RI<0x35, RawFrm,
- (outs), (ins), "sysexit", []>, TB;
+ (outs), (ins), "sysexit", []>, TB, Requires<[In64BitMode]>;
//===----------------------------------------------------------------------===//
// Move Instructions...
@@ -324,8 +346,17 @@ def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src),
[(set GR64:$dst, i64immSExt32:$src)]>;
}
+// The assembler accepts movq of a 64-bit immediate as an alternate spelling of
+// movabsq.
+let isAsmParserOnly = 1 in {
+def MOV64ri_alt : RIi64<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64imm:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}", []>;
+}
+
+let isCodeGenOnly = 1 in {
def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"mov{q}\t{$src, $dst|$dst, $src}", []>;
+}
let canFoldAsLoad = 1, isReMaterializable = 1 in
def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
@@ -339,6 +370,29 @@ def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
"mov{q}\t{$src, $dst|$dst, $src}",
[(store i64immSExt32:$src, addr:$dst)]>;
+/// Versions of MOV64rr, MOV64rm, and MOV64mr for i64mem_TC and GR64_TC.
+let isCodeGenOnly = 1 in {
+let neverHasSideEffects = 1 in
+def MOV64rr_TC : RI<0x89, MRMDestReg, (outs GR64_TC:$dst), (ins GR64_TC:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}", []>;
+
+let mayLoad = 1,
+ canFoldAsLoad = 1, isReMaterializable = 1 in
+def MOV64rm_TC : RI<0x8B, MRMSrcMem, (outs GR64_TC:$dst), (ins i64mem_TC:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}",
+ []>;
+
+let mayStore = 1 in
+def MOV64mr_TC : RI<0x89, MRMDestMem, (outs), (ins i64mem_TC:$dst, GR64_TC:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}",
+ []>;
+}
+
+// FIXME: These definitions are utterly broken
+// Just leave them commented out for now because they're useless outside
+// of the large code model, and most compilers won't generate the instructions
+// in question.
+/*
def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src),
"mov{q}\t{$src, %rax|%rax, $src}", []>;
def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src),
@@ -347,6 +401,7 @@ def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins),
"mov{q}\t{%rax, $dst|$dst, %rax}", []>;
def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins),
"mov{q}\t{%rax, $dst|$dst, %rax}", []>;
+*/
// Moves to and from segment registers
def MOV64rs : RI<0x8C, MRMDestReg, (outs GR64:$dst), (ins SEGMENT_REG:$src),
@@ -365,9 +420,9 @@ def MOV64dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR64:$src),
"mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
// Moves to and from control registers
-def MOV64rc : I<0x20, MRMDestReg, (outs GR64:$dst), (ins CONTROL_REG_64:$src),
+def MOV64rc : I<0x20, MRMDestReg, (outs GR64:$dst), (ins CONTROL_REG:$src),
"mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
-def MOV64cr : I<0x22, MRMSrcReg, (outs CONTROL_REG_64:$dst), (ins GR64:$src),
+def MOV64cr : I<0x22, MRMSrcReg, (outs CONTROL_REG:$dst), (ins GR64:$src),
"mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
// Sign/Zero extenders
@@ -445,7 +500,7 @@ def def32 : PatLeaf<(i32 GR32:$src), [{
// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
- (SUBREG_TO_REG (i64 0), GR32:$src, x86_subreg_32bit)>;
+ (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
let neverHasSideEffects = 1 in {
let Defs = [RAX], Uses = [EAX] in
@@ -463,46 +518,48 @@ let neverHasSideEffects = 1 in {
let Defs = [EFLAGS] in {
-def ADD64i32 : RI<0x05, RawFrm, (outs), (ins i32imm:$src),
- "add{q}\t{$src, %rax|%rax, $src}", []>;
+def ADD64i32 : RIi32<0x05, RawFrm, (outs), (ins i64i32imm:$src),
+ "add{q}\t{$src, %rax|%rax, $src}", []>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
// Register-Register Addition
def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (add GR64:$src1, GR64:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86add_flag GR64:$src1, GR64:$src2))]>;
+
+// These are alternate spellings for use by the disassembler; we mark them as
+// code gen only to ensure they aren't matched by the assembler.
+let isCodeGenOnly = 1 in {
+ def ADD64rr_alt : RI<0x03, MRMSrcReg, (outs GR64:$dst),
+ (ins GR64:$src1, GR64:$src2),
+ "add{l}\t{$src2, $dst|$dst, $src2}", []>;
+}
// Register-Integer Addition
def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst),
(ins GR64:$src1, i64i8imm:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86add_flag GR64:$src1, i64immSExt8:$src2))]>;
def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst),
(ins GR64:$src1, i64i32imm:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86add_flag GR64:$src1, i64immSExt32:$src2))]>;
} // isConvertibleToThreeAddress
// Register-Memory Addition
def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$src1, i64mem:$src2),
"add{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (add GR64:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
-
-// Register-Register Addition - Equivalent to the normal rr form (ADD64rr), but
-// differently encoded.
-def ADD64mrmrr : RI<0x03, MRMSrcReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "add{l}\t{$src2, $dst|$dst, $src2}", []>;
+ [(set GR64:$dst, EFLAGS,
+ (X86add_flag GR64:$src1, (load addr:$src2)))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
// Memory-Register Addition
def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
@@ -520,19 +577,21 @@ def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
let Uses = [EFLAGS] in {
-def ADC64i32 : RI<0x15, RawFrm, (outs), (ins i32imm:$src),
- "adc{q}\t{$src, %rax|%rax, $src}", []>;
+def ADC64i32 : RIi32<0x15, RawFrm, (outs), (ins i64i32imm:$src),
+ "adc{q}\t{$src, %rax|%rax, $src}", []>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def ADC64rr : RI<0x11, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"adc{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>;
+let isCodeGenOnly = 1 in {
def ADC64rr_REV : RI<0x13, MRMSrcReg , (outs GR32:$dst),
(ins GR64:$src1, GR64:$src2),
"adc{q}\t{$src2, $dst|$dst, $src2}", []>;
+}
def ADC64rm : RI<0x13, MRMSrcMem , (outs GR64:$dst),
(ins GR64:$src1, i64mem:$src2),
@@ -547,7 +606,7 @@ def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst),
(ins GR64:$src1, i64i32imm:$src2),
"adc{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
def ADC64mr : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"adc{q}\t{$src2, $dst|$dst, $src2}",
@@ -562,40 +621,42 @@ def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
addr:$dst)]>;
} // Uses = [EFLAGS]
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
// Register-Register Subtraction
def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, GR64:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86sub_flag GR64:$src1, GR64:$src2))]>;
+let isCodeGenOnly = 1 in {
def SUB64rr_REV : RI<0x2B, MRMSrcReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}", []>;
+}
// Register-Memory Subtraction
def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$src1, i64mem:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86sub_flag GR64:$src1, (load addr:$src2)))]>;
// Register-Integer Subtraction
def SUB64ri8 : RIi8<0x83, MRM5r, (outs GR64:$dst),
(ins GR64:$src1, i64i8imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, i64immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86sub_flag GR64:$src1, i64immSExt8:$src2))]>;
def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
(ins GR64:$src1, i64i32imm:$src2),
"sub{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (sub GR64:$src1, i64immSExt32:$src2)),
- (implicit EFLAGS)]>;
-} // isTwoAddress
+ [(set GR64:$dst, EFLAGS,
+ (X86sub_flag GR64:$src1, i64immSExt32:$src2))]>;
+} // Constraints = "$src1 = $dst"
-def SUB64i32 : RI<0x2D, RawFrm, (outs), (ins i32imm:$src),
- "sub{q}\t{$src, %rax|%rax, $src}", []>;
+def SUB64i32 : RIi32<0x2D, RawFrm, (outs), (ins i64i32imm:$src),
+ "sub{q}\t{$src, %rax|%rax, $src}", []>;
// Memory-Register Subtraction
def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
@@ -616,15 +677,17 @@ def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
(implicit EFLAGS)]>;
let Uses = [EFLAGS] in {
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
def SBB64rr : RI<0x19, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"sbb{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>;
+let isCodeGenOnly = 1 in {
def SBB64rr_REV : RI<0x1B, MRMSrcReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"sbb{q}\t{$src2, $dst|$dst, $src2}", []>;
+}
def SBB64rm : RI<0x1B, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$src1, i64mem:$src2),
@@ -639,10 +702,10 @@ def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst),
(ins GR64:$src1, i64i32imm:$src2),
"sbb{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
-def SBB64i32 : RI<0x1D, RawFrm, (outs), (ins i32imm:$src),
- "sbb{q}\t{$src, %rax|%rax, $src}", []>;
+def SBB64i32 : RIi32<0x1D, RawFrm, (outs), (ins i64i32imm:$src),
+ "sbb{q}\t{$src, %rax|%rax, $src}", []>;
def SBB64mr : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"sbb{q}\t{$src2, $dst|$dst, $src2}",
@@ -673,22 +736,22 @@ def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
}
let Defs = [EFLAGS] in {
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
// Register-Register Signed Integer Multiplication
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"imul{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, GR64:$src2)),
- (implicit EFLAGS)]>, TB;
+ [(set GR64:$dst, EFLAGS,
+ (X86smul_flag GR64:$src1, GR64:$src2))]>, TB;
// Register-Memory Signed Integer Multiplication
def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$src1, i64mem:$src2),
"imul{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>, TB;
-} // isTwoAddress
+ [(set GR64:$dst, EFLAGS,
+ (X86smul_flag GR64:$src1, (load addr:$src2)))]>, TB;
+} // Constraints = "$src1 = $dst"
// Surprisingly enough, these are not two address instructions!
@@ -696,27 +759,27 @@ def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
def IMUL64rri8 : RIi8<0x6B, MRMSrcReg, // GR64 = GR64*I8
(outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, i64immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86smul_flag GR64:$src1, i64immSExt8:$src2))]>;
def IMUL64rri32 : RIi32<0x69, MRMSrcReg, // GR64 = GR64*I32
(outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, (mul GR64:$src1, i64immSExt32:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86smul_flag GR64:$src1, i64immSExt32:$src2))]>;
// Memory-Integer Signed Integer Multiplication
def IMUL64rmi8 : RIi8<0x6B, MRMSrcMem, // GR64 = [mem64]*I8
(outs GR64:$dst), (ins i64mem:$src1, i64i8imm: $src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, (mul (load addr:$src1),
- i64immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86smul_flag (load addr:$src1),
+ i64immSExt8:$src2))]>;
def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32
(outs GR64:$dst), (ins i64mem:$src1, i64i32imm:$src2),
"imul{q}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR64:$dst, (mul (load addr:$src1),
- i64immSExt32:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86smul_flag (load addr:$src1),
+ i64immSExt32:$src2))]>;
} // Defs = [EFLAGS]
// Unsigned division / remainder
@@ -740,7 +803,7 @@ def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src),
// Unary instructions
let Defs = [EFLAGS], CodeSize = 2 in {
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
[(set GR64:$dst, (ineg GR64:$src)),
(implicit EFLAGS)]>;
@@ -748,77 +811,69 @@ def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
[(store (ineg (loadi64 addr:$dst)), addr:$dst),
(implicit EFLAGS)]>;
-let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
+let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
- [(set GR64:$dst, (add GR64:$src, 1)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS, (X86inc_flag GR64:$src))]>;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
[(store (add (loadi64 addr:$dst), 1), addr:$dst),
(implicit EFLAGS)]>;
-let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
+let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
- [(set GR64:$dst, (add GR64:$src, -1)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS, (X86dec_flag GR64:$src))]>;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
[(store (add (loadi64 addr:$dst), -1), addr:$dst),
(implicit EFLAGS)]>;
// In 64-bit mode, single byte INC and DEC cannot be encoded.
-let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
+let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src),
"inc{w}\t$dst",
- [(set GR16:$dst, (add GR16:$src, 1)),
- (implicit EFLAGS)]>,
+ [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src))]>,
OpSize, Requires<[In64BitMode]>;
def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src),
"inc{l}\t$dst",
- [(set GR32:$dst, (add GR32:$src, 1)),
- (implicit EFLAGS)]>,
+ [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src))]>,
Requires<[In64BitMode]>;
def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src),
"dec{w}\t$dst",
- [(set GR16:$dst, (add GR16:$src, -1)),
- (implicit EFLAGS)]>,
+ [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src))]>,
OpSize, Requires<[In64BitMode]>;
def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src),
"dec{l}\t$dst",
- [(set GR32:$dst, (add GR32:$src, -1)),
- (implicit EFLAGS)]>,
+ [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src))]>,
Requires<[In64BitMode]>;
-} // isConvertibleToThreeAddress
+} // Constraints = "$src = $dst", isConvertibleToThreeAddress
// These are duplicates of their 32-bit counterparts. Only needed so X86 knows
// how to unfold them.
-let isTwoAddress = 0, CodeSize = 2 in {
- def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
- [(store (add (loadi16 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize, Requires<[In64BitMode]>;
- def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
- [(store (add (loadi32 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>,
- Requires<[In64BitMode]>;
- def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
- [(store (add (loadi16 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize, Requires<[In64BitMode]>;
- def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
- [(store (add (loadi32 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>,
- Requires<[In64BitMode]>;
-}
+def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
+ [(store (add (loadi16 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>,
+ OpSize, Requires<[In64BitMode]>;
+def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
+ [(store (add (loadi32 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>,
+ Requires<[In64BitMode]>;
+def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
+ [(store (add (loadi16 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>,
+ OpSize, Requires<[In64BitMode]>;
+def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
+ [(store (add (loadi32 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>,
+ Requires<[In64BitMode]>;
} // Defs = [EFLAGS], CodeSize
let Defs = [EFLAGS] in {
// Shift instructions
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
-def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src),
+def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
"shl{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (shl GR64:$src, CL))]>;
+ [(set GR64:$dst, (shl GR64:$src1, CL))]>;
let isConvertibleToThreeAddress = 1 in // Can transform into LEA.
def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
@@ -828,7 +883,7 @@ def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst),
// 'add reg,reg' is cheaper.
def SHL64r1 : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
"shl{q}\t$dst", []>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
@@ -841,18 +896,18 @@ def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
"shl{q}\t$dst",
[(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
-def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src),
+def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
"shr{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (srl GR64:$src, CL))]>;
+ [(set GR64:$dst, (srl GR64:$src1, CL))]>;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
"shr{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
"shr{q}\t$dst",
[(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
@@ -865,11 +920,11 @@ def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
"shr{q}\t$dst",
[(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
-def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src),
+def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
"sar{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (sra GR64:$src, CL))]>;
+ [(set GR64:$dst, (sra GR64:$src1, CL))]>;
def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"sar{q}\t{$src2, $dst|$dst, $src2}",
@@ -877,7 +932,7 @@ def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst),
def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
"sar{q}\t$dst",
[(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
@@ -892,7 +947,7 @@ def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
// Rotate instructions
-let isTwoAddress = 1 in {
+let Constraints = "$src = $dst" in {
def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src),
"rcl{q}\t{1, $dst|$dst, 1}", []>;
def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
@@ -909,9 +964,8 @@ def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src),
def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src),
"rcr{q}\t{%cl, $dst|$dst, CL}", []>;
}
-}
+} // Constraints = "$src = $dst"
-let isTwoAddress = 0 in {
def RCL64m1 : RI<0xD1, MRM2m, (outs), (ins i64mem:$dst),
"rcl{q}\t{1, $dst|$dst, 1}", []>;
def RCL64mi : RIi8<0xC1, MRM2m, (outs), (ins i64mem:$dst, i8imm:$cnt),
@@ -927,13 +981,12 @@ def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst),
def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
"rcr{q}\t{%cl, $dst|$dst, CL}", []>;
}
-}
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
-def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src),
+def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
"rol{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (rotl GR64:$src, CL))]>;
+ [(set GR64:$dst, (rotl GR64:$src1, CL))]>;
def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"rol{q}\t{$src2, $dst|$dst, $src2}",
@@ -941,7 +994,7 @@ def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst),
def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
"rol{q}\t$dst",
[(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
@@ -954,11 +1007,11 @@ def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
"rol{q}\t$dst",
[(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
-def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src),
+def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
"ror{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (rotr GR64:$src, CL))]>;
+ [(set GR64:$dst, (rotr GR64:$src1, CL))]>;
def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"ror{q}\t{$src2, $dst|$dst, $src2}",
@@ -966,7 +1019,7 @@ def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst),
def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
"ror{q}\t$dst",
[(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
@@ -980,7 +1033,7 @@ def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
[(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
// Double shift instructions (generalizations of rotate)
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in {
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
@@ -1010,7 +1063,7 @@ def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
(i8 imm:$src3)))]>,
TB;
} // isCommutable
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in {
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
@@ -1040,42 +1093,44 @@ def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
// Logical Instructions...
//
-let isTwoAddress = 1 , AddedComplexity = 15 in
+let Constraints = "$src = $dst" , AddedComplexity = 15 in
def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
[(set GR64:$dst, (not GR64:$src))]>;
def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
[(store (not (loadi64 addr:$dst)), addr:$dst)]>;
let Defs = [EFLAGS] in {
-def AND64i32 : RI<0x25, RawFrm, (outs), (ins i32imm:$src),
- "and{q}\t{$src, %rax|%rax, $src}", []>;
+def AND64i32 : RIi32<0x25, RawFrm, (outs), (ins i64i32imm:$src),
+ "and{q}\t{$src, %rax|%rax, $src}", []>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def AND64rr : RI<0x21, MRMDestReg,
(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"and{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, GR64:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86and_flag GR64:$src1, GR64:$src2))]>;
+let isCodeGenOnly = 1 in {
def AND64rr_REV : RI<0x23, MRMSrcReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"and{q}\t{$src2, $dst|$dst, $src2}", []>;
+}
def AND64rm : RI<0x23, MRMSrcMem,
(outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
"and{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86and_flag GR64:$src1, (load addr:$src2)))]>;
def AND64ri8 : RIi8<0x83, MRM4r,
(outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
"and{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, i64immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86and_flag GR64:$src1, i64immSExt8:$src2))]>;
def AND64ri32 : RIi32<0x81, MRM4r,
(outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"and{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (and GR64:$src1, i64immSExt32:$src2)),
- (implicit EFLAGS)]>;
-} // isTwoAddress
+ [(set GR64:$dst, EFLAGS,
+ (X86and_flag GR64:$src1, i64immSExt32:$src2))]>;
+} // Constraints = "$src1 = $dst"
def AND64mr : RI<0x21, MRMDestMem,
(outs), (ins i64mem:$dst, GR64:$src),
@@ -1093,32 +1148,34 @@ def AND64mi32 : RIi32<0x81, MRM4m,
[(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
(implicit EFLAGS)]>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"or{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, GR64:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86or_flag GR64:$src1, GR64:$src2))]>;
+let isCodeGenOnly = 1 in {
def OR64rr_REV : RI<0x0B, MRMSrcReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"or{q}\t{$src2, $dst|$dst, $src2}", []>;
+}
def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst),
(ins GR64:$src1, i64mem:$src2),
"or{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86or_flag GR64:$src1, (load addr:$src2)))]>;
def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst),
(ins GR64:$src1, i64i8imm:$src2),
"or{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86or_flag GR64:$src1, i64immSExt8:$src2))]>;
def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst),
(ins GR64:$src1, i64i32imm:$src2),
"or{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2)),
- (implicit EFLAGS)]>;
-} // isTwoAddress
+ [(set GR64:$dst, EFLAGS,
+ (X86or_flag GR64:$src1, i64immSExt32:$src2))]>;
+} // Constraints = "$src1 = $dst"
def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"or{q}\t{$src, $dst|$dst, $src}",
@@ -1133,35 +1190,37 @@ def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
[(store (or (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
(implicit EFLAGS)]>;
-def OR64i32 : RIi32<0x0D, RawFrm, (outs), (ins i32imm:$src),
+def OR64i32 : RIi32<0x0D, RawFrm, (outs), (ins i64i32imm:$src),
"or{q}\t{$src, %rax|%rax, $src}", []>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"xor{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (xor GR64:$src1, GR64:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86xor_flag GR64:$src1, GR64:$src2))]>;
+let isCodeGenOnly = 1 in {
def XOR64rr_REV : RI<0x33, MRMSrcReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"xor{q}\t{$src2, $dst|$dst, $src2}", []>;
+}
def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$src1, i64mem:$src2),
"xor{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86xor_flag GR64:$src1, (load addr:$src2)))]>;
def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst),
(ins GR64:$src1, i64i8imm:$src2),
"xor{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR64:$dst, EFLAGS,
+ (X86xor_flag GR64:$src1, i64immSExt8:$src2))]>;
def XOR64ri32 : RIi32<0x81, MRM6r,
(outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
"xor{q}\t{$src2, $dst|$dst, $src2}",
- [(set GR64:$dst, (xor GR64:$src1, i64immSExt32:$src2)),
- (implicit EFLAGS)]>;
-} // isTwoAddress
+ [(set GR64:$dst, EFLAGS,
+ (X86xor_flag GR64:$src1, i64immSExt32:$src2))]>;
+} // Constraints = "$src1 = $dst"
def XOR64mr : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"xor{q}\t{$src, $dst|$dst, $src}",
@@ -1176,7 +1235,7 @@ def XOR64mi32 : RIi32<0x81, MRM6m, (outs), (ins i64mem:$dst, i64i32imm:$src),
[(store (xor (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
(implicit EFLAGS)]>;
-def XOR64i32 : RIi32<0x35, RawFrm, (outs), (ins i32imm:$src),
+def XOR64i32 : RIi32<0x35, RawFrm, (outs), (ins i64i32imm:$src),
"xor{q}\t{$src, %rax|%rax, $src}", []>;
} // Defs = [EFLAGS]
@@ -1187,62 +1246,62 @@ def XOR64i32 : RIi32<0x35, RawFrm, (outs), (ins i32imm:$src),
// Integer comparison
let Defs = [EFLAGS] in {
-def TEST64i32 : RI<0xa9, RawFrm, (outs), (ins i32imm:$src),
- "test{q}\t{$src, %rax|%rax, $src}", []>;
+def TEST64i32 : RIi32<0xa9, RawFrm, (outs), (ins i64i32imm:$src),
+ "test{q}\t{$src, %rax|%rax, $src}", []>;
let isCommutable = 1 in
-def TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
+def TEST64rr : RI<0x85, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
"test{q}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (and GR64:$src1, GR64:$src2), 0),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (and GR64:$src1, GR64:$src2), 0))]>;
def TEST64rm : RI<0x85, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
"test{q}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (and GR64:$src1, (loadi64 addr:$src2)), 0),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (and GR64:$src1, (loadi64 addr:$src2)),
+ 0))]>;
def TEST64ri32 : RIi32<0xF7, MRM0r, (outs),
(ins GR64:$src1, i64i32imm:$src2),
"test{q}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (and GR64:$src1, i64immSExt32:$src2), 0),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (and GR64:$src1, i64immSExt32:$src2),
+ 0))]>;
def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
(ins i64mem:$src1, i64i32imm:$src2),
"test{q}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (and (loadi64 addr:$src1), i64immSExt32:$src2), 0),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (and (loadi64 addr:$src1),
+ i64immSExt32:$src2), 0))]>;
-def CMP64i32 : RI<0x3D, RawFrm, (outs), (ins i32imm:$src),
- "cmp{q}\t{$src, %rax|%rax, $src}", []>;
+def CMP64i32 : RIi32<0x3D, RawFrm, (outs), (ins i64i32imm:$src),
+ "cmp{q}\t{$src, %rax|%rax, $src}", []>;
def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
"cmp{q}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp GR64:$src1, GR64:$src2),
- (implicit EFLAGS)]>;
-def CMP64mrmrr : RI<0x3B, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
- "cmp{q}\t{$src2, $src1|$src1, $src2}", []>;
+ [(set EFLAGS, (X86cmp GR64:$src1, GR64:$src2))]>;
+
+// These are alternate spellings for use by the disassembler; we mark them as
+// code gen only to ensure they aren't matched by the assembler.
+let isCodeGenOnly = 1 in {
+ def CMP64mrmrr : RI<0x3B, MRMSrcReg, (outs), (ins GR64:$src1, GR64:$src2),
+ "cmp{q}\t{$src2, $src1|$src1, $src2}", []>;
+}
+
def CMP64mr : RI<0x39, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
"cmp{q}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (loadi64 addr:$src1), GR64:$src2),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (loadi64 addr:$src1), GR64:$src2))]>;
def CMP64rm : RI<0x3B, MRMSrcMem, (outs), (ins GR64:$src1, i64mem:$src2),
"cmp{q}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp GR64:$src1, (loadi64 addr:$src2)),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp GR64:$src1, (loadi64 addr:$src2)))]>;
def CMP64ri8 : RIi8<0x83, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2),
"cmp{q}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp GR64:$src1, i64immSExt8:$src2),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp GR64:$src1, i64immSExt8:$src2))]>;
def CMP64ri32 : RIi32<0x81, MRM7r, (outs), (ins GR64:$src1, i64i32imm:$src2),
"cmp{q}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp GR64:$src1, i64immSExt32:$src2),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp GR64:$src1, i64immSExt32:$src2))]>;
def CMP64mi8 : RIi8<0x83, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
"cmp{q}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (loadi64 addr:$src1), i64immSExt8:$src2),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (loadi64 addr:$src1),
+ i64immSExt8:$src2))]>;
def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
(ins i64mem:$src1, i64i32imm:$src2),
"cmp{q}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (loadi64 addr:$src1), i64immSExt32:$src2),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (loadi64 addr:$src1),
+ i64immSExt32:$src2))]>;
} // Defs = [EFLAGS]
// Bit tests.
@@ -1250,8 +1309,7 @@ def CMP64mi32 : RIi32<0x81, MRM7m, (outs),
let Defs = [EFLAGS] in {
def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
"bt{q}\t{$src2, $src1|$src1, $src2}",
- [(X86bt GR64:$src1, GR64:$src2),
- (implicit EFLAGS)]>, TB;
+ [(set EFLAGS, (X86bt GR64:$src1, GR64:$src2))]>, TB;
// Unlike with the register+register form, the memory+register form of the
// bt instruction does not ignore the high bits of the index. From ISel's
@@ -1263,17 +1321,16 @@ def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
[]
>, TB;
-def BT64ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
+def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2),
"bt{q}\t{$src2, $src1|$src1, $src2}",
- [(X86bt GR64:$src1, i64immSExt8:$src2),
- (implicit EFLAGS)]>, TB;
+ [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))]>, TB;
// Note that these instructions don't need FastBTMem because that
// only applies when the other operand is in a register. When it's
// an immediate, bt is still fast.
-def BT64mi8 : Ii8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
+def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
"bt{q}\t{$src2, $src1|$src1, $src2}",
- [(X86bt (loadi64 addr:$src1), i64immSExt8:$src2),
- (implicit EFLAGS)]>, TB;
+ [(set EFLAGS, (X86bt (loadi64 addr:$src1),
+ i64immSExt8:$src2))]>, TB;
def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
"btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
@@ -1304,7 +1361,7 @@ def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
} // Defs = [EFLAGS]
// Conditional moves
-let Uses = [EFLAGS], isTwoAddress = 1 in {
+let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
let isCommutable = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg, // if <u, GR64 = GR64
(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
@@ -1468,7 +1525,7 @@ def CMOVNO64rm : RI<0x41, MRMSrcMem, // if !overflow, GR64 = [mem64]
"cmovno{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
X86_COND_NO, EFLAGS))]>, TB;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
// Use sbb to materialize carry flag into a GPR.
// FIXME: This are pseudo ops that should be replaced with Pat<> patterns.
@@ -1484,116 +1541,6 @@ def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
(SETB_C64r)>;
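The sbb trick above yields 0 when the carry flag is clear and all-ones when it is set. A hedged C sketch of the same 0/~0 mask, modelling carry as an unsigned borrow (the helper name is illustrative only):

  #include <assert.h>
  #include <stdint.h>

  static uint64_t carry_mask(uint64_t a, uint64_t b)
  {
      /* a - b borrows exactly when a < b; negating the 0/1 result gives
       * the same 0 or all-ones value that "sbb %rax, %rax" produces */
      return (uint64_t)0 - (uint64_t)(a < b);
  }

  int main(void)
  {
      assert(carry_mask(1, 2) == UINT64_MAX);  /* borrow -> all ones */
      assert(carry_mask(2, 1) == 0);           /* no borrow -> zero  */
      return 0;
  }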
//===----------------------------------------------------------------------===//
-// Conversion Instructions...
-//
-
-// f64 -> signed i64
-def CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
- "cvtsd2si{q}\t{$src, $dst|$dst, $src}", []>;
-def CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
- "cvtsd2si{q}\t{$src, $dst|$dst, $src}", []>;
-def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
- "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst,
- (int_x86_sse2_cvtsd2si64 VR128:$src))]>;
-def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst),
- (ins f128mem:$src),
- "cvtsd2si{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (int_x86_sse2_cvtsd2si64
- (load addr:$src)))]>;
-def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
- "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (fp_to_sint FR64:$src))]>;
-def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src),
- "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
-def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
- "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst,
- (int_x86_sse2_cvttsd2si64 VR128:$src))]>;
-def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst),
- (ins f128mem:$src),
- "cvttsd2si{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst,
- (int_x86_sse2_cvttsd2si64
- (load addr:$src)))]>;
-
-// Signed i64 -> f64
-def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
- "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (sint_to_fp GR64:$src))]>;
-def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
- "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
-
-let isTwoAddress = 1 in {
-def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
- "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (int_x86_sse2_cvtsi642sd VR128:$src1,
- GR64:$src2))]>;
-def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
- "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (int_x86_sse2_cvtsi642sd VR128:$src1,
- (loadi64 addr:$src2)))]>;
-} // isTwoAddress
-
-// Signed i64 -> f32
-def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR64:$src),
- "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (sint_to_fp GR64:$src))]>;
-def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i64mem:$src),
- "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
-
-let isTwoAddress = 1 in {
- def Int_CVTSI2SS64rr : RSSI<0x2A, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
- "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (int_x86_sse_cvtsi642ss VR128:$src1,
- GR64:$src2))]>;
- def Int_CVTSI2SS64rm : RSSI<0x2A, MRMSrcMem,
- (outs VR128:$dst),
- (ins VR128:$src1, i64mem:$src2),
- "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (int_x86_sse_cvtsi642ss VR128:$src1,
- (loadi64 addr:$src2)))]>;
-}
-
-// f32 -> signed i64
-def CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
- "cvtss2si{q}\t{$src, $dst|$dst, $src}", []>;
-def CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
- "cvtss2si{q}\t{$src, $dst|$dst, $src}", []>;
-def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
- "cvtss2si{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst,
- (int_x86_sse_cvtss2si64 VR128:$src))]>;
-def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
- "cvtss2si{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (int_x86_sse_cvtss2si64
- (load addr:$src)))]>;
-def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
- "cvttss2si{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (fp_to_sint FR32:$src))]>;
-def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src),
- "cvttss2si{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
-def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
- "cvttss2si{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst,
- (int_x86_sse_cvttss2si64 VR128:$src))]>;
-def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst),
- (ins f32mem:$src),
- "cvttss2si{q}\t{$src, $dst|$dst, $src}",
- [(set GR64:$dst,
- (int_x86_sse_cvttss2si64 (load addr:$src)))]>;
-
// Descriptor-table support instructions
// LLDT is not interpreted specially in 64-bit mode because there is no sign
@@ -1629,6 +1576,7 @@ def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
// Thread Local Storage Instructions
//===----------------------------------------------------------------------===//
+// ELF TLS Support
// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
@@ -1638,7 +1586,7 @@ let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
Uses = [RSP] in
-def TLS_addr64 : I<0, Pseudo, (outs), (ins lea64mem:$sym),
+def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
".byte\t0x66; "
"leaq\t$sym(%rip), %rdi; "
".word\t0x6666; "
@@ -1647,6 +1595,17 @@ def TLS_addr64 : I<0, Pseudo, (outs), (ins lea64mem:$sym),
[(X86tlsaddr tls64addr:$sym)]>,
Requires<[In64BitMode]>;
+// Darwin TLS Support
+// For x86_64, the address of the thunk is passed in %rdi, on return
+// the address of the variable is in %rax. All other registers are preserved.
+let Defs = [RAX],
+ Uses = [RDI],
+ usesCustomInserter = 1 in
+def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
+ "# TLSCall_64",
+ [(X86TLSCall addr:$sym)]>,
+ Requires<[In64BitMode]>;
+
let AddedComplexity = 5, isCodeGenOnly = 1 in
def MOV64GSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"movq\t%gs:$src, $dst",
@@ -1661,6 +1620,14 @@ def MOV64FSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
// Atomic Instructions
//===----------------------------------------------------------------------===//
+// TODO: Get this to fold the constant into the instruction.
+let hasSideEffects = 1, Defs = [ESP] in
+def Int_MemBarrierNoSSE64 : RI<0x09, MRM1r, (outs), (ins GR64:$zero),
+ "lock\n\t"
+ "or{q}\t{$zero, (%rsp)|(%rsp), $zero}",
+ [(X86MemBarrierNoSSE GR64:$zero)]>,
+ Requires<[In64BitMode]>, LOCK;
+
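The locked or of an immediate zero against (%rsp) is a common full-barrier idiom on processors that lack SSE2's mfence. A rough C equivalent using GCC-style inline assembly (x86-64 with GCC/Clang assumed; sketch only):

  static inline void membarrier_nosse(void)
  {
      /* a locked read-modify-write of a stack dword orders prior
       * loads and stores, much as mfence would */
      __asm__ __volatile__("lock; orl $0, (%%rsp)" ::: "memory", "cc");
  }

  int main(void)
  {
      membarrier_nosse();
      return 0;
  }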
let Defs = [RAX, EFLAGS], Uses = [RAX] in {
def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap),
"lock\n\t"
@@ -1687,11 +1654,13 @@ def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src),
def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
"xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;
+let mayLoad = 1, mayStore = 1 in
def XADD64rm : RI<0xC1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;
def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
"cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
+let mayLoad = 1, mayStore = 1 in
def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB;
@@ -1703,9 +1672,9 @@ def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src),
"xchg{q}\t{$src, %rax|%rax, $src}", []>;
// Optimized codegen when the non-memory output is not used.
-let Defs = [EFLAGS] in {
+let Defs = [EFLAGS], mayLoad = 1, mayStore = 1 in {
// FIXME: Use normal add / sub instructions and add lock prefix dynamically.
-def LOCK_ADD64mr : RI<0x03, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
+def LOCK_ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"lock\n\t"
"add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
def LOCK_ADD64mi8 : RIi8<0x83, MRM0m, (outs),
@@ -1884,19 +1853,37 @@ def : Pat<(X86call (i64 texternalsym:$dst)),
(WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>;
// tailcall stuff
-def : Pat<(X86tcret GR64:$dst, imm:$off),
- (TCRETURNri64 GR64:$dst, imm:$off)>;
+def : Pat<(X86tcret GR64_TC:$dst, imm:$off),
+ (TCRETURNri64 GR64_TC:$dst, imm:$off)>,
+ Requires<[In64BitMode]>;
+
+def : Pat<(X86tcret (load addr:$dst), imm:$off),
+ (TCRETURNmi64 addr:$dst, imm:$off)>,
+ Requires<[In64BitMode]>;
def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
- (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>;
+ (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
+ Requires<[In64BitMode]>;
def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
- (TCRETURNdi64 texternalsym:$dst, imm:$off)>;
+ (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
+ Requires<[In64BitMode]>;
+
+// tls has some funny stuff here...
+// This corresponds to movabs $foo@tpoff, %rax
+def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
+ (MOV64ri tglobaltlsaddr :$dst)>;
+// This corresponds to add $foo@tpoff, %rax
+def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
+ (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
+// This corresponds to mov foo@tpoff(%rbx), %eax
+def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))),
+ (MOV64rm tglobaltlsaddr :$dst)>;
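These @tpoff patterns back local-exec TLS accesses. A minimal C example that would typically exercise them, assuming the GCC __thread extension (the exact code generated depends on compiler, options and code model):

  #include <stdio.h>

  static __thread long counter;   /* thread-local storage */

  int main(void)
  {
      counter++;                  /* local-exec access via @tpoff offsets */
      printf("%ld\n", counter);
      return 0;
  }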
// Comparisons.
// TEST R,R is smaller than CMP R,0
-def : Pat<(parallel (X86cmp GR64:$src1, 0), (implicit EFLAGS)),
+def : Pat<(X86cmp GR64:$src1, 0),
(TEST64rr GR64:$src1, GR64:$src1)>;
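The pattern is valid because r & r == r, so test r,r raises the same ZF/SF as cmp r,0 while avoiding the immediate byte. A quick C check of the underlying identity:

  #include <assert.h>
  #include <stdint.h>

  int main(void)
  {
      int64_t v[] = { 0, 1, -1, INT64_MIN };
      for (unsigned i = 0; i < sizeof(v) / sizeof(v[0]); i++) {
          assert((v[i] & v[i]) == v[i]);             /* and is idempotent  */
          assert(((v[i] & v[i]) < 0) == (v[i] < 0)); /* same sign outcome  */
      }
      return 0;
  }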
// Conditional moves with folded loads with operands swapped and conditions
@@ -1948,14 +1935,14 @@ def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
// defined after an extload.
def : Pat<(extloadi64i32 addr:$src),
(SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
- x86_subreg_32bit)>;
+ sub_32bit)>;
// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8 GR8 :$src)>;
def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16 :$src)>;
def : Pat<(i64 (anyext GR32:$src)),
- (SUBREG_TO_REG (i64 0), GR32:$src, x86_subreg_32bit)>;
+ (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
//===----------------------------------------------------------------------===//
// Some peepholes
@@ -1982,54 +1969,54 @@ def : Pat<(and GR64:$src, i64immZExt32:$imm),
(SUBREG_TO_REG
(i64 0),
(AND32ri
- (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit),
+ (EXTRACT_SUBREG GR64:$src, sub_32bit),
(i32 (GetLo32XForm imm:$imm))),
- x86_subreg_32bit)>;
+ sub_32bit)>;
// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
- (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit))>;
+ (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
- (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)))>;
+ (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
- (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)))>;
+ (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
- (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, x86_subreg_8bit))>,
+ (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
Requires<[In64BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
- (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, x86_subreg_8bit)))>,
+ (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)))>,
Requires<[In64BitMode]>;
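Each of these masks keeps exactly one subregister, so the and can be replaced by a zero-extending move. A short C check of the equivalence, using only plain casts (nothing from the patch):

  #include <assert.h>
  #include <stdint.h>

  int main(void)
  {
      uint64_t r = 0x1122334455667788ULL;
      assert((r & 0x00000000FFFFFFFFULL) == (uint64_t)(uint32_t)r);
      assert((r & 0xffffULL)             == (uint64_t)(uint16_t)r);
      assert((r & 0xffULL)               == (uint64_t)(uint8_t)r);
      return 0;
  }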
// sext_inreg patterns
def : Pat<(sext_inreg GR64:$src, i32),
- (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit))>;
+ (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
- (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit))>;
+ (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
- (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit))>;
+ (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
- (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit))>,
+ (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
Requires<[In64BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
- (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit)))>,
+ (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, sub_8bit)))>,
Requires<[In64BitMode]>;
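sext_inreg of the low 8/16/32 bits amounts to truncating to the narrower type and sign-extending back, which is what the movsx forms implement. A small C illustration with ordinary casts:

  #include <assert.h>
  #include <stdint.h>

  int main(void)
  {
      int64_t r = 0x0000000012345680LL;   /* low byte 0x80 is negative */
      assert((int64_t)(int8_t)r  == -0x80);
      assert((int64_t)(int16_t)r == 0x5680);
      assert((int64_t)(int32_t)r == 0x12345680);
      return 0;
  }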
// trunc patterns
def : Pat<(i32 (trunc GR64:$src)),
- (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)>;
+ (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
- (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)>;
+ (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
- (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)>;
+ (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
- (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit)>,
+ (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
- (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit)>,
+ (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
Requires<[In64BitMode]>;
// h-register tricks.
@@ -2045,62 +2032,67 @@ def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
(i64 0),
(MOVZX32_NOREXrr8
(EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
- x86_subreg_8bit_hi)),
- x86_subreg_32bit)>;
+ sub_8bit_hi)),
+ sub_32bit)>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
(MOVZX32_NOREXrr8
(EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
- x86_subreg_8bit_hi))>,
+ sub_8bit_hi))>,
+ Requires<[In64BitMode]>;
+def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
+ (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
+ GR32_ABCD)),
+ sub_8bit_hi))>,
Requires<[In64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
(EXTRACT_SUBREG
(MOVZX32_NOREXrr8
(EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- x86_subreg_8bit_hi)),
- x86_subreg_16bit)>,
+ sub_8bit_hi)),
+ sub_16bit)>,
Requires<[In64BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
(MOVZX32_NOREXrr8
(EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- x86_subreg_8bit_hi))>,
+ sub_8bit_hi))>,
Requires<[In64BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
(MOVZX32_NOREXrr8
(EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- x86_subreg_8bit_hi))>,
+ sub_8bit_hi))>,
Requires<[In64BitMode]>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
(SUBREG_TO_REG
(i64 0),
(MOVZX32_NOREXrr8
(EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- x86_subreg_8bit_hi)),
- x86_subreg_32bit)>;
+ sub_8bit_hi)),
+ sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
(SUBREG_TO_REG
(i64 0),
(MOVZX32_NOREXrr8
(EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- x86_subreg_8bit_hi)),
- x86_subreg_32bit)>;
+ sub_8bit_hi)),
+ sub_32bit)>;
// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
(MOV8mr_NOREX
addr:$dst,
(EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
- x86_subreg_8bit_hi))>;
+ sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
(MOV8mr_NOREX
addr:$dst,
(EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
- x86_subreg_8bit_hi))>,
+ sub_8bit_hi))>,
Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
(MOV8mr_NOREX
addr:$dst,
(EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- x86_subreg_8bit_hi))>,
+ sub_8bit_hi))>,
Requires<[In64BitMode]>;
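The h-register patterns rely on byte 1 of a value being directly addressable as %ah/%bh/%ch/%dh, so (x >> 8) & 0xff needs no shift at all once the value sits in an ABCD register. The identity itself, in C:

  #include <assert.h>
  #include <stdint.h>

  int main(void)
  {
      uint32_t x = 0xAABBCCDDu;
      /* byte 1, i.e. what the high-byte register of an ABCD GPR holds */
      assert(((x >> 8) & 0xff) == 0xCC);
      return 0;
  }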
// (shl x, 1) ==> (add x, x)
@@ -2122,31 +2114,13 @@ def : Pat<(sra GR64:$src1, (and CL, 63)),
def : Pat<(store (sra (loadi64 addr:$dst), (and CL, 63)), addr:$dst),
(SAR64mCL addr:$dst)>;
-// Double shift patterns
-def : Pat<(shrd GR64:$src1, (i8 imm:$amt1), GR64:$src2, (i8 imm)),
- (SHRD64rri8 GR64:$src1, GR64:$src2, (i8 imm:$amt1))>;
-
-def : Pat<(store (shrd (loadi64 addr:$dst), (i8 imm:$amt1),
- GR64:$src2, (i8 imm)), addr:$dst),
- (SHRD64mri8 addr:$dst, GR64:$src2, (i8 imm:$amt1))>;
-
-def : Pat<(shld GR64:$src1, (i8 imm:$amt1), GR64:$src2, (i8 imm)),
- (SHLD64rri8 GR64:$src1, GR64:$src2, (i8 imm:$amt1))>;
-
-def : Pat<(store (shld (loadi64 addr:$dst), (i8 imm:$amt1),
- GR64:$src2, (i8 imm)), addr:$dst),
- (SHLD64mri8 addr:$dst, GR64:$src2, (i8 imm:$amt1))>;
-
// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
let AddedComplexity = 5 in { // Try this before the selecting to OR
-def : Pat<(parallel (or_is_add GR64:$src1, i64immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(or_is_add GR64:$src1, i64immSExt8:$src2),
(ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(parallel (or_is_add GR64:$src1, i64immSExt32:$src2),
- (implicit EFLAGS)),
+def : Pat<(or_is_add GR64:$src1, i64immSExt32:$src2),
(ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
-def : Pat<(parallel (or_is_add GR64:$src1, GR64:$src2),
- (implicit EFLAGS)),
+def : Pat<(or_is_add GR64:$src1, GR64:$src2),
(ADD64rr GR64:$src1, GR64:$src2)>;
} // AddedComplexity
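or_is_add fires only when the operands are known to share no bits; with disjoint bit sets there is no carry, so or and add produce the same value and the cheaper add/lea forms can be selected. A C check of that precondition:

  #include <assert.h>
  #include <stdint.h>

  int main(void)
  {
      uint64_t a = 0xFF00, b = 0x00FF;
      assert((a & b) == 0);          /* operands share no bits ...         */
      assert((a | b) == (a + b));    /* ... so or equals add (no carry)    */
      return 0;
  }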
@@ -2173,233 +2147,78 @@ def : Pat<(subc GR64:$src1, imm:$src2),
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//
-// Register-Register Addition with EFLAGS result
-def : Pat<(parallel (X86add_flag GR64:$src1, GR64:$src2),
- (implicit EFLAGS)),
+// addition
+def : Pat<(add GR64:$src1, GR64:$src2),
(ADD64rr GR64:$src1, GR64:$src2)>;
-
-// Register-Integer Addition with EFLAGS result
-def : Pat<(parallel (X86add_flag GR64:$src1, i64immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(add GR64:$src1, i64immSExt8:$src2),
(ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(parallel (X86add_flag GR64:$src1, i64immSExt32:$src2),
- (implicit EFLAGS)),
+def : Pat<(add GR64:$src1, i64immSExt32:$src2),
(ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
-
-// Register-Memory Addition with EFLAGS result
-def : Pat<(parallel (X86add_flag GR64:$src1, (loadi64 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
(ADD64rm GR64:$src1, addr:$src2)>;
-// Memory-Register Addition with EFLAGS result
-def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), GR64:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (ADD64mr addr:$dst, GR64:$src2)>;
-def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), i64immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (ADD64mi8 addr:$dst, i64immSExt8:$src2)>;
-def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst),
- i64immSExt32:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (ADD64mi32 addr:$dst, i64immSExt32:$src2)>;
-
-// Register-Register Subtraction with EFLAGS result
-def : Pat<(parallel (X86sub_flag GR64:$src1, GR64:$src2),
- (implicit EFLAGS)),
+// subtraction
+def : Pat<(sub GR64:$src1, GR64:$src2),
(SUB64rr GR64:$src1, GR64:$src2)>;
-
-// Register-Memory Subtraction with EFLAGS result
-def : Pat<(parallel (X86sub_flag GR64:$src1, (loadi64 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
(SUB64rm GR64:$src1, addr:$src2)>;
-
-// Register-Integer Subtraction with EFLAGS result
-def : Pat<(parallel (X86sub_flag GR64:$src1, i64immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
(SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(parallel (X86sub_flag GR64:$src1, i64immSExt32:$src2),
- (implicit EFLAGS)),
+def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
(SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;
-// Memory-Register Subtraction with EFLAGS result
-def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), GR64:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (SUB64mr addr:$dst, GR64:$src2)>;
-
-// Memory-Integer Subtraction with EFLAGS result
-def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst),
- i64immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (SUB64mi8 addr:$dst, i64immSExt8:$src2)>;
-def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst),
- i64immSExt32:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (SUB64mi32 addr:$dst, i64immSExt32:$src2)>;
-
-// Register-Register Signed Integer Multiplication with EFLAGS result
-def : Pat<(parallel (X86smul_flag GR64:$src1, GR64:$src2),
- (implicit EFLAGS)),
+// Multiply
+def : Pat<(mul GR64:$src1, GR64:$src2),
(IMUL64rr GR64:$src1, GR64:$src2)>;
-
-// Register-Memory Signed Integer Multiplication with EFLAGS result
-def : Pat<(parallel (X86smul_flag GR64:$src1, (loadi64 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
(IMUL64rm GR64:$src1, addr:$src2)>;
-
-// Register-Integer Signed Integer Multiplication with EFLAGS result
-def : Pat<(parallel (X86smul_flag GR64:$src1, i64immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
(IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(parallel (X86smul_flag GR64:$src1, i64immSExt32:$src2),
- (implicit EFLAGS)),
+def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
(IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;
-
-// Memory-Integer Signed Integer Multiplication with EFLAGS result
-def : Pat<(parallel (X86smul_flag (loadi64 addr:$src1), i64immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
(IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
-def : Pat<(parallel (X86smul_flag (loadi64 addr:$src1), i64immSExt32:$src2),
- (implicit EFLAGS)),
+def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
(IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;
-// INC and DEC with EFLAGS result. Note that these do not set CF.
-def : Pat<(parallel (X86inc_flag GR16:$src), (implicit EFLAGS)),
- (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
-def : Pat<(parallel (store (i16 (X86inc_flag (loadi16 addr:$dst))), addr:$dst),
- (implicit EFLAGS)),
- (INC64_16m addr:$dst)>, Requires<[In64BitMode]>;
-def : Pat<(parallel (X86dec_flag GR16:$src), (implicit EFLAGS)),
- (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
-def : Pat<(parallel (store (i16 (X86dec_flag (loadi16 addr:$dst))), addr:$dst),
- (implicit EFLAGS)),
- (DEC64_16m addr:$dst)>, Requires<[In64BitMode]>;
-
-def : Pat<(parallel (X86inc_flag GR32:$src), (implicit EFLAGS)),
- (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
-def : Pat<(parallel (store (i32 (X86inc_flag (loadi32 addr:$dst))), addr:$dst),
- (implicit EFLAGS)),
- (INC64_32m addr:$dst)>, Requires<[In64BitMode]>;
-def : Pat<(parallel (X86dec_flag GR32:$src), (implicit EFLAGS)),
- (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
-def : Pat<(parallel (store (i32 (X86dec_flag (loadi32 addr:$dst))), addr:$dst),
- (implicit EFLAGS)),
- (DEC64_32m addr:$dst)>, Requires<[In64BitMode]>;
-
-def : Pat<(parallel (X86inc_flag GR64:$src), (implicit EFLAGS)),
- (INC64r GR64:$src)>;
-def : Pat<(parallel (store (i64 (X86inc_flag (loadi64 addr:$dst))), addr:$dst),
- (implicit EFLAGS)),
- (INC64m addr:$dst)>;
-def : Pat<(parallel (X86dec_flag GR64:$src), (implicit EFLAGS)),
- (DEC64r GR64:$src)>;
-def : Pat<(parallel (store (i64 (X86dec_flag (loadi64 addr:$dst))), addr:$dst),
- (implicit EFLAGS)),
- (DEC64m addr:$dst)>;
-
-// Register-Register Logical Or with EFLAGS result
-def : Pat<(parallel (X86or_flag GR64:$src1, GR64:$src2),
- (implicit EFLAGS)),
- (OR64rr GR64:$src1, GR64:$src2)>;
+// inc/dec
+def : Pat<(add GR16:$src, 1), (INC64_16r GR16:$src)>, Requires<[In64BitMode]>;
+def : Pat<(add GR16:$src, -1), (DEC64_16r GR16:$src)>, Requires<[In64BitMode]>;
+def : Pat<(add GR32:$src, 1), (INC64_32r GR32:$src)>, Requires<[In64BitMode]>;
+def : Pat<(add GR32:$src, -1), (DEC64_32r GR32:$src)>, Requires<[In64BitMode]>;
+def : Pat<(add GR64:$src, 1), (INC64r GR64:$src)>;
+def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;
-// Register-Integer Logical Or with EFLAGS result
-def : Pat<(parallel (X86or_flag GR64:$src1, i64immSExt8:$src2),
- (implicit EFLAGS)),
+// or
+def : Pat<(or GR64:$src1, GR64:$src2),
+ (OR64rr GR64:$src1, GR64:$src2)>;
+def : Pat<(or GR64:$src1, i64immSExt8:$src2),
(OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(parallel (X86or_flag GR64:$src1, i64immSExt32:$src2),
- (implicit EFLAGS)),
+def : Pat<(or GR64:$src1, i64immSExt32:$src2),
(OR64ri32 GR64:$src1, i64immSExt32:$src2)>;
-
-// Register-Memory Logical Or with EFLAGS result
-def : Pat<(parallel (X86or_flag GR64:$src1, (loadi64 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
(OR64rm GR64:$src1, addr:$src2)>;
-// Memory-Register Logical Or with EFLAGS result
-def : Pat<(parallel (store (X86or_flag (loadi64 addr:$dst), GR64:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (OR64mr addr:$dst, GR64:$src2)>;
-def : Pat<(parallel (store (X86or_flag (loadi64 addr:$dst), i64immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (OR64mi8 addr:$dst, i64immSExt8:$src2)>;
-def : Pat<(parallel (store (X86or_flag (loadi64 addr:$dst), i64immSExt32:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (OR64mi32 addr:$dst, i64immSExt32:$src2)>;
-
-// Register-Register Logical XOr with EFLAGS result
-def : Pat<(parallel (X86xor_flag GR64:$src1, GR64:$src2),
- (implicit EFLAGS)),
+// xor
+def : Pat<(xor GR64:$src1, GR64:$src2),
(XOR64rr GR64:$src1, GR64:$src2)>;
-
-// Register-Integer Logical XOr with EFLAGS result
-def : Pat<(parallel (X86xor_flag GR64:$src1, i64immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
(XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(parallel (X86xor_flag GR64:$src1, i64immSExt32:$src2),
- (implicit EFLAGS)),
+def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
(XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
-
-// Register-Memory Logical XOr with EFLAGS result
-def : Pat<(parallel (X86xor_flag GR64:$src1, (loadi64 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
(XOR64rm GR64:$src1, addr:$src2)>;
-// Memory-Register Logical XOr with EFLAGS result
-def : Pat<(parallel (store (X86xor_flag (loadi64 addr:$dst), GR64:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (XOR64mr addr:$dst, GR64:$src2)>;
-def : Pat<(parallel (store (X86xor_flag (loadi64 addr:$dst), i64immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (XOR64mi8 addr:$dst, i64immSExt8:$src2)>;
-def : Pat<(parallel (store (X86xor_flag (loadi64 addr:$dst),
- i64immSExt32:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (XOR64mi32 addr:$dst, i64immSExt32:$src2)>;
-
-// Register-Register Logical And with EFLAGS result
-def : Pat<(parallel (X86and_flag GR64:$src1, GR64:$src2),
- (implicit EFLAGS)),
+// and
+def : Pat<(and GR64:$src1, GR64:$src2),
(AND64rr GR64:$src1, GR64:$src2)>;
-
-// Register-Integer Logical And with EFLAGS result
-def : Pat<(parallel (X86and_flag GR64:$src1, i64immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(and GR64:$src1, i64immSExt8:$src2),
(AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
-def : Pat<(parallel (X86and_flag GR64:$src1, i64immSExt32:$src2),
- (implicit EFLAGS)),
+def : Pat<(and GR64:$src1, i64immSExt32:$src2),
(AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
-
-// Register-Memory Logical And with EFLAGS result
-def : Pat<(parallel (X86and_flag GR64:$src1, (loadi64 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
(AND64rm GR64:$src1, addr:$src2)>;
-// Memory-Register Logical And with EFLAGS result
-def : Pat<(parallel (store (X86and_flag (loadi64 addr:$dst), GR64:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (AND64mr addr:$dst, GR64:$src2)>;
-def : Pat<(parallel (store (X86and_flag (loadi64 addr:$dst), i64immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (AND64mi8 addr:$dst, i64immSExt8:$src2)>;
-def : Pat<(parallel (store (X86and_flag (loadi64 addr:$dst),
- i64immSExt32:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (AND64mi32 addr:$dst, i64immSExt32:$src2)>;
-
//===----------------------------------------------------------------------===//
// X86-64 SSE Instructions
//===----------------------------------------------------------------------===//
@@ -2418,7 +2237,7 @@ def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
"mov{d|q}\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (bitconvert GR64:$src))]>;
-def MOV64toSDrm : RPDI<0x6E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
+def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
@@ -2429,57 +2248,3 @@ def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
"movq\t{$src, $dst|$dst, $src}",
[(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
-//===----------------------------------------------------------------------===//
-// X86-64 SSE4.1 Instructions
-//===----------------------------------------------------------------------===//
-
-/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
-multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
- def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
- (ins VR128:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set GR64:$dst,
- (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
- def mr : SS4AIi8<opc, MRMDestMem, (outs),
- (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
- addr:$dst)]>, OpSize, REX_W;
-}
-
-defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
-
-let isTwoAddress = 1 in {
- multiclass SS41I_insert64<bits<8> opc, string OpcodeStr> {
- def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
- OpSize, REX_W;
- def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
- imm:$src3)))]>, OpSize, REX_W;
- }
-}
-
-defm PINSRQ : SS41I_insert64<0x22, "pinsrq">;
-
-// -disable-16bit support.
-def : Pat<(truncstorei16 (i64 imm:$src), addr:$dst),
- (MOV16mi addr:$dst, imm:$src)>;
-def : Pat<(truncstorei16 GR64:$src, addr:$dst),
- (MOV16mr addr:$dst, (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit))>;
-def : Pat<(i64 (sextloadi16 addr:$dst)),
- (MOVSX64rm16 addr:$dst)>;
-def : Pat<(i64 (zextloadi16 addr:$dst)),
- (MOVZX64rm16 addr:$dst)>;
-def : Pat<(i64 (extloadi16 addr:$dst)),
- (MOVZX64rm16 addr:$dst)>;
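The simplified patterns above match plain DAG operations (sub, mul, add with +/-1, or, xor, and) directly to the flag-producing x86 instructions, and a 64-bit immediate operand chooses the ri8 or ri32 encoding depending on whether it fits a sign-extended 8-bit or 32-bit field (i64immSExt8 vs. i64immSExt32). A standalone sketch of that immediate classification; the helper names below are illustrative, not the real TableGen predicates:

#include <cstdint>
#include <cstdio>

// Sketch of the immediate classification implied by the patterns above:
//   i64immSExt8  -> value representable as a sign-extended 8-bit immediate
//   i64immSExt32 -> value representable as a sign-extended 32-bit immediate
// (fitsSExt8/fitsSExt32 are illustrative names, not the real predicates)
static bool fitsSExt8(int64_t v)  { return v == static_cast<int8_t>(v); }
static bool fitsSExt32(int64_t v) { return v == static_cast<int32_t>(v); }

int main() {
  const int64_t samples[] = {1, -1, 127, 128, 0x7fffffff, 0x80000000LL, -5000000000LL};
  for (int64_t v : samples) {
    const char *form = fitsSExt8(v)  ? "ri8 form (e.g. AND64ri8)"
                     : fitsSExt32(v) ? "ri32 form (e.g. AND64ri32)"
                     :                 "needs the value in a register (rr form)";
    std::printf("%lld -> %s\n", static_cast<long long>(v), form);
  }
}

Anything that fits neither immediate form has to be materialized in a register first, which is what the rr patterns cover.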
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86InstrBuilder.h b/libclamav/c++/llvm/lib/Target/X86/X86InstrBuilder.h
index c475b56..2a6a71d 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86InstrBuilder.h
+++ b/libclamav/c++/llvm/lib/Target/X86/X86InstrBuilder.h
@@ -49,7 +49,7 @@ struct X86AddressMode {
unsigned Scale;
unsigned IndexReg;
int Disp;
- GlobalValue *GV;
+ const GlobalValue *GV;
unsigned GVOpFlags;
X86AddressMode()
@@ -64,19 +64,15 @@ struct X86AddressMode {
///
static inline const MachineInstrBuilder &
addDirectMem(const MachineInstrBuilder &MIB, unsigned Reg) {
- // Because memory references are always represented with four
- // values, this adds: Reg, [1, NoReg, 0] to the instruction.
- return MIB.addReg(Reg).addImm(1).addReg(0).addImm(0);
+ // Because memory references are always represented with five
+ // values, this adds: Reg, 1, NoReg, 0, NoReg to the instruction.
+ return MIB.addReg(Reg).addImm(1).addReg(0).addImm(0).addReg(0);
}
-static inline const MachineInstrBuilder &
-addLeaOffset(const MachineInstrBuilder &MIB, int Offset) {
- return MIB.addImm(1).addReg(0).addImm(Offset);
-}
static inline const MachineInstrBuilder &
addOffset(const MachineInstrBuilder &MIB, int Offset) {
- return addLeaOffset(MIB, Offset).addReg(0);
+ return MIB.addImm(1).addReg(0).addImm(Offset).addReg(0);
}
/// addRegOffset - This function is used to add a memory reference of the form
@@ -89,25 +85,20 @@ addRegOffset(const MachineInstrBuilder &MIB,
return addOffset(MIB.addReg(Reg, getKillRegState(isKill)), Offset);
}
-static inline const MachineInstrBuilder &
-addLeaRegOffset(const MachineInstrBuilder &MIB,
- unsigned Reg, bool isKill, int Offset) {
- return addLeaOffset(MIB.addReg(Reg, getKillRegState(isKill)), Offset);
-}
-
/// addRegReg - This function is used to add a memory reference of the form:
/// [Reg + Reg].
static inline const MachineInstrBuilder &addRegReg(const MachineInstrBuilder &MIB,
unsigned Reg1, bool isKill1,
unsigned Reg2, bool isKill2) {
return MIB.addReg(Reg1, getKillRegState(isKill1)).addImm(1)
- .addReg(Reg2, getKillRegState(isKill2)).addImm(0);
+ .addReg(Reg2, getKillRegState(isKill2)).addImm(0).addReg(0);
}
static inline const MachineInstrBuilder &
-addLeaAddress(const MachineInstrBuilder &MIB, const X86AddressMode &AM) {
- assert (AM.Scale == 1 || AM.Scale == 2 || AM.Scale == 4 || AM.Scale == 8);
-
+addFullAddress(const MachineInstrBuilder &MIB,
+ const X86AddressMode &AM) {
+ assert(AM.Scale == 1 || AM.Scale == 2 || AM.Scale == 4 || AM.Scale == 8);
+
if (AM.BaseType == X86AddressMode::RegBase)
MIB.addReg(AM.Base.Reg);
else if (AM.BaseType == X86AddressMode::FrameIndexBase)
@@ -116,15 +107,11 @@ addLeaAddress(const MachineInstrBuilder &MIB, const X86AddressMode &AM) {
assert (0);
MIB.addImm(AM.Scale).addReg(AM.IndexReg);
if (AM.GV)
- return MIB.addGlobalAddress(AM.GV, AM.Disp, AM.GVOpFlags);
+ MIB.addGlobalAddress(AM.GV, AM.Disp, AM.GVOpFlags);
else
- return MIB.addImm(AM.Disp);
-}
-
-static inline const MachineInstrBuilder &
-addFullAddress(const MachineInstrBuilder &MIB,
- const X86AddressMode &AM) {
- return addLeaAddress(MIB, AM).addReg(0);
+ MIB.addImm(AM.Disp);
+
+ return MIB.addReg(0);
}
/// addFrameReference - This function is used to add a reference to the base of
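The builder changes above switch every x86 memory reference from four operands to five: Base, Scale, Index, Displacement and a Segment slot, with register 0 standing in for NoReg. A minimal standalone model of that layout, mirroring addDirectMem and addRegOffset without the LLVM MachineInstrBuilder API (the struct and function names below are illustrative only):

#include <cstdio>

// Standalone model of the five-operand x86 memory reference used above:
// [BaseReg + Scale*IndexReg + Disp], plus a segment-override slot.
// Register number 0 stands in for "NoReg", as in the diff's comments.
struct MemOperand { unsigned Base; unsigned Scale; unsigned Index; int Disp; unsigned Segment; };

// Mirrors addDirectMem(Reg): Reg, 1, NoReg, 0, NoReg.
static MemOperand directMem(unsigned Reg) { return {Reg, 1, 0, 0, 0}; }

// Mirrors addRegOffset(Reg, Offset): Reg, 1, NoReg, Offset, NoReg.
static MemOperand regOffset(unsigned Reg, int Offset) { return {Reg, 1, 0, Offset, 0}; }

int main() {
  MemOperand a = directMem(5);       // e.g. [r5]
  MemOperand b = regOffset(5, -16);  // e.g. [r5 - 16]
  std::printf("%u %u %u %d %u\n", a.Base, a.Scale, a.Index, a.Disp, a.Segment);
  std::printf("%u %u %u %d %u\n", b.Base, b.Scale, b.Index, b.Disp, b.Segment);
}

The real builders in the header return MachineInstrBuilder references so the calls chain; the model above only captures the operand order.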
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86InstrFMA.td b/libclamav/c++/llvm/lib/Target/X86/X86InstrFMA.td
new file mode 100644
index 0000000..d868773
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Target/X86/X86InstrFMA.td
@@ -0,0 +1,60 @@
+//====- X86InstrFMA.td - Describe the X86 Instruction Set --*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes FMA (Fused Multiply-Add) instructions.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// FMA3 - Intel 3 operand Fused Multiply-Add instructions
+//===----------------------------------------------------------------------===//
+
+multiclass fma_rm<bits<8> opc, string OpcodeStr> {
+ def r : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>;
+ def m : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, f128mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>;
+ def rY : FMA3<opc, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>;
+ def mY : FMA3<opc, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, f256mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>;
+}
+
+multiclass fma_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
+ string OpcodeStr, string PackTy> {
+ defm r132 : fma_rm<opc132, !strconcat(OpcodeStr, !strconcat("132", PackTy))>;
+ defm r213 : fma_rm<opc213, !strconcat(OpcodeStr, !strconcat("213", PackTy))>;
+ defm r231 : fma_rm<opc231, !strconcat(OpcodeStr, !strconcat("231", PackTy))>;
+}
+
+let isAsmParserOnly = 1 in {
+ // Fused Multiply-Add
+ defm VFMADDPS : fma_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps">;
+ defm VFMADDPD : fma_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd">, VEX_W;
+ defm VFMADDSUBPS : fma_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps">;
+ defm VFMADDSUBPD : fma_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd">, VEX_W;
+ defm VFMSUBADDPS : fma_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps">;
+ defm VFMSUBADDPD : fma_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd">, VEX_W;
+ defm VFMSUBPS : fma_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps">;
+ defm VFMSUBPD : fma_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd">, VEX_W;
+
+ // Fused Negative Multiply-Add
+ defm VFNMADDPS : fma_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps">;
+ defm VFNMADDPD : fma_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd">, VEX_W;
+ defm VFNMSUBPS : fma_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps">;
+ defm VFNMSUBPD : fma_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd">, VEX_W;
+}
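The new X86InstrFMA.td only declares the FMA3 mnemonics for the assembler (isAsmParserOnly = 1); no selection patterns are attached, and the 132/213/231 suffixes only say which of the three operands feed the multiply and which is added. What every form shares is a single-rounding a*b + c, which std::fma shows directly; a small scalar illustration, not tied to these instruction definitions:

#include <cmath>
#include <cstdio>

int main() {
  // (2^27 + 1)^2 = 2^54 + 2^28 + 1 does not fit in a 53-bit significand,
  // so the plain product rounds, while the fused form keeps the lost bit.
  double a = 134217729.0;              // 2^27 + 1
  double b = 134217729.0;
  double c = -(a * b);                 // negated rounded product
  double fused = std::fma(a, b, c);    // single rounding: prints 1
  double split = a * b + c;            // two roundings: prints 0
  std::printf("fused=%g split=%g\n", fused, split);
}

With FMA enabled at compile time a compiler may lower the std::fma call to one of the vfmadd forms defined above.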
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86InstrFPStack.td b/libclamav/c++/llvm/lib/Target/X86/X86InstrFPStack.td
index ae24bfb..9c9bcc7 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86InstrFPStack.td
+++ b/libclamav/c++/llvm/lib/Target/X86/X86InstrFPStack.td
@@ -108,10 +108,6 @@ let usesCustomInserter = 1 in { // Expanded after instruction selection.
[(X86fp_to_i64mem RFP80:$src, addr:$dst)]>;
}
-let isTerminator = 1 in
- let Defs = [FP0, FP1, FP2, FP3, FP4, FP5, FP6] in
- def FP_REG_KILL : I<0, Pseudo, (outs), (ins), "##FP_REG_KILL", []>;
-
// All FP Stack operations are represented with four instructions here. The
// first three instructions, generated by the instruction selector, use "RFP32"
// "RFP64" or "RFP80" registers: traditional register files to reference 32-bit,
@@ -157,7 +153,7 @@ def FpSET_ST1_64 : FpI_<(outs), (ins RFP64:$src), SpecialFP, []>; // ST(1) = FPR
def FpSET_ST1_80 : FpI_<(outs), (ins RFP80:$src), SpecialFP, []>; // ST(1) = FPR
}
-// FpIf32, FpIf64 - Floating Point Psuedo Instruction template.
+// FpIf32, FpIf64 - Floating Point Pseudo Instruction template.
// f32 instructions can use SSE1 and are predicated on FPStackf32 == !SSE1.
// f64 instructions can use SSE2 and are predicated on FPStackf64 == !SSE2.
// f80 instructions cannot use SSE and use neither of these.
@@ -327,8 +323,8 @@ def TST_F : FPI<0xE4, RawFrm, (outs), (ins), "ftst">, D9;
// Versions of FP instructions that take a single memory operand. Added for the
// disassembler; remove as they are included with patterns elsewhere.
-def FCOM32m : FPI<0xD8, MRM2m, (outs), (ins f32mem:$src), "fcom{l}\t$src">;
-def FCOMP32m : FPI<0xD8, MRM3m, (outs), (ins f32mem:$src), "fcomp{l}\t$src">;
+def FCOM32m : FPI<0xD8, MRM2m, (outs), (ins f32mem:$src), "fcom{s}\t$src">;
+def FCOMP32m : FPI<0xD8, MRM3m, (outs), (ins f32mem:$src), "fcomp{s}\t$src">;
def FLDENVm : FPI<0xD9, MRM4m, (outs), (ins f32mem:$src), "fldenv\t$src">;
def FSTENVm : FPI<0xD9, MRM6m, (outs f32mem:$dst), (ins), "fnstenv\t$dst">;
@@ -336,35 +332,42 @@ def FSTENVm : FPI<0xD9, MRM6m, (outs f32mem:$dst), (ins), "fnstenv\t$dst">;
def FICOM32m : FPI<0xDA, MRM2m, (outs), (ins i32mem:$src), "ficom{l}\t$src">;
def FICOMP32m: FPI<0xDA, MRM3m, (outs), (ins i32mem:$src), "ficomp{l}\t$src">;
-def FCOM64m : FPI<0xDC, MRM2m, (outs), (ins f64mem:$src), "fcom{ll}\t$src">;
-def FCOMP64m : FPI<0xDC, MRM3m, (outs), (ins f64mem:$src), "fcomp{ll}\t$src">;
+def FCOM64m : FPI<0xDC, MRM2m, (outs), (ins f64mem:$src), "fcom{l}\t$src">;
+def FCOMP64m : FPI<0xDC, MRM3m, (outs), (ins f64mem:$src), "fcomp{l}\t$src">;
def FRSTORm : FPI<0xDD, MRM4m, (outs f32mem:$dst), (ins), "frstor\t$dst">;
def FSAVEm : FPI<0xDD, MRM6m, (outs f32mem:$dst), (ins), "fnsave\t$dst">;
def FNSTSWm : FPI<0xDD, MRM7m, (outs f32mem:$dst), (ins), "fnstsw\t$dst">;
-def FICOM16m : FPI<0xDE, MRM2m, (outs), (ins i16mem:$src), "ficom{w}\t$src">;
-def FICOMP16m: FPI<0xDE, MRM3m, (outs), (ins i16mem:$src), "ficomp{w}\t$src">;
+def FICOM16m : FPI<0xDE, MRM2m, (outs), (ins i16mem:$src), "ficom{s}\t$src">;
+def FICOMP16m: FPI<0xDE, MRM3m, (outs), (ins i16mem:$src), "ficomp{s}\t$src">;
def FBLDm : FPI<0xDF, MRM4m, (outs), (ins f32mem:$src), "fbld\t$src">;
def FBSTPm : FPI<0xDF, MRM6m, (outs f32mem:$dst), (ins), "fbstp\t$dst">;
// Floating point cmovs.
+class FpIf32CMov<dag outs, dag ins, FPFormat fp, list<dag> pattern> :
+ FpI_<outs, ins, fp, pattern>, Requires<[FPStackf32, HasCMov]>;
+class FpIf64CMov<dag outs, dag ins, FPFormat fp, list<dag> pattern> :
+ FpI_<outs, ins, fp, pattern>, Requires<[FPStackf64, HasCMov]>;
+
multiclass FPCMov<PatLeaf cc> {
- def _Fp32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, RFP32:$src2),
+ def _Fp32 : FpIf32CMov<(outs RFP32:$dst), (ins RFP32:$src1, RFP32:$src2),
CondMovFP,
[(set RFP32:$dst, (X86cmov RFP32:$src1, RFP32:$src2,
cc, EFLAGS))]>;
- def _Fp64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, RFP64:$src2),
+ def _Fp64 : FpIf64CMov<(outs RFP64:$dst), (ins RFP64:$src1, RFP64:$src2),
CondMovFP,
[(set RFP64:$dst, (X86cmov RFP64:$src1, RFP64:$src2,
cc, EFLAGS))]>;
def _Fp80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, RFP80:$src2),
CondMovFP,
[(set RFP80:$dst, (X86cmov RFP80:$src1, RFP80:$src2,
- cc, EFLAGS))]>;
+ cc, EFLAGS))]>,
+ Requires<[HasCMov]>;
}
-let Uses = [EFLAGS], isTwoAddress = 1 in {
+
+let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
defm CMOVB : FPCMov<X86_COND_B>;
defm CMOVBE : FPCMov<X86_COND_BE>;
defm CMOVE : FPCMov<X86_COND_E>;
@@ -373,8 +376,9 @@ defm CMOVNB : FPCMov<X86_COND_AE>;
defm CMOVNBE: FPCMov<X86_COND_A>;
defm CMOVNE : FPCMov<X86_COND_NE>;
defm CMOVNP : FPCMov<X86_COND_NP>;
-}
+} // Uses = [EFLAGS], Constraints = "$src1 = $dst"
+let Predicates = [HasCMov] in {
// These are not factored because there's no clean way to pass DA/DB.
def CMOVB_F : FPI<0xC0, AddRegFrm, (outs RST:$op), (ins),
"fcmovb\t{$op, %st(0)|%ST(0), $op}">, DA;
@@ -392,6 +396,7 @@ def CMOVNE_F : FPI<0xC8, AddRegFrm, (outs RST:$op), (ins),
"fcmovne\t{$op, %st(0)|%ST(0), $op}">, DB;
def CMOVNP_F : FPI<0xD8, AddRegFrm, (outs RST:$op), (ins),
"fcmovnu\t{$op, %st(0)|%ST(0), $op}">, DB;
+} // Predicates = [HasCMov]
// Floating point loads & stores.
let canFoldAsLoad = 1 in {
@@ -553,15 +558,13 @@ def UCOM_Fpr64 : FpIf64<(outs), (ins RFP64:$lhs, RFP64:$rhs), CompareFP,
def UCOM_Fpr80 : FpI_ <(outs), (ins RFP80:$lhs, RFP80:$rhs), CompareFP,
[]>; // FPSW = cmp ST(0) with ST(i)
+// CC = ST(0) cmp ST(i)
def UCOM_FpIr32: FpIf32<(outs), (ins RFP32:$lhs, RFP32:$rhs), CompareFP,
- [(X86cmp RFP32:$lhs, RFP32:$rhs),
- (implicit EFLAGS)]>; // CC = ST(0) cmp ST(i)
+ [(set EFLAGS, (X86cmp RFP32:$lhs, RFP32:$rhs))]>;
def UCOM_FpIr64: FpIf64<(outs), (ins RFP64:$lhs, RFP64:$rhs), CompareFP,
- [(X86cmp RFP64:$lhs, RFP64:$rhs),
- (implicit EFLAGS)]>; // CC = ST(0) cmp ST(i)
+ [(set EFLAGS, (X86cmp RFP64:$lhs, RFP64:$rhs))]>;
def UCOM_FpIr80: FpI_<(outs), (ins RFP80:$lhs, RFP80:$rhs), CompareFP,
- [(X86cmp RFP80:$lhs, RFP80:$rhs),
- (implicit EFLAGS)]>; // CC = ST(0) cmp ST(i)
+ [(set EFLAGS, (X86cmp RFP80:$lhs, RFP80:$rhs))]>;
}
let Defs = [EFLAGS], Uses = [ST0] in {
@@ -673,19 +676,19 @@ def : Pat<(X86fildflag addr:$src, i64), (ILD_Fp64m64 addr:$src)>;
// FP extensions map onto simple pseudo-value conversions if they are to/from
// the FP stack.
-def : Pat<(f64 (fextend RFP32:$src)), (MOV_Fp3264 RFP32:$src)>,
+def : Pat<(f64 (fextend RFP32:$src)), (COPY_TO_REGCLASS RFP32:$src, RFP64)>,
Requires<[FPStackf32]>;
-def : Pat<(f80 (fextend RFP32:$src)), (MOV_Fp3280 RFP32:$src)>,
+def : Pat<(f80 (fextend RFP32:$src)), (COPY_TO_REGCLASS RFP32:$src, RFP80)>,
Requires<[FPStackf32]>;
-def : Pat<(f80 (fextend RFP64:$src)), (MOV_Fp6480 RFP64:$src)>,
+def : Pat<(f80 (fextend RFP64:$src)), (COPY_TO_REGCLASS RFP64:$src, RFP80)>,
Requires<[FPStackf64]>;
// FP truncations map onto simple pseudo-value conversions if they are to/from
// the FP stack. We have validated that only value-preserving truncations make
// it through isel.
-def : Pat<(f32 (fround RFP64:$src)), (MOV_Fp6432 RFP64:$src)>,
+def : Pat<(f32 (fround RFP64:$src)), (COPY_TO_REGCLASS RFP64:$src, RFP32)>,
Requires<[FPStackf32]>;
-def : Pat<(f32 (fround RFP80:$src)), (MOV_Fp8032 RFP80:$src)>,
+def : Pat<(f32 (fround RFP80:$src)), (COPY_TO_REGCLASS RFP80:$src, RFP32)>,
Requires<[FPStackf32]>;
-def : Pat<(f64 (fround RFP80:$src)), (MOV_Fp8064 RFP80:$src)>,
+def : Pat<(f64 (fround RFP80:$src)), (COPY_TO_REGCLASS RFP80:$src, RFP64)>,
Requires<[FPStackf64]>;
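The FP-stack changes above gate the fcmov patterns behind a HasCMov predicate (the x87 fcmov forms only exist on CPUs that also provide integer cmov) and turn the UCOM comparisons into explicit EFLAGS definitions. A quick way to probe the corresponding CPU feature at run time, using the GCC/Clang __builtin_cpu_supports builtin (compiler-specific, shown only to make the HasCMov predicate concrete):

#include <cstdio>

int main() {
  // GCC/Clang builtin; "cmov" is one of the recognized feature names.
  if (__builtin_cpu_supports("cmov"))
    std::printf("cmov/fcmov available: the HasCMov patterns above apply\n");
  else
    std::printf("no cmov: the fcmov patterns above would not be selected\n");
}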
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86InstrFormats.td b/libclamav/c++/llvm/lib/Target/X86/X86InstrFormats.td
index bb81cbf..79187e9 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86InstrFormats.td
+++ b/libclamav/c++/llvm/lib/Target/X86/X86InstrFormats.td
@@ -39,6 +39,7 @@ def MRM_E8 : Format<39>;
def MRM_F0 : Format<40>;
def MRM_F8 : Format<41>;
def MRM_F9 : Format<42>;
+def RawFrmImm16 : Format<43>;
// ImmType - This specifies the immediate type used by an instruction. This is
// part of the ad-hoc solution used to emit machine instruction encodings by our
@@ -50,9 +51,10 @@ def NoImm : ImmType<0>;
def Imm8 : ImmType<1>;
def Imm8PCRel : ImmType<2>;
def Imm16 : ImmType<3>;
-def Imm32 : ImmType<4>;
-def Imm32PCRel : ImmType<5>;
-def Imm64 : ImmType<6>;
+def Imm16PCRel : ImmType<4>;
+def Imm32 : ImmType<5>;
+def Imm32PCRel : ImmType<6>;
+def Imm64 : ImmType<7>;
// FPFormat - This specifies what form this FP instruction has. This is used by
// the Floating-Point stackifier pass.
@@ -68,6 +70,16 @@ def CompareFP : FPFormat<5>;
def CondMovFP : FPFormat<6>;
def SpecialFP : FPFormat<7>;
+// Class specifying the SSE execution domain, used by the SSEDomainFix pass.
+// Keep in sync with tables in X86InstrInfo.cpp.
+class Domain<bits<2> val> {
+ bits<2> Value = val;
+}
+def GenericDomain : Domain<0>;
+def SSEPackedSingle : Domain<1>;
+def SSEPackedDouble : Domain<2>;
+def SSEPackedInt : Domain<3>;
+
// Prefix byte classes which are used to indicate to the ad-hoc machine code
// emitter that various prefix bytes are required.
class OpSize { bit hasOpSizePrefix = 1; }
@@ -91,9 +103,14 @@ class XS { bits<4> Prefix = 12; }
class T8 { bits<4> Prefix = 13; }
class TA { bits<4> Prefix = 14; }
class TF { bits<4> Prefix = 15; }
+class VEX { bit hasVEXPrefix = 1; }
+class VEX_W { bit hasVEX_WPrefix = 1; }
+class VEX_4V : VEX { bit hasVEX_4VPrefix = 1; }
+class VEX_I8IMM { bit hasVEX_i8ImmReg = 1; }
+class VEX_L { bit hasVEX_L = 1; }
class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
- string AsmStr>
+ string AsmStr, Domain d = GenericDomain>
: Instruction {
let Namespace = "X86";
@@ -101,7 +118,6 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
Format Form = f;
bits<6> FormBits = Form.Value;
ImmType ImmT = i;
- bits<3> ImmTypeBits = ImmT.Value;
dag OutOperandList = outs;
dag InOperandList = ins;
@@ -115,20 +131,45 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
bits<4> Prefix = 0; // Which prefix byte does this inst have?
bit hasREX_WPrefix = 0; // Does this inst requires the REX.W prefix?
- FPFormat FPForm; // What flavor of FP instruction is this?
- bits<3> FPFormBits = 0;
+ FPFormat FPForm = NotFP; // What flavor of FP instruction is this?
bit hasLockPrefix = 0; // Does this inst have a 0xF0 prefix?
bits<2> SegOvrBits = 0; // Segment override prefix.
+ Domain ExeDomain = d;
+ bit hasVEXPrefix = 0; // Does this inst require a VEX prefix?
+ bit hasVEX_WPrefix = 0; // Does this inst set the VEX_W field?
+ bit hasVEX_4VPrefix = 0; // Does this inst require the VEX.VVVV field?
+ bit hasVEX_i8ImmReg = 0; // Does this inst require the last source register
+ // to be encoded in an immediate field?
+ bit hasVEX_L = 0; // Does this inst use large (256-bit) registers?
+
+ // TSFlags layout should be kept in sync with X86InstrInfo.h.
+ let TSFlags{5-0} = FormBits;
+ let TSFlags{6} = hasOpSizePrefix;
+ let TSFlags{7} = hasAdSizePrefix;
+ let TSFlags{11-8} = Prefix;
+ let TSFlags{12} = hasREX_WPrefix;
+ let TSFlags{15-13} = ImmT.Value;
+ let TSFlags{18-16} = FPForm.Value;
+ let TSFlags{19} = hasLockPrefix;
+ let TSFlags{21-20} = SegOvrBits;
+ let TSFlags{23-22} = ExeDomain.Value;
+ let TSFlags{31-24} = Opcode;
+ let TSFlags{32} = hasVEXPrefix;
+ let TSFlags{33} = hasVEX_WPrefix;
+ let TSFlags{34} = hasVEX_4VPrefix;
+ let TSFlags{35} = hasVEX_i8ImmReg;
+ let TSFlags{36} = hasVEX_L;
}
-class I<bits<8> o, Format f, dag outs, dag ins, string asm, list<dag> pattern>
- : X86Inst<o, f, NoImm, outs, ins, asm> {
+class I<bits<8> o, Format f, dag outs, dag ins, string asm,
+ list<dag> pattern, Domain d = GenericDomain>
+ : X86Inst<o, f, NoImm, outs, ins, asm, d> {
let Pattern = pattern;
let CodeSize = 3;
}
class Ii8 <bits<8> o, Format f, dag outs, dag ins, string asm,
- list<dag> pattern>
- : X86Inst<o, f, Imm8 , outs, ins, asm> {
+ list<dag> pattern, Domain d = GenericDomain>
+ : X86Inst<o, f, Imm8, outs, ins, asm, d> {
let Pattern = pattern;
let CodeSize = 3;
}
@@ -151,6 +192,13 @@ class Ii32<bits<8> o, Format f, dag outs, dag ins, string asm,
let CodeSize = 3;
}
+class Ii16PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : X86Inst<o, f, Imm16PCRel, outs, ins, asm> {
+ let Pattern = pattern;
+ let CodeSize = 3;
+}
+
class Ii32PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
list<dag> pattern>
: X86Inst<o, f, Imm32PCRel, outs, ins, asm> {
@@ -163,10 +211,10 @@ class Ii32PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
class FPI<bits<8> o, Format F, dag outs, dag ins, string asm>
: I<o, F, outs, ins, asm, []> {}
-// FpI_ - Floating Point Psuedo Instruction template. Not Predicated.
+// FpI_ - Floating Point Pseudo Instruction template. Not Predicated.
class FpI_<dag outs, dag ins, FPFormat fp, list<dag> pattern>
: X86Inst<0, Pseudo, NoImm, outs, ins, ""> {
- let FPForm = fp; let FPFormBits = FPForm.Value;
+ let FPForm = fp;
let Pattern = pattern;
}
@@ -177,33 +225,88 @@ class FpI_<dag outs, dag ins, FPFormat fp, list<dag> pattern>
// Iseg32 - 16-bit segment selector, 32-bit offset
class Iseg16 <bits<8> o, Format f, dag outs, dag ins, string asm,
- list<dag> pattern> : X86Inst<o, f, NoImm, outs, ins, asm> {
+ list<dag> pattern> : X86Inst<o, f, Imm16, outs, ins, asm> {
let Pattern = pattern;
let CodeSize = 3;
}
class Iseg32 <bits<8> o, Format f, dag outs, dag ins, string asm,
- list<dag> pattern> : X86Inst<o, f, NoImm, outs, ins, asm> {
+ list<dag> pattern> : X86Inst<o, f, Imm32, outs, ins, asm> {
let Pattern = pattern;
let CodeSize = 3;
}
+// SI - SSE 1 & 2 scalar instructions
+class SI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
+ : I<o, F, outs, ins, asm, pattern> {
+ let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX],
+ !if(!eq(Prefix, 12 /* XS */), [HasSSE1], [HasSSE2]));
+
+ // AVX instructions have a 'v' prefix in the mnemonic
+ let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
+}
+
+// SIi8 - SSE 1 & 2 scalar instructions
+class SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : Ii8<o, F, outs, ins, asm, pattern> {
+ let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX],
+ !if(!eq(Prefix, 12 /* XS */), [HasSSE1], [HasSSE2]));
+
+ // AVX instructions have a 'v' prefix in the mnemonic
+ let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
+}
+
+// PI - SSE 1 & 2 packed instructions
+class PI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern,
+ Domain d>
+ : I<o, F, outs, ins, asm, pattern, d> {
+ let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX],
+ !if(hasOpSizePrefix /* OpSize */, [HasSSE2], [HasSSE1]));
+
+ // AVX instructions have a 'v' prefix in the mnemonic
+ let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
+}
+
+// PIi8 - SSE 1 & 2 packed instructions with immediate
+class PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, Domain d>
+ : Ii8<o, F, outs, ins, asm, pattern, d> {
+ let Predicates = !if(hasVEX_4VPrefix /* VEX */, [HasAVX],
+ !if(hasOpSizePrefix /* OpSize */, [HasSSE2], [HasSSE1]));
+
+ // AVX instructions have a 'v' prefix in the mnemonic
+ let AsmString = !if(hasVEX_4VPrefix, !strconcat("v", asm), asm);
+}
+
// SSE1 Instruction Templates:
//
// SSI - SSE1 instructions with XS prefix.
// PSI - SSE1 instructions with TB prefix.
// PSIi8 - SSE1 instructions with ImmT == Imm8 and TB prefix.
+// VSSI - SSE1 instructions with XS prefix in AVX form.
+// VPSI - SSE1 instructions with TB prefix in AVX form.
class SSI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
: I<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE1]>;
-class SSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+class SSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
: Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE1]>;
class PSI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, TB, Requires<[HasSSE1]>;
+ : I<o, F, outs, ins, asm, pattern, SSEPackedSingle>, TB,
+ Requires<[HasSSE1]>;
class PSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, TB, Requires<[HasSSE1]>;
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedSingle>, TB,
+ Requires<[HasSSE1]>;
+class VSSI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern>, XS,
+ Requires<[HasAVX]>;
+class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern, SSEPackedSingle>,
+ Requires<[HasAVX]>;
// SSE2 Instruction Templates:
//
@@ -212,6 +315,8 @@ class PSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
// SSDIi8 - SSE2 instructions with ImmT == Imm8 and XS prefix.
// PDI - SSE2 instructions with TB and OpSize prefixes.
// PDIi8 - SSE2 instructions with ImmT == Imm8 and TB and OpSize prefixes.
+// VSDI - SSE2 instructions with XD prefix in AVX form.
+// VPDI - SSE2 instructions with TB and OpSize prefixes in AVX form.
class SDI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
: I<o, F, outs, ins, asm, pattern>, XD, Requires<[HasSSE2]>;
@@ -222,10 +327,20 @@ class SSDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
: Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE2]>;
class PDI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, TB, OpSize, Requires<[HasSSE2]>;
+ : I<o, F, outs, ins, asm, pattern, SSEPackedDouble>, TB, OpSize,
+ Requires<[HasSSE2]>;
class PDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, TB, OpSize, Requires<[HasSSE2]>;
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedDouble>, TB, OpSize,
+ Requires<[HasSSE2]>;
+class VSDI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern>, XD,
+ Requires<[HasAVX]>;
+class VPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern, SSEPackedDouble>,
+ OpSize, Requires<[HasAVX]>;
// SSE3 Instruction Templates:
//
@@ -235,12 +350,15 @@ class PDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
class S3SI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE3]>;
+ : I<o, F, outs, ins, asm, pattern, SSEPackedSingle>, XS,
+ Requires<[HasSSE3]>;
class S3DI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, XD, Requires<[HasSSE3]>;
+ : I<o, F, outs, ins, asm, pattern, SSEPackedDouble>, XD,
+ Requires<[HasSSE3]>;
class S3I<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, TB, OpSize, Requires<[HasSSE3]>;
+ : I<o, F, outs, ins, asm, pattern, SSEPackedDouble>, TB, OpSize,
+ Requires<[HasSSE3]>;
// SSSE3 Instruction Templates:
@@ -254,10 +372,12 @@ class S3I<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
class SS38I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, T8, Requires<[HasSSSE3]>;
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8,
+ Requires<[HasSSSE3]>;
class SS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, TA, Requires<[HasSSSE3]>;
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA,
+ Requires<[HasSSSE3]>;
// SSE4.1 Instruction Templates:
//
@@ -266,17 +386,20 @@ class SS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
//
class SS48I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, T8, Requires<[HasSSE41]>;
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8,
+ Requires<[HasSSE41]>;
class SS4AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : Ii8<o, F, outs, ins, asm, pattern>, TA, Requires<[HasSSE41]>;
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA,
+ Requires<[HasSSE41]>;
// SSE4.2 Instruction Templates:
//
// SS428I - SSE 4.2 instructions with T8 prefix.
class SS428I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, T8, Requires<[HasSSE42]>;
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8,
+ Requires<[HasSSE42]>;
// SS42FI - SSE 4.2 instructions with TF prefix.
class SS42FI<bits<8> o, Format F, dag outs, dag ins, string asm,
@@ -286,7 +409,48 @@ class SS42FI<bits<8> o, Format F, dag outs, dag ins, string asm,
// SS42AI = SSE 4.2 instructions with TA prefix
class SS42AI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
- : I<o, F, outs, ins, asm, pattern>, TA, Requires<[HasSSE42]>;
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA,
+ Requires<[HasSSE42]>;
+
+// AVX Instruction Templates:
+// Instructions introduced in AVX (no SSE equivalent forms)
+//
+// AVX8I - AVX instructions with T8 and OpSize prefix.
+// AVXAIi8 - AVX instructions with TA, OpSize prefix and ImmT = Imm8.
+class AVX8I<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8, OpSize,
+ Requires<[HasAVX]>;
+class AVXAIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, OpSize,
+ Requires<[HasAVX]>;
+
+// AES Instruction Templates:
+//
+// AES8I
+// These use the same encoding as the SSE4.2 T8 and TA encodings.
+class AES8I<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag>pattern>
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8,
+ Requires<[HasAES]>;
+
+class AESAI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA,
+ Requires<[HasAES]>;
+
+// CLMUL Instruction Templates
+class CLMULIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag>pattern>
+ : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA,
+ OpSize, VEX_4V, Requires<[HasAVX, HasCLMUL]>;
+
+// FMA3 Instruction Templates
+class FMA3<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag>pattern>
+ : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8,
+ OpSize, VEX_4V, Requires<[HasFMA3]>;
// X86-64 Instruction templates...
//
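The TSFlags assignments above pack every X86Inst field into fixed bit ranges: FormBits in bits 5-0, the prefix nibble in 11-8, the immediate and FP-format codes in 15-13 and 18-16, the execution domain in 23-22, the opcode byte in 31-24 and the new VEX flags from bit 32 upward. A small decoder sketch built from exactly those positions; the helper below is illustrative and is not the accessor set in X86InstrInfo.h:

#include <cstdint>
#include <cstdio>

// Extract bits lo..hi (inclusive) from a packed TSFlags-style value,
// following the 'let TSFlags{...}' layout in the diff above.
static uint64_t field(uint64_t flags, unsigned lo, unsigned hi) {
  return (flags >> lo) & ((1ULL << (hi - lo + 1)) - 1);
}

int main() {
  // Pack an illustrative value the same way the layout above does:
  // an arbitrary form code, OpSize set, the XS prefix (12), opcode 0x6E,
  // and ExeDomain = SSEPackedDouble (2).
  uint64_t tsFlags = (uint64_t)0x0F
                   | (uint64_t)1    << 6
                   | (uint64_t)12   << 8
                   | (uint64_t)2    << 22
                   | (uint64_t)0x6E << 24;

  std::printf("Form=%llu OpSize=%llu Prefix=%llu Opcode=0x%02llx Domain=%llu\n",
              (unsigned long long)field(tsFlags, 0, 5),
              (unsigned long long)field(tsFlags, 6, 6),
              (unsigned long long)field(tsFlags, 8, 11),
              (unsigned long long)field(tsFlags, 24, 31),
              (unsigned long long)field(tsFlags, 22, 23));
}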
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/libclamav/c++/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index 6b9478d..01149b6 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/libclamav/c++/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -60,3 +60,409 @@ def mmx_pshufw : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], MMX_SHUFFLE_get_shuf_imm>;
+
+//===----------------------------------------------------------------------===//
+// SSE specific DAG Nodes.
+//===----------------------------------------------------------------------===//
+
+def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
+ SDTCisFP<0>, SDTCisInt<2> ]>;
+def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
+ SDTCisFP<1>, SDTCisVT<3, i8>]>;
+
+def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>;
+def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>;
+def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
+def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
+def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
+def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
+def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
+def X86pshufb : SDNode<"X86ISD::PSHUFB",
+ SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>]>>;
+def X86pextrb : SDNode<"X86ISD::PEXTRB",
+ SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
+def X86pextrw : SDNode<"X86ISD::PEXTRW",
+ SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
+def X86pinsrb : SDNode<"X86ISD::PINSRB",
+ SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
+ SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
+def X86pinsrw : SDNode<"X86ISD::PINSRW",
+ SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
+ SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
+def X86insrtps : SDNode<"X86ISD::INSERTPS",
+ SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
+ SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
+def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
+ SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
+def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
+ [SDNPHasChain, SDNPMayLoad]>;
+def X86vshl : SDNode<"X86ISD::VSHL", SDTIntShiftOp>;
+def X86vshr : SDNode<"X86ISD::VSRL", SDTIntShiftOp>;
+def X86cmpps : SDNode<"X86ISD::CMPPS", SDTX86VFCMP>;
+def X86cmppd : SDNode<"X86ISD::CMPPD", SDTX86VFCMP>;
+def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
+def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
+def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
+def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
+def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
+def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
+def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
+def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;
+
+def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
+ SDTCisVec<1>,
+ SDTCisSameAs<2, 1>]>;
+def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
+def X86testp : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;
+
+// Specific shuffle nodes - At some point ISD::VECTOR_SHUFFLE will always get
+// translated into one of the target nodes below during lowering.
+// Note: this is a work in progress...
+def SDTShuff1Op : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
+def SDTShuff2Op : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>]>;
+
+def SDTShuff2OpI : SDTypeProfile<1, 2, [SDTCisVec<0>,
+ SDTCisSameAs<0,1>, SDTCisInt<2>]>;
+def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>, SDTCisInt<3>]>;
+
+def SDTShuff2OpLdI : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisPtrTy<1>,
+ SDTCisInt<2>]>;
+
+def X86PAlign : SDNode<"X86ISD::PALIGN", SDTShuff3OpI>;
+
+def X86PShufd : SDNode<"X86ISD::PSHUFD", SDTShuff2OpI>;
+def X86PShufhw : SDNode<"X86ISD::PSHUFHW", SDTShuff2OpI>;
+def X86PShuflw : SDNode<"X86ISD::PSHUFLW", SDTShuff2OpI>;
+
+def X86PShufhwLd : SDNode<"X86ISD::PSHUFHW_LD", SDTShuff2OpLdI>;
+def X86PShuflwLd : SDNode<"X86ISD::PSHUFLW_LD", SDTShuff2OpLdI>;
+
+def X86Shufpd : SDNode<"X86ISD::SHUFPD", SDTShuff3OpI>;
+def X86Shufps : SDNode<"X86ISD::SHUFPS", SDTShuff3OpI>;
+
+def X86Movddup : SDNode<"X86ISD::MOVDDUP", SDTShuff1Op>;
+def X86Movshdup : SDNode<"X86ISD::MOVSHDUP", SDTShuff1Op>;
+def X86Movsldup : SDNode<"X86ISD::MOVSLDUP", SDTShuff1Op>;
+
+def X86Movsd : SDNode<"X86ISD::MOVSD", SDTShuff2Op>;
+def X86Movss : SDNode<"X86ISD::MOVSS", SDTShuff2Op>;
+
+def X86Movlhps : SDNode<"X86ISD::MOVLHPS", SDTShuff2Op>;
+def X86Movlhpd : SDNode<"X86ISD::MOVLHPD", SDTShuff2Op>;
+def X86Movhlps : SDNode<"X86ISD::MOVHLPS", SDTShuff2Op>;
+def X86Movhlpd : SDNode<"X86ISD::MOVHLPD", SDTShuff2Op>;
+
+def X86Movlps : SDNode<"X86ISD::MOVLPS", SDTShuff2Op>;
+def X86Movlpd : SDNode<"X86ISD::MOVLPD", SDTShuff2Op>;
+
+def X86Unpcklps : SDNode<"X86ISD::UNPCKLPS", SDTShuff2Op>;
+def X86Unpcklpd : SDNode<"X86ISD::UNPCKLPD", SDTShuff2Op>;
+def X86Unpckhps : SDNode<"X86ISD::UNPCKHPS", SDTShuff2Op>;
+def X86Unpckhpd : SDNode<"X86ISD::UNPCKHPD", SDTShuff2Op>;
+
+def X86Punpcklbw : SDNode<"X86ISD::PUNPCKLBW", SDTShuff2Op>;
+def X86Punpcklwd : SDNode<"X86ISD::PUNPCKLWD", SDTShuff2Op>;
+def X86Punpckldq : SDNode<"X86ISD::PUNPCKLDQ", SDTShuff2Op>;
+def X86Punpcklqdq : SDNode<"X86ISD::PUNPCKLQDQ", SDTShuff2Op>;
+
+def X86Punpckhbw : SDNode<"X86ISD::PUNPCKHBW", SDTShuff2Op>;
+def X86Punpckhwd : SDNode<"X86ISD::PUNPCKHWD", SDTShuff2Op>;
+def X86Punpckhdq : SDNode<"X86ISD::PUNPCKHDQ", SDTShuff2Op>;
+def X86Punpckhqdq : SDNode<"X86ISD::PUNPCKHQDQ", SDTShuff2Op>;
+
+//===----------------------------------------------------------------------===//
+// SSE Complex Patterns
+//===----------------------------------------------------------------------===//
+
+// These are 'extloads' from a scalar to the low element of a vector, zeroing
+// the top elements. These are used for the SSE 'ss' and 'sd' instruction
+// forms.
+def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
+ [SDNPHasChain, SDNPMayLoad]>;
+def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
+ [SDNPHasChain, SDNPMayLoad]>;
+
+def ssmem : Operand<v4f32> {
+ let PrintMethod = "printf32mem";
+ let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
+ let ParserMatchClass = X86MemAsmOperand;
+}
+def sdmem : Operand<v2f64> {
+ let PrintMethod = "printf64mem";
+ let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
+ let ParserMatchClass = X86MemAsmOperand;
+}
+
+//===----------------------------------------------------------------------===//
+// SSE pattern fragments
+//===----------------------------------------------------------------------===//
+
+// 128-bit load pattern fragments
+def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
+def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
+def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
+def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
+
+// 256-bit load pattern fragments
+def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
+def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
+def loadv8i32 : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>;
+def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;
+
+// Like 'store', but always requires vector alignment.
+def alignedstore : PatFrag<(ops node:$val, node:$ptr),
+ (store node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getAlignment() >= 16;
+}]>;
+
+// Like 'load', but always requires vector alignment.
+def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return cast<LoadSDNode>(N)->getAlignment() >= 16;
+}]>;
+
+def alignedloadfsf32 : PatFrag<(ops node:$ptr),
+ (f32 (alignedload node:$ptr))>;
+def alignedloadfsf64 : PatFrag<(ops node:$ptr),
+ (f64 (alignedload node:$ptr))>;
+
+// 128-bit aligned load pattern fragments
+def alignedloadv4f32 : PatFrag<(ops node:$ptr),
+ (v4f32 (alignedload node:$ptr))>;
+def alignedloadv2f64 : PatFrag<(ops node:$ptr),
+ (v2f64 (alignedload node:$ptr))>;
+def alignedloadv4i32 : PatFrag<(ops node:$ptr),
+ (v4i32 (alignedload node:$ptr))>;
+def alignedloadv2i64 : PatFrag<(ops node:$ptr),
+ (v2i64 (alignedload node:$ptr))>;
+
+// 256-bit aligned load pattern fragments
+def alignedloadv8f32 : PatFrag<(ops node:$ptr),
+ (v8f32 (alignedload node:$ptr))>;
+def alignedloadv4f64 : PatFrag<(ops node:$ptr),
+ (v4f64 (alignedload node:$ptr))>;
+def alignedloadv8i32 : PatFrag<(ops node:$ptr),
+ (v8i32 (alignedload node:$ptr))>;
+def alignedloadv4i64 : PatFrag<(ops node:$ptr),
+ (v4i64 (alignedload node:$ptr))>;
+
+// Like 'load', but uses special alignment checks suitable for use in
+// memory operands in most SSE instructions, which are required to
+// be naturally aligned on some targets but not on others. If the subtarget
+// allows unaligned accesses, match any load, though this may require
+// setting a feature bit in the processor (on startup, for example).
+// Opteron 10h and later implement such a feature.
+def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return Subtarget->hasVectorUAMem()
+ || cast<LoadSDNode>(N)->getAlignment() >= 16;
+}]>;
+
+def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
+def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
+
+// 128-bit memop pattern fragments
+def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
+def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
+def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
+def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
+def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
+
+// 256-bit memop pattern fragments
+def memopv32i8 : PatFrag<(ops node:$ptr), (v32i8 (memop node:$ptr))>;
+def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
+def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
+def memopv4i64 : PatFrag<(ops node:$ptr), (v4i64 (memop node:$ptr))>;
+def memopv8i32 : PatFrag<(ops node:$ptr), (v8i32 (memop node:$ptr))>;
+
+// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
+// 16-byte boundary.
+// FIXME: 8 byte alignment for mmx reads is not required
+def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return cast<LoadSDNode>(N)->getAlignment() >= 8;
+}]>;
+
+def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
+def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
+def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
+def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
+
+// MOVNT Support
+// Like 'store', but requires the non-temporal bit to be set
+def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
+ (st node:$val, node:$ptr), [{
+ if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
+ return ST->isNonTemporal();
+ return false;
+}]>;
+
+def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
+ (st node:$val, node:$ptr), [{
+ if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
+ return ST->isNonTemporal() && !ST->isTruncatingStore() &&
+ ST->getAddressingMode() == ISD::UNINDEXED &&
+ ST->getAlignment() >= 16;
+ return false;
+}]>;
+
+def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
+ (st node:$val, node:$ptr), [{
+ if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
+ return ST->isNonTemporal() &&
+ ST->getAlignment() < 16;
+ return false;
+}]>;
+
+// 128-bit bitconvert pattern fragments
+def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
+def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
+def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
+def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
+def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
+def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
+
+// 256-bit bitconvert pattern fragments
+def bc_v8i32 : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>;
+
+def vzmovl_v2i64 : PatFrag<(ops node:$src),
+ (bitconvert (v2i64 (X86vzmovl
+ (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
+def vzmovl_v4i32 : PatFrag<(ops node:$src),
+ (bitconvert (v4i32 (X86vzmovl
+ (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;
+
+def vzload_v2i64 : PatFrag<(ops node:$src),
+ (bitconvert (v2i64 (X86vzload node:$src)))>;
+
+
+def fp32imm0 : PatLeaf<(f32 fpimm), [{
+ return N->isExactlyValue(+0.0);
+}]>;
+
+// BYTE_imm - Transform bit immediates into byte immediates.
+def BYTE_imm : SDNodeXForm<imm, [{
+ // Transformation function: imm >> 3
+ return getI32Imm(N->getZExtValue() >> 3);
+}]>;
+
+// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
+// SHUFP* etc. imm.
+def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
+ return getI8Imm(X86::getShuffleSHUFImmediate(N));
+}]>;
+
+// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
+// PSHUFHW imm.
+def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
+ return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
+}]>;
+
+// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
+// PSHUFLW imm.
+def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
+ return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
+}]>;
+
+// SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
+// a PALIGNR imm.
+def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
+ return getI8Imm(X86::getShufflePALIGNRImmediate(N));
+}]>;
+
+def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
+ return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
+}]>;
+
+def movddup : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def movlp : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def movl : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
+}], SHUFFLE_get_shuf_imm>;
+
+def shufp : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
+}], SHUFFLE_get_shuf_imm>;
+
+def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
+}], SHUFFLE_get_pshufhw_imm>;
+
+def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
+}], SHUFFLE_get_pshuflw_imm>;
+
+def palign : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
+}], SHUFFLE_get_palign_imm>;
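The memop fragment above accepts a vector load either when it is at least 16-byte aligned or when the subtarget reports hasVectorUAMem(), and the non-temporal store fragments split on the same 16-byte threshold. A user-level illustration with the plain SSE intrinsics from <xmmintrin.h>, which expose the same aligned/unaligned/non-temporal distinction these fragments encode (the intrinsics are not what the patterns select on; they just make the distinction concrete):

#include <xmmintrin.h>   // _mm_load_ps, _mm_loadu_ps, _mm_stream_ps, _mm_sfence
#include <cstdio>

int main() {
  alignas(16) float buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};

  __m128 a = _mm_load_ps(buf);        // requires a 16-byte aligned address
  __m128 u = _mm_loadu_ps(buf + 1);   // no alignment requirement

  alignas(16) float out[4];
  _mm_stream_ps(out, a);              // non-temporal store to an aligned destination
  _mm_sfence();                       // make the streaming store globally visible

  float tmp[4];
  _mm_storeu_ps(tmp, u);
  std::printf("%g %g %g %g\n", tmp[0], tmp[1], tmp[2], tmp[3]);
}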
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86InstrInfo.cpp b/libclamav/c++/llvm/lib/Target/X86/X86InstrInfo.cpp
index 4fd91bb..5280940 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/libclamav/c++/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -27,6 +27,7 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -234,6 +235,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::BT64ri8, X86::BT64mi8, 1, 0 },
{ X86::CALL32r, X86::CALL32m, 1, 0 },
{ X86::CALL64r, X86::CALL64m, 1, 0 },
+ { X86::WINCALL64r, X86::WINCALL64m, 1, 0 },
{ X86::CMP16ri, X86::CMP16mi, 1, 0 },
{ X86::CMP16ri8, X86::CMP16mi8, 1, 0 },
{ X86::CMP16rr, X86::CMP16mr, 1, 0 },
@@ -266,6 +268,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::MOV16rr, X86::MOV16mr, 0, 0 },
{ X86::MOV32ri, X86::MOV32mi, 0, 0 },
{ X86::MOV32rr, X86::MOV32mr, 0, 0 },
+ { X86::MOV32rr_TC, X86::MOV32mr_TC, 0, 0 },
{ X86::MOV64ri32, X86::MOV64mi32, 0, 0 },
{ X86::MOV64rr, X86::MOV64mr, 0, 0 },
{ X86::MOV8ri, X86::MOV8mi, 0, 0 },
@@ -301,6 +304,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::SETPr, X86::SETPm, 0, 0 },
{ X86::SETSr, X86::SETSm, 0, 0 },
{ X86::TAILJMPr, X86::TAILJMPm, 1, 0 },
+ { X86::TAILJMPr64, X86::TAILJMPm64, 1, 0 },
{ X86::TEST16ri, X86::TEST16mi, 1, 0 },
{ X86::TEST32ri, X86::TEST32mi, 1, 0 },
{ X86::TEST64ri32, X86::TEST64mi32, 1, 0 },
@@ -376,6 +380,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::Int_UCOMISSrr, X86::Int_UCOMISSrm, 0 },
{ X86::MOV16rr, X86::MOV16rm, 0 },
{ X86::MOV32rr, X86::MOV32rm, 0 },
+ { X86::MOV32rr_TC, X86::MOV32rm_TC, 0 },
{ X86::MOV64rr, X86::MOV64rm, 0 },
{ X86::MOV64toPQIrr, X86::MOVQI2PQIrm, 0 },
{ X86::MOV64toSDrr, X86::MOV64toSDrm, 0 },
@@ -594,7 +599,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::PMULHUWrr, X86::PMULHUWrm, 16 },
{ X86::PMULHWrr, X86::PMULHWrm, 16 },
{ X86::PMULLDrr, X86::PMULLDrm, 16 },
- { X86::PMULLDrr_int, X86::PMULLDrm_int, 16 },
{ X86::PMULLWrr, X86::PMULLWrm, 16 },
{ X86::PMULUDQrr, X86::PMULUDQrm, 16 },
{ X86::PORrr, X86::PORrm, 16 },
@@ -664,44 +668,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
assert(AmbEntries.empty() && "Duplicated entries in unfolding maps?");
}
-bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
- unsigned &SrcReg, unsigned &DstReg,
- unsigned &SrcSubIdx, unsigned &DstSubIdx) const {
- switch (MI.getOpcode()) {
- default:
- return false;
- case X86::MOV8rr:
- case X86::MOV8rr_NOREX:
- case X86::MOV16rr:
- case X86::MOV32rr:
- case X86::MOV64rr:
-
- // FP Stack register class copies
- case X86::MOV_Fp3232: case X86::MOV_Fp6464: case X86::MOV_Fp8080:
- case X86::MOV_Fp3264: case X86::MOV_Fp3280:
- case X86::MOV_Fp6432: case X86::MOV_Fp8032:
-
- // Note that MOVSSrr and MOVSDrr are not considered copies. FR32 and FR64
- // copies are done with FsMOVAPSrr and FsMOVAPDrr.
-
- case X86::FsMOVAPSrr:
- case X86::FsMOVAPDrr:
- case X86::MOVAPSrr:
- case X86::MOVAPDrr:
- case X86::MOVDQArr:
- case X86::MMX_MOVQ64rr:
- assert(MI.getNumOperands() >= 2 &&
- MI.getOperand(0).isReg() &&
- MI.getOperand(1).isReg() &&
- "invalid register-register move instruction");
- SrcReg = MI.getOperand(1).getReg();
- DstReg = MI.getOperand(0).getReg();
- SrcSubIdx = MI.getOperand(1).getSubReg();
- DstSubIdx = MI.getOperand(0).getSubReg();
- return true;
- }
-}
-
bool
X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
@@ -739,17 +705,17 @@ X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
case X86::MOVZX32rr8:
case X86::MOVSX64rr8:
case X86::MOVZX64rr8:
- SubIdx = 1;
+ SubIdx = X86::sub_8bit;
break;
case X86::MOVSX32rr16:
case X86::MOVZX32rr16:
case X86::MOVSX64rr16:
case X86::MOVZX64rr16:
- SubIdx = 3;
+ SubIdx = X86::sub_16bit;
break;
case X86::MOVSX64rr32:
case X86::MOVZX64rr32:
- SubIdx = 4;
+ SubIdx = X86::sub_32bit;
break;
}
return true;
@@ -779,7 +745,9 @@ static bool isFrameLoadOpcode(int Opcode) {
case X86::MOV8rm:
case X86::MOV16rm:
case X86::MOV32rm:
+ case X86::MOV32rm_TC:
case X86::MOV64rm:
+ case X86::MOV64rm_TC:
case X86::LD_Fp64m:
case X86::MOVSSrm:
case X86::MOVSDrm:
@@ -800,7 +768,9 @@ static bool isFrameStoreOpcode(int Opcode) {
case X86::MOV8mr:
case X86::MOV16mr:
case X86::MOV32mr:
+ case X86::MOV32mr_TC:
case X86::MOV64mr:
+ case X86::MOV64mr_TC:
case X86::ST_FpP64m:
case X86::MOVSSmr:
case X86::MOVSDmr:
@@ -818,7 +788,7 @@ static bool isFrameStoreOpcode(int Opcode) {
unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
if (isFrameLoadOpcode(MI->getOpcode()))
- if (isFrameOperand(MI, 1, FrameIndex))
+ if (MI->getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
return MI->getOperand(0).getReg();
return 0;
}
@@ -857,8 +827,9 @@ bool X86InstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
if (isFrameStoreOpcode(MI->getOpcode()))
- if (isFrameOperand(MI, 0, FrameIndex))
- return MI->getOperand(X86AddrNumOperands).getReg();
+ if (MI->getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
+ isFrameOperand(MI, 0, FrameIndex))
+ return MI->getOperand(X86::AddrNumOperands).getReg();
return 0;
}
@@ -987,8 +958,10 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
/// a few instructions in each direction it assumes it's not safe.
static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) {
+ MachineBasicBlock::iterator E = MBB.end();
+
// It's always safe to clobber EFLAGS at the end of a block.
- if (I == MBB.end())
+ if (I == E)
return true;
// For compile time consideration, if we are not able to determine the
@@ -1012,20 +985,28 @@ static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
// This instruction defines EFLAGS, no need to look any further.
return true;
++Iter;
+ // Skip over DBG_VALUE.
+ while (Iter != E && Iter->isDebugValue())
+ ++Iter;
// If we make it to the end of the block, it's safe to clobber EFLAGS.
- if (Iter == MBB.end())
+ if (Iter == E)
return true;
}
+ MachineBasicBlock::iterator B = MBB.begin();
Iter = I;
for (unsigned i = 0; i < 4; ++i) {
// If we make it to the beginning of the block, it's safe to clobber
// EFLAGS iff EFLAGS is not live-in.
- if (Iter == MBB.begin())
+ if (Iter == B)
return !MBB.isLiveIn(X86::EFLAGS);
--Iter;
+ // Skip over DBG_VALUE.
+ while (Iter != B && Iter->isDebugValue())
+ --Iter;
+
bool SawKill = false;
for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
MachineOperand &MO = Iter->getOperand(j);
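As an aside (not part of the patch itself): the hunk above teaches the EFLAGS-clobber scan to step over DBG_VALUE instructions, so debug-info pseudo instructions can never change the answer. A minimal standalone sketch of that bounded scan, using a toy instruction model rather than LLVM's MachineInstr API:

// Illustrative sketch only: bounded scan that ignores debug pseudo-instrs.
#include <cstdio>
#include <vector>

struct Instr {
  bool isDebugValue;   // DBG_VALUE-style pseudo; must not affect codegen
  bool readsFlags;     // reads the flags register
  bool definesFlags;   // (re)defines the flags register
};

// Look at up to 'window' real instructions starting at position i. Flags may
// be clobbered if nothing reads them before they are redefined or the block
// ends; give up (conservatively) once the window is exhausted.
static bool safeToClobberFlags(const std::vector<Instr> &block, size_t i,
                               unsigned window = 4) {
  for (unsigned n = 0; n < window; ++n) {
    while (i < block.size() && block[i].isDebugValue)
      ++i;                         // skip over DBG_VALUE
    if (i == block.size())
      return true;                 // fell off the end of the block
    if (block[i].readsFlags)
      return false;                // someone still needs the old flags
    if (block[i].definesFlags)
      return true;                 // flags get overwritten anyway
    ++i;
  }
  return false;                    // could not prove safety in the window
}

int main() {
  std::vector<Instr> block = {
    {false, false, false},         // candidate clobber point
    {true,  false, false},         // DBG_VALUE, ignored
    {false, false, true},          // redefines flags -> safe
  };
  std::printf("safe: %d\n", safeToClobberFlags(block, 1) ? 1 : 0);
  return 0;
}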
@@ -1049,13 +1030,8 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const {
- DebugLoc DL = MBB.findDebugLoc(I);
-
- if (SubIdx && TargetRegisterInfo::isPhysicalRegister(DestReg)) {
- DestReg = TRI->getSubReg(DestReg, SubIdx);
- SubIdx = 0;
- }
+ const TargetRegisterInfo &TRI) const {
+ DebugLoc DL = Orig->getDebugLoc();
// MOV32r0 etc. are implemented with xor which clobbers condition code.
// Re-materialize them as movri instructions to avoid side effects.
@@ -1083,14 +1059,13 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
if (Clone) {
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
- MI->getOperand(0).setReg(DestReg);
MBB.insert(I, MI);
} else {
- BuildMI(MBB, I, DL, get(Opc), DestReg).addImm(0);
+ BuildMI(MBB, I, DL, get(Opc)).addOperand(Orig->getOperand(0)).addImm(0);
}
MachineInstr *NewMI = prior(I);
- NewMI->getOperand(0).setSubReg(SubIdx);
+ NewMI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
}
/// hasLiveCondCodeDef - True if MI has a condition code def, e.g. EFLAGS, that
@@ -1136,10 +1111,9 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
// least on modern x86 machines).
BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
MachineInstr *InsMI =
- BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::INSERT_SUBREG),leaInReg)
- .addReg(leaInReg)
- .addReg(Src, getKillRegState(isKill))
- .addImm(X86::SUBREG_16BIT);
+ BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
+ .addReg(leaInReg, RegState::Define, X86::sub_16bit)
+ .addReg(Src, getKillRegState(isKill));
MachineInstrBuilder MIB = BuildMI(*MFI, MBBI, MI->getDebugLoc(),
get(Opc), leaOutReg);
@@ -1150,20 +1124,20 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
case X86::SHL16ri: {
unsigned ShAmt = MI->getOperand(2).getImm();
MIB.addReg(0).addImm(1 << ShAmt)
- .addReg(leaInReg, RegState::Kill).addImm(0);
+ .addReg(leaInReg, RegState::Kill).addImm(0).addReg(0);
break;
}
case X86::INC16r:
case X86::INC64_16r:
- addLeaRegOffset(MIB, leaInReg, true, 1);
+ addRegOffset(MIB, leaInReg, true, 1);
break;
case X86::DEC16r:
case X86::DEC64_16r:
- addLeaRegOffset(MIB, leaInReg, true, -1);
+ addRegOffset(MIB, leaInReg, true, -1);
break;
case X86::ADD16ri:
case X86::ADD16ri8:
- addLeaRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
+ addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
break;
case X86::ADD16rr: {
unsigned Src2 = MI->getOperand(2).getReg();
@@ -1180,10 +1154,9 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
// well be shifting and then extracting the lower 16-bits.
BuildMI(*MFI, MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg2);
InsMI2 =
- BuildMI(*MFI, MIB, MI->getDebugLoc(), get(X86::INSERT_SUBREG),leaInReg2)
- .addReg(leaInReg2)
- .addReg(Src2, getKillRegState(isKill2))
- .addImm(X86::SUBREG_16BIT);
+ BuildMI(*MFI, MIB, MI->getDebugLoc(), get(TargetOpcode::COPY))
+ .addReg(leaInReg2, RegState::Define, X86::sub_16bit)
+ .addReg(Src2, getKillRegState(isKill2));
addRegReg(MIB, leaInReg, true, leaInReg2, true);
}
if (LV && isKill2 && InsMI2)
@@ -1194,10 +1167,9 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
MachineInstr *NewMI = MIB;
MachineInstr *ExtMI =
- BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::EXTRACT_SUBREG))
+ BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
.addReg(Dest, RegState::Define | getDeadRegState(isDead))
- .addReg(leaOutReg, RegState::Kill)
- .addImm(X86::SUBREG_16BIT);
+ .addReg(leaOutReg, RegState::Kill, X86::sub_16bit);
if (LV) {
// Update live variables
@@ -1268,7 +1240,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
.addReg(Dest, RegState::Define | getDeadRegState(isDead))
.addReg(0).addImm(1 << ShAmt)
.addReg(Src, getKillRegState(isKill))
- .addImm(0);
+ .addImm(0).addReg(0);
break;
}
case X86::SHL32ri: {
@@ -1282,7 +1254,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addReg(Dest, RegState::Define | getDeadRegState(isDead))
.addReg(0).addImm(1 << ShAmt)
- .addReg(Src, getKillRegState(isKill)).addImm(0);
+ .addReg(Src, getKillRegState(isKill)).addImm(0).addReg(0);
break;
}
case X86::SHL16ri: {
@@ -1298,7 +1270,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
.addReg(Dest, RegState::Define | getDeadRegState(isDead))
.addReg(0).addImm(1 << ShAmt)
.addReg(Src, getKillRegState(isKill))
- .addImm(0);
+ .addImm(0).addReg(0);
break;
}
default: {
@@ -1316,7 +1288,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
- NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
Src, isKill, 1);
@@ -1338,7 +1310,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
- NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
Src, isKill, -1);
@@ -1386,7 +1358,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD64ri32:
case X86::ADD64ri8:
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
+ NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
Src, isKill, MI->getOperand(2).getImm());
@@ -1395,7 +1367,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD32ri8: {
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
- NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
Src, isKill, MI->getOperand(2).getImm());
@@ -1406,7 +1378,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (DisableLEA16)
return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
+ NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
Src, isKill, MI->getOperand(2).getImm());
@@ -1654,14 +1626,6 @@ bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
return !isPredicated(MI);
}
-// For purposes of branch analysis do not count FP_REG_KILL as a terminator.
-static bool isBrAnalysisUnpredicatedTerminator(const MachineInstr *MI,
- const X86InstrInfo &TII) {
- if (MI->getOpcode() == X86::FP_REG_KILL)
- return false;
- return TII.isUnpredicatedTerminator(MI);
-}
-
bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
@@ -1670,12 +1634,15 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// Start from the bottom of the block and work up, examining the
// terminator instructions.
MachineBasicBlock::iterator I = MBB.end();
+ MachineBasicBlock::iterator UnCondBrIter = MBB.end();
while (I != MBB.begin()) {
--I;
+ if (I->isDebugValue())
+ continue;
// Working from the bottom, when we see a non-terminator instruction, we're
// done.
- if (!isBrAnalysisUnpredicatedTerminator(I, *this))
+ if (!isUnpredicatedTerminator(I))
break;
// A terminator that isn't a branch can't easily be handled by this
@@ -1685,6 +1652,8 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// Handle unconditional branches.
if (I->getOpcode() == X86::JMP_4) {
+ UnCondBrIter = I;
+
if (!AllowModify) {
TBB = I->getOperand(0).getMBB();
continue;
@@ -1702,10 +1671,11 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
TBB = 0;
I->eraseFromParent();
I = MBB.end();
+ UnCondBrIter = MBB.end();
continue;
}
- // TBB is used to indicate the unconditinal destination.
+ // TBB is used to indicate the unconditional destination.
TBB = I->getOperand(0).getMBB();
continue;
}
@@ -1717,6 +1687,45 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// Working from the bottom, handle the first conditional branch.
if (Cond.empty()) {
+ MachineBasicBlock *TargetBB = I->getOperand(0).getMBB();
+ if (AllowModify && UnCondBrIter != MBB.end() &&
+ MBB.isLayoutSuccessor(TargetBB)) {
+ // If we can modify the code and it ends in something like:
+ //
+ // jCC L1
+ // jmp L2
+ // L1:
+ // ...
+ // L2:
+ //
+ // Then we can change this to:
+ //
+ // jnCC L2
+ // L1:
+ // ...
+ // L2:
+ //
+ // Which is a bit more efficient.
+ // We conditionally jump to the fall-through block.
+ BranchCode = GetOppositeBranchCondition(BranchCode);
+ unsigned JNCC = GetCondBranchFromCond(BranchCode);
+ MachineBasicBlock::iterator OldInst = I;
+
+ BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(JNCC))
+ .addMBB(UnCondBrIter->getOperand(0).getMBB());
+ BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_4))
+ .addMBB(TargetBB);
+ MBB.addSuccessor(TargetBB);
+
+ OldInst->eraseFromParent();
+ UnCondBrIter->eraseFromParent();
+
+ // Restart the analysis.
+ UnCondBrIter = MBB.end();
+ I = MBB.end();
+ continue;
+ }
+
FBB = TBB;
TBB = I->getOperand(0).getMBB();
Cond.push_back(MachineOperand::CreateImm(BranchCode));
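The comment block in the hunk above describes the rewrite in prose; as an aside, here is a minimal standalone sketch of the same idea (toy condition codes and a toy block-terminator record, not LLVM's AnalyzeBranch machinery): when the conditional branch targets the layout successor, branch on the opposite condition to the jump's target and drop the unconditional jump.

// Illustrative sketch only: invert a conditional branch whose target is the
// fall-through block, then delete the now-redundant unconditional jump.
#include <cstdio>

enum Cond { EQ, NE, LT, GE, LE, GT };

static Cond opposite(Cond c) {
  switch (c) {
  case EQ: return NE;  case NE: return EQ;
  case LT: return GE;  case GE: return LT;
  case LE: return GT;  case GT: return LE;
  }
  return EQ;   // not reached
}

struct BlockEnd {
  Cond cc;          // condition of the conditional branch
  int  condTarget;  // block reached when cc holds
  int  jmpTarget;   // block reached by the trailing unconditional jump
  bool hasJmp;      // true while the 'jmp' is still present
};

static void simplify(BlockEnd &end, int layoutSuccessor) {
  if (!end.hasJmp || end.condTarget != layoutSuccessor)
    return;                        // pattern does not match; leave it alone
  end.cc = opposite(end.cc);       // jCC L1; jmp L2  ->  jnCC L2
  end.condTarget = end.jmpTarget;
  end.hasJmp = false;              // fall through into layoutSuccessor
}

int main() {
  BlockEnd end = {EQ, /*condTarget=*/1, /*jmpTarget=*/2, /*hasJmp=*/true};
  simplify(end, /*layoutSuccessor=*/1);
  std::printf("cc=%d target=%d hasJmp=%d\n", end.cc, end.condTarget,
              end.hasJmp ? 1 : 0);
  return 0;
}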
@@ -1768,6 +1777,8 @@ unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
while (I != MBB.begin()) {
--I;
+ if (I->isDebugValue())
+ continue;
if (I->getOpcode() != X86::JMP_4 &&
GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
break;
@@ -1783,9 +1794,8 @@ unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc operand
- DebugLoc dl = DebugLoc::getUnknownLoc();
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
@@ -1794,7 +1804,7 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
if (Cond.empty()) {
// Unconditional branch?
assert(!FBB && "Unconditional branch with multiple successors!");
- BuildMI(&MBB, dl, get(X86::JMP_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(TBB);
return 1;
}
@@ -1804,27 +1814,27 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
switch (CC) {
case X86::COND_NP_OR_E:
// Synthesize NP_OR_E with two branches.
- BuildMI(&MBB, dl, get(X86::JNP_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JNP_4)).addMBB(TBB);
++Count;
- BuildMI(&MBB, dl, get(X86::JE_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JE_4)).addMBB(TBB);
++Count;
break;
case X86::COND_NE_OR_P:
// Synthesize NE_OR_P with two branches.
- BuildMI(&MBB, dl, get(X86::JNE_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JNE_4)).addMBB(TBB);
++Count;
- BuildMI(&MBB, dl, get(X86::JP_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JP_4)).addMBB(TBB);
++Count;
break;
default: {
unsigned Opc = GetCondBranchFromCond(CC);
- BuildMI(&MBB, dl, get(Opc)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(Opc)).addMBB(TBB);
++Count;
}
}
if (FBB) {
// Two-way Conditional branch. Insert the second branch.
- BuildMI(&MBB, dl, get(X86::JMP_4)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(FBB);
++Count;
}
return Count;
@@ -1835,236 +1845,192 @@ static bool isHReg(unsigned Reg) {
return X86::GR8_ABCD_HRegClass.contains(Reg);
}
-bool X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC) const {
- DebugLoc DL = MBB.findDebugLoc(MI);
-
- // Determine if DstRC and SrcRC have a common superclass in common.
- const TargetRegisterClass *CommonRC = DestRC;
- if (DestRC == SrcRC)
- /* Source and destination have the same register class. */;
- else if (CommonRC->hasSuperClass(SrcRC))
- CommonRC = SrcRC;
- else if (!DestRC->hasSubClass(SrcRC)) {
- // Neither of GR64_NOREX or GR64_NOSP is a superclass of the other,
- // but we want to copy them as GR64. Similarly, for GR32_NOREX and
- // GR32_NOSP, copy as GR32.
- if (SrcRC->hasSuperClass(&X86::GR64RegClass) &&
- DestRC->hasSuperClass(&X86::GR64RegClass))
- CommonRC = &X86::GR64RegClass;
- else if (SrcRC->hasSuperClass(&X86::GR32RegClass) &&
- DestRC->hasSuperClass(&X86::GR32RegClass))
- CommonRC = &X86::GR32RegClass;
- else
- CommonRC = 0;
+// Try and copy between VR128/VR64 and GR64 registers.
+static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg) {
+ // SrcReg(VR128) -> DestReg(GR64)
+ // SrcReg(VR64) -> DestReg(GR64)
+ // SrcReg(GR64) -> DestReg(VR128)
+ // SrcReg(GR64) -> DestReg(VR64)
+
+ if (X86::GR64RegClass.contains(DestReg)) {
+ if (X86::VR128RegClass.contains(SrcReg)) {
+ // Copy from a VR128 register to a GR64 register.
+ return X86::MOVPQIto64rr;
+ } else if (X86::VR64RegClass.contains(SrcReg)) {
+ // Copy from a VR64 register to a GR64 register.
+ return X86::MOVSDto64rr;
+ }
+ } else if (X86::GR64RegClass.contains(SrcReg)) {
+ // Copy from a GR64 register to a VR128 register.
+ if (X86::VR128RegClass.contains(DestReg))
+ return X86::MOV64toPQIrr;
+ // Copy from a GR64 register to a VR64 register.
+ else if (X86::VR64RegClass.contains(DestReg))
+ return X86::MOV64toSDrr;
}
- if (CommonRC) {
- unsigned Opc;
- if (CommonRC == &X86::GR64RegClass || CommonRC == &X86::GR64_NOSPRegClass) {
- Opc = X86::MOV64rr;
- } else if (CommonRC == &X86::GR32RegClass ||
- CommonRC == &X86::GR32_NOSPRegClass) {
- Opc = X86::MOV32rr;
- } else if (CommonRC == &X86::GR16RegClass) {
- Opc = X86::MOV16rr;
- } else if (CommonRC == &X86::GR8RegClass) {
- // Copying to or from a physical H register on x86-64 requires a NOREX
- // move. Otherwise use a normal move.
- if ((isHReg(DestReg) || isHReg(SrcReg)) &&
- TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8rr_NOREX;
- else
- Opc = X86::MOV8rr;
- } else if (CommonRC == &X86::GR64_ABCDRegClass) {
- Opc = X86::MOV64rr;
- } else if (CommonRC == &X86::GR32_ABCDRegClass) {
- Opc = X86::MOV32rr;
- } else if (CommonRC == &X86::GR16_ABCDRegClass) {
- Opc = X86::MOV16rr;
- } else if (CommonRC == &X86::GR8_ABCD_LRegClass) {
- Opc = X86::MOV8rr;
- } else if (CommonRC == &X86::GR8_ABCD_HRegClass) {
- if (TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8rr_NOREX;
- else
- Opc = X86::MOV8rr;
- } else if (CommonRC == &X86::GR64_NOREXRegClass ||
- CommonRC == &X86::GR64_NOREX_NOSPRegClass) {
- Opc = X86::MOV64rr;
- } else if (CommonRC == &X86::GR32_NOREXRegClass) {
- Opc = X86::MOV32rr;
- } else if (CommonRC == &X86::GR16_NOREXRegClass) {
- Opc = X86::MOV16rr;
- } else if (CommonRC == &X86::GR8_NOREXRegClass) {
+ return 0;
+}
+
+void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ // First deal with the normal symmetric copies.
+ unsigned Opc = 0;
+ if (X86::GR64RegClass.contains(DestReg, SrcReg))
+ Opc = X86::MOV64rr;
+ else if (X86::GR32RegClass.contains(DestReg, SrcReg))
+ Opc = X86::MOV32rr;
+ else if (X86::GR16RegClass.contains(DestReg, SrcReg))
+ Opc = X86::MOV16rr;
+ else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
+ // Copying to or from a physical H register on x86-64 requires a NOREX
+ // move. Otherwise use a normal move.
+ if ((isHReg(DestReg) || isHReg(SrcReg)) &&
+ TM.getSubtarget<X86Subtarget>().is64Bit())
+ Opc = X86::MOV8rr_NOREX;
+ else
Opc = X86::MOV8rr;
- } else if (CommonRC == &X86::RFP32RegClass) {
- Opc = X86::MOV_Fp3232;
- } else if (CommonRC == &X86::RFP64RegClass || CommonRC == &X86::RSTRegClass) {
- Opc = X86::MOV_Fp6464;
- } else if (CommonRC == &X86::RFP80RegClass) {
- Opc = X86::MOV_Fp8080;
- } else if (CommonRC == &X86::FR32RegClass) {
- Opc = X86::FsMOVAPSrr;
- } else if (CommonRC == &X86::FR64RegClass) {
- Opc = X86::FsMOVAPDrr;
- } else if (CommonRC == &X86::VR128RegClass) {
- Opc = X86::MOVAPSrr;
- } else if (CommonRC == &X86::VR64RegClass) {
- Opc = X86::MMX_MOVQ64rr;
- } else {
- return false;
- }
- BuildMI(MBB, MI, DL, get(Opc), DestReg).addReg(SrcReg);
- return true;
+ } else if (X86::VR128RegClass.contains(DestReg, SrcReg))
+ Opc = X86::MOVAPSrr;
+ else if (X86::VR64RegClass.contains(DestReg, SrcReg))
+ Opc = X86::MMX_MOVQ64rr;
+ else
+ Opc = CopyToFromAsymmetricReg(DestReg, SrcReg);
+
+ if (Opc) {
+ BuildMI(MBB, MI, DL, get(Opc), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
}
// Moving EFLAGS to / from another register requires a push and a pop.
- if (SrcRC == &X86::CCRRegClass) {
- if (SrcReg != X86::EFLAGS)
- return false;
- if (DestRC == &X86::GR64RegClass || DestRC == &X86::GR64_NOSPRegClass) {
- BuildMI(MBB, MI, DL, get(X86::PUSHFQ64));
+ if (SrcReg == X86::EFLAGS) {
+ if (X86::GR64RegClass.contains(DestReg)) {
+ BuildMI(MBB, MI, DL, get(X86::PUSHF64));
BuildMI(MBB, MI, DL, get(X86::POP64r), DestReg);
- return true;
- } else if (DestRC == &X86::GR32RegClass ||
- DestRC == &X86::GR32_NOSPRegClass) {
- BuildMI(MBB, MI, DL, get(X86::PUSHFD));
+ return;
+ } else if (X86::GR32RegClass.contains(DestReg)) {
+ BuildMI(MBB, MI, DL, get(X86::PUSHF32));
BuildMI(MBB, MI, DL, get(X86::POP32r), DestReg);
- return true;
- }
- } else if (DestRC == &X86::CCRRegClass) {
- if (DestReg != X86::EFLAGS)
- return false;
- if (SrcRC == &X86::GR64RegClass || DestRC == &X86::GR64_NOSPRegClass) {
- BuildMI(MBB, MI, DL, get(X86::PUSH64r)).addReg(SrcReg);
- BuildMI(MBB, MI, DL, get(X86::POPFQ));
- return true;
- } else if (SrcRC == &X86::GR32RegClass ||
- DestRC == &X86::GR32_NOSPRegClass) {
- BuildMI(MBB, MI, DL, get(X86::PUSH32r)).addReg(SrcReg);
- BuildMI(MBB, MI, DL, get(X86::POPFD));
- return true;
+ return;
}
}
-
- // Moving from ST(0) turns into FpGET_ST0_32 etc.
- if (SrcRC == &X86::RSTRegClass) {
- // Copying from ST(0)/ST(1).
- if (SrcReg != X86::ST0 && SrcReg != X86::ST1)
- // Can only copy from ST(0)/ST(1) right now
- return false;
- bool isST0 = SrcReg == X86::ST0;
- unsigned Opc;
- if (DestRC == &X86::RFP32RegClass)
- Opc = isST0 ? X86::FpGET_ST0_32 : X86::FpGET_ST1_32;
- else if (DestRC == &X86::RFP64RegClass)
- Opc = isST0 ? X86::FpGET_ST0_64 : X86::FpGET_ST1_64;
- else {
- if (DestRC != &X86::RFP80RegClass)
- return false;
- Opc = isST0 ? X86::FpGET_ST0_80 : X86::FpGET_ST1_80;
+ if (DestReg == X86::EFLAGS) {
+ if (X86::GR64RegClass.contains(SrcReg)) {
+ BuildMI(MBB, MI, DL, get(X86::PUSH64r))
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ BuildMI(MBB, MI, DL, get(X86::POPF64));
+ return;
+ } else if (X86::GR32RegClass.contains(SrcReg)) {
+ BuildMI(MBB, MI, DL, get(X86::PUSH32r))
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ BuildMI(MBB, MI, DL, get(X86::POPF32));
+ return;
}
- BuildMI(MBB, MI, DL, get(Opc), DestReg);
- return true;
}
- // Moving to ST(0) turns into FpSET_ST0_32 etc.
- if (DestRC == &X86::RSTRegClass) {
- // Copying to ST(0) / ST(1).
- if (DestReg != X86::ST0 && DestReg != X86::ST1)
- // Can only copy to TOS right now
- return false;
- bool isST0 = DestReg == X86::ST0;
- unsigned Opc;
- if (SrcRC == &X86::RFP32RegClass)
- Opc = isST0 ? X86::FpSET_ST0_32 : X86::FpSET_ST1_32;
- else if (SrcRC == &X86::RFP64RegClass)
- Opc = isST0 ? X86::FpSET_ST0_64 : X86::FpSET_ST1_64;
- else {
- if (SrcRC != &X86::RFP80RegClass)
- return false;
- Opc = isST0 ? X86::FpSET_ST0_80 : X86::FpSET_ST1_80;
- }
- BuildMI(MBB, MI, DL, get(Opc)).addReg(SrcReg);
- return true;
- }
-
- // Not yet supported!
- return false;
+ DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg)
+ << " to " << RI.getName(DestReg) << '\n');
+ llvm_unreachable("Cannot emit physreg copy instruction");
}
-static unsigned getStoreRegOpcode(unsigned SrcReg,
- const TargetRegisterClass *RC,
- bool isStackAligned,
- TargetMachine &TM) {
- unsigned Opc = 0;
- if (RC == &X86::GR64RegClass || RC == &X86::GR64_NOSPRegClass) {
- Opc = X86::MOV64mr;
- } else if (RC == &X86::GR32RegClass || RC == &X86::GR32_NOSPRegClass) {
- Opc = X86::MOV32mr;
- } else if (RC == &X86::GR16RegClass) {
- Opc = X86::MOV16mr;
- } else if (RC == &X86::GR8RegClass) {
+static unsigned getLoadStoreRegOpcode(unsigned Reg,
+ const TargetRegisterClass *RC,
+ bool isStackAligned,
+ const TargetMachine &TM,
+ bool load) {
+ switch (RC->getID()) {
+ default:
+ llvm_unreachable("Unknown regclass");
+ case X86::GR64RegClassID:
+ case X86::GR64_NOSPRegClassID:
+ return load ? X86::MOV64rm : X86::MOV64mr;
+ case X86::GR32RegClassID:
+ case X86::GR32_NOSPRegClassID:
+ case X86::GR32_ADRegClassID:
+ return load ? X86::MOV32rm : X86::MOV32mr;
+ case X86::GR16RegClassID:
+ return load ? X86::MOV16rm : X86::MOV16mr;
+ case X86::GR8RegClassID:
// Copying to or from a physical H register on x86-64 requires a NOREX
// move. Otherwise use a normal move.
- if (isHReg(SrcReg) &&
+ if (isHReg(Reg) &&
TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8mr_NOREX;
+ return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
else
- Opc = X86::MOV8mr;
- } else if (RC == &X86::GR64_ABCDRegClass) {
- Opc = X86::MOV64mr;
- } else if (RC == &X86::GR32_ABCDRegClass) {
- Opc = X86::MOV32mr;
- } else if (RC == &X86::GR16_ABCDRegClass) {
- Opc = X86::MOV16mr;
- } else if (RC == &X86::GR8_ABCD_LRegClass) {
- Opc = X86::MOV8mr;
- } else if (RC == &X86::GR8_ABCD_HRegClass) {
+ return load ? X86::MOV8rm : X86::MOV8mr;
+ case X86::GR64_ABCDRegClassID:
+ return load ? X86::MOV64rm : X86::MOV64mr;
+ case X86::GR32_ABCDRegClassID:
+ return load ? X86::MOV32rm : X86::MOV32mr;
+ case X86::GR16_ABCDRegClassID:
+ return load ? X86::MOV16rm : X86::MOV16mr;
+ case X86::GR8_ABCD_LRegClassID:
+ return load ? X86::MOV8rm : X86::MOV8mr;
+ case X86::GR8_ABCD_HRegClassID:
if (TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8mr_NOREX;
+ return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
else
- Opc = X86::MOV8mr;
- } else if (RC == &X86::GR64_NOREXRegClass ||
- RC == &X86::GR64_NOREX_NOSPRegClass) {
- Opc = X86::MOV64mr;
- } else if (RC == &X86::GR32_NOREXRegClass) {
- Opc = X86::MOV32mr;
- } else if (RC == &X86::GR16_NOREXRegClass) {
- Opc = X86::MOV16mr;
- } else if (RC == &X86::GR8_NOREXRegClass) {
- Opc = X86::MOV8mr;
- } else if (RC == &X86::RFP80RegClass) {
- Opc = X86::ST_FpP80m; // pops
- } else if (RC == &X86::RFP64RegClass) {
- Opc = X86::ST_Fp64m;
- } else if (RC == &X86::RFP32RegClass) {
- Opc = X86::ST_Fp32m;
- } else if (RC == &X86::FR32RegClass) {
- Opc = X86::MOVSSmr;
- } else if (RC == &X86::FR64RegClass) {
- Opc = X86::MOVSDmr;
- } else if (RC == &X86::VR128RegClass) {
+ return load ? X86::MOV8rm : X86::MOV8mr;
+ case X86::GR64_NOREXRegClassID:
+ case X86::GR64_NOREX_NOSPRegClassID:
+ return load ? X86::MOV64rm : X86::MOV64mr;
+ case X86::GR32_NOREXRegClassID:
+ return load ? X86::MOV32rm : X86::MOV32mr;
+ case X86::GR16_NOREXRegClassID:
+ return load ? X86::MOV16rm : X86::MOV16mr;
+ case X86::GR8_NOREXRegClassID:
+ return load ? X86::MOV8rm : X86::MOV8mr;
+ case X86::GR64_TCRegClassID:
+ return load ? X86::MOV64rm_TC : X86::MOV64mr_TC;
+ case X86::GR32_TCRegClassID:
+ return load ? X86::MOV32rm_TC : X86::MOV32mr_TC;
+ case X86::RFP80RegClassID:
+ return load ? X86::LD_Fp80m : X86::ST_FpP80m;
+ case X86::RFP64RegClassID:
+ return load ? X86::LD_Fp64m : X86::ST_Fp64m;
+ case X86::RFP32RegClassID:
+ return load ? X86::LD_Fp32m : X86::ST_Fp32m;
+ case X86::FR32RegClassID:
+ return load ? X86::MOVSSrm : X86::MOVSSmr;
+ case X86::FR64RegClassID:
+ return load ? X86::MOVSDrm : X86::MOVSDmr;
+ case X86::VR128RegClassID:
// If stack is realigned we can use aligned stores.
- Opc = isStackAligned ? X86::MOVAPSmr : X86::MOVUPSmr;
- } else if (RC == &X86::VR64RegClass) {
- Opc = X86::MMX_MOVQ64mr;
- } else {
- llvm_unreachable("Unknown regclass");
+ if (isStackAligned)
+ return load ? X86::MOVAPSrm : X86::MOVAPSmr;
+ else
+ return load ? X86::MOVUPSrm : X86::MOVUPSmr;
+ case X86::VR64RegClassID:
+ return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
}
+}
+
+static unsigned getStoreRegOpcode(unsigned SrcReg,
+ const TargetRegisterClass *RC,
+ bool isStackAligned,
+ TargetMachine &TM) {
+ return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, TM, false);
+}
+
- return Opc;
+static unsigned getLoadRegOpcode(unsigned DestReg,
+ const TargetRegisterClass *RC,
+ bool isStackAligned,
+ const TargetMachine &TM) {
+ return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, TM, true);
}
void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIdx,
- const TargetRegisterClass *RC) const {
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
const MachineFunction &MF = *MBB.getParent();
+ assert(MF.getFrameInfo()->getObjectSize(FrameIdx) >= RC->getSize() &&
+ "Stack slot too small for store");
bool isAligned = (RI.getStackAlignment() >= 16) || RI.canRealignStack(MF);
unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
DebugLoc DL = MBB.findDebugLoc(MI);
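As an aside, the getLoadStoreRegOpcode helper introduced above collapses what used to be two near-duplicate switch statements (one for stores, one for loads) into a single selector keyed by register class plus a load/store flag, so the two directions can no longer drift apart. A standalone sketch of the same pattern, with invented class and opcode names:

// Illustrative sketch only: one selector answers both "which load opcode?"
// and "which store opcode?". All names here are made up.
#include <cstdio>

enum RegClass { GPR64, GPR32, VEC128 };

static const char *loadStoreOpcode(RegClass rc, bool aligned, bool load) {
  switch (rc) {
  case GPR64:  return load ? "mov64.load" : "mov64.store";
  case GPR32:  return load ? "mov32.load" : "mov32.store";
  case VEC128: // an aligned spill slot permits the faster aligned form
    if (aligned)
      return load ? "movaps.load" : "movaps.store";
    return load ? "movups.load" : "movups.store";
  }
  return "invalid";
}

static const char *storeOpcode(RegClass rc, bool aligned) {
  return loadStoreOpcode(rc, aligned, /*load=*/false);
}
static const char *loadOpcode(RegClass rc, bool aligned) {
  return loadStoreOpcode(rc, aligned, /*load=*/true);
}

int main() {
  std::printf("%s / %s\n", storeOpcode(VEC128, false),
              loadOpcode(VEC128, true));
  return 0;
}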
@@ -2079,9 +2045,9 @@ void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
MachineInstr::mmo_iterator MMOBegin,
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
- bool isAligned = (*MMOBegin)->getAlignment() >= 16;
+ bool isAligned = MMOBegin != MMOEnd && (*MMOBegin)->getAlignment() >= 16;
unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
- DebugLoc DL = DebugLoc::getUnknownLoc();
+ DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.addOperand(Addr[i]);
@@ -2090,73 +2056,12 @@ void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
NewMIs.push_back(MIB);
}
-static unsigned getLoadRegOpcode(unsigned DestReg,
- const TargetRegisterClass *RC,
- bool isStackAligned,
- const TargetMachine &TM) {
- unsigned Opc = 0;
- if (RC == &X86::GR64RegClass || RC == &X86::GR64_NOSPRegClass) {
- Opc = X86::MOV64rm;
- } else if (RC == &X86::GR32RegClass || RC == &X86::GR32_NOSPRegClass) {
- Opc = X86::MOV32rm;
- } else if (RC == &X86::GR16RegClass) {
- Opc = X86::MOV16rm;
- } else if (RC == &X86::GR8RegClass) {
- // Copying to or from a physical H register on x86-64 requires a NOREX
- // move. Otherwise use a normal move.
- if (isHReg(DestReg) &&
- TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8rm_NOREX;
- else
- Opc = X86::MOV8rm;
- } else if (RC == &X86::GR64_ABCDRegClass) {
- Opc = X86::MOV64rm;
- } else if (RC == &X86::GR32_ABCDRegClass) {
- Opc = X86::MOV32rm;
- } else if (RC == &X86::GR16_ABCDRegClass) {
- Opc = X86::MOV16rm;
- } else if (RC == &X86::GR8_ABCD_LRegClass) {
- Opc = X86::MOV8rm;
- } else if (RC == &X86::GR8_ABCD_HRegClass) {
- if (TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8rm_NOREX;
- else
- Opc = X86::MOV8rm;
- } else if (RC == &X86::GR64_NOREXRegClass ||
- RC == &X86::GR64_NOREX_NOSPRegClass) {
- Opc = X86::MOV64rm;
- } else if (RC == &X86::GR32_NOREXRegClass) {
- Opc = X86::MOV32rm;
- } else if (RC == &X86::GR16_NOREXRegClass) {
- Opc = X86::MOV16rm;
- } else if (RC == &X86::GR8_NOREXRegClass) {
- Opc = X86::MOV8rm;
- } else if (RC == &X86::RFP80RegClass) {
- Opc = X86::LD_Fp80m;
- } else if (RC == &X86::RFP64RegClass) {
- Opc = X86::LD_Fp64m;
- } else if (RC == &X86::RFP32RegClass) {
- Opc = X86::LD_Fp32m;
- } else if (RC == &X86::FR32RegClass) {
- Opc = X86::MOVSSrm;
- } else if (RC == &X86::FR64RegClass) {
- Opc = X86::MOVSDrm;
- } else if (RC == &X86::VR128RegClass) {
- // If stack is realigned we can use aligned loads.
- Opc = isStackAligned ? X86::MOVAPSrm : X86::MOVUPSrm;
- } else if (RC == &X86::VR64RegClass) {
- Opc = X86::MMX_MOVQ64rm;
- } else {
- llvm_unreachable("Unknown regclass");
- }
-
- return Opc;
-}
void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIdx,
- const TargetRegisterClass *RC) const{
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
const MachineFunction &MF = *MBB.getParent();
bool isAligned = (RI.getStackAlignment() >= 16) || RI.canRealignStack(MF);
unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
@@ -2170,9 +2075,9 @@ void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
MachineInstr::mmo_iterator MMOBegin,
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
- bool isAligned = (*MMOBegin)->getAlignment() >= 16;
+ bool isAligned = MMOBegin != MMOEnd && (*MMOBegin)->getAlignment() >= 16;
unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
- DebugLoc DL = DebugLoc::getUnknownLoc();
+ DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.addOperand(Addr[i]);
@@ -2182,7 +2087,8 @@ void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI) const {
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
if (CSI.empty())
return false;
@@ -2200,17 +2106,18 @@ bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
unsigned Opc = is64Bit ? X86::PUSH64r : X86::PUSH32r;
for (unsigned i = CSI.size(); i != 0; --i) {
unsigned Reg = CSI[i-1].getReg();
- const TargetRegisterClass *RegClass = CSI[i-1].getRegClass();
// Add the callee-saved register as live-in. It's killed at the spill.
MBB.addLiveIn(Reg);
if (Reg == FPReg)
// X86RegisterInfo::emitPrologue will handle spilling of frame register.
continue;
- if (RegClass != &X86::VR128RegClass && !isWin64) {
+ if (!X86::VR128RegClass.contains(Reg) && !isWin64) {
CalleeFrameSize += SlotSize;
BuildMI(MBB, MI, DL, get(Opc)).addReg(Reg, RegState::Kill);
} else {
- storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(), RegClass);
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(),
+ RC, &RI);
}
}
@@ -2220,7 +2127,8 @@ bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI) const {
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
if (CSI.empty())
return false;
@@ -2236,16 +2144,30 @@ bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
if (Reg == FPReg)
// X86RegisterInfo::emitEpilogue will handle restoring of frame register.
continue;
- const TargetRegisterClass *RegClass = CSI[i].getRegClass();
- if (RegClass != &X86::VR128RegClass && !isWin64) {
+ if (!X86::VR128RegClass.contains(Reg) && !isWin64) {
BuildMI(MBB, MI, DL, get(Opc), Reg);
} else {
- loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RegClass);
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
+ RC, &RI);
}
}
return true;
}
+MachineInstr*
+X86InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
+ int FrameIx, uint64_t Offset,
+ const MDNode *MDPtr,
+ DebugLoc DL) const {
+ X86AddressMode AM;
+ AM.BaseType = X86AddressMode::FrameIndexBase;
+ AM.Base.FrameIndex = FrameIx;
+ MachineInstrBuilder MIB = BuildMI(MF, DL, get(X86::DBG_VALUE));
+ addFullAddress(MIB, AM).addImm(Offset).addMetadata(MDPtr);
+ return &*MIB;
+}
+
static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
const SmallVectorImpl<MachineOperand> &MOs,
MachineInstr *MI,
@@ -2391,16 +2313,16 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
unsigned DstReg = NewMI->getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(DstReg))
NewMI->getOperand(0).setReg(RI.getSubReg(DstReg,
- 4/*x86_subreg_32bit*/));
+ X86::sub_32bit));
else
- NewMI->getOperand(0).setSubReg(4/*x86_subreg_32bit*/);
+ NewMI->getOperand(0).setSubReg(X86::sub_32bit);
}
return NewMI;
}
}
// No fusion
- if (PrintFailedFusing)
+ if (PrintFailedFusing && !MI->isCopy())
dbgs() << "We failed to fuse operand " << i << " in " << *MI;
return NULL;
}
@@ -2439,9 +2361,9 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
switch (MI->getOpcode()) {
default: return NULL;
case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break;
- case X86::TEST16rr: NewOpc = X86::CMP16ri; RCSize = 2; break;
- case X86::TEST32rr: NewOpc = X86::CMP32ri; RCSize = 4; break;
- case X86::TEST64rr: NewOpc = X86::CMP64ri32; RCSize = 8; break;
+ case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
+ case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break;
+ case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break;
}
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
@@ -2488,8 +2410,17 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
Alignment = (*LoadMI->memoperands_begin())->getAlignment();
else
switch (LoadMI->getOpcode()) {
- case X86::V_SET0:
+ case X86::AVX_SET0PSY:
+ case X86::AVX_SET0PDY:
+ Alignment = 32;
+ break;
+ case X86::V_SET0PS:
+ case X86::V_SET0PD:
+ case X86::V_SET0PI:
case X86::V_SETALLONES:
+ case X86::AVX_SET0PS:
+ case X86::AVX_SET0PD:
+ case X86::AVX_SET0PI:
Alignment = 16;
break;
case X86::FsFLD0SD:
@@ -2506,9 +2437,9 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
switch (MI->getOpcode()) {
default: return NULL;
case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
- case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
- case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
- case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
+ case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
+ case X86::TEST32rr: NewOpc = X86::CMP32ri8; break;
+ case X86::TEST64rr: NewOpc = X86::CMP64ri8; break;
}
// Change to CMPXXri r, 0 first.
MI->setDesc(get(NewOpc));
@@ -2516,13 +2447,25 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
} else if (Ops.size() != 1)
return NULL;
- SmallVector<MachineOperand,X86AddrNumOperands> MOs;
+ // Make sure the subregisters match.
+ // Otherwise we risk changing the size of the load.
+ if (LoadMI->getOperand(0).getSubReg() != MI->getOperand(Ops[0]).getSubReg())
+ return NULL;
+
+ SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
switch (LoadMI->getOpcode()) {
- case X86::V_SET0:
+ case X86::V_SET0PS:
+ case X86::V_SET0PD:
+ case X86::V_SET0PI:
case X86::V_SETALLONES:
+ case X86::AVX_SET0PS:
+ case X86::AVX_SET0PD:
+ case X86::AVX_SET0PI:
+ case X86::AVX_SET0PSY:
+ case X86::AVX_SET0PDY:
case X86::FsFLD0SD:
case X86::FsFLD0SS: {
- // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
+ // Folding a V_SET0P? or V_SETALLONES as a load, to ease register pressure.
// Create a constant-pool entry and operands to load from it.
// Medium and large mode can't fold loads this way.
@@ -2536,7 +2479,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
if (TM.getSubtarget<X86Subtarget>().is64Bit())
PICBase = X86::RIP;
else
- // FIXME: PICBase = TM.getInstrInfo()->getGlobalBaseReg(&MF);
+ // FIXME: PICBase = getGlobalBaseReg(&MF);
// This doesn't work for several reasons.
// 1. GlobalBaseReg may have been spilled.
// 2. It may not be live at MI.
@@ -2546,13 +2489,16 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// Create a constant-pool entry.
MachineConstantPool &MCP = *MF.getConstantPool();
const Type *Ty;
- if (LoadMI->getOpcode() == X86::FsFLD0SS)
+ unsigned Opc = LoadMI->getOpcode();
+ if (Opc == X86::FsFLD0SS)
Ty = Type::getFloatTy(MF.getFunction()->getContext());
- else if (LoadMI->getOpcode() == X86::FsFLD0SD)
+ else if (Opc == X86::FsFLD0SD)
Ty = Type::getDoubleTy(MF.getFunction()->getContext());
+ else if (Opc == X86::AVX_SET0PSY || Opc == X86::AVX_SET0PDY)
+ Ty = VectorType::get(Type::getFloatTy(MF.getFunction()->getContext()), 8);
else
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4);
- Constant *C = LoadMI->getOpcode() == X86::V_SETALLONES ?
+ const Constant *C = LoadMI->getOpcode() == X86::V_SETALLONES ?
Constant::getAllOnesValue(Ty) :
Constant::getNullValue(Ty);
unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
@@ -2568,7 +2514,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
default: {
// Folding a normal load. Just copy the load's address operands.
unsigned NumOps = LoadMI->getDesc().getNumOperands();
- for (unsigned i = NumOps - X86AddrNumOperands; i != NumOps; ++i)
+ for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
MOs.push_back(LoadMI->getOperand(i));
break;
}
@@ -2631,7 +2577,7 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
if (I != OpcodeTablePtr->end())
return true;
}
- return false;
+ return TargetInstrInfoImpl::canFoldMemoryOperand(MI, Ops);
}
bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
@@ -2655,13 +2601,20 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
const TargetInstrDesc &TID = get(Opc);
const TargetOperandInfo &TOI = TID.OpInfo[Index];
const TargetRegisterClass *RC = TOI.getRegClass(&RI);
- SmallVector<MachineOperand, X86AddrNumOperands> AddrOps;
+ if (!MI->hasOneMemOperand() &&
+ RC == &X86::VR128RegClass &&
+ !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
+ // Without memoperands, loadRegFromAddr and storeRegToStackSlot will
+ // conservatively assume the address is unaligned. That's bad for
+ // performance.
+ return false;
+ SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
SmallVector<MachineOperand,2> BeforeOps;
SmallVector<MachineOperand,2> AfterOps;
SmallVector<MachineOperand,4> ImpOps;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &Op = MI->getOperand(i);
- if (i >= Index && i < Index + X86AddrNumOperands)
+ if (i >= Index && i < Index + X86::AddrNumOperands)
AddrOps.push_back(Op);
else if (Op.isReg() && Op.isImplicit())
ImpOps.push_back(Op);
@@ -2680,7 +2633,7 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs);
if (UnfoldStore) {
// Address operands cannot be marked isKill.
- for (unsigned i = 1; i != 1 + X86AddrNumOperands; ++i) {
+ for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
MachineOperand &MO = NewMIs[0]->getOperand(i);
if (MO.isReg())
MO.setIsKill(false);
@@ -2714,16 +2667,22 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
switch (DataMI->getOpcode()) {
default: break;
case X86::CMP64ri32:
+ case X86::CMP64ri8:
case X86::CMP32ri:
+ case X86::CMP32ri8:
case X86::CMP16ri:
+ case X86::CMP16ri8:
case X86::CMP8ri: {
MachineOperand &MO0 = DataMI->getOperand(0);
MachineOperand &MO1 = DataMI->getOperand(1);
if (MO1.getImm() == 0) {
switch (DataMI->getOpcode()) {
default: break;
+ case X86::CMP64ri8:
case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
+ case X86::CMP32ri8:
case X86::CMP32ri: NewOpc = X86::TEST32rr; break;
+ case X86::CMP16ri8:
case X86::CMP16ri: NewOpc = X86::TEST16rr; break;
case X86::CMP8ri: NewOpc = X86::TEST8rr; break;
}
@@ -2771,7 +2730,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
unsigned NumOps = N->getNumOperands();
for (unsigned i = 0; i != NumOps-1; ++i) {
SDValue Op = N->getOperand(i);
- if (i >= Index-NumDefs && i < Index-NumDefs + X86AddrNumOperands)
+ if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands)
AddrOps.push_back(Op);
else if (i < Index-NumDefs)
BeforeOps.push_back(Op);
@@ -2790,7 +2749,12 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
MachineInstr::mmo_iterator> MMOs =
MF.extractLoadMemRefs(cast<MachineSDNode>(N)->memoperands_begin(),
cast<MachineSDNode>(N)->memoperands_end());
- bool isAligned = (*MMOs.first)->getAlignment() >= 16;
+ if (!(*MMOs.first) &&
+ RC == &X86::VR128RegClass &&
+ !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
+ // Do not introduce a slow unaligned load.
+ return false;
+ bool isAligned = (*MMOs.first) && (*MMOs.first)->getAlignment() >= 16;
Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, TM), dl,
VT, MVT::Other, &AddrOps[0], AddrOps.size());
NewNodes.push_back(Load);
@@ -2827,7 +2791,12 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
MachineInstr::mmo_iterator> MMOs =
MF.extractStoreMemRefs(cast<MachineSDNode>(N)->memoperands_begin(),
cast<MachineSDNode>(N)->memoperands_end());
- bool isAligned = (*MMOs.first)->getAlignment() >= 16;
+ if (!(*MMOs.first) &&
+ RC == &X86::VR128RegClass &&
+ !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
+ // Do not introduce a slow unaligned store.
+ return false;
+ bool isAligned = (*MMOs.first) && (*MMOs.first)->getAlignment() >= 16;
SDNode *Store = DAG.getMachineNode(getStoreRegOpcode(0, DstRC,
isAligned, TM),
dl, MVT::Other,
@@ -2927,10 +2896,6 @@ X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
Load1->getOperand(2) == Load2->getOperand(2)) {
if (cast<ConstantSDNode>(Load1->getOperand(1))->getZExtValue() != 1)
return false;
- SDValue Op2 = Load1->getOperand(2);
- if (!isa<RegisterSDNode>(Op2) ||
- cast<RegisterSDNode>(Op2)->getReg() != 0)
- return 0;
// Now let's examine the displacements.
if (isa<ConstantSDNode>(Load1->getOperand(3)) &&
@@ -2967,16 +2932,16 @@ bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
EVT VT = Load1->getValueType(0);
switch (VT.getSimpleVT().SimpleTy) {
- default: {
+ default:
// XMM registers. In 64-bit mode we can be a bit more aggressive since we
// have 16 of them to play with.
if (TM.getSubtargetImpl()->is64Bit()) {
if (NumLoads >= 3)
return false;
- } else if (NumLoads)
+ } else if (NumLoads) {
return false;
+ }
break;
- }
case MVT::i8:
case MVT::i16:
case MVT::i32:
@@ -2985,6 +2950,7 @@ bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
case MVT::f64:
if (NumLoads)
return false;
+ break;
}
return true;
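As an aside, the shouldScheduleLoadsNear hunk above acts as a register-pressure cap: XMM loads may still be clustered while fewer than three are already grouped in 64-bit mode (sixteen XMM registers are available), but in 32-bit mode, and for the integer and scalar-FP types, a second nearby load is refused. A toy sketch of that kind of cap:

// Illustrative sketch only: decide whether one more load may be clustered
// with those already scheduled against the same base pointer.
#include <cstdio>

enum LoadKind { VectorLoad, ScalarLoad };

static bool shouldClusterAnotherLoad(LoadKind kind, unsigned alreadyNearby,
                                     bool is64Bit) {
  if (kind == VectorLoad) {
    if (is64Bit)
      return alreadyNearby < 3;    // plenty of vector registers to play with
    return alreadyNearby == 0;     // only eight XMM registers: be strict
  }
  return alreadyNearby == 0;       // GPR/scalar-FP pressure rises quickly
}

int main() {
  std::printf("%d %d %d\n",
              shouldClusterAnotherLoad(VectorLoad, 2, true),
              shouldClusterAnotherLoad(VectorLoad, 1, false),
              shouldClusterAnotherLoad(ScalarLoad, 0, true));
  return 0;
}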
@@ -3025,609 +2991,164 @@ bool X86InstrInfo::isX86_64ExtendedReg(unsigned RegNo) {
case X86::R12B: case X86::R13B: case X86::R14B: case X86::R15B:
case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11:
case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
+ case X86::YMM8: case X86::YMM9: case X86::YMM10: case X86::YMM11:
+ case X86::YMM12: case X86::YMM13: case X86::YMM14: case X86::YMM15:
return true;
}
return false;
}
-
-/// determineREX - Determine if the MachineInstr has to be encoded with a X86-64
-/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
-/// size, and 3) use of X86-64 extended registers.
-unsigned X86InstrInfo::determineREX(const MachineInstr &MI) {
- unsigned REX = 0;
- const TargetInstrDesc &Desc = MI.getDesc();
-
- // Pseudo instructions do not need REX prefix byte.
- if ((Desc.TSFlags & X86II::FormMask) == X86II::Pseudo)
- return 0;
- if (Desc.TSFlags & X86II::REX_W)
- REX |= 1 << 3;
-
- unsigned NumOps = Desc.getNumOperands();
- if (NumOps) {
- bool isTwoAddr = NumOps > 1 &&
- Desc.getOperandConstraint(1, TOI::TIED_TO) != -1;
-
- // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
- unsigned i = isTwoAddr ? 1 : 0;
- for (unsigned e = NumOps; i != e; ++i) {
- const MachineOperand& MO = MI.getOperand(i);
- if (MO.isReg()) {
- unsigned Reg = MO.getReg();
- if (isX86_64NonExtLowByteReg(Reg))
- REX |= 0x40;
- }
- }
-
- switch (Desc.TSFlags & X86II::FormMask) {
- case X86II::MRMInitReg:
- if (isX86_64ExtendedReg(MI.getOperand(0)))
- REX |= (1 << 0) | (1 << 2);
- break;
- case X86II::MRMSrcReg: {
- if (isX86_64ExtendedReg(MI.getOperand(0)))
- REX |= 1 << 2;
- i = isTwoAddr ? 2 : 1;
- for (unsigned e = NumOps; i != e; ++i) {
- const MachineOperand& MO = MI.getOperand(i);
- if (isX86_64ExtendedReg(MO))
- REX |= 1 << 0;
- }
- break;
- }
- case X86II::MRMSrcMem: {
- if (isX86_64ExtendedReg(MI.getOperand(0)))
- REX |= 1 << 2;
- unsigned Bit = 0;
- i = isTwoAddr ? 2 : 1;
- for (; i != NumOps; ++i) {
- const MachineOperand& MO = MI.getOperand(i);
- if (MO.isReg()) {
- if (isX86_64ExtendedReg(MO))
- REX |= 1 << Bit;
- Bit++;
- }
- }
- break;
- }
- case X86II::MRM0m: case X86II::MRM1m:
- case X86II::MRM2m: case X86II::MRM3m:
- case X86II::MRM4m: case X86II::MRM5m:
- case X86II::MRM6m: case X86II::MRM7m:
- case X86II::MRMDestMem: {
- unsigned e = (isTwoAddr ? X86AddrNumOperands+1 : X86AddrNumOperands);
- i = isTwoAddr ? 1 : 0;
- if (NumOps > e && isX86_64ExtendedReg(MI.getOperand(e)))
- REX |= 1 << 2;
- unsigned Bit = 0;
- for (; i != e; ++i) {
- const MachineOperand& MO = MI.getOperand(i);
- if (MO.isReg()) {
- if (isX86_64ExtendedReg(MO))
- REX |= 1 << Bit;
- Bit++;
- }
- }
- break;
- }
- default: {
- if (isX86_64ExtendedReg(MI.getOperand(0)))
- REX |= 1 << 0;
- i = isTwoAddr ? 2 : 1;
- for (unsigned e = NumOps; i != e; ++i) {
- const MachineOperand& MO = MI.getOperand(i);
- if (isX86_64ExtendedReg(MO))
- REX |= 1 << 2;
- }
- break;
- }
- }
- }
- return REX;
-}
-
-/// sizePCRelativeBlockAddress - This method returns the size of a PC
-/// relative block address instruction
-///
-static unsigned sizePCRelativeBlockAddress() {
- return 4;
-}
-
-/// sizeGlobalAddress - Give the size of the emission of this global address
+/// getGlobalBaseReg - Return a virtual register initialized with the
+/// global base register value. Output instructions required to
+/// initialize the register in the function entry block, if necessary.
///
-static unsigned sizeGlobalAddress(bool dword) {
- return dword ? 8 : 4;
-}
-
-/// sizeConstPoolAddress - Give the size of the emission of this constant
-/// pool address
+/// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
///
-static unsigned sizeConstPoolAddress(bool dword) {
- return dword ? 8 : 4;
-}
+unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
+ assert(!TM.getSubtarget<X86Subtarget>().is64Bit() &&
+ "X86-64 PIC uses RIP relative addressing");
-/// sizeExternalSymbolAddress - Give the size of the emission of this external
-/// symbol
-///
-static unsigned sizeExternalSymbolAddress(bool dword) {
- return dword ? 8 : 4;
-}
+ X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
+ unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
+ if (GlobalBaseReg != 0)
+ return GlobalBaseReg;
-/// sizeJumpTableAddress - Give the size of the emission of this jump
-/// table address
-///
-static unsigned sizeJumpTableAddress(bool dword) {
- return dword ? 8 : 4;
+ // Create the register. The code to initialize it is inserted
+ // later, by the CGBR pass (below).
+ MachineRegisterInfo &RegInfo = MF->getRegInfo();
+ GlobalBaseReg = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
+ X86FI->setGlobalBaseReg(GlobalBaseReg);
+ return GlobalBaseReg;
}
-static unsigned sizeConstant(unsigned Size) {
- return Size;
+// These are the replaceable SSE instructions. Some of these have Int variants
+// that we don't include here. We don't want to replace instructions selected
+// by intrinsics.
+static const unsigned ReplaceableInstrs[][3] = {
+ //PackedSingle PackedDouble PackedInt
+ { X86::MOVAPSmr, X86::MOVAPDmr, X86::MOVDQAmr },
+ { X86::MOVAPSrm, X86::MOVAPDrm, X86::MOVDQArm },
+ { X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr },
+ { X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr },
+ { X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm },
+ { X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr },
+ { X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm },
+ { X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr },
+ { X86::ANDPSrm, X86::ANDPDrm, X86::PANDrm },
+ { X86::ANDPSrr, X86::ANDPDrr, X86::PANDrr },
+ { X86::ORPSrm, X86::ORPDrm, X86::PORrm },
+ { X86::ORPSrr, X86::ORPDrr, X86::PORrr },
+ { X86::V_SET0PS, X86::V_SET0PD, X86::V_SET0PI },
+ { X86::XORPSrm, X86::XORPDrm, X86::PXORrm },
+ { X86::XORPSrr, X86::XORPDrr, X86::PXORrr },
+ // AVX 128-bit support
+ { X86::VMOVAPSmr, X86::VMOVAPDmr, X86::VMOVDQAmr },
+ { X86::VMOVAPSrm, X86::VMOVAPDrm, X86::VMOVDQArm },
+ { X86::VMOVAPSrr, X86::VMOVAPDrr, X86::VMOVDQArr },
+ { X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr },
+ { X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm },
+ { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr },
+ { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm },
+ { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr },
+ { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDrm },
+ { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDrr },
+ { X86::VORPSrm, X86::VORPDrm, X86::VPORrm },
+ { X86::VORPSrr, X86::VORPDrr, X86::VPORrr },
+ { X86::AVX_SET0PS, X86::AVX_SET0PD, X86::AVX_SET0PI },
+ { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORrm },
+ { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORrr },
+};
+
+// FIXME: Some shuffle and unpack instructions have equivalents in different
+// domains, but they require a bit more work than just switching opcodes.
+
+static const unsigned *lookup(unsigned opcode, unsigned domain) {
+ for (unsigned i = 0, e = array_lengthof(ReplaceableInstrs); i != e; ++i)
+ if (ReplaceableInstrs[i][domain-1] == opcode)
+ return ReplaceableInstrs[i];
+ return 0;
}
-static unsigned sizeRegModRMByte(){
- return 1;
+std::pair<uint16_t, uint16_t>
+X86InstrInfo::GetSSEDomain(const MachineInstr *MI) const {
+ uint16_t domain = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
+ return std::make_pair(domain,
+ domain && lookup(MI->getOpcode(), domain) ? 0xe : 0);
}
-static unsigned sizeSIBByte(){
- return 1;
+void X86InstrInfo::SetSSEDomain(MachineInstr *MI, unsigned Domain) const {
+ assert(Domain>0 && Domain<4 && "Invalid execution domain");
+ uint16_t dom = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
+ assert(dom && "Not an SSE instruction");
+ const unsigned *table = lookup(MI->getOpcode(), dom);
+ assert(table && "Cannot change domain");
+ MI->setDesc(get(table[Domain-1]));
}
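// Aside (not part of this patch): ReplaceableInstrs, GetSSEDomain and
// SetSSEDomain above implement execution-domain switching with a
// three-column opcode table (PackedSingle / PackedDouble / PackedInt).
// A standalone toy sketch of the same lookup-and-replace pattern follows;
// the opcode numbers and names here are invented for illustration only.
#include <cassert>
#include <cstdio>

static const unsigned ReplaceableToy[][3] = {
  { 10, 11, 12 },   // think: orps / orpd / por
  { 20, 21, 22 },   // think: xorps / xorpd / pxor
};

static const unsigned *lookupRow(unsigned opcode, unsigned domain) {
  const unsigned n = sizeof(ReplaceableToy) / sizeof(ReplaceableToy[0]);
  for (unsigned i = 0; i != n; ++i)
    if (ReplaceableToy[i][domain - 1] == opcode)
      return ReplaceableToy[i];
  return 0;
}

// Rewrite 'opcode', currently executing in 'curDomain' (1..3), into the
// equivalent opcode for 'newDomain'; return it unchanged if not replaceable.
static unsigned switchDomain(unsigned opcode, unsigned curDomain,
                             unsigned newDomain) {
  assert(curDomain >= 1 && curDomain <= 3 && "invalid execution domain");
  assert(newDomain >= 1 && newDomain <= 3 && "invalid execution domain");
  const unsigned *row = lookupRow(opcode, curDomain);
  return row ? row[newDomain - 1] : opcode;
}

int main() {
  std::printf("%u\n", switchDomain(20, 1, 3));   // prints 22
  return 0;
}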
-static unsigned getDisplacementFieldSize(const MachineOperand *RelocOp) {
- unsigned FinalSize = 0;
- // If this is a simple integer displacement that doesn't require a relocation.
- if (!RelocOp) {
- FinalSize += sizeConstant(4);
- return FinalSize;
- }
-
- // Otherwise, this is something that requires a relocation.
- if (RelocOp->isGlobal()) {
- FinalSize += sizeGlobalAddress(false);
- } else if (RelocOp->isCPI()) {
- FinalSize += sizeConstPoolAddress(false);
- } else if (RelocOp->isJTI()) {
- FinalSize += sizeJumpTableAddress(false);
- } else {
- llvm_unreachable("Unknown value to relocate!");
- }
- return FinalSize;
+/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
+void X86InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
+ NopInst.setOpcode(X86::NOOP);
}
-static unsigned getMemModRMByteSize(const MachineInstr &MI, unsigned Op,
- bool IsPIC, bool Is64BitMode) {
- const MachineOperand &Op3 = MI.getOperand(Op+3);
- int DispVal = 0;
- const MachineOperand *DispForReloc = 0;
- unsigned FinalSize = 0;
-
- // Figure out what sort of displacement we have to handle here.
- if (Op3.isGlobal()) {
- DispForReloc = &Op3;
- } else if (Op3.isCPI()) {
- if (Is64BitMode || IsPIC) {
- DispForReloc = &Op3;
- } else {
- DispVal = 1;
- }
- } else if (Op3.isJTI()) {
- if (Is64BitMode || IsPIC) {
- DispForReloc = &Op3;
- } else {
- DispVal = 1;
- }
- } else {
- DispVal = 1;
- }
+namespace {
+ /// CGBR - Create Global Base Reg pass. This initializes the PIC
+ /// global base register for x86-32.
+ struct CGBR : public MachineFunctionPass {
+ static char ID;
+ CGBR() : MachineFunctionPass(ID) {}
- const MachineOperand &Base = MI.getOperand(Op);
- const MachineOperand &IndexReg = MI.getOperand(Op+2);
+ virtual bool runOnMachineFunction(MachineFunction &MF) {
+ const X86TargetMachine *TM =
+ static_cast<const X86TargetMachine *>(&MF.getTarget());
- unsigned BaseReg = Base.getReg();
+ assert(!TM->getSubtarget<X86Subtarget>().is64Bit() &&
+ "X86-64 PIC uses RIP relative addressing");
- // Is a SIB byte needed?
- if ((!Is64BitMode || DispForReloc || BaseReg != 0) &&
- IndexReg.getReg() == 0 &&
- (BaseReg == 0 || X86RegisterInfo::getX86RegNum(BaseReg) != N86::ESP)) {
- if (BaseReg == 0) { // Just a displacement?
- // Emit special case [disp32] encoding
- ++FinalSize;
- FinalSize += getDisplacementFieldSize(DispForReloc);
- } else {
- unsigned BaseRegNo = X86RegisterInfo::getX86RegNum(BaseReg);
- if (!DispForReloc && DispVal == 0 && BaseRegNo != N86::EBP) {
- // Emit simple indirect register encoding... [EAX] f.e.
- ++FinalSize;
- // Be pessimistic and assume it's a disp32, not a disp8
- } else {
- // Emit the most general non-SIB encoding: [REG+disp32]
- ++FinalSize;
- FinalSize += getDisplacementFieldSize(DispForReloc);
- }
- }
-
- } else { // We need a SIB byte, so start by outputting the ModR/M byte first
- assert(IndexReg.getReg() != X86::ESP &&
- IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");
-
- bool ForceDisp32 = false;
- if (BaseReg == 0 || DispForReloc) {
- // Emit the normal disp32 encoding.
- ++FinalSize;
- ForceDisp32 = true;
- } else {
- ++FinalSize;
- }
-
- FinalSize += sizeSIBByte();
-
- // Do we need to output a displacement?
- if (DispVal != 0 || ForceDisp32) {
- FinalSize += getDisplacementFieldSize(DispForReloc);
- }
- }
- return FinalSize;
-}
+ // Only emit a global base reg in PIC mode.
+ if (TM->getRelocationModel() != Reloc::PIC_)
+ return false;
+ // Insert the set of GlobalBaseReg into the first MBB of the function
+ MachineBasicBlock &FirstMBB = MF.front();
+ MachineBasicBlock::iterator MBBI = FirstMBB.begin();
+ DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+ const X86InstrInfo *TII = TM->getInstrInfo();
-static unsigned GetInstSizeWithDesc(const MachineInstr &MI,
- const TargetInstrDesc *Desc,
- bool IsPIC, bool Is64BitMode) {
+ unsigned PC;
+ if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT())
+ PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
+ else
+ PC = TII->getGlobalBaseReg(&MF);
- unsigned Opcode = Desc->Opcode;
- unsigned FinalSize = 0;
-
- // Emit the lock opcode prefix as needed.
- if (Desc->TSFlags & X86II::LOCK) ++FinalSize;
-
- // Emit segment override opcode prefix as needed.
- switch (Desc->TSFlags & X86II::SegOvrMask) {
- case X86II::FS:
- case X86II::GS:
- ++FinalSize;
- break;
- default: llvm_unreachable("Invalid segment!");
- case 0: break; // No segment override!
- }
-
- // Emit the repeat opcode prefix as needed.
- if ((Desc->TSFlags & X86II::Op0Mask) == X86II::REP) ++FinalSize;
-
- // Emit the operand size opcode prefix as needed.
- if (Desc->TSFlags & X86II::OpSize) ++FinalSize;
-
- // Emit the address size opcode prefix as needed.
- if (Desc->TSFlags & X86II::AdSize) ++FinalSize;
-
- bool Need0FPrefix = false;
- switch (Desc->TSFlags & X86II::Op0Mask) {
- case X86II::TB: // Two-byte opcode prefix
- case X86II::T8: // 0F 38
- case X86II::TA: // 0F 3A
- Need0FPrefix = true;
- break;
- case X86II::TF: // F2 0F 38
- ++FinalSize;
- Need0FPrefix = true;
- break;
- case X86II::REP: break; // already handled.
- case X86II::XS: // F3 0F
- ++FinalSize;
- Need0FPrefix = true;
- break;
- case X86II::XD: // F2 0F
- ++FinalSize;
- Need0FPrefix = true;
- break;
- case X86II::D8: case X86II::D9: case X86II::DA: case X86II::DB:
- case X86II::DC: case X86II::DD: case X86II::DE: case X86II::DF:
- ++FinalSize;
- break; // Two-byte opcode prefix
- default: llvm_unreachable("Invalid prefix!");
- case 0: break; // No prefix!
- }
-
- if (Is64BitMode) {
- // REX prefix
- unsigned REX = X86InstrInfo::determineREX(MI);
- if (REX)
- ++FinalSize;
- }
-
- // 0x0F escape code must be emitted just before the opcode.
- if (Need0FPrefix)
- ++FinalSize;
-
- switch (Desc->TSFlags & X86II::Op0Mask) {
- case X86II::T8: // 0F 38
- ++FinalSize;
- break;
- case X86II::TA: // 0F 3A
- ++FinalSize;
- break;
- case X86II::TF: // F2 0F 38
- ++FinalSize;
- break;
- }
-
- // If this is a two-address instruction, skip one of the register operands.
- unsigned NumOps = Desc->getNumOperands();
- unsigned CurOp = 0;
- if (NumOps > 1 && Desc->getOperandConstraint(1, TOI::TIED_TO) != -1)
- CurOp++;
- else if (NumOps > 2 && Desc->getOperandConstraint(NumOps-1, TOI::TIED_TO)== 0)
- // Skip the last source operand that is tied_to the dest reg. e.g. LXADD32
- --NumOps;
-
- switch (Desc->TSFlags & X86II::FormMask) {
- default: llvm_unreachable("Unknown FormMask value in X86 MachineCodeEmitter!");
- case X86II::Pseudo:
- // Remember the current PC offset, this is the PIC relocation
- // base address.
- switch (Opcode) {
- default:
- break;
- case TargetOpcode::INLINEASM: {
- const MachineFunction *MF = MI.getParent()->getParent();
- const TargetInstrInfo &TII = *MF->getTarget().getInstrInfo();
- FinalSize += TII.getInlineAsmLength(MI.getOperand(0).getSymbolName(),
- *MF->getTarget().getMCAsmInfo());
- break;
- }
- case TargetOpcode::DBG_LABEL:
- case TargetOpcode::EH_LABEL:
- break;
- case TargetOpcode::IMPLICIT_DEF:
- case TargetOpcode::KILL:
- case X86::FP_REG_KILL:
- break;
- case X86::MOVPC32r: {
- // This emits the "call" portion of this pseudo instruction.
- ++FinalSize;
- FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags));
- break;
- }
- }
- CurOp = NumOps;
- break;
- case X86II::RawFrm:
- ++FinalSize;
-
- if (CurOp != NumOps) {
- const MachineOperand &MO = MI.getOperand(CurOp++);
- if (MO.isMBB()) {
- FinalSize += sizePCRelativeBlockAddress();
- } else if (MO.isGlobal()) {
- FinalSize += sizeGlobalAddress(false);
- } else if (MO.isSymbol()) {
- FinalSize += sizeExternalSymbolAddress(false);
- } else if (MO.isImm()) {
- FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags));
- } else {
- llvm_unreachable("Unknown RawFrm operand!");
- }
- }
- break;
-
- case X86II::AddRegFrm:
- ++FinalSize;
- ++CurOp;
-
- if (CurOp != NumOps) {
- const MachineOperand &MO1 = MI.getOperand(CurOp++);
- unsigned Size = X86II::getSizeOfImm(Desc->TSFlags);
- if (MO1.isImm())
- FinalSize += sizeConstant(Size);
- else {
- bool dword = false;
- if (Opcode == X86::MOV64ri)
- dword = true;
- if (MO1.isGlobal()) {
- FinalSize += sizeGlobalAddress(dword);
- } else if (MO1.isSymbol())
- FinalSize += sizeExternalSymbolAddress(dword);
- else if (MO1.isCPI())
- FinalSize += sizeConstPoolAddress(dword);
- else if (MO1.isJTI())
- FinalSize += sizeJumpTableAddress(dword);
+ // Operand of MovePCtoStack is completely ignored by asm printer. It's
+ // only used in JIT code emission as displacement to pc.
+ BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
+
+ // If we're using vanilla 'GOT' PIC style, we should use relative addressing
+ // not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
+ if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT()) {
+ unsigned GlobalBaseReg = TII->getGlobalBaseReg(&MF);
+ // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
+ BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
+ .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
+ X86II::MO_GOT_ABSOLUTE_ADDRESS);
}
- }
- break;
-
- case X86II::MRMDestReg: {
- ++FinalSize;
- FinalSize += sizeRegModRMByte();
- CurOp += 2;
- if (CurOp != NumOps) {
- ++CurOp;
- FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags));
- }
- break;
- }
- case X86II::MRMDestMem: {
- ++FinalSize;
- FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
- CurOp += X86AddrNumOperands + 1;
- if (CurOp != NumOps) {
- ++CurOp;
- FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags));
- }
- break;
- }
-
- case X86II::MRMSrcReg:
- ++FinalSize;
- FinalSize += sizeRegModRMByte();
- CurOp += 2;
- if (CurOp != NumOps) {
- ++CurOp;
- FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags));
- }
- break;
-
- case X86II::MRMSrcMem: {
- int AddrOperands;
- if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
- Opcode == X86::LEA16r || Opcode == X86::LEA32r)
- AddrOperands = X86AddrNumOperands - 1; // No segment register
- else
- AddrOperands = X86AddrNumOperands;
-
- ++FinalSize;
- FinalSize += getMemModRMByteSize(MI, CurOp+1, IsPIC, Is64BitMode);
- CurOp += AddrOperands + 1;
- if (CurOp != NumOps) {
- ++CurOp;
- FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags));
- }
- break;
- }
- case X86II::MRM0r: case X86II::MRM1r:
- case X86II::MRM2r: case X86II::MRM3r:
- case X86II::MRM4r: case X86II::MRM5r:
- case X86II::MRM6r: case X86II::MRM7r:
- ++FinalSize;
- if (Desc->getOpcode() == X86::LFENCE ||
- Desc->getOpcode() == X86::MFENCE) {
- // Special handling of lfence and mfence;
- FinalSize += sizeRegModRMByte();
- } else if (Desc->getOpcode() == X86::MONITOR ||
- Desc->getOpcode() == X86::MWAIT) {
- // Special handling of monitor and mwait.
- FinalSize += sizeRegModRMByte() + 1; // +1 for the opcode.
- } else {
- ++CurOp;
- FinalSize += sizeRegModRMByte();
+ return true;
}
- if (CurOp != NumOps) {
- const MachineOperand &MO1 = MI.getOperand(CurOp++);
- unsigned Size = X86II::getSizeOfImm(Desc->TSFlags);
- if (MO1.isImm())
- FinalSize += sizeConstant(Size);
- else {
- bool dword = false;
- if (Opcode == X86::MOV64ri32)
- dword = true;
- if (MO1.isGlobal()) {
- FinalSize += sizeGlobalAddress(dword);
- } else if (MO1.isSymbol())
- FinalSize += sizeExternalSymbolAddress(dword);
- else if (MO1.isCPI())
- FinalSize += sizeConstPoolAddress(dword);
- else if (MO1.isJTI())
- FinalSize += sizeJumpTableAddress(dword);
- }
+ virtual const char *getPassName() const {
+ return "X86 PIC Global Base Reg Initialization";
}
- break;
- case X86II::MRM0m: case X86II::MRM1m:
- case X86II::MRM2m: case X86II::MRM3m:
- case X86II::MRM4m: case X86II::MRM5m:
- case X86II::MRM6m: case X86II::MRM7m: {
-
- ++FinalSize;
- FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
- CurOp += X86AddrNumOperands;
-
- if (CurOp != NumOps) {
- const MachineOperand &MO = MI.getOperand(CurOp++);
- unsigned Size = X86II::getSizeOfImm(Desc->TSFlags);
- if (MO.isImm())
- FinalSize += sizeConstant(Size);
- else {
- bool dword = false;
- if (Opcode == X86::MOV64mi32)
- dword = true;
- if (MO.isGlobal()) {
- FinalSize += sizeGlobalAddress(dword);
- } else if (MO.isSymbol())
- FinalSize += sizeExternalSymbolAddress(dword);
- else if (MO.isCPI())
- FinalSize += sizeConstPoolAddress(dword);
- else if (MO.isJTI())
- FinalSize += sizeJumpTableAddress(dword);
- }
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
}
- break;
-
- case X86II::MRM_C1:
- case X86II::MRM_C8:
- case X86II::MRM_C9:
- case X86II::MRM_E8:
- case X86II::MRM_F0:
- FinalSize += 2;
- break;
- }
-
- case X86II::MRMInitReg:
- ++FinalSize;
- // Duplicate register, used by things like MOV8r0 (aka xor reg,reg).
- FinalSize += sizeRegModRMByte();
- ++CurOp;
- break;
- }
-
- if (!Desc->isVariadic() && CurOp != NumOps) {
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "Cannot determine size: " << MI;
- llvm_report_error(Msg.str());
- }
-
-
- return FinalSize;
-}
-
-
-unsigned X86InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
- const TargetInstrDesc &Desc = MI->getDesc();
- bool IsPIC = TM.getRelocationModel() == Reloc::PIC_;
- bool Is64BitMode = TM.getSubtargetImpl()->is64Bit();
- unsigned Size = GetInstSizeWithDesc(*MI, &Desc, IsPIC, Is64BitMode);
- if (Desc.getOpcode() == X86::MOVPC32r)
- Size += GetInstSizeWithDesc(*MI, &get(X86::POP32r), IsPIC, Is64BitMode);
- return Size;
+ };
}
-/// getGlobalBaseReg - Return a virtual register initialized with the
-/// the global base register value. Output instructions required to
-/// initialize the register in the function entry block, if necessary.
-///
-unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
- assert(!TM.getSubtarget<X86Subtarget>().is64Bit() &&
- "X86-64 PIC uses RIP relative addressing");
-
- X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
- unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
- if (GlobalBaseReg != 0)
- return GlobalBaseReg;
-
- // Insert the set of GlobalBaseReg into the first MBB of the function
- MachineBasicBlock &FirstMBB = MF->front();
- MachineBasicBlock::iterator MBBI = FirstMBB.begin();
- DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
- MachineRegisterInfo &RegInfo = MF->getRegInfo();
- unsigned PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
-
- const TargetInstrInfo *TII = TM.getInstrInfo();
- // Operand of MovePCtoStack is completely ignored by asm printer. It's
- // only used in JIT code emission as displacement to pc.
- BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
-
- // If we're using vanilla 'GOT' PIC style, we should use relative addressing
- // not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
- if (TM.getSubtarget<X86Subtarget>().isPICStyleGOT()) {
- GlobalBaseReg = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
- // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
- BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
- .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
- X86II::MO_GOT_ABSOLUTE_ADDRESS);
- } else {
- GlobalBaseReg = PC;
- }
-
- X86FI->setGlobalBaseReg(GlobalBaseReg);
- return GlobalBaseReg;
-}
+char CGBR::ID = 0;
+FunctionPass*
+llvm::createGlobalBaseRegPass() { return new CGBR(); }
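
Nothing in this hunk shows where the new factory gets called from; purely as a sketch, assuming the usual PassManagerBase hook on the target machine (the exact hook, its other contents, and any guard are guesses rather than part of this patch — the pass itself already returns early when the relocation model is not PIC):

    #include "X86TargetMachine.h"
    #include "llvm/PassManager.h"
    using namespace llvm;

    bool X86TargetMachine::addInstSelector(PassManagerBase &PM,
                                           CodeGenOpt::Level OptLevel) {
      PM.add(createX86ISelDag(*this, OptLevel));  // existing DAG instruction selector
      PM.add(createGlobalBaseRegPass());          // new: materialize the PIC base register
      return false;
    }
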
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86InstrInfo.h b/libclamav/c++/llvm/lib/Target/X86/X86InstrInfo.h
index 5111719..f336206 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/libclamav/c++/llvm/lib/Target/X86/X86InstrInfo.h
@@ -24,6 +24,24 @@ namespace llvm {
class X86TargetMachine;
namespace X86 {
+ // Enums for memory operand decoding. Each memory operand is represented with
+ // a 5 operand sequence in the form:
+ // [BaseReg, ScaleAmt, IndexReg, Disp, Segment]
+ // These enums help decode this.
+ enum {
+ AddrBaseReg = 0,
+ AddrScaleAmt = 1,
+ AddrIndexReg = 2,
+ AddrDisp = 3,
+
+ /// AddrSegmentReg - The operand # of the segment in the memory operand.
+ AddrSegmentReg = 4,
+
+ /// AddrNumOperands - Total number of operands in a memory reference.
+ AddrNumOperands = 5
+ };
+
+
// X86 specific condition code. These correspond to X86_*_COND in
// X86InstrInfo.td. They must be kept in synch.
enum CondCode {
@@ -173,7 +191,19 @@ namespace X86II {
/// indicates that the reference is actually to "FOO$non_lazy_ptr -PICBASE",
/// which is a PIC-base-relative reference to a hidden dyld lazy pointer
/// stub.
- MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE
+ MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE,
+
+ /// MO_TLVP - On a symbol operand this indicates that the immediate is
+ /// some TLS offset.
+ ///
+ /// This is the TLS offset for the Darwin TLS mechanism.
+ MO_TLVP,
+
+ /// MO_TLVP_PIC_BASE - On a symbol operand this indicates that the immediate
+ /// is some TLS offset from the picbase.
+ ///
+ /// This is the 32-bit TLS offset for Darwin TLS in PIC mode.
+ MO_TLVP_PIC_BASE
};
}
@@ -203,6 +233,7 @@ inline static bool isGlobalRelativeToPICBase(unsigned char TargetFlag) {
case X86II::MO_PIC_BASE_OFFSET: // Darwin local global.
case X86II::MO_DARWIN_NONLAZY_PIC_BASE: // Darwin/32 external global.
case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: // Darwin/32 hidden global.
+ case X86II::MO_TLVP: // ??? Pretty sure..
return true;
default:
return false;
@@ -280,6 +311,12 @@ namespace X86II {
MRM_F0 = 40,
MRM_F8 = 41,
MRM_F9 = 42,
+
+ /// RawFrmImm16 - This is used for CALL FAR instructions, which have two
+ /// immediates, the first of which is a 16 or 32-bit immediate (specified by
+ /// the imm encoding) and the second is a 16-bit fixed value. In the AMD
+ /// manual, this operand is described as pntr16:32 and pntr16:16
+ RawFrmImm16 = 43,
FormMask = 63,
@@ -347,9 +384,10 @@ namespace X86II {
Imm8 = 1 << ImmShift,
Imm8PCRel = 2 << ImmShift,
Imm16 = 3 << ImmShift,
- Imm32 = 4 << ImmShift,
- Imm32PCRel = 5 << ImmShift,
- Imm64 = 6 << ImmShift,
+ Imm16PCRel = 4 << ImmShift,
+ Imm32 = 5 << ImmShift,
+ Imm32PCRel = 6 << ImmShift,
+ Imm64 = 7 << ImmShift,
//===------------------------------------------------------------------===//
// FP Instruction Classification... Zero is non-fp instruction.
@@ -398,30 +436,58 @@ namespace X86II {
FS = 1 << SegOvrShift,
GS = 2 << SegOvrShift,
- // Bits 22 -> 23 are unused
+ // Execution domain for SSE instructions in bits 22, 23.
+ // 0 in bits 22-23 means normal, non-SSE instruction.
+ SSEDomainShift = 22,
+
OpcodeShift = 24,
- OpcodeMask = 0xFF << OpcodeShift
+ OpcodeMask = 0xFF << OpcodeShift,
+
+ //===------------------------------------------------------------------===//
+ // VEX - The opcode prefix used by AVX instructions
+ VEX = 1U << 0,
+
+ // VEX_W - Has opcode-specific functionality, but is used in the same
+ // way as REX_W is for regular SSE instructions.
+ VEX_W = 1U << 1,
+
+ // VEX_4V - Used to specify an additional AVX/SSE register. Several
+ // 2-address instructions in SSE are represented as 3-address ones in AVX
+ // and the additional register is encoded in the VEX_VVVV prefix.
+ VEX_4V = 1U << 2,
+
+ // VEX_I8IMM - Specifies that the last register used in an AVX instruction
+ // must be encoded in the i8 immediate field. This usually happens in
+ // instructions with 4 operands.
+ VEX_I8IMM = 1U << 3,
+
+ // VEX_L - Stands for a bit in the VEX opcode prefix meaning the current
+ // instruction uses 256-bit wide registers. This is usually auto detected if
+ // a VR256 register is used, but some AVX instructions also have this field
+ // marked when using f256 memory references.
+ VEX_L = 1U << 4
};
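
These VEX flags sit above bit 31 of the now 64-bit TSFlags word, which is why getMemoryOperandNo() further down tests them after a 32-bit shift. The same test pulled out into a tiny helper, as a sketch only (the function name is invented):

    #include <cstdint>
    #include "X86InstrInfo.h"   // X86II::VEX_4V

    static inline bool hasVEX_4V(uint64_t TSFlags) {
      // Mirrors the check inside getMemoryOperandNo() below:
      //   bool HasVEX_4V = (TSFlags >> 32) & X86II::VEX_4V;
      return (TSFlags >> 32) & llvm::X86II::VEX_4V;
    }
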
// getBaseOpcodeFor - This function returns the "base" X86 opcode for the
// specified machine instruction.
//
- static inline unsigned char getBaseOpcodeFor(unsigned TSFlags) {
+ static inline unsigned char getBaseOpcodeFor(uint64_t TSFlags) {
return TSFlags >> X86II::OpcodeShift;
}
- static inline bool hasImm(unsigned TSFlags) {
+ static inline bool hasImm(uint64_t TSFlags) {
return (TSFlags & X86II::ImmMask) != 0;
}
/// getSizeOfImm - Decode the "size of immediate" field from the TSFlags field
/// of the specified instruction.
- static inline unsigned getSizeOfImm(unsigned TSFlags) {
+ static inline unsigned getSizeOfImm(uint64_t TSFlags) {
switch (TSFlags & X86II::ImmMask) {
default: assert(0 && "Unknown immediate size");
case X86II::Imm8:
case X86II::Imm8PCRel: return 1;
- case X86II::Imm16: return 2;
+ case X86II::Imm16:
+ case X86II::Imm16PCRel: return 2;
case X86II::Imm32:
case X86II::Imm32PCRel: return 4;
case X86II::Imm64: return 8;
@@ -430,23 +496,78 @@ namespace X86II {
/// isImmPCRel - Return true if the immediate of the specified instruction's
/// TSFlags indicates that it is pc relative.
- static inline unsigned isImmPCRel(unsigned TSFlags) {
+ static inline unsigned isImmPCRel(uint64_t TSFlags) {
switch (TSFlags & X86II::ImmMask) {
- default: assert(0 && "Unknown immediate size");
- case X86II::Imm8PCRel:
- case X86II::Imm32PCRel:
- return true;
- case X86II::Imm8:
- case X86II::Imm16:
- case X86II::Imm32:
- case X86II::Imm64:
- return false;
+ default: assert(0 && "Unknown immediate size");
+ case X86II::Imm8PCRel:
+ case X86II::Imm16PCRel:
+ case X86II::Imm32PCRel:
+ return true;
+ case X86II::Imm8:
+ case X86II::Imm16:
+ case X86II::Imm32:
+ case X86II::Imm64:
+ return false;
}
- }
+ }
+
+ /// getMemoryOperandNo - The function returns the MCInst operand # for the
+ /// first field of the memory operand. If the instruction doesn't have a
+ /// memory operand, this returns -1.
+ ///
+ /// Note that this ignores tied operands. If there is a tied register which
+ /// is duplicated in the MCInst (e.g. "EAX = addl EAX, [mem]") it is only
+ /// counted as one operand.
+ ///
+ static inline int getMemoryOperandNo(uint64_t TSFlags) {
+ switch (TSFlags & X86II::FormMask) {
+ case X86II::MRMInitReg: assert(0 && "FIXME: Remove this form");
+ default: assert(0 && "Unknown FormMask value in getMemoryOperandNo!");
+ case X86II::Pseudo:
+ case X86II::RawFrm:
+ case X86II::AddRegFrm:
+ case X86II::MRMDestReg:
+ case X86II::MRMSrcReg:
+ case X86II::RawFrmImm16:
+ return -1;
+ case X86II::MRMDestMem:
+ return 0;
+ case X86II::MRMSrcMem: {
+ bool HasVEX_4V = (TSFlags >> 32) & X86II::VEX_4V;
+ unsigned FirstMemOp = 1;
+ if (HasVEX_4V)
+ ++FirstMemOp;// Skip the register source (which is encoded in VEX_VVVV).
+
+ // FIXME: Maybe lea should have its own form? This is a horrible hack.
+ //if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
+ // Opcode == X86::LEA16r || Opcode == X86::LEA32r)
+ return FirstMemOp;
+ }
+ case X86II::MRM0r: case X86II::MRM1r:
+ case X86II::MRM2r: case X86II::MRM3r:
+ case X86II::MRM4r: case X86II::MRM5r:
+ case X86II::MRM6r: case X86II::MRM7r:
+ return -1;
+ case X86II::MRM0m: case X86II::MRM1m:
+ case X86II::MRM2m: case X86II::MRM3m:
+ case X86II::MRM4m: case X86II::MRM5m:
+ case X86II::MRM6m: case X86II::MRM7m:
+ return 0;
+ case X86II::MRM_C1:
+ case X86II::MRM_C2:
+ case X86II::MRM_C3:
+ case X86II::MRM_C4:
+ case X86II::MRM_C8:
+ case X86II::MRM_C9:
+ case X86II::MRM_E8:
+ case X86II::MRM_F0:
+ case X86II::MRM_F8:
+ case X86II::MRM_F9:
+ return -1;
+ }
+ }
}
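
Together with the X86::Addr* enums added at the top of this header, the new getMemoryOperandNo() lets encoder-side code find a memory reference without per-form special cases. A rough usage sketch against an MCInst (the function name is invented; error handling is omitted):

    #include <cstdint>
    #include "X86InstrInfo.h"            // X86::Addr*, X86II::getMemoryOperandNo
    #include "llvm/MC/MCInst.h"

    // Return the base register of MI's memory reference, or 0 when the form
    // has no memory operand.  getMemoryOperandNo() returns an MCInst operand
    // index, so no tied-operand adjustment is needed here.
    static unsigned getMemBaseReg(const llvm::MCInst &MI, uint64_t TSFlags) {
      int MemOp = llvm::X86II::getMemoryOperandNo(TSFlags);
      if (MemOp < 0)
        return 0;
      return MI.getOperand(MemOp + llvm::X86::AddrBaseReg).getReg();
    }
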
-const int X86AddrNumOperands = 5;
-
inline static bool isScale(const MachineOperand &MO) {
return MO.isImm() &&
(MO.getImm() == 1 || MO.getImm() == 2 ||
@@ -486,7 +607,7 @@ class X86InstrInfo : public TargetInstrInfoImpl {
/// MemOp2RegOpTable - Load / store unfolding opcode map.
///
DenseMap<unsigned*, std::pair<unsigned, unsigned> > MemOp2RegOpTable;
-
+
public:
explicit X86InstrInfo(X86TargetMachine &tm);
@@ -496,12 +617,6 @@ public:
///
virtual const X86RegisterInfo &getRegisterInfo() const { return RI; }
- /// Return true if the instruction is a register to register move and return
- /// the source and dest operands and their sub-register indices by reference.
- virtual bool isMoveInstr(const MachineInstr &MI,
- unsigned &SrcReg, unsigned &DstReg,
- unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
-
/// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
/// extension instruction. That is, it's like a copy where it's legal for the
/// source to overlap the destination. e.g. X86::MOVSX64rr32. If this returns
@@ -552,7 +667,7 @@ public:
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo &TRI) const;
/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
@@ -582,16 +697,17 @@ public:
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC) const;
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const;
virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
SmallVectorImpl<MachineOperand> &Addr,
@@ -603,7 +719,8 @@ public:
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC) const;
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const;
virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
SmallVectorImpl<MachineOperand> &Addr,
@@ -614,12 +731,20 @@ public:
virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI) const;
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI) const;
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const;
+ virtual
+ MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
+ int FrameIx, uint64_t Offset,
+ const MDNode *MDPtr,
+ DebugLoc DL) const;
+
/// foldMemoryOperand - If this target supports it, fold a load or store of
/// the specified stack slot into the specified machine instruction for the
/// specified operand(s). If this is possible, the target should perform the
@@ -684,6 +809,8 @@ public:
int64_t Offset1, int64_t Offset2,
unsigned NumLoads) const;
+ virtual void getNoopForMachoTarget(MCInst &NopInst) const;
+
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
@@ -700,22 +827,24 @@ public:
if (!MO.isReg()) return false;
return isX86_64ExtendedReg(MO.getReg());
}
- static unsigned determineREX(const MachineInstr &MI);
/// isX86_64ExtendedReg - Is the MachineOperand a x86-64 extended (r8 or
/// higher) register? e.g. r8, xmm8, xmm13, etc.
static bool isX86_64ExtendedReg(unsigned RegNo);
- /// GetInstSize - Returns the size of the specified MachineInstr.
- ///
- virtual unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
-
/// getGlobalBaseReg - Return a virtual register initialized with the
/// the global base register value. Output instructions required to
/// initialize the register in the function entry block, if necessary.
///
unsigned getGlobalBaseReg(MachineFunction *MF) const;
+ /// GetSSEDomain - Return the SSE execution domain of MI as the first element,
+ /// and a bitmask of possible arguments to SetSSEDomain as the second.
+ std::pair<uint16_t, uint16_t> GetSSEDomain(const MachineInstr *MI) const;
+
+ /// SetSSEDomain - Set the SSEDomain of MI.
+ void SetSSEDomain(MachineInstr *MI, unsigned Domain) const;
+
private:
MachineInstr * convertToThreeAddressWithLEA(unsigned MIOpc,
MachineFunction::iterator &MFI,
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86InstrInfo.td b/libclamav/c++/llvm/lib/Target/X86/X86InstrInfo.td
index 8a6ff54..09b7721 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/libclamav/c++/llvm/lib/Target/X86/X86InstrInfo.td
@@ -1,4 +1,4 @@
-
+//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -21,19 +21,20 @@ def SDTIntShiftDOp: SDTypeProfile<1, 3,
[SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
SDTCisInt<0>, SDTCisInt<3>]>;
-def SDTX86CmpTest : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
+def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisSameAs<1, 2>]>;
def SDTX86Cmov : SDTypeProfile<1, 4,
[SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;
// Unary and binary operator instructions that set EFLAGS as a side-effect.
-def SDTUnaryArithWithFlags : SDTypeProfile<1, 1,
- [SDTCisInt<0>]>;
-def SDTBinaryArithWithFlags : SDTypeProfile<1, 2,
- [SDTCisSameAs<0, 1>,
- SDTCisSameAs<0, 2>,
- SDTCisInt<0>]>;
+def SDTUnaryArithWithFlags : SDTypeProfile<2, 1,
+ [SDTCisInt<0>, SDTCisVT<1, i32>]>;
+
+def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
+ [SDTCisSameAs<0, 2>,
+ SDTCisSameAs<0, 3>,
+ SDTCisInt<0>, SDTCisVT<1, i32>]>;
def SDTX86BrCond : SDTypeProfile<0, 3,
[SDTCisVT<0, OtherVT>,
SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
@@ -71,19 +72,35 @@ def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
def SDT_X86TLSADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
+
def SDT_X86SegmentBaseAddress : SDTypeProfile<1, 1, [SDTCisPtrTy<0>]>;
def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;
-def X86bsf : SDNode<"X86ISD::BSF", SDTIntUnaryOp>;
-def X86bsr : SDNode<"X86ISD::BSR", SDTIntUnaryOp>;
+def SDT_X86MEMBARRIER : SDTypeProfile<0, 0, []>;
+def SDT_X86MEMBARRIERNoSSE : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+
+def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER,
+ [SDNPHasChain]>;
+def X86MemBarrierNoSSE : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIERNoSSE,
+ [SDNPHasChain]>;
+def X86MFence : SDNode<"X86ISD::MFENCE", SDT_X86MEMBARRIER,
+ [SDNPHasChain]>;
+def X86SFence : SDNode<"X86ISD::SFENCE", SDT_X86MEMBARRIER,
+ [SDNPHasChain]>;
+def X86LFence : SDNode<"X86ISD::LFENCE", SDT_X86MEMBARRIER,
+ [SDNPHasChain]>;
+
+
+def X86bsf : SDNode<"X86ISD::BSF", SDTUnaryArithWithFlags>;
+def X86bsr : SDNode<"X86ISD::BSR", SDTUnaryArithWithFlags>;
def X86shld : SDNode<"X86ISD::SHLD", SDTIntShiftDOp>;
def X86shrd : SDNode<"X86ISD::SHRD", SDTIntShiftDOp>;
def X86cmp : SDNode<"X86ISD::CMP" , SDTX86CmpTest>;
-
def X86bt : SDNode<"X86ISD::BT", SDTX86CmpTest>;
def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>;
@@ -120,12 +137,12 @@ def X86AtomSwap64 : SDNode<"X86ISD::ATOMSWAP64_DAG", SDTX86atomicBinary,
[SDNPHasChain, SDNPMayStore,
SDNPMayLoad, SDNPMemOperand]>;
def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
- [SDNPHasChain, SDNPOptInFlag]>;
+ [SDNPHasChain, SDNPOptInFlag, SDNPVariadic]>;
def X86vastart_save_xmm_regs :
SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
SDT_X86VASTART_SAVE_XMM_REGS,
- [SDNPHasChain]>;
+ [SDNPHasChain, SDNPVariadic]>;
def X86callseq_start :
SDNode<"ISD::CALLSEQ_START", SDT_X86CallSeqStart,
@@ -135,7 +152,8 @@ def X86callseq_end :
[SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
def X86call : SDNode<"X86ISD::CALL", SDT_X86Call,
- [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag]>;
+ [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag,
+ SDNPVariadic]>;
def X86rep_stos: SDNode<"X86ISD::REP_STOS", SDTX86RepStr,
[SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore]>;
@@ -158,7 +176,7 @@ def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
[SDNPHasChain]>;
def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
- [SDNPHasChain, SDNPOptInFlag]>;
+ [SDNPHasChain, SDNPOptInFlag, SDNPVariadic]>;
def X86add_flag : SDNode<"X86ISD::ADD", SDTBinaryArithWithFlags,
[SDNPCommutative]>;
@@ -167,6 +185,7 @@ def X86smul_flag : SDNode<"X86ISD::SMUL", SDTBinaryArithWithFlags,
[SDNPCommutative]>;
def X86umul_flag : SDNode<"X86ISD::UMUL", SDTUnaryArithWithFlags,
[SDNPCommutative]>;
+
def X86inc_flag : SDNode<"X86ISD::INC", SDTUnaryArithWithFlags>;
def X86dec_flag : SDNode<"X86ISD::DEC", SDTUnaryArithWithFlags>;
def X86or_flag : SDNode<"X86ISD::OR", SDTBinaryArithWithFlags,
@@ -180,6 +199,9 @@ def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;
def X86MingwAlloca : SDNode<"X86ISD::MINGW_ALLOCA", SDTX86Void,
[SDNPHasChain, SDNPInFlag, SDNPOutFlag]>;
+
+def X86TLSCall : SDNode<"X86ISD::TLSCALL", SDT_X86TLSCALL,
+ []>;
//===----------------------------------------------------------------------===//
// X86 Operand Definitions.
@@ -193,15 +215,11 @@ def ptr_rc_nosp : PointerLikeRegClass<1>;
//
def X86MemAsmOperand : AsmOperandClass {
let Name = "Mem";
- let SuperClass = ?;
+ let SuperClasses = [];
}
def X86AbsMemAsmOperand : AsmOperandClass {
let Name = "AbsMem";
- let SuperClass = X86MemAsmOperand;
-}
-def X86NoSegMemAsmOperand : AsmOperandClass {
- let Name = "NoSegMem";
- let SuperClass = X86MemAsmOperand;
+ let SuperClasses = [X86MemAsmOperand];
}
class X86MemOperand<string printMethod> : Operand<iPTR> {
let PrintMethod = printMethod;
@@ -219,12 +237,12 @@ def i16mem : X86MemOperand<"printi16mem">;
def i32mem : X86MemOperand<"printi32mem">;
def i64mem : X86MemOperand<"printi64mem">;
def i128mem : X86MemOperand<"printi128mem">;
-//def i256mem : X86MemOperand<"printi256mem">;
+def i256mem : X86MemOperand<"printi256mem">;
def f32mem : X86MemOperand<"printf32mem">;
def f64mem : X86MemOperand<"printf64mem">;
def f80mem : X86MemOperand<"printf80mem">;
def f128mem : X86MemOperand<"printf128mem">;
-//def f256mem : X86MemOperand<"printf256mem">;
+def f256mem : X86MemOperand<"printf256mem">;
// A version of i8mem for use on x86-64 that uses GR64_NOREX instead of
// plain GR64, so that it doesn't potentially require a REX prefix.
@@ -234,15 +252,20 @@ def i8mem_NOREX : Operand<i64> {
let ParserMatchClass = X86MemAsmOperand;
}
-def lea32mem : Operand<i32> {
- let PrintMethod = "printlea32mem";
- let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm);
- let ParserMatchClass = X86NoSegMemAsmOperand;
+// Special i32mem for addresses of load folding tail calls. These are not
+// allowed to use callee-saved registers since they must be scheduled
+// after callee-saved registers are popped.
+def i32mem_TC : Operand<i32> {
+ let PrintMethod = "printi32mem";
+ let MIOperandInfo = (ops GR32_TC, i8imm, GR32_TC, i32imm, i8imm);
+ let ParserMatchClass = X86MemAsmOperand;
}
+
let ParserMatchClass = X86AbsMemAsmOperand,
PrintMethod = "print_pcrel_imm" in {
def i32imm_pcrel : Operand<i32>;
+def i16imm_pcrel : Operand<i16>;
def offset8 : Operand<i64>;
def offset16 : Operand<i64>;
@@ -259,19 +282,54 @@ def SSECC : Operand<i8> {
let PrintMethod = "printSSECC";
}
-def ImmSExt8AsmOperand : AsmOperandClass {
- let Name = "ImmSExt8";
- let SuperClass = ImmAsmOperand;
+class ImmSExtAsmOperandClass : AsmOperandClass {
+ let SuperClasses = [ImmAsmOperand];
+ let RenderMethod = "addImmOperands";
+}
+
+// Sign-extended immediate classes. We don't need to define the full lattice
+// here because there is no instruction with an ambiguity between ImmSExti64i32
+// and ImmSExti32i8.
+//
+// The strange ranges come from the fact that the assembler always works with
+// 64-bit immediates, but for a 16-bit target value we want to accept both "-1"
+// (which will be a -1ULL), and "0xFF" (-1 in 16-bits).
+
+// [0, 0x7FFFFFFF] |
+// [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
+def ImmSExti64i32AsmOperand : ImmSExtAsmOperandClass {
+ let Name = "ImmSExti64i32";
+}
+
+// [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] |
+// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
+def ImmSExti16i8AsmOperand : ImmSExtAsmOperandClass {
+ let Name = "ImmSExti16i8";
+ let SuperClasses = [ImmSExti64i32AsmOperand];
+}
+
+// [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] |
+// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
+def ImmSExti32i8AsmOperand : ImmSExtAsmOperandClass {
+ let Name = "ImmSExti32i8";
+}
+
+// [0, 0x0000007F] |
+// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
+def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass {
+ let Name = "ImmSExti64i8";
+ let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand,
+ ImmSExti64i32AsmOperand];
}
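
The ranges in the comments above read most easily as a plain predicate; here is the ImmSExti16i8 case only, spelled out as a sketch with an invented name (the real acceptance test lives in the assembler's operand classes, not in this file):

    #include <cstdint>

    // True when the 64-bit immediate the assembler parsed can be used for a
    // 16-bit operand encoded as a sign-extended 8-bit immediate, per the
    // ImmSExti16i8 ranges above.
    static bool isImmSExti16i8(uint64_t V) {
      return  V <= 0x000000000000007FULL ||                                 // [0, 0x7F]
             (V >= 0x000000000000FF80ULL && V <= 0x000000000000FFFFULL) ||  // 16-bit -128..-1
              V >= 0xFFFFFFFFFFFFFF80ULL;                                   // 64-bit -128..-1
    }
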
// A couple of more descriptive operand definitions.
// 16-bits but only 8 bits are significant.
def i16i8imm : Operand<i16> {
- let ParserMatchClass = ImmSExt8AsmOperand;
+ let ParserMatchClass = ImmSExti16i8AsmOperand;
}
// 32-bits but only 8 bits are significant.
def i32i8imm : Operand<i32> {
- let ParserMatchClass = ImmSExt8AsmOperand;
+ let ParserMatchClass = ImmSExti32i8AsmOperand;
}
//===----------------------------------------------------------------------===//
@@ -280,23 +338,31 @@ def i32i8imm : Operand<i32> {
// Define X86 specific addressing mode.
def addr : ComplexPattern<iPTR, 5, "SelectAddr", [], []>;
-def lea32addr : ComplexPattern<i32, 4, "SelectLEAAddr",
+def lea32addr : ComplexPattern<i32, 5, "SelectLEAAddr",
[add, sub, mul, X86mul_imm, shl, or, frameindex],
[]>;
-def tls32addr : ComplexPattern<i32, 4, "SelectTLSADDRAddr",
+def tls32addr : ComplexPattern<i32, 5, "SelectTLSADDRAddr",
[tglobaltlsaddr], []>;
//===----------------------------------------------------------------------===//
// X86 Instruction Predicate Definitions.
-def HasMMX : Predicate<"Subtarget->hasMMX()">;
-def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
-def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
-def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
-def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">;
-def HasSSE41 : Predicate<"Subtarget->hasSSE41()">;
-def HasSSE42 : Predicate<"Subtarget->hasSSE42()">;
-def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">;
+def HasCMov : Predicate<"Subtarget->hasCMov()">;
+def NoCMov : Predicate<"!Subtarget->hasCMov()">;
+
+// FIXME: temporary hack to let codegen assert or generate poor code in case
+// no AVX version of the desired instructions is present; this is better for
+// incremental dev (without fallbacks it's easier to spot what's missing)
+def HasMMX : Predicate<"Subtarget->hasMMX() && !Subtarget->hasAVX()">;
+def HasSSE1 : Predicate<"Subtarget->hasSSE1() && !Subtarget->hasAVX()">;
+def HasSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
+def HasSSE3 : Predicate<"Subtarget->hasSSE3() && !Subtarget->hasAVX()">;
+def HasSSSE3 : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">;
+def HasSSE41 : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">;
+def HasSSE42 : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">;
+def HasSSE4A : Predicate<"Subtarget->hasSSE4A() && !Subtarget->hasAVX()">;
+
def HasAVX : Predicate<"Subtarget->hasAVX()">;
+def HasCLMUL : Predicate<"Subtarget->hasCLMUL()">;
def HasFMA3 : Predicate<"Subtarget->hasFMA3()">;
def HasFMA4 : Predicate<"Subtarget->hasFMA4()">;
def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
@@ -312,10 +378,12 @@ def FarData : Predicate<"TM.getCodeModel() != CodeModel::Small &&"
def NearData : Predicate<"TM.getCodeModel() == CodeModel::Small ||"
"TM.getCodeModel() == CodeModel::Kernel">;
def IsStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">;
+def IsNotPIC : Predicate<"TM.getRelocationModel() != Reloc::PIC_">;
def OptForSize : Predicate<"OptForSize">;
def OptForSpeed : Predicate<"!OptForSize">;
def FastBTMem : Predicate<"!Subtarget->isBTMemSlow()">;
def CallImmAddr : Predicate<"Subtarget->IsLegalToCallImmediateAddr(TM)">;
+def HasAES : Predicate<"Subtarget->hasAES()">;
//===----------------------------------------------------------------------===//
// X86 Instruction Format Definitions.
@@ -346,9 +414,7 @@ def X86_COND_O : PatLeaf<(i8 13)>;
def X86_COND_P : PatLeaf<(i8 14)>; // alt. COND_PE
def X86_COND_S : PatLeaf<(i8 15)>;
-def immSext8 : PatLeaf<(imm), [{
- return N->getSExtValue() == (int8_t)N->getSExtValue();
-}]>;
+def immSext8 : PatLeaf<(imm), [{ return immSext8(N); }]>;
def i16immSExt8 : PatLeaf<(i16 immSext8)>;
def i32immSExt8 : PatLeaf<(i32 immSext8)>;
@@ -462,43 +528,14 @@ def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
- else {
- unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
- APInt Mask = APInt::getAllOnesValue(BitWidth);
- APInt KnownZero0, KnownOne0;
- CurDAG->ComputeMaskedBits(N->getOperand(0), Mask, KnownZero0, KnownOne0, 0);
- APInt KnownZero1, KnownOne1;
- CurDAG->ComputeMaskedBits(N->getOperand(1), Mask, KnownZero1, KnownOne1, 0);
- return (~KnownZero0 & ~KnownZero1) == 0;
- }
-}]>;
-// 'shld' and 'shrd' instruction patterns. Note that even though these have
-// the srl and shl in their patterns, the C++ code must still check for them,
-// because predicates are tested before children nodes are explored.
-
-def shrd : PatFrag<(ops node:$src1, node:$amt1, node:$src2, node:$amt2),
- (or (srl node:$src1, node:$amt1),
- (shl node:$src2, node:$amt2)), [{
- assert(N->getOpcode() == ISD::OR);
- return N->getOperand(0).getOpcode() == ISD::SRL &&
- N->getOperand(1).getOpcode() == ISD::SHL &&
- isa<ConstantSDNode>(N->getOperand(0).getOperand(1)) &&
- isa<ConstantSDNode>(N->getOperand(1).getOperand(1)) &&
- N->getOperand(0).getConstantOperandVal(1) ==
- N->getValueSizeInBits(0) - N->getOperand(1).getConstantOperandVal(1);
-}]>;
-
-def shld : PatFrag<(ops node:$src1, node:$amt1, node:$src2, node:$amt2),
- (or (shl node:$src1, node:$amt1),
- (srl node:$src2, node:$amt2)), [{
- assert(N->getOpcode() == ISD::OR);
- return N->getOperand(0).getOpcode() == ISD::SHL &&
- N->getOperand(1).getOpcode() == ISD::SRL &&
- isa<ConstantSDNode>(N->getOperand(0).getOperand(1)) &&
- isa<ConstantSDNode>(N->getOperand(1).getOperand(1)) &&
- N->getOperand(0).getConstantOperandVal(1) ==
- N->getValueSizeInBits(0) - N->getOperand(1).getConstantOperandVal(1);
+ unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
+ APInt Mask = APInt::getAllOnesValue(BitWidth);
+ APInt KnownZero0, KnownOne0;
+ CurDAG->ComputeMaskedBits(N->getOperand(0), Mask, KnownZero0, KnownOne0, 0);
+ APInt KnownZero1, KnownOne1;
+ CurDAG->ComputeMaskedBits(N->getOperand(1), Mask, KnownZero1, KnownOne1, 0);
+ return (~KnownZero0 & ~KnownZero1) == 0;
}]>;
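
The rewritten fragment keeps the same condition as before: an OR may be selected as an ADD only when no bit can be one in both operands, so no carries are possible. The same test in isolation, as a worked example with an invented helper name:

    #include <cstdint>

    // (~KnownZero0 & ~KnownZero1) == 0 from the fragment above: the bits that
    // might still be one in each operand must not overlap.
    static bool orIsAdd(uint64_t KnownZero0, uint64_t KnownZero1) {
      uint64_t MaybeOne0 = ~KnownZero0;
      uint64_t MaybeOne1 = ~KnownZero1;
      return (MaybeOne0 & MaybeOne1) == 0;
    }
    // Example: for (x & 0xF0) | (y & 0x0F) the maybe-one masks are 0xF0 and
    // 0x0F, which never overlap, so the OR behaves exactly like an ADD.
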
//===----------------------------------------------------------------------===//
@@ -541,9 +578,10 @@ def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
// The main point of having separate instruction are extra unmodelled effects
// (compared to ordinary calls) like stack pointer change.
-def MINGW_ALLOCA : I<0, Pseudo, (outs), (ins),
- "# dynamic stack allocation",
- [(X86MingwAlloca)]>;
+let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
+ def MINGW_ALLOCA : I<0, Pseudo, (outs), (ins),
+ "# dynamic stack allocation",
+ [(X86MingwAlloca)]>;
}
// Nop
@@ -556,8 +594,14 @@ let neverHasSideEffects = 1 in {
}
// Trap
-def INT3 : I<0xcc, RawFrm, (outs), (ins), "int\t3", []>;
-def INT : I<0xcd, RawFrm, (outs), (ins i8imm:$trap), "int\t$trap", []>;
+let Uses = [EFLAGS] in {
+ def INTO : I<0xce, RawFrm, (outs), (ins), "into", []>;
+}
+def INT3 : I<0xcc, RawFrm, (outs), (ins), "int3",
+ [(int_x86_int (i8 3))]>;
+// FIXME: need to make sure that "int $3" matches int3
+def INT : Ii8<0xcd, RawFrm, (outs), (ins i8imm:$trap), "int\t$trap",
+ [(int_x86_int imm:$trap)]>;
def IRET16 : I<0xcf, RawFrm, (outs), (ins), "iret{w}", []>, OpSize;
def IRET32 : I<0xcf, RawFrm, (outs), (ins), "iret{l}", []>;
@@ -574,7 +618,7 @@ let neverHasSideEffects = 1, isNotDuplicable = 1, Uses = [ESP] in
// Return instructions.
let isTerminator = 1, isReturn = 1, isBarrier = 1,
- hasCtrlDep = 1, FPForm = SpecialFP, FPFormBits = SpecialFP.Value in {
+ hasCtrlDep = 1, FPForm = SpecialFP in {
def RET : I <0xC3, RawFrm, (outs), (ins variable_ops),
"ret",
[(X86retflag 0)]>;
@@ -630,16 +674,16 @@ let Uses = [ECX], isBranch = 1, isTerminator = 1 in
// Indirect branches
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
def JMP32r : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst",
- [(brind GR32:$dst)]>;
+ [(brind GR32:$dst)]>, Requires<[In32BitMode]>;
def JMP32m : I<0xFF, MRM4m, (outs), (ins i32mem:$dst), "jmp{l}\t{*}$dst",
- [(brind (loadi32 addr:$dst))]>;
+ [(brind (loadi32 addr:$dst))]>, Requires<[In32BitMode]>;
- def FARJMP16i : Iseg16<0xEA, RawFrm, (outs),
- (ins i16imm:$seg, i16imm:$off),
- "ljmp{w}\t$seg, $off", []>, OpSize;
- def FARJMP32i : Iseg32<0xEA, RawFrm, (outs),
- (ins i16imm:$seg, i32imm:$off),
- "ljmp{l}\t$seg, $off", []>;
+ def FARJMP16i : Iseg16<0xEA, RawFrmImm16, (outs),
+ (ins i16imm:$off, i16imm:$seg),
+ "ljmp{w}\t{$seg, $off|$off, $seg}", []>, OpSize;
+ def FARJMP32i : Iseg32<0xEA, RawFrmImm16, (outs),
+ (ins i32imm:$off, i16imm:$seg),
+ "ljmp{l}\t{$seg, $off|$off, $seg}", []>;
def FARJMP16m : I<0xFF, MRM5m, (outs), (ins opaque32mem:$dst),
"ljmp{w}\t{*}$dst", []>, OpSize;
@@ -650,9 +694,9 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
// Loop instructions
-def LOOP : I<0xE2, RawFrm, (ins brtarget8:$dst), (outs), "loop\t$dst", []>;
-def LOOPE : I<0xE1, RawFrm, (ins brtarget8:$dst), (outs), "loope\t$dst", []>;
-def LOOPNE : I<0xE0, RawFrm, (ins brtarget8:$dst), (outs), "loopne\t$dst", []>;
+def LOOP : Ii8PCRel<0xE2, RawFrm, (outs), (ins brtarget8:$dst), "loop\t$dst", []>;
+def LOOPE : Ii8PCRel<0xE1, RawFrm, (outs), (ins brtarget8:$dst), "loope\t$dst", []>;
+def LOOPNE : Ii8PCRel<0xE0, RawFrm, (outs), (ins brtarget8:$dst), "loopne\t$dst", []>;
//===----------------------------------------------------------------------===//
// Call Instructions...
@@ -675,17 +719,23 @@ let isCall = 1 in
def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst, variable_ops),
"call\t{*}$dst", [(X86call (loadi32 addr:$dst))]>;
- def FARCALL16i : Iseg16<0x9A, RawFrm, (outs),
- (ins i16imm:$seg, i16imm:$off),
- "lcall{w}\t$seg, $off", []>, OpSize;
- def FARCALL32i : Iseg32<0x9A, RawFrm, (outs),
- (ins i16imm:$seg, i32imm:$off),
- "lcall{l}\t$seg, $off", []>;
+ def FARCALL16i : Iseg16<0x9A, RawFrmImm16, (outs),
+ (ins i16imm:$off, i16imm:$seg),
+ "lcall{w}\t{$seg, $off|$off, $seg}", []>, OpSize;
+ def FARCALL32i : Iseg32<0x9A, RawFrmImm16, (outs),
+ (ins i32imm:$off, i16imm:$seg),
+ "lcall{l}\t{$seg, $off|$off, $seg}", []>;
def FARCALL16m : I<0xFF, MRM3m, (outs), (ins opaque32mem:$dst),
"lcall{w}\t{*}$dst", []>, OpSize;
def FARCALL32m : I<0xFF, MRM3m, (outs), (ins opaque48mem:$dst),
"lcall{l}\t{*}$dst", []>;
+
+ // callw for 16 bit code for the assembler.
+ let isAsmParserOnly = 1 in
+ def CALLpcrel16 : Ii16PCRel<0xE8, RawFrm,
+ (outs), (ins i16imm_pcrel:$dst, variable_ops),
+ "callw\t$dst", []>, OpSize;
}
// Constructing a stack frame.
@@ -695,45 +745,52 @@ def ENTER : I<0xC8, RawFrm, (outs), (ins i16imm:$len, i8imm:$lvl),
// Tail call stuff.
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
-def TCRETURNdi : I<0, Pseudo, (outs),
- (ins i32imm:$dst, i32imm:$offset, variable_ops),
- "#TC_RETURN $dst $offset",
- []>;
-
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
-def TCRETURNri : I<0, Pseudo, (outs),
- (ins GR32:$dst, i32imm:$offset, variable_ops),
- "#TC_RETURN $dst $offset",
- []>;
-
-// FIXME: The should be pseudo instructions that are lowered when going to
-// mcinst.
-let isCall = 1, isBranch = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
- def TAILJMPd : Ii32<0xE9, RawFrm, (outs),(ins i32imm_pcrel:$dst,variable_ops),
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
+ isCodeGenOnly = 1 in
+ let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
+ MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
+ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
+ XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
+ Uses = [ESP] in {
+ def TCRETURNdi : I<0, Pseudo, (outs),
+ (ins i32imm_pcrel:$dst, i32imm:$offset, variable_ops),
+ "#TC_RETURN $dst $offset", []>;
+ def TCRETURNri : I<0, Pseudo, (outs),
+ (ins GR32_TC:$dst, i32imm:$offset, variable_ops),
+ "#TC_RETURN $dst $offset", []>;
+ let mayLoad = 1 in
+ def TCRETURNmi : I<0, Pseudo, (outs),
+ (ins i32mem_TC:$dst, i32imm:$offset, variable_ops),
+ "#TC_RETURN $dst $offset", []>;
+
+ // FIXME: These should be pseudo instructions that are lowered when going to
+ // mcinst.
+ def TAILJMPd : Ii32PCRel<0xE9, RawFrm, (outs),
+ (ins i32imm_pcrel:$dst, variable_ops),
"jmp\t$dst # TAILCALL",
[]>;
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
- def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32:$dst, variable_ops),
- "jmp{l}\t{*}$dst # TAILCALL",
- []>;
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
- def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem:$dst, variable_ops),
- "jmp\t{*}$dst # TAILCALL", []>;
+ def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32_TC:$dst, variable_ops),
+ "", []>; // FIXME: Remove encoding when JIT is dead.
+ let mayLoad = 1 in
+ def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem_TC:$dst, variable_ops),
+ "jmp{l}\t{*}$dst # TAILCALL", []>;
+}
//===----------------------------------------------------------------------===//
// Miscellaneous Instructions...
//
let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, neverHasSideEffects=1 in
def LEAVE : I<0xC9, RawFrm,
- (outs), (ins), "leave", []>;
+ (outs), (ins), "leave", []>, Requires<[In32BitMode]>;
def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"popcnt{w}\t{$src, $dst|$dst, $src}", []>, OpSize, XS;
+let mayLoad = 1 in
def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"popcnt{w}\t{$src, $dst|$dst, $src}", []>, OpSize, XS;
def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"popcnt{l}\t{$src, $dst|$dst, $src}", []>, XS;
+let mayLoad = 1 in
def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"popcnt{l}\t{$src, $dst|$dst, $src}", []>, XS;
@@ -764,24 +821,37 @@ def PUSH32rmm: I<0xFF, MRM6m, (outs), (ins i32mem:$src), "push{l}\t$src",[]>;
}
let Defs = [ESP], Uses = [ESP], neverHasSideEffects = 1, mayStore = 1 in {
-def PUSH32i8 : Ii8<0x6a, RawFrm, (outs), (ins i8imm:$imm),
- "push{l}\t$imm", []>;
-def PUSH32i16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
+def PUSHi8 : Ii8<0x6a, RawFrm, (outs), (ins i32i8imm:$imm),
"push{l}\t$imm", []>;
-def PUSH32i32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
+def PUSHi16 : Ii16<0x68, RawFrm, (outs), (ins i16imm:$imm),
+ "push{w}\t$imm", []>, OpSize;
+def PUSHi32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm),
"push{l}\t$imm", []>;
}
let Defs = [ESP, EFLAGS], Uses = [ESP], mayLoad = 1, neverHasSideEffects=1 in {
-def POPF : I<0x9D, RawFrm, (outs), (ins), "popf{w}", []>, OpSize;
-def POPFD : I<0x9D, RawFrm, (outs), (ins), "popf{l}", []>;
+def POPF16 : I<0x9D, RawFrm, (outs), (ins), "popf{w}", []>, OpSize;
+def POPF32 : I<0x9D, RawFrm, (outs), (ins), "popf{l|d}", []>,
+ Requires<[In32BitMode]>;
}
let Defs = [ESP], Uses = [ESP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in {
-def PUSHF : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", []>, OpSize;
-def PUSHFD : I<0x9C, RawFrm, (outs), (ins), "pushf{l}", []>;
+def PUSHF16 : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", []>, OpSize;
+def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", []>,
+ Requires<[In32BitMode]>;
}
-let isTwoAddress = 1 in // GR32 = bswap GR32
+let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
+ mayLoad=1, neverHasSideEffects=1 in {
+def POPA32 : I<0x61, RawFrm, (outs), (ins), "popa{l}", []>,
+ Requires<[In32BitMode]>;
+}
+let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
+ mayStore=1, neverHasSideEffects=1 in {
+def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pusha{l}", []>,
+ Requires<[In32BitMode]>;
+}
+
+let Uses = [EFLAGS], Constraints = "$src = $dst" in // GR32 = bswap GR32
def BSWAP32r : I<0xC8, AddRegFrm,
(outs GR32:$dst), (ins GR32:$src),
"bswap{l}\t$dst",
@@ -792,42 +862,40 @@ let isTwoAddress = 1 in // GR32 = bswap GR32
let Defs = [EFLAGS] in {
def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsf{w}\t{$src, $dst|$dst, $src}",
- [(set GR16:$dst, (X86bsf GR16:$src)), (implicit EFLAGS)]>, TB;
+ [(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))]>, TB, OpSize;
def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bsf{w}\t{$src, $dst|$dst, $src}",
- [(set GR16:$dst, (X86bsf (loadi16 addr:$src))),
- (implicit EFLAGS)]>, TB;
+ [(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))]>, TB,
+ OpSize;
def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (X86bsf GR32:$src)), (implicit EFLAGS)]>, TB;
+ [(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))]>, TB;
def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (X86bsf (loadi32 addr:$src))),
- (implicit EFLAGS)]>, TB;
+ [(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))]>, TB;
def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
- [(set GR16:$dst, (X86bsr GR16:$src)), (implicit EFLAGS)]>, TB;
+ [(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))]>, TB, OpSize;
def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
- [(set GR16:$dst, (X86bsr (loadi16 addr:$src))),
- (implicit EFLAGS)]>, TB;
+ [(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))]>, TB,
+ OpSize;
def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (X86bsr GR32:$src)), (implicit EFLAGS)]>, TB;
+ [(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))]>, TB;
def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (X86bsr (loadi32 addr:$src))),
- (implicit EFLAGS)]>, TB;
+ [(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))]>, TB;
} // Defs = [EFLAGS]
let neverHasSideEffects = 1 in
def LEA16r : I<0x8D, MRMSrcMem,
- (outs GR16:$dst), (ins lea32mem:$src),
+ (outs GR16:$dst), (ins i32mem:$src),
"lea{w}\t{$src|$dst}, {$dst|$src}", []>, OpSize;
let isReMaterializable = 1 in
def LEA32r : I<0x8D, MRMSrcMem,
- (outs GR32:$dst), (ins lea32mem:$src),
+ (outs GR32:$dst), (ins i32mem:$src),
"lea{l}\t{$src|$dst}, {$dst|$src}",
[(set GR32:$dst, lea32addr:$src)]>, Requires<[In32BitMode]>;
@@ -880,7 +948,7 @@ def RDTSC : I<0x31, RawFrm, (outs), (ins), "rdtsc", [(X86rdtsc)]>,
let Defs = [RAX, RCX, RDX] in
def RDTSCP : I<0x01, MRM_F9, (outs), (ins), "rdtscp", []>, TB;
-let isBarrier = 1, hasCtrlDep = 1 in {
+let isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in {
def TRAP : I<0x0B, RawFrm, (outs), (ins), "ud2", [(trap)]>, TB;
}
@@ -891,7 +959,7 @@ def SYSRET : I<0x07, RawFrm,
def SYSENTER : I<0x34, RawFrm,
(outs), (ins), "sysenter", []>, TB;
def SYSEXIT : I<0x35, RawFrm,
- (outs), (ins), "sysexit", []>, TB;
+ (outs), (ins), "sysexit", []>, TB, Requires<[In32BitMode]>;
def WAIT : I<0x9B, RawFrm, (outs), (ins), "wait", []>;
@@ -979,36 +1047,53 @@ def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
[(store (i32 imm:$src), addr:$dst)]>;
-def MOV8o8a : Ii8 <0xA0, RawFrm, (outs), (ins offset8:$src),
- "mov{b}\t{$src, %al|%al, $src}", []>;
-def MOV16o16a : Ii16 <0xA1, RawFrm, (outs), (ins offset16:$src),
- "mov{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
+/// moffs8, moffs16 and moffs32 versions of moves. The immediate is a
+/// 32-bit offset from the PC. These are only valid in x86-32 mode.
+def MOV8o8a : Ii32 <0xA0, RawFrm, (outs), (ins offset8:$src),
+ "mov{b}\t{$src, %al|%al, $src}", []>,
+ Requires<[In32BitMode]>;
+def MOV16o16a : Ii32 <0xA1, RawFrm, (outs), (ins offset16:$src),
+ "mov{w}\t{$src, %ax|%ax, $src}", []>, OpSize,
+ Requires<[In32BitMode]>;
def MOV32o32a : Ii32 <0xA1, RawFrm, (outs), (ins offset32:$src),
- "mov{l}\t{$src, %eax|%eax, $src}", []>;
-
-def MOV8ao8 : Ii8 <0xA2, RawFrm, (outs offset8:$dst), (ins),
- "mov{b}\t{%al, $dst|$dst, %al}", []>;
-def MOV16ao16 : Ii16 <0xA3, RawFrm, (outs offset16:$dst), (ins),
- "mov{w}\t{%ax, $dst|$dst, %ax}", []>, OpSize;
+ "mov{l}\t{$src, %eax|%eax, $src}", []>,
+ Requires<[In32BitMode]>;
+def MOV8ao8 : Ii32 <0xA2, RawFrm, (outs offset8:$dst), (ins),
+ "mov{b}\t{%al, $dst|$dst, %al}", []>,
+ Requires<[In32BitMode]>;
+def MOV16ao16 : Ii32 <0xA3, RawFrm, (outs offset16:$dst), (ins),
+ "mov{w}\t{%ax, $dst|$dst, %ax}", []>, OpSize,
+ Requires<[In32BitMode]>;
def MOV32ao32 : Ii32 <0xA3, RawFrm, (outs offset32:$dst), (ins),
- "mov{l}\t{%eax, $dst|$dst, %eax}", []>;
-
+ "mov{l}\t{%eax, $dst|$dst, %eax}", []>,
+ Requires<[In32BitMode]>;
+
// Moves to and from segment registers
def MOV16rs : I<0x8C, MRMDestReg, (outs GR16:$dst), (ins SEGMENT_REG:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", []>;
+ "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+def MOV32rs : I<0x8C, MRMDestReg, (outs GR32:$dst), (ins SEGMENT_REG:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", []>;
def MOV16ms : I<0x8C, MRMDestMem, (outs i16mem:$dst), (ins SEGMENT_REG:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", []>;
+ "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+def MOV32ms : I<0x8C, MRMDestMem, (outs i32mem:$dst), (ins SEGMENT_REG:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", []>;
def MOV16sr : I<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR16:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", []>;
+ "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+def MOV32sr : I<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR32:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", []>;
def MOV16sm : I<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i16mem:$src),
- "mov{w}\t{$src, $dst|$dst, $src}", []>;
+ "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
+def MOV32sm : I<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i32mem:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", []>;
+let isCodeGenOnly = 1 in {
def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src),
"mov{b}\t{$src, $dst|$dst, $src}", []>;
def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize;
def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"mov{l}\t{$src, $dst|$dst, $src}", []>;
+}
let canFoldAsLoad = 1, isReMaterializable = 1 in {
def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src),
@@ -1032,9 +1117,28 @@ def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
[(store GR32:$src, addr:$dst)]>;
+/// Versions of MOV32rr, MOV32rm, and MOV32mr for i32mem_TC and GR32_TC.
+let isCodeGenOnly = 1 in {
+let neverHasSideEffects = 1 in
+def MOV32rr_TC : I<0x89, MRMDestReg, (outs GR32_TC:$dst), (ins GR32_TC:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", []>;
+
+let mayLoad = 1,
+ canFoldAsLoad = 1, isReMaterializable = 1 in
+def MOV32rm_TC : I<0x8B, MRMSrcMem, (outs GR32_TC:$dst), (ins i32mem_TC:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}",
+ []>;
+
+let mayStore = 1 in
+def MOV32mr_TC : I<0x89, MRMDestMem, (outs), (ins i32mem_TC:$dst, GR32_TC:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}",
+ []>;
+}
+
// Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
// that they can be used for copying and storing h registers, which can't be
// encoded when a REX prefix is present.
+let isCodeGenOnly = 1 in {
let neverHasSideEffects = 1 in
def MOV8rr_NOREX : I<0x88, MRMDestReg,
(outs GR8_NOREX:$dst), (ins GR8_NOREX:$src),
@@ -1048,6 +1152,7 @@ let mayLoad = 1,
def MOV8rm_NOREX : I<0x8A, MRMSrcMem,
(outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src),
"mov{b}\t{$src, $dst|$dst, $src} # NOREX", []>;
+}
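The NOREX comment above refers to an encoding quirk worth spelling out: with 8-bit operands, register encodings 4-7 select AH/CH/DH/BH only when no REX prefix is present; once any REX prefix appears they select SPL/BPL/SIL/DIL instead, so the "h" registers need these *_NOREX variants. A small table to illustrate (C++ sketch, not part of the patch):

#include <cstdio>

int main() {
    // 8-bit register selected by ModRM encodings 0..7, with and without REX.
    const char *no_rex[8]   = {"AL","CL","DL","BL","AH","CH","DH","BH"};
    const char *with_rex[8] = {"AL","CL","DL","BL","SPL","BPL","SIL","DIL"};
    for (int enc = 0; enc < 8; ++enc)
        std::printf("enc %d: %-3s (no REX)  %-3s (REX)\n",
                    enc, no_rex[enc], with_rex[enc]);
    return 0;
}
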
// Moves to and from debug registers
def MOV32rd : I<0x21, MRMDestReg, (outs GR32:$dst), (ins DEBUG_REG:$src),
@@ -1056,10 +1161,10 @@ def MOV32dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR32:$src),
"mov{l}\t{$src, $dst|$dst, $src}", []>, TB;
// Moves to and from control registers
-def MOV32rc : I<0x20, MRMDestReg, (outs GR32:$dst), (ins CONTROL_REG_32:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
-def MOV32cr : I<0x22, MRMSrcReg, (outs CONTROL_REG_32:$dst), (ins GR32:$src),
- "mov{q}\t{$src, $dst|$dst, $src}", []>, TB;
+def MOV32rc : I<0x20, MRMDestReg, (outs GR32:$dst), (ins CONTROL_REG:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", []>, TB;
+def MOV32cr : I<0x22, MRMSrcReg, (outs CONTROL_REG:$dst), (ins GR32:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", []>, TB;
//===----------------------------------------------------------------------===//
// Fixed-Register Multiplication and Division Instructions...
@@ -1067,7 +1172,7 @@ def MOV32cr : I<0x22, MRMSrcReg, (outs CONTROL_REG_32:$dst), (ins GR32:$src),
// Extra precision multiplication
-// AL is really implied by AX, by the registers in Defs must match the
+// AL is really implied by AX, but the registers in Defs must match the
// SDNode results (i8, i32).
let Defs = [AL,EFLAGS,AX], Uses = [AL] in
def MUL8r : I<0xF6, MRM4r, (outs), (ins GR8:$src), "mul{b}\t$src",
@@ -1180,24 +1285,12 @@ def IDIV32m: I<0xF7, MRM7m, (outs), (ins i32mem:$src),
//===----------------------------------------------------------------------===//
// Two address Instructions.
//
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
// Conditional moves
let Uses = [EFLAGS] in {
-// X86 doesn't have 8-bit conditional moves. Use a customInserter to
-// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
-// however that requires promoting the operands, and can induce additional
-// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
-// clobber EFLAGS, because if one of the operands is zero, the expansion
-// could involve an xor.
-let usesCustomInserter = 1, isTwoAddress = 0, Defs = [EFLAGS] in
-def CMOV_GR8 : I<0, Pseudo,
- (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
- "#CMOV_GR8 PSEUDO!",
- [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
- imm:$cond, EFLAGS))]>;
-
+let Predicates = [HasCMov] in {
let isCommutable = 1 in {
def CMOVB16rr : I<0x42, MRMSrcReg, // if <u, GR16 = GR16
(outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
@@ -1585,71 +1678,133 @@ def CMOVNO32rm : I<0x41, MRMSrcMem, // if !overflow, GR32 = [mem32]
[(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
X86_COND_NO, EFLAGS))]>,
TB;
+} // Predicates = [HasCMov]
+
+// X86 doesn't have 8-bit conditional moves. Use a customInserter to
+// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
+// however that requires promoting the operands, and can induce additional
+// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
+// clobber EFLAGS, because if one of the operands is zero, the expansion
+// could involve an xor.
+let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
+def CMOV_GR8 : I<0, Pseudo,
+ (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
+ "#CMOV_GR8 PSEUDO!",
+ [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
+ imm:$cond, EFLAGS))]>;
+
+let Predicates = [NoCMov] in {
+def CMOV_GR32 : I<0, Pseudo,
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
+ "#CMOV_GR32* PSEUDO!",
+ [(set GR32:$dst,
+ (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
+def CMOV_GR16 : I<0, Pseudo,
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
+ "#CMOV_GR16* PSEUDO!",
+ [(set GR16:$dst,
+ (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
+def CMOV_RFP32 : I<0, Pseudo,
+ (outs RFP32:$dst),
+ (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
+ "#CMOV_RFP32 PSEUDO!",
+ [(set RFP32:$dst,
+ (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
+ EFLAGS))]>;
+def CMOV_RFP64 : I<0, Pseudo,
+ (outs RFP64:$dst),
+ (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
+ "#CMOV_RFP64 PSEUDO!",
+ [(set RFP64:$dst,
+ (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
+ EFLAGS))]>;
+def CMOV_RFP80 : I<0, Pseudo,
+ (outs RFP80:$dst),
+ (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
+ "#CMOV_RFP80 PSEUDO!",
+ [(set RFP80:$dst,
+ (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
+ EFLAGS))]>;
+} // Predicates = [NoCMov]
+} // UsesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
} // Uses = [EFLAGS]
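The CMOV_GR8/CMOV_GR16/CMOV_GR32 pseudos above are never emitted as real instructions; the custom inserter expands them into a compare-and-branch diamond. Roughly, the expansion computes the same thing as this C++ sketch (the names are mine, not LLVM's, and which operand is kept on a true condition follows the X86cmov node's convention rather than anything shown here):

#include <cstdint>
#include <cstdio>

// Branch-based stand-in for an 8-bit conditional move: pick one of two values
// depending on a condition that, in the backend, lives in EFLAGS.
static uint8_t cmov8(bool cond, uint8_t if_true, uint8_t if_false) {
    if (cond)                 // the inserter emits a conditional branch here
        return if_true;
    return if_false;          // fall-through block
}

int main() {
    std::printf("%u %u\n", cmov8(true, 1, 2), cmov8(false, 1, 2));  // 1 2
    return 0;
}
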
// unary instructions
let CodeSize = 2 in {
let Defs = [EFLAGS] in {
-def NEG8r : I<0xF6, MRM3r, (outs GR8 :$dst), (ins GR8 :$src), "neg{b}\t$dst",
- [(set GR8:$dst, (ineg GR8:$src)),
+def NEG8r : I<0xF6, MRM3r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "neg{b}\t$dst",
+ [(set GR8:$dst, (ineg GR8:$src1)),
(implicit EFLAGS)]>;
-def NEG16r : I<0xF7, MRM3r, (outs GR16:$dst), (ins GR16:$src), "neg{w}\t$dst",
- [(set GR16:$dst, (ineg GR16:$src)),
+def NEG16r : I<0xF7, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
+ "neg{w}\t$dst",
+ [(set GR16:$dst, (ineg GR16:$src1)),
(implicit EFLAGS)]>, OpSize;
-def NEG32r : I<0xF7, MRM3r, (outs GR32:$dst), (ins GR32:$src), "neg{l}\t$dst",
- [(set GR32:$dst, (ineg GR32:$src)),
+def NEG32r : I<0xF7, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
+ "neg{l}\t$dst",
+ [(set GR32:$dst, (ineg GR32:$src1)),
(implicit EFLAGS)]>;
-let isTwoAddress = 0 in {
- def NEG8m : I<0xF6, MRM3m, (outs), (ins i8mem :$dst), "neg{b}\t$dst",
+
+let Constraints = "" in {
+ def NEG8m : I<0xF6, MRM3m, (outs), (ins i8mem :$dst),
+ "neg{b}\t$dst",
[(store (ineg (loadi8 addr:$dst)), addr:$dst),
(implicit EFLAGS)]>;
- def NEG16m : I<0xF7, MRM3m, (outs), (ins i16mem:$dst), "neg{w}\t$dst",
+ def NEG16m : I<0xF7, MRM3m, (outs), (ins i16mem:$dst),
+ "neg{w}\t$dst",
[(store (ineg (loadi16 addr:$dst)), addr:$dst),
(implicit EFLAGS)]>, OpSize;
- def NEG32m : I<0xF7, MRM3m, (outs), (ins i32mem:$dst), "neg{l}\t$dst",
+ def NEG32m : I<0xF7, MRM3m, (outs), (ins i32mem:$dst),
+ "neg{l}\t$dst",
[(store (ineg (loadi32 addr:$dst)), addr:$dst),
(implicit EFLAGS)]>;
-}
+} // Constraints = ""
} // Defs = [EFLAGS]
// Match xor -1 to not. Favors these over a move imm + xor to save code size.
let AddedComplexity = 15 in {
-def NOT8r : I<0xF6, MRM2r, (outs GR8 :$dst), (ins GR8 :$src), "not{b}\t$dst",
- [(set GR8:$dst, (not GR8:$src))]>;
-def NOT16r : I<0xF7, MRM2r, (outs GR16:$dst), (ins GR16:$src), "not{w}\t$dst",
- [(set GR16:$dst, (not GR16:$src))]>, OpSize;
-def NOT32r : I<0xF7, MRM2r, (outs GR32:$dst), (ins GR32:$src), "not{l}\t$dst",
- [(set GR32:$dst, (not GR32:$src))]>;
+def NOT8r : I<0xF6, MRM2r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "not{b}\t$dst",
+ [(set GR8:$dst, (not GR8:$src1))]>;
+def NOT16r : I<0xF7, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
+ "not{w}\t$dst",
+ [(set GR16:$dst, (not GR16:$src1))]>, OpSize;
+def NOT32r : I<0xF7, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
+ "not{l}\t$dst",
+ [(set GR32:$dst, (not GR32:$src1))]>;
}
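The "Match xor -1 to not" comment relies on the identity that xor with all-ones is bitwise complement, so a single NOTr can replace materializing -1 and xoring with it. A two-line check of that identity (illustrative C++, not from the patch):

#include <cassert>
#include <cstdint>

int main() {
    for (uint32_t x : {0u, 1u, 0xDEADBEEFu, 0xFFFFFFFFu})
        assert((x ^ 0xFFFFFFFFu) == ~x);   // xor with all-ones == bitwise NOT
    return 0;
}
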
-let isTwoAddress = 0 in {
- def NOT8m : I<0xF6, MRM2m, (outs), (ins i8mem :$dst), "not{b}\t$dst",
+let Constraints = "" in {
+ def NOT8m : I<0xF6, MRM2m, (outs), (ins i8mem :$dst),
+ "not{b}\t$dst",
[(store (not (loadi8 addr:$dst)), addr:$dst)]>;
- def NOT16m : I<0xF7, MRM2m, (outs), (ins i16mem:$dst), "not{w}\t$dst",
+ def NOT16m : I<0xF7, MRM2m, (outs), (ins i16mem:$dst),
+ "not{w}\t$dst",
[(store (not (loadi16 addr:$dst)), addr:$dst)]>, OpSize;
- def NOT32m : I<0xF7, MRM2m, (outs), (ins i32mem:$dst), "not{l}\t$dst",
+ def NOT32m : I<0xF7, MRM2m, (outs), (ins i32mem:$dst),
+ "not{l}\t$dst",
[(store (not (loadi32 addr:$dst)), addr:$dst)]>;
-}
+} // Constraints = ""
} // CodeSize
// TODO: inc/dec is slow for P4, but fast for Pentium-M.
let Defs = [EFLAGS] in {
let CodeSize = 2 in
-def INC8r : I<0xFE, MRM0r, (outs GR8 :$dst), (ins GR8 :$src), "inc{b}\t$dst",
- [(set GR8:$dst, (add GR8:$src, 1)),
- (implicit EFLAGS)]>;
+def INC8r : I<0xFE, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "inc{b}\t$dst",
+ [(set GR8:$dst, EFLAGS, (X86inc_flag GR8:$src1))]>;
+
let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
-def INC16r : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
+def INC16r : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src1),
"inc{w}\t$dst",
- [(set GR16:$dst, (add GR16:$src, 1)),
- (implicit EFLAGS)]>,
+ [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src1))]>,
OpSize, Requires<[In32BitMode]>;
-def INC32r : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
+def INC32r : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src1),
"inc{l}\t$dst",
- [(set GR32:$dst, (add GR32:$src, 1)),
- (implicit EFLAGS)]>, Requires<[In32BitMode]>;
+ [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src1))]>,
+ Requires<[In32BitMode]>;
}
-let isTwoAddress = 0, CodeSize = 2 in {
+let Constraints = "", CodeSize = 2 in {
def INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst), "inc{b}\t$dst",
[(store (add (loadi8 addr:$dst), 1), addr:$dst),
(implicit EFLAGS)]>;
@@ -1661,25 +1816,24 @@ let isTwoAddress = 0, CodeSize = 2 in {
[(store (add (loadi32 addr:$dst), 1), addr:$dst),
(implicit EFLAGS)]>,
Requires<[In32BitMode]>;
-}
+} // Constraints = "", CodeSize = 2
let CodeSize = 2 in
-def DEC8r : I<0xFE, MRM1r, (outs GR8 :$dst), (ins GR8 :$src), "dec{b}\t$dst",
- [(set GR8:$dst, (add GR8:$src, -1)),
- (implicit EFLAGS)]>;
+def DEC8r : I<0xFE, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "dec{b}\t$dst",
+ [(set GR8:$dst, EFLAGS, (X86dec_flag GR8:$src1))]>;
let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
-def DEC16r : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
+def DEC16r : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src1),
"dec{w}\t$dst",
- [(set GR16:$dst, (add GR16:$src, -1)),
- (implicit EFLAGS)]>,
+ [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src1))]>,
OpSize, Requires<[In32BitMode]>;
-def DEC32r : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
+def DEC32r : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src1),
"dec{l}\t$dst",
- [(set GR32:$dst, (add GR32:$src, -1)),
- (implicit EFLAGS)]>, Requires<[In32BitMode]>;
-}
+ [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src1))]>,
+ Requires<[In32BitMode]>;
+} // CodeSize = 2
-let isTwoAddress = 0, CodeSize = 2 in {
+let Constraints = "", CodeSize = 2 in {
def DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst), "dec{b}\t$dst",
[(store (add (loadi8 addr:$dst), -1), addr:$dst),
(implicit EFLAGS)]>;
@@ -1691,31 +1845,31 @@ let isTwoAddress = 0, CodeSize = 2 in {
[(store (add (loadi32 addr:$dst), -1), addr:$dst),
(implicit EFLAGS)]>,
Requires<[In32BitMode]>;
-}
+} // Constraints = "", CodeSize = 2
} // Defs = [EFLAGS]
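A side note on the new X86inc_flag/X86dec_flag patterns above: unlike ADD/SUB with an immediate of 1, the hardware INC/DEC instructions leave CF untouched while still updating ZF/SF/OF, which is presumably part of why their flag result is modelled with dedicated nodes rather than a plain (add x, 1). A small model of that difference (C++ sketch, not taken from the patch):

#include <cstdint>
#include <cstdio>

struct Flags { bool cf; bool zf; };

static Flags add32(uint32_t a, uint32_t b, Flags in) {
    (void)in;                                        // ADD rewrites CF
    uint64_t r = (uint64_t)a + b;
    return { r > 0xFFFFFFFFull, (uint32_t)r == 0 };
}

static Flags inc32(uint32_t a, Flags in) {
    uint32_t r = a + 1;
    return { in.cf, r == 0 };                        // INC preserves the incoming CF
}

int main() {
    Flags in{ true, false };                         // pretend CF was already set
    std::printf("add 0x10,1 -> CF=%d   inc 0x10 -> CF=%d\n",
                (int)add32(0x10, 1, in).cf, (int)inc32(0x10, in).cf);  // 0 vs 1
    return 0;
}
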
// Logical operators...
let Defs = [EFLAGS] in {
let isCommutable = 1 in { // X = AND Y, Z --> X = AND Z, Y
-def AND8rr : I<0x20, MRMDestReg,
- (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
- "and{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (and GR8:$src1, GR8:$src2)),
- (implicit EFLAGS)]>;
-def AND16rr : I<0x21, MRMDestReg,
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
- "and{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (and GR16:$src1, GR16:$src2)),
- (implicit EFLAGS)]>, OpSize;
-def AND32rr : I<0x21, MRMDestReg,
- (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
- "and{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (and GR32:$src1, GR32:$src2)),
- (implicit EFLAGS)]>;
+def AND8rr : I<0x20, MRMDestReg,
+ (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
+ "and{b}\t{$src2, $dst|$dst, $src2}",
+ [(set GR8:$dst, EFLAGS, (X86and_flag GR8:$src1, GR8:$src2))]>;
+def AND16rr : I<0x21, MRMDestReg,
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ "and{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, EFLAGS, (X86and_flag GR16:$src1,
+ GR16:$src2))]>, OpSize;
+def AND32rr : I<0x21, MRMDestReg,
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
+ "and{l}\t{$src2, $dst|$dst, $src2}",
+ [(set GR32:$dst, EFLAGS, (X86and_flag GR32:$src1,
+ GR32:$src2))]>;
}
// AND instructions with the destination register in REG and the source register
// in R/M. Included for the disassembler.
+let isCodeGenOnly = 1 in {
def AND8rr_REV : I<0x22, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
"and{b}\t{$src2, $dst|$dst, $src2}", []>;
def AND16rr_REV : I<0x23, MRMSrcReg, (outs GR16:$dst),
@@ -1724,51 +1878,53 @@ def AND16rr_REV : I<0x23, MRMSrcReg, (outs GR16:$dst),
def AND32rr_REV : I<0x23, MRMSrcReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"and{l}\t{$src2, $dst|$dst, $src2}", []>;
+}
def AND8rm : I<0x22, MRMSrcMem,
(outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2),
"and{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (and GR8:$src1, (loadi8 addr:$src2))),
- (implicit EFLAGS)]>;
+ [(set GR8:$dst, EFLAGS, (X86and_flag GR8:$src1,
+ (loadi8 addr:$src2)))]>;
def AND16rm : I<0x23, MRMSrcMem,
(outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
"and{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (and GR16:$src1, (loadi16 addr:$src2))),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS, (X86and_flag GR16:$src1,
+ (loadi16 addr:$src2)))]>,
+ OpSize;
def AND32rm : I<0x23, MRMSrcMem,
(outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
"and{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (and GR32:$src1, (loadi32 addr:$src2))),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS, (X86and_flag GR32:$src1,
+ (loadi32 addr:$src2)))]>;
def AND8ri : Ii8<0x80, MRM4r,
(outs GR8 :$dst), (ins GR8 :$src1, i8imm :$src2),
"and{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (and GR8:$src1, imm:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR8:$dst, EFLAGS, (X86and_flag GR8:$src1,
+ imm:$src2))]>;
def AND16ri : Ii16<0x81, MRM4r,
(outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
"and{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (and GR16:$src1, imm:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS, (X86and_flag GR16:$src1,
+ imm:$src2))]>, OpSize;
def AND32ri : Ii32<0x81, MRM4r,
(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
"and{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (and GR32:$src1, imm:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS, (X86and_flag GR32:$src1,
+ imm:$src2))]>;
def AND16ri8 : Ii8<0x83, MRM4r,
(outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"and{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (and GR16:$src1, i16immSExt8:$src2)),
- (implicit EFLAGS)]>,
+ [(set GR16:$dst, EFLAGS, (X86and_flag GR16:$src1,
+ i16immSExt8:$src2))]>,
OpSize;
def AND32ri8 : Ii8<0x83, MRM4r,
(outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"and{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (and GR32:$src1, i32immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS, (X86and_flag GR32:$src1,
+ i32immSExt8:$src2))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def AND8mr : I<0x20, MRMDestMem,
(outs), (ins i8mem :$dst, GR8 :$src),
"and{b}\t{$src, $dst|$dst, $src}",
@@ -1820,29 +1976,28 @@ let isTwoAddress = 0 in {
def AND32i32 : Ii32<0x25, RawFrm, (outs), (ins i32imm:$src),
"and{l}\t{$src, %eax|%eax, $src}", []>;
-}
+} // Constraints = ""
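The rewritten AND patterns above make the flag result explicit: instead of "(implicit EFLAGS)", X86and_flag produces two values, the AND result and EFLAGS. In C++ terms the node behaves something like the following sketch (the struct and field names are mine; AND always clears CF and OF and derives ZF/SF from the result):

#include <cstdint>
#include <cstdio>

struct AndResult {
    uint32_t value;
    bool zf, sf, cf, of;          // CF and OF are always cleared by AND
};

static AndResult and_flag32(uint32_t a, uint32_t b) {
    uint32_t r = a & b;
    return { r, r == 0, (r >> 31) != 0, false, false };
}

int main() {
    AndResult r = and_flag32(0xF0, 0x0F);
    std::printf("value=%u zf=%d\n", r.value, (int)r.zf);   // value=0 zf=1
    return 0;
}
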
let isCommutable = 1 in { // X = OR Y, Z --> X = OR Z, Y
def OR8rr : I<0x08, MRMDestReg, (outs GR8 :$dst),
(ins GR8 :$src1, GR8 :$src2),
"or{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (or GR8:$src1, GR8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR8:$dst, EFLAGS, (X86or_flag GR8:$src1, GR8:$src2))]>;
def OR16rr : I<0x09, MRMDestReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
"or{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (or GR16:$src1, GR16:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS, (X86or_flag GR16:$src1,GR16:$src2))]>,
+ OpSize;
def OR32rr : I<0x09, MRMDestReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"or{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (or GR32:$src1, GR32:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS, (X86or_flag GR32:$src1,GR32:$src2))]>;
}
// OR instructions with the destination register in REG and the source register
// in R/M. Included for the disassembler.
+let isCodeGenOnly = 1 in {
def OR8rr_REV : I<0x0A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
"or{b}\t{$src2, $dst|$dst, $src2}", []>;
def OR16rr_REV : I<0x0B, MRMSrcReg, (outs GR16:$dst),
@@ -1851,50 +2006,51 @@ def OR16rr_REV : I<0x0B, MRMSrcReg, (outs GR16:$dst),
def OR32rr_REV : I<0x0B, MRMSrcReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"or{l}\t{$src2, $dst|$dst, $src2}", []>;
+}
-def OR8rm : I<0x0A, MRMSrcMem , (outs GR8 :$dst),
+def OR8rm : I<0x0A, MRMSrcMem, (outs GR8 :$dst),
(ins GR8 :$src1, i8mem :$src2),
"or{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (or GR8:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
-def OR16rm : I<0x0B, MRMSrcMem , (outs GR16:$dst),
+ [(set GR8:$dst, EFLAGS, (X86or_flag GR8:$src1,
+ (load addr:$src2)))]>;
+def OR16rm : I<0x0B, MRMSrcMem, (outs GR16:$dst),
(ins GR16:$src1, i16mem:$src2),
"or{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (or GR16:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>, OpSize;
-def OR32rm : I<0x0B, MRMSrcMem , (outs GR32:$dst),
+ [(set GR16:$dst, EFLAGS, (X86or_flag GR16:$src1,
+ (load addr:$src2)))]>,
+ OpSize;
+def OR32rm : I<0x0B, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$src1, i32mem:$src2),
"or{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (or GR32:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS, (X86or_flag GR32:$src1,
+ (load addr:$src2)))]>;
def OR8ri : Ii8 <0x80, MRM1r, (outs GR8 :$dst),
(ins GR8 :$src1, i8imm:$src2),
"or{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (or GR8:$src1, imm:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR8:$dst,EFLAGS, (X86or_flag GR8:$src1, imm:$src2))]>;
def OR16ri : Ii16<0x81, MRM1r, (outs GR16:$dst),
(ins GR16:$src1, i16imm:$src2),
"or{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (or GR16:$src1, imm:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS, (X86or_flag GR16:$src1,
+ imm:$src2))]>, OpSize;
def OR32ri : Ii32<0x81, MRM1r, (outs GR32:$dst),
(ins GR32:$src1, i32imm:$src2),
"or{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (or GR32:$src1, imm:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS, (X86or_flag GR32:$src1,
+ imm:$src2))]>;
def OR16ri8 : Ii8<0x83, MRM1r, (outs GR16:$dst),
(ins GR16:$src1, i16i8imm:$src2),
"or{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (or GR16:$src1, i16immSExt8:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS, (X86or_flag GR16:$src1,
+ i16immSExt8:$src2))]>, OpSize;
def OR32ri8 : Ii8<0x83, MRM1r, (outs GR32:$dst),
(ins GR32:$src1, i32i8imm:$src2),
"or{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (or GR32:$src1, i32immSExt8:$src2)),
- (implicit EFLAGS)]>;
-let isTwoAddress = 0 in {
+ [(set GR32:$dst, EFLAGS, (X86or_flag GR32:$src1,
+ i32immSExt8:$src2))]>;
+let Constraints = "" in {
def OR8mr : I<0x08, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
"or{b}\t{$src, $dst|$dst, $src}",
[(store (or (load addr:$dst), GR8:$src), addr:$dst),
@@ -1936,29 +2092,30 @@ let isTwoAddress = 0 in {
"or{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def OR32i32 : Ii32 <0x0D, RawFrm, (outs), (ins i32imm:$src),
"or{l}\t{$src, %eax|%eax, $src}", []>;
-} // isTwoAddress = 0
+} // Constraints = ""
let isCommutable = 1 in { // X = XOR Y, Z --> X = XOR Z, Y
def XOR8rr : I<0x30, MRMDestReg,
(outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2),
"xor{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (xor GR8:$src1, GR8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR8:$dst, EFLAGS, (X86xor_flag GR8:$src1,
+ GR8:$src2))]>;
def XOR16rr : I<0x31, MRMDestReg,
(outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
"xor{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (xor GR16:$src1, GR16:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS, (X86xor_flag GR16:$src1,
+ GR16:$src2))]>, OpSize;
def XOR32rr : I<0x31, MRMDestReg,
(outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"xor{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (xor GR32:$src1, GR32:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS, (X86xor_flag GR32:$src1,
+ GR32:$src2))]>;
} // isCommutable = 1
// XOR instructions with the destination register in REG and the source register
// in R/M. Included for the disassembler.
+let isCodeGenOnly = 1 in {
def XOR8rr_REV : I<0x32, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
"xor{b}\t{$src2, $dst|$dst, $src2}", []>;
def XOR16rr_REV : I<0x33, MRMSrcReg, (outs GR16:$dst),
@@ -1967,52 +2124,52 @@ def XOR16rr_REV : I<0x33, MRMSrcReg, (outs GR16:$dst),
def XOR32rr_REV : I<0x33, MRMSrcReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"xor{l}\t{$src2, $dst|$dst, $src2}", []>;
+}
-def XOR8rm : I<0x32, MRMSrcMem ,
+def XOR8rm : I<0x32, MRMSrcMem,
(outs GR8 :$dst), (ins GR8:$src1, i8mem :$src2),
"xor{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (xor GR8:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
-def XOR16rm : I<0x33, MRMSrcMem ,
+ [(set GR8:$dst, EFLAGS, (X86xor_flag GR8:$src1,
+ (load addr:$src2)))]>;
+def XOR16rm : I<0x33, MRMSrcMem,
(outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
"xor{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (xor GR16:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>,
+ [(set GR16:$dst, EFLAGS, (X86xor_flag GR16:$src1,
+ (load addr:$src2)))]>,
OpSize;
-def XOR32rm : I<0x33, MRMSrcMem ,
+def XOR32rm : I<0x33, MRMSrcMem,
(outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
"xor{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (xor GR32:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
-
-def XOR8ri : Ii8<0x80, MRM6r,
- (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
- "xor{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (xor GR8:$src1, imm:$src2)),
- (implicit EFLAGS)]>;
-def XOR16ri : Ii16<0x81, MRM6r,
- (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
- "xor{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (xor GR16:$src1, imm:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR32:$dst, EFLAGS, (X86xor_flag GR32:$src1,
+ (load addr:$src2)))]>;
+
+def XOR8ri : Ii8<0x80, MRM6r,
+ (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+ "xor{b}\t{$src2, $dst|$dst, $src2}",
+ [(set GR8:$dst, EFLAGS, (X86xor_flag GR8:$src1, imm:$src2))]>;
+def XOR16ri : Ii16<0x81, MRM6r,
+ (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+ "xor{w}\t{$src2, $dst|$dst, $src2}",
+ [(set GR16:$dst, EFLAGS, (X86xor_flag GR16:$src1,
+ imm:$src2))]>, OpSize;
def XOR32ri : Ii32<0x81, MRM6r,
(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
"xor{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (xor GR32:$src1, imm:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS, (X86xor_flag GR32:$src1,
+ imm:$src2))]>;
def XOR16ri8 : Ii8<0x83, MRM6r,
(outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"xor{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (xor GR16:$src1, i16immSExt8:$src2)),
- (implicit EFLAGS)]>,
+ [(set GR16:$dst, EFLAGS, (X86xor_flag GR16:$src1,
+ i16immSExt8:$src2))]>,
OpSize;
def XOR32ri8 : Ii8<0x83, MRM6r,
(outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"xor{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (xor GR32:$src1, i32immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS, (X86xor_flag GR32:$src1,
+ i32immSExt8:$src2))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def XOR8mr : I<0x30, MRMDestMem,
(outs), (ins i8mem :$dst, GR8 :$src),
"xor{b}\t{$src, $dst|$dst, $src}",
@@ -2057,32 +2214,33 @@ let isTwoAddress = 0 in {
[(store (xor (load addr:$dst), i32immSExt8:$src), addr:$dst),
(implicit EFLAGS)]>;
- def XOR8i8 : Ii8 <0x34, RawFrm, (outs), (ins i8imm:$src),
- "xor{b}\t{$src, %al|%al, $src}", []>;
- def XOR16i16 : Ii16 <0x35, RawFrm, (outs), (ins i16imm:$src),
- "xor{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
- def XOR32i32 : Ii32 <0x35, RawFrm, (outs), (ins i32imm:$src),
- "xor{l}\t{$src, %eax|%eax, $src}", []>;
-} // isTwoAddress = 0
+ def XOR8i8 : Ii8 <0x34, RawFrm, (outs), (ins i8imm:$src),
+ "xor{b}\t{$src, %al|%al, $src}", []>;
+ def XOR16i16 : Ii16<0x35, RawFrm, (outs), (ins i16imm:$src),
+ "xor{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
+ def XOR32i32 : Ii32<0x35, RawFrm, (outs), (ins i32imm:$src),
+ "xor{l}\t{$src, %eax|%eax, $src}", []>;
+} // Constraints = ""
} // Defs = [EFLAGS]
// Shift instructions
let Defs = [EFLAGS] in {
let Uses = [CL] in {
-def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src),
+def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1),
"shl{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (shl GR8:$src, CL))]>;
-def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src),
+ [(set GR8:$dst, (shl GR8:$src1, CL))]>;
+def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
"shl{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (shl GR16:$src, CL))]>, OpSize;
-def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src),
+ [(set GR16:$dst, (shl GR16:$src1, CL))]>, OpSize;
+def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
"shl{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (shl GR32:$src, CL))]>;
+ [(set GR32:$dst, (shl GR32:$src1, CL))]>;
} // Uses = [CL]
def SHL8ri : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
"shl{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, (shl GR8:$src1, (i8 imm:$src2)))]>;
+
let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
def SHL16ri : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
"shl{w}\t{$src2, $dst|$dst, $src2}",
@@ -2103,7 +2261,7 @@ def SHL32r1 : I<0xD1, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
} // isConvertibleToThreeAddress = 1
-let isTwoAddress = 0 in {
+let Constraints = "" in {
let Uses = [CL] in {
def SHL8mCL : I<0xD2, MRM4m, (outs), (ins i8mem :$dst),
"shl{b}\t{%cl, $dst|$dst, CL}",
@@ -2137,18 +2295,18 @@ let isTwoAddress = 0 in {
def SHL32m1 : I<0xD1, MRM4m, (outs), (ins i32mem:$dst),
"shl{l}\t$dst",
[(store (shl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
-}
+} // Constraints = ""
let Uses = [CL] in {
-def SHR8rCL : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src),
+def SHR8rCL : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src1),
"shr{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (srl GR8:$src, CL))]>;
-def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src),
+ [(set GR8:$dst, (srl GR8:$src1, CL))]>;
+def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
"shr{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (srl GR16:$src, CL))]>, OpSize;
-def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src),
+ [(set GR16:$dst, (srl GR16:$src1, CL))]>, OpSize;
+def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
"shr{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (srl GR32:$src, CL))]>;
+ [(set GR32:$dst, (srl GR32:$src1, CL))]>;
}
def SHR8ri : Ii8<0xC0, MRM5r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
@@ -2172,7 +2330,7 @@ def SHR32r1 : I<0xD1, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
"shr{l}\t$dst",
[(set GR32:$dst, (srl GR32:$src1, (i8 1)))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
let Uses = [CL] in {
def SHR8mCL : I<0xD2, MRM5m, (outs), (ins i8mem :$dst),
"shr{b}\t{%cl, $dst|$dst, CL}",
@@ -2206,18 +2364,18 @@ let isTwoAddress = 0 in {
def SHR32m1 : I<0xD1, MRM5m, (outs), (ins i32mem:$dst),
"shr{l}\t$dst",
[(store (srl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
-}
+} // Constraints = ""
let Uses = [CL] in {
-def SAR8rCL : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src),
+def SAR8rCL : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
"sar{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (sra GR8:$src, CL))]>;
-def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src),
+ [(set GR8:$dst, (sra GR8:$src1, CL))]>;
+def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
"sar{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (sra GR16:$src, CL))]>, OpSize;
-def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src),
+ [(set GR16:$dst, (sra GR16:$src1, CL))]>, OpSize;
+def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
"sar{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (sra GR32:$src, CL))]>;
+ [(set GR32:$dst, (sra GR32:$src1, CL))]>;
}
def SAR8ri : Ii8<0xC0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
@@ -2242,7 +2400,7 @@ def SAR32r1 : I<0xD1, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
"sar{l}\t$dst",
[(set GR32:$dst, (sra GR32:$src1, (i8 1)))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
let Uses = [CL] in {
def SAR8mCL : I<0xD2, MRM7m, (outs), (ins i8mem :$dst),
"sar{b}\t{%cl, $dst|$dst, CL}",
@@ -2276,65 +2434,65 @@ let isTwoAddress = 0 in {
def SAR32m1 : I<0xD1, MRM7m, (outs), (ins i32mem:$dst),
"sar{l}\t$dst",
[(store (sra (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
-}
+} // Constraints = ""
// Rotate instructions
-def RCL8r1 : I<0xD0, MRM2r, (outs GR8:$dst), (ins GR8:$src),
+def RCL8r1 : I<0xD0, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
"rcl{b}\t{1, $dst|$dst, 1}", []>;
let Uses = [CL] in {
-def RCL8rCL : I<0xD2, MRM2r, (outs GR8:$dst), (ins GR8:$src),
+def RCL8rCL : I<0xD2, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
"rcl{b}\t{%cl, $dst|$dst, CL}", []>;
}
-def RCL8ri : Ii8<0xC0, MRM2r, (outs GR8:$dst), (ins GR8:$src, i8imm:$cnt),
+def RCL8ri : Ii8<0xC0, MRM2r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt),
"rcl{b}\t{$cnt, $dst|$dst, $cnt}", []>;
-def RCL16r1 : I<0xD1, MRM2r, (outs GR16:$dst), (ins GR16:$src),
+def RCL16r1 : I<0xD1, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
"rcl{w}\t{1, $dst|$dst, 1}", []>, OpSize;
let Uses = [CL] in {
-def RCL16rCL : I<0xD3, MRM2r, (outs GR16:$dst), (ins GR16:$src),
+def RCL16rCL : I<0xD3, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
"rcl{w}\t{%cl, $dst|$dst, CL}", []>, OpSize;
}
-def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src, i8imm:$cnt),
+def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt),
"rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize;
-def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src),
+def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
"rcl{l}\t{1, $dst|$dst, 1}", []>;
let Uses = [CL] in {
-def RCL32rCL : I<0xD3, MRM2r, (outs GR32:$dst), (ins GR32:$src),
+def RCL32rCL : I<0xD3, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
"rcl{l}\t{%cl, $dst|$dst, CL}", []>;
}
-def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src, i8imm:$cnt),
+def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt),
"rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>;
-def RCR8r1 : I<0xD0, MRM3r, (outs GR8:$dst), (ins GR8:$src),
+def RCR8r1 : I<0xD0, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
"rcr{b}\t{1, $dst|$dst, 1}", []>;
let Uses = [CL] in {
-def RCR8rCL : I<0xD2, MRM3r, (outs GR8:$dst), (ins GR8:$src),
+def RCR8rCL : I<0xD2, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
"rcr{b}\t{%cl, $dst|$dst, CL}", []>;
}
-def RCR8ri : Ii8<0xC0, MRM3r, (outs GR8:$dst), (ins GR8:$src, i8imm:$cnt),
+def RCR8ri : Ii8<0xC0, MRM3r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt),
"rcr{b}\t{$cnt, $dst|$dst, $cnt}", []>;
-def RCR16r1 : I<0xD1, MRM3r, (outs GR16:$dst), (ins GR16:$src),
+def RCR16r1 : I<0xD1, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
"rcr{w}\t{1, $dst|$dst, 1}", []>, OpSize;
let Uses = [CL] in {
-def RCR16rCL : I<0xD3, MRM3r, (outs GR16:$dst), (ins GR16:$src),
+def RCR16rCL : I<0xD3, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
"rcr{w}\t{%cl, $dst|$dst, CL}", []>, OpSize;
}
-def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src, i8imm:$cnt),
+def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt),
"rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize;
-def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src),
+def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
"rcr{l}\t{1, $dst|$dst, 1}", []>;
let Uses = [CL] in {
-def RCR32rCL : I<0xD3, MRM3r, (outs GR32:$dst), (ins GR32:$src),
+def RCR32rCL : I<0xD3, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
"rcr{l}\t{%cl, $dst|$dst, CL}", []>;
}
-def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src, i8imm:$cnt),
+def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt),
"rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def RCL8m1 : I<0xD0, MRM2m, (outs), (ins i8mem:$dst),
"rcl{b}\t{1, $dst|$dst, 1}", []>;
def RCL8mi : Ii8<0xC0, MRM2m, (outs), (ins i8mem:$dst, i8imm:$cnt),
@@ -2374,19 +2532,19 @@ def RCR16mCL : I<0xD3, MRM3m, (outs), (ins i16mem:$dst),
def RCR32mCL : I<0xD3, MRM3m, (outs), (ins i32mem:$dst),
"rcr{l}\t{%cl, $dst|$dst, CL}", []>;
}
-}
+} // Constraints = ""
// FIXME: provide shorter instructions when imm8 == 1
let Uses = [CL] in {
-def ROL8rCL : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src),
+def ROL8rCL : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
"rol{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (rotl GR8:$src, CL))]>;
-def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src),
+ [(set GR8:$dst, (rotl GR8:$src1, CL))]>;
+def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
"rol{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (rotl GR16:$src, CL))]>, OpSize;
-def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src),
+ [(set GR16:$dst, (rotl GR16:$src1, CL))]>, OpSize;
+def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
"rol{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (rotl GR32:$src, CL))]>;
+ [(set GR32:$dst, (rotl GR32:$src1, CL))]>;
}
def ROL8ri : Ii8<0xC0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
@@ -2411,7 +2569,7 @@ def ROL32r1 : I<0xD1, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
"rol{l}\t$dst",
[(set GR32:$dst, (rotl GR32:$src1, (i8 1)))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
let Uses = [CL] in {
def ROL8mCL : I<0xD2, MRM0m, (outs), (ins i8mem :$dst),
"rol{b}\t{%cl, $dst|$dst, CL}",
@@ -2445,18 +2603,18 @@ let isTwoAddress = 0 in {
def ROL32m1 : I<0xD1, MRM0m, (outs), (ins i32mem:$dst),
"rol{l}\t$dst",
[(store (rotl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
-}
+} // Constraints = ""
let Uses = [CL] in {
-def ROR8rCL : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src),
+def ROR8rCL : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
"ror{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (rotr GR8:$src, CL))]>;
-def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src),
+ [(set GR8:$dst, (rotr GR8:$src1, CL))]>;
+def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
"ror{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (rotr GR16:$src, CL))]>, OpSize;
-def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src),
+ [(set GR16:$dst, (rotr GR16:$src1, CL))]>, OpSize;
+def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
"ror{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (rotr GR32:$src, CL))]>;
+ [(set GR32:$dst, (rotr GR32:$src1, CL))]>;
}
def ROR8ri : Ii8<0xC0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
@@ -2481,7 +2639,7 @@ def ROR32r1 : I<0xD1, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
"ror{l}\t$dst",
[(set GR32:$dst, (rotr GR32:$src1, (i8 1)))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
let Uses = [CL] in {
def ROR8mCL : I<0xD2, MRM1m, (outs), (ins i8mem :$dst),
"ror{b}\t{%cl, $dst|$dst, CL}",
@@ -2515,8 +2673,7 @@ let isTwoAddress = 0 in {
def ROR32m1 : I<0xD1, MRM1m, (outs), (ins i32mem:$dst),
"ror{l}\t$dst",
[(store (rotr (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
-}
-
+} // Constraints = ""
// Double shift instructions (generalizations of rotate)
@@ -2572,7 +2729,7 @@ def SHRD16rri8 : Ii8<0xAC, MRMDestReg,
TB, OpSize;
}
-let isTwoAddress = 0 in {
+let Constraints = "" in {
let Uses = [CL] in {
def SHLD32mrCL : I<0xA5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"shld{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
@@ -2618,7 +2775,7 @@ let isTwoAddress = 0 in {
[(store (X86shrd (loadi16 addr:$dst), GR16:$src2,
(i8 imm:$src3)), addr:$dst)]>,
TB, OpSize;
-}
+} // Constraints = ""
} // Defs = [EFLAGS]
@@ -2629,81 +2786,82 @@ let isCommutable = 1 in { // X = ADD Y, Z --> X = ADD Z, Y
def ADD8rr : I<0x00, MRMDestReg, (outs GR8 :$dst),
(ins GR8 :$src1, GR8 :$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (add GR8:$src1, GR8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR8:$dst, EFLAGS, (X86add_flag GR8:$src1, GR8:$src2))]>;
let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
// Register-Register Addition
def ADD16rr : I<0x01, MRMDestReg, (outs GR16:$dst),
(ins GR16:$src1, GR16:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (add GR16:$src1, GR16:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS, (X86add_flag GR16:$src1,
+ GR16:$src2))]>, OpSize;
def ADD32rr : I<0x01, MRMDestReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (add GR32:$src1, GR32:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS, (X86add_flag GR32:$src1,
+ GR32:$src2))]>;
} // end isConvertibleToThreeAddress
} // end isCommutable
+// These are alternate spellings for use by the disassembler, we mark them as
+// code gen only to ensure they aren't matched by the assembler.
+let isCodeGenOnly = 1 in {
+ def ADD8rr_alt: I<0x02, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+ "add{b}\t{$src2, $dst|$dst, $src2}", []>;
+ def ADD16rr_alt: I<0x03, MRMSrcReg,(outs GR16:$dst),(ins GR16:$src1, GR16:$src2),
+ "add{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize;
+ def ADD32rr_alt: I<0x03, MRMSrcReg,(outs GR32:$dst),(ins GR32:$src1, GR32:$src2),
+ "add{l}\t{$src2, $dst|$dst, $src2}", []>;
+}
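The "alternate spellings" note above exists because reg/reg ADD can be encoded in two directions (00 /r with the destination in the ModRM r/m field, 02 /r with it in the reg field); marking one direction isCodeGenOnly keeps the assembler from having two equally valid choices while the disassembler can still name both. For example, both of these byte pairs decode to the same "add al, bl" (C++ illustration, not part of the patch):

#include <cstdio>

int main() {
    unsigned char via_00[] = { 0x00, 0xD8 };   // 00 /r : ADD r/m8, r8 (MRMDestReg)
    unsigned char via_02[] = { 0x02, 0xC3 };   // 02 /r : ADD r8, r/m8 (MRMSrcReg)
    std::printf("%02X %02X  and  %02X %02X encode the same instruction\n",
                via_00[0], via_00[1], via_02[0], via_02[1]);
    return 0;
}
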
+
// Register-Memory Addition
def ADD8rm : I<0x02, MRMSrcMem, (outs GR8 :$dst),
(ins GR8 :$src1, i8mem :$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (add GR8:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
+ [(set GR8:$dst, EFLAGS, (X86add_flag GR8:$src1,
+ (load addr:$src2)))]>;
def ADD16rm : I<0x03, MRMSrcMem, (outs GR16:$dst),
(ins GR16:$src1, i16mem:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (add GR16:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS, (X86add_flag GR16:$src1,
+ (load addr:$src2)))]>, OpSize;
def ADD32rm : I<0x03, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$src1, i32mem:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (add GR32:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS, (X86add_flag GR32:$src1,
+ (load addr:$src2)))]>;
-// Register-Register Addition - Equivalent to the normal rr forms (ADD8rr,
-// ADD16rr, and ADD32rr), but differently encoded.
-def ADD8mrmrr: I<0x02, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
- "add{b}\t{$src2, $dst|$dst, $src2}", []>;
-def ADD16mrmrr: I<0x03, MRMSrcReg,(outs GR16:$dst),(ins GR16:$src1, GR16:$src2),
- "add{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize;
-def ADD32mrmrr: I<0x03, MRMSrcReg,(outs GR16:$dst),(ins GR16:$src1, GR16:$src2),
- "add{l}\t{$src2, $dst|$dst, $src2}", []>;
-
// Register-Integer Addition
def ADD8ri : Ii8<0x80, MRM0r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (add GR8:$src1, imm:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR8:$dst, EFLAGS,
+ (X86add_flag GR8:$src1, imm:$src2))]>;
let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
// Register-Integer Addition
def ADD16ri : Ii16<0x81, MRM0r, (outs GR16:$dst),
(ins GR16:$src1, i16imm:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (add GR16:$src1, imm:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS,
+ (X86add_flag GR16:$src1, imm:$src2))]>, OpSize;
def ADD32ri : Ii32<0x81, MRM0r, (outs GR32:$dst),
(ins GR32:$src1, i32imm:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (add GR32:$src1, imm:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS,
+ (X86add_flag GR32:$src1, imm:$src2))]>;
def ADD16ri8 : Ii8<0x83, MRM0r, (outs GR16:$dst),
(ins GR16:$src1, i16i8imm:$src2),
"add{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (add GR16:$src1, i16immSExt8:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS,
+ (X86add_flag GR16:$src1, i16immSExt8:$src2))]>, OpSize;
def ADD32ri8 : Ii8<0x83, MRM0r, (outs GR32:$dst),
(ins GR32:$src1, i32i8imm:$src2),
"add{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (add GR32:$src1, i32immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS,
+ (X86add_flag GR32:$src1, i32immSExt8:$src2))]>;
}
-let isTwoAddress = 0 in {
+let Constraints = "" in {
// Memory-Register Addition
def ADD8mr : I<0x00, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
@@ -2747,7 +2905,7 @@ let isTwoAddress = 0 in {
"add{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def ADD32i32 : Ii32<0x05, RawFrm, (outs), (ins i32imm:$src),
"add{l}\t{$src, %eax|%eax, $src}", []>;
-}
+} // Constraints = ""
let Uses = [EFLAGS] in {
let isCommutable = 1 in { // X = ADC Y, Z --> X = ADC Z, Y
@@ -2764,6 +2922,7 @@ def ADC32rr : I<0x11, MRMDestReg, (outs GR32:$dst),
[(set GR32:$dst, (adde GR32:$src1, GR32:$src2))]>;
}
+let isCodeGenOnly = 1 in {
def ADC8rr_REV : I<0x12, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
"adc{b}\t{$src2, $dst|$dst, $src2}", []>;
def ADC16rr_REV : I<0x13, MRMSrcReg, (outs GR16:$dst),
@@ -2772,6 +2931,7 @@ def ADC16rr_REV : I<0x13, MRMSrcReg, (outs GR16:$dst),
def ADC32rr_REV : I<0x13, MRMSrcReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"adc{l}\t{$src2, $dst|$dst, $src2}", []>;
+}
def ADC8rm : I<0x12, MRMSrcMem , (outs GR8:$dst),
(ins GR8:$src1, i8mem:$src2),
@@ -2807,7 +2967,7 @@ def ADC32ri8 : Ii8<0x83, MRM2r, (outs GR32:$dst),
"adc{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (adde GR32:$src1, i32immSExt8:$src2))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def ADC8mr : I<0x10, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
"adc{b}\t{$src2, $dst|$dst, $src2}",
[(store (adde (load addr:$dst), GR8:$src2), addr:$dst)]>;
@@ -2842,23 +3002,24 @@ let isTwoAddress = 0 in {
"adc{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def ADC32i32 : Ii32<0x15, RawFrm, (outs), (ins i32imm:$src),
"adc{l}\t{$src, %eax|%eax, $src}", []>;
-}
+} // Constraints = ""
} // Uses = [EFLAGS]
// Register-Register Subtraction
def SUB8rr : I<0x28, MRMDestReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (sub GR8:$src1, GR8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR8:$dst, EFLAGS,
+ (X86sub_flag GR8:$src1, GR8:$src2))]>;
def SUB16rr : I<0x29, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1,GR16:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sub GR16:$src1, GR16:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS,
+ (X86sub_flag GR16:$src1, GR16:$src2))]>, OpSize;
def SUB32rr : I<0x29, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1,GR32:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sub GR32:$src1, GR32:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS,
+ (X86sub_flag GR32:$src1, GR32:$src2))]>;
+let isCodeGenOnly = 1 in {
def SUB8rr_REV : I<0x2A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}", []>;
def SUB16rr_REV : I<0x2B, MRMSrcReg, (outs GR16:$dst),
@@ -2867,52 +3028,53 @@ def SUB16rr_REV : I<0x2B, MRMSrcReg, (outs GR16:$dst),
def SUB32rr_REV : I<0x2B, MRMSrcReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}", []>;
+}
// Register-Memory Subtraction
def SUB8rm : I<0x2A, MRMSrcMem, (outs GR8 :$dst),
(ins GR8 :$src1, i8mem :$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (sub GR8:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
+ [(set GR8:$dst, EFLAGS,
+ (X86sub_flag GR8:$src1, (load addr:$src2)))]>;
def SUB16rm : I<0x2B, MRMSrcMem, (outs GR16:$dst),
(ins GR16:$src1, i16mem:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sub GR16:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS,
+ (X86sub_flag GR16:$src1, (load addr:$src2)))]>, OpSize;
def SUB32rm : I<0x2B, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$src1, i32mem:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sub GR32:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS,
+ (X86sub_flag GR32:$src1, (load addr:$src2)))]>;
// Register-Integer Subtraction
def SUB8ri : Ii8 <0x80, MRM5r, (outs GR8:$dst),
(ins GR8:$src1, i8imm:$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
- [(set GR8:$dst, (sub GR8:$src1, imm:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR8:$dst, EFLAGS,
+ (X86sub_flag GR8:$src1, imm:$src2))]>;
def SUB16ri : Ii16<0x81, MRM5r, (outs GR16:$dst),
(ins GR16:$src1, i16imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sub GR16:$src1, imm:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS,
+ (X86sub_flag GR16:$src1, imm:$src2))]>, OpSize;
def SUB32ri : Ii32<0x81, MRM5r, (outs GR32:$dst),
(ins GR32:$src1, i32imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sub GR32:$src1, imm:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS,
+ (X86sub_flag GR32:$src1, imm:$src2))]>;
def SUB16ri8 : Ii8<0x83, MRM5r, (outs GR16:$dst),
(ins GR16:$src1, i16i8imm:$src2),
"sub{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (sub GR16:$src1, i16immSExt8:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS,
+ (X86sub_flag GR16:$src1, i16immSExt8:$src2))]>, OpSize;
def SUB32ri8 : Ii8<0x83, MRM5r, (outs GR32:$dst),
(ins GR32:$src1, i32i8imm:$src2),
"sub{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (sub GR32:$src1, i32immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS,
+ (X86sub_flag GR32:$src1, i32immSExt8:$src2))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
// Memory-Register Subtraction
def SUB8mr : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
@@ -2957,7 +3119,7 @@ let isTwoAddress = 0 in {
"sub{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def SUB32i32 : Ii32<0x2D, RawFrm, (outs), (ins i32imm:$src),
"sub{l}\t{$src, %eax|%eax, $src}", []>;
-}
+} // Constraints = ""
let Uses = [EFLAGS] in {
def SBB8rr : I<0x18, MRMDestReg, (outs GR8:$dst),
@@ -2973,7 +3135,7 @@ def SBB32rr : I<0x19, MRMDestReg, (outs GR32:$dst),
"sbb{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (sube GR32:$src1, GR32:$src2))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def SBB8mr : I<0x18, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
"sbb{b}\t{$src2, $dst|$dst, $src2}",
[(store (sube (load addr:$dst), GR8:$src2), addr:$dst)]>;
@@ -3008,8 +3170,9 @@ let isTwoAddress = 0 in {
"sbb{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def SBB32i32 : Ii32<0x1D, RawFrm, (outs), (ins i32imm:$src),
"sbb{l}\t{$src, %eax|%eax, $src}", []>;
-}
+} // Constraints = ""
+let isCodeGenOnly = 1 in {
def SBB8rr_REV : I<0x1A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
"sbb{b}\t{$src2, $dst|$dst, $src2}", []>;
def SBB16rr_REV : I<0x1B, MRMSrcReg, (outs GR16:$dst),
@@ -3018,6 +3181,7 @@ def SBB16rr_REV : I<0x1B, MRMSrcReg, (outs GR16:$dst),
def SBB32rr_REV : I<0x1B, MRMSrcReg, (outs GR32:$dst),
(ins GR32:$src1, GR32:$src2),
"sbb{l}\t{$src2, $dst|$dst, $src2}", []>;
+}
def SBB8rm : I<0x1A, MRMSrcMem, (outs GR8:$dst), (ins GR8:$src1, i8mem:$src2),
"sbb{b}\t{$src2, $dst|$dst, $src2}",
@@ -3059,25 +3223,26 @@ let isCommutable = 1 in { // X = IMUL Y, Z --> X = IMUL Z, Y
// Register-Register Signed Integer Multiply
def IMUL16rr : I<0xAF, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1,GR16:$src2),
"imul{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (mul GR16:$src1, GR16:$src2)),
- (implicit EFLAGS)]>, TB, OpSize;
+ [(set GR16:$dst, EFLAGS,
+ (X86smul_flag GR16:$src1, GR16:$src2))]>, TB, OpSize;
def IMUL32rr : I<0xAF, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1,GR32:$src2),
"imul{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, GR32:$src2)),
- (implicit EFLAGS)]>, TB;
+ [(set GR32:$dst, EFLAGS,
+ (X86smul_flag GR32:$src1, GR32:$src2))]>, TB;
}
// Register-Memory Signed Integer Multiply
def IMUL16rm : I<0xAF, MRMSrcMem, (outs GR16:$dst),
(ins GR16:$src1, i16mem:$src2),
"imul{w}\t{$src2, $dst|$dst, $src2}",
- [(set GR16:$dst, (mul GR16:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>, TB, OpSize;
+ [(set GR16:$dst, EFLAGS,
+ (X86smul_flag GR16:$src1, (load addr:$src2)))]>,
+ TB, OpSize;
def IMUL32rm : I<0xAF, MRMSrcMem, (outs GR32:$dst),
(ins GR32:$src1, i32mem:$src2),
"imul{l}\t{$src2, $dst|$dst, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, (load addr:$src2))),
- (implicit EFLAGS)]>, TB;
+ [(set GR32:$dst, EFLAGS,
+ (X86smul_flag GR32:$src1, (load addr:$src2)))]>, TB;
} // Defs = [EFLAGS]
} // end Two Address instructions
@@ -3087,47 +3252,49 @@ let Defs = [EFLAGS] in {
def IMUL16rri : Ii16<0x69, MRMSrcReg, // GR16 = GR16*I16
(outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR16:$dst, (mul GR16:$src1, imm:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS,
+ (X86smul_flag GR16:$src1, imm:$src2))]>, OpSize;
def IMUL32rri : Ii32<0x69, MRMSrcReg, // GR32 = GR32*I32
(outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, imm:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS,
+ (X86smul_flag GR32:$src1, imm:$src2))]>;
def IMUL16rri8 : Ii8<0x6B, MRMSrcReg, // GR16 = GR16*I8
(outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR16:$dst, (mul GR16:$src1, i16immSExt8:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS,
+ (X86smul_flag GR16:$src1, i16immSExt8:$src2))]>,
+ OpSize;
def IMUL32rri8 : Ii8<0x6B, MRMSrcReg, // GR32 = GR32*I8
(outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (mul GR32:$src1, i32immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS,
+ (X86smul_flag GR32:$src1, i32immSExt8:$src2))]>;
// Memory-Integer Signed Integer Multiply
def IMUL16rmi : Ii16<0x69, MRMSrcMem, // GR16 = [mem16]*I16
(outs GR16:$dst), (ins i16mem:$src1, i16imm:$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR16:$dst, (mul (load addr:$src1), imm:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS,
+ (X86smul_flag (load addr:$src1), imm:$src2))]>,
+ OpSize;
def IMUL32rmi : Ii32<0x69, MRMSrcMem, // GR32 = [mem32]*I32
(outs GR32:$dst), (ins i32mem:$src1, i32imm:$src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (mul (load addr:$src1), imm:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS,
+ (X86smul_flag (load addr:$src1), imm:$src2))]>;
def IMUL16rmi8 : Ii8<0x6B, MRMSrcMem, // GR16 = [mem16]*I8
(outs GR16:$dst), (ins i16mem:$src1, i16i8imm :$src2),
"imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR16:$dst, (mul (load addr:$src1),
- i16immSExt8:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set GR16:$dst, EFLAGS,
+ (X86smul_flag (load addr:$src1),
+ i16immSExt8:$src2))]>, OpSize;
def IMUL32rmi8 : Ii8<0x6B, MRMSrcMem, // GR32 = [mem32]*I8
(outs GR32:$dst), (ins i32mem:$src1, i32i8imm: $src2),
"imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set GR32:$dst, (mul (load addr:$src1),
- i32immSExt8:$src2)),
- (implicit EFLAGS)]>;
+ [(set GR32:$dst, EFLAGS,
+ (X86smul_flag (load addr:$src1),
+ i32immSExt8:$src2))]>;
} // Defs = [EFLAGS]
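For the IMUL patterns above, the EFLAGS value that X86smul_flag produces corresponds to the CF/OF pair, which the hardware sets exactly when the signed product no longer fits in the destination width. A portable way to compute the same predicate (C++ sketch, not from the patch):

#include <cstdint>
#include <cstdio>

static bool imul32_overflows(int32_t a, int32_t b) {
    int64_t wide = (int64_t)a * (int64_t)b;
    return wide > INT32_MAX || wide < INT32_MIN;   // mirrors CF/OF after "imul r32, r32"
}

int main() {
    std::printf("%d %d\n", (int)imul32_overflows(100000, 100000),   // 1: overflows
                           (int)imul32_overflows(46340, 46340));    // 0: fits
    return 0;
}
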
//===----------------------------------------------------------------------===//
@@ -3135,19 +3302,18 @@ def IMUL32rmi8 : Ii8<0x6B, MRMSrcMem, // GR32 = [mem32]*I8
//
let Defs = [EFLAGS] in {
let isCommutable = 1 in { // TEST X, Y --> TEST Y, X
-def TEST8rr : I<0x84, MRMDestReg, (outs), (ins GR8:$src1, GR8:$src2),
+def TEST8rr : I<0x84, MRMSrcReg, (outs), (ins GR8:$src1, GR8:$src2),
"test{b}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (and_su GR8:$src1, GR8:$src2), 0),
- (implicit EFLAGS)]>;
-def TEST16rr : I<0x85, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
+ [(set EFLAGS, (X86cmp (and_su GR8:$src1, GR8:$src2), 0))]>;
+def TEST16rr : I<0x85, MRMSrcReg, (outs), (ins GR16:$src1, GR16:$src2),
"test{w}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (and_su GR16:$src1, GR16:$src2), 0),
- (implicit EFLAGS)]>,
+ [(set EFLAGS, (X86cmp (and_su GR16:$src1, GR16:$src2),
+ 0))]>,
OpSize;
-def TEST32rr : I<0x85, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
+def TEST32rr : I<0x85, MRMSrcReg, (outs), (ins GR32:$src1, GR32:$src2),
"test{l}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (and_su GR32:$src1, GR32:$src2), 0),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (and_su GR32:$src1, GR32:$src2),
+ 0))]>;
}
def TEST8i8 : Ii8<0xA8, RawFrm, (outs), (ins i8imm:$src),
@@ -3159,48 +3325,46 @@ def TEST32i32 : Ii32<0xA9, RawFrm, (outs), (ins i32imm:$src),
def TEST8rm : I<0x84, MRMSrcMem, (outs), (ins GR8 :$src1, i8mem :$src2),
"test{b}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (and GR8:$src1, (loadi8 addr:$src2)), 0),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (and GR8:$src1, (loadi8 addr:$src2)),
+ 0))]>;
def TEST16rm : I<0x85, MRMSrcMem, (outs), (ins GR16:$src1, i16mem:$src2),
"test{w}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (and GR16:$src1, (loadi16 addr:$src2)), 0),
- (implicit EFLAGS)]>, OpSize;
+ [(set EFLAGS, (X86cmp (and GR16:$src1,
+ (loadi16 addr:$src2)), 0))]>, OpSize;
def TEST32rm : I<0x85, MRMSrcMem, (outs), (ins GR32:$src1, i32mem:$src2),
"test{l}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (and GR32:$src1, (loadi32 addr:$src2)), 0),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (and GR32:$src1,
+ (loadi32 addr:$src2)), 0))]>;
def TEST8ri : Ii8 <0xF6, MRM0r, // flags = GR8 & imm8
(outs), (ins GR8:$src1, i8imm:$src2),
"test{b}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (and_su GR8:$src1, imm:$src2), 0),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (and_su GR8:$src1, imm:$src2), 0))]>;
def TEST16ri : Ii16<0xF7, MRM0r, // flags = GR16 & imm16
(outs), (ins GR16:$src1, i16imm:$src2),
"test{w}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (and_su GR16:$src1, imm:$src2), 0),
- (implicit EFLAGS)]>, OpSize;
+ [(set EFLAGS, (X86cmp (and_su GR16:$src1, imm:$src2), 0))]>,
+ OpSize;
def TEST32ri : Ii32<0xF7, MRM0r, // flags = GR32 & imm32
(outs), (ins GR32:$src1, i32imm:$src2),
"test{l}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (and_su GR32:$src1, imm:$src2), 0),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (and_su GR32:$src1, imm:$src2), 0))]>;
def TEST8mi : Ii8 <0xF6, MRM0m, // flags = [mem8] & imm8
(outs), (ins i8mem:$src1, i8imm:$src2),
"test{b}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (and (loadi8 addr:$src1), imm:$src2), 0),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (and (loadi8 addr:$src1), imm:$src2),
+ 0))]>;
def TEST16mi : Ii16<0xF7, MRM0m, // flags = [mem16] & imm16
(outs), (ins i16mem:$src1, i16imm:$src2),
"test{w}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (and (loadi16 addr:$src1), imm:$src2), 0),
- (implicit EFLAGS)]>, OpSize;
+ [(set EFLAGS, (X86cmp (and (loadi16 addr:$src1), imm:$src2),
+ 0))]>, OpSize;
def TEST32mi : Ii32<0xF7, MRM0m, // flags = [mem32] & imm32
(outs), (ins i32mem:$src1, i32imm:$src2),
"test{l}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (and (loadi32 addr:$src1), imm:$src2), 0),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (and (loadi32 addr:$src1), imm:$src2),
+ 0))]>;
} // Defs = [EFLAGS]
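The TEST patterns above read as "compare the AND of the operands against zero", which matches what the instruction does: it performs an AND, discards the value and keeps only the flags. The ZF outcome is the familiar C idiom (illustrative sketch, not from the patch):

#include <cstdint>
#include <cstdio>

static bool test_sets_zf(uint32_t a, uint32_t b) {
    return (a & b) == 0;          // ZF after "test a, b"
}

int main() {
    std::printf("%d %d\n", (int)test_sets_zf(0xF0, 0x0F),   // 1: no common bits
                           (int)test_sets_zf(0xF0, 0x10));  // 0: bit 4 shared
    return 0;
}
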
@@ -3414,99 +3578,97 @@ def CMP32i32 : Ii32<0x3D, RawFrm, (outs), (ins i32imm:$src),
def CMP8rr : I<0x38, MRMDestReg,
(outs), (ins GR8 :$src1, GR8 :$src2),
"cmp{b}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp GR8:$src1, GR8:$src2), (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp GR8:$src1, GR8:$src2))]>;
def CMP16rr : I<0x39, MRMDestReg,
(outs), (ins GR16:$src1, GR16:$src2),
"cmp{w}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp GR16:$src1, GR16:$src2), (implicit EFLAGS)]>, OpSize;
+ [(set EFLAGS, (X86cmp GR16:$src1, GR16:$src2))]>, OpSize;
def CMP32rr : I<0x39, MRMDestReg,
(outs), (ins GR32:$src1, GR32:$src2),
"cmp{l}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp GR32:$src1, GR32:$src2), (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp GR32:$src1, GR32:$src2))]>;
def CMP8mr : I<0x38, MRMDestMem,
(outs), (ins i8mem :$src1, GR8 :$src2),
"cmp{b}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (loadi8 addr:$src1), GR8:$src2),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (loadi8 addr:$src1), GR8:$src2))]>;
def CMP16mr : I<0x39, MRMDestMem,
(outs), (ins i16mem:$src1, GR16:$src2),
"cmp{w}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (loadi16 addr:$src1), GR16:$src2),
- (implicit EFLAGS)]>, OpSize;
+ [(set EFLAGS, (X86cmp (loadi16 addr:$src1), GR16:$src2))]>,
+ OpSize;
def CMP32mr : I<0x39, MRMDestMem,
(outs), (ins i32mem:$src1, GR32:$src2),
"cmp{l}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (loadi32 addr:$src1), GR32:$src2),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (loadi32 addr:$src1), GR32:$src2))]>;
def CMP8rm : I<0x3A, MRMSrcMem,
(outs), (ins GR8 :$src1, i8mem :$src2),
"cmp{b}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp GR8:$src1, (loadi8 addr:$src2)),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp GR8:$src1, (loadi8 addr:$src2)))]>;
def CMP16rm : I<0x3B, MRMSrcMem,
(outs), (ins GR16:$src1, i16mem:$src2),
"cmp{w}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp GR16:$src1, (loadi16 addr:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ [(set EFLAGS, (X86cmp GR16:$src1, (loadi16 addr:$src2)))]>,
+ OpSize;
def CMP32rm : I<0x3B, MRMSrcMem,
(outs), (ins GR32:$src1, i32mem:$src2),
"cmp{l}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp GR32:$src1, (loadi32 addr:$src2)),
- (implicit EFLAGS)]>;
-def CMP8mrmrr : I<0x3A, MRMSrcReg, (outs), (ins GR8:$src1, GR8:$src2),
- "cmp{b}\t{$src2, $src1|$src1, $src2}", []>;
-def CMP16mrmrr : I<0x3B, MRMSrcReg, (outs), (ins GR16:$src1, GR16:$src2),
- "cmp{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize;
-def CMP32mrmrr : I<0x3B, MRMSrcReg, (outs), (ins GR32:$src1, GR32:$src2),
- "cmp{l}\t{$src2, $src1|$src1, $src2}", []>;
+ [(set EFLAGS, (X86cmp GR32:$src1, (loadi32 addr:$src2)))]>;
+
+// These are alternate spellings for use by the disassembler; we mark them as
+// code gen only to ensure they aren't matched by the assembler.
+let isCodeGenOnly = 1 in {
+ def CMP8rr_alt : I<0x3A, MRMSrcReg, (outs), (ins GR8:$src1, GR8:$src2),
+ "cmp{b}\t{$src2, $src1|$src1, $src2}", []>;
+ def CMP16rr_alt : I<0x3B, MRMSrcReg, (outs), (ins GR16:$src1, GR16:$src2),
+ "cmp{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize;
+ def CMP32rr_alt : I<0x3B, MRMSrcReg, (outs), (ins GR32:$src1, GR32:$src2),
+ "cmp{l}\t{$src2, $src1|$src1, $src2}", []>;
+}
+
def CMP8ri : Ii8<0x80, MRM7r,
(outs), (ins GR8:$src1, i8imm:$src2),
"cmp{b}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp GR8:$src1, imm:$src2), (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp GR8:$src1, imm:$src2))]>;
def CMP16ri : Ii16<0x81, MRM7r,
(outs), (ins GR16:$src1, i16imm:$src2),
"cmp{w}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp GR16:$src1, imm:$src2),
- (implicit EFLAGS)]>, OpSize;
+ [(set EFLAGS, (X86cmp GR16:$src1, imm:$src2))]>, OpSize;
def CMP32ri : Ii32<0x81, MRM7r,
(outs), (ins GR32:$src1, i32imm:$src2),
"cmp{l}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp GR32:$src1, imm:$src2), (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp GR32:$src1, imm:$src2))]>;
def CMP8mi : Ii8 <0x80, MRM7m,
(outs), (ins i8mem :$src1, i8imm :$src2),
"cmp{b}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (loadi8 addr:$src1), imm:$src2),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (loadi8 addr:$src1), imm:$src2))]>;
def CMP16mi : Ii16<0x81, MRM7m,
(outs), (ins i16mem:$src1, i16imm:$src2),
"cmp{w}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (loadi16 addr:$src1), imm:$src2),
- (implicit EFLAGS)]>, OpSize;
+ [(set EFLAGS, (X86cmp (loadi16 addr:$src1), imm:$src2))]>,
+ OpSize;
def CMP32mi : Ii32<0x81, MRM7m,
(outs), (ins i32mem:$src1, i32imm:$src2),
"cmp{l}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (loadi32 addr:$src1), imm:$src2),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (loadi32 addr:$src1), imm:$src2))]>;
def CMP16ri8 : Ii8<0x83, MRM7r,
(outs), (ins GR16:$src1, i16i8imm:$src2),
"cmp{w}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp GR16:$src1, i16immSExt8:$src2),
- (implicit EFLAGS)]>, OpSize;
+ [(set EFLAGS, (X86cmp GR16:$src1, i16immSExt8:$src2))]>,
+ OpSize;
def CMP16mi8 : Ii8<0x83, MRM7m,
(outs), (ins i16mem:$src1, i16i8imm:$src2),
"cmp{w}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (loadi16 addr:$src1), i16immSExt8:$src2),
- (implicit EFLAGS)]>, OpSize;
+ [(set EFLAGS, (X86cmp (loadi16 addr:$src1),
+ i16immSExt8:$src2))]>, OpSize;
def CMP32mi8 : Ii8<0x83, MRM7m,
(outs), (ins i32mem:$src1, i32i8imm:$src2),
"cmp{l}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp (loadi32 addr:$src1), i32immSExt8:$src2),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp (loadi32 addr:$src1),
+ i32immSExt8:$src2))]>;
def CMP32ri8 : Ii8<0x83, MRM7r,
(outs), (ins GR32:$src1, i32i8imm:$src2),
"cmp{l}\t{$src2, $src1|$src1, $src2}",
- [(X86cmp GR32:$src1, i32immSExt8:$src2),
- (implicit EFLAGS)]>;
+ [(set EFLAGS, (X86cmp GR32:$src1, i32immSExt8:$src2))]>;
} // Defs = [EFLAGS]
// Bit tests.
@@ -3514,12 +3676,10 @@ def CMP32ri8 : Ii8<0x83, MRM7r,
let Defs = [EFLAGS] in {
def BT16rr : I<0xA3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
- [(X86bt GR16:$src1, GR16:$src2),
- (implicit EFLAGS)]>, OpSize, TB;
+ [(set EFLAGS, (X86bt GR16:$src1, GR16:$src2))]>, OpSize, TB;
def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
- [(X86bt GR32:$src1, GR32:$src2),
- (implicit EFLAGS)]>, TB;
+ [(set EFLAGS, (X86bt GR32:$src1, GR32:$src2))]>, TB;
// Unlike with the register+register form, the memory+register form of the
// bt instruction does not ignore the high bits of the index. From ISel's
@@ -3541,23 +3701,22 @@ def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16i8imm:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
- [(X86bt GR16:$src1, i16immSExt8:$src2),
- (implicit EFLAGS)]>, OpSize, TB;
+ [(set EFLAGS, (X86bt GR16:$src1, i16immSExt8:$src2))]>,
+ OpSize, TB;
def BT32ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR32:$src1, i32i8imm:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
- [(X86bt GR32:$src1, i32immSExt8:$src2),
- (implicit EFLAGS)]>, TB;
+ [(set EFLAGS, (X86bt GR32:$src1, i32immSExt8:$src2))]>, TB;
// Note that these instructions don't need FastBTMem because that
// only applies when the other operand is in a register. When it's
// an immediate, bt is still fast.
def BT16mi8 : Ii8<0xBA, MRM4m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
"bt{w}\t{$src2, $src1|$src1, $src2}",
- [(X86bt (loadi16 addr:$src1), i16immSExt8:$src2),
- (implicit EFLAGS)]>, OpSize, TB;
+ [(set EFLAGS, (X86bt (loadi16 addr:$src1), i16immSExt8:$src2))
+ ]>, OpSize, TB;
def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
"bt{l}\t{$src2, $src1|$src1, $src2}",
- [(X86bt (loadi32 addr:$src1), i32immSExt8:$src2),
- (implicit EFLAGS)]>, TB;
+ [(set EFLAGS, (X86bt (loadi32 addr:$src1), i32immSExt8:$src2))
+ ]>, TB;
def BTC16rr : I<0xBB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2),
"btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
@@ -3719,6 +3878,7 @@ def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "",
// Thread Local Storage Instructions
//
+// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
@@ -3727,12 +3887,24 @@ let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
Uses = [ESP] in
-def TLS_addr32 : I<0, Pseudo, (outs), (ins lea32mem:$sym),
+def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
"leal\t$sym, %eax; "
"call\t___tls_get_addr at PLT",
[(X86tlsaddr tls32addr:$sym)]>,
Requires<[In32BitMode]>;
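TLS_addr32 is the 32-bit ELF general-dynamic TLS pseudo; it expands to the leal/call pair named in its asm string. As a rough source-level illustration (a sketch only: the variable and function names are illustrative, and the relocation spelling in the comment is the usual GNU syntax rather than anything defined by this patch):

__thread int counter;            /* illustrative thread-local variable */

int get_counter(void)
{
    /* With -fPIC on i386 ELF this access is typically materialized as
     *   leal  counter@tlsgd(,%ebx,1), %eax
     *   call  ___tls_get_addr@PLT     ; variable's address returned in %eax
     */
    return counter;
}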
+// Darwin TLS Support
+// For i386, the address of the thunk is passed on the stack; on return, the
+// address of the variable is in %eax. %ecx is trashed during the function
+// call. All other registers are preserved.
+let Defs = [EAX, ECX],
+ Uses = [ESP],
+ usesCustomInserter = 1 in
+def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
+ "# TLSCall_32",
+ [(X86TLSCall addr:$sym)]>,
+ Requires<[In32BitMode]>;
+
let AddedComplexity = 5, isCodeGenOnly = 1 in
def GS_MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"movl\t%gs:$src, $dst",
@@ -3758,6 +3930,20 @@ def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
// Atomic support
//
+// Memory barriers
+
+// TODO: Get this to fold the constant into the instruction.
+def OR32mrLocked : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero),
+ "lock\n\t"
+ "or{l}\t{$zero, $dst|$dst, $zero}",
+ []>, Requires<[In32BitMode]>, LOCK;
+
+let hasSideEffects = 1 in {
+def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
+ "#MEMBARRIER",
+ [(X86MemBarrier)]>, Requires<[HasSSE2]>;
+}
+
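Taken together, the two additions give the backend a full memory fence: a pseudo barrier gated on SSE2, plus a locked OR that can serve as the classic pre-SSE2 fallback of doing a locked no-op read-modify-write on a stack slot. A minimal sketch of that idiom, assuming GCC/Clang extended inline assembly (the function name is illustrative):

/* Full barrier on 32-bit x86 without SSE2: a locked RMW of a stack slot
 * orders earlier loads and stores with respect to later ones. */
static inline void full_barrier(void)
{
    __asm__ __volatile__("lock; orl $0, (%%esp)" ::: "memory", "cc");
}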
// Atomic swap. These are just normal xchg instructions. But since a memory
// operand is referenced, the atomicity is ensured.
let Constraints = "$val = $dst" in {
@@ -3840,12 +4026,14 @@ def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
"xadd{l}\t{$src, $dst|$dst, $src}", []>, TB;
+let mayLoad = 1, mayStore = 1 in {
def XADD8rm : I<0xC0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
"xadd{b}\t{$src, $dst|$dst, $src}", []>, TB;
def XADD16rm : I<0xC1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"xadd{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
def XADD32rm : I<0xC1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"xadd{l}\t{$src, $dst|$dst, $src}", []>, TB;
+}
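The new mayLoad/mayStore flags simply record that the memory forms of XADD read and write their operand. With a LOCK prefix this is the instruction behind atomic fetch-and-add; a small sketch using the GCC/Clang __atomic builtins (names are illustrative, and the lowering mentioned is the typical one, not something stated by the patch):

/* Fetch-and-add whose old value is used: commonly lowered to "lock xaddl". */
int take_ticket(int *next)
{
    return __atomic_fetch_add(next, 1, __ATOMIC_SEQ_CST);
}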
def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
"cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB;
@@ -3854,12 +4042,14 @@ def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
"cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB;
+let mayLoad = 1, mayStore = 1 in {
def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
"cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB;
def CMPXCHG16rm : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
def CMPXCHG32rm : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB;
+}
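Likewise for the memory forms of CMPXCHG, which with LOCK implement compare-and-swap. A companion sketch with the same builtins:

/* Compare-and-swap; on x86 this is expected to become "lock cmpxchgl". */
int try_claim(int *flag)
{
    int expected = 0;
    return __atomic_compare_exchange_n(flag, &expected, 1, 0 /* strong */,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}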
let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in
def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst),
@@ -3867,7 +4057,7 @@ def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst),
// Optimized codegen when the non-memory output is not used.
// FIXME: Use normal add / sub instructions and add lock prefix dynamically.
-let Defs = [EFLAGS] in {
+let Defs = [EFLAGS], mayLoad = 1, mayStore = 1 in {
def LOCK_ADD8mr : I<0x00, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
"lock\n\t"
"add{b}\t{$src2, $dst|$dst, $src2}", []>, LOCK;
@@ -4252,7 +4442,7 @@ def VMWRITE32rr : I<0x79, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
// 0F 01 C4
def VMXOFF : I<0x01, MRM_C4, (outs), (ins), "vmxoff", []>, TB;
def VMXON : I<0xC7, MRM6m, (outs), (ins i64mem:$vmxon),
- "vmxon\t{$vmxon}", []>, XD;
+ "vmxon\t{$vmxon}", []>, XS;
//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
@@ -4286,14 +4476,24 @@ def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
// Calls
// tailcall stuff
-def : Pat<(X86tcret GR32:$dst, imm:$off),
- (TCRETURNri GR32:$dst, imm:$off)>;
+def : Pat<(X86tcret GR32_TC:$dst, imm:$off),
+ (TCRETURNri GR32_TC:$dst, imm:$off)>,
+ Requires<[In32BitMode]>;
+
+// FIXME: This is disabled for 32-bit PIC mode because the global base
+// register which is part of the address mode may be assigned a
+// callee-saved register.
+def : Pat<(X86tcret (load addr:$dst), imm:$off),
+ (TCRETURNmi addr:$dst, imm:$off)>,
+ Requires<[In32BitMode, IsNotPIC]>;
def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
- (TCRETURNdi texternalsym:$dst, imm:$off)>;
+ (TCRETURNdi texternalsym:$dst, imm:$off)>,
+ Requires<[In32BitMode]>;
def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
- (TCRETURNdi texternalsym:$dst, imm:$off)>;
+ (TCRETURNdi texternalsym:$dst, imm:$off)>,
+ Requires<[In32BitMode]>;
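The X86tcret patterns select tail-call returns; the register form is now restricted to GR32_TC and the new memory form is kept out of 32-bit PIC for the reason given in the FIXME. At the C level, the kind of call these patterns cover is a sibling call such as the following (a sketch; whether it is actually tail-called depends on optimization level and ABI, and the names are illustrative):

int helper(int);          /* illustrative external function */

/* At -O2 this usually compiles to "jmp helper" instead of call/ret,
 * i.e. roughly the tail-call return the TCRETURN* patterns select. */
int wrapper(int x)
{
    return helper(x + 1);
}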
// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
@@ -4325,11 +4525,11 @@ def : Pat<(subc GR32:$src1, i32immSExt8:$src2),
// Comparisons.
// TEST R,R is smaller than CMP R,0
-def : Pat<(parallel (X86cmp GR8:$src1, 0), (implicit EFLAGS)),
+def : Pat<(X86cmp GR8:$src1, 0),
(TEST8rr GR8:$src1, GR8:$src1)>;
-def : Pat<(parallel (X86cmp GR16:$src1, 0), (implicit EFLAGS)),
+def : Pat<(X86cmp GR16:$src1, 0),
(TEST16rr GR16:$src1, GR16:$src1)>;
-def : Pat<(parallel (X86cmp GR32:$src1, 0), (implicit EFLAGS)),
+def : Pat<(X86cmp GR32:$src1, 0),
(TEST32rr GR32:$src1, GR32:$src1)>;
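"TEST R,R is smaller than CMP R,0" is purely an encoding observation: testl %eax,%eax is two bytes (85 C0) while cmpl $0,%eax takes three (83 F8 00), and both set the flags a zero test needs. In C this is simply:

/* A zero test: compilers emit a register self-test ("testl %eax, %eax")
 * followed by sete/je, rather than a compare against an immediate 0. */
int is_zero(int x)
{
    return x == 0;
}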
// Conditional moves with folded loads with operands swapped and conditions
@@ -4416,7 +4616,11 @@ def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (MOVZX16rr8 GR8 :$src)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;
-def : Pat<(i32 (anyext GR16:$src)), (MOVZX32rr16 GR16:$src)>;
+
+// Except for i16 -> i32 since isel expects i16 ops to be promoted to i32.
+def : Pat<(i32 (anyext GR16:$src)),
+ (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;
+
//===----------------------------------------------------------------------===//
// Some peepholes
@@ -4435,76 +4639,81 @@ def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
- (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, x86_subreg_16bit))>;
+ (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
(MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
GR32_ABCD)),
- x86_subreg_8bit))>,
+ sub_8bit))>,
Requires<[In32BitMode]>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
(MOVZX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src1,
GR16_ABCD)),
- x86_subreg_8bit))>,
+ sub_8bit))>,
Requires<[In32BitMode]>;
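These peepholes replace masks with 0xffff or 0xff by zero-extending moves of a sub-register, using the renamed sub_16bit/sub_8bit indices. The corresponding C-level idioms (a sketch; function names are illustrative):

/* r & 0xffff: typically a single "movzwl"-style zero-extension of the
 * 16-bit sub-register instead of an andl with an immediate. */
unsigned low16(unsigned r) { return r & 0xffffu; }

/* r & 0xff: likewise a "movzbl"; in 32-bit mode the value must sit in an
 * ABCD register first, hence the COPY_TO_REGCLASS in the pattern. */
unsigned low8(unsigned r)  { return r & 0xffu; }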
// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
- (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, x86_subreg_16bit))>;
+ (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
(MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
GR32_ABCD)),
- x86_subreg_8bit))>,
+ sub_8bit))>,
Requires<[In32BitMode]>;
def : Pat<(sext_inreg GR16:$src, i8),
(MOVSX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
GR16_ABCD)),
- x86_subreg_8bit))>,
+ sub_8bit))>,
Requires<[In32BitMode]>;
// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
- (EXTRACT_SUBREG GR32:$src, x86_subreg_16bit)>;
+ (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
(EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
- x86_subreg_8bit)>,
+ sub_8bit)>,
Requires<[In32BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
(EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- x86_subreg_8bit)>,
+ sub_8bit)>,
Requires<[In32BitMode]>;
// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
- (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- x86_subreg_8bit_hi)>,
+ (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
+ sub_8bit_hi)>,
Requires<[In32BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
- (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
- x86_subreg_8bit_hi)>,
+ (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
+ sub_8bit_hi)>,
Requires<[In32BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
(EXTRACT_SUBREG
(MOVZX32rr8
(EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
- x86_subreg_8bit_hi)),
- x86_subreg_16bit)>,
+ sub_8bit_hi)),
+ sub_16bit)>,
Requires<[In32BitMode]>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
(MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
GR16_ABCD)),
- x86_subreg_8bit_hi))>,
+ sub_8bit_hi))>,
Requires<[In32BitMode]>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
(MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
GR16_ABCD)),
- x86_subreg_8bit_hi))>,
+ sub_8bit_hi))>,
Requires<[In32BitMode]>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
(MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
GR32_ABCD)),
- x86_subreg_8bit_hi))>,
+ sub_8bit_hi))>,
+ Requires<[In32BitMode]>;
+def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
+ (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
+ GR32_ABCD)),
+ sub_8bit_hi))>,
Requires<[In32BitMode]>;
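The h-register tricks read bits 15..8 through AH/BH/CH/DH (sub_8bit_hi) instead of shifting and masking; the newly added pattern also catches the mask-then-shift spelling. Roughly, in C (sketch):

/* Extracting byte 1: with the value in an ABCD register this can become
 * "movzbl %ah, %eax" instead of a shift followed by a mask. */
unsigned byte1(unsigned x)
{
    return (x >> 8) & 0xffu;
}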
// (shl x, 1) ==> (add x, x)
@@ -4552,131 +4761,27 @@ def : Pat<(store (sra (loadi16 addr:$dst), (and CL, 31)), addr:$dst),
def : Pat<(store (sra (loadi32 addr:$dst), (and CL, 31)), addr:$dst),
(SAR32mCL addr:$dst)>;
-// (or (x >> c) | (y << (32 - c))) ==> (shrd32 x, y, c)
-def : Pat<(or (srl GR32:$src1, CL:$amt),
- (shl GR32:$src2, (sub 32, CL:$amt))),
- (SHRD32rrCL GR32:$src1, GR32:$src2)>;
-
-def : Pat<(store (or (srl (loadi32 addr:$dst), CL:$amt),
- (shl GR32:$src2, (sub 32, CL:$amt))), addr:$dst),
- (SHRD32mrCL addr:$dst, GR32:$src2)>;
-
-def : Pat<(or (srl GR32:$src1, (i8 (trunc ECX:$amt))),
- (shl GR32:$src2, (i8 (trunc (sub 32, ECX:$amt))))),
- (SHRD32rrCL GR32:$src1, GR32:$src2)>;
-
-def : Pat<(store (or (srl (loadi32 addr:$dst), (i8 (trunc ECX:$amt))),
- (shl GR32:$src2, (i8 (trunc (sub 32, ECX:$amt))))),
- addr:$dst),
- (SHRD32mrCL addr:$dst, GR32:$src2)>;
-
-def : Pat<(shrd GR32:$src1, (i8 imm:$amt1), GR32:$src2, (i8 imm/*:$amt2*/)),
- (SHRD32rri8 GR32:$src1, GR32:$src2, (i8 imm:$amt1))>;
-
-def : Pat<(store (shrd (loadi32 addr:$dst), (i8 imm:$amt1),
- GR32:$src2, (i8 imm/*:$amt2*/)), addr:$dst),
- (SHRD32mri8 addr:$dst, GR32:$src2, (i8 imm:$amt1))>;
-
-// (or (x << c) | (y >> (32 - c))) ==> (shld32 x, y, c)
-def : Pat<(or (shl GR32:$src1, CL:$amt),
- (srl GR32:$src2, (sub 32, CL:$amt))),
- (SHLD32rrCL GR32:$src1, GR32:$src2)>;
-
-def : Pat<(store (or (shl (loadi32 addr:$dst), CL:$amt),
- (srl GR32:$src2, (sub 32, CL:$amt))), addr:$dst),
- (SHLD32mrCL addr:$dst, GR32:$src2)>;
-
-def : Pat<(or (shl GR32:$src1, (i8 (trunc ECX:$amt))),
- (srl GR32:$src2, (i8 (trunc (sub 32, ECX:$amt))))),
- (SHLD32rrCL GR32:$src1, GR32:$src2)>;
-
-def : Pat<(store (or (shl (loadi32 addr:$dst), (i8 (trunc ECX:$amt))),
- (srl GR32:$src2, (i8 (trunc (sub 32, ECX:$amt))))),
- addr:$dst),
- (SHLD32mrCL addr:$dst, GR32:$src2)>;
-
-def : Pat<(shld GR32:$src1, (i8 imm:$amt1), GR32:$src2, (i8 imm/*:$amt2*/)),
- (SHLD32rri8 GR32:$src1, GR32:$src2, (i8 imm:$amt1))>;
-
-def : Pat<(store (shld (loadi32 addr:$dst), (i8 imm:$amt1),
- GR32:$src2, (i8 imm/*:$amt2*/)), addr:$dst),
- (SHLD32mri8 addr:$dst, GR32:$src2, (i8 imm:$amt1))>;
-
-// (or (x >> c) | (y << (16 - c))) ==> (shrd16 x, y, c)
-def : Pat<(or (srl GR16:$src1, CL:$amt),
- (shl GR16:$src2, (sub 16, CL:$amt))),
- (SHRD16rrCL GR16:$src1, GR16:$src2)>;
-
-def : Pat<(store (or (srl (loadi16 addr:$dst), CL:$amt),
- (shl GR16:$src2, (sub 16, CL:$amt))), addr:$dst),
- (SHRD16mrCL addr:$dst, GR16:$src2)>;
-
-def : Pat<(or (srl GR16:$src1, (i8 (trunc CX:$amt))),
- (shl GR16:$src2, (i8 (trunc (sub 16, CX:$amt))))),
- (SHRD16rrCL GR16:$src1, GR16:$src2)>;
-
-def : Pat<(store (or (srl (loadi16 addr:$dst), (i8 (trunc CX:$amt))),
- (shl GR16:$src2, (i8 (trunc (sub 16, CX:$amt))))),
- addr:$dst),
- (SHRD16mrCL addr:$dst, GR16:$src2)>;
-
-def : Pat<(shrd GR16:$src1, (i8 imm:$amt1), GR16:$src2, (i8 imm/*:$amt2*/)),
- (SHRD16rri8 GR16:$src1, GR16:$src2, (i8 imm:$amt1))>;
-
-def : Pat<(store (shrd (loadi16 addr:$dst), (i8 imm:$amt1),
- GR16:$src2, (i8 imm/*:$amt2*/)), addr:$dst),
- (SHRD16mri8 addr:$dst, GR16:$src2, (i8 imm:$amt1))>;
-
-// (or (x << c) | (y >> (16 - c))) ==> (shld16 x, y, c)
-def : Pat<(or (shl GR16:$src1, CL:$amt),
- (srl GR16:$src2, (sub 16, CL:$amt))),
- (SHLD16rrCL GR16:$src1, GR16:$src2)>;
-
-def : Pat<(store (or (shl (loadi16 addr:$dst), CL:$amt),
- (srl GR16:$src2, (sub 16, CL:$amt))), addr:$dst),
- (SHLD16mrCL addr:$dst, GR16:$src2)>;
-
-def : Pat<(or (shl GR16:$src1, (i8 (trunc CX:$amt))),
- (srl GR16:$src2, (i8 (trunc (sub 16, CX:$amt))))),
- (SHLD16rrCL GR16:$src1, GR16:$src2)>;
-
-def : Pat<(store (or (shl (loadi16 addr:$dst), (i8 (trunc CX:$amt))),
- (srl GR16:$src2, (i8 (trunc (sub 16, CX:$amt))))),
- addr:$dst),
- (SHLD16mrCL addr:$dst, GR16:$src2)>;
-
-def : Pat<(shld GR16:$src1, (i8 imm:$amt1), GR16:$src2, (i8 imm/*:$amt2*/)),
- (SHLD16rri8 GR16:$src1, GR16:$src2, (i8 imm:$amt1))>;
-
-def : Pat<(store (shld (loadi16 addr:$dst), (i8 imm:$amt1),
- GR16:$src2, (i8 imm/*:$amt2*/)), addr:$dst),
- (SHLD16mri8 addr:$dst, GR16:$src2, (i8 imm:$amt1))>;
-
// (anyext (setcc_carry)) -> (setcc_carry)
def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
(SETB_C16r)>;
def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
(SETB_C32r)>;
+def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
+ (SETB_C32r)>;
// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
let AddedComplexity = 5 in { // Try this before the selecting to OR
-def : Pat<(parallel (or_is_add GR16:$src1, imm:$src2),
- (implicit EFLAGS)),
+def : Pat<(or_is_add GR16:$src1, imm:$src2),
(ADD16ri GR16:$src1, imm:$src2)>;
-def : Pat<(parallel (or_is_add GR32:$src1, imm:$src2),
- (implicit EFLAGS)),
+def : Pat<(or_is_add GR32:$src1, imm:$src2),
(ADD32ri GR32:$src1, imm:$src2)>;
-def : Pat<(parallel (or_is_add GR16:$src1, i16immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(or_is_add GR16:$src1, i16immSExt8:$src2),
(ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(parallel (or_is_add GR32:$src1, i32immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(or_is_add GR32:$src1, i32immSExt8:$src2),
(ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
-def : Pat<(parallel (or_is_add GR16:$src1, GR16:$src2),
- (implicit EFLAGS)),
+def : Pat<(or_is_add GR16:$src1, GR16:$src2),
(ADD16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(parallel (or_is_add GR32:$src1, GR32:$src2),
- (implicit EFLAGS)),
+def : Pat<(or_is_add GR32:$src1, GR32:$src2),
(ADD32rr GR32:$src1, GR32:$src2)>;
} // AddedComplexity
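or_is_add fires only when the two operands are known to share no set bits, in which case OR and ADD produce the same value and the ADD form can later fold into lea. A self-contained C example where that disjointness is provable from the code (sketch; names are illustrative):

/* The low four bits of (hi << 4) are known to be zero and (lo & 0xf) fits
 * inside them, so the OR is equivalent to an ADD and may be selected as
 * an add or folded into an lea. */
unsigned pack_nibbles(unsigned hi, unsigned lo)
{
    return (hi << 4) | (lo & 0xfu);
}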
@@ -4684,483 +4789,175 @@ def : Pat<(parallel (or_is_add GR32:$src1, GR32:$src2),
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//
-// Register-Register Addition with EFLAGS result
-def : Pat<(parallel (X86add_flag GR8:$src1, GR8:$src2),
- (implicit EFLAGS)),
- (ADD8rr GR8:$src1, GR8:$src2)>;
-def : Pat<(parallel (X86add_flag GR16:$src1, GR16:$src2),
- (implicit EFLAGS)),
- (ADD16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(parallel (X86add_flag GR32:$src1, GR32:$src2),
- (implicit EFLAGS)),
- (ADD32rr GR32:$src1, GR32:$src2)>;
+// add reg, reg
+def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr GR8 :$src1, GR8 :$src2)>;
+def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;
-// Register-Memory Addition with EFLAGS result
-def : Pat<(parallel (X86add_flag GR8:$src1, (loadi8 addr:$src2)),
- (implicit EFLAGS)),
+// add reg, mem
+def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
(ADD8rm GR8:$src1, addr:$src2)>;
-def : Pat<(parallel (X86add_flag GR16:$src1, (loadi16 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
(ADD16rm GR16:$src1, addr:$src2)>;
-def : Pat<(parallel (X86add_flag GR32:$src1, (loadi32 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
(ADD32rm GR32:$src1, addr:$src2)>;
-// Register-Integer Addition with EFLAGS result
-def : Pat<(parallel (X86add_flag GR8:$src1, imm:$src2),
- (implicit EFLAGS)),
- (ADD8ri GR8:$src1, imm:$src2)>;
-def : Pat<(parallel (X86add_flag GR16:$src1, imm:$src2),
- (implicit EFLAGS)),
- (ADD16ri GR16:$src1, imm:$src2)>;
-def : Pat<(parallel (X86add_flag GR32:$src1, imm:$src2),
- (implicit EFLAGS)),
- (ADD32ri GR32:$src1, imm:$src2)>;
-def : Pat<(parallel (X86add_flag GR16:$src1, i16immSExt8:$src2),
- (implicit EFLAGS)),
+// add reg, imm
+def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri GR8:$src1 , imm:$src2)>;
+def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
+def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
+def : Pat<(add GR16:$src1, i16immSExt8:$src2),
(ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(parallel (X86add_flag GR32:$src1, i32immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(add GR32:$src1, i32immSExt8:$src2),
(ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
-// Memory-Register Addition with EFLAGS result
-def : Pat<(parallel (store (X86add_flag (loadi8 addr:$dst), GR8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (ADD8mr addr:$dst, GR8:$src2)>;
-def : Pat<(parallel (store (X86add_flag (loadi16 addr:$dst), GR16:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (ADD16mr addr:$dst, GR16:$src2)>;
-def : Pat<(parallel (store (X86add_flag (loadi32 addr:$dst), GR32:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (ADD32mr addr:$dst, GR32:$src2)>;
-
-// Memory-Integer Addition with EFLAGS result
-def : Pat<(parallel (store (X86add_flag (loadi8 addr:$dst), imm:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (ADD8mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86add_flag (loadi16 addr:$dst), imm:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (ADD16mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86add_flag (loadi32 addr:$dst), imm:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (ADD32mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86add_flag (loadi16 addr:$dst), i16immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (ADD16mi8 addr:$dst, i16immSExt8:$src2)>;
-def : Pat<(parallel (store (X86add_flag (loadi32 addr:$dst), i32immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (ADD32mi8 addr:$dst, i32immSExt8:$src2)>;
-
-// Register-Register Subtraction with EFLAGS result
-def : Pat<(parallel (X86sub_flag GR8:$src1, GR8:$src2),
- (implicit EFLAGS)),
- (SUB8rr GR8:$src1, GR8:$src2)>;
-def : Pat<(parallel (X86sub_flag GR16:$src1, GR16:$src2),
- (implicit EFLAGS)),
- (SUB16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(parallel (X86sub_flag GR32:$src1, GR32:$src2),
- (implicit EFLAGS)),
- (SUB32rr GR32:$src1, GR32:$src2)>;
+// sub reg, reg
+def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr GR8 :$src1, GR8 :$src2)>;
+def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;
-// Register-Memory Subtraction with EFLAGS result
-def : Pat<(parallel (X86sub_flag GR8:$src1, (loadi8 addr:$src2)),
- (implicit EFLAGS)),
+// sub reg, mem
+def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
(SUB8rm GR8:$src1, addr:$src2)>;
-def : Pat<(parallel (X86sub_flag GR16:$src1, (loadi16 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
(SUB16rm GR16:$src1, addr:$src2)>;
-def : Pat<(parallel (X86sub_flag GR32:$src1, (loadi32 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
(SUB32rm GR32:$src1, addr:$src2)>;
-// Register-Integer Subtraction with EFLAGS result
-def : Pat<(parallel (X86sub_flag GR8:$src1, imm:$src2),
- (implicit EFLAGS)),
+// sub reg, imm
+def : Pat<(sub GR8:$src1, imm:$src2),
(SUB8ri GR8:$src1, imm:$src2)>;
-def : Pat<(parallel (X86sub_flag GR16:$src1, imm:$src2),
- (implicit EFLAGS)),
+def : Pat<(sub GR16:$src1, imm:$src2),
(SUB16ri GR16:$src1, imm:$src2)>;
-def : Pat<(parallel (X86sub_flag GR32:$src1, imm:$src2),
- (implicit EFLAGS)),
+def : Pat<(sub GR32:$src1, imm:$src2),
(SUB32ri GR32:$src1, imm:$src2)>;
-def : Pat<(parallel (X86sub_flag GR16:$src1, i16immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
(SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(parallel (X86sub_flag GR32:$src1, i32immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
(SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
-// Memory-Register Subtraction with EFLAGS result
-def : Pat<(parallel (store (X86sub_flag (loadi8 addr:$dst), GR8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (SUB8mr addr:$dst, GR8:$src2)>;
-def : Pat<(parallel (store (X86sub_flag (loadi16 addr:$dst), GR16:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (SUB16mr addr:$dst, GR16:$src2)>;
-def : Pat<(parallel (store (X86sub_flag (loadi32 addr:$dst), GR32:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (SUB32mr addr:$dst, GR32:$src2)>;
-
-// Memory-Integer Subtraction with EFLAGS result
-def : Pat<(parallel (store (X86sub_flag (loadi8 addr:$dst), imm:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (SUB8mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86sub_flag (loadi16 addr:$dst), imm:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (SUB16mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86sub_flag (loadi32 addr:$dst), imm:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (SUB32mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86sub_flag (loadi16 addr:$dst), i16immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (SUB16mi8 addr:$dst, i16immSExt8:$src2)>;
-def : Pat<(parallel (store (X86sub_flag (loadi32 addr:$dst), i32immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (SUB32mi8 addr:$dst, i32immSExt8:$src2)>;
-
-
-// Register-Register Signed Integer Multiply with EFLAGS result
-def : Pat<(parallel (X86smul_flag GR16:$src1, GR16:$src2),
- (implicit EFLAGS)),
+// mul reg, reg
+def : Pat<(mul GR16:$src1, GR16:$src2),
(IMUL16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(parallel (X86smul_flag GR32:$src1, GR32:$src2),
- (implicit EFLAGS)),
+def : Pat<(mul GR32:$src1, GR32:$src2),
(IMUL32rr GR32:$src1, GR32:$src2)>;
-// Register-Memory Signed Integer Multiply with EFLAGS result
-def : Pat<(parallel (X86smul_flag GR16:$src1, (loadi16 addr:$src2)),
- (implicit EFLAGS)),
+// mul reg, mem
+def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
(IMUL16rm GR16:$src1, addr:$src2)>;
-def : Pat<(parallel (X86smul_flag GR32:$src1, (loadi32 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
(IMUL32rm GR32:$src1, addr:$src2)>;
-// Register-Integer Signed Integer Multiply with EFLAGS result
-def : Pat<(parallel (X86smul_flag GR16:$src1, imm:$src2),
- (implicit EFLAGS)),
+// mul reg, imm
+def : Pat<(mul GR16:$src1, imm:$src2),
(IMUL16rri GR16:$src1, imm:$src2)>;
-def : Pat<(parallel (X86smul_flag GR32:$src1, imm:$src2),
- (implicit EFLAGS)),
+def : Pat<(mul GR32:$src1, imm:$src2),
(IMUL32rri GR32:$src1, imm:$src2)>;
-def : Pat<(parallel (X86smul_flag GR16:$src1, i16immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
(IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(parallel (X86smul_flag GR32:$src1, i32immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
(IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;
-// Memory-Integer Signed Integer Multiply with EFLAGS result
-def : Pat<(parallel (X86smul_flag (loadi16 addr:$src1), imm:$src2),
- (implicit EFLAGS)),
+// reg = mul mem, imm
+def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
(IMUL16rmi addr:$src1, imm:$src2)>;
-def : Pat<(parallel (X86smul_flag (loadi32 addr:$src1), imm:$src2),
- (implicit EFLAGS)),
+def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
(IMUL32rmi addr:$src1, imm:$src2)>;
-def : Pat<(parallel (X86smul_flag (loadi16 addr:$src1), i16immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
(IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
-def : Pat<(parallel (X86smul_flag (loadi32 addr:$src1), i32immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
(IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
// Optimize multiply by 2 with EFLAGS result.
let AddedComplexity = 2 in {
-def : Pat<(parallel (X86smul_flag GR16:$src1, 2),
- (implicit EFLAGS)),
- (ADD16rr GR16:$src1, GR16:$src1)>;
-
-def : Pat<(parallel (X86smul_flag GR32:$src1, 2),
- (implicit EFLAGS)),
- (ADD32rr GR32:$src1, GR32:$src1)>;
+def : Pat<(X86smul_flag GR16:$src1, 2), (ADD16rr GR16:$src1, GR16:$src1)>;
+def : Pat<(X86smul_flag GR32:$src1, 2), (ADD32rr GR32:$src1, GR32:$src1)>;
}
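The multiply-by-two patterns keep the flag-producing multiply node but select an add of the register to itself, which is smaller and cheaper than an imul. In C terms this is just:

/* x * 2 == x + x; the AddedComplexity = 2 patterns above prefer
 * "addl %eax, %eax" over imul for the flag-producing multiply. */
int twice(int x)
{
    return x * 2;
}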
-// INC and DEC with EFLAGS result. Note that these do not set CF.
-def : Pat<(parallel (X86inc_flag GR8:$src), (implicit EFLAGS)),
- (INC8r GR8:$src)>;
-def : Pat<(parallel (store (i8 (X86inc_flag (loadi8 addr:$dst))), addr:$dst),
- (implicit EFLAGS)),
- (INC8m addr:$dst)>;
-def : Pat<(parallel (X86dec_flag GR8:$src), (implicit EFLAGS)),
- (DEC8r GR8:$src)>;
-def : Pat<(parallel (store (i8 (X86dec_flag (loadi8 addr:$dst))), addr:$dst),
- (implicit EFLAGS)),
- (DEC8m addr:$dst)>;
-
-def : Pat<(parallel (X86inc_flag GR16:$src), (implicit EFLAGS)),
- (INC16r GR16:$src)>, Requires<[In32BitMode]>;
-def : Pat<(parallel (store (i16 (X86inc_flag (loadi16 addr:$dst))), addr:$dst),
- (implicit EFLAGS)),
- (INC16m addr:$dst)>, Requires<[In32BitMode]>;
-def : Pat<(parallel (X86dec_flag GR16:$src), (implicit EFLAGS)),
- (DEC16r GR16:$src)>, Requires<[In32BitMode]>;
-def : Pat<(parallel (store (i16 (X86dec_flag (loadi16 addr:$dst))), addr:$dst),
- (implicit EFLAGS)),
- (DEC16m addr:$dst)>, Requires<[In32BitMode]>;
-
-def : Pat<(parallel (X86inc_flag GR32:$src), (implicit EFLAGS)),
- (INC32r GR32:$src)>, Requires<[In32BitMode]>;
-def : Pat<(parallel (store (i32 (X86inc_flag (loadi32 addr:$dst))), addr:$dst),
- (implicit EFLAGS)),
- (INC32m addr:$dst)>, Requires<[In32BitMode]>;
-def : Pat<(parallel (X86dec_flag GR32:$src), (implicit EFLAGS)),
- (DEC32r GR32:$src)>, Requires<[In32BitMode]>;
-def : Pat<(parallel (store (i32 (X86dec_flag (loadi32 addr:$dst))), addr:$dst),
- (implicit EFLAGS)),
- (DEC32m addr:$dst)>, Requires<[In32BitMode]>;
-
-// Register-Register Or with EFLAGS result
-def : Pat<(parallel (X86or_flag GR8:$src1, GR8:$src2),
- (implicit EFLAGS)),
- (OR8rr GR8:$src1, GR8:$src2)>;
-def : Pat<(parallel (X86or_flag GR16:$src1, GR16:$src2),
- (implicit EFLAGS)),
- (OR16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(parallel (X86or_flag GR32:$src1, GR32:$src2),
- (implicit EFLAGS)),
- (OR32rr GR32:$src1, GR32:$src2)>;
-
-// Register-Memory Or with EFLAGS result
-def : Pat<(parallel (X86or_flag GR8:$src1, (loadi8 addr:$src2)),
- (implicit EFLAGS)),
+// Patterns for nodes that do not produce flags, for instructions that do.
+
+// Increment reg.
+def : Pat<(add GR8:$src1 , 1), (INC8r GR8:$src1)>;
+def : Pat<(add GR16:$src1, 1), (INC16r GR16:$src1)>, Requires<[In32BitMode]>;
+def : Pat<(add GR32:$src1, 1), (INC32r GR32:$src1)>, Requires<[In32BitMode]>;
+
+// Decrement reg.
+def : Pat<(add GR8:$src1 , -1), (DEC8r GR8:$src1)>;
+def : Pat<(add GR16:$src1, -1), (DEC16r GR16:$src1)>, Requires<[In32BitMode]>;
+def : Pat<(add GR32:$src1, -1), (DEC32r GR32:$src1)>, Requires<[In32BitMode]>;
+
+// or reg/reg.
+def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr GR8 :$src1, GR8 :$src2)>;
+def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
+
+// or reg/mem
+def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
(OR8rm GR8:$src1, addr:$src2)>;
-def : Pat<(parallel (X86or_flag GR16:$src1, (loadi16 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
(OR16rm GR16:$src1, addr:$src2)>;
-def : Pat<(parallel (X86or_flag GR32:$src1, (loadi32 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
(OR32rm GR32:$src1, addr:$src2)>;
-// Register-Integer Or with EFLAGS result
-def : Pat<(parallel (X86or_flag GR8:$src1, imm:$src2),
- (implicit EFLAGS)),
- (OR8ri GR8:$src1, imm:$src2)>;
-def : Pat<(parallel (X86or_flag GR16:$src1, imm:$src2),
- (implicit EFLAGS)),
- (OR16ri GR16:$src1, imm:$src2)>;
-def : Pat<(parallel (X86or_flag GR32:$src1, imm:$src2),
- (implicit EFLAGS)),
- (OR32ri GR32:$src1, imm:$src2)>;
-def : Pat<(parallel (X86or_flag GR16:$src1, i16immSExt8:$src2),
- (implicit EFLAGS)),
+// or reg/imm
+def : Pat<(or GR8:$src1 , imm:$src2), (OR8ri GR8 :$src1, imm:$src2)>;
+def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
+def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
+def : Pat<(or GR16:$src1, i16immSExt8:$src2),
(OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(parallel (X86or_flag GR32:$src1, i32immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(or GR32:$src1, i32immSExt8:$src2),
(OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
-// Memory-Register Or with EFLAGS result
-def : Pat<(parallel (store (X86or_flag (loadi8 addr:$dst), GR8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (OR8mr addr:$dst, GR8:$src2)>;
-def : Pat<(parallel (store (X86or_flag (loadi16 addr:$dst), GR16:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (OR16mr addr:$dst, GR16:$src2)>;
-def : Pat<(parallel (store (X86or_flag (loadi32 addr:$dst), GR32:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (OR32mr addr:$dst, GR32:$src2)>;
-
-// Memory-Integer Or with EFLAGS result
-def : Pat<(parallel (store (X86or_flag (loadi8 addr:$dst), imm:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (OR8mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86or_flag (loadi16 addr:$dst), imm:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (OR16mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86or_flag (loadi32 addr:$dst), imm:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (OR32mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86or_flag (loadi16 addr:$dst), i16immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (OR16mi8 addr:$dst, i16immSExt8:$src2)>;
-def : Pat<(parallel (store (X86or_flag (loadi32 addr:$dst), i32immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (OR32mi8 addr:$dst, i32immSExt8:$src2)>;
-
-// Register-Register XOr with EFLAGS result
-def : Pat<(parallel (X86xor_flag GR8:$src1, GR8:$src2),
- (implicit EFLAGS)),
- (XOR8rr GR8:$src1, GR8:$src2)>;
-def : Pat<(parallel (X86xor_flag GR16:$src1, GR16:$src2),
- (implicit EFLAGS)),
- (XOR16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(parallel (X86xor_flag GR32:$src1, GR32:$src2),
- (implicit EFLAGS)),
- (XOR32rr GR32:$src1, GR32:$src2)>;
-
-// Register-Memory XOr with EFLAGS result
-def : Pat<(parallel (X86xor_flag GR8:$src1, (loadi8 addr:$src2)),
- (implicit EFLAGS)),
+// xor reg/reg
+def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr GR8 :$src1, GR8 :$src2)>;
+def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
+
+// xor reg/mem
+def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
(XOR8rm GR8:$src1, addr:$src2)>;
-def : Pat<(parallel (X86xor_flag GR16:$src1, (loadi16 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
(XOR16rm GR16:$src1, addr:$src2)>;
-def : Pat<(parallel (X86xor_flag GR32:$src1, (loadi32 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
(XOR32rm GR32:$src1, addr:$src2)>;
-// Register-Integer XOr with EFLAGS result
-def : Pat<(parallel (X86xor_flag GR8:$src1, imm:$src2),
- (implicit EFLAGS)),
+// xor reg/imm
+def : Pat<(xor GR8:$src1, imm:$src2),
(XOR8ri GR8:$src1, imm:$src2)>;
-def : Pat<(parallel (X86xor_flag GR16:$src1, imm:$src2),
- (implicit EFLAGS)),
+def : Pat<(xor GR16:$src1, imm:$src2),
(XOR16ri GR16:$src1, imm:$src2)>;
-def : Pat<(parallel (X86xor_flag GR32:$src1, imm:$src2),
- (implicit EFLAGS)),
+def : Pat<(xor GR32:$src1, imm:$src2),
(XOR32ri GR32:$src1, imm:$src2)>;
-def : Pat<(parallel (X86xor_flag GR16:$src1, i16immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
(XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(parallel (X86xor_flag GR32:$src1, i32immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
(XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
-// Memory-Register XOr with EFLAGS result
-def : Pat<(parallel (store (X86xor_flag (loadi8 addr:$dst), GR8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (XOR8mr addr:$dst, GR8:$src2)>;
-def : Pat<(parallel (store (X86xor_flag (loadi16 addr:$dst), GR16:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (XOR16mr addr:$dst, GR16:$src2)>;
-def : Pat<(parallel (store (X86xor_flag (loadi32 addr:$dst), GR32:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (XOR32mr addr:$dst, GR32:$src2)>;
-
-// Memory-Integer XOr with EFLAGS result
-def : Pat<(parallel (store (X86xor_flag (loadi8 addr:$dst), imm:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (XOR8mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86xor_flag (loadi16 addr:$dst), imm:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (XOR16mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86xor_flag (loadi32 addr:$dst), imm:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (XOR32mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86xor_flag (loadi16 addr:$dst), i16immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (XOR16mi8 addr:$dst, i16immSExt8:$src2)>;
-def : Pat<(parallel (store (X86xor_flag (loadi32 addr:$dst), i32immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (XOR32mi8 addr:$dst, i32immSExt8:$src2)>;
-
-// Register-Register And with EFLAGS result
-def : Pat<(parallel (X86and_flag GR8:$src1, GR8:$src2),
- (implicit EFLAGS)),
- (AND8rr GR8:$src1, GR8:$src2)>;
-def : Pat<(parallel (X86and_flag GR16:$src1, GR16:$src2),
- (implicit EFLAGS)),
- (AND16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(parallel (X86and_flag GR32:$src1, GR32:$src2),
- (implicit EFLAGS)),
- (AND32rr GR32:$src1, GR32:$src2)>;
-
-// Register-Memory And with EFLAGS result
-def : Pat<(parallel (X86and_flag GR8:$src1, (loadi8 addr:$src2)),
- (implicit EFLAGS)),
+// and reg/reg
+def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr GR8 :$src1, GR8 :$src2)>;
+def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
+def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
+
+// and reg/mem
+def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
(AND8rm GR8:$src1, addr:$src2)>;
-def : Pat<(parallel (X86and_flag GR16:$src1, (loadi16 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
(AND16rm GR16:$src1, addr:$src2)>;
-def : Pat<(parallel (X86and_flag GR32:$src1, (loadi32 addr:$src2)),
- (implicit EFLAGS)),
+def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
(AND32rm GR32:$src1, addr:$src2)>;
-// Register-Integer And with EFLAGS result
-def : Pat<(parallel (X86and_flag GR8:$src1, imm:$src2),
- (implicit EFLAGS)),
+// and reg/imm
+def : Pat<(and GR8:$src1, imm:$src2),
(AND8ri GR8:$src1, imm:$src2)>;
-def : Pat<(parallel (X86and_flag GR16:$src1, imm:$src2),
- (implicit EFLAGS)),
+def : Pat<(and GR16:$src1, imm:$src2),
(AND16ri GR16:$src1, imm:$src2)>;
-def : Pat<(parallel (X86and_flag GR32:$src1, imm:$src2),
- (implicit EFLAGS)),
+def : Pat<(and GR32:$src1, imm:$src2),
(AND32ri GR32:$src1, imm:$src2)>;
-def : Pat<(parallel (X86and_flag GR16:$src1, i16immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(and GR16:$src1, i16immSExt8:$src2),
(AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
-def : Pat<(parallel (X86and_flag GR32:$src1, i32immSExt8:$src2),
- (implicit EFLAGS)),
+def : Pat<(and GR32:$src1, i32immSExt8:$src2),
(AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
-// Memory-Register And with EFLAGS result
-def : Pat<(parallel (store (X86and_flag (loadi8 addr:$dst), GR8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (AND8mr addr:$dst, GR8:$src2)>;
-def : Pat<(parallel (store (X86and_flag (loadi16 addr:$dst), GR16:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (AND16mr addr:$dst, GR16:$src2)>;
-def : Pat<(parallel (store (X86and_flag (loadi32 addr:$dst), GR32:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (AND32mr addr:$dst, GR32:$src2)>;
-
-// Memory-Integer And with EFLAGS result
-def : Pat<(parallel (store (X86and_flag (loadi8 addr:$dst), imm:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (AND8mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86and_flag (loadi16 addr:$dst), imm:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (AND16mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86and_flag (loadi32 addr:$dst), imm:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (AND32mi addr:$dst, imm:$src2)>;
-def : Pat<(parallel (store (X86and_flag (loadi16 addr:$dst), i16immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (AND16mi8 addr:$dst, i16immSExt8:$src2)>;
-def : Pat<(parallel (store (X86and_flag (loadi32 addr:$dst), i32immSExt8:$src2),
- addr:$dst),
- (implicit EFLAGS)),
- (AND32mi8 addr:$dst, i32immSExt8:$src2)>;
-
-// -disable-16bit support.
-def : Pat<(truncstorei16 (i32 imm:$src), addr:$dst),
- (MOV16mi addr:$dst, imm:$src)>;
-def : Pat<(truncstorei16 GR32:$src, addr:$dst),
- (MOV16mr addr:$dst, (EXTRACT_SUBREG GR32:$src, x86_subreg_16bit))>;
-def : Pat<(i32 (sextloadi16 addr:$dst)),
- (MOVSX32rm16 addr:$dst)>;
-def : Pat<(i32 (zextloadi16 addr:$dst)),
- (MOVZX32rm16 addr:$dst)>;
-def : Pat<(i32 (extloadi16 addr:$dst)),
- (MOVZX32rm16 addr:$dst)>;
-
//===----------------------------------------------------------------------===//
// Floating Point Stack Support
//===----------------------------------------------------------------------===//
@@ -5180,6 +4977,12 @@ include "X86Instr64bit.td"
include "X86InstrFragmentsSIMD.td"
//===----------------------------------------------------------------------===//
+// FMA - Fused Multiply-Add support (requires FMA)
+//===----------------------------------------------------------------------===//
+
+include "X86InstrFMA.td"
+
+//===----------------------------------------------------------------------===//
// XMM Floating point support (requires SSE / SSE2)
//===----------------------------------------------------------------------===//
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86InstrMMX.td b/libclamav/c++/llvm/lib/Target/X86/X86InstrMMX.td
index c8e0723..11d4179 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86InstrMMX.td
+++ b/libclamav/c++/llvm/lib/Target/X86/X86InstrMMX.td
@@ -117,9 +117,6 @@ def MMX_MOVD64mr : MMXI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR64:$src),
"movd\t{$src, $dst|$dst, $src}", []>;
def MMX_MOVD64grr : MMXI<0x7E, MRMDestReg, (outs), (ins GR32:$dst, VR64:$src),
"movd\t{$src, $dst|$dst, $src}", []>;
-def MMX_MOVQ64gmr : MMXRI<0x7E, MRMDestMem, (outs),
- (ins i64mem:$dst, VR64:$src),
- "movq\t{$src, $dst|$dst, $src}", []>;
let neverHasSideEffects = 1 in
def MMX_MOVD64to64rr : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
@@ -133,10 +130,10 @@ let neverHasSideEffects = 1 in
def MMX_MOVD64from64rr : MMXRI<0x7E, MRMDestReg,
(outs GR64:$dst), (ins VR64:$src),
"movd\t{$src, $dst|$dst, $src}", []>;
-def MMX_MOVD64rrv164 : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
- "movd\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst,
- (v1i64 (scalar_to_vector GR64:$src)))]>;
+def MMX_MOVD64rrv164 : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set VR64:$dst,
+ (v1i64 (scalar_to_vector GR64:$src)))]>;
let neverHasSideEffects = 1 in
def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
@@ -160,12 +157,16 @@ def MMX_MOVQ2DQrr : SSDIi8<0xD6, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
"movq2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(movl immAllZerosV,
- (v2i64 (scalar_to_vector (i64 (bitconvert VR64:$src))))))]>;
+ (v2i64 (scalar_to_vector
+ (i64 (bitconvert (v1i64 VR64:$src)))))))]>;
let neverHasSideEffects = 1 in
def MMX_MOVQ2FR64rr: SSDIi8<0xD6, MRMSrcReg, (outs FR64:$dst), (ins VR64:$src),
"movq2dq\t{$src, $dst|$dst, $src}", []>;
+def MMX_MOVFR642Qrr: SDIi8<0xD6, MRMSrcReg, (outs VR64:$dst), (ins FR64:$src),
+ "movdq2q\t{$src, $dst|$dst, $src}", []>;
+
def MMX_MOVNTQmr : MMXI<0xE7, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
"movntq\t{$src, $dst|$dst, $src}",
[(int_x86_mmx_movnt_dq addr:$dst, VR64:$src)]>;
@@ -271,9 +272,9 @@ defm MMX_PSRAD : MMXI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
// Shift up / down and insert zero's.
def : Pat<(v1i64 (X86vshl VR64:$src, (i8 imm:$amt))),
- (v1i64 (MMX_PSLLQri VR64:$src, imm:$amt))>;
+ (MMX_PSLLQri VR64:$src, (GetLo32XForm imm:$amt))>;
def : Pat<(v1i64 (X86vshr VR64:$src, (i8 imm:$amt))),
- (v1i64 (MMX_PSRLQri VR64:$src, imm:$amt))>;
+ (MMX_PSRLQri VR64:$src, (GetLo32XForm imm:$amt))>;
// Comparison Instructions
defm MMX_PCMPEQB : MMXI_binop_rm_int<0x74, "pcmpeqb", int_x86_mmx_pcmpeq_b>;
@@ -512,30 +513,20 @@ def : Pat<(store (v4i16 VR64:$src), addr:$dst),
(MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(store (v2i32 VR64:$src), addr:$dst),
(MMX_MOVQ64mr addr:$dst, VR64:$src)>;
-def : Pat<(store (v2f32 VR64:$src), addr:$dst),
- (MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(store (v1i64 VR64:$src), addr:$dst),
(MMX_MOVQ64mr addr:$dst, VR64:$src)>;
// Bit convert.
def : Pat<(v8i8 (bitconvert (v1i64 VR64:$src))), (v8i8 VR64:$src)>;
def : Pat<(v8i8 (bitconvert (v2i32 VR64:$src))), (v8i8 VR64:$src)>;
-def : Pat<(v8i8 (bitconvert (v2f32 VR64:$src))), (v8i8 VR64:$src)>;
def : Pat<(v8i8 (bitconvert (v4i16 VR64:$src))), (v8i8 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1i64 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 VR64:$src))), (v4i16 VR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v2f32 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1i64 VR64:$src))), (v2i32 VR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v2f32 VR64:$src))), (v2i32 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 VR64:$src))), (v2i32 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8 VR64:$src))), (v2i32 VR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v1i64 VR64:$src))), (v2f32 VR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v2i32 VR64:$src))), (v2f32 VR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v4i16 VR64:$src))), (v2f32 VR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v8i8 VR64:$src))), (v2f32 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v2i32 VR64:$src))), (v1i64 VR64:$src)>;
-def : Pat<(v1i64 (bitconvert (v2f32 VR64:$src))), (v1i64 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 VR64:$src))), (v1i64 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8 VR64:$src))), (v1i64 VR64:$src)>;
@@ -544,8 +535,6 @@ def : Pat<(v1i64 (bitconvert (i64 GR64:$src))),
(MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v2i32 (bitconvert (i64 GR64:$src))),
(MMX_MOVD64to64rr GR64:$src)>;
-def : Pat<(v2f32 (bitconvert (i64 GR64:$src))),
- (MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v4i16 (bitconvert (i64 GR64:$src))),
(MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v8i8 (bitconvert (i64 GR64:$src))),
@@ -554,8 +543,6 @@ def : Pat<(i64 (bitconvert (v1i64 VR64:$src))),
(MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v2i32 VR64:$src))),
(MMX_MOVD64from64rr VR64:$src)>;
-def : Pat<(i64 (bitconvert (v2f32 VR64:$src))),
- (MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v4i16 VR64:$src))),
(MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v8i8 VR64:$src))),
@@ -568,6 +555,14 @@ def : Pat<(f64 (bitconvert (v4i16 VR64:$src))),
(MMX_MOVQ2FR64rr VR64:$src)>;
def : Pat<(f64 (bitconvert (v8i8 VR64:$src))),
(MMX_MOVQ2FR64rr VR64:$src)>;
+def : Pat<(v1i64 (bitconvert (f64 FR64:$src))),
+ (MMX_MOVFR642Qrr FR64:$src)>;
+def : Pat<(v2i32 (bitconvert (f64 FR64:$src))),
+ (MMX_MOVFR642Qrr FR64:$src)>;
+def : Pat<(v4i16 (bitconvert (f64 FR64:$src))),
+ (MMX_MOVFR642Qrr FR64:$src)>;
+def : Pat<(v8i8 (bitconvert (f64 FR64:$src))),
+ (MMX_MOVFR642Qrr FR64:$src)>;
let AddedComplexity = 20 in {
def : Pat<(v2i32 (X86vzmovl (bc_v2i32 (load_mmx addr:$src)))),
@@ -577,7 +572,7 @@ let AddedComplexity = 20 in {
// Clear top half.
let AddedComplexity = 15 in {
def : Pat<(v2i32 (X86vzmovl VR64:$src)),
- (MMX_PUNPCKLDQrr VR64:$src, (MMX_V_SET0))>;
+ (MMX_PUNPCKLDQrr VR64:$src, (v2i32 (MMX_V_SET0)))>;
}
// Patterns to perform canonical versions of vector shuffling.
@@ -604,22 +599,9 @@ let AddedComplexity = 10 in {
def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v2i32 immAllOnesV))),
VR64:$src2)),
(MMX_PANDNrr VR64:$src1, VR64:$src2)>;
-def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v4i16 immAllOnesV_bc))),
- VR64:$src2)),
- (MMX_PANDNrr VR64:$src1, VR64:$src2)>;
-def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v8i8 immAllOnesV_bc))),
- VR64:$src2)),
- (MMX_PANDNrr VR64:$src1, VR64:$src2)>;
-
def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v2i32 immAllOnesV))),
(load addr:$src2))),
(MMX_PANDNrm VR64:$src1, addr:$src2)>;
-def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v4i16 immAllOnesV_bc))),
- (load addr:$src2))),
- (MMX_PANDNrm VR64:$src1, addr:$src2)>;
-def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v8i8 immAllOnesV_bc))),
- (load addr:$src2))),
- (MMX_PANDNrm VR64:$src1, addr:$src2)>;
// Move MMX to lower 64-bit of XMM
def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert (v8i8 VR64:$src))))),
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86InstrSSE.td b/libclamav/c++/llvm/lib/Target/X86/X86InstrSSE.td
index 67d987e..f5466f8 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/libclamav/c++/llvm/lib/Target/X86/X86InstrSSE.td
@@ -15,321 +15,6 @@
//===----------------------------------------------------------------------===//
-// SSE specific DAG Nodes.
-//===----------------------------------------------------------------------===//
-
-def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
- SDTCisFP<0>, SDTCisInt<2> ]>;
-def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
- SDTCisFP<1>, SDTCisVT<3, i8>]>;
-
-def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>;
-def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>;
-def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
- [SDNPCommutative, SDNPAssociative]>;
-def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp,
- [SDNPCommutative, SDNPAssociative]>;
-def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
- [SDNPCommutative, SDNPAssociative]>;
-def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
-def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
-def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
-def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
-def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
-def X86pshufb : SDNode<"X86ISD::PSHUFB",
- SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
- SDTCisSameAs<0,2>]>>;
-def X86pextrb : SDNode<"X86ISD::PEXTRB",
- SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
-def X86pextrw : SDNode<"X86ISD::PEXTRW",
- SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
-def X86pinsrb : SDNode<"X86ISD::PINSRB",
- SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
- SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
-def X86pinsrw : SDNode<"X86ISD::PINSRW",
- SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
- SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
-def X86insrtps : SDNode<"X86ISD::INSERTPS",
- SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
- SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
-def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
- SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
-def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
- [SDNPHasChain, SDNPMayLoad]>;
-def X86vshl : SDNode<"X86ISD::VSHL", SDTIntShiftOp>;
-def X86vshr : SDNode<"X86ISD::VSRL", SDTIntShiftOp>;
-def X86cmpps : SDNode<"X86ISD::CMPPS", SDTX86VFCMP>;
-def X86cmppd : SDNode<"X86ISD::CMPPD", SDTX86VFCMP>;
-def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
-def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
-def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
-def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
-def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
-def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
-def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
-def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;
-
-def SDTX86CmpPTest : SDTypeProfile<0, 2, [SDTCisVT<0, v4f32>,
- SDTCisVT<1, v4f32>]>;
-def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
-
-//===----------------------------------------------------------------------===//
-// SSE Complex Patterns
-//===----------------------------------------------------------------------===//
-
-// These are 'extloads' from a scalar to the low element of a vector, zeroing
-// the top elements. These are used for the SSE 'ss' and 'sd' instruction
-// forms.
-def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
- [SDNPHasChain, SDNPMayLoad]>;
-def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
- [SDNPHasChain, SDNPMayLoad]>;
-
-def ssmem : Operand<v4f32> {
- let PrintMethod = "printf32mem";
- let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
- let ParserMatchClass = X86MemAsmOperand;
-}
-def sdmem : Operand<v2f64> {
- let PrintMethod = "printf64mem";
- let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
- let ParserMatchClass = X86MemAsmOperand;
-}
-
-//===----------------------------------------------------------------------===//
-// SSE pattern fragments
-//===----------------------------------------------------------------------===//
-
-def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
-def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
-def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
-def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
-
-// Like 'store', but always requires vector alignment.
-def alignedstore : PatFrag<(ops node:$val, node:$ptr),
- (store node:$val, node:$ptr), [{
- return cast<StoreSDNode>(N)->getAlignment() >= 16;
-}]>;
-
-// Like 'load', but always requires vector alignment.
-def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- return cast<LoadSDNode>(N)->getAlignment() >= 16;
-}]>;
-
-def alignedloadfsf32 : PatFrag<(ops node:$ptr),
- (f32 (alignedload node:$ptr))>;
-def alignedloadfsf64 : PatFrag<(ops node:$ptr),
- (f64 (alignedload node:$ptr))>;
-def alignedloadv4f32 : PatFrag<(ops node:$ptr),
- (v4f32 (alignedload node:$ptr))>;
-def alignedloadv2f64 : PatFrag<(ops node:$ptr),
- (v2f64 (alignedload node:$ptr))>;
-def alignedloadv4i32 : PatFrag<(ops node:$ptr),
- (v4i32 (alignedload node:$ptr))>;
-def alignedloadv2i64 : PatFrag<(ops node:$ptr),
- (v2i64 (alignedload node:$ptr))>;
-
-// Like 'load', but uses special alignment checks suitable for use in
-// memory operands in most SSE instructions, which are required to
-// be naturally aligned on some targets but not on others. If the subtarget
-// allows unaligned accesses, match any load, though this may require
-// setting a feature bit in the processor (on startup, for example).
-// Opteron 10h and later implement such a feature.
-def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- return Subtarget->hasVectorUAMem()
- || cast<LoadSDNode>(N)->getAlignment() >= 16;
-}]>;
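// Illustrative consequence: on a subtarget without the vector-UA-mem feature,
// an 8-byte-aligned v4f32 load does not match memop, so it is left as a
// separate unaligned load (e.g. MOVUPS) rather than folded into the consumer.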
-
-def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
-def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
-def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
-def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
-def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
-def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
-def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
-
-// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
-// 16-byte boundary.
-// FIXME: 8 byte alignment for mmx reads is not required
-def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- return cast<LoadSDNode>(N)->getAlignment() >= 8;
-}]>;
-
-def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
-def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
-def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
-def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
-
-// MOVNT Support
-// Like 'store', but requires the non-temporal bit to be set
-def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
- (st node:$val, node:$ptr), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return ST->isNonTemporal();
- return false;
-}]>;
-
-def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
- (st node:$val, node:$ptr), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return ST->isNonTemporal() && !ST->isTruncatingStore() &&
- ST->getAddressingMode() == ISD::UNINDEXED &&
- ST->getAlignment() >= 16;
- return false;
-}]>;
-
-def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
- (st node:$val, node:$ptr), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return ST->isNonTemporal() &&
- ST->getAlignment() < 16;
- return false;
-}]>;
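// Sketch of the intended split (as used later in this file): the aligned
// non-temporal fragment selects the MOVNT* store forms, which require 16-byte
// alignment, while the unaligned fragment falls back to ordinary stores.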
-
-def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
-def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
-def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
-def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
-def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
-def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
-
-def vzmovl_v2i64 : PatFrag<(ops node:$src),
- (bitconvert (v2i64 (X86vzmovl
- (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
-def vzmovl_v4i32 : PatFrag<(ops node:$src),
- (bitconvert (v4i32 (X86vzmovl
- (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;
-
-def vzload_v2i64 : PatFrag<(ops node:$src),
- (bitconvert (v2i64 (X86vzload node:$src)))>;
-
-
-def fp32imm0 : PatLeaf<(f32 fpimm), [{
- return N->isExactlyValue(+0.0);
-}]>;
-
-// BYTE_imm - Transform bit immediates into byte immediates.
-def BYTE_imm : SDNodeXForm<imm, [{
- // Transformation function: imm >> 3
- return getI32Imm(N->getZExtValue() >> 3);
-}]>;
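// Worked example: a bit shift amount of 16 becomes the byte immediate 2
// (16 >> 3), matching byte-granular shifts such as PSRLDQ/PSLLDQ.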
-
-// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
-// SHUFP* etc. imm.
-def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
- return getI8Imm(X86::getShuffleSHUFImmediate(N));
-}]>;
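// Worked example: the v4i32 reversal mask <3,2,1,0> encodes as
// 3 | (2 << 2) | (1 << 4) | (0 << 6) = 0x1B, the immediate form PSHUFD expects.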
-
-// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
-// PSHUFHW imm.
-def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
- return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
-}]>;
-
-// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
-// PSHUFLW imm.
-def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
- return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
-}]>;
-
-// SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
-// a PALIGNR imm.
-def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
- return getI8Imm(X86::getShufflePALIGNRImmediate(N));
-}]>;
-
-def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
- return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
-}]>;
-
-def movddup : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movlp : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movl : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
-}], SHUFFLE_get_shuf_imm>;
-
-def shufp : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
-}], SHUFFLE_get_shuf_imm>;
-
-def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
-}], SHUFFLE_get_pshufhw_imm>;
-
-def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
-}], SHUFFLE_get_pshuflw_imm>;
-
-def palign : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
-}], SHUFFLE_get_palign_imm>;
-
-//===----------------------------------------------------------------------===//
// SSE scalar FP Instructions
//===----------------------------------------------------------------------===//
@@ -367,832 +52,744 @@ let Uses = [EFLAGS], usesCustomInserter = 1 in {
}
//===----------------------------------------------------------------------===//
-// SSE1 Instructions
+// SSE 1 & 2 Instructions Classes
//===----------------------------------------------------------------------===//
-// Move Instructions. Register-to-register movss is not used for FR32
-// register copies because it's a partial register update; FsMOVAPSrr is
-// used instead. Register-to-register movss is not modeled as an INSERT_SUBREG
-// because INSERT_SUBREG requires that the insert be implementable in terms of
-// a copy, and, as just mentioned, we don't use movss for copies.
-let Constraints = "$src1 = $dst" in
-def MOVSSrr : SSI<0x10, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, FR32:$src2),
- "movss\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (movl VR128:$src1, (scalar_to_vector FR32:$src2)))]>;
+/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
+multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ RegisterClass RC, X86MemOperand x86memop,
+ bit Is2Addr = 1> {
+ let isCommutable = 1 in {
+ def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
+ }
+ def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
+}
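// Hypothetical instantiation (names chosen purely for illustration):
//   defm FOOSS : sse12_fp_scalar<0x58, "addss", fadd, FR32, f32mem>, XS;
// would expand to FOOSSrr and FOOSSrm, with the Is2Addr default of 1 picking
// the two-operand "$src2, $dst|$dst, $src2" assembly string.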
+
+/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
+multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ string asm, string SSEVer, string FPSizeStr,
+ Operand memopr, ComplexPattern mem_cpat,
+ bit Is2Addr = 1> {
+ def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
+ !strconcat(SSEVer, !strconcat("_",
+ !strconcat(OpcodeStr, FPSizeStr))))
+ RC:$src1, RC:$src2))]>;
+ def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
+ !strconcat(SSEVer, !strconcat("_",
+ !strconcat(OpcodeStr, FPSizeStr))))
+ RC:$src1, mem_cpat:$src2))]>;
+}
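// The nested !strconcat/!nameconcat calls assemble the intrinsic name from the
// parameters; for illustration (assumed values), SSEVer = "2", OpcodeStr = "add"
// and FPSizeStr = "_sd" would resolve to int_x86_sse2_add_sd.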
+
+/// sse12_fp_packed - SSE 1 & 2 packed instructions class
+multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ RegisterClass RC, ValueType vt,
+ X86MemOperand x86memop, PatFrag mem_frag,
+ Domain d, bit Is2Addr = 1> {
+ let isCommutable = 1 in
+ def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
+ let mayLoad = 1 in
+ def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
+}
+
+/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
+multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
+ string OpcodeStr, X86MemOperand x86memop,
+ list<dag> pat_rr, list<dag> pat_rm,
+ bit Is2Addr = 1> {
+ let isCommutable = 1 in
+ def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ pat_rr, d>;
+ def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ pat_rm, d>;
+}
+
+/// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
+multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ string asm, string SSEVer, string FPSizeStr,
+ X86MemOperand x86memop, PatFrag mem_frag,
+ Domain d, bit Is2Addr = 1> {
+ def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_",
+ !strconcat(SSEVer, !strconcat("_",
+ !strconcat(OpcodeStr, FPSizeStr))))
+ RC:$src1, RC:$src2))], d>;
+ def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_",
+ !strconcat(SSEVer, !strconcat("_",
+ !strconcat(OpcodeStr, FPSizeStr))))
+ RC:$src1, (mem_frag addr:$src2)))], d>;
+}
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Move Instructions
+//===----------------------------------------------------------------------===//
+class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
+ SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
+ [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
+
+// Loading from memory automatically zeroing upper bits.
+class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
+ PatFrag mem_pat, string OpcodeStr> :
+ SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, (mem_pat addr:$src))]>;
+
+// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
+// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
+// is used instead. Register-to-register movss/movsd is not modeled as an
+// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
+// in terms of a copy, and, as just mentioned, we don't use movss/movsd for copies.
+let isAsmParserOnly = 1 in {
+ def VMOVSSrr : sse12_move_rr<FR32, v4f32,
+ "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
+ def VMOVSDrr : sse12_move_rr<FR64, v2f64,
+ "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;
+
+ let canFoldAsLoad = 1, isReMaterializable = 1 in {
+ def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
+
+ let AddedComplexity = 20 in
+ def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
+ }
+}
+
+let Constraints = "$src1 = $dst" in {
+ def MOVSSrr : sse12_move_rr<FR32, v4f32,
+ "movss\t{$src2, $dst|$dst, $src2}">, XS;
+ def MOVSDrr : sse12_move_rr<FR64, v2f64,
+ "movsd\t{$src2, $dst|$dst, $src2}">, XD;
+}
+
+let canFoldAsLoad = 1, isReMaterializable = 1 in {
+ def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
+
+ let AddedComplexity = 20 in
+ def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
+}
+
+let AddedComplexity = 15 in {
// Extract the low 32-bit value from one vector and insert it into another.
-let AddedComplexity = 15 in
def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
- (MOVSSrr VR128:$src1,
- (EXTRACT_SUBREG (v4f32 VR128:$src2), x86_subreg_ss))>;
+ (MOVSSrr (v4f32 VR128:$src1),
+ (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
+// Extract the low 64-bit value from one vector and insert it into another.
+def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
+ (MOVSDrr (v2f64 VR128:$src1),
+ (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
+}
// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
- (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, x86_subreg_ss)>;
-
-// Loading from memory automatically zeroing upper bits.
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
- "movss\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (loadf32 addr:$src))]>;
+ (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
+// Implicitly promote a 64-bit scalar to a vector.
+def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
+ (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
+let AddedComplexity = 20 in {
// MOVSSrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG.
-let AddedComplexity = 20 in {
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
- (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), x86_subreg_ss)>;
+ (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
- (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), x86_subreg_ss)>;
+ (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
- (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), x86_subreg_ss)>;
+ (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
+// MOVSDrm zeros the high parts of the register; represent this
+// with SUBREG_TO_REG.
+def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
+ (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
+ (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
+ (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
+ (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+def : Pat<(v2f64 (X86vzload addr:$src)),
+ (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
}
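// In these patterns SUBREG_TO_REG with a 0 immediate asserts that the bits
// outside sub_ss/sub_sd were already zeroed by the load, so no extra zeroing
// instruction needs to be emitted.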
// Store scalar value to memory.
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
"movss\t{$src, $dst|$dst, $src}",
[(store FR32:$src, addr:$dst)]>;
+def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
+ "movsd\t{$src, $dst|$dst, $src}",
+ [(store FR64:$src, addr:$dst)]>;
+
+let isAsmParserOnly = 1 in {
+def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
+ "movss\t{$src, $dst|$dst, $src}",
+ [(store FR32:$src, addr:$dst)]>, XS, VEX;
+def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
+ "movsd\t{$src, $dst|$dst, $src}",
+ [(store FR64:$src, addr:$dst)]>, XD, VEX;
+}
// Extract and store.
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
addr:$dst),
(MOVSSmr addr:$dst,
- (EXTRACT_SUBREG (v4f32 VR128:$src), x86_subreg_ss))>;
-
-// Conversion instructions
-def CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
- "cvttss2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (fp_to_sint FR32:$src))]>;
-def CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
- "cvttss2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
-def CVTSI2SSrr : SSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
- "cvtsi2ss\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (sint_to_fp GR32:$src))]>;
-def CVTSI2SSrm : SSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
- "cvtsi2ss\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
-
-// Match intrinsics which expect XMM operand(s).
-def CVTSS2SIrr: SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
- "cvtss2si{l}\t{$src, $dst|$dst, $src}", []>;
-def CVTSS2SIrm: SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
- "cvtss2si{l}\t{$src, $dst|$dst, $src}", []>;
-
-def Int_CVTSS2SIrr : SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
- "cvtss2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
-def Int_CVTSS2SIrm : SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
- "cvtss2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse_cvtss2si
- (load addr:$src)))]>;
-
-// Match intrinsics which expect MM and XMM operand(s).
-def Int_CVTPS2PIrr : PSI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
- "cvtps2pi\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (int_x86_sse_cvtps2pi VR128:$src))]>;
-def Int_CVTPS2PIrm : PSI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
- "cvtps2pi\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (int_x86_sse_cvtps2pi
- (load addr:$src)))]>;
-def Int_CVTTPS2PIrr: PSI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
- "cvttps2pi\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (int_x86_sse_cvttps2pi VR128:$src))]>;
-def Int_CVTTPS2PIrm: PSI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
- "cvttps2pi\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (int_x86_sse_cvttps2pi
- (load addr:$src)))]>;
-let Constraints = "$src1 = $dst" in {
- def Int_CVTPI2PSrr : PSI<0x2A, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR64:$src2),
- "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
- VR64:$src2))]>;
- def Int_CVTPI2PSrm : PSI<0x2A, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
- "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
- (load addr:$src2)))]>;
-}
-
-// Aliases for intrinsics
-def Int_CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
- "cvttss2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst,
- (int_x86_sse_cvttss2si VR128:$src))]>;
-def Int_CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
- "cvttss2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst,
- (int_x86_sse_cvttss2si(load addr:$src)))]>;
-
-let Constraints = "$src1 = $dst" in {
- def Int_CVTSI2SSrr : SSI<0x2A, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
- "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
- GR32:$src2))]>;
- def Int_CVTSI2SSrm : SSI<0x2A, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
- "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
- (loadi32 addr:$src2)))]>;
-}
-
-// Comparison instructions
-let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
- def CMPSSrr : SSIi8<0xC2, MRMSrcReg,
- (outs FR32:$dst), (ins FR32:$src1, FR32:$src, SSECC:$cc),
- "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
-let mayLoad = 1 in
- def CMPSSrm : SSIi8<0xC2, MRMSrcMem,
- (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, SSECC:$cc),
- "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
-}
-
-let Defs = [EFLAGS] in {
-def UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2),
- "ucomiss\t{$src2, $src1|$src1, $src2}",
- [(X86cmp FR32:$src1, FR32:$src2), (implicit EFLAGS)]>;
-def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2),
- "ucomiss\t{$src2, $src1|$src1, $src2}",
- [(X86cmp FR32:$src1, (loadf32 addr:$src2)),
- (implicit EFLAGS)]>;
-
-def COMISSrr: PSI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
- "comiss\t{$src2, $src1|$src1, $src2}", []>;
-def COMISSrm: PSI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
- "comiss\t{$src2, $src1|$src1, $src2}", []>;
-
-} // Defs = [EFLAGS]
-
-// Aliases to match intrinsics which expect XMM operand(s).
-let Constraints = "$src1 = $dst" in {
- def Int_CMPSSrr : SSIi8<0xC2, MRMSrcReg,
- (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src, SSECC:$cc),
- "cmp${cc}ss\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse_cmp_ss
- VR128:$src1,
- VR128:$src, imm:$cc))]>;
- def Int_CMPSSrm : SSIi8<0xC2, MRMSrcMem,
- (outs VR128:$dst),
- (ins VR128:$src1, f32mem:$src, SSECC:$cc),
- "cmp${cc}ss\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
- (load addr:$src), imm:$cc))]>;
-}
-
-let Defs = [EFLAGS] in {
-def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
- "ucomiss\t{$src2, $src1|$src1, $src2}",
- [(X86ucomi (v4f32 VR128:$src1), VR128:$src2),
- (implicit EFLAGS)]>;
-def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs),(ins VR128:$src1, f128mem:$src2),
- "ucomiss\t{$src2, $src1|$src1, $src2}",
- [(X86ucomi (v4f32 VR128:$src1), (load addr:$src2)),
- (implicit EFLAGS)]>;
-
-def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
- "comiss\t{$src2, $src1|$src1, $src2}",
- [(X86comi (v4f32 VR128:$src1), VR128:$src2),
- (implicit EFLAGS)]>;
-def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
- "comiss\t{$src2, $src1|$src1, $src2}",
- [(X86comi (v4f32 VR128:$src1), (load addr:$src2)),
- (implicit EFLAGS)]>;
-} // Defs = [EFLAGS]
-
-// Aliases of packed SSE1 instructions for scalar use. These all have names
-// that start with 'Fs'.
-
-// Alias instructions that map fld0 to pxor for sse.
-let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
- canFoldAsLoad = 1 in
- // FIXME: Set encoding to pseudo!
-def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
- [(set FR32:$dst, fp32imm0)]>,
- Requires<[HasSSE1]>, TB, OpSize;
+ (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
+def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
+ addr:$dst),
+ (MOVSDmr addr:$dst,
+ (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
-// Alias instruction to do FR32 reg-to-reg copy using movaps. Upper bits are
-// disregarded.
+// Move Aligned/Unaligned floating point values
+multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
+ X86MemOperand x86memop, PatFrag ld_frag,
+ string asm, Domain d,
+ bit IsReMaterializable = 1> {
let neverHasSideEffects = 1 in
-def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
- "movaps\t{$src, $dst|$dst, $src}", []>;
-
-// Alias instruction to load FR32 from f128mem using movaps. Upper bits are
-// disregarded.
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
- "movaps\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
-
-// Alias bitwise logical operations using SSE logical ops on packed FP values.
-let Constraints = "$src1 = $dst" in {
-let isCommutable = 1 in {
- def FsANDPSrr : PSI<0x54, MRMSrcReg, (outs FR32:$dst),
- (ins FR32:$src1, FR32:$src2),
- "andps\t{$src2, $dst|$dst, $src2}",
- [(set FR32:$dst, (X86fand FR32:$src1, FR32:$src2))]>;
- def FsORPSrr : PSI<0x56, MRMSrcReg, (outs FR32:$dst),
- (ins FR32:$src1, FR32:$src2),
- "orps\t{$src2, $dst|$dst, $src2}",
- [(set FR32:$dst, (X86for FR32:$src1, FR32:$src2))]>;
- def FsXORPSrr : PSI<0x57, MRMSrcReg, (outs FR32:$dst),
- (ins FR32:$src1, FR32:$src2),
- "xorps\t{$src2, $dst|$dst, $src2}",
- [(set FR32:$dst, (X86fxor FR32:$src1, FR32:$src2))]>;
-}
-
-def FsANDPSrm : PSI<0x54, MRMSrcMem, (outs FR32:$dst),
- (ins FR32:$src1, f128mem:$src2),
- "andps\t{$src2, $dst|$dst, $src2}",
- [(set FR32:$dst, (X86fand FR32:$src1,
- (memopfsf32 addr:$src2)))]>;
-def FsORPSrm : PSI<0x56, MRMSrcMem, (outs FR32:$dst),
- (ins FR32:$src1, f128mem:$src2),
- "orps\t{$src2, $dst|$dst, $src2}",
- [(set FR32:$dst, (X86for FR32:$src1,
- (memopfsf32 addr:$src2)))]>;
-def FsXORPSrm : PSI<0x57, MRMSrcMem, (outs FR32:$dst),
- (ins FR32:$src1, f128mem:$src2),
- "xorps\t{$src2, $dst|$dst, $src2}",
- [(set FR32:$dst, (X86fxor FR32:$src1,
- (memopfsf32 addr:$src2)))]>;
-
-let neverHasSideEffects = 1 in {
-def FsANDNPSrr : PSI<0x55, MRMSrcReg,
- (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
- "andnps\t{$src2, $dst|$dst, $src2}", []>;
-let mayLoad = 1 in
-def FsANDNPSrm : PSI<0x55, MRMSrcMem,
- (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
- "andnps\t{$src2, $dst|$dst, $src2}", []>;
-}
+ def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
+let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
+ def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, (ld_frag addr:$src))], d>;
}
-/// basic_sse1_fp_binop_rm - SSE1 binops come in both scalar and vector forms.
-///
-/// In addition, we also have a special variant of the scalar form here to
-/// represent the associated intrinsic operation. This form is unlike the
-/// plain scalar form, in that it takes an entire vector (instead of a scalar)
-/// and leaves the top elements unmodified (therefore these cannot be commuted).
-///
-/// These three forms can each be reg+reg or reg+mem, so there are a total of
-/// six "instructions".
-///
-let Constraints = "$src1 = $dst" in {
-multiclass basic_sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
- SDNode OpNode, Intrinsic F32Int,
- bit Commutable = 0> {
- // Scalar operation, reg+reg.
- def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
- let isCommutable = Commutable;
- }
-
- // Scalar operation, reg+mem.
- def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
- (ins FR32:$src1, f32mem:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;
-
- // Vector operation, reg+reg.
- def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
- let isCommutable = Commutable;
- }
-
- // Vector operation, reg+mem.
- def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2),
- !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;
-
- // Intrinsic operation, reg+reg.
- def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]>;
-
- // Intrinsic operation, reg+mem.
- def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, ssmem:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (F32Int VR128:$src1,
- sse_load_f32:$src2))]>;
-}
-}
-
-// Arithmetic instructions
-defm ADD : basic_sse1_fp_binop_rm<0x58, "add", fadd, int_x86_sse_add_ss, 1>;
-defm MUL : basic_sse1_fp_binop_rm<0x59, "mul", fmul, int_x86_sse_mul_ss, 1>;
-defm SUB : basic_sse1_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse_sub_ss>;
-defm DIV : basic_sse1_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse_div_ss>;
-
-/// sse1_fp_binop_rm - Other SSE1 binops
-///
-/// This multiclass is like basic_sse1_fp_binop_rm, with the addition of
-/// instructions for a full-vector intrinsic form. Operations that map
-/// onto C operators don't use this form since they just use the plain
-/// vector form instead of having a separate vector intrinsic form.
-///
-/// This provides a total of eight "instructions".
-///
-let Constraints = "$src1 = $dst" in {
-multiclass sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
- SDNode OpNode,
- Intrinsic F32Int,
- Intrinsic V4F32Int,
- bit Commutable = 0> {
-
- // Scalar operation, reg+reg.
- def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
- let isCommutable = Commutable;
- }
-
- // Scalar operation, reg+mem.
- def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
- (ins FR32:$src1, f32mem:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;
-
- // Vector operation, reg+reg.
- def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
- let isCommutable = Commutable;
- }
-
- // Vector operation, reg+mem.
- def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2),
- !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;
-
- // Intrinsic operation, reg+reg.
- def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
- let isCommutable = Commutable;
- }
-
- // Intrinsic operation, reg+mem.
- def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, ssmem:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (F32Int VR128:$src1,
- sse_load_f32:$src2))]>;
-
- // Vector intrinsic operation, reg+reg.
- def PSrr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (V4F32Int VR128:$src1, VR128:$src2))]> {
- let isCommutable = Commutable;
- }
-
- // Vector intrinsic operation, reg+mem.
- def PSrm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2),
- !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (V4F32Int VR128:$src1, (memopv4f32 addr:$src2)))]>;
+let isAsmParserOnly = 1 in {
+defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
+ "movaps", SSEPackedSingle>, VEX;
+defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
+ "movapd", SSEPackedDouble>, OpSize, VEX;
+defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
+ "movups", SSEPackedSingle>, VEX;
+defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
+ "movupd", SSEPackedDouble, 0>, OpSize, VEX;
+
+defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
+ "movaps", SSEPackedSingle>, VEX;
+defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
+ "movapd", SSEPackedDouble>, OpSize, VEX;
+defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
+ "movups", SSEPackedSingle>, VEX;
+defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
+ "movupd", SSEPackedDouble, 0>, OpSize, VEX;
}
+defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
+ "movaps", SSEPackedSingle>, TB;
+defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
+ "movapd", SSEPackedDouble>, TB, OpSize;
+defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
+ "movups", SSEPackedSingle>, TB;
+defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
+ "movupd", SSEPackedDouble, 0>, TB, OpSize;
+
+let isAsmParserOnly = 1 in {
+def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movaps\t{$src, $dst|$dst, $src}",
+ [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
+def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movapd\t{$src, $dst|$dst, $src}",
+ [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
+def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movups\t{$src, $dst|$dst, $src}",
+ [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
+def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movupd\t{$src, $dst|$dst, $src}",
+ [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
+def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
+ "movaps\t{$src, $dst|$dst, $src}",
+ [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
+def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
+ "movapd\t{$src, $dst|$dst, $src}",
+ [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
+def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
+ "movups\t{$src, $dst|$dst, $src}",
+ [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
+def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
+ "movupd\t{$src, $dst|$dst, $src}",
+ [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
}
-defm MAX : sse1_fp_binop_rm<0x5F, "max", X86fmax,
- int_x86_sse_max_ss, int_x86_sse_max_ps>;
-defm MIN : sse1_fp_binop_rm<0x5D, "min", X86fmin,
- int_x86_sse_min_ss, int_x86_sse_min_ps>;
+def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
+def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
+ (VMOVUPSYmr addr:$dst, VR256:$src)>;
-//===----------------------------------------------------------------------===//
-// SSE packed FP Instructions
-
-// Move Instructions
-let neverHasSideEffects = 1 in
-def MOVAPSrr : PSI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movaps\t{$src, $dst|$dst, $src}", []>;
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def MOVAPSrm : PSI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "movaps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (alignedloadv4f32 addr:$src))]>;
+def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
+def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
+ (VMOVUPDYmr addr:$dst, VR256:$src)>;
def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movaps\t{$src, $dst|$dst, $src}",
[(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
-
-let neverHasSideEffects = 1 in
-def MOVUPSrr : PSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movups\t{$src, $dst|$dst, $src}", []>;
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def MOVUPSrm : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "movups\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (loadv4f32 addr:$src))]>;
+def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movapd\t{$src, $dst|$dst, $src}",
+ [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movups\t{$src, $dst|$dst, $src}",
[(store (v4f32 VR128:$src), addr:$dst)]>;
+def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movupd\t{$src, $dst|$dst, $src}",
+ [(store (v2f64 VR128:$src), addr:$dst)]>;
-// Intrinsic forms of MOVUPS load and store
+// Intrinsic forms of MOVUPS/D load and store
+let isAsmParserOnly = 1 in {
+ let canFoldAsLoad = 1, isReMaterializable = 1 in
+ def VMOVUPSrm_Int : VPSI<0x10, MRMSrcMem, (outs VR128:$dst),
+ (ins f128mem:$src),
+ "movups\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>, VEX;
+ def VMOVUPDrm_Int : VPDI<0x10, MRMSrcMem, (outs VR128:$dst),
+ (ins f128mem:$src),
+ "movupd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>, VEX;
+ def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR128:$src),
+ "movups\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
+ def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR128:$src),
+ "movupd\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
+}
let canFoldAsLoad = 1, isReMaterializable = 1 in
def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"movups\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
+def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "movupd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
+
def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movups\t{$src, $dst|$dst, $src}",
[(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
+def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movupd\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
-let Constraints = "$src1 = $dst" in {
- let AddedComplexity = 20 in {
- def MOVLPSrm : PSI<0x12, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
- "movlps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (movlp VR128:$src1,
- (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))]>;
- def MOVHPSrm : PSI<0x16, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
- "movhps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (movlhps VR128:$src1,
- (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))]>;
- } // AddedComplexity
-} // Constraints = "$src1 = $dst"
-
+// Move Low/High packed floating point values
+multiclass sse12_mov_hilo_packed<bits<8> opc, RegisterClass RC,
+ PatFrag mov_frag, string base_opc,
+ string asm_opr> {
+ def PSrm : PI<opc, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
+ !strconcat(!strconcat(base_opc,"s"), asm_opr),
+ [(set RC:$dst,
+ (mov_frag RC:$src1,
+ (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
+ SSEPackedSingle>, TB;
+
+ def PDrm : PI<opc, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
+ !strconcat(!strconcat(base_opc,"d"), asm_opr),
+ [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
+ (scalar_to_vector (loadf64 addr:$src2)))))],
+ SSEPackedDouble>, TB, OpSize;
+}
-def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
- (MOVHPSrm VR128:$src1, addr:$src2)>;
+let isAsmParserOnly = 1, AddedComplexity = 20 in {
+ defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
+ defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
+}
+let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
+ defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
+ "\t{$src2, $dst|$dst, $src2}">;
+ defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
+ "\t{$src2, $dst|$dst, $src2}">;
+}
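// Note on naming: the defm prefixes combine with the PSrm/PDrm suffixes inside
// the multiclass, so these expand to MOVLPSrm/MOVLPDrm and MOVHPSrm/MOVHPDrm,
// the names referenced by the MOVHPSrm pattern further below.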
+let isAsmParserOnly = 1 in {
+def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
+ "movlps\t{$src, $dst|$dst, $src}",
+ [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
+ (iPTR 0))), addr:$dst)]>, VEX;
+def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
+ "movlpd\t{$src, $dst|$dst, $src}",
+ [(store (f64 (vector_extract (v2f64 VR128:$src),
+ (iPTR 0))), addr:$dst)]>, VEX;
+}
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movlps\t{$src, $dst|$dst, $src}",
[(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
(iPTR 0))), addr:$dst)]>;
+def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
+ "movlpd\t{$src, $dst|$dst, $src}",
+ [(store (f64 (vector_extract (v2f64 VR128:$src),
+ (iPTR 0))), addr:$dst)]>;
// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
+let isAsmParserOnly = 1 in {
+def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
+ "movhps\t{$src, $dst|$dst, $src}",
+ [(store (f64 (vector_extract
+ (unpckh (bc_v2f64 (v4f32 VR128:$src)),
+ (undef)), (iPTR 0))), addr:$dst)]>,
+ VEX;
+def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
+ "movhpd\t{$src, $dst|$dst, $src}",
+ [(store (f64 (vector_extract
+ (v2f64 (unpckh VR128:$src, (undef))),
+ (iPTR 0))), addr:$dst)]>,
+ VEX;
+}
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movhps\t{$src, $dst|$dst, $src}",
[(store (f64 (vector_extract
(unpckh (bc_v2f64 (v4f32 VR128:$src)),
(undef)), (iPTR 0))), addr:$dst)]>;
+def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
+ "movhpd\t{$src, $dst|$dst, $src}",
+ [(store (f64 (vector_extract
+ (v2f64 (unpckh VR128:$src, (undef))),
+ (iPTR 0))), addr:$dst)]>;
-let Constraints = "$src1 = $dst" in {
-let AddedComplexity = 20 in {
-def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- "movlhps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
-
-def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- "movhlps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
-} // AddedComplexity
-} // Constraints = "$src1 = $dst"
+let isAsmParserOnly = 1, AddedComplexity = 20 in {
+ def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst,
+ (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
+ VEX_4V;
+ def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst,
+ (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
+ VEX_4V;
+}
+let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
+ def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ "movlhps\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
+ def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ "movhlps\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
+}
+def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
+ (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
let AddedComplexity = 20 in {
-def : Pat<(v4f32 (movddup VR128:$src, (undef))),
- (MOVLHPSrr VR128:$src, VR128:$src)>;
-def : Pat<(v2i64 (movddup VR128:$src, (undef))),
- (MOVLHPSrr VR128:$src, VR128:$src)>;
+ def : Pat<(v4f32 (movddup VR128:$src, (undef))),
+ (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
+ def : Pat<(v2i64 (movddup VR128:$src, (undef))),
+ (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
}
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Conversion Instructions
+//===----------------------------------------------------------------------===//
+multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
+ SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
+ string asm> {
+ def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
+ [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
+ def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
+ [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
+}
-// Arithmetic
-
-/// sse1_fp_unop_rm - SSE1 unops come in both scalar and vector forms.
-///
-/// In addition, we also have a special variant of the scalar form here to
-/// represent the associated intrinsic operation. This form is unlike the
-/// plain scalar form, in that it takes an entire vector (instead of a
-/// scalar) and leaves the top elements undefined.
-///
-/// And, we have a special variant form for a full-vector intrinsic form.
-///
-/// These four forms can each have a reg or a mem operand, so there are a
-/// total of eight "instructions".
-///
-multiclass sse1_fp_unop_rm<bits<8> opc, string OpcodeStr,
- SDNode OpNode,
- Intrinsic F32Int,
- Intrinsic V4F32Int,
- bit Commutable = 0> {
- // Scalar operation, reg.
- def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
- !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set FR32:$dst, (OpNode FR32:$src))]> {
- let isCommutable = Commutable;
- }
-
- // Scalar operation, mem.
- def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
- !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
- Requires<[HasSSE1, OptForSize]>;
-
- // Vector operation, reg.
- def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]> {
- let isCommutable = Commutable;
- }
-
- // Vector operation, mem.
- def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
+multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
+ X86MemOperand x86memop, string asm> {
+ def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
+ []>;
+ def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
+ []>;
+}
- // Intrinsic operation, reg.
- def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (F32Int VR128:$src))]> {
- let isCommutable = Commutable;
- }
+multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
+ SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
+ string asm, Domain d> {
+ def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
+ [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
+ def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
+ [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
+}
- // Intrinsic operation, mem.
- def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
- !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
+multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
+ X86MemOperand x86memop, string asm> {
+ def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
+ !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
+ def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
+ (ins DstRC:$src1, x86memop:$src),
+ !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
+}
- // Vector intrinsic operation, reg
- def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (V4F32Int VR128:$src))]> {
- let isCommutable = Commutable;
- }
+let isAsmParserOnly = 1 in {
+defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
+ "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
+defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
+ "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
+ VEX_W;
+defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
+ "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
+defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
+ "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
+ VEX, VEX_W;
+
+// The assembler can recognize rr 64-bit instructions by seeing an rxx
+// register, but the same isn't true when only memory operands are used.
+// Provide explicit "l" and "q" assembly forms to address this where
+// appropriate (an example follows this block).
+defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
+ VEX_4V;
+defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
+ VEX_4V, VEX_W;
+defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
+ VEX_4V;
+defm VCVTSI2SDL : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
+ VEX_4V;
+defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
+ VEX_4V, VEX_W;
+}
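// For example, the {q}-suffixed variants above let the AT&T-syntax "q" form
// (e.g. cvtsi2ssq with a memory source) be assembled as the 64-bit conversion
// even though no 64-bit register operand is present to disambiguate it.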
- // Vector intrinsic operation, mem
- def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
+defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
+ "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
+defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
+ "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
+defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
+ "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
+defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
+ "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
+defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
+ "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
+defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
+ "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
+defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
+ "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
+defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
+ "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
+
+// Conversion Instructions Intrinsics - Match intrinsics which expect MM
+// and/or XMM operand(s).
+multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
+ Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
+ string asm, Domain d> {
+ def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
+ [(set DstRC:$dst, (Int SrcRC:$src))], d>;
+ def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
+ [(set DstRC:$dst, (Int (ld_frag addr:$src)))], d>;
}
-// Square root.
-defm SQRT : sse1_fp_unop_rm<0x51, "sqrt", fsqrt,
- int_x86_sse_sqrt_ss, int_x86_sse_sqrt_ps>;
+multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
+ Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
+ string asm> {
+ def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+ [(set DstRC:$dst, (Int SrcRC:$src))]>;
+ def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+ [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
+}
-// Reciprocal approximations. Note that these typically require refinement
-// in order to obtain suitable precision.
-defm RSQRT : sse1_fp_unop_rm<0x52, "rsqrt", X86frsqrt,
- int_x86_sse_rsqrt_ss, int_x86_sse_rsqrt_ps>;
-defm RCP : sse1_fp_unop_rm<0x53, "rcp", X86frcp,
- int_x86_sse_rcp_ss, int_x86_sse_rcp_ps>;
+multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC,
+ RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
+ PatFrag ld_frag, string asm, Domain d> {
+ def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
+ asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))], d>;
+ def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst),
+ (ins DstRC:$src1, x86memop:$src2), asm,
+ [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))], d>;
+}
-// Logical
-let Constraints = "$src1 = $dst" in {
- let isCommutable = 1 in {
- def ANDPSrr : PSI<0x54, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "andps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v2i64
- (and VR128:$src1, VR128:$src2)))]>;
- def ORPSrr : PSI<0x56, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "orps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v2i64
- (or VR128:$src1, VR128:$src2)))]>;
- def XORPSrr : PSI<0x57, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "xorps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v2i64
- (xor VR128:$src1, VR128:$src2)))]>;
- }
+multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
+ RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
+ PatFrag ld_frag, string asm, bit Is2Addr = 1> {
+ def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
+ def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
+ (ins DstRC:$src1, x86memop:$src2),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
+}
- def ANDPSrm : PSI<0x54, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "andps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (and (bc_v2i64 (v4f32 VR128:$src1)),
- (memopv2i64 addr:$src2)))]>;
- def ORPSrm : PSI<0x56, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "orps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (or (bc_v2i64 (v4f32 VR128:$src1)),
- (memopv2i64 addr:$src2)))]>;
- def XORPSrm : PSI<0x57, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "xorps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (xor (bc_v2i64 (v4f32 VR128:$src1)),
- (memopv2i64 addr:$src2)))]>;
- def ANDNPSrr : PSI<0x55, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "andnps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2i64 (and (xor VR128:$src1,
- (bc_v2i64 (v4i32 immAllOnesV))),
- VR128:$src2)))]>;
- def ANDNPSrm : PSI<0x55, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1,f128mem:$src2),
- "andnps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
- (bc_v2i64 (v4i32 immAllOnesV))),
- (memopv2i64 addr:$src2))))]>;
+let isAsmParserOnly = 1 in {
+ defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
+ f32mem, load, "cvtss2si">, XS, VEX;
+ defm Int_VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
+ int_x86_sse_cvtss2si64, f32mem, load, "cvtss2si">,
+ XS, VEX, VEX_W;
+ defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
+ f128mem, load, "cvtsd2si">, XD, VEX;
+ defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
+ int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
+ XD, VEX, VEX_W;
+
+ // FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_.
+ // Get rid of this hack or rename the intrinsics; there are several
+ // instructions that only match with the intrinsic form, so why create
+ // duplicates just to let them be recognized by the assembler?
+ defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
+ "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
+ defm VCVTSD2SI64 : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
+ "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
+}
+defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
+ f32mem, load, "cvtss2si">, XS;
+defm Int_CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
+ f32mem, load, "cvtss2si{q}">, XS, REX_W;
+defm Int_CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
+ f128mem, load, "cvtsd2si">, XD;
+defm Int_CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
+ f128mem, load, "cvtsd2si">, XD, REX_W;
+
+defm CVTSD2SI64 : sse12_cvt_s_np<0x2D, VR128, GR64, f64mem, "cvtsd2si{q}">, XD,
+ REX_W;
+
+let isAsmParserOnly = 1 in {
+ defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
+ int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
+ defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
+ int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
+ VEX_W;
+ defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
+ int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
+ defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
+ int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
+ VEX_4V, VEX_W;
}
let Constraints = "$src1 = $dst" in {
- def CMPPSrri : PSIi8<0xC2, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
- "cmp${cc}ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
- VR128:$src, imm:$cc))]>;
- def CMPPSrmi : PSIi8<0xC2, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
- "cmp${cc}ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
- (memop addr:$src), imm:$cc))]>;
+ defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
+ int_x86_sse_cvtsi2ss, i32mem, loadi32,
+ "cvtsi2ss">, XS;
+ defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
+ int_x86_sse_cvtsi642ss, i64mem, loadi64,
+ "cvtsi2ss{q}">, XS, REX_W;
+ defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
+ int_x86_sse2_cvtsi2sd, i32mem, loadi32,
+ "cvtsi2sd">, XD;
+ defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
+ int_x86_sse2_cvtsi642sd, i64mem, loadi64,
+ "cvtsi2sd">, XD, REX_W;
}
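
For reference, the Int_CVTSI2SS/Int_CVTSD2SI family defined above is what the usual compiler intrinsics lower to. A minimal C sketch of the behaviour these patterns encode, assuming a GCC/Clang-style <emmintrin.h>; the constants are purely illustrative:

    #include <emmintrin.h>   /* SSE2 intrinsics; also pulls in the SSE header */
    #include <stdio.h>

    int main(void) {
      /* cvtsi2ss converts a 32-bit integer into the low lane and leaves the
       * upper three lanes of the first operand untouched. */
      __m128 v = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
      v = _mm_cvtsi32_ss(v, 7);                    /* low lane becomes 7.0f */

      /* cvtss2si rounds according to MXCSR (round-to-nearest-even by default);
       * the cvttss2si form defined further below always truncates toward zero. */
      __m128 f = _mm_set_ss(2.7f);
      printf("%d %d\n", _mm_cvtss_si32(f), _mm_cvttss_si32(f));   /* 3 2 */

      /* The same pair exists for double precision (cvtsd2si / cvttsd2si). */
      __m128d d = _mm_set_sd(-1.5);
      printf("%d %d\n", _mm_cvtsd_si32(d), _mm_cvttsd_si32(d));   /* -2 -1 */
      return 0;
    }
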
-def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
- (CMPPSrri VR128:$src1, VR128:$src2, imm:$cc)>;
-def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
- (CMPPSrmi VR128:$src1, addr:$src2, imm:$cc)>;
-// Shuffle and unpack instructions
+// Instructions below don't have an AVX form.
+defm Int_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi,
+ f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}",
+ SSEPackedSingle>, TB;
+defm Int_CVTPD2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtpd2pi,
+ f128mem, memop, "cvtpd2pi\t{$src, $dst|$dst, $src}",
+ SSEPackedDouble>, TB, OpSize;
+defm Int_CVTTPS2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttps2pi,
+ f64mem, load, "cvttps2pi\t{$src, $dst|$dst, $src}",
+ SSEPackedSingle>, TB;
+defm Int_CVTTPD2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttpd2pi,
+ f128mem, memop, "cvttpd2pi\t{$src, $dst|$dst, $src}",
+ SSEPackedDouble>, TB, OpSize;
+defm Int_CVTPI2PD : sse12_cvt_pint<0x2A, VR64, VR128, int_x86_sse_cvtpi2pd,
+ i64mem, load, "cvtpi2pd\t{$src, $dst|$dst, $src}",
+ SSEPackedDouble>, TB, OpSize;
let Constraints = "$src1 = $dst" in {
- let isConvertibleToThreeAddress = 1 in // Convert to pshufd
- def SHUFPSrri : PSIi8<0xC6, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1,
- VR128:$src2, i8imm:$src3),
- "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set VR128:$dst,
- (v4f32 (shufp:$src3 VR128:$src1, VR128:$src2)))]>;
- def SHUFPSrmi : PSIi8<0xC6, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1,
- f128mem:$src2, i8imm:$src3),
- "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set VR128:$dst,
- (v4f32 (shufp:$src3
- VR128:$src1, (memopv4f32 addr:$src2))))]>;
-
- let AddedComplexity = 10 in {
- def UNPCKHPSrr : PSI<0x15, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "unpckhps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v4f32 (unpckh VR128:$src1, VR128:$src2)))]>;
- def UNPCKHPSrm : PSI<0x15, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "unpckhps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v4f32 (unpckh VR128:$src1,
- (memopv4f32 addr:$src2))))]>;
-
- def UNPCKLPSrr : PSI<0x14, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "unpcklps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v4f32 (unpckl VR128:$src1, VR128:$src2)))]>;
- def UNPCKLPSrm : PSI<0x14, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "unpcklps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (unpckl VR128:$src1, (memopv4f32 addr:$src2)))]>;
- } // AddedComplexity
-} // Constraints = "$src1 = $dst"
-
-// Mask creation
-def MOVMSKPSrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
- "movmskps\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse_movmsk_ps VR128:$src))]>;
-def MOVMSKPDrr : PDI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
- "movmskpd\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse2_movmsk_pd VR128:$src))]>;
-
-// Prefetch intrinsic.
-def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
- "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
-def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
- "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
-def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
- "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
-def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
- "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
-
-// Non-temporal stores
-def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
- "movntps\t{$src, $dst|$dst, $src}",
- [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
-
-let AddedComplexity = 400 in { // Prefer non-temporal versions
-def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movntps\t{$src, $dst|$dst, $src}",
- [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
-
-def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movntdq\t{$src, $dst|$dst, $src}",
- [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
-
-def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
- "movnti\t{$src, $dst|$dst, $src}",
- [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
- TB, Requires<[HasSSE2]>;
-
-def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
- "movnti\t{$src, $dst|$dst, $src}",
- [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
- TB, Requires<[HasSSE2]>;
+ defm Int_CVTPI2PS : sse12_cvt_pint_3addr<0x2A, VR64, VR128,
+ int_x86_sse_cvtpi2ps,
+ i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
+ SSEPackedSingle>, TB;
}
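
The CVTPS2PI/CVTPI2PS block above targets the MMX register file (VR64). A hedged sketch of the matching intrinsics, assuming MMX support in <xmmintrin.h>; the pi2ps/pi2pd direction is analogous via _mm_cvtpi32_ps and _mm_cvtpi32_pd:

    #include <xmmintrin.h>   /* SSE; the float<->MMX conversion intrinsics live here */
    #include <stdio.h>

    int main(void) {
      __m128 f = _mm_set_ps(0.0f, 0.0f, 2.5f, 1.5f);
      __m64 rounded   = _mm_cvtps_pi32(f);   /* cvtps2pi: low two floats -> 2 x i32, rounded */
      __m64 truncated = _mm_cvttps_pi32(f);  /* cvttps2pi: same, truncated toward zero */
      int r0 = _mm_cvtsi64_si32(rounded);    /* 2 (1.5 rounds to nearest even) */
      int t0 = _mm_cvtsi64_si32(truncated);  /* 1 */
      _mm_empty();                           /* clear MMX state before other FP / libc code */
      printf("%d %d\n", r0, t0);
      return 0;
    }
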
-// Load, store, and memory fence
-def SFENCE : PSI<0xAE, MRM7r, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>;
-
-// MXCSR register
-def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
- "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
-def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
- "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
-
-// Alias instructions that map zero vector to pxor / xorp* for sse.
-// We set canFoldAsLoad because this can be converted to a constant-pool
-// load of an all-zeros value if folding it would be beneficial.
-// FIXME: Change encoding to pseudo!
-let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
- isCodeGenOnly = 1 in
-def V_SET0 : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
- [(set VR128:$dst, (v4i32 immAllZerosV))]>;
-
-def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
-def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
-def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
-def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
-def : Pat<(v4f32 immAllZerosV), (V_SET0)>;
-
-def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
- (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), x86_subreg_ss))>;
-
-//===---------------------------------------------------------------------===//
-// SSE2 Instructions
-//===---------------------------------------------------------------------===//
-
-// Move Instructions. Register-to-register movsd is not used for FR64
-// register copies because it's a partial register update; FsMOVAPDrr is
-// used instead. Register-to-register movsd is not modeled as an INSERT_SUBREG
-// because INSERT_SUBREG requires that the insert be implementable in terms of
-// a copy, and just mentioned, we don't use movsd for copies.
-let Constraints = "$src1 = $dst" in
-def MOVSDrr : SDI<0x10, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, FR64:$src2),
- "movsd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (movl VR128:$src1, (scalar_to_vector FR64:$src2)))]>;
-
-// Extract the low 64-bit value from one vector and insert it into another.
-let AddedComplexity = 15 in
-def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
- (MOVSDrr VR128:$src1,
- (EXTRACT_SUBREG (v2f64 VR128:$src2), x86_subreg_sd))>;
-
-// Implicitly promote a 64-bit scalar to a vector.
-def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
- (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, x86_subreg_sd)>;
-
-// Loading from memory automatically zeroing upper bits.
-let canFoldAsLoad = 1, isReMaterializable = 1, AddedComplexity = 20 in
-def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
- "movsd\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (loadf64 addr:$src))]>;
+/// SSE 1 Only
-// MOVSDrm zeros the high parts of the register; represent this
-// with SUBREG_TO_REG.
-let AddedComplexity = 20 in {
-def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), x86_subreg_sd)>;
-def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), x86_subreg_sd)>;
-def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), x86_subreg_sd)>;
-def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), x86_subreg_sd)>;
-def : Pat<(v2f64 (X86vzload addr:$src)),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), x86_subreg_sd)>;
+// Aliases for intrinsics
+let isAsmParserOnly = 1 in {
+defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
+ f32mem, load, "cvttss2si">, XS, VEX;
+defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
+ int_x86_sse_cvttss2si64, f32mem, load,
+ "cvttss2si">, XS, VEX, VEX_W;
+defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
+ f128mem, load, "cvttss2si">, XD, VEX;
+defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
+ int_x86_sse2_cvttsd2si64, f128mem, load,
+ "cvttss2si">, XD, VEX, VEX_W;
+}
+defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
+ f32mem, load, "cvttss2si">, XS;
+defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
+ int_x86_sse_cvttss2si64, f32mem, load,
+ "cvttss2si{q}">, XS, REX_W;
+defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
+ f128mem, load, "cvttss2si">, XD;
+defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
+ int_x86_sse2_cvttsd2si64, f128mem, load,
+ "cvttss2si{q}">, XD, REX_W;
+
+let isAsmParserOnly = 1, Pattern = []<dag> in {
+defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
+ "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
+defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
+ "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
+ VEX_W;
+defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
+ "cvtdq2ps\t{$src, $dst|$dst, $src}",
+ SSEPackedSingle>, TB, VEX;
+defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
+ "cvtdq2ps\t{$src, $dst|$dst, $src}",
+ SSEPackedSingle>, TB, VEX;
+}
+let Pattern = []<dag> in {
+defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
+ "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
+defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
+ "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
+defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
+ "cvtdq2ps\t{$src, $dst|$dst, $src}",
+ SSEPackedSingle>, TB; /* PD SSE3 form is available */
}
-// Store scalar value to memory.
-def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
- "movsd\t{$src, $dst|$dst, $src}",
- [(store FR64:$src, addr:$dst)]>;
-
-// Extract and store.
-def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
- addr:$dst),
- (MOVSDmr addr:$dst,
- (EXTRACT_SUBREG (v2f64 VR128:$src), x86_subreg_sd))>;
-
-// Conversion instructions
-def CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
- "cvttsd2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (fp_to_sint FR64:$src))]>;
-def CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f64mem:$src),
- "cvttsd2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
+/// SSE 2 Only
+
+// Convert scalar double to scalar single
+let isAsmParserOnly = 1 in {
+def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
+ (ins FR64:$src1, FR64:$src2),
+ "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
+ VEX_4V;
+def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
+ (ins FR64:$src1, f64mem:$src2),
+ "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
+}
def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
"cvtsd2ss\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (fround FR64:$src))]>;
@@ -1200,35 +797,26 @@ def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
"cvtsd2ss\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
Requires<[HasSSE2, OptForSize]>;
-def CVTSI2SDrr : SDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR32:$src),
- "cvtsi2sd\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (sint_to_fp GR32:$src))]>;
-def CVTSI2SDrm : SDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i32mem:$src),
- "cvtsi2sd\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
-def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
-def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
-def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
-def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
-def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
-def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
-def CVTDQ2PSrr : PSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtdq2ps\t{$src, $dst|$dst, $src}", []>;
-def CVTDQ2PSrm : PSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtdq2ps\t{$src, $dst|$dst, $src}", []>;
-def COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
- "comisd\t{$src2, $src1|$src1, $src2}", []>;
-def COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
- "comisd\t{$src2, $src1|$src1, $src2}", []>;
-
-// SSE2 instructions with XS prefix
+let isAsmParserOnly = 1 in
+defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
+ int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
+ XS, VEX_4V;
+let Constraints = "$src1 = $dst" in
+defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
+ int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;
+
+// Convert scalar single to scalar double
+let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
+def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
+ (ins FR32:$src1, FR32:$src2),
+ "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, XS, Requires<[HasAVX]>, VEX_4V;
+def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
+ (ins FR32:$src1, f32mem:$src2),
+ "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
+}
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
"cvtss2sd\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (fextend FR32:$src))]>, XS,
@@ -1238,384 +826,51 @@ def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
[(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
Requires<[HasSSE2, OptForSize]>;
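
At the C level the scalar double<->single conversions above are plain casts, and the _Int patterns correspond to the intrinsics that operate on the low XMM lane. A small sketch, assuming <emmintrin.h>:

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
      double d = 1.0 / 3.0;
      float  f = (float)d;                  /* compiles to cvtsd2ss with SSE2 enabled */
      double back = (double)f;              /* cvtss2sd */

      /* Intrinsic form: convert the low lane of vd and keep the upper lanes of
       * the first operand, which is what the Int_* patterns model. */
      __m128d vd = _mm_set_sd(d);
      __m128  vf = _mm_cvtsd_ss(_mm_setzero_ps(), vd);
      printf("%.9g %.17g %.9g\n", f, back, _mm_cvtss_f32(vf));
      return 0;
    }
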
-def : Pat<(extloadf32 addr:$src),
- (CVTSS2SDrr (MOVSSrm addr:$src))>,
- Requires<[HasSSE2, OptForSpeed]>;
-
-// Match intrinsics which expect XMM operand(s).
-def Int_CVTSD2SIrr : SDI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
- "cvtsd2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>;
-def Int_CVTSD2SIrm : SDI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
- "cvtsd2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse2_cvtsd2si
- (load addr:$src)))]>;
-
-// Match intrinisics which expect MM and XMM operand(s).
-def Int_CVTPD2PIrr : PDI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
- "cvtpd2pi\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (int_x86_sse_cvtpd2pi VR128:$src))]>;
-def Int_CVTPD2PIrm : PDI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
- "cvtpd2pi\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (int_x86_sse_cvtpd2pi
- (memop addr:$src)))]>;
-def Int_CVTTPD2PIrr: PDI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
- "cvttpd2pi\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (int_x86_sse_cvttpd2pi VR128:$src))]>;
-def Int_CVTTPD2PIrm: PDI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
- "cvttpd2pi\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (int_x86_sse_cvttpd2pi
- (memop addr:$src)))]>;
-def Int_CVTPI2PDrr : PDI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
- "cvtpi2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse_cvtpi2pd VR64:$src))]>;
-def Int_CVTPI2PDrm : PDI<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
- "cvtpi2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse_cvtpi2pd
- (load addr:$src)))]>;
-
-// Aliases for intrinsics
-def Int_CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
- "cvttsd2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst,
- (int_x86_sse2_cvttsd2si VR128:$src))]>;
-def Int_CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
- "cvttsd2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse2_cvttsd2si
- (load addr:$src)))]>;
-
-// Comparison instructions
-let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
- def CMPSDrr : SDIi8<0xC2, MRMSrcReg,
- (outs FR64:$dst), (ins FR64:$src1, FR64:$src, SSECC:$cc),
- "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
-let mayLoad = 1 in
- def CMPSDrm : SDIi8<0xC2, MRMSrcMem,
- (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, SSECC:$cc),
- "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
-}
-
-let Defs = [EFLAGS] in {
-def UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2),
- "ucomisd\t{$src2, $src1|$src1, $src2}",
- [(X86cmp FR64:$src1, FR64:$src2), (implicit EFLAGS)]>;
-def UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2),
- "ucomisd\t{$src2, $src1|$src1, $src2}",
- [(X86cmp FR64:$src1, (loadf64 addr:$src2)),
- (implicit EFLAGS)]>;
-} // Defs = [EFLAGS]
-
-// Aliases to match intrinsics which expect XMM operand(s).
-let Constraints = "$src1 = $dst" in {
- def Int_CMPSDrr : SDIi8<0xC2, MRMSrcReg,
- (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src, SSECC:$cc),
- "cmp${cc}sd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
- VR128:$src, imm:$cc))]>;
- def Int_CMPSDrm : SDIi8<0xC2, MRMSrcMem,
- (outs VR128:$dst),
- (ins VR128:$src1, f64mem:$src, SSECC:$cc),
- "cmp${cc}sd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
- (load addr:$src), imm:$cc))]>;
-}
-
-let Defs = [EFLAGS] in {
-def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
- "ucomisd\t{$src2, $src1|$src1, $src2}",
- [(X86ucomi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
- (implicit EFLAGS)]>;
-def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs),(ins VR128:$src1, f128mem:$src2),
- "ucomisd\t{$src2, $src1|$src1, $src2}",
- [(X86ucomi (v2f64 VR128:$src1), (load addr:$src2)),
- (implicit EFLAGS)]>;
-
-def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
- "comisd\t{$src2, $src1|$src1, $src2}",
- [(X86comi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
- (implicit EFLAGS)]>;
-def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
- "comisd\t{$src2, $src1|$src1, $src2}",
- [(X86comi (v2f64 VR128:$src1), (load addr:$src2)),
- (implicit EFLAGS)]>;
-} // Defs = [EFLAGS]
-
-// Aliases of packed SSE2 instructions for scalar use. These all have names
-// that start with 'Fs'.
-
-// Alias instructions that map fld0 to pxor for sse.
-let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
- canFoldAsLoad = 1 in
-def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
- [(set FR64:$dst, fpimm0)]>,
- Requires<[HasSSE2]>, TB, OpSize;
-
-// Alias instruction to do FR64 reg-to-reg copy using movapd. Upper bits are
-// disregarded.
-let neverHasSideEffects = 1 in
-def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
- "movapd\t{$src, $dst|$dst, $src}", []>;
-
-// Alias instruction to load FR64 from f128mem using movapd. Upper bits are
-// disregarded.
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
- "movapd\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
-
-// Alias bitwise logical operations using SSE logical ops on packed FP values.
-let Constraints = "$src1 = $dst" in {
-let isCommutable = 1 in {
- def FsANDPDrr : PDI<0x54, MRMSrcReg, (outs FR64:$dst),
- (ins FR64:$src1, FR64:$src2),
- "andpd\t{$src2, $dst|$dst, $src2}",
- [(set FR64:$dst, (X86fand FR64:$src1, FR64:$src2))]>;
- def FsORPDrr : PDI<0x56, MRMSrcReg, (outs FR64:$dst),
- (ins FR64:$src1, FR64:$src2),
- "orpd\t{$src2, $dst|$dst, $src2}",
- [(set FR64:$dst, (X86for FR64:$src1, FR64:$src2))]>;
- def FsXORPDrr : PDI<0x57, MRMSrcReg, (outs FR64:$dst),
- (ins FR64:$src1, FR64:$src2),
- "xorpd\t{$src2, $dst|$dst, $src2}",
- [(set FR64:$dst, (X86fxor FR64:$src1, FR64:$src2))]>;
-}
-
-def FsANDPDrm : PDI<0x54, MRMSrcMem, (outs FR64:$dst),
- (ins FR64:$src1, f128mem:$src2),
- "andpd\t{$src2, $dst|$dst, $src2}",
- [(set FR64:$dst, (X86fand FR64:$src1,
- (memopfsf64 addr:$src2)))]>;
-def FsORPDrm : PDI<0x56, MRMSrcMem, (outs FR64:$dst),
- (ins FR64:$src1, f128mem:$src2),
- "orpd\t{$src2, $dst|$dst, $src2}",
- [(set FR64:$dst, (X86for FR64:$src1,
- (memopfsf64 addr:$src2)))]>;
-def FsXORPDrm : PDI<0x57, MRMSrcMem, (outs FR64:$dst),
- (ins FR64:$src1, f128mem:$src2),
- "xorpd\t{$src2, $dst|$dst, $src2}",
- [(set FR64:$dst, (X86fxor FR64:$src1,
- (memopfsf64 addr:$src2)))]>;
-
-let neverHasSideEffects = 1 in {
-def FsANDNPDrr : PDI<0x55, MRMSrcReg,
- (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
- "andnpd\t{$src2, $dst|$dst, $src2}", []>;
-let mayLoad = 1 in
-def FsANDNPDrm : PDI<0x55, MRMSrcMem,
- (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
- "andnpd\t{$src2, $dst|$dst, $src2}", []>;
-}
-}
-
-/// basic_sse2_fp_binop_rm - SSE2 binops come in both scalar and vector forms.
-///
-/// In addition, we also have a special variant of the scalar form here to
-/// represent the associated intrinsic operation. This form is unlike the
-/// plain scalar form, in that it takes an entire vector (instead of a scalar)
-/// and leaves the top elements unmodified (therefore these cannot be commuted).
-///
-/// These three forms can each be reg+reg or reg+mem, so there are a total of
-/// six "instructions".
-///
-let Constraints = "$src1 = $dst" in {
-multiclass basic_sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
- SDNode OpNode, Intrinsic F64Int,
- bit Commutable = 0> {
- // Scalar operation, reg+reg.
- def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
- let isCommutable = Commutable;
- }
-
- // Scalar operation, reg+mem.
- def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
- (ins FR64:$src1, f64mem:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
-
- // Vector operation, reg+reg.
- def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
- let isCommutable = Commutable;
- }
-
- // Vector operation, reg+mem.
- def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2),
- !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
-
- // Intrinsic operation, reg+reg.
- def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]>;
-
- // Intrinsic operation, reg+mem.
- def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, sdmem:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (F64Int VR128:$src1,
- sse_load_f64:$src2))]>;
+let isAsmParserOnly = 1 in {
+def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
+ VR128:$src2))]>, XS, VEX_4V,
+ Requires<[HasAVX]>;
+def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
+ "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
+ (load addr:$src2)))]>, XS, VEX_4V,
+ Requires<[HasAVX]>;
}
+let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
+def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ "cvtss2sd\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
+ VR128:$src2))]>, XS,
+ Requires<[HasSSE2]>;
+def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
+ "cvtss2sd\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
+ (load addr:$src2)))]>, XS,
+ Requires<[HasSSE2]>;
}
-// Arithmetic instructions
-defm ADD : basic_sse2_fp_binop_rm<0x58, "add", fadd, int_x86_sse2_add_sd, 1>;
-defm MUL : basic_sse2_fp_binop_rm<0x59, "mul", fmul, int_x86_sse2_mul_sd, 1>;
-defm SUB : basic_sse2_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse2_sub_sd>;
-defm DIV : basic_sse2_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse2_div_sd>;
-
-/// sse2_fp_binop_rm - Other SSE2 binops
-///
-/// This multiclass is like basic_sse2_fp_binop_rm, with the addition of
-/// instructions for a full-vector intrinsic form. Operations that map
-/// onto C operators don't use this form since they just use the plain
-/// vector form instead of having a separate vector intrinsic form.
-///
-/// This provides a total of eight "instructions".
-///
-let Constraints = "$src1 = $dst" in {
-multiclass sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
- SDNode OpNode,
- Intrinsic F64Int,
- Intrinsic V2F64Int,
- bit Commutable = 0> {
-
- // Scalar operation, reg+reg.
- def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
- let isCommutable = Commutable;
- }
-
- // Scalar operation, reg+mem.
- def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
- (ins FR64:$src1, f64mem:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
-
- // Vector operation, reg+reg.
- def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
- let isCommutable = Commutable;
- }
-
- // Vector operation, reg+mem.
- def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2),
- !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
-
- // Intrinsic operation, reg+reg.
- def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
- let isCommutable = Commutable;
- }
-
- // Intrinsic operation, reg+mem.
- def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, sdmem:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (F64Int VR128:$src1,
- sse_load_f64:$src2))]>;
-
- // Vector intrinsic operation, reg+reg.
- def PDrr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (V2F64Int VR128:$src1, VR128:$src2))]> {
- let isCommutable = Commutable;
- }
+def : Pat<(extloadf32 addr:$src),
+ (CVTSS2SDrr (MOVSSrm addr:$src))>,
+ Requires<[HasSSE2, OptForSpeed]>;
- // Vector intrinsic operation, reg+mem.
- def PDrm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2),
- !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (V2F64Int VR128:$src1,
- (memopv2f64 addr:$src2)))]>;
-}
+// Convert doubleword to packed single/double fp
+let isAsmParserOnly = 1 in { // SSE2 instructions without OpSize prefix
+def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvtdq2ps\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
+ TB, VEX, Requires<[HasAVX]>;
+def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+ "vcvtdq2ps\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
+ (bitconvert (memopv2i64 addr:$src))))]>,
+ TB, VEX, Requires<[HasAVX]>;
}
-
-defm MAX : sse2_fp_binop_rm<0x5F, "max", X86fmax,
- int_x86_sse2_max_sd, int_x86_sse2_max_pd>;
-defm MIN : sse2_fp_binop_rm<0x5D, "min", X86fmin,
- int_x86_sse2_min_sd, int_x86_sse2_min_pd>;
-
-//===---------------------------------------------------------------------===//
-// SSE packed FP Instructions
-
-// Move Instructions
-let neverHasSideEffects = 1 in
-def MOVAPDrr : PDI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movapd\t{$src, $dst|$dst, $src}", []>;
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def MOVAPDrm : PDI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "movapd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (alignedloadv2f64 addr:$src))]>;
-
-def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movapd\t{$src, $dst|$dst, $src}",
- [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
-
-let neverHasSideEffects = 1 in
-def MOVUPDrr : PDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movupd\t{$src, $dst|$dst, $src}", []>;
-let canFoldAsLoad = 1 in
-def MOVUPDrm : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "movupd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (loadv2f64 addr:$src))]>;
-def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movupd\t{$src, $dst|$dst, $src}",
- [(store (v2f64 VR128:$src), addr:$dst)]>;
-
-// Intrinsic forms of MOVUPD load and store
-def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "movupd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
-def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movupd\t{$src, $dst|$dst, $src}",
- [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
-
-let Constraints = "$src1 = $dst" in {
- let AddedComplexity = 20 in {
- def MOVLPDrm : PDI<0x12, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
- "movlpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2f64 (movlp VR128:$src1,
- (scalar_to_vector (loadf64 addr:$src2)))))]>;
- def MOVHPDrm : PDI<0x16, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
- "movhpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2f64 (movlhps VR128:$src1,
- (scalar_to_vector (loadf64 addr:$src2)))))]>;
- } // AddedComplexity
-} // Constraints = "$src1 = $dst"
-
-def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
- "movlpd\t{$src, $dst|$dst, $src}",
- [(store (f64 (vector_extract (v2f64 VR128:$src),
- (iPTR 0))), addr:$dst)]>;
-
-// v2f64 extract element 1 is always custom lowered to unpack high to low
-// and extract element 0 so the non-store version isn't too horrible.
-def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
- "movhpd\t{$src, $dst|$dst, $src}",
- [(store (f64 (vector_extract
- (v2f64 (unpckh VR128:$src, (undef))),
- (iPTR 0))), addr:$dst)]>;
-
-// SSE2 instructions without OpSize prefix
def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtdq2ps\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
@@ -1626,7 +881,18 @@ def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
(bitconvert (memopv2i64 addr:$src))))]>,
TB, Requires<[HasSSE2]>;
-// SSE2 instructions with XS prefix
+// FIXME: why is the non-intrinsic version described as SSE3?
+let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
+def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
+ XS, VEX, Requires<[HasAVX]>;
+def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
+ (bitconvert (memopv2i64 addr:$src))))]>,
+ XS, VEX, Requires<[HasAVX]>;
+}
def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtdq2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
@@ -1637,6 +903,34 @@ def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
(bitconvert (memopv2i64 addr:$src))))]>,
XS, Requires<[HasSSE2]>;
+
+// Convert packed single/double fp to doubleword
+let isAsmParserOnly = 1 in {
+def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
+ "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+}
+def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
+def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
+
+let isAsmParserOnly = 1 in {
+def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
+ VEX;
+def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
+ (ins f128mem:$src),
+ "cvtps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtps2dq
+ (memop addr:$src)))]>, VEX;
+}
def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
@@ -1644,15 +938,61 @@ def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2dq
(memop addr:$src)))]>;
-// SSE2 packed instructions with XS prefix
+
+let isAsmParserOnly = 1 in { // SSE2 packed instructions with XD prefix
+def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvtpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
+ XD, VEX, Requires<[HasAVX]>;
+def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "vcvtpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
+ (memop addr:$src)))]>,
+ XD, VEX, Requires<[HasAVX]>;
+}
+def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
+ XD, Requires<[HasSSE2]>;
+def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvtpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
+ (memop addr:$src)))]>,
+ XD, Requires<[HasSSE2]>;
+
+
+// Convert packed single/double fp to doubleword, with truncation
+let isAsmParserOnly = 1 in { // SSE2 packed instructions with XS prefix
+def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
+ "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+}
def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}", []>;
def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}", []>;
+
+let isAsmParserOnly = 1 in {
+def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvttps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_sse2_cvttps2dq VR128:$src))]>,
+ XS, VEX, Requires<[HasAVX]>;
+def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "vcvttps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvttps2dq
+ (memop addr:$src)))]>,
+ XS, VEX, Requires<[HasAVX]>;
+}
def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
+ [(set VR128:$dst,
(int_x86_sse2_cvttps2dq VR128:$src))]>,
XS, Requires<[HasSSE2]>;
def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
@@ -1661,17 +1001,18 @@ def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
(memop addr:$src)))]>,
XS, Requires<[HasSSE2]>;
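
The difference between the cvtps2dq and cvttps2dq families defined here is only the rounding step; a short C illustration, assuming <emmintrin.h>, with values chosen to show round-to-nearest-even versus truncation:

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128 f = _mm_set_ps(-2.5f, 2.5f, 1.5f, 0.9f);
      __m128i r = _mm_cvtps_epi32(f);    /* cvtps2dq: rounds per MXCSR (nearest-even) */
      __m128i t = _mm_cvttps_epi32(f);   /* cvttps2dq: always truncates toward zero */

      int ri[4], ti[4];
      _mm_storeu_si128((__m128i *)ri, r);
      _mm_storeu_si128((__m128i *)ti, t);
      printf("%d %d %d %d\n", ri[0], ri[1], ri[2], ri[3]);   /* 1 2 2 -2 */
      printf("%d %d %d %d\n", ti[0], ti[1], ti[2], ti[3]);   /* 0 1 2 -2 */
      return 0;
    }
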
-// SSE2 packed instructions with XD prefix
-def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtpd2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
- XD, Requires<[HasSSE2]>;
-def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtpd2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
- (memop addr:$src)))]>,
- XD, Requires<[HasSSE2]>;
-
+let isAsmParserOnly = 1 in {
+def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src),
+ "cvttpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
+ VEX;
+def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
+ (ins f128mem:$src),
+ "cvttpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
+ (memop addr:$src)))]>, VEX;
+}
def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
@@ -1680,12 +1021,56 @@ def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
[(set VR128:$dst, (int_x86_sse2_cvttpd2dq
(memop addr:$src)))]>;
-// SSE2 instructions without OpSize prefix
+let isAsmParserOnly = 1 in {
+// The assembler can recognize rr 256-bit instructions by seeing a ymm
+// register, but the same isn't true when using memory operands instead.
+// Provide other assembly rr and rm forms to address this explicitly.
+def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
+ "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+
+// XMM only
+def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
+
+// YMM only
+def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
+ "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
+ "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
+}
+
+// Convert packed single to packed double
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ // SSE2 instructions without OpSize prefix
+def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
+ "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
+ "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
+ "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
+}
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
+let isAsmParserOnly = 1 in {
+def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvtps2pd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
+ VEX, Requires<[HasAVX]>;
+def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
+ "vcvtps2pd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtps2pd
+ (load addr:$src)))]>,
+ VEX, Requires<[HasAVX]>;
+}
def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
@@ -1696,12 +1081,44 @@ def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
(load addr:$src)))]>,
TB, Requires<[HasSSE2]>;
+// Convert packed double to packed single
+let isAsmParserOnly = 1 in {
+// The assembler can recognize rr 256-bit instructions by seeing a ymm
+// register, but the same isn't true when using memory operands instead.
+// Provide other assembly rr and rm forms to address this explicitly.
+def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
+ "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
+
+// XMM only
+def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
+
+// YMM only
+def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
+ "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
+ "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
+}
def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
+let isAsmParserOnly = 1 in {
+def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtpd2ps\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
+def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
+ (ins f128mem:$src),
+ "cvtpd2ps\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
+ (memop addr:$src)))]>;
+}
def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
@@ -1710,258 +1127,1189 @@ def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
[(set VR128:$dst, (int_x86_sse2_cvtpd2ps
(memop addr:$src)))]>;
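
The packed single<->double conversions above widen the two low floats (cvtps2pd) or narrow two doubles into the low half of the destination and zero the rest (cvtpd2ps); a minimal sketch with the usual intrinsics, assuming <emmintrin.h>:

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128  f = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
      __m128d d = _mm_cvtps_pd(f);          /* cvtps2pd: {1.0, 2.0} */
      __m128  b = _mm_cvtpd_ps(d);          /* cvtpd2ps: {1.0, 2.0, 0.0, 0.0} */

      double dd[2]; float ff[4];
      _mm_storeu_pd(dd, d);
      _mm_storeu_ps(ff, b);
      printf("%g %g | %g %g %g %g\n", dd[0], dd[1], ff[0], ff[1], ff[2], ff[3]);
      return 0;
    }
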
-// Match intrinsics which expect XMM operand(s).
-// Aliases for intrinsics
+// AVX 256-bit register conversion intrinsics
+// FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
+// whenever possible to avoid declaring two versions of each one.
+def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
+ (VCVTDQ2PSYrr VR256:$src)>;
+def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
+ (VCVTDQ2PSYrm addr:$src)>;
+
+def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
+ (VCVTPD2PSYrr VR256:$src)>;
+def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
+ (VCVTPD2PSYrm addr:$src)>;
+
+def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
+ (VCVTPS2DQYrr VR256:$src)>;
+def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
+ (VCVTPS2DQYrm addr:$src)>;
+
+def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
+ (VCVTPS2PDYrr VR128:$src)>;
+def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
+ (VCVTPS2PDYrm addr:$src)>;
+
+def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
+ (VCVTTPD2DQYrr VR256:$src)>;
+def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
+ (VCVTTPD2DQYrm addr:$src)>;
+
+def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
+ (VCVTTPS2DQYrr VR256:$src)>;
+def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
+ (VCVTTPS2DQYrm addr:$src)>;
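
The patterns above route the 256-bit AVX conversion intrinsics onto the Y-suffixed instruction definitions. A hedged sketch of how that surfaces in C, assuming an AVX-capable compiler with <immintrin.h> and -mavx:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256d d = _mm256_set_pd(4.5, 3.5, 2.5, 1.5);
      __m128  s = _mm256_cvtpd_ps(d);       /* VCVTPD2PSY: 4 doubles -> 4 floats */
      __m128i t = _mm256_cvttpd_epi32(d);   /* VCVTTPD2DQY: truncate to 4 x i32 */

      float ff[4]; int ii[4];
      _mm_storeu_ps(ff, s);
      _mm_storeu_si128((__m128i *)ii, t);
      printf("%g %g %g %g | %d %d %d %d\n",
             ff[0], ff[1], ff[2], ff[3], ii[0], ii[1], ii[2], ii[3]);
      return 0;
    }
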
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Compare Instructions
+//===----------------------------------------------------------------------===//
+
+// sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
+multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
+ string asm, string asm_alt> {
+ def rr : SIi8<0xC2, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
+ asm, []>;
+ let mayLoad = 1 in
+ def rm : SIi8<0xC2, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
+ asm, []>;
+ // Accept explicit immediate argument form instead of comparison code.
+ let isAsmParserOnly = 1 in {
+ def rr_alt : SIi8<0xC2, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
+ asm_alt, []>;
+ let mayLoad = 1 in
+ def rm_alt : SIi8<0xC2, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
+ asm_alt, []>;
+ }
+}
+
+let neverHasSideEffects = 1, isAsmParserOnly = 1 in {
+ defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
+ "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
+ "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
+ XS, VEX_4V;
+ defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
+ "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
+ "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
+ XD, VEX_4V;
+}
+
+let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
+ defm CMPSS : sse12_cmp_scalar<FR32, f32mem,
+ "cmp${cc}ss\t{$src, $dst|$dst, $src}",
+ "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}">, XS;
+ defm CMPSD : sse12_cmp_scalar<FR64, f64mem,
+ "cmp${cc}sd\t{$src, $dst|$dst, $src}",
+ "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}">, XD;
+}
+
+multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
+ Intrinsic Int, string asm> {
+ def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
+ [(set VR128:$dst, (Int VR128:$src1,
+ VR128:$src, imm:$cc))]>;
+ def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
+ [(set VR128:$dst, (Int VR128:$src1,
+ (load addr:$src), imm:$cc))]>;
+}
+
+// Aliases to match intrinsics which expect XMM operand(s).
+let isAsmParserOnly = 1 in {
+ defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
+ "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
+ XS, VEX_4V;
+ defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
+ "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
+ XD, VEX_4V;
+}
let Constraints = "$src1 = $dst" in {
-def Int_CVTSI2SDrr: SDI<0x2A, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
- "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
- GR32:$src2))]>;
-def Int_CVTSI2SDrm: SDI<0x2A, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
- "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
- (loadi32 addr:$src2)))]>;
-def Int_CVTSD2SSrr: SDI<0x5A, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
- VR128:$src2))]>;
-def Int_CVTSD2SSrm: SDI<0x5A, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
- "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
- (load addr:$src2)))]>;
-def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "cvtss2sd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
- VR128:$src2))]>, XS,
- Requires<[HasSSE2]>;
-def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
- "cvtss2sd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
- (load addr:$src2)))]>, XS,
- Requires<[HasSSE2]>;
+ defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
+ "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
+ defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
+ "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
}
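
Unlike the comis/ucomis forms further below, the cmpss/cmpsd intrinsic patterns modelled here return an all-ones/all-zeros mask in the low lane instead of setting EFLAGS. A short C sketch, assuming <emmintrin.h>:

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128 a = _mm_set_ss(1.0f), b = _mm_set_ss(2.0f);
      /* cmpss with predicate 1 (lt): low lane is 0xFFFFFFFF when a < b, else 0;
       * the upper lanes are passed through from the first operand. */
      __m128 m = _mm_cmplt_ss(a, b);
      printf("lt: %d\n", _mm_movemask_ps(m) & 1);    /* 1 */

      __m128d x = _mm_set_sd(3.0), y = _mm_set_sd(3.0);
      __m128d e = _mm_cmpeq_sd(x, y);                /* cmpsd, predicate 0 (eq) */
      printf("eq: %d\n", _mm_movemask_pd(e) & 1);    /* 1 */
      return 0;
    }
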
-// Arithmetic
-/// sse2_fp_unop_rm - SSE2 unops come in both scalar and vector forms.
+// sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
+multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
+ ValueType vt, X86MemOperand x86memop,
+ PatFrag ld_frag, string OpcodeStr, Domain d> {
+ def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
+ [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
+ def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
+ [(set EFLAGS, (OpNode (vt RC:$src1),
+ (ld_frag addr:$src2)))], d>;
+}
+
+let Defs = [EFLAGS] in {
+ let isAsmParserOnly = 1 in {
+ defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
+ "ucomiss", SSEPackedSingle>, VEX;
+ defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
+ "ucomisd", SSEPackedDouble>, OpSize, VEX;
+ let Pattern = []<dag> in {
+ defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
+ "comiss", SSEPackedSingle>, VEX;
+ defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
+ "comisd", SSEPackedDouble>, OpSize, VEX;
+ }
+
+ defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
+ load, "ucomiss", SSEPackedSingle>, VEX;
+ defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
+ load, "ucomisd", SSEPackedDouble>, OpSize, VEX;
+
+ defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
+ load, "comiss", SSEPackedSingle>, VEX;
+ defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
+ load, "comisd", SSEPackedDouble>, OpSize, VEX;
+ }
+ defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
+ "ucomiss", SSEPackedSingle>, TB;
+ defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
+ "ucomisd", SSEPackedDouble>, TB, OpSize;
+
+ let Pattern = []<dag> in {
+ defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
+ "comiss", SSEPackedSingle>, TB;
+ defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
+ "comisd", SSEPackedDouble>, TB, OpSize;
+ }
+
+ defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
+ load, "ucomiss", SSEPackedSingle>, TB;
+ defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
+ load, "ucomisd", SSEPackedDouble>, TB, OpSize;
+
+ defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
+ "comiss", SSEPackedSingle>, TB;
+ defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
+ "comisd", SSEPackedDouble>, TB, OpSize;
+} // Defs = [EFLAGS]
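
The sse12_ord_cmp instantiations above set EFLAGS directly; at the C level they back the comi*/ucomi* intrinsics, which return 0 or 1 (the ucomi* variants differ only in not signalling an invalid-operation exception on quiet NaNs). A minimal sketch, assuming <emmintrin.h>:

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128 a = _mm_set_ss(1.0f), b = _mm_set_ss(2.0f);
      printf("comilt:  %d\n", _mm_comilt_ss(a, b));   /* comiss + setcc -> 1 */

      __m128d x = _mm_set_sd(3.0), y = _mm_set_sd(4.0);
      printf("ucomieq: %d\n", _mm_ucomieq_sd(x, y));  /* ucomisd -> 0 */
      return 0;
    }
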
+
+// sse12_cmp_packed - sse 1 & 2 compare packed instructions
+multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
+ Intrinsic Int, string asm, string asm_alt,
+ Domain d> {
+ def rri : PIi8<0xC2, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
+ [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
+ def rmi : PIi8<0xC2, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
+ [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
+ // Accept explicit immediate argument form instead of comparison code.
+ let isAsmParserOnly = 1 in {
+ def rri_alt : PIi8<0xC2, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
+ asm_alt, [], d>;
+ def rmi_alt : PIi8<0xC2, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
+ asm_alt, [], d>;
+ }
+}
+
+let isAsmParserOnly = 1 in {
+ defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
+ "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
+ "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
+ SSEPackedSingle>, VEX_4V;
+ defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
+ "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
+ "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
+ SSEPackedDouble>, OpSize, VEX_4V;
+ defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
+ "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
+ "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
+ SSEPackedSingle>, VEX_4V;
+ defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
+ "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
+ "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
+ SSEPackedDouble>, OpSize, VEX_4V;
+}
+let Constraints = "$src1 = $dst" in {
+ defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
+ "cmp${cc}ps\t{$src, $dst|$dst, $src}",
+ "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
+ SSEPackedSingle>, TB;
+ defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
+ "cmp${cc}pd\t{$src, $dst|$dst, $src}",
+ "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
+ SSEPackedDouble>, TB, OpSize;
+}
+
+def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
+ (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
+def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
+ (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
+def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
+ (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
+def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
+ (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
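
For reference, the CMPPS/CMPPD forms defined above produce a per-lane all-ones or all-zeros mask selected by the $cc immediate. A minimal C++ sketch of that behaviour, assuming the standard <xmmintrin.h>/<emmintrin.h> compiler intrinsics (not part of this diff):

  #include <xmmintrin.h>  // SSE
  #include <emmintrin.h>  // SSE2

  // CMPPS with the LT predicate: each lane is all-ones if a < b, else zero.
  __m128 less_mask(__m128 a, __m128 b) { return _mm_cmplt_ps(a, b); }

  // CMPPD with the EQ predicate, double-precision lanes.
  __m128d equal_mask(__m128d a, __m128d b) { return _mm_cmpeq_pd(a, b); }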
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Shuffle Instructions
+//===----------------------------------------------------------------------===//
+
+/// sse12_shuffle - sse 1 & 2 shuffle instructions
+multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
+ ValueType vt, string asm, PatFrag mem_frag,
+ Domain d, bit IsConvertibleToThreeAddress = 0> {
+ def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
+ [(set RC:$dst, (vt (shufp:$src3
+ RC:$src1, (mem_frag addr:$src2))))], d>;
+ let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
+ def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
+ [(set RC:$dst,
+ (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
+}
+
+let isAsmParserOnly = 1 in {
+ defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
+ "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ memopv4f32, SSEPackedSingle>, VEX_4V;
+ defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
+ "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ memopv8f32, SSEPackedSingle>, VEX_4V;
+ defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
+        "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ memopv2f64, SSEPackedDouble>, OpSize, VEX_4V;
+ defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
+        "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ memopv4f64, SSEPackedDouble>, OpSize, VEX_4V;
+}
+
+let Constraints = "$src1 = $dst" in {
+ defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
+ "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
+ TB;
+ defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
+ "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ memopv2f64, SSEPackedDouble>, TB, OpSize;
+}
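
A small C++ illustration of the lane selection encoded by the i8imm operand of SHUFPS above, again via the standard intrinsics (assumed, not part of this diff):

  #include <xmmintrin.h>

  // _MM_SHUFFLE(z, y, x, w) packs four 2-bit lane selectors into the imm8:
  // result = { a[w], a[x], b[y], b[z] }.
  __m128 shuffle_demo(__m128 a, __m128 b) {
    return _mm_shuffle_ps(a, b, _MM_SHUFFLE(2, 3, 0, 1)); // { a[1], a[0], b[3], b[2] }
  }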
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Unpack Instructions
+//===----------------------------------------------------------------------===//
+
+/// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
+multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
+ PatFrag mem_frag, RegisterClass RC,
+ X86MemOperand x86memop, string asm,
+ Domain d> {
+ def rr : PI<opc, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ asm, [(set RC:$dst,
+ (vt (OpNode RC:$src1, RC:$src2)))], d>;
+ def rm : PI<opc, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
+ asm, [(set RC:$dst,
+ (vt (OpNode RC:$src1,
+ (mem_frag addr:$src2))))], d>;
+}
+
+let AddedComplexity = 10 in {
+ let isAsmParserOnly = 1 in {
+ defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
+ VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedSingle>, VEX_4V;
+ defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
+ VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedDouble>, OpSize, VEX_4V;
+ defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
+ VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedSingle>, VEX_4V;
+ defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
+ VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedDouble>, OpSize, VEX_4V;
+
+ defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
+ VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedSingle>, VEX_4V;
+ defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
+ VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedDouble>, OpSize, VEX_4V;
+ defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
+ VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedSingle>, VEX_4V;
+ defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
+ VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedDouble>, OpSize, VEX_4V;
+ }
+
+ let Constraints = "$src1 = $dst" in {
+ defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
+ VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
+ SSEPackedSingle>, TB;
+ defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
+ VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
+ SSEPackedDouble>, TB, OpSize;
+ defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
+ VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
+ SSEPackedSingle>, TB;
+ defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
+ VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
+ SSEPackedDouble>, TB, OpSize;
+ } // Constraints = "$src1 = $dst"
+} // AddedComplexity
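
The unpack forms interleave halves of the two sources; a short C++ sketch with the standard intrinsics (assumes an SSE-capable host, not part of this diff):

  #include <xmmintrin.h>

  // UNPCKLPS: interleave the low halves  -> { a0, b0, a1, b1 }
  __m128 interleave_low(__m128 a, __m128 b)  { return _mm_unpacklo_ps(a, b); }
  // UNPCKHPS: interleave the high halves -> { a2, b2, a3, b3 }
  __m128 interleave_high(__m128 a, __m128 b) { return _mm_unpackhi_ps(a, b); }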
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Extract Floating-Point Sign mask
+//===----------------------------------------------------------------------===//
+
+/// sse12_extr_sign_mask - sse 1 & 2 packed FP sign mask extraction
+multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
+ Domain d> {
+ def rr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+ [(set GR32:$dst, (Int RC:$src))], d>;
+}
+
+// Mask creation
+defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
+ SSEPackedSingle>, TB;
+defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
+ SSEPackedDouble>, TB, OpSize;
+
+let isAsmParserOnly = 1 in {
+ defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
+ "movmskps", SSEPackedSingle>, VEX;
+ defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
+ "movmskpd", SSEPackedDouble>, OpSize,
+ VEX;
+ defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
+ "movmskps", SSEPackedSingle>, VEX;
+ defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
+ "movmskpd", SSEPackedDouble>, OpSize,
+ VEX;
+
+ // Assembler Only
+ def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
+ "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
+ def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
+ "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
+ VEX;
+ def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
+ "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
+ def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
+ "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
+ VEX;
+}
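
MOVMSKPS/MOVMSKPD copy the per-lane sign bits into the low bits of a general-purpose register; a reference sketch in C++ (standard intrinsics assumed, not part of this diff):

  #include <xmmintrin.h>
  #include <emmintrin.h>

  // Bit i of the result is the sign bit of lane i.
  int any_lane_negative(__m128 v)   { return _mm_movemask_ps(v) != 0; }
  int all_lanes_negative(__m128d v) { return _mm_movemask_pd(v) == 0x3; }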
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
+//===----------------------------------------------------------------------===//
+
+// Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
+// names that start with 'Fs'.
+
+// Alias instructions that map fld0 to pxor for sse.
+let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
+ canFoldAsLoad = 1 in {
+ // FIXME: Set encoding to pseudo!
+def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
+ [(set FR32:$dst, fp32imm0)]>,
+ Requires<[HasSSE1]>, TB, OpSize;
+def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
+ [(set FR64:$dst, fpimm0)]>,
+ Requires<[HasSSE2]>, TB, OpSize;
+}
+
+// Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
+// bits are disregarded.
+let neverHasSideEffects = 1 in {
+def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
+ "movaps\t{$src, $dst|$dst, $src}", []>;
+def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
+ "movapd\t{$src, $dst|$dst, $src}", []>;
+}
+
+// Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
+// bits are disregarded.
+let canFoldAsLoad = 1, isReMaterializable = 1 in {
+def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
+ "movaps\t{$src, $dst|$dst, $src}",
+ [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
+def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
+ "movapd\t{$src, $dst|$dst, $src}",
+ [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
+}
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Logical Instructions
+//===----------------------------------------------------------------------===//
+
+/// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
+///
+multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
+ SDNode OpNode> {
+ let isAsmParserOnly = 1 in {
+ defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
+ FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, VEX_4V;
+
+ defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
+ FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, OpSize, VEX_4V;
+ }
+
+ let Constraints = "$src1 = $dst" in {
+ defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
+ f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
+
+ defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
+ f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
+ }
+}
+
+// Alias bitwise logical operations using SSE logical ops on packed FP values.
+let mayLoad = 0 in {
+ defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
+ defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
+ defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
+}
+
+let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
+ defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
+
+/// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
///
+multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
+ SDNode OpNode, int HasPat = 0,
+ list<list<dag>> Pattern = []> {
+ let isAsmParserOnly = 1, Pattern = []<dag> in {
+ defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
+ !strconcat(OpcodeStr, "ps"), f128mem,
+ !if(HasPat, Pattern[0], // rr
+ [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
+ VR128:$src2)))]),
+ !if(HasPat, Pattern[2], // rm
+ [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
+ (memopv2i64 addr:$src2)))]), 0>,
+ VEX_4V;
+
+ defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
+ !strconcat(OpcodeStr, "pd"), f128mem,
+ !if(HasPat, Pattern[1], // rr
+ [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
+ (bc_v2i64 (v2f64
+ VR128:$src2))))]),
+ !if(HasPat, Pattern[3], // rm
+ [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
+ (memopv2i64 addr:$src2)))]), 0>,
+ OpSize, VEX_4V;
+ }
+ let Constraints = "$src1 = $dst" in {
+ defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
+ !strconcat(OpcodeStr, "ps"), f128mem,
+ !if(HasPat, Pattern[0], // rr
+ [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
+ VR128:$src2)))]),
+ !if(HasPat, Pattern[2], // rm
+ [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
+ (memopv2i64 addr:$src2)))])>, TB;
+
+ defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
+ !strconcat(OpcodeStr, "pd"), f128mem,
+ !if(HasPat, Pattern[1], // rr
+ [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
+ (bc_v2i64 (v2f64
+ VR128:$src2))))]),
+ !if(HasPat, Pattern[3], // rm
+ [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
+ (memopv2i64 addr:$src2)))])>,
+ TB, OpSize;
+ }
+}
+
+/// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
+///
+let isAsmParserOnly = 1 in {
+multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr> {
+ defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
+ !strconcat(OpcodeStr, "ps"), f256mem, [], [], 0>, VEX_4V;
+
+ defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
+ !strconcat(OpcodeStr, "pd"), f256mem, [], [], 0>, OpSize, VEX_4V;
+}
+}
+
+// AVX 256-bit packed logical ops forms
+defm VAND : sse12_fp_packed_logical_y<0x54, "and">;
+defm VOR : sse12_fp_packed_logical_y<0x56, "or">;
+defm VXOR : sse12_fp_packed_logical_y<0x57, "xor">;
+let isCommutable = 0 in
+ defm VANDN : sse12_fp_packed_logical_y<0x55, "andn">;
+
+defm AND : sse12_fp_packed_logical<0x54, "and", and>;
+defm OR : sse12_fp_packed_logical<0x56, "or", or>;
+defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
+let isCommutable = 0 in
+ defm ANDN : sse12_fp_packed_logical<0x55, "andn", undef /* dummy */, 1, [
+ // single r+r
+ [(set VR128:$dst, (v2i64 (and (xor VR128:$src1,
+ (bc_v2i64 (v4i32 immAllOnesV))),
+ VR128:$src2)))],
+ // double r+r
+ [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
+ (bc_v2i64 (v2f64 VR128:$src2))))],
+ // single r+m
+ [(set VR128:$dst, (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
+ (bc_v2i64 (v4i32 immAllOnesV))),
+ (memopv2i64 addr:$src2))))],
+ // double r+m
+ [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
+ (memopv2i64 addr:$src2)))]]>;
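
The ANDN patterns above compute (~src1) & src2, which is exactly what the andnot intrinsic does; a one-line C++ sketch (standard intrinsics assumed, not part of this diff):

  #include <xmmintrin.h>

  // ANDNPS: lanes where mask is all-ones are cleared, the rest pass through.
  __m128 clear_masked_lanes(__m128 mask, __m128 v) { return _mm_andnot_ps(mask, v); }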
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Arithmetic Instructions
+//===----------------------------------------------------------------------===//
+
+/// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
+/// vector forms.
+///
+/// In addition, we also have a special variant of the scalar form here to
+/// represent the associated intrinsic operation. This form is unlike the
+/// plain scalar form, in that it takes an entire vector (instead of a scalar)
+/// and leaves the top elements unmodified (therefore these cannot be commuted).
+///
+/// These three forms can each be reg+reg or reg+mem.
+///
+
+/// FIXME: once all 256-bit intrinsics are matched, clean up and refactor those
+/// classes below
+multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ bit Is2Addr = 1> {
+ defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
+ OpNode, FR32, f32mem, Is2Addr>, XS;
+ defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
+ OpNode, FR64, f64mem, Is2Addr>, XD;
+}
+
+multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ bit Is2Addr = 1> {
+ let mayLoad = 0 in {
+ defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
+ v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
+ defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
+ v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
+ }
+}
+
+multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
+ SDNode OpNode> {
+ let mayLoad = 0 in {
+ defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
+ v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
+ defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
+ v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
+ }
+}
+
+multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
+ bit Is2Addr = 1> {
+ defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
+ !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
+ defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
+ !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
+}
+
+multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
+ bit Is2Addr = 1> {
+ defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
+ !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
+ SSEPackedSingle, Is2Addr>, TB;
+
+ defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
+ !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
+ SSEPackedDouble, Is2Addr>, TB, OpSize;
+}
+
+multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
+ defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
+ !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
+ SSEPackedSingle, 0>, TB;
+
+ defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
+ !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
+ SSEPackedDouble, 0>, TB, OpSize;
+}
+
+// Binary Arithmetic instructions
+let isAsmParserOnly = 1 in {
+ defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
+ basic_sse12_fp_binop_s_int<0x58, "add", 0>,
+ basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
+ basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
+ defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
+ basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
+ basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
+ basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
+
+ let isCommutable = 0 in {
+ defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
+ basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
+ basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
+ basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
+ defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
+ basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
+ basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
+ basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
+ defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
+ basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
+ basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
+ basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
+ basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
+ basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
+ defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
+ basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
+ basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
+ basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
+ basic_sse12_fp_binop_p_y_int<0x5D, "min">,
+ basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
+ }
+}
+
+let Constraints = "$src1 = $dst" in {
+ defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
+ basic_sse12_fp_binop_p<0x58, "add", fadd>,
+ basic_sse12_fp_binop_s_int<0x58, "add">;
+ defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
+ basic_sse12_fp_binop_p<0x59, "mul", fmul>,
+ basic_sse12_fp_binop_s_int<0x59, "mul">;
+
+ let isCommutable = 0 in {
+ defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
+ basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
+ basic_sse12_fp_binop_s_int<0x5C, "sub">;
+ defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
+ basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
+ basic_sse12_fp_binop_s_int<0x5E, "div">;
+ defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
+ basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
+ basic_sse12_fp_binop_s_int<0x5F, "max">,
+ basic_sse12_fp_binop_p_int<0x5F, "max">;
+ defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
+ basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
+ basic_sse12_fp_binop_s_int<0x5D, "min">,
+ basic_sse12_fp_binop_p_int<0x5D, "min">;
+ }
+}
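
As the comment above notes, the intrinsic scalar forms operate on a whole vector but modify only the low element, which is why they cannot be commuted; a C++ sketch of the difference (standard intrinsics assumed, not part of this diff):

  #include <xmmintrin.h>

  // ADDPS: all four lanes.
  __m128 add_all(__m128 a, __m128 b)   { return _mm_add_ps(a, b); }
  // ADDSS: lane 0 only; lanes 1-3 of the first operand pass through unchanged,
  // so swapping the operands changes the result.
  __m128 add_lane0(__m128 a, __m128 b) { return _mm_add_ss(a, b); } // { a0+b0, a1, a2, a3 }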
+
+/// Unop Arithmetic
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.
-///
-/// These four forms can each have a reg or a mem operand, so there are a
-/// total of eight "instructions".
-///
-multiclass sse2_fp_unop_rm<bits<8> opc, string OpcodeStr,
- SDNode OpNode,
- Intrinsic F64Int,
- Intrinsic V2F64Int,
- bit Commutable = 0> {
- // Scalar operation, reg.
+
+/// sse1_fp_unop_s - SSE1 unops in scalar form.
+multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
+ SDNode OpNode, Intrinsic F32Int> {
+ def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
+ !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
+ [(set FR32:$dst, (OpNode FR32:$src))]>;
+ // For scalar unary operations, fold a load into the operation
+ // only in OptForSize mode. It eliminates an instruction, but it also
+ // eliminates a whole-register clobber (the load), so it introduces a
+ // partial register update condition.
+ def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
+ !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
+ [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
+ Requires<[HasSSE1, OptForSize]>;
+ def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (F32Int VR128:$src))]>;
+ def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
+ !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
+}
+
+/// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
+multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
+ SDNode OpNode, Intrinsic F32Int> {
+ def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
+ !strconcat(OpcodeStr,
+ "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+ def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
+ !strconcat(OpcodeStr,
+ "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, XS, Requires<[HasAVX, OptForSize]>;
+ def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr,
+ "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
+ [(set VR128:$dst, (F32Int VR128:$src))]>;
+ def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
+ !strconcat(OpcodeStr,
+ "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
+ [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
+}
+
+/// sse1_fp_unop_p - SSE1 unops in packed form.
+multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
+ def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
+ def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
+}
+
+/// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
+multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
+ def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
+ def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
+}
+
+/// sse1_fp_unop_p_int - SSE1 intrinsic unops in packed forms.
+multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
+ Intrinsic V4F32Int> {
+ def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (V4F32Int VR128:$src))]>;
+ def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
+}
+
+/// sse1_fp_unop_p_y_int - AVX 256-bit intrinsic unops in packed forms.
+multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
+ Intrinsic V4F32Int> {
+ def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (V4F32Int VR256:$src))]>;
+ def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
+}
+
+/// sse2_fp_unop_s - SSE2 unops in scalar form.
+multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
+ SDNode OpNode, Intrinsic F64Int> {
def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
- [(set FR64:$dst, (OpNode FR64:$src))]> {
- let isCommutable = Commutable;
- }
-
- // Scalar operation, mem.
- def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
+ [(set FR64:$dst, (OpNode FR64:$src))]>;
+ // See the comments in sse1_fp_unop_s for why this is OptForSize.
+ def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
- [(set FR64:$dst, (OpNode (load addr:$src)))]>;
+ [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
+ Requires<[HasSSE2, OptForSize]>;
+ def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (F64Int VR128:$src))]>;
+ def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
+ !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
+}
- // Vector operation, reg.
+/// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
+multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
+ SDNode OpNode, Intrinsic F64Int> {
+ def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
+ !strconcat(OpcodeStr,
+ "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+ def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
+ (ins FR64:$src1, f64mem:$src2),
+ !strconcat(OpcodeStr,
+ "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+ def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
+ [(set VR128:$dst, (F64Int VR128:$src))]>;
+ def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
+ !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
+ [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
+}
+
+/// sse2_fp_unop_p - SSE2 unops in vector forms.
+multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
+ SDNode OpNode> {
def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]> {
- let isCommutable = Commutable;
- }
-
- // Vector operation, mem.
+ [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
+}
- // Intrinsic operation, reg.
- def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (F64Int VR128:$src))]> {
- let isCommutable = Commutable;
- }
-
- // Intrinsic operation, mem.
- def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
- !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
+/// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
+multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
+ def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
+ def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
+ !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
+}
- // Vector intrinsic operation, reg
+/// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
+multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
+ Intrinsic V2F64Int> {
def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (V2F64Int VR128:$src))]> {
- let isCommutable = Commutable;
- }
-
- // Vector intrinsic operation, mem
+ [(set VR128:$dst, (V2F64Int VR128:$src))]>;
def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
}
+/// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
+multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
+ Intrinsic V2F64Int> {
+ def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (V2F64Int VR256:$src))]>;
+ def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
+ !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ // Square root.
+ defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse_sqrt_ss>,
+ sse2_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse2_sqrt_sd>,
+ VEX_4V;
+
+ defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
+ sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
+ sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
+ sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
+ sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
+ sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
+ sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
+ sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
+ VEX;
+
+ // Reciprocal approximations. Note that these typically require refinement
+ // in order to obtain suitable precision.
+ defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt", X86frsqrt,
+ int_x86_sse_rsqrt_ss>, VEX_4V;
+ defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
+ sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
+ sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
+ sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
+
+ defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp", X86frcp, int_x86_sse_rcp_ss>,
+ VEX_4V;
+ defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
+ sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
+ sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
+ sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
+}
+
// Square root.
-defm SQRT : sse2_fp_unop_rm<0x51, "sqrt", fsqrt,
- int_x86_sse2_sqrt_sd, int_x86_sse2_sqrt_pd>;
+defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
+ sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
+ sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
+ sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
+ sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
+ sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
+
+// Reciprocal approximations. Note that these typically require refinement
+// in order to obtain suitable precision.
+defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
+ sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
+ sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
+defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
+ sse1_fp_unop_p<0x53, "rcp", X86frcp>,
+ sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
// There is no f64 version of the reciprocal approximation instructions.
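
As the comment above says, the reciprocal approximations (roughly 12 bits of precision) are usually refined; one Newton-Raphson step for rsqrt looks like this in C++ (sketch, standard intrinsics assumed, not part of this diff):

  #include <xmmintrin.h>

  // x1 = x0 * (1.5 - 0.5 * a * x0 * x0) roughly doubles the accuracy of x0 ~ 1/sqrt(a).
  __m128 rsqrt_refined(__m128 a) {
    __m128 x0 = _mm_rsqrt_ps(a);
    __m128 t  = _mm_mul_ps(_mm_mul_ps(_mm_set1_ps(0.5f), a), _mm_mul_ps(x0, x0));
    return _mm_mul_ps(x0, _mm_sub_ps(_mm_set1_ps(1.5f), t));
  }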
-// Logical
-let Constraints = "$src1 = $dst" in {
- let isCommutable = 1 in {
- def ANDPDrr : PDI<0x54, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "andpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (and (bc_v2i64 (v2f64 VR128:$src1)),
- (bc_v2i64 (v2f64 VR128:$src2))))]>;
- def ORPDrr : PDI<0x56, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "orpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (or (bc_v2i64 (v2f64 VR128:$src1)),
- (bc_v2i64 (v2f64 VR128:$src2))))]>;
- def XORPDrr : PDI<0x57, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "xorpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (xor (bc_v2i64 (v2f64 VR128:$src1)),
- (bc_v2i64 (v2f64 VR128:$src2))))]>;
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Non-temporal stores
+//===----------------------------------------------------------------------===//
+
+let isAsmParserOnly = 1 in {
+ def VMOVNTPSmr_Int : VPSI<0x2B, MRMDestMem, (outs),
+ (ins i128mem:$dst, VR128:$src),
+ "movntps\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>, VEX;
+ def VMOVNTPDmr_Int : VPDI<0x2B, MRMDestMem, (outs),
+ (ins i128mem:$dst, VR128:$src),
+ "movntpd\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>, VEX;
+
+ let ExeDomain = SSEPackedInt in
+ def VMOVNTDQmr_Int : VPDI<0xE7, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR128:$src),
+ "movntdq\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>, VEX;
+
+ let AddedComplexity = 400 in { // Prefer non-temporal versions
+ def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR128:$src),
+ "movntps\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v4f32 VR128:$src),
+ addr:$dst)]>, VEX;
+ def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR128:$src),
+ "movntpd\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v2f64 VR128:$src),
+ addr:$dst)]>, VEX;
+ def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR128:$src),
+ "movntdq\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v2f64 VR128:$src),
+ addr:$dst)]>, VEX;
+ let ExeDomain = SSEPackedInt in
+ def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR128:$src),
+ "movntdq\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v4f32 VR128:$src),
+ addr:$dst)]>, VEX;
+
+ def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
+ (ins f256mem:$dst, VR256:$src),
+ "movntps\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v8f32 VR256:$src),
+ addr:$dst)]>, VEX;
+ def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
+ (ins f256mem:$dst, VR256:$src),
+ "movntpd\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v4f64 VR256:$src),
+ addr:$dst)]>, VEX;
+ def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
+ (ins f256mem:$dst, VR256:$src),
+ "movntdq\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v4f64 VR256:$src),
+ addr:$dst)]>, VEX;
+ let ExeDomain = SSEPackedInt in
+ def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
+ (ins f256mem:$dst, VR256:$src),
+ "movntdq\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v8f32 VR256:$src),
+ addr:$dst)]>, VEX;
}
+}
+
+def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
+ (VMOVNTDQYmr addr:$dst, VR256:$src)>;
+def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
+ (VMOVNTPDYmr addr:$dst, VR256:$src)>;
+def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
+ (VMOVNTPSYmr addr:$dst, VR256:$src)>;
+
+def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
+ "movntps\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
+def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
+ "movntpd\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
+
+let ExeDomain = SSEPackedInt in
+def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movntdq\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
+
+let AddedComplexity = 400 in { // Prefer non-temporal versions
+def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movntps\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
+def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movntpd\t{$src, $dst|$dst, $src}",
+                    [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
+
+def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movntdq\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
+
+let ExeDomain = SSEPackedInt in
+def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movntdq\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
+
+// There is no AVX form for instructions below this point
+def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
+ "movnti\t{$src, $dst|$dst, $src}",
+ [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
+ TB, Requires<[HasSSE2]>;
+
+def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
+ "movnti\t{$src, $dst|$dst, $src}",
+ [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
+ TB, Requires<[HasSSE2]>;
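
The non-temporal stores above bypass the cache hierarchy; a typical C++ usage pattern (sketch only; assumes dst is 16-byte aligned and n is a multiple of 4):

  #include <cstddef>
  #include <xmmintrin.h>

  void stream_copy(float *dst, const float *src, std::size_t n) {
    for (std::size_t i = 0; i < n; i += 4)
      _mm_stream_ps(dst + i, _mm_loadu_ps(src + i)); // MOVNTPS, cache-bypassing store
    _mm_sfence(); // order the streaming stores before later ordinary stores
  }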
- def ANDPDrm : PDI<0x54, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "andpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (and (bc_v2i64 (v2f64 VR128:$src1)),
- (memopv2i64 addr:$src2)))]>;
- def ORPDrm : PDI<0x56, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "orpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (or (bc_v2i64 (v2f64 VR128:$src1)),
- (memopv2i64 addr:$src2)))]>;
- def XORPDrm : PDI<0x57, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "xorpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (xor (bc_v2i64 (v2f64 VR128:$src1)),
- (memopv2i64 addr:$src2)))]>;
- def ANDNPDrr : PDI<0x55, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "andnpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
- (bc_v2i64 (v2f64 VR128:$src2))))]>;
- def ANDNPDrm : PDI<0x55, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1,f128mem:$src2),
- "andnpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
- (memopv2i64 addr:$src2)))]>;
}
+def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
+ "movnti\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
+ TB, Requires<[HasSSE2]>;
-let Constraints = "$src1 = $dst" in {
- def CMPPDrri : PDIi8<0xC2, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
- "cmp${cc}pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
- VR128:$src, imm:$cc))]>;
- def CMPPDrmi : PDIi8<0xC2, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
- "cmp${cc}pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
- (memop addr:$src), imm:$cc))]>;
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Misc Instructions (No AVX form)
+//===----------------------------------------------------------------------===//
+
+// Prefetch intrinsic.
+def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
+ "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
+def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
+ "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
+def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
+ "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
+def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
+ "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
+
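
The prefetch forms map the locality immediate to the T0/T1/T2/NTA hints; the same hints are reachable from C++ through _mm_prefetch (sketch, standard intrinsics assumed, not part of this diff):

  #include <xmmintrin.h>

  void warm_cache(const char *p) {
    _mm_prefetch(p,      _MM_HINT_T0);  // prefetcht0: bring into all cache levels
    _mm_prefetch(p + 64, _MM_HINT_NTA); // prefetchnta: minimize cache pollution
  }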
+// Load, store, and memory fence
+def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
+ TB, Requires<[HasSSE1]>;
+def : Pat<(X86SFence), (SFENCE)>;
+
+// Alias instructions that map zero vector to pxor / xorp* for sse.
+// We set canFoldAsLoad because this can be converted to a constant-pool
+// load of an all-zeros value if folding it would be beneficial.
+// FIXME: Change encoding to pseudo! This is blocked right now by the x86
+// JIT implementation, which does not expand the instructions below like
+// X86MCInstLower does.
+let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
+ isCodeGenOnly = 1 in {
+def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
+ [(set VR128:$dst, (v4f32 immAllZerosV))]>;
+def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
+ [(set VR128:$dst, (v2f64 immAllZerosV))]>;
+let ExeDomain = SSEPackedInt in
+def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
+ [(set VR128:$dst, (v4i32 immAllZerosV))]>;
}
-def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
- (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
-def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
- (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
-// Shuffle and unpack instructions
-let Constraints = "$src1 = $dst" in {
- def SHUFPDrri : PDIi8<0xC6, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i8imm:$src3),
- "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set VR128:$dst,
- (v2f64 (shufp:$src3 VR128:$src1, VR128:$src2)))]>;
- def SHUFPDrmi : PDIi8<0xC6, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1,
- f128mem:$src2, i8imm:$src3),
- "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set VR128:$dst,
- (v2f64 (shufp:$src3
- VR128:$src1, (memopv2f64 addr:$src2))))]>;
+// The same as above, but for AVX. The 128-bit versions are the same, just
+// re-encoded; the 256-bit form does not support the PI version.
+// FIXME: Change encoding to pseudo! This is blocked right now by the x86
+// JIT implementation, which does not expand the instructions below like
+// X86MCInstLower does.
+let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
+ isCodeGenOnly = 1, Predicates = [HasAVX] in {
+def AVX_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
+ [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
+def AVX_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
+ [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
+def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
+ [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
+def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
+ [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
+let ExeDomain = SSEPackedInt in
+def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
+ [(set VR128:$dst, (v4i32 immAllZerosV))]>;
+}
- let AddedComplexity = 10 in {
- def UNPCKHPDrr : PDI<0x15, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "unpckhpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2f64 (unpckh VR128:$src1, VR128:$src2)))]>;
- def UNPCKHPDrm : PDI<0x15, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "unpckhpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2f64 (unpckh VR128:$src1,
- (memopv2f64 addr:$src2))))]>;
-
- def UNPCKLPDrr : PDI<0x14, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "unpcklpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2f64 (unpckl VR128:$src1, VR128:$src2)))]>;
- def UNPCKLPDrm : PDI<0x14, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "unpcklpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (unpckl VR128:$src1, (memopv2f64 addr:$src2)))]>;
- } // AddedComplexity
-} // Constraints = "$src1 = $dst"
+def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
+def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
+def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
+
+def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
+ (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Load/Store MXCSR register
+//===----------------------------------------------------------------------===//
+
+let isAsmParserOnly = 1 in {
+ def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
+ "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
+ def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
+ "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
+}
+def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
+ "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
+def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
+ "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
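
LDMXCSR/STMXCSR load and store the SSE control/status register; a common C++ use is enabling flush-to-zero for denormals (sketch, not part of this diff; 0x8000 is the FTZ bit):

  #include <xmmintrin.h>

  void enable_flush_to_zero() {
    unsigned csr = _mm_getcsr();  // STMXCSR
    _mm_setcsr(csr | 0x8000);     // LDMXCSR with the flush-to-zero bit set
  }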
//===---------------------------------------------------------------------===//
-// SSE integer instructions
+// SSE2 - Move Aligned/Unaligned Packed Integer Instructions
+//===---------------------------------------------------------------------===//
+
+let ExeDomain = SSEPackedInt in { // SSE integer instructions
+
+let isAsmParserOnly = 1 in {
+ let neverHasSideEffects = 1 in {
+ def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
+ def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
+ }
+ def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
+ def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
+
+ let canFoldAsLoad = 1, mayLoad = 1 in {
+ def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+ "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
+ def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
+ "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
+ let Predicates = [HasAVX] in {
+ def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+ "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
+ def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
+ "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
+ }
+ }
+
+ let mayStore = 1 in {
+ def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
+ (ins i128mem:$dst, VR128:$src),
+ "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
+ def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
+ (ins i256mem:$dst, VR256:$src),
+ "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
+ let Predicates = [HasAVX] in {
+ def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
+ "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
+ def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
+ "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
+ }
+ }
+}
-// Move Instructions
let neverHasSideEffects = 1 in
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}", []>;
-let canFoldAsLoad = 1, mayLoad = 1 in
+
+let canFoldAsLoad = 1, mayLoad = 1 in {
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqa\t{$src, $dst|$dst, $src}",
[/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
-let mayStore = 1 in
-def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
- "movdqa\t{$src, $dst|$dst, $src}",
- [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
-let canFoldAsLoad = 1, mayLoad = 1 in
def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqu\t{$src, $dst|$dst, $src}",
[/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
XS, Requires<[HasSSE2]>;
-let mayStore = 1 in
+}
+
+let mayStore = 1 in {
+def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
+ "movdqa\t{$src, $dst|$dst, $src}",
+ [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}",
[/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
XS, Requires<[HasSSE2]>;
+}
// Intrinsic forms of MOVDQU load and store
+let isAsmParserOnly = 1 in {
+let canFoldAsLoad = 1 in
+def VMOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+ "vmovdqu\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
+ XS, VEX, Requires<[HasAVX]>;
+def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
+ "vmovdqu\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
+ XS, VEX, Requires<[HasAVX]>;
+}
+
let canFoldAsLoad = 1 in
def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqu\t{$src, $dst|$dst, $src}",
@@ -1972,55 +2320,76 @@ def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
[(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
XS, Requires<[HasSSE2]>;
-let Constraints = "$src1 = $dst" in {
+} // ExeDomain = SSEPackedInt
+
+def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
+def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
+ (VMOVDQUYmr addr:$dst, VR256:$src)>;
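
MOVDQA requires a 16-byte-aligned address and faults otherwise, while MOVDQU accepts any alignment; in C++ the two correspond to the aligned and unaligned load intrinsics (sketch, not part of this diff):

  #include <emmintrin.h>

  __m128i load_aligned(const __m128i *p) { return _mm_load_si128(p); }    // movdqa
  __m128i load_unaligned(const void *p) {                                 // movdqu
    return _mm_loadu_si128(static_cast<const __m128i *>(p));
  }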
+
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Integer Arithmetic Instructions
+//===---------------------------------------------------------------------===//
+
+let ExeDomain = SSEPackedInt in { // SSE integer instructions
multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
- bit Commutable = 0> {
- def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]> {
- let isCommutable = Commutable;
- }
- def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId VR128:$src1,
- (bitconvert (memopv2i64
- addr:$src2))))]>;
+ bit IsCommutable = 0, bit Is2Addr = 1> {
+ let isCommutable = IsCommutable in
+ def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
+ def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId VR128:$src1,
+ (bitconvert (memopv2i64 addr:$src2))))]>;
}
multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
- string OpcodeStr,
- Intrinsic IntId, Intrinsic IntId2> {
- def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
+ string OpcodeStr, Intrinsic IntId,
+ Intrinsic IntId2, bit Is2Addr = 1> {
+ def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId VR128:$src1,
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId VR128:$src1,
(bitconvert (memopv2i64 addr:$src2))))]>;
- def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
- (ins VR128:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
+ def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
+ (ins VR128:$src1, i32i8imm:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
}
/// PDI_binop_rm - Simple SSE2 binary operator.
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
- ValueType OpVT, bit Commutable = 0> {
- def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]> {
- let isCommutable = Commutable;
- }
- def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
+ ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
+ let isCommutable = IsCommutable in
+ def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
+ def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
(bitconvert (memopv2i64 addr:$src2)))))]>;
}
@@ -2030,63 +2399,177 @@ multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
/// to collapse (bitconvert VT to VT) into its operand.
///
multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
- bit Commutable = 0> {
+ bit IsCommutable = 0, bit Is2Addr = 1> {
+ let isCommutable = IsCommutable in
def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]> {
- let isCommutable = Commutable;
- }
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpNode VR128:$src1,
- (memopv2i64 addr:$src2)))]>;
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
}
-} // Constraints = "$src1 = $dst"
+} // ExeDomain = SSEPackedInt
// 128-bit Integer Arithmetic
-defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
-defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
-defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
-defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
-
-defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
-defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
-defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
-defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
+defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
+defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
+defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
+defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
+defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
+defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
+defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
+defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
+
+// Intrinsic forms
+defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
+ VEX_4V;
+defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
+ VEX_4V;
+defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
+ VEX_4V;
+defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
+ VEX_4V;
+defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
+ VEX_4V;
+defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
+ VEX_4V;
+defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
+ VEX_4V;
+defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
+ VEX_4V;
+defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
+ VEX_4V;
+defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
+ VEX_4V;
+defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
+ VEX_4V;
+defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
+ VEX_4V;
+defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
+ VEX_4V;
+defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
+ VEX_4V;
+defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
+ VEX_4V;
+defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
+ VEX_4V;
+defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
+ VEX_4V;
+defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
+ VEX_4V;
+defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
+ VEX_4V;
+}
+let Constraints = "$src1 = $dst" in {
+defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
+defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
+defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
+defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
+defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
+// Intrinsic forms
defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
-
-defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
-
+defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
+defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
+defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
+defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
-defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w , 1>;
+defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
-
defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
+defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
+defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
+defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
+defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
+defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
+defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
+defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
-defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
-defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
+} // Constraints = "$src1 = $dst"
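For reference, the difference between the plain adds (PADDB/PADDW/PADDD) and the saturating forms (PADDSB, PADDUSB, ...) instantiated above is wrap-around versus clamping. An illustrative C sketch of the semantics, using the standard <emmintrin.h> intrinsics (build with e.g. gcc -msse2); not part of the patch itself:

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    __m128i a = _mm_set1_epi8((char)200);   /* 0xC8 in every byte */
    __m128i b = _mm_set1_epi8((char)100);   /* 0x64 in every byte */

    __m128i wrap = _mm_add_epi8(a, b);      /* PADDB:   200+100 wraps to 44   */
    __m128i sat  = _mm_adds_epu8(a, b);     /* PADDUSB: 200+100 clamps to 255 */

    unsigned char w[16], s[16];
    _mm_storeu_si128((__m128i *)w, wrap);
    _mm_storeu_si128((__m128i *)s, sat);
    printf("wrapped=%u saturated=%u\n", w[0], s[0]); /* wrapped=44 saturated=255 */
    return 0;
}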
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Integer Logical Instructions
+//===---------------------------------------------------------------------===//
-defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
-defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
-defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
-defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
-defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
+ int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
+ VEX_4V;
+defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
+ int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
+ VEX_4V;
+defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
+ int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
+ VEX_4V;
+
+defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
+ int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
+ VEX_4V;
+defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
+ int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
+ VEX_4V;
+defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
+ int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
+ VEX_4V;
+
+defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
+ int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
+ VEX_4V;
+defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
+ int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
+ VEX_4V;
+
+defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
+defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
+defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
+
+let ExeDomain = SSEPackedInt in {
+ let neverHasSideEffects = 1 in {
+ // 128-bit logical shifts.
+ def VPSLLDQri : PDIi8<0x73, MRM7r,
+ (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
+ VEX_4V;
+ def VPSRLDQri : PDIi8<0x73, MRM3r,
+ (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
+ VEX_4V;
+ // PSRADQri doesn't exist in SSE[1-3].
+ }
+ def VPANDNrr : PDI<0xDF, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
+ VR128:$src2)))]>, VEX_4V;
+ def VPANDNrm : PDI<0xDF, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
+ "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
+ (memopv2i64 addr:$src2))))]>,
+ VEX_4V;
+}
+}
+let Constraints = "$src1 = $dst" in {
defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
@@ -2106,15 +2589,52 @@ defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
-// 128-bit logical shifts.
-let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
- def PSLLDQri : PDIi8<0x73, MRM7r,
- (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
- "pslldq\t{$src2, $dst|$dst, $src2}", []>;
- def PSRLDQri : PDIi8<0x73, MRM3r,
- (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
- "psrldq\t{$src2, $dst|$dst, $src2}", []>;
- // PSRADQri doesn't exist in SSE[1-3].
+defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
+defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
+defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
+
+let ExeDomain = SSEPackedInt in {
+ let neverHasSideEffects = 1 in {
+ // 128-bit logical shifts.
+ def PSLLDQri : PDIi8<0x73, MRM7r,
+ (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ "pslldq\t{$src2, $dst|$dst, $src2}", []>;
+ def PSRLDQri : PDIi8<0x73, MRM3r,
+ (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ "psrldq\t{$src2, $dst|$dst, $src2}", []>;
+ // PSRADQri doesn't exist in SSE[1-3].
+ }
+ def PANDNrr : PDI<0xDF, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ "pandn\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
+ VR128:$src2)))]>;
+
+ def PANDNrm : PDI<0xDF, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
+ "pandn\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
+ (memopv2i64 addr:$src2))))]>;
+}
+} // Constraints = "$src1 = $dst"
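A note on PANDN's operand order, which the (and (vnot $src1), $src2) patterns above encode: the complement applies to the first source operand, not the second. A minimal C sketch with the matching <emmintrin.h> intrinsic:

#include <emmintrin.h>

/* PANDN computes (~a) & b -- the complement applies to the FIRST operand,
 * matching the (and (vnot $src1), $src2) pattern above. */
static __m128i clear_masked_bits(__m128i mask, __m128i value)
{
    /* Keeps only the bits of 'value' that are 0 in 'mask'. */
    return _mm_andnot_si128(mask, value);
}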
+
+let Predicates = [HasAVX] in {
+ def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
+ (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
+ def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
+ (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
+ def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
+ (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
+ def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
+ (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
+ def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
+ (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
+
+ // Shift up / down and insert zeros.
+ def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
+ (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
+ def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
+ (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
}
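PSLLDQ/PSRLDQ shift the whole 128-bit register by a byte count and shift in zeros; note in the patterns above that the *_dq_bs intrinsics pass the immediate straight through while the plain *_dq forms go through BYTE_imm to convert a bit count into the instruction's byte count. An illustrative C sketch (<emmintrin.h>; the shift count must be a compile-time constant):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    __m128i v = _mm_setr_epi8(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);

    /* PSLLDQ / PSRLDQ: whole-register shift by a *byte* count, zero fill. */
    __m128i up   = _mm_slli_si128(v, 4);  /* low 4 bytes become 0  */
    __m128i down = _mm_srli_si128(v, 4);  /* high 4 bytes become 0 */

    unsigned char u[16], d[16];
    _mm_storeu_si128((__m128i *)u, up);
    _mm_storeu_si128((__m128i *)d, down);
    printf("up[0..4]    = %u %u %u %u %u\n", u[0], u[1], u[2], u[3], u[4]);   /* 0 0 0 0 1  */
    printf("down[11..15]= %u %u %u %u %u\n", d[11], d[12], d[13], d[14], d[15]); /* 16 0 0 0 0 */
    return 0;
}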
let Predicates = [HasSSE2] in {
@@ -2136,32 +2656,33 @@ let Predicates = [HasSSE2] in {
(v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
}
-// Logical
-defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
-defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or , 1>;
-defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
-
-let Constraints = "$src1 = $dst" in {
- def PANDNrr : PDI<0xDF, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "pandn\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
- VR128:$src2)))]>;
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Integer Comparison Instructions
+//===---------------------------------------------------------------------===//
- def PANDNrm : PDI<0xDF, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "pandn\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
- (memopv2i64 addr:$src2))))]>;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
+ 0>, VEX_4V;
+ defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
+ 0>, VEX_4V;
+ defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
+ 0>, VEX_4V;
+ defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
+ 0>, VEX_4V;
+ defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
+ 0>, VEX_4V;
+ defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
+ 0>, VEX_4V;
}
-// SSE2 Integer comparison
-defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b>;
-defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w>;
-defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d>;
-defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
-defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
-defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
+let Constraints = "$src1 = $dst" in {
+ defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
+ defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
+ defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
+ defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
+ defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
+ defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
+} // Constraints = "$src1 = $dst"
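The PCMPEQ/PCMPGT forms above set no flags; each element becomes an all-ones or all-zeros mask, which is then combined with the logical ops defined earlier. A small C sketch of that masking idiom (<emmintrin.h>); SSE2's PMAXSW, also defined above, does the same thing in one instruction, this only shows how the masks compose:

#include <emmintrin.h>

/* Per-element signed 16-bit max built from PCMPGTW + PAND/PANDN/POR. */
static __m128i max_epi16_sse2(__m128i a, __m128i b)
{
    __m128i gt = _mm_cmpgt_epi16(a, b);           /* 0xFFFF where a > b, else 0 */
    return _mm_or_si128(_mm_and_si128(gt, a),     /* take a where the mask is set */
                        _mm_andnot_si128(gt, b)); /* take b elsewhere             */
}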
def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
(PCMPEQBrr VR128:$src1, VR128:$src2)>;
@@ -2189,92 +2710,147 @@ def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
(PCMPGTDrm VR128:$src1, addr:$src2)>;
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Integer Pack Instructions
+//===---------------------------------------------------------------------===//
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
+ 0, 0>, VEX_4V;
+defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
+ 0, 0>, VEX_4V;
+defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
+ 0, 0>, VEX_4V;
+}
-// Pack instructions
+let Constraints = "$src1 = $dst" in {
defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
+} // Constraints = "$src1 = $dst"
-// Shuffle and unpack instructions
-let AddedComplexity = 5 in {
-def PSHUFDri : PDIi8<0x70, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
- "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (v4i32 (pshufd:$src2
- VR128:$src1, (undef))))]>;
-def PSHUFDmi : PDIi8<0x70, MRMSrcMem,
- (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
- "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (v4i32 (pshufd:$src2
- (bc_v4i32 (memopv2i64 addr:$src1)),
- (undef))))]>;
-}
-
-// SSE2 with ImmT == Imm8 and XS prefix.
-def PSHUFHWri : Ii8<0x70, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
- "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (v8i16 (pshufhw:$src2 VR128:$src1,
- (undef))))]>,
- XS, Requires<[HasSSE2]>;
-def PSHUFHWmi : Ii8<0x70, MRMSrcMem,
- (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
- "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (v8i16 (pshufhw:$src2
- (bc_v8i16 (memopv2i64 addr:$src1)),
- (undef))))]>,
- XS, Requires<[HasSSE2]>;
-
-// SSE2 with ImmT == Imm8 and XD prefix.
-def PSHUFLWri : Ii8<0x70, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
- "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (v8i16 (pshuflw:$src2 VR128:$src1,
- (undef))))]>,
- XD, Requires<[HasSSE2]>;
-def PSHUFLWmi : Ii8<0x70, MRMSrcMem,
- (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
- "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (v8i16 (pshuflw:$src2
- (bc_v8i16 (memopv2i64 addr:$src1)),
- (undef))))]>,
- XD, Requires<[HasSSE2]>;
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Integer Shuffle Instructions
+//===---------------------------------------------------------------------===//
+let ExeDomain = SSEPackedInt in {
+multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
+ PatFrag bc_frag> {
+def ri : Ii8<0x70, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
+ (undef))))]>;
+def mi : Ii8<0x70, MRMSrcMem,
+ (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst, (vt (pshuf_frag:$src2
+ (bc_frag (memopv2i64 addr:$src1)),
+ (undef))))]>;
+}
+} // ExeDomain = SSEPackedInt
-let Constraints = "$src1 = $dst" in {
- def PUNPCKLBWrr : PDI<0x60, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "punpcklbw\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v16i8 (unpckl VR128:$src1, VR128:$src2)))]>;
- def PUNPCKLBWrm : PDI<0x60, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "punpcklbw\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (unpckl VR128:$src1,
- (bc_v16i8 (memopv2i64 addr:$src2))))]>;
- def PUNPCKLWDrr : PDI<0x61, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "punpcklwd\t{$src2, $dst|$dst, $src2}",
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ let AddedComplexity = 5 in
+ defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
+ VEX;
+
+ // SSE2 with ImmT == Imm8 and XS prefix.
+ defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
+ VEX;
+
+ // SSE2 with ImmT == Imm8 and XD prefix.
+ defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
+ VEX;
+}
+
+let Predicates = [HasSSE2] in {
+ let AddedComplexity = 5 in
+ defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
+
+ // SSE2 with ImmT == Imm8 and XS prefix.
+ defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
+
+ // SSE2 with ImmT == Imm8 and XD prefix.
+ defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
+}
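The pshufd/pshufhw/pshuflw family defined above takes an 8-bit immediate whose four 2-bit fields select source elements. A minimal C sketch using <emmintrin.h> and the _MM_SHUFFLE helper macro:

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    __m128i v = _mm_setr_epi32(10, 20, 30, 40);

    /* PSHUFD: _MM_SHUFFLE(d3,d2,d1,d0) builds the immediate; result element 0
     * comes from source element d0, element 1 from d1, and so on. */
    __m128i rev   = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 1, 2, 3)); /* reverse      */
    __m128i bcast = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 0, 0)); /* splat lane 0 */

    int r[4], b[4];
    _mm_storeu_si128((__m128i *)r, rev);
    _mm_storeu_si128((__m128i *)b, bcast);
    printf("rev   = %d %d %d %d\n", r[0], r[1], r[2], r[3]); /* 40 30 20 10 */
    printf("bcast = %d %d %d %d\n", b[0], b[1], b[2], b[3]); /* 10 10 10 10 */
    return 0;
}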
+
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Integer Unpack Instructions
+//===---------------------------------------------------------------------===//
+
+let ExeDomain = SSEPackedInt in {
+multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
+ PatFrag unp_frag, PatFrag bc_frag, bit Is2Addr = 1> {
+ def rr : PDI<opc, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (vt (unp_frag VR128:$src1, VR128:$src2)))]>;
+ def rm : PDI<opc, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (unp_frag VR128:$src1,
+ (bc_frag (memopv2i64
+ addr:$src2))))]>;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, unpckl, bc_v16i8,
+ 0>, VEX_4V;
+ defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, unpckl, bc_v8i16,
+ 0>, VEX_4V;
+ defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, unpckl, bc_v4i32,
+ 0>, VEX_4V;
+
+ /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
+ /// knew to collapse (bitconvert VT to VT) into its operand.
+ def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (v8i16 (unpckl VR128:$src1, VR128:$src2)))]>;
- def PUNPCKLWDrm : PDI<0x61, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "punpcklwd\t{$src2, $dst|$dst, $src2}",
+ (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>, VEX_4V;
+ def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
+ "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (unpckl VR128:$src1,
- (bc_v8i16 (memopv2i64 addr:$src2))))]>;
- def PUNPCKLDQrr : PDI<0x62, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "punpckldq\t{$src2, $dst|$dst, $src2}",
+ (v2i64 (unpckl VR128:$src1,
+ (memopv2i64 addr:$src2))))]>, VEX_4V;
+
+ defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, unpckh, bc_v16i8,
+ 0>, VEX_4V;
+ defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, unpckh, bc_v8i16,
+ 0>, VEX_4V;
+ defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, unpckh, bc_v4i32,
+ 0>, VEX_4V;
+
+ /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
+ /// knew to collapse (bitconvert VT to VT) into its operand.
+ def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (v4i32 (unpckl VR128:$src1, VR128:$src2)))]>;
- def PUNPCKLDQrm : PDI<0x62, MRMSrcMem,
+ (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>, VEX_4V;
+ def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "punpckldq\t{$src2, $dst|$dst, $src2}",
+ "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (unpckl VR128:$src1,
- (bc_v4i32 (memopv2i64 addr:$src2))))]>;
+ (v2i64 (unpckh VR128:$src1,
+ (memopv2i64 addr:$src2))))]>, VEX_4V;
+}
+
+let Constraints = "$src1 = $dst" in {
+ defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, unpckl, bc_v16i8>;
+ defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, unpckl, bc_v8i16>;
+ defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, unpckl, bc_v4i32>;
+
+ /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
+ /// knew to collapse (bitconvert VT to VT) into its operand.
def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"punpcklqdq\t{$src2, $dst|$dst, $src2}",
@@ -2287,39 +2863,12 @@ let Constraints = "$src1 = $dst" in {
(v2i64 (unpckl VR128:$src1,
(memopv2i64 addr:$src2))))]>;
- def PUNPCKHBWrr : PDI<0x68, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "punpckhbw\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v16i8 (unpckh VR128:$src1, VR128:$src2)))]>;
- def PUNPCKHBWrm : PDI<0x68, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "punpckhbw\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (unpckh VR128:$src1,
- (bc_v16i8 (memopv2i64 addr:$src2))))]>;
- def PUNPCKHWDrr : PDI<0x69, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "punpckhwd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v8i16 (unpckh VR128:$src1, VR128:$src2)))]>;
- def PUNPCKHWDrm : PDI<0x69, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "punpckhwd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (unpckh VR128:$src1,
- (bc_v8i16 (memopv2i64 addr:$src2))))]>;
- def PUNPCKHDQrr : PDI<0x6A, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "punpckhdq\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v4i32 (unpckh VR128:$src1, VR128:$src2)))]>;
- def PUNPCKHDQrm : PDI<0x6A, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "punpckhdq\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (unpckh VR128:$src1,
- (bc_v4i32 (memopv2i64 addr:$src2))))]>;
+ defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, unpckh, bc_v16i8>;
+ defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, unpckh, bc_v8i16>;
+ defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, unpckh, bc_v4i32>;
+
+ /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
+ /// knew to collapse (bitconvert VT to VT) into its operand.
def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"punpckhqdq\t{$src2, $dst|$dst, $src2}",
@@ -2333,97 +2882,125 @@ let Constraints = "$src1 = $dst" in {
(memopv2i64 addr:$src2))))]>;
}
-// Extract / Insert
+} // ExeDomain = SSEPackedInt
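The punpck{l,h}* definitions above interleave the low (or high) halves of the two sources; against a zero register this is the usual SSE2 way to widen elements. An illustrative C sketch (<emmintrin.h>):

#include <emmintrin.h>

/* Zero-extend the 16 bytes of 'v' into two vectors of eight 16-bit words,
 * using PUNPCKLBW/PUNPCKHBW against a zero register. */
static void widen_u8_to_u16(__m128i v, __m128i *lo, __m128i *hi)
{
    __m128i zero = _mm_setzero_si128();
    *lo = _mm_unpacklo_epi8(v, zero);  /* bytes 0..7  -> words, high byte = 0 */
    *hi = _mm_unpackhi_epi8(v, zero);  /* bytes 8..15 -> words, high byte = 0 */
}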
+
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Integer Extract and Insert
+//===---------------------------------------------------------------------===//
+
+let ExeDomain = SSEPackedInt in {
+multiclass sse2_pinsrw<bit Is2Addr = 1> {
+ def rri : Ii8<0xC4, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1,
+ GR32:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
+ def rmi : Ii8<0xC4, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1,
+ i16mem:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
+ imm:$src3))]>;
+}
+
+// Extract
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
+ (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
+ imm:$src2))]>, OpSize, VEX;
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
(outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
imm:$src2))]>;
-let Constraints = "$src1 = $dst" in {
- def PINSRWrri : PDIi8<0xC4, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1,
- GR32:$src2, i32i8imm:$src3),
- "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set VR128:$dst,
- (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
- def PINSRWrmi : PDIi8<0xC4, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1,
- i16mem:$src2, i32i8imm:$src3),
- "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set VR128:$dst,
- (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
- imm:$src3))]>;
+
+// Insert
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
+ def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
+ "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>, OpSize, VEX_4V;
}
-// Mask creation
-def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
- "pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
+let Constraints = "$src1 = $dst" in
+ defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
-// Conditional store
-let Uses = [EDI] in
-def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
- "maskmovdqu\t{$mask, $src|$src, $mask}",
- [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
+} // ExeDomain = SSEPackedInt
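PEXTRW/PINSRW above move a single 16-bit lane between a GPR and an XMM register, with the lane chosen by an immediate. A minimal C sketch (<emmintrin.h>; the lane index must be a compile-time constant in 0..7):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
    __m128i v = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);

    int lane3 = _mm_extract_epi16(v, 3);      /* PEXTRW: read lane 3 into a GPR  */
    __m128i w = _mm_insert_epi16(v, 1234, 5); /* PINSRW: write 1234 into lane 5  */

    short out[8];
    _mm_storeu_si128((__m128i *)out, w);
    printf("lane3=%d lane5=%d\n", lane3, out[5]);  /* lane3=3 lane5=1234 */
    return 0;
}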
-let Uses = [RDI] in
-def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
- "maskmovdqu\t{$mask, $src|$src, $mask}",
- [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Mask Creation
+//===---------------------------------------------------------------------===//
-// Non-temporal stores
-def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
- "movntpd\t{$src, $dst|$dst, $src}",
- [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
-def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movntdq\t{$src, $dst|$dst, $src}",
- [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
-def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
- "movnti\t{$src, $dst|$dst, $src}",
- [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
- TB, Requires<[HasSSE2]>;
+let ExeDomain = SSEPackedInt in {
-let AddedComplexity = 400 in { // Prefer non-temporal versions
-def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movntpd\t{$src, $dst|$dst, $src}",
- [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
+let isAsmParserOnly = 1 in {
+def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
+ "pmovmskb\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
+def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
+ "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
+}
+def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
+ "pmovmskb\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
-def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movntdq\t{$src, $dst|$dst, $src}",
- [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
+} // ExeDomain = SSEPackedInt
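PMOVMSKB above packs the sign bit of each byte into a GPR bitmask; together with PCMPEQB it is the classic SSE2 building block for byte scanning. An illustrative C sketch (<emmintrin.h>; __builtin_ctz is the GCC builtin):

#include <emmintrin.h>

/* Return the index of the first zero byte in the 16-byte block at 'p',
 * or -1 if there is none. 'p' is assumed readable for 16 bytes. */
static int first_zero_byte(const unsigned char *p)
{
    __m128i block = _mm_loadu_si128((const __m128i *)p);
    __m128i eq0   = _mm_cmpeq_epi8(block, _mm_setzero_si128());
    int mask      = _mm_movemask_epi8(eq0);   /* PMOVMSKB: one bit per byte   */
    return mask ? __builtin_ctz(mask) : -1;   /* lowest set bit = first match */
}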
-def : Pat<(alignednontemporalstore (v4i32 VR128:$src), addr:$dst),
- (MOVNTDQmr VR128:$src, addr:$dst)>;
-}
+//===---------------------------------------------------------------------===//
+// SSE2 - Conditional Store
+//===---------------------------------------------------------------------===//
-// Flush cache
-def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
- "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
- TB, Requires<[HasSSE2]>;
+let ExeDomain = SSEPackedInt in {
-// Load, store, and memory fence
-def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
- "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
-def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
- "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
+let isAsmParserOnly = 1 in {
+let Uses = [EDI] in
+def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
+ (ins VR128:$src, VR128:$mask),
+ "maskmovdqu\t{$mask, $src|$src, $mask}",
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
+let Uses = [RDI] in
+def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
+ (ins VR128:$src, VR128:$mask),
+ "maskmovdqu\t{$mask, $src|$src, $mask}",
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
+}
-//TODO: custom lower this so as to never even generate the noop
-def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm),
- (i8 0)), (NOOP)>;
-def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
-def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
-def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm),
- (i8 1)), (MFENCE)>;
+let Uses = [EDI] in
+def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
+ "maskmovdqu\t{$mask, $src|$src, $mask}",
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
+let Uses = [RDI] in
+def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
+ "maskmovdqu\t{$mask, $src|$src, $mask}",
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
-// Alias instructions that map zero vector to pxor / xorp* for sse.
-// We set canFoldAsLoad because this can be converted to a constant-pool
-// load of an all-ones value if folding it would be beneficial.
-let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
- isCodeGenOnly = 1 in
- // FIXME: Change encoding to pseudo.
- def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
- [(set VR128:$dst, (v4i32 immAllOnesV))]>;
+} // ExeDomain = SSEPackedInt
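MASKMOVDQU above is a byte-granular conditional store: only the bytes whose mask byte has its most-significant bit set are written to memory at (E/R)DI, with a non-temporal hint. A minimal C sketch with the matching <emmintrin.h> intrinsic:

#include <emmintrin.h>

/* Store only the bytes of 'data' whose corresponding byte in 'mask' has the
 * top bit set; the other bytes at 'dst' are left untouched. */
static void conditional_store(__m128i data, __m128i mask, char *dst)
{
    _mm_maskmoveu_si128(data, mask, dst);
}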
+
+//===---------------------------------------------------------------------===//
+// SSE2 - Move Doubleword
+//===---------------------------------------------------------------------===//
+// Move Int Doubleword to Packed Double Int
+let isAsmParserOnly = 1 in {
+def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
+def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
+ VEX;
+}
def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
@@ -2433,6 +3010,18 @@ def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
[(set VR128:$dst,
(v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
+
+// Move Int Doubleword to Single Scalar
+let isAsmParserOnly = 1 in {
+def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
+
+def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
+ VEX;
+}
def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (bitconvert GR32:$src))]>;
@@ -2441,20 +3030,18 @@ def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
-// SSE2 instructions with XS prefix
-def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
- Requires<[HasSSE2]>;
-def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [(store (i64 (vector_extract (v2i64 VR128:$src),
- (iPTR 0))), addr:$dst)]>;
-
-def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
- (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), x86_subreg_sd))>;
-
+// Move Packed Doubleword Int to Packed Double Int
+let isAsmParserOnly = 1 in {
+def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
+ (iPTR 0)))]>, VEX;
+def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
+ (ins i32mem:$dst, VR128:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(store (i32 (vector_extract (v4i32 VR128:$src),
+ (iPTR 0))), addr:$dst)]>, VEX;
+}
def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
@@ -2464,6 +3051,15 @@ def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
[(store (i32 (vector_extract (v4i32 VR128:$src),
(iPTR 0))), addr:$dst)]>;
+// Move Scalar Single to Double Int
+let isAsmParserOnly = 1 in {
+def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
+def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
+}
def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (bitconvert FR32:$src))]>;
@@ -2471,25 +3067,38 @@ def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
-// Store / copy lower 64-bits of a XMM register.
-def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
-
// movd / movq to XMM register zero-extends
+let AddedComplexity = 15, isAsmParserOnly = 1 in {
+def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (v4i32 (X86vzmovl
+ (v4i32 (scalar_to_vector GR32:$src)))))]>,
+ VEX;
+def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
+ "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
+ [(set VR128:$dst, (v2i64 (X86vzmovl
+ (v2i64 (scalar_to_vector GR64:$src)))))]>,
+ VEX, VEX_W;
+}
let AddedComplexity = 15 in {
def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v4i32 (X86vzmovl
(v4i32 (scalar_to_vector GR32:$src)))))]>;
-// This is X86-64 only.
def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
+ "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
[(set VR128:$dst, (v2i64 (X86vzmovl
(v2i64 (scalar_to_vector GR64:$src)))))]>;
}
let AddedComplexity = 20 in {
+let isAsmParserOnly = 1 in
+def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
+ (loadi32 addr:$src))))))]>,
+ VEX;
def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
@@ -2502,13 +3111,63 @@ def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
(MOVZDI2PDIrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
(MOVZDI2PDIrm addr:$src)>;
+}
+
+//===---------------------------------------------------------------------===//
+// SSE2 - Move Quadword
+//===---------------------------------------------------------------------===//
+
+// Move Quadword Int to Packed Quadword Int
+let isAsmParserOnly = 1 in
+def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
+ "vmovq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
+ VEX, Requires<[HasAVX]>;
+def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
+ Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
+
+// Move Packed Quadword Int to Quadword Int
+let isAsmParserOnly = 1 in
+def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(store (i64 (vector_extract (v2i64 VR128:$src),
+ (iPTR 0))), addr:$dst)]>, VEX;
+def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(store (i64 (vector_extract (v2i64 VR128:$src),
+ (iPTR 0))), addr:$dst)]>;
+
+def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
+ (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
+
+// Store / copy lower 64-bits of a XMM register.
+let isAsmParserOnly = 1 in
+def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
+def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
+
+let AddedComplexity = 20, isAsmParserOnly = 1 in
+def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
+ "vmovq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
+ (loadi64 addr:$src))))))]>,
+ XS, VEX, Requires<[HasAVX]>;
+let AddedComplexity = 20 in {
def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v2i64 (X86vzmovl (v2i64 (scalar_to_vector
- (loadi64 addr:$src))))))]>, XS,
- Requires<[HasSSE2]>;
+ (loadi64 addr:$src))))))]>,
+ XS, Requires<[HasSSE2]>;
def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
(MOVZQI2PQIrm addr:$src)>;
@@ -2519,12 +3178,23 @@ def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
// Moving from XMM to XMM and clear upper 64 bits. Note, there is a bug in
// IA32 document. movq xmm1, xmm2 does clear the high bits.
+let isAsmParserOnly = 1, AddedComplexity = 15 in
+def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vmovq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
+ XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 15 in
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
XS, Requires<[HasSSE2]>;
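As the comment above notes (despite the documentation bug it mentions), reg-reg movq zeroes the upper 64 bits of the destination, which is exactly what the X86vzmovl patterns rely on. A minimal C sketch (<emmintrin.h>):

#include <emmintrin.h>

/* MOVQ xmm, xmm: copy the low 64 bits and zero the high 64 bits -- the
 * behaviour the X86vzmovl patterns above select. */
static __m128i low64_zero_extended(__m128i v)
{
    return _mm_move_epi64(v);
}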
+let AddedComplexity = 20, isAsmParserOnly = 1 in
+def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+ "vmovq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (v2i64 (X86vzmovl
+ (loadv2i64 addr:$src))))]>,
+ XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 20 in {
def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movq\t{$src, $dst|$dst, $src}",
@@ -2536,49 +3206,198 @@ def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
(MOVZPQILo2PQIrm addr:$src)>;
}
+// Instructions to match in the assembler
+let isAsmParserOnly = 1 in {
+def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
+ "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
+def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
+ "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
+// Recognize "movd" with GR64 destination, but encode as a "movq"
+def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
+ "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
+}
+
// Instructions for the disassembler
// xr = XMM register
// xm = mem64
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movq\t{$src, $dst|$dst, $src}", []>, XS;
//===---------------------------------------------------------------------===//
-// SSE3 Instructions
+// SSE2 - Misc Instructions
//===---------------------------------------------------------------------===//
-// Move Instructions
-def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movshdup\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (v4f32 (movshdup
- VR128:$src, (undef))))]>;
-def MOVSHDUPrm : S3SI<0x16, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "movshdup\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (movshdup
- (memopv4f32 addr:$src), (undef)))]>;
+// Flush cache
+def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
+ "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
+ TB, Requires<[HasSSE2]>;
+
+// Load, store, and memory fence
+def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
+ "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
+def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
+ "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
+def : Pat<(X86LFence), (LFENCE)>;
+def : Pat<(X86MFence), (MFENCE)>;
+
+
+// Pause. This "instruction" is encoded as "rep; nop", so even though it
+// was introduced with SSE2, it's backward compatible.
+def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
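Because PAUSE encodes as rep;nop, the usual spin-wait idiom simply degrades to a nop on pre-SSE2 CPUs. An illustrative C sketch with the <emmintrin.h> intrinsic (build with -msse2):

#include <emmintrin.h>

/* Busy-wait until *flag becomes non-zero. PAUSE hints to the CPU that this
 * is a spin loop (saves power, reduces memory-order mis-speculation); being
 * "rep; nop" it is harmless on CPUs that predate it. */
static void spin_wait(volatile int *flag)
{
    while (!*flag)
        _mm_pause();
}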
-def MOVSLDUPrr : S3SI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movsldup\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (v4f32 (movsldup
+// Alias instructions that map zero vector to pxor / xorp* for sse.
+// We set canFoldAsLoad because this can be converted to a constant-pool
+// load of an all-ones value if folding it would be beneficial.
+let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
+ isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
+ // FIXME: Change encoding to pseudo.
+ def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
+ [(set VR128:$dst, (v4i32 immAllOnesV))]>;
+
+//===---------------------------------------------------------------------===//
+// SSE3 - Conversion Instructions
+//===---------------------------------------------------------------------===//
+
+// Convert Packed Double FP to Packed DW Integers
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+// The assembler can recognize rr 256-bit instructions by seeing a ymm
+// register, but the same isn't true when using memory operands instead.
+// Provide other assembly rr and rm forms to address this explicitly.
+def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
+ "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+
+// XMM only
+def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
+
+// YMM only
+def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
+ "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
+ "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
+}
+
+def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
+def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
+
+// Convert Packed DW Integers to Packed Double FP
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
+}
+
+def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
+def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
+
+// AVX 256-bit register conversion intrinsics
+def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
+ (VCVTDQ2PDYrr VR128:$src)>;
+def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
+ (VCVTDQ2PDYrm addr:$src)>;
+
+def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
+ (VCVTPD2DQYrr VR256:$src)>;
+def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
+ (VCVTPD2DQYrm addr:$src)>;
+
+//===---------------------------------------------------------------------===//
+// SSE3 - Move Instructions
+//===---------------------------------------------------------------------===//
+
+// Replicate Single FP
+multiclass sse3_replicate_sfp<bits<8> op, PatFrag rep_frag, string OpcodeStr> {
+def rr : S3SI<op, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (v4f32 (rep_frag
VR128:$src, (undef))))]>;
-def MOVSLDUPrm : S3SI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "movsldup\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (movsldup
+def rm : S3SI<op, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (rep_frag
(memopv4f32 addr:$src), (undef)))]>;
+}
+
+multiclass sse3_replicate_sfp_y<bits<8> op, PatFrag rep_frag,
+ string OpcodeStr> {
+def rr : S3SI<op, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
+def rm : S3SI<op, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
+}
-def MOVDDUPrr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movddup\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
-def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
- "movddup\t{$src, $dst|$dst, $src}",
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ // FIXME: Merge above classes when we have patterns for the ymm version
+ defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX;
+ defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX;
+ defm VMOVSHDUPY : sse3_replicate_sfp_y<0x16, movshdup, "vmovshdup">, VEX;
+ defm VMOVSLDUPY : sse3_replicate_sfp_y<0x12, movsldup, "vmovsldup">, VEX;
+}
+defm MOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "movshdup">;
+defm MOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "movsldup">;
+
+// Replicate Double FP
+multiclass sse3_replicate_dfp<string OpcodeStr> {
+def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
+def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
(v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
(undef))))]>;
+}
+
+multiclass sse3_replicate_dfp_y<string OpcodeStr> {
+def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ []>;
+def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ []>;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ // FIXME: Merge above classes when we have patterns for the ymm version
+ defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
+ defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
+}
+defm MOVDDUP : sse3_replicate_dfp<"movddup">;
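The SSE3 replicate forms above duplicate element pairs in-register without needing a shuffle immediate. A minimal C sketch with the <pmmintrin.h> intrinsics (build with -msse3):

#include <pmmintrin.h>    /* SSE3 */

/* MOVDDUP:  {a0, a0}         -- duplicate the low double           */
/* MOVSHDUP: {a1, a1, a3, a3} -- duplicate the odd (upper) floats   */
/* MOVSLDUP: {a0, a0, a2, a2} -- duplicate the even (lower) floats  */
static __m128d dup_low_double(__m128d a) { return _mm_movedup_pd(a); }
static __m128  dup_odd_floats(__m128 a)  { return _mm_movehdup_ps(a); }
static __m128  dup_even_floats(__m128 a) { return _mm_moveldup_ps(a); }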
+
+// Move Unaligned Integer
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+ "vlddqu\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
+ def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
+ "vlddqu\t{$src, $dst|$dst, $src}",
+ [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
+}
+def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+ "lddqu\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
(undef)),
(MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
+// Several Move patterns
let AddedComplexity = 5 in {
def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
(MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
@@ -2590,69 +3409,6 @@ def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
(MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
}
-// Arithmetic
-let Constraints = "$src1 = $dst" in {
- def ADDSUBPSrr : S3DI<0xD0, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "addsubps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
- VR128:$src2))]>;
- def ADDSUBPSrm : S3DI<0xD0, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "addsubps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
- (memop addr:$src2)))]>;
- def ADDSUBPDrr : S3I<0xD0, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "addsubpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
- VR128:$src2))]>;
- def ADDSUBPDrm : S3I<0xD0, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "addsubpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
- (memop addr:$src2)))]>;
-}
-
-def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
- "lddqu\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
-
-// Horizontal ops
-class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
- : S3DI<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
-class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
- : S3DI<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (v4f32 (IntId VR128:$src1, (memop addr:$src2))))]>;
-class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
- : S3I<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
-class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
- : S3I<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (v2f64 (IntId VR128:$src1, (memopv2f64 addr:$src2))))]>;
-
-let Constraints = "$src1 = $dst" in {
- def HADDPSrr : S3D_Intrr<0x7C, "haddps", int_x86_sse3_hadd_ps>;
- def HADDPSrm : S3D_Intrm<0x7C, "haddps", int_x86_sse3_hadd_ps>;
- def HADDPDrr : S3_Intrr <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
- def HADDPDrm : S3_Intrm <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
- def HSUBPSrr : S3D_Intrr<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
- def HSUBPSrm : S3D_Intrm<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
- def HSUBPDrr : S3_Intrr <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
- def HSUBPDrm : S3_Intrm <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
-}
-
-// Thread synchronization
-def MONITOR : I<0x01, MRM_C8, (outs), (ins), "monitor",
- [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
-def MWAIT : I<0x01, MRM_C9, (outs), (ins), "mwait",
- [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
-
// vector_shuffle v1, <undef> <1, 1, 3, 3>
let AddedComplexity = 15 in
def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
@@ -2670,12 +3426,115 @@ let AddedComplexity = 20 in
(MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
//===---------------------------------------------------------------------===//
-// SSSE3 Instructions
+// SSE3 - Arithmetic
//===---------------------------------------------------------------------===//
-/// SS3I_unop_rm_int_8 - Simple SSSE3 unary operator whose type is v*i8.
-multiclass SS3I_unop_rm_int_8<bits<8> opc, string OpcodeStr,
- Intrinsic IntId64, Intrinsic IntId128> {
+multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
+ X86MemOperand x86memop, bit Is2Addr = 1> {
+ def rr : I<0xD0, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
+ def rm : I<0xD0, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX],
+ ExeDomain = SSEPackedDouble in {
+ defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
+ f128mem, 0>, XD, VEX_4V;
+ defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
+ f128mem, 0>, OpSize, VEX_4V;
+ defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
+ f256mem, 0>, XD, VEX_4V;
+ defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
+ f256mem, 0>, OpSize, VEX_4V;
+}
+let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
+ ExeDomain = SSEPackedDouble in {
+ defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
+ f128mem>, XD;
+ defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
+ f128mem>, TB, OpSize;
+}
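ADDSUBPS/ADDSUBPD above subtract in the even lanes and add in the odd lanes, the core step of interleaved complex multiplication. An illustrative C sketch (<pmmintrin.h>, -msse3):

#include <pmmintrin.h>
#include <stdio.h>

int main(void) {
    __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
    __m128 b = _mm_setr_ps(10.0f, 10.0f, 10.0f, 10.0f);

    /* ADDSUBPS: lane0 = a0-b0, lane1 = a1+b1, lane2 = a2-b2, lane3 = a3+b3 */
    __m128 r = _mm_addsub_ps(a, b);

    float out[4];
    _mm_storeu_ps(out, r);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* -9 12 -7 14 */
    return 0;
}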
+
+//===---------------------------------------------------------------------===//
+// SSE3 Instructions
+//===---------------------------------------------------------------------===//
+
+// Horizontal ops
+multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
+ X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
+ def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
+
+ def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
+}
+multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
+ X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
+ def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
+
+ def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
+ int_x86_sse3_hadd_ps, 0>, VEX_4V;
+ defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
+ int_x86_sse3_hadd_pd, 0>, VEX_4V;
+ defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
+ int_x86_sse3_hsub_ps, 0>, VEX_4V;
+ defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
+ int_x86_sse3_hsub_pd, 0>, VEX_4V;
+ defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
+ int_x86_avx_hadd_ps_256, 0>, VEX_4V;
+ defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
+ int_x86_avx_hadd_pd_256, 0>, VEX_4V;
+ defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
+ int_x86_avx_hsub_ps_256, 0>, VEX_4V;
+ defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
+ int_x86_avx_hsub_pd_256, 0>, VEX_4V;
+}
+
+let Constraints = "$src1 = $dst" in {
+ defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
+ int_x86_sse3_hadd_ps>;
+ defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
+ int_x86_sse3_hadd_pd>;
+ defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
+ int_x86_sse3_hsub_ps>;
+ defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
+ int_x86_sse3_hsub_pd>;
+}
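The horizontal ops above add (or subtract) adjacent lanes within each source; two back-to-back HADDPS reduce a 4-float vector to a scalar sum. A minimal C sketch (<pmmintrin.h>, -msse3):

#include <pmmintrin.h>

/* Sum the four floats of 'v' with two HADDPS steps:
 *   h1 = {v0+v1, v2+v3, v0+v1, v2+v3}
 *   h2 = {v0+v1+v2+v3, ...}                                         */
static float hsum_ps(__m128 v)
{
    __m128 h1 = _mm_hadd_ps(v, v);
    __m128 h2 = _mm_hadd_ps(h1, h1);
    return _mm_cvtss_f32(h2);   /* extract lane 0 */
}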
+
+//===---------------------------------------------------------------------===//
+// SSSE3 - Packed Absolute Instructions
+//===---------------------------------------------------------------------===//
+
+/// SS3I_unop_rm_int_mm - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
+multiclass SS3I_unop_rm_int_mm<bits<8> opc, string OpcodeStr,
+ PatFrag mem_frag64, Intrinsic IntId64> {
def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR64:$dst, (IntId64 VR64:$src))]>;
@@ -2683,8 +3542,12 @@ multiclass SS3I_unop_rm_int_8<bits<8> opc, string OpcodeStr,
def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR64:$dst,
- (IntId64 (bitconvert (memopv8i8 addr:$src))))]>;
+ (IntId64 (bitconvert (mem_frag64 addr:$src))))]>;
+}
+/// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
+multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
+ PatFrag mem_frag128, Intrinsic IntId128> {
def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
@@ -2696,256 +3559,216 @@ multiclass SS3I_unop_rm_int_8<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
(IntId128
- (bitconvert (memopv16i8 addr:$src))))]>, OpSize;
+ (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
}
-/// SS3I_unop_rm_int_16 - Simple SSSE3 unary operator whose type is v*i16.
-multiclass SS3I_unop_rm_int_16<bits<8> opc, string OpcodeStr,
- Intrinsic IntId64, Intrinsic IntId128> {
- def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR64:$dst, (IntId64 VR64:$src))]>;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
+ int_x86_ssse3_pabs_b_128>, VEX;
+ defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
+ int_x86_ssse3_pabs_w_128>, VEX;
+ defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
+ int_x86_ssse3_pabs_d_128>, VEX;
+}
- def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
- (ins i64mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR64:$dst,
- (IntId64
- (bitconvert (memopv4i16 addr:$src))))]>;
+defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
+ int_x86_ssse3_pabs_b_128>,
+ SS3I_unop_rm_int_mm<0x1C, "pabsb", memopv8i8,
+ int_x86_ssse3_pabs_b>;
- def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId128 VR128:$src))]>,
- OpSize;
+defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
+ int_x86_ssse3_pabs_w_128>,
+ SS3I_unop_rm_int_mm<0x1D, "pabsw", memopv4i16,
+ int_x86_ssse3_pabs_w>;
+
+defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
+ int_x86_ssse3_pabs_d_128>,
+ SS3I_unop_rm_int_mm<0x1E, "pabsd", memopv2i32,
+ int_x86_ssse3_pabs_d>;
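
PABSB/PABSW/PABSD take the per-lane absolute value; the defm lines above pair the 128-bit (XMM) and legacy 64-bit (MMX) forms under one name. A short C sketch of the XMM form (illustrative helper name, assumes -mssse3):

  #include <immintrin.h>

  /* pabsw: every signed 16-bit lane is replaced by its absolute value */
  static __m128i pabs_demo(__m128i v) {
      return _mm_abs_epi16(v);
  }
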
+//===---------------------------------------------------------------------===//
+// SSSE3 - Packed Binary Operator Instructions
+//===---------------------------------------------------------------------===//
+
+/// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
+multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
+ PatFrag mem_frag128, Intrinsic IntId128,
+ bit Is2Addr = 1> {
+ let isCommutable = 1 in
+ def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
+ OpSize;
def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins i128mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst,
- (IntId128
- (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst,
+ (IntId128 VR128:$src1,
+ (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
-
-/// SS3I_unop_rm_int_32 - Simple SSSE3 unary operator whose type is v*i32.
-multiclass SS3I_unop_rm_int_32<bits<8> opc, string OpcodeStr,
- Intrinsic IntId64, Intrinsic IntId128> {
+multiclass SS3I_binop_rm_int_mm<bits<8> opc, string OpcodeStr,
+ PatFrag mem_frag64, Intrinsic IntId64> {
+ let isCommutable = 1 in
def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR64:$dst, (IntId64 VR64:$src))]>;
-
+ (ins VR64:$src1, VR64:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]>;
def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
- (ins i64mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR64:$dst,
- (IntId64
- (bitconvert (memopv2i32 addr:$src))))]>;
+ (ins VR64:$src1, i64mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ [(set VR64:$dst,
+ (IntId64 VR64:$src1,
+ (bitconvert (memopv8i8 addr:$src2))))]>;
+}
- def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId128 VR128:$src))]>,
- OpSize;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+let isCommutable = 0 in {
+ defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
+ int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
+ defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
+ int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
+ defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
+ int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
+ defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
+ int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
+ defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
+ int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
+ defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
+ int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
+ defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
+ int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
+ defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
+ int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
+ defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
+ int_x86_ssse3_psign_b_128, 0>, VEX_4V;
+ defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
+ int_x86_ssse3_psign_w_128, 0>, VEX_4V;
+ defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
+ int_x86_ssse3_psign_d_128, 0>, VEX_4V;
+}
+defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
+ int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
+}
- def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins i128mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst,
- (IntId128
- (bitconvert (memopv4i32 addr:$src))))]>, OpSize;
+// None of these have i8 immediate fields.
+let ImmT = NoImm, Constraints = "$src1 = $dst" in {
+let isCommutable = 0 in {
+ defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
+ int_x86_ssse3_phadd_w_128>,
+ SS3I_binop_rm_int_mm<0x01, "phaddw", memopv4i16,
+ int_x86_ssse3_phadd_w>;
+ defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
+ int_x86_ssse3_phadd_d_128>,
+ SS3I_binop_rm_int_mm<0x02, "phaddd", memopv2i32,
+ int_x86_ssse3_phadd_d>;
+ defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
+ int_x86_ssse3_phadd_sw_128>,
+ SS3I_binop_rm_int_mm<0x03, "phaddsw", memopv4i16,
+ int_x86_ssse3_phadd_sw>;
+ defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
+ int_x86_ssse3_phsub_w_128>,
+ SS3I_binop_rm_int_mm<0x05, "phsubw", memopv4i16,
+ int_x86_ssse3_phsub_w>;
+ defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
+ int_x86_ssse3_phsub_d_128>,
+ SS3I_binop_rm_int_mm<0x06, "phsubd", memopv2i32,
+ int_x86_ssse3_phsub_d>;
+ defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
+ int_x86_ssse3_phsub_sw_128>,
+ SS3I_binop_rm_int_mm<0x07, "phsubsw", memopv4i16,
+ int_x86_ssse3_phsub_sw>;
+ defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
+ int_x86_ssse3_pmadd_ub_sw_128>,
+ SS3I_binop_rm_int_mm<0x04, "pmaddubsw", memopv8i8,
+ int_x86_ssse3_pmadd_ub_sw>;
+ defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv8i8,
+ int_x86_ssse3_pshuf_b_128>,
+ SS3I_binop_rm_int_mm<0x00, "pshufb", memopv8i8,
+ int_x86_ssse3_pshuf_b>;
+ defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
+ int_x86_ssse3_psign_b_128>,
+ SS3I_binop_rm_int_mm<0x08, "psignb", memopv8i8,
+ int_x86_ssse3_psign_b>;
+ defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
+ int_x86_ssse3_psign_w_128>,
+ SS3I_binop_rm_int_mm<0x09, "psignw", memopv4i16,
+ int_x86_ssse3_psign_w>;
+ defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
+ int_x86_ssse3_psign_d_128>,
+ SS3I_binop_rm_int_mm<0x0A, "psignd", memopv2i32,
+ int_x86_ssse3_psign_d>;
+}
+defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
+ int_x86_ssse3_pmul_hr_sw_128>,
+ SS3I_binop_rm_int_mm<0x0B, "pmulhrsw", memopv4i16,
+ int_x86_ssse3_pmul_hr_sw>;
}
-defm PABSB : SS3I_unop_rm_int_8 <0x1C, "pabsb",
- int_x86_ssse3_pabs_b,
- int_x86_ssse3_pabs_b_128>;
-defm PABSW : SS3I_unop_rm_int_16<0x1D, "pabsw",
- int_x86_ssse3_pabs_w,
- int_x86_ssse3_pabs_w_128>;
-defm PABSD : SS3I_unop_rm_int_32<0x1E, "pabsd",
- int_x86_ssse3_pabs_d,
- int_x86_ssse3_pabs_d_128>;
+def : Pat<(X86pshufb VR128:$src, VR128:$mask),
+ (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
+def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
+ (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
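
The X86pshufb patterns above cover the per-byte table lookup: each destination byte picks a source byte by the low four bits of the corresponding mask byte, or is zeroed when the mask byte's high bit is set. A short C sketch that reverses the byte order of a vector (illustrative helper name, assumes -mssse3):

  #include <immintrin.h>

  static __m128i byte_reverse(__m128i v) {
      /* mask[i] = 15 - i, so result byte i = source byte 15 - i */
      const __m128i idx = _mm_setr_epi8(15, 14, 13, 12, 11, 10, 9, 8,
                                         7,  6,  5,  4,  3,  2, 1, 0);
      return _mm_shuffle_epi8(v, idx);   /* pshufb */
  }
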
-/// SS3I_binop_rm_int_8 - Simple SSSE3 binary operator whose type is v*i8.
-let Constraints = "$src1 = $dst" in {
- multiclass SS3I_binop_rm_int_8<bits<8> opc, string OpcodeStr,
- Intrinsic IntId64, Intrinsic IntId128,
- bit Commutable = 0> {
- def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src1, VR64:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
- let isCommutable = Commutable;
- }
- def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
- (ins VR64:$src1, i64mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst,
- (IntId64 VR64:$src1,
- (bitconvert (memopv8i8 addr:$src2))))]>;
-
- def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
- OpSize {
- let isCommutable = Commutable;
- }
- def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1,
- (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
- }
-}
+//===---------------------------------------------------------------------===//
+// SSSE3 - Packed Align Instruction Patterns
+//===---------------------------------------------------------------------===//
-/// SS3I_binop_rm_int_16 - Simple SSSE3 binary operator whose type is v*i16.
-let Constraints = "$src1 = $dst" in {
- multiclass SS3I_binop_rm_int_16<bits<8> opc, string OpcodeStr,
- Intrinsic IntId64, Intrinsic IntId128,
- bit Commutable = 0> {
- def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src1, VR64:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
- let isCommutable = Commutable;
- }
- def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
- (ins VR64:$src1, i64mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst,
- (IntId64 VR64:$src1,
- (bitconvert (memopv4i16 addr:$src2))))]>;
-
- def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
- OpSize {
- let isCommutable = Commutable;
- }
- def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1,
- (bitconvert (memopv8i16 addr:$src2))))]>, OpSize;
- }
+multiclass ssse3_palign_mm<string asm> {
+ def R64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
+ (ins VR64:$src1, VR64:$src2, i8imm:$src3),
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
+ def R64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
+ (ins VR64:$src1, i64mem:$src2, i8imm:$src3),
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>;
}
-/// SS3I_binop_rm_int_32 - Simple SSSE3 binary operator whose type is v*i32.
-let Constraints = "$src1 = $dst" in {
- multiclass SS3I_binop_rm_int_32<bits<8> opc, string OpcodeStr,
- Intrinsic IntId64, Intrinsic IntId128,
- bit Commutable = 0> {
- def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src1, VR64:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
- let isCommutable = Commutable;
- }
- def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
- (ins VR64:$src1, i64mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst,
- (IntId64 VR64:$src1,
- (bitconvert (memopv2i32 addr:$src2))))]>;
-
- def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
- OpSize {
- let isCommutable = Commutable;
- }
- def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1,
- (bitconvert (memopv4i32 addr:$src2))))]>, OpSize;
- }
+multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
+ def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ []>, OpSize;
+ def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ []>, OpSize;
}
-defm PHADDW : SS3I_binop_rm_int_16<0x01, "phaddw",
- int_x86_ssse3_phadd_w,
- int_x86_ssse3_phadd_w_128>;
-defm PHADDD : SS3I_binop_rm_int_32<0x02, "phaddd",
- int_x86_ssse3_phadd_d,
- int_x86_ssse3_phadd_d_128>;
-defm PHADDSW : SS3I_binop_rm_int_16<0x03, "phaddsw",
- int_x86_ssse3_phadd_sw,
- int_x86_ssse3_phadd_sw_128>;
-defm PHSUBW : SS3I_binop_rm_int_16<0x05, "phsubw",
- int_x86_ssse3_phsub_w,
- int_x86_ssse3_phsub_w_128>;
-defm PHSUBD : SS3I_binop_rm_int_32<0x06, "phsubd",
- int_x86_ssse3_phsub_d,
- int_x86_ssse3_phsub_d_128>;
-defm PHSUBSW : SS3I_binop_rm_int_16<0x07, "phsubsw",
- int_x86_ssse3_phsub_sw,
- int_x86_ssse3_phsub_sw_128>;
-defm PMADDUBSW : SS3I_binop_rm_int_8 <0x04, "pmaddubsw",
- int_x86_ssse3_pmadd_ub_sw,
- int_x86_ssse3_pmadd_ub_sw_128>;
-defm PMULHRSW : SS3I_binop_rm_int_16<0x0B, "pmulhrsw",
- int_x86_ssse3_pmul_hr_sw,
- int_x86_ssse3_pmul_hr_sw_128, 1>;
-defm PSHUFB : SS3I_binop_rm_int_8 <0x00, "pshufb",
- int_x86_ssse3_pshuf_b,
- int_x86_ssse3_pshuf_b_128>;
-defm PSIGNB : SS3I_binop_rm_int_8 <0x08, "psignb",
- int_x86_ssse3_psign_b,
- int_x86_ssse3_psign_b_128>;
-defm PSIGNW : SS3I_binop_rm_int_16<0x09, "psignw",
- int_x86_ssse3_psign_w,
- int_x86_ssse3_psign_w_128>;
-defm PSIGND : SS3I_binop_rm_int_32<0x0A, "psignd",
- int_x86_ssse3_psign_d,
- int_x86_ssse3_psign_d_128>;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
+let Constraints = "$src1 = $dst" in
+ defm PALIGN : ssse3_palign<"palignr">,
+ ssse3_palign_mm<"palignr">;
-let Constraints = "$src1 = $dst" in {
- def PALIGNR64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src1, VR64:$src2, i8imm:$src3),
- "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- []>;
- def PALIGNR64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
- (ins VR64:$src1, i64mem:$src2, i8imm:$src3),
- "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- []>;
-
- def PALIGNR128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
- "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- []>, OpSize;
- def PALIGNR128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
- "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- []>, OpSize;
-}
-
-// palignr patterns.
-def : Pat<(int_x86_ssse3_palign_r VR64:$src1, VR64:$src2, (i8 imm:$src3)),
- (PALIGNR64rr VR64:$src1, VR64:$src2, (BYTE_imm imm:$src3))>,
+let AddedComplexity = 5 in {
+
+def : Pat<(v1i64 (palign:$src3 VR64:$src1, VR64:$src2)),
+ (PALIGNR64rr VR64:$src2, VR64:$src1,
+ (SHUFFLE_get_palign_imm VR64:$src3))>,
Requires<[HasSSSE3]>;
-def : Pat<(int_x86_ssse3_palign_r VR64:$src1,
- (memop64 addr:$src2),
- (i8 imm:$src3)),
- (PALIGNR64rm VR64:$src1, addr:$src2, (BYTE_imm imm:$src3))>,
+def : Pat<(v2i32 (palign:$src3 VR64:$src1, VR64:$src2)),
+ (PALIGNR64rr VR64:$src2, VR64:$src1,
+ (SHUFFLE_get_palign_imm VR64:$src3))>,
Requires<[HasSSSE3]>;
-
-def : Pat<(int_x86_ssse3_palign_r_128 VR128:$src1, VR128:$src2, (i8 imm:$src3)),
- (PALIGNR128rr VR128:$src1, VR128:$src2, (BYTE_imm imm:$src3))>,
+def : Pat<(v4i16 (palign:$src3 VR64:$src1, VR64:$src2)),
+ (PALIGNR64rr VR64:$src2, VR64:$src1,
+ (SHUFFLE_get_palign_imm VR64:$src3))>,
Requires<[HasSSSE3]>;
-def : Pat<(int_x86_ssse3_palign_r_128 VR128:$src1,
- (memopv2i64 addr:$src2),
- (i8 imm:$src3)),
- (PALIGNR128rm VR128:$src1, addr:$src2, (BYTE_imm imm:$src3))>,
+def : Pat<(v8i8 (palign:$src3 VR64:$src1, VR64:$src2)),
+ (PALIGNR64rr VR64:$src2, VR64:$src1,
+ (SHUFFLE_get_palign_imm VR64:$src3))>,
Requires<[HasSSSE3]>;
-let AddedComplexity = 5 in {
def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
(PALIGNR128rr VR128:$src2, VR128:$src1,
(SHUFFLE_get_palign_imm VR128:$src3))>,
@@ -2964,10 +3787,15 @@ def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
Requires<[HasSSSE3]>;
}
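
The palign patterns hand the shuffle operands to PALIGNR swapped, because the instruction right-shifts the concatenation of its first operand (high bytes) and second operand (low bytes) by the immediate byte count. A short C sketch of that semantics (illustrative helper name, assumes -mssse3):

  #include <immintrin.h>

  /* palignr a, b, 4: bytes 4..19 of the 32-byte value a:b,
     i.e. { b[4..15], a[0..3] } in increasing byte order */
  static __m128i alignr_demo(__m128i a, __m128i b) {
      return _mm_alignr_epi8(a, b, 4);
  }
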
-def : Pat<(X86pshufb VR128:$src, VR128:$mask),
- (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
-def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
- (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
+//===---------------------------------------------------------------------===//
+// SSSE3 Misc Instructions
+//===---------------------------------------------------------------------===//
+
+// Thread synchronization
+def MONITOR : I<0x01, MRM_C8, (outs), (ins), "monitor",
+ [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
+def MWAIT : I<0x01, MRM_C9, (outs), (ins), "mwait",
+ [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
//===---------------------------------------------------------------------===//
// Non-Instruction Patterns
@@ -3021,15 +3849,15 @@ let Predicates = [HasSSE2] in {
let AddedComplexity = 15 in {
// Zeroing a VR128 then do a MOVS{S|D} to the lower bits.
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
- (MOVSDrr (v2f64 (V_SET0)), FR64:$src)>;
+ (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
- (MOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
+ (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
- (MOVSSrr (v4f32 (V_SET0)),
- (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), x86_subreg_ss)))>;
+ (MOVSSrr (v4f32 (V_SET0PS)),
+ (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
- (MOVSSrr (v4i32 (V_SET0)),
- (EXTRACT_SUBREG (v4i32 VR128:$src), x86_subreg_ss))>;
+ (MOVSSrr (v4i32 (V_SET0PI)),
+ (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
}
// Splat v2f64 / v2i64
@@ -3165,17 +3993,17 @@ let AddedComplexity = 15 in {
// Setting the lowest element in the vector.
def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
(MOVSSrr (v4i32 VR128:$src1),
- (EXTRACT_SUBREG (v4i32 VR128:$src2), x86_subreg_ss))>;
+ (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
(MOVSDrr (v2i64 VR128:$src1),
- (EXTRACT_SUBREG (v2i64 VR128:$src2), x86_subreg_sd))>;
+ (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
// vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
- (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, x86_subreg_sd))>,
+ (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
Requires<[HasSSE2]>;
def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
- (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, x86_subreg_sd))>,
+ (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
Requires<[HasSSE2]>;
}
@@ -3186,9 +4014,6 @@ def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
(SHUFFLE_get_shuf_imm VR128:$src3))>;
// Set lowest element and zero upper elements.
-let AddedComplexity = 15 in
-def : Pat<(v2f64 (movl immAllZerosV_bc, VR128:$src)),
- (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
(MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
@@ -3224,293 +4049,66 @@ def : Pat<(v2i32 (fp_to_sint (v2f64 VR128:$src))),
(Int_CVTTPD2PIrr VR128:$src)>, Requires<[HasSSE2]>;
// Use movaps / movups for SSE integer load / store (one byte shorter).
-def : Pat<(alignedloadv4i32 addr:$src),
- (MOVAPSrm addr:$src)>;
-def : Pat<(loadv4i32 addr:$src),
- (MOVUPSrm addr:$src)>;
-def : Pat<(alignedloadv2i64 addr:$src),
- (MOVAPSrm addr:$src)>;
-def : Pat<(loadv2i64 addr:$src),
- (MOVUPSrm addr:$src)>;
-
-def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
- (MOVAPSmr addr:$dst, VR128:$src)>;
-def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
- (MOVAPSmr addr:$dst, VR128:$src)>;
-def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
- (MOVAPSmr addr:$dst, VR128:$src)>;
-def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
- (MOVAPSmr addr:$dst, VR128:$src)>;
-def : Pat<(store (v2i64 VR128:$src), addr:$dst),
- (MOVUPSmr addr:$dst, VR128:$src)>;
-def : Pat<(store (v4i32 VR128:$src), addr:$dst),
- (MOVUPSmr addr:$dst, VR128:$src)>;
-def : Pat<(store (v8i16 VR128:$src), addr:$dst),
- (MOVUPSmr addr:$dst, VR128:$src)>;
-def : Pat<(store (v16i8 VR128:$src), addr:$dst),
- (MOVUPSmr addr:$dst, VR128:$src)>;
-
-//===----------------------------------------------------------------------===//
-// SSE4.1 Instructions
-//===----------------------------------------------------------------------===//
-
-multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd,
- string OpcodeStr,
- Intrinsic V4F32Int,
- Intrinsic V2F64Int> {
- // Intrinsic operation, reg.
- // Vector intrinsic operation, reg
- def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128:$dst, (V4F32Int VR128:$src1, imm:$src2))]>,
- OpSize;
-
- // Vector intrinsic operation, mem
- def PSm_Int : Ii8<opcps, MRMSrcMem,
- (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128:$dst,
- (V4F32Int (memopv4f32 addr:$src1),imm:$src2))]>,
- TA, OpSize,
- Requires<[HasSSE41]>;
-
- // Vector intrinsic operation, reg
- def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128:$dst, (V2F64Int VR128:$src1, imm:$src2))]>,
- OpSize;
-
- // Vector intrinsic operation, mem
- def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
- (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128:$dst,
- (V2F64Int (memopv2f64 addr:$src1),imm:$src2))]>,
- OpSize;
+let Predicates = [HasSSE1] in {
+ def : Pat<(alignedloadv4i32 addr:$src),
+ (MOVAPSrm addr:$src)>;
+ def : Pat<(loadv4i32 addr:$src),
+ (MOVUPSrm addr:$src)>;
+ def : Pat<(alignedloadv2i64 addr:$src),
+ (MOVAPSrm addr:$src)>;
+ def : Pat<(loadv2i64 addr:$src),
+ (MOVUPSrm addr:$src)>;
+
+ def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
+ (MOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
+ (MOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
+ (MOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
+ (MOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v2i64 VR128:$src), addr:$dst),
+ (MOVUPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v4i32 VR128:$src), addr:$dst),
+ (MOVUPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v8i16 VR128:$src), addr:$dst),
+ (MOVUPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v16i8 VR128:$src), addr:$dst),
+ (MOVUPSmr addr:$dst, VR128:$src)>;
}
-let Constraints = "$src1 = $dst" in {
-multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
- string OpcodeStr,
- Intrinsic F32Int,
- Intrinsic F64Int> {
- // Intrinsic operation, reg.
- def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
- (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
- OpSize;
-
- // Intrinsic operation, mem.
- def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
- (outs VR128:$dst),
- (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
- OpSize;
-
- // Intrinsic operation, reg.
- def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
- (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
- OpSize;
-
- // Intrinsic operation, mem.
- def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
- (outs VR128:$dst),
- (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
- OpSize;
+// Use vmovaps/vmovups for AVX 128-bit integer load/store (one byte shorter).
+let Predicates = [HasAVX] in {
+ def : Pat<(alignedloadv4i32 addr:$src),
+ (VMOVAPSrm addr:$src)>;
+ def : Pat<(loadv4i32 addr:$src),
+ (VMOVUPSrm addr:$src)>;
+ def : Pat<(alignedloadv2i64 addr:$src),
+ (VMOVAPSrm addr:$src)>;
+ def : Pat<(loadv2i64 addr:$src),
+ (VMOVUPSrm addr:$src)>;
+
+ def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
+ (VMOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
+ (VMOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
+ (VMOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
+ (VMOVAPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v2i64 VR128:$src), addr:$dst),
+ (VMOVUPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v4i32 VR128:$src), addr:$dst),
+ (VMOVUPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v8i16 VR128:$src), addr:$dst),
+ (VMOVUPSmr addr:$dst, VR128:$src)>;
+ def : Pat<(store (v16i8 VR128:$src), addr:$dst),
+ (VMOVUPSmr addr:$dst, VR128:$src)>;
}
-}
-
-// FP round - roundss, roundps, roundsd, roundpd
-defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round",
- int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
-defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
- int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
-
-// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
-multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
- Intrinsic IntId128> {
- def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
- def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins i128mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst,
- (IntId128
- (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
-}
-
-defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
- int_x86_sse41_phminposuw>;
-
-/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
-let Constraints = "$src1 = $dst" in {
- multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
- Intrinsic IntId128, bit Commutable = 0> {
- def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
- OpSize {
- let isCommutable = Commutable;
- }
- def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1,
- (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
- }
-}
-
-defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq",
- int_x86_sse41_pcmpeqq, 1>;
-defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw",
- int_x86_sse41_packusdw, 0>;
-defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb",
- int_x86_sse41_pminsb, 1>;
-defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd",
- int_x86_sse41_pminsd, 1>;
-defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud",
- int_x86_sse41_pminud, 1>;
-defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw",
- int_x86_sse41_pminuw, 1>;
-defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb",
- int_x86_sse41_pmaxsb, 1>;
-defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd",
- int_x86_sse41_pmaxsd, 1>;
-defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud",
- int_x86_sse41_pmaxud, 1>;
-defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw",
- int_x86_sse41_pmaxuw, 1>;
-
-defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq, 1>;
-
-def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
- (PCMPEQQrr VR128:$src1, VR128:$src2)>;
-def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
- (PCMPEQQrm VR128:$src1, addr:$src2)>;
-
-/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
-let Constraints = "$src1 = $dst" in {
- multiclass SS41I_binop_patint<bits<8> opc, string OpcodeStr, ValueType OpVT,
- SDNode OpNode, Intrinsic IntId128,
- bit Commutable = 0> {
- def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpNode (OpVT VR128:$src1),
- VR128:$src2))]>, OpSize {
- let isCommutable = Commutable;
- }
- def rr_int : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
- OpSize {
- let isCommutable = Commutable;
- }
- def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst,
- (OpVT (OpNode VR128:$src1, (memop addr:$src2))))]>, OpSize;
- def rm_int : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1, (memop addr:$src2)))]>,
- OpSize;
- }
-}
-defm PMULLD : SS41I_binop_patint<0x40, "pmulld", v4i32, mul,
- int_x86_sse41_pmulld, 1>;
-
-/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
-let Constraints = "$src1 = $dst" in {
- multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
- Intrinsic IntId128, bit Commutable = 0> {
- def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1, VR128:$src2, imm:$src3))]>,
- OpSize {
- let isCommutable = Commutable;
- }
- def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1,
- (bitconvert (memopv16i8 addr:$src2)), imm:$src3))]>,
- OpSize;
- }
-}
-
-defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps",
- int_x86_sse41_blendps, 0>;
-defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd",
- int_x86_sse41_blendpd, 0>;
-defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw",
- int_x86_sse41_pblendw, 0>;
-defm DPPS : SS41I_binop_rmi_int<0x40, "dpps",
- int_x86_sse41_dpps, 1>;
-defm DPPD : SS41I_binop_rmi_int<0x41, "dppd",
- int_x86_sse41_dppd, 1>;
-defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw",
- int_x86_sse41_mpsadbw, 1>;
-
-
-/// SS41I_ternary_int - SSE 4.1 ternary operator
-let Uses = [XMM0], Constraints = "$src1 = $dst" in {
- multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
- def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr,
- "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
- [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
- OpSize;
-
- def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr,
- "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
- [(set VR128:$dst,
- (IntId VR128:$src1,
- (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
- }
-}
-
-defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
-defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
-defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
+//===----------------------------------------------------------------------===//
+// SSE4.1 - Packed Move with Sign/Zero Extend
+//===----------------------------------------------------------------------===//
multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
@@ -3524,6 +4122,21 @@ multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
OpSize;
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
+ VEX;
+defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
+ VEX;
+defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
+ VEX;
+defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
+ VEX;
+defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
+ VEX;
+defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
+ VEX;
+}
+
defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
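
The sign/zero-extend moves widen the low lanes of the source register or memory operand in one instruction. A short C sketch of the byte-to-word forms (illustrative helper names, assumes -msse4.1):

  #include <immintrin.h>

  /* pmovsxbw: sign-extend the low 8 bytes of v to eight 16-bit lanes */
  static __m128i sx_demo(__m128i v) { return _mm_cvtepi8_epi16(v); }

  /* pmovzxbw: zero-extend the low 8 bytes of v to eight 16-bit lanes */
  static __m128i zx_demo(__m128i v) { return _mm_cvtepu8_epi16(v); }
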
@@ -3575,6 +4188,17 @@ multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
OpSize;
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
+ VEX;
+defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
+ VEX;
+defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
+ VEX;
+defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
+ VEX;
+}
+
defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
@@ -3605,6 +4229,12 @@ multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
OpSize;
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
+ VEX;
+defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
+ VEX;
+}
defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
@@ -3619,6 +4249,9 @@ def : Pat<(int_x86_sse41_pmovzxbq
(v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
(PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
+//===----------------------------------------------------------------------===//
+// SSE4.1 - Extract Instructions
+//===----------------------------------------------------------------------===//
/// SS41I_binop_ext8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
@@ -3638,6 +4271,13 @@ multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
// (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
+ def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
+ (ins VR128:$src1, i32i8imm:$src2),
+ "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
+}
+
defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
@@ -3653,6 +4293,9 @@ multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
// (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
+
defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
@@ -3672,8 +4315,31 @@ multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
addr:$dst)]>, OpSize;
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
+
defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
+/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
+multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
+ def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
+ (ins VR128:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set GR64:$dst,
+ (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
+ def mr : SS4AIi8<opc, MRMDestMem, (outs),
+ (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
+ addr:$dst)]>, OpSize, REX_W;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
+
+defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
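
The extract family copies a lane selected by the immediate into a general-purpose register or a memory destination. A short C sketch (illustrative helper names, assumes -msse4.1; the 64-bit form also needs an x86-64 target):

  #include <immintrin.h>

  static int extract_demo(__m128i v) {
      int lane1 = _mm_extract_epi32(v, 1);   /* pextrd */
      int byte3 = _mm_extract_epi8(v, 3);    /* pextrb, zero-extended */
      return lane1 + byte3;
  }

  #ifdef __x86_64__
  static long long extract64_demo(__m128i v) {
      return _mm_extract_epi64(v, 1);        /* pextrq (REX.W form above) */
  }
  #endif
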
/// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
/// destination
@@ -3693,6 +4359,13 @@ multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
addr:$dst)]>, OpSize;
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
+ def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
+ (ins VR128:$src1, i32i8imm:$src2),
+ "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, OpSize, VEX;
+}
defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
// Also match an EXTRACTPS store when the store is done as f32 instead of i32.
@@ -3702,245 +4375,764 @@ def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
(EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
Requires<[HasSSE41]>;
-let Constraints = "$src1 = $dst" in {
- multiclass SS41I_insert8<bits<8> opc, string OpcodeStr> {
- def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
- def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
- imm:$src3))]>, OpSize;
- }
+//===----------------------------------------------------------------------===//
+// SSE4.1 - Insert Instructions
+//===----------------------------------------------------------------------===//
+
+multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
+ def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
+ def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
+ imm:$src3))]>, OpSize;
}
-defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
+let Constraints = "$src1 = $dst" in
+ defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
+
+multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
+ def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
+ OpSize;
+ def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
+ imm:$src3)))]>, OpSize;
+}
-let Constraints = "$src1 = $dst" in {
- multiclass SS41I_insert32<bits<8> opc, string OpcodeStr> {
- def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
- OpSize;
- def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
- imm:$src3)))]>, OpSize;
- }
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
+let Constraints = "$src1 = $dst" in
+ defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
+
+multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
+ def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
+ OpSize;
+ def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
+ imm:$src3)))]>, OpSize;
}
-defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
+let Constraints = "$src1 = $dst" in
+ defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
// insertps has a few different modes, there's the first two here below which
// are optimized inserts that won't zero arbitrary elements in the destination
// vector. The next one matches the intrinsic and could zero arbitrary elements
// in the target vector.
-let Constraints = "$src1 = $dst" in {
- multiclass SS41I_insertf32<bits<8> opc, string OpcodeStr> {
- def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
+multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
+ def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
OpSize;
- def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (X86insrtps VR128:$src1,
- (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
- imm:$src3))]>, OpSize;
- }
+ def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (X86insrtps VR128:$src1,
+ (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
+ imm:$src3))]>, OpSize;
}
-defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
+let Constraints = "$src1 = $dst" in
+ defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
- (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>;
+ (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
+ Requires<[HasAVX]>;
+def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
+ (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
+ Requires<[HasSSE41]>;
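
The insertps immediate packs three fields: bits 7:6 pick the source lane of $src2, bits 5:4 pick the destination lane, and the low four bits are a zero mask applied to the result. A short C sketch (illustrative helper names, assumes -msse4.1):

  #include <immintrin.h>

  /* insertps a, b, 0x20: copy b[0] into lane 2 of a -> { a0, a1, b0, a3 } */
  static __m128 insertps_demo(__m128 a, __m128 b) {
      return _mm_insert_ps(a, b, 0x20);
  }

  /* pinsrd: replace lane 3 of an integer vector with a GPR value */
  static __m128i pinsr_demo(__m128i v, int x) {
      return _mm_insert_epi32(v, x, 3);
  }
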
+
+//===----------------------------------------------------------------------===//
+// SSE4.1 - Round Instructions
+//===----------------------------------------------------------------------===//
+
+multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
+ X86MemOperand x86memop, RegisterClass RC,
+ PatFrag mem_frag32, PatFrag mem_frag64,
+ Intrinsic V4F32Int, Intrinsic V2F64Int> {
+ // Intrinsic operation, reg.
+ // Vector intrinsic operation, reg
+ def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
+ OpSize;
+
+ // Vector intrinsic operation, mem
+ def PSm_Int : Ii8<opcps, MRMSrcMem,
+ (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst,
+ (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
+ TA, OpSize,
+ Requires<[HasSSE41]>;
+
+ // Vector intrinsic operation, reg
+ def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
+ OpSize;
+
+ // Vector intrinsic operation, mem
+ def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
+ (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst,
+ (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
+ OpSize;
+}
+
+multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
+ RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
+ // Intrinsic operation, reg.
+ // Vector intrinsic operation, reg
+ def PSr : SS4AIi8<opcps, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, OpSize;
+
+ // Vector intrinsic operation, mem
+ def PSm : Ii8<opcps, MRMSrcMem,
+ (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, TA, OpSize, Requires<[HasSSE41]>;
+
+ // Vector intrinsic operation, reg
+ def PDr : SS4AIi8<opcpd, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, OpSize;
+
+ // Vector intrinsic operation, mem
+ def PDm : SS4AIi8<opcpd, MRMSrcMem,
+ (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, OpSize;
+}
+
+multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
+ string OpcodeStr,
+ Intrinsic F32Int,
+ Intrinsic F64Int, bit Is2Addr = 1> {
+ // Intrinsic operation, reg.
+ def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
+ OpSize;
+
+ // Intrinsic operation, mem.
+ def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
+ OpSize;
+
+ // Intrinsic operation, reg.
+ def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
+ OpSize;
+
+ // Intrinsic operation, mem.
+ def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
+ OpSize;
+}
+
+multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
+ string OpcodeStr> {
+ // Intrinsic operation, reg.
+ def SSr : SS4AIi8<opcss, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, OpSize;
+
+ // Intrinsic operation, mem.
+ def SSm : SS4AIi8<opcss, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, OpSize;
+
+ // Intrinsic operation, reg.
+ def SDr : SS4AIi8<opcsd, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, OpSize;
+
+ // Intrinsic operation, mem.
+ def SDm : SS4AIi8<opcsd, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, OpSize;
+}
+
+// FP round - roundss, roundps, roundsd, roundpd
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ // Intrinsic form
+ defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
+ memopv4f32, memopv2f64,
+ int_x86_sse41_round_ps,
+ int_x86_sse41_round_pd>, VEX;
+ defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
+ memopv8f32, memopv4f64,
+ int_x86_avx_round_ps_256,
+ int_x86_avx_round_pd_256>, VEX;
+ defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
+ int_x86_sse41_round_ss,
+ int_x86_sse41_round_sd, 0>, VEX_4V;
+
+ // Instructions for the assembler
+ defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
+ VEX;
+ defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
+ VEX;
+ defm VROUND : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
+}
+
+defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
+ memopv4f32, memopv2f64,
+ int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
+let Constraints = "$src1 = $dst" in
+defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
+ int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
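
The round instructions take their behaviour from the immediate: bits 1:0 give the rounding mode, bit 2 defers to MXCSR.RC instead, and bit 3 suppresses precision exceptions. A short C sketch (illustrative helper names, assumes -msse4.1):

  #include <immintrin.h>

  /* roundps, round-to-nearest, precision exceptions suppressed */
  static __m128 round_nearest(__m128 v) {
      return _mm_round_ps(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  }

  /* floor is just a fixed-immediate form of the same instruction */
  static __m128 round_floor(__m128 v) { return _mm_floor_ps(v); }
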
+
+//===----------------------------------------------------------------------===//
+// SSE4.1 - Packed Bit Test
+//===----------------------------------------------------------------------===//
// ptest instruction we'll lower to this in X86ISelLowering primarily from
// the intel intrinsic that corresponds to this.
+let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
+def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
+ "vptest\t{$src2, $src1|$src1, $src2}",
+ [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
+ OpSize, VEX;
+def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
+ "vptest\t{$src2, $src1|$src1, $src2}",
+ [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
+ OpSize, VEX;
+
+def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
+ "vptest\t{$src2, $src1|$src1, $src2}",
+ [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
+ OpSize, VEX;
+def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
+ "vptest\t{$src2, $src1|$src1, $src2}",
+ [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
+ OpSize, VEX;
+}
+
let Defs = [EFLAGS] in {
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
- "ptest \t{$src2, $src1|$src1, $src2}",
- [(X86ptest VR128:$src1, VR128:$src2),
- (implicit EFLAGS)]>, OpSize;
-def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
- "ptest \t{$src2, $src1|$src1, $src2}",
- [(X86ptest VR128:$src1, (load addr:$src2)),
- (implicit EFLAGS)]>, OpSize;
+ "ptest \t{$src2, $src1|$src1, $src2}",
+ [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
+ OpSize;
+def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
+ "ptest \t{$src2, $src1|$src1, $src2}",
+ [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
+ OpSize;
+}
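
PTEST produces only flags, which is why these patterns set EFLAGS instead of a register result: ZF reflects src1 AND src2, CF reflects (NOT src1) AND src2. A short C sketch of the usual flag queries (illustrative helper names, assumes -msse4.1):

  #include <immintrin.h>

  /* ZF: 1 if (a & b) is all zeros */
  static int masked_bits_all_clear(__m128i a, __m128i b) {
      return _mm_testz_si128(a, b);
  }

  /* CF: 1 if (~a & b) is all zeros, i.e. every set bit of b is also set in a */
  static int contains_all_bits(__m128i a, __m128i b) {
      return _mm_testc_si128(a, b);
  }
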
+
+// The bit test instructions below are AVX only
+multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
+ def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
+ [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
+ def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
+ [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
+ OpSize, VEX;
+}
+
+let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
+defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
+defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
+defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
+}
+
+//===----------------------------------------------------------------------===//
+// SSE4.1 - Misc Instructions
+//===----------------------------------------------------------------------===//
+
+// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
+multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId128> {
+ def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
+ def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins i128mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst,
+ (IntId128
+ (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
+ int_x86_sse41_phminposuw>, VEX;
+defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
+ int_x86_sse41_phminposuw>;
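
As a usage sketch (not from the commit), the v8i16 unary operation defined
here corresponds to the SSE4.1 horizontal-minimum intrinsic; the helper name
is illustrative:

  #include <smmintrin.h>  // SSE4.1 intrinsics

  // PHMINPOSUW puts the minimum u16 lane in bits [15:0] of the result and its
  // index in bits [18:16]. (Illustrative helper.)
  unsigned min_u16_lane(__m128i v) {
    __m128i r = _mm_minpos_epu16(v);
    return (unsigned)_mm_extract_epi16(r, 0);  // the minimum value
  }
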
+
+/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
+multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId128, bit Is2Addr = 1> {
+ let isCommutable = 1 in
+ def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
+ def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst,
+ (IntId128 VR128:$src1,
+ (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ let isCommutable = 0 in
+ defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
+ 0>, VEX_4V;
+ defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
+ 0>, VEX_4V;
+ defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
+ 0>, VEX_4V;
+ defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
+ 0>, VEX_4V;
+ defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
+ 0>, VEX_4V;
+ defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
+ 0>, VEX_4V;
+ defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
+ 0>, VEX_4V;
+ defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
+ 0>, VEX_4V;
+ defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
+ 0>, VEX_4V;
+ defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
+ 0>, VEX_4V;
+ defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
+ 0>, VEX_4V;
+}
+
+let Constraints = "$src1 = $dst" in {
+ let isCommutable = 0 in
+ defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
+ defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
+ defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
+ defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
+ defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
+ defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
+ defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
+ defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
+ defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
+ defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
+ defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
+}
+
+def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
+ (PCMPEQQrr VR128:$src1, VR128:$src2)>;
+def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
+ (PCMPEQQrm VR128:$src1, addr:$src2)>;
+
+/// SS48I_binop_rm - Simple SSE41 binary operator.
+multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType OpVT, bit Is2Addr = 1> {
+ let isCommutable = 1 in
+ def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
+ OpSize;
+ def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (OpNode VR128:$src1,
+ (bc_v4i32 (memopv2i64 addr:$src2))))]>,
+ OpSize;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
+let Constraints = "$src1 = $dst" in
+ defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
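
A brief sketch, assuming SSE4.1, of the 32-bit multiply that the PMULLD
pattern above matches through the generic 'mul' node; helper name illustrative:

  #include <smmintrin.h>  // SSE4.1 intrinsics

  // PMULLD: lane-wise 32-bit multiply keeping the low 32 bits of each product.
  // (Illustrative helper.)
  __m128i mul_low_i32(__m128i a, __m128i b) {
    return _mm_mullo_epi32(a, b);
  }
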
+
+/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
+multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
+ X86MemOperand x86memop, bit Is2Addr = 1> {
+ let isCommutable = 1 in
+ def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
+ OpSize;
+ def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set RC:$dst,
+ (IntId RC:$src1,
+ (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
+ OpSize;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ let isCommutable = 0 in {
+ defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
+ VR128, memopv16i8, i128mem, 0>, VEX_4V;
+ defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
+ VR128, memopv16i8, i128mem, 0>, VEX_4V;
+ defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
+ int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
+ defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
+ int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
+ defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
+ VR128, memopv16i8, i128mem, 0>, VEX_4V;
+ defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
+ VR128, memopv16i8, i128mem, 0>, VEX_4V;
+ }
+ defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
+ VR128, memopv16i8, i128mem, 0>, VEX_4V;
+ defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
+ VR128, memopv16i8, i128mem, 0>, VEX_4V;
+ defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
+ VR256, memopv32i8, i256mem, 0>, VEX_4V;
+}
+
+let Constraints = "$src1 = $dst" in {
+ let isCommutable = 0 in {
+ defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
+ VR128, memopv16i8, i128mem>;
+ defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
+ VR128, memopv16i8, i128mem>;
+ defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
+ VR128, memopv16i8, i128mem>;
+ defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
+ VR128, memopv16i8, i128mem>;
+ }
+ defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
+ VR128, memopv16i8, i128mem>;
+ defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
+ VR128, memopv16i8, i128mem>;
+}
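
A hedged sketch of the 8-bit-immediate form, assuming SSE4.1; the immediate
must be a compile-time constant, matching the i32i8imm operand above, and the
helper name is illustrative:

  #include <smmintrin.h>  // SSE4.1 intrinsics

  // BLENDPS: per-lane select controlled by the low 4 bits of the immediate.
  // (Illustrative helper.)
  __m128 blend_lanes_0_and_2(__m128 a, __m128 b) {
    return _mm_blend_ps(a, b, 0x5);  // take lanes 0 and 2 from b, rest from a
  }
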
+
+/// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
+ RegisterClass RC, X86MemOperand x86memop,
+ PatFrag mem_frag, Intrinsic IntId> {
+ def rr : I<opc, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, RC:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
+ SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
+
+ def rm : I<opc, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2, RC:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set RC:$dst,
+ (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
+ RC:$src3))],
+ SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
+}
+}
+
+defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
+ memopv16i8, int_x86_sse41_blendvpd>;
+defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
+ memopv16i8, int_x86_sse41_blendvps>;
+defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
+ memopv16i8, int_x86_sse41_pblendvb>;
+defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
+ memopv32i8, int_x86_avx_blendv_pd_256>;
+defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
+ memopv32i8, int_x86_avx_blendv_ps_256>;
+
+/// SS41I_ternary_int - SSE 4.1 ternary operator
+let Uses = [XMM0], Constraints = "$src1 = $dst" in {
+ multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
+ def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr,
+ "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
+ [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
+ OpSize;
+
+ def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2),
+ !strconcat(OpcodeStr,
+ "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
+ [(set VR128:$dst,
+ (IntId VR128:$src1,
+ (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
+ }
+}
+
+defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
+defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
+defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
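
A short sketch, assuming SSE4.1, of the ternary form; in the non-AVX encoding
the mask is implicitly XMM0, which is why the definitions above carry
Uses = [XMM0]. The helper name is illustrative:

  #include <smmintrin.h>  // SSE4.1 intrinsics

  // BLENDVPS: per-lane select using the sign bit of each mask lane.
  // (Illustrative helper.)
  __m128 select_by_mask(__m128 if_clear, __m128 if_set, __m128 mask) {
    return _mm_blendv_ps(if_clear, if_set, mask);
  }
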
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+ "vmovntdqa\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
+ OpSize, VEX;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movntdqa\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
OpSize;
-
//===----------------------------------------------------------------------===//
-// SSE4.2 Instructions
+// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//
/// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
-let Constraints = "$src1 = $dst" in {
- multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
- Intrinsic IntId128, bit Commutable = 0> {
- def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
- OpSize {
- let isCommutable = Commutable;
- }
- def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1,
- (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
- }
+multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId128, bit Is2Addr = 1> {
+ def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
+ OpSize;
+ def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst,
+ (IntId128 VR128:$src1,
+ (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
-defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
+ 0>, VEX_4V;
+let Constraints = "$src1 = $dst" in
+ defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
(PCMPGTQrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
(PCMPGTQrm VR128:$src1, addr:$src2)>;
-// crc intrinsic instruction
-// This set of instructions are only rm, the only difference is the size
-// of r and m.
-let Constraints = "$src1 = $dst" in {
- def CRC32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
- (ins GR32:$src1, i8mem:$src2),
- "crc32 \t{$src2, $src1|$src1, $src2}",
- [(set GR32:$dst,
- (int_x86_sse42_crc32_8 GR32:$src1,
- (load addr:$src2)))]>, OpSize;
- def CRC32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
- (ins GR32:$src1, GR8:$src2),
- "crc32 \t{$src2, $src1|$src1, $src2}",
- [(set GR32:$dst,
- (int_x86_sse42_crc32_8 GR32:$src1, GR8:$src2))]>,
- OpSize;
- def CRC32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
- (ins GR32:$src1, i16mem:$src2),
- "crc32 \t{$src2, $src1|$src1, $src2}",
- [(set GR32:$dst,
- (int_x86_sse42_crc32_16 GR32:$src1,
- (load addr:$src2)))]>,
- OpSize;
- def CRC32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
- (ins GR32:$src1, GR16:$src2),
- "crc32 \t{$src2, $src1|$src1, $src2}",
- [(set GR32:$dst,
- (int_x86_sse42_crc32_16 GR32:$src1, GR16:$src2))]>,
- OpSize;
- def CRC32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
- (ins GR32:$src1, i32mem:$src2),
- "crc32 \t{$src2, $src1|$src1, $src2}",
- [(set GR32:$dst,
- (int_x86_sse42_crc32_32 GR32:$src1,
- (load addr:$src2)))]>, OpSize;
- def CRC32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
- (ins GR32:$src1, GR32:$src2),
- "crc32 \t{$src2, $src1|$src1, $src2}",
- [(set GR32:$dst,
- (int_x86_sse42_crc32_32 GR32:$src1, GR32:$src2))]>,
- OpSize;
- def CRC64m64 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
- (ins GR64:$src1, i64mem:$src2),
- "crc32 \t{$src2, $src1|$src1, $src2}",
- [(set GR64:$dst,
- (int_x86_sse42_crc32_64 GR64:$src1,
- (load addr:$src2)))]>,
- OpSize, REX_W;
- def CRC64r64 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
- (ins GR64:$src1, GR64:$src2),
- "crc32 \t{$src2, $src1|$src1, $src2}",
- [(set GR64:$dst,
- (int_x86_sse42_crc32_64 GR64:$src1, GR64:$src2))]>,
- OpSize, REX_W;
+//===----------------------------------------------------------------------===//
+// SSE4.2 - String/text Processing Instructions
+//===----------------------------------------------------------------------===//
+
+// Packed Compare Implicit Length Strings, Return Mask
+multiclass pseudo_pcmpistrm<string asm> {
+ def REG : Ii8<0, Pseudo, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3), !strconcat(asm, "rr PSEUDO"),
+ [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
+ imm:$src3))]>;
+ def MEM : Ii8<0, Pseudo, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3), !strconcat(asm, "rm PSEUDO"),
+ [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
+ VR128:$src1, (load addr:$src2), imm:$src3))]>;
}
-// String/text processing instructions.
let Defs = [EFLAGS], usesCustomInserter = 1 in {
-def PCMPISTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
- "#PCMPISTRM128rr PSEUDO!",
- [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
- imm:$src3))]>, OpSize;
-def PCMPISTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
- "#PCMPISTRM128rm PSEUDO!",
- [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, (load addr:$src2),
- imm:$src3))]>, OpSize;
+ defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
+ defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
+}
+
+let Defs = [XMM0, EFLAGS], isAsmParserOnly = 1,
+ Predicates = [HasAVX] in {
+ def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
+ def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
}
let Defs = [XMM0, EFLAGS] in {
-def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
- "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
-def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
- "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
+ def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
+ def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
+}
+
+// Packed Compare Explicit Length Strings, Return Mask
+multiclass pseudo_pcmpestrm<string asm> {
+ def REG : Ii8<0, Pseudo, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src3, i8imm:$src5), !strconcat(asm, "rr PSEUDO"),
+ [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
+ VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
+ def MEM : Ii8<0, Pseudo, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src3, i8imm:$src5), !strconcat(asm, "rm PSEUDO"),
+ [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
+ VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
}
let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
-def PCMPESTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src3, i8imm:$src5),
- "#PCMPESTRM128rr PSEUDO!",
- [(set VR128:$dst,
- (int_x86_sse42_pcmpestrm128
- VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>, OpSize;
-
-def PCMPESTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
- "#PCMPESTRM128rm PSEUDO!",
- [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
- VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>,
- OpSize;
+ defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
+ defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX],
+ Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
+ def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src3, i8imm:$src5),
+ "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
+ def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
+ (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
+ "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
}
let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
-def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
- (ins VR128:$src1, VR128:$src3, i8imm:$src5),
- "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
-def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
- (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
- "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
+ def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src3, i8imm:$src5),
+ "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
+ def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
+ (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
+ "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
}
+// Packed Compare Implicit Length Strings, Return Index
let Defs = [ECX, EFLAGS] in {
- multiclass SS42AI_pcmpistri<Intrinsic IntId128> {
- def rr : SS42AI<0x63, MRMSrcReg, (outs),
+ multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
+ def rr : SS42AI<0x63, MRMSrcReg, (outs),
(ins VR128:$src1, VR128:$src2, i8imm:$src3),
- "pcmpistri\t{$src3, $src2, $src1|$src1, $src2, $src3}",
+ !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
[(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
(implicit EFLAGS)]>, OpSize;
def rm : SS42AI<0x63, MRMSrcMem, (outs),
(ins VR128:$src1, i128mem:$src2, i8imm:$src3),
- "pcmpistri\t{$src3, $src2, $src1|$src1, $src2, $src3}",
+ !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
[(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
(implicit EFLAGS)]>, OpSize;
}
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
+ VEX;
+defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
+ VEX;
+defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
+ VEX;
+defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
+ VEX;
+defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
+ VEX;
+defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
+ VEX;
+}
+
defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
@@ -3948,22 +5140,36 @@ defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
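
For reference, a minimal sketch (assuming SSE4.2 and the <nmmintrin.h>
control-flag macros) of the implicit-length string compare whose ECX/EFLAGS
definitions appear above; the helper name is illustrative:

  #include <nmmintrin.h>  // SSE4.2 intrinsics

  // PCMPISTRI: returns in ECX the index of the first byte in haystack that
  // matches any byte in needle_set (16 if none). Implicit length: each
  // operand ends at its first NUL byte or after 16 bytes. (Illustrative.)
  int find_any_of(__m128i needle_set, __m128i haystack) {
    return _mm_cmpistri(needle_set, haystack,
                        _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
                        _SIDD_LEAST_SIGNIFICANT);
  }
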
-let Defs = [ECX, EFLAGS] in {
-let Uses = [EAX, EDX] in {
- multiclass SS42AI_pcmpestri<Intrinsic IntId128> {
+// Packed Compare Explicit Length Strings, Return Index
+let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
+ multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
def rr : SS42AI<0x61, MRMSrcReg, (outs),
(ins VR128:$src1, VR128:$src3, i8imm:$src5),
- "pcmpestri\t{$src5, $src3, $src1|$src1, $src3, $src5}",
+ !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
[(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
(implicit EFLAGS)]>, OpSize;
def rm : SS42AI<0x61, MRMSrcMem, (outs),
(ins VR128:$src1, i128mem:$src3, i8imm:$src5),
- "pcmpestri\t{$src5, $src3, $src1|$src1, $src3, $src5}",
- [(set ECX,
+ !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
+ [(set ECX,
(IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
(implicit EFLAGS)]>, OpSize;
}
}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
+ VEX;
+defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
+ VEX;
+defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
+ VEX;
+defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
+ VEX;
+defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
+ VEX;
+defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
+ VEX;
}
defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
@@ -3972,3 +5178,773 @@ defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
+
+//===----------------------------------------------------------------------===//
+// SSE4.2 - CRC Instructions
+//===----------------------------------------------------------------------===//
+
+// No CRC instructions have AVX equivalents
+
+// crc intrinsic instruction
+// These instructions only come in rr and rm forms; the only difference
+// between the variants is the size of r and m.
+let Constraints = "$src1 = $dst" in {
+ def CRC32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
+ (ins GR32:$src1, i8mem:$src2),
+ "crc32{b} \t{$src2, $src1|$src1, $src2}",
+ [(set GR32:$dst,
+ (int_x86_sse42_crc32_8 GR32:$src1,
+ (load addr:$src2)))]>;
+ def CRC32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
+ (ins GR32:$src1, GR8:$src2),
+ "crc32{b} \t{$src2, $src1|$src1, $src2}",
+ [(set GR32:$dst,
+ (int_x86_sse42_crc32_8 GR32:$src1, GR8:$src2))]>;
+ def CRC32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
+ (ins GR32:$src1, i16mem:$src2),
+ "crc32{w} \t{$src2, $src1|$src1, $src2}",
+ [(set GR32:$dst,
+ (int_x86_sse42_crc32_16 GR32:$src1,
+ (load addr:$src2)))]>,
+ OpSize;
+ def CRC32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
+ (ins GR32:$src1, GR16:$src2),
+ "crc32{w} \t{$src2, $src1|$src1, $src2}",
+ [(set GR32:$dst,
+ (int_x86_sse42_crc32_16 GR32:$src1, GR16:$src2))]>,
+ OpSize;
+ def CRC32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
+ (ins GR32:$src1, i32mem:$src2),
+ "crc32{l} \t{$src2, $src1|$src1, $src2}",
+ [(set GR32:$dst,
+ (int_x86_sse42_crc32_32 GR32:$src1,
+ (load addr:$src2)))]>;
+ def CRC32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
+ (ins GR32:$src1, GR32:$src2),
+ "crc32{l} \t{$src2, $src1|$src1, $src2}",
+ [(set GR32:$dst,
+ (int_x86_sse42_crc32_32 GR32:$src1, GR32:$src2))]>;
+ def CRC64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
+ (ins GR64:$src1, i8mem:$src2),
+ "crc32{b} \t{$src2, $src1|$src1, $src2}",
+ [(set GR64:$dst,
+ (int_x86_sse42_crc64_8 GR64:$src1,
+ (load addr:$src2)))]>,
+ REX_W;
+ def CRC64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
+ (ins GR64:$src1, GR8:$src2),
+ "crc32{b} \t{$src2, $src1|$src1, $src2}",
+ [(set GR64:$dst,
+ (int_x86_sse42_crc64_8 GR64:$src1, GR8:$src2))]>,
+ REX_W;
+ def CRC64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
+ (ins GR64:$src1, i64mem:$src2),
+ "crc32{q} \t{$src2, $src1|$src1, $src2}",
+ [(set GR64:$dst,
+ (int_x86_sse42_crc64_64 GR64:$src1,
+ (load addr:$src2)))]>,
+ REX_W;
+ def CRC64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
+ (ins GR64:$src1, GR64:$src2),
+ "crc32{q} \t{$src2, $src1|$src1, $src2}",
+ [(set GR64:$dst,
+ (int_x86_sse42_crc64_64 GR64:$src1, GR64:$src2))]>,
+ REX_W;
+}
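
A minimal usage sketch (assuming SSE4.2) of the CRC32 forms defined above;
each step folds one source operand of the stated width into the running CRC,
and the helper name is illustrative:

  #include <nmmintrin.h>  // SSE4.2 intrinsics (CRC32)
  #include <cstddef>
  #include <cstdint>

  // Byte-at-a-time CRC32-C over a buffer, matching the crc32{b} forms above.
  // (Illustrative helper.)
  uint32_t crc32c(const uint8_t *data, size_t len, uint32_t crc = 0) {
    for (size_t i = 0; i < len; ++i)
      crc = _mm_crc32_u8(crc, data[i]);
    return crc;
  }
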
+
+//===----------------------------------------------------------------------===//
+// AES-NI Instructions
+//===----------------------------------------------------------------------===//
+
+multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId128, bit Is2Addr = 1> {
+ def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
+ OpSize;
+ def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst,
+ (IntId128 VR128:$src1,
+ (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
+}
+
+// Perform One Round of an AES Encryption/Decryption Flow
+let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
+ defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
+ int_x86_aesni_aesenc, 0>, VEX_4V;
+ defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
+ int_x86_aesni_aesenclast, 0>, VEX_4V;
+ defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
+ int_x86_aesni_aesdec, 0>, VEX_4V;
+ defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
+ int_x86_aesni_aesdeclast, 0>, VEX_4V;
+}
+
+let Constraints = "$src1 = $dst" in {
+ defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
+ int_x86_aesni_aesenc>;
+ defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
+ int_x86_aesni_aesenclast>;
+ defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
+ int_x86_aesni_aesdec>;
+ defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
+ int_x86_aesni_aesdeclast>;
+}
+
+def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
+ (AESENCrr VR128:$src1, VR128:$src2)>;
+def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
+ (AESENCrm VR128:$src1, addr:$src2)>;
+def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
+ (AESENCLASTrr VR128:$src1, VR128:$src2)>;
+def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
+ (AESENCLASTrm VR128:$src1, addr:$src2)>;
+def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
+ (AESDECrr VR128:$src1, VR128:$src2)>;
+def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
+ (AESDECrm VR128:$src1, addr:$src2)>;
+def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
+ (AESDECLASTrr VR128:$src1, VR128:$src2)>;
+def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
+ (AESDECLASTrm VR128:$src1, addr:$src2)>;
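
A brief sketch, assuming the AES-NI intrinsics header, of the encryption
rounds selected by the AESENC/AESENCLAST definitions above; rk1 and rk_last
are hypothetical expanded round keys and the helper name is illustrative:

  #include <wmmintrin.h>  // AES-NI intrinsics

  // One middle AES round followed by the final round; the round keys
  // (hypothetical rk1/rk_last) come from the key schedule. (Illustrative.)
  __m128i aes_two_rounds(__m128i state, __m128i rk1, __m128i rk_last) {
    state = _mm_aesenc_si128(state, rk1);        // full round with MixColumns
    return _mm_aesenclast_si128(state, rk_last); // final round, no MixColumns
  }
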
+
+// Perform the AES InvMixColumn Transformation
+let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
+ def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1),
+ "vaesimc\t{$src1, $dst|$dst, $src1}",
+ [(set VR128:$dst,
+ (int_x86_aesni_aesimc VR128:$src1))]>,
+ OpSize, VEX;
+ def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
+ (ins i128mem:$src1),
+ "vaesimc\t{$src1, $dst|$dst, $src1}",
+ [(set VR128:$dst,
+ (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
+ OpSize, VEX;
+}
+def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1),
+ "aesimc\t{$src1, $dst|$dst, $src1}",
+ [(set VR128:$dst,
+ (int_x86_aesni_aesimc VR128:$src1))]>,
+ OpSize;
+def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
+ (ins i128mem:$src1),
+ "aesimc\t{$src1, $dst|$dst, $src1}",
+ [(set VR128:$dst,
+ (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
+ OpSize;
+
+// AES Round Key Generation Assist
+let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
+ def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, i8imm:$src2),
+ "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst,
+ (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
+ OpSize, VEX;
+ def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
+ (ins i128mem:$src1, i8imm:$src2),
+ "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst,
+ (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
+ imm:$src2))]>,
+ OpSize, VEX;
+}
+def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, i8imm:$src2),
+ "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst,
+ (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
+ OpSize;
+def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
+ (ins i128mem:$src1, i8imm:$src2),
+ "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst,
+ (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
+ imm:$src2))]>,
+ OpSize;
+
+//===----------------------------------------------------------------------===//
+// CLMUL Instructions
+//===----------------------------------------------------------------------===//
+
+// Only the AVX versions of the CLMUL instructions are described here.
+
+// Carry-less Multiplication instructions
+let isAsmParserOnly = 1 in {
+def VPCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>;
+
+def VPCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>;
+
+// Assembler Only
+multiclass avx_vpclmul<string asm> {
+ def rr : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>;
+
+ def rm : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>;
+}
+defm VPCLMULHQHQDQ : avx_vpclmul<"vpclmulhqhqdq">;
+defm VPCLMULHQLQDQ : avx_vpclmul<"vpclmulhqlqdq">;
+defm VPCLMULLQHQDQ : avx_vpclmul<"vpclmullqhqdq">;
+defm VPCLMULLQLQDQ : avx_vpclmul<"vpclmullqlqdq">;
+
+} // isAsmParserOnly
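
A short sketch (assuming the PCLMULQDQ intrinsic from <wmmintrin.h>) of the
carry-less multiply that these assembler-only aliases wrap; the immediate
selects which 64-bit halves are multiplied, and the helper name is
illustrative:

  #include <wmmintrin.h>  // PCLMULQDQ intrinsic

  // Carry-less multiply of the low 64-bit halves of a and b (imm = 0x00);
  // 0x11 would multiply the two high halves, matching vpclmulhqhqdq.
  // (Illustrative helper.)
  __m128i clmul_low(__m128i a, __m128i b) {
    return _mm_clmulepi64_si128(a, b, 0x00);
  }
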
+
+//===----------------------------------------------------------------------===//
+// AVX Instructions
+//===----------------------------------------------------------------------===//
+
+let isAsmParserOnly = 1 in {
+
+// Load from memory and broadcast to all elements of the destination operand
+class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ X86MemOperand x86memop, Intrinsic Int> :
+ AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, (Int addr:$src))]>, VEX;
+
+def VBROADCASTSS : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
+ int_x86_avx_vbroadcastss>;
+def VBROADCASTSSY : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
+ int_x86_avx_vbroadcastss_256>;
+def VBROADCASTSD : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
+ int_x86_avx_vbroadcast_sd_256>;
+def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
+ int_x86_avx_vbroadcastf128_pd_256>;
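
A usage sketch (assuming AVX headers) of the broadcast loads defined above:
one scalar or 128-bit block is loaded from memory and replicated across the
destination. The helper name is illustrative:

  #include <immintrin.h>  // AVX intrinsics

  // VBROADCASTSS: load one float and replicate it into all 8 YMM lanes.
  // (Illustrative helper.)
  __m256 splat_from_memory(const float *p) {
    return _mm256_broadcast_ss(p);
  }
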
+
+// Insert packed floating-point values
+def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR128:$src2, i8imm:$src3),
+ "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>, VEX_4V;
+def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
+ "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>, VEX_4V;
+
+// Extract packed floating-point values
+def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
+ (ins VR256:$src1, i8imm:$src2),
+ "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, VEX;
+def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
+ "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, VEX;
+
+// Conditional SIMD Packed Loads and Stores
+multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
+ Intrinsic IntLd, Intrinsic IntLd256,
+ Intrinsic IntSt, Intrinsic IntSt256,
+ PatFrag pf128, PatFrag pf256> {
+ def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, f128mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
+ VEX_4V;
+ def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, f256mem:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
+ VEX_4V;
+ def mr : AVX8I<opc_mr, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
+ def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
+ (ins f256mem:$dst, VR256:$src1, VR256:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
+}
+
+defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
+ int_x86_avx_maskload_ps,
+ int_x86_avx_maskload_ps_256,
+ int_x86_avx_maskstore_ps,
+ int_x86_avx_maskstore_ps_256,
+ memopv4f32, memopv8f32>;
+defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
+ int_x86_avx_maskload_pd,
+ int_x86_avx_maskload_pd_256,
+ int_x86_avx_maskstore_pd,
+ int_x86_avx_maskstore_pd_256,
+ memopv2f64, memopv4f64>;
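
A sketch of the conditional load form, assuming the AVX mask-move intrinsics
with their current integer-mask signature; lanes whose mask sign bit is clear
are not read from memory and come back as zero. Helper name illustrative:

  #include <immintrin.h>  // AVX intrinsics

  // VMASKMOVPS (load form): per-lane conditional load, mask in the sign bits.
  // (Illustrative helper.)
  __m256 masked_load(const float *p, __m256i mask) {
    return _mm256_maskload_ps(p, mask);
  }
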
+
+// Permute Floating-Point Values
+multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
+ RegisterClass RC, X86MemOperand x86memop_f,
+ X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
+ Intrinsic IntVar, Intrinsic IntImm> {
+ def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
+ def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop_i:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;
+
+ def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
+ def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
+ (ins x86memop_f:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
+}
+
+defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
+ memopv4f32, memopv4i32,
+ int_x86_avx_vpermilvar_ps,
+ int_x86_avx_vpermil_ps>;
+defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
+ memopv8f32, memopv8i32,
+ int_x86_avx_vpermilvar_ps_256,
+ int_x86_avx_vpermil_ps_256>;
+defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
+ memopv2f64, memopv2i64,
+ int_x86_avx_vpermilvar_pd,
+ int_x86_avx_vpermil_pd>;
+defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
+ memopv4f64, memopv4i64,
+ int_x86_avx_vpermilvar_pd_256,
+ int_x86_avx_vpermil_pd_256>;
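
A short sketch, assuming AVX, of the immediate-controlled permute modeled
above (the rr/rm forms use a variable control register instead); helper name
illustrative:

  #include <immintrin.h>  // AVX intrinsics

  // VPERMILPS with an immediate: reorder the floats within each 128-bit lane.
  // _MM_SHUFFLE(0,1,2,3) reverses the four floats of each lane. (Illustrative.)
  __m256 reverse_within_lanes(__m256 v) {
    return _mm256_permute_ps(v, _MM_SHUFFLE(0, 1, 2, 3));
  }
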
+
+def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
+ (ins VR256:$src1, VR256:$src2, i8imm:$src3),
+ "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>, VEX_4V;
+def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
+ (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
+ "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ []>, VEX_4V;
+
+// Zero All YMM registers
+def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
+ [(int_x86_avx_vzeroall)]>, VEX, VEX_L, Requires<[HasAVX]>;
+
+// Zero Upper bits of YMM registers
+def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
+ [(int_x86_avx_vzeroupper)]>, VEX, Requires<[HasAVX]>;
+
+} // isAsmParserOnly
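
For completeness, a sketch of how VZEROUPPER is reached from C++ code,
assuming AVX; it is typically issued before calling legacy SSE code to avoid
the AVX/SSE transition penalty. Helper name illustrative:

  #include <immintrin.h>  // AVX intrinsics

  // VZEROUPPER: clear the upper 128 bits of every YMM register. (Illustrative.)
  void before_calling_sse_code() {
    _mm256_zeroupper();
  }
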
+
+def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
+ (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
+def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
+ (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
+def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
+ (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
+
+def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
+ (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
+def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
+ (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
+def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
+ (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
+
+def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
+ (VBROADCASTF128 addr:$src)>;
+
+def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
+ (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
+def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
+ (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
+def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
+ (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
+
+def : Pat<(int_x86_avx_vperm2f128_ps_256
+ VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
+ (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
+def : Pat<(int_x86_avx_vperm2f128_pd_256
+ VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
+ (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
+def : Pat<(int_x86_avx_vperm2f128_si_256
+ VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
+ (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
+
+//===----------------------------------------------------------------------===//
+// SSE Shuffle pattern fragments
+//===----------------------------------------------------------------------===//
+
+// This is part of a "work in progress" refactoring. The idea is that all
+// vector shuffles are going to be translated into target specific nodes and
+// directly matched by the patterns below (which can be changed along the way)
+// The AVX versions of some, but not all, of them are described here; more
+// should come in the near future.
+
+// Shuffle with PSHUFD instruction folding loads. The first two patterns match
+// SSE2 loads, which are always promoted to v2i64. The last one should match
+// the SSE1 case, where the only legal load is v4f32, but there is no PSHUFD
+// in SSE2, so how did it ever work? Anyway, the pattern will remain here until
+// we investigate further.
+def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
+ (i8 imm:$imm))),
+ (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
+def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
+ (i8 imm:$imm))),
+ (PSHUFDmi addr:$src1, imm:$imm)>;
+def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
+ (i8 imm:$imm))),
+ (PSHUFDmi addr:$src1, imm:$imm)>; // FIXME: has this ever worked?
+
+// Shuffle with PSHUFD instruction.
+def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
+ (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
+def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
+ (PSHUFDri VR128:$src1, imm:$imm)>;
+
+def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
+ (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
+def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
+ (PSHUFDri VR128:$src1, imm:$imm)>;
+
+// Shuffle with SHUFPD instruction.
+def : Pat<(v2f64 (X86Shufps VR128:$src1,
+ (memopv2f64 addr:$src2), (i8 imm:$imm))),
+ (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
+def : Pat<(v2f64 (X86Shufps VR128:$src1,
+ (memopv2f64 addr:$src2), (i8 imm:$imm))),
+ (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
+
+def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
+def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
+
+def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
+def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
+
+// Shuffle with SHUFPS instruction.
+def : Pat<(v4f32 (X86Shufps VR128:$src1,
+ (memopv4f32 addr:$src2), (i8 imm:$imm))),
+ (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
+def : Pat<(v4f32 (X86Shufps VR128:$src1,
+ (memopv4f32 addr:$src2), (i8 imm:$imm))),
+ (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
+
+def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
+def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
+
+def : Pat<(v4i32 (X86Shufps VR128:$src1,
+ (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
+ (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
+def : Pat<(v4i32 (X86Shufps VR128:$src1,
+ (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
+ (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
+
+def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
+def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
+
+// Shuffle with MOVHLPS instruction
+def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
+ (MOVHLPSrr VR128:$src1, VR128:$src2)>;
+def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
+ (MOVHLPSrr VR128:$src1, VR128:$src2)>;
+
+// Shuffle with MOVDDUP instruction
+def : Pat<(X86Movddup (memopv2f64 addr:$src)),
+ (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
+def : Pat<(X86Movddup (memopv2f64 addr:$src)),
+ (MOVDDUPrm addr:$src)>;
+
+def : Pat<(X86Movddup (bc_v4f32 (memopv2f64 addr:$src))),
+ (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
+def : Pat<(X86Movddup (bc_v4f32 (memopv2f64 addr:$src))),
+ (MOVDDUPrm addr:$src)>;
+
+def : Pat<(X86Movddup (memopv2i64 addr:$src)),
+ (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
+def : Pat<(X86Movddup (memopv2i64 addr:$src)),
+ (MOVDDUPrm addr:$src)>;
+
+def : Pat<(X86Movddup (bc_v4i32 (memopv2i64 addr:$src))),
+ (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
+def : Pat<(X86Movddup (bc_v4i32 (memopv2i64 addr:$src))),
+ (MOVDDUPrm addr:$src)>;
+
+def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
+ (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
+def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
+ (MOVDDUPrm addr:$src)>;
+
+def : Pat<(X86Movddup (bc_v2f64
+ (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
+ (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
+def : Pat<(X86Movddup (bc_v2f64
+ (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
+ (MOVDDUPrm addr:$src)>;
+
+// Shuffle with UNPCKLPS
+def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
+ (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
+def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
+ (UNPCKLPSrm VR128:$src1, addr:$src2)>;
+
+def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
+ (VUNPCKLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
+def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
+ (UNPCKLPSrr VR128:$src1, VR128:$src2)>;
+
+// Shuffle with UNPCKHPS
+def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
+ (VUNPCKHPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
+def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
+ (UNPCKHPSrm VR128:$src1, addr:$src2)>;
+
+def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
+ (VUNPCKHPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
+def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
+ (UNPCKHPSrr VR128:$src1, VR128:$src2)>;
+
+// Shuffle with UNPCKLPD
+def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
+ (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
+def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
+ (UNPCKLPSrm VR128:$src1, addr:$src2)>;
+
+def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
+ (VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
+def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
+ (UNPCKLPDrr VR128:$src1, VR128:$src2)>;
+
+// Shuffle with UNPCKHPD
+def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
+ (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
+def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
+ (UNPCKLPSrm VR128:$src1, addr:$src2)>;
+
+def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
+ (VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
+def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
+ (UNPCKHPDrr VR128:$src1, VR128:$src2)>;
+
+// Shuffle with PUNPCKLBW
+def : Pat<(v16i8 (X86Punpcklbw VR128:$src1,
+ (bc_v16i8 (memopv2i64 addr:$src2)))),
+ (PUNPCKLBWrm VR128:$src1, addr:$src2)>;
+def : Pat<(v16i8 (X86Punpcklbw VR128:$src1, VR128:$src2)),
+ (PUNPCKLBWrr VR128:$src1, VR128:$src2)>;
+
+// Shuffle with PUNPCKLWD
+def : Pat<(v8i16 (X86Punpcklwd VR128:$src1,
+ (bc_v8i16 (memopv2i64 addr:$src2)))),
+ (PUNPCKLWDrm VR128:$src1, addr:$src2)>;
+def : Pat<(v8i16 (X86Punpcklwd VR128:$src1, VR128:$src2)),
+ (PUNPCKLWDrr VR128:$src1, VR128:$src2)>;
+
+// Shuffle with PUNPCKLDQ
+def : Pat<(v4i32 (X86Punpckldq VR128:$src1,
+ (bc_v4i32 (memopv2i64 addr:$src2)))),
+ (PUNPCKLDQrm VR128:$src1, addr:$src2)>;
+def : Pat<(v4i32 (X86Punpckldq VR128:$src1, VR128:$src2)),
+ (PUNPCKLDQrr VR128:$src1, VR128:$src2)>;
+
+// Shuffle with PUNPCKLQDQ
+def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, (memopv2i64 addr:$src2))),
+ (PUNPCKLQDQrm VR128:$src1, addr:$src2)>;
+def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)),
+ (PUNPCKLQDQrr VR128:$src1, VR128:$src2)>;
+
+// Shuffle with PUNPCKHBW
+def : Pat<(v16i8 (X86Punpckhbw VR128:$src1,
+ (bc_v16i8 (memopv2i64 addr:$src2)))),
+ (PUNPCKHBWrm VR128:$src1, addr:$src2)>;
+def : Pat<(v16i8 (X86Punpckhbw VR128:$src1, VR128:$src2)),
+ (PUNPCKHBWrr VR128:$src1, VR128:$src2)>;
+
+// Shuffle with PUNPCKHWD
+def : Pat<(v8i16 (X86Punpckhwd VR128:$src1,
+ (bc_v8i16 (memopv2i64 addr:$src2)))),
+ (PUNPCKHWDrm VR128:$src1, addr:$src2)>;
+def : Pat<(v8i16 (X86Punpckhwd VR128:$src1, VR128:$src2)),
+ (PUNPCKHWDrr VR128:$src1, VR128:$src2)>;
+
+// Shuffle with PUNPCKHDQ
+def : Pat<(v4i32 (X86Punpckhdq VR128:$src1,
+ (bc_v4i32 (memopv2i64 addr:$src2)))),
+ (PUNPCKHDQrm VR128:$src1, addr:$src2)>;
+def : Pat<(v4i32 (X86Punpckhdq VR128:$src1, VR128:$src2)),
+ (PUNPCKHDQrr VR128:$src1, VR128:$src2)>;
+
+// Shuffle with PUNPCKHQDQ
+def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, (memopv2i64 addr:$src2))),
+ (PUNPCKHQDQrm VR128:$src1, addr:$src2)>;
+def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)),
+ (PUNPCKHQDQrr VR128:$src1, VR128:$src2)>;
+
+// Shuffle with MOVLHPS
+def : Pat<(X86Movlhps VR128:$src1,
+ (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
+ (MOVHPSrm VR128:$src1, addr:$src2)>;
+def : Pat<(X86Movlhps VR128:$src1,
+ (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
+ (MOVHPSrm VR128:$src1, addr:$src2)>;
+def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
+ (MOVLHPSrr VR128:$src1, VR128:$src2)>;
+def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
+ (MOVLHPSrr VR128:$src1, VR128:$src2)>;
+def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
+ (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
+
+// Shuffle with MOVLHPD
+def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
+ (scalar_to_vector (loadf64 addr:$src2)))),
+ (MOVHPDrm VR128:$src1, addr:$src2)>;
+// FIXME: Instead of X86Unpcklpd, there should be an X86Movlhpd here; the problem
+// is during lowering, where it's not possible to recognize the load fold because
+// it has two uses through a bitcast. One use disappears at isel time and the
+// fold opportunity reappears.
+def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
+ (scalar_to_vector (loadf64 addr:$src2)))),
+ (MOVHPDrm VR128:$src1, addr:$src2)>;
+
+// Shuffle with MOVSS
+def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
+ (MOVSSrr VR128:$src1, FR32:$src2)>;
+def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
+ (MOVSSrr (v4i32 VR128:$src1),
+ (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
+def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
+ (MOVSSrr (v4f32 VR128:$src1),
+ (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
+// FIXME: Instead of an X86Movss there should be an X86Movlps here; the problem
+// is during lowering, where it's not possible to recognize the load fold because
+// it has two uses through a bitcast. One use disappears at isel time and the
+// fold opportunity reappears.
+def : Pat<(X86Movss VR128:$src1,
+ (bc_v4i32 (v2i64 (load addr:$src2)))),
+ (MOVLPSrm VR128:$src1, addr:$src2)>;
+
+// Shuffle with MOVSD
+def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
+ (MOVSDrr VR128:$src1, FR64:$src2)>;
+def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
+ (MOVSDrr (v2i64 VR128:$src1),
+ (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
+def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
+ (MOVSDrr (v2f64 VR128:$src1),
+ (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
+def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
+ (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
+def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
+ (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
+
+// Shuffle with MOVSHDUP
+def : Pat<(v4i32 (X86Movshdup VR128:$src)),
+ (MOVSHDUPrr VR128:$src)>;
+def : Pat<(X86Movshdup (bc_v4i32 (memopv2i64 addr:$src))),
+ (MOVSHDUPrm addr:$src)>;
+
+def : Pat<(v4f32 (X86Movshdup VR128:$src)),
+ (MOVSHDUPrr VR128:$src)>;
+def : Pat<(X86Movshdup (memopv4f32 addr:$src)),
+ (MOVSHDUPrm addr:$src)>;
+
+// Shuffle with MOVSLDUP
+def : Pat<(v4i32 (X86Movsldup VR128:$src)),
+ (MOVSLDUPrr VR128:$src)>;
+def : Pat<(X86Movsldup (bc_v4i32 (memopv2i64 addr:$src))),
+ (MOVSLDUPrm addr:$src)>;
+
+def : Pat<(v4f32 (X86Movsldup VR128:$src)),
+ (MOVSLDUPrr VR128:$src)>;
+def : Pat<(X86Movsldup (memopv4f32 addr:$src)),
+ (MOVSLDUPrm addr:$src)>;
+
+// Shuffle with PSHUFHW
+def : Pat<(v8i16 (X86PShufhwLd addr:$src, (i8 imm:$imm))),
+ (PSHUFHWmi addr:$src, imm:$imm)>;
+def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
+ (PSHUFHWri VR128:$src, imm:$imm)>;
+def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
+ (PSHUFHWmi addr:$src, imm:$imm)>;
+
+// Shuffle with PSHUFLW
+def : Pat<(v8i16 (X86PShuflwLd addr:$src, (i8 imm:$imm))),
+ (PSHUFLWmi addr:$src, imm:$imm)>;
+def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
+ (PSHUFLWri VR128:$src, imm:$imm)>;
+def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
+ (PSHUFLWmi addr:$src, imm:$imm)>;
+
+// Shuffle with PALIGN
+def : Pat<(v1i64 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))),
+ (PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>;
+def : Pat<(v2i32 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))),
+ (PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>;
+def : Pat<(v4i16 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))),
+ (PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>;
+def : Pat<(v8i8 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))),
+ (PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>;
+
+def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
+def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
+def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
+def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
+ (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
+
+// Shuffle with MOVLPS
+def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
+ (MOVLPSrm VR128:$src1, addr:$src2)>;
+def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
+ (MOVLPSrm VR128:$src1, addr:$src2)>;
+def : Pat<(X86Movlps VR128:$src1,
+ (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
+ (MOVLPSrm VR128:$src1, addr:$src2)>;
+
+// Shuffle with MOVLPD
+def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
+ (MOVLPDrm VR128:$src1, addr:$src2)>;
+def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
+ (MOVLPDrm VR128:$src1, addr:$src2)>;
+def : Pat<(v2f64 (X86Movlpd VR128:$src1,
+ (scalar_to_vector (loadf64 addr:$src2)))),
+ (MOVLPDrm VR128:$src1, addr:$src2)>;
+
+// Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
+def : Pat<(store (f64 (vector_extract
+ (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))),addr:$dst),
+ (MOVHPSmr addr:$dst, VR128:$src)>;
+def : Pat<(store (f64 (vector_extract
+ (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))),addr:$dst),
+ (MOVHPDmr addr:$dst, VR128:$src)>;
+
+def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),addr:$src1),
+ (MOVLPSmr addr:$src1, VR128:$src2)>;
+def : Pat<(store (v4i32 (X86Movlps
+ (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
+ (MOVLPSmr addr:$src1, VR128:$src2)>;
+
+def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
+ (MOVLPDmr addr:$src1, VR128:$src2)>;
+def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
+ (MOVLPDmr addr:$src1, VR128:$src2)>;
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86JITInfo.cpp b/libclamav/c++/llvm/lib/Target/X86/X86JITInfo.cpp
index d297d24..6f0a8d9 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86JITInfo.cpp
+++ b/libclamav/c++/llvm/lib/Target/X86/X86JITInfo.cpp
@@ -19,6 +19,7 @@
#include "llvm/Function.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/System/Valgrind.h"
#include <cstdlib>
#include <cstring>
using namespace llvm;
@@ -37,6 +38,10 @@ void X86JITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
unsigned NewAddr = (intptr_t)New;
unsigned OldAddr = (intptr_t)OldWord;
*OldWord = NewAddr - OldAddr - 4; // Emit PC-relative addr of New code.
+
+ // X86 doesn't need to invalidate the processor cache, so just invalidate
+ // Valgrind's cache directly.
+ sys::ValgrindDiscardTranslations(Old, 5);
}
@@ -393,8 +398,10 @@ X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr) {
*(intptr_t *)(RetAddr - 0xa) = NewVal;
((unsigned char*)RetAddr)[0] = (2 | (4 << 3) | (3 << 6));
}
+ sys::ValgrindDiscardTranslations((void*)(RetAddr-0xc), 0xd);
#else
((unsigned char*)RetAddr)[-1] = 0xE9;
+ sys::ValgrindDiscardTranslations((void*)(RetAddr-1), 5);
#endif
}
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86MCAsmInfo.cpp b/libclamav/c++/llvm/lib/Target/X86/X86MCAsmInfo.cpp
index ded9717..36badb4 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86MCAsmInfo.cpp
+++ b/libclamav/c++/llvm/lib/Target/X86/X86MCAsmInfo.cpp
@@ -14,6 +14,7 @@
#include "X86MCAsmInfo.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Triple.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;
@@ -68,7 +69,6 @@ X86MCAsmInfoDarwin::X86MCAsmInfoDarwin(const Triple &Triple) {
// Exceptions handling
ExceptionsType = ExceptionHandling::Dwarf;
- AbsoluteEHSectionOffsets = false;
}
X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
@@ -85,12 +85,10 @@ X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
HasLEB128 = true; // Target asm supports leb128 directives (little-endian)
// Debug Information
- AbsoluteDebugSectionOffsets = true;
SupportsDebugInformation = true;
// Exceptions handling
ExceptionsType = ExceptionHandling::Dwarf;
- AbsoluteEHSectionOffsets = false;
// OpenBSD has buggy support for .quad in 32-bit mode, just split into two
// .words.
@@ -98,12 +96,16 @@ X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
Data64bitsDirective = 0;
}
-MCSection *X86ELFMCAsmInfo::getNonexecutableStackSection(MCContext &Ctx) const {
- return MCSectionELF::Create(".note.GNU-stack", MCSectionELF::SHT_PROGBITS,
- 0, SectionKind::getMetadata(), false, Ctx);
+const MCSection *X86ELFMCAsmInfo::
+getNonexecutableStackSection(MCContext &Ctx) const {
+ return Ctx.getELFSection(".note.GNU-stack", MCSectionELF::SHT_PROGBITS,
+ 0, SectionKind::getMetadata(), false);
}
X86MCAsmInfoCOFF::X86MCAsmInfoCOFF(const Triple &Triple) {
+ if (Triple.getArch() == Triple::x86_64)
+ GlobalPrefix = "";
+
AsmTransCBE = x86_asm_table;
AssemblerDialect = AsmWriterFlavor;
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86MCAsmInfo.h b/libclamav/c++/llvm/lib/Target/X86/X86MCAsmInfo.h
index 69716bf..5815225 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86MCAsmInfo.h
+++ b/libclamav/c++/llvm/lib/Target/X86/X86MCAsmInfo.h
@@ -27,7 +27,7 @@ namespace llvm {
struct X86ELFMCAsmInfo : public MCAsmInfo {
explicit X86ELFMCAsmInfo(const Triple &Triple);
- virtual MCSection *getNonexecutableStackSection(MCContext &Ctx) const;
+ virtual const MCSection *getNonexecutableStackSection(MCContext &Ctx) const;
};
struct X86MCAsmInfoCOFF : public MCAsmInfoCOFF {
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86MCCodeEmitter.cpp b/libclamav/c++/llvm/lib/Target/X86/X86MCCodeEmitter.cpp
index 3f18696..9564fe0 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86MCCodeEmitter.cpp
+++ b/libclamav/c++/llvm/lib/Target/X86/X86MCCodeEmitter.cpp
@@ -30,7 +30,7 @@ class X86MCCodeEmitter : public MCCodeEmitter {
MCContext &Ctx;
bool Is64BitMode;
public:
- X86MCCodeEmitter(TargetMachine &tm, MCContext &ctx, bool is64Bit)
+ X86MCCodeEmitter(TargetMachine &tm, MCContext &ctx, bool is64Bit)
: TM(tm), TII(*TM.getInstrInfo()), Ctx(ctx) {
Is64BitMode = is64Bit;
}
@@ -38,16 +38,18 @@ public:
~X86MCCodeEmitter() {}
unsigned getNumFixupKinds() const {
- return 3;
+ return 5;
}
const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
const static MCFixupKindInfo Infos[] = {
- { "reloc_pcrel_4byte", 0, 4 * 8 },
- { "reloc_pcrel_1byte", 0, 1 * 8 },
- { "reloc_riprel_4byte", 0, 4 * 8 }
+ { "reloc_pcrel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
+ { "reloc_pcrel_1byte", 0, 1 * 8, MCFixupKindInfo::FKF_IsPCRel },
+ { "reloc_pcrel_2byte", 0, 2 * 8, MCFixupKindInfo::FKF_IsPCRel },
+ { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
+ { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel }
};
-
+
if (Kind < FirstTargetFixupKind)
return MCCodeEmitter::getFixupKindInfo(Kind);
@@ -55,16 +57,38 @@ public:
"Invalid kind!");
return Infos[Kind - FirstTargetFixupKind];
}
-
+
static unsigned GetX86RegNum(const MCOperand &MO) {
return X86RegisterInfo::getX86RegNum(MO.getReg());
}
-
+
+ // On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
+ // 0-7 and the difference between the 2 groups is given by the REX prefix.
+ // In the VEX prefix, registers are seen sequentially from 0-15 and encoded
+ // in 1's complement form, example:
+ //
+ // ModRM field => XMM9 => 1
+ // VEX.VVVV => XMM9 => ~9
+ //
+ // See table 4-35 of Intel AVX Programming Reference for details.
+ static unsigned char getVEXRegisterEncoding(const MCInst &MI,
+ unsigned OpNum) {
+ unsigned SrcReg = MI.getOperand(OpNum).getReg();
+ unsigned SrcRegNum = GetX86RegNum(MI.getOperand(OpNum));
+ if ((SrcReg >= X86::XMM8 && SrcReg <= X86::XMM15) ||
+ (SrcReg >= X86::YMM8 && SrcReg <= X86::YMM15))
+ SrcRegNum += 8;
+
+ // The registers represented through VEX_VVVV should
+ // be encoded in 1's complement form.
+ return (~SrcRegNum) & 0xf;
+ }
+
void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
OS << (char)C;
++CurByte;
}
-
+
void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
raw_ostream &OS) const {
// Output the constant in little endian byte order.
@@ -74,38 +98,49 @@ public:
}
}
- void EmitImmediate(const MCOperand &Disp,
+ void EmitImmediate(const MCOperand &Disp,
unsigned ImmSize, MCFixupKind FixupKind,
unsigned &CurByte, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups,
int ImmOffset = 0) const;
-
+
inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode,
unsigned RM) {
assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
return RM | (RegOpcode << 3) | (Mod << 6);
}
-
+
void EmitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
unsigned &CurByte, raw_ostream &OS) const {
EmitByte(ModRMByte(3, RegOpcodeFld, GetX86RegNum(ModRMReg)), CurByte, OS);
}
-
+
void EmitSIBByte(unsigned SS, unsigned Index, unsigned Base,
unsigned &CurByte, raw_ostream &OS) const {
// SIB byte is in the same format as the ModRMByte.
EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
}
-
-
+
+
void EmitMemModRMByte(const MCInst &MI, unsigned Op,
- unsigned RegOpcodeField,
- unsigned TSFlags, unsigned &CurByte, raw_ostream &OS,
+ unsigned RegOpcodeField,
+ uint64_t TSFlags, unsigned &CurByte, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const;
-
+
void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const;
-
+
+ void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
+ const MCInst &MI, const TargetInstrDesc &Desc,
+ raw_ostream &OS) const;
+
+ void EmitSegmentOverridePrefix(uint64_t TSFlags, unsigned &CurByte,
+ int MemOperand, const MCInst &MI,
+ raw_ostream &OS) const;
+
+ void EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
+ const MCInst &MI, const TargetInstrDesc &Desc,
+ raw_ostream &OS) const;
};
} // end anonymous namespace
@@ -123,24 +158,23 @@ MCCodeEmitter *llvm::createX86_64MCCodeEmitter(const Target &,
return new X86MCCodeEmitter(TM, Ctx, true);
}
-
-/// isDisp8 - Return true if this signed displacement fits in a 8-bit
-/// sign-extended field.
+/// isDisp8 - Return true if this signed displacement fits in an 8-bit
+/// sign-extended field.
static bool isDisp8(int Value) {
return Value == (signed char)Value;
}
/// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
/// in an instruction with the specified TSFlags.
-static MCFixupKind getImmFixupKind(unsigned TSFlags) {
+static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
unsigned Size = X86II::getSizeOfImm(TSFlags);
bool isPCRel = X86II::isImmPCRel(TSFlags);
-
+
switch (Size) {
default: assert(0 && "Unknown immediate size");
case 1: return isPCRel ? MCFixupKind(X86::reloc_pcrel_1byte) : FK_Data_1;
+ case 2: return isPCRel ? MCFixupKind(X86::reloc_pcrel_2byte) : FK_Data_2;
case 4: return isPCRel ? MCFixupKind(X86::reloc_pcrel_4byte) : FK_Data_4;
- case 2: assert(!isPCRel); return FK_Data_2;
case 8: assert(!isPCRel); return FK_Data_8;
}
}
@@ -161,28 +195,30 @@ EmitImmediate(const MCOperand &DispOp, unsigned Size, MCFixupKind FixupKind,
// If we have an immoffset, add it to the expression.
const MCExpr *Expr = DispOp.getExpr();
-
+
// If the fixup is pc-relative, we need to bias the value to be relative to
// the start of the field, not the end of the field.
if (FixupKind == MCFixupKind(X86::reloc_pcrel_4byte) ||
- FixupKind == MCFixupKind(X86::reloc_riprel_4byte))
+ FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
+ FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load))
ImmOffset -= 4;
+ if (FixupKind == MCFixupKind(X86::reloc_pcrel_2byte))
+ ImmOffset -= 2;
if (FixupKind == MCFixupKind(X86::reloc_pcrel_1byte))
ImmOffset -= 1;
-
+
if (ImmOffset)
Expr = MCBinaryExpr::CreateAdd(Expr, MCConstantExpr::Create(ImmOffset, Ctx),
Ctx);
-
+
// Emit a symbolic constant as a fixup and 4 zeros.
Fixups.push_back(MCFixup::Create(CurByte, Expr, FixupKind));
EmitConstant(0, Size, CurByte, OS);
}
-
void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
unsigned RegOpcodeField,
- unsigned TSFlags, unsigned &CurByte,
+ uint64_t TSFlags, unsigned &CurByte,
raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const{
const MCOperand &Disp = MI.getOperand(Op+3);
@@ -190,34 +226,43 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
const MCOperand &Scale = MI.getOperand(Op+1);
const MCOperand &IndexReg = MI.getOperand(Op+2);
unsigned BaseReg = Base.getReg();
-
+
// Handle %rip relative addressing.
if (BaseReg == X86::RIP) { // [disp32+RIP] in X86-64 mode
- assert(IndexReg.getReg() == 0 && Is64BitMode &&
- "Invalid rip-relative address");
+ assert(Is64BitMode && "Rip-relative addressing requires 64-bit mode");
+ assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
-
+
+ unsigned FixupKind = X86::reloc_riprel_4byte;
+
+ // movq loads are handled with a special relocation form which allows the
+ // linker to eliminate some loads for GOT references which end up in the
+ // same linkage unit.
+ if (MI.getOpcode() == X86::MOV64rm ||
+ MI.getOpcode() == X86::MOV64rm_TC)
+ FixupKind = X86::reloc_riprel_4byte_movq_load;
+
// rip-relative addressing is actually relative to the *next* instruction.
// Since an immediate can follow the mod/rm byte for an instruction, this
// means that we need to bias the immediate field of the instruction with
// the size of the immediate field. If we have this case, add it into the
// expression to emit.
int ImmSize = X86II::hasImm(TSFlags) ? X86II::getSizeOfImm(TSFlags) : 0;
-
- EmitImmediate(Disp, 4, MCFixupKind(X86::reloc_riprel_4byte),
+
+ EmitImmediate(Disp, 4, MCFixupKind(FixupKind),
CurByte, OS, Fixups, -ImmSize);
return;
}
-
+
unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;
-
+
// Determine whether a SIB byte is needed.
- // If no BaseReg, issue a RIP relative instruction only if the MCE can
+ // If no BaseReg, issue a RIP relative instruction only if the MCE can
// resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
// 2-7) and absolute references.
if (// The SIB byte must be used if there is an index register.
- IndexReg.getReg() == 0 &&
+ IndexReg.getReg() == 0 &&
// The SIB byte must be used if the base is ESP/RSP/R12, all of which
// encode to an R/M value of 4, which indicates that a SIB byte is
// present.
@@ -231,7 +276,7 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
EmitImmediate(Disp, 4, FK_Data_4, CurByte, OS, Fixups);
return;
}
-
+
// If the base is not EBP/ESP and there is no displacement, use simple
// indirect register encoding, this handles addresses like [EAX]. The
// encoding for [EBP] with no displacement means [disp32] so we handle it
@@ -240,24 +285,24 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
return;
}
-
+
// Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
if (Disp.isImm() && isDisp8(Disp.getImm())) {
EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
EmitImmediate(Disp, 1, FK_Data_1, CurByte, OS, Fixups);
return;
}
-
+
// Otherwise, emit the most general non-SIB encoding: [REG+disp32]
EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
EmitImmediate(Disp, 4, FK_Data_4, CurByte, OS, Fixups);
return;
}
-
+
// We need a SIB byte, so start by outputting the ModR/M byte first
assert(IndexReg.getReg() != X86::ESP &&
IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");
-
+
bool ForceDisp32 = false;
bool ForceDisp8 = false;
if (BaseReg == 0) {
@@ -269,7 +314,10 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
// Emit the normal disp32 encoding.
EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
ForceDisp32 = true;
- } else if (Disp.getImm() == 0 && BaseReg != X86::EBP) {
+ } else if (Disp.getImm() == 0 &&
+ // Base reg can't be anything that ends up with '5' as the base
+ // reg; it is the magic [*] nomenclature that indicates no base.
+ BaseRegNo != N86::EBP) {
// Emit no displacement ModR/M byte
EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
} else if (isDisp8(Disp.getImm())) {
@@ -280,13 +328,13 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
// Emit the normal disp32 encoding.
EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
}
-
+
// Calculate what the SS field value should be...
static const unsigned SSTable[] = { ~0, 0, 1, ~0, 2, ~0, ~0, ~0, 3 };
unsigned SS = SSTable[Scale.getImm()];
-
+
if (BaseReg == 0) {
- // Handle the SIB byte for the case where there is no base, see Intel
+ // Handle the SIB byte for the case where there is no base, see Intel
// Manual 2A, table 2-7. The displacement has already been output.
unsigned IndexRegNo;
if (IndexReg.getReg())
@@ -302,7 +350,7 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
IndexRegNo = 4; // For example [ESP+1*<noreg>+4]
EmitSIBByte(SS, IndexRegNo, GetX86RegNum(Base), CurByte, OS);
}
-
+
// Do we need to output a displacement?
if (ForceDisp8)
EmitImmediate(Disp, 1, FK_Data_1, CurByte, OS, Fixups);
@@ -310,26 +358,227 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
EmitImmediate(Disp, 4, FK_Data_4, CurByte, OS, Fixups);
}
+/// EmitVEXOpcodePrefix - AVX instructions are encoded using an opcode prefix
+/// called VEX.
+void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
+ int MemOperand, const MCInst &MI,
+ const TargetInstrDesc &Desc,
+ raw_ostream &OS) const {
+ bool HasVEX_4V = false;
+ if ((TSFlags >> 32) & X86II::VEX_4V)
+ HasVEX_4V = true;
+
+ // VEX_R: opcode extension equivalent to REX.R in
+ // 1's complement (inverted) form
+ //
+ // 1: Same as REX_R=0 (must be 1 in 32-bit mode)
+ // 0: Same as REX_R=1 (64 bit mode only)
+ //
+ unsigned char VEX_R = 0x1;
+
+ // VEX_X: equivalent to REX.X, only used when a
+ // register is used for index in SIB Byte.
+ //
+ // 1: Same as REX.X=0 (must be 1 in 32-bit mode)
+ // 0: Same as REX.X=1 (64-bit mode only)
+ unsigned char VEX_X = 0x1;
+
+ // VEX_B:
+ //
+ // 1: Same as REX_B=0 (ignored in 32-bit mode)
+ // 0: Same as REX_B=1 (64 bit mode only)
+ //
+ unsigned char VEX_B = 0x1;
+
+ // VEX_W: opcode specific (used like REX.W, or used for
+ // opcode extension, or ignored, depending on the opcode byte)
+ unsigned char VEX_W = 0;
+
+ // VEX_5M (VEX m-mmmmm field):
+ //
+ // 0b00000: Reserved for future use
+ // 0b00001: implied 0F leading opcode
+ // 0b00010: implied 0F 38 leading opcode bytes
+ // 0b00011: implied 0F 3A leading opcode bytes
+ // 0b00100-0b11111: Reserved for future use
+ //
+ unsigned char VEX_5M = 0x1;
+
+ // VEX_4V (VEX vvvv field): a register specifier
+ // (in 1's complement form) or 1111 if unused.
+ unsigned char VEX_4V = 0xf;
+
+ // VEX_L (Vector Length):
+ //
+ // 0: scalar or 128-bit vector
+ // 1: 256-bit vector
+ //
+ unsigned char VEX_L = 0;
+
+ // VEX_PP: opcode extension providing equivalent
+ // functionality of a SIMD prefix
+ //
+ // 0b00: None
+ // 0b01: 66
+ // 0b10: F3
+ // 0b11: F2
+ //
+ unsigned char VEX_PP = 0;
+
+ // Encode the operand size opcode prefix as needed.
+ if (TSFlags & X86II::OpSize)
+ VEX_PP = 0x01;
+
+ if ((TSFlags >> 32) & X86II::VEX_W)
+ VEX_W = 1;
+
+ if ((TSFlags >> 32) & X86II::VEX_L)
+ VEX_L = 1;
+
+ switch (TSFlags & X86II::Op0Mask) {
+ default: assert(0 && "Invalid prefix!");
+ case X86II::T8: // 0F 38
+ VEX_5M = 0x2;
+ break;
+ case X86II::TA: // 0F 3A
+ VEX_5M = 0x3;
+ break;
+ case X86II::TF: // F2 0F 38
+ VEX_PP = 0x3;
+ VEX_5M = 0x2;
+ break;
+ case X86II::XS: // F3 0F
+ VEX_PP = 0x2;
+ break;
+ case X86II::XD: // F2 0F
+ VEX_PP = 0x3;
+ break;
+ case X86II::TB: // Bypass: Not used by VEX
+ case 0:
+ break; // No prefix!
+ }
+
+ // Set the vector length to 256-bit if YMM0-YMM15 is used
+ for (unsigned i = 0; i != MI.getNumOperands(); ++i) {
+ if (!MI.getOperand(i).isReg())
+ continue;
+ unsigned SrcReg = MI.getOperand(i).getReg();
+ if (SrcReg >= X86::YMM0 && SrcReg <= X86::YMM15)
+ VEX_L = 1;
+ }
+
+ unsigned NumOps = MI.getNumOperands();
+ unsigned CurOp = 0;
+ bool IsDestMem = false;
+
+ switch (TSFlags & X86II::FormMask) {
+ case X86II::MRMInitReg: assert(0 && "FIXME: Remove this!");
+ case X86II::MRMDestMem:
+ IsDestMem = true;
+ // The important info for the VEX prefix is never beyond the address
+ // registers. Don't check beyond that.
+ NumOps = CurOp = X86::AddrNumOperands;
+ case X86II::MRM0m: case X86II::MRM1m:
+ case X86II::MRM2m: case X86II::MRM3m:
+ case X86II::MRM4m: case X86II::MRM5m:
+ case X86II::MRM6m: case X86II::MRM7m:
+ case X86II::MRMSrcMem:
+ case X86II::MRMSrcReg:
+ if (MI.getNumOperands() > CurOp && MI.getOperand(CurOp).isReg() &&
+ X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
+ VEX_R = 0x0;
+ CurOp++;
+
+ if (HasVEX_4V) {
+ VEX_4V = getVEXRegisterEncoding(MI, IsDestMem ? CurOp-1 : CurOp);
+ CurOp++;
+ }
+
+ // To only check operands before the memory address ones, start
+ // the search from the beginning
+ if (IsDestMem)
+ CurOp = 0;
+
+ // If the last register should be encoded in the immediate field,
+ // do not use any bit from the VEX prefix for this register; ignore it
+ if ((TSFlags >> 32) & X86II::VEX_I8IMM)
+ NumOps--;
+
+ for (; CurOp != NumOps; ++CurOp) {
+ const MCOperand &MO = MI.getOperand(CurOp);
+ if (MO.isReg() && X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
+ VEX_B = 0x0;
+ if (!VEX_B && MO.isReg() &&
+ ((TSFlags & X86II::FormMask) == X86II::MRMSrcMem) &&
+ X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
+ VEX_X = 0x0;
+ }
+ break;
+ default: // MRMDestReg, MRM0r-MRM7r, RawFrm
+ if (!MI.getNumOperands())
+ break;
+
+ if (MI.getOperand(CurOp).isReg() &&
+ X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
+ VEX_B = 0;
+
+ if (HasVEX_4V)
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+
+ CurOp++;
+ for (; CurOp != NumOps; ++CurOp) {
+ const MCOperand &MO = MI.getOperand(CurOp);
+ if (MO.isReg() && !HasVEX_4V &&
+ X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
+ VEX_R = 0x0;
+ }
+ break;
+ }
+
+ // Emit segment override opcode prefix as needed.
+ EmitSegmentOverridePrefix(TSFlags, CurByte, MemOperand, MI, OS);
+
+ // VEX opcode prefix can have 2 or 3 bytes
+ //
+ // 3 bytes:
+ // +-----+ +--------------+ +-------------------+
+ // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
+ // +-----+ +--------------+ +-------------------+
+ // 2 bytes:
+ // +-----+ +-------------------+
+ // | C5h | | R | vvvv | L | pp |
+ // +-----+ +-------------------+
+ //
+ unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
+
+ if (VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) { // 2 byte VEX prefix
+ EmitByte(0xC5, CurByte, OS);
+ EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
+ return;
+ }
+
+ // 3 byte VEX prefix
+ EmitByte(0xC4, CurByte, OS);
+ EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
+ EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
+}
+
/// DetermineREXPrefix - Determine if the MCInst has to be encoded with a X86-64
/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
/// size, and 3) use of X86-64 extended registers.
-static unsigned DetermineREXPrefix(const MCInst &MI, unsigned TSFlags,
+static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
const TargetInstrDesc &Desc) {
- // Pseudo instructions never have a rex byte.
- if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
- return 0;
-
unsigned REX = 0;
if (TSFlags & X86II::REX_W)
- REX |= 1 << 3;
-
+ REX |= 1 << 3; // set REX.W
+
if (MI.getNumOperands() == 0) return REX;
-
+
unsigned NumOps = MI.getNumOperands();
// FIXME: MCInst should explicitize the two-addrness.
bool isTwoAddr = NumOps > 1 &&
Desc.getOperandConstraint(1, TOI::TIED_TO) != -1;
-
+
// If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
unsigned i = isTwoAddr ? 1 : 0;
for (; i != NumOps; ++i) {
@@ -339,34 +588,34 @@ static unsigned DetermineREXPrefix(const MCInst &MI, unsigned TSFlags,
if (!X86InstrInfo::isX86_64NonExtLowByteReg(Reg)) continue;
// FIXME: The caller of DetermineREXPrefix slaps this prefix onto anything
// that returns non-zero.
- REX |= 0x40;
+ REX |= 0x40; // REX fixed encoding prefix
break;
}
-
+
switch (TSFlags & X86II::FormMask) {
case X86II::MRMInitReg: assert(0 && "FIXME: Remove this!");
case X86II::MRMSrcReg:
if (MI.getOperand(0).isReg() &&
X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 2;
+ REX |= 1 << 2; // set REX.R
i = isTwoAddr ? 2 : 1;
for (; i != NumOps; ++i) {
const MCOperand &MO = MI.getOperand(i);
if (MO.isReg() && X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << 0;
+ REX |= 1 << 0; // set REX.B
}
break;
case X86II::MRMSrcMem: {
if (MI.getOperand(0).isReg() &&
X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 2;
+ REX |= 1 << 2; // set REX.R
unsigned Bit = 0;
i = isTwoAddr ? 2 : 1;
for (; i != NumOps; ++i) {
const MCOperand &MO = MI.getOperand(i);
if (MO.isReg()) {
if (X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << Bit;
+ REX |= 1 << Bit; // set REX.B (Bit=0) and REX.X (Bit=1)
Bit++;
}
}
@@ -377,17 +626,17 @@ static unsigned DetermineREXPrefix(const MCInst &MI, unsigned TSFlags,
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m:
case X86II::MRMDestMem: {
- unsigned e = (isTwoAddr ? X86AddrNumOperands+1 : X86AddrNumOperands);
+ unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands);
i = isTwoAddr ? 1 : 0;
if (NumOps > e && MI.getOperand(e).isReg() &&
X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(e).getReg()))
- REX |= 1 << 2;
+ REX |= 1 << 2; // set REX.R
unsigned Bit = 0;
for (; i != e; ++i) {
const MCOperand &MO = MI.getOperand(i);
if (MO.isReg()) {
if (X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << Bit;
+ REX |= 1 << Bit; // REX.B (Bit=0) and REX.X (Bit=1)
Bit++;
}
}
@@ -396,39 +645,40 @@ static unsigned DetermineREXPrefix(const MCInst &MI, unsigned TSFlags,
default:
if (MI.getOperand(0).isReg() &&
X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 0;
+ REX |= 1 << 0; // set REX.B
i = isTwoAddr ? 2 : 1;
for (unsigned e = NumOps; i != e; ++i) {
const MCOperand &MO = MI.getOperand(i);
if (MO.isReg() && X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << 2;
+ REX |= 1 << 2; // set REX.R
}
break;
}
return REX;
}
-void X86MCCodeEmitter::
-EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const {
- unsigned Opcode = MI.getOpcode();
- const TargetInstrDesc &Desc = TII.get(Opcode);
- unsigned TSFlags = Desc.TSFlags;
-
- // Keep track of the current byte being emitted.
- unsigned CurByte = 0;
-
- // FIXME: We should emit the prefixes in exactly the same order as GAS does,
- // in order to provide diffability.
-
- // Emit the lock opcode prefix as needed.
- if (TSFlags & X86II::LOCK)
- EmitByte(0xF0, CurByte, OS);
-
- // Emit segment override opcode prefix as needed.
+/// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
+void X86MCCodeEmitter::EmitSegmentOverridePrefix(uint64_t TSFlags,
+ unsigned &CurByte, int MemOperand,
+ const MCInst &MI,
+ raw_ostream &OS) const {
switch (TSFlags & X86II::SegOvrMask) {
default: assert(0 && "Invalid segment!");
- case 0: break; // No segment override!
+ case 0:
+ // No segment override, check for explicit one on memory operand.
+ if (MemOperand != -1) { // If the instruction has a memory operand.
+ switch (MI.getOperand(MemOperand+X86::AddrSegmentReg).getReg()) {
+ default: assert(0 && "Unknown segment register!");
+ case 0: break;
+ case X86::CS: EmitByte(0x2E, CurByte, OS); break;
+ case X86::SS: EmitByte(0x36, CurByte, OS); break;
+ case X86::DS: EmitByte(0x3E, CurByte, OS); break;
+ case X86::ES: EmitByte(0x26, CurByte, OS); break;
+ case X86::FS: EmitByte(0x64, CurByte, OS); break;
+ case X86::GS: EmitByte(0x65, CurByte, OS); break;
+ }
+ }
+ break;
case X86II::FS:
EmitByte(0x64, CurByte, OS);
break;
@@ -436,19 +686,36 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitByte(0x65, CurByte, OS);
break;
}
-
+}
+
+/// EmitOpcodePrefix - Emit all instruction prefixes prior to the opcode.
+///
+/// MemOperand is the operand # of the start of a memory operand if present. If
+/// not present, it is -1.
+void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
+ int MemOperand, const MCInst &MI,
+ const TargetInstrDesc &Desc,
+ raw_ostream &OS) const {
+
+ // Emit the lock opcode prefix as needed.
+ if (TSFlags & X86II::LOCK)
+ EmitByte(0xF0, CurByte, OS);
+
+ // Emit segment override opcode prefix as needed.
+ EmitSegmentOverridePrefix(TSFlags, CurByte, MemOperand, MI, OS);
+
// Emit the repeat opcode prefix as needed.
if ((TSFlags & X86II::Op0Mask) == X86II::REP)
EmitByte(0xF3, CurByte, OS);
-
+
// Emit the operand size opcode prefix as needed.
if (TSFlags & X86II::OpSize)
EmitByte(0x66, CurByte, OS);
-
+
// Emit the address size opcode prefix as needed.
if (TSFlags & X86II::AdSize)
EmitByte(0x67, CurByte, OS);
-
+
bool Need0FPrefix = false;
switch (TSFlags & X86II::Op0Mask) {
default: assert(0 && "Invalid prefix!");
@@ -480,18 +747,18 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
case X86II::DE: EmitByte(0xDE, CurByte, OS); break;
case X86II::DF: EmitByte(0xDF, CurByte, OS); break;
}
-
+
// Handle REX prefix.
// FIXME: Can this come before F2 etc to simplify emission?
if (Is64BitMode) {
if (unsigned REX = DetermineREXPrefix(MI, TSFlags, Desc))
EmitByte(0x40 | REX, CurByte, OS);
}
-
+
// 0x0F escape code must be emitted just before the opcode.
if (Need0FPrefix)
EmitByte(0x0F, CurByte, OS);
-
+
// FIXME: Pull this up into previous switch if REX can be moved earlier.
switch (TSFlags & X86II::Op0Mask) {
case X86II::TF: // F2 0F 38
@@ -502,8 +769,21 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitByte(0x3A, CurByte, OS);
break;
}
-
+}
+
+void X86MCCodeEmitter::
+EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ unsigned Opcode = MI.getOpcode();
+ const TargetInstrDesc &Desc = TII.get(Opcode);
+ uint64_t TSFlags = Desc.TSFlags;
+
+ // Pseudo instructions don't get encoded.
+ if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
+ return;
+
// If this is a two-address instruction, skip one of the register operands.
+ // FIXME: This should be handled during MCInst lowering.
unsigned NumOps = Desc.getNumOperands();
unsigned CurOp = 0;
if (NumOps > 1 && Desc.getOperandConstraint(1, TOI::TIED_TO) != -1)
@@ -511,56 +791,98 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
else if (NumOps > 2 && Desc.getOperandConstraint(NumOps-1, TOI::TIED_TO)== 0)
// Skip the last source operand that is tied_to the dest reg. e.g. LXADD32
--NumOps;
-
+
+ // Keep track of the current byte being emitted.
+ unsigned CurByte = 0;
+
+ // Is this instruction encoded using the AVX VEX prefix?
+ bool HasVEXPrefix = false;
+
+ // It uses the VEX.VVVV field?
+ bool HasVEX_4V = false;
+
+ if ((TSFlags >> 32) & X86II::VEX)
+ HasVEXPrefix = true;
+ if ((TSFlags >> 32) & X86II::VEX_4V)
+ HasVEX_4V = true;
+
+ // Determine where the memory operand starts, if present.
+ int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
+ if (MemoryOperand != -1) MemoryOperand += CurOp;
+
+ if (!HasVEXPrefix)
+ EmitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
+ else
+ EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
+
unsigned char BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
+ unsigned SrcRegNum = 0;
switch (TSFlags & X86II::FormMask) {
case X86II::MRMInitReg:
assert(0 && "FIXME: Remove this form when the JIT moves to MCCodeEmitter!");
default: errs() << "FORM: " << (TSFlags & X86II::FormMask) << "\n";
assert(0 && "Unknown FormMask value in X86MCCodeEmitter!");
- case X86II::Pseudo: return; // Pseudo instructions encode to nothing.
+ case X86II::Pseudo:
+ assert(0 && "Pseudo instruction shouldn't be emitted");
case X86II::RawFrm:
EmitByte(BaseOpcode, CurByte, OS);
break;
+ case X86II::RawFrmImm16:
+ EmitByte(BaseOpcode, CurByte, OS);
+ EmitImmediate(MI.getOperand(CurOp++),
+ X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
+ CurByte, OS, Fixups);
+ EmitImmediate(MI.getOperand(CurOp++), 2, FK_Data_2, CurByte, OS, Fixups);
+ break;
+
case X86II::AddRegFrm:
EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
break;
-
+
case X86II::MRMDestReg:
EmitByte(BaseOpcode, CurByte, OS);
EmitRegModRMByte(MI.getOperand(CurOp),
GetX86RegNum(MI.getOperand(CurOp+1)), CurByte, OS);
CurOp += 2;
break;
-
+
case X86II::MRMDestMem:
EmitByte(BaseOpcode, CurByte, OS);
+ SrcRegNum = CurOp + X86::AddrNumOperands;
+
+ if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
+ SrcRegNum++;
+
EmitMemModRMByte(MI, CurOp,
- GetX86RegNum(MI.getOperand(CurOp + X86AddrNumOperands)),
+ GetX86RegNum(MI.getOperand(SrcRegNum)),
TSFlags, CurByte, OS, Fixups);
- CurOp += X86AddrNumOperands + 1;
+ CurOp = SrcRegNum + 1;
break;
-
+
case X86II::MRMSrcReg:
EmitByte(BaseOpcode, CurByte, OS);
- EmitRegModRMByte(MI.getOperand(CurOp+1), GetX86RegNum(MI.getOperand(CurOp)),
- CurByte, OS);
- CurOp += 2;
+ SrcRegNum = CurOp + 1;
+
+ if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
+ SrcRegNum++;
+
+ EmitRegModRMByte(MI.getOperand(SrcRegNum),
+ GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
+ CurOp = SrcRegNum + 1;
break;
-
+
case X86II::MRMSrcMem: {
+ int AddrOperands = X86::AddrNumOperands;
+ unsigned FirstMemOp = CurOp+1;
+ if (HasVEX_4V) {
+ ++AddrOperands;
+ ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
+ }
+
EmitByte(BaseOpcode, CurByte, OS);
- // FIXME: Maybe lea should have its own form? This is a horrible hack.
- int AddrOperands;
- if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
- Opcode == X86::LEA16r || Opcode == X86::LEA32r)
- AddrOperands = X86AddrNumOperands - 1; // No segment register
- else
- AddrOperands = X86AddrNumOperands;
-
- EmitMemModRMByte(MI, CurOp+1, GetX86RegNum(MI.getOperand(CurOp)),
+ EmitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
TSFlags, CurByte, OS, Fixups);
CurOp += AddrOperands + 1;
break;
@@ -570,6 +892,8 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
case X86II::MRM2r: case X86II::MRM3r:
case X86II::MRM4r: case X86II::MRM5r:
case X86II::MRM6r: case X86II::MRM7r:
+ if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
+ CurOp++;
EmitByte(BaseOpcode, CurByte, OS);
EmitRegModRMByte(MI.getOperand(CurOp++),
(TSFlags & X86II::FormMask)-X86II::MRM0r,
@@ -582,7 +906,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitByte(BaseOpcode, CurByte, OS);
EmitMemModRMByte(MI, CurOp, (TSFlags & X86II::FormMask)-X86II::MRM0m,
TSFlags, CurByte, OS, Fixups);
- CurOp += X86AddrNumOperands;
+ CurOp += X86::AddrNumOperands;
break;
case X86II::MRM_C1:
EmitByte(BaseOpcode, CurByte, OS);
@@ -625,14 +949,27 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitByte(0xF9, CurByte, OS);
break;
}
-
+
// If there is a remaining operand, it must be a trailing immediate. Emit it
// according to the right size for the instruction.
- if (CurOp != NumOps)
- EmitImmediate(MI.getOperand(CurOp++),
- X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
- CurByte, OS, Fixups);
-
+ if (CurOp != NumOps) {
+ // The last source register of a 4-operand instruction in AVX is encoded
+ // in bits[7:4] of an immediate byte, and bits[3:0] are ignored.
+ if ((TSFlags >> 32) & X86II::VEX_I8IMM) {
+ const MCOperand &MO = MI.getOperand(CurOp++);
+ bool IsExtReg =
+ X86InstrInfo::isX86_64ExtendedReg(MO.getReg());
+ unsigned RegNum = (IsExtReg ? (1 << 7) : 0);
+ RegNum |= GetX86RegNum(MO) << 4;
+ EmitImmediate(MCOperand::CreateImm(RegNum), 1, FK_Data_1, CurByte, OS,
+ Fixups);
+ } else
+ EmitImmediate(MI.getOperand(CurOp++),
+ X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
+ CurByte, OS, Fixups);
+ }
+
+
#ifndef NDEBUG
// FIXME: Verify.
if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
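
As a small self-contained check of the 2-byte VEX packing used in EmitVEXOpcodePrefix above: the field values below (PP=0x01 for the implied 66 prefix, L=0 for 128-bit, vvvv=0xF unused, R=1) are just one illustrative combination, chosen because it produces the common C5 F9 prefix. This is a sketch mirroring the formula in the hunk, not emitter code.

#include <cstdio>

// Mirrors the packing in EmitVEXOpcodePrefix:
//   LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3)
//   2-byte form: 0xC5, LastByte | (VEX_R << 7)
static unsigned char VEXLastByte(unsigned PP, unsigned L, unsigned VVVV) {
  return static_cast<unsigned char>(PP | (L << 2) | (VVVV << 3));
}

int main() {
  unsigned char Last = VEXLastByte(0x01, 0, 0xF); // 66 prefix, 128-bit, vvvv unused
  unsigned char Byte1 = Last | (1u << 7);         // VEX_R = 1 (no REX.R needed)
  std::printf("C5 %02X\n", Byte1);                // prints "C5 F9"
  return 0;
}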
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86MCInstLower.cpp b/libclamav/c++/llvm/lib/Target/X86/X86MCInstLower.cpp
new file mode 100644
index 0000000..8c4620f
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -0,0 +1,609 @@
+//===-- X86MCInstLower.cpp - Convert X86 MachineInstr to an MCInst --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code to lower X86 MachineInstrs to their corresponding
+// MCInst records.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86MCInstLower.h"
+#include "X86AsmPrinter.h"
+#include "X86COFFMachineModuleInfo.h"
+#include "X86MCAsmInfo.h"
+#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Target/Mangler.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Type.h"
+using namespace llvm;
+
+X86MCInstLower::X86MCInstLower(Mangler *mang, const MachineFunction &mf,
+ X86AsmPrinter &asmprinter)
+: Ctx(mf.getContext()), Mang(mang), MF(mf), TM(mf.getTarget()),
+ MAI(*TM.getMCAsmInfo()), AsmPrinter(asmprinter) {}
+
+MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
+ return MF.getMMI().getObjFileInfo<MachineModuleInfoMachO>();
+}
+
+
+MCSymbol *X86MCInstLower::GetPICBaseSymbol() const {
+ return static_cast<const X86TargetLowering*>(TM.getTargetLowering())->
+ getPICBaseSymbol(&MF, Ctx);
+}
+
+/// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol
+/// operand to an MCSymbol.
+MCSymbol *X86MCInstLower::
+GetSymbolFromOperand(const MachineOperand &MO) const {
+ assert((MO.isGlobal() || MO.isSymbol()) && "Isn't a symbol reference");
+
+ SmallString<128> Name;
+
+ if (!MO.isGlobal()) {
+ assert(MO.isSymbol());
+ Name += MAI.getGlobalPrefix();
+ Name += MO.getSymbolName();
+ } else {
+ const GlobalValue *GV = MO.getGlobal();
+ bool isImplicitlyPrivate = false;
+ if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB ||
+ MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
+ MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE ||
+ MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE)
+ isImplicitlyPrivate = true;
+
+ Mang->getNameWithPrefix(Name, GV, isImplicitlyPrivate);
+ }
+
+ // If the target flags on the operand changes the name of the symbol, do that
+ // before we return the symbol.
+ switch (MO.getTargetFlags()) {
+ default: break;
+ case X86II::MO_DLLIMPORT: {
+ // Handle dllimport linkage.
+ const char *Prefix = "__imp_";
+ Name.insert(Name.begin(), Prefix, Prefix+strlen(Prefix));
+ break;
+ }
+ case X86II::MO_DARWIN_NONLAZY:
+ case X86II::MO_DARWIN_NONLAZY_PIC_BASE: {
+ Name += "$non_lazy_ptr";
+ MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
+
+ MachineModuleInfoImpl::StubValueTy &StubSym =
+ getMachOMMI().getGVStubEntry(Sym);
+ if (StubSym.getPointer() == 0) {
+ assert(MO.isGlobal() && "Extern symbol not handled yet");
+ StubSym =
+ MachineModuleInfoImpl::
+ StubValueTy(Mang->getSymbol(MO.getGlobal()),
+ !MO.getGlobal()->hasInternalLinkage());
+ }
+ return Sym;
+ }
+ case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: {
+ Name += "$non_lazy_ptr";
+ MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
+ MachineModuleInfoImpl::StubValueTy &StubSym =
+ getMachOMMI().getHiddenGVStubEntry(Sym);
+ if (StubSym.getPointer() == 0) {
+ assert(MO.isGlobal() && "Extern symbol not handled yet");
+ StubSym =
+ MachineModuleInfoImpl::
+ StubValueTy(Mang->getSymbol(MO.getGlobal()),
+ !MO.getGlobal()->hasInternalLinkage());
+ }
+ return Sym;
+ }
+ case X86II::MO_DARWIN_STUB: {
+ Name += "$stub";
+ MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
+ MachineModuleInfoImpl::StubValueTy &StubSym =
+ getMachOMMI().getFnStubEntry(Sym);
+ if (StubSym.getPointer())
+ return Sym;
+
+ if (MO.isGlobal()) {
+ StubSym =
+ MachineModuleInfoImpl::
+ StubValueTy(Mang->getSymbol(MO.getGlobal()),
+ !MO.getGlobal()->hasInternalLinkage());
+ } else {
+ Name.erase(Name.end()-5, Name.end());
+ StubSym =
+ MachineModuleInfoImpl::
+ StubValueTy(Ctx.GetOrCreateSymbol(Name.str()), false);
+ }
+ return Sym;
+ }
+ }
+
+ return Ctx.GetOrCreateSymbol(Name.str());
+}
+
+MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
+ MCSymbol *Sym) const {
+ // FIXME: We would like an efficient form for this, so we don't have to do a
+ // lot of extra uniquing.
+ const MCExpr *Expr = 0;
+ MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;
+
+ switch (MO.getTargetFlags()) {
+ default: llvm_unreachable("Unknown target flag on GV operand");
+ case X86II::MO_NO_FLAG: // No flag.
+ // These affect the name of the symbol, not any suffix.
+ case X86II::MO_DARWIN_NONLAZY:
+ case X86II::MO_DLLIMPORT:
+ case X86II::MO_DARWIN_STUB:
+ break;
+
+ case X86II::MO_TLVP: RefKind = MCSymbolRefExpr::VK_TLVP; break;
+ case X86II::MO_TLVP_PIC_BASE:
+ Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx);
+ // Subtract the pic base.
+ Expr = MCBinaryExpr::CreateSub(Expr,
+ MCSymbolRefExpr::Create(GetPICBaseSymbol(),
+ Ctx),
+ Ctx);
+ break;
+ case X86II::MO_TLSGD: RefKind = MCSymbolRefExpr::VK_TLSGD; break;
+ case X86II::MO_GOTTPOFF: RefKind = MCSymbolRefExpr::VK_GOTTPOFF; break;
+ case X86II::MO_INDNTPOFF: RefKind = MCSymbolRefExpr::VK_INDNTPOFF; break;
+ case X86II::MO_TPOFF: RefKind = MCSymbolRefExpr::VK_TPOFF; break;
+ case X86II::MO_NTPOFF: RefKind = MCSymbolRefExpr::VK_NTPOFF; break;
+ case X86II::MO_GOTPCREL: RefKind = MCSymbolRefExpr::VK_GOTPCREL; break;
+ case X86II::MO_GOT: RefKind = MCSymbolRefExpr::VK_GOT; break;
+ case X86II::MO_GOTOFF: RefKind = MCSymbolRefExpr::VK_GOTOFF; break;
+ case X86II::MO_PLT: RefKind = MCSymbolRefExpr::VK_PLT; break;
+ case X86II::MO_PIC_BASE_OFFSET:
+ case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
+ case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
+ Expr = MCSymbolRefExpr::Create(Sym, Ctx);
+ // Subtract the pic base.
+ Expr = MCBinaryExpr::CreateSub(Expr,
+ MCSymbolRefExpr::Create(GetPICBaseSymbol(), Ctx),
+ Ctx);
+ if (MO.isJTI() && MAI.hasSetDirective()) {
+ // If .set directive is supported, use it to reduce the number of
+ // relocations the assembler will generate for differences between
+ // local labels. This is only safe when the symbols are in the same
+ // section so we are restricting it to jumptable references.
+ MCSymbol *Label = Ctx.CreateTempSymbol();
+ AsmPrinter.OutStreamer.EmitAssignment(Label, Expr);
+ Expr = MCSymbolRefExpr::Create(Label, Ctx);
+ }
+ break;
+ }
+
+ if (Expr == 0)
+ Expr = MCSymbolRefExpr::Create(Sym, RefKind, Ctx);
+
+ if (!MO.isJTI() && MO.getOffset())
+ Expr = MCBinaryExpr::CreateAdd(Expr,
+ MCConstantExpr::Create(MO.getOffset(), Ctx),
+ Ctx);
+ return MCOperand::CreateExpr(Expr);
+}
+
+
+
+static void lower_subreg32(MCInst *MI, unsigned OpNo) {
+ // Convert registers in the addr mode according to subreg32.
+ unsigned Reg = MI->getOperand(OpNo).getReg();
+ if (Reg != 0)
+ MI->getOperand(OpNo).setReg(getX86SubSuperRegister(Reg, MVT::i32));
+}
+
+static void lower_lea64_32mem(MCInst *MI, unsigned OpNo) {
+ // Convert registers in the addr mode according to subreg64.
+ for (unsigned i = 0; i != 4; ++i) {
+ if (!MI->getOperand(OpNo+i).isReg()) continue;
+
+ unsigned Reg = MI->getOperand(OpNo+i).getReg();
+ if (Reg == 0) continue;
+
+ MI->getOperand(OpNo+i).setReg(getX86SubSuperRegister(Reg, MVT::i64));
+ }
+}
+
+/// LowerSubReg32_Op0 - Things like MOVZX16rr8 -> MOVZX32rr8.
+static void LowerSubReg32_Op0(MCInst &OutMI, unsigned NewOpc) {
+ OutMI.setOpcode(NewOpc);
+ lower_subreg32(&OutMI, 0);
+}
+/// LowerUnaryToTwoAddr - R = setb -> R = sbb R, R
+static void LowerUnaryToTwoAddr(MCInst &OutMI, unsigned NewOpc) {
+ OutMI.setOpcode(NewOpc);
+ OutMI.addOperand(OutMI.getOperand(0));
+ OutMI.addOperand(OutMI.getOperand(0));
+}
+
+/// \brief Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instruction with
+/// a short fixed-register form.
+static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) {
+ unsigned ImmOp = Inst.getNumOperands() - 1;
+ assert(Inst.getOperand(0).isReg() && Inst.getOperand(ImmOp).isImm() &&
+ ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() &&
+ Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) ||
+ Inst.getNumOperands() == 2) && "Unexpected instruction!");
+
+ // Check whether the destination register can be fixed.
+ unsigned Reg = Inst.getOperand(0).getReg();
+ if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
+ return;
+
+ // If so, rewrite the instruction.
+ MCOperand Saved = Inst.getOperand(ImmOp);
+ Inst = MCInst();
+ Inst.setOpcode(Opcode);
+ Inst.addOperand(Saved);
+}
+
+/// \brief Simplify things like MOV32rm to MOV32o32a.
+static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
+ unsigned Opcode) {
+ // Don't make these simplifications in 64-bit mode; other assemblers don't
+ // perform them because they make the code larger.
+ if (Printer.getSubtarget().is64Bit())
+ return;
+
+ bool IsStore = Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg();
+ unsigned AddrBase = IsStore;
+ unsigned RegOp = IsStore ? 0 : 5;
+ unsigned AddrOp = AddrBase + 3;
+ assert(Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() &&
+ Inst.getOperand(AddrBase + 0).isReg() && // base
+ Inst.getOperand(AddrBase + 1).isImm() && // scale
+ Inst.getOperand(AddrBase + 2).isReg() && // index register
+ (Inst.getOperand(AddrOp).isExpr() || // address
+ Inst.getOperand(AddrOp).isImm())&&
+ Inst.getOperand(AddrBase + 4).isReg() && // segment
+ "Unexpected instruction!");
+
+ // Check whether the destination register can be fixed.
+ unsigned Reg = Inst.getOperand(RegOp).getReg();
+ if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
+ return;
+
+ // Check whether this is an absolute address.
+ // FIXME: We know TLVP symbol refs aren't, but there should be a better way
+ // to do this here.
+ bool Absolute = true;
+ if (Inst.getOperand(AddrOp).isExpr()) {
+ const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr();
+ if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE))
+ if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP)
+ Absolute = false;
+ }
+
+ if (Absolute &&
+ (Inst.getOperand(AddrBase + 0).getReg() != 0 ||
+ Inst.getOperand(AddrBase + 2).getReg() != 0 ||
+ Inst.getOperand(AddrBase + 4).getReg() != 0 ||
+ Inst.getOperand(AddrBase + 1).getImm() != 1))
+ return;
+
+ // If so, rewrite the instruction.
+ MCOperand Saved = Inst.getOperand(AddrOp);
+ Inst = MCInst();
+ Inst.setOpcode(Opcode);
+ Inst.addOperand(Saved);
+}
+
+void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
+ OutMI.setOpcode(MI->getOpcode());
+
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+
+ MCOperand MCOp;
+ switch (MO.getType()) {
+ default:
+ MI->dump();
+ llvm_unreachable("unknown operand type");
+ case MachineOperand::MO_Register:
+ // Ignore all implicit register operands.
+ if (MO.isImplicit()) continue;
+ MCOp = MCOperand::CreateReg(MO.getReg());
+ break;
+ case MachineOperand::MO_Immediate:
+ MCOp = MCOperand::CreateImm(MO.getImm());
+ break;
+ case MachineOperand::MO_MachineBasicBlock:
+ MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
+ MO.getMBB()->getSymbol(), Ctx));
+ break;
+ case MachineOperand::MO_GlobalAddress:
+ MCOp = LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
+ break;
+ case MachineOperand::MO_ExternalSymbol:
+ MCOp = LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
+ break;
+ case MachineOperand::MO_JumpTableIndex:
+ MCOp = LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
+ break;
+ case MachineOperand::MO_ConstantPoolIndex:
+ MCOp = LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
+ break;
+ case MachineOperand::MO_BlockAddress:
+ MCOp = LowerSymbolOperand(MO,
+ AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
+ break;
+ }
+
+ OutMI.addOperand(MCOp);
+ }
+
+ // Handle a few special cases to eliminate operand modifiers.
+ switch (OutMI.getOpcode()) {
+ case X86::LEA64_32r: // Handle 'subreg rewriting' for the lea64_32mem operand.
+ lower_lea64_32mem(&OutMI, 1);
+ // FALL THROUGH.
+ case X86::LEA64r:
+ case X86::LEA16r:
+ case X86::LEA32r:
+ // LEA should have a segment register, but it must be empty.
+ assert(OutMI.getNumOperands() == 1+X86::AddrNumOperands &&
+ "Unexpected # of LEA operands");
+ assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 &&
+ "LEA has segment specified!");
+ break;
+ case X86::MOVZX16rr8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rr8); break;
+ case X86::MOVZX16rm8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rm8); break;
+ case X86::MOVSX16rr8: LowerSubReg32_Op0(OutMI, X86::MOVSX32rr8); break;
+ case X86::MOVSX16rm8: LowerSubReg32_Op0(OutMI, X86::MOVSX32rm8); break;
+ case X86::MOVZX64rr32: LowerSubReg32_Op0(OutMI, X86::MOV32rr); break;
+ case X86::MOVZX64rm32: LowerSubReg32_Op0(OutMI, X86::MOV32rm); break;
+ case X86::MOV64ri64i32: LowerSubReg32_Op0(OutMI, X86::MOV32ri); break;
+ case X86::MOVZX64rr8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rr8); break;
+ case X86::MOVZX64rm8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rm8); break;
+ case X86::MOVZX64rr16: LowerSubReg32_Op0(OutMI, X86::MOVZX32rr16); break;
+ case X86::MOVZX64rm16: LowerSubReg32_Op0(OutMI, X86::MOVZX32rm16); break;
+ case X86::SETB_C8r: LowerUnaryToTwoAddr(OutMI, X86::SBB8rr); break;
+ case X86::SETB_C16r: LowerUnaryToTwoAddr(OutMI, X86::SBB16rr); break;
+ case X86::SETB_C32r: LowerUnaryToTwoAddr(OutMI, X86::SBB32rr); break;
+ case X86::SETB_C64r: LowerUnaryToTwoAddr(OutMI, X86::SBB64rr); break;
+ case X86::MOV8r0: LowerUnaryToTwoAddr(OutMI, X86::XOR8rr); break;
+ case X86::MOV32r0: LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); break;
+ case X86::MMX_V_SET0: LowerUnaryToTwoAddr(OutMI, X86::MMX_PXORrr); break;
+ case X86::MMX_V_SETALLONES:
+ LowerUnaryToTwoAddr(OutMI, X86::MMX_PCMPEQDrr); break;
+ case X86::FsFLD0SS: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
+ case X86::FsFLD0SD: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
+ case X86::V_SET0PS: LowerUnaryToTwoAddr(OutMI, X86::XORPSrr); break;
+ case X86::V_SET0PD: LowerUnaryToTwoAddr(OutMI, X86::XORPDrr); break;
+ case X86::V_SET0PI: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
+ case X86::V_SETALLONES: LowerUnaryToTwoAddr(OutMI, X86::PCMPEQDrr); break;
+ case X86::AVX_SET0PS: LowerUnaryToTwoAddr(OutMI, X86::VXORPSrr); break;
+ case X86::AVX_SET0PSY: LowerUnaryToTwoAddr(OutMI, X86::VXORPSYrr); break;
+ case X86::AVX_SET0PD: LowerUnaryToTwoAddr(OutMI, X86::VXORPDrr); break;
+ case X86::AVX_SET0PDY: LowerUnaryToTwoAddr(OutMI, X86::VXORPDYrr); break;
+ case X86::AVX_SET0PI: LowerUnaryToTwoAddr(OutMI, X86::VPXORrr); break;
+
+ case X86::MOV16r0:
+ LowerSubReg32_Op0(OutMI, X86::MOV32r0); // MOV16r0 -> MOV32r0
+ LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
+ break;
+ case X86::MOV64r0:
+ LowerSubReg32_Op0(OutMI, X86::MOV32r0); // MOV64r0 -> MOV32r0
+ LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
+ break;
+
+ // TAILJMPr64, [WIN]CALL64r, [WIN]CALL64pcrel32 - These instructions have
+ // register inputs modeled as normal uses instead of implicit uses. As such,
+ // truncate off all but the first operand (the callee). FIXME: Change isel.
+ case X86::TAILJMPr64:
+ case X86::CALL64r:
+ case X86::CALL64pcrel32:
+ case X86::WINCALL64r:
+ case X86::WINCALL64pcrel32: {
+ unsigned Opcode = OutMI.getOpcode();
+ MCOperand Saved = OutMI.getOperand(0);
+ OutMI = MCInst();
+ OutMI.setOpcode(Opcode);
+ OutMI.addOperand(Saved);
+ break;
+ }
+
+ // TAILJMPd, TAILJMPd64 - Lower to the correct jump instructions.
+ case X86::TAILJMPr:
+ case X86::TAILJMPd:
+ case X86::TAILJMPd64: {
+ unsigned Opcode;
+ switch (OutMI.getOpcode()) {
+ default: assert(0 && "Invalid opcode");
+ case X86::TAILJMPr: Opcode = X86::JMP32r; break;
+ case X86::TAILJMPd:
+ case X86::TAILJMPd64: Opcode = X86::JMP_1; break;
+ }
+
+ MCOperand Saved = OutMI.getOperand(0);
+ OutMI = MCInst();
+ OutMI.setOpcode(Opcode);
+ OutMI.addOperand(Saved);
+ break;
+ }
+
+ // The assembler backend wants to see branches in their small form and relax
+ // them to their large form. The JIT can only handle the large form because
+ // it does not do relaxation. For now, translate the large form to the
+ // small one here.
+ case X86::JMP_4: OutMI.setOpcode(X86::JMP_1); break;
+ case X86::JO_4: OutMI.setOpcode(X86::JO_1); break;
+ case X86::JNO_4: OutMI.setOpcode(X86::JNO_1); break;
+ case X86::JB_4: OutMI.setOpcode(X86::JB_1); break;
+ case X86::JAE_4: OutMI.setOpcode(X86::JAE_1); break;
+ case X86::JE_4: OutMI.setOpcode(X86::JE_1); break;
+ case X86::JNE_4: OutMI.setOpcode(X86::JNE_1); break;
+ case X86::JBE_4: OutMI.setOpcode(X86::JBE_1); break;
+ case X86::JA_4: OutMI.setOpcode(X86::JA_1); break;
+ case X86::JS_4: OutMI.setOpcode(X86::JS_1); break;
+ case X86::JNS_4: OutMI.setOpcode(X86::JNS_1); break;
+ case X86::JP_4: OutMI.setOpcode(X86::JP_1); break;
+ case X86::JNP_4: OutMI.setOpcode(X86::JNP_1); break;
+ case X86::JL_4: OutMI.setOpcode(X86::JL_1); break;
+ case X86::JGE_4: OutMI.setOpcode(X86::JGE_1); break;
+ case X86::JLE_4: OutMI.setOpcode(X86::JLE_1); break;
+ case X86::JG_4: OutMI.setOpcode(X86::JG_1); break;
+
+ // We don't currently select the correct instruction form for instructions
+ // which have a short %eax, etc. form. Handle this by custom lowering, for
+ // now.
+ //
+ // Note, we are currently not handling the following instructions:
+ // MOV64ao8, MOV64o8a
+ // XCHG16ar, XCHG32ar, XCHG64ar
+ case X86::MOV8mr_NOREX:
+ case X86::MOV8mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8ao8); break;
+ case X86::MOV8rm_NOREX:
+ case X86::MOV8rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8o8a); break;
+ case X86::MOV16mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16ao16); break;
+ case X86::MOV16rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16o16a); break;
+ case X86::MOV32mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32ao32); break;
+ case X86::MOV32rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32o32a); break;
+
+ case X86::ADC8ri: SimplifyShortImmForm(OutMI, X86::ADC8i8); break;
+ case X86::ADC16ri: SimplifyShortImmForm(OutMI, X86::ADC16i16); break;
+ case X86::ADC32ri: SimplifyShortImmForm(OutMI, X86::ADC32i32); break;
+ case X86::ADC64ri32: SimplifyShortImmForm(OutMI, X86::ADC64i32); break;
+ case X86::ADD8ri: SimplifyShortImmForm(OutMI, X86::ADD8i8); break;
+ case X86::ADD16ri: SimplifyShortImmForm(OutMI, X86::ADD16i16); break;
+ case X86::ADD32ri: SimplifyShortImmForm(OutMI, X86::ADD32i32); break;
+ case X86::ADD64ri32: SimplifyShortImmForm(OutMI, X86::ADD64i32); break;
+ case X86::AND8ri: SimplifyShortImmForm(OutMI, X86::AND8i8); break;
+ case X86::AND16ri: SimplifyShortImmForm(OutMI, X86::AND16i16); break;
+ case X86::AND32ri: SimplifyShortImmForm(OutMI, X86::AND32i32); break;
+ case X86::AND64ri32: SimplifyShortImmForm(OutMI, X86::AND64i32); break;
+ case X86::CMP8ri: SimplifyShortImmForm(OutMI, X86::CMP8i8); break;
+ case X86::CMP16ri: SimplifyShortImmForm(OutMI, X86::CMP16i16); break;
+ case X86::CMP32ri: SimplifyShortImmForm(OutMI, X86::CMP32i32); break;
+ case X86::CMP64ri32: SimplifyShortImmForm(OutMI, X86::CMP64i32); break;
+ case X86::OR8ri: SimplifyShortImmForm(OutMI, X86::OR8i8); break;
+ case X86::OR16ri: SimplifyShortImmForm(OutMI, X86::OR16i16); break;
+ case X86::OR32ri: SimplifyShortImmForm(OutMI, X86::OR32i32); break;
+ case X86::OR64ri32: SimplifyShortImmForm(OutMI, X86::OR64i32); break;
+ case X86::SBB8ri: SimplifyShortImmForm(OutMI, X86::SBB8i8); break;
+ case X86::SBB16ri: SimplifyShortImmForm(OutMI, X86::SBB16i16); break;
+ case X86::SBB32ri: SimplifyShortImmForm(OutMI, X86::SBB32i32); break;
+ case X86::SBB64ri32: SimplifyShortImmForm(OutMI, X86::SBB64i32); break;
+ case X86::SUB8ri: SimplifyShortImmForm(OutMI, X86::SUB8i8); break;
+ case X86::SUB16ri: SimplifyShortImmForm(OutMI, X86::SUB16i16); break;
+ case X86::SUB32ri: SimplifyShortImmForm(OutMI, X86::SUB32i32); break;
+ case X86::SUB64ri32: SimplifyShortImmForm(OutMI, X86::SUB64i32); break;
+ case X86::TEST8ri: SimplifyShortImmForm(OutMI, X86::TEST8i8); break;
+ case X86::TEST16ri: SimplifyShortImmForm(OutMI, X86::TEST16i16); break;
+ case X86::TEST32ri: SimplifyShortImmForm(OutMI, X86::TEST32i32); break;
+ case X86::TEST64ri32: SimplifyShortImmForm(OutMI, X86::TEST64i32); break;
+ case X86::XOR8ri: SimplifyShortImmForm(OutMI, X86::XOR8i8); break;
+ case X86::XOR16ri: SimplifyShortImmForm(OutMI, X86::XOR16i16); break;
+ case X86::XOR32ri: SimplifyShortImmForm(OutMI, X86::XOR32i32); break;
+ case X86::XOR64ri32: SimplifyShortImmForm(OutMI, X86::XOR64i32); break;
+ }
+}
+
+
+void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
+ X86MCInstLower MCInstLowering(Mang, *MF, *this);
+ switch (MI->getOpcode()) {
+ case TargetOpcode::DBG_VALUE:
+ if (isVerbose() && OutStreamer.hasRawTextSupport()) {
+ std::string TmpStr;
+ raw_string_ostream OS(TmpStr);
+ PrintDebugValueComment(MI, OS);
+ OutStreamer.EmitRawText(StringRef(OS.str()));
+ }
+ return;
+
+ // Emit nothing here but a comment if we can.
+ case X86::Int_MemBarrier:
+ if (OutStreamer.hasRawTextSupport())
+ OutStreamer.EmitRawText(StringRef("\t#MEMBARRIER"));
+ return;
+
+ case X86::TAILJMPr:
+ case X86::TAILJMPd:
+ case X86::TAILJMPd64:
+ // Lower these as normal, but add some comments.
+ OutStreamer.AddComment("TAILCALL");
+ break;
+
+ case X86::MOVPC32r: {
+ MCInst TmpInst;
+ // This is a pseudo op for a two instruction sequence with a label, which
+ // looks like:
+ // call "L1$pb"
+ // "L1$pb":
+ // popl %esi
+
+ // Emit the call.
+ MCSymbol *PICBase = MCInstLowering.GetPICBaseSymbol();
+ TmpInst.setOpcode(X86::CALLpcrel32);
+ // FIXME: We would like an efficient form for this, so we don't have to do a
+ // lot of extra uniquing.
+ TmpInst.addOperand(MCOperand::CreateExpr(MCSymbolRefExpr::Create(PICBase,
+ OutContext)));
+ OutStreamer.EmitInstruction(TmpInst);
+
+ // Emit the label.
+ OutStreamer.EmitLabel(PICBase);
+
+ // popl $reg
+ TmpInst.setOpcode(X86::POP32r);
+ TmpInst.getOperand(0) = MCOperand::CreateReg(MI->getOperand(0).getReg());
+ OutStreamer.EmitInstruction(TmpInst);
+ return;
+ }
+
+ case X86::ADD32ri: {
+ // Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
+ if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
+ break;
+
+ // Okay, we have something like:
+ // EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)
+
+ // For this, we want to print something like:
+ // MYGLOBAL + (. - PICBASE)
+ // However, we can't generate a ".", so just emit a new label here and refer
+ // to it.
+ MCSymbol *DotSym = OutContext.CreateTempSymbol();
+ OutStreamer.EmitLabel(DotSym);
+
+ // Now that we have emitted the label, lower the complex operand expression.
+ MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));
+
+ const MCExpr *DotExpr = MCSymbolRefExpr::Create(DotSym, OutContext);
+ const MCExpr *PICBase =
+ MCSymbolRefExpr::Create(MCInstLowering.GetPICBaseSymbol(), OutContext);
+ DotExpr = MCBinaryExpr::CreateSub(DotExpr, PICBase, OutContext);
+
+ DotExpr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(OpSym,OutContext),
+ DotExpr, OutContext);
+
+ MCInst TmpInst;
+ TmpInst.setOpcode(X86::ADD32ri);
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
+ TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(1).getReg()));
+ TmpInst.addOperand(MCOperand::CreateExpr(DotExpr));
+ OutStreamer.EmitInstruction(TmpInst);
+ return;
+ }
+ }
+
+ MCInst TmpInst;
+ MCInstLowering.Lower(MI, TmpInst);
+ OutStreamer.EmitInstruction(TmpInst);
+}
+
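The SimplifyShortImmForm/SimplifyShortMoveForm rewrites above exist purely to pick the shorter AL/AX/EAX/RAX encodings at the MCInst level. A tiny standalone illustration of the size win (opcode bytes taken from the Intel SDM, shown only to make the difference concrete):

#include <cstdio>

int main() {
  // add eax, 0x1000 -- generic ModRM form (81 /0 id) vs. EAX short form (05 id)
  const unsigned char Generic[] = {0x81, 0xC0, 0x00, 0x10, 0x00, 0x00};
  const unsigned char Short[]   = {0x05, 0x00, 0x10, 0x00, 0x00};
  std::printf("generic: %zu bytes, short: %zu bytes\n",
              sizeof(Generic), sizeof(Short));
  return 0;
}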
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86MCInstLower.h b/libclamav/c++/llvm/lib/Target/X86/X86MCInstLower.h
new file mode 100644
index 0000000..539b09b
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Target/X86/X86MCInstLower.h
@@ -0,0 +1,54 @@
+//===-- X86MCInstLower.h - Lower MachineInstr to MCInst -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef X86_MCINSTLOWER_H
+#define X86_MCINSTLOWER_H
+
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+ class MCAsmInfo;
+ class MCContext;
+ class MCInst;
+ class MCOperand;
+ class MCSymbol;
+ class MachineInstr;
+ class MachineFunction;
+ class MachineModuleInfoMachO;
+ class MachineOperand;
+ class Mangler;
+ class TargetMachine;
+ class X86AsmPrinter;
+
+/// X86MCInstLower - This class is used to lower a MachineInstr into an MCInst.
+class LLVM_LIBRARY_VISIBILITY X86MCInstLower {
+ MCContext &Ctx;
+ Mangler *Mang;
+ const MachineFunction &MF;
+ const TargetMachine &TM;
+ const MCAsmInfo &MAI;
+ X86AsmPrinter &AsmPrinter;
+public:
+ X86MCInstLower(Mangler *mang, const MachineFunction &MF,
+ X86AsmPrinter &asmprinter);
+
+ void Lower(const MachineInstr *MI, MCInst &OutMI) const;
+
+ MCSymbol *GetPICBaseSymbol() const;
+
+ MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
+ MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
+
+private:
+ MachineModuleInfoMachO &getMachOMMI() const;
+};
+
+}
+
+#endif
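
The header above exposes a small interface: build one X86MCInstLower per function, then call Lower() for each MachineInstr, exactly as X86AsmPrinter::EmitInstruction does earlier in this patch. A minimal sketch of that driver pattern follows (emitLoweredInstr is an illustrative name, not part of the patch):

    #include "X86MCInstLower.h"
    #include "llvm/MC/MCInst.h"
    #include "llvm/MC/MCStreamer.h"
    using namespace llvm;

    // Illustrative only: pseudo-ops (DBG_VALUE, MOVPC32r, the PIC ADD32ri form,
    // ...) are special-cased before this point; everything else is lowered 1:1.
    static void emitLoweredInstr(X86MCInstLower &Lowering, const MachineInstr *MI,
                                 MCStreamer &Out) {
      MCInst Lowered;
      Lowering.Lower(MI, Lowered);   // MachineInstr -> target-independent MCInst
      Out.EmitInstruction(Lowered);  // encode or print via the configured streamer
    }
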
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86MCTargetExpr.cpp b/libclamav/c++/llvm/lib/Target/X86/X86MCTargetExpr.cpp
deleted file mode 100644
index 17b4fe8..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/X86MCTargetExpr.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-//===- X86MCTargetExpr.cpp - X86 Target Specific MCExpr Implementation ----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "X86MCTargetExpr.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/MCValue.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-X86MCTargetExpr *X86MCTargetExpr::Create(const MCSymbol *Sym, VariantKind K,
- MCContext &Ctx) {
- return new (Ctx) X86MCTargetExpr(Sym, K);
-}
-
-void X86MCTargetExpr::PrintImpl(raw_ostream &OS) const {
- OS << *Sym;
-
- switch (Kind) {
- case Invalid: OS << "@<invalid>"; break;
- case GOT: OS << "@GOT"; break;
- case GOTOFF: OS << "@GOTOFF"; break;
- case GOTPCREL: OS << "@GOTPCREL"; break;
- case GOTTPOFF: OS << "@GOTTPOFF"; break;
- case INDNTPOFF: OS << "@INDNTPOFF"; break;
- case NTPOFF: OS << "@NTPOFF"; break;
- case PLT: OS << "@PLT"; break;
- case TLSGD: OS << "@TLSGD"; break;
- case TPOFF: OS << "@TPOFF"; break;
- }
-}
-
-bool X86MCTargetExpr::EvaluateAsRelocatableImpl(MCValue &Res) const {
- // FIXME: I don't know if this is right, it followed MCSymbolRefExpr.
-
- // Evaluate recursively if this is a variable.
- if (Sym->isVariable())
- return Sym->getValue()->EvaluateAsRelocatable(Res);
-
- Res = MCValue::get(Sym, 0, 0);
- return true;
-}
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86MCTargetExpr.h b/libclamav/c++/llvm/lib/Target/X86/X86MCTargetExpr.h
deleted file mode 100644
index 7de8a5c..0000000
--- a/libclamav/c++/llvm/lib/Target/X86/X86MCTargetExpr.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//===- X86MCTargetExpr.h - X86 Target Specific MCExpr -----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef X86_MCTARGETEXPR_H
-#define X86_MCTARGETEXPR_H
-
-#include "llvm/MC/MCExpr.h"
-
-namespace llvm {
-
-/// X86MCTargetExpr - This class represents symbol variants, like foo@GOT.
-class X86MCTargetExpr : public MCTargetExpr {
-public:
- enum VariantKind {
- Invalid,
- GOT,
- GOTOFF,
- GOTPCREL,
- GOTTPOFF,
- INDNTPOFF,
- NTPOFF,
- PLT,
- TLSGD,
- TPOFF
- };
-private:
- /// Sym - The symbol being referenced.
- const MCSymbol * const Sym;
- /// Kind - The modifier.
- const VariantKind Kind;
-
- X86MCTargetExpr(const MCSymbol *S, VariantKind K) : Sym(S), Kind(K) {}
-public:
- static X86MCTargetExpr *Create(const MCSymbol *Sym, VariantKind K,
- MCContext &Ctx);
-
- void PrintImpl(raw_ostream &OS) const;
- bool EvaluateAsRelocatableImpl(MCValue &Res) const;
-};
-
-} // end namespace llvm
-
-#endif
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86MachineFunctionInfo.h b/libclamav/c++/llvm/lib/Target/X86/X86MachineFunctionInfo.h
index a916c63..06043ec 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86MachineFunctionInfo.h
+++ b/libclamav/c++/llvm/lib/Target/X86/X86MachineFunctionInfo.h
@@ -31,7 +31,8 @@ class X86MachineFunctionInfo : public MachineFunctionInfo {
/// stack frame in bytes.
unsigned CalleeSavedFrameSize;
- /// BytesToPopOnReturn - Number of bytes function pops on return.
+ /// BytesToPopOnReturn - Number of bytes function pops on return (in addition
+ /// to the space used by the return address).
/// Used on windows platform for stdcall & fastcall name decoration
unsigned BytesToPopOnReturn;
@@ -56,6 +57,15 @@ class X86MachineFunctionInfo : public MachineFunctionInfo {
/// when allocating, even if there may not actually be a frame pointer used.
bool ReserveFP;
+ /// VarArgsFrameIndex - FrameIndex for start of varargs area.
+ int VarArgsFrameIndex;
+ /// RegSaveFrameIndex - X86-64 vararg func register save area.
+ int RegSaveFrameIndex;
+ /// VarArgsGPOffset - X86-64 vararg func int reg offset.
+ unsigned VarArgsGPOffset;
+ /// VarArgsFPOffset - X86-64 vararg func fp reg offset.
+ unsigned VarArgsFPOffset;
+
public:
X86MachineFunctionInfo() : ForceFramePointer(false),
CalleeSavedFrameSize(0),
@@ -63,7 +73,11 @@ public:
ReturnAddrIndex(0),
TailCallReturnAddrDelta(0),
SRetReturnReg(0),
- GlobalBaseReg(0) {}
+ GlobalBaseReg(0),
+ VarArgsFrameIndex(0),
+ RegSaveFrameIndex(0),
+ VarArgsGPOffset(0),
+ VarArgsFPOffset(0) {}
explicit X86MachineFunctionInfo(MachineFunction &MF)
: ForceFramePointer(false),
@@ -73,7 +87,11 @@ public:
TailCallReturnAddrDelta(0),
SRetReturnReg(0),
GlobalBaseReg(0),
- ReserveFP(false) {}
+ ReserveFP(false),
+ VarArgsFrameIndex(0),
+ RegSaveFrameIndex(0),
+ VarArgsGPOffset(0),
+ VarArgsFPOffset(0) {}
bool getForceFramePointer() const { return ForceFramePointer;}
void setForceFramePointer(bool forceFP) { ForceFramePointer = forceFP; }
@@ -98,6 +116,18 @@ public:
bool getReserveFP() const { return ReserveFP; }
void setReserveFP(bool reserveFP) { ReserveFP = reserveFP; }
+
+ int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
+ void setVarArgsFrameIndex(int Idx) { VarArgsFrameIndex = Idx; }
+
+ int getRegSaveFrameIndex() const { return RegSaveFrameIndex; }
+ void setRegSaveFrameIndex(int Idx) { RegSaveFrameIndex = Idx; }
+
+ unsigned getVarArgsGPOffset() const { return VarArgsGPOffset; }
+ void setVarArgsGPOffset(unsigned Offset) { VarArgsGPOffset = Offset; }
+
+ unsigned getVarArgsFPOffset() const { return VarArgsFPOffset; }
+ void setVarArgsFPOffset(unsigned Offset) { VarArgsFPOffset = Offset; }
};
} // End llvm namespace
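
The four new fields above are plain per-function bookkeeping; backend code reaches them through MachineFunction::getInfo<X86MachineFunctionInfo>(), the same accessor pattern the frame-pointer code in this patch already uses. A hedged sketch of how argument-lowering code could fill them in (recordVarArgInfo and its parameters are illustrative, not taken from the patch):

    #include "X86MachineFunctionInfo.h"
    #include "llvm/CodeGen/MachineFunction.h"
    using namespace llvm;

    // Illustrative helper: stash the X86-64 vararg layout computed during
    // argument lowering into the per-function info object.
    static void recordVarArgInfo(MachineFunction &MF, int VarArgsFI,
                                 int RegSaveFI, unsigned GPOff, unsigned FPOff) {
      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
      FuncInfo->setVarArgsFrameIndex(VarArgsFI); // start of the on-stack varargs area
      FuncInfo->setRegSaveFrameIndex(RegSaveFI); // X86-64 register save area
      FuncInfo->setVarArgsGPOffset(GPOff);       // bytes of GP argument regs consumed
      FuncInfo->setVarArgsFPOffset(FPOff);       // bytes of XMM argument regs consumed
    }
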
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86RegisterInfo.cpp b/libclamav/c++/llvm/lib/Target/X86/X86RegisterInfo.cpp
index 4fe4d0a..fedd49e 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/libclamav/c++/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -37,10 +37,16 @@
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/CommandLine.h"
using namespace llvm;
+static cl::opt<bool>
+ForceStackAlign("force-align-stack",
+ cl::desc("Force align the stack to the minimum alignment"
+ " needed for the function."),
+ cl::init(false), cl::Hidden);
+
X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
const TargetInstrInfo &tii)
: X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
@@ -128,23 +134,78 @@ unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
return RegNo-X86::ST0;
- case X86::XMM0: case X86::XMM8: case X86::MM0:
+ case X86::XMM0: case X86::XMM8:
+ case X86::YMM0: case X86::YMM8: case X86::MM0:
return 0;
- case X86::XMM1: case X86::XMM9: case X86::MM1:
+ case X86::XMM1: case X86::XMM9:
+ case X86::YMM1: case X86::YMM9: case X86::MM1:
return 1;
- case X86::XMM2: case X86::XMM10: case X86::MM2:
+ case X86::XMM2: case X86::XMM10:
+ case X86::YMM2: case X86::YMM10: case X86::MM2:
return 2;
- case X86::XMM3: case X86::XMM11: case X86::MM3:
+ case X86::XMM3: case X86::XMM11:
+ case X86::YMM3: case X86::YMM11: case X86::MM3:
return 3;
- case X86::XMM4: case X86::XMM12: case X86::MM4:
+ case X86::XMM4: case X86::XMM12:
+ case X86::YMM4: case X86::YMM12: case X86::MM4:
return 4;
- case X86::XMM5: case X86::XMM13: case X86::MM5:
+ case X86::XMM5: case X86::XMM13:
+ case X86::YMM5: case X86::YMM13: case X86::MM5:
return 5;
- case X86::XMM6: case X86::XMM14: case X86::MM6:
+ case X86::XMM6: case X86::XMM14:
+ case X86::YMM6: case X86::YMM14: case X86::MM6:
return 6;
- case X86::XMM7: case X86::XMM15: case X86::MM7:
+ case X86::XMM7: case X86::XMM15:
+ case X86::YMM7: case X86::YMM15: case X86::MM7:
return 7;
+ case X86::ES:
+ return 0;
+ case X86::CS:
+ return 1;
+ case X86::SS:
+ return 2;
+ case X86::DS:
+ return 3;
+ case X86::FS:
+ return 4;
+ case X86::GS:
+ return 5;
+
+ case X86::CR0:
+ return 0;
+ case X86::CR1:
+ return 1;
+ case X86::CR2:
+ return 2;
+ case X86::CR3:
+ return 3;
+ case X86::CR4:
+ return 4;
+
+ case X86::DR0:
+ return 0;
+ case X86::DR1:
+ return 1;
+ case X86::DR2:
+ return 2;
+ case X86::DR3:
+ return 3;
+ case X86::DR4:
+ return 4;
+ case X86::DR5:
+ return 5;
+ case X86::DR6:
+ return 6;
+ case X86::DR7:
+ return 7;
+
+ // Pseudo index registers are equivalent to a "none"
+ // scaled index (See Intel Manual 2A, table 2-3)
+ case X86::EIZ:
+ case X86::RIZ:
+ return 4;
+
default:
assert(isVirtualRegister(RegNo) && "Unknown physical register!");
llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
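
The values returned by getX86RegNum() are the 3-bit register fields of the ModRM/SIB bytes; index 4 in the SIB byte means "no index register", which is why the EIZ/RIZ pseudo registers above map to 4. A small sketch of where that number lands (encodeSIB is illustrative, not the encoder's real helper):

    // Illustrative only: SIB = scale(2 bits) | index(3 bits) | base(3 bits).
    // Passing 4 as the index, as EIZ/RIZ do, selects the "none" index encoding.
    static unsigned char encodeSIB(unsigned Scale2Bits, unsigned IndexRegNum,
                                   unsigned BaseRegNum) {
      return (unsigned char)(((Scale2Bits & 3) << 6) |
                             ((IndexRegNum & 7) << 3) |
                             (BaseRegNum & 7));
    }
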
@@ -158,8 +219,7 @@ X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
unsigned SubIdx) const {
switch (SubIdx) {
default: return 0;
- case 1:
- // 8-bit
+ case X86::sub_8bit:
if (B == &X86::GR8RegClass) {
if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
return A;
@@ -191,12 +251,9 @@ X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
return &X86::GR16_NOREXRegClass;
else if (A == &X86::GR16_ABCDRegClass)
return &X86::GR16_ABCDRegClass;
- } else if (B == &X86::FR32RegClass) {
- return A;
}
break;
- case 2:
- // 8-bit hi
+ case X86::sub_8bit_hi:
if (B == &X86::GR8_ABCD_HRegClass) {
if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
A == &X86::GR64_NOREXRegClass ||
@@ -209,12 +266,9 @@ X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
A == &X86::GR16_NOREXRegClass)
return &X86::GR16_ABCDRegClass;
- } else if (B == &X86::FR64RegClass) {
- return A;
}
break;
- case 3:
- // 16-bit
+ case X86::sub_16bit:
if (B == &X86::GR16RegClass) {
if (A->getSize() == 4 || A->getSize() == 8)
return A;
@@ -238,12 +292,9 @@ X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
return &X86::GR32_NOREXRegClass;
else if (A == &X86::GR32_ABCDRegClass)
return &X86::GR64_ABCDRegClass;
- } else if (B == &X86::VR128RegClass) {
- return A;
}
break;
- case 4:
- // 32-bit
+ case X86::sub_32bit:
if (B == &X86::GR32RegClass || B == &X86::GR32_NOSPRegClass) {
if (A->getSize() == 8)
return A;
@@ -261,6 +312,18 @@ X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
return &X86::GR64_ABCDRegClass;
}
break;
+ case X86::sub_ss:
+ if (B == &X86::FR32RegClass)
+ return A;
+ break;
+ case X86::sub_sd:
+ if (B == &X86::FR64RegClass)
+ return A;
+ break;
+ case X86::sub_xmm:
+ if (B == &X86::VR128RegClass)
+ return A;
+ break;
}
return 0;
}
@@ -297,9 +360,7 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
bool ghcCall = false;
if (MF) {
- const MachineFrameInfo *MFI = MF->getFrameInfo();
- const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
- callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
+ callsEHReturn = MF->getMMI().callsEHReturn();
const Function *F = MF->getFunction();
ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
}
@@ -345,60 +406,6 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
}
}
-const TargetRegisterClass* const*
-X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
- bool callsEHReturn = false;
-
- if (MF) {
- const MachineFrameInfo *MFI = MF->getFrameInfo();
- const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
- callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
- }
-
- static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
- &X86::GR32RegClass, &X86::GR32RegClass,
- &X86::GR32RegClass, &X86::GR32RegClass, 0
- };
- static const TargetRegisterClass * const CalleeSavedRegClasses32EHRet[] = {
- &X86::GR32RegClass, &X86::GR32RegClass,
- &X86::GR32RegClass, &X86::GR32RegClass,
- &X86::GR32RegClass, &X86::GR32RegClass, 0
- };
- static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = {
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::GR64RegClass, &X86::GR64RegClass, 0
- };
- static const TargetRegisterClass * const CalleeSavedRegClasses64EHRet[] = {
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::GR64RegClass, &X86::GR64RegClass, 0
- };
- static const TargetRegisterClass * const CalleeSavedRegClassesWin64[] = {
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::VR128RegClass, &X86::VR128RegClass,
- &X86::VR128RegClass, &X86::VR128RegClass,
- &X86::VR128RegClass, &X86::VR128RegClass,
- &X86::VR128RegClass, &X86::VR128RegClass,
- &X86::VR128RegClass, &X86::VR128RegClass, 0
- };
-
- if (Is64Bit) {
- if (IsWin64)
- return CalleeSavedRegClassesWin64;
- else
- return (callsEHReturn ?
- CalleeSavedRegClasses64EHRet : CalleeSavedRegClasses64Bit);
- } else {
- return (callsEHReturn ?
- CalleeSavedRegClasses32EHRet : CalleeSavedRegClasses32Bit);
- }
-}
-
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
// Set the stack-pointer register and its aliases as reserved.
@@ -443,14 +450,14 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
/// or if frame pointer elimination is disabled.
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
- const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
+ const MachineModuleInfo &MMI = MF.getMMI();
- return (NoFramePointerElim ||
+ return (DisableFramePointerElim(MF) ||
needsStackRealignment(MF) ||
MFI->hasVarSizedObjects() ||
MFI->isFrameAddressTaken() ||
MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
- (MMI && MMI->callsUnwindInit()));
+ MMI.callsUnwindInit());
}
bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
@@ -462,26 +469,29 @@ bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *F = MF.getFunction();
- bool requiresRealignment =
- RealignStack && ((MFI->getMaxAlignment() > StackAlign) ||
- F->hasFnAttr(Attribute::StackAlignment));
+ bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
+ F->hasFnAttr(Attribute::StackAlignment));
// FIXME: Currently we don't support stack realignment for functions with
// variable-sized allocas.
- // FIXME: Temporary disable the error - it seems to be too conservative.
+ // FIXME: It's more complicated than this...
if (0 && requiresRealignment && MFI->hasVarSizedObjects())
- llvm_report_error(
+ report_fatal_error(
"Stack realignment in presense of dynamic allocas is not supported");
-
- return (requiresRealignment && !MFI->hasVarSizedObjects());
+
+ // If we've requested that we force align the stack do so now.
+ if (ForceStackAlign)
+ return canRealignStack(MF);
+
+ return requiresRealignment && canRealignStack(MF);
}
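
Concretely, realignment is triggered when a function owns a stack object whose alignment exceeds the ABI stack alignment (StackAlign, commonly 16 bytes), or when it carries the StackAlignment attribute. Illustrative source only, not from the patch:

    void consume(void *p);  // keep the buffer from being optimized away

    void needs_realignment() {
      // 32-byte alignment exceeds the usual 16-byte ABI stack alignment, so
      // MFI->getMaxAlignment() > StackAlign and needsStackRealignment() is true.
      __attribute__((aligned(32))) char buffer[64];
      consume(buffer);
    }
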
-bool X86RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
+bool X86RegisterInfo::hasReservedCallFrame(const MachineFunction &MF) const {
return !MF.getFrameInfo()->hasVarSizedObjects();
}
-bool X86RegisterInfo::hasReservedSpillSlot(MachineFunction &MF, unsigned Reg,
- int &FrameIdx) const {
+bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
+ unsigned Reg, int &FrameIdx) const {
if (Reg == FramePtr && hasFP(MF)) {
FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
return true;
@@ -524,6 +534,30 @@ X86RegisterInfo::getFrameIndexOffset(const MachineFunction &MF, int FI) const {
return Offset;
}
+static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
+ if (is64Bit) {
+ if (isInt<8>(Imm))
+ return X86::SUB64ri8;
+ return X86::SUB64ri32;
+ } else {
+ if (isInt<8>(Imm))
+ return X86::SUB32ri8;
+ return X86::SUB32ri;
+ }
+}
+
+static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
+ if (is64Bit) {
+ if (isInt<8>(Imm))
+ return X86::ADD64ri8;
+ return X86::ADD64ri32;
+ } else {
+ if (isInt<8>(Imm))
+ return X86::ADD32ri8;
+ return X86::ADD32ri;
+ }
+}
+
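
The helpers above pick the sign-extended 8-bit immediate form whenever the adjustment fits in [-128, 127], which is three bytes shorter per instruction than the ri32 form. A standalone sketch of the same check (fitsInSigned8 stands in for LLVM's isInt<8>):

    #include <cstdint>
    #include <cstdio>

    // Stand-in for LLVM's isInt<8>(): true iff Imm fits in a signed 8-bit field.
    static bool fitsInSigned8(int64_t Imm) { return Imm >= -128 && Imm <= 127; }

    int main() {
      const int64_t adjustments[] = { 8, 120, 128, 4096 };
      for (unsigned i = 0; i < 4; ++i)
        std::printf("%lld -> %s\n", (long long)adjustments[i],
                    fitsInSigned8(adjustments[i]) ? "SUB64ri8" : "SUB64ri32");
      return 0;
    }
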
void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
@@ -543,7 +577,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineInstr *New = 0;
if (Old->getOpcode() == getCallFrameSetupOpcode()) {
New = BuildMI(MF, Old->getDebugLoc(),
- TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
+ TII.get(getSUBriOpcode(Is64Bit, Amount)),
StackPtr)
.addReg(StackPtr)
.addImm(Amount);
@@ -555,9 +589,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
Amount -= CalleeAmt;
if (Amount) {
- unsigned Opc = (Amount < 128) ?
- (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
- (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
+ unsigned Opc = getADDriOpcode(Is64Bit, Amount);
New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
.addReg(StackPtr)
.addImm(Amount);
@@ -577,9 +609,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
// something off the stack pointer, add it back. We do this until we have
// more advanced stack pointer tracking ability.
if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
- unsigned Opc = (CalleeAmt < 128) ?
- (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
- (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
+ unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
MachineInstr *Old = I;
MachineInstr *New =
BuildMI(MF, Old->getDebugLoc(), TII.get(Opc),
@@ -596,10 +626,9 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MBB.erase(I);
}
-unsigned
+void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
- int SPAdj, int *Value,
- RegScavenger *RS) const{
+ int SPAdj, RegScavenger *RS) const{
assert(SPAdj == 0 && "Unexpected");
unsigned i = 0;
@@ -614,8 +643,12 @@ X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int FrameIndex = MI.getOperand(i).getIndex();
unsigned BasePtr;
+ unsigned Opc = MI.getOpcode();
+ bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
if (needsStackRealignment(MF))
BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
+ else if (AfterFPPop)
+ BasePtr = StackPtr;
else
BasePtr = (hasFP(MF) ? FramePtr : StackPtr);
@@ -624,19 +657,24 @@ X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MI.getOperand(i).ChangeToRegister(BasePtr, false);
// Now add the frame object offset to the offset from EBP.
+ int FIOffset;
+ if (AfterFPPop) {
+ // Tail call jmp happens after FP is popped.
+ const TargetFrameInfo &TFI = *MF.getTarget().getFrameInfo();
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ FIOffset = MFI->getObjectOffset(FrameIndex) - TFI.getOffsetOfLocalArea();
+ } else
+ FIOffset = getFrameIndexOffset(MF, FrameIndex);
+
if (MI.getOperand(i+3).isImm()) {
// Offset is a 32-bit integer.
- int Offset = getFrameIndexOffset(MF, FrameIndex) +
- (int)(MI.getOperand(i + 3).getImm());
-
+ int Offset = FIOffset + (int)(MI.getOperand(i + 3).getImm());
MI.getOperand(i + 3).ChangeToImmediate(Offset);
} else {
// Offset is symbolic. This is extremely rare.
- uint64_t Offset = getFrameIndexOffset(MF, FrameIndex) +
- (uint64_t)MI.getOperand(i+3).getOffset();
+ uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset();
MI.getOperand(i+3).setOffset(Offset);
}
- return 0;
}
void
@@ -658,8 +696,7 @@ X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// }
// [EBP]
MFI->CreateFixedObject(-TailCallReturnAddrDelta,
- (-1U*SlotSize)+TailCallReturnAddrDelta,
- true, false);
+ (-1U*SlotSize)+TailCallReturnAddrDelta, true);
}
if (hasFP(MF)) {
@@ -672,7 +709,7 @@ X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
-(int)SlotSize +
TFI.getOffsetOfLocalArea() +
TailCallReturnAddrDelta,
- true, false);
+ true);
assert(FrameIdx == MFI->getObjectIndexBegin() &&
"Slot for EBP register must be last in order to be found!");
FrameIdx = 0;
@@ -687,13 +724,9 @@ void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
const TargetInstrInfo &TII) {
bool isSub = NumBytes < 0;
uint64_t Offset = isSub ? -NumBytes : NumBytes;
- unsigned Opc = isSub
- ? ((Offset < 128) ?
- (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
- (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri))
- : ((Offset < 128) ?
- (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
- (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri));
+ unsigned Opc = isSub ?
+ getSUBriOpcode(Is64Bit, Offset) :
+ getADDriOpcode(Is64Bit, Offset);
uint64_t Chunk = (1LL << 31) - 1;
DebugLoc DL = MBB.findDebugLoc(MBBI);
@@ -731,7 +764,7 @@ void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
}
}
-/// mergeSPUpdatesUp - Merge two stack-manipulating instructions lower iterator.
+/// mergeSPUpdatesDown - Merge two stack-manipulating instructions using the lower iterator.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
@@ -797,17 +830,16 @@ static int mergeSPUpdates(MachineBasicBlock &MBB,
}
void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
- unsigned LabelId,
+ MCSymbol *Label,
unsigned FramePtr) const {
MachineFrameInfo *MFI = MF.getFrameInfo();
- MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
- if (!MMI) return;
+ MachineModuleInfo &MMI = MF.getMMI();
// Add callee saved registers to move list.
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
if (CSI.empty()) return;
- std::vector<MachineMove> &Moves = MMI->getFrameMoves();
+ std::vector<MachineMove> &Moves = MMI.getFrameMoves();
const TargetData *TD = MF.getTarget().getTargetData();
bool HasFP = hasFP(MF);
@@ -860,7 +892,7 @@ void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
MachineLocation CSSrc(Reg);
- Moves.push_back(MachineMove(LabelId, CSDst, CSSrc));
+ Moves.push_back(MachineMove(Label, CSDst, CSSrc));
}
}
@@ -874,15 +906,26 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *Fn = MF.getFunction();
const X86Subtarget *Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
- MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
+ MachineModuleInfo &MMI = MF.getMMI();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- bool needsFrameMoves = (MMI && MMI->hasDebugInfo()) ||
+ bool needsFrameMoves = MMI.hasDebugInfo() ||
!Fn->doesNotThrow() || UnwindTablesMandatory;
uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
uint64_t StackSize = MFI->getStackSize(); // Number of bytes to allocate.
bool HasFP = hasFP(MF);
DebugLoc DL;
+ // If we're forcing a stack realignment we can't rely on just the frame
+ // info, we need to know the ABI stack alignment as well in case we
+ // have a call out. Otherwise just make sure we have some alignment - we'll
+ // go with the minimum SlotSize.
+ if (ForceStackAlign) {
+ if (MFI->hasCalls())
+ MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
+ else if (MaxAlign < SlotSize)
+ MaxAlign = SlotSize;
+ }
+
// Add RETADDR move area to callee saved frame size.
int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
if (TailCallReturnAddrDelta < 0)
@@ -896,7 +939,7 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
!needsStackRealignment(MF) &&
!MFI->hasVarSizedObjects() && // No dynamic alloca.
- !MFI->hasCalls() && // No calls.
+ !MFI->adjustsStack() && // No calls.
!Subtarget->isTargetWin64()) { // Win64 has no Red Zone
uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
if (HasFP) MinSize += SlotSize;
@@ -914,7 +957,8 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
// size is bigger than the callers.
if (TailCallReturnAddrDelta < 0) {
MachineInstr *MI =
- BuildMI(MBB, MBBI, DL, TII.get(Is64Bit? X86::SUB64ri32 : X86::SUB32ri),
+ BuildMI(MBB, MBBI, DL,
+ TII.get(getSUBriOpcode(Is64Bit, -TailCallReturnAddrDelta)),
StackPtr)
.addReg(StackPtr)
.addImm(-TailCallReturnAddrDelta);
@@ -935,13 +979,10 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
// REG < 64 => DW_CFA_offset + Reg
// ELSE => DW_CFA_offset_extended
- std::vector<MachineMove> &Moves = MMI->getFrameMoves();
+ std::vector<MachineMove> &Moves = MMI.getFrameMoves();
const TargetData *TD = MF.getTarget().getTargetData();
uint64_t NumBytes = 0;
- int stackGrowth =
- (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
- TargetFrameInfo::StackGrowsUp ?
- TD->getPointerSize() : -TD->getPointerSize());
+ int stackGrowth = -TD->getPointerSize();
if (HasFP) {
// Calculate required stack adjustment.
@@ -962,26 +1003,25 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
if (needsFrameMoves) {
// Mark the place where EBP/RBP was saved.
- unsigned FrameLabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);
+ MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(FrameLabel);
// Define the current CFA rule to use the provided offset.
if (StackSize) {
MachineLocation SPDst(MachineLocation::VirtualFP);
MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
- Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
+ Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
} else {
// FIXME: Verify & implement for FP
MachineLocation SPDst(StackPtr);
MachineLocation SPSrc(StackPtr, stackGrowth);
- Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
+ Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
}
// Change the rule for the FramePtr to be an "offset" rule.
- MachineLocation FPDst(MachineLocation::VirtualFP,
- 2 * stackGrowth);
+ MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
MachineLocation FPSrc(FramePtr);
- Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
+ Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
}
// Update EBP with the new base value...
@@ -991,13 +1031,13 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
if (needsFrameMoves) {
// Mark effective beginning of when frame pointer becomes valid.
- unsigned FrameLabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);
+ MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(FrameLabel);
// Define the current CFA to use the EBP/RBP register.
MachineLocation FPDst(FramePtr);
MachineLocation FPSrc(MachineLocation::VirtualFP);
- Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
+ Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
}
// Mark the FramePtr as live-in in every block except the entry.
@@ -1031,15 +1071,15 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
if (!HasFP && needsFrameMoves) {
// Mark callee-saved push instruction.
- unsigned LabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(LabelId);
+ MCSymbol *Label = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);
// Define the current CFA rule to use the provided offset.
unsigned Ptr = StackSize ?
MachineLocation::VirtualFP : StackPtr;
MachineLocation SPDst(Ptr);
MachineLocation SPSrc(Ptr, StackOffset);
- Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
+ Moves.push_back(MachineMove(Label, SPDst, SPSrc));
StackOffset += stackGrowth;
}
}
@@ -1047,7 +1087,17 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
DL = MBB.findDebugLoc(MBBI);
// Adjust stack pointer: ESP -= numbytes.
- if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
+
+ // Windows and cygwin/mingw require a prologue helper routine when allocating
+ // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
+ // uses __alloca. __alloca and the 32-bit version of __chkstk will probe
+ // the stack and adjust the stack pointer in one go. The 64-bit version
+ // of __chkstk is only responsible for probing the stack. The 64-bit
+ // prologue is responsible for adjusting the stack pointer. Touching the
+ // stack at 4K increments is necessary to ensure that the guard pages used
+ // by the OS virtual memory manager are allocated in correct sequence.
+ if (NumBytes >= 4096 &&
+ (Subtarget->isTargetCygMing() || Subtarget->isTargetWin32())) {
// Check, whether EAX is livein for this function.
bool isEAXAlive = false;
for (MachineRegisterInfo::livein_iterator
@@ -1058,16 +1108,16 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
Reg == X86::AH || Reg == X86::AL);
}
- // Function prologue calls _alloca to probe the stack when allocating more
- // than 4k bytes in one go. Touching the stack at 4K increments is necessary
- // to ensure that the guard pages used by the OS virtual memory manager are
- // allocated in correct sequence.
+
+ const char *StackProbeSymbol =
+ Subtarget->isTargetWindows() ? "_chkstk" : "_alloca";
if (!isEAXAlive) {
BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
.addImm(NumBytes);
BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
- .addExternalSymbol("_alloca")
- .addReg(StackPtr, RegState::Define | RegState::Implicit);
+ .addExternalSymbol(StackProbeSymbol)
+ .addReg(StackPtr, RegState::Define | RegState::Implicit)
+ .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
} else {
// Save EAX
BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
@@ -1078,8 +1128,9 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
.addImm(NumBytes - 4);
BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
- .addExternalSymbol("_alloca")
- .addReg(StackPtr, RegState::Define | RegState::Implicit);
+ .addExternalSymbol(StackProbeSymbol)
+ .addReg(StackPtr, RegState::Define | RegState::Implicit)
+ .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
// Restore EAX
MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
@@ -1103,8 +1154,8 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
if ((NumBytes || PushedRegs) && needsFrameMoves) {
// Mark end of stack pointer adjustment.
- unsigned LabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(LabelId);
+ MCSymbol *Label = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);
if (!HasFP && NumBytes) {
// Define the current CFA rule to use the provided offset.
@@ -1112,18 +1163,18 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
MachineLocation SPDst(MachineLocation::VirtualFP);
MachineLocation SPSrc(MachineLocation::VirtualFP,
-StackSize + stackGrowth);
- Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
+ Moves.push_back(MachineMove(Label, SPDst, SPSrc));
} else {
// FIXME: Verify & implement for FP
MachineLocation SPDst(StackPtr);
MachineLocation SPSrc(StackPtr, stackGrowth);
- Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
+ Moves.push_back(MachineMove(Label, SPDst, SPSrc));
}
}
// Emit DWARF info specifying the offsets of the callee-saved registers.
if (PushedRegs)
- emitCalleeSavedFrameMoves(MF, LabelId, HasFP ? FramePtr : StackPtr);
+ emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
}
}
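
For reference, the stack-probe requirement described in the comment above the __chkstk/_alloca call boils down to touching one byte in every 4K page, top down, so the guard page the OS keeps below the committed stack is always hit in order. A conceptual sketch only, not the real assembly helpers:

    #include <cstddef>

    // Conceptual model of what __chkstk/_alloca guarantee: each 4K page between
    // the current stack top and the new one is touched in descending order, so
    // the lazily-committed guard page is faulted in before it can be skipped.
    static void probe_stack(volatile char *StackTop, std::size_t Bytes) {
      const std::size_t PageSize = 4096;
      for (std::size_t Off = PageSize; Off < Bytes; Off += PageSize)
        StackTop[-(std::ptrdiff_t)Off] = 0;
      StackTop[-(std::ptrdiff_t)Bytes] = 0;  // final, possibly partial, page
    }
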
@@ -1142,13 +1193,12 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
case X86::RETI:
case X86::TCRETURNdi:
case X86::TCRETURNri:
- case X86::TCRETURNri64:
+ case X86::TCRETURNmi:
case X86::TCRETURNdi64:
+ case X86::TCRETURNri64:
+ case X86::TCRETURNmi64:
case X86::EH_RETURN:
case X86::EH_RETURN64:
- case X86::TAILJMPd:
- case X86::TAILJMPr:
- case X86::TAILJMPm:
break; // These are ok
}
@@ -1158,6 +1208,17 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
unsigned CSSize = X86FI->getCalleeSavedFrameSize();
uint64_t NumBytes = 0;
+ // If we're forcing a stack realignment we can't rely on just the frame
+ // info, we need to know the ABI stack alignment as well in case we
+ // have a call out. Otherwise just make sure we have some alignment - we'll
+ // go with the minimum.
+ if (ForceStackAlign) {
+ if (MFI->hasCalls())
+ MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
+ else
+ MaxAlign = MaxAlign ? MaxAlign : 4;
+ }
+
if (hasFP(MF)) {
// Calculate required stack adjustment.
uint64_t FrameSize = StackSize - SlotSize;
@@ -1211,8 +1272,8 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
if (CSSize) {
unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
MachineInstr *MI =
- addLeaRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
- FramePtr, false, -CSSize);
+ addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
+ FramePtr, false, -CSSize);
MBB.insert(MBBI, MI);
} else {
BuildMI(MBB, MBBI, DL,
@@ -1233,11 +1294,14 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
StackPtr).addReg(DestAddr.getReg());
} else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
- RetOpcode== X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64) {
+ RetOpcode == X86::TCRETURNmi ||
+ RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
+ RetOpcode == X86::TCRETURNmi64) {
+ bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
// Tail call return: adjust the stack pointer and jump to callee.
MBBI = prior(MBB.end());
MachineOperand &JumpTarget = MBBI->getOperand(0);
- MachineOperand &StackAdjust = MBBI->getOperand(1);
+ MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
assert(StackAdjust.isImm() && "Expecting immediate value.");
// Adjust stack pointer.
@@ -1257,14 +1321,23 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
}
// Jump to label or value in register.
- if (RetOpcode == X86::TCRETURNdi|| RetOpcode == X86::TCRETURNdi64) {
- BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPd)).
+ if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
+ BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
+ ? X86::TAILJMPd : X86::TAILJMPd64)).
addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
JumpTarget.getTargetFlags());
+ } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
+ ? X86::TAILJMPm : X86::TAILJMPm64));
+ for (unsigned i = 0; i != 5; ++i)
+ MIB.addOperand(MBBI->getOperand(i));
} else if (RetOpcode == X86::TCRETURNri64) {
- BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
+ BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
+ addReg(JumpTarget.getReg(), RegState::Kill);
} else {
- BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr), JumpTarget.getReg());
+ BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
+ addReg(JumpTarget.getReg(), RegState::Kill);
}
MachineInstr *NewMI = prior(MBBI);
@@ -1299,7 +1372,7 @@ X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves) const {
// Calculate amount of bytes used for return address storing
int stackGrowth = (Is64Bit ? -8 : -4);
- // Initial state of the frame pointer is esp+4.
+ // Initial state of the frame pointer is esp+stackGrowth.
MachineLocation Dst(MachineLocation::VirtualFP);
MachineLocation Src(StackPtr, stackGrowth);
Moves.push_back(MachineMove(0, Dst, Src));
@@ -1493,7 +1566,7 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
namespace {
struct MSAH : public MachineFunctionPass {
static char ID;
- MSAH() : MachineFunctionPass(&ID) {}
+ MSAH() : MachineFunctionPass(ID) {}
virtual bool runOnMachineFunction(MachineFunction &MF) {
const X86TargetMachine *TM =
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86RegisterInfo.h b/libclamav/c++/llvm/lib/Target/X86/X86RegisterInfo.h
index e4bdb4e..527df05 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86RegisterInfo.h
+++ b/libclamav/c++/llvm/lib/Target/X86/X86RegisterInfo.h
@@ -30,16 +30,6 @@ namespace N86 {
};
}
-namespace X86 {
- /// SubregIndex - The index of various sized subregister classes. Note that
- /// these indices must be kept in sync with the class indices in the
- /// X86RegisterInfo.td file.
- enum SubregIndex {
- SUBREG_8BIT = 1, SUBREG_8BIT_HI = 2, SUBREG_16BIT = 3, SUBREG_32BIT = 4,
- SUBREG_SS = 1, SUBREG_SD = 2, SUBREG_XMM = 3
- };
-}
-
/// DWARFFlavour - Flavour of dwarf regnumbers
///
namespace DWARFFlavour {
@@ -115,12 +105,6 @@ public:
/// callee-save registers on this target.
const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
- /// getCalleeSavedRegClasses - Return a null-terminated list of the preferred
- /// register classes to spill each callee-saved register with. The order and
- /// length of this list match the getCalleeSavedRegs() list.
- const TargetRegisterClass* const*
- getCalleeSavedRegClasses(const MachineFunction *MF = 0) const;
-
/// getReservedRegs - Returns a bitset indexed by physical register number
/// indicating if a register is a special register that has particular uses and
/// should be considered unavailable at all times, e.g. SP, RA. This is used by
@@ -133,23 +117,22 @@ public:
bool needsStackRealignment(const MachineFunction &MF) const;
- bool hasReservedCallFrame(MachineFunction &MF) const;
+ bool hasReservedCallFrame(const MachineFunction &MF) const;
- bool hasReservedSpillSlot(MachineFunction &MF, unsigned Reg,
+ bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg,
int &FrameIdx) const;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
- unsigned eliminateFrameIndex(MachineBasicBlock::iterator MI,
- int SPAdj, int *Value = NULL,
- RegScavenger *RS = NULL) const;
+ void eliminateFrameIndex(MachineBasicBlock::iterator MI,
+ int SPAdj, RegScavenger *RS = NULL) const;
void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *RS = NULL) const;
- void emitCalleeSavedFrameMoves(MachineFunction &MF, unsigned LabelId,
+ void emitCalleeSavedFrameMoves(MachineFunction &MF, MCSymbol *Label,
unsigned FramePtr) const;
void emitPrologue(MachineFunction &MF) const;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86RegisterInfo.td b/libclamav/c++/llvm/lib/Target/X86/X86RegisterInfo.td
index 0a6f657..95269b1 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86RegisterInfo.td
+++ b/libclamav/c++/llvm/lib/Target/X86/X86RegisterInfo.td
@@ -18,6 +18,17 @@
//
let Namespace = "X86" in {
+ // Subregister indices.
+ def sub_8bit : SubRegIndex;
+ def sub_8bit_hi : SubRegIndex;
+ def sub_16bit : SubRegIndex;
+ def sub_32bit : SubRegIndex;
+
+ def sub_ss : SubRegIndex;
+ def sub_sd : SubRegIndex;
+ def sub_xmm : SubRegIndex;
+
+
// In the register alias definitions below, we define which registers alias
// which others. We only specify which registers the small registers alias,
// because the register file generator is smart enough to figure out that
@@ -57,17 +68,22 @@ let Namespace = "X86" in {
def BH : Register<"bh">, DwarfRegNum<[3, 3, 3]>;
// 16-bit registers
+ let SubRegIndices = [sub_8bit, sub_8bit_hi] in {
def AX : RegisterWithSubRegs<"ax", [AL,AH]>, DwarfRegNum<[0, 0, 0]>;
def DX : RegisterWithSubRegs<"dx", [DL,DH]>, DwarfRegNum<[1, 2, 2]>;
def CX : RegisterWithSubRegs<"cx", [CL,CH]>, DwarfRegNum<[2, 1, 1]>;
def BX : RegisterWithSubRegs<"bx", [BL,BH]>, DwarfRegNum<[3, 3, 3]>;
+ }
+ let SubRegIndices = [sub_8bit] in {
def SI : RegisterWithSubRegs<"si", [SIL]>, DwarfRegNum<[4, 6, 6]>;
def DI : RegisterWithSubRegs<"di", [DIL]>, DwarfRegNum<[5, 7, 7]>;
def BP : RegisterWithSubRegs<"bp", [BPL]>, DwarfRegNum<[6, 4, 5]>;
def SP : RegisterWithSubRegs<"sp", [SPL]>, DwarfRegNum<[7, 5, 4]>;
+ }
def IP : Register<"ip">, DwarfRegNum<[16]>;
// X86-64 only
+ let SubRegIndices = [sub_8bit] in {
def R8W : RegisterWithSubRegs<"r8w", [R8B]>, DwarfRegNum<[8, -2, -2]>;
def R9W : RegisterWithSubRegs<"r9w", [R9B]>, DwarfRegNum<[9, -2, -2]>;
def R10W : RegisterWithSubRegs<"r10w", [R10B]>, DwarfRegNum<[10, -2, -2]>;
@@ -76,8 +92,9 @@ let Namespace = "X86" in {
def R13W : RegisterWithSubRegs<"r13w", [R13B]>, DwarfRegNum<[13, -2, -2]>;
def R14W : RegisterWithSubRegs<"r14w", [R14B]>, DwarfRegNum<[14, -2, -2]>;
def R15W : RegisterWithSubRegs<"r15w", [R15B]>, DwarfRegNum<[15, -2, -2]>;
-
+ }
// 32-bit registers
+ let SubRegIndices = [sub_16bit] in {
def EAX : RegisterWithSubRegs<"eax", [AX]>, DwarfRegNum<[0, 0, 0]>;
def EDX : RegisterWithSubRegs<"edx", [DX]>, DwarfRegNum<[1, 2, 2]>;
def ECX : RegisterWithSubRegs<"ecx", [CX]>, DwarfRegNum<[2, 1, 1]>;
@@ -97,8 +114,10 @@ let Namespace = "X86" in {
def R13D : RegisterWithSubRegs<"r13d", [R13W]>, DwarfRegNum<[13, -2, -2]>;
def R14D : RegisterWithSubRegs<"r14d", [R14W]>, DwarfRegNum<[14, -2, -2]>;
def R15D : RegisterWithSubRegs<"r15d", [R15W]>, DwarfRegNum<[15, -2, -2]>;
+ }
// 64-bit registers, X86-64 only
+ let SubRegIndices = [sub_32bit] in {
def RAX : RegisterWithSubRegs<"rax", [EAX]>, DwarfRegNum<[0, -2, -2]>;
def RDX : RegisterWithSubRegs<"rdx", [EDX]>, DwarfRegNum<[1, -2, -2]>;
def RCX : RegisterWithSubRegs<"rcx", [ECX]>, DwarfRegNum<[2, -2, -2]>;
@@ -117,6 +136,7 @@ let Namespace = "X86" in {
def R14 : RegisterWithSubRegs<"r14", [R14D]>, DwarfRegNum<[14, -2, -2]>;
def R15 : RegisterWithSubRegs<"r15", [R15D]>, DwarfRegNum<[15, -2, -2]>;
def RIP : RegisterWithSubRegs<"rip", [EIP]>, DwarfRegNum<[16, -2, -2]>;
+ }
// MMX Registers. These are actually aliased to ST0 .. ST7
def MM0 : Register<"mm0">, DwarfRegNum<[41, 29, 29]>;
@@ -127,7 +147,7 @@ let Namespace = "X86" in {
def MM5 : Register<"mm5">, DwarfRegNum<[46, 34, 34]>;
def MM6 : Register<"mm6">, DwarfRegNum<[47, 35, 35]>;
def MM7 : Register<"mm7">, DwarfRegNum<[48, 36, 36]>;
-
+
// Pseudo Floating Point registers
def FP0 : Register<"fp0">;
def FP1 : Register<"fp1">;
@@ -135,9 +155,11 @@ let Namespace = "X86" in {
def FP3 : Register<"fp3">;
def FP4 : Register<"fp4">;
def FP5 : Register<"fp5">;
- def FP6 : Register<"fp6">;
+ def FP6 : Register<"fp6">;
- // XMM Registers, used by the various SSE instruction set extensions
+ // XMM Registers, used by the various SSE instruction set extensions.
+ // The sub_ss and sub_sd subregs are the same registers with another regclass.
+ let CompositeIndices = [(sub_ss), (sub_sd)] in {
def XMM0: Register<"xmm0">, DwarfRegNum<[17, 21, 21]>;
def XMM1: Register<"xmm1">, DwarfRegNum<[18, 22, 22]>;
def XMM2: Register<"xmm2">, DwarfRegNum<[19, 23, 23]>;
@@ -156,8 +178,10 @@ let Namespace = "X86" in {
def XMM13: Register<"xmm13">, DwarfRegNum<[30, -2, -2]>;
def XMM14: Register<"xmm14">, DwarfRegNum<[31, -2, -2]>;
def XMM15: Register<"xmm15">, DwarfRegNum<[32, -2, -2]>;
+ }
// YMM Registers, used by AVX instructions
+ let SubRegIndices = [sub_xmm] in {
def YMM0: RegisterWithSubRegs<"ymm0", [XMM0]>, DwarfRegNum<[17, 21, 21]>;
def YMM1: RegisterWithSubRegs<"ymm1", [XMM1]>, DwarfRegNum<[18, 22, 22]>;
def YMM2: RegisterWithSubRegs<"ymm2", [XMM2]>, DwarfRegNum<[19, 23, 23]>;
@@ -174,6 +198,7 @@ let Namespace = "X86" in {
def YMM13: RegisterWithSubRegs<"ymm13", [XMM13]>, DwarfRegNum<[30, -2, -2]>;
def YMM14: RegisterWithSubRegs<"ymm14", [XMM14]>, DwarfRegNum<[31, -2, -2]>;
def YMM15: RegisterWithSubRegs<"ymm15", [XMM15]>, DwarfRegNum<[32, -2, -2]>;
+ }
// Floating point stack registers
def ST0 : Register<"st(0)">, DwarfRegNum<[33, 12, 11]>;
@@ -207,106 +232,23 @@ let Namespace = "X86" in {
def DR7 : Register<"dr7">;
// Condition registers
- def ECR0 : Register<"ecr0">;
- def ECR1 : Register<"ecr1">;
- def ECR2 : Register<"ecr2">;
- def ECR3 : Register<"ecr3">;
- def ECR4 : Register<"ecr4">;
- def ECR5 : Register<"ecr5">;
- def ECR6 : Register<"ecr6">;
- def ECR7 : Register<"ecr7">;
-
- def RCR0 : Register<"rcr0">;
- def RCR1 : Register<"rcr1">;
- def RCR2 : Register<"rcr2">;
- def RCR3 : Register<"rcr3">;
- def RCR4 : Register<"rcr4">;
- def RCR5 : Register<"rcr5">;
- def RCR6 : Register<"rcr6">;
- def RCR7 : Register<"rcr7">;
- def RCR8 : Register<"rcr8">;
+ def CR0 : Register<"cr0">;
+ def CR1 : Register<"cr1">;
+ def CR2 : Register<"cr2">;
+ def CR3 : Register<"cr3">;
+ def CR4 : Register<"cr4">;
+ def CR5 : Register<"cr5">;
+ def CR6 : Register<"cr6">;
+ def CR7 : Register<"cr7">;
+ def CR8 : Register<"cr8">;
+
+ // Pseudo index registers
+ def EIZ : Register<"eiz">;
+ def RIZ : Register<"riz">;
}
//===----------------------------------------------------------------------===//
-// Subregister Set Definitions... now that we have all of the pieces, define the
-// sub registers for each register.
-//
-
-def x86_subreg_8bit : PatLeaf<(i32 1)>;
-def x86_subreg_8bit_hi : PatLeaf<(i32 2)>;
-def x86_subreg_16bit : PatLeaf<(i32 3)>;
-def x86_subreg_32bit : PatLeaf<(i32 4)>;
-
-def x86_subreg_ss : PatLeaf<(i32 1)>;
-def x86_subreg_sd : PatLeaf<(i32 2)>;
-def x86_subreg_xmm : PatLeaf<(i32 3)>;
-
-def : SubRegSet<1, [AX, CX, DX, BX, SP, BP, SI, DI,
- R8W, R9W, R10W, R11W, R12W, R13W, R14W, R15W],
- [AL, CL, DL, BL, SPL, BPL, SIL, DIL,
- R8B, R9B, R10B, R11B, R12B, R13B, R14B, R15B]>;
-
-def : SubRegSet<2, [AX, CX, DX, BX],
- [AH, CH, DH, BH]>;
-
-def : SubRegSet<1, [EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI,
- R8D, R9D, R10D, R11D, R12D, R13D, R14D, R15D],
- [AL, CL, DL, BL, SPL, BPL, SIL, DIL,
- R8B, R9B, R10B, R11B, R12B, R13B, R14B, R15B]>;
-
-def : SubRegSet<2, [EAX, ECX, EDX, EBX],
- [AH, CH, DH, BH]>;
-
-def : SubRegSet<3, [EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI,
- R8D, R9D, R10D, R11D, R12D, R13D, R14D, R15D],
- [AX, CX, DX, BX, SP, BP, SI, DI,
- R8W, R9W, R10W, R11W, R12W, R13W, R14W, R15W]>;
-
-def : SubRegSet<1, [RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI,
- R8, R9, R10, R11, R12, R13, R14, R15],
- [AL, CL, DL, BL, SPL, BPL, SIL, DIL,
- R8B, R9B, R10B, R11B, R12B, R13B, R14B, R15B]>;
-
-def : SubRegSet<2, [RAX, RCX, RDX, RBX],
- [AH, CH, DH, BH]>;
-
-def : SubRegSet<3, [RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI,
- R8, R9, R10, R11, R12, R13, R14, R15],
- [AX, CX, DX, BX, SP, BP, SI, DI,
- R8W, R9W, R10W, R11W, R12W, R13W, R14W, R15W]>;
-
-def : SubRegSet<4, [RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI,
- R8, R9, R10, R11, R12, R13, R14, R15],
- [EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI,
- R8D, R9D, R10D, R11D, R12D, R13D, R14D, R15D]>;
-
-def : SubRegSet<1, [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
- YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15],
- [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15]>;
-
-def : SubRegSet<2, [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
- YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15],
- [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15]>;
-
-def : SubRegSet<3, [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
- YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15],
- [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15]>;
-
-def : SubRegSet<1, [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15],
- [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15]>;
-
-def : SubRegSet<2, [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15],
- [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
- XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15]>;
-
-//===----------------------------------------------------------------------===//
// Register Class Definitions... now that we have all of the pieces, define the
// top-level register classes. The order specified in the register list is
// implicitly defined to be the register allocation order.
@@ -370,7 +312,7 @@ def GR8 : RegisterClass<"X86", [i8], 8,
def GR16 : RegisterClass<"X86", [i16], 16,
[AX, CX, DX, SI, DI, BX, BP, SP,
R8W, R9W, R10W, R11W, R14W, R15W, R12W, R13W]> {
- let SubRegClassList = [GR8, GR8];
+ let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi)];
let MethodProtos = [{
iterator allocation_order_begin(const MachineFunction &MF) const;
iterator allocation_order_end(const MachineFunction &MF) const;
@@ -419,10 +361,10 @@ def GR16 : RegisterClass<"X86", [i16], 16,
}];
}
-def GR32 : RegisterClass<"X86", [i32], 32,
+def GR32 : RegisterClass<"X86", [i32], 32,
[EAX, ECX, EDX, ESI, EDI, EBX, EBP, ESP,
R8D, R9D, R10D, R11D, R14D, R15D, R12D, R13D]> {
- let SubRegClassList = [GR8, GR8, GR16];
+ let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi), (GR16 sub_16bit)];
let MethodProtos = [{
iterator allocation_order_begin(const MachineFunction &MF) const;
iterator allocation_order_end(const MachineFunction &MF) const;
@@ -474,10 +416,12 @@ def GR32 : RegisterClass<"X86", [i32], 32,
// GR64 - 64-bit GPRs. This oddly includes RIP, which isn't accurate, since
// RIP isn't really a register and it can't be used anywhere except in an
// address, but it doesn't cause trouble.
-def GR64 : RegisterClass<"X86", [i64], 64,
+def GR64 : RegisterClass<"X86", [i64], 64,
[RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
RBX, R14, R15, R12, R13, RBP, RSP, RIP]> {
- let SubRegClassList = [GR8, GR8, GR16, GR32];
+ let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi),
+ (GR16 sub_16bit),
+ (GR32 sub_32bit)];
let MethodProtos = [{
iterator allocation_order_end(const MachineFunction &MF) const;
}];
@@ -506,19 +450,13 @@ def SEGMENT_REG : RegisterClass<"X86", [i16], 16, [CS, DS, SS, ES, FS, GS]> {
}
// Debug registers.
-def DEBUG_REG : RegisterClass<"X86", [i32], 32,
+def DEBUG_REG : RegisterClass<"X86", [i32], 32,
[DR0, DR1, DR2, DR3, DR4, DR5, DR6, DR7]> {
}
// Control registers.
-def CONTROL_REG_32 : RegisterClass<"X86", [i32], 32,
- [ECR0, ECR1, ECR2, ECR3, ECR4, ECR5, ECR6,
- ECR7]> {
-}
-
-def CONTROL_REG_64 : RegisterClass<"X86", [i64], 64,
- [RCR0, RCR1, RCR2, RCR3, RCR4, RCR5, RCR6,
- RCR7, RCR8]> {
+def CONTROL_REG : RegisterClass<"X86", [i64], 64,
+ [CR0, CR1, CR2, CR3, CR4, CR5, CR6, CR7, CR8]> {
}
// GR8_ABCD_L, GR8_ABCD_H, GR16_ABCD, GR32_ABCD, GR64_ABCD - Subclasses of
@@ -532,13 +470,27 @@ def GR8_ABCD_L : RegisterClass<"X86", [i8], 8, [AL, CL, DL, BL]> {
def GR8_ABCD_H : RegisterClass<"X86", [i8], 8, [AH, CH, DH, BH]> {
}
def GR16_ABCD : RegisterClass<"X86", [i16], 16, [AX, CX, DX, BX]> {
- let SubRegClassList = [GR8_ABCD_L, GR8_ABCD_H];
+ let SubRegClasses = [(GR8_ABCD_L sub_8bit), (GR8_ABCD_H sub_8bit_hi)];
}
def GR32_ABCD : RegisterClass<"X86", [i32], 32, [EAX, ECX, EDX, EBX]> {
- let SubRegClassList = [GR8_ABCD_L, GR8_ABCD_H, GR16_ABCD];
+ let SubRegClasses = [(GR8_ABCD_L sub_8bit),
+ (GR8_ABCD_H sub_8bit_hi),
+ (GR16_ABCD sub_16bit)];
}
def GR64_ABCD : RegisterClass<"X86", [i64], 64, [RAX, RCX, RDX, RBX]> {
- let SubRegClassList = [GR8_ABCD_L, GR8_ABCD_H, GR16_ABCD, GR32_ABCD];
+ let SubRegClasses = [(GR8_ABCD_L sub_8bit),
+ (GR8_ABCD_H sub_8bit_hi),
+ (GR16_ABCD sub_16bit),
+ (GR32_ABCD sub_32bit)];
+}
+def GR32_TC : RegisterClass<"X86", [i32], 32, [EAX, ECX, EDX]> {
+ let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi), (GR16 sub_16bit)];
+}
+def GR64_TC : RegisterClass<"X86", [i64], 64, [RAX, RCX, RDX, RSI, RDI,
+ R8, R9, R11]> {
+ let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi),
+ (GR16 sub_16bit),
+ (GR32_TC sub_32bit)];
}
// GR8_NOREX - GR8 registers which do not require a REX prefix.
@@ -578,7 +530,7 @@ def GR8_NOREX : RegisterClass<"X86", [i8], 8,
// GR16_NOREX - GR16 registers which do not require a REX prefix.
def GR16_NOREX : RegisterClass<"X86", [i16], 16,
[AX, CX, DX, SI, DI, BX, BP, SP]> {
- let SubRegClassList = [GR8_NOREX, GR8_NOREX];
+ let SubRegClasses = [(GR8_NOREX sub_8bit, sub_8bit_hi)];
let MethodProtos = [{
iterator allocation_order_end(const MachineFunction &MF) const;
}];
@@ -601,7 +553,8 @@ def GR16_NOREX : RegisterClass<"X86", [i16], 16,
// GR32_NOREX - GR32 registers which do not require a REX prefix.
def GR32_NOREX : RegisterClass<"X86", [i32], 32,
[EAX, ECX, EDX, ESI, EDI, EBX, EBP, ESP]> {
- let SubRegClassList = [GR8_NOREX, GR8_NOREX, GR16_NOREX];
+ let SubRegClasses = [(GR8_NOREX sub_8bit, sub_8bit_hi),
+ (GR16_NOREX sub_16bit)];
let MethodProtos = [{
iterator allocation_order_end(const MachineFunction &MF) const;
}];
@@ -624,7 +577,9 @@ def GR32_NOREX : RegisterClass<"X86", [i32], 32,
// GR64_NOREX - GR64 registers which do not require a REX prefix.
def GR64_NOREX : RegisterClass<"X86", [i64], 64,
[RAX, RCX, RDX, RSI, RDI, RBX, RBP, RSP, RIP]> {
- let SubRegClassList = [GR8_NOREX, GR8_NOREX, GR16_NOREX, GR32_NOREX];
+ let SubRegClasses = [(GR8_NOREX sub_8bit, sub_8bit_hi),
+ (GR16_NOREX sub_16bit),
+ (GR32_NOREX sub_32bit)];
let MethodProtos = [{
iterator allocation_order_end(const MachineFunction &MF) const;
}];
@@ -649,7 +604,7 @@ def GR64_NOREX : RegisterClass<"X86", [i64], 64,
def GR32_NOSP : RegisterClass<"X86", [i32], 32,
[EAX, ECX, EDX, ESI, EDI, EBX, EBP,
R8D, R9D, R10D, R11D, R14D, R15D, R12D, R13D]> {
- let SubRegClassList = [GR8, GR8, GR16];
+ let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi), (GR16 sub_16bit)];
let MethodProtos = [{
iterator allocation_order_begin(const MachineFunction &MF) const;
iterator allocation_order_end(const MachineFunction &MF) const;
@@ -702,7 +657,9 @@ def GR32_NOSP : RegisterClass<"X86", [i32], 32,
def GR64_NOSP : RegisterClass<"X86", [i64], 64,
[RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
RBX, R14, R15, R12, R13, RBP]> {
- let SubRegClassList = [GR8, GR8, GR16, GR32_NOSP];
+ let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi),
+ (GR16 sub_16bit),
+ (GR32_NOSP sub_32bit)];
let MethodProtos = [{
iterator allocation_order_end(const MachineFunction &MF) const;
}];
@@ -727,7 +684,9 @@ def GR64_NOSP : RegisterClass<"X86", [i64], 64,
// GR64_NOREX_NOSP - GR64_NOREX registers except RSP.
def GR64_NOREX_NOSP : RegisterClass<"X86", [i64], 64,
[RAX, RCX, RDX, RSI, RDI, RBX, RBP]> {
- let SubRegClassList = [GR8_NOREX, GR8_NOREX, GR16_NOREX, GR32_NOREX];
+ let SubRegClasses = [(GR8_NOREX sub_8bit, sub_8bit_hi),
+ (GR16_NOREX sub_16bit),
+ (GR32_NOREX sub_32bit)];
let MethodProtos = [{
iterator allocation_order_end(const MachineFunction &MF) const;
}];
@@ -751,7 +710,9 @@ def GR64_NOREX_NOSP : RegisterClass<"X86", [i64], 64,
// A class to support the 'A' assembler constraint: EAX then EDX.
def GR32_AD : RegisterClass<"X86", [i32], 32, [EAX, EDX]> {
- let SubRegClassList = [GR8_ABCD_L, GR8_ABCD_H, GR16_ABCD];
+ let SubRegClasses = [(GR8_ABCD_L sub_8bit),
+ (GR8_ABCD_H sub_8bit_hi),
+ (GR16_ABCD sub_16bit)];
}
// Scalar SSE2 floating point registers.
@@ -823,13 +784,14 @@ def RST : RegisterClass<"X86", [f80, f64, f32], 32,
}
// Generic vector registers: VR64 and VR128.
-def VR64 : RegisterClass<"X86", [v8i8, v4i16, v2i32, v1i64, v2f32], 64,
+def VR64 : RegisterClass<"X86", [v8i8, v4i16, v2i32, v1i64], 64,
[MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7]>;
def VR128 : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],128,
[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
XMM8, XMM9, XMM10, XMM11,
XMM12, XMM13, XMM14, XMM15]> {
- let SubRegClassList = [FR32, FR64];
+ let SubRegClasses = [(FR32 sub_ss), (FR64 sub_sd)];
+
let MethodProtos = [{
iterator allocation_order_end(const MachineFunction &MF) const;
}];
@@ -845,14 +807,41 @@ def VR128 : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],128,
}
}];
}
-def VR256 : RegisterClass<"X86", [ v8i32, v4i64, v8f32, v4f64],256,
+
+def VR256 : RegisterClass<"X86", [v32i8, v8i32, v4i64, v8f32, v4f64], 256,
[YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
YMM8, YMM9, YMM10, YMM11,
YMM12, YMM13, YMM14, YMM15]> {
- let SubRegClassList = [FR32, FR64, VR128];
+ let SubRegClasses = [(FR32 sub_ss), (FR64 sub_sd), (VR128 sub_xmm)];
+
+ let MethodProtos = [{
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ VR256Class::iterator
+ VR256Class::allocation_order_end(const MachineFunction &MF) const {
+ const TargetMachine &TM = MF.getTarget();
+ const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+ if (!Subtarget.is64Bit())
+ return end()-8; // Only YMM0 to YMM7 are available in 32-bit mode.
+ else
+ return end();
+ }
+ }];
}
// Status flags registers.
def CCR : RegisterClass<"X86", [i32], 32, [EFLAGS]> {
let CopyCost = -1; // Don't allow copying of status registers.
+
+ // EFLAGS is not allocatable.
+ let MethodProtos = [{
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ CCRClass::iterator
+ CCRClass::allocation_order_end(const MachineFunction &MF) const {
+ return allocation_order_begin(MF);
+ }
+ }];
}
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp b/libclamav/c++/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
new file mode 100644
index 0000000..6297a27
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -0,0 +1,243 @@
+//===-- X86SelectionDAGInfo.cpp - X86 SelectionDAG Info -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the X86SelectionDAGInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "x86-selectiondag-info"
+#include "X86TargetMachine.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+using namespace llvm;
+
+X86SelectionDAGInfo::X86SelectionDAGInfo(const X86TargetMachine &TM) :
+ TargetSelectionDAGInfo(TM),
+ Subtarget(&TM.getSubtarget<X86Subtarget>()),
+ TLI(*TM.getTargetLowering()) {
+}
+
+X86SelectionDAGInfo::~X86SelectionDAGInfo() {
+}
+
+SDValue
+X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
+ SDValue Chain,
+ SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align,
+ bool isVolatile,
+ const Value *DstSV,
+ uint64_t DstSVOff) const {
+ ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
+
+ // If not DWORD aligned or size is more than the threshold, call the library.
+ // The libc version is likely to be faster for these cases. It can use the
+ // address value and run time information about the CPU.
+ if ((Align & 3) != 0 ||
+ !ConstantSize ||
+ ConstantSize->getZExtValue() >
+ Subtarget->getMaxInlineSizeThreshold()) {
+ SDValue InFlag(0, 0);
+
+ // Check to see if there is a specialized entry-point for memory zeroing.
+ ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src);
+
+ if (const char *bzeroEntry = V &&
+ V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
+ EVT IntPtr = TLI.getPointerTy();
+ const Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ Entry.Node = Dst;
+ Entry.Ty = IntPtrTy;
+ Args.push_back(Entry);
+ Entry.Node = Size;
+ Args.push_back(Entry);
+ std::pair<SDValue,SDValue> CallResult =
+ TLI.LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()),
+ false, false, false, false,
+ 0, CallingConv::C, false, /*isReturnValueUsed=*/false,
+ DAG.getExternalSymbol(bzeroEntry, IntPtr), Args,
+ DAG, dl);
+ return CallResult.second;
+ }
+
+ // Otherwise have the target-independent code call memset.
+ return SDValue();
+ }
+
+ uint64_t SizeVal = ConstantSize->getZExtValue();
+ SDValue InFlag(0, 0);
+ EVT AVT;
+ SDValue Count;
+ ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Src);
+ unsigned BytesLeft = 0;
+ bool TwoRepStos = false;
+ if (ValC) {
+ unsigned ValReg;
+ uint64_t Val = ValC->getZExtValue() & 255;
+
+ // If the value is a constant, then we can potentially use larger sets.
+ switch (Align & 3) {
+ case 2: // WORD aligned
+ AVT = MVT::i16;
+ ValReg = X86::AX;
+ Val = (Val << 8) | Val;
+ break;
+ case 0: // DWORD aligned
+ AVT = MVT::i32;
+ ValReg = X86::EAX;
+ Val = (Val << 8) | Val;
+ Val = (Val << 16) | Val;
+ if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) { // QWORD aligned
+ AVT = MVT::i64;
+ ValReg = X86::RAX;
+ Val = (Val << 32) | Val;
+ }
+ break;
+ default: // Byte aligned
+ AVT = MVT::i8;
+ ValReg = X86::AL;
+ Count = DAG.getIntPtrConstant(SizeVal);
+ break;
+ }
+
+ if (AVT.bitsGT(MVT::i8)) {
+ unsigned UBytes = AVT.getSizeInBits() / 8;
+ Count = DAG.getIntPtrConstant(SizeVal / UBytes);
+ BytesLeft = SizeVal % UBytes;
+ }
+
+ Chain = DAG.getCopyToReg(Chain, dl, ValReg, DAG.getConstant(Val, AVT),
+ InFlag);
+ InFlag = Chain.getValue(1);
+ } else {
+ AVT = MVT::i8;
+ Count = DAG.getIntPtrConstant(SizeVal);
+ Chain = DAG.getCopyToReg(Chain, dl, X86::AL, Src, InFlag);
+ InFlag = Chain.getValue(1);
+ }
+
+ Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RCX :
+ X86::ECX,
+ Count, InFlag);
+ InFlag = Chain.getValue(1);
+ Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RDI :
+ X86::EDI,
+ Dst, InFlag);
+ InFlag = Chain.getValue(1);
+
+ SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDValue Ops[] = { Chain, DAG.getValueType(AVT), InFlag };
+ Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops, array_lengthof(Ops));
+
+ if (TwoRepStos) {
+ InFlag = Chain.getValue(1);
+ Count = Size;
+ EVT CVT = Count.getValueType();
+ SDValue Left = DAG.getNode(ISD::AND, dl, CVT, Count,
+ DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
+ Chain = DAG.getCopyToReg(Chain, dl, (CVT == MVT::i64) ? X86::RCX :
+ X86::ECX,
+ Left, InFlag);
+ InFlag = Chain.getValue(1);
+ Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDValue Ops[] = { Chain, DAG.getValueType(MVT::i8), InFlag };
+ Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops, array_lengthof(Ops));
+ } else if (BytesLeft) {
+ // Handle the last 1 - 7 bytes.
+ unsigned Offset = SizeVal - BytesLeft;
+ EVT AddrVT = Dst.getValueType();
+ EVT SizeVT = Size.getValueType();
+
+ Chain = DAG.getMemset(Chain, dl,
+ DAG.getNode(ISD::ADD, dl, AddrVT, Dst,
+ DAG.getConstant(Offset, AddrVT)),
+ Src,
+ DAG.getConstant(BytesLeft, SizeVT),
+ Align, isVolatile, DstSV, DstSVOff + Offset);
+ }
+
+ // TODO: Use a TokenFactor, as in memcpy, instead of a single chain.
+ return Chain;
+}
+
+SDValue
+X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
+ SDValue Chain, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align,
+ bool isVolatile, bool AlwaysInline,
+ const Value *DstSV,
+ uint64_t DstSVOff,
+ const Value *SrcSV,
+ uint64_t SrcSVOff) const {
+ // This requires the copy size to be a constant, preferably
+ // within a subtarget-specific limit.
+ ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
+ if (!ConstantSize)
+ return SDValue();
+ uint64_t SizeVal = ConstantSize->getZExtValue();
+ if (!AlwaysInline && SizeVal > Subtarget->getMaxInlineSizeThreshold())
+ return SDValue();
+
+ // If not DWORD aligned, call the library.
+ if ((Align & 3) != 0)
+ return SDValue();
+
+ // DWORD aligned
+ EVT AVT = MVT::i32;
+ if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) // QWORD aligned
+ AVT = MVT::i64;
+
+ unsigned UBytes = AVT.getSizeInBits() / 8;
+ unsigned CountVal = SizeVal / UBytes;
+ SDValue Count = DAG.getIntPtrConstant(CountVal);
+ unsigned BytesLeft = SizeVal % UBytes;
+
+ SDValue InFlag(0, 0);
+ Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RCX :
+ X86::ECX,
+ Count, InFlag);
+ InFlag = Chain.getValue(1);
+ Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RDI :
+ X86::EDI,
+ Dst, InFlag);
+ InFlag = Chain.getValue(1);
+ Chain = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RSI :
+ X86::ESI,
+ Src, InFlag);
+ InFlag = Chain.getValue(1);
+
+ SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDValue Ops[] = { Chain, DAG.getValueType(AVT), InFlag };
+ SDValue RepMovs = DAG.getNode(X86ISD::REP_MOVS, dl, Tys, Ops,
+ array_lengthof(Ops));
+
+ SmallVector<SDValue, 4> Results;
+ Results.push_back(RepMovs);
+ if (BytesLeft) {
+ // Handle the last 1 - 7 bytes.
+ unsigned Offset = SizeVal - BytesLeft;
+ EVT DstVT = Dst.getValueType();
+ EVT SrcVT = Src.getValueType();
+ EVT SizeVT = Size.getValueType();
+ Results.push_back(DAG.getMemcpy(Chain, dl,
+ DAG.getNode(ISD::ADD, dl, DstVT, Dst,
+ DAG.getConstant(Offset, DstVT)),
+ DAG.getNode(ISD::ADD, dl, SrcVT, Src,
+ DAG.getConstant(Offset, SrcVT)),
+ DAG.getConstant(BytesLeft, SizeVT),
+ Align, isVolatile, AlwaysInline,
+ DstSV, DstSVOff + Offset,
+ SrcSV, SrcSVOff + Offset));
+ }
+
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ &Results[0], Results.size());
+}
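A minimal standalone C++ sketch of the count/remainder split used by the rep-stos/rep-movs lowering above (the variable names and the 23-byte size are assumptions for illustration, not LLVM API):

#include <cstdint>
#include <cstdio>

int main() {
  // Mirror of the SizeVal/UBytes arithmetic in EmitTargetCodeForMemset/Memcpy:
  // the bulk is done with rep stos/movs, the tail with a small memset/memcpy.
  uint64_t SizeVal = 23;                  // assumed constant copy/set size
  unsigned UBytes  = 4;                   // element width: 4 (DWORD) or 8 (QWORD)
  uint64_t Count     = SizeVal / UBytes;  // rep iterations
  uint64_t BytesLeft = SizeVal % UBytes;  // trailing 1-7 bytes
  std::printf("count=%llu tail=%llu\n",   // prints: count=5 tail=3
              (unsigned long long)Count, (unsigned long long)BytesLeft);
  return 0;
}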
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86SelectionDAGInfo.h b/libclamav/c++/llvm/lib/Target/X86/X86SelectionDAGInfo.h
new file mode 100644
index 0000000..4f30f31
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Target/X86/X86SelectionDAGInfo.h
@@ -0,0 +1,59 @@
+//===-- X86SelectionDAGInfo.h - X86 SelectionDAG Info -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the X86 subclass for TargetSelectionDAGInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef X86SELECTIONDAGINFO_H
+#define X86SELECTIONDAGINFO_H
+
+#include "llvm/Target/TargetSelectionDAGInfo.h"
+
+namespace llvm {
+
+class X86TargetLowering;
+class X86TargetMachine;
+class X86Subtarget;
+
+class X86SelectionDAGInfo : public TargetSelectionDAGInfo {
+ /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
+ /// make the right decision when generating code for different targets.
+ const X86Subtarget *Subtarget;
+
+ const X86TargetLowering &TLI;
+
+public:
+ explicit X86SelectionDAGInfo(const X86TargetMachine &TM);
+ ~X86SelectionDAGInfo();
+
+ virtual
+ SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
+ SDValue Chain,
+ SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align,
+ bool isVolatile,
+ const Value *DstSV,
+ uint64_t DstSVOff) const;
+
+ virtual
+ SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
+ SDValue Chain,
+ SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align,
+ bool isVolatile, bool AlwaysInline,
+ const Value *DstSV,
+ uint64_t DstSVOff,
+ const Value *SrcSV,
+ uint64_t SrcSVOff) const;
+};
+
+}
+
+#endif
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86ShuffleDecode.h b/libclamav/c++/llvm/lib/Target/X86/X86ShuffleDecode.h
new file mode 100644
index 0000000..df04052
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Target/X86/X86ShuffleDecode.h
@@ -0,0 +1,155 @@
+//===-- X86ShuffleDecode.h - X86 shuffle decode logic ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Define several functions to decode x86 specific shuffle semantics into a
+// generic vector mask.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef X86_SHUFFLE_DECODE_H
+#define X86_SHUFFLE_DECODE_H
+
+#include "llvm/ADT/SmallVector.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Vector Mask Decoding
+//===----------------------------------------------------------------------===//
+
+enum {
+ SM_SentinelZero = ~0U
+};
+
+static inline
+void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<unsigned> &ShuffleMask) {
+ // Default to copying the dest value.
+ ShuffleMask.push_back(0);
+ ShuffleMask.push_back(1);
+ ShuffleMask.push_back(2);
+ ShuffleMask.push_back(3);
+
+ // Decode the immediate.
+ unsigned ZMask = Imm & 15;
+ unsigned CountD = (Imm >> 4) & 3;
+ unsigned CountS = (Imm >> 6) & 3;
+
+ // CountS selects which input element to use.
+ unsigned InVal = 4+CountS;
+ // CountD specifies which element of destination to update.
+ ShuffleMask[CountD] = InVal;
+ // ZMask zaps values, potentially overriding the CountD elt.
+ if (ZMask & 1) ShuffleMask[0] = SM_SentinelZero;
+ if (ZMask & 2) ShuffleMask[1] = SM_SentinelZero;
+ if (ZMask & 4) ShuffleMask[2] = SM_SentinelZero;
+ if (ZMask & 8) ShuffleMask[3] = SM_SentinelZero;
+}
+
+// <3,1> or <6,7,2,3>
+static void DecodeMOVHLPSMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
+ for (unsigned i = NElts/2; i != NElts; ++i)
+ ShuffleMask.push_back(NElts+i);
+
+ for (unsigned i = NElts/2; i != NElts; ++i)
+ ShuffleMask.push_back(i);
+}
+
+// <0,2> or <0,1,4,5>
+static void DecodeMOVLHPSMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
+ for (unsigned i = 0; i != NElts/2; ++i)
+ ShuffleMask.push_back(i);
+
+ for (unsigned i = 0; i != NElts/2; ++i)
+ ShuffleMask.push_back(NElts+i);
+}
+
+static void DecodePSHUFMask(unsigned NElts, unsigned Imm,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
+ for (unsigned i = 0; i != NElts; ++i) {
+ ShuffleMask.push_back(Imm % NElts);
+ Imm /= NElts;
+ }
+}
+
+static void DecodePSHUFHWMask(unsigned Imm,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
+ ShuffleMask.push_back(0);
+ ShuffleMask.push_back(1);
+ ShuffleMask.push_back(2);
+ ShuffleMask.push_back(3);
+ for (unsigned i = 0; i != 4; ++i) {
+ ShuffleMask.push_back(4+(Imm & 3));
+ Imm >>= 2;
+ }
+}
+
+static void DecodePSHUFLWMask(unsigned Imm,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
+ for (unsigned i = 0; i != 4; ++i) {
+ ShuffleMask.push_back((Imm & 3));
+ Imm >>= 2;
+ }
+ ShuffleMask.push_back(4);
+ ShuffleMask.push_back(5);
+ ShuffleMask.push_back(6);
+ ShuffleMask.push_back(7);
+}
+
+static void DecodePUNPCKLMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
+ for (unsigned i = 0; i != NElts/2; ++i) {
+ ShuffleMask.push_back(i);
+ ShuffleMask.push_back(i+NElts);
+ }
+}
+
+static void DecodePUNPCKHMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
+ for (unsigned i = 0; i != NElts/2; ++i) {
+ ShuffleMask.push_back(i+NElts/2);
+ ShuffleMask.push_back(i+NElts+NElts/2);
+ }
+}
+
+static void DecodeSHUFPSMask(unsigned NElts, unsigned Imm,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
+ // Part that reads from dest.
+ for (unsigned i = 0; i != NElts/2; ++i) {
+ ShuffleMask.push_back(Imm % NElts);
+ Imm /= NElts;
+ }
+ // Part that reads from src.
+ for (unsigned i = 0; i != NElts/2; ++i) {
+ ShuffleMask.push_back(Imm % NElts + NElts);
+ Imm /= NElts;
+ }
+}
+
+static void DecodeUNPCKHPMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
+ for (unsigned i = 0; i != NElts/2; ++i) {
+ ShuffleMask.push_back(i+NElts/2); // Reads from dest
+ ShuffleMask.push_back(i+NElts+NElts/2); // Reads from src
+ }
+}
+
+
+/// DecodeUNPCKLPMask - This decodes the shuffle masks for unpcklps/unpcklpd
+/// etc. NElts indicates the number of elements in the vector allowing it to
+/// handle different datatypes and vector widths.
+static void DecodeUNPCKLPMask(unsigned NElts,
+ SmallVectorImpl<unsigned> &ShuffleMask) {
+ for (unsigned i = 0; i != NElts/2; ++i) {
+ ShuffleMask.push_back(i); // Reads from dest
+ ShuffleMask.push_back(i+NElts); // Reads from src
+ }
+}
+
+#endif
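A small self-contained sketch of how DecodePSHUFMask above unpacks an immediate, assuming a 4-element PSHUFD with the example immediate 0x1B:

#include <cstdio>
#include <vector>

int main() {
  // Same arithmetic as DecodePSHUFMask: each base-NElts "digit" of the
  // immediate selects one source element for the next result position.
  unsigned NElts = 4, Imm = 0x1B;          // assumed example immediate
  std::vector<unsigned> Mask;
  for (unsigned i = 0; i != NElts; ++i) {
    Mask.push_back(Imm % NElts);
    Imm /= NElts;
  }
  for (unsigned i = 0; i != Mask.size(); ++i)
    std::printf("%u ", Mask[i]);           // prints: 3 2 1 0
  std::printf("\n");
  return 0;
}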
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86Subtarget.cpp b/libclamav/c++/llvm/lib/Target/X86/X86Subtarget.cpp
index adef5bc..0d02e5e 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/libclamav/c++/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -53,9 +53,12 @@ ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const {
if (GV->hasDLLImportLinkage())
return X86II::MO_DLLIMPORT;
- // Materializable GVs (in JIT lazy compilation mode) do not require an
- // extra load from stub.
- bool isDecl = GV->isDeclaration() && !GV->isMaterializable();
+ // Determine whether this is a reference to a definition or a declaration.
+ // Materializable GVs (in JIT lazy compilation mode) do not require an extra
+ // load from stub.
+ bool isDecl = GV->hasAvailableExternallyLinkage();
+ if (GV->isDeclaration() && !GV->isMaterializable())
+ isDecl = true;
// X86-64 in PIC mode.
if (isPICStyleRIPRel()) {
@@ -70,7 +73,7 @@ ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const {
if (GV->hasDefaultVisibility() &&
(isDecl || GV->isWeakForLinker()))
return X86II::MO_GOTPCREL;
- } else {
+ } else if (!isTargetWin64()) {
assert(isTargetELF() && "Unknown rip-relative target");
// Extra load is needed for all externally visible.
@@ -257,8 +260,10 @@ void X86Subtarget::AutoDetectSubtargetFeatures() {
bool IsIntel = memcmp(text.c, "GenuineIntel", 12) == 0;
bool IsAMD = !IsIntel && memcmp(text.c, "AuthenticAMD", 12) == 0;
- HasFMA3 = IsIntel && ((ECX >> 12) & 0x1);
- HasAVX = ((ECX >> 28) & 0x1);
+ HasCLMUL = IsIntel && ((ECX >> 1) & 0x1);
+ HasFMA3 = IsIntel && ((ECX >> 12) & 0x1);
+ HasAVX = ((ECX >> 28) & 0x1);
+ HasAES = IsIntel && ((ECX >> 25) & 0x1);
if (IsIntel || IsAMD) {
// Determine if bit test memory instructions are slow.
@@ -266,6 +271,9 @@ void X86Subtarget::AutoDetectSubtargetFeatures() {
unsigned Model = 0;
DetectFamilyModel(EAX, Family, Model);
IsBTMemSlow = IsAMD || (Family == 6 && Model >= 13);
+ // If it's Nehalem, unaligned memory access is fast.
+ if (Family == 15 && Model == 26)
+ IsUAMemFast = true;
GetCpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
HasX86_64 = (EDX >> 29) & 0x1;
@@ -283,16 +291,18 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &FS,
, HasX86_64(false)
, HasSSE4A(false)
, HasAVX(false)
+ , HasAES(false)
+ , HasCLMUL(false)
, HasFMA3(false)
, HasFMA4(false)
, IsBTMemSlow(false)
+ , IsUAMemFast(false)
, HasVectorUAMem(false)
- , DarwinVers(0)
, stackAlignment(8)
// FIXME: this is a known good value for Yonah. How about others?
, MaxInlineSizeThreshold(128)
- , Is64Bit(is64Bit)
- , TargetType(isELF) { // Default to ELF unless otherwise specified.
+ , TargetTriple(TT)
+ , Is64Bit(is64Bit) {
// default to hard float ABI
if (FloatABIType == FloatABI::Default)
@@ -315,58 +325,47 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &FS,
// If requesting codegen for X86-64, make sure that 64-bit features
// are enabled.
- if (Is64Bit)
+ if (Is64Bit) {
HasX86_64 = true;
+ // All 64-bit cpus have cmov support.
+ HasCMov = true;
+ }
+
DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
<< ", 3DNowLevel " << X863DNowLevel
<< ", 64bit " << HasX86_64 << "\n");
assert((!Is64Bit || HasX86_64) &&
"64-bit code requested on a subtarget that doesn't support it!");
- // Set the boolean corresponding to the current target triple, or the default
- // if one cannot be determined, to true.
- if (TT.length() > 5) {
- size_t Pos;
- if ((Pos = TT.find("-darwin")) != std::string::npos) {
- TargetType = isDarwin;
-
- // Compute the darwin version number.
- if (isdigit(TT[Pos+7]))
- DarwinVers = atoi(&TT[Pos+7]);
- else
- DarwinVers = 8; // Minimum supported darwin is Tiger.
- } else if (TT.find("linux") != std::string::npos) {
- // Linux doesn't imply ELF, but we don't currently support anything else.
- TargetType = isELF;
- } else if (TT.find("cygwin") != std::string::npos) {
- TargetType = isCygwin;
- } else if (TT.find("mingw") != std::string::npos) {
- TargetType = isMingw;
- } else if (TT.find("win32") != std::string::npos) {
- TargetType = isWindows;
- } else if (TT.find("windows") != std::string::npos) {
- TargetType = isWindows;
- } else if (TT.find("-cl") != std::string::npos) {
- TargetType = isDarwin;
- DarwinVers = 9;
- }
- }
-
// Stack alignment is 16 bytes on Darwin (both 32 and 64 bit) and for all 64
// bit targets.
- if (TargetType == isDarwin || Is64Bit)
+ if (isTargetDarwin() || Is64Bit)
stackAlignment = 16;
if (StackAlignment)
stackAlignment = StackAlignment;
}
-bool X86Subtarget::enablePostRAScheduler(
- CodeGenOpt::Level OptLevel,
- TargetSubtarget::AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const {
- Mode = TargetSubtarget::ANTIDEP_CRITICAL;
- CriticalPathRCs.clear();
- return OptLevel >= CodeGenOpt::Aggressive;
+/// IsCalleePop - Determines whether the callee is required to pop its
+/// own arguments. Callee pop is necessary to support tail calls.
+bool X86Subtarget::IsCalleePop(bool IsVarArg,
+ CallingConv::ID CallingConv) const {
+ if (IsVarArg)
+ return false;
+
+ switch (CallingConv) {
+ default:
+ return false;
+ case CallingConv::X86_StdCall:
+ return !is64Bit();
+ case CallingConv::X86_FastCall:
+ return !is64Bit();
+ case CallingConv::X86_ThisCall:
+ return !is64Bit();
+ case CallingConv::Fast:
+ return GuaranteedTailCallOpt;
+ case CallingConv::GHC:
+ return GuaranteedTailCallOpt;
+ }
}
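The new CPUID-based feature flags boil down to single bit tests on CPUID.1:ECX; a minimal sketch with an assumed raw ECX value (not the real GetCpuIDAndInfo helper):

#include <cstdio>

int main() {
  // Bit positions match the detection code above: 1=PCLMULQDQ, 12=FMA,
  // 25=AES-NI, 28=AVX. The value below is a made-up sample with all four set.
  unsigned ECX = 0x12001002u;
  bool HasCLMUL = (ECX >> 1)  & 0x1;
  bool HasFMA3  = (ECX >> 12) & 0x1;
  bool HasAES   = (ECX >> 25) & 0x1;
  bool HasAVX   = (ECX >> 28) & 0x1;
  std::printf("clmul=%d fma3=%d aes=%d avx=%d\n",  // prints: 1 1 1 1
              HasCLMUL, HasFMA3, HasAES, HasAVX);
  return 0;
}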
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86Subtarget.h b/libclamav/c++/llvm/lib/Target/X86/X86Subtarget.h
index 594a470..0ee91ab 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86Subtarget.h
+++ b/libclamav/c++/llvm/lib/Target/X86/X86Subtarget.h
@@ -14,7 +14,9 @@
#ifndef X86SUBTARGET_H
#define X86SUBTARGET_H
+#include "llvm/ADT/Triple.h"
#include "llvm/Target/TargetSubtarget.h"
+#include "llvm/CallingConv.h"
#include <string>
namespace llvm {
@@ -69,6 +71,12 @@ protected:
/// HasAVX - Target has AVX instructions
bool HasAVX;
+ /// HasAES - Target has AES instructions
+ bool HasAES;
+
+ /// HasCLMUL - Target has carry-less multiplication
+ bool HasCLMUL;
+
/// HasFMA3 - Target has 3-operand fused multiply-add
bool HasFMA3;
@@ -78,15 +86,13 @@ protected:
/// IsBTMemSlow - True if BT (bit test) of memory instructions are slow.
bool IsBTMemSlow;
+ /// IsUAMemFast - True if unaligned memory access is fast.
+ bool IsUAMemFast;
+
/// HasVectorUAMem - True if SIMD operations can have unaligned memory
- /// operands. This may require setting a feature bit in the
- /// processor.
+ /// operands. This may require setting a feature bit in the processor.
bool HasVectorUAMem;
- /// DarwinVers - Nonzero if this is a darwin platform: the numeric
- /// version of the platform, e.g. 8 = 10.4 (Tiger), 9 = 10.5 (Leopard), etc.
- unsigned char DarwinVers; // Is any darwin-x86 platform.
-
/// stackAlignment - The minimum alignment known to hold for the stack frame on
/// entry to the function and which must be maintained by every function.
unsigned stackAlignment;
@@ -94,6 +100,9 @@ protected:
/// Max. memset / memcpy size that is turned into rep/movs, rep/stos ops.
///
unsigned MaxInlineSizeThreshold;
+
+ /// TargetTriple - What processor and OS we're targeting.
+ Triple TargetTriple;
private:
/// Is64Bit - True if the processor supports 64-bit instructions and
@@ -101,9 +110,6 @@ private:
bool Is64Bit;
public:
- enum {
- isELF, isCygwin, isDarwin, isWindows, isMingw
- } TargetType;
/// This constructor initializes the data members to match that
/// of the specified triple.
@@ -133,6 +139,7 @@ public:
PICStyles::Style getPICStyle() const { return PICStyle; }
void setPICStyle(PICStyles::Style Style) { PICStyle = Style; }
+ bool hasCMov() const { return HasCMov; }
bool hasMMX() const { return X86SSELevel >= MMX; }
bool hasSSE1() const { return X86SSELevel >= SSE1; }
bool hasSSE2() const { return X86SSELevel >= SSE2; }
@@ -144,29 +151,43 @@ public:
bool has3DNow() const { return X863DNowLevel >= ThreeDNow; }
bool has3DNowA() const { return X863DNowLevel >= ThreeDNowA; }
bool hasAVX() const { return HasAVX; }
+ bool hasAES() const { return HasAES; }
+ bool hasCLMUL() const { return HasCLMUL; }
bool hasFMA3() const { return HasFMA3; }
bool hasFMA4() const { return HasFMA4; }
bool isBTMemSlow() const { return IsBTMemSlow; }
+ bool isUnalignedMemAccessFast() const { return IsUAMemFast; }
bool hasVectorUAMem() const { return HasVectorUAMem; }
- bool isTargetDarwin() const { return TargetType == isDarwin; }
- bool isTargetELF() const { return TargetType == isELF; }
+ bool isTargetDarwin() const { return TargetTriple.getOS() == Triple::Darwin; }
+
+ // ELF is a reasonably sane default and the only other X86 targets we
+ // support are Darwin and Windows. Just use "not those".
+ bool isTargetELF() const {
+ return !isTargetDarwin() && !isTargetWindows() && !isTargetCygMing();
+ }
+ bool isTargetLinux() const { return TargetTriple.getOS() == Triple::Linux; }
- bool isTargetWindows() const { return TargetType == isWindows; }
- bool isTargetMingw() const { return TargetType == isMingw; }
- bool isTargetCygwin() const { return TargetType == isCygwin; }
+ bool isTargetWindows() const { return TargetTriple.getOS() == Triple::Win32; }
+ bool isTargetMingw() const {
+ return TargetTriple.getOS() == Triple::MinGW32 ||
+ TargetTriple.getOS() == Triple::MinGW64; }
+ bool isTargetCygwin() const { return TargetTriple.getOS() == Triple::Cygwin; }
bool isTargetCygMing() const {
- return TargetType == isMingw || TargetType == isCygwin;
+ return isTargetMingw() || isTargetCygwin();
}
-
+
/// isTargetCOFF - Return true if this is any COFF/Windows target variant.
bool isTargetCOFF() const {
- return TargetType == isMingw || TargetType == isCygwin ||
- TargetType == isWindows;
+ return isTargetMingw() || isTargetCygwin() || isTargetWindows();
}
bool isTargetWin64() const {
- return Is64Bit && (TargetType == isMingw || TargetType == isWindows);
+ return Is64Bit && (isTargetMingw() || isTargetWindows());
+ }
+
+ bool isTargetWin32() const {
+ return !Is64Bit && (isTargetMingw() || isTargetWindows());
}
std::string getDataLayout() const {
@@ -200,7 +221,10 @@ public:
/// getDarwinVers - Return the darwin version number, 8 = Tiger, 9 = Leopard,
/// 10 = Snow Leopard, etc.
- unsigned getDarwinVers() const { return DarwinVers; }
+ unsigned getDarwinVers() const {
+ if (isTargetDarwin()) return TargetTriple.getDarwinMajorNumber();
+ return 0;
+ }
/// ClassifyGlobalReference - Classify a global variable reference for the
/// current subtarget according to how we should reference it in a non-pcrel
@@ -230,11 +254,8 @@ public:
/// should be attempted.
unsigned getSpecialAddressLatency() const;
- /// enablePostRAScheduler - X86 target is enabling post-alloc scheduling
- /// at 'More' optimization level.
- bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
- TargetSubtarget::AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const;
+ /// IsCalleePop - Test whether a function should pop its own arguments.
+ bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
};
} // End llvm namespace
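For context, the old TargetType enum is replaced by queries on llvm::Triple; a small sketch of the same classification done directly on a triple string (the example triple is assumed):

#include "llvm/ADT/Triple.h"
#include <cstdio>

int main() {
  // Same checks as the new isTargetDarwin()/isTargetWindows() predicates.
  llvm::Triple TT("x86_64-apple-darwin10");   // assumed example triple
  bool IsDarwin = TT.getOS() == llvm::Triple::Darwin;
  bool IsWin32  = TT.getOS() == llvm::Triple::Win32;
  unsigned DarwinVers = IsDarwin ? TT.getDarwinMajorNumber() : 0;
  std::printf("darwin=%d win32=%d darwinMajor=%u\n",  // prints: 1 0 10
              IsDarwin, IsWin32, DarwinVers);
  return 0;
}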
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86TargetMachine.cpp b/libclamav/c++/llvm/lib/Target/X86/X86TargetMachine.cpp
index 98f0dc0..ce8636e 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/libclamav/c++/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -17,12 +17,14 @@
#include "llvm/PassManager.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegistry.h"
using namespace llvm;
-static const MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT) {
+static MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT) {
Triple TheTriple(TT);
switch (TheTriple.getOS()) {
case Triple::Darwin:
@@ -37,6 +39,25 @@ static const MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT) {
}
}
+static MCStreamer *createMCStreamer(const Target &T, const std::string &TT,
+ MCContext &Ctx, TargetAsmBackend &TAB,
+ raw_ostream &_OS,
+ MCCodeEmitter *_Emitter,
+ bool RelaxAll) {
+ Triple TheTriple(TT);
+ switch (TheTriple.getOS()) {
+ case Triple::Darwin:
+ return createMachOStreamer(Ctx, TAB, _OS, _Emitter, RelaxAll);
+ case Triple::MinGW32:
+ case Triple::MinGW64:
+ case Triple::Cygwin:
+ case Triple::Win32:
+ return createWinCOFFStreamer(Ctx, TAB, *_Emitter, _OS, RelaxAll);
+ default:
+ return createELFStreamer(Ctx, TAB, _OS, _Emitter, RelaxAll);
+ }
+}
+
extern "C" void LLVMInitializeX86Target() {
// Register the target.
RegisterTargetMachine<X86_32TargetMachine> X(TheX86_32Target);
@@ -57,6 +78,12 @@ extern "C" void LLVMInitializeX86Target() {
createX86_32AsmBackend);
TargetRegistry::RegisterAsmBackend(TheX86_64Target,
createX86_64AsmBackend);
+
+ // Register the object streamer.
+ TargetRegistry::RegisterObjectStreamer(TheX86_32Target,
+ createMCStreamer);
+ TargetRegistry::RegisterObjectStreamer(TheX86_64Target,
+ createMCStreamer);
}
@@ -82,17 +109,24 @@ X86TargetMachine::X86TargetMachine(const Target &T, const std::string &TT,
Subtarget.getStackAlignment(),
(Subtarget.isTargetWin64() ? -40 :
(Subtarget.is64Bit() ? -8 : -4))),
- InstrInfo(*this), JITInfo(*this), TLInfo(*this), ELFWriterInfo(*this) {
+ InstrInfo(*this), JITInfo(*this), TLInfo(*this), TSInfo(*this),
+ ELFWriterInfo(*this) {
DefRelocModel = getRelocationModel();
-
+
// If no relocation model was picked, default as appropriate for the target.
if (getRelocationModel() == Reloc::Default) {
- if (!Subtarget.isTargetDarwin())
- setRelocationModel(Reloc::Static);
- else if (Subtarget.is64Bit())
+ // Darwin defaults to PIC in 64 bit mode and dynamic-no-pic in 32 bit mode.
+ // Win64 requires rip-rel addressing, thus we force it to PIC. Otherwise we
+ // use static relocation model by default.
+ if (Subtarget.isTargetDarwin()) {
+ if (Subtarget.is64Bit())
+ setRelocationModel(Reloc::PIC_);
+ else
+ setRelocationModel(Reloc::DynamicNoPIC);
+ } else if (Subtarget.isTargetWin64())
setRelocationModel(Reloc::PIC_);
else
- setRelocationModel(Reloc::DynamicNoPIC);
+ setRelocationModel(Reloc::Static);
}
assert(getRelocationModel() != Reloc::Default &&
@@ -115,29 +149,27 @@ X86TargetMachine::X86TargetMachine(const Target &T, const std::string &TT,
Subtarget.isTargetDarwin() &&
is64Bit)
setRelocationModel(Reloc::PIC_);
-
+
// Determine the PICStyle based on the target selected.
if (getRelocationModel() == Reloc::Static) {
// Unless we're in PIC or DynamicNoPIC mode, set the PIC style to None.
Subtarget.setPICStyle(PICStyles::None);
+ } else if (Subtarget.is64Bit()) {
+ // PIC in 64 bit mode is always rip-rel.
+ Subtarget.setPICStyle(PICStyles::RIPRel);
} else if (Subtarget.isTargetCygMing()) {
Subtarget.setPICStyle(PICStyles::None);
} else if (Subtarget.isTargetDarwin()) {
- if (Subtarget.is64Bit())
- Subtarget.setPICStyle(PICStyles::RIPRel);
- else if (getRelocationModel() == Reloc::PIC_)
+ if (getRelocationModel() == Reloc::PIC_)
Subtarget.setPICStyle(PICStyles::StubPIC);
else {
assert(getRelocationModel() == Reloc::DynamicNoPIC);
Subtarget.setPICStyle(PICStyles::StubDynamicNoPIC);
}
} else if (Subtarget.isTargetELF()) {
- if (Subtarget.is64Bit())
- Subtarget.setPICStyle(PICStyles::RIPRel);
- else
- Subtarget.setPICStyle(PICStyles::GOT);
+ Subtarget.setPICStyle(PICStyles::GOT);
}
-
+
// Finally, if we have "none" as our PIC style, force to static mode.
if (Subtarget.getPICStyle() == PICStyles::None)
setRelocationModel(Reloc::Static);
@@ -152,8 +184,9 @@ bool X86TargetMachine::addInstSelector(PassManagerBase &PM,
// Install an instruction selector.
PM.add(createX86ISelDag(*this, OptLevel));
- // Install a pass to insert x87 FP_REG_KILL instructions, as needed.
- PM.add(createX87FPRegKillInserterPass());
+ // For 32-bit, prepend instructions to set the "global base reg" for PIC.
+ if (!Subtarget.is64Bit())
+ PM.add(createGlobalBaseRegPass());
return false;
}
@@ -170,6 +203,15 @@ bool X86TargetMachine::addPostRegAlloc(PassManagerBase &PM,
return true; // -print-machineinstr should print after this.
}
+bool X86TargetMachine::addPreEmitPass(PassManagerBase &PM,
+ CodeGenOpt::Level OptLevel) {
+ if (OptLevel != CodeGenOpt::None && Subtarget.hasSSE2()) {
+ PM.add(createSSEDomainFixPass());
+ return true;
+ }
+ return false;
+}
+
bool X86TargetMachine::addCodeEmitter(PassManagerBase &PM,
CodeGenOpt::Level OptLevel,
JITCodeEmitter &JCE) {
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86TargetMachine.h b/libclamav/c++/llvm/lib/Target/X86/X86TargetMachine.h
index 2bb5454..f9fb424 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86TargetMachine.h
+++ b/libclamav/c++/llvm/lib/Target/X86/X86TargetMachine.h
@@ -23,6 +23,7 @@
#include "X86JITInfo.h"
#include "X86Subtarget.h"
#include "X86ISelLowering.h"
+#include "X86SelectionDAGInfo.h"
namespace llvm {
@@ -35,6 +36,7 @@ class X86TargetMachine : public LLVMTargetMachine {
X86InstrInfo InstrInfo;
X86JITInfo JITInfo;
X86TargetLowering TLInfo;
+ X86SelectionDAGInfo TSInfo;
X86ELFWriterInfo ELFWriterInfo;
Reloc::Model DefRelocModel; // Reloc model before it's overridden.
@@ -51,8 +53,11 @@ public:
virtual const TargetFrameInfo *getFrameInfo() const { return &FrameInfo; }
virtual X86JITInfo *getJITInfo() { return &JITInfo; }
virtual const X86Subtarget *getSubtargetImpl() const{ return &Subtarget; }
- virtual X86TargetLowering *getTargetLowering() const {
- return const_cast<X86TargetLowering*>(&TLInfo);
+ virtual const X86TargetLowering *getTargetLowering() const {
+ return &TLInfo;
+ }
+ virtual const X86SelectionDAGInfo *getSelectionDAGInfo() const {
+ return &TSInfo;
}
virtual const X86RegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
@@ -66,6 +71,7 @@ public:
virtual bool addInstSelector(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
virtual bool addPreRegAlloc(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
virtual bool addPostRegAlloc(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
+ virtual bool addPreEmitPass(PassManagerBase &PM, CodeGenOpt::Level OptLevel);
virtual bool addCodeEmitter(PassManagerBase &PM, CodeGenOpt::Level OptLevel,
JITCodeEmitter &JCE);
};
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86TargetObjectFile.cpp b/libclamav/c++/llvm/lib/Target/X86/X86TargetObjectFile.cpp
index 29a0be5..c15dfbb 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86TargetObjectFile.cpp
+++ b/libclamav/c++/llvm/lib/Target/X86/X86TargetObjectFile.cpp
@@ -7,11 +7,12 @@
//
//===----------------------------------------------------------------------===//
-#include "X86MCTargetExpr.h"
#include "X86TargetObjectFile.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/Mangler.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Dwarf.h"
@@ -19,23 +20,22 @@ using namespace llvm;
using namespace dwarf;
const MCExpr *X8664_MachoTargetObjectFile::
-getSymbolForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding) const {
+getExprForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
+ MachineModuleInfo *MMI, unsigned Encoding,
+ MCStreamer &Streamer) const {
// On Darwin/X86-64, we can reference dwarf symbols with foo@GOTPCREL+4, which
// is an indirect pc-relative reference.
if (Encoding & (DW_EH_PE_indirect | DW_EH_PE_pcrel)) {
- SmallString<128> Name;
- Mang->getNameWithPrefix(Name, GV, false);
- const MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
+ const MCSymbol *Sym = Mang->getSymbol(GV);
const MCExpr *Res =
- X86MCTargetExpr::Create(Sym, X86MCTargetExpr::GOTPCREL, getContext());
+ MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOTPCREL, getContext());
const MCExpr *Four = MCConstantExpr::Create(4, getContext());
return MCBinaryExpr::CreateAdd(Res, Four, getContext());
}
return TargetLoweringObjectFileMachO::
- getSymbolForDwarfGlobalReference(GV, Mang, MMI, Encoding);
+ getExprForDwarfGlobalReference(GV, Mang, MMI, Encoding, Streamer);
}
unsigned X8632_ELFTargetObjectFile::getPersonalityEncoding() const {
diff --git a/libclamav/c++/llvm/lib/Target/X86/X86TargetObjectFile.h b/libclamav/c++/llvm/lib/Target/X86/X86TargetObjectFile.h
index 0444417..f2fd49c 100644
--- a/libclamav/c++/llvm/lib/Target/X86/X86TargetObjectFile.h
+++ b/libclamav/c++/llvm/lib/Target/X86/X86TargetObjectFile.h
@@ -17,14 +17,14 @@
namespace llvm {
class X86TargetMachine;
- /// X8664_MachoTargetObjectFile - This TLOF implementation is used for
- /// Darwin/x86-64.
+ /// X8664_MachoTargetObjectFile - This TLOF implementation is used for Darwin
+ /// x86-64.
class X8664_MachoTargetObjectFile : public TargetLoweringObjectFileMachO {
public:
-
virtual const MCExpr *
- getSymbolForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding) const;
+ getExprForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
+ MachineModuleInfo *MMI, unsigned Encoding,
+ MCStreamer &Streamer) const;
};
class X8632_ELFTargetObjectFile : public TargetLoweringObjectFileELF {
diff --git a/libclamav/c++/llvm/lib/Transforms/Hello/Hello.cpp b/libclamav/c++/llvm/lib/Transforms/Hello/Hello.cpp
index 37d7a00..838d550 100644
--- a/libclamav/c++/llvm/lib/Transforms/Hello/Hello.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Hello/Hello.cpp
@@ -25,10 +25,10 @@ namespace {
// Hello - The first implementation, without getAnalysisUsage.
struct Hello : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- Hello() : FunctionPass(&ID) {}
+ Hello() : FunctionPass(ID) {}
virtual bool runOnFunction(Function &F) {
- HelloCounter++;
+ ++HelloCounter;
errs() << "Hello: ";
errs().write_escaped(F.getName()) << '\n';
return false;
@@ -37,16 +37,16 @@ namespace {
}
char Hello::ID = 0;
-static RegisterPass<Hello> X("hello", "Hello World Pass");
+INITIALIZE_PASS(Hello, "hello", "Hello World Pass", false, false);
namespace {
// Hello2 - The second implementation with getAnalysisUsage implemented.
struct Hello2 : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- Hello2() : FunctionPass(&ID) {}
+ Hello2() : FunctionPass(ID) {}
virtual bool runOnFunction(Function &F) {
- HelloCounter++;
+ ++HelloCounter;
errs() << "Hello: ";
errs().write_escaped(F.getName()) << '\n';
return false;
@@ -60,5 +60,6 @@ namespace {
}
char Hello2::ID = 0;
-static RegisterPass<Hello2>
-Y("hello2", "Hello World Pass (with getAnalysisUsage implemented)");
+INITIALIZE_PASS(Hello2, "hello2",
+ "Hello World Pass (with getAnalysisUsage implemented)",
+ false, false);
diff --git a/libclamav/c++/llvm/docs/doxygen.cfg.in b/libclamav/c++/llvm/lib/Transforms/Hello/Hello.exports
similarity index 100%
copy from libclamav/c++/llvm/docs/doxygen.cfg.in
copy to libclamav/c++/llvm/lib/Transforms/Hello/Hello.exports
diff --git a/libclamav/c++/llvm/lib/Transforms/Hello/Makefile b/libclamav/c++/llvm/lib/Transforms/Hello/Makefile
index c5e75d4..f1e3148 100644
--- a/libclamav/c++/llvm/lib/Transforms/Hello/Makefile
+++ b/libclamav/c++/llvm/lib/Transforms/Hello/Makefile
@@ -12,5 +12,13 @@ LIBRARYNAME = LLVMHello
LOADABLE_MODULE = 1
USEDLIBS =
+# If we don't need RTTI or EH, there's no reason to export anything
+# from the hello plugin.
+ifneq ($(REQUIRES_RTTI), 1)
+ifneq ($(REQUIRES_EH), 1)
+EXPORTED_SYMBOL_FILE = $(PROJ_SRC_DIR)/Hello.exports
+endif
+endif
+
include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index 7cb1367..0c77e1f 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -64,10 +64,10 @@ namespace {
CallGraphSCCPass::getAnalysisUsage(AU);
}
- virtual bool runOnSCC(std::vector<CallGraphNode *> &SCC);
+ virtual bool runOnSCC(CallGraphSCC &SCC);
static char ID; // Pass identification, replacement for typeid
explicit ArgPromotion(unsigned maxElements = 3)
- : CallGraphSCCPass(&ID), maxElements(maxElements) {}
+ : CallGraphSCCPass(ID), maxElements(maxElements) {}
/// A vector used to hold the indices of a single GEP instruction
typedef std::vector<uint64_t> IndicesVector;
@@ -84,27 +84,28 @@ namespace {
}
char ArgPromotion::ID = 0;
-static RegisterPass<ArgPromotion>
-X("argpromotion", "Promote 'by reference' arguments to scalars");
+INITIALIZE_PASS(ArgPromotion, "argpromotion",
+ "Promote 'by reference' arguments to scalars", false, false);
Pass *llvm::createArgumentPromotionPass(unsigned maxElements) {
return new ArgPromotion(maxElements);
}
-bool ArgPromotion::runOnSCC(std::vector<CallGraphNode *> &SCC) {
+bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) {
bool Changed = false, LocalChange;
do { // Iterate until we stop promoting from this SCC.
LocalChange = false;
// Attempt to promote arguments from all functions in this SCC.
- for (unsigned i = 0, e = SCC.size(); i != e; ++i)
- if (CallGraphNode *CGN = PromoteArguments(SCC[i])) {
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
+ if (CallGraphNode *CGN = PromoteArguments(*I)) {
LocalChange = true;
- SCC[i] = CGN;
+ SCC.ReplaceNode(*I, CGN);
}
+ }
Changed |= LocalChange; // Remember that we changed something.
} while (LocalChange);
-
+
return Changed;
}
@@ -207,8 +208,8 @@ static bool AllCalleesPassInValidPointerForArgument(Argument *Arg) {
// have direct callees.
for (Value::use_iterator UI = Callee->use_begin(), E = Callee->use_end();
UI != E; ++UI) {
- CallSite CS = CallSite::get(*UI);
- assert(CS.getInstruction() && "Should only have direct calls!");
+ CallSite CS(*UI);
+ assert(CS && "Should only have direct calls!");
if (!IsAlwaysValidPointer(CS.getArgument(ArgNo)))
return false;
@@ -359,19 +360,20 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
IndicesVector Operands;
for (Value::use_iterator UI = Arg->use_begin(), E = Arg->use_end();
UI != E; ++UI) {
+ User *U = *UI;
Operands.clear();
- if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+ if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
if (LI->isVolatile()) return false; // Don't hack volatile loads
Loads.push_back(LI);
// Direct loads are equivalent to a GEP with a zero index and then a load.
Operands.push_back(0);
- } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
+ } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
if (GEP->use_empty()) {
// Dead GEPs cause trouble later. Just remove them if we run into
// them.
getAnalysis<AliasAnalysis>().deleteValue(GEP);
GEP->eraseFromParent();
- // TODO: This runs the above loop over and over again for dead GEPS
+ // TODO: This runs the above loop over and over again for dead GEPs
// Couldn't we just increment the UI iterator earlier and erase the
// use?
return isSafeToPromoteArgument(Arg, isByVal);
@@ -451,12 +453,14 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
// Now check every path from the entry block to the load for transparency.
// To do this, we perform a depth first search on the inverse CFG from the
// loading block.
- for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ BasicBlock *P = *PI;
for (idf_ext_iterator<BasicBlock*, SmallPtrSet<BasicBlock*, 16> >
- I = idf_ext_begin(*PI, TranspBlocks),
- E = idf_ext_end(*PI, TranspBlocks); I != E; ++I)
+ I = idf_ext_begin(P, TranspBlocks),
+ E = idf_ext_end(P, TranspBlocks); I != E; ++I)
if (AA.canBasicBlockModify(**I, Arg, LoadSize))
return false;
+ }
}
// If the path from the entry of the function to each load is free of
@@ -615,14 +619,14 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
// Get a new callgraph node for NF.
CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF);
-
// Loop over all of the callers of the function, transforming the call sites
// to pass in the loaded pointers.
//
SmallVector<Value*, 16> Args;
while (!F->use_empty()) {
- CallSite CS = CallSite::get(F->use_back());
+ CallSite CS(F->use_back());
+ assert(CS.getCalledFunction() == F);
Instruction *Call = CS.getInstruction();
const AttrListPtr &CallPAL = CS.getAttributes();
@@ -660,7 +664,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
// Non-dead argument: insert GEPs and loads as appropriate.
ScalarizeTable &ArgIndices = ScalarizedElements[I];
// Store the Value* version of the indices in here, but declare it now
- // for reuse
+ // for reuse.
std::vector<Value*> Ops;
for (ScalarizeTable::iterator SI = ArgIndices.begin(),
E = ArgIndices.end(); SI != E; ++SI) {
@@ -677,16 +681,20 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
Type::getInt32Ty(F->getContext()) :
Type::getInt64Ty(F->getContext()));
Ops.push_back(ConstantInt::get(IdxTy, *II));
- // Keep track of the type we're currently indexing
+ // Keep track of the type we're currently indexing.
ElTy = cast<CompositeType>(ElTy)->getTypeAtIndex(*II);
}
- // And create a GEP to extract those indices
+ // And create a GEP to extract those indices.
V = GetElementPtrInst::Create(V, Ops.begin(), Ops.end(),
V->getName()+".idx", Call);
Ops.clear();
AA.copyValue(OrigLoad->getOperand(0), V);
}
- Args.push_back(new LoadInst(V, V->getName()+".val", Call));
+ // Since we're replacing a load make sure we take the alignment
+ // of the previous load.
+ LoadInst *newLoad = new LoadInst(V, V->getName()+".val", Call);
+ newLoad->setAlignment(OrigLoad->getAlignment());
+ Args.push_back(newLoad);
AA.copyValue(OrigLoad, Args.back());
}
}
@@ -694,7 +702,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
if (ExtraArgHack)
Args.push_back(Constant::getNullValue(Type::getInt32Ty(F->getContext())));
- // Push any varargs arguments on the list
+ // Push any varargs arguments on the list.
for (; AI != CS.arg_end(); ++AI, ++ArgIndex) {
Args.push_back(*AI);
if (Attributes Attrs = CallPAL.getParamAttributes(ArgIndex))
@@ -868,8 +876,14 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
NF_CGN->stealCalledFunctionsFrom(CG[F]);
- // Now that the old function is dead, delete it.
- delete CG.removeFunctionFromModule(F);
+ // Now that the old function is dead, delete it. If there is a dangling
+ // reference to the CallgraphNode, just leave the dead function around for
+ // someone else to nuke.
+ CallGraphNode *CGN = CG[F];
+ if (CGN->getNumReferences() == 0)
+ delete CG.removeFunctionFromModule(CGN);
+ else
+ F->setLinkage(Function::ExternalLinkage);
return NF_CGN;
}
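A before/after sketch of what this pass does to a by-reference argument, purely illustrative (the function names are made up):

#include <cstdio>

// Before -argpromotion: an internal callee that only loads through its
// pointer argument.
static int square_byref(const int *p) { return *p * *p; }

// After promotion: the load moves to the call site and the value is passed
// directly, which is what DoPromotion() rewrites callers to do.
static int square_byval(int v) { return v * v; }

int main() {
  int x = 7;
  std::printf("%d %d\n", square_byref(&x), square_byval(x));  // prints: 49 49
  return 0;
}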
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/CMakeLists.txt b/libclamav/c++/llvm/lib/Transforms/IPO/CMakeLists.txt
index 92bef3b..65483e8 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/CMakeLists.txt
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/CMakeLists.txt
@@ -23,3 +23,5 @@ add_llvm_library(LLVMipo
StripSymbols.cpp
StructRetPromotion.cpp
)
+
+target_link_libraries (LLVMipo LLVMScalarOpts LLVMInstCombine)
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/ConstantMerge.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/ConstantMerge.cpp
index 3c05f88..64e8d79 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/ConstantMerge.cpp
@@ -19,10 +19,12 @@
#define DEBUG_TYPE "constmerge"
#include "llvm/Transforms/IPO.h"
+#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
@@ -31,7 +33,7 @@ STATISTIC(NumMerged, "Number of global constants merged");
namespace {
struct ConstantMerge : public ModulePass {
static char ID; // Pass identification, replacement for typeid
- ConstantMerge() : ModulePass(&ID) {}
+ ConstantMerge() : ModulePass(ID) {}
// run - For this pass, process all of the globals in the module,
// eliminating duplicate constants.
@@ -41,12 +43,32 @@ namespace {
}
char ConstantMerge::ID = 0;
-static RegisterPass<ConstantMerge>
-X("constmerge", "Merge Duplicate Global Constants");
+INITIALIZE_PASS(ConstantMerge, "constmerge",
+ "Merge Duplicate Global Constants", false, false);
ModulePass *llvm::createConstantMergePass() { return new ConstantMerge(); }
+
+
+/// Find values that are marked as llvm.used.
+static void FindUsedValues(GlobalVariable *LLVMUsed,
+ SmallPtrSet<const GlobalValue*, 8> &UsedValues) {
+ if (LLVMUsed == 0) return;
+ ConstantArray *Inits = dyn_cast<ConstantArray>(LLVMUsed->getInitializer());
+ if (Inits == 0) return;
+
+ for (unsigned i = 0, e = Inits->getNumOperands(); i != e; ++i)
+ if (GlobalValue *GV =
+ dyn_cast<GlobalValue>(Inits->getOperand(i)->stripPointerCasts()))
+ UsedValues.insert(GV);
+}
+
bool ConstantMerge::runOnModule(Module &M) {
+ // Find all the globals that are marked "used". These cannot be merged.
+ SmallPtrSet<const GlobalValue*, 8> UsedGlobals;
+ FindUsedValues(M.getGlobalVariable("llvm.used"), UsedGlobals);
+ FindUsedValues(M.getGlobalVariable("llvm.compiler.used"), UsedGlobals);
+
// Map unique constant/section pairs to globals. We don't want to merge
// globals in different sections.
DenseMap<Constant*, GlobalVariable*> CMap;
@@ -79,9 +101,13 @@ bool ConstantMerge::runOnModule(Module &M) {
// Only process constants with initializers in the default address space.
if (!GV->isConstant() ||!GV->hasDefinitiveInitializer() ||
- GV->getType()->getAddressSpace() != 0 || !GV->getSection().empty())
+ GV->getType()->getAddressSpace() != 0 || !GV->getSection().empty() ||
+ // Don't touch values marked with attribute(used).
+ UsedGlobals.count(GV))
continue;
+
+
Constant *Init = GV->getInitializer();
// Check to see if the initializer is already known.
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
index f386ed7..47df235 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -50,7 +50,7 @@ namespace {
/// argument. Used so that arguments and return values can be used
/// interchangably.
struct RetOrArg {
- RetOrArg(const Function* F, unsigned Idx, bool IsArg) : F(F), Idx(Idx),
+ RetOrArg(const Function *F, unsigned Idx, bool IsArg) : F(F), Idx(Idx),
IsArg(IsArg) {}
const Function *F;
unsigned Idx;
@@ -72,7 +72,7 @@ namespace {
}
std::string getDescription() const {
- return std::string((IsArg ? "Argument #" : "Return value #"))
+ return std::string((IsArg ? "Argument #" : "Return value #"))
+ utostr(Idx) + " of function " + F->getNameStr();
}
};
@@ -120,20 +120,25 @@ namespace {
typedef SmallVector<RetOrArg, 5> UseVector;
+ protected:
+ // DAH uses this to specify a different ID.
+ explicit DAE(char &ID) : ModulePass(ID) {}
+
public:
static char ID; // Pass identification, replacement for typeid
- DAE() : ModulePass(&ID) {}
+ DAE() : ModulePass(ID) {}
+
bool runOnModule(Module &M);
virtual bool ShouldHackArguments() const { return false; }
private:
Liveness MarkIfNotLive(RetOrArg Use, UseVector &MaybeLiveUses);
- Liveness SurveyUse(Value::use_iterator U, UseVector &MaybeLiveUses,
+ Liveness SurveyUse(Value::const_use_iterator U, UseVector &MaybeLiveUses,
unsigned RetValNum = 0);
- Liveness SurveyUses(Value *V, UseVector &MaybeLiveUses);
+ Liveness SurveyUses(const Value *V, UseVector &MaybeLiveUses);
- void SurveyFunction(Function &F);
+ void SurveyFunction(const Function &F);
void MarkValue(const RetOrArg &RA, Liveness L,
const UseVector &MaybeLiveUses);
void MarkLive(const RetOrArg &RA);
@@ -146,8 +151,7 @@ namespace {
char DAE::ID = 0;
-static RegisterPass<DAE>
-X("deadargelim", "Dead Argument Elimination");
+INITIALIZE_PASS(DAE, "deadargelim", "Dead Argument Elimination", false, false);
namespace {
/// DAH - DeadArgumentHacking pass - Same as dead argument elimination, but
@@ -155,13 +159,16 @@ namespace {
/// by bugpoint.
struct DAH : public DAE {
static char ID;
+ DAH() : DAE(ID) {}
+
virtual bool ShouldHackArguments() const { return true; }
};
}
char DAH::ID = 0;
-static RegisterPass<DAH>
-Y("deadarghaX0r", "Dead Argument Hacking (BUGPOINT USE ONLY; DO NOT USE)");
+INITIALIZE_PASS(DAH, "deadarghaX0r",
+ "Dead Argument Hacking (BUGPOINT USE ONLY; DO NOT USE)",
+ false, false);
/// createDeadArgEliminationPass - This pass removes arguments from functions
/// which are not used by the body of the function.
@@ -196,7 +203,7 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
// Start by computing a new prototype for the function, which is the same as
// the old function, but doesn't have isVarArg set.
const FunctionType *FTy = Fn.getFunctionType();
-
+
std::vector<const Type*> Params(FTy->param_begin(), FTy->param_end());
FunctionType *NFTy = FunctionType::get(FTy->getReturnType(),
Params, false);
@@ -213,11 +220,11 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
//
std::vector<Value*> Args;
while (!Fn.use_empty()) {
- CallSite CS = CallSite::get(Fn.use_back());
+ CallSite CS(Fn.use_back());
Instruction *Call = CS.getInstruction();
// Pass all the same arguments.
- Args.assign(CS.arg_begin(), CS.arg_begin()+NumArgs);
+ Args.assign(CS.arg_begin(), CS.arg_begin() + NumArgs);
// Drop any attributes that were on the vararg arguments.
AttrListPtr PAL = CS.getAttributes();
@@ -225,7 +232,7 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
SmallVector<AttributeWithIndex, 8> AttributesVec;
for (unsigned i = 0; PAL.getSlot(i).Index <= NumArgs; ++i)
AttributesVec.push_back(PAL.getSlot(i));
- if (Attributes FnAttrs = PAL.getFnAttributes())
+ if (Attributes FnAttrs = PAL.getFnAttributes())
AttributesVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
PAL = AttrListPtr::get(AttributesVec.begin(), AttributesVec.end());
}
@@ -243,6 +250,8 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
if (cast<CallInst>(Call)->isTailCall())
cast<CallInst>(New)->setTailCall();
}
+ New->setDebugLoc(Call->getDebugLoc());
+
Args.clear();
if (!Call->use_empty())
@@ -280,7 +289,7 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
/// for void functions and 1 for functions not returning a struct. It returns
/// the number of struct elements for functions returning a struct.
static unsigned NumRetVals(const Function *F) {
- if (F->getReturnType() == Type::getVoidTy(F->getContext()))
+ if (F->getReturnType()->isVoidTy())
return 0;
else if (const StructType *STy = dyn_cast<StructType>(F->getReturnType()))
return STy->getNumElements();
@@ -305,15 +314,15 @@ DAE::Liveness DAE::MarkIfNotLive(RetOrArg Use, UseVector &MaybeLiveUses) {
/// SurveyUse - This looks at a single use of an argument or return value
/// and determines if it should be alive or not. Adds this use to MaybeLiveUses
-/// if it causes the used value to become MaybeAlive.
+/// if it causes the used value to become MaybeLive.
///
/// RetValNum is the return value number to use when this use is used in a
/// return instruction. This is used in the recursion, you should always leave
/// it at 0.
-DAE::Liveness DAE::SurveyUse(Value::use_iterator U, UseVector &MaybeLiveUses,
- unsigned RetValNum) {
- Value *V = *U;
- if (ReturnInst *RI = dyn_cast<ReturnInst>(V)) {
+DAE::Liveness DAE::SurveyUse(Value::const_use_iterator U,
+ UseVector &MaybeLiveUses, unsigned RetValNum) {
+ const User *V = *U;
+ if (const ReturnInst *RI = dyn_cast<ReturnInst>(V)) {
// The value is returned from a function. It's only live when the
// function's return value is live. We use RetValNum here, for the case
// that U is really a use of an insertvalue instruction that uses the
@@ -322,7 +331,7 @@ DAE::Liveness DAE::SurveyUse(Value::use_iterator U, UseVector &MaybeLiveUses,
// We might be live, depending on the liveness of Use.
return MarkIfNotLive(Use, MaybeLiveUses);
}
- if (InsertValueInst *IV = dyn_cast<InsertValueInst>(V)) {
+ if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(V)) {
if (U.getOperandNo() != InsertValueInst::getAggregateOperandIndex()
&& IV->hasIndices())
// The use we are examining is inserted into an aggregate. Our liveness
@@ -334,7 +343,7 @@ DAE::Liveness DAE::SurveyUse(Value::use_iterator U, UseVector &MaybeLiveUses,
// we don't change RetValNum, but do survey all our uses.
Liveness Result = MaybeLive;
- for (Value::use_iterator I = IV->use_begin(),
+ for (Value::const_use_iterator I = IV->use_begin(),
E = V->use_end(); I != E; ++I) {
Result = SurveyUse(I, MaybeLiveUses, RetValNum);
if (Result == Live)
@@ -342,24 +351,24 @@ DAE::Liveness DAE::SurveyUse(Value::use_iterator U, UseVector &MaybeLiveUses,
}
return Result;
}
- CallSite CS = CallSite::get(V);
- if (CS.getInstruction()) {
- Function *F = CS.getCalledFunction();
+
+ if (ImmutableCallSite CS = V) {
+ const Function *F = CS.getCalledFunction();
if (F) {
// Used in a direct call.
-
+
// Find the argument number. We know for sure that this use is an
// argument, since if it was the function argument this would be an
// indirect call and then we know we can't be looking at a value of the
// label type (for the invoke instruction).
- unsigned ArgNo = CS.getArgumentNo(U.getOperandNo());
+ unsigned ArgNo = CS.getArgumentNo(U);
if (ArgNo >= F->getFunctionType()->getNumParams())
// The value is passed in through a vararg! Must be live.
return Live;
- assert(CS.getArgument(ArgNo)
- == CS.getInstruction()->getOperand(U.getOperandNo())
+ assert(CS.getArgument(ArgNo)
+ == CS->getOperand(U.getOperandNo())
&& "Argument is not where we expected it");
// Value passed to a normal call. It's only live when the corresponding
@@ -378,11 +387,11 @@ DAE::Liveness DAE::SurveyUse(Value::use_iterator U, UseVector &MaybeLiveUses,
/// Adds all uses that cause the result to be MaybeLive to MaybeLiveRetUses. If
/// the result is Live, MaybeLiveUses might be modified but its content should
/// be ignored (since it might not be complete).
-DAE::Liveness DAE::SurveyUses(Value *V, UseVector &MaybeLiveUses) {
+DAE::Liveness DAE::SurveyUses(const Value *V, UseVector &MaybeLiveUses) {
// Assume it's dead (which will only hold if there are no uses at all..).
Liveness Result = MaybeLive;
// Check each use.
- for (Value::use_iterator I = V->use_begin(),
+ for (Value::const_use_iterator I = V->use_begin(),
E = V->use_end(); I != E; ++I) {
Result = SurveyUse(I, MaybeLiveUses);
if (Result == Live)
@@ -399,7 +408,7 @@ DAE::Liveness DAE::SurveyUses(Value *V, UseVector &MaybeLiveUses) {
// We consider arguments of non-internal functions to be intrinsically alive as
// well as arguments to functions which have their "address taken".
//
-void DAE::SurveyFunction(Function &F) {
+void DAE::SurveyFunction(const Function &F) {
unsigned RetCount = NumRetVals(&F);
// Assume all return values are dead
typedef SmallVector<Liveness, 5> RetVals;
@@ -411,8 +420,8 @@ void DAE::SurveyFunction(Function &F) {
// MaybeLive. Initialized to a list of RetCount empty lists.
RetUses MaybeLiveRetUses(RetCount);
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
- if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator()))
+ for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
+ if (const ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator()))
if (RI->getNumOperands() != 0 && RI->getOperand(0)->getType()
!= F.getFunctionType()->getReturnType()) {
// We don't support old style multiple return values.
@@ -431,17 +440,18 @@ void DAE::SurveyFunction(Function &F) {
unsigned NumLiveRetVals = 0;
const Type *STy = dyn_cast<StructType>(F.getReturnType());
// Loop all uses of the function.
- for (Value::use_iterator I = F.use_begin(), E = F.use_end(); I != E; ++I) {
+ for (Value::const_use_iterator I = F.use_begin(), E = F.use_end();
+ I != E; ++I) {
// If the function is PASSED IN as an argument, its address has been
// taken.
- CallSite CS = CallSite::get(*I);
- if (!CS.getInstruction() || !CS.isCallee(I)) {
+ ImmutableCallSite CS(*I);
+ if (!CS || !CS.isCallee(I)) {
MarkLive(F);
return;
}
// If this use is anything other than a call site, the function is alive.
- Instruction *TheCall = CS.getInstruction();
+ const Instruction *TheCall = CS.getInstruction();
if (!TheCall) { // Not a direct call site?
MarkLive(F);
return;
@@ -454,9 +464,9 @@ void DAE::SurveyFunction(Function &F) {
if (NumLiveRetVals != RetCount) {
if (STy) {
// Check all uses of the return value.
- for (Value::use_iterator I = TheCall->use_begin(),
+ for (Value::const_use_iterator I = TheCall->use_begin(),
E = TheCall->use_end(); I != E; ++I) {
- ExtractValueInst *Ext = dyn_cast<ExtractValueInst>(*I);
+ const ExtractValueInst *Ext = dyn_cast<ExtractValueInst>(*I);
if (Ext && Ext->hasIndices()) {
// This use uses a part of our return value, survey the uses of
// that part and store the results for this index only.
@@ -493,7 +503,7 @@ void DAE::SurveyFunction(Function &F) {
// Now, check all of our arguments.
unsigned i = 0;
UseVector MaybeLiveArgUses;
- for (Function::arg_iterator AI = F.arg_begin(),
+ for (Function::const_arg_iterator AI = F.arg_begin(),
E = F.arg_end(); AI != E; ++AI, ++i) {
// See what the effect of this use is (recording any uses that cause
// MaybeLive in MaybeLiveArgUses).
@@ -531,14 +541,14 @@ void DAE::MarkValue(const RetOrArg &RA, Liveness L,
/// values (according to Uses) live as well.
void DAE::MarkLive(const Function &F) {
DEBUG(dbgs() << "DAE - Intrinsically live fn: " << F.getName() << "\n");
- // Mark the function as live.
- LiveFunctions.insert(&F);
- // Mark all arguments as live.
- for (unsigned i = 0, e = F.arg_size(); i != e; ++i)
- PropagateLiveness(CreateArg(&F, i));
- // Mark all return values as live.
- for (unsigned i = 0, e = NumRetVals(&F); i != e; ++i)
- PropagateLiveness(CreateRet(&F, i));
+ // Mark the function as live.
+ LiveFunctions.insert(&F);
+ // Mark all arguments as live.
+ for (unsigned i = 0, e = F.arg_size(); i != e; ++i)
+ PropagateLiveness(CreateArg(&F, i));
+ // Mark all return values as live.
+ for (unsigned i = 0, e = NumRetVals(&F); i != e; ++i)
+ PropagateLiveness(CreateRet(&F, i));
}
/// MarkLive - Mark the given return value or argument as live. Additionally,
@@ -599,12 +609,12 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
const Type *RetTy = FTy->getReturnType();
const Type *NRetTy = NULL;
unsigned RetCount = NumRetVals(F);
-
+
// -1 means unused, other numbers are the new index
SmallVector<int, 5> NewRetIdxs(RetCount, -1);
std::vector<const Type*> RetTypes;
- if (RetTy == Type::getVoidTy(F->getContext())) {
- NRetTy = Type::getVoidTy(F->getContext());
+ if (RetTy->isVoidTy()) {
+ NRetTy = RetTy;
} else {
const StructType *STy = dyn_cast<StructType>(RetTy);
if (STy)
@@ -653,10 +663,10 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
// values. Otherwise, ensure that we don't have any conflicting attributes
// here. Currently, this should not be possible, but special handling might be
// required when new return value attributes are added.
- if (NRetTy == Type::getVoidTy(F->getContext()))
+ if (NRetTy->isVoidTy())
RAttrs &= ~Attribute::typeIncompatible(NRetTy);
else
- assert((RAttrs & Attribute::typeIncompatible(NRetTy)) == 0
+ assert((RAttrs & Attribute::typeIncompatible(NRetTy)) == 0
&& "Return attributes no longer compatible?");
if (RAttrs)
@@ -686,27 +696,15 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
}
}
- if (FnAttrs != Attribute::None)
+ if (FnAttrs != Attribute::None)
AttributesVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
// Reconstruct the AttributesList based on the vector we constructed.
- AttrListPtr NewPAL = AttrListPtr::get(AttributesVec.begin(), AttributesVec.end());
-
- // Work around LLVM bug PR56: the CWriter cannot emit varargs functions which
- // have zero fixed arguments.
- //
- // Note that we apply this hack for a vararg fuction that does not have any
- // arguments anymore, but did have them before (so don't bother fixing
- // functions that were already broken wrt CWriter).
- bool ExtraArgHack = false;
- if (Params.empty() && FTy->isVarArg() && FTy->getNumParams() != 0) {
- ExtraArgHack = true;
- Params.push_back(Type::getInt32Ty(F->getContext()));
- }
+ AttrListPtr NewPAL = AttrListPtr::get(AttributesVec.begin(),
+ AttributesVec.end());
// Create the new function type based on the recomputed parameters.
- FunctionType *NFTy = FunctionType::get(NRetTy, Params,
- FTy->isVarArg());
+ FunctionType *NFTy = FunctionType::get(NRetTy, Params, FTy->isVarArg());
// No change?
if (NFTy == FTy)
@@ -726,7 +724,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
//
std::vector<Value*> Args;
while (!F->use_empty()) {
- CallSite CS = CallSite::get(F->use_back());
+ CallSite CS(F->use_back());
Instruction *Call = CS.getInstruction();
AttributesVec.clear();
@@ -754,9 +752,6 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
AttributesVec.push_back(AttributeWithIndex::get(Args.size(), Attrs));
}
- if (ExtraArgHack)
- Args.push_back(UndefValue::get(Type::getInt32Ty(F->getContext())));
-
// Push any varargs arguments on the list. Don't forget their attributes.
for (CallSite::arg_iterator E = CS.arg_end(); I != E; ++I, ++i) {
Args.push_back(*I);
@@ -784,6 +779,8 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
if (cast<CallInst>(Call)->isTailCall())
cast<CallInst>(New)->setTailCall();
}
+ New->setDebugLoc(Call->getDebugLoc());
+
Args.clear();
if (!Call->use_empty()) {
@@ -791,7 +788,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
// Return type not changed? Just replace users then.
Call->replaceAllUsesWith(New);
New->takeName(Call);
- } else if (New->getType() == Type::getVoidTy(F->getContext())) {
+ } else if (New->getType()->isVoidTy()) {
// Our return value has uses, but they will get removed later on.
// Replace by null for now.
Call->replaceAllUsesWith(Constant::getNullValue(Call->getType()));
@@ -805,7 +802,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
while (isa<PHINode>(IP)) ++IP;
InsertPt = IP;
}
-
+
// We used to return a struct. Instead of doing smart stuff with all the
// uses of this struct, we will just rebuild it using
// extract/insertvalue chaining and let instcombine clean that up.
@@ -867,7 +864,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
Value *RetVal;
- if (NFTy->getReturnType() == Type::getVoidTy(F->getContext())) {
+ if (NFTy->getReturnType()->isVoidTy()) {
RetVal = 0;
} else {
assert (RetTy->isStructTy());
@@ -929,11 +926,11 @@ bool DAE::runOnModule(Module &M) {
DEBUG(dbgs() << "DAE - Determining liveness\n");
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
SurveyFunction(*I);
-
+
// Now, remove all dead arguments and return values from each function in
- // turn
+ // turn.
for (Module::iterator I = M.begin(), E = M.end(); I != E; ) {
- // Increment now, because the function will probably get removed (ie
+ // Increment now, because the function will probably get removed (ie.
// replaced by a new one).
Function *F = I++;
Changed |= RemoveDeadStuffFromFunction(F);
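
Editorial note, not part of the patch: the DeadArgumentElimination hunks above repeatedly swap explicit Type::getVoidTy() comparisons for isVoidTy() and build (Immutable)CallSite objects directly instead of calling CallSite::get(), while const-qualifying the use iterators. A minimal sketch of those idioms, assuming the LLVM 2.8 headers bundled under libclamav/c++/llvm; the helper name is hypothetical.

#include "llvm/Function.h"
#include "llvm/Support/CallSite.h"
using namespace llvm;

// True when U is a direct call (or invoke) whose callee returns void.
static bool isDirectCallToVoidFn(const User *U) {
  ImmutableCallSite CS(U);               // replaces CallSite::get(V)
  if (!CS)
    return false;                        // not a call or invoke instruction
  const Function *F = CS.getCalledFunction();
  return F && F->getReturnType()->isVoidTy();  // replaces == Type::getVoidTy(Ctx)
}
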
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/DeadTypeElimination.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/DeadTypeElimination.cpp
index 662fbb5..5dc50c5 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/DeadTypeElimination.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/DeadTypeElimination.cpp
@@ -26,7 +26,7 @@ STATISTIC(NumKilled, "Number of unused typenames removed from symtab");
namespace {
struct DTE : public ModulePass {
static char ID; // Pass identification, replacement for typeid
- DTE() : ModulePass(&ID) {}
+ DTE() : ModulePass(ID) {}
// doPassInitialization - For this pass, it removes global symbol table
// entries for primitive types. These are never used for linking in GCC and
@@ -45,7 +45,7 @@ namespace {
}
char DTE::ID = 0;
-static RegisterPass<DTE> X("deadtypeelim", "Dead Type Elimination");
+INITIALIZE_PASS(DTE, "deadtypeelim", "Dead Type Elimination", false, false);
ModulePass *llvm::createDeadTypeEliminationPass() {
return new DTE();
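
Editorial note, not part of the patch: the DeadTypeElimination change above is the pattern repeated throughout this merge — pass constructors take the ID by reference, and static RegisterPass<> objects give way to INITIALIZE_PASS. A self-contained sketch of that registration idiom with a hypothetical no-op pass, assuming the bundled 2.8 Pass.h.

#include "llvm/Pass.h"
using namespace llvm;

namespace {
  struct NopModulePass : public ModulePass {   // hypothetical example pass
    static char ID;                            // pass identification
    NopModulePass() : ModulePass(ID) {}        // ID passed by reference, no '&'
    virtual bool runOnModule(Module &) { return false; }
  };
}

char NopModulePass::ID = 0;
INITIALIZE_PASS(NopModulePass, "nop-module-pass",
                "No-op example pass", false, false);
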
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/ExtractGV.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/ExtractGV.cpp
index 7f67e48..45c5fe7 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/ExtractGV.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/ExtractGV.cpp
@@ -17,15 +17,15 @@
#include "llvm/Pass.h"
#include "llvm/Constants.h"
#include "llvm/Transforms/IPO.h"
+#include "llvm/ADT/SetVector.h"
#include <algorithm>
using namespace llvm;
namespace {
/// @brief A pass to extract specific functions and their dependencies.
class GVExtractorPass : public ModulePass {
- std::vector<GlobalValue*> Named;
+ SetVector<GlobalValue *> Named;
bool deleteStuff;
- bool reLink;
public:
static char ID; // Pass identification, replacement for typeid
@@ -33,135 +33,42 @@ namespace {
/// specified function. Otherwise, it deletes as much of the module as
/// possible, except for the function specified.
///
- explicit GVExtractorPass(std::vector<GlobalValue*>& GVs, bool deleteS = true,
- bool relinkCallees = false)
- : ModulePass(&ID), Named(GVs), deleteStuff(deleteS),
- reLink(relinkCallees) {}
+ explicit GVExtractorPass(std::vector<GlobalValue*>& GVs, bool deleteS = true)
+ : ModulePass(ID), Named(GVs.begin(), GVs.end()), deleteStuff(deleteS) {}
bool runOnModule(Module &M) {
- if (Named.size() == 0) {
- return false; // Nothing to extract
- }
-
-
- if (deleteStuff)
- return deleteGV();
- M.setModuleInlineAsm("");
- return isolateGV(M);
- }
-
- bool deleteGV() {
- for (std::vector<GlobalValue*>::iterator GI = Named.begin(),
- GE = Named.end(); GI != GE; ++GI) {
- if (Function* NamedFunc = dyn_cast<Function>(*GI)) {
- // If we're in relinking mode, set linkage of all internal callees to
- // external. This will allow us extract function, and then - link
- // everything together
- if (reLink) {
- for (Function::iterator B = NamedFunc->begin(), BE = NamedFunc->end();
- B != BE; ++B) {
- for (BasicBlock::iterator I = B->begin(), E = B->end();
- I != E; ++I) {
- if (CallInst* callInst = dyn_cast<CallInst>(&*I)) {
- Function* Callee = callInst->getCalledFunction();
- if (Callee && Callee->hasLocalLinkage())
- Callee->setLinkage(GlobalValue::ExternalLinkage);
- }
- }
- }
- }
-
- NamedFunc->setLinkage(GlobalValue::ExternalLinkage);
- NamedFunc->deleteBody();
- assert(NamedFunc->isDeclaration() && "This didn't make the function external!");
- } else {
- if (!(*GI)->isDeclaration()) {
- cast<GlobalVariable>(*GI)->setInitializer(0); //clear the initializer
- (*GI)->setLinkage(GlobalValue::ExternalLinkage);
- }
- }
- }
- return true;
- }
-
- bool isolateGV(Module &M) {
- // Mark all globals internal
- // FIXME: what should we do with private linkage?
- for (Module::global_iterator I = M.global_begin(), E = M.global_end(); I != E; ++I)
+ // Visit the global inline asm.
+ if (!deleteStuff)
+ M.setModuleInlineAsm("");
+
+ // For simplicity, just give all GlobalValues ExternalLinkage. A trickier
+ // implementation could figure out which GlobalValues are actually
+ // referenced by the Named set, and which GlobalValues in the rest of
+ // the module are referenced by the NamedSet, and get away with leaving
+ // more internal and private things internal and private. But for now,
+ // be conservative and simple.
+
+ // Visit the GlobalVariables.
+ for (Module::global_iterator I = M.global_begin(), E = M.global_end();
+ I != E; ++I)
if (!I->isDeclaration()) {
- I->setLinkage(GlobalValue::InternalLinkage);
+ if (I->hasLocalLinkage())
+ I->setVisibility(GlobalValue::HiddenVisibility);
+ I->setLinkage(GlobalValue::ExternalLinkage);
+ if (deleteStuff == Named.count(I))
+ I->setInitializer(0);
}
+
+ // Visit the Functions.
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
if (!I->isDeclaration()) {
- I->setLinkage(GlobalValue::InternalLinkage);
- }
-
- // Make sure our result is globally accessible...
- // by putting them in the used array
- {
- std::vector<Constant *> AUGs;
- const Type *SBP=
- Type::getInt8PtrTy(M.getContext());
- for (std::vector<GlobalValue*>::iterator GI = Named.begin(),
- GE = Named.end(); GI != GE; ++GI) {
- (*GI)->setLinkage(GlobalValue::ExternalLinkage);
- AUGs.push_back(ConstantExpr::getBitCast(*GI, SBP));
- }
- ArrayType *AT = ArrayType::get(SBP, AUGs.size());
- Constant *Init = ConstantArray::get(AT, AUGs);
- GlobalValue *gv = new GlobalVariable(M, AT, false,
- GlobalValue::AppendingLinkage,
- Init, "llvm.used");
- gv->setSection("llvm.metadata");
- }
-
- // All of the functions may be used by global variables or the named
- // globals. Loop through them and create a new, external functions that
- // can be "used", instead of ones with bodies.
- std::vector<Function*> NewFunctions;
-
- Function *Last = --M.end(); // Figure out where the last real fn is.
-
- for (Module::iterator I = M.begin(); ; ++I) {
- if (std::find(Named.begin(), Named.end(), &*I) == Named.end()) {
- Function *New = Function::Create(I->getFunctionType(),
- GlobalValue::ExternalLinkage);
- New->copyAttributesFrom(I);
-
- // If it's not the named function, delete the body of the function
- I->dropAllReferences();
-
- M.getFunctionList().push_back(New);
- NewFunctions.push_back(New);
- New->takeName(I);
+ if (I->hasLocalLinkage())
+ I->setVisibility(GlobalValue::HiddenVisibility);
+ I->setLinkage(GlobalValue::ExternalLinkage);
+ if (deleteStuff == Named.count(I))
+ I->deleteBody();
}
- if (&*I == Last) break; // Stop after processing the last function
- }
-
- // Now that we have replacements all set up, loop through the module,
- // deleting the old functions, replacing them with the newly created
- // functions.
- if (!NewFunctions.empty()) {
- unsigned FuncNum = 0;
- Module::iterator I = M.begin();
- do {
- if (std::find(Named.begin(), Named.end(), &*I) == Named.end()) {
- // Make everything that uses the old function use the new dummy fn
- I->replaceAllUsesWith(NewFunctions[FuncNum++]);
-
- Function *Old = I;
- ++I; // Move the iterator to the new function
-
- // Delete the old function!
- M.getFunctionList().erase(Old);
-
- } else {
- ++I; // Skip the function we are extracting
- }
- } while (&*I != NewFunctions[0]);
- }
-
return true;
}
};
@@ -170,6 +77,6 @@ namespace {
}
ModulePass *llvm::createGVExtractionPass(std::vector<GlobalValue*>& GVs,
- bool deleteFn, bool relinkCallees) {
- return new GVExtractorPass(GVs, deleteFn, relinkCallees);
+ bool deleteFn) {
+ return new GVExtractorPass(GVs, deleteFn);
}
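
Editorial note, not part of the patch: the rewritten extractor above leans on SetVector to keep the named globals unique while preserving order, and on the 0/1 result of count() to choose between "delete the named set" and "delete everything else". A small stand-alone sketch of those SetVector semantics, assuming llvm/ADT/SetVector.h from the bundled tree.

#include "llvm/ADT/SetVector.h"
#include <cassert>

int main() {
  llvm::SetVector<int> Named;
  int GVs[] = { 1, 2, 2, 3 };
  Named.insert(GVs, GVs + 4);      // the duplicate 2 is dropped, order is kept
  assert(Named.size() == 3);
  // count() is 0 or 1, so "deleteStuff == Named.count(GV)" selects either the
  // named set or its complement, depending on the extraction mode.
  assert(Named.count(2) == 1 && Named.count(7) == 0);
  return 0;
}
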
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 298d5cf..6165ba0 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -41,23 +41,23 @@ STATISTIC(NumNoAlias, "Number of function returns marked noalias");
namespace {
struct FunctionAttrs : public CallGraphSCCPass {
static char ID; // Pass identification, replacement for typeid
- FunctionAttrs() : CallGraphSCCPass(&ID) {}
+ FunctionAttrs() : CallGraphSCCPass(ID) {}
// runOnSCC - Analyze the SCC, performing the transformation if possible.
- bool runOnSCC(std::vector<CallGraphNode *> &SCC);
+ bool runOnSCC(CallGraphSCC &SCC);
// AddReadAttrs - Deduce readonly/readnone attributes for the SCC.
- bool AddReadAttrs(const std::vector<CallGraphNode *> &SCC);
+ bool AddReadAttrs(const CallGraphSCC &SCC);
// AddNoCaptureAttrs - Deduce nocapture attributes for the SCC.
- bool AddNoCaptureAttrs(const std::vector<CallGraphNode *> &SCC);
+ bool AddNoCaptureAttrs(const CallGraphSCC &SCC);
// IsFunctionMallocLike - Does this function allocate new memory?
bool IsFunctionMallocLike(Function *F,
SmallPtrSet<Function*, 8> &) const;
// AddNoAliasAttrs - Deduce noalias attributes for the SCC.
- bool AddNoAliasAttrs(const std::vector<CallGraphNode *> &SCC);
+ bool AddNoAliasAttrs(const CallGraphSCC &SCC);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
@@ -69,8 +69,8 @@ namespace {
}
char FunctionAttrs::ID = 0;
-static RegisterPass<FunctionAttrs>
-X("functionattrs", "Deduce function attributes");
+INITIALIZE_PASS(FunctionAttrs, "functionattrs",
+ "Deduce function attributes", false, false);
Pass *llvm::createFunctionAttrsPass() { return new FunctionAttrs(); }
@@ -123,19 +123,19 @@ bool FunctionAttrs::PointsToLocalMemory(Value *V) {
}
/// AddReadAttrs - Deduce readonly/readnone attributes for the SCC.
-bool FunctionAttrs::AddReadAttrs(const std::vector<CallGraphNode *> &SCC) {
+bool FunctionAttrs::AddReadAttrs(const CallGraphSCC &SCC) {
SmallPtrSet<Function*, 8> SCCNodes;
// Fill SCCNodes with the elements of the SCC. Used for quickly
// looking up whether a given CallGraphNode is in this SCC.
- for (unsigned i = 0, e = SCC.size(); i != e; ++i)
- SCCNodes.insert(SCC[i]->getFunction());
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
+ SCCNodes.insert((*I)->getFunction());
// Check if any of the functions in the SCC read or write memory. If they
// write memory then they can't be marked readnone or readonly.
bool ReadsMemory = false;
- for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
- Function *F = SCC[i]->getFunction();
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
+ Function *F = (*I)->getFunction();
if (F == 0)
// External node - may write memory. Just give up.
@@ -162,14 +162,14 @@ bool FunctionAttrs::AddReadAttrs(const std::vector<CallGraphNode *> &SCC) {
// Some instructions can be ignored even if they read or write memory.
// Detect these now, skipping to the next instruction if one is found.
- CallSite CS = CallSite::get(I);
- if (CS.getInstruction() && CS.getCalledFunction()) {
+ CallSite CS(cast<Value>(I));
+ if (CS && CS.getCalledFunction()) {
// Ignore calls to functions in the same SCC.
if (SCCNodes.count(CS.getCalledFunction()))
continue;
// Ignore intrinsics that only access local memory.
if (unsigned id = CS.getCalledFunction()->getIntrinsicID())
- if (AliasAnalysis::getModRefBehavior(id) ==
+ if (AliasAnalysis::getIntrinsicModRefBehavior(id) ==
AliasAnalysis::AccessesArguments) {
// Check that all pointer arguments point to local memory.
for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
@@ -210,8 +210,8 @@ bool FunctionAttrs::AddReadAttrs(const std::vector<CallGraphNode *> &SCC) {
// Success! Functions in this SCC do not access memory, or only read memory.
// Give them the appropriate attribute.
bool MadeChange = false;
- for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
- Function *F = SCC[i]->getFunction();
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
+ Function *F = (*I)->getFunction();
if (F->doesNotAccessMemory())
// Already perfect!
@@ -239,13 +239,13 @@ bool FunctionAttrs::AddReadAttrs(const std::vector<CallGraphNode *> &SCC) {
}
/// AddNoCaptureAttrs - Deduce nocapture attributes for the SCC.
-bool FunctionAttrs::AddNoCaptureAttrs(const std::vector<CallGraphNode *> &SCC) {
+bool FunctionAttrs::AddNoCaptureAttrs(const CallGraphSCC &SCC) {
bool Changed = false;
// Check each function in turn, determining which pointer arguments are not
// captured.
- for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
- Function *F = SCC[i]->getFunction();
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
+ Function *F = (*I)->getFunction();
if (F == 0)
// External node - skip it;
@@ -334,18 +334,18 @@ bool FunctionAttrs::IsFunctionMallocLike(Function *F,
}
/// AddNoAliasAttrs - Deduce noalias attributes for the SCC.
-bool FunctionAttrs::AddNoAliasAttrs(const std::vector<CallGraphNode *> &SCC) {
+bool FunctionAttrs::AddNoAliasAttrs(const CallGraphSCC &SCC) {
SmallPtrSet<Function*, 8> SCCNodes;
// Fill SCCNodes with the elements of the SCC. Used for quickly
// looking up whether a given CallGraphNode is in this SCC.
- for (unsigned i = 0, e = SCC.size(); i != e; ++i)
- SCCNodes.insert(SCC[i]->getFunction());
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
+ SCCNodes.insert((*I)->getFunction());
// Check each function in turn, determining which functions return noalias
// pointers.
- for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
- Function *F = SCC[i]->getFunction();
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
+ Function *F = (*I)->getFunction();
if (F == 0)
// External node - skip it;
@@ -370,8 +370,8 @@ bool FunctionAttrs::AddNoAliasAttrs(const std::vector<CallGraphNode *> &SCC) {
}
bool MadeChange = false;
- for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
- Function *F = SCC[i]->getFunction();
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
+ Function *F = (*I)->getFunction();
if (F->doesNotAlias(0) || !F->getReturnType()->isPointerTy())
continue;
@@ -383,7 +383,7 @@ bool FunctionAttrs::AddNoAliasAttrs(const std::vector<CallGraphNode *> &SCC) {
return MadeChange;
}
-bool FunctionAttrs::runOnSCC(std::vector<CallGraphNode *> &SCC) {
+bool FunctionAttrs::runOnSCC(CallGraphSCC &SCC) {
bool Changed = AddReadAttrs(SCC);
Changed |= AddNoCaptureAttrs(SCC);
Changed |= AddNoAliasAttrs(SCC);
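
Editorial note, not part of the patch: FunctionAttrs above moves from raw std::vector<CallGraphNode*> SCCs to the CallGraphSCC interface, so the loops become iterator-based and external nodes still show up as a null Function. A minimal sketch of that iteration pattern, assuming the bundled CallGraphSCCPass.h; the helper name is hypothetical.

#include "llvm/CallGraphSCCPass.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Function.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace llvm;

// Collect the Functions of one SCC, skipping external nodes (null Function).
static void collectSCCFunctions(const CallGraphSCC &SCC,
                                SmallPtrSet<Function*, 8> &SCCNodes) {
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
    if (Function *F = (*I)->getFunction())
      SCCNodes.insert(F);
}
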
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/GlobalDCE.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/GlobalDCE.cpp
index 44216a6..aa18601 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/GlobalDCE.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/GlobalDCE.cpp
@@ -31,7 +31,7 @@ STATISTIC(NumVariables, "Number of global variables removed");
namespace {
struct GlobalDCE : public ModulePass {
static char ID; // Pass identification, replacement for typeid
- GlobalDCE() : ModulePass(&ID) {}
+ GlobalDCE() : ModulePass(ID) {}
// run - Do the GlobalDCE pass on the specified module, optionally updating
// the specified callgraph to reflect the changes.
@@ -51,7 +51,8 @@ namespace {
}
char GlobalDCE::ID = 0;
-static RegisterPass<GlobalDCE> X("globaldce", "Dead Global Elimination");
+INITIALIZE_PASS(GlobalDCE, "globaldce",
+ "Dead Global Elimination", false, false);
ModulePass *llvm::createGlobalDCEPass() { return new GlobalDCE(); }
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index f58ca6c..a77af54 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -59,7 +59,7 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
}
static char ID; // Pass identification, replacement for typeid
- GlobalOpt() : ModulePass(&ID) {}
+ GlobalOpt() : ModulePass(ID) {}
bool runOnModule(Module &M);
@@ -74,7 +74,8 @@ namespace {
}
char GlobalOpt::ID = 0;
-static RegisterPass<GlobalOpt> X("globalopt", "Global Variable Optimizer");
+INITIALIZE_PASS(GlobalOpt, "globalopt",
+ "Global Variable Optimizer", false, false);
ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); }
@@ -119,7 +120,7 @@ struct GlobalStatus {
/// null/false. When the first accessing function is noticed, it is recorded.
/// When a second different accessing function is noticed,
/// HasMultipleAccessingFunctions is set to true.
- Function *AccessingFunction;
+ const Function *AccessingFunction;
bool HasMultipleAccessingFunctions;
/// HasNonInstructionUser - Set to true if this global has a user that is not
@@ -140,11 +141,12 @@ struct GlobalStatus {
// by constants itself. Note that constants cannot be cyclic, so this test is
// pretty easy to implement recursively.
//
-static bool SafeToDestroyConstant(Constant *C) {
+static bool SafeToDestroyConstant(const Constant *C) {
if (isa<GlobalValue>(C)) return false;
- for (Value::use_iterator UI = C->use_begin(), E = C->use_end(); UI != E; ++UI)
- if (Constant *CU = dyn_cast<Constant>(*UI)) {
+ for (Value::const_use_iterator UI = C->use_begin(), E = C->use_end(); UI != E;
+ ++UI)
+ if (const Constant *CU = dyn_cast<Constant>(*UI)) {
if (!SafeToDestroyConstant(CU)) return false;
} else
return false;
@@ -156,26 +158,26 @@ static bool SafeToDestroyConstant(Constant *C) {
/// structure. If the global has its address taken, return true to indicate we
/// can't do anything with it.
///
-static bool AnalyzeGlobal(Value *V, GlobalStatus &GS,
- SmallPtrSet<PHINode*, 16> &PHIUsers) {
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(*UI)) {
+static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
+ SmallPtrSet<const PHINode*, 16> &PHIUsers) {
+ for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
+ ++UI) {
+ const User *U = *UI;
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
GS.HasNonInstructionUser = true;
-
if (AnalyzeGlobal(CE, GS, PHIUsers)) return true;
-
- } else if (Instruction *I = dyn_cast<Instruction>(*UI)) {
+ } else if (const Instruction *I = dyn_cast<Instruction>(U)) {
if (!GS.HasMultipleAccessingFunctions) {
- Function *F = I->getParent()->getParent();
+ const Function *F = I->getParent()->getParent();
if (GS.AccessingFunction == 0)
GS.AccessingFunction = F;
else if (GS.AccessingFunction != F)
GS.HasMultipleAccessingFunctions = true;
}
- if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
GS.isLoaded = true;
if (LI->isVolatile()) return true; // Don't hack on volatile loads.
- } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+ } else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
// Don't allow a store OF the address, only stores TO the address.
if (SI->getOperand(0) == V) return true;
@@ -185,14 +187,14 @@ static bool AnalyzeGlobal(Value *V, GlobalStatus &GS,
// value, not an aggregate), keep more specific information about
// stores.
if (GS.StoredType != GlobalStatus::isStored) {
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(SI->getOperand(1))){
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(
+ SI->getOperand(1))) {
Value *StoredVal = SI->getOperand(0);
if (StoredVal == GV->getInitializer()) {
if (GS.StoredType < GlobalStatus::isInitializerStored)
GS.StoredType = GlobalStatus::isInitializerStored;
} else if (isa<LoadInst>(StoredVal) &&
cast<LoadInst>(StoredVal)->getOperand(0) == GV) {
- // G = G
if (GS.StoredType < GlobalStatus::isInitializerStored)
GS.StoredType = GlobalStatus::isInitializerStored;
} else if (GS.StoredType < GlobalStatus::isStoredOnce) {
@@ -212,25 +214,28 @@ static bool AnalyzeGlobal(Value *V, GlobalStatus &GS,
if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
} else if (isa<SelectInst>(I)) {
if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
- } else if (PHINode *PN = dyn_cast<PHINode>(I)) {
+ } else if (const PHINode *PN = dyn_cast<PHINode>(I)) {
// PHI nodes we can check just like select or GEP instructions, but we
// have to be careful about infinite recursion.
if (PHIUsers.insert(PN)) // Not already visited.
if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
GS.HasPHIUser = true;
} else if (isa<CmpInst>(I)) {
+ // Nothing to analyse.
} else if (isa<MemTransferInst>(I)) {
- if (I->getOperand(1) == V)
+ const MemTransferInst *MTI = cast<MemTransferInst>(I);
+ if (MTI->getArgOperand(0) == V)
GS.StoredType = GlobalStatus::isStored;
- if (I->getOperand(2) == V)
+ if (MTI->getArgOperand(1) == V)
GS.isLoaded = true;
} else if (isa<MemSetInst>(I)) {
- assert(I->getOperand(1) == V && "Memset only takes one pointer!");
+ assert(cast<MemSetInst>(I)->getArgOperand(0) == V &&
+ "Memset only takes one pointer!");
GS.StoredType = GlobalStatus::isStored;
} else {
return true; // Any other non-load instruction might take address!
}
- } else if (Constant *C = dyn_cast<Constant>(*UI)) {
+ } else if (const Constant *C = dyn_cast<Constant>(U)) {
GS.HasNonInstructionUser = true;
// We might have a dead and dangling constant hanging off of here.
if (!SafeToDestroyConstant(C))
@@ -240,6 +245,7 @@ static bool AnalyzeGlobal(Value *V, GlobalStatus &GS,
// Otherwise must be some other user.
return true;
}
+ }
return false;
}
@@ -611,62 +617,69 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
/// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified
/// value will trap if the value is dynamically null. PHIs keeps track of any
/// phi nodes we've seen to avoid reprocessing them.
-static bool AllUsesOfValueWillTrapIfNull(Value *V,
- SmallPtrSet<PHINode*, 8> &PHIs) {
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
- if (isa<LoadInst>(*UI)) {
+static bool AllUsesOfValueWillTrapIfNull(const Value *V,
+ SmallPtrSet<const PHINode*, 8> &PHIs) {
+ for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
+ ++UI) {
+ const User *U = *UI;
+
+ if (isa<LoadInst>(U)) {
// Will trap.
- } else if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
+ } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
if (SI->getOperand(0) == V) {
- //cerr << "NONTRAPPING USE: " << **UI;
+ //cerr << "NONTRAPPING USE: " << *U;
return false; // Storing the value.
}
- } else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
- if (CI->getOperand(0) != V) {
- //cerr << "NONTRAPPING USE: " << **UI;
+ } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
+ if (CI->getCalledValue() != V) {
+ //cerr << "NONTRAPPING USE: " << *U;
return false; // Not calling the ptr
}
- } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
- if (II->getOperand(0) != V) {
- //cerr << "NONTRAPPING USE: " << **UI;
+ } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
+ if (II->getCalledValue() != V) {
+ //cerr << "NONTRAPPING USE: " << *U;
return false; // Not calling the ptr
}
- } else if (BitCastInst *CI = dyn_cast<BitCastInst>(*UI)) {
+ } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
- } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI)) {
+ } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
- } else if (PHINode *PN = dyn_cast<PHINode>(*UI)) {
+ } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
// If we've already seen this phi node, ignore it, it has already been
// checked.
if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
return false;
- } else if (isa<ICmpInst>(*UI) &&
+ } else if (isa<ICmpInst>(U) &&
isa<ConstantPointerNull>(UI->getOperand(1))) {
// Ignore icmp X, null
} else {
- //cerr << "NONTRAPPING USE: " << **UI;
+ //cerr << "NONTRAPPING USE: " << *U;
return false;
}
+ }
return true;
}
/// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads
/// from GV will trap if the loaded value is null. Note that this also permits
/// comparisons of the loaded value against null, as a special case.
-static bool AllUsesOfLoadedValueWillTrapIfNull(GlobalVariable *GV) {
- for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI!=E; ++UI)
- if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
- SmallPtrSet<PHINode*, 8> PHIs;
+static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
+ for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
+ UI != E; ++UI) {
+ const User *U = *UI;
+
+ if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
+ SmallPtrSet<const PHINode*, 8> PHIs;
if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
return false;
- } else if (isa<StoreInst>(*UI)) {
+ } else if (isa<StoreInst>(U)) {
// Ignore stores to the global.
} else {
// We don't know or understand this user, bail out.
- //cerr << "UNKNOWN USER OF GLOBAL!: " << **UI;
+ //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
return false;
}
-
+ }
return true;
}
@@ -683,16 +696,17 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
Changed = true;
}
} else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
- if (I->getOperand(0) == V) {
+ CallSite CS(I);
+ if (CS.getCalledValue() == V) {
// Calling through the pointer! Turn into a direct call, but be careful
// that the pointer is not also being passed as an argument.
- I->setOperand(0, NewV);
+ CS.setCalledFunction(NewV);
Changed = true;
bool PassedAsArg = false;
- for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i)
- if (I->getOperand(i) == V) {
+ for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
+ if (CS.getArgument(i) == V) {
PassedAsArg = true;
- I->setOperand(i, NewV);
+ CS.setArgument(i, NewV);
}
if (PassedAsArg) {
@@ -939,17 +953,18 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
/// to make sure that there are no complex uses of V. We permit simple things
/// like dereferencing the pointer, but not storing through the address, unless
/// it is to the specified global.
-static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Instruction *V,
- GlobalVariable *GV,
- SmallPtrSet<PHINode*, 8> &PHIs) {
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
- Instruction *Inst = cast<Instruction>(*UI);
-
+static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
+ const GlobalVariable *GV,
+ SmallPtrSet<const PHINode*, 8> &PHIs) {
+ for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
+ UI != E; ++UI) {
+ const Instruction *Inst = cast<Instruction>(*UI);
+
if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
continue; // Fine, ignore.
}
- if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
return false; // Storing the pointer itself... bad.
continue; // Otherwise, storing through it, or storing into GV... fine.
@@ -962,7 +977,7 @@ static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Instruction *V,
continue;
}
- if (PHINode *PN = dyn_cast<PHINode>(Inst)) {
+ if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
// PHIs are ok if all uses are ok. Don't infinitely recurse through PHI
// cycles.
if (PHIs.insert(PN))
@@ -971,7 +986,7 @@ static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Instruction *V,
continue;
}
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
+ if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
return false;
continue;
@@ -1030,23 +1045,24 @@ static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
/// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a phi
/// of a load) are simple enough to perform heap SRA on. This permits GEP's
/// that index through the array and struct field, icmps of null, and PHIs.
-static bool LoadUsesSimpleEnoughForHeapSRA(Value *V,
- SmallPtrSet<PHINode*, 32> &LoadUsingPHIs,
- SmallPtrSet<PHINode*, 32> &LoadUsingPHIsPerLoad) {
+static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
+ SmallPtrSet<const PHINode*, 32> &LoadUsingPHIs,
+ SmallPtrSet<const PHINode*, 32> &LoadUsingPHIsPerLoad) {
// We permit two users of the load: setcc comparing against the null
// pointer, and a getelementptr of a specific form.
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
- Instruction *User = cast<Instruction>(*UI);
+ for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
+ ++UI) {
+ const Instruction *User = cast<Instruction>(*UI);
// Comparison against null is ok.
- if (ICmpInst *ICI = dyn_cast<ICmpInst>(User)) {
+ if (const ICmpInst *ICI = dyn_cast<ICmpInst>(User)) {
if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
return false;
continue;
}
// getelementptr is also ok, but only a simple form.
- if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
+ if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
// Must index into the array and into the struct.
if (GEPI->getNumOperands() < 3)
return false;
@@ -1055,7 +1071,7 @@ static bool LoadUsesSimpleEnoughForHeapSRA(Value *V,
continue;
}
- if (PHINode *PN = dyn_cast<PHINode>(User)) {
+ if (const PHINode *PN = dyn_cast<PHINode>(User)) {
if (!LoadUsingPHIsPerLoad.insert(PN))
// This means some phi nodes are dependent on each other.
// Avoid infinite looping!
@@ -1082,13 +1098,13 @@ static bool LoadUsesSimpleEnoughForHeapSRA(Value *V,
/// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded from
/// GV are simple enough to perform HeapSRA, return true.
-static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(GlobalVariable *GV,
+static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
Instruction *StoredVal) {
- SmallPtrSet<PHINode*, 32> LoadUsingPHIs;
- SmallPtrSet<PHINode*, 32> LoadUsingPHIsPerLoad;
- for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;
- ++UI)
- if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+ SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
+ SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
+ for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
+ UI != E; ++UI)
+ if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
LoadUsingPHIsPerLoad))
return false;
@@ -1100,16 +1116,16 @@ static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(GlobalVariable *GV,
// that all inputs the to the PHI nodes are in the same equivalence sets.
// Check to verify that all operands of the PHIs are either PHIS that can be
// transformed, loads from GV, or MI itself.
- for (SmallPtrSet<PHINode*, 32>::iterator I = LoadUsingPHIs.begin(),
- E = LoadUsingPHIs.end(); I != E; ++I) {
- PHINode *PN = *I;
+ for (SmallPtrSet<const PHINode*, 32>::const_iterator I = LoadUsingPHIs.begin()
+ , E = LoadUsingPHIs.end(); I != E; ++I) {
+ const PHINode *PN = *I;
for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
Value *InVal = PN->getIncomingValue(op);
// PHI of the stored value itself is ok.
if (InVal == StoredVal) continue;
- if (PHINode *InPN = dyn_cast<PHINode>(InVal)) {
+ if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
// One of the PHIs in our set is (optimistically) ok.
if (LoadUsingPHIs.count(InPN))
continue;
@@ -1117,7 +1133,7 @@ static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(GlobalVariable *GV,
}
// Load from GV is ok.
- if (LoadInst *LI = dyn_cast<LoadInst>(InVal))
+ if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
if (LI->getOperand(0) == GV)
continue;
@@ -1292,7 +1308,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
const Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
ConstantInt::get(IntPtrTy, TypeSize),
- NElems,
+ NElems, 0,
CI->getName() + ".f" + Twine(FieldNo));
FieldMallocs.push_back(NMI);
new StoreInst(NMI, NGV, CI);
@@ -1311,8 +1327,8 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
// if (F2) { free(F2); F2 = 0; }
// }
// The malloc can also fail if its argument is too large.
- Constant *ConstantZero = ConstantInt::get(CI->getOperand(1)->getType(), 0);
- Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getOperand(1),
+ Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
+ Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
ConstantZero, "isneg");
for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
@@ -1450,6 +1466,9 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
const Type *AllocTy,
Module::global_iterator &GVI,
TargetData *TD) {
+ if (!TD)
+ return false;
+
// If this is a malloc of an abstract type, don't touch it.
if (!AllocTy->isSized())
return false;
@@ -1468,66 +1487,66 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// malloc to be stored into the specified global, loaded setcc'd, and
// GEP'd. These are all things we could transform to using the global
// for.
- {
- SmallPtrSet<PHINode*, 8> PHIs;
- if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
- return false;
- }
+ SmallPtrSet<const PHINode*, 8> PHIs;
+ if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
+ return false;
// If we have a global that is only initialized with a fixed size malloc,
// transform the program to use global memory instead of malloc'd memory.
// This eliminates dynamic allocation, avoids an indirection accessing the
// data, and exposes the resultant global to further GlobalOpt.
// We cannot optimize the malloc if we cannot determine malloc array size.
- if (Value *NElems = getMallocArraySize(CI, TD, true)) {
- if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
- // Restrict this transformation to only working on small allocations
- // (2048 bytes currently), as we don't want to introduce a 16M global or
- // something.
- if (TD &&
- NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) {
- GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD);
- return true;
- }
-
- // If the allocation is an array of structures, consider transforming this
- // into multiple malloc'd arrays, one for each field. This is basically
- // SRoA for malloc'd memory.
-
- // If this is an allocation of a fixed size array of structs, analyze as a
- // variable size array. malloc [100 x struct],1 -> malloc struct, 100
- if (NElems == ConstantInt::get(CI->getOperand(1)->getType(), 1))
- if (const ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
- AllocTy = AT->getElementType();
+ Value *NElems = getMallocArraySize(CI, TD, true);
+ if (!NElems)
+ return false;
+
+ if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
+ // Restrict this transformation to only working on small allocations
+ // (2048 bytes currently), as we don't want to introduce a 16M global or
+ // something.
+ if (NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) {
+ GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD);
+ return true;
+ }
- if (const StructType *AllocSTy = dyn_cast<StructType>(AllocTy)) {
- // This the structure has an unreasonable number of fields, leave it
- // alone.
- if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
- AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {
-
- // If this is a fixed size array, transform the Malloc to be an alloc of
- // structs. malloc [100 x struct],1 -> malloc struct, 100
- if (const ArrayType *AT =
- dyn_cast<ArrayType>(getMallocAllocatedType(CI))) {
- const Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
- unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
- Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
- Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
- Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
- AllocSize, NumElements,
- CI->getName());
- Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
- CI->replaceAllUsesWith(Cast);
- CI->eraseFromParent();
- CI = dyn_cast<BitCastInst>(Malloc) ?
- extractMallocCallFromBitCast(Malloc) : cast<CallInst>(Malloc);
- }
-
- GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, true),TD);
- return true;
- }
+ // If the allocation is an array of structures, consider transforming this
+ // into multiple malloc'd arrays, one for each field. This is basically
+ // SRoA for malloc'd memory.
+
+ // If this is an allocation of a fixed size array of structs, analyze as a
+ // variable size array. malloc [100 x struct],1 -> malloc struct, 100
+ if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
+ if (const ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
+ AllocTy = AT->getElementType();
+
+ const StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
+ if (!AllocSTy)
+ return false;
+
+  // If the structure has an unreasonable number of fields, leave it
+  // alone.
+ if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
+ AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {
+
+ // If this is a fixed size array, transform the Malloc to be an alloc of
+ // structs. malloc [100 x struct],1 -> malloc struct, 100
+ if (const ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI))) {
+ const Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
+ unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
+ Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
+ Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
+ Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
+ AllocSize, NumElements,
+ 0, CI->getName());
+ Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
+ CI->replaceAllUsesWith(Cast);
+ CI->eraseFromParent();
+ CI = dyn_cast<BitCastInst>(Malloc) ?
+ extractMallocCallFromBitCast(Malloc) : cast<CallInst>(Malloc);
}
+
+ GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, true),TD);
+ return true;
}
return false;
@@ -1582,13 +1601,15 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
GVElType->isFloatingPointTy() ||
GVElType->isPointerTy() || GVElType->isVectorTy())
return false;
-
+
// Walk the use list of the global seeing if all the uses are load or store.
// If there is anything else, bail out.
- for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I)
- if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
+ for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){
+ User *U = *I;
+ if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
return false;
-
+ }
+
DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV);
// Create the new global, initializing it to false.
@@ -1626,7 +1647,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
// bool.
Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));
- // If we're already replaced the input, StoredVal will be a cast or
+ // If we've already replaced the input, StoredVal will be a cast or
// select instruction. If not, it will be a load of the original
// global.
if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
@@ -1665,7 +1686,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
/// it if possible. If we make a change, return true.
bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
Module::global_iterator &GVI) {
- SmallPtrSet<PHINode*, 16> PHIUsers;
+ SmallPtrSet<const PHINode*, 16> PHIUsers;
GlobalStatus GS;
GV->removeDeadConstantUsers();
@@ -1691,8 +1712,8 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
if (GS.StoredType == GlobalStatus::isStoredOnce && GS.StoredOnceValue)
DEBUG(dbgs() << " StoredOnceValue = " << *GS.StoredOnceValue << "\n");
if (GS.AccessingFunction && !GS.HasMultipleAccessingFunctions)
- DEBUG(dbgs() << " AccessingFunction = " << GS.AccessingFunction->getName()
- << "\n");
+ DEBUG(dbgs() << " AccessingFunction = "
+ << GS.AccessingFunction->getName() << "\n");
DEBUG(dbgs() << " HasMultipleAccessingFunctions = "
<< GS.HasMultipleAccessingFunctions << "\n");
DEBUG(dbgs() << " HasNonInstructionUser = "
@@ -1716,12 +1737,13 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GS.AccessingFunction->hasExternalLinkage() &&
GV->getType()->getAddressSpace() == 0) {
DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV);
- Instruction* FirstI = GS.AccessingFunction->getEntryBlock().begin();
+ Instruction& FirstI = const_cast<Instruction&>(*GS.AccessingFunction
+ ->getEntryBlock().begin());
const Type* ElemTy = GV->getType()->getElementType();
// FIXME: Pass Global's alignment when globals have alignment
- AllocaInst* Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), FirstI);
+ AllocaInst* Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
if (!isa<UndefValue>(GV->getInitializer()))
- new StoreInst(GV->getInitializer(), Alloca, FirstI);
+ new StoreInst(GV->getInitializer(), Alloca, &FirstI);
GV->replaceAllUsesWith(Alloca);
GV->eraseFromParent();
@@ -2056,7 +2078,7 @@ static bool isSimpleEnoughPointerToCommit(Constant *C) {
return false;
// The first index must be zero.
- ConstantInt *CI = dyn_cast<ConstantInt>(*next(CE->op_begin()));
+ ConstantInt *CI = dyn_cast<ConstantInt>(*llvm::next(CE->op_begin()));
if (!CI || !CI->isZero()) return false;
// The remaining indices must be compile-time known integers within the
@@ -2244,8 +2266,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
getVal(Values, CI->getOperand(0)),
CI->getType());
} else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
- InstResult =
- ConstantExpr::getSelect(getVal(Values, SI->getOperand(0)),
+ InstResult = ConstantExpr::getSelect(getVal(Values, SI->getOperand(0)),
getVal(Values, SI->getOperand(1)),
getVal(Values, SI->getOperand(2)));
} else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
@@ -2279,14 +2300,16 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
}
// Cannot handle inline asm.
- if (isa<InlineAsm>(CI->getOperand(0))) return false;
+ if (isa<InlineAsm>(CI->getCalledValue())) return false;
// Resolve function pointers.
- Function *Callee = dyn_cast<Function>(getVal(Values, CI->getOperand(0)));
+ Function *Callee = dyn_cast<Function>(getVal(Values,
+ CI->getCalledValue()));
if (!Callee) return false; // Cannot resolve.
SmallVector<Constant*, 8> Formals;
- for (User::op_iterator i = CI->op_begin() + 1, e = CI->op_end();
+ CallSite CS(CI);
+ for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i)
Formals.push_back(getVal(Values, *i));
@@ -2501,7 +2524,7 @@ bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
continue;
// Do not perform the transform if multiple aliases potentially target the
- // aliasee. This check also ensures that it is safe to replace the section
+ // aliasee. This check also ensures that it is safe to replace the section
// and other attributes of the aliasee with those of the alias.
if (!hasOneUse)
continue;
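
Editorial note, not part of the patch: several GlobalOpt hunks above stop indexing raw call operands (getOperand(1), getOperand(i)) and instead go through CallSite / getArgOperand(), which keeps the callee and the argument list apart. A hedged sketch of that accessor idiom, assuming the bundled 2.8 Instructions.h and CallSite.h; the helper name is hypothetical.

#include "llvm/Instructions.h"
#include "llvm/Support/CallSite.h"
using namespace llvm;

// Returns the first argument of a call, or null if it takes none.
static Value *firstCallArgument(CallInst *CI) {
  if (CI->getNumArgOperands() == 0)
    return 0;
  CallSite CS(CI);
  // CS.getArgument(0) and CI->getArgOperand(0) name the same operand; neither
  // is the callee, which is reached via CS.getCalledValue().
  return CS.getArgument(0);
}
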
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
index df2456f..1b3cf78 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
@@ -35,7 +35,7 @@ namespace {
///
struct IPCP : public ModulePass {
static char ID; // Pass identification, replacement for typeid
- IPCP() : ModulePass(&ID) {}
+ IPCP() : ModulePass(ID) {}
bool runOnModule(Module &M);
private:
@@ -45,8 +45,8 @@ namespace {
}
char IPCP::ID = 0;
-static RegisterPass<IPCP>
-X("ipconstprop", "Interprocedural constant propagation");
+INITIALIZE_PASS(IPCP, "ipconstprop",
+ "Interprocedural constant propagation", false, false);
ModulePass *llvm::createIPConstantPropagationPass() { return new IPCP(); }
@@ -85,15 +85,16 @@ bool IPCP::PropagateConstantsIntoArguments(Function &F) {
unsigned NumNonconstant = 0;
for (Value::use_iterator UI = F.use_begin(), E = F.use_end(); UI != E; ++UI) {
+ User *U = *UI;
// Ignore blockaddress uses.
- if (isa<BlockAddress>(*UI)) continue;
+ if (isa<BlockAddress>(U)) continue;
// Used by a non-instruction, or not the callee of a function, do not
// transform.
- if (!isa<CallInst>(*UI) && !isa<InvokeInst>(*UI))
+ if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
return false;
- CallSite CS = CallSite::get(cast<Instruction>(*UI));
+ CallSite CS(cast<Instruction>(U));
if (!CS.isCallee(UI))
return false;
@@ -218,7 +219,7 @@ bool IPCP::PropagateConstantReturn(Function &F) {
// constant.
bool MadeChange = false;
for (Value::use_iterator UI = F.use_begin(), E = F.use_end(); UI != E; ++UI) {
- CallSite CS = CallSite::get(*UI);
+ CallSite CS(*UI);
Instruction* Call = CS.getInstruction();
// Not a call instruction or a call instruction that's not calling F
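
Editorial note, not part of the patch: both IPConstantPropagation loops above keep the same safety check — every use of F must be a call or invoke in which F is the callee, not one of the arguments. A compact sketch of that check against the bundled CallSite API; the helper name is hypothetical.

#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Support/CallSite.h"
using namespace llvm;

// True if every use of F is a direct call or invoke *of* F itself.
static bool onlyUsedAsCallee(Function &F) {
  for (Value::use_iterator UI = F.use_begin(), E = F.use_end(); UI != E; ++UI) {
    User *U = *UI;
    if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
      return false;                 // used by something that is not a call
    CallSite CS(cast<Instruction>(U));
    if (!CS.isCallee(UI))
      return false;                 // F is passed as an argument, not called
  }
  return true;
}
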
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/IPO.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/IPO.cpp
index 83e8624..340b70e 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/IPO.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/IPO.cpp
@@ -62,6 +62,15 @@ void LLVMAddPruneEHPass(LLVMPassManagerRef PM) {
unwrap(PM)->add(createPruneEHPass());
}
+void LLVMAddIPSCCPPass(LLVMPassManagerRef PM) {
+ unwrap(PM)->add(createIPSCCPPass());
+}
+
+void LLVMAddInternalizePass(LLVMPassManagerRef PM, unsigned AllButMain) {
+ unwrap(PM)->add(createInternalizePass(AllButMain != 0));
+}
+
+
void LLVMAddRaiseAllocationsPass(LLVMPassManagerRef PM) {
// FIXME: Remove in LLVM 3.0.
}
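
Editorial note, not part of the patch: the IPO.cpp hunk adds plain C bindings for two existing passes. A short usage sketch of how client code might drive them through the C API; the header names are the stock llvm-c ones and the corresponding declarations are assumed to be added elsewhere in this commit.

#include "llvm-c/Core.h"
#include "llvm-c/Transforms/IPO.h"

static void addExtraIPOPasses(LLVMPassManagerRef PM) {
  LLVMAddIPSCCPPass(PM);           /* interprocedural sparse cond. const. prop. */
  LLVMAddInternalizePass(PM, 1);   /* non-zero: internalize all but "main" */
}
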
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/InlineAlways.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/InlineAlways.cpp
index f11ecae..ecc60ad 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/InlineAlways.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/InlineAlways.cpp
@@ -36,7 +36,7 @@ namespace {
InlineCostAnalyzer CA;
public:
// Use extremely low threshold.
- AlwaysInliner() : Inliner(&ID, -2000000000) {}
+ AlwaysInliner() : Inliner(ID, -2000000000) {}
static char ID; // Pass identification, replacement for typeid
InlineCost getInlineCost(CallSite CS) {
return CA.getInlineCost(CS, NeverInline);
@@ -45,18 +45,24 @@ namespace {
return CA.getInlineFudgeFactor(CS);
}
void resetCachedCostInfo(Function *Caller) {
- return CA.resetCachedCostInfo(Caller);
+ CA.resetCachedCostInfo(Caller);
+ }
+ void growCachedCostInfo(Function* Caller, Function* Callee) {
+ CA.growCachedCostInfo(Caller, Callee);
}
virtual bool doFinalization(CallGraph &CG) {
return removeDeadFunctions(CG, &NeverInline);
}
virtual bool doInitialization(CallGraph &CG);
+ void releaseMemory() {
+ CA.clear();
+ }
};
}
char AlwaysInliner::ID = 0;
-static RegisterPass<AlwaysInliner>
-X("always-inline", "Inliner for always_inline functions");
+INITIALIZE_PASS(AlwaysInliner, "always-inline",
+ "Inliner for always_inline functions", false, false);
Pass *llvm::createAlwaysInlinerPass() { return new AlwaysInliner(); }
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/InlineSimple.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/InlineSimple.cpp
index 598043d..9c6637d 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/InlineSimple.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/InlineSimple.cpp
@@ -33,8 +33,8 @@ namespace {
SmallPtrSet<const Function*, 16> NeverInline;
InlineCostAnalyzer CA;
public:
- SimpleInliner() : Inliner(&ID) {}
- SimpleInliner(int Threshold) : Inliner(&ID, Threshold) {}
+ SimpleInliner() : Inliner(ID) {}
+ SimpleInliner(int Threshold) : Inliner(ID, Threshold) {}
static char ID; // Pass identification, replacement for typeid
InlineCost getInlineCost(CallSite CS) {
return CA.getInlineCost(CS, NeverInline);
@@ -45,13 +45,19 @@ namespace {
void resetCachedCostInfo(Function *Caller) {
CA.resetCachedCostInfo(Caller);
}
+ void growCachedCostInfo(Function* Caller, Function* Callee) {
+ CA.growCachedCostInfo(Caller, Callee);
+ }
virtual bool doInitialization(CallGraph &CG);
+ void releaseMemory() {
+ CA.clear();
+ }
};
}
char SimpleInliner::ID = 0;
-static RegisterPass<SimpleInliner>
-X("inline", "Function Integration/Inlining");
+INITIALIZE_PASS(SimpleInliner, "inline",
+ "Function Integration/Inlining", false, false);
Pass *llvm::createFunctionInliningPass() { return new SimpleInliner(); }
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/Inliner.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/Inliner.cpp
index 752a97c..4983e8e 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -48,10 +48,10 @@ HintThreshold("inlinehint-threshold", cl::Hidden, cl::init(325),
// Threshold to use when optsize is specified (and there is no -inline-limit).
const int OptSizeThreshold = 75;
-Inliner::Inliner(void *ID)
+Inliner::Inliner(char &ID)
: CallGraphSCCPass(ID), InlineThreshold(InlineLimit) {}
-Inliner::Inliner(void *ID, int Threshold)
+Inliner::Inliner(char &ID, int Threshold)
: CallGraphSCCPass(ID), InlineThreshold(Threshold) {}
/// getAnalysisUsage - For this class, we declare that we require and preserve
@@ -73,16 +73,14 @@ InlinedArrayAllocasTy;
/// available from other functions inlined into the caller. If we are able to
/// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
-static bool InlineCallIfPossible(CallSite CS, CallGraph &CG,
- const TargetData *TD,
+static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
InlinedArrayAllocasTy &InlinedArrayAllocas) {
Function *Callee = CS.getCalledFunction();
Function *Caller = CS.getCaller();
// Try to inline the function. Get the list of static allocas that were
// inlined.
- SmallVector<AllocaInst*, 16> StaticAllocas;
- if (!InlineFunction(CS, &CG, TD, &StaticAllocas))
+ if (!InlineFunction(CS, IFI))
return false;
// If the inlined function had a higher stack protection level than the
@@ -119,9 +117,9 @@ static bool InlineCallIfPossible(CallSite CS, CallGraph &CG,
// Loop over all the allocas we have so far and see if they can be merged with
// a previously inlined alloca. If not, remember that we had it.
- for (unsigned AllocaNo = 0, e = StaticAllocas.size();
+ for (unsigned AllocaNo = 0, e = IFI.StaticAllocas.size();
AllocaNo != e; ++AllocaNo) {
- AllocaInst *AI = StaticAllocas[AllocaNo];
+ AllocaInst *AI = IFI.StaticAllocas[AllocaNo];
// Don't bother trying to merge array allocations (they will usually be
// canonicalized to be an allocation *of* an array), or allocations whose
@@ -219,8 +217,10 @@ bool Inliner::shouldInline(CallSite CS) {
Function *Caller = CS.getCaller();
int CurrentThreshold = getInlineThreshold(CS);
float FudgeFactor = getInlineFudgeFactor(CS);
- if (Cost >= (int)(CurrentThreshold * FudgeFactor)) {
+ int AdjThreshold = (int)(CurrentThreshold * FudgeFactor);
+ if (Cost >= AdjThreshold) {
DEBUG(dbgs() << " NOT Inlining: cost=" << Cost
+ << ", thres=" << AdjThreshold
<< ", Call: " << *CS.getInstruction() << "\n");
return false;
}
@@ -238,11 +238,11 @@ bool Inliner::shouldInline(CallSite CS) {
bool someOuterCallWouldNotBeInlined = false;
for (Value::use_iterator I = Caller->use_begin(), E =Caller->use_end();
I != E; ++I) {
- CallSite CS2 = CallSite::get(*I);
+ CallSite CS2(*I);
// If this isn't a call to Caller (it could be some other sort
// of reference) skip it.
- if (CS2.getInstruction() == 0 || CS2.getCalledFunction() != Caller)
+ if (!CS2 || CS2.getCalledFunction() != Caller)
continue;
InlineCost IC2 = getInlineCost(CS2);
@@ -285,18 +285,34 @@ bool Inliner::shouldInline(CallSite CS) {
}
DEBUG(dbgs() << " Inlining: cost=" << Cost
+ << ", thres=" << AdjThreshold
<< ", Call: " << *CS.getInstruction() << '\n');
return true;
}
-bool Inliner::runOnSCC(std::vector<CallGraphNode*> &SCC) {
+/// InlineHistoryIncludes - Return true if the specified inline history ID
+/// indicates an inline history that includes the specified function.
+static bool InlineHistoryIncludes(Function *F, int InlineHistoryID,
+ const SmallVectorImpl<std::pair<Function*, int> > &InlineHistory) {
+ while (InlineHistoryID != -1) {
+ assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
+ "Invalid inline history ID");
+ if (InlineHistory[InlineHistoryID].first == F)
+ return true;
+ InlineHistoryID = InlineHistory[InlineHistoryID].second;
+ }
+ return false;
+}
+
+
+bool Inliner::runOnSCC(CallGraphSCC &SCC) {
CallGraph &CG = getAnalysis<CallGraph>();
const TargetData *TD = getAnalysisIfAvailable<TargetData>();
SmallPtrSet<Function*, 8> SCCFunctions;
DEBUG(dbgs() << "Inliner visiting SCC:");
- for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
- Function *F = SCC[i]->getFunction();
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
+ Function *F = (*I)->getFunction();
if (F) SCCFunctions.insert(F);
DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
}
@@ -304,18 +320,24 @@ bool Inliner::runOnSCC(std::vector<CallGraphNode*> &SCC) {
// Scan through and identify all call sites ahead of time so that we only
// inline call sites in the original functions, not call sites that result
// from inlining other functions.
- SmallVector<CallSite, 16> CallSites;
-
- for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
- Function *F = SCC[i]->getFunction();
+ SmallVector<std::pair<CallSite, int>, 16> CallSites;
+
+ // When inlining a callee produces new call sites, we want to keep track of
+ // the fact that they were inlined from the callee. This allows us to avoid
+ // infinite inlining in some obscure cases. To represent this, we use an
+ // index into the InlineHistory vector.
+ SmallVector<std::pair<Function*, int>, 8> InlineHistory;
+
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
+ Function *F = (*I)->getFunction();
if (!F) continue;
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
- CallSite CS = CallSite::get(I);
+ CallSite CS(cast<Value>(I));
// If this isn't a call, or it is a call to an intrinsic, it can
// never be inlined.
- if (CS.getInstruction() == 0 || isa<IntrinsicInst>(I))
+ if (!CS || isa<IntrinsicInst>(I))
continue;
// If this is a direct call to an external function, we can never inline
@@ -324,22 +346,27 @@ bool Inliner::runOnSCC(std::vector<CallGraphNode*> &SCC) {
if (CS.getCalledFunction() && CS.getCalledFunction()->isDeclaration())
continue;
- CallSites.push_back(CS);
+ CallSites.push_back(std::make_pair(CS, -1));
}
}
DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");
+ // If there are no calls in this function, exit early.
+ if (CallSites.empty())
+ return false;
+
// Now that we have all of the call sites, move the ones to functions in the
// current SCC to the end of the list.
unsigned FirstCallInSCC = CallSites.size();
for (unsigned i = 0; i < FirstCallInSCC; ++i)
- if (Function *F = CallSites[i].getCalledFunction())
+ if (Function *F = CallSites[i].first.getCalledFunction())
if (SCCFunctions.count(F))
std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);
InlinedArrayAllocasTy InlinedArrayAllocas;
+ InlineFunctionInfo InlineInfo(&CG, TD);
// Now that we have all of the call sites, loop over them and inline them if
// it looks profitable to do so.
@@ -350,7 +377,7 @@ bool Inliner::runOnSCC(std::vector<CallGraphNode*> &SCC) {
// Iterate over the outer loop because inlining functions can cause indirect
// calls to become direct calls.
for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
- CallSite CS = CallSites[CSi];
+ CallSite CS = CallSites[CSi].first;
Function *Caller = CS.getCaller();
Function *Callee = CS.getCalledFunction();
@@ -366,19 +393,50 @@ bool Inliner::runOnSCC(std::vector<CallGraphNode*> &SCC) {
CG[Caller]->removeCallEdgeFor(CS);
CS.getInstruction()->eraseFromParent();
++NumCallsDeleted;
+ // Update the cached cost info with the missing call
+ growCachedCostInfo(Caller, NULL);
} else {
// We can only inline direct calls to non-declarations.
if (Callee == 0 || Callee->isDeclaration()) continue;
+ // If this call site was obtained by inlining another function, verify
+ // that the include path for the function did not include the callee
+ // itself. If so, we'd be recursively inlining the same function,
+ // which would provide the same callsites, which would cause us to
+ // infinitely inline.
+ int InlineHistoryID = CallSites[CSi].second;
+ if (InlineHistoryID != -1 &&
+ InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
+ continue;
+
+
// If the policy determines that we should inline this function,
// try to do so.
if (!shouldInline(CS))
continue;
- // Attempt to inline the function...
- if (!InlineCallIfPossible(CS, CG, TD, InlinedArrayAllocas))
+ // Attempt to inline the function.
+ if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas))
continue;
++NumInlined;
+
+ // If inlining this function gave us any new call sites, throw them
+ // onto our worklist to process. They are useful inline candidates.
+ if (!InlineInfo.InlinedCalls.empty()) {
+ // Create a new inline history entry for this, so that we remember
+ // that these new callsites came about due to inlining Callee.
+ int NewHistoryID = InlineHistory.size();
+ InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));
+
+ for (unsigned i = 0, e = InlineInfo.InlinedCalls.size();
+ i != e; ++i) {
+ Value *Ptr = InlineInfo.InlinedCalls[i];
+ CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
+ }
+ }
+
+ // Update the cached cost info with the inlined call.
+ growCachedCostInfo(Caller, Callee);
}
// If we inlined or deleted the last possible call site to the function,
@@ -404,18 +462,13 @@ bool Inliner::runOnSCC(std::vector<CallGraphNode*> &SCC) {
delete CG.removeFunctionFromModule(CalleeNode);
++NumDeleted;
}
-
- // Remove any cached cost info for this caller, as inlining the
- // callee has increased the size of the caller (which may be the
- // same as the callee).
- resetCachedCostInfo(Caller);
// Remove this call site from the list. If possible, use
// swap/pop_back for efficiency, but do not use it if doing so would
// move a call site to a function in this SCC before the
// 'FirstCallInSCC' barrier.
- if (SCC.size() == 1) {
- std::swap(CallSites[CSi], CallSites.back());
+ if (SCC.isSingular()) {
+ CallSites[CSi] = CallSites.back();
CallSites.pop_back();
} else {
CallSites.erase(CallSites.begin()+CSi);
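
The key addition in the Inliner rewrite is the inline history: every call site produced by inlining carries an index into InlineHistory, and a candidate is skipped if its callee already appears anywhere on that chain, which is what keeps mutually recursive functions from being inlined forever. A standalone sketch of the walk, with std:: containers standing in for SmallVector:

    #include <cassert>
    #include <string>
    #include <utility>
    #include <vector>

    // History entries are (inlined function, index of the parent entry); -1 marks
    // a call site that was present in the original function.
    typedef std::vector<std::pair<std::string, int> > InlineHistoryTy;

    static bool historyIncludes(const std::string &Callee, int ID,
                                const InlineHistoryTy &History) {
      while (ID != -1) {
        assert(unsigned(ID) < History.size() && "invalid inline history ID");
        if (History[ID].first == Callee)
          return true;           // Callee already on this inline path: refuse it
        ID = History[ID].second; // walk up to the call site we inlined from
      }
      return false;
    }
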
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/Internalize.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/Internalize.cpp
index 3d31932..a1d919f 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/Internalize.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/Internalize.cpp
@@ -63,11 +63,11 @@ namespace {
} // end anonymous namespace
char InternalizePass::ID = 0;
-static RegisterPass<InternalizePass>
-X("internalize", "Internalize Global Symbols");
+INITIALIZE_PASS(InternalizePass, "internalize",
+ "Internalize Global Symbols", false, false);
InternalizePass::InternalizePass(bool AllButMain)
- : ModulePass(&ID), AllButMain(AllButMain){
+ : ModulePass(ID), AllButMain(AllButMain){
if (!APIFile.empty()) // If a filename is specified, use it.
LoadFile(APIFile.c_str());
if (!APIList.empty()) // If a list is specified, use it as well.
@@ -75,7 +75,7 @@ InternalizePass::InternalizePass(bool AllButMain)
}
InternalizePass::InternalizePass(const std::vector<const char *>&exportList)
- : ModulePass(&ID), AllButMain(false){
+ : ModulePass(ID), AllButMain(false){
for(std::vector<const char *>::const_iterator itr = exportList.begin();
itr != exportList.end(); itr++) {
ExternalNames.insert(*itr);
@@ -156,6 +156,8 @@ bool InternalizePass::runOnModule(Module &M) {
for (Module::global_iterator I = M.global_begin(), E = M.global_end();
I != E; ++I)
if (!I->isDeclaration() && !I->hasLocalLinkage() &&
+ // Available externally is really just a "declaration with a body".
+ !I->hasAvailableExternallyLinkage() &&
!ExternalNames.count(I->getName())) {
I->setLinkage(GlobalValue::InternalLinkage);
Changed = true;
@@ -167,6 +169,8 @@ bool InternalizePass::runOnModule(Module &M) {
for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
I != E; ++I)
if (!I->isDeclaration() && !I->hasInternalLinkage() &&
+ // Available externally is really just a "declaration with a body".
+ !I->hasAvailableExternallyLinkage() &&
!ExternalNames.count(I->getName())) {
I->setLinkage(GlobalValue::InternalLinkage);
Changed = true;
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/LoopExtractor.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/LoopExtractor.cpp
index cb81330..f88dff6 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/LoopExtractor.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/LoopExtractor.cpp
@@ -37,7 +37,7 @@ namespace {
unsigned NumLoops;
explicit LoopExtractor(unsigned numLoops = ~0)
- : LoopPass(&ID), NumLoops(numLoops) {}
+ : LoopPass(ID), NumLoops(numLoops) {}
virtual bool runOnLoop(Loop *L, LPPassManager &LPM);
@@ -50,8 +50,8 @@ namespace {
}
char LoopExtractor::ID = 0;
-static RegisterPass<LoopExtractor>
-X("loop-extract", "Extract loops into new functions");
+INITIALIZE_PASS(LoopExtractor, "loop-extract",
+ "Extract loops into new functions", false, false);
namespace {
/// SingleLoopExtractor - For bugpoint.
@@ -62,8 +62,8 @@ namespace {
} // End anonymous namespace
char SingleLoopExtractor::ID = 0;
-static RegisterPass<SingleLoopExtractor>
-Y("loop-extract-single", "Extract at most one loop into a new function");
+INITIALIZE_PASS(SingleLoopExtractor, "loop-extract-single",
+ "Extract at most one loop into a new function", false, false);
// createLoopExtractorPass - This pass extracts all natural loops from the
// program into a function if it can.
@@ -147,27 +147,26 @@ namespace {
std::vector<std::pair<std::string, std::string> > BlocksToNotExtractByName;
public:
static char ID; // Pass identification, replacement for typeid
- explicit BlockExtractorPass(const std::vector<BasicBlock*> &B)
- : ModulePass(&ID), BlocksToNotExtract(B) {
+ BlockExtractorPass() : ModulePass(ID) {
if (!BlockFile.empty())
LoadFile(BlockFile.c_str());
}
- BlockExtractorPass() : ModulePass(&ID) {}
bool runOnModule(Module &M);
};
}
char BlockExtractorPass::ID = 0;
-static RegisterPass<BlockExtractorPass>
-XX("extract-blocks", "Extract Basic Blocks From Module (for bugpoint use)");
+INITIALIZE_PASS(BlockExtractorPass, "extract-blocks",
+ "Extract Basic Blocks From Module (for bugpoint use)",
+ false, false);
// createBlockExtractorPass - This pass extracts all blocks (except those
// specified in the argument list) from the functions in the module.
//
-ModulePass *llvm::createBlockExtractorPass(const std::vector<BasicBlock*> &BTNE)
+ModulePass *llvm::createBlockExtractorPass()
{
- return new BlockExtractorPass(BTNE);
+ return new BlockExtractorPass();
}
void BlockExtractorPass::LoadFile(const char *Filename) {
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/LowerSetJmp.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/LowerSetJmp.cpp
index 4d61e83..6c715de 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/LowerSetJmp.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/LowerSetJmp.cpp
@@ -42,6 +42,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Transforms/Utils/Local.h"
@@ -108,7 +109,7 @@ namespace {
bool IsTransformableFunction(StringRef Name);
public:
static char ID; // Pass identification, replacement for typeid
- LowerSetJmp() : ModulePass(&ID) {}
+ LowerSetJmp() : ModulePass(ID) {}
void visitCallInst(CallInst& CI);
void visitInvokeInst(InvokeInst& II);
@@ -121,7 +122,7 @@ namespace {
} // end anonymous namespace
char LowerSetJmp::ID = 0;
-static RegisterPass<LowerSetJmp> X("lowersetjmp", "Lower Set Jump");
+INITIALIZE_PASS(LowerSetJmp, "lowersetjmp", "Lower Set Jump", false, false);
// run - Run the transformation on the program. We grab the function
// prototypes for longjmp and setjmp. If they are used in the program,
@@ -262,8 +263,8 @@ void LowerSetJmp::TransformLongJmpCall(CallInst* Inst)
// char*. It returns "void", so it doesn't need to replace any of
// Inst's uses and doesn't get a name.
CastInst* CI =
- new BitCastInst(Inst->getOperand(1), SBPTy, "LJBuf", Inst);
- Value *Args[] = { CI, Inst->getOperand(2) };
+ new BitCastInst(Inst->getArgOperand(0), SBPTy, "LJBuf", Inst);
+ Value *Args[] = { CI, Inst->getArgOperand(1) };
CallInst::Create(ThrowLongJmp, Args, Args + 2, "", Inst);
SwitchValuePair& SVP = SwitchValMap[Inst->getParent()->getParent()];
@@ -378,7 +379,7 @@ void LowerSetJmp::TransformSetJmpCall(CallInst* Inst)
const Type* SBPTy =
Type::getInt8PtrTy(Inst->getContext());
CastInst* BufPtr =
- new BitCastInst(Inst->getOperand(1), SBPTy, "SBJmpBuf", Inst);
+ new BitCastInst(Inst->getArgOperand(0), SBPTy, "SBJmpBuf", Inst);
Value *Args[] = {
GetSetJmpMap(Func), BufPtr,
ConstantInt::get(Type::getInt32Ty(Inst->getContext()), SetJmpIDMap[Func]++)
@@ -405,12 +406,14 @@ void LowerSetJmp::TransformSetJmpCall(CallInst* Inst)
// Loop over all of the uses of instruction. If any of them are after the
// call, "spill" the value to the stack.
for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
- UI != E; ++UI)
- if (cast<Instruction>(*UI)->getParent() != ABlock ||
- InstrsAfterCall.count(cast<Instruction>(*UI))) {
+ UI != E; ++UI) {
+ User *U = *UI;
+ if (cast<Instruction>(U)->getParent() != ABlock ||
+ InstrsAfterCall.count(cast<Instruction>(U))) {
DemoteRegToStack(*II);
break;
}
+ }
InstrsAfterCall.clear();
// Change the setjmp call into a branch statement. We'll remove the
@@ -473,7 +476,8 @@ void LowerSetJmp::visitCallInst(CallInst& CI)
// Construct the new "invoke" instruction.
TerminatorInst* Term = OldBB->getTerminator();
- std::vector<Value*> Params(CI.op_begin() + 1, CI.op_end());
+ CallSite CS(&CI);
+ std::vector<Value*> Params(CS.arg_begin(), CS.arg_end());
InvokeInst* II =
InvokeInst::Create(CI.getCalledValue(), NewBB, PrelimBBMap[Func],
Params.begin(), Params.end(), CI.getName(), Term);
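
The getOperand(1)/getOperand(2) rewrites above follow the 2.8 reshuffle of call operands, where the callee is no longer the first operand, so argument access goes through the argument-aware accessors instead of raw operand indices. A small sketch, assuming 2.8 headers:

    #include "llvm/Instructions.h"
    using namespace llvm;

    // Fetch the jmp_buf argument of a setjmp/longjmp call without depending on
    // the underlying operand layout.
    static Value *getJmpBufArg(CallInst &CI) {
      return CI.getNumArgOperands() > 0 ? CI.getArgOperand(0) : 0;
    }
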
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/MergeFunctions.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/MergeFunctions.cpp
index b07e22c..5d838f9 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/MergeFunctions.cpp
@@ -17,33 +17,39 @@
// important that the hash function be high quality. The equality comparison
// iterates through each instruction in each basic block.
//
-// When a match is found, the functions are folded. We can only fold two
-// functions when we know that the definition of one of them is not
-// overridable.
+// When a match is found the functions are folded. If both functions are
+// overridable, we move the functionality into a new internal function and
+// leave two overridable thunks to it.
//
//===----------------------------------------------------------------------===//
//
// Future work:
//
-// * fold vector<T*>::push_back and vector<S*>::push_back.
-//
-// These two functions have different types, but in a way that doesn't matter
-// to us. As long as we never see an S or T itself, using S* and S** is the
-// same as using a T* and T**.
-//
// * virtual functions.
//
// Many functions have their address taken by the virtual function table for
// the object they belong to. However, as long as it's only used for a lookup
-// and call, this is irrelevant, and we'd like to fold such implementations.
+// and call, this is irrelevant, and we'd like to fold such functions.
+//
+// * switch from n^2 pair-wise comparisons to an n-way comparison for each
+// bucket.
+//
+// * be smarter about bitcasts.
+//
+// In order to fold functions, we will sometimes add either bitcast instructions
+// or bitcast constant expressions. Unfortunately, this can confound further
+// analysis since the two functions differ where one has a bitcast and the
+// other doesn't. We should learn to look through bitcasts.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "mergefunc"
#include "llvm/Transforms/IPO.h"
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Constants.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
@@ -53,66 +59,117 @@
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/IRBuilder.h"
+#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
-#include <map>
+#include "llvm/Target/TargetData.h"
#include <vector>
using namespace llvm;
STATISTIC(NumFunctionsMerged, "Number of functions merged");
namespace {
- struct MergeFunctions : public ModulePass {
- static char ID; // Pass identification, replacement for typeid
- MergeFunctions() : ModulePass(&ID) {}
+ /// MergeFunctions finds functions which will generate identical machine code,
+ /// by considering all pointer types to be equivalent. Once identified,
+ /// MergeFunctions will fold them by replacing a call to one to a call to a
+ /// bitcast of the other.
+ ///
+ class MergeFunctions : public ModulePass {
+ public:
+ static char ID;
+ MergeFunctions() : ModulePass(ID) {}
bool runOnModule(Module &M);
+
+ private:
+ /// MergeTwoFunctions - Merge two equivalent functions. Upon completion, G
+ /// may be deleted, or may be converted into a thunk. In either case, it
+ /// should never be visited again.
+ void MergeTwoFunctions(Function *F, Function *G) const;
+
+ /// WriteThunk - Replace G with a simple tail call to bitcast(F). Also
+ /// replace direct uses of G with bitcast(F).
+ void WriteThunk(Function *F, Function *G) const;
+
+ TargetData *TD;
};
}
char MergeFunctions::ID = 0;
-static RegisterPass<MergeFunctions>
-X("mergefunc", "Merge Functions");
+INITIALIZE_PASS(MergeFunctions, "mergefunc", "Merge Functions", false, false);
ModulePass *llvm::createMergeFunctionsPass() {
return new MergeFunctions();
}
-// ===----------------------------------------------------------------------===
-// Comparison of functions
-// ===----------------------------------------------------------------------===
+namespace {
+/// FunctionComparator - Compares two functions to determine whether or not
+/// they will generate machine code with the same behaviour. TargetData is
+/// used if available. The comparator always fails conservatively (erring on the
+/// side of claiming that two functions are different).
+class FunctionComparator {
+public:
+ FunctionComparator(const TargetData *TD, const Function *F1,
+ const Function *F2)
+ : F1(F1), F2(F2), TD(TD), IDMap1Count(0), IDMap2Count(0) {}
+
+ /// Compare - test whether the two functions have equivalent behaviour.
+ bool Compare();
+
+private:
+ /// Compare - test whether two basic blocks have equivalent behaviour.
+ bool Compare(const BasicBlock *BB1, const BasicBlock *BB2);
+
+ /// Enumerate - Assign or look up previously assigned numbers for the two
+ /// values, and return whether the numbers are equal. Numbers are assigned in
+ /// the order visited.
+ bool Enumerate(const Value *V1, const Value *V2);
+
+ /// isEquivalentOperation - Compare two Instructions for equivalence, similar
+ /// to Instruction::isSameOperationAs but with modifications to the type
+ /// comparison.
+ bool isEquivalentOperation(const Instruction *I1,
+ const Instruction *I2) const;
+
+ /// isEquivalentGEP - Compare two GEPs for equivalent pointer arithmetic.
+ bool isEquivalentGEP(const GEPOperator *GEP1, const GEPOperator *GEP2);
+ bool isEquivalentGEP(const GetElementPtrInst *GEP1,
+ const GetElementPtrInst *GEP2) {
+ return isEquivalentGEP(cast<GEPOperator>(GEP1), cast<GEPOperator>(GEP2));
+ }
-static unsigned long hash(const Function *F) {
- const FunctionType *FTy = F->getFunctionType();
+ /// isEquivalentType - Compare two Types, treating all pointer types as equal.
+ bool isEquivalentType(const Type *Ty1, const Type *Ty2) const;
- FoldingSetNodeID ID;
- ID.AddInteger(F->size());
- ID.AddInteger(F->getCallingConv());
- ID.AddBoolean(F->hasGC());
- ID.AddBoolean(FTy->isVarArg());
- ID.AddInteger(FTy->getReturnType()->getTypeID());
- for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
- ID.AddInteger(FTy->getParamType(i)->getTypeID());
- return ID.ComputeHash();
-}
+ // The two functions undergoing comparison.
+ const Function *F1, *F2;
-/// IgnoreBitcasts - given a bitcast, returns the first non-bitcast found by
-/// walking the chain of cast operands. Otherwise, returns the argument.
-static Value* IgnoreBitcasts(Value *V) {
- while (BitCastInst *BC = dyn_cast<BitCastInst>(V))
- V = BC->getOperand(0);
+ const TargetData *TD;
- return V;
+ typedef DenseMap<const Value *, unsigned long> IDMap;
+ IDMap Map1, Map2;
+ unsigned long IDMap1Count, IDMap2Count;
+};
}
-/// isEquivalentType - any two pointers are equivalent. Otherwise, standard
-/// type equivalence rules apply.
-static bool isEquivalentType(const Type *Ty1, const Type *Ty2) {
+/// isEquivalentType - any two pointers in the same address space are
+/// equivalent. Otherwise, standard type equivalence rules apply.
+bool FunctionComparator::isEquivalentType(const Type *Ty1,
+ const Type *Ty2) const {
if (Ty1 == Ty2)
return true;
if (Ty1->getTypeID() != Ty2->getTypeID())
return false;
switch(Ty1->getTypeID()) {
+ default:
+ llvm_unreachable("Unknown type!");
+ // Fall through in Release mode.
+ case Type::IntegerTyID:
+ case Type::OpaqueTyID:
+ // Ty1 == Ty2 would have returned true earlier.
+ return false;
+
case Type::VoidTyID:
case Type::FloatTyID:
case Type::DoubleTyID:
@@ -123,15 +180,6 @@ static bool isEquivalentType(const Type *Ty1, const Type *Ty2) {
case Type::MetadataTyID:
return true;
- case Type::IntegerTyID:
- case Type::OpaqueTyID:
- // Ty1 == Ty2 would have returned true earlier.
- return false;
-
- default:
- llvm_unreachable("Unknown type!");
- return false;
-
case Type::PointerTyID: {
const PointerType *PTy1 = cast<PointerType>(Ty1);
const PointerType *PTy2 = cast<PointerType>(Ty2);
@@ -171,11 +219,18 @@ static bool isEquivalentType(const Type *Ty1, const Type *Ty2) {
return true;
}
- case Type::ArrayTyID:
+ case Type::ArrayTyID: {
+ const ArrayType *ATy1 = cast<ArrayType>(Ty1);
+ const ArrayType *ATy2 = cast<ArrayType>(Ty2);
+ return ATy1->getNumElements() == ATy2->getNumElements() &&
+ isEquivalentType(ATy1->getElementType(), ATy2->getElementType());
+ }
+
case Type::VectorTyID: {
- const SequentialType *STy1 = cast<SequentialType>(Ty1);
- const SequentialType *STy2 = cast<SequentialType>(Ty2);
- return isEquivalentType(STy1->getElementType(), STy2->getElementType());
+ const VectorType *VTy1 = cast<VectorType>(Ty1);
+ const VectorType *VTy2 = cast<VectorType>(Ty2);
+ return VTy1->getNumElements() == VTy2->getNumElements() &&
+ isEquivalentType(VTy1->getElementType(), VTy2->getElementType());
}
}
}
@@ -183,8 +238,8 @@ static bool isEquivalentType(const Type *Ty1, const Type *Ty2) {
/// isEquivalentOperation - determine whether the two operations are the same
/// except that pointer-to-A and pointer-to-B are equivalent. This should be
/// kept in sync with Instruction::isSameOperationAs.
-static bool
-isEquivalentOperation(const Instruction *I1, const Instruction *I2) {
+bool FunctionComparator::isEquivalentOperation(const Instruction *I1,
+ const Instruction *I2) const {
if (I1->getOpcode() != I2->getOpcode() ||
I1->getNumOperands() != I2->getNumOperands() ||
!isEquivalentType(I1->getType(), I2->getType()) ||
@@ -236,428 +291,361 @@ isEquivalentOperation(const Instruction *I1, const Instruction *I2) {
return true;
}
-static bool compare(const Value *V, const Value *U) {
- assert(!isa<BasicBlock>(V) && !isa<BasicBlock>(U) &&
- "Must not compare basic blocks.");
-
- assert(isEquivalentType(V->getType(), U->getType()) &&
- "Two of the same operation have operands of different type.");
+/// isEquivalentGEP - determine whether two GEP operations perform the same
+/// underlying arithmetic.
+bool FunctionComparator::isEquivalentGEP(const GEPOperator *GEP1,
+ const GEPOperator *GEP2) {
+ // When we have target data, we can reduce the GEP down to the value in bytes
+ // added to the address.
+ if (TD && GEP1->hasAllConstantIndices() && GEP2->hasAllConstantIndices()) {
+ SmallVector<Value *, 8> Indices1(GEP1->idx_begin(), GEP1->idx_end());
+ SmallVector<Value *, 8> Indices2(GEP2->idx_begin(), GEP2->idx_end());
+ uint64_t Offset1 = TD->getIndexedOffset(GEP1->getPointerOperandType(),
+ Indices1.data(), Indices1.size());
+ uint64_t Offset2 = TD->getIndexedOffset(GEP2->getPointerOperandType(),
+ Indices2.data(), Indices2.size());
+ return Offset1 == Offset2;
+ }
- // TODO: If the constant is an expression of F, we should accept that it's
- // equal to the same expression in terms of G.
- if (isa<Constant>(V))
- return V == U;
+ if (GEP1->getPointerOperand()->getType() !=
+ GEP2->getPointerOperand()->getType())
+ return false;
- // The caller has ensured that ValueMap[V] != U. Since Arguments are
- // pre-loaded into the ValueMap, and Instructions are added as we go, we know
- // that this can only be a mis-match.
- if (isa<Instruction>(V) || isa<Argument>(V))
+ if (GEP1->getNumOperands() != GEP2->getNumOperands())
return false;
- if (isa<InlineAsm>(V) && isa<InlineAsm>(U)) {
- const InlineAsm *IAF = cast<InlineAsm>(V);
- const InlineAsm *IAG = cast<InlineAsm>(U);
- return IAF->getAsmString() == IAG->getAsmString() &&
- IAF->getConstraintString() == IAG->getConstraintString();
+ for (unsigned i = 0, e = GEP1->getNumOperands(); i != e; ++i) {
+ if (!Enumerate(GEP1->getOperand(i), GEP2->getOperand(i)))
+ return false;
}
- return false;
+ return true;
}
-static bool equals(const BasicBlock *BB1, const BasicBlock *BB2,
- DenseMap<const Value *, const Value *> &ValueMap,
- DenseMap<const Value *, const Value *> &SpeculationMap) {
- // Speculatively add it anyways. If it's false, we'll notice a difference
- // later, and this won't matter.
- ValueMap[BB1] = BB2;
+/// Enumerate - Compare two values used by the two functions under pair-wise
+/// comparison. If this is the first time the values are seen, they're added to
+/// the mapping so that we will detect mismatches on next use.
+bool FunctionComparator::Enumerate(const Value *V1, const Value *V2) {
+ // Check for function @f1 referring to itself and function @f2 referring to
+ // itself, or referring to each other, or both referring to either of them.
+ // They're all equivalent if the two functions are otherwise equivalent.
+ if (V1 == F1 && V2 == F2)
+ return true;
+ if (V1 == F2 && V2 == F1)
+ return true;
- BasicBlock::const_iterator FI = BB1->begin(), FE = BB1->end();
- BasicBlock::const_iterator GI = BB2->begin(), GE = BB2->end();
+ // TODO: constant expressions with GEP or references to F1 or F2.
+ if (isa<Constant>(V1))
+ return V1 == V2;
- do {
- if (isa<BitCastInst>(FI)) {
- ++FI;
- continue;
- }
- if (isa<BitCastInst>(GI)) {
- ++GI;
- continue;
- }
+ if (isa<InlineAsm>(V1) && isa<InlineAsm>(V2)) {
+ const InlineAsm *IA1 = cast<InlineAsm>(V1);
+ const InlineAsm *IA2 = cast<InlineAsm>(V2);
+ return IA1->getAsmString() == IA2->getAsmString() &&
+ IA1->getConstraintString() == IA2->getConstraintString();
+ }
- if (!isEquivalentOperation(FI, GI))
- return false;
+ unsigned long &ID1 = Map1[V1];
+ if (!ID1)
+ ID1 = ++IDMap1Count;
- if (isa<GetElementPtrInst>(FI)) {
- const GetElementPtrInst *GEPF = cast<GetElementPtrInst>(FI);
- const GetElementPtrInst *GEPG = cast<GetElementPtrInst>(GI);
- if (GEPF->hasAllZeroIndices() && GEPG->hasAllZeroIndices()) {
- // It's effectively a bitcast.
- ++FI, ++GI;
- continue;
- }
+ unsigned long &ID2 = Map2[V2];
+ if (!ID2)
+ ID2 = ++IDMap2Count;
- // TODO: we only really care about the elements before the index
- if (FI->getOperand(0)->getType() != GI->getOperand(0)->getType())
- return false;
- }
+ return ID1 == ID2;
+}
- if (ValueMap[FI] == GI) {
- ++FI, ++GI;
- continue;
- }
+/// Compare - test whether two basic blocks have equivalent behaviour.
+bool FunctionComparator::Compare(const BasicBlock *BB1, const BasicBlock *BB2) {
+ BasicBlock::const_iterator F1I = BB1->begin(), F1E = BB1->end();
+ BasicBlock::const_iterator F2I = BB2->begin(), F2E = BB2->end();
- if (ValueMap[FI] != NULL)
+ do {
+ if (!Enumerate(F1I, F2I))
return false;
- for (unsigned i = 0, e = FI->getNumOperands(); i != e; ++i) {
- Value *OpF = IgnoreBitcasts(FI->getOperand(i));
- Value *OpG = IgnoreBitcasts(GI->getOperand(i));
-
- if (ValueMap[OpF] == OpG)
- continue;
+ if (const GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(F1I)) {
+ const GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(F2I);
+ if (!GEP2)
+ return false;
- if (ValueMap[OpF] != NULL)
+ if (!Enumerate(GEP1->getPointerOperand(), GEP2->getPointerOperand()))
return false;
- if (OpF->getValueID() != OpG->getValueID() ||
- !isEquivalentType(OpF->getType(), OpG->getType()))
+ if (!isEquivalentGEP(GEP1, GEP2))
+ return false;
+ } else {
+ if (!isEquivalentOperation(F1I, F2I))
return false;
- if (isa<PHINode>(FI)) {
- if (SpeculationMap[OpF] == NULL)
- SpeculationMap[OpF] = OpG;
- else if (SpeculationMap[OpF] != OpG)
- return false;
- continue;
- } else if (isa<BasicBlock>(OpF)) {
- assert(isa<TerminatorInst>(FI) &&
- "BasicBlock referenced by non-Terminator non-PHI");
- // This call changes the ValueMap, hence we can't use
- // Value *& = ValueMap[...]
- if (!equals(cast<BasicBlock>(OpF), cast<BasicBlock>(OpG), ValueMap,
- SpeculationMap))
+ assert(F1I->getNumOperands() == F2I->getNumOperands());
+ for (unsigned i = 0, e = F1I->getNumOperands(); i != e; ++i) {
+ Value *OpF1 = F1I->getOperand(i);
+ Value *OpF2 = F2I->getOperand(i);
+
+ if (!Enumerate(OpF1, OpF2))
return false;
- } else {
- if (!compare(OpF, OpG))
+
+ if (OpF1->getValueID() != OpF2->getValueID() ||
+ !isEquivalentType(OpF1->getType(), OpF2->getType()))
return false;
}
-
- ValueMap[OpF] = OpG;
}
- ValueMap[FI] = GI;
- ++FI, ++GI;
- } while (FI != FE && GI != GE);
+ ++F1I, ++F2I;
+ } while (F1I != F1E && F2I != F2E);
- return FI == FE && GI == GE;
+ return F1I == F1E && F2I == F2E;
}
-static bool equals(const Function *F, const Function *G) {
+/// Compare - test whether the two functions have equivalent behaviour.
+bool FunctionComparator::Compare() {
// We need to recheck everything, but check the things that weren't included
// in the hash first.
- if (F->getAttributes() != G->getAttributes())
+ if (F1->getAttributes() != F2->getAttributes())
return false;
- if (F->hasGC() != G->hasGC())
+ if (F1->hasGC() != F2->hasGC())
return false;
- if (F->hasGC() && F->getGC() != G->getGC())
+ if (F1->hasGC() && F1->getGC() != F2->getGC())
return false;
- if (F->hasSection() != G->hasSection())
+ if (F1->hasSection() != F2->hasSection())
return false;
- if (F->hasSection() && F->getSection() != G->getSection())
+ if (F1->hasSection() && F1->getSection() != F2->getSection())
return false;
- if (F->isVarArg() != G->isVarArg())
+ if (F1->isVarArg() != F2->isVarArg())
return false;
// TODO: if it's internal and only used in direct calls, we could handle this
// case too.
- if (F->getCallingConv() != G->getCallingConv())
+ if (F1->getCallingConv() != F2->getCallingConv())
return false;
- if (!isEquivalentType(F->getFunctionType(), G->getFunctionType()))
+ if (!isEquivalentType(F1->getFunctionType(), F2->getFunctionType()))
return false;
- DenseMap<const Value *, const Value *> ValueMap;
- DenseMap<const Value *, const Value *> SpeculationMap;
- ValueMap[F] = G;
-
- assert(F->arg_size() == G->arg_size() &&
+ assert(F1->arg_size() == F2->arg_size() &&
"Identical functions have a different number of args.");
- for (Function::const_arg_iterator fi = F->arg_begin(), gi = G->arg_begin(),
- fe = F->arg_end(); fi != fe; ++fi, ++gi)
- ValueMap[fi] = gi;
+ // Visit the arguments so that they get enumerated in the order they're
+ // passed in.
+ for (Function::const_arg_iterator f1i = F1->arg_begin(),
+ f2i = F2->arg_begin(), f1e = F1->arg_end(); f1i != f1e; ++f1i, ++f2i) {
+ if (!Enumerate(f1i, f2i))
+ llvm_unreachable("Arguments repeat");
+ }
- if (!equals(&F->getEntryBlock(), &G->getEntryBlock(), ValueMap,
- SpeculationMap))
- return false;
+ // We do a CFG-ordered walk since the actual ordering of the blocks in the
+ // linked list is immaterial. Our walk starts at the entry block for both
+ // functions, then takes each block from each terminator in order. As an
+ // artifact, this also means that unreachable blocks are ignored.
+ SmallVector<const BasicBlock *, 8> F1BBs, F2BBs;
+ SmallSet<const BasicBlock *, 128> VisitedBBs; // in terms of F1.
+
+ F1BBs.push_back(&F1->getEntryBlock());
+ F2BBs.push_back(&F2->getEntryBlock());
- for (DenseMap<const Value *, const Value *>::iterator
- I = SpeculationMap.begin(), E = SpeculationMap.end(); I != E; ++I) {
- if (ValueMap[I->first] != I->second)
+ VisitedBBs.insert(F1BBs[0]);
+ while (!F1BBs.empty()) {
+ const BasicBlock *F1BB = F1BBs.pop_back_val();
+ const BasicBlock *F2BB = F2BBs.pop_back_val();
+
+ if (!Enumerate(F1BB, F2BB) || !Compare(F1BB, F2BB))
return false;
- }
- return true;
-}
+ const TerminatorInst *F1TI = F1BB->getTerminator();
+ const TerminatorInst *F2TI = F2BB->getTerminator();
-// ===----------------------------------------------------------------------===
-// Folding of functions
-// ===----------------------------------------------------------------------===
-
-// Cases:
-// * F is external strong, G is external strong:
-// turn G into a thunk to F (1)
-// * F is external strong, G is external weak:
-// turn G into a thunk to F (1)
-// * F is external weak, G is external weak:
-// unfoldable
-// * F is external strong, G is internal:
-// address of G taken:
-// turn G into a thunk to F (1)
-// address of G not taken:
-// make G an alias to F (2)
-// * F is internal, G is external weak
-// address of F is taken:
-// turn G into a thunk to F (1)
-// address of F is not taken:
-// make G an alias of F (2)
-// * F is internal, G is internal:
-// address of F and G are taken:
-// turn G into a thunk to F (1)
-// address of G is not taken:
-// make G an alias to F (2)
-//
-// alias requires linkage == (external,local,weak) fallback to creating a thunk
-// external means 'externally visible' linkage != (internal,private)
-// internal means linkage == (internal,private)
-// weak means linkage mayBeOverridable
-// being external implies that the address is taken
-//
-// 1. turn G into a thunk to F
-// 2. make G an alias to F
+ assert(F1TI->getNumSuccessors() == F2TI->getNumSuccessors());
+ for (unsigned i = 0, e = F1TI->getNumSuccessors(); i != e; ++i) {
+ if (!VisitedBBs.insert(F1TI->getSuccessor(i)))
+ continue;
-enum LinkageCategory {
- ExternalStrong,
- ExternalWeak,
- Internal
-};
+ F1BBs.push_back(F1TI->getSuccessor(i));
+ F2BBs.push_back(F2TI->getSuccessor(i));
+ }
+ }
+ return true;
+}
-static LinkageCategory categorize(const Function *F) {
- switch (F->getLinkage()) {
- case GlobalValue::InternalLinkage:
- case GlobalValue::PrivateLinkage:
- case GlobalValue::LinkerPrivateLinkage:
- return Internal;
-
- case GlobalValue::WeakAnyLinkage:
- case GlobalValue::WeakODRLinkage:
- case GlobalValue::ExternalWeakLinkage:
- return ExternalWeak;
-
- case GlobalValue::ExternalLinkage:
- case GlobalValue::AvailableExternallyLinkage:
- case GlobalValue::LinkOnceAnyLinkage:
- case GlobalValue::LinkOnceODRLinkage:
- case GlobalValue::AppendingLinkage:
- case GlobalValue::DLLImportLinkage:
- case GlobalValue::DLLExportLinkage:
- case GlobalValue::CommonLinkage:
- return ExternalStrong;
+/// WriteThunk - Replace G with a simple tail call to bitcast(F). Also replace
+/// direct uses of G with bitcast(F).
+void MergeFunctions::WriteThunk(Function *F, Function *G) const {
+ if (!G->mayBeOverridden()) {
+ // Redirect direct callers of G to F.
+ Constant *BitcastF = ConstantExpr::getBitCast(F, G->getType());
+ for (Value::use_iterator UI = G->use_begin(), UE = G->use_end();
+ UI != UE;) {
+ Value::use_iterator TheIter = UI;
+ ++UI;
+ CallSite CS(*TheIter);
+ if (CS && CS.isCallee(TheIter))
+ TheIter.getUse().set(BitcastF);
+ }
}
- llvm_unreachable("Unknown LinkageType.");
- return ExternalWeak;
-}
+ // If G was internal then we may have replaced all uses of G with F. If so,
+ // stop here and delete G. There's no need for a thunk.
+ if (G->hasLocalLinkage() && G->use_empty()) {
+ G->eraseFromParent();
+ return;
+ }
-static void ThunkGToF(Function *F, Function *G) {
Function *NewG = Function::Create(G->getFunctionType(), G->getLinkage(), "",
G->getParent());
BasicBlock *BB = BasicBlock::Create(F->getContext(), "", NewG);
+ IRBuilder<false> Builder(BB);
- std::vector<Value *> Args;
+ SmallVector<Value *, 16> Args;
unsigned i = 0;
const FunctionType *FFTy = F->getFunctionType();
for (Function::arg_iterator AI = NewG->arg_begin(), AE = NewG->arg_end();
AI != AE; ++AI) {
- if (FFTy->getParamType(i) == AI->getType())
- Args.push_back(AI);
- else {
- Value *BCI = new BitCastInst(AI, FFTy->getParamType(i), "", BB);
- Args.push_back(BCI);
- }
+ Args.push_back(Builder.CreateBitCast(AI, FFTy->getParamType(i)));
++i;
}
- CallInst *CI = CallInst::Create(F, Args.begin(), Args.end(), "", BB);
+ CallInst *CI = Builder.CreateCall(F, Args.begin(), Args.end());
CI->setTailCall();
CI->setCallingConv(F->getCallingConv());
if (NewG->getReturnType()->isVoidTy()) {
- ReturnInst::Create(F->getContext(), BB);
- } else if (CI->getType() != NewG->getReturnType()) {
- Value *BCI = new BitCastInst(CI, NewG->getReturnType(), "", BB);
- ReturnInst::Create(F->getContext(), BCI, BB);
+ Builder.CreateRetVoid();
} else {
- ReturnInst::Create(F->getContext(), CI, BB);
+ Builder.CreateRet(Builder.CreateBitCast(CI, NewG->getReturnType()));
}
NewG->copyAttributesFrom(G);
NewG->takeName(G);
G->replaceAllUsesWith(NewG);
G->eraseFromParent();
-
- // TODO: look at direct callers to G and make them all direct callers to F.
}
-static void AliasGToF(Function *F, Function *G) {
- if (!G->hasExternalLinkage() && !G->hasLocalLinkage() && !G->hasWeakLinkage())
- return ThunkGToF(F, G);
-
- GlobalAlias *GA = new GlobalAlias(
- G->getType(), G->getLinkage(), "",
- ConstantExpr::getBitCast(F, G->getType()), G->getParent());
- F->setAlignment(std::max(F->getAlignment(), G->getAlignment()));
- GA->takeName(G);
- GA->setVisibility(G->getVisibility());
- G->replaceAllUsesWith(GA);
- G->eraseFromParent();
-}
+/// MergeTwoFunctions - Merge two equivalent functions. Upon completion,
+/// Function G is deleted.
+void MergeFunctions::MergeTwoFunctions(Function *F, Function *G) const {
+ if (F->isWeakForLinker()) {
+ assert(G->isWeakForLinker());
-static bool fold(std::vector<Function *> &FnVec, unsigned i, unsigned j) {
- Function *F = FnVec[i];
- Function *G = FnVec[j];
+ // Make them both thunks to the same internal function.
+ Function *H = Function::Create(F->getFunctionType(), F->getLinkage(), "",
+ F->getParent());
+ H->copyAttributesFrom(F);
+ H->takeName(F);
+ F->replaceAllUsesWith(H);
- LinkageCategory catF = categorize(F);
- LinkageCategory catG = categorize(G);
+ unsigned MaxAlignment = std::max(G->getAlignment(), H->getAlignment());
- if (catF == ExternalWeak || (catF == Internal && catG == ExternalStrong)) {
- std::swap(FnVec[i], FnVec[j]);
- std::swap(F, G);
- std::swap(catF, catG);
- }
+ WriteThunk(F, G);
+ WriteThunk(F, H);
- switch (catF) {
- case ExternalStrong:
- switch (catG) {
- case ExternalStrong:
- case ExternalWeak:
- ThunkGToF(F, G);
- break;
- case Internal:
- if (G->hasAddressTaken())
- ThunkGToF(F, G);
- else
- AliasGToF(F, G);
- break;
- }
- break;
-
- case ExternalWeak: {
- assert(catG == ExternalWeak);
-
- // Make them both thunks to the same internal function.
- F->setAlignment(std::max(F->getAlignment(), G->getAlignment()));
- Function *H = Function::Create(F->getFunctionType(), F->getLinkage(), "",
- F->getParent());
- H->copyAttributesFrom(F);
- H->takeName(F);
- F->replaceAllUsesWith(H);
-
- ThunkGToF(F, G);
- ThunkGToF(F, H);
-
- F->setLinkage(GlobalValue::InternalLinkage);
- } break;
-
- case Internal:
- switch (catG) {
- case ExternalStrong:
- llvm_unreachable(0);
- // fall-through
- case ExternalWeak:
- if (F->hasAddressTaken())
- ThunkGToF(F, G);
- else
- AliasGToF(F, G);
- break;
- case Internal: {
- bool addrTakenF = F->hasAddressTaken();
- bool addrTakenG = G->hasAddressTaken();
- if (!addrTakenF && addrTakenG) {
- std::swap(FnVec[i], FnVec[j]);
- std::swap(F, G);
- std::swap(addrTakenF, addrTakenG);
- }
-
- if (addrTakenF && addrTakenG) {
- ThunkGToF(F, G);
- } else {
- assert(!addrTakenG);
- AliasGToF(F, G);
- }
- } break;
- }
- break;
+ F->setAlignment(MaxAlignment);
+ F->setLinkage(GlobalValue::InternalLinkage);
+ } else {
+ WriteThunk(F, G);
}
++NumFunctionsMerged;
- return true;
}
-// ===----------------------------------------------------------------------===
-// Pass definition
-// ===----------------------------------------------------------------------===
+static unsigned ProfileFunction(const Function *F) {
+ const FunctionType *FTy = F->getFunctionType();
-bool MergeFunctions::runOnModule(Module &M) {
- bool Changed = false;
+ FoldingSetNodeID ID;
+ ID.AddInteger(F->size());
+ ID.AddInteger(F->getCallingConv());
+ ID.AddBoolean(F->hasGC());
+ ID.AddBoolean(FTy->isVarArg());
+ ID.AddInteger(FTy->getReturnType()->getTypeID());
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
+ ID.AddInteger(FTy->getParamType(i)->getTypeID());
+ return ID.ComputeHash();
+}
- std::map<unsigned long, std::vector<Function *> > FnMap;
+class ComparableFunction {
+public:
+ ComparableFunction(Function *Func, TargetData *TD)
+ : Func(Func), Hash(ProfileFunction(Func)), TD(TD) {}
- for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
- if (F->isDeclaration() || F->isIntrinsic())
- continue;
+ AssertingVH<Function> const Func;
+ const unsigned Hash;
+ TargetData * const TD;
+};
- FnMap[hash(F)].push_back(F);
+struct MergeFunctionsEqualityInfo {
+ static ComparableFunction *getEmptyKey() {
+ return reinterpret_cast<ComparableFunction*>(0);
+ }
+ static ComparableFunction *getTombstoneKey() {
+ return reinterpret_cast<ComparableFunction*>(-1);
+ }
+ static unsigned getHashValue(const ComparableFunction *CF) {
+ return CF->Hash;
+ }
+ static bool isEqual(const ComparableFunction *LHS,
+ const ComparableFunction *RHS) {
+ if (LHS == RHS)
+ return true;
+ if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
+ RHS == getEmptyKey() || RHS == getTombstoneKey())
+ return false;
+ assert(LHS->TD == RHS->TD && "Comparing functions for different targets");
+ return FunctionComparator(LHS->TD, LHS->Func, RHS->Func).Compare();
}
+};
+
+bool MergeFunctions::runOnModule(Module &M) {
+ typedef DenseSet<ComparableFunction *, MergeFunctionsEqualityInfo> FnSetType;
- // TODO: instead of running in a loop, we could also fold functions in
- // callgraph order. Constructing the CFG probably isn't cheaper than just
- // running in a loop, unless it happened to already be available.
+ bool Changed = false;
+ TD = getAnalysisIfAvailable<TargetData>();
+
+ std::vector<Function *> Funcs;
+ for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
+ if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage())
+ Funcs.push_back(F);
+ }
bool LocalChanged;
do {
LocalChanged = false;
- DEBUG(dbgs() << "size: " << FnMap.size() << "\n");
- for (std::map<unsigned long, std::vector<Function *> >::iterator
- I = FnMap.begin(), E = FnMap.end(); I != E; ++I) {
- std::vector<Function *> &FnVec = I->second;
- DEBUG(dbgs() << "hash (" << I->first << "): " << FnVec.size() << "\n");
-
- for (int i = 0, e = FnVec.size(); i != e; ++i) {
- for (int j = i + 1; j != e; ++j) {
- bool isEqual = equals(FnVec[i], FnVec[j]);
-
- DEBUG(dbgs() << " " << FnVec[i]->getName()
- << (isEqual ? " == " : " != ")
- << FnVec[j]->getName() << "\n");
-
- if (isEqual) {
- if (fold(FnVec, i, j)) {
- LocalChanged = true;
- FnVec.erase(FnVec.begin() + j);
- --j, --e;
- }
- }
- }
- }
+ FnSetType FnSet;
+ for (unsigned i = 0, e = Funcs.size(); i != e;) {
+ Function *F = Funcs[i];
+ ComparableFunction *NewF = new ComparableFunction(F, TD);
+ std::pair<FnSetType::iterator, bool> Result = FnSet.insert(NewF);
+ if (!Result.second) {
+ ComparableFunction *&OldF = *Result.first;
+ assert(OldF && "Expected a hash collision");
+
+ // NewF will be deleted in favour of OldF unless NewF is strong and
+ // OldF is weak in which case swap them to keep the strong definition.
+
+ if (OldF->Func->isWeakForLinker() && !NewF->Func->isWeakForLinker())
+ std::swap(OldF, NewF);
+
+ DEBUG(dbgs() << " " << OldF->Func->getName() << " == "
+ << NewF->Func->getName() << '\n');
+
+ Funcs.erase(Funcs.begin() + i);
+ --e;
+
+ Function *DeleteF = NewF->Func;
+ delete NewF;
+ MergeTwoFunctions(OldF->Func, DeleteF);
+ LocalChanged = true;
+ Changed = true;
+ } else {
+ ++i;
+ }
}
- Changed |= LocalChanged;
+ DeleteContainerPointers(FnSet);
} while (LocalChanged);
return Changed;
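
runOnModule now drives the pass through a DenseSet keyed on the cheap ProfileFunction hash, so the expensive FunctionComparator only runs when two functions collide in the same bucket. A standalone sketch of that scheme, with C++11 containers in place of DenseSet and a string payload standing in for real IR:

    #include <string>
    #include <unordered_set>

    struct Candidate {
      std::string Body;       // stands in for the function's IR
      unsigned Hash;          // cheap structural profile, like ProfileFunction()
      explicit Candidate(const std::string &B)
          : Body(B), Hash(static_cast<unsigned>(B.size())) {}
    };

    struct CandHash {
      size_t operator()(const Candidate *C) const { return C->Hash; }
    };
    struct CandEqual {        // deep comparison, only reached within a bucket
      bool operator()(const Candidate *A, const Candidate *B) const {
        return A->Body == B->Body;
      }
    };

    typedef std::unordered_set<const Candidate *, CandHash, CandEqual> CandSet;

    // Returns true when C collides with an existing, deeply equal candidate,
    // i.e. when the real pass would call MergeTwoFunctions().
    static bool insertOrMerge(CandSet &Set, const Candidate *C) {
      return !Set.insert(C).second;
    }
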
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/PartialInlining.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/PartialInlining.cpp
index f8ec722..432f7c5 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/PartialInlining.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/PartialInlining.cpp
@@ -30,7 +30,7 @@ namespace {
struct PartialInliner : public ModulePass {
virtual void getAnalysisUsage(AnalysisUsage &AU) const { }
static char ID; // Pass identification, replacement for typeid
- PartialInliner() : ModulePass(&ID) {}
+ PartialInliner() : ModulePass(ID) {}
bool runOnModule(Module& M);
@@ -40,7 +40,8 @@ namespace {
}
char PartialInliner::ID = 0;
-static RegisterPass<PartialInliner> X("partial-inliner", "Partial Inliner");
+INITIALIZE_PASS(PartialInliner, "partial-inliner",
+ "Partial Inliner", false, false);
ModulePass* llvm::createPartialInliningPass() { return new PartialInliner(); }
@@ -66,13 +67,14 @@ Function* PartialInliner::unswitchFunction(Function* F) {
return 0;
// Clone the function, so that we can hack away on it.
- DenseMap<const Value*, Value*> ValueMap;
- Function* duplicateFunction = CloneFunction(F, ValueMap);
+ ValueMap<const Value*, Value*> VMap;
+ Function* duplicateFunction = CloneFunction(F, VMap,
+ /*ModuleLevelChanges=*/false);
duplicateFunction->setLinkage(GlobalValue::InternalLinkage);
F->getParent()->getFunctionList().push_back(duplicateFunction);
- BasicBlock* newEntryBlock = cast<BasicBlock>(ValueMap[entryBlock]);
- BasicBlock* newReturnBlock = cast<BasicBlock>(ValueMap[returnBlock]);
- BasicBlock* newNonReturnBlock = cast<BasicBlock>(ValueMap[nonReturnBlock]);
+ BasicBlock* newEntryBlock = cast<BasicBlock>(VMap[entryBlock]);
+ BasicBlock* newReturnBlock = cast<BasicBlock>(VMap[returnBlock]);
+ BasicBlock* newNonReturnBlock = cast<BasicBlock>(VMap[nonReturnBlock]);
// Go ahead and update all uses to the duplicate, so that we can just
// use the inliner functionality when we're done hacking.
@@ -120,15 +122,17 @@ Function* PartialInliner::unswitchFunction(Function* F) {
// Extract the body of the if.
Function* extractedFunction = ExtractCodeRegion(DT, toExtract);
+ InlineFunctionInfo IFI;
+
// Inline the top-level if test into all callers.
std::vector<User*> Users(duplicateFunction->use_begin(),
duplicateFunction->use_end());
for (std::vector<User*>::iterator UI = Users.begin(), UE = Users.end();
UI != UE; ++UI)
- if (CallInst* CI = dyn_cast<CallInst>(*UI))
- InlineFunction(CI);
- else if (InvokeInst* II = dyn_cast<InvokeInst>(*UI))
- InlineFunction(II);
+ if (CallInst *CI = dyn_cast<CallInst>(*UI))
+ InlineFunction(CI, IFI);
+ else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI))
+ InlineFunction(II, IFI);
// Ditch the duplicate, since we're done with it, and rewrite all remaining
// users (function pointers, etc.) back to the original function.
@@ -157,7 +161,7 @@ bool PartialInliner::runOnModule(Module& M) {
bool recursive = false;
for (Function::use_iterator UI = currFunc->use_begin(),
UE = currFunc->use_end(); UI != UE; ++UI)
- if (Instruction* I = dyn_cast<Instruction>(UI))
+ if (Instruction* I = dyn_cast<Instruction>(*UI))
if (I->getParent()->getParent() == currFunc) {
recursive = true;
break;
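
The PartialInliner changes track two API moves: CloneFunction now fills a ValueMap and takes an explicit ModuleLevelChanges flag, and InlineFunction is fed an InlineFunctionInfo instead of loose CallGraph/TargetData pointers. A sketch of the new cloning call, mirroring the code above and assuming 2.8 headers:

    #include "llvm/ADT/ValueMap.h"
    #include "llvm/Function.h"
    #include "llvm/Module.h"
    #include "llvm/Transforms/Utils/Cloning.h"
    using namespace llvm;

    // Make a private copy of F that we are free to rewrite.
    static Function *cloneForHacking(Function *F) {
      ValueMap<const Value*, Value*> VMap;
      Function *Copy = CloneFunction(F, VMap, /*ModuleLevelChanges=*/false);
      Copy->setLinkage(GlobalValue::InternalLinkage);
      F->getParent()->getFunctionList().push_back(Copy);
      return Copy;
    }
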
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/PartialSpecialization.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/PartialSpecialization.cpp
index 084b94e..4a99a41 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/PartialSpecialization.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/PartialSpecialization.cpp
@@ -32,6 +32,10 @@
using namespace llvm;
STATISTIC(numSpecialized, "Number of specialized functions created");
+STATISTIC(numReplaced, "Number of callers replaced by specialization");
+
+// Maximum number of arguments that can be marked as interesting
+static const int MaxInterests = 6;
// Call must be used at least occasionally
static const int CallsMin = 5;
@@ -40,34 +44,38 @@ static const int CallsMin = 5;
static const double ConstValPercent = .1;
namespace {
+ typedef SmallVector<int, MaxInterests> InterestingArgVector;
class PartSpec : public ModulePass {
- void scanForInterest(Function&, SmallVector<int, 6>&);
+ void scanForInterest(Function&, InterestingArgVector&);
int scanDistribution(Function&, int, std::map<Constant*, int>&);
public :
static char ID; // Pass identification, replacement for typeid
- PartSpec() : ModulePass(&ID) {}
+ PartSpec() : ModulePass(ID) {}
bool runOnModule(Module &M);
};
}
char PartSpec::ID = 0;
-static RegisterPass<PartSpec>
-X("partialspecialization", "Partial Specialization");
+INITIALIZE_PASS(PartSpec, "partialspecialization",
+ "Partial Specialization", false, false);
// Specialize F by replacing the arguments (keys) in replacements with the
// constants (values). Replace all calls to F with those constants with
// a call to the specialized function. Returns the specialized function
static Function*
SpecializeFunction(Function* F,
- DenseMap<const Value*, Value*>& replacements) {
+ ValueMap<const Value*, Value*>& replacements) {
// arg numbers of deleted arguments
- DenseSet<unsigned> deleted;
- for (DenseMap<const Value*, Value*>::iterator
+ DenseMap<unsigned, const Argument*> deleted;
+ for (ValueMap<const Value*, Value*>::iterator
repb = replacements.begin(), repe = replacements.end();
- repb != repe; ++repb)
- deleted.insert(cast<Argument>(repb->first)->getArgNo());
+ repb != repe; ++repb) {
+ Argument const *arg = cast<const Argument>(repb->first);
+ deleted[arg->getArgNo()] = arg;
+ }
- Function* NF = CloneFunction(F, replacements);
+ Function* NF = CloneFunction(F, replacements,
+ /*ModuleLevelChanges=*/false);
NF->setLinkage(GlobalValue::InternalLinkage);
F->getParent()->getFunctionList().push_back(NF);
@@ -75,22 +83,36 @@ SpecializeFunction(Function* F,
ii != ee; ) {
Value::use_iterator i = ii;
++ii;
- if (isa<CallInst>(i) || isa<InvokeInst>(i)) {
- CallSite CS(cast<Instruction>(i));
+ User *U = *i;
+ CallSite CS(U);
+ if (CS) {
if (CS.getCalledFunction() == F) {
-
SmallVector<Value*, 6> args;
- for (unsigned x = 0; x < CS.arg_size(); ++x)
- if (!deleted.count(x))
- args.push_back(CS.getArgument(x));
+ // Assemble the non-specialized arguments for the updated callsite.
+ // In the process, make sure that the specialized arguments are
+ // constant and match the specialization. If that's not the case,
+ // this callsite needs to call the original or some other
+ // specialization; don't change it here.
+ CallSite::arg_iterator as = CS.arg_begin(), ae = CS.arg_end();
+ for (CallSite::arg_iterator ai = as; ai != ae; ++ai) {
+ DenseMap<unsigned, const Argument*>::iterator delit = deleted.find(
+ std::distance(as, ai));
+ if (delit == deleted.end())
+ args.push_back(cast<Value>(ai));
+ else {
+ Constant *ci = dyn_cast<Constant>(ai);
+ if (!(ci && ci == replacements[delit->second]))
+ goto next_use;
+ }
+ }
Value* NCall;
- if (CallInst *CI = dyn_cast<CallInst>(i)) {
+ if (CallInst *CI = dyn_cast<CallInst>(U)) {
NCall = CallInst::Create(NF, args.begin(), args.end(),
CI->getName(), CI);
cast<CallInst>(NCall)->setTailCall(CI->isTailCall());
cast<CallInst>(NCall)->setCallingConv(CI->getCallingConv());
} else {
- InvokeInst *II = cast<InvokeInst>(i);
+ InvokeInst *II = cast<InvokeInst>(U);
NCall = InvokeInst::Create(NF, II->getNormalDest(),
II->getUnwindDest(),
args.begin(), args.end(),
@@ -99,8 +121,10 @@ SpecializeFunction(Function* F,
}
CS.getInstruction()->replaceAllUsesWith(NCall);
CS.getInstruction()->eraseFromParent();
+ ++numReplaced;
}
}
+ next_use:;
}
return NF;
}
@@ -111,7 +135,7 @@ bool PartSpec::runOnModule(Module &M) {
for (Module::iterator I = M.begin(); I != M.end(); ++I) {
Function &F = *I;
if (F.isDeclaration() || F.mayBeOverridden()) continue;
- SmallVector<int, 6> interestingArgs;
+ InterestingArgVector interestingArgs;
scanForInterest(F, interestingArgs);
// Find the first interesting Argument that we can specialize on
@@ -126,7 +150,7 @@ bool PartSpec::runOnModule(Module &M) {
ee = distribution.end(); ii != ee; ++ii)
if (total > ii->second && ii->first &&
ii->second > total * ConstValPercent) {
- DenseMap<const Value*, Value*> m;
+ ValueMap<const Value*, Value*> m;
Function::arg_iterator arg = F.arg_begin();
for (int y = 0; y < interestingArgs[x]; ++y)
++arg;
@@ -143,21 +167,21 @@ bool PartSpec::runOnModule(Module &M) {
/// scanForInterest - This function decides which arguments would be worth
/// specializing on.
-void PartSpec::scanForInterest(Function& F, SmallVector<int, 6>& args) {
+void PartSpec::scanForInterest(Function& F, InterestingArgVector& args) {
for(Function::arg_iterator ii = F.arg_begin(), ee = F.arg_end();
ii != ee; ++ii) {
for(Value::use_iterator ui = ii->use_begin(), ue = ii->use_end();
ui != ue; ++ui) {
bool interesting = false;
-
- if (isa<CmpInst>(ui)) interesting = true;
- else if (isa<CallInst>(ui))
+ User *U = *ui;
+ if (isa<CmpInst>(U)) interesting = true;
+ else if (isa<CallInst>(U))
interesting = ui->getOperand(0) == ii;
- else if (isa<InvokeInst>(ui))
+ else if (isa<InvokeInst>(U))
interesting = ui->getOperand(0) == ii;
- else if (isa<SwitchInst>(ui)) interesting = true;
- else if (isa<BranchInst>(ui)) interesting = true;
+ else if (isa<SwitchInst>(U)) interesting = true;
+ else if (isa<BranchInst>(U)) interesting = true;
if (interesting) {
args.push_back(std::distance(F.arg_begin(), ii));
@@ -172,14 +196,16 @@ int PartSpec::scanDistribution(Function& F, int arg,
std::map<Constant*, int>& dist) {
bool hasIndirect = false;
int total = 0;
- for(Value::use_iterator ii = F.use_begin(), ee = F.use_end();
- ii != ee; ++ii)
- if ((isa<CallInst>(ii) || isa<InvokeInst>(ii))
- && ii->getOperand(0) == &F) {
- ++dist[dyn_cast<Constant>(ii->getOperand(arg + 1))];
+ for (Value::use_iterator ii = F.use_begin(), ee = F.use_end();
+ ii != ee; ++ii) {
+ User *U = *ii;
+ CallSite CS(U);
+ if (CS && CS.getCalledFunction() == &F) {
+ ++dist[dyn_cast<Constant>(CS.getArgument(arg))];
++total;
} else
hasIndirect = true;
+ }
// Preserve the original address taken function even if all other uses
// will be specialized.
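The rewritten SpecializeFunction above redirects a callsite to the specialized clone only when every specialized argument slot carries exactly the constant the clone was built for. A minimal standalone sketch of that check, using hypothetical names and plain C++ rather than LLVM types:

#include <map>

// Hypothetical stand-ins: argument slot numbers and integer constants.
typedef unsigned ArgNo;
typedef long long ConstVal;

// replacements: slot -> constant the specialization was built for.
// constantArgs: slot -> constant actually passed at one callsite
// (slots passed a non-constant value are simply absent from the map).
// Returns true if the callsite may be redirected to the specialized clone.
static bool callsiteMatchesSpecialization(
    const std::map<ArgNo, ConstVal> &replacements,
    const std::map<ArgNo, ConstVal> &constantArgs) {
  for (std::map<ArgNo, ConstVal>::const_iterator it = replacements.begin(),
       end = replacements.end(); it != end; ++it) {
    std::map<ArgNo, ConstVal>::const_iterator found =
        constantArgs.find(it->first);
    if (found == constantArgs.end() || found->second != it->second)
      return false; // non-constant or different constant: leave the call alone
  }
  return true;
}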
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/PruneEH.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/PruneEH.cpp
index 3ae771c..09ac76f 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/PruneEH.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/PruneEH.cpp
@@ -37,10 +37,10 @@ STATISTIC(NumUnreach, "Number of noreturn calls optimized");
namespace {
struct PruneEH : public CallGraphSCCPass {
static char ID; // Pass identification, replacement for typeid
- PruneEH() : CallGraphSCCPass(&ID) {}
+ PruneEH() : CallGraphSCCPass(ID) {}
// runOnSCC - Analyze the SCC, performing the transformation if possible.
- bool runOnSCC(std::vector<CallGraphNode *> &SCC);
+ bool runOnSCC(CallGraphSCC &SCC);
bool SimplifyFunction(Function *F);
void DeleteBasicBlock(BasicBlock *BB);
@@ -48,26 +48,26 @@ namespace {
}
char PruneEH::ID = 0;
-static RegisterPass<PruneEH>
-X("prune-eh", "Remove unused exception handling info");
+INITIALIZE_PASS(PruneEH, "prune-eh",
+ "Remove unused exception handling info", false, false);
Pass *llvm::createPruneEHPass() { return new PruneEH(); }
-bool PruneEH::runOnSCC(std::vector<CallGraphNode *> &SCC) {
+bool PruneEH::runOnSCC(CallGraphSCC &SCC) {
SmallPtrSet<CallGraphNode *, 8> SCCNodes;
CallGraph &CG = getAnalysis<CallGraph>();
bool MadeChange = false;
// Fill SCCNodes with the elements of the SCC. Used for quickly
// looking up whether a given CallGraphNode is in this SCC.
- for (unsigned i = 0, e = SCC.size(); i != e; ++i)
- SCCNodes.insert(SCC[i]);
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
+ SCCNodes.insert(*I);
// First pass, scan all of the functions in the SCC, simplifying them
// according to what we know.
- for (unsigned i = 0, e = SCC.size(); i != e; ++i)
- if (Function *F = SCC[i]->getFunction())
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
+ if (Function *F = (*I)->getFunction())
MadeChange |= SimplifyFunction(F);
// Next, check to see if any callees might throw or if there are any external
@@ -78,9 +78,9 @@ bool PruneEH::runOnSCC(std::vector<CallGraphNode *> &SCC) {
// obviously the SCC might throw.
//
bool SCCMightUnwind = false, SCCMightReturn = false;
- for (unsigned i = 0, e = SCC.size();
- (!SCCMightUnwind || !SCCMightReturn) && i != e; ++i) {
- Function *F = SCC[i]->getFunction();
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end();
+ (!SCCMightUnwind || !SCCMightReturn) && I != E; ++I) {
+ Function *F = (*I)->getFunction();
if (F == 0) {
SCCMightUnwind = true;
SCCMightReturn = true;
@@ -132,7 +132,7 @@ bool PruneEH::runOnSCC(std::vector<CallGraphNode *> &SCC) {
// If the SCC doesn't unwind or doesn't throw, note this fact.
if (!SCCMightUnwind || !SCCMightReturn)
- for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
Attributes NewAttributes = Attribute::None;
if (!SCCMightUnwind)
@@ -140,19 +140,20 @@ bool PruneEH::runOnSCC(std::vector<CallGraphNode *> &SCC) {
if (!SCCMightReturn)
NewAttributes |= Attribute::NoReturn;
- const AttrListPtr &PAL = SCC[i]->getFunction()->getAttributes();
+ Function *F = (*I)->getFunction();
+ const AttrListPtr &PAL = F->getAttributes();
const AttrListPtr &NPAL = PAL.addAttr(~0, NewAttributes);
if (PAL != NPAL) {
MadeChange = true;
- SCC[i]->getFunction()->setAttributes(NPAL);
+ F->setAttributes(NPAL);
}
}
- for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
// Convert any invoke instructions to non-throwing functions in this node
// into call instructions with a branch. This makes the exception blocks
// dead.
- if (Function *F = SCC[i]->getFunction())
+ if (Function *F = (*I)->getFunction())
MadeChange |= SimplifyFunction(F);
}
@@ -168,7 +169,7 @@ bool PruneEH::SimplifyFunction(Function *F) {
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator()))
if (II->doesNotThrow()) {
- SmallVector<Value*, 8> Args(II->op_begin()+3, II->op_end());
+ SmallVector<Value*, 8> Args(II->op_begin(), II->op_end() - 3);
// Insert a call instruction before the invoke.
CallInst *Call = CallInst::Create(II->getCalledValue(),
Args.begin(), Args.end(), "", II);
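In the PruneEH hunk above, the argument slice for the invoke-to-call conversion changes from skipping the first three operands to dropping the last three, which suggests that in this LLVM revision the invoke's callee and destination blocks now sit at the end of its operand list. A tiny self-contained illustration of the slicing (all names made up):

#include <cassert>
#include <string>
#include <vector>

int main() {
  // Hypothetical operand list of an invoke after the operand reordering:
  // the real call arguments first, the three bookkeeping operands
  // (presumably the callee and the two destination blocks) last.
  std::vector<std::string> ops;
  ops.push_back("arg0");
  ops.push_back("arg1");
  ops.push_back("extra0");
  ops.push_back("extra1");
  ops.push_back("extra2");
  // The old layout put the extras first, hence op_begin()+3; the new layout
  // keeps them at the back, so the call arguments are all but the last three:
  std::vector<std::string> args(ops.begin(), ops.end() - 3);
  assert(args.size() == 2 && args[0] == "arg0" && args[1] == "arg1");
  return 0;
}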
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/StripDeadPrototypes.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/StripDeadPrototypes.cpp
index 4566a76..ee10ad0 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/StripDeadPrototypes.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/StripDeadPrototypes.cpp
@@ -29,15 +29,15 @@ namespace {
class StripDeadPrototypesPass : public ModulePass {
public:
static char ID; // Pass identification, replacement for typeid
- StripDeadPrototypesPass() : ModulePass(&ID) { }
+ StripDeadPrototypesPass() : ModulePass(ID) { }
virtual bool runOnModule(Module &M);
};
} // end anonymous namespace
char StripDeadPrototypesPass::ID = 0;
-static RegisterPass<StripDeadPrototypesPass>
-X("strip-dead-prototypes", "Strip Unused Function Prototypes");
+INITIALIZE_PASS(StripDeadPrototypesPass, "strip-dead-prototypes",
+ "Strip Unused Function Prototypes", false, false);
bool StripDeadPrototypesPass::runOnModule(Module &M) {
bool MadeChange = false;
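Most pass-definition hunks in this merge repeat the same two mechanical changes: the constructor passes ID itself instead of &ID, and the file-scope RegisterPass<> object is replaced by the INITIALIZE_PASS macro. A hedged before/after sketch for a hypothetical pass, assuming LLVM 2.8-era headers (exact header names may differ by revision):

#include "llvm/Pass.h"
#include "llvm/Module.h"

namespace {
  // HypoPass is hypothetical; it is not one of the passes touched by this diff.
  struct HypoPass : public llvm::ModulePass {
    static char ID; // Pass identification, replacement for typeid
    // Old style was: HypoPass() : ModulePass(&ID) {}
    HypoPass() : ModulePass(ID) {}
    virtual bool runOnModule(llvm::Module &M) { return false; }
  };
}
char HypoPass::ID = 0;
// Old style registration:
//   static RegisterPass<HypoPass> X("hypo", "Hypothetical pass");
// New style used throughout this merge:
INITIALIZE_PASS(HypoPass, "hypo", "Hypothetical pass", false, false);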
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/StripSymbols.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/StripSymbols.cpp
index 310e4a2..20b7b8f 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/StripSymbols.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/StripSymbols.cpp
@@ -39,7 +39,7 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
explicit StripSymbols(bool ODI = false)
- : ModulePass(&ID), OnlyDebugInfo(ODI) {}
+ : ModulePass(ID), OnlyDebugInfo(ODI) {}
virtual bool runOnModule(Module &M);
@@ -52,7 +52,7 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
explicit StripNonDebugSymbols()
- : ModulePass(&ID) {}
+ : ModulePass(ID) {}
virtual bool runOnModule(Module &M);
@@ -65,7 +65,20 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
explicit StripDebugDeclare()
- : ModulePass(&ID) {}
+ : ModulePass(ID) {}
+
+ virtual bool runOnModule(Module &M);
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ }
+ };
+
+ class StripDeadDebugInfo : public ModulePass {
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ explicit StripDeadDebugInfo()
+ : ModulePass(ID) {}
virtual bool runOnModule(Module &M);
@@ -76,29 +89,38 @@ namespace {
}
char StripSymbols::ID = 0;
-static RegisterPass<StripSymbols>
-X("strip", "Strip all symbols from a module");
+INITIALIZE_PASS(StripSymbols, "strip",
+ "Strip all symbols from a module", false, false);
ModulePass *llvm::createStripSymbolsPass(bool OnlyDebugInfo) {
return new StripSymbols(OnlyDebugInfo);
}
char StripNonDebugSymbols::ID = 0;
-static RegisterPass<StripNonDebugSymbols>
-Y("strip-nondebug", "Strip all symbols, except dbg symbols, from a module");
+INITIALIZE_PASS(StripNonDebugSymbols, "strip-nondebug",
+ "Strip all symbols, except dbg symbols, from a module",
+ false, false);
ModulePass *llvm::createStripNonDebugSymbolsPass() {
return new StripNonDebugSymbols();
}
char StripDebugDeclare::ID = 0;
-static RegisterPass<StripDebugDeclare>
-Z("strip-debug-declare", "Strip all llvm.dbg.declare intrinsics");
+INITIALIZE_PASS(StripDebugDeclare, "strip-debug-declare",
+ "Strip all llvm.dbg.declare intrinsics", false, false);
ModulePass *llvm::createStripDebugDeclarePass() {
return new StripDebugDeclare();
}
+char StripDeadDebugInfo::ID = 0;
+INITIALIZE_PASS(StripDeadDebugInfo, "strip-dead-debug-info",
+ "Strip debug info for unused symbols", false, false);
+
+ModulePass *llvm::createStripDeadDebugInfoPass() {
+ return new StripDeadDebugInfo();
+}
+
/// OnlyUsedBy - Return true if V is only used by Usr.
static bool OnlyUsedBy(Value *V, Value *Usr) {
for(Value::use_iterator I = V->use_begin(), E = V->use_end(); I != E; ++I) {
@@ -223,21 +245,28 @@ static bool StripDebugInfo(Module &M) {
Changed = true;
}
- NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.gv");
- if (NMD) {
- Changed = true;
- NMD->eraseFromParent();
+ for (Module::named_metadata_iterator NMI = M.named_metadata_begin(),
+ NME = M.named_metadata_end(); NMI != NME;) {
+ NamedMDNode *NMD = NMI;
+ ++NMI;
+ if (NMD->getName().startswith("llvm.dbg.")) {
+ NMD->eraseFromParent();
+ Changed = true;
+ }
}
-
- unsigned MDDbgKind = M.getMDKindID("dbg");
- for (Module::iterator MI = M.begin(), ME = M.end(); MI != ME; ++MI)
+
+ for (Module::iterator MI = M.begin(), ME = M.end(); MI != ME; ++MI)
for (Function::iterator FI = MI->begin(), FE = MI->end(); FI != FE;
++FI)
for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE;
- ++BI)
- BI->setMetadata(MDDbgKind, 0);
+ ++BI) {
+ if (!BI->getDebugLoc().isUnknown()) {
+ Changed = true;
+ BI->setDebugLoc(DebugLoc());
+ }
+ }
- return true;
+ return Changed;
}
bool StripSymbols::runOnModule(Module &M) {
@@ -260,8 +289,8 @@ bool StripDebugDeclare::runOnModule(Module &M) {
if (Declare) {
while (!Declare->use_empty()) {
CallInst *CI = cast<CallInst>(Declare->use_back());
- Value *Arg1 = CI->getOperand(1);
- Value *Arg2 = CI->getOperand(2);
+ Value *Arg1 = CI->getArgOperand(0);
+ Value *Arg2 = CI->getArgOperand(1);
assert(CI->use_empty() && "llvm.dbg intrinsic should have void result");
CI->eraseFromParent();
if (Arg1->use_empty()) {
@@ -289,3 +318,83 @@ bool StripDebugDeclare::runOnModule(Module &M) {
return true;
}
+
+/// getRealLinkageName - If the special LLVM prefix that tells the asm printer
+/// not to emit the usual symbol prefix before the symbol name is present, then
+/// return the linkage name with that special LLVM prefix skipped.
+static StringRef getRealLinkageName(StringRef LinkageName) {
+ char One = '\1';
+ if (LinkageName.startswith(StringRef(&One, 1)))
+ return LinkageName.substr(1);
+ return LinkageName;
+}
+
+bool StripDeadDebugInfo::runOnModule(Module &M) {
+ bool Changed = false;
+
+ // Debugging information is encoded in LLVM IR using metadata. This is
+ // designed in such a way that debug info for symbols is preserved even if
+ // the symbols themselves are optimized away. This special pass removes
+ // debug info for such symbols.
+
+ // llvm.dbg.gv keeps track of debug info for global variables.
+ if (NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.gv")) {
+ SmallVector<MDNode *, 8> MDs;
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
+ if (DIGlobalVariable(NMD->getOperand(i)).Verify())
+ MDs.push_back(NMD->getOperand(i));
+ else
+ Changed = true;
+ NMD->eraseFromParent();
+ NMD = NULL;
+
+ for (SmallVector<MDNode *, 8>::iterator I = MDs.begin(),
+ E = MDs.end(); I != E; ++I) {
+ GlobalVariable *GV = DIGlobalVariable(*I).getGlobal();
+ if (GV && M.getGlobalVariable(GV->getName(), true)) {
+ if (!NMD)
+ NMD = M.getOrInsertNamedMetadata("llvm.dbg.gv");
+ NMD->addOperand(*I);
+ }
+ else
+ Changed = true;
+ }
+ }
+
+ // llvm.dbg.sp keeps track of debug info for subprograms.
+ if (NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.sp")) {
+ SmallVector<MDNode *, 8> MDs;
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
+ if (DISubprogram(NMD->getOperand(i)).Verify())
+ MDs.push_back(NMD->getOperand(i));
+ else
+ Changed = true;
+ NMD->eraseFromParent();
+ NMD = NULL;
+
+ for (SmallVector<MDNode *, 8>::iterator I = MDs.begin(),
+ E = MDs.end(); I != E; ++I) {
+ bool FnIsLive = false;
+ if (Function *F = DISubprogram(*I).getFunction())
+ if (M.getFunction(F->getName()))
+ FnIsLive = true;
+ if (FnIsLive) {
+ if (!NMD)
+ NMD = M.getOrInsertNamedMetadata("llvm.dbg.sp");
+ NMD->addOperand(*I);
+ } else {
+ // Remove the llvm.dbg.lv.fnname named mdnode which may have been used
+ // to hold debug info for the dead function's local variables.
+ StringRef FName = DISubprogram(*I).getLinkageName();
+ if (FName.empty())
+ FName = DISubprogram(*I).getName();
+ if (NamedMDNode *LVNMD =
+ M.getNamedMetadata(Twine("llvm.dbg.lv.",
+ getRealLinkageName(FName))))
+ LVNMD->eraseFromParent();
+ }
+ }
+ }
+
+ return Changed;
+}
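The new StripDeadDebugInfo pass above uses a rebuild-the-list pattern for llvm.dbg.gv and llvm.dbg.sp: collect the operands, erase the named metadata node, then re-add only the entries whose symbol is still present in the module. A plain-C++ sketch of that pattern over strings (hypothetical names, no LLVM types):

#include <set>
#include <string>
#include <vector>

// entries: names recorded in a (hypothetical) named-metadata list.
// liveSymbols: names the module still defines.
// Result: the list rebuilt with only the live entries; debug info for
// everything else is dropped.
static std::vector<std::string>
pruneDeadEntries(const std::vector<std::string> &entries,
                 const std::set<std::string> &liveSymbols) {
  std::vector<std::string> kept;
  for (size_t i = 0; i < entries.size(); ++i)
    if (liveSymbols.count(entries[i]))
      kept.push_back(entries[i]); // keep only entries for surviving symbols
  return kept;
}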
diff --git a/libclamav/c++/llvm/lib/Transforms/IPO/StructRetPromotion.cpp b/libclamav/c++/llvm/lib/Transforms/IPO/StructRetPromotion.cpp
index dda32d0..b82b03f 100644
--- a/libclamav/c++/llvm/lib/Transforms/IPO/StructRetPromotion.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/IPO/StructRetPromotion.cpp
@@ -1,4 +1,4 @@
-//===-- StructRetPromotion.cpp - Promote sret arguments ------------------===//
+//===-- StructRetPromotion.cpp - Promote sret arguments -------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -48,33 +48,32 @@ namespace {
CallGraphSCCPass::getAnalysisUsage(AU);
}
- virtual bool runOnSCC(std::vector<CallGraphNode *> &SCC);
+ virtual bool runOnSCC(CallGraphSCC &SCC);
static char ID; // Pass identification, replacement for typeid
- SRETPromotion() : CallGraphSCCPass(&ID) {}
+ SRETPromotion() : CallGraphSCCPass(ID) {}
private:
CallGraphNode *PromoteReturn(CallGraphNode *CGN);
bool isSafeToUpdateAllCallers(Function *F);
Function *cloneFunctionBody(Function *F, const StructType *STy);
CallGraphNode *updateCallSites(Function *F, Function *NF);
- bool nestedStructType(const StructType *STy);
};
}
char SRETPromotion::ID = 0;
-static RegisterPass<SRETPromotion>
-X("sretpromotion", "Promote sret arguments to multiple ret values");
+INITIALIZE_PASS(SRETPromotion, "sretpromotion",
+ "Promote sret arguments to multiple ret values", false, false);
Pass *llvm::createStructRetPromotionPass() {
return new SRETPromotion();
}
-bool SRETPromotion::runOnSCC(std::vector<CallGraphNode *> &SCC) {
+bool SRETPromotion::runOnSCC(CallGraphSCC &SCC) {
bool Changed = false;
- for (unsigned i = 0, e = SCC.size(); i != e; ++i)
- if (CallGraphNode *NewNode = PromoteReturn(SCC[i])) {
- SCC[i] = NewNode;
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
+ if (CallGraphNode *NewNode = PromoteReturn(*I)) {
+ SCC.ReplaceNode(*I, NewNode);
Changed = true;
}
@@ -107,12 +106,12 @@ CallGraphNode *SRETPromotion::PromoteReturn(CallGraphNode *CGN) {
// Check if it is ok to perform this promotion.
if (isSafeToUpdateAllCallers(F) == false) {
DEBUG(dbgs() << "SretPromotion: Not all callers can be updated\n");
- NumRejectedSRETUses++;
+ ++NumRejectedSRETUses;
return 0;
}
DEBUG(dbgs() << "SretPromotion: sret argument will be promoted\n");
- NumSRET++;
+ ++NumSRET;
// [1] Replace use of sret parameter
AllocaInst *TheAlloca = new AllocaInst(STy, NULL, "mrv",
F->getEntryBlock().begin());
@@ -156,7 +155,7 @@ bool SRETPromotion::isSafeToUpdateAllCallers(Function *F) {
FnUseI != FnUseE; ++FnUseI) {
// The function is passed in as an argument to (possibly) another function,
// we can't change it!
- CallSite CS = CallSite::get(*FnUseI);
+ CallSite CS(*FnUseI);
Instruction *Call = CS.getInstruction();
// The function is used by something else than a call or invoke instruction,
// we can't change it!
@@ -171,23 +170,23 @@ bool SRETPromotion::isSafeToUpdateAllCallers(Function *F) {
// Check FirstArg's users.
for (Value::use_iterator ArgI = FirstArg->use_begin(),
ArgE = FirstArg->use_end(); ArgI != ArgE; ++ArgI) {
-
+ User *U = *ArgI;
// If FirstArg user is a CallInst that does not correspond to current
// call site then this function F is not suitable for sret promotion.
- if (CallInst *CI = dyn_cast<CallInst>(ArgI)) {
+ if (CallInst *CI = dyn_cast<CallInst>(U)) {
if (CI != Call)
return false;
}
// If FirstArg user is a GEP whose all users are not LoadInst then
// this function F is not suitable for sret promotion.
- else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(ArgI)) {
+ else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
// TODO : Use dom info and insert PHINodes to collect get results
// from multiple call sites for this GEP.
if (GEP->getParent() != Call->getParent())
return false;
for (Value::use_iterator GEPI = GEP->use_begin(), GEPE = GEP->use_end();
GEPI != GEPE; ++GEPI)
- if (!isa<LoadInst>(GEPI))
+ if (!isa<LoadInst>(*GEPI))
return false;
}
// Any other FirstArg users make this function unsuitable for sret
@@ -271,7 +270,7 @@ CallGraphNode *SRETPromotion::updateCallSites(Function *F, Function *NF) {
CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF);
while (!F->use_empty()) {
- CallSite CS = CallSite::get(*F->use_begin());
+ CallSite CS(*F->use_begin());
Instruction *Call = CS.getInstruction();
const AttrListPtr &PAL = F->getAttributes();
@@ -351,14 +350,3 @@ CallGraphNode *SRETPromotion::updateCallSites(Function *F, Function *NF) {
return NF_CGN;
}
-/// nestedStructType - Return true if STy includes any
-/// other aggregate types
-bool SRETPromotion::nestedStructType(const StructType *STy) {
- unsigned Num = STy->getNumElements();
- for (unsigned i = 0; i < Num; i++) {
- const Type *Ty = STy->getElementType(i);
- if (!Ty->isSingleValueType() && !Ty->isVoidTy())
- return true;
- }
- return false;
-}
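The switch from std::vector<CallGraphNode*> to CallGraphSCC recurs in the PruneEH and SRETPromotion hunks above; SRETPromotion additionally calls SCC.ReplaceNode() instead of assigning into the vector. A hedged sketch of that traversal, assuming the LLVM 2.8-era CallGraphSCC interface:

#include "llvm/Analysis/CallGraph.h"
#include "llvm/CallGraphSCCPass.h"

// Hypothetical transform: returns a replacement node, or 0 for "no change".
static llvm::CallGraphNode *transformNode(llvm::CallGraphNode *CGN) {
  (void)CGN;
  return 0;
}

// Walk an SCC with the iterator interface and, when a transform re-creates a
// node, tell the SCC about it via ReplaceNode() rather than indexing a vector.
static bool visitSCC(llvm::CallGraphSCC &SCC) {
  bool Changed = false;
  for (llvm::CallGraphSCC::iterator I = SCC.begin(), E = SCC.end();
       I != E; ++I)
    if (llvm::CallGraphNode *NewNode = transformNode(*I)) {
      SCC.ReplaceNode(*I, NewNode);
      Changed = true;
    }
  return Changed;
}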
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/CMakeLists.txt b/libclamav/c++/llvm/lib/Transforms/InstCombine/CMakeLists.txt
deleted file mode 100644
index 5b1ff3e..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/CMakeLists.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-add_llvm_library(LLVMInstCombine
- InstructionCombining.cpp
- InstCombineAddSub.cpp
- InstCombineAndOrXor.cpp
- InstCombineCalls.cpp
- InstCombineCasts.cpp
- InstCombineCompares.cpp
- InstCombineLoadStoreAlloca.cpp
- InstCombineMulDivRem.cpp
- InstCombinePHI.cpp
- InstCombineSelect.cpp
- InstCombineShifts.cpp
- InstCombineSimplifyDemanded.cpp
- InstCombineVectorOps.cpp
- )
-
-target_link_libraries (LLVMInstCombine LLVMTransformUtils)
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombine.h b/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombine.h
deleted file mode 100644
index bd06499..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombine.h
+++ /dev/null
@@ -1,351 +0,0 @@
-//===- InstCombine.h - Main InstCombine pass definition -------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef INSTCOMBINE_INSTCOMBINE_H
-#define INSTCOMBINE_INSTCOMBINE_H
-
-#include "InstCombineWorklist.h"
-#include "llvm/Pass.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Support/IRBuilder.h"
-#include "llvm/Support/InstVisitor.h"
-#include "llvm/Support/TargetFolder.h"
-
-namespace llvm {
- class CallSite;
- class TargetData;
- class DbgDeclareInst;
- class MemIntrinsic;
- class MemSetInst;
-
-/// SelectPatternFlavor - We can match a variety of different patterns for
-/// select operations.
-enum SelectPatternFlavor {
- SPF_UNKNOWN = 0,
- SPF_SMIN, SPF_UMIN,
- SPF_SMAX, SPF_UMAX
- //SPF_ABS - TODO.
-};
-
-/// getComplexity: Assign a complexity or rank value to LLVM Values...
-/// 0 -> undef, 1 -> Const, 2 -> Other, 3 -> Arg, 3 -> Unary, 4 -> OtherInst
-static inline unsigned getComplexity(Value *V) {
- if (isa<Instruction>(V)) {
- if (BinaryOperator::isNeg(V) ||
- BinaryOperator::isFNeg(V) ||
- BinaryOperator::isNot(V))
- return 3;
- return 4;
- }
- if (isa<Argument>(V)) return 3;
- return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
-}
-
-
-/// InstCombineIRInserter - This is an IRBuilder insertion helper that works
-/// just like the normal insertion helper, but also adds any new instructions
-/// to the instcombine worklist.
-class VISIBILITY_HIDDEN InstCombineIRInserter
- : public IRBuilderDefaultInserter<true> {
- InstCombineWorklist &Worklist;
-public:
- InstCombineIRInserter(InstCombineWorklist &WL) : Worklist(WL) {}
-
- void InsertHelper(Instruction *I, const Twine &Name,
- BasicBlock *BB, BasicBlock::iterator InsertPt) const {
- IRBuilderDefaultInserter<true>::InsertHelper(I, Name, BB, InsertPt);
- Worklist.Add(I);
- }
-};
-
-/// InstCombiner - The -instcombine pass.
-class VISIBILITY_HIDDEN InstCombiner
- : public FunctionPass,
- public InstVisitor<InstCombiner, Instruction*> {
- TargetData *TD;
- bool MustPreserveLCSSA;
- bool MadeIRChange;
-public:
- /// Worklist - All of the instructions that need to be simplified.
- InstCombineWorklist Worklist;
-
- /// Builder - This is an IRBuilder that automatically inserts new
- /// instructions into the worklist when they are created.
- typedef IRBuilder<true, TargetFolder, InstCombineIRInserter> BuilderTy;
- BuilderTy *Builder;
-
- static char ID; // Pass identification, replacement for typeid
- InstCombiner() : FunctionPass(&ID), TD(0), Builder(0) {}
-
-public:
- virtual bool runOnFunction(Function &F);
-
- bool DoOneIteration(Function &F, unsigned ItNum);
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
-
- TargetData *getTargetData() const { return TD; }
-
- // Visitation implementation - Implement instruction combining for different
- // instruction types. The semantics are as follows:
- // Return Value:
- // null - No change was made
- // I - Change was made, I is still valid, I may be dead though
- // otherwise - Change was made, replace I with returned instruction
- //
- Instruction *visitAdd(BinaryOperator &I);
- Instruction *visitFAdd(BinaryOperator &I);
- Value *OptimizePointerDifference(Value *LHS, Value *RHS, const Type *Ty);
- Instruction *visitSub(BinaryOperator &I);
- Instruction *visitFSub(BinaryOperator &I);
- Instruction *visitMul(BinaryOperator &I);
- Instruction *visitFMul(BinaryOperator &I);
- Instruction *visitURem(BinaryOperator &I);
- Instruction *visitSRem(BinaryOperator &I);
- Instruction *visitFRem(BinaryOperator &I);
- bool SimplifyDivRemOfSelect(BinaryOperator &I);
- Instruction *commonRemTransforms(BinaryOperator &I);
- Instruction *commonIRemTransforms(BinaryOperator &I);
- Instruction *commonDivTransforms(BinaryOperator &I);
- Instruction *commonIDivTransforms(BinaryOperator &I);
- Instruction *visitUDiv(BinaryOperator &I);
- Instruction *visitSDiv(BinaryOperator &I);
- Instruction *visitFDiv(BinaryOperator &I);
- Value *FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS);
- Value *FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS);
- Instruction *visitAnd(BinaryOperator &I);
- Value *FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS);
- Value *FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS);
- Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op,
- Value *A, Value *B, Value *C);
- Instruction *visitOr (BinaryOperator &I);
- Instruction *visitXor(BinaryOperator &I);
- Instruction *visitShl(BinaryOperator &I);
- Instruction *visitAShr(BinaryOperator &I);
- Instruction *visitLShr(BinaryOperator &I);
- Instruction *commonShiftTransforms(BinaryOperator &I);
- Instruction *FoldFCmp_IntToFP_Cst(FCmpInst &I, Instruction *LHSI,
- Constant *RHSC);
- Instruction *FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
- GlobalVariable *GV, CmpInst &ICI,
- ConstantInt *AndCst = 0);
- Instruction *visitFCmpInst(FCmpInst &I);
- Instruction *visitICmpInst(ICmpInst &I);
- Instruction *visitICmpInstWithCastAndCast(ICmpInst &ICI);
- Instruction *visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
- Instruction *LHS,
- ConstantInt *RHS);
- Instruction *FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
- ConstantInt *DivRHS);
- Instruction *FoldICmpAddOpCst(ICmpInst &ICI, Value *X, ConstantInt *CI,
- ICmpInst::Predicate Pred, Value *TheAdd);
- Instruction *FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
- ICmpInst::Predicate Cond, Instruction &I);
- Instruction *FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
- BinaryOperator &I);
- Instruction *commonCastTransforms(CastInst &CI);
- Instruction *commonPointerCastTransforms(CastInst &CI);
- Instruction *visitTrunc(TruncInst &CI);
- Instruction *visitZExt(ZExtInst &CI);
- Instruction *visitSExt(SExtInst &CI);
- Instruction *visitFPTrunc(FPTruncInst &CI);
- Instruction *visitFPExt(CastInst &CI);
- Instruction *visitFPToUI(FPToUIInst &FI);
- Instruction *visitFPToSI(FPToSIInst &FI);
- Instruction *visitUIToFP(CastInst &CI);
- Instruction *visitSIToFP(CastInst &CI);
- Instruction *visitPtrToInt(PtrToIntInst &CI);
- Instruction *visitIntToPtr(IntToPtrInst &CI);
- Instruction *visitBitCast(BitCastInst &CI);
- Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI,
- Instruction *FI);
- Instruction *FoldSelectIntoOp(SelectInst &SI, Value*, Value*);
- Instruction *FoldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
- Value *A, Value *B, Instruction &Outer,
- SelectPatternFlavor SPF2, Value *C);
- Instruction *visitSelectInst(SelectInst &SI);
- Instruction *visitSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
- Instruction *visitCallInst(CallInst &CI);
- Instruction *visitInvokeInst(InvokeInst &II);
-
- Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
- Instruction *visitPHINode(PHINode &PN);
- Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
- Instruction *visitAllocaInst(AllocaInst &AI);
- Instruction *visitFree(Instruction &FI);
- Instruction *visitLoadInst(LoadInst &LI);
- Instruction *visitStoreInst(StoreInst &SI);
- Instruction *visitBranchInst(BranchInst &BI);
- Instruction *visitSwitchInst(SwitchInst &SI);
- Instruction *visitInsertElementInst(InsertElementInst &IE);
- Instruction *visitExtractElementInst(ExtractElementInst &EI);
- Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
- Instruction *visitExtractValueInst(ExtractValueInst &EV);
-
- // visitInstruction - Specify what to return for unhandled instructions...
- Instruction *visitInstruction(Instruction &I) { return 0; }
-
-private:
- bool ShouldChangeType(const Type *From, const Type *To) const;
- Value *dyn_castNegVal(Value *V) const;
- Value *dyn_castFNegVal(Value *V) const;
- const Type *FindElementAtOffset(const Type *Ty, int64_t Offset,
- SmallVectorImpl<Value*> &NewIndices);
- Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);
-
- /// ShouldOptimizeCast - Return true if the cast from "V to Ty" actually
- /// results in any code being generated and is interesting to optimize out. If
- /// the cast can be eliminated by some other simple transformation, we prefer
- /// to do the simplification first.
- bool ShouldOptimizeCast(Instruction::CastOps opcode,const Value *V,
- const Type *Ty);
-
- Instruction *visitCallSite(CallSite CS);
- Instruction *tryOptimizeCall(CallInst *CI, const TargetData *TD);
- bool transformConstExprCastCall(CallSite CS);
- Instruction *transformCallThroughTrampoline(CallSite CS);
- Instruction *transformZExtICmp(ICmpInst *ICI, Instruction &CI,
- bool DoXform = true);
- bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS);
- DbgDeclareInst *hasOneUsePlusDeclare(Value *V);
- Value *EmitGEPOffset(User *GEP);
-
-public:
- // InsertNewInstBefore - insert an instruction New before instruction Old
- // in the program. Add the new instruction to the worklist.
- //
- Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
- assert(New && New->getParent() == 0 &&
- "New instruction already inserted into a basic block!");
- BasicBlock *BB = Old.getParent();
- BB->getInstList().insert(&Old, New); // Insert inst
- Worklist.Add(New);
- return New;
- }
-
- // ReplaceInstUsesWith - This method is to be used when an instruction is
- // found to be dead, replacable with another preexisting expression. Here
- // we add all uses of I to the worklist, replace all uses of I with the new
- // value, then return I, so that the inst combiner will know that I was
- // modified.
- //
- Instruction *ReplaceInstUsesWith(Instruction &I, Value *V) {
- Worklist.AddUsersToWorkList(I); // Add all modified instrs to worklist.
-
- // If we are replacing the instruction with itself, this must be in a
- // segment of unreachable code, so just clobber the instruction.
- if (&I == V)
- V = UndefValue::get(I.getType());
-
- I.replaceAllUsesWith(V);
- return &I;
- }
-
- // EraseInstFromFunction - When dealing with an instruction that has side
- // effects or produces a void value, we can't rely on DCE to delete the
- // instruction. Instead, visit methods should return the value returned by
- // this function.
- Instruction *EraseInstFromFunction(Instruction &I) {
- DEBUG(errs() << "IC: ERASE " << I << '\n');
-
- assert(I.use_empty() && "Cannot erase instruction that is used!");
- // Make sure that we reprocess all operands now that we reduced their
- // use counts.
- if (I.getNumOperands() < 8) {
- for (User::op_iterator i = I.op_begin(), e = I.op_end(); i != e; ++i)
- if (Instruction *Op = dyn_cast<Instruction>(*i))
- Worklist.Add(Op);
- }
- Worklist.Remove(&I);
- I.eraseFromParent();
- MadeIRChange = true;
- return 0; // Don't do anything with FI
- }
-
- void ComputeMaskedBits(Value *V, const APInt &Mask, APInt &KnownZero,
- APInt &KnownOne, unsigned Depth = 0) const {
- return llvm::ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
- }
-
- bool MaskedValueIsZero(Value *V, const APInt &Mask,
- unsigned Depth = 0) const {
- return llvm::MaskedValueIsZero(V, Mask, TD, Depth);
- }
- unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0) const {
- return llvm::ComputeNumSignBits(Op, TD, Depth);
- }
-
-private:
-
- /// SimplifyCommutative - This performs a few simplifications for
- /// commutative operators.
- bool SimplifyCommutative(BinaryOperator &I);
-
- /// SimplifyDemandedUseBits - Attempts to replace V with a simpler value
- /// based on the demanded bits.
- Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
- APInt& KnownZero, APInt& KnownOne,
- unsigned Depth);
- bool SimplifyDemandedBits(Use &U, APInt DemandedMask,
- APInt& KnownZero, APInt& KnownOne,
- unsigned Depth=0);
-
- /// SimplifyDemandedInstructionBits - Inst is an integer instruction that
- /// SimplifyDemandedBits knows about. See if the instruction has any
- /// properties that allow us to simplify its operands.
- bool SimplifyDemandedInstructionBits(Instruction &Inst);
-
- Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
- APInt& UndefElts, unsigned Depth = 0);
-
- // FoldOpIntoPhi - Given a binary operator, cast instruction, or select
- // which has a PHI node as operand #0, see if we can fold the instruction
- // into the PHI (which is only possible if all operands to the PHI are
- // constants).
- //
- // If AllowAggressive is true, FoldOpIntoPhi will allow certain transforms
- // that would normally be unprofitable because they strongly encourage jump
- // threading.
- Instruction *FoldOpIntoPhi(Instruction &I, bool AllowAggressive = false);
-
- // FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
- // operator and they all are only used by the PHI, PHI together their
- // inputs, and do the operation once, to the result of the PHI.
- Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
- Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);
- Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);
- Instruction *FoldPHIArgLoadIntoPHI(PHINode &PN);
-
-
- Instruction *OptAndOp(Instruction *Op, ConstantInt *OpRHS,
- ConstantInt *AndRHS, BinaryOperator &TheAnd);
-
- Value *FoldLogicalPlusAnd(Value *LHS, Value *RHS, ConstantInt *Mask,
- bool isSub, Instruction &I);
- Value *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
- bool isSigned, bool Inside);
- Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
- Instruction *MatchBSwap(BinaryOperator &I);
- bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
- Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
- Instruction *SimplifyMemSet(MemSetInst *MI);
-
-
- Value *EvaluateInDifferentType(Value *V, const Type *Ty, bool isSigned);
-
- unsigned GetOrEnforceKnownAlignment(Value *V,
- unsigned PrefAlign = 0);
-
-};
-
-
-
-} // end namespace llvm.
-
-#endif
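The deleted InstCombine.h spells out the visitor contract: a visit method returns null for no change, the same instruction when it was modified in place, or a new instruction that should replace the old one, and the driver keeps a worklist of instructions to revisit. A toy, self-contained sketch of that driver convention (all names hypothetical; no LLVM types):

#include <cstdio>
#include <deque>

// Toy stand-in for an instruction and for a visit() that follows the return
// convention documented in the deleted header:
//   0            -> no change
//   same pointer -> modified in place (revisit it)
//   new pointer  -> replace the old instruction with the returned one
struct Inst { int opcode; };

static Inst *visit(Inst *I) {
  if (I->opcode == 0) return 0;                     // nothing to do
  if (I->opcode == 1) { I->opcode = 0; return I; }  // changed in place
  return 0;
}

int main() {
  Inst a = {1}, b = {0};
  std::deque<Inst*> worklist;
  worklist.push_back(&a);
  worklist.push_back(&b);
  while (!worklist.empty()) {
    Inst *I = worklist.front(); worklist.pop_front();
    if (Inst *R = visit(I)) {
      if (R == I) worklist.push_back(I);  // modified in place: revisit later
      // else: splice R in and enqueue it (omitted in this toy sketch)
    }
  }
  std::printf("worklist drained\n");
  return 0;
}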
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
deleted file mode 100644
index 4d2c89e..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ /dev/null
@@ -1,731 +0,0 @@
-//===- InstCombineAddSub.cpp ----------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the visit functions for add, fadd, sub, and fsub.
-//
-//===----------------------------------------------------------------------===//
-
-#include "InstCombine.h"
-#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/PatternMatch.h"
-using namespace llvm;
-using namespace PatternMatch;
-
-/// AddOne - Add one to a ConstantInt.
-static Constant *AddOne(Constant *C) {
- return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
-}
-/// SubOne - Subtract one from a ConstantInt.
-static Constant *SubOne(ConstantInt *C) {
- return ConstantInt::get(C->getContext(), C->getValue()-1);
-}
-
-
-// dyn_castFoldableMul - If this value is a multiply that can be folded into
-// other computations (because it has a constant operand), return the
-// non-constant operand of the multiply, and set CST to point to the multiplier.
-// Otherwise, return null.
-//
-static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST) {
- if (!V->hasOneUse() || !V->getType()->isIntegerTy())
- return 0;
-
- Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0) return 0;
-
- if (I->getOpcode() == Instruction::Mul)
- if ((CST = dyn_cast<ConstantInt>(I->getOperand(1))))
- return I->getOperand(0);
- if (I->getOpcode() == Instruction::Shl)
- if ((CST = dyn_cast<ConstantInt>(I->getOperand(1)))) {
- // The multiplier is really 1 << CST.
- uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
- uint32_t CSTVal = CST->getLimitedValue(BitWidth);
- CST = ConstantInt::get(V->getType()->getContext(),
- APInt(BitWidth, 1).shl(CSTVal));
- return I->getOperand(0);
- }
- return 0;
-}
-
-
-/// WillNotOverflowSignedAdd - Return true if we can prove that:
-/// (sext (add LHS, RHS)) === (add (sext LHS), (sext RHS))
-/// This basically requires proving that the add in the original type would not
-/// overflow to change the sign bit or have a carry out.
-bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) {
- // There are different heuristics we can use for this. Here are some simple
- // ones.
-
- // Add has the property that adding any two 2's complement numbers can only
- // have one carry bit which can change a sign. As such, if LHS and RHS each
- // have at least two sign bits, we know that the addition of the two values
- // will sign extend fine.
- if (ComputeNumSignBits(LHS) > 1 && ComputeNumSignBits(RHS) > 1)
- return true;
-
-
- // If one of the operands only has one non-zero bit, and if the other operand
- // has a known-zero bit in a more significant place than it (not including the
- // sign bit) the ripple may go up to and fill the zero, but won't change the
- // sign. For example, (X & ~4) + 1.
-
- // TODO: Implement.
-
- return false;
-}
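The "at least two sign bits each" argument in WillNotOverflowSignedAdd above can be checked exhaustively for 8-bit values: operands whose top two bits agree lie in [-64, 63], so their sum lies in [-128, 126] and still fits in a signed byte. A quick self-contained check (illustrative, not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  // 8-bit values with at least two sign bits lie in [-64, 63].
  for (int lhs = -64; lhs <= 63; ++lhs)
    for (int rhs = -64; rhs <= 63; ++rhs) {
      int sum = lhs + rhs;                        // exact sum
      int8_t wrapped = static_cast<int8_t>(sum);  // 8-bit signed addition
      assert(sum == wrapped);                     // never overflows 8 bits
    }
  return 0;
}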
-
-Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
- bool Changed = SimplifyCommutative(I);
- Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
-
- if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
- I.hasNoUnsignedWrap(), TD))
- return ReplaceInstUsesWith(I, V);
-
-
- if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(RHSC)) {
- // X + (signbit) --> X ^ signbit
- const APInt& Val = CI->getValue();
- uint32_t BitWidth = Val.getBitWidth();
- if (Val == APInt::getSignBit(BitWidth))
- return BinaryOperator::CreateXor(LHS, RHS);
-
- // See if SimplifyDemandedBits can simplify this. This handles stuff like
- // (X & 254)+1 -> (X&254)|1
- if (SimplifyDemandedInstructionBits(I))
- return &I;
-
- // zext(bool) + C -> bool ? C + 1 : C
- if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS))
- if (ZI->getSrcTy() == Type::getInt1Ty(I.getContext()))
- return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI);
- }
-
- if (isa<PHINode>(LHS))
- if (Instruction *NV = FoldOpIntoPhi(I))
- return NV;
-
- ConstantInt *XorRHS = 0;
- Value *XorLHS = 0;
- if (isa<ConstantInt>(RHSC) &&
- match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
- uint32_t TySizeBits = I.getType()->getScalarSizeInBits();
- const APInt& RHSVal = cast<ConstantInt>(RHSC)->getValue();
- unsigned ExtendAmt = 0;
- // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
- // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
- if (XorRHS->getValue() == -RHSVal) {
- if (RHSVal.isPowerOf2())
- ExtendAmt = TySizeBits - RHSVal.logBase2() - 1;
- else if (XorRHS->getValue().isPowerOf2())
- ExtendAmt = TySizeBits - XorRHS->getValue().logBase2() - 1;
- }
-
- if (ExtendAmt) {
- APInt Mask = APInt::getHighBitsSet(TySizeBits, ExtendAmt);
- if (!MaskedValueIsZero(XorLHS, Mask))
- ExtendAmt = 0;
- }
-
- if (ExtendAmt) {
- Constant *ShAmt = ConstantInt::get(I.getType(), ExtendAmt);
- Value *NewShl = Builder->CreateShl(XorLHS, ShAmt, "sext");
- return BinaryOperator::CreateAShr(NewShl, ShAmt);
- }
- }
- }
-
- if (I.getType()->isIntegerTy(1))
- return BinaryOperator::CreateXor(LHS, RHS);
-
- if (I.getType()->isIntegerTy()) {
- // X + X --> X << 1
- if (LHS == RHS)
- return BinaryOperator::CreateShl(LHS, ConstantInt::get(I.getType(), 1));
-
- if (Instruction *RHSI = dyn_cast<Instruction>(RHS)) {
- if (RHSI->getOpcode() == Instruction::Sub)
- if (LHS == RHSI->getOperand(1)) // A + (B - A) --> B
- return ReplaceInstUsesWith(I, RHSI->getOperand(0));
- }
- if (Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
- if (LHSI->getOpcode() == Instruction::Sub)
- if (RHS == LHSI->getOperand(1)) // (B - A) + A --> B
- return ReplaceInstUsesWith(I, LHSI->getOperand(0));
- }
- }
-
- // -A + B --> B - A
- // -A + -B --> -(A + B)
- if (Value *LHSV = dyn_castNegVal(LHS)) {
- if (LHS->getType()->isIntOrIntVectorTy()) {
- if (Value *RHSV = dyn_castNegVal(RHS)) {
- Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
- return BinaryOperator::CreateNeg(NewAdd);
- }
- }
-
- return BinaryOperator::CreateSub(RHS, LHSV);
- }
-
- // A + -B --> A - B
- if (!isa<Constant>(RHS))
- if (Value *V = dyn_castNegVal(RHS))
- return BinaryOperator::CreateSub(LHS, V);
-
-
- ConstantInt *C2;
- if (Value *X = dyn_castFoldableMul(LHS, C2)) {
- if (X == RHS) // X*C + X --> X * (C+1)
- return BinaryOperator::CreateMul(RHS, AddOne(C2));
-
- // X*C1 + X*C2 --> X * (C1+C2)
- ConstantInt *C1;
- if (X == dyn_castFoldableMul(RHS, C1))
- return BinaryOperator::CreateMul(X, ConstantExpr::getAdd(C1, C2));
- }
-
- // X + X*C --> X * (C+1)
- if (dyn_castFoldableMul(RHS, C2) == LHS)
- return BinaryOperator::CreateMul(LHS, AddOne(C2));
-
- // X + ~X --> -1 since ~X = -X-1
- if (match(LHS, m_Not(m_Specific(RHS))) ||
- match(RHS, m_Not(m_Specific(LHS))))
- return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
-
- // A+B --> A|B iff A and B have no bits set in common.
- if (const IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
- APInt Mask = APInt::getAllOnesValue(IT->getBitWidth());
- APInt LHSKnownOne(IT->getBitWidth(), 0);
- APInt LHSKnownZero(IT->getBitWidth(), 0);
- ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
- if (LHSKnownZero != 0) {
- APInt RHSKnownOne(IT->getBitWidth(), 0);
- APInt RHSKnownZero(IT->getBitWidth(), 0);
- ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
-
- // No bits in common -> bitwise or.
- if ((LHSKnownZero|RHSKnownZero).isAllOnesValue())
- return BinaryOperator::CreateOr(LHS, RHS);
- }
- }
-
- // W*X + Y*Z --> W * (X+Z) iff W == Y
- if (I.getType()->isIntOrIntVectorTy()) {
- Value *W, *X, *Y, *Z;
- if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
- match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) {
- if (W != Y) {
- if (W == Z) {
- std::swap(Y, Z);
- } else if (Y == X) {
- std::swap(W, X);
- } else if (X == Z) {
- std::swap(Y, Z);
- std::swap(W, X);
- }
- }
-
- if (W == Y) {
- Value *NewAdd = Builder->CreateAdd(X, Z, LHS->getName());
- return BinaryOperator::CreateMul(W, NewAdd);
- }
- }
- }
-
- if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
- Value *X = 0;
- if (match(LHS, m_Not(m_Value(X)))) // ~X + C --> (C-1) - X
- return BinaryOperator::CreateSub(SubOne(CRHS), X);
-
- // (X & FF00) + xx00 -> (X+xx00) & FF00
- if (LHS->hasOneUse() &&
- match(LHS, m_And(m_Value(X), m_ConstantInt(C2)))) {
- Constant *Anded = ConstantExpr::getAnd(CRHS, C2);
- if (Anded == CRHS) {
- // See if all bits from the first bit set in the Add RHS up are included
- // in the mask. First, get the rightmost bit.
- const APInt &AddRHSV = CRHS->getValue();
-
- // Form a mask of all bits from the lowest bit added through the top.
- APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));
-
- // See if the and mask includes all of these bits.
- APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());
-
- if (AddRHSHighBits == AddRHSHighBitsAnd) {
- // Okay, the xform is safe. Insert the new add pronto.
- Value *NewAdd = Builder->CreateAdd(X, CRHS, LHS->getName());
- return BinaryOperator::CreateAnd(NewAdd, C2);
- }
- }
- }
-
- // Try to fold constant add into select arguments.
- if (SelectInst *SI = dyn_cast<SelectInst>(LHS))
- if (Instruction *R = FoldOpIntoSelect(I, SI))
- return R;
- }
-
- // add (select X 0 (sub n A)) A --> select X A n
- {
- SelectInst *SI = dyn_cast<SelectInst>(LHS);
- Value *A = RHS;
- if (!SI) {
- SI = dyn_cast<SelectInst>(RHS);
- A = LHS;
- }
- if (SI && SI->hasOneUse()) {
- Value *TV = SI->getTrueValue();
- Value *FV = SI->getFalseValue();
- Value *N;
-
- // Can we fold the add into the argument of the select?
- // We check both true and false select arguments for a matching subtract.
- if (match(FV, m_Zero()) &&
- match(TV, m_Sub(m_Value(N), m_Specific(A))))
- // Fold the add into the true select value.
- return SelectInst::Create(SI->getCondition(), N, A);
- if (match(TV, m_Zero()) &&
- match(FV, m_Sub(m_Value(N), m_Specific(A))))
- // Fold the add into the false select value.
- return SelectInst::Create(SI->getCondition(), A, N);
- }
- }
-
- // Check for (add (sext x), y), see if we can merge this into an
- // integer add followed by a sext.
- if (SExtInst *LHSConv = dyn_cast<SExtInst>(LHS)) {
- // (add (sext x), cst) --> (sext (add x, cst'))
- if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
- Constant *CI =
- ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
- if (LHSConv->hasOneUse() &&
- ConstantExpr::getSExt(CI, I.getType()) == RHSC &&
- WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
- // Insert the new, smaller add.
- Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
- CI, "addconv");
- return new SExtInst(NewAdd, I.getType());
- }
- }
-
- // (add (sext x), (sext y)) --> (sext (add int x, y))
- if (SExtInst *RHSConv = dyn_cast<SExtInst>(RHS)) {
- // Only do this if x/y have the same type, if at least one of them has a
- // single use (so we don't increase the number of sexts), and if the
- // integer add will not overflow.
- if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
- (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
- WillNotOverflowSignedAdd(LHSConv->getOperand(0),
- RHSConv->getOperand(0))) {
- // Insert the new integer add.
- Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
- RHSConv->getOperand(0), "addconv");
- return new SExtInst(NewAdd, I.getType());
- }
- }
- }
-
- return Changed ? &I : 0;
-}
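Two of the folds in the deleted visitAdd are easy to sanity-check with ordinary integers: A+B can become A|B when the operands share no set bits, and X + ~X is always -1 in two's complement. An illustrative check with made-up values:

#include <cassert>
#include <cstdint>

int main() {
  // "A+B --> A|B iff A and B have no bits set in common."
  uint32_t a = 0xFF00u, b = 0x00FFu;
  assert((a & b) == 0);       // no bits in common
  assert(a + b == (a | b));   // so addition and bitwise-or coincide

  // "X + ~X --> -1 since ~X = -X-1."
  uint32_t x = 12345u;
  assert(x + ~x == 0xFFFFFFFFu);
  return 0;
}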
-
-Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
- bool Changed = SimplifyCommutative(I);
- Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
-
- if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
- // X + 0 --> X
- if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) {
- if (CFP->isExactlyValue(ConstantFP::getNegativeZero
- (I.getType())->getValueAPF()))
- return ReplaceInstUsesWith(I, LHS);
- }
-
- if (isa<PHINode>(LHS))
- if (Instruction *NV = FoldOpIntoPhi(I))
- return NV;
- }
-
- // -A + B --> B - A
- // -A + -B --> -(A + B)
- if (Value *LHSV = dyn_castFNegVal(LHS))
- return BinaryOperator::CreateFSub(RHS, LHSV);
-
- // A + -B --> A - B
- if (!isa<Constant>(RHS))
- if (Value *V = dyn_castFNegVal(RHS))
- return BinaryOperator::CreateFSub(LHS, V);
-
- // Check for X+0.0. Simplify it to X if we know X is not -0.0.
- if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
- if (CFP->getValueAPF().isPosZero() && CannotBeNegativeZero(LHS))
- return ReplaceInstUsesWith(I, LHS);
-
- // Check for (fadd double (sitofp x), y), see if we can merge this into an
- // integer add followed by a promotion.
- if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
- // (fadd double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
- // ... if the constant fits in the integer value. This is useful for things
- // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
- // requires a constant pool load, and generally allows the add to be better
- // instcombined.
- if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) {
- Constant *CI =
- ConstantExpr::getFPToSI(CFP, LHSConv->getOperand(0)->getType());
- if (LHSConv->hasOneUse() &&
- ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
- WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
- // Insert the new integer add.
- Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
- CI, "addconv");
- return new SIToFPInst(NewAdd, I.getType());
- }
- }
-
- // (fadd double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
- if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
- // Only do this if x/y have the same type, if at least one of them has a
- // single use (so we don't increase the number of int->fp conversions),
- // and if the integer add will not overflow.
- if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
- (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
- WillNotOverflowSignedAdd(LHSConv->getOperand(0),
- RHSConv->getOperand(0))) {
- // Insert the new integer add.
- Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
- RHSConv->getOperand(0),"addconv");
- return new SIToFPInst(NewAdd, I.getType());
- }
- }
- }
-
- return Changed ? &I : 0;
-}
-
-
-/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
-/// code necessary to compute the offset from the base pointer (without adding
-/// in the base pointer). Return the result as a signed integer of intptr size.
-Value *InstCombiner::EmitGEPOffset(User *GEP) {
- TargetData &TD = *getTargetData();
- gep_type_iterator GTI = gep_type_begin(GEP);
- const Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
- Value *Result = Constant::getNullValue(IntPtrTy);
-
- // Build a mask for high order bits.
- unsigned IntPtrWidth = TD.getPointerSizeInBits();
- uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
-
- for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
- ++i, ++GTI) {
- Value *Op = *i;
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
- if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
- if (OpC->isZero()) continue;
-
- // Handle a struct index, which adds its field offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
- Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
-
- Result = Builder->CreateAdd(Result,
- ConstantInt::get(IntPtrTy, Size),
- GEP->getName()+".offs");
- continue;
- }
-
- Constant *Scale = ConstantInt::get(IntPtrTy, Size);
- Constant *OC =
- ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
- Scale = ConstantExpr::getMul(OC, Scale);
- // Emit an add instruction.
- Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
- continue;
- }
- // Convert to correct type.
- if (Op->getType() != IntPtrTy)
- Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
- if (Size != 1) {
- Constant *Scale = ConstantInt::get(IntPtrTy, Size);
- // We'll let instcombine(mul) convert this to a shl if possible.
- Op = Builder->CreateMul(Op, Scale, GEP->getName()+".idx");
- }
-
- // Emit an add instruction.
- Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
- }
- return Result;
-}
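EmitGEPOffset above accumulates, for each GEP index, either the struct field offset or the index times the element's allocation size. A worked example under an assumed layout (hypothetical GEP, 4-byte i32, no padding); the numbers are illustrative, not taken from the patch:

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical: gep {i32, [10 x i32]}* %base, i64 2, i32 1, i64 3
  // Assumed layout: sizeof(struct) = 44, field 1 offset = 4, element size = 4.
  uint64_t structSize = 44, field1Off = 4, elemSize = 4;
  uint64_t offset = 0;
  offset += 2 * structSize;   // outer index scales by the whole struct size
  offset += field1Off;        // struct index adds the field offset
  offset += 3 * elemSize;     // array index scales by the element size
  assert(offset == 88 + 4 + 12 && offset == 104);
  return 0;
}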
-
-
-
-
-/// Optimize pointer differences within the same array into a size. Consider:
-/// &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer
-/// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
-///
-Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
- const Type *Ty) {
- assert(TD && "Must have target data info for this");
-
- // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
- // this.
- bool Swapped = false;
- GetElementPtrInst *GEP = 0;
- ConstantExpr *CstGEP = 0;
-
- // TODO: Could also optimize &A[i] - &A[j] -> "i-j", and "&A.foo[i] - &A.foo".
- // For now we require one side to be the base pointer "A" or a constant
- // expression derived from it.
- if (GetElementPtrInst *LHSGEP = dyn_cast<GetElementPtrInst>(LHS)) {
- // (gep X, ...) - X
- if (LHSGEP->getOperand(0) == RHS) {
- GEP = LHSGEP;
- Swapped = false;
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(RHS)) {
- // (gep X, ...) - (ce_gep X, ...)
- if (CE->getOpcode() == Instruction::GetElementPtr &&
- LHSGEP->getOperand(0) == CE->getOperand(0)) {
- CstGEP = CE;
- GEP = LHSGEP;
- Swapped = false;
- }
- }
- }
-
- if (GetElementPtrInst *RHSGEP = dyn_cast<GetElementPtrInst>(RHS)) {
- // X - (gep X, ...)
- if (RHSGEP->getOperand(0) == LHS) {
- GEP = RHSGEP;
- Swapped = true;
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(LHS)) {
- // (ce_gep X, ...) - (gep X, ...)
- if (CE->getOpcode() == Instruction::GetElementPtr &&
- RHSGEP->getOperand(0) == CE->getOperand(0)) {
- CstGEP = CE;
- GEP = RHSGEP;
- Swapped = true;
- }
- }
- }
-
- if (GEP == 0)
- return 0;
-
- // Emit the offset of the GEP and an intptr_t.
- Value *Result = EmitGEPOffset(GEP);
-
- // If we had a constant expression GEP on the other side offsetting the
- // pointer, subtract it from the offset we have.
- if (CstGEP) {
- Value *CstOffset = EmitGEPOffset(CstGEP);
- Result = Builder->CreateSub(Result, CstOffset);
- }
-
-
- // If we have p - gep(p, ...) then we have to negate the result.
- if (Swapped)
- Result = Builder->CreateNeg(Result, "diff.neg");
-
- return Builder->CreateIntCast(Result, Ty, true);
-}
-
-
-Instruction *InstCombiner::visitSub(BinaryOperator &I) {
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- if (Op0 == Op1) // sub X, X -> 0
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
-
- // If this is a 'B = x-(-A)', change to B = x+A. This preserves NSW/NUW.
- if (Value *V = dyn_castNegVal(Op1)) {
- BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);
- Res->setHasNoSignedWrap(I.hasNoSignedWrap());
- Res->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
- return Res;
- }
-
- if (isa<UndefValue>(Op0))
- return ReplaceInstUsesWith(I, Op0); // undef - X -> undef
- if (isa<UndefValue>(Op1))
- return ReplaceInstUsesWith(I, Op1); // X - undef -> undef
- if (I.getType()->isIntegerTy(1))
- return BinaryOperator::CreateXor(Op0, Op1);
-
- if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) {
- // Replace (-1 - A) with (~A).
- if (C->isAllOnesValue())
- return BinaryOperator::CreateNot(Op1);
-
- // C - ~X == X + (1+C)
- Value *X = 0;
- if (match(Op1, m_Not(m_Value(X))))
- return BinaryOperator::CreateAdd(X, AddOne(C));
-
- // -(X >>u 31) -> (X >>s 31)
- // -(X >>s 31) -> (X >>u 31)
- if (C->isZero()) {
- if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op1)) {
- if (SI->getOpcode() == Instruction::LShr) {
- if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
- // Check to see if we are shifting out everything but the sign bit.
- if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
- SI->getType()->getPrimitiveSizeInBits()-1) {
- // Ok, the transformation is safe. Insert AShr.
- return BinaryOperator::Create(Instruction::AShr,
- SI->getOperand(0), CU, SI->getName());
- }
- }
- } else if (SI->getOpcode() == Instruction::AShr) {
- if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
- // Check to see if we are shifting out everything but the sign bit.
- if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
- SI->getType()->getPrimitiveSizeInBits()-1) {
- // Ok, the transformation is safe. Insert LShr.
- return BinaryOperator::CreateLShr(
- SI->getOperand(0), CU, SI->getName());
- }
- }
- }
- }
- }
-
- // Try to fold constant sub into select arguments.
- if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
- if (Instruction *R = FoldOpIntoSelect(I, SI))
- return R;
-
- // C - zext(bool) -> bool ? C - 1 : C
- if (ZExtInst *ZI = dyn_cast<ZExtInst>(Op1))
- if (ZI->getSrcTy() == Type::getInt1Ty(I.getContext()))
- return SelectInst::Create(ZI->getOperand(0), SubOne(C), C);
- }
-
- if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
- if (Op1I->getOpcode() == Instruction::Add) {
- if (Op1I->getOperand(0) == Op0) // X-(X+Y) == -Y
- return BinaryOperator::CreateNeg(Op1I->getOperand(1),
- I.getName());
- else if (Op1I->getOperand(1) == Op0) // X-(Y+X) == -Y
- return BinaryOperator::CreateNeg(Op1I->getOperand(0),
- I.getName());
- else if (ConstantInt *CI1 = dyn_cast<ConstantInt>(I.getOperand(0))) {
- if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Op1I->getOperand(1)))
- // C1-(X+C2) --> (C1-C2)-X
- return BinaryOperator::CreateSub(
- ConstantExpr::getSub(CI1, CI2), Op1I->getOperand(0));
- }
- }
-
- if (Op1I->hasOneUse()) {
- // Replace (x - (y - z)) with (x + (z - y)) if the (y - z) subexpression
- // is not used by anyone else...
- //
- if (Op1I->getOpcode() == Instruction::Sub) {
- // Swap the two operands of the subexpr...
- Value *IIOp0 = Op1I->getOperand(0), *IIOp1 = Op1I->getOperand(1);
- Op1I->setOperand(0, IIOp1);
- Op1I->setOperand(1, IIOp0);
-
- // Create the new top level add instruction...
- return BinaryOperator::CreateAdd(Op0, Op1);
- }
-
- // Replace (A - (A & B)) with (A & ~B) if this is the only use of (A&B)...
- //
- if (Op1I->getOpcode() == Instruction::And &&
- (Op1I->getOperand(0) == Op0 || Op1I->getOperand(1) == Op0)) {
- Value *OtherOp = Op1I->getOperand(Op1I->getOperand(0) == Op0);
-
- Value *NewNot = Builder->CreateNot(OtherOp, "B.not");
- return BinaryOperator::CreateAnd(Op0, NewNot);
- }
-
- // 0 - (X sdiv C) -> (X sdiv -C)
- if (Op1I->getOpcode() == Instruction::SDiv)
- if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
- if (CSI->isZero())
- if (Constant *DivRHS = dyn_cast<Constant>(Op1I->getOperand(1)))
- return BinaryOperator::CreateSDiv(Op1I->getOperand(0),
- ConstantExpr::getNeg(DivRHS));
-
- // 0 - (C << X) -> (-C << X)
- if (Op1I->getOpcode() == Instruction::Shl)
- if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
- if (CSI->isZero())
- if (Value *ShlLHSNeg = dyn_castNegVal(Op1I->getOperand(0)))
- return BinaryOperator::CreateShl(ShlLHSNeg, Op1I->getOperand(1));
-
- // X - X*C --> X * (1-C)
- ConstantInt *C2 = 0;
- if (dyn_castFoldableMul(Op1I, C2) == Op0) {
- Constant *CP1 =
- ConstantExpr::getSub(ConstantInt::get(I.getType(), 1),
- C2);
- return BinaryOperator::CreateMul(Op0, CP1);
- }
- }
- }
-
- if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
- if (Op0I->getOpcode() == Instruction::Add) {
- if (Op0I->getOperand(0) == Op1) // (Y+X)-Y == X
- return ReplaceInstUsesWith(I, Op0I->getOperand(1));
- else if (Op0I->getOperand(1) == Op1) // (X+Y)-Y == X
- return ReplaceInstUsesWith(I, Op0I->getOperand(0));
- } else if (Op0I->getOpcode() == Instruction::Sub) {
- if (Op0I->getOperand(0) == Op1) // (X-Y)-X == -Y
- return BinaryOperator::CreateNeg(Op0I->getOperand(1),
- I.getName());
- }
- }
-
- ConstantInt *C1;
- if (Value *X = dyn_castFoldableMul(Op0, C1)) {
- if (X == Op1) // X*C - X --> X * (C-1)
- return BinaryOperator::CreateMul(Op1, SubOne(C1));
-
- ConstantInt *C2; // X*C1 - X*C2 -> X * (C1-C2)
- if (X == dyn_castFoldableMul(Op1, C2))
- return BinaryOperator::CreateMul(X, ConstantExpr::getSub(C1, C2));
- }
-
- // Optimize differences of pointers into the same array into a size. Consider:
- // &A[10] - &A[0]: we should compile this to "10".
- if (TD) {
- Value *LHSOp, *RHSOp;
- if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
- match(Op1, m_PtrToInt(m_Value(RHSOp))))
- if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
- return ReplaceInstUsesWith(I, Res);
-
- // trunc(p)-trunc(q) -> trunc(p-q)
- if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
- match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
- if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
- return ReplaceInstUsesWith(I, Res);
- }
-
- return 0;
-}
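
A minimal standalone check of the sign-bit rewrite above (0 - (X >>u 31) <-> X >>s 31), in plain C++ with illustrative helper names, assuming 32-bit two's-complement integers and an arithmetic right shift for signed values:

    #include <cassert>
    #include <cstdint>

    // Negating the logical shift of the sign bit (0 or 1) yields 0 or -1,
    // which is exactly what an arithmetic shift by 31 produces.
    int32_t neg_of_lshr(uint32_t x) { return -static_cast<int32_t>(x >> 31); }
    int32_t ashr31(uint32_t x)      { return static_cast<int32_t>(x) >> 31; }

    int main() {
      const uint32_t samples[] = {0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu};
      for (uint32_t x : samples)
        assert(neg_of_lshr(x) == ashr31(x));
      return 0;
    }
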
-
-Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- // If this is a 'B = x-(-A)', change to B = x+A...
- if (Value *V = dyn_castFNegVal(Op1))
- return BinaryOperator::CreateFAdd(Op0, V);
-
- return 0;
-}
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
deleted file mode 100644
index 3fb3de7..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ /dev/null
@@ -1,1965 +0,0 @@
-//===- InstCombineAndOrXor.cpp --------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the visitAnd, visitOr, and visitXor functions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "InstCombine.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Support/PatternMatch.h"
-using namespace llvm;
-using namespace PatternMatch;
-
-
-/// AddOne - Add one to a ConstantInt.
-static Constant *AddOne(Constant *C) {
- return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
-}
-/// SubOne - Subtract one from a ConstantInt.
-static Constant *SubOne(ConstantInt *C) {
- return ConstantInt::get(C->getContext(), C->getValue()-1);
-}
-
-/// isFreeToInvert - Return true if the specified value is free to invert (apply
-/// ~ to). This happens in cases where the ~ can be eliminated.
-static inline bool isFreeToInvert(Value *V) {
- // ~(~(X)) -> X.
- if (BinaryOperator::isNot(V))
- return true;
-
- // Constants can be considered to be not'ed values.
- if (isa<ConstantInt>(V))
- return true;
-
- // Compares can be inverted if they have a single use.
- if (CmpInst *CI = dyn_cast<CmpInst>(V))
- return CI->hasOneUse();
-
- return false;
-}
-
-static inline Value *dyn_castNotVal(Value *V) {
- // If this is not(not(x)) don't return that this is a not: we want the two
- // not's to be folded first.
- if (BinaryOperator::isNot(V)) {
- Value *Operand = BinaryOperator::getNotArgument(V);
- if (!isFreeToInvert(Operand))
- return Operand;
- }
-
- // Constants can be considered to be not'ed values...
- if (ConstantInt *C = dyn_cast<ConstantInt>(V))
- return ConstantInt::get(C->getType(), ~C->getValue());
- return 0;
-}
-
-
-/// getICmpCode - Encode a icmp predicate into a three bit mask. These bits
-/// are carefully arranged to allow folding of expressions such as:
-///
-/// (A < B) | (A > B) --> (A != B)
-///
-/// Note that this is only valid if the first and second predicates have the
-/// same sign. It is illegal to do: (A u< B) | (A s> B)
-///
-/// Three bits are used to represent the condition, as follows:
-/// 0 A > B
-/// 1 A == B
-/// 2 A < B
-///
-/// <=> Value Definition
-/// 000 0 Always false
-/// 001 1 A > B
-/// 010 2 A == B
-/// 011 3 A >= B
-/// 100 4 A < B
-/// 101 5 A != B
-/// 110 6 A <= B
-/// 111 7 Always true
-///
-static unsigned getICmpCode(const ICmpInst *ICI) {
- switch (ICI->getPredicate()) {
- // False -> 0
- case ICmpInst::ICMP_UGT: return 1; // 001
- case ICmpInst::ICMP_SGT: return 1; // 001
- case ICmpInst::ICMP_EQ: return 2; // 010
- case ICmpInst::ICMP_UGE: return 3; // 011
- case ICmpInst::ICMP_SGE: return 3; // 011
- case ICmpInst::ICMP_ULT: return 4; // 100
- case ICmpInst::ICMP_SLT: return 4; // 100
- case ICmpInst::ICMP_NE: return 5; // 101
- case ICmpInst::ICMP_ULE: return 6; // 110
- case ICmpInst::ICMP_SLE: return 6; // 110
- // True -> 7
- default:
- llvm_unreachable("Invalid ICmp predicate!");
- return 0;
- }
-}
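
The three-bit encoding above is what makes folds like (A < B) | (A > B) --> (A != B) fall out of a simple bitwise OR: 100 | 001 == 101, the code for "not equal". A small self-contained sketch of the same idea with plain enum values (illustrative, not the LLVM predicates):

    #include <cassert>

    // Mirror the table above: bit 0 = "A > B", bit 1 = "A == B", bit 2 = "A < B".
    enum Code { GT = 1, EQ = 2, GE = 3, LT = 4, NE = 5, LE = 6 };

    // Evaluate a code against concrete operands by testing each bit.
    bool eval(unsigned code, int a, int b) {
      return ((code & LT) && a < b) || ((code & EQ) && a == b) ||
             ((code & GT) && a > b);
    }

    int main() {
      for (int a = -3; a <= 3; ++a)
        for (int b = -3; b <= 3; ++b) {
          assert(eval(LT | GT, a, b) == (a != b));                // OR of codes
          assert((eval(GE, a, b) && eval(LE, a, b)) == (a == b)); // 011 & 110 == 010
        }
      return 0;
    }
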
-
-/// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes a fcmp
-/// predicate into a three bit mask. It also returns whether it is an ordered
-/// predicate by reference.
-static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
- isOrdered = false;
- switch (CC) {
- case FCmpInst::FCMP_ORD: isOrdered = true; return 0; // 000
- case FCmpInst::FCMP_UNO: return 0; // 000
- case FCmpInst::FCMP_OGT: isOrdered = true; return 1; // 001
- case FCmpInst::FCMP_UGT: return 1; // 001
- case FCmpInst::FCMP_OEQ: isOrdered = true; return 2; // 010
- case FCmpInst::FCMP_UEQ: return 2; // 010
- case FCmpInst::FCMP_OGE: isOrdered = true; return 3; // 011
- case FCmpInst::FCMP_UGE: return 3; // 011
- case FCmpInst::FCMP_OLT: isOrdered = true; return 4; // 100
- case FCmpInst::FCMP_ULT: return 4; // 100
- case FCmpInst::FCMP_ONE: isOrdered = true; return 5; // 101
- case FCmpInst::FCMP_UNE: return 5; // 101
- case FCmpInst::FCMP_OLE: isOrdered = true; return 6; // 110
- case FCmpInst::FCMP_ULE: return 6; // 110
- // True -> 7
- default:
- // Not expecting FCMP_FALSE and FCMP_TRUE;
- llvm_unreachable("Unexpected FCmp predicate!");
- return 0;
- }
-}
-
-/// getICmpValue - This is the complement of getICmpCode, which turns an
-/// opcode and two operands into either a constant true or false, or a brand
-/// new ICmp instruction. The sign is passed in to determine which kind
-/// of predicate to use in the new icmp instruction.
-static Value *getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
- InstCombiner::BuilderTy *Builder) {
- CmpInst::Predicate Pred;
- switch (Code) {
- default: assert(0 && "Illegal ICmp code!");
- case 0: // False.
- return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
- case 1: Pred = Sign ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
- case 2: Pred = ICmpInst::ICMP_EQ; break;
- case 3: Pred = Sign ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
- case 4: Pred = Sign ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
- case 5: Pred = ICmpInst::ICMP_NE; break;
- case 6: Pred = Sign ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
- case 7: // True.
- return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
- }
- return Builder->CreateICmp(Pred, LHS, RHS);
-}
-
-/// getFCmpValue - This is the complement of getFCmpCode, which turns an
-/// opcode and two operands into either a constant true or a new FCmp
-/// instruction. isordered determines which kind of predicate to use.
-static Value *getFCmpValue(bool isordered, unsigned code,
- Value *LHS, Value *RHS,
- InstCombiner::BuilderTy *Builder) {
- CmpInst::Predicate Pred;
- switch (code) {
- default: assert(0 && "Illegal FCmp code!");
- case 0: Pred = isordered ? FCmpInst::FCMP_ORD : FCmpInst::FCMP_UNO; break;
- case 1: Pred = isordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT; break;
- case 2: Pred = isordered ? FCmpInst::FCMP_OEQ : FCmpInst::FCMP_UEQ; break;
- case 3: Pred = isordered ? FCmpInst::FCMP_OGE : FCmpInst::FCMP_UGE; break;
- case 4: Pred = isordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT; break;
- case 5: Pred = isordered ? FCmpInst::FCMP_ONE : FCmpInst::FCMP_UNE; break;
- case 6: Pred = isordered ? FCmpInst::FCMP_OLE : FCmpInst::FCMP_ULE; break;
- case 7: return ConstantInt::getTrue(LHS->getContext());
- }
- return Builder->CreateFCmp(Pred, LHS, RHS);
-}
-
-/// PredicatesFoldable - Return true if both predicates match sign or if at
-/// least one of them is an equality comparison (which is signless).
-static bool PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
- return (CmpInst::isSigned(p1) == CmpInst::isSigned(p2)) ||
- (CmpInst::isSigned(p1) && ICmpInst::isEquality(p2)) ||
- (CmpInst::isSigned(p2) && ICmpInst::isEquality(p1));
-}
-
-// OptAndOp - This handles expressions of the form ((val OP C1) & C2), where
-// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'. Op is
-// guaranteed to be a binary operator.
-Instruction *InstCombiner::OptAndOp(Instruction *Op,
- ConstantInt *OpRHS,
- ConstantInt *AndRHS,
- BinaryOperator &TheAnd) {
- Value *X = Op->getOperand(0);
- Constant *Together = 0;
- if (!Op->isShift())
- Together = ConstantExpr::getAnd(AndRHS, OpRHS);
-
- switch (Op->getOpcode()) {
- case Instruction::Xor:
- if (Op->hasOneUse()) {
- // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
- Value *And = Builder->CreateAnd(X, AndRHS);
- And->takeName(Op);
- return BinaryOperator::CreateXor(And, Together);
- }
- break;
- case Instruction::Or:
- if (Together == AndRHS) // (X | C) & C --> C
- return ReplaceInstUsesWith(TheAnd, AndRHS);
-
- if (Op->hasOneUse() && Together != OpRHS) {
- // (X | C1) & C2 --> (X | (C1&C2)) & C2
- Value *Or = Builder->CreateOr(X, Together);
- Or->takeName(Op);
- return BinaryOperator::CreateAnd(Or, AndRHS);
- }
- break;
- case Instruction::Add:
- if (Op->hasOneUse()) {
- // Adding a one to a single bit bit-field should be turned into an XOR
- // of the bit. First thing to check is to see if this AND is with a
- // single bit constant.
- const APInt &AndRHSV = cast<ConstantInt>(AndRHS)->getValue();
-
- // If there is only one bit set.
- if (AndRHSV.isPowerOf2()) {
- // Ok, at this point, we know that we are masking the result of the
- // ADD down to exactly one bit. If the constant we are adding has
- // no bits set below this bit, then we can eliminate the ADD.
- const APInt& AddRHS = cast<ConstantInt>(OpRHS)->getValue();
-
- // Check to see if any bits below the one bit set in AndRHSV are set.
- if ((AddRHS & (AndRHSV-1)) == 0) {
- // If not, the only thing that can affect the output of the AND is
- // the bit specified by AndRHSV. If that bit is set, the effect of
- // the XOR is to toggle the bit. If it is clear, then the ADD has
- // no effect.
- if ((AddRHS & AndRHSV) == 0) { // Bit is not set, noop
- TheAnd.setOperand(0, X);
- return &TheAnd;
- } else {
- // Pull the XOR out of the AND.
- Value *NewAnd = Builder->CreateAnd(X, AndRHS);
- NewAnd->takeName(Op);
- return BinaryOperator::CreateXor(NewAnd, AndRHS);
- }
- }
- }
- }
- break;
-
- case Instruction::Shl: {
- // We know that the AND will not produce any of the bits shifted in, so if
- // the anded constant includes them, clear them now!
- //
- uint32_t BitWidth = AndRHS->getType()->getBitWidth();
- uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
- APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
- ConstantInt *CI = ConstantInt::get(AndRHS->getContext(),
- AndRHS->getValue() & ShlMask);
-
- if (CI->getValue() == ShlMask) {
- // Masking out bits that the shift already masks
- return ReplaceInstUsesWith(TheAnd, Op); // No need for the and.
- } else if (CI != AndRHS) { // Reducing bits set in and.
- TheAnd.setOperand(1, CI);
- return &TheAnd;
- }
- break;
- }
- case Instruction::LShr: {
- // We know that the AND will not produce any of the bits shifted in, so if
- // the anded constant includes them, clear them now! This only applies to
- // unsigned shifts, because a signed shr may bring in set bits!
- //
- uint32_t BitWidth = AndRHS->getType()->getBitWidth();
- uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
- APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
- ConstantInt *CI = ConstantInt::get(Op->getContext(),
- AndRHS->getValue() & ShrMask);
-
- if (CI->getValue() == ShrMask) {
- // Masking out bits that the shift already masks.
- return ReplaceInstUsesWith(TheAnd, Op);
- } else if (CI != AndRHS) {
- TheAnd.setOperand(1, CI); // Reduce bits set in and cst.
- return &TheAnd;
- }
- break;
- }
- case Instruction::AShr:
- // Signed shr.
- // See if this is shifting in some sign extension, then masking it out
- // with an and.
- if (Op->hasOneUse()) {
- uint32_t BitWidth = AndRHS->getType()->getBitWidth();
- uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
- APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
- Constant *C = ConstantInt::get(Op->getContext(),
- AndRHS->getValue() & ShrMask);
- if (C == AndRHS) { // Masking out bits shifted in.
- // (Val ashr C1) & C2 -> (Val lshr C1) & C2
- // Make the argument unsigned.
- Value *ShVal = Op->getOperand(0);
- ShVal = Builder->CreateLShr(ShVal, OpRHS, Op->getName());
- return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
- }
- }
- break;
- }
- return 0;
-}
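
The Xor case above relies on AND distributing over XOR, i.e. (X ^ C1) & C2 == (X & C2) ^ (C1 & C2). A brute-force sketch of that identity over 8-bit values:

    #include <cassert>

    int main() {
      // AND distributes over XOR: (X ^ C1) & C2 == (X & C2) ^ (C1 & C2).
      for (unsigned x = 0; x < 256; ++x)
        for (unsigned c1 = 0; c1 < 256; ++c1)
          for (unsigned c2 = 0; c2 < 256; ++c2)
            assert(((x ^ c1) & c2) == ((x & c2) ^ (c1 & c2)));
      return 0;
    }
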
-
-
-/// InsertRangeTest - Emit a computation of: (V >= Lo && V < Hi) if Inside is
-/// true, otherwise (V < Lo || V >= Hi). In practice, we emit the more efficient
-/// (V-Lo) <u Hi-Lo. This method expects that Lo <= Hi. isSigned indicates
-/// whether to treat V, Lo and Hi as signed or not. IB is the location to
-/// insert new instructions.
-Value *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
- bool isSigned, bool Inside) {
- assert(cast<ConstantInt>(ConstantExpr::getICmp((isSigned ?
- ICmpInst::ICMP_SLE:ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() &&
- "Lo is not <= Hi in range emission code!");
-
- if (Inside) {
- if (Lo == Hi) // Trivially false.
- return ConstantInt::getFalse(V->getContext());
-
- // V >= Min && V < Hi --> V < Hi
- if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
- ICmpInst::Predicate pred = (isSigned ?
- ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT);
- return Builder->CreateICmp(pred, V, Hi);
- }
-
- // Emit V-Lo <u Hi-Lo
- Constant *NegLo = ConstantExpr::getNeg(Lo);
- Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
- Constant *UpperBound = ConstantExpr::getAdd(NegLo, Hi);
- return Builder->CreateICmpULT(Add, UpperBound);
- }
-
- if (Lo == Hi) // Trivially true.
- return ConstantInt::getTrue(V->getContext());
-
- // V < Min || V >= Hi -> V > Hi-1
- Hi = SubOne(cast<ConstantInt>(Hi));
- if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
- ICmpInst::Predicate pred = (isSigned ?
- ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
- return Builder->CreateICmp(pred, V, Hi);
- }
-
- // Emit V-Lo >u Hi-1-Lo
- // Note that Hi has already had one subtracted from it, above.
- ConstantInt *NegLo = cast<ConstantInt>(ConstantExpr::getNeg(Lo));
- Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
- Constant *LowerBound = ConstantExpr::getAdd(NegLo, Hi);
- return Builder->CreateICmpUGT(Add, LowerBound);
-}
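
The "(V-Lo) <u Hi-Lo" form above turns a two-comparison range test into a single unsigned compare. A standalone sketch for the unsigned 8-bit case, assuming Lo <= Hi as the assertion in the function requires:

    #include <cassert>
    #include <cstdint>

    int main() {
      // For unsigned values with Lo <= Hi:
      //   (V >= Lo && V < Hi)  <=>  (uint8_t)(V - Lo) < (uint8_t)(Hi - Lo)
      for (unsigned lo = 0; lo < 256; ++lo)
        for (unsigned hi = lo; hi < 256; ++hi)
          for (unsigned v = 0; v < 256; ++v) {
            bool two_compares = (v >= lo) && (v < hi);
            bool one_compare  = static_cast<uint8_t>(v - lo) <
                                static_cast<uint8_t>(hi - lo);
            assert(two_compares == one_compare);
          }
      return 0;
    }
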
-
-// isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
-// any number of 0s on either side. The 1s are allowed to wrap from LSB to
-// MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs. 0x0F0F0000 is
-// not, since all 1s are not contiguous.
-static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
- const APInt& V = Val->getValue();
- uint32_t BitWidth = Val->getType()->getBitWidth();
- if (!APIntOps::isShiftedMask(BitWidth, V)) return false;
-
- // look for the first zero bit after the run of ones
- MB = BitWidth - ((V - 1) ^ V).countLeadingZeros();
- // look for the first non-zero bit
- ME = V.getActiveBits();
- return true;
-}
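
For the common non-wrapping case, "one contiguous run of 1s" can be tested with plain bit tricks; note that isRunOfOnes above additionally accepts runs that wrap from MSB to LSB, which this simplified sketch does not:

    #include <cassert>
    #include <cstdint>

    // True iff v is a single contiguous, non-wrapping run of 1 bits:
    // fill the trailing zeros with (v | (v - 1)); adding 1 then clears
    // every bit of v if and only if the ones were contiguous.
    bool isContiguousRun(uint32_t v) {
      return v != 0 && (((v | (v - 1)) + 1) & v) == 0;
    }

    int main() {
      assert(isContiguousRun(0x000FF000u));
      assert(isContiguousRun(0x0000FFFFu));
      assert(isContiguousRun(0x80000000u));
      assert(!isContiguousRun(0x0F0F0000u));  // two separate runs
      assert(!isContiguousRun(0u));
      assert(!isContiguousRun(0xFF0000FFu));  // wrapping run: accepted by isRunOfOnes, not here
      return 0;
    }
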
-
-/// FoldLogicalPlusAnd - This is part of an expression (LHS +/- RHS) & Mask,
-/// where isSub determines whether the operator is a sub. If we can fold one of
-/// the following xforms:
-///
-/// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
-/// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
-/// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
-///
-/// return (A +/- B).
-///
-Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
- ConstantInt *Mask, bool isSub,
- Instruction &I) {
- Instruction *LHSI = dyn_cast<Instruction>(LHS);
- if (!LHSI || LHSI->getNumOperands() != 2 ||
- !isa<ConstantInt>(LHSI->getOperand(1))) return 0;
-
- ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));
-
- switch (LHSI->getOpcode()) {
- default: return 0;
- case Instruction::And:
- if (ConstantExpr::getAnd(N, Mask) == Mask) {
- // If the AndRHS is a power of two minus one (0+1+), this is simple.
- if ((Mask->getValue().countLeadingZeros() +
- Mask->getValue().countPopulation()) ==
- Mask->getValue().getBitWidth())
- break;
-
- // Otherwise, if Mask is 0+1+0+, and if B is known to have the low 0+
- // part, we don't need any explicit masks to take them out of A. If that
- // is all N is, ignore it.
- uint32_t MB = 0, ME = 0;
- if (isRunOfOnes(Mask, MB, ME)) { // begin/end bit of run, inclusive
- uint32_t BitWidth = cast<IntegerType>(RHS->getType())->getBitWidth();
- APInt Mask(APInt::getLowBitsSet(BitWidth, MB-1));
- if (MaskedValueIsZero(RHS, Mask))
- break;
- }
- }
- return 0;
- case Instruction::Or:
- case Instruction::Xor:
- // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
- if ((Mask->getValue().countLeadingZeros() +
- Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
- && ConstantExpr::getAnd(N, Mask)->isNullValue())
- break;
- return 0;
- }
-
- if (isSub)
- return Builder->CreateSub(LHSI->getOperand(0), RHS, "fold");
- return Builder->CreateAdd(LHSI->getOperand(0), RHS, "fold");
-}
-
-/// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
-Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
- ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
-
- // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
- if (PredicatesFoldable(LHSCC, RHSCC)) {
- if (LHS->getOperand(0) == RHS->getOperand(1) &&
- LHS->getOperand(1) == RHS->getOperand(0))
- LHS->swapOperands();
- if (LHS->getOperand(0) == RHS->getOperand(0) &&
- LHS->getOperand(1) == RHS->getOperand(1)) {
- Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
- unsigned Code = getICmpCode(LHS) & getICmpCode(RHS);
- bool isSigned = LHS->isSigned() || RHS->isSigned();
- return getICmpValue(isSigned, Code, Op0, Op1, Builder);
- }
- }
-
- // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
- Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
- ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
- ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
- if (LHSCst == 0 || RHSCst == 0) return 0;
-
- if (LHSCst == RHSCst && LHSCC == RHSCC) {
- // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
- // where C is a power of 2
- if (LHSCC == ICmpInst::ICMP_ULT &&
- LHSCst->getValue().isPowerOf2()) {
- Value *NewOr = Builder->CreateOr(Val, Val2);
- return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
- }
-
- // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
- if (LHSCC == ICmpInst::ICMP_EQ && LHSCst->isZero()) {
- Value *NewOr = Builder->CreateOr(Val, Val2);
- return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
- }
- }
-
- // From here on, we only handle:
- // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
- if (Val != Val2) return 0;
-
- // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
- if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
- RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
- LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
- RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
- return 0;
-
- // We can't fold (ugt x, C) & (sgt x, C2).
- if (!PredicatesFoldable(LHSCC, RHSCC))
- return 0;
-
- // Ensure that the larger constant is on the RHS.
- bool ShouldSwap;
- if (CmpInst::isSigned(LHSCC) ||
- (ICmpInst::isEquality(LHSCC) &&
- CmpInst::isSigned(RHSCC)))
- ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
- else
- ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
-
- if (ShouldSwap) {
- std::swap(LHS, RHS);
- std::swap(LHSCst, RHSCst);
- std::swap(LHSCC, RHSCC);
- }
-
- // At this point, we know we have two icmp instructions
- // comparing a value against two constants and and'ing the result
- // together. Because of the above check, we know that we only have
- // icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here. We also know
- // (from the icmp folding check above), that the two constants
- // are not equal and that the larger constant is on the RHS
- assert(LHSCst != RHSCst && "Compares not folded above?");
-
- switch (LHSCC) {
- default: llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ:
- switch (RHSCC) {
- default: llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X == 13 & X == 15) -> false
- case ICmpInst::ICMP_UGT: // (X == 13 & X > 15) -> false
- case ICmpInst::ICMP_SGT: // (X == 13 & X > 15) -> false
- return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
- case ICmpInst::ICMP_NE: // (X == 13 & X != 15) -> X == 13
- case ICmpInst::ICMP_ULT: // (X == 13 & X < 15) -> X == 13
- case ICmpInst::ICMP_SLT: // (X == 13 & X < 15) -> X == 13
- return LHS;
- }
- case ICmpInst::ICMP_NE:
- switch (RHSCC) {
- default: llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_ULT:
- if (LHSCst == SubOne(RHSCst)) // (X != 13 & X u< 14) -> X < 13
- return Builder->CreateICmpULT(Val, LHSCst);
- break; // (X != 13 & X u< 15) -> no change
- case ICmpInst::ICMP_SLT:
- if (LHSCst == SubOne(RHSCst)) // (X != 13 & X s< 14) -> X < 13
- return Builder->CreateICmpSLT(Val, LHSCst);
- break; // (X != 13 & X s< 15) -> no change
- case ICmpInst::ICMP_EQ: // (X != 13 & X == 15) -> X == 15
- case ICmpInst::ICMP_UGT: // (X != 13 & X u> 15) -> X u> 15
- case ICmpInst::ICMP_SGT: // (X != 13 & X s> 15) -> X s> 15
- return RHS;
- case ICmpInst::ICMP_NE:
- if (LHSCst == SubOne(RHSCst)){// (X != 13 & X != 14) -> X-13 >u 1
- Constant *AddCST = ConstantExpr::getNeg(LHSCst);
- Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
- return Builder->CreateICmpUGT(Add, ConstantInt::get(Add->getType(), 1));
- }
- break; // (X != 13 & X != 15) -> no change
- }
- break;
- case ICmpInst::ICMP_ULT:
- switch (RHSCC) {
- default: llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X u< 13 & X == 15) -> false
- case ICmpInst::ICMP_UGT: // (X u< 13 & X u> 15) -> false
- return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
- case ICmpInst::ICMP_SGT: // (X u< 13 & X s> 15) -> no change
- break;
- case ICmpInst::ICMP_NE: // (X u< 13 & X != 15) -> X u< 13
- case ICmpInst::ICMP_ULT: // (X u< 13 & X u< 15) -> X u< 13
- return LHS;
- case ICmpInst::ICMP_SLT: // (X u< 13 & X s< 15) -> no change
- break;
- }
- break;
- case ICmpInst::ICMP_SLT:
- switch (RHSCC) {
- default: llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X s< 13 & X == 15) -> false
- case ICmpInst::ICMP_SGT: // (X s< 13 & X s> 15) -> false
- return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
- case ICmpInst::ICMP_UGT: // (X s< 13 & X u> 15) -> no change
- break;
- case ICmpInst::ICMP_NE: // (X s< 13 & X != 15) -> X < 13
- case ICmpInst::ICMP_SLT: // (X s< 13 & X s< 15) -> X < 13
- return LHS;
- case ICmpInst::ICMP_ULT: // (X s< 13 & X u< 15) -> no change
- break;
- }
- break;
- case ICmpInst::ICMP_UGT:
- switch (RHSCC) {
- default: llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X u> 13 & X == 15) -> X == 15
- case ICmpInst::ICMP_UGT: // (X u> 13 & X u> 15) -> X u> 15
- return RHS;
- case ICmpInst::ICMP_SGT: // (X u> 13 & X s> 15) -> no change
- break;
- case ICmpInst::ICMP_NE:
- if (RHSCst == AddOne(LHSCst)) // (X u> 13 & X != 14) -> X u> 14
- return Builder->CreateICmp(LHSCC, Val, RHSCst);
- break; // (X u> 13 & X != 15) -> no change
- case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1
- return InsertRangeTest(Val, AddOne(LHSCst), RHSCst, false, true);
- case ICmpInst::ICMP_SLT: // (X u> 13 & X s< 15) -> no change
- break;
- }
- break;
- case ICmpInst::ICMP_SGT:
- switch (RHSCC) {
- default: llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X s> 13 & X == 15) -> X == 15
- case ICmpInst::ICMP_SGT: // (X s> 13 & X s> 15) -> X s> 15
- return RHS;
- case ICmpInst::ICMP_UGT: // (X s> 13 & X u> 15) -> no change
- break;
- case ICmpInst::ICMP_NE:
- if (RHSCst == AddOne(LHSCst)) // (X s> 13 & X != 14) -> X s> 14
- return Builder->CreateICmp(LHSCC, Val, RHSCst);
- break; // (X s> 13 & X != 15) -> no change
- case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
- return InsertRangeTest(Val, AddOne(LHSCst), RHSCst, true, true);
- case ICmpInst::ICMP_ULT: // (X s> 13 & X u< 15) -> no change
- break;
- }
- break;
- }
-
- return 0;
-}
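
Several of the cases above use the same subtract-and-compare trick, e.g. "(X != 13 & X != 14) -> X-13 >u 1". A quick check of that particular fold with 8-bit unsigned arithmetic:

    #include <cassert>
    #include <cstdint>

    int main() {
      // (X != 13 && X != 14)  <=>  (uint8_t)(X - 13) > 1
      for (unsigned x = 0; x < 256; ++x) {
        bool two_compares = (x != 13) && (x != 14);
        bool one_compare  = static_cast<uint8_t>(x - 13) > 1;
        assert(two_compares == one_compare);
      }
      return 0;
    }
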
-
-/// FoldAndOfFCmps - Optimize (fcmp)&(fcmp). NOTE: Unlike the rest of
-/// instcombine, this returns a Value which should already be inserted into the
-/// function.
-Value *InstCombiner::FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
- if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
- RHS->getPredicate() == FCmpInst::FCMP_ORD) {
- // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y)
- if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
- if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
- // If either of the constants are nans, then the whole thing returns
- // false.
- if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
- return ConstantInt::getFalse(LHS->getContext());
- return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
- }
-
- // Handle vector zeros. This occurs because the canonical form of
- // "fcmp ord x,x" is "fcmp ord x, 0".
- if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
- isa<ConstantAggregateZero>(RHS->getOperand(1)))
- return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
- return 0;
- }
-
- Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
- Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
- FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
-
-
- if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
- // Swap RHS operands to match LHS.
- Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
- std::swap(Op1LHS, Op1RHS);
- }
-
- if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
- // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
- if (Op0CC == Op1CC)
- return Builder->CreateFCmp((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
- if (Op0CC == FCmpInst::FCMP_FALSE || Op1CC == FCmpInst::FCMP_FALSE)
- return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
- if (Op0CC == FCmpInst::FCMP_TRUE)
- return RHS;
- if (Op1CC == FCmpInst::FCMP_TRUE)
- return LHS;
-
- bool Op0Ordered;
- bool Op1Ordered;
- unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
- unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
- if (Op1Pred == 0) {
- std::swap(LHS, RHS);
- std::swap(Op0Pred, Op1Pred);
- std::swap(Op0Ordered, Op1Ordered);
- }
- if (Op0Pred == 0) {
- // uno && ueq -> uno && (uno || eq) -> ueq
- // ord && olt -> ord && (ord && lt) -> olt
- if (Op0Ordered == Op1Ordered)
- return RHS;
-
- // uno && oeq -> uno && (ord && eq) -> false
- // uno && ord -> false
- if (!Op0Ordered)
- return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
- // ord && ueq -> ord && (uno || eq) -> oeq
- return getFCmpValue(true, Op1Pred, Op0LHS, Op0RHS, Builder);
- }
- }
-
- return 0;
-}
-
-
-Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
- bool Changed = SimplifyCommutative(I);
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- if (Value *V = SimplifyAndInst(Op0, Op1, TD))
- return ReplaceInstUsesWith(I, V);
-
- // See if we can simplify any instructions used by the instruction whose sole
- // purpose is to compute bits we don't care about.
- if (SimplifyDemandedInstructionBits(I))
- return &I;
-
- if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
- const APInt &AndRHSMask = AndRHS->getValue();
- APInt NotAndRHS(~AndRHSMask);
-
- // Optimize a variety of ((val OP C1) & C2) combinations...
- if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
- Value *Op0LHS = Op0I->getOperand(0);
- Value *Op0RHS = Op0I->getOperand(1);
- switch (Op0I->getOpcode()) {
- default: break;
- case Instruction::Xor:
- case Instruction::Or:
- // If the mask is only needed on one incoming arm, push it up.
- if (!Op0I->hasOneUse()) break;
-
- if (MaskedValueIsZero(Op0LHS, NotAndRHS)) {
- // Not masking anything out for the LHS, move to RHS.
- Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS,
- Op0RHS->getName()+".masked");
- return BinaryOperator::Create(Op0I->getOpcode(), Op0LHS, NewRHS);
- }
- if (!isa<Constant>(Op0RHS) &&
- MaskedValueIsZero(Op0RHS, NotAndRHS)) {
- // Not masking anything out for the RHS, move to LHS.
- Value *NewLHS = Builder->CreateAnd(Op0LHS, AndRHS,
- Op0LHS->getName()+".masked");
- return BinaryOperator::Create(Op0I->getOpcode(), NewLHS, Op0RHS);
- }
-
- break;
- case Instruction::Add:
- // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
- // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
- // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
- if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
- return BinaryOperator::CreateAnd(V, AndRHS);
- if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
- return BinaryOperator::CreateAnd(V, AndRHS); // Add commutes
- break;
-
- case Instruction::Sub:
- // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
- // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
- // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
- if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
- return BinaryOperator::CreateAnd(V, AndRHS);
-
- // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
- // has 1's for all bits that the subtraction with A might affect.
- if (Op0I->hasOneUse()) {
- uint32_t BitWidth = AndRHSMask.getBitWidth();
- uint32_t Zeros = AndRHSMask.countLeadingZeros();
- APInt Mask = APInt::getLowBitsSet(BitWidth, BitWidth - Zeros);
-
- ConstantInt *A = dyn_cast<ConstantInt>(Op0LHS);
- if (!(A && A->isZero()) && // avoid infinite recursion.
- MaskedValueIsZero(Op0LHS, Mask)) {
- Value *NewNeg = Builder->CreateNeg(Op0RHS);
- return BinaryOperator::CreateAnd(NewNeg, AndRHS);
- }
- }
- break;
-
- case Instruction::Shl:
- case Instruction::LShr:
- // (1 << x) & 1 --> zext(x == 0)
- // (1 >> x) & 1 --> zext(x == 0)
- if (AndRHSMask == 1 && Op0LHS == AndRHS) {
- Value *NewICmp =
- Builder->CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
- return new ZExtInst(NewICmp, I.getType());
- }
- break;
- }
-
- if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
- if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
- return Res;
- } else if (CastInst *CI = dyn_cast<CastInst>(Op0)) {
- // If this is an integer truncation or change from signed-to-unsigned, and
- // if the source is an and/or with immediate, transform it. This
- // frequently occurs for bitfield accesses.
- if (Instruction *CastOp = dyn_cast<Instruction>(CI->getOperand(0))) {
- if ((isa<TruncInst>(CI) || isa<BitCastInst>(CI)) &&
- CastOp->getNumOperands() == 2)
- if (ConstantInt *AndCI =dyn_cast<ConstantInt>(CastOp->getOperand(1))){
- if (CastOp->getOpcode() == Instruction::And) {
- // Change: and (cast (and X, C1) to T), C2
- // into : and (cast X to T), trunc_or_bitcast(C1)&C2
- // This will fold the two constants together, which may allow
- // other simplifications.
- Value *NewCast = Builder->CreateTruncOrBitCast(
- CastOp->getOperand(0), I.getType(),
- CastOp->getName()+".shrunk");
- // trunc_or_bitcast(C1)&C2
- Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
- C3 = ConstantExpr::getAnd(C3, AndRHS);
- return BinaryOperator::CreateAnd(NewCast, C3);
- } else if (CastOp->getOpcode() == Instruction::Or) {
- // Change: and (cast (or X, C1) to T), C2
- // into : trunc(C1)&C2 iff trunc(C1)&C2 == C2
- Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
- if (ConstantExpr::getAnd(C3, AndRHS) == AndRHS)
- // trunc(C1)&C2
- return ReplaceInstUsesWith(I, AndRHS);
- }
- }
- }
- }
-
- // Try to fold constant and into select arguments.
- if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
- if (Instruction *R = FoldOpIntoSelect(I, SI))
- return R;
- if (isa<PHINode>(Op0))
- if (Instruction *NV = FoldOpIntoPhi(I))
- return NV;
- }
-
-
- // (~A & ~B) == (~(A | B)) - De Morgan's Law
- if (Value *Op0NotVal = dyn_castNotVal(Op0))
- if (Value *Op1NotVal = dyn_castNotVal(Op1))
- if (Op0->hasOneUse() && Op1->hasOneUse()) {
- Value *Or = Builder->CreateOr(Op0NotVal, Op1NotVal,
- I.getName()+".demorgan");
- return BinaryOperator::CreateNot(Or);
- }
-
- {
- Value *A = 0, *B = 0, *C = 0, *D = 0;
- // (A|B) & ~(A&B) -> A^B
- if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
- match(Op1, m_Not(m_And(m_Value(C), m_Value(D)))) &&
- ((A == C && B == D) || (A == D && B == C)))
- return BinaryOperator::CreateXor(A, B);
-
- // ~(A&B) & (A|B) -> A^B
- if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
- match(Op0, m_Not(m_And(m_Value(C), m_Value(D)))) &&
- ((A == C && B == D) || (A == D && B == C)))
- return BinaryOperator::CreateXor(A, B);
-
- if (Op0->hasOneUse() &&
- match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
- if (A == Op1) { // (A^B)&A -> A&(A^B)
- I.swapOperands(); // Simplify below
- std::swap(Op0, Op1);
- } else if (B == Op1) { // (A^B)&B -> B&(B^A)
- cast<BinaryOperator>(Op0)->swapOperands();
- I.swapOperands(); // Simplify below
- std::swap(Op0, Op1);
- }
- }
-
- if (Op1->hasOneUse() &&
- match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
- if (B == Op0) { // B&(A^B) -> B&(B^A)
- cast<BinaryOperator>(Op1)->swapOperands();
- std::swap(A, B);
- }
- if (A == Op0) // A&(A^B) -> A & ~B
- return BinaryOperator::CreateAnd(A, Builder->CreateNot(B, "tmp"));
- }
-
- // (A&((~A)|B)) -> A&B
- if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A))) ||
- match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1)))))
- return BinaryOperator::CreateAnd(A, Op1);
- if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A))) ||
- match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0)))))
- return BinaryOperator::CreateAnd(A, Op0);
- }
-
- if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1))
- if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0))
- if (Value *Res = FoldAndOfICmps(LHS, RHS))
- return ReplaceInstUsesWith(I, Res);
-
- // If and'ing two fcmp, try combine them into one.
- if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
- if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
- if (Value *Res = FoldAndOfFCmps(LHS, RHS))
- return ReplaceInstUsesWith(I, Res);
-
-
- // fold (and (cast A), (cast B)) -> (cast (and A, B))
- if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
- if (CastInst *Op1C = dyn_cast<CastInst>(Op1)) {
- const Type *SrcTy = Op0C->getOperand(0)->getType();
- if (Op0C->getOpcode() == Op1C->getOpcode() && // same cast kind ?
- SrcTy == Op1C->getOperand(0)->getType() &&
- SrcTy->isIntOrIntVectorTy()) {
- Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
-
- // Only do this if the casts both really cause code to be generated.
- if (ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
- ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
- Value *NewOp = Builder->CreateAnd(Op0COp, Op1COp, I.getName());
- return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
- }
-
- // If this is and(cast(icmp), cast(icmp)), try to fold this even if the
- // cast is otherwise not optimizable. This happens for vector sexts.
- if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
- if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
- if (Value *Res = FoldAndOfICmps(LHS, RHS))
- return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
-
- // If this is and(cast(fcmp), cast(fcmp)), try to fold this even if the
- // cast is otherwise not optimizable. This happens for vector sexts.
- if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
- if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
- if (Value *Res = FoldAndOfFCmps(LHS, RHS))
- return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
- }
- }
-
- // (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
- if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
- if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
- if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
- SI0->getOperand(1) == SI1->getOperand(1) &&
- (SI0->hasOneUse() || SI1->hasOneUse())) {
- Value *NewOp =
- Builder->CreateAnd(SI0->getOperand(0), SI1->getOperand(0),
- SI0->getName());
- return BinaryOperator::Create(SI1->getOpcode(), NewOp,
- SI1->getOperand(1));
- }
- }
-
- return Changed ? &I : 0;
-}
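
Two of the boolean identities visitAnd leans on, De Morgan's (~A & ~B) == ~(A | B) and (A | B) & ~(A & B) == A ^ B, can be brute-forced over a byte:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (unsigned a = 0; a < 256; ++a)
        for (unsigned b = 0; b < 256; ++b) {
          uint8_t A = a, B = b;
          // De Morgan: (~A & ~B) == ~(A | B)
          assert(static_cast<uint8_t>(~A & ~B) == static_cast<uint8_t>(~(A | B)));
          // (A | B) & ~(A & B) == A ^ B
          assert(static_cast<uint8_t>((A | B) & ~(A & B)) ==
                 static_cast<uint8_t>(A ^ B));
        }
      return 0;
    }
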
-
-/// CollectBSwapParts - Analyze the specified subexpression and see if it is
-/// capable of providing pieces of a bswap. The subexpression provides pieces
-/// of a bswap if it is proven that each of the non-zero bytes in the output of
-/// the expression came from the corresponding "byte swapped" byte in some other
-/// value. For example, if the current subexpression is "(shl i32 %X, 24)" then
-/// we know that the expression deposits the low byte of %X into the high byte
-/// of the bswap result and that all other bytes are zero. If this expression
-/// is accepted, the high byte of ByteValues is set to %X to indicate a
-/// correct match.
-///
-/// This function returns true if the match was unsuccessful, false otherwise.
-/// On entry to the function the "OverallLeftShift" is a signed integer value
-/// indicating the number of bytes that the subexpression is later shifted. For
-/// example, if the expression is later right shifted by 16 bits, the
-/// OverallLeftShift value would be -2 on entry. This is used to specify which
-/// byte of ByteValues is actually being set.
-///
-/// Similarly, ByteMask is a bitmask where a bit is clear if its corresponding
-/// byte is masked to zero by a user. For example, in (X & 255), X will be
-/// processed with a bytemask of 1. Because bytemask is 32-bits, this limits
-/// this function to working on up to 32-byte (256 bit) values. ByteMask is
-/// always in the local (OverallLeftShift) coordinate space.
-///
-static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
- SmallVector<Value*, 8> &ByteValues) {
- if (Instruction *I = dyn_cast<Instruction>(V)) {
- // If this is an or instruction, it may be an inner node of the bswap.
- if (I->getOpcode() == Instruction::Or) {
- return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
- ByteValues) ||
- CollectBSwapParts(I->getOperand(1), OverallLeftShift, ByteMask,
- ByteValues);
- }
-
- // If this is a logical shift by a constant multiple of 8, recurse with
- // OverallLeftShift and ByteMask adjusted.
- if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
- unsigned ShAmt =
- cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
- // Ensure the shift amount is defined and of a byte value.
- if ((ShAmt & 7) || (ShAmt > 8*ByteValues.size()))
- return true;
-
- unsigned ByteShift = ShAmt >> 3;
- if (I->getOpcode() == Instruction::Shl) {
- // X << 2 -> collect(X, +2)
- OverallLeftShift += ByteShift;
- ByteMask >>= ByteShift;
- } else {
- // X >>u 2 -> collect(X, -2)
- OverallLeftShift -= ByteShift;
- ByteMask <<= ByteShift;
- ByteMask &= (~0U >> (32-ByteValues.size()));
- }
-
- if (OverallLeftShift >= (int)ByteValues.size()) return true;
- if (OverallLeftShift <= -(int)ByteValues.size()) return true;
-
- return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
- ByteValues);
- }
-
- // If this is a logical 'and' with a mask that clears bytes, clear the
- // corresponding bytes in ByteMask.
- if (I->getOpcode() == Instruction::And &&
- isa<ConstantInt>(I->getOperand(1))) {
- // Scan every byte of the and mask, seeing if the byte is either 0 or 255.
- unsigned NumBytes = ByteValues.size();
- APInt Byte(I->getType()->getPrimitiveSizeInBits(), 255);
- const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();
-
- for (unsigned i = 0; i != NumBytes; ++i, Byte <<= 8) {
- // If this byte is masked out by a later operation, we don't care what
- // the and mask is.
- if ((ByteMask & (1 << i)) == 0)
- continue;
-
- // If the AndMask is all zeros for this byte, clear the bit.
- APInt MaskB = AndMask & Byte;
- if (MaskB == 0) {
- ByteMask &= ~(1U << i);
- continue;
- }
-
- // If the AndMask is not all ones for this byte, it's not a bytezap.
- if (MaskB != Byte)
- return true;
-
- // Otherwise, this byte is kept.
- }
-
- return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
- ByteValues);
- }
- }
-
- // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
- // the input value to the bswap. Some observations: 1) if more than one byte
- // is demanded from this input, then it could not be successfully assembled
- // into a byteswap. At least one of the two bytes would not be aligned with
- // their ultimate destination.
- if (!isPowerOf2_32(ByteMask)) return true;
- unsigned InputByteNo = CountTrailingZeros_32(ByteMask);
-
- // 2) The input and ultimate destinations must line up: if byte 3 of an i32
- // is demanded, it needs to go into byte 0 of the result. This means that the
- // byte needs to be shifted until it lands in the right byte bucket. The
- // shift amount depends on the position: if the byte is coming from the high
- // part of the value (e.g. byte 3) then it must be shifted right. If from the
- // low part, it must be shifted left.
- unsigned DestByteNo = InputByteNo + OverallLeftShift;
- if (InputByteNo < ByteValues.size()/2) {
- if (ByteValues.size()-1-DestByteNo != InputByteNo)
- return true;
- } else {
- if (ByteValues.size()-1-DestByteNo != InputByteNo)
- return true;
- }
-
- // If the destination byte value is already defined, the values are or'd
- // together, which isn't a bswap (unless it's an or of the same bits).
- if (ByteValues[DestByteNo] && ByteValues[DestByteNo] != V)
- return true;
- ByteValues[DestByteNo] = V;
- return false;
-}
-
-/// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
-/// If so, insert the new bswap intrinsic and return it.
-Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
- const IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
- if (!ITy || ITy->getBitWidth() % 16 ||
- // ByteMask only allows up to 32-byte values.
- ITy->getBitWidth() > 32*8)
- return 0; // Can only bswap pairs of bytes. Can't do vectors.
-
- /// ByteValues - For each byte of the result, we keep track of which value
- /// defines each byte.
- SmallVector<Value*, 8> ByteValues;
- ByteValues.resize(ITy->getBitWidth()/8);
-
- // Try to find all the pieces corresponding to the bswap.
- uint32_t ByteMask = ~0U >> (32-ByteValues.size());
- if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
- return 0;
-
- // Check to see if all of the bytes come from the same value.
- Value *V = ByteValues[0];
- if (V == 0) return 0; // Didn't find a byte? Must be zero.
-
- // Check to make sure that all of the bytes come from the same value.
- for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
- if (ByteValues[i] != V)
- return 0;
- const Type *Tys[] = { ITy };
- Module *M = I.getParent()->getParent()->getParent();
- Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
- return CallInst::Create(F, V);
-}
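
The idiom MatchBSwap looks for is the classic shift-and-mask byte swap. A standalone sketch of the 32-bit pattern, checked against an explicit byte-by-byte reconstruction (helper names are illustrative):

    #include <cassert>
    #include <cstdint>

    // The shift/mask/or pattern that gets replaced by a single bswap.
    uint32_t bswap_idiom(uint32_t x) {
      return (x << 24) | ((x & 0x0000FF00u) << 8) |
             ((x >> 8) & 0x0000FF00u) | (x >> 24);
    }

    // Reference: move each byte to its mirrored position explicitly.
    uint32_t bswap_reference(uint32_t x) {
      return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
             ((x & 0x00FF0000u) >> 8)  | ((x & 0xFF000000u) >> 24);
    }

    int main() {
      const uint32_t samples[] = {0u, 0x12345678u, 0xA1B2C3D4u, 0xFFFFFFFFu};
      for (uint32_t x : samples)
        assert(bswap_idiom(x) == bswap_reference(x));
      assert(bswap_idiom(0x12345678u) == 0x78563412u);
      return 0;
    }
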
-
-/// MatchSelectFromAndOr - We have an expression of the form (A&C)|(B&D). Check
-/// if A is (cond?-1:0) and either B or D is ~(cond?-1:0) or (cond?0:-1), then
-/// we can simplify this expression to "cond ? C : D" or "cond ? C : B".
-static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
- Value *C, Value *D) {
- // If A is not a select of -1/0, this cannot match.
- Value *Cond = 0;
- if (!match(A, m_SExt(m_Value(Cond))) ||
- !Cond->getType()->isIntegerTy(1))
- return 0;
-
- // ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
- if (match(D, m_Not(m_SExt(m_Specific(Cond)))))
- return SelectInst::Create(Cond, C, B);
- if (match(D, m_SExt(m_Not(m_Specific(Cond)))))
- return SelectInst::Create(Cond, C, B);
-
- // ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D.
- if (match(B, m_Not(m_SExt(m_Specific(Cond)))))
- return SelectInst::Create(Cond, C, D);
- if (match(B, m_SExt(m_Not(m_Specific(Cond)))))
- return SelectInst::Create(Cond, C, D);
- return 0;
-}
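
The pattern above works because sign-extending an i1 yields an all-ones or all-zero mask, so (sext(cond) & C) | (~sext(cond) & D) selects between C and D. A quick standalone check with 32-bit values, assuming two's-complement arithmetic:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t values[] = {0, 1, -1, 13, -42, 0x7FFFFFFF};
      const bool conds[] = {false, true};
      for (bool cond : conds)
        for (int32_t c : values)
          for (int32_t d : values) {
            int32_t mask = cond ? -1 : 0;              // sext i1 -> i32
            int32_t selected = (mask & c) | (~mask & d);
            assert(selected == (cond ? c : d));
          }
      return 0;
    }
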
-
-/// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
-Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
- ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
-
- // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
- if (PredicatesFoldable(LHSCC, RHSCC)) {
- if (LHS->getOperand(0) == RHS->getOperand(1) &&
- LHS->getOperand(1) == RHS->getOperand(0))
- LHS->swapOperands();
- if (LHS->getOperand(0) == RHS->getOperand(0) &&
- LHS->getOperand(1) == RHS->getOperand(1)) {
- Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
- unsigned Code = getICmpCode(LHS) | getICmpCode(RHS);
- bool isSigned = LHS->isSigned() || RHS->isSigned();
- return getICmpValue(isSigned, Code, Op0, Op1, Builder);
- }
- }
-
- // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
- Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
- ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
- ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
- if (LHSCst == 0 || RHSCst == 0) return 0;
-
- // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
- if (LHSCst == RHSCst && LHSCC == RHSCC &&
- LHSCC == ICmpInst::ICMP_NE && LHSCst->isZero()) {
- Value *NewOr = Builder->CreateOr(Val, Val2);
- return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
- }
-
- // From here on, we only handle:
- // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
- if (Val != Val2) return 0;
-
- // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
- if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
- RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
- LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
- RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
- return 0;
-
- // We can't fold (ugt x, C) | (sgt x, C2).
- if (!PredicatesFoldable(LHSCC, RHSCC))
- return 0;
-
- // Ensure that the larger constant is on the RHS.
- bool ShouldSwap;
- if (CmpInst::isSigned(LHSCC) ||
- (ICmpInst::isEquality(LHSCC) &&
- CmpInst::isSigned(RHSCC)))
- ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
- else
- ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
-
- if (ShouldSwap) {
- std::swap(LHS, RHS);
- std::swap(LHSCst, RHSCst);
- std::swap(LHSCC, RHSCC);
- }
-
- // At this point, we know we have two icmp instructions
- // comparing a value against two constants and or'ing the result
- // together. Because of the above check, we know that we only have
- // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
- // icmp folding check above), that the two constants are not
- // equal.
- assert(LHSCst != RHSCst && "Compares not folded above?");
-
- switch (LHSCC) {
- default: llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ:
- switch (RHSCC) {
- default: llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ:
- if (LHSCst == SubOne(RHSCst)) {
- // (X == 13 | X == 14) -> X-13 <u 2
- Constant *AddCST = ConstantExpr::getNeg(LHSCst);
- Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
- AddCST = ConstantExpr::getSub(AddOne(RHSCst), LHSCst);
- return Builder->CreateICmpULT(Add, AddCST);
- }
- break; // (X == 13 | X == 15) -> no change
- case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
- case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
- break;
- case ICmpInst::ICMP_NE: // (X == 13 | X != 15) -> X != 15
- case ICmpInst::ICMP_ULT: // (X == 13 | X u< 15) -> X u< 15
- case ICmpInst::ICMP_SLT: // (X == 13 | X s< 15) -> X s< 15
- return RHS;
- }
- break;
- case ICmpInst::ICMP_NE:
- switch (RHSCC) {
- default: llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X != 13 | X == 15) -> X != 13
- case ICmpInst::ICMP_UGT: // (X != 13 | X u> 15) -> X != 13
- case ICmpInst::ICMP_SGT: // (X != 13 | X s> 15) -> X != 13
- return LHS;
- case ICmpInst::ICMP_NE: // (X != 13 | X != 15) -> true
- case ICmpInst::ICMP_ULT: // (X != 13 | X u< 15) -> true
- case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true
- return ConstantInt::getTrue(LHS->getContext());
- }
- break;
- case ICmpInst::ICMP_ULT:
- switch (RHSCC) {
- default: llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
- break;
- case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
- // If RHSCst is [us]MAXINT, it is always false. Not handling
- // this can cause overflow.
- if (RHSCst->isMaxValue(false))
- return LHS;
- return InsertRangeTest(Val, LHSCst, AddOne(RHSCst), false, false);
- case ICmpInst::ICMP_SGT: // (X u< 13 | X s> 15) -> no change
- break;
- case ICmpInst::ICMP_NE: // (X u< 13 | X != 15) -> X != 15
- case ICmpInst::ICMP_ULT: // (X u< 13 | X u< 15) -> X u< 15
- return RHS;
- case ICmpInst::ICMP_SLT: // (X u< 13 | X s< 15) -> no change
- break;
- }
- break;
- case ICmpInst::ICMP_SLT:
- switch (RHSCC) {
- default: llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
- break;
- case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
- // If RHSCst is [us]MAXINT, it is always false. Not handling
- // this can cause overflow.
- if (RHSCst->isMaxValue(true))
- return LHS;
- return InsertRangeTest(Val, LHSCst, AddOne(RHSCst), true, false);
- case ICmpInst::ICMP_UGT: // (X s< 13 | X u> 15) -> no change
- break;
- case ICmpInst::ICMP_NE: // (X s< 13 | X != 15) -> X != 15
- case ICmpInst::ICMP_SLT: // (X s< 13 | X s< 15) -> X s< 15
- return RHS;
- case ICmpInst::ICMP_ULT: // (X s< 13 | X u< 15) -> no change
- break;
- }
- break;
- case ICmpInst::ICMP_UGT:
- switch (RHSCC) {
- default: llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X u> 13 | X == 15) -> X u> 13
- case ICmpInst::ICMP_UGT: // (X u> 13 | X u> 15) -> X u> 13
- return LHS;
- case ICmpInst::ICMP_SGT: // (X u> 13 | X s> 15) -> no change
- break;
- case ICmpInst::ICMP_NE: // (X u> 13 | X != 15) -> true
- case ICmpInst::ICMP_ULT: // (X u> 13 | X u< 15) -> true
- return ConstantInt::getTrue(LHS->getContext());
- case ICmpInst::ICMP_SLT: // (X u> 13 | X s< 15) -> no change
- break;
- }
- break;
- case ICmpInst::ICMP_SGT:
- switch (RHSCC) {
- default: llvm_unreachable("Unknown integer condition code!");
- case ICmpInst::ICMP_EQ: // (X s> 13 | X == 15) -> X > 13
- case ICmpInst::ICMP_SGT: // (X s> 13 | X s> 15) -> X > 13
- return LHS;
- case ICmpInst::ICMP_UGT: // (X s> 13 | X u> 15) -> no change
- break;
- case ICmpInst::ICMP_NE: // (X s> 13 | X != 15) -> true
- case ICmpInst::ICMP_SLT: // (X s> 13 | X s< 15) -> true
- return ConstantInt::getTrue(LHS->getContext());
- case ICmpInst::ICMP_ULT: // (X s> 13 | X u< 15) -> no change
- break;
- }
- break;
- }
- return 0;
-}
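
The "(X == 13 | X == 14) -> X-13 <u 2" rewrite is the OR-side counterpart of the subtract-and-compare trick used for AND above. A check over 8-bit values:

    #include <cassert>
    #include <cstdint>

    int main() {
      // (X == 13 || X == 14)  <=>  (uint8_t)(X - 13) < 2
      for (unsigned x = 0; x < 256; ++x) {
        bool two_compares = (x == 13) || (x == 14);
        bool one_compare  = static_cast<uint8_t>(x - 13) < 2;
        assert(two_compares == one_compare);
      }
      return 0;
    }
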
-
-/// FoldOrOfFCmps - Optimize (fcmp)|(fcmp). NOTE: Unlike the rest of
-/// instcombine, this returns a Value which should already be inserted into the
-/// function.
-Value *InstCombiner::FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
- if (LHS->getPredicate() == FCmpInst::FCMP_UNO &&
- RHS->getPredicate() == FCmpInst::FCMP_UNO &&
- LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) {
- if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
- if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
- // If either of the constants are nans, then the whole thing returns
- // true.
- if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
- return ConstantInt::getTrue(LHS->getContext());
-
- // Otherwise, no need to compare the two constants, compare the
- // rest.
- return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
- }
-
- // Handle vector zeros. This occurs because the canonical form of
- // "fcmp uno x,x" is "fcmp uno x, 0".
- if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
- isa<ConstantAggregateZero>(RHS->getOperand(1)))
- return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
-
- return 0;
- }
-
- Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
- Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
- FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
-
- if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
- // Swap RHS operands to match LHS.
- Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
- std::swap(Op1LHS, Op1RHS);
- }
- if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
- // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y).
- if (Op0CC == Op1CC)
- return Builder->CreateFCmp((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
- if (Op0CC == FCmpInst::FCMP_TRUE || Op1CC == FCmpInst::FCMP_TRUE)
- return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
- if (Op0CC == FCmpInst::FCMP_FALSE)
- return RHS;
- if (Op1CC == FCmpInst::FCMP_FALSE)
- return LHS;
- bool Op0Ordered;
- bool Op1Ordered;
- unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
- unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
- if (Op0Ordered == Op1Ordered) {
- // If both are ordered or unordered, return a new fcmp with
- // or'ed predicates.
- return getFCmpValue(Op0Ordered, Op0Pred|Op1Pred, Op0LHS, Op0RHS, Builder);
- }
- }
- return 0;
-}
-
-/// FoldOrWithConstants - This helper function folds:
-///
-/// ((A | B) & C1) | (B & C2)
-///
-/// into:
-///
-/// (A & C1) | B
-///
-/// when the XOR of the two constants is "all ones" (-1).
-Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
- Value *A, Value *B, Value *C) {
- ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
- if (!CI1) return 0;
-
- Value *V1 = 0;
- ConstantInt *CI2 = 0;
- if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return 0;
-
- APInt Xor = CI1->getValue() ^ CI2->getValue();
- if (!Xor.isAllOnesValue()) return 0;
-
- if (V1 == A || V1 == B) {
- Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1);
- return BinaryOperator::CreateOr(NewOp, V1);
- }
-
- return 0;
-}
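
The fold above holds because C2 == ~C1 whenever the XOR of the two constants is all ones, so the (B & C2) term supplies exactly the bits of B that the first term masked away. A brute-force sketch over 8-bit values:

    #include <cassert>
    #include <cstdint>

    int main() {
      // When C2 == ~C1:  ((A | B) & C1) | (B & C2)  ==  (A & C1) | B
      for (unsigned a = 0; a < 256; ++a)
        for (unsigned b = 0; b < 256; ++b)
          for (unsigned c1 = 0; c1 < 256; ++c1) {
            uint8_t A = a, B = b, C1 = c1, C2 = static_cast<uint8_t>(~C1);
            uint8_t lhs = ((A | B) & C1) | (B & C2);
            uint8_t rhs = (A & C1) | B;
            assert(lhs == rhs);
          }
      return 0;
    }
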
-
-Instruction *InstCombiner::visitOr(BinaryOperator &I) {
- bool Changed = SimplifyCommutative(I);
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- if (Value *V = SimplifyOrInst(Op0, Op1, TD))
- return ReplaceInstUsesWith(I, V);
-
- // See if we can simplify any instructions used by the instruction whose sole
- // purpose is to compute bits we don't care about.
- if (SimplifyDemandedInstructionBits(I))
- return &I;
-
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
- ConstantInt *C1 = 0; Value *X = 0;
- // (X & C1) | C2 --> (X | C2) & (C1|C2)
- // iff (C1 & C2) == 0.
- if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) &&
- (RHS->getValue() & C1->getValue()) != 0 &&
- Op0->hasOneUse()) {
- Value *Or = Builder->CreateOr(X, RHS);
- Or->takeName(Op0);
- return BinaryOperator::CreateAnd(Or,
- ConstantInt::get(I.getContext(),
- RHS->getValue() | C1->getValue()));
- }
-
- // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
- if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1))) &&
- Op0->hasOneUse()) {
- Value *Or = Builder->CreateOr(X, RHS);
- Or->takeName(Op0);
- return BinaryOperator::CreateXor(Or,
- ConstantInt::get(I.getContext(),
- C1->getValue() & ~RHS->getValue()));
- }
-
- // Try to fold constant and into select arguments.
- if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
- if (Instruction *R = FoldOpIntoSelect(I, SI))
- return R;
-
- if (isa<PHINode>(Op0))
- if (Instruction *NV = FoldOpIntoPhi(I))
- return NV;
- }
-
- Value *A = 0, *B = 0;
- ConstantInt *C1 = 0, *C2 = 0;
-
- // (A | B) | C and A | (B | C) -> bswap if possible.
- // (A >> B) | (C << D) and (A << B) | (C >> D) -> bswap if possible.
- if (match(Op0, m_Or(m_Value(), m_Value())) ||
- match(Op1, m_Or(m_Value(), m_Value())) ||
- (match(Op0, m_Shift(m_Value(), m_Value())) &&
- match(Op1, m_Shift(m_Value(), m_Value())))) {
- if (Instruction *BSwap = MatchBSwap(I))
- return BSwap;
- }
-
- // (X^C)|Y -> (X|Y)^C iff Y&C == 0
- if (Op0->hasOneUse() &&
- match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
- MaskedValueIsZero(Op1, C1->getValue())) {
- Value *NOr = Builder->CreateOr(A, Op1);
- NOr->takeName(Op0);
- return BinaryOperator::CreateXor(NOr, C1);
- }
-
- // Y|(X^C) -> (X|Y)^C iff Y&C == 0
- if (Op1->hasOneUse() &&
- match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
- MaskedValueIsZero(Op0, C1->getValue())) {
- Value *NOr = Builder->CreateOr(A, Op0);
- NOr->takeName(Op0);
- return BinaryOperator::CreateXor(NOr, C1);
- }
-
- // (A & C)|(B & D)
- Value *C = 0, *D = 0;
- if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
- match(Op1, m_And(m_Value(B), m_Value(D)))) {
- Value *V1 = 0, *V2 = 0, *V3 = 0;
- C1 = dyn_cast<ConstantInt>(C);
- C2 = dyn_cast<ConstantInt>(D);
- if (C1 && C2) { // (A & C1)|(B & C2)
- // If we have: ((V + N) & C1) | (V & C2)
- // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
- // replace with V+N.
- if (C1->getValue() == ~C2->getValue()) {
- if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+
- match(A, m_Add(m_Value(V1), m_Value(V2)))) {
- // Add commutes, try both ways.
- if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
- return ReplaceInstUsesWith(I, A);
- if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
- return ReplaceInstUsesWith(I, A);
- }
- // Or commutes, try both ways.
- if ((C1->getValue() & (C1->getValue()+1)) == 0 &&
- match(B, m_Add(m_Value(V1), m_Value(V2)))) {
- // Add commutes, try both ways.
- if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
- return ReplaceInstUsesWith(I, B);
- if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
- return ReplaceInstUsesWith(I, B);
- }
- }
-
- if ((C1->getValue() & C2->getValue()) == 0) {
- // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
- // iff (C1&C2) == 0 and (N&~C1) == 0
- if (match(A, m_Or(m_Value(V1), m_Value(V2))) &&
- ((V1 == B && MaskedValueIsZero(V2, ~C1->getValue())) || // (V|N)
- (V2 == B && MaskedValueIsZero(V1, ~C1->getValue())))) // (N|V)
- return BinaryOperator::CreateAnd(A,
- ConstantInt::get(A->getContext(),
- C1->getValue()|C2->getValue()));
- // Or commutes, try both ways.
- if (match(B, m_Or(m_Value(V1), m_Value(V2))) &&
- ((V1 == A && MaskedValueIsZero(V2, ~C2->getValue())) || // (V|N)
- (V2 == A && MaskedValueIsZero(V1, ~C2->getValue())))) // (N|V)
- return BinaryOperator::CreateAnd(B,
- ConstantInt::get(B->getContext(),
- C1->getValue()|C2->getValue()));
-
- // ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
- // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
- ConstantInt *C3 = 0, *C4 = 0;
- if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) &&
- (C3->getValue() & ~C1->getValue()) == 0 &&
- match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) &&
- (C4->getValue() & ~C2->getValue()) == 0) {
- V2 = Builder->CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield");
- return BinaryOperator::CreateAnd(V2,
- ConstantInt::get(B->getContext(),
- C1->getValue()|C2->getValue()));
- }
- }
- }
-
- // Check to see if we have any common things being and'ed. If so, find the
- // terms for V1 & (V2|V3).
- if (Op0->hasOneUse() || Op1->hasOneUse()) {
- V1 = 0;
- if (A == B) // (A & C)|(A & D) == A & (C|D)
- V1 = A, V2 = C, V3 = D;
- else if (A == D) // (A & C)|(B & A) == A & (B|C)
- V1 = A, V2 = B, V3 = C;
- else if (C == B) // (A & C)|(C & D) == C & (A|D)
- V1 = C, V2 = A, V3 = D;
- else if (C == D) // (A & C)|(B & C) == C & (A|B)
- V1 = C, V2 = A, V3 = B;
-
- if (V1) {
- Value *Or = Builder->CreateOr(V2, V3, "tmp");
- return BinaryOperator::CreateAnd(V1, Or);
- }
- }
-
- // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants.
- // Don't do this for vector select idioms, the code generator doesn't handle
- // them well yet.
- if (!I.getType()->isVectorTy()) {
- if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D))
- return Match;
- if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C))
- return Match;
- if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D))
- return Match;
- if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C))
- return Match;
- }
-
- // ((A&~B)|(~A&B)) -> A^B
- if ((match(C, m_Not(m_Specific(D))) &&
- match(B, m_Not(m_Specific(A)))))
- return BinaryOperator::CreateXor(A, D);
- // ((~B&A)|(~A&B)) -> A^B
- if ((match(A, m_Not(m_Specific(D))) &&
- match(B, m_Not(m_Specific(C)))))
- return BinaryOperator::CreateXor(C, D);
- // ((A&~B)|(B&~A)) -> A^B
- if ((match(C, m_Not(m_Specific(B))) &&
- match(D, m_Not(m_Specific(A)))))
- return BinaryOperator::CreateXor(A, B);
- // ((~B&A)|(B&~A)) -> A^B
- if ((match(A, m_Not(m_Specific(B))) &&
- match(D, m_Not(m_Specific(C)))))
- return BinaryOperator::CreateXor(C, B);
- }
-
- // (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts.
- if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
- if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
- if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
- SI0->getOperand(1) == SI1->getOperand(1) &&
- (SI0->hasOneUse() || SI1->hasOneUse())) {
- Value *NewOp = Builder->CreateOr(SI0->getOperand(0), SI1->getOperand(0),
- SI0->getName());
- return BinaryOperator::Create(SI1->getOpcode(), NewOp,
- SI1->getOperand(1));
- }
- }
-
- // ((A|B)&1)|(B&-2) -> (A&1) | B
- if (match(Op0, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) ||
- match(Op0, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) {
- Instruction *Ret = FoldOrWithConstants(I, Op1, A, B, C);
- if (Ret) return Ret;
- }
- // (B&-2)|((A|B)&1) -> (A&1) | B
- if (match(Op1, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) ||
- match(Op1, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) {
- Instruction *Ret = FoldOrWithConstants(I, Op0, A, B, C);
- if (Ret) return Ret;
- }
-
- // (~A | ~B) == (~(A & B)) - De Morgan's Law
- if (Value *Op0NotVal = dyn_castNotVal(Op0))
- if (Value *Op1NotVal = dyn_castNotVal(Op1))
- if (Op0->hasOneUse() && Op1->hasOneUse()) {
- Value *And = Builder->CreateAnd(Op0NotVal, Op1NotVal,
- I.getName()+".demorgan");
- return BinaryOperator::CreateNot(And);
- }
-
- if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
- if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
- if (Value *Res = FoldOrOfICmps(LHS, RHS))
- return ReplaceInstUsesWith(I, Res);
-
- // (fcmp uno x, c) | (fcmp uno y, c) -> (fcmp uno x, y)
- if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
- if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
- if (Value *Res = FoldOrOfFCmps(LHS, RHS))
- return ReplaceInstUsesWith(I, Res);
-
- // fold (or (cast A), (cast B)) -> (cast (or A, B))
- if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
- if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
- if (Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
- const Type *SrcTy = Op0C->getOperand(0)->getType();
- if (SrcTy == Op1C->getOperand(0)->getType() &&
- SrcTy->isIntOrIntVectorTy()) {
- Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
-
- if ((!isa<ICmpInst>(Op0COp) || !isa<ICmpInst>(Op1COp)) &&
- // Only do this if the casts both really cause code to be
- // generated.
- ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
- ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
- Value *NewOp = Builder->CreateOr(Op0COp, Op1COp, I.getName());
- return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
- }
-
- // If this is or(cast(icmp), cast(icmp)), try to fold this even if the
- // cast is otherwise not optimizable. This happens for vector sexts.
- if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
- if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
- if (Value *Res = FoldOrOfICmps(LHS, RHS))
- return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
-
- // If this is or(cast(fcmp), cast(fcmp)), try to fold this even if the
- // cast is otherwise not optimizable. This happens for vector sexts.
- if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
- if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
- if (Value *Res = FoldOrOfFCmps(LHS, RHS))
- return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
- }
- }
- }
-
- return Changed ? &I : 0;
-}
-
-Instruction *InstCombiner::visitXor(BinaryOperator &I) {
- bool Changed = SimplifyCommutative(I);
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- if (isa<UndefValue>(Op1)) {
- if (isa<UndefValue>(Op0))
- // Handle undef ^ undef -> 0 special case. This is a common
- // idiom (misuse).
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
- return ReplaceInstUsesWith(I, Op1); // X ^ undef -> undef
- }
-
- // xor X, X = 0
- if (Op0 == Op1)
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
-
- // See if we can simplify any instructions used by the instruction whose sole
- // purpose is to compute bits we don't care about.
- if (SimplifyDemandedInstructionBits(I))
- return &I;
- if (I.getType()->isVectorTy())
- if (isa<ConstantAggregateZero>(Op1))
- return ReplaceInstUsesWith(I, Op0); // X ^ <0,0> -> X
-
- // Is this a ~ operation?
- if (Value *NotOp = dyn_castNotVal(&I)) {
- if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
- if (Op0I->getOpcode() == Instruction::And ||
- Op0I->getOpcode() == Instruction::Or) {
- // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
- // ~(~X | Y) --> (X & ~Y) - De Morgan's Law
- if (dyn_castNotVal(Op0I->getOperand(1)))
- Op0I->swapOperands();
- if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) {
- Value *NotY =
- Builder->CreateNot(Op0I->getOperand(1),
- Op0I->getOperand(1)->getName()+".not");
- if (Op0I->getOpcode() == Instruction::And)
- return BinaryOperator::CreateOr(Op0NotVal, NotY);
- return BinaryOperator::CreateAnd(Op0NotVal, NotY);
- }
-
- // ~(X & Y) --> (~X | ~Y) - De Morgan's Law
- // ~(X | Y) --> (~X & ~Y) - De Morgan's Law
- if (isFreeToInvert(Op0I->getOperand(0)) &&
- isFreeToInvert(Op0I->getOperand(1))) {
- Value *NotX =
- Builder->CreateNot(Op0I->getOperand(0), "notlhs");
- Value *NotY =
- Builder->CreateNot(Op0I->getOperand(1), "notrhs");
- if (Op0I->getOpcode() == Instruction::And)
- return BinaryOperator::CreateOr(NotX, NotY);
- return BinaryOperator::CreateAnd(NotX, NotY);
- }
-
- } else if (Op0I->getOpcode() == Instruction::AShr) {
- // ~(~X >>s Y) --> (X >>s Y)
- if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0)))
- return BinaryOperator::CreateAShr(Op0NotVal, Op0I->getOperand(1));
- }
- }
- }
-
-
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
- if (RHS->isOne() && Op0->hasOneUse()) {
- // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B
- if (ICmpInst *ICI = dyn_cast<ICmpInst>(Op0))
- return new ICmpInst(ICI->getInversePredicate(),
- ICI->getOperand(0), ICI->getOperand(1));
-
- if (FCmpInst *FCI = dyn_cast<FCmpInst>(Op0))
- return new FCmpInst(FCI->getInversePredicate(),
- FCI->getOperand(0), FCI->getOperand(1));
- }
-
- // fold (xor(zext(cmp)), 1) and (xor(sext(cmp)), -1) to ext(!cmp).
- if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
- if (CmpInst *CI = dyn_cast<CmpInst>(Op0C->getOperand(0))) {
- if (CI->hasOneUse() && Op0C->hasOneUse()) {
- Instruction::CastOps Opcode = Op0C->getOpcode();
- if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
- (RHS == ConstantExpr::getCast(Opcode,
- ConstantInt::getTrue(I.getContext()),
- Op0C->getDestTy()))) {
- CI->setPredicate(CI->getInversePredicate());
- return CastInst::Create(Opcode, CI, Op0C->getType());
- }
- }
- }
- }
-
- if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
- // ~(c-X) == X-c-1 == X+(-c-1)
- if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue())
- if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
- Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C);
- Constant *ConstantRHS = ConstantExpr::getSub(NegOp0I0C,
- ConstantInt::get(I.getType(), 1));
- return BinaryOperator::CreateAdd(Op0I->getOperand(1), ConstantRHS);
- }
-
- if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
- if (Op0I->getOpcode() == Instruction::Add) {
- // ~(X+c) --> (-c-1)-X
- if (RHS->isAllOnesValue()) {
- Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
- return BinaryOperator::CreateSub(
- ConstantExpr::getSub(NegOp0CI,
- ConstantInt::get(I.getType(), 1)),
- Op0I->getOperand(0));
- } else if (RHS->getValue().isSignBit()) {
- // (X + C) ^ signbit -> (X + C + signbit)
- Constant *C = ConstantInt::get(I.getContext(),
- RHS->getValue() + Op0CI->getValue());
- return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
-
- }
- } else if (Op0I->getOpcode() == Instruction::Or) {
- // (X|C1)^C2 -> X^(C1^C2) iff X&C1 == 0
- if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) {
- Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS);
- // Anything in both C1 and C2 is known to be zero, remove it from
- // NewRHS.
- Constant *CommonBits = ConstantExpr::getAnd(Op0CI, RHS);
- NewRHS = ConstantExpr::getAnd(NewRHS,
- ConstantExpr::getNot(CommonBits));
- Worklist.Add(Op0I);
- I.setOperand(0, Op0I->getOperand(0));
- I.setOperand(1, NewRHS);
- return &I;
- }
- }
- }
- }
-
- // Try to fold constant and into select arguments.
- if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
- if (Instruction *R = FoldOpIntoSelect(I, SI))
- return R;
- if (isa<PHINode>(Op0))
- if (Instruction *NV = FoldOpIntoPhi(I))
- return NV;
- }
-
- if (Value *X = dyn_castNotVal(Op0)) // ~A ^ A == -1
- if (X == Op1)
- return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
-
- if (Value *X = dyn_castNotVal(Op1)) // A ^ ~A == -1
- if (X == Op0)
- return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
-
-
- BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1);
- if (Op1I) {
- Value *A, *B;
- if (match(Op1I, m_Or(m_Value(A), m_Value(B)))) {
- if (A == Op0) { // B^(B|A) == (A|B)^B
- Op1I->swapOperands();
- I.swapOperands();
- std::swap(Op0, Op1);
- } else if (B == Op0) { // B^(A|B) == (A|B)^B
- I.swapOperands(); // Simplified below.
- std::swap(Op0, Op1);
- }
- } else if (match(Op1I, m_Xor(m_Specific(Op0), m_Value(B)))) {
- return ReplaceInstUsesWith(I, B); // A^(A^B) == B
- } else if (match(Op1I, m_Xor(m_Value(A), m_Specific(Op0)))) {
- return ReplaceInstUsesWith(I, A); // A^(B^A) == B
- } else if (match(Op1I, m_And(m_Value(A), m_Value(B))) &&
- Op1I->hasOneUse()){
- if (A == Op0) { // A^(A&B) -> A^(B&A)
- Op1I->swapOperands();
- std::swap(A, B);
- }
- if (B == Op0) { // A^(B&A) -> (B&A)^A
- I.swapOperands(); // Simplified below.
- std::swap(Op0, Op1);
- }
- }
- }
-
- BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0);
- if (Op0I) {
- Value *A, *B;
- if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
- Op0I->hasOneUse()) {
- if (A == Op1) // (B|A)^B == (A|B)^B
- std::swap(A, B);
- if (B == Op1) // (A|B)^B == A & ~B
- return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1, "tmp"));
- } else if (match(Op0I, m_Xor(m_Specific(Op1), m_Value(B)))) {
- return ReplaceInstUsesWith(I, B); // (A^B)^A == B
- } else if (match(Op0I, m_Xor(m_Value(A), m_Specific(Op1)))) {
- return ReplaceInstUsesWith(I, A); // (B^A)^A == B
- } else if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
- Op0I->hasOneUse()){
- if (A == Op1) // (A&B)^A -> (B&A)^A
- std::swap(A, B);
- if (B == Op1 && // (B&A)^A == ~B & A
- !isa<ConstantInt>(Op1)) { // Canonical form is (B&C)^C
- return BinaryOperator::CreateAnd(Builder->CreateNot(A, "tmp"), Op1);
- }
- }
- }
-
- // (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts.
- if (Op0I && Op1I && Op0I->isShift() &&
- Op0I->getOpcode() == Op1I->getOpcode() &&
- Op0I->getOperand(1) == Op1I->getOperand(1) &&
- (Op0I->hasOneUse() || Op1I->hasOneUse())) {
- Value *NewOp =
- Builder->CreateXor(Op0I->getOperand(0), Op1I->getOperand(0),
- Op0I->getName());
- return BinaryOperator::Create(Op1I->getOpcode(), NewOp,
- Op1I->getOperand(1));
- }
-
- if (Op0I && Op1I) {
- Value *A, *B, *C, *D;
- // (A & B)^(A | B) -> A ^ B
- if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
- match(Op1I, m_Or(m_Value(C), m_Value(D)))) {
- if ((A == C && B == D) || (A == D && B == C))
- return BinaryOperator::CreateXor(A, B);
- }
- // (A | B)^(A & B) -> A ^ B
- if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
- match(Op1I, m_And(m_Value(C), m_Value(D)))) {
- if ((A == C && B == D) || (A == D && B == C))
- return BinaryOperator::CreateXor(A, B);
- }
-
- // (A & B)^(C & D)
- if ((Op0I->hasOneUse() || Op1I->hasOneUse()) &&
- match(Op0I, m_And(m_Value(A), m_Value(B))) &&
- match(Op1I, m_And(m_Value(C), m_Value(D)))) {
- // (X & Y)^(X & Z) -> (Y^Z) & X
- Value *X = 0, *Y = 0, *Z = 0;
- if (A == C)
- X = A, Y = B, Z = D;
- else if (A == D)
- X = A, Y = B, Z = C;
- else if (B == C)
- X = B, Y = A, Z = D;
- else if (B == D)
- X = B, Y = A, Z = C;
-
- if (X) {
- Value *NewOp = Builder->CreateXor(Y, Z, Op0->getName());
- return BinaryOperator::CreateAnd(NewOp, X);
- }
- }
- }
-
- // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
- if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
- if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
- if (PredicatesFoldable(LHS->getPredicate(), RHS->getPredicate())) {
- if (LHS->getOperand(0) == RHS->getOperand(1) &&
- LHS->getOperand(1) == RHS->getOperand(0))
- LHS->swapOperands();
- if (LHS->getOperand(0) == RHS->getOperand(0) &&
- LHS->getOperand(1) == RHS->getOperand(1)) {
- Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
- unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS);
- bool isSigned = LHS->isSigned() || RHS->isSigned();
- return ReplaceInstUsesWith(I,
- getICmpValue(isSigned, Code, Op0, Op1, Builder));
- }
- }
-
- // fold (xor (cast A), (cast B)) -> (cast (xor A, B))
- if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
- if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
- if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
- const Type *SrcTy = Op0C->getOperand(0)->getType();
- if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isIntegerTy() &&
- // Only do this if the casts both really cause code to be generated.
- ShouldOptimizeCast(Op0C->getOpcode(), Op0C->getOperand(0),
- I.getType()) &&
- ShouldOptimizeCast(Op1C->getOpcode(), Op1C->getOperand(0),
- I.getType())) {
- Value *NewOp = Builder->CreateXor(Op0C->getOperand(0),
- Op1C->getOperand(0), I.getName());
- return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
- }
- }
- }
-
- return Changed ? &I : 0;
-}
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
deleted file mode 100644
index e2b7d3d..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ /dev/null
@@ -1,1338 +0,0 @@
-//===- InstCombineCalls.cpp -----------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the visitCall and visitInvoke functions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "InstCombine.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Analysis/MemoryBuiltins.h"
-#include "llvm/Transforms/Utils/BuildLibCalls.h"
-using namespace llvm;
-
-/// getPromotedType - Return the specified type promoted as it would be to pass
-/// through a va_arg area.
-static const Type *getPromotedType(const Type *Ty) {
- if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
- if (ITy->getBitWidth() < 32)
- return Type::getInt32Ty(Ty->getContext());
- }
- return Ty;
-}
-
-/// EnforceKnownAlignment - If the specified pointer points to an object that
-/// we control, modify the object's alignment to PrefAlign. This isn't
-/// often possible though. If alignment is important, a more reliable approach
-/// is to simply align all global variables and allocation instructions to
-/// their preferred alignment from the beginning.
-///
-static unsigned EnforceKnownAlignment(Value *V,
- unsigned Align, unsigned PrefAlign) {
-
- User *U = dyn_cast<User>(V);
- if (!U) return Align;
-
- switch (Operator::getOpcode(U)) {
- default: break;
- case Instruction::BitCast:
- return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
- case Instruction::GetElementPtr: {
- // If all indexes are zero, it is just the alignment of the base pointer.
- bool AllZeroOperands = true;
- for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
- if (!isa<Constant>(*i) ||
- !cast<Constant>(*i)->isNullValue()) {
- AllZeroOperands = false;
- break;
- }
-
- if (AllZeroOperands) {
- // Treat this like a bitcast.
- return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
- }
- break;
- }
- }
-
- if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
- // If there is a large requested alignment and we can, bump up the alignment
- // of the global.
- if (!GV->isDeclaration()) {
- if (GV->getAlignment() >= PrefAlign)
- Align = GV->getAlignment();
- else {
- GV->setAlignment(PrefAlign);
- Align = PrefAlign;
- }
- }
- } else if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
- // If there is a requested alignment and if this is an alloca, round up.
- if (AI->getAlignment() >= PrefAlign)
- Align = AI->getAlignment();
- else {
- AI->setAlignment(PrefAlign);
- Align = PrefAlign;
- }
- }
-
- return Align;
-}
-
-/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
-/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
-/// and it is more than the alignment of the ultimate object, see if we can
-/// increase the alignment of the ultimate object, making this check succeed.
-unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
- unsigned PrefAlign) {
- unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
- sizeof(PrefAlign) * CHAR_BIT;
- APInt Mask = APInt::getAllOnesValue(BitWidth);
- APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
- unsigned TrailZ = KnownZero.countTrailingOnes();
- unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);
-
- if (PrefAlign > Align)
- Align = EnforceKnownAlignment(V, Align, PrefAlign);
-
- // We don't need to make any adjustment.
- return Align;
-}
-
-Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
- unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
- unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
- unsigned MinAlign = std::min(DstAlign, SrcAlign);
- unsigned CopyAlign = MI->getAlignment();
-
- if (CopyAlign < MinAlign) {
- MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
- MinAlign, false));
- return MI;
- }
-
- // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
- // load/store.
- ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
- if (MemOpLength == 0) return 0;
-
- // Source and destination pointer types are always "i8*" for intrinsic. See
- // if the size is something we can handle with a single primitive load/store.
- // A single load+store correctly handles overlapping memory in the memmove
- // case.
- unsigned Size = MemOpLength->getZExtValue();
- if (Size == 0) return MI; // Delete this mem transfer.
-
- if (Size > 8 || (Size&(Size-1)))
- return 0; // If not 1/2/4/8 bytes, exit.
-
- // Use an integer load+store unless we can find something better.
- Type *NewPtrTy =
- PointerType::getUnqual(IntegerType::get(MI->getContext(), Size<<3));
-
- // Memcpy forces the use of i8* for the source and destination. That means
- // that if you're using memcpy to move one double around, you'll get a cast
- // from double* to i8*. We'd much rather use a double load+store than
- // an i64 load+store here, because this improves the odds that the source or
- // dest address will be promotable. See if we can find a better type than the
- // integer datatype.
- Value *StrippedDest = MI->getOperand(1)->stripPointerCasts();
- if (StrippedDest != MI->getOperand(1)) {
- const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
- ->getElementType();
- if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
- // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
- // down through these levels if so.
- while (!SrcETy->isSingleValueType()) {
- if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
- if (STy->getNumElements() == 1)
- SrcETy = STy->getElementType(0);
- else
- break;
- } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
- if (ATy->getNumElements() == 1)
- SrcETy = ATy->getElementType();
- else
- break;
- } else
- break;
- }
-
- if (SrcETy->isSingleValueType())
- NewPtrTy = PointerType::getUnqual(SrcETy);
- }
- }
-
-
- // If the memcpy/memmove provides better alignment info than we can
- // infer, use it.
- SrcAlign = std::max(SrcAlign, CopyAlign);
- DstAlign = std::max(DstAlign, CopyAlign);
-
- Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewPtrTy);
- Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewPtrTy);
- Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
- InsertNewInstBefore(L, *MI);
- InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);
-
- // Set the size of the copy to 0, it will be deleted on the next iteration.
- MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
- return MI;
-}
-
-Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
- unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
- if (MI->getAlignment() < Alignment) {
- MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
- Alignment, false));
- return MI;
- }
-
- // Extract the length and alignment and fill if they are constant.
- ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
- ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
- if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
- return 0;
- uint64_t Len = LenC->getZExtValue();
- Alignment = MI->getAlignment();
-
- // If the length is zero, this is a no-op
- if (Len == 0) return MI; // memset(d,c,0,a) -> noop
-
- // memset(s,c,n) -> store s, c (for n=1,2,4,8)
- if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
- const Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.
-
- Value *Dest = MI->getDest();
- Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));
-
- // Alignment 0 is identity for alignment 1 for memset, but not store.
- if (Alignment == 0) Alignment = 1;
-
- // Extract the fill value and store.
- uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
- InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
- Dest, false, Alignment), *MI);
-
- // Set the size of the copy to 0, it will be deleted on the next iteration.
- MI->setLength(Constant::getNullValue(LenC->getType()));
- return MI;
- }
-
- return 0;
-}
-
-/// visitCallInst - CallInst simplification. This mostly only handles folding
-/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
-/// the heavy lifting.
-///
-Instruction *InstCombiner::visitCallInst(CallInst &CI) {
- if (isFreeCall(&CI))
- return visitFree(CI);
-
- // If the caller function is nounwind, mark the call as nounwind, even if the
- // callee isn't.
- if (CI.getParent()->getParent()->doesNotThrow() &&
- !CI.doesNotThrow()) {
- CI.setDoesNotThrow();
- return &CI;
- }
-
- IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
- if (!II) return visitCallSite(&CI);
-
- // Intrinsics cannot occur in an invoke, so handle them here instead of in
- // visitCallSite.
- if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
- bool Changed = false;
-
- // memmove/cpy/set of zero bytes is a noop.
- if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
- if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);
-
- if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
- if (CI->getZExtValue() == 1) {
- // Replace the instruction with just byte operations. We would
- // transform other cases to loads/stores, but we don't know if
- // alignment is sufficient.
- }
- }
-
- // If we have a memmove and the source operation is a constant global,
- // then the source and dest pointers can't alias, so we can change this
- // into a call to memcpy.
- if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
- if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
- if (GVSrc->isConstant()) {
- Module *M = CI.getParent()->getParent()->getParent();
- Intrinsic::ID MemCpyID = Intrinsic::memcpy;
- const Type *Tys[1];
- Tys[0] = CI.getOperand(3)->getType();
- CI.setOperand(0,
- Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
- Changed = true;
- }
- }
-
- if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
- // memmove(x,x,size) -> noop.
- if (MTI->getSource() == MTI->getDest())
- return EraseInstFromFunction(CI);
- }
-
- // If we can determine a pointer alignment that is bigger than currently
- // set, update the alignment.
- if (isa<MemTransferInst>(MI)) {
- if (Instruction *I = SimplifyMemTransfer(MI))
- return I;
- } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
- if (Instruction *I = SimplifyMemSet(MSI))
- return I;
- }
-
- if (Changed) return II;
- }
-
- switch (II->getIntrinsicID()) {
- default: break;
- case Intrinsic::objectsize: {
- // We need target data for just about everything so depend on it.
- if (!TD) break;
-
- const Type *ReturnTy = CI.getType();
- bool Min = (cast<ConstantInt>(II->getOperand(2))->getZExtValue() == 1);
-
- // Get to the real allocated thing and offset as fast as possible.
- Value *Op1 = II->getOperand(1)->stripPointerCasts();
-
- // If we've stripped down to a single global variable that we
- // can know the size of then just return that.
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
- if (GV->hasDefinitiveInitializer()) {
- Constant *C = GV->getInitializer();
- uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
- return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
- } else {
- // Can't determine size of the GV.
- Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
- return ReplaceInstUsesWith(CI, RetVal);
- }
- } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
- // Get alloca size.
- if (AI->getAllocatedType()->isSized()) {
- uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
- if (AI->isArrayAllocation()) {
- const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
- if (!C) break;
- AllocaSize *= C->getZExtValue();
- }
- return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
- }
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
- // Only handle constant GEPs here.
- if (CE->getOpcode() != Instruction::GetElementPtr) break;
- GEPOperator *GEP = cast<GEPOperator>(CE);
-
- // Make sure we're not a constant offset from an external
- // global.
- Value *Operand = GEP->getPointerOperand();
- Operand = Operand->stripPointerCasts();
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
- if (!GV->hasDefinitiveInitializer()) break;
-
- // Get what we're pointing to and its size.
- const PointerType *BaseType =
- cast<PointerType>(Operand->getType());
- uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());
-
- // Get the current byte offset into the thing. Use the original
- // operand in case we're looking through a bitcast.
- SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
- const PointerType *OffsetType =
- cast<PointerType>(GEP->getPointerOperand()->getType());
- uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());
-
- if (Size < Offset) {
- // Out of bound reference? Negative index normalized to large
- // index? Just return "I don't know".
- Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
- return ReplaceInstUsesWith(CI, RetVal);
- }
-
- Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
- return ReplaceInstUsesWith(CI, RetVal);
-
- }
-
- // Do not return "I don't know" here. Later optimization passes could
- // make it possible to evaluate objectsize to a constant.
- break;
- }
- case Intrinsic::bswap:
- // bswap(bswap(x)) -> x
- if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
- if (Operand->getIntrinsicID() == Intrinsic::bswap)
- return ReplaceInstUsesWith(CI, Operand->getOperand(1));
-
- // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
- if (TruncInst *TI = dyn_cast<TruncInst>(II->getOperand(1))) {
- if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
- if (Operand->getIntrinsicID() == Intrinsic::bswap) {
- unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
- TI->getType()->getPrimitiveSizeInBits();
- Value *CV = ConstantInt::get(Operand->getType(), C);
- Value *V = Builder->CreateLShr(Operand->getOperand(1), CV);
- return new TruncInst(V, TI->getType());
- }
- }
-
- break;
- case Intrinsic::powi:
- if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(2))) {
- // powi(x, 0) -> 1.0
- if (Power->isZero())
- return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
- // powi(x, 1) -> x
- if (Power->isOne())
- return ReplaceInstUsesWith(CI, II->getOperand(1));
- // powi(x, -1) -> 1/x
- if (Power->isAllOnesValue())
- return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
- II->getOperand(1));
- }
- break;
- case Intrinsic::cttz: {
- // If all bits below the first known one are known zero,
- // this value is constant.
- const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
- uint32_t BitWidth = IT->getBitWidth();
- APInt KnownZero(BitWidth, 0);
- APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
- KnownZero, KnownOne);
- unsigned TrailingZeros = KnownOne.countTrailingZeros();
- APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
- if ((Mask & KnownZero) == Mask)
- return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
- APInt(BitWidth, TrailingZeros)));
-
- }
- break;
- case Intrinsic::ctlz: {
- // If all bits above the first known one are known zero,
- // this value is constant.
- const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
- uint32_t BitWidth = IT->getBitWidth();
- APInt KnownZero(BitWidth, 0);
- APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
- KnownZero, KnownOne);
- unsigned LeadingZeros = KnownOne.countLeadingZeros();
- APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
- if ((Mask & KnownZero) == Mask)
- return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
- APInt(BitWidth, LeadingZeros)));
-
- }
- break;
- case Intrinsic::uadd_with_overflow: {
- Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
- const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
- uint32_t BitWidth = IT->getBitWidth();
- APInt Mask = APInt::getSignBit(BitWidth);
- APInt LHSKnownZero(BitWidth, 0);
- APInt LHSKnownOne(BitWidth, 0);
- ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
- bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
- bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];
-
- if (LHSKnownNegative || LHSKnownPositive) {
- APInt RHSKnownZero(BitWidth, 0);
- APInt RHSKnownOne(BitWidth, 0);
- ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
- bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
- bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
- if (LHSKnownNegative && RHSKnownNegative) {
- // The sign bit is set in both cases: this MUST overflow.
- // Create a simple add instruction, and insert it into the struct.
- Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
- Worklist.Add(Add);
- Constant *V[] = {
- UndefValue::get(LHS->getType()),ConstantInt::getTrue(II->getContext())
- };
- Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
- return InsertValueInst::Create(Struct, Add, 0);
- }
-
- if (LHSKnownPositive && RHSKnownPositive) {
- // The sign bit is clear in both cases: this CANNOT overflow.
- // Create a simple add instruction, and insert it into the struct.
- Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
- Worklist.Add(Add);
- Constant *V[] = {
- UndefValue::get(LHS->getType()),
- ConstantInt::getFalse(II->getContext())
- };
- Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
- return InsertValueInst::Create(Struct, Add, 0);
- }
- }
- }
- // FALL THROUGH uadd into sadd
- case Intrinsic::sadd_with_overflow:
- // Canonicalize constants into the RHS.
- if (isa<Constant>(II->getOperand(1)) &&
- !isa<Constant>(II->getOperand(2))) {
- Value *LHS = II->getOperand(1);
- II->setOperand(1, II->getOperand(2));
- II->setOperand(2, LHS);
- return II;
- }
-
- // X + undef -> undef
- if (isa<UndefValue>(II->getOperand(2)))
- return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
-
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
- // X + 0 -> {X, false}
- if (RHS->isZero()) {
- Constant *V[] = {
- UndefValue::get(II->getOperand(1)->getType()),
- ConstantInt::getFalse(II->getContext())
- };
- Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
- return InsertValueInst::Create(Struct, II->getOperand(1), 0);
- }
- }
- break;
- case Intrinsic::usub_with_overflow:
- case Intrinsic::ssub_with_overflow:
- // undef - X -> undef
- // X - undef -> undef
- if (isa<UndefValue>(II->getOperand(1)) ||
- isa<UndefValue>(II->getOperand(2)))
- return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
-
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
- // X - 0 -> {X, false}
- if (RHS->isZero()) {
- Constant *V[] = {
- UndefValue::get(II->getOperand(1)->getType()),
- ConstantInt::getFalse(II->getContext())
- };
- Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
- return InsertValueInst::Create(Struct, II->getOperand(1), 0);
- }
- }
- break;
- case Intrinsic::umul_with_overflow:
- case Intrinsic::smul_with_overflow:
- // Canonicalize constants into the RHS.
- if (isa<Constant>(II->getOperand(1)) &&
- !isa<Constant>(II->getOperand(2))) {
- Value *LHS = II->getOperand(1);
- II->setOperand(1, II->getOperand(2));
- II->setOperand(2, LHS);
- return II;
- }
-
- // X * undef -> undef
- if (isa<UndefValue>(II->getOperand(2)))
- return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
-
- if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(2))) {
- // X*0 -> {0, false}
- if (RHSI->isZero())
- return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));
-
- // X * 1 -> {X, false}
- if (RHSI->equalsInt(1)) {
- Constant *V[] = {
- UndefValue::get(II->getOperand(1)->getType()),
- ConstantInt::getFalse(II->getContext())
- };
- Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
- return InsertValueInst::Create(Struct, II->getOperand(1), 0);
- }
- }
- break;
- case Intrinsic::ppc_altivec_lvx:
- case Intrinsic::ppc_altivec_lvxl:
- case Intrinsic::x86_sse_loadu_ps:
- case Intrinsic::x86_sse2_loadu_pd:
- case Intrinsic::x86_sse2_loadu_dq:
- // Turn PPC lvx -> load if the pointer is known aligned.
- // Turn X86 loadups -> load if the pointer is known aligned.
- if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
- Value *Ptr = Builder->CreateBitCast(II->getOperand(1),
- PointerType::getUnqual(II->getType()));
- return new LoadInst(Ptr);
- }
- break;
- case Intrinsic::ppc_altivec_stvx:
- case Intrinsic::ppc_altivec_stvxl:
- // Turn stvx -> store if the pointer is known aligned.
- if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
- const Type *OpPtrTy =
- PointerType::getUnqual(II->getOperand(1)->getType());
- Value *Ptr = Builder->CreateBitCast(II->getOperand(2), OpPtrTy);
- return new StoreInst(II->getOperand(1), Ptr);
- }
- break;
- case Intrinsic::x86_sse_storeu_ps:
- case Intrinsic::x86_sse2_storeu_pd:
- case Intrinsic::x86_sse2_storeu_dq:
- // Turn X86 storeu -> store if the pointer is known aligned.
- if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
- const Type *OpPtrTy =
- PointerType::getUnqual(II->getOperand(2)->getType());
- Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy);
- return new StoreInst(II->getOperand(2), Ptr);
- }
- break;
-
- case Intrinsic::x86_sse_cvttss2si: {
- // These intrinsics only demand the 0th element of their input vector. If
- // we can simplify the input based on that, do so now.
- unsigned VWidth =
- cast<VectorType>(II->getOperand(1)->getType())->getNumElements();
- APInt DemandedElts(VWidth, 1);
- APInt UndefElts(VWidth, 0);
- if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
- UndefElts)) {
- II->setOperand(1, V);
- return II;
- }
- break;
- }
-
- case Intrinsic::ppc_altivec_vperm:
- // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
- if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
- assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");
-
- // Check that all of the elements are integer constants or undefs.
- bool AllEltsOk = true;
- for (unsigned i = 0; i != 16; ++i) {
- if (!isa<ConstantInt>(Mask->getOperand(i)) &&
- !isa<UndefValue>(Mask->getOperand(i))) {
- AllEltsOk = false;
- break;
- }
- }
-
- if (AllEltsOk) {
- // Cast the input vectors to byte vectors.
- Value *Op0 = Builder->CreateBitCast(II->getOperand(1), Mask->getType());
- Value *Op1 = Builder->CreateBitCast(II->getOperand(2), Mask->getType());
- Value *Result = UndefValue::get(Op0->getType());
-
- // Only extract each element once.
- Value *ExtractedElts[32];
- memset(ExtractedElts, 0, sizeof(ExtractedElts));
-
- for (unsigned i = 0; i != 16; ++i) {
- if (isa<UndefValue>(Mask->getOperand(i)))
- continue;
- unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
- Idx &= 31; // Match the hardware behavior.
-
- if (ExtractedElts[Idx] == 0) {
- ExtractedElts[Idx] =
- Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
- ConstantInt::get(Type::getInt32Ty(II->getContext()),
- Idx&15, false), "tmp");
- }
-
- // Insert this value into the result vector.
- Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
- ConstantInt::get(Type::getInt32Ty(II->getContext()),
- i, false), "tmp");
- }
- return CastInst::Create(Instruction::BitCast, Result, CI.getType());
- }
- }
- break;
-
- case Intrinsic::stackrestore: {
- // If the save is right next to the restore, remove the restore. This can
- // happen when variable allocas are DCE'd.
- if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
- if (SS->getIntrinsicID() == Intrinsic::stacksave) {
- BasicBlock::iterator BI = SS;
- if (&*++BI == II)
- return EraseInstFromFunction(CI);
- }
- }
-
- // Scan down this block to see if there is another stack restore in the
- // same block without an intervening call/alloca.
- BasicBlock::iterator BI = II;
- TerminatorInst *TI = II->getParent()->getTerminator();
- bool CannotRemove = false;
- for (++BI; &*BI != TI; ++BI) {
- if (isa<AllocaInst>(BI) || isMalloc(BI)) {
- CannotRemove = true;
- break;
- }
- if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
- // If there is a stackrestore below this one, remove this one.
- if (II->getIntrinsicID() == Intrinsic::stackrestore)
- return EraseInstFromFunction(CI);
- // Otherwise, ignore the intrinsic.
- } else {
- // If we found a non-intrinsic call, we can't remove the stack
- // restore.
- CannotRemove = true;
- break;
- }
- }
- }
-
- // If the stack restore is in a return/unwind block and if there are no
- // allocas or calls between the restore and the return, nuke the restore.
- if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
- return EraseInstFromFunction(CI);
- break;
- }
- }
-
- return visitCallSite(II);
-}
-
-// InvokeInst simplification
-//
-Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
- return visitCallSite(&II);
-}
-
-/// isSafeToEliminateVarargsCast - If this cast does not affect the value
-/// passed through the varargs area, we can eliminate the use of the cast.
-static bool isSafeToEliminateVarargsCast(const CallSite CS,
- const CastInst * const CI,
- const TargetData * const TD,
- const int ix) {
- if (!CI->isLosslessCast())
- return false;
-
- // The size of ByVal arguments is derived from the type, so we
- // can't change to a type with a different size. If the size were
- // passed explicitly we could avoid this check.
- if (!CS.paramHasAttr(ix, Attribute::ByVal))
- return true;
-
- const Type* SrcTy =
- cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
- const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
- if (!SrcTy->isSized() || !DstTy->isSized())
- return false;
- if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
- return false;
- return true;
-}
-
-// Try to fold some different type of calls here.
-// Currently we're only working with the checking functions, memcpy_chk,
-// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
-// strcat_chk and strncat_chk.
-Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
- if (CI->getCalledFunction() == 0) return 0;
-
- StringRef Name = CI->getCalledFunction()->getName();
- BasicBlock *BB = CI->getParent();
- IRBuilder<> B(CI->getParent()->getContext());
-
- // Set the builder to the instruction after the call.
- B.SetInsertPoint(BB, CI);
-
- if (Name == "__memcpy_chk") {
- ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(4));
- if (!SizeCI)
- return 0;
- ConstantInt *SizeArg = dyn_cast<ConstantInt>(CI->getOperand(3));
- if (!SizeArg)
- return 0;
- if (SizeCI->isAllOnesValue() ||
- SizeCI->getZExtValue() <= SizeArg->getZExtValue()) {
- EmitMemCpy(CI->getOperand(1), CI->getOperand(2), CI->getOperand(3),
- 1, B, TD);
- return ReplaceInstUsesWith(*CI, CI->getOperand(1));
- }
- return 0;
- }
-
- // Should be similar to memcpy.
- if (Name == "__mempcpy_chk") {
- return 0;
- }
-
- if (Name == "__memmove_chk") {
- ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(4));
- if (!SizeCI)
- return 0;
- ConstantInt *SizeArg = dyn_cast<ConstantInt>(CI->getOperand(3));
- if (!SizeArg)
- return 0;
- if (SizeCI->isAllOnesValue() ||
- SizeCI->getZExtValue() <= SizeArg->getZExtValue()) {
- EmitMemMove(CI->getOperand(1), CI->getOperand(2), CI->getOperand(3),
- 1, B, TD);
- return ReplaceInstUsesWith(*CI, CI->getOperand(1));
- }
- return 0;
- }
-
- if (Name == "__memset_chk") {
- ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(4));
- if (!SizeCI)
- return 0;
- ConstantInt *SizeArg = dyn_cast<ConstantInt>(CI->getOperand(3));
- if (!SizeArg)
- return 0;
- if (SizeCI->isAllOnesValue() ||
- SizeCI->getZExtValue() <= SizeArg->getZExtValue()) {
- Value *Val = B.CreateIntCast(CI->getOperand(2), B.getInt8Ty(),
- false);
- EmitMemSet(CI->getOperand(1), Val, CI->getOperand(3), B, TD);
- return ReplaceInstUsesWith(*CI, CI->getOperand(1));
- }
- return 0;
- }
-
- if (Name == "__strcpy_chk") {
- ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(3));
- if (!SizeCI)
- return 0;
- // If a) we don't have any length information, or b) we know this will
- // fit then just lower to a plain strcpy. Otherwise we'll keep our
- // strcpy_chk call, which may fail at runtime if the length is too large.
- // TODO: It might be nice to get a maximum length out of the possible
- // string lengths for varying.
- if (SizeCI->isAllOnesValue() ||
- SizeCI->getZExtValue() >= GetStringLength(CI->getOperand(2))) {
- Value *Ret = EmitStrCpy(CI->getOperand(1), CI->getOperand(2), B, TD);
- return ReplaceInstUsesWith(*CI, Ret);
- }
- return 0;
- }
-
- // Should be similar to strcpy.
- if (Name == "__stpcpy_chk") {
- return 0;
- }
-
- if (Name == "__strncpy_chk") {
- ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(4));
- if (!SizeCI)
- return 0;
- ConstantInt *SizeArg = dyn_cast<ConstantInt>(CI->getOperand(3));
- if (!SizeArg)
- return 0;
- if (SizeCI->isAllOnesValue() ||
- SizeCI->getZExtValue() <= SizeArg->getZExtValue()) {
- Value *Ret = EmitStrCpy(CI->getOperand(1), CI->getOperand(2), B, TD);
- return ReplaceInstUsesWith(*CI, Ret);
- }
- return 0;
- }
-
- if (Name == "__strcat_chk") {
- return 0;
- }
-
- if (Name == "__strncat_chk") {
- return 0;
- }
-
- return 0;
-}
-
-// visitCallSite - Improvements for call and invoke instructions.
-//
-Instruction *InstCombiner::visitCallSite(CallSite CS) {
- bool Changed = false;
-
- // If the callee is a constexpr cast of a function, attempt to move the cast
- // to the arguments of the call/invoke.
- if (transformConstExprCastCall(CS)) return 0;
-
- Value *Callee = CS.getCalledValue();
-
- if (Function *CalleeF = dyn_cast<Function>(Callee))
- // If the call and callee calling conventions don't match, this call must
- // be unreachable, as the call is undefined.
- if (CalleeF->getCallingConv() != CS.getCallingConv() &&
- // Only do this for calls to a function with a body. A prototype may
- // not actually end up matching the implementation's calling conv for a
- // variety of reasons (e.g. it may be written in assembly).
- !CalleeF->isDeclaration()) {
- Instruction *OldCall = CS.getInstruction();
- new StoreInst(ConstantInt::getTrue(Callee->getContext()),
- UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
- OldCall);
- // If OldCall does not return void then replaceAllUsesWith undef.
- // This allows ValueHandlers and custom metadata to adjust themselves.
- if (!OldCall->getType()->isVoidTy())
- OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
- if (isa<CallInst>(OldCall))
- return EraseInstFromFunction(*OldCall);
-
- // We cannot remove an invoke, because it would change the CFG, just
- // change the callee to a null pointer.
- cast<InvokeInst>(OldCall)->setOperand(0,
- Constant::getNullValue(CalleeF->getType()));
- return 0;
- }
-
- if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
- // This instruction is not reachable, just remove it. We insert a store to
- // undef so that we know that this code is not reachable, despite the fact
- // that we can't modify the CFG here.
- new StoreInst(ConstantInt::getTrue(Callee->getContext()),
- UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
- CS.getInstruction());
-
- // If CS does not return void then replaceAllUsesWith undef.
- // This allows ValueHandlers and custom metadata to adjust themselves.
- if (!CS.getInstruction()->getType()->isVoidTy())
- CS.getInstruction()->
- replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));
-
- if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
- // Don't break the CFG, insert a dummy cond branch.
- BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
- ConstantInt::getTrue(Callee->getContext()), II);
- }
- return EraseInstFromFunction(*CS.getInstruction());
- }
-
- if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
- if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
- if (In->getIntrinsicID() == Intrinsic::init_trampoline)
- return transformCallThroughTrampoline(CS);
-
- const PointerType *PTy = cast<PointerType>(Callee->getType());
- const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
- if (FTy->isVarArg()) {
- int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
- // See if we can optimize any arguments passed through the varargs area of
- // the call.
- for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
- E = CS.arg_end(); I != E; ++I, ++ix) {
- CastInst *CI = dyn_cast<CastInst>(*I);
- if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
- *I = CI->getOperand(0);
- Changed = true;
- }
- }
- }
-
- if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
- // Inline asm calls cannot throw - mark them 'nounwind'.
- CS.setDoesNotThrow();
- Changed = true;
- }
-
- // Try to optimize the call if possible; we require TargetData for most of
- // this. None of these calls are seen as possibly dead so go ahead and
- // delete the instruction now.
- if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
- Instruction *I = tryOptimizeCall(CI, TD);
- // If we changed something, return the result. Otherwise let
- // the fallthrough check run.
- if (I) return EraseInstFromFunction(*I);
- }
-
- return Changed ? CS.getInstruction() : 0;
-}
-
-// transformConstExprCastCall - If the callee is a constexpr cast of a function,
-// attempt to move the cast to the arguments of the call/invoke.
-//
-bool InstCombiner::transformConstExprCastCall(CallSite CS) {
- if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
- ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
- if (CE->getOpcode() != Instruction::BitCast ||
- !isa<Function>(CE->getOperand(0)))
- return false;
- Function *Callee = cast<Function>(CE->getOperand(0));
- Instruction *Caller = CS.getInstruction();
- const AttrListPtr &CallerPAL = CS.getAttributes();
-
- // Okay, this is a cast from a function to a different type. Unless doing so
- // would cause a type conversion of one of our arguments, change this call to
- // be a direct call with arguments casted to the appropriate types.
- //
- const FunctionType *FT = Callee->getFunctionType();
- const Type *OldRetTy = Caller->getType();
- const Type *NewRetTy = FT->getReturnType();
-
- if (NewRetTy->isStructTy())
- return false; // TODO: Handle multiple return values.
-
- // Check to see if we are changing the return type...
- if (OldRetTy != NewRetTy) {
- if (Callee->isDeclaration() &&
- // Conversion is ok if changing from one pointer type to another or from
- // a pointer to an integer of the same size.
- !((OldRetTy->isPointerTy() || !TD ||
- OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
- (NewRetTy->isPointerTy() || !TD ||
- NewRetTy == TD->getIntPtrType(Caller->getContext()))))
- return false; // Cannot transform this return value.
-
- if (!Caller->use_empty() &&
- // void -> non-void is handled specially
- !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
- return false; // Cannot transform this return value.
-
- if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
- Attributes RAttrs = CallerPAL.getRetAttributes();
- if (RAttrs & Attribute::typeIncompatible(NewRetTy))
- return false; // Attribute not compatible with transformed value.
- }
-
- // If the callsite is an invoke instruction, and the return value is used by
- // a PHI node in a successor, we cannot change the return type of the call
- // because there is no place to put the cast instruction (without breaking
- // the critical edge). Bail out in this case.
- if (!Caller->use_empty())
- if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
- for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
- UI != E; ++UI)
- if (PHINode *PN = dyn_cast<PHINode>(*UI))
- if (PN->getParent() == II->getNormalDest() ||
- PN->getParent() == II->getUnwindDest())
- return false;
- }
-
- unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
- unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
-
- CallSite::arg_iterator AI = CS.arg_begin();
- for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
- const Type *ParamTy = FT->getParamType(i);
- const Type *ActTy = (*AI)->getType();
-
- if (!CastInst::isCastable(ActTy, ParamTy))
- return false; // Cannot transform this parameter value.
-
- if (CallerPAL.getParamAttributes(i + 1)
- & Attribute::typeIncompatible(ParamTy))
- return false; // Attribute not compatible with transformed value.
-
- // Converting from one pointer type to another or between a pointer and an
- // integer of the same size is safe even if we do not have a body.
- bool isConvertible = ActTy == ParamTy ||
- (TD && ((ParamTy->isPointerTy() ||
- ParamTy == TD->getIntPtrType(Caller->getContext())) &&
- (ActTy->isPointerTy() ||
- ActTy == TD->getIntPtrType(Caller->getContext()))));
- if (Callee->isDeclaration() && !isConvertible) return false;
- }
-
- if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
- Callee->isDeclaration())
- return false; // Do not delete arguments unless we have a function body.
-
- if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
- !CallerPAL.isEmpty())
- // In this case we have more arguments than the new function type, but we
- // won't be dropping them. Check that these extra arguments have attributes
- // that are compatible with being a vararg call argument.
- for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
- if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
- break;
- Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
- if (PAttrs & Attribute::VarArgsIncompatible)
- return false;
- }
-
- // Okay, we decided that this is a safe thing to do: go ahead and start
- // inserting cast instructions as necessary...
- std::vector<Value*> Args;
- Args.reserve(NumActualArgs);
- SmallVector<AttributeWithIndex, 8> attrVec;
- attrVec.reserve(NumCommonArgs);
-
- // Get any return attributes.
- Attributes RAttrs = CallerPAL.getRetAttributes();
-
- // If the return value is not being used, the type may not be compatible
- // with the existing attributes. Wipe out any problematic attributes.
- RAttrs &= ~Attribute::typeIncompatible(NewRetTy);
-
- // Add the new return attributes.
- if (RAttrs)
- attrVec.push_back(AttributeWithIndex::get(0, RAttrs));
-
- AI = CS.arg_begin();
- for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
- const Type *ParamTy = FT->getParamType(i);
- if ((*AI)->getType() == ParamTy) {
- Args.push_back(*AI);
- } else {
- Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
- false, ParamTy, false);
- Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
- }
-
- // Add any parameter attributes.
- if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
- attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
- }
-
- // If the function takes more arguments than the call was taking, add them
- // now.
- for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
- Args.push_back(Constant::getNullValue(FT->getParamType(i)));
-
- // If we are removing arguments to the function, emit an obnoxious warning.
- if (FT->getNumParams() < NumActualArgs) {
- if (!FT->isVarArg()) {
- errs() << "WARNING: While resolving call to function '"
- << Callee->getName() << "' arguments were dropped!\n";
- } else {
- // Add all of the arguments in their promoted form to the arg list.
- for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
- const Type *PTy = getPromotedType((*AI)->getType());
- if (PTy != (*AI)->getType()) {
- // Must promote to pass through va_arg area!
- Instruction::CastOps opcode =
- CastInst::getCastOpcode(*AI, false, PTy, false);
- Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
- } else {
- Args.push_back(*AI);
- }
-
- // Add any parameter attributes.
- if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
- attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
- }
- }
- }
-
- if (Attributes FnAttrs = CallerPAL.getFnAttributes())
- attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
-
- if (NewRetTy->isVoidTy())
- Caller->setName(""); // Void type should not have a name.
-
- const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
- attrVec.end());
-
- Instruction *NC;
- if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
- NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
- Args.begin(), Args.end(),
- Caller->getName(), Caller);
- cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
- cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
- } else {
- NC = CallInst::Create(Callee, Args.begin(), Args.end(),
- Caller->getName(), Caller);
- CallInst *CI = cast<CallInst>(Caller);
- if (CI->isTailCall())
- cast<CallInst>(NC)->setTailCall();
- cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
- cast<CallInst>(NC)->setAttributes(NewCallerPAL);
- }
-
- // Insert a cast of the return type as necessary.
- Value *NV = NC;
- if (OldRetTy != NV->getType() && !Caller->use_empty()) {
- if (!NV->getType()->isVoidTy()) {
- Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
- OldRetTy, false);
- NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");
-
- // If this is an invoke instruction, we should insert it after the first
- // non-phi instruction in the normal successor block.
- if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
- BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
- InsertNewInstBefore(NC, *I);
- } else {
- // Otherwise, it's a call, just insert cast right after the call instr
- InsertNewInstBefore(NC, *Caller);
- }
- Worklist.AddUsersToWorkList(*Caller);
- } else {
- NV = UndefValue::get(Caller->getType());
- }
- }
-
-
- if (!Caller->use_empty())
- Caller->replaceAllUsesWith(NV);
-
- EraseInstFromFunction(*Caller);
- return true;
-}
-
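As a rough illustration of the transform above (names invented; this assumes the argument cast is one the code accepts, e.g. a pointer-to-pointer cast), a call through a bitcast of a known function such as

    define i32 @callee(i8* %p) {
      ret i32 0
    }
    define i32 @caller(i32* %q) {
      %r = call i32 bitcast (i32 (i8*)* @callee to i32 (i32*)*)(i32* %q)
      ret i32 %r
    }

would be rewritten into a direct call with the argument cast inserted:

      %q.c = bitcast i32* %q to i8*
      %r = call i32 @callee(i8* %q.c)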
-// transformCallThroughTrampoline - Turn a call to a function created by the
-// init_trampoline intrinsic into a direct call to the underlying function.
-//
-Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
- Value *Callee = CS.getCalledValue();
- const PointerType *PTy = cast<PointerType>(Callee->getType());
- const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
- const AttrListPtr &Attrs = CS.getAttributes();
-
- // If the call already has the 'nest' attribute somewhere then give up -
- // otherwise 'nest' would occur twice after splicing in the chain.
- if (Attrs.hasAttrSomewhere(Attribute::Nest))
- return 0;
-
- IntrinsicInst *Tramp =
- cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));
-
- Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
- const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
- const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
-
- const AttrListPtr &NestAttrs = NestF->getAttributes();
- if (!NestAttrs.isEmpty()) {
- unsigned NestIdx = 1;
- const Type *NestTy = 0;
- Attributes NestAttr = Attribute::None;
-
- // Look for a parameter marked with the 'nest' attribute.
- for (FunctionType::param_iterator I = NestFTy->param_begin(),
- E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
- if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
- // Record the parameter type and any other attributes.
- NestTy = *I;
- NestAttr = NestAttrs.getParamAttributes(NestIdx);
- break;
- }
-
- if (NestTy) {
- Instruction *Caller = CS.getInstruction();
- std::vector<Value*> NewArgs;
- NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);
-
- SmallVector<AttributeWithIndex, 8> NewAttrs;
- NewAttrs.reserve(Attrs.getNumSlots() + 1);
-
- // Insert the nest argument into the call argument list, which may
- // mean appending it. Likewise for attributes.
-
- // Add any result attributes.
- if (Attributes Attr = Attrs.getRetAttributes())
- NewAttrs.push_back(AttributeWithIndex::get(0, Attr));
-
- {
- unsigned Idx = 1;
- CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
- do {
- if (Idx == NestIdx) {
- // Add the chain argument and attributes.
- Value *NestVal = Tramp->getOperand(3);
- if (NestVal->getType() != NestTy)
- NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
- NewArgs.push_back(NestVal);
- NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
- }
-
- if (I == E)
- break;
-
- // Add the original argument and attributes.
- NewArgs.push_back(*I);
- if (Attributes Attr = Attrs.getParamAttributes(Idx))
- NewAttrs.push_back
- (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));
-
- ++Idx, ++I;
- } while (1);
- }
-
- // Add any function attributes.
- if (Attributes Attr = Attrs.getFnAttributes())
- NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));
-
- // The trampoline may have been bitcast to a bogus type (FTy).
- // Handle this by synthesizing a new function type, equal to FTy
- // with the chain parameter inserted.
-
- std::vector<const Type*> NewTypes;
- NewTypes.reserve(FTy->getNumParams()+1);
-
- // Insert the chain's type into the list of parameter types, which may
- // mean appending it.
- {
- unsigned Idx = 1;
- FunctionType::param_iterator I = FTy->param_begin(),
- E = FTy->param_end();
-
- do {
- if (Idx == NestIdx)
- // Add the chain's type.
- NewTypes.push_back(NestTy);
-
- if (I == E)
- break;
-
- // Add the original type.
- NewTypes.push_back(*I);
-
- ++Idx, ++I;
- } while (1);
- }
-
- // Replace the trampoline call with a direct call. Let the generic
- // code sort out any function type mismatches.
- FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
- FTy->isVarArg());
- Constant *NewCallee =
- NestF->getType() == PointerType::getUnqual(NewFTy) ?
- NestF : ConstantExpr::getBitCast(NestF,
- PointerType::getUnqual(NewFTy));
- const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
- NewAttrs.end());
-
- Instruction *NewCaller;
- if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
- NewCaller = InvokeInst::Create(NewCallee,
- II->getNormalDest(), II->getUnwindDest(),
- NewArgs.begin(), NewArgs.end(),
- Caller->getName(), Caller);
- cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
- cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
- } else {
- NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
- Caller->getName(), Caller);
- if (cast<CallInst>(Caller)->isTailCall())
- cast<CallInst>(NewCaller)->setTailCall();
- cast<CallInst>(NewCaller)->
- setCallingConv(cast<CallInst>(Caller)->getCallingConv());
- cast<CallInst>(NewCaller)->setAttributes(NewPAL);
- }
- if (!Caller->getType()->isVoidTy())
- Caller->replaceAllUsesWith(NewCaller);
- Caller->eraseFromParent();
- Worklist.Remove(Caller);
- return 0;
- }
- }
-
- // Replace the trampoline call with a direct call. Since there is no 'nest'
- // parameter, there is no need to adjust the argument list. Let the generic
- // code sort out any function type mismatches.
- Constant *NewCallee =
- NestF->getType() == PTy ? NestF :
- ConstantExpr::getBitCast(NestF, PTy);
- CS.setCalledFunction(NewCallee);
- return CS.getInstruction();
-}
-
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
deleted file mode 100644
index a68fc6d..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ /dev/null
@@ -1,1352 +0,0 @@
-//===- InstCombineCasts.cpp -----------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the visit functions for cast operations.
-//
-//===----------------------------------------------------------------------===//
-
-#include "InstCombine.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Support/PatternMatch.h"
-using namespace llvm;
-using namespace PatternMatch;
-
-/// DecomposeSimpleLinearExpr - Analyze 'Val', seeing if it is a simple linear
-/// expression. If so, decompose it, returning some value X, such that Val is
-/// X*Scale+Offset.
-///
-static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
- int &Offset) {
- assert(Val->getType()->isIntegerTy(32) && "Unexpected allocation size type!");
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
- Offset = CI->getZExtValue();
- Scale = 0;
- return ConstantInt::get(Type::getInt32Ty(Val->getContext()), 0);
- }
-
- if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
- if (I->getOpcode() == Instruction::Shl) {
- // This is a value scaled by '1 << the shift amt'.
- Scale = 1U << RHS->getZExtValue();
- Offset = 0;
- return I->getOperand(0);
- }
-
- if (I->getOpcode() == Instruction::Mul) {
- // This value is scaled by 'RHS'.
- Scale = RHS->getZExtValue();
- Offset = 0;
- return I->getOperand(0);
- }
-
- if (I->getOpcode() == Instruction::Add) {
- // We have X+C. Check to see if we really have (X*C2)+C1,
- // where C1 is divisible by C2.
- unsigned SubScale;
- Value *SubVal =
- DecomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
- Offset += RHS->getZExtValue();
- Scale = SubScale;
- return SubVal;
- }
- }
- }
-
- // Otherwise, we can't look past this.
- Scale = 1;
- Offset = 0;
- return Val;
-}
-
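For example (value names invented), DecomposeSimpleLinearExpr would report:

    %a = shl i32 %n, 3      ; X = %n, Scale = 8,  Offset = 0
    %b = mul i32 %n, 12     ; X = %n, Scale = 12, Offset = 0
    %c = add i32 %b, 4      ; X = %n, Scale = 12, Offset = 4
    ; a bare constant such as i32 20 gives X = 0, Scale = 0, Offset = 20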
-/// PromoteCastOfAllocation - If we find a cast of an allocation instruction,
-/// try to eliminate the cast by moving the type information into the alloc.
-Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
- AllocaInst &AI) {
- // This requires TargetData to get the alloca alignment and size information.
- if (!TD) return 0;
-
- const PointerType *PTy = cast<PointerType>(CI.getType());
-
- BuilderTy AllocaBuilder(*Builder);
- AllocaBuilder.SetInsertPoint(AI.getParent(), &AI);
-
- // Get the type really allocated and the type casted to.
- const Type *AllocElTy = AI.getAllocatedType();
- const Type *CastElTy = PTy->getElementType();
- if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;
-
- unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
- unsigned CastElTyAlign = TD->getABITypeAlignment(CastElTy);
- if (CastElTyAlign < AllocElTyAlign) return 0;
-
- // If the allocation has multiple uses, only promote it if we are strictly
- // increasing the alignment of the resultant allocation. If we keep it the
- // same, we open the door to infinite loops of various kinds. (A reference
- // from a dbg.declare doesn't count as a use for this purpose.)
- if (!AI.hasOneUse() && !hasOneUsePlusDeclare(&AI) &&
- CastElTyAlign == AllocElTyAlign) return 0;
-
- uint64_t AllocElTySize = TD->getTypeAllocSize(AllocElTy);
- uint64_t CastElTySize = TD->getTypeAllocSize(CastElTy);
- if (CastElTySize == 0 || AllocElTySize == 0) return 0;
-
- // See if we can satisfy the modulus by pulling a scale out of the array
- // size argument.
- unsigned ArraySizeScale;
- int ArrayOffset;
- Value *NumElements = // See if the array size is a decomposable linear expr.
- DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);
-
- // If we can now satisfy the modulus, by using a non-1 scale, we really can
- // do the xform.
- if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
- (AllocElTySize*ArrayOffset ) % CastElTySize != 0) return 0;
-
- unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
- Value *Amt = 0;
- if (Scale == 1) {
- Amt = NumElements;
- } else {
- Amt = ConstantInt::get(Type::getInt32Ty(CI.getContext()), Scale);
- // Insert before the alloca, not before the cast.
- Amt = AllocaBuilder.CreateMul(Amt, NumElements, "tmp");
- }
-
- if (int Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
- Value *Off = ConstantInt::get(Type::getInt32Ty(CI.getContext()),
- Offset, true);
- Amt = AllocaBuilder.CreateAdd(Amt, Off, "tmp");
- }
-
- AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
- New->setAlignment(AI.getAlignment());
- New->takeName(&AI);
-
- // If the allocation has one real use plus a dbg.declare, just remove the
- // declare.
- if (DbgDeclareInst *DI = hasOneUsePlusDeclare(&AI)) {
- EraseInstFromFunction(*(Instruction*)DI);
- }
- // If the allocation has multiple real uses, insert a cast and change all
- // things that used it to use the new cast. This will also hack on CI, but it
- // will die soon.
- else if (!AI.hasOneUse()) {
- // New is the allocation instruction, pointer typed. AI is the original
- // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
- Value *NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast");
- AI.replaceAllUsesWith(NewCast);
- }
- return ReplaceInstUsesWith(CI, New);
-}
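A minimal sketch of the rewrite this enables (names invented; assumes a TargetData where i32 is 4 bytes with 4-byte alignment): an alloca that is only reached through a bitcast, such as

    %buf = alloca [4 x i8]
    %p   = bitcast [4 x i8]* %buf to i32*

is replaced by an alloca of the cast-to element type, and the bitcast's uses are pointed at it:

    %buf = alloca i32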
-
-
-
-/// EvaluateInDifferentType - Given an expression that
-/// CanEvaluateTruncated or CanEvaluateSExtd returns true for, actually
-/// insert the code to evaluate the expression.
-Value *InstCombiner::EvaluateInDifferentType(Value *V, const Type *Ty,
- bool isSigned) {
- if (Constant *C = dyn_cast<Constant>(V)) {
- C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
- // If we got a constantexpr back, try to simplify it with TD info.
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- C = ConstantFoldConstantExpression(CE, TD);
- return C;
- }
-
- // Otherwise, it must be an instruction.
- Instruction *I = cast<Instruction>(V);
- Instruction *Res = 0;
- unsigned Opc = I->getOpcode();
- switch (Opc) {
- case Instruction::Add:
- case Instruction::Sub:
- case Instruction::Mul:
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor:
- case Instruction::AShr:
- case Instruction::LShr:
- case Instruction::Shl:
- case Instruction::UDiv:
- case Instruction::URem: {
- Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
- Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
- Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
- break;
- }
- case Instruction::Trunc:
- case Instruction::ZExt:
- case Instruction::SExt:
- // If the source type of the cast is the type we're trying for then we can
- // just return the source. There's no need to insert it because it is not
- // new.
- if (I->getOperand(0)->getType() == Ty)
- return I->getOperand(0);
-
- // Otherwise, must be the same type of cast, so just reinsert a new one.
- // This also handles the case of zext(trunc(x)) -> zext(x).
- Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
- Opc == Instruction::SExt);
- break;
- case Instruction::Select: {
- Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
- Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
- Res = SelectInst::Create(I->getOperand(0), True, False);
- break;
- }
- case Instruction::PHI: {
- PHINode *OPN = cast<PHINode>(I);
- PHINode *NPN = PHINode::Create(Ty);
- for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
- Value *V =EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
- NPN->addIncoming(V, OPN->getIncomingBlock(i));
- }
- Res = NPN;
- break;
- }
- default:
- // TODO: Can handle more cases here.
- llvm_unreachable("Unreachable!");
- break;
- }
-
- Res->takeName(I);
- return InsertNewInstBefore(Res, *I);
-}
-
-
-/// This function is a wrapper around CastInst::isEliminableCastPair. It
-/// simply extracts arguments and returns what that function returns.
-static Instruction::CastOps
-isEliminableCastPair(
- const CastInst *CI, ///< The first cast instruction
- unsigned opcode, ///< The opcode of the second cast instruction
- const Type *DstTy, ///< The target type for the second cast instruction
- TargetData *TD ///< The target data for pointer size
-) {
-
- const Type *SrcTy = CI->getOperand(0)->getType(); // A from above
- const Type *MidTy = CI->getType(); // B from above
-
- // Get the opcodes of the two Cast instructions
- Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
- Instruction::CastOps secondOp = Instruction::CastOps(opcode);
-
- unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
- DstTy,
- TD ? TD->getIntPtrType(CI->getContext()) : 0);
-
- // We don't want to form an inttoptr or ptrtoint that converts to an integer
- // type that differs from the pointer size.
- if ((Res == Instruction::IntToPtr &&
- (!TD || SrcTy != TD->getIntPtrType(CI->getContext()))) ||
- (Res == Instruction::PtrToInt &&
- (!TD || DstTy != TD->getIntPtrType(CI->getContext()))))
- Res = 0;
-
- return Instruction::CastOps(Res);
-}
-
-/// ShouldOptimizeCast - Return true if the cast from "V to Ty" actually
-/// results in any code being generated and is interesting to optimize out. If
-/// the cast can be eliminated by some other simple transformation, we prefer
-/// to do the simplification first.
-bool InstCombiner::ShouldOptimizeCast(Instruction::CastOps opc, const Value *V,
- const Type *Ty) {
- // Noop casts and casts of constants should be eliminated trivially.
- if (V->getType() == Ty || isa<Constant>(V)) return false;
-
- // If this is another cast that can be eliminated, we prefer to have it
- // eliminated.
- if (const CastInst *CI = dyn_cast<CastInst>(V))
- if (isEliminableCastPair(CI, opc, Ty, TD))
- return false;
-
- // If this is a vector sext from a compare, then we don't want to break the
- // idiom where each element of the extended vector is either zero or all ones.
- if (opc == Instruction::SExt && isa<CmpInst>(V) && Ty->isVectorTy())
- return false;
-
- return true;
-}
-
-
-/// @brief Implement the transforms common to all CastInst visitors.
-Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
- Value *Src = CI.getOperand(0);
-
- // Many cases of "cast of a cast" are eliminable. If it's eliminable we just
- // eliminate it now.
- if (CastInst *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
- if (Instruction::CastOps opc =
- isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), TD)) {
- // The first cast (CSrc) is eliminable so we need to fix up or replace
- // the second cast (CI). CSrc will then have a good chance of being dead.
- return CastInst::Create(opc, CSrc->getOperand(0), CI.getType());
- }
- }
-
- // If we are casting a select then fold the cast into the select
- if (SelectInst *SI = dyn_cast<SelectInst>(Src))
- if (Instruction *NV = FoldOpIntoSelect(CI, SI))
- return NV;
-
- // If we are casting a PHI then fold the cast into the PHI
- if (isa<PHINode>(Src)) {
- // We don't do this if this would create a PHI node with an illegal type if
- // it is currently legal.
- if (!Src->getType()->isIntegerTy() ||
- !CI.getType()->isIntegerTy() ||
- ShouldChangeType(CI.getType(), Src->getType()))
- if (Instruction *NV = FoldOpIntoPhi(CI))
- return NV;
- }
-
- return 0;
-}
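For instance, the cast-of-a-cast case collapses pairs such as (names invented)

    %w = zext i8 %x to i16
    %y = zext i16 %w to i32

into a single cast, leaving the first one dead:

    %y = zext i8 %x to i32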
-
-/// CanEvaluateTruncated - Return true if we can evaluate the specified
-/// expression tree as type Ty instead of its larger type, and arrive with the
-/// same value. This is used by code that tries to eliminate truncates.
-///
-/// Ty will always be a type smaller than V. We should return true if trunc(V)
-/// can be computed by computing V in the smaller type. If V is an instruction,
-/// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only
-/// makes sense if x and y can be efficiently truncated.
-///
-/// This function works on both vectors and scalars.
-///
-static bool CanEvaluateTruncated(Value *V, const Type *Ty) {
- // We can always evaluate constants in another type.
- if (isa<Constant>(V))
- return true;
-
- Instruction *I = dyn_cast<Instruction>(V);
- if (!I) return false;
-
- const Type *OrigTy = V->getType();
-
- // If this is an extension from the dest type, we can eliminate it, even if it
- // has multiple uses.
- if ((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
- I->getOperand(0)->getType() == Ty)
- return true;
-
- // We can't extend or shrink something that has multiple uses: doing so would
- // require duplicating the instruction in general, which isn't profitable.
- if (!I->hasOneUse()) return false;
-
- unsigned Opc = I->getOpcode();
- switch (Opc) {
- case Instruction::Add:
- case Instruction::Sub:
- case Instruction::Mul:
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor:
- // These operators can all arbitrarily be extended or truncated.
- return CanEvaluateTruncated(I->getOperand(0), Ty) &&
- CanEvaluateTruncated(I->getOperand(1), Ty);
-
- case Instruction::UDiv:
- case Instruction::URem: {
- // UDiv and URem can be truncated if all the truncated bits are zero.
- uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
- uint32_t BitWidth = Ty->getScalarSizeInBits();
- if (BitWidth < OrigBitWidth) {
- APInt Mask = APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth);
- if (MaskedValueIsZero(I->getOperand(0), Mask) &&
- MaskedValueIsZero(I->getOperand(1), Mask)) {
- return CanEvaluateTruncated(I->getOperand(0), Ty) &&
- CanEvaluateTruncated(I->getOperand(1), Ty);
- }
- }
- break;
- }
- case Instruction::Shl:
- // If we are truncating the result of this SHL, and if it's a shift of a
- // constant amount, we can always perform a SHL in a smaller type.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
- uint32_t BitWidth = Ty->getScalarSizeInBits();
- if (CI->getLimitedValue(BitWidth) < BitWidth)
- return CanEvaluateTruncated(I->getOperand(0), Ty);
- }
- break;
- case Instruction::LShr:
- // If this is a truncate of a logical shr, we can truncate it to a smaller
- // lshr iff we know that the bits we would otherwise be shifting in are
- // already zeros.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
- uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
- uint32_t BitWidth = Ty->getScalarSizeInBits();
- if (MaskedValueIsZero(I->getOperand(0),
- APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
- CI->getLimitedValue(BitWidth) < BitWidth) {
- return CanEvaluateTruncated(I->getOperand(0), Ty);
- }
- }
- break;
- case Instruction::Trunc:
- // trunc(trunc(x)) -> trunc(x)
- return true;
- case Instruction::Select: {
- SelectInst *SI = cast<SelectInst>(I);
- return CanEvaluateTruncated(SI->getTrueValue(), Ty) &&
- CanEvaluateTruncated(SI->getFalseValue(), Ty);
- }
- case Instruction::PHI: {
- // We can change a phi if we can change all operands. Note that we never
- // get into trouble with cyclic PHIs here because we only consider
- // instructions with a single use.
- PHINode *PN = cast<PHINode>(I);
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- if (!CanEvaluateTruncated(PN->getIncomingValue(i), Ty))
- return false;
- return true;
- }
- default:
- // TODO: Can handle more cases here.
- break;
- }
-
- return false;
-}
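As a small example of what this enables (names invented), a truncate of an expression whose leaves are constants or extensions from the destination type, such as

    %a32 = zext i16 %a to i32
    %s   = add i32 %a32, 5
    %t   = trunc i32 %s to i16

can be evaluated directly in i16 (see visitTrunc below), roughly:

    %t = add i16 %a, 5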
-
-Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
- if (Instruction *Result = commonCastTransforms(CI))
- return Result;
-
- // See if we can simplify any instructions used by the input whose sole
- // purpose is to compute bits we don't care about.
- if (SimplifyDemandedInstructionBits(CI))
- return &CI;
-
- Value *Src = CI.getOperand(0);
- const Type *DestTy = CI.getType(), *SrcTy = Src->getType();
-
- // Attempt to truncate the entire input expression tree to the destination
- // type. Only do this if the dest type is a simple type, don't convert the
- // expression tree to something weird like i93 unless the source is also
- // strange.
- if ((DestTy->isVectorTy() || ShouldChangeType(SrcTy, DestTy)) &&
- CanEvaluateTruncated(Src, DestTy)) {
-
- // If this cast is a truncate, evaluating in a different type always
- // eliminates the cast, so it is always a win.
- DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
- " to avoid cast: " << CI);
- Value *Res = EvaluateInDifferentType(Src, DestTy, false);
- assert(Res->getType() == DestTy);
- return ReplaceInstUsesWith(CI, Res);
- }
-
- // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0), likewise for vector.
- if (DestTy->getScalarSizeInBits() == 1) {
- Constant *One = ConstantInt::get(Src->getType(), 1);
- Src = Builder->CreateAnd(Src, One, "tmp");
- Value *Zero = Constant::getNullValue(Src->getType());
- return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
- }
-
- return 0;
-}
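The i1 canonicalization at the end corresponds to, roughly (names invented),

    %b = trunc i32 %x to i1

becoming

    %x.m = and i32 %x, 1
    %b   = icmp ne i32 %x.m, 0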
-
-/// transformZExtICmp - Transform (zext icmp) to bitwise / integer operations
-/// in order to eliminate the icmp.
-Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
- bool DoXform) {
- // If we are just checking for a icmp eq of a single bit and zext'ing it
- // to an integer, then shift the bit to the appropriate place and then
- // cast to integer to avoid the comparison.
- if (ConstantInt *Op1C = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
- const APInt &Op1CV = Op1C->getValue();
-
- // zext (x <s 0) to i32 --> x>>u31 true if signbit set.
- // zext (x >s -1) to i32 --> (x>>u31)^1 true if signbit clear.
- if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV == 0) ||
- (ICI->getPredicate() == ICmpInst::ICMP_SGT &&Op1CV.isAllOnesValue())) {
- if (!DoXform) return ICI;
-
- Value *In = ICI->getOperand(0);
- Value *Sh = ConstantInt::get(In->getType(),
- In->getType()->getScalarSizeInBits()-1);
- In = Builder->CreateLShr(In, Sh, In->getName()+".lobit");
- if (In->getType() != CI.getType())
- In = Builder->CreateIntCast(In, CI.getType(), false/*ZExt*/, "tmp");
-
- if (ICI->getPredicate() == ICmpInst::ICMP_SGT) {
- Constant *One = ConstantInt::get(In->getType(), 1);
- In = Builder->CreateXor(In, One, In->getName()+".not");
- }
-
- return ReplaceInstUsesWith(CI, In);
- }
-
-
-
- // zext (X == 0) to i32 --> X^1 iff X has only the low bit set.
- // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
- // zext (X == 1) to i32 --> X iff X has only the low bit set.
- // zext (X == 2) to i32 --> X>>1 iff X has only the 2nd bit set.
- // zext (X != 0) to i32 --> X iff X has only the low bit set.
- // zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set.
- // zext (X != 1) to i32 --> X^1 iff X has only the low bit set.
- // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
- if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
- // This only works for EQ and NE
- ICI->isEquality()) {
- // If Op1C some other power of two, convert:
- uint32_t BitWidth = Op1C->getType()->getBitWidth();
- APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- APInt TypeMask(APInt::getAllOnesValue(BitWidth));
- ComputeMaskedBits(ICI->getOperand(0), TypeMask, KnownZero, KnownOne);
-
- APInt KnownZeroMask(~KnownZero);
- if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
- if (!DoXform) return ICI;
-
- bool isNE = ICI->getPredicate() == ICmpInst::ICMP_NE;
- if (Op1CV != 0 && (Op1CV != KnownZeroMask)) {
- // (X&4) == 2 --> false
- // (X&4) != 2 --> true
- Constant *Res = ConstantInt::get(Type::getInt1Ty(CI.getContext()),
- isNE);
- Res = ConstantExpr::getZExt(Res, CI.getType());
- return ReplaceInstUsesWith(CI, Res);
- }
-
- uint32_t ShiftAmt = KnownZeroMask.logBase2();
- Value *In = ICI->getOperand(0);
- if (ShiftAmt) {
- // Perform a logical shr by shiftamt.
- // Insert the shift to put the result in the low bit.
- In = Builder->CreateLShr(In, ConstantInt::get(In->getType(),ShiftAmt),
- In->getName()+".lobit");
- }
-
- if ((Op1CV != 0) == isNE) { // Toggle the low bit.
- Constant *One = ConstantInt::get(In->getType(), 1);
- In = Builder->CreateXor(In, One, "tmp");
- }
-
- if (CI.getType() == In->getType())
- return ReplaceInstUsesWith(CI, In);
- else
- return CastInst::CreateIntegerCast(In, CI.getType(), false/*ZExt*/);
- }
- }
- }
-
- // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
- // It is also profitable to transform icmp eq into not(xor(A, B)) because that
- // may lead to additional simplifications.
- if (ICI->isEquality() && CI.getType() == ICI->getOperand(0)->getType()) {
- if (const IntegerType *ITy = dyn_cast<IntegerType>(CI.getType())) {
- uint32_t BitWidth = ITy->getBitWidth();
- Value *LHS = ICI->getOperand(0);
- Value *RHS = ICI->getOperand(1);
-
- APInt KnownZeroLHS(BitWidth, 0), KnownOneLHS(BitWidth, 0);
- APInt KnownZeroRHS(BitWidth, 0), KnownOneRHS(BitWidth, 0);
- APInt TypeMask(APInt::getAllOnesValue(BitWidth));
- ComputeMaskedBits(LHS, TypeMask, KnownZeroLHS, KnownOneLHS);
- ComputeMaskedBits(RHS, TypeMask, KnownZeroRHS, KnownOneRHS);
-
- if (KnownZeroLHS == KnownZeroRHS && KnownOneLHS == KnownOneRHS) {
- APInt KnownBits = KnownZeroLHS | KnownOneLHS;
- APInt UnknownBit = ~KnownBits;
- if (UnknownBit.countPopulation() == 1) {
- if (!DoXform) return ICI;
-
- Value *Result = Builder->CreateXor(LHS, RHS);
-
- // Mask off any bits that are set and won't be shifted away.
- if (KnownOneLHS.uge(UnknownBit))
- Result = Builder->CreateAnd(Result,
- ConstantInt::get(ITy, UnknownBit));
-
- // Shift the bit we're testing down to the lsb.
- Result = Builder->CreateLShr(
- Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));
-
- if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
- Result = Builder->CreateXor(Result, ConstantInt::get(ITy, 1));
- Result->takeName(ICI);
- return ReplaceInstUsesWith(CI, Result);
- }
- }
- }
- }
-
- return 0;
-}
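For example, the sign-bit case described above turns (names invented)

    %c = icmp slt i32 %x, 0
    %r = zext i1 %c to i32

into a single shift of the sign bit down to the low bit:

    %r = lshr i32 %x, 31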
-
-/// CanEvaluateZExtd - Determine if the specified value can be computed in the
-/// specified wider type and produce the same low bits. If not, return false.
-///
-/// If this function returns true, it can also return a non-zero number of bits
-/// (in BitsToClear) which indicates that the value it computes is correct for
-/// the zero extend, but that the additional BitsToClear bits need to be zero'd
-/// out. For example, to promote something like:
-///
-/// %B = trunc i64 %A to i32
-/// %C = lshr i32 %B, 8
-/// %E = zext i32 %C to i64
-///
-/// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
-/// set to 8 to indicate that the promoted value needs to have bits 24-31
-/// cleared in addition to bits 32-63. Since an 'and' will be generated to
-/// clear the top bits anyway, doing this has no extra cost.
-///
-/// This function works on both vectors and scalars.
-static bool CanEvaluateZExtd(Value *V, const Type *Ty, unsigned &BitsToClear) {
- BitsToClear = 0;
- if (isa<Constant>(V))
- return true;
-
- Instruction *I = dyn_cast<Instruction>(V);
- if (!I) return false;
-
- // If the input is a truncate from the destination type, we can trivially
- // eliminate it, even if it has multiple uses.
- // FIXME: This is currently disabled until codegen can handle this without
- // pessimizing code, PR5997.
- if (0 && isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty)
- return true;
-
- // We can't extend or shrink something that has multiple uses: doing so would
- // require duplicating the instruction in general, which isn't profitable.
- if (!I->hasOneUse()) return false;
-
- unsigned Opc = I->getOpcode(), Tmp;
- switch (Opc) {
- case Instruction::ZExt: // zext(zext(x)) -> zext(x).
- case Instruction::SExt: // zext(sext(x)) -> sext(x).
- case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
- return true;
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor:
- case Instruction::Add:
- case Instruction::Sub:
- case Instruction::Mul:
- case Instruction::Shl:
- if (!CanEvaluateZExtd(I->getOperand(0), Ty, BitsToClear) ||
- !CanEvaluateZExtd(I->getOperand(1), Ty, Tmp))
- return false;
- // These can all be promoted if neither operand has 'bits to clear'.
- if (BitsToClear == 0 && Tmp == 0)
- return true;
-
- // If the operation is an AND/OR/XOR and the bits to clear are zero in the
- // other side, BitsToClear is ok.
- if (Tmp == 0 &&
- (Opc == Instruction::And || Opc == Instruction::Or ||
- Opc == Instruction::Xor)) {
- // We use MaskedValueIsZero here for generality, but the case we care
- // about the most is constant RHS.
- unsigned VSize = V->getType()->getScalarSizeInBits();
- if (MaskedValueIsZero(I->getOperand(1),
- APInt::getHighBitsSet(VSize, BitsToClear)))
- return true;
- }
-
- // Otherwise, we don't know how to analyze this BitsToClear case yet.
- return false;
-
- case Instruction::LShr:
- // We can promote lshr(x, cst) if we can promote x. This requires the
- // ultimate 'and' to clear out the high zero bits we're clearing out though.
- if (ConstantInt *Amt = dyn_cast<ConstantInt>(I->getOperand(1))) {
- if (!CanEvaluateZExtd(I->getOperand(0), Ty, BitsToClear))
- return false;
- BitsToClear += Amt->getZExtValue();
- if (BitsToClear > V->getType()->getScalarSizeInBits())
- BitsToClear = V->getType()->getScalarSizeInBits();
- return true;
- }
- // Cannot promote variable LSHR.
- return false;
- case Instruction::Select:
- if (!CanEvaluateZExtd(I->getOperand(1), Ty, Tmp) ||
- !CanEvaluateZExtd(I->getOperand(2), Ty, BitsToClear) ||
- // TODO: If important, we could handle the case when the BitsToClear are
- // known zero in the disagreeing side.
- Tmp != BitsToClear)
- return false;
- return true;
-
- case Instruction::PHI: {
- // We can change a phi if we can change all operands. Note that we never
- // get into trouble with cyclic PHIs here because we only consider
- // instructions with a single use.
- PHINode *PN = cast<PHINode>(I);
- if (!CanEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear))
- return false;
- for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
- if (!CanEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp) ||
- // TODO: If important, we could handle the case when the BitsToClear
- // are known zero in the disagreeing input.
- Tmp != BitsToClear)
- return false;
- return true;
- }
- default:
- // TODO: Can handle more cases here.
- return false;
- }
-}
-
-Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
- // If this zero extend is only used by a truncate, let the truncate be
- // eliminated before we try to optimize this zext.
- if (CI.hasOneUse() && isa<TruncInst>(CI.use_back()))
- return 0;
-
- // If one of the common conversion will work, do it.
- if (Instruction *Result = commonCastTransforms(CI))
- return Result;
-
- // See if we can simplify any instructions used by the input whose sole
- // purpose is to compute bits we don't care about.
- if (SimplifyDemandedInstructionBits(CI))
- return &CI;
-
- Value *Src = CI.getOperand(0);
- const Type *SrcTy = Src->getType(), *DestTy = CI.getType();
-
- // Attempt to extend the entire input expression tree to the destination
- // type. Only do this if the dest type is a simple type, don't convert the
- // expression tree to something weird like i93 unless the source is also
- // strange.
- unsigned BitsToClear;
- if ((DestTy->isVectorTy() || ShouldChangeType(SrcTy, DestTy)) &&
- CanEvaluateZExtd(Src, DestTy, BitsToClear)) {
- assert(BitsToClear < SrcTy->getScalarSizeInBits() &&
- "Unreasonable BitsToClear");
-
- // Okay, we can transform this! Insert the new expression now.
- DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
- " to avoid zero extend: " << CI);
- Value *Res = EvaluateInDifferentType(Src, DestTy, false);
- assert(Res->getType() == DestTy);
-
- uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits()-BitsToClear;
- uint32_t DestBitSize = DestTy->getScalarSizeInBits();
-
- // If the high bits are already filled with zeros, just replace this
- // cast with the result.
- if (MaskedValueIsZero(Res, APInt::getHighBitsSet(DestBitSize,
- DestBitSize-SrcBitsKept)))
- return ReplaceInstUsesWith(CI, Res);
-
- // We need to emit an AND to clear the high bits.
- Constant *C = ConstantInt::get(Res->getType(),
- APInt::getLowBitsSet(DestBitSize, SrcBitsKept));
- return BinaryOperator::CreateAnd(Res, C);
- }
-
- // If this is a TRUNC followed by a ZEXT then we are dealing with integral
- // types and if the sizes are just right we can convert this into a logical
- // 'and' which will be much cheaper than the pair of casts.
- if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) { // A->B->C cast
- // TODO: Subsume this into EvaluateInDifferentType.
-
- // Get the sizes of the types involved. We know that the intermediate type
- // will be smaller than A or C, but don't know the relation between A and C.
- Value *A = CSrc->getOperand(0);
- unsigned SrcSize = A->getType()->getScalarSizeInBits();
- unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
- unsigned DstSize = CI.getType()->getScalarSizeInBits();
- // If we're actually extending zero bits, then if
- // SrcSize < DstSize: zext(a & mask)
- // SrcSize == DstSize: a & mask
- // SrcSize > DstSize: trunc(a) & mask
- if (SrcSize < DstSize) {
- APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
- Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
- Value *And = Builder->CreateAnd(A, AndConst, CSrc->getName()+".mask");
- return new ZExtInst(And, CI.getType());
- }
-
- if (SrcSize == DstSize) {
- APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
- return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
- AndValue));
- }
- if (SrcSize > DstSize) {
- Value *Trunc = Builder->CreateTrunc(A, CI.getType(), "tmp");
- APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
- return BinaryOperator::CreateAnd(Trunc,
- ConstantInt::get(Trunc->getType(),
- AndValue));
- }
- }
-
- if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
- return transformZExtICmp(ICI, CI);
-
- BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
- if (SrcI && SrcI->getOpcode() == Instruction::Or) {
- // zext (or icmp, icmp) --> or (zext icmp), (zext icmp) if at least one
- // of the (zext icmp) will be transformed.
- ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
- ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
- if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
- (transformZExtICmp(LHS, CI, false) ||
- transformZExtICmp(RHS, CI, false))) {
- Value *LCast = Builder->CreateZExt(LHS, CI.getType(), LHS->getName());
- Value *RCast = Builder->CreateZExt(RHS, CI.getType(), RHS->getName());
- return BinaryOperator::Create(Instruction::Or, LCast, RCast);
- }
- }
-
- // zext(trunc(t) & C) -> (t & zext(C)).
- if (SrcI && SrcI->getOpcode() == Instruction::And && SrcI->hasOneUse())
- if (ConstantInt *C = dyn_cast<ConstantInt>(SrcI->getOperand(1)))
- if (TruncInst *TI = dyn_cast<TruncInst>(SrcI->getOperand(0))) {
- Value *TI0 = TI->getOperand(0);
- if (TI0->getType() == CI.getType())
- return
- BinaryOperator::CreateAnd(TI0,
- ConstantExpr::getZExt(C, CI.getType()));
- }
-
- // zext((trunc(t) & C) ^ C) -> ((t & zext(C)) ^ zext(C)).
- if (SrcI && SrcI->getOpcode() == Instruction::Xor && SrcI->hasOneUse())
- if (ConstantInt *C = dyn_cast<ConstantInt>(SrcI->getOperand(1)))
- if (BinaryOperator *And = dyn_cast<BinaryOperator>(SrcI->getOperand(0)))
- if (And->getOpcode() == Instruction::And && And->hasOneUse() &&
- And->getOperand(1) == C)
- if (TruncInst *TI = dyn_cast<TruncInst>(And->getOperand(0))) {
- Value *TI0 = TI->getOperand(0);
- if (TI0->getType() == CI.getType()) {
- Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
- Value *NewAnd = Builder->CreateAnd(TI0, ZC, "tmp");
- return BinaryOperator::CreateXor(NewAnd, ZC);
- }
- }
-
- // zext (xor i1 X, true) to i32 --> xor (zext i1 X to i32), 1
- Value *X;
- if (SrcI && SrcI->hasOneUse() && SrcI->getType()->isIntegerTy(1) &&
- match(SrcI, m_Not(m_Value(X))) &&
- (!X->hasOneUse() || !isa<CmpInst>(X))) {
- Value *New = Builder->CreateZExt(X, CI.getType());
- return BinaryOperator::CreateXor(New, ConstantInt::get(CI.getType(), 1));
- }
-
- return 0;
-}
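As an example of the trunc-then-zext case (names invented), when the outer and original types match,

    %t = trunc i32 %x to i8
    %z = zext i8 %t to i32

becomes a single mask of the low bits:

    %z = and i32 %x, 255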
-
-/// CanEvaluateSExtd - Return true if we can take the specified value
-/// and return it as type Ty without inserting any new casts and without
-/// changing the value of the common low bits. This is used by code that tries
-/// to promote integer operations to a wider type, which will allow us to eliminate
-/// the extension.
-///
-/// This function works on both vectors and scalars.
-///
-static bool CanEvaluateSExtd(Value *V, const Type *Ty) {
- assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
- "Can't sign extend type to a smaller type");
- // If this is a constant, it can be trivially promoted.
- if (isa<Constant>(V))
- return true;
-
- Instruction *I = dyn_cast<Instruction>(V);
- if (!I) return false;
-
- // If this is a truncate from the dest type, we can trivially eliminate it,
- // even if it has multiple uses.
- // FIXME: This is currently disabled until codegen can handle this without
- // pessimizing code, PR5997.
- if (0 && isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty)
- return true;
-
- // We can't extend or shrink something that has multiple uses: doing so would
- // require duplicating the instruction in general, which isn't profitable.
- if (!I->hasOneUse()) return false;
-
- switch (I->getOpcode()) {
- case Instruction::SExt: // sext(sext(x)) -> sext(x)
- case Instruction::ZExt: // sext(zext(x)) -> zext(x)
- case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x)
- return true;
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor:
- case Instruction::Add:
- case Instruction::Sub:
- case Instruction::Mul:
- // These operators can all arbitrarily be extended if their inputs can.
- return CanEvaluateSExtd(I->getOperand(0), Ty) &&
- CanEvaluateSExtd(I->getOperand(1), Ty);
-
- //case Instruction::Shl: TODO
- //case Instruction::LShr: TODO
-
- case Instruction::Select:
- return CanEvaluateSExtd(I->getOperand(1), Ty) &&
- CanEvaluateSExtd(I->getOperand(2), Ty);
-
- case Instruction::PHI: {
- // We can change a phi if we can change all operands. Note that we never
- // get into trouble with cyclic PHIs here because we only consider
- // instructions with a single use.
- PHINode *PN = cast<PHINode>(I);
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- if (!CanEvaluateSExtd(PN->getIncomingValue(i), Ty)) return false;
- return true;
- }
- default:
- // TODO: Can handle more cases here.
- break;
- }
-
- return false;
-}
-
-Instruction *InstCombiner::visitSExt(SExtInst &CI) {
- // If this sign extend is only used by a truncate, let the truncate be
- // eliminated before we try to optimize this sext.
- if (CI.hasOneUse() && isa<TruncInst>(CI.use_back()))
- return 0;
-
- if (Instruction *I = commonCastTransforms(CI))
- return I;
-
- // See if we can simplify any instructions used by the input whose sole
- // purpose is to compute bits we don't care about.
- if (SimplifyDemandedInstructionBits(CI))
- return &CI;
-
- Value *Src = CI.getOperand(0);
- const Type *SrcTy = Src->getType(), *DestTy = CI.getType();
-
- // Attempt to extend the entire input expression tree to the destination
- // type. Only do this if the dest type is a simple type, don't convert the
- // expression tree to something weird like i93 unless the source is also
- // strange.
- if ((DestTy->isVectorTy() || ShouldChangeType(SrcTy, DestTy)) &&
- CanEvaluateSExtd(Src, DestTy)) {
- // Okay, we can transform this! Insert the new expression now.
- DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
- " to avoid sign extend: " << CI);
- Value *Res = EvaluateInDifferentType(Src, DestTy, true);
- assert(Res->getType() == DestTy);
-
- uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
- uint32_t DestBitSize = DestTy->getScalarSizeInBits();
-
- // If the high bits are already filled with sign bit, just replace this
- // cast with the result.
- if (ComputeNumSignBits(Res) > DestBitSize - SrcBitSize)
- return ReplaceInstUsesWith(CI, Res);
-
- // We need to emit a shl + ashr to do the sign extend.
- Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
- return BinaryOperator::CreateAShr(Builder->CreateShl(Res, ShAmt, "sext"),
- ShAmt);
- }
-
- // If this input is a trunc from our destination, then turn sext(trunc(x))
- // into shifts.
- if (TruncInst *TI = dyn_cast<TruncInst>(Src))
- if (TI->hasOneUse() && TI->getOperand(0)->getType() == DestTy) {
- uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
- uint32_t DestBitSize = DestTy->getScalarSizeInBits();
-
- // We need to emit a shl + ashr to do the sign extend.
- Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
- Value *Res = Builder->CreateShl(TI->getOperand(0), ShAmt, "sext");
- return BinaryOperator::CreateAShr(Res, ShAmt);
- }
-
-
- // (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if signed
- // (x >s -1) ? -1 : 0 -> ashr x, 31 -> all ones if not signed
- {
- ICmpInst::Predicate Pred; Value *CmpLHS; ConstantInt *CmpRHS;
- if (match(Src, m_ICmp(Pred, m_Value(CmpLHS), m_ConstantInt(CmpRHS)))) {
- // sext (x <s 0) to i32 --> x>>s31 true if signbit set.
- // sext (x >s -1) to i32 --> (x>>s31)^-1 true if signbit clear.
- if ((Pred == ICmpInst::ICMP_SLT && CmpRHS->isZero()) ||
- (Pred == ICmpInst::ICMP_SGT && CmpRHS->isAllOnesValue())) {
- Value *Sh = ConstantInt::get(CmpLHS->getType(),
- CmpLHS->getType()->getScalarSizeInBits()-1);
- Value *In = Builder->CreateAShr(CmpLHS, Sh, CmpLHS->getName()+".lobit");
- if (In->getType() != CI.getType())
- In = Builder->CreateIntCast(In, CI.getType(), true/*SExt*/, "tmp");
-
- if (Pred == ICmpInst::ICMP_SGT)
- In = Builder->CreateNot(In, In->getName()+".not");
- return ReplaceInstUsesWith(CI, In);
- }
- }
- }
-
-
- // If the input is a shl/ashr pair of a same constant, then this is a sign
- // extension from a smaller value. If we could trust arbitrary bitwidth
- // integers, we could turn this into a truncate to the smaller bit and then
- // use a sext for the whole extension. Since we don't, look deeper and check
- // for a truncate. If the source and dest are the same type, eliminate the
- // trunc and extend and just do shifts. For example, turn:
- // %a = trunc i32 %i to i8
- // %b = shl i8 %a, 6
- // %c = ashr i8 %b, 6
- // %d = sext i8 %c to i32
- // into:
- // %a = shl i32 %i, 30
- // %d = ashr i32 %a, 30
- Value *A = 0;
- // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
- ConstantInt *BA = 0, *CA = 0;
- if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_ConstantInt(BA)),
- m_ConstantInt(CA))) &&
- BA == CA && A->getType() == CI.getType()) {
- unsigned MidSize = Src->getType()->getScalarSizeInBits();
- unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
- unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize;
- Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt);
- A = Builder->CreateShl(A, ShAmtV, CI.getName());
- return BinaryOperator::CreateAShr(A, ShAmtV);
- }
-
- return 0;
-}
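For the sext-of-trunc case above (names invented), when the trunc's source type equals the sext's destination type,

    %t = trunc i32 %x to i8
    %s = sext i8 %t to i32

is turned into a shift pair that recreates the sign extension in place:

    %s.shl = shl i32 %x, 24
    %s     = ashr i32 %s.shl, 24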
-
-
-/// FitsInFPType - Return a Constant* for the specified FP constant if it fits
-/// in the specified FP type without changing its value.
-static Constant *FitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
- bool losesInfo;
- APFloat F = CFP->getValueAPF();
- (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
- if (!losesInfo)
- return ConstantFP::get(CFP->getContext(), F);
- return 0;
-}
-
-/// LookThroughFPExtensions - If this is an fp extension instruction, look
-/// through it until we get the source value.
-static Value *LookThroughFPExtensions(Value *V) {
- if (Instruction *I = dyn_cast<Instruction>(V))
- if (I->getOpcode() == Instruction::FPExt)
- return LookThroughFPExtensions(I->getOperand(0));
-
- // If this value is a constant, return the constant in the smallest FP type
- // that can accurately represent it. This allows us to turn
- // (float)((double)X+2.0) into x+2.0f.
- if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
- if (CFP->getType() == Type::getPPC_FP128Ty(V->getContext()))
- return V; // No constant folding of this.
- // See if the value can be truncated to float and then reextended.
- if (Value *V = FitsInFPType(CFP, APFloat::IEEEsingle))
- return V;
- if (CFP->getType()->isDoubleTy())
- return V; // Won't shrink.
- if (Value *V = FitsInFPType(CFP, APFloat::IEEEdouble))
- return V;
- // Don't try to shrink to various long double types.
- }
-
- return V;
-}
-
-Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
- if (Instruction *I = commonCastTransforms(CI))
- return I;
-
- // If we have fptrunc(fadd (fpextend x), (fpextend y)), where x and y are
- // smaller than the destination type, we can eliminate the truncate by doing
- // the add as the smaller type. This applies to fadd/fsub/fmul/fdiv as well
- // as many builtins (sqrt, etc).
- BinaryOperator *OpI = dyn_cast<BinaryOperator>(CI.getOperand(0));
- if (OpI && OpI->hasOneUse()) {
- switch (OpI->getOpcode()) {
- default: break;
- case Instruction::FAdd:
- case Instruction::FSub:
- case Instruction::FMul:
- case Instruction::FDiv:
- case Instruction::FRem:
- const Type *SrcTy = OpI->getType();
- Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0));
- Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1));
- if (LHSTrunc->getType() != SrcTy &&
- RHSTrunc->getType() != SrcTy) {
- unsigned DstSize = CI.getType()->getScalarSizeInBits();
- // If the source types were both smaller than the destination type of
- // the cast, do this xform.
- if (LHSTrunc->getType()->getScalarSizeInBits() <= DstSize &&
- RHSTrunc->getType()->getScalarSizeInBits() <= DstSize) {
- LHSTrunc = Builder->CreateFPExt(LHSTrunc, CI.getType());
- RHSTrunc = Builder->CreateFPExt(RHSTrunc, CI.getType());
- return BinaryOperator::Create(OpI->getOpcode(), LHSTrunc, RHSTrunc);
- }
- }
- break;
- }
- }
- return 0;
-}
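Roughly, the fptrunc-of-fadd case described above rewrites (names invented)

    %a.d = fpext float %a to double
    %b.d = fpext float %b to double
    %s.d = fadd double %a.d, %b.d
    %s   = fptrunc double %s.d to float

into the same operation performed directly at float:

    %s = fadd float %a, %b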
-
-Instruction *InstCombiner::visitFPExt(CastInst &CI) {
- return commonCastTransforms(CI);
-}
-
-Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
- Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
- if (OpI == 0)
- return commonCastTransforms(FI);
-
- // fptoui(uitofp(X)) --> X
- // fptoui(sitofp(X)) --> X
- // This is safe if the intermediate type has enough bits in its mantissa to
- // accurately represent all values of X. For example, do not do this with
- // i64->float->i64. This is also safe for sitofp case, because any negative
- // 'X' value would cause an undefined result for the fptoui.
- if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
- OpI->getOperand(0)->getType() == FI.getType() &&
- (int)FI.getType()->getScalarSizeInBits() < /*extra bit for sign */
- OpI->getType()->getFPMantissaWidth())
- return ReplaceInstUsesWith(FI, OpI->getOperand(0));
-
- return commonCastTransforms(FI);
-}
-
-Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
- Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
- if (OpI == 0)
- return commonCastTransforms(FI);
-
- // fptosi(sitofp(X)) --> X
- // fptosi(uitofp(X)) --> X
- // This is safe if the intermediate type has enough bits in its mantissa to
- // accurately represent all values of X. For example, do not do this with
- // i64->float->i64. This is also safe for sitofp case, because any negative
- // 'X' value would cause an undefined result for the fptoui.
- if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
- OpI->getOperand(0)->getType() == FI.getType() &&
- (int)FI.getType()->getScalarSizeInBits() <=
- OpI->getType()->getFPMantissaWidth())
- return ReplaceInstUsesWith(FI, OpI->getOperand(0));
-
- return commonCastTransforms(FI);
-}
-
-Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
- return commonCastTransforms(CI);
-}
-
-Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
- return commonCastTransforms(CI);
-}
-
-Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
- // If the source integer type is not the intptr_t type for this target, do a
- // trunc or zext to the intptr_t type, then inttoptr of it. This allows the
- // cast to be exposed to other transforms.
- if (TD) {
- if (CI.getOperand(0)->getType()->getScalarSizeInBits() >
- TD->getPointerSizeInBits()) {
- Value *P = Builder->CreateTrunc(CI.getOperand(0),
- TD->getIntPtrType(CI.getContext()), "tmp");
- return new IntToPtrInst(P, CI.getType());
- }
- if (CI.getOperand(0)->getType()->getScalarSizeInBits() <
- TD->getPointerSizeInBits()) {
- Value *P = Builder->CreateZExt(CI.getOperand(0),
- TD->getIntPtrType(CI.getContext()), "tmp");
- return new IntToPtrInst(P, CI.getType());
- }
- }
-
- if (Instruction *I = commonCastTransforms(CI))
- return I;
-
- return 0;
-}
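On a target whose pointers are 32 bits (illustrative assumption, names invented), this exposes the integer resize as a separate instruction, e.g.

    %p = inttoptr i64 %x to i8*

becomes

    %x.t = trunc i64 %x to i32
    %p   = inttoptr i32 %x.t to i8*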
-
-/// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
-Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
- Value *Src = CI.getOperand(0);
-
- if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
- // If casting the result of a getelementptr instruction with no offset, turn
- // this into a cast of the original pointer!
- if (GEP->hasAllZeroIndices()) {
- // Changing the cast operand is usually not a good idea but it is safe
- // here because the pointer operand is being replaced with another
- // pointer operand so the opcode doesn't need to change.
- Worklist.Add(GEP);
- CI.setOperand(0, GEP->getOperand(0));
- return &CI;
- }
-
- // If the GEP has a single use, and the base pointer is a bitcast, and the
- // GEP computes a constant offset, see if we can convert these three
- // instructions into fewer. This typically happens with unions and other
- // non-type-safe code.
- if (TD && GEP->hasOneUse() && isa<BitCastInst>(GEP->getOperand(0)) &&
- GEP->hasAllConstantIndices()) {
- // We are guaranteed to get a constant from EmitGEPOffset.
- ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(GEP));
- int64_t Offset = OffsetV->getSExtValue();
-
- // Get the base pointer input of the bitcast, and the type it points to.
- Value *OrigBase = cast<BitCastInst>(GEP->getOperand(0))->getOperand(0);
- const Type *GEPIdxTy =
- cast<PointerType>(OrigBase->getType())->getElementType();
- SmallVector<Value*, 8> NewIndices;
- if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices)) {
- // If we were able to index down into an element, create the GEP
- // and bitcast the result. This eliminates one bitcast, potentially
- // two.
- Value *NGEP = cast<GEPOperator>(GEP)->isInBounds() ?
- Builder->CreateInBoundsGEP(OrigBase,
- NewIndices.begin(), NewIndices.end()) :
- Builder->CreateGEP(OrigBase, NewIndices.begin(), NewIndices.end());
- NGEP->takeName(GEP);
-
- if (isa<BitCastInst>(CI))
- return new BitCastInst(NGEP, CI.getType());
- assert(isa<PtrToIntInst>(CI));
- return new PtrToIntInst(NGEP, CI.getType());
- }
- }
- }
-
- return commonCastTransforms(CI);
-}
-
-Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
- // If the destination integer type is not the intptr_t type for this target,
- // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
- // to be exposed to other transforms.
- if (TD) {
- if (CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits()) {
- Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
- TD->getIntPtrType(CI.getContext()),
- "tmp");
- return new TruncInst(P, CI.getType());
- }
- if (CI.getType()->getScalarSizeInBits() > TD->getPointerSizeInBits()) {
- Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
- TD->getIntPtrType(CI.getContext()),
- "tmp");
- return new ZExtInst(P, CI.getType());
- }
- }
-
- return commonPointerCastTransforms(CI);
-}
-
-Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
- // If the operands are integer typed then apply the integer transforms,
- // otherwise just apply the common ones.
- Value *Src = CI.getOperand(0);
- const Type *SrcTy = Src->getType();
- const Type *DestTy = CI.getType();
-
- // Get rid of casts from one type to the same type. These are useless and can
- // be replaced by the operand.
- if (DestTy == Src->getType())
- return ReplaceInstUsesWith(CI, Src);
-
- if (const PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
- const PointerType *SrcPTy = cast<PointerType>(SrcTy);
- const Type *DstElTy = DstPTy->getElementType();
- const Type *SrcElTy = SrcPTy->getElementType();
-
- // If the address spaces don't match, don't eliminate the bitcast, which is
- // required for changing types.
- if (SrcPTy->getAddressSpace() != DstPTy->getAddressSpace())
- return 0;
-
- // If we are casting an alloca to a pointer to a type of the same
- // size, rewrite the allocation instruction to allocate the "right" type.
- // There is no need to modify malloc calls because it is their bitcast that
- // needs to be cleaned up.
- if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
- if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
- return V;
-
- // If the source and destination are pointers, and this cast is equivalent
- // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
- // This can enhance SROA and other transforms that want type-safe pointers.
- Constant *ZeroUInt =
- Constant::getNullValue(Type::getInt32Ty(CI.getContext()));
- unsigned NumZeros = 0;
- while (SrcElTy != DstElTy &&
- isa<CompositeType>(SrcElTy) && !SrcElTy->isPointerTy() &&
- SrcElTy->getNumContainedTypes() /* not "{}" */) {
- SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(ZeroUInt);
- ++NumZeros;
- }
-
- // If we found a path from the src to dest, create the getelementptr now.
- if (SrcElTy == DstElTy) {
- SmallVector<Value*, 8> Idxs(NumZeros+1, ZeroUInt);
- return GetElementPtrInst::CreateInBounds(Src, Idxs.begin(), Idxs.end(),"",
- ((Instruction*)NULL));
- }
- }
-
- if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
- if (DestVTy->getNumElements() == 1 && !SrcTy->isVectorTy()) {
- Value *Elem = Builder->CreateBitCast(Src, DestVTy->getElementType());
- return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
- Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
- // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
- }
- }
-
- if (const VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
- if (SrcVTy->getNumElements() == 1 && !DestTy->isVectorTy()) {
- Value *Elem =
- Builder->CreateExtractElement(Src,
- Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
- return CastInst::Create(Instruction::BitCast, Elem, DestTy);
- }
- }
-
- if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Src)) {
- // Okay, we have (bitcast (shuffle ..)). Check to see if this is
- // a bitconvert to a vector with the same # elts.
- if (SVI->hasOneUse() && DestTy->isVectorTy() &&
- cast<VectorType>(DestTy)->getNumElements() ==
- SVI->getType()->getNumElements() &&
- SVI->getType()->getNumElements() ==
- cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements()) {
- BitCastInst *Tmp;
- // If either of the operands is a cast from CI.getType(), then
- // evaluating the shuffle in the casted destination's type will allow
- // us to eliminate at least one cast.
- if (((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(0))) &&
- Tmp->getOperand(0)->getType() == DestTy) ||
- ((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(1))) &&
- Tmp->getOperand(0)->getType() == DestTy)) {
- Value *LHS = Builder->CreateBitCast(SVI->getOperand(0), DestTy);
- Value *RHS = Builder->CreateBitCast(SVI->getOperand(1), DestTy);
- // Return a new shuffle vector. Use the same element ID's, as we
- // know the vector types match #elts.
- return new ShuffleVectorInst(LHS, RHS, SVI->getOperand(2));
- }
- }
- }
-
- if (SrcTy->isPointerTy())
- return commonPointerCastTransforms(CI);
- return commonCastTransforms(CI);
-}
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
deleted file mode 100644
index 72fd558..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ /dev/null
@@ -1,2476 +0,0 @@
-//===- InstCombineCompares.cpp --------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the visitICmp and visitFCmp functions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "InstCombine.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Analysis/MemoryBuiltins.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Support/ConstantRange.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/PatternMatch.h"
-using namespace llvm;
-using namespace PatternMatch;
-
-/// AddOne - Add one to a ConstantInt
-static Constant *AddOne(Constant *C) {
- return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
-}
-/// SubOne - Subtract one from a ConstantInt
-static Constant *SubOne(ConstantInt *C) {
- return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
-}
-
-static ConstantInt *ExtractElement(Constant *V, Constant *Idx) {
- return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
-}
-
-static bool HasAddOverflow(ConstantInt *Result,
- ConstantInt *In1, ConstantInt *In2,
- bool IsSigned) {
- if (IsSigned)
- if (In2->getValue().isNegative())
- return Result->getValue().sgt(In1->getValue());
- else
- return Result->getValue().slt(In1->getValue());
- else
- return Result->getValue().ult(In1->getValue());
-}
-
-/// AddWithOverflow - Compute Result = In1+In2, returning true if the result
-/// overflowed for this type.
-static bool AddWithOverflow(Constant *&Result, Constant *In1,
- Constant *In2, bool IsSigned = false) {
- Result = ConstantExpr::getAdd(In1, In2);
-
- if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
- if (HasAddOverflow(ExtractElement(Result, Idx),
- ExtractElement(In1, Idx),
- ExtractElement(In2, Idx),
- IsSigned))
- return true;
- }
- return false;
- }
-
- return HasAddOverflow(cast<ConstantInt>(Result),
- cast<ConstantInt>(In1), cast<ConstantInt>(In2),
- IsSigned);
-}
-
-static bool HasSubOverflow(ConstantInt *Result,
- ConstantInt *In1, ConstantInt *In2,
- bool IsSigned) {
- if (IsSigned)
- if (In2->getValue().isNegative())
- return Result->getValue().slt(In1->getValue());
- else
- return Result->getValue().sgt(In1->getValue());
- else
- return Result->getValue().ugt(In1->getValue());
-}
-
-/// SubWithOverflow - Compute Result = In1-In2, returning true if the result
-/// overflowed for this type.
-static bool SubWithOverflow(Constant *&Result, Constant *In1,
- Constant *In2, bool IsSigned = false) {
- Result = ConstantExpr::getSub(In1, In2);
-
- if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
- for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
- Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
- if (HasSubOverflow(ExtractElement(Result, Idx),
- ExtractElement(In1, Idx),
- ExtractElement(In2, Idx),
- IsSigned))
- return true;
- }
- return false;
- }
-
- return HasSubOverflow(cast<ConstantInt>(Result),
- cast<ConstantInt>(In1), cast<ConstantInt>(In2),
- IsSigned);
-}
-
-/// isSignBitCheck - Given an exploded icmp instruction, return true if the
-/// comparison only checks the sign bit. If it only checks the sign bit, set
-/// TrueIfSigned if the result of the comparison is true when the input value is
-/// negative.
-static bool isSignBitCheck(ICmpInst::Predicate pred, ConstantInt *RHS,
- bool &TrueIfSigned) {
- switch (pred) {
- case ICmpInst::ICMP_SLT: // True if LHS s< 0
- TrueIfSigned = true;
- return RHS->isZero();
- case ICmpInst::ICMP_SLE: // True if LHS s<= RHS and RHS == -1
- TrueIfSigned = true;
- return RHS->isAllOnesValue();
- case ICmpInst::ICMP_SGT: // True if LHS s> -1
- TrueIfSigned = false;
- return RHS->isAllOnesValue();
- case ICmpInst::ICMP_UGT:
- // True if LHS u> RHS and RHS == high-bit-mask - 1
- TrueIfSigned = true;
- return RHS->getValue() ==
- APInt::getSignedMaxValue(RHS->getType()->getPrimitiveSizeInBits());
- case ICmpInst::ICMP_UGE:
- // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
- TrueIfSigned = true;
- return RHS->getValue().isSignBit();
- default:
- return false;
- }
-}
-
-// isHighOnes - Return true if the constant is of the form 1+0+.
-// This is the same as lowones(~X).
-static bool isHighOnes(const ConstantInt *CI) {
- return (~CI->getValue() + 1).isPowerOf2();
-}
-
-/// ComputeSignedMinMaxValuesFromKnownBits - Given a signed integer type and a
-/// set of known zero and one bits, compute the maximum and minimum values that
-/// could have the specified known zero and known one bits, returning them in
-/// min/max.
-static void ComputeSignedMinMaxValuesFromKnownBits(const APInt& KnownZero,
- const APInt& KnownOne,
- APInt& Min, APInt& Max) {
- assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
- KnownZero.getBitWidth() == Min.getBitWidth() &&
- KnownZero.getBitWidth() == Max.getBitWidth() &&
- "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
- APInt UnknownBits = ~(KnownZero|KnownOne);
-
- // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
- // bit if it is unknown.
- Min = KnownOne;
- Max = KnownOne|UnknownBits;
-
- if (UnknownBits.isNegative()) { // Sign bit is unknown
- Min.set(Min.getBitWidth()-1);
- Max.clear(Max.getBitWidth()-1);
- }
-}
-
-// ComputeUnsignedMinMaxValuesFromKnownBits - Given an unsigned integer type and
-// a set of known zero and one bits, compute the maximum and minimum values that
-// could have the specified known zero and known one bits, returning them in
-// min/max.
-static void ComputeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
- const APInt &KnownOne,
- APInt &Min, APInt &Max) {
- assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
- KnownZero.getBitWidth() == Min.getBitWidth() &&
- KnownZero.getBitWidth() == Max.getBitWidth() &&
- "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
- APInt UnknownBits = ~(KnownZero|KnownOne);
-
- // The minimum value is when the unknown bits are all zeros.
- Min = KnownOne;
- // The maximum value is when the unknown bits are all ones.
- Max = KnownOne|UnknownBits;
-}
-
-
-
-/// FoldCmpLoadFromIndexedGlobal - Called when we see this pattern:
-/// cmp pred (load (gep GV, ...)), cmpcst
-/// where GV is a global variable with a constant initializer. Try to simplify
-/// this into some simple computation that does not need the load. For example
-/// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
-///
-/// If AndCst is non-null, then the loaded value is masked with that constant
-/// before doing the comparison. This handles cases like "A[i]&4 == 0".
-Instruction *InstCombiner::
-FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
- CmpInst &ICI, ConstantInt *AndCst) {
- // We need TD information to know the pointer size unless this is inbounds.
- if (!GEP->isInBounds() && TD == 0) return 0;
-
- ConstantArray *Init = dyn_cast<ConstantArray>(GV->getInitializer());
- if (Init == 0 || Init->getNumOperands() > 1024) return 0;
-
- // There are many forms of this optimization we can handle, for now, just do
- // the simple index into a single-dimensional array.
- //
- // Require: GEP GV, 0, i {{, constant indices}}
- if (GEP->getNumOperands() < 3 ||
- !isa<ConstantInt>(GEP->getOperand(1)) ||
- !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
- isa<Constant>(GEP->getOperand(2)))
- return 0;
-
- // Check that indices after the variable are constants and in-range for the
- // type they index. Collect the indices. This is typically for arrays of
- // structs.
- SmallVector<unsigned, 4> LaterIndices;
-
- const Type *EltTy = cast<ArrayType>(Init->getType())->getElementType();
- for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
- ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
- if (Idx == 0) return 0; // Variable index.
-
- uint64_t IdxVal = Idx->getZExtValue();
- if ((unsigned)IdxVal != IdxVal) return 0; // Too large array index.
-
- if (const StructType *STy = dyn_cast<StructType>(EltTy))
- EltTy = STy->getElementType(IdxVal);
- else if (const ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
- if (IdxVal >= ATy->getNumElements()) return 0;
- EltTy = ATy->getElementType();
- } else {
- return 0; // Unknown type.
- }
-
- LaterIndices.push_back(IdxVal);
- }
-
- enum { Overdefined = -3, Undefined = -2 };
-
- // Variables for our state machines.
-
- // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
- // "i == 47 | i == 87", where 47 is the first index the condition is true for,
- // and 87 is the second (and last) index. FirstTrueElement is -2 when
- // undefined, otherwise set to the first true element. SecondTrueElement is
- // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
- int FirstTrueElement = Undefined, SecondTrueElement = Undefined;
-
- // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
- // form "i != 47 & i != 87". Same state transitions as for true elements.
- int FirstFalseElement = Undefined, SecondFalseElement = Undefined;
-
- /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
- /// define a state machine that triggers for ranges of values that the index
- /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
- /// This is -2 when undefined, -3 when overdefined, and otherwise the last
- /// index in the range (inclusive). We use -2 for undefined here because we
- /// use relative comparisons and don't want 0-1 to match -1.
- int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;
-
- // MagicBitvector - This is a magic bitvector where we set a bit if the
- // comparison is true for element 'i'. If there are 64 elements or less in
- // the array, this will fully represent all the comparison results.
- uint64_t MagicBitvector = 0;
-
-
- // Scan the array and see if one of our patterns matches.
- Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
- for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
- Constant *Elt = Init->getOperand(i);
-
- // If this is indexing an array of structures, get the structure element.
- if (!LaterIndices.empty())
- Elt = ConstantExpr::getExtractValue(Elt, LaterIndices.data(),
- LaterIndices.size());
-
- // If the element is masked, handle it.
- if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);
-
- // Find out if the comparison would be true or false for the i'th element.
- Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
- CompareRHS, TD);
- // If the result is undef for this element, ignore it.
- if (isa<UndefValue>(C)) {
- // Extend range state machines to cover this element in case there is an
- // undef in the middle of the range.
- if (TrueRangeEnd == (int)i-1)
- TrueRangeEnd = i;
- if (FalseRangeEnd == (int)i-1)
- FalseRangeEnd = i;
- continue;
- }
-
- // If we can't compute the result for any of the elements, we have to give
- // up evaluating the entire conditional.
- if (!isa<ConstantInt>(C)) return 0;
-
- // Otherwise, we know if the comparison is true or false for this element,
- // update our state machines.
- bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();
-
- // State machine for single/double/range index comparison.
- if (IsTrueForElt) {
- // Update the TrueElement state machine.
- if (FirstTrueElement == Undefined)
- FirstTrueElement = TrueRangeEnd = i; // First true element.
- else {
- // Update double-compare state machine.
- if (SecondTrueElement == Undefined)
- SecondTrueElement = i;
- else
- SecondTrueElement = Overdefined;
-
- // Update range state machine.
- if (TrueRangeEnd == (int)i-1)
- TrueRangeEnd = i;
- else
- TrueRangeEnd = Overdefined;
- }
- } else {
- // Update the FalseElement state machine.
- if (FirstFalseElement == Undefined)
- FirstFalseElement = FalseRangeEnd = i; // First false element.
- else {
- // Update double-compare state machine.
- if (SecondFalseElement == Undefined)
- SecondFalseElement = i;
- else
- SecondFalseElement = Overdefined;
-
- // Update range state machine.
- if (FalseRangeEnd == (int)i-1)
- FalseRangeEnd = i;
- else
- FalseRangeEnd = Overdefined;
- }
- }
-
-
- // If this element is in range, update our magic bitvector.
- if (i < 64 && IsTrueForElt)
- MagicBitvector |= 1ULL << i;
-
- // If all of our states become overdefined, bail out early. Since the
- // predicate is expensive, only check it every 8 elements. This is only
- // really useful for really huge arrays.
- if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
- SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
- FalseRangeEnd == Overdefined)
- return 0;
- }
-
- // Now that we've scanned the entire array, emit our new comparison(s). We
- // order the state machines in complexity of the generated code.
- Value *Idx = GEP->getOperand(2);
-
- // If the index is larger than the pointer size of the target, truncate the
- // index down like the GEP would do implicitly. We don't have to do this for
- // an inbounds GEP because the index can't be out of range.
- if (!GEP->isInBounds() &&
- Idx->getType()->getPrimitiveSizeInBits() > TD->getPointerSizeInBits())
- Idx = Builder->CreateTrunc(Idx, TD->getIntPtrType(Idx->getContext()));
-
- // If the comparison is only true for one or two elements, emit direct
- // comparisons.
- if (SecondTrueElement != Overdefined) {
- // None true -> false.
- if (FirstTrueElement == Undefined)
- return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(GEP->getContext()));
-
- Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);
-
- // True for one element -> 'i == 47'.
- if (SecondTrueElement == Undefined)
- return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);
-
- // True for two elements -> 'i == 47 | i == 72'.
- Value *C1 = Builder->CreateICmpEQ(Idx, FirstTrueIdx);
- Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
- Value *C2 = Builder->CreateICmpEQ(Idx, SecondTrueIdx);
- return BinaryOperator::CreateOr(C1, C2);
- }
-
- // If the comparison is only false for one or two elements, emit direct
- // comparisons.
- if (SecondFalseElement != Overdefined) {
- // None false -> true.
- if (FirstFalseElement == Undefined)
- return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(GEP->getContext()));
-
- Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);
-
- // False for one element -> 'i != 47'.
- if (SecondFalseElement == Undefined)
- return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);
-
- // False for two elements -> 'i != 47 & i != 72'.
- Value *C1 = Builder->CreateICmpNE(Idx, FirstFalseIdx);
- Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
- Value *C2 = Builder->CreateICmpNE(Idx, SecondFalseIdx);
- return BinaryOperator::CreateAnd(C1, C2);
- }
-
- // If the comparison can be replaced with a range comparison for the elements
- // where it is true, emit the range check.
- if (TrueRangeEnd != Overdefined) {
- assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");
-
- // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
- if (FirstTrueElement) {
- Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
- Idx = Builder->CreateAdd(Idx, Offs);
- }
-
- Value *End = ConstantInt::get(Idx->getType(),
- TrueRangeEnd-FirstTrueElement+1);
- return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
- }
-
- // False range check.
- if (FalseRangeEnd != Overdefined) {
- assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
- // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
- if (FirstFalseElement) {
- Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
- Idx = Builder->CreateAdd(Idx, Offs);
- }
-
- Value *End = ConstantInt::get(Idx->getType(),
- FalseRangeEnd-FirstFalseElement);
- return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
- }
-
-
- // If a 32-bit or 64-bit magic bitvector captures the entire comparison state
- // of this load, replace it with computation that does:
- // ((magic_cst >> i) & 1) != 0
- if (Init->getNumOperands() <= 32 ||
- (TD && Init->getNumOperands() <= 64 && TD->isLegalInteger(64))) {
- const Type *Ty;
- if (Init->getNumOperands() <= 32)
- Ty = Type::getInt32Ty(Init->getContext());
- else
- Ty = Type::getInt64Ty(Init->getContext());
- Value *V = Builder->CreateIntCast(Idx, Ty, false);
- V = Builder->CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
- V = Builder->CreateAnd(ConstantInt::get(Ty, 1), V);
- return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
- }
-
- return 0;
-}
-
-
-/// EvaluateGEPOffsetExpression - Return a value that can be used to compare
-/// the *offset* implied by a GEP to zero. For example, if we have &A[i], we
-/// want to return 'i' for "icmp ne i, 0". Note that, in general, indices can
-/// be complex, and scales are involved. The above expression would also be
-/// legal to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32).
-/// This latter form is less amenable to optimization though, and we are allowed
-/// to generate the first by knowing that pointer arithmetic doesn't overflow.
-///
-/// If we can't emit an optimized form for this expression, this returns null.
-///
-static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
- InstCombiner &IC) {
- TargetData &TD = *IC.getTargetData();
- gep_type_iterator GTI = gep_type_begin(GEP);
-
- // Check to see if this gep only has a single variable index. If so, and if
- // any constant indices are a multiple of its scale, then we can compute this
- // in terms of the scale of the variable index. For example, if the GEP
- // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
- // because the expression will cross zero at the same point.
- unsigned i, e = GEP->getNumOperands();
- int64_t Offset = 0;
- for (i = 1; i != e; ++i, ++GTI) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
- // Compute the aggregate offset of constant indices.
- if (CI->isZero()) continue;
-
- // Handle a struct index, which adds its field offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
- Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
- } else {
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
- Offset += Size*CI->getSExtValue();
- }
- } else {
- // Found our variable index.
- break;
- }
- }
-
- // If there are no variable indices, we must have a constant offset, just
- // evaluate it the general way.
- if (i == e) return 0;
-
- Value *VariableIdx = GEP->getOperand(i);
- // Determine the scale factor of the variable element. For example, this is
- // 4 if the variable index is into an array of i32.
- uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());
-
- // Verify that there are no other variable indices. If so, emit the hard way.
- for (++i, ++GTI; i != e; ++i, ++GTI) {
- ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
- if (!CI) return 0;
-
- // Compute the aggregate offset of constant indices.
- if (CI->isZero()) continue;
-
- // Handle a struct index, which adds its field offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
- Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
- } else {
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
- Offset += Size*CI->getSExtValue();
- }
- }
-
- // Okay, we know we have a single variable index, which must be a
- // pointer/array/vector index. If there is no offset, life is simple, return
- // the index.
- unsigned IntPtrWidth = TD.getPointerSizeInBits();
- if (Offset == 0) {
- // Cast to the intptr type in case a truncation occurs. If an extension is needed,
- // we don't need to bother extending: the extension won't affect where the
- // computation crosses zero.
- if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth)
- VariableIdx = new TruncInst(VariableIdx,
- TD.getIntPtrType(VariableIdx->getContext()),
- VariableIdx->getName(), &I);
- return VariableIdx;
- }
-
- // Otherwise, there is an index. The computation we will do will be modulo
- // the pointer size, so get it.
- uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
-
- Offset &= PtrSizeMask;
- VariableScale &= PtrSizeMask;
-
- // To do this transformation, any constant index must be a multiple of the
- // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
- // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
- // multiple of the variable scale.
- int64_t NewOffs = Offset / (int64_t)VariableScale;
- if (Offset != NewOffs*(int64_t)VariableScale)
- return 0;
-
- // Okay, we can do this evaluation. Start by converting the index to intptr.
- const Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
- if (VariableIdx->getType() != IntPtrTy)
- VariableIdx = CastInst::CreateIntegerCast(VariableIdx, IntPtrTy,
- true /*SExt*/,
- VariableIdx->getName(), &I);
- Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
- return BinaryOperator::CreateAdd(VariableIdx, OffsetVal, "offset", &I);
-}
-
-/// FoldGEPICmp - Fold comparisons between a GEP instruction and something
-/// else. At this point we know that the GEP is on the LHS of the comparison.
-Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
- ICmpInst::Predicate Cond,
- Instruction &I) {
- // Look through bitcasts.
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(RHS))
- RHS = BCI->getOperand(0);
-
- Value *PtrBase = GEPLHS->getOperand(0);
- if (TD && PtrBase == RHS && GEPLHS->isInBounds()) {
- // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
- // This transformation (ignoring the base and scales) is valid because we
- // know pointers can't overflow since the gep is inbounds. See if we can
- // output an optimized form.
- Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, I, *this);
-
- // If not, synthesize the offset the hard way.
- if (Offset == 0)
- Offset = EmitGEPOffset(GEPLHS);
- return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
- Constant::getNullValue(Offset->getType()));
- } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
- // If the base pointers are different, but the indices are the same, just
- // compare the base pointer.
- if (PtrBase != GEPRHS->getOperand(0)) {
- bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
- IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
- GEPRHS->getOperand(0)->getType();
- if (IndicesTheSame)
- for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
- if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
- IndicesTheSame = false;
- break;
- }
-
- // If all indices are the same, just compare the base pointers.
- if (IndicesTheSame)
- return new ICmpInst(ICmpInst::getSignedPredicate(Cond),
- GEPLHS->getOperand(0), GEPRHS->getOperand(0));
-
- // Otherwise, the base pointers are different and the indices are
- // different, bail out.
- return 0;
- }
-
- // If one of the GEPs has all zero indices, recurse.
- bool AllZeros = true;
- for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
- if (!isa<Constant>(GEPLHS->getOperand(i)) ||
- !cast<Constant>(GEPLHS->getOperand(i))->isNullValue()) {
- AllZeros = false;
- break;
- }
- if (AllZeros)
- return FoldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
- ICmpInst::getSwappedPredicate(Cond), I);
-
- // If the other GEP has all zero indices, recurse.
- AllZeros = true;
- for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
- if (!isa<Constant>(GEPRHS->getOperand(i)) ||
- !cast<Constant>(GEPRHS->getOperand(i))->isNullValue()) {
- AllZeros = false;
- break;
- }
- if (AllZeros)
- return FoldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);
-
- if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
- // If the GEPs only differ by one index, compare it.
- unsigned NumDifferences = 0; // Keep track of # differences.
- unsigned DiffOperand = 0; // The operand that differs.
- for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
- if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
- if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
- GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
- // Irreconcilable differences.
- NumDifferences = 2;
- break;
- } else {
- if (NumDifferences++) break;
- DiffOperand = i;
- }
- }
-
- if (NumDifferences == 0) // SAME GEP?
- return ReplaceInstUsesWith(I, // No comparison is needed here.
- ConstantInt::get(Type::getInt1Ty(I.getContext()),
- ICmpInst::isTrueWhenEqual(Cond)));
-
- else if (NumDifferences == 1) {
- Value *LHSV = GEPLHS->getOperand(DiffOperand);
- Value *RHSV = GEPRHS->getOperand(DiffOperand);
- // Make sure we do a signed comparison here.
- return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
- }
- }
-
- // Only lower this if the icmp is the only user of the GEP or if we expect
- // the result to fold to a constant!
- if (TD &&
- (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
- (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
- // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)) ---> (OFFSET1 cmp OFFSET2)
- Value *L = EmitGEPOffset(GEPLHS);
- Value *R = EmitGEPOffset(GEPRHS);
- return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
- }
- }
- return 0;
-}
-
-/// FoldICmpAddOpCst - Fold "icmp pred (X+CI), X".
-Instruction *InstCombiner::FoldICmpAddOpCst(ICmpInst &ICI,
- Value *X, ConstantInt *CI,
- ICmpInst::Predicate Pred,
- Value *TheAdd) {
- // If we have X+0, exit early (simplifying logic below) and let it get folded
- // elsewhere. icmp X+0, X -> icmp X, X
- if (CI->isZero()) {
- bool isTrue = ICmpInst::isTrueWhenEqual(Pred);
- return ReplaceInstUsesWith(ICI, ConstantInt::get(ICI.getType(), isTrue));
- }
-
- // (X+4) == X -> false.
- if (Pred == ICmpInst::ICMP_EQ)
- return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(X->getContext()));
-
- // (X+4) != X -> true.
- if (Pred == ICmpInst::ICMP_NE)
- return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(X->getContext()));
-
- // If this is an instruction (as opposed to constantexpr) get NUW/NSW info.
- bool isNUW = false, isNSW = false;
- if (BinaryOperator *Add = dyn_cast<BinaryOperator>(TheAdd)) {
- isNUW = Add->hasNoUnsignedWrap();
- isNSW = Add->hasNoSignedWrap();
- }
-
- // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
- // so the values can never be equal. Similarly for all other "or equals"
- // operators.
-
- // (X+1) <u X --> X >u (MAXUINT-1) --> X == 255
- // (X+2) <u X --> X >u (MAXUINT-2) --> X > 253
- // (X+MAXUINT) <u X --> X >u (MAXUINT-MAXUINT) --> X != 0
- if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
- // If this is an NUW add, then this is always false.
- if (isNUW)
- return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(X->getContext()));
-
- Value *R =
- ConstantExpr::getSub(ConstantInt::getAllOnesValue(CI->getType()), CI);
- return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
- }
-
- // (X+1) >u X --> X <u (0-1) --> X != 255
- // (X+2) >u X --> X <u (0-2) --> X <u 254
- // (X+MAXUINT) >u X --> X <u (0-MAXUINT) --> X <u 1 --> X == 0
- if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
- // If this is an NUW add, then this is always true.
- if (isNUW)
- return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(X->getContext()));
- return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantExpr::getNeg(CI));
- }
-
- unsigned BitWidth = CI->getType()->getPrimitiveSizeInBits();
- ConstantInt *SMax = ConstantInt::get(X->getContext(),
- APInt::getSignedMaxValue(BitWidth));
-
- // (X+ 1) <s X --> X >s (MAXSINT-1) --> X == 127
- // (X+ 2) <s X --> X >s (MAXSINT-2) --> X >s 125
- // (X+MAXSINT) <s X --> X >s (MAXSINT-MAXSINT) --> X >s 0
- // (X+MINSINT) <s X --> X >s (MAXSINT-MINSINT) --> X >s -1
- // (X+ -2) <s X --> X >s (MAXSINT- -2) --> X >s 126
- // (X+ -1) <s X --> X >s (MAXSINT- -1) --> X != 127
- if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
- // If this is an NSW add, then we have two cases: if the constant is
- // positive, then this is always false, if negative, this is always true.
- if (isNSW) {
- bool isTrue = CI->getValue().isNegative();
- return ReplaceInstUsesWith(ICI, ConstantInt::get(ICI.getType(), isTrue));
- }
-
- return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantExpr::getSub(SMax, CI));
- }
-
- // (X+ 1) >s X --> X <s (MAXSINT-(1-1)) --> X != 127
- // (X+ 2) >s X --> X <s (MAXSINT-(2-1)) --> X <s 126
- // (X+MAXSINT) >s X --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
- // (X+MINSINT) >s X --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
- // (X+ -2) >s X --> X <s (MAXSINT-(-2-1)) --> X <s -126
- // (X+ -1) >s X --> X <s (MAXSINT-(-1-1)) --> X == -128
-
- // If this is an NSW add, then we have two cases: if the constant is
- // positive, then this is always true, if negative, this is always false.
- if (isNSW) {
- bool isTrue = !CI->getValue().isNegative();
- return ReplaceInstUsesWith(ICI, ConstantInt::get(ICI.getType(), isTrue));
- }
-
- assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
- Constant *C = ConstantInt::get(X->getContext(), CI->getValue()-1);
- return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C));
-}
-
-/// FoldICmpDivCst - Fold "icmp pred, ([su]div X, DivRHS), CmpRHS" where DivRHS
-/// and CmpRHS are both known to be integer constants.
-Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
- ConstantInt *DivRHS) {
- ConstantInt *CmpRHS = cast<ConstantInt>(ICI.getOperand(1));
- const APInt &CmpRHSV = CmpRHS->getValue();
-
- // FIXME: If the operand types don't match the type of the divide
- // then don't attempt this transform. The code below doesn't have the
- // logic to deal with a signed divide and an unsigned compare (and
- // vice versa). This is because (x /s C1) <s C2 produces different
- // results than (x /s C1) <u C2 or (x /u C1) <s C2 or even
- // (x /u C1) <u C2. Simply casting the operands and result won't
- // work. :( The if statement below tests that condition and bails
- // if it finds it.
- bool DivIsSigned = DivI->getOpcode() == Instruction::SDiv;
- if (!ICI.isEquality() && DivIsSigned != ICI.isSigned())
- return 0;
- if (DivRHS->isZero())
- return 0; // The ProdOV computation fails on divide by zero.
- if (DivIsSigned && DivRHS->isAllOnesValue())
- return 0; // The overflow computation also screws up here
- if (DivRHS->isOne())
- return 0; // Not worth bothering, and eliminates some funny cases
- // with INT_MIN.
-
- // Compute Prod = CI * DivRHS. We are essentially solving an equation
- // of form X/C1=C2. We solve for X by multiplying C1 (DivRHS) and
- // C2 (CI). By solving for X we can turn this into a range check
- // instead of computing a divide.
- Constant *Prod = ConstantExpr::getMul(CmpRHS, DivRHS);
-
- // Determine if the product overflows by seeing if the product is
- // not equal to the divide. Make sure we do the same kind of divide
- // as in the LHS instruction that we're folding.
- bool ProdOV = (DivIsSigned ? ConstantExpr::getSDiv(Prod, DivRHS) :
- ConstantExpr::getUDiv(Prod, DivRHS)) != CmpRHS;
-
- // Get the ICmp opcode
- ICmpInst::Predicate Pred = ICI.getPredicate();
-
- // Figure out the interval that is being checked. For example, a comparison
- // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
- // Compute this interval based on the constants involved and the signedness of
- // the compare/divide. This computes a half-open interval, keeping track of
- // whether either value in the interval overflows. After analysis, each
- // overflow variable is set to 0 if its corresponding bound variable is valid,
- // -1 if it overflowed off the bottom end, or +1 if it overflowed off the top end.
- int LoOverflow = 0, HiOverflow = 0;
- Constant *LoBound = 0, *HiBound = 0;
-
- if (!DivIsSigned) { // udiv
- // e.g. X/5 op 3 --> [15, 20)
- LoBound = Prod;
- HiOverflow = LoOverflow = ProdOV;
- if (!HiOverflow)
- HiOverflow = AddWithOverflow(HiBound, LoBound, DivRHS, false);
- } else if (DivRHS->getValue().isStrictlyPositive()) { // Divisor is > 0.
- if (CmpRHSV == 0) { // (X / pos) op 0
- // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
- LoBound = cast<ConstantInt>(ConstantExpr::getNeg(SubOne(DivRHS)));
- HiBound = DivRHS;
- } else if (CmpRHSV.isStrictlyPositive()) { // (X / pos) op pos
- LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
- HiOverflow = LoOverflow = ProdOV;
- if (!HiOverflow)
- HiOverflow = AddWithOverflow(HiBound, Prod, DivRHS, true);
- } else { // (X / pos) op neg
- // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
- HiBound = AddOne(Prod);
- LoOverflow = HiOverflow = ProdOV ? -1 : 0;
- if (!LoOverflow) {
- ConstantInt* DivNeg =
- cast<ConstantInt>(ConstantExpr::getNeg(DivRHS));
- LoOverflow = AddWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
- }
- }
- } else if (DivRHS->getValue().isNegative()) { // Divisor is < 0.
- if (CmpRHSV == 0) { // (X / neg) op 0
- // e.g. X/-5 op 0 --> [-4, 5)
- LoBound = AddOne(DivRHS);
- HiBound = cast<ConstantInt>(ConstantExpr::getNeg(DivRHS));
- if (HiBound == DivRHS) { // -INTMIN = INTMIN
- HiOverflow = 1; // [INTMIN+1, overflow)
- HiBound = 0; // e.g. X/INTMIN = 0 --> X > INTMIN
- }
- } else if (CmpRHSV.isStrictlyPositive()) { // (X / neg) op pos
- // e.g. X/-5 op 3 --> [-19, -14)
- HiBound = AddOne(Prod);
- HiOverflow = LoOverflow = ProdOV ? -1 : 0;
- if (!LoOverflow)
- LoOverflow = AddWithOverflow(LoBound, HiBound, DivRHS, true) ? -1 : 0;
- } else { // (X / neg) op neg
- LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20)
- LoOverflow = HiOverflow = ProdOV;
- if (!HiOverflow)
- HiOverflow = SubWithOverflow(HiBound, Prod, DivRHS, true);
- }
-
- // Dividing by a negative swaps the condition. LT <-> GT
- Pred = ICmpInst::getSwappedPredicate(Pred);
- }
-
- Value *X = DivI->getOperand(0);
- switch (Pred) {
- default: llvm_unreachable("Unhandled icmp opcode!");
- case ICmpInst::ICMP_EQ:
- if (LoOverflow && HiOverflow)
- return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(ICI.getContext()));
- if (HiOverflow)
- return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
- ICmpInst::ICMP_UGE, X, LoBound);
- if (LoOverflow)
- return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
- ICmpInst::ICMP_ULT, X, HiBound);
- return ReplaceInstUsesWith(ICI,
- InsertRangeTest(X, LoBound, HiBound, DivIsSigned,
- true));
- case ICmpInst::ICMP_NE:
- if (LoOverflow && HiOverflow)
- return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(ICI.getContext()));
- if (HiOverflow)
- return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
- ICmpInst::ICMP_ULT, X, LoBound);
- if (LoOverflow)
- return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
- ICmpInst::ICMP_UGE, X, HiBound);
- return ReplaceInstUsesWith(ICI, InsertRangeTest(X, LoBound, HiBound,
- DivIsSigned, false));
- case ICmpInst::ICMP_ULT:
- case ICmpInst::ICMP_SLT:
- if (LoOverflow == +1) // Low bound is greater than input range.
- return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(ICI.getContext()));
- if (LoOverflow == -1) // Low bound is less than input range.
- return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(ICI.getContext()));
- return new ICmpInst(Pred, X, LoBound);
- case ICmpInst::ICMP_UGT:
- case ICmpInst::ICMP_SGT:
- if (HiOverflow == +1) // High bound greater than input range.
- return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(ICI.getContext()));
- else if (HiOverflow == -1) // High bound less than input range.
- return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(ICI.getContext()));
- if (Pred == ICmpInst::ICMP_UGT)
- return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
- else
- return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
- }
-}
-
-
-/// visitICmpInstWithInstAndIntCst - Handle "icmp (instr, intcst)".
-///
-Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
- Instruction *LHSI,
- ConstantInt *RHS) {
- const APInt &RHSV = RHS->getValue();
-
- switch (LHSI->getOpcode()) {
- case Instruction::Trunc:
- if (ICI.isEquality() && LHSI->hasOneUse()) {
- // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
- // of the high bits truncated out of x are known.
- unsigned DstBits = LHSI->getType()->getPrimitiveSizeInBits(),
- SrcBits = LHSI->getOperand(0)->getType()->getPrimitiveSizeInBits();
- APInt Mask(APInt::getHighBitsSet(SrcBits, SrcBits-DstBits));
- APInt KnownZero(SrcBits, 0), KnownOne(SrcBits, 0);
- ComputeMaskedBits(LHSI->getOperand(0), Mask, KnownZero, KnownOne);
-
- // If all the high bits are known, we can do this xform.
- if ((KnownZero|KnownOne).countLeadingOnes() >= SrcBits-DstBits) {
- // Pull in the high bits from known-ones set.
- APInt NewRHS(RHS->getValue());
- NewRHS.zext(SrcBits);
- NewRHS |= KnownOne;
- return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
- ConstantInt::get(ICI.getContext(), NewRHS));
- }
- }
- break;
-
- case Instruction::Xor: // (icmp pred (xor X, XorCST), CI)
- if (ConstantInt *XorCST = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
- // If this is a comparison that tests the signbit (X < 0) or (x > -1),
- // fold the xor.
- if ((ICI.getPredicate() == ICmpInst::ICMP_SLT && RHSV == 0) ||
- (ICI.getPredicate() == ICmpInst::ICMP_SGT && RHSV.isAllOnesValue())) {
- Value *CompareVal = LHSI->getOperand(0);
-
- // If the sign bit of the XorCST is not set, there is no change to
- // the operation, just stop using the Xor.
- if (!XorCST->getValue().isNegative()) {
- ICI.setOperand(0, CompareVal);
- Worklist.Add(LHSI);
- return &ICI;
- }
-
- // Was the old condition true if the operand is positive?
- bool isTrueIfPositive = ICI.getPredicate() == ICmpInst::ICMP_SGT;
-
- // If so, the new one isn't.
- isTrueIfPositive ^= true;
-
- if (isTrueIfPositive)
- return new ICmpInst(ICmpInst::ICMP_SGT, CompareVal,
- SubOne(RHS));
- else
- return new ICmpInst(ICmpInst::ICMP_SLT, CompareVal,
- AddOne(RHS));
- }
-
- if (LHSI->hasOneUse()) {
- // (icmp u/s (xor A SignBit), C) -> (icmp s/u A, (xor C SignBit))
- if (!ICI.isEquality() && XorCST->getValue().isSignBit()) {
- const APInt &SignBit = XorCST->getValue();
- ICmpInst::Predicate Pred = ICI.isSigned()
- ? ICI.getUnsignedPredicate()
- : ICI.getSignedPredicate();
- return new ICmpInst(Pred, LHSI->getOperand(0),
- ConstantInt::get(ICI.getContext(),
- RHSV ^ SignBit));
- }
-
- // (icmp u/s (xor A ~SignBit), C) -> (icmp s/u (xor C ~SignBit), A)
- if (!ICI.isEquality() && XorCST->getValue().isMaxSignedValue()) {
- const APInt &NotSignBit = XorCST->getValue();
- ICmpInst::Predicate Pred = ICI.isSigned()
- ? ICI.getUnsignedPredicate()
- : ICI.getSignedPredicate();
- Pred = ICI.getSwappedPredicate(Pred);
- return new ICmpInst(Pred, LHSI->getOperand(0),
- ConstantInt::get(ICI.getContext(),
- RHSV ^ NotSignBit));
- }
- }
- }
- break;
- case Instruction::And: // (icmp pred (and X, AndCST), RHS)
- if (LHSI->hasOneUse() && isa<ConstantInt>(LHSI->getOperand(1)) &&
- LHSI->getOperand(0)->hasOneUse()) {
- ConstantInt *AndCST = cast<ConstantInt>(LHSI->getOperand(1));
-
- // If the LHS is an AND of a truncating cast, we can widen the
- // and/compare to be the input width without changing the value
- // produced, eliminating a cast.
- if (TruncInst *Cast = dyn_cast<TruncInst>(LHSI->getOperand(0))) {
- // We can do this transformation if either the AND constant does not
- // have its sign bit set or if it is an equality comparison.
- // Extending a relational comparison when we're checking the sign
- // bit would not work.
- if (Cast->hasOneUse() &&
- (ICI.isEquality() ||
- (AndCST->getValue().isNonNegative() && RHSV.isNonNegative()))) {
- uint32_t BitWidth =
- cast<IntegerType>(Cast->getOperand(0)->getType())->getBitWidth();
- APInt NewCST = AndCST->getValue();
- NewCST.zext(BitWidth);
- APInt NewCI = RHSV;
- NewCI.zext(BitWidth);
- Value *NewAnd =
- Builder->CreateAnd(Cast->getOperand(0),
- ConstantInt::get(ICI.getContext(), NewCST),
- LHSI->getName());
- return new ICmpInst(ICI.getPredicate(), NewAnd,
- ConstantInt::get(ICI.getContext(), NewCI));
- }
- }
-
- // If this is: (X >> C1) & C2 != C3 (where any shift and any compare
- // could exist), turn it into (X & (C2 << C1)) != (C3 << C1). This
- // happens a LOT in code produced by the C front-end, for bitfield
- // access.
- BinaryOperator *Shift = dyn_cast<BinaryOperator>(LHSI->getOperand(0));
- if (Shift && !Shift->isShift())
- Shift = 0;
-
- ConstantInt *ShAmt;
- ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : 0;
- const Type *Ty = Shift ? Shift->getType() : 0; // Type of the shift.
- const Type *AndTy = AndCST->getType(); // Type of the and.
-
- // We can fold this as long as we can't shift unknown bits
- // into the mask. This can only happen with signed shift
- // rights, as they sign-extend.
- if (ShAmt) {
- bool CanFold = Shift->isLogicalShift();
- if (!CanFold) {
- // To test for the bad case of the signed shr, see if any
- // of the bits shifted in could be tested after the mask.
- uint32_t TyBits = Ty->getPrimitiveSizeInBits();
- int ShAmtVal = TyBits - ShAmt->getLimitedValue(TyBits);
-
- uint32_t BitWidth = AndTy->getPrimitiveSizeInBits();
- if ((APInt::getHighBitsSet(BitWidth, BitWidth-ShAmtVal) &
- AndCST->getValue()) == 0)
- CanFold = true;
- }
-
- if (CanFold) {
- Constant *NewCst;
- if (Shift->getOpcode() == Instruction::Shl)
- NewCst = ConstantExpr::getLShr(RHS, ShAmt);
- else
- NewCst = ConstantExpr::getShl(RHS, ShAmt);
-
- // Check to see if we are shifting out any of the bits being
- // compared.
- if (ConstantExpr::get(Shift->getOpcode(),
- NewCst, ShAmt) != RHS) {
- // If we shifted bits out, the fold is not going to work out.
- // As a special case, check to see if this means that the
- // result is always true or false now.
- if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
- return ReplaceInstUsesWith(ICI,
- ConstantInt::getFalse(ICI.getContext()));
- if (ICI.getPredicate() == ICmpInst::ICMP_NE)
- return ReplaceInstUsesWith(ICI,
- ConstantInt::getTrue(ICI.getContext()));
- } else {
- ICI.setOperand(1, NewCst);
- Constant *NewAndCST;
- if (Shift->getOpcode() == Instruction::Shl)
- NewAndCST = ConstantExpr::getLShr(AndCST, ShAmt);
- else
- NewAndCST = ConstantExpr::getShl(AndCST, ShAmt);
- LHSI->setOperand(1, NewAndCST);
- LHSI->setOperand(0, Shift->getOperand(0));
- Worklist.Add(Shift); // Shift is dead.
- return &ICI;
- }
- }
- }
-
- // Turn ((X >> Y) & C) == 0 into (X & (C << Y)) == 0. The latter is
- // preferable because it allows the C<<Y expression to be hoisted out
- // of a loop if Y is invariant and X is not.
- if (Shift && Shift->hasOneUse() && RHSV == 0 &&
- ICI.isEquality() && !Shift->isArithmeticShift() &&
- !isa<Constant>(Shift->getOperand(0))) {
- // Compute C << Y.
- Value *NS;
- if (Shift->getOpcode() == Instruction::LShr) {
- NS = Builder->CreateShl(AndCST, Shift->getOperand(1), "tmp");
- } else {
- // Insert a logical shift.
- NS = Builder->CreateLShr(AndCST, Shift->getOperand(1), "tmp");
- }
-
- // Compute X & (C << Y).
- Value *NewAnd =
- Builder->CreateAnd(Shift->getOperand(0), NS, LHSI->getName());
-
- ICI.setOperand(0, NewAnd);
- return &ICI;
- }
- }
-
- // Try to optimize things like "A[i]&42 == 0" to index computations.
- if (LoadInst *LI = dyn_cast<LoadInst>(LHSI->getOperand(0))) {
- if (GetElementPtrInst *GEP =
- dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
- if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
- !LI->isVolatile() && isa<ConstantInt>(LHSI->getOperand(1))) {
- ConstantInt *C = cast<ConstantInt>(LHSI->getOperand(1));
- if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV,ICI, C))
- return Res;
- }
- }
- break;
-
- case Instruction::Or: {
- if (!ICI.isEquality() || !RHS->isNullValue() || !LHSI->hasOneUse())
- break;
- Value *P, *Q;
- if (match(LHSI, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
- // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
- // -> and (icmp eq P, null), (icmp eq Q, null).
-
- Value *ICIP = Builder->CreateICmp(ICI.getPredicate(), P,
- Constant::getNullValue(P->getType()));
- Value *ICIQ = Builder->CreateICmp(ICI.getPredicate(), Q,
- Constant::getNullValue(Q->getType()));
- Instruction *Op;
- if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
- Op = BinaryOperator::CreateAnd(ICIP, ICIQ);
- else
- Op = BinaryOperator::CreateOr(ICIP, ICIQ);
- return Op;
- }
- break;
- }
-
- case Instruction::Shl: { // (icmp pred (shl X, ShAmt), CI)
- ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
- if (!ShAmt) break;
-
- uint32_t TypeBits = RHSV.getBitWidth();
-
- // Check that the shift amount is in range. If not, don't perform
- // undefined shifts. When the shift is visited it will be
- // simplified.
- if (ShAmt->uge(TypeBits))
- break;
-
- if (ICI.isEquality()) {
- // If we are comparing against bits always shifted out, the
- // comparison cannot succeed.
- Constant *Comp =
- ConstantExpr::getShl(ConstantExpr::getLShr(RHS, ShAmt),
- ShAmt);
- if (Comp != RHS) {// Comparing against a bit that we know is zero.
- bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
- Constant *Cst =
- ConstantInt::get(Type::getInt1Ty(ICI.getContext()), IsICMP_NE);
- return ReplaceInstUsesWith(ICI, Cst);
- }
-
- if (LHSI->hasOneUse()) {
- // Otherwise strength reduce the shift into an and.
- uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
- Constant *Mask =
- ConstantInt::get(ICI.getContext(), APInt::getLowBitsSet(TypeBits,
- TypeBits-ShAmtVal));
-
- Value *And =
- Builder->CreateAnd(LHSI->getOperand(0),Mask, LHSI->getName()+".mask");
- return new ICmpInst(ICI.getPredicate(), And,
- ConstantInt::get(ICI.getContext(),
- RHSV.lshr(ShAmtVal)));
- }
- }
-
- // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
- bool TrueIfSigned = false;
- if (LHSI->hasOneUse() &&
- isSignBitCheck(ICI.getPredicate(), RHS, TrueIfSigned)) {
- // (X << 31) <s 0 --> (X&1) != 0
- Constant *Mask = ConstantInt::get(ICI.getContext(), APInt(TypeBits, 1) <<
- (TypeBits-ShAmt->getZExtValue()-1));
- Value *And =
- Builder->CreateAnd(LHSI->getOperand(0), Mask, LHSI->getName()+".mask");
- return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
- And, Constant::getNullValue(And->getType()));
- }
- break;
- }
-
- case Instruction::LShr: // (icmp pred (shr X, ShAmt), CI)
- case Instruction::AShr: {
- // Only handle equality comparisons of shift-by-constant.
- ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
- if (!ShAmt || !ICI.isEquality()) break;
-
- // Check that the shift amount is in range. If not, don't perform
- // undefined shifts. When the shift is visited it will be
- // simplified.
- uint32_t TypeBits = RHSV.getBitWidth();
- if (ShAmt->uge(TypeBits))
- break;
-
- uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
-
- // If we are comparing against bits always shifted out, the
- // comparison cannot succeed.
- APInt Comp = RHSV << ShAmtVal;
- if (LHSI->getOpcode() == Instruction::LShr)
- Comp = Comp.lshr(ShAmtVal);
- else
- Comp = Comp.ashr(ShAmtVal);
-
- if (Comp != RHSV) { // Comparing against a bit that we know is zero.
- bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
- Constant *Cst = ConstantInt::get(Type::getInt1Ty(ICI.getContext()),
- IsICMP_NE);
- return ReplaceInstUsesWith(ICI, Cst);
- }
-
- // Otherwise, check to see if the bits shifted out are known to be zero.
- // If so, we can compare against the unshifted value:
- // (X & 4) >> 1 == 2 --> (X & 4) == 4.
- if (LHSI->hasOneUse() &&
- MaskedValueIsZero(LHSI->getOperand(0),
- APInt::getLowBitsSet(Comp.getBitWidth(), ShAmtVal))) {
- return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
- ConstantExpr::getShl(RHS, ShAmt));
- }
-
- if (LHSI->hasOneUse()) {
- // Otherwise strength reduce the shift into an and.
- APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
- Constant *Mask = ConstantInt::get(ICI.getContext(), Val);
-
- Value *And = Builder->CreateAnd(LHSI->getOperand(0),
- Mask, LHSI->getName()+".mask");
- return new ICmpInst(ICI.getPredicate(), And,
- ConstantExpr::getShl(RHS, ShAmt));
- }
- break;
- }
-
- case Instruction::SDiv:
- case Instruction::UDiv:
- // Fold: icmp pred ([us]div X, C1), C2 -> range test
- // Fold this div into the comparison, producing a range check.
- // Determine, based on the divide type, what range is being
- // checked. If there is an overflow on the low or high side, remember
- // it, otherwise compute the range [low, hi) bounding the new value.
- // See: InsertRangeTest above for the kinds of replacements possible.
- if (ConstantInt *DivRHS = dyn_cast<ConstantInt>(LHSI->getOperand(1)))
- if (Instruction *R = FoldICmpDivCst(ICI, cast<BinaryOperator>(LHSI),
- DivRHS))
- return R;
- break;
-
- case Instruction::Add:
- // Fold: icmp pred (add X, C1), C2
- if (!ICI.isEquality()) {
- ConstantInt *LHSC = dyn_cast<ConstantInt>(LHSI->getOperand(1));
- if (!LHSC) break;
- const APInt &LHSV = LHSC->getValue();
-
- ConstantRange CR = ICI.makeConstantRange(ICI.getPredicate(), RHSV)
- .subtract(LHSV);
-
- if (ICI.isSigned()) {
- if (CR.getLower().isSignBit()) {
- return new ICmpInst(ICmpInst::ICMP_SLT, LHSI->getOperand(0),
- ConstantInt::get(ICI.getContext(),CR.getUpper()));
- } else if (CR.getUpper().isSignBit()) {
- return new ICmpInst(ICmpInst::ICMP_SGE, LHSI->getOperand(0),
- ConstantInt::get(ICI.getContext(),CR.getLower()));
- }
- } else {
- if (CR.getLower().isMinValue()) {
- return new ICmpInst(ICmpInst::ICMP_ULT, LHSI->getOperand(0),
- ConstantInt::get(ICI.getContext(),CR.getUpper()));
- } else if (CR.getUpper().isMinValue()) {
- return new ICmpInst(ICmpInst::ICMP_UGE, LHSI->getOperand(0),
- ConstantInt::get(ICI.getContext(),CR.getLower()));
- }
- }
- }
- break;
- }
-
- // Simplify icmp_eq and icmp_ne instructions with integer constant RHS.
- if (ICI.isEquality()) {
- bool isICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
-
- // If the first operand is (add|sub|and|or|xor|rem) with a constant, and
- // the second operand is a constant, simplify a bit.
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(LHSI)) {
- switch (BO->getOpcode()) {
- case Instruction::SRem:
- // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
- if (RHSV == 0 && isa<ConstantInt>(BO->getOperand(1)) &&BO->hasOneUse()){
- const APInt &V = cast<ConstantInt>(BO->getOperand(1))->getValue();
- if (V.sgt(APInt(V.getBitWidth(), 1)) && V.isPowerOf2()) {
- Value *NewRem =
- Builder->CreateURem(BO->getOperand(0), BO->getOperand(1),
- BO->getName());
- return new ICmpInst(ICI.getPredicate(), NewRem,
- Constant::getNullValue(BO->getType()));
- }
- }
- break;
- case Instruction::Add:
- // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
- if (ConstantInt *BOp1C = dyn_cast<ConstantInt>(BO->getOperand(1))) {
- if (BO->hasOneUse())
- return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
- ConstantExpr::getSub(RHS, BOp1C));
- } else if (RHSV == 0) {
- // Replace ((add A, B) != 0) with (A != -B) if A or B is
- // efficiently invertible, or if the add has just this one use.
- Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
-
- if (Value *NegVal = dyn_castNegVal(BOp1))
- return new ICmpInst(ICI.getPredicate(), BOp0, NegVal);
- else if (Value *NegVal = dyn_castNegVal(BOp0))
- return new ICmpInst(ICI.getPredicate(), NegVal, BOp1);
- else if (BO->hasOneUse()) {
- Value *Neg = Builder->CreateNeg(BOp1);
- Neg->takeName(BO);
- return new ICmpInst(ICI.getPredicate(), BOp0, Neg);
- }
- }
- break;
- case Instruction::Xor:
- // For the xor case, we can xor two constants together, eliminating
- // the explicit xor.
- if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1)))
- return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
- ConstantExpr::getXor(RHS, BOC));
-
- // FALLTHROUGH
- case Instruction::Sub:
- // Replace (([sub|xor] A, B) != 0) with (A != B)
- if (RHSV == 0)
- return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
- BO->getOperand(1));
- break;
-
- case Instruction::Or:
- // If bits are being or'd in that are not present in the constant we
- // are comparing against, then the comparison could never succeed!
- if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1))) {
- Constant *NotCI = ConstantExpr::getNot(RHS);
- if (!ConstantExpr::getAnd(BOC, NotCI)->isNullValue())
- return ReplaceInstUsesWith(ICI,
- ConstantInt::get(Type::getInt1Ty(ICI.getContext()),
- isICMP_NE));
- }
- break;
-
- case Instruction::And:
- if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
- // If bits are being compared against that are and'd out, then the
- // comparison can never succeed!
- if ((RHSV & ~BOC->getValue()) != 0)
- return ReplaceInstUsesWith(ICI,
- ConstantInt::get(Type::getInt1Ty(ICI.getContext()),
- isICMP_NE));
-
- // If we have ((X & C) == C), turn it into ((X & C) != 0).
- if (RHS == BOC && RHSV.isPowerOf2())
- return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ :
- ICmpInst::ICMP_NE, LHSI,
- Constant::getNullValue(RHS->getType()));
-
-        // Replace ((and X, (1 << size(X)-1)) != 0) with X s< 0
- if (BOC->getValue().isSignBit()) {
- Value *X = BO->getOperand(0);
- Constant *Zero = Constant::getNullValue(X->getType());
- ICmpInst::Predicate pred = isICMP_NE ?
- ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
- return new ICmpInst(pred, X, Zero);
- }
-
- // ((X & ~7) == 0) --> X < 8
- if (RHSV == 0 && isHighOnes(BOC)) {
- Value *X = BO->getOperand(0);
- Constant *NegX = ConstantExpr::getNeg(BOC);
- ICmpInst::Predicate pred = isICMP_NE ?
- ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
- return new ICmpInst(pred, X, NegX);
- }
- }
- default: break;
- }
- } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(LHSI)) {
- // Handle icmp {eq|ne} <intrinsic>, intcst.
- switch (II->getIntrinsicID()) {
- case Intrinsic::bswap:
- Worklist.Add(II);
- ICI.setOperand(0, II->getOperand(1));
- ICI.setOperand(1, ConstantInt::get(II->getContext(), RHSV.byteSwap()));
- return &ICI;
- case Intrinsic::ctlz:
- case Intrinsic::cttz:
-    // ctz(A) == bitwidth(A)  ->  A == 0, and likewise for !=
- if (RHSV == RHS->getType()->getBitWidth()) {
- Worklist.Add(II);
- ICI.setOperand(0, II->getOperand(1));
- ICI.setOperand(1, ConstantInt::get(RHS->getType(), 0));
- return &ICI;
- }
- break;
- case Intrinsic::ctpop:
- // popcount(A) == 0 -> A == 0 and likewise for !=
- if (RHS->isZero()) {
- Worklist.Add(II);
- ICI.setOperand(0, II->getOperand(1));
- ICI.setOperand(1, RHS);
- return &ICI;
- }
- break;
- default:
- break;
- }
- }
- }
- return 0;
-}
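
For reference, a minimal C++ sketch of what the signed-remainder equality fold
above amounts to at the source level; the function names are invented for this
illustration and do not appear in the code:

    // Sketch only: for a power-of-two divisor, a signed remainder compared
    // against zero can be rewritten with an unsigned remainder, which is
    // what the CreateURem call above produces in IR form.
    bool srem_is_zero(int x) {
      return x % 8 == 0;                 // signed srem against 2^3
    }
    bool urem_is_zero(int x) {
      return (unsigned)x % 8u == 0;      // equivalent unsigned form
    }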
-
-/// visitICmpInstWithCastAndCast - Handle icmp (cast x to y), (cast/cst).
-/// We only handle extending casts so far.
-///
-Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
- const CastInst *LHSCI = cast<CastInst>(ICI.getOperand(0));
- Value *LHSCIOp = LHSCI->getOperand(0);
- const Type *SrcTy = LHSCIOp->getType();
- const Type *DestTy = LHSCI->getType();
- Value *RHSCIOp;
-
- // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
- // integer type is the same size as the pointer type.
- if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
- TD->getPointerSizeInBits() ==
- cast<IntegerType>(DestTy)->getBitWidth()) {
- Value *RHSOp = 0;
- if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
- RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
- } else if (PtrToIntInst *RHSC = dyn_cast<PtrToIntInst>(ICI.getOperand(1))) {
- RHSOp = RHSC->getOperand(0);
- // If the pointer types don't match, insert a bitcast.
- if (LHSCIOp->getType() != RHSOp->getType())
- RHSOp = Builder->CreateBitCast(RHSOp, LHSCIOp->getType());
- }
-
- if (RHSOp)
- return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSOp);
- }
-
- // The code below only handles extension cast instructions, so far.
- // Enforce this.
- if (LHSCI->getOpcode() != Instruction::ZExt &&
- LHSCI->getOpcode() != Instruction::SExt)
- return 0;
-
- bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
- bool isSignedCmp = ICI.isSigned();
-
- if (CastInst *CI = dyn_cast<CastInst>(ICI.getOperand(1))) {
- // Not an extension from the same type?
- RHSCIOp = CI->getOperand(0);
- if (RHSCIOp->getType() != LHSCIOp->getType())
- return 0;
-
- // If the signedness of the two casts doesn't agree (i.e. one is a sext
- // and the other is a zext), then we can't handle this.
- if (CI->getOpcode() != LHSCI->getOpcode())
- return 0;
-
- // Deal with equality cases early.
- if (ICI.isEquality())
- return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
-
- // A signed comparison of sign extended values simplifies into a
- // signed comparison.
- if (isSignedCmp && isSignedExt)
- return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
-
- // The other three cases all fold into an unsigned comparison.
- return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, RHSCIOp);
- }
-
- // If we aren't dealing with a constant on the RHS, exit early
- ConstantInt *CI = dyn_cast<ConstantInt>(ICI.getOperand(1));
- if (!CI)
- return 0;
-
-  // Compute the constant that would result if we truncated to SrcTy and then
-  // re-extended to DestTy.
- Constant *Res1 = ConstantExpr::getTrunc(CI, SrcTy);
- Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(),
- Res1, DestTy);
-
- // If the re-extended constant didn't change...
- if (Res2 == CI) {
- // Deal with equality cases early.
- if (ICI.isEquality())
- return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1);
-
- // A signed comparison of sign extended values simplifies into a
- // signed comparison.
- if (isSignedExt && isSignedCmp)
- return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1);
-
- // The other three cases all fold into an unsigned comparison.
- return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, Res1);
- }
-
- // The re-extended constant changed so the constant cannot be represented
- // in the shorter type. Consequently, we cannot emit a simple comparison.
-
- // First, handle some easy cases. We know the result cannot be equal at this
- // point so handle the ICI.isEquality() cases
- if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
- return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(ICI.getContext()));
- if (ICI.getPredicate() == ICmpInst::ICMP_NE)
- return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(ICI.getContext()));
-
-  // Evaluate the comparison for LT (we invert for GT below). The LE and GE
-  // cases should have been folded away previously and should not reach here.
- Value *Result;
- if (isSignedCmp) {
- // We're performing a signed comparison.
- if (cast<ConstantInt>(CI)->getValue().isNegative())
- Result = ConstantInt::getFalse(ICI.getContext()); // X < (small) --> false
- else
- Result = ConstantInt::getTrue(ICI.getContext()); // X < (large) --> true
- } else {
- // We're performing an unsigned comparison.
- if (isSignedExt) {
- // We're performing an unsigned comp with a sign extended value.
- // This is true if the input is >= 0. [aka >s -1]
- Constant *NegOne = Constant::getAllOnesValue(SrcTy);
- Result = Builder->CreateICmpSGT(LHSCIOp, NegOne, ICI.getName());
- } else {
- // Unsigned extend & unsigned compare -> always true.
- Result = ConstantInt::getTrue(ICI.getContext());
- }
- }
-
- // Finally, return the value computed.
- if (ICI.getPredicate() == ICmpInst::ICMP_ULT ||
- ICI.getPredicate() == ICmpInst::ICMP_SLT)
- return ReplaceInstUsesWith(ICI, Result);
-
- assert((ICI.getPredicate()==ICmpInst::ICMP_UGT ||
- ICI.getPredicate()==ICmpInst::ICMP_SGT) &&
- "ICmp should be folded!");
- if (Constant *CI = dyn_cast<Constant>(Result))
- return ReplaceInstUsesWith(ICI, ConstantExpr::getNot(CI));
- return BinaryOperator::CreateNot(Result);
-}
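
As a rough source-level illustration of the extension-cast fold handled above
(the identifiers below are made up for the sketch):

    // Sketch only: comparing two values widened by the same kind of
    // extension is the same as comparing the narrow originals.
    bool cmp_widened(short a, short b) {
      return (int)a < (int)b;            // icmp slt (sext a), (sext b)
    }
    bool cmp_narrow(short a, short b) {
      return a < b;                      // folded form, same result
    }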
-
-
-
-Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
- bool Changed = false;
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- /// Orders the operands of the compare so that they are listed from most
- /// complex to least complex. This puts constants before unary operators,
- /// before binary operators.
- if (getComplexity(Op0) < getComplexity(Op1)) {
- I.swapOperands();
- std::swap(Op0, Op1);
- Changed = true;
- }
-
- if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, TD))
- return ReplaceInstUsesWith(I, V);
-
- const Type *Ty = Op0->getType();
-
- // icmp's with boolean values can always be turned into bitwise operations
- if (Ty->isIntegerTy(1)) {
- switch (I.getPredicate()) {
- default: llvm_unreachable("Invalid icmp instruction!");
- case ICmpInst::ICMP_EQ: { // icmp eq i1 A, B -> ~(A^B)
- Value *Xor = Builder->CreateXor(Op0, Op1, I.getName()+"tmp");
- return BinaryOperator::CreateNot(Xor);
- }
-    case ICmpInst::ICMP_NE:                  // icmp ne i1 A, B -> A^B
- return BinaryOperator::CreateXor(Op0, Op1);
-
- case ICmpInst::ICMP_UGT:
- std::swap(Op0, Op1); // Change icmp ugt -> icmp ult
- // FALL THROUGH
- case ICmpInst::ICMP_ULT:{ // icmp ult i1 A, B -> ~A & B
- Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
- return BinaryOperator::CreateAnd(Not, Op1);
- }
- case ICmpInst::ICMP_SGT:
- std::swap(Op0, Op1); // Change icmp sgt -> icmp slt
- // FALL THROUGH
- case ICmpInst::ICMP_SLT: { // icmp slt i1 A, B -> A & ~B
- Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
- return BinaryOperator::CreateAnd(Not, Op0);
- }
- case ICmpInst::ICMP_UGE:
- std::swap(Op0, Op1); // Change icmp uge -> icmp ule
- // FALL THROUGH
- case ICmpInst::ICMP_ULE: { // icmp ule i1 A, B -> ~A | B
- Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
- return BinaryOperator::CreateOr(Not, Op1);
- }
- case ICmpInst::ICMP_SGE:
- std::swap(Op0, Op1); // Change icmp sge -> icmp sle
- // FALL THROUGH
- case ICmpInst::ICMP_SLE: { // icmp sle i1 A, B -> A | ~B
- Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
- return BinaryOperator::CreateOr(Not, Op0);
- }
- }
- }
-
- unsigned BitWidth = 0;
- if (TD)
- BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
- else if (Ty->isIntOrIntVectorTy())
- BitWidth = Ty->getScalarSizeInBits();
-
- bool isSignBit = false;
-
- // See if we are doing a comparison with a constant.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
- Value *A = 0, *B = 0;
-
- // (icmp ne/eq (sub A B) 0) -> (icmp ne/eq A, B)
- if (I.isEquality() && CI->isZero() &&
- match(Op0, m_Sub(m_Value(A), m_Value(B)))) {
- // (icmp cond A B) if cond is equality
- return new ICmpInst(I.getPredicate(), A, B);
- }
-
- // If we have an icmp le or icmp ge instruction, turn it into the
- // appropriate icmp lt or icmp gt instruction. This allows us to rely on
- // them being folded in the code below. The SimplifyICmpInst code has
- // already handled the edge cases for us, so we just assert on them.
- switch (I.getPredicate()) {
- default: break;
- case ICmpInst::ICMP_ULE:
- assert(!CI->isMaxValue(false)); // A <=u MAX -> TRUE
- return new ICmpInst(ICmpInst::ICMP_ULT, Op0,
- ConstantInt::get(CI->getContext(), CI->getValue()+1));
- case ICmpInst::ICMP_SLE:
- assert(!CI->isMaxValue(true)); // A <=s MAX -> TRUE
- return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
- ConstantInt::get(CI->getContext(), CI->getValue()+1));
- case ICmpInst::ICMP_UGE:
- assert(!CI->isMinValue(false)); // A >=u MIN -> TRUE
- return new ICmpInst(ICmpInst::ICMP_UGT, Op0,
- ConstantInt::get(CI->getContext(), CI->getValue()-1));
- case ICmpInst::ICMP_SGE:
- assert(!CI->isMinValue(true)); // A >=s MIN -> TRUE
- return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
- ConstantInt::get(CI->getContext(), CI->getValue()-1));
- }
-
- // If this comparison is a normal comparison, it demands all
- // bits, if it is a sign bit comparison, it only demands the sign bit.
- bool UnusedBit;
- isSignBit = isSignBitCheck(I.getPredicate(), CI, UnusedBit);
- }
-
- // See if we can fold the comparison based on range information we can get
- // by checking whether bits are known to be zero or one in the input.
- if (BitWidth != 0) {
- APInt Op0KnownZero(BitWidth, 0), Op0KnownOne(BitWidth, 0);
- APInt Op1KnownZero(BitWidth, 0), Op1KnownOne(BitWidth, 0);
-
- if (SimplifyDemandedBits(I.getOperandUse(0),
- isSignBit ? APInt::getSignBit(BitWidth)
- : APInt::getAllOnesValue(BitWidth),
- Op0KnownZero, Op0KnownOne, 0))
- return &I;
- if (SimplifyDemandedBits(I.getOperandUse(1),
- APInt::getAllOnesValue(BitWidth),
- Op1KnownZero, Op1KnownOne, 0))
- return &I;
-
- // Given the known and unknown bits, compute a range that the LHS could be
- // in. Compute the Min, Max and RHS values based on the known bits. For the
- // EQ and NE we use unsigned values.
- APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
- APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
- if (I.isSigned()) {
- ComputeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
- Op0Min, Op0Max);
- ComputeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
- Op1Min, Op1Max);
- } else {
- ComputeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
- Op0Min, Op0Max);
- ComputeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
- Op1Min, Op1Max);
- }
-
- // If Min and Max are known to be the same, then SimplifyDemandedBits
- // figured out that the LHS is a constant. Just constant fold this now so
- // that code below can assume that Min != Max.
- if (!isa<Constant>(Op0) && Op0Min == Op0Max)
- return new ICmpInst(I.getPredicate(),
- ConstantInt::get(I.getContext(), Op0Min), Op1);
- if (!isa<Constant>(Op1) && Op1Min == Op1Max)
- return new ICmpInst(I.getPredicate(), Op0,
- ConstantInt::get(I.getContext(), Op1Min));
-
- // Based on the range information we know about the LHS, see if we can
- // simplify this comparison. For example, (x&4) < 8 is always true.
- switch (I.getPredicate()) {
- default: llvm_unreachable("Unknown icmp opcode!");
- case ICmpInst::ICMP_EQ:
- if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
- return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
- break;
- case ICmpInst::ICMP_NE:
- if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
- return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
- break;
- case ICmpInst::ICMP_ULT:
- if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
- return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
- if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
- return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
- if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
- return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
- if (Op1Max == Op0Min+1) // A <u C -> A == C-1 if min(A)+1 == C
- return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
- ConstantInt::get(CI->getContext(), CI->getValue()-1));
-
- // (x <u 2147483648) -> (x >s -1) -> true if sign bit clear
- if (CI->isMinValue(true))
- return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
- Constant::getAllOnesValue(Op0->getType()));
- }
- break;
- case ICmpInst::ICMP_UGT:
- if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
- return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
-      if (Op0Max.ule(Op1Min))          // A >u B -> false if max(A) <= min(B)
- return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
-
- if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
- return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
-        if (Op1Min == Op0Max-1)  // A >u C -> A == C+1 if max(A)-1 == C
- return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
- ConstantInt::get(CI->getContext(), CI->getValue()+1));
-
- // (x >u 2147483647) -> (x <s 0) -> true if sign bit set
- if (CI->isMaxValue(true))
- return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
- Constant::getNullValue(Op0->getType()));
- }
- break;
- case ICmpInst::ICMP_SLT:
-      if (Op0Max.slt(Op1Min))          // A <s B -> true if max(A) < min(B)
-        return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
-      if (Op0Min.sge(Op1Max))          // A <s B -> false if min(A) >= max(B)
- return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
- if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
- return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
- if (Op1Max == Op0Min+1) // A <s C -> A == C-1 if min(A)+1 == C
- return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
- ConstantInt::get(CI->getContext(), CI->getValue()-1));
- }
- break;
- case ICmpInst::ICMP_SGT:
- if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
- return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
- if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
- return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
-
- if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
- return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
- if (Op1Min == Op0Max-1) // A >s C -> A == C+1 if max(A)-1 == C
- return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
- ConstantInt::get(CI->getContext(), CI->getValue()+1));
- }
- break;
- case ICmpInst::ICMP_SGE:
- assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
- if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
- return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
- if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
- return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
- break;
- case ICmpInst::ICMP_SLE:
- assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
- if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
- return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
- if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
- return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
- break;
- case ICmpInst::ICMP_UGE:
- assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
- if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
- return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
- if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
- return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
- break;
- case ICmpInst::ICMP_ULE:
- assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
- if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
- return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
- if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
- return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
- break;
- }
-
- // Turn a signed comparison into an unsigned one if both operands
- // are known to have the same sign.
- if (I.isSigned() &&
- ((Op0KnownZero.isNegative() && Op1KnownZero.isNegative()) ||
- (Op0KnownOne.isNegative() && Op1KnownOne.isNegative())))
- return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
- }
-
- // Test if the ICmpInst instruction is used exclusively by a select as
- // part of a minimum or maximum operation. If so, refrain from doing
- // any other folding. This helps out other analyses which understand
- // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
- // and CodeGen. And in this case, at least one of the comparison
- // operands has at least one user besides the compare (the select),
- // which would often largely negate the benefit of folding anyway.
- if (I.hasOneUse())
- if (SelectInst *SI = dyn_cast<SelectInst>(*I.use_begin()))
- if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
- (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
- return 0;
-
- // See if we are doing a comparison between a constant and an instruction that
- // can be folded into the comparison.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
- // Since the RHS is a ConstantInt (CI), if the left hand side is an
- // instruction, see if that instruction also has constants so that the
- // instruction can be folded into the icmp
- if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
- if (Instruction *Res = visitICmpInstWithInstAndIntCst(I, LHSI, CI))
- return Res;
- }
-
- // Handle icmp with constant (but not simple integer constant) RHS
- if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
- if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
- switch (LHSI->getOpcode()) {
- case Instruction::GetElementPtr:
- // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
- if (RHSC->isNullValue() &&
- cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices())
- return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
- Constant::getNullValue(LHSI->getOperand(0)->getType()));
- break;
- case Instruction::PHI:
- // Only fold icmp into the PHI if the phi and icmp are in the same
- // block. If in the same block, we're encouraging jump threading. If
- // not, we are just pessimizing the code by making an i1 phi.
- if (LHSI->getParent() == I.getParent())
- if (Instruction *NV = FoldOpIntoPhi(I, true))
- return NV;
- break;
- case Instruction::Select: {
- // If either operand of the select is a constant, we can fold the
- // comparison into the select arms, which will cause one to be
- // constant folded and the select turned into a bitwise or.
- Value *Op1 = 0, *Op2 = 0;
- if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1)))
- Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
- if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2)))
- Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
-
- // We only want to perform this transformation if it will not lead to
- // additional code. This is true if either both sides of the select
- // fold to a constant (in which case the icmp is replaced with a select
- // which will usually simplify) or this is the only user of the
- // select (in which case we are trading a select+icmp for a simpler
- // select+icmp).
- if ((Op1 && Op2) || (LHSI->hasOneUse() && (Op1 || Op2))) {
- if (!Op1)
- Op1 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(1),
- RHSC, I.getName());
- if (!Op2)
- Op2 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(2),
- RHSC, I.getName());
- return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
- }
- break;
- }
- case Instruction::Call:
- // If we have (malloc != null), and if the malloc has a single use, we
- // can assume it is successful and remove the malloc.
- if (isMalloc(LHSI) && LHSI->hasOneUse() &&
- isa<ConstantPointerNull>(RHSC)) {
- // Need to explicitly erase malloc call here, instead of adding it to
- // Worklist, because it won't get DCE'd from the Worklist since
- // isInstructionTriviallyDead() returns false for function calls.
- // It is OK to replace LHSI/MallocCall with Undef because the
- // instruction that uses it will be erased via Worklist.
- if (extractMallocCall(LHSI)) {
- LHSI->replaceAllUsesWith(UndefValue::get(LHSI->getType()));
- EraseInstFromFunction(*LHSI);
- return ReplaceInstUsesWith(I,
- ConstantInt::get(Type::getInt1Ty(I.getContext()),
- !I.isTrueWhenEqual()));
- }
- if (CallInst* MallocCall = extractMallocCallFromBitCast(LHSI))
- if (MallocCall->hasOneUse()) {
- MallocCall->replaceAllUsesWith(
- UndefValue::get(MallocCall->getType()));
- EraseInstFromFunction(*MallocCall);
- Worklist.Add(LHSI); // The malloc's bitcast use.
- return ReplaceInstUsesWith(I,
- ConstantInt::get(Type::getInt1Ty(I.getContext()),
- !I.isTrueWhenEqual()));
- }
- }
- break;
- case Instruction::IntToPtr:
- // icmp pred inttoptr(X), null -> icmp pred X, 0
- if (RHSC->isNullValue() && TD &&
- TD->getIntPtrType(RHSC->getContext()) ==
- LHSI->getOperand(0)->getType())
- return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
- Constant::getNullValue(LHSI->getOperand(0)->getType()));
- break;
-
- case Instruction::Load:
- // Try to optimize things like "A[i] > 4" to index computations.
- if (GetElementPtrInst *GEP =
- dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
- if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
- !cast<LoadInst>(LHSI)->isVolatile())
- if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV, I))
- return Res;
- }
- break;
- }
- }
-
- // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
- if (Instruction *NI = FoldGEPICmp(GEP, Op1, I.getPredicate(), I))
- return NI;
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
- if (Instruction *NI = FoldGEPICmp(GEP, Op0,
- ICmpInst::getSwappedPredicate(I.getPredicate()), I))
- return NI;
-
- // Test to see if the operands of the icmp are casted versions of other
- // values. If the ptr->ptr cast can be stripped off both arguments, we do so
- // now.
- if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
- if (Op0->getType()->isPointerTy() &&
- (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
- // We keep moving the cast from the left operand over to the right
- // operand, where it can often be eliminated completely.
- Op0 = CI->getOperand(0);
-
- // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
- // so eliminate it as well.
- if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
- Op1 = CI2->getOperand(0);
-
- // If Op1 is a constant, we can fold the cast into the constant.
- if (Op0->getType() != Op1->getType()) {
- if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
- Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
- } else {
- // Otherwise, cast the RHS right before the icmp
- Op1 = Builder->CreateBitCast(Op1, Op0->getType());
- }
- }
- return new ICmpInst(I.getPredicate(), Op0, Op1);
- }
- }
-
- if (isa<CastInst>(Op0)) {
- // Handle the special case of: icmp (cast bool to X), <cst>
- // This comes up when you have code like
- // int X = A < B;
- // if (X) ...
- // For generality, we handle any zero-extension of any operand comparison
- // with a constant or another cast from the same type.
- if (isa<Constant>(Op1) || isa<CastInst>(Op1))
- if (Instruction *R = visitICmpInstWithCastAndCast(I))
- return R;
- }
-
- // See if it's the same type of instruction on the left and right.
- if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
- if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
- if (Op0I->getOpcode() == Op1I->getOpcode() && Op0I->hasOneUse() &&
- Op1I->hasOneUse() && Op0I->getOperand(1) == Op1I->getOperand(1)) {
- switch (Op0I->getOpcode()) {
- default: break;
- case Instruction::Add:
- case Instruction::Sub:
- case Instruction::Xor:
- if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
- return new ICmpInst(I.getPredicate(), Op0I->getOperand(0),
- Op1I->getOperand(0));
- // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
- if (CI->getValue().isSignBit()) {
- ICmpInst::Predicate Pred = I.isSigned()
- ? I.getUnsignedPredicate()
- : I.getSignedPredicate();
- return new ICmpInst(Pred, Op0I->getOperand(0),
- Op1I->getOperand(0));
- }
-
- if (CI->getValue().isMaxSignedValue()) {
- ICmpInst::Predicate Pred = I.isSigned()
- ? I.getUnsignedPredicate()
- : I.getSignedPredicate();
- Pred = I.getSwappedPredicate(Pred);
- return new ICmpInst(Pred, Op0I->getOperand(0),
- Op1I->getOperand(0));
- }
- }
- break;
- case Instruction::Mul:
- if (!I.isEquality())
- break;
-
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
- // a * Cst icmp eq/ne b * Cst --> a & Mask icmp b & Mask
- // Mask = -1 >> count-trailing-zeros(Cst).
- if (!CI->isZero() && !CI->isOne()) {
- const APInt &AP = CI->getValue();
- ConstantInt *Mask = ConstantInt::get(I.getContext(),
- APInt::getLowBitsSet(AP.getBitWidth(),
- AP.getBitWidth() -
- AP.countTrailingZeros()));
- Value *And1 = Builder->CreateAnd(Op0I->getOperand(0), Mask);
- Value *And2 = Builder->CreateAnd(Op1I->getOperand(0), Mask);
- return new ICmpInst(I.getPredicate(), And1, And2);
- }
- }
- break;
- }
- }
- }
- }
-
- // ~x < ~y --> y < x
- { Value *A, *B;
- if (match(Op0, m_Not(m_Value(A))) &&
- match(Op1, m_Not(m_Value(B))))
- return new ICmpInst(I.getPredicate(), B, A);
- }
-
- if (I.isEquality()) {
- Value *A, *B, *C, *D;
-
- // -x == -y --> x == y
- if (match(Op0, m_Neg(m_Value(A))) &&
- match(Op1, m_Neg(m_Value(B))))
- return new ICmpInst(I.getPredicate(), A, B);
-
- if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
- if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
- Value *OtherVal = A == Op1 ? B : A;
- return new ICmpInst(I.getPredicate(), OtherVal,
- Constant::getNullValue(A->getType()));
- }
-
- if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
- // A^c1 == C^c2 --> A == C^(c1^c2)
- ConstantInt *C1, *C2;
- if (match(B, m_ConstantInt(C1)) &&
- match(D, m_ConstantInt(C2)) && Op1->hasOneUse()) {
- Constant *NC = ConstantInt::get(I.getContext(),
- C1->getValue() ^ C2->getValue());
- Value *Xor = Builder->CreateXor(C, NC, "tmp");
- return new ICmpInst(I.getPredicate(), A, Xor);
- }
-
- // A^B == A^D -> B == D
- if (A == C) return new ICmpInst(I.getPredicate(), B, D);
- if (A == D) return new ICmpInst(I.getPredicate(), B, C);
- if (B == C) return new ICmpInst(I.getPredicate(), A, D);
- if (B == D) return new ICmpInst(I.getPredicate(), A, C);
- }
- }
-
- if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
- (A == Op0 || B == Op0)) {
- // A == (A^B) -> B == 0
- Value *OtherVal = A == Op0 ? B : A;
- return new ICmpInst(I.getPredicate(), OtherVal,
- Constant::getNullValue(A->getType()));
- }
-
- // (A-B) == A -> B == 0
- if (match(Op0, m_Sub(m_Specific(Op1), m_Value(B))))
- return new ICmpInst(I.getPredicate(), B,
- Constant::getNullValue(B->getType()));
-
- // A == (A-B) -> B == 0
- if (match(Op1, m_Sub(m_Specific(Op0), m_Value(B))))
- return new ICmpInst(I.getPredicate(), B,
- Constant::getNullValue(B->getType()));
-
- // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
- if (Op0->hasOneUse() && Op1->hasOneUse() &&
- match(Op0, m_And(m_Value(A), m_Value(B))) &&
- match(Op1, m_And(m_Value(C), m_Value(D)))) {
- Value *X = 0, *Y = 0, *Z = 0;
-
- if (A == C) {
- X = B; Y = D; Z = A;
- } else if (A == D) {
- X = B; Y = C; Z = A;
- } else if (B == C) {
- X = A; Y = D; Z = B;
- } else if (B == D) {
- X = A; Y = C; Z = B;
- }
-
- if (X) { // Build (X^Y) & Z
- Op1 = Builder->CreateXor(X, Y, "tmp");
- Op1 = Builder->CreateAnd(Op1, Z, "tmp");
- I.setOperand(0, Op1);
- I.setOperand(1, Constant::getNullValue(Op1->getType()));
- return &I;
- }
- }
- }
-
- {
- Value *X; ConstantInt *Cst;
- // icmp X+Cst, X
- if (match(Op0, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op1 == X)
- return FoldICmpAddOpCst(I, X, Cst, I.getPredicate(), Op0);
-
- // icmp X, X+Cst
- if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
- return FoldICmpAddOpCst(I, X, Cst, I.getSwappedPredicate(), Op1);
- }
- return Changed ? &I : 0;
-}
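
Two of the visitICmpInst equality folds above, shown as a hedged C++ sketch
(the helper names are invented for illustration):

    // Sketch only: (A ^ B) == A folds to B == 0, and ~x < ~y (unsigned)
    // folds to y < x, as in the matchers near the end of the function.
    bool xor_eq_self(unsigned a, unsigned b) {
      return (a ^ b) == a;               // becomes b == 0
    }
    bool not_lt(unsigned x, unsigned y) {
      return ~x < ~y;                    // becomes y < x
    }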
-
-
-
-
-
-
-/// FoldFCmp_IntToFP_Cst - Fold fcmp ([us]itofp x, cst) if possible.
-///
-Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
- Instruction *LHSI,
- Constant *RHSC) {
- if (!isa<ConstantFP>(RHSC)) return 0;
- const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
-
- // Get the width of the mantissa. We don't want to hack on conversions that
- // might lose information from the integer, e.g. "i64 -> float"
- int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
- if (MantissaWidth == -1) return 0; // Unknown.
-
-  // Check that the input is converted from an integer type small enough to
-  // preserve all of its bits.  TODO: check here for "known" sign bits.
- // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e.
- unsigned InputSize = LHSI->getOperand(0)->getType()->getScalarSizeInBits();
-
- // If this is a uitofp instruction, we need an extra bit to hold the sign.
- bool LHSUnsigned = isa<UIToFPInst>(LHSI);
- if (LHSUnsigned)
- ++InputSize;
-
- // If the conversion would lose info, don't hack on this.
- if ((int)InputSize > MantissaWidth)
- return 0;
-
- // Otherwise, we can potentially simplify the comparison. We know that it
- // will always come through as an integer value and we know the constant is
- // not a NAN (it would have been previously simplified).
- assert(!RHS.isNaN() && "NaN comparison not already folded!");
-
- ICmpInst::Predicate Pred;
- switch (I.getPredicate()) {
- default: llvm_unreachable("Unexpected predicate!");
- case FCmpInst::FCMP_UEQ:
- case FCmpInst::FCMP_OEQ:
- Pred = ICmpInst::ICMP_EQ;
- break;
- case FCmpInst::FCMP_UGT:
- case FCmpInst::FCMP_OGT:
- Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
- break;
- case FCmpInst::FCMP_UGE:
- case FCmpInst::FCMP_OGE:
- Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
- break;
- case FCmpInst::FCMP_ULT:
- case FCmpInst::FCMP_OLT:
- Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
- break;
- case FCmpInst::FCMP_ULE:
- case FCmpInst::FCMP_OLE:
- Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
- break;
- case FCmpInst::FCMP_UNE:
- case FCmpInst::FCMP_ONE:
- Pred = ICmpInst::ICMP_NE;
- break;
- case FCmpInst::FCMP_ORD:
- return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
- case FCmpInst::FCMP_UNO:
- return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
- }
-
- const IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
-
- // Now we know that the APFloat is a normal number, zero or inf.
-
- // See if the FP constant is too large for the integer. For example,
- // comparing an i8 to 300.0.
- unsigned IntWidth = IntTy->getScalarSizeInBits();
-
- if (!LHSUnsigned) {
- // If the RHS value is > SignedMax, fold the comparison. This handles +INF
- // and large values.
- APFloat SMax(RHS.getSemantics(), APFloat::fcZero, false);
- SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
- APFloat::rmNearestTiesToEven);
- if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
- if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
- Pred == ICmpInst::ICMP_SLE)
- return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
- return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
- }
- } else {
- // If the RHS value is > UnsignedMax, fold the comparison. This handles
- // +INF and large values.
- APFloat UMax(RHS.getSemantics(), APFloat::fcZero, false);
- UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
- APFloat::rmNearestTiesToEven);
- if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
- if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
- Pred == ICmpInst::ICMP_ULE)
- return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
- return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
- }
- }
-
- if (!LHSUnsigned) {
- // See if the RHS value is < SignedMin.
- APFloat SMin(RHS.getSemantics(), APFloat::fcZero, false);
- SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
- APFloat::rmNearestTiesToEven);
- if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
- if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
- Pred == ICmpInst::ICMP_SGE)
- return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
- return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
- }
- }
-
- // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
- // [0, UMAX], but it may still be fractional. See if it is fractional by
- // casting the FP value to the integer value and back, checking for equality.
- // Don't do this for zero, because -0.0 is not fractional.
- Constant *RHSInt = LHSUnsigned
- ? ConstantExpr::getFPToUI(RHSC, IntTy)
- : ConstantExpr::getFPToSI(RHSC, IntTy);
- if (!RHS.isZero()) {
- bool Equal = LHSUnsigned
- ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
- : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
- if (!Equal) {
- // If we had a comparison against a fractional value, we have to adjust
- // the compare predicate and sometimes the value. RHSC is rounded towards
- // zero at this point.
- switch (Pred) {
- default: llvm_unreachable("Unexpected integer comparison!");
- case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
- return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
- case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
- return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
- case ICmpInst::ICMP_ULE:
- // (float)int <= 4.4 --> int <= 4
- // (float)int <= -4.4 --> false
- if (RHS.isNegative())
- return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
- break;
- case ICmpInst::ICMP_SLE:
- // (float)int <= 4.4 --> int <= 4
- // (float)int <= -4.4 --> int < -4
- if (RHS.isNegative())
- Pred = ICmpInst::ICMP_SLT;
- break;
- case ICmpInst::ICMP_ULT:
- // (float)int < -4.4 --> false
- // (float)int < 4.4 --> int <= 4
- if (RHS.isNegative())
- return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
- Pred = ICmpInst::ICMP_ULE;
- break;
- case ICmpInst::ICMP_SLT:
- // (float)int < -4.4 --> int < -4
- // (float)int < 4.4 --> int <= 4
- if (!RHS.isNegative())
- Pred = ICmpInst::ICMP_SLE;
- break;
- case ICmpInst::ICMP_UGT:
- // (float)int > 4.4 --> int > 4
- // (float)int > -4.4 --> true
- if (RHS.isNegative())
- return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
- break;
- case ICmpInst::ICMP_SGT:
- // (float)int > 4.4 --> int > 4
- // (float)int > -4.4 --> int >= -4
- if (RHS.isNegative())
- Pred = ICmpInst::ICMP_SGE;
- break;
- case ICmpInst::ICMP_UGE:
- // (float)int >= -4.4 --> true
- // (float)int >= 4.4 --> int > 4
- if (!RHS.isNegative())
- return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
- Pred = ICmpInst::ICMP_UGT;
- break;
- case ICmpInst::ICMP_SGE:
- // (float)int >= -4.4 --> int >= -4
- // (float)int >= 4.4 --> int > 4
- if (!RHS.isNegative())
- Pred = ICmpInst::ICMP_SGT;
- break;
- }
- }
- }
-
- // Lower this FP comparison into an appropriate integer version of the
- // comparison.
- return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
-}
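
A small, illustrative C++ example of the int-to-FP comparison fold above; the
constants mirror the i8-versus-300.0 case mentioned in the comments, and the
function names are not taken from the code:

    // Sketch only: an i8 converted to float can never reach 300.0, so the
    // first compare folds to 'true'; a fractional bound such as 4.4
    // instead becomes the integer compare x <= 4.
    bool always_true(signed char x) {
      return (float)x < 300.0f;
    }
    bool becomes_le_4(signed char x) {
      return (float)x < 4.4f;            // folds to x <= 4
    }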
-
-Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
- bool Changed = false;
-
- /// Orders the operands of the compare so that they are listed from most
- /// complex to least complex. This puts constants before unary operators,
- /// before binary operators.
- if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
- I.swapOperands();
- Changed = true;
- }
-
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, TD))
- return ReplaceInstUsesWith(I, V);
-
- // Simplify 'fcmp pred X, X'
- if (Op0 == Op1) {
- switch (I.getPredicate()) {
- default: llvm_unreachable("Unknown predicate!");
- case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
- case FCmpInst::FCMP_ULT: // True if unordered or less than
- case FCmpInst::FCMP_UGT: // True if unordered or greater than
- case FCmpInst::FCMP_UNE: // True if unordered or not equal
- // Canonicalize these to be 'fcmp uno %X, 0.0'.
- I.setPredicate(FCmpInst::FCMP_UNO);
- I.setOperand(1, Constant::getNullValue(Op0->getType()));
- return &I;
-
- case FCmpInst::FCMP_ORD: // True if ordered (no nans)
- case FCmpInst::FCMP_OEQ: // True if ordered and equal
- case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
- case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
- // Canonicalize these to be 'fcmp ord %X, 0.0'.
- I.setPredicate(FCmpInst::FCMP_ORD);
- I.setOperand(1, Constant::getNullValue(Op0->getType()));
- return &I;
- }
- }
-
- // Handle fcmp with constant RHS
- if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
- if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
- switch (LHSI->getOpcode()) {
- case Instruction::PHI:
- // Only fold fcmp into the PHI if the phi and fcmp are in the same
- // block. If in the same block, we're encouraging jump threading. If
- // not, we are just pessimizing the code by making an i1 phi.
- if (LHSI->getParent() == I.getParent())
- if (Instruction *NV = FoldOpIntoPhi(I, true))
- return NV;
- break;
- case Instruction::SIToFP:
- case Instruction::UIToFP:
- if (Instruction *NV = FoldFCmp_IntToFP_Cst(I, LHSI, RHSC))
- return NV;
- break;
- case Instruction::Select: {
- // If either operand of the select is a constant, we can fold the
- // comparison into the select arms, which will cause one to be
- // constant folded and the select turned into a bitwise or.
- Value *Op1 = 0, *Op2 = 0;
- if (LHSI->hasOneUse()) {
- if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
- // Fold the known value into the constant operand.
- Op1 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
- // Insert a new FCmp of the other select operand.
- Op2 = Builder->CreateFCmp(I.getPredicate(),
- LHSI->getOperand(2), RHSC, I.getName());
- } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
- // Fold the known value into the constant operand.
- Op2 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
- // Insert a new FCmp of the other select operand.
- Op1 = Builder->CreateFCmp(I.getPredicate(), LHSI->getOperand(1),
- RHSC, I.getName());
- }
- }
-
- if (Op1)
- return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
- break;
- }
- case Instruction::Load:
- if (GetElementPtrInst *GEP =
- dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
- if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
- !cast<LoadInst>(LHSI)->isVolatile())
- if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV, I))
- return Res;
- }
- break;
- }
- }
-
- return Changed ? &I : 0;
-}
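
For the 'fcmp pred X, X' canonicalization above, a minimal illustrative sketch
(the function name is invented):

    // Sketch only: x != x is true exactly when x is NaN, which is what the
    // canonical 'fcmp uno %X, 0.0' form expresses.
    bool is_nan_like(double x) {
      return x != x;                     // rewritten as fcmp uno x, 0.0
    }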
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
deleted file mode 100644
index 0f2a24f..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ /dev/null
@@ -1,614 +0,0 @@
-//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the visit functions for load, store and alloca.
-//
-//===----------------------------------------------------------------------===//
-
-#include "InstCombine.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Transforms/Utils/BasicBlockUtils.h"
-#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/ADT/Statistic.h"
-using namespace llvm;
-
-STATISTIC(NumDeadStore, "Number of dead stores eliminated");
-
-Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
- // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
- if (AI.isArrayAllocation()) { // Check C != 1
- if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
- const Type *NewTy =
- ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
- assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
- AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
- New->setAlignment(AI.getAlignment());
-
- // Scan to the end of the allocation instructions, to skip over a block of
- // allocas if possible...also skip interleaved debug info
- //
- BasicBlock::iterator It = New;
- while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;
-
-    // Now that It is pointing to the first non-allocation-inst in the block,
- // insert our getelementptr instruction...
- //
- Value *NullIdx =Constant::getNullValue(Type::getInt32Ty(AI.getContext()));
- Value *Idx[2];
- Idx[0] = NullIdx;
- Idx[1] = NullIdx;
- Value *V = GetElementPtrInst::CreateInBounds(New, Idx, Idx + 2,
- New->getName()+".sub", It);
-
- // Now make everything use the getelementptr instead of the original
- // allocation.
- return ReplaceInstUsesWith(AI, V);
- } else if (isa<UndefValue>(AI.getArraySize())) {
- return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
- }
- }
-
- if (TD && isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized()) {
- // If alloca'ing a zero byte object, replace the alloca with a null pointer.
- // Note that we only do this for alloca's, because malloc should allocate
- // and return a unique pointer, even for a zero byte allocation.
- if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
- return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
-
- // If the alignment is 0 (unspecified), assign it the preferred alignment.
- if (AI.getAlignment() == 0)
- AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
- }
-
- return 0;
-}
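
A rough conceptual sketch of the array-alloca canonicalization performed above,
assuming a Clang/GCC-style __builtin_alloca for the "before" form; the names
are illustrative only:

    // Sketch only: 'alloca i32, i32 8' becomes 'alloca [8 x i32]' plus a
    // getelementptr to element 0, conceptually:
    void before_form() {
      int *p = static_cast<int *>(__builtin_alloca(8 * sizeof(int)));
      p[0] = 1;
    }
    void after_form() {
      int buf[8];                        // alloca [8 x i32]
      int *p = &buf[0];                  // gep ..., i32 0, i32 0
      p[0] = 1;
    }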
-
-
-/// InstCombineLoadCast - Fold 'load (cast P)' -> cast (load P)' when possible.
-static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
- const TargetData *TD) {
- User *CI = cast<User>(LI.getOperand(0));
- Value *CastOp = CI->getOperand(0);
-
- const PointerType *DestTy = cast<PointerType>(CI->getType());
- const Type *DestPTy = DestTy->getElementType();
- if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
-
- // If the address spaces don't match, don't eliminate the cast.
- if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
- return 0;
-
- const Type *SrcPTy = SrcTy->getElementType();
-
- if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
- DestPTy->isVectorTy()) {
- // If the source is an array, the code below will not succeed. Check to
- // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
- // constants.
- if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
- if (Constant *CSrc = dyn_cast<Constant>(CastOp))
- if (ASrcTy->getNumElements() != 0) {
- Value *Idxs[2];
- Idxs[0] = Constant::getNullValue(Type::getInt32Ty(LI.getContext()));
- Idxs[1] = Idxs[0];
- CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
- SrcTy = cast<PointerType>(CastOp->getType());
- SrcPTy = SrcTy->getElementType();
- }
-
- if (IC.getTargetData() &&
- (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
- SrcPTy->isVectorTy()) &&
- // Do not allow turning this into a load of an integer, which is then
- // casted to a pointer, this pessimizes pointer analysis a lot.
- (SrcPTy->isPointerTy() == LI.getType()->isPointerTy()) &&
- IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
- IC.getTargetData()->getTypeSizeInBits(DestPTy)) {
-
- // Okay, we are casting from one integer or pointer type to another of
- // the same size. Instead of casting the pointer before the load, cast
- // the result of the loaded value.
- LoadInst *NewLoad =
- IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
- NewLoad->setAlignment(LI.getAlignment());
- // Now cast the result of the load.
- return new BitCastInst(NewLoad, LI.getType());
- }
- }
- }
- return 0;
-}
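
A hedged C++20 sketch of the load-through-cast fold (the "before" form is shown
only to mirror the IR pattern and ignores C++ strict-aliasing rules, which do
not constrain the IR-level transform; names are illustrative):

    #include <bit>
    unsigned load_via_ptr_cast(float *p) {
      return *reinterpret_cast<unsigned *>(p);   // load (bitcast P)
    }
    unsigned load_then_cast(float *p) {
      return std::bit_cast<unsigned>(*p);        // bitcast (load P)
    }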
-
-Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
- Value *Op = LI.getOperand(0);
-
- // Attempt to improve the alignment.
- if (TD) {
- unsigned KnownAlign =
- GetOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()));
- if (KnownAlign >
- (LI.getAlignment() == 0 ? TD->getABITypeAlignment(LI.getType()) :
- LI.getAlignment()))
- LI.setAlignment(KnownAlign);
- }
-
- // load (cast X) --> cast (load X) iff safe.
- if (isa<CastInst>(Op))
- if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
- return Res;
-
- // None of the following transforms are legal for volatile loads.
- if (LI.isVolatile()) return 0;
-
- // Do really simple store-to-load forwarding and load CSE, to catch cases
-  // where there are several consecutive memory accesses to the same location,
- // separated by a few arithmetic operations.
- BasicBlock::iterator BBI = &LI;
- if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI,6))
- return ReplaceInstUsesWith(LI, AvailableVal);
-
- // load(gep null, ...) -> unreachable
- if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
- const Value *GEPI0 = GEPI->getOperand(0);
- // TODO: Consider a target hook for valid address spaces for this xform.
- if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
- // Insert a new store to null instruction before the load to indicate
- // that this code is not reachable. We do this instead of inserting
- // an unreachable instruction directly because we cannot modify the
- // CFG.
- new StoreInst(UndefValue::get(LI.getType()),
- Constant::getNullValue(Op->getType()), &LI);
- return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
- }
- }
-
- // load null/undef -> unreachable
- // TODO: Consider a target hook for valid address spaces for this xform.
- if (isa<UndefValue>(Op) ||
- (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
- // Insert a new store to null instruction before the load to indicate that
- // this code is not reachable. We do this instead of inserting an
- // unreachable instruction directly because we cannot modify the CFG.
- new StoreInst(UndefValue::get(LI.getType()),
- Constant::getNullValue(Op->getType()), &LI);
- return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
- }
-
- // Instcombine load (constantexpr_cast global) -> cast (load global)
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
- if (CE->isCast())
- if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
- return Res;
-
- if (Op->hasOneUse()) {
- // Change select and PHI nodes to select values instead of addresses: this
-    // helps alias analysis out a lot, allows many other simplifications, and
- // exposes redundancy in the code.
- //
- // Note that we cannot do the transformation unless we know that the
- // introduced loads cannot trap! Something like this is valid as long as
- // the condition is always false: load (select bool %C, int* null, int* %G),
- // but it would not be valid if we transformed it to load from null
- // unconditionally.
- //
- if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
- // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
- unsigned Align = LI.getAlignment();
- if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, TD) &&
- isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, TD)) {
- LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
- SI->getOperand(1)->getName()+".val");
- LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
- SI->getOperand(2)->getName()+".val");
- V1->setAlignment(Align);
- V2->setAlignment(Align);
- return SelectInst::Create(SI->getCondition(), V1, V2);
- }
-
- // load (select (cond, null, P)) -> load P
- if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
- if (C->isNullValue()) {
- LI.setOperand(0, SI->getOperand(2));
- return &LI;
- }
-
- // load (select (cond, P, null)) -> load P
- if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
- if (C->isNullValue()) {
- LI.setOperand(0, SI->getOperand(1));
- return &LI;
- }
- }
- }
- return 0;
-}
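
A minimal sketch of the load-of-select rewrite above, assuming both pointers
are safe to dereference unconditionally (function names are invented):

    int load_of_select(bool c, int *a, int *b) {
      return *(c ? a : b);               // load (select c, a, b)
    }
    int select_of_loads(bool c, int *a, int *b) {
      return c ? *a : *b;                // select c, (load a), (load b)
    }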
-
-/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
-/// when possible. This makes it generally easy to do alias analysis and/or
-/// SROA/mem2reg of the memory object.
-static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
- User *CI = cast<User>(SI.getOperand(1));
- Value *CastOp = CI->getOperand(0);
-
- const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
- const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
- if (SrcTy == 0) return 0;
-
- const Type *SrcPTy = SrcTy->getElementType();
-
- if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
- return 0;
-
- /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
- /// to its first element. This allows us to handle things like:
- /// store i32 xxx, (bitcast {foo*, float}* %P to i32*)
- /// on 32-bit hosts.
- SmallVector<Value*, 4> NewGEPIndices;
-
- // If the source is an array, the code below will not succeed. Check to
- // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
- // constants.
- if (SrcPTy->isArrayTy() || SrcPTy->isStructTy()) {
- // Index through pointer.
- Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
- NewGEPIndices.push_back(Zero);
-
- while (1) {
- if (const StructType *STy = dyn_cast<StructType>(SrcPTy)) {
- if (!STy->getNumElements()) /* Struct can be empty {} */
- break;
- NewGEPIndices.push_back(Zero);
- SrcPTy = STy->getElementType(0);
- } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
- NewGEPIndices.push_back(Zero);
- SrcPTy = ATy->getElementType();
- } else {
- break;
- }
- }
-
- SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
- }
-
- if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
- return 0;
-
- // If the pointers point into different address spaces or if they point to
- // values with different sizes, we can't do the transformation.
- if (!IC.getTargetData() ||
- SrcTy->getAddressSpace() !=
- cast<PointerType>(CI->getType())->getAddressSpace() ||
- IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
- IC.getTargetData()->getTypeSizeInBits(DestPTy))
- return 0;
-
- // Okay, we are casting from one integer or pointer type to another of
- // the same size. Instead of casting the pointer before
- // the store, cast the value to be stored.
- Value *NewCast;
- Value *SIOp0 = SI.getOperand(0);
- Instruction::CastOps opcode = Instruction::BitCast;
- const Type* CastSrcTy = SIOp0->getType();
- const Type* CastDstTy = SrcPTy;
- if (CastDstTy->isPointerTy()) {
- if (CastSrcTy->isIntegerTy())
- opcode = Instruction::IntToPtr;
- } else if (CastDstTy->isIntegerTy()) {
- if (SIOp0->getType()->isPointerTy())
- opcode = Instruction::PtrToInt;
- }
-
- // SIOp0 is a pointer to aggregate and this is a store to the first field,
- // emit a GEP to index into its first field.
- if (!NewGEPIndices.empty())
- CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices.begin(),
- NewGEPIndices.end());
-
- NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
- SIOp0->getName()+".c");
- return new StoreInst(NewCast, CastOp);
-}
-
-/// equivalentAddressValues - Test if A and B will obviously have the same
-/// value. This includes recognizing that %t0 and %t1 will have the same
-/// value in code like this:
-/// %t0 = getelementptr \@a, 0, 3
-/// store i32 0, i32* %t0
-/// %t1 = getelementptr \@a, 0, 3
-/// %t2 = load i32* %t1
-///
-static bool equivalentAddressValues(Value *A, Value *B) {
- // Test if the values are trivially equivalent.
- if (A == B) return true;
-
-  // Test if the values come from identical arithmetic instructions.
- // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
-  // it's only used to compare two uses within the same basic block, which
- // means that they'll always either have the same value or one of them
- // will have an undefined value.
- if (isa<BinaryOperator>(A) ||
- isa<CastInst>(A) ||
- isa<PHINode>(A) ||
- isa<GetElementPtrInst>(A))
- if (Instruction *BI = dyn_cast<Instruction>(B))
- if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
- return true;
-
- // Otherwise they may not be equivalent.
- return false;
-}
-
-// If this instruction has two uses, one of which is a llvm.dbg.declare,
-// return the llvm.dbg.declare.
-DbgDeclareInst *InstCombiner::hasOneUsePlusDeclare(Value *V) {
- if (!V->hasNUses(2))
- return 0;
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
- UI != E; ++UI) {
- if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI))
- return DI;
- if (isa<BitCastInst>(UI) && UI->hasOneUse()) {
- if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI->use_begin()))
- return DI;
- }
- }
- return 0;
-}
-
-Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
- Value *Val = SI.getOperand(0);
- Value *Ptr = SI.getOperand(1);
-
- // If the RHS is an alloca with a single use, zapify the store, making the
- // alloca dead.
-  // If the RHS is an alloca with two uses, the other one being a
- // llvm.dbg.declare, zapify the store and the declare, making the
- // alloca dead. We must do this to prevent declares from affecting
- // codegen.
- if (!SI.isVolatile()) {
- if (Ptr->hasOneUse()) {
- if (isa<AllocaInst>(Ptr))
- return EraseInstFromFunction(SI);
- if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
- if (isa<AllocaInst>(GEP->getOperand(0))) {
- if (GEP->getOperand(0)->hasOneUse())
- return EraseInstFromFunction(SI);
- if (DbgDeclareInst *DI = hasOneUsePlusDeclare(GEP->getOperand(0))) {
- EraseInstFromFunction(*DI);
- return EraseInstFromFunction(SI);
- }
- }
- }
- }
- if (DbgDeclareInst *DI = hasOneUsePlusDeclare(Ptr)) {
- EraseInstFromFunction(*DI);
- return EraseInstFromFunction(SI);
- }
- }
-
- // Attempt to improve the alignment.
- if (TD) {
- unsigned KnownAlign =
- GetOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()));
- if (KnownAlign >
- (SI.getAlignment() == 0 ? TD->getABITypeAlignment(Val->getType()) :
- SI.getAlignment()))
- SI.setAlignment(KnownAlign);
- }
-
- // Do really simple DSE, to catch cases where there are several consecutive
- // stores to the same location, separated by a few arithmetic operations. This
- // situation often occurs with bitfield accesses.
- BasicBlock::iterator BBI = &SI;
- for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
- --ScanInsts) {
- --BBI;
- // Don't count debug info directives, lest they affect codegen, and skip
- // pointer-to-pointer bitcasts, which are NOPs.
- if (isa<DbgInfoIntrinsic>(BBI) ||
- (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
- ScanInsts++;
- continue;
- }
-
- if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
- // Prev store isn't volatile, and stores to the same location?
- if (!PrevSI->isVolatile() && equivalentAddressValues(PrevSI->getOperand(1),
- SI.getOperand(1))) {
- ++NumDeadStore;
- ++BBI;
- EraseInstFromFunction(*PrevSI);
- continue;
- }
- break;
- }
-
- // If this is a load, we have to stop. However, if the loaded value is the
- // value being stored and it is loaded from the same pointer we're storing to,
- // then *this* store is dead (X = load P; store X -> P).
- if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
- if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
- !SI.isVolatile())
- return EraseInstFromFunction(SI);
-
- // Otherwise, this is a load from some other location. Stores before it
- // may not be dead.
- break;
- }
-
- // Don't skip over loads or things that can modify memory.
- if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
- break;
- }
-
-
- if (SI.isVolatile()) return 0; // Don't hack volatile stores.
-
- // store X, null -> turns into 'unreachable' in SimplifyCFG
- if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
- if (!isa<UndefValue>(Val)) {
- SI.setOperand(0, UndefValue::get(Val->getType()));
- if (Instruction *U = dyn_cast<Instruction>(Val))
- Worklist.Add(U); // Dropped a use.
- }
- return 0; // Do not modify these!
- }
-
- // store undef, Ptr -> noop
- if (isa<UndefValue>(Val))
- return EraseInstFromFunction(SI);
-
- // If the pointer destination is a cast, see if we can fold the cast into the
- // source instead.
- if (isa<CastInst>(Ptr))
- if (Instruction *Res = InstCombineStoreToCast(*this, SI))
- return Res;
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
- if (CE->isCast())
- if (Instruction *Res = InstCombineStoreToCast(*this, SI))
- return Res;
-
-
- // If this store is the last instruction in the basic block (possibly
- // excepting debug info instructions), and if the block ends with an
- // unconditional branch, try to move it to the successor block.
- BBI = &SI;
- do {
- ++BBI;
- } while (isa<DbgInfoIntrinsic>(BBI) ||
- (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
- if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
- if (BI->isUnconditional())
- if (SimplifyStoreAtEndOfBlock(SI))
- return 0; // xform done!
-
- return 0;
-}
-
-/// SimplifyStoreAtEndOfBlock - Turn things like:
-/// if () { *P = v1; } else { *P = v2 }
-/// into a phi node with a store in the successor.
-///
-/// Simplify things like:
-/// *P = v1; if () { *P = v2; }
-/// into a phi node with a store in the successor.
-///
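-/// As a rough sketch in IR (block and value names here are only illustrative),
-/// the if/then/else case corresponds to rewriting
-///   then:  store i32 %v1, i32* %P  ; br label %join
-///   else:  store i32 %v2, i32* %P  ; br label %join
-/// into a single store in %join:
-///   %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
-///   store i32 %storemerge, i32* %P
-///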
-bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
- BasicBlock *StoreBB = SI.getParent();
-
- // Check to see if the successor block has exactly two incoming edges. If
- // so, see if the other predecessor contains a store to the same location.
- // if so, insert a PHI node (if needed) and move the stores down.
- BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
-
- // Determine whether Dest has exactly two predecessors and, if so, compute
- // the other predecessor.
- pred_iterator PI = pred_begin(DestBB);
- BasicBlock *OtherBB = 0;
- if (*PI != StoreBB)
- OtherBB = *PI;
- ++PI;
- if (PI == pred_end(DestBB))
- return false;
-
- if (*PI != StoreBB) {
- if (OtherBB)
- return false;
- OtherBB = *PI;
- }
- if (++PI != pred_end(DestBB))
- return false;
-
- // Bail out if all the relevant blocks aren't distinct (this can happen,
- // for example, if SI is in an infinite loop)
- if (StoreBB == DestBB || OtherBB == DestBB)
- return false;
-
- // Verify that the other block ends in a branch and is not otherwise empty.
- BasicBlock::iterator BBI = OtherBB->getTerminator();
- BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
- if (!OtherBr || BBI == OtherBB->begin())
- return false;
-
- // If the other block ends in an unconditional branch, check for the 'if then
- // else' case: there must be an instruction before the branch.
- StoreInst *OtherStore = 0;
- if (OtherBr->isUnconditional()) {
- --BBI;
- // Skip over debugging info.
- while (isa<DbgInfoIntrinsic>(BBI) ||
- (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
- if (BBI==OtherBB->begin())
- return false;
- --BBI;
- }
- // If this isn't a store, isn't a store to the same location, or if the
- // alignments differ, bail out.
- OtherStore = dyn_cast<StoreInst>(BBI);
- if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
- OtherStore->getAlignment() != SI.getAlignment())
- return false;
- } else {
- // Otherwise, the other block ended with a conditional branch. If one of the
- // destinations is StoreBB, then we have the if/then case.
- if (OtherBr->getSuccessor(0) != StoreBB &&
- OtherBr->getSuccessor(1) != StoreBB)
- return false;
-
- // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
- // if/then triangle. See if there is a store to the same ptr as SI that
- // lives in OtherBB.
- for (;; --BBI) {
- // Check to see if we find the matching store.
- if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
- if (OtherStore->getOperand(1) != SI.getOperand(1) ||
- OtherStore->getAlignment() != SI.getAlignment())
- return false;
- break;
- }
- // If we find something that may be using or overwriting the stored
- // value, or if we run out of instructions, we can't do the xform.
- if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
- BBI == OtherBB->begin())
- return false;
- }
-
- // In order to eliminate the store in OtherBr, we have to
- // make sure nothing reads or overwrites the stored value in
- // StoreBB.
- for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
- // FIXME: This should really be AA driven.
- if (I->mayReadFromMemory() || I->mayWriteToMemory())
- return false;
- }
- }
-
- // Insert a PHI node now if we need it.
- Value *MergedVal = OtherStore->getOperand(0);
- if (MergedVal != SI.getOperand(0)) {
- PHINode *PN = PHINode::Create(MergedVal->getType(), "storemerge");
- PN->reserveOperandSpace(2);
- PN->addIncoming(SI.getOperand(0), SI.getParent());
- PN->addIncoming(OtherStore->getOperand(0), OtherBB);
- MergedVal = InsertNewInstBefore(PN, DestBB->front());
- }
-
- // Advance to a place where it is safe to insert the new store and
- // insert it.
- BBI = DestBB->getFirstNonPHI();
- InsertNewInstBefore(new StoreInst(MergedVal, SI.getOperand(1),
- OtherStore->isVolatile(),
- SI.getAlignment()), *BBI);
-
- // Nuke the old stores.
- EraseInstFromFunction(SI);
- EraseInstFromFunction(*OtherStore);
- return true;
-}
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
deleted file mode 100644
index b3974e8..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ /dev/null
@@ -1,695 +0,0 @@
-//===- InstCombineMulDivRem.cpp -------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
-// srem, urem, frem.
-//
-//===----------------------------------------------------------------------===//
-
-#include "InstCombine.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Support/PatternMatch.h"
-using namespace llvm;
-using namespace PatternMatch;
-
-/// SubOne - Subtract one from a ConstantInt.
-static Constant *SubOne(ConstantInt *C) {
- return ConstantInt::get(C->getContext(), C->getValue()-1);
-}
-
-/// MultiplyOverflows - True if the multiply cannot be expressed in an int
-/// this size.
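-/// As a rough worked example with W = 8: signed 100 * 2 = 200 falls outside the
-/// i8 signed range [-128, 127], so it overflows, whereas unsigned 100 * 2 = 200
-/// still fits in [0, 255] and does not. The code below checks this by extending
-/// both operands to 2*W bits, multiplying, and comparing against the W-bit range.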
-static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign) {
- uint32_t W = C1->getBitWidth();
- APInt LHSExt = C1->getValue(), RHSExt = C2->getValue();
- if (sign) {
- LHSExt.sext(W * 2);
- RHSExt.sext(W * 2);
- } else {
- LHSExt.zext(W * 2);
- RHSExt.zext(W * 2);
- }
-
- APInt MulExt = LHSExt * RHSExt;
-
- if (!sign)
- return MulExt.ugt(APInt::getLowBitsSet(W * 2, W));
-
- APInt Min = APInt::getSignedMinValue(W).sext(W * 2);
- APInt Max = APInt::getSignedMaxValue(W).sext(W * 2);
- return MulExt.slt(Min) || MulExt.sgt(Max);
-}
-
-Instruction *InstCombiner::visitMul(BinaryOperator &I) {
- bool Changed = SimplifyCommutative(I);
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- if (isa<UndefValue>(Op1)) // undef * X -> 0
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
-
- // Simplify mul instructions with a constant RHS.
- if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1C)) {
-
- // ((X << C1)*C2) == (X * (C2 << C1))
- if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op0))
- if (SI->getOpcode() == Instruction::Shl)
- if (Constant *ShOp = dyn_cast<Constant>(SI->getOperand(1)))
- return BinaryOperator::CreateMul(SI->getOperand(0),
- ConstantExpr::getShl(CI, ShOp));
-
- if (CI->isZero())
- return ReplaceInstUsesWith(I, Op1C); // X * 0 == 0
- if (CI->equalsInt(1)) // X * 1 == X
- return ReplaceInstUsesWith(I, Op0);
- if (CI->isAllOnesValue()) // X * -1 == 0 - X
- return BinaryOperator::CreateNeg(Op0, I.getName());
-
- const APInt& Val = cast<ConstantInt>(CI)->getValue();
- if (Val.isPowerOf2()) { // Replace X*(2^C) with X << C
- return BinaryOperator::CreateShl(Op0,
- ConstantInt::get(Op0->getType(), Val.logBase2()));
- }
- } else if (Op1C->getType()->isVectorTy()) {
- if (Op1C->isNullValue())
- return ReplaceInstUsesWith(I, Op1C);
-
- if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1C)) {
- if (Op1V->isAllOnesValue()) // X * -1 == 0 - X
- return BinaryOperator::CreateNeg(Op0, I.getName());
-
- // As above, vector X * splat(1) -> X.
- if (Constant *Splat = Op1V->getSplatValue()) {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Splat))
- if (CI->equalsInt(1))
- return ReplaceInstUsesWith(I, Op0);
- }
- }
- }
-
- if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0))
- if (Op0I->getOpcode() == Instruction::Add && Op0I->hasOneUse() &&
- isa<ConstantInt>(Op0I->getOperand(1)) && isa<ConstantInt>(Op1C)) {
- // Canonicalize (X+C1)*C2 -> X*C2+C1*C2.
- Value *Add = Builder->CreateMul(Op0I->getOperand(0), Op1C, "tmp");
- Value *C1C2 = Builder->CreateMul(Op1C, Op0I->getOperand(1));
- return BinaryOperator::CreateAdd(Add, C1C2);
-
- }
-
- // Try to fold constant mul into select arguments.
- if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
- if (Instruction *R = FoldOpIntoSelect(I, SI))
- return R;
-
- if (isa<PHINode>(Op0))
- if (Instruction *NV = FoldOpIntoPhi(I))
- return NV;
- }
-
- if (Value *Op0v = dyn_castNegVal(Op0)) // -X * -Y = X*Y
- if (Value *Op1v = dyn_castNegVal(Op1))
- return BinaryOperator::CreateMul(Op0v, Op1v);
-
- // (X / Y) * Y = X - (X % Y)
- // (X / Y) * -Y = (X % Y) - X
- {
- Value *Op1C = Op1;
- BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0);
- if (!BO ||
- (BO->getOpcode() != Instruction::UDiv &&
- BO->getOpcode() != Instruction::SDiv)) {
- Op1C = Op0;
- BO = dyn_cast<BinaryOperator>(Op1);
- }
- Value *Neg = dyn_castNegVal(Op1C);
- if (BO && BO->hasOneUse() &&
- (BO->getOperand(1) == Op1C || BO->getOperand(1) == Neg) &&
- (BO->getOpcode() == Instruction::UDiv ||
- BO->getOpcode() == Instruction::SDiv)) {
- Value *Op0BO = BO->getOperand(0), *Op1BO = BO->getOperand(1);
-
- // If the division is exact, X % Y is zero.
- if (SDivOperator *SDiv = dyn_cast<SDivOperator>(BO))
- if (SDiv->isExact()) {
- if (Op1BO == Op1C)
- return ReplaceInstUsesWith(I, Op0BO);
- return BinaryOperator::CreateNeg(Op0BO);
- }
-
- Value *Rem;
- if (BO->getOpcode() == Instruction::UDiv)
- Rem = Builder->CreateURem(Op0BO, Op1BO);
- else
- Rem = Builder->CreateSRem(Op0BO, Op1BO);
- Rem->takeName(BO);
-
- if (Op1BO == Op1C)
- return BinaryOperator::CreateSub(Op0BO, Rem);
- return BinaryOperator::CreateSub(Rem, Op0BO);
- }
- }
-
- /// i1 mul -> i1 and.
- if (I.getType()->isIntegerTy(1))
- return BinaryOperator::CreateAnd(Op0, Op1);
-
- // X*(1 << Y) --> X << Y
- // (1 << Y)*X --> X << Y
- {
- Value *Y;
- if (match(Op0, m_Shl(m_One(), m_Value(Y))))
- return BinaryOperator::CreateShl(Op1, Y);
- if (match(Op1, m_Shl(m_One(), m_Value(Y))))
- return BinaryOperator::CreateShl(Op0, Y);
- }
-
- // If one of the operands of the multiply is a cast from a boolean value, then
- // we know the bool is either zero or one, so this is a 'masking' multiply.
- // X * Y (where Y is 0 or 1) -> X & (0-Y)
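- // For example, with Y known to be 0 or 1: if Y == 1 then 0-Y is all ones and
- // X & -1 == X, while if Y == 0 then X & 0 == 0, matching X * Y in both cases.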
- if (!I.getType()->isVectorTy()) {
- // -2 is "-1 << 1" so it is all bits set except the low one.
- APInt Negative2(I.getType()->getPrimitiveSizeInBits(), (uint64_t)-2, true);
-
- Value *BoolCast = 0, *OtherOp = 0;
- if (MaskedValueIsZero(Op0, Negative2))
- BoolCast = Op0, OtherOp = Op1;
- else if (MaskedValueIsZero(Op1, Negative2))
- BoolCast = Op1, OtherOp = Op0;
-
- if (BoolCast) {
- Value *V = Builder->CreateSub(Constant::getNullValue(I.getType()),
- BoolCast, "tmp");
- return BinaryOperator::CreateAnd(V, OtherOp);
- }
- }
-
- return Changed ? &I : 0;
-}
-
-Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
- bool Changed = SimplifyCommutative(I);
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- // Simplify mul instructions with a constant RHS...
- if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
- if (ConstantFP *Op1F = dyn_cast<ConstantFP>(Op1C)) {
- // "In IEEE floating point, x*1 is not equivalent to x for nans. However,
- // ANSI says we can drop signals, so we can do this anyway." (from GCC)
- if (Op1F->isExactlyValue(1.0))
- return ReplaceInstUsesWith(I, Op0); // Eliminate 'fmul double %X, 1.0'
- } else if (Op1C->getType()->isVectorTy()) {
- if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1C)) {
- // As above, vector X*splat(1.0) -> X in all defined cases.
- if (Constant *Splat = Op1V->getSplatValue()) {
- if (ConstantFP *F = dyn_cast<ConstantFP>(Splat))
- if (F->isExactlyValue(1.0))
- return ReplaceInstUsesWith(I, Op0);
- }
- }
- }
-
- // Try to fold constant mul into select arguments.
- if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
- if (Instruction *R = FoldOpIntoSelect(I, SI))
- return R;
-
- if (isa<PHINode>(Op0))
- if (Instruction *NV = FoldOpIntoPhi(I))
- return NV;
- }
-
- if (Value *Op0v = dyn_castFNegVal(Op0)) // -X * -Y = X*Y
- if (Value *Op1v = dyn_castFNegVal(Op1))
- return BinaryOperator::CreateFMul(Op0v, Op1v);
-
- return Changed ? &I : 0;
-}
-
-/// SimplifyDivRemOfSelect - Try to fold a divide or remainder of a select
-/// instruction.
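-/// A minimal sketch of the fold in IR (value names are illustrative):
-///   %s = select i1 %c, i32 0, i32 %Y
-///   %d = udiv i32 %X, %s
-/// becomes
-///   %d = udiv i32 %X, %Y
-/// because selecting the zero arm would make the division undefined, so the
-/// divisor must be the non-zero arm whenever the result matters.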
-bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) {
- SelectInst *SI = cast<SelectInst>(I.getOperand(1));
-
- // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
- int NonNullOperand = -1;
- if (Constant *ST = dyn_cast<Constant>(SI->getOperand(1)))
- if (ST->isNullValue())
- NonNullOperand = 2;
- // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
- if (Constant *ST = dyn_cast<Constant>(SI->getOperand(2)))
- if (ST->isNullValue())
- NonNullOperand = 1;
-
- if (NonNullOperand == -1)
- return false;
-
- Value *SelectCond = SI->getOperand(0);
-
- // Change the div/rem to use 'Y' instead of the select.
- I.setOperand(1, SI->getOperand(NonNullOperand));
-
- // Okay, we know we can replace the operand of the div/rem with 'Y' with no
- // problem. However, the select, or the condition of the select, may have
- // multiple uses. Based on our knowledge that the operand must be non-zero,
- // propagate the known value for the select into other uses of it, and
- // propagate a known value of the condition into its other users.
-
- // If the select and the condition each have only a single use, don't bother
- // with this; early exit.
- if (SI->use_empty() && SelectCond->hasOneUse())
- return true;
-
- // Scan the current block backward, looking for other uses of SI.
- BasicBlock::iterator BBI = &I, BBFront = I.getParent()->begin();
-
- while (BBI != BBFront) {
- --BBI;
- // If we found a call to a function, we can't assume it will return, so
- // information from below it cannot be propagated above it.
- if (isa<CallInst>(BBI) && !isa<IntrinsicInst>(BBI))
- break;
-
- // Replace uses of the select or its condition with the known values.
- for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
- I != E; ++I) {
- if (*I == SI) {
- *I = SI->getOperand(NonNullOperand);
- Worklist.Add(BBI);
- } else if (*I == SelectCond) {
- *I = NonNullOperand == 1 ? ConstantInt::getTrue(BBI->getContext()) :
- ConstantInt::getFalse(BBI->getContext());
- Worklist.Add(BBI);
- }
- }
-
- // If we passed the instruction, quit looking for it.
- if (&*BBI == SI)
- SI = 0;
- if (&*BBI == SelectCond)
- SelectCond = 0;
-
- // If we ran out of things to eliminate, break out of the loop.
- if (SelectCond == 0 && SI == 0)
- break;
-
- }
- return true;
-}
-
-
-/// This function implements the transforms on div instructions that work
-/// regardless of the kind of div instruction it is (udiv, sdiv, or fdiv). It is
-/// used by the visitors to those instructions.
-/// @brief Transforms common to all three div instructions
-Instruction *InstCombiner::commonDivTransforms(BinaryOperator &I) {
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- // undef / X -> 0 for integer.
- // undef / X -> undef for FP (the undef could be a snan).
- if (isa<UndefValue>(Op0)) {
- if (Op0->getType()->isFPOrFPVectorTy())
- return ReplaceInstUsesWith(I, Op0);
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
- }
-
- // X / undef -> undef
- if (isa<UndefValue>(Op1))
- return ReplaceInstUsesWith(I, Op1);
-
- return 0;
-}
-
-/// This function implements the transforms common to both integer division
-/// instructions (udiv and sdiv). It is called by the visitors to those integer
-/// division instructions.
-/// @brief Common integer divide transforms
-Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- // (sdiv X, X) --> 1 (udiv X, X) --> 1
- if (Op0 == Op1) {
- if (const VectorType *Ty = dyn_cast<VectorType>(I.getType())) {
- Constant *CI = ConstantInt::get(Ty->getElementType(), 1);
- std::vector<Constant*> Elts(Ty->getNumElements(), CI);
- return ReplaceInstUsesWith(I, ConstantVector::get(Elts));
- }
-
- Constant *CI = ConstantInt::get(I.getType(), 1);
- return ReplaceInstUsesWith(I, CI);
- }
-
- if (Instruction *Common = commonDivTransforms(I))
- return Common;
-
- // Handle cases involving: [su]div X, (select Cond, Y, Z)
- // This does not apply for fdiv.
- if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
- return &I;
-
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
- // div X, 1 == X
- if (RHS->equalsInt(1))
- return ReplaceInstUsesWith(I, Op0);
-
- // (X / C1) / C2 -> X / (C1*C2)
- if (Instruction *LHS = dyn_cast<Instruction>(Op0))
- if (Instruction::BinaryOps(LHS->getOpcode()) == I.getOpcode())
- if (ConstantInt *LHSRHS = dyn_cast<ConstantInt>(LHS->getOperand(1))) {
- if (MultiplyOverflows(RHS, LHSRHS,
- I.getOpcode()==Instruction::SDiv))
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
- else
- return BinaryOperator::Create(I.getOpcode(), LHS->getOperand(0),
- ConstantExpr::getMul(RHS, LHSRHS));
- }
-
- if (!RHS->isZero()) { // avoid X udiv 0
- if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
- if (Instruction *R = FoldOpIntoSelect(I, SI))
- return R;
- if (isa<PHINode>(Op0))
- if (Instruction *NV = FoldOpIntoPhi(I))
- return NV;
- }
- }
-
- // 0 / X == 0, we don't need to preserve faults!
- if (ConstantInt *LHS = dyn_cast<ConstantInt>(Op0))
- if (LHS->equalsInt(0))
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
-
- // It can't be division by zero, hence it must be division by one.
- if (I.getType()->isIntegerTy(1))
- return ReplaceInstUsesWith(I, Op0);
-
- if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
- if (ConstantInt *X = cast_or_null<ConstantInt>(Op1V->getSplatValue()))
- // div X, 1 == X
- if (X->isOne())
- return ReplaceInstUsesWith(I, Op0);
- }
-
- return 0;
-}
-
-Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- // Handle the integer div common cases
- if (Instruction *Common = commonIDivTransforms(I))
- return Common;
-
- if (ConstantInt *C = dyn_cast<ConstantInt>(Op1)) {
- // X udiv 2^C -> X >> C
- // Check to see if this is an unsigned division with an exact power of 2,
- // if so, convert to a right shift.
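- // For example (sketch): "udiv i32 %x, 8" becomes "lshr i32 %x, 3", since
- // 8 == 1 << 3 and the division is unsigned.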
- if (C->getValue().isPowerOf2()) // 0 not included in isPowerOf2
- return BinaryOperator::CreateLShr(Op0,
- ConstantInt::get(Op0->getType(), C->getValue().logBase2()));
-
- // X udiv C, where C >= signbit
- if (C->getValue().isNegative()) {
- Value *IC = Builder->CreateICmpULT( Op0, C);
- return SelectInst::Create(IC, Constant::getNullValue(I.getType()),
- ConstantInt::get(I.getType(), 1));
- }
- }
-
- // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
- if (BinaryOperator *RHSI = dyn_cast<BinaryOperator>(I.getOperand(1))) {
- if (RHSI->getOpcode() == Instruction::Shl &&
- isa<ConstantInt>(RHSI->getOperand(0))) {
- const APInt& C1 = cast<ConstantInt>(RHSI->getOperand(0))->getValue();
- if (C1.isPowerOf2()) {
- Value *N = RHSI->getOperand(1);
- const Type *NTy = N->getType();
- if (uint32_t C2 = C1.logBase2())
- N = Builder->CreateAdd(N, ConstantInt::get(NTy, C2), "tmp");
- return BinaryOperator::CreateLShr(Op0, N);
- }
- }
- }
-
- // udiv X, (Select Cond, C1, C2) --> Select Cond, (shr X, log2(C1)), (shr X, log2(C2))
- // where C1&C2 are powers of two.
- if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
- if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
- if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
- const APInt &TVA = STO->getValue(), &FVA = SFO->getValue();
- if (TVA.isPowerOf2() && FVA.isPowerOf2()) {
- // Compute the shift amounts
- uint32_t TSA = TVA.logBase2(), FSA = FVA.logBase2();
- // Construct the "on true" case of the select
- Constant *TC = ConstantInt::get(Op0->getType(), TSA);
- Value *TSI = Builder->CreateLShr(Op0, TC, SI->getName()+".t");
-
- // Construct the "on false" case of the select
- Constant *FC = ConstantInt::get(Op0->getType(), FSA);
- Value *FSI = Builder->CreateLShr(Op0, FC, SI->getName()+".f");
-
- // construct the select instruction and return it.
- return SelectInst::Create(SI->getOperand(0), TSI, FSI, SI->getName());
- }
- }
- return 0;
-}
-
-Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- // Handle the integer div common cases
- if (Instruction *Common = commonIDivTransforms(I))
- return Common;
-
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
- // sdiv X, -1 == -X
- if (RHS->isAllOnesValue())
- return BinaryOperator::CreateNeg(Op0);
-
- // sdiv X, C --> ashr X, log2(C)
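- // For example (sketch): an exact "sdiv i32 %x, 16" becomes "ashr i32 %x, 4";
- // exactness guarantees no remainder, so no rounding is lost by the shift.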
- if (cast<SDivOperator>(&I)->isExact() &&
- RHS->getValue().isNonNegative() &&
- RHS->getValue().isPowerOf2()) {
- Value *ShAmt = llvm::ConstantInt::get(RHS->getType(),
- RHS->getValue().exactLogBase2());
- return BinaryOperator::CreateAShr(Op0, ShAmt, I.getName());
- }
-
- // -X/C --> X/-C provided the negation doesn't overflow.
- if (SubOperator *Sub = dyn_cast<SubOperator>(Op0))
- if (isa<Constant>(Sub->getOperand(0)) &&
- cast<Constant>(Sub->getOperand(0))->isNullValue() &&
- Sub->hasNoSignedWrap())
- return BinaryOperator::CreateSDiv(Sub->getOperand(1),
- ConstantExpr::getNeg(RHS));
- }
-
- // If the sign bits of both operands are zero (i.e. we can prove they are
- // unsigned inputs), turn this into a udiv.
- if (I.getType()->isIntegerTy()) {
- APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
- if (MaskedValueIsZero(Op0, Mask)) {
- if (MaskedValueIsZero(Op1, Mask)) {
- // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
- return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
- }
- ConstantInt *ShiftedInt;
- if (match(Op1, m_Shl(m_ConstantInt(ShiftedInt), m_Value())) &&
- ShiftedInt->getValue().isPowerOf2()) {
- // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
- // Safe because the only negative value (1 << Y) can take on is
- // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
- // the sign bit set.
- return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
- }
- }
- }
-
- return 0;
-}
-
-Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
- return commonDivTransforms(I);
-}
-
-/// This function implements the transforms on rem instructions that work
-/// regardless of the kind of rem instruction it is (urem, srem, or frem). It
-/// is used by the visitors to those instructions.
-/// @brief Transforms common to all three rem instructions
-Instruction *InstCombiner::commonRemTransforms(BinaryOperator &I) {
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- if (isa<UndefValue>(Op0)) { // undef % X -> 0
- if (I.getType()->isFPOrFPVectorTy())
- return ReplaceInstUsesWith(I, Op0); // undef % X -> undef for FP (could be SNaN)
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
- }
- if (isa<UndefValue>(Op1))
- return ReplaceInstUsesWith(I, Op1); // X % undef -> undef
-
- // Handle cases involving: rem X, (select Cond, Y, Z)
- if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
- return &I;
-
- return 0;
-}
-
-/// This function implements the transforms common to both integer remainder
-/// instructions (urem and srem). It is called by the visitors to those integer
-/// remainder instructions.
-/// @brief Common integer remainder transforms
-Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- if (Instruction *common = commonRemTransforms(I))
- return common;
-
- // 0 % X == 0 for integer, we don't need to preserve faults!
- if (Constant *LHS = dyn_cast<Constant>(Op0))
- if (LHS->isNullValue())
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
-
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
- // X % 0 == undef, we don't need to preserve faults!
- if (RHS->equalsInt(0))
- return ReplaceInstUsesWith(I, UndefValue::get(I.getType()));
-
- if (RHS->equalsInt(1)) // X % 1 == 0
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
-
- if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
- if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
- if (Instruction *R = FoldOpIntoSelect(I, SI))
- return R;
- } else if (isa<PHINode>(Op0I)) {
- if (Instruction *NV = FoldOpIntoPhi(I))
- return NV;
- }
-
- // See if we can fold away this rem instruction.
- if (SimplifyDemandedInstructionBits(I))
- return &I;
- }
- }
-
- return 0;
-}
-
-Instruction *InstCombiner::visitURem(BinaryOperator &I) {
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- if (Instruction *common = commonIRemTransforms(I))
- return common;
-
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
- // X urem C (C a power of 2) -> X & (C-1)
- // Check to see if this is an unsigned remainder with an exact power of 2,
- // if so, convert to a bitwise and.
- if (ConstantInt *C = dyn_cast<ConstantInt>(RHS))
- if (C->getValue().isPowerOf2())
- return BinaryOperator::CreateAnd(Op0, SubOne(C));
- }
-
- if (Instruction *RHSI = dyn_cast<Instruction>(I.getOperand(1))) {
- // Turn A % (C << N), where C is 2^k, into A & ((C << N)-1)
- if (RHSI->getOpcode() == Instruction::Shl &&
- isa<ConstantInt>(RHSI->getOperand(0))) {
- if (cast<ConstantInt>(RHSI->getOperand(0))->getValue().isPowerOf2()) {
- Constant *N1 = Constant::getAllOnesValue(I.getType());
- Value *Add = Builder->CreateAdd(RHSI, N1, "tmp");
- return BinaryOperator::CreateAnd(Op0, Add);
- }
- }
- }
-
- // urem X, (select Cond, C1, C2) --> select Cond, (X & (C1-1)), (X & (C2-1))
- // where C1&C2 are powers of two.
- if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) {
- if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
- if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
- // STO == 0 and SFO == 0 handled above.
- if ((STO->getValue().isPowerOf2()) &&
- (SFO->getValue().isPowerOf2())) {
- Value *TrueAnd = Builder->CreateAnd(Op0, SubOne(STO),
- SI->getName()+".t");
- Value *FalseAnd = Builder->CreateAnd(Op0, SubOne(SFO),
- SI->getName()+".f");
- return SelectInst::Create(SI->getOperand(0), TrueAnd, FalseAnd);
- }
- }
- }
-
- return 0;
-}
-
-Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- // Handle the integer rem common cases
- if (Instruction *Common = commonIRemTransforms(I))
- return Common;
-
- if (Value *RHSNeg = dyn_castNegVal(Op1))
- if (!isa<Constant>(RHSNeg) ||
- (isa<ConstantInt>(RHSNeg) &&
- cast<ConstantInt>(RHSNeg)->getValue().isStrictlyPositive())) {
- // X % -Y -> X % Y
- Worklist.AddValue(I.getOperand(1));
- I.setOperand(1, RHSNeg);
- return &I;
- }
-
- // If the sign bits of both operands are zero (i.e. we can prove they are
- // unsigned inputs), turn this into a urem.
- if (I.getType()->isIntegerTy()) {
- APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
- if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) {
- // X srem Y -> X urem Y, iff X and Y don't have sign bit set
- return BinaryOperator::CreateURem(Op0, Op1, I.getName());
- }
- }
-
- // If it's a constant vector, flip any negative values positive.
- if (ConstantVector *RHSV = dyn_cast<ConstantVector>(Op1)) {
- unsigned VWidth = RHSV->getNumOperands();
-
- bool hasNegative = false;
- for (unsigned i = 0; !hasNegative && i != VWidth; ++i)
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i)))
- if (RHS->getValue().isNegative())
- hasNegative = true;
-
- if (hasNegative) {
- std::vector<Constant *> Elts(VWidth);
- for (unsigned i = 0; i != VWidth; ++i) {
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i))) {
- if (RHS->getValue().isNegative())
- Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
- else
- Elts[i] = RHS;
- }
- }
-
- Constant *NewRHSV = ConstantVector::get(Elts);
- if (NewRHSV != RHSV) {
- Worklist.AddValue(I.getOperand(1));
- I.setOperand(1, NewRHSV);
- return &I;
- }
- }
- }
-
- return 0;
-}
-
-Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
- return commonRemTransforms(I);
-}
-
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp b/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
deleted file mode 100644
index 65f0393..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ /dev/null
@@ -1,843 +0,0 @@
-//===- InstCombinePHI.cpp -------------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the visitPHINode function.
-//
-//===----------------------------------------------------------------------===//
-
-#include "InstCombine.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/STLExtras.h"
-using namespace llvm;
-
-/// FoldPHIArgBinOpIntoPHI - If we have something like phi [add (a,b), add(a,c)]
-/// and if a/b/c and the adds all have a single use, turn this into a phi
-/// and a single binop.
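-/// A rough IR sketch (names are illustrative): given
-///   %ab = add i32 %a, %b      ; in %bb1
-///   %ac = add i32 %a, %c      ; in %bb2
-///   %x  = phi i32 [ %ab, %bb1 ], [ %ac, %bb2 ]
-/// the phi and the two adds can become
-///   %p = phi i32 [ %b, %bb1 ], [ %c, %bb2 ]
-///   %x = add i32 %a, %p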
-Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
- Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
- assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
- unsigned Opc = FirstInst->getOpcode();
- Value *LHSVal = FirstInst->getOperand(0);
- Value *RHSVal = FirstInst->getOperand(1);
-
- const Type *LHSType = LHSVal->getType();
- const Type *RHSType = RHSVal->getType();
-
- // Scan to see if all operands are the same opcode, and all have one use.
- for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
- Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
- if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
- // Verify type of the LHS matches so we don't fold cmp's of different
- // types or GEP's with different index types.
- I->getOperand(0)->getType() != LHSType ||
- I->getOperand(1)->getType() != RHSType)
- return 0;
-
- // If they are CmpInst instructions, check their predicates
- if (Opc == Instruction::ICmp || Opc == Instruction::FCmp)
- if (cast<CmpInst>(I)->getPredicate() !=
- cast<CmpInst>(FirstInst)->getPredicate())
- return 0;
-
- // Keep track of which operand needs a phi node.
- if (I->getOperand(0) != LHSVal) LHSVal = 0;
- if (I->getOperand(1) != RHSVal) RHSVal = 0;
- }
-
- // If both LHS and RHS would need a PHI, don't do this transformation,
- // because it would increase the number of PHIs entering the block,
- // which leads to higher register pressure. This is especially
- // bad when the PHIs are in the header of a loop.
- if (!LHSVal && !RHSVal)
- return 0;
-
- // Otherwise, this is safe to transform!
-
- Value *InLHS = FirstInst->getOperand(0);
- Value *InRHS = FirstInst->getOperand(1);
- PHINode *NewLHS = 0, *NewRHS = 0;
- if (LHSVal == 0) {
- NewLHS = PHINode::Create(LHSType,
- FirstInst->getOperand(0)->getName() + ".pn");
- NewLHS->reserveOperandSpace(PN.getNumOperands()/2);
- NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
- InsertNewInstBefore(NewLHS, PN);
- LHSVal = NewLHS;
- }
-
- if (RHSVal == 0) {
- NewRHS = PHINode::Create(RHSType,
- FirstInst->getOperand(1)->getName() + ".pn");
- NewRHS->reserveOperandSpace(PN.getNumOperands()/2);
- NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
- InsertNewInstBefore(NewRHS, PN);
- RHSVal = NewRHS;
- }
-
- // Add all operands to the new PHIs.
- if (NewLHS || NewRHS) {
- for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
- Instruction *InInst = cast<Instruction>(PN.getIncomingValue(i));
- if (NewLHS) {
- Value *NewInLHS = InInst->getOperand(0);
- NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
- }
- if (NewRHS) {
- Value *NewInRHS = InInst->getOperand(1);
- NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
- }
- }
- }
-
- if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
- return BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
- CmpInst *CIOp = cast<CmpInst>(FirstInst);
- return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
- LHSVal, RHSVal);
-}
-
-Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
- GetElementPtrInst *FirstInst = cast<GetElementPtrInst>(PN.getIncomingValue(0));
-
- SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
- FirstInst->op_end());
- // This is true if all GEP bases are allocas and if all indices into them are
- // constants.
- bool AllBasePointersAreAllocas = true;
-
- // We don't want to replace this phi if the replacement would require
- // more than one phi, which leads to higher register pressure. This is
- // especially bad when the PHIs are in the header of a loop.
- bool NeededPhi = false;
-
- // Scan to see if all operands are the same opcode, and all have one use.
- for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
- GetElementPtrInst *GEP= dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
- if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
- GEP->getNumOperands() != FirstInst->getNumOperands())
- return 0;
-
- // Keep track of whether or not all GEPs are of alloca pointers.
- if (AllBasePointersAreAllocas &&
- (!isa<AllocaInst>(GEP->getOperand(0)) ||
- !GEP->hasAllConstantIndices()))
- AllBasePointersAreAllocas = false;
-
- // Compare the operand lists.
- for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) {
- if (FirstInst->getOperand(op) == GEP->getOperand(op))
- continue;
-
- // Don't merge two GEPs when two operands differ (introducing phi nodes)
- // if one of the PHIs has a constant for the index. The index may be
- // substantially cheaper to compute for the constants, so making it a
- // variable index could pessimize the path. This also handles the case
- // for struct indices, which must always be constant.
- if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
- isa<ConstantInt>(GEP->getOperand(op)))
- return 0;
-
- if (FirstInst->getOperand(op)->getType() != GEP->getOperand(op)->getType())
- return 0;
-
- // If we already needed a PHI for an earlier operand, and another operand
- // also requires a PHI, we'd be introducing more PHIs than we're
- // eliminating, which increases register pressure on entry to the PHI's
- // block.
- if (NeededPhi)
- return 0;
-
- FixedOperands[op] = 0; // Needs a PHI.
- NeededPhi = true;
- }
- }
-
- // If all of the base pointers of the PHI'd GEPs are from allocas, don't
- // bother doing this transformation. At best, this will just save a bit of
- // offset calculation, but all the predecessors will have to materialize the
- // stack address into a register anyway. We'd actually rather *clone* the
- // load up into the predecessors so that we have a load of a gep of an alloca,
- // which can usually all be folded into the load.
- if (AllBasePointersAreAllocas)
- return 0;
-
- // Otherwise, this is safe to transform. Insert PHI nodes for each operand
- // that is variable.
- SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());
-
- bool HasAnyPHIs = false;
- for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) {
- if (FixedOperands[i]) continue; // operand doesn't need a phi.
- Value *FirstOp = FirstInst->getOperand(i);
- PHINode *NewPN = PHINode::Create(FirstOp->getType(),
- FirstOp->getName()+".pn");
- InsertNewInstBefore(NewPN, PN);
-
- NewPN->reserveOperandSpace(e);
- NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
- OperandPhis[i] = NewPN;
- FixedOperands[i] = NewPN;
- HasAnyPHIs = true;
- }
-
-
- // Add all operands to the new PHIs.
- if (HasAnyPHIs) {
- for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
- GetElementPtrInst *InGEP = cast<GetElementPtrInst>(PN.getIncomingValue(i));
- BasicBlock *InBB = PN.getIncomingBlock(i);
-
- for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op)
- if (PHINode *OpPhi = OperandPhis[op])
- OpPhi->addIncoming(InGEP->getOperand(op), InBB);
- }
- }
-
- Value *Base = FixedOperands[0];
- return cast<GEPOperator>(FirstInst)->isInBounds() ?
- GetElementPtrInst::CreateInBounds(Base, FixedOperands.begin()+1,
- FixedOperands.end()) :
- GetElementPtrInst::Create(Base, FixedOperands.begin()+1,
- FixedOperands.end());
-}
-
-
-/// isSafeAndProfitableToSinkLoad - Return true if we know that it is safe to
-/// sink the load out of the block that defines it. This means that it must be
-/// obvious the value of the load is not changed from the point of the load to
-/// the end of the block it is in.
-///
-/// Finally, it is safe, but not profitable, to sink a load targeting a
-/// non-address-taken alloca. Doing so will cause us to not promote the alloca
-/// to a register.
-static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
- BasicBlock::iterator BBI = L, E = L->getParent()->end();
-
- for (++BBI; BBI != E; ++BBI)
- if (BBI->mayWriteToMemory())
- return false;
-
- // Check for non-address taken alloca. If not address-taken already, it isn't
- // profitable to do this xform.
- if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
- bool isAddressTaken = false;
- for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
- UI != E; ++UI) {
- if (isa<LoadInst>(UI)) continue;
- if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
- // If storing TO the alloca, then the address isn't taken.
- if (SI->getOperand(1) == AI) continue;
- }
- isAddressTaken = true;
- break;
- }
-
- if (!isAddressTaken && AI->isStaticAlloca())
- return false;
- }
-
- // If this load is a load from a GEP with a constant offset from an alloca,
- // then we don't want to sink it. In its present form, it will be
- // load [constant stack offset]. Sinking it will cause us to have to
- // materialize the stack addresses in each predecessor in a register only to
- // do a shared load from register in the successor.
- if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
- if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
- if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
- return false;
-
- return true;
-}
-
-Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
- LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));
-
- // When processing loads, we need to propagate two bits of information to the
- // sunk load: whether it is volatile, and what its alignment is. We currently
- // don't sink loads when some have their alignment specified and some don't.
- // visitLoadInst will propagate an alignment onto the load when TD is around,
- // and if TD isn't around, we can't handle the mixed case.
- bool isVolatile = FirstLI->isVolatile();
- unsigned LoadAlignment = FirstLI->getAlignment();
- unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();
-
- // We can't sink the load if the loaded value could be modified between the
- // load and the PHI.
- if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
- !isSafeAndProfitableToSinkLoad(FirstLI))
- return 0;
-
- // If the PHI is of volatile loads and the load block has multiple
- // successors, sinking it would remove a load of the volatile value from
- // the path through the other successor.
- if (isVolatile &&
- FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
- return 0;
-
- // Check to see if all arguments are the same operation.
- for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
- LoadInst *LI = dyn_cast<LoadInst>(PN.getIncomingValue(i));
- if (!LI || !LI->hasOneUse())
- return 0;
-
- // We can't sink the load if the loaded value could be modified between
- // the load and the PHI.
- if (LI->isVolatile() != isVolatile ||
- LI->getParent() != PN.getIncomingBlock(i) ||
- LI->getPointerAddressSpace() != LoadAddrSpace ||
- !isSafeAndProfitableToSinkLoad(LI))
- return 0;
-
- // If some of the loads have an alignment specified but not all of them,
- // we can't do the transformation.
- if ((LoadAlignment != 0) != (LI->getAlignment() != 0))
- return 0;
-
- LoadAlignment = std::min(LoadAlignment, LI->getAlignment());
-
- // If the PHI is of volatile loads and the load block has multiple
- // successors, sinking it would remove a load of the volatile value from
- // the path through the other successor.
- if (isVolatile &&
- LI->getParent()->getTerminator()->getNumSuccessors() != 1)
- return 0;
- }
-
- // Okay, they are all the same operation. Create a new PHI node of the
- // correct type, and PHI together all of the LHS's of the instructions.
- PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
- PN.getName()+".in");
- NewPN->reserveOperandSpace(PN.getNumOperands()/2);
-
- Value *InVal = FirstLI->getOperand(0);
- NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
-
- // Add all operands to the new PHI.
- for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
- Value *NewInVal = cast<LoadInst>(PN.getIncomingValue(i))->getOperand(0);
- if (NewInVal != InVal)
- InVal = 0;
- NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
- }
-
- Value *PhiVal;
- if (InVal) {
- // The new PHI unions all of the same values together. This is really
- // common, so we handle it intelligently here for compile-time speed.
- PhiVal = InVal;
- delete NewPN;
- } else {
- InsertNewInstBefore(NewPN, PN);
- PhiVal = NewPN;
- }
-
- // If this was a volatile load that we are merging, make sure to loop through
- // and mark all the input loads as non-volatile. If we don't do this, we will
- // insert a new volatile load and the old ones will not be deletable.
- if (isVolatile)
- for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
- cast<LoadInst>(PN.getIncomingValue(i))->setVolatile(false);
-
- return new LoadInst(PhiVal, "", isVolatile, LoadAlignment);
-}
-
-
-
-/// FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
-/// operator and they all are only used by the PHI, PHI together their
-/// inputs, and do the operation once, to the result of the PHI.
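-/// A rough IR sketch for the cast case (names are illustrative): if the
-/// incoming values are
-///   %z1 = zext i8 %a to i32   ; in %bb1
-///   %z2 = zext i8 %b to i32   ; in %bb2
-/// then
-///   %r = phi i32 [ %z1, %bb1 ], [ %z2, %bb2 ]
-/// can become
-///   %p = phi i8 [ %a, %bb1 ], [ %b, %bb2 ]
-///   %r = zext i8 %p to i32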
-Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
- Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
-
- if (isa<GetElementPtrInst>(FirstInst))
- return FoldPHIArgGEPIntoPHI(PN);
- if (isa<LoadInst>(FirstInst))
- return FoldPHIArgLoadIntoPHI(PN);
-
- // Scan the instruction, looking for input operations that can be folded away.
- // If all input operands to the phi are the same instruction (e.g. a cast from
- // the same type or "+42") we can pull the operation through the PHI, reducing
- // code size and simplifying code.
- Constant *ConstantOp = 0;
- const Type *CastSrcTy = 0;
-
- if (isa<CastInst>(FirstInst)) {
- CastSrcTy = FirstInst->getOperand(0)->getType();
-
- // Be careful about transforming integer PHIs. We don't want to pessimize
- // the code by turning an i32 into an i1293.
- if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
- if (!ShouldChangeType(PN.getType(), CastSrcTy))
- return 0;
- }
- } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
- // Can fold binop, compare or shift here if the RHS is a constant,
- // otherwise call FoldPHIArgBinOpIntoPHI.
- ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
- if (ConstantOp == 0)
- return FoldPHIArgBinOpIntoPHI(PN);
- } else {
- return 0; // Cannot fold this operation.
- }
-
- // Check to see if all arguments are the same operation.
- for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
- Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
- if (I == 0 || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
- return 0;
- if (CastSrcTy) {
- if (I->getOperand(0)->getType() != CastSrcTy)
- return 0; // Cast operation must match.
- } else if (I->getOperand(1) != ConstantOp) {
- return 0;
- }
- }
-
- // Okay, they are all the same operation. Create a new PHI node of the
- // correct type, and PHI together all of the LHS's of the instructions.
- PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
- PN.getName()+".in");
- NewPN->reserveOperandSpace(PN.getNumOperands()/2);
-
- Value *InVal = FirstInst->getOperand(0);
- NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
-
- // Add all operands to the new PHI.
- for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
- Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
- if (NewInVal != InVal)
- InVal = 0;
- NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
- }
-
- Value *PhiVal;
- if (InVal) {
- // The new PHI unions all of the same values together. This is really
- // common, so we handle it intelligently here for compile-time speed.
- PhiVal = InVal;
- delete NewPN;
- } else {
- InsertNewInstBefore(NewPN, PN);
- PhiVal = NewPN;
- }
-
- // Insert and return the new operation.
- if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst))
- return CastInst::Create(FirstCI->getOpcode(), PhiVal, PN.getType());
-
- if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
- return BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
-
- CmpInst *CIOp = cast<CmpInst>(FirstInst);
- return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
- PhiVal, ConstantOp);
-}
-
-/// DeadPHICycle - Return true if this PHI node is only used by a PHI node cycle
-/// that is dead.
-static bool DeadPHICycle(PHINode *PN,
- SmallPtrSet<PHINode*, 16> &PotentiallyDeadPHIs) {
- if (PN->use_empty()) return true;
- if (!PN->hasOneUse()) return false;
-
- // Remember this node, and if we find the cycle, return.
- if (!PotentiallyDeadPHIs.insert(PN))
- return true;
-
- // Don't scan crazily complex things.
- if (PotentiallyDeadPHIs.size() == 16)
- return false;
-
- if (PHINode *PU = dyn_cast<PHINode>(PN->use_back()))
- return DeadPHICycle(PU, PotentiallyDeadPHIs);
-
- return false;
-}
-
-/// PHIsEqualValue - Return true if this phi node is always equal to
-/// NonPhiInVal. This happens with mutually cyclic phi nodes like:
-/// z = some value; x = phi (y, z); y = phi (x, z)
-static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
- SmallPtrSet<PHINode*, 16> &ValueEqualPHIs) {
- // See if we already saw this PHI node.
- if (!ValueEqualPHIs.insert(PN))
- return true;
-
- // Don't scan crazily complex things.
- if (ValueEqualPHIs.size() == 16)
- return false;
-
- // Scan the operands to see if they are either phi nodes or are equal to
- // the value.
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
- Value *Op = PN->getIncomingValue(i);
- if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
- if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
- return false;
- } else if (Op != NonPhiInVal)
- return false;
- }
-
- return true;
-}
-
-
-namespace {
-struct PHIUsageRecord {
- unsigned PHIId; // The ID # of the PHI (something deterministic to sort on)
- unsigned Shift; // The amount shifted.
- Instruction *Inst; // The trunc instruction.
-
- PHIUsageRecord(unsigned pn, unsigned Sh, Instruction *User)
- : PHIId(pn), Shift(Sh), Inst(User) {}
-
- bool operator<(const PHIUsageRecord &RHS) const {
- if (PHIId < RHS.PHIId) return true;
- if (PHIId > RHS.PHIId) return false;
- if (Shift < RHS.Shift) return true;
- if (Shift > RHS.Shift) return false;
- return Inst->getType()->getPrimitiveSizeInBits() <
- RHS.Inst->getType()->getPrimitiveSizeInBits();
- }
-};
-
-struct LoweredPHIRecord {
- PHINode *PN; // The PHI that was lowered.
- unsigned Shift; // The amount shifted.
- unsigned Width; // The width extracted.
-
- LoweredPHIRecord(PHINode *pn, unsigned Sh, const Type *Ty)
- : PN(pn), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}
-
- // Ctor form used by DenseMap.
- LoweredPHIRecord(PHINode *pn, unsigned Sh)
- : PN(pn), Shift(Sh), Width(0) {}
-};
-}
-
-namespace llvm {
- template<>
- struct DenseMapInfo<LoweredPHIRecord> {
- static inline LoweredPHIRecord getEmptyKey() {
- return LoweredPHIRecord(0, 0);
- }
- static inline LoweredPHIRecord getTombstoneKey() {
- return LoweredPHIRecord(0, 1);
- }
- static unsigned getHashValue(const LoweredPHIRecord &Val) {
- return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
- (Val.Width>>3);
- }
- static bool isEqual(const LoweredPHIRecord &LHS,
- const LoweredPHIRecord &RHS) {
- return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
- LHS.Width == RHS.Width;
- }
- };
- template <>
- struct isPodLike<LoweredPHIRecord> { static const bool value = true; };
-}
-
-
-/// SliceUpIllegalIntegerPHI - This is an integer PHI and we know that it has an
-/// illegal type: see if it is only used by trunc or trunc(lshr) operations. If
-/// so, we split the PHI into the various pieces being extracted. This sort of
-/// thing is introduced when SROA promotes an aggregate to large integer values.
-///
-/// TODO: The user of the trunc may be a bitcast to float/double/vector or an
-/// inttoptr. We should produce new PHIs in the right type.
-///
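-/// A rough sketch of the slicing (illustrative only): an illegal i64 PHI %p
-/// whose only uses are
-///   %lo    = trunc i64 %p to i32
-///   %hi.sh = lshr i64 %p, 32
-///   %hi    = trunc i64 %hi.sh to i32
-/// is rewritten as two legal i32 PHIs, one feeding %lo and one feeding %hi,
-/// with the lshr+trunc extraction performed in each predecessor instead.
-///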
-Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
- // PHIUsers - Keep track of all of the truncated values extracted from a set
- // of PHIs, along with their offset. These are the things we want to rewrite.
- SmallVector<PHIUsageRecord, 16> PHIUsers;
-
- // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
- // nodes which are extracted from. PHIsInspected is a set we use to avoid
- // revisiting PHIs, and PHIsToSlice is an ordered list of PHIs whose uses we
- // still need to check (to ensure they are all extracts).
- SmallVector<PHINode*, 8> PHIsToSlice;
- SmallPtrSet<PHINode*, 8> PHIsInspected;
-
- PHIsToSlice.push_back(&FirstPhi);
- PHIsInspected.insert(&FirstPhi);
-
- for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
- PHINode *PN = PHIsToSlice[PHIId];
-
- // Scan the input list of the PHI. If any input is an invoke, and if the
- // input is defined in the predecessor, then we won't be able to split the critical
- // edge which is required to insert a truncate. Because of this, we have to
- // bail out.
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
- InvokeInst *II = dyn_cast<InvokeInst>(PN->getIncomingValue(i));
- if (II == 0) continue;
- if (II->getParent() != PN->getIncomingBlock(i))
- continue;
-
- // If we have a phi, and if it's directly in the predecessor, then we have
- // a critical edge where we need to put the truncate. Since we can't
- // split the edge in instcombine, we have to bail out.
- return 0;
- }
-
-
- for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
- UI != E; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
-
- // If the user is a PHI, inspect its uses recursively.
- if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
- if (PHIsInspected.insert(UserPN))
- PHIsToSlice.push_back(UserPN);
- continue;
- }
-
- // Truncates are always ok.
- if (isa<TruncInst>(User)) {
- PHIUsers.push_back(PHIUsageRecord(PHIId, 0, User));
- continue;
- }
-
- // Otherwise it must be a lshr which can only be used by one trunc.
- if (User->getOpcode() != Instruction::LShr ||
- !User->hasOneUse() || !isa<TruncInst>(User->use_back()) ||
- !isa<ConstantInt>(User->getOperand(1)))
- return 0;
-
- unsigned Shift = cast<ConstantInt>(User->getOperand(1))->getZExtValue();
- PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, User->use_back()));
- }
- }
-
- // If we have no users, they must be all self uses, just nuke the PHI.
- if (PHIUsers.empty())
- return ReplaceInstUsesWith(FirstPhi, UndefValue::get(FirstPhi.getType()));
-
- // If this phi node is transformable, create new PHIs for all the pieces
- // extracted out of it. First, sort the users by their offset and size.
- array_pod_sort(PHIUsers.begin(), PHIUsers.end());
-
- DEBUG(errs() << "SLICING UP PHI: " << FirstPhi << '\n';
- for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
- errs() << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] <<'\n';
- );
-
- // PredValues - This is a temporary used when rewriting PHI nodes. It is
- // hoisted out here to avoid construction/destruction thrashing.
- DenseMap<BasicBlock*, Value*> PredValues;
-
- // ExtractedVals - Each new PHI we introduce is saved here so we don't
- // introduce redundant PHIs.
- DenseMap<LoweredPHIRecord, PHINode*> ExtractedVals;
-
- for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
- unsigned PHIId = PHIUsers[UserI].PHIId;
- PHINode *PN = PHIsToSlice[PHIId];
- unsigned Offset = PHIUsers[UserI].Shift;
- const Type *Ty = PHIUsers[UserI].Inst->getType();
-
- PHINode *EltPHI;
-
- // If we've already lowered a user like this, reuse the previously lowered
- // value.
- if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == 0) {
-
- // Otherwise, Create the new PHI node for this user.
- EltPHI = PHINode::Create(Ty, PN->getName()+".off"+Twine(Offset), PN);
- assert(EltPHI->getType() != PN->getType() &&
- "Truncate didn't shrink phi?");
-
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
- BasicBlock *Pred = PN->getIncomingBlock(i);
- Value *&PredVal = PredValues[Pred];
-
- // If we already have a value for this predecessor, reuse it.
- if (PredVal) {
- EltPHI->addIncoming(PredVal, Pred);
- continue;
- }
-
- // Handle the PHI self-reuse case.
- Value *InVal = PN->getIncomingValue(i);
- if (InVal == PN) {
- PredVal = EltPHI;
- EltPHI->addIncoming(PredVal, Pred);
- continue;
- }
-
- if (PHINode *InPHI = dyn_cast<PHINode>(PN)) {
- // If the incoming value was a PHI, and if it was one of the PHIs we
- // already rewrote, just use the lowered value.
- if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
- PredVal = Res;
- EltPHI->addIncoming(PredVal, Pred);
- continue;
- }
- }
-
- // Otherwise, do an extract in the predecessor.
- Builder->SetInsertPoint(Pred, Pred->getTerminator());
- Value *Res = InVal;
- if (Offset)
- Res = Builder->CreateLShr(Res, ConstantInt::get(InVal->getType(),
- Offset), "extract");
- Res = Builder->CreateTrunc(Res, Ty, "extract.t");
- PredVal = Res;
- EltPHI->addIncoming(Res, Pred);
-
- // If the incoming value was a PHI, and if it was one of the PHIs we are
- // rewriting, we will ultimately delete the code we inserted. This
- // means we need to revisit that PHI to make sure we extract out the
- // needed piece.
- if (PHINode *OldInVal = dyn_cast<PHINode>(PN->getIncomingValue(i)))
- if (PHIsInspected.count(OldInVal)) {
- unsigned RefPHIId = std::find(PHIsToSlice.begin(),PHIsToSlice.end(),
- OldInVal)-PHIsToSlice.begin();
- PHIUsers.push_back(PHIUsageRecord(RefPHIId, Offset,
- cast<Instruction>(Res)));
- ++UserE;
- }
- }
- PredValues.clear();
-
- DEBUG(errs() << " Made element PHI for offset " << Offset << ": "
- << *EltPHI << '\n');
- ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
- }
-
- // Replace the use of this piece with the PHI node.
- ReplaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
- }
-
- // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
- // with undefs.
- Value *Undef = UndefValue::get(FirstPhi.getType());
- for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
- ReplaceInstUsesWith(*PHIsToSlice[i], Undef);
- return ReplaceInstUsesWith(FirstPhi, Undef);
-}
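The per-predecessor "extract" built above is just an lshr followed by a trunc, i.e. reading a narrow field out of a wide integer. A minimal standalone C++ sketch of that identity (the helper name extract16 is made up purely for illustration):

    #include <cassert>
    #include <cstdint>

    // Models the lshr+trunc pair emitted in each predecessor: shift the slice
    // down to bit 0, then truncate to the slice type.
    static uint16_t extract16(uint64_t wide, unsigned offset) {
      return static_cast<uint16_t>(wide >> offset);
    }

    int main() {
      uint64_t wide = 0x1122334455667788ULL;
      assert(extract16(wide, 0)  == 0x7788);
      assert(extract16(wide, 16) == 0x5566);
      assert(extract16(wide, 32) == 0x3344);
      return 0;
    }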
-
-// PHINode simplification
-//
-Instruction *InstCombiner::visitPHINode(PHINode &PN) {
- // If LCSSA is around, don't mess with Phi nodes
- if (MustPreserveLCSSA) return 0;
-
- if (Value *V = PN.hasConstantValue())
- return ReplaceInstUsesWith(PN, V);
-
- // If all PHI operands are the same operation, pull them through the PHI,
- // reducing code size.
- if (isa<Instruction>(PN.getIncomingValue(0)) &&
- isa<Instruction>(PN.getIncomingValue(1)) &&
- cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
- cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
- // FIXME: The hasOneUse check will fail for PHIs that use the value more
- // than once, even when the PHI itself is the value's only user.
- PN.getIncomingValue(0)->hasOneUse())
- if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
- return Result;
-
- // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
- // this PHI only has a single use (a PHI), and if that PHI only has one use (a
- // PHI)... break the cycle.
- if (PN.hasOneUse()) {
- Instruction *PHIUser = cast<Instruction>(PN.use_back());
- if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
- SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
- PotentiallyDeadPHIs.insert(&PN);
- if (DeadPHICycle(PU, PotentiallyDeadPHIs))
- return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
- }
-
- // If this phi has a single use, and if that use just computes a value for
- // the next iteration of a loop, delete the phi. This occurs with unused
- // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
- // common case here is good because the only other things that catch this
- // are induction variable analysis (sometimes) and ADCE, which is only run
- // late.
- if (PHIUser->hasOneUse() &&
- (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
- PHIUser->use_back() == &PN) {
- return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
- }
- }
-
- // We sometimes end up with phi cycles that non-obviously end up being the
- // same value, for example:
- // z = some value; x = phi (y, z); y = phi (x, z)
- // where the phi nodes don't necessarily need to be in the same block. Do a
- // quick check to see if the PHI node only contains a single non-phi value; if
- // so, scan to see if the phi cycle is actually equal to that value.
- {
- unsigned InValNo = 0, NumOperandVals = PN.getNumIncomingValues();
- // Scan for the first non-phi operand.
- while (InValNo != NumOperandVals &&
- isa<PHINode>(PN.getIncomingValue(InValNo)))
- ++InValNo;
-
- if (InValNo != NumOperandVals) {
- Value *NonPhiInVal = PN.getOperand(InValNo);
-
- // Scan the rest of the operands to see if there are any conflicts; if so,
- // there is no need to recursively scan other phis.
- for (++InValNo; InValNo != NumOperandVals; ++InValNo) {
- Value *OpVal = PN.getIncomingValue(InValNo);
- if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
- break;
- }
-
- // If we scanned over all operands, then we have one unique value plus
- // phi values. Scan PHI nodes to see if they all merge in each other or
- // the value.
- if (InValNo == NumOperandVals) {
- SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
- if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
- return ReplaceInstUsesWith(PN, NonPhiInVal);
- }
- }
- }
-
- // If there are multiple PHIs, sort their operands so that they all list
- // the blocks in the same order. This will help identical PHIs be eliminated
- // by other passes. Other passes shouldn't depend on this for correctness
- // however.
- PHINode *FirstPN = cast<PHINode>(PN.getParent()->begin());
- if (&PN != FirstPN)
- for (unsigned i = 0, e = FirstPN->getNumIncomingValues(); i != e; ++i) {
- BasicBlock *BBA = PN.getIncomingBlock(i);
- BasicBlock *BBB = FirstPN->getIncomingBlock(i);
- if (BBA != BBB) {
- Value *VA = PN.getIncomingValue(i);
- unsigned j = PN.getBasicBlockIndex(BBB);
- Value *VB = PN.getIncomingValue(j);
- PN.setIncomingBlock(i, BBB);
- PN.setIncomingValue(i, VB);
- PN.setIncomingBlock(j, BBA);
- PN.setIncomingValue(j, VA);
- // NOTE: Instcombine normally would want us to "return &PN" if we
- // modified any of the operands of an instruction. However, since we
- // aren't adding or removing uses (just rearranging them) we don't do
- // this in this case.
- }
- }
-
- // If this is an integer PHI and we know that it has an illegal type, see if
- // it is only used by trunc or trunc(lshr) operations. If so, we split the
- // PHI into the various pieces being extracted. This sort of thing is
- // introduced when SROA promotes an aggregate to a single large integer type.
- if (PN.getType()->isIntegerTy() && TD &&
- !TD->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
- if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
- return Res;
-
- return 0;
-}
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
deleted file mode 100644
index 2fc9325..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ /dev/null
@@ -1,691 +0,0 @@
-//===- InstCombineSelect.cpp ----------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the visitSelect function.
-//
-//===----------------------------------------------------------------------===//
-
-#include "InstCombine.h"
-#include "llvm/Support/PatternMatch.h"
-using namespace llvm;
-using namespace PatternMatch;
-
-/// MatchSelectPattern - Pattern match integer [SU]MIN, [SU]MAX, and ABS idioms,
-/// returning the kind and providing the out parameter results if we
-/// successfully match.
-static SelectPatternFlavor
-MatchSelectPattern(Value *V, Value *&LHS, Value *&RHS) {
- SelectInst *SI = dyn_cast<SelectInst>(V);
- if (SI == 0) return SPF_UNKNOWN;
-
- ICmpInst *ICI = dyn_cast<ICmpInst>(SI->getCondition());
- if (ICI == 0) return SPF_UNKNOWN;
-
- LHS = ICI->getOperand(0);
- RHS = ICI->getOperand(1);
-
- // (icmp X, Y) ? X : Y
- if (SI->getTrueValue() == ICI->getOperand(0) &&
- SI->getFalseValue() == ICI->getOperand(1)) {
- switch (ICI->getPredicate()) {
- default: return SPF_UNKNOWN; // Equality.
- case ICmpInst::ICMP_UGT:
- case ICmpInst::ICMP_UGE: return SPF_UMAX;
- case ICmpInst::ICMP_SGT:
- case ICmpInst::ICMP_SGE: return SPF_SMAX;
- case ICmpInst::ICMP_ULT:
- case ICmpInst::ICMP_ULE: return SPF_UMIN;
- case ICmpInst::ICMP_SLT:
- case ICmpInst::ICMP_SLE: return SPF_SMIN;
- }
- }
-
- // (icmp X, Y) ? Y : X
- if (SI->getTrueValue() == ICI->getOperand(1) &&
- SI->getFalseValue() == ICI->getOperand(0)) {
- switch (ICI->getPredicate()) {
- default: return SPF_UNKNOWN; // Equality.
- case ICmpInst::ICMP_UGT:
- case ICmpInst::ICMP_UGE: return SPF_UMIN;
- case ICmpInst::ICMP_SGT:
- case ICmpInst::ICMP_SGE: return SPF_SMIN;
- case ICmpInst::ICMP_ULT:
- case ICmpInst::ICMP_ULE: return SPF_UMAX;
- case ICmpInst::ICMP_SLT:
- case ICmpInst::ICMP_SLE: return SPF_SMAX;
- }
- }
-
- // TODO: (X > 4) ? X : 5 --> (X >= 5) ? X : 5 --> MAX(X, 5)
-
- return SPF_UNKNOWN;
-}
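For reference, the icmp+select idioms classified above are the ordinary min/max expressions. A small illustrative check in plain C++ using only the standard <algorithm> header:

    #include <algorithm>
    #include <cassert>

    int main() {
      int x = -3, y = 7;
      assert(((x < y) ? x : y) == std::min(x, y));   // SPF_SMIN
      assert(((x > y) ? x : y) == std::max(x, y));   // SPF_SMAX
      unsigned a = 3u, b = 0xFFFFFFFFu;
      assert(((a < b) ? a : b) == std::min(a, b));   // SPF_UMIN
      assert(((a > b) ? a : b) == std::max(a, b));   // SPF_UMAX
      return 0;
    }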
-
-
-/// GetSelectFoldableOperands - We want to turn code that looks like this:
-/// %C = or %A, %B
-/// %D = select %cond, %C, %A
-/// into:
-/// %C = select %cond, %B, 0
-/// %D = or %A, %C
-///
-/// Assuming that the specified instruction is an operand to the select, return
-/// a bitmask indicating which operands of this instruction are foldable if they
-/// equal the other incoming value of the select.
-///
-static unsigned GetSelectFoldableOperands(Instruction *I) {
- switch (I->getOpcode()) {
- case Instruction::Add:
- case Instruction::Mul:
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor:
- return 3; // Can fold through either operand.
- case Instruction::Sub: // Can only fold on the amount subtracted.
- case Instruction::Shl: // Can only fold on the shift amount.
- case Instruction::LShr:
- case Instruction::AShr:
- return 1;
- default:
- return 0; // Cannot fold
- }
-}
-
-/// GetSelectFoldableConstant - For the same transformation as the previous
-/// function, return the identity constant that goes into the select.
-static Constant *GetSelectFoldableConstant(Instruction *I) {
- switch (I->getOpcode()) {
- default: llvm_unreachable("This cannot happen!");
- case Instruction::Add:
- case Instruction::Sub:
- case Instruction::Or:
- case Instruction::Xor:
- case Instruction::Shl:
- case Instruction::LShr:
- case Instruction::AShr:
- return Constant::getNullValue(I->getType());
- case Instruction::And:
- return Constant::getAllOnesValue(I->getType());
- case Instruction::Mul:
- return ConstantInt::get(I->getType(), 1);
- }
-}
-
-/// FoldSelectOpOp - Here we have (select c, TI, FI), and we know that TI and FI
-/// have the same opcode and only one use each. Try to simplify this.
-Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
- Instruction *FI) {
- if (TI->getNumOperands() == 1) {
- // If this is a non-volatile load or a cast from the same type,
- // merge.
- if (TI->isCast()) {
- if (TI->getOperand(0)->getType() != FI->getOperand(0)->getType())
- return 0;
- } else {
- return 0; // unknown unary op.
- }
-
- // Fold this by inserting a select from the input values.
- SelectInst *NewSI = SelectInst::Create(SI.getCondition(), TI->getOperand(0),
- FI->getOperand(0), SI.getName()+".v");
- InsertNewInstBefore(NewSI, SI);
- return CastInst::Create(Instruction::CastOps(TI->getOpcode()), NewSI,
- TI->getType());
- }
-
- // Only handle binary operators here.
- if (!isa<BinaryOperator>(TI))
- return 0;
-
- // Figure out if the operations have any operands in common.
- Value *MatchOp, *OtherOpT, *OtherOpF;
- bool MatchIsOpZero;
- if (TI->getOperand(0) == FI->getOperand(0)) {
- MatchOp = TI->getOperand(0);
- OtherOpT = TI->getOperand(1);
- OtherOpF = FI->getOperand(1);
- MatchIsOpZero = true;
- } else if (TI->getOperand(1) == FI->getOperand(1)) {
- MatchOp = TI->getOperand(1);
- OtherOpT = TI->getOperand(0);
- OtherOpF = FI->getOperand(0);
- MatchIsOpZero = false;
- } else if (!TI->isCommutative()) {
- return 0;
- } else if (TI->getOperand(0) == FI->getOperand(1)) {
- MatchOp = TI->getOperand(0);
- OtherOpT = TI->getOperand(1);
- OtherOpF = FI->getOperand(0);
- MatchIsOpZero = true;
- } else if (TI->getOperand(1) == FI->getOperand(0)) {
- MatchOp = TI->getOperand(1);
- OtherOpT = TI->getOperand(0);
- OtherOpF = FI->getOperand(1);
- MatchIsOpZero = true;
- } else {
- return 0;
- }
-
- // If we reach here, they do have operations in common.
- SelectInst *NewSI = SelectInst::Create(SI.getCondition(), OtherOpT,
- OtherOpF, SI.getName()+".v");
- InsertNewInstBefore(NewSI, SI);
-
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TI)) {
- if (MatchIsOpZero)
- return BinaryOperator::Create(BO->getOpcode(), MatchOp, NewSI);
- else
- return BinaryOperator::Create(BO->getOpcode(), NewSI, MatchOp);
- }
- llvm_unreachable("Shouldn't get here");
- return 0;
-}
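The fold above relies on a select of two same-opcode operations that share an operand being rewritable as one operation on a select of the differing operands (the same idea covers the cast case). A hedged C++ sketch of that identity for two binary ops:

    #include <cassert>

    int main() {
      int x = 5, y = 9, z = -2;
      for (bool c : {false, true}) {
        // select c, (x+y), (x+z)  ==  x + (select c, y, z)
        assert((c ? x + y : x + z) == x + (c ? y : z));
        // select c, (x&y), (x&z)  ==  x & (select c, y, z)
        assert((c ? (x & y) : (x & z)) == (x & (c ? y : z)));
      }
      return 0;
    }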
-
-static bool isSelect01(Constant *C1, Constant *C2) {
- ConstantInt *C1I = dyn_cast<ConstantInt>(C1);
- if (!C1I)
- return false;
- ConstantInt *C2I = dyn_cast<ConstantInt>(C2);
- if (!C2I)
- return false;
- return (C1I->isZero() || C1I->isOne()) && (C2I->isZero() || C2I->isOne());
-}
-
-/// FoldSelectIntoOp - Try to fold the select into one of the operands to
-/// facilitate further optimization.
-Instruction *InstCombiner::FoldSelectIntoOp(SelectInst &SI, Value *TrueVal,
- Value *FalseVal) {
- // See the comment above GetSelectFoldableOperands for a description of the
- // transformation we are doing here.
- if (Instruction *TVI = dyn_cast<Instruction>(TrueVal)) {
- if (TVI->hasOneUse() && TVI->getNumOperands() == 2 &&
- !isa<Constant>(FalseVal)) {
- if (unsigned SFO = GetSelectFoldableOperands(TVI)) {
- unsigned OpToFold = 0;
- if ((SFO & 1) && FalseVal == TVI->getOperand(0)) {
- OpToFold = 1;
- } else if ((SFO & 2) && FalseVal == TVI->getOperand(1)) {
- OpToFold = 2;
- }
-
- if (OpToFold) {
- Constant *C = GetSelectFoldableConstant(TVI);
- Value *OOp = TVI->getOperand(2-OpToFold);
- // Avoid creating select between 2 constants unless it's selecting
- // between 0 and 1.
- if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
- Instruction *NewSel = SelectInst::Create(SI.getCondition(), OOp, C);
- InsertNewInstBefore(NewSel, SI);
- NewSel->takeName(TVI);
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TVI))
- return BinaryOperator::Create(BO->getOpcode(), FalseVal, NewSel);
- llvm_unreachable("Unknown instruction!!");
- }
- }
- }
- }
- }
-
- if (Instruction *FVI = dyn_cast<Instruction>(FalseVal)) {
- if (FVI->hasOneUse() && FVI->getNumOperands() == 2 &&
- !isa<Constant>(TrueVal)) {
- if (unsigned SFO = GetSelectFoldableOperands(FVI)) {
- unsigned OpToFold = 0;
- if ((SFO & 1) && TrueVal == FVI->getOperand(0)) {
- OpToFold = 1;
- } else if ((SFO & 2) && TrueVal == FVI->getOperand(1)) {
- OpToFold = 2;
- }
-
- if (OpToFold) {
- Constant *C = GetSelectFoldableConstant(FVI);
- Value *OOp = FVI->getOperand(2-OpToFold);
- // Avoid creating select between 2 constants unless it's selecting
- // between 0 and 1.
- if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
- Instruction *NewSel = SelectInst::Create(SI.getCondition(), C, OOp);
- InsertNewInstBefore(NewSel, SI);
- NewSel->takeName(FVI);
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FVI))
- return BinaryOperator::Create(BO->getOpcode(), TrueVal, NewSel);
- llvm_unreachable("Unknown instruction!!");
- }
- }
- }
- }
- }
-
- return 0;
-}
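The identity constants returned by GetSelectFoldableConstant are what make this fold valid: when the condition picks the "other" arm, the folded operand collapses to the operation's identity element. An illustrative check in plain C++:

    #include <cassert>

    int main() {
      unsigned a = 0xF0u, b = 0x0Fu;
      for (bool cond : {false, true}) {
        assert((cond ? (a | b) : a) == (a | (cond ? b : 0u)));   // or : identity 0
        assert((cond ? (a & b) : a) == (a & (cond ? b : ~0u)));  // and: identity ~0
        assert((cond ? (a * b) : a) == (a * (cond ? b : 1u)));   // mul: identity 1
      }
      return 0;
    }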
-
-/// visitSelectInstWithICmp - Visit a SelectInst that has an
-/// ICmpInst as its first operand.
-///
-Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
- ICmpInst *ICI) {
- bool Changed = false;
- ICmpInst::Predicate Pred = ICI->getPredicate();
- Value *CmpLHS = ICI->getOperand(0);
- Value *CmpRHS = ICI->getOperand(1);
- Value *TrueVal = SI.getTrueValue();
- Value *FalseVal = SI.getFalseValue();
-
- // Check cases where the comparison is with a constant that
- // can be adjusted to fit the min/max idiom. We may edit ICI in
- // place here, so make sure the select is the only user.
- if (ICI->hasOneUse())
- if (ConstantInt *CI = dyn_cast<ConstantInt>(CmpRHS)) {
- switch (Pred) {
- default: break;
- case ICmpInst::ICMP_ULT:
- case ICmpInst::ICMP_SLT: {
- // X < MIN ? T : F --> F
- if (CI->isMinValue(Pred == ICmpInst::ICMP_SLT))
- return ReplaceInstUsesWith(SI, FalseVal);
- // X < C ? X : C-1 --> X > C-1 ? C-1 : X
- Constant *AdjustedRHS =
- ConstantInt::get(CI->getContext(), CI->getValue()-1);
- if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
- (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
- Pred = ICmpInst::getSwappedPredicate(Pred);
- CmpRHS = AdjustedRHS;
- std::swap(FalseVal, TrueVal);
- ICI->setPredicate(Pred);
- ICI->setOperand(1, CmpRHS);
- SI.setOperand(1, TrueVal);
- SI.setOperand(2, FalseVal);
- Changed = true;
- }
- break;
- }
- case ICmpInst::ICMP_UGT:
- case ICmpInst::ICMP_SGT: {
- // X > MAX ? T : F --> F
- if (CI->isMaxValue(Pred == ICmpInst::ICMP_SGT))
- return ReplaceInstUsesWith(SI, FalseVal);
- // X > C ? X : C+1 --> X < C+1 ? C+1 : X
- Constant *AdjustedRHS =
- ConstantInt::get(CI->getContext(), CI->getValue()+1);
- if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
- (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
- Pred = ICmpInst::getSwappedPredicate(Pred);
- CmpRHS = AdjustedRHS;
- std::swap(FalseVal, TrueVal);
- ICI->setPredicate(Pred);
- ICI->setOperand(1, CmpRHS);
- SI.setOperand(1, TrueVal);
- SI.setOperand(2, FalseVal);
- Changed = true;
- }
- break;
- }
- }
- }
-
- if (CmpLHS == TrueVal && CmpRHS == FalseVal) {
- // Transform (X == Y) ? X : Y -> Y
- if (Pred == ICmpInst::ICMP_EQ)
- return ReplaceInstUsesWith(SI, FalseVal);
- // Transform (X != Y) ? X : Y -> X
- if (Pred == ICmpInst::ICMP_NE)
- return ReplaceInstUsesWith(SI, TrueVal);
- /// NOTE: if we wanted to, this is where to detect integer MIN/MAX
-
- } else if (CmpLHS == FalseVal && CmpRHS == TrueVal) {
- // Transform (X == Y) ? Y : X -> X
- if (Pred == ICmpInst::ICMP_EQ)
- return ReplaceInstUsesWith(SI, FalseVal);
- // Transform (X != Y) ? Y : X -> Y
- if (Pred == ICmpInst::ICMP_NE)
- return ReplaceInstUsesWith(SI, TrueVal);
- /// NOTE: if we wanted to, this is where to detect integer MIN/MAX
- }
- return Changed ? &SI : 0;
-}
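The constant adjustment performed above (X < C ? X : C-1 and X > C ? X : C+1) is safe because both the original and the swapped form compute the same min/max. A quick illustrative check:

    #include <algorithm>
    #include <cassert>

    int main() {
      const int C = 10;
      for (int x = -20; x <= 20; ++x) {
        assert(((x < C) ? x : C - 1) == std::min(x, C - 1));  // ICMP_SLT case
        assert(((x > C) ? x : C + 1) == std::max(x, C + 1));  // ICMP_SGT case
      }
      return 0;
    }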
-
-
-/// CanSelectOperandBeMappingIntoPredBlock - SI is a select whose condition is a
-/// PHI node (but the two may be in different blocks). See if the true/false
-/// values (V) are live in all of the predecessor blocks of the PHI. For
-/// example, cases like this cannot be mapped:
-///
-/// X = phi [ C1, BB1], [C2, BB2]
-/// Y = add
-/// Z = select X, Y, 0
-///
-/// because Y is not live in BB1/BB2.
-///
-static bool CanSelectOperandBeMappingIntoPredBlock(const Value *V,
- const SelectInst &SI) {
- // If the value is a non-instruction value like a constant or argument, it
- // can always be mapped.
- const Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0) return true;
-
- // If V is a PHI node defined in the same block as the condition PHI, we can
- // map the arguments.
- const PHINode *CondPHI = cast<PHINode>(SI.getCondition());
-
- if (const PHINode *VP = dyn_cast<PHINode>(I))
- if (VP->getParent() == CondPHI->getParent())
- return true;
-
- // Otherwise, if the PHI and select are defined in the same block and if V is
- // defined in a different block, then we can transform it.
- if (SI.getParent() == CondPHI->getParent() &&
- I->getParent() != CondPHI->getParent())
- return true;
-
- // Otherwise we have a 'hard' case and we can't tell without doing more
- // detailed dominator based analysis, punt.
- return false;
-}
-
-/// FoldSPFofSPF - We have an SPF (e.g. a min or max) of an SPF of the form:
-/// SPF2(SPF1(A, B), C)
-Instruction *InstCombiner::FoldSPFofSPF(Instruction *Inner,
- SelectPatternFlavor SPF1,
- Value *A, Value *B,
- Instruction &Outer,
- SelectPatternFlavor SPF2, Value *C) {
- if (C == A || C == B) {
- // MAX(MAX(A, B), B) -> MAX(A, B)
- // MIN(MIN(a, b), a) -> MIN(a, b)
- if (SPF1 == SPF2)
- return ReplaceInstUsesWith(Outer, Inner);
-
- // MAX(MIN(a, b), a) -> a
- // MIN(MAX(a, b), a) -> a
- if ((SPF1 == SPF_SMIN && SPF2 == SPF_SMAX) ||
- (SPF1 == SPF_SMAX && SPF2 == SPF_SMIN) ||
- (SPF1 == SPF_UMIN && SPF2 == SPF_UMAX) ||
- (SPF1 == SPF_UMAX && SPF2 == SPF_UMIN))
- return ReplaceInstUsesWith(Outer, C);
- }
-
- // TODO: MIN(MIN(A, 23), 97)
- return 0;
-}
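The nested-SPF identities used above are easy to sanity-check with std::min/std::max; a minimal illustrative loop:

    #include <algorithm>
    #include <cassert>

    int main() {
      for (int a = -2; a <= 2; ++a)
        for (int b = -2; b <= 2; ++b) {
          assert(std::max(std::max(a, b), b) == std::max(a, b)); // MAX(MAX(a,b), b) -> MAX(a,b)
          assert(std::min(std::min(a, b), a) == std::min(a, b)); // MIN(MIN(a,b), a) -> MIN(a,b)
          assert(std::max(std::min(a, b), a) == a);              // MAX(MIN(a,b), a) -> a
          assert(std::min(std::max(a, b), a) == a);              // MIN(MAX(a,b), a) -> a
        }
      return 0;
    }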
-
-
-
-
-Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
- Value *CondVal = SI.getCondition();
- Value *TrueVal = SI.getTrueValue();
- Value *FalseVal = SI.getFalseValue();
-
- // select true, X, Y -> X
- // select false, X, Y -> Y
- if (ConstantInt *C = dyn_cast<ConstantInt>(CondVal))
- return ReplaceInstUsesWith(SI, C->getZExtValue() ? TrueVal : FalseVal);
-
- // select C, X, X -> X
- if (TrueVal == FalseVal)
- return ReplaceInstUsesWith(SI, TrueVal);
-
- if (isa<UndefValue>(TrueVal)) // select C, undef, X -> X
- return ReplaceInstUsesWith(SI, FalseVal);
- if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
- return ReplaceInstUsesWith(SI, TrueVal);
- if (isa<UndefValue>(CondVal)) { // select undef, X, Y -> X or Y
- if (isa<Constant>(TrueVal))
- return ReplaceInstUsesWith(SI, TrueVal);
- else
- return ReplaceInstUsesWith(SI, FalseVal);
- }
-
- if (SI.getType()->isIntegerTy(1)) {
- if (ConstantInt *C = dyn_cast<ConstantInt>(TrueVal)) {
- if (C->getZExtValue()) {
- // Change: A = select B, true, C --> A = or B, C
- return BinaryOperator::CreateOr(CondVal, FalseVal);
- } else {
- // Change: A = select B, false, C --> A = and !B, C
- Value *NotCond =
- InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
- "not."+CondVal->getName()), SI);
- return BinaryOperator::CreateAnd(NotCond, FalseVal);
- }
- } else if (ConstantInt *C = dyn_cast<ConstantInt>(FalseVal)) {
- if (C->getZExtValue() == false) {
- // Change: A = select B, C, false --> A = and B, C
- return BinaryOperator::CreateAnd(CondVal, TrueVal);
- } else {
- // Change: A = select B, C, true --> A = or !B, C
- Value *NotCond =
- InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
- "not."+CondVal->getName()), SI);
- return BinaryOperator::CreateOr(NotCond, TrueVal);
- }
- }
-
- // select a, b, a -> a&b
- // select a, a, b -> a|b
- if (CondVal == TrueVal)
- return BinaryOperator::CreateOr(CondVal, FalseVal);
- else if (CondVal == FalseVal)
- return BinaryOperator::CreateAnd(CondVal, TrueVal);
- }
-
- // Selecting between two integer constants?
- if (ConstantInt *TrueValC = dyn_cast<ConstantInt>(TrueVal))
- if (ConstantInt *FalseValC = dyn_cast<ConstantInt>(FalseVal)) {
- // select C, 1, 0 -> zext C to int
- if (FalseValC->isZero() && TrueValC->getValue() == 1)
- return new ZExtInst(CondVal, SI.getType());
-
- // select C, -1, 0 -> sext C to int
- if (FalseValC->isZero() && TrueValC->isAllOnesValue())
- return new SExtInst(CondVal, SI.getType());
-
- // select C, 0, 1 -> zext !C to int
- if (TrueValC->isZero() && FalseValC->getValue() == 1) {
- Value *NotCond = Builder->CreateNot(CondVal, "not."+CondVal->getName());
- return new ZExtInst(NotCond, SI.getType());
- }
-
- // select C, 0, -1 -> sext !C to int
- if (TrueValC->isZero() && FalseValC->isAllOnesValue()) {
- Value *NotCond = Builder->CreateNot(CondVal, "not."+CondVal->getName());
- return new SExtInst(NotCond, SI.getType());
- }
-
- if (ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition())) {
- // If one of the constants is zero (we know they can't both be) and we
- // have an icmp instruction with zero, and we have an 'and' with the
- // non-constant value, eliminate this whole mess. This corresponds to
- // cases like this: ((X & 27) ? 27 : 0)
- if (TrueValC->isZero() || FalseValC->isZero())
- if (IC->isEquality() && isa<ConstantInt>(IC->getOperand(1)) &&
- cast<Constant>(IC->getOperand(1))->isNullValue())
- if (Instruction *ICA = dyn_cast<Instruction>(IC->getOperand(0)))
- if (ICA->getOpcode() == Instruction::And &&
- isa<ConstantInt>(ICA->getOperand(1)) &&
- (ICA->getOperand(1) == TrueValC ||
- ICA->getOperand(1) == FalseValC) &&
- cast<ConstantInt>(ICA->getOperand(1))->getValue().isPowerOf2()) {
- // Okay, now we know that everything is set up; we just don't
- // know whether we have an icmp_ne or icmp_eq and whether the
- // true or false val is the zero.
- bool ShouldNotVal = !TrueValC->isZero();
- ShouldNotVal ^= IC->getPredicate() == ICmpInst::ICMP_NE;
- Value *V = ICA;
- if (ShouldNotVal)
- V = Builder->CreateXor(V, ICA->getOperand(1));
- return ReplaceInstUsesWith(SI, V);
- }
- }
- }
-
- // See if we are selecting two values based on a comparison of the two values.
- if (FCmpInst *FCI = dyn_cast<FCmpInst>(CondVal)) {
- if (FCI->getOperand(0) == TrueVal && FCI->getOperand(1) == FalseVal) {
- // Transform (X == Y) ? X : Y -> Y
- if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
- // This is not safe in general for floating point:
- // consider X== -0, Y== +0.
- // It becomes safe if either operand is a nonzero constant.
- ConstantFP *CFPt, *CFPf;
- if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
- !CFPt->getValueAPF().isZero()) ||
- ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
- !CFPf->getValueAPF().isZero()))
- return ReplaceInstUsesWith(SI, FalseVal);
- }
- // Transform (X une Y) ? X : Y -> X
- if (FCI->getPredicate() == FCmpInst::FCMP_UNE) {
- // This is not safe in general for floating point:
- // consider X== -0, Y== +0.
- // It becomes safe if either operand is a nonzero constant.
- ConstantFP *CFPt, *CFPf;
- if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
- !CFPt->getValueAPF().isZero()) ||
- ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
- !CFPf->getValueAPF().isZero()))
- return ReplaceInstUsesWith(SI, TrueVal);
- }
- // NOTE: if we wanted to, this is where to detect MIN/MAX
-
- } else if (FCI->getOperand(0) == FalseVal && FCI->getOperand(1) == TrueVal){
- // Transform (X == Y) ? Y : X -> X
- if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
- // This is not safe in general for floating point:
- // consider X== -0, Y== +0.
- // It becomes safe if either operand is a nonzero constant.
- ConstantFP *CFPt, *CFPf;
- if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
- !CFPt->getValueAPF().isZero()) ||
- ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
- !CFPf->getValueAPF().isZero()))
- return ReplaceInstUsesWith(SI, FalseVal);
- }
- // Transform (X une Y) ? Y : X -> Y
- if (FCI->getPredicate() == FCmpInst::FCMP_UNE) {
- // This is not safe in general for floating point:
- // consider X== -0, Y== +0.
- // It becomes safe if either operand is a nonzero constant.
- ConstantFP *CFPt, *CFPf;
- if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
- !CFPt->getValueAPF().isZero()) ||
- ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
- !CFPf->getValueAPF().isZero()))
- return ReplaceInstUsesWith(SI, TrueVal);
- }
- // NOTE: if we wanted to, this is where to detect MIN/MAX
- }
- // NOTE: if we wanted to, this is where to detect ABS
- }
-
- // See if we are selecting two values based on a comparison of the two values.
- if (ICmpInst *ICI = dyn_cast<ICmpInst>(CondVal))
- if (Instruction *Result = visitSelectInstWithICmp(SI, ICI))
- return Result;
-
- if (Instruction *TI = dyn_cast<Instruction>(TrueVal))
- if (Instruction *FI = dyn_cast<Instruction>(FalseVal))
- if (TI->hasOneUse() && FI->hasOneUse()) {
- Instruction *AddOp = 0, *SubOp = 0;
-
- // Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
- if (TI->getOpcode() == FI->getOpcode())
- if (Instruction *IV = FoldSelectOpOp(SI, TI, FI))
- return IV;
-
- // Turn select C, (X+Y), (X-Y) --> (X+(select C, Y, (-Y))). This is
- // even legal for FP.
- if ((TI->getOpcode() == Instruction::Sub &&
- FI->getOpcode() == Instruction::Add) ||
- (TI->getOpcode() == Instruction::FSub &&
- FI->getOpcode() == Instruction::FAdd)) {
- AddOp = FI; SubOp = TI;
- } else if ((FI->getOpcode() == Instruction::Sub &&
- TI->getOpcode() == Instruction::Add) ||
- (FI->getOpcode() == Instruction::FSub &&
- TI->getOpcode() == Instruction::FAdd)) {
- AddOp = TI; SubOp = FI;
- }
-
- if (AddOp) {
- Value *OtherAddOp = 0;
- if (SubOp->getOperand(0) == AddOp->getOperand(0)) {
- OtherAddOp = AddOp->getOperand(1);
- } else if (SubOp->getOperand(0) == AddOp->getOperand(1)) {
- OtherAddOp = AddOp->getOperand(0);
- }
-
- if (OtherAddOp) {
- // So at this point we know we have (Y -> OtherAddOp):
- // select C, (add X, Y), (sub X, Z)
- Value *NegVal; // Compute -Z
- if (Constant *C = dyn_cast<Constant>(SubOp->getOperand(1))) {
- NegVal = ConstantExpr::getNeg(C);
- } else {
- NegVal = InsertNewInstBefore(
- BinaryOperator::CreateNeg(SubOp->getOperand(1),
- "tmp"), SI);
- }
-
- Value *NewTrueOp = OtherAddOp;
- Value *NewFalseOp = NegVal;
- if (AddOp != TI)
- std::swap(NewTrueOp, NewFalseOp);
- Instruction *NewSel =
- SelectInst::Create(CondVal, NewTrueOp,
- NewFalseOp, SI.getName() + ".p");
-
- NewSel = InsertNewInstBefore(NewSel, SI);
- return BinaryOperator::CreateAdd(SubOp->getOperand(0), NewSel);
- }
- }
- }
-
- // See if we can fold the select into one of our operands.
- if (SI.getType()->isIntegerTy()) {
- if (Instruction *FoldI = FoldSelectIntoOp(SI, TrueVal, FalseVal))
- return FoldI;
-
- // MAX(MAX(a, b), a) -> MAX(a, b)
- // MIN(MIN(a, b), a) -> MIN(a, b)
- // MAX(MIN(a, b), a) -> a
- // MIN(MAX(a, b), a) -> a
- Value *LHS, *RHS, *LHS2, *RHS2;
- if (SelectPatternFlavor SPF = MatchSelectPattern(&SI, LHS, RHS)) {
- if (SelectPatternFlavor SPF2 = MatchSelectPattern(LHS, LHS2, RHS2))
- if (Instruction *R = FoldSPFofSPF(cast<Instruction>(LHS),SPF2,LHS2,RHS2,
- SI, SPF, RHS))
- return R;
- if (SelectPatternFlavor SPF2 = MatchSelectPattern(RHS, LHS2, RHS2))
- if (Instruction *R = FoldSPFofSPF(cast<Instruction>(RHS),SPF2,LHS2,RHS2,
- SI, SPF, LHS))
- return R;
- }
-
- // TODO.
- // ABS(-X) -> ABS(X)
- // ABS(ABS(X)) -> ABS(X)
- }
-
- // See if we can fold the select into a phi node if the condition is a select.
- if (isa<PHINode>(SI.getCondition()))
- // The true/false values have to be live in the PHI predecessor's blocks.
- if (CanSelectOperandBeMappingIntoPredBlock(TrueVal, SI) &&
- CanSelectOperandBeMappingIntoPredBlock(FalseVal, SI))
- if (Instruction *NV = FoldOpIntoPhi(SI))
- return NV;
-
- if (BinaryOperator::isNot(CondVal)) {
- SI.setOperand(0, BinaryOperator::getNotArgument(CondVal));
- SI.setOperand(1, FalseVal);
- SI.setOperand(2, TrueVal);
- return &SI;
- }
-
- return 0;
-}
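Several of the boolean and 0/1/-1 folds in visitSelectInst come down to simple logical identities; here is an illustrative C++ check of the ones used above:

    #include <cassert>

    int main() {
      for (bool b : {false, true})
        for (bool c : {false, true}) {
          assert((b ? true : c)  == (b || c));   // select B, true, C  -> or B, C
          assert((b ? false : c) == (!b && c));  // select B, false, C -> and !B, C
          assert((b ? c : false) == (b && c));   // select B, C, false -> and B, C
          assert((b ? c : true)  == (!b || c));  // select B, C, true  -> or !B, C
        }
      for (bool c : {false, true}) {
        assert((c ? 1 : 0)  == static_cast<int>(c));   // select C, 1, 0  -> zext C
        assert((c ? -1 : 0) == -static_cast<int>(c));  // select C, -1, 0 -> sext C
      }
      return 0;
    }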
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
deleted file mode 100644
index 836bda3..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ /dev/null
@@ -1,463 +0,0 @@
-//===- InstCombineShifts.cpp ----------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the visitShl, visitLShr, and visitAShr functions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "InstCombine.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Support/PatternMatch.h"
-using namespace llvm;
-using namespace PatternMatch;
-
-Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
- assert(I.getOperand(1)->getType() == I.getOperand(0)->getType());
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- // shl X, 0 == X and shr X, 0 == X
- // shl 0, X == 0 and shr 0, X == 0
- if (Op1 == Constant::getNullValue(Op1->getType()) ||
- Op0 == Constant::getNullValue(Op0->getType()))
- return ReplaceInstUsesWith(I, Op0);
-
- if (isa<UndefValue>(Op0)) {
- if (I.getOpcode() == Instruction::AShr) // undef >>s X -> undef
- return ReplaceInstUsesWith(I, Op0);
- else // undef << X -> 0, undef >>u X -> 0
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
- }
- if (isa<UndefValue>(Op1)) {
- if (I.getOpcode() == Instruction::AShr) // X >>s undef -> X
- return ReplaceInstUsesWith(I, Op0);
- else // X << undef, X >>u undef -> 0
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
- }
-
- // See if we can fold away this shift.
- if (SimplifyDemandedInstructionBits(I))
- return &I;
-
- // Try to fold constant and into select arguments.
- if (isa<Constant>(Op0))
- if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
- if (Instruction *R = FoldOpIntoSelect(I, SI))
- return R;
-
- if (ConstantInt *CUI = dyn_cast<ConstantInt>(Op1))
- if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
- return Res;
- return 0;
-}
-
-Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
- BinaryOperator &I) {
- bool isLeftShift = I.getOpcode() == Instruction::Shl;
-
- // See if we can simplify any instructions used by the instruction whose sole
- // purpose is to compute bits we don't care about.
- uint32_t TypeBits = Op0->getType()->getScalarSizeInBits();
-
- // shl i32 X, 32 = 0 and srl i8 Y, 9 = 0, ... just don't eliminate
- // a signed shift.
- //
- if (Op1->uge(TypeBits)) {
- if (I.getOpcode() != Instruction::AShr)
- return ReplaceInstUsesWith(I, Constant::getNullValue(Op0->getType()));
- // ashr i32 X, 32 --> ashr i32 X, 31
- I.setOperand(1, ConstantInt::get(I.getType(), TypeBits-1));
- return &I;
- }
-
- // ((X*C1) << C2) == (X * (C1 << C2))
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0))
- if (BO->getOpcode() == Instruction::Mul && isLeftShift)
- if (Constant *BOOp = dyn_cast<Constant>(BO->getOperand(1)))
- return BinaryOperator::CreateMul(BO->getOperand(0),
- ConstantExpr::getShl(BOOp, Op1));
-
- // Try to fold constant and into select arguments.
- if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
- if (Instruction *R = FoldOpIntoSelect(I, SI))
- return R;
- if (isa<PHINode>(Op0))
- if (Instruction *NV = FoldOpIntoPhi(I))
- return NV;
-
- // Fold shift2(trunc(shift1(x,c1)), c2) -> trunc(shift2(shift1(x,c1),c2))
- if (TruncInst *TI = dyn_cast<TruncInst>(Op0)) {
- Instruction *TrOp = dyn_cast<Instruction>(TI->getOperand(0));
- // If 'shift2' is an ashr, we would have to get the sign bit into a funny
- // place. Don't try to do this transformation in this case. Also, we
- // require that the input operand is a shift-by-constant so that we have
- // confidence that the shifts will get folded together. We could do this
- // xform in more cases, but it is unlikely to be profitable.
- if (TrOp && I.isLogicalShift() && TrOp->isShift() &&
- isa<ConstantInt>(TrOp->getOperand(1))) {
- // Okay, we'll do this xform. Make the shift of shift.
- Constant *ShAmt = ConstantExpr::getZExt(Op1, TrOp->getType());
- // (shift2 (shift1 & 0x00FF), c2)
- Value *NSh = Builder->CreateBinOp(I.getOpcode(), TrOp, ShAmt,I.getName());
-
- // For logical shifts, the truncation has the effect of making the high
- // part of the register be zeros. Emulate this by inserting an AND to
- // clear the top bits as needed. This 'and' will usually be zapped by
- // other xforms later if dead.
- unsigned SrcSize = TrOp->getType()->getScalarSizeInBits();
- unsigned DstSize = TI->getType()->getScalarSizeInBits();
- APInt MaskV(APInt::getLowBitsSet(SrcSize, DstSize));
-
- // The mask we constructed says what the trunc would do if occurring
- // between the shifts. We want to know the effect *after* the second
- // shift. We know that it is a logical shift by a constant, so adjust the
- // mask as appropriate.
- if (I.getOpcode() == Instruction::Shl)
- MaskV <<= Op1->getZExtValue();
- else {
- assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift");
- MaskV = MaskV.lshr(Op1->getZExtValue());
- }
-
- // shift1 & 0x00FF
- Value *And = Builder->CreateAnd(NSh,
- ConstantInt::get(I.getContext(), MaskV),
- TI->getName());
-
- // Return the value truncated to the interesting size.
- return new TruncInst(And, I.getType());
- }
- }
-
- if (Op0->hasOneUse()) {
- if (BinaryOperator *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
- // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
- Value *V1, *V2;
- ConstantInt *CC;
- switch (Op0BO->getOpcode()) {
- default: break;
- case Instruction::Add:
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor: {
- // These operators commute.
- // Turn (Y + (X >> C)) << C -> (X + (Y << C)) & (~0 << C)
- if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() &&
- match(Op0BO->getOperand(1), m_Shr(m_Value(V1),
- m_Specific(Op1)))) {
- Value *YS = // (Y << C)
- Builder->CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
- // (X + (Y << C))
- Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), YS, V1,
- Op0BO->getOperand(1)->getName());
- uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
- return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getContext(),
- APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
- }
-
- // Turn (Y + ((X >> C) & CC)) << C -> ((X & (CC << C)) + (Y << C))
- Value *Op0BOOp1 = Op0BO->getOperand(1);
- if (isLeftShift && Op0BOOp1->hasOneUse() &&
- match(Op0BOOp1,
- m_And(m_Shr(m_Value(V1), m_Specific(Op1)),
- m_ConstantInt(CC))) &&
- cast<BinaryOperator>(Op0BOOp1)->getOperand(0)->hasOneUse()) {
- Value *YS = // (Y << C)
- Builder->CreateShl(Op0BO->getOperand(0), Op1,
- Op0BO->getName());
- // X & (CC << C)
- Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
- V1->getName()+".mask");
- return BinaryOperator::Create(Op0BO->getOpcode(), YS, XM);
- }
- }
-
- // FALL THROUGH.
- case Instruction::Sub: {
- // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
- if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
- match(Op0BO->getOperand(0), m_Shr(m_Value(V1),
- m_Specific(Op1)))) {
- Value *YS = // (Y << C)
- Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
- // (X + (Y << C))
- Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), V1, YS,
- Op0BO->getOperand(0)->getName());
- uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
- return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getContext(),
- APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
- }
-
- // Turn (((X >> C)&CC) + Y) << C -> (X + (Y << C)) & (CC << C)
- if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
- match(Op0BO->getOperand(0),
- m_And(m_Shr(m_Value(V1), m_Value(V2)),
- m_ConstantInt(CC))) && V2 == Op1 &&
- cast<BinaryOperator>(Op0BO->getOperand(0))
- ->getOperand(0)->hasOneUse()) {
- Value *YS = // (Y << C)
- Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
- // X & (CC << C)
- Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
- V1->getName()+".mask");
-
- return BinaryOperator::Create(Op0BO->getOpcode(), XM, YS);
- }
-
- break;
- }
- }
-
-
- // If the operand is a bitwise operator with a constant RHS, and the
- // shift is the only use, we can pull it out of the shift.
- if (ConstantInt *Op0C = dyn_cast<ConstantInt>(Op0BO->getOperand(1))) {
- bool isValid = true; // Valid only for And, Or, Xor
- bool highBitSet = false; // Transform if high bit of constant set?
-
- switch (Op0BO->getOpcode()) {
- default: isValid = false; break; // Do not perform transform!
- case Instruction::Add:
- isValid = isLeftShift;
- break;
- case Instruction::Or:
- case Instruction::Xor:
- highBitSet = false;
- break;
- case Instruction::And:
- highBitSet = true;
- break;
- }
-
- // If this is a signed shift right, and the high bit is modified
- // by the logical operation, do not perform the transformation.
- // The highBitSet boolean indicates the value of the high bit of
- // the constant which would cause it to be modified for this
- // operation.
- //
- if (isValid && I.getOpcode() == Instruction::AShr)
- isValid = Op0C->getValue()[TypeBits-1] == highBitSet;
-
- if (isValid) {
- Constant *NewRHS = ConstantExpr::get(I.getOpcode(), Op0C, Op1);
-
- Value *NewShift =
- Builder->CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1);
- NewShift->takeName(Op0BO);
-
- return BinaryOperator::Create(Op0BO->getOpcode(), NewShift,
- NewRHS);
- }
- }
- }
- }
-
- // Find out if this is a shift of a shift by a constant.
- BinaryOperator *ShiftOp = dyn_cast<BinaryOperator>(Op0);
- if (ShiftOp && !ShiftOp->isShift())
- ShiftOp = 0;
-
- if (ShiftOp && isa<ConstantInt>(ShiftOp->getOperand(1))) {
- ConstantInt *ShiftAmt1C = cast<ConstantInt>(ShiftOp->getOperand(1));
- uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
- uint32_t ShiftAmt2 = Op1->getLimitedValue(TypeBits);
- assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
- if (ShiftAmt1 == 0) return 0; // Will be simplified in the future.
- Value *X = ShiftOp->getOperand(0);
-
- uint32_t AmtSum = ShiftAmt1+ShiftAmt2; // Fold into one big shift.
-
- const IntegerType *Ty = cast<IntegerType>(I.getType());
-
- // Check for (X << c1) << c2 and (X >> c1) >> c2
- if (I.getOpcode() == ShiftOp->getOpcode()) {
- // If this is an oversized composite shift, then unsigned shifts get 0 and
- // ashr saturates.
- if (AmtSum >= TypeBits) {
- if (I.getOpcode() != Instruction::AShr)
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
- AmtSum = TypeBits-1; // Saturate to 31 for i32 ashr.
- }
-
- return BinaryOperator::Create(I.getOpcode(), X,
- ConstantInt::get(Ty, AmtSum));
- }
-
- if (ShiftOp->getOpcode() == Instruction::LShr &&
- I.getOpcode() == Instruction::AShr) {
- if (AmtSum >= TypeBits)
- return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
-
- // ((X >>u C1) >>s C2) -> (X >>u (C1+C2)) since C1 != 0.
- return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, AmtSum));
- }
-
- if (ShiftOp->getOpcode() == Instruction::AShr &&
- I.getOpcode() == Instruction::LShr) {
- // ((X >>s C1) >>u C2) -> ((X >>s (C1+C2)) & mask) since C1 != 0.
- if (AmtSum >= TypeBits)
- AmtSum = TypeBits-1;
-
- Value *Shift = Builder->CreateAShr(X, ConstantInt::get(Ty, AmtSum));
-
- APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
- return BinaryOperator::CreateAnd(Shift,
- ConstantInt::get(I.getContext(), Mask));
- }
-
- // Okay, if we get here, one shift must be left, and the other shift must be
- // right. See if the amounts are equal.
- if (ShiftAmt1 == ShiftAmt2) {
- // If we have ((X >>? C) << C), turn this into X & (-1 << C).
- if (I.getOpcode() == Instruction::Shl) {
- APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt1));
- return BinaryOperator::CreateAnd(X,
- ConstantInt::get(I.getContext(),Mask));
- }
- // If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
- if (I.getOpcode() == Instruction::LShr) {
- APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1));
- return BinaryOperator::CreateAnd(X,
- ConstantInt::get(I.getContext(), Mask));
- }
- } else if (ShiftAmt1 < ShiftAmt2) {
- uint32_t ShiftDiff = ShiftAmt2-ShiftAmt1;
-
- // (X >>? C1) << C2 --> X << (C2-C1) & (-1 << C2)
- if (I.getOpcode() == Instruction::Shl) {
- assert(ShiftOp->getOpcode() == Instruction::LShr ||
- ShiftOp->getOpcode() == Instruction::AShr);
- Value *Shift = Builder->CreateShl(X, ConstantInt::get(Ty, ShiftDiff));
-
- APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
- return BinaryOperator::CreateAnd(Shift,
- ConstantInt::get(I.getContext(),Mask));
- }
-
- // (X << C1) >>u C2 --> X >>u (C2-C1) & (-1 >> C2)
- if (I.getOpcode() == Instruction::LShr) {
- assert(ShiftOp->getOpcode() == Instruction::Shl);
- Value *Shift = Builder->CreateLShr(X, ConstantInt::get(Ty, ShiftDiff));
-
- APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
- return BinaryOperator::CreateAnd(Shift,
- ConstantInt::get(I.getContext(),Mask));
- }
-
- // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in.
- } else {
- assert(ShiftAmt2 < ShiftAmt1);
- uint32_t ShiftDiff = ShiftAmt1-ShiftAmt2;
-
- // (X >>? C1) << C2 --> X >>? (C1-C2) & (-1 << C2)
- if (I.getOpcode() == Instruction::Shl) {
- assert(ShiftOp->getOpcode() == Instruction::LShr ||
- ShiftOp->getOpcode() == Instruction::AShr);
- Value *Shift = Builder->CreateBinOp(ShiftOp->getOpcode(), X,
- ConstantInt::get(Ty, ShiftDiff));
-
- APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
- return BinaryOperator::CreateAnd(Shift,
- ConstantInt::get(I.getContext(),Mask));
- }
-
- // (X << C1) >>u C2 --> X << (C1-C2) & (-1 >> C2)
- if (I.getOpcode() == Instruction::LShr) {
- assert(ShiftOp->getOpcode() == Instruction::Shl);
- Value *Shift = Builder->CreateShl(X, ConstantInt::get(Ty, ShiftDiff));
-
- APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
- return BinaryOperator::CreateAnd(Shift,
- ConstantInt::get(I.getContext(),Mask));
- }
-
- // We can't handle (X << C1) >>a C2, it shifts arbitrary bits in.
- }
- }
- return 0;
-}
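The equal-amount shift pairs folded above become simple masks; a short illustrative check on unsigned 32-bit values:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t x : {0u, 1u, 0xDEADBEEFu, 0xFFFFFFFFu}) {
        assert(((x >> 5) << 5) == (x & (~0u << 5)));  // (X >>u C) << C  ->  X & (-1 << C)
        assert(((x << 5) >> 5) == (x & (~0u >> 5)));  // (X << C) >>u C  ->  X & (-1 >>u C)
        assert(((x << 3) << 4) == (x << 7));          // same-direction shifts add up
      }
      return 0;
    }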
-
-Instruction *InstCombiner::visitShl(BinaryOperator &I) {
- return commonShiftTransforms(I);
-}
-
-Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
- if (Instruction *R = commonShiftTransforms(I))
- return R;
-
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1))
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op0)) {
- unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
- // ctlz.i32(x)>>5 --> zext(x == 0)
- // cttz.i32(x)>>5 --> zext(x == 0)
- // ctpop.i32(x)>>5 --> zext(x == -1)
- if ((II->getIntrinsicID() == Intrinsic::ctlz ||
- II->getIntrinsicID() == Intrinsic::cttz ||
- II->getIntrinsicID() == Intrinsic::ctpop) &&
- isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == Op1C->getZExtValue()){
- bool isCtPop = II->getIntrinsicID() == Intrinsic::ctpop;
- Constant *RHS = ConstantInt::getSigned(Op0->getType(), isCtPop ? -1:0);
- Value *Cmp = Builder->CreateICmpEQ(II->getOperand(1), RHS);
- return new ZExtInst(Cmp, II->getType());
- }
- }
-
- return 0;
-}
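The ctlz/cttz/ctpop folds above exploit the fact that only a zero input (or an all-ones input, for ctpop) can produce a count equal to the bit width. An illustrative check using the C++20 <bit> equivalents of the intrinsics:

    #include <bit>
    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t x : {0u, 1u, 0x80000000u, 0x12345678u, 0xFFFFFFFFu}) {
        assert((std::countl_zero(x) >> 5) == (x == 0u ? 1 : 0));          // ctlz(x)  >> 5 == (x == 0)
        assert((std::countr_zero(x) >> 5) == (x == 0u ? 1 : 0));          // cttz(x)  >> 5 == (x == 0)
        assert((std::popcount(x)    >> 5) == (x == 0xFFFFFFFFu ? 1 : 0)); // ctpop(x) >> 5 == (x == -1)
      }
      return 0;
    }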
-
-Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
- if (Instruction *R = commonShiftTransforms(I))
- return R;
-
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
- if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0)) {
- // ashr int -1, X = -1 (any arithmetic right shift of ~0 is still ~0)
- if (CSI->isAllOnesValue())
- return ReplaceInstUsesWith(I, CSI);
- }
-
- if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
- // If the input is a SHL by the same constant (ashr (shl X, C), C), then we
- // have a sign-extend idiom.
- Value *X;
- if (match(Op0, m_Shl(m_Value(X), m_Specific(Op1)))) {
- // If the input value is known to already be sign extended enough, delete
- // the extension.
- if (ComputeNumSignBits(X) > Op1C->getZExtValue())
- return ReplaceInstUsesWith(I, X);
-
- // If the input is an extension from the shifted amount value, e.g.
- // %x = zext i8 %A to i32
- // %y = shl i32 %x, 24
- // %z = ashr %y, 24
- // then turn this into "z = sext i8 A to i32".
- if (ZExtInst *ZI = dyn_cast<ZExtInst>(X)) {
- uint32_t SrcBits = ZI->getOperand(0)->getType()->getScalarSizeInBits();
- uint32_t DestBits = ZI->getType()->getScalarSizeInBits();
- if (Op1C->getZExtValue() == DestBits-SrcBits)
- return new SExtInst(ZI->getOperand(0), ZI->getType());
- }
- }
- }
-
- // See if we can turn a signed shr into an unsigned shr.
- if (MaskedValueIsZero(Op0,
- APInt::getSignBit(I.getType()->getScalarSizeInBits())))
- return BinaryOperator::CreateLShr(Op0, Op1);
-
- // Arithmetic shifting an all-sign-bit value is a no-op.
- unsigned NumSignBits = ComputeNumSignBits(Op0);
- if (NumSignBits == Op0->getType()->getScalarSizeInBits())
- return ReplaceInstUsesWith(I, Op0);
-
- return 0;
-}
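The shl/ashr pair recognized above is the classic sign-extension idiom. A hedged C++ sketch (it assumes two's-complement shift behaviour, guaranteed since C++20 and true of mainstream compilers):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (int v = 0; v < 256; ++v) {
        uint8_t a = static_cast<uint8_t>(v);
        int32_t y = static_cast<int32_t>(static_cast<uint32_t>(a) << 24); // shl i32 (zext i8 a), 24
        int32_t z = y >> 24;                                              // ashr i32 y, 24
        assert(z == static_cast<int32_t>(static_cast<int8_t>(a)));        // == sext i8 a to i32
      }
      return 0;
    }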
-
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
deleted file mode 100644
index cd41844..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ /dev/null
@@ -1,1113 +0,0 @@
-//===- InstCombineSimplifyDemanded.cpp ------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains logic for simplifying instructions based on information
-// about how they are used.
-//
-//===----------------------------------------------------------------------===//
-
-
-#include "InstCombine.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/IntrinsicInst.h"
-
-using namespace llvm;
-
-
-/// ShrinkDemandedConstant - Check to see if the specified operand of the
-/// specified instruction is a constant integer. If so, check to see if there
-/// are any bits set in the constant that are not demanded. If so, shrink the
-/// constant and return true.
-static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
- APInt Demanded) {
- assert(I && "No instruction?");
- assert(OpNo < I->getNumOperands() && "Operand index too large");
-
- // If the operand is not a constant integer, nothing to do.
- ConstantInt *OpC = dyn_cast<ConstantInt>(I->getOperand(OpNo));
- if (!OpC) return false;
-
- // If there are no bits set that aren't demanded, nothing to do.
- Demanded.zextOrTrunc(OpC->getValue().getBitWidth());
- if ((~Demanded & OpC->getValue()) == 0)
- return false;
-
- // This instruction is producing bits that are not demanded. Shrink the RHS.
- Demanded &= OpC->getValue();
- I->setOperand(OpNo, ConstantInt::get(OpC->getType(), Demanded));
- return true;
-}
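The effect of shrinking an undemanded constant is easiest to see on a concrete bit pattern: if a later mask only demands bits the constant never sets, the constant (and often the whole operation) contributes nothing. An illustrative check:

    #include <cassert>

    int main() {
      for (unsigned x = 0; x < 0x20000u; x += 0x123u) {
        // Only the low 8 bits of the 'or' are demanded and 0xFF00 sets none of
        // them, so the or-with-constant can be dropped.
        assert(((x | 0xFF00u) & 0xFFu) == (x & 0xFFu));
      }
      return 0;
    }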
-
-
-
-/// SimplifyDemandedInstructionBits - Inst is an integer instruction that
-/// SimplifyDemandedBits knows about. See if the instruction has any
-/// properties that allow us to simplify its operands.
-bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
- unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
- APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
- APInt DemandedMask(APInt::getAllOnesValue(BitWidth));
-
- Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask,
- KnownZero, KnownOne, 0);
- if (V == 0) return false;
- if (V == &Inst) return true;
- ReplaceInstUsesWith(Inst, V);
- return true;
-}
-
-/// SimplifyDemandedBits - This form of SimplifyDemandedBits simplifies the
-/// specified instruction operand if possible, updating it in place. It returns
-/// true if it made any change and false otherwise.
-bool InstCombiner::SimplifyDemandedBits(Use &U, APInt DemandedMask,
- APInt &KnownZero, APInt &KnownOne,
- unsigned Depth) {
- Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask,
- KnownZero, KnownOne, Depth);
- if (NewVal == 0) return false;
- U = NewVal;
- return true;
-}
-
-
-/// SimplifyDemandedUseBits - This function attempts to replace V with a simpler
-/// value based on the demanded bits. When this function is called, it is known
-/// that only the bits set in DemandedMask of the result of V are ever used
-/// downstream. Consequently, depending on the mask and V, it may be possible
-/// to replace V with a constant or one of its operands. In such cases, this
-/// function does the replacement and returns true. In all other cases, it
-/// returns false after analyzing the expression; KnownOne then contains all the
-/// bits that are known to be one in the expression, and KnownZero contains all
-/// the bits that are known to be zero. These are provided to potentially allow the
-/// caller (which might recursively be SimplifyDemandedBits itself) to simplify
-/// the expression. KnownOne and KnownZero always follow the invariant that
-/// KnownOne & KnownZero == 0. That is, a bit can't be both 1 and 0. Note that
-/// the bits in KnownOne and KnownZero may only be accurate for those bits set
-/// in DemandedMask. Note also that the bitwidth of V, DemandedMask, KnownZero
-/// and KnownOne must all be the same.
-///
-/// This returns null if it did not change anything and it permits no
-/// simplification. This returns V itself if it did some simplification of V's
-/// operands based on the information about what bits are demanded. This returns
-/// some other non-null value if it found out that V is equal to another value
-/// in the context where the specified bits are demanded, but not for all users.
-Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
- APInt &KnownZero, APInt &KnownOne,
- unsigned Depth) {
- assert(V != 0 && "Null pointer of Value???");
- assert(Depth <= 6 && "Limit Search Depth");
- uint32_t BitWidth = DemandedMask.getBitWidth();
- const Type *VTy = V->getType();
- assert((TD || !VTy->isPointerTy()) &&
- "SimplifyDemandedBits needs to know bit widths!");
- assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
- (!VTy->isIntOrIntVectorTy() ||
- VTy->getScalarSizeInBits() == BitWidth) &&
- KnownZero.getBitWidth() == BitWidth &&
- KnownOne.getBitWidth() == BitWidth &&
- "Value *V, DemandedMask, KnownZero and KnownOne "
- "must have same BitWidth");
- if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- // We know all of the bits for a constant!
- KnownOne = CI->getValue() & DemandedMask;
- KnownZero = ~KnownOne & DemandedMask;
- return 0;
- }
- if (isa<ConstantPointerNull>(V)) {
- // We know all of the bits for a constant!
- KnownOne.clear();
- KnownZero = DemandedMask;
- return 0;
- }
-
- KnownZero.clear();
- KnownOne.clear();
- if (DemandedMask == 0) { // Not demanding any bits from V.
- if (isa<UndefValue>(V))
- return 0;
- return UndefValue::get(VTy);
- }
-
- if (Depth == 6) // Limit search depth.
- return 0;
-
- APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
- APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
-
- Instruction *I = dyn_cast<Instruction>(V);
- if (!I) {
- ComputeMaskedBits(V, DemandedMask, KnownZero, KnownOne, Depth);
- return 0; // Only analyze instructions.
- }
-
- // If there are multiple uses of this value and we aren't at the root, then
- // we can't do any simplifications of the operands, because DemandedMask
- // only reflects the bits demanded by *one* of the users.
- if (Depth != 0 && !I->hasOneUse()) {
- // Despite the fact that we can't simplify this instruction in every user's
- // context, we can at least compute the knownzero/knownone bits, and we can
- // do simplifications that apply to *just* the one user if we know that
- // this instruction has a simpler value in that context.
- if (I->getOpcode() == Instruction::And) {
- // If either the LHS or the RHS are Zero, the result is zero.
- ComputeMaskedBits(I->getOperand(1), DemandedMask,
- RHSKnownZero, RHSKnownOne, Depth+1);
- ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownZero,
- LHSKnownZero, LHSKnownOne, Depth+1);
-
- // If all of the demanded bits are known 1 on one side, return the other.
- // These bits cannot contribute to the result of the 'and' in this
- // context.
- if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
- (DemandedMask & ~LHSKnownZero))
- return I->getOperand(0);
- if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
- (DemandedMask & ~RHSKnownZero))
- return I->getOperand(1);
-
- // If all of the demanded bits in the inputs are known zeros, return zero.
- if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
- return Constant::getNullValue(VTy);
-
- } else if (I->getOpcode() == Instruction::Or) {
- // We can simplify (X|Y) -> X or Y in the user's context if we know that
- // only bits from X or Y are demanded.
-
- // If either the LHS or the RHS are One, the result is One.
- ComputeMaskedBits(I->getOperand(1), DemandedMask,
- RHSKnownZero, RHSKnownOne, Depth+1);
- ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownOne,
- LHSKnownZero, LHSKnownOne, Depth+1);
-
- // If all of the demanded bits are known zero on one side, return the
- // other. These bits cannot contribute to the result of the 'or' in this
- // context.
- if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
- (DemandedMask & ~LHSKnownOne))
- return I->getOperand(0);
- if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
- (DemandedMask & ~RHSKnownOne))
- return I->getOperand(1);
-
- // If all of the potentially set bits on one side are known to be set on
- // the other side, just use the 'other' side.
- if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
- (DemandedMask & (~RHSKnownZero)))
- return I->getOperand(0);
- if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
- (DemandedMask & (~LHSKnownZero)))
- return I->getOperand(1);
- }
-
- // Compute the KnownZero/KnownOne bits to simplify things downstream.
- ComputeMaskedBits(I, DemandedMask, KnownZero, KnownOne, Depth);
- return 0;
- }
-
- // If this is the root being simplified, allow it to have multiple uses,
- // just set the DemandedMask to all bits so that we can try to simplify the
- // operands. This allows visitTruncInst (for example) to simplify the
- // operand of a trunc without duplicating all the logic below.
- if (Depth == 0 && !V->hasOneUse())
- DemandedMask = APInt::getAllOnesValue(BitWidth);
-
- switch (I->getOpcode()) {
- default:
- ComputeMaskedBits(I, DemandedMask, KnownZero, KnownOne, Depth);
- break;
- case Instruction::And:
- // If either the LHS or the RHS are Zero, the result is zero.
- if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
- RHSKnownZero, RHSKnownOne, Depth+1) ||
- SimplifyDemandedBits(I->getOperandUse(0), DemandedMask & ~RHSKnownZero,
- LHSKnownZero, LHSKnownOne, Depth+1))
- return I;
- assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
- assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
-
- // If all of the demanded bits are known 1 on one side, return the other.
- // These bits cannot contribute to the result of the 'and'.
- if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
- (DemandedMask & ~LHSKnownZero))
- return I->getOperand(0);
- if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
- (DemandedMask & ~RHSKnownZero))
- return I->getOperand(1);
-
- // If all of the demanded bits in the inputs are known zeros, return zero.
- if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
- return Constant::getNullValue(VTy);
-
- // If the RHS is a constant, see if we can simplify it.
- if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnownZero))
- return I;
-
- // Output known-1 bits are only known if set in both the LHS & RHS.
- KnownOne = RHSKnownOne & LHSKnownOne;
- // Output known-0 bits are known to be clear if zero in either the LHS or RHS.
- KnownZero = RHSKnownZero | LHSKnownZero;
- break;
- case Instruction::Or:
- // If either the LHS or the RHS are One, the result is One.
- if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
- RHSKnownZero, RHSKnownOne, Depth+1) ||
- SimplifyDemandedBits(I->getOperandUse(0), DemandedMask & ~RHSKnownOne,
- LHSKnownZero, LHSKnownOne, Depth+1))
- return I;
- assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
- assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
-
- // If all of the demanded bits are known zero on one side, return the other.
- // These bits cannot contribute to the result of the 'or'.
- if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
- (DemandedMask & ~LHSKnownOne))
- return I->getOperand(0);
- if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
- (DemandedMask & ~RHSKnownOne))
- return I->getOperand(1);
-
- // If all of the potentially set bits on one side are known to be set on
- // the other side, just use the 'other' side.
- if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
- (DemandedMask & (~RHSKnownZero)))
- return I->getOperand(0);
- if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
- (DemandedMask & (~LHSKnownZero)))
- return I->getOperand(1);
-
- // If the RHS is a constant, see if we can simplify it.
- if (ShrinkDemandedConstant(I, 1, DemandedMask))
- return I;
-
- // Output known-0 bits are only known if clear in both the LHS & RHS.
- KnownZero = RHSKnownZero & LHSKnownZero;
- // Output known-1 bits are known to be set if set in either the LHS or RHS.
- KnownOne = RHSKnownOne | LHSKnownOne;
- break;
- case Instruction::Xor: {
- if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
- RHSKnownZero, RHSKnownOne, Depth+1) ||
- SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
- LHSKnownZero, LHSKnownOne, Depth+1))
- return I;
- assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
- assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
-
- // If all of the demanded bits are known zero on one side, return the other.
- // These bits cannot contribute to the result of the 'xor'.
- if ((DemandedMask & RHSKnownZero) == DemandedMask)
- return I->getOperand(0);
- if ((DemandedMask & LHSKnownZero) == DemandedMask)
- return I->getOperand(1);
-
- // If all of the demanded bits are known to be zero on one side or the
- // other, turn this into an *inclusive* or.
- // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
- if ((DemandedMask & ~RHSKnownZero & ~LHSKnownZero) == 0) {
- Instruction *Or =
- BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
- I->getName());
- return InsertNewInstBefore(Or, *I);
- }
-
- // If all of the demanded bits on one side are known, and all of the set
- // bits on that side are also known to be set on the other side, turn this
- // into an AND, as we know the bits will be cleared.
- // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
- if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) {
- // all known
- if ((RHSKnownOne & LHSKnownOne) == RHSKnownOne) {
- Constant *AndC = Constant::getIntegerValue(VTy,
- ~RHSKnownOne & DemandedMask);
- Instruction *And =
- BinaryOperator::CreateAnd(I->getOperand(0), AndC, "tmp");
- return InsertNewInstBefore(And, *I);
- }
- }
-
- // If the RHS is a constant, see if we can simplify it.
- // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
- if (ShrinkDemandedConstant(I, 1, DemandedMask))
- return I;
-
- // If our LHS is an 'and' and if it has one use, and if any of the bits we
- // are flipping are known to be set, then the xor is just resetting those
- // bits to zero. We can just knock out bits from the 'and' and the 'xor',
- // simplifying both of them.
- if (Instruction *LHSInst = dyn_cast<Instruction>(I->getOperand(0)))
- if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
- isa<ConstantInt>(I->getOperand(1)) &&
- isa<ConstantInt>(LHSInst->getOperand(1)) &&
- (LHSKnownOne & RHSKnownOne & DemandedMask) != 0) {
- ConstantInt *AndRHS = cast<ConstantInt>(LHSInst->getOperand(1));
- ConstantInt *XorRHS = cast<ConstantInt>(I->getOperand(1));
- APInt NewMask = ~(LHSKnownOne & RHSKnownOne & DemandedMask);
-
- Constant *AndC =
- ConstantInt::get(I->getType(), NewMask & AndRHS->getValue());
- Instruction *NewAnd =
- BinaryOperator::CreateAnd(I->getOperand(0), AndC, "tmp");
- InsertNewInstBefore(NewAnd, *I);
-
- Constant *XorC =
- ConstantInt::get(I->getType(), NewMask & XorRHS->getValue());
- Instruction *NewXor =
- BinaryOperator::CreateXor(NewAnd, XorC, "tmp");
- return InsertNewInstBefore(NewXor, *I);
- }
-
- // Output known-0 bits are known if clear or set in both the LHS & RHS.
- KnownZero= (RHSKnownZero & LHSKnownZero) | (RHSKnownOne & LHSKnownOne);
- // Output known-1 bits are known to be set if set in only one of the LHS, RHS.
- KnownOne = (RHSKnownZero & LHSKnownOne) | (RHSKnownOne & LHSKnownZero);
- break;
- }
- case Instruction::Select:
- if (SimplifyDemandedBits(I->getOperandUse(2), DemandedMask,
- RHSKnownZero, RHSKnownOne, Depth+1) ||
- SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
- LHSKnownZero, LHSKnownOne, Depth+1))
- return I;
- assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
- assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
-
- // If the operands are constants, see if we can simplify them.
- if (ShrinkDemandedConstant(I, 1, DemandedMask) ||
- ShrinkDemandedConstant(I, 2, DemandedMask))
- return I;
-
- // Only known if known in both the LHS and RHS.
- KnownOne = RHSKnownOne & LHSKnownOne;
- KnownZero = RHSKnownZero & LHSKnownZero;
- break;
- case Instruction::Trunc: {
- unsigned truncBf = I->getOperand(0)->getType()->getScalarSizeInBits();
- DemandedMask.zext(truncBf);
- KnownZero.zext(truncBf);
- KnownOne.zext(truncBf);
- if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
- KnownZero, KnownOne, Depth+1))
- return I;
- DemandedMask.trunc(BitWidth);
- KnownZero.trunc(BitWidth);
- KnownOne.trunc(BitWidth);
- assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
- break;
- }
- case Instruction::BitCast:
- if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
- return 0; // vector->int or fp->int?
-
- if (const VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
- if (const VectorType *SrcVTy =
- dyn_cast<VectorType>(I->getOperand(0)->getType())) {
- if (DstVTy->getNumElements() != SrcVTy->getNumElements())
- // Don't touch a bitcast between vectors of different element counts.
- return 0;
- } else
- // Don't touch a scalar-to-vector bitcast.
- return 0;
- } else if (I->getOperand(0)->getType()->isVectorTy())
- // Don't touch a vector-to-scalar bitcast.
- return 0;
-
- if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
- KnownZero, KnownOne, Depth+1))
- return I;
- assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
- break;
- case Instruction::ZExt: {
- // Compute the bits in the result that are not present in the input.
- unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
-
- DemandedMask.trunc(SrcBitWidth);
- KnownZero.trunc(SrcBitWidth);
- KnownOne.trunc(SrcBitWidth);
- if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
- KnownZero, KnownOne, Depth+1))
- return I;
- DemandedMask.zext(BitWidth);
- KnownZero.zext(BitWidth);
- KnownOne.zext(BitWidth);
- assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
- // The top bits are known to be zero.
- KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
- break;
- }
- case Instruction::SExt: {
- // Compute the bits in the result that are not present in the input.
- unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
-
- APInt InputDemandedBits = DemandedMask &
- APInt::getLowBitsSet(BitWidth, SrcBitWidth);
-
- APInt NewBits(APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth));
- // If any of the sign extended bits are demanded, we know that the sign
- // bit is demanded.
- if ((NewBits & DemandedMask) != 0)
- InputDemandedBits.set(SrcBitWidth-1);
-
- InputDemandedBits.trunc(SrcBitWidth);
- KnownZero.trunc(SrcBitWidth);
- KnownOne.trunc(SrcBitWidth);
- if (SimplifyDemandedBits(I->getOperandUse(0), InputDemandedBits,
- KnownZero, KnownOne, Depth+1))
- return I;
- InputDemandedBits.zext(BitWidth);
- KnownZero.zext(BitWidth);
- KnownOne.zext(BitWidth);
- assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
-
- // If the sign bit of the input is known set or clear, then we know the
- // top bits of the result.
-
- // If the input sign bit is known zero, or if the NewBits are not demanded
- // convert this into a zero extension.
- if (KnownZero[SrcBitWidth-1] || (NewBits & ~DemandedMask) == NewBits) {
- // Convert to ZExt cast
- CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
- return InsertNewInstBefore(NewCast, *I);
- } else if (KnownOne[SrcBitWidth-1]) { // Input sign bit known set
- KnownOne |= NewBits;
- }
- break;
- }
- case Instruction::Add: {
- // Figure out what the input bits are. If the top bits of the add result
- // are not demanded, then the add doesn't demand them from its input
- // either.
- unsigned NLZ = DemandedMask.countLeadingZeros();
-
- // If there is a constant on the RHS, there are a variety of xformations
- // we can do.
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
- // If null, this should be simplified elsewhere. Some of the xforms here
- // won't work if the RHS is zero.
- if (RHS->isZero())
- break;
-
- // If the top bit of the output is demanded, demand everything from the
- // input. Otherwise, we demand all the input bits except NLZ top bits.
- APInt InDemandedBits(APInt::getLowBitsSet(BitWidth, BitWidth - NLZ));
-
- // Find information about known zero/one bits in the input.
- if (SimplifyDemandedBits(I->getOperandUse(0), InDemandedBits,
- LHSKnownZero, LHSKnownOne, Depth+1))
- return I;
-
- // If the RHS of the add has bits set that can't affect the input, reduce
- // the constant.
- if (ShrinkDemandedConstant(I, 1, InDemandedBits))
- return I;
-
- // Avoid excess work.
- if (LHSKnownZero == 0 && LHSKnownOne == 0)
- break;
-
- // Turn it into OR if input bits are zero.
- if ((LHSKnownZero & RHS->getValue()) == RHS->getValue()) {
- Instruction *Or =
- BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
- I->getName());
- return InsertNewInstBefore(Or, *I);
- }
-
- // We can say something about the output known-zero and known-one bits,
- // depending on potential carries from the input constant and the
- // unknowns. For example if the LHS is known to have at most the 0x0F0F0
- // bits set and the RHS constant is 0x01001, then we know we have a known
- // one mask of 0x00001 and a known zero mask of 0xE0F0E.
-
- // To compute this, we first compute the potential carry bits. These are
- // the bits which may be modified. I'm not aware of a better way to do
- // this scan.
- const APInt &RHSVal = RHS->getValue();
- APInt CarryBits((~LHSKnownZero + RHSVal) ^ (~LHSKnownZero ^ RHSVal));
-
- // Now that we know which bits have carries, compute the known-1/0 sets.
-
- // Bits are known one if they are known zero in one operand and one in the
- // other, and there is no input carry.
- KnownOne = ((LHSKnownZero & RHSVal) |
- (LHSKnownOne & ~RHSVal)) & ~CarryBits;
-
- // Bits are known zero if they are known zero in both operands and there
- // is no input carry.
- KnownZero = LHSKnownZero & ~RHSVal & ~CarryBits;
- } else {
- // If the high-bits of this ADD are not demanded, then it does not demand
- // the high bits of its LHS or RHS.
- if (DemandedMask[BitWidth-1] == 0) {
- // Right fill the mask of bits for this ADD to demand the most
- // significant bit and all those below it.
- APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
- if (SimplifyDemandedBits(I->getOperandUse(0), DemandedFromOps,
- LHSKnownZero, LHSKnownOne, Depth+1) ||
- SimplifyDemandedBits(I->getOperandUse(1), DemandedFromOps,
- LHSKnownZero, LHSKnownOne, Depth+1))
- return I;
- }
- }
- break;
- }
- case Instruction::Sub:
- // If the high-bits of this SUB are not demanded, then it does not demand
- // the high bits of its LHS or RHS.
- if (DemandedMask[BitWidth-1] == 0) {
- // Right fill the mask of bits for this SUB to demand the most
- // significant bit and all those below it.
- uint32_t NLZ = DemandedMask.countLeadingZeros();
- APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
- if (SimplifyDemandedBits(I->getOperandUse(0), DemandedFromOps,
- LHSKnownZero, LHSKnownOne, Depth+1) ||
- SimplifyDemandedBits(I->getOperandUse(1), DemandedFromOps,
- LHSKnownZero, LHSKnownOne, Depth+1))
- return I;
- }
- // Otherwise just hand the sub off to ComputeMaskedBits to fill in
- // the known zeros and ones.
- ComputeMaskedBits(V, DemandedMask, KnownZero, KnownOne, Depth);
- break;
- case Instruction::Shl:
- if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
- uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
- APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
- if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
- KnownZero, KnownOne, Depth+1))
- return I;
- assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
- KnownZero <<= ShiftAmt;
- KnownOne <<= ShiftAmt;
- // low bits known zero.
- if (ShiftAmt)
- KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
- }
- break;
- case Instruction::LShr:
- // For a logical shift right
- if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
- uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
-
- // Unsigned shift right.
- APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
- if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
- KnownZero, KnownOne, Depth+1))
- return I;
- assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
- KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
- KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
- if (ShiftAmt) {
- // Compute the new bits that are at the top now.
- APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
- KnownZero |= HighBits; // high bits known zero.
- }
- }
- break;
- case Instruction::AShr:
- // If this is an arithmetic shift right and only the low-bit is set, we can
- // always convert this into a logical shr, even if the shift amount is
- // variable. The low bit of the shift cannot be an input sign bit unless
- // the shift amount is >= the size of the datatype, which is undefined.
- if (DemandedMask == 1) {
- // Perform the logical shift right.
- Instruction *NewVal = BinaryOperator::CreateLShr(
- I->getOperand(0), I->getOperand(1), I->getName());
- return InsertNewInstBefore(NewVal, *I);
- }
-
- // If the sign bit is the only bit demanded by this ashr, then there is no
- // need to do it; the shift doesn't change the high bit.
- if (DemandedMask.isSignBit())
- return I->getOperand(0);
-
- if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
- uint32_t ShiftAmt = SA->getLimitedValue(BitWidth);
-
- // Signed shift right.
- APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
- // If any of the "high bits" are demanded, we should set the sign bit as
- // demanded.
- if (DemandedMask.countLeadingZeros() <= ShiftAmt)
- DemandedMaskIn.set(BitWidth-1);
- if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
- KnownZero, KnownOne, Depth+1))
- return I;
- assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
- // Compute the new bits that are at the top now.
- APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
- KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
- KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
-
- // Handle the sign bits.
- APInt SignBit(APInt::getSignBit(BitWidth));
- // Adjust to where it is now in the mask.
- SignBit = APIntOps::lshr(SignBit, ShiftAmt);
-
- // If the input sign bit is known to be zero, or if none of the top bits
- // are demanded, turn this into an unsigned shift right.
- if (BitWidth <= ShiftAmt || KnownZero[BitWidth-ShiftAmt-1] ||
- (HighBits & ~DemandedMask) == HighBits) {
- // Perform the logical shift right.
- Instruction *NewVal = BinaryOperator::CreateLShr(
- I->getOperand(0), SA, I->getName());
- return InsertNewInstBefore(NewVal, *I);
- } else if ((KnownOne & SignBit) != 0) { // New bits are known one.
- KnownOne |= HighBits;
- }
- }
- break;
- case Instruction::SRem:
- if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
- APInt RA = Rem->getValue().abs();
- if (RA.isPowerOf2()) {
- if (DemandedMask.ult(RA)) // srem won't affect demanded bits
- return I->getOperand(0);
-
- APInt LowBits = RA - 1;
- APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
- if (SimplifyDemandedBits(I->getOperandUse(0), Mask2,
- LHSKnownZero, LHSKnownOne, Depth+1))
- return I;
-
- // The low bits of LHS are unchanged by the srem.
- KnownZero = LHSKnownZero & LowBits;
- KnownOne = LHSKnownOne & LowBits;
-
- // If LHS is non-negative or has all low bits zero, then the upper bits
- // are all zero.
- if (LHSKnownZero[BitWidth-1] || ((LHSKnownZero & LowBits) == LowBits))
- KnownZero |= ~LowBits;
-
- // If LHS is negative and not all low bits are zero, then the upper bits
- // are all one.
- if (LHSKnownOne[BitWidth-1] && ((LHSKnownOne & LowBits) != 0))
- KnownOne |= ~LowBits;
-
- assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
- }
- }
- break;
- case Instruction::URem: {
- APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
- APInt AllOnes = APInt::getAllOnesValue(BitWidth);
- if (SimplifyDemandedBits(I->getOperandUse(0), AllOnes,
- KnownZero2, KnownOne2, Depth+1) ||
- SimplifyDemandedBits(I->getOperandUse(1), AllOnes,
- KnownZero2, KnownOne2, Depth+1))
- return I;
-
- unsigned Leaders = KnownZero2.countLeadingOnes();
- Leaders = std::max(Leaders,
- KnownZero2.countLeadingOnes());
- KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
- break;
- }
- case Instruction::Call:
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
- switch (II->getIntrinsicID()) {
- default: break;
- case Intrinsic::bswap: {
- // If the only bits demanded come from one byte of the bswap result,
- // just shift the input byte into position to eliminate the bswap.
- unsigned NLZ = DemandedMask.countLeadingZeros();
- unsigned NTZ = DemandedMask.countTrailingZeros();
-
- // Round NTZ down to the next byte. If we have 11 trailing zeros, then
- // we need all the bits down to bit 8. Likewise, round NLZ. If we
- // have 14 leading zeros, round to 8.
- NLZ &= ~7;
- NTZ &= ~7;
- // If we need exactly one byte, we can do this transformation.
- if (BitWidth-NLZ-NTZ == 8) {
- unsigned ResultBit = NTZ;
- unsigned InputBit = BitWidth-NTZ-8;
-
- // Replace this with either a left or right shift to get the byte into
- // the right place.
- Instruction *NewVal;
- if (InputBit > ResultBit)
- NewVal = BinaryOperator::CreateLShr(I->getOperand(1),
- ConstantInt::get(I->getType(), InputBit-ResultBit));
- else
- NewVal = BinaryOperator::CreateShl(I->getOperand(1),
- ConstantInt::get(I->getType(), ResultBit-InputBit));
- NewVal->takeName(I);
- return InsertNewInstBefore(NewVal, *I);
- }
-
- // TODO: Could compute known zero/one bits based on the input.
- break;
- }
- }
- }
- ComputeMaskedBits(V, DemandedMask, KnownZero, KnownOne, Depth);
- break;
- }
-
- // If the client is only demanding bits that we know, return the known
- // constant.
- if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask)
- return Constant::getIntegerValue(VTy, KnownOne);
- return 0;
-}
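
The Add case above derives the known-zero/known-one bits of add X, C from the carry computation its comment walks through. The following standalone sketch is not LLVM code; it models the same arithmetic with plain uint32_t masks (LHSKnownZero, LHSKnownOne, RHSVal, CarryBits, KnownOne and KnownZero mirror the names above, while MaxLHS and the 20-bit Window20 view are choices made here for illustration) and checks the 0x0F0F0 / 0x01001 example given in the comment.

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint32_t Window20 = 0xFFFFFu;   // look at the low 20 bits only
    uint32_t LHSKnownZero = ~0x0F0F0u;    // LHS has at most the 0x0F0F0 bits set
    uint32_t LHSKnownOne  = 0;
    uint32_t RHSVal       = 0x01001u;     // constant RHS of the add

    // Bits that a carry out of the unknown LHS bits could modify.
    uint32_t MaxLHS    = ~LHSKnownZero;
    uint32_t CarryBits = (MaxLHS + RHSVal) ^ (MaxLHS ^ RHSVal);

    // Known one: zero in one operand, one in the other, and no incoming carry.
    uint32_t KnownOne  = ((LHSKnownZero & RHSVal) |
                          (LHSKnownOne & ~RHSVal)) & ~CarryBits;
    // Known zero: zero in both operands and no incoming carry.
    uint32_t KnownZero = LHSKnownZero & ~RHSVal & ~CarryBits;

    assert((KnownOne  & Window20) == 0x00001u);
    assert((KnownZero & Window20) == 0xE0F0Eu);
    std::printf("KnownOne=0x%05X KnownZero=0x%05X\n",
                (unsigned)(KnownOne & Window20),
                (unsigned)(KnownZero & Window20));
    return 0;
  }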
-
-
-/// SimplifyDemandedVectorElts - The specified value produces a vector with
-/// any number of elements. DemandedElts contains the set of elements that are
-/// actually used by the caller. This method analyzes which elements of the
-/// operand are undef and returns that information in UndefElts.
-///
-/// If the information about demanded elements can be used to simplify the
-/// operation, the operation is simplified and the resultant value is
-/// returned. This returns null if no change was made.
-Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
- APInt &UndefElts,
- unsigned Depth) {
- unsigned VWidth = cast<VectorType>(V->getType())->getNumElements();
- APInt EltMask(APInt::getAllOnesValue(VWidth));
- assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");
-
- if (isa<UndefValue>(V)) {
- // If the entire vector is undefined, just return this info.
- UndefElts = EltMask;
- return 0;
- }
-
- if (DemandedElts == 0) { // If nothing is demanded, provide undef.
- UndefElts = EltMask;
- return UndefValue::get(V->getType());
- }
-
- UndefElts = 0;
- if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
- const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
- Constant *Undef = UndefValue::get(EltTy);
-
- std::vector<Constant*> Elts;
- for (unsigned i = 0; i != VWidth; ++i)
- if (!DemandedElts[i]) { // If not demanded, set to undef.
- Elts.push_back(Undef);
- UndefElts.set(i);
- } else if (isa<UndefValue>(CV->getOperand(i))) { // Already undef.
- Elts.push_back(Undef);
- UndefElts.set(i);
- } else { // Otherwise, defined.
- Elts.push_back(CV->getOperand(i));
- }
-
- // If we changed the constant, return it.
- Constant *NewCP = ConstantVector::get(Elts);
- return NewCP != CV ? NewCP : 0;
- }
-
- if (isa<ConstantAggregateZero>(V)) {
- // Simplify the CAZ to a ConstantVector where the non-demanded elements are
- // set to undef.
-
- // Check if this is identity. If so, return 0 since we are not simplifying
- // anything.
- if (DemandedElts.isAllOnesValue())
- return 0;
-
- const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
- Constant *Zero = Constant::getNullValue(EltTy);
- Constant *Undef = UndefValue::get(EltTy);
- std::vector<Constant*> Elts;
- for (unsigned i = 0; i != VWidth; ++i) {
- Constant *Elt = DemandedElts[i] ? Zero : Undef;
- Elts.push_back(Elt);
- }
- UndefElts = DemandedElts ^ EltMask;
- return ConstantVector::get(Elts);
- }
-
- // Limit search depth.
- if (Depth == 10)
- return 0;
-
- // If multiple users are using the root value, proceed with
- // simplification conservatively assuming that all elements
- // are needed.
- if (!V->hasOneUse()) {
- // Quit if we find multiple users of a non-root value though.
- // They'll be handled when it's their turn to be visited by
- // the main instcombine process.
- if (Depth != 0)
- // TODO: Just compute the UndefElts information recursively.
- return 0;
-
- // Conservatively assume that all elements are needed.
- DemandedElts = EltMask;
- }
-
- Instruction *I = dyn_cast<Instruction>(V);
- if (!I) return 0; // Only analyze instructions.
-
- bool MadeChange = false;
- APInt UndefElts2(VWidth, 0);
- Value *TmpV;
- switch (I->getOpcode()) {
- default: break;
-
- case Instruction::InsertElement: {
- // If this is a variable index, we don't know which element it overwrites,
- // so demand exactly the same input as we produce.
- ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
- if (Idx == 0) {
- // Note that we can't propagate undef elt info, because we don't know
- // which elt is getting updated.
- TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
- UndefElts2, Depth+1);
- if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
- break;
- }
-
- // If this is inserting an element that isn't demanded, remove this
- // insertelement.
- unsigned IdxNo = Idx->getZExtValue();
- if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
- Worklist.Add(I);
- return I->getOperand(0);
- }
-
- // Otherwise, the element inserted overwrites whatever was there, so the
- // input demanded set is simpler than the output set.
- APInt DemandedElts2 = DemandedElts;
- DemandedElts2.clear(IdxNo);
- TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts2,
- UndefElts, Depth+1);
- if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
-
- // The inserted element is defined.
- UndefElts.clear(IdxNo);
- break;
- }
- case Instruction::ShuffleVector: {
- ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
- uint64_t LHSVWidth =
- cast<VectorType>(Shuffle->getOperand(0)->getType())->getNumElements();
- APInt LeftDemanded(LHSVWidth, 0), RightDemanded(LHSVWidth, 0);
- for (unsigned i = 0; i < VWidth; i++) {
- if (DemandedElts[i]) {
- unsigned MaskVal = Shuffle->getMaskValue(i);
- if (MaskVal != -1u) {
- assert(MaskVal < LHSVWidth * 2 &&
- "shufflevector mask index out of range!");
- if (MaskVal < LHSVWidth)
- LeftDemanded.set(MaskVal);
- else
- RightDemanded.set(MaskVal - LHSVWidth);
- }
- }
- }
-
- APInt UndefElts4(LHSVWidth, 0);
- TmpV = SimplifyDemandedVectorElts(I->getOperand(0), LeftDemanded,
- UndefElts4, Depth+1);
- if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
-
- APInt UndefElts3(LHSVWidth, 0);
- TmpV = SimplifyDemandedVectorElts(I->getOperand(1), RightDemanded,
- UndefElts3, Depth+1);
- if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
-
- bool NewUndefElts = false;
- for (unsigned i = 0; i < VWidth; i++) {
- unsigned MaskVal = Shuffle->getMaskValue(i);
- if (MaskVal == -1u) {
- UndefElts.set(i);
- } else if (MaskVal < LHSVWidth) {
- if (UndefElts4[MaskVal]) {
- NewUndefElts = true;
- UndefElts.set(i);
- }
- } else {
- if (UndefElts3[MaskVal - LHSVWidth]) {
- NewUndefElts = true;
- UndefElts.set(i);
- }
- }
- }
-
- if (NewUndefElts) {
- // Add additional discovered undefs.
- std::vector<Constant*> Elts;
- for (unsigned i = 0; i < VWidth; ++i) {
- if (UndefElts[i])
- Elts.push_back(UndefValue::get(Type::getInt32Ty(I->getContext())));
- else
- Elts.push_back(ConstantInt::get(Type::getInt32Ty(I->getContext()),
- Shuffle->getMaskValue(i)));
- }
- I->setOperand(2, ConstantVector::get(Elts));
- MadeChange = true;
- }
- break;
- }
- case Instruction::BitCast: {
- // Vector->vector casts only.
- const VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
- if (!VTy) break;
- unsigned InVWidth = VTy->getNumElements();
- APInt InputDemandedElts(InVWidth, 0);
- unsigned Ratio;
-
- if (VWidth == InVWidth) {
- // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
- // elements as are demanded of us.
- Ratio = 1;
- InputDemandedElts = DemandedElts;
- } else if (VWidth > InVWidth) {
- // Untested so far.
- break;
-
- // If there are more elements in the result than there are in the source,
- // then an input element is live if any of the corresponding output
- // elements are live.
- Ratio = VWidth/InVWidth;
- for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
- if (DemandedElts[OutIdx])
- InputDemandedElts.set(OutIdx/Ratio);
- }
- } else {
- // Untested so far.
- break;
-
- // If there are more elements in the source than there are in the result,
- // then an input element is live if the corresponding output element is
- // live.
- Ratio = InVWidth/VWidth;
- for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
- if (DemandedElts[InIdx/Ratio])
- InputDemandedElts.set(InIdx);
- }
-
- // Simplify the source vector based on the elements demanded of it.
- TmpV = SimplifyDemandedVectorElts(I->getOperand(0), InputDemandedElts,
- UndefElts2, Depth+1);
- if (TmpV) {
- I->setOperand(0, TmpV);
- MadeChange = true;
- }
-
- UndefElts = UndefElts2;
- if (VWidth > InVWidth) {
- llvm_unreachable("Unimp");
- // If there are more elements in the result than there are in the source,
- // then an output element is undef if the corresponding input element is
- // undef.
- for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
- if (UndefElts2[OutIdx/Ratio])
- UndefElts.set(OutIdx);
- } else if (VWidth < InVWidth) {
- llvm_unreachable("Unimp");
- // If there are more elements in the source than there are in the result,
- // then a result element is undef if all of the corresponding input
- // elements are undef.
- UndefElts = ~0ULL >> (64-VWidth); // Start out all undef.
- for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
- if (!UndefElts2[InIdx]) // Not undef?
- UndefElts.clear(InIdx/Ratio); // Clear undef bit.
- }
- break;
- }
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor:
- case Instruction::Add:
- case Instruction::Sub:
- case Instruction::Mul:
- // Simplify both operands based on the elements demanded of the result.
- TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
- UndefElts, Depth+1);
- if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
- TmpV = SimplifyDemandedVectorElts(I->getOperand(1), DemandedElts,
- UndefElts2, Depth+1);
- if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
-
- // Output elements are undefined if both are undefined. Consider things
- // like undef&0. The result is known zero, not undef.
- UndefElts &= UndefElts2;
- break;
-
- case Instruction::Call: {
- IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
- if (!II) break;
- switch (II->getIntrinsicID()) {
- default: break;
-
- // Binary vector operations that work column-wise. A dest element is a
- // function of the corresponding input elements from the two inputs.
- case Intrinsic::x86_sse_sub_ss:
- case Intrinsic::x86_sse_mul_ss:
- case Intrinsic::x86_sse_min_ss:
- case Intrinsic::x86_sse_max_ss:
- case Intrinsic::x86_sse2_sub_sd:
- case Intrinsic::x86_sse2_mul_sd:
- case Intrinsic::x86_sse2_min_sd:
- case Intrinsic::x86_sse2_max_sd:
- TmpV = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
- UndefElts, Depth+1);
- if (TmpV) { II->setOperand(1, TmpV); MadeChange = true; }
- TmpV = SimplifyDemandedVectorElts(II->getOperand(2), DemandedElts,
- UndefElts2, Depth+1);
- if (TmpV) { II->setOperand(2, TmpV); MadeChange = true; }
-
- // If only the low elt is demanded and this is a scalarizable intrinsic,
- // scalarize it now.
- if (DemandedElts == 1) {
- switch (II->getIntrinsicID()) {
- default: break;
- case Intrinsic::x86_sse_sub_ss:
- case Intrinsic::x86_sse_mul_ss:
- case Intrinsic::x86_sse2_sub_sd:
- case Intrinsic::x86_sse2_mul_sd:
- // TODO: Lower MIN/MAX/ABS/etc
- Value *LHS = II->getOperand(1);
- Value *RHS = II->getOperand(2);
- // Extract the element as scalars.
- LHS = InsertNewInstBefore(ExtractElementInst::Create(LHS,
- ConstantInt::get(Type::getInt32Ty(I->getContext()), 0U)), *II);
- RHS = InsertNewInstBefore(ExtractElementInst::Create(RHS,
- ConstantInt::get(Type::getInt32Ty(I->getContext()), 0U)), *II);
-
- switch (II->getIntrinsicID()) {
- default: llvm_unreachable("Case stmts out of sync!");
- case Intrinsic::x86_sse_sub_ss:
- case Intrinsic::x86_sse2_sub_sd:
- TmpV = InsertNewInstBefore(BinaryOperator::CreateFSub(LHS, RHS,
- II->getName()), *II);
- break;
- case Intrinsic::x86_sse_mul_ss:
- case Intrinsic::x86_sse2_mul_sd:
- TmpV = InsertNewInstBefore(BinaryOperator::CreateFMul(LHS, RHS,
- II->getName()), *II);
- break;
- }
-
- Instruction *New =
- InsertElementInst::Create(
- UndefValue::get(II->getType()), TmpV,
- ConstantInt::get(Type::getInt32Ty(I->getContext()), 0U, false),
- II->getName());
- InsertNewInstBefore(New, *II);
- return New;
- }
- }
-
- // Output elements are undefined if both are undefined. Consider things
- // like undef&0. The result is known zero, not undef.
- UndefElts &= UndefElts2;
- break;
- }
- break;
- }
- }
- return MadeChange ? I : 0;
-}
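
The InsertElement case above only forwards demanded lanes other than the one being written, and marks that lane as defined in the result. A rough standalone model of that bookkeeping follows (bit masks over the lanes; ElemInfo, insertEltDemand and the concrete values are names and choices made here, not the LLVM/ClamAV sources, and only the constant-index, demanded-lane branch is modelled).

  #include <cassert>
  #include <cstdint>

  struct ElemInfo {
    uint32_t DemandedFromInput;  // lanes still needed from the source vector
    uint32_t UndefElts;          // lanes of the result known to be undef
  };

  // Constant-index insertelement, for a lane that is actually demanded.
  static ElemInfo insertEltDemand(uint32_t DemandedElts, uint32_t InputUndef,
                                  unsigned IdxNo) {
    ElemInfo R;
    // The written lane is overwritten, so it is not demanded from the input...
    R.DemandedFromInput = DemandedElts & ~(1u << IdxNo);
    // ...and it is definitely defined in the result.
    R.UndefElts = InputUndef & ~(1u << IdxNo);
    return R;
  }

  int main() {
    // <4 x T>: lanes 0 and 2 demanded, lane 2 undef in the input, insert at 2.
    ElemInfo R = insertEltDemand(0x5u, 0x4u, 2);
    assert(R.DemandedFromInput == 0x1u);  // only lane 0 still comes from the input
    assert(R.UndefElts == 0x0u);          // the inserted lane is now defined
    return 0;
  }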
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
deleted file mode 100644
index a58124d..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ /dev/null
@@ -1,561 +0,0 @@
-//===- InstCombineVectorOps.cpp -------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements instcombine for ExtractElement, InsertElement and
-// ShuffleVector.
-//
-//===----------------------------------------------------------------------===//
-
-#include "InstCombine.h"
-using namespace llvm;
-
-/// CheapToScalarize - Return true if the value is cheaper to scalarize than it
-/// is to leave as a vector operation.
-static bool CheapToScalarize(Value *V, bool isConstant) {
- if (isa<ConstantAggregateZero>(V))
- return true;
- if (ConstantVector *C = dyn_cast<ConstantVector>(V)) {
- if (isConstant) return true;
- // If all elts are the same, we can extract.
- Constant *Op0 = C->getOperand(0);
- for (unsigned i = 1; i < C->getNumOperands(); ++i)
- if (C->getOperand(i) != Op0)
- return false;
- return true;
- }
- Instruction *I = dyn_cast<Instruction>(V);
- if (!I) return false;
-
- // Insert element gets simplified to the inserted element or is deleted if
- // this is a constant-idx extractelement and it's a constant-idx insertelt.
- if (I->getOpcode() == Instruction::InsertElement && isConstant &&
- isa<ConstantInt>(I->getOperand(2)))
- return true;
- if (I->getOpcode() == Instruction::Load && I->hasOneUse())
- return true;
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I))
- if (BO->hasOneUse() &&
- (CheapToScalarize(BO->getOperand(0), isConstant) ||
- CheapToScalarize(BO->getOperand(1), isConstant)))
- return true;
- if (CmpInst *CI = dyn_cast<CmpInst>(I))
- if (CI->hasOneUse() &&
- (CheapToScalarize(CI->getOperand(0), isConstant) ||
- CheapToScalarize(CI->getOperand(1), isConstant)))
- return true;
-
- return false;
-}
-
-/// Read and decode a shufflevector mask.
-///
-/// It turns undef elements into values that are larger than the number of
-/// elements in the input.
-static std::vector<unsigned> getShuffleMask(const ShuffleVectorInst *SVI) {
- unsigned NElts = SVI->getType()->getNumElements();
- if (isa<ConstantAggregateZero>(SVI->getOperand(2)))
- return std::vector<unsigned>(NElts, 0);
- if (isa<UndefValue>(SVI->getOperand(2)))
- return std::vector<unsigned>(NElts, 2*NElts);
-
- std::vector<unsigned> Result;
- const ConstantVector *CP = cast<ConstantVector>(SVI->getOperand(2));
- for (User::const_op_iterator i = CP->op_begin(), e = CP->op_end(); i!=e; ++i)
- if (isa<UndefValue>(*i))
- Result.push_back(NElts*2); // undef -> out-of-range value (2*NElts)
- else
- Result.push_back(cast<ConstantInt>(*i)->getZExtValue());
- return Result;
-}
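
getShuffleMask above encodes undef lanes as indices that are out of range for both inputs (at least 2*NElts), while indices in [NElts, 2*NElts) select the second operand. A small standalone sketch of that convention (not LLVM code; the kUndef marker stands in for an UndefValue mask operand and is an assumption of this example):

  #include <cassert>
  #include <vector>

  static const unsigned kUndef = ~0u;  // stand-in for an undef mask element

  static std::vector<unsigned> decodeMask(const std::vector<unsigned> &Raw,
                                          unsigned NElts) {
    std::vector<unsigned> Result;
    for (unsigned V : Raw)
      Result.push_back(V == kUndef ? 2 * NElts : V);  // undef -> out of range
    return Result;
  }

  int main() {
    // <4 x T> shuffle mask <0, undef, 5, 3>: lane 1 is undef, lane 2 reads RHS.
    std::vector<unsigned> M = decodeMask({0, kUndef, 5, 3}, 4);
    assert(M[1] == 8);               // sentinel larger than any valid index
    assert(M[2] >= 4 && M[2] < 8);   // [NElts, 2*NElts) selects the RHS vector
    return 0;
  }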
-
-/// FindScalarElement - Given a vector and an element number, see if the scalar
-/// value is already around as a register, for example if it were inserted then
-/// extracted from the vector.
-static Value *FindScalarElement(Value *V, unsigned EltNo) {
- assert(V->getType()->isVectorTy() && "Not looking at a vector?");
- const VectorType *PTy = cast<VectorType>(V->getType());
- unsigned Width = PTy->getNumElements();
- if (EltNo >= Width) // Out of range access.
- return UndefValue::get(PTy->getElementType());
-
- if (isa<UndefValue>(V))
- return UndefValue::get(PTy->getElementType());
- if (isa<ConstantAggregateZero>(V))
- return Constant::getNullValue(PTy->getElementType());
- if (ConstantVector *CP = dyn_cast<ConstantVector>(V))
- return CP->getOperand(EltNo);
-
- if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
- // If this is an insert to a variable element, we don't know what it is.
- if (!isa<ConstantInt>(III->getOperand(2)))
- return 0;
- unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();
-
- // If this is an insert to the element we are looking for, return the
- // inserted value.
- if (EltNo == IIElt)
- return III->getOperand(1);
-
- // Otherwise, the insertelement doesn't modify the value, recurse on its
- // vector input.
- return FindScalarElement(III->getOperand(0), EltNo);
- }
-
- if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
- unsigned LHSWidth =
- cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
- unsigned InEl = getShuffleMask(SVI)[EltNo];
- if (InEl < LHSWidth)
- return FindScalarElement(SVI->getOperand(0), InEl);
- else if (InEl < LHSWidth*2)
- return FindScalarElement(SVI->getOperand(1), InEl - LHSWidth);
- else
- return UndefValue::get(PTy->getElementType());
- }
-
- // Otherwise, we don't know.
- return 0;
-}
-
-Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
- // If vector val is undef, replace extract with scalar undef.
- if (isa<UndefValue>(EI.getOperand(0)))
- return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
-
- // If vector val is constant 0, replace extract with scalar 0.
- if (isa<ConstantAggregateZero>(EI.getOperand(0)))
- return ReplaceInstUsesWith(EI, Constant::getNullValue(EI.getType()));
-
- if (ConstantVector *C = dyn_cast<ConstantVector>(EI.getOperand(0))) {
- // If vector val is constant with all elements the same, replace EI with
- // that element. When the elements are not identical, we cannot replace yet
- // (we do that below, but only when the index is constant).
- Constant *op0 = C->getOperand(0);
- for (unsigned i = 1; i != C->getNumOperands(); ++i)
- if (C->getOperand(i) != op0) {
- op0 = 0;
- break;
- }
- if (op0)
- return ReplaceInstUsesWith(EI, op0);
- }
-
- // If extracting a specified index from the vector, see if we can recursively
- // find a previously computed scalar that was inserted into the vector.
- if (ConstantInt *IdxC = dyn_cast<ConstantInt>(EI.getOperand(1))) {
- unsigned IndexVal = IdxC->getZExtValue();
- unsigned VectorWidth = EI.getVectorOperandType()->getNumElements();
-
- // If this is extracting an invalid index, turn this into undef, to avoid
- // crashing the code below.
- if (IndexVal >= VectorWidth)
- return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
-
- // This instruction only demands the single element from the input vector.
- // If the input vector has a single use, simplify it based on this use
- // property.
- if (EI.getOperand(0)->hasOneUse() && VectorWidth != 1) {
- APInt UndefElts(VectorWidth, 0);
- APInt DemandedMask(VectorWidth, 0);
- DemandedMask.set(IndexVal);
- if (Value *V = SimplifyDemandedVectorElts(EI.getOperand(0),
- DemandedMask, UndefElts)) {
- EI.setOperand(0, V);
- return &EI;
- }
- }
-
- if (Value *Elt = FindScalarElement(EI.getOperand(0), IndexVal))
- return ReplaceInstUsesWith(EI, Elt);
-
- // If this extractelement is directly using a bitcast from a vector of
- // the same number of elements, see if we can find the source element from
- // it. In this case, we will end up needing to bitcast the scalars.
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(EI.getOperand(0))) {
- if (const VectorType *VT =
- dyn_cast<VectorType>(BCI->getOperand(0)->getType()))
- if (VT->getNumElements() == VectorWidth)
- if (Value *Elt = FindScalarElement(BCI->getOperand(0), IndexVal))
- return new BitCastInst(Elt, EI.getType());
- }
- }
-
- if (Instruction *I = dyn_cast<Instruction>(EI.getOperand(0))) {
- // Push extractelement into predecessor operation if legal and
- // profitable to do so
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
- if (I->hasOneUse() &&
- CheapToScalarize(BO, isa<ConstantInt>(EI.getOperand(1)))) {
- Value *newEI0 =
- Builder->CreateExtractElement(BO->getOperand(0), EI.getOperand(1),
- EI.getName()+".lhs");
- Value *newEI1 =
- Builder->CreateExtractElement(BO->getOperand(1), EI.getOperand(1),
- EI.getName()+".rhs");
- return BinaryOperator::Create(BO->getOpcode(), newEI0, newEI1);
- }
- } else if (InsertElementInst *IE = dyn_cast<InsertElementInst>(I)) {
- // Extracting the inserted element?
- if (IE->getOperand(2) == EI.getOperand(1))
- return ReplaceInstUsesWith(EI, IE->getOperand(1));
- // If the inserted and extracted elements are constants, they must not
- // be the same value; extract from the pre-inserted value instead.
- if (isa<Constant>(IE->getOperand(2)) && isa<Constant>(EI.getOperand(1))) {
- Worklist.AddValue(EI.getOperand(0));
- EI.setOperand(0, IE->getOperand(0));
- return &EI;
- }
- } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I)) {
- // If this is extracting an element from a shufflevector, figure out where
- // it came from and extract from the appropriate input element instead.
- if (ConstantInt *Elt = dyn_cast<ConstantInt>(EI.getOperand(1))) {
- unsigned SrcIdx = getShuffleMask(SVI)[Elt->getZExtValue()];
- Value *Src;
- unsigned LHSWidth =
- cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
-
- if (SrcIdx < LHSWidth)
- Src = SVI->getOperand(0);
- else if (SrcIdx < LHSWidth*2) {
- SrcIdx -= LHSWidth;
- Src = SVI->getOperand(1);
- } else {
- return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
- }
- return ExtractElementInst::Create(Src,
- ConstantInt::get(Type::getInt32Ty(EI.getContext()),
- SrcIdx, false));
- }
- }
- // FIXME: Canonicalize extractelement(bitcast) -> bitcast(extractelement)
- }
- return 0;
-}
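
Two of the folds in visitExtractElementInst above are easy to sanity-check on plain arrays: extracting a lane of a vector binop equals the binop of the extracted lanes (the scalarization applied when CheapToScalarize says it pays off), and extracting the lane that was just inserted yields the inserted scalar (the FindScalarElement case). A trivial standalone sketch, not LLVM code, with values picked here for illustration:

  #include <array>
  #include <cassert>

  int main() {
    std::array<int, 4> A = {1, 2, 3, 4};
    std::array<int, 4> B = {10, 20, 30, 40};

    // extractelement(add(A, B), i) == add(extractelement(A, i), extractelement(B, i))
    std::array<int, 4> Sum{};
    for (unsigned k = 0; k != 4; ++k) Sum[k] = A[k] + B[k];
    unsigned i = 2;
    assert(Sum[i] == A[i] + B[i]);

    // extractelement(insertelement(A, 99, 1), lane) folds to the inserted
    // scalar for lane 1 and to the corresponding lane of A otherwise.
    std::array<int, 4> Ins = A;
    Ins[1] = 99;
    assert(Ins[1] == 99 && Ins[3] == A[3]);
    return 0;
  }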
-
-/// CollectSingleShuffleElements - If V is a shuffle of values that ONLY returns
-/// elements from either LHS or RHS, return the shuffle mask and true.
-/// Otherwise, return false.
-static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
- std::vector<Constant*> &Mask) {
- assert(V->getType() == LHS->getType() && V->getType() == RHS->getType() &&
- "Invalid CollectSingleShuffleElements");
- unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
-
- if (isa<UndefValue>(V)) {
- Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(V->getContext())));
- return true;
- }
-
- if (V == LHS) {
- for (unsigned i = 0; i != NumElts; ++i)
- Mask.push_back(ConstantInt::get(Type::getInt32Ty(V->getContext()), i));
- return true;
- }
-
- if (V == RHS) {
- for (unsigned i = 0; i != NumElts; ++i)
- Mask.push_back(ConstantInt::get(Type::getInt32Ty(V->getContext()),
- i+NumElts));
- return true;
- }
-
- if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
- // If this is an insert of an extract from some other vector, include it.
- Value *VecOp = IEI->getOperand(0);
- Value *ScalarOp = IEI->getOperand(1);
- Value *IdxOp = IEI->getOperand(2);
-
- if (!isa<ConstantInt>(IdxOp))
- return false;
- unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
-
- if (isa<UndefValue>(ScalarOp)) { // inserting undef into vector.
- // Okay, we can handle this if the vector we are inserting into is
- // transitively ok.
- if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
- // If so, update the mask to reflect the inserted undef.
- Mask[InsertedIdx] = UndefValue::get(Type::getInt32Ty(V->getContext()));
- return true;
- }
- } else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)){
- if (isa<ConstantInt>(EI->getOperand(1)) &&
- EI->getOperand(0)->getType() == V->getType()) {
- unsigned ExtractedIdx =
- cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
-
- // This must be extracting from either LHS or RHS.
- if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
- // Okay, we can handle this if the vector we are inserting into is
- // transitively ok.
- if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
- // If so, update the mask to reflect the inserted value.
- if (EI->getOperand(0) == LHS) {
- Mask[InsertedIdx % NumElts] =
- ConstantInt::get(Type::getInt32Ty(V->getContext()),
- ExtractedIdx);
- } else {
- assert(EI->getOperand(0) == RHS);
- Mask[InsertedIdx % NumElts] =
- ConstantInt::get(Type::getInt32Ty(V->getContext()),
- ExtractedIdx+NumElts);
-
- }
- return true;
- }
- }
- }
- }
- }
- // TODO: Handle shufflevector here!
-
- return false;
-}
-
-/// CollectShuffleElements - We are building a shuffle of V, using RHS as the
-/// RHS of the shuffle instruction, if it is not null. Return a shuffle mask
-/// that computes V and the LHS value of the shuffle.
-static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
- Value *&RHS) {
- assert(V->getType()->isVectorTy() &&
- (RHS == 0 || V->getType() == RHS->getType()) &&
- "Invalid shuffle!");
- unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
-
- if (isa<UndefValue>(V)) {
- Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(V->getContext())));
- return V;
- } else if (isa<ConstantAggregateZero>(V)) {
- Mask.assign(NumElts, ConstantInt::get(Type::getInt32Ty(V->getContext()),0));
- return V;
- } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
- // If this is an insert of an extract from some other vector, include it.
- Value *VecOp = IEI->getOperand(0);
- Value *ScalarOp = IEI->getOperand(1);
- Value *IdxOp = IEI->getOperand(2);
-
- if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
- if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
- EI->getOperand(0)->getType() == V->getType()) {
- unsigned ExtractedIdx =
- cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
- unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
-
- // Either the vector extracted from or the vector inserted into must be
- // RHS; otherwise we'd end up with a shuffle of three inputs.
- if (EI->getOperand(0) == RHS || RHS == 0) {
- RHS = EI->getOperand(0);
- Value *V = CollectShuffleElements(VecOp, Mask, RHS);
- Mask[InsertedIdx % NumElts] =
- ConstantInt::get(Type::getInt32Ty(V->getContext()),
- NumElts+ExtractedIdx);
- return V;
- }
-
- if (VecOp == RHS) {
- Value *V = CollectShuffleElements(EI->getOperand(0), Mask, RHS);
- // Everything but the extracted element is replaced with the RHS.
- for (unsigned i = 0; i != NumElts; ++i) {
- if (i != InsertedIdx)
- Mask[i] = ConstantInt::get(Type::getInt32Ty(V->getContext()),
- NumElts+i);
- }
- return V;
- }
-
- // If this insertelement is a chain that comes from exactly these two
- // vectors, return the vector and the effective shuffle.
- if (CollectSingleShuffleElements(IEI, EI->getOperand(0), RHS, Mask))
- return EI->getOperand(0);
- }
- }
- }
- // TODO: Handle shufflevector here!
-
- // Otherwise, can't do anything fancy. Return an identity vector.
- for (unsigned i = 0; i != NumElts; ++i)
- Mask.push_back(ConstantInt::get(Type::getInt32Ty(V->getContext()), i));
- return V;
-}
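
CollectShuffleElements/CollectSingleShuffleElements above describe an insertelement-of-extractelement chain as a single shuffle whose mask indexes the concatenation of LHS and RHS: index i < NumElts reads LHS, and NumElts <= i < 2*NumElts reads RHS. A standalone sketch of that equivalence (applyShuffle and the concrete vectors are assumptions of this example, not LLVM code):

  #include <array>
  #include <cassert>
  #include <cstddef>

  // Apply a shuffle mask over the concatenation (LHS, RHS).
  template <std::size_t N>
  static std::array<int, N> applyShuffle(const std::array<int, N> &LHS,
                                         const std::array<int, N> &RHS,
                                         const std::array<unsigned, N> &Mask) {
    std::array<int, N> Out{};
    for (std::size_t i = 0; i != N; ++i)
      Out[i] = Mask[i] < N ? LHS[Mask[i]] : RHS[Mask[i] - N];
    return Out;
  }

  int main() {
    std::array<int, 4> LHS = {10, 11, 12, 13};
    std::array<int, 4> RHS = {20, 21, 22, 23};
    // insertelement(LHS, extractelement(RHS, 2), 1) is the single shuffle
    // with mask <0, 4+2, 2, 3>.
    std::array<unsigned, 4> Mask = {0, 6, 2, 3};
    std::array<int, 4> Expect = {10, 22, 12, 13};
    assert(applyShuffle(LHS, RHS, Mask) == Expect);
    return 0;
  }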
-
-Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
- Value *VecOp = IE.getOperand(0);
- Value *ScalarOp = IE.getOperand(1);
- Value *IdxOp = IE.getOperand(2);
-
- // Inserting undef, or inserting into an undef position: remove this.
- if (isa<UndefValue>(ScalarOp) || isa<UndefValue>(IdxOp))
- ReplaceInstUsesWith(IE, VecOp);
-
- // If the inserted element was extracted from some other vector, and if the
- // indexes are constant, try to turn this into a shufflevector operation.
- if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
- if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
- EI->getOperand(0)->getType() == IE.getType()) {
- unsigned NumVectorElts = IE.getType()->getNumElements();
- unsigned ExtractedIdx =
- cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
- unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
-
- if (ExtractedIdx >= NumVectorElts) // Out of range extract.
- return ReplaceInstUsesWith(IE, VecOp);
-
- if (InsertedIdx >= NumVectorElts) // Out of range insert.
- return ReplaceInstUsesWith(IE, UndefValue::get(IE.getType()));
-
- // If we are extracting a value from a vector, then inserting it right
- // back into the same place, just use the input vector.
- if (EI->getOperand(0) == VecOp && ExtractedIdx == InsertedIdx)
- return ReplaceInstUsesWith(IE, VecOp);
-
- // If this insertelement isn't used by some other insertelement, turn it
- // (and any insertelements it points to) into one big shuffle.
- if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.use_back())) {
- std::vector<Constant*> Mask;
- Value *RHS = 0;
- Value *LHS = CollectShuffleElements(&IE, Mask, RHS);
- if (RHS == 0) RHS = UndefValue::get(LHS->getType());
- // We now have a shuffle of LHS, RHS, Mask.
- return new ShuffleVectorInst(LHS, RHS,
- ConstantVector::get(Mask));
- }
- }
- }
-
- unsigned VWidth = cast<VectorType>(VecOp->getType())->getNumElements();
- APInt UndefElts(VWidth, 0);
- APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
- if (SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts))
- return &IE;
-
- return 0;
-}
-
-
-Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
- Value *LHS = SVI.getOperand(0);
- Value *RHS = SVI.getOperand(1);
- std::vector<unsigned> Mask = getShuffleMask(&SVI);
-
- bool MadeChange = false;
-
- // Undefined shuffle mask -> undefined value.
- if (isa<UndefValue>(SVI.getOperand(2)))
- return ReplaceInstUsesWith(SVI, UndefValue::get(SVI.getType()));
-
- unsigned VWidth = cast<VectorType>(SVI.getType())->getNumElements();
-
- if (VWidth != cast<VectorType>(LHS->getType())->getNumElements())
- return 0;
-
- APInt UndefElts(VWidth, 0);
- APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
- if (SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) {
- LHS = SVI.getOperand(0);
- RHS = SVI.getOperand(1);
- MadeChange = true;
- }
-
- // Canonicalize shuffle(x,x,mask)     -> shuffle(x, undef, mask').
- // Canonicalize shuffle(undef,x,mask) -> shuffle(x, undef, mask').
- if (LHS == RHS || isa<UndefValue>(LHS)) {
- if (isa<UndefValue>(LHS) && LHS == RHS) {
- // shuffle(undef,undef,mask) -> undef.
- return ReplaceInstUsesWith(SVI, LHS);
- }
-
- // Remap any references to RHS to use LHS.
- std::vector<Constant*> Elts;
- for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
- if (Mask[i] >= 2*e)
- Elts.push_back(UndefValue::get(Type::getInt32Ty(SVI.getContext())));
- else {
- if ((Mask[i] >= e && isa<UndefValue>(RHS)) ||
- (Mask[i] < e && isa<UndefValue>(LHS))) {
- Mask[i] = 2*e; // Turn into undef.
- Elts.push_back(UndefValue::get(Type::getInt32Ty(SVI.getContext())));
- } else {
- Mask[i] = Mask[i] % e; // Force to LHS.
- Elts.push_back(ConstantInt::get(Type::getInt32Ty(SVI.getContext()),
- Mask[i]));
- }
- }
- }
- SVI.setOperand(0, SVI.getOperand(1));
- SVI.setOperand(1, UndefValue::get(RHS->getType()));
- SVI.setOperand(2, ConstantVector::get(Elts));
- LHS = SVI.getOperand(0);
- RHS = SVI.getOperand(1);
- MadeChange = true;
- }
-
- // Analyze the shuffle: is the LHS or the RHS an identity shuffle?
- bool isLHSID = true, isRHSID = true;
-
- for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
- if (Mask[i] >= e*2) continue; // Ignore undef values.
- // Is this an identity shuffle of the LHS value?
- isLHSID &= (Mask[i] == i);
-
- // Is this an identity shuffle of the RHS value?
- isRHSID &= (Mask[i]-e == i);
- }
-
- // Eliminate identity shuffles.
- if (isLHSID) return ReplaceInstUsesWith(SVI, LHS);
- if (isRHSID) return ReplaceInstUsesWith(SVI, RHS);
-
- // If the LHS is a shufflevector itself, see if we can combine it with this
- // one without producing an unusual shuffle. Here we are really conservative:
- // we are absolutely afraid of producing a shuffle mask not in the input
- // program, because the code gen may not be smart enough to turn a merged
- // shuffle into two specific shuffles: it may produce worse code. As such,
- // we only merge two shuffles if the result is one of the two input shuffle
- // masks. In this case, merging the shuffles just removes one instruction,
- // which we know is safe. This is good for things like turning:
- // (splat(splat)) -> splat.
- if (ShuffleVectorInst *LHSSVI = dyn_cast<ShuffleVectorInst>(LHS)) {
- if (isa<UndefValue>(RHS)) {
- std::vector<unsigned> LHSMask = getShuffleMask(LHSSVI);
-
- if (LHSMask.size() == Mask.size()) {
- std::vector<unsigned> NewMask;
- for (unsigned i = 0, e = Mask.size(); i != e; ++i)
- if (Mask[i] >= e)
- NewMask.push_back(2*e);
- else
- NewMask.push_back(LHSMask[Mask[i]]);
-
- // If the result mask is equal to the src shuffle or this
- // shuffle mask, do the replacement.
- if (NewMask == LHSMask || NewMask == Mask) {
- unsigned LHSInNElts =
- cast<VectorType>(LHSSVI->getOperand(0)->getType())->
- getNumElements();
- std::vector<Constant*> Elts;
- for (unsigned i = 0, e = NewMask.size(); i != e; ++i) {
- if (NewMask[i] >= LHSInNElts*2) {
- Elts.push_back(UndefValue::get(
- Type::getInt32Ty(SVI.getContext())));
- } else {
- Elts.push_back(ConstantInt::get(
- Type::getInt32Ty(SVI.getContext()),
- NewMask[i]));
- }
- }
- return new ShuffleVectorInst(LHSSVI->getOperand(0),
- LHSSVI->getOperand(1),
- ConstantVector::get(Elts));
- }
- }
- }
- }
-
- return MadeChange ? &SVI : 0;
-}
-
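The shuffle-of-shuffle merge at the end of visitShuffleVectorInst composes the two masks and only fires when the composed mask equals one of the originals, which is exactly the splat(splat) -> splat situation its comment mentions. A standalone sketch of that composition and check (composeMasks is a name invented here; this is not LLVM code):

  #include <cassert>
  #include <vector>

  // Compose an outer shuffle of an inner shuffle whose RHS is undef into a
  // single mask; out-of-range entries (>= 2*E) stand for undef lanes.
  static std::vector<unsigned> composeMasks(const std::vector<unsigned> &Inner,
                                            const std::vector<unsigned> &Outer) {
    unsigned E = Outer.size();
    std::vector<unsigned> NewMask;
    for (unsigned i = 0; i != E; ++i)
      NewMask.push_back(Outer[i] >= E ? 2 * E : Inner[Outer[i]]);
    return NewMask;
  }

  int main() {
    // splat(splat): both shuffles broadcast lane 0 of their LHS.
    std::vector<unsigned> Inner(4, 0), Outer(4, 0);
    std::vector<unsigned> NewMask = composeMasks(Inner, Outer);
    // The composed mask equals one of the input masks, so the merge is
    // considered safe and the pair collapses into a single splat.
    assert(NewMask == Inner);
    return 0;
  }
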
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h b/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h
deleted file mode 100644
index 9d88621..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstCombineWorklist.h
+++ /dev/null
@@ -1,105 +0,0 @@
-//===- InstCombineWorklist.h - Worklist for the InstCombine pass ----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef INSTCOMBINE_WORKLIST_H
-#define INSTCOMBINE_WORKLIST_H
-
-#define DEBUG_TYPE "instcombine"
-#include "llvm/Instruction.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/Support/raw_ostream.h"
-
-namespace llvm {
-
-/// InstCombineWorklist - This is the worklist management logic for
-/// InstCombine.
-class VISIBILITY_HIDDEN InstCombineWorklist {
- SmallVector<Instruction*, 256> Worklist;
- DenseMap<Instruction*, unsigned> WorklistMap;
-
- void operator=(const InstCombineWorklist&RHS); // DO NOT IMPLEMENT
- InstCombineWorklist(const InstCombineWorklist&); // DO NOT IMPLEMENT
-public:
- InstCombineWorklist() {}
-
- bool isEmpty() const { return Worklist.empty(); }
-
- /// Add - Add the specified instruction to the worklist if it isn't already
- /// in it.
- void Add(Instruction *I) {
- if (WorklistMap.insert(std::make_pair(I, Worklist.size())).second) {
- DEBUG(errs() << "IC: ADD: " << *I << '\n');
- Worklist.push_back(I);
- }
- }
-
- void AddValue(Value *V) {
- if (Instruction *I = dyn_cast<Instruction>(V))
- Add(I);
- }
-
- /// AddInitialGroup - Add the specified batch of stuff in reverse order,
- /// which should only be done when the worklist is empty and when the group
- /// has no duplicates.
- void AddInitialGroup(Instruction *const *List, unsigned NumEntries) {
- assert(Worklist.empty() && "Worklist must be empty to add initial group");
- Worklist.reserve(NumEntries+16);
- DEBUG(errs() << "IC: ADDING: " << NumEntries << " instrs to worklist\n");
- for (; NumEntries; --NumEntries) {
- Instruction *I = List[NumEntries-1];
- WorklistMap.insert(std::make_pair(I, Worklist.size()));
- Worklist.push_back(I);
- }
- }
-
- // Remove - remove I from the worklist if it exists.
- void Remove(Instruction *I) {
- DenseMap<Instruction*, unsigned>::iterator It = WorklistMap.find(I);
- if (It == WorklistMap.end()) return; // Not in worklist.
-
- // Don't bother moving everything down, just null out the slot.
- Worklist[It->second] = 0;
-
- WorklistMap.erase(It);
- }
-
- Instruction *RemoveOne() {
- Instruction *I = Worklist.back();
- Worklist.pop_back();
- WorklistMap.erase(I);
- return I;
- }
-
- /// AddUsersToWorkList - When an instruction is simplified, add all users of
- /// the instruction to the worklist because they might now simplify further.
- ///
- void AddUsersToWorkList(Instruction &I) {
- for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
- UI != UE; ++UI)
- Add(cast<Instruction>(*UI));
- }
-
-
- /// Zap - check that the worklist is empty and nuke the backing store for
- /// the map if it is large.
- void Zap() {
- assert(WorklistMap.empty() && "Worklist empty, but map not?");
-
- // Do an explicit clear, this shrinks the map if needed.
- WorklistMap.clear();
- }
-};
-
-} // end namespace llvm.
-
-#endif
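
The worklist above pairs a vector (for ordering) with a DenseMap (for de-duplication), and Remove() nulls the slot instead of shifting the vector, so the drain loop has to skip null entries. A rough STL-only model of the same design (MiniWorklist and its members are names invented here; this is not the LLVM/ClamAV class):

  #include <cassert>
  #include <string>
  #include <unordered_map>
  #include <vector>

  struct MiniWorklist {
    std::vector<const std::string *> List;
    std::unordered_map<const std::string *, std::size_t> Pos;

    void add(const std::string *S) {
      if (Pos.insert({S, List.size()}).second)  // only if not already queued
        List.push_back(S);
    }
    void remove(const std::string *S) {
      auto It = Pos.find(S);
      if (It == Pos.end()) return;
      List[It->second] = nullptr;               // null the slot, don't shift
      Pos.erase(It);
    }
    const std::string *removeOne() {
      const std::string *S = List.back();
      List.pop_back();
      if (S) Pos.erase(S);
      return S;                                 // may be null: caller skips it
    }
    bool empty() const { return List.empty(); }
  };

  int main() {
    std::string A = "a", B = "b";
    MiniWorklist W;
    W.add(&A); W.add(&B); W.add(&A);   // the duplicate add is ignored
    W.remove(&B);                      // leaves a null slot behind
    unsigned Seen = 0;
    while (!W.empty())
      if (W.removeOne()) ++Seen;       // drain, skipping nulled slots
    assert(Seen == 1);
    return 0;
  }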
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/libclamav/c++/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
deleted file mode 100644
index af9ec5c..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ /dev/null
@@ -1,1274 +0,0 @@
-//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// InstructionCombining - Combine instructions to form fewer, simple
-// instructions. This pass does not modify the CFG. This pass is where
-// algebraic simplification happens.
-//
-// This pass combines things like:
-// %Y = add i32 %X, 1
-// %Z = add i32 %Y, 1
-// into:
-// %Z = add i32 %X, 2
-//
-// This is a simple worklist driven algorithm.
-//
-// This pass guarantees that the following canonicalizations are performed on
-// the program:
-// 1. If a binary operator has a constant operand, it is moved to the RHS
-// 2. Bitwise operators with constant operands are always grouped so that
-// shifts are performed first, then or's, then and's, then xor's.
-// 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
-// 4. All cmp instructions on boolean values are replaced with logical ops
-// 5. add X, X is represented as (X*2) => (X << 1)
-// 6. Multiplies with a power-of-two constant argument are transformed into
-// shifts.
-// ... etc.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "instcombine"
-#include "llvm/Transforms/Scalar.h"
-#include "InstCombine.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Analysis/MemoryBuiltins.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Transforms/Utils/Local.h"
-#include "llvm/Support/CFG.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/PatternMatch.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/Statistic.h"
-#include <algorithm>
-#include <climits>
-using namespace llvm;
-using namespace llvm::PatternMatch;
-
-STATISTIC(NumCombined , "Number of insts combined");
-STATISTIC(NumConstProp, "Number of constant folds");
-STATISTIC(NumDeadInst , "Number of dead inst eliminated");
-STATISTIC(NumSunkInst , "Number of instructions sunk");
-
-
-char InstCombiner::ID = 0;
-static RegisterPass<InstCombiner>
-X("instcombine", "Combine redundant instructions");
-
-void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addPreservedID(LCSSAID);
- AU.setPreservesCFG();
-}
-
-
-/// ShouldChangeType - Return true if it is desirable to convert a computation
-/// from 'From' to 'To'. We don't want to convert from a legal to an illegal
-/// type for example, or from a smaller to a larger illegal type.
-bool InstCombiner::ShouldChangeType(const Type *From, const Type *To) const {
- assert(From->isIntegerTy() && To->isIntegerTy());
-
- // If we don't have TD, we don't know if the source/dest are legal.
- if (!TD) return false;
-
- unsigned FromWidth = From->getPrimitiveSizeInBits();
- unsigned ToWidth = To->getPrimitiveSizeInBits();
- bool FromLegal = TD->isLegalInteger(FromWidth);
- bool ToLegal = TD->isLegalInteger(ToWidth);
-
- // If this is a legal integer from type, and the result would be an illegal
- // type, don't do the transformation.
- if (FromLegal && !ToLegal)
- return false;
-
- // Otherwise, if both are illegal, do not increase the size of the result. We
- // do allow things like i160 -> i64, but not i64 -> i160.
- if (!FromLegal && !ToLegal && ToWidth > FromWidth)
- return false;
-
- return true;
-}
-
-
-// SimplifyCommutative - This performs a few simplifications for commutative
-// operators:
-//
-// 1. Order operands such that they are listed from right (least complex) to
-// left (most complex). This puts constants before unary operators before
-// binary operators.
-//
-// 2. Transform: (op (op V, C1), C2) ==> (op V, (op C1, C2))
-// 3. Transform: (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
-//
-bool InstCombiner::SimplifyCommutative(BinaryOperator &I) {
- bool Changed = false;
- if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1)))
- Changed = !I.swapOperands();
-
- if (!I.isAssociative()) return Changed;
-
- Instruction::BinaryOps Opcode = I.getOpcode();
- if (BinaryOperator *Op = dyn_cast<BinaryOperator>(I.getOperand(0)))
- if (Op->getOpcode() == Opcode && isa<Constant>(Op->getOperand(1))) {
- if (isa<Constant>(I.getOperand(1))) {
- Constant *Folded = ConstantExpr::get(I.getOpcode(),
- cast<Constant>(I.getOperand(1)),
- cast<Constant>(Op->getOperand(1)));
- I.setOperand(0, Op->getOperand(0));
- I.setOperand(1, Folded);
- return true;
- }
-
- if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1)))
- if (Op1->getOpcode() == Opcode && isa<Constant>(Op1->getOperand(1)) &&
- Op->hasOneUse() && Op1->hasOneUse()) {
- Constant *C1 = cast<Constant>(Op->getOperand(1));
- Constant *C2 = cast<Constant>(Op1->getOperand(1));
-
- // Fold (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
- Constant *Folded = ConstantExpr::get(I.getOpcode(), C1, C2);
- Instruction *New = BinaryOperator::Create(Opcode, Op->getOperand(0),
- Op1->getOperand(0),
- Op1->getName(), &I);
- Worklist.Add(New);
- I.setOperand(0, New);
- I.setOperand(1, Folded);
- return true;
- }
- }
- return Changed;
-}
-
-// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
-// if the LHS is a constant zero (which is the 'negate' form).
-//
-Value *InstCombiner::dyn_castNegVal(Value *V) const {
- if (BinaryOperator::isNeg(V))
- return BinaryOperator::getNegArgument(V);
-
- // Constants can be considered to be negated values if they can be folded.
- if (ConstantInt *C = dyn_cast<ConstantInt>(V))
- return ConstantExpr::getNeg(C);
-
- if (ConstantVector *C = dyn_cast<ConstantVector>(V))
- if (C->getType()->getElementType()->isIntegerTy())
- return ConstantExpr::getNeg(C);
-
- return 0;
-}
-
-// dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the
-// instruction if the LHS is a constant negative zero (which is the 'negate'
-// form).
-//
-Value *InstCombiner::dyn_castFNegVal(Value *V) const {
- if (BinaryOperator::isFNeg(V))
- return BinaryOperator::getFNegArgument(V);
-
- // Constants can be considered to be negated values if they can be folded.
- if (ConstantFP *C = dyn_cast<ConstantFP>(V))
- return ConstantExpr::getFNeg(C);
-
- if (ConstantVector *C = dyn_cast<ConstantVector>(V))
- if (C->getType()->getElementType()->isFloatingPointTy())
- return ConstantExpr::getFNeg(C);
-
- return 0;
-}
-
-static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
- InstCombiner *IC) {
- if (CastInst *CI = dyn_cast<CastInst>(&I))
- return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
-
- // Figure out if the constant is the left or the right argument.
- bool ConstIsRHS = isa<Constant>(I.getOperand(1));
- Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));
-
- if (Constant *SOC = dyn_cast<Constant>(SO)) {
- if (ConstIsRHS)
- return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
- return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
- }
-
- Value *Op0 = SO, *Op1 = ConstOperand;
- if (!ConstIsRHS)
- std::swap(Op0, Op1);
-
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
- return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
- SO->getName()+".op");
- if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
- return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
- SO->getName()+".cmp");
- if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
- return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
- SO->getName()+".cmp");
- llvm_unreachable("Unknown binary instruction type!");
-}
-
-// FoldOpIntoSelect - Given an instruction with a select as one operand and a
-// constant as the other operand, try to fold the binary operator into the
-// select arguments. This also works for Cast instructions, which obviously do
-// not have a second operand.
-Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
- // Don't modify shared select instructions
- if (!SI->hasOneUse()) return 0;
- Value *TV = SI->getOperand(1);
- Value *FV = SI->getOperand(2);
-
- if (isa<Constant>(TV) || isa<Constant>(FV)) {
- // Bool selects with constant operands can be folded to logical ops.
- if (SI->getType()->isIntegerTy(1)) return 0;
-
- Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
- Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);
-
- return SelectInst::Create(SI->getCondition(), SelectTrueVal,
- SelectFalseVal);
- }
- return 0;
-}
-
-
-/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
-/// has a PHI node as operand #0, see if we can fold the instruction into the
-/// PHI (which is only possible if all operands to the PHI are constants).
-///
-/// If AllowAggressive is true, FoldOpIntoPhi will allow certain transforms
-/// that would normally be unprofitable because they strongly encourage jump
-/// threading.
-Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I,
- bool AllowAggressive) {
- AllowAggressive = false;
- PHINode *PN = cast<PHINode>(I.getOperand(0));
- unsigned NumPHIValues = PN->getNumIncomingValues();
- if (NumPHIValues == 0 ||
- // We normally only transform phis with a single use, unless we're trying
- // hard to make jump threading happen.
- (!PN->hasOneUse() && !AllowAggressive))
- return 0;
-
-
- // Check to see if all of the operands of the PHI are simple constants
- // (constantint/constantfp/undef). If there is one non-constant value,
- // remember the BB it is in. If there is more than one or if *it* is a PHI,
- // bail out. We don't do arbitrary constant expressions here because moving
- // their computation can be expensive without a cost model.
- BasicBlock *NonConstBB = 0;
- for (unsigned i = 0; i != NumPHIValues; ++i)
- if (!isa<Constant>(PN->getIncomingValue(i)) ||
- isa<ConstantExpr>(PN->getIncomingValue(i))) {
- if (NonConstBB) return 0; // More than one non-const value.
- if (isa<PHINode>(PN->getIncomingValue(i))) return 0; // Itself a phi.
- NonConstBB = PN->getIncomingBlock(i);
-
- // If the incoming non-constant value is in I's block, we have an infinite
- // loop.
- if (NonConstBB == I.getParent())
- return 0;
- }
-
- // If there is exactly one non-constant value, we can insert a copy of the
- // operation in that block. However, if this is a critical edge, we would be
- // inserting the computation on some other paths (e.g. inside a loop). Only
- // do this if the pred block is unconditionally branching into the phi block.
- if (NonConstBB != 0 && !AllowAggressive) {
- BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
- if (!BI || !BI->isUnconditional()) return 0;
- }
-
- // Okay, we can do the transformation: create the new PHI node.
- PHINode *NewPN = PHINode::Create(I.getType(), "");
- NewPN->reserveOperandSpace(PN->getNumOperands()/2);
- InsertNewInstBefore(NewPN, *PN);
- NewPN->takeName(PN);
-
- // Next, add all of the operands to the PHI.
- if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
- // We only currently try to fold the condition of a select when it is a phi,
- // not the true/false values.
- Value *TrueV = SI->getTrueValue();
- Value *FalseV = SI->getFalseValue();
- BasicBlock *PhiTransBB = PN->getParent();
- for (unsigned i = 0; i != NumPHIValues; ++i) {
- BasicBlock *ThisBB = PN->getIncomingBlock(i);
- Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
- Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
- Value *InV = 0;
- if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
- InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
- } else {
- assert(PN->getIncomingBlock(i) == NonConstBB);
- InV = SelectInst::Create(PN->getIncomingValue(i), TrueVInPred,
- FalseVInPred,
- "phitmp", NonConstBB->getTerminator());
- Worklist.Add(cast<Instruction>(InV));
- }
- NewPN->addIncoming(InV, ThisBB);
- }
- } else if (I.getNumOperands() == 2) {
- Constant *C = cast<Constant>(I.getOperand(1));
- for (unsigned i = 0; i != NumPHIValues; ++i) {
- Value *InV = 0;
- if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
- if (CmpInst *CI = dyn_cast<CmpInst>(&I))
- InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
- else
- InV = ConstantExpr::get(I.getOpcode(), InC, C);
- } else {
- assert(PN->getIncomingBlock(i) == NonConstBB);
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
- InV = BinaryOperator::Create(BO->getOpcode(),
- PN->getIncomingValue(i), C, "phitmp",
- NonConstBB->getTerminator());
- else if (CmpInst *CI = dyn_cast<CmpInst>(&I))
- InV = CmpInst::Create(CI->getOpcode(),
- CI->getPredicate(),
- PN->getIncomingValue(i), C, "phitmp",
- NonConstBB->getTerminator());
- else
- llvm_unreachable("Unknown binop!");
-
- Worklist.Add(cast<Instruction>(InV));
- }
- NewPN->addIncoming(InV, PN->getIncomingBlock(i));
- }
- } else {
- CastInst *CI = cast<CastInst>(&I);
- const Type *RetTy = CI->getType();
- for (unsigned i = 0; i != NumPHIValues; ++i) {
- Value *InV;
- if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
- InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
- } else {
- assert(PN->getIncomingBlock(i) == NonConstBB);
- InV = CastInst::Create(CI->getOpcode(), PN->getIncomingValue(i),
- I.getType(), "phitmp",
- NonConstBB->getTerminator());
- Worklist.Add(cast<Instruction>(InV));
- }
- NewPN->addIncoming(InV, PN->getIncomingBlock(i));
- }
- }
- return ReplaceInstUsesWith(I, NewPN);
-}
-
-/// FindElementAtOffset - Given a type and a constant offset, determine whether
-/// or not there is a sequence of GEP indices into the type that will land us at
-/// the specified offset. If so, fill them into NewIndices and return the
-/// resultant element type, otherwise return null.
-const Type *InstCombiner::FindElementAtOffset(const Type *Ty, int64_t Offset,
- SmallVectorImpl<Value*> &NewIndices) {
- if (!TD) return 0;
- if (!Ty->isSized()) return 0;
-
- // Start with the index over the outer type. Note that the type size
- // might be zero (even if the offset isn't zero) if the indexed type
- // is something like [0 x {int, int}]
- const Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
- int64_t FirstIdx = 0;
- if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
- FirstIdx = Offset/TySize;
- Offset -= FirstIdx*TySize;
-
- // Handle hosts where % returns negative instead of values [0..TySize).
- if (Offset < 0) {
- --FirstIdx;
- Offset += TySize;
- assert(Offset >= 0);
- }
- assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
- }
-
- NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));
-
- // Index into the types. If we fail, set OrigBase to null.
- while (Offset) {
- // Indexing into tail padding between struct/array elements.
- if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
- return 0;
-
- if (const StructType *STy = dyn_cast<StructType>(Ty)) {
- const StructLayout *SL = TD->getStructLayout(STy);
- assert(Offset < (int64_t)SL->getSizeInBytes() &&
- "Offset must stay within the indexed type");
-
- unsigned Elt = SL->getElementContainingOffset(Offset);
- NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
- Elt));
-
- Offset -= SL->getElementOffset(Elt);
- Ty = STy->getElementType(Elt);
- } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
- uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
- assert(EltSize && "Cannot index into a zero-sized array");
- NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
- Offset %= EltSize;
- Ty = AT->getElementType();
- } else {
- // Otherwise, we can't index into the middle of this atomic type, bail.
- return 0;
- }
- }
-
- return Ty;
-}
-
-
-
-Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
- SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
-
- if (Value *V = SimplifyGEPInst(&Ops[0], Ops.size(), TD))
- return ReplaceInstUsesWith(GEP, V);
-
- Value *PtrOp = GEP.getOperand(0);
-
- if (isa<UndefValue>(GEP.getOperand(0)))
- return ReplaceInstUsesWith(GEP, UndefValue::get(GEP.getType()));
-
- // Eliminate unneeded casts for indices.
- if (TD) {
- bool MadeChange = false;
- unsigned PtrSize = TD->getPointerSizeInBits();
-
- gep_type_iterator GTI = gep_type_begin(GEP);
- for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
- I != E; ++I, ++GTI) {
- if (!isa<SequentialType>(*GTI)) continue;
-
- // If we are using a wider index than needed for this platform, shrink it
- // to what we need. If narrower, sign-extend it to what we need. This
- // explicit cast can make subsequent optimizations more obvious.
- unsigned OpBits = cast<IntegerType>((*I)->getType())->getBitWidth();
- if (OpBits == PtrSize)
- continue;
-
- *I = Builder->CreateIntCast(*I, TD->getIntPtrType(GEP.getContext()),true);
- MadeChange = true;
- }
- if (MadeChange) return &GEP;
- }
-
- // Combine Indices - If the source pointer to this getelementptr instruction
- // is a getelementptr instruction, combine the indices of the two
- // getelementptr instructions into a single instruction.
- //
- if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
- // Note that if our source is a gep chain itself, we wait for that
- // chain to be resolved before we perform this transformation. This
- // avoids us creating a TON of code in some cases.
- //
- if (GetElementPtrInst *SrcGEP =
- dyn_cast<GetElementPtrInst>(Src->getOperand(0)))
- if (SrcGEP->getNumOperands() == 2)
- return 0; // Wait until our source is folded to completion.
-
- SmallVector<Value*, 8> Indices;
-
- // Find out whether the last index in the source GEP is a sequential idx.
- bool EndsWithSequential = false;
- for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
- I != E; ++I)
- EndsWithSequential = !(*I)->isStructTy();
-
- // Can we combine the two pointer arithmetics offsets?
- if (EndsWithSequential) {
- // Replace: gep (gep %P, long B), long A, ...
- // With: T = long A+B; gep %P, T, ...
- //
- Value *Sum;
- Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
- Value *GO1 = GEP.getOperand(1);
- if (SO1 == Constant::getNullValue(SO1->getType())) {
- Sum = GO1;
- } else if (GO1 == Constant::getNullValue(GO1->getType())) {
- Sum = SO1;
- } else {
- // If they aren't the same type, then the input hasn't been processed
- // by the loop above yet (which canonicalizes sequential index types to
- // intptr_t). Just avoid transforming this until the input has been
- // normalized.
- if (SO1->getType() != GO1->getType())
- return 0;
- Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
- }
-
- // Update the GEP in place if possible.
- if (Src->getNumOperands() == 2) {
- GEP.setOperand(0, Src->getOperand(0));
- GEP.setOperand(1, Sum);
- return &GEP;
- }
- Indices.append(Src->op_begin()+1, Src->op_end()-1);
- Indices.push_back(Sum);
- Indices.append(GEP.op_begin()+2, GEP.op_end());
- } else if (isa<Constant>(*GEP.idx_begin()) &&
- cast<Constant>(*GEP.idx_begin())->isNullValue() &&
- Src->getNumOperands() != 1) {
- // Otherwise we can do the fold if the first index of the GEP is a zero
- Indices.append(Src->op_begin()+1, Src->op_end());
- Indices.append(GEP.idx_begin()+1, GEP.idx_end());
- }
-
- if (!Indices.empty())
- return (GEP.isInBounds() && Src->isInBounds()) ?
- GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices.begin(),
- Indices.end(), GEP.getName()) :
- GetElementPtrInst::Create(Src->getOperand(0), Indices.begin(),
- Indices.end(), GEP.getName());
- }
-
- // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
- Value *StrippedPtr = PtrOp->stripPointerCasts();
- if (StrippedPtr != PtrOp) {
- const PointerType *StrippedPtrTy =cast<PointerType>(StrippedPtr->getType());
-
- bool HasZeroPointerIndex = false;
- if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
- HasZeroPointerIndex = C->isZero();
-
- // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
- // into : GEP [10 x i8]* X, i32 0, ...
- //
- // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
- // into : GEP i8* X, ...
- //
- // This occurs when the program declares an array extern like "int X[];"
- if (HasZeroPointerIndex) {
- const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
- if (const ArrayType *CATy =
- dyn_cast<ArrayType>(CPTy->getElementType())) {
- // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
- if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
- // -> GEP i8* X, ...
- SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
- GetElementPtrInst *Res =
- GetElementPtrInst::Create(StrippedPtr, Idx.begin(),
- Idx.end(), GEP.getName());
- Res->setIsInBounds(GEP.isInBounds());
- return Res;
- }
-
- if (const ArrayType *XATy =
- dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
- // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
- if (CATy->getElementType() == XATy->getElementType()) {
- // -> GEP [10 x i8]* X, i32 0, ...
- // At this point, we know that the cast source type is a pointer
- // to an array of the same type as the destination pointer
- // array. Because the array type is never stepped over (there
- // is a leading zero) we can fold the cast into this GEP.
- GEP.setOperand(0, StrippedPtr);
- return &GEP;
- }
- }
- }
- } else if (GEP.getNumOperands() == 2) {
- // Transform things like:
- // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
- // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
- const Type *SrcElTy = StrippedPtrTy->getElementType();
- const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
- if (TD && SrcElTy->isArrayTy() &&
- TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
- TD->getTypeAllocSize(ResElTy)) {
- Value *Idx[2];
- Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
- Idx[1] = GEP.getOperand(1);
- Value *NewGEP = GEP.isInBounds() ?
- Builder->CreateInBoundsGEP(StrippedPtr, Idx, Idx + 2, GEP.getName()) :
- Builder->CreateGEP(StrippedPtr, Idx, Idx + 2, GEP.getName());
- // V and GEP are both pointer types --> BitCast
- return new BitCastInst(NewGEP, GEP.getType());
- }
-
- // Transform things like:
- // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
- // (where tmp = 8*tmp2) into:
- // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
-
- if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) {
- uint64_t ArrayEltSize =
- TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
-
- // Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We
- // allow either a mul, shift, or constant here.
- Value *NewIdx = 0;
- ConstantInt *Scale = 0;
- if (ArrayEltSize == 1) {
- NewIdx = GEP.getOperand(1);
- Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
- } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
- NewIdx = ConstantInt::get(CI->getType(), 1);
- Scale = CI;
- } else if (Instruction *Inst =dyn_cast<Instruction>(GEP.getOperand(1))){
- if (Inst->getOpcode() == Instruction::Shl &&
- isa<ConstantInt>(Inst->getOperand(1))) {
- ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
- uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
- Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
- 1ULL << ShAmtVal);
- NewIdx = Inst->getOperand(0);
- } else if (Inst->getOpcode() == Instruction::Mul &&
- isa<ConstantInt>(Inst->getOperand(1))) {
- Scale = cast<ConstantInt>(Inst->getOperand(1));
- NewIdx = Inst->getOperand(0);
- }
- }
-
- // If the index will be to exactly the right offset with the scale taken
- // out, perform the transformation. Note, we don't know whether Scale is
- // signed or not. We'll use unsigned version of division/modulo
- // operation after making sure Scale doesn't have the sign bit set.
- if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
- Scale->getZExtValue() % ArrayEltSize == 0) {
- Scale = ConstantInt::get(Scale->getType(),
- Scale->getZExtValue() / ArrayEltSize);
- if (Scale->getZExtValue() != 1) {
- Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
- false /*ZExt*/);
- NewIdx = Builder->CreateMul(NewIdx, C, "idxscale");
- }
-
- // Insert the new GEP instruction.
- Value *Idx[2];
- Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
- Idx[1] = NewIdx;
- Value *NewGEP = GEP.isInBounds() ?
- Builder->CreateInBoundsGEP(StrippedPtr, Idx, Idx + 2,GEP.getName()):
- Builder->CreateGEP(StrippedPtr, Idx, Idx + 2, GEP.getName());
- // The NewGEP must be pointer typed, so must the old one -> BitCast
- return new BitCastInst(NewGEP, GEP.getType());
- }
- }
- }
- }
-
- /// See if we can simplify:
- /// X = bitcast A* to B*
- /// Y = gep X, <...constant indices...>
- /// into a gep of the original struct. This is important for SROA and alias
- /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
- if (TD &&
- !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices()) {
- // Determine how much the GEP moves the pointer. We are guaranteed to get
- // a constant back from EmitGEPOffset.
- ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP));
- int64_t Offset = OffsetV->getSExtValue();
-
- // If this GEP instruction doesn't move the pointer, just replace the GEP
- // with a bitcast of the real input to the dest type.
- if (Offset == 0) {
- // If the bitcast is of an allocation, and the allocation will be
- // converted to match the type of the cast, don't touch this.
- if (isa<AllocaInst>(BCI->getOperand(0)) ||
- isMalloc(BCI->getOperand(0))) {
- // See if the bitcast simplifies, if so, don't nuke this GEP yet.
- if (Instruction *I = visitBitCast(*BCI)) {
- if (I != BCI) {
- I->takeName(BCI);
- BCI->getParent()->getInstList().insert(BCI, I);
- ReplaceInstUsesWith(*BCI, I);
- }
- return &GEP;
- }
- }
- return new BitCastInst(BCI->getOperand(0), GEP.getType());
- }
-
- // Otherwise, if the offset is non-zero, we need to find out if there is a
- // field at Offset in 'A's type. If so, we can pull the cast through the
- // GEP.
- SmallVector<Value*, 8> NewIndices;
- const Type *InTy =
- cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
- if (FindElementAtOffset(InTy, Offset, NewIndices)) {
- Value *NGEP = GEP.isInBounds() ?
- Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices.begin(),
- NewIndices.end()) :
- Builder->CreateGEP(BCI->getOperand(0), NewIndices.begin(),
- NewIndices.end());
-
- if (NGEP->getType() == GEP.getType())
- return ReplaceInstUsesWith(GEP, NGEP);
- NGEP->takeName(&GEP);
- return new BitCastInst(NGEP, GEP.getType());
- }
- }
- }
-
- return 0;
-}
-
-Instruction *InstCombiner::visitFree(Instruction &FI) {
- Value *Op = FI.getOperand(1);
-
- // free undef -> unreachable.
- if (isa<UndefValue>(Op)) {
- // Insert a new store to null because we cannot modify the CFG here.
- new StoreInst(ConstantInt::getTrue(FI.getContext()),
- UndefValue::get(Type::getInt1PtrTy(FI.getContext())), &FI);
- return EraseInstFromFunction(FI);
- }
-
- // If we have 'free null' delete the instruction. This can happen in stl code
- // when lots of inlining happens.
- if (isa<ConstantPointerNull>(Op))
- return EraseInstFromFunction(FI);
-
- // If we have a malloc call whose only use is a free call, delete both.
- if (isMalloc(Op)) {
- if (CallInst* CI = extractMallocCallFromBitCast(Op)) {
- if (Op->hasOneUse() && CI->hasOneUse()) {
- EraseInstFromFunction(FI);
- EraseInstFromFunction(*CI);
- return EraseInstFromFunction(*cast<Instruction>(Op));
- }
- } else {
- // Op is a call to malloc
- if (Op->hasOneUse()) {
- EraseInstFromFunction(FI);
- return EraseInstFromFunction(*cast<Instruction>(Op));
- }
- }
- }
-
- return 0;
-}
-
-
-
-Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
- // Change br (not X), label True, label False to: br X, label False, True
- Value *X = 0;
- BasicBlock *TrueDest;
- BasicBlock *FalseDest;
- if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
- !isa<Constant>(X)) {
- // Swap Destinations and condition...
- BI.setCondition(X);
- BI.setSuccessor(0, FalseDest);
- BI.setSuccessor(1, TrueDest);
- return &BI;
- }
-
- // Canonicalize fcmp_one -> fcmp_oeq
- FCmpInst::Predicate FPred; Value *Y;
- if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
- TrueDest, FalseDest)) &&
- BI.getCondition()->hasOneUse())
- if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
- FPred == FCmpInst::FCMP_OGE) {
- FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
- Cond->setPredicate(FCmpInst::getInversePredicate(FPred));
-
- // Swap Destinations and condition.
- BI.setSuccessor(0, FalseDest);
- BI.setSuccessor(1, TrueDest);
- Worklist.Add(Cond);
- return &BI;
- }
-
- // Canonicalize icmp_ne -> icmp_eq
- ICmpInst::Predicate IPred;
- if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
- TrueDest, FalseDest)) &&
- BI.getCondition()->hasOneUse())
- if (IPred == ICmpInst::ICMP_NE || IPred == ICmpInst::ICMP_ULE ||
- IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
- IPred == ICmpInst::ICMP_SGE) {
- ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
- Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
- // Swap Destinations and condition.
- BI.setSuccessor(0, FalseDest);
- BI.setSuccessor(1, TrueDest);
- Worklist.Add(Cond);
- return &BI;
- }
-
- return 0;
-}
-
-Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
- Value *Cond = SI.getCondition();
- if (Instruction *I = dyn_cast<Instruction>(Cond)) {
- if (I->getOpcode() == Instruction::Add)
- if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
- // change 'switch (X+4) case 1:' into 'switch (X) case -3'
- for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2)
- SI.setOperand(i,
- ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)),
- AddRHS));
- SI.setOperand(0, I->getOperand(0));
- Worklist.Add(I);
- return &SI;
- }
- }
- return 0;
-}
-
-Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
- Value *Agg = EV.getAggregateOperand();
-
- if (!EV.hasIndices())
- return ReplaceInstUsesWith(EV, Agg);
-
- if (Constant *C = dyn_cast<Constant>(Agg)) {
- if (isa<UndefValue>(C))
- return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType()));
-
- if (isa<ConstantAggregateZero>(C))
- return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType()));
-
- if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
- // Extract the element indexed by the first index out of the constant
- Value *V = C->getOperand(*EV.idx_begin());
- if (EV.getNumIndices() > 1)
- // Extract the remaining indices out of the constant indexed by the
- // first index
- return ExtractValueInst::Create(V, EV.idx_begin() + 1, EV.idx_end());
- else
- return ReplaceInstUsesWith(EV, V);
- }
- return 0; // Can't handle other constants
- }
- if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
- // We're extracting from an insertvalue instruction, compare the indices
- const unsigned *exti, *exte, *insi, *inse;
- for (exti = EV.idx_begin(), insi = IV->idx_begin(),
- exte = EV.idx_end(), inse = IV->idx_end();
- exti != exte && insi != inse;
- ++exti, ++insi) {
- if (*insi != *exti)
- // The insert and extract both reference distinctly different elements.
- // This means the extract is not influenced by the insert, and we can
- // replace the aggregate operand of the extract with the aggregate
- // operand of the insert. i.e., replace
- // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
- // %E = extractvalue { i32, { i32 } } %I, 0
- // with
- // %E = extractvalue { i32, { i32 } } %A, 0
- return ExtractValueInst::Create(IV->getAggregateOperand(),
- EV.idx_begin(), EV.idx_end());
- }
- if (exti == exte && insi == inse)
- // Both iterators are at the end: Index lists are identical. Replace
- // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
- // %C = extractvalue { i32, { i32 } } %B, 1, 0
- // with "i32 42"
- return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
- if (exti == exte) {
- // The extract list is a prefix of the insert list. i.e. replace
- // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
- // %E = extractvalue { i32, { i32 } } %I, 1
- // with
- // %X = extractvalue { i32, { i32 } } %A, 1
- // %E = insertvalue { i32 } %X, i32 42, 0
- // by switching the order of the insert and extract (though the
- // insertvalue should be left in, since it may have other uses).
- Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
- EV.idx_begin(), EV.idx_end());
- return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
- insi, inse);
- }
- if (insi == inse)
- // The insert list is a prefix of the extract list
- // We can simply remove the common indices from the extract and make it
- // operate on the inserted value instead of the insertvalue result.
- // i.e., replace
- // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
- // %E = extractvalue { i32, { i32 } } %I, 1, 0
- // with
- // %E extractvalue { i32 } { i32 42 }, 0
- return ExtractValueInst::Create(IV->getInsertedValueOperand(),
- exti, exte);
- }
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
- // We're extracting from an intrinsic, see if we're the only user, which
- // allows us to simplify multiple result intrinsics to simpler things that
- // just get one value.
- if (II->hasOneUse()) {
- // Check if we're grabbing the overflow bit or the result of a 'with
- // overflow' intrinsic. If it's the latter we can remove the intrinsic
- // and replace it with a traditional binary instruction.
- switch (II->getIntrinsicID()) {
- case Intrinsic::uadd_with_overflow:
- case Intrinsic::sadd_with_overflow:
- if (*EV.idx_begin() == 0) { // Normal result.
- Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
- II->replaceAllUsesWith(UndefValue::get(II->getType()));
- EraseInstFromFunction(*II);
- return BinaryOperator::CreateAdd(LHS, RHS);
- }
- break;
- case Intrinsic::usub_with_overflow:
- case Intrinsic::ssub_with_overflow:
- if (*EV.idx_begin() == 0) { // Normal result.
- Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
- II->replaceAllUsesWith(UndefValue::get(II->getType()));
- EraseInstFromFunction(*II);
- return BinaryOperator::CreateSub(LHS, RHS);
- }
- break;
- case Intrinsic::umul_with_overflow:
- case Intrinsic::smul_with_overflow:
- if (*EV.idx_begin() == 0) { // Normal result.
- Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
- II->replaceAllUsesWith(UndefValue::get(II->getType()));
- EraseInstFromFunction(*II);
- return BinaryOperator::CreateMul(LHS, RHS);
- }
- break;
- default:
- break;
- }
- }
- }
- // Can't simplify extracts from other values. Note that nested extracts are
- // already simplified implicitly by the above (extract ( extract (insert) )
- // will be translated into extract ( insert ( extract ) ) first and then just
- // the value inserted, if appropriate).
- return 0;
-}
-
-
-
-
-/// TryToSinkInstruction - Try to move the specified instruction from its
-/// current block into the beginning of DestBlock, which can only happen if it's
-/// safe to move the instruction past all of the instructions between it and the
-/// end of its block.
-static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
- assert(I->hasOneUse() && "Invariants didn't hold!");
-
- // Cannot move control-flow-involving, volatile loads, vaarg, etc.
- if (isa<PHINode>(I) || I->mayHaveSideEffects() || isa<TerminatorInst>(I))
- return false;
-
- // Do not sink alloca instructions out of the entry block.
- if (isa<AllocaInst>(I) && I->getParent() ==
- &DestBlock->getParent()->getEntryBlock())
- return false;
-
- // We can only sink load instructions if there is nothing between the load and
- // the end of block that could change the value.
- if (I->mayReadFromMemory()) {
- for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
- Scan != E; ++Scan)
- if (Scan->mayWriteToMemory())
- return false;
- }
-
- BasicBlock::iterator InsertPos = DestBlock->getFirstNonPHI();
-
- I->moveBefore(InsertPos);
- ++NumSunkInst;
- return true;
-}
-
-
-/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
-/// all reachable code to the worklist.
-///
-/// This has a couple of tricks to make the code faster and more powerful. In
-/// particular, we constant fold and DCE instructions as we go, to avoid adding
-/// them to the worklist (this significantly speeds up instcombine on code where
-/// many instructions are dead or constant). Additionally, if we find a branch
-/// whose condition is a known constant, we only visit the reachable successors.
-///
-static bool AddReachableCodeToWorklist(BasicBlock *BB,
- SmallPtrSet<BasicBlock*, 64> &Visited,
- InstCombiner &IC,
- const TargetData *TD) {
- bool MadeIRChange = false;
- SmallVector<BasicBlock*, 256> Worklist;
- Worklist.push_back(BB);
-
- std::vector<Instruction*> InstrsForInstCombineWorklist;
- InstrsForInstCombineWorklist.reserve(128);
-
- SmallPtrSet<ConstantExpr*, 64> FoldedConstants;
-
- do {
- BB = Worklist.pop_back_val();
-
- // We have now visited this block! If we've already been here, ignore it.
- if (!Visited.insert(BB)) continue;
-
- for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
- Instruction *Inst = BBI++;
-
- // DCE instruction if trivially dead.
- if (isInstructionTriviallyDead(Inst)) {
- ++NumDeadInst;
- DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
- Inst->eraseFromParent();
- continue;
- }
-
- // ConstantProp instruction if trivially constant.
- if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
- if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
- DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
- << *Inst << '\n');
- Inst->replaceAllUsesWith(C);
- ++NumConstProp;
- Inst->eraseFromParent();
- continue;
- }
-
- if (TD) {
- // See if we can constant fold its operands.
- for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
- i != e; ++i) {
- ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
- if (CE == 0) continue;
-
- // If we already folded this constant, don't try again.
- if (!FoldedConstants.insert(CE))
- continue;
-
- Constant *NewC = ConstantFoldConstantExpression(CE, TD);
- if (NewC && NewC != CE) {
- *i = NewC;
- MadeIRChange = true;
- }
- }
- }
-
- InstrsForInstCombineWorklist.push_back(Inst);
- }
-
- // Recursively visit successors. If this is a branch or switch on a
- // constant, only visit the reachable successor.
- TerminatorInst *TI = BB->getTerminator();
- if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
- if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
- bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
- BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
- Worklist.push_back(ReachableBB);
- continue;
- }
- } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
- if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
- // See if this is an explicit destination.
- for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
- if (SI->getCaseValue(i) == Cond) {
- BasicBlock *ReachableBB = SI->getSuccessor(i);
- Worklist.push_back(ReachableBB);
- continue;
- }
-
- // Otherwise it is the default destination.
- Worklist.push_back(SI->getSuccessor(0));
- continue;
- }
- }
-
- for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
- Worklist.push_back(TI->getSuccessor(i));
- } while (!Worklist.empty());
-
- // Once we've found all of the instructions to add to instcombine's worklist,
- // add them in reverse order. This way instcombine will visit from the top
- // of the function down. This jives well with the way that it adds all uses
- // of instructions to the worklist after doing a transformation, thus avoiding
- // some N^2 behavior in pathological cases.
- IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
- InstrsForInstCombineWorklist.size());
-
- return MadeIRChange;
-}
-
-bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
- MadeIRChange = false;
-
- DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
- << F.getNameStr() << "\n");
-
- {
- // Do a depth-first traversal of the function, populate the worklist with
- // the reachable instructions. Ignore blocks that are not reachable. Keep
- // track of which blocks we visit.
- SmallPtrSet<BasicBlock*, 64> Visited;
- MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);
-
- // Do a quick scan over the function. If we find any blocks that are
- // unreachable, remove any instructions inside of them. This prevents
- // the instcombine code from having to deal with some bad special cases.
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
- if (!Visited.count(BB)) {
- Instruction *Term = BB->getTerminator();
- while (Term != BB->begin()) { // Remove instrs bottom-up
- BasicBlock::iterator I = Term; --I;
-
- DEBUG(errs() << "IC: DCE: " << *I << '\n');
- // A debug intrinsic shouldn't force another iteration if we weren't
- // going to do one without it.
- if (!isa<DbgInfoIntrinsic>(I)) {
- ++NumDeadInst;
- MadeIRChange = true;
- }
-
- // If I is not void type then replaceAllUsesWith undef.
- // This allows ValueHandlers and custom metadata to adjust itself.
- if (!I->getType()->isVoidTy())
- I->replaceAllUsesWith(UndefValue::get(I->getType()));
- I->eraseFromParent();
- }
- }
- }
-
- while (!Worklist.isEmpty()) {
- Instruction *I = Worklist.RemoveOne();
- if (I == 0) continue; // skip null values.
-
- // Check to see if we can DCE the instruction.
- if (isInstructionTriviallyDead(I)) {
- DEBUG(errs() << "IC: DCE: " << *I << '\n');
- EraseInstFromFunction(*I);
- ++NumDeadInst;
- MadeIRChange = true;
- continue;
- }
-
- // Instruction isn't dead, see if we can constant propagate it.
- if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
- if (Constant *C = ConstantFoldInstruction(I, TD)) {
- DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
-
- // Add operands to the worklist.
- ReplaceInstUsesWith(*I, C);
- ++NumConstProp;
- EraseInstFromFunction(*I);
- MadeIRChange = true;
- continue;
- }
-
- // See if we can trivially sink this instruction to a successor basic block.
- if (I->hasOneUse()) {
- BasicBlock *BB = I->getParent();
- Instruction *UserInst = cast<Instruction>(I->use_back());
- BasicBlock *UserParent;
-
- // Get the block the use occurs in.
- if (PHINode *PN = dyn_cast<PHINode>(UserInst))
- UserParent = PN->getIncomingBlock(I->use_begin().getUse());
- else
- UserParent = UserInst->getParent();
-
- if (UserParent != BB) {
- bool UserIsSuccessor = false;
- // See if the user is one of our successors.
- for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
- if (*SI == UserParent) {
- UserIsSuccessor = true;
- break;
- }
-
- // If the user is one of our immediate successors, and if that successor
- // only has us as a predecessor (we'd have to split the critical edge
- // otherwise), we can keep going.
- if (UserIsSuccessor && UserParent->getSinglePredecessor())
- // Okay, the CFG is simple enough, try to sink this instruction.
- MadeIRChange |= TryToSinkInstruction(I, UserParent);
- }
- }
-
- // Now that we have an instruction, try combining it to simplify it.
- Builder->SetInsertPoint(I->getParent(), I);
-
-#ifndef NDEBUG
- std::string OrigI;
-#endif
- DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
- DEBUG(errs() << "IC: Visiting: " << OrigI << '\n');
-
- if (Instruction *Result = visit(*I)) {
- ++NumCombined;
- // Should we replace the old instruction with a new one?
- if (Result != I) {
- DEBUG(errs() << "IC: Old = " << *I << '\n'
- << " New = " << *Result << '\n');
-
- // Everything uses the new instruction now.
- I->replaceAllUsesWith(Result);
-
- // Push the new instruction and any users onto the worklist.
- Worklist.Add(Result);
- Worklist.AddUsersToWorkList(*Result);
-
- // Move the name to the new instruction first.
- Result->takeName(I);
-
- // Insert the new instruction into the basic block...
- BasicBlock *InstParent = I->getParent();
- BasicBlock::iterator InsertPos = I;
-
- if (!isa<PHINode>(Result)) // If combining a PHI, don't insert
- while (isa<PHINode>(InsertPos)) // middle of a block of PHIs.
- ++InsertPos;
-
- InstParent->getInstList().insert(InsertPos, Result);
-
- EraseInstFromFunction(*I);
- } else {
-#ifndef NDEBUG
- DEBUG(errs() << "IC: Mod = " << OrigI << '\n'
- << " New = " << *I << '\n');
-#endif
-
- // If the instruction was modified, it's possible that it is now dead.
- // if so, remove it.
- if (isInstructionTriviallyDead(I)) {
- EraseInstFromFunction(*I);
- } else {
- Worklist.Add(I);
- Worklist.AddUsersToWorkList(*I);
- }
- }
- MadeIRChange = true;
- }
- }
-
- Worklist.Zap();
- return MadeIRChange;
-}
-
-
-bool InstCombiner::runOnFunction(Function &F) {
- MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
- TD = getAnalysisIfAvailable<TargetData>();
-
-
- /// Builder - This is an IRBuilder that automatically inserts new
- /// instructions into the worklist when they are created.
- IRBuilder<true, TargetFolder, InstCombineIRInserter>
- TheBuilder(F.getContext(), TargetFolder(TD),
- InstCombineIRInserter(Worklist));
- Builder = &TheBuilder;
-
- bool EverMadeChange = false;
-
- // Iterate while there is work to do.
- unsigned Iteration = 0;
- while (DoOneIteration(F, Iteration++))
- EverMadeChange = true;
-
- Builder = 0;
- return EverMadeChange;
-}
-
-FunctionPass *llvm::createInstructionCombiningPass() {
- return new InstCombiner();
-}
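The header comment of the deleted InstructionCombining.cpp above gives the canonical example: %Y = add i32 %X, 1 followed by %Z = add i32 %Y, 1 becomes %Z = add i32 %X, 2. Below is a minimal standalone driver sketch of that fold, assuming the 2.8-era C++ API bundled under libclamav/c++/llvm; the demo function f and every name in it are illustrative only and not part of this patch:

#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Constants.h"
#include "llvm/PassManager.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("instcombine-demo", Ctx);
  const Type *I32 = Type::getInt32Ty(Ctx);

  // Build: define i32 @f(i32 %X) { %Y = add i32 %X, 1  %Z = add i32 %Y, 1  ret i32 %Z }
  Function *F = cast<Function>(M.getOrInsertFunction("f", I32, I32, (Type *)0));
  Value *X = &*F->arg_begin();
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));
  Value *Y = B.CreateAdd(X, ConstantInt::get(I32, 1), "Y");
  B.CreateRet(B.CreateAdd(Y, ConstantInt::get(I32, 1), "Z"));

  // Run only the pass created by createInstructionCombiningPass() (see above).
  FunctionPassManager FPM(&M);
  FPM.add(createInstructionCombiningPass());
  FPM.doInitialization();
  FPM.run(*F);
  FPM.doFinalization();

  M.dump();   // the body of f should now be a single 'add i32 %X, 2' feeding the ret
  return 0;
}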
diff --git a/libclamav/c++/llvm/lib/Transforms/InstCombine/Makefile b/libclamav/c++/llvm/lib/Transforms/InstCombine/Makefile
deleted file mode 100644
index 0c488e7..0000000
--- a/libclamav/c++/llvm/lib/Transforms/InstCombine/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- lib/Transforms/InstCombine/Makefile -----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMInstCombine
-BUILD_ARCHIVE = 1
-
-include $(LEVEL)/Makefile.common
-
diff --git a/libclamav/c++/llvm/lib/Transforms/Instrumentation/EdgeProfiling.cpp b/libclamav/c++/llvm/lib/Transforms/Instrumentation/EdgeProfiling.cpp
index 9ae3786..a77d70c 100644
--- a/libclamav/c++/llvm/lib/Transforms/Instrumentation/EdgeProfiling.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Instrumentation/EdgeProfiling.cpp
@@ -34,7 +34,7 @@ namespace {
bool runOnModule(Module &M);
public:
static char ID; // Pass identification, replacement for typeid
- EdgeProfiler() : ModulePass(&ID) {}
+ EdgeProfiler() : ModulePass(ID) {}
virtual const char *getPassName() const {
return "Edge Profiler";
@@ -43,8 +43,8 @@ namespace {
}
char EdgeProfiler::ID = 0;
-static RegisterPass<EdgeProfiler>
-X("insert-edge-profiling", "Insert instrumentation for edge profiling");
+INITIALIZE_PASS(EdgeProfiler, "insert-edge-profiling",
+ "Insert instrumentation for edge profiling", false, false);
ModulePass *llvm::createEdgeProfilerPass() { return new EdgeProfiler(); }
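The hunk above (and the OptimalEdgeProfiling one that follows) moves from a file-local static RegisterPass<> object to the INITIALIZE_PASS macro, and passes the pass ID to the ModulePass constructor by value instead of by address. A minimal sketch of the updated idiom, using a hypothetical pass name:

#include "llvm/Module.h"
#include "llvm/Pass.h"
using namespace llvm;

namespace {
  // Hypothetical example pass, registered the same way as EdgeProfiler above.
  struct ExampleCounter : public ModulePass {
    static char ID;                         // pass identification
    ExampleCounter() : ModulePass(ID) {}    // note: ID, no longer &ID
    virtual bool runOnModule(Module &) { return false; }
  };
}
char ExampleCounter::ID = 0;
INITIALIZE_PASS(ExampleCounter, "example-counter",
                "Example of the updated pass registration", false, false);

The two trailing booleans correspond to the old RegisterPass CFG-only and is-analysis flags.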
diff --git a/libclamav/c++/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp b/libclamav/c++/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
index 5650150..8eec987 100644
--- a/libclamav/c++/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
@@ -36,7 +36,7 @@ namespace {
bool runOnModule(Module &M);
public:
static char ID; // Pass identification, replacement for typeid
- OptimalEdgeProfiler() : ModulePass(&ID) {}
+ OptimalEdgeProfiler() : ModulePass(ID) {}
void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequiredID(ProfileEstimatorPassID);
@@ -50,9 +50,9 @@ namespace {
}
char OptimalEdgeProfiler::ID = 0;
-static RegisterPass<OptimalEdgeProfiler>
-X("insert-optimal-edge-profiling",
- "Insert optimal instrumentation for edge profiling");
+INITIALIZE_PASS(OptimalEdgeProfiler, "insert-optimal-edge-profiling",
+ "Insert optimal instrumentation for edge profiling",
+ false, false);
ModulePass *llvm::createOptimalEdgeProfilerPass() {
return new OptimalEdgeProfiler();
@@ -143,7 +143,7 @@ bool OptimalEdgeProfiler::runOnModule(Module &M) {
ProfileInfo::Edge edge = ProfileInfo::getEdge(0,entry);
if (!std::binary_search(MST.begin(), MST.end(), edge)) {
printEdgeCounter(edge,entry,i);
- IncrementCounterInBlock(entry, i, Counters); NumEdgesInserted++;
+ IncrementCounterInBlock(entry, i, Counters); ++NumEdgesInserted;
Initializer[i++] = (Zero);
} else{
Initializer[i++] = (Uncounted);
@@ -166,7 +166,7 @@ bool OptimalEdgeProfiler::runOnModule(Module &M) {
ProfileInfo::Edge edge = ProfileInfo::getEdge(BB,0);
if (!std::binary_search(MST.begin(), MST.end(), edge)) {
printEdgeCounter(edge,BB,i);
- IncrementCounterInBlock(BB, i, Counters); NumEdgesInserted++;
+ IncrementCounterInBlock(BB, i, Counters); ++NumEdgesInserted;
Initializer[i++] = (Zero);
} else{
Initializer[i++] = (Uncounted);
@@ -189,11 +189,11 @@ bool OptimalEdgeProfiler::runOnModule(Module &M) {
if (TI->getNumSuccessors() == 1) {
// Insert counter at the start of the block
printEdgeCounter(edge,BB,i);
- IncrementCounterInBlock(BB, i, Counters); NumEdgesInserted++;
+ IncrementCounterInBlock(BB, i, Counters); ++NumEdgesInserted;
} else {
// Insert counter at the start of the block
printEdgeCounter(edge,Succ,i);
- IncrementCounterInBlock(Succ, i, Counters); NumEdgesInserted++;
+ IncrementCounterInBlock(Succ, i, Counters); ++NumEdgesInserted;
}
Initializer[i++] = (Zero);
} else {
diff --git a/libclamav/c++/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp b/libclamav/c++/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp
index 8662a82..1a30e9b 100644
--- a/libclamav/c++/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp
@@ -61,8 +61,8 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
}
Args[3] = ConstantInt::get(Type::getInt32Ty(Context), NumElements);
- Instruction *InitCall = CallInst::Create(InitFn, Args.begin(), Args.end(),
- "newargc", InsertPos);
+ CallInst *InitCall = CallInst::Create(InitFn, Args.begin(), Args.end(),
+ "newargc", InsertPos);
// If argc or argv are not available in main, just pass null values in.
Function::arg_iterator AI;
@@ -73,10 +73,10 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
if (AI->getType() != ArgVTy) {
Instruction::CastOps opcode = CastInst::getCastOpcode(AI, false, ArgVTy,
false);
- InitCall->setOperand(2,
+ InitCall->setArgOperand(1,
CastInst::Create(opcode, AI, ArgVTy, "argv.cast", InitCall));
} else {
- InitCall->setOperand(2, AI);
+ InitCall->setArgOperand(1, AI);
}
/* FALL THROUGH */
@@ -93,12 +93,12 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
}
opcode = CastInst::getCastOpcode(AI, true,
Type::getInt32Ty(Context), true);
- InitCall->setOperand(1,
+ InitCall->setArgOperand(0,
CastInst::Create(opcode, AI, Type::getInt32Ty(Context),
"argc.cast", InitCall));
} else {
AI->replaceAllUsesWith(InitCall);
- InitCall->setOperand(1, AI);
+ InitCall->setArgOperand(0, AI);
}
case 0: break;
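The ProfilingUtils hunks above switch from raw setOperand() indices to CallInst's argument accessors, so the code addresses "call argument i" directly and no longer depends on where the callee sits in the operand list. A small sketch of the accessors; the helper and its name are illustrative only:

#include <cassert>
#include "llvm/Instructions.h"
using namespace llvm;

// Wire argc/argv into the first two arguments of an existing init call,
// mirroring the intent of InsertProfilingInitCall above.
static void wireInitCallArgs(CallInst *InitCall, Value *Argc, Value *Argv) {
  InitCall->setArgOperand(0, Argc);   // previously setOperand(1, ...)
  InitCall->setArgOperand(1, Argv);   // previously setOperand(2, ...)
  assert(InitCall->getArgOperand(0) == Argc && "argument indices are 0-based");
}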
diff --git a/libclamav/c++/llvm/lib/Transforms/Makefile b/libclamav/c++/llvm/lib/Transforms/Makefile
index ea4a115..e527be2 100644
--- a/libclamav/c++/llvm/lib/Transforms/Makefile
+++ b/libclamav/c++/llvm/lib/Transforms/Makefile
@@ -13,7 +13,7 @@ PARALLEL_DIRS = Utils Instrumentation Scalar InstCombine IPO Hello
include $(LEVEL)/Makefile.config
# No support for plugins on windows targets
-ifeq ($(HOST_OS), $(filter $(HOST_OS), Cygwin MingW))
+ifeq ($(HOST_OS), $(filter $(HOST_OS), Cygwin MingW Minix))
PARALLEL_DIRS := $(filter-out Hello, $(PARALLEL_DIRS))
endif
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/ABCD.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/ABCD.cpp
deleted file mode 100644
index ea8e5c3..0000000
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/ABCD.cpp
+++ /dev/null
@@ -1,1117 +0,0 @@
-//===------- ABCD.cpp - Removes redundant conditional branches ------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass removes redundant branch instructions. This algorithm was
-// described by Rastislav Bodik, Rajiv Gupta and Vivek Sarkar in their paper
-// "ABCD: Eliminating Array Bounds Checks on Demand (2000)". The original
-// algorithm was created to remove array bound checks for strongly typed
-// languages. This implementation expands the idea and removes any conditional
-// branches that can be proved redundant, not only those used in array bound
-// checks. With the SSI representation, each variable has a
-// constraint. By analyzing these constraints we can prove that a branch is
-// redundant. When a branch is proved redundant it means that
-// one direction will always be taken; thus, we can change this branch into an
-// unconditional jump.
-// It is advisable to run SimplifyCFG and Aggressive Dead Code Elimination
-// after ABCD to clean up the code.
-// This implementation was created based on the implementation of the ABCD
-// algorithm implemented for the compiler Jitrino.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "abcd"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Constants.h"
-#include "llvm/Function.h"
-#include "llvm/Instructions.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Transforms/Scalar.h"
-#include "llvm/Transforms/Utils/SSI.h"
-
-using namespace llvm;
-
-STATISTIC(NumBranchTested, "Number of conditional branches analyzed");
-STATISTIC(NumBranchRemoved, "Number of conditional branches removed");
-
-namespace {
-
-class ABCD : public FunctionPass {
- public:
- static char ID; // Pass identification, replacement for typeid.
- ABCD() : FunctionPass(&ID) {}
-
- void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<SSI>();
- }
-
- bool runOnFunction(Function &F);
-
- private:
- /// Keep track of whether we've modified the program yet.
- bool modified;
-
- enum ProveResult {
- False = 0,
- Reduced = 1,
- True = 2
- };
-
- typedef ProveResult (*meet_function)(ProveResult, ProveResult);
- static ProveResult max(ProveResult res1, ProveResult res2) {
- return (ProveResult) std::max(res1, res2);
- }
- static ProveResult min(ProveResult res1, ProveResult res2) {
- return (ProveResult) std::min(res1, res2);
- }
-
- class Bound {
- public:
- Bound(APInt v, bool upper) : value(v), upper_bound(upper) {}
- Bound(const Bound *b, int cnst)
- : value(b->value - cnst), upper_bound(b->upper_bound) {}
- Bound(const Bound *b, const APInt &cnst)
- : value(b->value - cnst), upper_bound(b->upper_bound) {}
-
- /// Test if Bound is an upper bound
- bool isUpperBound() const { return upper_bound; }
-
- /// Get the bitwidth of this bound
- int32_t getBitWidth() const { return value.getBitWidth(); }
-
- /// Creates a Bound incrementing the one received
- static Bound *createIncrement(const Bound *b) {
- return new Bound(b->isUpperBound() ? b->value+1 : b->value-1,
- b->upper_bound);
- }
-
- /// Creates a Bound decrementing the one received
- static Bound *createDecrement(const Bound *b) {
- return new Bound(b->isUpperBound() ? b->value-1 : b->value+1,
- b->upper_bound);
- }
-
- /// Test if two bounds are equal
- static bool eq(const Bound *a, const Bound *b) {
- if (!a || !b) return false;
-
- assert(a->isUpperBound() == b->isUpperBound());
- return a->value == b->value;
- }
-
- /// Test if val is less than or equal to Bound b
- static bool leq(APInt val, const Bound *b) {
- if (!b) return false;
- return b->isUpperBound() ? val.sle(b->value) : val.sge(b->value);
- }
-
- /// Test if Bound a is less than or equal to Bound b
- static bool leq(const Bound *a, const Bound *b) {
- if (!a || !b) return false;
-
- assert(a->isUpperBound() == b->isUpperBound());
- return a->isUpperBound() ? a->value.sle(b->value) :
- a->value.sge(b->value);
- }
-
- /// Test if Bound a is less than Bound b
- static bool lt(const Bound *a, const Bound *b) {
- if (!a || !b) return false;
-
- assert(a->isUpperBound() == b->isUpperBound());
- return a->isUpperBound() ? a->value.slt(b->value) :
- a->value.sgt(b->value);
- }
-
- /// Test if Bound b is greater than or equal to val
- static bool geq(const Bound *b, APInt val) {
- return leq(val, b);
- }
-
- /// Test if Bound a is greater than or equal to Bound b
- static bool geq(const Bound *a, const Bound *b) {
- return leq(b, a);
- }
-
- private:
- APInt value;
- bool upper_bound;
- };
-
- /// This class is used to store results for some parts of the graph,
- /// so information does not need to be recalculated. The maximum false,
- /// minimum true and minimum reduced results are stored
- class MemoizedResultChart {
- public:
- MemoizedResultChart()
- : max_false(NULL), min_true(NULL), min_reduced(NULL) {}
-
- /// Returns the max false
- Bound *getFalse() const { return max_false; }
-
- /// Returns the min true
- Bound *getTrue() const { return min_true; }
-
- /// Returns the min reduced
- Bound *getReduced() const { return min_reduced; }
-
- /// Return the stored result for this bound
- ProveResult getResult(const Bound *bound) const;
-
- /// Stores a false found
- void addFalse(Bound *bound);
-
- /// Stores a true found
- void addTrue(Bound *bound);
-
- /// Stores a Reduced found
- void addReduced(Bound *bound);
-
- /// Clears redundant reduced
- /// If a min_true is smaller than a min_reduced then the min_reduced
- /// is unnecessary and then removed. It also works for min_reduced
- /// being smaller than max_false.
- void clearRedundantReduced();
-
- void clear() {
- delete max_false;
- delete min_true;
- delete min_reduced;
- }
-
- private:
- Bound *max_false, *min_true, *min_reduced;
- };
-
- /// This class stores the result found for a node of the graph,
- /// so these results do not need to be recalculated, only searched for.
- class MemoizedResult {
- public:
- /// Test if there is a true result stored from b to a
- /// that is less than the bound
- bool hasTrue(Value *b, const Bound *bound) const {
- Bound *trueBound = map.lookup(b).getTrue();
- return trueBound && Bound::leq(trueBound, bound);
- }
-
- /// Test if there is a false result stored from b to a
- /// that is less than the bound
- bool hasFalse(Value *b, const Bound *bound) const {
- Bound *falseBound = map.lookup(b).getFalse();
- return falseBound && Bound::leq(falseBound, bound);
- }
-
- /// Test if there is a reduced result stored from b to a
- /// that is less than the bound
- bool hasReduced(Value *b, const Bound *bound) const {
- Bound *reducedBound = map.lookup(b).getReduced();
- return reducedBound && Bound::leq(reducedBound, bound);
- }
-
- /// Returns the stored bound for b
- ProveResult getBoundResult(Value *b, Bound *bound) {
- return map[b].getResult(bound);
- }
-
- /// Clears the map
- void clear() {
- DenseMapIterator<Value*, MemoizedResultChart> begin = map.begin();
- DenseMapIterator<Value*, MemoizedResultChart> end = map.end();
- for (; begin != end; ++begin) {
- begin->second.clear();
- }
- map.clear();
- }
-
- /// Stores the bound found
- void updateBound(Value *b, Bound *bound, const ProveResult res);
-
- private:
- // Maps a node in the graph to the results found for it.
- DenseMap<Value*, MemoizedResultChart> map;
- };
-
- /// This class represents an edge in the inequality graph used by the
- /// ABCD algorithm. An edge connects node v to node u with a value c if
- /// we could infer a constraint v <= u + c in the source program.
- class Edge {
- public:
- Edge(Value *V, APInt val, bool upper)
- : vertex(V), value(val), upper_bound(upper) {}
-
- Value *getVertex() const { return vertex; }
- const APInt &getValue() const { return value; }
- bool isUpperBound() const { return upper_bound; }
-
- private:
- Value *vertex;
- APInt value;
- bool upper_bound;
- };
-
- /// Weighted and Directed graph to represent constraints.
- /// There is one type of constraint, a <= b + X, which will generate an
- /// edge from b to a with weight X.
- class InequalityGraph {
- public:
-
- /// Adds an edge from V_from to V_to with weight value
- void addEdge(Value *V_from, Value *V_to, APInt value, bool upper);
-
- /// Test if there is a node V
- bool hasNode(Value *V) const { return graph.count(V); }
-
- /// Test if there is any edge from V in the upper direction
- bool hasEdge(Value *V, bool upper) const;
-
- /// Returns all edges pointed by vertex V
- SmallPtrSet<Edge *, 16> getEdges(Value *V) const {
- return graph.lookup(V);
- }
-
- /// Prints the graph in dot format.
- /// Blue edges represent upper bounds and red edges lower bounds.
- void printGraph(raw_ostream &OS, Function &F) const {
- printHeader(OS, F);
- printBody(OS);
- printFooter(OS);
- }
-
- /// Clear the graph
- void clear() {
- graph.clear();
- }
-
- private:
- DenseMap<Value *, SmallPtrSet<Edge *, 16> > graph;
-
- /// Adds a Node to the graph.
- DenseMap<Value *, SmallPtrSet<Edge *, 16> >::iterator addNode(Value *V) {
- SmallPtrSet<Edge *, 16> p;
- return graph.insert(std::make_pair(V, p)).first;
- }
-
- /// Prints the header of the dot file
- void printHeader(raw_ostream &OS, Function &F) const;
-
- /// Prints the footer of the dot file
- void printFooter(raw_ostream &OS) const {
- OS << "}\n";
- }
-
- /// Prints the body of the dot file
- void printBody(raw_ostream &OS) const;
-
- /// Prints vertex source to the dot file
- void printVertex(raw_ostream &OS, Value *source) const;
-
- /// Prints the edge to the dot file
- void printEdge(raw_ostream &OS, Value *source, Edge *edge) const;
-
- void printName(raw_ostream &OS, Value *info) const;
- };
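// Illustrative reading of the representation above (added commentary; the
// values are hypothetical, not from the patch): from a statement a = b + 3
// the pass can infer a <= b + 3, which, per the class comment, becomes an
// edge from b to a with weight 3. Upper- and lower-bound constraints are
// kept apart via Edge::isUpperBound() and are drawn in blue and red
// respectively by printGraph().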
-
- /// Iterates through all BasicBlocks; if the Terminator Instruction
- /// uses a Comparator Instruction, all operands of this comparator
- /// are sent to be transformed to SSI. Only Instruction operands are
- /// transformed.
- void createSSI(Function &F);
-
- /// Creates the graphs for this function.
- /// It will look for all comparators used in branches, and create them.
- /// These comparators will create constraints for any instruction as an
- /// operand.
- void executeABCD(Function &F);
-
- /// Seeks redundancies in the comparator instruction CI.
- /// If the ABCD algorithm can prove that the comparator CI always
- /// takes one way, then the Terminator Instruction TI is changed from
- /// a conditional branch to an unconditional one.
- /// This code basically receives a comparator, and verifies which kind of
- /// instruction it is. Depending on the kind of instruction, we use different
- /// strategies to prove its redundancy.
- void seekRedundancy(ICmpInst *ICI, TerminatorInst *TI);
-
- /// Substitutes Terminator Instruction TI, that is a conditional branch,
- /// with one unconditional branch. Succ_edge determines if the new
- /// unconditional edge will be the first or second edge of the former TI
- /// instruction.
- void removeRedundancy(TerminatorInst *TI, bool Succ_edge);
-
- /// When a conditional branch is removed, the BasicBlock that is no longer
- /// reachable will have problems in phi functions. This method fixes these
- /// phis by removing the former BasicBlock from the list of incoming BasicBlocks
- /// of all phis. In case the phi remains with no predecessor it will be
- /// marked to be removed later.
- void fixPhi(BasicBlock *BB, BasicBlock *Succ);
-
- /// Removes phis that have no predecessor
- void removePhis();
-
- /// Creates constraints for Instructions.
- /// If the constraint for this instruction has already been created
- /// nothing is done.
- void createConstraintInstruction(Instruction *I);
-
- /// Creates constraints for Binary Operators.
- /// It will create constraints only for addition and subtraction,
- /// the other binary operations are not treated by ABCD.
- /// For additions in the form a = b + X and a = X + b, where X is a constant,
- /// the constraint a <= b + X can be obtained. For this constraint, an edge
- /// a->b with weight X is added to the lower bound graph, and an edge
- /// b->a with weight -X is added to the upper bound graph.
- /// Only subtractions in the format a = b - X are used by ABCD.
- /// Edges are created using the same semantic as addition.
- void createConstraintBinaryOperator(BinaryOperator *BO);
-
- /// Creates constraints for Comparator Instructions.
- /// Only comparators that have any of the following operators
- /// are used to create constraints: >=, >, <=, <. And only if
- /// at least one operand is an Instruction. In a Comparator Instruction
- /// a op b, there will be 4 sigma functions a_t, a_f, b_t and b_f. Where
- /// t and f represent sigma for operands in true and false branches. The
- /// following constraints can be obtained. a_t <= a, a_f <= a, b_t <= b and
- /// b_f <= b. There are two more constraints that depend on the operator.
- /// For the operator <= : a_t <= b_t and b_f <= a_f-1
- /// For the operator < : a_t <= b_t-1 and b_f <= a_f
- /// For the operator >= : b_t <= a_t and a_f <= b_f-1
- /// For the operator > : b_t <= a_t-1 and a_f <= b_f
- void createConstraintCmpInst(ICmpInst *ICI, TerminatorInst *TI);
-
- /// Creates constraints for PHI nodes.
- /// In a PHI node a = phi(b,c) we can create the constraint
- /// a<= max(b,c). With this constraint there will be the edges,
- /// b->a and c->a with weight 0 in the lower bound graph, and the edges
- /// a->b and a->c with weight 0 in the upper bound graph.
- void createConstraintPHINode(PHINode *PN);
-
- /// Given a binary operator, we are only interested in the case
- /// that one operand is an Instruction and the other is a ConstantInt. In
- /// this case the method returns true, otherwise false. It also obtains the
- /// Instruction and ConstantInt from the BinaryOperator and returns them.
- bool createBinaryOperatorInfo(BinaryOperator *BO, Instruction **I1,
- Instruction **I2, ConstantInt **C1,
- ConstantInt **C2);
-
- /// This method creates a constraint between a Sigma and an Instruction.
- /// These constraints are created as soon as we find a comparator that uses a
- /// SSI variable.
- void createConstraintSigInst(Instruction *I_op, BasicBlock *BB_succ_t,
- BasicBlock *BB_succ_f, PHINode **SIG_op_t,
- PHINode **SIG_op_f);
-
- /// If PN_op1 and PN_op2 are different from NULL, create a constraint
- /// PN_op2 -> PN_op1 with value. In case any of them is NULL, replace
- /// with the respective V_op#, if V_op# is a ConstantInt.
- void createConstraintSigSig(PHINode *SIG_op1, PHINode *SIG_op2,
- ConstantInt *V_op1, ConstantInt *V_op2,
- APInt value);
-
- /// Returns the sigma representing the Instruction I in BasicBlock BB.
- /// Returns NULL in case there is no sigma for this Instruction in this
- /// Basic Block. This method assumes that sigmas are the first instructions
- /// in a block, and that there can be only two sigmas in a block. So it will
- /// only look at the first two instructions of BasicBlock BB.
- PHINode *findSigma(BasicBlock *BB, Instruction *I);
-
- /// Original ABCD algorithm to prove redundant checks.
- /// This implementation works on any kind of inequality branch.
- bool demandProve(Value *a, Value *b, int c, bool upper_bound);
-
- /// Prove that distance between b and a is <= bound
- ProveResult prove(Value *a, Value *b, Bound *bound, unsigned level);
-
- /// Updates the distance value for a and b
- void updateMemDistance(Value *a, Value *b, Bound *bound, unsigned level,
- meet_function meet);
-
- InequalityGraph inequality_graph;
- MemoizedResult mem_result;
- DenseMap<Value*, Bound*> active;
- SmallPtrSet<Value*, 16> created;
- SmallVector<PHINode *, 16> phis_to_remove;
-};
-
-} // end anonymous namespace.
-
-char ABCD::ID = 0;
-static RegisterPass<ABCD> X("abcd", "ABCD: Eliminating Array Bounds Checks on Demand");
-
-
-bool ABCD::runOnFunction(Function &F) {
- modified = false;
- createSSI(F);
- executeABCD(F);
- DEBUG(inequality_graph.printGraph(dbgs(), F));
- removePhis();
-
- inequality_graph.clear();
- mem_result.clear();
- active.clear();
- created.clear();
- phis_to_remove.clear();
- return modified;
-}
-
-/// Iterates through all BasicBlocks; if the Terminator Instruction
-/// uses a Comparator Instruction, all operands of this comparator
-/// are sent to be transformed to SSI. Only Instruction operands are
-/// transformed.
-void ABCD::createSSI(Function &F) {
- SSI *ssi = &getAnalysis<SSI>();
-
- SmallVector<Instruction *, 16> Insts;
-
- for (Function::iterator begin = F.begin(), end = F.end();
- begin != end; ++begin) {
- BasicBlock *BB = begin;
- TerminatorInst *TI = BB->getTerminator();
- if (TI->getNumOperands() == 0)
- continue;
-
- if (ICmpInst *ICI = dyn_cast<ICmpInst>(TI->getOperand(0))) {
- if (Instruction *I = dyn_cast<Instruction>(ICI->getOperand(0))) {
- modified = true; // XXX: but yet createSSI might do nothing
- Insts.push_back(I);
- }
- if (Instruction *I = dyn_cast<Instruction>(ICI->getOperand(1))) {
- modified = true;
- Insts.push_back(I);
- }
- }
- }
- ssi->createSSI(Insts);
-}
-
-/// Creates the graphs for this function.
-/// It will look for all comparators used in branches, and create them.
-/// These comparators will create constraints for any instruction as an
-/// operand.
-void ABCD::executeABCD(Function &F) {
- for (Function::iterator begin = F.begin(), end = F.end();
- begin != end; ++begin) {
- BasicBlock *BB = begin;
- TerminatorInst *TI = BB->getTerminator();
- if (TI->getNumOperands() == 0)
- continue;
-
- ICmpInst *ICI = dyn_cast<ICmpInst>(TI->getOperand(0));
- if (!ICI || !ICI->getOperand(0)->getType()->isIntegerTy())
- continue;
-
- createConstraintCmpInst(ICI, TI);
- seekRedundancy(ICI, TI);
- }
-}
-
-/// Seeks redundancies in the comparator instruction CI.
-/// If the ABCD algorithm can prove that the comparator CI always
-/// takes one way, then the Terminator Instruction TI is changed from
-/// a conditional branch to an unconditional one.
-/// This code basically receives a comparator, and verifies which kind of
-/// instruction it is. Depending on the kind of instruction, we use different
-/// strategies to prove its redundancy.
-void ABCD::seekRedundancy(ICmpInst *ICI, TerminatorInst *TI) {
- CmpInst::Predicate Pred = ICI->getPredicate();
-
- Value *source, *dest;
- int distance1, distance2;
- bool upper;
-
- switch(Pred) {
- case CmpInst::ICMP_SGT: // signed greater than
- upper = false;
- distance1 = 1;
- distance2 = 0;
- break;
-
- case CmpInst::ICMP_SGE: // signed greater or equal
- upper = false;
- distance1 = 0;
- distance2 = -1;
- break;
-
- case CmpInst::ICMP_SLT: // signed less than
- upper = true;
- distance1 = -1;
- distance2 = 0;
- break;
-
- case CmpInst::ICMP_SLE: // signed less or equal
- upper = true;
- distance1 = 0;
- distance2 = 1;
- break;
-
- default:
- return;
- }
-
- ++NumBranchTested;
- source = ICI->getOperand(0);
- dest = ICI->getOperand(1);
- if (demandProve(dest, source, distance1, upper)) {
- removeRedundancy(TI, true);
- } else if (demandProve(dest, source, distance2, !upper)) {
- removeRedundancy(TI, false);
- }
-}
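// Illustrative sketch (added commentary, not from the original source; the
// names x and y are hypothetical): for a signed compare `if (x > y)`, source
// is x and dest is y, so the pass first asks demandProve(y, x, 1,
// /*upper=*/false) -- can x - y >= 1 be proven? -- and, if so, folds the
// branch to its true successor. Otherwise it asks demandProve(y, x, 0,
// /*upper=*/true), i.e. x - y <= 0, to fold the branch to its false
// successor instead.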
-
-/// Substitutes Terminator Instruction TI, that is a conditional branch,
-/// with one unconditional branch. Succ_edge determines if the new
-/// unconditional edge will be the first or second edge of the former TI
-/// instruction.
-void ABCD::removeRedundancy(TerminatorInst *TI, bool Succ_edge) {
- BasicBlock *Succ;
- if (Succ_edge) {
- Succ = TI->getSuccessor(0);
- fixPhi(TI->getParent(), TI->getSuccessor(1));
- } else {
- Succ = TI->getSuccessor(1);
- fixPhi(TI->getParent(), TI->getSuccessor(0));
- }
-
- BranchInst::Create(Succ, TI);
- TI->eraseFromParent(); // XXX: invoke
- ++NumBranchRemoved;
- modified = true;
-}
-
-/// When a conditional branch is removed, the BasicBlock that is no longer
-/// reachable will have problems in phi functions. This method fixes these
-/// phis by removing the former BasicBlock from the list of incoming BasicBlocks
-/// of all phis. In case the phi remains with no predecessor it will be
-/// marked to be removed later.
-void ABCD::fixPhi(BasicBlock *BB, BasicBlock *Succ) {
- BasicBlock::iterator begin = Succ->begin();
- while (PHINode *PN = dyn_cast<PHINode>(begin++)) {
- PN->removeIncomingValue(BB, false);
- if (PN->getNumIncomingValues() == 0)
- phis_to_remove.push_back(PN);
- }
-}
-
-/// Removes phis that have no predecessor
-void ABCD::removePhis() {
- for (unsigned i = 0, e = phis_to_remove.size(); i != e; ++i) {
- PHINode *PN = phis_to_remove[i];
- PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
- PN->eraseFromParent();
- }
-}
-
-/// Creates constraints for Instructions.
-/// If the constraint for this instruction has already been created
-/// nothing is done.
-void ABCD::createConstraintInstruction(Instruction *I) {
- // Test if this instruction has not been created before
- if (created.insert(I)) {
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
- createConstraintBinaryOperator(BO);
- } else if (PHINode *PN = dyn_cast<PHINode>(I)) {
- createConstraintPHINode(PN);
- }
- }
-}
-
-/// Creates constraints for Binary Operators.
-/// It will create constraints only for addition and subtraction,
-/// the other binary operations are not treated by ABCD.
-/// For additions in the form a = b + X and a = X + b, where X is a constant,
-/// the constraint a <= b + X can be obtained. For this constraint, an edge
-/// a->b with weight X is added to the lower bound graph, and an edge
-/// b->a with weight -X is added to the upper bound graph.
-/// Only subtractions in the format a = b - X are used by ABCD.
-/// Edges are created using the same semantic as addition.
-void ABCD::createConstraintBinaryOperator(BinaryOperator *BO) {
- Instruction *I1 = NULL, *I2 = NULL;
- ConstantInt *CI1 = NULL, *CI2 = NULL;
-
- // Test if an operand is an Instruction and the other is a Constant
- if (!createBinaryOperatorInfo(BO, &I1, &I2, &CI1, &CI2))
- return;
-
- Instruction *I = 0;
- APInt value;
-
- switch (BO->getOpcode()) {
- case Instruction::Add:
- if (I1) {
- I = I1;
- value = CI2->getValue();
- } else if (I2) {
- I = I2;
- value = CI1->getValue();
- }
- break;
-
- case Instruction::Sub:
- // Instructions like a = X-b, where X is a constant are not represented
- // in the graph.
- if (!I1)
- return;
-
- I = I1;
- value = -CI2->getValue();
- break;
-
- default:
- return;
- }
-
- inequality_graph.addEdge(I, BO, value, true);
- inequality_graph.addEdge(BO, I, -value, false);
- createConstraintInstruction(I);
-}
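// Worked example (hypothetical IR, following the comment above): for
//   %a = add i32 %b, 5
// the constraint a <= b + 5 is recorded, and for
//   %a = sub i32 %b, 5
// the constraint a <= b - 5 (the weight is negated). Subtractions of the
// form a = X - b, where X is the constant, are skipped entirely, as the
// early return above shows.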
-
-/// Given a binary operator, we are only interested in the case
-/// that one operand is an Instruction and the other is a ConstantInt. In
-/// this case the method returns true, otherwise false. It also obtains the
-/// Instruction and ConstantInt from the BinaryOperator and returns them.
-bool ABCD::createBinaryOperatorInfo(BinaryOperator *BO, Instruction **I1,
- Instruction **I2, ConstantInt **C1,
- ConstantInt **C2) {
- Value *op1 = BO->getOperand(0);
- Value *op2 = BO->getOperand(1);
-
- if ((*I1 = dyn_cast<Instruction>(op1))) {
- if ((*C2 = dyn_cast<ConstantInt>(op2)))
- return true; // First is Instruction and second ConstantInt
-
- return false; // Both are Instruction
- } else {
- if ((*C1 = dyn_cast<ConstantInt>(op1)) &&
- (*I2 = dyn_cast<Instruction>(op2)))
- return true; // First is ConstantInt and second Instruction
-
- return false; // Both are not Instruction
- }
-}
-
-/// Creates constraints for Comparator Instructions.
-/// Only comparators that have any of the following operators
-/// are used to create constraints: >=, >, <=, <. And only if
-/// at least one operand is an Instruction. In a Comparator Instruction
-/// a op b, there will be 4 sigma functions a_t, a_f, b_t and b_f. Where
-/// t and f represent sigma for operands in true and false branches. The
-/// following constraints can be obtained. a_t <= a, a_f <= a, b_t <= b and
-/// b_f <= b. There are two more constraints that depend on the operator.
-/// For the operator <= : a_t <= b_t and b_f <= a_f-1
-/// For the operator < : a_t <= b_t-1 and b_f <= a_f
-/// For the operator >= : b_t <= a_t and a_f <= b_f-1
-/// For the operator > : b_t <= a_t-1 and a_f <= b_f
-void ABCD::createConstraintCmpInst(ICmpInst *ICI, TerminatorInst *TI) {
- Value *V_op1 = ICI->getOperand(0);
- Value *V_op2 = ICI->getOperand(1);
-
- if (!V_op1->getType()->isIntegerTy())
- return;
-
- Instruction *I_op1 = dyn_cast<Instruction>(V_op1);
- Instruction *I_op2 = dyn_cast<Instruction>(V_op2);
-
- // Test if at least one operand is an Instruction
- if (!I_op1 && !I_op2)
- return;
-
- BasicBlock *BB_succ_t = TI->getSuccessor(0);
- BasicBlock *BB_succ_f = TI->getSuccessor(1);
-
- PHINode *SIG_op1_t = NULL, *SIG_op1_f = NULL,
- *SIG_op2_t = NULL, *SIG_op2_f = NULL;
-
- createConstraintSigInst(I_op1, BB_succ_t, BB_succ_f, &SIG_op1_t, &SIG_op1_f);
- createConstraintSigInst(I_op2, BB_succ_t, BB_succ_f, &SIG_op2_t, &SIG_op2_f);
-
- int32_t width = cast<IntegerType>(V_op1->getType())->getBitWidth();
- APInt MinusOne = APInt::getAllOnesValue(width);
- APInt Zero = APInt::getNullValue(width);
-
- CmpInst::Predicate Pred = ICI->getPredicate();
- ConstantInt *CI1 = dyn_cast<ConstantInt>(V_op1);
- ConstantInt *CI2 = dyn_cast<ConstantInt>(V_op2);
- switch (Pred) {
- case CmpInst::ICMP_SGT: // signed greater than
- createConstraintSigSig(SIG_op2_t, SIG_op1_t, CI2, CI1, MinusOne);
- createConstraintSigSig(SIG_op1_f, SIG_op2_f, CI1, CI2, Zero);
- break;
-
- case CmpInst::ICMP_SGE: // signed greater or equal
- createConstraintSigSig(SIG_op2_t, SIG_op1_t, CI2, CI1, Zero);
- createConstraintSigSig(SIG_op1_f, SIG_op2_f, CI1, CI2, MinusOne);
- break;
-
- case CmpInst::ICMP_SLT: // signed less than
- createConstraintSigSig(SIG_op1_t, SIG_op2_t, CI1, CI2, MinusOne);
- createConstraintSigSig(SIG_op2_f, SIG_op1_f, CI2, CI1, Zero);
- break;
-
- case CmpInst::ICMP_SLE: // signed less or equal
- createConstraintSigSig(SIG_op1_t, SIG_op2_t, CI1, CI2, Zero);
- createConstraintSigSig(SIG_op2_f, SIG_op1_f, CI2, CI1, MinusOne);
- break;
-
- default:
- break;
- }
-
- if (I_op1)
- createConstraintInstruction(I_op1);
- if (I_op2)
- createConstraintInstruction(I_op2);
-}
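// Concrete instance of the table in the comment above (hypothetical operands
// a and b): for `if (a <= b)` the sigmas a_t, a_f, b_t and b_f get the base
// constraints a_t <= a, a_f <= a, b_t <= b and b_f <= b, plus a_t <= b_t on
// the true edge and b_f <= a_f - 1 on the false edge -- the Zero and
// MinusOne weights passed to createConstraintSigSig in the SLE case.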
-
-/// Creates constraints for PHI nodes.
-/// In a PHI node a = phi(b,c) we can create the constraint
-/// a<= max(b,c). With this constraint there will be the edges,
-/// b->a and c->a with weight 0 in the lower bound graph, and the edges
-/// a->b and a->c with weight 0 in the upper bound graph.
-void ABCD::createConstraintPHINode(PHINode *PN) {
- // FIXME: We really want to disallow sigma nodes, but I don't know a better
- // way to detect them than this.
- if (PN->getNumOperands() == 2) return;
-
- int32_t width = cast<IntegerType>(PN->getType())->getBitWidth();
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
- Value *V = PN->getIncomingValue(i);
- if (Instruction *I = dyn_cast<Instruction>(V)) {
- createConstraintInstruction(I);
- }
- inequality_graph.addEdge(V, PN, APInt(width, 0), true);
- inequality_graph.addEdge(V, PN, APInt(width, 0), false);
- }
-}
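// Sketch (hypothetical names): for a = phi(b, c) the loop above adds the
// zero-weight edges b->a and c->a to the lower-bound graph and a->b, a->c to
// the upper-bound graph, encoding a <= max(b, c); two-operand phis are
// skipped by the early return because they are assumed to be sigma nodes.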
-
-/// This method creates a constraint between a Sigma and an Instruction.
-/// These constraints are created as soon as we find a comparator that uses a
-/// SSI variable.
-void ABCD::createConstraintSigInst(Instruction *I_op, BasicBlock *BB_succ_t,
- BasicBlock *BB_succ_f, PHINode **SIG_op_t,
- PHINode **SIG_op_f) {
- *SIG_op_t = findSigma(BB_succ_t, I_op);
- *SIG_op_f = findSigma(BB_succ_f, I_op);
-
- if (*SIG_op_t) {
- int32_t width = cast<IntegerType>((*SIG_op_t)->getType())->getBitWidth();
- inequality_graph.addEdge(I_op, *SIG_op_t, APInt(width, 0), true);
- inequality_graph.addEdge(*SIG_op_t, I_op, APInt(width, 0), false);
- }
- if (*SIG_op_f) {
- int32_t width = cast<IntegerType>((*SIG_op_f)->getType())->getBitWidth();
- inequality_graph.addEdge(I_op, *SIG_op_f, APInt(width, 0), true);
- inequality_graph.addEdge(*SIG_op_f, I_op, APInt(width, 0), false);
- }
-}
-
-/// If PN_op1 and PN_op2 are different from NULL, create a constraint
-/// PN_op2 -> PN_op1 with value. In case any of them is NULL, replace
-/// with the respective V_op#, if V_op# is a ConstantInt.
-void ABCD::createConstraintSigSig(PHINode *SIG_op1, PHINode *SIG_op2,
- ConstantInt *V_op1, ConstantInt *V_op2,
- APInt value) {
- if (SIG_op1 && SIG_op2) {
- inequality_graph.addEdge(SIG_op2, SIG_op1, value, true);
- inequality_graph.addEdge(SIG_op1, SIG_op2, -value, false);
- } else if (SIG_op1 && V_op2) {
- inequality_graph.addEdge(V_op2, SIG_op1, value, true);
- inequality_graph.addEdge(SIG_op1, V_op2, -value, false);
- } else if (SIG_op2 && V_op1) {
- inequality_graph.addEdge(SIG_op2, V_op1, value, true);
- inequality_graph.addEdge(V_op1, SIG_op2, -value, false);
- }
-}
-
-/// Returns the sigma representing the Instruction I in BasicBlock BB.
-/// Returns NULL in case there is no sigma for this Instruction in this
-/// Basic Block. This method assumes that sigmas are the first instructions
-/// in a block, and that there can be only two sigmas in a block. So it will
-/// only look at the first two instructions of BasicBlock BB.
-PHINode *ABCD::findSigma(BasicBlock *BB, Instruction *I) {
- // BB has more than one predecessor, BB cannot have sigmas.
- if (I == NULL || BB->getSinglePredecessor() == NULL)
- return NULL;
-
- BasicBlock::iterator begin = BB->begin();
- BasicBlock::iterator end = BB->end();
-
- for (unsigned i = 0; i < 2 && begin != end; ++i, ++begin) {
- Instruction *I_succ = begin;
- if (PHINode *PN = dyn_cast<PHINode>(I_succ))
- if (PN->getIncomingValue(0) == I)
- return PN;
- }
-
- return NULL;
-}
-
-/// Original ABCD algorithm to prove redundant checks.
-/// This implementation works on any kind of inequality branch.
-bool ABCD::demandProve(Value *a, Value *b, int c, bool upper_bound) {
- int32_t width = cast<IntegerType>(a->getType())->getBitWidth();
- Bound *bound = new Bound(APInt(width, c), upper_bound);
-
- mem_result.clear();
- active.clear();
-
- ProveResult res = prove(a, b, bound, 0);
- return res != False;
-}
-
-/// Prove that distance between b and a is <= bound
-ABCD::ProveResult ABCD::prove(Value *a, Value *b, Bound *bound,
- unsigned level) {
- // if (C[b-a<=e] == True for some e <= bound
- // Same or stronger difference was already proven
- if (mem_result.hasTrue(b, bound))
- return True;
-
- // if (C[b-a<=e] == False for some e >= bound
- // Same or weaker difference was already disproved
- if (mem_result.hasFalse(b, bound))
- return False;
-
- // if (C[b-a<=e] == Reduced for some e <= bound
- // b is on a cycle that was reduced for same or stronger difference
- if (mem_result.hasReduced(b, bound))
- return Reduced;
-
- // traversal reached the source vertex
- if (a == b && Bound::geq(bound, APInt(bound->getBitWidth(), 0, true)))
- return True;
-
- // if b has no predecessor then fail
- if (!inequality_graph.hasEdge(b, bound->isUpperBound()))
- return False;
-
- // a cycle was encountered
- if (active.count(b)) {
- if (Bound::leq(active.lookup(b), bound))
- return Reduced; // a "harmless" cycle
-
- return False; // an amplifying cycle
- }
-
- active[b] = bound;
- PHINode *PN = dyn_cast<PHINode>(b);
-
- // Test if a Value is a Phi. If it is a PHINode with more than 1 incoming
- // value, then it is a phi; if it has 1 incoming value, it is a sigma.
- if (PN && PN->getNumIncomingValues() > 1)
- updateMemDistance(a, b, bound, level, min);
- else
- updateMemDistance(a, b, bound, level, max);
-
- active.erase(b);
-
- ABCD::ProveResult res = mem_result.getBoundResult(b, bound);
- return res;
-}
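// Interpretation of the meet and memoization above (added commentary, with
// hypothetical values): ProveResult is ordered False < Reduced < True, so
// `min` (used for phis, whose value may come from any predecessor) requires
// every incoming edge to prove the bound, while `max` (used for sigmas and
// ordinary nodes) lets a single proving edge suffice. The memoized chart
// also makes queries monotone: if x - y <= 3 was proven True, a weaker query
// such as x - y <= 5 is answered True immediately via hasTrue().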
-
-/// Updates the distance value for a and b
-void ABCD::updateMemDistance(Value *a, Value *b, Bound *bound, unsigned level,
- meet_function meet) {
- ABCD::ProveResult res = (meet == max) ? False : True;
-
- SmallPtrSet<Edge *, 16> Edges = inequality_graph.getEdges(b);
- SmallPtrSet<Edge *, 16>::iterator begin = Edges.begin(), end = Edges.end();
-
- for (; begin != end ; ++begin) {
- if (((res >= Reduced) && (meet == max)) ||
- ((res == False) && (meet == min))) {
- break;
- }
- Edge *in = *begin;
- if (in->isUpperBound() == bound->isUpperBound()) {
- Value *succ = in->getVertex();
- res = meet(res, prove(a, succ, new Bound(bound, in->getValue()),
- level+1));
- }
- }
-
- mem_result.updateBound(b, bound, res);
-}
-
-/// Return the stored result for this bound
-ABCD::ProveResult ABCD::MemoizedResultChart::getResult(const Bound *bound)const{
- if (max_false && Bound::leq(bound, max_false))
- return False;
- if (min_true && Bound::leq(min_true, bound))
- return True;
- if (min_reduced && Bound::leq(min_reduced, bound))
- return Reduced;
- return False;
-}
-
-/// Stores a false found
-void ABCD::MemoizedResultChart::addFalse(Bound *bound) {
- if (!max_false || Bound::leq(max_false, bound))
- max_false = bound;
-
- if (Bound::eq(max_false, min_reduced))
- min_reduced = Bound::createIncrement(min_reduced);
- if (Bound::eq(max_false, min_true))
- min_true = Bound::createIncrement(min_true);
- if (Bound::eq(min_reduced, min_true))
- min_reduced = NULL;
- clearRedundantReduced();
-}
-
-/// Stores a true found
-void ABCD::MemoizedResultChart::addTrue(Bound *bound) {
- if (!min_true || Bound::leq(bound, min_true))
- min_true = bound;
-
- if (Bound::eq(min_true, min_reduced))
- min_reduced = Bound::createDecrement(min_reduced);
- if (Bound::eq(min_true, max_false))
- max_false = Bound::createDecrement(max_false);
- if (Bound::eq(max_false, min_reduced))
- min_reduced = NULL;
- clearRedundantReduced();
-}
-
-/// Stores a Reduced found
-void ABCD::MemoizedResultChart::addReduced(Bound *bound) {
- if (!min_reduced || Bound::leq(bound, min_reduced))
- min_reduced = bound;
-
- if (Bound::eq(min_reduced, min_true))
- min_true = Bound::createIncrement(min_true);
- if (Bound::eq(min_reduced, max_false))
- max_false = Bound::createDecrement(max_false);
-}
-
-/// Clears redundant reduced
-/// If a min_true is smaller than a min_reduced then the min_reduced
-/// is unnecessary and then removed. It also works for min_reduced
-/// being smaller than max_false.
-void ABCD::MemoizedResultChart::clearRedundantReduced() {
- if (min_true && min_reduced && Bound::lt(min_true, min_reduced))
- min_reduced = NULL;
- if (max_false && min_reduced && Bound::lt(min_reduced, max_false))
- min_reduced = NULL;
-}
-
-/// Stores the bound found
-void ABCD::MemoizedResult::updateBound(Value *b, Bound *bound,
- const ProveResult res) {
- if (res == False) {
- map[b].addFalse(bound);
- } else if (res == True) {
- map[b].addTrue(bound);
- } else {
- map[b].addReduced(bound);
- }
-}
-
-/// Adds an edge from V_from to V_to with weight value
-void ABCD::InequalityGraph::addEdge(Value *V_to, Value *V_from,
- APInt value, bool upper) {
- assert(V_from->getType() == V_to->getType());
- assert(cast<IntegerType>(V_from->getType())->getBitWidth() ==
- value.getBitWidth());
-
- DenseMap<Value *, SmallPtrSet<Edge *, 16> >::iterator from;
- from = addNode(V_from);
- from->second.insert(new Edge(V_to, value, upper));
-}
-
-/// Test if there is any edge from V in the upper direction
-bool ABCD::InequalityGraph::hasEdge(Value *V, bool upper) const {
- SmallPtrSet<Edge *, 16> it = graph.lookup(V);
-
- SmallPtrSet<Edge *, 16>::iterator begin = it.begin();
- SmallPtrSet<Edge *, 16>::iterator end = it.end();
- for (; begin != end; ++begin) {
- if ((*begin)->isUpperBound() == upper) {
- return true;
- }
- }
- return false;
-}
-
-/// Prints the header of the dot file
-void ABCD::InequalityGraph::printHeader(raw_ostream &OS, Function &F) const {
- OS << "digraph dotgraph {\n";
- OS << "label=\"Inequality Graph for \'";
- OS << F.getNameStr() << "\' function\";\n";
- OS << "node [shape=record,fontname=\"Times-Roman\",fontsize=14];\n";
-}
-
-/// Prints the body of the dot file
-void ABCD::InequalityGraph::printBody(raw_ostream &OS) const {
- DenseMap<Value *, SmallPtrSet<Edge *, 16> >::const_iterator begin =
- graph.begin(), end = graph.end();
-
- for (; begin != end ; ++begin) {
- SmallPtrSet<Edge *, 16>::iterator begin_par =
- begin->second.begin(), end_par = begin->second.end();
- Value *source = begin->first;
-
- printVertex(OS, source);
-
- for (; begin_par != end_par ; ++begin_par) {
- Edge *edge = *begin_par;
- printEdge(OS, source, edge);
- }
- }
-}
-
-/// Prints vertex source to the dot file
-///
-void ABCD::InequalityGraph::printVertex(raw_ostream &OS, Value *source) const {
- OS << "\"";
- printName(OS, source);
- OS << "\"";
- OS << " [label=\"{";
- printName(OS, source);
- OS << "}\"];\n";
-}
-
-/// Prints the edge to the dot file
-void ABCD::InequalityGraph::printEdge(raw_ostream &OS, Value *source,
- Edge *edge) const {
- Value *dest = edge->getVertex();
- APInt value = edge->getValue();
- bool upper = edge->isUpperBound();
-
- OS << "\"";
- printName(OS, source);
- OS << "\"";
- OS << " -> ";
- OS << "\"";
- printName(OS, dest);
- OS << "\"";
- OS << " [label=\"" << value << "\"";
- if (upper) {
- OS << "color=\"blue\"";
- } else {
- OS << "color=\"red\"";
- }
- OS << "];\n";
-}
-
-void ABCD::InequalityGraph::printName(raw_ostream &OS, Value *info) const {
- if (ConstantInt *CI = dyn_cast<ConstantInt>(info)) {
- OS << *CI;
- } else {
- if (!info->hasName()) {
- info->setName("V");
- }
- OS << info->getNameStr();
- }
-}
-
-/// createABCDPass - The public interface to this file...
-FunctionPass *llvm::createABCDPass() {
- return new ABCD();
-}
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/ADCE.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/ADCE.cpp
index 5a49841..ada086e 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/ADCE.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/ADCE.cpp
@@ -33,7 +33,7 @@ STATISTIC(NumRemoved, "Number of instructions removed");
namespace {
struct ADCE : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- ADCE() : FunctionPass(&ID) {}
+ ADCE() : FunctionPass(ID) {}
virtual bool runOnFunction(Function& F);
@@ -45,7 +45,7 @@ namespace {
}
char ADCE::ID = 0;
-static RegisterPass<ADCE> X("adce", "Aggressive Dead Code Elimination");
+INITIALIZE_PASS(ADCE, "adce", "Aggressive Dead Code Elimination", false, false);
bool ADCE::runOnFunction(Function& F) {
SmallPtrSet<Instruction*, 128> alive;
@@ -83,7 +83,7 @@ bool ADCE::runOnFunction(Function& F) {
for (SmallVector<Instruction*, 1024>::iterator I = worklist.begin(),
E = worklist.end(); I != E; ++I) {
- NumRemoved++;
+ ++NumRemoved;
(*I)->eraseFromParent();
}
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/BasicBlockPlacement.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/BasicBlockPlacement.cpp
index 54533f5..b144678 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/BasicBlockPlacement.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/BasicBlockPlacement.cpp
@@ -41,7 +41,7 @@ STATISTIC(NumMoved, "Number of basic blocks moved");
namespace {
struct BlockPlacement : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- BlockPlacement() : FunctionPass(&ID) {}
+ BlockPlacement() : FunctionPass(ID) {}
virtual bool runOnFunction(Function &F);
@@ -74,8 +74,8 @@ namespace {
}
char BlockPlacement::ID = 0;
-static RegisterPass<BlockPlacement>
-X("block-placement", "Profile Guided Basic Block Placement");
+INITIALIZE_PASS(BlockPlacement, "block-placement",
+ "Profile Guided Basic Block Placement", false, false);
FunctionPass *llvm::createBlockPlacementPass() { return new BlockPlacement(); }
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/CMakeLists.txt b/libclamav/c++/llvm/lib/Transforms/Scalar/CMakeLists.txt
index 683c1c2..b7598ea 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/CMakeLists.txt
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/CMakeLists.txt
@@ -1,9 +1,9 @@
add_llvm_library(LLVMScalarOpts
- ABCD.cpp
ADCE.cpp
BasicBlockPlacement.cpp
CodeGenPrepare.cpp
ConstantProp.cpp
+ CorrelatedValuePropagation.cpp
DCE.cpp
DeadStoreElimination.cpp
GEPSplitter.cpp
@@ -17,16 +17,17 @@ add_llvm_library(LLVMScalarOpts
LoopStrengthReduce.cpp
LoopUnrollPass.cpp
LoopUnswitch.cpp
+ LowerAtomic.cpp
MemCpyOptimizer.cpp
Reassociate.cpp
Reg2Mem.cpp
SCCP.cpp
- SCCVN.cpp
Scalar.cpp
ScalarReplAggregates.cpp
SimplifyCFGPass.cpp
SimplifyHalfPowrLibCalls.cpp
SimplifyLibCalls.cpp
+ Sink.cpp
TailDuplication.cpp
TailRecursionElimination.cpp
)
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 7ceda1f..e07b761 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -28,17 +28,25 @@
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CallSite.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/IRBuilder.h"
using namespace llvm;
using namespace llvm::PatternMatch;
+static cl::opt<bool>
+CriticalEdgeSplit("cgp-critical-edge-splitting",
+ cl::desc("Split critical edges during codegen prepare"),
+ cl::init(true), cl::Hidden);
+
namespace {
class CodeGenPrepare : public FunctionPass {
/// TLI - Keep a pointer of a TargetLowering to consult for determining
@@ -52,7 +60,7 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
explicit CodeGenPrepare(const TargetLowering *tli = 0)
- : FunctionPass(&ID), TLI(tli) {}
+ : FunctionPass(ID), TLI(tli) {}
bool runOnFunction(Function &F);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
@@ -72,6 +80,7 @@ namespace {
DenseMap<Value*,Value*> &SunkAddrs);
bool OptimizeInlineAsmInst(Instruction *I, CallSite CS,
DenseMap<Value*,Value*> &SunkAddrs);
+ bool OptimizeCallInst(CallInst *CI);
bool MoveExtToFormExtLoad(Instruction *I);
bool OptimizeExtUses(Instruction *I);
void findLoopBackEdges(const Function &F);
@@ -79,8 +88,8 @@ namespace {
}
char CodeGenPrepare::ID = 0;
-static RegisterPass<CodeGenPrepare> X("codegenprepare",
- "Optimize for code generation");
+INITIALIZE_PASS(CodeGenPrepare, "codegenprepare",
+ "Optimize for code generation", false, false);
FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
return new CodeGenPrepare(TLI);
@@ -171,7 +180,7 @@ bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
// don't mess around with them.
BasicBlock::const_iterator BBI = BB->begin();
while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
- for (Value::use_const_iterator UI = PN->use_begin(), E = PN->use_end();
+ for (Value::const_use_iterator UI = PN->use_begin(), E = PN->use_end();
UI != E; ++UI) {
const Instruction *User = cast<Instruction>(*UI);
if (User->getParent() != DestBB || !isa<PHINode>(User))
@@ -424,9 +433,9 @@ static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
// If these values will be promoted, find out what they will be promoted
// to. This helps us consider truncates on PPC as noop copies when they
// are.
- if (TLI.getTypeAction(CI->getContext(), SrcVT) == TargetLowering::Promote)
+ if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
- if (TLI.getTypeAction(CI->getContext(), DstVT) == TargetLowering::Promote)
+ if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
// If, after promotion, these are the same types, this is a noop copy.
@@ -537,6 +546,48 @@ static bool OptimizeCmpExpression(CmpInst *CI) {
return MadeChange;
}
+namespace {
+class CodeGenPrepareFortifiedLibCalls : public SimplifyFortifiedLibCalls {
+protected:
+ void replaceCall(Value *With) {
+ CI->replaceAllUsesWith(With);
+ CI->eraseFromParent();
+ }
+ bool isFoldable(unsigned SizeCIOp, unsigned, bool) const {
+ if (ConstantInt *SizeCI =
+ dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp)))
+ return SizeCI->isAllOnesValue();
+ return false;
+ }
+};
+} // end anonymous namespace
+
+bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
+ // Lower all uses of llvm.objectsize.*
+ IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
+ if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
+ bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
+ const Type *ReturnTy = CI->getType();
+ Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
+ CI->replaceAllUsesWith(RetVal);
+ CI->eraseFromParent();
+ return true;
+ }
+
+ // From here on out we're working with named functions.
+ if (CI->getCalledFunction() == 0) return false;
+
+ // We'll need TargetData from here on out.
+ const TargetData *TD = TLI ? TLI->getTargetData() : 0;
+ if (!TD) return false;
+
+ // Lower all default uses of _chk calls. This is very similar
+ // to what InstCombineCalls does, but here we are only lowering calls
+ // that have the default "don't know" as the objectsize. Anything else
+ // should be left alone.
+ CodeGenPrepareFortifiedLibCalls Simplifier;
+ return Simplifier.fold(CI, TD);
+}
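// A rough illustration of the objectsize lowering above (hypothetical IR,
// added for clarity): a call such as
//   %s = call i64 @llvm.objectsize.i64(i8* %p, i1 false)
// is folded to the constant -1 ("size unknown, report maximum"), while the
// i1 true form is folded to 0, matching the Min flag read from the second
// argument.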
//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//
@@ -670,8 +721,12 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
MemoryInst->replaceUsesOfWith(Addr, SunkAddr);
- if (Addr->use_empty())
+ if (Addr->use_empty()) {
RecursivelyDeleteTriviallyDeadInstructions(Addr);
+ // This address is now available for reassignment, so erase the table entry;
+ // we don't want to match some completely different instruction.
+ SunkAddrs[Addr] = 0;
+ }
return true;
}
@@ -711,8 +766,7 @@ bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
}
// Compute the constraint code and ConstraintType to use.
- TLI->ComputeConstraintToUse(OpInfo, SDValue(),
- OpInfo.ConstraintType == TargetLowering::C_Memory);
+ TLI->ComputeConstraintToUse(OpInfo, SDValue());
if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
OpInfo.isIndirect) {
@@ -843,12 +897,14 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
bool MadeChange = false;
// Split all critical edges where the dest block has a PHI.
- TerminatorInst *BBTI = BB.getTerminator();
- if (BBTI->getNumSuccessors() > 1 && !isa<IndirectBrInst>(BBTI)) {
- for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i) {
- BasicBlock *SuccBB = BBTI->getSuccessor(i);
- if (isa<PHINode>(SuccBB->begin()) && isCriticalEdge(BBTI, i, true))
- SplitEdgeNicely(BBTI, i, BackEdges, this);
+ if (CriticalEdgeSplit) {
+ TerminatorInst *BBTI = BB.getTerminator();
+ if (BBTI->getNumSuccessors() > 1 && !isa<IndirectBrInst>(BBTI)) {
+ for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i) {
+ BasicBlock *SuccBB = BBTI->getSuccessor(i);
+ if (isa<PHINode>(SuccBB->begin()) && isCriticalEdge(BBTI, i, true))
+ SplitEdgeNicely(BBTI, i, BackEdges, this);
+ }
}
}
@@ -913,6 +969,10 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
} else
// Sink address computing for memory operands into the block.
MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
+ } else {
+ // Other CallInst optimizations that don't need to muck with the
+ // enclosing iterator here.
+ MadeChange |= OptimizeCallInst(CI);
}
}
}
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/ConstantProp.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/ConstantProp.cpp
index ea20813..a0ea369 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/ConstantProp.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/ConstantProp.cpp
@@ -34,7 +34,7 @@ STATISTIC(NumInstKilled, "Number of instructions killed");
namespace {
struct ConstantPropagation : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- ConstantPropagation() : FunctionPass(&ID) {}
+ ConstantPropagation() : FunctionPass(ID) {}
bool runOnFunction(Function &F);
@@ -45,8 +45,8 @@ namespace {
}
char ConstantPropagation::ID = 0;
-static RegisterPass<ConstantPropagation>
-X("constprop", "Simple constant propagation");
+INITIALIZE_PASS(ConstantPropagation, "constprop",
+ "Simple constant propagation", false, false);
FunctionPass *llvm::createConstantPropagationPass() {
return new ConstantPropagation();
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
new file mode 100644
index 0000000..0d4e45d
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -0,0 +1,200 @@
+//===- CorrelatedValuePropagation.cpp - Propagate CFG-derived info --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Correlated Value Propagation pass.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "correlated-value-propagation"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/Pass.h"
+#include "llvm/Analysis/LazyValueInfo.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/ADT/Statistic.h"
+using namespace llvm;
+
+STATISTIC(NumPhis, "Number of phis propagated");
+STATISTIC(NumSelects, "Number of selects propagated");
+STATISTIC(NumMemAccess, "Number of memory access targets propagated");
+STATISTIC(NumCmps, "Number of comparisons propagated");
+
+namespace {
+ class CorrelatedValuePropagation : public FunctionPass {
+ LazyValueInfo *LVI;
+
+ bool processSelect(SelectInst *SI);
+ bool processPHI(PHINode *P);
+ bool processMemAccess(Instruction *I);
+ bool processCmp(CmpInst *C);
+
+ public:
+ static char ID;
+ CorrelatedValuePropagation(): FunctionPass(ID) { }
+
+ bool runOnFunction(Function &F);
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<LazyValueInfo>();
+ }
+ };
+}
+
+char CorrelatedValuePropagation::ID = 0;
+INITIALIZE_PASS(CorrelatedValuePropagation, "correlated-propagation",
+ "Value Propagation", false, false);
+
+// Public interface to the Value Propagation pass
+Pass *llvm::createCorrelatedValuePropagationPass() {
+ return new CorrelatedValuePropagation();
+}
+
+bool CorrelatedValuePropagation::processSelect(SelectInst *S) {
+ if (S->getType()->isVectorTy()) return false;
+ if (isa<Constant>(S->getOperand(0))) return false;
+
+ Constant *C = LVI->getConstant(S->getOperand(0), S->getParent());
+ if (!C) return false;
+
+ ConstantInt *CI = dyn_cast<ConstantInt>(C);
+ if (!CI) return false;
+
+ S->replaceAllUsesWith(S->getOperand(CI->isOne() ? 1 : 2));
+ S->eraseFromParent();
+
+ ++NumSelects;
+
+ return true;
+}
+
+bool CorrelatedValuePropagation::processPHI(PHINode *P) {
+ bool Changed = false;
+
+ BasicBlock *BB = P->getParent();
+ for (unsigned i = 0, e = P->getNumIncomingValues(); i < e; ++i) {
+ Value *Incoming = P->getIncomingValue(i);
+ if (isa<Constant>(Incoming)) continue;
+
+ Constant *C = LVI->getConstantOnEdge(P->getIncomingValue(i),
+ P->getIncomingBlock(i),
+ BB);
+ if (!C) continue;
+
+ P->setIncomingValue(i, C);
+ Changed = true;
+ }
+
+ if (Value *ConstVal = P->hasConstantValue()) {
+ P->replaceAllUsesWith(ConstVal);
+ P->eraseFromParent();
+ Changed = true;
+ }
+
+ ++NumPhis;
+
+ return Changed;
+}
+
+bool CorrelatedValuePropagation::processMemAccess(Instruction *I) {
+ Value *Pointer = 0;
+ if (LoadInst *L = dyn_cast<LoadInst>(I))
+ Pointer = L->getPointerOperand();
+ else
+ Pointer = cast<StoreInst>(I)->getPointerOperand();
+
+ if (isa<Constant>(Pointer)) return false;
+
+ Constant *C = LVI->getConstant(Pointer, I->getParent());
+ if (!C) return false;
+
+ ++NumMemAccess;
+ I->replaceUsesOfWith(Pointer, C);
+ return true;
+}
+
+/// processCmp - If the value of this comparison could be determined locally,
+/// constant propagation would already have figured it out. Instead, walk
+/// the predecessors and statically evaluate the comparison based on information
+/// available on that edge. If a given static evaluation is true on ALL
+/// incoming edges, then it's true universally and we can simplify the compare.
+bool CorrelatedValuePropagation::processCmp(CmpInst *C) {
+ Value *Op0 = C->getOperand(0);
+ if (isa<Instruction>(Op0) &&
+ cast<Instruction>(Op0)->getParent() == C->getParent())
+ return false;
+
+ Constant *Op1 = dyn_cast<Constant>(C->getOperand(1));
+ if (!Op1) return false;
+
+ pred_iterator PI = pred_begin(C->getParent()), PE = pred_end(C->getParent());
+ if (PI == PE) return false;
+
+ LazyValueInfo::Tristate Result = LVI->getPredicateOnEdge(C->getPredicate(),
+ C->getOperand(0), Op1, *PI, C->getParent());
+ if (Result == LazyValueInfo::Unknown) return false;
+
+ ++PI;
+ while (PI != PE) {
+ LazyValueInfo::Tristate Res = LVI->getPredicateOnEdge(C->getPredicate(),
+ C->getOperand(0), Op1, *PI, C->getParent());
+ if (Res != Result) return false;
+ ++PI;
+ }
+
+ ++NumCmps;
+
+ if (Result == LazyValueInfo::True)
+ C->replaceAllUsesWith(ConstantInt::getTrue(C->getContext()));
+ else
+ C->replaceAllUsesWith(ConstantInt::getFalse(C->getContext()));
+
+ C->eraseFromParent();
+
+ return true;
+}
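// Example of the edge-walking idea described in the processCmp comment
// (hypothetical CFG, added for clarity): if a block is reached only through
// the true edge of `if (x > 5)` and through another edge on which LVI
// already knows x > 5, then a compare `icmp sgt i32 %x, 5` in that block
// (with %x defined outside it) evaluates to true on every incoming edge and
// is replaced by the constant true.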
+
+bool CorrelatedValuePropagation::runOnFunction(Function &F) {
+ LVI = &getAnalysis<LazyValueInfo>();
+
+ bool FnChanged = false;
+
+ for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
+ bool BBChanged = false;
+ for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE; ) {
+ Instruction *II = BI++;
+ switch (II->getOpcode()) {
+ case Instruction::Select:
+ BBChanged |= processSelect(cast<SelectInst>(II));
+ break;
+ case Instruction::PHI:
+ BBChanged |= processPHI(cast<PHINode>(II));
+ break;
+ case Instruction::ICmp:
+ case Instruction::FCmp:
+ BBChanged |= processCmp(cast<CmpInst>(II));
+ break;
+ case Instruction::Load:
+ case Instruction::Store:
+ BBChanged |= processMemAccess(II);
+ break;
+ }
+ }
+
+ // Propagating correlated values might leave cruft around.
+ // Try to clean it up before we continue.
+ if (BBChanged)
+ SimplifyInstructionsInBlock(FI);
+
+ FnChanged |= BBChanged;
+ }
+
+ return FnChanged;
+}
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/DCE.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/DCE.cpp
index 39940c3..87ea803 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/DCE.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/DCE.cpp
@@ -35,7 +35,7 @@ namespace {
//
struct DeadInstElimination : public BasicBlockPass {
static char ID; // Pass identification, replacement for typeid
- DeadInstElimination() : BasicBlockPass(&ID) {}
+ DeadInstElimination() : BasicBlockPass(ID) {}
virtual bool runOnBasicBlock(BasicBlock &BB) {
bool Changed = false;
for (BasicBlock::iterator DI = BB.begin(); DI != BB.end(); ) {
@@ -56,8 +56,8 @@ namespace {
}
char DeadInstElimination::ID = 0;
-static RegisterPass<DeadInstElimination>
-X("die", "Dead Instruction Elimination");
+INITIALIZE_PASS(DeadInstElimination, "die",
+ "Dead Instruction Elimination", false, false);
Pass *llvm::createDeadInstEliminationPass() {
return new DeadInstElimination();
@@ -70,7 +70,7 @@ namespace {
//
struct DCE : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- DCE() : FunctionPass(&ID) {}
+ DCE() : FunctionPass(ID) {}
virtual bool runOnFunction(Function &F);
@@ -81,7 +81,7 @@ namespace {
}
char DCE::ID = 0;
-static RegisterPass<DCE> Y("dce", "Dead Code Elimination");
+INITIALIZE_PASS(DCE, "dce", "Dead Code Elimination", false, false);
bool DCE::runOnFunction(Function &F) {
// Start out with all of the instructions in the worklist...
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 09c01d3..c8fd9d9 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -40,7 +40,7 @@ namespace {
TargetData *TD;
static char ID; // Pass identification, replacement for typeid
- DSE() : FunctionPass(&ID) {}
+ DSE() : FunctionPass(ID) {}
virtual bool runOnFunction(Function &F) {
bool Changed = false;
@@ -56,7 +56,8 @@ namespace {
}
bool runOnBasicBlock(BasicBlock &BB);
- bool handleFreeWithNonTrivialDependency(Instruction *F, MemDepResult Dep);
+ bool handleFreeWithNonTrivialDependency(const CallInst *F,
+ MemDepResult Dep);
bool handleEndBlock(BasicBlock &BB);
bool RemoveUndeadPointers(Value *Ptr, uint64_t killPointerSize,
BasicBlock::iterator &BBI,
@@ -73,7 +74,6 @@ namespace {
AU.addRequired<AliasAnalysis>();
AU.addRequired<MemoryDependenceAnalysis>();
AU.addPreserved<DominatorTree>();
- AU.addPreserved<AliasAnalysis>();
AU.addPreserved<MemoryDependenceAnalysis>();
}
@@ -82,7 +82,7 @@ namespace {
}
char DSE::ID = 0;
-static RegisterPass<DSE> X("dse", "Dead Store Elimination");
+INITIALIZE_PASS(DSE, "dse", "Dead Store Elimination", false, false);
FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
@@ -123,14 +123,15 @@ static Value *getPointerOperand(Instruction *I) {
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return SI->getPointerOperand();
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
- return MI->getOperand(1);
-
- switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
+ return MI->getArgOperand(0);
+
+ IntrinsicInst *II = cast<IntrinsicInst>(I);
+ switch (II->getIntrinsicID()) {
default: assert(false && "Unexpected intrinsic!");
case Intrinsic::init_trampoline:
- return I->getOperand(1);
+ return II->getArgOperand(0);
case Intrinsic::lifetime_end:
- return I->getOperand(2);
+ return II->getArgOperand(1);
}
}
@@ -147,12 +148,13 @@ static unsigned getStoreSize(Instruction *I, const TargetData *TD) {
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
Len = MI->getLength();
} else {
- switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
+ IntrinsicInst *II = cast<IntrinsicInst>(I);
+ switch (II->getIntrinsicID()) {
default: assert(false && "Unexpected intrinsic!");
case Intrinsic::init_trampoline:
return -1u;
case Intrinsic::lifetime_end:
- Len = I->getOperand(1);
+ Len = II->getArgOperand(0);
break;
}
}
@@ -201,8 +203,8 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
if (InstDep.isNonLocal()) continue;
// Handle frees whose dependencies are non-trivial.
- if (isFreeCall(Inst)) {
- MadeChange |= handleFreeWithNonTrivialDependency(Inst, InstDep);
+ if (const CallInst *F = isFreeCall(Inst)) {
+ MadeChange |= handleFreeWithNonTrivialDependency(F, InstDep);
continue;
}
@@ -218,7 +220,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
isElidable(DepStore)) {
// Delete the store and now-dead instructions that feed it.
DeleteDeadInstruction(DepStore);
- NumFastStores++;
+ ++NumFastStores;
MadeChange = true;
// DeleteDeadInstruction can delete the current instruction in loop
@@ -249,7 +251,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
BBI = BB.begin();
else if (BBI != BB.begin()) // Revisit this instruction if possible.
--BBI;
- NumFastStores++;
+ ++NumFastStores;
MadeChange = true;
continue;
}
@@ -270,7 +272,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
BBI = BB.begin();
else if (BBI != BB.begin()) // Revisit this instruction if possible.
--BBI;
- NumFastStores++;
+ ++NumFastStores;
MadeChange = true;
continue;
}
@@ -287,7 +289,8 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
/// handleFreeWithNonTrivialDependency - Handle frees of entire structures whose
/// dependency is a store to a field of that structure.
-bool DSE::handleFreeWithNonTrivialDependency(Instruction *F, MemDepResult Dep) {
+bool DSE::handleFreeWithNonTrivialDependency(const CallInst *F,
+ MemDepResult Dep) {
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
Instruction *Dependency = Dep.getInst();
@@ -297,13 +300,13 @@ bool DSE::handleFreeWithNonTrivialDependency(Instruction *F, MemDepResult Dep) {
Value *DepPointer = getPointerOperand(Dependency)->getUnderlyingObject();
// Check for aliasing.
- if (AA.alias(F->getOperand(1), 1, DepPointer, 1) !=
+ if (AA.alias(F->getArgOperand(0), 1, DepPointer, 1) !=
AliasAnalysis::MustAlias)
return false;
// DCE instructions only used to calculate that store
DeleteDeadInstruction(Dependency);
- NumFastStores++;
+ ++NumFastStores;
return true;
}
@@ -349,9 +352,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
if (deadPointers.count(pointerOperand)) {
// DCE instructions only used to calculate that store.
Instruction *Dead = BBI;
- BBI++;
+ ++BBI;
DeleteDeadInstruction(Dead, &deadPointers);
- NumFastStores++;
+ ++NumFastStores;
MadeChange = true;
continue;
}
@@ -371,9 +374,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// However, if this load is unused and not volatile, we can go ahead and
// remove it, and not have to worry about it making our pointer undead!
if (L->use_empty() && !L->isVolatile()) {
- BBI++;
+ ++BBI;
DeleteDeadInstruction(L, &deadPointers);
- NumFastOther++;
+ ++NumFastOther;
MadeChange = true;
continue;
}
@@ -391,17 +394,16 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// Dead alloca's can be DCE'd when we reach them
if (A->use_empty()) {
- BBI++;
+ ++BBI;
DeleteDeadInstruction(A, &deadPointers);
- NumFastOther++;
+ ++NumFastOther;
MadeChange = true;
}
continue;
- } else if (CallSite::get(BBI).getInstruction() != 0) {
+ } else if (CallSite CS = cast<Value>(BBI)) {
// If this call does not access memory, it can't
// be undeadifying any of our pointers.
- CallSite CS = CallSite::get(BBI);
if (AA.doesNotAccessMemory(CS))
continue;
@@ -426,9 +428,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
getPointerSize(*I));
if (A == AliasAnalysis::ModRef)
- modRef++;
+ ++modRef;
else
- other++;
+ ++other;
if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
dead.push_back(*I);
@@ -442,9 +444,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
} else if (isInstructionTriviallyDead(BBI)) {
// For any non-memory-affecting non-terminators, DCE them as we reach them
Instruction *Inst = BBI;
- BBI++;
+ ++BBI;
DeleteDeadInstruction(Inst, &deadPointers);
- NumFastOther++;
+ ++NumFastOther;
MadeChange = true;
continue;
}
@@ -497,7 +499,7 @@ bool DSE::RemoveUndeadPointers(Value *killPointer, uint64_t killPointerSize,
// Remove it!
++BBI;
DeleteDeadInstruction(S, &deadPointers);
- NumFastStores++;
+ ++NumFastStores;
MadeChange = true;
continue;
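The DSE hunks above repeatedly rewrite NumFastStores++ and NumFastOther++ into the pre-increment form. For the statistic counters involved this follows the usual LLVM preference for ++X, since a postfix increment on a counter-like class type has to construct and throw away a temporary copy. A minimal self-contained C++ sketch of that difference, using a hypothetical Counter type that is not part of the patch:

#include <cstdio>

// Hypothetical stand-in for a statistic counter class; illustrative only.
struct Counter {
  unsigned Value;
  Counter &operator++() { ++Value; return *this; }                       // prefix: no copy
  Counter operator++(int) { Counter Old = *this; ++Value; return Old; }  // postfix: copies
};

int main() {
  Counter NumFastStores = {0};
  ++NumFastStores;   // form used throughout the patch
  NumFastStores++;   // also works, but builds and discards a temporary
  std::printf("%u\n", NumFastStores.Value);
  return 0;
}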
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/GEPSplitter.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/GEPSplitter.cpp
index 610a41d..53dd06d 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/GEPSplitter.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/GEPSplitter.cpp
@@ -27,13 +27,13 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
public:
static char ID; // Pass identification, replacement for typeid
- explicit GEPSplitter() : FunctionPass(&ID) {}
+ explicit GEPSplitter() : FunctionPass(ID) {}
};
}
char GEPSplitter::ID = 0;
-static RegisterPass<GEPSplitter> X("split-geps",
- "split complex GEPs into simple GEPs");
+INITIALIZE_PASS(GEPSplitter, "split-geps",
+ "split complex GEPs into simple GEPs", false, false);
FunctionPass *llvm::createGEPSplitterPass() {
return new GEPSplitter();
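The two-line registration change in GEPSplitter.cpp recurs in every pass touched below (GVN, IndVarSimplify, JumpThreading): the constructor now hands the ID character itself to FunctionPass rather than its address, and the static RegisterPass<> object is replaced by the INITIALIZE_PASS macro. A minimal sketch of the new pattern, assuming the LLVM 2.8 headers bundled in this tree; ExamplePass and the "example-pass" name are invented for illustration and are not part of the patch:

#include "llvm/Pass.h"
#include "llvm/Function.h"
using namespace llvm;

namespace {
  // Skeleton pass following the post-2.8 registration style shown in the diff.
  struct ExamplePass : public FunctionPass {
    static char ID;                        // pass identification, replacement for typeid
    ExamplePass() : FunctionPass(ID) {}    // ID by reference, no longer &ID
    virtual bool runOnFunction(Function &F) { return false; }
  };
}

char ExamplePass::ID = 0;
INITIALIZE_PASS(ExamplePass, "example-pass", "Example pass (illustrative)",
                false, false);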
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/GVN.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/GVN.cpp
index fcb802a..c62ce1f 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -35,6 +35,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
@@ -164,7 +165,6 @@ namespace {
Expression create_expression(CastInst* C);
Expression create_expression(GetElementPtrInst* G);
Expression create_expression(CallInst* C);
- Expression create_expression(Constant* C);
Expression create_expression(ExtractValueInst* C);
Expression create_expression(InsertValueInst* C);
@@ -271,7 +271,8 @@ Expression ValueTable::create_expression(CallInst* C) {
e.function = C->getCalledFunction();
e.opcode = Expression::CALL;
- for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
+ CallSite CS(C);
+ for (CallInst::op_iterator I = CS.arg_begin(), E = CS.arg_end();
I != E; ++I)
e.varargs.push_back(lookup_or_add(*I));
@@ -447,14 +448,14 @@ uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
if (local_dep.isDef()) {
CallInst* local_cdep = cast<CallInst>(local_dep.getInst());
- if (local_cdep->getNumOperands() != C->getNumOperands()) {
+ if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
- for (unsigned i = 1; i < C->getNumOperands(); ++i) {
- uint32_t c_vn = lookup_or_add(C->getOperand(i));
- uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
+ for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
+ uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
+ uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
if (c_vn != cd_vn) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
@@ -504,13 +505,13 @@ uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
return nextValueNumber++;
}
- if (cdep->getNumOperands() != C->getNumOperands()) {
+ if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
- for (unsigned i = 1; i < C->getNumOperands(); ++i) {
- uint32_t c_vn = lookup_or_add(C->getOperand(i));
- uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
+ for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
+ uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
+ uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
if (c_vn != cd_vn) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
@@ -663,7 +664,7 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
explicit GVN(bool noloads = false)
- : FunctionPass(&ID), NoLoads(noloads), MD(0) { }
+ : FunctionPass(ID), NoLoads(noloads), MD(0) { }
private:
bool NoLoads;
@@ -714,8 +715,7 @@ FunctionPass *llvm::createGVNPass(bool NoLoads) {
return new GVN(NoLoads);
}
-static RegisterPass<GVN> X("gvn",
- "Global Value Numbering");
+INITIALIZE_PASS(GVN, "gvn", "Global Value Numbering", false, false);
void GVN::dump(DenseMap<uint32_t, Value*>& d) {
errs() << "{\n";
@@ -733,7 +733,7 @@ static bool isSafeReplacement(PHINode* p, Instruction *inst) {
for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
UI != E; ++UI)
- if (PHINode* use_phi = dyn_cast<PHINode>(UI))
+ if (PHINode* use_phi = dyn_cast<PHINode>(*UI))
if (use_phi->getParent() == inst->getParent())
return false;
@@ -868,7 +868,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
const Type *StoredValTy = StoredVal->getType();
- uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
+ uint64_t StoreSize = TD.getTypeStoreSizeInBits(StoredValTy);
uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);
// If the store and reload are the same size, we can always reuse it.
@@ -1004,18 +1004,18 @@ static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
// If the load and store are to the exact same address, they should have been
// a must alias. AA must have gotten confused.
- // FIXME: Study to see if/when this happens.
- if (LoadOffset == StoreOffset) {
+ // FIXME: Study to see if/when this happens. One case is forwarding a memset
+ // to a load from the base of the memset.
#if 0
+ if (LoadOffset == StoreOffset) {
dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
<< "Base = " << *StoreBase << "\n"
<< "Store Ptr = " << *WritePtr << "\n"
<< "Store Offs = " << StoreOffset << "\n"
<< "Load Ptr = " << *LoadPtr << "\n";
abort();
-#endif
- return -1;
}
+#endif
// If the load and store don't overlap at all, the store doesn't provide
// anything to the load. In this case, they really don't alias at all, AA
@@ -1031,11 +1031,11 @@ static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
bool isAAFailure = false;
- if (StoreOffset < LoadOffset) {
+ if (StoreOffset < LoadOffset)
isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
- } else {
+ else
isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;
- }
+
if (isAAFailure) {
#if 0
dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
@@ -1132,8 +1132,8 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
Instruction *InsertPt, const TargetData &TD){
LLVMContext &Ctx = SrcVal->getType()->getContext();
- uint64_t StoreSize = TD.getTypeSizeInBits(SrcVal->getType())/8;
- uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
+ uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
+ uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;
IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
@@ -1217,7 +1217,7 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
return ConstantFoldLoadFromConstPtr(Src, &TD);
}
-
+namespace {
struct AvailableValueInBlock {
/// BB - The basic block in question.
@@ -1291,6 +1291,8 @@ struct AvailableValueInBlock {
}
};
+}
+
/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
@@ -1308,7 +1310,7 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
// Otherwise, we have to construct SSA form.
SmallVector<PHINode*, 8> NewPHIs;
SSAUpdater SSAUpdate(&NewPHIs);
- SSAUpdate.Initialize(LI);
+ SSAUpdate.Initialize(LI->getType(), LI->getName());
const Type *LoadTy = LI->getType();
@@ -1333,8 +1335,8 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
return V;
}
-static bool isLifetimeStart(Instruction *Inst) {
- if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
+static bool isLifetimeStart(const Instruction *Inst) {
+ if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
return II->getIntrinsicID() == Intrinsic::lifetime_start;
return false;
}
@@ -1498,7 +1500,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
MD->invalidateCachedPointerInfo(V);
VN.erase(LI);
toErase.push_back(LI);
- NumGVNLoad++;
+ ++NumGVNLoad;
return true;
}
@@ -1582,7 +1584,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
FullyAvailableBlocks[UnavailableBlocks[i]] = false;
- bool NeedToSplitEdges = false;
+ SmallVector<std::pair<TerminatorInst*, unsigned>, 4> NeedToSplit;
for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
PI != E; ++PI) {
BasicBlock *Pred = *PI;
@@ -1598,12 +1600,13 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
return false;
}
unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
- toSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
- NeedToSplitEdges = true;
+ NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
}
}
- if (NeedToSplitEdges)
+ if (!NeedToSplit.empty()) {
+ toSplit.append(NeedToSplit.begin(), NeedToSplit.end());
return false;
+ }
// Decide whether PRE is profitable for this load.
unsigned NumUnavailablePreds = PredLoads.size();
@@ -1720,7 +1723,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
MD->invalidateCachedPointerInfo(V);
VN.erase(LI);
toErase.push_back(LI);
- NumPRELoad++;
+ ++NumPRELoad;
return true;
}
@@ -1781,7 +1784,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
MD->invalidateCachedPointerInfo(AvailVal);
VN.erase(L);
toErase.push_back(L);
- NumGVNLoad++;
+ ++NumGVNLoad;
return true;
}
@@ -1827,7 +1830,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
MD->invalidateCachedPointerInfo(StoredVal);
VN.erase(L);
toErase.push_back(L);
- NumGVNLoad++;
+ ++NumGVNLoad;
return true;
}
@@ -1857,7 +1860,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
MD->invalidateCachedPointerInfo(DepLI);
VN.erase(L);
toErase.push_back(L);
- NumGVNLoad++;
+ ++NumGVNLoad;
return true;
}
@@ -1868,7 +1871,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
L->replaceAllUsesWith(UndefValue::get(L->getType()));
VN.erase(L);
toErase.push_back(L);
- NumGVNLoad++;
+ ++NumGVNLoad;
return true;
}
@@ -1879,7 +1882,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
L->replaceAllUsesWith(UndefValue::get(L->getType()));
VN.erase(L);
toErase.push_back(L);
- NumGVNLoad++;
+ ++NumGVNLoad;
return true;
}
}
@@ -2011,7 +2014,7 @@ bool GVN::runOnFunction(Function& F) {
BasicBlock *BB = FI;
++FI;
bool removedBlock = MergeBlockIntoPredecessor(BB, this);
- if (removedBlock) NumGVNBlocks++;
+ if (removedBlock) ++NumGVNBlocks;
Changed |= removedBlock;
}
@@ -2107,6 +2110,11 @@ bool GVN::performPRE(Function &F) {
CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
isa<DbgInfoIntrinsic>(CurInst))
continue;
+
+ // We don't currently value number ANY inline asm calls.
+ if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
+ if (CallI->isInlineAsm())
+ continue;
uint32_t ValNo = VN.lookup(CurInst);
@@ -2123,27 +2131,28 @@ bool GVN::performPRE(Function &F) {
for (pred_iterator PI = pred_begin(CurrentBlock),
PE = pred_end(CurrentBlock); PI != PE; ++PI) {
+ BasicBlock *P = *PI;
// We're not interested in PRE where the block is its
// own predecessor, or in blocks with predecessors
// that are not reachable.
- if (*PI == CurrentBlock) {
+ if (P == CurrentBlock) {
NumWithout = 2;
break;
- } else if (!localAvail.count(*PI)) {
+ } else if (!localAvail.count(P)) {
NumWithout = 2;
break;
}
DenseMap<uint32_t, Value*>::iterator predV =
- localAvail[*PI]->table.find(ValNo);
- if (predV == localAvail[*PI]->table.end()) {
- PREPred = *PI;
- NumWithout++;
+ localAvail[P]->table.find(ValNo);
+ if (predV == localAvail[P]->table.end()) {
+ PREPred = P;
+ ++NumWithout;
} else if (predV->second == CurInst) {
NumWithout = 2;
} else {
- predMap[*PI] = predV->second;
- NumWith++;
+ predMap[P] = predV->second;
+ ++NumWith;
}
}
@@ -2198,7 +2207,7 @@ bool GVN::performPRE(Function &F) {
PREInstr->setName(CurInst->getName() + ".pre");
predMap[PREPred] = PREInstr;
VN.add(PREInstr, ValNo);
- NumGVNPRE++;
+ ++NumGVNPRE;
// Update the availability map to include the new instruction.
localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));
@@ -2208,8 +2217,10 @@ bool GVN::performPRE(Function &F) {
CurInst->getName() + ".pre-phi",
CurrentBlock->begin());
for (pred_iterator PI = pred_begin(CurrentBlock),
- PE = pred_end(CurrentBlock); PI != PE; ++PI)
- Phi->addIncoming(predMap[*PI], *PI);
+ PE = pred_end(CurrentBlock); PI != PE; ++PI) {
+ BasicBlock *P = *PI;
+ Phi->addIncoming(predMap[P], P);
+ }
VN.add(Phi, ValNo);
localAvail[CurrentBlock]->table[ValNo] = Phi;
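One detail worth calling out in the GVN changes above: the GetStoreValueForLoad hunk replaces a truncating divide-by-8 with (bits + 7) / 8 when converting a type's bit width into a byte count, so widths that are not a multiple of eight (such as i1) round up instead of down to zero. A self-contained sketch of the arithmetic, in plain C++ with no LLVM types involved:

#include <cstdio>

// Round a size in bits up to whole bytes, as the patched hunk now does.
static unsigned bytesForBits(unsigned Bits) {
  return (Bits + 7) / 8;
}

int main() {
  // i1:  truncating division gave 0 bytes; the round-up form gives 1.
  std::printf("i1  -> old %u, new %u\n", 1u / 8u, bytesForBits(1));
  // i17: truncating division gave 2 bytes; the round-up form gives 3.
  std::printf("i17 -> old %u, new %u\n", 17u / 8u, bytesForBits(17));
  return 0;
}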
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index cb563c3..af2eafc 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -43,6 +43,7 @@
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
#include "llvm/Analysis/Dominators.h"
@@ -76,7 +77,7 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
- IndVarSimplify() : LoopPass(&ID) {}
+ IndVarSimplify() : LoopPass(ID) {}
virtual bool runOnLoop(Loop *L, LPPassManager &LPM);
@@ -96,10 +97,12 @@ namespace {
private:
+ void EliminateIVComparisons();
+ void EliminateIVRemainders();
void RewriteNonIntegerIVs(Loop *L);
ICmpInst *LinearFunctionTestReplace(Loop *L, const SCEV *BackedgeTakenCount,
- Value *IndVar,
+ PHINode *IndVar,
BasicBlock *ExitingBlock,
BranchInst *BI,
SCEVExpander &Rewriter);
@@ -114,8 +117,8 @@ namespace {
}
char IndVarSimplify::ID = 0;
-static RegisterPass<IndVarSimplify>
-X("indvars", "Canonicalize Induction Variables");
+INITIALIZE_PASS(IndVarSimplify, "indvars",
+ "Canonicalize Induction Variables", false, false);
Pass *llvm::createIndVarSimplifyPass() {
return new IndVarSimplify();
@@ -128,10 +131,28 @@ Pass *llvm::createIndVarSimplifyPass() {
/// is actually a much broader range than just linear tests.
ICmpInst *IndVarSimplify::LinearFunctionTestReplace(Loop *L,
const SCEV *BackedgeTakenCount,
- Value *IndVar,
+ PHINode *IndVar,
BasicBlock *ExitingBlock,
BranchInst *BI,
SCEVExpander &Rewriter) {
+ // Special case: If the backedge-taken count is a UDiv, it's very likely a
+ // UDiv that ScalarEvolution produced in order to compute a precise
+ // expression, rather than a UDiv from the user's code. If we can't find a
+ // UDiv in the code with some simple searching, assume the former and forego
+ // rewriting the loop.
+ if (isa<SCEVUDivExpr>(BackedgeTakenCount)) {
+ ICmpInst *OrigCond = dyn_cast<ICmpInst>(BI->getCondition());
+ if (!OrigCond) return 0;
+ const SCEV *R = SE->getSCEV(OrigCond->getOperand(1));
+ R = SE->getMinusSCEV(R, SE->getConstant(R->getType(), 1));
+ if (R != BackedgeTakenCount) {
+ const SCEV *L = SE->getSCEV(OrigCond->getOperand(0));
+ L = SE->getMinusSCEV(L, SE->getConstant(L->getType(), 1));
+ if (L != BackedgeTakenCount)
+ return 0;
+ }
+ }
+
// If the exiting block is not the same as the backedge block, we must compare
// against the preincremented value, otherwise we prefer to compare against
// the post-incremented value.
@@ -141,12 +162,12 @@ ICmpInst *IndVarSimplify::LinearFunctionTestReplace(Loop *L,
// Add one to the "backedge-taken" count to get the trip count.
// If this addition may overflow, we have to be more pessimistic and
// cast the induction variable before doing the add.
- const SCEV *Zero = SE->getIntegerSCEV(0, BackedgeTakenCount->getType());
+ const SCEV *Zero = SE->getConstant(BackedgeTakenCount->getType(), 0);
const SCEV *N =
SE->getAddExpr(BackedgeTakenCount,
- SE->getIntegerSCEV(1, BackedgeTakenCount->getType()));
+ SE->getConstant(BackedgeTakenCount->getType(), 1));
if ((isa<SCEVConstant>(N) && !N->isZero()) ||
- SE->isLoopGuardedByCond(L, ICmpInst::ICMP_NE, N, Zero)) {
+ SE->isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, N, Zero)) {
// No overflow. Cast the sum.
RHS = SE->getTruncateOrZeroExtend(N, IndVar->getType());
} else {
@@ -154,13 +175,13 @@ ICmpInst *IndVarSimplify::LinearFunctionTestReplace(Loop *L,
RHS = SE->getTruncateOrZeroExtend(BackedgeTakenCount,
IndVar->getType());
RHS = SE->getAddExpr(RHS,
- SE->getIntegerSCEV(1, IndVar->getType()));
+ SE->getConstant(IndVar->getType(), 1));
}
// The BackedgeTaken expression contains the number of times that the
// backedge branches to the loop header. This is one less than the
// number of times the loop executes, so use the incremented indvar.
- CmpIndVar = L->getCanonicalInductionVariableIncrement();
+ CmpIndVar = IndVar->getIncomingValueForBlock(ExitingBlock);
} else {
// We have to use the preincremented value...
RHS = SE->getTruncateOrZeroExtend(BackedgeTakenCount,
@@ -215,7 +236,7 @@ ICmpInst *IndVarSimplify::LinearFunctionTestReplace(Loop *L,
void IndVarSimplify::RewriteLoopExitValues(Loop *L,
SCEVExpander &Rewriter) {
// Verify the input to the pass in already in LCSSA form.
- assert(L->isLCSSAForm());
+ assert(L->isLCSSAForm(*DT));
SmallVector<BasicBlock*, 8> ExitBlocks;
L->getUniqueExitBlocks(ExitBlocks);
@@ -306,6 +327,10 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L,
}
}
}
+
+ // The insertion point instruction may have been deleted; clear it out
+ // so that the rewriter doesn't trip over it later.
+ Rewriter.clearInsertPoint();
}
void IndVarSimplify::RewriteNonIntegerIVs(Loop *L) {
@@ -331,7 +356,128 @@ void IndVarSimplify::RewriteNonIntegerIVs(Loop *L) {
SE->forgetLoop(L);
}
+void IndVarSimplify::EliminateIVComparisons() {
+ SmallVector<WeakVH, 16> DeadInsts;
+
+ // Look for ICmp users.
+ for (IVUsers::iterator I = IU->begin(), E = IU->end(); I != E; ++I) {
+ IVStrideUse &UI = *I;
+ ICmpInst *ICmp = dyn_cast<ICmpInst>(UI.getUser());
+ if (!ICmp) continue;
+
+ bool Swapped = UI.getOperandValToReplace() == ICmp->getOperand(1);
+ ICmpInst::Predicate Pred = ICmp->getPredicate();
+ if (Swapped) Pred = ICmpInst::getSwappedPredicate(Pred);
+
+ // Get the SCEVs for the ICmp operands.
+ const SCEV *S = IU->getReplacementExpr(UI);
+ const SCEV *X = SE->getSCEV(ICmp->getOperand(!Swapped));
+
+ // Simplify unnecessary loops away.
+ const Loop *ICmpLoop = LI->getLoopFor(ICmp->getParent());
+ S = SE->getSCEVAtScope(S, ICmpLoop);
+ X = SE->getSCEVAtScope(X, ICmpLoop);
+
+ // If the condition is always true or always false, replace it with
+ // a constant value.
+ if (SE->isKnownPredicate(Pred, S, X))
+ ICmp->replaceAllUsesWith(ConstantInt::getTrue(ICmp->getContext()));
+ else if (SE->isKnownPredicate(ICmpInst::getInversePredicate(Pred), S, X))
+ ICmp->replaceAllUsesWith(ConstantInt::getFalse(ICmp->getContext()));
+ else
+ continue;
+
+ DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n');
+ DeadInsts.push_back(ICmp);
+ }
+
+ // Now that we're done iterating through lists, clean up any instructions
+ // which are now dead.
+ while (!DeadInsts.empty())
+ if (Instruction *Inst =
+ dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val()))
+ RecursivelyDeleteTriviallyDeadInstructions(Inst);
+}
+
+void IndVarSimplify::EliminateIVRemainders() {
+ SmallVector<WeakVH, 16> DeadInsts;
+
+ // Look for SRem and URem users.
+ for (IVUsers::iterator I = IU->begin(), E = IU->end(); I != E; ++I) {
+ IVStrideUse &UI = *I;
+ BinaryOperator *Rem = dyn_cast<BinaryOperator>(UI.getUser());
+ if (!Rem) continue;
+
+ bool isSigned = Rem->getOpcode() == Instruction::SRem;
+ if (!isSigned && Rem->getOpcode() != Instruction::URem)
+ continue;
+
+ // We're only interested in the case where we know something about
+ // the numerator.
+ if (UI.getOperandValToReplace() != Rem->getOperand(0))
+ continue;
+
+ // Get the SCEVs for the ICmp operands.
+ const SCEV *S = SE->getSCEV(Rem->getOperand(0));
+ const SCEV *X = SE->getSCEV(Rem->getOperand(1));
+
+ // Simplify unnecessary loops away.
+ const Loop *ICmpLoop = LI->getLoopFor(Rem->getParent());
+ S = SE->getSCEVAtScope(S, ICmpLoop);
+ X = SE->getSCEVAtScope(X, ICmpLoop);
+
+ // i % n --> i if i is in [0,n).
+ if ((!isSigned || SE->isKnownNonNegative(S)) &&
+ SE->isKnownPredicate(isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
+ S, X))
+ Rem->replaceAllUsesWith(Rem->getOperand(0));
+ else {
+ // (i+1) % n --> (i+1)==n?0:(i+1) if i is in [0,n).
+ const SCEV *LessOne =
+ SE->getMinusSCEV(S, SE->getConstant(S->getType(), 1));
+ if ((!isSigned || SE->isKnownNonNegative(LessOne)) &&
+ SE->isKnownPredicate(isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
+ LessOne, X)) {
+ ICmpInst *ICmp = new ICmpInst(Rem, ICmpInst::ICMP_EQ,
+ Rem->getOperand(0), Rem->getOperand(1),
+ "tmp");
+ SelectInst *Sel =
+ SelectInst::Create(ICmp,
+ ConstantInt::get(Rem->getType(), 0),
+ Rem->getOperand(0), "tmp", Rem);
+ Rem->replaceAllUsesWith(Sel);
+ } else
+ continue;
+ }
+
+ // Inform IVUsers about the new users.
+ if (Instruction *I = dyn_cast<Instruction>(Rem->getOperand(0)))
+ IU->AddUsersIfInteresting(I);
+
+ DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
+ DeadInsts.push_back(Rem);
+ }
+
+ // Now that we're done iterating through lists, clean up any instructions
+ // which are now dead.
+ while (!DeadInsts.empty())
+ if (Instruction *Inst =
+ dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val()))
+ RecursivelyDeleteTriviallyDeadInstructions(Inst);
+}
+
bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
+ // If LoopSimplify form is not available, stay out of trouble. Some notes:
+ // - LSR currently only supports LoopSimplify-form loops. Indvars'
+ // canonicalization can be a pessimization without LSR to "clean up"
+ // afterwards.
+ // - We depend on having a preheader; in particular,
+ // Loop::getCanonicalInductionVariable only supports loops with preheaders,
+ // and we're in trouble if we can't find the induction variable even when
+ // we've manually inserted one.
+ if (!L->isLoopSimplifyForm())
+ return false;
+
IU = &getAnalysis<IVUsers>();
LI = &getAnalysis<LoopInfo>();
SE = &getAnalysis<ScalarEvolution>();
@@ -357,6 +503,12 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount))
RewriteLoopExitValues(L, Rewriter);
+ // Simplify ICmp IV users.
+ EliminateIVComparisons();
+
+ // Simplify SRem and URem IV users.
+ EliminateIVRemainders();
+
// Compute the type of the largest recurrence expression, and decide whether
// a canonical induction variable should be inserted.
const Type *LargestType = 0;
@@ -382,7 +534,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
// Now that we know the largest of the induction variable expressions
// in this loop, insert a canonical induction variable of the largest size.
- Value *IndVar = 0;
+ PHINode *IndVar = 0;
if (NeedCannIV) {
// Check to see if the loop already has any canonical-looking induction
// variables. If any are present and wider than the planned canonical
@@ -445,10 +597,50 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
// Clean up dead instructions.
Changed |= DeleteDeadPHIs(L->getHeader());
// Check a post-condition.
- assert(L->isLCSSAForm() && "Indvars did not leave the loop in lcssa form!");
+ assert(L->isLCSSAForm(*DT) && "Indvars did not leave the loop in lcssa form!");
return Changed;
}
+// FIXME: It is an extremely bad idea to indvar substitute anything more
+// complex than affine induction variables. Doing so will put expensive
+// polynomial evaluations inside of the loop, and the str reduction pass
+// currently can only reduce affine polynomials. For now just disable
+// indvar subst on anything more complex than an affine addrec, unless
+// it can be expanded to a trivial value.
+static bool isSafe(const SCEV *S, const Loop *L) {
+ // Loop-invariant values are safe.
+ if (S->isLoopInvariant(L)) return true;
+
+ // Affine addrecs are safe. Non-affine are not, because LSR doesn't know how
+ // to transform them into efficient code.
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
+ return AR->isAffine();
+
+ // An add is safe if all its operands are safe.
+ if (const SCEVCommutativeExpr *Commutative = dyn_cast<SCEVCommutativeExpr>(S)) {
+ for (SCEVCommutativeExpr::op_iterator I = Commutative->op_begin(),
+ E = Commutative->op_end(); I != E; ++I)
+ if (!isSafe(*I, L)) return false;
+ return true;
+ }
+
+ // A cast is safe if its operand is.
+ if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
+ return isSafe(C->getOperand(), L);
+
+ // A udiv is safe if its operands are.
+ if (const SCEVUDivExpr *UD = dyn_cast<SCEVUDivExpr>(S))
+ return isSafe(UD->getLHS(), L) &&
+ isSafe(UD->getRHS(), L);
+
+ // SCEVUnknown is always safe.
+ if (isa<SCEVUnknown>(S))
+ return true;
+
+ // Nothing else is safe.
+ return false;
+}
+
void IndVarSimplify::RewriteIVExpressions(Loop *L, SCEVExpander &Rewriter) {
SmallVector<WeakVH, 16> DeadInsts;
@@ -460,7 +652,6 @@ void IndVarSimplify::RewriteIVExpressions(Loop *L, SCEVExpander &Rewriter) {
// the need for the code evaluation methods to insert induction variables
// of different sizes.
for (IVUsers::iterator UI = IU->begin(), E = IU->end(); UI != E; ++UI) {
- const SCEV *Stride = UI->getStride();
Value *Op = UI->getOperandValToReplace();
const Type *UseTy = Op->getType();
Instruction *User = UI->getUser();
@@ -481,7 +672,7 @@ void IndVarSimplify::RewriteIVExpressions(Loop *L, SCEVExpander &Rewriter) {
// currently can only reduce affine polynomials. For now just disable
// indvar subst on anything more complex than an affine addrec, unless
// it can be expanded to a trivial value.
- if (!AR->isLoopInvariant(L) && !Stride->isLoopInvariant(L))
+ if (!isSafe(AR, L))
continue;
// Determine the insertion point for this user. By default, insert
@@ -505,6 +696,13 @@ void IndVarSimplify::RewriteIVExpressions(Loop *L, SCEVExpander &Rewriter) {
// Now expand it into actual Instructions and patch it into place.
Value *NewVal = Rewriter.expandCodeFor(AR, UseTy, InsertPt);
+ // Inform ScalarEvolution that this value is changing. The change doesn't
+ // affect its value, but it does potentially affect which use lists the
+ // value will be on after the replacement, which affects ScalarEvolution's
+ // ability to walk use lists and drop dangling pointers when a value is
+ // deleted.
+ SE->forgetValue(User);
+
// Patch the new value into place.
if (Op->hasName())
NewVal->takeName(Op);
@@ -548,26 +746,34 @@ void IndVarSimplify::SinkUnusedInvariants(Loop *L) {
// New instructions were inserted at the end of the preheader.
if (isa<PHINode>(I))
break;
+
// Don't move instructions which might have side effects, since the side
- // effects need to complete before instructions inside the loop. Also
- // don't move instructions which might read memory, since the loop may
- // modify memory. Note that it's okay if the instruction might have
- // undefined behavior: LoopSimplify guarantees that the preheader
- // dominates the exit block.
+ // effects need to complete before instructions inside the loop. Also don't
+ // move instructions which might read memory, since the loop may modify
+ // memory. Note that it's okay if the instruction might have undefined
+ // behavior: LoopSimplify guarantees that the preheader dominates the exit
+ // block.
if (I->mayHaveSideEffects() || I->mayReadFromMemory())
continue;
+
+ // Skip debug info intrinsics.
+ if (isa<DbgInfoIntrinsic>(I))
+ continue;
+
// Don't sink static AllocaInsts out of the entry block, which would
// turn them into dynamic allocas!
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
if (AI->isStaticAlloca())
continue;
+
// Determine if there is a use in or before the loop (direct or
// otherwise).
bool UsedInLoop = false;
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
UI != UE; ++UI) {
- BasicBlock *UseBB = cast<Instruction>(UI)->getParent();
- if (PHINode *P = dyn_cast<PHINode>(UI)) {
+ User *U = *UI;
+ BasicBlock *UseBB = cast<Instruction>(U)->getParent();
+ if (PHINode *P = dyn_cast<PHINode>(U)) {
unsigned i =
PHINode::getIncomingValueNumForOperand(UI.getOperandNo());
UseBB = P->getIncomingBlock(i);
@@ -577,53 +783,45 @@ void IndVarSimplify::SinkUnusedInvariants(Loop *L) {
break;
}
}
+
// If there is, the def must remain in the preheader.
if (UsedInLoop)
continue;
+
// Otherwise, sink it to the exit block.
Instruction *ToMove = I;
bool Done = false;
- if (I != Preheader->begin())
- --I;
- else
+
+ if (I != Preheader->begin()) {
+ // Skip debug info intrinsics.
+ do {
+ --I;
+ } while (isa<DbgInfoIntrinsic>(I) && I != Preheader->begin());
+
+ if (isa<DbgInfoIntrinsic>(I) && I == Preheader->begin())
+ Done = true;
+ } else {
Done = true;
+ }
+
ToMove->moveBefore(InsertPt);
- if (Done)
- break;
+ if (Done) break;
InsertPt = ToMove;
}
}
-/// Return true if it is OK to use SIToFPInst for an induction variable
-/// with given initial and exit values.
-static bool useSIToFPInst(ConstantFP &InitV, ConstantFP &ExitV,
- uint64_t intIV, uint64_t intEV) {
-
- if (InitV.getValueAPF().isNegative() || ExitV.getValueAPF().isNegative())
- return true;
-
- // If the iteration range can be handled by SIToFPInst then use it.
- APInt Max = APInt::getSignedMaxValue(32);
- if (Max.getZExtValue() > static_cast<uint64_t>(abs64(intEV - intIV)))
- return true;
-
- return false;
-}
-
-/// convertToInt - Convert APF to an integer, if possible.
-static bool convertToInt(const APFloat &APF, uint64_t *intVal) {
-
+/// ConvertToSInt - Convert APF to an integer, if possible.
+static bool ConvertToSInt(const APFloat &APF, int64_t &IntVal) {
bool isExact = false;
if (&APF.getSemantics() == &APFloat::PPCDoubleDouble)
return false;
- if (APF.convertToInteger(intVal, 32, APF.isNegative(),
- APFloat::rmTowardZero, &isExact)
- != APFloat::opOK)
- return false;
- if (!isExact)
+ // See if we can convert this to an int64_t
+ uint64_t UIntVal;
+ if (APF.convertToInteger(&UIntVal, 64, true, APFloat::rmTowardZero,
+ &isExact) != APFloat::opOK || !isExact)
return false;
+ IntVal = UIntVal;
return true;
-
}
/// HandleFloatingPointIV - If the loop has floating induction variable
@@ -635,144 +833,200 @@ static bool convertToInt(const APFloat &APF, uint64_t *intVal) {
/// for(int i = 0; i < 10000; ++i)
/// bar((double)i);
///
-void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PH) {
-
- unsigned IncomingEdge = L->contains(PH->getIncomingBlock(0));
+void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
+ unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
unsigned BackEdge = IncomingEdge^1;
// Check incoming value.
- ConstantFP *InitValue = dyn_cast<ConstantFP>(PH->getIncomingValue(IncomingEdge));
- if (!InitValue) return;
- uint64_t newInitValue =
- Type::getInt32Ty(PH->getContext())->getPrimitiveSizeInBits();
- if (!convertToInt(InitValue->getValueAPF(), &newInitValue))
+ ConstantFP *InitValueVal =
+ dyn_cast<ConstantFP>(PN->getIncomingValue(IncomingEdge));
+
+ int64_t InitValue;
+ if (!InitValueVal || !ConvertToSInt(InitValueVal->getValueAPF(), InitValue))
return;
- // Check IV increment. Reject this PH if increment operation is not
+ // Check IV increment. Reject this PN if increment operation is not
// an add or increment value can not be represented by an integer.
BinaryOperator *Incr =
- dyn_cast<BinaryOperator>(PH->getIncomingValue(BackEdge));
- if (!Incr) return;
- if (Incr->getOpcode() != Instruction::FAdd) return;
- ConstantFP *IncrValue = NULL;
- unsigned IncrVIndex = 1;
- if (Incr->getOperand(1) == PH)
- IncrVIndex = 0;
- IncrValue = dyn_cast<ConstantFP>(Incr->getOperand(IncrVIndex));
- if (!IncrValue) return;
- uint64_t newIncrValue =
- Type::getInt32Ty(PH->getContext())->getPrimitiveSizeInBits();
- if (!convertToInt(IncrValue->getValueAPF(), &newIncrValue))
+ dyn_cast<BinaryOperator>(PN->getIncomingValue(BackEdge));
+ if (Incr == 0 || Incr->getOpcode() != Instruction::FAdd) return;
+
+ // If this is not an add of the PHI with a constantfp, or if the constant fp
+ // is not an integer, bail out.
+ ConstantFP *IncValueVal = dyn_cast<ConstantFP>(Incr->getOperand(1));
+ int64_t IncValue;
+ if (IncValueVal == 0 || Incr->getOperand(0) != PN ||
+ !ConvertToSInt(IncValueVal->getValueAPF(), IncValue))
return;
- // Check Incr uses. One user is PH and the other users is exit condition used
- // by the conditional terminator.
+ // Check Incr uses. One user is PN and the other user is an exit condition
+ // used by the conditional terminator.
Value::use_iterator IncrUse = Incr->use_begin();
- Instruction *U1 = cast<Instruction>(IncrUse++);
+ Instruction *U1 = cast<Instruction>(*IncrUse++);
if (IncrUse == Incr->use_end()) return;
- Instruction *U2 = cast<Instruction>(IncrUse++);
+ Instruction *U2 = cast<Instruction>(*IncrUse++);
if (IncrUse != Incr->use_end()) return;
- // Find exit condition.
- FCmpInst *EC = dyn_cast<FCmpInst>(U1);
- if (!EC)
- EC = dyn_cast<FCmpInst>(U2);
- if (!EC) return;
-
- if (BranchInst *BI = dyn_cast<BranchInst>(EC->getParent()->getTerminator())) {
- if (!BI->isConditional()) return;
- if (BI->getCondition() != EC) return;
- }
-
- // Find exit value. If exit value can not be represented as an integer then
- // do not handle this floating point PH.
- ConstantFP *EV = NULL;
- unsigned EVIndex = 1;
- if (EC->getOperand(1) == Incr)
- EVIndex = 0;
- EV = dyn_cast<ConstantFP>(EC->getOperand(EVIndex));
- if (!EV) return;
- uint64_t intEV = Type::getInt32Ty(PH->getContext())->getPrimitiveSizeInBits();
- if (!convertToInt(EV->getValueAPF(), &intEV))
+ // Find exit condition, which is an fcmp. If it doesn't exist, or if it isn't
+ // only used by a branch, we can't transform it.
+ FCmpInst *Compare = dyn_cast<FCmpInst>(U1);
+ if (!Compare)
+ Compare = dyn_cast<FCmpInst>(U2);
+ if (Compare == 0 || !Compare->hasOneUse() ||
+ !isa<BranchInst>(Compare->use_back()))
return;
-
+
+ BranchInst *TheBr = cast<BranchInst>(Compare->use_back());
+
+ // We need to verify that the branch actually controls the iteration count
+ // of the loop. If not, the new IV can overflow and no one will notice.
+ // The branch block must be in the loop and one of the successors must be out
+ // of the loop.
+ assert(TheBr->isConditional() && "Can't use fcmp if not conditional");
+ if (!L->contains(TheBr->getParent()) ||
+ (L->contains(TheBr->getSuccessor(0)) &&
+ L->contains(TheBr->getSuccessor(1))))
+ return;
+
+
+ // If it isn't a comparison with an integer-as-fp (the exit value), we can't
+ // transform it.
+ ConstantFP *ExitValueVal = dyn_cast<ConstantFP>(Compare->getOperand(1));
+ int64_t ExitValue;
+ if (ExitValueVal == 0 ||
+ !ConvertToSInt(ExitValueVal->getValueAPF(), ExitValue))
+ return;
+
// Find new predicate for integer comparison.
CmpInst::Predicate NewPred = CmpInst::BAD_ICMP_PREDICATE;
- switch (EC->getPredicate()) {
+ switch (Compare->getPredicate()) {
+ default: return; // Unknown comparison.
case CmpInst::FCMP_OEQ:
- case CmpInst::FCMP_UEQ:
- NewPred = CmpInst::ICMP_EQ;
- break;
+ case CmpInst::FCMP_UEQ: NewPred = CmpInst::ICMP_EQ; break;
+ case CmpInst::FCMP_ONE:
+ case CmpInst::FCMP_UNE: NewPred = CmpInst::ICMP_NE; break;
case CmpInst::FCMP_OGT:
- case CmpInst::FCMP_UGT:
- NewPred = CmpInst::ICMP_UGT;
- break;
+ case CmpInst::FCMP_UGT: NewPred = CmpInst::ICMP_SGT; break;
case CmpInst::FCMP_OGE:
- case CmpInst::FCMP_UGE:
- NewPred = CmpInst::ICMP_UGE;
- break;
+ case CmpInst::FCMP_UGE: NewPred = CmpInst::ICMP_SGE; break;
case CmpInst::FCMP_OLT:
- case CmpInst::FCMP_ULT:
- NewPred = CmpInst::ICMP_ULT;
- break;
+ case CmpInst::FCMP_ULT: NewPred = CmpInst::ICMP_SLT; break;
case CmpInst::FCMP_OLE:
- case CmpInst::FCMP_ULE:
- NewPred = CmpInst::ICMP_ULE;
- break;
- default:
- break;
+ case CmpInst::FCMP_ULE: NewPred = CmpInst::ICMP_SLE; break;
+ }
+
+ // We convert the floating point induction variable to a signed i32 value if
+ // we can. This is only safe if the comparison will not overflow in a way
+ // that won't be trapped by the integer equivalent operations. Check for this
+ // now.
+ // TODO: We could use i64 if it is native and the range requires it.
+
+ // The start/stride/exit values must all fit in signed i32.
+ if (!isInt<32>(InitValue) || !isInt<32>(IncValue) || !isInt<32>(ExitValue))
+ return;
+
+ // If not actually striding (add x, 0.0), avoid touching the code.
+ if (IncValue == 0)
+ return;
+
+ // Positive and negative strides have different safety conditions.
+ if (IncValue > 0) {
+ // If we have a positive stride, we require the init to be less than the
+ // exit value and an equality or less than comparison.
+ if (InitValue >= ExitValue ||
+ NewPred == CmpInst::ICMP_SGT || NewPred == CmpInst::ICMP_SGE)
+ return;
+
+ uint32_t Range = uint32_t(ExitValue-InitValue);
+ if (NewPred == CmpInst::ICMP_SLE) {
+ // Normalize SLE -> SLT, check for infinite loop.
+ if (++Range == 0) return; // Range overflows.
+ }
+
+ unsigned Leftover = Range % uint32_t(IncValue);
+
+ // If this is an equality comparison, we require that the strided value
+ // exactly land on the exit value, otherwise the IV condition will wrap
+ // around and do things the fp IV wouldn't.
+ if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) &&
+ Leftover != 0)
+ return;
+
+ // If the stride would wrap around the i32 before exiting, we can't
+ // transform the IV.
+ if (Leftover != 0 && int32_t(ExitValue+IncValue) < ExitValue)
+ return;
+
+ } else {
+ // If we have a negative stride, we require the init to be greater than the
+ // exit value and an equality or greater than comparison.
+ if (InitValue >= ExitValue ||
+ NewPred == CmpInst::ICMP_SLT || NewPred == CmpInst::ICMP_SLE)
+ return;
+
+ uint32_t Range = uint32_t(InitValue-ExitValue);
+ if (NewPred == CmpInst::ICMP_SGE) {
+ // Normalize SGE -> SGT, check for infinite loop.
+ if (++Range == 0) return; // Range overflows.
+ }
+
+ unsigned Leftover = Range % uint32_t(-IncValue);
+
+ // If this is an equality comparison, we require that the strided value
+ // exactly land on the exit value, otherwise the IV condition will wrap
+ // around and do things the fp IV wouldn't.
+ if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) &&
+ Leftover != 0)
+ return;
+
+ // If the stride would wrap around the i32 before exiting, we can't
+ // transform the IV.
+ if (Leftover != 0 && int32_t(ExitValue+IncValue) > ExitValue)
+ return;
}
- if (NewPred == CmpInst::BAD_ICMP_PREDICATE) return;
+
+ const IntegerType *Int32Ty = Type::getInt32Ty(PN->getContext());
// Insert new integer induction variable.
- PHINode *NewPHI = PHINode::Create(Type::getInt32Ty(PH->getContext()),
- PH->getName()+".int", PH);
- NewPHI->addIncoming(ConstantInt::get(Type::getInt32Ty(PH->getContext()),
- newInitValue),
- PH->getIncomingBlock(IncomingEdge));
-
- Value *NewAdd = BinaryOperator::CreateAdd(NewPHI,
- ConstantInt::get(Type::getInt32Ty(PH->getContext()),
- newIncrValue),
- Incr->getName()+".int", Incr);
- NewPHI->addIncoming(NewAdd, PH->getIncomingBlock(BackEdge));
-
- // The back edge is edge 1 of newPHI, whatever it may have been in the
- // original PHI.
- ConstantInt *NewEV = ConstantInt::get(Type::getInt32Ty(PH->getContext()),
- intEV);
- Value *LHS = (EVIndex == 1 ? NewPHI->getIncomingValue(1) : NewEV);
- Value *RHS = (EVIndex == 1 ? NewEV : NewPHI->getIncomingValue(1));
- ICmpInst *NewEC = new ICmpInst(EC->getParent()->getTerminator(),
- NewPred, LHS, RHS, EC->getName());
-
- // In the following deletions, PH may become dead and may be deleted.
+ PHINode *NewPHI = PHINode::Create(Int32Ty, PN->getName()+".int", PN);
+ NewPHI->addIncoming(ConstantInt::get(Int32Ty, InitValue),
+ PN->getIncomingBlock(IncomingEdge));
+
+ Value *NewAdd =
+ BinaryOperator::CreateAdd(NewPHI, ConstantInt::get(Int32Ty, IncValue),
+ Incr->getName()+".int", Incr);
+ NewPHI->addIncoming(NewAdd, PN->getIncomingBlock(BackEdge));
+
+ ICmpInst *NewCompare = new ICmpInst(TheBr, NewPred, NewAdd,
+ ConstantInt::get(Int32Ty, ExitValue),
+ Compare->getName());
+
+ // In the following deletions, PN may become dead and may be deleted.
// Use a WeakVH to observe whether this happens.
- WeakVH WeakPH = PH;
+ WeakVH WeakPH = PN;
- // Delete old, floating point, exit comparison instruction.
- NewEC->takeName(EC);
- EC->replaceAllUsesWith(NewEC);
- RecursivelyDeleteTriviallyDeadInstructions(EC);
+ // Delete the old floating point exit comparison. The branch starts using the
+ // new comparison.
+ NewCompare->takeName(Compare);
+ Compare->replaceAllUsesWith(NewCompare);
+ RecursivelyDeleteTriviallyDeadInstructions(Compare);
- // Delete old, floating point, increment instruction.
+ // Delete the old floating point increment.
Incr->replaceAllUsesWith(UndefValue::get(Incr->getType()));
RecursivelyDeleteTriviallyDeadInstructions(Incr);
- // Replace floating induction variable, if it isn't already deleted.
- // Give SIToFPInst preference over UIToFPInst because it is faster on
- // platforms that are widely used.
- if (WeakPH && !PH->use_empty()) {
- if (useSIToFPInst(*InitValue, *EV, newInitValue, intEV)) {
- SIToFPInst *Conv = new SIToFPInst(NewPHI, PH->getType(), "indvar.conv",
- PH->getParent()->getFirstNonPHI());
- PH->replaceAllUsesWith(Conv);
- } else {
- UIToFPInst *Conv = new UIToFPInst(NewPHI, PH->getType(), "indvar.conv",
- PH->getParent()->getFirstNonPHI());
- PH->replaceAllUsesWith(Conv);
- }
- RecursivelyDeleteTriviallyDeadInstructions(PH);
+ // If the FP induction variable still has uses, this is because something else
+ // in the loop uses its value. In order to canonicalize the induction
+ // variable, we chose to eliminate the IV and rewrite it in terms of an
+ // int->fp cast.
+ //
+ // We give preference to sitofp over uitofp because it is faster on most
+ // platforms.
+ if (WeakPH) {
+ Value *Conv = new SIToFPInst(NewPHI, PN->getType(), "indvar.conv",
+ PN->getParent()->getFirstNonPHI());
+ PN->replaceAllUsesWith(Conv);
+ RecursivelyDeleteTriviallyDeadInstructions(PN);
}
// Add a new IVUsers entry for the newly-created integer PHI.
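The largest block above is the rewrite of HandleFloatingPointIV, which now turns a floating-point induction variable with integral start, stride and exit values into a signed i32 counter plus a single sitofp at the remaining uses, guarded by explicit range and wrap checks. At the source level the intended transformation corresponds to something like the following sketch (illustrative only; bar is a placeholder function):

#include <cstdio>

static void bar(double d) { std::printf("%f\n", d); }

// Before: the loop counter is carried in floating point.
static void fp_iv() {
  for (double d = 0.0; d < 100.0; d += 1.0)
    bar(d);
}

// After (conceptually): an integer induction variable, converted at the use.
static void int_iv() {
  for (int i = 0; i < 100; ++i)
    bar((double)i);
}

int main() {
  fp_iv();
  int_iv();
  return 0;
}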
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index a6489ec..104d5ae 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -18,11 +18,13 @@
#include "llvm/Pass.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
+#include "llvm/Analysis/Loads.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -44,7 +46,10 @@ Threshold("jump-threading-threshold",
// Turn on use of LazyValueInfo.
static cl::opt<bool>
-EnableLVI("enable-jump-threading-lvi", cl::ReallyHidden);
+EnableLVI("enable-jump-threading-lvi",
+ cl::desc("Use LVI for jump threading"),
+ cl::init(true),
+ cl::ReallyHidden);
@@ -73,15 +78,32 @@ namespace {
#else
SmallSet<AssertingVH<BasicBlock>, 16> LoopHeaders;
#endif
+ DenseSet<std::pair<Value*, BasicBlock*> > RecursionSet;
+
+ // RAII helper for updating the recursion stack.
+ struct RecursionSetRemover {
+ DenseSet<std::pair<Value*, BasicBlock*> > &TheSet;
+ std::pair<Value*, BasicBlock*> ThePair;
+
+ RecursionSetRemover(DenseSet<std::pair<Value*, BasicBlock*> > &S,
+ std::pair<Value*, BasicBlock*> P)
+ : TheSet(S), ThePair(P) { }
+
+ ~RecursionSetRemover() {
+ TheSet.erase(ThePair);
+ }
+ };
public:
static char ID; // Pass identification
- JumpThreading() : FunctionPass(&ID) {}
+ JumpThreading() : FunctionPass(ID) {}
bool runOnFunction(Function &F);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- if (EnableLVI)
+ if (EnableLVI) {
AU.addRequired<LazyValueInfo>();
+ AU.addPreserved<LazyValueInfo>();
+ }
}
void FindLoopHeaders(Function &F);
@@ -110,8 +132,8 @@ namespace {
}
char JumpThreading::ID = 0;
-static RegisterPass<JumpThreading>
-X("jump-threading", "Jump Threading");
+INITIALIZE_PASS(JumpThreading, "jump-threading",
+ "Jump Threading", false, false);
// Public interface to the Jump Threading pass
FunctionPass *llvm::createJumpThreadingPass() { return new JumpThreading(); }
@@ -143,6 +165,7 @@ bool JumpThreading::runOnFunction(Function &F) {
DEBUG(dbgs() << " JT: Deleting dead block '" << BB->getName()
<< "' with terminator: " << *BB->getTerminator() << '\n');
LoopHeaders.erase(BB);
+ if (LVI) LVI->eraseBlock(BB);
DeleteDeadBlock(BB);
Changed = true;
} else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
@@ -163,6 +186,11 @@ bool JumpThreading::runOnFunction(Function &F) {
bool ErasedFromLoopHeaders = LoopHeaders.erase(BB);
BasicBlock *Succ = BI->getSuccessor(0);
+ // FIXME: It is always conservatively correct to drop the info
+ // for a block even if it doesn't get erased. This isn't totally
+ // awesome, but it allows us to use AssertingVH to prevent nasty
+ // dangling pointer issues within LazyValueInfo.
+ if (LVI) LVI->eraseBlock(BB);
if (TryToSimplifyUncondBranchFromEmptyBlock(BB)) {
Changed = true;
// If we deleted BB and BB was the header of a loop, then the
@@ -250,6 +278,17 @@ void JumpThreading::FindLoopHeaders(Function &F) {
LoopHeaders.insert(const_cast<BasicBlock*>(Edges[i].second));
}
+// Helper method for ComputeValueKnownInPredecessors. If Value is a
+// ConstantInt, push it. If it's an undef, push 0. Otherwise, do nothing.
+static void PushConstantIntOrUndef(SmallVectorImpl<std::pair<ConstantInt*,
+ BasicBlock*> > &Result,
+ Constant *Value, BasicBlock* BB){
+ if (ConstantInt *FoldedCInt = dyn_cast<ConstantInt>(Value))
+ Result.push_back(std::make_pair(FoldedCInt, BB));
+ else if (isa<UndefValue>(Value))
+ Result.push_back(std::make_pair((ConstantInt*)0, BB));
+}
+
/// ComputeValueKnownInPredecessors - Given a basic block BB and a value V, see
/// if we can infer that the value is a known ConstantInt in any of our
/// predecessors. If so, return the known list of value and pred BB in the
@@ -259,12 +298,24 @@ void JumpThreading::FindLoopHeaders(Function &F) {
///
bool JumpThreading::
ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
+ // This method walks up use-def chains recursively. Because of this, we could
+ // get into an infinite loop going around loops in the use-def chain. To
+ // prevent this, keep track of what (value, block) pairs we've already visited
+ // and terminate the search if we loop back to them
+ if (!RecursionSet.insert(std::make_pair(V, BB)).second)
+ return false;
+
+ // An RAII helper to remove this pair from the recursion set once the recursion
+ // stack pops back out again.
+ RecursionSetRemover remover(RecursionSet, std::make_pair(V, BB));
+
// If V is a constantint, then it is known in all predecessors.
if (isa<ConstantInt>(V) || isa<UndefValue>(V)) {
ConstantInt *CI = dyn_cast<ConstantInt>(V);
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
Result.push_back(std::make_pair(CI, *PI));
+
return true;
}
@@ -288,14 +339,15 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
// Perhaps getConstantOnEdge should be smart enough to do this?
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ BasicBlock *P = *PI;
// If the value is known by LazyValueInfo to be a constant in a
// predecessor, use that information to try to thread this block.
- Constant *PredCst = LVI->getConstantOnEdge(V, *PI, BB);
+ Constant *PredCst = LVI->getConstantOnEdge(V, P, BB);
if (PredCst == 0 ||
(!isa<ConstantInt>(PredCst) && !isa<UndefValue>(PredCst)))
continue;
- Result.push_back(std::make_pair(dyn_cast<ConstantInt>(PredCst), *PI));
+ Result.push_back(std::make_pair(dyn_cast<ConstantInt>(PredCst), P));
}
return !Result.empty();
@@ -311,8 +363,15 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
if (isa<ConstantInt>(InVal) || isa<UndefValue>(InVal)) {
ConstantInt *CI = dyn_cast<ConstantInt>(InVal);
Result.push_back(std::make_pair(CI, PN->getIncomingBlock(i)));
+ } else if (LVI) {
+ Constant *CI = LVI->getConstantOnEdge(InVal,
+ PN->getIncomingBlock(i), BB);
+ // LVI returns null if no value could be determined.
+ if (!CI) continue;
+ PushConstantIntOrUndef(Result, CI, PN->getIncomingBlock(i));
}
}
+
return !Result.empty();
}
@@ -336,18 +395,26 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
else
InterestingVal = ConstantInt::getFalse(I->getContext());
+ SmallPtrSet<BasicBlock*, 4> LHSKnownBBs;
+
// Scan for the sentinel. If we find an undef, force it to the
// interesting value: x|undef -> true and x&undef -> false.
for (unsigned i = 0, e = LHSVals.size(); i != e; ++i)
if (LHSVals[i].first == InterestingVal || LHSVals[i].first == 0) {
Result.push_back(LHSVals[i]);
Result.back().first = InterestingVal;
+ LHSKnownBBs.insert(LHSVals[i].second);
}
for (unsigned i = 0, e = RHSVals.size(); i != e; ++i)
if (RHSVals[i].first == InterestingVal || RHSVals[i].first == 0) {
- Result.push_back(RHSVals[i]);
- Result.back().first = InterestingVal;
+ // If we already inferred a value for this block on the LHS, don't
+ // re-add it.
+ if (!LHSKnownBBs.count(RHSVals[i].second)) {
+ Result.push_back(RHSVals[i]);
+ Result.back().first = InterestingVal;
+ }
}
+
return !Result.empty();
}
@@ -364,8 +431,27 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
if (Result[i].first)
Result[i].first =
cast<ConstantInt>(ConstantExpr::getNot(Result[i].first));
+
return true;
}
+
+ // Try to simplify some other binary operator values.
+ } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
+ SmallVector<std::pair<ConstantInt*, BasicBlock*>, 8> LHSVals;
+ ComputeValueKnownInPredecessors(BO->getOperand(0), BB, LHSVals);
+
+ // Try to use constant folding to simplify the binary operator.
+ for (unsigned i = 0, e = LHSVals.size(); i != e; ++i) {
+ Constant *V = LHSVals[i].first ? LHSVals[i].first :
+ cast<Constant>(UndefValue::get(BO->getType()));
+ Constant *Folded = ConstantExpr::get(BO->getOpcode(), V, CI);
+
+ PushConstantIntOrUndef(Result, Folded, LHSVals[i].second);
+ }
+ }
+
+ return !Result.empty();
}
// Handle compare with phi operand, where the PHI is defined in this block.
@@ -392,10 +478,8 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
Res = ConstantInt::get(Type::getInt1Ty(LHS->getContext()), ResT);
}
- if (isa<UndefValue>(Res))
- Result.push_back(std::make_pair((ConstantInt*)0, PredBB));
- else if (ConstantInt *CI = dyn_cast<ConstantInt>(Res))
- Result.push_back(std::make_pair(CI, PredBB));
+ if (Constant *ConstRes = dyn_cast<Constant>(Res))
+ PushConstantIntOrUndef(Result, ConstRes, PredBB);
}
return !Result.empty();
@@ -405,27 +489,59 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
// If comparing a live-in value against a constant, see if we know the
// live-in value on any predecessors.
if (LVI && isa<Constant>(Cmp->getOperand(1)) &&
- Cmp->getType()->isIntegerTy() && // Not vector compare.
- (!isa<Instruction>(Cmp->getOperand(0)) ||
- cast<Instruction>(Cmp->getOperand(0))->getParent() != BB)) {
- Constant *RHSCst = cast<Constant>(Cmp->getOperand(1));
-
- for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
- // If the value is known by LazyValueInfo to be a constant in a
- // predecessor, use that information to try to thread this block.
- LazyValueInfo::Tristate
- Res = LVI->getPredicateOnEdge(Cmp->getPredicate(), Cmp->getOperand(0),
- RHSCst, *PI, BB);
- if (Res == LazyValueInfo::Unknown)
- continue;
+ Cmp->getType()->isIntegerTy()) {
+ if (!isa<Instruction>(Cmp->getOperand(0)) ||
+ cast<Instruction>(Cmp->getOperand(0))->getParent() != BB) {
+ Constant *RHSCst = cast<Constant>(Cmp->getOperand(1));
+
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB);PI != E; ++PI){
+ BasicBlock *P = *PI;
+ // If the value is known by LazyValueInfo to be a constant in a
+ // predecessor, use that information to try to thread this block.
+ LazyValueInfo::Tristate Res =
+ LVI->getPredicateOnEdge(Cmp->getPredicate(), Cmp->getOperand(0),
+ RHSCst, P, BB);
+ if (Res == LazyValueInfo::Unknown)
+ continue;
- Constant *ResC = ConstantInt::get(Cmp->getType(), Res);
- Result.push_back(std::make_pair(cast<ConstantInt>(ResC), *PI));
+ Constant *ResC = ConstantInt::get(Cmp->getType(), Res);
+ Result.push_back(std::make_pair(cast<ConstantInt>(ResC), P));
+ }
+
+ return !Result.empty();
}
- return !Result.empty();
+ // Try to find a constant value for the LHS of a comparison,
+ // and evaluate it statically if we can.
+ if (Constant *CmpConst = dyn_cast<Constant>(Cmp->getOperand(1))) {
+ SmallVector<std::pair<ConstantInt*, BasicBlock*>, 8> LHSVals;
+ ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals);
+
+ for (unsigned i = 0, e = LHSVals.size(); i != e; ++i) {
+ Constant *V = LHSVals[i].first ? LHSVals[i].first :
+ cast<Constant>(UndefValue::get(CmpConst->getType()));
+ Constant *Folded = ConstantExpr::getCompare(Cmp->getPredicate(),
+ V, CmpConst);
+ PushConstantIntOrUndef(Result, Folded, LHSVals[i].second);
+ }
+
+ return !Result.empty();
+ }
}
}
+
+ if (LVI) {
+ // If all else fails, see if LVI can figure out a constant value for us.
+ Constant *CI = LVI->getConstant(V, BB);
+ ConstantInt *CInt = dyn_cast_or_null<ConstantInt>(CI);
+ if (CInt) {
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
+ Result.push_back(std::make_pair(CInt, *PI));
+ }
+
+ return !Result.empty();
+ }
+
return false;
}
@@ -476,6 +592,7 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
// Remember if SinglePred was the entry block of the function. If so, we
// will need to move BB back to the entry position.
bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
+ if (LVI) LVI->eraseBlock(SinglePred);
MergeBasicBlockIntoOnlyPred(BB);
if (isEntry && BB != &BB->getParent()->getEntryBlock())
@@ -538,18 +655,22 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
(CondInst == 0 || CondInst->getParent() != BB)) { // Non-local definition.
pred_iterator PI = pred_begin(BB), E = pred_end(BB);
if (isa<BranchInst>(BB->getTerminator())) {
- for (; PI != E; ++PI)
- if (BranchInst *PBI = dyn_cast<BranchInst>((*PI)->getTerminator()))
+ for (; PI != E; ++PI) {
+ BasicBlock *P = *PI;
+ if (BranchInst *PBI = dyn_cast<BranchInst>(P->getTerminator()))
if (PBI->isConditional() && PBI->getCondition() == Condition &&
- ProcessBranchOnDuplicateCond(*PI, BB))
+ ProcessBranchOnDuplicateCond(P, BB))
return true;
+ }
} else {
assert(isa<SwitchInst>(BB->getTerminator()) && "Unknown jump terminator");
- for (; PI != E; ++PI)
- if (SwitchInst *PSI = dyn_cast<SwitchInst>((*PI)->getTerminator()))
+ for (; PI != E; ++PI) {
+ BasicBlock *P = *PI;
+ if (SwitchInst *PSI = dyn_cast<SwitchInst>(P->getTerminator()))
if (PSI->getCondition() == Condition &&
- ProcessSwitchOnDuplicateCond(*PI, BB))
+ ProcessSwitchOnDuplicateCond(P, BB))
return true;
+ }
}
}
@@ -569,19 +690,59 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
// If we have a comparison, loop over the predecessors to see if there is
// a condition with a lexically identical value.
pred_iterator PI = pred_begin(BB), E = pred_end(BB);
- for (; PI != E; ++PI)
- if (BranchInst *PBI = dyn_cast<BranchInst>((*PI)->getTerminator()))
- if (PBI->isConditional() && *PI != BB) {
+ for (; PI != E; ++PI) {
+ BasicBlock *P = *PI;
+ if (BranchInst *PBI = dyn_cast<BranchInst>(P->getTerminator()))
+ if (PBI->isConditional() && P != BB) {
if (CmpInst *CI = dyn_cast<CmpInst>(PBI->getCondition())) {
if (CI->getOperand(0) == CondCmp->getOperand(0) &&
CI->getOperand(1) == CondCmp->getOperand(1) &&
CI->getPredicate() == CondCmp->getPredicate()) {
// TODO: Could handle things like (x != 4) --> (x == 17)
- if (ProcessBranchOnDuplicateCond(*PI, BB))
+ if (ProcessBranchOnDuplicateCond(P, BB))
return true;
}
}
}
+ }
+ }
+
+ // For a comparison where the LHS is outside this block, it's possible
+ // that we've branched on it before. Used LVI to see if we can simplify
+ // the branch based on that.
+ BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
+ Constant *CondConst = dyn_cast<Constant>(CondCmp->getOperand(1));
+ pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
+ if (LVI && CondBr && CondConst && CondBr->isConditional() && PI != PE &&
+ (!isa<Instruction>(CondCmp->getOperand(0)) ||
+ cast<Instruction>(CondCmp->getOperand(0))->getParent() != BB)) {
+ // For each predecessor edge, determine if the comparison is true or false
+ // on that edge. If they're all true or all false, we can simplify the
+ // branch.
+ // FIXME: We could handle mixed true/false by duplicating code.
+ LazyValueInfo::Tristate Baseline =
+ LVI->getPredicateOnEdge(CondCmp->getPredicate(), CondCmp->getOperand(0),
+ CondConst, *PI, BB);
+ if (Baseline != LazyValueInfo::Unknown) {
+ // Check that all remaining incoming values match the first one.
+ while (++PI != PE) {
+ LazyValueInfo::Tristate Ret = LVI->getPredicateOnEdge(
+ CondCmp->getPredicate(),
+ CondCmp->getOperand(0),
+ CondConst, *PI, BB);
+ if (Ret != Baseline) break;
+ }
+
+ // If we didn't terminate early, then all of the edges matched the baseline.
+ if (PI == PE) {
+ unsigned ToRemove = Baseline == LazyValueInfo::True ? 1 : 0;
+ unsigned ToKeep = Baseline == LazyValueInfo::True ? 0 : 1;
+ RemovePredecessorAndSimplify(CondBr->getSuccessor(ToRemove), BB, TD);
+ BranchInst::Create(CondBr->getSuccessor(ToKeep), CondBr);
+ CondBr->eraseFromParent();
+ return true;
+ }
+ }
}
}
@@ -670,8 +831,10 @@ bool JumpThreading::ProcessBranchOnDuplicateCond(BasicBlock *PredBB,
Value *OldCond = DestBI->getCondition();
DestBI->setCondition(ConstantInt::get(Type::getInt1Ty(BB->getContext()),
BranchDir));
- ConstantFoldTerminator(BB);
+ // Delete dead instructions before we fold the branch. Folding the branch
+ // can eliminate edges from the CFG which can end up deleting OldCond.
RecursivelyDeleteTriviallyDeadInstructions(OldCond);
+ ConstantFoldTerminator(BB);
return true;
}
@@ -867,9 +1030,15 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
// Add all the unavailable predecessors to the PredsToSplit list.
for (pred_iterator PI = pred_begin(LoadBB), PE = pred_end(LoadBB);
- PI != PE; ++PI)
- if (!AvailablePredSet.count(*PI))
- PredsToSplit.push_back(*PI);
+ PI != PE; ++PI) {
+ BasicBlock *P = *PI;
+ // If the predecessor is an indirect goto, we can't split the edge.
+ if (isa<IndirectBrInst>(P->getTerminator()))
+ return false;
+
+ if (!AvailablePredSet.count(P))
+ PredsToSplit.push_back(P);
+ }
// Split them out to their own block.
UnavailablePred =
@@ -901,11 +1070,12 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
// have multiple entries here.
for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB); PI != E;
++PI) {
+ BasicBlock *P = *PI;
AvailablePredsTy::iterator I =
std::lower_bound(AvailablePreds.begin(), AvailablePreds.end(),
- std::make_pair(*PI, (Value*)0));
+ std::make_pair(P, (Value*)0));
- assert(I != AvailablePreds.end() && I->first == *PI &&
+ assert(I != AvailablePreds.end() && I->first == P &&
"Didn't find entry for predecessor!");
PN->addIncoming(I->second, I->first);
@@ -991,6 +1161,7 @@ bool JumpThreading::ProcessThreadableEdges(Value *Cond, BasicBlock *BB) {
SmallVector<std::pair<ConstantInt*, BasicBlock*>, 8> PredValues;
if (!ComputeValueKnownInPredecessors(Cond, BB, PredValues))
return false;
+
assert(!PredValues.empty() &&
"ComputeValueKnownInPredecessors returned true with no values");
@@ -1285,6 +1456,9 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
<< ", across block:\n "
<< *BB << "\n");
+ if (LVI)
+ LVI->threadEdge(PredBB, BB, SuccBB);
+
// We are going to have to map operands from the original BB block to the new
// copy of the block 'NewBB'. If there are PHI nodes in BB, evaluate them to
// account for entry from PredBB.
@@ -1354,7 +1528,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
// We found a use of I outside of BB. Rename all uses of I that are outside
// its block to be uses of the appropriate PHI node etc. See ValuesInBlocks
// with the two values we know.
- SSAUpdate.Initialize(I);
+ SSAUpdate.Initialize(I->getType(), I->getName());
SSAUpdate.AddAvailableValue(BB, I);
SSAUpdate.AddAvailableValue(NewBB, ValueMapping[I]);
@@ -1509,7 +1683,7 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
// We found a use of I outside of BB. Rename all uses of I that are outside
// its block to be uses of the appropriate PHI node etc. See ValuesInBlocks
// with the two values we know.
- SSAUpdate.Initialize(I);
+ SSAUpdate.Initialize(I->getType(), I->getName());
SSAUpdate.AddAvailableValue(BB, I);
SSAUpdate.AddAvailableValue(PredBB, ValueMapping[I]);
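
The JumpThreading hunks above wire LazyValueInfo into the pass: when a block
ends in a branch on a comparison against a constant whose LHS is defined
outside the block, LVI is asked for the predicate on every incoming edge, and
if all edges agree the conditional branch is rewritten as an unconditional
one. A rough sketch of that flow, assuming the LLVM 2.8 headers bundled under
libclamav/c++/llvm (illustrative only, simplified from the hunk above; the
real code also calls RemovePredecessorAndSimplify on the dropped successor so
its PHI nodes stay consistent):

// Collapse BB's conditional branch when LVI proves Cmp against RHS on every
// incoming edge. Cmp is assumed to be the branch condition of BB.
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/Support/CFG.h"
using namespace llvm;

static bool simplifyBranchWithLVI(BasicBlock *BB, CmpInst *Cmp, Constant *RHS,
                                  LazyValueInfo *LVI) {
  BranchInst *Br = dyn_cast<BranchInst>(BB->getTerminator());
  if (!Br || !Br->isConditional() || pred_begin(BB) == pred_end(BB))
    return false;

  // Ask LVI for the predicate on each predecessor edge; all edges must agree.
  LazyValueInfo::Tristate Baseline = LazyValueInfo::Unknown;
  for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
    LazyValueInfo::Tristate Ret =
      LVI->getPredicateOnEdge(Cmp->getPredicate(), Cmp->getOperand(0),
                              RHS, *PI, BB);
    if (Ret == LazyValueInfo::Unknown) return false;
    if (Baseline == LazyValueInfo::Unknown) Baseline = Ret;
    else if (Ret != Baseline) return false;
  }

  // Every edge gives the same answer, so keep only the matching successor.
  BasicBlock *Keep = Br->getSuccessor(Baseline == LazyValueInfo::True ? 0 : 1);
  BranchInst::Create(Keep, Br);
  Br->eraseFromParent();
  return true;
}
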
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/LICM.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/LICM.cpp
index d7ace34..2ef8544 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -26,8 +26,7 @@
// pointer. There are no calls in the loop which mod/ref the pointer.
// If these conditions are true, we can promote the loads and stores in the
// loop of the pointer to use a temporary alloca'd variable. We then use
-// the mem2reg functionality to construct the appropriate SSA form for the
-// variable.
+// the SSAUpdater to construct the appropriate SSA form for the value.
//
//===----------------------------------------------------------------------===//
@@ -37,14 +36,15 @@
#include "llvm/DerivedTypes.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
+#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/ScalarEvolution.h"
-#include "llvm/Transforms/Utils/PromoteMemToReg.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
@@ -66,7 +66,7 @@ DisablePromotion("disable-licm-promotion", cl::Hidden,
namespace {
struct LICM : public LoopPass {
static char ID; // Pass identification, replacement for typeid
- LICM() : LoopPass(&ID) {}
+ LICM() : LoopPass(ID) {}
virtual bool runOnLoop(Loop *L, LPPassManager &LPM);
@@ -75,39 +75,31 @@ namespace {
///
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
- AU.addRequiredID(LoopSimplifyID);
- AU.addRequired<LoopInfo>();
AU.addRequired<DominatorTree>();
- AU.addRequired<DominanceFrontier>(); // For scalar promotion (mem2reg)
+ AU.addRequired<LoopInfo>();
+ AU.addRequiredID(LoopSimplifyID);
AU.addRequired<AliasAnalysis>();
+ AU.addPreserved<AliasAnalysis>();
AU.addPreserved<ScalarEvolution>();
- AU.addPreserved<DominanceFrontier>();
AU.addPreservedID(LoopSimplifyID);
}
bool doFinalization() {
- // Free the values stored in the map
- for (std::map<Loop *, AliasSetTracker *>::iterator
- I = LoopToAliasMap.begin(), E = LoopToAliasMap.end(); I != E; ++I)
- delete I->second;
-
- LoopToAliasMap.clear();
+ assert(LoopToAliasSetMap.empty() && "Didn't free loop alias sets");
return false;
}
private:
- // Various analyses that we use...
AliasAnalysis *AA; // Current AliasAnalysis information
LoopInfo *LI; // Current LoopInfo
- DominatorTree *DT; // Dominator Tree for the current Loop...
- DominanceFrontier *DF; // Current Dominance Frontier
+ DominatorTree *DT; // Dominator Tree for the current Loop.
- // State that is updated as we process loops
+ // State that is updated as we process loops.
bool Changed; // Set to true when we change anything.
BasicBlock *Preheader; // The preheader block of the current loop...
Loop *CurLoop; // The current loop we are working on...
AliasSetTracker *CurAST; // AliasSet information for the current loop...
- std::map<Loop *, AliasSetTracker *> LoopToAliasMap;
+ DenseMap<Loop*, AliasSetTracker*> LoopToAliasSetMap;
/// cloneBasicBlockAnalysis - Simple Analysis hook. Clone alias set info.
void cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To, Loop *L);
@@ -204,25 +196,12 @@ namespace {
bool isLoopInvariantInst(Instruction &I);
bool isNotUsedInLoop(Instruction &I);
- /// PromoteValuesInLoop - Look at the stores in the loop and promote as many
- /// to scalars as we can.
- ///
- void PromoteValuesInLoop();
-
- /// FindPromotableValuesInLoop - Check the current loop for stores to
- /// definite pointers, which are not loaded and stored through may aliases.
- /// If these are found, create an alloca for the value, add it to the
- /// PromotedValues list, and keep track of the mapping from value to
- /// alloca...
- ///
- void FindPromotableValuesInLoop(
- std::vector<std::pair<AllocaInst*, Value*> > &PromotedValues,
- std::map<Value*, AllocaInst*> &Val2AlMap);
+ void PromoteAliasSet(AliasSet &AS);
};
}
char LICM::ID = 0;
-static RegisterPass<LICM> X("licm", "Loop Invariant Code Motion");
+INITIALIZE_PASS(LICM, "licm", "Loop Invariant Code Motion", false, false);
Pass *llvm::createLICMPass() { return new LICM(); }
@@ -236,19 +215,23 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
// Get our Loop and Alias Analysis information...
LI = &getAnalysis<LoopInfo>();
AA = &getAnalysis<AliasAnalysis>();
- DF = &getAnalysis<DominanceFrontier>();
DT = &getAnalysis<DominatorTree>();
CurAST = new AliasSetTracker(*AA);
- // Collect Alias info from subloops
+ // Collect Alias info from subloops.
for (Loop::iterator LoopItr = L->begin(), LoopItrE = L->end();
LoopItr != LoopItrE; ++LoopItr) {
Loop *InnerL = *LoopItr;
- AliasSetTracker *InnerAST = LoopToAliasMap[InnerL];
- assert (InnerAST && "Where is my AST?");
+ AliasSetTracker *InnerAST = LoopToAliasSetMap[InnerL];
+ assert(InnerAST && "Where is my AST?");
// What if InnerLoop was modified by other passes ?
CurAST->add(*InnerAST);
+
+ // Once we've incorporated the inner loop's AST into ours, we don't need the
+ // subloop's anymore.
+ delete InnerAST;
+ LoopToAliasSetMap.erase(InnerL);
}
CurLoop = L;
@@ -263,7 +246,7 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
I != E; ++I) {
BasicBlock *BB = *I;
- if (LI->getLoopFor(BB) == L) // Ignore blocks in subloops...
+ if (LI->getLoopFor(BB) == L) // Ignore blocks in subloops.
CurAST->add(*BB); // Incorporate the specified basic block
}
@@ -283,15 +266,24 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
HoistRegion(DT->getNode(L->getHeader()));
// Now that all loop invariants have been removed from the loop, promote any
- // memory references to scalars that we can...
- if (!DisablePromotion && Preheader && L->hasDedicatedExits())
- PromoteValuesInLoop();
-
+ // memory references to scalars that we can.
+ if (!DisablePromotion && Preheader && L->hasDedicatedExits()) {
+ // Loop over all of the alias sets in the tracker object.
+ for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end();
+ I != E; ++I)
+ PromoteAliasSet(*I);
+ }
+
// Clear out loops state information for the next iteration
CurLoop = 0;
Preheader = 0;
- LoopToAliasMap[L] = CurAST;
+ // If this loop is nested inside of another one, save the alias information
+ // for when we process the outer loop.
+ if (L->getParentLoop())
+ LoopToAliasSetMap[L] = CurAST;
+ else
+ delete CurAST;
return Changed;
}
@@ -308,7 +300,7 @@ void LICM::SinkRegion(DomTreeNode *N) {
// If this subregion is not in the top level loop at all, exit.
if (!CurLoop->contains(BB)) return;
- // We are processing blocks in reverse dfo, so process children first...
+ // We are processing blocks in reverse dfo, so process children first.
const std::vector<DomTreeNode*> &Children = N->getChildren();
for (unsigned i = 0, e = Children.size(); i != e; ++i)
SinkRegion(Children[i]);
@@ -319,6 +311,17 @@ void LICM::SinkRegion(DomTreeNode *N) {
for (BasicBlock::iterator II = BB->end(); II != BB->begin(); ) {
Instruction &I = *--II;
+
+ // If the instruction is dead, we would otherwise try to sink it (since it
+ // isn't used in the loop); instead, just delete it.
+ if (isInstructionTriviallyDead(&I)) {
+ DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
+ ++II;
+ CurAST->deleteValue(&I);
+ I.eraseFromParent();
+ Changed = true;
+ continue;
+ }
// Check to see if we can sink this instruction to the exit blocks
// of the loop. We can do this if all the users of the instruction are
@@ -350,6 +353,18 @@ void LICM::HoistRegion(DomTreeNode *N) {
for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ) {
Instruction &I = *II++;
+ // Try constant folding this instruction. If all the operands are
+ // constants, it is technically hoistable, but it would be better to just
+ // fold it.
+ if (Constant *C = ConstantFoldInstruction(&I)) {
+ DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C << '\n');
+ CurAST->copyValue(&I, C);
+ CurAST->deleteValue(&I);
+ I.replaceAllUsesWith(C);
+ I.eraseFromParent();
+ continue;
+ }
+
// Try hoisting the instruction out to the preheader. We can only do this
// if all of the operands of the instruction are loop invariant and if it
// is safe to hoist the instruction.
@@ -357,7 +372,7 @@ void LICM::HoistRegion(DomTreeNode *N) {
if (isLoopInvariantInst(I) && canSinkOrHoistInst(I) &&
isSafeToExecuteUnconditionally(I))
hoist(I);
- }
+ }
const std::vector<DomTreeNode*> &Children = N->getChildren();
for (unsigned i = 0, e = Children.size(); i != e; ++i)
@@ -457,10 +472,10 @@ bool LICM::isLoopInvariantInst(Instruction &I) {
/// position, and may either delete it or move it to outside of the loop.
///
void LICM::sink(Instruction &I) {
- DEBUG(dbgs() << "LICM sinking instruction: " << I);
+ DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
SmallVector<BasicBlock*, 8> ExitBlocks;
- CurLoop->getExitBlocks(ExitBlocks);
+ CurLoop->getUniqueExitBlocks(ExitBlocks);
if (isa<LoadInst>(I)) ++NumMovedLoads;
else if (isa<CallInst>(I)) ++NumMovedCalls;
@@ -477,122 +492,101 @@ void LICM::sink(Instruction &I) {
// If I has users in unreachable blocks, eliminate.
// If I is not void type then replaceAllUsesWith undef.
// This allows ValueHandlers and custom metadata to adjust themselves.
- if (!I.getType()->isVoidTy())
+ if (!I.use_empty())
I.replaceAllUsesWith(UndefValue::get(I.getType()));
I.eraseFromParent();
} else {
// Move the instruction to the start of the exit block, after any PHI
// nodes in it.
- I.removeFromParent();
- BasicBlock::iterator InsertPt = ExitBlocks[0]->getFirstNonPHI();
- ExitBlocks[0]->getInstList().insert(InsertPt, &I);
+ I.moveBefore(ExitBlocks[0]->getFirstNonPHI());
+
+ // This instruction is no longer in the AST for the current loop, because
+ // we just sunk it out of the loop. If we just sunk it into an outer
+ // loop, we will rediscover the operation when we process it.
+ CurAST->deleteValue(&I);
}
- } else if (ExitBlocks.empty()) {
+ return;
+ }
+
+ if (ExitBlocks.empty()) {
// The instruction is actually dead if there ARE NO exit blocks.
CurAST->deleteValue(&I);
// If I has users in unreachable blocks, eliminate.
// If I is not void type then replaceAllUsesWith undef.
// This allows ValueHandlers and custom metadata to adjust themselves.
- if (!I.getType()->isVoidTy())
+ if (!I.use_empty())
I.replaceAllUsesWith(UndefValue::get(I.getType()));
I.eraseFromParent();
- } else {
- // Otherwise, if we have multiple exits, use the PromoteMem2Reg function to
- // do all of the hard work of inserting PHI nodes as necessary. We convert
- // the value into a stack object to get it to do this.
-
- // Firstly, we create a stack object to hold the value...
- AllocaInst *AI = 0;
-
- if (!I.getType()->isVoidTy()) {
- AI = new AllocaInst(I.getType(), 0, I.getName(),
- I.getParent()->getParent()->getEntryBlock().begin());
- CurAST->add(AI);
- }
-
- // Secondly, insert load instructions for each use of the instruction
- // outside of the loop.
- while (!I.use_empty()) {
- Instruction *U = cast<Instruction>(I.use_back());
-
- // If the user is a PHI Node, we actually have to insert load instructions
- // in all predecessor blocks, not in the PHI block itself!
- if (PHINode *UPN = dyn_cast<PHINode>(U)) {
- // Only insert into each predecessor once, so that we don't have
- // different incoming values from the same block!
- std::map<BasicBlock*, Value*> InsertedBlocks;
- for (unsigned i = 0, e = UPN->getNumIncomingValues(); i != e; ++i)
- if (UPN->getIncomingValue(i) == &I) {
- BasicBlock *Pred = UPN->getIncomingBlock(i);
- Value *&PredVal = InsertedBlocks[Pred];
- if (!PredVal) {
- // Insert a new load instruction right before the terminator in
- // the predecessor block.
- PredVal = new LoadInst(AI, "", Pred->getTerminator());
- CurAST->add(cast<LoadInst>(PredVal));
- }
-
- UPN->setIncomingValue(i, PredVal);
- }
-
- } else {
- LoadInst *L = new LoadInst(AI, "", U);
- U->replaceUsesOfWith(&I, L);
- CurAST->add(L);
- }
- }
-
- // Thirdly, insert a copy of the instruction in each exit block of the loop
- // that is dominated by the instruction, storing the result into the memory
- // location. Be careful not to insert the instruction into any particular
- // basic block more than once.
- std::set<BasicBlock*> InsertedBlocks;
- BasicBlock *InstOrigBB = I.getParent();
-
- for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
- BasicBlock *ExitBlock = ExitBlocks[i];
-
- if (isExitBlockDominatedByBlockInLoop(ExitBlock, InstOrigBB)) {
- // If we haven't already processed this exit block, do so now.
- if (InsertedBlocks.insert(ExitBlock).second) {
- // Insert the code after the last PHI node...
- BasicBlock::iterator InsertPt = ExitBlock->getFirstNonPHI();
-
- // If this is the first exit block processed, just move the original
- // instruction, otherwise clone the original instruction and insert
- // the copy.
- Instruction *New;
- if (InsertedBlocks.size() == 1) {
- I.removeFromParent();
- ExitBlock->getInstList().insert(InsertPt, &I);
- New = &I;
- } else {
- New = I.clone();
- CurAST->copyValue(&I, New);
- if (!I.getName().empty())
- New->setName(I.getName()+".le");
- ExitBlock->getInstList().insert(InsertPt, New);
- }
-
- // Now that we have inserted the instruction, store it into the alloca
- if (AI) new StoreInst(New, AI, InsertPt);
- }
- }
- }
-
- // If the instruction doesn't dominate any exit blocks, it must be dead.
- if (InsertedBlocks.empty()) {
- CurAST->deleteValue(&I);
- I.eraseFromParent();
- }
-
- // Finally, promote the fine value to SSA form.
- if (AI) {
- std::vector<AllocaInst*> Allocas;
- Allocas.push_back(AI);
- PromoteMemToReg(Allocas, *DT, *DF, CurAST);
+ return;
+ }
+
+ // Otherwise, if we have multiple exits, use the SSAUpdater to do all of the
+ // hard work of inserting PHI nodes as necessary.
+ SmallVector<PHINode*, 8> NewPHIs;
+ SSAUpdater SSA(&NewPHIs);
+
+ if (!I.use_empty())
+ SSA.Initialize(I.getType(), I.getName());
+
+ // Insert a copy of the instruction in each exit block of the loop that is
+ // dominated by the instruction. Each exit block is known to only be in the
+ // ExitBlocks list once.
+ BasicBlock *InstOrigBB = I.getParent();
+ unsigned NumInserted = 0;
+
+ for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
+ BasicBlock *ExitBlock = ExitBlocks[i];
+
+ if (!isExitBlockDominatedByBlockInLoop(ExitBlock, InstOrigBB))
+ continue;
+
+ // Insert the code after the last PHI node.
+ BasicBlock::iterator InsertPt = ExitBlock->getFirstNonPHI();
+
+ // If this is the first exit block processed, just move the original
+ // instruction, otherwise clone the original instruction and insert
+ // the copy.
+ Instruction *New;
+ if (NumInserted++ == 0) {
+ I.moveBefore(InsertPt);
+ New = &I;
+ } else {
+ New = I.clone();
+ if (!I.getName().empty())
+ New->setName(I.getName()+".le");
+ ExitBlock->getInstList().insert(InsertPt, New);
}
+
+ // Now that we have inserted the instruction, inform SSAUpdater.
+ if (!I.use_empty())
+ SSA.AddAvailableValue(ExitBlock, New);
+ }
+
+ // If the instruction doesn't dominate any exit blocks, it must be dead.
+ if (NumInserted == 0) {
+ CurAST->deleteValue(&I);
+ if (!I.use_empty())
+ I.replaceAllUsesWith(UndefValue::get(I.getType()));
+ I.eraseFromParent();
+ return;
}
+
+ // Next, rewrite uses of the instruction, inserting PHI nodes as needed.
+ for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE; ) {
+ // Grab the use before incrementing the iterator.
+ Use &U = UI.getUse();
+ // Increment the iterator before removing the use from the list.
+ ++UI;
+ SSA.RewriteUseAfterInsertions(U);
+ }
+
+ // Update CurAST for NewPHIs if I had pointer type.
+ if (I.getType()->isPointerTy())
+ for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
+ CurAST->copyValue(&I, NewPHIs[i]);
+
+ // Finally, remove the instruction from CurAST. It is no longer in the loop.
+ CurAST->deleteValue(&I);
}
/// hoist - When an instruction is found to only use loop invariant operands
@@ -602,12 +596,8 @@ void LICM::hoist(Instruction &I) {
DEBUG(dbgs() << "LICM hoisting to " << Preheader->getName() << ": "
<< I << "\n");
- // Remove the instruction from its current basic block... but don't delete the
- // instruction.
- I.removeFromParent();
-
- // Insert the new node in Preheader, before the terminator.
- Preheader->getInstList().insert(Preheader->getTerminator(), &I);
+ // Move the new node to the Preheader, before its terminator.
+ I.moveBefore(Preheader->getTerminator());
if (isa<LoadInst>(I)) ++NumMovedLoads;
else if (isa<CallInst>(I)) ++NumMovedCalls;
@@ -647,221 +637,269 @@ bool LICM::isSafeToExecuteUnconditionally(Instruction &Inst) {
return true;
}
-
-/// PromoteValuesInLoop - Try to promote memory values to scalars by sinking
+/// PromoteAliasSet - Try to promote memory values to scalars by sinking
/// stores out of the loop and moving loads to before the loop. We do this by
/// looping over the stores in the loop, looking for stores to Must pointers
-/// which are loop invariant. We promote these memory locations to use allocas
-/// instead. These allocas can easily be raised to register values by the
-/// PromoteMem2Reg functionality.
+/// which are loop invariant.
///
-void LICM::PromoteValuesInLoop() {
- // PromotedValues - List of values that are promoted out of the loop. Each
- // value has an alloca instruction for it, and a canonical version of the
- // pointer.
- std::vector<std::pair<AllocaInst*, Value*> > PromotedValues;
- std::map<Value*, AllocaInst*> ValueToAllocaMap; // Map of ptr to alloca
-
- FindPromotableValuesInLoop(PromotedValues, ValueToAllocaMap);
- if (ValueToAllocaMap.empty()) return; // If there are values to promote.
-
- Changed = true;
- NumPromoted += PromotedValues.size();
-
- std::vector<Value*> PointerValueNumbers;
-
- // Emit a copy from the value into the alloca'd value in the loop preheader
- TerminatorInst *LoopPredInst = Preheader->getTerminator();
- for (unsigned i = 0, e = PromotedValues.size(); i != e; ++i) {
- Value *Ptr = PromotedValues[i].second;
-
- // If we are promoting a pointer value, update alias information for the
- // inserted load.
- Value *LoadValue = 0;
- if (cast<PointerType>(Ptr->getType())->getElementType()->isPointerTy()) {
- // Locate a load or store through the pointer, and assign the same value
- // to LI as we are loading or storing. Since we know that the value is
- // stored in this loop, this will always succeed.
- for (Value::use_iterator UI = Ptr->use_begin(), E = Ptr->use_end();
- UI != E; ++UI)
- if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
- LoadValue = LI;
- break;
- } else if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
- if (SI->getOperand(1) == Ptr) {
- LoadValue = SI->getOperand(0);
- break;
- }
- }
- assert(LoadValue && "No store through the pointer found!");
- PointerValueNumbers.push_back(LoadValue); // Remember this for later.
- }
-
- // Load from the memory we are promoting.
- LoadInst *LI = new LoadInst(Ptr, Ptr->getName()+".promoted", LoopPredInst);
-
- if (LoadValue) CurAST->copyValue(LoadValue, LI);
-
- // Store into the temporary alloca.
- new StoreInst(LI, PromotedValues[i].first, LoopPredInst);
- }
+void LICM::PromoteAliasSet(AliasSet &AS) {
+ // We can promote this alias set if it has a store, if it is a "Must" alias
+ // set, if the pointer is loop invariant, and if we are not eliminating any
+ // volatile loads or stores.
+ if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
+ AS.isVolatile() || !CurLoop->isLoopInvariant(AS.begin()->getValue()))
+ return;
+
+ assert(!AS.empty() &&
+ "Must alias set should have at least one pointer element in it!");
+ Value *SomePtr = AS.begin()->getValue();
- // Scan the basic blocks in the loop, replacing uses of our pointers with
- // uses of the allocas in question.
+ // It isn't safe to promote a load/store from the loop if the load/store is
+ // conditional. For example, turning:
//
- for (Loop::block_iterator I = CurLoop->block_begin(),
- E = CurLoop->block_end(); I != E; ++I) {
- BasicBlock *BB = *I;
- // Rewrite all loads and stores in the block of the pointer...
- for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
- if (LoadInst *L = dyn_cast<LoadInst>(II)) {
- std::map<Value*, AllocaInst*>::iterator
- I = ValueToAllocaMap.find(L->getOperand(0));
- if (I != ValueToAllocaMap.end())
- L->setOperand(0, I->second); // Rewrite load instruction...
- } else if (StoreInst *S = dyn_cast<StoreInst>(II)) {
- std::map<Value*, AllocaInst*>::iterator
- I = ValueToAllocaMap.find(S->getOperand(1));
- if (I != ValueToAllocaMap.end())
- S->setOperand(1, I->second); // Rewrite store instruction...
- }
- }
- }
-
- // Now that the body of the loop uses the allocas instead of the original
- // memory locations, insert code to copy the alloca value back into the
- // original memory location on all exits from the loop. Note that we only
- // want to insert one copy of the code in each exit block, though the loop may
- // exit to the same block more than once.
+ // for () { if (c) *P += 1; }
//
- SmallPtrSet<BasicBlock*, 16> ProcessedBlocks;
-
- SmallVector<BasicBlock*, 8> ExitBlocks;
- CurLoop->getExitBlocks(ExitBlocks);
- for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
- if (!ProcessedBlocks.insert(ExitBlocks[i]))
- continue;
-
- // Copy all of the allocas into their memory locations.
- BasicBlock::iterator BI = ExitBlocks[i]->getFirstNonPHI();
- Instruction *InsertPos = BI;
- unsigned PVN = 0;
- for (unsigned i = 0, e = PromotedValues.size(); i != e; ++i) {
- // Load from the alloca.
- LoadInst *LI = new LoadInst(PromotedValues[i].first, "", InsertPos);
-
- // If this is a pointer type, update alias info appropriately.
- if (LI->getType()->isPointerTy())
- CurAST->copyValue(PointerValueNumbers[PVN++], LI);
-
- // Store into the memory we promoted.
- new StoreInst(LI, PromotedValues[i].second, InsertPos);
- }
- }
-
- // Now that we have done the deed, use the mem2reg functionality to promote
- // all of the new allocas we just created into real SSA registers.
+ // into:
//
- std::vector<AllocaInst*> PromotedAllocas;
- PromotedAllocas.reserve(PromotedValues.size());
- for (unsigned i = 0, e = PromotedValues.size(); i != e; ++i)
- PromotedAllocas.push_back(PromotedValues[i].first);
- PromoteMemToReg(PromotedAllocas, *DT, *DF, CurAST);
-}
-
-/// FindPromotableValuesInLoop - Check the current loop for stores to definite
-/// pointers, which are not loaded and stored through may aliases and are safe
-/// for promotion. If these are found, create an alloca for the value, add it
-/// to the PromotedValues list, and keep track of the mapping from value to
-/// alloca.
-void LICM::FindPromotableValuesInLoop(
- std::vector<std::pair<AllocaInst*, Value*> > &PromotedValues,
- std::map<Value*, AllocaInst*> &ValueToAllocaMap) {
- Instruction *FnStart = CurLoop->getHeader()->getParent()->begin()->begin();
-
- // Loop over all of the alias sets in the tracker object.
- for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end();
- I != E; ++I) {
- AliasSet &AS = *I;
- // We can promote this alias set if it has a store, if it is a "Must" alias
- // set, if the pointer is loop invariant, and if we are not eliminating any
- // volatile loads or stores.
- if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
- AS.isVolatile() || !CurLoop->isLoopInvariant(AS.begin()->getValue()))
- continue;
+ // tmp = *P; for () { if (c) tmp += 1; } *P = tmp;
+ //
+ // is not safe, because *P may only be valid to access if 'c' is true.
+ //
+ // It is safe to promote P if all uses are direct load/stores and if at
+ // least one is guaranteed to be executed.
+ bool GuaranteedToExecute = false;
+
+ SmallVector<Instruction*, 64> LoopUses;
+ SmallPtrSet<Value*, 4> PointerMustAliases;
+
+ // Check that all of the pointers in the alias set have the same type. We
+ // cannot (yet) promote a memory location that is loaded and stored in
+ // different sizes.
+ for (AliasSet::iterator ASI = AS.begin(), E = AS.end(); ASI != E; ++ASI) {
+ Value *ASIV = ASI->getValue();
+ PointerMustAliases.insert(ASIV);
- assert(!AS.empty() &&
- "Must alias set should have at least one pointer element in it!");
- Value *V = AS.begin()->getValue();
-
// Check that all of the pointers in the alias set have the same type. We
// cannot (yet) promote a memory location that is loaded and stored in
// different sizes.
- {
- bool PointerOk = true;
- for (AliasSet::iterator I = AS.begin(), E = AS.end(); I != E; ++I)
- if (V->getType() != I->getValue()->getType()) {
- PointerOk = false;
- break;
- }
- if (!PointerOk)
- continue;
- }
-
- // It isn't safe to promote a load/store from the loop if the load/store is
- // conditional. For example, turning:
- //
- // for () { if (c) *P += 1; }
- //
- // into:
- //
- // tmp = *P; for () { if (c) tmp +=1; } *P = tmp;
- //
- // is not safe, because *P may only be valid to access if 'c' is true.
- //
- // It is safe to promote P if all uses are direct load/stores and if at
- // least one is guaranteed to be executed.
- bool GuaranteedToExecute = false;
- bool InvalidInst = false;
- for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
+ if (SomePtr->getType() != ASIV->getType())
+ return;
+
+ for (Value::use_iterator UI = ASIV->use_begin(), UE = ASIV->use_end();
UI != UE; ++UI) {
- // Ignore instructions not in this loop.
+ // Ignore instructions that are outside the loop.
Instruction *Use = dyn_cast<Instruction>(*UI);
if (!Use || !CurLoop->contains(Use))
continue;
-
- if (!isa<LoadInst>(Use) && !isa<StoreInst>(Use)) {
- InvalidInst = true;
- break;
- }
+
+ // If there is a non-load/store instruction in the loop, we can't promote
+ // it.
+ if (isa<LoadInst>(Use))
+ assert(!cast<LoadInst>(Use)->isVolatile() && "AST broken");
+ else if (isa<StoreInst>(Use)) {
+ assert(!cast<StoreInst>(Use)->isVolatile() && "AST broken");
+ if (Use->getOperand(0) == ASIV) return;
+ } else
+ return; // Not a load or store.
if (!GuaranteedToExecute)
GuaranteedToExecute = isSafeToExecuteUnconditionally(*Use);
+
+ LoopUses.push_back(Use);
}
+ }
+
+ // If there isn't a guaranteed-to-execute instruction, we can't promote.
+ if (!GuaranteedToExecute)
+ return;
+
+ // Otherwise, this is safe to promote; let's do it!
+ DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " <<*SomePtr<<'\n');
+ Changed = true;
+ ++NumPromoted;
- // If there is an non-load/store instruction in the loop, we can't promote
- // it. If there isn't a guaranteed-to-execute instruction, we can't
- // promote.
- if (InvalidInst || !GuaranteedToExecute)
+ // We use the SSAUpdater interface to insert phi nodes as required.
+ SmallVector<PHINode*, 16> NewPHIs;
+ SSAUpdater SSA(&NewPHIs);
+
+ // It wants to know some value of the same type as what we'll be inserting.
+ Value *SomeValue;
+ if (isa<LoadInst>(LoopUses[0]))
+ SomeValue = LoopUses[0];
+ else
+ SomeValue = cast<StoreInst>(LoopUses[0])->getOperand(0);
+ SSA.Initialize(SomeValue->getType(), SomeValue->getName());
+
+ // First step: bucket up uses of the pointers by the block they occur in.
+ // This is important because we have to handle multiple defs/uses in a block
+ // ourselves: SSAUpdater is purely for cross-block references.
+ // FIXME: Want a TinyVector<Instruction*> since there is usually 0/1 element.
+ DenseMap<BasicBlock*, std::vector<Instruction*> > UsesByBlock;
+ for (unsigned i = 0, e = LoopUses.size(); i != e; ++i) {
+ Instruction *User = LoopUses[i];
+ UsesByBlock[User->getParent()].push_back(User);
+ }
+
+ // Okay, now we can iterate over all the blocks in the loop with uses,
+ // processing them. Keep track of which loads are loading a live-in value.
+ SmallVector<LoadInst*, 32> LiveInLoads;
+ DenseMap<Value*, Value*> ReplacedLoads;
+
+ for (unsigned LoopUse = 0, e = LoopUses.size(); LoopUse != e; ++LoopUse) {
+ Instruction *User = LoopUses[LoopUse];
+ std::vector<Instruction*> &BlockUses = UsesByBlock[User->getParent()];
+
+ // If this block has already been processed, ignore this repeat use.
+ if (BlockUses.empty()) continue;
+
+ // Okay, this is the first use in the block. If this block just has a
+ // single user in it, we can rewrite it trivially.
+ if (BlockUses.size() == 1) {
+ // If it is a store, it is a trivial def of the value in the block.
+ if (isa<StoreInst>(User)) {
+ SSA.AddAvailableValue(User->getParent(),
+ cast<StoreInst>(User)->getOperand(0));
+ } else {
+ // Otherwise it is a load, queue it to rewrite as a live-in load.
+ LiveInLoads.push_back(cast<LoadInst>(User));
+ }
+ BlockUses.clear();
continue;
+ }
- const Type *Ty = cast<PointerType>(V->getType())->getElementType();
- AllocaInst *AI = new AllocaInst(Ty, 0, V->getName()+".tmp", FnStart);
- PromotedValues.push_back(std::make_pair(AI, V));
+ // Otherwise, check to see if this block is all loads. If so, we can queue
+ // them all as live-in loads.
+ bool HasStore = false;
+ for (unsigned i = 0, e = BlockUses.size(); i != e; ++i) {
+ if (isa<StoreInst>(BlockUses[i])) {
+ HasStore = true;
+ break;
+ }
+ }
+
+ if (!HasStore) {
+ for (unsigned i = 0, e = BlockUses.size(); i != e; ++i)
+ LiveInLoads.push_back(cast<LoadInst>(BlockUses[i]));
+ BlockUses.clear();
+ continue;
+ }
- // Update the AST and alias analysis.
- CurAST->copyValue(V, AI);
+ // Otherwise, we have mixed loads and stores (or just a bunch of stores).
+ // Since SSAUpdater is purely for cross-block values, we need to determine
+ // the order of these instructions in the block. If the first use in the
+ // block is a load, then it uses the live in value. The last store defines
+ // the live out value. We handle this by doing a linear scan of the block.
+ BasicBlock *BB = User->getParent();
+ Value *StoredValue = 0;
+ for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
+ if (LoadInst *L = dyn_cast<LoadInst>(II)) {
+ // If this is a load from an unrelated pointer, ignore it.
+ if (!PointerMustAliases.count(L->getOperand(0))) continue;
+
+ // If we haven't seen a store yet, this is a live in use, otherwise
+ // use the stored value.
+ if (StoredValue) {
+ L->replaceAllUsesWith(StoredValue);
+ ReplacedLoads[L] = StoredValue;
+ } else {
+ LiveInLoads.push_back(L);
+ }
+ continue;
+ }
+
+ if (StoreInst *S = dyn_cast<StoreInst>(II)) {
+ // If this is a store to an unrelated pointer, ignore it.
+ if (!PointerMustAliases.count(S->getOperand(1))) continue;
- for (AliasSet::iterator I = AS.begin(), E = AS.end(); I != E; ++I)
- ValueToAllocaMap.insert(std::make_pair(I->getValue(), AI));
+ // Remember that this is the active value in the block.
+ StoredValue = S->getOperand(0);
+ }
+ }
+
+ // The last stored value that happened is the live-out for the block.
+ assert(StoredValue && "Already checked that there is a store in block");
+ SSA.AddAvailableValue(BB, StoredValue);
+ BlockUses.clear();
+ }
+
+ // Now that all the intra-loop values are classified, set up the preheader.
+ // It gets a load of the pointer we're promoting, and it is the live-out value
+ // from the preheader.
+ LoadInst *PreheaderLoad = new LoadInst(SomePtr,SomePtr->getName()+".promoted",
+ Preheader->getTerminator());
+ SSA.AddAvailableValue(Preheader, PreheaderLoad);
+
+ // Now that the preheader is good to go, set up the exit blocks. Each exit
+ // block gets a store of the live-out values that feed them. Since we've
+ // already told the SSA updater about the defs in the loop and the preheader
+ // definition, it is all set and we can start using it.
+ SmallVector<BasicBlock*, 8> ExitBlocks;
+ CurLoop->getUniqueExitBlocks(ExitBlocks);
+ for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
+ BasicBlock *ExitBlock = ExitBlocks[i];
+ Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
+ Instruction *InsertPos = ExitBlock->getFirstNonPHI();
+ new StoreInst(LiveInValue, SomePtr, InsertPos);
+ }
- DEBUG(dbgs() << "LICM: Promoting value: " << *V << "\n");
+ // Okay, now we rewrite all loads that use live-in values in the loop,
+ // inserting PHI nodes as necessary.
+ for (unsigned i = 0, e = LiveInLoads.size(); i != e; ++i) {
+ LoadInst *ALoad = LiveInLoads[i];
+ Value *NewVal = SSA.GetValueInMiddleOfBlock(ALoad->getParent());
+ ALoad->replaceAllUsesWith(NewVal);
+ CurAST->copyValue(ALoad, NewVal);
+ ReplacedLoads[ALoad] = NewVal;
+ }
+
+ // If the preheader load is itself a pointer, we need to tell alias analysis
+ // about the new pointer we created in the preheader block and about any PHI
+ // nodes that just got inserted.
+ if (PreheaderLoad->getType()->isPointerTy()) {
+ // Copy any value stored to or loaded from a must-alias of the pointer.
+ CurAST->copyValue(SomeValue, PreheaderLoad);
+
+ for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
+ CurAST->copyValue(SomeValue, NewPHIs[i]);
}
+
+ // Now that everything is rewritten, delete the old instructions from the body
+ // of the loop. They should all be dead now.
+ for (unsigned i = 0, e = LoopUses.size(); i != e; ++i) {
+ Instruction *User = LoopUses[i];
+
+ // If this is a load that still has uses, then the load must have been added
+ // as a live value in the SSAUpdater data structure for a block (e.g. because
+ // the loaded value was stored later). In this case, we need to recursively
+ // propagate the updates until we get to the real value.
+ if (!User->use_empty()) {
+ Value *NewVal = ReplacedLoads[User];
+ assert(NewVal && "not a replaced load?");
+
+ // Propagate down to the ultimate replacee. The intermediate loads
+ // could theoretically already have been deleted, so we don't want to
+ // dereference the Value*'s.
+ DenseMap<Value*, Value*>::iterator RLI = ReplacedLoads.find(NewVal);
+ while (RLI != ReplacedLoads.end()) {
+ NewVal = RLI->second;
+ RLI = ReplacedLoads.find(NewVal);
+ }
+
+ User->replaceAllUsesWith(NewVal);
+ CurAST->copyValue(User, NewVal);
+ }
+
+ CurAST->deleteValue(User);
+ User->eraseFromParent();
+ }
+
+ // Phew, we're done!
}
+
/// cloneBasicBlockAnalysis - Simple Analysis hook. Clone alias set info.
void LICM::cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To, Loop *L) {
- AliasSetTracker *AST = LoopToAliasMap[L];
+ AliasSetTracker *AST = LoopToAliasSetMap.lookup(L);
if (!AST)
return;
@@ -871,7 +909,7 @@ void LICM::cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To, Loop *L) {
/// deleteAnalysisValue - Simple Analysis hook. Delete value V from alias
/// set.
void LICM::deleteAnalysisValue(Value *V, Loop *L) {
- AliasSetTracker *AST = LoopToAliasMap[L];
+ AliasSetTracker *AST = LoopToAliasSetMap.lookup(L);
if (!AST)
return;
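
The LICM rewrite above replaces the old alloca + mem2reg promotion with
SSAUpdater-driven promotion: the preheader load supplies the live-in value,
each store in the loop registers its stored value as the definition for its
block, loads are rewritten to the reaching definition (with PHI nodes
inserted at merges), and every exit block stores the live-out value back to
memory. A condensed sketch of that choreography, illustrative only: Ptr,
LoopStores, LoopLoads and ExitBlocks are hypothetical inputs a real caller
would gather the way PromoteAliasSet does, and the per-block load/store
ordering that the patch handles with a linear scan is omitted here:

#include "llvm/ADT/SmallVector.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

static void promoteWithSSAUpdater(Value *Ptr, BasicBlock *Preheader,
                             const SmallVectorImpl<StoreInst*> &LoopStores,
                             const SmallVectorImpl<LoadInst*> &LoopLoads,
                             const SmallVectorImpl<BasicBlock*> &ExitBlocks) {
  SmallVector<PHINode*, 16> NewPHIs;
  SSAUpdater SSA(&NewPHIs);
  const Type *Ty = cast<PointerType>(Ptr->getType())->getElementType();
  SSA.Initialize(Ty, Ptr->getName());

  // The preheader load is the live-in value on entry to the loop.
  LoadInst *PreheaderLoad =
    new LoadInst(Ptr, Ptr->getName()+".promoted", Preheader->getTerminator());
  SSA.AddAvailableValue(Preheader, PreheaderLoad);

  // Every store in the loop defines the value that is live out of its block.
  for (unsigned i = 0, e = LoopStores.size(); i != e; ++i)
    SSA.AddAvailableValue(LoopStores[i]->getParent(),
                          LoopStores[i]->getOperand(0));

  // Loads of the promoted location become uses of the reaching definition;
  // SSAUpdater inserts PHI nodes where control flow merges.
  for (unsigned i = 0, e = LoopLoads.size(); i != e; ++i)
    LoopLoads[i]->replaceAllUsesWith(
        SSA.GetValueInMiddleOfBlock(LoopLoads[i]->getParent()));

  // Each exit block stores the live-out value back to memory.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    new StoreInst(SSA.GetValueInMiddleOfBlock(ExitBlocks[i]), Ptr,
                  ExitBlocks[i]->getFirstNonPHI());
}
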
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/LoopDeletion.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
index 48817ab..543dfc1 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -28,7 +28,7 @@ namespace {
class LoopDeletion : public LoopPass {
public:
static char ID; // Pass ID, replacement for typeid
- LoopDeletion() : LoopPass(&ID) {}
+ LoopDeletion() : LoopPass(ID) {}
// Possibly eliminate loop L if it is dead.
bool runOnLoop(Loop* L, LPPassManager& LPM);
@@ -38,9 +38,9 @@ namespace {
bool &Changed, BasicBlock *Preheader);
virtual void getAnalysisUsage(AnalysisUsage& AU) const {
- AU.addRequired<ScalarEvolution>();
AU.addRequired<DominatorTree>();
AU.addRequired<LoopInfo>();
+ AU.addRequired<ScalarEvolution>();
AU.addRequiredID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
@@ -55,7 +55,8 @@ namespace {
}
char LoopDeletion::ID = 0;
-static RegisterPass<LoopDeletion> X("loop-deletion", "Delete dead loops");
+INITIALIZE_PASS(LoopDeletion, "loop-deletion",
+ "Delete dead loops", false, false);
Pass* llvm::createLoopDeletionPass() {
return new LoopDeletion();
@@ -83,7 +84,7 @@ bool LoopDeletion::IsLoopDead(Loop* L,
if (!L->makeLoopInvariant(I, Changed, Preheader->getTerminator()))
return false;
- BI++;
+ ++BI;
}
// Make sure that no instructions in the block have potential side-effects.
@@ -176,7 +177,7 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) {
BasicBlock::iterator BI = exitBlock->begin();
while (PHINode* P = dyn_cast<PHINode>(BI)) {
P->replaceUsesOfWith(exitingBlock, preheader);
- BI++;
+ ++BI;
}
// Update the dominator tree and remove the instructions and blocks that will
@@ -226,7 +227,7 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) {
LPM.deleteLoopFromQueue(L);
Changed = true;
- NumDeleted++;
+ ++NumDeleted;
return Changed;
}
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/LoopIndexSplit.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/LoopIndexSplit.cpp
index 16d3f2f..a433674 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/LoopIndexSplit.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/LoopIndexSplit.cpp
@@ -74,7 +74,7 @@ namespace {
class LoopIndexSplit : public LoopPass {
public:
static char ID; // Pass ID, replacement for typeid
- LoopIndexSplit() : LoopPass(&ID) {}
+ LoopIndexSplit() : LoopPass(ID) {}
// Index split Loop L. Return true if loop is split.
bool runOnLoop(Loop *L, LPPassManager &LPM);
@@ -197,8 +197,8 @@ namespace {
}
char LoopIndexSplit::ID = 0;
-static RegisterPass<LoopIndexSplit>
-X("loop-index-split", "Index Split Loops");
+INITIALIZE_PASS(LoopIndexSplit, "loop-index-split",
+ "Index Split Loops", false, false);
Pass *llvm::createLoopIndexSplitPass() {
return new LoopIndexSplit();
@@ -649,7 +649,7 @@ bool LoopIndexSplit::updateLoopIterationSpace() {
}
}
}
- NumRestrictBounds++;
+ ++NumRestrictBounds;
return true;
}
@@ -677,7 +677,7 @@ void LoopIndexSplit::removeBlocks(BasicBlock *DeadBB, Loop *LP,
for(pred_iterator PI = pred_begin(FrontierBB), PE = pred_end(FrontierBB);
PI != PE; ++PI) {
BasicBlock *P = *PI;
- if (P == DeadBB || DT->dominates(DeadBB, P))
+ if (DT->dominates(DeadBB, P))
PredBlocks.push_back(P);
}
@@ -799,7 +799,7 @@ void LoopIndexSplit::moveExitCondition(BasicBlock *CondBB, BasicBlock *ActiveBB,
// the dominance frontiers.
for (Loop::block_iterator I = LP->block_begin(), E = LP->block_end();
I != E; ++I) {
- if (*I == CondBB || !DT->dominates(CondBB, *I)) continue;
+ if (!DT->properlyDominates(CondBB, *I)) continue;
DominanceFrontier::iterator BBDF = DF->find(*I);
DominanceFrontier::DomSetType::iterator DomSetI = BBDF->second.begin();
DominanceFrontier::DomSetType::iterator DomSetE = BBDF->second.end();
@@ -948,6 +948,25 @@ bool LoopIndexSplit::splitLoop() {
if (!IVBasedValues.count(SplitCondition->getOperand(!SVOpNum)))
return false;
+ // Check for side effects.
+ for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
+ I != E; ++I) {
+ BasicBlock *BB = *I;
+
+ assert(DT->dominates(Header, BB));
+ if (DT->properlyDominates(SplitCondition->getParent(), BB))
+ continue;
+
+ for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
+ BI != BE; ++BI) {
+ Instruction *Inst = BI;
+
+ if (!Inst->isSafeToSpeculativelyExecute() && !isa<PHINode>(Inst)
+ && !isa<BranchInst>(Inst) && !isa<DbgInfoIntrinsic>(Inst))
+ return false;
+ }
+ }
+
// Normalize loop conditions so that it is easier to calculate new loop
// bounds.
if (IVisGT(*ExitCondition) || IVisGE(*ExitCondition)) {
@@ -997,13 +1016,13 @@ bool LoopIndexSplit::splitLoop() {
BSV = getMax(BSV, IVStartValue, Sign, PHTerm);
// [*] Clone Loop
- DenseMap<const Value *, Value *> ValueMap;
- Loop *BLoop = CloneLoop(L, LPM, LI, ValueMap, this);
+ ValueMap<const Value *, Value *> VMap;
+ Loop *BLoop = CloneLoop(L, LPM, LI, VMap, this);
Loop *ALoop = L;
// [*] ALoop's exiting edge enters BLoop's header.
// ALoop's original exit block becomes BLoop's exit block.
- PHINode *B_IndVar = cast<PHINode>(ValueMap[IndVar]);
+ PHINode *B_IndVar = cast<PHINode>(VMap[IndVar]);
BasicBlock *A_ExitingBlock = ExitCondition->getParent();
BranchInst *A_ExitInsn =
dyn_cast<BranchInst>(A_ExitingBlock->getTerminator());
@@ -1028,7 +1047,7 @@ bool LoopIndexSplit::splitLoop() {
for (BasicBlock::iterator BI = ALoop->getHeader()->begin(),
BE = ALoop->getHeader()->end(); BI != BE; ++BI) {
if (PHINode *PN = dyn_cast<PHINode>(BI)) {
- PHINode *PNClone = cast<PHINode>(ValueMap[PN]);
+ PHINode *PNClone = cast<PHINode>(VMap[PN]);
InverseMap[PNClone] = PN;
} else
break;
@@ -1066,11 +1085,11 @@ bool LoopIndexSplit::splitLoop() {
// block. Remove incoming PHINode values from ALoop's exiting block.
// Add new incoming values from BLoop's incoming exiting value.
// Update BLoop exit block's dominator info..
- BasicBlock *B_ExitingBlock = cast<BasicBlock>(ValueMap[A_ExitingBlock]);
+ BasicBlock *B_ExitingBlock = cast<BasicBlock>(VMap[A_ExitingBlock]);
for (BasicBlock::iterator BI = B_ExitBlock->begin(), BE = B_ExitBlock->end();
BI != BE; ++BI) {
if (PHINode *PN = dyn_cast<PHINode>(BI)) {
- PN->addIncoming(ValueMap[PN->getIncomingValueForBlock(A_ExitingBlock)],
+ PN->addIncoming(VMap[PN->getIncomingValueForBlock(A_ExitingBlock)],
B_ExitingBlock);
PN->removeIncomingValue(A_ExitingBlock);
} else
@@ -1112,7 +1131,7 @@ bool LoopIndexSplit::splitLoop() {
removeBlocks(A_InactiveBranch, L, A_ActiveBranch);
//[*] Eliminate split condition's inactive branch in from BLoop.
- BasicBlock *B_SplitCondBlock = cast<BasicBlock>(ValueMap[A_SplitCondBlock]);
+ BasicBlock *B_SplitCondBlock = cast<BasicBlock>(VMap[A_SplitCondBlock]);
BranchInst *B_BR = cast<BranchInst>(B_SplitCondBlock->getTerminator());
BasicBlock *B_InactiveBranch = NULL;
BasicBlock *B_ActiveBranch = NULL;
@@ -1127,9 +1146,9 @@ bool LoopIndexSplit::splitLoop() {
//[*] Move exit condition into split condition block to avoid
// executing dead loop iteration.
- ICmpInst *B_ExitCondition = cast<ICmpInst>(ValueMap[ExitCondition]);
- Instruction *B_IndVarIncrement = cast<Instruction>(ValueMap[IVIncrement]);
- ICmpInst *B_SplitCondition = cast<ICmpInst>(ValueMap[SplitCondition]);
+ ICmpInst *B_ExitCondition = cast<ICmpInst>(VMap[ExitCondition]);
+ Instruction *B_IndVarIncrement = cast<Instruction>(VMap[IVIncrement]);
+ ICmpInst *B_SplitCondition = cast<ICmpInst>(VMap[SplitCondition]);
moveExitCondition(A_SplitCondBlock, A_ActiveBranch, A_ExitBlock, ExitCondition,
cast<ICmpInst>(SplitCondition), IndVar, IVIncrement,
@@ -1140,7 +1159,7 @@ bool LoopIndexSplit::splitLoop() {
B_SplitCondition, B_IndVar, B_IndVarIncrement,
BLoop, EVOpNum);
- NumIndexSplit++;
+ ++NumIndexSplit;
return true;
}
@@ -1164,7 +1183,7 @@ bool LoopIndexSplit::cleanBlock(BasicBlock *BB) {
bool usedOutsideBB = false;
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
UI != UE; ++UI) {
- Instruction *U = cast<Instruction>(UI);
+ Instruction *U = cast<Instruction>(*UI);
if (U->getParent() != BB)
usedOutsideBB = true;
}
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/LoopRotation.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/LoopRotation.cpp
index 5004483..65acc1d 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/LoopRotation.cpp
@@ -35,7 +35,7 @@ namespace {
class LoopRotate : public LoopPass {
public:
static char ID; // Pass ID, replacement for typeid
- LoopRotate() : LoopPass(&ID) {}
+ LoopRotate() : LoopPass(ID) {}
// Rotate Loop L as many times as possible. Return true if
// loop is rotated at least once.
@@ -43,15 +43,15 @@ namespace {
// LCSSA form makes instruction renaming easier.
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addPreserved<DominatorTree>();
+ AU.addPreserved<DominanceFrontier>();
+ AU.addRequired<LoopInfo>();
+ AU.addPreserved<LoopInfo>();
AU.addRequiredID(LoopSimplifyID);
AU.addPreservedID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
AU.addPreservedID(LCSSAID);
AU.addPreserved<ScalarEvolution>();
- AU.addRequired<LoopInfo>();
- AU.addPreserved<LoopInfo>();
- AU.addPreserved<DominatorTree>();
- AU.addPreserved<DominanceFrontier>();
}
// Helper functions
@@ -79,7 +79,7 @@ namespace {
}
char LoopRotate::ID = 0;
-static RegisterPass<LoopRotate> X("loop-rotate", "Rotate Loops");
+INITIALIZE_PASS(LoopRotate, "loop-rotate", "Rotate Loops", false, false);
Pass *llvm::createLoopRotatePass() { return new LoopRotate(); }
@@ -147,7 +147,7 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
continue; // PHI nodes don't count.
if (isa<DbgInfoIntrinsic>(OI))
continue; // Debug intrinsics don't count as size.
- Size++;
+ ++Size;
}
if (Size > MAX_HEADER_SIZE)
@@ -221,7 +221,7 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
// The value now exits in two versions: the initial value in the preheader
// and the loop "next" value in the original header.
- SSA.Initialize(OrigHeaderVal);
+ SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName());
SSA.AddAvailableValue(OrigHeader, OrigHeaderVal);
SSA.AddAvailableValue(OrigPreHeader, OrigPreHeaderVal);
@@ -261,9 +261,29 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
// NewHeader is now the header of the loop.
L->moveToHeader(NewHeader);
+ // Move the original header to the bottom of the loop, where it now more
+ // naturally belongs. This isn't necessary for correctness, and CodeGen can
+ // usually reorder blocks on its own to fix things like this up, but it's
+ // still nice to keep the IR readable.
+ //
+ // The original header should have only one predecessor at this point, since
+ // we checked that the loop had a proper preheader and unique backedge before
+ // we started.
+ assert(OrigHeader->getSinglePredecessor() &&
+ "Original loop header has too many predecessors after loop rotation!");
+ OrigHeader->moveAfter(OrigHeader->getSinglePredecessor());
+
+ // Also, since this original header only has one predecessor, zap its
+ // PHI nodes, which are now trivial.
+ FoldSingleEntryPHINodes(OrigHeader);
+
+ // TODO: We could just go ahead and merge OrigHeader into its predecessor
+ // at this point, if we don't mind updating dominator info.
+
+ // Establish a new preheader, update dominators, etc.
preserveCanonicalLoopForm(LPM);
- NumRotated++;
+ ++NumRotated;
return true;
}
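
The LoopRotation hunk above moves the rotated-out header behind its single
predecessor and then folds its now-trivial PHI nodes via the existing
FoldSingleEntryPHINodes utility. Conceptually, folding a single-entry PHI
just replaces it with its only incoming value; a minimal sketch of that idea
(illustrative only, the patch relies on the library routine rather than
open-coding it):

#include <cassert>
#include "llvm/BasicBlock.h"
#include "llvm/Instructions.h"
using namespace llvm;

static void foldTrivialPHIs(BasicBlock *BB) {
  assert(BB->getSinglePredecessor() && "Block must have one predecessor");
  // With a single predecessor, every PHI has exactly one incoming value.
  while (PHINode *PN = dyn_cast<PHINode>(BB->begin())) {
    PN->replaceAllUsesWith(PN->getIncomingValue(0));
    PN->eraseFromParent();
  }
}
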
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index d226f06..e8dc5d3 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -107,11 +107,13 @@ namespace {
class RegUseTracker {
typedef DenseMap<const SCEV *, RegSortData> RegUsesTy;
- RegUsesTy RegUses;
+ RegUsesTy RegUsesMap;
SmallVector<const SCEV *, 16> RegSequence;
public:
void CountRegister(const SCEV *Reg, size_t LUIdx);
+ void DropRegister(const SCEV *Reg, size_t LUIdx);
+ void DropUse(size_t LUIdx);
bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;
@@ -132,7 +134,7 @@ public:
void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
std::pair<RegUsesTy::iterator, bool> Pair =
- RegUses.insert(std::make_pair(Reg, RegSortData()));
+ RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
RegSortData &RSD = Pair.first->second;
if (Pair.second)
RegSequence.push_back(Reg);
@@ -140,11 +142,29 @@ RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
RSD.UsedByIndices.set(LUIdx);
}
+void
+RegUseTracker::DropRegister(const SCEV *Reg, size_t LUIdx) {
+ RegUsesTy::iterator It = RegUsesMap.find(Reg);
+ assert(It != RegUsesMap.end());
+ RegSortData &RSD = It->second;
+ assert(RSD.UsedByIndices.size() > LUIdx);
+ RSD.UsedByIndices.reset(LUIdx);
+}
+
+void
+RegUseTracker::DropUse(size_t LUIdx) {
+ // Remove the use index from every register's use list.
+ for (RegUsesTy::iterator I = RegUsesMap.begin(), E = RegUsesMap.end();
+ I != E; ++I)
+ I->second.UsedByIndices.reset(LUIdx);
+}
+
bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
- if (!RegUses.count(Reg)) return false;
- const SmallBitVector &UsedByIndices =
- RegUses.find(Reg)->second.UsedByIndices;
+ RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
+ if (I == RegUsesMap.end())
+ return false;
+ const SmallBitVector &UsedByIndices = I->second.UsedByIndices;
int i = UsedByIndices.find_first();
if (i == -1) return false;
if ((size_t)i != LUIdx) return true;
@@ -152,13 +172,13 @@ RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
}
const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
- RegUsesTy::const_iterator I = RegUses.find(Reg);
- assert(I != RegUses.end() && "Unknown register!");
+ RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
+ assert(I != RegUsesMap.end() && "Unknown register!");
return I->second.UsedByIndices;
}
void RegUseTracker::clear() {
- RegUses.clear();
+ RegUsesMap.clear();
RegSequence.clear();
}
@@ -188,6 +208,8 @@ struct Formula {
unsigned getNumRegs() const;
const Type *getType() const;
+ void DeleteBaseReg(const SCEV *&S);
+
bool referencesReg(const SCEV *S) const;
bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
const RegUseTracker &RegUses) const;
@@ -221,7 +243,7 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
if (!AR->getStart()->isZero()) {
DoInitialMatch(AR->getStart(), L, Good, Bad, SE, DT);
- DoInitialMatch(SE.getAddRecExpr(SE.getIntegerSCEV(0, AR->getType()),
+ DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
AR->getStepRecurrence(SE),
AR->getLoop()),
L, Good, Bad, SE, DT);
@@ -262,11 +284,15 @@ void Formula::InitialMatch(const SCEV *S, Loop *L,
SmallVector<const SCEV *, 4> Bad;
DoInitialMatch(S, L, Good, Bad, SE, DT);
if (!Good.empty()) {
- BaseRegs.push_back(SE.getAddExpr(Good));
+ const SCEV *Sum = SE.getAddExpr(Good);
+ if (!Sum->isZero())
+ BaseRegs.push_back(Sum);
AM.HasBaseReg = true;
}
if (!Bad.empty()) {
- BaseRegs.push_back(SE.getAddExpr(Bad));
+ const SCEV *Sum = SE.getAddExpr(Bad);
+ if (!Sum->isZero())
+ BaseRegs.push_back(Sum);
AM.HasBaseReg = true;
}
}
@@ -287,6 +313,13 @@ const Type *Formula::getType() const {
0;
}
+/// DeleteBaseReg - Delete the given base reg from the BaseRegs list.
+void Formula::DeleteBaseReg(const SCEV *&S) {
+ if (&S != &BaseRegs.back())
+ std::swap(S, BaseRegs.back());
+ BaseRegs.pop_back();
+}
+
/// referencesReg - Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
return S == ScaledReg ||
@@ -322,6 +355,13 @@ void Formula::print(raw_ostream &OS) const {
if (!First) OS << " + "; else First = false;
OS << "reg(" << **I << ')';
}
+ if (AM.HasBaseReg && BaseRegs.empty()) {
+ if (!First) OS << " + "; else First = false;
+ OS << "**error: HasBaseReg**";
+ } else if (!AM.HasBaseReg && !BaseRegs.empty()) {
+ if (!First) OS << " + "; else First = false;
+ OS << "**error: !HasBaseReg**";
+ }
if (AM.Scale != 0) {
if (!First) OS << " + "; else First = false;
OS << AM.Scale << "*reg(";
@@ -341,8 +381,7 @@ void Formula::dump() const {
/// without changing its value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
const Type *WideTy =
- IntegerType::get(SE.getContext(),
- SE.getTypeSizeInBits(AR->getType()) + 1);
+ IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1);
return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}
@@ -350,18 +389,17 @@ static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
/// without changing its value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
const Type *WideTy =
- IntegerType::get(SE.getContext(),
- SE.getTypeSizeInBits(A->getType()) + 1);
+ IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}
-/// isMulSExtable - Return true if the given add can be sign-extended
+/// isMulSExtable - Return true if the given mul can be sign-extended
/// without changing its value.
-static bool isMulSExtable(const SCEVMulExpr *A, ScalarEvolution &SE) {
+static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
const Type *WideTy =
IntegerType::get(SE.getContext(),
- SE.getTypeSizeInBits(A->getType()) + 1);
- return isa<SCEVMulExpr>(SE.getSignExtendExpr(A, WideTy));
+ SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
+ return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
}
/// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined
@@ -375,35 +413,44 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
bool IgnoreSignificantBits = false) {
// Handle the trivial case, which works for any SCEV type.
if (LHS == RHS)
- return SE.getIntegerSCEV(1, LHS->getType());
+ return SE.getConstant(LHS->getType(), 1);
- // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do some
- // folding.
- if (RHS->isAllOnesValue())
- return SE.getMulExpr(LHS, RHS);
+ // Handle a few RHS special cases.
+ const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
+ if (RC) {
+ const APInt &RA = RC->getValue()->getValue();
+ // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
+ // some folding.
+ if (RA.isAllOnesValue())
+ return SE.getMulExpr(LHS, RC);
+ // Handle x /s 1 as x.
+ if (RA == 1)
+ return LHS;
+ }
// Check for a division of a constant by a constant.
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
- const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
if (!RC)
return 0;
- if (C->getValue()->getValue().srem(RC->getValue()->getValue()) != 0)
+ const APInt &LA = C->getValue()->getValue();
+ const APInt &RA = RC->getValue()->getValue();
+ if (LA.srem(RA) != 0)
return 0;
- return SE.getConstant(C->getValue()->getValue()
- .sdiv(RC->getValue()->getValue()));
+ return SE.getConstant(LA.sdiv(RA));
}
// Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) {
- const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
- IgnoreSignificantBits);
- if (!Start) return 0;
const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
IgnoreSignificantBits);
if (!Step) return 0;
+ const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
+ IgnoreSignificantBits);
+ if (!Start) return 0;
return SE.getAddRecExpr(Start, Step, AR->getLoop());
}
+ return 0;
}
// Distribute the sdiv over add operands, if the add doesn't overflow.
@@ -419,26 +466,29 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
}
return SE.getAddExpr(Ops);
}
+ return 0;
}
// Check for a multiply operand that we can pull RHS out of.
- if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS))
+ if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
SmallVector<const SCEV *, 4> Ops;
bool Found = false;
for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
I != E; ++I) {
+ const SCEV *S = *I;
if (!Found)
- if (const SCEV *Q = getExactSDiv(*I, RHS, SE,
+ if (const SCEV *Q = getExactSDiv(S, RHS, SE,
IgnoreSignificantBits)) {
- Ops.push_back(Q);
+ S = Q;
Found = true;
- continue;
}
- Ops.push_back(*I);
+ Ops.push_back(S);
}
return Found ? SE.getMulExpr(Ops) : 0;
}
+ return 0;
+ }
// Otherwise we don't know.
return 0;
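
getExactSDiv only produces a quotient when the division is exact, and the hunk
above adds early handling for an RHS of 1 or -1. A rough analogue on plain
64-bit integers, assuming truncating division semantics (this is only a sketch,
not the SCEV-based routine itself):

#include <cstdint>
#include <optional>

std::optional<int64_t> exactSDiv(int64_t LHS, int64_t RHS) {
  if (RHS == 0)
    return std::nullopt;          // undefined; give up
  if (RHS == 1)
    return LHS;                   // x /s 1 == x
  if (RHS == -1) {
    if (LHS == INT64_MIN)
      return std::nullopt;        // negation would overflow
    return -LHS;                  // x /s -1 == x * -1
  }
  if (LHS % RHS != 0)
    return std::nullopt;          // not exact; the caller must bail out
  return LHS / RHS;
}
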
@@ -450,18 +500,20 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
if (C->getValue()->getValue().getMinSignedBits() <= 64) {
- S = SE.getIntegerSCEV(0, C->getType());
+ S = SE.getConstant(C->getType(), 0);
return C->getValue()->getSExtValue();
}
} else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
int64_t Result = ExtractImmediate(NewOps.front(), SE);
- S = SE.getAddExpr(NewOps);
+ if (Result != 0)
+ S = SE.getAddExpr(NewOps);
return Result;
} else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
int64_t Result = ExtractImmediate(NewOps.front(), SE);
- S = SE.getAddRecExpr(NewOps, AR->getLoop());
+ if (Result != 0)
+ S = SE.getAddRecExpr(NewOps, AR->getLoop());
return Result;
}
return 0;
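
The change above makes ExtractImmediate rebuild the enclosing expression only
when a nonzero constant was actually peeled off, leaving S untouched otherwise.
A simplified integer-only sketch of that contract (purely illustrative):

#include <cstdint>
#include <vector>

// Sum is a flattened add expression whose first term may be a constant.
// Peel the constant off, but only modify the sum when one was found.
int64_t extractImmediate(std::vector<int64_t> &Sum) {
  if (Sum.empty())
    return 0;
  int64_t Imm = Sum.front();
  if (Imm != 0)
    Sum.erase(Sum.begin());       // rebuild the sum without the constant
  return Imm;
}
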
@@ -473,18 +525,20 @@ static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
- S = SE.getIntegerSCEV(0, GV->getType());
+ S = SE.getConstant(GV->getType(), 0);
return GV;
}
} else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
- S = SE.getAddExpr(NewOps);
+ if (Result)
+ S = SE.getAddExpr(NewOps);
return Result;
} else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
- S = SE.getAddRecExpr(NewOps, AR->getLoop());
+ if (Result)
+ S = SE.getAddRecExpr(NewOps, AR->getLoop());
return Result;
}
return 0;
@@ -510,7 +564,7 @@ static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
case Intrinsic::x86_sse2_storeu_pd:
case Intrinsic::x86_sse2_storeu_dq:
case Intrinsic::x86_sse2_storel_dq:
- if (II->getOperand(1) == OperandVal)
+ if (II->getArgOperand(0) == OperandVal)
isAddress = true;
break;
}
@@ -532,7 +586,7 @@ static const Type *getAccessType(const Instruction *Inst) {
case Intrinsic::x86_sse2_storeu_pd:
case Intrinsic::x86_sse2_storeu_dq:
case Intrinsic::x86_sse2_storel_dq:
- AccessTy = II->getOperand(1)->getType();
+ AccessTy = II->getArgOperand(0)->getType();
break;
}
}
@@ -781,10 +835,10 @@ struct LSRFixup {
/// will be replaced.
Value *OperandValToReplace;
- /// PostIncLoop - If this user is to use the post-incremented value of an
+ /// PostIncLoops - If this user is to use the post-incremented value of an
/// induction variable, this variable is non-null and holds the loop
/// associated with the induction variable.
- const Loop *PostIncLoop;
+ PostIncLoopSet PostIncLoops;
/// LUIdx - The index of the LSRUse describing the expression which
/// this fixup needs, minus an offset (below).
@@ -795,6 +849,8 @@ struct LSRFixup {
/// offsets, for example in an unrolled loop.
int64_t Offset;
+ bool isUseFullyOutsideLoop(const Loop *L) const;
+
LSRFixup();
void print(raw_ostream &OS) const;
@@ -804,8 +860,22 @@ struct LSRFixup {
}
LSRFixup::LSRFixup()
- : UserInst(0), OperandValToReplace(0), PostIncLoop(0),
- LUIdx(~size_t(0)), Offset(0) {}
+ : UserInst(0), OperandValToReplace(0), LUIdx(~size_t(0)), Offset(0) {}
+
+/// isUseFullyOutsideLoop - Test whether this fixup always uses its
+/// value outside of the given loop.
+bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const {
+ // PHI nodes use their value in their incoming blocks.
+ if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) {
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
+ if (PN->getIncomingValue(i) == OperandValToReplace &&
+ L->contains(PN->getIncomingBlock(i)))
+ return false;
+ return true;
+ }
+
+ return !L->contains(UserInst);
+}
void LSRFixup::print(raw_ostream &OS) const {
OS << "UserInst=";
@@ -821,9 +891,10 @@ void LSRFixup::print(raw_ostream &OS) const {
OS << ", OperandValToReplace=";
WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false);
- if (PostIncLoop) {
+ for (PostIncLoopSet::const_iterator I = PostIncLoops.begin(),
+ E = PostIncLoops.end(); I != E; ++I) {
OS << ", PostIncLoop=";
- WriteAsOperand(OS, PostIncLoop->getHeader(), /*PrintType=*/false);
+ WriteAsOperand(OS, (*I)->getHeader(), /*PrintType=*/false);
}
if (LUIdx != ~size_t(0))
@@ -899,6 +970,12 @@ public:
/// may be used.
bool AllFixupsOutsideLoop;
+ /// WidestFixupType - This records the widest use type for any fixup using
+ /// this LSRUse. FindUseWithSimilarFormula can't consider uses with different
+ /// max fixup widths to be equivalent, because the narrower one may be relying
+ /// on the implicit truncation to truncate away bogus bits.
+ const Type *WidestFixupType;
+
/// Formulae - A list of ways to build a value that can satisfy this user.
/// After the list is populated, one of these is selected heuristically and
/// used to formulate a replacement for OperandValToReplace in UserInst.
@@ -910,16 +987,30 @@ public:
LSRUse(KindType K, const Type *T) : Kind(K), AccessTy(T),
MinOffset(INT64_MAX),
MaxOffset(INT64_MIN),
- AllFixupsOutsideLoop(true) {}
+ AllFixupsOutsideLoop(true),
+ WidestFixupType(0) {}
+ bool HasFormulaWithSameRegs(const Formula &F) const;
bool InsertFormula(const Formula &F);
-
- void check() const;
+ void DeleteFormula(Formula &F);
+ void RecomputeRegs(size_t LUIdx, RegUseTracker &Reguses);
void print(raw_ostream &OS) const;
void dump() const;
};
+}
+
+/// HasFormulaWithSameRegs - Test whether this use has a formula with the same
+/// registers as the given formula.
+bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
+ SmallVector<const SCEV *, 2> Key = F.BaseRegs;
+ if (F.ScaledReg) Key.push_back(F.ScaledReg);
+ // Unstable sort by host order ok, because this is only used for uniquifying.
+ std::sort(Key.begin(), Key.end());
+ return Uniquifier.count(Key);
+}
+
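
HasFormulaWithSameRegs builds its lookup key by sorting a copy of the register
list, so two formulae that reference the same registers in a different order
map to the same key. A standalone sketch of the same uniquifying trick, with
ints standing in for SCEV pointers:

#include <algorithm>
#include <set>
#include <vector>

using RegKey = std::vector<int>;

bool hasFormulaWithSameRegs(const std::set<RegKey> &Uniquifier,
                            std::vector<int> Regs) {
  std::sort(Regs.begin(), Regs.end()); // any consistent order works as a key
  return Uniquifier.count(Regs) != 0;
}

int main() {
  std::set<RegKey> Uniquifier;
  Uniquifier.insert({1, 2, 3});                              // stored sorted
  bool Seen = hasFormulaWithSameRegs(Uniquifier, {3, 1, 2}); // true
  (void)Seen;
}
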
/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRUse::InsertFormula(const Formula &F) {
@@ -950,6 +1041,33 @@ bool LSRUse::InsertFormula(const Formula &F) {
return true;
}
+/// DeleteFormula - Remove the given formula from this use's list.
+void LSRUse::DeleteFormula(Formula &F) {
+ if (&F != &Formulae.back())
+ std::swap(F, Formulae.back());
+ Formulae.pop_back();
+ assert(!Formulae.empty() && "LSRUse has no formulae left!");
+}
+
+/// RecomputeRegs - Recompute the Regs field, and update RegUses.
+void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
+ // Now that we've filtered out some formulae, recompute the Regs set.
+ SmallPtrSet<const SCEV *, 4> OldRegs = Regs;
+ Regs.clear();
+ for (SmallVectorImpl<Formula>::const_iterator I = Formulae.begin(),
+ E = Formulae.end(); I != E; ++I) {
+ const Formula &F = *I;
+ if (F.ScaledReg) Regs.insert(F.ScaledReg);
+ Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
+ }
+
+ // Update the RegTracker.
+ for (SmallPtrSet<const SCEV *, 4>::iterator I = OldRegs.begin(),
+ E = OldRegs.end(); I != E; ++I)
+ if (!Regs.count(*I))
+ RegUses.DropRegister(*I, LUIdx);
+}
+
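
RecomputeRegs rebuilds the per-use register set from whatever formulae remain
and then drops the stale back-references from the shared tracker. A compact
sketch of the same bookkeeping with standard containers standing in for the
LLVM ones:

#include <cstddef>
#include <map>
#include <set>
#include <vector>

using Reg = int;
// Maps each register to the indices of the uses that reference it.
using RegUseTracker = std::map<Reg, std::set<std::size_t>>;

void recomputeRegs(std::size_t UseIdx,
                   const std::vector<std::vector<Reg>> &Formulae,
                   std::set<Reg> &Regs, RegUseTracker &RegUses) {
  std::set<Reg> OldRegs;
  OldRegs.swap(Regs);                  // Regs is now empty
  for (const std::vector<Reg> &F : Formulae)
    Regs.insert(F.begin(), F.end());   // registers still referenced
  for (Reg R : OldRegs)
    if (!Regs.count(R))
      RegUses[R].erase(UseIdx);        // drop the stale back-reference
}
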
void LSRUse::print(raw_ostream &OS) const {
OS << "LSR Use: Kind=";
switch (Kind) {
@@ -968,13 +1086,16 @@ void LSRUse::print(raw_ostream &OS) const {
for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
E = Offsets.end(); I != E; ++I) {
OS << *I;
- if (next(I) != E)
+ if (llvm::next(I) != E)
OS << ',';
}
OS << '}';
if (AllFixupsOutsideLoop)
OS << ", all-fixups-outside-loop";
+
+ if (WidestFixupType)
+ OS << ", widest fixup type: " << *WidestFixupType;
}
void LSRUse::dump() const {
@@ -1069,6 +1190,13 @@ static bool isAlwaysFoldable(int64_t BaseOffs,
AM.HasBaseReg = HasBaseReg;
AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;
+ // Canonicalize a scale of 1 to a base register if the formula doesn't
+ // already have a base register.
+ if (!AM.HasBaseReg && AM.Scale == 1) {
+ AM.Scale = 0;
+ AM.HasBaseReg = true;
+ }
+
return isLegalUse(AM, Kind, AccessTy, TLI);
}
@@ -1103,6 +1231,32 @@ static bool isAlwaysFoldable(const SCEV *S,
return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
}
+namespace {
+
+/// UseMapDenseMapInfo - A DenseMapInfo implementation for holding
+/// DenseMaps and DenseSets of pairs of const SCEV* and LSRUse::Kind.
+struct UseMapDenseMapInfo {
+ static std::pair<const SCEV *, LSRUse::KindType> getEmptyKey() {
+ return std::make_pair(reinterpret_cast<const SCEV *>(-1), LSRUse::Basic);
+ }
+
+ static std::pair<const SCEV *, LSRUse::KindType> getTombstoneKey() {
+ return std::make_pair(reinterpret_cast<const SCEV *>(-2), LSRUse::Basic);
+ }
+
+ static unsigned
+ getHashValue(const std::pair<const SCEV *, LSRUse::KindType> &V) {
+ unsigned Result = DenseMapInfo<const SCEV *>::getHashValue(V.first);
+ Result ^= DenseMapInfo<unsigned>::getHashValue(unsigned(V.second));
+ return Result;
+ }
+
+ static bool isEqual(const std::pair<const SCEV *, LSRUse::KindType> &LHS,
+ const std::pair<const SCEV *, LSRUse::KindType> &RHS) {
+ return LHS == RHS;
+ }
+};
+
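
The DenseMapInfo specialization above is what allows (const SCEV *, Kind)
pairs to serve as DenseMap keys, combining the two members' hashes with an
XOR. A rough standard-library analogue using std::unordered_map and a custom
pair hash (the enum values here are only placeholders):

#include <cstddef>
#include <functional>
#include <unordered_map>
#include <utility>

enum class KindType { Basic, Special, Address, ICmpZero };

struct PairKeyHash {
  std::size_t operator()(const std::pair<const void *, KindType> &V) const {
    std::size_t H = std::hash<const void *>()(V.first);
    H ^= std::hash<unsigned>()(static_cast<unsigned>(V.second));
    return H;
  }
};

using UseMap =
    std::unordered_map<std::pair<const void *, KindType>, std::size_t,
                       PairKeyHash>;
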
/// FormulaSorter - This class implements an ordering for formulae which sorts
/// them by their standalone cost.
class FormulaSorter {
@@ -1135,6 +1289,7 @@ class LSRInstance {
IVUsers &IU;
ScalarEvolution &SE;
DominatorTree &DT;
+ LoopInfo &LI;
const TargetLowering *const TLI;
Loop *const L;
bool Changed;
@@ -1163,7 +1318,7 @@ class LSRInstance {
void OptimizeShadowIV();
bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
- bool OptimizeLoopTermCond();
+ void OptimizeLoopTermCond();
void CollectInterestingTypesAndFactors();
void CollectFixupsAndInitialFormulae();
@@ -1174,16 +1329,22 @@ class LSRInstance {
}
// Support for sharing of LSRUses between LSRFixups.
- typedef DenseMap<const SCEV *, size_t> UseMapTy;
+ typedef DenseMap<std::pair<const SCEV *, LSRUse::KindType>,
+ size_t,
+ UseMapDenseMapInfo> UseMapTy;
UseMapTy UseMap;
- bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
+ bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
LSRUse::KindType Kind, const Type *AccessTy);
std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
LSRUse::KindType Kind,
const Type *AccessTy);
+ void DeleteUse(LSRUse &LU);
+
+ LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);
+
public:
void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
@@ -1204,6 +1365,12 @@ public:
void GenerateAllReuseFormulae();
void FilterOutUndesirableDedicatedRegisters();
+
+ size_t EstimateSearchSpaceComplexity() const;
+ void NarrowSearchSpaceByDetectingSupersets();
+ void NarrowSearchSpaceByCollapsingUnrolledCode();
+ void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
+ void NarrowSearchSpaceByPickingWinnerRegs();
void NarrowSearchSpaceUsingHeuristics();
void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
@@ -1214,6 +1381,13 @@ public:
DenseSet<const SCEV *> &VisitedRegs) const;
void Solve(SmallVectorImpl<const Formula *> &Solution) const;
+ BasicBlock::iterator
+ HoistInsertPosition(BasicBlock::iterator IP,
+ const SmallVectorImpl<Instruction *> &Inputs) const;
+ BasicBlock::iterator AdjustInsertPositionForExpand(BasicBlock::iterator IP,
+ const LSRFixup &LF,
+ const LSRUse &LU) const;
+
Value *Expand(const LSRFixup &LF,
const Formula &F,
BasicBlock::iterator IP,
@@ -1345,6 +1519,7 @@ void LSRInstance::OptimizeShadowIV() {
/* Remove cast operation */
ShadowUse->replaceAllUsesWith(NewPH);
ShadowUse->eraseFromParent();
+ Changed = true;
break;
}
}
@@ -1352,8 +1527,7 @@ void LSRInstance::OptimizeShadowIV() {
/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
/// set the IV user and stride information and return true, otherwise return
/// false.
-bool LSRInstance::FindIVUserForCond(ICmpInst *Cond,
- IVStrideUse *&CondUse) {
+bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
if (UI->getUser() == Cond) {
// NOTE: we could handle setcc instructions with multiple uses here, but
@@ -1427,16 +1601,30 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
return Cond;
- const SCEV *One = SE.getIntegerSCEV(1, BackedgeTakenCount->getType());
+ const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1);
// Add one to the backedge-taken count to get the trip count.
- const SCEV *IterationCount = SE.getAddExpr(BackedgeTakenCount, One);
-
- // Check for a max calculation that matches the pattern.
- if (!isa<SCEVSMaxExpr>(IterationCount) && !isa<SCEVUMaxExpr>(IterationCount))
+ const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount);
+ if (IterationCount != SE.getSCEV(Sel)) return Cond;
+
+ // Check for a max calculation that matches the pattern. There's no check
+ // for ICMP_ULE here because the comparison would be with zero, which
+ // isn't interesting.
+ CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
+ const SCEVNAryExpr *Max = 0;
+ if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) {
+ Pred = ICmpInst::ICMP_SLE;
+ Max = S;
+ } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) {
+ Pred = ICmpInst::ICMP_SLT;
+ Max = S;
+ } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) {
+ Pred = ICmpInst::ICMP_ULT;
+ Max = U;
+ } else {
+ // No match; bail.
return Cond;
- const SCEVNAryExpr *Max = cast<SCEVNAryExpr>(IterationCount);
- if (Max != SE.getSCEV(Sel)) return Cond;
+ }
// To handle a max with more than two operands, this optimization would
// require additional checking and setup.
@@ -1445,7 +1633,13 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
const SCEV *MaxLHS = Max->getOperand(0);
const SCEV *MaxRHS = Max->getOperand(1);
- if (!MaxLHS || MaxLHS != One) return Cond;
+
+ // ScalarEvolution canonicalizes constants to the left. For < and >, look
+ // for a comparison with 1. For <= and >=, a comparison with zero.
+ if (!MaxLHS ||
+ (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One)))
+ return Cond;
+
// Check the relevant induction variable for conformance to
// the pattern.
const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
@@ -1461,16 +1655,32 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
// Check the right operand of the select, and remember it, as it will
// be used in the new comparison instruction.
Value *NewRHS = 0;
- if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
+ if (ICmpInst::isTrueWhenEqual(Pred)) {
+ // Look for n+1, and grab n.
+ if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1)))
+ if (isa<ConstantInt>(BO->getOperand(1)) &&
+ cast<ConstantInt>(BO->getOperand(1))->isOne() &&
+ SE.getSCEV(BO->getOperand(0)) == MaxRHS)
+ NewRHS = BO->getOperand(0);
+ if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2)))
+ if (isa<ConstantInt>(BO->getOperand(1)) &&
+ cast<ConstantInt>(BO->getOperand(1))->isOne() &&
+ SE.getSCEV(BO->getOperand(0)) == MaxRHS)
+ NewRHS = BO->getOperand(0);
+ if (!NewRHS)
+ return Cond;
+ } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
NewRHS = Sel->getOperand(1);
else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
NewRHS = Sel->getOperand(2);
- if (!NewRHS) return Cond;
+ else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS))
+ NewRHS = SU->getValue();
+ else
+ // Max doesn't match expected pattern.
+ return Cond;
// Determine the new comparison opcode. It may be signed or unsigned,
// and the original comparison may be either equality or inequality.
- CmpInst::Predicate Pred =
- isa<SCEVSMaxExpr>(Max) ? CmpInst::ICMP_SLT : CmpInst::ICMP_ULT;
if (Cond->getPredicate() == CmpInst::ICMP_EQ)
Pred = CmpInst::getInversePredicate(Pred);
@@ -1492,7 +1702,7 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
/// OptimizeLoopTermCond - Change loop terminating condition to use the
/// postinc iv when possible.
-bool
+void
LSRInstance::OptimizeLoopTermCond() {
SmallPtrSet<Instruction *, 4> PostIncs;
@@ -1545,8 +1755,9 @@ LSRInstance::OptimizeLoopTermCond() {
!DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
// Conservatively assume there may be reuse if the quotient of their
// strides could be a legal scale.
- const SCEV *A = CondUse->getStride();
- const SCEV *B = UI->getStride();
+ const SCEV *A = IU.getStride(*CondUse, L);
+ const SCEV *B = IU.getStride(*UI, L);
+ if (!A || !B) continue;
if (SE.getTypeSizeInBits(A->getType()) !=
SE.getTypeSizeInBits(B->getType())) {
if (SE.getTypeSizeInBits(A->getType()) >
@@ -1557,13 +1768,13 @@ LSRInstance::OptimizeLoopTermCond() {
}
if (const SCEVConstant *D =
dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) {
+ const ConstantInt *C = D->getValue();
// Stride of one or negative one can have reuse with non-addresses.
- if (D->getValue()->isOne() ||
- D->getValue()->isAllOnesValue())
+ if (C->isOne() || C->isAllOnesValue())
goto decline_post_inc;
// Avoid weird situations.
- if (D->getValue()->getValue().getMinSignedBits() >= 64 ||
- D->getValue()->getValue().isMinSignedValue())
+ if (C->getValue().getMinSignedBits() >= 64 ||
+ C->getValue().isMinSignedValue())
goto decline_post_inc;
// Without TLI, assume that any stride might be valid, and so any
// use might be shared.
@@ -1572,7 +1783,7 @@ LSRInstance::OptimizeLoopTermCond() {
// Check for possible scaled-address reuse.
const Type *AccessTy = getAccessType(UI->getUser());
TargetLowering::AddrMode AM;
- AM.Scale = D->getValue()->getSExtValue();
+ AM.Scale = C->getSExtValue();
if (TLI->isLegalAddressingMode(AM, AccessTy))
goto decline_post_inc;
AM.Scale = -AM.Scale;
@@ -1598,8 +1809,7 @@ LSRInstance::OptimizeLoopTermCond() {
ExitingBlock->getInstList().insert(TermBr, Cond);
// Clone the IVUse, as the old use still exists!
- CondUse = &IU.AddUser(CondUse->getStride(), CondUse->getOffset(),
- Cond, CondUse->getOperandValToReplace());
+ CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace());
TermBr->replaceUsesOfWith(OldCond, Cond);
}
}
@@ -1607,9 +1817,7 @@ LSRInstance::OptimizeLoopTermCond() {
// If we get to here, we know that we can transform the setcc instruction to
// use the post-incremented version of the IV, allowing us to coalesce the
// live ranges for the IV correctly.
- CondUse->setOffset(SE.getMinusSCEV(CondUse->getOffset(),
- CondUse->getStride()));
- CondUse->setIsUseOfPostIncrementedValue(true);
+ CondUse->transformToPostInc(L);
Changed = true;
PostIncs.insert(Cond);
@@ -1630,12 +1838,13 @@ LSRInstance::OptimizeLoopTermCond() {
else if (BB != IVIncInsertPos->getParent())
IVIncInsertPos = BB->getTerminator();
}
-
- return Changed;
}
+/// reconcileNewOffset - Determine if the given use can accommodate a fixup
+/// at the given offset and other details. If so, update the use and
+/// return true.
bool
-LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
+LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
LSRUse::KindType Kind, const Type *AccessTy) {
int64_t NewMinOffset = LU.MinOffset;
int64_t NewMaxOffset = LU.MaxOffset;
@@ -1648,17 +1857,19 @@ LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
return false;
// Conservatively assume HasBaseReg is true for now.
if (NewOffset < LU.MinOffset) {
- if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, /*HasBaseReg=*/true,
+ if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, HasBaseReg,
Kind, AccessTy, TLI))
return false;
NewMinOffset = NewOffset;
} else if (NewOffset > LU.MaxOffset) {
- if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, /*HasBaseReg=*/true,
+ if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, HasBaseReg,
Kind, AccessTy, TLI))
return false;
NewMaxOffset = NewOffset;
}
// Check for a mismatched access type, and fall back conservatively as needed.
+ // TODO: Be less conservative when the type is similar and can use the same
+ // addressing modes.
if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
NewAccessTy = Type::getVoidTy(AccessTy->getContext());
@@ -1687,12 +1898,12 @@ LSRInstance::getUse(const SCEV *&Expr,
}
std::pair<UseMapTy::iterator, bool> P =
- UseMap.insert(std::make_pair(Expr, 0));
+ UseMap.insert(std::make_pair(std::make_pair(Expr, Kind), 0));
if (!P.second) {
// A use already existed with this base.
size_t LUIdx = P.first->second;
LSRUse &LU = Uses[LUIdx];
- if (reconcileNewOffset(LU, Offset, Kind, AccessTy))
+ if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy))
// Reuse this use.
return std::make_pair(LUIdx, Offset);
}
@@ -1713,30 +1924,85 @@ LSRInstance::getUse(const SCEV *&Expr,
return std::make_pair(LUIdx, Offset);
}
+/// DeleteUse - Delete the given use from the Uses list.
+void LSRInstance::DeleteUse(LSRUse &LU) {
+ if (&LU != &Uses.back())
+ std::swap(LU, Uses.back());
+ Uses.pop_back();
+}
+
+/// FindUseWithSimilarFormula - Look for a use distinct from OrigLU which has
+/// a formula that has the same registers as the given formula.
+LSRUse *
+LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
+ const LSRUse &OrigLU) {
+ // Search all uses for the formula. This could be more clever.
+ for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
+ LSRUse &LU = Uses[LUIdx];
+ // Check whether this use is close enough to OrigLU, to see whether it's
+ // worthwhile looking through its formulae.
+ // Ignore ICmpZero uses because they may contain formulae generated by
+ // GenerateICmpZeroScales, in which case adding fixup offsets may
+ // be invalid.
+ if (&LU != &OrigLU &&
+ LU.Kind != LSRUse::ICmpZero &&
+ LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy &&
+ LU.WidestFixupType == OrigLU.WidestFixupType &&
+ LU.HasFormulaWithSameRegs(OrigF)) {
+ // Scan through this use's formulae.
+ for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
+ E = LU.Formulae.end(); I != E; ++I) {
+ const Formula &F = *I;
+ // Check to see if this formula has the same registers and symbols
+ // as OrigF.
+ if (F.BaseRegs == OrigF.BaseRegs &&
+ F.ScaledReg == OrigF.ScaledReg &&
+ F.AM.BaseGV == OrigF.AM.BaseGV &&
+ F.AM.Scale == OrigF.AM.Scale) {
+ if (F.AM.BaseOffs == 0)
+ return &LU;
+ // This is the formula where all the registers and symbols matched;
+ // there aren't going to be any others. Since we declined it, we
+ // can skip the rest of the formulae and proceed to the next LSRUse.
+ break;
+ }
+ }
+ }
+ }
+
+ // Nothing looked good.
+ return 0;
+}
+
void LSRInstance::CollectInterestingTypesAndFactors() {
SmallSetVector<const SCEV *, 4> Strides;
// Collect interesting types and strides.
+ SmallVector<const SCEV *, 4> Worklist;
for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
- const SCEV *Stride = UI->getStride();
+ const SCEV *Expr = IU.getExpr(*UI);
// Collect interesting types.
- Types.insert(SE.getEffectiveSCEVType(Stride->getType()));
-
- // Add the stride for this loop.
- Strides.insert(Stride);
-
- // Add strides for other mentioned loops.
- for (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(UI->getOffset());
- AR; AR = dyn_cast<SCEVAddRecExpr>(AR->getStart()))
- Strides.insert(AR->getStepRecurrence(SE));
+ Types.insert(SE.getEffectiveSCEVType(Expr->getType()));
+
+ // Add strides for mentioned loops.
+ Worklist.push_back(Expr);
+ do {
+ const SCEV *S = Worklist.pop_back_val();
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+ Strides.insert(AR->getStepRecurrence(SE));
+ Worklist.push_back(AR->getStart());
+ } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
+ Worklist.append(Add->op_begin(), Add->op_end());
+ }
+ } while (!Worklist.empty());
}
// Compute interesting factors from the set of interesting strides.
for (SmallSetVector<const SCEV *, 4>::const_iterator
I = Strides.begin(), E = Strides.end(); I != E; ++I)
for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
- next(I); NewStrideIter != E; ++NewStrideIter) {
+ llvm::next(I); NewStrideIter != E; ++NewStrideIter) {
const SCEV *OldStride = *I;
const SCEV *NewStride = *NewStrideIter;
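
The rewritten CollectInterestingTypesAndFactors above replaces the old
offset-chasing loop with an explicit worklist that walks nested add and addrec
expressions, recording each step recurrence it finds. A self-contained sketch
of that traversal pattern, with a tiny Expr type standing in for the SCEV
hierarchy:

#include <set>
#include <vector>

struct Expr {
  enum Kind { Leaf, Add, AddRec } K;
  int Stride = 0;                    // meaningful only for AddRec
  std::vector<const Expr *> Ops;     // Add: addends; AddRec: {Start}
};

void collectStrides(const Expr *Root, std::set<int> &Strides) {
  std::vector<const Expr *> Worklist{Root};
  do {
    const Expr *S = Worklist.back();
    Worklist.pop_back();
    if (S->K == Expr::AddRec) {
      Strides.insert(S->Stride);                // record this loop's stride
      Worklist.push_back(S->Ops.front());       // keep walking the start
    } else if (S->K == Expr::Add) {
      Worklist.insert(Worklist.end(), S->Ops.begin(), S->Ops.end());
    }
  } while (!Worklist.empty());
}
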
@@ -1776,8 +2042,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
LSRFixup &LF = getNewFixup();
LF.UserInst = UI->getUser();
LF.OperandValToReplace = UI->getOperandValToReplace();
- if (UI->isUseOfPostIncrementedValue())
- LF.PostIncLoop = L;
+ LF.PostIncLoops = UI->getPostIncLoops();
LSRUse::KindType Kind = LSRUse::Basic;
const Type *AccessTy = 0;
@@ -1786,7 +2051,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
AccessTy = getAccessType(LF.UserInst);
}
- const SCEV *S = IU.getCanonicalExpr(*UI);
+ const SCEV *S = IU.getExpr(*UI);
// Equality (== and !=) ICmps are special. We can rewrite (i == N) as
// (N - i == 0), and this allows (N - i) to be the expression that we work
@@ -1802,6 +2067,8 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
if (NV == LF.OperandValToReplace) {
CI->setOperand(1, CI->getOperand(0));
CI->setOperand(0, NV);
+ NV = CI->getOperand(1);
+ Changed = true;
}
// x == y --> x - y == 0
@@ -1824,7 +2091,11 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
LF.LUIdx = P.first;
LF.Offset = P.second;
LSRUse &LU = Uses[LF.LUIdx];
- LU.AllFixupsOutsideLoop &= !L->contains(LF.UserInst);
+ LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
+ if (!LU.WidestFixupType ||
+ SE.getTypeSizeInBits(LU.WidestFixupType) <
+ SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
+ LU.WidestFixupType = LF.OperandValToReplace->getType();
// If this is the first use of this LSRUse, give it a formula.
if (LU.Formulae.empty()) {
@@ -1836,6 +2107,9 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
DEBUG(print_fixups(dbgs()));
}
+/// InsertInitialFormula - Insert a formula for the given expression into
+/// the given use, separating out loop-variant portions from loop-invariant
+/// and loop-computable portions.
void
LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) {
Formula F;
@@ -1844,6 +2118,8 @@ LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) {
assert(Inserted && "Initial formula already exists!"); (void)Inserted;
}
+/// InsertSupplementalFormula - Insert a simple single-register formula for
+/// the given expression into the given use.
void
LSRInstance::InsertSupplementalFormula(const SCEV *S,
LSRUse &LU, size_t LUIdx) {
@@ -1888,7 +2164,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
const SCEV *S = Worklist.pop_back_val();
if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
- Worklist.insert(Worklist.end(), N->op_begin(), N->op_end());
+ Worklist.append(N->op_begin(), N->op_end());
else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
Worklist.push_back(C->getOperand());
else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
@@ -1897,9 +2173,13 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
} else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
if (!Inserted.insert(U)) continue;
const Value *V = U->getValue();
- if (const Instruction *Inst = dyn_cast<Instruction>(V))
+ if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
+ // Look for instructions defined outside the loop.
if (L->contains(Inst)) continue;
- for (Value::use_const_iterator UI = V->use_begin(), UE = V->use_end();
+ } else if (isa<UndefValue>(V))
+ // Undef doesn't have a live range, so it doesn't matter.
+ continue;
+ for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
UI != UE; ++UI) {
const Instruction *UserInst = dyn_cast<Instruction>(*UI);
// Ignore non-instructions.
@@ -1918,9 +2198,17 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
continue;
// Ignore uses which are part of other SCEV expressions, to avoid
// analyzing them multiple times.
- if (SE.isSCEVable(UserInst->getType()) &&
- !isa<SCEVUnknown>(SE.getSCEV(const_cast<Instruction *>(UserInst))))
- continue;
+ if (SE.isSCEVable(UserInst->getType())) {
+ const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst));
+ // If the user is a no-op, look through to its uses.
+ if (!isa<SCEVUnknown>(UserS))
+ continue;
+ if (UserS == U) {
+ Worklist.push_back(
+ SE.getUnknown(const_cast<Instruction *>(UserInst)));
+ continue;
+ }
+ }
// Ignore icmp instructions which are already being analyzed.
if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
unsigned OtherIdx = !UI.getOperandNo();
@@ -1936,7 +2224,11 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
LF.LUIdx = P.first;
LF.Offset = P.second;
LSRUse &LU = Uses[LF.LUIdx];
- LU.AllFixupsOutsideLoop &= L->contains(LF.UserInst);
+ LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
+ if (!LU.WidestFixupType ||
+ SE.getTypeSizeInBits(LU.WidestFixupType) <
+ SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
+ LU.WidestFixupType = LF.OperandValToReplace->getType();
InsertSupplementalFormula(U, LU, LF.LUIdx);
CountRegisters(LU.Formulae.back(), Uses.size() - 1);
break;
@@ -1949,20 +2241,22 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
/// separate registers. If C is non-null, multiply each subexpression by C.
static void CollectSubexprs(const SCEV *S, const SCEVConstant *C,
SmallVectorImpl<const SCEV *> &Ops,
+ const Loop *L,
ScalarEvolution &SE) {
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
// Break out add operands.
for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
I != E; ++I)
- CollectSubexprs(*I, C, Ops, SE);
+ CollectSubexprs(*I, C, Ops, L, SE);
return;
} else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
// Split a non-zero base out of an addrec.
if (!AR->getStart()->isZero()) {
- CollectSubexprs(SE.getAddRecExpr(SE.getIntegerSCEV(0, AR->getType()),
+ CollectSubexprs(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
AR->getStepRecurrence(SE),
- AR->getLoop()), C, Ops, SE);
- CollectSubexprs(AR->getStart(), C, Ops, SE);
+ AR->getLoop()),
+ C, Ops, L, SE);
+ CollectSubexprs(AR->getStart(), C, Ops, L, SE);
return;
}
} else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
@@ -1972,12 +2266,12 @@ static void CollectSubexprs(const SCEV *S, const SCEVConstant *C,
dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
CollectSubexprs(Mul->getOperand(1),
C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0,
- Ops, SE);
+ Ops, L, SE);
return;
}
}
- // Otherwise use the value itself.
+ // Otherwise use the value itself, optionally with a scale applied.
Ops.push_back(C ? SE.getMulExpr(C, S) : S);
}
@@ -1993,11 +2287,18 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
const SCEV *BaseReg = Base.BaseRegs[i];
SmallVector<const SCEV *, 8> AddOps;
- CollectSubexprs(BaseReg, 0, AddOps, SE);
+ CollectSubexprs(BaseReg, 0, AddOps, L, SE);
+
if (AddOps.size() == 1) continue;
for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
JE = AddOps.end(); J != JE; ++J) {
+
+ // Loop-variant "unknown" values are uninteresting; we won't be able to
+ // do anything meaningful with them.
+ if (isa<SCEVUnknown>(*J) && !(*J)->isLoopInvariant(L))
+ continue;
+
// Don't pull a constant into a register if the constant could be folded
// into an immediate field.
if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset,
@@ -2006,11 +2307,10 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
continue;
// Collect all operands except *J.
- SmallVector<const SCEV *, 8> InnerAddOps;
- for (SmallVectorImpl<const SCEV *>::const_iterator K = AddOps.begin(),
- KE = AddOps.end(); K != KE; ++K)
- if (K != J)
- InnerAddOps.push_back(*K);
+ SmallVector<const SCEV *, 8> InnerAddOps
+ (((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
+ InnerAddOps.append
+ (llvm::next(J), ((const SmallVector<const SCEV *, 8> &)AddOps).end());
// Don't leave just a constant behind in a register if the constant could
// be folded into an immediate field.
@@ -2020,8 +2320,11 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
LU.Kind, LU.AccessTy, TLI, SE))
continue;
+ const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
+ if (InnerSum->isZero())
+ continue;
Formula F = Base;
- F.BaseRegs[i] = SE.getAddExpr(InnerAddOps);
+ F.BaseRegs[i] = InnerSum;
F.BaseRegs.push_back(*J);
if (InsertFormula(LU, LUIdx, F))
// If that formula hadn't been seen before, recurse to find more like
@@ -2088,7 +2391,7 @@ void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
Formula Base) {
// TODO: For now, just add the min and max offset, because it usually isn't
// worthwhile looking at everything in between.
- SmallVector<int64_t, 4> Worklist;
+ SmallVector<int64_t, 2> Worklist;
Worklist.push_back(LU.MinOffset);
if (LU.MaxOffset != LU.MinOffset)
Worklist.push_back(LU.MaxOffset);
@@ -2102,7 +2405,14 @@ void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I;
if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I,
LU.Kind, LU.AccessTy, TLI)) {
- F.BaseRegs[i] = SE.getAddExpr(G, SE.getIntegerSCEV(*I, G->getType()));
+ // Add the offset to the base register.
+ const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G);
+ // If it cancelled out, drop the base register, otherwise update it.
+ if (NewG->isZero()) {
+ std::swap(F.BaseRegs[i], F.BaseRegs.back());
+ F.BaseRegs.pop_back();
+ } else
+ F.BaseRegs[i] = NewG;
(void)InsertFormula(LU, LUIdx, F);
}
@@ -2141,13 +2451,12 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
for (SmallSetVector<int64_t, 8>::const_iterator
I = Factors.begin(), E = Factors.end(); I != E; ++I) {
int64_t Factor = *I;
- Formula F = Base;
// Check that the multiplication doesn't overflow.
- if (F.AM.BaseOffs == INT64_MIN && Factor == -1)
+ if (Base.AM.BaseOffs == INT64_MIN && Factor == -1)
continue;
- F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs * Factor;
- if (F.AM.BaseOffs / Factor != Base.AM.BaseOffs)
+ int64_t NewBaseOffs = (uint64_t)Base.AM.BaseOffs * Factor;
+ if (NewBaseOffs / Factor != Base.AM.BaseOffs)
continue;
// Check that multiplying with the use offset doesn't overflow.
@@ -2158,6 +2467,9 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
if (Offset / Factor != LU.MinOffset)
continue;
+ Formula F = Base;
+ F.AM.BaseOffs = NewBaseOffs;
+
// Check that this scale is legal.
if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI))
continue;
@@ -2165,7 +2477,7 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
// Compensate for the use having MinOffset built into it.
F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Offset - LU.MinOffset;
- const SCEV *FactorS = SE.getIntegerSCEV(Factor, IntTy);
+ const SCEV *FactorS = SE.getConstant(IntTy, Factor);
// Check that multiplying with each base register doesn't overflow.
for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
@@ -2189,8 +2501,7 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
/// GenerateScales - Generate stride factor reuse formulae by making use of
/// scaled-offset address modes, for example.
-void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx,
- Formula Base) {
+void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
// Determine the integer type for the base formula.
const Type *IntTy = Base.getType();
if (!IntTy) return;
@@ -2227,7 +2538,7 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx,
for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
if (const SCEVAddRecExpr *AR =
dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i])) {
- const SCEV *FactorS = SE.getIntegerSCEV(Factor, IntTy);
+ const SCEV *FactorS = SE.getConstant(IntTy, Factor);
if (FactorS->isZero())
continue;
// Divide out the factor, ignoring high bits, since we'll be
@@ -2236,8 +2547,7 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx,
// TODO: This could be optimized to avoid all the copying.
Formula F = Base;
F.ScaledReg = Quotient;
- std::swap(F.BaseRegs[i], F.BaseRegs.back());
- F.BaseRegs.pop_back();
+ F.DeleteBaseReg(F.BaseRegs[i]);
(void)InsertFormula(LU, LUIdx, F);
}
}
@@ -2245,8 +2555,7 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx,
}
/// GenerateTruncates - Generate reuse formulae from different IV types.
-void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx,
- Formula Base) {
+void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
// This requires TargetLowering to tell us which truncates are free.
if (!TLI) return;
@@ -2403,7 +2712,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
// TODO: Use a more targeted data structure.
for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
- Formula F = LU.Formulae[L];
+ const Formula &F = LU.Formulae[L];
// Use the immediate in the scaled register.
if (F.ScaledReg == OrigReg) {
int64_t Offs = (uint64_t)F.AM.BaseOffs +
@@ -2426,7 +2735,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
if (C->getValue()->getValue().isNegative() !=
(NewF.AM.BaseOffs < 0) &&
(C->getValue()->getValue().abs() * APInt(BitWidth, F.AM.Scale))
- .ule(APInt(BitWidth, NewF.AM.BaseOffs).abs()))
+ .ule(abs64(NewF.AM.BaseOffs)))
continue;
// OK, looks good.
@@ -2451,10 +2760,11 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end();
J != JE; ++J)
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J))
- if (C->getValue()->getValue().isNegative() !=
- (NewF.AM.BaseOffs < 0) &&
- C->getValue()->getValue().abs()
- .ule(APInt(BitWidth, NewF.AM.BaseOffs).abs()))
+ if ((C->getValue()->getValue() + NewF.AM.BaseOffs).abs().slt(
+ abs64(NewF.AM.BaseOffs)) &&
+ (C->getValue()->getValue() +
+ NewF.AM.BaseOffs).countTrailingZeros() >=
+ CountTrailingZeros_64(NewF.AM.BaseOffs))
goto skip_formula;
// Ok, looks good.
@@ -2497,13 +2807,17 @@ LSRInstance::GenerateAllReuseFormulae() {
}
GenerateCrossUseConstantOffsets();
+
+ DEBUG(dbgs() << "\n"
+ "After generating reuse formulae:\n";
+ print_uses(dbgs()));
}
/// If there are multiple formulae with the same set of registers used
/// by other uses, pick the best one and delete the others.
void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
#ifndef NDEBUG
- bool Changed = false;
+ bool ChangedFormulae = false;
#endif
// Collect the best formula for each unique set of shared registers. This
@@ -2515,10 +2829,9 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
LSRUse &LU = Uses[LUIdx];
FormulaSorter Sorter(L, LU, SE, DT);
+ DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n');
- // Clear out the set of used regs; it will be recomputed.
- LU.Regs.clear();
-
+ bool Any = false;
for (size_t FIdx = 0, NumForms = LU.Formulae.size();
FIdx != NumForms; ++FIdx) {
Formula &F = LU.Formulae[FIdx];
@@ -2543,62 +2856,228 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
Formula &Best = LU.Formulae[P.first->second];
if (Sorter.operator()(F, Best))
std::swap(F, Best);
- DEBUG(dbgs() << "Filtering out "; F.print(dbgs());
+ DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs());
dbgs() << "\n"
- " in favor of "; Best.print(dbgs());
+ " in favor of formula "; Best.print(dbgs());
dbgs() << '\n');
#ifndef NDEBUG
- Changed = true;
+ ChangedFormulae = true;
#endif
- std::swap(F, LU.Formulae.back());
- LU.Formulae.pop_back();
+ LU.DeleteFormula(F);
--FIdx;
--NumForms;
+ Any = true;
continue;
}
- if (F.ScaledReg) LU.Regs.insert(F.ScaledReg);
- LU.Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
}
+
+ // Now that we've filtered out some formulae, recompute the Regs set.
+ if (Any)
+ LU.RecomputeRegs(LUIdx, RegUses);
+
+ // Reset this to prepare for the next use.
BestFormulae.clear();
}
- DEBUG(if (Changed) {
+ DEBUG(if (ChangedFormulae) {
dbgs() << "\n"
"After filtering out undesirable candidates:\n";
print_uses(dbgs());
});
}
-/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of
-/// formulae to choose from, use some rough heuristics to prune down the number
-/// of formulae. This keeps the main solver from taking an extraordinary amount
-/// of time in some worst-case scenarios.
-void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
- // This is a rough guess that seems to work fairly well.
- const size_t Limit = UINT16_MAX;
+// This is a rough guess that seems to work fairly well.
+static const size_t ComplexityLimit = UINT16_MAX;
- SmallPtrSet<const SCEV *, 4> Taken;
- for (;;) {
- // Estimate the worst-case number of solutions we might consider. We almost
- // never consider this many solutions because we prune the search space,
- // but the pruning isn't always sufficient.
- uint32_t Power = 1;
- for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
- E = Uses.end(); I != E; ++I) {
- size_t FSize = I->Formulae.size();
- if (FSize >= Limit) {
- Power = Limit;
- break;
- }
- Power *= FSize;
- if (Power >= Limit)
- break;
+/// EstimateSearchSpaceComplexity - Estimate the worst-case number of
+/// solutions the solver might have to consider. It almost never considers
+/// this many solutions because it prunes the search space, but the pruning
+/// isn't always sufficient.
+size_t LSRInstance::EstimateSearchSpaceComplexity() const {
+ uint32_t Power = 1;
+ for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
+ E = Uses.end(); I != E; ++I) {
+ size_t FSize = I->Formulae.size();
+ if (FSize >= ComplexityLimit) {
+ Power = ComplexityLimit;
+ break;
}
- if (Power < Limit)
+ Power *= FSize;
+ if (Power >= ComplexityLimit)
break;
+ }
+ return Power;
+}
+
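
EstimateSearchSpaceComplexity multiplies the per-use formula counts together
and saturates at ComplexityLimit, so callers only ever need a ">= limit" test
and the product cannot overflow. A standalone sketch of that estimate:

#include <cstddef>
#include <cstdint>
#include <vector>

static const std::size_t ComplexityLimit = UINT16_MAX;

std::size_t estimateSearchSpace(const std::vector<std::size_t> &FormulaCounts) {
  std::uint32_t Power = 1;
  for (std::size_t FSize : FormulaCounts) {
    if (FSize >= ComplexityLimit)
      return ComplexityLimit;        // saturate on a single huge use
    Power *= FSize;                  // both factors < 2^16, so no overflow
    if (Power >= ComplexityLimit)
      return ComplexityLimit;        // saturate once the product is too big
  }
  return Power;
}
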
+/// NarrowSearchSpaceByDetectingSupersets - When one formula uses a superset
+/// of the registers of another formula, it won't help reduce register
+/// pressure (though it may not necessarily hurt register pressure); remove
+/// it to simplify the system.
+void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
+ if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
+ DEBUG(dbgs() << "The search space is too complex.\n");
+
+ DEBUG(dbgs() << "Narrowing the search space by eliminating formulae "
+ "which use a superset of registers used by other "
+ "formulae.\n");
+
+ for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
+ LSRUse &LU = Uses[LUIdx];
+ bool Any = false;
+ for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
+ Formula &F = LU.Formulae[i];
+ // Look for a formula with a constant or GV in a register. If the use
+ // also has a formula with that same value in an immediate field,
+ // delete the one that uses a register.
+ for (SmallVectorImpl<const SCEV *>::const_iterator
+ I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) {
+ if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) {
+ Formula NewF = F;
+ NewF.AM.BaseOffs += C->getValue()->getSExtValue();
+ NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
+ (I - F.BaseRegs.begin()));
+ if (LU.HasFormulaWithSameRegs(NewF)) {
+ DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n');
+ LU.DeleteFormula(F);
+ --i;
+ --e;
+ Any = true;
+ break;
+ }
+ } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) {
+ if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue()))
+ if (!F.AM.BaseGV) {
+ Formula NewF = F;
+ NewF.AM.BaseGV = GV;
+ NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
+ (I - F.BaseRegs.begin()));
+ if (LU.HasFormulaWithSameRegs(NewF)) {
+ DEBUG(dbgs() << " Deleting "; F.print(dbgs());
+ dbgs() << '\n');
+ LU.DeleteFormula(F);
+ --i;
+ --e;
+ Any = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+ if (Any)
+ LU.RecomputeRegs(LUIdx, RegUses);
+ }
+
+ DEBUG(dbgs() << "After pre-selection:\n";
+ print_uses(dbgs()));
+ }
+}
+
+/// NarrowSearchSpaceByCollapsingUnrolledCode - When there are many registers
+/// for expressions like A, A+1, A+2, etc., allocate a single register for
+/// them.
+void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
+ if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
+ DEBUG(dbgs() << "The search space is too complex.\n");
+
+ DEBUG(dbgs() << "Narrowing the search space by assuming that uses "
+ "separated by a constant offset will use the same "
+ "registers.\n");
+
+ // This is especially useful for unrolled loops.
+
+ for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
+ LSRUse &LU = Uses[LUIdx];
+ for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
+ E = LU.Formulae.end(); I != E; ++I) {
+ const Formula &F = *I;
+ if (F.AM.BaseOffs != 0 && F.AM.Scale == 0) {
+ if (LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU)) {
+ if (reconcileNewOffset(*LUThatHas, F.AM.BaseOffs,
+ /*HasBaseReg=*/false,
+ LU.Kind, LU.AccessTy)) {
+ DEBUG(dbgs() << " Deleting use "; LU.print(dbgs());
+ dbgs() << '\n');
+
+ LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;
+
+ // Delete formulae from the new use which are no longer legal.
+ bool Any = false;
+ for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) {
+ Formula &F = LUThatHas->Formulae[i];
+ if (!isLegalUse(F.AM,
+ LUThatHas->MinOffset, LUThatHas->MaxOffset,
+ LUThatHas->Kind, LUThatHas->AccessTy, TLI)) {
+ DEBUG(dbgs() << " Deleting "; F.print(dbgs());
+ dbgs() << '\n');
+ LUThatHas->DeleteFormula(F);
+ --i;
+ --e;
+ Any = true;
+ }
+ }
+ if (Any)
+ LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses);
+
+ // Update the relocs to reference the new use.
+ for (SmallVectorImpl<LSRFixup>::iterator I = Fixups.begin(),
+ E = Fixups.end(); I != E; ++I) {
+ LSRFixup &Fixup = *I;
+ if (Fixup.LUIdx == LUIdx) {
+ Fixup.LUIdx = LUThatHas - &Uses.front();
+ Fixup.Offset += F.AM.BaseOffs;
+ DEBUG(dbgs() << "New fixup has offset "
+ << Fixup.Offset << '\n');
+ }
+ if (Fixup.LUIdx == NumUses-1)
+ Fixup.LUIdx = LUIdx;
+ }
+
+ // Delete the old use.
+ DeleteUse(LU);
+ --LUIdx;
+ --NumUses;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ DEBUG(dbgs() << "After pre-selection:\n";
+ print_uses(dbgs()));
+ }
+}
+
+/// NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters - Call
+/// FilterOutUndesirableDedicatedRegisters again, if necessary, now that
+/// we've done more filtering, as it may be able to find more formulae to
+/// eliminate.
+void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
+ if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
+ DEBUG(dbgs() << "The search space is too complex.\n");
+
+ DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
+ "undesirable dedicated registers.\n");
+
+ FilterOutUndesirableDedicatedRegisters();
+
+ DEBUG(dbgs() << "After pre-selection:\n";
+ print_uses(dbgs()));
+ }
+}
+/// NarrowSearchSpaceByPickingWinnerRegs - Pick a register which seems likely
+/// to be profitable, and then in any use which has any reference to that
+/// register, delete all formulae which do not reference that register.
+void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
+ // With all other options exhausted, loop until the system is simple
+ // enough to handle.
+ SmallPtrSet<const SCEV *, 4> Taken;
+ while (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
// Ok, we have too many formulae on our hands to conveniently handle.
// Use a rough heuristic to thin out the list.
+ DEBUG(dbgs() << "The search space is too complex.\n");
// Pick the register which is used by the most LSRUses, which is likely
// to be a good reuse register candidate.
@@ -2626,28 +3105,26 @@ void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
// In any use with formulae which reference this register, delete formulae
// which don't reference it.
- for (SmallVectorImpl<LSRUse>::iterator I = Uses.begin(),
- E = Uses.end(); I != E; ++I) {
- LSRUse &LU = *I;
+ for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
+ LSRUse &LU = Uses[LUIdx];
if (!LU.Regs.count(Best)) continue;
- // Clear out the set of used regs; it will be recomputed.
- LU.Regs.clear();
-
+ bool Any = false;
for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
Formula &F = LU.Formulae[i];
if (!F.referencesReg(Best)) {
DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n');
- std::swap(LU.Formulae.back(), F);
- LU.Formulae.pop_back();
+ LU.DeleteFormula(F);
--e;
--i;
+ Any = true;
+ assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?");
continue;
}
-
- if (F.ScaledReg) LU.Regs.insert(F.ScaledReg);
- LU.Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
}
+
+ if (Any)
+ LU.RecomputeRegs(LUIdx, RegUses);
}
DEBUG(dbgs() << "After pre-selection:\n";
@@ -2655,6 +3132,17 @@ void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
}
}
+/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of
+/// formulae to choose from, use some rough heuristics to prune down the number
+/// of formulae. This keeps the main solver from taking an extraordinary amount
+/// of time in some worst-case scenarios.
+void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
+ NarrowSearchSpaceByDetectingSupersets();
+ NarrowSearchSpaceByCollapsingUnrolledCode();
+ NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
+ NarrowSearchSpaceByPickingWinnerRegs();
+}
+
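
After the refactoring, NarrowSearchSpaceUsingHeuristics is just a driver that
runs increasingly aggressive pruning passes, each of which first re-checks the
complexity estimate and stops once the search space is small enough. A
condensed sketch of that driver shape with stand-in pass callbacks:

#include <cstddef>
#include <functional>
#include <vector>

void narrowSearchSpace(const std::function<std::size_t()> &Estimate,
                       const std::vector<std::function<void()>> &Passes,
                       std::size_t Limit) {
  for (const std::function<void()> &Prune : Passes) {
    if (Estimate() < Limit)
      return;        // cheap enough for the exact solver; stop pruning
    Prune();         // otherwise apply the next, more aggressive heuristic
  }
}
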
/// SolveRecurse - This is the recursive solver.
void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
Cost &SolutionCost,
@@ -2734,11 +3222,14 @@ retry:
// If none of the formulae had all of the required registers, relax the
// constraint so that we don't exclude all formulae.
if (!AnySatisfiedReqRegs) {
+ assert(!ReqRegs.empty() && "Solver failed even without required registers");
ReqRegs.clear();
goto retry;
}
}
+/// Solve - Choose one formula from each use. Return the results in the given
+/// Solution vector.
void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
SmallVector<const Formula *, 8> Workspace;
Cost SolutionCost;
@@ -2748,6 +3239,7 @@ void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
DenseSet<const SCEV *> VisitedRegs;
Workspace.reserve(Uses.size());
+ // SolveRecurse does all the work.
SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
CurRegs, VisitedRegs);
@@ -2763,50 +3255,39 @@ void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
Solution[i]->print(dbgs());
dbgs() << '\n';
});
-}
-/// getImmediateDominator - A handy utility for the specific DominatorTree
-/// query that we need here.
-///
-static BasicBlock *getImmediateDominator(BasicBlock *BB, DominatorTree &DT) {
- DomTreeNode *Node = DT.getNode(BB);
- if (!Node) return 0;
- Node = Node->getIDom();
- if (!Node) return 0;
- return Node->getBlock();
+ assert(Solution.size() == Uses.size() && "Malformed solution!");
}
-Value *LSRInstance::Expand(const LSRFixup &LF,
- const Formula &F,
- BasicBlock::iterator IP,
- SCEVExpander &Rewriter,
- SmallVectorImpl<WeakVH> &DeadInsts) const {
- const LSRUse &LU = Uses[LF.LUIdx];
-
- // Then, collect some instructions which we will remain dominated by when
- // expanding the replacement. These must be dominated by any operands that
- // will be required in the expansion.
- SmallVector<Instruction *, 4> Inputs;
- if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
- Inputs.push_back(I);
- if (LU.Kind == LSRUse::ICmpZero)
- if (Instruction *I =
- dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
- Inputs.push_back(I);
- if (LF.PostIncLoop) {
- if (!L->contains(LF.UserInst))
- Inputs.push_back(L->getLoopLatch()->getTerminator());
- else
- Inputs.push_back(IVIncInsertPos);
- }
-
- // Then, climb up the immediate dominator tree as far as we can go while
- // still being dominated by the input positions.
+/// HoistInsertPosition - Helper for AdjustInsertPositionForExpand. Climb up
+/// the dominator tree far as we can go while still being dominated by the
+/// input positions. This helps canonicalize the insert position, which
+/// encourages sharing.
+BasicBlock::iterator
+LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
+ const SmallVectorImpl<Instruction *> &Inputs)
+ const {
for (;;) {
+ const Loop *IPLoop = LI.getLoopFor(IP->getParent());
+ unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;
+
+ BasicBlock *IDom;
+ for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) {
+ if (!Rung) return IP;
+ Rung = Rung->getIDom();
+ if (!Rung) return IP;
+ IDom = Rung->getBlock();
+
+ // Don't climb into a loop though.
+ const Loop *IDomLoop = LI.getLoopFor(IDom);
+ unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0;
+ if (IDomDepth <= IPLoopDepth &&
+ (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
+ break;
+ }
+
bool AllDominate = true;
Instruction *BetterPos = 0;
- BasicBlock *IDom = getImmediateDominator(IP->getParent(), DT);
- if (!IDom) break;
Instruction *Tentative = IDom->getTerminator();
for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
E = Inputs.end(); I != E; ++I) {
@@ -2815,6 +3296,8 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
AllDominate = false;
break;
}
+ // Attempt to find an insert position in the middle of the block,
+ // instead of at the end, so that it can be used for other expansions.
if (IDom == Inst->getParent() &&
(!BetterPos || DT.dominates(BetterPos, Inst)))
BetterPos = llvm::next(BasicBlock::iterator(Inst));
@@ -2826,11 +3309,79 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
else
IP = Tentative;
}
+
+ return IP;
+}
+
+/// AdjustInsertPositionForExpand - Determine an input position which will be
+/// dominated by the operands and which will dominate the result.
+BasicBlock::iterator
+LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator IP,
+ const LSRFixup &LF,
+ const LSRUse &LU) const {
+ // Collect some instructions which the expanded replacement must remain
+ // dominated by, so that any operands required in the expansion will be
+ // available at the chosen insert position.
+ SmallVector<Instruction *, 4> Inputs;
+ if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
+ Inputs.push_back(I);
+ if (LU.Kind == LSRUse::ICmpZero)
+ if (Instruction *I =
+ dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
+ Inputs.push_back(I);
+ if (LF.PostIncLoops.count(L)) {
+ if (LF.isUseFullyOutsideLoop(L))
+ Inputs.push_back(L->getLoopLatch()->getTerminator());
+ else
+ Inputs.push_back(IVIncInsertPos);
+ }
+ // The expansion must also be dominated by the increment positions of any
+ // loops for which it is using post-inc mode.
+ for (PostIncLoopSet::const_iterator I = LF.PostIncLoops.begin(),
+ E = LF.PostIncLoops.end(); I != E; ++I) {
+ const Loop *PIL = *I;
+ if (PIL == L) continue;
+
+ // Be dominated by the loop exit.
+ SmallVector<BasicBlock *, 4> ExitingBlocks;
+ PIL->getExitingBlocks(ExitingBlocks);
+ if (!ExitingBlocks.empty()) {
+ BasicBlock *BB = ExitingBlocks[0];
+ for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i)
+ BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]);
+ Inputs.push_back(BB->getTerminator());
+ }
+ }
+
+ // Then, climb up the immediate dominator tree as far as we can go while
+ // still being dominated by the input positions.
+ IP = HoistInsertPosition(IP, Inputs);
+
+ // Don't insert instructions before PHI nodes.
while (isa<PHINode>(IP)) ++IP;
+ // Ignore debug intrinsics.
+ while (isa<DbgInfoIntrinsic>(IP)) ++IP;
+
+ return IP;
+}
+
+/// Expand - Emit instructions for the leading candidate expression for this
+/// LSRUse (this is called "expanding").
+Value *LSRInstance::Expand(const LSRFixup &LF,
+ const Formula &F,
+ BasicBlock::iterator IP,
+ SCEVExpander &Rewriter,
+ SmallVectorImpl<WeakVH> &DeadInsts) const {
+ const LSRUse &LU = Uses[LF.LUIdx];
+
+ // Determine an input position which will be dominated by the operands and
+ // which will dominate the result.
+ IP = AdjustInsertPositionForExpand(IP, LF, LU);
+
// Inform the Rewriter if we have a post-increment use, so that it can
// perform an advantageous expansion.
- Rewriter.setPostInc(LF.PostIncLoop);
+ Rewriter.setPostInc(LF.PostIncLoops);
// This is the type that the user actually needs.
const Type *OpTy = LF.OperandValToReplace->getType();
@@ -2854,22 +3405,11 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
const SCEV *Reg = *I;
assert(!Reg->isZero() && "Zero allocated in a base register!");
- // If we're expanding for a post-inc user for the add-rec's loop, make the
- // post-inc adjustment.
- const SCEV *Start = Reg;
- while (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Start)) {
- if (AR->getLoop() == LF.PostIncLoop) {
- Reg = SE.getAddExpr(Reg, AR->getStepRecurrence(SE));
- // If the user is inside the loop, insert the code after the increment
- // so that it is dominated by its operand. If the original insert point
- // was already dominated by the increment, keep it, because there may
- // be loop-variant operands that need to be respected also.
- if (L->contains(LF.UserInst) && !DT.dominates(IVIncInsertPos, IP))
- IP = IVIncInsertPos;
- break;
- }
- Start = AR->getStart();
- }
+ // If we're expanding for a post-inc user, make the post-inc adjustment.
+ PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
+ Reg = TransformForPostIncUse(Denormalize, Reg,
+ LF.UserInst, LF.OperandValToReplace,
+ Loops, SE, DT);
Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP)));
}
@@ -2886,11 +3426,11 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
if (F.AM.Scale != 0) {
const SCEV *ScaledS = F.ScaledReg;
- // If we're expanding for a post-inc user for the add-rec's loop, make the
- // post-inc adjustment.
- if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ScaledS))
- if (AR->getLoop() == LF.PostIncLoop)
- ScaledS = SE.getAddExpr(ScaledS, AR->getStepRecurrence(SE));
+ // If we're expanding for a post-inc user, make the post-inc adjustment.
+ PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
+ ScaledS = TransformForPostIncUse(Denormalize, ScaledS,
+ LF.UserInst, LF.OperandValToReplace,
+ Loops, SE, DT);
if (LU.Kind == LSRUse::ICmpZero) {
// An interesting way of "folding" with an icmp is to use a negated
@@ -2904,8 +3444,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
// which is expected to be matched as part of the address.
ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP));
ScaledS = SE.getMulExpr(ScaledS,
- SE.getIntegerSCEV(F.AM.Scale,
- ScaledS->getType()));
+ SE.getConstant(ScaledS->getType(), F.AM.Scale));
Ops.push_back(ScaledS);
// Flush the operand list to suppress SCEVExpander hoisting.
@@ -2946,12 +3485,12 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
// Emit instructions summing all the operands.
const SCEV *FullS = Ops.empty() ?
- SE.getIntegerSCEV(0, IntTy) :
+ SE.getConstant(IntTy, 0) :
SE.getAddExpr(Ops);
Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP);
// We're done expanding now, so reset the rewriter.
- Rewriter.setPostInc(0);
+ Rewriter.clearPostInc();
// An ICmpZero Formula represents an ICmp which we're handling as a
// comparison against zero. Now that we've expanded an expression for that
@@ -3084,6 +3623,8 @@ void LSRInstance::Rewrite(const LSRFixup &LF,
DeadInsts.push_back(LF.OperandValToReplace);
}
+/// ImplementSolution - Rewrite all the fixup locations with new values,
+/// following the chosen solution.
void
LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
Pass *P) {
@@ -3096,10 +3637,11 @@ LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
Rewriter.setIVIncInsertPos(L, IVIncInsertPos);
// Expand the new value definitions and update the users.
- for (size_t i = 0, e = Fixups.size(); i != e; ++i) {
- size_t LUIdx = Fixups[i].LUIdx;
+ for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
+ E = Fixups.end(); I != E; ++I) {
+ const LSRFixup &Fixup = *I;
- Rewrite(Fixups[i], *Solution[LUIdx], Rewriter, DeadInsts, P);
+ Rewrite(Fixup, *Solution[Fixup.LUIdx], Rewriter, DeadInsts, P);
Changed = true;
}
@@ -3115,6 +3657,7 @@ LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
: IU(P->getAnalysis<IVUsers>()),
SE(P->getAnalysis<ScalarEvolution>()),
DT(P->getAnalysis<DominatorTree>()),
+ LI(P->getAnalysis<LoopInfo>()),
TLI(tli), L(l), Changed(false), IVIncInsertPos(0) {
// If LoopSimplify form is not available, stay out of trouble.
@@ -3127,13 +3670,11 @@ LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
dbgs() << ":\n");
- /// OptimizeShadowIV - If IV is used in a int-to-float cast
- /// inside the loop then try to eliminate the cast operation.
+ // First, perform some low-level loop optimizations.
OptimizeShadowIV();
+ OptimizeLoopTermCond();
- // Change loop terminating condition to use the postinc iv when possible.
- Changed |= OptimizeLoopTermCond();
-
+ // Start collecting data and preparing for the solver.
CollectInterestingTypesAndFactors();
CollectFixupsAndInitialFormulae();
CollectLoopInvariantFixupsAndFormulae();
@@ -3145,16 +3686,11 @@ LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
// to formulate the values needed for the uses.
GenerateAllReuseFormulae();
- DEBUG(dbgs() << "\n"
- "After generating reuse formulae:\n";
- print_uses(dbgs()));
-
FilterOutUndesirableDedicatedRegisters();
NarrowSearchSpaceUsingHeuristics();
SmallVector<const Formula *, 8> Solution;
Solve(Solution);
- assert(Solution.size() == Uses.size() && "Malformed solution!");
// Release memory that is no longer needed.
Factors.clear();
@@ -3204,9 +3740,8 @@ void LSRInstance::print_fixups(raw_ostream &OS) const {
OS << "LSR is examining the following fixup sites:\n";
for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
E = Fixups.end(); I != E; ++I) {
- const LSRFixup &LF = *I;
dbgs() << " ";
- LF.print(OS);
+ I->print(OS);
OS << '\n';
}
}
@@ -3257,23 +3792,24 @@ private:
}
char LoopStrengthReduce::ID = 0;
-static RegisterPass<LoopStrengthReduce>
-X("loop-reduce", "Loop Strength Reduction");
+INITIALIZE_PASS(LoopStrengthReduce, "loop-reduce",
+ "Loop Strength Reduction", false, false);
Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
return new LoopStrengthReduce(TLI);
}
LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli)
- : LoopPass(&ID), TLI(tli) {}
+ : LoopPass(ID), TLI(tli) {}
void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
// We split critical edges, so we change the CFG. However, we do update
// many analyses if they are around.
AU.addPreservedID(LoopSimplifyID);
- AU.addPreserved<LoopInfo>();
AU.addPreserved("domfrontier");
+ AU.addRequired<LoopInfo>();
+ AU.addPreserved<LoopInfo>();
AU.addRequiredID(LoopSimplifyID);
AU.addRequired<DominatorTree>();
AU.addPreserved<DominatorTree>();
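
The hunks above replace the old getImmediateDominator walk inside Expand with the
HoistInsertPosition/AdjustInsertPositionForExpand pair: the insert point is climbed up
the immediate-dominator chain, never into a deeper (or sibling) loop, and only while
every required input instruction still dominates the candidate block. The standalone
C++ sketch below restates that climb; Block, dominates and hoistInsertBlock are toy
names for illustration, not LLVM's API, and unlike the real helper this version simply
stops when the immediate dominator sits in an unsuitable loop instead of searching
further up the tree.

#include <vector>

struct Block {
  Block *IDom;         // immediate dominator, null at the root
  unsigned LoopDepth;  // 0 = not inside any loop
  const void *Loop;    // identity of the innermost containing loop (may be null)
};

// True if A dominates B in the toy dominator tree above.
static bool dominates(const Block *A, const Block *B) {
  for (const Block *X = B; X; X = X->IDom)
    if (X == A)
      return true;
  return false;
}

// Climb the idom chain from IP, refusing to step into a deeper (or different,
// at equal depth) loop, and stopping once any required input would no longer
// dominate the candidate position.
static Block *hoistInsertBlock(Block *IP, const std::vector<Block *> &Inputs) {
  for (;;) {
    Block *IDom = IP->IDom;
    if (!IDom)
      return IP;
    if (IDom->LoopDepth > IP->LoopDepth ||
        (IDom->LoopDepth == IP->LoopDepth && IDom->Loop != IP->Loop))
      return IP;  // the real pass keeps searching further up; we just stop
    for (std::vector<Block *>::const_iterator I = Inputs.begin(),
         E = Inputs.end(); I != E; ++I)
      if (!dominates(*I, IDom))
        return IP;  // an operand would no longer be available here
    IP = IDom;      // hoisting is safe; keep climbing
  }
}

Canonicalizing the insert position this way is what lets several fixups share one
expansion, as the comment on HoistInsertPosition notes.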
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index a355ec3..d0edfa2 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -17,6 +17,7 @@
#include "llvm/Transforms/Scalar.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/InlineCost.h"
+#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -26,7 +27,7 @@
using namespace llvm;
static cl::opt<unsigned>
-UnrollThreshold("unroll-threshold", cl::init(100), cl::Hidden,
+UnrollThreshold("unroll-threshold", cl::init(200), cl::Hidden,
cl::desc("The cut-off point for automatic loop unrolling"));
static cl::opt<unsigned>
@@ -42,7 +43,7 @@ namespace {
class LoopUnroll : public LoopPass {
public:
static char ID; // Pass ID, replacement for typeid
- LoopUnroll() : LoopPass(&ID) {}
+ LoopUnroll() : LoopPass(ID) {}
/// A magic value for use with the Threshold parameter to indicate
/// that the loop unroll should be performed regardless of how much
@@ -55,23 +56,24 @@ namespace {
/// loop preheaders be inserted into the CFG...
///
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<LoopInfo>();
+ AU.addPreserved<LoopInfo>();
AU.addRequiredID(LoopSimplifyID);
+ AU.addPreservedID(LoopSimplifyID);
AU.addRequiredID(LCSSAID);
- AU.addRequired<LoopInfo>();
AU.addPreservedID(LCSSAID);
- AU.addPreserved<LoopInfo>();
+ AU.addPreserved<ScalarEvolution>();
// FIXME: Loop unroll requires LCSSA. And LCSSA requires dom info.
// If loop unroll does not preserve dom info then LCSSA pass on next
// loop will receive invalid dom info.
// For now, recreate dom info, if loop is unrolled.
AU.addPreserved<DominatorTree>();
- AU.addPreserved<DominanceFrontier>();
}
};
}
char LoopUnroll::ID = 0;
-static RegisterPass<LoopUnroll> X("loop-unroll", "Unroll loops");
+INITIALIZE_PASS(LoopUnroll, "loop-unroll", "Unroll loops", false, false);
Pass *llvm::createLoopUnrollPass() { return new LoopUnroll(); }
@@ -86,7 +88,6 @@ static unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls) {
}
bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
- assert(L->isLCSSAForm());
LoopInfo *LI = &getAnalysis<LoopInfo>();
BasicBlock *Header = L->getHeader();
@@ -146,12 +147,7 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
return false;
// FIXME: Reconstruct dom info, because it is not preserved properly.
- DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>();
- if (DT) {
+ if (DominatorTree *DT = getAnalysisIfAvailable<DominatorTree>())
DT->runOnFunction(*F);
- DominanceFrontier *DF = getAnalysisIfAvailable<DominanceFrontier>();
- if (DF)
- DF->runOnFunction(*F);
- }
return true;
}
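
Besides dropping the DominanceFrontier bookkeeping and the LCSSA assert, this hunk
doubles the default -unroll-threshold from 100 to 200. The threshold still gates
unrolling on the estimated size of the unrolled body; the snippet below is only a
rough restatement of that size check, with illustrative names and a simplified
formula, and it ignores the partial-unrolling and NoThreshold cases that the real
decision in LoopUnrollPass.cpp handles.

#include <cstdio>

// Returns true when fully unrolling a loop of LoopSize instructions, TripCount
// times, is expected to stay under the size Threshold (conservative check for
// LoopSize * TripCount <= Threshold).
static bool shouldUnroll(unsigned LoopSize, unsigned TripCount,
                         unsigned Threshold) {
  if (TripCount == 0)
    return false;                      // unknown trip count: not handled here
  return LoopSize <= Threshold / TripCount;
}

int main() {
  std::printf("%d\n", shouldUnroll(12, 8, 200));   // 12*8  = 96  -> 1
  std::printf("%d\n", shouldUnroll(40, 16, 200));  // 40*16 = 640 -> 0
  return 0;
}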
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
index 071e9b7..9afe428 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -34,6 +34,7 @@
#include "llvm/Instructions.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InlineCost.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/Dominators.h"
@@ -76,7 +77,6 @@ namespace {
bool redoLoop;
Loop *currentLoop;
- DominanceFrontier *DF;
DominatorTree *DT;
BasicBlock *loopHeader;
BasicBlock *loopPreheader;
@@ -91,15 +91,15 @@ namespace {
public:
static char ID; // Pass ID, replacement for typeid
explicit LoopUnswitch(bool Os = false) :
- LoopPass(&ID), OptimizeForSize(Os), redoLoop(false),
- currentLoop(NULL), DF(NULL), DT(NULL), loopHeader(NULL),
+ LoopPass(ID), OptimizeForSize(Os), redoLoop(false),
+ currentLoop(NULL), DT(NULL), loopHeader(NULL),
loopPreheader(NULL) {}
bool runOnLoop(Loop *L, LPPassManager &LPM);
bool processCurrentLoop();
/// This transformation requires natural loop information & requires that
- /// loop preheaders be inserted into the CFG...
+ /// loop preheaders be inserted into the CFG.
///
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequiredID(LoopSimplifyID);
@@ -109,7 +109,6 @@ namespace {
AU.addRequiredID(LCSSAID);
AU.addPreservedID(LCSSAID);
AU.addPreserved<DominatorTree>();
- AU.addPreserved<DominanceFrontier>();
}
private:
@@ -159,7 +158,7 @@ namespace {
};
}
char LoopUnswitch::ID = 0;
-static RegisterPass<LoopUnswitch> X("loop-unswitch", "Unswitch loops");
+INITIALIZE_PASS(LoopUnswitch, "loop-unswitch", "Unswitch loops", false, false);
Pass *llvm::createLoopUnswitchPass(bool Os) {
return new LoopUnswitch(Os);
@@ -200,13 +199,12 @@ static Value *FindLIVLoopCondition(Value *Cond, Loop *L, bool &Changed) {
bool LoopUnswitch::runOnLoop(Loop *L, LPPassManager &LPM_Ref) {
LI = &getAnalysis<LoopInfo>();
LPM = &LPM_Ref;
- DF = getAnalysisIfAvailable<DominanceFrontier>();
DT = getAnalysisIfAvailable<DominatorTree>();
currentLoop = L;
Function *F = currentLoop->getHeader()->getParent();
bool Changed = false;
do {
- assert(currentLoop->isLCSSAForm());
+ assert(currentLoop->isLCSSAForm(*DT));
redoLoop = false;
Changed |= processCurrentLoop();
} while(redoLoop);
@@ -215,8 +213,6 @@ bool LoopUnswitch::runOnLoop(Loop *L, LPPassManager &LPM_Ref) {
// FIXME: Reconstruct dom info, because it is not preserved properly.
if (DT)
DT->runOnFunction(*F);
- if (DF)
- DF->runOnFunction(*F);
}
return Changed;
}
@@ -231,8 +227,7 @@ bool LoopUnswitch::processCurrentLoop() {
// block that is branching on a loop-invariant condition, we can unswitch this
// loop.
for (Loop::block_iterator I = currentLoop->block_begin(),
- E = currentLoop->block_end();
- I != E; ++I) {
+ E = currentLoop->block_end(); I != E; ++I) {
TerminatorInst *TI = (*I)->getTerminator();
if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
// If this isn't branching on an invariant condition, we can't unswitch
@@ -282,19 +277,18 @@ bool LoopUnswitch::processCurrentLoop() {
return Changed;
}
-/// isTrivialLoopExitBlock - Check to see if all paths from BB either:
-/// 1. Exit the loop with no side effects.
-/// 2. Branch to the latch block with no side-effects.
+/// isTrivialLoopExitBlock - Check to see if all paths from BB exit the
+/// loop with no side effects (including infinite loops).
///
-/// If these conditions are true, we return true and set ExitBB to the block we
+/// If so, we return true and set ExitBB to the block we
/// exit through.
///
static bool isTrivialLoopExitBlockHelper(Loop *L, BasicBlock *BB,
BasicBlock *&ExitBB,
std::set<BasicBlock*> &Visited) {
if (!Visited.insert(BB).second) {
- // Already visited and Ok, end of recursion.
- return true;
+ // Already visited. Without more analysis, this could indicate an infinite loop.
+ return false;
} else if (!L->contains(BB)) {
// Otherwise, this is a loop exit, this is fine so long as this is the
// first exit.
@@ -324,7 +318,7 @@ static bool isTrivialLoopExitBlockHelper(Loop *L, BasicBlock *BB,
/// process. If so, return the block that is exited to, otherwise return null.
static BasicBlock *isTrivialLoopExitBlock(Loop *L, BasicBlock *BB) {
std::set<BasicBlock*> Visited;
- Visited.insert(L->getHeader()); // Branches to header are ok.
+ Visited.insert(L->getHeader()); // Branches to header make infinite loops.
BasicBlock *ExitBB = 0;
if (isTrivialLoopExitBlockHelper(L, BB, ExitBB, Visited))
return ExitBB;
@@ -356,8 +350,8 @@ bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val,
if (!BI->isConditional() || BI->getCondition() != Cond)
return false;
- // Check to see if a successor of the branch is guaranteed to go to the
- // latch block or exit through a one exit block without having any
+ // Check to see if a successor of the branch is guaranteed to
+ // exit through a unique exit block without having any
// side-effects. If so, determine the value of Cond that causes it to do
// this.
if ((LoopExitBB = isTrivialLoopExitBlock(currentLoop,
@@ -415,68 +409,65 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
Function *F = loopHeader->getParent();
- // If the condition is trivial, always unswitch. There is no code growth for
- // this case.
- if (!IsTrivialUnswitchCondition(LoopCond)) {
- // Check to see if it would be profitable to unswitch current loop.
+ Constant *CondVal = 0;
+ BasicBlock *ExitBlock = 0;
+ if (IsTrivialUnswitchCondition(LoopCond, &CondVal, &ExitBlock)) {
+ // If the condition is trivial, always unswitch. There is no code growth
+ // for this case.
+ UnswitchTrivialCondition(currentLoop, LoopCond, CondVal, ExitBlock);
+ return true;
+ }
- // Do not do non-trivial unswitch while optimizing for size.
- if (OptimizeForSize || F->hasFnAttr(Attribute::OptimizeForSize))
- return false;
+ // Check to see if it would be profitable to unswitch current loop.
- // FIXME: This is overly conservative because it does not take into
- // consideration code simplification opportunities and code that can
- // be shared by the resultant unswitched loops.
- CodeMetrics Metrics;
- for (Loop::block_iterator I = currentLoop->block_begin(),
- E = currentLoop->block_end();
- I != E; ++I)
- Metrics.analyzeBasicBlock(*I);
-
- // Limit the number of instructions to avoid causing significant code
- // expansion, and the number of basic blocks, to avoid loops with
- // large numbers of branches which cause loop unswitching to go crazy.
- // This is a very ad-hoc heuristic.
- if (Metrics.NumInsts > Threshold ||
- Metrics.NumBlocks * 5 > Threshold ||
- Metrics.NeverInline) {
- DEBUG(dbgs() << "NOT unswitching loop %"
- << currentLoop->getHeader()->getName() << ", cost too high: "
- << currentLoop->getBlocks().size() << "\n");
- return false;
- }
- }
+ // Do not do non-trivial unswitch while optimizing for size.
+ if (OptimizeForSize || F->hasFnAttr(Attribute::OptimizeForSize))
+ return false;
- Constant *CondVal;
- BasicBlock *ExitBlock;
- if (IsTrivialUnswitchCondition(LoopCond, &CondVal, &ExitBlock)) {
- UnswitchTrivialCondition(currentLoop, LoopCond, CondVal, ExitBlock);
- } else {
- UnswitchNontrivialCondition(LoopCond, Val, currentLoop);
+ // FIXME: This is overly conservative because it does not take into
+ // consideration code simplification opportunities and code that can
+ // be shared by the resultant unswitched loops.
+ CodeMetrics Metrics;
+ for (Loop::block_iterator I = currentLoop->block_begin(),
+ E = currentLoop->block_end();
+ I != E; ++I)
+ Metrics.analyzeBasicBlock(*I);
+
+ // Limit the number of instructions to avoid causing significant code
+ // expansion, and the number of basic blocks, to avoid loops with
+ // large numbers of branches which cause loop unswitching to go crazy.
+ // This is a very ad-hoc heuristic.
+ if (Metrics.NumInsts > Threshold ||
+ Metrics.NumBlocks * 5 > Threshold ||
+ Metrics.containsIndirectBr || Metrics.isRecursive) {
+ DEBUG(dbgs() << "NOT unswitching loop %"
+ << currentLoop->getHeader()->getName() << ", cost too high: "
+ << currentLoop->getBlocks().size() << "\n");
+ return false;
}
+ UnswitchNontrivialCondition(LoopCond, Val, currentLoop);
return true;
}
// RemapInstruction - Convert the instruction operands from referencing the
-// current values into those specified by ValueMap.
+// current values into those specified by VMap.
//
static inline void RemapInstruction(Instruction *I,
- DenseMap<const Value *, Value*> &ValueMap) {
+ ValueMap<const Value *, Value*> &VMap) {
for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
Value *Op = I->getOperand(op);
- DenseMap<const Value *, Value*>::iterator It = ValueMap.find(Op);
- if (It != ValueMap.end()) Op = It->second;
+ ValueMap<const Value *, Value*>::iterator It = VMap.find(Op);
+ if (It != VMap.end()) Op = It->second;
I->setOperand(op, Op);
}
}
/// CloneLoop - Recursively clone the specified loop and all of its children,
/// mapping the blocks with the specified map.
-static Loop *CloneLoop(Loop *L, Loop *PL, DenseMap<const Value*, Value*> &VM,
+static Loop *CloneLoop(Loop *L, Loop *PL, ValueMap<const Value*, Value*> &VM,
LoopInfo *LI, LPPassManager *LPM) {
Loop *New = new Loop();
-
LPM->insertLoop(New, PL);
// Add all of the blocks in L to the new loop.
@@ -567,8 +558,7 @@ void LoopUnswitch::UnswitchTrivialCondition(Loop *L, Value *Cond,
/// SplitExitEdges - Split all of the edges from inside the loop to their exit
/// blocks. Update the appropriate Phi nodes as we do so.
void LoopUnswitch::SplitExitEdges(Loop *L,
- const SmallVector<BasicBlock *, 8> &ExitBlocks)
-{
+ const SmallVector<BasicBlock *, 8> &ExitBlocks){
for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
BasicBlock *ExitBlock = ExitBlocks[i];
@@ -619,21 +609,21 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
// the loop preheader and exit blocks), keeping track of the mapping between
// the instructions and blocks.
NewBlocks.reserve(LoopBlocks.size());
- DenseMap<const Value*, Value*> ValueMap;
+ ValueMap<const Value*, Value*> VMap;
for (unsigned i = 0, e = LoopBlocks.size(); i != e; ++i) {
- BasicBlock *New = CloneBasicBlock(LoopBlocks[i], ValueMap, ".us", F);
- NewBlocks.push_back(New);
- ValueMap[LoopBlocks[i]] = New; // Keep the BB mapping.
- LPM->cloneBasicBlockSimpleAnalysis(LoopBlocks[i], New, L);
+ BasicBlock *NewBB = CloneBasicBlock(LoopBlocks[i], VMap, ".us", F);
+ NewBlocks.push_back(NewBB);
+ VMap[LoopBlocks[i]] = NewBB; // Keep the BB mapping.
+ LPM->cloneBasicBlockSimpleAnalysis(LoopBlocks[i], NewBB, L);
}
// Splice the newly inserted blocks into the function right before the
// original preheader.
- F->getBasicBlockList().splice(LoopBlocks[0], F->getBasicBlockList(),
+ F->getBasicBlockList().splice(NewPreheader, F->getBasicBlockList(),
NewBlocks[0], F->end());
// Now we create the new Loop object for the versioned loop.
- Loop *NewLoop = CloneLoop(L, L->getParentLoop(), ValueMap, LI, LPM);
+ Loop *NewLoop = CloneLoop(L, L->getParentLoop(), VMap, LI, LPM);
Loop *ParentLoop = L->getParentLoop();
if (ParentLoop) {
// Make sure to add the cloned preheader and exit blocks to the parent loop
@@ -642,7 +632,7 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
}
for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
- BasicBlock *NewExit = cast<BasicBlock>(ValueMap[ExitBlocks[i]]);
+ BasicBlock *NewExit = cast<BasicBlock>(VMap[ExitBlocks[i]]);
// The new exit block should be in the same loop as the old one.
if (Loop *ExitBBLoop = LI->getLoopFor(ExitBlocks[i]))
ExitBBLoop->addBasicBlockToLoop(NewExit, LI->getBase());
@@ -654,11 +644,11 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
// If the successor of the exit block had PHI nodes, add an entry for
// NewExit.
PHINode *PN;
- for (BasicBlock::iterator I = ExitSucc->begin();
- (PN = dyn_cast<PHINode>(I)); ++I) {
+ for (BasicBlock::iterator I = ExitSucc->begin(); isa<PHINode>(I); ++I) {
+ PN = cast<PHINode>(I);
Value *V = PN->getIncomingValueForBlock(ExitBlocks[i]);
- DenseMap<const Value *, Value*>::iterator It = ValueMap.find(V);
- if (It != ValueMap.end()) V = It->second;
+ ValueMap<const Value *, Value*>::iterator It = VMap.find(V);
+ if (It != VMap.end()) V = It->second;
PN->addIncoming(V, NewExit);
}
}
@@ -667,7 +657,7 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
for (unsigned i = 0, e = NewBlocks.size(); i != e; ++i)
for (BasicBlock::iterator I = NewBlocks[i]->begin(),
E = NewBlocks[i]->end(); I != E; ++I)
- RemapInstruction(I, ValueMap);
+ RemapInstruction(I, VMap);
// Rewrite the original preheader to select between versions of the loop.
BranchInst *OldBR = cast<BranchInst>(loopPreheader->getTerminator());
@@ -682,15 +672,22 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
LoopProcessWorklist.push_back(NewLoop);
redoLoop = true;
+ // Keep a WeakVH holding onto LIC. If the first call to RewriteLoopBody
+ // deletes the instruction (for example by simplifying a PHI that feeds into
+ // the condition that we're unswitching on), we don't rewrite the second
+ // iteration.
+ WeakVH LICHandle(LIC);
+
// Now we rewrite the original code to know that the condition is true and the
// new code to know that the condition is false.
- RewriteLoopBodyWithConditionConstant(L , LIC, Val, false);
-
- // It's possible that simplifying one loop could cause the other to be
- // deleted. If so, don't simplify it.
- if (!LoopProcessWorklist.empty() && LoopProcessWorklist.back() == NewLoop)
- RewriteLoopBodyWithConditionConstant(NewLoop, LIC, Val, true);
+ RewriteLoopBodyWithConditionConstant(L, LIC, Val, false);
+ // It's possible that simplifying one loop could cause the other to be
+ // changed to another value or a constant. If it's a constant, don't simplify
+ // it.
+ if (!LoopProcessWorklist.empty() && LoopProcessWorklist.back() == NewLoop &&
+ LICHandle && !isa<Constant>(LICHandle))
+ RewriteLoopBodyWithConditionConstant(NewLoop, LICHandle, Val, true);
}
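
The WeakVH added above guards against the first RewriteLoopBodyWithConditionConstant
call deleting or constant-folding LIC before the cloned loop is rewritten. The
snippet below only illustrates the shape of that idiom using std::weak_ptr; LLVM's
WeakVH is tied into the Value use machinery and also follows replaceAllUsesWith, and
the Value type here is a stand-in, not llvm::Value.

#include <memory>
#include <cstdio>

struct Value { int Id; };

int main() {
  std::shared_ptr<Value> V(new Value());
  V->Id = 42;
  std::weak_ptr<Value> Handle = V;   // plays the role the WeakVH plays above

  V.reset();                         // "the first rewrite deleted the value"

  if (std::shared_ptr<Value> Live = Handle.lock())
    std::printf("still alive, rewrite the cloned loop with %d\n", Live->Id);
  else
    std::printf("gone, skip the second rewrite\n");
  return 0;
}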
/// RemoveFromWorklist - Remove all instances of I from the worklist vector
@@ -886,65 +883,66 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
U->replaceUsesOfWith(LIC, Replacement);
Worklist.push_back(U);
}
- } else {
- // Otherwise, we don't know the precise value of LIC, but we do know that it
- // is certainly NOT "Val". As such, simplify any uses in the loop that we
- // can. This case occurs when we unswitch switch statements.
- for (unsigned i = 0, e = Users.size(); i != e; ++i)
- if (Instruction *U = cast<Instruction>(Users[i])) {
- if (!L->contains(U))
- continue;
+ SimplifyCode(Worklist, L);
+ return;
+ }
+
+ // Otherwise, we don't know the precise value of LIC, but we do know that it
+ // is certainly NOT "Val". As such, simplify any uses in the loop that we
+ // can. This case occurs when we unswitch switch statements.
+ for (unsigned i = 0, e = Users.size(); i != e; ++i) {
+ Instruction *U = cast<Instruction>(Users[i]);
+ if (!L->contains(U))
+ continue;
- Worklist.push_back(U);
+ Worklist.push_back(U);
- // If we know that LIC is not Val, use this info to simplify code.
- if (SwitchInst *SI = dyn_cast<SwitchInst>(U)) {
- for (unsigned i = 1, e = SI->getNumCases(); i != e; ++i) {
- if (SI->getCaseValue(i) == Val) {
- // Found a dead case value. Don't remove PHI nodes in the
- // successor if they become single-entry, those PHI nodes may
- // be in the Users list.
-
- // FIXME: This is a hack. We need to keep the successor around
- // and hooked up so as to preserve the loop structure, because
- // trying to update it is complicated. So instead we preserve the
- // loop structure and put the block on a dead code path.
- BasicBlock *Switch = SI->getParent();
- SplitEdge(Switch, SI->getSuccessor(i), this);
- // Compute the successors instead of relying on the return value
- // of SplitEdge, since it may have split the switch successor
- // after PHI nodes.
- BasicBlock *NewSISucc = SI->getSuccessor(i);
- BasicBlock *OldSISucc = *succ_begin(NewSISucc);
- // Create an "unreachable" destination.
- BasicBlock *Abort = BasicBlock::Create(Context, "us-unreachable",
- Switch->getParent(),
- OldSISucc);
- new UnreachableInst(Context, Abort);
- // Force the new case destination to branch to the "unreachable"
- // block while maintaining a (dead) CFG edge to the old block.
- NewSISucc->getTerminator()->eraseFromParent();
- BranchInst::Create(Abort, OldSISucc,
- ConstantInt::getTrue(Context), NewSISucc);
- // Release the PHI operands for this edge.
- for (BasicBlock::iterator II = NewSISucc->begin();
- PHINode *PN = dyn_cast<PHINode>(II); ++II)
- PN->setIncomingValue(PN->getBasicBlockIndex(Switch),
- UndefValue::get(PN->getType()));
- // Tell the domtree about the new block. We don't fully update the
- // domtree here -- instead we force it to do a full recomputation
- // after the pass is complete -- but we do need to inform it of
- // new blocks.
- if (DT)
- DT->addNewBlock(Abort, NewSISucc);
- break;
- }
- }
- }
+ // TODO: We could do other simplifications, for example, turning
+ // 'icmp eq LIC, Val' -> false.
+
+ // If we know that LIC is not Val, use this info to simplify code.
+ SwitchInst *SI = dyn_cast<SwitchInst>(U);
+ if (SI == 0 || !isa<ConstantInt>(Val)) continue;
+
+ unsigned DeadCase = SI->findCaseValue(cast<ConstantInt>(Val));
+ if (DeadCase == 0) continue; // Default case is live for multiple values.
+
+ // Found a dead case value. Don't remove PHI nodes in the
+ // successor if they become single-entry, those PHI nodes may
+ // be in the Users list.
- // TODO: We could do other simplifications, for example, turning
- // LIC == Val -> false.
- }
+ // FIXME: This is a hack. We need to keep the successor around
+ // and hooked up so as to preserve the loop structure, because
+ // trying to update it is complicated. So instead we preserve the
+ // loop structure and put the block on a dead code path.
+ BasicBlock *Switch = SI->getParent();
+ SplitEdge(Switch, SI->getSuccessor(DeadCase), this);
+ // Compute the successors instead of relying on the return value
+ // of SplitEdge, since it may have split the switch successor
+ // after PHI nodes.
+ BasicBlock *NewSISucc = SI->getSuccessor(DeadCase);
+ BasicBlock *OldSISucc = *succ_begin(NewSISucc);
+ // Create an "unreachable" destination.
+ BasicBlock *Abort = BasicBlock::Create(Context, "us-unreachable",
+ Switch->getParent(),
+ OldSISucc);
+ new UnreachableInst(Context, Abort);
+ // Force the new case destination to branch to the "unreachable"
+ // block while maintaining a (dead) CFG edge to the old block.
+ NewSISucc->getTerminator()->eraseFromParent();
+ BranchInst::Create(Abort, OldSISucc,
+ ConstantInt::getTrue(Context), NewSISucc);
+ // Release the PHI operands for this edge.
+ for (BasicBlock::iterator II = NewSISucc->begin();
+ PHINode *PN = dyn_cast<PHINode>(II); ++II)
+ PN->setIncomingValue(PN->getBasicBlockIndex(Switch),
+ UndefValue::get(PN->getType()));
+ // Tell the domtree about the new block. We don't fully update the
+ // domtree here -- instead we force it to do a full recomputation
+ // after the pass is complete -- but we do need to inform it of
+ // new blocks.
+ if (DT)
+ DT->addNewBlock(Abort, NewSISucc);
}
SimplifyCode(Worklist, L);
@@ -985,45 +983,16 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
continue;
}
+ // See if instruction simplification can hack this up. This is common for
+ // things like "select false, X, Y" after unswitching made the condition be
+ // 'false'.
+ if (Value *V = SimplifyInstruction(I)) {
+ ReplaceUsesOfWith(I, V, Worklist, L, LPM);
+ continue;
+ }
+
// Special case hacks that appear commonly in unswitched code.
- switch (I->getOpcode()) {
- case Instruction::Select:
- if (ConstantInt *CB = dyn_cast<ConstantInt>(I->getOperand(0))) {
- ReplaceUsesOfWith(I, I->getOperand(!CB->getZExtValue()+1), Worklist, L,
- LPM);
- continue;
- }
- break;
- case Instruction::And:
- if (isa<ConstantInt>(I->getOperand(0)) &&
- // constant -> RHS
- I->getOperand(0)->getType()->isIntegerTy(1))
- cast<BinaryOperator>(I)->swapOperands();
- if (ConstantInt *CB = dyn_cast<ConstantInt>(I->getOperand(1)))
- if (CB->getType()->isIntegerTy(1)) {
- if (CB->isOne()) // X & 1 -> X
- ReplaceUsesOfWith(I, I->getOperand(0), Worklist, L, LPM);
- else // X & 0 -> 0
- ReplaceUsesOfWith(I, I->getOperand(1), Worklist, L, LPM);
- continue;
- }
- break;
- case Instruction::Or:
- if (isa<ConstantInt>(I->getOperand(0)) &&
- // constant -> RHS
- I->getOperand(0)->getType()->isIntegerTy(1))
- cast<BinaryOperator>(I)->swapOperands();
- if (ConstantInt *CB = dyn_cast<ConstantInt>(I->getOperand(1)))
- if (CB->getType()->isIntegerTy(1)) {
- if (CB->isOne()) // X | 1 -> 1
- ReplaceUsesOfWith(I, I->getOperand(1), Worklist, L, LPM);
- else // X | 0 -> X
- ReplaceUsesOfWith(I, I->getOperand(0), Worklist, L, LPM);
- continue;
- }
- break;
- case Instruction::Br: {
- BranchInst *BI = cast<BranchInst>(I);
+ if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
if (BI->isUnconditional()) {
// If BI's parent is the only pred of the successor, fold the two blocks
// together.
@@ -1056,10 +1025,13 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
LPM->deleteSimpleAnalysisValue(Succ, L);
Succ->eraseFromParent();
++NumSimplify;
- } else if (ConstantInt *CB = dyn_cast<ConstantInt>(BI->getCondition())){
+ continue;
+ }
+
+ if (ConstantInt *CB = dyn_cast<ConstantInt>(BI->getCondition())){
// Conditional branch. Turn it into an unconditional branch, then
// remove dead blocks.
- break; // FIXME: Enable.
+ continue; // FIXME: Enable.
DEBUG(dbgs() << "Folded branch: " << *BI);
BasicBlock *DeadSucc = BI->getSuccessor(CB->getZExtValue());
@@ -1073,8 +1045,7 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
RemoveBlockIfDead(DeadSucc, Worklist, L);
}
- break;
- }
+ continue;
}
}
}
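
The reworked UnswitchIfProfitable above now handles the trivial case first and applies
the cost gate only to non-trivial unswitching; the gate also learned to reject loops
containing indirect branches or recursive calls, replacing the old NeverInline bit. A
standalone restatement of that gate is sketched below; LoopMetrics and
worthNontrivialUnswitch are illustrative stand-ins, not CodeMetrics or the pass's code.

struct LoopMetrics {
  unsigned NumInsts;
  unsigned NumBlocks;
  bool ContainsIndirectBr;
  bool IsRecursive;
};

static bool worthNontrivialUnswitch(const LoopMetrics &M,
                                    unsigned Threshold,
                                    bool OptimizeForSize) {
  if (OptimizeForSize)
    return false;                     // never duplicate the loop under -Os
  // Very ad-hoc: bound both instruction count and block count so that
  // duplicating the loop body cannot explode the function.
  if (M.NumInsts > Threshold || M.NumBlocks * 5 > Threshold)
    return false;
  // Indirect branches and recursion make the duplicated CFG hard to reason
  // about, so the gate refuses them outright.
  if (M.ContainsIndirectBr || M.IsRecursive)
    return false;
  return true;
}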
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/LowerAtomic.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
new file mode 100644
index 0000000..973ffe7
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -0,0 +1,161 @@
+//===- LowerAtomic.cpp - Lower atomic intrinsics --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass lowers atomic intrinsics to non-atomic form for use in a known
+// non-preemptible environment.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "loweratomic"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/BasicBlock.h"
+#include "llvm/Function.h"
+#include "llvm/Instruction.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/IRBuilder.h"
+
+using namespace llvm;
+
+namespace {
+
+bool LowerAtomicIntrinsic(CallInst *CI) {
+ IRBuilder<> Builder(CI->getParent(), CI);
+
+ Function *Callee = CI->getCalledFunction();
+ if (!Callee)
+ return false;
+
+ unsigned IID = Callee->getIntrinsicID();
+ switch (IID) {
+ case Intrinsic::memory_barrier:
+ break;
+
+ case Intrinsic::atomic_load_add:
+ case Intrinsic::atomic_load_sub:
+ case Intrinsic::atomic_load_and:
+ case Intrinsic::atomic_load_nand:
+ case Intrinsic::atomic_load_or:
+ case Intrinsic::atomic_load_xor:
+ case Intrinsic::atomic_load_max:
+ case Intrinsic::atomic_load_min:
+ case Intrinsic::atomic_load_umax:
+ case Intrinsic::atomic_load_umin: {
+ Value *Ptr = CI->getArgOperand(0);
+ Value *Delta = CI->getArgOperand(1);
+
+ LoadInst *Orig = Builder.CreateLoad(Ptr);
+ Value *Res = NULL;
+ switch (IID) {
+ default: assert(0 && "Unrecognized atomic modify operation");
+ case Intrinsic::atomic_load_add:
+ Res = Builder.CreateAdd(Orig, Delta);
+ break;
+ case Intrinsic::atomic_load_sub:
+ Res = Builder.CreateSub(Orig, Delta);
+ break;
+ case Intrinsic::atomic_load_and:
+ Res = Builder.CreateAnd(Orig, Delta);
+ break;
+ case Intrinsic::atomic_load_nand:
+ Res = Builder.CreateNot(Builder.CreateAnd(Orig, Delta));
+ break;
+ case Intrinsic::atomic_load_or:
+ Res = Builder.CreateOr(Orig, Delta);
+ break;
+ case Intrinsic::atomic_load_xor:
+ Res = Builder.CreateXor(Orig, Delta);
+ break;
+ case Intrinsic::atomic_load_max:
+ Res = Builder.CreateSelect(Builder.CreateICmpSLT(Orig, Delta),
+ Delta,
+ Orig);
+ break;
+ case Intrinsic::atomic_load_min:
+ Res = Builder.CreateSelect(Builder.CreateICmpSLT(Orig, Delta),
+ Orig,
+ Delta);
+ break;
+ case Intrinsic::atomic_load_umax:
+ Res = Builder.CreateSelect(Builder.CreateICmpULT(Orig, Delta),
+ Delta,
+ Orig);
+ break;
+ case Intrinsic::atomic_load_umin:
+ Res = Builder.CreateSelect(Builder.CreateICmpULT(Orig, Delta),
+ Orig,
+ Delta);
+ break;
+ }
+ Builder.CreateStore(Res, Ptr);
+
+ CI->replaceAllUsesWith(Orig);
+ break;
+ }
+
+ case Intrinsic::atomic_swap: {
+ Value *Ptr = CI->getArgOperand(0);
+ Value *Val = CI->getArgOperand(1);
+
+ LoadInst *Orig = Builder.CreateLoad(Ptr);
+ Builder.CreateStore(Val, Ptr);
+
+ CI->replaceAllUsesWith(Orig);
+ break;
+ }
+
+ case Intrinsic::atomic_cmp_swap: {
+ Value *Ptr = CI->getArgOperand(0);
+ Value *Cmp = CI->getArgOperand(1);
+ Value *Val = CI->getArgOperand(2);
+
+ LoadInst *Orig = Builder.CreateLoad(Ptr);
+ Value *Equal = Builder.CreateICmpEQ(Orig, Cmp);
+ Value *Res = Builder.CreateSelect(Equal, Val, Orig);
+ Builder.CreateStore(Res, Ptr);
+
+ CI->replaceAllUsesWith(Orig);
+ break;
+ }
+
+ default:
+ return false;
+ }
+
+ assert(CI->use_empty() &&
+ "Lowering should have eliminated any uses of the intrinsic call!");
+ CI->eraseFromParent();
+
+ return true;
+}
+
+struct LowerAtomic : public BasicBlockPass {
+ static char ID;
+ LowerAtomic() : BasicBlockPass(ID) {}
+ bool runOnBasicBlock(BasicBlock &BB) {
+ bool Changed = false;
+ for (BasicBlock::iterator DI = BB.begin(), DE = BB.end(); DI != DE; ) {
+ Instruction *Inst = DI++;
+ if (CallInst *CI = dyn_cast<CallInst>(Inst))
+ Changed |= LowerAtomicIntrinsic(CI);
+ }
+ return Changed;
+ }
+
+};
+
+}
+
+char LowerAtomic::ID = 0;
+INITIALIZE_PASS(LowerAtomic, "loweratomic",
+ "Lower atomic intrinsics to non-atomic form",
+ false, false);
+
+Pass *llvm::createLowerAtomicPass() { return new LowerAtomic(); }
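
Semantically, the new pass above turns each atomic intrinsic into a plain load, a
non-atomic computation and a store, returning the original value, which is only sound
in the known non-preemptible environment the file header describes. The helpers below
restate that lowering in ordinary C++ for a few representative operations; they are
illustrative and not part of the pass.

// Only valid when nothing can preempt the sequence.
template <typename T>
T lowered_fetch_add(T *Ptr, T Delta) {
  T Orig = *Ptr;                       // the intrinsic's result is the old value
  *Ptr = Orig + Delta;
  return Orig;
}

template <typename T>
T lowered_cmp_swap(T *Ptr, T Cmp, T Val) {
  T Orig = *Ptr;
  *Ptr = (Orig == Cmp) ? Val : Orig;   // the icmp/select/store the pass emits
  return Orig;
}

template <typename T>
T lowered_swap(T *Ptr, T Val) {
  T Orig = *Ptr;
  *Ptr = Val;
  return Orig;
}

For example, atomic_cmp_swap becomes exactly the load, ICmpEQ, select and store
sequence that lowered_cmp_swap spells out.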
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 62e2977..24fae42 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -304,7 +304,7 @@ namespace {
bool runOnFunction(Function &F);
public:
static char ID; // Pass identification, replacement for typeid
- MemCpyOpt() : FunctionPass(&ID) {}
+ MemCpyOpt() : FunctionPass(ID) {}
private:
// This transformation requires dominator postdominator info
@@ -331,8 +331,7 @@ namespace {
// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }
-static RegisterPass<MemCpyOpt> X("memcpyopt",
- "MemCpy Optimization");
+INITIALIZE_PASS(MemCpyOpt, "memcpyopt", "MemCpy Optimization", false, false);
@@ -374,7 +373,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
// If the call is readnone, ignore it, otherwise bail out. We don't even
// allow readonly here because we don't want something like:
// A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
- if (AA.getModRefBehavior(CallSite::get(BI)) ==
+ if (AA.getModRefBehavior(CallSite(BI)) ==
AliasAnalysis::DoesNotAccessMemory)
continue;
@@ -413,7 +412,6 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
// interesting as a small compile-time optimization.
Ranges.addStore(0, SI);
- Function *MemSetF = 0;
// Now that we have full information about ranges, loop over the ranges and
// emit memset's for anything big enough to be worthwhile.
@@ -433,29 +431,40 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
// memset block. This ensure that the memset is dominated by any addressing
// instruction needed by the start of the block.
BasicBlock::iterator InsertPt = BI;
-
- if (MemSetF == 0) {
- const Type *Ty = Type::getInt64Ty(Context);
- MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, &Ty, 1);
- }
-
+
// Get the starting pointer of the block.
StartPtr = Range.StartPtr;
-
+
+ // Determine alignment
+ unsigned Alignment = Range.Alignment;
+ if (Alignment == 0) {
+ const Type *EltType =
+ cast<PointerType>(StartPtr->getType())->getElementType();
+ Alignment = TD->getABITypeAlignment(EltType);
+ }
+
// Cast the start ptr to be i8* as memset requires.
- const Type *i8Ptr = Type::getInt8PtrTy(Context);
- if (StartPtr->getType() != i8Ptr)
+ const PointerType* StartPTy = cast<PointerType>(StartPtr->getType());
+ const PointerType *i8Ptr = Type::getInt8PtrTy(Context,
+ StartPTy->getAddressSpace());
+ if (StartPTy != i8Ptr)
StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
InsertPt);
-
+
Value *Ops[] = {
StartPtr, ByteVal, // Start, value
// size
ConstantInt::get(Type::getInt64Ty(Context), Range.End-Range.Start),
// align
- ConstantInt::get(Type::getInt32Ty(Context), Range.Alignment)
+ ConstantInt::get(Type::getInt32Ty(Context), Alignment),
+ // volatile
+ ConstantInt::get(Type::getInt1Ty(Context), 0),
};
- Value *C = CallInst::Create(MemSetF, Ops, Ops+4, "", InsertPt);
+ const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() };
+
+ Function *MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);
+
+ Value *C = CallInst::Create(MemSetF, Ops, Ops+5, "", InsertPt);
DEBUG(dbgs() << "Replace stores:\n";
for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
dbgs() << *Range.TheStores[i];
@@ -499,7 +508,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
// because we'll need to do type comparisons based on the underlying type.
Value *cpyDest = cpy->getDest();
Value *cpySrc = cpy->getSource();
- CallSite CS = CallSite::get(C);
+ CallSite CS(C);
// We need to be able to reason about the size of the memcpy, so we require
// that it be a constant.
@@ -622,15 +631,16 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
// Remove the memcpy
MD.removeInstruction(cpy);
cpy->eraseFromParent();
- NumMemCpyInstr++;
+ ++NumMemCpyInstr;
return true;
}
-/// processMemCpy - perform simplication of memcpy's. If we have memcpy A which
-/// copies X to Y, and memcpy B which copies Y to Z, then we can rewrite B to be
-/// a memcpy from X to Z (or potentially a memmove, depending on circumstances).
-/// This allows later passes to remove the first memcpy altogether.
+/// processMemCpy - perform simplification of memcpy's. If we have memcpy A
+/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
+/// B to be a memcpy from X to Z (or potentially a memmove, depending on
+/// circumstances). This allows later passes to remove the first memcpy
+/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
@@ -680,16 +690,19 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
return false;
// If all checks passed, then we can transform these memcpy's
- const Type *Ty = M->getLength()->getType();
+ const Type *ArgTys[3] = { M->getRawDest()->getType(),
+ MDep->getRawSource()->getType(),
+ M->getLength()->getType() };
Function *MemCpyFun = Intrinsic::getDeclaration(
M->getParent()->getParent()->getParent(),
- M->getIntrinsicID(), &Ty, 1);
+ M->getIntrinsicID(), ArgTys, 3);
- Value *Args[4] = {
- M->getRawDest(), MDep->getRawSource(), M->getLength(), M->getAlignmentCst()
+ Value *Args[5] = {
+ M->getRawDest(), MDep->getRawSource(), M->getLength(),
+ M->getAlignmentCst(), M->getVolatileCst()
};
- CallInst *C = CallInst::Create(MemCpyFun, Args, Args+4, "", M);
+ CallInst *C = CallInst::Create(MemCpyFun, Args, Args+5, "", M);
// If C and M don't interfere, then this is a valid transformation. If they
@@ -697,7 +710,7 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
if (MD.getDependency(C) == dep) {
MD.removeInstruction(M);
M->eraseFromParent();
- NumMemCpyInstr++;
+ ++NumMemCpyInstr;
return true;
}
@@ -728,8 +741,11 @@ bool MemCpyOpt::processMemMove(MemMoveInst *M) {
// If not, then we know we can transform this.
Module *Mod = M->getParent()->getParent()->getParent();
- const Type *Ty = M->getLength()->getType();
- M->setOperand(0, Intrinsic::getDeclaration(Mod, Intrinsic::memcpy, &Ty, 1));
+ const Type *ArgTys[3] = { M->getRawDest()->getType(),
+ M->getRawSource()->getType(),
+ M->getLength()->getType() };
+ M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
+ ArgTys, 3));
// MemDep may have over conservative information about this instruction, just
// conservatively flush it from the cache.
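
Two things change in the hunks above: the memset/memcpy intrinsics are now looked up
with explicit pointer and length types and take an extra i1 'volatile' operand (hence
the Ops+5 and ArgTys changes), and a store range with no recorded alignment falls back
to the ABI alignment of the pointee type. The underlying transformation in processStore
is unchanged: a run of adjacent stores of one byte value is folded into a single
memset, roughly as in this illustrative plain-C++ snippet (not the pass's IR-building
code).

#include <cstring>
#include <cstddef>

// Replaces: for (std::size_t i = Start; i != End; ++i) Base[i] = ByteVal;
static void coalesceByteStores(unsigned char *Base, std::size_t Start,
                               std::size_t End, unsigned char ByteVal) {
  std::memset(Base + Start, ByteVal, End - Start);
}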
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/Reassociate.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/Reassociate.cpp
index 5aca9cd..b8afcc1 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -77,7 +77,7 @@ namespace {
bool MadeChange;
public:
static char ID; // Pass identification, replacement for typeid
- Reassociate() : FunctionPass(&ID) {}
+ Reassociate() : FunctionPass(ID) {}
bool runOnFunction(Function &F);
@@ -103,7 +103,8 @@ namespace {
}
char Reassociate::ID = 0;
-static RegisterPass<Reassociate> X("reassociate", "Reassociate expressions");
+INITIALIZE_PASS(Reassociate, "reassociate",
+ "Reassociate expressions", false, false);
// Public interface to the Reassociate pass
FunctionPass *llvm::createReassociatePass() { return new Reassociate(); }
@@ -407,13 +408,14 @@ static Value *NegateValue(Value *V, Instruction *BI) {
// Okay, we need to materialize a negated version of V with an instruction.
// Scan the use lists of V to see if we have one already.
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
- if (!BinaryOperator::isNeg(*UI)) continue;
+ User *U = *UI;
+ if (!BinaryOperator::isNeg(U)) continue;
// We found one! Now we have to make sure that the definition dominates
// this use. We do this by moving it to the entry block (if it is a
// non-instruction value) or right after the definition. These negates will
// be zapped by reassociate later, so we don't need much finesse here.
- BinaryOperator *TheNeg = cast<BinaryOperator>(*UI);
+ BinaryOperator *TheNeg = cast<BinaryOperator>(U);
// Verify that the negate is in this function, V might be a constant expr.
if (TheNeg->getParent()->getParent() != BI->getParent()->getParent())
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/Reg2Mem.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
index 99e1252..506b72a 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/Reg2Mem.cpp
@@ -36,7 +36,7 @@ STATISTIC(NumPhisDemoted, "Number of phi-nodes demoted");
namespace {
struct RegToMem : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- RegToMem() : FunctionPass(&ID) {}
+ RegToMem() : FunctionPass(ID) {}
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequiredID(BreakCriticalEdgesID);
@@ -45,11 +45,12 @@ namespace {
bool valueEscapes(const Instruction *Inst) const {
const BasicBlock *BB = Inst->getParent();
- for (Value::use_const_iterator UI = Inst->use_begin(),E = Inst->use_end();
- UI != E; ++UI)
- if (cast<Instruction>(*UI)->getParent() != BB ||
- isa<PHINode>(*UI))
+ for (Value::const_use_iterator UI = Inst->use_begin(),E = Inst->use_end();
+ UI != E; ++UI) {
+ const Instruction *I = cast<Instruction>(*UI);
+ if (I->getParent() != BB || isa<PHINode>(I))
return true;
+ }
return false;
}
@@ -58,8 +59,8 @@ namespace {
}
char RegToMem::ID = 0;
-static RegisterPass<RegToMem>
-X("reg2mem", "Demote all values to stack slots");
+INITIALIZE_PASS(RegToMem, "reg2mem", "Demote all values to stack slots",
+ false, false);
bool RegToMem::runOnFunction(Function &F) {
@@ -123,7 +124,7 @@ bool RegToMem::runOnFunction(Function &F) {
// createDemoteRegisterToMemory - Provide an entry point to create this pass.
//
-const PassInfo *const llvm::DemoteRegisterToMemoryID = &X;
+char &llvm::DemoteRegisterToMemoryID = RegToMem::ID;
FunctionPass *llvm::createDemoteRegisterToMemoryPass() {
return new RegToMem();
}
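
The valueEscapes cleanup above keeps the same test: an instruction needs to be demoted
to a stack slot only if some use lives outside its defining block or is a PHI node.
Restated over toy types for illustration (Inst here is a made-up struct, not
llvm::Instruction):

#include <vector>

struct Inst {
  const void *Parent;        // identity of the containing basic block
  bool IsPHI;
  std::vector<Inst *> Users;
};

static bool valueEscapes(const Inst &I) {
  for (std::vector<Inst *>::const_iterator U = I.Users.begin(),
       E = I.Users.end(); U != E; ++U)
    if ((*U)->Parent != I.Parent || (*U)->IsPHI)
      return true;
  return false;
}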
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/SCCP.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/SCCP.cpp
index 7e37938..6115c05 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/SCCP.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/SCCP.cpp
@@ -275,12 +275,12 @@ public:
return I->second;
}
- LatticeVal getStructLatticeValueFor(Value *V, unsigned i) const {
+ /*LatticeVal getStructLatticeValueFor(Value *V, unsigned i) const {
DenseMap<std::pair<Value*, unsigned>, LatticeVal>::const_iterator I =
StructValueState.find(std::make_pair(V, i));
assert(I != StructValueState.end() && "V is not in valuemap!");
return I->second;
- }
+ }*/
/// getTrackedRetVals - Get the inferred return value map.
///
@@ -317,7 +317,10 @@ private:
void markConstant(LatticeVal &IV, Value *V, Constant *C) {
if (!IV.markConstant(C)) return;
DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n');
- InstWorkList.push_back(V);
+ if (IV.isOverdefined())
+ OverdefinedInstWorkList.push_back(V);
+ else
+ InstWorkList.push_back(V);
}
void markConstant(Value *V, Constant *C) {
@@ -327,9 +330,13 @@ private:
void markForcedConstant(Value *V, Constant *C) {
assert(!V->getType()->isStructTy() && "Should use other method");
- ValueState[V].markForcedConstant(C);
+ LatticeVal &IV = ValueState[V];
+ IV.markForcedConstant(C);
DEBUG(dbgs() << "markForcedConstant: " << *C << ": " << *V << '\n');
- InstWorkList.push_back(V);
+ if (IV.isOverdefined())
+ OverdefinedInstWorkList.push_back(V);
+ else
+ InstWorkList.push_back(V);
}
@@ -501,17 +508,16 @@ private:
void visitLoadInst (LoadInst &I);
void visitGetElementPtrInst(GetElementPtrInst &I);
void visitCallInst (CallInst &I) {
- visitCallSite(CallSite::get(&I));
+ visitCallSite(&I);
}
void visitInvokeInst (InvokeInst &II) {
- visitCallSite(CallSite::get(&II));
+ visitCallSite(&II);
visitTerminatorInst(II);
}
void visitCallSite (CallSite CS);
void visitUnwindInst (TerminatorInst &I) { /*returns void*/ }
void visitUnreachableInst(TerminatorInst &I) { /*returns void*/ }
void visitAllocaInst (Instruction &I) { markOverdefined(&I); }
- void visitVANextInst (Instruction &I) { markOverdefined(&I); }
void visitVAArgInst (Instruction &I) { markAnythingOverdefined(&I); }
void visitInstruction(Instruction &I) {
@@ -1445,6 +1451,8 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
// After a zero extend, we know the top part is zero. SExt doesn't have
// to be handled here, because we don't know whether the top part is 1's
// or 0's.
+ case Instruction::SIToFP: // some FP values are not possible, just use 0.
+ case Instruction::UIToFP: // some FP values are not possible, just use 0.
markForcedConstant(I, Constant::getNullValue(ITy));
return true;
case Instruction::Mul:
@@ -1521,45 +1529,48 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
}
}
+ // Check to see if we have a branch or switch on an undefined value. If so
+ // we force the branch to go one way or the other to make the successor
+ // values live. It doesn't really matter which way we force it.
TerminatorInst *TI = BB->getTerminator();
if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
if (!BI->isConditional()) continue;
if (!getValueState(BI->getCondition()).isUndefined())
continue;
- } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
+
+ // If the input to SCCP is actually branch on undef, fix the undef to
+ // false.
+ if (isa<UndefValue>(BI->getCondition())) {
+ BI->setCondition(ConstantInt::getFalse(BI->getContext()));
+ markEdgeExecutable(BB, TI->getSuccessor(1));
+ return true;
+ }
+
+ // Otherwise, it is a branch on a symbolic value which is currently
+ // considered to be undef. Handle this by forcing the input value to the
+ // branch to false.
+ markForcedConstant(BI->getCondition(),
+ ConstantInt::getFalse(TI->getContext()));
+ return true;
+ }
+
+ if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
if (SI->getNumSuccessors() < 2) // no cases
continue;
if (!getValueState(SI->getCondition()).isUndefined())
continue;
- } else {
- continue;
- }
-
- // If the edge to the second successor isn't thought to be feasible yet,
- // mark it so now. We pick the second one so that this goes to some
- // enumerated value in a switch instead of going to the default destination.
- if (KnownFeasibleEdges.count(Edge(BB, TI->getSuccessor(1))))
- continue;
-
- // Otherwise, it isn't already thought to be feasible. Mark it as such now
- // and return. This will make other blocks reachable, which will allow new
- // values to be discovered and existing ones to be moved in the lattice.
- markEdgeExecutable(BB, TI->getSuccessor(1));
-
- // This must be a conditional branch of switch on undef. At this point,
- // force the old terminator to branch to the first successor. This is
- // required because we are now influencing the dataflow of the function with
- // the assumption that this edge is taken. If we leave the branch condition
- // as undef, then further analysis could think the undef went another way
- // leading to an inconsistent set of conclusions.
- if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
- BI->setCondition(ConstantInt::getFalse(BI->getContext()));
- } else {
- SwitchInst *SI = cast<SwitchInst>(TI);
- SI->setCondition(SI->getCaseValue(1));
+
+ // If the input to SCCP is actually switch on undef, fix the undef to
+ // the first constant.
+ if (isa<UndefValue>(SI->getCondition())) {
+ SI->setCondition(SI->getCaseValue(1));
+ markEdgeExecutable(BB, TI->getSuccessor(1));
+ return true;
+ }
+
+ markForcedConstant(SI->getCondition(), SI->getCaseValue(1));
+ return true;
}
-
- return true;
}
return false;
@@ -1574,7 +1585,7 @@ namespace {
///
struct SCCP : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- SCCP() : FunctionPass(&ID) {}
+ SCCP() : FunctionPass(ID) {}
// runOnFunction - Run the Sparse Conditional Constant Propagation
// algorithm, and return true if the function was modified.
@@ -1588,8 +1599,8 @@ namespace {
} // end anonymous namespace
char SCCP::ID = 0;
-static RegisterPass<SCCP>
-X("sccp", "Sparse Conditional Constant Propagation");
+INITIALIZE_PASS(SCCP, "sccp",
+ "Sparse Conditional Constant Propagation", false, false);
// createSCCPPass - This is the public interface to this file.
FunctionPass *llvm::createSCCPPass() {
@@ -1690,14 +1701,15 @@ namespace {
///
struct IPSCCP : public ModulePass {
static char ID;
- IPSCCP() : ModulePass(&ID) {}
+ IPSCCP() : ModulePass(ID) {}
bool runOnModule(Module &M);
};
} // end anonymous namespace
char IPSCCP::ID = 0;
-static RegisterPass<IPSCCP>
-Y("ipsccp", "Interprocedural Sparse Conditional Constant Propagation");
+INITIALIZE_PASS(IPSCCP, "ipsccp",
+ "Interprocedural Sparse Conditional Constant Propagation",
+ false, false);
// createIPSCCPPass - This is the public interface to this file.
ModulePass *llvm::createIPSCCPPass() {
@@ -1705,34 +1717,44 @@ ModulePass *llvm::createIPSCCPPass() {
}
-static bool AddressIsTaken(GlobalValue *GV) {
+static bool AddressIsTaken(const GlobalValue *GV) {
// Delete any dead constantexpr klingons.
GV->removeDeadConstantUsers();
- for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
- UI != E; ++UI)
- if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
+ for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
+ UI != E; ++UI) {
+ const User *U = *UI;
+ if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
if (SI->getOperand(0) == GV || SI->isVolatile())
return true; // Storing addr of GV.
- } else if (isa<InvokeInst>(*UI) || isa<CallInst>(*UI)) {
+ } else if (isa<InvokeInst>(U) || isa<CallInst>(U)) {
// Make sure we are calling the function, not passing the address.
- if (UI.getOperandNo() != 0)
+ ImmutableCallSite CS(cast<Instruction>(U));
+ if (!CS.isCallee(UI))
return true;
- } else if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+ } else if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
if (LI->isVolatile())
return true;
- } else if (isa<BlockAddress>(*UI)) {
+ } else if (isa<BlockAddress>(U)) {
// blockaddress doesn't take the address of the function, it takes addr
// of label.
} else {
return true;
}
+ }
return false;
}
bool IPSCCP::runOnModule(Module &M) {
SCCPSolver Solver(getAnalysisIfAvailable<TargetData>());
+ // AddressTakenFunctions - This set keeps track of the address-taken functions
+ // that are in the input. As IPSCCP runs through and simplifies code,
+ // functions that were address taken can end up losing their
+ // address-taken-ness. Because of this, we keep track of their addresses from
+ // the first pass so we can use them for the later simplification pass.
+ SmallPtrSet<Function*, 32> AddressTakenFunctions;
+
// Loop over all functions, marking arguments to those with their addresses
// taken or that are external as overdefined.
//
@@ -1748,9 +1770,13 @@ bool IPSCCP::runOnModule(Module &M) {
// If this function only has direct calls that we can see, we can track its
// arguments and return value aggressively, and can assume it is not called
// unless we see evidence to the contrary.
- if (F->hasLocalLinkage() && !AddressIsTaken(F)) {
- Solver.AddArgumentTrackedFunction(F);
- continue;
+ if (F->hasLocalLinkage()) {
+ if (AddressIsTaken(F))
+ AddressTakenFunctions.insert(F);
+ else {
+ Solver.AddArgumentTrackedFunction(F);
+ continue;
+ }
}
// Assume the function is called.
@@ -1935,7 +1961,7 @@ bool IPSCCP::runOnModule(Module &M) {
continue;
// We can only do this if we know that nothing else can call the function.
- if (!F->hasLocalLinkage() || AddressIsTaken(F))
+ if (!F->hasLocalLinkage() || AddressTakenFunctions.count(F))
continue;
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/SCCVN.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/SCCVN.cpp
deleted file mode 100644
index 9685a29..0000000
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/SCCVN.cpp
+++ /dev/null
@@ -1,716 +0,0 @@
-//===- SCCVN.cpp - Eliminate redundant values -----------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass performs global value numbering to eliminate fully redundant
-// instructions. This is based on the paper "SCC-based Value Numbering"
-// by Cooper, et al.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "sccvn"
-#include "llvm/Transforms/Scalar.h"
-#include "llvm/BasicBlock.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/Operator.h"
-#include "llvm/Value.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/PostOrderIterator.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/SparseBitVector.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
-#include "llvm/Support/CFG.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Transforms/Utils/SSAUpdater.h"
-using namespace llvm;
-
-STATISTIC(NumSCCVNInstr, "Number of instructions deleted by SCCVN");
-STATISTIC(NumSCCVNPhi, "Number of phis deleted by SCCVN");
-
-//===----------------------------------------------------------------------===//
-// ValueTable Class
-//===----------------------------------------------------------------------===//
-
-/// This class holds the mapping between values and value numbers. It is used
-/// as an efficient mechanism to determine the expression-wise equivalence of
-/// two values.
-namespace {
- struct Expression {
- enum ExpressionOpcode { ADD, FADD, SUB, FSUB, MUL, FMUL,
- UDIV, SDIV, FDIV, UREM, SREM,
- FREM, SHL, LSHR, ASHR, AND, OR, XOR, ICMPEQ,
- ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
- ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
- FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
- FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
- FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
- SHUFFLE, SELECT, TRUNC, ZEXT, SEXT, FPTOUI,
- FPTOSI, UITOFP, SITOFP, FPTRUNC, FPEXT,
- PTRTOINT, INTTOPTR, BITCAST, GEP, CALL, CONSTANT,
- INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };
-
- ExpressionOpcode opcode;
- const Type* type;
- SmallVector<uint32_t, 4> varargs;
-
- Expression() { }
- Expression(ExpressionOpcode o) : opcode(o) { }
-
- bool operator==(const Expression &other) const {
- if (opcode != other.opcode)
- return false;
- else if (opcode == EMPTY || opcode == TOMBSTONE)
- return true;
- else if (type != other.type)
- return false;
- else {
- if (varargs.size() != other.varargs.size())
- return false;
-
- for (size_t i = 0; i < varargs.size(); ++i)
- if (varargs[i] != other.varargs[i])
- return false;
-
- return true;
- }
- }
-
- bool operator!=(const Expression &other) const {
- return !(*this == other);
- }
- };
-
- class ValueTable {
- private:
- DenseMap<Value*, uint32_t> valueNumbering;
- DenseMap<Expression, uint32_t> expressionNumbering;
- DenseMap<Value*, uint32_t> constantsNumbering;
-
- uint32_t nextValueNumber;
-
- Expression::ExpressionOpcode getOpcode(BinaryOperator* BO);
- Expression::ExpressionOpcode getOpcode(CmpInst* C);
- Expression::ExpressionOpcode getOpcode(CastInst* C);
- Expression create_expression(BinaryOperator* BO);
- Expression create_expression(CmpInst* C);
- Expression create_expression(ShuffleVectorInst* V);
- Expression create_expression(ExtractElementInst* C);
- Expression create_expression(InsertElementInst* V);
- Expression create_expression(SelectInst* V);
- Expression create_expression(CastInst* C);
- Expression create_expression(GetElementPtrInst* G);
- Expression create_expression(CallInst* C);
- Expression create_expression(Constant* C);
- Expression create_expression(ExtractValueInst* C);
- Expression create_expression(InsertValueInst* C);
- public:
- ValueTable() : nextValueNumber(1) { }
- uint32_t computeNumber(Value *V);
- uint32_t lookup(Value *V);
- void add(Value *V, uint32_t num);
- void clear();
- void clearExpressions();
- void erase(Value *v);
- unsigned size();
- void verifyRemoved(const Value *) const;
- };
-}
-
-namespace llvm {
-template <> struct DenseMapInfo<Expression> {
- static inline Expression getEmptyKey() {
- return Expression(Expression::EMPTY);
- }
-
- static inline Expression getTombstoneKey() {
- return Expression(Expression::TOMBSTONE);
- }
-
- static unsigned getHashValue(const Expression e) {
- unsigned hash = e.opcode;
-
- hash = ((unsigned)((uintptr_t)e.type >> 4) ^
- (unsigned)((uintptr_t)e.type >> 9));
-
- for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
- E = e.varargs.end(); I != E; ++I)
- hash = *I + hash * 37;
-
- return hash;
- }
- static bool isEqual(const Expression &LHS, const Expression &RHS) {
- return LHS == RHS;
- }
-};
-template <>
-struct isPodLike<Expression> { static const bool value = true; };
-
-}
-
-//===----------------------------------------------------------------------===//
-// ValueTable Internal Functions
-//===----------------------------------------------------------------------===//
-Expression::ExpressionOpcode ValueTable::getOpcode(BinaryOperator* BO) {
- switch(BO->getOpcode()) {
- default: // THIS SHOULD NEVER HAPPEN
- llvm_unreachable("Binary operator with unknown opcode?");
- case Instruction::Add: return Expression::ADD;
- case Instruction::FAdd: return Expression::FADD;
- case Instruction::Sub: return Expression::SUB;
- case Instruction::FSub: return Expression::FSUB;
- case Instruction::Mul: return Expression::MUL;
- case Instruction::FMul: return Expression::FMUL;
- case Instruction::UDiv: return Expression::UDIV;
- case Instruction::SDiv: return Expression::SDIV;
- case Instruction::FDiv: return Expression::FDIV;
- case Instruction::URem: return Expression::UREM;
- case Instruction::SRem: return Expression::SREM;
- case Instruction::FRem: return Expression::FREM;
- case Instruction::Shl: return Expression::SHL;
- case Instruction::LShr: return Expression::LSHR;
- case Instruction::AShr: return Expression::ASHR;
- case Instruction::And: return Expression::AND;
- case Instruction::Or: return Expression::OR;
- case Instruction::Xor: return Expression::XOR;
- }
-}
-
-Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
- if (isa<ICmpInst>(C)) {
- switch (C->getPredicate()) {
- default: // THIS SHOULD NEVER HAPPEN
- llvm_unreachable("Comparison with unknown predicate?");
- case ICmpInst::ICMP_EQ: return Expression::ICMPEQ;
- case ICmpInst::ICMP_NE: return Expression::ICMPNE;
- case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
- case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
- case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
- case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
- case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
- case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
- case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
- case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
- }
- } else {
- switch (C->getPredicate()) {
- default: // THIS SHOULD NEVER HAPPEN
- llvm_unreachable("Comparison with unknown predicate?");
- case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
- case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
- case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
- case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
- case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
- case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
- case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
- case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
- case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
- case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
- case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
- case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
- case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
- case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
- }
- }
-}
-
-Expression::ExpressionOpcode ValueTable::getOpcode(CastInst* C) {
- switch(C->getOpcode()) {
- default: // THIS SHOULD NEVER HAPPEN
- llvm_unreachable("Cast operator with unknown opcode?");
- case Instruction::Trunc: return Expression::TRUNC;
- case Instruction::ZExt: return Expression::ZEXT;
- case Instruction::SExt: return Expression::SEXT;
- case Instruction::FPToUI: return Expression::FPTOUI;
- case Instruction::FPToSI: return Expression::FPTOSI;
- case Instruction::UIToFP: return Expression::UITOFP;
- case Instruction::SIToFP: return Expression::SITOFP;
- case Instruction::FPTrunc: return Expression::FPTRUNC;
- case Instruction::FPExt: return Expression::FPEXT;
- case Instruction::PtrToInt: return Expression::PTRTOINT;
- case Instruction::IntToPtr: return Expression::INTTOPTR;
- case Instruction::BitCast: return Expression::BITCAST;
- }
-}
-
-Expression ValueTable::create_expression(CallInst* C) {
- Expression e;
-
- e.type = C->getType();
- e.opcode = Expression::CALL;
-
- e.varargs.push_back(lookup(C->getCalledFunction()));
- for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
- I != E; ++I)
- e.varargs.push_back(lookup(*I));
-
- return e;
-}
-
-Expression ValueTable::create_expression(BinaryOperator* BO) {
- Expression e;
- e.varargs.push_back(lookup(BO->getOperand(0)));
- e.varargs.push_back(lookup(BO->getOperand(1)));
- e.type = BO->getType();
- e.opcode = getOpcode(BO);
-
- return e;
-}
-
-Expression ValueTable::create_expression(CmpInst* C) {
- Expression e;
-
- e.varargs.push_back(lookup(C->getOperand(0)));
- e.varargs.push_back(lookup(C->getOperand(1)));
- e.type = C->getType();
- e.opcode = getOpcode(C);
-
- return e;
-}
-
-Expression ValueTable::create_expression(CastInst* C) {
- Expression e;
-
- e.varargs.push_back(lookup(C->getOperand(0)));
- e.type = C->getType();
- e.opcode = getOpcode(C);
-
- return e;
-}
-
-Expression ValueTable::create_expression(ShuffleVectorInst* S) {
- Expression e;
-
- e.varargs.push_back(lookup(S->getOperand(0)));
- e.varargs.push_back(lookup(S->getOperand(1)));
- e.varargs.push_back(lookup(S->getOperand(2)));
- e.type = S->getType();
- e.opcode = Expression::SHUFFLE;
-
- return e;
-}
-
-Expression ValueTable::create_expression(ExtractElementInst* E) {
- Expression e;
-
- e.varargs.push_back(lookup(E->getOperand(0)));
- e.varargs.push_back(lookup(E->getOperand(1)));
- e.type = E->getType();
- e.opcode = Expression::EXTRACT;
-
- return e;
-}
-
-Expression ValueTable::create_expression(InsertElementInst* I) {
- Expression e;
-
- e.varargs.push_back(lookup(I->getOperand(0)));
- e.varargs.push_back(lookup(I->getOperand(1)));
- e.varargs.push_back(lookup(I->getOperand(2)));
- e.type = I->getType();
- e.opcode = Expression::INSERT;
-
- return e;
-}
-
-Expression ValueTable::create_expression(SelectInst* I) {
- Expression e;
-
- e.varargs.push_back(lookup(I->getCondition()));
- e.varargs.push_back(lookup(I->getTrueValue()));
- e.varargs.push_back(lookup(I->getFalseValue()));
- e.type = I->getType();
- e.opcode = Expression::SELECT;
-
- return e;
-}
-
-Expression ValueTable::create_expression(GetElementPtrInst* G) {
- Expression e;
-
- e.varargs.push_back(lookup(G->getPointerOperand()));
- e.type = G->getType();
- e.opcode = Expression::GEP;
-
- for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
- I != E; ++I)
- e.varargs.push_back(lookup(*I));
-
- return e;
-}
-
-Expression ValueTable::create_expression(ExtractValueInst* E) {
- Expression e;
-
- e.varargs.push_back(lookup(E->getAggregateOperand()));
- for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
- II != IE; ++II)
- e.varargs.push_back(*II);
- e.type = E->getType();
- e.opcode = Expression::EXTRACTVALUE;
-
- return e;
-}
-
-Expression ValueTable::create_expression(InsertValueInst* E) {
- Expression e;
-
- e.varargs.push_back(lookup(E->getAggregateOperand()));
- e.varargs.push_back(lookup(E->getInsertedValueOperand()));
- for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
- II != IE; ++II)
- e.varargs.push_back(*II);
- e.type = E->getType();
- e.opcode = Expression::INSERTVALUE;
-
- return e;
-}
-
-//===----------------------------------------------------------------------===//
-// ValueTable External Functions
-//===----------------------------------------------------------------------===//
-
-/// add - Insert a value into the table with a specified value number.
-void ValueTable::add(Value *V, uint32_t num) {
- valueNumbering[V] = num;
-}
-
-/// computeNumber - Returns the value number for the specified value, assigning
-/// it a new number if it did not have one before.
-uint32_t ValueTable::computeNumber(Value *V) {
- if (uint32_t v = valueNumbering[V])
- return v;
- else if (uint32_t v= constantsNumbering[V])
- return v;
-
- if (!isa<Instruction>(V)) {
- constantsNumbering[V] = nextValueNumber;
- return nextValueNumber++;
- }
-
- Instruction* I = cast<Instruction>(V);
- Expression exp;
- switch (I->getOpcode()) {
- case Instruction::Add:
- case Instruction::FAdd:
- case Instruction::Sub:
- case Instruction::FSub:
- case Instruction::Mul:
- case Instruction::FMul:
- case Instruction::UDiv:
- case Instruction::SDiv:
- case Instruction::FDiv:
- case Instruction::URem:
- case Instruction::SRem:
- case Instruction::FRem:
- case Instruction::Shl:
- case Instruction::LShr:
- case Instruction::AShr:
- case Instruction::And:
- case Instruction::Or :
- case Instruction::Xor:
- exp = create_expression(cast<BinaryOperator>(I));
- break;
- case Instruction::ICmp:
- case Instruction::FCmp:
- exp = create_expression(cast<CmpInst>(I));
- break;
- case Instruction::Trunc:
- case Instruction::ZExt:
- case Instruction::SExt:
- case Instruction::FPToUI:
- case Instruction::FPToSI:
- case Instruction::UIToFP:
- case Instruction::SIToFP:
- case Instruction::FPTrunc:
- case Instruction::FPExt:
- case Instruction::PtrToInt:
- case Instruction::IntToPtr:
- case Instruction::BitCast:
- exp = create_expression(cast<CastInst>(I));
- break;
- case Instruction::Select:
- exp = create_expression(cast<SelectInst>(I));
- break;
- case Instruction::ExtractElement:
- exp = create_expression(cast<ExtractElementInst>(I));
- break;
- case Instruction::InsertElement:
- exp = create_expression(cast<InsertElementInst>(I));
- break;
- case Instruction::ShuffleVector:
- exp = create_expression(cast<ShuffleVectorInst>(I));
- break;
- case Instruction::ExtractValue:
- exp = create_expression(cast<ExtractValueInst>(I));
- break;
- case Instruction::InsertValue:
- exp = create_expression(cast<InsertValueInst>(I));
- break;
- case Instruction::GetElementPtr:
- exp = create_expression(cast<GetElementPtrInst>(I));
- break;
- default:
- valueNumbering[V] = nextValueNumber;
- return nextValueNumber++;
- }
-
- uint32_t& e = expressionNumbering[exp];
- if (!e) e = nextValueNumber++;
- valueNumbering[V] = e;
-
- return e;
-}
-
-/// lookup - Returns the value number of the specified value. Returns 0 if
-/// the value has not yet been numbered.
-uint32_t ValueTable::lookup(Value *V) {
- if (!isa<Instruction>(V)) {
- if (!constantsNumbering.count(V))
- constantsNumbering[V] = nextValueNumber++;
- return constantsNumbering[V];
- }
-
- return valueNumbering[V];
-}
-
-/// clear - Remove all entries from the ValueTable
-void ValueTable::clear() {
- valueNumbering.clear();
- expressionNumbering.clear();
- constantsNumbering.clear();
- nextValueNumber = 1;
-}
-
-void ValueTable::clearExpressions() {
- expressionNumbering.clear();
- constantsNumbering.clear();
- nextValueNumber = 1;
-}
-
-/// erase - Remove a value from the value numbering
-void ValueTable::erase(Value *V) {
- valueNumbering.erase(V);
-}
-
-/// verifyRemoved - Verify that the value is removed from all internal data
-/// structures.
-void ValueTable::verifyRemoved(const Value *V) const {
- for (DenseMap<Value*, uint32_t>::const_iterator
- I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
- assert(I->first != V && "Inst still occurs in value numbering map!");
- }
-}
-
-//===----------------------------------------------------------------------===//
-// SCCVN Pass
-//===----------------------------------------------------------------------===//
-
-namespace {
-
- struct ValueNumberScope {
- ValueNumberScope* parent;
- DenseMap<uint32_t, Value*> table;
- SparseBitVector<128> availIn;
- SparseBitVector<128> availOut;
-
- ValueNumberScope(ValueNumberScope* p) : parent(p) { }
- };
-
- class SCCVN : public FunctionPass {
- bool runOnFunction(Function &F);
- public:
- static char ID; // Pass identification, replacement for typeid
- SCCVN() : FunctionPass(&ID) { }
-
- private:
- ValueTable VT;
- DenseMap<BasicBlock*, ValueNumberScope*> BBMap;
-
- // This transformation requires dominator info
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<DominatorTree>();
-
- AU.addPreserved<DominatorTree>();
- AU.setPreservesCFG();
- }
- };
-
- char SCCVN::ID = 0;
-}
-
-// createSCCVNPass - The public interface to this file...
-FunctionPass *llvm::createSCCVNPass() { return new SCCVN(); }
-
-static RegisterPass<SCCVN> X("sccvn",
- "SCC Value Numbering");
-
-static Value *lookupNumber(ValueNumberScope *Locals, uint32_t num) {
- while (Locals) {
- DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num);
- if (I != Locals->table.end())
- return I->second;
- Locals = Locals->parent;
- }
-
- return 0;
-}
-
-bool SCCVN::runOnFunction(Function& F) {
- // Implement the RPO version of the SCCVN algorithm. Conceptually,
- // we optimistically assume that all instructions with the same opcode have
- // the same VN. Then we deepen our comparison by one level, so that all
- // instructions whose operands have the same opcodes get the same VN. We
- // iterate this process until the partitioning stops changing, at which
- // point we have computed a full numbering.
- ReversePostOrderTraversal<Function*> RPOT(&F);
- bool done = false;
- while (!done) {
- done = true;
- VT.clearExpressions();
- for (ReversePostOrderTraversal<Function*>::rpo_iterator I = RPOT.begin(),
- E = RPOT.end(); I != E; ++I) {
- BasicBlock* BB = *I;
- for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
- BI != BE; ++BI) {
- uint32_t origVN = VT.lookup(BI);
- uint32_t newVN = VT.computeNumber(BI);
- if (origVN != newVN)
- done = false;
- }
- }
- }
-
- // Now, do a dominator walk, eliminating simple, dominated redundancies as we
- // go. Also, build the ValueNumberScope structure that will be used for
- // computing full availability.
- DominatorTree& DT = getAnalysis<DominatorTree>();
- bool changed = false;
- for (df_iterator<DomTreeNode*> DI = df_begin(DT.getRootNode()),
- DE = df_end(DT.getRootNode()); DI != DE; ++DI) {
- BasicBlock* BB = DI->getBlock();
- if (DI->getIDom())
- BBMap[BB] = new ValueNumberScope(BBMap[DI->getIDom()->getBlock()]);
- else
- BBMap[BB] = new ValueNumberScope(0);
-
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
- uint32_t num = VT.lookup(I);
- Value* repl = lookupNumber(BBMap[BB], num);
-
- if (repl) {
- if (isa<PHINode>(I))
- ++NumSCCVNPhi;
- else
- ++NumSCCVNInstr;
- I->replaceAllUsesWith(repl);
- Instruction* OldInst = I;
- ++I;
- BBMap[BB]->table[num] = repl;
- OldInst->eraseFromParent();
- changed = true;
- } else {
- BBMap[BB]->table[num] = I;
- BBMap[BB]->availOut.set(num);
-
- ++I;
- }
- }
- }
-
- // Perform a forward data-flow to compute availability at all points on
- // the CFG.
- do {
- changed = false;
- for (ReversePostOrderTraversal<Function*>::rpo_iterator I = RPOT.begin(),
- E = RPOT.end(); I != E; ++I) {
- BasicBlock* BB = *I;
- ValueNumberScope *VNS = BBMap[BB];
-
- SparseBitVector<128> preds;
- bool first = true;
- for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
- PI != PE; ++PI) {
- if (first) {
- preds = BBMap[*PI]->availOut;
- first = false;
- } else {
- preds &= BBMap[*PI]->availOut;
- }
- }
-
- changed |= (VNS->availIn |= preds);
- changed |= (VNS->availOut |= preds);
- }
- } while (changed);
-
- // Use full availability information to perform non-dominated replacements.
- SSAUpdater SSU;
- for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
- if (!BBMap.count(FI)) continue;
- for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
- BI != BE; ) {
- uint32_t num = VT.lookup(BI);
- if (!BBMap[FI]->availIn.test(num)) {
- ++BI;
- continue;
- }
-
- SSU.Initialize(BI);
-
- SmallPtrSet<BasicBlock*, 8> visited;
- SmallVector<BasicBlock*, 8> stack;
- visited.insert(FI);
- for (pred_iterator PI = pred_begin(FI), PE = pred_end(FI);
- PI != PE; ++PI)
- if (!visited.count(*PI))
- stack.push_back(*PI);
-
- while (!stack.empty()) {
- BasicBlock* CurrBB = stack.pop_back_val();
- visited.insert(CurrBB);
-
- ValueNumberScope* S = BBMap[CurrBB];
- if (S->table.count(num)) {
- SSU.AddAvailableValue(CurrBB, S->table[num]);
- } else {
- for (pred_iterator PI = pred_begin(CurrBB), PE = pred_end(CurrBB);
- PI != PE; ++PI)
- if (!visited.count(*PI))
- stack.push_back(*PI);
- }
- }
-
- Value* repl = SSU.GetValueInMiddleOfBlock(FI);
- BI->replaceAllUsesWith(repl);
- Instruction* CurInst = BI;
- ++BI;
- BBMap[FI]->table[num] = repl;
- if (isa<PHINode>(CurInst))
- ++NumSCCVNPhi;
- else
- ++NumSCCVNInstr;
-
- CurInst->eraseFromParent();
- }
- }
-
- VT.clear();
- for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
- I = BBMap.begin(), E = BBMap.end(); I != E; ++I)
- delete I->second;
- BBMap.clear();
-
- return changed;
-}
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/Scalar.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/Scalar.cpp
index b54565c..cb03423 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/Scalar.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/Scalar.cpp
@@ -14,6 +14,8 @@
#include "llvm-c/Transforms/Scalar.h"
#include "llvm/PassManager.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;
@@ -90,6 +92,11 @@ void LLVMAddScalarReplAggregatesPass(LLVMPassManagerRef PM) {
unwrap(PM)->add(createScalarReplAggregatesPass());
}
+void LLVMAddScalarReplAggregatesPassWithThreshold(LLVMPassManagerRef PM,
+ int Threshold) {
+ unwrap(PM)->add(createScalarReplAggregatesPass(Threshold));
+}
+
void LLVMAddSimplifyLibCallsPass(LLVMPassManagerRef PM) {
unwrap(PM)->add(createSimplifyLibCallsPass());
}
@@ -105,3 +112,7 @@ void LLVMAddConstantPropagationPass(LLVMPassManagerRef PM) {
void LLVMAddDemoteMemoryToRegisterPass(LLVMPassManagerRef PM) {
unwrap(PM)->add(createDemoteRegisterToMemoryPass());
}
+
+void LLVMAddVerifierPass(LLVMPassManagerRef PM) {
+ unwrap(PM)->add(createVerifierPass());
+}
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index bbe6270..fee317d 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -28,6 +28,7 @@
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
@@ -51,7 +52,7 @@ STATISTIC(NumGlobals, "Number of allocas copied from constant global");
namespace {
struct SROA : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- explicit SROA(signed T = -1) : FunctionPass(&ID) {
+ explicit SROA(signed T = -1) : FunctionPass(ID) {
if (T == -1)
SRThreshold = 128;
else
@@ -114,8 +115,7 @@ namespace {
void DoScalarReplacement(AllocaInst *AI,
std::vector<AllocaInst*> &WorkList);
void DeleteDeadInstructions();
- AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocaInst *Base);
-
+
void RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
SmallVector<AllocaInst*, 32> &NewElts);
void RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
@@ -130,19 +130,13 @@ namespace {
void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts);
- bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
- bool &SawVec, uint64_t Offset, unsigned AllocaSize);
- void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);
- Value *ConvertScalar_ExtractValue(Value *NV, const Type *ToType,
- uint64_t Offset, IRBuilder<> &Builder);
- Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
- uint64_t Offset, IRBuilder<> &Builder);
- static Instruction *isOnlyCopiedFromConstantGlobal(AllocaInst *AI);
+ static MemTransferInst *isOnlyCopiedFromConstantGlobal(AllocaInst *AI);
};
}
char SROA::ID = 0;
-static RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");
+INITIALIZE_PASS(SROA, "scalarrepl",
+ "Scalar Replacement of Aggregates", false, false);
// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
@@ -150,6 +144,618 @@ FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
}
+//===----------------------------------------------------------------------===//
+// Convert To Scalar Optimization.
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// ConvertToScalarInfo - This class implements the "Convert To Scalar"
+/// optimization, which scans the uses of an alloca and determines if it can
+/// rewrite it in terms of a single new alloca that can be mem2reg'd.
+class ConvertToScalarInfo {
+ /// AllocaSize - The size of the alloca being considered.
+ unsigned AllocaSize;
+ const TargetData &TD;
+
+ /// IsNotTrivial - This is set to true if there is some access to the object
+ /// which means that mem2reg can't promote it.
+ bool IsNotTrivial;
+
+ /// VectorTy - This tracks the type that we should promote the vector to if
+ /// it is possible to turn it into a vector. This starts out null, and if it
+ /// isn't possible to turn into a vector type, it gets set to VoidTy.
+ const Type *VectorTy;
+
+ /// HadAVector - True if there is at least one vector access to the alloca.
+ /// We don't want to turn random arrays into vectors and use vector element
+ /// insert/extract, but if there are element accesses to something that is
+ /// also declared as a vector, we do want to promote to a vector.
+ bool HadAVector;
+
+public:
+ explicit ConvertToScalarInfo(unsigned Size, const TargetData &td)
+ : AllocaSize(Size), TD(td) {
+ IsNotTrivial = false;
+ VectorTy = 0;
+ HadAVector = false;
+ }
+
+ AllocaInst *TryConvert(AllocaInst *AI);
+
+private:
+ bool CanConvertToScalar(Value *V, uint64_t Offset);
+ void MergeInType(const Type *In, uint64_t Offset);
+ void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);
+
+ Value *ConvertScalar_ExtractValue(Value *NV, const Type *ToType,
+ uint64_t Offset, IRBuilder<> &Builder);
+ Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
+ uint64_t Offset, IRBuilder<> &Builder);
+};
+} // end anonymous namespace.
+
+
+/// IsVerbotenVectorType - Return true if this is a vector type ScalarRepl isn't
+/// allowed to form. We do this to avoid MMX types, which is a complete hack,
+/// but is required until the backend is fixed.
+static bool IsVerbotenVectorType(const VectorType *VTy, const Instruction *I) {
+ StringRef Triple(I->getParent()->getParent()->getParent()->getTargetTriple());
+ if (!Triple.startswith("i386") &&
+ !Triple.startswith("x86_64"))
+ return false;
+
+ // Reject all the MMX vector types.
+ switch (VTy->getNumElements()) {
+ default: return false;
+ case 1: return VTy->getElementType()->isIntegerTy(64);
+ case 2: return VTy->getElementType()->isIntegerTy(32);
+ case 4: return VTy->getElementType()->isIntegerTy(16);
+ case 8: return VTy->getElementType()->isIntegerTy(8);
+ }
+}
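
The check above amounts to asking whether the vector totals 64 bits on an x86 triple. A standalone sketch of the same test in plain C++, without LLVM's types (the helper name is made up for illustration):

#include <string>

// Sketch only: mirrors the shape of the MMX screen above with plain integers
// instead of llvm::VectorType. A vector is rejected when it has one of the
// 64-bit-total shapes (1 x i64, 2 x i32, 4 x i16, 8 x i8) on an x86 target.
static bool looksLikeMMXVector(const std::string &triple,
                               unsigned numElements, unsigned elementBits) {
  if (triple.compare(0, 4, "i386") != 0 && triple.compare(0, 6, "x86_64") != 0)
    return false;                        // only x86 targets are affected
  switch (numElements) {
  default: return false;
  case 1:  return elementBits == 64;
  case 2:  return elementBits == 32;
  case 4:  return elementBits == 16;
  case 8:  return elementBits == 8;
  }
}
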
+
+
+/// TryConvert - Analyze the specified alloca, and if it is safe to do so,
+/// rewrite it to be a new alloca which is mem2reg'able. This returns the new
+/// alloca if possible or null if not.
+AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
+ // If we can't convert this scalar, or if mem2reg can trivially do it, bail
+ // out.
+ if (!CanConvertToScalar(AI, 0) || !IsNotTrivial)
+ return 0;
+
+ // If we were able to find a vector type that can handle this with
+ // insert/extract elements, and if there was at least one use that had
+ // a vector type, promote this to a vector. We don't want to promote
+ // random stuff that doesn't use vectors (e.g. <9 x double>) because then
+ // we just get a lot of insert/extracts. If at least one vector is
+ // involved, then we probably really do have a union of vector/array.
+ const Type *NewTy;
+ if (VectorTy && VectorTy->isVectorTy() && HadAVector &&
+ !IsVerbotenVectorType(cast<VectorType>(VectorTy), AI)) {
+ DEBUG(dbgs() << "CONVERT TO VECTOR: " << *AI << "\n TYPE = "
+ << *VectorTy << '\n');
+ NewTy = VectorTy; // Use the vector type.
+ } else {
+ DEBUG(dbgs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n");
+ // Create and insert the integer alloca.
+ NewTy = IntegerType::get(AI->getContext(), AllocaSize*8);
+ }
+ AllocaInst *NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
+ ConvertUsesToScalar(AI, NewAI, 0);
+ return NewAI;
+}
+
+/// MergeInType - Add the 'In' type to the accumulated vector type (VectorTy)
+/// so far at the offset specified by Offset (which is specified in bytes).
+///
+/// There are two cases we handle here:
+/// 1) A union of vector types of the same size and potentially its elements.
+/// Here we turn element accesses into insert/extract element operations.
+/// This promotes a <4 x float> with a store of float to the third element
+/// into a <4 x float> that uses insert element.
+/// 2) A fully general blob of memory, which we turn into some (potentially
+/// large) integer type with extract and insert operations where the loads
+/// and stores would mutate the memory. We mark this by setting VectorTy
+/// to VoidTy.
+void ConvertToScalarInfo::MergeInType(const Type *In, uint64_t Offset) {
+ // If we already decided to turn this into a blob of integer memory, there is
+ // nothing to be done.
+ if (VectorTy && VectorTy->isVoidTy())
+ return;
+
+ // If this could be contributing to a vector, analyze it.
+
+ // If the In type is a vector that is the same size as the alloca, see if it
+ // matches the existing VectorTy.
+ if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
+ // Remember if we saw a vector type.
+ HadAVector = true;
+
+ if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
+ // If we're storing/loading a vector of the right size, allow it as a
+ // vector. If this is the first vector we see, remember the type so that
+ // we know the element size. If this is a subsequent access, ignore it
+ // even if it is a differing type but the same size. Worst case we can
+ // bitcast the resultant vectors.
+ if (VectorTy == 0)
+ VectorTy = VInTy;
+ return;
+ }
+ } else if (In->isFloatTy() || In->isDoubleTy() ||
+ (In->isIntegerTy() && In->getPrimitiveSizeInBits() >= 8 &&
+ isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
+ // If we're accessing something that could be an element of a vector, see
+ // if the implied vector agrees with what we already have and if Offset is
+ // compatible with it.
+ unsigned EltSize = In->getPrimitiveSizeInBits()/8;
+ if (Offset % EltSize == 0 && AllocaSize % EltSize == 0 &&
+ (VectorTy == 0 ||
+ cast<VectorType>(VectorTy)->getElementType()
+ ->getPrimitiveSizeInBits()/8 == EltSize)) {
+ if (VectorTy == 0)
+ VectorTy = VectorType::get(In, AllocaSize/EltSize);
+ return;
+ }
+ }
+
+ // Otherwise, we have a case that we can't handle with an optimized vector
+ // form. We can still turn this into a large integer.
+ VectorTy = Type::getVoidTy(In->getContext());
+}
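
A reduced model of the merge decision above, tracking only a candidate element size in bytes instead of an LLVM type (a sketch that assumes every access is a plain scalar; the struct and its members are illustrative):

#include <cstdint>

// Sketch only: the element-size bookkeeping behind MergeInType above.
// elementSize == UNSET means no access seen yet; elementSize == 0 stands in
// for VoidTy, i.e. "give up on a vector and use one big integer".
struct MergeSketch {
  static const unsigned UNSET = ~0u;
  unsigned allocaSize;            // size of the alloca, in bytes
  unsigned elementSize;           // candidate vector element size, in bytes

  explicit MergeSketch(unsigned size) : allocaSize(size), elementSize(UNSET) {}

  void mergeAccess(unsigned accessSize, uint64_t offset) {
    if (elementSize == 0)
      return;                               // already forced to the integer form
    bool fits = accessSize != 0 &&
                offset % accessSize == 0 &&
                allocaSize % accessSize == 0 &&
                (elementSize == UNSET || elementSize == accessSize);
    elementSize = fits ? accessSize : 0;    // keep the candidate or fall back
  }
};
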
+
+/// CanConvertToScalar - V is a pointer. If we can convert the pointee and all
+/// its accesses to a single vector type, return true and set VectorTy to
+/// the new type. If we could convert the alloca into a single promotable
+/// integer, return true but set VectorTy to VoidTy. Further, if the use is
+/// not a completely trivial use that mem2reg could promote, set IsNotTrivial.
+/// Offset is the current offset from the base of the alloca being analyzed.
+///
+/// If we see at least one access to the value as a vector type, set the
+/// HadAVector flag.
+bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) {
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
+ Instruction *User = cast<Instruction>(*UI);
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
+ // Don't break volatile loads.
+ if (LI->isVolatile())
+ return false;
+ MergeInType(LI->getType(), Offset);
+ continue;
+ }
+
+ if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
+ // Storing the pointer, not into the value?
+ if (SI->getOperand(0) == V || SI->isVolatile()) return false;
+ MergeInType(SI->getOperand(0)->getType(), Offset);
+ continue;
+ }
+
+ if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
+ IsNotTrivial = true; // Can't be mem2reg'd.
+ if (!CanConvertToScalar(BCI, Offset))
+ return false;
+ continue;
+ }
+
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
+ // If this is a GEP with variable indices, we can't handle it.
+ if (!GEP->hasAllConstantIndices())
+ return false;
+
+ // Compute the offset that this GEP adds to the pointer.
+ SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
+ uint64_t GEPOffset = TD.getIndexedOffset(GEP->getPointerOperandType(),
+ &Indices[0], Indices.size());
+ // See if all uses can be converted.
+ if (!CanConvertToScalar(GEP, Offset+GEPOffset))
+ return false;
+ IsNotTrivial = true; // Can't be mem2reg'd.
+ continue;
+ }
+
+ // If this is a constant sized memset of a constant value (e.g. 0) we can
+ // handle it.
+ if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
+ // Store of constant value and constant size.
+ if (!isa<ConstantInt>(MSI->getValue()) ||
+ !isa<ConstantInt>(MSI->getLength()))
+ return false;
+ IsNotTrivial = true; // Can't be mem2reg'd.
+ continue;
+ }
+
+ // If this is a memcpy or memmove into or out of the whole allocation, we
+ // can handle it like a load or store of the scalar type.
+ if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
+ ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength());
+ if (Len == 0 || Len->getZExtValue() != AllocaSize || Offset != 0)
+ return false;
+
+ IsNotTrivial = true; // Can't be mem2reg'd.
+ continue;
+ }
+
+ // Otherwise, we cannot handle this!
+ return false;
+ }
+
+ return true;
+}
+
+/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
+/// directly. This happens when we are converting an "integer union" to a
+/// single integer scalar, or when we are converting a "vector union" to a
+/// vector with insert/extractelement instructions.
+///
+/// Offset is an offset from the original alloca, in bits that need to be
+/// shifted to the right. By the end of this, there should be no uses of Ptr.
+void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
+ uint64_t Offset) {
+ while (!Ptr->use_empty()) {
+ Instruction *User = cast<Instruction>(Ptr->use_back());
+
+ if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
+ ConvertUsesToScalar(CI, NewAI, Offset);
+ CI->eraseFromParent();
+ continue;
+ }
+
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
+ // Compute the offset that this GEP adds to the pointer.
+ SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
+ uint64_t GEPOffset = TD.getIndexedOffset(GEP->getPointerOperandType(),
+ &Indices[0], Indices.size());
+ ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8);
+ GEP->eraseFromParent();
+ continue;
+ }
+
+ IRBuilder<> Builder(User->getParent(), User);
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
+ // The load is a bit extract from NewAI shifted right by Offset bits.
+ Value *LoadedVal = Builder.CreateLoad(NewAI, "tmp");
+ Value *NewLoadVal
+ = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset, Builder);
+ LI->replaceAllUsesWith(NewLoadVal);
+ LI->eraseFromParent();
+ continue;
+ }
+
+ if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
+ assert(SI->getOperand(0) != Ptr && "Consistency error!");
+ Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
+ Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
+ Builder);
+ Builder.CreateStore(New, NewAI);
+ SI->eraseFromParent();
+
+ // If the load we just inserted is now dead, then the inserted store
+ // overwrote the entire thing.
+ if (Old->use_empty())
+ Old->eraseFromParent();
+ continue;
+ }
+
+ // If this is a constant sized memset of a constant value (e.g. 0) we can
+ // transform it into a store of the expanded constant value.
+ if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
+ assert(MSI->getRawDest() == Ptr && "Consistency error!");
+ unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
+ if (NumBytes != 0) {
+ unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();
+
+ // Compute the value replicated the right number of times.
+ APInt APVal(NumBytes*8, Val);
+
+ // Splat the value if non-zero.
+ if (Val)
+ for (unsigned i = 1; i != NumBytes; ++i)
+ APVal |= APVal << 8;
+
+ Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
+ Value *New = ConvertScalar_InsertValue(
+ ConstantInt::get(User->getContext(), APVal),
+ Old, Offset, Builder);
+ Builder.CreateStore(New, NewAI);
+
+ // If the load we just inserted is now dead, then the memset overwrote
+ // the entire thing.
+ if (Old->use_empty())
+ Old->eraseFromParent();
+ }
+ MSI->eraseFromParent();
+ continue;
+ }
+
+ // If this is a memcpy or memmove into or out of the whole allocation, we
+ // can handle it like a load or store of the scalar type.
+ if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
+ assert(Offset == 0 && "must be store to start of alloca");
+
+ // If the source and destination are both to the same alloca, then this is
+ // a noop copy-to-self, just delete it. Otherwise, emit a load and store
+ // as appropriate.
+ AllocaInst *OrigAI = cast<AllocaInst>(Ptr->getUnderlyingObject(0));
+
+ if (MTI->getSource()->getUnderlyingObject(0) != OrigAI) {
+ // Dest must be OrigAI, change this to be a load from the original
+ // pointer (bitcasted), then a store to our new alloca.
+ assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
+ Value *SrcPtr = MTI->getSource();
+ SrcPtr = Builder.CreateBitCast(SrcPtr, NewAI->getType());
+
+ LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
+ SrcVal->setAlignment(MTI->getAlignment());
+ Builder.CreateStore(SrcVal, NewAI);
+ } else if (MTI->getDest()->getUnderlyingObject(0) != OrigAI) {
+ // Src must be OrigAI, change this to be a load from NewAI then a store
+ // through the original dest pointer (bitcasted).
+ assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
+ LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");
+
+ Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), NewAI->getType());
+ StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
+ NewStore->setAlignment(MTI->getAlignment());
+ } else {
+ // Noop transfer. Src == Dst
+ }
+
+ MTI->eraseFromParent();
+ continue;
+ }
+
+ llvm_unreachable("Unsupported operation!");
+ }
+}
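
The memset branch above turns a constant memset into a store of the fill byte replicated across the alloca's width. The same splat on a plain 64-bit integer (a sketch, not the APInt code path):

#include <cassert>
#include <cstdint>

// Sketch only: the byte splat used for the memset case above, on uint64_t
// instead of APInt. splatByte(0xAB, 4) == 0xABABABABull.
static uint64_t splatByte(uint8_t value, unsigned numBytes) {
  assert(numBytes >= 1 && numBytes <= 8 && "sketch is limited to 64 bits");
  uint64_t result = value;
  for (unsigned i = 1; i != numBytes; ++i)
    result |= result << 8;              // replicate the low byte upward
  return result;
}
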
+
+/// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer
+/// or vector value FromVal, extracting the bits from the offset specified by
+/// Offset. This returns the value, which is of type ToType.
+///
+/// This happens when we are converting an "integer union" to a single
+/// integer scalar, or when we are converting a "vector union" to a vector with
+/// insert/extractelement instructions.
+///
+/// Offset is an offset from the original alloca, in bits that need to be
+/// shifted to the right.
+Value *ConvertToScalarInfo::
+ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
+ uint64_t Offset, IRBuilder<> &Builder) {
+ // If the load is of the whole new alloca, no conversion is needed.
+ if (FromVal->getType() == ToType && Offset == 0)
+ return FromVal;
+
+ // If the result alloca is a vector type, this is either an element
+ // access or a bitcast to another vector type of the same size.
+ if (const VectorType *VTy = dyn_cast<VectorType>(FromVal->getType())) {
+ if (ToType->isVectorTy())
+ return Builder.CreateBitCast(FromVal, ToType, "tmp");
+
+ // Otherwise it must be an element access.
+ unsigned Elt = 0;
+ if (Offset) {
+ unsigned EltSize = TD.getTypeAllocSizeInBits(VTy->getElementType());
+ Elt = Offset/EltSize;
+ assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
+ }
+ // Return the element extracted out of it.
+ Value *V = Builder.CreateExtractElement(FromVal, ConstantInt::get(
+ Type::getInt32Ty(FromVal->getContext()), Elt), "tmp");
+ if (V->getType() != ToType)
+ V = Builder.CreateBitCast(V, ToType, "tmp");
+ return V;
+ }
+
+ // If ToType is a first class aggregate, extract out each of the pieces and
+ // use insertvalue's to form the FCA.
+ if (const StructType *ST = dyn_cast<StructType>(ToType)) {
+ const StructLayout &Layout = *TD.getStructLayout(ST);
+ Value *Res = UndefValue::get(ST);
+ for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
+ Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
+ Offset+Layout.getElementOffsetInBits(i),
+ Builder);
+ Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
+ }
+ return Res;
+ }
+
+ if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
+ uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
+ Value *Res = UndefValue::get(AT);
+ for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
+ Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
+ Offset+i*EltSize, Builder);
+ Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
+ }
+ return Res;
+ }
+
+ // Otherwise, this must be a union that was converted to an integer value.
+ const IntegerType *NTy = cast<IntegerType>(FromVal->getType());
+
+ // If this is a big-endian system and the load is narrower than the
+ // full alloca type, we need to do a shift to get the right bits.
+ int ShAmt = 0;
+ if (TD.isBigEndian()) {
+ // On big-endian machines, the lowest bit is stored at the bit offset
+ // from the pointer given by getTypeStoreSizeInBits. This matters for
+ // integers with a bitwidth that is not a multiple of 8.
+ ShAmt = TD.getTypeStoreSizeInBits(NTy) -
+ TD.getTypeStoreSizeInBits(ToType) - Offset;
+ } else {
+ ShAmt = Offset;
+ }
+
+ // Note: we support negative bitwidths (with shl) which are not defined.
+ // We do this to support (f.e.) loads off the end of a structure where
+ // only some bits are used.
+ if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
+ FromVal = Builder.CreateLShr(FromVal,
+ ConstantInt::get(FromVal->getType(),
+ ShAmt), "tmp");
+ else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
+ FromVal = Builder.CreateShl(FromVal,
+ ConstantInt::get(FromVal->getType(),
+ -ShAmt), "tmp");
+
+ // Finally, unconditionally truncate the integer to the right width.
+ unsigned LIBitWidth = TD.getTypeSizeInBits(ToType);
+ if (LIBitWidth < NTy->getBitWidth())
+ FromVal =
+ Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
+ LIBitWidth), "tmp");
+ else if (LIBitWidth > NTy->getBitWidth())
+ FromVal =
+ Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(),
+ LIBitWidth), "tmp");
+
+ // If the result is an integer, this is a trunc or bitcast.
+ if (ToType->isIntegerTy()) {
+ // Should be done.
+ } else if (ToType->isFloatingPointTy() || ToType->isVectorTy()) {
+ // Just do a bitcast, we know the sizes match up.
+ FromVal = Builder.CreateBitCast(FromVal, ToType, "tmp");
+ } else {
+ // Otherwise must be a pointer.
+ FromVal = Builder.CreateIntToPtr(FromVal, ToType, "tmp");
+ }
+ assert(FromVal->getType() == ToType && "Didn't convert right?");
+ return FromVal;
+}
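
The integer tail of the extraction above is ordinary shift-and-truncate arithmetic. A sketch of it on 64-bit values, ignoring the store-size vs. bit-size distinction and the vector/aggregate cases (names are illustrative):

#include <cstdint>

// Sketch only: extract a fieldBits-wide value that lives offsetBits into an
// allocaBits-wide integer, as ConvertScalar_ExtractValue does above.
static uint64_t extractBits(uint64_t allocaValue, unsigned allocaBits,
                            unsigned fieldBits, unsigned offsetBits,
                            bool bigEndian) {
  // Big-endian targets store the field at the other end of the integer, so
  // the shift amount is measured from that side instead.
  int shAmt = bigEndian ? int(allocaBits) - int(fieldBits) - int(offsetBits)
                        : int(offsetBits);
  if (shAmt > 0 && unsigned(shAmt) < allocaBits)
    allocaValue >>= unsigned(shAmt);
  else if (shAmt < 0 && unsigned(-shAmt) < allocaBits)
    allocaValue <<= unsigned(-shAmt);   // access running off the end of the object
  if (fieldBits < 64)
    allocaValue &= (uint64_t(1) << fieldBits) - 1;  // truncate to the field width
  return allocaValue;
}
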
+
+/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer
+/// or vector value "Old" at the offset specified by Offset.
+///
+/// This happens when we are converting an "integer union" to a
+/// single integer scalar, or when we are converting a "vector union" to a
+/// vector with insert/extractelement instructions.
+///
+/// Offset is an offset from the original alloca, in bits that need to be
+/// shifted to the right.
+Value *ConvertToScalarInfo::
+ConvertScalar_InsertValue(Value *SV, Value *Old,
+ uint64_t Offset, IRBuilder<> &Builder) {
+ // Convert the stored type to the actual type, shift it left to insert
+ // then 'or' into place.
+ const Type *AllocaType = Old->getType();
+ LLVMContext &Context = Old->getContext();
+
+ if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
+ uint64_t VecSize = TD.getTypeAllocSizeInBits(VTy);
+ uint64_t ValSize = TD.getTypeAllocSizeInBits(SV->getType());
+
+ // Changing the whole vector with memset or with an access of a different
+ // vector type?
+ if (ValSize == VecSize)
+ return Builder.CreateBitCast(SV, AllocaType, "tmp");
+
+ uint64_t EltSize = TD.getTypeAllocSizeInBits(VTy->getElementType());
+
+ // Must be an element insertion.
+ unsigned Elt = Offset/EltSize;
+
+ if (SV->getType() != VTy->getElementType())
+ SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");
+
+ SV = Builder.CreateInsertElement(Old, SV,
+ ConstantInt::get(Type::getInt32Ty(SV->getContext()), Elt),
+ "tmp");
+ return SV;
+ }
+
+ // If SV is a first-class aggregate value, insert each value recursively.
+ if (const StructType *ST = dyn_cast<StructType>(SV->getType())) {
+ const StructLayout &Layout = *TD.getStructLayout(ST);
+ for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
+ Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
+ Old = ConvertScalar_InsertValue(Elt, Old,
+ Offset+Layout.getElementOffsetInBits(i),
+ Builder);
+ }
+ return Old;
+ }
+
+ if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
+ uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
+ for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
+ Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
+ Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder);
+ }
+ return Old;
+ }
+
+ // If SV is a float, convert it to the appropriate integer type.
+ // If it is a pointer, do the same.
+ unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType());
+ unsigned DestWidth = TD.getTypeSizeInBits(AllocaType);
+ unsigned SrcStoreWidth = TD.getTypeStoreSizeInBits(SV->getType());
+ unsigned DestStoreWidth = TD.getTypeStoreSizeInBits(AllocaType);
+ if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy())
+ SV = Builder.CreateBitCast(SV,
+ IntegerType::get(SV->getContext(),SrcWidth), "tmp");
+ else if (SV->getType()->isPointerTy())
+ SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getContext()), "tmp");
+
+ // Zero extend or truncate the value if needed.
+ if (SV->getType() != AllocaType) {
+ if (SV->getType()->getPrimitiveSizeInBits() <
+ AllocaType->getPrimitiveSizeInBits())
+ SV = Builder.CreateZExt(SV, AllocaType, "tmp");
+ else {
+ // Truncation may be needed if storing more than the alloca can hold
+ // (undefined behavior).
+ SV = Builder.CreateTrunc(SV, AllocaType, "tmp");
+ SrcWidth = DestWidth;
+ SrcStoreWidth = DestStoreWidth;
+ }
+ }
+
+ // If this is a big-endian system and the store is narrower than the
+ // full alloca type, we need to do a shift to get the right bits.
+ int ShAmt = 0;
+ if (TD.isBigEndian()) {
+ // On big-endian machines, the lowest bit is stored at the bit offset
+ // from the pointer given by getTypeStoreSizeInBits. This matters for
+ // integers with a bitwidth that is not a multiple of 8.
+ ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
+ } else {
+ ShAmt = Offset;
+ }
+
+ // Note: we support negative bitwidths (with shr) which are not defined.
+ // We do this to support (f.e.) stores off the end of a structure where
+ // only some bits in the structure are set.
+ APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
+ if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
+ SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(),
+ ShAmt), "tmp");
+ Mask <<= ShAmt;
+ } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
+ SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(),
+ -ShAmt), "tmp");
+ Mask = Mask.lshr(-ShAmt);
+ }
+
+ // Mask out the bits we are about to insert from the old value, and or
+ // in the new bits.
+ if (SrcWidth != DestWidth) {
+ assert(DestWidth > SrcWidth);
+ Old = Builder.CreateAnd(Old, ConstantInt::get(Context, ~Mask), "mask");
+ SV = Builder.CreateOr(Old, SV, "ins");
+ }
+ return SV;
+}
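
The matching store direction: shift the new field into position, mask out the old bits, and or the pieces together. A little-endian-only sketch on 64-bit values (same caveats as the extract sketch above):

#include <cstdint>

// Sketch only: insert a fieldBits-wide value at offsetBits into oldValue,
// mirroring the mask/shift/or tail above.
static uint64_t insertBits(uint64_t oldValue, uint64_t newField,
                           unsigned fieldBits, unsigned offsetBits) {
  uint64_t mask = fieldBits >= 64 ? ~0ull : ((uint64_t(1) << fieldBits) - 1);
  mask <<= offsetBits;                        // the bits being replaced
  newField = (newField << offsetBits) & mask; // position the new field
  return (oldValue & ~mask) | newField;       // keep the rest, drop in the field
}
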
+
+
+//===----------------------------------------------------------------------===//
+// SRoA Driver
+//===----------------------------------------------------------------------===//
+
+
bool SROA::runOnFunction(Function &F) {
TD = getAnalysisIfAvailable<TargetData>();
@@ -202,6 +808,7 @@ bool SROA::performPromotion(Function &F) {
return Changed;
}
+
/// ShouldAttemptScalarRepl - Decide if an alloca is a good candidate for
/// SROA. It must be a struct or array type with a small number of elements.
static bool ShouldAttemptScalarRepl(AllocaInst *AI) {
@@ -216,6 +823,7 @@ static bool ShouldAttemptScalarRepl(AllocaInst *AI) {
return false;
}
+
// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function, removing
// them if they are only used by getelementptr instructions.
@@ -223,7 +831,7 @@ static bool ShouldAttemptScalarRepl(AllocaInst *AI) {
bool SROA::performScalarRepl(Function &F) {
std::vector<AllocaInst*> WorkList;
- // Scan the entry basic block, adding any alloca's and mallocs to the worklist
+ // Scan the entry basic block, adding allocas to the worklist.
BasicBlock &BB = F.getEntryBlock();
for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
if (AllocaInst *A = dyn_cast<AllocaInst>(I))
@@ -239,6 +847,7 @@ bool SROA::performScalarRepl(Function &F) {
// with unused elements.
if (AI->use_empty()) {
AI->eraseFromParent();
+ Changed = true;
continue;
}
@@ -251,10 +860,10 @@ bool SROA::performScalarRepl(Function &F) {
// the constant global instead. This is commonly produced by the CFE by
// constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
// is only subsequently read.
- if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
+ if (MemTransferInst *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
DEBUG(dbgs() << "Found alloca equal to global: " << *AI << '\n');
DEBUG(dbgs() << " memcpy = " << *TheCopy << '\n');
- Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
+ Constant *TheSrc = cast<Constant>(TheCopy->getSource());
AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
TheCopy->eraseFromParent(); // Don't mutate the global.
AI->eraseFromParent();
@@ -271,7 +880,10 @@ bool SROA::performScalarRepl(Function &F) {
// Do not promote [0 x %struct].
if (AllocaSize == 0) continue;
-
+
+ // Do not promote any struct whose size is too big.
+ if (AllocaSize > SRThreshold) continue;
+
// If the alloca looks like a good candidate for scalar replacement, and if
// all its users can be transformed, then split up the aggregate into its
// separate elements.
@@ -281,48 +893,20 @@ bool SROA::performScalarRepl(Function &F) {
continue;
}
- // Do not promote any struct whose size is too big.
- if (AllocaSize > SRThreshold) continue;
-
// If we can turn this aggregate value (potentially with casts) into a
// simple scalar value that can be mem2reg'd into a register value.
// IsNotTrivial tracks whether this is something that mem2reg could have
// promoted itself. If so, we don't want to transform it needlessly. Note
// that we can't just check based on the type: the alloca may be of an i32
// but that has pointer arithmetic to set byte 3 of it or something.
- bool IsNotTrivial = false;
- const Type *VectorTy = 0;
- bool HadAVector = false;
- if (CanConvertToScalar(AI, IsNotTrivial, VectorTy, HadAVector,
- 0, unsigned(AllocaSize)) && IsNotTrivial) {
- AllocaInst *NewAI;
- // If we were able to find a vector type that can handle this with
- // insert/extract elements, and if there was at least one use that had
- // a vector type, promote this to a vector. We don't want to promote
- // random stuff that doesn't use vectors (e.g. <9 x double>) because then
- // we just get a lot of insert/extracts. If at least one vector is
- // involved, then we probably really do have a union of vector/array.
- if (VectorTy && VectorTy->isVectorTy() && HadAVector) {
- DEBUG(dbgs() << "CONVERT TO VECTOR: " << *AI << "\n TYPE = "
- << *VectorTy << '\n');
-
- // Create and insert the vector alloca.
- NewAI = new AllocaInst(VectorTy, 0, "", AI->getParent()->begin());
- ConvertUsesToScalar(AI, NewAI, 0);
- } else {
- DEBUG(dbgs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n");
-
- // Create and insert the integer alloca.
- const Type *NewTy = IntegerType::get(AI->getContext(), AllocaSize*8);
- NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
- ConvertUsesToScalar(AI, NewAI, 0);
- }
+ if (AllocaInst *NewAI =
+ ConvertToScalarInfo((unsigned)AllocaSize, *TD).TryConvert(AI)) {
NewAI->takeName(AI);
AI->eraseFromParent();
++NumConverted;
Changed = true;
continue;
- }
+ }
// Otherwise, couldn't process this alloca.
}
@@ -365,7 +949,7 @@ void SROA::DoScalarReplacement(AllocaInst *AI,
DeleteDeadInstructions();
AI->eraseFromParent();
- NumReplaced++;
+ ++NumReplaced;
}
/// DeleteDeadInstructions - Erase instructions on the DeadInstrs list,
@@ -404,11 +988,11 @@ void SROA::isSafeForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
isSafeGEP(GEPI, AI, GEPOffset, Info);
if (!Info.isUnsafe)
isSafeForScalarRepl(GEPI, AI, GEPOffset, Info);
- } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
+ } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
if (Length)
isSafeMemAccess(AI, Offset, Length->getZExtValue(), 0,
- UI.getOperandNo() == 1, Info);
+ UI.getOperandNo() == 0, Info);
else
MarkUnsafe(Info);
} else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
@@ -698,7 +1282,6 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// that doesn't have anything to do with the alloca that we are promoting. For
// memset, this Value* stays null.
Value *OtherPtr = 0;
- LLVMContext &Context = MI->getContext();
unsigned MemAlignment = MI->getAlignment();
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcopy
if (Inst == MTI->getRawDest())
@@ -712,6 +1295,8 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// If there is an other pointer, we want to convert it to the same pointer
// type as AI has, so we can GEP through it safely.
if (OtherPtr) {
+ unsigned AddrSpace =
+ cast<PointerType>(OtherPtr->getType())->getAddressSpace();
// Remove bitcasts and all-zero GEPs from OtherPtr. This is an
// optimization, but it's also required to detect the corner case where
@@ -719,20 +1304,8 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// OtherPtr may be a bitcast or GEP that is currently being rewritten. (This
// function is only called for mem intrinsics that access the whole
// aggregate, so non-zero GEPs are not an issue here.)
- while (1) {
- if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr)) {
- OtherPtr = BC->getOperand(0);
- continue;
- }
- if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr)) {
- // All zero GEPs are effectively bitcasts.
- if (GEP->hasAllZeroIndices()) {
- OtherPtr = GEP->getOperand(0);
- continue;
- }
- }
- break;
- }
+ OtherPtr = OtherPtr->stripPointerCasts();
+
// Copying the alloca to itself is a no-op: just delete it.
if (OtherPtr == AI || OtherPtr == NewElts[0]) {
// This code will run twice for a no-op memcpy -- once for each operand.
@@ -744,19 +1317,17 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
return;
}
- if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
- if (BCE->getOpcode() == Instruction::BitCast)
- OtherPtr = BCE->getOperand(0);
-
// If the pointer is not the right type, insert a bitcast to the right
// type.
- if (OtherPtr->getType() != AI->getType())
- OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
- MI);
+ const Type *NewTy =
+ PointerType::get(AI->getType()->getElementType(), AddrSpace);
+
+ if (OtherPtr->getType() != NewTy)
+ OtherPtr = new BitCastInst(OtherPtr, NewTy, OtherPtr->getName(), MI);
}
// Process each element of the aggregate.
- Value *TheFn = MI->getOperand(0);
+ Value *TheFn = MI->getCalledValue();
const Type *BytePtrTy = MI->getRawDest()->getType();
bool SROADest = MI->getRawDest() == Inst;
@@ -775,12 +1346,11 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
MI);
uint64_t EltOffset;
const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
- if (const StructType *ST =
- dyn_cast<StructType>(OtherPtrTy->getElementType())) {
+ const Type *OtherTy = OtherPtrTy->getElementType();
+ if (const StructType *ST = dyn_cast<StructType>(OtherTy)) {
EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
} else {
- const Type *EltTy =
- cast<SequentialType>(OtherPtr->getType())->getElementType();
+ const Type *EltTy = cast<SequentialType>(OtherTy)->getElementType();
EltOffset = TD->getTypeAllocSize(EltTy)*i;
}
@@ -814,7 +1384,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// If the stored element is zero (common case), just store a null
// constant.
Constant *StoreVal;
- if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getArgOperand(1))) {
if (CI->isZero()) {
StoreVal = Constant::getNullValue(EltTy); // 0.0, null, 0, <0,0>
} else {
@@ -832,7 +1402,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
}
// Convert the integer value to the appropriate type.
- StoreVal = ConstantInt::get(Context, TotalVal);
+ StoreVal = ConstantInt::get(CI->getContext(), TotalVal);
if (ValTy->isPointerTy())
StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
else if (ValTy->isFloatingPointTy())
@@ -858,8 +1428,17 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getName(), MI);
// Cast the other pointer (if we have one) to BytePtrTy.
- if (OtherElt && OtherElt->getType() != BytePtrTy)
- OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getName(), MI);
+ if (OtherElt && OtherElt->getType() != BytePtrTy) {
+ // Preserve address space of OtherElt
+ const PointerType* OtherPTy = cast<PointerType>(OtherElt->getType());
+ const PointerType* PTy = cast<PointerType>(BytePtrTy);
+ if (OtherPTy->getElementType() != PTy->getElementType()) {
+ Type *NewOtherPTy = PointerType::get(PTy->getElementType(),
+ OtherPTy->getAddressSpace());
+ OtherElt = new BitCastInst(OtherElt, NewOtherPTy,
+ OtherElt->getNameStr(), MI);
+ }
+ }
unsigned EltSize = TD->getTypeAllocSize(EltTy);
@@ -868,19 +1447,30 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
Value *Ops[] = {
SROADest ? EltPtr : OtherElt, // Dest ptr
SROADest ? OtherElt : EltPtr, // Src ptr
- ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
+ ConstantInt::get(MI->getArgOperand(2)->getType(), EltSize), // Size
// Align
- ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign)
+ ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign),
+ MI->getVolatileCst()
};
- CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
+ // In case we fold the address space overloaded memcpy of A to B
+ // with memcpy of B to C, change the function to be a memcpy of A to C.
+ const Type *Tys[] = { Ops[0]->getType(), Ops[1]->getType(),
+ Ops[2]->getType() };
+ Module *M = MI->getParent()->getParent()->getParent();
+ TheFn = Intrinsic::getDeclaration(M, MI->getIntrinsicID(), Tys, 3);
+ CallInst::Create(TheFn, Ops, Ops + 5, "", MI);
} else {
assert(isa<MemSetInst>(MI));
Value *Ops[] = {
- EltPtr, MI->getOperand(2), // Dest, Value,
- ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
- Zero // Align
+ EltPtr, MI->getArgOperand(1), // Dest, Value,
+ ConstantInt::get(MI->getArgOperand(2)->getType(), EltSize), // Size
+ Zero, // Align
+ ConstantInt::get(Type::getInt1Ty(MI->getContext()), 0) // isVolatile
};
- CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
+ const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() };
+ Module *M = MI->getParent()->getParent()->getParent();
+ TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);
+ CallInst::Create(TheFn, Ops, Ops + 5, "", MI);
}
}
DeadInsts.push_back(MI);
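A hedged, plain-C++ sketch (no LLVM API, not from the patch) of what the per-element rewrite above amounts to at the source level: once the aggregate alloca is split, a whole-object memcpy is replaced by one small copy per element, each at that element's own layout offset.

#include <cstring>
#include <cstdio>

struct Pair { int a; double b; };

static void copy_whole(Pair *dst, const Pair *src) {
  std::memcpy(dst, src, sizeof(Pair));            // before scalar replacement
}

static void copy_per_element(Pair *dst, const Pair *src) {
  std::memcpy(&dst->a, &src->a, sizeof dst->a);   // element 0 at offset 0
  std::memcpy(&dst->b, &src->b, sizeof dst->b);   // element 1 at its layout offset
}

int main() {
  Pair s{7, 2.5}, d1{}, d2{};
  copy_whole(&d1, &s);
  copy_per_element(&d2, &s);
  std::printf("%d %g / %d %g\n", d1.a, d1.b, d2.a, d2.b);   // 7 2.5 / 7 2.5
}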
@@ -1076,7 +1666,12 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
}
- ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
+ // Don't create an 'or x, 0' on the first iteration.
+ if (!isa<Constant>(ResultVal) ||
+ !cast<Constant>(ResultVal)->isNullValue())
+ ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
+ else
+ ResultVal = SrcField;
}
// Handle tail padding by truncating the result
@@ -1090,6 +1685,12 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
/// HasPadding - Return true if the specified type has any structure or
/// alignment padding, false otherwise.
static bool HasPadding(const Type *Ty, const TargetData &TD) {
+ if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty))
+ return HasPadding(ATy->getElementType(), TD);
+
+ if (const VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return HasPadding(VTy->getElementType(), TD);
+
if (const StructType *STy = dyn_cast<StructType>(Ty)) {
const StructLayout *SL = TD.getStructLayout(STy);
unsigned PrevFieldBitOffset = 0;
@@ -1119,12 +1720,8 @@ static bool HasPadding(const Type *Ty, const TargetData &TD) {
if (PrevFieldEnd < SL->getSizeInBits())
return true;
}
-
- } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
- return HasPadding(ATy->getElementType(), TD);
- } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
- return HasPadding(VTy->getElementType(), TD);
}
+
return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
}
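The HasPadding() change above only reorders the array/vector cases; the underlying test is unchanged. A small standalone C++ illustration (assuming a typical x86-64 ABI, not part of the patch) of the same two checks, done with sizeof/offsetof instead of TargetData:

#include <cstddef>
#include <cstdio>

struct Padded   { char c; int i; };   // gap after 'c' on common ABIs
struct Unpadded { int a; int b; };    // contiguous fields, no tail padding

int main() {
  // Field gap: does the next field start where the previous one ends?
  // Tail padding: does the last field end at sizeof(T)?
  bool p1 = offsetof(Padded, i) != sizeof(char) ||
            offsetof(Padded, i) + sizeof(int) != sizeof(Padded);
  bool p2 = offsetof(Unpadded, b) != sizeof(int) ||
            offsetof(Unpadded, b) + sizeof(int) != sizeof(Unpadded);
  std::printf("%d %d\n", p1, p2);     // 1 0 on x86/x86-64
}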
@@ -1154,509 +1751,6 @@ bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
return true;
}
-/// MergeInType - Add the 'In' type to the accumulated type (Accum) so far at
-/// the offset specified by Offset (which is specified in bytes).
-///
-/// There are two cases we handle here:
-/// 1) A union of vector types of the same size and potentially its elements.
-/// Here we turn element accesses into insert/extract element operations.
-/// This promotes a <4 x float> with a store of float to the third element
-/// into a <4 x float> that uses insert element.
-/// 2) A fully general blob of memory, which we turn into some (potentially
-/// large) integer type with extract and insert operations where the loads
-/// and stores would mutate the memory.
-static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
- unsigned AllocaSize, const TargetData &TD,
- LLVMContext &Context) {
- // If this could be contributing to a vector, analyze it.
- if (VecTy != Type::getVoidTy(Context)) { // either null or a vector type.
-
- // If the In type is a vector that is the same size as the alloca, see if it
- // matches the existing VecTy.
- if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
- if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
- // If we're storing/loading a vector of the right size, allow it as a
- // vector. If this the first vector we see, remember the type so that
- // we know the element size.
- if (VecTy == 0)
- VecTy = VInTy;
- return;
- }
- } else if (In->isFloatTy() || In->isDoubleTy() ||
- (In->isIntegerTy() && In->getPrimitiveSizeInBits() >= 8 &&
- isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
- // If we're accessing something that could be an element of a vector, see
- // if the implied vector agrees with what we already have and if Offset is
- // compatible with it.
- unsigned EltSize = In->getPrimitiveSizeInBits()/8;
- if (Offset % EltSize == 0 &&
- AllocaSize % EltSize == 0 &&
- (VecTy == 0 ||
- cast<VectorType>(VecTy)->getElementType()
- ->getPrimitiveSizeInBits()/8 == EltSize)) {
- if (VecTy == 0)
- VecTy = VectorType::get(In, AllocaSize/EltSize);
- return;
- }
- }
- }
-
- // Otherwise, we have a case that we can't handle with an optimized vector
- // form. We can still turn this into a large integer.
- VecTy = Type::getVoidTy(Context);
-}
-
-/// CanConvertToScalar - V is a pointer. If we can convert the pointee and all
-/// its accesses to a single vector type, return true and set VecTy to
-/// the new type. If we could convert the alloca into a single promotable
-/// integer, return true but set VecTy to VoidTy. Further, if the use is not a
-/// completely trivial use that mem2reg could promote, set IsNotTrivial. Offset
-/// is the current offset from the base of the alloca being analyzed.
-///
-/// If we see at least one access to the value that is as a vector type, set the
-/// SawVec flag.
-bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
- bool &SawVec, uint64_t Offset,
- unsigned AllocaSize) {
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
- Instruction *User = cast<Instruction>(*UI);
-
- if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
- // Don't break volatile loads.
- if (LI->isVolatile())
- return false;
- MergeInType(LI->getType(), Offset, VecTy,
- AllocaSize, *TD, V->getContext());
- SawVec |= LI->getType()->isVectorTy();
- continue;
- }
-
- if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
- // Storing the pointer, not into the value?
- if (SI->getOperand(0) == V || SI->isVolatile()) return 0;
- MergeInType(SI->getOperand(0)->getType(), Offset,
- VecTy, AllocaSize, *TD, V->getContext());
- SawVec |= SI->getOperand(0)->getType()->isVectorTy();
- continue;
- }
-
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
- if (!CanConvertToScalar(BCI, IsNotTrivial, VecTy, SawVec, Offset,
- AllocaSize))
- return false;
- IsNotTrivial = true;
- continue;
- }
-
- if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
- // If this is a GEP with a variable indices, we can't handle it.
- if (!GEP->hasAllConstantIndices())
- return false;
-
- // Compute the offset that this GEP adds to the pointer.
- SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
- uint64_t GEPOffset = TD->getIndexedOffset(GEP->getPointerOperandType(),
- &Indices[0], Indices.size());
- // See if all uses can be converted.
- if (!CanConvertToScalar(GEP, IsNotTrivial, VecTy, SawVec,Offset+GEPOffset,
- AllocaSize))
- return false;
- IsNotTrivial = true;
- continue;
- }
-
- // If this is a constant sized memset of a constant value (e.g. 0) we can
- // handle it.
- if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
- // Store of constant value and constant size.
- if (isa<ConstantInt>(MSI->getValue()) &&
- isa<ConstantInt>(MSI->getLength())) {
- IsNotTrivial = true;
- continue;
- }
- }
-
- // If this is a memcpy or memmove into or out of the whole allocation, we
- // can handle it like a load or store of the scalar type.
- if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
- if (ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength()))
- if (Len->getZExtValue() == AllocaSize && Offset == 0) {
- IsNotTrivial = true;
- continue;
- }
- }
-
- // Otherwise, we cannot handle this!
- return false;
- }
-
- return true;
-}
-
-/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
-/// directly. This happens when we are converting an "integer union" to a
-/// single integer scalar, or when we are converting a "vector union" to a
-/// vector with insert/extractelement instructions.
-///
-/// Offset is an offset from the original alloca, in bits that need to be
-/// shifted to the right. By the end of this, there should be no uses of Ptr.
-void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) {
- while (!Ptr->use_empty()) {
- Instruction *User = cast<Instruction>(Ptr->use_back());
-
- if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
- ConvertUsesToScalar(CI, NewAI, Offset);
- CI->eraseFromParent();
- continue;
- }
-
- if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
- // Compute the offset that this GEP adds to the pointer.
- SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
- uint64_t GEPOffset = TD->getIndexedOffset(GEP->getPointerOperandType(),
- &Indices[0], Indices.size());
- ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8);
- GEP->eraseFromParent();
- continue;
- }
-
- IRBuilder<> Builder(User->getParent(), User);
-
- if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
- // The load is a bit extract from NewAI shifted right by Offset bits.
- Value *LoadedVal = Builder.CreateLoad(NewAI, "tmp");
- Value *NewLoadVal
- = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset, Builder);
- LI->replaceAllUsesWith(NewLoadVal);
- LI->eraseFromParent();
- continue;
- }
-
- if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
- assert(SI->getOperand(0) != Ptr && "Consistency error!");
- Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
- Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
- Builder);
- Builder.CreateStore(New, NewAI);
- SI->eraseFromParent();
-
- // If the load we just inserted is now dead, then the inserted store
- // overwrote the entire thing.
- if (Old->use_empty())
- Old->eraseFromParent();
- continue;
- }
-
- // If this is a constant sized memset of a constant value (e.g. 0) we can
- // transform it into a store of the expanded constant value.
- if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
- assert(MSI->getRawDest() == Ptr && "Consistency error!");
- unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
- if (NumBytes != 0) {
- unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();
-
- // Compute the value replicated the right number of times.
- APInt APVal(NumBytes*8, Val);
-
- // Splat the value if non-zero.
- if (Val)
- for (unsigned i = 1; i != NumBytes; ++i)
- APVal |= APVal << 8;
-
- Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
- Value *New = ConvertScalar_InsertValue(
- ConstantInt::get(User->getContext(), APVal),
- Old, Offset, Builder);
- Builder.CreateStore(New, NewAI);
-
- // If the load we just inserted is now dead, then the memset overwrote
- // the entire thing.
- if (Old->use_empty())
- Old->eraseFromParent();
- }
- MSI->eraseFromParent();
- continue;
- }
-
- // If this is a memcpy or memmove into or out of the whole allocation, we
- // can handle it like a load or store of the scalar type.
- if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
- assert(Offset == 0 && "must be store to start of alloca");
-
- // If the source and destination are both to the same alloca, then this is
- // a noop copy-to-self, just delete it. Otherwise, emit a load and store
- // as appropriate.
- AllocaInst *OrigAI = cast<AllocaInst>(Ptr->getUnderlyingObject(0));
-
- if (MTI->getSource()->getUnderlyingObject(0) != OrigAI) {
- // Dest must be OrigAI, change this to be a load from the original
- // pointer (bitcasted), then a store to our new alloca.
- assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
- Value *SrcPtr = MTI->getSource();
- SrcPtr = Builder.CreateBitCast(SrcPtr, NewAI->getType());
-
- LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
- SrcVal->setAlignment(MTI->getAlignment());
- Builder.CreateStore(SrcVal, NewAI);
- } else if (MTI->getDest()->getUnderlyingObject(0) != OrigAI) {
- // Src must be OrigAI, change this to be a load from NewAI then a store
- // through the original dest pointer (bitcasted).
- assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
- LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");
-
- Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), NewAI->getType());
- StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
- NewStore->setAlignment(MTI->getAlignment());
- } else {
- // Noop transfer. Src == Dst
- }
-
- MTI->eraseFromParent();
- continue;
- }
-
- llvm_unreachable("Unsupported operation!");
- }
-}
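The memset branch of the removed ConvertUsesToScalar turned a constant memset into a single integer store by replicating the fill byte. A hedged standalone sketch of that splat loop, assuming the object is at most 8 bytes so a uint64_t can stand in for the APInt:

#include <cstdint>
#include <cstdio>

static uint64_t splat_byte(uint8_t val, unsigned num_bytes) {
  uint64_t v = val;                    // like APInt APVal(NumBytes*8, Val)
  if (val)
    for (unsigned i = 1; i != num_bytes; ++i)
      v |= v << 8;                     // APVal |= APVal << 8
  return v;
}

int main() {
  // memset(&x, 0xAB, 4) on a 4-byte scalar becomes a store of 0xabababab.
  std::printf("0x%08llx\n", (unsigned long long)splat_byte(0xAB, 4));
}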
-
-/// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer
-/// or vector value FromVal, extracting the bits from the offset specified by
-/// Offset. This returns the value, which is of type ToType.
-///
-/// This happens when we are converting an "integer union" to a single
-/// integer scalar, or when we are converting a "vector union" to a vector with
-/// insert/extractelement instructions.
-///
-/// Offset is an offset from the original alloca, in bits that need to be
-/// shifted to the right.
-Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
- uint64_t Offset, IRBuilder<> &Builder) {
- // If the load is of the whole new alloca, no conversion is needed.
- if (FromVal->getType() == ToType && Offset == 0)
- return FromVal;
-
- // If the result alloca is a vector type, this is either an element
- // access or a bitcast to another vector type of the same size.
- if (const VectorType *VTy = dyn_cast<VectorType>(FromVal->getType())) {
- if (ToType->isVectorTy())
- return Builder.CreateBitCast(FromVal, ToType, "tmp");
-
- // Otherwise it must be an element access.
- unsigned Elt = 0;
- if (Offset) {
- unsigned EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());
- Elt = Offset/EltSize;
- assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
- }
- // Return the element extracted out of it.
- Value *V = Builder.CreateExtractElement(FromVal, ConstantInt::get(
- Type::getInt32Ty(FromVal->getContext()), Elt), "tmp");
- if (V->getType() != ToType)
- V = Builder.CreateBitCast(V, ToType, "tmp");
- return V;
- }
-
- // If ToType is a first class aggregate, extract out each of the pieces and
- // use insertvalue's to form the FCA.
- if (const StructType *ST = dyn_cast<StructType>(ToType)) {
- const StructLayout &Layout = *TD->getStructLayout(ST);
- Value *Res = UndefValue::get(ST);
- for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
- Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
- Offset+Layout.getElementOffsetInBits(i),
- Builder);
- Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
- }
- return Res;
- }
-
- if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
- uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
- Value *Res = UndefValue::get(AT);
- for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
- Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
- Offset+i*EltSize, Builder);
- Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
- }
- return Res;
- }
-
- // Otherwise, this must be a union that was converted to an integer value.
- const IntegerType *NTy = cast<IntegerType>(FromVal->getType());
-
- // If this is a big-endian system and the load is narrower than the
- // full alloca type, we need to do a shift to get the right bits.
- int ShAmt = 0;
- if (TD->isBigEndian()) {
- // On big-endian machines, the lowest bit is stored at the bit offset
- // from the pointer given by getTypeStoreSizeInBits. This matters for
- // integers with a bitwidth that is not a multiple of 8.
- ShAmt = TD->getTypeStoreSizeInBits(NTy) -
- TD->getTypeStoreSizeInBits(ToType) - Offset;
- } else {
- ShAmt = Offset;
- }
-
- // Note: we support negative bitwidths (with shl) which are not defined.
- // We do this to support (f.e.) loads off the end of a structure where
- // only some bits are used.
- if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
- FromVal = Builder.CreateLShr(FromVal,
- ConstantInt::get(FromVal->getType(),
- ShAmt), "tmp");
- else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
- FromVal = Builder.CreateShl(FromVal,
- ConstantInt::get(FromVal->getType(),
- -ShAmt), "tmp");
-
- // Finally, unconditionally truncate the integer to the right width.
- unsigned LIBitWidth = TD->getTypeSizeInBits(ToType);
- if (LIBitWidth < NTy->getBitWidth())
- FromVal =
- Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
- LIBitWidth), "tmp");
- else if (LIBitWidth > NTy->getBitWidth())
- FromVal =
- Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(),
- LIBitWidth), "tmp");
-
- // If the result is an integer, this is a trunc or bitcast.
- if (ToType->isIntegerTy()) {
- // Should be done.
- } else if (ToType->isFloatingPointTy() || ToType->isVectorTy()) {
- // Just do a bitcast, we know the sizes match up.
- FromVal = Builder.CreateBitCast(FromVal, ToType, "tmp");
- } else {
- // Otherwise must be a pointer.
- FromVal = Builder.CreateIntToPtr(FromVal, ToType, "tmp");
- }
- assert(FromVal->getType() == ToType && "Didn't convert right?");
- return FromVal;
-}
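For the integer ("blob of memory") case, the removed ConvertScalar_ExtractValue reduced a load at some offset to a right shift followed by a truncate (with the shift amount mirrored on big-endian targets). A hedged little-endian sketch, using fixed widths of my own choosing:

#include <cstdint>
#include <cstdio>

static uint16_t extract_u16_le(uint64_t from, unsigned bit_offset) {
  return static_cast<uint16_t>(from >> bit_offset);   // lshr, then trunc
  // big-endian would shift by StoreBits(from) - StoreBits(result) - bit_offset
}

int main() {
  uint64_t blob = 0x1122334455667788ULL;              // the "big integer" alloca
  std::printf("0x%04x\n", extract_u16_le(blob, 16));  // field at byte 2: 0x5566
}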
-
-/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer
-/// or vector value "Old" at the offset specified by Offset.
-///
-/// This happens when we are converting an "integer union" to a
-/// single integer scalar, or when we are converting a "vector union" to a
-/// vector with insert/extractelement instructions.
-///
-/// Offset is an offset from the original alloca, in bits that need to be
-/// shifted to the right.
-Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
- uint64_t Offset, IRBuilder<> &Builder) {
-
- // Convert the stored type to the actual type, shift it left to insert
- // then 'or' into place.
- const Type *AllocaType = Old->getType();
- LLVMContext &Context = Old->getContext();
-
- if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
- uint64_t VecSize = TD->getTypeAllocSizeInBits(VTy);
- uint64_t ValSize = TD->getTypeAllocSizeInBits(SV->getType());
-
- // Changing the whole vector with memset or with an access of a different
- // vector type?
- if (ValSize == VecSize)
- return Builder.CreateBitCast(SV, AllocaType, "tmp");
-
- uint64_t EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());
-
- // Must be an element insertion.
- unsigned Elt = Offset/EltSize;
-
- if (SV->getType() != VTy->getElementType())
- SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");
-
- SV = Builder.CreateInsertElement(Old, SV,
- ConstantInt::get(Type::getInt32Ty(SV->getContext()), Elt),
- "tmp");
- return SV;
- }
-
- // If SV is a first-class aggregate value, insert each value recursively.
- if (const StructType *ST = dyn_cast<StructType>(SV->getType())) {
- const StructLayout &Layout = *TD->getStructLayout(ST);
- for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
- Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
- Old = ConvertScalar_InsertValue(Elt, Old,
- Offset+Layout.getElementOffsetInBits(i),
- Builder);
- }
- return Old;
- }
-
- if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
- uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
- for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
- Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
- Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder);
- }
- return Old;
- }
-
- // If SV is a float, convert it to the appropriate integer type.
- // If it is a pointer, do the same.
- unsigned SrcWidth = TD->getTypeSizeInBits(SV->getType());
- unsigned DestWidth = TD->getTypeSizeInBits(AllocaType);
- unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
- unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
- if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy())
- SV = Builder.CreateBitCast(SV,
- IntegerType::get(SV->getContext(),SrcWidth), "tmp");
- else if (SV->getType()->isPointerTy())
- SV = Builder.CreatePtrToInt(SV, TD->getIntPtrType(SV->getContext()), "tmp");
-
- // Zero extend or truncate the value if needed.
- if (SV->getType() != AllocaType) {
- if (SV->getType()->getPrimitiveSizeInBits() <
- AllocaType->getPrimitiveSizeInBits())
- SV = Builder.CreateZExt(SV, AllocaType, "tmp");
- else {
- // Truncation may be needed if storing more than the alloca can hold
- // (undefined behavior).
- SV = Builder.CreateTrunc(SV, AllocaType, "tmp");
- SrcWidth = DestWidth;
- SrcStoreWidth = DestStoreWidth;
- }
- }
-
- // If this is a big-endian system and the store is narrower than the
- // full alloca type, we need to do a shift to get the right bits.
- int ShAmt = 0;
- if (TD->isBigEndian()) {
- // On big-endian machines, the lowest bit is stored at the bit offset
- // from the pointer given by getTypeStoreSizeInBits. This matters for
- // integers with a bitwidth that is not a multiple of 8.
- ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
- } else {
- ShAmt = Offset;
- }
-
- // Note: we support negative bitwidths (with shr) which are not defined.
- // We do this to support (f.e.) stores off the end of a structure where
- // only some bits in the structure are set.
- APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
- if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
- SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(),
- ShAmt), "tmp");
- Mask <<= ShAmt;
- } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
- SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(),
- -ShAmt), "tmp");
- Mask = Mask.lshr(-ShAmt);
- }
-
- // Mask out the bits we are about to insert from the old value, and or
- // in the new bits.
- if (SrcWidth != DestWidth) {
- assert(DestWidth > SrcWidth);
- Old = Builder.CreateAnd(Old, ConstantInt::get(Context, ~Mask), "mask");
- SV = Builder.CreateOr(Old, SV, "ins");
- }
- return SV;
-}
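Its counterpart, the removed ConvertScalar_InsertValue, was the usual read-modify-write on bits: clear the destination field with a mask, shift the new value into place, then OR it in. A hedged little-endian sketch with a fixed 16-bit field:

#include <cstdint>
#include <cstdio>

static uint64_t insert_u16_le(uint64_t old, uint16_t sv, unsigned bit_offset) {
  uint64_t mask = 0xFFFFull << bit_offset;                 // bits being replaced
  return (old & ~mask) | (uint64_t(sv) << bit_offset);     // and ~Mask, shl, or
}

int main() {
  uint64_t blob = 0x1122334455667788ULL;
  std::printf("0x%016llx\n",
              (unsigned long long)insert_u16_le(blob, 0xBEEF, 16));
  // prints 0x11223344beef7788
}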
-
/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
@@ -1679,21 +1773,23 @@ static bool PointsToConstantGlobal(Value *V) {
/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
-static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
+static bool isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
bool isOffset) {
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
- if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
+ User *U = cast<Instruction>(*UI);
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(U))
// Ignore non-volatile loads, they are always ok.
if (!LI->isVolatile())
continue;
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
+ if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
// If uses of the bitcast are ok, we are ok.
if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
return false;
continue;
}
- if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
// If the GEP has all zero indices, it doesn't offset the pointer. If it
// doesn't, it does.
if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
@@ -1704,7 +1800,8 @@ static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
// If this isn't our memcpy/memmove, reject it as something we can't
// handle.
- if (!isa<MemTransferInst>(*UI))
+ MemTransferInst *MI = dyn_cast<MemTransferInst>(U);
+ if (MI == 0)
return false;
// If we already have seen a copy, reject the second one.
@@ -1715,12 +1812,10 @@ static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
if (isOffset) return false;
// If the memintrinsic isn't using the alloca as the dest, reject it.
- if (UI.getOperandNo() != 1) return false;
-
- MemIntrinsic *MI = cast<MemIntrinsic>(*UI);
+ if (UI.getOperandNo() != 0) return false;
// If the source of the memcpy/move is not a constant global, reject it.
- if (!PointsToConstantGlobal(MI->getOperand(2)))
+ if (!PointsToConstantGlobal(MI->getSource()))
return false;
// Otherwise, the transform is safe. Remember the copy instruction.
@@ -1732,8 +1827,8 @@ static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
/// modified by a copy from a constant global. If we can prove this, we can
/// replace any uses of the alloca with uses of the global directly.
-Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocaInst *AI) {
- Instruction *TheCopy = 0;
+MemTransferInst *SROA::isOnlyCopiedFromConstantGlobal(AllocaInst *AI) {
+ MemTransferInst *TheCopy = 0;
if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
return TheCopy;
return 0;
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index 62f34a2..360749c 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -26,6 +26,7 @@
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CFG.h"
@@ -41,14 +42,15 @@ STATISTIC(NumSimpl, "Number of blocks simplified");
namespace {
struct CFGSimplifyPass : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- CFGSimplifyPass() : FunctionPass(&ID) {}
+ CFGSimplifyPass() : FunctionPass(ID) {}
virtual bool runOnFunction(Function &F);
};
}
char CFGSimplifyPass::ID = 0;
-static RegisterPass<CFGSimplifyPass> X("simplifycfg", "Simplify the CFG");
+INITIALIZE_PASS(CFGSimplifyPass, "simplifycfg",
+ "Simplify the CFG", false, false);
// Public interface to the CFGSimplification pass
FunctionPass *llvm::createCFGSimplificationPass() {
@@ -57,13 +59,20 @@ FunctionPass *llvm::createCFGSimplificationPass() {
/// ChangeToUnreachable - Insert an unreachable instruction before the specified
/// instruction, making it and the rest of the code in the block dead.
-static void ChangeToUnreachable(Instruction *I) {
+static void ChangeToUnreachable(Instruction *I, bool UseLLVMTrap) {
BasicBlock *BB = I->getParent();
// Loop over all of the successors, removing BB's entry from any PHI
// nodes.
for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
(*SI)->removePredecessor(BB);
+ // Insert a call to llvm.trap right before this. This turns the undefined
+ // behavior into a hard fail instead of falling through into random code.
+ if (UseLLVMTrap) {
+ Function *TrapFn =
+ Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
+ CallInst::Create(TrapFn, "", I);
+ }
new UnreachableInst(I->getContext(), I);
// All instructions after this are dead.
@@ -78,7 +87,7 @@ static void ChangeToUnreachable(Instruction *I) {
/// ChangeToCall - Convert the specified invoke into a normal call.
static void ChangeToCall(InvokeInst *II) {
BasicBlock *BB = II->getParent();
- SmallVector<Value*, 8> Args(II->op_begin()+3, II->op_end());
+ SmallVector<Value*, 8> Args(II->op_begin(), II->op_end() - 3);
CallInst *NewCall = CallInst::Create(II->getCalledValue(), Args.begin(),
Args.end(), "", II);
NewCall->takeName(II);
@@ -117,7 +126,8 @@ static bool MarkAliveBlocks(BasicBlock *BB,
// though.
++BBI;
if (!isa<UnreachableInst>(BBI)) {
- ChangeToUnreachable(BBI);
+ // Don't insert a call to llvm.trap right before the unreachable.
+ ChangeToUnreachable(BBI, false);
Changed = true;
}
break;
@@ -128,12 +138,15 @@ static bool MarkAliveBlocks(BasicBlock *BB,
// they should be changed to unreachable by passes that can't modify the
// CFG.
if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
+ // Don't touch volatile stores.
+ if (SI->isVolatile()) continue;
+
Value *Ptr = SI->getOperand(1);
if (isa<UndefValue>(Ptr) ||
(isa<ConstantPointerNull>(Ptr) &&
SI->getPointerAddressSpace() == 0)) {
- ChangeToUnreachable(SI);
+ ChangeToUnreachable(SI, true);
Changed = true;
break;
}
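What the new UseLLVMTrap parameter means at the source level, as a hedged sketch (not from the patch; __builtin_trap/__builtin_unreachable are GCC/Clang stand-ins for llvm.trap and the UnreachableInst):

[[noreturn]] static void null_store_after_pass() {
  // A non-volatile store through a known-null pointer is undefined behaviour;
  // the pass now turns it into a hard trap instead of letting execution fall
  // through into whatever code follows.
  __builtin_trap();
  __builtin_unreachable();
}

int main(int argc, char **) {
  if (argc < 0)                 // never true; merely keeps the call reachable
    null_store_after_pass();
  return 0;
}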
@@ -210,12 +223,16 @@ static bool MergeEmptyReturnBlocks(Function &F) {
// Check for something else in the block.
BasicBlock::iterator I = Ret;
--I;
- if (!isa<PHINode>(I) || I != BB.begin() ||
- Ret->getNumOperands() == 0 ||
- Ret->getOperand(0) != I)
+ // Skip over debug info.
+ while (isa<DbgInfoIntrinsic>(I) && I != BB.begin())
+ --I;
+ if (!isa<DbgInfoIntrinsic>(I) &&
+ (!isa<PHINode>(I) || I != BB.begin() ||
+ Ret->getNumOperands() == 0 ||
+ Ret->getOperand(0) != I))
continue;
}
-
+
// If this is the first returning block, remember it and keep going.
if (RetBlock == 0) {
RetBlock = &BB;
@@ -239,7 +256,7 @@ static bool MergeEmptyReturnBlocks(Function &F) {
// If the canonical return block has no PHI node, create one now.
PHINode *RetBlockPHI = dyn_cast<PHINode>(RetBlock->begin());
if (RetBlockPHI == 0) {
- Value *InVal = cast<ReturnInst>(RetBlock->begin())->getOperand(0);
+ Value *InVal = cast<ReturnInst>(RetBlock->getTerminator())->getOperand(0);
RetBlockPHI = PHINode::Create(Ret->getOperand(0)->getType(), "merge",
&RetBlock->front());
@@ -268,10 +285,9 @@ static bool IterativeSimplifyCFG(Function &F, const TargetData *TD) {
while (LocalChange) {
LocalChange = false;
- // Loop over all of the basic blocks (except the first one) and remove them
- // if they are unneeded...
+ // Loop over all of the basic blocks and remove them if they are unneeded...
//
- for (Function::iterator BBIt = ++F.begin(); BBIt != F.end(); ) {
+ for (Function::iterator BBIt = F.begin(); BBIt != F.end(); ) {
if (SimplifyCFG(BBIt++, TD)) {
LocalChange = true;
++NumSimpl;
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp
index 4464961..3ec70ec 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp
@@ -32,7 +32,7 @@ namespace {
const TargetData *TD;
public:
static char ID; // Pass identification
- SimplifyHalfPowrLibCalls() : FunctionPass(&ID) {}
+ SimplifyHalfPowrLibCalls() : FunctionPass(ID) {}
bool runOnFunction(Function &F);
@@ -46,8 +46,8 @@ namespace {
char SimplifyHalfPowrLibCalls::ID = 0;
} // end anonymous namespace.
-static RegisterPass<SimplifyHalfPowrLibCalls>
-X("simplify-libcalls-halfpowr", "Simplify half_powr library calls");
+INITIALIZE_PASS(SimplifyHalfPowrLibCalls, "simplify-libcalls-halfpowr",
+ "Simplify half_powr library calls", false, false);
// Public interface to the Simplify HalfPowr LibCalls pass.
FunctionPass *llvm::createSimplifyHalfPowrLibCallsPass() {
@@ -93,7 +93,8 @@ InlineHalfPowrs(const std::vector<Instruction *> &HalfPowrs,
// Inline the call, taking care of what code ends up where.
NewBlock = SplitBlock(NextInst->getParent(), NextInst, this);
- bool B = InlineFunction(Call, 0, TD);
+ InlineFunctionInfo IFI(0, TD);
+ bool B = InlineFunction(Call, IFI);
assert(B && "half_powr didn't inline?"); B=B;
BasicBlock *NewBody = NewBlock->getSinglePredecessor();
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index 05027ae..d7ce53f 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -66,6 +66,11 @@ public:
this->TD = TD;
if (CI->getCalledFunction())
Context = &CI->getCalledFunction()->getContext();
+
+ // We never change the calling convention.
+ if (CI->getCallingConv() != llvm::CallingConv::C)
+ return NULL;
+
return CallOptimizer(CI->getCalledFunction(), CI, B);
}
};
@@ -92,6 +97,20 @@ static bool IsOnlyUsedInZeroEqualityComparison(Value *V) {
return true;
}
+/// IsOnlyUsedInEqualityComparison - Return true if it is only used in equality
+/// comparisons with With.
+static bool IsOnlyUsedInEqualityComparison(Value *V, Value *With) {
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
+ UI != E; ++UI) {
+ if (ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
+ if (IC->isEquality() && IC->getOperand(1) == With)
+ continue;
+ // Unknown instruction.
+ return false;
+ }
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// String and Memory LibCall Optimizations
//===----------------------------------------------------------------------===//
@@ -110,8 +129,8 @@ struct StrCatOpt : public LibCallOptimization {
return 0;
// Extract some information from the instruction
- Value *Dst = CI->getOperand(1);
- Value *Src = CI->getOperand(2);
+ Value *Dst = CI->getArgOperand(0);
+ Value *Src = CI->getArgOperand(1);
// See if we can get the length of the input string.
uint64_t Len = GetStringLength(Src);
@@ -142,7 +161,8 @@ struct StrCatOpt : public LibCallOptimization {
// We have enough information to now generate the memcpy call to do the
// concatenation for us. Make a memcpy to copy the nul byte with align = 1.
EmitMemCpy(CpyDst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len+1), 1, B, TD);
+ ConstantInt::get(TD->getIntPtrType(*Context), Len+1),
+ 1, false, B, TD);
}
};
@@ -161,12 +181,12 @@ struct StrNCatOpt : public StrCatOpt {
return 0;
// Extract some information from the instruction
- Value *Dst = CI->getOperand(1);
- Value *Src = CI->getOperand(2);
+ Value *Dst = CI->getArgOperand(0);
+ Value *Src = CI->getArgOperand(1);
uint64_t Len;
// We don't do anything if length is not constant
- if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getOperand(3)))
+ if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
Len = LengthArg->getZExtValue();
else
return 0;
@@ -206,11 +226,11 @@ struct StrChrOpt : public LibCallOptimization {
FT->getParamType(0) != FT->getReturnType())
return 0;
- Value *SrcStr = CI->getOperand(1);
+ Value *SrcStr = CI->getArgOperand(0);
// If the second operand is non-constant, see if we can compute the length
// of the input string and turn this into memchr.
- ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getOperand(2));
+ ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
if (CharC == 0) {
// These optimizations require TargetData.
if (!TD) return 0;
@@ -219,7 +239,7 @@ struct StrChrOpt : public LibCallOptimization {
if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32))// memchr needs i32.
return 0;
- return EmitMemChr(SrcStr, CI->getOperand(2), // include nul.
+ return EmitMemChr(SrcStr, CI->getArgOperand(1), // include nul.
ConstantInt::get(TD->getIntPtrType(*Context), Len),
B, TD);
}
@@ -259,12 +279,12 @@ struct StrCmpOpt : public LibCallOptimization {
// Verify the "strcmp" function prototype.
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
- !FT->getReturnType()->isIntegerTy(32) ||
+ !FT->getReturnType()->isIntegerTy(32) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(*Context))
return 0;
- Value *Str1P = CI->getOperand(1), *Str2P = CI->getOperand(2);
+ Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
if (Str1P == Str2P) // strcmp(x,x) -> 0
return ConstantInt::get(CI->getType(), 0);
@@ -307,19 +327,19 @@ struct StrNCmpOpt : public LibCallOptimization {
// Verify the "strncmp" function prototype.
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 ||
- !FT->getReturnType()->isIntegerTy(32) ||
+ !FT->getReturnType()->isIntegerTy(32) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(*Context) ||
!FT->getParamType(2)->isIntegerTy())
return 0;
- Value *Str1P = CI->getOperand(1), *Str2P = CI->getOperand(2);
+ Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
if (Str1P == Str2P) // strncmp(x,x,n) -> 0
return ConstantInt::get(CI->getType(), 0);
// Get the length argument if it is constant.
uint64_t Length;
- if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getOperand(3)))
+ if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
Length = LengthArg->getZExtValue();
else
return 0;
@@ -327,6 +347,9 @@ struct StrNCmpOpt : public LibCallOptimization {
if (Length == 0) // strncmp(x,y,0) -> 0
return ConstantInt::get(CI->getType(), 0);
+ if (TD && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
+ return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, TD);
+
std::string Str1, Str2;
bool HasStr1 = GetConstantStringInfo(Str1P, Str1);
bool HasStr2 = GetConstantStringInfo(Str2P, Str2);
@@ -350,15 +373,21 @@ struct StrNCmpOpt : public LibCallOptimization {
// 'strcpy' Optimizations
struct StrCpyOpt : public LibCallOptimization {
+ bool OptChkCall; // True if it's optimizing a __strcpy_chk libcall.
+
+ StrCpyOpt(bool c) : OptChkCall(c) {}
+
virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// Verify the "strcpy" function prototype.
+ unsigned NumParams = OptChkCall ? 3 : 2;
const FunctionType *FT = Callee->getFunctionType();
- if (FT->getNumParams() != 2 || FT->getReturnType() != FT->getParamType(0) ||
+ if (FT->getNumParams() != NumParams ||
+ FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(*Context))
return 0;
- Value *Dst = CI->getOperand(1), *Src = CI->getOperand(2);
+ Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
if (Dst == Src) // strcpy(x,x) -> x
return Src;
@@ -371,8 +400,14 @@ struct StrCpyOpt : public LibCallOptimization {
// We have enough information to now generate the memcpy call to do the
// concatenation for us. Make a memcpy to copy the nul byte with align = 1.
- EmitMemCpy(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len), 1, B, TD);
+ if (OptChkCall)
+ EmitMemCpyChk(Dst, Src,
+ ConstantInt::get(TD->getIntPtrType(*Context), Len),
+ CI->getArgOperand(2), B, TD);
+ else
+ EmitMemCpy(Dst, Src,
+ ConstantInt::get(TD->getIntPtrType(*Context), Len),
+ 1, false, B, TD);
return Dst;
}
};
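The strcpy/__strcpy_chk fold above relies on a C-level identity: when the source length is known, strcpy is a memcpy of strlen(src)+1 bytes (the +1 carries the terminating NUL). A small sanity check, not part of the patch:

#include <cstring>
#include <cstdio>

int main() {
  const char src[] = "0.96.5";
  char a[16], b[16];
  std::strcpy(a, src);                           // the original libcall
  std::memcpy(b, src, std::strlen(src) + 1);     // what the fold emits
  std::printf("%d\n", std::strcmp(a, b) == 0);   // 1: identical contents
}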
@@ -389,9 +424,9 @@ struct StrNCpyOpt : public LibCallOptimization {
!FT->getParamType(2)->isIntegerTy())
return 0;
- Value *Dst = CI->getOperand(1);
- Value *Src = CI->getOperand(2);
- Value *LenOp = CI->getOperand(3);
+ Value *Dst = CI->getArgOperand(0);
+ Value *Src = CI->getArgOperand(1);
+ Value *LenOp = CI->getArgOperand(2);
// See if we can get the length of the input string.
uint64_t SrcLen = GetStringLength(Src);
@@ -400,8 +435,8 @@ struct StrNCpyOpt : public LibCallOptimization {
if (SrcLen == 0) {
// strncpy(x, "", y) -> memset(x, '\0', y, 1)
- EmitMemSet(Dst, ConstantInt::get(Type::getInt8Ty(*Context), '\0'), LenOp,
- B, TD);
+ EmitMemSet(Dst, ConstantInt::get(Type::getInt8Ty(*Context), '\0'),
+ LenOp, false, B, TD);
return Dst;
}
@@ -421,7 +456,8 @@ struct StrNCpyOpt : public LibCallOptimization {
// strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant]
EmitMemCpy(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len), 1, B, TD);
+ ConstantInt::get(TD->getIntPtrType(*Context), Len),
+ 1, false, B, TD);
return Dst;
}
@@ -438,7 +474,7 @@ struct StrLenOpt : public LibCallOptimization {
!FT->getReturnType()->isIntegerTy())
return 0;
- Value *Src = CI->getOperand(1);
+ Value *Src = CI->getArgOperand(0);
// Constant folding: strlen("xyz") -> 3
if (uint64_t Len = GetStringLength(Src))
@@ -463,7 +499,7 @@ struct StrToOpt : public LibCallOptimization {
!FT->getParamType(1)->isPointerTy())
return 0;
- Value *EndPtr = CI->getOperand(2);
+ Value *EndPtr = CI->getArgOperand(1);
if (isa<ConstantPointerNull>(EndPtr)) {
CI->setOnlyReadsMemory();
CI->addAttribute(1, Attribute::NoCapture);
@@ -486,17 +522,34 @@ struct StrStrOpt : public LibCallOptimization {
return 0;
// fold strstr(x, x) -> x.
- if (CI->getOperand(1) == CI->getOperand(2))
- return B.CreateBitCast(CI->getOperand(1), CI->getType());
+ if (CI->getArgOperand(0) == CI->getArgOperand(1))
+ return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
+
+ // fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0
+ if (TD && IsOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {
+ Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, TD);
+ Value *StrNCmp = EmitStrNCmp(CI->getArgOperand(0), CI->getArgOperand(1),
+ StrLen, B, TD);
+ for (Value::use_iterator UI = CI->use_begin(), UE = CI->use_end();
+ UI != UE; ) {
+ ICmpInst *Old = cast<ICmpInst>(*UI++);
+ Value *Cmp = B.CreateICmp(Old->getPredicate(), StrNCmp,
+ ConstantInt::getNullValue(StrNCmp->getType()),
+ "cmp");
+ Old->replaceAllUsesWith(Cmp);
+ Old->eraseFromParent();
+ }
+ return CI;
+ }
// See if either input string is a constant string.
std::string SearchStr, ToFindStr;
- bool HasStr1 = GetConstantStringInfo(CI->getOperand(1), SearchStr);
- bool HasStr2 = GetConstantStringInfo(CI->getOperand(2), ToFindStr);
+ bool HasStr1 = GetConstantStringInfo(CI->getArgOperand(0), SearchStr);
+ bool HasStr2 = GetConstantStringInfo(CI->getArgOperand(1), ToFindStr);
// fold strstr(x, "") -> x.
if (HasStr2 && ToFindStr.empty())
- return B.CreateBitCast(CI->getOperand(1), CI->getType());
+ return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
// If both strings are known, constant fold it.
if (HasStr1 && HasStr2) {
@@ -506,15 +559,15 @@ struct StrStrOpt : public LibCallOptimization {
return Constant::getNullValue(CI->getType());
// strstr("abcd", "bc") -> gep((char*)"abcd", 1)
- Value *Result = CastToCStr(CI->getOperand(1), B);
+ Value *Result = CastToCStr(CI->getArgOperand(0), B);
Result = B.CreateConstInBoundsGEP1_64(Result, Offset, "strstr");
return B.CreateBitCast(Result, CI->getType());
}
// fold strstr(x, "y") -> strchr(x, 'y').
if (HasStr2 && ToFindStr.size() == 1)
- return B.CreateBitCast(EmitStrChr(CI->getOperand(1), ToFindStr[0], B, TD),
- CI->getType());
+ return B.CreateBitCast(EmitStrChr(CI->getArgOperand(0),
+ ToFindStr[0], B, TD), CI->getType());
return 0;
}
};
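The new strstr fold above is the observation that when strstr's result is only compared against its first argument, the question is really "does s start with needle?", which strncmp answers without a full substring search. A hedged C++ paraphrase:

#include <cstring>
#include <cstdio>

static bool starts_with_strstr(const char *s, const char *needle) {
  return std::strstr(s, needle) == s;                        // before the fold
}

static bool starts_with_strncmp(const char *s, const char *needle) {
  return std::strncmp(s, needle, std::strlen(needle)) == 0;  // after the fold
}

int main() {
  const char *s = "clamav-0.96.5";
  std::printf("%d %d\n", starts_with_strstr(s, "clam"),
              starts_with_strncmp(s, "clam"));               // 1 1
  std::printf("%d %d\n", starts_with_strstr(s, "0.96"),
              starts_with_strncmp(s, "0.96"));               // 0 0
}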
@@ -531,23 +584,26 @@ struct MemCmpOpt : public LibCallOptimization {
!FT->getReturnType()->isIntegerTy(32))
return 0;
- Value *LHS = CI->getOperand(1), *RHS = CI->getOperand(2);
+ Value *LHS = CI->getArgOperand(0), *RHS = CI->getArgOperand(1);
if (LHS == RHS) // memcmp(s,s,x) -> 0
return Constant::getNullValue(CI->getType());
// Make sure we have a constant length.
- ConstantInt *LenC = dyn_cast<ConstantInt>(CI->getOperand(3));
+ ConstantInt *LenC = dyn_cast<ConstantInt>(CI->getArgOperand(2));
if (!LenC) return 0;
uint64_t Len = LenC->getZExtValue();
if (Len == 0) // memcmp(s1,s2,0) -> 0
return Constant::getNullValue(CI->getType());
- if (Len == 1) { // memcmp(S1,S2,1) -> *LHS - *RHS
- Value *LHSV = B.CreateLoad(CastToCStr(LHS, B), "lhsv");
- Value *RHSV = B.CreateLoad(CastToCStr(RHS, B), "rhsv");
- return B.CreateSExt(B.CreateSub(LHSV, RHSV, "chardiff"), CI->getType());
+ // memcmp(S1,S2,1) -> *(unsigned char*)LHS - *(unsigned char*)RHS
+ if (Len == 1) {
+ Value *LHSV = B.CreateZExt(B.CreateLoad(CastToCStr(LHS, B), "lhsc"),
+ CI->getType(), "lhsv");
+ Value *RHSV = B.CreateZExt(B.CreateLoad(CastToCStr(RHS, B), "rhsc"),
+ CI->getType(), "rhsv");
+ return B.CreateSub(LHSV, RHSV, "chardiff");
}
// Constant folding: memcmp(x, y, l) -> cnst (all arguments are constant)
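The single-byte memcmp change above matters because memcmp compares bytes as unsigned char. A hedged standalone demonstration (my own, not the patch) of why the fold zero-extends the two loads before subtracting; a naively sign-extended variant can get the sign wrong once a byte is 0x80 or above:

#include <cstring>
#include <cstdio>

int main() {
  const unsigned char a[1] = { 0x80 }, b[1] = { 0x10 };
  int libc   = std::memcmp(a, b, 1);               // positive: 0x80 > 0x10
  int folded = int(a[0]) - int(b[0]);              // zext then sub: positive
  int naive  = int(char(a[0])) - int(char(b[0]));  // sext then sub: negative
                                                   // where char is signed
  std::printf("%d %d %d\n", libc > 0, folded > 0, naive > 0);  // typically 1 1 0
}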
@@ -581,9 +637,9 @@ struct MemCpyOpt : public LibCallOptimization {
return 0;
// memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
- EmitMemCpy(CI->getOperand(1), CI->getOperand(2),
- CI->getOperand(3), 1, B, TD);
- return CI->getOperand(1);
+ EmitMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), 1, false, B, TD);
+ return CI->getArgOperand(0);
}
};
@@ -603,9 +659,9 @@ struct MemMoveOpt : public LibCallOptimization {
return 0;
// memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
- EmitMemMove(CI->getOperand(1), CI->getOperand(2),
- CI->getOperand(3), 1, B, TD);
- return CI->getOperand(1);
+ EmitMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), 1, false, B, TD);
+ return CI->getArgOperand(0);
}
};
@@ -625,10 +681,10 @@ struct MemSetOpt : public LibCallOptimization {
return 0;
// memset(p, v, n) -> llvm.memset(p, v, n, 1)
- Value *Val = B.CreateIntCast(CI->getOperand(2), Type::getInt8Ty(*Context),
- false);
- EmitMemSet(CI->getOperand(1), Val, CI->getOperand(3), B, TD);
- return CI->getOperand(1);
+ Value *Val = B.CreateIntCast(CI->getArgOperand(1),
+ Type::getInt8Ty(*Context), false);
+ EmitMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), false, B, TD);
+ return CI->getArgOperand(0);
}
};
@@ -649,7 +705,7 @@ struct PowOpt : public LibCallOptimization {
!FT->getParamType(0)->isFloatingPointTy())
return 0;
- Value *Op1 = CI->getOperand(1), *Op2 = CI->getOperand(2);
+ Value *Op1 = CI->getArgOperand(0), *Op2 = CI->getArgOperand(1);
if (ConstantFP *Op1C = dyn_cast<ConstantFP>(Op1)) {
if (Op1C->isExactlyValue(1.0)) // pow(1.0, x) -> 1.0
return Op1C;
@@ -703,18 +759,18 @@ struct Exp2Opt : public LibCallOptimization {
!FT->getParamType(0)->isFloatingPointTy())
return 0;
- Value *Op = CI->getOperand(1);
+ Value *Op = CI->getArgOperand(0);
// Turn exp2(sitofp(x)) -> ldexp(1.0, sext(x)) if sizeof(x) <= 32
// Turn exp2(uitofp(x)) -> ldexp(1.0, zext(x)) if sizeof(x) < 32
Value *LdExpArg = 0;
if (SIToFPInst *OpC = dyn_cast<SIToFPInst>(Op)) {
if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() <= 32)
LdExpArg = B.CreateSExt(OpC->getOperand(0),
- Type::getInt32Ty(*Context), "tmp");
+ Type::getInt32Ty(*Context), "tmp");
} else if (UIToFPInst *OpC = dyn_cast<UIToFPInst>(Op)) {
if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() < 32)
LdExpArg = B.CreateZExt(OpC->getOperand(0),
- Type::getInt32Ty(*Context), "tmp");
+ Type::getInt32Ty(*Context), "tmp");
}
if (LdExpArg) {
@@ -755,7 +811,7 @@ struct UnaryDoubleFPOpt : public LibCallOptimization {
return 0;
// If this is something like 'floor((double)floatval)', convert to floorf.
- FPExtInst *Cast = dyn_cast<FPExtInst>(CI->getOperand(1));
+ FPExtInst *Cast = dyn_cast<FPExtInst>(CI->getArgOperand(0));
if (Cast == 0 || !Cast->getOperand(0)->getType()->isFloatTy())
return 0;
@@ -780,11 +836,11 @@ struct FFSOpt : public LibCallOptimization {
// Just make sure this has 2 arguments of the same FP type, which match the
// result type.
if (FT->getNumParams() != 1 ||
- !FT->getReturnType()->isIntegerTy(32) ||
+ !FT->getReturnType()->isIntegerTy(32) ||
!FT->getParamType(0)->isIntegerTy())
return 0;
- Value *Op = CI->getOperand(1);
+ Value *Op = CI->getArgOperand(0);
// Constant fold.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
@@ -804,7 +860,7 @@ struct FFSOpt : public LibCallOptimization {
Value *Cond = B.CreateICmpNE(Op, Constant::getNullValue(ArgType), "tmp");
return B.CreateSelect(Cond, V,
- ConstantInt::get(Type::getInt32Ty(*Context), 0));
+ ConstantInt::get(Type::getInt32Ty(*Context), 0));
}
};
@@ -820,7 +876,7 @@ struct IsDigitOpt : public LibCallOptimization {
return 0;
// isdigit(c) -> (c-'0') <u 10
- Value *Op = CI->getOperand(1);
+ Value *Op = CI->getArgOperand(0);
Op = B.CreateSub(Op, ConstantInt::get(Type::getInt32Ty(*Context), '0'),
"isdigittmp");
Op = B.CreateICmpULT(Op, ConstantInt::get(Type::getInt32Ty(*Context), 10),
@@ -841,7 +897,7 @@ struct IsAsciiOpt : public LibCallOptimization {
return 0;
// isascii(c) -> c <u 128
- Value *Op = CI->getOperand(1);
+ Value *Op = CI->getArgOperand(0);
Op = B.CreateICmpULT(Op, ConstantInt::get(Type::getInt32Ty(*Context), 128),
"isascii");
return B.CreateZExt(Op, CI->getType());
@@ -860,7 +916,7 @@ struct AbsOpt : public LibCallOptimization {
return 0;
// abs(x) -> x >s -1 ? x : -x
- Value *Op = CI->getOperand(1);
+ Value *Op = CI->getArgOperand(0);
Value *Pos = B.CreateICmpSGT(Op,
Constant::getAllOnesValue(Op->getType()),
"ispos");
@@ -882,7 +938,7 @@ struct ToAsciiOpt : public LibCallOptimization {
return 0;
// isascii(c) -> c & 0x7f
- return B.CreateAnd(CI->getOperand(1),
+ return B.CreateAnd(CI->getArgOperand(0),
ConstantInt::get(CI->getType(),0x7F));
}
};
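The character-classification folds above (isdigit, isascii, abs, toascii) all have direct C equivalents; a hedged one-file restatement, not part of the patch:

#include <cstdio>

static bool isdigit_fold(int c) { return unsigned(c - '0') < 10u; }  // (c-'0') <u 10
static bool isascii_fold(int c) { return unsigned(c) < 128u; }       // c <u 128
static int  toascii_fold(int c) { return c & 0x7f; }                 // c & 0x7f
static int  abs_fold(int x)     { return x > -1 ? x : -x; }          // x >s -1 ? x : -x

int main() {
  std::printf("%d %d %d %d\n",
              isdigit_fold('7'), isascii_fold(200),
              toascii_fold(0xC1), abs_fold(-5));   // 1 0 65 5
}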
@@ -905,7 +961,7 @@ struct PrintFOpt : public LibCallOptimization {
// Check for a fixed format string.
std::string FormatStr;
- if (!GetConstantStringInfo(CI->getOperand(1), FormatStr))
+ if (!GetConstantStringInfo(CI->getArgOperand(0), FormatStr))
return 0;
// Empty format string -> noop.
@@ -937,20 +993,20 @@ struct PrintFOpt : public LibCallOptimization {
}
// Optimize specific format strings.
- // printf("%c", chr) --> putchar(*(i8*)dst)
- if (FormatStr == "%c" && CI->getNumOperands() > 2 &&
- CI->getOperand(2)->getType()->isIntegerTy()) {
- Value *Res = EmitPutChar(CI->getOperand(2), B, TD);
+ // printf("%c", chr) --> putchar(chr)
+ if (FormatStr == "%c" && CI->getNumArgOperands() > 1 &&
+ CI->getArgOperand(1)->getType()->isIntegerTy()) {
+ Value *Res = EmitPutChar(CI->getArgOperand(1), B, TD);
if (CI->use_empty()) return CI;
return B.CreateIntCast(Res, CI->getType(), true);
}
// printf("%s\n", str) --> puts(str)
- if (FormatStr == "%s\n" && CI->getNumOperands() > 2 &&
- CI->getOperand(2)->getType()->isPointerTy() &&
+ if (FormatStr == "%s\n" && CI->getNumArgOperands() > 1 &&
+ CI->getArgOperand(1)->getType()->isPointerTy() &&
CI->use_empty()) {
- EmitPutS(CI->getOperand(2), B, TD);
+ EmitPutS(CI->getArgOperand(1), B, TD);
return CI;
}
return 0;
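At the C level, the two printf rewrites above are simply: a format string of exactly "%c" becomes putchar, and "%s\n" with an unused result becomes puts. A hedged side-by-side, not part of the patch:

#include <cstdio>

int main() {
  std::printf("%c", 'A');          // printf("%c", chr)
  std::putchar('A');               // --> putchar(chr)
  std::putchar('\n');

  std::printf("%s\n", "clamav");   // printf("%s\n", str), result unused
  std::puts("clamav");             // --> puts(str)
  return 0;
}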
@@ -971,11 +1027,11 @@ struct SPrintFOpt : public LibCallOptimization {
// Check for a fixed format string.
std::string FormatStr;
- if (!GetConstantStringInfo(CI->getOperand(2), FormatStr))
+ if (!GetConstantStringInfo(CI->getArgOperand(1), FormatStr))
return 0;
// If we just have a format string (nothing else crazy) transform it.
- if (CI->getNumOperands() == 3) {
+ if (CI->getNumArgOperands() == 2) {
// Make sure there's no % in the constant array. We could try to handle
// %% -> % in the future if we cared.
for (unsigned i = 0, e = FormatStr.size(); i != e; ++i)
@@ -986,27 +1042,28 @@ struct SPrintFOpt : public LibCallOptimization {
if (!TD) return 0;
// sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
- EmitMemCpy(CI->getOperand(1), CI->getOperand(2), // Copy the nul byte.
- ConstantInt::get(TD->getIntPtrType(*Context),
- FormatStr.size()+1), 1, B, TD);
+ EmitMemCpy(CI->getArgOperand(0), CI->getArgOperand(1), // Copy the
+ ConstantInt::get(TD->getIntPtrType(*Context), // nul byte.
+ FormatStr.size() + 1), 1, false, B, TD);
return ConstantInt::get(CI->getType(), FormatStr.size());
}
// The remaining optimizations require the format string to be "%s" or "%c"
// and have an extra operand.
- if (FormatStr.size() != 2 || FormatStr[0] != '%' || CI->getNumOperands() <4)
+ if (FormatStr.size() != 2 || FormatStr[0] != '%' ||
+ CI->getNumArgOperands() < 3)
return 0;
// Decode the second character of the format string.
if (FormatStr[1] == 'c') {
// sprintf(dst, "%c", chr) --> *(i8*)dst = chr; *((i8*)dst+1) = 0
- if (!CI->getOperand(3)->getType()->isIntegerTy()) return 0;
- Value *V = B.CreateTrunc(CI->getOperand(3),
- Type::getInt8Ty(*Context), "char");
- Value *Ptr = CastToCStr(CI->getOperand(1), B);
+ if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return 0;
+ Value *V = B.CreateTrunc(CI->getArgOperand(2),
+ Type::getInt8Ty(*Context), "char");
+ Value *Ptr = CastToCStr(CI->getArgOperand(0), B);
B.CreateStore(V, Ptr);
Ptr = B.CreateGEP(Ptr, ConstantInt::get(Type::getInt32Ty(*Context), 1),
- "nul");
+ "nul");
B.CreateStore(Constant::getNullValue(Type::getInt8Ty(*Context)), Ptr);
return ConstantInt::get(CI->getType(), 1);
@@ -1017,13 +1074,14 @@ struct SPrintFOpt : public LibCallOptimization {
if (!TD) return 0;
// sprintf(dest, "%s", str) -> llvm.memcpy(dest, str, strlen(str)+1, 1)
- if (!CI->getOperand(3)->getType()->isPointerTy()) return 0;
+ if (!CI->getArgOperand(2)->getType()->isPointerTy()) return 0;
- Value *Len = EmitStrLen(CI->getOperand(3), B, TD);
+ Value *Len = EmitStrLen(CI->getArgOperand(2), B, TD);
Value *IncLen = B.CreateAdd(Len,
ConstantInt::get(Len->getType(), 1),
"leninc");
- EmitMemCpy(CI->getOperand(1), CI->getOperand(3), IncLen, 1, B, TD);
+ EmitMemCpy(CI->getArgOperand(0), CI->getArgOperand(2),
+ IncLen, 1, false, B, TD);
// The sprintf result is the unincremented number of bytes in the string.
return B.CreateIntCast(Len, CI->getType(), false);
@@ -1047,8 +1105,8 @@ struct FWriteOpt : public LibCallOptimization {
return 0;
// Get the element size and count.
- ConstantInt *SizeC = dyn_cast<ConstantInt>(CI->getOperand(2));
- ConstantInt *CountC = dyn_cast<ConstantInt>(CI->getOperand(3));
+ ConstantInt *SizeC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
+ ConstantInt *CountC = dyn_cast<ConstantInt>(CI->getArgOperand(2));
if (!SizeC || !CountC) return 0;
uint64_t Bytes = SizeC->getZExtValue()*CountC->getZExtValue();
@@ -1058,8 +1116,8 @@ struct FWriteOpt : public LibCallOptimization {
// If this is writing one byte, turn it into fputc.
if (Bytes == 1) { // fwrite(S,1,1,F) -> fputc(S[0],F)
- Value *Char = B.CreateLoad(CastToCStr(CI->getOperand(1), B), "char");
- EmitFPutC(Char, CI->getOperand(4), B, TD);
+ Value *Char = B.CreateLoad(CastToCStr(CI->getArgOperand(0), B), "char");
+ EmitFPutC(Char, CI->getArgOperand(3), B, TD);
return ConstantInt::get(CI->getType(), 1);
}
@@ -1083,11 +1141,11 @@ struct FPutsOpt : public LibCallOptimization {
return 0;
// fputs(s,F) --> fwrite(s,1,strlen(s),F)
- uint64_t Len = GetStringLength(CI->getOperand(1));
+ uint64_t Len = GetStringLength(CI->getArgOperand(0));
if (!Len) return 0;
- EmitFWrite(CI->getOperand(1),
+ EmitFWrite(CI->getArgOperand(0),
ConstantInt::get(TD->getIntPtrType(*Context), Len-1),
- CI->getOperand(2), B, TD);
+ CI->getArgOperand(1), B, TD);
return CI; // Known to have no uses (see above).
}
};
@@ -1106,11 +1164,11 @@ struct FPrintFOpt : public LibCallOptimization {
// All the optimizations depend on the format string.
std::string FormatStr;
- if (!GetConstantStringInfo(CI->getOperand(2), FormatStr))
+ if (!GetConstantStringInfo(CI->getArgOperand(1), FormatStr))
return 0;
// fprintf(F, "foo") --> fwrite("foo", 3, 1, F)
- if (CI->getNumOperands() == 3) {
+ if (CI->getNumArgOperands() == 2) {
for (unsigned i = 0, e = FormatStr.size(); i != e; ++i)
if (FormatStr[i] == '%') // Could handle %% -> % if we cared.
return 0; // We found a format specifier.
@@ -1118,31 +1176,32 @@ struct FPrintFOpt : public LibCallOptimization {
// These optimizations require TargetData.
if (!TD) return 0;
- EmitFWrite(CI->getOperand(2),
+ EmitFWrite(CI->getArgOperand(1),
ConstantInt::get(TD->getIntPtrType(*Context),
FormatStr.size()),
- CI->getOperand(1), B, TD);
+ CI->getArgOperand(0), B, TD);
return ConstantInt::get(CI->getType(), FormatStr.size());
}
// The remaining optimizations require the format string to be "%s" or "%c"
// and have an extra operand.
- if (FormatStr.size() != 2 || FormatStr[0] != '%' || CI->getNumOperands() <4)
+ if (FormatStr.size() != 2 || FormatStr[0] != '%' ||
+ CI->getNumArgOperands() < 3)
return 0;
// Decode the second character of the format string.
if (FormatStr[1] == 'c') {
- // fprintf(F, "%c", chr) --> *(i8*)dst = chr
- if (!CI->getOperand(3)->getType()->isIntegerTy()) return 0;
- EmitFPutC(CI->getOperand(3), CI->getOperand(1), B, TD);
+ // fprintf(F, "%c", chr) --> fputc(chr, F)
+ if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return 0;
+ EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, TD);
return ConstantInt::get(CI->getType(), 1);
}
if (FormatStr[1] == 's') {
- // fprintf(F, "%s", str) -> fputs(str, F)
- if (!CI->getOperand(3)->getType()->isPointerTy() || !CI->use_empty())
+ // fprintf(F, "%s", str) --> fputs(str, F)
+ if (!CI->getArgOperand(2)->getType()->isPointerTy() || !CI->use_empty())
return 0;
- EmitFPutS(CI->getOperand(3), CI->getOperand(1), B, TD);
+ EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, TD);
return CI;
}
return 0;
@@ -1162,7 +1221,8 @@ namespace {
StringMap<LibCallOptimization*> Optimizations;
// String and Memory LibCall Optimizations
StrCatOpt StrCat; StrNCatOpt StrNCat; StrChrOpt StrChr; StrCmpOpt StrCmp;
- StrNCmpOpt StrNCmp; StrCpyOpt StrCpy; StrNCpyOpt StrNCpy; StrLenOpt StrLen;
+ StrNCmpOpt StrNCmp; StrCpyOpt StrCpy; StrCpyOpt StrCpyChk;
+ StrNCpyOpt StrNCpy; StrLenOpt StrLen;
StrToOpt StrTo; StrStrOpt StrStr;
MemCmpOpt MemCmp; MemCpyOpt MemCpy; MemMoveOpt MemMove; MemSetOpt MemSet;
// Math Library Optimizations
@@ -1177,8 +1237,7 @@ namespace {
bool Modified; // This is only used by doInitialization.
public:
static char ID; // Pass identification
- SimplifyLibCalls() : FunctionPass(&ID) {}
-
+ SimplifyLibCalls() : FunctionPass(ID), StrCpy(false), StrCpyChk(true) {}
void InitOptimizations();
bool runOnFunction(Function &F);
@@ -1195,8 +1254,8 @@ namespace {
char SimplifyLibCalls::ID = 0;
} // end anonymous namespace.
-static RegisterPass<SimplifyLibCalls>
-X("simplify-libcalls", "Simplify well-known library calls");
+INITIALIZE_PASS(SimplifyLibCalls, "simplify-libcalls",
+ "Simplify well-known library calls", false, false);
// Public interface to the Simplify LibCalls pass.
FunctionPass *llvm::createSimplifyLibCallsPass() {
@@ -1228,6 +1287,9 @@ void SimplifyLibCalls::InitOptimizations() {
Optimizations["memmove"] = &MemMove;
Optimizations["memset"] = &MemSet;
+ // _chk variants of String and Memory LibCall Optimizations.
+ Optimizations["__strcpy_chk"] = &StrCpyChk;
+
// Math Library Optimizations
Optimizations["powf"] = &Pow;
Optimizations["pow"] = &Pow;
@@ -1400,6 +1462,14 @@ bool SimplifyLibCalls::doInitialization(Module &M) {
setOnlyReadsMemory(F);
setDoesNotThrow(F);
setDoesNotCapture(F, 1);
+ } else if (Name == "strchr" ||
+ Name == "strrchr") {
+ if (FTy->getNumParams() != 2 ||
+ !FTy->getParamType(0)->isPointerTy() ||
+ !FTy->getParamType(1)->isIntegerTy())
+ continue;
+ setOnlyReadsMemory(F);
+ setDoesNotThrow(F);
} else if (Name == "strcpy" ||
Name == "stpcpy" ||
Name == "strcat" ||
@@ -1428,7 +1498,7 @@ bool SimplifyLibCalls::doInitialization(Module &M) {
} else if (Name == "strcmp" ||
Name == "strspn" ||
Name == "strncmp" ||
- Name ==" strcspn" ||
+ Name == "strcspn" ||
Name == "strcoll" ||
Name == "strcasecmp" ||
Name == "strncasecmp") {
@@ -2086,7 +2156,7 @@ bool SimplifyLibCalls::doInitialization(Module &M) {
// * pow(pow(x,y),z)-> pow(x,y*z)
//
// puts:
-// * puts("") -> putchar("\n")
+// * puts("") -> putchar('\n')
//
// round, roundf, roundl:
// * round(cnst) -> cnst'
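Illustration, not part of the patch: most hunks above in SimplifyLibCalls.cpp are the mechanical switch from getOperand(i+1)/getNumOperands() to getArgOperand(i)/getNumArgOperands() (call arguments used to start at operand 1); the folds themselves are unchanged. A minimal source-level sketch of what the printf/sprintf/fprintf/fwrite folds do, assuming literal format strings and results that are unused where a fold requires it:

  #include <cstdio>
  #include <cstring>

  void before(std::FILE *f, const char *s, char *dst, char c) {
    std::printf("%c", c);        // --> putchar(c)
    std::printf("%s\n", s);      // --> puts(s)
    std::sprintf(dst, "%s", s);  // --> memcpy(dst, s, strlen(s)+1)
    std::fprintf(f, "%c", c);    // --> fputc(c, f)
    std::fprintf(f, "%s", s);    // --> fputs(s, f)
    std::fwrite(s, 1, 1, f);     // --> fputc(s[0], f)
  }

  void after(std::FILE *f, const char *s, char *dst, char c) {
    std::putchar(c);
    std::puts(s);
    std::memcpy(dst, s, std::strlen(s) + 1);
    std::fputc(c, f);
    std::fputs(s, f);
    std::fputc(s[0], f);
  }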
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/Sink.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/Sink.cpp
new file mode 100644
index 0000000..95d3ded
--- /dev/null
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/Sink.cpp
@@ -0,0 +1,266 @@
+//===-- Sink.cpp - Code Sinking -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass moves instructions into successor blocks, when possible, so that
+// they aren't executed on paths where their results aren't needed.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "sink"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Assembly/Writer.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+STATISTIC(NumSunk, "Number of instructions sunk");
+
+namespace {
+ class Sinking : public FunctionPass {
+ DominatorTree *DT;
+ LoopInfo *LI;
+ AliasAnalysis *AA;
+
+ public:
+ static char ID; // Pass identification
+ Sinking() : FunctionPass(ID) {}
+
+ virtual bool runOnFunction(Function &F);
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ FunctionPass::getAnalysisUsage(AU);
+ AU.addRequired<AliasAnalysis>();
+ AU.addRequired<DominatorTree>();
+ AU.addRequired<LoopInfo>();
+ AU.addPreserved<DominatorTree>();
+ AU.addPreserved<LoopInfo>();
+ }
+ private:
+ bool ProcessBlock(BasicBlock &BB);
+ bool SinkInstruction(Instruction *I, SmallPtrSet<Instruction *, 8> &Stores);
+ bool AllUsesDominatedByBlock(Instruction *Inst, BasicBlock *BB) const;
+ };
+} // end anonymous namespace
+
+char Sinking::ID = 0;
+INITIALIZE_PASS(Sinking, "sink", "Code sinking", false, false);
+
+FunctionPass *llvm::createSinkingPass() { return new Sinking(); }
+
+/// AllUsesDominatedByBlock - Return true if all uses of the specified value
+/// occur in blocks dominated by the specified block.
+bool Sinking::AllUsesDominatedByBlock(Instruction *Inst,
+ BasicBlock *BB) const {
+ // Ignoring debug uses is necessary so debug info doesn't affect the code.
+ // This may leave a referencing dbg_value in the original block, before
+ // the definition of the vreg. Dwarf generator handles this although the
+ // user might not get the right info at runtime.
+ for (Value::use_iterator I = Inst->use_begin(),
+ E = Inst->use_end(); I != E; ++I) {
+ // Determine the block of the use.
+ Instruction *UseInst = cast<Instruction>(*I);
+ BasicBlock *UseBlock = UseInst->getParent();
+ if (PHINode *PN = dyn_cast<PHINode>(UseInst)) {
+ // PHI nodes use the operand in the predecessor block, not the block with
+ // the PHI.
+ unsigned Num = PHINode::getIncomingValueNumForOperand(I.getOperandNo());
+ UseBlock = PN->getIncomingBlock(Num);
+ }
+ // Check that it dominates.
+ if (!DT->dominates(BB, UseBlock))
+ return false;
+ }
+ return true;
+}
+
+bool Sinking::runOnFunction(Function &F) {
+ DT = &getAnalysis<DominatorTree>();
+ LI = &getAnalysis<LoopInfo>();
+ AA = &getAnalysis<AliasAnalysis>();
+
+ bool EverMadeChange = false;
+
+ while (1) {
+ bool MadeChange = false;
+
+ // Process all basic blocks.
+ for (Function::iterator I = F.begin(), E = F.end();
+ I != E; ++I)
+ MadeChange |= ProcessBlock(*I);
+
+ // If this iteration over the code changed anything, keep iterating.
+ if (!MadeChange) break;
+ EverMadeChange = true;
+ }
+ return EverMadeChange;
+}
+
+bool Sinking::ProcessBlock(BasicBlock &BB) {
+ // Can't sink anything out of a block that has less than two successors.
+ if (BB.getTerminator()->getNumSuccessors() <= 1 || BB.empty()) return false;
+
+ // Don't bother sinking code out of unreachable blocks. In addition to being
+ // unprofitable, it can also lead to infinite looping, because in an unreachable
+ // loop there may be nowhere to stop.
+ if (!DT->isReachableFromEntry(&BB)) return false;
+
+ bool MadeChange = false;
+
+ // Walk the basic block bottom-up. Remember if we saw a store.
+ BasicBlock::iterator I = BB.end();
+ --I;
+ bool ProcessedBegin = false;
+ SmallPtrSet<Instruction *, 8> Stores;
+ do {
+ Instruction *Inst = I; // The instruction to sink.
+
+ // Predecrement I (if it's not begin) so that it isn't invalidated by
+ // sinking.
+ ProcessedBegin = I == BB.begin();
+ if (!ProcessedBegin)
+ --I;
+
+ if (isa<DbgInfoIntrinsic>(Inst))
+ continue;
+
+ if (SinkInstruction(Inst, Stores))
+ ++NumSunk, MadeChange = true;
+
+ // If we just processed the first instruction in the block, we're done.
+ } while (!ProcessedBegin);
+
+ return MadeChange;
+}
+
+static bool isSafeToMove(Instruction *Inst, AliasAnalysis *AA,
+ SmallPtrSet<Instruction *, 8> &Stores) {
+ if (LoadInst *L = dyn_cast<LoadInst>(Inst)) {
+ if (L->isVolatile()) return false;
+
+ Value *Ptr = L->getPointerOperand();
+ unsigned Size = AA->getTypeStoreSize(L->getType());
+ for (SmallPtrSet<Instruction *, 8>::iterator I = Stores.begin(),
+ E = Stores.end(); I != E; ++I)
+ if (AA->getModRefInfo(*I, Ptr, Size) & AliasAnalysis::Mod)
+ return false;
+ }
+
+ if (Inst->mayWriteToMemory()) {
+ Stores.insert(Inst);
+ return false;
+ }
+
+ return Inst->isSafeToSpeculativelyExecute();
+}
+
+/// SinkInstruction - Determine whether it is safe to sink the specified
+/// instruction out of its current block into a successor.
+bool Sinking::SinkInstruction(Instruction *Inst,
+ SmallPtrSet<Instruction *, 8> &Stores) {
+ // Check if it's safe to move the instruction.
+ if (!isSafeToMove(Inst, AA, Stores))
+ return false;
+
+ // FIXME: This should include support for sinking instructions within the
+ // block they are currently in to shorten the live ranges. We often get
+ // instructions sunk into the top of a large block, but it would be better to
+ // also sink them down before their first use in the block. This xform has to
+ // be careful not to *increase* register pressure though, e.g. sinking
+ // "x = y + z" down if it kills y and z would increase the live ranges of y
+ // and z and only shrink the live range of x.
+
+ // Loop over all the operands of the specified instruction. If there is
+ // anything we can't handle, bail out.
+ BasicBlock *ParentBlock = Inst->getParent();
+
+ // SuccToSinkTo - This is the successor to sink this instruction to, once we
+ // decide.
+ BasicBlock *SuccToSinkTo = 0;
+
+ // FIXME: This picks a successor to sink into based on having one
+ // successor that dominates all the uses. However, there are cases where
+ // sinking can happen but where the sink point isn't a successor. For
+ // example:
+ // x = computation
+ // if () {} else {}
+ // use x
+ // the instruction could be sunk over the whole diamond for the
+ // if/then/else (or loop, etc), allowing it to be sunk into other blocks
+ // after that.
+
+ // Instructions can only be sunk if all their uses are in blocks
+ // dominated by one of the successors.
+ // Look at all the successors and decide which one
+ // we should sink to.
+ for (succ_iterator SI = succ_begin(ParentBlock),
+ E = succ_end(ParentBlock); SI != E; ++SI) {
+ if (AllUsesDominatedByBlock(Inst, *SI)) {
+ SuccToSinkTo = *SI;
+ break;
+ }
+ }
+
+ // If we couldn't find a block to sink to, ignore this instruction.
+ if (SuccToSinkTo == 0)
+ return false;
+
+ // It is not possible to sink an instruction into its own block. This can
+ // happen with loops.
+ if (Inst->getParent() == SuccToSinkTo)
+ return false;
+
+ DEBUG(dbgs() << "Sink instr " << *Inst);
+ DEBUG(dbgs() << "to block ";
+ WriteAsOperand(dbgs(), SuccToSinkTo, false));
+
+ // If the block has multiple predecessors, this would introduce computation on
+ // a path where it doesn't already exist. We could split the critical edge,
+ // but for now we just punt.
+ // FIXME: Split critical edges if not backedges.
+ if (SuccToSinkTo->getUniquePredecessor() != ParentBlock) {
+ // We cannot sink a load across a critical edge - there may be stores in
+ // other code paths.
+ if (!Inst->isSafeToSpeculativelyExecute()) {
+ DEBUG(dbgs() << " *** PUNTING: Won't sink load along critical edge.\n");
+ return false;
+ }
+
+ // We don't want to sink across a critical edge if we don't dominate the
+ // successor. We could be introducing calculations to new code paths.
+ if (!DT->dominates(ParentBlock, SuccToSinkTo)) {
+ DEBUG(dbgs() << " *** PUNTING: Critical edge found\n");
+ return false;
+ }
+
+ // Don't sink instructions into a loop.
+ if (LI->isLoopHeader(SuccToSinkTo)) {
+ DEBUG(dbgs() << " *** PUNTING: Loop header found\n");
+ return false;
+ }
+
+ // Otherwise we are OK with sinking along a critical edge.
+ DEBUG(dbgs() << "Sinking along critical edge.\n");
+ }
+
+ // Determine where to insert into. Skip phi nodes.
+ BasicBlock::iterator InsertPos = SuccToSinkTo->begin();
+ while (InsertPos != SuccToSinkTo->end() && isa<PHINode>(InsertPos))
+ ++InsertPos;
+
+ // Move the instruction.
+ Inst->moveBefore(InsertPos);
+ return true;
+}
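Illustration, not part of the patch: Sink.cpp is a new pass in this merge. At the source level the transformation looks like the sketch below; the computation must be safe to execute speculatively and all of its uses must be dominated by the successor it is sunk into.

  // Minimal sketch: the multiply is only needed on the 'p' path, so sinking
  // moves it into that successor and the other path never executes it.
  int before_sink(int a, int b, bool p) {
    int x = a * b;       // computed on every path through the function
    if (p)
      return x;
    return 0;
  }

  int after_sink(int a, int b, bool p) {
    if (p) {
      int x = a * b;     // sunk into the block that actually uses it
      return x;
    }
    return 0;
  }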
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/TailDuplication.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/TailDuplication.cpp
index 2306a77..2e437ac 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/TailDuplication.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/TailDuplication.cpp
@@ -49,7 +49,7 @@ namespace {
bool runOnFunction(Function &F);
public:
static char ID; // Pass identification, replacement for typeid
- TailDup() : FunctionPass(&ID) {}
+ TailDup() : FunctionPass(ID) {}
private:
inline bool shouldEliminateUnconditionalBranch(TerminatorInst *, unsigned);
@@ -59,7 +59,7 @@ namespace {
}
char TailDup::ID = 0;
-static RegisterPass<TailDup> X("tailduplicate", "Tail Duplication");
+INITIALIZE_PASS(TailDup, "tailduplicate", "Tail Duplication", false, false);
// Public interface to the Tail Duplication pass
FunctionPass *llvm::createTailDuplicationPass() { return new TailDup(); }
@@ -206,12 +206,13 @@ static BasicBlock *FindObviousSharedDomOf(BasicBlock *SrcBlock,
// there is only one other pred, get it, otherwise we can't handle it.
PI = pred_begin(DstBlock); PE = pred_end(DstBlock);
BasicBlock *DstOtherPred = 0;
- if (*PI == SrcBlock) {
+ BasicBlock *P = *PI;
+ if (P == SrcBlock) {
if (++PI == PE) return 0;
DstOtherPred = *PI;
if (++PI != PE) return 0;
} else {
- DstOtherPred = *PI;
+ DstOtherPred = P;
if (++PI == PE || *PI != SrcBlock || ++PI != PE) return 0;
}
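Illustration, not part of the patch: the FunctionPass(&ID) to FunctionPass(ID) and RegisterPass<> to INITIALIZE_PASS changes recur in almost every pass touched by this merge. A hypothetical pass showing the new registration shape (ExamplePass, its command-line name, and the include set are assumptions for the sketch, not code from the patch):

  #include "llvm/Pass.h"
  #include "llvm/Function.h"
  using namespace llvm;

  namespace {
    struct ExamplePass : public FunctionPass {
      static char ID;                      // Pass identification
      ExamplePass() : FunctionPass(ID) {}  // ID is now passed by value
      virtual bool runOnFunction(Function &) { return false; }
    };
  }

  char ExamplePass::ID = 0;
  INITIALIZE_PASS(ExamplePass, "example-pass", "Example pass", false, false);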
diff --git a/libclamav/c++/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/libclamav/c++/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 162d902..3717254 100644
--- a/libclamav/c++/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -16,9 +16,9 @@
// transformation from taking place, though currently the analysis cannot
// support moving any really useful instructions (only dead ones).
// 2. This pass transforms functions that are prevented from being tail
-// recursive by an associative expression to use an accumulator variable,
-// thus compiling the typical naive factorial or 'fib' implementation into
-// efficient code.
+// recursive by an associative and commutative expression to use an
+// accumulator variable, thus compiling the typical naive factorial or
+// 'fib' implementation into efficient code.
// 3. TRE is performed if the function returns void, if the return
// returns the result returned by the call, or if the function returns a
// run-time constant on all exits from the function. It is possible, though
@@ -59,6 +59,9 @@
#include "llvm/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/CaptureTracking.h"
+#include "llvm/Analysis/InlineCost.h"
+#include "llvm/Analysis/Loads.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/CFG.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
@@ -69,7 +72,7 @@ STATISTIC(NumAccumAdded, "Number of accumulators introduced");
namespace {
struct TailCallElim : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- TailCallElim() : FunctionPass(&ID) {}
+ TailCallElim() : FunctionPass(ID) {}
virtual bool runOnFunction(Function &F);
@@ -84,7 +87,8 @@ namespace {
}
char TailCallElim::ID = 0;
-static RegisterPass<TailCallElim> X("tailcallelim", "Tail Call Elimination");
+INITIALIZE_PASS(TailCallElim, "tailcallelim",
+ "Tail Call Elimination", false, false);
// Public interface to the TailCallElimination pass
FunctionPass *llvm::createTailCallEliminationPass() {
@@ -250,7 +254,7 @@ static bool isDynamicConstant(Value *V, CallInst *CI, ReturnInst *RI) {
// If we are passing this argument into call as the corresponding
// argument operand, then the argument is dynamically constant.
// Otherwise, we cannot transform this function safely.
- if (CI->getOperand(ArgNo+1) == Arg)
+ if (CI->getArgOperand(ArgNo) == Arg)
return true;
}
@@ -267,29 +271,29 @@ static bool isDynamicConstant(Value *V, CallInst *CI, ReturnInst *RI) {
}
// getCommonReturnValue - Check to see if the function containing the specified
-// return instruction and tail call consistently returns the same
-// runtime-constant value at all exit points. If so, return the returned value.
+// tail call consistently returns the same runtime-constant value at all exit
+// points except for IgnoreRI. If so, return the returned value.
//
-static Value *getCommonReturnValue(ReturnInst *TheRI, CallInst *CI) {
- Function *F = TheRI->getParent()->getParent();
+static Value *getCommonReturnValue(ReturnInst *IgnoreRI, CallInst *CI) {
+ Function *F = CI->getParent()->getParent();
Value *ReturnedValue = 0;
- for (Function::iterator BBI = F->begin(), E = F->end(); BBI != E; ++BBI)
- if (ReturnInst *RI = dyn_cast<ReturnInst>(BBI->getTerminator()))
- if (RI != TheRI) {
- Value *RetOp = RI->getOperand(0);
-
- // We can only perform this transformation if the value returned is
- // evaluatable at the start of the initial invocation of the function,
- // instead of at the end of the evaluation.
- //
- if (!isDynamicConstant(RetOp, CI, RI))
- return 0;
-
- if (ReturnedValue && RetOp != ReturnedValue)
- return 0; // Cannot transform if differing values are returned.
- ReturnedValue = RetOp;
- }
+ for (Function::iterator BBI = F->begin(), E = F->end(); BBI != E; ++BBI) {
+ ReturnInst *RI = dyn_cast<ReturnInst>(BBI->getTerminator());
+ if (RI == 0 || RI == IgnoreRI) continue;
+
+ // We can only perform this transformation if the value returned is
+ // evaluatable at the start of the initial invocation of the function,
+ // instead of at the end of the evaluation.
+ //
+ Value *RetOp = RI->getOperand(0);
+ if (!isDynamicConstant(RetOp, CI, RI))
+ return 0;
+
+ if (ReturnedValue && RetOp != ReturnedValue)
+ return 0; // Cannot transform if differing values are returned.
+ ReturnedValue = RetOp;
+ }
return ReturnedValue;
}
@@ -299,11 +303,11 @@ static Value *getCommonReturnValue(ReturnInst *TheRI, CallInst *CI) {
///
Value *TailCallElim::CanTransformAccumulatorRecursion(Instruction *I,
CallInst *CI) {
- if (!I->isAssociative()) return 0;
+ if (!I->isAssociative() || !I->isCommutative()) return 0;
assert(I->getNumOperands() == 2 &&
- "Associative operations should have 2 args!");
+ "Associative/commutative operations should have 2 args!");
- // Exactly one operand should be the result of the call instruction...
+ // Exactly one operand should be the result of the call instruction.
if ((I->getOperand(0) == CI && I->getOperand(1) == CI) ||
(I->getOperand(0) != CI && I->getOperand(1) != CI))
return 0;
@@ -328,15 +332,6 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
if (&BB->front() == Ret) // Make sure there is something before the ret...
return false;
- // If the return is in the entry block, then making this transformation would
- // turn infinite recursion into an infinite loop. This transformation is ok
- // in theory, but breaks some code like:
- // double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call
- // disable this xform in this case, because the code generator will lower the
- // call to fabs into inline code.
- if (BB == &F->getEntryBlock())
- return false;
-
// Scan backwards from the return, checking to see if there is a tail call in
// this block. If so, set CI to it.
CallInst *CI;
@@ -356,11 +351,35 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
if (CI->isTailCall() && CannotTailCallElimCallsMarkedTail)
return false;
- // If we are introducing accumulator recursion to eliminate associative
- // operations after the call instruction, this variable contains the initial
- // value for the accumulator. If this value is set, we actually perform
- // accumulator recursion elimination instead of simple tail recursion
- // elimination.
+ // As a special case, detect code like this:
+ // double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call
+ // and disable this xform in this case, because the code generator will
+ // lower the call to fabs into inline code.
+ if (BB == &F->getEntryBlock() &&
+ &BB->front() == CI && &*++BB->begin() == Ret &&
+ callIsSmall(F)) {
+ // A single-block function with just a call and a return. Check that
+ // the arguments match.
+ CallSite::arg_iterator I = CallSite(CI).arg_begin(),
+ E = CallSite(CI).arg_end();
+ Function::arg_iterator FI = F->arg_begin(),
+ FE = F->arg_end();
+ for (; I != E && FI != FE; ++I, ++FI)
+ if (*I != &*FI) break;
+ if (I == E && FI == FE)
+ return false;
+ }
+
+ // If we are introducing accumulator recursion to eliminate operations after
+ // the call instruction that are both associative and commutative, the initial
+ // value for the accumulator is placed in this variable. If this value is set
+ // then we actually perform accumulator recursion elimination instead of
+ // simple tail recursion elimination. If the operation is an LLVM instruction
+ // (eg: "add") then it is recorded in AccumulatorRecursionInstr. If not, then
+ // we are handling the case when the return instruction returns a constant C
+ // which is different to the constant returned by other return instructions
+ // (which is recorded in AccumulatorRecursionEliminationInitVal). This is a
+ // special case of accumulator recursion, the operation being "return C".
Value *AccumulatorRecursionEliminationInitVal = 0;
Instruction *AccumulatorRecursionInstr = 0;
@@ -368,21 +387,22 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
// tail call if all of the instructions between the call and the return are
// movable to above the call itself, leaving the call next to the return.
// Check that this is the case now.
- for (BBI = CI, ++BBI; &*BBI != Ret; ++BBI)
- if (!CanMoveAboveCall(BBI, CI)) {
- // If we can't move the instruction above the call, it might be because it
- // is an associative operation that could be tranformed using accumulator
- // recursion elimination. Check to see if this is the case, and if so,
- // remember the initial accumulator value for later.
- if ((AccumulatorRecursionEliminationInitVal =
- CanTransformAccumulatorRecursion(BBI, CI))) {
- // Yes, this is accumulator recursion. Remember which instruction
- // accumulates.
- AccumulatorRecursionInstr = BBI;
- } else {
- return false; // Otherwise, we cannot eliminate the tail recursion!
- }
+ for (BBI = CI, ++BBI; &*BBI != Ret; ++BBI) {
+ if (CanMoveAboveCall(BBI, CI)) continue;
+
+ // If we can't move the instruction above the call, it might be because it
+ // is an associative and commutative operation that could be transformed
+ // using accumulator recursion elimination. Check to see if this is the
+ // case, and if so, remember the initial accumulator value for later.
+ if ((AccumulatorRecursionEliminationInitVal =
+ CanTransformAccumulatorRecursion(BBI, CI))) {
+ // Yes, this is accumulator recursion. Remember which instruction
+ // accumulates.
+ AccumulatorRecursionInstr = BBI;
+ } else {
+ return false; // Otherwise, we cannot eliminate the tail recursion!
}
+ }
// We can only transform call/return pairs that either ignore the return value
// of the call and return void, ignore the value of the call and return a
@@ -391,8 +411,18 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
if (Ret->getNumOperands() == 1 && Ret->getReturnValue() != CI &&
!isa<UndefValue>(Ret->getReturnValue()) &&
AccumulatorRecursionEliminationInitVal == 0 &&
- !getCommonReturnValue(Ret, CI))
- return false;
+ !getCommonReturnValue(0, CI)) {
+ // One case remains that we are able to handle: the current return
+ // instruction returns a constant, and all other return instructions
+ // return a different constant.
+ if (!isDynamicConstant(Ret->getReturnValue(), CI, Ret))
+ return false; // Current return instruction does not return a constant.
+ // Check that all other return instructions return a common constant. If
+ // so, record it in AccumulatorRecursionEliminationInitVal.
+ AccumulatorRecursionEliminationInitVal = getCommonReturnValue(Ret, CI);
+ if (!AccumulatorRecursionEliminationInitVal)
+ return false;
+ }
// OK! We can transform this tail call. If this is the first one found,
// create the new entry block, allowing us to branch back to the old entry.
@@ -441,8 +471,8 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
// Ok, now that we know we have a pseudo-entry block WITH all of the
// required PHI nodes, add entries into the PHI node for the actual
// parameters passed into the tail-recursive call.
- for (unsigned i = 0, e = CI->getNumOperands()-1; i != e; ++i)
- ArgumentPHIs[i]->addIncoming(CI->getOperand(i+1), BB);
+ for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
+ ArgumentPHIs[i]->addIncoming(CI->getArgOperand(i), BB);
// If we are introducing an accumulator variable to eliminate the recursion,
// do so now. Note that we _know_ that no subsequent tail recursion
@@ -452,8 +482,9 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
if (AccumulatorRecursionEliminationInitVal) {
Instruction *AccRecInstr = AccumulatorRecursionInstr;
// Start by inserting a new PHI node for the accumulator.
- PHINode *AccPN = PHINode::Create(AccRecInstr->getType(), "accumulator.tr",
- OldEntry->begin());
+ PHINode *AccPN =
+ PHINode::Create(AccumulatorRecursionEliminationInitVal->getType(),
+ "accumulator.tr", OldEntry->begin());
// Loop over all of the predecessors of the tail recursion block. For the
// real entry into the function we seed the PHI with the initial value,
@@ -463,20 +494,27 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
// it will not show up as a predecessor.
for (pred_iterator PI = pred_begin(OldEntry), PE = pred_end(OldEntry);
PI != PE; ++PI) {
- if (*PI == &F->getEntryBlock())
- AccPN->addIncoming(AccumulatorRecursionEliminationInitVal, *PI);
+ BasicBlock *P = *PI;
+ if (P == &F->getEntryBlock())
+ AccPN->addIncoming(AccumulatorRecursionEliminationInitVal, P);
else
- AccPN->addIncoming(AccPN, *PI);
+ AccPN->addIncoming(AccPN, P);
}
- // Add an incoming argument for the current block, which is computed by our
- // associative accumulator instruction.
- AccPN->addIncoming(AccRecInstr, BB);
-
- // Next, rewrite the accumulator recursion instruction so that it does not
- // use the result of the call anymore, instead, use the PHI node we just
- // inserted.
- AccRecInstr->setOperand(AccRecInstr->getOperand(0) != CI, AccPN);
+ if (AccRecInstr) {
+ // Add an incoming argument for the current block, which is computed by
+ // our associative and commutative accumulator instruction.
+ AccPN->addIncoming(AccRecInstr, BB);
+
+ // Next, rewrite the accumulator recursion instruction so that it does not
+ // use the result of the call anymore, instead, use the PHI node we just
+ // inserted.
+ AccRecInstr->setOperand(AccRecInstr->getOperand(0) != CI, AccPN);
+ } else {
+ // Add an incoming argument for the current block, which is just the
+ // constant returned by the current return instruction.
+ AccPN->addIncoming(Ret->getReturnValue(), BB);
+ }
// Finally, rewrite any return instructions in the program to return the PHI
// node instead of the "initval" that they do currently. This loop will
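Illustration, not part of the patch: the accumulator-recursion case described in the comments above, shown at the source level for the naive factorial (the associative and commutative operation after the call is the multiply):

  unsigned fact(unsigned n) {             // before tail recursion elimination
    if (n <= 1) return 1;
    return n * fact(n - 1);               // multiply happens after the recursive call
  }

  unsigned fact_after_tre(unsigned n) {   // roughly what the pass produces
    unsigned accumulator = 1;             // the "accumulator.tr" PHI node
    while (n > 1) {
      accumulator *= n;
      --n;
    }
    return accumulator;
  }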
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
index be6b383..4d64c85 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
@@ -381,29 +381,28 @@ static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
const TargetLowering &TLI) {
std::vector<InlineAsm::ConstraintInfo>
Constraints = IA->ParseConstraints();
-
- unsigned ArgNo = 1; // ArgNo - The operand of the CallInst.
+
+ unsigned ArgNo = 0; // The argument of the CallInst.
for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
TargetLowering::AsmOperandInfo OpInfo(Constraints[i]);
-
+
// Compute the value type for each operand.
switch (OpInfo.Type) {
case InlineAsm::isOutput:
if (OpInfo.isIndirect)
- OpInfo.CallOperandVal = CI->getOperand(ArgNo++);
+ OpInfo.CallOperandVal = CI->getArgOperand(ArgNo++);
break;
case InlineAsm::isInput:
- OpInfo.CallOperandVal = CI->getOperand(ArgNo++);
+ OpInfo.CallOperandVal = CI->getArgOperand(ArgNo++);
break;
case InlineAsm::isClobber:
// Nothing to do.
break;
}
-
+
// Compute the constraint code and ConstraintType to use.
- TLI.ComputeConstraintToUse(OpInfo, SDValue(),
- OpInfo.ConstraintType == TargetLowering::C_Memory);
-
+ TLI.ComputeConstraintToUse(OpInfo, SDValue());
+
// If this asm operand is our Value*, and if it isn't an indirect memory
// operand, we can't fold it!
if (OpInfo.CallOperandVal == OpVal &&
@@ -411,7 +410,7 @@ static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
!OpInfo.isIndirect))
return false;
}
-
+
return true;
}
@@ -434,20 +433,23 @@ static bool FindAllMemoryUses(Instruction *I,
// Loop over all the uses, recursively processing them.
for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
UI != E; ++UI) {
- if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+ User *U = *UI;
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
MemoryUses.push_back(std::make_pair(LI, UI.getOperandNo()));
continue;
}
- if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
- if (UI.getOperandNo() == 0) return true; // Storing addr, not into addr.
- MemoryUses.push_back(std::make_pair(SI, UI.getOperandNo()));
+ if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
+ unsigned opNo = UI.getOperandNo();
+ if (opNo == 0) return true; // Storing addr, not into addr.
+ MemoryUses.push_back(std::make_pair(SI, opNo));
continue;
}
- if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
+ if (CallInst *CI = dyn_cast<CallInst>(U)) {
InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
- if (IA == 0) return true;
+ if (!IA) return true;
// If this is a memory operand, we're cool, otherwise bail out.
if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
@@ -455,7 +457,7 @@ static bool FindAllMemoryUses(Instruction *I,
continue;
}
- if (FindAllMemoryUses(cast<Instruction>(*UI), MemoryUses, ConsideredInsts,
+ if (FindAllMemoryUses(cast<Instruction>(U), MemoryUses, ConsideredInsts,
TLI))
return true;
}
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
index 1f62dab..093083a 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -97,23 +97,13 @@ bool llvm::DeleteDeadPHIs(BasicBlock *BB) {
/// MergeBlockIntoPredecessor - Attempts to merge a block into its predecessor,
/// if possible. The return value indicates success or failure.
bool llvm::MergeBlockIntoPredecessor(BasicBlock *BB, Pass *P) {
- pred_iterator PI(pred_begin(BB)), PE(pred_end(BB));
- // Can't merge the entry block. Don't merge away blocks who have their
- // address taken: this is a bug if the predecessor block is the entry node
- // (because we'd end up taking the address of the entry) and undesirable in
- // any case.
- if (pred_begin(BB) == pred_end(BB) ||
- BB->hasAddressTaken()) return false;
+ // Don't merge away blocks who have their address taken.
+ if (BB->hasAddressTaken()) return false;
- BasicBlock *PredBB = *PI++;
- for (; PI != PE; ++PI) // Search all predecessors, see if they are all same
- if (*PI != PredBB) {
- PredBB = 0; // There are multiple different predecessors...
- break;
- }
-
- // Can't merge if there are multiple predecessors.
+ // Can't merge if there are multiple predecessors, or no predecessors.
+ BasicBlock *PredBB = BB->getUniquePredecessor();
if (!PredBB) return false;
+
// Don't break self-loops.
if (PredBB == BB) return false;
// Don't break invokes.
@@ -267,7 +257,7 @@ void llvm::RemoveSuccessor(TerminatorInst *TI, unsigned SuccNum) {
case Instruction::Switch: // Should remove entry
default:
case Instruction::Ret: // Cannot happen, has no successors!
- llvm_unreachable("Unhandled terminator instruction type in RemoveSuccessor!");
+ llvm_unreachable("Unhandled terminator inst type in RemoveSuccessor!");
}
if (NewTI) // If it's a different instruction, replace.
@@ -336,21 +326,19 @@ BasicBlock *llvm::SplitBlock(BasicBlock *Old, Instruction *SplitPt, Pass *P) {
if (Loop *L = LI->getLoopFor(Old))
L->addBasicBlockToLoop(New, LI->getBase());
- if (DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>())
- {
- // Old dominates New. New node domiantes all other nodes dominated by Old.
- DomTreeNode *OldNode = DT->getNode(Old);
- std::vector<DomTreeNode *> Children;
- for (DomTreeNode::iterator I = OldNode->begin(), E = OldNode->end();
- I != E; ++I)
- Children.push_back(*I);
-
- DomTreeNode *NewNode = DT->addNewBlock(New,Old);
+ if (DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>()) {
+ // Old dominates New. New node dominates all other nodes dominated by Old.
+ DomTreeNode *OldNode = DT->getNode(Old);
+ std::vector<DomTreeNode *> Children;
+ for (DomTreeNode::iterator I = OldNode->begin(), E = OldNode->end();
+ I != E; ++I)
+ Children.push_back(*I);
+ DomTreeNode *NewNode = DT->addNewBlock(New,Old);
for (std::vector<DomTreeNode *>::iterator I = Children.begin(),
E = Children.end(); I != E; ++I)
DT->changeImmediateDominator(*I, NewNode);
- }
+ }
if (DominanceFrontier *DF = P->getAnalysisIfAvailable<DominanceFrontier>())
DF->splitBlock(Old);
@@ -423,7 +411,8 @@ BasicBlock *llvm::SplitBlockPredecessors(BasicBlock *BB,
DominatorTree *DT = P ? P->getAnalysisIfAvailable<DominatorTree>() : 0;
if (DT)
DT->splitBlock(NewBB);
- if (DominanceFrontier *DF = P ? P->getAnalysisIfAvailable<DominanceFrontier>():0)
+ if (DominanceFrontier *DF =
+ P ? P->getAnalysisIfAvailable<DominanceFrontier>() : 0)
DF->splitBlock(NewBB);
// Insert a new PHI node into NewBB for every PHI node in BB and that new PHI
@@ -560,121 +549,3 @@ void llvm::FindFunctionBackedges(const Function &F,
}
-
-
-
-/// AreEquivalentAddressValues - Test if A and B will obviously have the same
-/// value. This includes recognizing that %t0 and %t1 will have the same
-/// value in code like this:
-/// %t0 = getelementptr \@a, 0, 3
-/// store i32 0, i32* %t0
-/// %t1 = getelementptr \@a, 0, 3
-/// %t2 = load i32* %t1
-///
-static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
- // Test if the values are trivially equivalent.
- if (A == B) return true;
-
- // Test if the values come from identical arithmetic instructions.
- // Use isIdenticalToWhenDefined instead of isIdenticalTo because
- // this function is only used when one address use dominates the
- // other, which means that they'll always either have the same
- // value or one of them will have an undefined value.
- if (isa<BinaryOperator>(A) || isa<CastInst>(A) ||
- isa<PHINode>(A) || isa<GetElementPtrInst>(A))
- if (const Instruction *BI = dyn_cast<Instruction>(B))
- if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
- return true;
-
- // Otherwise they may not be equivalent.
- return false;
-}
-
-/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at the
-/// instruction before ScanFrom) checking to see if we have the value at the
-/// memory address *Ptr locally available within a small number of instructions.
-/// If the value is available, return it.
-///
-/// If not, return the iterator for the last validated instruction that the
-/// value would be live through. If we scanned the entire block and didn't find
-/// something that invalidates *Ptr or provides it, ScanFrom would be left at
-/// begin() and this returns null. ScanFrom could also be left
-///
-/// MaxInstsToScan specifies the maximum instructions to scan in the block. If
-/// it is set to 0, it will scan the whole block. You can also optionally
-/// specify an alias analysis implementation, which makes this more precise.
-Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
- BasicBlock::iterator &ScanFrom,
- unsigned MaxInstsToScan,
- AliasAnalysis *AA) {
- if (MaxInstsToScan == 0) MaxInstsToScan = ~0U;
-
- // If we're using alias analysis to disambiguate get the size of *Ptr.
- unsigned AccessSize = 0;
- if (AA) {
- const Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType();
- AccessSize = AA->getTypeStoreSize(AccessTy);
- }
-
- while (ScanFrom != ScanBB->begin()) {
- // We must ignore debug info directives when counting (otherwise they
- // would affect codegen).
- Instruction *Inst = --ScanFrom;
- if (isa<DbgInfoIntrinsic>(Inst))
- continue;
-
- // Restore ScanFrom to expected value in case next test succeeds
- ScanFrom++;
-
- // Don't scan huge blocks.
- if (MaxInstsToScan-- == 0) return 0;
-
- --ScanFrom;
- // If this is a load of Ptr, the loaded value is available.
- if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
- if (AreEquivalentAddressValues(LI->getOperand(0), Ptr))
- return LI;
-
- if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
- // If this is a store through Ptr, the value is available!
- if (AreEquivalentAddressValues(SI->getOperand(1), Ptr))
- return SI->getOperand(0);
-
- // If Ptr is an alloca and this is a store to a different alloca, ignore
- // the store. This is a trivial form of alias analysis that is important
- // for reg2mem'd code.
- if ((isa<AllocaInst>(Ptr) || isa<GlobalVariable>(Ptr)) &&
- (isa<AllocaInst>(SI->getOperand(1)) ||
- isa<GlobalVariable>(SI->getOperand(1))))
- continue;
-
- // If we have alias analysis and it says the store won't modify the loaded
- // value, ignore the store.
- if (AA &&
- (AA->getModRefInfo(SI, Ptr, AccessSize) & AliasAnalysis::Mod) == 0)
- continue;
-
- // Otherwise the store that may or may not alias the pointer, bail out.
- ++ScanFrom;
- return 0;
- }
-
- // If this is some other instruction that may clobber Ptr, bail out.
- if (Inst->mayWriteToMemory()) {
- // If alias analysis claims that it really won't modify the load,
- // ignore it.
- if (AA &&
- (AA->getModRefInfo(Inst, Ptr, AccessSize) & AliasAnalysis::Mod) == 0)
- continue;
-
- // May modify the pointer, bail out.
- ++ScanFrom;
- return 0;
- }
- }
-
- // Got to the start of the block, we didn't find it, but are done for this
- // block.
- return 0;
-}
-
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/BasicInliner.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/BasicInliner.cpp
index c580b8f..23a30cc 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/BasicInliner.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/BasicInliner.cpp
@@ -82,8 +82,8 @@ void BasicInlinerImpl::inlineFunctions() {
Function *F = *FI;
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
- CallSite CS = CallSite::get(I);
- if (CS.getInstruction() && CS.getCalledFunction()
+ CallSite CS(cast<Value>(I));
+ if (CS && CS.getCalledFunction()
&& !CS.getCalledFunction()->isDeclaration())
CallSites.push_back(CS);
}
@@ -129,7 +129,8 @@ void BasicInlinerImpl::inlineFunctions() {
}
// Inline
- if (InlineFunction(CS, NULL, TD)) {
+ InlineFunctionInfo IFI(0, TD);
+ if (InlineFunction(CS, IFI)) {
if (Callee->use_empty() && (Callee->hasLocalLinkage() ||
Callee->hasAvailableExternallyLinkage()))
DeadFunctions.insert(Callee);
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
index 3657390..f75ffe6 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
@@ -36,7 +36,7 @@ STATISTIC(NumBroken, "Number of blocks inserted");
namespace {
struct BreakCriticalEdges : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- BreakCriticalEdges() : FunctionPass(&ID) {}
+ BreakCriticalEdges() : FunctionPass(ID) {}
virtual bool runOnFunction(Function &F);
@@ -53,11 +53,11 @@ namespace {
}
char BreakCriticalEdges::ID = 0;
-static RegisterPass<BreakCriticalEdges>
-X("break-crit-edges", "Break critical edges in CFG");
+INITIALIZE_PASS(BreakCriticalEdges, "break-crit-edges",
+ "Break critical edges in CFG", false, false);
// Publically exposed interface to pass...
-const PassInfo *const llvm::BreakCriticalEdgesID = &X;
+char &llvm::BreakCriticalEdgesID = BreakCriticalEdges::ID;
FunctionPass *llvm::createBreakCriticalEdgesPass() {
return new BreakCriticalEdges();
}
@@ -94,7 +94,7 @@ bool llvm::isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
if (TI->getNumSuccessors() == 1) return false;
const BasicBlock *Dest = TI->getSuccessor(SuccNum);
- pred_const_iterator I = pred_begin(Dest), E = pred_end(Dest);
+ const_pred_iterator I = pred_begin(Dest), E = pred_end(Dest);
// If there is more than one predecessor, this is a critical edge...
assert(I != E && "No preds, but we have an edge to the block?");
@@ -106,11 +106,12 @@ bool llvm::isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
// If AllowIdenticalEdges is true, then we allow this edge to be considered
// non-critical iff all preds come from TI's block.
while (I != E) {
- if (*I != FirstPred)
+ const BasicBlock *P = *I;
+ if (P != FirstPred)
return true;
// Note: leave this as is until no one ever compiles with either gcc 4.0.1
// or Xcode 2. This seems to work around the pred_iterator assert in PR 2207
- E = pred_end(*I);
+ E = pred_end(P);
++I;
}
return false;
@@ -224,7 +225,7 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
for (Value::use_iterator UI = TIBB->use_begin(), E = TIBB->use_end();
UI != E; ) {
Value::use_iterator Use = UI++;
- if (PHINode *PN = dyn_cast<PHINode>(Use)) {
+ if (PHINode *PN = dyn_cast<PHINode>(*Use)) {
// Remove one entry from each PHI.
if (PN->getParent() == DestBB && UpdatedPHIs.insert(PN))
PN->setOperand(Use.getOperandNo(), NewBB);
@@ -277,11 +278,13 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
OtherPreds.push_back(PN->getIncomingBlock(i));
} else {
for (pred_iterator I = pred_begin(DestBB), E = pred_end(DestBB);
- I != E; ++I)
- if (*I != NewBB)
- OtherPreds.push_back(*I);
+ I != E; ++I) {
+ BasicBlock *P = *I;
+ if (P != NewBB)
+ OtherPreds.push_back(P);
+ }
}
-
+
bool NewBBDominatesDestBB = true;
// Should we update DominatorTree information?
@@ -400,11 +403,13 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
bool HasPredOutsideOfLoop = false;
BasicBlock *Exit = ExitBlocks[i];
for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit);
- I != E; ++I)
- if (TIL->contains(*I))
- Preds.push_back(*I);
+ I != E; ++I) {
+ BasicBlock *P = *I;
+ if (TIL->contains(P))
+ Preds.push_back(P);
else
HasPredOutsideOfLoop = true;
+ }
// If there are any preds not in the loop, we'll need to split
// the edges. The Preds.empty() check is needed because a block
// may appear multiple times in the list. We can't use
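Illustration, not part of the patch: a small function whose control flow contains the kind of edge BreakCriticalEdges splits, namely an edge from a block with several successors to a block with several predecessors:

  int critical_edge_example(int x) {
    int r = 0;
    if (x > 0)       // this block has two successors
      r = x * 2;     // the 'then' block
    return r + 1;    // join block with two predecessors; the edge that skips
                     // the 'then' block runs from a multi-successor block to
                     // a multi-predecessor block, so it is a critical edge
  }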
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/BuildLibCalls.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
index 2ea4bb6..c313949 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -69,49 +69,121 @@ Value *llvm::EmitStrChr(Value *Ptr, char C, IRBuilder<> &B,
return CI;
}
+/// EmitStrNCmp - Emit a call to the strncmp function to the builder.
+Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len,
+ IRBuilder<> &B, const TargetData *TD) {
+ Module *M = B.GetInsertBlock()->getParent()->getParent();
+ AttributeWithIndex AWI[3];
+ AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
+ AWI[1] = AttributeWithIndex::get(2, Attribute::NoCapture);
+ AWI[2] = AttributeWithIndex::get(~0u, Attribute::ReadOnly |
+ Attribute::NoUnwind);
+
+ LLVMContext &Context = B.GetInsertBlock()->getContext();
+ Value *StrNCmp = M->getOrInsertFunction("strncmp", AttrListPtr::get(AWI, 3),
+ B.getInt32Ty(),
+ B.getInt8PtrTy(),
+ B.getInt8PtrTy(),
+ TD->getIntPtrType(Context), NULL);
+ CallInst *CI = B.CreateCall3(StrNCmp, CastToCStr(Ptr1, B),
+ CastToCStr(Ptr2, B), Len, "strncmp");
+
+ if (const Function *F = dyn_cast<Function>(StrNCmp->stripPointerCasts()))
+ CI->setCallingConv(F->getCallingConv());
+
+ return CI;
+}
+
/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments.
Value *llvm::EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
- const TargetData *TD) {
+ const TargetData *TD, StringRef Name) {
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeWithIndex AWI[2];
AWI[0] = AttributeWithIndex::get(2, Attribute::NoCapture);
AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
const Type *I8Ptr = B.getInt8PtrTy();
- Value *StrCpy = M->getOrInsertFunction("strcpy", AttrListPtr::get(AWI, 2),
+ Value *StrCpy = M->getOrInsertFunction(Name, AttrListPtr::get(AWI, 2),
I8Ptr, I8Ptr, I8Ptr, NULL);
CallInst *CI = B.CreateCall2(StrCpy, CastToCStr(Dst, B), CastToCStr(Src, B),
- "strcpy");
+ Name);
if (const Function *F = dyn_cast<Function>(StrCpy->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
return CI;
}
+/// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the
+/// specified pointer arguments.
+Value *llvm::EmitStrNCpy(Value *Dst, Value *Src, Value *Len,
+ IRBuilder<> &B, const TargetData *TD, StringRef Name) {
+ Module *M = B.GetInsertBlock()->getParent()->getParent();
+ AttributeWithIndex AWI[2];
+ AWI[0] = AttributeWithIndex::get(2, Attribute::NoCapture);
+ AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
+ const Type *I8Ptr = B.getInt8PtrTy();
+ Value *StrNCpy = M->getOrInsertFunction(Name, AttrListPtr::get(AWI, 2),
+ I8Ptr, I8Ptr, I8Ptr,
+ Len->getType(), NULL);
+ CallInst *CI = B.CreateCall3(StrNCpy, CastToCStr(Dst, B), CastToCStr(Src, B),
+ Len, "strncpy");
+ if (const Function *F = dyn_cast<Function>(StrNCpy->stripPointerCasts()))
+ CI->setCallingConv(F->getCallingConv());
+ return CI;
+}
+
+
/// EmitMemCpy - Emit a call to the memcpy function to the builder. This always
-/// expects that the size has type 'intptr_t' and Dst/Src are pointers.
-Value *llvm::EmitMemCpy(Value *Dst, Value *Src, Value *Len,
- unsigned Align, IRBuilder<> &B, const TargetData *TD) {
+/// expects that Len has type 'intptr_t' and Dst/Src are pointers.
+Value *llvm::EmitMemCpy(Value *Dst, Value *Src, Value *Len, unsigned Align,
+ bool isVolatile, IRBuilder<> &B, const TargetData *TD) {
+ Module *M = B.GetInsertBlock()->getParent()->getParent();
+ Dst = CastToCStr(Dst, B);
+ Src = CastToCStr(Src, B);
+ const Type *ArgTys[3] = { Dst->getType(), Src->getType(), Len->getType() };
+ Value *MemCpy = Intrinsic::getDeclaration(M, Intrinsic::memcpy, ArgTys, 3);
+ return B.CreateCall5(MemCpy, Dst, Src, Len,
+ ConstantInt::get(B.getInt32Ty(), Align),
+ ConstantInt::get(B.getInt1Ty(), isVolatile));
+}
+
+/// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder.
+/// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
+/// are pointers.
+Value *llvm::EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
+ IRBuilder<> &B, const TargetData *TD) {
Module *M = B.GetInsertBlock()->getParent()->getParent();
- const Type *Ty = Len->getType();
- Value *MemCpy = Intrinsic::getDeclaration(M, Intrinsic::memcpy, &Ty, 1);
+ AttributeWithIndex AWI;
+ AWI = AttributeWithIndex::get(~0u, Attribute::NoUnwind);
+ LLVMContext &Context = B.GetInsertBlock()->getContext();
+ Value *MemCpy = M->getOrInsertFunction("__memcpy_chk",
+ AttrListPtr::get(&AWI, 1),
+ B.getInt8PtrTy(),
+ B.getInt8PtrTy(),
+ B.getInt8PtrTy(),
+ TD->getIntPtrType(Context),
+ TD->getIntPtrType(Context), NULL);
Dst = CastToCStr(Dst, B);
Src = CastToCStr(Src, B);
- return B.CreateCall4(MemCpy, Dst, Src, Len,
- ConstantInt::get(B.getInt32Ty(), Align));
+ CallInst *CI = B.CreateCall4(MemCpy, Dst, Src, Len, ObjSize);
+ if (const Function *F = dyn_cast<Function>(MemCpy->stripPointerCasts()))
+ CI->setCallingConv(F->getCallingConv());
+ return CI;
}
/// EmitMemMove - Emit a call to the memmove function to the builder. This
/// always expects that the size has type 'intptr_t' and Dst/Src are pointers.
-Value *llvm::EmitMemMove(Value *Dst, Value *Src, Value *Len,
- unsigned Align, IRBuilder<> &B, const TargetData *TD) {
+Value *llvm::EmitMemMove(Value *Dst, Value *Src, Value *Len, unsigned Align,
+ bool isVolatile, IRBuilder<> &B, const TargetData *TD) {
Module *M = B.GetInsertBlock()->getParent()->getParent();
LLVMContext &Context = B.GetInsertBlock()->getContext();
- const Type *Ty = TD->getIntPtrType(Context);
- Value *MemMove = Intrinsic::getDeclaration(M, Intrinsic::memmove, &Ty, 1);
+ const Type *ArgTys[3] = { Dst->getType(), Src->getType(),
+ TD->getIntPtrType(Context) };
+ Value *MemMove = Intrinsic::getDeclaration(M, Intrinsic::memmove, ArgTys, 3);
Dst = CastToCStr(Dst, B);
Src = CastToCStr(Src, B);
Value *A = ConstantInt::get(B.getInt32Ty(), Align);
- return B.CreateCall4(MemMove, Dst, Src, Len, A);
+ Value *Vol = ConstantInt::get(B.getInt1Ty(), isVolatile);
+ return B.CreateCall5(MemMove, Dst, Src, Len, A, Vol);
}
/// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
@@ -162,15 +234,15 @@ Value *llvm::EmitMemCmp(Value *Ptr1, Value *Ptr2,
}
/// EmitMemSet - Emit a call to the memset function
-Value *llvm::EmitMemSet(Value *Dst, Value *Val,
- Value *Len, IRBuilder<> &B, const TargetData *TD) {
+Value *llvm::EmitMemSet(Value *Dst, Value *Val, Value *Len, bool isVolatile,
+ IRBuilder<> &B, const TargetData *TD) {
Module *M = B.GetInsertBlock()->getParent()->getParent();
Intrinsic::ID IID = Intrinsic::memset;
- const Type *Tys[1];
- Tys[0] = Len->getType();
- Value *MemSet = Intrinsic::getDeclaration(M, IID, Tys, 1);
+ const Type *Tys[2] = { Dst->getType(), Len->getType() };
+ Value *MemSet = Intrinsic::getDeclaration(M, IID, Tys, 2);
Value *Align = ConstantInt::get(B.getInt32Ty(), 1);
- return B.CreateCall4(MemSet, CastToCStr(Dst, B), Val, Len, Align);
+ Value *Vol = ConstantInt::get(B.getInt1Ty(), isVolatile);
+ return B.CreateCall5(MemSet, CastToCStr(Dst, B), Val, Len, Align, Vol);
}
/// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name' (e.g.
@@ -322,3 +394,134 @@ void llvm::EmitFWrite(Value *Ptr, Value *Size, Value *File,
if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
CI->setCallingConv(Fn->getCallingConv());
}
+
+SimplifyFortifiedLibCalls::~SimplifyFortifiedLibCalls() { }
+
+bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
+ // We really need TargetData for later.
+ if (!TD) return false;
+
+ this->CI = CI;
+ Function *Callee = CI->getCalledFunction();
+ StringRef Name = Callee->getName();
+ const FunctionType *FT = Callee->getFunctionType();
+ BasicBlock *BB = CI->getParent();
+ LLVMContext &Context = CI->getParent()->getContext();
+ IRBuilder<> B(Context);
+
+ // Set the builder to the instruction after the call.
+ B.SetInsertPoint(BB, CI);
+
+ if (Name == "__memcpy_chk") {
+ // Check if this has the right signature.
+ if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
+ !FT->getParamType(0)->isPointerTy() ||
+ !FT->getParamType(1)->isPointerTy() ||
+ FT->getParamType(2) != TD->getIntPtrType(Context) ||
+ FT->getParamType(3) != TD->getIntPtrType(Context))
+ return false;
+
+ if (isFoldable(3, 2, false)) {
+ EmitMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), 1, false, B, TD);
+ replaceCall(CI->getArgOperand(0));
+ return true;
+ }
+ return false;
+ }
+
+ // Should be similar to memcpy.
+ if (Name == "__mempcpy_chk") {
+ return false;
+ }
+
+ if (Name == "__memmove_chk") {
+ // Check if this has the right signature.
+ if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
+ !FT->getParamType(0)->isPointerTy() ||
+ !FT->getParamType(1)->isPointerTy() ||
+ FT->getParamType(2) != TD->getIntPtrType(Context) ||
+ FT->getParamType(3) != TD->getIntPtrType(Context))
+ return false;
+
+ if (isFoldable(3, 2, false)) {
+ EmitMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), 1, false, B, TD);
+ replaceCall(CI->getArgOperand(0));
+ return true;
+ }
+ return false;
+ }
+
+ if (Name == "__memset_chk") {
+ // Check if this has the right signature.
+ if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
+ !FT->getParamType(0)->isPointerTy() ||
+ !FT->getParamType(1)->isIntegerTy() ||
+ FT->getParamType(2) != TD->getIntPtrType(Context) ||
+ FT->getParamType(3) != TD->getIntPtrType(Context))
+ return false;
+
+ if (isFoldable(3, 2, false)) {
+ Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(),
+ false);
+ EmitMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2),
+ false, B, TD);
+ replaceCall(CI->getArgOperand(0));
+ return true;
+ }
+ return false;
+ }
+
+ if (Name == "__strcpy_chk" || Name == "__stpcpy_chk") {
+ // Check if this has the right signature.
+ if (FT->getNumParams() != 3 ||
+ FT->getReturnType() != FT->getParamType(0) ||
+ FT->getParamType(0) != FT->getParamType(1) ||
+ FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
+ FT->getParamType(2) != TD->getIntPtrType(Context))
+ return 0;
+
+
+ // If a) we don't have any length information, or b) we know this will
+ // fit then just lower to a plain st[rp]cpy. Otherwise we'll keep our
+ // st[rp]cpy_chk call which may fail at runtime if the size is too long.
+ // TODO: It might be nice to get a maximum length out of the possible
+ // string lengths for varying.
+ if (isFoldable(2, 1, true)) {
+ Value *Ret = EmitStrCpy(CI->getArgOperand(0), CI->getArgOperand(1), B, TD,
+ Name.substr(2, 6));
+ replaceCall(Ret);
+ return true;
+ }
+ return false;
+ }
+
+ if (Name == "__strncpy_chk" || Name == "__stpncpy_chk") {
+ // Check if this has the right signature.
+ if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
+ FT->getParamType(0) != FT->getParamType(1) ||
+ FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
+ !FT->getParamType(2)->isIntegerTy() ||
+ FT->getParamType(3) != TD->getIntPtrType(Context))
+ return false;
+
+ if (isFoldable(3, 2, false)) {
+ Value *Ret = EmitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), B, TD, Name.substr(2, 7));
+ replaceCall(Ret);
+ return true;
+ }
+ return false;
+ }
+
+ if (Name == "__strcat_chk") {
+ return false;
+ }
+
+ if (Name == "__strncat_chk") {
+ return false;
+ }
+
+ return false;
+}
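
The fold() routine above rewrites the fortified C library entry points (__memcpy_chk, __memmove_chk, __memset_chk, __st[rp]cpy_chk, __st[rp]ncpy_chk) into their plain counterparts whenever isFoldable() can prove that the constant length argument fits within the known destination object size, so the runtime overflow check could never fire anyway. A minimal sketch of the source-level contract this relies on, using a hypothetical my_memcpy_chk stand-in rather than the real C library symbol:

  #include <cstddef>
  #include <cstdio>
  #include <cstdlib>
  #include <cstring>

  // Hypothetical stand-in for the fortified entry point: abort on a provable
  // overflow of the destination object, otherwise behave exactly like memcpy.
  static void *my_memcpy_chk(void *dst, const void *src,
                             std::size_t len, std::size_t dstlen) {
    if (len > dstlen) {
      std::fprintf(stderr, "buffer overflow detected\n");
      std::abort();
    }
    return std::memcpy(dst, src, len);
  }

  int main() {
    char buf[16];
    // Both sizes are compile-time constants and 5 <= 16, which is the
    // situation isFoldable(3, 2, false) checks on the IR level; under that
    // condition the checked call can be replaced by a plain memcpy with no
    // loss of safety.
    my_memcpy_chk(buf, "test", 5, sizeof buf);
    std::puts(buf);
    return 0;
  }

The remaining _chk entry points recognized here (__mempcpy_chk, __strcat_chk, __strncat_chk) are deliberately left untouched and simply return false.
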
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/CMakeLists.txt b/libclamav/c++/llvm/lib/Transforms/Utils/CMakeLists.txt
index dec227a..61cbeb2 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/CMakeLists.txt
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/CMakeLists.txt
@@ -20,7 +20,6 @@ add_llvm_library(LLVMTransformUtils
Mem2Reg.cpp
PromoteMemoryToRegister.cpp
SSAUpdater.cpp
- SSI.cpp
SimplifyCFG.cpp
UnifyFunctionExitNodes.cpp
ValueMapper.cpp
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/CloneFunction.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/CloneFunction.cpp
index c80827d..f43186e 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -32,7 +32,7 @@ using namespace llvm;
// CloneBasicBlock - See comments in Cloning.h
BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueToValueMapTy &VMap,
const Twine &NameSuffix, Function *F,
ClonedCodeInfo *CodeInfo) {
BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "", F);
@@ -47,7 +47,7 @@ BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB,
if (II->hasName())
NewInst->setName(II->getName()+NameSuffix);
NewBB->getInstList().push_back(NewInst);
- ValueMap[II] = NewInst; // Add instruction map to value.
+ VMap[II] = NewInst; // Add instruction map to value.
hasCalls |= (isa<CallInst>(II) && !isa<DbgInfoIntrinsic>(II));
if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
@@ -69,10 +69,11 @@ BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB,
}
// Clone OldFunc into NewFunc, transforming the old arguments into references to
-// ArgMap values.
+// VMap values.
//
void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueToValueMapTy &VMap,
+ bool ModuleLevelChanges,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix, ClonedCodeInfo *CodeInfo) {
assert(NameSuffix && "NameSuffix cannot be null!");
@@ -80,17 +81,17 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
#ifndef NDEBUG
for (Function::const_arg_iterator I = OldFunc->arg_begin(),
E = OldFunc->arg_end(); I != E; ++I)
- assert(ValueMap.count(I) && "No mapping from source argument specified!");
+ assert(VMap.count(I) && "No mapping from source argument specified!");
#endif
// Clone any attributes.
if (NewFunc->arg_size() == OldFunc->arg_size())
NewFunc->copyAttributesFrom(OldFunc);
else {
- //Some arguments were deleted with the ValueMap. Copy arguments one by one
+ // Some arguments were deleted with the VMap. Copy arguments one by one.
for (Function::const_arg_iterator I = OldFunc->arg_begin(),
E = OldFunc->arg_end(); I != E; ++I)
- if (Argument* Anew = dyn_cast<Argument>(ValueMap[I]))
+ if (Argument* Anew = dyn_cast<Argument>(VMap[I]))
Anew->addAttr( OldFunc->getAttributes()
.getParamAttributes(I->getArgNo() + 1));
NewFunc->setAttributes(NewFunc->getAttributes()
@@ -111,43 +112,44 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
const BasicBlock &BB = *BI;
// Create a new basic block and copy instructions into it!
- BasicBlock *CBB = CloneBasicBlock(&BB, ValueMap, NameSuffix, NewFunc,
+ BasicBlock *CBB = CloneBasicBlock(&BB, VMap, NameSuffix, NewFunc,
CodeInfo);
- ValueMap[&BB] = CBB; // Add basic block mapping.
+ VMap[&BB] = CBB; // Add basic block mapping.
if (ReturnInst *RI = dyn_cast<ReturnInst>(CBB->getTerminator()))
Returns.push_back(RI);
}
// Loop over all of the instructions in the function, fixing up operand
- // references as we go. This uses ValueMap to do all the hard work.
+ // references as we go. This uses VMap to do all the hard work.
//
- for (Function::iterator BB = cast<BasicBlock>(ValueMap[OldFunc->begin()]),
+ for (Function::iterator BB = cast<BasicBlock>(VMap[OldFunc->begin()]),
BE = NewFunc->end(); BB != BE; ++BB)
// Loop over all instructions, fixing each one as we find it...
for (BasicBlock::iterator II = BB->begin(); II != BB->end(); ++II)
- RemapInstruction(II, ValueMap);
+ RemapInstruction(II, VMap, ModuleLevelChanges);
}
/// CloneFunction - Return a copy of the specified function, but without
/// embedding the function into another module. Also, any references specified
-/// in the ValueMap are changed to refer to their mapped value instead of the
-/// original one. If any of the arguments to the function are in the ValueMap,
-/// the arguments are deleted from the resultant function. The ValueMap is
+/// in the VMap are changed to refer to their mapped value instead of the
+/// original one. If any of the arguments to the function are in the VMap,
+/// the arguments are deleted from the resultant function. The VMap is
/// updated to include mappings from all of the instructions and basicblocks in
/// the function from their old to new values.
///
Function *llvm::CloneFunction(const Function *F,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueToValueMapTy &VMap,
+ bool ModuleLevelChanges,
ClonedCodeInfo *CodeInfo) {
std::vector<const Type*> ArgTypes;
// The user might be deleting arguments to the function by specifying them in
- // the ValueMap. If so, we need to not add the arguments to the arg ty vector
+ // the VMap. If so, we need to not add the arguments to the arg ty vector
//
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
I != E; ++I)
- if (ValueMap.count(I) == 0) // Haven't mapped the argument to anything yet?
+ if (VMap.count(I) == 0) // Haven't mapped the argument to anything yet?
ArgTypes.push_back(I->getType());
// Create a new function type...
@@ -161,13 +163,13 @@ Function *llvm::CloneFunction(const Function *F,
Function::arg_iterator DestI = NewF->arg_begin();
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
I != E; ++I)
- if (ValueMap.count(I) == 0) { // Is this argument preserved?
+ if (VMap.count(I) == 0) { // Is this argument preserved?
DestI->setName(I->getName()); // Copy the name over...
- ValueMap[I] = DestI++; // Add mapping to ValueMap
+ VMap[I] = DestI++; // Add mapping to VMap
}
SmallVector<ReturnInst*, 8> Returns; // Ignore returns cloned.
- CloneFunctionInto(NewF, F, ValueMap, Returns, "", CodeInfo);
+ CloneFunctionInto(NewF, F, VMap, ModuleLevelChanges, Returns, "", CodeInfo);
return NewF;
}
@@ -179,20 +181,23 @@ namespace {
struct PruningFunctionCloner {
Function *NewFunc;
const Function *OldFunc;
- DenseMap<const Value*, Value*> &ValueMap;
+ ValueToValueMapTy &VMap;
+ bool ModuleLevelChanges;
SmallVectorImpl<ReturnInst*> &Returns;
const char *NameSuffix;
ClonedCodeInfo *CodeInfo;
const TargetData *TD;
public:
PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
- DenseMap<const Value*, Value*> &valueMap,
+ ValueToValueMapTy &valueMap,
+ bool moduleLevelChanges,
SmallVectorImpl<ReturnInst*> &returns,
const char *nameSuffix,
ClonedCodeInfo *codeInfo,
const TargetData *td)
- : NewFunc(newFunc), OldFunc(oldFunc), ValueMap(valueMap), Returns(returns),
- NameSuffix(nameSuffix), CodeInfo(codeInfo), TD(td) {
+ : NewFunc(newFunc), OldFunc(oldFunc),
+ VMap(valueMap), ModuleLevelChanges(moduleLevelChanges),
+ Returns(returns), NameSuffix(nameSuffix), CodeInfo(codeInfo), TD(td) {
}
/// CloneBlock - The specified block is found to be reachable, clone it and
@@ -202,7 +207,7 @@ namespace {
public:
/// ConstantFoldMappedInstruction - Constant fold the specified instruction,
- /// mapping its operands through ValueMap if they are available.
+ /// mapping its operands through VMap if they are available.
Constant *ConstantFoldMappedInstruction(const Instruction *I);
};
}
@@ -211,7 +216,7 @@ namespace {
/// anything that it can reach.
void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
std::vector<const BasicBlock*> &ToClone){
- Value *&BBEntry = ValueMap[BB];
+ Value *&BBEntry = VMap[BB];
// Have we already cloned this block?
if (BBEntry) return;
@@ -230,7 +235,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
// If this instruction constant folds, don't bother cloning the instruction,
// instead, just add the constant to the value map.
if (Constant *C = ConstantFoldMappedInstruction(II)) {
- ValueMap[II] = C;
+ VMap[II] = C;
continue;
}
@@ -238,7 +243,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
if (II->hasName())
NewInst->setName(II->getName()+NameSuffix);
NewBB->getInstList().push_back(NewInst);
- ValueMap[II] = NewInst; // Add instruction map to value.
+ VMap[II] = NewInst; // Add instruction map to value.
hasCalls |= (isa<CallInst>(II) && !isa<DbgInfoIntrinsic>(II));
if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
@@ -258,12 +263,12 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
// Or is a known constant in the caller...
if (Cond == 0)
- Cond = dyn_cast_or_null<ConstantInt>(ValueMap[BI->getCondition()]);
+ Cond = dyn_cast_or_null<ConstantInt>(VMap[BI->getCondition()]);
// Constant fold to uncond branch!
if (Cond) {
BasicBlock *Dest = BI->getSuccessor(!Cond->getZExtValue());
- ValueMap[OldTI] = BranchInst::Create(Dest, NewBB);
+ VMap[OldTI] = BranchInst::Create(Dest, NewBB);
ToClone.push_back(Dest);
TerminatorDone = true;
}
@@ -272,10 +277,10 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
// If switching on a value known constant in the caller.
ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition());
if (Cond == 0) // Or known constant after constant prop in the callee...
- Cond = dyn_cast_or_null<ConstantInt>(ValueMap[SI->getCondition()]);
+ Cond = dyn_cast_or_null<ConstantInt>(VMap[SI->getCondition()]);
if (Cond) { // Constant fold to uncond branch!
BasicBlock *Dest = SI->getSuccessor(SI->findCaseValue(Cond));
- ValueMap[OldTI] = BranchInst::Create(Dest, NewBB);
+ VMap[OldTI] = BranchInst::Create(Dest, NewBB);
ToClone.push_back(Dest);
TerminatorDone = true;
}
@@ -286,7 +291,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
if (OldTI->hasName())
NewInst->setName(OldTI->getName()+NameSuffix);
NewBB->getInstList().push_back(NewInst);
- ValueMap[OldTI] = NewInst; // Add instruction map to value.
+ VMap[OldTI] = NewInst; // Add instruction map to value.
// Recursively clone any reachable successor blocks.
const TerminatorInst *TI = BB->getTerminator();
@@ -307,13 +312,13 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
}
/// ConstantFoldMappedInstruction - Constant fold the specified instruction,
-/// mapping its operands through ValueMap if they are available.
+/// mapping its operands through VMap if they are available.
Constant *PruningFunctionCloner::
ConstantFoldMappedInstruction(const Instruction *I) {
SmallVector<Constant*, 8> Ops;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
if (Constant *Op = dyn_cast_or_null<Constant>(MapValue(I->getOperand(i),
- ValueMap)))
+ VMap, ModuleLevelChanges)))
Ops.push_back(Op);
else
return 0; // All operands not constant!
@@ -334,25 +339,16 @@ ConstantFoldMappedInstruction(const Instruction *I) {
Ops.size(), TD);
}
-static MDNode *UpdateInlinedAtInfo(MDNode *InsnMD, MDNode *TheCallMD) {
- DILocation ILoc(InsnMD);
- if (ILoc.isNull()) return InsnMD;
+static DebugLoc
+UpdateInlinedAtInfo(const DebugLoc &InsnDL, const DebugLoc &TheCallDL,
+ LLVMContext &Ctx) {
+ DebugLoc NewLoc = TheCallDL;
+ if (MDNode *IA = InsnDL.getInlinedAt(Ctx))
+ NewLoc = UpdateInlinedAtInfo(DebugLoc::getFromDILocation(IA), TheCallDL,
+ Ctx);
- DILocation CallLoc(TheCallMD);
- if (CallLoc.isNull()) return InsnMD;
-
- DILocation OrigLocation = ILoc.getOrigLocation();
- MDNode *NewLoc = TheCallMD;
- if (!OrigLocation.isNull())
- NewLoc = UpdateInlinedAtInfo(OrigLocation.getNode(), TheCallMD);
-
- Value *MDVs[] = {
- InsnMD->getOperand(0), // Line
- InsnMD->getOperand(1), // Col
- InsnMD->getOperand(2), // Scope
- NewLoc
- };
- return MDNode::get(InsnMD->getContext(), MDVs, 4);
+ return DebugLoc::get(InsnDL.getLine(), InsnDL.getCol(),
+ InsnDL.getScope(Ctx), NewLoc.getAsMDNode(Ctx));
}
/// CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto,
@@ -363,7 +359,8 @@ static MDNode *UpdateInlinedAtInfo(MDNode *InsnMD, MDNode *TheCallMD) {
/// dead. Since this doesn't produce an exact copy of the input, it can't be
/// used for things like CloneFunction or CloneModule.
void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueToValueMapTy &VMap,
+ bool ModuleLevelChanges,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix,
ClonedCodeInfo *CodeInfo,
@@ -374,11 +371,11 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
#ifndef NDEBUG
for (Function::const_arg_iterator II = OldFunc->arg_begin(),
E = OldFunc->arg_end(); II != E; ++II)
- assert(ValueMap.count(II) && "No mapping from source argument specified!");
+ assert(VMap.count(II) && "No mapping from source argument specified!");
#endif
- PruningFunctionCloner PFC(NewFunc, OldFunc, ValueMap, Returns,
- NameSuffix, CodeInfo, TD);
+ PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, ModuleLevelChanges,
+ Returns, NameSuffix, CodeInfo, TD);
// Clone the entry block, and anything recursively reachable from it.
std::vector<const BasicBlock*> CloneWorklist;
@@ -397,21 +394,20 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
SmallVector<const PHINode*, 16> PHIToResolve;
for (Function::const_iterator BI = OldFunc->begin(), BE = OldFunc->end();
BI != BE; ++BI) {
- BasicBlock *NewBB = cast_or_null<BasicBlock>(ValueMap[BI]);
+ BasicBlock *NewBB = cast_or_null<BasicBlock>(VMap[BI]);
if (NewBB == 0) continue; // Dead block.
// Add the new block to the new function.
NewFunc->getBasicBlockList().push_back(NewBB);
// Loop over all of the instructions in the block, fixing up operand
- // references as we go. This uses ValueMap to do all the hard work.
+ // references as we go. This uses VMap to do all the hard work.
//
BasicBlock::iterator I = NewBB->begin();
- unsigned DbgKind = OldFunc->getContext().getMDKindID("dbg");
- MDNode *TheCallMD = NULL;
- if (TheCall && TheCall->hasMetadata())
- TheCallMD = TheCall->getMetadata(DbgKind);
+ DebugLoc TheCallDL;
+ if (TheCall)
+ TheCallDL = TheCall->getDebugLoc();
// Handle PHI nodes specially, as we have to remove references to dead
// blocks.
@@ -420,15 +416,17 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
BasicBlock::const_iterator OldI = BI->begin();
for (; (PN = dyn_cast<PHINode>(I)); ++I, ++OldI) {
if (I->hasMetadata()) {
- if (TheCallMD) {
- if (MDNode *IMD = I->getMetadata(DbgKind)) {
- MDNode *NewMD = UpdateInlinedAtInfo(IMD, TheCallMD);
- I->setMetadata(DbgKind, NewMD);
+ if (!TheCallDL.isUnknown()) {
+ DebugLoc IDL = I->getDebugLoc();
+ if (!IDL.isUnknown()) {
+ DebugLoc NewDL = UpdateInlinedAtInfo(IDL, TheCallDL,
+ I->getContext());
+ I->setDebugLoc(NewDL);
}
} else {
// The cloned instruction has dbg info but the call instruction
// does not have dbg info. Remove dbg info from cloned instruction.
- I->setMetadata(DbgKind, 0);
+ I->setDebugLoc(DebugLoc());
}
}
PHIToResolve.push_back(cast<PHINode>(OldI));
@@ -444,18 +442,20 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
// Otherwise, remap the rest of the instructions normally.
for (; I != NewBB->end(); ++I) {
if (I->hasMetadata()) {
- if (TheCallMD) {
- if (MDNode *IMD = I->getMetadata(DbgKind)) {
- MDNode *NewMD = UpdateInlinedAtInfo(IMD, TheCallMD);
- I->setMetadata(DbgKind, NewMD);
+ if (!TheCallDL.isUnknown()) {
+ DebugLoc IDL = I->getDebugLoc();
+ if (!IDL.isUnknown()) {
+ DebugLoc NewDL = UpdateInlinedAtInfo(IDL, TheCallDL,
+ I->getContext());
+ I->setDebugLoc(NewDL);
}
} else {
// The cloned instruction has dbg info but the call instruction
// does not have dbg info. Remove dbg info from cloned instruction.
- I->setMetadata(DbgKind, 0);
+ I->setDebugLoc(DebugLoc());
}
}
- RemapInstruction(I, ValueMap);
+ RemapInstruction(I, VMap, ModuleLevelChanges);
}
}
@@ -465,19 +465,19 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
const PHINode *OPN = PHIToResolve[phino];
unsigned NumPreds = OPN->getNumIncomingValues();
const BasicBlock *OldBB = OPN->getParent();
- BasicBlock *NewBB = cast<BasicBlock>(ValueMap[OldBB]);
+ BasicBlock *NewBB = cast<BasicBlock>(VMap[OldBB]);
// Map operands for blocks that are live and remove operands for blocks
// that are dead.
for (; phino != PHIToResolve.size() &&
PHIToResolve[phino]->getParent() == OldBB; ++phino) {
OPN = PHIToResolve[phino];
- PHINode *PN = cast<PHINode>(ValueMap[OPN]);
+ PHINode *PN = cast<PHINode>(VMap[OPN]);
for (unsigned pred = 0, e = NumPreds; pred != e; ++pred) {
if (BasicBlock *MappedBlock =
- cast_or_null<BasicBlock>(ValueMap[PN->getIncomingBlock(pred)])) {
+ cast_or_null<BasicBlock>(VMap[PN->getIncomingBlock(pred)])) {
Value *InVal = MapValue(PN->getIncomingValue(pred),
- ValueMap);
+ VMap, ModuleLevelChanges);
assert(InVal && "Unknown input value?");
PN->setIncomingValue(pred, InVal);
PN->setIncomingBlock(pred, MappedBlock);
@@ -531,15 +531,15 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
while ((PN = dyn_cast<PHINode>(I++))) {
Value *NV = UndefValue::get(PN->getType());
PN->replaceAllUsesWith(NV);
- assert(ValueMap[OldI] == PN && "ValueMap mismatch");
- ValueMap[OldI] = NV;
+ assert(VMap[OldI] == PN && "VMap mismatch");
+ VMap[OldI] = NV;
PN->eraseFromParent();
++OldI;
}
}
// NOTE: We cannot eliminate single entry phi nodes here, because of
- // ValueMap. Single entry phi nodes can have multiple ValueMap entries
- // pointing at them. Thus, deleting one would require scanning the ValueMap
+ // VMap. Single entry phi nodes can have multiple VMap entries
+ // pointing at them. Thus, deleting one would require scanning the VMap
// to update any entries in it that would require that. This would be
// really slow.
}
@@ -548,14 +548,14 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
// and zap unconditional fall-through branches. This happens all the time when
// specializing code: code specialization turns conditional branches into
// uncond branches, and this code folds them.
- Function::iterator I = cast<BasicBlock>(ValueMap[&OldFunc->getEntryBlock()]);
+ Function::iterator I = cast<BasicBlock>(VMap[&OldFunc->getEntryBlock()]);
while (I != NewFunc->end()) {
BranchInst *BI = dyn_cast<BranchInst>(I->getTerminator());
if (!BI || BI->isConditional()) { ++I; continue; }
// Note that we can't eliminate uncond branches if the destination has
// single-entry PHI nodes. Eliminating the single-entry phi nodes would
- // require scanning the ValueMap to update any entries that point to the phi
+ // require scanning the VMap to update any entries that point to the phi
// node.
BasicBlock *Dest = BI->getSuccessor(0);
if (!Dest->getSinglePredecessor() || isa<PHINode>(Dest->begin())) {
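
Throughout the CloneFunction.cpp diff above, the DenseMap<const Value*, Value*> parameter becomes the ValueToValueMapTy VMap and a ModuleLevelChanges flag is threaded down into RemapInstruction()/MapValue(); the underlying algorithm is unchanged: clone each entity while recording the old-to-new pairing in the map, then walk the clones and rewrite their operands through that map. A rough, LLVM-free analogue of that two-step clone (Node and cloneGraph are made-up names, not LLVM API):

  #include <cstdio>
  #include <map>
  #include <memory>
  #include <vector>

  struct Node {
    int id;
    std::vector<Node *> operands;   // references to other nodes
  };

  // Step 1: clone every node, recording old -> new in the map (the "VMap").
  // Step 2: walk the clones and rewrite their operands through the map, which
  // is the same job RemapInstruction() does after CloneBasicBlock().
  static std::vector<std::unique_ptr<Node> >
  cloneGraph(const std::vector<Node *> &olds) {
    std::map<const Node *, Node *> vmap;
    std::vector<std::unique_ptr<Node> > clones;
    for (const Node *o : olds) {
      clones.push_back(std::unique_ptr<Node>(new Node{o->id, o->operands}));
      vmap[o] = clones.back().get();
    }
    for (auto &c : clones)
      for (Node *&op : c->operands) {
        auto it = vmap.find(op);
        if (it != vmap.end())       // values outside the cloned set stay as-is
          op = it->second;
      }
    return clones;
  }

  int main() {
    Node a{1, {}};
    Node b{2, {&a}};
    auto clones = cloneGraph({&a, &b});
    std::printf("clone of b points at clone of a: %s\n",
                clones[1]->operands[0] == clones[0].get() ? "yes" : "no");
    return 0;
  }

Leaving unmapped operands alone, as the sketch does for nodes outside the cloned set, is roughly the decision the new ModuleLevelChanges flag makes for module-level values in the real code.
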
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/CloneLoop.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/CloneLoop.cpp
index 38928dc..551b630 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/CloneLoop.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/CloneLoop.cpp
@@ -15,7 +15,6 @@
#include "llvm/BasicBlock.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/Dominators.h"
-#include "llvm/ADT/DenseMap.h"
using namespace llvm;
@@ -23,13 +22,13 @@ using namespace llvm;
/// CloneDominatorInfo - Clone a basic block's dominator tree and, if available,
/// its dominance info. It is expected that the basic block is already cloned.
static void CloneDominatorInfo(BasicBlock *BB,
- DenseMap<const Value *, Value *> &ValueMap,
+ ValueMap<const Value *, Value *> &VMap,
DominatorTree *DT,
DominanceFrontier *DF) {
assert (DT && "DominatorTree is not available");
- DenseMap<const Value *, Value*>::iterator BI = ValueMap.find(BB);
- assert (BI != ValueMap.end() && "BasicBlock clone is missing");
+ ValueMap<const Value *, Value*>::iterator BI = VMap.find(BB);
+ assert (BI != VMap.end() && "BasicBlock clone is missing");
BasicBlock *NewBB = cast<BasicBlock>(BI->second);
// NewBB already got dominator info.
@@ -43,11 +42,11 @@ static void CloneDominatorInfo(BasicBlock *BB,
// NewBB's dominator is either BB's dominator or BB's dominator's clone.
BasicBlock *NewBBDom = BBDom;
- DenseMap<const Value *, Value*>::iterator BBDomI = ValueMap.find(BBDom);
- if (BBDomI != ValueMap.end()) {
+ ValueMap<const Value *, Value*>::iterator BBDomI = VMap.find(BBDom);
+ if (BBDomI != VMap.end()) {
NewBBDom = cast<BasicBlock>(BBDomI->second);
if (!DT->getNode(NewBBDom))
- CloneDominatorInfo(BBDom, ValueMap, DT, DF);
+ CloneDominatorInfo(BBDom, VMap, DT, DF);
}
DT->addNewBlock(NewBB, NewBBDom);
@@ -60,8 +59,8 @@ static void CloneDominatorInfo(BasicBlock *BB,
for (DominanceFrontier::DomSetType::iterator I = S.begin(), E = S.end();
I != E; ++I) {
BasicBlock *DB = *I;
- DenseMap<const Value*, Value*>::iterator IDM = ValueMap.find(DB);
- if (IDM != ValueMap.end())
+ ValueMap<const Value*, Value*>::iterator IDM = VMap.find(DB);
+ if (IDM != VMap.end())
NewDFSet.insert(cast<BasicBlock>(IDM->second));
else
NewDFSet.insert(DB);
@@ -71,10 +70,10 @@ static void CloneDominatorInfo(BasicBlock *BB,
}
}
-/// CloneLoop - Clone Loop. Clone dominator info. Populate ValueMap
+/// CloneLoop - Clone Loop. Clone dominator info. Populate VMap
/// using old blocks to new blocks mapping.
Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
- DenseMap<const Value *, Value *> &ValueMap, Pass *P) {
+ ValueMap<const Value *, Value *> &VMap, Pass *P) {
DominatorTree *DT = NULL;
DominanceFrontier *DF = NULL;
@@ -104,8 +103,8 @@ Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
I != E; ++I) {
BasicBlock *BB = *I;
- BasicBlock *NewBB = CloneBasicBlock(BB, ValueMap, ".clone");
- ValueMap[BB] = NewBB;
+ BasicBlock *NewBB = CloneBasicBlock(BB, VMap, ".clone");
+ VMap[BB] = NewBB;
if (P)
LPM->cloneBasicBlockSimpleAnalysis(BB, NewBB, L);
NewLoop->addBasicBlockToLoop(NewBB, LI->getBase());
@@ -117,7 +116,7 @@ Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
I != E; ++I) {
BasicBlock *BB = *I;
- CloneDominatorInfo(BB, ValueMap, DT, DF);
+ CloneDominatorInfo(BB, VMap, DT, DF);
}
// Process sub loops
@@ -125,7 +124,7 @@ Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
LoopNest.push_back(*I);
} while (!LoopNest.empty());
- // Remap instructions to reference operands from ValueMap.
+ // Remap instructions to reference operands from VMap.
for(SmallVector<BasicBlock *, 16>::iterator NBItr = NewBlocks.begin(),
NBE = NewBlocks.end(); NBItr != NBE; ++NBItr) {
BasicBlock *NB = *NBItr;
@@ -135,8 +134,8 @@ Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
for (unsigned index = 0, num_ops = Insn->getNumOperands();
index != num_ops; ++index) {
Value *Op = Insn->getOperand(index);
- DenseMap<const Value *, Value *>::iterator OpItr = ValueMap.find(Op);
- if (OpItr != ValueMap.end())
+ ValueMap<const Value *, Value *>::iterator OpItr = VMap.find(Op);
+ if (OpItr != VMap.end())
Insn->setOperand(index, OpItr->second);
}
}
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/CloneModule.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/CloneModule.cpp
index a163f89..b347bf5 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/CloneModule.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/CloneModule.cpp
@@ -28,12 +28,12 @@ using namespace llvm;
Module *llvm::CloneModule(const Module *M) {
// Create the value map that maps things from the old module over to the new
// module.
- DenseMap<const Value*, Value*> ValueMap;
- return CloneModule(M, ValueMap);
+ ValueToValueMapTy VMap;
+ return CloneModule(M, VMap);
}
Module *llvm::CloneModule(const Module *M,
- DenseMap<const Value*, Value*> &ValueMap) {
+ ValueToValueMapTy &VMap) {
// First off, we need to create the new module...
Module *New = new Module(M->getModuleIdentifier(), M->getContext());
New->setDataLayout(M->getDataLayout());
@@ -51,7 +51,7 @@ Module *llvm::CloneModule(const Module *M,
New->addLibrary(*I);
// Loop over all of the global variables, making corresponding globals in the
- // new module. Here we add them to the ValueMap and to the new Module. We
+ // new module. Here we add them to the VMap and to the new Module. We
// don't worry about attributes or initializers, they will come later.
//
for (Module::const_global_iterator I = M->global_begin(), E = M->global_end();
@@ -62,7 +62,7 @@ Module *llvm::CloneModule(const Module *M,
GlobalValue::ExternalLinkage, 0,
I->getName());
GV->setAlignment(I->getAlignment());
- ValueMap[I] = GV;
+ VMap[I] = GV;
}
// Loop over the functions in the module, making external functions as before
@@ -71,13 +71,13 @@ Module *llvm::CloneModule(const Module *M,
Function::Create(cast<FunctionType>(I->getType()->getElementType()),
GlobalValue::ExternalLinkage, I->getName(), New);
NF->copyAttributesFrom(I);
- ValueMap[I] = NF;
+ VMap[I] = NF;
}
// Loop over the aliases in the module
for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
I != E; ++I)
- ValueMap[I] = new GlobalAlias(I->getType(), GlobalAlias::ExternalLinkage,
+ VMap[I] = new GlobalAlias(I->getType(), GlobalAlias::ExternalLinkage,
I->getName(), NULL, New);
// Now that all of the things that a global variable initializer can refer to
@@ -86,10 +86,11 @@ Module *llvm::CloneModule(const Module *M,
//
for (Module::const_global_iterator I = M->global_begin(), E = M->global_end();
I != E; ++I) {
- GlobalVariable *GV = cast<GlobalVariable>(ValueMap[I]);
+ GlobalVariable *GV = cast<GlobalVariable>(VMap[I]);
if (I->hasInitializer())
GV->setInitializer(cast<Constant>(MapValue(I->getInitializer(),
- ValueMap)));
+ VMap,
+ true)));
GV->setLinkage(I->getLinkage());
GV->setThreadLocal(I->isThreadLocal());
GV->setConstant(I->isConstant());
@@ -98,17 +99,17 @@ Module *llvm::CloneModule(const Module *M,
// Similarly, copy over function bodies now...
//
for (Module::const_iterator I = M->begin(), E = M->end(); I != E; ++I) {
- Function *F = cast<Function>(ValueMap[I]);
+ Function *F = cast<Function>(VMap[I]);
if (!I->isDeclaration()) {
Function::arg_iterator DestI = F->arg_begin();
for (Function::const_arg_iterator J = I->arg_begin(); J != I->arg_end();
++J) {
DestI->setName(J->getName());
- ValueMap[J] = DestI++;
+ VMap[J] = DestI++;
}
SmallVector<ReturnInst*, 8> Returns; // Ignore returns cloned.
- CloneFunctionInto(F, I, ValueMap, Returns);
+ CloneFunctionInto(F, I, VMap, /*ModuleLevelChanges=*/true, Returns);
}
F->setLinkage(I->getLinkage());
@@ -117,11 +118,20 @@ Module *llvm::CloneModule(const Module *M,
// And aliases
for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
I != E; ++I) {
- GlobalAlias *GA = cast<GlobalAlias>(ValueMap[I]);
+ GlobalAlias *GA = cast<GlobalAlias>(VMap[I]);
GA->setLinkage(I->getLinkage());
if (const Constant* C = I->getAliasee())
- GA->setAliasee(cast<Constant>(MapValue(C, ValueMap)));
+ GA->setAliasee(cast<Constant>(MapValue(C, VMap, true)));
}
-
+
+ // And named metadata....
+ for (Module::const_named_metadata_iterator I = M->named_metadata_begin(),
+ E = M->named_metadata_end(); I != E; ++I) {
+ const NamedMDNode &NMD = *I;
+ NamedMDNode *NewNMD = New->getOrInsertNamedMetadata(NMD.getName());
+ for (unsigned i = 0, e = NMD.getNumOperands(); i != e; ++i)
+ NewNMD->addOperand(cast<MDNode>(MapValue(NMD.getOperand(i), VMap, true)));
+ }
+
return New;
}
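
CloneModule keeps its two-pass structure: pass one creates bare declarations for every global, function and alias while filling VMap, and pass two copies initializers, bodies and aliasees through MapValue() so that cross references, including mutually referring globals, resolve against the clones; the new loop at the end extends the same treatment to named metadata. A small non-LLVM sketch of why the declaration pass must finish before anything is deep-copied (Global and cloneAll are hypothetical names):

  #include <cstddef>
  #include <cstdio>
  #include <map>
  #include <memory>
  #include <string>
  #include <vector>

  struct Global {
    std::string name;
    Global *initializer;            // may refer to another global, even a later one
  };

  static std::vector<std::unique_ptr<Global> >
  cloneAll(const std::vector<Global *> &olds) {
    std::map<const Global *, Global *> vmap;
    std::vector<std::unique_ptr<Global> > news;
    // Pass 1: declarations only, so every old global has a clone in the map
    // before any initializer is examined.
    for (const Global *g : olds) {
      news.push_back(std::unique_ptr<Global>(new Global{g->name, nullptr}));
      vmap[g] = news.back().get();
    }
    // Pass 2: initializers can now be remapped even when they refer "forward".
    for (std::size_t i = 0; i != olds.size(); ++i)
      if (olds[i]->initializer)
        news[i]->initializer = vmap.at(olds[i]->initializer);
    return news;
  }

  int main() {
    Global a{"a", nullptr};
    Global b{"b", nullptr};
    a.initializer = &b;
    b.initializer = &a;             // a single pass could not resolve this cycle
    auto copy = cloneAll({&a, &b});
    std::printf("cycle preserved among the clones: %s\n",
                (copy[0]->initializer == copy[1].get() &&
                 copy[1]->initializer == copy[0].get()) ? "yes" : "no");
    return 0;
  }

The named metadata loop reuses the same machinery: each operand is pushed through MapValue() with ModuleLevelChanges set to true so that references to cloned globals and functions are rewritten.
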
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/CodeExtractor.cpp
index b208494..b51f751 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -751,7 +751,7 @@ ExtractCodeRegion(const std::vector<BasicBlock*> &code) {
// verifyFunction(*oldFunction);
DEBUG(if (verifyFunction(*newFunction))
- llvm_report_error("verifyFunction failed!"));
+ report_fatal_error("verifyFunction failed!"));
return newFunction;
}
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
index c908b4a..8e82a02 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
@@ -35,7 +35,7 @@ AllocaInst* llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
I.eraseFromParent();
return 0;
}
-
+
// Create a stack slot to hold the value.
AllocaInst *Slot;
if (AllocaPoint) {
@@ -46,7 +46,7 @@ AllocaInst* llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
Slot = new AllocaInst(I.getType(), 0, I.getName()+".reg2mem",
F->getEntryBlock().begin());
}
-
+
// Change all of the users of the instruction to read from the stack slot
// instead.
while (!I.use_empty()) {
@@ -67,7 +67,7 @@ AllocaInst* llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
Value *&V = Loads[PN->getIncomingBlock(i)];
if (V == 0) {
// Insert the load into the predecessor block
- V = new LoadInst(Slot, I.getName()+".reload", VolatileLoads,
+ V = new LoadInst(Slot, I.getName()+".reload", VolatileLoads,
PN->getIncomingBlock(i)->getTerminator());
}
PN->setIncomingValue(i, V);
@@ -110,8 +110,8 @@ AllocaInst* llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
/// The phi node is deleted and it returns the pointer to the alloca inserted.
AllocaInst* llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
if (P->use_empty()) {
- P->eraseFromParent();
- return 0;
+ P->eraseFromParent();
+ return 0;
}
// Create a stack slot to hold the value.
@@ -124,23 +124,23 @@ AllocaInst* llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
Slot = new AllocaInst(P->getType(), 0, P->getName()+".reg2mem",
F->getEntryBlock().begin());
}
-
+
// Iterate over each operand, insert store in each predecessor.
for (unsigned i = 0, e = P->getNumIncomingValues(); i < e; ++i) {
if (InvokeInst *II = dyn_cast<InvokeInst>(P->getIncomingValue(i))) {
- assert(II->getParent() != P->getIncomingBlock(i) &&
+ assert(II->getParent() != P->getIncomingBlock(i) &&
"Invoke edge not supported yet"); II=II;
}
- new StoreInst(P->getIncomingValue(i), Slot,
+ new StoreInst(P->getIncomingValue(i), Slot,
P->getIncomingBlock(i)->getTerminator());
}
-
+
// Insert load in place of the phi and replace all uses.
Value *V = new LoadInst(Slot, P->getName()+".reload", P);
P->replaceAllUsesWith(V);
-
+
// Delete phi.
P->eraseFromParent();
-
+
return Slot;
}
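
The DemoteRegToStack.cpp changes above are whitespace cleanup only, but the functions they touch deserve a gloss: DemoteRegToStack and DemotePHIToStack implement the reg2mem transformation described in their comments, giving the value a stack slot, storing into it at every definition or incoming edge, and loading from it at every use so the phi node disappears. Expressed at the source level the effect is roughly the following (illustrative only, no LLVM API involved):

  #include <cstdio>

  // What DemotePHIToStack does to "phi [a, if.true], [b, if.false]", spelled
  // out in C++: 'slot' plays the role of the alloca created in the entry
  // block, the assignments are the stores inserted in each predecessor, and
  // the final read is the load that replaces the phi.
  static int selectDemoted(bool cond, int a, int b) {
    int slot;          // the alloca in the entry block
    if (cond)
      slot = a;        // store at the end of the true predecessor
    else
      slot = b;        // store at the end of the false predecessor
    return slot;       // load in place of the phi; all former uses read this
  }

  int main() {
    std::printf("%d %d\n", selectDemoted(true, 1, 2), selectDemoted(false, 1, 2));
    return 0;
  }
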
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/InlineFunction.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 17f8827..88979e8 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -15,7 +15,6 @@
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
-#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
@@ -29,13 +28,11 @@
#include "llvm/Support/CallSite.h"
using namespace llvm;
-bool llvm::InlineFunction(CallInst *CI, CallGraph *CG, const TargetData *TD,
- SmallVectorImpl<AllocaInst*> *StaticAllocas) {
- return InlineFunction(CallSite(CI), CG, TD, StaticAllocas);
+bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI) {
+ return InlineFunction(CallSite(CI), IFI);
}
-bool llvm::InlineFunction(InvokeInst *II, CallGraph *CG, const TargetData *TD,
- SmallVectorImpl<AllocaInst*> *StaticAllocas) {
- return InlineFunction(CallSite(II), CG, TD, StaticAllocas);
+bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI) {
+ return InlineFunction(CallSite(II), IFI);
}
@@ -66,7 +63,8 @@ static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
// Next, create the new invoke instruction, inserting it at the end
// of the old basic block.
- SmallVector<Value*, 8> InvokeArgs(CI->op_begin()+1, CI->op_end());
+ ImmutableCallSite CS(CI);
+ SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
InvokeInst *II =
InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
InvokeArgs.begin(), InvokeArgs.end(),
@@ -75,7 +73,7 @@ static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
II->setAttributes(CI->getAttributes());
// Make sure that anything using the call now uses the invoke! This also
- // updates the CallGraph if present.
+ // updates the CallGraph if present, because it uses a WeakVH.
CI->replaceAllUsesWith(II);
// Delete the unconditional branch inserted by splitBasicBlock
@@ -172,8 +170,9 @@ static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
Function::iterator FirstNewBlock,
- DenseMap<const Value*, Value*> &ValueMap,
- CallGraph &CG) {
+ ValueMap<const Value*, Value*> &VMap,
+ InlineFunctionInfo &IFI) {
+ CallGraph &CG = *IFI.CG;
const Function *Caller = CS.getInstruction()->getParent()->getParent();
const Function *Callee = CS.getCalledFunction();
CallGraphNode *CalleeNode = CG[Callee];
@@ -194,15 +193,34 @@ static void UpdateCallGraphAfterInlining(CallSite CS,
for (; I != E; ++I) {
const Value *OrigCall = I->first;
- DenseMap<const Value*, Value*>::iterator VMI = ValueMap.find(OrigCall);
+ ValueMap<const Value*, Value*>::iterator VMI = VMap.find(OrigCall);
// Only copy the edge if the call was inlined!
- if (VMI == ValueMap.end() || VMI->second == 0)
+ if (VMI == VMap.end() || VMI->second == 0)
continue;
// If the call was inlined, but then constant folded, there is no edge to
// add. Check for this case.
- if (Instruction *NewCall = dyn_cast<Instruction>(VMI->second))
- CallerNode->addCalledFunction(CallSite::get(NewCall), I->second);
+ Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
+ if (NewCall == 0) continue;
+
+ // Remember that this call site got inlined for the client of
+ // InlineFunction.
+ IFI.InlinedCalls.push_back(NewCall);
+
+ // It's possible that inlining the callsite will cause it to go from an
+ // indirect to a direct call by resolving a function pointer. If this
+ // happens, set the callee of the new call site to a more precise
+ // destination. This can also happen if the call graph node of the caller
+ // was just unnecessarily imprecise.
+ if (I->second->getFunction() == 0)
+ if (Function *F = CallSite(NewCall).getCalledFunction()) {
+ // Indirect call site resolved to direct call.
+ CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);
+
+ continue;
+ }
+
+ CallerNode->addCalledFunction(CallSite(NewCall), I->second);
}
// Update the call graph by deleting the edge from Callee to Caller. We must
@@ -219,13 +237,15 @@ static void UpdateCallGraphAfterInlining(CallSite CS,
// exists in the instruction stream. Similarly this will inline a recursive
// function by one level.
//
-bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
- SmallVectorImpl<AllocaInst*> *StaticAllocas) {
+bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
Instruction *TheCall = CS.getInstruction();
LLVMContext &Context = TheCall->getContext();
assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
"Instruction not in function!");
+ // If IFI has any state in it, zap it before we fill it in.
+ IFI.reset();
+
const Function *CalledFunc = CS.getCalledFunction();
if (CalledFunc == 0 || // Can't inline external function or indirect
CalledFunc->isDeclaration() || // call, or call to a vararg function!
@@ -266,8 +286,8 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
ClonedCodeInfo InlinedFunctionInfo;
Function::iterator FirstNewBlock;
- { // Scope to destroy ValueMap after cloning.
- DenseMap<const Value*, Value*> ValueMap;
+ { // Scope to destroy VMap after cloning.
+ ValueMap<const Value*, Value*> VMap;
assert(CalledFunc->arg_size() == CS.arg_size() &&
"No varargs calls can be inlined!");
@@ -292,37 +312,38 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
// Create the alloca. If we have TargetData, use nice alignment.
unsigned Align = 1;
- if (TD) Align = TD->getPrefTypeAlignment(AggTy);
+ if (IFI.TD) Align = IFI.TD->getPrefTypeAlignment(AggTy);
Value *NewAlloca = new AllocaInst(AggTy, 0, Align,
I->getName(),
&*Caller->begin()->begin());
// Emit a memcpy.
- const Type *Tys[] = { Type::getInt64Ty(Context) };
+ const Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
Intrinsic::memcpy,
- Tys, 1);
+ Tys, 3);
Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);
Value *Size;
- if (TD == 0)
+ if (IFI.TD == 0)
Size = ConstantExpr::getSizeOf(AggTy);
else
Size = ConstantInt::get(Type::getInt64Ty(Context),
- TD->getTypeStoreSize(AggTy));
+ IFI.TD->getTypeStoreSize(AggTy));
// Always generate a memcpy of alignment 1 here because we don't know
// the alignment of the src pointer. Other optimizations can infer
// better alignment.
Value *CallArgs[] = {
DestCast, SrcCast, Size,
- ConstantInt::get(Type::getInt32Ty(Context), 1)
+ ConstantInt::get(Type::getInt32Ty(Context), 1),
+ ConstantInt::get(Type::getInt1Ty(Context), 0)
};
CallInst *TheMemCpy =
- CallInst::Create(MemCpyFn, CallArgs, CallArgs+4, "", TheCall);
+ CallInst::Create(MemCpyFn, CallArgs, CallArgs+5, "", TheCall);
// If we have a call graph, update it.
- if (CG) {
+ if (CallGraph *CG = IFI.CG) {
CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
CallGraphNode *CallerNode = (*CG)[Caller];
CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
@@ -331,24 +352,29 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
// Uses of the argument in the function should use our new alloca
// instead.
ActualArg = NewAlloca;
+
+ // Calls that we inline may use the new alloca, so we need to clear
+ // their 'tail' flags.
+ MustClearTailCallFlags = true;
}
- ValueMap[I] = ActualArg;
+ VMap[I] = ActualArg;
}
// We want the inliner to prune the code as it copies. We would LOVE to
// have no dead or constant instructions leftover after inlining occurs
// (which can happen, e.g., because an argument was constant), but we'll be
// happy with whatever the cloner can do.
- CloneAndPruneFunctionInto(Caller, CalledFunc, ValueMap, Returns, ".i",
- &InlinedFunctionInfo, TD, TheCall);
+ CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
+ /*ModuleLevelChanges=*/false, Returns, ".i",
+ &InlinedFunctionInfo, IFI.TD, TheCall);
// Remember the first block that is newly cloned over.
FirstNewBlock = LastBlock; ++FirstNewBlock;
// Update the callgraph if requested.
- if (CG)
- UpdateCallGraphAfterInlining(CS, FirstNewBlock, ValueMap, *CG);
+ if (IFI.CG)
+ UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
}
// If there are any alloca instructions in the block that used to be the entry
@@ -375,13 +401,13 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
// Keep track of the static allocas that we inline into the caller.
- if (StaticAllocas) StaticAllocas->push_back(AI);
+ IFI.StaticAllocas.push_back(AI);
// Scan for the block of allocas that we can move over, and move them
// all at once.
while (isa<AllocaInst>(I) &&
isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
- if (StaticAllocas) StaticAllocas->push_back(cast<AllocaInst>(I));
+ IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
++I;
}
@@ -405,7 +431,7 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
// If we are preserving the callgraph, add edges to the stacksave/restore
// functions for the calls we insert.
CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0;
- if (CG) {
+ if (CallGraph *CG = IFI.CG) {
StackSaveCGN = CG->getOrInsertFunction(StackSave);
StackRestoreCGN = CG->getOrInsertFunction(StackRestore);
CallerNode = (*CG)[Caller];
@@ -414,13 +440,13 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
// Insert the llvm.stacksave.
CallInst *SavedPtr = CallInst::Create(StackSave, "savedstack",
FirstNewBlock->begin());
- if (CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);
+ if (IFI.CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);
// Insert a call to llvm.stackrestore before any return instructions in the
// inlined function.
for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", Returns[i]);
- if (CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
+ if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
}
// Count the number of StackRestore calls we insert.
@@ -433,7 +459,7 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
BB != E; ++BB)
if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", UI);
- if (CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
+ if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
++NumStackRestores;
}
}
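
The InlineFunction() entry points trade their separate CallGraph*, TargetData* and StaticAllocas* parameters for a single InlineFunctionInfo argument that carries the optional analyses in and the per-call results (InlinedCalls, StaticAllocas) back out, and that is reset() at the start of each call. A generic sketch of that context-object refactor, with made-up Logger/InlineInfo/inlineOneCall names rather than the real LLVM declarations:

  #include <cstdio>
  #include <string>
  #include <vector>

  struct Logger {
    void note(const std::string &msg) { std::printf("note: %s\n", msg.c_str()); }
  };

  // One bundle for the optional inputs and the per-call outputs, mirroring the
  // shape (not the exact contents) of InlineFunctionInfo; all names are made up.
  struct InlineInfo {
    Logger *logger;                          // optional analysis, like IFI.CG / IFI.TD
    std::vector<std::string> inlinedCalls;   // results handed back, like IFI.InlinedCalls
    InlineInfo() : logger(0) {}
    void reset() { inlinedCalls.clear(); }   // wipe per-call state, like IFI.reset()
  };

  static bool inlineOneCall(const std::string &callee, InlineInfo &info) {
    info.reset();                            // stale results must not leak across calls
    info.inlinedCalls.push_back(callee + ".inlined");
    if (info.logger)                         // only touch the analysis if the caller has one
      info.logger->note("inlined " + callee);
    return true;
  }

  int main() {
    Logger log;
    InlineInfo info;
    info.logger = &log;
    inlineOneCall("foo", info);
    inlineOneCall("bar", info);              // reset() keeps only bar's entry
    std::printf("%u call(s) recorded\n", (unsigned)info.inlinedCalls.size());
    return 0;
  }

This is also why helpers such as UpdateCallGraphAfterInlining() now receive the whole IFI instead of a bare CallGraph&: they can record results like IFI.InlinedCalls while still reaching the optional analyses.
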
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/InstructionNamer.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/InstructionNamer.cpp
index 090af95..5ca8299 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/InstructionNamer.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/InstructionNamer.cpp
@@ -23,7 +23,7 @@ using namespace llvm;
namespace {
struct InstNamer : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- InstNamer() : FunctionPass(&ID) {}
+ InstNamer() : FunctionPass(ID) {}
void getAnalysisUsage(AnalysisUsage &Info) const {
Info.setPreservesAll();
@@ -48,12 +48,12 @@ namespace {
};
char InstNamer::ID = 0;
- static RegisterPass<InstNamer> X("instnamer",
- "Assign names to anonymous instructions");
+ INITIALIZE_PASS(InstNamer, "instnamer",
+ "Assign names to anonymous instructions", false, false);
}
-const PassInfo *const llvm::InstructionNamerID = &X;
+char &llvm::InstructionNamerID = InstNamer::ID;
//===----------------------------------------------------------------------===//
//
// InstructionNamer - Give any unnamed non-void instructions "tmp" names.
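
This hunk, like the LCSSA.cpp and LoopSimplify.cpp hunks further down, switches to the newer pass registration scheme in the bundled LLVM: the pass constructor takes the static char ID itself instead of its address, registration goes through INITIALIZE_PASS, and the exported identifier becomes a char reference to that ID, whose address is what uniquely identifies the pass. The idiom relies only on distinct statics having distinct addresses; a tiny self-contained demonstration (PassA, PassB and whichPass are invented for illustration):

  #include <cstdio>

  // Each pass-like type exposes a static char whose *address* is its identity;
  // the char's value is never used. This mirrors "static char ID" plus
  // "char &llvm::InstructionNamerID = InstNamer::ID;".
  struct PassA { static char ID; };
  struct PassB { static char ID; };
  char PassA::ID = 0;
  char PassB::ID = 0;

  static const char *whichPass(const void *id) {
    if (id == &PassA::ID) return "PassA";
    if (id == &PassB::ID) return "PassB";
    return "unknown";
  }

  int main() {
    char &exportedA = PassA::ID;                 // the "char &llvm::XID = Pass::ID" pattern
    std::printf("%s\n", whichPass(&exportedA));  // prints PassA
    std::printf("%s\n", whichPass(&PassB::ID few));
    return 0;
  }
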
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/LCSSA.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/LCSSA.cpp
index 590d667..275b265 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/LCSSA.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/LCSSA.cpp
@@ -47,7 +47,7 @@ STATISTIC(NumLCSSA, "Number of live out of a loop variables");
namespace {
struct LCSSA : public LoopPass {
static char ID; // Pass identification, replacement for typeid
- LCSSA() : LoopPass(&ID) {}
+ LCSSA() : LoopPass(ID) {}
// Cached analysis information for the current function.
DominatorTree *DT;
@@ -64,22 +64,13 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
- // LCSSA doesn't actually require LoopSimplify, but the PassManager
- // doesn't know how to schedule LoopSimplify by itself.
- AU.addRequiredID(LoopSimplifyID);
- AU.addPreservedID(LoopSimplifyID);
- AU.addRequiredTransitive<LoopInfo>();
- AU.addPreserved<LoopInfo>();
- AU.addRequiredTransitive<DominatorTree>();
- AU.addPreserved<ScalarEvolution>();
+ AU.addRequired<DominatorTree>();
AU.addPreserved<DominatorTree>();
-
- // Request DominanceFrontier now, even though LCSSA does
- // not use it. This allows Pass Manager to schedule Dominance
- // Frontier early enough such that one LPPassManager can handle
- // multiple loop transformation passes.
- AU.addRequired<DominanceFrontier>();
AU.addPreserved<DominanceFrontier>();
+ AU.addRequired<LoopInfo>();
+ AU.addPreserved<LoopInfo>();
+ AU.addPreservedID(LoopSimplifyID);
+ AU.addPreserved<ScalarEvolution>();
}
private:
bool ProcessInstruction(Instruction *Inst,
@@ -88,7 +79,7 @@ namespace {
/// verifyAnalysis() - Verify loop nest.
virtual void verifyAnalysis() const {
// Check the special guarantees that LCSSA makes.
- assert(L->isLCSSAForm() && "LCSSA form not preserved!");
+ assert(L->isLCSSAForm(*DT) && "LCSSA form not preserved!");
}
/// inLoop - returns true if the given block is within the current loop
@@ -99,10 +90,10 @@ namespace {
}
char LCSSA::ID = 0;
-static RegisterPass<LCSSA> X("lcssa", "Loop-Closed SSA Form Pass");
+INITIALIZE_PASS(LCSSA, "lcssa", "Loop-Closed SSA Form Pass", false, false);
Pass *llvm::createLCSSAPass() { return new LCSSA(); }
-const PassInfo *const llvm::LCSSAID = &X;
+char &llvm::LCSSAID = LCSSA::ID;
/// BlockDominatesAnExit - Return true if the specified block dominates at least
@@ -164,7 +155,7 @@ bool LCSSA::runOnLoop(Loop *TheLoop, LPPassManager &LPM) {
}
}
- assert(L->isLCSSAForm());
+ assert(L->isLCSSAForm(*DT));
PredCache.clear();
return MadeChange;
@@ -190,14 +181,15 @@ bool LCSSA::ProcessInstruction(Instruction *Inst,
for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
UI != E; ++UI) {
- BasicBlock *UserBB = cast<Instruction>(*UI)->getParent();
- if (PHINode *PN = dyn_cast<PHINode>(*UI))
+ User *U = *UI;
+ BasicBlock *UserBB = cast<Instruction>(U)->getParent();
+ if (PHINode *PN = dyn_cast<PHINode>(U))
UserBB = PN->getIncomingBlock(UI);
if (InstBB != UserBB && !inLoop(UserBB))
UsesToRewrite.push_back(&UI.getUse());
}
-
+
// If there are no uses outside the loop, exit with no change.
if (UsesToRewrite.empty()) return false;
@@ -214,7 +206,7 @@ bool LCSSA::ProcessInstruction(Instruction *Inst,
DomTreeNode *DomNode = DT->getNode(DomBB);
SSAUpdater SSAUpdate;
- SSAUpdate.Initialize(Inst);
+ SSAUpdate.Initialize(Inst->getType(), Inst->getName());
// Insert the LCSSA phi's into all of the exit blocks dominated by the
// value, and add them to the Phi's map.
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/Local.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/Local.cpp
index d03f7a6..52f0499 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/Local.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/Local.cpp
@@ -35,111 +35,6 @@
using namespace llvm;
//===----------------------------------------------------------------------===//
-// Local analysis.
-//
-
-/// getUnderlyingObjectWithOffset - Strip off up to MaxLookup GEPs and
-/// bitcasts to get back to the underlying object being addressed, keeping
-/// track of the offset in bytes from the GEPs relative to the result.
-/// This is closely related to Value::getUnderlyingObject but is located
-/// here to avoid making VMCore depend on TargetData.
-static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD,
- uint64_t &ByteOffset,
- unsigned MaxLookup = 6) {
- if (!V->getType()->isPointerTy())
- return V;
- for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
- if (!GEP->hasAllConstantIndices())
- return V;
- SmallVector<Value*, 8> Indices(GEP->op_begin() + 1, GEP->op_end());
- ByteOffset += TD->getIndexedOffset(GEP->getPointerOperandType(),
- &Indices[0], Indices.size());
- V = GEP->getPointerOperand();
- } else if (Operator::getOpcode(V) == Instruction::BitCast) {
- V = cast<Operator>(V)->getOperand(0);
- } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
- if (GA->mayBeOverridden())
- return V;
- V = GA->getAliasee();
- } else {
- return V;
- }
- assert(V->getType()->isPointerTy() && "Unexpected operand type!");
- }
- return V;
-}
-
-/// isSafeToLoadUnconditionally - Return true if we know that executing a load
-/// from this value cannot trap. If it is not obviously safe to load from the
-/// specified pointer, we do a quick local scan of the basic block containing
-/// ScanFrom, to determine if the address is already accessed.
-bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
- unsigned Align, const TargetData *TD) {
- uint64_t ByteOffset = 0;
- Value *Base = V;
- if (TD)
- Base = getUnderlyingObjectWithOffset(V, TD, ByteOffset);
-
- const Type *BaseType = 0;
- unsigned BaseAlign = 0;
- if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
- // An alloca is safe to load from as load as it is suitably aligned.
- BaseType = AI->getAllocatedType();
- BaseAlign = AI->getAlignment();
- } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(Base)) {
- // Global variables are safe to load from but their size cannot be
- // guaranteed if they are overridden.
- if (!isa<GlobalAlias>(GV) && !GV->mayBeOverridden()) {
- BaseType = GV->getType()->getElementType();
- BaseAlign = GV->getAlignment();
- }
- }
-
- if (BaseType && BaseType->isSized()) {
- if (TD && BaseAlign == 0)
- BaseAlign = TD->getPrefTypeAlignment(BaseType);
-
- if (Align <= BaseAlign) {
- if (!TD)
- return true; // Loading directly from an alloca or global is OK.
-
- // Check if the load is within the bounds of the underlying object.
- const PointerType *AddrTy = cast<PointerType>(V->getType());
- uint64_t LoadSize = TD->getTypeStoreSize(AddrTy->getElementType());
- if (ByteOffset + LoadSize <= TD->getTypeAllocSize(BaseType) &&
- (Align == 0 || (ByteOffset % Align) == 0))
- return true;
- }
- }
-
- // Otherwise, be a little bit aggressive by scanning the local block where we
- // want to check to see if the pointer is already being loaded or stored
- // from/to. If so, the previous load or store would have already trapped,
- // so there is no harm doing an extra load (also, CSE will later eliminate
- // the load entirely).
- BasicBlock::iterator BBI = ScanFrom, E = ScanFrom->getParent()->begin();
-
- while (BBI != E) {
- --BBI;
-
- // If we see a free or a call which may write to memory (i.e. which might do
- // a free) the pointer could be marked invalid.
- if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
- !isa<DbgInfoIntrinsic>(BBI))
- return false;
-
- if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
- if (LI->getOperand(0) == V) return true;
- } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
- if (SI->getOperand(1) == V) return true;
- }
- }
- return false;
-}
-
-
-//===----------------------------------------------------------------------===//
// Local constant propagation.
//
@@ -411,7 +306,7 @@ bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD) {
WeakVH BIHandle(BI);
ReplaceAndSimplifyAllUses(Inst, V, TD);
MadeChange = true;
- if (BIHandle == 0)
+ if (BIHandle != BI)
BI = BB->begin();
continue;
}
@@ -459,12 +354,13 @@ void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
// value into all of its uses.
assert(PNV != PN && "hasConstantValue broken");
+ Value *OldPhiIt = PhiIt;
ReplaceAndSimplifyAllUses(PN, PNV, TD);
// If recursive simplification ended up deleting the next PHI node we would
// iterate to, then our iterator is invalid, restart scanning from the top
// of the block.
- if (PhiIt == 0) PhiIt = &BB->front();
+ if (PhiIt != OldPhiIt) PhiIt = &BB->front();
}
}
@@ -537,9 +433,11 @@ static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
// Use that list to make another list of common predecessors of BB and Succ
BlockSet CommonPreds;
for (pred_iterator PI = pred_begin(Succ), PE = pred_end(Succ);
- PI != PE; ++PI)
- if (BBPreds.count(*PI))
- CommonPreds.insert(*PI);
+ PI != PE; ++PI) {
+ BasicBlock *P = *PI;
+ if (BBPreds.count(P))
+ CommonPreds.insert(P);
+ }
// Shortcut, if there are no common predecessors, merging is always safe
if (CommonPreds.empty())
@@ -592,6 +490,9 @@ static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
/// rewriting all the predecessors to branch to the successor block and return
/// true. If we can't transform, return false.
bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB) {
+ assert(BB != &BB->getParent()->getEntryBlock() &&
+ "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
+
// We can't eliminate infinite loops.
BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
if (BB == Succ) return false;
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/LoopSimplify.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/LoopSimplify.cpp
index 924b744..b3c4801 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/LoopSimplify.cpp
@@ -41,13 +41,14 @@
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/Function.h"
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
-#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CFG.h"
@@ -64,27 +65,30 @@ STATISTIC(NumNested , "Number of nested loops split out");
namespace {
struct LoopSimplify : public LoopPass {
static char ID; // Pass identification, replacement for typeid
- LoopSimplify() : LoopPass(&ID) {}
+ LoopSimplify() : LoopPass(ID) {}
// AA - If we have an alias analysis object to update, this is it, otherwise
// this is null.
AliasAnalysis *AA;
LoopInfo *LI;
DominatorTree *DT;
+ ScalarEvolution *SE;
Loop *L;
virtual bool runOnLoop(Loop *L, LPPassManager &LPM);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
// We need loop information to identify the loops...
- AU.addRequiredTransitive<LoopInfo>();
- AU.addRequiredTransitive<DominatorTree>();
+ AU.addRequired<DominatorTree>();
+ AU.addPreserved<DominatorTree>();
+ AU.addRequired<LoopInfo>();
AU.addPreserved<LoopInfo>();
- AU.addPreserved<DominatorTree>();
- AU.addPreserved<DominanceFrontier>();
+
AU.addPreserved<AliasAnalysis>();
AU.addPreserved<ScalarEvolution>();
AU.addPreservedID(BreakCriticalEdgesID); // No critical edges added.
+ AU.addPreserved<DominanceFrontier>();
+ AU.addPreservedID(LCSSAID);
}
/// verifyAnalysis() - Verify LoopSimplifyForm's guarantees.
@@ -103,11 +107,11 @@ namespace {
}
char LoopSimplify::ID = 0;
-static RegisterPass<LoopSimplify>
-X("loopsimplify", "Canonicalize natural loops", true);
+INITIALIZE_PASS(LoopSimplify, "loopsimplify",
+ "Canonicalize natural loops", true, false);
// Publicly exposed interface to pass...
-const PassInfo *const llvm::LoopSimplifyID = &X;
+char &llvm::LoopSimplifyID = LoopSimplify::ID;
Pass *llvm::createLoopSimplifyPass() { return new LoopSimplify(); }
/// runOnLoop - Run down all loops in the CFG (recursively, but we could do
@@ -119,6 +123,7 @@ bool LoopSimplify::runOnLoop(Loop *l, LPPassManager &LPM) {
LI = &getAnalysis<LoopInfo>();
AA = getAnalysisIfAvailable<AliasAnalysis>();
DT = &getAnalysis<DominatorTree>();
+ SE = getAnalysisIfAvailable<ScalarEvolution>();
Changed |= ProcessLoop(L, LPM);
@@ -132,7 +137,7 @@ bool LoopSimplify::ProcessLoop(Loop *L, LPPassManager &LPM) {
bool Changed = false;
ReprocessLoop:
- // Check to see that no blocks (other than the header) in this loop that has
+ // Check to see that no blocks (other than the header) in this loop have
// predecessors that are not in the loop. This is not valid for natural
// loops, but can occur if the blocks are unreachable. Since they are
// unreachable we can just shamelessly delete those CFG edges!
@@ -140,13 +145,16 @@ ReprocessLoop:
BB != E; ++BB) {
if (*BB == L->getHeader()) continue;
- SmallPtrSet<BasicBlock *, 4> BadPreds;
- for (pred_iterator PI = pred_begin(*BB), PE = pred_end(*BB); PI != PE; ++PI)
- if (!L->contains(*PI))
- BadPreds.insert(*PI);
+ SmallPtrSet<BasicBlock*, 4> BadPreds;
+ for (pred_iterator PI = pred_begin(*BB),
+ PE = pred_end(*BB); PI != PE; ++PI) {
+ BasicBlock *P = *PI;
+ if (!L->contains(P))
+ BadPreds.insert(P);
+ }
// Delete each unique out-of-loop (and thus dead) predecessor.
- for (SmallPtrSet<BasicBlock *, 4>::iterator I = BadPreds.begin(),
+ for (SmallPtrSet<BasicBlock*, 4>::iterator I = BadPreds.begin(),
E = BadPreds.end(); I != E; ++I) {
DEBUG(dbgs() << "LoopSimplify: Deleting edge from dead predecessor ";
@@ -191,7 +199,7 @@ ReprocessLoop:
if (!Preheader) {
Preheader = InsertPreheaderForLoop(L);
if (Preheader) {
- NumInserted++;
+ ++NumInserted;
Changed = true;
}
}
@@ -214,7 +222,7 @@ ReprocessLoop:
// allowed.
if (!L->contains(*PI)) {
if (RewriteLoopExitBlock(L, ExitBlock)) {
- NumInserted++;
+ ++NumInserted;
Changed = true;
}
break;
@@ -243,7 +251,7 @@ ReprocessLoop:
// loop header.
LoopLatch = InsertUniqueBackedgeBlock(L, Preheader);
if (LoopLatch) {
- NumInserted++;
+ ++NumInserted;
Changed = true;
}
}
@@ -290,6 +298,9 @@ ReprocessLoop:
bool AllInvariant = true;
for (BasicBlock::iterator I = ExitingBlock->begin(); &*I != BI; ) {
Instruction *Inst = I++;
+ // Skip debug info intrinsics.
+ if (isa<DbgInfoIntrinsic>(Inst))
+ continue;
if (Inst == CI)
continue;
if (!L->makeLoopInvariant(Inst, Changed,
@@ -349,16 +360,18 @@ BasicBlock *LoopSimplify::InsertPreheaderForLoop(Loop *L) {
// Compute the set of predecessors of the loop that are not in the loop.
SmallVector<BasicBlock*, 8> OutsideBlocks;
for (pred_iterator PI = pred_begin(Header), PE = pred_end(Header);
- PI != PE; ++PI)
- if (!L->contains(*PI)) { // Coming in from outside the loop?
+ PI != PE; ++PI) {
+ BasicBlock *P = *PI;
+ if (!L->contains(P)) { // Coming in from outside the loop?
// If the loop is branched to from an indirect branch, we won't
// be able to fully transform the loop, because it prohibits
// edge splitting.
- if (isa<IndirectBrInst>((*PI)->getTerminator())) return 0;
+ if (isa<IndirectBrInst>(P->getTerminator())) return 0;
// Keep track of it.
- OutsideBlocks.push_back(*PI);
+ OutsideBlocks.push_back(P);
}
+ }
// Split out the loop pre-header.
BasicBlock *NewBB =
@@ -381,13 +394,15 @@ BasicBlock *LoopSimplify::InsertPreheaderForLoop(Loop *L) {
/// outside of the loop.
BasicBlock *LoopSimplify::RewriteLoopExitBlock(Loop *L, BasicBlock *Exit) {
SmallVector<BasicBlock*, 8> LoopBlocks;
- for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit); I != E; ++I)
- if (L->contains(*I)) {
+ for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit); I != E; ++I) {
+ BasicBlock *P = *I;
+ if (L->contains(P)) {
// Don't do this if the loop is exited via an indirect branch.
- if (isa<IndirectBrInst>((*I)->getTerminator())) return 0;
+ if (isa<IndirectBrInst>(P->getTerminator())) return 0;
- LoopBlocks.push_back(*I);
+ LoopBlocks.push_back(P);
}
+ }
assert(!LoopBlocks.empty() && "No edges coming in from outside the loop?");
BasicBlock *NewBB = SplitBlockPredecessors(Exit, &LoopBlocks[0],
@@ -520,6 +535,12 @@ Loop *LoopSimplify::SeparateNestedLoop(Loop *L, LPPassManager &LPM) {
DEBUG(dbgs() << "LoopSimplify: Splitting out a new outer loop\n");
+ // If ScalarEvolution is around and knows anything about values in
+ // this loop, tell it to forget them, because we're about to
+ // substantially change it.
+ if (SE)
+ SE->forgetLoop(L);
+
BasicBlock *Header = L->getHeader();
BasicBlock *NewBB = SplitBlockPredecessors(Header, &OuterLoopPreds[0],
OuterLoopPreds.size(),
@@ -555,10 +576,11 @@ Loop *LoopSimplify::SeparateNestedLoop(Loop *L, LPPassManager &LPM) {
// Determine which blocks should stay in L and which should be moved out to
// the Outer loop now.
std::set<BasicBlock*> BlocksInL;
- for (pred_iterator PI = pred_begin(Header), E = pred_end(Header); PI!=E; ++PI)
- if (DT->dominates(Header, *PI))
- AddBlockAndPredsToSet(*PI, Header, BlocksInL);
-
+ for (pred_iterator PI=pred_begin(Header), E = pred_end(Header); PI!=E; ++PI) {
+ BasicBlock *P = *PI;
+ if (DT->dominates(Header, P))
+ AddBlockAndPredsToSet(P, Header, BlocksInL);
+ }
// Scan all of the loop children of L, moving them to OuterLoop if they are
// not part of the inner loop.
@@ -606,8 +628,15 @@ LoopSimplify::InsertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader) {
// Figure out which basic blocks contain back-edges to the loop header.
std::vector<BasicBlock*> BackedgeBlocks;
- for (pred_iterator I = pred_begin(Header), E = pred_end(Header); I != E; ++I)
- if (*I != Preheader) BackedgeBlocks.push_back(*I);
+ for (pred_iterator I = pred_begin(Header), E = pred_end(Header); I != E; ++I){
+ BasicBlock *P = *I;
+
+ // Indirectbr edges cannot be split, so we must fail if we find one.
+ if (isa<IndirectBrInst>(P->getTerminator()))
+ return 0;
+
+ if (P != Preheader) BackedgeBlocks.push_back(P);
+ }
// Create and insert the new backedge block...
BasicBlock *BEBlock = BasicBlock::Create(Header->getContext(),
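The LoopSimplify hunks above boil down to one new obligation: a pass that is about to restructure a loop first tells ScalarEvolution to drop whatever it has cached about that loop, and it fetches ScalarEvolution lazily because the analysis may not be scheduled at all. A minimal sketch of that pattern, with the pass and method names invented for illustration (only getAnalysisIfAvailable<ScalarEvolution>() and forgetLoop() are taken from the patch):

    #include "llvm/Analysis/LoopPass.h"
    #include "llvm/Analysis/ScalarEvolution.h"
    using namespace llvm;

    // Hypothetical loop pass; mirrors what LoopSimplify::SeparateNestedLoop
    // does above before splitting the loop header.
    bool MyLoopRestructurer::runOnLoop(Loop *L, LPPassManager &LPM) {
      // ScalarEvolution is optional here, so ask for it conditionally.
      if (ScalarEvolution *SE = getAnalysisIfAvailable<ScalarEvolution>())
        SE->forgetLoop(L);  // cached trip counts/SCEVs for L are about to go stale
      // ... now it is safe to split blocks, insert a preheader, and so on ...
      return true;
    }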
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/LoopUnroll.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/LoopUnroll.cpp
index e47c86d..236bbe9 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/LoopUnroll.cpp
@@ -24,6 +24,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -37,13 +38,13 @@ STATISTIC(NumCompletelyUnrolled, "Number of loops completely unrolled");
STATISTIC(NumUnrolled, "Number of loops unrolled (completely or otherwise)");
/// RemapInstruction - Convert the instruction operands from referencing the
-/// current values into those specified by ValueMap.
+/// current values into those specified by VMap.
static inline void RemapInstruction(Instruction *I,
- DenseMap<const Value *, Value*> &ValueMap) {
+ ValueMap<const Value *, Value*> &VMap) {
for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
Value *Op = I->getOperand(op);
- DenseMap<const Value *, Value*>::iterator It = ValueMap.find(Op);
- if (It != ValueMap.end())
+ ValueMap<const Value *, Value*>::iterator It = VMap.find(Op);
+ if (It != VMap.end())
I->setOperand(op, It->second);
}
}
@@ -105,8 +106,6 @@ static BasicBlock *FoldBlockIntoPredecessor(BasicBlock *BB, LoopInfo* LI) {
/// If a LoopPassManager is passed in, and the loop is fully removed, it will be
/// removed from the LoopPassManager as well. LPM can also be NULL.
bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM) {
- assert(L->isLCSSAForm());
-
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) {
DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n");
@@ -129,6 +128,11 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM)
return false;
}
+ // Notify ScalarEvolution that the loop will be substantially changed,
+ // if not outright eliminated.
+ if (ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>())
+ SE->forgetLoop(L);
+
// Find trip count
unsigned TripCount = L->getSmallConstantTripCount();
// Find trip multiple if count is not available
@@ -185,8 +189,8 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM)
// For the first iteration of the loop, we should use the precloned values for
// PHI nodes. Insert associations now.
- typedef DenseMap<const Value*, Value*> ValueMapTy;
- ValueMapTy LastValueMap;
+ typedef ValueMap<const Value*, Value*> ValueToValueMapTy;
+ ValueToValueMapTy LastValueMap;
std::vector<PHINode*> OrigPHINode;
for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
PHINode *PN = cast<PHINode>(I);
@@ -207,26 +211,26 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM)
for (std::vector<BasicBlock*>::iterator BB = LoopBlocks.begin(),
E = LoopBlocks.end(); BB != E; ++BB) {
- ValueMapTy ValueMap;
- BasicBlock *New = CloneBasicBlock(*BB, ValueMap, "." + Twine(It));
+ ValueToValueMapTy VMap;
+ BasicBlock *New = CloneBasicBlock(*BB, VMap, "." + Twine(It));
Header->getParent()->getBasicBlockList().push_back(New);
// Loop over all of the PHI nodes in the block, changing them to use the
// incoming values from the previous block.
if (*BB == Header)
for (unsigned i = 0, e = OrigPHINode.size(); i != e; ++i) {
- PHINode *NewPHI = cast<PHINode>(ValueMap[OrigPHINode[i]]);
+ PHINode *NewPHI = cast<PHINode>(VMap[OrigPHINode[i]]);
Value *InVal = NewPHI->getIncomingValueForBlock(LatchBlock);
if (Instruction *InValI = dyn_cast<Instruction>(InVal))
if (It > 1 && L->contains(InValI))
InVal = LastValueMap[InValI];
- ValueMap[OrigPHINode[i]] = InVal;
+ VMap[OrigPHINode[i]] = InVal;
New->getInstList().erase(NewPHI);
}
// Update our running map of newest clones
LastValueMap[*BB] = New;
- for (ValueMapTy::iterator VI = ValueMap.begin(), VE = ValueMap.end();
+ for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end();
VI != VE; ++VI)
LastValueMap[VI->first] = VI->second;
@@ -370,9 +374,5 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM)
if (CompletelyUnroll && LPM != NULL)
LPM->deleteLoopFromQueue(L);
- // If we didn't completely unroll the loop, it should still be in LCSSA form.
- if (!CompletelyUnroll)
- assert(L->isLCSSAForm());
-
return true;
}
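The LoopUnroll.cpp change above is mostly a rename from DenseMap to the new ValueMap type (plus the same ScalarEvolution::forgetLoop() notification discussed earlier), but the remapping step it keeps is worth reading on its own: after CloneBasicBlock fills a per-iteration map, every operand of every cloned instruction is redirected through that map. A condensed sketch (the helper name is illustrative; the inner loop is taken from RemapInstruction above):

    typedef ValueMap<const Value*, Value*> ValueToValueMapTy;

    // Redirect the operands of a freshly cloned block so they refer to this
    // iteration's clones instead of the original loop's values.
    static void RemapClonedBlock(BasicBlock *New, ValueToValueMapTy &VMap) {
      for (BasicBlock::iterator I = New->begin(), E = New->end(); I != E; ++I)
        for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
          ValueToValueMapTy::iterator It = VMap.find(I->getOperand(op));
          if (It != VMap.end())
            I->setOperand(op, It->second);
        }
    }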
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/LowerInvoke.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/LowerInvoke.cpp
index ebed676..a46dd84 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/LowerInvoke.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/LowerInvoke.cpp
@@ -45,6 +45,7 @@
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
@@ -62,52 +63,56 @@ static cl::opt<bool> ExpensiveEHSupport("enable-correct-eh-support",
namespace {
class LowerInvoke : public FunctionPass {
// Used for both models.
- Constant *WriteFn;
Constant *AbortFn;
- Value *AbortMessage;
- unsigned AbortMessageLength;
// Used for expensive EH support.
const Type *JBLinkTy;
GlobalVariable *JBListHead;
- Constant *SetJmpFn, *LongJmpFn;
+ Constant *SetJmpFn, *LongJmpFn, *StackSaveFn, *StackRestoreFn;
+ bool useExpensiveEHSupport;
// We peek in TLI to grab the target's jmp_buf size and alignment
const TargetLowering *TLI;
public:
static char ID; // Pass identification, replacement for typeid
- explicit LowerInvoke(const TargetLowering *tli = NULL)
- : FunctionPass(&ID), TLI(tli) { }
+ explicit LowerInvoke(const TargetLowering *tli = NULL,
+ bool useExpensiveEHSupport = ExpensiveEHSupport)
+ : FunctionPass(ID), useExpensiveEHSupport(useExpensiveEHSupport),
+ TLI(tli) { }
bool doInitialization(Module &M);
bool runOnFunction(Function &F);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
// This is a cluster of orthogonal Transforms
- AU.addPreservedID(PromoteMemoryToRegisterID);
+ AU.addPreserved("mem2reg");
AU.addPreservedID(LowerSwitchID);
}
private:
- void createAbortMessage(Module *M);
- void writeAbortMessage(Instruction *IB);
bool insertCheapEHSupport(Function &F);
- void splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes);
+ void splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*>&Invokes);
void rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
- AllocaInst *InvokeNum, SwitchInst *CatchSwitch);
+ AllocaInst *InvokeNum, AllocaInst *StackPtr,
+ SwitchInst *CatchSwitch);
bool insertExpensiveEHSupport(Function &F);
};
}
char LowerInvoke::ID = 0;
-static RegisterPass<LowerInvoke>
-X("lowerinvoke", "Lower invoke and unwind, for unwindless code generators");
+INITIALIZE_PASS(LowerInvoke, "lowerinvoke",
+ "Lower invoke and unwind, for unwindless code generators",
+ false, false);
-const PassInfo *const llvm::LowerInvokePassID = &X;
+char &llvm::LowerInvokePassID = LowerInvoke::ID;
// Public Interface To the LowerInvoke pass.
FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI) {
- return new LowerInvoke(TLI);
+ return new LowerInvoke(TLI, ExpensiveEHSupport);
+}
+FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI,
+ bool useExpensiveEHSupport) {
+ return new LowerInvoke(TLI, useExpensiveEHSupport);
}
// doInitialization - Make sure that there is a prototype for abort in the
@@ -115,8 +120,7 @@ FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI) {
bool LowerInvoke::doInitialization(Module &M) {
const Type *VoidPtrTy =
Type::getInt8PtrTy(M.getContext());
- AbortMessage = 0;
- if (ExpensiveEHSupport) {
+ if (useExpensiveEHSupport) {
// Insert a type for the linked list of jump buffers.
unsigned JBSize = TLI ? TLI->getJumpBufSize() : 0;
JBSize = JBSize ? JBSize : 200;
@@ -146,90 +150,39 @@ bool LowerInvoke::doInitialization(Module &M) {
// VisualStudio defines setjmp as _setjmp via #include <csetjmp> / <setjmp.h>,
// so it looks like Intrinsic::_setjmp
-#if defined(_MSC_VER) && _MSC_VER < 1600 && defined(setjmp)
+#if defined(_MSC_VER) && defined(setjmp)
#define setjmp_undefined_for_visual_studio
#undef setjmp
#endif
SetJmpFn = Intrinsic::getDeclaration(&M, Intrinsic::setjmp);
-#if defined(_MSC_VER) && _MSC_VER < 1600 && defined(setjmp_undefined_for_visual_studio)
+#if defined(_MSC_VER) && defined(setjmp_undefined_for_visual_studio)
// let's return it to _setjmp state in case anyone ever needs it after this
// point under VisualStudio
#define setjmp _setjmp
#endif
LongJmpFn = Intrinsic::getDeclaration(&M, Intrinsic::longjmp);
+ StackSaveFn = Intrinsic::getDeclaration(&M, Intrinsic::stacksave);
+ StackRestoreFn = Intrinsic::getDeclaration(&M, Intrinsic::stackrestore);
}
// We need the 'write' and 'abort' functions for both models.
AbortFn = M.getOrInsertFunction("abort", Type::getVoidTy(M.getContext()),
(Type *)0);
-#if 0 // "write" is Unix-specific.. code is going away soon anyway.
- WriteFn = M.getOrInsertFunction("write", Type::VoidTy, Type::Int32Ty,
- VoidPtrTy, Type::Int32Ty, (Type *)0);
-#else
- WriteFn = 0;
-#endif
return true;
}
-void LowerInvoke::createAbortMessage(Module *M) {
- if (ExpensiveEHSupport) {
- // The abort message for expensive EH support tells the user that the
- // program 'unwound' without an 'invoke' instruction.
- Constant *Msg =
- ConstantArray::get(M->getContext(),
- "ERROR: Exception thrown, but not caught!\n");
- AbortMessageLength = Msg->getNumOperands()-1; // don't include \0
-
- GlobalVariable *MsgGV = new GlobalVariable(*M, Msg->getType(), true,
- GlobalValue::InternalLinkage,
- Msg, "abortmsg");
- std::vector<Constant*> GEPIdx(2,
- Constant::getNullValue(Type::getInt32Ty(M->getContext())));
- AbortMessage = ConstantExpr::getGetElementPtr(MsgGV, &GEPIdx[0], 2);
- } else {
- // The abort message for cheap EH support tells the user that EH is not
- // enabled.
- Constant *Msg =
- ConstantArray::get(M->getContext(),
- "Exception handler needed, but not enabled."
- "Recompile program with -enable-correct-eh-support.\n");
- AbortMessageLength = Msg->getNumOperands()-1; // don't include \0
-
- GlobalVariable *MsgGV = new GlobalVariable(*M, Msg->getType(), true,
- GlobalValue::InternalLinkage,
- Msg, "abortmsg");
- std::vector<Constant*> GEPIdx(2, Constant::getNullValue(
- Type::getInt32Ty(M->getContext())));
- AbortMessage = ConstantExpr::getGetElementPtr(MsgGV, &GEPIdx[0], 2);
- }
-}
-
-
-void LowerInvoke::writeAbortMessage(Instruction *IB) {
-#if 0
- if (AbortMessage == 0)
- createAbortMessage(IB->getParent()->getParent()->getParent());
-
- // These are the arguments we WANT...
- Value* Args[3];
- Args[0] = ConstantInt::get(Type::Int32Ty, 2);
- Args[1] = AbortMessage;
- Args[2] = ConstantInt::get(Type::Int32Ty, AbortMessageLength);
- (new CallInst(WriteFn, Args, 3, "", IB))->setTailCall();
-#endif
-}
-
bool LowerInvoke::insertCheapEHSupport(Function &F) {
bool Changed = false;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
- std::vector<Value*> CallArgs(II->op_begin()+3, II->op_end());
+ SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
// Insert a normal call instruction...
CallInst *NewCall = CallInst::Create(II->getCalledValue(),
- CallArgs.begin(), CallArgs.end(), "",II);
+ CallArgs.begin(), CallArgs.end(),
+ "",II);
NewCall->takeName(II);
NewCall->setCallingConv(II->getCallingConv());
NewCall->setAttributes(II->getAttributes());
@@ -246,9 +199,6 @@ bool LowerInvoke::insertCheapEHSupport(Function &F) {
++NumInvokes; Changed = true;
} else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
- // Insert a new call to write(2, AbortMessage, AbortMessageLength);
- writeAbortMessage(UI);
-
// Insert a call to abort()
CallInst::Create(AbortFn, "", UI)->setTailCall();
@@ -270,6 +220,7 @@ bool LowerInvoke::insertCheapEHSupport(Function &F) {
/// specified invoke instruction with a call.
void LowerInvoke::rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
AllocaInst *InvokeNum,
+ AllocaInst *StackPtr,
SwitchInst *CatchSwitch) {
ConstantInt *InvokeNoC = ConstantInt::get(Type::getInt32Ty(II->getContext()),
InvokeNo);
@@ -288,17 +239,27 @@ void LowerInvoke::rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
// Insert a store of the invoke num before the invoke and store zero into the
// location afterward.
new StoreInst(InvokeNoC, InvokeNum, true, II); // volatile
+
+ // Insert a store of the stack ptr before the invoke, so we can restore it
+ // later in the exception case.
+ CallInst* StackSaveRet = CallInst::Create(StackSaveFn, "ssret", II);
+ new StoreInst(StackSaveRet, StackPtr, true, II); // volatile
BasicBlock::iterator NI = II->getNormalDest()->getFirstNonPHI();
// nonvolatile.
new StoreInst(Constant::getNullValue(Type::getInt32Ty(II->getContext())),
InvokeNum, false, NI);
+ Instruction* StackPtrLoad = new LoadInst(StackPtr, "stackptr.restore", true,
+ II->getUnwindDest()->getFirstNonPHI()
+ );
+ CallInst::Create(StackRestoreFn, StackPtrLoad, "")->insertAfter(StackPtrLoad);
+
// Add a switch case to our unwind block.
CatchSwitch->addCase(InvokeNoC, II->getUnwindDest());
// Insert a normal call instruction.
- std::vector<Value*> CallArgs(II->op_begin()+3, II->op_end());
+ SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
CallInst *NewCall = CallInst::Create(II->getCalledValue(),
CallArgs.begin(), CallArgs.end(), "",
II);
@@ -327,7 +288,7 @@ static void MarkBlocksLiveIn(BasicBlock *BB, std::set<BasicBlock*> &LiveBBs) {
// across the unwind edge. This process also splits all critical edges
// coming out of invoke's.
void LowerInvoke::
-splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes) {
+splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes) {
// First step, split all critical edges from invoke instructions.
for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
InvokeInst *II = Invokes[i];
@@ -349,16 +310,33 @@ splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes) {
++AfterAllocaInsertPt;
for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
AI != E; ++AI) {
- // This is always a no-op cast because we're casting AI to AI->getType() so
- // src and destination types are identical. BitCast is the only possibility.
- CastInst *NC = new BitCastInst(
- AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
- AI->replaceAllUsesWith(NC);
- // Normally its is forbidden to replace a CastInst's operand because it
- // could cause the opcode to reflect an illegal conversion. However, we're
- // replacing it here with the same value it was constructed with to simply
- // make NC its user.
- NC->setOperand(0, AI);
+ const Type *Ty = AI->getType();
+ // Aggregate types can't be cast, but are legal argument types, so we have
+ // to handle them differently. We use an extract/insert pair as a
+ // lightweight method to achieve the same goal.
+ if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
+ Instruction *EI = ExtractValueInst::Create(AI, 0, "",AfterAllocaInsertPt);
+ Instruction *NI = InsertValueInst::Create(AI, EI, 0);
+ NI->insertAfter(EI);
+ AI->replaceAllUsesWith(NI);
+ // Set the operand of the instructions back to the AllocaInst.
+ EI->setOperand(0, AI);
+ NI->setOperand(0, AI);
+ } else {
+ // This is always a no-op cast because we're casting AI to AI->getType()
+ // so src and destination types are identical. BitCast is the only
+ // possibility.
+ CastInst *NC = new BitCastInst(
+ AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
+ AI->replaceAllUsesWith(NC);
+ // Set the operand of the cast instruction back to the AllocaInst.
+ // Normally it's forbidden to replace a CastInst's operand because it
+ // could cause the opcode to reflect an illegal conversion. However,
+ // we're replacing it here with the same value it was constructed with.
+ // We do this because the above replaceAllUsesWith() clobbered the
+ // operand, but we want this one to remain.
+ NC->setOperand(0, AI);
+ }
}
// Finally, scan the code looking for instructions with bad live ranges.
@@ -380,7 +358,7 @@ splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes) {
continue;
// Avoid iterator invalidation by copying users to a temporary vector.
- std::vector<Instruction*> Users;
+ SmallVector<Instruction*,16> Users;
for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
UI != E; ++UI) {
Instruction *User = cast<Instruction>(*UI);
@@ -430,9 +408,9 @@ splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes) {
}
bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
- std::vector<ReturnInst*> Returns;
- std::vector<UnwindInst*> Unwinds;
- std::vector<InvokeInst*> Invokes;
+ SmallVector<ReturnInst*,16> Returns;
+ SmallVector<UnwindInst*,16> Unwinds;
+ SmallVector<InvokeInst*,16> Invokes;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
@@ -480,12 +458,11 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
new AllocaInst(JBLinkTy, 0, Align,
"jblink", F.begin()->begin());
- std::vector<Value*> Idx;
- Idx.push_back(Constant::getNullValue(Type::getInt32Ty(F.getContext())));
- Idx.push_back(ConstantInt::get(Type::getInt32Ty(F.getContext()), 1));
- OldJmpBufPtr = GetElementPtrInst::Create(JmpBuf, Idx.begin(), Idx.end(),
+ Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
+ ConstantInt::get(Type::getInt32Ty(F.getContext()), 1) };
+ OldJmpBufPtr = GetElementPtrInst::Create(JmpBuf, &Idx[0], &Idx[2],
"OldBuf",
- EntryBB->getTerminator());
+ EntryBB->getTerminator());
// Copy the JBListHead to the alloca.
Value *OldBuf = new LoadInst(JBListHead, "oldjmpbufptr", true,
@@ -500,6 +477,12 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
BasicBlock *CatchBB =
BasicBlock::Create(F.getContext(), "setjmp.catch", &F);
+ // Create an alloca which keeps track of the stack pointer before every
+ // invoke, this allows us to properly restore the stack pointer after
+ // long jumping.
+ AllocaInst *StackPtr = new AllocaInst(Type::getInt8PtrTy(F.getContext()), 0,
+ "stackptr", EntryBB->begin());
+
// Create an alloca which keeps track of which invoke is currently
// executing. For normal calls it contains zero.
AllocaInst *InvokeNum = new AllocaInst(Type::getInt32Ty(F.getContext()), 0,
@@ -524,7 +507,7 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
"setjmp.cont");
Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 0);
- Value *JmpBufPtr = GetElementPtrInst::Create(JmpBuf, Idx.begin(), Idx.end(),
+ Value *JmpBufPtr = GetElementPtrInst::Create(JmpBuf, &Idx[0], &Idx[2],
"TheJmpBuf",
EntryBB->getTerminator());
JmpBufPtr = new BitCastInst(JmpBufPtr,
@@ -546,7 +529,7 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
// At this point, we are all set up, rewrite each invoke instruction.
for (unsigned i = 0, e = Invokes.size(); i != e; ++i)
- rewriteExpensiveInvoke(Invokes[i], i+1, InvokeNum, CatchSwitch);
+ rewriteExpensiveInvoke(Invokes[i], i+1, InvokeNum, StackPtr, CatchSwitch);
}
// We know that there is at least one unwind.
@@ -577,24 +560,20 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
// Create the block to do the longjmp.
// Get a pointer to the jmpbuf and longjmp.
- std::vector<Value*> Idx;
- Idx.push_back(Constant::getNullValue(Type::getInt32Ty(F.getContext())));
- Idx.push_back(ConstantInt::get(Type::getInt32Ty(F.getContext()), 0));
- Idx[0] = GetElementPtrInst::Create(BufPtr, Idx.begin(), Idx.end(), "JmpBuf",
+ Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
+ ConstantInt::get(Type::getInt32Ty(F.getContext()), 0) };
+ Idx[0] = GetElementPtrInst::Create(BufPtr, &Idx[0], &Idx[2], "JmpBuf",
UnwindBlock);
Idx[0] = new BitCastInst(Idx[0],
Type::getInt8PtrTy(F.getContext()),
"tmp", UnwindBlock);
Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 1);
- CallInst::Create(LongJmpFn, Idx.begin(), Idx.end(), "", UnwindBlock);
+ CallInst::Create(LongJmpFn, &Idx[0], &Idx[2], "", UnwindBlock);
new UnreachableInst(F.getContext(), UnwindBlock);
// Set up the term block ("throw without a catch").
new UnreachableInst(F.getContext(), TermBlock);
- // Insert a new call to write(2, AbortMessage, AbortMessageLength);
- writeAbortMessage(TermBlock->getTerminator());
-
// Insert a call to abort()
CallInst::Create(AbortFn, "",
TermBlock->getTerminator())->setTailCall();
@@ -622,7 +601,7 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
}
bool LowerInvoke::runOnFunction(Function &F) {
- if (ExpensiveEHSupport)
+ if (useExpensiveEHSupport)
return insertExpensiveEHSupport(F);
else
return insertCheapEHSupport(F);
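The substantive part of the LowerInvoke.cpp change is the stack-pointer bookkeeping that now brackets every rewritten invoke on the expensive-EH path: per the comment in the hunk, the stack pointer is saved into a per-function alloca right before the call so it can be restored at the top of the unwind destination in the exception case. Condensed from rewriteExpensiveInvoke above (II, StackPtr, StackSaveFn and StackRestoreFn as in the patch):

    // Before the invoke: remember the current stack pointer (volatile store,
    // like the InvokeNum store next to it).
    CallInst *SavedSP = CallInst::Create(StackSaveFn, "ssret", II);
    new StoreInst(SavedSP, StackPtr, true, II);

    // On the exception edge: reload it and restore, first thing in the
    // unwind destination.
    Instruction *SPLoad =
        new LoadInst(StackPtr, "stackptr.restore", true,
                     II->getUnwindDest()->getFirstNonPHI());
    CallInst::Create(StackRestoreFn, SPLoad, "")->insertAfter(SPLoad);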
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/LowerSwitch.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/LowerSwitch.cpp
index 468a5fe..5530b47 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/LowerSwitch.cpp
@@ -29,19 +29,18 @@ using namespace llvm;
namespace {
/// LowerSwitch Pass - Replace all SwitchInst instructions with chained branch
- /// instructions. Note that this cannot be a BasicBlock pass because it
- /// modifies the CFG!
+ /// instructions.
class LowerSwitch : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
- LowerSwitch() : FunctionPass(&ID) {}
+ LowerSwitch() : FunctionPass(ID) {}
virtual bool runOnFunction(Function &F);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
// This is a cluster of orthogonal Transforms
AU.addPreserved<UnifyFunctionExitNodes>();
- AU.addPreservedID(PromoteMemoryToRegisterID);
+ AU.addPreserved("mem2reg");
AU.addPreservedID(LowerInvokePassID);
}
@@ -50,8 +49,7 @@ namespace {
Constant* High;
BasicBlock* BB;
- CaseRange() : Low(0), High(0), BB(0) { }
- CaseRange(Constant* low, Constant* high, BasicBlock* bb) :
+ CaseRange(Constant *low = 0, Constant *high = 0, BasicBlock *bb = 0) :
Low(low), High(high), BB(bb) { }
};
@@ -81,11 +79,11 @@ namespace {
}
char LowerSwitch::ID = 0;
-static RegisterPass<LowerSwitch>
-X("lowerswitch", "Lower SwitchInst's to branches");
+INITIALIZE_PASS(LowerSwitch, "lowerswitch",
+ "Lower SwitchInst's to branches", false, false);
// Publically exposed interface to pass...
-const PassInfo *const llvm::LowerSwitchID = &X;
+char &llvm::LowerSwitchID = LowerSwitch::ID;
// createLowerSwitchPass - Interface to this file...
FunctionPass *llvm::createLowerSwitchPass() {
return new LowerSwitch();
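LowerSwitch.cpp shows, in its smallest form, the pass-registration change that repeats across every file in this merge: the file-static RegisterPass<> object and the exported PassInfo pointer give way to the INITIALIZE_PASS macro plus a char& alias of the pass ID, and constructors hand the ID to the base class by value rather than by address. The same hunks also switch addPreservedID(PromoteMemoryToRegisterID) to the string form addPreserved("mem2reg"), since Mem2Reg.cpp below no longer exports that ID. Side-by-side sketch, taken from the hunks above (the removed lines shown as comments):

    // Old registration style (the '-' lines):
    //   static RegisterPass<LowerSwitch>
    //   X("lowerswitch", "Lower SwitchInst's to branches");
    //   const PassInfo *const llvm::LowerSwitchID = &X;
    //   LowerSwitch() : FunctionPass(&ID) {}

    // New registration style (the '+' lines):
    INITIALIZE_PASS(LowerSwitch, "lowerswitch",
                    "Lower SwitchInst's to branches", false, false);
    char &llvm::LowerSwitchID = LowerSwitch::ID;
    //   LowerSwitch() : FunctionPass(ID) {}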
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/Mem2Reg.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/Mem2Reg.cpp
index 99203b6..101645b 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/Mem2Reg.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/Mem2Reg.cpp
@@ -27,7 +27,7 @@ STATISTIC(NumPromoted, "Number of alloca's promoted");
namespace {
struct PromotePass : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- PromotePass() : FunctionPass(&ID) {}
+ PromotePass() : FunctionPass(ID) {}
// runOnFunction - To run this pass, first we calculate the alloca
// instructions that are safe for promotion, then we promote each one.
@@ -49,7 +49,8 @@ namespace {
} // end of anonymous namespace
char PromotePass::ID = 0;
-static RegisterPass<PromotePass> X("mem2reg", "Promote Memory to Register");
+INITIALIZE_PASS(PromotePass, "mem2reg", "Promote Memory to Register",
+ false, false);
bool PromotePass::runOnFunction(Function &F) {
std::vector<AllocaInst*> Allocas;
@@ -81,8 +82,6 @@ bool PromotePass::runOnFunction(Function &F) {
return Changed;
}
-// Publically exposed interface to pass...
-const PassInfo *const llvm::PromoteMemoryToRegisterID = &X;
// createPromoteMemoryToRegister - Provide an entry point to create this pass.
//
FunctionPass *llvm::createPromoteMemoryToRegisterPass() {
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index d62b160..a4e3029 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -68,12 +68,13 @@ bool llvm::isAllocaPromotable(const AllocaInst *AI) {
// assignments to subsections of the memory unit.
// Only allow direct and non-volatile loads and stores...
- for (Value::use_const_iterator UI = AI->use_begin(), UE = AI->use_end();
- UI != UE; ++UI) // Loop over all of the uses of the alloca
- if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+ for (Value::const_use_iterator UI = AI->use_begin(), UE = AI->use_end();
+ UI != UE; ++UI) { // Loop over all of the uses of the alloca
+ const User *U = *UI;
+ if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
if (LI->isVolatile())
return false;
- } else if (const StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
+ } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
if (SI->getOperand(0) == AI)
return false; // Don't allow a store OF the AI, only INTO the AI.
if (SI->isVolatile())
@@ -81,6 +82,7 @@ bool llvm::isAllocaPromotable(const AllocaInst *AI) {
} else {
return false;
}
+ }
return true;
}
@@ -226,14 +228,6 @@ namespace {
void run();
- /// properlyDominates - Return true if I1 properly dominates I2.
- ///
- bool properlyDominates(Instruction *I1, Instruction *I2) const {
- if (InvokeInst *II = dyn_cast<InvokeInst>(I1))
- I1 = II->getNormalDest()->begin();
- return DT.properlyDominates(I1->getParent(), I2->getParent());
- }
-
/// dominates - Return true if BB1 dominates BB2 using the DominatorTree.
///
bool dominates(BasicBlock *BB1, BasicBlock *BB2) const {
@@ -603,9 +597,8 @@ ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
// To determine liveness, we must iterate through the predecessors of blocks
// where the def is live. Blocks are added to the worklist if we need to
// check their predecessors. Start with all the using blocks.
- SmallVector<BasicBlock*, 64> LiveInBlockWorklist;
- LiveInBlockWorklist.insert(LiveInBlockWorklist.end(),
- Info.UsingBlocks.begin(), Info.UsingBlocks.end());
+ SmallVector<BasicBlock*, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
+ Info.UsingBlocks.end());
// If any of the using blocks is also a definition block, check to see if the
// definition occurs before or after the use. If it happens before the use,
@@ -886,7 +879,7 @@ void PromoteMem2Reg::PromoteSingleBlockAlloca(AllocaInst *AI, AllocaInfo &Info,
void PromoteMem2Reg::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
StoreInst *SI) {
DIVariable DIVar(DDI->getVariable());
- if (!DIVar.getNode())
+ if (!DIVar.Verify())
return;
if (!DIF)
@@ -895,8 +888,12 @@ void PromoteMem2Reg::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
DIVar, SI);
// Propagate any debug metadata from the store onto the dbg.value.
- if (MDNode *SIMD = SI->getMetadata("dbg"))
- DbgVal->setMetadata("dbg", SIMD);
+ DebugLoc SIDL = SI->getDebugLoc();
+ if (!SIDL.isUnknown())
+ DbgVal->setDebugLoc(SIDL);
+ // Otherwise propagate debug metadata from dbg.declare.
+ else
+ DbgVal->setDebugLoc(DDI->getDebugLoc());
}
// QueuePhiNode - queues a phi-node to be added to a basic-block for a specific
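Two small but telling changes in PromoteMemoryToRegister.cpp: DIVariable is now validated with Verify() instead of by checking its underlying node, and the debug location is propagated through the first-class DebugLoc API rather than by copying raw "dbg" metadata, with the dbg.declare's own location as a fallback. Condensed from the hunk above (DbgVal, SI and DDI as in the patch):

    DIVariable DIVar(DDI->getVariable());
    if (!DIVar.Verify())
      return;                           // malformed or absent debug info: bail out

    // Prefer the store's location; otherwise inherit the dbg.declare's.
    DebugLoc Loc = SI->getDebugLoc();
    DbgVal->setDebugLoc(Loc.isUnknown() ? DDI->getDebugLoc() : Loc);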
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/SSAUpdater.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/SSAUpdater.cpp
index a31235a..c855988 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/SSAUpdater.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/SSAUpdater.cpp
@@ -11,49 +11,39 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Transforms/Utils/SSAUpdater.h"
+#define DEBUG_TYPE "ssaupdater"
#include "llvm/Instructions.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Allocator.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/SSAUpdater.h"
+#include "llvm/Transforms/Utils/SSAUpdaterImpl.h"
using namespace llvm;
-typedef DenseMap<BasicBlock*, TrackingVH<Value> > AvailableValsTy;
-typedef std::vector<std::pair<BasicBlock*, TrackingVH<Value> > >
- IncomingPredInfoTy;
-
+typedef DenseMap<BasicBlock*, Value*> AvailableValsTy;
static AvailableValsTy &getAvailableVals(void *AV) {
return *static_cast<AvailableValsTy*>(AV);
}
-static IncomingPredInfoTy &getIncomingPredInfo(void *IPI) {
- return *static_cast<IncomingPredInfoTy*>(IPI);
-}
-
-
SSAUpdater::SSAUpdater(SmallVectorImpl<PHINode*> *NewPHI)
- : AV(0), PrototypeValue(0), IPI(0), InsertedPHIs(NewPHI) {}
+ : AV(0), ProtoType(0), ProtoName(), InsertedPHIs(NewPHI) {}
SSAUpdater::~SSAUpdater() {
delete &getAvailableVals(AV);
- delete &getIncomingPredInfo(IPI);
}
/// Initialize - Reset this object to get ready for a new set of SSA
-/// updates. ProtoValue is the value used to name PHI nodes.
-void SSAUpdater::Initialize(Value *ProtoValue) {
+/// updates with type 'Ty'. PHI nodes get a name based on 'Name'.
+void SSAUpdater::Initialize(const Type *Ty, StringRef Name) {
if (AV == 0)
AV = new AvailableValsTy();
else
getAvailableVals(AV).clear();
-
- if (IPI == 0)
- IPI = new IncomingPredInfoTy();
- else
- getIncomingPredInfo(IPI).clear();
- PrototypeValue = ProtoValue;
+ ProtoType = Ty;
+ ProtoName = Name;
}
/// HasValueForBlock - Return true if the SSAUpdater already has a value for
@@ -65,15 +55,15 @@ bool SSAUpdater::HasValueForBlock(BasicBlock *BB) const {
/// AddAvailableValue - Indicate that a rewritten value is available in the
/// specified block with the specified value.
void SSAUpdater::AddAvailableValue(BasicBlock *BB, Value *V) {
- assert(PrototypeValue != 0 && "Need to initialize SSAUpdater");
- assert(PrototypeValue->getType() == V->getType() &&
+ assert(ProtoType != 0 && "Need to initialize SSAUpdater");
+ assert(ProtoType == V->getType() &&
"All rewritten values must have the same type");
getAvailableVals(AV)[BB] = V;
}
/// IsEquivalentPHI - Check if PHI has the same incoming value as specified
/// in ValueMapping for each predecessor block.
-static bool IsEquivalentPHI(PHINode *PHI,
+static bool IsEquivalentPHI(PHINode *PHI,
DenseMap<BasicBlock*, Value*> &ValueMapping) {
unsigned PHINumValues = PHI->getNumIncomingValues();
if (PHINumValues != ValueMapping.size())
@@ -89,38 +79,10 @@ static bool IsEquivalentPHI(PHINode *PHI,
return true;
}
-/// GetExistingPHI - Check if BB already contains a phi node that is equivalent
-/// to the specified mapping from predecessor blocks to incoming values.
-static Value *GetExistingPHI(BasicBlock *BB,
- DenseMap<BasicBlock*, Value*> &ValueMapping) {
- PHINode *SomePHI;
- for (BasicBlock::iterator It = BB->begin();
- (SomePHI = dyn_cast<PHINode>(It)); ++It) {
- if (IsEquivalentPHI(SomePHI, ValueMapping))
- return SomePHI;
- }
- return 0;
-}
-
-/// GetExistingPHI - Check if BB already contains an equivalent phi node.
-/// The InputIt type must be an iterator over std::pair<BasicBlock*, Value*>
-/// objects that specify the mapping from predecessor blocks to incoming values.
-template<typename InputIt>
-static Value *GetExistingPHI(BasicBlock *BB, const InputIt &I,
- const InputIt &E) {
- // Avoid create the mapping if BB has no phi nodes at all.
- if (!isa<PHINode>(BB->begin()))
- return 0;
- DenseMap<BasicBlock*, Value*> ValueMapping(I, E);
- return GetExistingPHI(BB, ValueMapping);
-}
-
/// GetValueAtEndOfBlock - Construct SSA form, materializing a value that is
/// live at the end of the specified block.
Value *SSAUpdater::GetValueAtEndOfBlock(BasicBlock *BB) {
- assert(getIncomingPredInfo(IPI).empty() && "Unexpected Internal State");
Value *Res = GetValueAtEndOfBlockInternal(BB);
- assert(getIncomingPredInfo(IPI).empty() && "Unexpected Internal State");
return Res;
}
@@ -146,7 +108,7 @@ Value *SSAUpdater::GetValueAtEndOfBlock(BasicBlock *BB) {
Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
// If there is no definition of the renamed variable in this block, just use
// GetValueAtEndOfBlock to do our work.
- if (!getAvailableVals(AV).count(BB))
+ if (!HasValueForBlock(BB))
return GetValueAtEndOfBlock(BB);
// Otherwise, we have the hard case. Get the live-in values for each
@@ -187,21 +149,27 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
// If there are no predecessors, just return undef.
if (PredValues.empty())
- return UndefValue::get(PrototypeValue->getType());
+ return UndefValue::get(ProtoType);
// Otherwise, if all the merged values are the same, just use it.
if (SingularValue != 0)
return SingularValue;
- // Otherwise, we do need a PHI.
- if (Value *ExistingPHI = GetExistingPHI(BB, PredValues.begin(),
- PredValues.end()))
- return ExistingPHI;
+ // Otherwise, we do need a PHI: check to see if we already have one available
+ // in this block that produces the right value.
+ if (isa<PHINode>(BB->begin())) {
+ DenseMap<BasicBlock*, Value*> ValueMapping(PredValues.begin(),
+ PredValues.end());
+ PHINode *SomePHI;
+ for (BasicBlock::iterator It = BB->begin();
+ (SomePHI = dyn_cast<PHINode>(It)); ++It) {
+ if (IsEquivalentPHI(SomePHI, ValueMapping))
+ return SomePHI;
+ }
+ }
// Ok, we have no way out, insert a new one now.
- PHINode *InsertedPHI = PHINode::Create(PrototypeValue->getType(),
- PrototypeValue->getName(),
- &BB->front());
+ PHINode *InsertedPHI = PHINode::Create(ProtoType, ProtoName, &BB->front());
InsertedPHI->reserveOperandSpace(PredValues.size());
// Fill in all the predecessors of the PHI.
@@ -226,7 +194,7 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
/// which use their value in the corresponding predecessor.
void SSAUpdater::RewriteUse(Use &U) {
Instruction *User = cast<Instruction>(U.getUser());
-
+
Value *V;
if (PHINode *UserPN = dyn_cast<PHINode>(User))
V = GetValueAtEndOfBlock(UserPN->getIncomingBlock(U));
@@ -236,161 +204,141 @@ void SSAUpdater::RewriteUse(Use &U) {
U.set(V);
}
+/// RewriteUseAfterInsertions - Rewrite a use, just like RewriteUse. However,
+/// this version of the method can rewrite uses in the same block as a
+/// definition, because it assumes that all uses of a value are below any
+/// inserted values.
+void SSAUpdater::RewriteUseAfterInsertions(Use &U) {
+ Instruction *User = cast<Instruction>(U.getUser());
+
+ Value *V;
+ if (PHINode *UserPN = dyn_cast<PHINode>(User))
+ V = GetValueAtEndOfBlock(UserPN->getIncomingBlock(U));
+ else
+ V = GetValueAtEndOfBlock(User->getParent());
+
+ U.set(V);
+}
-/// GetValueAtEndOfBlockInternal - Check to see if AvailableVals has an entry
-/// for the specified BB and if so, return it. If not, construct SSA form by
-/// walking predecessors inserting PHI nodes as needed until we get to a block
-/// where the value is available.
-///
-Value *SSAUpdater::GetValueAtEndOfBlockInternal(BasicBlock *BB) {
- AvailableValsTy &AvailableVals = getAvailableVals(AV);
+/// PHIiter - Iterator for PHI operands. This is used for the PHI_iterator
+/// in the SSAUpdaterImpl template.
+namespace {
+ class PHIiter {
+ private:
+ PHINode *PHI;
+ unsigned idx;
+
+ public:
+ explicit PHIiter(PHINode *P) // begin iterator
+ : PHI(P), idx(0) {}
+ PHIiter(PHINode *P, bool) // end iterator
+ : PHI(P), idx(PHI->getNumIncomingValues()) {}
+
+ PHIiter &operator++() { ++idx; return *this; }
+ bool operator==(const PHIiter& x) const { return idx == x.idx; }
+ bool operator!=(const PHIiter& x) const { return !operator==(x); }
+ Value *getIncomingValue() { return PHI->getIncomingValue(idx); }
+ BasicBlock *getIncomingBlock() { return PHI->getIncomingBlock(idx); }
+ };
+}
- // Query AvailableVals by doing an insertion of null.
- std::pair<AvailableValsTy::iterator, bool> InsertRes =
- AvailableVals.insert(std::make_pair(BB, TrackingVH<Value>()));
-
- // Handle the case when the insertion fails because we have already seen BB.
- if (!InsertRes.second) {
- // If the insertion failed, there are two cases. The first case is that the
- // value is already available for the specified block. If we get this, just
- // return the value.
- if (InsertRes.first->second != 0)
- return InsertRes.first->second;
-
- // Otherwise, if the value we find is null, then this is the value is not
- // known but it is being computed elsewhere in our recursion. This means
- // that we have a cycle. Handle this by inserting a PHI node and returning
- // it. When we get back to the first instance of the recursion we will fill
- // in the PHI node.
- return InsertRes.first->second =
- PHINode::Create(PrototypeValue->getType(), PrototypeValue->getName(),
- &BB->front());
+/// SSAUpdaterTraits<SSAUpdater> - Traits for the SSAUpdaterImpl template,
+/// specialized for SSAUpdater.
+namespace llvm {
+template<>
+class SSAUpdaterTraits<SSAUpdater> {
+public:
+ typedef BasicBlock BlkT;
+ typedef Value *ValT;
+ typedef PHINode PhiT;
+
+ typedef succ_iterator BlkSucc_iterator;
+ static BlkSucc_iterator BlkSucc_begin(BlkT *BB) { return succ_begin(BB); }
+ static BlkSucc_iterator BlkSucc_end(BlkT *BB) { return succ_end(BB); }
+
+ typedef PHIiter PHI_iterator;
+ static inline PHI_iterator PHI_begin(PhiT *PHI) { return PHI_iterator(PHI); }
+ static inline PHI_iterator PHI_end(PhiT *PHI) {
+ return PHI_iterator(PHI, true);
}
- // Okay, the value isn't in the map and we just inserted a null in the entry
- // to indicate that we're processing the block. Since we have no idea what
- // value is in this block, we have to recurse through our predecessors.
- //
- // While we're walking our predecessors, we keep track of them in a vector,
- // then insert a PHI node in the end if we actually need one. We could use a
- // smallvector here, but that would take a lot of stack space for every level
- // of the recursion, just use IncomingPredInfo as an explicit stack.
- IncomingPredInfoTy &IncomingPredInfo = getIncomingPredInfo(IPI);
- unsigned FirstPredInfoEntry = IncomingPredInfo.size();
-
- // As we're walking the predecessors, keep track of whether they are all
- // producing the same value. If so, this value will capture it, if not, it
- // will get reset to null. We distinguish the no-predecessor case explicitly
- // below.
- TrackingVH<Value> ExistingValue;
-
- // We can get our predecessor info by walking the pred_iterator list, but it
- // is relatively slow. If we already have PHI nodes in this block, walk one
- // of them to get the predecessor list instead.
- if (PHINode *SomePhi = dyn_cast<PHINode>(BB->begin())) {
- for (unsigned i = 0, e = SomePhi->getNumIncomingValues(); i != e; ++i) {
- BasicBlock *PredBB = SomePhi->getIncomingBlock(i);
- Value *PredVal = GetValueAtEndOfBlockInternal(PredBB);
- IncomingPredInfo.push_back(std::make_pair(PredBB, PredVal));
-
- // Set ExistingValue to singular value from all predecessors so far.
- if (i == 0)
- ExistingValue = PredVal;
- else if (PredVal != ExistingValue)
- ExistingValue = 0;
- }
- } else {
- bool isFirstPred = true;
- for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
- BasicBlock *PredBB = *PI;
- Value *PredVal = GetValueAtEndOfBlockInternal(PredBB);
- IncomingPredInfo.push_back(std::make_pair(PredBB, PredVal));
-
- // Set ExistingValue to singular value from all predecessors so far.
- if (isFirstPred) {
- ExistingValue = PredVal;
- isFirstPred = false;
- } else if (PredVal != ExistingValue)
- ExistingValue = 0;
+ /// FindPredecessorBlocks - Put the predecessors of Info->BB into the Preds
+ /// vector, set Info->NumPreds, and allocate space in Info->Preds.
+ static void FindPredecessorBlocks(BasicBlock *BB,
+ SmallVectorImpl<BasicBlock*> *Preds) {
+ // We can get our predecessor info by walking the pred_iterator list,
+ // but it is relatively slow. If we already have PHI nodes in this
+ // block, walk one of them to get the predecessor list instead.
+ if (PHINode *SomePhi = dyn_cast<PHINode>(BB->begin())) {
+ for (unsigned PI = 0, E = SomePhi->getNumIncomingValues(); PI != E; ++PI)
+ Preds->push_back(SomePhi->getIncomingBlock(PI));
+ } else {
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
+ Preds->push_back(*PI);
}
}
- // If there are no predecessors, then we must have found an unreachable block
- // just return 'undef'. Since there are no predecessors, InsertRes must not
- // be invalidated.
- if (IncomingPredInfo.size() == FirstPredInfoEntry)
- return InsertRes.first->second = UndefValue::get(PrototypeValue->getType());
-
- /// Look up BB's entry in AvailableVals. 'InsertRes' may be invalidated. If
- /// this block is involved in a loop, a no-entry PHI node will have been
- /// inserted as InsertedVal. Otherwise, we'll still have the null we inserted
- /// above.
- TrackingVH<Value> &InsertedVal = AvailableVals[BB];
-
- // If the predecessor values are not all the same, then check to see if there
- // is an existing PHI that can be used.
- if (!ExistingValue)
- ExistingValue = GetExistingPHI(BB,
- IncomingPredInfo.begin()+FirstPredInfoEntry,
- IncomingPredInfo.end());
-
- // If there is an existing value we can use, then we don't need to insert a
- // PHI. This is the simple and common case.
- if (ExistingValue) {
- // If a PHI node got inserted, replace it with the existing value and delete
- // it.
- if (InsertedVal) {
- PHINode *OldVal = cast<PHINode>(InsertedVal);
- // Be careful about dead loops. These RAUW's also update InsertedVal.
- if (InsertedVal != ExistingValue)
- OldVal->replaceAllUsesWith(ExistingValue);
- else
- OldVal->replaceAllUsesWith(UndefValue::get(InsertedVal->getType()));
- OldVal->eraseFromParent();
- } else {
- InsertedVal = ExistingValue;
- }
+ /// GetUndefVal - Get an undefined value of the same type as the value
+ /// being handled.
+ static Value *GetUndefVal(BasicBlock *BB, SSAUpdater *Updater) {
+ return UndefValue::get(Updater->ProtoType);
+ }
- // Either path through the 'if' should have set InsertedVal -> ExistingVal.
- assert((InsertedVal == ExistingValue || isa<UndefValue>(InsertedVal)) &&
- "RAUW didn't change InsertedVal to be ExistingValue");
+ /// CreateEmptyPHI - Create a new PHI instruction in the specified block.
+ /// Reserve space for the operands but do not fill them in yet.
+ static Value *CreateEmptyPHI(BasicBlock *BB, unsigned NumPreds,
+ SSAUpdater *Updater) {
+ PHINode *PHI = PHINode::Create(Updater->ProtoType, Updater->ProtoName,
+ &BB->front());
+ PHI->reserveOperandSpace(NumPreds);
+ return PHI;
+ }
- // Drop the entries we added in IncomingPredInfo to restore the stack.
- IncomingPredInfo.erase(IncomingPredInfo.begin()+FirstPredInfoEntry,
- IncomingPredInfo.end());
- return ExistingValue;
+ /// AddPHIOperand - Add the specified value as an operand of the PHI for
+ /// the specified predecessor block.
+ static void AddPHIOperand(PHINode *PHI, Value *Val, BasicBlock *Pred) {
+ PHI->addIncoming(Val, Pred);
}
- // Otherwise, we do need a PHI: insert one now if we don't already have one.
- if (InsertedVal == 0)
- InsertedVal = PHINode::Create(PrototypeValue->getType(),
- PrototypeValue->getName(), &BB->front());
+ /// InstrIsPHI - Check if an instruction is a PHI.
+ ///
+ static PHINode *InstrIsPHI(Instruction *I) {
+ return dyn_cast<PHINode>(I);
+ }
- PHINode *InsertedPHI = cast<PHINode>(InsertedVal);
- InsertedPHI->reserveOperandSpace(IncomingPredInfo.size()-FirstPredInfoEntry);
+ /// ValueIsPHI - Check if a value is a PHI.
+ ///
+ static PHINode *ValueIsPHI(Value *Val, SSAUpdater *Updater) {
+ return dyn_cast<PHINode>(Val);
+ }
- // Fill in all the predecessors of the PHI.
- for (IncomingPredInfoTy::iterator I =
- IncomingPredInfo.begin()+FirstPredInfoEntry,
- E = IncomingPredInfo.end(); I != E; ++I)
- InsertedPHI->addIncoming(I->second, I->first);
+ /// ValueIsNewPHI - Like ValueIsPHI but also check if the PHI has no source
+ /// operands, i.e., it was just added.
+ static PHINode *ValueIsNewPHI(Value *Val, SSAUpdater *Updater) {
+ PHINode *PHI = ValueIsPHI(Val, Updater);
+ if (PHI && PHI->getNumIncomingValues() == 0)
+ return PHI;
+ return 0;
+ }
- // Drop the entries we added in IncomingPredInfo to restore the stack.
- IncomingPredInfo.erase(IncomingPredInfo.begin()+FirstPredInfoEntry,
- IncomingPredInfo.end());
+ /// GetPHIValue - For the specified PHI instruction, return the value
+ /// that it defines.
+ static Value *GetPHIValue(PHINode *PHI) {
+ return PHI;
+ }
+};
- // See if the PHI node can be merged to a single value. This can happen in
- // loop cases when we get a PHI of itself and one other value.
- if (Value *ConstVal = InsertedPHI->hasConstantValue()) {
- InsertedPHI->replaceAllUsesWith(ConstVal);
- InsertedPHI->eraseFromParent();
- InsertedVal = ConstVal;
- } else {
- DEBUG(dbgs() << " Inserted PHI: " << *InsertedPHI << "\n");
+} // End llvm namespace
- // If the client wants to know about all new instructions, tell it.
- if (InsertedPHIs) InsertedPHIs->push_back(InsertedPHI);
- }
+/// GetValueAtEndOfBlockInternal - Check to see if AvailableVals has an entry
+/// for the specified BB and if so, return it. If not, construct SSA form by
+/// first calculating the required placement of PHIs and then inserting new
+/// PHIs where needed.
+Value *SSAUpdater::GetValueAtEndOfBlockInternal(BasicBlock *BB) {
+ AvailableValsTy &AvailableVals = getAvailableVals(AV);
+ if (Value *V = AvailableVals[BB])
+ return V;
- return InsertedVal;
+ SSAUpdaterImpl<SSAUpdater> Impl(this, &AvailableVals, InsertedPHIs);
+ return Impl.GetValue(BB);
}
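The SSAUpdater rewrite is the largest piece here: the recursive GetValueAtEndOfBlockInternal with its explicit IncomingPredInfo stack is gone, replaced by a thin SSAUpdaterTraits specialization that lets the shared SSAUpdaterImpl template work out PHI placement. From a client's point of view the visible change is the Initialize() signature, which now takes a type and a name instead of a prototype value. A sketch of typical client code under the new interface (StoredVal, DefBB, UseBB and SomeUse are illustrative; the methods are the ones defined above):

    SmallVector<PHINode*, 8> NewPHIs;      // receives any PHIs the updater inserts
    SSAUpdater Updater(&NewPHIs);
    Updater.Initialize(StoredVal->getType(), "promoted");

    // Tell the updater where definitions of the value live...
    Updater.AddAvailableValue(DefBB, StoredVal);

    // ...then ask for the live value wherever it is needed.
    Value *Live = Updater.GetValueInMiddleOfBlock(UseBB);

    // Or rewrite an existing use in place (PHI uses are resolved through the
    // corresponding incoming block, as RewriteUse above shows).
    Updater.RewriteUse(SomeUse);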
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/SSI.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/SSI.cpp
deleted file mode 100644
index 4e813dd..0000000
--- a/libclamav/c++/llvm/lib/Transforms/Utils/SSI.cpp
+++ /dev/null
@@ -1,432 +0,0 @@
-//===------------------- SSI.cpp - Creates SSI Representation -------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass converts a list of variables to the Static Single Information
-// form. This is a program representation described by Scott Ananian in his
-// Master Thesis: "The Static Single Information Form (1999)".
-// We are building an on-demand representation, that is, we do not convert
-// every single variable in the target function to SSI form. Rather, we receive
-// a list of target variables that must be converted. We also do not
-// completely convert a target variable to the SSI format. Instead, we only
-// change the variable in the points where new information can be attached
-// to its live range, that is, at branch points.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "ssi"
-
-#include "llvm/Transforms/Scalar.h"
-#include "llvm/Transforms/Utils/SSI.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/Dominators.h"
-
-using namespace llvm;
-
-static const std::string SSI_PHI = "SSI_phi";
-static const std::string SSI_SIG = "SSI_sigma";
-
-STATISTIC(NumSigmaInserted, "Number of sigma functions inserted");
-STATISTIC(NumPhiInserted, "Number of phi functions inserted");
-
-void SSI::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequiredTransitive<DominanceFrontier>();
- AU.addRequiredTransitive<DominatorTree>();
- AU.setPreservesAll();
-}
-
-bool SSI::runOnFunction(Function &F) {
- DT_ = &getAnalysis<DominatorTree>();
- return false;
-}
-
-/// This methods creates the SSI representation for the list of values
-/// received. It will only create SSI representation if a value is used
-/// to decide a branch. Repeated values are created only once.
-///
-void SSI::createSSI(SmallVectorImpl<Instruction *> &value) {
- init(value);
-
- SmallPtrSet<Instruction*, 4> needConstruction;
- for (SmallVectorImpl<Instruction*>::iterator I = value.begin(),
- E = value.end(); I != E; ++I)
- if (created.insert(*I))
- needConstruction.insert(*I);
-
- insertSigmaFunctions(needConstruction);
-
- // Test if there is a need to transform to SSI
- if (!needConstruction.empty()) {
- insertPhiFunctions(needConstruction);
- renameInit(needConstruction);
- rename(DT_->getRoot());
- fixPhis();
- }
-
- clean();
-}
-
-/// Insert sigma functions (a sigma function is a phi function with one
-/// operator)
-///
-void SSI::insertSigmaFunctions(SmallPtrSet<Instruction*, 4> &value) {
- for (SmallPtrSet<Instruction*, 4>::iterator I = value.begin(),
- E = value.end(); I != E; ++I) {
- for (Value::use_iterator begin = (*I)->use_begin(),
- end = (*I)->use_end(); begin != end; ++begin) {
- // Test if the Use of the Value is in a comparator
- if (CmpInst *CI = dyn_cast<CmpInst>(begin)) {
- // Iterates through all uses of CmpInst
- for (Value::use_iterator begin_ci = CI->use_begin(),
- end_ci = CI->use_end(); begin_ci != end_ci; ++begin_ci) {
- // Test if any use of CmpInst is in a Terminator
- if (TerminatorInst *TI = dyn_cast<TerminatorInst>(begin_ci)) {
- insertSigma(TI, *I);
- }
- }
- }
- }
- }
-}
-
-/// Inserts Sigma Functions in every BasicBlock successor to Terminator
-/// Instruction TI. All inserted Sigma Function are related to Instruction I.
-///
-void SSI::insertSigma(TerminatorInst *TI, Instruction *I) {
- // Basic Block of the Terminator Instruction
- BasicBlock *BB = TI->getParent();
- for (unsigned i = 0, e = TI->getNumSuccessors(); i < e; ++i) {
- // Next Basic Block
- BasicBlock *BB_next = TI->getSuccessor(i);
- if (BB_next != BB &&
- BB_next->getSinglePredecessor() != NULL &&
- dominateAny(BB_next, I)) {
- PHINode *PN = PHINode::Create(I->getType(), SSI_SIG, BB_next->begin());
- PN->addIncoming(I, BB);
- sigmas[PN] = I;
- created.insert(PN);
- defsites[I].push_back(BB_next);
- ++NumSigmaInserted;
- }
- }
-}
-
-/// Insert phi functions when necessary
-///
-void SSI::insertPhiFunctions(SmallPtrSet<Instruction*, 4> &value) {
- DominanceFrontier *DF = &getAnalysis<DominanceFrontier>();
- for (SmallPtrSet<Instruction*, 4>::iterator I = value.begin(),
- E = value.end(); I != E; ++I) {
- // Test if there were any sigmas for this variable
- SmallPtrSet<BasicBlock *, 16> BB_visited;
-
- // Insert phi functions if there is any sigma function
- while (!defsites[*I].empty()) {
-
- BasicBlock *BB = defsites[*I].back();
-
- defsites[*I].pop_back();
- DominanceFrontier::iterator DF_BB = DF->find(BB);
-
- // The BB is unreachable. Skip it.
- if (DF_BB == DF->end())
- continue;
-
- // Iterates through all the dominance frontier of BB
- for (std::set<BasicBlock *>::iterator DF_BB_begin =
- DF_BB->second.begin(), DF_BB_end = DF_BB->second.end();
- DF_BB_begin != DF_BB_end; ++DF_BB_begin) {
- BasicBlock *BB_dominated = *DF_BB_begin;
-
- // Test if has not yet visited this node and if the
- // original definition dominates this node
- if (BB_visited.insert(BB_dominated) &&
- DT_->properlyDominates(value_original[*I], BB_dominated) &&
- dominateAny(BB_dominated, *I)) {
- PHINode *PN = PHINode::Create(
- (*I)->getType(), SSI_PHI, BB_dominated->begin());
- phis.insert(std::make_pair(PN, *I));
- created.insert(PN);
-
- defsites[*I].push_back(BB_dominated);
- ++NumPhiInserted;
- }
- }
- }
- BB_visited.clear();
- }
-}
-
-/// Some initialization for the rename part
-///
-void SSI::renameInit(SmallPtrSet<Instruction*, 4> &value) {
- for (SmallPtrSet<Instruction*, 4>::iterator I = value.begin(),
- E = value.end(); I != E; ++I)
- value_stack[*I].push_back(*I);
-}
-
-/// Renames all variables in the specified BasicBlock.
-/// Only variables that need to be rename will be.
-///
-void SSI::rename(BasicBlock *BB) {
- SmallPtrSet<Instruction*, 8> defined;
-
- // Iterate through instructions and make appropriate renaming.
- // For SSI_PHI (b = PHI()), store b at value_stack as a new
- // definition of the variable it represents.
- // For SSI_SIG (b = PHI(a)), substitute a with the current
- // value of a, present in the value_stack.
- // Then store bin the value_stack as the new definition of a.
- // For all other instructions (b = OP(a, c, d, ...)), we need to substitute
- // all operands with its current value, present in value_stack.
- for (BasicBlock::iterator begin = BB->begin(), end = BB->end();
- begin != end; ++begin) {
- Instruction *I = begin;
- if (PHINode *PN = dyn_cast<PHINode>(I)) { // Treat PHI functions
- Instruction* position;
-
- // Treat SSI_PHI
- if ((position = getPositionPhi(PN))) {
- value_stack[position].push_back(PN);
- defined.insert(position);
- // Treat SSI_SIG
- } else if ((position = getPositionSigma(PN))) {
- substituteUse(I);
- value_stack[position].push_back(PN);
- defined.insert(position);
- }
-
- // Treat all other PHI functions
- else {
- substituteUse(I);
- }
- }
-
- // Treat all other functions
- else {
- substituteUse(I);
- }
- }
-
- // This loop iterates in all BasicBlocks that are successors of the current
- // BasicBlock. For each SSI_PHI instruction found, insert an operand.
- // This operand is the current operand in value_stack for the variable
- // in "position". And the BasicBlock this operand represents is the current
- // BasicBlock.
- for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI) {
- BasicBlock *BB_succ = *SI;
-
- for (BasicBlock::iterator begin = BB_succ->begin(),
- notPhi = BB_succ->getFirstNonPHI(); begin != *notPhi; ++begin) {
- Instruction *I = begin;
- PHINode *PN = dyn_cast<PHINode>(I);
- Instruction* position;
- if (PN && ((position = getPositionPhi(PN)))) {
- PN->addIncoming(value_stack[position].back(), BB);
- }
- }
- }
-
- // This loop calls rename on all children from this block. This time children
- // refers to a successor block in the dominance tree.
- DomTreeNode *DTN = DT_->getNode(BB);
- for (DomTreeNode::iterator begin = DTN->begin(), end = DTN->end();
- begin != end; ++begin) {
- DomTreeNodeBase<BasicBlock> *DTN_children = *begin;
- BasicBlock *BB_children = DTN_children->getBlock();
- rename(BB_children);
- }
-
- // Now we remove all inserted definitions of a variable from the top of
- // the stack leaving the previous one as the top.
- for (SmallPtrSet<Instruction*, 8>::iterator DI = defined.begin(),
- DE = defined.end(); DI != DE; ++DI)
- value_stack[*DI].pop_back();
-}
-
-/// Substitute any use in this instruction for the last definition of
-/// the variable
-///
-void SSI::substituteUse(Instruction *I) {
- for (unsigned i = 0, e = I->getNumOperands(); i < e; ++i) {
- Value *operand = I->getOperand(i);
- for (DenseMap<Instruction*, SmallVector<Instruction*, 1> >::iterator
- VI = value_stack.begin(), VE = value_stack.end(); VI != VE; ++VI) {
- if (operand == VI->second.front() &&
- I != VI->second.back()) {
- PHINode *PN_I = dyn_cast<PHINode>(I);
- PHINode *PN_vs = dyn_cast<PHINode>(VI->second.back());
-
- // If a phi created in a BasicBlock is used as an operand of another
- // created in the same BasicBlock, this step marks this second phi,
- // to fix this issue later. It cannot be fixed now, because the
- // operands of the first phi are not final yet.
- if (PN_I && PN_vs &&
- VI->second.back()->getParent() == I->getParent()) {
-
- phisToFix.insert(PN_I);
- }
-
- I->setOperand(i, VI->second.back());
- break;
- }
- }
- }
-}
-
-/// Test if the BasicBlock BB dominates any use or definition of value.
-/// If it dominates a phi instruction that is on the same BasicBlock,
-/// that does not count.
-///
-bool SSI::dominateAny(BasicBlock *BB, Instruction *value) {
- for (Value::use_iterator begin = value->use_begin(),
- end = value->use_end(); begin != end; ++begin) {
- Instruction *I = cast<Instruction>(*begin);
- BasicBlock *BB_father = I->getParent();
- if (BB == BB_father && isa<PHINode>(I))
- continue;
- if (DT_->dominates(BB, BB_father)) {
- return true;
- }
- }
- return false;
-}
-
-/// When there is a phi node that is created in a BasicBlock and it is used
-/// as an operand of another phi function used in the same BasicBlock,
-/// LLVM looks this as an error. So on the second phi, the first phi is called
-/// P and the BasicBlock it incomes is B. This P will be replaced by the value
-/// it has for BasicBlock B. It also includes undef values for predecessors
-/// that were not included in the phi.
-///
-void SSI::fixPhis() {
- for (SmallPtrSet<PHINode *, 1>::iterator begin = phisToFix.begin(),
- end = phisToFix.end(); begin != end; ++begin) {
- PHINode *PN = *begin;
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) {
- PHINode *PN_father = dyn_cast<PHINode>(PN->getIncomingValue(i));
- if (PN_father && PN->getParent() == PN_father->getParent() &&
- !DT_->dominates(PN->getParent(), PN->getIncomingBlock(i))) {
- BasicBlock *BB = PN->getIncomingBlock(i);
- int pos = PN_father->getBasicBlockIndex(BB);
- PN->setIncomingValue(i, PN_father->getIncomingValue(pos));
- }
- }
- }
-
- for (DenseMapIterator<PHINode *, Instruction*> begin = phis.begin(),
- end = phis.end(); begin != end; ++begin) {
- PHINode *PN = begin->first;
- BasicBlock *BB = PN->getParent();
- pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
- SmallVector<BasicBlock*, 8> Preds(PI, PE);
- for (unsigned size = Preds.size();
- PI != PE && PN->getNumIncomingValues() != size; ++PI) {
- bool found = false;
- for (unsigned i = 0, pn_end = PN->getNumIncomingValues();
- i < pn_end; ++i) {
- if (PN->getIncomingBlock(i) == *PI) {
- found = true;
- break;
- }
- }
- if (!found) {
- PN->addIncoming(UndefValue::get(PN->getType()), *PI);
- }
- }
- }
-}
-
-/// Return which variable (position on the vector of variables) this phi
-/// represents on the phis list.
-///
-Instruction* SSI::getPositionPhi(PHINode *PN) {
- DenseMap<PHINode *, Instruction*>::iterator val = phis.find(PN);
- if (val == phis.end())
- return 0;
- else
- return val->second;
-}
-
-/// Return which variable (position on the vector of variables) this phi
-/// represents on the sigmas list.
-///
-Instruction* SSI::getPositionSigma(PHINode *PN) {
- DenseMap<PHINode *, Instruction*>::iterator val = sigmas.find(PN);
- if (val == sigmas.end())
- return 0;
- else
- return val->second;
-}
-
-/// Initializes
-///
-void SSI::init(SmallVectorImpl<Instruction *> &value) {
- for (SmallVectorImpl<Instruction *>::iterator I = value.begin(),
- E = value.end(); I != E; ++I) {
- value_original[*I] = (*I)->getParent();
- defsites[*I].push_back((*I)->getParent());
- }
-}
-
-/// Clean all used resources in this creation of SSI
-///
-void SSI::clean() {
- phis.clear();
- sigmas.clear();
- phisToFix.clear();
-
- defsites.clear();
- value_stack.clear();
- value_original.clear();
-}
-
-/// createSSIPass - The public interface to this file...
-///
-FunctionPass *llvm::createSSIPass() { return new SSI(); }
-
-char SSI::ID = 0;
-static RegisterPass<SSI> X("ssi", "Static Single Information Construction");
-
-/// SSIEverything - A pass that runs createSSI on every non-void variable,
-/// intended for debugging.
-namespace {
- struct SSIEverything : public FunctionPass {
- static char ID; // Pass identification, replacement for typeid
- SSIEverything() : FunctionPass(&ID) {}
-
- bool runOnFunction(Function &F);
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<SSI>();
- }
- };
-}
-
-bool SSIEverything::runOnFunction(Function &F) {
- SmallVector<Instruction *, 16> Insts;
- SSI &ssi = getAnalysis<SSI>();
-
- if (F.isDeclaration() || F.isIntrinsic()) return false;
-
- for (Function::iterator B = F.begin(), BE = F.end(); B != BE; ++B)
- for (BasicBlock::iterator I = B->begin(), E = B->end(); I != E; ++I)
- if (!I->getType()->isVoidTy())
- Insts.push_back(I);
-
- ssi.createSSI(Insts);
- return true;
-}
-
-/// createSSIEverythingPass - The public interface to this file...
-///
-FunctionPass *llvm::createSSIEverythingPass() { return new SSIEverything(); }
-
-char SSIEverything::ID = 0;
-static RegisterPass<SSIEverything>
-Y("ssi-everything", "Static Single Information Construction");
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index f343c38..28d7afb 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -224,7 +224,7 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
if (BI->isUnconditional() && BI->getSuccessor(0) == BB) {
if (!AggressiveInsts) return false;
// Okay, it looks like the instruction IS in the "condition". Check to
- // see if its a cheap instruction to unconditionally compute, and if it
+ // see if it's a cheap instruction to unconditionally compute, and if it
// only uses stuff defined outside of the condition. If so, hoist it out.
if (!I->isSafeToSpeculativelyExecute())
return false;
@@ -949,7 +949,7 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *BB1) {
UI != E; ++UI) {
// Ignore any user that is not a PHI node in BB2. These can only occur in
// unreachable blocks, because they would not be dominated by the instr.
- PHINode *PN = dyn_cast<PHINode>(UI);
+ PHINode *PN = dyn_cast<PHINode>(*UI);
if (!PN || PN->getParent() != BB2)
return false;
PHIUses.push_back(PN);
@@ -1377,8 +1377,9 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI) {
bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
BasicBlock *BB = BI->getParent();
Instruction *Cond = dyn_cast<Instruction>(BI->getCondition());
- if (Cond == 0) return false;
-
+ if (Cond == 0 || (!isa<CmpInst>(Cond) && !isa<BinaryOperator>(Cond)) ||
+ Cond->getParent() != BB || !Cond->hasOneUse())
+ return false;
// Only allow this if the condition is a simple instruction that can be
// executed unconditionally. It must be in the same block as the branch, and
@@ -1387,11 +1388,23 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// Ignore dbg intrinsics.
while(isa<DbgInfoIntrinsic>(FrontIt))
++FrontIt;
- if ((!isa<CmpInst>(Cond) && !isa<BinaryOperator>(Cond)) ||
- Cond->getParent() != BB || &*FrontIt != Cond || !Cond->hasOneUse()) {
- return false;
+
+ // Allow a single instruction to be hoisted in addition to the compare
+ // that feeds the branch. We later ensure that any values that _it_ uses
+ // were also live in the predecessor, so that we don't unnecessarily create
+ // register pressure or inhibit out-of-order execution.
+ Instruction *BonusInst = 0;
+ if (&*FrontIt != Cond &&
+ FrontIt->hasOneUse() && *FrontIt->use_begin() == Cond &&
+ FrontIt->isSafeToSpeculativelyExecute()) {
+ BonusInst = &*FrontIt;
+ ++FrontIt;
}
+ // Only a single bonus inst is allowed.
+ if (&*FrontIt != Cond)
+ return false;
+
// Make sure the instruction after the condition is the cond branch.
BasicBlock::iterator CondIt = Cond; ++CondIt;
// Ignore dbg intrinsics.
@@ -1429,6 +1442,44 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
!SafeToMergeTerminators(BI, PBI))
continue;
+ // Ensure that any values used in the bonus instruction are also used
+ // by the terminator of the predecessor. This means that those values
+ // must already have been resolved, so we won't be inhibiting the
+ // out-of-order core by speculating them earlier.
+ if (BonusInst) {
+ // Collect the values used by the bonus inst
+ SmallPtrSet<Value*, 4> UsedValues;
+ for (Instruction::op_iterator OI = BonusInst->op_begin(),
+ OE = BonusInst->op_end(); OI != OE; ++OI) {
+ Value* V = *OI;
+ if (!isa<Constant>(V))
+ UsedValues.insert(V);
+ }
+
+ SmallVector<std::pair<Value*, unsigned>, 4> Worklist;
+ Worklist.push_back(std::make_pair(PBI->getOperand(0), 0));
+
+ // Walk up to four levels back up the use-def chain of the predecessor's
+ // terminator to see if all those values were used. The choice of four
+ // levels is arbitrary, to provide a compile-time-cost bound.
+ while (!Worklist.empty()) {
+ std::pair<Value*, unsigned> Pair = Worklist.back();
+ Worklist.pop_back();
+
+ if (Pair.second >= 4) continue;
+ UsedValues.erase(Pair.first);
+ if (UsedValues.empty()) break;
+
+ if (Instruction* I = dyn_cast<Instruction>(Pair.first)) {
+ for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
+ OI != OE; ++OI)
+ Worklist.push_back(std::make_pair(OI->get(), Pair.second+1));
+ }
+ }
+
+ if (!UsedValues.empty()) return false;
+ }
+
Instruction::BinaryOps Opc;
bool InvertPredCond = false;
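The bonus-instruction check added above walks at most four levels back up the use-def chain of the predecessor's branch condition and only allows the hoist if every value the bonus instruction reads is seen on that walk. A stand-alone sketch of the same bounded worklist walk over a toy operand map; allUsedValuesSeen and the string-keyed graph are illustrative, not LLVM types:

#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include <iostream>

// Returns true if every value in `used` is reached from `root` within
// `maxDepth` steps of the operand graph, the same bound (4) the patch uses
// to keep the check cheap at compile time.
bool allUsedValuesSeen(const std::map<std::string, std::vector<std::string>> &operands,
                       const std::string &root,
                       std::set<std::string> used,
                       unsigned maxDepth = 4) {
  std::vector<std::pair<std::string, unsigned>> worklist;
  worklist.push_back({root, 0});
  while (!worklist.empty()) {
    std::pair<std::string, unsigned> item = worklist.back();
    worklist.pop_back();
    if (item.second >= maxDepth) continue;   // depth bound reached
    used.erase(item.first);
    if (used.empty()) return true;           // everything accounted for
    auto it = operands.find(item.first);
    if (it == operands.end()) continue;      // leaf (constant or argument)
    for (const std::string &op : it->second)
      worklist.push_back({op, item.second + 1});
  }
  return used.empty();
}

int main() {
  // cond = icmp(a, b); a = add(x, y)
  std::map<std::string, std::vector<std::string>> ops = {
    {"cond", {"a", "b"}}, {"a", {"x", "y"}}};
  std::cout << allUsedValuesSeen(ops, "cond", {"x", "b"}) << "\n";  // 1
  std::cout << allUsedValuesSeen(ops, "cond", {"z"}) << "\n";       // 0
}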
@@ -1457,9 +1508,19 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
PBI->setSuccessor(1, OldTrue);
}
+ // If we have a bonus inst, clone it into the predecessor block.
+ Instruction *NewBonus = 0;
+ if (BonusInst) {
+ NewBonus = BonusInst->clone();
+ PredBlock->getInstList().insert(PBI, NewBonus);
+ NewBonus->takeName(BonusInst);
+ BonusInst->setName(BonusInst->getName()+".old");
+ }
+
// Clone Cond into the predecessor basic block, and or/and the
// two conditions together.
Instruction *New = Cond->clone();
+ if (BonusInst) New->replaceUsesOfWith(BonusInst, NewBonus);
PredBlock->getInstList().insert(PBI, New);
New->takeName(Cond);
Cond->setName(New->getName()+".old");
@@ -1513,17 +1574,19 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
// Okay, we're going to insert the PHI node. Since PBI is not the only
// predecessor, compute the PHI'd conditional value for all of the preds.
// Any predecessor where the condition is not computable we keep symbolic.
- for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
- if ((PBI = dyn_cast<BranchInst>((*PI)->getTerminator())) &&
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ BasicBlock *P = *PI;
+ if ((PBI = dyn_cast<BranchInst>(P->getTerminator())) &&
PBI != BI && PBI->isConditional() &&
PBI->getCondition() == BI->getCondition() &&
PBI->getSuccessor(0) != PBI->getSuccessor(1)) {
bool CondIsTrue = PBI->getSuccessor(0) == BB;
NewPN->addIncoming(ConstantInt::get(Type::getInt1Ty(BB->getContext()),
- CondIsTrue), *PI);
+ CondIsTrue), P);
} else {
- NewPN->addIncoming(BI->getCondition(), *PI);
+ NewPN->addIncoming(BI->getCondition(), P);
}
+ }
BI->setCondition(NewPN);
return true;
@@ -1661,12 +1724,12 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
assert(BB && BB->getParent() && "Block not embedded in function!");
assert(BB->getTerminator() && "Degenerate basic block encountered!");
- assert(&BB->getParent()->getEntryBlock() != BB &&
- "Can't Simplify entry block!");
- // Remove basic blocks that have no predecessors... or that just have themself
- // as a predecessor. These are unreachable.
- if (pred_begin(BB) == pred_end(BB) || BB->getSinglePredecessor() == BB) {
+ // Remove basic blocks that have no predecessors (except the entry block)...
+ // or that just have themself as a predecessor. These are unreachable.
+ if ((pred_begin(BB) == pred_end(BB) &&
+ &BB->getParent()->getEntryBlock() != BB) ||
+ BB->getSinglePredecessor() == BB) {
DEBUG(dbgs() << "Removing BB: \n" << *BB);
DeleteDeadBlock(BB);
return true;
@@ -1697,10 +1760,11 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
SmallVector<BasicBlock*, 8> UncondBranchPreds;
SmallVector<BranchInst*, 8> CondBranchPreds;
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
- TerminatorInst *PTI = (*PI)->getTerminator();
+ BasicBlock *P = *PI;
+ TerminatorInst *PTI = P->getTerminator();
if (BranchInst *BI = dyn_cast<BranchInst>(PTI)) {
if (BI->isUnconditional())
- UncondBranchPreds.push_back(*PI);
+ UncondBranchPreds.push_back(P);
else
CondBranchPreds.push_back(BI);
}
@@ -1768,7 +1832,7 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
Pred->getInstList().remove(II); // Take out of symbol table
// Insert the call now.
- SmallVector<Value*,8> Args(II->op_begin()+3, II->op_end());
+ SmallVector<Value*,8> Args(II->op_begin(), II->op_end()-3);
CallInst *CI = CallInst::Create(II->getCalledValue(),
Args.begin(), Args.end(),
II->getName(), BI);
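This hunk and its twin later in the file change the argument slice from op_begin()+3 to op_end()-3, consistent with the operand layout in this LLVM version where an invoke's arguments come first and the callee plus the two destination blocks trail them. A rough sketch of the invoke-to-call rewrite using only the calls that appear in these hunks; the invokeToCall helper name is made up, and this assumes the LLVM headers shipped in this tree:

#include "llvm/Instructions.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;

// Illustrative only: turn an InvokeInst into a plain CallInst inserted
// before InsertPt, copying calling convention and attributes, as the
// SimplifyCFG hunks above do when the unwind edge is known to be dead.
static CallInst *invokeToCall(InvokeInst *II, Instruction *InsertPt) {
  // Arguments are the leading operands; the trailing three operands are
  // the two destination blocks and the called value.
  SmallVector<Value*, 8> Args(II->op_begin(), II->op_end() - 3);
  CallInst *CI = CallInst::Create(II->getCalledValue(),
                                  Args.begin(), Args.end(),
                                  II->getName(), InsertPt);
  CI->setCallingConv(II->getCallingConv());
  CI->setAttributes(II->getAttributes());
  II->replaceAllUsesWith(CI);
  return CI;
}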
@@ -1816,8 +1880,9 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
while (isa<DbgInfoIntrinsic>(BBI))
++BBI;
if (BBI->isTerminator()) // Terminator is the only non-phi instruction!
- if (TryToSimplifyUncondBranchFromEmptyBlock(BB))
- return true;
+ if (BB != &BB->getParent()->getEntryBlock())
+ if (TryToSimplifyUncondBranchFromEmptyBlock(BB))
+ return true;
} else { // Conditional branch
if (isValueEqualityComparison(BI)) {
@@ -1826,7 +1891,7 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
// switch.
if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
if (SimplifyEqualityComparisonWithOnlyPredecessor(BI, OnlyPred))
- return SimplifyCFG(BB) || 1;
+ return SimplifyCFG(BB) | true;
// This block must be empty, except for the setcond inst, if it exists.
// Ignore dbg intrinsics.
@@ -1860,7 +1925,7 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
// branches to us and one of our successors, fold the setcc into the
// predecessor and use logical operations to pick the right destination.
if (FoldBranchToCommonDest(BI))
- return SimplifyCFG(BB) | 1;
+ return SimplifyCFG(BB) | true;
// Scan predecessor blocks for conditional branches.
@@ -1970,13 +2035,13 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
II->removeFromParent(); // Take out of symbol table
// Insert the call now...
- SmallVector<Value*, 8> Args(II->op_begin()+3, II->op_end());
+ SmallVector<Value*, 8> Args(II->op_begin(), II->op_end()-3);
CallInst *CI = CallInst::Create(II->getCalledValue(),
Args.begin(), Args.end(),
II->getName(), BI);
CI->setCallingConv(II->getCallingConv());
CI->setAttributes(II->getAttributes());
- // If the invoke produced a value, the Call does now instead.
+ // If the invoke produced a value, the call does now instead.
II->replaceAllUsesWith(CI);
delete II;
Changed = true;
@@ -1985,12 +2050,38 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
}
// If this block is now dead, remove it.
- if (pred_begin(BB) == pred_end(BB)) {
+ if (pred_begin(BB) == pred_end(BB) &&
+ BB != &BB->getParent()->getEntryBlock()) {
// We know there are no successors, so just nuke the block.
M->getBasicBlockList().erase(BB);
return true;
}
}
+ } else if (IndirectBrInst *IBI =
+ dyn_cast<IndirectBrInst>(BB->getTerminator())) {
+ // Eliminate redundant destinations.
+ SmallPtrSet<Value *, 8> Succs;
+ for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
+ BasicBlock *Dest = IBI->getDestination(i);
+ if (!Dest->hasAddressTaken() || !Succs.insert(Dest)) {
+ Dest->removePredecessor(BB);
+ IBI->removeDestination(i);
+ --i; --e;
+ Changed = true;
+ }
+ }
+
+ if (IBI->getNumDestinations() == 0) {
+ // If the indirectbr has no successors, change it to unreachable.
+ new UnreachableInst(IBI->getContext(), IBI);
+ IBI->eraseFromParent();
+ Changed = true;
+ } else if (IBI->getNumDestinations() == 1) {
+ // If the indirectbr has one successor, change it to a direct branch.
+ BranchInst::Create(IBI->getDestination(0), IBI);
+ IBI->eraseFromParent();
+ Changed = true;
+ }
}
// Merge basic blocks into their predecessor if there is only one distinct
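The new indirectbr handling above drops duplicate (or no-longer-address-taken) destinations and then degenerates the terminator when zero or one destinations remain. A compact stand-alone model of that decision on plain strings; simplifyIndirectBr and the block names are illustrative, not LLVM API:

#include <set>
#include <string>
#include <vector>
#include <iostream>

// Mimics the indirectbr cleanup: drop duplicate destinations, then classify
// what the terminator should become.
std::string simplifyIndirectBr(std::vector<std::string> dests) {
  std::set<std::string> seen;
  std::vector<std::string> kept;
  for (const std::string &d : dests)
    if (seen.insert(d).second)   // keep only the first occurrence
      kept.push_back(d);

  if (kept.empty())
    return "unreachable";                    // no successors at all
  if (kept.size() == 1)
    return "br label %" + kept[0];           // single successor: direct branch
  return "indirectbr with " + std::to_string(kept.size()) + " destinations";
}

int main() {
  std::cout << simplifyIndirectBr({"bb1", "bb1"}) << "\n";  // br label %bb1
  std::cout << simplifyIndirectBr({}) << "\n";              // unreachable
}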
@@ -2004,12 +2095,15 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
// is a conditional branch, see if we can hoist any code from this block up
// into our predecessor.
pred_iterator PI(pred_begin(BB)), PE(pred_end(BB));
- BasicBlock *OnlyPred = *PI++;
- for (; PI != PE; ++PI) // Search all predecessors, see if they are all same
- if (*PI != OnlyPred) {
+ BasicBlock *OnlyPred = 0;
+ for (; PI != PE; ++PI) { // Search all predecessors, see if they are all same
+ if (!OnlyPred)
+ OnlyPred = *PI;
+ else if (*PI != OnlyPred) {
OnlyPred = 0; // There are multiple different predecessors...
break;
}
+ }
if (OnlyPred)
if (BranchInst *BI = dyn_cast<BranchInst>(OnlyPred->getTerminator()))
@@ -2108,8 +2202,6 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
/// eliminates unreachable basic blocks, and does other "peephole" optimization
/// of the CFG. It returns true if a modification was made.
///
-/// WARNING: The entry node of a function may not be simplified.
-///
bool llvm::SimplifyCFG(BasicBlock *BB, const TargetData *TD) {
return SimplifyCFGOpt(TD).run(BB);
}
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
index 3fa8b70..a51f1e1 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
@@ -24,8 +24,8 @@
using namespace llvm;
char UnifyFunctionExitNodes::ID = 0;
-static RegisterPass<UnifyFunctionExitNodes>
-X("mergereturn", "Unify function exit nodes");
+INITIALIZE_PASS(UnifyFunctionExitNodes, "mergereturn",
+ "Unify function exit nodes", false, false);
Pass *llvm::createUnifyFunctionExitNodesPass() {
return new UnifyFunctionExitNodes();
@@ -35,7 +35,7 @@ void UnifyFunctionExitNodes::getAnalysisUsage(AnalysisUsage &AU) const{
// We preserve the non-critical-edgeness property
AU.addPreservedID(BreakCriticalEdgesID);
// This is a cluster of orthogonal Transforms
- AU.addPreservedID(PromoteMemoryToRegisterID);
+ AU.addPreserved("mem2reg");
AU.addPreservedID(LowerSwitchID);
}
diff --git a/libclamav/c++/llvm/lib/Transforms/Utils/ValueMapper.cpp b/libclamav/c++/llvm/lib/Transforms/Utils/ValueMapper.cpp
index 6045048..fc4bde7 100644
--- a/libclamav/c++/llvm/lib/Transforms/Utils/ValueMapper.cpp
+++ b/libclamav/c++/llvm/lib/Transforms/Utils/ValueMapper.cpp
@@ -20,38 +20,61 @@
#include "llvm/ADT/SmallVector.h"
using namespace llvm;
-Value *llvm::MapValue(const Value *V, ValueMapTy &VM) {
+Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM,
+ bool ModuleLevelChanges) {
Value *&VMSlot = VM[V];
if (VMSlot) return VMSlot; // Does it exist in the map yet?
// NOTE: VMSlot can be invalidated by any reference to VM, which can grow the
// DenseMap. This includes any recursive calls to MapValue.
- // Global values and non-function-local metadata do not need to be seeded into
- // the ValueMap if they are using the identity mapping.
+ // Global values do not need to be seeded into the VM if they
+ // are using the identity mapping.
if (isa<GlobalValue>(V) || isa<InlineAsm>(V) || isa<MDString>(V) ||
- (isa<MDNode>(V) && !cast<MDNode>(V)->isFunctionLocal()))
+ (isa<MDNode>(V) && !cast<MDNode>(V)->isFunctionLocal() &&
+ !ModuleLevelChanges))
return VMSlot = const_cast<Value*>(V);
if (const MDNode *MD = dyn_cast<MDNode>(V)) {
- SmallVector<Value*, 4> Elts;
- for (unsigned i = 0, e = MD->getNumOperands(); i != e; ++i)
- Elts.push_back(MD->getOperand(i) ? MapValue(MD->getOperand(i), VM) : 0);
- return VM[V] = MDNode::get(V->getContext(), Elts.data(), Elts.size());
+ // Start by assuming that we'll use the identity mapping.
+ VMSlot = const_cast<Value*>(V);
+
+ // Check all operands to see if any need to be remapped.
+ for (unsigned i = 0, e = MD->getNumOperands(); i != e; ++i) {
+ Value *OP = MD->getOperand(i);
+ if (!OP || MapValue(OP, VM, ModuleLevelChanges) == OP) continue;
+
+ // Ok, at least one operand needs remapping.
+ MDNode *Dummy = MDNode::getTemporary(V->getContext(), 0, 0);
+ VM[V] = Dummy;
+ SmallVector<Value*, 4> Elts;
+ Elts.reserve(MD->getNumOperands());
+ for (i = 0; i != e; ++i)
+ Elts.push_back(MD->getOperand(i) ?
+ MapValue(MD->getOperand(i), VM, ModuleLevelChanges) : 0);
+ MDNode *NewMD = MDNode::get(V->getContext(), Elts.data(), Elts.size());
+ Dummy->replaceAllUsesWith(NewMD);
+ MDNode::deleteTemporary(Dummy);
+ return VM[V] = NewMD;
+ }
+
+ // No operands needed remapping; keep the identity map.
+ return const_cast<Value*>(V);
}
Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V));
- if (C == 0) return 0;
+ if (C == 0)
+ return 0;
if (isa<ConstantInt>(C) || isa<ConstantFP>(C) ||
isa<ConstantPointerNull>(C) || isa<ConstantAggregateZero>(C) ||
- isa<UndefValue>(C) || isa<MDString>(C))
+ isa<UndefValue>(C))
return VMSlot = C; // Primitive constants map directly
if (ConstantArray *CA = dyn_cast<ConstantArray>(C)) {
for (User::op_iterator b = CA->op_begin(), i = b, e = CA->op_end();
i != e; ++i) {
- Value *MV = MapValue(*i, VM);
+ Value *MV = MapValue(*i, VM, ModuleLevelChanges);
if (MV != *i) {
// This array must contain a reference to a global, make a new array
// and return it.
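The reworked MDNode mapping above starts from the identity mapping and, only when an operand actually changes, installs a temporary node so that cyclic metadata pointing back at the node being remapped resolves to the replacement. A stand-alone sketch of that cycle-breaking pattern on a toy node graph; Node, mapNode and the ownership vector are made up for illustration and stand in for MDNode, MapValue and the ValueToValueMapTy:

#include <map>
#include <memory>
#include <vector>
#include <iostream>

struct Node {
  std::vector<Node*> ops;   // operands; null entries and cycles are allowed
};

// Remap `n` through `vm`, building a new node only if some operand changed.
// Registering the replacement in `vm` before filling its operands is what
// lets cyclic references back to `n` resolve correctly, the same job the
// temporary MDNode does in the hunk above.
Node *mapNode(Node *n, std::map<Node*, Node*> &vm,
              std::vector<std::unique_ptr<Node>> &owner) {
  auto it = vm.find(n);
  if (it != vm.end()) return it->second;

  vm[n] = n;                         // start from the identity mapping
  for (Node *op : n->ops) {
    if (!op || mapNode(op, vm, owner) == op) continue;

    // At least one operand changed: build the replacement node.
    owner.push_back(std::unique_ptr<Node>(new Node));
    Node *repl = owner.back().get();
    vm[n] = repl;                    // placeholder for any cycle back to n
    for (Node *o : n->ops)
      repl->ops.push_back(o ? mapNode(o, vm, owner) : nullptr);
    return repl;
  }
  return n;                          // nothing changed; keep the identity
}

int main() {
  std::vector<std::unique_ptr<Node>> owner;
  owner.push_back(std::unique_ptr<Node>(new Node));   // A
  owner.push_back(std::unique_ptr<Node>(new Node));   // B (will be replaced)
  owner.push_back(std::unique_ptr<Node>(new Node));   // B's replacement
  Node *A = owner[0].get(), *B = owner[1].get();
  A->ops.push_back(B);
  A->ops.push_back(A);                                // self-cycle
  std::map<Node*, Node*> vm;
  vm[B] = owner[2].get();
  Node *A2 = mapNode(A, vm, owner);
  std::cout << (A2 != A && A2->ops[1] == A2) << "\n"; // 1: cycle remapped
}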
@@ -62,7 +85,8 @@ Value *llvm::MapValue(const Value *V, ValueMapTy &VM) {
Values.push_back(cast<Constant>(*j));
Values.push_back(cast<Constant>(MV));
for (++i; i != e; ++i)
- Values.push_back(cast<Constant>(MapValue(*i, VM)));
+ Values.push_back(cast<Constant>(MapValue(*i, VM,
+ ModuleLevelChanges)));
return VM[V] = ConstantArray::get(CA->getType(), Values);
}
}
@@ -72,7 +96,7 @@ Value *llvm::MapValue(const Value *V, ValueMapTy &VM) {
if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
for (User::op_iterator b = CS->op_begin(), i = b, e = CS->op_end();
i != e; ++i) {
- Value *MV = MapValue(*i, VM);
+ Value *MV = MapValue(*i, VM, ModuleLevelChanges);
if (MV != *i) {
// This struct must contain a reference to a global, make a new struct
// and return it.
@@ -83,7 +107,8 @@ Value *llvm::MapValue(const Value *V, ValueMapTy &VM) {
Values.push_back(cast<Constant>(*j));
Values.push_back(cast<Constant>(MV));
for (++i; i != e; ++i)
- Values.push_back(cast<Constant>(MapValue(*i, VM)));
+ Values.push_back(cast<Constant>(MapValue(*i, VM,
+ ModuleLevelChanges)));
return VM[V] = ConstantStruct::get(CS->getType(), Values);
}
}
@@ -93,14 +118,14 @@ Value *llvm::MapValue(const Value *V, ValueMapTy &VM) {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
std::vector<Constant*> Ops;
for (User::op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i)
- Ops.push_back(cast<Constant>(MapValue(*i, VM)));
+ Ops.push_back(cast<Constant>(MapValue(*i, VM, ModuleLevelChanges)));
return VM[V] = CE->getWithOperands(Ops);
}
if (ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
for (User::op_iterator b = CV->op_begin(), i = b, e = CV->op_end();
i != e; ++i) {
- Value *MV = MapValue(*i, VM);
+ Value *MV = MapValue(*i, VM, ModuleLevelChanges);
if (MV != *i) {
// This vector value must contain a reference to a global, make a new
// vector constant and return it.
@@ -111,7 +136,8 @@ Value *llvm::MapValue(const Value *V, ValueMapTy &VM) {
Values.push_back(cast<Constant>(*j));
Values.push_back(cast<Constant>(MV));
for (++i; i != e; ++i)
- Values.push_back(cast<Constant>(MapValue(*i, VM)));
+ Values.push_back(cast<Constant>(MapValue(*i, VM,
+ ModuleLevelChanges)));
return VM[V] = ConstantVector::get(Values);
}
}
@@ -119,19 +145,33 @@ Value *llvm::MapValue(const Value *V, ValueMapTy &VM) {
}
BlockAddress *BA = cast<BlockAddress>(C);
- Function *F = cast<Function>(MapValue(BA->getFunction(), VM));
- BasicBlock *BB = cast_or_null<BasicBlock>(MapValue(BA->getBasicBlock(),VM));
+ Function *F = cast<Function>(MapValue(BA->getFunction(), VM,
+ ModuleLevelChanges));
+ BasicBlock *BB = cast_or_null<BasicBlock>(MapValue(BA->getBasicBlock(),VM,
+ ModuleLevelChanges));
return VM[V] = BlockAddress::get(F, BB ? BB : BA->getBasicBlock());
}
/// RemapInstruction - Convert the instruction operands from referencing the
-/// current values into those specified by ValueMap.
+/// current values into those specified by VMap.
///
-void llvm::RemapInstruction(Instruction *I, ValueMapTy &ValueMap) {
+void llvm::RemapInstruction(Instruction *I, ValueToValueMapTy &VMap,
+ bool ModuleLevelChanges) {
+ // Remap operands.
for (User::op_iterator op = I->op_begin(), E = I->op_end(); op != E; ++op) {
- Value *V = MapValue(*op, ValueMap);
+ Value *V = MapValue(*op, VMap, ModuleLevelChanges);
assert(V && "Referenced value not in value map!");
*op = V;
}
-}
+ // Remap attached metadata.
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+ I->getAllMetadata(MDs);
+ for (SmallVectorImpl<std::pair<unsigned, MDNode *> >::iterator
+ MI = MDs.begin(), ME = MDs.end(); MI != ME; ++MI) {
+ Value *Old = MI->second;
+ Value *New = MapValue(Old, VMap, ModuleLevelChanges);
+ if (New != Old)
+ I->setMetadata(MI->first, cast<MDNode>(New));
+ }
+}
diff --git a/libclamav/c++/llvm/lib/VMCore/AsmWriter.cpp b/libclamav/c++/llvm/lib/VMCore/AsmWriter.cpp
index fd74241..831a996 100644
--- a/libclamav/c++/llvm/lib/VMCore/AsmWriter.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/AsmWriter.cpp
@@ -16,7 +16,7 @@
#include "llvm/Assembly/Writer.h"
#include "llvm/Assembly/PrintModulePass.h"
-#include "llvm/Assembly/AsmAnnotationWriter.h"
+#include "llvm/Assembly/AssemblyAnnotationWriter.h"
#include "llvm/LLVMContext.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
@@ -63,15 +63,12 @@ static const Module *getModuleFromVal(const Value *V) {
if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
return GV->getParent();
- if (const NamedMDNode *NMD = dyn_cast<NamedMDNode>(V))
- return NMD->getParent();
return 0;
}
// PrintEscapedString - Print each character of the specified string, escaping
// it if it is not printable or if it is an escape char.
-static void PrintEscapedString(const StringRef &Name,
- raw_ostream &Out) {
+static void PrintEscapedString(StringRef Name, raw_ostream &Out) {
for (unsigned i = 0, e = Name.size(); i != e; ++i) {
unsigned char C = Name[i];
if (isprint(C) && C != '\\' && C != '"')
@@ -91,8 +88,7 @@ enum PrefixType {
/// PrintLLVMName - Turn the specified name into an 'LLVM name', which is either
/// prefixed with % (if the string only contains simple characters) or is
/// surrounded with ""'s (if it has special chars in it). Print it out.
-static void PrintLLVMName(raw_ostream &OS, const StringRef &Name,
- PrefixType Prefix) {
+static void PrintLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) {
assert(Name.data() && "Cannot get empty name!");
switch (Prefix) {
default: llvm_unreachable("Bad prefix!");
@@ -227,32 +223,21 @@ void TypePrinting::CalcTypeName(const Type *Ty,
const StructType *STy = cast<StructType>(Ty);
if (STy->isPacked())
OS << '<';
- OS << "{ ";
+ OS << '{';
for (StructType::element_iterator I = STy->element_begin(),
E = STy->element_end(); I != E; ++I) {
+ OS << ' ';
CalcTypeName(*I, TypeStack, OS);
- if (next(I) != STy->element_end())
+ if (llvm::next(I) == STy->element_end())
+ OS << ' ';
+ else
OS << ',';
- OS << ' ';
}
OS << '}';
if (STy->isPacked())
OS << '>';
break;
}
- case Type::UnionTyID: {
- const UnionType *UTy = cast<UnionType>(Ty);
- OS << "union { ";
- for (StructType::element_iterator I = UTy->element_begin(),
- E = UTy->element_end(); I != E; ++I) {
- CalcTypeName(*I, TypeStack, OS);
- if (next(I) != UTy->element_end())
- OS << ',';
- OS << ' ';
- }
- OS << '}';
- break;
- }
case Type::PointerTyID: {
const PointerType *PTy = cast<PointerType>(Ty);
CalcTypeName(PTy->getElementType(), TypeStack, OS);
@@ -579,8 +564,12 @@ static SlotTracker *createSlotTracker(const Value *V) {
if (const Function *Func = dyn_cast<Function>(V))
return new SlotTracker(Func);
- if (isa<MDNode>(V))
+ if (const MDNode *MD = dyn_cast<MDNode>(V)) {
+ if (!MD->isFunctionLocal())
+ return new SlotTracker(MD->getFunction());
+
return new SlotTracker((Function *)0);
+ }
return 0;
}
@@ -632,10 +621,8 @@ void SlotTracker::processModule() {
I = TheModule->named_metadata_begin(),
E = TheModule->named_metadata_end(); I != E; ++I) {
const NamedMDNode *NMD = I;
- for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
- if (MDNode *MD = NMD->getOperand(i))
- CreateMetadataSlot(MD);
- }
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
+ CreateMetadataSlot(NMD->getOperand(i));
}
// Add all the unnamed functions to the table.
@@ -673,11 +660,16 @@ void SlotTracker::processFunction() {
if (!I->getType()->isVoidTy() && !I->hasName())
CreateFunctionSlot(I);
- // Intrinsics can directly use metadata.
- if (isa<IntrinsicInst>(I))
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
- if (MDNode *N = dyn_cast_or_null<MDNode>(I->getOperand(i)))
- CreateMetadataSlot(N);
+ // Intrinsics can directly use metadata. We allow direct calls to any
+ // llvm.foo function here, because the target may not be linked into the
+ // optimizer.
+ if (const CallInst *CI = dyn_cast<CallInst>(I)) {
+ if (Function *F = CI->getCalledFunction())
+ if (F->getName().startswith("llvm."))
+ for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
+ if (MDNode *N = dyn_cast_or_null<MDNode>(I->getOperand(i)))
+ CreateMetadataSlot(N);
+ }
// Process metadata attached with this instruction.
I->getAllMetadata(MDForInst);
@@ -771,15 +763,14 @@ void SlotTracker::CreateMetadataSlot(const MDNode *N) {
// Don't insert if N is a function-local metadata, these are always printed
// inline.
- if (N->isFunctionLocal())
- return;
-
- mdn_iterator I = mdnMap.find(N);
- if (I != mdnMap.end())
- return;
+ if (!N->isFunctionLocal()) {
+ mdn_iterator I = mdnMap.find(N);
+ if (I != mdnMap.end())
+ return;
- unsigned DestSlot = mdnNext++;
- mdnMap[N] = DestSlot;
+ unsigned DestSlot = mdnNext++;
+ mdnMap[N] = DestSlot;
+ }
// Recursively add any MDNodes referenced by operands.
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
@@ -793,7 +784,8 @@ void SlotTracker::CreateMetadataSlot(const MDNode *N) {
static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
TypePrinting *TypePrinter,
- SlotTracker *Machine);
+ SlotTracker *Machine,
+ const Module *Context);
@@ -847,8 +839,10 @@ static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
}
}
-static void WriteConstantInt(raw_ostream &Out, const Constant *CV,
- TypePrinting &TypePrinter, SlotTracker *Machine) {
+static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
+ TypePrinting &TypePrinter,
+ SlotTracker *Machine,
+ const Module *Context) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
if (CI->getType()->isIntegerTy(1)) {
Out << (CI->getZExtValue() ? "true" : "false");
@@ -964,9 +958,11 @@ static void WriteConstantInt(raw_ostream &Out, const Constant *CV,
if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV)) {
Out << "blockaddress(";
- WriteAsOperandInternal(Out, BA->getFunction(), &TypePrinter, Machine);
+ WriteAsOperandInternal(Out, BA->getFunction(), &TypePrinter, Machine,
+ Context);
Out << ", ";
- WriteAsOperandInternal(Out, BA->getBasicBlock(), &TypePrinter, Machine);
+ WriteAsOperandInternal(Out, BA->getBasicBlock(), &TypePrinter, Machine,
+ Context);
Out << ")";
return;
}
@@ -986,12 +982,14 @@ static void WriteConstantInt(raw_ostream &Out, const Constant *CV,
TypePrinter.print(ETy, Out);
Out << ' ';
WriteAsOperandInternal(Out, CA->getOperand(0),
- &TypePrinter, Machine);
+ &TypePrinter, Machine,
+ Context);
for (unsigned i = 1, e = CA->getNumOperands(); i != e; ++i) {
Out << ", ";
TypePrinter.print(ETy, Out);
Out << ' ';
- WriteAsOperandInternal(Out, CA->getOperand(i), &TypePrinter, Machine);
+ WriteAsOperandInternal(Out, CA->getOperand(i), &TypePrinter, Machine,
+ Context);
}
}
Out << ']';
@@ -1009,14 +1007,16 @@ static void WriteConstantInt(raw_ostream &Out, const Constant *CV,
TypePrinter.print(CS->getOperand(0)->getType(), Out);
Out << ' ';
- WriteAsOperandInternal(Out, CS->getOperand(0), &TypePrinter, Machine);
+ WriteAsOperandInternal(Out, CS->getOperand(0), &TypePrinter, Machine,
+ Context);
for (unsigned i = 1; i < N; i++) {
Out << ", ";
TypePrinter.print(CS->getOperand(i)->getType(), Out);
Out << ' ';
- WriteAsOperandInternal(Out, CS->getOperand(i), &TypePrinter, Machine);
+ WriteAsOperandInternal(Out, CS->getOperand(i), &TypePrinter, Machine,
+ Context);
}
Out << ' ';
}
@@ -1034,12 +1034,14 @@ static void WriteConstantInt(raw_ostream &Out, const Constant *CV,
Out << '<';
TypePrinter.print(ETy, Out);
Out << ' ';
- WriteAsOperandInternal(Out, CP->getOperand(0), &TypePrinter, Machine);
+ WriteAsOperandInternal(Out, CP->getOperand(0), &TypePrinter, Machine,
+ Context);
for (unsigned i = 1, e = CP->getNumOperands(); i != e; ++i) {
Out << ", ";
TypePrinter.print(ETy, Out);
Out << ' ';
- WriteAsOperandInternal(Out, CP->getOperand(i), &TypePrinter, Machine);
+ WriteAsOperandInternal(Out, CP->getOperand(i), &TypePrinter, Machine,
+ Context);
}
Out << '>';
return;
@@ -1070,7 +1072,7 @@ static void WriteConstantInt(raw_ostream &Out, const Constant *CV,
for (User::const_op_iterator OI=CE->op_begin(); OI != CE->op_end(); ++OI) {
TypePrinter.print((*OI)->getType(), Out);
Out << ' ';
- WriteAsOperandInternal(Out, *OI, &TypePrinter, Machine);
+ WriteAsOperandInternal(Out, *OI, &TypePrinter, Machine, Context);
if (OI+1 != CE->op_end())
Out << ", ";
}
@@ -1095,7 +1097,8 @@ static void WriteConstantInt(raw_ostream &Out, const Constant *CV,
static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node,
TypePrinting *TypePrinter,
- SlotTracker *Machine) {
+ SlotTracker *Machine,
+ const Module *Context) {
Out << "!{";
for (unsigned mi = 0, me = Node->getNumOperands(); mi != me; ++mi) {
const Value *V = Node->getOperand(mi);
@@ -1105,7 +1108,7 @@ static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node,
TypePrinter->print(V->getType(), Out);
Out << ' ';
WriteAsOperandInternal(Out, Node->getOperand(mi),
- TypePrinter, Machine);
+ TypePrinter, Machine, Context);
}
if (mi + 1 != me)
Out << ", ";
@@ -1121,7 +1124,8 @@ static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node,
///
static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
TypePrinting *TypePrinter,
- SlotTracker *Machine) {
+ SlotTracker *Machine,
+ const Module *Context) {
if (V->hasName()) {
PrintLLVMName(Out, V);
return;
@@ -1130,7 +1134,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
const Constant *CV = dyn_cast<Constant>(V);
if (CV && !isa<GlobalValue>(CV)) {
assert(TypePrinter && "Constants require TypePrinting!");
- WriteConstantInt(Out, CV, *TypePrinter, Machine);
+ WriteConstantInternal(Out, CV, *TypePrinter, Machine, Context);
return;
}
@@ -1151,12 +1155,16 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
if (const MDNode *N = dyn_cast<MDNode>(V)) {
if (N->isFunctionLocal()) {
// Print metadata inline, not via slot reference number.
- WriteMDNodeBodyInternal(Out, N, TypePrinter, Machine);
+ WriteMDNodeBodyInternal(Out, N, TypePrinter, Machine, Context);
return;
}
- if (!Machine)
- Machine = createSlotTracker(V);
+ if (!Machine) {
+ if (N->isFunctionLocal())
+ Machine = new SlotTracker(N->getFunction());
+ else
+ Machine = new SlotTracker(Context);
+ }
Out << '!' << Machine->getMetadataSlot(N);
return;
}
@@ -1210,8 +1218,9 @@ void llvm::WriteAsOperand(raw_ostream &Out, const Value *V,
// Fast path: Don't construct and populate a TypePrinting object if we
// won't be needing any types printed.
if (!PrintType &&
- (!isa<Constant>(V) || V->hasName() || isa<GlobalValue>(V))) {
- WriteAsOperandInternal(Out, V, 0, 0);
+ ((!isa<Constant>(V) && !isa<MDNode>(V)) ||
+ V->hasName() || isa<GlobalValue>(V))) {
+ WriteAsOperandInternal(Out, V, 0, 0, Context);
return;
}
@@ -1225,7 +1234,7 @@ void llvm::WriteAsOperand(raw_ostream &Out, const Value *V,
Out << ' ';
}
- WriteAsOperandInternal(Out, V, &TypePrinter, 0);
+ WriteAsOperandInternal(Out, V, &TypePrinter, 0, Context);
}
namespace {
@@ -1280,7 +1289,7 @@ void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) {
TypePrinter.print(Operand->getType(), Out);
Out << ' ';
}
- WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine);
+ WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule);
}
void AssemblyWriter::writeParamOperand(const Value *Operand,
@@ -1297,7 +1306,7 @@ void AssemblyWriter::writeParamOperand(const Value *Operand,
Out << ' ' << Attribute::getAsString(Attrs);
Out << ' ';
// Print the operand
- WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine);
+ WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule);
}
void AssemblyWriter::printModule(const Module *M) {
@@ -1386,10 +1395,7 @@ void AssemblyWriter::printNamedMDNode(const NamedMDNode *NMD) {
Out << "!" << NMD->getName() << " = !{";
for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
if (i) Out << ", ";
- if (MDNode *MD = NMD->getOperand(i))
- Out << '!' << Machine.getMetadataSlot(MD);
- else
- Out << "null";
+ Out << '!' << Machine.getMetadataSlot(NMD->getOperand(i));
}
Out << "}\n";
}
@@ -1401,6 +1407,12 @@ static void PrintLinkage(GlobalValue::LinkageTypes LT,
case GlobalValue::ExternalLinkage: break;
case GlobalValue::PrivateLinkage: Out << "private "; break;
case GlobalValue::LinkerPrivateLinkage: Out << "linker_private "; break;
+ case GlobalValue::LinkerPrivateWeakLinkage:
+ Out << "linker_private_weak ";
+ break;
+ case GlobalValue::LinkerPrivateWeakDefAutoLinkage:
+ Out << "linker_private_weak_def_auto ";
+ break;
case GlobalValue::InternalLinkage: Out << "internal "; break;
case GlobalValue::LinkOnceAnyLinkage: Out << "linkonce "; break;
case GlobalValue::LinkOnceODRLinkage: Out << "linkonce_odr "; break;
@@ -1431,7 +1443,7 @@ void AssemblyWriter::printGlobal(const GlobalVariable *GV) {
if (GV->isMaterializable())
Out << "; Materializable\n";
- WriteAsOperandInternal(Out, GV, &TypePrinter, &Machine);
+ WriteAsOperandInternal(Out, GV, &TypePrinter, &Machine, GV->getParent());
Out << " = ";
if (!GV->hasInitializer() && GV->hasExternalLinkage())
@@ -1451,8 +1463,11 @@ void AssemblyWriter::printGlobal(const GlobalVariable *GV) {
writeOperand(GV->getInitializer(), false);
}
- if (GV->hasSection())
- Out << ", section \"" << GV->getSection() << '"';
+ if (GV->hasSection()) {
+ Out << ", section \"";
+ PrintEscapedString(GV->getSection(), Out);
+ Out << '"';
+ }
if (GV->getAlignment())
Out << ", align " << GV->getAlignment();
@@ -1487,7 +1502,7 @@ void AssemblyWriter::printAlias(const GlobalAlias *GA) {
TypePrinter.print(F->getFunctionType(), Out);
Out << "* ";
- WriteAsOperandInternal(Out, F, &TypePrinter, &Machine);
+ WriteAsOperandInternal(Out, F, &TypePrinter, &Machine, F->getParent());
} else if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(Aliasee)) {
TypePrinter.print(GA->getType(), Out);
Out << ' ';
@@ -1555,6 +1570,7 @@ void AssemblyWriter::printFunction(const Function *F) {
case CallingConv::Cold: Out << "coldcc "; break;
case CallingConv::X86_StdCall: Out << "x86_stdcallcc "; break;
case CallingConv::X86_FastCall: Out << "x86_fastcallcc "; break;
+ case CallingConv::X86_ThisCall: Out << "x86_thiscallcc "; break;
case CallingConv::ARM_APCS: Out << "arm_apcscc "; break;
case CallingConv::ARM_AAPCS: Out << "arm_aapcscc "; break;
case CallingConv::ARM_AAPCS_VFP:Out << "arm_aapcs_vfpcc "; break;
@@ -1569,7 +1585,7 @@ void AssemblyWriter::printFunction(const Function *F) {
Out << Attribute::getAsString(Attrs.getRetAttributes()) << ' ';
TypePrinter.print(F->getReturnType(), Out);
Out << ' ';
- WriteAsOperandInternal(Out, F, &TypePrinter, &Machine);
+ WriteAsOperandInternal(Out, F, &TypePrinter, &Machine, F->getParent());
Out << '(';
Machine.incorporateFunction(F);
@@ -1609,18 +1625,20 @@ void AssemblyWriter::printFunction(const Function *F) {
Attributes FnAttrs = Attrs.getFnAttributes();
if (FnAttrs != Attribute::None)
Out << ' ' << Attribute::getAsString(Attrs.getFnAttributes());
- if (F->hasSection())
- Out << " section \"" << F->getSection() << '"';
+ if (F->hasSection()) {
+ Out << " section \"";
+ PrintEscapedString(F->getSection(), Out);
+ Out << '"';
+ }
if (F->getAlignment())
Out << " align " << F->getAlignment();
if (F->hasGC())
Out << " gc \"" << F->getGC() << '"';
if (F->isDeclaration()) {
- Out << "\n";
+ Out << '\n';
} else {
Out << " {";
-
- // Output all of its basic blocks... for the function
+ // Output all of the function's basic blocks.
for (Function::const_iterator I = F->begin(), E = F->end(); I != E; ++I)
printBasicBlock(I);
@@ -1669,10 +1687,10 @@ void AssemblyWriter::printBasicBlock(const BasicBlock *BB) {
Out.PadToColumn(50);
Out << "; Error: Block without parent!";
} else if (BB != &BB->getParent()->getEntryBlock()) { // Not the entry block?
- // Output predecessors for the block...
+ // Output predecessors for the block.
Out.PadToColumn(50);
Out << ";";
- pred_const_iterator PI = pred_begin(BB), PE = pred_end(BB);
+ const_pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
if (PI == PE) {
Out << " No predecessors!";
@@ -1707,13 +1725,6 @@ void AssemblyWriter::printInfoComment(const Value &V) {
AnnotationWriter->printInfoComment(V, Out);
return;
}
-
- if (V.getType()->isVoidTy()) return;
-
- Out.PadToColumn(50);
- Out << "; <";
- TypePrinter.print(V.getType(), Out);
- Out << "> [#uses=" << V.getNumUses() << ']'; // Output # uses
}
// This member is called for each Instruction in a function..
@@ -1827,6 +1838,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
case CallingConv::Cold: Out << " coldcc"; break;
case CallingConv::X86_StdCall: Out << " x86_stdcallcc"; break;
case CallingConv::X86_FastCall: Out << " x86_fastcallcc"; break;
+ case CallingConv::X86_ThisCall: Out << " x86_thiscallcc"; break;
case CallingConv::ARM_APCS: Out << " arm_apcscc "; break;
case CallingConv::ARM_AAPCS: Out << " arm_aapcscc "; break;
case CallingConv::ARM_AAPCS_VFP:Out << " arm_aapcs_vfpcc "; break;
@@ -1834,6 +1846,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
default: Out << " cc" << CI->getCallingConv(); break;
}
+ Operand = CI->getCalledValue();
const PointerType *PTy = cast<PointerType>(Operand->getType());
const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
const Type *RetTy = FTy->getReturnType();
@@ -1857,15 +1870,16 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
writeOperand(Operand, true);
}
Out << '(';
- for (unsigned op = 1, Eop = I.getNumOperands(); op < Eop; ++op) {
- if (op > 1)
+ for (unsigned op = 0, Eop = CI->getNumArgOperands(); op < Eop; ++op) {
+ if (op > 0)
Out << ", ";
- writeParamOperand(I.getOperand(op), PAL.getParamAttributes(op));
+ writeParamOperand(CI->getArgOperand(op), PAL.getParamAttributes(op + 1));
}
Out << ')';
if (PAL.getFnAttributes() != Attribute::None)
Out << ' ' << Attribute::getAsString(PAL.getFnAttributes());
} else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) {
+ Operand = II->getCalledValue();
const PointerType *PTy = cast<PointerType>(Operand->getType());
const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
const Type *RetTy = FTy->getReturnType();
@@ -1878,6 +1892,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
case CallingConv::Cold: Out << " coldcc"; break;
case CallingConv::X86_StdCall: Out << " x86_stdcallcc"; break;
case CallingConv::X86_FastCall: Out << " x86_fastcallcc"; break;
+ case CallingConv::X86_ThisCall: Out << " x86_thiscallcc"; break;
case CallingConv::ARM_APCS: Out << " arm_apcscc "; break;
case CallingConv::ARM_AAPCS: Out << " arm_aapcscc "; break;
case CallingConv::ARM_AAPCS_VFP:Out << " arm_aapcs_vfpcc "; break;
@@ -1903,10 +1918,10 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
writeOperand(Operand, true);
}
Out << '(';
- for (unsigned op = 3, Eop = I.getNumOperands(); op < Eop; ++op) {
- if (op > 3)
+ for (unsigned op = 0, Eop = II->getNumArgOperands(); op < Eop; ++op) {
+ if (op)
Out << ", ";
- writeParamOperand(I.getOperand(op), PAL.getParamAttributes(op-2));
+ writeParamOperand(II->getArgOperand(op), PAL.getParamAttributes(op + 1));
}
Out << ')';
@@ -1998,21 +2013,23 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
} else {
Out << ", !<unknown kind #" << Kind << ">";
}
- Out << " !" << Machine.getMetadataSlot(InstMD[i].second);
+ Out << ' ';
+ WriteAsOperandInternal(Out, InstMD[i].second, &TypePrinter, &Machine,
+ TheModule);
}
}
printInfoComment(I);
}
static void WriteMDNodeComment(const MDNode *Node,
- formatted_raw_ostream &Out) {
+ formatted_raw_ostream &Out) {
if (Node->getNumOperands() < 1)
return;
ConstantInt *CI = dyn_cast_or_null<ConstantInt>(Node->getOperand(0));
if (!CI) return;
- unsigned Val = CI->getZExtValue();
- unsigned Tag = Val & ~LLVMDebugVersionMask;
- if (Val < LLVMDebugVersion)
+ APInt Val = CI->getValue();
+ APInt Tag = Val & ~APInt(Val.getBitWidth(), LLVMDebugVersionMask);
+ if (Val.ult(LLVMDebugVersion))
return;
Out.PadToColumn(50);
@@ -2026,8 +2043,10 @@ static void WriteMDNodeComment(const MDNode *Node,
Out << "; [ DW_TAG_vector_type ]";
else if (Tag == dwarf::DW_TAG_user_base)
Out << "; [ DW_TAG_user_base ]";
- else if (const char *TagName = dwarf::TagString(Tag))
- Out << "; [ " << TagName << " ]";
+ else if (Tag.isIntN(32)) {
+ if (const char *TagName = dwarf::TagString(Tag.getZExtValue()))
+ Out << "; [ " << TagName << " ]";
+ }
}
void AssemblyWriter::writeAllMDNodes() {
@@ -2044,7 +2063,7 @@ void AssemblyWriter::writeAllMDNodes() {
}
void AssemblyWriter::printMDNodeBody(const MDNode *Node) {
- WriteMDNodeBodyInternal(Out, Node, &TypePrinter, &Machine);
+ WriteMDNodeBodyInternal(Out, Node, &TypePrinter, &Machine, TheModule);
WriteMDNodeComment(Node, Out);
Out << "\n";
}
@@ -2060,6 +2079,13 @@ void Module::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW) const {
W.printModule(this);
}
+void NamedMDNode::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW) const {
+ SlotTracker SlotTable(getParent());
+ formatted_raw_ostream OS(ROS);
+ AssemblyWriter W(OS, SlotTable, getParent(), AAW);
+ W.printNamedMDNode(this);
+}
+
void Type::print(raw_ostream &OS) const {
if (this == 0) {
OS << "<null Type>";
@@ -2095,17 +2121,13 @@ void Value::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW) const {
} else if (const MDNode *N = dyn_cast<MDNode>(this)) {
const Function *F = N->getFunction();
SlotTracker SlotTable(F);
- AssemblyWriter W(OS, SlotTable, F ? getModuleFromVal(F) : 0, AAW);
+ AssemblyWriter W(OS, SlotTable, F ? F->getParent() : 0, AAW);
W.printMDNodeBody(N);
- } else if (const NamedMDNode *N = dyn_cast<NamedMDNode>(this)) {
- SlotTracker SlotTable(N->getParent());
- AssemblyWriter W(OS, SlotTable, N->getParent(), AAW);
- W.printNamedMDNode(N);
} else if (const Constant *C = dyn_cast<Constant>(this)) {
TypePrinting TypePrinter;
TypePrinter.print(C->getType(), OS);
OS << ' ';
- WriteConstantInt(OS, C, TypePrinter, 0);
+ WriteConstantInternal(OS, C, TypePrinter, 0, 0);
} else if (isa<InlineAsm>(this) || isa<MDString>(this) ||
isa<Argument>(this)) {
WriteAsOperand(OS, this, true, 0);
diff --git a/libclamav/c++/llvm/lib/VMCore/AutoUpgrade.cpp b/libclamav/c++/llvm/lib/VMCore/AutoUpgrade.cpp
index 5e4c9fb..9330e14 100644
--- a/libclamav/c++/llvm/lib/VMCore/AutoUpgrade.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/AutoUpgrade.cpp
@@ -18,7 +18,9 @@
#include "llvm/Module.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/IRBuilder.h"
#include <cstring>
using namespace llvm;
@@ -76,6 +78,63 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
NewFn = F;
return true;
}
+ } else if (Name.compare(5, 9, "arm.neon.", 9) == 0) {
+ if (((Name.compare(14, 5, "vmovl", 5) == 0 ||
+ Name.compare(14, 5, "vaddl", 5) == 0 ||
+ Name.compare(14, 5, "vsubl", 5) == 0 ||
+ Name.compare(14, 5, "vaddw", 5) == 0 ||
+ Name.compare(14, 5, "vsubw", 5) == 0 ||
+ Name.compare(14, 5, "vmull", 5) == 0 ||
+ Name.compare(14, 5, "vmlal", 5) == 0 ||
+ Name.compare(14, 5, "vmlsl", 5) == 0 ||
+ Name.compare(14, 5, "vabdl", 5) == 0 ||
+ Name.compare(14, 5, "vabal", 5) == 0) &&
+ (Name.compare(19, 2, "s.", 2) == 0 ||
+ Name.compare(19, 2, "u.", 2) == 0)) ||
+
+ (Name.compare(14, 4, "vaba", 4) == 0 &&
+ (Name.compare(18, 2, "s.", 2) == 0 ||
+ Name.compare(18, 2, "u.", 2) == 0)) ||
+
+ (Name.compare(14, 6, "vmovn.", 6) == 0)) {
+
+ // Calls to these are transformed into IR without intrinsics.
+ NewFn = 0;
+ return true;
+ }
+ // Old versions of NEON ld/st intrinsics are missing alignment arguments.
+ bool isVLd = (Name.compare(14, 3, "vld", 3) == 0);
+ bool isVSt = (Name.compare(14, 3, "vst", 3) == 0);
+ if (isVLd || isVSt) {
+ unsigned NumVecs = Name.at(17) - '0';
+ if (NumVecs == 0 || NumVecs > 4)
+ return false;
+ bool isLaneOp = (Name.compare(18, 5, "lane.", 5) == 0);
+ if (!isLaneOp && Name.at(18) != '.')
+ return false;
+ unsigned ExpectedArgs = 2; // for the address and alignment
+ if (isVSt || isLaneOp)
+ ExpectedArgs += NumVecs;
+ if (isLaneOp)
+ ExpectedArgs += 1; // for the lane number
+ unsigned NumP = FTy->getNumParams();
+ if (NumP != ExpectedArgs - 1)
+ return false;
+
+ // Change the name of the old (bad) intrinsic, because
+ // its type is incorrect, but we cannot overload that name.
+ F->setName("");
+
+ // One argument is missing: add the alignment argument.
+ std::vector<const Type*> NewParams;
+ for (unsigned p = 0; p < NumP; ++p)
+ NewParams.push_back(FTy->getParamType(p));
+ NewParams.push_back(Type::getInt32Ty(F->getContext()));
+ FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(),
+ NewParams, false);
+ NewFn = cast<Function>(M->getOrInsertFunction(Name, NewFTy));
+ return true;
+ }
}
break;
case 'b':
@@ -145,6 +204,53 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
}
break;
+ case 'm': {
+ // This upgrades the llvm.memcpy, llvm.memmove, and llvm.memset to the
+ // new format that allows overloading the pointer for different address
+ // space (e.g., llvm.memcpy.i16 => llvm.memcpy.p0i8.p0i8.i16)
+ const char* NewFnName = NULL;
+ if (Name.compare(5,8,"memcpy.i",8) == 0) {
+ if (Name[13] == '8')
+ NewFnName = "llvm.memcpy.p0i8.p0i8.i8";
+ else if (Name.compare(13,2,"16") == 0)
+ NewFnName = "llvm.memcpy.p0i8.p0i8.i16";
+ else if (Name.compare(13,2,"32") == 0)
+ NewFnName = "llvm.memcpy.p0i8.p0i8.i32";
+ else if (Name.compare(13,2,"64") == 0)
+ NewFnName = "llvm.memcpy.p0i8.p0i8.i64";
+ } else if (Name.compare(5,9,"memmove.i",9) == 0) {
+ if (Name[14] == '8')
+ NewFnName = "llvm.memmove.p0i8.p0i8.i8";
+ else if (Name.compare(14,2,"16") == 0)
+ NewFnName = "llvm.memmove.p0i8.p0i8.i16";
+ else if (Name.compare(14,2,"32") == 0)
+ NewFnName = "llvm.memmove.p0i8.p0i8.i32";
+ else if (Name.compare(14,2,"64") == 0)
+ NewFnName = "llvm.memmove.p0i8.p0i8.i64";
+ }
+ else if (Name.compare(5,8,"memset.i",8) == 0) {
+ if (Name[13] == '8')
+ NewFnName = "llvm.memset.p0i8.i8";
+ else if (Name.compare(13,2,"16") == 0)
+ NewFnName = "llvm.memset.p0i8.i16";
+ else if (Name.compare(13,2,"32") == 0)
+ NewFnName = "llvm.memset.p0i8.i32";
+ else if (Name.compare(13,2,"64") == 0)
+ NewFnName = "llvm.memset.p0i8.i64";
+ }
+ if (NewFnName) {
+ NewFn = cast<Function>(M->getOrInsertFunction(NewFnName,
+ FTy->getReturnType(),
+ FTy->getParamType(0),
+ FTy->getParamType(1),
+ FTy->getParamType(2),
+ FTy->getParamType(3),
+ Type::getInt1Ty(F->getContext()),
+ (Type *)0));
+ return true;
+ }
+ break;
+ }
case 'p':
// This upgrades the llvm.part.select overloaded intrinsic names to only
// use one type specifier in the name. We only care about the old format
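The 'm' case added above rewrites the old llvm.memcpy/memmove/memset names, which encoded only the length type, into the overloaded form that also encodes the pointer address spaces, while the rebuilt function type gains a trailing i1 flag. A stand-alone sketch of just the name mapping, assuming address space 0 as the hunk does; upgradeMemIntrinsicName is a made-up name:

#include <string>
#include <iostream>

// Map e.g. "llvm.memcpy.i32" -> "llvm.memcpy.p0i8.p0i8.i32" and
// "llvm.memset.i64" -> "llvm.memset.p0i8.i64"; return "" if not recognized.
std::string upgradeMemIntrinsicName(const std::string &name) {
  static const char *widths[] = {"i8", "i16", "i32", "i64"};
  for (const char *w : widths) {
    if (name == std::string("llvm.memcpy.") + w)
      return std::string("llvm.memcpy.p0i8.p0i8.") + w;
    if (name == std::string("llvm.memmove.") + w)
      return std::string("llvm.memmove.p0i8.p0i8.") + w;
    if (name == std::string("llvm.memset.") + w)
      return std::string("llvm.memset.p0i8.") + w;
  }
  return "";
}

int main() {
  std::cout << upgradeMemIntrinsicName("llvm.memcpy.i32") << "\n";
  std::cout << upgradeMemIntrinsicName("llvm.memset.i64") << "\n";
}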
@@ -225,6 +331,16 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
// Calls to these intrinsics are transformed into ShuffleVector's.
NewFn = 0;
return true;
+ } else if (Name.compare(5, 16, "x86.sse41.pmulld", 16) == 0) {
+ // Calls to these intrinsics are transformed into vector multiplies.
+ NewFn = 0;
+ return true;
+ } else if (Name.compare(5, 18, "x86.ssse3.palign.r", 18) == 0 ||
+ Name.compare(5, 22, "x86.ssse3.palign.r.128", 22) == 0) {
+ // Calls to these intrinsics are transformed into vector shuffles, shifts,
+ // or 0.
+ NewFn = 0;
+ return true;
}
break;
@@ -249,16 +365,138 @@ bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
return Upgraded;
}
+bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
+ StringRef Name(GV->getName());
+
+ // We are only upgrading one symbol here.
+ if (Name == ".llvm.eh.catch.all.value") {
+ GV->setName("llvm.eh.catch.all.value");
+ return true;
+ }
+
+ return false;
+}
+
+/// ExtendNEONArgs - For NEON "long" and "wide" operations, where the results
+/// have vector elements twice as big as one or both source operands, do the
+/// sign- or zero-extension that used to be handled by intrinsics. The
+/// extended values are returned via V0 and V1.
+static void ExtendNEONArgs(CallInst *CI, Value *Arg0, Value *Arg1,
+ Value *&V0, Value *&V1) {
+ Function *F = CI->getCalledFunction();
+ const std::string& Name = F->getName();
+ bool isLong = (Name.at(18) == 'l');
+ bool isSigned = (Name.at(19) == 's');
+
+ if (isSigned) {
+ if (isLong)
+ V0 = new SExtInst(Arg0, CI->getType(), "", CI);
+ else
+ V0 = Arg0;
+ V1 = new SExtInst(Arg1, CI->getType(), "", CI);
+ } else {
+ if (isLong)
+ V0 = new ZExtInst(Arg0, CI->getType(), "", CI);
+ else
+ V0 = Arg0;
+ V1 = new ZExtInst(Arg1, CI->getType(), "", CI);
+ }
+}
+
+/// CallVABD - As part of expanding a call to one of the old NEON vabdl, vaba,
+/// or vabal intrinsics, construct a call to a vabd intrinsic. Examine the
+/// name of the old intrinsic to determine whether to use a signed or unsigned
+/// vabd intrinsic. Get the type from the old call instruction, adjusted for
+/// half-size vector elements if the old intrinsic was vabdl or vabal.
+static Instruction *CallVABD(CallInst *CI, Value *Arg0, Value *Arg1) {
+ Function *F = CI->getCalledFunction();
+ const std::string& Name = F->getName();
+ bool isLong = (Name.at(18) == 'l');
+ bool isSigned = (Name.at(isLong ? 19 : 18) == 's');
+
+ Intrinsic::ID intID;
+ if (isSigned)
+ intID = Intrinsic::arm_neon_vabds;
+ else
+ intID = Intrinsic::arm_neon_vabdu;
+
+ const Type *Ty = CI->getType();
+ if (isLong)
+ Ty = VectorType::getTruncatedElementVectorType(cast<const VectorType>(Ty));
+
+ Function *VABD = Intrinsic::getDeclaration(F->getParent(), intID, &Ty, 1);
+ Value *Operands[2];
+ Operands[0] = Arg0;
+ Operands[1] = Arg1;
+ return CallInst::Create(VABD, Operands, Operands+2,
+ "upgraded."+CI->getName(), CI);
+}
+
// UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call to
// the upgraded intrinsic. All argument and return casting must be provided in
// order to seamlessly integrate with existing context.
void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Function *F = CI->getCalledFunction();
LLVMContext &C = CI->getContext();
-
+ ImmutableCallSite CS(CI);
+
assert(F && "CallInst has no function associated with it.");
if (!NewFn) {
+ // Get the Function's name.
+ const std::string& Name = F->getName();
+
+ // Upgrade ARM NEON intrinsics.
+ if (Name.compare(5, 9, "arm.neon.", 9) == 0) {
+ Instruction *NewI;
+ Value *V0, *V1;
+ if (Name.compare(14, 7, "vmovls.", 7) == 0) {
+ NewI = new SExtInst(CI->getArgOperand(0), CI->getType(),
+ "upgraded." + CI->getName(), CI);
+ } else if (Name.compare(14, 7, "vmovlu.", 7) == 0) {
+ NewI = new ZExtInst(CI->getArgOperand(0), CI->getType(),
+ "upgraded." + CI->getName(), CI);
+ } else if (Name.compare(14, 4, "vadd", 4) == 0) {
+ ExtendNEONArgs(CI, CI->getArgOperand(0), CI->getArgOperand(1), V0, V1);
+ NewI = BinaryOperator::CreateAdd(V0, V1, "upgraded."+CI->getName(), CI);
+ } else if (Name.compare(14, 4, "vsub", 4) == 0) {
+ ExtendNEONArgs(CI, CI->getArgOperand(0), CI->getArgOperand(1), V0, V1);
+ NewI = BinaryOperator::CreateSub(V0, V1,"upgraded."+CI->getName(),CI);
+ } else if (Name.compare(14, 4, "vmul", 4) == 0) {
+ ExtendNEONArgs(CI, CI->getArgOperand(0), CI->getArgOperand(1), V0, V1);
+ NewI = BinaryOperator::CreateMul(V0, V1,"upgraded."+CI->getName(),CI);
+ } else if (Name.compare(14, 4, "vmla", 4) == 0) {
+ ExtendNEONArgs(CI, CI->getArgOperand(1), CI->getArgOperand(2), V0, V1);
+ Instruction *MulI = BinaryOperator::CreateMul(V0, V1, "", CI);
+ NewI = BinaryOperator::CreateAdd(CI->getArgOperand(0), MulI,
+ "upgraded."+CI->getName(), CI);
+ } else if (Name.compare(14, 4, "vmls", 4) == 0) {
+ ExtendNEONArgs(CI, CI->getArgOperand(1), CI->getArgOperand(2), V0, V1);
+ Instruction *MulI = BinaryOperator::CreateMul(V0, V1, "", CI);
+ NewI = BinaryOperator::CreateSub(CI->getArgOperand(0), MulI,
+ "upgraded."+CI->getName(), CI);
+ } else if (Name.compare(14, 4, "vabd", 4) == 0) {
+ NewI = CallVABD(CI, CI->getArgOperand(0), CI->getArgOperand(1));
+ NewI = new ZExtInst(NewI, CI->getType(), "upgraded."+CI->getName(), CI);
+ } else if (Name.compare(14, 4, "vaba", 4) == 0) {
+ NewI = CallVABD(CI, CI->getArgOperand(1), CI->getArgOperand(2));
+ if (Name.at(18) == 'l')
+ NewI = new ZExtInst(NewI, CI->getType(), "", CI);
+ NewI = BinaryOperator::CreateAdd(CI->getArgOperand(0), NewI,
+ "upgraded."+CI->getName(), CI);
+ } else if (Name.compare(14, 6, "vmovn.", 6) == 0) {
+ NewI = new TruncInst(CI->getArgOperand(0), CI->getType(),
+ "upgraded." + CI->getName(), CI);
+ } else {
+ llvm_unreachable("Unknown arm.neon function for CallInst upgrade.");
+ }
+ // Replace any uses of the old CallInst.
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(NewI);
+ CI->eraseFromParent();
+ return;
+ }
+
bool isLoadH = false, isLoadL = false, isMovL = false;
bool isMovSD = false, isShufPD = false;
bool isUnpckhPD = false, isUnpcklPD = false;
@@ -285,11 +523,11 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
if (isLoadH || isLoadL || isMovL || isMovSD || isShufPD ||
isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
std::vector<Constant*> Idxs;
- Value *Op0 = CI->getOperand(1);
+ Value *Op0 = CI->getArgOperand(0);
ShuffleVectorInst *SI = NULL;
if (isLoadH || isLoadL) {
Value *Op1 = UndefValue::get(Op0->getType());
- Value *Addr = new BitCastInst(CI->getOperand(2),
+ Value *Addr = new BitCastInst(CI->getArgOperand(1),
Type::getDoublePtrTy(C),
"upgraded.", CI);
Value *Load = new LoadInst(Addr, "upgraded.", false, 8, CI);
@@ -322,7 +560,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
SI = new ShuffleVectorInst(ZeroV, Op0, Mask, "upgraded.", CI);
} else if (isMovSD ||
isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
- Value *Op1 = CI->getOperand(2);
+ Value *Op1 = CI->getArgOperand(1);
if (isMovSD) {
Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
@@ -336,8 +574,9 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Value *Mask = ConstantVector::get(Idxs);
SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
} else if (isShufPD) {
- Value *Op1 = CI->getOperand(2);
- unsigned MaskVal = cast<ConstantInt>(CI->getOperand(3))->getZExtValue();
+ Value *Op1 = CI->getArgOperand(1);
+ unsigned MaskVal =
+ cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), MaskVal & 1));
Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C),
((MaskVal >> 1) & 1)+2));
@@ -355,6 +594,130 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
// Clean up the old call now that it has been completely upgraded.
CI->eraseFromParent();
+ } else if (F->getName() == "llvm.x86.sse41.pmulld") {
+ // Upgrade this set of intrinsics into vector multiplies.
+ Instruction *Mul = BinaryOperator::CreateMul(CI->getArgOperand(0),
+ CI->getArgOperand(1),
+ CI->getName(),
+ CI);
+ // Fix up all the uses with our new multiply.
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(Mul);
+
+ // Remove upgraded multiply.
+ CI->eraseFromParent();
+ } else if (F->getName() == "llvm.x86.ssse3.palign.r") {
+ Value *Op1 = CI->getArgOperand(0);
+ Value *Op2 = CI->getArgOperand(1);
+ Value *Op3 = CI->getArgOperand(2);
+ unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
+ Value *Rep;
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI);
+
+ // If palignr is shifting the pair of input vectors less than 9 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 8) {
+ const Type *IntTy = Type::getInt32Ty(C);
+ const Type *EltTy = Type::getInt8Ty(C);
+ const Type *VecTy = VectorType::get(EltTy, 8);
+
+ Op2 = Builder.CreateBitCast(Op2, VecTy);
+ Op1 = Builder.CreateBitCast(Op1, VecTy);
+
+ llvm::SmallVector<llvm::Constant*, 8> Indices;
+ for (unsigned i = 0; i != 8; ++i)
+ Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
+
+ Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
+ Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
+ Rep = Builder.CreateBitCast(Rep, F->getReturnType());
+ }
+
+ // If palignr is shifting the pair of input vectors more than 8 but less
+ // than 16 bytes, emit a logical right shift of the destination.
+ else if (shiftVal < 16) {
+ // MMX has these as 1 x i64 vectors for some odd optimization reasons.
+ const Type *EltTy = Type::getInt64Ty(C);
+ const Type *VecTy = VectorType::get(EltTy, 1);
+
+ Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
+ Op2 = ConstantInt::get(VecTy, (shiftVal-8) * 8);
+
+ // Create the shift amount, in bits.
+ Function *I =
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_mmx_psrl_q);
+ Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
+ }
+
+ // If palignr is shifting the pair of vectors 16 bytes or more, emit zero.
+ else {
+ Rep = Constant::getNullValue(F->getReturnType());
+ }
+
+ // Replace any uses with our new instruction.
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(Rep);
+
+ // Remove upgraded instruction.
+ CI->eraseFromParent();
+
+ } else if (F->getName() == "llvm.x86.ssse3.palign.r.128") {
+ Value *Op1 = CI->getArgOperand(0);
+ Value *Op2 = CI->getArgOperand(1);
+ Value *Op3 = CI->getArgOperand(2);
+ unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
+ Value *Rep;
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI);
+
+ // If palignr is shifting the pair of input vectors less than 17 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 16) {
+ const Type *IntTy = Type::getInt32Ty(C);
+ const Type *EltTy = Type::getInt8Ty(C);
+ const Type *VecTy = VectorType::get(EltTy, 16);
+
+ Op2 = Builder.CreateBitCast(Op2, VecTy);
+ Op1 = Builder.CreateBitCast(Op1, VecTy);
+
+ llvm::SmallVector<llvm::Constant*, 16> Indices;
+ for (unsigned i = 0; i != 16; ++i)
+ Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
+
+ Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
+ Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
+ Rep = Builder.CreateBitCast(Rep, F->getReturnType());
+ }
+
+ // If palignr is shifting the pair of input vectors more than 16 but less
+ // than 32 bytes, emit a logical right shift of the destination.
+ else if (shiftVal < 32) {
+ const Type *EltTy = Type::getInt64Ty(C);
+ const Type *VecTy = VectorType::get(EltTy, 2);
+ const Type *IntTy = Type::getInt32Ty(C);
+
+ Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
+ Op2 = ConstantInt::get(IntTy, (shiftVal-16) * 8);
+
+ // create i32 constant
+ Function *I =
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_sse2_psrl_dq);
+ Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
+ }
+
+ // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
+ else {
+ Rep = Constant::getNullValue(F->getReturnType());
+ }
+
+ // Replace any uses with our new instruction.
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(Rep);
+
+ // Remove upgraded instruction.
+ CI->eraseFromParent();
+
} else {
llvm_unreachable("Unknown function for CallInst upgrade.");
}
@@ -362,7 +725,40 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
}
switch (NewFn->getIntrinsicID()) {
- default: llvm_unreachable("Unknown function for CallInst upgrade.");
+ default: llvm_unreachable("Unknown function for CallInst upgrade.");
+ case Intrinsic::arm_neon_vld1:
+ case Intrinsic::arm_neon_vld2:
+ case Intrinsic::arm_neon_vld3:
+ case Intrinsic::arm_neon_vld4:
+ case Intrinsic::arm_neon_vst1:
+ case Intrinsic::arm_neon_vst2:
+ case Intrinsic::arm_neon_vst3:
+ case Intrinsic::arm_neon_vst4:
+ case Intrinsic::arm_neon_vld2lane:
+ case Intrinsic::arm_neon_vld3lane:
+ case Intrinsic::arm_neon_vld4lane:
+ case Intrinsic::arm_neon_vst2lane:
+ case Intrinsic::arm_neon_vst3lane:
+ case Intrinsic::arm_neon_vst4lane: {
+ // Add a default alignment argument of 1.
+ SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
+ Operands.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
+ CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
+ CI->getName(), CI);
+ NewCI->setTailCall(CI->isTailCall());
+ NewCI->setCallingConv(CI->getCallingConv());
+
+ // Handle any uses of the old CallInst.
+ if (!CI->use_empty())
+ // Replace all uses of the old call with the new call, which has the
+ // correct type.
+ CI->replaceAllUsesWith(NewCI);
+
+ // Clean up the old call now that it has been completely upgraded.
+ CI->eraseFromParent();
+ break;
+ }
+
case Intrinsic::x86_mmx_psll_d:
case Intrinsic::x86_mmx_psll_q:
case Intrinsic::x86_mmx_psll_w:
@@ -373,10 +769,10 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
case Intrinsic::x86_mmx_psrl_w: {
Value *Operands[2];
- Operands[0] = CI->getOperand(1);
+ Operands[0] = CI->getArgOperand(0);
// Cast the second parameter to the correct type.
- BitCastInst *BC = new BitCastInst(CI->getOperand(2),
+ BitCastInst *BC = new BitCastInst(CI->getArgOperand(1),
NewFn->getFunctionType()->getParamType(1),
"upgraded.", CI);
Operands[1] = BC;
@@ -400,9 +796,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
case Intrinsic::ctlz:
case Intrinsic::ctpop:
case Intrinsic::cttz: {
- // Build a small vector of the 1..(N-1) operands, which are the
- // parameters.
- SmallVector<Value*, 8> Operands(CI->op_begin()+1, CI->op_end());
+ // Build a small vector of the original arguments.
+ SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
// Construct a new CallInst
CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
@@ -437,7 +832,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
case Intrinsic::eh_selector:
case Intrinsic::eh_typeid_for: {
// Only the return type changed.
- SmallVector<Value*, 8> Operands(CI->op_begin() + 1, CI->op_end());
+ SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
"upgraded." + CI->getName(), CI);
NewCI->setTailCall(CI->isTailCall());
@@ -455,6 +850,28 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
CI->eraseFromParent();
}
break;
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ case Intrinsic::memset: {
+ // Add isVolatile
+ const llvm::Type *I1Ty = llvm::Type::getInt1Ty(CI->getContext());
+ Value *Operands[5] = { CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), CI->getArgOperand(3),
+ llvm::ConstantInt::get(I1Ty, 0) };
+ CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+5,
+ CI->getName(), CI);
+ NewCI->setTailCall(CI->isTailCall());
+ NewCI->setCallingConv(CI->getCallingConv());
+ // Handle any uses of the old CallInst.
+ if (!CI->use_empty())
+ // Replace all uses of the old call with the new call, which has the
+ // correct type.
+ CI->replaceAllUsesWith(NewCI);
+
+ // Clean up the old call now that it has been completely upgraded.
+ CI->eraseFromParent();
+ break;
+ }
}
}
@@ -521,7 +938,8 @@ void llvm::CheckDebugInfoIntrinsics(Module *M) {
if (Function *Declare = M->getFunction("llvm.dbg.declare")) {
if (!Declare->use_empty()) {
DbgDeclareInst *DDI = cast<DbgDeclareInst>(Declare->use_back());
- if (!isa<MDNode>(DDI->getOperand(1)) ||!isa<MDNode>(DDI->getOperand(2))) {
+ if (!isa<MDNode>(DDI->getArgOperand(0)) ||
+ !isa<MDNode>(DDI->getArgOperand(1))) {
while (!Declare->use_empty()) {
CallInst *CI = cast<CallInst>(Declare->use_back());
CI->eraseFromParent();
diff --git a/libclamav/c++/llvm/lib/VMCore/BasicBlock.cpp b/libclamav/c++/llvm/lib/VMCore/BasicBlock.cpp
index 16437bc..8ad5373 100644
--- a/libclamav/c++/llvm/lib/VMCore/BasicBlock.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/BasicBlock.cpp
@@ -14,6 +14,7 @@
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
#include "llvm/ADT/STLExtras.h"
@@ -136,6 +137,16 @@ Instruction* BasicBlock::getFirstNonPHI() {
return &*i;
}
+Instruction* BasicBlock::getFirstNonPHIOrDbg() {
+ BasicBlock::iterator i = begin();
+ // All valid basic blocks should have a terminator, which is neither a
+ // PHINode nor a debug intrinsic, so the loop below must stop. If we have
+ // an invalid basic block we'll get an assertion failure when dereferencing
+ // a past-the-end iterator.
+ while (isa<PHINode>(i) || isa<DbgInfoIntrinsic>(i)) ++i;
+ return &*i;
+}
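A small usage sketch (the helper name below is hypothetical): the new accessor yields a safe insertion point past any leading PHI nodes and llvm.dbg.* intrinsics.

static void insertAtBlockTop(BasicBlock *BB, Instruction *NewI) {
  // Skip PHIs and debug intrinsics so NewI lands at the first "real"
  // instruction of the block.
  NewI->insertBefore(BB->getFirstNonPHIOrDbg());
}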
+
void BasicBlock::dropAllReferences() {
for(iterator I = begin(), E = end(); I != E; ++I)
I->dropAllReferences();
diff --git a/libclamav/c++/llvm/lib/VMCore/CMakeLists.txt b/libclamav/c++/llvm/lib/VMCore/CMakeLists.txt
index 448d8fb..1388c93 100644
--- a/libclamav/c++/llvm/lib/VMCore/CMakeLists.txt
+++ b/libclamav/c++/llvm/lib/VMCore/CMakeLists.txt
@@ -6,6 +6,7 @@ add_llvm_library(LLVMCore
ConstantFold.cpp
Constants.cpp
Core.cpp
+ DebugLoc.cpp
Dominators.cpp
Function.cpp
GVMaterializer.cpp
@@ -22,6 +23,7 @@ add_llvm_library(LLVMCore
Module.cpp
Pass.cpp
PassManager.cpp
+ PassRegistry.cpp
PrintModulePass.cpp
Type.cpp
TypeSymbolTable.cpp
diff --git a/libclamav/c++/llvm/lib/VMCore/ConstantFold.cpp b/libclamav/c++/llvm/lib/VMCore/ConstantFold.cpp
index 549977c..9a91daf 100644
--- a/libclamav/c++/llvm/lib/VMCore/ConstantFold.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/ConstantFold.cpp
@@ -357,22 +357,6 @@ static Constant *getFoldedSizeOf(const Type *Ty, const Type *DestTy,
}
}
- if (const UnionType *UTy = dyn_cast<UnionType>(Ty)) {
- unsigned NumElems = UTy->getNumElements();
- // Check for a union with all members having the same size.
- Constant *MemberSize =
- getFoldedSizeOf(UTy->getElementType(0), DestTy, true);
- bool AllSame = true;
- for (unsigned i = 1; i != NumElems; ++i)
- if (MemberSize !=
- getFoldedSizeOf(UTy->getElementType(i), DestTy, true)) {
- AllSame = false;
- break;
- }
- if (AllSame)
- return MemberSize;
- }
-
// Pointer size doesn't depend on the pointee type, so canonicalize them
// to an arbitrary pointee.
if (const PointerType *PTy = dyn_cast<PointerType>(Ty))
@@ -438,24 +422,6 @@ static Constant *getFoldedAlignOf(const Type *Ty, const Type *DestTy,
return MemberAlign;
}
- if (const UnionType *UTy = dyn_cast<UnionType>(Ty)) {
- // Union alignment is the maximum alignment of any member.
- // Without target data, we can't compare much, but we can check to see
- // if all the members have the same alignment.
- unsigned NumElems = UTy->getNumElements();
- // Check for a union with all members having the same alignment.
- Constant *MemberAlign =
- getFoldedAlignOf(UTy->getElementType(0), DestTy, true);
- bool AllSame = true;
- for (unsigned i = 1; i != NumElems; ++i)
- if (MemberAlign != getFoldedAlignOf(UTy->getElementType(i), DestTy, true)) {
- AllSame = false;
- break;
- }
- if (AllSame)
- return MemberAlign;
- }
-
// Pointer alignment doesn't depend on the pointee type, so canonicalize them
// to an arbitrary pointee.
if (const PointerType *PTy = dyn_cast<PointerType>(Ty))
@@ -658,7 +624,7 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
}
}
// Handle an offsetof-like expression.
- if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()){
+ if (Ty->isStructTy() || Ty->isArrayTy()) {
if (Constant *C = getFoldedOffsetOf(Ty, CE->getOperand(2),
DestTy, false))
return C;
@@ -909,8 +875,6 @@ Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
unsigned numOps;
if (const ArrayType *AR = dyn_cast<ArrayType>(AggTy))
numOps = AR->getNumElements();
- else if (AggTy->isUnionTy())
- numOps = 1;
else
numOps = cast<StructType>(AggTy)->getNumElements();
@@ -927,10 +891,6 @@ Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
if (const StructType* ST = dyn_cast<StructType>(AggTy))
return ConstantStruct::get(ST->getContext(), Ops, ST->isPacked());
- if (const UnionType* UT = dyn_cast<UnionType>(AggTy)) {
- assert(Ops.size() == 1 && "Union can only contain a single value!");
- return ConstantUnion::get(UT, Ops[0]);
- }
return ConstantArray::get(cast<ArrayType>(AggTy), Ops);
}
@@ -1817,8 +1777,15 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
return Constant::getAllOnesValue(ResultTy);
// Handle some degenerate cases first
- if (isa<UndefValue>(C1) || isa<UndefValue>(C2))
+ if (isa<UndefValue>(C1) || isa<UndefValue>(C2)) {
+ // For EQ and NE, we can always pick a value for the undef to make the
+ // predicate pass or fail, so we can return undef.
+ if (ICmpInst::isEquality(ICmpInst::Predicate(pred)))
+ return UndefValue::get(ResultTy);
+ // Otherwise, pick the same value as the non-undef operand, and fold
+ // it to true or false.
return ConstantInt::get(ResultTy, CmpInst::isTrueWhenEqual(pred));
+ }
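Roughly, the effect of this change on the constant folder (the predicates and values below are only illustrative):

//   ConstantExpr::getICmp(ICmpInst::ICMP_EQ,  UndefValue::get(T), C)
//     now folds to undef: either outcome can be justified by picking a
//     suitable value for the undef operand.
//   ConstantExpr::getICmp(ICmpInst::ICMP_ULT, UndefValue::get(T), C)
//     still folds via isTrueWhenEqual(ULT), i.e. to false.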
// No compile-time operations on this type yet.
if (C1->getType()->isPPC_FP128Ty())
@@ -2194,7 +2161,7 @@ Constant *llvm::ConstantFoldGetElementPtr(Constant *C,
}
NewIndices.push_back(Combined);
- NewIndices.insert(NewIndices.end(), Idxs+1, Idxs+NumIdx);
+ NewIndices.append(Idxs+1, Idxs+NumIdx);
return (inBounds && cast<GEPOperator>(CE)->isInBounds()) ?
ConstantExpr::getInBoundsGetElementPtr(CE->getOperand(0),
&NewIndices[0],
diff --git a/libclamav/c++/llvm/lib/VMCore/Constants.cpp b/libclamav/c++/llvm/lib/VMCore/Constants.cpp
index 10f8879..16eaca8 100644
--- a/libclamav/c++/llvm/lib/VMCore/Constants.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/Constants.cpp
@@ -160,7 +160,7 @@ bool Constant::canTrap() const {
/// isConstantUsed - Return true if the constant has users other than constant
/// exprs and other dangling things.
bool Constant::isConstantUsed() const {
- for (use_const_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
+ for (const_use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
const Constant *UC = dyn_cast<Constant>(*UI);
if (UC == 0 || isa<GlobalValue>(UC))
return true;
@@ -525,6 +525,7 @@ Constant* ConstantArray::get(const ArrayType* T, Constant* const* Vals,
Constant* ConstantArray::get(LLVMContext &Context, StringRef Str,
bool AddNull) {
std::vector<Constant*> ElementVals;
+ ElementVals.reserve(Str.size() + size_t(AddNull));
for (unsigned i = 0; i < Str.size(); ++i)
ElementVals.push_back(ConstantInt::get(Type::getInt8Ty(Context), Str[i]));
@@ -585,27 +586,6 @@ Constant* ConstantStruct::get(LLVMContext &Context,
return get(Context, std::vector<Constant*>(Vals, Vals+NumVals), Packed);
}
-ConstantUnion::ConstantUnion(const UnionType *T, Constant* V)
- : Constant(T, ConstantUnionVal,
- OperandTraits<ConstantUnion>::op_end(this) - 1, 1) {
- Use *OL = OperandList;
- assert(T->getElementTypeIndex(V->getType()) >= 0 &&
- "Initializer for union element isn't a member of union type!");
- *OL = V;
-}
-
-// ConstantUnion accessors.
-Constant* ConstantUnion::get(const UnionType* T, Constant* V) {
- LLVMContextImpl* pImpl = T->getContext().pImpl;
-
- // Create a ConstantAggregateZero value if all elements are zeros...
- if (!V->isNullValue())
- return pImpl->UnionConstants.getOrCreate(T, V);
-
- return ConstantAggregateZero::get(T);
-}
-
-
ConstantVector::ConstantVector(const VectorType *T,
const std::vector<Constant*> &V)
: Constant(T, ConstantVectorVal,
@@ -722,7 +702,7 @@ bool ConstantExpr::isGEPWithNoNotionalOverIndexing() const {
if (getOpcode() != Instruction::GetElementPtr) return false;
gep_type_iterator GEPI = gep_type_begin(this), E = gep_type_end(this);
- User::const_op_iterator OI = next(this->op_begin());
+ User::const_op_iterator OI = llvm::next(this->op_begin());
// Skip the first index, as it has no static limit.
++GEPI;
@@ -954,14 +934,14 @@ ConstantAggregateZero* ConstantAggregateZero::get(const Type* Ty) {
/// destroyConstant - Remove the constant from the constant table...
///
void ConstantAggregateZero::destroyConstant() {
- getType()->getContext().pImpl->AggZeroConstants.remove(this);
+ getRawType()->getContext().pImpl->AggZeroConstants.remove(this);
destroyConstantImpl();
}
/// destroyConstant - Remove the constant from the constant table...
///
void ConstantArray::destroyConstant() {
- getType()->getContext().pImpl->ArrayConstants.remove(this);
+ getRawType()->getContext().pImpl->ArrayConstants.remove(this);
destroyConstantImpl();
}
@@ -1025,21 +1005,14 @@ namespace llvm {
// destroyConstant - Remove the constant from the constant table...
//
void ConstantStruct::destroyConstant() {
- getType()->getContext().pImpl->StructConstants.remove(this);
- destroyConstantImpl();
-}
-
-// destroyConstant - Remove the constant from the constant table...
-//
-void ConstantUnion::destroyConstant() {
- getType()->getContext().pImpl->UnionConstants.remove(this);
+ getRawType()->getContext().pImpl->StructConstants.remove(this);
destroyConstantImpl();
}
// destroyConstant - Remove the constant from the constant table...
//
void ConstantVector::destroyConstant() {
- getType()->getContext().pImpl->VectorConstants.remove(this);
+ getRawType()->getContext().pImpl->VectorConstants.remove(this);
destroyConstantImpl();
}
@@ -1080,7 +1053,7 @@ ConstantPointerNull *ConstantPointerNull::get(const PointerType *Ty) {
// destroyConstant - Remove the constant from the constant table...
//
void ConstantPointerNull::destroyConstant() {
- getType()->getContext().pImpl->NullPtrConstants.remove(this);
+ getRawType()->getContext().pImpl->NullPtrConstants.remove(this);
destroyConstantImpl();
}
@@ -1095,7 +1068,7 @@ UndefValue *UndefValue::get(const Type *Ty) {
// destroyConstant - Remove the constant from the constant table.
//
void UndefValue::destroyConstant() {
- getType()->getContext().pImpl->UndefValueConstants.remove(this);
+ getRawType()->getContext().pImpl->UndefValueConstants.remove(this);
destroyConstantImpl();
}
@@ -1129,7 +1102,7 @@ BlockAddress::BlockAddress(Function *F, BasicBlock *BB)
// destroyConstant - Remove the constant from the constant table.
//
void BlockAddress::destroyConstant() {
- getFunction()->getType()->getContext().pImpl
+ getFunction()->getRawType()->getContext().pImpl
->BlockAddresses.erase(std::make_pair(getFunction(), getBasicBlock()));
getBasicBlock()->AdjustBlockAddressRefCount(-1);
destroyConstantImpl();
@@ -1222,20 +1195,20 @@ Constant *ConstantExpr::getCast(unsigned oc, Constant *C, const Type *Ty) {
Constant *ConstantExpr::getZExtOrBitCast(Constant *C, const Type *Ty) {
if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
- return getCast(Instruction::BitCast, C, Ty);
- return getCast(Instruction::ZExt, C, Ty);
+ return getBitCast(C, Ty);
+ return getZExt(C, Ty);
}
Constant *ConstantExpr::getSExtOrBitCast(Constant *C, const Type *Ty) {
if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
- return getCast(Instruction::BitCast, C, Ty);
- return getCast(Instruction::SExt, C, Ty);
+ return getBitCast(C, Ty);
+ return getSExt(C, Ty);
}
Constant *ConstantExpr::getTruncOrBitCast(Constant *C, const Type *Ty) {
if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
- return getCast(Instruction::BitCast, C, Ty);
- return getCast(Instruction::Trunc, C, Ty);
+ return getBitCast(C, Ty);
+ return getTrunc(C, Ty);
}
Constant *ConstantExpr::getPointerCast(Constant *S, const Type *Ty) {
@@ -1243,8 +1216,8 @@ Constant *ConstantExpr::getPointerCast(Constant *S, const Type *Ty) {
assert((Ty->isIntegerTy() || Ty->isPointerTy()) && "Invalid cast");
if (Ty->isIntegerTy())
- return getCast(Instruction::PtrToInt, S, Ty);
- return getCast(Instruction::BitCast, S, Ty);
+ return getPtrToInt(S, Ty);
+ return getBitCast(S, Ty);
}
Constant *ConstantExpr::getIntegerCast(Constant *C, const Type *Ty,
@@ -1448,12 +1421,6 @@ Constant *ConstantExpr::getCompareTy(unsigned short predicate,
Constant *ConstantExpr::get(unsigned Opcode, Constant *C1, Constant *C2,
unsigned Flags) {
- // API compatibility: Adjust integer opcodes to floating-point opcodes.
- if (C1->getType()->isFPOrFPVectorTy()) {
- if (Opcode == Instruction::Add) Opcode = Instruction::FAdd;
- else if (Opcode == Instruction::Sub) Opcode = Instruction::FSub;
- else if (Opcode == Instruction::Mul) Opcode = Instruction::FMul;
- }
#ifndef NDEBUG
switch (Opcode) {
case Instruction::Add:
@@ -1521,8 +1488,8 @@ Constant* ConstantExpr::getSizeOf(const Type* Ty) {
Constant *GEPIdx = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1);
Constant *GEP = getGetElementPtr(
Constant::getNullValue(PointerType::getUnqual(Ty)), &GEPIdx, 1);
- return getCast(Instruction::PtrToInt, GEP,
- Type::getInt64Ty(Ty->getContext()));
+ return getPtrToInt(GEP,
+ Type::getInt64Ty(Ty->getContext()));
}
Constant* ConstantExpr::getAlignOf(const Type* Ty) {
@@ -1535,8 +1502,8 @@ Constant* ConstantExpr::getAlignOf(const Type* Ty) {
Constant *One = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1);
Constant *Indices[2] = { Zero, One };
Constant *GEP = getGetElementPtr(NullPtr, Indices, 2);
- return getCast(Instruction::PtrToInt, GEP,
- Type::getInt64Ty(Ty->getContext()));
+ return getPtrToInt(GEP,
+ Type::getInt64Ty(Ty->getContext()));
}
Constant* ConstantExpr::getOffsetOf(const StructType* STy, unsigned FieldNo) {
@@ -1553,8 +1520,8 @@ Constant* ConstantExpr::getOffsetOf(const Type* Ty, Constant *FieldNo) {
};
Constant *GEP = getGetElementPtr(
Constant::getNullValue(PointerType::getUnqual(Ty)), GEPIdx, 2);
- return getCast(Instruction::PtrToInt, GEP,
- Type::getInt64Ty(Ty->getContext()));
+ return getPtrToInt(GEP,
+ Type::getInt64Ty(Ty->getContext()));
}
Constant *ConstantExpr::getCompare(unsigned short pred,
@@ -1838,9 +1805,6 @@ Constant *ConstantExpr::getExtractValue(Constant *Agg,
}
Constant* ConstantExpr::getNeg(Constant* C) {
- // API compatibility: Adjust integer opcodes to floating-point opcodes.
- if (C->getType()->isFPOrFPVectorTy())
- return getFNeg(C);
assert(C->getType()->isIntOrIntVectorTy() &&
"Cannot NEG a nonintegral value!");
return get(Instruction::Sub,
@@ -1937,7 +1901,7 @@ Constant* ConstantExpr::getAShr(Constant* C1, Constant* C2) {
// destroyConstant - Remove the constant from the constant table...
//
void ConstantExpr::destroyConstant() {
- getType()->getContext().pImpl->ExprConstants.remove(this);
+ getRawType()->getContext().pImpl->ExprConstants.remove(this);
destroyConstantImpl();
}
@@ -1945,6 +1909,20 @@ const char *ConstantExpr::getOpcodeName() const {
return Instruction::getOpcodeName(getOpcode());
}
+
+
+GetElementPtrConstantExpr::
+GetElementPtrConstantExpr(Constant *C, const std::vector<Constant*> &IdxList,
+ const Type *DestTy)
+ : ConstantExpr(DestTy, Instruction::GetElementPtr,
+ OperandTraits<GetElementPtrConstantExpr>::op_end(this)
+ - (IdxList.size()+1), IdxList.size()+1) {
+ OperandList[0] = C;
+ for (unsigned i = 0, E = IdxList.size(); i != E; ++i)
+ OperandList[i+1] = IdxList[i];
+}
+
+
//===----------------------------------------------------------------------===//
// replaceUsesOfWithOnConstant implementations
@@ -1964,11 +1942,10 @@ void ConstantArray::replaceUsesOfWithOnConstant(Value *From, Value *To,
assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!");
Constant *ToC = cast<Constant>(To);
- LLVMContext &Context = getType()->getContext();
- LLVMContextImpl *pImpl = Context.pImpl;
+ LLVMContextImpl *pImpl = getRawType()->getContext().pImpl;
std::pair<LLVMContextImpl::ArrayConstantsTy::MapKey, ConstantArray*> Lookup;
- Lookup.first.first = getType();
+ Lookup.first.first = cast<ArrayType>(getRawType());
Lookup.second = this;
std::vector<Constant*> &Values = Lookup.first.second;
@@ -2002,7 +1979,7 @@ void ConstantArray::replaceUsesOfWithOnConstant(Value *From, Value *To,
Constant *Replacement = 0;
if (isAllZeros) {
- Replacement = ConstantAggregateZero::get(getType());
+ Replacement = ConstantAggregateZero::get(getRawType());
} else {
// Check to see if we have this array type already.
bool Exists;
@@ -2053,7 +2030,7 @@ void ConstantStruct::replaceUsesOfWithOnConstant(Value *From, Value *To,
assert(getOperand(OperandToUpdate) == From && "ReplaceAllUsesWith broken!");
std::pair<LLVMContextImpl::StructConstantsTy::MapKey, ConstantStruct*> Lookup;
- Lookup.first.first = getType();
+ Lookup.first.first = cast<StructType>(getRawType());
Lookup.second = this;
std::vector<Constant*> &Values = Lookup.first.second;
Values.reserve(getNumOperands()); // Build replacement struct.
@@ -2075,14 +2052,13 @@ void ConstantStruct::replaceUsesOfWithOnConstant(Value *From, Value *To,
}
Values[OperandToUpdate] = ToC;
- LLVMContext &Context = getType()->getContext();
- LLVMContextImpl *pImpl = Context.pImpl;
+ LLVMContextImpl *pImpl = getRawType()->getContext().pImpl;
Constant *Replacement = 0;
if (isAllZeros) {
- Replacement = ConstantAggregateZero::get(getType());
+ Replacement = ConstantAggregateZero::get(getRawType());
} else {
- // Check to see if we have this array type already.
+ // Check to see if we have this struct type already.
bool Exists;
LLVMContextImpl::StructConstantsTy::MapTy::iterator I =
pImpl->StructConstants.InsertOrGetItem(Lookup, Exists);
@@ -2111,56 +2087,6 @@ void ConstantStruct::replaceUsesOfWithOnConstant(Value *From, Value *To,
destroyConstant();
}
-void ConstantUnion::replaceUsesOfWithOnConstant(Value *From, Value *To,
- Use *U) {
- assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!");
- Constant *ToC = cast<Constant>(To);
-
- assert(U == OperandList && "Union constants can only have one use!");
- assert(getNumOperands() == 1 && "Union constants can only have one use!");
- assert(getOperand(0) == From && "ReplaceAllUsesWith broken!");
-
- std::pair<LLVMContextImpl::UnionConstantsTy::MapKey, ConstantUnion*> Lookup;
- Lookup.first.first = getType();
- Lookup.second = this;
- Lookup.first.second = ToC;
-
- LLVMContext &Context = getType()->getContext();
- LLVMContextImpl *pImpl = Context.pImpl;
-
- Constant *Replacement = 0;
- if (ToC->isNullValue()) {
- Replacement = ConstantAggregateZero::get(getType());
- } else {
- // Check to see if we have this union type already.
- bool Exists;
- LLVMContextImpl::UnionConstantsTy::MapTy::iterator I =
- pImpl->UnionConstants.InsertOrGetItem(Lookup, Exists);
-
- if (Exists) {
- Replacement = I->second;
- } else {
- // Okay, the new shape doesn't exist in the system yet. Instead of
- // creating a new constant union, inserting it, replaceallusesof'ing the
- // old with the new, then deleting the old... just update the current one
- // in place!
- pImpl->UnionConstants.MoveConstantToNewSlot(this, I);
-
- // Update to the new value.
- setOperand(0, ToC);
- return;
- }
- }
-
- assert(Replacement != this && "I didn't contain From!");
-
- // Everyone using this now uses the replacement.
- uncheckedReplaceAllUsesWith(Replacement);
-
- // Delete the old constant!
- destroyConstant();
-}
-
void ConstantVector::replaceUsesOfWithOnConstant(Value *From, Value *To,
Use *U) {
assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!");
@@ -2173,7 +2099,7 @@ void ConstantVector::replaceUsesOfWithOnConstant(Value *From, Value *To,
Values.push_back(Val);
}
- Constant *Replacement = get(getType(), Values);
+ Constant *Replacement = get(cast<VectorType>(getRawType()), Values);
assert(Replacement != this && "I didn't contain From!");
// Everyone using this now uses the replacement.
@@ -2220,7 +2146,7 @@ void ConstantExpr::replaceUsesOfWithOnConstant(Value *From, Value *ToV,
&Indices[0], Indices.size());
} else if (isCast()) {
assert(getOperand(0) == From && "Cast only has one use!");
- Replacement = ConstantExpr::getCast(getOpcode(), To, getType());
+ Replacement = ConstantExpr::getCast(getOpcode(), To, getRawType());
} else if (getOpcode() == Instruction::Select) {
Constant *C1 = getOperand(0);
Constant *C2 = getOperand(1);
diff --git a/libclamav/c++/llvm/lib/VMCore/ConstantsContext.h b/libclamav/c++/llvm/lib/VMCore/ConstantsContext.h
index 2f2fac5..1c04c3e 100644
--- a/libclamav/c++/llvm/lib/VMCore/ConstantsContext.h
+++ b/libclamav/c++/llvm/lib/VMCore/ConstantsContext.h
@@ -511,14 +511,6 @@ struct ConstantKeyData<ConstantStruct> {
}
};
-template<>
-struct ConstantKeyData<ConstantUnion> {
- typedef Constant* ValType;
- static ValType getValType(ConstantUnion *CU) {
- return cast<Constant>(CU->getOperand(0));
- }
-};
-
// ConstantPointerNull does not take extra "value" argument...
template<class ValType>
struct ConstantCreator<ConstantPointerNull, PointerType, ValType> {
@@ -757,9 +749,13 @@ public:
// If this constant is the representative element for its abstract type,
// update the AbstractTypeMap so that the representative element is I.
- if (C->getType()->isAbstract()) {
+ //
+ // This must use getRawType() because if the type is under refinement, we
+ // will get the refineAbstractType callback below, and we don't want to
+ // kick union find in on the constant.
+ if (C->getRawType()->isAbstract()) {
typename AbstractTypeMapTy::iterator ATI =
- AbstractTypeMap.find(C->getType());
+ AbstractTypeMap.find(cast<DerivedType>(C->getRawType()));
assert(ATI != AbstractTypeMap.end() &&
"Abstract type not in AbstractTypeMap?");
if (ATI->second == OldI)
diff --git a/libclamav/c++/llvm/lib/VMCore/Core.cpp b/libclamav/c++/llvm/lib/VMCore/Core.cpp
index f4f65c5..5aad19d 100644
--- a/libclamav/c++/llvm/lib/VMCore/Core.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/Core.cpp
@@ -22,6 +22,7 @@
#include "llvm/TypeSymbolTable.h"
#include "llvm/InlineAsm.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/PassManager.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -119,6 +120,11 @@ void LLVMDumpModule(LLVMModuleRef M) {
unwrap(M)->dump();
}
+/*--.. Operations on inline assembler ......................................--*/
+void LLVMSetModuleInlineAsm(LLVMModuleRef M, const char *Asm) {
+ unwrap(M)->setModuleInlineAsm(StringRef(Asm));
+}
+
/*===-- Operations on types -----------------------------------------------===*/
@@ -150,8 +156,6 @@ LLVMTypeKind LLVMGetTypeKind(LLVMTypeRef Ty) {
return LLVMFunctionTypeKind;
case Type::StructTyID:
return LLVMStructTypeKind;
- case Type::UnionTyID:
- return LLVMUnionTypeKind;
case Type::ArrayTyID:
return LLVMArrayTypeKind;
case Type::PointerTyID:
@@ -310,35 +314,6 @@ LLVMBool LLVMIsPackedStruct(LLVMTypeRef StructTy) {
return unwrap<StructType>(StructTy)->isPacked();
}
-/*--.. Operations on union types ..........................................--*/
-
-LLVMTypeRef LLVMUnionTypeInContext(LLVMContextRef C, LLVMTypeRef *ElementTypes,
- unsigned ElementCount) {
- SmallVector<const Type*, 8> Tys;
- for (LLVMTypeRef *I = ElementTypes,
- *E = ElementTypes + ElementCount; I != E; ++I)
- Tys.push_back(unwrap(*I));
-
- return wrap(UnionType::get(&Tys[0], Tys.size()));
-}
-
-LLVMTypeRef LLVMUnionType(LLVMTypeRef *ElementTypes,
- unsigned ElementCount, int Packed) {
- return LLVMUnionTypeInContext(LLVMGetGlobalContext(), ElementTypes,
- ElementCount);
-}
-
-unsigned LLVMCountUnionElementTypes(LLVMTypeRef UnionTy) {
- return unwrap<UnionType>(UnionTy)->getNumElements();
-}
-
-void LLVMGetUnionElementTypes(LLVMTypeRef UnionTy, LLVMTypeRef *Dest) {
- UnionType *Ty = unwrap<UnionType>(UnionTy);
- for (FunctionType::param_iterator I = Ty->element_begin(),
- E = Ty->element_end(); I != E; ++I)
- *Dest++ = wrap(*I);
-}
-
/*--.. Operations on array, pointer, and vector types (sequence types) .....--*/
LLVMTypeRef LLVMArrayType(LLVMTypeRef ElementType, unsigned ElementCount) {
@@ -484,6 +459,14 @@ LLVMValueRef LLVMGetOperand(LLVMValueRef Val, unsigned Index) {
return wrap(unwrap<User>(Val)->getOperand(Index));
}
+void LLVMSetOperand(LLVMValueRef Val, unsigned Index, LLVMValueRef Op) {
+ unwrap<User>(Val)->setOperand(Index, unwrap(Op));
+}
+
+int LLVMGetNumOperands(LLVMValueRef Val) {
+ return unwrap<User>(Val)->getNumOperands();
+}
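A short sketch of how the two new accessors pair with the existing LLVMGetOperand; the function name is hypothetical:

/* Replace every operand of a user value that equals From with To. */
static void replaceOperands(LLVMValueRef U, LLVMValueRef From,
                            LLVMValueRef To) {
  int N = LLVMGetNumOperands(U);
  for (int i = 0; i < N; ++i)
    if (LLVMGetOperand(U, i) == From)
      LLVMSetOperand(U, i, To);
}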
+
/*--.. Operations on constants of any type .................................--*/
LLVMValueRef LLVMConstNull(LLVMTypeRef Ty) {
@@ -615,10 +598,6 @@ LLVMValueRef LLVMConstVector(LLVMValueRef *ScalarConstantVals, unsigned Size) {
return wrap(ConstantVector::get(
unwrap<Constant>(ScalarConstantVals, Size), Size));
}
-LLVMValueRef LLVMConstUnion(LLVMTypeRef Ty, LLVMValueRef Val) {
- return wrap(ConstantUnion::get(unwrap<UnionType>(Ty), unwrap<Constant>(Val)));
-}
-
/*--.. Constant expressions ................................................--*/
LLVMOpcode LLVMGetConstOpcode(LLVMValueRef ConstantVal) {
@@ -1054,6 +1033,10 @@ LLVMLinkage LLVMGetLinkage(LLVMValueRef Global) {
return LLVMPrivateLinkage;
case GlobalValue::LinkerPrivateLinkage:
return LLVMLinkerPrivateLinkage;
+ case GlobalValue::LinkerPrivateWeakLinkage:
+ return LLVMLinkerPrivateWeakLinkage;
+ case GlobalValue::LinkerPrivateWeakDefAutoLinkage:
+ return LLVMLinkerPrivateWeakDefAutoLinkage;
case GlobalValue::DLLImportLinkage:
return LLVMDLLImportLinkage;
case GlobalValue::DLLExportLinkage:
@@ -1104,6 +1087,12 @@ void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) {
case LLVMLinkerPrivateLinkage:
GV->setLinkage(GlobalValue::LinkerPrivateLinkage);
break;
+ case LLVMLinkerPrivateWeakLinkage:
+ GV->setLinkage(GlobalValue::LinkerPrivateWeakLinkage);
+ break;
+ case LLVMLinkerPrivateWeakDefAutoLinkage:
+ GV->setLinkage(GlobalValue::LinkerPrivateWeakDefAutoLinkage);
+ break;
case LLVMDLLImportLinkage:
GV->setLinkage(GlobalValue::DLLImportLinkage);
break;
@@ -1506,6 +1495,14 @@ void LLVMDeleteBasicBlock(LLVMBasicBlockRef BBRef) {
unwrap(BBRef)->eraseFromParent();
}
+void LLVMMoveBasicBlockBefore(LLVMBasicBlockRef BB, LLVMBasicBlockRef MovePos) {
+ unwrap(BB)->moveBefore(unwrap(MovePos));
+}
+
+void LLVMMoveBasicBlockAfter(LLVMBasicBlockRef BB, LLVMBasicBlockRef MovePos) {
+ unwrap(BB)->moveAfter(unwrap(MovePos));
+}
+
/*--.. Operations on instructions ..........................................--*/
LLVMBasicBlockRef LLVMGetInstructionParent(LLVMValueRef Inst) {
@@ -1651,7 +1648,7 @@ LLVMBasicBlockRef LLVMGetInsertBlock(LLVMBuilderRef Builder) {
}
void LLVMClearInsertionPosition(LLVMBuilderRef Builder) {
- unwrap(Builder)->ClearInsertionPoint ();
+ unwrap(Builder)->ClearInsertionPoint();
}
void LLVMInsertIntoBuilder(LLVMBuilderRef Builder, LLVMValueRef Instr) {
@@ -1670,11 +1667,13 @@ void LLVMDisposeBuilder(LLVMBuilderRef Builder) {
/*--.. Metadata builders ...................................................--*/
void LLVMSetCurrentDebugLocation(LLVMBuilderRef Builder, LLVMValueRef L) {
- unwrap(Builder)->SetCurrentDebugLocation(L? unwrap<MDNode>(L) : NULL);
+ MDNode *Loc = L ? unwrap<MDNode>(L) : NULL;
+ unwrap(Builder)->SetCurrentDebugLocation(DebugLoc::getFromDILocation(Loc));
}
LLVMValueRef LLVMGetCurrentDebugLocation(LLVMBuilderRef Builder) {
- return wrap(unwrap(Builder)->getCurrentDebugLocation());
+ return wrap(unwrap(Builder)->getCurrentDebugLocation()
+ .getAsMDNode(unwrap(Builder)->getContext()));
}
void LLVMSetInstDebugLocation(LLVMBuilderRef Builder, LLVMValueRef Inst) {
@@ -2199,17 +2198,52 @@ LLVMBool LLVMCreateMemoryBufferWithContentsOfFile(
LLVMBool LLVMCreateMemoryBufferWithSTDIN(LLVMMemoryBufferRef *OutMemBuf,
char **OutMessage) {
- MemoryBuffer *MB = MemoryBuffer::getSTDIN();
- if (!MB->getBufferSize()) {
- delete MB;
- *OutMessage = strdup("stdin is empty.");
- return 1;
+ std::string Error;
+ if (MemoryBuffer *MB = MemoryBuffer::getSTDIN(&Error)) {
+ *OutMemBuf = wrap(MB);
+ return 0;
}
- *OutMemBuf = wrap(MB);
- return 0;
+ *OutMessage = strdup(Error.c_str());
+ return 1;
}
void LLVMDisposeMemoryBuffer(LLVMMemoryBufferRef MemBuf) {
delete unwrap(MemBuf);
}
+
+
+/*===-- Pass Manager ------------------------------------------------------===*/
+
+LLVMPassManagerRef LLVMCreatePassManager() {
+ return wrap(new PassManager());
+}
+
+LLVMPassManagerRef LLVMCreateFunctionPassManagerForModule(LLVMModuleRef M) {
+ return wrap(new FunctionPassManager(unwrap(M)));
+}
+
+LLVMPassManagerRef LLVMCreateFunctionPassManager(LLVMModuleProviderRef P) {
+ return LLVMCreateFunctionPassManagerForModule(
+ reinterpret_cast<LLVMModuleRef>(P));
+}
+
+LLVMBool LLVMRunPassManager(LLVMPassManagerRef PM, LLVMModuleRef M) {
+ return unwrap<PassManager>(PM)->run(*unwrap(M));
+}
+
+LLVMBool LLVMInitializeFunctionPassManager(LLVMPassManagerRef FPM) {
+ return unwrap<FunctionPassManager>(FPM)->doInitialization();
+}
+
+LLVMBool LLVMRunFunctionPassManager(LLVMPassManagerRef FPM, LLVMValueRef F) {
+ return unwrap<FunctionPassManager>(FPM)->run(*unwrap<Function>(F));
+}
+
+LLVMBool LLVMFinalizeFunctionPassManager(LLVMPassManagerRef FPM) {
+ return unwrap<FunctionPassManager>(FPM)->doFinalization();
+}
+
+void LLVMDisposePassManager(LLVMPassManagerRef PM) {
+ delete unwrap(PM);
+}
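Taken together, the new bindings cover the usual function-pass-manager lifecycle. A minimal sketch, assuming passes were already added to FPM elsewhere and using the existing LLVMGetFirstFunction/LLVMGetNextFunction iterators:

static void runFunctionPasses(LLVMModuleRef M) {
  LLVMPassManagerRef FPM = LLVMCreateFunctionPassManagerForModule(M);
  LLVMInitializeFunctionPassManager(FPM);
  for (LLVMValueRef F = LLVMGetFirstFunction(M); F;
       F = LLVMGetNextFunction(F))
    LLVMRunFunctionPassManager(FPM, F);
  LLVMFinalizeFunctionPassManager(FPM);
  LLVMDisposePassManager(FPM);
}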
diff --git a/libclamav/c++/llvm/lib/VMCore/DebugLoc.cpp b/libclamav/c++/llvm/lib/VMCore/DebugLoc.cpp
new file mode 100644
index 0000000..f8b45ee
--- /dev/null
+++ b/libclamav/c++/llvm/lib/VMCore/DebugLoc.cpp
@@ -0,0 +1,288 @@
+//===-- DebugLoc.cpp - Implement DebugLoc class ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/DebugLoc.h"
+#include "LLVMContextImpl.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// DebugLoc Implementation
+//===----------------------------------------------------------------------===//
+
+MDNode *DebugLoc::getScope(const LLVMContext &Ctx) const {
+ if (ScopeIdx == 0) return 0;
+
+ if (ScopeIdx > 0) {
+ // Positive ScopeIdx is an index into ScopeRecords, which has no inlined-at
+ // position specified.
+ assert(unsigned(ScopeIdx) <= Ctx.pImpl->ScopeRecords.size() &&
+ "Invalid ScopeIdx!");
+ return Ctx.pImpl->ScopeRecords[ScopeIdx-1].get();
+ }
+
+ // Otherwise, the index is in the ScopeInlinedAtRecords array.
+ assert(unsigned(-ScopeIdx) <= Ctx.pImpl->ScopeInlinedAtRecords.size() &&
+ "Invalid ScopeIdx");
+ return Ctx.pImpl->ScopeInlinedAtRecords[-ScopeIdx-1].first.get();
+}
+
+MDNode *DebugLoc::getInlinedAt(const LLVMContext &Ctx) const {
+ // Positive ScopeIdx is an index into ScopeRecords, which has no inlined-at
+ // position specified. Zero is invalid.
+ if (ScopeIdx >= 0) return 0;
+
+ // Otherwise, the index is in the ScopeInlinedAtRecords array.
+ assert(unsigned(-ScopeIdx) <= Ctx.pImpl->ScopeInlinedAtRecords.size() &&
+ "Invalid ScopeIdx");
+ return Ctx.pImpl->ScopeInlinedAtRecords[-ScopeIdx-1].second.get();
+}
+
+/// Return both the Scope and the InlinedAt values.
+void DebugLoc::getScopeAndInlinedAt(MDNode *&Scope, MDNode *&IA,
+ const LLVMContext &Ctx) const {
+ if (ScopeIdx == 0) {
+ Scope = IA = 0;
+ return;
+ }
+
+ if (ScopeIdx > 0) {
+ // Positive ScopeIdx is an index into ScopeRecords, which has no inlined-at
+ // position specified.
+ assert(unsigned(ScopeIdx) <= Ctx.pImpl->ScopeRecords.size() &&
+ "Invalid ScopeIdx!");
+ Scope = Ctx.pImpl->ScopeRecords[ScopeIdx-1].get();
+ IA = 0;
+ return;
+ }
+
+ // Otherwise, the index is in the ScopeInlinedAtRecords array.
+ assert(unsigned(-ScopeIdx) <= Ctx.pImpl->ScopeInlinedAtRecords.size() &&
+ "Invalid ScopeIdx");
+ Scope = Ctx.pImpl->ScopeInlinedAtRecords[-ScopeIdx-1].first.get();
+ IA = Ctx.pImpl->ScopeInlinedAtRecords[-ScopeIdx-1].second.get();
+}
+
+
+DebugLoc DebugLoc::get(unsigned Line, unsigned Col,
+ MDNode *Scope, MDNode *InlinedAt) {
+ DebugLoc Result;
+
+ // If no scope is available, this is an unknown location.
+ if (Scope == 0) return Result;
+
+ // Saturate line and col to "unknown".
+ if (Col > 255) Col = 0;
+ if (Line >= (1 << 24)) Line = 0;
+ Result.LineCol = Line | (Col << 24);
+
+ LLVMContext &Ctx = Scope->getContext();
+
+ // If there is no inlined-at location, use the ScopeRecords array.
+ if (InlinedAt == 0)
+ Result.ScopeIdx = Ctx.pImpl->getOrAddScopeRecordIdxEntry(Scope, 0);
+ else
+ Result.ScopeIdx = Ctx.pImpl->getOrAddScopeInlinedAtIdxEntry(Scope,
+ InlinedAt, 0);
+
+ return Result;
+}
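A small illustration of the packing above (line in the low 24 bits, column in the top 8); Scope stands for any non-null scope MDNode:

static void showPacking(MDNode *Scope) {
  DebugLoc Loc = DebugLoc::get(1234, 7, Scope, 0);
  // Loc.getLine() == 1234, Loc.getCol() == 7; LineCol == 1234 | (7 << 24).
  DebugLoc Wide = DebugLoc::get(1234, 300, Scope, 0);
  // Columns above 255 saturate to "unknown": Wide.getCol() == 0.
  (void)Loc; (void)Wide;
}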
+
+/// getAsMDNode - This method converts the compressed DebugLoc node into a
+/// DILocation compatible MDNode.
+MDNode *DebugLoc::getAsMDNode(const LLVMContext &Ctx) const {
+ if (isUnknown()) return 0;
+
+ MDNode *Scope, *IA;
+ getScopeAndInlinedAt(Scope, IA, Ctx);
+ assert(Scope && "If scope is null, this should be isUnknown()");
+
+ LLVMContext &Ctx2 = Scope->getContext();
+ const Type *Int32 = Type::getInt32Ty(Ctx2);
+ Value *Elts[] = {
+ ConstantInt::get(Int32, getLine()), ConstantInt::get(Int32, getCol()),
+ Scope, IA
+ };
+ return MDNode::get(Ctx2, &Elts[0], 4);
+}
+
+/// getFromDILocation - Translate the DILocation quad into a DebugLoc.
+DebugLoc DebugLoc::getFromDILocation(MDNode *N) {
+ if (N == 0 || N->getNumOperands() != 4) return DebugLoc();
+
+ MDNode *Scope = dyn_cast_or_null<MDNode>(N->getOperand(2));
+ if (Scope == 0) return DebugLoc();
+
+ unsigned LineNo = 0, ColNo = 0;
+ if (ConstantInt *Line = dyn_cast_or_null<ConstantInt>(N->getOperand(0)))
+ LineNo = Line->getZExtValue();
+ if (ConstantInt *Col = dyn_cast_or_null<ConstantInt>(N->getOperand(1)))
+ ColNo = Col->getZExtValue();
+
+ return get(LineNo, ColNo, Scope, dyn_cast_or_null<MDNode>(N->getOperand(3)));
+}
+
+//===----------------------------------------------------------------------===//
+// LLVMContextImpl Implementation
+//===----------------------------------------------------------------------===//
+
+int LLVMContextImpl::getOrAddScopeRecordIdxEntry(MDNode *Scope,
+ int ExistingIdx) {
+ // If we already have an entry for this scope, return it.
+ int &Idx = ScopeRecordIdx[Scope];
+ if (Idx) return Idx;
+
+ // If we don't have an entry, but ExistingIdx is specified, use it.
+ if (ExistingIdx)
+ return Idx = ExistingIdx;
+
+ // Otherwise add a new entry.
+
+ // Start out ScopeRecords with a minimal reasonable size to avoid
+ // excessive reallocation starting out.
+ if (ScopeRecords.empty())
+ ScopeRecords.reserve(128);
+
+ // Index is biased by 1 so that a valid index is never 0 ("no entry").
+ Idx = ScopeRecords.size()+1;
+ ScopeRecords.push_back(DebugRecVH(Scope, this, Idx));
+ return Idx;
+}
+
+int LLVMContextImpl::getOrAddScopeInlinedAtIdxEntry(MDNode *Scope, MDNode *IA,
+ int ExistingIdx) {
+ // If we already have an entry, return it.
+ int &Idx = ScopeInlinedAtIdx[std::make_pair(Scope, IA)];
+ if (Idx) return Idx;
+
+ // If we don't have an entry, but ExistingIdx is specified, use it.
+ if (ExistingIdx)
+ return Idx = ExistingIdx;
+
+ // Start out ScopeInlinedAtRecords with a minimal reasonable size to avoid
+ // excessive reallocation starting out.
+ if (ScopeInlinedAtRecords.empty())
+ ScopeInlinedAtRecords.reserve(128);
+
+ // Index is biased by 1 and negated.
+ Idx = -ScopeInlinedAtRecords.size()-1;
+ ScopeInlinedAtRecords.push_back(std::make_pair(DebugRecVH(Scope, this, Idx),
+ DebugRecVH(IA, this, Idx)));
+ return Idx;
+}
+
+
+//===----------------------------------------------------------------------===//
+// DebugRecVH Implementation
+//===----------------------------------------------------------------------===//
+
+/// deleted - The MDNode this is pointing to got deleted, so this pointer needs
+/// to drop to null and we need remove our entry from the DenseMap.
+void DebugRecVH::deleted() {
+ // If this is a non-canonical reference, just drop the value to null, we know
+ // it doesn't have a map entry.
+ if (Idx == 0) {
+ setValPtr(0);
+ return;
+ }
+
+ MDNode *Cur = get();
+
+ // If the index is positive, it is an entry in ScopeRecords.
+ if (Idx > 0) {
+ assert(Ctx->ScopeRecordIdx[Cur] == Idx && "Mapping out of date!");
+ Ctx->ScopeRecordIdx.erase(Cur);
+ // Reset this VH to null and we're done.
+ setValPtr(0);
+ Idx = 0;
+ return;
+ }
+
+ // Otherwise, it is an entry in ScopeInlinedAtRecords, we don't know if it
+ // is the scope or the inlined-at record entry.
+ assert(unsigned(-Idx-1) < Ctx->ScopeInlinedAtRecords.size());
+ std::pair<DebugRecVH, DebugRecVH> &Entry = Ctx->ScopeInlinedAtRecords[-Idx-1];
+ assert((this == &Entry.first || this == &Entry.second) &&
+ "Mapping out of date!");
+
+ MDNode *OldScope = Entry.first.get();
+ MDNode *OldInlinedAt = Entry.second.get();
+ assert(OldScope != 0 && OldInlinedAt != 0 &&
+ "Entry should be non-canonical if either val dropped to null");
+
+ // Otherwise, we do have an entry in it, nuke it and we're done.
+ assert(Ctx->ScopeInlinedAtIdx[std::make_pair(OldScope, OldInlinedAt)] == Idx&&
+ "Mapping out of date");
+ Ctx->ScopeInlinedAtIdx.erase(std::make_pair(OldScope, OldInlinedAt));
+
+ // Reset this VH to null. Drop both 'Idx' values to null to indicate that
+ // we're in non-canonical form now.
+ setValPtr(0);
+ Entry.first.Idx = Entry.second.Idx = 0;
+}
+
+void DebugRecVH::allUsesReplacedWith(Value *NewVa) {
+ // If being replaced with a non-mdnode value (e.g. undef) handle this as if
+ // the mdnode got deleted.
+ MDNode *NewVal = dyn_cast<MDNode>(NewVa);
+ if (NewVal == 0) return deleted();
+
+ // If this is a non-canonical reference, just change it, we know it already
+ // doesn't have a map entry.
+ if (Idx == 0) {
+ setValPtr(NewVa);
+ return;
+ }
+
+ MDNode *OldVal = get();
+ assert(OldVal != NewVa && "Node replaced with self?");
+
+ // If the index is positive, it is an entry in ScopeRecords.
+ if (Idx > 0) {
+ assert(Ctx->ScopeRecordIdx[OldVal] == Idx && "Mapping out of date!");
+ Ctx->ScopeRecordIdx.erase(OldVal);
+ setValPtr(NewVal);
+
+ int NewEntry = Ctx->getOrAddScopeRecordIdxEntry(NewVal, Idx);
+
+ // If NewVal already has an entry, this becomes a non-canonical reference,
+ // just drop Idx to 0 to signify this.
+ if (NewEntry != Idx)
+ Idx = 0;
+ return;
+ }
+
+ // Otherwise, it is an entry in ScopeInlinedAtRecords, we don't know if it
+ // is the scope or the inlined-at record entry.
+ assert(unsigned(-Idx-1) < Ctx->ScopeInlinedAtRecords.size());
+ std::pair<DebugRecVH, DebugRecVH> &Entry = Ctx->ScopeInlinedAtRecords[-Idx-1];
+ assert((this == &Entry.first || this == &Entry.second) &&
+ "Mapping out of date!");
+
+ MDNode *OldScope = Entry.first.get();
+ MDNode *OldInlinedAt = Entry.second.get();
+ assert(OldScope != 0 && OldInlinedAt != 0 &&
+ "Entry should be non-canonical if either val dropped to null");
+
+ // Otherwise, we do have an entry in it, nuke it and we're done.
+ assert(Ctx->ScopeInlinedAtIdx[std::make_pair(OldScope, OldInlinedAt)] == Idx&&
+ "Mapping out of date");
+ Ctx->ScopeInlinedAtIdx.erase(std::make_pair(OldScope, OldInlinedAt));
+
+ // Reset this VH to the new value.
+ setValPtr(NewVal);
+
+ int NewIdx = Ctx->getOrAddScopeInlinedAtIdxEntry(Entry.first.get(),
+ Entry.second.get(), Idx);
+ // If NewVal already has an entry, this becomes a non-canonical reference,
+ // just drop Idx to 0 to signify this.
+ if (NewIdx != Idx) {
+ std::pair<DebugRecVH, DebugRecVH> &Entry=Ctx->ScopeInlinedAtRecords[-Idx-1];
+ Entry.first.Idx = Entry.second.Idx = 0;
+ }
+}
diff --git a/libclamav/c++/llvm/lib/VMCore/Dominators.cpp b/libclamav/c++/llvm/lib/VMCore/Dominators.cpp
index 3441750..f3dad82 100644
--- a/libclamav/c++/llvm/lib/VMCore/Dominators.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/Dominators.cpp
@@ -17,6 +17,7 @@
#include "llvm/Analysis/Dominators.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -30,9 +31,9 @@ using namespace llvm;
// Always verify dominfo if expensive checking is enabled.
#ifdef XDEBUG
-bool VerifyDomInfo = true;
+static bool VerifyDomInfo = true;
#else
-bool VerifyDomInfo = false;
+static bool VerifyDomInfo = false;
#endif
static cl::opt<bool,true>
VerifyDomInfoX("verify-dom-info", cl::location(VerifyDomInfo),
@@ -51,8 +52,8 @@ TEMPLATE_INSTANTIATION(class llvm::DomTreeNodeBase<BasicBlock>);
TEMPLATE_INSTANTIATION(class llvm::DominatorTreeBase<BasicBlock>);
char DominatorTree::ID = 0;
-static RegisterPass<DominatorTree>
-E("domtree", "Dominator Tree Construction", true, true);
+INITIALIZE_PASS(DominatorTree, "domtree",
+ "Dominator Tree Construction", true, true);
bool DominatorTree::runOnFunction(Function &F) {
DT->recalculate(F);
@@ -105,8 +106,8 @@ bool DominatorTree::dominates(const Instruction *A, const Instruction *B) const{
//===----------------------------------------------------------------------===//
char DominanceFrontier::ID = 0;
-static RegisterPass<DominanceFrontier>
-G("domfrontier", "Dominance Frontier Construction", true, true);
+INITIALIZE_PASS(DominanceFrontier, "domfrontier",
+ "Dominance Frontier Construction", true, true);
void DominanceFrontier::verifyAnalysis() const {
if (!VerifyDomInfo) return;
@@ -119,39 +120,26 @@ void DominanceFrontier::verifyAnalysis() const {
assert(!compare(OtherDF) && "Invalid DominanceFrontier info!");
}
-// NewBB is split and now it has one successor. Update dominace frontier to
+// NewBB is split and now it has one successor. Update dominance frontier to
// reflect this change.
void DominanceFrontier::splitBlock(BasicBlock *NewBB) {
- assert(NewBB->getTerminator()->getNumSuccessors() == 1
- && "NewBB should have a single successor!");
+ assert(NewBB->getTerminator()->getNumSuccessors() == 1 &&
+ "NewBB should have a single successor!");
BasicBlock *NewBBSucc = NewBB->getTerminator()->getSuccessor(0);
- SmallVector<BasicBlock*, 8> PredBlocks;
- for (pred_iterator PI = pred_begin(NewBB), PE = pred_end(NewBB);
- PI != PE; ++PI)
- PredBlocks.push_back(*PI);
-
- if (PredBlocks.empty())
- // If NewBB does not have any predecessors then it is a entry block.
- // In this case, NewBB and its successor NewBBSucc dominates all
- // other blocks.
- return;
-
// NewBBSucc inherits original NewBB frontier.
DominanceFrontier::iterator NewBBI = find(NewBB);
- if (NewBBI != end()) {
- DominanceFrontier::DomSetType NewBBSet = NewBBI->second;
- DominanceFrontier::DomSetType NewBBSuccSet;
- NewBBSuccSet.insert(NewBBSet.begin(), NewBBSet.end());
- addBasicBlock(NewBBSucc, NewBBSuccSet);
- }
+ if (NewBBI != end())
+ addBasicBlock(NewBBSucc, NewBBI->second);
// If NewBB dominates NewBBSucc, then DF(NewBB) is now going to be the
- // DF(PredBlocks[0]) without the stuff that the new block does not dominate
+ // DF(NewBBSucc) without the stuff that the new block does not dominate
// a predecessor of.
DominatorTree &DT = getAnalysis<DominatorTree>();
- if (DT.dominates(NewBB, NewBBSucc)) {
- DominanceFrontier::iterator DFI = find(PredBlocks[0]);
+ DomTreeNode *NewBBNode = DT.getNode(NewBB);
+ DomTreeNode *NewBBSuccNode = DT.getNode(NewBBSucc);
+ if (DT.dominates(NewBBNode, NewBBSuccNode)) {
+ DominanceFrontier::iterator DFI = find(NewBBSucc);
if (DFI != end()) {
DominanceFrontier::DomSetType Set = DFI->second;
// Filter out stuff in Set that we do not dominate a predecessor of.
@@ -160,8 +148,10 @@ void DominanceFrontier::splitBlock(BasicBlock *NewBB) {
bool DominatesPred = false;
for (pred_iterator PI = pred_begin(*SetI), E = pred_end(*SetI);
PI != E; ++PI)
- if (DT.dominates(NewBB, *PI))
+ if (DT.dominates(NewBBNode, DT.getNode(*PI))) {
DominatesPred = true;
+ break;
+ }
if (!DominatesPred)
Set.erase(SetI++);
else
@@ -186,50 +176,71 @@ void DominanceFrontier::splitBlock(BasicBlock *NewBB) {
NewDFSet.insert(NewBBSucc);
addBasicBlock(NewBB, NewDFSet);
}
-
- // Now we must loop over all of the dominance frontiers in the function,
- // replacing occurrences of NewBBSucc with NewBB in some cases. All
- // blocks that dominate a block in PredBlocks and contained NewBBSucc in
- // their dominance frontier must be updated to contain NewBB instead.
- //
- for (Function::iterator FI = NewBB->getParent()->begin(),
- FE = NewBB->getParent()->end(); FI != FE; ++FI) {
- DominanceFrontier::iterator DFI = find(FI);
- if (DFI == end()) continue; // unreachable block.
-
- // Only consider nodes that have NewBBSucc in their dominator frontier.
- if (!DFI->second.count(NewBBSucc)) continue;
-
- // Verify whether this block dominates a block in predblocks. If not, do
- // not update it.
- bool BlockDominatesAny = false;
- for (SmallVectorImpl<BasicBlock*>::const_iterator BI = PredBlocks.begin(),
- BE = PredBlocks.end(); BI != BE; ++BI) {
- if (DT.dominates(FI, *BI)) {
- BlockDominatesAny = true;
+
+ // Now update dominance frontiers which either used to contain NewBBSucc
+ // or which now need to include NewBB.
+
+ // Collect the set of blocks which dominate a predecessor of NewBB or
+ // NewBBSucc and which don't dominate both. This is an initial
+ // approximation of the blocks whose dominance frontiers will need updates.
+ SmallVector<DomTreeNode *, 16> AllPredDoms;
+
+ // Compute the block which dominates both NewBBSucc and NewBB. This is
+ // the immediate dominator of NewBBSucc unless NewBB dominates NewBBSucc.
+ // The code below which climbs dominator trees will stop at this point,
+ // because from this point up, dominance frontiers are unaffected.
+ DomTreeNode *DominatesBoth = 0;
+ if (NewBBSuccNode) {
+ DominatesBoth = NewBBSuccNode->getIDom();
+ if (DominatesBoth == NewBBNode)
+ DominatesBoth = NewBBNode->getIDom();
+ }
+
+ // Collect the set of all blocks which dominate a predecessor of NewBB.
+ SmallPtrSet<DomTreeNode *, 8> NewBBPredDoms;
+ for (pred_iterator PI = pred_begin(NewBB), E = pred_end(NewBB); PI != E; ++PI)
+ for (DomTreeNode *DTN = DT.getNode(*PI); DTN; DTN = DTN->getIDom()) {
+ if (DTN == DominatesBoth)
break;
- }
+ if (!NewBBPredDoms.insert(DTN))
+ break;
+ AllPredDoms.push_back(DTN);
}
- // If NewBBSucc should not stay in our dominator frontier, remove it.
- // We remove it unless there is a predecessor of NewBBSucc that we
- // dominate, but we don't strictly dominate NewBBSucc.
- bool ShouldRemove = true;
- if ((BasicBlock*)FI == NewBBSucc || !DT.dominates(FI, NewBBSucc)) {
- // Okay, we know that PredDom does not strictly dominate NewBBSucc.
- // Check to see if it dominates any predecessors of NewBBSucc.
- for (pred_iterator PI = pred_begin(NewBBSucc),
- E = pred_end(NewBBSucc); PI != E; ++PI)
- if (DT.dominates(FI, *PI)) {
- ShouldRemove = false;
- break;
- }
+ // Collect the set of all blocks which dominate a predecessor of NewBBSucc.
+ SmallPtrSet<DomTreeNode *, 8> NewBBSuccPredDoms;
+ for (pred_iterator PI = pred_begin(NewBBSucc),
+ E = pred_end(NewBBSucc); PI != E; ++PI)
+ for (DomTreeNode *DTN = DT.getNode(*PI); DTN; DTN = DTN->getIDom()) {
+ if (DTN == DominatesBoth)
+ break;
+ if (!NewBBSuccPredDoms.insert(DTN))
+ break;
+ if (!NewBBPredDoms.count(DTN))
+ AllPredDoms.push_back(DTN);
}
-
- if (ShouldRemove)
- removeFromFrontier(DFI, NewBBSucc);
- if (BlockDominatesAny && (&*FI == NewBB || !DT.dominates(FI, NewBB)))
+
+ // Visit all relevant dominance frontiers and make any needed updates.
+ for (SmallVectorImpl<DomTreeNode *>::const_iterator I = AllPredDoms.begin(),
+ E = AllPredDoms.end(); I != E; ++I) {
+ DomTreeNode *DTN = *I;
+ iterator DFI = find((*I)->getBlock());
+
+ // Only consider nodes that have NewBBSucc in their dominator frontier.
+ if (DFI == end() || !DFI->second.count(NewBBSucc)) continue;
+
+ // If the block dominates a predecessor of NewBB but does not properly
+ // dominate NewBB itself, add NewBB to its dominance frontier.
+ if (NewBBPredDoms.count(DTN) &&
+ !DT.properlyDominates(DTN, NewBBNode))
addToFrontier(DFI, NewBB);
+
+ // If the block does not dominate a predecessor of NewBBSucc or
+ // properly dominates NewBBSucc itself, remove NewBBSucc from its
+ // dominance frontier.
+ if (!NewBBSuccPredDoms.count(DTN) ||
+ DT.properlyDominates(DTN, NewBBSuccNode))
+ removeFromFrontier(DFI, NewBBSucc);
}
}
@@ -343,3 +354,7 @@ void DominanceFrontierBase::print(raw_ostream &OS, const Module* ) const {
}
}
+void DominanceFrontierBase::dump() const {
+ print(dbgs());
+}
+
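
The splitBlock rewrite above replaces the old whole-function scan with walks up the dominator tree: from each predecessor of NewBB (and of NewBBSucc) it climbs the idom chain, stopping at the block that dominates both NewBB and NewBBSucc or at a node that was already collected. A minimal self-contained sketch of that climbing pattern, using a toy node type rather than LLVM's DomTreeNode:

  #include <unordered_set>
  #include <vector>

  struct ToyDomNode {
    ToyDomNode *IDom = nullptr;   // immediate dominator; null for the root
  };

  // Collect every dominator of Start strictly below StopAt, bailing out early
  // when a node was already collected via another predecessor -- the same
  // early exit the patch gets from the failed SmallPtrSet insert.
  static void collectDoms(ToyDomNode *Start, ToyDomNode *StopAt,
                          std::unordered_set<ToyDomNode *> &Seen,
                          std::vector<ToyDomNode *> &Order) {
    for (ToyDomNode *N = Start; N && N != StopAt; N = N->IDom) {
      if (!Seen.insert(N).second)
        break;                    // chain already walked from here upward
      Order.push_back(N);
    }
  }

  int main() {
    ToyDomNode Root, A, B;        // Root idoms A, A idoms B
    A.IDom = &Root; B.IDom = &A;
    std::unordered_set<ToyDomNode *> Seen;
    std::vector<ToyDomNode *> Order;
    collectDoms(&B, &Root, Seen, Order);   // collects B and A, stops at Root
    return Order.size() == 2 ? 0 : 1;
  }
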
diff --git a/libclamav/c++/llvm/lib/VMCore/Function.cpp b/libclamav/c++/llvm/lib/VMCore/Function.cpp
index dbc283e..8f94efc 100644
--- a/libclamav/c++/llvm/lib/VMCore/Function.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/Function.cpp
@@ -16,6 +16,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/LeakDetector.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/StringPool.h"
@@ -400,13 +401,16 @@ Function *Intrinsic::getDeclaration(Module *M, ID id, const Type **Tys,
#include "llvm/Intrinsics.gen"
#undef GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
- /// hasAddressTaken - returns true if there are any uses of this function
- /// other than direct calls or invokes to it.
-bool Function::hasAddressTaken() const {
- for (Value::use_const_iterator I = use_begin(), E = use_end(); I != E; ++I) {
- if (I.getOperandNo() != 0 ||
- (!isa<CallInst>(*I) && !isa<InvokeInst>(*I)))
- return true;
+/// hasAddressTaken - returns true if there are any uses of this function
+/// other than direct calls or invokes to it.
+bool Function::hasAddressTaken(const User* *PutOffender) const {
+ for (Value::const_use_iterator I = use_begin(), E = use_end(); I != E; ++I) {
+ const User *U = *I;
+ if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
+ return PutOffender ? (*PutOffender = U, true) : true;
+ ImmutableCallSite CS(cast<Instruction>(U));
+ if (!CS.isCallee(I))
+ return PutOffender ? (*PutOffender = U, true) : true;
}
return false;
}
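
The new hasAddressTaken signature above reports which user defeated the check through an optional out-parameter, using a comma expression so the early return stays a one-liner. A hedged sketch of that reporting idiom over a plain container (generic names, not the LLVM API):

  #include <vector>

  // Returns true if any element fails the predicate; if Offender is non-null,
  // it receives the first failing element, mirroring the PutOffender pattern.
  template <typename T, typename Pred>
  static bool findOffender(const std::vector<T> &Xs, Pred P,
                           const T **Offender = nullptr) {
    for (const T &X : Xs)
      if (!P(X))
        return Offender ? (*Offender = &X, true) : true;
    return false;
  }

  // Usage sketch:
  //   const int *Bad = nullptr;
  //   bool AnyNeg = findOffender<int>(Vals, [](int V) { return V >= 0; }, &Bad);
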
diff --git a/libclamav/c++/llvm/lib/VMCore/Globals.cpp b/libclamav/c++/llvm/lib/VMCore/Globals.cpp
index 489ec65..96716ee 100644
--- a/libclamav/c++/llvm/lib/VMCore/Globals.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/Globals.cpp
@@ -61,8 +61,8 @@ void GlobalValue::Dematerialize() {
/// that want to check to see if a global is unused, but don't want to deal
/// with potentially dead constants hanging off of the globals.
void GlobalValue::removeDeadConstantUsers() const {
- Value::use_const_iterator I = use_begin(), E = use_end();
- Value::use_const_iterator LastNonDeadUser = E;
+ Value::const_use_iterator I = use_begin(), E = use_end();
+ Value::const_use_iterator LastNonDeadUser = E;
while (I != E) {
if (const Constant *User = dyn_cast<Constant>(*I)) {
if (!removeDeadUsersOfConstant(User)) {
@@ -102,7 +102,14 @@ void GlobalValue::copyAttributesFrom(const GlobalValue *Src) {
setVisibility(Src->getVisibility());
}
-
+void GlobalValue::setAlignment(unsigned Align) {
+ assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
+ assert(Align <= MaximumAlignment &&
+ "Alignment is greater than MaximumAlignment!");
+ Alignment = Log2_32(Align) + 1;
+ assert(getAlignment() == Align && "Alignment representation error!");
+}
+
//===----------------------------------------------------------------------===//
// GlobalVariable Implementation
//===----------------------------------------------------------------------===//
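
GlobalValue::setAlignment above, like the AllocaInst/LoadInst/StoreInst setters further down in this patch, stores the alignment as Log2_32(Align) + 1, so a few bits cover every power-of-two alignment up to MaximumAlignment, with 0 meaning unspecified; the trailing assert verifies the round trip. A small standalone sketch of that encoding (not the LLVM implementation):

  #include <cassert>
  #include <cstdint>

  // 0 means "no alignment specified"; otherwise the field holds log2(Align) + 1.
  static unsigned encodeAlign(uint64_t Align) {
    assert(Align != 0 && (Align & (Align - 1)) == 0 &&
           "Alignment is not a power of 2!");
    unsigned Log2 = 0;
    while ((uint64_t(1) << Log2) < Align)
      ++Log2;
    return Log2 + 1;
  }

  static uint64_t decodeAlign(unsigned Encoded) {
    return Encoded ? (uint64_t(1) << (Encoded - 1)) : 0;
  }

  int main() {
    for (uint64_t A : {1, 2, 16, 4096})
      assert(decodeAlign(encodeAlign(A)) == A);   // representation round-trips
    return 0;
  }
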
diff --git a/libclamav/c++/llvm/lib/VMCore/IRBuilder.cpp b/libclamav/c++/llvm/lib/VMCore/IRBuilder.cpp
index 9f2786e..c1b783c 100644
--- a/libclamav/c++/llvm/lib/VMCore/IRBuilder.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/IRBuilder.cpp
@@ -32,19 +32,6 @@ Value *IRBuilderBase::CreateGlobalString(const char *Str, const Twine &Name) {
return GV;
}
-/// SetCurrentDebugLocation - Set location information used by debugging
-/// information.
-void IRBuilderBase::SetCurrentDebugLocation(MDNode *L) {
- if (DbgMDKind == 0)
- DbgMDKind = Context.getMDKindID("dbg");
- CurDbgLocation = L;
-}
-
-void IRBuilderBase::SetInstDebugLocation(Instruction *I) const {
- if (CurDbgLocation)
- I->setMetadata(DbgMDKind, CurDbgLocation);
-}
-
const Type *IRBuilderBase::getCurrentFunctionReturnType() const {
assert(BB && BB->getParent() && "No current function!");
return BB->getParent()->getReturnType();
diff --git a/libclamav/c++/llvm/lib/VMCore/InlineAsm.cpp b/libclamav/c++/llvm/lib/VMCore/InlineAsm.cpp
index 0d2eca9..69f713b 100644
--- a/libclamav/c++/llvm/lib/VMCore/InlineAsm.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/InlineAsm.cpp
@@ -164,7 +164,7 @@ InlineAsm::ParseConstraints(StringRef Constraints) {
StringRef::iterator ConstraintEnd = std::find(I, E, ',');
if (ConstraintEnd == I || // Empty constraint like ",,"
- Info.Parse(std::string(I, ConstraintEnd), Result)) {
+ Info.Parse(StringRef(I, ConstraintEnd-I), Result)) {
Result.clear(); // Erroneous constraint?
break;
}
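
The one-line InlineAsm change swaps a temporary std::string for a StringRef, so each comma-separated constraint is parsed against a non-owning view instead of a fresh heap copy. The same idea with the standard library's std::string_view (a C++17 sketch of the splitting step only, not the upstream constraint parser):

  #include <algorithm>
  #include <string_view>
  #include <vector>

  // Split a comma-separated constraint string into non-owning views.
  static std::vector<std::string_view> splitConstraints(std::string_view S) {
    std::vector<std::string_view> Parts;
    const char *I = S.data(), *E = S.data() + S.size();
    while (I != E) {
      const char *Comma = std::find(I, E, ',');
      Parts.push_back(std::string_view(I, Comma - I));   // no copy of the text
      I = (Comma == E) ? E : Comma + 1;
    }
    return Parts;
  }
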
diff --git a/libclamav/c++/llvm/lib/VMCore/Instruction.cpp b/libclamav/c++/llvm/lib/VMCore/Instruction.cpp
index 3fabfd0..05bed4c 100644
--- a/libclamav/c++/llvm/lib/VMCore/Instruction.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/Instruction.cpp
@@ -49,8 +49,8 @@ Instruction::Instruction(const Type *ty, unsigned it, Use *Ops, unsigned NumOps,
// Out of line virtual method, so the vtable, etc has a home.
Instruction::~Instruction() {
assert(Parent == 0 && "Instruction still linked in the program!");
- if (hasMetadata())
- removeAllMetadata();
+ if (hasMetadataHashEntry())
+ clearMetadataHashEntries();
}
@@ -283,12 +283,13 @@ bool Instruction::isSameOperationAs(const Instruction *I) const {
/// specified block. Note that PHI nodes are considered to evaluate their
/// operands in the corresponding predecessor block.
bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
- for (use_const_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
+ for (const_use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
// PHI nodes use values in the corresponding predecessor block. For other
// instructions, just check to see whether the parent of the use matches up.
- const PHINode *PN = dyn_cast<PHINode>(*UI);
+ const User *U = *UI;
+ const PHINode *PN = dyn_cast<PHINode>(U);
if (PN == 0) {
- if (cast<Instruction>(*UI)->getParent() != BB)
+ if (cast<Instruction>(U)->getParent() != BB)
return true;
continue;
}
@@ -401,12 +402,20 @@ bool Instruction::isSafeToSpeculativelyExecute() const {
return false;
// Note that it is not safe to speculate into a malloc'd region because
// malloc may return null.
- if (isa<AllocaInst>(getOperand(0)))
+ // It's also not safe to follow a bitcast, for example:
+ // bitcast i8* (alloca i8) to i32*
+ // would result in a 4-byte load from a 1-byte alloca.
+ Value *Op0 = getOperand(0);
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0)) {
+ // TODO: it's safe to do this for any GEP with constant indices that
+ // compute inside the allocated type, but not for any inbounds gep.
+ if (GEP->hasAllZeroIndices())
+ Op0 = GEP->getPointerOperand();
+ }
+ if (isa<AllocaInst>(Op0))
return true;
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(getOperand(0)))
return !GV->hasExternalWeakLinkage();
- // FIXME: Handle cases involving GEPs. We have to be careful because
- // a load of a out-of-bounds GEP has undefined behavior.
return false;
}
case Call:
@@ -421,6 +430,7 @@ bool Instruction::isSafeToSpeculativelyExecute() const {
case Store:
case Ret:
case Br:
+ case IndirectBr:
case Switch:
case Unwind:
case Unreachable:
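
The Load case in isSafeToSpeculativelyExecute now strips GEPs whose indices are all zero before checking whether the base is an alloca: such a GEP cannot move the pointer, whereas a bitcast can change the loaded width (the 4-byte load from a 1-byte alloca mentioned in the comment), so bitcasts are still rejected. A toy sketch of "strip zero-offset wrappers, then classify the base", over a hypothetical pointer-expression type rather than LLVM's Value hierarchy:

  // Hypothetical, heavily simplified pointer expressions.
  struct PtrExpr {
    enum Kind { Alloca, Global, ZeroIndexGEP, BitCast } K;
    const PtrExpr *Base = nullptr;       // wrapped pointer for GEP/BitCast
  };

  // A zero-index GEP never moves the pointer, so it is safe to look through;
  // a bitcast may change the access width, so it is not stripped here.
  static bool safeToSpeculateLoadFrom(const PtrExpr *P) {
    while (P->K == PtrExpr::ZeroIndexGEP)
      P = P->Base;
    return P->K == PtrExpr::Alloca;      // local allocation: the load cannot trap
  }
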
diff --git a/libclamav/c++/llvm/lib/VMCore/Instructions.cpp b/libclamav/c++/llvm/lib/VMCore/Instructions.cpp
index 8f4763f..401802e 100644
--- a/libclamav/c++/llvm/lib/VMCore/Instructions.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/Instructions.cpp
@@ -30,76 +30,12 @@ using namespace llvm;
// CallSite Class
//===----------------------------------------------------------------------===//
-#define CALLSITE_DELEGATE_GETTER(METHOD) \
- Instruction *II(getInstruction()); \
- return isCall() \
- ? cast<CallInst>(II)->METHOD \
- : cast<InvokeInst>(II)->METHOD
-
-#define CALLSITE_DELEGATE_SETTER(METHOD) \
- Instruction *II(getInstruction()); \
- if (isCall()) \
- cast<CallInst>(II)->METHOD; \
- else \
- cast<InvokeInst>(II)->METHOD
-
-CallSite::CallSite(Instruction *C) {
- assert((isa<CallInst>(C) || isa<InvokeInst>(C)) && "Not a call!");
- I.setPointer(C);
- I.setInt(isa<CallInst>(C));
-}
-CallingConv::ID CallSite::getCallingConv() const {
- CALLSITE_DELEGATE_GETTER(getCallingConv());
-}
-void CallSite::setCallingConv(CallingConv::ID CC) {
- CALLSITE_DELEGATE_SETTER(setCallingConv(CC));
-}
-const AttrListPtr &CallSite::getAttributes() const {
- CALLSITE_DELEGATE_GETTER(getAttributes());
-}
-void CallSite::setAttributes(const AttrListPtr &PAL) {
- CALLSITE_DELEGATE_SETTER(setAttributes(PAL));
-}
-bool CallSite::paramHasAttr(uint16_t i, Attributes attr) const {
- CALLSITE_DELEGATE_GETTER(paramHasAttr(i, attr));
-}
-uint16_t CallSite::getParamAlignment(uint16_t i) const {
- CALLSITE_DELEGATE_GETTER(getParamAlignment(i));
-}
-bool CallSite::doesNotAccessMemory() const {
- CALLSITE_DELEGATE_GETTER(doesNotAccessMemory());
-}
-void CallSite::setDoesNotAccessMemory(bool doesNotAccessMemory) {
- CALLSITE_DELEGATE_SETTER(setDoesNotAccessMemory(doesNotAccessMemory));
-}
-bool CallSite::onlyReadsMemory() const {
- CALLSITE_DELEGATE_GETTER(onlyReadsMemory());
-}
-void CallSite::setOnlyReadsMemory(bool onlyReadsMemory) {
- CALLSITE_DELEGATE_SETTER(setOnlyReadsMemory(onlyReadsMemory));
-}
-bool CallSite::doesNotReturn() const {
- CALLSITE_DELEGATE_GETTER(doesNotReturn());
+User::op_iterator CallSite::getCallee() const {
+ Instruction *II(getInstruction());
+ return isCall()
+ ? cast<CallInst>(II)->op_end() - 1 // Skip Callee
+ : cast<InvokeInst>(II)->op_end() - 3; // Skip BB, BB, Callee
}
-void CallSite::setDoesNotReturn(bool doesNotReturn) {
- CALLSITE_DELEGATE_SETTER(setDoesNotReturn(doesNotReturn));
-}
-bool CallSite::doesNotThrow() const {
- CALLSITE_DELEGATE_GETTER(doesNotThrow());
-}
-void CallSite::setDoesNotThrow(bool doesNotThrow) {
- CALLSITE_DELEGATE_SETTER(setDoesNotThrow(doesNotThrow));
-}
-
-bool CallSite::hasArgument(const Value *Arg) const {
- for (arg_iterator AI = this->arg_begin(), E = this->arg_end(); AI != E; ++AI)
- if (AI->get() == Arg)
- return true;
- return false;
-}
-
-#undef CALLSITE_DELEGATE_GETTER
-#undef CALLSITE_DELEGATE_SETTER
//===----------------------------------------------------------------------===//
// TerminatorInst Class
@@ -295,8 +231,7 @@ CallInst::~CallInst() {
void CallInst::init(Value *Func, Value* const *Params, unsigned NumParams) {
assert(NumOperands == NumParams+1 && "NumOperands not set up?");
- Use *OL = OperandList;
- OL[0] = Func;
+ Op<-1>() = Func;
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
@@ -309,16 +244,15 @@ void CallInst::init(Value *Func, Value* const *Params, unsigned NumParams) {
assert((i >= FTy->getNumParams() ||
FTy->getParamType(i) == Params[i]->getType()) &&
"Calling a function with a bad signature!");
- OL[i+1] = Params[i];
+ OperandList[i] = Params[i];
}
}
void CallInst::init(Value *Func, Value *Actual1, Value *Actual2) {
assert(NumOperands == 3 && "NumOperands not set up?");
- Use *OL = OperandList;
- OL[0] = Func;
- OL[1] = Actual1;
- OL[2] = Actual2;
+ Op<-1>() = Func;
+ Op<0>() = Actual1;
+ Op<1>() = Actual2;
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
@@ -337,9 +271,8 @@ void CallInst::init(Value *Func, Value *Actual1, Value *Actual2) {
void CallInst::init(Value *Func, Value *Actual) {
assert(NumOperands == 2 && "NumOperands not set up?");
- Use *OL = OperandList;
- OL[0] = Func;
- OL[1] = Actual;
+ Op<-1>() = Func;
+ Op<0>() = Actual;
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
@@ -355,8 +288,7 @@ void CallInst::init(Value *Func, Value *Actual) {
void CallInst::init(Value *Func) {
assert(NumOperands == 1 && "NumOperands not set up?");
- Use *OL = OperandList;
- OL[0] = Func;
+ Op<-1>() = Func;
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
@@ -537,9 +469,10 @@ static Instruction *createMalloc(Instruction *InsertBefore,
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
const Type *IntPtrTy, const Type *AllocTy,
Value *AllocSize, Value *ArraySize,
+ Function * MallocF,
const Twine &Name) {
return createMalloc(InsertBefore, NULL, IntPtrTy, AllocTy, AllocSize,
- ArraySize, NULL, Name);
+ ArraySize, MallocF, Name);
}
/// CreateMalloc - Generate the IR for a call to malloc:
@@ -591,8 +524,8 @@ static Instruction* createFree(Value* Source, Instruction *InsertBefore,
}
/// CreateFree - Generate the IR for a call to the builtin free function.
-void CallInst::CreateFree(Value* Source, Instruction *InsertBefore) {
- createFree(Source, InsertBefore, NULL);
+Instruction * CallInst::CreateFree(Value* Source, Instruction *InsertBefore) {
+ return createFree(Source, InsertBefore, NULL);
}
/// CreateFree - Generate the IR for a call to the builtin free function.
@@ -611,24 +544,24 @@ Instruction* CallInst::CreateFree(Value* Source, BasicBlock *InsertAtEnd) {
void InvokeInst::init(Value *Fn, BasicBlock *IfNormal, BasicBlock *IfException,
Value* const *Args, unsigned NumArgs) {
assert(NumOperands == 3+NumArgs && "NumOperands not set up?");
- Use *OL = OperandList;
- OL[0] = Fn;
- OL[1] = IfNormal;
- OL[2] = IfException;
+ Op<-3>() = Fn;
+ Op<-2>() = IfNormal;
+ Op<-1>() = IfException;
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType());
FTy = FTy; // silence warning.
assert(((NumArgs == FTy->getNumParams()) ||
(FTy->isVarArg() && NumArgs > FTy->getNumParams())) &&
- "Calling a function with bad signature");
+ "Invoking a function with bad signature");
+ Use *OL = OperandList;
for (unsigned i = 0, e = NumArgs; i != e; i++) {
assert((i >= FTy->getNumParams() ||
FTy->getParamType(i) == Args[i]->getType()) &&
"Invoking a function with a bad signature!");
- OL[i+3] = Args[i];
+ OL[i] = Args[i];
}
}
@@ -892,8 +825,8 @@ static Value *getAISize(LLVMContext &Context, Value *Amt) {
else {
assert(!isa<BasicBlock>(Amt) &&
"Passed basic block into allocation size parameter! Use other ctor");
- assert(Amt->getType()->isIntegerTy(32) &&
- "Allocation array size is not a 32-bit integer!");
+ assert(Amt->getType()->isIntegerTy() &&
+ "Allocation array size is not an integer!");
}
return Amt;
}
@@ -958,6 +891,8 @@ AllocaInst::~AllocaInst() {
void AllocaInst::setAlignment(unsigned Align) {
assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
+ assert(Align <= MaximumAlignment &&
+ "Alignment is greater than MaximumAlignment!");
setInstructionSubclassData(Log2_32(Align) + 1);
assert(getAlignment() == Align && "Alignment representation error!");
}
@@ -1093,8 +1028,11 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
void LoadInst::setAlignment(unsigned Align) {
assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
+ assert(Align <= MaximumAlignment &&
+ "Alignment is greater than MaximumAlignment!");
setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
((Log2_32(Align)+1)<<1));
+ assert(getAlignment() == Align && "Alignment representation error!");
}
//===----------------------------------------------------------------------===//
@@ -1189,8 +1127,11 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
void StoreInst::setAlignment(unsigned Align) {
assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
+ assert(Align <= MaximumAlignment &&
+ "Alignment is greater than MaximumAlignment!");
setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
((Log2_32(Align)+1) << 1));
+ assert(getAlignment() == Align && "Alignment representation error!");
}
//===----------------------------------------------------------------------===//
@@ -1489,9 +1430,24 @@ bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
return false;
const VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType());
- if (!isa<Constant>(Mask) || MaskTy == 0 ||
- !MaskTy->getElementType()->isIntegerTy(32))
+ if (MaskTy == 0 || !MaskTy->getElementType()->isIntegerTy(32))
return false;
+
+ // Check to see if Mask is valid.
+ if (const ConstantVector *MV = dyn_cast<ConstantVector>(Mask)) {
+ const VectorType *VTy = cast<VectorType>(V1->getType());
+ for (unsigned i = 0, e = MV->getNumOperands(); i != e; ++i) {
+ if (ConstantInt* CI = dyn_cast<ConstantInt>(MV->getOperand(i))) {
+ if (CI->uge(VTy->getNumElements()*2))
+ return false;
+ } else if (!isa<UndefValue>(MV->getOperand(i))) {
+ return false;
+ }
+ }
+ }
+ else if (!isa<UndefValue>(Mask) && !isa<ConstantAggregateZero>(Mask))
+ return false;
+
return true;
}
@@ -1520,7 +1476,7 @@ void InsertValueInst::init(Value *Agg, Value *Val, const unsigned *Idx,
Op<0>() = Agg;
Op<1>() = Val;
- Indices.insert(Indices.end(), Idx, Idx + NumIdx);
+ Indices.append(Idx, Idx + NumIdx);
setName(Name);
}
@@ -1573,7 +1529,7 @@ void ExtractValueInst::init(const unsigned *Idx, unsigned NumIdx,
const Twine &Name) {
assert(NumOperands == 1 && "NumOperands not initialized?");
- Indices.insert(Indices.end(), Idx, Idx + NumIdx);
+ Indices.append(Idx, Idx + NumIdx);
setName(Name);
}
@@ -1626,43 +1582,29 @@ const Type* ExtractValueInst::getIndexedType(const Type *Agg,
// BinaryOperator Class
//===----------------------------------------------------------------------===//
-/// AdjustIType - Map Add, Sub, and Mul to FAdd, FSub, and FMul when the
-/// type is floating-point, to help provide compatibility with an older API.
-///
-static BinaryOperator::BinaryOps AdjustIType(BinaryOperator::BinaryOps iType,
- const Type *Ty) {
- // API compatibility: Adjust integer opcodes to floating-point opcodes.
- if (Ty->isFPOrFPVectorTy()) {
- if (iType == BinaryOperator::Add) iType = BinaryOperator::FAdd;
- else if (iType == BinaryOperator::Sub) iType = BinaryOperator::FSub;
- else if (iType == BinaryOperator::Mul) iType = BinaryOperator::FMul;
- }
- return iType;
-}
-
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
const Type *Ty, const Twine &Name,
Instruction *InsertBefore)
- : Instruction(Ty, AdjustIType(iType, Ty),
+ : Instruction(Ty, iType,
OperandTraits<BinaryOperator>::op_begin(this),
OperandTraits<BinaryOperator>::operands(this),
InsertBefore) {
Op<0>() = S1;
Op<1>() = S2;
- init(AdjustIType(iType, Ty));
+ init(iType);
setName(Name);
}
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
const Type *Ty, const Twine &Name,
BasicBlock *InsertAtEnd)
- : Instruction(Ty, AdjustIType(iType, Ty),
+ : Instruction(Ty, iType,
OperandTraits<BinaryOperator>::op_begin(this),
OperandTraits<BinaryOperator>::operands(this),
InsertAtEnd) {
Op<0>() = S1;
Op<1>() = S2;
- init(AdjustIType(iType, Ty));
+ init(iType);
setName(Name);
}
@@ -1989,9 +1931,12 @@ bool CastInst::isLosslessCast() const {
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
-/// @brief Determine if a cast is a no-op.
-bool CastInst::isNoopCast(const Type *IntPtrTy) const {
- switch (getOpcode()) {
+/// @brief Determine if the described cast is a no-op.
+bool CastInst::isNoopCast(Instruction::CastOps Opcode,
+ const Type *SrcTy,
+ const Type *DestTy,
+ const Type *IntPtrTy) {
+ switch (Opcode) {
default:
assert(!"Invalid CastOp");
case Instruction::Trunc:
@@ -2008,13 +1953,18 @@ bool CastInst::isNoopCast(const Type *IntPtrTy) const {
return true; // BitCast never modifies bits.
case Instruction::PtrToInt:
return IntPtrTy->getScalarSizeInBits() ==
- getType()->getScalarSizeInBits();
+ DestTy->getScalarSizeInBits();
case Instruction::IntToPtr:
return IntPtrTy->getScalarSizeInBits() ==
- getOperand(0)->getType()->getScalarSizeInBits();
+ SrcTy->getScalarSizeInBits();
}
}
+/// @brief Determine if a cast is a no-op.
+bool CastInst::isNoopCast(const Type *IntPtrTy) const {
+ return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
+}
+
/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
@@ -2047,7 +1997,7 @@ unsigned CastInst::isEliminableCastPair(
// FPEXT < FloatPt n/a FloatPt n/a
// PTRTOINT n/a Pointer n/a Integral Unsigned
// INTTOPTR n/a Integral Unsigned Pointer n/a
- // BITCONVERT = FirstClass n/a FirstClass n/a
+ // BITCAST = FirstClass n/a FirstClass n/a
//
// NOTE: some transforms are safe, but we consider them to be non-profitable.
// For example, we could merge "fptoui double to i32" + "zext i32 to i64",
@@ -2077,6 +2027,14 @@ unsigned CastInst::isEliminableCastPair(
{ 99,99,99,99,99,99,99,99,99,13,99,12 }, // IntToPtr |
{ 5, 5, 5, 6, 6, 5, 5, 6, 6,11, 5, 1 }, // BitCast -+
};
+
+ // If either of the casts are a bitcast from scalar to vector, disallow the
+ // merging.
+ if ((firstOp == Instruction::BitCast &&
+ isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
+ (secondOp == Instruction::BitCast &&
+ isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
+ return 0; // Disallowed
int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
[secondOp-Instruction::CastOpsBegin];
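
The ShuffleVectorInst::isValidOperands hunk earlier in this file's diff accepts a constant mask only if every element is either undef or an integer below twice the input vector length (a lane may come from either of the two inputs). A standalone sketch of that bound check, encoding undef lanes as -1, which is an assumption of this sketch rather than LLVM's representation:

  #include <vector>

  // With two source vectors of NumElts lanes each, a concrete mask element must
  // index into the concatenation of both, i.e. be smaller than 2 * NumElts.
  static bool isValidShuffleMask(const std::vector<int> &Mask, unsigned NumElts) {
    for (int Elt : Mask) {
      if (Elt < 0)
        continue;                                   // undef lane
      if (static_cast<unsigned>(Elt) >= 2 * NumElts)
        return false;
    }
    return true;
  }
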
diff --git a/libclamav/c++/llvm/lib/VMCore/IntrinsicInst.cpp b/libclamav/c++/llvm/lib/VMCore/IntrinsicInst.cpp
index d8f015a..ac8ec20 100644
--- a/libclamav/c++/llvm/lib/VMCore/IntrinsicInst.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/IntrinsicInst.cpp
@@ -24,8 +24,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/Constants.h"
#include "llvm/GlobalVariable.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/Metadata.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
@@ -55,7 +54,7 @@ Value *DbgInfoIntrinsic::StripCast(Value *C) {
///
Value *DbgDeclareInst::getAddress() const {
- if (MDNode* MD = cast_or_null<MDNode>(getOperand(1)))
+ if (MDNode* MD = cast_or_null<MDNode>(getArgOperand(0)))
return MD->getOperand(0);
else
return NULL;
@@ -66,9 +65,9 @@ Value *DbgDeclareInst::getAddress() const {
///
const Value *DbgValueInst::getValue() const {
- return cast<MDNode>(getOperand(1))->getOperand(0);
+ return cast<MDNode>(getArgOperand(0))->getOperand(0);
}
Value *DbgValueInst::getValue() {
- return cast<MDNode>(getOperand(1))->getOperand(0);
+ return cast<MDNode>(getArgOperand(0))->getOperand(0);
}
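
The getOperand(1) -> getArgOperand(0) switch here matches the operand relayout visible in the Instructions.cpp hunks above (Op<-1>() = Func): call arguments now occupy operands 0..N-1 and the callee comes last, so argument i is simply operand i. A small sketch of that indexing convention over a plain operand array (illustrative ToyCall type, not the LLVM CallInst API):

  #include <cassert>
  #include <vector>

  struct ToyCall {
    // Operand layout: [Arg0, Arg1, ..., ArgN-1, Callee]
    std::vector<const void *> Operands;

    const void *getCalledValue() const { return Operands.back(); }
    unsigned getNumArgOperands() const {
      return static_cast<unsigned>(Operands.size()) - 1;
    }
    const void *getArgOperand(unsigned i) const {
      assert(i < getNumArgOperands() && "argument index out of range");
      return Operands[i];                // arguments come first, callee last
    }
  };
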
diff --git a/libclamav/c++/llvm/lib/VMCore/LLVMContext.cpp b/libclamav/c++/llvm/lib/VMCore/LLVMContext.cpp
index 5a8ea5c..563c651 100644
--- a/libclamav/c++/llvm/lib/VMCore/LLVMContext.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/LLVMContext.cpp
@@ -17,6 +17,7 @@
#include "llvm/Constants.h"
#include "llvm/Instruction.h"
#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/SourceMgr.h"
#include "LLVMContextImpl.h"
using namespace llvm;
@@ -26,19 +27,101 @@ LLVMContext& llvm::getGlobalContext() {
return *GlobalContext;
}
-LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) { }
+LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
+ // Create the first metadata kind, which is always 'dbg'.
+ unsigned DbgID = getMDKindID("dbg");
+ assert(DbgID == MD_dbg && "dbg kind id drifted"); (void)DbgID;
+}
LLVMContext::~LLVMContext() { delete pImpl; }
-GetElementPtrConstantExpr::GetElementPtrConstantExpr
- (Constant *C,
- const std::vector<Constant*> &IdxList,
- const Type *DestTy)
- : ConstantExpr(DestTy, Instruction::GetElementPtr,
- OperandTraits<GetElementPtrConstantExpr>::op_end(this)
- - (IdxList.size()+1),
- IdxList.size()+1) {
- OperandList[0] = C;
- for (unsigned i = 0, E = IdxList.size(); i != E; ++i)
- OperandList[i+1] = IdxList[i];
+//===----------------------------------------------------------------------===//
+// Recoverable Backend Errors
+//===----------------------------------------------------------------------===//
+
+void LLVMContext::setInlineAsmDiagnosticHandler(void *DiagHandler,
+ void *DiagContext) {
+ pImpl->InlineAsmDiagHandler = DiagHandler;
+ pImpl->InlineAsmDiagContext = DiagContext;
+}
+
+/// getInlineAsmDiagnosticHandler - Return the diagnostic handler set by
+/// setInlineAsmDiagnosticHandler.
+void *LLVMContext::getInlineAsmDiagnosticHandler() const {
+ return pImpl->InlineAsmDiagHandler;
+}
+
+/// getInlineAsmDiagnosticContext - Return the diagnostic context set by
+/// setInlineAsmDiagnosticHandler.
+void *LLVMContext::getInlineAsmDiagnosticContext() const {
+ return pImpl->InlineAsmDiagContext;
+}
+
+void LLVMContext::emitError(StringRef ErrorStr) {
+ emitError(0U, ErrorStr);
+}
+
+void LLVMContext::emitError(const Instruction *I, StringRef ErrorStr) {
+ unsigned LocCookie = 0;
+ if (const MDNode *SrcLoc = I->getMetadata("srcloc")) {
+ if (SrcLoc->getNumOperands() != 0)
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(SrcLoc->getOperand(0)))
+ LocCookie = CI->getZExtValue();
+ }
+ return emitError(LocCookie, ErrorStr);
}
+void LLVMContext::emitError(unsigned LocCookie, StringRef ErrorStr) {
+ // If there is no error handler installed, just print the error and exit.
+ if (pImpl->InlineAsmDiagHandler == 0) {
+ errs() << "error: " << ErrorStr << "\n";
+ exit(1);
+ }
+
+ // If we do have an error handler, we can report the error and keep going.
+ SMDiagnostic Diag("", "error: " + ErrorStr.str());
+
+ ((SourceMgr::DiagHandlerTy)(intptr_t)pImpl->InlineAsmDiagHandler)
+ (Diag, pImpl->InlineAsmDiagContext, LocCookie);
+
+}
+
+//===----------------------------------------------------------------------===//
+// Metadata Kind Uniquing
+//===----------------------------------------------------------------------===//
+
+#ifndef NDEBUG
+/// isValidName - Return true if Name is a valid custom metadata handler name.
+static bool isValidName(StringRef MDName) {
+ if (MDName.empty())
+ return false;
+
+ if (!isalpha(MDName[0]))
+ return false;
+
+ for (StringRef::iterator I = MDName.begin() + 1, E = MDName.end(); I != E;
+ ++I) {
+ if (!isalnum(*I) && *I != '_' && *I != '-' && *I != '.')
+ return false;
+ }
+ return true;
+}
+#endif
+
+/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
+unsigned LLVMContext::getMDKindID(StringRef Name) const {
+ assert(isValidName(Name) && "Invalid MDNode name");
+
+ // If this is new, assign it its ID.
+ return
+ pImpl->CustomMDKindNames.GetOrCreateValue(
+ Name, pImpl->CustomMDKindNames.size()).second;
+}
+
+/// getHandlerNames - Populate a client-supplied SmallVector with the custom
+/// metadata kind names, indexed by their IDs.
+void LLVMContext::getMDKindNames(SmallVectorImpl<StringRef> &Names) const {
+ Names.resize(pImpl->CustomMDKindNames.size());
+ for (StringMap<unsigned>::const_iterator I = pImpl->CustomMDKindNames.begin(),
+ E = pImpl->CustomMDKindNames.end(); I != E; ++I)
+ Names[I->second] = I->first();
+}
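
getMDKindID above assigns each new kind name an ID equal to the map's current size, and the LLVMContext constructor registers "dbg" first so it always receives the reserved ID 0 (LLVMContext::MD_dbg). The same uniquing scheme with a standard map (a sketch, not the StringMap-based code above):

  #include <cassert>
  #include <string>
  #include <unordered_map>

  class KindTable {
    std::unordered_map<std::string, unsigned> IDs;
  public:
    KindTable() {
      unsigned DbgID = getID("dbg");     // reserve ID 0 for "dbg"
      assert(DbgID == 0 && "dbg kind id drifted");
      (void)DbgID;
    }
    // A known name keeps its ID; a new name gets the next free one.
    unsigned getID(const std::string &Name) {
      return IDs.emplace(Name, static_cast<unsigned>(IDs.size())).first->second;
    }
  };
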
diff --git a/libclamav/c++/llvm/lib/VMCore/LLVMContextImpl.cpp b/libclamav/c++/llvm/lib/VMCore/LLVMContextImpl.cpp
index b4553dd..93a075f 100644
--- a/libclamav/c++/llvm/lib/VMCore/LLVMContextImpl.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/LLVMContextImpl.cpp
@@ -13,6 +13,7 @@
#include "LLVMContextImpl.h"
#include <algorithm>
+using namespace llvm;
LLVMContextImpl::LLVMContextImpl(LLVMContext &C)
: TheTrueVal(0), TheFalseVal(0),
@@ -30,6 +31,9 @@ LLVMContextImpl::LLVMContextImpl(LLVMContext &C)
Int32Ty(C, 32),
Int64Ty(C, 64),
AlwaysOpaqueTy(new OpaqueType(C)) {
+ InlineAsmDiagHandler = 0;
+ InlineAsmDiagContext = 0;
+
// Make sure the AlwaysOpaqueTy stays alive as long as the Context.
AlwaysOpaqueTy->addRef();
OpaqueTypes.insert(AlwaysOpaqueTy);
@@ -53,14 +57,11 @@ LLVMContextImpl::~LLVMContextImpl() {
DropReferences());
std::for_each(StructConstants.map_begin(), StructConstants.map_end(),
DropReferences());
- std::for_each(UnionConstants.map_begin(), UnionConstants.map_end(),
- DropReferences());
std::for_each(VectorConstants.map_begin(), VectorConstants.map_end(),
DropReferences());
ExprConstants.freeConstants();
ArrayConstants.freeConstants();
StructConstants.freeConstants();
- UnionConstants.freeConstants();
VectorConstants.freeConstants();
AggZeroConstants.freeConstants();
NullPtrConstants.freeConstants();
diff --git a/libclamav/c++/llvm/lib/VMCore/LLVMContextImpl.h b/libclamav/c++/llvm/lib/VMCore/LLVMContextImpl.h
index 8666f45..51b2992 100644
--- a/libclamav/c++/llvm/lib/VMCore/LLVMContextImpl.h
+++ b/libclamav/c++/llvm/lib/VMCore/LLVMContextImpl.h
@@ -90,8 +90,33 @@ struct DenseMapAPFloatKeyInfo {
}
};
+/// DebugRecVH - This is a CallbackVH used to keep the Scope -> index maps
+/// up to date as MDNodes mutate. This class is implemented in DebugLoc.cpp.
+class DebugRecVH : public CallbackVH {
+ /// Ctx - This is the LLVM Context being referenced.
+ LLVMContextImpl *Ctx;
+
+ /// Idx - The index into either ScopeRecordIdx or ScopeInlinedAtRecords that
+ /// this reference lives in. If this is zero, then it represents a
+ /// non-canonical entry that has no DenseMap value. This can happen due to
+ /// RAUW.
+ int Idx;
+public:
+ DebugRecVH(MDNode *n, LLVMContextImpl *ctx, int idx)
+ : CallbackVH(n), Ctx(ctx), Idx(idx) {}
+
+ MDNode *get() const {
+ return cast_or_null<MDNode>(getValPtr());
+ }
+
+ virtual void deleted();
+ virtual void allUsesReplacedWith(Value *VNew);
+};
+
class LLVMContextImpl {
public:
+ void *InlineAsmDiagHandler, *InlineAsmDiagContext;
+
typedef DenseMap<DenseMapAPIntKeyInfo::KeyTy, ConstantInt*,
DenseMapAPIntKeyInfo> IntMapTy;
IntMapTy IntConstants;
@@ -119,16 +144,11 @@ public:
ConstantStruct, true /*largekey*/> StructConstantsTy;
StructConstantsTy StructConstants;
- typedef ConstantUniqueMap<Constant*, UnionType, ConstantUnion>
- UnionConstantsTy;
- UnionConstantsTy UnionConstants;
-
typedef ConstantUniqueMap<std::vector<Constant*>, VectorType,
ConstantVector> VectorConstantsTy;
VectorConstantsTy VectorConstants;
ConstantUniqueMap<char, PointerType, ConstantPointerNull> NullPtrConstants;
-
ConstantUniqueMap<char, Type, UndefValue> UndefValueConstants;
DenseMap<std::pair<Function*, BasicBlock*> , BlockAddress*> BlockAddresses;
@@ -168,7 +188,6 @@ public:
TypeMap<PointerValType, PointerType> PointerTypes;
TypeMap<FunctionValType, FunctionType> FunctionTypes;
TypeMap<StructValType, StructType> StructTypes;
- TypeMap<UnionValType, UnionType> UnionTypes;
TypeMap<IntegerValType, IntegerType> IntegerTypes;
// Opaque types are not structurally uniqued, so don't use TypeMap.
@@ -195,6 +214,27 @@ public:
/// context.
DenseMap<const Instruction *, MDMapTy> MetadataStore;
+ /// ScopeRecordIdx - This is the index in ScopeRecords for an MDNode scope
+ /// entry with no "inlined at" element.
+ DenseMap<MDNode*, int> ScopeRecordIdx;
+
+ /// ScopeRecords - These are the actual mdnodes (in a value handle) for an
+ /// index. The ValueHandle ensures that ScopeRecordIdx stays up to date if
+ /// the MDNode is RAUW'd.
+ std::vector<DebugRecVH> ScopeRecords;
+
+ /// ScopeInlinedAtIdx - This is the index in ScopeInlinedAtRecords for a
+ /// scope/inlined-at pair.
+ DenseMap<std::pair<MDNode*, MDNode*>, int> ScopeInlinedAtIdx;
+
+ /// ScopeInlinedAtRecords - These are the actual mdnodes (in value handles)
+ /// for an index. The ValueHandle ensures that ScopeINlinedAtIdx stays up
+ /// to date.
+ std::vector<std::pair<DebugRecVH, DebugRecVH> > ScopeInlinedAtRecords;
+
+ int getOrAddScopeRecordIdxEntry(MDNode *N, int ExistingIdx);
+ int getOrAddScopeInlinedAtIdxEntry(MDNode *Scope, MDNode *IA,int ExistingIdx);
+
LLVMContextImpl(LLVMContext &C);
~LLVMContextImpl();
};
diff --git a/libclamav/c++/llvm/lib/VMCore/LeaksContext.h b/libclamav/c++/llvm/lib/VMCore/LeaksContext.h
index abff090..b9e59d4 100644
--- a/libclamav/c++/llvm/lib/VMCore/LeaksContext.h
+++ b/libclamav/c++/llvm/lib/VMCore/LeaksContext.h
@@ -14,7 +14,8 @@
#include "llvm/Value.h"
#include "llvm/ADT/SmallPtrSet.h"
-using namespace llvm;
+
+namespace llvm {
template <class T>
struct PrinterTrait {
@@ -87,3 +88,5 @@ private:
const T* Cache;
const char* Name;
};
+
+}
diff --git a/libclamav/c++/llvm/lib/VMCore/Makefile b/libclamav/c++/llvm/lib/VMCore/Makefile
index 4395ecf..03a4fc7 100644
--- a/libclamav/c++/llvm/lib/VMCore/Makefile
+++ b/libclamav/c++/llvm/lib/VMCore/Makefile
@@ -1,4 +1,4 @@
-##===- lib/VMCore/Makefile ------------------------------*- Makefile -*-===##
+##===- lib/VMCore/Makefile ---------------------------------*- Makefile -*-===##
#
# The LLVM Compiler Infrastructure
#
diff --git a/libclamav/c++/llvm/lib/VMCore/Metadata.cpp b/libclamav/c++/llvm/lib/VMCore/Metadata.cpp
index faf83e6..da69c43 100644
--- a/libclamav/c++/llvm/lib/VMCore/Metadata.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/Metadata.cpp
@@ -20,6 +20,7 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/SmallString.h"
#include "SymbolTableListTraitsImpl.h"
+#include "llvm/Support/LeakDetector.h"
#include "llvm/Support/ValueHandle.h"
using namespace llvm;
@@ -39,15 +40,6 @@ MDString *MDString::get(LLVMContext &Context, StringRef Str) {
return S;
}
-MDString *MDString::get(LLVMContext &Context, const char *Str) {
- LLVMContextImpl *pImpl = Context.pImpl;
- StringMapEntry<MDString *> &Entry =
- pImpl->MDStringCache.GetOrCreateValue(Str ? StringRef(Str) : StringRef());
- MDString *&S = Entry.getValue();
- if (!S) S = new MDString(Context, Entry.getKey());
- return S;
-}
-
//===----------------------------------------------------------------------===//
// MDNodeOperand implementation.
//
@@ -87,7 +79,8 @@ void MDNodeOperand::allUsesReplacedWith(Value *NV) {
/// getOperandPtr - Helper function to get the MDNodeOperand's coallocated on
/// the end of the MDNode.
static MDNodeOperand *getOperandPtr(MDNode *N, unsigned Op) {
- assert(Op < N->getNumOperands() && "Invalid operand number");
+ // Use <= instead of < to permit a one-past-the-end address.
+ assert(Op <= N->getNumOperands() && "Invalid operand number");
return reinterpret_cast<MDNodeOperand*>(N+1)+Op;
}
@@ -124,14 +117,17 @@ MDNode::~MDNode() {
}
static const Function *getFunctionForValue(Value *V) {
- assert(!isa<MDNode>(V) && "does not iterate over metadata operands");
if (!V) return NULL;
- if (Instruction *I = dyn_cast<Instruction>(V))
- return I->getParent()->getParent();
- if (BasicBlock *BB = dyn_cast<BasicBlock>(V))
- return BB->getParent();
+ if (Instruction *I = dyn_cast<Instruction>(V)) {
+ BasicBlock *BB = I->getParent();
+ return BB ? BB->getParent() : 0;
+ }
if (Argument *A = dyn_cast<Argument>(V))
return A->getParent();
+ if (BasicBlock *BB = dyn_cast<BasicBlock>(V))
+ return BB->getParent();
+ if (MDNode *MD = dyn_cast<MDNode>(V))
+ return MD->getFunction();
return NULL;
}
@@ -139,6 +135,7 @@ static const Function *getFunctionForValue(Value *V) {
static const Function *assertLocalFunction(const MDNode *N) {
if (!N->isFunctionLocal()) return 0;
+ // FIXME: This does not handle cyclic function local metadata.
const Function *F = 0, *NewF = 0;
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
if (Value *V = N->getOperand(i)) {
@@ -165,17 +162,9 @@ const Function *MDNode::getFunction() const {
return assertLocalFunction(this);
#endif
if (!isFunctionLocal()) return NULL;
-
- for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
- if (Value *V = getOperand(i)) {
- if (MDNode *MD = dyn_cast<MDNode>(V)) {
- if (const Function *F = MD->getFunction())
- return F;
- } else {
- return getFunctionForValue(V);
- }
- }
- }
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
+ if (const Function *F = getFunctionForValue(getOperand(i)))
+ return F;
return NULL;
}
@@ -187,10 +176,22 @@ void MDNode::destroy() {
free(this);
}
+/// isFunctionLocalValue - Return true if this is a value that would require a
+/// function-local MDNode.
+static bool isFunctionLocalValue(Value *V) {
+ return isa<Instruction>(V) || isa<Argument>(V) || isa<BasicBlock>(V) ||
+ (isa<MDNode>(V) && cast<MDNode>(V)->isFunctionLocal());
+}
+
MDNode *MDNode::getMDNode(LLVMContext &Context, Value *const *Vals,
unsigned NumVals, FunctionLocalness FL,
bool Insert) {
LLVMContextImpl *pImpl = Context.pImpl;
+
+ // Add all the operand pointers. Note that we don't have to add the
+ // isFunctionLocal bit because that's implied by the operands.
+ // Note that if the operands are later nulled out, the node will be
+ // removed from the uniquing map.
FoldingSetNodeID ID;
for (unsigned i = 0; i != NumVals; ++i)
ID.AddPointer(Vals[i]);
@@ -201,17 +202,13 @@ MDNode *MDNode::getMDNode(LLVMContext &Context, Value *const *Vals,
if ((N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint)))
return N;
- if (!Insert)
- return NULL;
-
bool isFunctionLocal = false;
switch (FL) {
case FL_Unknown:
for (unsigned i = 0; i != NumVals; ++i) {
Value *V = Vals[i];
if (!V) continue;
- if (isa<Instruction>(V) || isa<Argument>(V) || isa<BasicBlock>(V) ||
- (isa<MDNode>(V) && cast<MDNode>(V)->isFunctionLocal())) {
+ if (isFunctionLocalValue(V)) {
isFunctionLocal = true;
break;
}
@@ -249,12 +246,40 @@ MDNode *MDNode::getIfExists(LLVMContext &Context, Value *const *Vals,
return getMDNode(Context, Vals, NumVals, FL_Unknown, false);
}
+MDNode *MDNode::getTemporary(LLVMContext &Context, Value *const *Vals,
+ unsigned NumVals) {
+ MDNode *N = (MDNode *)malloc(sizeof(MDNode)+NumVals*sizeof(MDNodeOperand));
+ N = new (N) MDNode(Context, Vals, NumVals, FL_No);
+ N->setValueSubclassData(N->getSubclassDataFromValue() |
+ NotUniquedBit);
+ LeakDetector::addGarbageObject(N);
+ return N;
+}
+
+void MDNode::deleteTemporary(MDNode *N) {
+ assert(N->use_empty() && "Temporary MDNode has uses!");
+ assert(!N->getContext().pImpl->MDNodeSet.RemoveNode(N) &&
+ "Deleting a non-temporary uniqued node!");
+ assert(!N->getContext().pImpl->NonUniquedMDNodes.erase(N) &&
+ "Deleting a non-temporary non-uniqued node!");
+ assert((N->getSubclassDataFromValue() & NotUniquedBit) &&
+ "Temporary MDNode does not have NotUniquedBit set!");
+ assert((N->getSubclassDataFromValue() & DestroyFlag) == 0 &&
+ "Temporary MDNode has DestroyFlag set!");
+ LeakDetector::removeGarbageObject(N);
+ N->destroy();
+}
+
/// getOperand - Return specified operand.
Value *MDNode::getOperand(unsigned i) const {
return *getOperandPtr(const_cast<MDNode*>(this), i);
}
void MDNode::Profile(FoldingSetNodeID &ID) const {
+ // Add all the operand pointers. Note that we don't have to add the
+ // isFunctionLocal bit because that's implied by the operands.
+ // Note that if the operands are later nulled out, the node will be
+ // removed from the uniquing map.
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
ID.AddPointer(getOperand(i));
}
@@ -269,6 +294,24 @@ void MDNode::setIsNotUniqued() {
void MDNode::replaceOperand(MDNodeOperand *Op, Value *To) {
Value *From = *Op;
+ // It is possible that someone did GV->RAUW(inst), replacing a global variable
+ // with an instruction or some other function-local object. If this is a
+ // non-function-local MDNode, it can't point to a function-local object.
+ // Handle this case by implicitly dropping the MDNode reference to null.
+ // Likewise if the MDNode is function-local but for a different function.
+ if (To && isFunctionLocalValue(To)) {
+ if (!isFunctionLocal())
+ To = 0;
+ else {
+ const Function *F = getFunction();
+ const Function *FV = getFunctionForValue(To);
+ // Metadata can be function-local without having an associated function.
+ // So only consider functions to have changed if non-null.
+ if (F && FV && F != FV)
+ To = 0;
+ }
+ }
+
if (From == To)
return;
@@ -287,7 +330,8 @@ void MDNode::replaceOperand(MDNodeOperand *Op, Value *To) {
// If we are dropping an argument to null, we choose to not unique the MDNode
// anymore. This commonly occurs during destruction, and uniquing these
- // brings little reuse.
+ // brings little reuse. Also, this means we don't need to include
+ // isFunctionLocal bits in FoldingSetNodeIDs for MDNodes.
if (To == 0) {
setIsNotUniqued();
return;
@@ -310,59 +354,35 @@ void MDNode::replaceOperand(MDNodeOperand *Op, Value *To) {
// InsertPoint will have been set by the FindNodeOrInsertPos call.
pImpl->MDNodeSet.InsertNode(this, InsertPoint);
+
+ // If this MDValue was previously function-local but no longer is, clear
+ // its function-local flag.
+ if (isFunctionLocal() && !isFunctionLocalValue(To)) {
+ bool isStillFunctionLocal = false;
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ Value *V = getOperand(i);
+ if (!V) continue;
+ if (isFunctionLocalValue(V)) {
+ isStillFunctionLocal = true;
+ break;
+ }
+ }
+ if (!isStillFunctionLocal)
+ setValueSubclassData(getSubclassDataFromValue() & ~FunctionLocalBit);
+ }
}
//===----------------------------------------------------------------------===//
// NamedMDNode implementation.
//
-namespace llvm {
-// SymbolTableListTraits specialization for MDSymbolTable.
-void ilist_traits<NamedMDNode>
-::addNodeToList(NamedMDNode *N) {
- assert(N->getParent() == 0 && "Value already in a container!!");
- Module *Owner = getListOwner();
- N->setParent(Owner);
- MDSymbolTable &ST = Owner->getMDSymbolTable();
- ST.insert(N->getName(), N);
-}
-
-void ilist_traits<NamedMDNode>::removeNodeFromList(NamedMDNode *N) {
- N->setParent(0);
- Module *Owner = getListOwner();
- MDSymbolTable &ST = Owner->getMDSymbolTable();
- ST.remove(N->getName());
-}
+static SmallVector<TrackingVH<MDNode>, 4> &getNMDOps(void *Operands) {
+ return *(SmallVector<TrackingVH<MDNode>, 4>*)Operands;
}
-static SmallVector<WeakVH, 4> &getNMDOps(void *Operands) {
- return *(SmallVector<WeakVH, 4>*)Operands;
-}
-
-NamedMDNode::NamedMDNode(LLVMContext &C, const Twine &N,
- MDNode *const *MDs,
- unsigned NumMDs, Module *ParentModule)
- : Value(Type::getMetadataTy(C), Value::NamedMDNodeVal), Parent(0) {
- setName(N);
- Operands = new SmallVector<WeakVH, 4>();
-
- SmallVector<WeakVH, 4> &Node = getNMDOps(Operands);
- for (unsigned i = 0; i != NumMDs; ++i)
- Node.push_back(WeakVH(MDs[i]));
-
- if (ParentModule)
- ParentModule->getNamedMDList().push_back(this);
-}
-
-NamedMDNode *NamedMDNode::Create(const NamedMDNode *NMD, Module *M) {
- assert(NMD && "Invalid source NamedMDNode!");
- SmallVector<MDNode *, 4> Elems;
- Elems.reserve(NMD->getNumOperands());
-
- for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
- Elems.push_back(NMD->getOperand(i));
- return new NamedMDNode(NMD->getContext(), NMD->getName().data(),
- Elems.data(), Elems.size(), M);
+NamedMDNode::NamedMDNode(const Twine &N)
+ : Name(N.str()), Parent(0),
+ Operands(new SmallVector<TrackingVH<MDNode>, 4>()) {
}
NamedMDNode::~NamedMDNode() {
@@ -378,18 +398,20 @@ unsigned NamedMDNode::getNumOperands() const {
/// getOperand - Return specified operand.
MDNode *NamedMDNode::getOperand(unsigned i) const {
assert(i < getNumOperands() && "Invalid Operand number!");
- return dyn_cast_or_null<MDNode>(getNMDOps(Operands)[i]);
+ return dyn_cast<MDNode>(&*getNMDOps(Operands)[i]);
}
/// addOperand - Add metadata Operand.
void NamedMDNode::addOperand(MDNode *M) {
- getNMDOps(Operands).push_back(WeakVH(M));
+ assert(!M->isFunctionLocal() &&
+ "NamedMDNode operands must not be function-local!");
+ getNMDOps(Operands).push_back(TrackingVH<MDNode>(M));
}
/// eraseFromParent - Drop all references and remove the node from parent
/// module.
void NamedMDNode::eraseFromParent() {
- getParent()->getNamedMDList().erase(this);
+ getParent()->eraseNamedMetadata(this);
}
/// dropAllReferences - Remove all uses and clear node vector.
@@ -397,72 +419,12 @@ void NamedMDNode::dropAllReferences() {
getNMDOps(Operands).clear();
}
-/// setName - Set the name of this named metadata.
-void NamedMDNode::setName(const Twine &NewName) {
- assert (!NewName.isTriviallyEmpty() && "Invalid named metadata name!");
-
- SmallString<256> NameData;
- StringRef NameRef = NewName.toStringRef(NameData);
-
- // Name isn't changing?
- if (getName() == NameRef)
- return;
-
- Name = NameRef.str();
- if (Parent)
- Parent->getMDSymbolTable().insert(NameRef, this);
-}
-
/// getName - Return a constant reference to this named metadata's name.
StringRef NamedMDNode::getName() const {
return StringRef(Name);
}
//===----------------------------------------------------------------------===//
-// LLVMContext MDKind naming implementation.
-//
-
-#ifndef NDEBUG
-/// isValidName - Return true if Name is a valid custom metadata handler name.
-static bool isValidName(StringRef MDName) {
- if (MDName.empty())
- return false;
-
- if (!isalpha(MDName[0]))
- return false;
-
- for (StringRef::iterator I = MDName.begin() + 1, E = MDName.end(); I != E;
- ++I) {
- if (!isalnum(*I) && *I != '_' && *I != '-' && *I != '.')
- return false;
- }
- return true;
-}
-#endif
-
-/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
-unsigned LLVMContext::getMDKindID(StringRef Name) const {
- assert(isValidName(Name) && "Invalid MDNode name");
-
- unsigned &Entry = pImpl->CustomMDKindNames[Name];
-
- // If this is new, assign it its ID.
- if (Entry == 0) Entry = pImpl->CustomMDKindNames.size();
- return Entry;
-}
-
-/// getHandlerNames - Populate client supplied smallvector using custome
-/// metadata name and ID.
-void LLVMContext::getMDKindNames(SmallVectorImpl<StringRef> &Names) const {
- Names.resize(pImpl->CustomMDKindNames.size()+1);
- Names[0] = "";
- for (StringMap<unsigned>::const_iterator I = pImpl->CustomMDKindNames.begin(),
- E = pImpl->CustomMDKindNames.end(); I != E; ++I)
- // MD Handlers are numbered from 1.
- Names[I->second] = I->first();
-}
-
-//===----------------------------------------------------------------------===//
// Instruction Metadata method implementations.
//
@@ -481,12 +443,19 @@ MDNode *Instruction::getMetadataImpl(const char *Kind) const {
void Instruction::setMetadata(unsigned KindID, MDNode *Node) {
if (Node == 0 && !hasMetadata()) return;
+ // Handle 'dbg' as a special case since it is not stored in the hash table.
+ if (KindID == LLVMContext::MD_dbg) {
+ DbgLoc = DebugLoc::getFromDILocation(Node);
+ return;
+ }
+
// Handle the case when we're adding/updating metadata on an instruction.
if (Node) {
LLVMContextImpl::MDMapTy &Info = getContext().pImpl->MetadataStore[this];
- assert(!Info.empty() == hasMetadata() && "HasMetadata bit is wonked");
+ assert(!Info.empty() == hasMetadataHashEntry() &&
+ "HasMetadata bit is wonked");
if (Info.empty()) {
- setHasMetadata(true);
+ setHasMetadataHashEntry(true);
} else {
// Handle replacement of an existing value.
for (unsigned i = 0, e = Info.size(); i != e; ++i)
@@ -502,18 +471,19 @@ void Instruction::setMetadata(unsigned KindID, MDNode *Node) {
}
// Otherwise, we're removing metadata from an instruction.
- assert(hasMetadata() && getContext().pImpl->MetadataStore.count(this) &&
+ assert(hasMetadataHashEntry() &&
+ getContext().pImpl->MetadataStore.count(this) &&
"HasMetadata bit out of date!");
LLVMContextImpl::MDMapTy &Info = getContext().pImpl->MetadataStore[this];
// Common case is removing the only entry.
if (Info.size() == 1 && Info[0].first == KindID) {
getContext().pImpl->MetadataStore.erase(this);
- setHasMetadata(false);
+ setHasMetadataHashEntry(false);
return;
}
- // Handle replacement of an existing value.
+ // Handle removal of an existing value.
for (unsigned i = 0, e = Info.size(); i != e; ++i)
if (Info[i].first == KindID) {
Info[i] = Info.back();
@@ -525,8 +495,14 @@ void Instruction::setMetadata(unsigned KindID, MDNode *Node) {
}
MDNode *Instruction::getMetadataImpl(unsigned KindID) const {
+ // Handle 'dbg' as a special case since it is not stored in the hash table.
+ if (KindID == LLVMContext::MD_dbg)
+ return DbgLoc.getAsMDNode(getContext());
+
+ if (!hasMetadataHashEntry()) return 0;
+
LLVMContextImpl::MDMapTy &Info = getContext().pImpl->MetadataStore[this];
- assert(hasMetadata() && !Info.empty() && "Shouldn't have called this");
+ assert(!Info.empty() && "bit out of sync with hash table");
for (LLVMContextImpl::MDMapTy::iterator I = Info.begin(), E = Info.end();
I != E; ++I)
@@ -536,14 +512,23 @@ MDNode *Instruction::getMetadataImpl(unsigned KindID) const {
}
void Instruction::getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned,
- MDNode*> > &Result)const {
- assert(hasMetadata() && getContext().pImpl->MetadataStore.count(this) &&
+ MDNode*> > &Result) const {
+ Result.clear();
+
+ // Handle 'dbg' as a special case since it is not stored in the hash table.
+ if (!DbgLoc.isUnknown()) {
+ Result.push_back(std::make_pair((unsigned)LLVMContext::MD_dbg,
+ DbgLoc.getAsMDNode(getContext())));
+ if (!hasMetadataHashEntry()) return;
+ }
+
+ assert(hasMetadataHashEntry() &&
+ getContext().pImpl->MetadataStore.count(this) &&
"Shouldn't have called this");
const LLVMContextImpl::MDMapTy &Info =
getContext().pImpl->MetadataStore.find(this)->second;
assert(!Info.empty() && "Shouldn't have called this");
- Result.clear();
Result.append(Info.begin(), Info.end());
// Sort the resulting array so it is stable.
@@ -551,10 +536,30 @@ void Instruction::getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned,
array_pod_sort(Result.begin(), Result.end());
}
-/// removeAllMetadata - Remove all metadata from this instruction.
-void Instruction::removeAllMetadata() {
- assert(hasMetadata() && "Caller should check");
+void Instruction::
+getAllMetadataOtherThanDebugLocImpl(SmallVectorImpl<std::pair<unsigned,
+ MDNode*> > &Result) const {
+ Result.clear();
+ assert(hasMetadataHashEntry() &&
+ getContext().pImpl->MetadataStore.count(this) &&
+ "Shouldn't have called this");
+ const LLVMContextImpl::MDMapTy &Info =
+ getContext().pImpl->MetadataStore.find(this)->second;
+ assert(!Info.empty() && "Shouldn't have called this");
+
+ Result.append(Info.begin(), Info.end());
+
+ // Sort the resulting array so it is stable.
+ if (Result.size() > 1)
+ array_pod_sort(Result.begin(), Result.end());
+}
+
+
+/// clearMetadataHashEntries - Clear all hashtable-based metadata from
+/// this instruction.
+void Instruction::clearMetadataHashEntries() {
+ assert(hasMetadataHashEntry() && "Caller should check");
getContext().pImpl->MetadataStore.erase(this);
- setHasMetadata(false);
+ setHasMetadataHashEntry(false);
}
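
The Instruction metadata changes above route the very common 'dbg' attachment through a dedicated DbgLoc field and keep only the remaining kinds in the per-context hash table, with hasMetadataHashEntry() saying whether that table holds an entry for the instruction. A compact sketch of the same split with toy types (not LLVM's; the removal path is omitted):

  #include <unordered_map>
  #include <utility>
  #include <vector>

  struct ToyNode {};                              // stands in for MDNode

  struct ToyInstr {
    static const unsigned MD_dbg = 0;             // reserved kind, as in the patch
    const ToyNode *DbgLoc = nullptr;              // 'dbg' fast path, never hashed
    bool HasHashEntry = false;                    // mirrors hasMetadataHashEntry()
  };

  typedef std::vector<std::pair<unsigned, const ToyNode *>> MDMap;
  static std::unordered_map<const ToyInstr *, MDMap> MetadataStore;

  static void setMetadata(ToyInstr &I, unsigned Kind, const ToyNode *N) {
    if (Kind == ToyInstr::MD_dbg) { I.DbgLoc = N; return; }    // special case
    MDMap &Info = MetadataStore[&I];
    for (std::pair<unsigned, const ToyNode *> &Entry : Info)
      if (Entry.first == Kind) { Entry.second = N; return; }   // replace in place
    Info.push_back(std::make_pair(Kind, N));
    I.HasHashEntry = true;
  }

  static const ToyNode *getMetadata(const ToyInstr &I, unsigned Kind) {
    if (Kind == ToyInstr::MD_dbg) return I.DbgLoc;
    if (!I.HasHashEntry) return nullptr;                       // nothing in the table
    for (const std::pair<unsigned, const ToyNode *> &Entry : MetadataStore[&I])
      if (Entry.first == Kind) return Entry.second;
    return nullptr;
  }
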
diff --git a/libclamav/c++/llvm/lib/VMCore/Module.cpp b/libclamav/c++/llvm/lib/VMCore/Module.cpp
index 001bb00..d7ddf96 100644
--- a/libclamav/c++/llvm/lib/VMCore/Module.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/Module.cpp
@@ -17,6 +17,7 @@
#include "llvm/DerivedTypes.h"
#include "llvm/GVMaterializer.h"
#include "llvm/LLVMContext.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/LeakDetector.h"
@@ -57,10 +58,10 @@ template class llvm::SymbolTableListTraits<GlobalAlias, Module>;
//
Module::Module(StringRef MID, LLVMContext& C)
- : Context(C), Materializer(NULL), ModuleID(MID), DataLayout("") {
+ : Context(C), Materializer(NULL), ModuleID(MID) {
ValSymTab = new ValueSymbolTable();
TypeSymTab = new TypeSymbolTable();
- NamedMDSymTab = new MDSymbolTable();
+ NamedMDSymTab = new StringMap<NamedMDNode *>();
}
Module::~Module() {
@@ -72,7 +73,7 @@ Module::~Module() {
NamedMDList.clear();
delete ValSymTab;
delete TypeSymTab;
- delete NamedMDSymTab;
+ delete static_cast<StringMap<NamedMDNode *> *>(NamedMDSymTab);
}
/// Target endian information...
@@ -82,7 +83,7 @@ Module::Endianness Module::getEndianness() const {
while (!temp.empty()) {
StringRef token = DataLayout;
- tie(token, temp) = getToken(DataLayout, "-");
+ tie(token, temp) = getToken(temp, "-");
if (token[0] == 'e') {
ret = LittleEndian;
@@ -311,21 +312,32 @@ GlobalAlias *Module::getNamedAlias(StringRef Name) const {
/// getNamedMetadata - Return the first NamedMDNode in the module with the
/// specified name. This method returns null if a NamedMDNode with the
-//// specified name is not found.
-NamedMDNode *Module::getNamedMetadata(StringRef Name) const {
- return NamedMDSymTab->lookup(Name);
+/// specified name is not found.
+NamedMDNode *Module::getNamedMetadata(const Twine &Name) const {
+ SmallString<256> NameData;
+ StringRef NameRef = Name.toStringRef(NameData);
+ return static_cast<StringMap<NamedMDNode*> *>(NamedMDSymTab)->lookup(NameRef);
}
/// getOrInsertNamedMetadata - Return the first named MDNode in the module
/// with the specified name. This method returns a new NamedMDNode if a
/// NamedMDNode with the specified name is not found.
NamedMDNode *Module::getOrInsertNamedMetadata(StringRef Name) {
- NamedMDNode *NMD = NamedMDSymTab->lookup(Name);
- if (!NMD)
- NMD = NamedMDNode::Create(getContext(), Name, NULL, 0, this);
+ NamedMDNode *&NMD =
+ (*static_cast<StringMap<NamedMDNode *> *>(NamedMDSymTab))[Name];
+ if (!NMD) {
+ NMD = new NamedMDNode(Name);
+ NMD->setParent(this);
+ NamedMDList.push_back(NMD);
+ }
return NMD;
}
+void Module::eraseNamedMetadata(NamedMDNode *NMD) {
+ static_cast<StringMap<NamedMDNode *> *>(NamedMDSymTab)->erase(NMD->getName());
+ NamedMDList.erase(NMD);
+}
+
//===----------------------------------------------------------------------===//
// Methods for easy access to the types in the module.
//
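
A short usage sketch for the reworked named-metadata interface above, again assuming the LLVM 2.8 headers in this tree; the recordNote helper and the "my.notes" list name are illustrative:

    #include <cassert>
    #include "llvm/LLVMContext.h"
    #include "llvm/Metadata.h"
    #include "llvm/Module.h"
    using namespace llvm;

    void recordNote(Module &M, MDNode *Note) {
      // Created on first use; later calls return the same node, as the
      // StringMap-backed getOrInsertNamedMetadata() above guarantees.
      NamedMDNode *NMD = M.getOrInsertNamedMetadata("my.notes");
      NMD->addOperand(Note);

      // Lookup by name now goes through the same opaque StringMap.
      assert(M.getNamedMetadata("my.notes") == NMD);
    }
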
diff --git a/libclamav/c++/llvm/lib/VMCore/Pass.cpp b/libclamav/c++/llvm/lib/VMCore/Pass.cpp
index a782e5a..a7d7f61 100644
--- a/libclamav/c++/llvm/lib/VMCore/Pass.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/Pass.cpp
@@ -14,26 +14,19 @@
//===----------------------------------------------------------------------===//
#include "llvm/Pass.h"
-#include "llvm/PassManager.h"
-#include "llvm/Module.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringMap.h"
+#include "llvm/PassRegistry.h"
+#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/PassNameParser.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Atomic.h"
-#include "llvm/System/Mutex.h"
-#include "llvm/System/Threading.h"
-#include <algorithm>
-#include <map>
-#include <set>
using namespace llvm;
//===----------------------------------------------------------------------===//
// Pass Implementation
//
+Pass::Pass(PassKind K, char &pid) : Resolver(0), PassID(&pid), Kind(K) { }
+
// Force out-of-line virtual method.
Pass::~Pass() {
delete Resolver;
@@ -42,12 +35,17 @@ Pass::~Pass() {
// Force out-of-line virtual method.
ModulePass::~ModulePass() { }
+Pass *ModulePass::createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const {
+ return createPrintModulePass(&O, false, Banner);
+}
+
PassManagerType ModulePass::getPotentialPassManagerType() const {
return PMT_ModulePassManager;
}
-bool Pass::mustPreserveAnalysisID(const PassInfo *AnalysisID) const {
- return Resolver->getAnalysisIfAvailable(AnalysisID, true) != 0;
+bool Pass::mustPreserveAnalysisID(char &AID) const {
+ return Resolver->getAnalysisIfAvailable(&AID, true) != 0;
}
// dumpPassStructure - Implement the -debug-passes=Structure option
@@ -60,7 +58,9 @@ void Pass::dumpPassStructure(unsigned Offset) {
/// Registration templates, but can be overloaded directly.
///
const char *Pass::getPassName() const {
- if (const PassInfo *PI = getPassInfo())
+ AnalysisID AID = getPassID();
+ const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(AID);
+ if (PI)
return PI->getPassName();
return "Unnamed pass: implement Pass::getPassName()";
}
@@ -86,6 +86,23 @@ void Pass::verifyAnalysis() const {
// By default, don't do anything.
}
+void *Pass::getAdjustedAnalysisPointer(AnalysisID AID) {
+ return this;
+}
+
+ImmutablePass *Pass::getAsImmutablePass() {
+ return 0;
+}
+
+PMDataManager *Pass::getAsPMDataManager() {
+ return 0;
+}
+
+void Pass::setResolver(AnalysisResolver *AR) {
+ assert(!Resolver && "Resolver is already set");
+ Resolver = AR;
+}
+
// print - Print out the internal state of the pass. This is called by Analyze
// to print out the contents of an analysis. Otherwise it is not necessary to
// implement this method.
@@ -113,28 +130,9 @@ void ImmutablePass::initializePass() {
// FunctionPass Implementation
//
-// run - On a module, we run this pass by initializing, runOnFunction'ing once
-// for every function in the module, then by finalizing.
-//
-bool FunctionPass::runOnModule(Module &M) {
- bool Changed = doInitialization(M);
-
- for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
- if (!I->isDeclaration()) // Passes are not run on external functions!
- Changed |= runOnFunction(*I);
-
- return Changed | doFinalization(M);
-}
-
-// run - On a function, we simply initialize, run the function, then finalize.
-//
-bool FunctionPass::run(Function &F) {
- // Passes are not run on external functions!
- if (F.isDeclaration()) return false;
-
- bool Changed = doInitialization(*F.getParent());
- Changed |= runOnFunction(F);
- return Changed | doFinalization(*F.getParent());
+Pass *FunctionPass::createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const {
+ return createPrintFunctionPass(Banner, &O);
}
bool FunctionPass::doInitialization(Module &) {
@@ -155,14 +153,11 @@ PassManagerType FunctionPass::getPotentialPassManagerType() const {
// BasicBlockPass Implementation
//
-// To run this pass on a function, we simply call runOnBasicBlock once for each
-// function.
-//
-bool BasicBlockPass::runOnFunction(Function &F) {
- bool Changed = doInitialization(F);
- for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
- Changed |= runOnBasicBlock(*I);
- return Changed | doFinalization(F);
+Pass *BasicBlockPass::createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const {
+
+ llvm_unreachable("BasicBlockPass printing unsupported.");
+ return 0;
}
bool BasicBlockPass::doInitialization(Module &) {
@@ -189,149 +184,20 @@ PassManagerType BasicBlockPass::getPotentialPassManagerType() const {
return PMT_BasicBlockPassManager;
}
-//===----------------------------------------------------------------------===//
-// Pass Registration mechanism
-//
-namespace {
-class PassRegistrar {
- /// Guards the contents of this class.
- mutable sys::SmartMutex<true> Lock;
-
- /// PassInfoMap - Keep track of the passinfo object for each registered llvm
- /// pass.
- typedef std::map<intptr_t, const PassInfo*> MapType;
- MapType PassInfoMap;
-
- typedef StringMap<const PassInfo*> StringMapType;
- StringMapType PassInfoStringMap;
-
- /// AnalysisGroupInfo - Keep track of information for each analysis group.
- struct AnalysisGroupInfo {
- std::set<const PassInfo *> Implementations;
- };
-
- /// AnalysisGroupInfoMap - Information for each analysis group.
- std::map<const PassInfo *, AnalysisGroupInfo> AnalysisGroupInfoMap;
-
-public:
-
- const PassInfo *GetPassInfo(intptr_t TI) const {
- sys::SmartScopedLock<true> Guard(Lock);
- MapType::const_iterator I = PassInfoMap.find(TI);
- return I != PassInfoMap.end() ? I->second : 0;
- }
-
- const PassInfo *GetPassInfo(StringRef Arg) const {
- sys::SmartScopedLock<true> Guard(Lock);
- StringMapType::const_iterator I = PassInfoStringMap.find(Arg);
- return I != PassInfoStringMap.end() ? I->second : 0;
- }
-
- void RegisterPass(const PassInfo &PI) {
- sys::SmartScopedLock<true> Guard(Lock);
- bool Inserted =
- PassInfoMap.insert(std::make_pair(PI.getTypeInfo(),&PI)).second;
- assert(Inserted && "Pass registered multiple times!"); Inserted=Inserted;
- PassInfoStringMap[PI.getPassArgument()] = &PI;
- }
-
- void UnregisterPass(const PassInfo &PI) {
- sys::SmartScopedLock<true> Guard(Lock);
- MapType::iterator I = PassInfoMap.find(PI.getTypeInfo());
- assert(I != PassInfoMap.end() && "Pass registered but not in map!");
-
- // Remove pass from the map.
- PassInfoMap.erase(I);
- PassInfoStringMap.erase(PI.getPassArgument());
- }
-
- void EnumerateWith(PassRegistrationListener *L) {
- sys::SmartScopedLock<true> Guard(Lock);
- for (MapType::const_iterator I = PassInfoMap.begin(),
- E = PassInfoMap.end(); I != E; ++I)
- L->passEnumerate(I->second);
- }
-
-
- /// Analysis Group Mechanisms.
- void RegisterAnalysisGroup(PassInfo *InterfaceInfo,
- const PassInfo *ImplementationInfo,
- bool isDefault) {
- sys::SmartScopedLock<true> Guard(Lock);
- AnalysisGroupInfo &AGI = AnalysisGroupInfoMap[InterfaceInfo];
- assert(AGI.Implementations.count(ImplementationInfo) == 0 &&
- "Cannot add a pass to the same analysis group more than once!");
- AGI.Implementations.insert(ImplementationInfo);
- if (isDefault) {
- assert(InterfaceInfo->getNormalCtor() == 0 &&
- "Default implementation for analysis group already specified!");
- assert(ImplementationInfo->getNormalCtor() &&
- "Cannot specify pass as default if it does not have a default ctor");
- InterfaceInfo->setNormalCtor(ImplementationInfo->getNormalCtor());
- }
- }
-};
-}
-
-static std::vector<PassRegistrationListener*> *Listeners = 0;
-static sys::SmartMutex<true> ListenersLock;
-
-// FIXME: This should use ManagedStatic to manage the pass registrar.
-// Unfortunately, we can't do this, because passes are registered with static
-// ctors, and having llvm_shutdown clear this map prevents successful
-// ressurection after llvm_shutdown is run.
-static PassRegistrar *getPassRegistrar() {
- static PassRegistrar *PassRegistrarObj = 0;
-
- // Use double-checked locking to safely initialize the registrar when
- // we're running in multithreaded mode.
- PassRegistrar* tmp = PassRegistrarObj;
- if (llvm_is_multithreaded()) {
- sys::MemoryFence();
- if (!tmp) {
- llvm_acquire_global_lock();
- tmp = PassRegistrarObj;
- if (!tmp) {
- tmp = new PassRegistrar();
- sys::MemoryFence();
- PassRegistrarObj = tmp;
- }
- llvm_release_global_lock();
- }
- } else if (!tmp) {
- PassRegistrarObj = new PassRegistrar();
- }
-
- return PassRegistrarObj;
-}
-
-// getPassInfo - Return the PassInfo data structure that corresponds to this
-// pass...
-const PassInfo *Pass::getPassInfo() const {
- return lookupPassInfo(PassID);
-}
-
-const PassInfo *Pass::lookupPassInfo(intptr_t TI) {
- return getPassRegistrar()->GetPassInfo(TI);
+const PassInfo *Pass::lookupPassInfo(const void *TI) {
+ return PassRegistry::getPassRegistry()->getPassInfo(TI);
}
const PassInfo *Pass::lookupPassInfo(StringRef Arg) {
- return getPassRegistrar()->GetPassInfo(Arg);
+ return PassRegistry::getPassRegistry()->getPassInfo(Arg);
}
-void PassInfo::registerPass() {
- getPassRegistrar()->RegisterPass(*this);
-
- // Notify any listeners.
- sys::SmartScopedLock<true> Lock(ListenersLock);
- if (Listeners)
- for (std::vector<PassRegistrationListener*>::iterator
- I = Listeners->begin(), E = Listeners->end(); I != E; ++I)
- (*I)->passRegistered(this);
-}
-
-void PassInfo::unregisterPass() {
- getPassRegistrar()->UnregisterPass(*this);
+Pass *PassInfo::createPass() const {
+ assert((!isAnalysisGroup() || NormalCtor) &&
+ "No default implementation found for analysis group!");
+ assert(NormalCtor &&
+ "Cannot call createPass on PassInfo without default ctor!");
+ return NormalCtor();
}
//===----------------------------------------------------------------------===//
@@ -340,32 +206,11 @@ void PassInfo::unregisterPass() {
// RegisterAGBase implementation
//
-RegisterAGBase::RegisterAGBase(const char *Name, intptr_t InterfaceID,
- intptr_t PassID, bool isDefault)
- : PassInfo(Name, InterfaceID) {
-
- PassInfo *InterfaceInfo =
- const_cast<PassInfo*>(Pass::lookupPassInfo(InterfaceID));
- if (InterfaceInfo == 0) {
- // First reference to Interface, register it now.
- registerPass();
- InterfaceInfo = this;
- }
- assert(isAnalysisGroup() &&
- "Trying to join an analysis group that is a normal pass!");
-
- if (PassID) {
- const PassInfo *ImplementationInfo = Pass::lookupPassInfo(PassID);
- assert(ImplementationInfo &&
- "Must register pass before adding to AnalysisGroup!");
-
- // Make sure we keep track of the fact that the implementation implements
- // the interface.
- PassInfo *IIPI = const_cast<PassInfo*>(ImplementationInfo);
- IIPI->addInterfaceImplemented(InterfaceInfo);
-
- getPassRegistrar()->RegisterAnalysisGroup(InterfaceInfo, IIPI, isDefault);
- }
+RegisterAGBase::RegisterAGBase(const char *Name, const void *InterfaceID,
+ const void *PassID, bool isDefault)
+ : PassInfo(Name, InterfaceID) {
+ PassRegistry::getPassRegistry()->registerAnalysisGroup(InterfaceID, PassID,
+ *this, isDefault);
}
@@ -376,31 +221,19 @@ RegisterAGBase::RegisterAGBase(const char *Name, intptr_t InterfaceID,
// PassRegistrationListener ctor - Add the current object to the list of
// PassRegistrationListeners...
PassRegistrationListener::PassRegistrationListener() {
- sys::SmartScopedLock<true> Lock(ListenersLock);
- if (!Listeners) Listeners = new std::vector<PassRegistrationListener*>();
- Listeners->push_back(this);
+ PassRegistry::getPassRegistry()->addRegistrationListener(this);
}
// dtor - Remove object from list of listeners...
PassRegistrationListener::~PassRegistrationListener() {
- sys::SmartScopedLock<true> Lock(ListenersLock);
- std::vector<PassRegistrationListener*>::iterator I =
- std::find(Listeners->begin(), Listeners->end(), this);
- assert(Listeners && I != Listeners->end() &&
- "PassRegistrationListener not registered!");
- Listeners->erase(I);
-
- if (Listeners->empty()) {
- delete Listeners;
- Listeners = 0;
- }
+ PassRegistry::getPassRegistry()->removeRegistrationListener(this);
}
// enumeratePasses - Iterate over the registered passes, calling the
// passEnumerate callback on each PassInfo object.
//
void PassRegistrationListener::enumeratePasses() {
- getPassRegistrar()->EnumerateWith(this);
+ PassRegistry::getPassRegistry()->enumerateWith(this);
}
PassNameParser::~PassNameParser() {}
@@ -417,7 +250,7 @@ namespace {
void passEnumerate(const PassInfo *P) {
if (P->isCFGOnlyPass())
- CFGOnlyList.push_back(P);
+ CFGOnlyList.push_back(P->getTypeInfo());
}
};
}
@@ -437,4 +270,25 @@ void AnalysisUsage::setPreservesCFG() {
GetCFGOnlyPasses(Preserved).enumeratePasses();
}
+AnalysisUsage &AnalysisUsage::addPreserved(StringRef Arg) {
+ const PassInfo *PI = Pass::lookupPassInfo(Arg);
+ // If the pass exists, preserve it. Otherwise silently do nothing.
+ if (PI) Preserved.push_back(PI->getTypeInfo());
+ return *this;
+}
+
+AnalysisUsage &AnalysisUsage::addRequiredID(const void *ID) {
+ Required.push_back(ID);
+ return *this;
+}
+AnalysisUsage &AnalysisUsage::addRequiredID(char &ID) {
+ Required.push_back(&ID);
+ return *this;
+}
+
+AnalysisUsage &AnalysisUsage::addRequiredTransitiveID(char &ID) {
+ Required.push_back(&ID);
+ RequiredTransitive.push_back(&ID);
+ return *this;
+}
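
As a sketch of how a pass is written against the revised interface above (char-by-reference IDs, PassRegistry-based lookup, the new AnalysisUsage helpers); the CountBlocks pass, its "count-blocks" argument and its behaviour are purely illustrative:

    #include "llvm/Pass.h"
    #include "llvm/Function.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    namespace {
      struct CountBlocks : public FunctionPass {
        static char ID;                      // the address is the pass identity
        CountBlocks() : FunctionPass(ID) {}  // note: ID is now passed by reference

        virtual bool runOnFunction(Function &F) {
          errs() << F.getName() << ": " << F.size() << " basic blocks\n";
          return false;                      // analysis only, IR unchanged
        }
        virtual void getAnalysisUsage(AnalysisUsage &AU) const {
          AU.setPreservesAll();              // feeds the Preserved set handled above
        }
      };
    }
    char CountBlocks::ID = 0;
    static RegisterPass<CountBlocks> X("count-blocks", "Count basic blocks per function");
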
diff --git a/libclamav/c++/llvm/lib/VMCore/PassManager.cpp b/libclamav/c++/llvm/lib/VMCore/PassManager.cpp
index c4dfe14..ab4d4e5 100644
--- a/libclamav/c++/llvm/lib/VMCore/PassManager.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/PassManager.cpp
@@ -7,12 +7,14 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the LLVM Pass Manager infrastructure.
+// This file implements the LLVM Pass Manager infrastructure.
//
//===----------------------------------------------------------------------===//
#include "llvm/PassManagers.h"
+#include "llvm/PassManager.h"
+#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -20,10 +22,9 @@
#include "llvm/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/PassNameParser.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/System/Mutex.h"
-#include "llvm/System/Threading.h"
-#include "llvm-c/Core.h"
#include <algorithm>
#include <cstdio>
#include <map>
@@ -55,6 +56,59 @@ PassDebugging("debug-pass", cl::Hidden,
clEnumVal(Executions, "print pass name before it is executed"),
clEnumVal(Details , "print pass details when it is executed"),
clEnumValEnd));
+
+typedef llvm::cl::list<const llvm::PassInfo *, bool, PassNameParser>
+PassOptionList;
+
+// Print IR out before/after specified passes.
+static PassOptionList
+PrintBefore("print-before",
+ llvm::cl::desc("Print IR before specified passes"));
+
+static PassOptionList
+PrintAfter("print-after",
+ llvm::cl::desc("Print IR after specified passes"));
+
+static cl::opt<bool>
+PrintBeforeAll("print-before-all",
+ llvm::cl::desc("Print IR before each pass"),
+ cl::init(false));
+static cl::opt<bool>
+PrintAfterAll("print-after-all",
+ llvm::cl::desc("Print IR after each pass"),
+ cl::init(false));
+
+/// This is a helper to determine whether to print IR before or
+/// after a pass.
+
+static bool ShouldPrintBeforeOrAfterPass(const void *PassID,
+ PassOptionList &PassesToPrint) {
+ if (const llvm::PassInfo *PI =
+ PassRegistry::getPassRegistry()->getPassInfo(PassID)) {
+ for (unsigned i = 0, ie = PassesToPrint.size(); i < ie; ++i) {
+ const llvm::PassInfo *PassInf = PassesToPrint[i];
+ if (PassInf)
+ if (PassInf->getPassArgument() == PI->getPassArgument()) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+
+/// This is a utility to check whether a pass should have IR dumped
+/// before it.
+static bool ShouldPrintBeforePass(const void *PassID) {
+ return PrintBeforeAll || ShouldPrintBeforeOrAfterPass(PassID, PrintBefore);
+}
+
+/// This is a utility to check whether a pass should have IR dumped
+/// after it.
+static bool ShouldPrintAfterPass(const void *PassID) {
+ return PrintAfterAll || ShouldPrintBeforeOrAfterPass(PassID, PrintAfter);
+}
+
} // End of llvm namespace
/// isPassDebuggingExecutionsOrMore - Return true if -debug-pass=Executions
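
The new -print-before/-print-after machinery above is driven entirely from the command line; a minimal sketch of a standalone driver that makes those flags reachable (the runPasses function is hypothetical, and constant propagation is just one example of a registered pass):

    #include "llvm/LLVMContext.h"
    #include "llvm/Module.h"
    #include "llvm/PassManager.h"
    #include "llvm/Support/CommandLine.h"
    #include "llvm/Transforms/Scalar.h"
    using namespace llvm;

    int runPasses(int argc, char **argv, Module &M) {
      // Exposes -print-before=<pass-arg>, -print-after=<pass-arg>,
      // -print-before-all and -print-after-all declared above,
      // e.g. "-print-before=constprop".
      cl::ParseCommandLineOptions(argc, argv, "pass driver sketch\n");

      PassManager PM;
      PM.add(createConstantPropagationPass());  // PassManager::add() interleaves the
      return PM.run(M) ? 1 : 0;                 // printer passes when requested
    }
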
@@ -71,9 +125,9 @@ void PassManagerPrettyStackEntry::print(raw_ostream &OS) const {
OS << "Releasing pass '";
else
OS << "Running pass '";
-
+
OS << P->getPassName() << "'";
-
+
if (M) {
OS << " on module '" << M->getModuleIdentifier() << "'.\n";
return;
@@ -109,8 +163,8 @@ class BBPassManager : public PMDataManager, public FunctionPass {
public:
static char ID;
- explicit BBPassManager(int Depth)
- : PMDataManager(Depth), FunctionPass(&ID) {}
+ explicit BBPassManager(int Depth)
+ : PMDataManager(Depth), FunctionPass(ID) {}
/// Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the function, and if so, return true.
@@ -149,8 +203,8 @@ public:
return BP;
}
- virtual PassManagerType getPassManagerType() const {
- return PMT_BasicBlockPassManager;
+ virtual PassManagerType getPassManagerType() const {
+ return PMT_BasicBlockPassManager;
}
};
@@ -170,9 +224,9 @@ private:
bool wasRun;
public:
static char ID;
- explicit FunctionPassManagerImpl(int Depth) :
- Pass(PT_PassManager, &ID), PMDataManager(Depth),
- PMTopLevelManager(TLM_Function), wasRun(false) { }
+ explicit FunctionPassManagerImpl(int Depth) :
+ Pass(PT_PassManager, ID), PMDataManager(Depth),
+ PMTopLevelManager(new FPPassManager(1)), wasRun(false) {}
/// add - Add a pass to the queue of passes to run. This passes ownership of
/// the Pass to the PassManager. When the PassManager is destroyed, the pass
@@ -181,7 +235,12 @@ public:
void add(Pass *P) {
schedulePass(P);
}
-
+
+ /// createPrinterPass - Get a function printer pass.
+ Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const {
+ return createPrintFunctionPass(Banner, &O);
+ }
+
// Prepare for running an on the fly pass, freeing memory if needed
// from a previous run.
void releaseMemoryOnTheFly();
@@ -193,12 +252,12 @@ public:
/// doInitialization - Run all of the initializers for the function passes.
///
bool doInitialization(Module &M);
-
+
/// doFinalization - Run all of the finalizers for the function passes.
///
bool doFinalization(Module &M);
-
+
virtual PMDataManager *getAsPMDataManager() { return this; }
virtual Pass *getAsPass() { return this; }
@@ -207,7 +266,7 @@ public:
Info.setPreservesAll();
}
- inline void addTopLevelPass(Pass *P) {
+ void addTopLevelPass(Pass *P) {
if (ImmutablePass *IP = P->getAsImmutablePass()) {
// P is a immutable pass and it will be managed by this
// top level manager. Set up analysis resolver to connect them.
@@ -217,7 +276,7 @@ public:
addImmutablePass(IP);
recordAvailableAnalysis(IP);
} else {
- P->assignPassManager(activeStack);
+ P->assignPassManager(activeStack, PMT_FunctionPassManager);
}
}
@@ -230,6 +289,7 @@ public:
};
char FunctionPassManagerImpl::ID = 0;
+
//===----------------------------------------------------------------------===//
// MPPassManager
//
@@ -240,11 +300,11 @@ class MPPassManager : public Pass, public PMDataManager {
public:
static char ID;
explicit MPPassManager(int Depth) :
- Pass(PT_PassManager, &ID), PMDataManager(Depth) { }
+ Pass(PT_PassManager, ID), PMDataManager(Depth) { }
// Delete on the fly managers.
virtual ~MPPassManager() {
- for (std::map<Pass *, FunctionPassManagerImpl *>::iterator
+ for (std::map<Pass *, FunctionPassManagerImpl *>::iterator
I = OnTheFlyManagers.begin(), E = OnTheFlyManagers.end();
I != E; ++I) {
FunctionPassManagerImpl *FPP = I->second;
@@ -252,6 +312,11 @@ public:
}
}
+ /// createPrinterPass - Get a module printer pass.
+ Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const {
+ return createPrintModulePass(&O, false, Banner);
+ }
+
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the module, and if so, return true.
bool runOnModule(Module &M);
@@ -266,10 +331,10 @@ public:
/// through getAnalysis interface.
virtual void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass);
- /// Return function pass corresponding to PassInfo PI, that is
+ /// Return function pass corresponding to PassInfo PI, that is
/// required by module pass MP. Instantiate analysis pass, by using
/// its runOnFunction() for function F.
- virtual Pass* getOnTheFlyPass(Pass *MP, const PassInfo *PI, Function &F);
+ virtual Pass* getOnTheFlyPass(Pass *MP, AnalysisID PI, Function &F);
virtual const char *getPassName() const {
return "Module Pass Manager";
@@ -297,8 +362,8 @@ public:
return static_cast<ModulePass *>(PassVector[N]);
}
- virtual PassManagerType getPassManagerType() const {
- return PMT_ModulePassManager;
+ virtual PassManagerType getPassManagerType() const {
+ return PMT_ModulePassManager;
}
private:
@@ -320,8 +385,8 @@ class PassManagerImpl : public Pass,
public:
static char ID;
explicit PassManagerImpl(int Depth) :
- Pass(PT_PassManager, &ID), PMDataManager(Depth),
- PMTopLevelManager(TLM_Pass) { }
+ Pass(PT_PassManager, ID), PMDataManager(Depth),
+ PMTopLevelManager(new MPPassManager(1)) {}
/// add - Add a pass to the queue of passes to run. This passes ownership of
/// the Pass to the PassManager. When the PassManager is destroyed, the pass
@@ -330,7 +395,12 @@ public:
void add(Pass *P) {
schedulePass(P);
}
-
+
+ /// createPrinterPass - Get a module printer pass.
+ Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const {
+ return createPrintModulePass(&O, false, Banner);
+ }
+
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the module, and if so, return true.
bool run(Module &M);
@@ -340,7 +410,7 @@ public:
Info.setPreservesAll();
}
- inline void addTopLevelPass(Pass *P) {
+ void addTopLevelPass(Pass *P) {
if (ImmutablePass *IP = P->getAsImmutablePass()) {
// P is a immutable pass and it will be managed by this
// top level manager. Set up analysis resolver to connect them.
@@ -350,7 +420,7 @@ public:
addImmutablePass(IP);
recordAvailableAnalysis(IP);
} else {
- P->assignPassManager(activeStack);
+ P->assignPassManager(activeStack, PMT_ModulePassManager);
}
}
@@ -378,17 +448,19 @@ namespace {
static ManagedStatic<sys::SmartMutex<true> > TimingInfoMutex;
class TimingInfo {
- std::map<Pass*, Timer> TimingData;
+ DenseMap<Pass*, Timer*> TimingData;
TimerGroup TG;
-
public:
// Use 'create' member to get this.
TimingInfo() : TG("... Pass execution timing report ...") {}
-
+
// TimingDtor - Print out information about timing information
~TimingInfo() {
- // Delete all of the timers...
- TimingData.clear();
+ // Delete all of the timers, which accumulate their info into the
+ // TimerGroup.
+ for (DenseMap<Pass*, Timer*>::iterator I = TimingData.begin(),
+ E = TimingData.end(); I != E; ++I)
+ delete I->second;
// TimerGroup is deleted next, printing the report.
}
@@ -397,18 +469,15 @@ public:
// null. It may be called multiple times.
static void createTheTimeInfo();
- /// passStarted - This method creates a timer for the given pass if it doesn't
- /// already have one, and starts the timer.
- Timer *passStarted(Pass *P) {
- if (P->getAsPMDataManager())
+ /// getPassTimer - Return the timer for the specified pass if it exists.
+ Timer *getPassTimer(Pass *P) {
+ if (P->getAsPMDataManager())
return 0;
sys::SmartScopedLock<true> Lock(*TimingInfoMutex);
- std::map<Pass*, Timer>::iterator I = TimingData.find(P);
- if (I == TimingData.end())
- I=TimingData.insert(std::make_pair(P, Timer(P->getPassName(), TG))).first;
- Timer *T = &I->second;
- T->startTimer();
+ Timer *&T = TimingData[P];
+ if (T == 0)
+ T = new Timer(P->getPassName(), TG);
return T;
}
};
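
TimeRegion accepts a possibly-null Timer pointer, which is what lets the call sites in this patch pass getPassTimer() unconditionally; a small sketch of the same RAII pattern (timedWork is a hypothetical helper):

    #include "llvm/Support/Timer.h"
    using namespace llvm;

    static void timedWork(Timer *T) {
      // No-op when T is null (pass timing disabled); otherwise the timer
      // starts here and stops when R goes out of scope.
      TimeRegion R(T);
      // ... work being measured ...
    }
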
@@ -421,28 +490,20 @@ static TimingInfo *TheTimeInfo;
// PMTopLevelManager implementation
/// Initialize top level manager. Create first pass manager.
-PMTopLevelManager::PMTopLevelManager(enum TopLevelManagerType t) {
- if (t == TLM_Pass) {
- MPPassManager *MPP = new MPPassManager(1);
- MPP->setTopLevelManager(this);
- addPassManager(MPP);
- activeStack.push(MPP);
- } else if (t == TLM_Function) {
- FPPassManager *FPP = new FPPassManager(1);
- FPP->setTopLevelManager(this);
- addPassManager(FPP);
- activeStack.push(FPP);
- }
+PMTopLevelManager::PMTopLevelManager(PMDataManager *PMDM) {
+ PMDM->setTopLevelManager(this);
+ addPassManager(PMDM);
+ activeStack.push(PMDM);
}
/// Set pass P as the last user of the given analysis passes.
-void PMTopLevelManager::setLastUser(SmallVector<Pass *, 12> &AnalysisPasses,
+void PMTopLevelManager::setLastUser(SmallVector<Pass *, 12> &AnalysisPasses,
Pass *P) {
for (SmallVector<Pass *, 12>::iterator I = AnalysisPasses.begin(),
E = AnalysisPasses.end(); I != E; ++I) {
Pass *AP = *I;
LastUser[AP] = P;
-
+
if (P == AP)
continue;
@@ -461,7 +522,7 @@ void PMTopLevelManager::setLastUser(SmallVector<Pass *, 12> &AnalysisPasses,
/// Collect passes whose last user is P
void PMTopLevelManager::collectLastUses(SmallVector<Pass *, 12> &LastUses,
Pass *P) {
- DenseMap<Pass *, SmallPtrSet<Pass *, 8> >::iterator DMI =
+ DenseMap<Pass *, SmallPtrSet<Pass *, 8> >::iterator DMI =
InversedLastUser.find(P);
if (DMI == InversedLastUser.end())
return;
@@ -477,7 +538,7 @@ void PMTopLevelManager::collectLastUses(SmallVector<Pass *, 12> &LastUses,
AnalysisUsage *PMTopLevelManager::findAnalysisUsage(Pass *P) {
AnalysisUsage *AnUsage = NULL;
DenseMap<Pass *, AnalysisUsage *>::iterator DMI = AnUsageMap.find(P);
- if (DMI != AnUsageMap.end())
+ if (DMI != AnUsageMap.end())
AnUsage = DMI->second;
else {
AnUsage = new AnalysisUsage();
@@ -501,8 +562,9 @@ void PMTopLevelManager::schedulePass(Pass *P) {
// If P is an analysis pass and it is available then do not
// generate the analysis again. Stale analysis info should not be
// available at this point.
- if (P->getPassInfo() &&
- P->getPassInfo()->isAnalysis() && findAnalysisPass(P->getPassInfo())) {
+ const PassInfo *PI =
+ PassRegistry::getPassRegistry()->getPassInfo(P->getPassID());
+ if (PI && PI->isAnalysis() && findAnalysisPass(P->getPassID())) {
delete P;
return;
}
@@ -512,14 +574,15 @@ void PMTopLevelManager::schedulePass(Pass *P) {
bool checkAnalysis = true;
while (checkAnalysis) {
checkAnalysis = false;
-
+
const AnalysisUsage::VectorType &RequiredSet = AnUsage->getRequiredSet();
for (AnalysisUsage::VectorType::const_iterator I = RequiredSet.begin(),
E = RequiredSet.end(); I != E; ++I) {
-
+
Pass *AnalysisPass = findAnalysisPass(*I);
if (!AnalysisPass) {
- AnalysisPass = (*I)->createPass();
+ const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(*I);
+ AnalysisPass = PI->createPass();
if (P->getPotentialPassManagerType () ==
AnalysisPass->getPotentialPassManagerType())
// Schedule analysis pass that is managed by the same pass manager.
@@ -528,12 +591,12 @@ void PMTopLevelManager::schedulePass(Pass *P) {
AnalysisPass->getPotentialPassManagerType()) {
// Schedule analysis pass that is managed by a new manager.
schedulePass(AnalysisPass);
- // Recheck analysis passes to ensure that required analysises that
+ // Recheck analysis passes to ensure that required analyses that
// are already checked are still available.
checkAnalysis = true;
}
else
- // Do not schedule this analysis. Lower level analsyis
+ // Do not schedule this analysis. Lower level analsyis
+ // Do not schedule this analysis. Lower level analysis
// passes are run on the fly.
delete AnalysisPass;
}
@@ -565,16 +628,21 @@ Pass *PMTopLevelManager::findAnalysisPass(AnalysisID AID) {
for (SmallVector<ImmutablePass *, 8>::iterator I = ImmutablePasses.begin(),
E = ImmutablePasses.end(); P == NULL && I != E; ++I) {
- const PassInfo *PI = (*I)->getPassInfo();
+ AnalysisID PI = (*I)->getPassID();
if (PI == AID)
P = *I;
// If Pass not found then check the interfaces implemented by Immutable Pass
if (!P) {
+ const PassInfo *PassInf =
+ PassRegistry::getPassRegistry()->getPassInfo(PI);
const std::vector<const PassInfo*> &ImmPI =
- PI->getInterfacesImplemented();
- if (std::find(ImmPI.begin(), ImmPI.end(), AID) != ImmPI.end())
- P = *I;
+ PassInf->getInterfacesImplemented();
+ for (std::vector<const PassInfo*>::const_iterator II = ImmPI.begin(),
+ EE = ImmPI.end(); II != EE; ++II) {
+ if ((*II)->getTypeInfo() == AID)
+ P = *I;
+ }
}
}
@@ -591,7 +659,7 @@ void PMTopLevelManager::dumpPasses() const {
for (unsigned i = 0, e = ImmutablePasses.size(); i != e; ++i) {
ImmutablePasses[i]->dumpPassStructure(0);
}
-
+
// Every class that derives from PMDataManager also derives from Pass
// (sometimes indirectly), but there's no inheritance relationship
// between PMDataManager and Pass, so we have to getAsPass to get
@@ -617,15 +685,16 @@ void PMTopLevelManager::initializeAllAnalysisInfo() {
for (SmallVector<PMDataManager *, 8>::iterator I = PassManagers.begin(),
E = PassManagers.end(); I != E; ++I)
(*I)->initializeAnalysisInfo();
-
+
// Initialize other pass managers
- for (SmallVector<PMDataManager *, 8>::iterator I = IndirectPassManagers.begin(),
- E = IndirectPassManagers.end(); I != E; ++I)
+ for (SmallVector<PMDataManager *, 8>::iterator
+ I = IndirectPassManagers.begin(), E = IndirectPassManagers.end();
+ I != E; ++I)
(*I)->initializeAnalysisInfo();
for (DenseMap<Pass *, Pass *>::iterator DMI = LastUser.begin(),
DME = LastUser.end(); DMI != DME; ++DMI) {
- DenseMap<Pass *, SmallPtrSet<Pass *, 8> >::iterator InvDMI =
+ DenseMap<Pass *, SmallPtrSet<Pass *, 8> >::iterator InvDMI =
InversedLastUser.find(DMI->second);
if (InvDMI != InversedLastUser.end()) {
SmallPtrSet<Pass *, 8> &L = InvDMI->second;
@@ -642,7 +711,7 @@ PMTopLevelManager::~PMTopLevelManager() {
for (SmallVector<PMDataManager *, 8>::iterator I = PassManagers.begin(),
E = PassManagers.end(); I != E; ++I)
delete *I;
-
+
for (SmallVector<ImmutablePass *, 8>::iterator
I = ImmutablePasses.begin(), E = ImmutablePasses.end(); I != E; ++I)
delete *I;
@@ -657,16 +726,19 @@ PMTopLevelManager::~PMTopLevelManager() {
/// Augment AvailableAnalysis by adding analysis made available by pass P.
void PMDataManager::recordAvailableAnalysis(Pass *P) {
- const PassInfo *PI = P->getPassInfo();
- if (PI == 0) return;
-
+ AnalysisID PI = P->getPassID();
+
AvailableAnalysis[PI] = P;
- //This pass is the current implementation of all of the interfaces it
- //implements as well.
- const std::vector<const PassInfo*> &II = PI->getInterfacesImplemented();
+ assert(!AvailableAnalysis.empty());
+
+ // This pass is the current implementation of all of the interfaces it
+ // implements as well.
+ const PassInfo *PInf = PassRegistry::getPassRegistry()->getPassInfo(PI);
+ if (PInf == 0) return;
+ const std::vector<const PassInfo*> &II = PInf->getInterfacesImplemented();
for (unsigned i = 0, e = II.size(); i != e; ++i)
- AvailableAnalysis[II[i]] = P;
+ AvailableAnalysis[II[i]->getTypeInfo()] = P;
}
// Return true if P preserves high level analysis used by other
@@ -675,18 +747,18 @@ bool PMDataManager::preserveHigherLevelAnalysis(Pass *P) {
AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P);
if (AnUsage->getPreservesAll())
return true;
-
+
const AnalysisUsage::VectorType &PreservedSet = AnUsage->getPreservedSet();
for (SmallVector<Pass *, 8>::iterator I = HigherLevelAnalysis.begin(),
E = HigherLevelAnalysis.end(); I != E; ++I) {
Pass *P1 = *I;
if (P1->getAsImmutablePass() == 0 &&
std::find(PreservedSet.begin(), PreservedSet.end(),
- P1->getPassInfo()) ==
+ P1->getPassID()) ==
PreservedSet.end())
return false;
}
-
+
return true;
}
@@ -704,11 +776,8 @@ void PMDataManager::verifyPreservedAnalysis(Pass *P) {
E = PreservedSet.end(); I != E; ++I) {
AnalysisID AID = *I;
if (Pass *AP = findAnalysisPass(AID, true)) {
-
- Timer *T = 0;
- if (TheTimeInfo) T = TheTimeInfo->passStarted(AP);
+ TimeRegion PassTimer(getPassTimer(AP));
AP->verifyAnalysis();
- if (T) T->stopTimer();
}
}
}
@@ -724,7 +793,7 @@ void PMDataManager::removeNotPreservedAnalysis(Pass *P) {
E = AvailableAnalysis.end(); I != E; ) {
std::map<AnalysisID, Pass*>::iterator Info = I++;
if (Info->second->getAsImmutablePass() == 0 &&
- std::find(PreservedSet.begin(), PreservedSet.end(), Info->first) ==
+ std::find(PreservedSet.begin(), PreservedSet.end(), Info->first) ==
PreservedSet.end()) {
// Remove this analysis
if (PassDebugging >= Details) {
@@ -743,12 +812,12 @@ void PMDataManager::removeNotPreservedAnalysis(Pass *P) {
if (!InheritedAnalysis[Index])
continue;
- for (std::map<AnalysisID, Pass*>::iterator
+ for (std::map<AnalysisID, Pass*>::iterator
I = InheritedAnalysis[Index]->begin(),
E = InheritedAnalysis[Index]->end(); I != E; ) {
std::map<AnalysisID, Pass *>::iterator Info = I++;
if (Info->second->getAsImmutablePass() == 0 &&
- std::find(PreservedSet.begin(), PreservedSet.end(), Info->first) ==
+ std::find(PreservedSet.begin(), PreservedSet.end(), Info->first) ==
PreservedSet.end()) {
// Remove this analysis
if (PassDebugging >= Details) {
@@ -792,29 +861,29 @@ void PMDataManager::freePass(Pass *P, StringRef Msg,
{
// If the pass crashes releasing memory, remember this.
PassManagerPrettyStackEntry X(P);
-
- Timer *T = StartPassTimer(P);
+ TimeRegion PassTimer(getPassTimer(P));
+
P->releaseMemory();
- StopPassTimer(P, T);
}
- if (const PassInfo *PI = P->getPassInfo()) {
+ AnalysisID PI = P->getPassID();
+ if (const PassInfo *PInf = PassRegistry::getPassRegistry()->getPassInfo(PI)) {
// Remove the pass itself (if it is not already removed).
AvailableAnalysis.erase(PI);
// Remove all interfaces this pass implements, for which it is also
// listed as the available implementation.
- const std::vector<const PassInfo*> &II = PI->getInterfacesImplemented();
+ const std::vector<const PassInfo*> &II = PInf->getInterfacesImplemented();
for (unsigned i = 0, e = II.size(); i != e; ++i) {
std::map<AnalysisID, Pass*>::iterator Pos =
- AvailableAnalysis.find(II[i]);
+ AvailableAnalysis.find(II[i]->getTypeInfo());
if (Pos != AvailableAnalysis.end() && Pos->second == P)
AvailableAnalysis.erase(Pos);
}
}
}
-/// Add pass P into the PassVector. Update
+/// Add pass P into the PassVector. Update
/// AvailableAnalysis appropriately if ProcessAnalysis is true.
void PMDataManager::add(Pass *P, bool ProcessAnalysis) {
// This manager is going to manage pass P. Set up analysis resolver
@@ -839,7 +908,7 @@ void PMDataManager::add(Pass *P, bool ProcessAnalysis) {
unsigned PDepth = this->getDepth();
- collectRequiredAnalysis(RequiredPasses,
+ collectRequiredAnalysis(RequiredPasses,
ReqAnalysisNotAvailable, P);
for (SmallVector<Pass *, 8>::iterator I = RequiredPasses.begin(),
E = RequiredPasses.end(); I != E; ++I) {
@@ -857,7 +926,7 @@ void PMDataManager::add(Pass *P, bool ProcessAnalysis) {
TransferLastUses.push_back(PRequired);
// Keep track of higher level analysis used by this manager.
HigherLevelAnalysis.push_back(PRequired);
- } else
+ } else
llvm_unreachable("Unable to accomodate Required Pass");
}
@@ -874,11 +943,12 @@ void PMDataManager::add(Pass *P, bool ProcessAnalysis) {
TransferLastUses.clear();
}
- // Now, take care of required analysises that are not available.
- for (SmallVector<AnalysisID, 8>::iterator
- I = ReqAnalysisNotAvailable.begin(),
+ // Now, take care of required analyses that are not available.
+ for (SmallVector<AnalysisID, 8>::iterator
+ I = ReqAnalysisNotAvailable.begin(),
E = ReqAnalysisNotAvailable.end() ;I != E; ++I) {
- Pass *AnalysisPass = (*I)->createPass();
+ const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(*I);
+ Pass *AnalysisPass = PI->createPass();
this->addLowerLevelRequiredPass(P, AnalysisPass);
}
@@ -900,10 +970,10 @@ void PMDataManager::collectRequiredAnalysis(SmallVector<Pass *, 8>&RP,
Pass *P) {
AnalysisUsage *AnUsage = TPM->findAnalysisUsage(P);
const AnalysisUsage::VectorType &RequiredSet = AnUsage->getRequiredSet();
- for (AnalysisUsage::VectorType::const_iterator
+ for (AnalysisUsage::VectorType::const_iterator
I = RequiredSet.begin(), E = RequiredSet.end(); I != E; ++I) {
if (Pass *AnalysisPass = findAnalysisPass(*I, true))
- RP.push_back(AnalysisPass);
+ RP.push_back(AnalysisPass);
else
RP_NotAvail.push_back(*I);
}
@@ -912,7 +982,7 @@ void PMDataManager::collectRequiredAnalysis(SmallVector<Pass *, 8>&RP,
for (AnalysisUsage::VectorType::const_iterator I = IDs.begin(),
E = IDs.end(); I != E; ++I) {
if (Pass *AnalysisPass = findAnalysisPass(*I, true))
- RP.push_back(AnalysisPass);
+ RP.push_back(AnalysisPass);
else
RP_NotAvail.push_back(*I);
}
@@ -953,7 +1023,7 @@ Pass *PMDataManager::findAnalysisPass(AnalysisID AID, bool SearchParent) {
// Search Parents through TopLevelManager
if (SearchParent)
return TPM->findAnalysisPass(AID);
-
+
return NULL;
}
@@ -967,7 +1037,7 @@ void PMDataManager::dumpLastUses(Pass *P, unsigned Offset) const{
return;
TPM->collectLastUses(LUses, P);
-
+
for (SmallVector<Pass *, 12>::iterator I = LUses.begin(),
E = LUses.end(); I != E; ++I) {
llvm::dbgs() << "--" << std::string(Offset*2, ' ');
@@ -981,7 +1051,8 @@ void PMDataManager::dumpPassArguments() const {
if (PMDataManager *PMD = (*I)->getAsPMDataManager())
PMD->dumpPassArguments();
else
- if (const PassInfo *PI = (*I)->getPassInfo())
+ if (const PassInfo *PI =
+ PassRegistry::getPassRegistry()->getPassInfo((*I)->getPassID()))
if (!PI->isAnalysisGroup())
dbgs() << " -" << PI->getPassArgument();
}
@@ -1030,7 +1101,7 @@ void PMDataManager::dumpPassInfo(Pass *P, enum PassDebuggingString S1,
void PMDataManager::dumpRequiredSet(const Pass *P) const {
if (PassDebugging < Details)
return;
-
+
AnalysisUsage analysisUsage;
P->getAnalysisUsage(analysisUsage);
dumpAnalysisUsage("Required", P, analysisUsage.getRequiredSet());
@@ -1039,7 +1110,7 @@ void PMDataManager::dumpRequiredSet(const Pass *P) const {
void PMDataManager::dumpPreservedSet(const Pass *P) const {
if (PassDebugging < Details)
return;
-
+
AnalysisUsage analysisUsage;
P->getAnalysisUsage(analysisUsage);
dumpAnalysisUsage("Preserved", P, analysisUsage.getPreservedSet());
@@ -1053,7 +1124,8 @@ void PMDataManager::dumpAnalysisUsage(StringRef Msg, const Pass *P,
dbgs() << (void*)P << std::string(getDepth()*2+3, ' ') << Msg << " Analyses:";
for (unsigned i = 0; i != Set.size(); ++i) {
if (i) dbgs() << ',';
- dbgs() << ' ' << Set[i]->getPassName();
+ const PassInfo *PInf = PassRegistry::getPassRegistry()->getPassInfo(Set[i]);
+ dbgs() << ' ' << PInf->getPassName();
}
dbgs() << '\n';
}
@@ -1068,14 +1140,14 @@ void PMDataManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) {
TPM->dumpPasses();
}
- // Module Level pass may required Function Level analysis info
- // (e.g. dominator info). Pass manager uses on the fly function pass manager
- // to provide this on demand. In that case, in Pass manager terminology,
+ // Module Level pass may require Function Level analysis info
+ // (e.g. dominator info). Pass manager uses on the fly function pass manager
+ // to provide this on demand. In that case, in Pass manager terminology,
// module level pass is requiring lower level analysis info managed by
// lower level pass manager.
// When Pass manager is not able to order required analysis info, Pass manager
- // checks whether any lower level manager will be able to provide this
+ // checks whether any lower level manager will be able to provide this
// analysis info on demand or not.
#ifndef NDEBUG
dbgs() << "Unable to schedule '" << RequiredPass->getPassName();
@@ -1084,6 +1156,11 @@ void PMDataManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) {
llvm_unreachable("Unable to schedule pass");
}
+Pass *PMDataManager::getOnTheFlyPass(Pass *P, AnalysisID PI, Function &F) {
+ assert(0 && "Unable to find on the fly pass");
+ return NULL;
+}
+
// Destructor
PMDataManager::~PMDataManager() {
for (SmallVector<Pass *, 8>::iterator I = PassVector.begin(),
@@ -1098,7 +1175,7 @@ Pass *AnalysisResolver::getAnalysisIfAvailable(AnalysisID ID, bool dir) const {
return PM.findAnalysisPass(ID, dir);
}
-Pass *AnalysisResolver::findImplPass(Pass *P, const PassInfo *AnalysisPI,
+Pass *AnalysisResolver::findImplPass(Pass *P, AnalysisID AnalysisPI,
Function &F) {
return PM.getOnTheFlyPass(P, AnalysisPI, F);
}
@@ -1106,8 +1183,8 @@ Pass *AnalysisResolver::findImplPass(Pass *P, const PassInfo *AnalysisPI,
//===----------------------------------------------------------------------===//
// BBPassManager implementation
-/// Execute all of the passes scheduled for execution by invoking
-/// runOnBasicBlock method. Keep track of whether any of the passes modifies
+/// Execute all of the passes scheduled for execution by invoking
+/// runOnBasicBlock method. Keep track of whether any of the passes modifies
/// the function, and if so, return true.
bool BBPassManager::runOnFunction(Function &F) {
if (F.isDeclaration())
@@ -1128,14 +1205,13 @@ bool BBPassManager::runOnFunction(Function &F) {
{
// If the pass crashes, remember this.
PassManagerPrettyStackEntry X(BP, *I);
-
- Timer *T = StartPassTimer(BP);
+ TimeRegion PassTimer(getPassTimer(BP));
+
LocalChanged |= BP->runOnBasicBlock(*I);
- StopPassTimer(BP, T);
}
Changed |= LocalChanged;
- if (LocalChanged)
+ if (LocalChanged)
dumpPassInfo(BP, MODIFICATION_MSG, ON_BASICBLOCK_MSG,
I->getName());
dumpPreservedSet(BP);
@@ -1208,13 +1284,31 @@ FunctionPassManager::~FunctionPassManager() {
delete FPM;
}
+/// addImpl - Add a pass to the queue of passes to run, without
+/// checking whether to add a printer pass.
+void FunctionPassManager::addImpl(Pass *P) {
+ FPM->add(P);
+}
+
/// add - Add a pass to the queue of passes to run. This passes
/// ownership of the Pass to the PassManager. When the
/// PassManager_X is destroyed, the pass will be destroyed as well, so
/// there is no need to delete the pass. (TODO delete passes.)
/// This implies that all passes MUST be allocated with 'new'.
-void FunctionPassManager::add(Pass *P) {
- FPM->add(P);
+void FunctionPassManager::add(Pass *P) {
+ // If this is a not a function pass, don't add a printer for it.
+ const void *PassID = P->getPassID();
+ if (P->getPassKind() == PT_Function)
+ if (ShouldPrintBeforePass(PassID))
+ addImpl(P->createPrinterPass(dbgs(), std::string("*** IR Dump Before ")
+ + P->getPassName() + " ***"));
+
+ addImpl(P);
+
+ if (P->getPassKind() == PT_Function)
+ if (ShouldPrintAfterPass(PassID))
+ addImpl(P->createPrinterPass(dbgs(), std::string("*** IR Dump After ")
+ + P->getPassName() + " ***"));
}
/// run - Execute all of the passes scheduled for execution. Keep
@@ -1224,9 +1318,8 @@ void FunctionPassManager::add(Pass *P) {
bool FunctionPassManager::run(Function &F) {
if (F.isMaterializable()) {
std::string errstr;
- if (F.Materialize(&errstr)) {
- llvm_report_error("Error reading bitcode file: " + errstr);
- }
+ if (F.Materialize(&errstr))
+ report_fatal_error("Error reading bitcode file: " + Twine(errstr));
}
return FPM->run(F);
}
@@ -1322,8 +1415,8 @@ void FPPassManager::dumpPassStructure(unsigned Offset) {
}
-/// Execute all of the passes scheduled for execution by invoking
-/// runOnFunction method. Keep track of whether any of the passes modifies
+/// Execute all of the passes scheduled for execution by invoking
+/// runOnFunction method. Keep track of whether any of the passes modifies
/// the function, and if so, return true.
bool FPPassManager::runOnFunction(Function &F) {
if (F.isDeclaration())
@@ -1345,10 +1438,9 @@ bool FPPassManager::runOnFunction(Function &F) {
{
PassManagerPrettyStackEntry X(FP, F);
+ TimeRegion PassTimer(getPassTimer(FP));
- Timer *T = StartPassTimer(FP);
LocalChanged |= FP->runOnFunction(F);
- StopPassTimer(FP, T);
}
Changed |= LocalChanged;
@@ -1394,8 +1486,8 @@ bool FPPassManager::doFinalization(Module &M) {
//===----------------------------------------------------------------------===//
// MPPassManager implementation
-/// Execute all of the passes scheduled for execution by invoking
-/// runOnModule method. Keep track of whether any of the passes modifies
+/// Execute all of the passes scheduled for execution by invoking
+/// runOnModule method. Keep track of whether any of the passes modifies
/// the module, and if so, return true.
bool
MPPassManager::runOnModule(Module &M) {
@@ -1420,9 +1512,9 @@ MPPassManager::runOnModule(Module &M) {
{
PassManagerPrettyStackEntry X(MP, M);
- Timer *T = StartPassTimer(MP);
+ TimeRegion PassTimer(getPassTimer(MP));
+
LocalChanged |= MP->runOnModule(M);
- StopPassTimer(MP, T);
}
Changed |= LocalChanged;
@@ -1430,7 +1522,7 @@ MPPassManager::runOnModule(Module &M) {
dumpPassInfo(MP, MODIFICATION_MSG, ON_MODULE_MSG,
M.getModuleIdentifier());
dumpPreservedSet(MP);
-
+
verifyPreservedAnalysis(MP);
removeNotPreservedAnalysis(MP);
recordAvailableAnalysis(MP);
@@ -1456,7 +1548,7 @@ MPPassManager::runOnModule(Module &M) {
void MPPassManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) {
assert(P->getPotentialPassManagerType() == PMT_ModulePassManager &&
"Unable to handle Pass that requires lower level Analysis pass");
- assert((P->getPotentialPassManagerType() <
+ assert((P->getPotentialPassManagerType() <
RequiredPass->getPotentialPassManagerType()) &&
"Unable to handle Pass that requires lower level Analysis pass");
@@ -1476,13 +1568,13 @@ void MPPassManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) {
FPP->setLastUser(LU, P);
}
-/// Return function pass corresponding to PassInfo PI, that is
+/// Return function pass corresponding to PassInfo PI, that is
/// required by module pass MP. Instantiate analysis pass, by using
/// its runOnFunction() for function F.
-Pass* MPPassManager::getOnTheFlyPass(Pass *MP, const PassInfo *PI, Function &F){
+Pass* MPPassManager::getOnTheFlyPass(Pass *MP, AnalysisID PI, Function &F){
FunctionPassManagerImpl *FPP = OnTheFlyManagers[MP];
assert(FPP && "Unable to find on the fly pass");
-
+
FPP->releaseMemoryOnTheFly();
FPP->run(F);
return ((PMTopLevelManager*)FPP)->findAnalysisPass(PI);
@@ -1521,12 +1613,27 @@ PassManager::~PassManager() {
delete PM;
}
+/// addImpl - Add a pass to the queue of passes to run, without
+/// checking whether to add a printer pass.
+void PassManager::addImpl(Pass *P) {
+ PM->add(P);
+}
+
/// add - Add a pass to the queue of passes to run. This passes ownership of
/// the Pass to the PassManager. When the PassManager is destroyed, the pass
/// will be destroyed as well, so there is no need to delete the pass. This
/// implies that all passes MUST be allocated with 'new'.
void PassManager::add(Pass *P) {
- PM->add(P);
+ const void* PassID = P->getPassID();
+ if (ShouldPrintBeforePass(PassID))
+ addImpl(P->createPrinterPass(dbgs(), std::string("*** IR Dump Before ")
+ + P->getPassName() + " ***"));
+
+ addImpl(P);
+
+ if (ShouldPrintAfterPass(PassID))
+ addImpl(P->createPrinterPass(dbgs(), std::string("*** IR Dump After ")
+ + P->getPassName() + " ***"));
}
/// run - Execute all of the passes scheduled for execution. Keep track of
@@ -1559,17 +1666,12 @@ void TimingInfo::createTheTimeInfo() {
}
/// If TimingInfo is enabled then start pass timer.
-Timer *llvm::StartPassTimer(Pass *P) {
- if (TheTimeInfo)
- return TheTimeInfo->passStarted(P);
+Timer *llvm::getPassTimer(Pass *P) {
+ if (TheTimeInfo)
+ return TheTimeInfo->getPassTimer(P);
return 0;
}
-/// If TimingInfo is enabled then stop pass timer.
-void llvm::StopPassTimer(Pass *P, Timer *T) {
- if (T) T->stopTimer();
-}
-
//===----------------------------------------------------------------------===//
// PMStack implementation
//
@@ -1599,8 +1701,8 @@ void PMStack::push(PMDataManager *PM) {
}
// Dump content of the pass manager stack.
-void PMStack::dump() {
- for (std::deque<PMDataManager *>::iterator I = S.begin(),
+void PMStack::dump() const {
+ for (std::vector<PMDataManager *>::const_iterator I = S.begin(),
E = S.end(); I != E; ++I)
printf("%s ", (*I)->getAsPass()->getPassName());
@@ -1609,11 +1711,11 @@ void PMStack::dump() {
}
/// Find appropriate Module Pass Manager in the PM Stack and
-/// add self into that manager.
-void ModulePass::assignPassManager(PMStack &PMS,
+/// add self into that manager.
+void ModulePass::assignPassManager(PMStack &PMS,
PassManagerType PreferredType) {
// Find Module Pass Manager
- while(!PMS.empty()) {
+ while (!PMS.empty()) {
PassManagerType TopPMType = PMS.top()->getPassManagerType();
if (TopPMType == PreferredType)
break; // We found desired pass manager
@@ -1627,7 +1729,7 @@ void ModulePass::assignPassManager(PMStack &PMS,
}
/// Find appropriate Function Pass Manager or Call Graph Pass Manager
-/// in the PM Stack and add self into that manager.
+/// in the PM Stack and add self into that manager.
void FunctionPass::assignPassManager(PMStack &PMS,
PassManagerType PreferredType) {
@@ -1636,7 +1738,7 @@ void FunctionPass::assignPassManager(PMStack &PMS,
if (PMS.top()->getPassManagerType() > PMT_FunctionPassManager)
PMS.pop();
else
- break;
+ break;
}
// Create new Function Pass Manager if needed.
@@ -1668,14 +1770,14 @@ void FunctionPass::assignPassManager(PMStack &PMS,
}
/// Find appropriate Basic Pass Manager or Call Graph Pass Manager
-/// in the PM Stack and add self into that manager.
+/// in the PM Stack and add self into that manager.
void BasicBlockPass::assignPassManager(PMStack &PMS,
PassManagerType PreferredType) {
BBPassManager *BBP;
// Basic Pass Manager is a leaf pass manager. It does not handle
// any other pass manager.
- if (!PMS.empty() &&
+ if (!PMS.empty() &&
PMS.top()->getPassManagerType() == PMT_BasicBlockPassManager) {
BBP = (BBPassManager *)PMS.top();
} else {
@@ -1694,7 +1796,7 @@ void BasicBlockPass::assignPassManager(PMStack &PMS,
// [3] Assign manager to manage this new manager. This may create
// and push new managers into PMS
- BBP->assignPassManager(PMS);
+ BBP->assignPassManager(PMS, PreferredType);
// [4] Push new manager into PMS
PMS.push(BBP);
@@ -1705,38 +1807,3 @@ void BasicBlockPass::assignPassManager(PMStack &PMS,
}
PassManagerBase::~PassManagerBase() {}
-
-/*===-- C Bindings --------------------------------------------------------===*/
-
-LLVMPassManagerRef LLVMCreatePassManager() {
- return wrap(new PassManager());
-}
-
-LLVMPassManagerRef LLVMCreateFunctionPassManagerForModule(LLVMModuleRef M) {
- return wrap(new FunctionPassManager(unwrap(M)));
-}
-
-LLVMPassManagerRef LLVMCreateFunctionPassManager(LLVMModuleProviderRef P) {
- return LLVMCreateFunctionPassManagerForModule(
- reinterpret_cast<LLVMModuleRef>(P));
-}
-
-LLVMBool LLVMRunPassManager(LLVMPassManagerRef PM, LLVMModuleRef M) {
- return unwrap<PassManager>(PM)->run(*unwrap(M));
-}
-
-LLVMBool LLVMInitializeFunctionPassManager(LLVMPassManagerRef FPM) {
- return unwrap<FunctionPassManager>(FPM)->doInitialization();
-}
-
-LLVMBool LLVMRunFunctionPassManager(LLVMPassManagerRef FPM, LLVMValueRef F) {
- return unwrap<FunctionPassManager>(FPM)->run(*unwrap<Function>(F));
-}
-
-LLVMBool LLVMFinalizeFunctionPassManager(LLVMPassManagerRef FPM) {
- return unwrap<FunctionPassManager>(FPM)->doFinalization();
-}
-
-void LLVMDisposePassManager(LLVMPassManagerRef PM) {
- delete unwrap(PM);
-}
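
The C bindings removed above were thin wrappers around the C++ FunctionPassManager; a rough sketch of the equivalent direct C++ usage (optimizeFunctions is a hypothetical helper and mem2reg is just an example FunctionPass):

    #include "llvm/Module.h"
    #include "llvm/PassManager.h"
    #include "llvm/Transforms/Scalar.h"
    using namespace llvm;

    bool optimizeFunctions(Module &M) {
      FunctionPassManager FPM(&M);                   // what the removed C API wrapped
      FPM.add(createPromoteMemoryToRegisterPass());  // mem2reg

      bool Changed = FPM.doInitialization();
      for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F)
        Changed |= FPM.run(*F);                      // declarations are skipped internally
      return Changed | FPM.doFinalization();
    }
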
diff --git a/libclamav/c++/llvm/lib/VMCore/PassRegistry.cpp b/libclamav/c++/llvm/lib/VMCore/PassRegistry.cpp
new file mode 100644
index 0000000..21dba56
--- /dev/null
+++ b/libclamav/c++/llvm/lib/VMCore/PassRegistry.cpp
@@ -0,0 +1,159 @@
+//===- PassRegistry.cpp - Pass Registration Implementation ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PassRegistry, with which passes are registered on
+// initialization, and supports the PassManager in dependency resolution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/PassRegistry.h"
+#include "llvm/PassSupport.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ManagedStatic.h"
+
+using namespace llvm;
+
+static PassRegistry *PassRegistryObj = 0;
+PassRegistry *PassRegistry::getPassRegistry() {
+ // Use double-checked locking to safely initialize the registrar when
+ // we're running in multithreaded mode.
+ PassRegistry* tmp = PassRegistryObj;
+ if (llvm_is_multithreaded()) {
+ sys::MemoryFence();
+ if (!tmp) {
+ llvm_acquire_global_lock();
+ tmp = PassRegistryObj;
+ if (!tmp) {
+ tmp = new PassRegistry();
+ sys::MemoryFence();
+ PassRegistryObj = tmp;
+ }
+ llvm_release_global_lock();
+ }
+ } else if (!tmp) {
+ PassRegistryObj = new PassRegistry();
+ }
+
+ return PassRegistryObj;
+}
+
+namespace {
+
+// FIXME: We use ManagedCleanup to erase the pass registrar on shutdown.
+// Unfortunately, passes are registered with static ctors, and having
+// llvm_shutdown clear this map prevents successful resurrection after
+// llvm_shutdown is run. Ideally we should find a solution so that we don't
+// leak the map, AND can still resurrect after shutdown.
+void cleanupPassRegistry(void*) {
+ if (PassRegistryObj) {
+ delete PassRegistryObj;
+ PassRegistryObj = 0;
+ }
+}
+ManagedCleanup<&cleanupPassRegistry> registryCleanup ATTRIBUTE_USED;
+
+}
+
+const PassInfo *PassRegistry::getPassInfo(const void *TI) const {
+ sys::SmartScopedLock<true> Guard(Lock);
+ MapType::const_iterator I = PassInfoMap.find(TI);
+ return I != PassInfoMap.end() ? I->second : 0;
+}
+
+const PassInfo *PassRegistry::getPassInfo(StringRef Arg) const {
+ sys::SmartScopedLock<true> Guard(Lock);
+ StringMapType::const_iterator I = PassInfoStringMap.find(Arg);
+ return I != PassInfoStringMap.end() ? I->second : 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Pass Registration mechanism
+//
+
+void PassRegistry::registerPass(const PassInfo &PI) {
+ sys::SmartScopedLock<true> Guard(Lock);
+ bool Inserted =
+ PassInfoMap.insert(std::make_pair(PI.getTypeInfo(),&PI)).second;
+ assert(Inserted && "Pass registered multiple times!"); Inserted=Inserted;
+ PassInfoStringMap[PI.getPassArgument()] = &PI;
+
+ // Notify any listeners.
+ for (std::vector<PassRegistrationListener*>::iterator
+ I = Listeners.begin(), E = Listeners.end(); I != E; ++I)
+ (*I)->passRegistered(&PI);
+}
+
+void PassRegistry::unregisterPass(const PassInfo &PI) {
+ sys::SmartScopedLock<true> Guard(Lock);
+ MapType::iterator I = PassInfoMap.find(PI.getTypeInfo());
+ assert(I != PassInfoMap.end() && "Pass registered but not in map!");
+
+ // Remove pass from the map.
+ PassInfoMap.erase(I);
+ PassInfoStringMap.erase(PI.getPassArgument());
+}
+
+void PassRegistry::enumerateWith(PassRegistrationListener *L) {
+ sys::SmartScopedLock<true> Guard(Lock);
+ for (MapType::const_iterator I = PassInfoMap.begin(),
+ E = PassInfoMap.end(); I != E; ++I)
+ L->passEnumerate(I->second);
+}
+
+
+/// Analysis Group Mechanisms.
+void PassRegistry::registerAnalysisGroup(const void *InterfaceID,
+ const void *PassID,
+ PassInfo& Registeree,
+ bool isDefault) {
+ PassInfo *InterfaceInfo = const_cast<PassInfo*>(getPassInfo(InterfaceID));
+ if (InterfaceInfo == 0) {
+ // First reference to Interface, register it now.
+ registerPass(Registeree);
+ InterfaceInfo = &Registeree;
+ }
+ assert(Registeree.isAnalysisGroup() &&
+ "Trying to join an analysis group that is a normal pass!");
+
+ if (PassID) {
+ PassInfo *ImplementationInfo = const_cast<PassInfo*>(getPassInfo(PassID));
+ assert(ImplementationInfo &&
+ "Must register pass before adding to AnalysisGroup!");
+
+ // Make sure we keep track of the fact that the implementation implements
+ // the interface.
+ ImplementationInfo->addInterfaceImplemented(InterfaceInfo);
+
+ sys::SmartScopedLock<true> Guard(Lock);
+ AnalysisGroupInfo &AGI = AnalysisGroupInfoMap[InterfaceInfo];
+ assert(AGI.Implementations.count(ImplementationInfo) == 0 &&
+ "Cannot add a pass to the same analysis group more than once!");
+ AGI.Implementations.insert(ImplementationInfo);
+ if (isDefault) {
+ assert(InterfaceInfo->getNormalCtor() == 0 &&
+ "Default implementation for analysis group already specified!");
+ assert(ImplementationInfo->getNormalCtor() &&
+ "Cannot specify pass as default if it does not have a default ctor");
+ InterfaceInfo->setNormalCtor(ImplementationInfo->getNormalCtor());
+ }
+ }
+}
+
+void PassRegistry::addRegistrationListener(PassRegistrationListener *L) {
+ sys::SmartScopedLock<true> Guard(Lock);
+ Listeners.push_back(L);
+}
+
+void PassRegistry::removeRegistrationListener(PassRegistrationListener *L) {
+ sys::SmartScopedLock<true> Guard(Lock);
+ std::vector<PassRegistrationListener*>::iterator I =
+ std::find(Listeners.begin(), Listeners.end(), L);
+ assert(I != Listeners.end() && "PassRegistrationListener not registered!");
+ Listeners.erase(I);
+}
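
A small sketch of querying the new registry by command-line argument string, mirroring the lookups used throughout this patch (describePass is a hypothetical helper):

    #include "llvm/PassRegistry.h"
    #include "llvm/PassSupport.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    void describePass(const char *Arg) {
      // The registry is a lazily constructed singleton (double-checked locking
      // above); lookups take the registry lock, so this is safe from any thread.
      if (const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(Arg))
        errs() << PI->getPassArgument() << ": " << PI->getPassName() << "\n";
      else
        errs() << Arg << ": not registered\n";
    }
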
diff --git a/libclamav/c++/llvm/lib/VMCore/PrintModulePass.cpp b/libclamav/c++/llvm/lib/VMCore/PrintModulePass.cpp
index f0f6e7a..2ee49d2 100644
--- a/libclamav/c++/llvm/lib/VMCore/PrintModulePass.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/PrintModulePass.cpp
@@ -23,21 +23,22 @@ using namespace llvm;
namespace {
class PrintModulePass : public ModulePass {
+ std::string Banner;
raw_ostream *Out; // raw_ostream to print on
bool DeleteStream; // Delete the ostream in our dtor?
public:
static char ID;
- PrintModulePass() : ModulePass(&ID), Out(&dbgs()),
+ PrintModulePass() : ModulePass(ID), Out(&dbgs()),
DeleteStream(false) {}
- PrintModulePass(raw_ostream *o, bool DS)
- : ModulePass(&ID), Out(o), DeleteStream(DS) {}
+ PrintModulePass(const std::string &B, raw_ostream *o, bool DS)
+ : ModulePass(ID), Banner(B), Out(o), DeleteStream(DS) {}
~PrintModulePass() {
if (DeleteStream) delete Out;
}
bool runOnModule(Module &M) {
- (*Out) << M;
+ (*Out) << Banner << M;
return false;
}
@@ -52,12 +53,12 @@ namespace {
bool DeleteStream; // Delete the ostream in our dtor?
public:
static char ID;
- PrintFunctionPass() : FunctionPass(&ID), Banner(""), Out(&dbgs()),
+ PrintFunctionPass() : FunctionPass(ID), Banner(""), Out(&dbgs()),
DeleteStream(false) {}
PrintFunctionPass(const std::string &B, raw_ostream *o, bool DS)
- : FunctionPass(&ID), Banner(B), Out(o), DeleteStream(DS) {}
+ : FunctionPass(ID), Banner(B), Out(o), DeleteStream(DS) {}
- inline ~PrintFunctionPass() {
+ ~PrintFunctionPass() {
if (DeleteStream) delete Out;
}
@@ -76,17 +77,18 @@ namespace {
}
char PrintModulePass::ID = 0;
-static RegisterPass<PrintModulePass>
-X("print-module", "Print module to stderr");
+INITIALIZE_PASS(PrintModulePass, "print-module",
+ "Print module to stderr", false, false);
char PrintFunctionPass::ID = 0;
-static RegisterPass<PrintFunctionPass>
-Y("print-function","Print function to stderr");
+INITIALIZE_PASS(PrintFunctionPass, "print-function",
+ "Print function to stderr", false, false);
/// createPrintModulePass - Create and return a pass that writes the
/// module to the specified raw_ostream.
ModulePass *llvm::createPrintModulePass(llvm::raw_ostream *OS,
- bool DeleteStream) {
- return new PrintModulePass(OS, DeleteStream);
+ bool DeleteStream,
+ const std::string &Banner) {
+ return new PrintModulePass(Banner, OS, DeleteStream);
}
/// createPrintFunctionPass - Create and return a pass that prints
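
The PrintModulePass hunk makes two independent changes: pass IDs are now passed by reference rather than by address, and the module printer gains a Banner string written before the module itself. The banner mechanism is small enough to show in isolation; the sketch below uses plain iostream and an invented BannerPrinter name rather than the real pass plumbing:

  #include <iostream>
  #include <string>

  // A stripped-down module printer: write the banner, then the payload,
  // mirroring "(*Out) << Banner << M;" in the hunk.
  class BannerPrinter {
    std::string Banner;
    std::ostream *Out;
  public:
    BannerPrinter(const std::string &B, std::ostream *O) : Banner(B), Out(O) {}
    void run(const std::string &ModuleText) { (*Out) << Banner << ModuleText; }
  };

  int main() {
    BannerPrinter P("; *** IR Dump ***\n", &std::cout);
    P.run("define void @f() { ret void }\n");
  }
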
diff --git a/libclamav/c++/llvm/lib/VMCore/Type.cpp b/libclamav/c++/llvm/lib/VMCore/Type.cpp
index 2a0cfa8..c55e626 100644
--- a/libclamav/c++/llvm/lib/VMCore/Type.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/Type.cpp
@@ -50,19 +50,23 @@ void AbstractTypeUser::setType(Value *V, const Type *NewTy) {
/// Because of the way Type subclasses are allocated, this function is necessary
/// to use the correct kind of "delete" operator to deallocate the Type object.
-/// Some type objects (FunctionTy, StructTy, UnionTy) allocate additional space
+/// Some type objects (FunctionTy, StructTy) allocate additional space
/// after the space for their derived type to hold the contained types array of
/// PATypeHandles. Using this allocation scheme means all the PATypeHandles are
/// allocated with the type object, decreasing allocations and eliminating the
/// need for a std::vector to be used in the Type class itself.
/// @brief Type destruction function
void Type::destroy() const {
+ // Nothing calls getForwardedType from here on.
+ if (ForwardType && ForwardType->isAbstract()) {
+ ForwardType->dropRef();
+ ForwardType = NULL;
+ }
// Structures and Functions allocate their contained types past the end of
// the type object itself. These need to be destroyed differently than the
// other types.
- if (this->isFunctionTy() || this->isStructTy() ||
- this->isUnionTy()) {
+ if (this->isFunctionTy() || this->isStructTy()) {
// First, make sure we destruct any PATypeHandles allocated by these
// subclasses. They must be manually destructed.
for (unsigned i = 0; i < NumContainedTys; ++i)
@@ -72,10 +76,10 @@ void Type::destroy() const {
// to delete this as an array of char.
if (this->isFunctionTy())
static_cast<const FunctionType*>(this)->FunctionType::~FunctionType();
- else if (this->isStructTy())
+ else {
+ assert(isStructTy());
static_cast<const StructType*>(this)->StructType::~StructType();
- else
- static_cast<const UnionType*>(this)->UnionType::~UnionType();
+ }
// Finally, remove the memory as an array deallocation of the chars it was
// constructed from.
@@ -87,11 +91,6 @@ void Type::destroy() const {
pImpl->OpaqueTypes.erase(opaque_this);
}
- if (ForwardType && ForwardType->isAbstract()) {
- ForwardType->dropRef();
- ForwardType = NULL;
- }
-
// For all the other type subclasses, there is either no contained types or
// just one (all Sequentials). For Sequentials, the PATypeHandle is not
// allocated past the type object, its included directly in the SequentialType
@@ -234,7 +233,7 @@ bool Type::isSizedDerivedType() const {
if (const VectorType *PTy = dyn_cast<VectorType>(this))
return PTy->getElementType()->isSized();
- if (!this->isStructTy() && !this->isUnionTy())
+ if (!this->isStructTy())
return false;
// Okay, our struct is sized if all of the elements are...
@@ -319,31 +318,6 @@ const Type *StructType::getTypeAtIndex(unsigned Idx) const {
}
-bool UnionType::indexValid(const Value *V) const {
- // Union indexes require 32-bit integer constants.
- if (V->getType()->isIntegerTy(32))
- if (const ConstantInt *CU = dyn_cast<ConstantInt>(V))
- return indexValid(CU->getZExtValue());
- return false;
-}
-
-bool UnionType::indexValid(unsigned V) const {
- return V < NumContainedTys;
-}
-
-// getTypeAtIndex - Given an index value into the type, return the type of the
-// element. For a structure type, this must be a constant value...
-//
-const Type *UnionType::getTypeAtIndex(const Value *V) const {
- unsigned Idx = (unsigned)cast<ConstantInt>(V)->getZExtValue();
- return getTypeAtIndex(Idx);
-}
-
-const Type *UnionType::getTypeAtIndex(unsigned Idx) const {
- assert(indexValid(Idx) && "Invalid structure index!");
- return ContainedTys[Idx];
-}
-
//===----------------------------------------------------------------------===//
// Primitive 'Type' data
//===----------------------------------------------------------------------===//
@@ -380,6 +354,10 @@ const Type *Type::getPPC_FP128Ty(LLVMContext &C) {
return &C.pImpl->PPC_FP128Ty;
}
+const IntegerType *Type::getIntNTy(LLVMContext &C, unsigned N) {
+ return IntegerType::get(C, N);
+}
+
const IntegerType *Type::getInt1Ty(LLVMContext &C) {
return &C.pImpl->Int1Ty;
}
@@ -420,6 +398,10 @@ const PointerType *Type::getPPC_FP128PtrTy(LLVMContext &C, unsigned AS) {
return getPPC_FP128Ty(C)->getPointerTo(AS);
}
+const PointerType *Type::getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS) {
+ return getIntNTy(C, N)->getPointerTo(AS);
+}
+
const PointerType *Type::getInt1PtrTy(LLVMContext &C, unsigned AS) {
return getInt1Ty(C)->getPointerTo(AS);
}
@@ -447,8 +429,8 @@ const PointerType *Type::getInt64PtrTy(LLVMContext &C, unsigned AS) {
/// isValidReturnType - Return true if the specified type is valid as a return
/// type.
bool FunctionType::isValidReturnType(const Type *RetTy) {
- return RetTy->getTypeID() != LabelTyID &&
- RetTy->getTypeID() != MetadataTyID;
+ return !RetTy->isFunctionTy() && !RetTy->isLabelTy() &&
+ !RetTy->isMetadataTy();
}
/// isValidArgumentType - Return true if the specified type is valid as an
@@ -499,23 +481,6 @@ StructType::StructType(LLVMContext &C,
setAbstract(isAbstract);
}
-UnionType::UnionType(LLVMContext &C,const Type* const* Types, unsigned NumTypes)
- : CompositeType(C, UnionTyID) {
- ContainedTys = reinterpret_cast<PATypeHandle*>(this + 1);
- NumContainedTys = NumTypes;
- bool isAbstract = false;
- for (unsigned i = 0; i < NumTypes; ++i) {
- assert(Types[i] && "<null> type for union field!");
- assert(isValidElementType(Types[i]) &&
- "Invalid type for union element!");
- new (&ContainedTys[i]) PATypeHandle(Types[i], this);
- isAbstract |= Types[i]->isAbstract();
- }
-
- // Calculate whether or not this type is abstract
- setAbstract(isAbstract);
-}
-
ArrayType::ArrayType(const Type *ElType, uint64_t NumEl)
: SequentialType(ArrayTyID, ElType) {
NumElements = NumEl;
@@ -595,8 +560,8 @@ namespace llvm {
static inline ChildIteratorType child_begin(NodeType *N) {
if (N->isAbstract())
return N->subtype_begin();
- else // No need to process children of concrete types.
- return N->subtype_end();
+ // No need to process children of concrete types.
+ return N->subtype_end();
}
static inline ChildIteratorType child_end(NodeType *N) {
return N->subtype_end();
@@ -619,35 +584,35 @@ void Type::PromoteAbstractToConcrete() {
// Concrete types are leaves in the tree. Since an SCC will either be all
// abstract or all concrete, we only need to check one type.
- if (SCC[0]->isAbstract()) {
- if (SCC[0]->isOpaqueTy())
- return; // Not going to be concrete, sorry.
-
- // If all of the children of all of the types in this SCC are concrete,
- // then this SCC is now concrete as well. If not, neither this SCC, nor
- // any parent SCCs will be concrete, so we might as well just exit.
- for (unsigned i = 0, e = SCC.size(); i != e; ++i)
- for (Type::subtype_iterator CI = SCC[i]->subtype_begin(),
- E = SCC[i]->subtype_end(); CI != E; ++CI)
- if ((*CI)->isAbstract())
- // If the child type is in our SCC, it doesn't make the entire SCC
- // abstract unless there is a non-SCC abstract type.
- if (std::find(SCC.begin(), SCC.end(), *CI) == SCC.end())
- return; // Not going to be concrete, sorry.
-
- // Okay, we just discovered this whole SCC is now concrete, mark it as
- // such!
- for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
- assert(SCC[i]->isAbstract() && "Why are we processing concrete types?");
-
- SCC[i]->setAbstract(false);
- }
-
- for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
- assert(!SCC[i]->isAbstract() && "Concrete type became abstract?");
- // The type just became concrete, notify all users!
- cast<DerivedType>(SCC[i])->notifyUsesThatTypeBecameConcrete();
- }
+ if (!SCC[0]->isAbstract()) continue;
+
+ if (SCC[0]->isOpaqueTy())
+ return; // Not going to be concrete, sorry.
+
+ // If all of the children of all of the types in this SCC are concrete,
+ // then this SCC is now concrete as well. If not, neither this SCC, nor
+ // any parent SCCs will be concrete, so we might as well just exit.
+ for (unsigned i = 0, e = SCC.size(); i != e; ++i)
+ for (Type::subtype_iterator CI = SCC[i]->subtype_begin(),
+ E = SCC[i]->subtype_end(); CI != E; ++CI)
+ if ((*CI)->isAbstract())
+ // If the child type is in our SCC, it doesn't make the entire SCC
+ // abstract unless there is a non-SCC abstract type.
+ if (std::find(SCC.begin(), SCC.end(), *CI) == SCC.end())
+ return; // Not going to be concrete, sorry.
+
+ // Okay, we just discovered this whole SCC is now concrete, mark it as
+ // such!
+ for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
+ assert(SCC[i]->isAbstract() && "Why are we processing concrete types?");
+
+ SCC[i]->setAbstract(false);
+ }
+
+ for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
+ assert(!SCC[i]->isAbstract() && "Concrete type became abstract?");
+ // The type just became concrete, notify all users!
+ cast<DerivedType>(SCC[i])->notifyUsesThatTypeBecameConcrete();
}
}
}
@@ -685,11 +650,15 @@ static bool TypesEqual(const Type *Ty, const Type *Ty2,
if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
const IntegerType *ITy2 = cast<IntegerType>(Ty2);
return ITy->getBitWidth() == ITy2->getBitWidth();
- } else if (const PointerType *PTy = dyn_cast<PointerType>(Ty)) {
+ }
+
+ if (const PointerType *PTy = dyn_cast<PointerType>(Ty)) {
const PointerType *PTy2 = cast<PointerType>(Ty2);
return PTy->getAddressSpace() == PTy2->getAddressSpace() &&
TypesEqual(PTy->getElementType(), PTy2->getElementType(), EqTypes);
- } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ }
+
+ if (const StructType *STy = dyn_cast<StructType>(Ty)) {
const StructType *STy2 = cast<StructType>(Ty2);
if (STy->getNumElements() != STy2->getNumElements()) return false;
if (STy->isPacked() != STy2->isPacked()) return false;
@@ -697,22 +666,21 @@ static bool TypesEqual(const Type *Ty, const Type *Ty2,
if (!TypesEqual(STy->getElementType(i), STy2->getElementType(i), EqTypes))
return false;
return true;
- } else if (const UnionType *UTy = dyn_cast<UnionType>(Ty)) {
- const UnionType *UTy2 = cast<UnionType>(Ty2);
- if (UTy->getNumElements() != UTy2->getNumElements()) return false;
- for (unsigned i = 0, e = UTy2->getNumElements(); i != e; ++i)
- if (!TypesEqual(UTy->getElementType(i), UTy2->getElementType(i), EqTypes))
- return false;
- return true;
- } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ }
+
+ if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
const ArrayType *ATy2 = cast<ArrayType>(Ty2);
return ATy->getNumElements() == ATy2->getNumElements() &&
TypesEqual(ATy->getElementType(), ATy2->getElementType(), EqTypes);
- } else if (const VectorType *PTy = dyn_cast<VectorType>(Ty)) {
+ }
+
+ if (const VectorType *PTy = dyn_cast<VectorType>(Ty)) {
const VectorType *PTy2 = cast<VectorType>(Ty2);
return PTy->getNumElements() == PTy2->getNumElements() &&
TypesEqual(PTy->getElementType(), PTy2->getElementType(), EqTypes);
- } else if (const FunctionType *FTy = dyn_cast<FunctionType>(Ty)) {
+ }
+
+ if (const FunctionType *FTy = dyn_cast<FunctionType>(Ty)) {
const FunctionType *FTy2 = cast<FunctionType>(Ty2);
if (FTy->isVarArg() != FTy2->isVarArg() ||
FTy->getNumParams() != FTy2->getNumParams() ||
@@ -723,10 +691,10 @@ static bool TypesEqual(const Type *Ty, const Type *Ty2,
return false;
}
return true;
- } else {
- llvm_unreachable("Unknown derived type!");
- return false;
}
+
+ llvm_unreachable("Unknown derived type!");
+ return false;
}
namespace llvm { // in namespace llvm so findable by ADL
@@ -800,13 +768,13 @@ const IntegerType *IntegerType::get(LLVMContext &C, unsigned NumBits) {
// Check for the built-in integer types
switch (NumBits) {
- case 1: return cast<IntegerType>(Type::getInt1Ty(C));
- case 8: return cast<IntegerType>(Type::getInt8Ty(C));
- case 16: return cast<IntegerType>(Type::getInt16Ty(C));
- case 32: return cast<IntegerType>(Type::getInt32Ty(C));
- case 64: return cast<IntegerType>(Type::getInt64Ty(C));
- default:
- break;
+ case 1: return cast<IntegerType>(Type::getInt1Ty(C));
+ case 8: return cast<IntegerType>(Type::getInt8Ty(C));
+ case 16: return cast<IntegerType>(Type::getInt16Ty(C));
+ case 32: return cast<IntegerType>(Type::getInt32Ty(C));
+ case 64: return cast<IntegerType>(Type::getInt64Ty(C));
+ default:
+ break;
}
LLVMContextImpl *pImpl = C.pImpl;
@@ -894,8 +862,8 @@ ArrayType *ArrayType::get(const Type *ElementType, uint64_t NumElements) {
}
bool ArrayType::isValidElementType(const Type *ElemTy) {
- return ElemTy->getTypeID() != VoidTyID && ElemTy->getTypeID() != LabelTyID &&
- ElemTy->getTypeID() != MetadataTyID && !ElemTy->isFunctionTy();
+ return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
+ !ElemTy->isMetadataTy() && !ElemTy->isFunctionTy();
}
VectorType *VectorType::get(const Type *ElementType, unsigned NumElements) {
@@ -967,60 +935,6 @@ bool StructType::isValidElementType(const Type *ElemTy) {
//===----------------------------------------------------------------------===//
-// Union Type Factory...
-//
-
-UnionType *UnionType::get(const Type* const* Types, unsigned NumTypes) {
- assert(NumTypes > 0 && "union must have at least one member type!");
- UnionValType UTV(Types, NumTypes);
- UnionType *UT = 0;
-
- LLVMContextImpl *pImpl = Types[0]->getContext().pImpl;
-
- UT = pImpl->UnionTypes.get(UTV);
-
- if (!UT) {
- // Value not found. Derive a new type!
- UT = (UnionType*) operator new(sizeof(UnionType) +
- sizeof(PATypeHandle) * NumTypes);
- new (UT) UnionType(Types[0]->getContext(), Types, NumTypes);
- pImpl->UnionTypes.add(UTV, UT);
- }
-#ifdef DEBUG_MERGE_TYPES
- DEBUG(dbgs() << "Derived new type: " << *UT << "\n");
-#endif
- return UT;
-}
-
-UnionType *UnionType::get(const Type *type, ...) {
- va_list ap;
- SmallVector<const llvm::Type*, 8> UnionFields;
- va_start(ap, type);
- while (type) {
- UnionFields.push_back(type);
- type = va_arg(ap, llvm::Type*);
- }
- unsigned NumTypes = UnionFields.size();
- assert(NumTypes > 0 && "union must have at least one member type!");
- return llvm::UnionType::get(&UnionFields[0], NumTypes);
-}
-
-bool UnionType::isValidElementType(const Type *ElemTy) {
- return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
- !ElemTy->isMetadataTy() && !ElemTy->isFunctionTy();
-}
-
-int UnionType::getElementTypeIndex(const Type *ElemTy) const {
- int index = 0;
- for (UnionType::element_iterator I = element_begin(), E = element_end();
- I != E; ++I, ++index) {
- if (ElemTy == *I) return index;
- }
-
- return -1;
-}
-
-//===----------------------------------------------------------------------===//
// Pointer Type Factory...
//
@@ -1052,9 +966,8 @@ const PointerType *Type::getPointerTo(unsigned addrs) const {
}
bool PointerType::isValidElementType(const Type *ElemTy) {
- return ElemTy->getTypeID() != VoidTyID &&
- ElemTy->getTypeID() != LabelTyID &&
- ElemTy->getTypeID() != MetadataTyID;
+ return !ElemTy->isVoidTy() && !ElemTy->isLabelTy() &&
+ !ElemTy->isMetadataTy();
}
@@ -1063,8 +976,7 @@ bool PointerType::isValidElementType(const Type *ElemTy) {
//
OpaqueType *OpaqueType::get(LLVMContext &C) {
- OpaqueType *OT = new OpaqueType(C); // All opaque types are distinct
-
+ OpaqueType *OT = new OpaqueType(C); // All opaque types are distinct.
LLVMContextImpl *pImpl = C.pImpl;
pImpl->OpaqueTypes.insert(OT);
return OT;
@@ -1115,18 +1027,17 @@ void Type::removeAbstractTypeUser(AbstractTypeUser *U) const {
<< ">[" << (void*)this << "]" << "\n");
#endif
- this->destroy();
+ this->destroy();
}
-
}
-// unlockedRefineAbstractTypeTo - This function is used when it is discovered
+// refineAbstractTypeTo - This function is used when it is discovered
// that the 'this' abstract type is actually equivalent to the NewType
// specified. This causes all users of 'this' to switch to reference the more
// concrete type NewType and for 'this' to be deleted. Only used for internal
// callers.
//
-void DerivedType::unlockedRefineAbstractTypeTo(const Type *NewType) {
+void DerivedType::refineAbstractTypeTo(const Type *NewType) {
assert(isAbstract() && "refineAbstractTypeTo: Current type is not abstract!");
assert(this != NewType && "Can't refine to myself!");
assert(ForwardType == 0 && "This type has already been refined!");
@@ -1191,15 +1102,6 @@ void DerivedType::unlockedRefineAbstractTypeTo(const Type *NewType) {
// destroyed.
}
-// refineAbstractTypeTo - This function is used by external callers to notify
-// us that this abstract type is equivalent to another type.
-//
-void DerivedType::refineAbstractTypeTo(const Type *NewType) {
- // All recursive calls will go through unlockedRefineAbstractTypeTo,
- // to avoid deadlock problems.
- unlockedRefineAbstractTypeTo(NewType);
-}
-
// notifyUsesThatTypeBecameConcrete - Notify AbstractTypeUsers of this type that
// the current type has transitioned from being abstract to being concrete.
//
@@ -1283,21 +1185,6 @@ void StructType::typeBecameConcrete(const DerivedType *AbsTy) {
// concrete - this could potentially change us from an abstract type to a
// concrete type.
//
-void UnionType::refineAbstractType(const DerivedType *OldType,
- const Type *NewType) {
- LLVMContextImpl *pImpl = OldType->getContext().pImpl;
- pImpl->UnionTypes.RefineAbstractType(this, OldType, NewType);
-}
-
-void UnionType::typeBecameConcrete(const DerivedType *AbsTy) {
- LLVMContextImpl *pImpl = AbsTy->getContext().pImpl;
- pImpl->UnionTypes.TypeBecameConcrete(this, AbsTy);
-}
-
-// refineAbstractType - Called when a contained type is found to be more
-// concrete - this could potentially change us from an abstract type to a
-// concrete type.
-//
void PointerType::refineAbstractType(const DerivedType *OldType,
const Type *NewType) {
LLVMContextImpl *pImpl = OldType->getContext().pImpl;
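
Most of the Type.cpp churn is the removal of UnionType, but the destroy() comment above describes a scheme worth spelling out: FunctionType and StructType keep their contained-type handles in the same allocation, immediately after the object, so teardown has to run the element destructors by hand and then release the storage as raw bytes. A self-contained sketch of that layout (Node and Elem are invented stand-ins; the std::size_t count keeps the trailing array suitably aligned in this toy version):

  #include <cstddef>
  #include <new>
  #include <string>

  struct Elem { std::string Name; };   // stand-in for PATypeHandle

  // A node whose Elem array lives in the same allocation, right after the
  // object itself.
  struct Node {
    std::size_t NumElems;
    Elem *elems() { return reinterpret_cast<Elem *>(this + 1); }

    static Node *create(std::size_t N) {
      // One raw buffer holds the Node followed by N Elem slots.
      char *Buf = new char[sizeof(Node) + N * sizeof(Elem)];
      Node *Nd = new (Buf) Node();
      Nd->NumElems = N;
      for (std::size_t i = 0; i != N; ++i)
        new (&Nd->elems()[i]) Elem();
      return Nd;
    }

    void destroy() {
      // The trailing Elems were placement-new'ed, so destruct them by hand...
      for (std::size_t i = 0; i != NumElems; ++i)
        elems()[i].~Elem();
      // ...then the node itself, then release the storage as the chars it
      // was constructed from.
      this->~Node();
      delete[] reinterpret_cast<char *>(this);
    }
  };

  int main() {
    Node *N = Node::create(3);
    N->destroy();
  }
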
diff --git a/libclamav/c++/llvm/lib/VMCore/TypeSymbolTable.cpp b/libclamav/c++/llvm/lib/VMCore/TypeSymbolTable.cpp
index b4daf0f..d68a44b 100644
--- a/libclamav/c++/llvm/lib/VMCore/TypeSymbolTable.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/TypeSymbolTable.cpp
@@ -126,13 +126,15 @@ void TypeSymbolTable::refineAbstractType(const DerivedType *OldType,
// faster to remove them all in one pass.
//
for (iterator I = begin(), E = end(); I != E; ++I) {
- if (I->second == (Type*)OldType) { // FIXME when Types aren't const.
+ // FIXME when Types aren't const.
+ if (I->second == const_cast<DerivedType *>(OldType)) {
#if DEBUG_ABSTYPE
dbgs() << "Removing type " << OldType->getDescription() << "\n";
#endif
OldType->removeAbstractTypeUser(this);
- I->second = (Type*)NewType; // TODO FIXME when types aren't const
+ // TODO FIXME when types aren't const
+ I->second = const_cast<Type *>(NewType);
if (NewType->isAbstract()) {
#if DEBUG_ABSTYPE
dbgs() << "Added type " << NewType->getDescription() << "\n";
diff --git a/libclamav/c++/llvm/lib/VMCore/TypesContext.h b/libclamav/c++/llvm/lib/VMCore/TypesContext.h
index 02ab113..5a90917 100644
--- a/libclamav/c++/llvm/lib/VMCore/TypesContext.h
+++ b/libclamav/c++/llvm/lib/VMCore/TypesContext.h
@@ -180,32 +180,6 @@ public:
}
};
-// UnionValType - Define a class to hold the key that goes into the TypeMap
-//
-class UnionValType {
- std::vector<const Type*> ElTypes;
-public:
- UnionValType(const Type* const* Types, unsigned NumTypes)
- : ElTypes(&Types[0], &Types[NumTypes]) {}
-
- static UnionValType get(const UnionType *UT) {
- std::vector<const Type *> ElTypes;
- ElTypes.reserve(UT->getNumElements());
- for (unsigned i = 0, e = UT->getNumElements(); i != e; ++i)
- ElTypes.push_back(UT->getElementType(i));
-
- return UnionValType(&ElTypes[0], ElTypes.size());
- }
-
- static unsigned hashTypeStructure(const UnionType *UT) {
- return UT->getNumElements();
- }
-
- inline bool operator<(const UnionValType &UTV) const {
- return (ElTypes < UTV.ElTypes);
- }
-};
-
// FunctionValType - Define a class to hold the key that goes into the TypeMap
//
class FunctionValType {
@@ -370,7 +344,7 @@ public:
// We already have this type in the table. Get rid of the newly refined
// type.
TypeClass *NewTy = cast<TypeClass>((Type*)I->second.get());
- Ty->unlockedRefineAbstractTypeTo(NewTy);
+ Ty->refineAbstractTypeTo(NewTy);
return;
}
} else {
@@ -385,31 +359,33 @@ public:
if (I->second == Ty) {
// Remember the position of the old type if we see it in our scan.
Entry = I;
+ continue;
+ }
+
+ if (!TypesEqual(Ty, I->second))
+ continue;
+
+ TypeClass *NewTy = cast<TypeClass>((Type*)I->second.get());
+
+ // Remove the old entry from TypesByHash. If the hash values differ
+ // now, remove it from the old place. Otherwise, continue scanning
+ // within this hashcode to reduce work.
+ if (NewTypeHash != OldTypeHash) {
+ RemoveFromTypesByHash(OldTypeHash, Ty);
} else {
- if (TypesEqual(Ty, I->second)) {
- TypeClass *NewTy = cast<TypeClass>((Type*)I->second.get());
-
- // Remove the old entry form TypesByHash. If the hash values differ
- // now, remove it from the old place. Otherwise, continue scanning
- // withing this hashcode to reduce work.
- if (NewTypeHash != OldTypeHash) {
- RemoveFromTypesByHash(OldTypeHash, Ty);
- } else {
- if (Entry == E) {
- // Find the location of Ty in the TypesByHash structure if we
- // haven't seen it already.
- while (I->second != Ty) {
- ++I;
- assert(I != E && "Structure doesn't contain type??");
- }
- Entry = I;
- }
- TypesByHash.erase(Entry);
+ if (Entry == E) {
+ // Find the location of Ty in the TypesByHash structure if we
+ // haven't seen it already.
+ while (I->second != Ty) {
+ ++I;
+ assert(I != E && "Structure doesn't contain type??");
}
- Ty->unlockedRefineAbstractTypeTo(NewTy);
- return;
+ Entry = I;
}
+ TypesByHash.erase(Entry);
}
+ Ty->refineAbstractTypeTo(NewTy);
+ return;
}
// If there is no existing type of the same structure, we reinsert an
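
The TypesContext.h hunk is a control-flow flattening: the scan over one TypesByHash bucket now uses continue and early return instead of nested else blocks, and unlockedRefineAbstractTypeTo becomes refineAbstractTypeTo. A greatly simplified sketch of just the scan shape (Ty and the Shape-based equality test are invented; the real code also removes the stale bucket entry before refining):

  #include <unordered_map>
  #include <utility>

  struct Ty { unsigned Shape; };   // "structural" identity stand-in

  // A continue-driven scan of one hash bucket, in the shape of the flattened
  // loop: skip our own slot, skip entries that are not structurally equal,
  // return the first equivalent entry (or 0 if there is none).
  Ty *findEquivalent(const std::unordered_multimap<unsigned, Ty *> &Bucket,
                     unsigned Hash, const Ty *Self) {
    typedef std::unordered_multimap<unsigned, Ty *>::const_iterator Iter;
    std::pair<Iter, Iter> Range = Bucket.equal_range(Hash);
    for (Iter I = Range.first; I != Range.second; ++I) {
      if (I->second == Self)
        continue;                              // our own slot: skip it
      if (I->second->Shape != Self->Shape)
        continue;                              // not equal: keep scanning
      return I->second;                        // equivalent entry found
    }
    return 0;
  }
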
diff --git a/libclamav/c++/llvm/lib/VMCore/Use.cpp b/libclamav/c++/llvm/lib/VMCore/Use.cpp
index b7fd92f..fec710b 100644
--- a/libclamav/c++/llvm/lib/VMCore/Use.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/Use.cpp
@@ -86,14 +86,27 @@ const Use *Use::getImpliedUser() const {
//===----------------------------------------------------------------------===//
Use *Use::initTags(Use * const Start, Use *Stop, ptrdiff_t Done) {
+ while (Done < 20) {
+ if (Start == Stop--)
+ return Start;
+ static const PrevPtrTag tags[20] = { fullStopTag, oneDigitTag, stopTag,
+ oneDigitTag, oneDigitTag, stopTag,
+ zeroDigitTag, oneDigitTag, oneDigitTag,
+ stopTag, zeroDigitTag, oneDigitTag,
+ zeroDigitTag, oneDigitTag, stopTag,
+ oneDigitTag, oneDigitTag, oneDigitTag,
+ oneDigitTag, stopTag
+ };
+ Stop->Prev.setFromOpaqueValue(reinterpret_cast<Use**>(tags[Done++]));
+ Stop->Val = 0;
+ }
+
ptrdiff_t Count = Done;
while (Start != Stop) {
--Stop;
Stop->Val = 0;
if (!Count) {
- Stop->Prev.setFromOpaqueValue(reinterpret_cast<Use**>(Done == 0
- ? fullStopTag
- : stopTag));
+ Stop->Prev.setFromOpaqueValue(reinterpret_cast<Use**>(stopTag));
++Done;
Count = Done;
} else {
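
Use::initTags gains a fast path: tags for the first 20 slots are read from a precomputed table rather than derived by the general counting loop that follows. The sketch below shows only that table-then-fallback shape; the first eight table entries are copied from the hunk, but the fallback rule here is a placeholder and is not LLVM's use-list encoding:

  #include <cstddef>
  #include <vector>

  enum Tag { zeroDigitTag, oneDigitTag, stopTag, fullStopTag };

  // Placeholder rule for slots beyond the table; NOT LLVM's encoding.
  static Tag generalRule(std::size_t i) {
    return (i % 3 == 0) ? stopTag : oneDigitTag;
  }

  void initTags(std::vector<Tag> &Slots) {
    // First eight entries copied from the table in the hunk above.
    static const Tag Precomputed[8] = { fullStopTag, oneDigitTag, stopTag,
                                        oneDigitTag, oneDigitTag, stopTag,
                                        zeroDigitTag, oneDigitTag };
    std::size_t i = 0;
    for (; i != Slots.size() && i != 8; ++i)
      Slots[i] = Precomputed[i];               // fast path: table lookup
    for (; i != Slots.size(); ++i)
      Slots[i] = generalRule(i);               // fallback: compute the tag
  }
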
diff --git a/libclamav/c++/llvm/lib/VMCore/Value.cpp b/libclamav/c++/llvm/lib/VMCore/Value.cpp
index a36d262..b8c6775 100644
--- a/libclamav/c++/llvm/lib/VMCore/Value.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/Value.cpp
@@ -86,7 +86,7 @@ Value::~Value() {
/// hasNUses - Return true if this Value has exactly N users.
///
bool Value::hasNUses(unsigned N) const {
- use_const_iterator UI = use_begin(), E = use_end();
+ const_use_iterator UI = use_begin(), E = use_end();
for (; N; --N, ++UI)
if (UI == E) return false; // Too few.
@@ -97,7 +97,7 @@ bool Value::hasNUses(unsigned N) const {
/// logically equivalent to getNumUses() >= N.
///
bool Value::hasNUsesOrMore(unsigned N) const {
- use_const_iterator UI = use_begin(), E = use_end();
+ const_use_iterator UI = use_begin(), E = use_end();
for (; N; --N, ++UI)
if (UI == E) return false; // Too few.
@@ -108,7 +108,7 @@ bool Value::hasNUsesOrMore(unsigned N) const {
/// isUsedInBasicBlock - Return true if this value is used in the specified
/// basic block.
bool Value::isUsedInBasicBlock(const BasicBlock *BB) const {
- for (use_const_iterator I = use_begin(), E = use_end(); I != E; ++I) {
+ for (const_use_iterator I = use_begin(), E = use_end(); I != E; ++I) {
const Instruction *User = dyn_cast<Instruction>(*I);
if (User && User->getParent() == BB)
return true;
@@ -139,10 +139,6 @@ static bool getSymTab(Value *V, ValueSymbolTable *&ST) {
} else if (Argument *A = dyn_cast<Argument>(V)) {
if (Function *P = A->getParent())
ST = &P->getValueSymbolTable();
- } else if (NamedMDNode *N = dyn_cast<NamedMDNode>(V)) {
- if (Module *P = N->getParent()) {
- ST = &P->getValueSymbolTable();
- }
} else if (isa<MDString>(V))
return true;
else {
@@ -322,7 +318,13 @@ void Value::replaceAllUsesWith(Value *New) {
Value *Value::stripPointerCasts() {
if (!getType()->isPointerTy())
return this;
+
+ // Even though we don't look through PHI nodes, we could be called on an
+ // instruction in an unreachable block, which may be on a cycle.
+ SmallPtrSet<Value *, 4> Visited;
+
Value *V = this;
+ Visited.insert(V);
do {
if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
if (!GEP->hasAllZeroIndices())
@@ -338,7 +340,9 @@ Value *Value::stripPointerCasts() {
return V;
}
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
- } while (1);
+ } while (Visited.insert(V));
+
+ return V;
}
Value *Value::getUnderlyingObject(unsigned MaxLookup) {
@@ -484,10 +488,15 @@ void ValueHandleBase::ValueIsDeleted(Value *V) {
ValueHandleBase *Entry = pImpl->ValueHandles[V];
assert(Entry && "Value bit set but no entries exist");
- // We use a local ValueHandleBase as an iterator so that
- // ValueHandles can add and remove themselves from the list without
- // breaking our iteration. This is not really an AssertingVH; we
- // just have to give ValueHandleBase some kind.
+ // We use a local ValueHandleBase as an iterator so that ValueHandles can add
+ // and remove themselves from the list without breaking our iteration. This
+ // is not really an AssertingVH; we just have to give ValueHandleBase a kind.
+ // Note that we deliberately do not support the case when dropping a value
+ // handle results in a new value handle being permanently added to the list
+ // (as might occur in theory for CallbackVH's): the new value handle will not
+ // be processed and the checking code will mete out righteous punishment if
+ // the handle is still present once we have finished processing all the other
+ // value handles (it is fine to momentarily add then remove a value handle).
for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
Iterator.RemoveFromUseList();
Iterator.AddToExistingUseListAfter(Entry);
@@ -568,6 +577,24 @@ void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
break;
}
}
+
+#ifndef NDEBUG
+ // If any new tracking or weak value handles were added while processing the
+ // list, then complain about it now.
+ if (Old->HasValueHandle)
+ for (Entry = pImpl->ValueHandles[Old]; Entry; Entry = Entry->Next)
+ switch (Entry->getKind()) {
+ case Tracking:
+ case Weak:
+ dbgs() << "After RAUW from " << *Old->getType() << " %"
+ << Old->getNameStr() << " to " << *New->getType() << " %"
+ << New->getNameStr() << "\n";
+ llvm_unreachable("A tracking or weak value handle still pointed to the"
+ " old value!\n");
+ default:
+ break;
+ }
+#endif
}
/// ~CallbackVH. Empty, but defined here to avoid emitting the vtable
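
The interesting Value.cpp change is in stripPointerCasts: the stripping loop now carries a visited set because, on an instruction in an unreachable block, the chain of casts and GEPs may form a cycle and the old do/while(1) could spin forever. The same guard, reduced to a generic pointer-chasing walk over an invented Node type:

  #include <unordered_set>

  struct Node { Node *Next; };   // "the value this one is a cast/GEP of"

  // Follow Next links until the chain ends or a node repeats; the visited set
  // is what keeps a cycle (possible in unreachable code) from looping forever.
  Node *stripChain(Node *V) {
    std::unordered_set<Node *> Visited;
    Visited.insert(V);
    while (V->Next) {
      Node *NextV = V->Next;
      if (!Visited.insert(NextV).second)
        return V;                  // already seen: we are on a cycle, stop
      V = NextV;
    }
    return V;
  }
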
diff --git a/libclamav/c++/llvm/lib/VMCore/ValueSymbolTable.cpp b/libclamav/c++/llvm/lib/VMCore/ValueSymbolTable.cpp
index d30a9d6..254bf06 100644
--- a/libclamav/c++/llvm/lib/VMCore/ValueSymbolTable.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/ValueSymbolTable.cpp
@@ -55,9 +55,7 @@ void ValueSymbolTable::reinsertValue(Value* V) {
raw_svector_ostream(UniqueName) << ++LastUnique;
// Try insert the vmap entry with this suffix.
- ValueName &NewName =
- vmap.GetOrCreateValue(StringRef(UniqueName.data(),
- UniqueName.size()));
+ ValueName &NewName = vmap.GetOrCreateValue(UniqueName);
if (NewName.getValue() == 0) {
// Newly inserted name. Success!
NewName.setValue(V);
@@ -88,7 +86,7 @@ ValueName *ValueSymbolTable::createValueName(StringRef Name, Value *V) {
}
// Otherwise, there is a naming conflict. Rename this value.
- SmallString<128> UniqueName(Name.begin(), Name.end());
+ SmallString<256> UniqueName(Name.begin(), Name.end());
while (1) {
// Trim any suffix off and append the next number.
@@ -96,9 +94,7 @@ ValueName *ValueSymbolTable::createValueName(StringRef Name, Value *V) {
raw_svector_ostream(UniqueName) << ++LastUnique;
// Try insert the vmap entry with this suffix.
- ValueName &NewName =
- vmap.GetOrCreateValue(StringRef(UniqueName.data(),
- UniqueName.size()));
+ ValueName &NewName = vmap.GetOrCreateValue(UniqueName);
if (NewName.getValue() == 0) {
// Newly inserted name. Success!
NewName.setValue(V);
@@ -119,5 +115,3 @@ void ValueSymbolTable::dump() const {
//DEBUG(dbgs() << "\n");
}
}
-
-MDSymbolTable::~MDSymbolTable() { }
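
ValueSymbolTable's renaming loop keeps appending an incrementing numeric suffix until an insertion finally succeeds; the hunks above only widen the stack buffer and drop a redundant StringRef construction around it. A tiny standalone version of that uniquing idea (it differs in detail: no shared LastUnique counter, and a std::set stands in for the string map):

  #include <set>
  #include <string>

  // Return Name, or Name plus the first numeric suffix that is still free.
  std::string uniqueName(std::set<std::string> &Taken, const std::string &Name) {
    if (Taken.insert(Name).second)
      return Name;                             // no conflict: use it as-is
    for (unsigned Suffix = 1; ; ++Suffix) {
      std::string Candidate = Name + std::to_string(Suffix);
      if (Taken.insert(Candidate).second)
        return Candidate;                      // newly inserted name: success
    }
  }
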
diff --git a/libclamav/c++/llvm/lib/VMCore/ValueTypes.cpp b/libclamav/c++/llvm/lib/VMCore/ValueTypes.cpp
index a092cd1..d2a8ce3 100644
--- a/libclamav/c++/llvm/lib/VMCore/ValueTypes.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/ValueTypes.cpp
@@ -61,6 +61,10 @@ bool EVT::isExtended256BitVector() const {
return isExtendedVector() && getSizeInBits() == 256;
}
+bool EVT::isExtended512BitVector() const {
+ return isExtendedVector() && getSizeInBits() == 512;
+}
+
EVT EVT::getExtendedVectorElementType() const {
assert(isExtended() && "Type is not extended!");
return EVT::getEVT(cast<VectorType>(LLVMTy)->getElementType());
@@ -121,6 +125,7 @@ std::string EVT::getEVTString() const {
case MVT::v1i64: return "v1i64";
case MVT::v2i64: return "v2i64";
case MVT::v4i64: return "v4i64";
+ case MVT::v8i64: return "v8i64";
case MVT::v2f32: return "v2f32";
case MVT::v4f32: return "v4f32";
case MVT::v8f32: return "v8f32";
@@ -165,6 +170,7 @@ const Type *EVT::getTypeForEVT(LLVMContext &Context) const {
case MVT::v1i64: return VectorType::get(Type::getInt64Ty(Context), 1);
case MVT::v2i64: return VectorType::get(Type::getInt64Ty(Context), 2);
case MVT::v4i64: return VectorType::get(Type::getInt64Ty(Context), 4);
+ case MVT::v8i64: return VectorType::get(Type::getInt64Ty(Context), 8);
case MVT::v2f32: return VectorType::get(Type::getFloatTy(Context), 2);
case MVT::v4f32: return VectorType::get(Type::getFloatTy(Context), 4);
case MVT::v8f32: return VectorType::get(Type::getFloatTy(Context), 8);
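
ValueTypes.cpp simply teaches EVT about 512-bit vectors: a v8i64 case in both tables plus an isExtended512BitVector predicate. The underlying check is nothing more than element count times element width:

  #include <cassert>

  // Width of a vector type in bits, and the 512-bit case added above.
  unsigned vectorSizeInBits(unsigned NumElements, unsigned ElementBits) {
    return NumElements * ElementBits;
  }

  int main() {
    assert(vectorSizeInBits(8, 64) == 512);    // v8i64 is the new 512-bit case
    assert(vectorSizeInBits(4, 64) == 256);    // v4i64 remains 256-bit
    return 0;
  }
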
diff --git a/libclamav/c++/llvm/lib/VMCore/Verifier.cpp b/libclamav/c++/llvm/lib/VMCore/Verifier.cpp
index 721e96a..e3ecc97 100644
--- a/libclamav/c++/llvm/lib/VMCore/Verifier.cpp
+++ b/libclamav/c++/llvm/lib/VMCore/Verifier.cpp
@@ -72,7 +72,7 @@ namespace { // Anonymous namespace for class
struct PreVerifier : public FunctionPass {
static char ID; // Pass ID, replacement for typeid
- PreVerifier() : FunctionPass(&ID) { }
+ PreVerifier() : FunctionPass(ID) { }
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
@@ -85,7 +85,8 @@ namespace { // Anonymous namespace for class
for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
if (I->empty() || !I->back().isTerminator()) {
- dbgs() << "Basic Block does not have terminator!\n";
+ dbgs() << "Basic Block in function '" << F.getName()
+ << "' does not have terminator!\n";
WriteAsOperand(dbgs(), I, true);
dbgs() << "\n";
Broken = true;
@@ -93,7 +94,7 @@ namespace { // Anonymous namespace for class
}
if (Broken)
- llvm_report_error("Broken module, no Basic Block terminator!");
+ report_fatal_error("Broken module, no Basic Block terminator!");
return false;
}
@@ -101,9 +102,9 @@ namespace { // Anonymous namespace for class
}
char PreVerifier::ID = 0;
-static RegisterPass<PreVerifier>
-PreVer("preverify", "Preliminary module verification");
-static const PassInfo *const PreVerifyID = &PreVer;
+INITIALIZE_PASS(PreVerifier, "preverify", "Preliminary module verification",
+ false, false);
+char &PreVerifyID = PreVerifier::ID;
namespace {
class TypeSet : public AbstractTypeUser {
@@ -176,24 +177,18 @@ namespace {
/// Types - keep track of the types that have been checked already.
TypeSet Types;
+ /// MDNodes - keep track of the metadata nodes that have been checked
+ /// already.
+ SmallPtrSet<MDNode *, 32> MDNodes;
+
Verifier()
- : FunctionPass(&ID),
+ : FunctionPass(ID),
Broken(false), RealPass(true), action(AbortProcessAction),
Mod(0), Context(0), DT(0), MessagesStr(Messages) {}
explicit Verifier(VerifierFailureAction ctn)
- : FunctionPass(&ID),
+ : FunctionPass(ID),
Broken(false), RealPass(true), action(ctn), Mod(0), Context(0), DT(0),
MessagesStr(Messages) {}
- explicit Verifier(bool AB)
- : FunctionPass(&ID),
- Broken(false), RealPass(true),
- action( AB ? AbortProcessAction : PrintMessageAction), Mod(0),
- Context(0), DT(0), MessagesStr(Messages) {}
- explicit Verifier(DominatorTree &dt)
- : FunctionPass(&ID),
- Broken(false), RealPass(false), action(PrintMessageAction), Mod(0),
- Context(0), DT(&dt), MessagesStr(Messages) {}
-
bool doInitialization(Module &M) {
Mod = &M;
@@ -244,6 +239,10 @@ namespace {
I != E; ++I)
visitGlobalAlias(*I);
+ for (Module::named_metadata_iterator I = M.named_metadata_begin(),
+ E = M.named_metadata_end(); I != E; ++I)
+ visitNamedMDNode(*I);
+
// If the module is broken, abort at this time.
return abortIfBroken();
}
@@ -284,6 +283,8 @@ namespace {
void visitGlobalValue(GlobalValue &GV);
void visitGlobalVariable(GlobalVariable &GV);
void visitGlobalAlias(GlobalAlias &GA);
+ void visitNamedMDNode(NamedMDNode &NMD);
+ void visitMDNode(MDNode &MD, Function *F);
void visitFunction(Function &F);
void visitBasicBlock(BasicBlock &BB);
using InstVisitor<Verifier>::visit;
@@ -320,6 +321,7 @@ namespace {
void visitBranchInst(BranchInst &BI);
void visitReturnInst(ReturnInst &RI);
void visitSwitchInst(SwitchInst &SI);
+ void visitIndirectBrInst(IndirectBrInst &BI);
void visitSelectInst(SelectInst &SI);
void visitUserOp1(Instruction &I);
void visitUserOp2(Instruction &I) { visitUserOp1(I); }
@@ -333,8 +335,6 @@ namespace {
int VT, unsigned ArgNo, std::string &Suffix);
void VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F,
unsigned RetNum, unsigned ParamNum, ...);
- void VerifyFunctionLocalMetadata(MDNode *N, Function *F,
- SmallPtrSet<MDNode *, 32> &Visited);
void VerifyParameterAttrs(Attributes Attrs, const Type *Ty,
bool isReturnValue, const Value *V);
void VerifyFunctionAttrs(const FunctionType *FT, const AttrListPtr &Attrs,
@@ -393,7 +393,7 @@ namespace {
} // End anonymous namespace
char Verifier::ID = 0;
-static RegisterPass<Verifier> X("verify", "Module Verifier");
+INITIALIZE_PASS(Verifier, "verify", "Module Verifier", false, false);
// Assert - We know that cond should be true, if not print an error message.
#define Assert(C, M) \
@@ -436,6 +436,10 @@ void Verifier::visitGlobalValue(GlobalValue &GV) {
Assert1(GVar && GVar->getType()->getElementType()->isArrayTy(),
"Only global arrays can have appending linkage!", GVar);
}
+
+ Assert1(!GV.hasLinkerPrivateWeakDefAutoLinkage() || GV.hasDefaultVisibility(),
+ "linker_private_weak_def_auto can only have default visibility!",
+ &GV);
}
void Verifier::visitGlobalVariable(GlobalVariable &GV) {
@@ -489,6 +493,54 @@ void Verifier::visitGlobalAlias(GlobalAlias &GA) {
visitGlobalValue(GA);
}
+void Verifier::visitNamedMDNode(NamedMDNode &NMD) {
+ for (unsigned i = 0, e = NMD.getNumOperands(); i != e; ++i) {
+ MDNode *MD = NMD.getOperand(i);
+ if (!MD)
+ continue;
+
+ Assert1(!MD->isFunctionLocal(),
+ "Named metadata operand cannot be function local!", MD);
+ visitMDNode(*MD, 0);
+ }
+}
+
+void Verifier::visitMDNode(MDNode &MD, Function *F) {
+ // Only visit each node once. Metadata can be mutually recursive, so this
+ // avoids infinite recursion here, as well as being an optimization.
+ if (!MDNodes.insert(&MD))
+ return;
+
+ for (unsigned i = 0, e = MD.getNumOperands(); i != e; ++i) {
+ Value *Op = MD.getOperand(i);
+ if (!Op)
+ continue;
+ if (isa<Constant>(Op) || isa<MDString>(Op))
+ continue;
+ if (MDNode *N = dyn_cast<MDNode>(Op)) {
+ Assert2(MD.isFunctionLocal() || !N->isFunctionLocal(),
+ "Global metadata operand cannot be function local!", &MD, N);
+ visitMDNode(*N, F);
+ continue;
+ }
+ Assert2(MD.isFunctionLocal(), "Invalid operand for global metadata!", &MD, Op);
+
+ // If this was an instruction, bb, or argument, verify that it is in the
+ // function that we expect.
+ Function *ActualF = 0;
+ if (Instruction *I = dyn_cast<Instruction>(Op))
+ ActualF = I->getParent()->getParent();
+ else if (BasicBlock *BB = dyn_cast<BasicBlock>(Op))
+ ActualF = BB->getParent();
+ else if (Argument *A = dyn_cast<Argument>(Op))
+ ActualF = A->getParent();
+ assert(ActualF && "Unimplemented function local metadata case!");
+
+ Assert2(ActualF == F, "function-local metadata used in wrong function",
+ &MD, Op);
+ }
+}
+
void Verifier::verifyTypeSymbolTable(TypeSymbolTable &ST) {
for (TypeSymbolTable::iterator I = ST.begin(), E = ST.end(); I != E; ++I)
VerifyType(I->second);
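
The new visitMDNode above explains why the Verifier now carries an MDNodes set: metadata nodes can reference each other cyclically, so the walker must remember which nodes it has already checked or it would recurse forever. That guard in isolation, over an invented MDLikeNode graph:

  #include <cstddef>
  #include <unordered_set>
  #include <vector>

  struct MDLikeNode { std::vector<MDLikeNode *> Operands; };

  // Visit every node reachable from MD exactly once, even across cycles.
  void visitNode(MDLikeNode &MD, std::unordered_set<MDLikeNode *> &Seen) {
    if (!Seen.insert(&MD).second)
      return;                                  // already visited: stop here
    for (std::size_t i = 0; i != MD.Operands.size(); ++i)
      if (MDLikeNode *N = MD.Operands[i])
        visitNode(*N, Seen);                   // recurse into nested nodes
  }
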
@@ -632,6 +684,7 @@ void Verifier::visitFunction(Function &F) {
case CallingConv::Fast:
case CallingConv::Cold:
case CallingConv::X86_FastCall:
+ case CallingConv::X86_ThisCall:
Assert1(!F.isVarArg(),
"Varargs functions must have C calling conventions!", &F);
break;
@@ -676,17 +729,13 @@ void Verifier::visitFunction(Function &F) {
"blockaddress may not be used with the entry block!", Entry);
}
}
-
+
// If this function is actually an intrinsic, verify that it is only used in
// direct call/invokes, never having its "address taken".
if (F.getIntrinsicID()) {
- for (Value::use_iterator UI = F.use_begin(), E = F.use_end(); UI != E;++UI){
- User *U = cast<User>(UI);
- if ((isa<CallInst>(U) || isa<InvokeInst>(U)) && UI.getOperandNo() == 0)
- continue; // Direct calls/invokes are ok.
-
+ const User *U;
+ if (F.hasAddressTaken(&U))
Assert1(0, "Invalid user of intrinsic instruction!", U);
- }
}
}
@@ -810,6 +859,16 @@ void Verifier::visitSwitchInst(SwitchInst &SI) {
visitTerminatorInst(SI);
}
+void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
+ Assert1(BI.getAddress()->getType()->isPointerTy(),
+ "Indirectbr operand must have pointer type!", &BI);
+ for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
+ Assert1(BI.getDestination(i)->getType()->isLabelTy(),
+ "Indirectbr destinations must all have pointer type!", &BI);
+
+ visitTerminatorInst(BI);
+}
+
void Verifier::visitSelectInst(SelectInst &SI) {
Assert1(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
SI.getOperand(2)),
@@ -1100,7 +1159,7 @@ void Verifier::VerifyCallSite(CallSite CS) {
Assert1(CS.arg_size() == FTy->getNumParams(),
"Incorrect number of arguments passed to called function!", I);
- // Verify that all arguments to the call match the function type...
+ // Verify that all arguments to the call match the function type.
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
Assert3(CS.getArgument(i)->getType() == FTy->getParamType(i),
"Call parameter type does not match function signature!",
@@ -1127,8 +1186,8 @@ void Verifier::VerifyCallSite(CallSite CS) {
}
// Verify that there's no metadata unless it's a direct call to an intrinsic.
- if (!CS.getCalledFunction() || CS.getCalledFunction()->getName().size() < 5 ||
- CS.getCalledFunction()->getName().substr(0, 5) != "llvm.") {
+ if (!CS.getCalledFunction() ||
+ !CS.getCalledFunction()->getName().startswith("llvm.")) {
for (FunctionType::param_iterator PI = FTy->param_begin(),
PE = FTy->param_end(); PI != PE; ++PI)
Assert1(!PI->get()->isMetadataTy(),
@@ -1148,6 +1207,7 @@ void Verifier::visitCallInst(CallInst &CI) {
void Verifier::visitInvokeInst(InvokeInst &II) {
VerifyCallSite(&II);
+ visitTerminatorInst(II);
}
/// visitBinaryOperator - Check that both arguments to the binary operator are
@@ -1212,28 +1272,37 @@ void Verifier::visitBinaryOperator(BinaryOperator &B) {
visitInstruction(B);
}
-void Verifier::visitICmpInst(ICmpInst& IC) {
+void Verifier::visitICmpInst(ICmpInst &IC) {
// Check that the operands are the same type
- const Type* Op0Ty = IC.getOperand(0)->getType();
- const Type* Op1Ty = IC.getOperand(1)->getType();
+ const Type *Op0Ty = IC.getOperand(0)->getType();
+ const Type *Op1Ty = IC.getOperand(1)->getType();
Assert1(Op0Ty == Op1Ty,
"Both operands to ICmp instruction are not of the same type!", &IC);
// Check that the operands are the right type
Assert1(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPointerTy(),
"Invalid operand types for ICmp instruction", &IC);
+ // Check that the predicate is valid.
+ Assert1(IC.getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE &&
+ IC.getPredicate() <= CmpInst::LAST_ICMP_PREDICATE,
+ "Invalid predicate in ICmp instruction!", &IC);
visitInstruction(IC);
}
-void Verifier::visitFCmpInst(FCmpInst& FC) {
+void Verifier::visitFCmpInst(FCmpInst &FC) {
// Check that the operands are the same type
- const Type* Op0Ty = FC.getOperand(0)->getType();
- const Type* Op1Ty = FC.getOperand(1)->getType();
+ const Type *Op0Ty = FC.getOperand(0)->getType();
+ const Type *Op1Ty = FC.getOperand(1)->getType();
Assert1(Op0Ty == Op1Ty,
"Both operands to FCmp instruction are not of the same type!", &FC);
// Check that the operands are the right type
Assert1(Op0Ty->isFPOrFPVectorTy(),
"Invalid operand types for FCmp instruction", &FC);
+ // Check that the predicate is valid.
+ Assert1(FC.getPredicate() >= CmpInst::FIRST_FCMP_PREDICATE &&
+ FC.getPredicate() <= CmpInst::LAST_FCMP_PREDICATE,
+ "Invalid predicate in FCmp instruction!", &FC);
+
visitInstruction(FC);
}
@@ -1256,27 +1325,6 @@ void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
Assert1(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
SV.getOperand(2)),
"Invalid shufflevector operands!", &SV);
-
- const VectorType *VTy = dyn_cast<VectorType>(SV.getOperand(0)->getType());
- Assert1(VTy, "Operands are not a vector type", &SV);
-
- // Check to see if Mask is valid.
- if (const ConstantVector *MV = dyn_cast<ConstantVector>(SV.getOperand(2))) {
- for (unsigned i = 0, e = MV->getNumOperands(); i != e; ++i) {
- if (ConstantInt* CI = dyn_cast<ConstantInt>(MV->getOperand(i))) {
- Assert1(!CI->uge(VTy->getNumElements()*2),
- "Invalid shufflevector shuffle mask!", &SV);
- } else {
- Assert1(isa<UndefValue>(MV->getOperand(i)),
- "Invalid shufflevector shuffle mask!", &SV);
- }
- }
- } else {
- Assert1(isa<UndefValue>(SV.getOperand(2)) ||
- isa<ConstantAggregateZero>(SV.getOperand(2)),
- "Invalid shufflevector shuffle mask!", &SV);
- }
-
visitInstruction(SV);
}
@@ -1303,7 +1351,7 @@ void Verifier::visitLoadInst(LoadInst &LI) {
void Verifier::visitStoreInst(StoreInst &SI) {
const PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
- Assert1(PTy, "Load operand must be a pointer.", &SI);
+ Assert1(PTy, "Store operand must be a pointer.", &SI);
const Type *ElTy = PTy->getElementType();
Assert2(ElTy == SI.getOperand(0)->getType(),
"Stored value type does not match pointer operand type!",
@@ -1318,8 +1366,8 @@ void Verifier::visitAllocaInst(AllocaInst &AI) {
&AI);
Assert1(PTy->getElementType()->isSized(), "Cannot allocate unsized type",
&AI);
- Assert1(AI.getArraySize()->getType()->isIntegerTy(32),
- "Alloca array size must be i32", &AI);
+ Assert1(AI.getArraySize()->getType()->isIntegerTy(),
+ "Alloca array size must have integer type", &AI);
visitInstruction(AI);
}
@@ -1354,10 +1402,6 @@ void Verifier::visitInstruction(Instruction &I) {
"Only PHI nodes may reference their own value!", &I);
}
- // Verify that if this is a terminator that it is at the end of the block.
- if (isa<TerminatorInst>(I))
- Assert1(BB->getTerminator() == &I, "Terminator not at end of block!", &I);
-
// Check that void typed values don't have names
Assert1(!I.getType()->isVoidTy() || !I.hasName(),
"Instruction has a name, but provides a void value!", &I);
@@ -1400,7 +1444,7 @@ void Verifier::visitInstruction(Instruction &I) {
if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
// Check to make sure that the "address of" an intrinsic function is never
// taken.
- Assert1(!F->isIntrinsic() || (i == 0 && isa<CallInst>(I)),
+ Assert1(!F->isIntrinsic() || (i + 1 == e && isa<CallInst>(I)),
"Cannot take the address of an intrinsic!", &I);
Assert1(F->getParent() == Mod, "Referencing function in another module!",
&I);
@@ -1483,7 +1527,8 @@ void Verifier::visitInstruction(Instruction &I) {
"Instruction does not dominate all uses!", Op, &I);
}
} else if (isa<InlineAsm>(I.getOperand(i))) {
- Assert1(i == 0 && (isa<CallInst>(I) || isa<InvokeInst>(I)),
+ Assert1((i + 1 == e && isa<CallInst>(I)) ||
+ (i + 3 == e && isa<InvokeInst>(I)),
"Cannot take the address of an inline asm!", &I);
}
}
@@ -1515,7 +1560,8 @@ void Verifier::VerifyType(const Type *Ty) {
"Function type with invalid parameter type", ElTy, FTy);
VerifyType(ElTy);
}
- } break;
+ break;
+ }
case Type::StructTyID: {
const StructType *STy = cast<StructType>(Ty);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
@@ -1524,68 +1570,31 @@ void Verifier::VerifyType(const Type *Ty) {
"Structure type with invalid element type", ElTy, STy);
VerifyType(ElTy);
}
- } break;
- case Type::UnionTyID: {
- const UnionType *UTy = cast<UnionType>(Ty);
- for (unsigned i = 0, e = UTy->getNumElements(); i != e; ++i) {
- const Type *ElTy = UTy->getElementType(i);
- Assert2(UnionType::isValidElementType(ElTy),
- "Union type with invalid element type", ElTy, UTy);
- VerifyType(ElTy);
- }
- } break;
+ break;
+ }
case Type::ArrayTyID: {
const ArrayType *ATy = cast<ArrayType>(Ty);
Assert1(ArrayType::isValidElementType(ATy->getElementType()),
"Array type with invalid element type", ATy);
VerifyType(ATy->getElementType());
- } break;
+ break;
+ }
case Type::PointerTyID: {
const PointerType *PTy = cast<PointerType>(Ty);
Assert1(PointerType::isValidElementType(PTy->getElementType()),
"Pointer type with invalid element type", PTy);
VerifyType(PTy->getElementType());
- } break;
+ break;
+ }
case Type::VectorTyID: {
const VectorType *VTy = cast<VectorType>(Ty);
Assert1(VectorType::isValidElementType(VTy->getElementType()),
"Vector type with invalid element type", VTy);
VerifyType(VTy->getElementType());
- } break;
- default:
break;
}
-}
-
-/// VerifyFunctionLocalMetadata - Verify that the specified MDNode is local to
-/// specified Function.
-void Verifier::VerifyFunctionLocalMetadata(MDNode *N, Function *F,
- SmallPtrSet<MDNode *, 32> &Visited) {
- assert(N->isFunctionLocal() && "Should only be called on function-local MD");
-
- // Only visit each node once.
- if (!Visited.insert(N))
- return;
-
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
- Value *V = N->getOperand(i);
- if (!V) continue;
-
- Function *ActualF = 0;
- if (Instruction *I = dyn_cast<Instruction>(V))
- ActualF = I->getParent()->getParent();
- else if (BasicBlock *BB = dyn_cast<BasicBlock>(V))
- ActualF = BB->getParent();
- else if (Argument *A = dyn_cast<Argument>(V))
- ActualF = A->getParent();
- else if (MDNode *MD = dyn_cast<MDNode>(V))
- if (MD->isFunctionLocal())
- VerifyFunctionLocalMetadata(MD, F, Visited);
-
- // If this was an instruction, bb, or argument, verify that it is in the
- // function that we expect.
- Assert1(ActualF == 0 || ActualF == F,
- "function-local metadata used in wrong function", N);
+ default:
+ break;
}
}
@@ -1607,31 +1616,24 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
// If the intrinsic takes MDNode arguments, verify that they are either global
// or are local to *this* function.
- for (unsigned i = 1, e = CI.getNumOperands(); i != e; ++i)
- if (MDNode *MD = dyn_cast<MDNode>(CI.getOperand(i))) {
- if (!MD->isFunctionLocal()) continue;
- SmallPtrSet<MDNode *, 32> Visited;
- VerifyFunctionLocalMetadata(MD, CI.getParent()->getParent(), Visited);
- }
+ for (unsigned i = 0, e = CI.getNumArgOperands(); i != e; ++i)
+ if (MDNode *MD = dyn_cast<MDNode>(CI.getArgOperand(i)))
+ visitMDNode(*MD, CI.getParent()->getParent());
switch (ID) {
default:
break;
case Intrinsic::dbg_declare: { // llvm.dbg.declare
- Assert1(CI.getOperand(1) && isa<MDNode>(CI.getOperand(1)),
+ Assert1(CI.getArgOperand(0) && isa<MDNode>(CI.getArgOperand(0)),
"invalid llvm.dbg.declare intrinsic call 1", &CI);
- MDNode *MD = cast<MDNode>(CI.getOperand(1));
+ MDNode *MD = cast<MDNode>(CI.getArgOperand(0));
Assert1(MD->getNumOperands() == 1,
"invalid llvm.dbg.declare intrinsic call 2", &CI);
- if (MD->getOperand(0))
- if (Constant *C = dyn_cast<Constant>(MD->getOperand(0)))
- Assert1(C && !isa<ConstantPointerNull>(C),
- "invalid llvm.dbg.declare intrinsic call 3", &CI);
} break;
case Intrinsic::memcpy:
case Intrinsic::memmove:
case Intrinsic::memset:
- Assert1(isa<ConstantInt>(CI.getOperand(4)),
+ Assert1(isa<ConstantInt>(CI.getArgOperand(3)),
"alignment argument of memory intrinsics must be a constant int",
&CI);
break;
@@ -1640,10 +1642,10 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
case Intrinsic::gcread:
if (ID == Intrinsic::gcroot) {
AllocaInst *AI =
- dyn_cast<AllocaInst>(CI.getOperand(1)->stripPointerCasts());
+ dyn_cast<AllocaInst>(CI.getArgOperand(0)->stripPointerCasts());
Assert1(AI && AI->getType()->getElementType()->isPointerTy(),
"llvm.gcroot parameter #1 must be a pointer alloca.", &CI);
- Assert1(isa<Constant>(CI.getOperand(2)),
+ Assert1(isa<Constant>(CI.getArgOperand(1)),
"llvm.gcroot parameter #2 must be a constant.", &CI);
}
@@ -1651,32 +1653,32 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
"Enclosing function does not use GC.", &CI);
break;
case Intrinsic::init_trampoline:
- Assert1(isa<Function>(CI.getOperand(2)->stripPointerCasts()),
+ Assert1(isa<Function>(CI.getArgOperand(1)->stripPointerCasts()),
"llvm.init_trampoline parameter #2 must resolve to a function.",
&CI);
break;
case Intrinsic::prefetch:
- Assert1(isa<ConstantInt>(CI.getOperand(2)) &&
- isa<ConstantInt>(CI.getOperand(3)) &&
- cast<ConstantInt>(CI.getOperand(2))->getZExtValue() < 2 &&
- cast<ConstantInt>(CI.getOperand(3))->getZExtValue() < 4,
+ Assert1(isa<ConstantInt>(CI.getArgOperand(1)) &&
+ isa<ConstantInt>(CI.getArgOperand(2)) &&
+ cast<ConstantInt>(CI.getArgOperand(1))->getZExtValue() < 2 &&
+ cast<ConstantInt>(CI.getArgOperand(2))->getZExtValue() < 4,
"invalid arguments to llvm.prefetch",
&CI);
break;
case Intrinsic::stackprotector:
- Assert1(isa<AllocaInst>(CI.getOperand(2)->stripPointerCasts()),
+ Assert1(isa<AllocaInst>(CI.getArgOperand(1)->stripPointerCasts()),
"llvm.stackprotector parameter #2 must resolve to an alloca.",
&CI);
break;
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start:
- Assert1(isa<ConstantInt>(CI.getOperand(1)),
+ Assert1(isa<ConstantInt>(CI.getArgOperand(0)),
"size argument of memory use markers must be a constant integer",
&CI);
break;
case Intrinsic::invariant_end:
- Assert1(isa<ConstantInt>(CI.getOperand(2)),
+ Assert1(isa<ConstantInt>(CI.getArgOperand(1)),
"llvm.invariant.end parameter #2 must be a constant integer", &CI);
break;
}
@@ -1687,13 +1689,11 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
/// parameters beginning with NumRets.
///
static std::string IntrinsicParam(unsigned ArgNo, unsigned NumRets) {
- if (ArgNo < NumRets) {
- if (NumRets == 1)
- return "Intrinsic result type";
- else
- return "Intrinsic result type #" + utostr(ArgNo);
- } else
+ if (ArgNo >= NumRets)
return "Intrinsic parameter #" + utostr(ArgNo - NumRets);
+ if (NumRets == 1)
+ return "Intrinsic result type";
+ return "Intrinsic result type #" + utostr(ArgNo);
}
bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
@@ -1710,9 +1710,13 @@ bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
const Type *RetTy = FTy->getReturnType();
const StructType *ST = dyn_cast<StructType>(RetTy);
- unsigned NumRets = 1;
- if (ST)
- NumRets = ST->getNumElements();
+ unsigned NumRetVals;
+ if (RetTy->isVoidTy())
+ NumRetVals = 0;
+ else if (ST)
+ NumRetVals = ST->getNumElements();
+ else
+ NumRetVals = 1;
if (VT < 0) {
int Match = ~VT;
@@ -1724,7 +1728,7 @@ bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
TruncatedElementVectorType)) != 0) {
const IntegerType *IEltTy = dyn_cast<IntegerType>(EltTy);
if (!VTy || !IEltTy) {
- CheckFailed(IntrinsicParam(ArgNo, NumRets) + " is not "
+ CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is not "
"an integral vector type.", F);
return false;
}
@@ -1732,7 +1736,7 @@ bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
// the type being matched against.
if ((Match & ExtendedElementVectorType) != 0) {
if ((IEltTy->getBitWidth() & 1) != 0) {
- CheckFailed(IntrinsicParam(ArgNo, NumRets) + " vector "
+ CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " vector "
"element bit-width is odd.", F);
return false;
}
@@ -1742,25 +1746,25 @@ bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
Match &= ~(ExtendedElementVectorType | TruncatedElementVectorType);
}
- if (Match <= static_cast<int>(NumRets - 1)) {
+ if (Match <= static_cast<int>(NumRetVals - 1)) {
if (ST)
RetTy = ST->getElementType(Match);
if (Ty != RetTy) {
- CheckFailed(IntrinsicParam(ArgNo, NumRets) + " does not "
+ CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " does not "
"match return type.", F);
return false;
}
} else {
- if (Ty != FTy->getParamType(Match - NumRets)) {
- CheckFailed(IntrinsicParam(ArgNo, NumRets) + " does not "
- "match parameter %" + utostr(Match - NumRets) + ".", F);
+ if (Ty != FTy->getParamType(Match - NumRetVals)) {
+ CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " does not "
+ "match parameter %" + utostr(Match - NumRetVals) + ".", F);
return false;
}
}
} else if (VT == MVT::iAny) {
if (!EltTy->isIntegerTy()) {
- CheckFailed(IntrinsicParam(ArgNo, NumRets) + " is not "
+ CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is not "
"an integer type.", F);
return false;
}
@@ -1785,7 +1789,7 @@ bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
}
} else if (VT == MVT::fAny) {
if (!EltTy->isFloatingPointTy()) {
- CheckFailed(IntrinsicParam(ArgNo, NumRets) + " is not "
+ CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is not "
"a floating-point type.", F);
return false;
}
@@ -1798,13 +1802,14 @@ bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
Suffix += EVT::getEVT(EltTy).getEVTString();
} else if (VT == MVT::vAny) {
if (!VTy) {
- CheckFailed(IntrinsicParam(ArgNo, NumRets) + " is not a vector type.", F);
+ CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is not a vector type.",
+ F);
return false;
}
Suffix += ".v" + utostr(NumElts) + EVT::getEVT(EltTy).getEVTString();
} else if (VT == MVT::iPTR) {
if (!Ty->isPointerTy()) {
- CheckFailed(IntrinsicParam(ArgNo, NumRets) + " is not a "
+ CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is not a "
"pointer and a pointer is required.", F);
return false;
}
@@ -1813,10 +1818,15 @@ bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
// and iPTR. In the verifier, we can not distinguish which case we have so
// allow either case to be legal.
if (const PointerType* PTyp = dyn_cast<PointerType>(Ty)) {
- Suffix += ".p" + utostr(PTyp->getAddressSpace()) +
- EVT::getEVT(PTyp->getElementType()).getEVTString();
+ EVT PointeeVT = EVT::getEVT(PTyp->getElementType(), true);
+ if (PointeeVT == MVT::Other) {
+ CheckFailed("Intrinsic has pointer to complex type.");
+ return false;
+ }
+ Suffix += ".p" + utostr(PTyp->getAddressSpace()) +
+ PointeeVT.getEVTString();
} else {
- CheckFailed(IntrinsicParam(ArgNo, NumRets) + " is not a "
+ CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is not a "
"pointer and a pointer is required.", F);
return false;
}
@@ -1836,10 +1846,10 @@ bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
}
} else if (EVT((MVT::SimpleValueType)VT).getTypeForEVT(Ty->getContext()) !=
EltTy) {
- CheckFailed(IntrinsicParam(ArgNo, NumRets) + " is wrong!", F);
+ CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is wrong!", F);
return false;
} else if (EltTy != Ty) {
- CheckFailed(IntrinsicParam(ArgNo, NumRets) + " is a vector "
+ CheckFailed(IntrinsicParam(ArgNo, NumRetVals) + " is a vector "
"and a scalar is required.", F);
return false;
}
@@ -1851,10 +1861,10 @@ bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
/// Intrinsics.gen. This implements a little state machine that verifies the
/// prototype of intrinsics.
void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F,
- unsigned RetNum,
- unsigned ParamNum, ...) {
+ unsigned NumRetVals,
+ unsigned NumParams, ...) {
va_list VA;
- va_start(VA, ParamNum);
+ va_start(VA, NumParams);
const FunctionType *FTy = F->getFunctionType();
// For overloaded intrinsics, the Suffix of the function name must match the
@@ -1862,7 +1872,7 @@ void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F,
// suffix, to be checked at the end.
std::string Suffix;
- if (FTy->getNumParams() + FTy->isVarArg() != ParamNum) {
+ if (FTy->getNumParams() + FTy->isVarArg() != NumParams) {
CheckFailed("Intrinsic prototype has incorrect number of arguments!", F);
return;
}
@@ -1870,23 +1880,27 @@ void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F,
const Type *Ty = FTy->getReturnType();
const StructType *ST = dyn_cast<StructType>(Ty);
+ if (NumRetVals == 0 && !Ty->isVoidTy()) {
+ CheckFailed("Intrinsic should return void", F);
+ return;
+ }
+
// Verify the return types.
- if (ST && ST->getNumElements() != RetNum) {
+ if (ST && ST->getNumElements() != NumRetVals) {
CheckFailed("Intrinsic prototype has incorrect number of return types!", F);
return;
}
-
- for (unsigned ArgNo = 0; ArgNo < RetNum; ++ArgNo) {
+
+ for (unsigned ArgNo = 0; ArgNo != NumRetVals; ++ArgNo) {
int VT = va_arg(VA, int); // An MVT::SimpleValueType when non-negative.
if (ST) Ty = ST->getElementType(ArgNo);
-
if (!PerformTypeCheck(ID, F, Ty, VT, ArgNo, Suffix))
break;
}
// Verify the parameter types.
- for (unsigned ArgNo = 0; ArgNo < ParamNum; ++ArgNo) {
+ for (unsigned ArgNo = 0; ArgNo != NumParams; ++ArgNo) {
int VT = va_arg(VA, int); // An MVT::SimpleValueType when non-negative.
if (VT == MVT::isVoid && ArgNo > 0) {
@@ -1895,8 +1909,8 @@ void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F,
break;
}
- if (!PerformTypeCheck(ID, F, FTy->getParamType(ArgNo), VT, ArgNo + RetNum,
- Suffix))
+ if (!PerformTypeCheck(ID, F, FTy->getParamType(ArgNo), VT,
+ ArgNo + NumRetVals, Suffix))
break;
}
@@ -1934,7 +1948,9 @@ FunctionPass *llvm::createVerifierPass(VerifierFailureAction action) {
}
-// verifyFunction - Create
+/// verifyFunction - Check a function for errors, printing messages on stderr.
+/// Return true if the function is corrupt.
+///
bool llvm::verifyFunction(const Function &f, VerifierFailureAction action) {
Function &F = const_cast<Function&>(f);
assert(!F.isDeclaration() && "Cannot verify external functions");
diff --git a/libclamav/c++/llvm/test/CMakeLists.txt b/libclamav/c++/llvm/test/CMakeLists.txt
deleted file mode 100644
index ab060c9..0000000
--- a/libclamav/c++/llvm/test/CMakeLists.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-foreach(c ${LLVM_TARGETS_TO_BUILD})
- set(TARGETS_BUILT "${TARGETS_BUILT} ${c}")
-endforeach(c)
-set(TARGETS_TO_BUILD ${TARGETS_BUILT})
-
-# FIXME: This won't work for project files, we need to use a --param.
-set(LLVM_LIBS_DIR "${LLVM_BINARY_DIR}/lib/${CMAKE_CFG_INTDIR}")
-set(SHLIBEXT "${LTDL_SHLIB_EXT}")
-
-if(BUILD_SHARED_LIBS)
- set(LLVM_SHARED_LIBS_ENABLED "1")
-else()
- set(LLVM_SHARED_LIBS_ENABLED "0")
-endif(BUILD_SHARED_LIBS)
-
-if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
- set(SHLIBPATH_VAR "DYLD_LIBRARY_PATH")
-else() # Default for all other unix like systems.
- # CMake hardcodes the library locaction using rpath.
- # Therefore LD_LIBRARY_PATH is not required to run binaries in the
- # build dir. We pass it anyways.
- set(SHLIBPATH_VAR "LD_LIBRARY_PATH")
-endif()
-
-include(FindPythonInterp)
-if(PYTHONINTERP_FOUND)
- configure_file(
- ${CMAKE_CURRENT_SOURCE_DIR}/site.exp.in
- ${CMAKE_CURRENT_BINARY_DIR}/site.exp)
-
- MAKE_DIRECTORY(${CMAKE_CURRENT_BINARY_DIR}/Unit)
-
- add_custom_target(check
- COMMAND sed -e "s#\@LLVM_SOURCE_DIR\@#${LLVM_MAIN_SRC_DIR}#"
- -e "s#\@LLVM_BINARY_DIR\@#${LLVM_BINARY_DIR}#"
- -e "s#\@LLVM_TOOLS_DIR\@#${LLVM_TOOLS_BINARY_DIR}/${CMAKE_CFG_INTDIR}#"
- -e "s#\@LLVMGCCDIR\@##"
- ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in >
- ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg
- COMMAND sed -e "s#\@LLVM_SOURCE_DIR\@#${LLVM_MAIN_SRC_DIR}#"
- -e "s#\@LLVM_BINARY_DIR\@#${LLVM_BINARY_DIR}#"
- -e "s#\@LLVM_TOOLS_DIR\@#${LLVM_TOOLS_BINARY_DIR}/${CMAKE_CFG_INTDIR}#"
- -e "s#\@LLVMGCCDIR\@##"
- -e "s#\@LLVM_BUILD_MODE\@#${CMAKE_CFG_INTDIR}#"
- -e "s#\@ENABLE_SHARED\@#${LLVM_SHARED_LIBS_ENABLED}#"
- -e "s#\@SHLIBPATH_VAR\@#${SHLIBPATH_VAR}#"
- ${CMAKE_CURRENT_SOURCE_DIR}/Unit/lit.site.cfg.in >
- ${CMAKE_CURRENT_BINARY_DIR}/Unit/lit.site.cfg
- COMMAND ${PYTHON_EXECUTABLE}
- ${LLVM_SOURCE_DIR}/utils/lit/lit.py
- --param llvm_site_config=${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg
- --param llvm_unit_site_config=${CMAKE_CURRENT_BINARY_DIR}/Unit/lit.site.cfg
- -sv
- ${CMAKE_CURRENT_BINARY_DIR}
- DEPENDS
- COMMENT "Running LLVM regression tests")
-
-endif()
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll
deleted file mode 100644
index a0235f7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6
-
-%struct.layer_data = type { i32, [2048 x i8], i8*, [16 x i8], i32, i8*, i32, i32, [64 x i32], [64 x i32], [64 x i32], [64 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [12 x [64 x i16]] }
-@ld = external global %struct.layer_data* ; <%struct.layer_data**> [#uses=1]
-
-define void @main() {
-entry:
- br i1 false, label %bb169.i, label %cond_true11
-
-bb169.i: ; preds = %entry
- ret void
-
-cond_true11: ; preds = %entry
- %tmp.i32 = load %struct.layer_data** @ld ; <%struct.layer_data*> [#uses=2]
- %tmp3.i35 = getelementptr %struct.layer_data* %tmp.i32, i32 0, i32 1, i32 2048; <i8*> [#uses=2]
- %tmp.i36 = getelementptr %struct.layer_data* %tmp.i32, i32 0, i32 2 ; <i8**> [#uses=1]
- store i8* %tmp3.i35, i8** %tmp.i36
- store i8* %tmp3.i35, i8** null
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
deleted file mode 100644
index 81483cb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
+++ /dev/null
@@ -1,103 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2
-
-@quant_coef = external global [6 x [4 x [4 x i32]]] ; <[6 x [4 x [4 x i32]]]*> [#uses=1]
-@dequant_coef = external global [6 x [4 x [4 x i32]]] ; <[6 x [4 x [4 x i32]]]*> [#uses=1]
-@A = external global [4 x [4 x i32]] ; <[4 x [4 x i32]]*> [#uses=1]
-
-define fastcc i32 @dct_luma_sp(i32 %block_x, i32 %block_y, i32* %coeff_cost) {
-entry:
- %predicted_block = alloca [4 x [4 x i32]], align 4 ; <[4 x [4 x i32]]*> [#uses=1]
- br label %cond_next489
-
-cond_next489: ; preds = %cond_false, %bb471
- %j.7.in = load i8* null ; <i8> [#uses=1]
- %i.8.in = load i8* null ; <i8> [#uses=1]
- %i.8 = zext i8 %i.8.in to i32 ; <i32> [#uses=4]
- %j.7 = zext i8 %j.7.in to i32 ; <i32> [#uses=4]
- %tmp495 = getelementptr [4 x [4 x i32]]* %predicted_block, i32 0, i32 %i.8, i32 %j.7 ; <i32*> [#uses=2]
- %tmp496 = load i32* %tmp495 ; <i32> [#uses=2]
- %tmp502 = load i32* null ; <i32> [#uses=1]
- %tmp542 = getelementptr [6 x [4 x [4 x i32]]]* @quant_coef, i32 0, i32 0, i32 %i.8, i32 %j.7 ; <i32*> [#uses=1]
- %tmp543 = load i32* %tmp542 ; <i32> [#uses=1]
- %tmp548 = ashr i32 0, 0 ; <i32> [#uses=3]
- %tmp561 = sub i32 0, %tmp496 ; <i32> [#uses=3]
- %abscond563 = icmp sgt i32 %tmp561, -1 ; <i1> [#uses=1]
- %abs564 = select i1 %abscond563, i32 %tmp561, i32 0 ; <i32> [#uses=1]
- %tmp572 = mul i32 %abs564, %tmp543 ; <i32> [#uses=1]
- %tmp574 = add i32 %tmp572, 0 ; <i32> [#uses=1]
- %tmp576 = ashr i32 %tmp574, 0 ; <i32> [#uses=7]
- %tmp579 = icmp eq i32 %tmp548, %tmp576 ; <i1> [#uses=1]
- br i1 %tmp579, label %bb712, label %cond_next589
-
-cond_next589: ; preds = %cond_next489
- %tmp605 = getelementptr [6 x [4 x [4 x i32]]]* @dequant_coef, i32 0, i32 0, i32 %i.8, i32 %j.7 ; <i32*> [#uses=1]
- %tmp606 = load i32* %tmp605 ; <i32> [#uses=1]
- %tmp612 = load i32* null ; <i32> [#uses=1]
- %tmp629 = load i32* null ; <i32> [#uses=1]
- %tmp629a = sitofp i32 %tmp629 to double ; <double> [#uses=1]
- %tmp631 = fmul double %tmp629a, 0.000000e+00 ; <double> [#uses=1]
- %tmp632 = fadd double 0.000000e+00, %tmp631 ; <double> [#uses=1]
- %tmp642 = call fastcc i32 @sign( i32 %tmp576, i32 %tmp561 ) ; <i32> [#uses=1]
- %tmp650 = mul i32 %tmp606, %tmp642 ; <i32> [#uses=1]
- %tmp656 = mul i32 %tmp650, %tmp612 ; <i32> [#uses=1]
- %tmp658 = shl i32 %tmp656, 0 ; <i32> [#uses=1]
- %tmp659 = ashr i32 %tmp658, 6 ; <i32> [#uses=1]
- %tmp660 = sub i32 0, %tmp659 ; <i32> [#uses=1]
- %tmp666 = sub i32 %tmp660, %tmp496 ; <i32> [#uses=1]
- %tmp667 = sitofp i32 %tmp666 to double ; <double> [#uses=2]
- call void @levrun_linfo_inter( i32 %tmp576, i32 0, i32* null, i32* null )
- %tmp671 = fmul double %tmp667, %tmp667 ; <double> [#uses=1]
- %tmp675 = fadd double %tmp671, 0.000000e+00 ; <double> [#uses=1]
- %tmp678 = fcmp oeq double %tmp632, %tmp675 ; <i1> [#uses=1]
- br i1 %tmp678, label %cond_true679, label %cond_false693
-
-cond_true679: ; preds = %cond_next589
- %abscond681 = icmp sgt i32 %tmp548, -1 ; <i1> [#uses=1]
- %abs682 = select i1 %abscond681, i32 %tmp548, i32 0 ; <i32> [#uses=1]
- %abscond684 = icmp sgt i32 %tmp576, -1 ; <i1> [#uses=1]
- %abs685 = select i1 %abscond684, i32 %tmp576, i32 0 ; <i32> [#uses=1]
- %tmp686 = icmp slt i32 %abs682, %abs685 ; <i1> [#uses=1]
- br i1 %tmp686, label %cond_next702, label %cond_false689
-
-cond_false689: ; preds = %cond_true679
- %tmp739 = icmp eq i32 %tmp576, 0 ; <i1> [#uses=1]
- br i1 %tmp579, label %bb737, label %cond_false708
-
-cond_false693: ; preds = %cond_next589
- ret i32 0
-
-cond_next702: ; preds = %cond_true679
- ret i32 0
-
-cond_false708: ; preds = %cond_false689
- ret i32 0
-
-bb712: ; preds = %cond_next489
- ret i32 0
-
-bb737: ; preds = %cond_false689
- br i1 %tmp739, label %cond_next791, label %cond_true740
-
-cond_true740: ; preds = %bb737
- %tmp761 = call fastcc i32 @sign( i32 %tmp576, i32 0 ) ; <i32> [#uses=1]
- %tmp780 = load i32* null ; <i32> [#uses=1]
- %tmp785 = getelementptr [4 x [4 x i32]]* @A, i32 0, i32 %i.8, i32 %j.7 ; <i32*> [#uses=1]
- %tmp786 = load i32* %tmp785 ; <i32> [#uses=1]
- %tmp781 = mul i32 %tmp780, %tmp761 ; <i32> [#uses=1]
- %tmp787 = mul i32 %tmp781, %tmp786 ; <i32> [#uses=1]
- %tmp789 = shl i32 %tmp787, 0 ; <i32> [#uses=1]
- %tmp790 = ashr i32 %tmp789, 6 ; <i32> [#uses=1]
- br label %cond_next791
-
-cond_next791: ; preds = %cond_true740, %bb737
- %ilev.1 = phi i32 [ %tmp790, %cond_true740 ], [ 0, %bb737 ] ; <i32> [#uses=1]
- %tmp796 = load i32* %tmp495 ; <i32> [#uses=1]
- %tmp798 = add i32 %tmp796, %ilev.1 ; <i32> [#uses=1]
- %tmp812 = mul i32 0, %tmp502 ; <i32> [#uses=0]
- %tmp818 = call fastcc i32 @sign( i32 0, i32 %tmp798 ) ; <i32> [#uses=0]
- unreachable
-}
-
-declare i32 @sign(i32, i32)
-
-declare void @levrun_linfo_inter(i32, i32, i32*, i32*)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-07-CombinerCrash.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-07-CombinerCrash.ll
deleted file mode 100644
index 83b26d3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-07-CombinerCrash.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -mattr=+v6,+vfp2
-
-define fastcc i8* @read_sleb128(i8* %p, i32* %val) {
- br label %bb
-
-bb: ; preds = %bb, %0
- %p_addr.0 = getelementptr i8* %p, i32 0 ; <i8*> [#uses=1]
- %tmp2 = load i8* %p_addr.0 ; <i8> [#uses=2]
- %tmp4.rec = add i32 0, 1 ; <i32> [#uses=1]
- %tmp4 = getelementptr i8* %p, i32 %tmp4.rec ; <i8*> [#uses=1]
- %tmp56 = zext i8 %tmp2 to i32 ; <i32> [#uses=1]
- %tmp7 = and i32 %tmp56, 127 ; <i32> [#uses=1]
- %tmp9 = shl i32 %tmp7, 0 ; <i32> [#uses=1]
- %tmp11 = or i32 %tmp9, 0 ; <i32> [#uses=1]
- icmp slt i8 %tmp2, 0 ; <i1>:1 [#uses=1]
- br i1 %1, label %bb, label %cond_next28
-
-cond_next28: ; preds = %bb
- store i32 %tmp11, i32* %val
- ret i8* %tmp4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-13-InstrSched.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-13-InstrSched.ll
deleted file mode 100644
index 33f935e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-13-InstrSched.ll
+++ /dev/null
@@ -1,51 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=pic \
-; RUN: -mattr=+v6 | grep r9
-; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=pic \
-; RUN: -mattr=+v6 -arm-reserve-r9 -ifcvt-limit=0 -stats |& grep asm-printer
-; | grep 35
-
-define void @test(i32 %tmp56222, i32 %tmp36224, i32 %tmp46223, i32 %i.0196.0.ph, i32 %tmp8, i32* %tmp1011, i32** %tmp1, i32* %d2.1.out, i32* %d3.1.out, i32* %d0.1.out, i32* %d1.1.out) {
-newFuncRoot:
- br label %bb74
-
-bb78.exitStub: ; preds = %bb74
- store i32 %d2.1, i32* %d2.1.out
- store i32 %d3.1, i32* %d3.1.out
- store i32 %d0.1, i32* %d0.1.out
- store i32 %d1.1, i32* %d1.1.out
- ret void
-
-bb74: ; preds = %bb26, %newFuncRoot
- %fp.1.rec = phi i32 [ 0, %newFuncRoot ], [ %tmp71.rec, %bb26 ] ; <i32> [#uses=3]
- %fm.1.in = phi i32* [ %tmp71, %bb26 ], [ %tmp1011, %newFuncRoot ] ; <i32*> [#uses=1]
- %d0.1 = phi i32 [ %tmp44, %bb26 ], [ 8192, %newFuncRoot ] ; <i32> [#uses=2]
- %d1.1 = phi i32 [ %tmp54, %bb26 ], [ 8192, %newFuncRoot ] ; <i32> [#uses=2]
- %d2.1 = phi i32 [ %tmp64, %bb26 ], [ 8192, %newFuncRoot ] ; <i32> [#uses=2]
- %d3.1 = phi i32 [ %tmp69, %bb26 ], [ 8192, %newFuncRoot ] ; <i32> [#uses=2]
- %fm.1 = load i32* %fm.1.in ; <i32> [#uses=4]
- icmp eq i32 %fp.1.rec, %tmp8 ; <i1>:0 [#uses=1]
- br i1 %0, label %bb78.exitStub, label %bb26
-
-bb26: ; preds = %bb74
- %tmp28 = getelementptr i32** %tmp1, i32 %fp.1.rec ; <i32**> [#uses=1]
- %tmp30 = load i32** %tmp28 ; <i32*> [#uses=4]
- %tmp33 = getelementptr i32* %tmp30, i32 %i.0196.0.ph ; <i32*> [#uses=1]
- %tmp34 = load i32* %tmp33 ; <i32> [#uses=1]
- %tmp38 = getelementptr i32* %tmp30, i32 %tmp36224 ; <i32*> [#uses=1]
- %tmp39 = load i32* %tmp38 ; <i32> [#uses=1]
- %tmp42 = mul i32 %tmp34, %fm.1 ; <i32> [#uses=1]
- %tmp44 = add i32 %tmp42, %d0.1 ; <i32> [#uses=1]
- %tmp48 = getelementptr i32* %tmp30, i32 %tmp46223 ; <i32*> [#uses=1]
- %tmp49 = load i32* %tmp48 ; <i32> [#uses=1]
- %tmp52 = mul i32 %tmp39, %fm.1 ; <i32> [#uses=1]
- %tmp54 = add i32 %tmp52, %d1.1 ; <i32> [#uses=1]
- %tmp58 = getelementptr i32* %tmp30, i32 %tmp56222 ; <i32*> [#uses=1]
- %tmp59 = load i32* %tmp58 ; <i32> [#uses=1]
- %tmp62 = mul i32 %tmp49, %fm.1 ; <i32> [#uses=1]
- %tmp64 = add i32 %tmp62, %d2.1 ; <i32> [#uses=1]
- %tmp67 = mul i32 %tmp59, %fm.1 ; <i32> [#uses=1]
- %tmp69 = add i32 %tmp67, %d3.1 ; <i32> [#uses=1]
- %tmp71.rec = add i32 %fp.1.rec, 1 ; <i32> [#uses=2]
- %tmp71 = getelementptr i32* %tmp1011, i32 %tmp71.rec ; <i32*> [#uses=1]
- br label %bb74
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-21-JoinIntervalsCrash.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-21-JoinIntervalsCrash.ll
deleted file mode 100644
index b0953dc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-21-JoinIntervalsCrash.ll
+++ /dev/null
@@ -1,96 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi
-; PR1257
-
- %struct.CUMULATIVE_ARGS = type { i32, i32, i32, i32, i32, i32 }
- %struct.arm_stack_offsets = type { i32, i32, i32, i32, i32 }
- %struct.c_arg_info = type { %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i8 }
- %struct.c_language_function = type { %struct.stmt_tree_s }
- %struct.c_switch = type opaque
- %struct.eh_status = type opaque
- %struct.emit_status = type { i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack*, i32, %struct.location_t, i32, i8*, %struct.rtx_def** }
- %struct.expr_status = type { i32, i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
- %struct.function = type { %struct.eh_status*, %struct.expr_status*, %struct.emit_status*, %struct.varasm_status*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.function*, i32, i32, i32, i32, %struct.rtx_def*, %struct.CUMULATIVE_ARGS, %struct.rtx_def*, %struct.rtx_def*, %struct.initial_value_struct*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, i8, i32, i64, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct.varray_head_tag*, %struct.temp_slot*, i32, %struct.var_refs_queue*, i32, i32, %struct.rtvec_def*, %struct.tree_node*, i32, i32, i32, %struct.machine_function*, i32, i32, i8, i8, %struct.language_function*, %struct.rtx_def*, i32, i32, i32, i32, %struct.location_t, %struct.varray_head_tag*, %struct.tree_node*, i8, i8, i8 }
- %struct.ht_identifier = type { i8*, i32, i32 }
- %struct.initial_value_struct = type opaque
- %struct.lang_decl = type { i8 }
- %struct.language_function = type { %struct.c_language_function, %struct.tree_node*, %struct.tree_node*, %struct.c_switch*, %struct.c_arg_info*, i32, i32, i32, i32 }
- %struct.location_t = type { i8*, i32 }
- %struct.machine_function = type { %struct.rtx_def*, i32, i32, i32, %struct.arm_stack_offsets, i32, i32, i32, [14 x %struct.rtx_def*] }
- %struct.rtvec_def = type { i32, [1 x %struct.rtx_def*] }
- %struct.rtx_def = type { i16, i8, i8, %struct.u }
- %struct.sequence_stack = type { %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack* }
- %struct.stmt_tree_s = type { %struct.tree_node*, i32 }
- %struct.temp_slot = type opaque
- %struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, %union.tree_ann_d*, i8, i8, i8, i8, i8 }
- %struct.tree_decl = type { %struct.tree_common, %struct.location_t, i32, %struct.tree_node*, i8, i8, i8, i8, i8, i8, i8, i8, i32, %struct.tree_decl_u1, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, i32, %struct.tree_decl_u2, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_decl* }
- %struct.tree_decl_u1 = type { i64 }
- %struct.tree_decl_u2 = type { %struct.function* }
- %struct.tree_identifier = type { %struct.tree_common, %struct.ht_identifier }
- %struct.tree_node = type { %struct.tree_decl }
- %struct.u = type { [1 x i64] }
- %struct.var_refs_queue = type { %struct.rtx_def*, i32, i32, %struct.var_refs_queue* }
- %struct.varasm_status = type opaque
- %struct.varray_head_tag = type opaque
- %union.tree_ann_d = type opaque
-
-
-define void @declspecs_add_type(i32 %spec.1) {
-entry:
- %spec.1961 = zext i32 %spec.1 to i64 ; <i64> [#uses=1]
- %spec.1961.adj = shl i64 %spec.1961, 32 ; <i64> [#uses=1]
- %spec.1961.adj.ins = or i64 %spec.1961.adj, 0 ; <i64> [#uses=2]
- %tmp10959 = lshr i64 %spec.1961.adj.ins, 32 ; <i64> [#uses=2]
- %tmp1920 = inttoptr i64 %tmp10959 to %struct.tree_common* ; <%struct.tree_common*> [#uses=1]
- %tmp21 = getelementptr %struct.tree_common* %tmp1920, i32 0, i32 3 ; <i8*> [#uses=1]
- %tmp2122 = bitcast i8* %tmp21 to i32* ; <i32*> [#uses=1]
- br i1 false, label %cond_next53, label %cond_true
-
-cond_true: ; preds = %entry
- ret void
-
-cond_next53: ; preds = %entry
- br i1 false, label %cond_true63, label %cond_next689
-
-cond_true63: ; preds = %cond_next53
- ret void
-
-cond_next689: ; preds = %cond_next53
- br i1 false, label %cond_false841, label %bb743
-
-bb743: ; preds = %cond_next689
- ret void
-
-cond_false841: ; preds = %cond_next689
- br i1 false, label %cond_true851, label %cond_true918
-
-cond_true851: ; preds = %cond_false841
- tail call void @lookup_name( )
- br i1 false, label %bb866, label %cond_next856
-
-cond_next856: ; preds = %cond_true851
- ret void
-
-bb866: ; preds = %cond_true851
- %tmp874 = load i32* %tmp2122 ; <i32> [#uses=1]
- %tmp876877 = trunc i32 %tmp874 to i8 ; <i8> [#uses=1]
- icmp eq i8 %tmp876877, 1 ; <i1>:0 [#uses=1]
- br i1 %0, label %cond_next881, label %cond_true878
-
-cond_true878: ; preds = %bb866
- unreachable
-
-cond_next881: ; preds = %bb866
- %tmp884885 = inttoptr i64 %tmp10959 to %struct.tree_identifier* ; <%struct.tree_identifier*> [#uses=1]
- %tmp887 = getelementptr %struct.tree_identifier* %tmp884885, i32 0, i32 1, i32 0 ; <i8**> [#uses=1]
- %tmp888 = load i8** %tmp887 ; <i8*> [#uses=1]
- tail call void (i32, ...)* @error( i32 undef, i8* %tmp888 )
- ret void
-
-cond_true918: ; preds = %cond_false841
- %tmp920957 = trunc i64 %spec.1961.adj.ins to i32 ; <i32> [#uses=0]
- ret void
-}
-
-declare void @error(i32, ...)
-
-declare void @lookup_name()
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-26-RegScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-26-RegScavengerAssert.ll
deleted file mode 100644
index d741112..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-26-RegScavengerAssert.ll
+++ /dev/null
@@ -1,947 +0,0 @@
-; RUN: llc < %s -march=arm
-; PR1266
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "arm-linux-gnueabi"
- %struct.CUMULATIVE_ARGS = type { i32, i32, i32, i32, i32, i32 }
- %struct.FILE = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker*, %struct.FILE*, i32, i32, i32, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i32, [52 x i8] }
- %struct.VEC_edge = type { i32, i32, [1 x %struct.edge_def*] }
- %struct.VEC_tree = type { i32, i32, [1 x %struct.tree_node*] }
- %struct._IO_marker = type { %struct._IO_marker*, %struct.FILE*, i32 }
- %struct._obstack_chunk = type { i8*, %struct._obstack_chunk*, [4 x i8] }
- %struct.addr_diff_vec_flags = type { i8, i8, i8, i8 }
- %struct.arm_stack_offsets = type { i32, i32, i32, i32, i32 }
- %struct.attribute_spec = type { i8*, i32, i32, i8, i8, i8, %struct.tree_node* (%struct.tree_node**, %struct.tree_node*, %struct.tree_node*, i32, i8*)* }
- %struct.basic_block_def = type { %struct.rtx_def*, %struct.rtx_def*, %struct.tree_node*, %struct.VEC_edge*, %struct.VEC_edge*, %struct.bitmap_head_def*, %struct.bitmap_head_def*, i8*, %struct.loop*, [2 x %struct.et_node*], %struct.basic_block_def*, %struct.basic_block_def*, %struct.reorder_block_def*, %struct.bb_ann_d*, i64, i32, i32, i32, i32 }
- %struct.bb_ann_d = type { %struct.tree_node*, i8, %struct.edge_prediction* }
- %struct.bitmap_element_def = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, i32, [4 x i32] }
- %struct.bitmap_head_def = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, i32, %struct.bitmap_obstack* }
- %struct.bitmap_obstack = type { %struct.bitmap_element_def*, %struct.bitmap_head_def*, %struct.obstack }
- %struct.cgraph_edge = type { %struct.cgraph_node*, %struct.cgraph_node*, %struct.cgraph_edge*, %struct.cgraph_edge*, %struct.cgraph_edge*, %struct.cgraph_edge*, %struct.tree_node*, i8*, i8* }
- %struct.cgraph_global_info = type { %struct.cgraph_node*, i32, i8 }
- %struct.cgraph_local_info = type { i32, i8, i8, i8, i8, i8, i8, i8 }
- %struct.cgraph_node = type { %struct.tree_node*, %struct.cgraph_edge*, %struct.cgraph_edge*, %struct.cgraph_node*, %struct.cgraph_node*, %struct.cgraph_node*, %struct.cgraph_node*, %struct.cgraph_node*, %struct.cgraph_node*, %struct.cgraph_node*, i8*, %struct.cgraph_local_info, %struct.cgraph_global_info, %struct.cgraph_rtl_info, i32, i8, i8, i8, i8, i8 }
- %struct.cgraph_rtl_info = type { i32, i8, i8 }
- %struct.cl_perfunc_opts = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.cselib_val_struct = type opaque
- %struct.dataflow_d = type { %struct.varray_head_tag*, [2 x %struct.tree_node*] }
- %struct.def_operand_ptr = type { %struct.tree_node** }
- %struct.def_optype_d = type { i32, [1 x %struct.def_operand_ptr] }
- %struct.diagnostic_context = type { %struct.pretty_printer*, [8 x i32], i8, i8, i8, void (%struct.diagnostic_context*, %struct.diagnostic_info*)*, void (%struct.diagnostic_context*, %struct.diagnostic_info*)*, void (i8*, i8**)*, %struct.tree_node*, i32, i32 }
- %struct.diagnostic_info = type { %struct.text_info, %struct.location_t, i32 }
- %struct.die_struct = type opaque
- %struct.edge_def = type { %struct.basic_block_def*, %struct.basic_block_def*, %struct.edge_def_insns, i8*, %struct.location_t*, i32, i32, i64, i32 }
- %struct.edge_def_insns = type { %struct.rtx_def* }
- %struct.edge_prediction = type { %struct.edge_prediction*, %struct.edge_def*, i32, i32 }
- %struct.eh_status = type opaque
- %struct.elt_list = type opaque
- %struct.elt_t = type { %struct.tree_node*, %struct.tree_node* }
- %struct.emit_status = type { i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack*, i32, %struct.location_t, i32, i8*, %struct.rtx_def** }
- %struct.et_node = type opaque
- %struct.expr_status = type { i32, i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
- %struct.function = type { %struct.eh_status*, %struct.expr_status*, %struct.emit_status*, %struct.varasm_status*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.function*, i32, i32, i32, i32, %struct.rtx_def*, %struct.CUMULATIVE_ARGS, %struct.rtx_def*, %struct.rtx_def*, %struct.initial_value_struct*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, i8, i32, i64, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct.varray_head_tag*, %struct.temp_slot*, i32, %struct.var_refs_queue*, i32, i32, %struct.rtvec_def*, %struct.tree_node*, i32, i32, i32, %struct.machine_function*, i32, i32, i8, i8, %struct.language_function*, %struct.rtx_def*, i32, i32, i32, i32, %struct.location_t, %struct.varray_head_tag*, %struct.tree_node*, i8, i8, i8 }
- %struct.ggc_root_tab = type { i8*, i32, i32, void (i8*)*, void (i8*)* }
- %struct.gimplify_ctx = type { %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.varray_head_tag*, %struct.htab*, i32, i8, i8 }
- %struct.gimplify_init_ctor_preeval_data = type { %struct.tree_node*, i32 }
- %struct.ht_identifier = type { i8*, i32, i32 }
- %struct.htab = type { i32 (i8*)*, i32 (i8*, i8*)*, void (i8*)*, i8**, i32, i32, i32, i32, i32, i8* (i32, i32)*, void (i8*)*, i8*, i8* (i8*, i32, i32)*, void (i8*, i8*)*, i32 }
- %struct.initial_value_struct = type opaque
- %struct.lang_decl = type opaque
- %struct.lang_hooks = type { i8*, i32, i32 (i32)*, i32 (i32, i8**)*, void (%struct.diagnostic_context*)*, i32 (i32, i8*, i32)*, i8 (i8*, i32) zeroext *, i8 (i8**) zeroext *, i8 () zeroext *, void ()*, void ()*, void (i32)*, void ()*, i64 (%struct.tree_node*)*, %struct.tree_node* (%struct.tree_node*)*, %struct.rtx_def* (%struct.tree_node*, %struct.rtx_def*, i32, i32, %struct.rtx_def**)*, i32 (%struct.tree_node*)*, %struct.tree_node* (%struct.tree_node*)*, i32 (%struct.rtx_def*, %struct.tree_node*)*, void (%struct.tree_node*)*, i8 (%struct.tree_node*) zeroext *, %struct.tree_node* (%struct.tree_node*)*, void (%struct.tree_node*)*, void (%struct.tree_node*)*, i8 () zeroext *, i8, i8, void ()*, void (%struct.FILE*, %struct.tree_node*, i32)*, void (%struct.FILE*, %struct.tree_node*, i32)*, void (%struct.FILE*, %struct.tree_node*, i32)*, void (%struct.FILE*, %struct.tree_node*, i32)*, i8* (%struct.tree_node*, i32)*, i32 (%struct.tree_node*, %struct.tree_node*)*, %struct.tree_node* (%struct.tree_node*)*, void (%struct.diagnostic_context*, i8*)*, %struct.tree_node* (%struct.tree_node*)*, i64 (i64)*, %struct.attribute_spec*, %struct.attribute_spec*, %struct.attribute_spec*, i32 (%struct.tree_node*)*, %struct.lang_hooks_for_functions, %struct.lang_hooks_for_tree_inlining, %struct.lang_hooks_for_callgraph, %struct.lang_hooks_for_tree_dump, %struct.lang_hooks_for_decls, %struct.lang_hooks_for_types, i32 (%struct.tree_node**, %struct.tree_node**, %struct.tree_node**)*, %struct.tree_node* (%struct.tree_node*, %struct.tree_node*)*, %struct.tree_node* (i8*, %struct.tree_node*, i32, i32, i8*, %struct.tree_node*)* }
- %struct.lang_hooks_for_callgraph = type { %struct.tree_node* (%struct.tree_node**, i32*, %struct.tree_node*)*, void (%struct.tree_node*)* }
- %struct.lang_hooks_for_decls = type { i32 ()*, void (%struct.tree_node*)*, %struct.tree_node* (%struct.tree_node*)*, %struct.tree_node* ()*, i8 (%struct.tree_node*) zeroext *, void ()*, void (%struct.tree_node*)*, i8 (%struct.tree_node*) zeroext *, i8* (%struct.tree_node*)* }
- %struct.lang_hooks_for_functions = type { void (%struct.function*)*, void (%struct.function*)*, void (%struct.function*)*, void (%struct.function*)*, i8 (%struct.tree_node*) zeroext * }
- %struct.lang_hooks_for_tree_dump = type { i8 (i8*, %struct.tree_node*) zeroext *, i32 (%struct.tree_node*)* }
- %struct.lang_hooks_for_tree_inlining = type { %struct.tree_node* (%struct.tree_node**, i32*, %struct.tree_node* (%struct.tree_node**, i32*, i8*)*, i8*, %struct.pointer_set_t*)*, i32 (%struct.tree_node**)*, i32 (%struct.tree_node*)*, %struct.tree_node* (i8*, %struct.tree_node*)*, i32 (%struct.tree_node*, %struct.tree_node*)*, i32 (%struct.tree_node*)*, i8 (%struct.tree_node*, %struct.tree_node*) zeroext *, i32 (%struct.tree_node*)*, void (%struct.tree_node*)*, %struct.tree_node* (%struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i32)* }
- %struct.lang_hooks_for_types = type { %struct.tree_node* (i32)*, %struct.tree_node* (i32, i32)*, %struct.tree_node* (i32, i32)*, %struct.tree_node* (%struct.tree_node*)*, %struct.tree_node* (%struct.tree_node*)*, %struct.tree_node* (i32, %struct.tree_node*)*, %struct.tree_node* (%struct.tree_node*)*, void (%struct.tree_node*, i8*)*, void (%struct.tree_node*, %struct.tree_node*)*, %struct.tree_node* (%struct.tree_node*)*, i8 }
- %struct.lang_type = type opaque
- %struct.language_function = type opaque
- %struct.location_t = type { i8*, i32 }
- %struct.loop = type opaque
- %struct.machine_function = type { %struct.rtx_def*, i32, i32, i32, %struct.arm_stack_offsets, i32, i32, i32, [14 x %struct.rtx_def*] }
- %struct.mem_attrs = type { i64, %struct.tree_node*, %struct.rtx_def*, %struct.rtx_def*, i32 }
- %struct.obstack = type { i32, %struct._obstack_chunk*, i8*, i8*, i8*, i32, i32, %struct._obstack_chunk* (i8*, i32)*, void (i8*, %struct._obstack_chunk*)*, i8*, i8 }
- %struct.output_buffer = type { %struct.obstack, %struct.FILE*, i32, [128 x i8] }
- %struct.phi_arg_d = type { %struct.tree_node*, i8 }
- %struct.pointer_set_t = type opaque
- %struct.pretty_printer = type { %struct.output_buffer*, i8*, i32, i32, i32, i32, i32, i8 (%struct.pretty_printer*, %struct.text_info*) zeroext *, i8, i8 }
- %struct.ptr_info_def = type { i8, %struct.bitmap_head_def*, %struct.tree_node* }
- %struct.real_value = type { i8, [3 x i8], [5 x i32] }
- %struct.reg_attrs = type { %struct.tree_node*, i64 }
- %struct.reg_info_def = type opaque
- %struct.reorder_block_def = type { %struct.rtx_def*, %struct.rtx_def*, %struct.basic_block_def*, %struct.basic_block_def*, %struct.basic_block_def*, i32, i32, i32 }
- %struct.rtunion = type { i32 }
- %struct.rtvec_def = type { i32, [1 x %struct.rtx_def*] }
- %struct.rtx_def = type { i16, i8, i8, %struct.u }
- %struct.sequence_stack = type { %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack* }
- %struct.stmt_ann_d = type { %struct.tree_ann_common_d, i8, %struct.basic_block_def*, %struct.stmt_operands_d, %struct.dataflow_d*, %struct.bitmap_head_def*, i32 }
- %struct.stmt_operands_d = type { %struct.def_optype_d*, %struct.def_optype_d*, %struct.v_may_def_optype_d*, %struct.vuse_optype_d*, %struct.v_may_def_optype_d* }
- %struct.temp_slot = type opaque
- %struct.text_info = type { i8*, i8**, i32 }
- %struct.tree_ann_common_d = type { i32, i8*, %struct.tree_node* }
- %struct.tree_ann_d = type { %struct.stmt_ann_d }
- %struct.tree_binfo = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.VEC_tree*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.VEC_tree }
- %struct.tree_block = type { %struct.tree_common, i8, [3 x i8], %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node* }
- %struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, %struct.tree_ann_d*, i8, i8, i8, i8, i8 }
- %struct.tree_complex = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node* }
- %struct.tree_decl = type { %struct.tree_common, %struct.location_t, i32, %struct.tree_node*, i8, i8, i8, i8, i8, i8, i8, i8, i32, %struct.tree_decl_u1, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, i32, %struct.tree_decl_u2, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_decl* }
- %struct.tree_decl_u1 = type { i64 }
- %struct.tree_decl_u1_a = type { i32 }
- %struct.tree_decl_u2 = type { %struct.function* }
- %struct.tree_exp = type { %struct.tree_common, %struct.location_t*, i32, %struct.tree_node*, [1 x %struct.tree_node*] }
- %struct.tree_identifier = type { %struct.tree_common, %struct.ht_identifier }
- %struct.tree_int_cst = type { %struct.tree_common, %struct.tree_int_cst_lowhi }
- %struct.tree_int_cst_lowhi = type { i64, i64 }
- %struct.tree_list = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node* }
- %struct.tree_node = type { %struct.tree_decl }
- %struct.tree_phi_node = type { %struct.tree_common, %struct.tree_node*, i32, i32, i32, %struct.basic_block_def*, %struct.dataflow_d*, [1 x %struct.phi_arg_d] }
- %struct.tree_real_cst = type { %struct.tree_common, %struct.real_value* }
- %struct.tree_ssa_name = type { %struct.tree_common, %struct.tree_node*, i32, %struct.ptr_info_def*, %struct.tree_node*, i8* }
- %struct.tree_statement_list = type { %struct.tree_common, %struct.tree_statement_list_node*, %struct.tree_statement_list_node* }
- %struct.tree_statement_list_node = type { %struct.tree_statement_list_node*, %struct.tree_statement_list_node*, %struct.tree_node* }
- %struct.tree_stmt_iterator = type { %struct.tree_statement_list_node*, %struct.tree_node* }
- %struct.tree_string = type { %struct.tree_common, i32, [1 x i8] }
- %struct.tree_type = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i32, i16, i8, i8, i32, %struct.tree_node*, %struct.tree_node*, %struct.rtunion, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_type* }
- %struct.tree_type_symtab = type { i32 }
- %struct.tree_value_handle = type { %struct.tree_common, %struct.value_set*, i32 }
- %struct.tree_vec = type { %struct.tree_common, i32, [1 x %struct.tree_node*] }
- %struct.tree_vector = type { %struct.tree_common, %struct.tree_node* }
- %struct.u = type { [1 x i64] }
- %struct.use_operand_ptr = type { %struct.tree_node** }
- %struct.use_optype_d = type { i32, [1 x %struct.def_operand_ptr] }
- %struct.v_def_use_operand_type_t = type { %struct.tree_node*, %struct.tree_node* }
- %struct.v_may_def_optype_d = type { i32, [1 x %struct.elt_t] }
- %struct.v_must_def_optype_d = type { i32, [1 x %struct.elt_t] }
- %struct.value_set = type opaque
- %struct.var_ann_d = type { %struct.tree_ann_common_d, i8, i8, %struct.tree_node*, %struct.varray_head_tag*, i32, i32, i32, %struct.tree_node*, %struct.tree_node* }
- %struct.var_refs_queue = type { %struct.rtx_def*, i32, i32, %struct.var_refs_queue* }
- %struct.varasm_status = type opaque
- %struct.varray_data = type { [1 x i64] }
- %struct.varray_head_tag = type { i32, i32, i32, i8*, %struct.u }
- %struct.vuse_optype_d = type { i32, [1 x %struct.tree_node*] }
-@gt_pch_rs_gt_gimplify_h = external global [2 x %struct.ggc_root_tab] ; <[2 x %struct.ggc_root_tab]*> [#uses=0]
-@tmp_var_id_num = external global i32 ; <i32*> [#uses=0]
-@gt_ggc_r_gt_gimplify_h = external global [1 x %struct.ggc_root_tab] ; <[1 x %struct.ggc_root_tab]*> [#uses=0]
-@__FUNCTION__.19956 = external global [15 x i8] ; <[15 x i8]*> [#uses=0]
-@str = external global [42 x i8] ; <[42 x i8]*> [#uses=1]
-@__FUNCTION__.19974 = external global [22 x i8] ; <[22 x i8]*> [#uses=0]
-@gimplify_ctxp = external global %struct.gimplify_ctx* ; <%struct.gimplify_ctx**> [#uses=0]
-@cl_pf_opts = external global %struct.cl_perfunc_opts ; <%struct.cl_perfunc_opts*> [#uses=0]
-@__FUNCTION__.20030 = external global [22 x i8] ; <[22 x i8]*> [#uses=0]
-@__FUNCTION__.20099 = external global [24 x i8] ; <[24 x i8]*> [#uses=0]
-@global_trees = external global [47 x %struct.tree_node*] ; <[47 x %struct.tree_node*]*> [#uses=0]
-@tree_code_type = external global [0 x i32] ; <[0 x i32]*> [#uses=2]
-@current_function_decl = external global %struct.tree_node* ; <%struct.tree_node**> [#uses=0]
-@str1 = external global [2 x i8] ; <[2 x i8]*> [#uses=0]
-@str2 = external global [7 x i8] ; <[7 x i8]*> [#uses=0]
-@__FUNCTION__.20151 = external global [19 x i8] ; <[19 x i8]*> [#uses=0]
-@__FUNCTION__.20221 = external global [9 x i8] ; <[9 x i8]*> [#uses=0]
-@tree_code_length = external global [0 x i8] ; <[0 x i8]*> [#uses=0]
-@__FUNCTION__.20435 = external global [17 x i8] ; <[17 x i8]*> [#uses=0]
-@__FUNCTION__.20496 = external global [19 x i8] ; <[19 x i8]*> [#uses=0]
-@cfun = external global %struct.function* ; <%struct.function**> [#uses=0]
-@__FUNCTION__.20194 = external global [15 x i8] ; <[15 x i8]*> [#uses=0]
-@__FUNCTION__.19987 = external global [21 x i8] ; <[21 x i8]*> [#uses=0]
-@__FUNCTION__.20532 = external global [21 x i8] ; <[21 x i8]*> [#uses=0]
-@__FUNCTION__.20583 = external global [19 x i8] ; <[19 x i8]*> [#uses=0]
-@__FUNCTION__.20606 = external global [22 x i8] ; <[22 x i8]*> [#uses=0]
-@__FUNCTION__.20644 = external global [17 x i8] ; <[17 x i8]*> [#uses=0]
-@__FUNCTION__.20681 = external global [13 x i8] ; <[13 x i8]*> [#uses=0]
-@__FUNCTION__.20700 = external global [13 x i8] ; <[13 x i8]*> [#uses=0]
-@__FUNCTION__.21426 = external global [20 x i8] ; <[20 x i8]*> [#uses=0]
-@__FUNCTION__.21471 = external global [17 x i8] ; <[17 x i8]*> [#uses=0]
-@__FUNCTION__.21962 = external global [27 x i8] ; <[27 x i8]*> [#uses=0]
-@__FUNCTION__.22992 = external global [21 x i8] ; <[21 x i8]*> [#uses=0]
-@__FUNCTION__.23735 = external global [15 x i8] ; <[15 x i8]*> [#uses=0]
-@lang_hooks = external global %struct.lang_hooks ; <%struct.lang_hooks*> [#uses=0]
-@__FUNCTION__.27383 = external global [22 x i8] ; <[22 x i8]*> [#uses=0]
-@__FUNCTION__.20776 = external global [21 x i8] ; <[21 x i8]*> [#uses=0]
-@__FUNCTION__.10672 = external global [9 x i8] ; <[9 x i8]*> [#uses=0]
-@str3 = external global [47 x i8] ; <[47 x i8]*> [#uses=0]
-@str4 = external global [7 x i8] ; <[7 x i8]*> [#uses=0]
-@__FUNCTION__.20065 = external global [25 x i8] ; <[25 x i8]*> [#uses=0]
-@__FUNCTION__.23256 = external global [16 x i8] ; <[16 x i8]*> [#uses=0]
-@__FUNCTION__.23393 = external global [19 x i8] ; <[19 x i8]*> [#uses=0]
-@__FUNCTION__.20043 = external global [21 x i8] ; <[21 x i8]*> [#uses=0]
-@__FUNCTION__.20729 = external global [23 x i8] ; <[23 x i8]*> [#uses=0]
-@__FUNCTION__.20563 = external global [24 x i8] ; <[24 x i8]*> [#uses=0]
-@__FUNCTION__.10663 = external global [10 x i8] ; <[10 x i8]*> [#uses=0]
-@__FUNCTION__.20367 = external global [21 x i8] ; <[21 x i8]*> [#uses=0]
-@__FUNCTION__.20342 = external global [15 x i8] ; <[15 x i8]*> [#uses=0]
-@input_location = external global %struct.location_t ; <%struct.location_t*> [#uses=0]
-@__FUNCTION__.24510 = external global [27 x i8] ; <[27 x i8]*> [#uses=0]
-@__FUNCTION__.25097 = external global [25 x i8] ; <[25 x i8]*> [#uses=0]
-@__FUNCTION__.24705 = external global [26 x i8] ; <[26 x i8]*> [#uses=0]
-@str5 = external global [2 x i8] ; <[2 x i8]*> [#uses=0]
-@__FUNCTION__.25136 = external global [21 x i8] ; <[21 x i8]*> [#uses=0]
-@__FUNCTION__.24450 = external global [31 x i8] ; <[31 x i8]*> [#uses=0]
-@implicit_built_in_decls = external global [471 x %struct.tree_node*] ; <[471 x %struct.tree_node*]*> [#uses=0]
-@__FUNCTION__.24398 = external global [31 x i8] ; <[31 x i8]*> [#uses=0]
-@__FUNCTION__.26156 = external global [14 x i8] ; <[14 x i8]*> [#uses=1]
-@unknown_location = external global %struct.location_t ; <%struct.location_t*> [#uses=0]
-@__FUNCTION__.23038 = external global [19 x i8] ; <[19 x i8]*> [#uses=0]
-@str6 = external global [43 x i8] ; <[43 x i8]*> [#uses=0]
-@__FUNCTION__.25476 = external global [19 x i8] ; <[19 x i8]*> [#uses=0]
-@__FUNCTION__.22136 = external global [20 x i8] ; <[20 x i8]*> [#uses=1]
-@__FUNCTION__.21997 = external global [23 x i8] ; <[23 x i8]*> [#uses=0]
-@__FUNCTION__.21247 = external global [19 x i8] ; <[19 x i8]*> [#uses=0]
-@built_in_decls = external global [471 x %struct.tree_node*] ; <[471 x %struct.tree_node*]*> [#uses=0]
-@__FUNCTION__.21924 = external global [19 x i8] ; <[19 x i8]*> [#uses=0]
-@__FUNCTION__.21861 = external global [25 x i8] ; <[25 x i8]*> [#uses=0]
-@global_dc = external global %struct.diagnostic_context* ; <%struct.diagnostic_context**> [#uses=0]
-@__FUNCTION__.25246 = external global [32 x i8] ; <[32 x i8]*> [#uses=0]
-@str7 = external global [4 x i8] ; <[4 x i8]*> [#uses=0]
-@stderr = external global %struct.FILE* ; <%struct.FILE**> [#uses=0]
-@str8 = external global [24 x i8] ; <[24 x i8]*> [#uses=0]
-@str9 = external global [22 x i8] ; <[22 x i8]*> [#uses=0]
-@__FUNCTION__.27653 = external global [21 x i8] ; <[21 x i8]*> [#uses=0]
-@__FUNCTION__.27322 = external global [21 x i8] ; <[21 x i8]*> [#uses=0]
-@__FUNCTION__.27139 = external global [20 x i8] ; <[20 x i8]*> [#uses=0]
-@__FUNCTION__.22462 = external global [23 x i8] ; <[23 x i8]*> [#uses=0]
-@str10 = external global [6 x i8] ; <[6 x i8]*> [#uses=0]
-@__FUNCTION__.25389 = external global [19 x i8] ; <[19 x i8]*> [#uses=0]
-@__FUNCTION__.25650 = external global [18 x i8] ; <[18 x i8]*> [#uses=0]
-@str11 = external global [32 x i8] ; <[32 x i8]*> [#uses=0]
-@str12 = external global [3 x i8] ; <[3 x i8]*> [#uses=0]
-@str13 = external global [44 x i8] ; <[44 x i8]*> [#uses=0]
-@__FUNCTION__.27444 = external global [14 x i8] ; <[14 x i8]*> [#uses=0]
-@timevar_enable = external global i8 ; <i8*> [#uses=0]
-@__FUNCTION__.27533 = external global [23 x i8] ; <[23 x i8]*> [#uses=0]
-@flag_instrument_function_entry_exit = external global i32 ; <i32*> [#uses=0]
-@__FUNCTION__.25331 = external global [23 x i8] ; <[23 x i8]*> [#uses=0]
-@__FUNCTION__.20965 = external global [19 x i8] ; <[19 x i8]*> [#uses=0]
-@str14 = external global [12 x i8] ; <[12 x i8]*> [#uses=0]
-@__FUNCTION__.26053 = external global [21 x i8] ; <[21 x i8]*> [#uses=0]
-@__FUNCTION__.26004 = external global [20 x i8] ; <[20 x i8]*> [#uses=0]
-@str15 = external global [8 x i8] ; <[8 x i8]*> [#uses=0]
-@__FUNCTION__.21584 = external global [21 x i8] ; <[21 x i8]*> [#uses=0]
-@str16 = external global [12 x i8] ; <[12 x i8]*> [#uses=0]
-@__FUNCTION__.25903 = external global [28 x i8] ; <[28 x i8]*> [#uses=0]
-@__FUNCTION__.22930 = external global [23 x i8] ; <[23 x i8]*> [#uses=0]
-@__FUNCTION__.23832 = external global [19 x i8] ; <[19 x i8]*> [#uses=0]
-@str17 = external global [6 x i8] ; <[6 x i8]*> [#uses=0]
-@__FUNCTION__.24620 = external global [24 x i8] ; <[24 x i8]*> [#uses=0]
-@__FUNCTION__.24582 = external global [30 x i8] ; <[30 x i8]*> [#uses=0]
-@__FUNCTION__.21382 = external global [19 x i8] ; <[19 x i8]*> [#uses=0]
-@__FUNCTION__.21117 = external global [21 x i8] ; <[21 x i8]*> [#uses=0]
-
-
-declare void @push_gimplify_context()
-
-declare i32 @gimple_tree_hash(i8*)
-
-declare i32 @iterative_hash_expr(%struct.tree_node*, i32)
-
-declare i32 @gimple_tree_eq(i8*, i8*)
-
-declare i32 @operand_equal_p(%struct.tree_node*, %struct.tree_node*, i32)
-
-declare void @fancy_abort(i8*, i32, i8*)
-
-declare i8* @xcalloc(i32, i32)
-
-declare %struct.htab* @htab_create(i32, i32 (i8*)*, i32 (i8*, i8*)*, void (i8*)*)
-
-declare void @free(i8*)
-
-declare void @gimple_push_bind_expr(%struct.tree_node*)
-
-declare void @gimple_pop_bind_expr()
-
-declare %struct.tree_node* @gimple_current_bind_expr()
-
-declare fastcc void @gimple_push_condition()
-
-declare %struct.tree_node* @create_artificial_label()
-
-declare %struct.tree_node* @build_decl_stat(i32, %struct.tree_node*, %struct.tree_node*)
-
-declare void @tree_class_check_failed(%struct.tree_node*, i32, i8*, i32, i8*)
-
-declare %struct.tree_node* @create_tmp_var_name(i8*)
-
-declare i32 @strlen(i8*)
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
-
-declare i32 @sprintf(i8*, i8*, ...)
-
-declare %struct.tree_node* @get_identifier(i8*)
-
-declare %struct.tree_node* @create_tmp_var_raw(%struct.tree_node*, i8*)
-
-declare %struct.tree_node* @build_qualified_type(%struct.tree_node*, i32)
-
-declare i8* @get_name(%struct.tree_node*)
-
-declare void @tree_operand_check_failed(i32, i32, i8*, i32, i8*)
-
-declare void @tree_check_failed(%struct.tree_node*, i8*, i32, i8*, ...)
-
-declare void @declare_tmp_vars(%struct.tree_node*, %struct.tree_node*)
-
-declare %struct.tree_node* @nreverse(%struct.tree_node*)
-
-declare void @gimple_add_tmp_var(%struct.tree_node*)
-
-declare void @record_vars(%struct.tree_node*)
-
-declare %struct.tree_node* @create_tmp_var(%struct.tree_node*, i8*)
-
-declare void @pop_gimplify_context(%struct.tree_node*)
-
-declare void @htab_delete(%struct.htab*)
-
-declare fastcc void @annotate_one_with_locus(%struct.tree_node*, i32, i32)
-
-declare void @annotate_with_locus(%struct.tree_node*, i32, i32)
-
-declare %struct.tree_node* @mostly_copy_tree_r(%struct.tree_node**, i32*, i8*)
-
-declare %struct.tree_node* @copy_tree_r(%struct.tree_node**, i32*, i8*)
-
-declare %struct.tree_node* @mark_decls_volatile_r(%struct.tree_node**, i32*, i8*)
-
-declare %struct.tree_node* @copy_if_shared_r(%struct.tree_node**, i32*, i8*)
-
-declare %struct.tree_node* @walk_tree(%struct.tree_node**, %struct.tree_node* (%struct.tree_node**, i32*, i8*)*, i8*, %struct.pointer_set_t*)
-
-declare %struct.tree_node* @unmark_visited_r(%struct.tree_node**, i32*, i8*)
-
-declare fastcc void @unshare_body(%struct.tree_node**, %struct.tree_node*)
-
-declare %struct.cgraph_node* @cgraph_node(%struct.tree_node*)
-
-declare fastcc void @unvisit_body(%struct.tree_node**, %struct.tree_node*)
-
-declare void @unshare_all_trees(%struct.tree_node*)
-
-declare %struct.tree_node* @unshare_expr(%struct.tree_node*)
-
-declare %struct.tree_node* @build_and_jump(%struct.tree_node**)
-
-declare %struct.tree_node* @build1_stat(i32, %struct.tree_node*, %struct.tree_node*)
-
-declare i32 @compare_case_labels(i8*, i8*)
-
-declare i32 @tree_int_cst_compare(%struct.tree_node*, %struct.tree_node*)
-
-declare void @sort_case_labels(%struct.tree_node*)
-
-declare void @tree_vec_elt_check_failed(i32, i32, i8*, i32, i8*)
-
-declare void @qsort(i8*, i32, i32, i32 (i8*, i8*)*)
-
-declare %struct.tree_node* @force_labels_r(%struct.tree_node**, i32*, i8*)
-
-declare fastcc void @canonicalize_component_ref(%struct.tree_node**)
-
-declare %struct.tree_node* @get_unwidened(%struct.tree_node*, %struct.tree_node*)
-
-declare fastcc void @maybe_with_size_expr(%struct.tree_node**)
-
-declare %struct.tree_node* @substitute_placeholder_in_expr(%struct.tree_node*, %struct.tree_node*)
-
-declare %struct.tree_node* @build2_stat(i32, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*)
-
-declare fastcc %struct.tree_node* @gimple_boolify(%struct.tree_node*)
-
-declare %struct.tree_node* @convert(%struct.tree_node*, %struct.tree_node*)
-
-declare %struct.tree_node* @gimplify_init_ctor_preeval_1(%struct.tree_node**, i32*, i8*)
-
-declare i64 @get_alias_set(%struct.tree_node*)
-
-declare i32 @alias_sets_conflict_p(i64, i64)
-
-declare fastcc i8 @cpt_same_type(%struct.tree_node*, %struct.tree_node*) zeroext
-
-declare %struct.tree_node* @check_pointer_types_r(%struct.tree_node**, i32*, i8*)
-
-declare %struct.tree_node* @voidify_wrapper_expr(%struct.tree_node*, %struct.tree_node*)
-
-declare i32 @integer_zerop(%struct.tree_node*)
-
-declare fastcc void @append_to_statement_list_1(%struct.tree_node*, %struct.tree_node**)
-
-declare %struct.tree_node* @alloc_stmt_list()
-
-declare void @tsi_link_after(%struct.tree_stmt_iterator*, %struct.tree_node*, i32)
-
-declare void @append_to_statement_list_force(%struct.tree_node*, %struct.tree_node**)
-
-declare void @append_to_statement_list(%struct.tree_node*, %struct.tree_node**)
-
-declare fastcc %struct.tree_node* @shortcut_cond_r(%struct.tree_node*, %struct.tree_node**, %struct.tree_node**)
-
-declare %struct.tree_node* @build3_stat(i32, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*)
-
-declare fastcc %struct.tree_node* @shortcut_cond_expr(%struct.tree_node*)
-
-declare %struct.tree_node* @expr_last(%struct.tree_node*)
-
-declare i8 @block_may_fallthru(%struct.tree_node*) zeroext
-
-declare fastcc void @gimple_pop_condition(%struct.tree_node**)
-
-declare %struct.tree_node* @gimple_build_eh_filter(%struct.tree_node*, %struct.tree_node*, %struct.tree_node*)
-
-declare void @annotate_all_with_locus(%struct.tree_node**, i32, i32)
-
-declare fastcc %struct.tree_node* @internal_get_tmp_var(%struct.tree_node*, %struct.tree_node**, %struct.tree_node**, i8 zeroext )
-
-define i32 @gimplify_expr(%struct.tree_node** %expr_p, %struct.tree_node** %pre_p, %struct.tree_node** %post_p, i8 (%struct.tree_node*) zeroext * %gimple_test_f, i32 %fallback) {
-entry:
- %internal_post = alloca %struct.tree_node*, align 4 ; <%struct.tree_node**> [#uses=2]
- %pre_p_addr.0 = select i1 false, %struct.tree_node** null, %struct.tree_node** %pre_p ; <%struct.tree_node**> [#uses=7]
- %post_p_addr.0 = select i1 false, %struct.tree_node** %internal_post, %struct.tree_node** %post_p ; <%struct.tree_node**> [#uses=7]
- br i1 false, label %bb277, label %bb191
-
-bb191: ; preds = %entry
- ret i32 0
-
-bb277: ; preds = %entry
- %tmp283 = call i32 null( %struct.tree_node** %expr_p, %struct.tree_node** %pre_p_addr.0, %struct.tree_node** %post_p_addr.0 ) ; <i32> [#uses=1]
- switch i32 %tmp283, label %bb7478 [
- i32 0, label %cond_next289
- i32 -1, label %cond_next298
- ]
-
-cond_next289: ; preds = %bb277
- ret i32 0
-
-cond_next298: ; preds = %bb277
- switch i32 0, label %bb7444 [
- i32 24, label %bb7463
- i32 25, label %bb7463
- i32 26, label %bb7463
- i32 27, label %bb7463
- i32 28, label %bb7463
- i32 33, label %bb4503
- i32 39, label %bb397
- i32 40, label %bb5650
- i32 41, label %bb4339
- i32 42, label %bb4350
- i32 43, label %bb4350
- i32 44, label %bb319
- i32 45, label %bb397
- i32 46, label %bb6124
- i32 47, label %bb7463
- i32 49, label %bb5524
- i32 50, label %bb1283
- i32 51, label %bb1289
- i32 52, label %bb1289
- i32 53, label %bb5969
- i32 54, label %bb408
- i32 56, label %bb5079
- i32 57, label %bb428
- i32 59, label %bb5965
- i32 74, label %bb4275
- i32 75, label %bb4275
- i32 76, label %bb4275
- i32 77, label %bb4275
- i32 91, label %bb1296
- i32 92, label %bb1296
- i32 96, label %bb1322
- i32 112, label %bb2548
- i32 113, label %bb2548
- i32 115, label %bb397
- i32 116, label %bb5645
- i32 117, label %bb1504
- i32 121, label %bb397
- i32 122, label %bb397
- i32 123, label %bb313
- i32 124, label %bb313
- i32 125, label %bb313
- i32 126, label %bb313
- i32 127, label %bb2141
- i32 128, label %cond_next5873
- i32 129, label %cond_next5873
- i32 130, label %bb4536
- i32 131, label %bb5300
- i32 132, label %bb5170
- i32 133, label %bb5519
- i32 134, label %bb5091
- i32 135, label %bb5083
- i32 136, label %bb5087
- i32 137, label %bb5382
- i32 139, label %bb7463
- i32 140, label %bb7463
- i32 142, label %bb5974
- i32 143, label %bb6049
- i32 147, label %bb6296
- i32 151, label %cond_next6474
- ]
-
-bb313: ; preds = %cond_next298, %cond_next298, %cond_next298, %cond_next298
- ret i32 0
-
-bb319: ; preds = %cond_next298
- ret i32 0
-
-bb397: ; preds = %cond_next298, %cond_next298, %cond_next298, %cond_next298, %cond_next298
- ret i32 0
-
-bb408: ; preds = %cond_next298
- %tmp413 = call fastcc i32 @gimplify_cond_expr( %struct.tree_node** %expr_p, %struct.tree_node** %pre_p_addr.0, %struct.tree_node** %post_p_addr.0, %struct.tree_node* null, i32 %fallback ) ; <i32> [#uses=0]
- ret i32 0
-
-bb428: ; preds = %cond_next298
- ret i32 0
-
-bb1283: ; preds = %cond_next298
- ret i32 0
-
-bb1289: ; preds = %cond_next298, %cond_next298
- ret i32 0
-
-bb1296: ; preds = %cond_next298, %cond_next298
- ret i32 0
-
-bb1322: ; preds = %cond_next298
- ret i32 0
-
-bb1504: ; preds = %cond_next298
- ret i32 0
-
-bb2141: ; preds = %cond_next298
- ret i32 0
-
-bb2548: ; preds = %cond_next298, %cond_next298
- %tmp2554 = load %struct.tree_node** %expr_p ; <%struct.tree_node*> [#uses=2]
- %tmp2562 = and i32 0, 255 ; <i32> [#uses=1]
- %tmp2569 = add i8 0, -4 ; <i8> [#uses=1]
- icmp ugt i8 %tmp2569, 5 ; <i1>:0 [#uses=2]
- %tmp2587 = load i8* null ; <i8> [#uses=1]
- icmp eq i8 %tmp2587, 0 ; <i1>:1 [#uses=2]
- %tmp2607 = load %struct.tree_node** null ; <%struct.tree_node*> [#uses=2]
- br i1 false, label %bb2754, label %cond_next2617
-
-cond_next2617: ; preds = %bb2548
- ret i32 0
-
-bb2754: ; preds = %bb2548
- br i1 %0, label %cond_true2780, label %cond_next2783
-
-cond_true2780: ; preds = %bb2754
- call void @tree_class_check_failed( %struct.tree_node* %tmp2554, i32 9, i8* getelementptr ([42 x i8]* @str, i32 0, i32 0), i32 1415, i8* getelementptr ([20 x i8]* @__FUNCTION__.22136, i32 0, i32 0) )
- unreachable
-
-cond_next2783: ; preds = %bb2754
- %tmp2825 = and i32 0, 255 ; <i32> [#uses=1]
- %tmp2829 = load i32* null ; <i32> [#uses=1]
- %tmp28292830 = trunc i32 %tmp2829 to i8 ; <i8> [#uses=1]
- %tmp2832 = add i8 %tmp28292830, -4 ; <i8> [#uses=1]
- icmp ugt i8 %tmp2832, 5 ; <i1>:2 [#uses=1]
- icmp eq i8 0, 0 ; <i1>:3 [#uses=1]
- %tmp28652866 = bitcast %struct.tree_node* %tmp2607 to %struct.tree_exp* ; <%struct.tree_exp*> [#uses=1]
- %tmp2868 = getelementptr %struct.tree_exp* %tmp28652866, i32 0, i32 4, i32 0 ; <%struct.tree_node**> [#uses=1]
- %tmp2870 = load %struct.tree_node** %tmp2868 ; <%struct.tree_node*> [#uses=1]
- br i1 %1, label %cond_true2915, label %cond_next2927
-
-cond_true2915: ; preds = %cond_next2783
- unreachable
-
-cond_next2927: ; preds = %cond_next2783
- %tmp2938 = load %struct.tree_node** null ; <%struct.tree_node*> [#uses=1]
- %tmp2944 = load i32* null ; <i32> [#uses=1]
- %tmp2946 = and i32 %tmp2944, 255 ; <i32> [#uses=1]
- %tmp2949 = getelementptr [0 x i32]* @tree_code_type, i32 0, i32 %tmp2946 ; <i32*> [#uses=1]
- %tmp2950 = load i32* %tmp2949 ; <i32> [#uses=1]
- icmp eq i32 %tmp2950, 2 ; <i1>:4 [#uses=1]
- br i1 %4, label %cond_next2954, label %cond_true2951
-
-cond_true2951: ; preds = %cond_next2927
- call void @tree_class_check_failed( %struct.tree_node* %tmp2938, i32 2, i8* getelementptr ([42 x i8]* @str, i32 0, i32 0), i32 1415, i8* getelementptr ([20 x i8]* @__FUNCTION__.22136, i32 0, i32 0) )
- unreachable
-
-cond_next2954: ; preds = %cond_next2927
- br i1 %0, label %cond_true2991, label %cond_next2994
-
-cond_true2991: ; preds = %cond_next2954
- unreachable
-
-cond_next2994: ; preds = %cond_next2954
- br i1 %1, label %cond_true3009, label %cond_next3021
-
-cond_true3009: ; preds = %cond_next2994
- call void @tree_operand_check_failed( i32 0, i32 %tmp2562, i8* getelementptr ([42 x i8]* @str, i32 0, i32 0), i32 1415, i8* getelementptr ([20 x i8]* @__FUNCTION__.22136, i32 0, i32 0) )
- unreachable
-
-cond_next3021: ; preds = %cond_next2994
- br i1 %2, label %cond_true3044, label %cond_next3047
-
-cond_true3044: ; preds = %cond_next3021
- call void @tree_class_check_failed( %struct.tree_node* %tmp2607, i32 9, i8* getelementptr ([42 x i8]* @str, i32 0, i32 0), i32 1415, i8* getelementptr ([20 x i8]* @__FUNCTION__.22136, i32 0, i32 0) )
- unreachable
-
-cond_next3047: ; preds = %cond_next3021
- br i1 %3, label %cond_true3062, label %cond_next3074
-
-cond_true3062: ; preds = %cond_next3047
- call void @tree_operand_check_failed( i32 0, i32 %tmp2825, i8* getelementptr ([42 x i8]* @str, i32 0, i32 0), i32 1415, i8* getelementptr ([20 x i8]* @__FUNCTION__.22136, i32 0, i32 0) )
- unreachable
-
-cond_next3074: ; preds = %cond_next3047
- %tmp3084 = getelementptr %struct.tree_node* %tmp2870, i32 0, i32 0, i32 0, i32 1 ; <%struct.tree_node**> [#uses=1]
- %tmp3085 = load %struct.tree_node** %tmp3084 ; <%struct.tree_node*> [#uses=1]
- %tmp31043105 = bitcast %struct.tree_node* %tmp3085 to %struct.tree_type* ; <%struct.tree_type*> [#uses=1]
- %tmp3106 = getelementptr %struct.tree_type* %tmp31043105, i32 0, i32 6 ; <i16*> [#uses=1]
- %tmp31063107 = bitcast i16* %tmp3106 to i32* ; <i32*> [#uses=1]
- %tmp3108 = load i32* %tmp31063107 ; <i32> [#uses=1]
- xor i32 %tmp3108, 0 ; <i32>:5 [#uses=1]
- %tmp81008368 = and i32 %5, 65024 ; <i32> [#uses=1]
- icmp eq i32 %tmp81008368, 0 ; <i1>:6 [#uses=1]
- br i1 %6, label %cond_next3113, label %bb3351
-
-cond_next3113: ; preds = %cond_next3074
- ret i32 0
-
-bb3351: ; preds = %cond_next3074
- %tmp3354 = call i8 @tree_ssa_useless_type_conversion( %struct.tree_node* %tmp2554 ) zeroext ; <i8> [#uses=1]
- icmp eq i8 %tmp3354, 0 ; <i1>:7 [#uses=1]
- %tmp3424 = load i32* null ; <i32> [#uses=1]
- br i1 %7, label %cond_next3417, label %cond_true3356
-
-cond_true3356: ; preds = %bb3351
- ret i32 0
-
-cond_next3417: ; preds = %bb3351
- br i1 false, label %cond_true3429, label %cond_next4266
-
-cond_true3429: ; preds = %cond_next3417
- %tmp3443 = and i32 %tmp3424, 255 ; <i32> [#uses=0]
- ret i32 0
-
-cond_next4266: ; preds = %cond_next3417
- %tmp4268 = load %struct.tree_node** %expr_p ; <%struct.tree_node*> [#uses=1]
- icmp eq %struct.tree_node* %tmp4268, null ; <i1>:8 [#uses=1]
- br i1 %8, label %bb4275, label %bb7463
-
-bb4275: ; preds = %cond_next4266, %cond_next298, %cond_next298, %cond_next298, %cond_next298
- %tmp4289 = and i32 0, 255 ; <i32> [#uses=2]
- %tmp4292 = getelementptr [0 x i32]* @tree_code_type, i32 0, i32 %tmp4289 ; <i32*> [#uses=1]
- %tmp4293 = load i32* %tmp4292 ; <i32> [#uses=1]
- %tmp42934294 = trunc i32 %tmp4293 to i8 ; <i8> [#uses=1]
- %tmp4296 = add i8 %tmp42934294, -4 ; <i8> [#uses=1]
- icmp ugt i8 %tmp4296, 5 ; <i1>:9 [#uses=1]
- br i1 %9, label %cond_true4297, label %cond_next4300
-
-cond_true4297: ; preds = %bb4275
- unreachable
-
-cond_next4300: ; preds = %bb4275
- %tmp4314 = load i8* null ; <i8> [#uses=1]
- icmp eq i8 %tmp4314, 0 ; <i1>:10 [#uses=1]
- br i1 %10, label %cond_true4315, label %cond_next4327
-
-cond_true4315: ; preds = %cond_next4300
- call void @tree_operand_check_failed( i32 0, i32 %tmp4289, i8* getelementptr ([42 x i8]* @str, i32 0, i32 0), i32 3997, i8* getelementptr ([14 x i8]* @__FUNCTION__.26156, i32 0, i32 0) )
- unreachable
-
-cond_next4327: ; preds = %cond_next4300
- %tmp4336 = call i32 @gimplify_expr( %struct.tree_node** null, %struct.tree_node** %pre_p_addr.0, %struct.tree_node** %post_p_addr.0, i8 (%struct.tree_node*) zeroext * @is_gimple_val, i32 1 ) ; <i32> [#uses=0]
- ret i32 0
-
-bb4339: ; preds = %cond_next298
- ret i32 0
-
-bb4350: ; preds = %cond_next298, %cond_next298
- ret i32 0
-
-bb4503: ; preds = %cond_next298
- ret i32 0
-
-bb4536: ; preds = %cond_next298
- ret i32 0
-
-bb5079: ; preds = %cond_next298
- ret i32 0
-
-bb5083: ; preds = %cond_next298
- ret i32 0
-
-bb5087: ; preds = %cond_next298
- ret i32 0
-
-bb5091: ; preds = %cond_next298
- ret i32 0
-
-bb5170: ; preds = %cond_next298
- ret i32 0
-
-bb5300: ; preds = %cond_next298
- ret i32 0
-
-bb5382: ; preds = %cond_next298
- ret i32 0
-
-bb5519: ; preds = %cond_next298
- ret i32 0
-
-bb5524: ; preds = %cond_next298
- ret i32 0
-
-bb5645: ; preds = %cond_next298
- ret i32 0
-
-bb5650: ; preds = %cond_next298
- ret i32 0
-
-cond_next5873: ; preds = %cond_next298, %cond_next298
- ret i32 0
-
-bb5965: ; preds = %cond_next298
- %tmp5968 = call fastcc i32 @gimplify_cleanup_point_expr( %struct.tree_node** %expr_p, %struct.tree_node** %pre_p_addr.0 ) ; <i32> [#uses=0]
- ret i32 0
-
-bb5969: ; preds = %cond_next298
- %tmp5973 = call fastcc i32 @gimplify_target_expr( %struct.tree_node** %expr_p, %struct.tree_node** %pre_p_addr.0, %struct.tree_node** %post_p_addr.0 ) ; <i32> [#uses=0]
- ret i32 0
-
-bb5974: ; preds = %cond_next298
- ret i32 0
-
-bb6049: ; preds = %cond_next298
- ret i32 0
-
-bb6124: ; preds = %cond_next298
- ret i32 0
-
-bb6296: ; preds = %cond_next298
- ret i32 0
-
-cond_next6474: ; preds = %cond_next298
- icmp eq %struct.tree_node** %internal_post, %post_p_addr.0 ; <i1>:11 [#uses=1]
- %iftmp.381.0 = select i1 %11, %struct.tree_node** null, %struct.tree_node** %post_p_addr.0 ; <%struct.tree_node**> [#uses=1]
- %tmp6490 = call i32 @gimplify_expr( %struct.tree_node** null, %struct.tree_node** %pre_p_addr.0, %struct.tree_node** %iftmp.381.0, i8 (%struct.tree_node*) zeroext * %gimple_test_f, i32 %fallback ) ; <i32> [#uses=0]
- %tmp6551 = call i32 @gimplify_expr( %struct.tree_node** null, %struct.tree_node** %pre_p_addr.0, %struct.tree_node** %post_p_addr.0, i8 (%struct.tree_node*) zeroext * @is_gimple_val, i32 1 ) ; <i32> [#uses=0]
- ret i32 0
-
-bb7444: ; preds = %cond_next298
- ret i32 0
-
-bb7463: ; preds = %cond_next4266, %cond_next298, %cond_next298, %cond_next298, %cond_next298, %cond_next298, %cond_next298, %cond_next298, %cond_next298
- ret i32 0
-
-bb7478: ; preds = %bb277
- ret i32 0
-}
-
-declare i8 @is_gimple_formal_tmp_rhs(%struct.tree_node*) zeroext
-
-declare void @gimplify_and_add(%struct.tree_node*, %struct.tree_node**)
-
-declare %struct.tree_node* @get_initialized_tmp_var(%struct.tree_node*, %struct.tree_node**, %struct.tree_node**)
-
-declare %struct.tree_node* @get_formal_tmp_var(%struct.tree_node*, %struct.tree_node**)
-
-declare fastcc void @gimplify_init_ctor_preeval(%struct.tree_node**, %struct.tree_node**, %struct.tree_node**, %struct.gimplify_init_ctor_preeval_data*)
-
-declare i8 @type_contains_placeholder_p(%struct.tree_node*) zeroext
-
-declare i8 @is_gimple_mem_rhs(%struct.tree_node*) zeroext
-
-declare fastcc i32 @gimplify_modify_expr_rhs(%struct.tree_node**, %struct.tree_node**, %struct.tree_node**, %struct.tree_node**, %struct.tree_node**, i8 zeroext )
-
-declare %struct.tree_node* @fold_indirect_ref(%struct.tree_node*)
-
-declare fastcc i32 @gimplify_compound_expr(%struct.tree_node**, %struct.tree_node**, i8 zeroext )
-
-declare i8 @is_gimple_lvalue(%struct.tree_node*) zeroext
-
-declare void @categorize_ctor_elements(%struct.tree_node*, i64*, i64*, i64*, i8*)
-
-declare void @lhd_set_decl_assembler_name(%struct.tree_node*)
-
-declare i64 @int_size_in_bytes(%struct.tree_node*)
-
-declare i32 @can_move_by_pieces(i64, i32)
-
-declare i64 @count_type_elements(%struct.tree_node*)
-
-declare void @gimplify_stmt(%struct.tree_node**)
-
-declare %struct.tree_node* @get_base_address(%struct.tree_node*)
-
-declare fastcc void @gimplify_init_ctor_eval(%struct.tree_node*, %struct.tree_node*, %struct.tree_node**, i8 zeroext )
-
-declare %struct.tree_node* @build_complex(%struct.tree_node*, %struct.tree_node*, %struct.tree_node*)
-
-declare i8 (%struct.tree_node*) zeroext * @rhs_predicate_for(%struct.tree_node*)
-
-declare %struct.tree_node* @build_vector(%struct.tree_node*, %struct.tree_node*)
-
-declare i8 @is_gimple_val(%struct.tree_node*) zeroext
-
-declare i8 @is_gimple_reg_type(%struct.tree_node*) zeroext
-
-declare fastcc i32 @gimplify_cond_expr(%struct.tree_node**, %struct.tree_node**, %struct.tree_node**, %struct.tree_node*, i32)
-
-declare fastcc i32 @gimplify_modify_expr(%struct.tree_node**, %struct.tree_node**, %struct.tree_node**, i8 zeroext )
-
-declare %struct.tree_node* @tree_cons_stat(%struct.tree_node*, %struct.tree_node*, %struct.tree_node*)
-
-declare %struct.tree_node* @build_fold_addr_expr(%struct.tree_node*)
-
-declare %struct.tree_node* @build_function_call_expr(%struct.tree_node*, %struct.tree_node*)
-
-declare i8 @is_gimple_addressable(%struct.tree_node*) zeroext
-
-declare i8 @is_gimple_reg(%struct.tree_node*) zeroext
-
-declare %struct.tree_node* @make_ssa_name(%struct.tree_node*, %struct.tree_node*)
-
-declare i8 @tree_ssa_useless_type_conversion(%struct.tree_node*) zeroext
-
-declare fastcc i32 @gimplify_self_mod_expr(%struct.tree_node**, %struct.tree_node**, %struct.tree_node**, i8 zeroext )
-
-declare fastcc i32 @gimplify_compound_lval(%struct.tree_node**, %struct.tree_node**, %struct.tree_node**, i32)
-
-declare %struct.tree_node* @get_callee_fndecl(%struct.tree_node*)
-
-declare %struct.tree_node* @fold_builtin(%struct.tree_node*, i8 zeroext )
-
-declare void @error(i8*, ...)
-
-declare %struct.tree_node* @build_empty_stmt()
-
-declare i8 @fold_builtin_next_arg(%struct.tree_node*) zeroext
-
-declare fastcc i32 @gimplify_arg(%struct.tree_node**, %struct.tree_node**)
-
-declare i8 @is_gimple_call_addr(%struct.tree_node*) zeroext
-
-declare i32 @call_expr_flags(%struct.tree_node*)
-
-declare void @recalculate_side_effects(%struct.tree_node*)
-
-declare %struct.tree_node* @fold_convert(%struct.tree_node*, %struct.tree_node*)
-
-declare void @recompute_tree_invarant_for_addr_expr(%struct.tree_node*)
-
-declare i32 @gimplify_va_arg_expr(%struct.tree_node**, %struct.tree_node**, %struct.tree_node**)
-
-declare %struct.tree_node* @size_int_kind(i64, i32)
-
-declare %struct.tree_node* @size_binop(i32, %struct.tree_node*, %struct.tree_node*)
-
-declare %struct.tree_node* @build4_stat(i32, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*)
-
-declare void @gimplify_type_sizes(%struct.tree_node*, %struct.tree_node**)
-
-declare void @gimplify_one_sizepos(%struct.tree_node**, %struct.tree_node**)
-
-declare %struct.tree_node* @build_pointer_type(%struct.tree_node*)
-
-declare %struct.tree_node* @build_fold_indirect_ref(%struct.tree_node*)
-
-declare fastcc i32 @gimplify_bind_expr(%struct.tree_node**, %struct.tree_node*, %struct.tree_node**)
-
-declare fastcc void @gimplify_loop_expr(%struct.tree_node**, %struct.tree_node**)
-
-declare fastcc i32 @gimplify_switch_expr(%struct.tree_node**, %struct.tree_node**)
-
-declare %struct.tree_node* @decl_function_context(%struct.tree_node*)
-
-declare %struct.varray_head_tag* @varray_grow(%struct.varray_head_tag*, i32)
-
-declare fastcc void @gimplify_return_expr(%struct.tree_node*, %struct.tree_node**)
-
-declare fastcc i32 @gimplify_save_expr(%struct.tree_node**, %struct.tree_node**, %struct.tree_node**)
-
-declare fastcc i32 @gimplify_asm_expr(%struct.tree_node**, %struct.tree_node**, %struct.tree_node**)
-
-declare void @gimplify_to_stmt_list(%struct.tree_node**)
-
-declare fastcc i32 @gimplify_cleanup_point_expr(%struct.tree_node**, %struct.tree_node**)
-
-declare fastcc i32 @gimplify_target_expr(%struct.tree_node**, %struct.tree_node**, %struct.tree_node**)
-
-declare void @tsi_delink(%struct.tree_stmt_iterator*)
-
-declare void @tsi_link_before(%struct.tree_stmt_iterator*, %struct.tree_node*, i32)
-
-declare i8 @is_gimple_stmt(%struct.tree_node*) zeroext
-
-declare void @print_generic_expr(%struct.FILE*, %struct.tree_node*, i32)
-
-declare void @debug_tree(%struct.tree_node*)
-
-declare void @internal_error(i8*, ...)
-
-declare %struct.tree_node* @force_gimple_operand(%struct.tree_node*, %struct.tree_node**, i8 zeroext , %struct.tree_node*)
-
-declare i8 @is_gimple_reg_rhs(%struct.tree_node*) zeroext
-
-declare void @add_referenced_tmp_var(%struct.tree_node*)
-
-declare i8 @contains_placeholder_p(%struct.tree_node*) zeroext
-
-declare %struct.varray_head_tag* @varray_init(i32, i32, i8*)
-
-declare i32 @handled_component_p(%struct.tree_node*)
-
-declare void @varray_check_failed(%struct.varray_head_tag*, i32, i8*, i32, i8*)
-
-declare %struct.tree_node* @array_ref_low_bound(%struct.tree_node*)
-
-declare i8 @is_gimple_min_invariant(%struct.tree_node*) zeroext
-
-declare i8 @is_gimple_formal_tmp_reg(%struct.tree_node*) zeroext
-
-declare %struct.tree_node* @array_ref_element_size(%struct.tree_node*)
-
-declare %struct.tree_node* @component_ref_field_offset(%struct.tree_node*)
-
-declare i8 @is_gimple_min_lval(%struct.tree_node*) zeroext
-
-declare void @varray_underflow(%struct.varray_head_tag*, i8*, i32, i8*)
-
-declare i32 @list_length(%struct.tree_node*)
-
-declare i8 @parse_output_constraint(i8**, i32, i32, i32, i8*, i8*, i8*) zeroext
-
-declare i8* @xstrdup(i8*)
-
-declare %struct.tree_node* @build_string(i32, i8*)
-
-declare i8* @strchr(i8*, i32)
-
-declare %struct.tree_node* @build_tree_list_stat(%struct.tree_node*, %struct.tree_node*)
-
-declare %struct.tree_node* @chainon(%struct.tree_node*, %struct.tree_node*)
-
-declare i8 @parse_input_constraint(i8**, i32, i32, i32, i32, i8**, i8*, i8*) zeroext
-
-declare i8 @is_gimple_asm_val(%struct.tree_node*) zeroext
-
-declare void @gimplify_body(%struct.tree_node**, %struct.tree_node*, i8 zeroext )
-
-declare void @timevar_push_1(i32)
-
-declare %struct.tree_node* @gimplify_parameters()
-
-declare %struct.tree_node* @expr_only(%struct.tree_node*)
-
-declare void @timevar_pop_1(i32)
-
-declare void @gimplify_function_tree(%struct.tree_node*)
-
-declare void @allocate_struct_function(%struct.tree_node*)
-
-declare %struct.tree_node* @make_tree_vec_stat(i32)
-
-declare %struct.tree_node* @tsi_split_statement_list_after(%struct.tree_stmt_iterator*)
-
-declare i8 @is_gimple_condexpr(%struct.tree_node*) zeroext
-
-declare %struct.tree_node* @invert_truthvalue(%struct.tree_node*)
-
-declare i8 @initializer_zerop(%struct.tree_node*) zeroext
-
-declare i32 @simple_cst_equal(%struct.tree_node*, %struct.tree_node*)
-
-declare i32 @aggregate_value_p(%struct.tree_node*, %struct.tree_node*)
-
-declare i32 @fwrite(i8*, i32, i32, %struct.FILE*)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-27-RegScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-27-RegScavengerAssert.ll
deleted file mode 100644
index e4635f5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-27-RegScavengerAssert.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi
-; PR1279
-
- %struct.rtx_def = type { i16, i8, i8, %struct.u }
- %struct.u = type { [1 x i64] }
-
-define fastcc void @find_reloads_address(%struct.rtx_def** %loc) {
-entry:
- %ad_addr = alloca %struct.rtx_def* ; <%struct.rtx_def**> [#uses=2]
- br i1 false, label %cond_next416, label %cond_true340
-
-cond_true340: ; preds = %entry
- ret void
-
-cond_next416: ; preds = %entry
- %tmp1085 = load %struct.rtx_def** %ad_addr ; <%struct.rtx_def*> [#uses=1]
- br i1 false, label %bb1084, label %cond_true418
-
-cond_true418: ; preds = %cond_next416
- ret void
-
-bb1084: ; preds = %cond_next416
- br i1 false, label %cond_true1092, label %cond_next1102
-
-cond_true1092: ; preds = %bb1084
- %tmp1094 = getelementptr %struct.rtx_def* %tmp1085, i32 0, i32 3 ; <%struct.u*> [#uses=1]
- %tmp10981099 = bitcast %struct.u* %tmp1094 to %struct.rtx_def** ; <%struct.rtx_def**> [#uses=2]
- %tmp1101 = load %struct.rtx_def** %tmp10981099 ; <%struct.rtx_def*> [#uses=1]
- store %struct.rtx_def* %tmp1101, %struct.rtx_def** %ad_addr
- br label %cond_next1102
-
-cond_next1102: ; preds = %cond_true1092, %bb1084
- %loc_addr.0 = phi %struct.rtx_def** [ %tmp10981099, %cond_true1092 ], [ %loc, %bb1084 ] ; <%struct.rtx_def**> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-30-RegScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-30-RegScavengerAssert.ll
deleted file mode 100644
index ea27676..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-03-30-RegScavengerAssert.ll
+++ /dev/null
@@ -1,101 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi
-; PR1279
-
- %struct.CUMULATIVE_ARGS = type { i32, i32, i32, i32, i32, i32 }
- %struct.arm_stack_offsets = type { i32, i32, i32, i32, i32 }
- %struct.eh_status = type opaque
- %struct.emit_status = type { i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack*, i32, %struct.location_t, i32, i8*, %struct.rtx_def** }
- %struct.expr_status = type { i32, i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
- %struct.function = type { %struct.eh_status*, %struct.expr_status*, %struct.emit_status*, %struct.varasm_status*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.function*, i32, i32, i32, i32, %struct.rtx_def*, %struct.CUMULATIVE_ARGS, %struct.rtx_def*, %struct.rtx_def*, %struct.initial_value_struct*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, i8, i32, i64, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct.varray_head_tag*, %struct.temp_slot*, i32, %struct.var_refs_queue*, i32, i32, %struct.rtvec_def*, %struct.tree_node*, i32, i32, i32, %struct.machine_function*, i32, i32, i8, i8, %struct.language_function*, %struct.rtx_def*, i32, i32, i32, i32, %struct.location_t, %struct.varray_head_tag*, %struct.tree_node*, i8, i8, i8 }
- %struct.initial_value_struct = type opaque
- %struct.lang_decl = type opaque
- %struct.language_function = type opaque
- %struct.location_t = type { i8*, i32 }
- %struct.machine_function = type { %struct.rtx_def*, i32, i32, i32, %struct.arm_stack_offsets, i32, i32, i32, [14 x %struct.rtx_def*] }
- %struct.rtvec_def = type { i32, [1 x %struct.rtx_def*] }
- %struct.rtx_def = type { i16, i8, i8, %struct.u }
- %struct.sequence_stack = type { %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack* }
- %struct.temp_slot = type opaque
- %struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, %union.tree_ann_d*, i8, i8, i8, i8, i8 }
- %struct.tree_decl = type { %struct.tree_common, %struct.location_t, i32, %struct.tree_node*, i8, i8, i8, i8, i8, i8, i8, i8, i32, %struct.tree_decl_u1, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, i32, %struct.tree_decl_u2, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_decl* }
- %struct.tree_decl_u1 = type { i64 }
- %struct.tree_decl_u2 = type { %struct.function* }
- %struct.tree_node = type { %struct.tree_decl }
- %struct.u = type { [1 x i64] }
- %struct.var_refs_queue = type { %struct.rtx_def*, i32, i32, %struct.var_refs_queue* }
- %struct.varasm_status = type opaque
- %struct.varray_head_tag = type { i32, i32, i32, i8*, %struct.u }
- %union.tree_ann_d = type opaque
-@str469 = external global [42 x i8] ; <[42 x i8]*> [#uses=0]
-@__FUNCTION__.24265 = external global [19 x i8] ; <[19 x i8]*> [#uses=0]
-
-declare void @fancy_abort()
-
-define fastcc void @fold_builtin_bitop() {
-entry:
- br i1 false, label %cond_true105, label %UnifiedReturnBlock
-
-cond_true105: ; preds = %entry
- br i1 false, label %cond_true134, label %UnifiedReturnBlock
-
-cond_true134: ; preds = %cond_true105
- switch i32 0, label %bb479 [
- i32 378, label %bb313
- i32 380, label %bb313
- i32 381, label %bb313
- i32 383, label %bb366
- i32 385, label %bb366
- i32 386, label %bb366
- i32 403, label %bb250
- i32 405, label %bb250
- i32 406, label %bb250
- i32 434, label %bb464
- i32 436, label %bb464
- i32 437, label %bb464
- i32 438, label %bb441
- i32 440, label %bb441
- i32 441, label %bb441
- ]
-
-bb250: ; preds = %cond_true134, %cond_true134, %cond_true134
- ret void
-
-bb313: ; preds = %cond_true134, %cond_true134, %cond_true134
- ret void
-
-bb366: ; preds = %cond_true134, %cond_true134, %cond_true134
- ret void
-
-bb441: ; preds = %cond_true134, %cond_true134, %cond_true134
- ret void
-
-bb457: ; preds = %bb464, %bb457
- %tmp459 = add i64 0, 1 ; <i64> [#uses=1]
- br i1 false, label %bb474.preheader, label %bb457
-
-bb464: ; preds = %cond_true134, %cond_true134, %cond_true134
- br i1 false, label %bb474.preheader, label %bb457
-
-bb474.preheader: ; preds = %bb464, %bb457
- %result.5.ph = phi i64 [ 0, %bb464 ], [ %tmp459, %bb457 ] ; <i64> [#uses=1]
- br label %bb474
-
-bb467: ; preds = %bb474
- %indvar.next586 = add i64 %indvar585, 1 ; <i64> [#uses=1]
- br label %bb474
-
-bb474: ; preds = %bb467, %bb474.preheader
- %indvar585 = phi i64 [ 0, %bb474.preheader ], [ %indvar.next586, %bb467 ] ; <i64> [#uses=2]
- br i1 false, label %bb476, label %bb467
-
-bb476: ; preds = %bb474
- %result.5 = add i64 %indvar585, %result.5.ph ; <i64> [#uses=0]
- ret void
-
-bb479: ; preds = %cond_true134
- tail call void @fancy_abort( )
- unreachable
-
-UnifiedReturnBlock: ; preds = %cond_true105, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-04-02-RegScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-04-02-RegScavengerAssert.ll
deleted file mode 100644
index f24def3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-04-02-RegScavengerAssert.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=arm-apple-darwin
-
- %struct.H_TBL = type { [17 x i8], [256 x i8], i32 }
- %struct.Q_TBL = type { [64 x i16], i32 }
- %struct.anon = type { [80 x i8] }
- %struct.X_c_coef_ccler = type { void (%struct.X_Y*, i32)*, i32 (%struct.X_Y*, i8***)* }
- %struct.X_c_main_ccler = type { void (%struct.X_Y*, i32)*, void (%struct.X_Y*, i8**, i32*, i32)* }
- %struct.X_c_prep_ccler = type { void (%struct.X_Y*, i32)*, void (%struct.X_Y*, i8**, i32*, i32, i8***, i32*, i32)* }
- %struct.X_color_converter = type { void (%struct.X_Y*)*, void (%struct.X_Y*, i8**, i8***, i32, i32)* }
- %struct.X_common_struct = type { %struct.X_error_mgr*, %struct.X_memory_mgr*, %struct.X_progress_mgr*, i8*, i32, i32 }
- %struct.X_comp_master = type { void (%struct.X_Y*)*, void (%struct.X_Y*)*, void (%struct.X_Y*)*, i32, i32 }
- %struct.X_component_info = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.Q_TBL*, i8* }
- %struct.X_Y = type { %struct.X_error_mgr*, %struct.X_memory_mgr*, %struct.X_progress_mgr*, i8*, i32, i32, %struct.X_destination_mgr*, i32, i32, i32, i32, double, i32, i32, i32, %struct.X_component_info*, [4 x %struct.Q_TBL*], [4 x %struct.H_TBL*], [4 x %struct.H_TBL*], [16 x i8], [16 x i8], [16 x i8], i32, %struct.X_scan_info*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i8, i8, i16, i16, i32, i32, i32, i32, i32, i32, i32, [4 x %struct.X_component_info*], i32, i32, i32, [10 x i32], i32, i32, i32, i32, %struct.X_comp_master*, %struct.X_c_main_ccler*, %struct.X_c_prep_ccler*, %struct.X_c_coef_ccler*, %struct.X_marker_writer*, %struct.X_color_converter*, %struct.X_downssr*, %struct.X_forward_D*, %struct.X_entropy_en*, %struct.X_scan_info*, i32 }
- %struct.X_destination_mgr = type { i8*, i32, void (%struct.X_Y*)*, i32 (%struct.X_Y*)*, void (%struct.X_Y*)* }
- %struct.X_downssr = type { void (%struct.X_Y*)*, void (%struct.X_Y*, i8***, i32, i8***, i32)*, i32 }
- %struct.X_entropy_en = type { void (%struct.X_Y*, i32)*, i32 (%struct.X_Y*, [64 x i16]**)*, void (%struct.X_Y*)* }
- %struct.X_error_mgr = type { void (%struct.X_common_struct*)*, void (%struct.X_common_struct*, i32)*, void (%struct.X_common_struct*)*, void (%struct.X_common_struct*, i8*)*, void (%struct.X_common_struct*)*, i32, %struct.anon, i32, i32, i8**, i32, i8**, i32, i32 }
- %struct.X_forward_D = type { void (%struct.X_Y*)*, void (%struct.X_Y*, %struct.X_component_info*, i8**, [64 x i16]*, i32, i32, i32)* }
- %struct.X_marker_writer = type { void (%struct.X_Y*)*, void (%struct.X_Y*)*, void (%struct.X_Y*)*, void (%struct.X_Y*)*, void (%struct.X_Y*)*, void (%struct.X_Y*, i32, i32)*, void (%struct.X_Y*, i32)* }
- %struct.X_memory_mgr = type { i8* (%struct.X_common_struct*, i32, i32)*, i8* (%struct.X_common_struct*, i32, i32)*, i8** (%struct.X_common_struct*, i32, i32, i32)*, [64 x i16]** (%struct.X_common_struct*, i32, i32, i32)*, %struct.jvirt_sAY_cc* (%struct.X_common_struct*, i32, i32, i32, i32, i32)*, %struct.jvirt_bAY_cc* (%struct.X_common_struct*, i32, i32, i32, i32, i32)*, void (%struct.X_common_struct*)*, i8** (%struct.X_common_struct*, %struct.jvirt_sAY_cc*, i32, i32, i32)*, [64 x i16]** (%struct.X_common_struct*, %struct.jvirt_bAY_cc*, i32, i32, i32)*, void (%struct.X_common_struct*, i32)*, void (%struct.X_common_struct*)*, i32, i32 }
- %struct.X_progress_mgr = type { void (%struct.X_common_struct*)*, i32, i32, i32, i32 }
- %struct.X_scan_info = type { i32, [4 x i32], i32, i32, i32, i32 }
- %struct.jvirt_bAY_cc = type opaque
- %struct.jvirt_sAY_cc = type opaque
-
-define void @test(%struct.X_Y* %cinfo) {
-entry:
- br i1 false, label %bb.preheader, label %return
-
-bb.preheader: ; preds = %entry
- %tbl.014.us = load i32* null ; <i32> [#uses=1]
- br i1 false, label %cond_next.us, label %bb
-
-cond_next51.us: ; preds = %cond_next.us, %cond_true33.us.cond_true46.us_crit_edge
- %htblptr.019.1.us = phi %struct.H_TBL** [ %tmp37.us, %cond_true33.us.cond_true46.us_crit_edge ], [ %tmp37.us, %cond_next.us ] ; <%struct.H_TBL**> [#uses=0]
- ret void
-
-cond_true33.us.cond_true46.us_crit_edge: ; preds = %cond_next.us
- call void @_C_X_a_HT( )
- br label %cond_next51.us
-
-cond_next.us: ; preds = %bb.preheader
- %tmp37.us = getelementptr %struct.X_Y* %cinfo, i32 0, i32 17, i32 %tbl.014.us ; <%struct.H_TBL**> [#uses=3]
- %tmp4524.us = load %struct.H_TBL** %tmp37.us ; <%struct.H_TBL*> [#uses=1]
- icmp eq %struct.H_TBL* %tmp4524.us, null ; <i1>:0 [#uses=1]
- br i1 %0, label %cond_true33.us.cond_true46.us_crit_edge, label %cond_next51.us
-
-bb: ; preds = %bb.preheader
- ret void
-
-return: ; preds = %entry
- ret void
-}
-
-declare void @_C_X_a_HT()
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-04-03-PEIBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-04-03-PEIBug.ll
deleted file mode 100644
index b543c57..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-04-03-PEIBug.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=arm | not grep {add.*#0}
-
-define i32 @foo() {
-entry:
- %A = alloca [1123 x i32], align 16 ; <[1123 x i32]*> [#uses=1]
- %B = alloca [3123 x i32], align 16 ; <[3123 x i32]*> [#uses=1]
- %C = alloca [12312 x i32], align 16 ; <[12312 x i32]*> [#uses=1]
- %tmp = call i32 (...)* @bar( [3123 x i32]* %B, [1123 x i32]* %A, [12312 x i32]* %C ) ; <i32> [#uses=0]
- ret i32 undef
-}
-
-declare i32 @bar(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-04-03-UndefinedSymbol.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-04-03-UndefinedSymbol.ll
deleted file mode 100644
index e001cde..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-04-03-UndefinedSymbol.ll
+++ /dev/null
@@ -1,99 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=pic | \
-; RUN: not grep LPC9
-
- %struct.B = type { i32 }
- %struct.anon = type { void (%struct.B*)*, i32 }
-@str = internal constant [7 x i8] c"i, %d\0A\00" ; <[7 x i8]*> [#uses=1]
-@str1 = internal constant [7 x i8] c"j, %d\0A\00" ; <[7 x i8]*> [#uses=1]
-
-define internal void @_ZN1B1iEv(%struct.B* %this) {
-entry:
- %tmp1 = getelementptr %struct.B* %this, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp2 = load i32* %tmp1 ; <i32> [#uses=1]
- %tmp4 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([7 x i8]* @str, i32 0, i32 0), i32 %tmp2 ) ; <i32> [#uses=0]
- ret void
-}
-
-declare i32 @printf(i8*, ...)
-
-define internal void @_ZN1B1jEv(%struct.B* %this) {
-entry:
- %tmp1 = getelementptr %struct.B* %this, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp2 = load i32* %tmp1 ; <i32> [#uses=1]
- %tmp4 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([7 x i8]* @str1, i32 0, i32 0), i32 %tmp2 ) ; <i32> [#uses=0]
- ret void
-}
-
-define i32 @main() {
-entry:
- %b.i29 = alloca %struct.B, align 4 ; <%struct.B*> [#uses=3]
- %b.i1 = alloca %struct.B, align 4 ; <%struct.B*> [#uses=3]
- %b.i = alloca %struct.B, align 4 ; <%struct.B*> [#uses=3]
- %tmp2.i = getelementptr %struct.B* %b.i, i32 0, i32 0 ; <i32*> [#uses=1]
- store i32 4, i32* %tmp2.i
- br i1 icmp eq (i64 and (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 4294967296), i64 0), label %_Z3fooiM1BFvvE.exit, label %cond_true.i
-
-cond_true.i: ; preds = %entry
- %b2.i = bitcast %struct.B* %b.i to i8* ; <i8*> [#uses=1]
- %ctg23.i = getelementptr i8* %b2.i, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
- %tmp121314.i = bitcast i8* %ctg23.i to i32 (...)*** ; <i32 (...)***> [#uses=1]
- %tmp15.i = load i32 (...)*** %tmp121314.i ; <i32 (...)**> [#uses=1]
- %tmp151.i = bitcast i32 (...)** %tmp15.i to i8* ; <i8*> [#uses=1]
- %ctg2.i = getelementptr i8* %tmp151.i, i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) ; <i8*> [#uses=1]
- %tmp2021.i = bitcast i8* %ctg2.i to i32 (...)** ; <i32 (...)**> [#uses=1]
- %tmp22.i = load i32 (...)** %tmp2021.i ; <i32 (...)*> [#uses=1]
- %tmp2223.i = bitcast i32 (...)* %tmp22.i to void (%struct.B*)* ; <void (%struct.B*)*> [#uses=1]
- br label %_Z3fooiM1BFvvE.exit
-
-_Z3fooiM1BFvvE.exit: ; preds = %cond_true.i, %entry
- %iftmp.2.0.i = phi void (%struct.B*)* [ %tmp2223.i, %cond_true.i ], [ inttoptr (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to void (%struct.B*)*), %entry ] ; <void (%struct.B*)*> [#uses=1]
- %b4.i = bitcast %struct.B* %b.i to i8* ; <i8*> [#uses=1]
- %ctg25.i = getelementptr i8* %b4.i, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
- %tmp3031.i = bitcast i8* %ctg25.i to %struct.B* ; <%struct.B*> [#uses=1]
- call void %iftmp.2.0.i( %struct.B* %tmp3031.i )
- %tmp2.i30 = getelementptr %struct.B* %b.i29, i32 0, i32 0 ; <i32*> [#uses=1]
- store i32 6, i32* %tmp2.i30
- br i1 icmp eq (i64 and (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32) to i64), i64 4294967296), i64 0), label %_Z3fooiM1BFvvE.exit56, label %cond_true.i46
-
-cond_true.i46: ; preds = %_Z3fooiM1BFvvE.exit
- %b2.i35 = bitcast %struct.B* %b.i29 to i8* ; <i8*> [#uses=1]
- %ctg23.i36 = getelementptr i8* %b2.i35, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
- %tmp121314.i37 = bitcast i8* %ctg23.i36 to i32 (...)*** ; <i32 (...)***> [#uses=1]
- %tmp15.i38 = load i32 (...)*** %tmp121314.i37 ; <i32 (...)**> [#uses=1]
- %tmp151.i41 = bitcast i32 (...)** %tmp15.i38 to i8* ; <i8*> [#uses=1]
- %ctg2.i42 = getelementptr i8* %tmp151.i41, i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32) ; <i8*> [#uses=1]
- %tmp2021.i43 = bitcast i8* %ctg2.i42 to i32 (...)** ; <i32 (...)**> [#uses=1]
- %tmp22.i44 = load i32 (...)** %tmp2021.i43 ; <i32 (...)*> [#uses=1]
- %tmp2223.i45 = bitcast i32 (...)* %tmp22.i44 to void (%struct.B*)* ; <void (%struct.B*)*> [#uses=1]
- br label %_Z3fooiM1BFvvE.exit56
-
-_Z3fooiM1BFvvE.exit56: ; preds = %cond_true.i46, %_Z3fooiM1BFvvE.exit
- %iftmp.2.0.i49 = phi void (%struct.B*)* [ %tmp2223.i45, %cond_true.i46 ], [ inttoptr (i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32) to void (%struct.B*)*), %_Z3fooiM1BFvvE.exit ] ; <void (%struct.B*)*> [#uses=1]
- %b4.i53 = bitcast %struct.B* %b.i29 to i8* ; <i8*> [#uses=1]
- %ctg25.i54 = getelementptr i8* %b4.i53, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
- %tmp3031.i55 = bitcast i8* %ctg25.i54 to %struct.B* ; <%struct.B*> [#uses=1]
- call void %iftmp.2.0.i49( %struct.B* %tmp3031.i55 )
- %tmp2.i2 = getelementptr %struct.B* %b.i1, i32 0, i32 0 ; <i32*> [#uses=1]
- store i32 -1, i32* %tmp2.i2
- br i1 icmp eq (i64 and (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 4294967296), i64 0), label %_Z3fooiM1BFvvE.exit28, label %cond_true.i18
-
-cond_true.i18: ; preds = %_Z3fooiM1BFvvE.exit56
- %b2.i7 = bitcast %struct.B* %b.i1 to i8* ; <i8*> [#uses=1]
- %ctg23.i8 = getelementptr i8* %b2.i7, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
- %tmp121314.i9 = bitcast i8* %ctg23.i8 to i32 (...)*** ; <i32 (...)***> [#uses=1]
- %tmp15.i10 = load i32 (...)*** %tmp121314.i9 ; <i32 (...)**> [#uses=1]
- %tmp151.i13 = bitcast i32 (...)** %tmp15.i10 to i8* ; <i8*> [#uses=1]
- %ctg2.i14 = getelementptr i8* %tmp151.i13, i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) ; <i8*> [#uses=1]
- %tmp2021.i15 = bitcast i8* %ctg2.i14 to i32 (...)** ; <i32 (...)**> [#uses=1]
- %tmp22.i16 = load i32 (...)** %tmp2021.i15 ; <i32 (...)*> [#uses=1]
- %tmp2223.i17 = bitcast i32 (...)* %tmp22.i16 to void (%struct.B*)* ; <void (%struct.B*)*> [#uses=1]
- br label %_Z3fooiM1BFvvE.exit28
-
-_Z3fooiM1BFvvE.exit28: ; preds = %cond_true.i18, %_Z3fooiM1BFvvE.exit56
- %iftmp.2.0.i21 = phi void (%struct.B*)* [ %tmp2223.i17, %cond_true.i18 ], [ inttoptr (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to void (%struct.B*)*), %_Z3fooiM1BFvvE.exit56 ] ; <void (%struct.B*)*> [#uses=1]
- %b4.i25 = bitcast %struct.B* %b.i1 to i8* ; <i8*> [#uses=1]
- %ctg25.i26 = getelementptr i8* %b4.i25, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
- %tmp3031.i27 = bitcast i8* %ctg25.i26 to %struct.B* ; <%struct.B*> [#uses=1]
- call void %iftmp.2.0.i21( %struct.B* %tmp3031.i27 )
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-04-30-CombinerCrash.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-04-30-CombinerCrash.ll
deleted file mode 100644
index a89e937..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-04-30-CombinerCrash.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -mattr=+v6,+vfp2
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
-target triple = "arm-apple-darwin8"
- %struct.CHESS_POSITION = type { i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i32, i32, i8, i8, [64 x i8], i8, i8, i8, i8, i8 }
-@search = external global %struct.CHESS_POSITION ; <%struct.CHESS_POSITION*> [#uses=3]
-@file_mask = external global [8 x i64] ; <[8 x i64]*> [#uses=1]
-@rank_mask.1.b = external global i1 ; <i1*> [#uses=1]
-
-define fastcc void @EvaluateDevelopment() {
-entry:
- %tmp7 = load i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 7) ; <i64> [#uses=1]
- %tmp50 = load i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 0) ; <i64> [#uses=1]
- %tmp52 = load i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 1) ; <i64> [#uses=1]
- %tmp53 = or i64 %tmp52, %tmp50 ; <i64> [#uses=1]
- %tmp57.b = load i1* @rank_mask.1.b ; <i1> [#uses=1]
- %tmp57 = select i1 %tmp57.b, i64 71776119061217280, i64 0 ; <i64> [#uses=1]
- %tmp58 = and i64 %tmp57, %tmp7 ; <i64> [#uses=1]
- %tmp59 = lshr i64 %tmp58, 8 ; <i64> [#uses=1]
- %tmp63 = load i64* getelementptr ([8 x i64]* @file_mask, i32 0, i32 4) ; <i64> [#uses=1]
- %tmp64 = or i64 %tmp63, 0 ; <i64> [#uses=1]
- %tmp65 = and i64 %tmp59, %tmp53 ; <i64> [#uses=1]
- %tmp66 = and i64 %tmp65, %tmp64 ; <i64> [#uses=1]
- %tmp67 = icmp eq i64 %tmp66, 0 ; <i1> [#uses=1]
- br i1 %tmp67, label %cond_next145, label %cond_true70
-
-cond_true70: ; preds = %entry
- ret void
-
-cond_next145: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-03-BadPostIndexedLd.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-03-BadPostIndexedLd.ll
deleted file mode 100644
index c73b679..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-03-BadPostIndexedLd.ll
+++ /dev/null
@@ -1,113 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin
-
- %struct.Connection = type { i32, [10 x i8], i32 }
- %struct.IntChunk = type { %struct.cppobjtype, i32, i32*, i32 }
- %struct.Point = type { i8*, %struct.cppobjtype, i16 (%struct.Point*) signext *, i16 (%struct.Point*) signext *, double (%struct.Point*)*, double (%struct.Point*)* }
- %struct.RefPoint = type { %struct.Point*, %struct.cppobjtype }
- %struct.ShortArray = type { %struct.cppobjtype, i32, i16* }
- %struct.TestObj = type { i8*, %struct.cppobjtype, i8, [32 x i8], i8*, i8**, i16, i16, i32, i32, i32, i32, float, double, %struct.cppobjtype, i32, i16*, i16**, i8**, i32, %struct.XyPoint, [3 x %struct.Connection], %struct.Point*, %struct.XyPoint*, i32, i8*, i8*, i16*, %struct.ShortArray, %struct.IntChunk, %struct.cppobjtype, %struct.cppobjtype, %struct.RefPoint, i32, %struct.cppobjtype, %struct.cppobjtype }
- %struct.XyPoint = type { i16, i16 }
- %struct.cppobjtype = type { i32, i16, i16 }
-@Msg = external global [256 x i8] ; <[256 x i8]*> [#uses=1]
-@.str53615 = external constant [48 x i8] ; <[48 x i8]*> [#uses=1]
-@FirstTime.4637.b = external global i1 ; <i1*> [#uses=1]
-
-define fastcc void @Draw7(i32 %Option, i32* %Status) {
-entry:
- %tmp115.b = load i1* @FirstTime.4637.b ; <i1> [#uses=1]
- br i1 %tmp115.b, label %cond_next239, label %cond_next.i
-
-cond_next.i: ; preds = %entry
- ret void
-
-cond_next239: ; preds = %entry
- %tmp242 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- br i1 %tmp242, label %cond_next253, label %cond_next296
-
-cond_next253: ; preds = %cond_next239
- switch i32 %Option, label %bb1326 [
- i32 3, label %cond_true258
- i32 4, label %cond_true268
- i32 2, label %cond_true279
- i32 1, label %cond_next315
- ]
-
-cond_true258: ; preds = %cond_next253
- ret void
-
-cond_true268: ; preds = %cond_next253
- ret void
-
-cond_true279: ; preds = %cond_next253
- ret void
-
-cond_next296: ; preds = %cond_next239
- ret void
-
-cond_next315: ; preds = %cond_next253
- %tmp1140 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- br i1 %tmp1140, label %cond_true1143, label %bb1326
-
-cond_true1143: ; preds = %cond_next315
- %tmp1148 = icmp eq i32 0, 0 ; <i1> [#uses=4]
- br i1 %tmp1148, label %cond_next1153, label %cond_true1151
-
-cond_true1151: ; preds = %cond_true1143
- ret void
-
-cond_next1153: ; preds = %cond_true1143
- %tmp8.i.i185 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- br i1 %tmp8.i.i185, label %TestObj_new1.exit, label %cond_true.i.i187
-
-cond_true.i.i187: ; preds = %cond_next1153
- ret void
-
-TestObj_new1.exit: ; preds = %cond_next1153
- %tmp1167 = icmp eq i16 0, 0 ; <i1> [#uses=1]
- %tmp1178 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- %bothcond = and i1 %tmp1167, %tmp1178 ; <i1> [#uses=1]
- br i1 %bothcond, label %bb1199, label %bb1181
-
-bb1181: ; preds = %TestObj_new1.exit
- ret void
-
-bb1199: ; preds = %TestObj_new1.exit
- br i1 %tmp1148, label %cond_next1235, label %Object_Dump.exit302
-
-Object_Dump.exit302: ; preds = %bb1199
- ret void
-
-cond_next1235: ; preds = %bb1199
- %bothcond10485 = or i1 false, %tmp1148 ; <i1> [#uses=1]
- br i1 %bothcond10485, label %cond_next1267, label %cond_true1248
-
-cond_true1248: ; preds = %cond_next1235
- ret void
-
-cond_next1267: ; preds = %cond_next1235
- br i1 %tmp1148, label %cond_next1275, label %cond_true1272
-
-cond_true1272: ; preds = %cond_next1267
- %tmp1273 = load %struct.TestObj** null ; <%struct.TestObj*> [#uses=2]
- %tmp2930.i = ptrtoint %struct.TestObj* %tmp1273 to i32 ; <i32> [#uses=1]
- %tmp42.i348 = sub i32 0, %tmp2930.i ; <i32> [#uses=1]
- %tmp45.i = getelementptr %struct.TestObj* %tmp1273, i32 0, i32 0 ; <i8**> [#uses=2]
- %tmp48.i = load i8** %tmp45.i ; <i8*> [#uses=1]
- %tmp50.i350 = call i32 (i8*, i8*, ...)* @sprintf( i8* getelementptr ([256 x i8]* @Msg, i32 0, i32 0), i8* getelementptr ([48 x i8]* @.str53615, i32 0, i32 0), i8* null, i8** %tmp45.i, i8* %tmp48.i ) ; <i32> [#uses=0]
- br i1 false, label %cond_true.i632.i, label %Ut_TraceMsg.exit648.i
-
-cond_true.i632.i: ; preds = %cond_true1272
- ret void
-
-Ut_TraceMsg.exit648.i: ; preds = %cond_true1272
- %tmp57.i = getelementptr i8* null, i32 %tmp42.i348 ; <i8*> [#uses=0]
- ret void
-
-cond_next1275: ; preds = %cond_next1267
- ret void
-
-bb1326: ; preds = %cond_next315, %cond_next253
- ret void
-}
-
-declare i32 @sprintf(i8*, i8*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-07-jumptoentry.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-07-jumptoentry.ll
deleted file mode 100644
index 26864f1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-07-jumptoentry.ll
+++ /dev/null
@@ -1,58 +0,0 @@
-; RUN: llc < %s | not grep 1_0
-; This used to create an extra branch to 'entry', LBB1_0.
-
-; ModuleID = 'bug.bc'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
-target triple = "arm-apple-darwin8"
- %struct.HexxagonMove = type { i8, i8, i32 }
- %struct.HexxagonMoveList = type { i32, %struct.HexxagonMove* }
-
-define void @_ZN16HexxagonMoveList8sortListEv(%struct.HexxagonMoveList* %this) {
-entry:
- %tmp51 = getelementptr %struct.HexxagonMoveList* %this, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp2 = getelementptr %struct.HexxagonMoveList* %this, i32 0, i32 1 ; <%struct.HexxagonMove**> [#uses=2]
- br label %bb49
-
-bb1: ; preds = %bb49
- %tmp3 = load %struct.HexxagonMove** %tmp2 ; <%struct.HexxagonMove*> [#uses=5]
- %tmp6 = getelementptr %struct.HexxagonMove* %tmp3, i32 %i.1, i32 2 ; <i32*> [#uses=1]
- %tmp7 = load i32* %tmp6 ; <i32> [#uses=2]
- %tmp12 = add i32 %i.1, 1 ; <i32> [#uses=7]
- %tmp14 = getelementptr %struct.HexxagonMove* %tmp3, i32 %tmp12, i32 2 ; <i32*> [#uses=1]
- %tmp15 = load i32* %tmp14 ; <i32> [#uses=1]
- %tmp16 = icmp slt i32 %tmp7, %tmp15 ; <i1> [#uses=1]
- br i1 %tmp16, label %cond_true, label %bb49
-
-cond_true: ; preds = %bb1
- %tmp23.0 = getelementptr %struct.HexxagonMove* %tmp3, i32 %i.1, i32 0 ; <i8*> [#uses=2]
- %tmp67 = load i8* %tmp23.0 ; <i8> [#uses=1]
- %tmp23.1 = getelementptr %struct.HexxagonMove* %tmp3, i32 %i.1, i32 1 ; <i8*> [#uses=1]
- %tmp68 = load i8* %tmp23.1 ; <i8> [#uses=1]
- %tmp3638 = getelementptr %struct.HexxagonMove* %tmp3, i32 %tmp12, i32 0 ; <i8*> [#uses=1]
- tail call void @llvm.memcpy.i32( i8* %tmp23.0, i8* %tmp3638, i32 8, i32 4 )
- %tmp41 = load %struct.HexxagonMove** %tmp2 ; <%struct.HexxagonMove*> [#uses=3]
- %tmp44.0 = getelementptr %struct.HexxagonMove* %tmp41, i32 %tmp12, i32 0 ; <i8*> [#uses=1]
- store i8 %tmp67, i8* %tmp44.0
- %tmp44.1 = getelementptr %struct.HexxagonMove* %tmp41, i32 %tmp12, i32 1 ; <i8*> [#uses=1]
- store i8 %tmp68, i8* %tmp44.1
- %tmp44.2 = getelementptr %struct.HexxagonMove* %tmp41, i32 %tmp12, i32 2 ; <i32*> [#uses=1]
- store i32 %tmp7, i32* %tmp44.2
- br label %bb49
-
-bb49: ; preds = %bb59, %cond_true, %bb1, %entry
- %i.1 = phi i32 [ 0, %entry ], [ %tmp12, %bb1 ], [ %tmp12, %cond_true ], [ 0, %bb59 ] ; <i32> [#uses=5]
- %move.2 = phi i32 [ 0, %entry ], [ 1, %cond_true ], [ %move.2, %bb1 ], [ 0, %bb59 ] ; <i32> [#uses=2]
- %tmp52 = load i32* %tmp51 ; <i32> [#uses=1]
- %tmp53 = add i32 %tmp52, -1 ; <i32> [#uses=1]
- %tmp55 = icmp sgt i32 %tmp53, %i.1 ; <i1> [#uses=1]
- br i1 %tmp55, label %bb1, label %bb59
-
-bb59: ; preds = %bb49
- %tmp61 = icmp eq i32 %move.2, 0 ; <i1> [#uses=1]
- br i1 %tmp61, label %return, label %bb49
-
-return: ; preds = %bb59
- ret void
-}
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-07-tailmerge-1.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-07-tailmerge-1.ll
deleted file mode 100644
index f2a8ee1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-07-tailmerge-1.ll
+++ /dev/null
@@ -1,68 +0,0 @@
-; RUN: llc < %s -march=arm -enable-tail-merge | grep bl.*baz | count 1
-; RUN: llc < %s -march=arm -enable-tail-merge | grep bl.*quux | count 1
-; RUN: llc < %s -march=arm -enable-tail-merge -enable-eh | grep bl.*baz | count 1
-; RUN: llc < %s -march=arm -enable-tail-merge -enable-eh | grep bl.*quux | count 1
-; Check that calls to baz and quux are tail-merged.
-; PR1628
-
-; ModuleID = 'tail.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
-
-define i32 @f(i32 %i, i32 %q) {
-entry:
- %i_addr = alloca i32 ; <i32*> [#uses=2]
- %q_addr = alloca i32 ; <i32*> [#uses=2]
- %retval = alloca i32, align 4 ; <i32*> [#uses=1]
- "alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %i, i32* %i_addr
- store i32 %q, i32* %q_addr
- %tmp = load i32* %i_addr ; <i32> [#uses=1]
- %tmp1 = icmp ne i32 %tmp, 0 ; <i1> [#uses=1]
- %tmp12 = zext i1 %tmp1 to i8 ; <i8> [#uses=1]
- %toBool = icmp ne i8 %tmp12, 0 ; <i1> [#uses=1]
- br i1 %toBool, label %cond_true, label %cond_false
-
-cond_true: ; preds = %entry
- %tmp3 = call i32 (...)* @bar( ) ; <i32> [#uses=0]
- %tmp4 = call i32 (...)* @baz( i32 5, i32 6 ) ; <i32> [#uses=0]
- br label %cond_next
-
-cond_false: ; preds = %entry
- %tmp5 = call i32 (...)* @foo( ) ; <i32> [#uses=0]
- %tmp6 = call i32 (...)* @baz( i32 5, i32 6 ) ; <i32> [#uses=0]
- br label %cond_next
-
-cond_next: ; preds = %cond_false, %cond_true
- %tmp7 = load i32* %q_addr ; <i32> [#uses=1]
- %tmp8 = icmp ne i32 %tmp7, 0 ; <i1> [#uses=1]
- %tmp89 = zext i1 %tmp8 to i8 ; <i8> [#uses=1]
- %toBool10 = icmp ne i8 %tmp89, 0 ; <i1> [#uses=1]
- br i1 %toBool10, label %cond_true11, label %cond_false15
-
-cond_true11: ; preds = %cond_next
- %tmp13 = call i32 (...)* @foo( ) ; <i32> [#uses=0]
- %tmp14 = call i32 (...)* @quux( i32 3, i32 4 ) ; <i32> [#uses=0]
- br label %cond_next18
-
-cond_false15: ; preds = %cond_next
- %tmp16 = call i32 (...)* @bar( ) ; <i32> [#uses=0]
- %tmp17 = call i32 (...)* @quux( i32 3, i32 4 ) ; <i32> [#uses=0]
- br label %cond_next18
-
-cond_next18: ; preds = %cond_false15, %cond_true11
- %tmp19 = call i32 (...)* @bar( ) ; <i32> [#uses=0]
- br label %return
-
-return: ; preds = %cond_next18
- %retval20 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval20
-}
-
-declare i32 @bar(...)
-
-declare i32 @baz(...)
-
-declare i32 @foo(...)
-
-declare i32 @quux(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-09-tailmerge-2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-09-tailmerge-2.ll
deleted file mode 100644
index 2758505..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-09-tailmerge-2.ll
+++ /dev/null
@@ -1,69 +0,0 @@
-; RUN: llc < %s -march=arm -enable-tail-merge | grep bl.*baz | count 1
-; RUN: llc < %s -march=arm -enable-tail-merge | grep bl.*quux | count 1
-; RUN: llc < %s -march=arm -enable-tail-merge -enable-eh | grep bl.*baz | count 1
-; RUN: llc < %s -march=arm -enable-tail-merge -enable-eh | grep bl.*quux | count 1
-; Check that calls to baz and quux are tail-merged.
-; PR1628
-
-; ModuleID = 'tail.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
-
-define i32 @f(i32 %i, i32 %q) {
-entry:
- %i_addr = alloca i32 ; <i32*> [#uses=2]
- %q_addr = alloca i32 ; <i32*> [#uses=2]
- %retval = alloca i32, align 4 ; <i32*> [#uses=1]
- "alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %i, i32* %i_addr
- store i32 %q, i32* %q_addr
- %tmp = load i32* %i_addr ; <i32> [#uses=1]
- %tmp1 = icmp ne i32 %tmp, 0 ; <i1> [#uses=1]
- %tmp12 = zext i1 %tmp1 to i8 ; <i8> [#uses=1]
- %toBool = icmp ne i8 %tmp12, 0 ; <i1> [#uses=1]
- br i1 %toBool, label %cond_true, label %cond_false
-
-cond_true: ; preds = %entry
- %tmp3 = call i32 (...)* @bar( ) ; <i32> [#uses=0]
- %tmp4 = call i32 (...)* @baz( i32 5, i32 6 ) ; <i32> [#uses=0]
- %tmp7 = load i32* %q_addr ; <i32> [#uses=1]
- %tmp8 = icmp ne i32 %tmp7, 0 ; <i1> [#uses=1]
- %tmp89 = zext i1 %tmp8 to i8 ; <i8> [#uses=1]
- %toBool10 = icmp ne i8 %tmp89, 0 ; <i1> [#uses=1]
- br i1 %toBool10, label %cond_true11, label %cond_false15
-
-cond_false: ; preds = %entry
- %tmp5 = call i32 (...)* @foo( ) ; <i32> [#uses=0]
- %tmp6 = call i32 (...)* @baz( i32 5, i32 6 ) ; <i32> [#uses=0]
- %tmp27 = load i32* %q_addr ; <i32> [#uses=1]
- %tmp28 = icmp ne i32 %tmp27, 0 ; <i1> [#uses=1]
- %tmp289 = zext i1 %tmp28 to i8 ; <i8> [#uses=1]
- %toBool210 = icmp ne i8 %tmp289, 0 ; <i1> [#uses=1]
- br i1 %toBool210, label %cond_true11, label %cond_false15
-
-cond_true11: ; preds = %cond_next
- %tmp13 = call i32 (...)* @foo( ) ; <i32> [#uses=0]
- %tmp14 = call i32 (...)* @quux( i32 3, i32 4 ) ; <i32> [#uses=0]
- br label %cond_next18
-
-cond_false15: ; preds = %cond_next
- %tmp16 = call i32 (...)* @bar( ) ; <i32> [#uses=0]
- %tmp17 = call i32 (...)* @quux( i32 3, i32 4 ) ; <i32> [#uses=0]
- br label %cond_next18
-
-cond_next18: ; preds = %cond_false15, %cond_true11
- %tmp19 = call i32 (...)* @bar( ) ; <i32> [#uses=0]
- br label %return
-
-return: ; preds = %cond_next18
- %retval20 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval20
-}
-
-declare i32 @bar(...)
-
-declare i32 @baz(...)
-
-declare i32 @foo(...)
-
-declare i32 @quux(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-14-InlineAsmCstCrash.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-14-InlineAsmCstCrash.ll
deleted file mode 100644
index b3b0769..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-14-InlineAsmCstCrash.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6
-
-define i32 @test3() {
- tail call void asm sideeffect "/* number: ${0:c} */", "i"( i32 1 )
- ret i32 11
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-14-RegScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-14-RegScavengerAssert.ll
deleted file mode 100644
index 7b15ded..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-14-RegScavengerAssert.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi
-; PR1406
-
- %struct.AVClass = type { i8*, i8* (i8*)*, %struct.AVOption* }
- %struct.AVCodec = type { i8*, i32, i32, i32, i32 (%struct.AVCodecContext*)*, i32 (%struct.AVCodecContext*, i8*, i32, i8*)*, i32 (%struct.AVCodecContext*)*, i32 (%struct.AVCodecContext*, i8*, i32*, i8*, i32)*, i32, %struct.AVCodec*, void (%struct.AVCodecContext*)*, %struct.AVRational*, i32* }
- %struct.AVCodecContext = type { %struct.AVClass*, i32, i32, i32, i32, i32, i8*, i32, %struct.AVRational, i32, i32, i32, i32, i32, void (%struct.AVCodecContext*, %struct.AVFrame*, i32*, i32, i32, i32)*, i32, i32, i32, i32, i32, i32, i32, float, float, i32, i32, i32, i32, float, i32, i32, i32, %struct.AVCodec*, i8*, i32, i32, void (%struct.AVCodecContext*, i8*, i32, i32)*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, [32 x i8], i32, i32, i32, i32, i32, i32, i32, float, i32, i32 (%struct.AVCodecContext*, %struct.AVFrame*)*, void (%struct.AVCodecContext*, %struct.AVFrame*)*, i32, i32, i32, i32, i8*, i8*, float, float, i32, %struct.RcOverride*, i32, i8*, i32, i32, i32, float, float, float, float, i32, float, float, float, float, float, i32, i32, i32, i32*, i32, i32, i32, i32, %struct.AVRational, %struct.AVFrame*, i32, i32, [4 x i64], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 (%struct.AVCodecContext*, i32*)*, i32, i32, i32, i32, i32, i32, i8*, i32, i32, i32, i32, i32, i32, i16*, i16*, i32, i32, i32, i32, %struct.AVPaletteControl*, i32, i32 (%struct.AVCodecContext*, %struct.AVFrame*)*, i32, i32, i32, i32, i32, i32, i32, i32 (%struct.AVCodecContext*, i32 (%struct.AVCodecContext*, i8*)*, i8**, i32*, i32)*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i64 }
- %struct.AVFrame = type { [4 x i8*], [4 x i32], [4 x i8*], i32, i32, i64, i32, i32, i32, i32, i32, i8*, i32, i8*, [2 x [2 x i16]*], i32*, i8, i8*, [4 x i64], i32, i32, i32, i32, i32, %struct.AVPanScan*, i32, i32, i16*, [2 x i8*] }
- %struct.AVOption = type opaque
- %struct.AVPaletteControl = type { i32, [256 x i32] }
- %struct.AVPanScan = type { i32, i32, i32, [3 x [2 x i16]] }
- %struct.AVRational = type { i32, i32 }
- %struct.RcOverride = type { i32, i32, i32, float }
-
-define i32 @decode_init(%struct.AVCodecContext* %avctx) {
-entry:
- br i1 false, label %bb, label %cond_next789
-
-bb: ; preds = %bb, %entry
- br i1 false, label %bb59, label %bb
-
-bb59: ; preds = %bb
- %tmp68 = sdiv i64 0, 0 ; <i64> [#uses=1]
- %tmp6869 = trunc i64 %tmp68 to i32 ; <i32> [#uses=2]
- %tmp81 = call i32 asm "smull $0, $1, $2, $3 \0A\09mov $0, $0, lsr $4\0A\09add $1, $0, $1, lsl $5\0A\09", "=&r,=*&r,r,r,i,i"( i32* null, i32 %tmp6869, i32 13316085, i32 23, i32 9 ) ; <i32> [#uses=0]
- %tmp90 = call i32 asm "smull $0, $1, $2, $3 \0A\09mov $0, $0, lsr $4\0A\09add $1, $0, $1, lsl $5\0A\09", "=&r,=*&r,r,r,i,i"( i32* null, i32 %tmp6869, i32 10568984, i32 23, i32 9 ) ; <i32> [#uses=0]
- unreachable
-
-cond_next789: ; preds = %entry
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll
deleted file mode 100644
index 061bf5e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll
+++ /dev/null
@@ -1,73 +0,0 @@
-; RUN: llc < %s -march=arm | grep bl.*baz | count 1
-; RUN: llc < %s -march=arm | grep bl.*quux | count 1
-; RUN: llc < %s -march=arm -enable-tail-merge=0 | grep bl.*baz | count 2
-; RUN: llc < %s -march=arm -enable-tail-merge=0 | grep bl.*quux | count 2
-; RUN: llc < %s -march=arm -enable-eh | grep bl.*baz | count 1
-; RUN: llc < %s -march=arm -enable-eh | grep bl.*quux | count 1
-; RUN: llc < %s -march=arm -enable-tail-merge=0 -enable-eh | grep bl.*baz | count 2
-; RUN: llc < %s -march=arm -enable-tail-merge=0 -enable-eh | grep bl.*quux | count 2
-; Check that tail merging is the default on ARM, and that -enable-tail-merge=0 works.
-; PR1628
-
-; ModuleID = 'tail.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
-
-define i32 @f(i32 %i, i32 %q) {
-entry:
- %i_addr = alloca i32 ; <i32*> [#uses=2]
- %q_addr = alloca i32 ; <i32*> [#uses=2]
- %retval = alloca i32, align 4 ; <i32*> [#uses=1]
- "alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %i, i32* %i_addr
- store i32 %q, i32* %q_addr
- %tmp = load i32* %i_addr ; <i32> [#uses=1]
- %tmp1 = icmp ne i32 %tmp, 0 ; <i1> [#uses=1]
- %tmp12 = zext i1 %tmp1 to i8 ; <i8> [#uses=1]
- %toBool = icmp ne i8 %tmp12, 0 ; <i1> [#uses=1]
- br i1 %toBool, label %cond_true, label %cond_false
-
-cond_true: ; preds = %entry
- %tmp3 = call i32 (...)* @bar( ) ; <i32> [#uses=0]
- %tmp4 = call i32 (...)* @baz( i32 5, i32 6 ) ; <i32> [#uses=0]
- %tmp7 = load i32* %q_addr ; <i32> [#uses=1]
- %tmp8 = icmp ne i32 %tmp7, 0 ; <i1> [#uses=1]
- %tmp89 = zext i1 %tmp8 to i8 ; <i8> [#uses=1]
- %toBool10 = icmp ne i8 %tmp89, 0 ; <i1> [#uses=1]
- br i1 %toBool10, label %cond_true11, label %cond_false15
-
-cond_false: ; preds = %entry
- %tmp5 = call i32 (...)* @foo( ) ; <i32> [#uses=0]
- %tmp6 = call i32 (...)* @baz( i32 5, i32 6 ) ; <i32> [#uses=0]
- %tmp27 = load i32* %q_addr ; <i32> [#uses=1]
- %tmp28 = icmp ne i32 %tmp27, 0 ; <i1> [#uses=1]
- %tmp289 = zext i1 %tmp28 to i8 ; <i8> [#uses=1]
- %toBool210 = icmp ne i8 %tmp289, 0 ; <i1> [#uses=1]
- br i1 %toBool210, label %cond_true11, label %cond_false15
-
-cond_true11: ; preds = %cond_next
- %tmp13 = call i32 (...)* @foo( ) ; <i32> [#uses=0]
- %tmp14 = call i32 (...)* @quux( i32 3, i32 4 ) ; <i32> [#uses=0]
- br label %cond_next18
-
-cond_false15: ; preds = %cond_next
- %tmp16 = call i32 (...)* @bar( ) ; <i32> [#uses=0]
- %tmp17 = call i32 (...)* @quux( i32 3, i32 4 ) ; <i32> [#uses=0]
- br label %cond_next18
-
-cond_next18: ; preds = %cond_false15, %cond_true11
- %tmp19 = call i32 (...)* @bar( ) ; <i32> [#uses=0]
- br label %return
-
-return: ; preds = %cond_next18
- %retval20 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval20
-}
-
-declare i32 @bar(...)
-
-declare i32 @baz(...)
-
-declare i32 @foo(...)
-
-declare i32 @quux(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-23-BadPreIndexedStore.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-23-BadPreIndexedStore.ll
deleted file mode 100644
index d2eb85d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-23-BadPreIndexedStore.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -march=arm | not grep {str.*\\!}
-
- %struct.shape_edge_t = type { %struct.shape_edge_t*, %struct.shape_edge_t*, i32, i32, i32, i32 }
- %struct.shape_path_t = type { %struct.shape_edge_t*, %struct.shape_edge_t*, i32, i32, i32, i32, i32, i32 }
- %struct.shape_pool_t = type { i8* (%struct.shape_pool_t*, i8*, i32)*, i8* (%struct.shape_pool_t*, i32)*, void (%struct.shape_pool_t*, i8*)* }
-
-define %struct.shape_path_t* @shape_path_alloc(%struct.shape_pool_t* %pool, i32* %shape) {
-entry:
- br i1 false, label %cond_false, label %bb45
-
-bb45: ; preds = %entry
- ret %struct.shape_path_t* null
-
-cond_false: ; preds = %entry
- br i1 false, label %bb140, label %bb174
-
-bb140: ; preds = %bb140, %cond_false
- %indvar = phi i32 [ 0, %cond_false ], [ %indvar.next, %bb140 ] ; <i32> [#uses=2]
- %edge.230.0.rec = shl i32 %indvar, 1 ; <i32> [#uses=3]
- %edge.230.0 = getelementptr %struct.shape_edge_t* null, i32 %edge.230.0.rec ; <%struct.shape_edge_t*> [#uses=1]
- %edge.230.0.sum6970 = or i32 %edge.230.0.rec, 1 ; <i32> [#uses=2]
- %tmp154 = getelementptr %struct.shape_edge_t* null, i32 %edge.230.0.sum6970 ; <%struct.shape_edge_t*> [#uses=1]
- %tmp11.i5 = getelementptr %struct.shape_edge_t* null, i32 %edge.230.0.sum6970, i32 0 ; <%struct.shape_edge_t**> [#uses=1]
- store %struct.shape_edge_t* %edge.230.0, %struct.shape_edge_t** %tmp11.i5
- store %struct.shape_edge_t* %tmp154, %struct.shape_edge_t** null
- %tmp16254.0.rec = add i32 %edge.230.0.rec, 2 ; <i32> [#uses=1]
- %xp.350.sum = add i32 0, %tmp16254.0.rec ; <i32> [#uses=1]
- %tmp168 = icmp slt i32 %xp.350.sum, 0 ; <i1> [#uses=1]
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
- br i1 %tmp168, label %bb140, label %bb174
-
-bb174: ; preds = %bb140, %cond_false
- ret %struct.shape_path_t* null
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-31-RegScavengerInfiniteLoop.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-31-RegScavengerInfiniteLoop.ll
deleted file mode 100644
index 030486a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-05-31-RegScavengerInfiniteLoop.ll
+++ /dev/null
@@ -1,237 +0,0 @@
-; RUN: llc < %s
-; PR1424
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "arm-linux-gnueabi"
- %struct.AVClass = type { i8*, i8* (i8*)*, %struct.AVOption* }
- %struct.AVCodec = type { i8*, i32, i32, i32, i32 (%struct.AVCodecContext*)*, i32 (%struct.AVCodecContext*, i8*, i32, i8*)*, i32 (%struct.AVCodecContext*)*, i32 (%struct.AVCodecContext*, i8*, i32*, i8*, i32)*, i32, %struct.AVCodec*, void (%struct.AVCodecContext*)*, %struct.AVRational*, i32* }
- %struct.AVCodecContext = type { %struct.AVClass*, i32, i32, i32, i32, i32, i8*, i32, %struct.AVRational, i32, i32, i32, i32, i32, void (%struct.AVCodecContext*, %struct.AVFrame*, i32*, i32, i32, i32)*, i32, i32, i32, i32, i32, i32, i32, float, float, i32, i32, i32, i32, float, i32, i32, i32, %struct.AVCodec*, i8*, i32, i32, void (%struct.AVCodecContext*, i8*, i32, i32)*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, [32 x i8], i32, i32, i32, i32, i32, i32, i32, float, i32, i32 (%struct.AVCodecContext*, %struct.AVFrame*)*, void (%struct.AVCodecContext*, %struct.AVFrame*)*, i32, i32, i32, i32, i8*, i8*, float, float, i32, %struct.RcOverride*, i32, i8*, i32, i32, i32, float, float, float, float, i32, float, float, float, float, float, i32, i32, i32, i32*, i32, i32, i32, i32, %struct.AVRational, %struct.AVFrame*, i32, i32, [4 x i64], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 (%struct.AVCodecContext*, i32*)*, i32, i32, i32, i32, i32, i32, i8*, i32, i32, i32, i32, i32, i32, i16*, i16*, i32, i32, i32, i32, %struct.AVPaletteControl*, i32, i32 (%struct.AVCodecContext*, %struct.AVFrame*)*, i32, i32, i32, i32, i32, i32, i32, i32 (%struct.AVCodecContext*, i32 (%struct.AVCodecContext*, i8*)*, i8**, i32*, i32)*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i64 }
- %struct.AVEvalExpr = type opaque
- %struct.AVFrame = type { [4 x i8*], [4 x i32], [4 x i8*], i32, i32, i64, i32, i32, i32, i32, i32, i8*, i32, i8*, [2 x [2 x i16]*], i32*, i8, i8*, [4 x i64], i32, i32, i32, i32, i32, %struct.AVPanScan*, i32, i32, i16*, [2 x i8*] }
- %struct.AVOption = type opaque
- %struct.AVPaletteControl = type { i32, [256 x i32] }
- %struct.AVPanScan = type { i32, i32, i32, [3 x [2 x i16]] }
- %struct.AVRational = type { i32, i32 }
- %struct.BlockNode = type { i16, i16, i8, [3 x i8], i8, i8 }
- %struct.DSPContext = type { void (i16*, i8*, i32)*, void (i16*, i8*, i8*, i32)*, void (i16*, i8*, i32)*, void (i16*, i8*, i32)*, void (i16*, i8*, i32)*, void (i8*, i16*, i32)*, void (i8*, i16*, i32)*, i32 (i16*)*, void (i8*, i8*, i32, i32, i32, i32, i32)*, void (i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)*, void (i16*)*, i32 (i8*, i32)*, i32 (i8*, i32)*, [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], i32 (i8*, i16*, i32)*, [4 x [4 x void (i8*, i8*, i32, i32)*]], [4 x [4 x void (i8*, i8*, i32, i32)*]], [4 x [4 x void (i8*, i8*, i32, i32)*]], [4 x [4 x void (i8*, i8*, i32, i32)*]], [2 x void (i8*, i8*, i8*, i32, i32)*], [11 x void (i8*, i8*, i32, i32, i32)*], [11 x void (i8*, i8*, i32, i32, i32)*], [2 x [16 x void (i8*, i8*, i32)*]], [2 x [16 x void (i8*, i8*, i32)*]], [2 x [16 x void (i8*, i8*, i32)*]], [2 x [16 x void (i8*, i8*, i32)*]], [8 x void (i8*, i8*, i32)*], [3 x void (i8*, i8*, i32, i32, i32, i32)*], [3 x void (i8*, i8*, i32, i32, i32, i32)*], [3 x void (i8*, i8*, i32, i32, i32, i32)*], [4 x [16 x void (i8*, i8*, i32)*]], [4 x [16 x void (i8*, i8*, i32)*]], [4 x [16 x void (i8*, i8*, i32)*]], [4 x [16 x void (i8*, i8*, i32)*]], [10 x void (i8*, i32, i32, i32, i32)*], [10 x void (i8*, i8*, i32, i32, i32, i32, i32)*], [2 x [16 x void (i8*, i8*, i32)*]], [2 x [16 x void (i8*, i8*, i32)*]], void (i8*, i32, i32, i32, i32, i32, i32)*, void (i8*, i32, i32, i32, i32, i32, i32)*, void (i8*, i32, i32, i32, i32, i32, i32)*, void (i8*, i32, i32, i32, i32, i32, i32)*, void (i8*, i16*, i32)*, [2 x [4 x i32 (i8*, i8*, i8*, i32, i32)*]], void (i8*, i8*, i32)*, void (i8*, i8*, i8*, i32)*, void (i8*, i8*, i8*, i32, i32*, i32*)*, void (i32*, i32*, i32)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32)*, void (i8*, i32, i32, i32)*, void ([4 x [4 x i16]]*, i8*, [40 x i8]*, [40 x [2 x i16]]*, i32, i32, i32, i32, i32)*, void (i8*, i32, i32)*, void (i8*, i32, i32)*, void (i8*, i32)*, void (float*, float*, i32)*, void (float*, float*, i32)*, void (float*, float*, float*, i32)*, void (float*, float*, float*, float*, i32, i32, i32)*, void (i16*, float*, i32)*, void (i16*)*, void (i16*)*, void (i16*)*, void (i8*, i32, i16*)*, void (i8*, i32, i16*)*, [64 x i8], i32, i32 (i16*, i16*, i16*, i32)*, void (i16*, i16*, i32)*, void (i8*, i16*, i32)*, void (i8*, i16*, i32)*, void (i8*, i16*, i32)*, void (i8*, i16*, i32)*, void ([4 x i16]*)*, void (i32*, i32*, i32*, i32*, i32*, i32*, i32)*, void (i32*, i32)*, void (i8*, i32, i8**, i32, i32, i32, i32, i32, %struct.slice_buffer*, i32, i8*)*, void (i8*, i32, i32)*, [4 x void (i8*, i32, i8*, i32, i32, i32)*], void (i16*)*, void (i16*, i32)*, void (i16*, i32)*, void (i16*, i32)*, void (i8*, i32)*, void (i8*, i32)*, [16 x void (i8*, i8*, 
i32, i32)*] }
- %struct.FILE = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker*, %struct.FILE*, i32, i32, i32, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i8*, i8*, i32, i32, [40 x i8] }
- %struct.GetBitContext = type { i8*, i8*, i32*, i32, i32, i32, i32 }
- %struct.MJpegContext = type opaque
- %struct.MotionEstContext = type { %struct.AVCodecContext*, i32, [4 x [2 x i32]], [4 x [2 x i32]], i8*, i8*, [2 x i8*], i8*, i32, i32*, i32*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [4 x [4 x i8*]], [4 x [4 x i8*]], i32, i32, i32, i32, i32, [4 x void (i8*, i8*, i32, i32)*]*, [4 x void (i8*, i8*, i32, i32)*]*, [16 x void (i8*, i8*, i32)*]*, [16 x void (i8*, i8*, i32)*]*, [4097 x i8]*, i8*, i32 (%struct.MpegEncContext*, i32*, i32*, i32, i32, i32, i32, i32)* }
- %struct.MpegEncContext = type { %struct.AVCodecContext*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.PutBitContext, i32, i32, i32, i32, i32, i32, i64, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.Picture*, %struct.Picture**, %struct.Picture**, i32, i32, [8 x %struct.MpegEncContext*], %struct.Picture, %struct.Picture, %struct.Picture, %struct.Picture, %struct.Picture*, %struct.Picture*, %struct.Picture*, [3 x i8*], [3 x i32], i16*, [3 x i16*], [20 x i16], i32, i32, i8*, i8*, i8*, i8*, i8*, [16 x i16]*, [3 x [16 x i16]*], i32, i8*, i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i32, i32, i32, i32, i32*, i32, i32, i32, i32, i32, i32, i32, [5 x i32], i32, i32, i32, i32, %struct.DSPContext, i32, i32, [2 x i16]*, [2 x i16]*, [2 x i16]*, [2 x i16]*, [2 x i16]*, [2 x i16]*, [2 x [2 x [2 x i16]*]], [2 x [2 x [2 x [2 x i16]*]]], [2 x i16]*, [2 x i16]*, [2 x i16]*, [2 x i16]*, [2 x i16]*, [2 x i16]*, [2 x [2 x [2 x i16]*]], [2 x [2 x [2 x [2 x i16]*]]], [2 x i8*], [2 x [2 x i8*]], i32, i32, i32, [2 x [4 x [2 x i32]]], [2 x [2 x i32]], [2 x [2 x [2 x i32]]], i8*, [2 x [64 x i16]], %struct.MotionEstContext, i32, i32, i32, i32, i32, i32, i16*, [6 x i32], [6 x i32], [3 x i8*], i32*, [64 x i16], [64 x i16], [64 x i16], [64 x i16], i32, i32, i32, i32, i32, i8*, i8*, i8*, i8*, i8*, i8*, [8 x i32], [64 x i32]*, [64 x i32]*, [2 x [64 x i16]]*, [2 x [64 x i16]]*, [12 x i32], %struct.ScanTable, %struct.ScanTable, %struct.ScanTable, %struct.ScanTable, [64 x i32]*, [2 x i32], [64 x i16]*, i8*, i64, i64, i32, i32, %struct.RateControlContext, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i32, i32, %struct.GetBitContext, i32, i32, i32, %struct.ParseContext, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i16, i16, i16, i16, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x [2 x i32]], [2 x [2 x i32]], [2 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.PutBitContext, %struct.PutBitContext, i32, i32, i32, i32, i32, i32, i8*, i32, i32, i32, i32, i32, [3 x i32], %struct.MJpegContext*, [3 x i32], [3 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x [65 x [65 x [2 x i32]]]]*, i32, i32, %struct.GetBitContext, i32, i32, i32, i8*, i32, [2 x [2 x i32]], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x i32], i32, i32, i32, i32, i8*, i32, [12 x i16*], [64 x i16]*, [8 x [64 x i16]]*, i32 (%struct.MpegEncContext*, [64 x i16]*)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, i32 (%struct.MpegEncContext*, i16*, i32, i32, i32*)*, i32 (%struct.MpegEncContext*, i16*, i32, i32, i32*)*, void (%struct.MpegEncContext*, i16*)* }
- %struct.ParseContext = type { i8*, i32, i32, i32, i32, i32, i32, i32 }
- %struct.Picture = type { [4 x i8*], [4 x i32], [4 x i8*], i32, i32, i64, i32, i32, i32, i32, i32, i8*, i32, i8*, [2 x [2 x i16]*], i32*, i8, i8*, [4 x i64], i32, i32, i32, i32, i32, %struct.AVPanScan*, i32, i32, i16*, [2 x i8*], [3 x i8*], [2 x [2 x i16]*], i32*, [2 x i32], i32, i32, i32, i32, [2 x [16 x i32]], [2 x i32], i32, i32, i16*, i16*, i8*, i32*, i32 }
- %struct.Plane = type { i32, i32, [8 x [4 x %struct.SubBand]] }
- %struct.Predictor = type { double, double, double }
- %struct.PutBitContext = type { i32, i32, i8*, i8*, i8* }
- %struct.RangeCoder = type { i32, i32, i32, i32, [256 x i8], [256 x i8], i8*, i8*, i8* }
- %struct.RateControlContext = type { %struct.FILE*, i32, %struct.RateControlEntry*, double, [5 x %struct.Predictor], double, double, double, double, double, [5 x double], i32, i32, [5 x i64], [5 x i64], [5 x i64], [5 x i64], [5 x i32], i32, i8*, float, i32, %struct.AVEvalExpr* }
- %struct.RateControlEntry = type { i32, float, i32, i32, i32, i32, i32, i64, i32, float, i32, i32, i32, i32, i32, i32 }
- %struct.RcOverride = type { i32, i32, i32, float }
- %struct.ScanTable = type { i8*, [64 x i8], [64 x i8] }
- %struct.SnowContext = type { %struct.AVCodecContext*, %struct.RangeCoder, %struct.DSPContext, %struct.AVFrame, %struct.AVFrame, %struct.AVFrame, [8 x %struct.AVFrame], %struct.AVFrame, [32 x i8], [4224 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [8 x [2 x i16]*], [8 x i32*], i32*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [4 x %struct.Plane], %struct.BlockNode*, [1024 x i32], i32, %struct.slice_buffer, %struct.MpegEncContext }
- %struct.SubBand = type { i32, i32, i32, i32, i32, i32*, i32, i32, i32, %struct.x_and_coeff*, %struct.SubBand*, [519 x [32 x i8]] }
- %struct._IO_marker = type { %struct._IO_marker*, %struct.FILE*, i32 }
- %struct.slice_buffer = type { i32**, i32**, i32, i32, i32, i32, i32* }
- %struct.x_and_coeff = type { i16, i16 }
-
-define fastcc void @iterative_me(%struct.SnowContext* %s) {
-entry:
- %state = alloca [4224 x i8], align 8 ; <[4224 x i8]*> [#uses=0]
- %best_rd4233 = alloca i32, align 4 ; <i32*> [#uses=0]
- %tmp21 = getelementptr %struct.SnowContext* %s, i32 0, i32 36 ; <i32*> [#uses=2]
- br label %bb4198
-
-bb79: ; preds = %bb4189.preheader
- br i1 false, label %cond_next239, label %cond_true
-
-cond_true: ; preds = %bb79
- ret void
-
-cond_next239: ; preds = %bb79
- %tmp286 = alloca i8, i32 0 ; <i8*> [#uses=0]
- ret void
-
-bb4198: ; preds = %bb4189.preheader, %entry
- br i1 false, label %bb4189.preheader, label %bb4204
-
-bb4189.preheader: ; preds = %bb4198
- br i1 false, label %bb79, label %bb4198
-
-bb4204: ; preds = %bb4198
- br i1 false, label %bb4221, label %cond_next4213
-
-cond_next4213: ; preds = %bb4204
- ret void
-
-bb4221: ; preds = %bb4204
- br i1 false, label %bb5242.preheader, label %UnifiedReturnBlock
-
-bb5242.preheader: ; preds = %bb4221
- br label %bb5242
-
-bb4231: ; preds = %bb5233
- %tmp4254.sum = add i32 0, 1 ; <i32> [#uses=2]
- br i1 false, label %bb4559, label %cond_next4622
-
-bb4559: ; preds = %bb4231
- ret void
-
-cond_next4622: ; preds = %bb4231
- %tmp4637 = load i16* null ; <i16> [#uses=1]
- %tmp46374638 = sext i16 %tmp4637 to i32 ; <i32> [#uses=1]
- %tmp4642 = load i16* null ; <i16> [#uses=1]
- %tmp46424643 = sext i16 %tmp4642 to i32 ; <i32> [#uses=1]
- %tmp4648 = load i16* null ; <i16> [#uses=1]
- %tmp46484649 = sext i16 %tmp4648 to i32 ; <i32> [#uses=1]
- %tmp4653 = getelementptr %struct.BlockNode* null, i32 %tmp4254.sum, i32 0 ; <i16*> [#uses=1]
- %tmp4654 = load i16* %tmp4653 ; <i16> [#uses=1]
- %tmp46544655 = sext i16 %tmp4654 to i32 ; <i32> [#uses=1]
- %tmp4644 = add i32 %tmp46374638, 2 ; <i32> [#uses=1]
- %tmp4650 = add i32 %tmp4644, %tmp46424643 ; <i32> [#uses=1]
- %tmp4656 = add i32 %tmp4650, %tmp46484649 ; <i32> [#uses=1]
- %tmp4657 = add i32 %tmp4656, %tmp46544655 ; <i32> [#uses=2]
- %tmp4658 = ashr i32 %tmp4657, 2 ; <i32> [#uses=1]
- %tmp4662 = load i16* null ; <i16> [#uses=1]
- %tmp46624663 = sext i16 %tmp4662 to i32 ; <i32> [#uses=1]
- %tmp4672 = getelementptr %struct.BlockNode* null, i32 0, i32 1 ; <i16*> [#uses=1]
- %tmp4673 = load i16* %tmp4672 ; <i16> [#uses=1]
- %tmp46734674 = sext i16 %tmp4673 to i32 ; <i32> [#uses=1]
- %tmp4678 = getelementptr %struct.BlockNode* null, i32 %tmp4254.sum, i32 1 ; <i16*> [#uses=1]
- %tmp4679 = load i16* %tmp4678 ; <i16> [#uses=1]
- %tmp46794680 = sext i16 %tmp4679 to i32 ; <i32> [#uses=1]
- %tmp4669 = add i32 %tmp46624663, 2 ; <i32> [#uses=1]
- %tmp4675 = add i32 %tmp4669, 0 ; <i32> [#uses=1]
- %tmp4681 = add i32 %tmp4675, %tmp46734674 ; <i32> [#uses=1]
- %tmp4682 = add i32 %tmp4681, %tmp46794680 ; <i32> [#uses=2]
- %tmp4683 = ashr i32 %tmp4682, 2 ; <i32> [#uses=1]
- %tmp4703 = load i32* %tmp21 ; <i32> [#uses=1]
- %tmp4707 = shl i32 %tmp4703, 0 ; <i32> [#uses=4]
- %tmp4710 = load %struct.BlockNode** null ; <%struct.BlockNode*> [#uses=6]
- %tmp4713 = mul i32 %tmp4707, %mb_y.4 ; <i32> [#uses=1]
- %tmp4715 = add i32 %tmp4713, %mb_x.7 ; <i32> [#uses=7]
- store i8 0, i8* null
- store i8 0, i8* null
- %tmp47594761 = bitcast %struct.BlockNode* null to i8* ; <i8*> [#uses=2]
- call void @llvm.memcpy.i32( i8* null, i8* %tmp47594761, i32 10, i32 0 )
- %tmp4716.sum5775 = add i32 %tmp4715, 1 ; <i32> [#uses=1]
- %tmp4764 = getelementptr %struct.BlockNode* %tmp4710, i32 %tmp4716.sum5775 ; <%struct.BlockNode*> [#uses=1]
- %tmp47644766 = bitcast %struct.BlockNode* %tmp4764 to i8* ; <i8*> [#uses=1]
- %tmp4716.sum5774 = add i32 %tmp4715, %tmp4707 ; <i32> [#uses=0]
- %tmp47704772 = bitcast %struct.BlockNode* null to i8* ; <i8*> [#uses=1]
- %tmp4774 = add i32 %tmp4707, 1 ; <i32> [#uses=1]
- %tmp4716.sum5773 = add i32 %tmp4774, %tmp4715 ; <i32> [#uses=1]
- %tmp4777 = getelementptr %struct.BlockNode* %tmp4710, i32 %tmp4716.sum5773 ; <%struct.BlockNode*> [#uses=1]
- %tmp47774779 = bitcast %struct.BlockNode* %tmp4777 to i8* ; <i8*> [#uses=1]
- %tmp4781 = icmp slt i32 %mb_x.7, 0 ; <i1> [#uses=1]
- %tmp4788 = or i1 %tmp4781, %tmp4784 ; <i1> [#uses=2]
- br i1 %tmp4788, label %cond_true4791, label %cond_next4794
-
-cond_true4791: ; preds = %cond_next4622
- unreachable
-
-cond_next4794: ; preds = %cond_next4622
- %tmp4797 = icmp slt i32 %mb_x.7, %tmp4707 ; <i1> [#uses=1]
- br i1 %tmp4797, label %cond_next4803, label %cond_true4800
-
-cond_true4800: ; preds = %cond_next4794
- unreachable
-
-cond_next4803: ; preds = %cond_next4794
- %tmp4825 = ashr i32 %tmp4657, 12 ; <i32> [#uses=1]
- shl i32 %tmp4682, 4 ; <i32>:0 [#uses=1]
- %tmp4828 = and i32 %0, -64 ; <i32> [#uses=1]
- %tmp4831 = getelementptr %struct.BlockNode* %tmp4710, i32 %tmp4715, i32 2 ; <i8*> [#uses=0]
- %tmp4826 = add i32 %tmp4828, %tmp4825 ; <i32> [#uses=1]
- %tmp4829 = add i32 %tmp4826, 0 ; <i32> [#uses=1]
- %tmp4835 = add i32 %tmp4829, 0 ; <i32> [#uses=1]
- store i32 %tmp4835, i32* null
- %tmp48534854 = trunc i32 %tmp4658 to i16 ; <i16> [#uses=1]
- %tmp4856 = getelementptr %struct.BlockNode* %tmp4710, i32 %tmp4715, i32 0 ; <i16*> [#uses=1]
- store i16 %tmp48534854, i16* %tmp4856
- %tmp48574858 = trunc i32 %tmp4683 to i16 ; <i16> [#uses=1]
- %tmp4860 = getelementptr %struct.BlockNode* %tmp4710, i32 %tmp4715, i32 1 ; <i16*> [#uses=1]
- store i16 %tmp48574858, i16* %tmp4860
- %tmp4866 = getelementptr %struct.BlockNode* %tmp4710, i32 %tmp4715, i32 4 ; <i8*> [#uses=0]
- br i1 false, label %bb4933, label %cond_false4906
-
-cond_false4906: ; preds = %cond_next4803
- call void @llvm.memcpy.i32( i8* %tmp47594761, i8* null, i32 10, i32 0 )
- call void @llvm.memcpy.i32( i8* %tmp47644766, i8* null, i32 10, i32 0 )
- call void @llvm.memcpy.i32( i8* %tmp47704772, i8* null, i32 10, i32 0 )
- call void @llvm.memcpy.i32( i8* %tmp47774779, i8* null, i32 10, i32 0 )
- br label %bb5215
-
-bb4933: ; preds = %bb5215, %cond_next4803
- br i1 false, label %cond_true4944, label %bb5215
-
-cond_true4944: ; preds = %bb4933
- %tmp4982 = load i32* %tmp21 ; <i32> [#uses=1]
- %tmp4986 = shl i32 %tmp4982, 0 ; <i32> [#uses=2]
- %tmp4992 = mul i32 %tmp4986, %mb_y.4 ; <i32> [#uses=1]
- %tmp4994 = add i32 %tmp4992, %mb_x.7 ; <i32> [#uses=5]
- %tmp4995.sum5765 = add i32 %tmp4994, 1 ; <i32> [#uses=1]
- %tmp5043 = getelementptr %struct.BlockNode* null, i32 %tmp4995.sum5765 ; <%struct.BlockNode*> [#uses=1]
- %tmp50435045 = bitcast %struct.BlockNode* %tmp5043 to i8* ; <i8*> [#uses=2]
- call void @llvm.memcpy.i32( i8* null, i8* %tmp50435045, i32 10, i32 0 )
- %tmp4995.sum5764 = add i32 %tmp4994, %tmp4986 ; <i32> [#uses=1]
- %tmp5049 = getelementptr %struct.BlockNode* null, i32 %tmp4995.sum5764 ; <%struct.BlockNode*> [#uses=1]
- %tmp50495051 = bitcast %struct.BlockNode* %tmp5049 to i8* ; <i8*> [#uses=2]
- call void @llvm.memcpy.i32( i8* null, i8* %tmp50495051, i32 10, i32 0 )
- %tmp4995.sum5763 = add i32 0, %tmp4994 ; <i32> [#uses=1]
- %tmp5056 = getelementptr %struct.BlockNode* null, i32 %tmp4995.sum5763 ; <%struct.BlockNode*> [#uses=1]
- %tmp50565058 = bitcast %struct.BlockNode* %tmp5056 to i8* ; <i8*> [#uses=1]
- br i1 %tmp4788, label %cond_true5070, label %cond_next5073
-
-cond_true5070: ; preds = %cond_true4944
- unreachable
-
-cond_next5073: ; preds = %cond_true4944
- %tmp5139 = getelementptr %struct.BlockNode* null, i32 %tmp4994, i32 1 ; <i16*> [#uses=0]
- %tmp5145 = getelementptr %struct.BlockNode* null, i32 %tmp4994, i32 4 ; <i8*> [#uses=0]
- call void @llvm.memcpy.i32( i8* %tmp50435045, i8* null, i32 10, i32 0 )
- call void @llvm.memcpy.i32( i8* %tmp50495051, i8* null, i32 10, i32 0 )
- call void @llvm.memcpy.i32( i8* %tmp50565058, i8* null, i32 10, i32 0 )
- br label %bb5215
-
-bb5215: ; preds = %cond_next5073, %bb4933, %cond_false4906
- %i4232.3 = phi i32 [ 0, %cond_false4906 ], [ 0, %cond_next5073 ], [ 0, %bb4933 ] ; <i32> [#uses=1]
- %tmp5217 = icmp slt i32 %i4232.3, 4 ; <i1> [#uses=1]
- br i1 %tmp5217, label %bb4933, label %bb5220
-
-bb5220: ; preds = %bb5215
- br i1 false, label %bb5230, label %cond_true5226
-
-cond_true5226: ; preds = %bb5220
- ret void
-
-bb5230: ; preds = %bb5220
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
- br label %bb5233
-
-bb5233: ; preds = %bb5233.preheader, %bb5230
- %indvar = phi i32 [ 0, %bb5233.preheader ], [ %indvar.next, %bb5230 ] ; <i32> [#uses=2]
- %mb_x.7 = shl i32 %indvar, 1 ; <i32> [#uses=4]
- br i1 false, label %bb4231, label %bb5239
-
-bb5239: ; preds = %bb5233
- %indvar.next37882 = add i32 %indvar37881, 1 ; <i32> [#uses=1]
- br label %bb5242
-
-bb5242: ; preds = %bb5239, %bb5242.preheader
- %indvar37881 = phi i32 [ 0, %bb5242.preheader ], [ %indvar.next37882, %bb5239 ] ; <i32> [#uses=2]
- %mb_y.4 = shl i32 %indvar37881, 1 ; <i32> [#uses=3]
- br i1 false, label %bb5233.preheader, label %bb5248
-
-bb5233.preheader: ; preds = %bb5242
- %tmp4784 = icmp slt i32 %mb_y.4, 0 ; <i1> [#uses=1]
- br label %bb5233
-
-bb5248: ; preds = %bb5242
- ret void
-
-UnifiedReturnBlock: ; preds = %bb4221
- ret void
-}
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2007-08-15-ReuseBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2007-08-15-ReuseBug.ll
deleted file mode 100644
index 30b72e0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2007-08-15-ReuseBug.ll
+++ /dev/null
@@ -1,106 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=pic -mattr=+v6
-; PR1609
-
- %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
-@_C_nextcmd = external global i32 ; <i32*> [#uses=2]
-@_C_cmds = external global [100 x i8*] ; <[100 x i8*]*> [#uses=2]
-@.str44 = external constant [2 x i8] ; <[2 x i8]*> [#uses=1]
-
-define i32 @main(i32 %argc, i8** %argv) {
-entry:
- br label %cond_next212.i
-
-bb21.i: ; preds = %cond_next212.i
- br label %cond_next212.i
-
-bb24.i: ; preds = %cond_next212.i
- ret i32 0
-
-bb27.i: ; preds = %cond_next212.i
- ret i32 0
-
-bb30.i: ; preds = %cond_next212.i
- %tmp205399.i = add i32 %argc_addr.2358.0.i, -1 ; <i32> [#uses=1]
- br label %cond_next212.i
-
-bb33.i: ; preds = %cond_next212.i
- ret i32 0
-
-cond_next73.i: ; preds = %cond_next212.i
- ret i32 0
-
-bb75.i: ; preds = %cond_next212.i
- ret i32 0
-
-bb77.i: ; preds = %cond_next212.i
- ret i32 0
-
-bb79.i: ; preds = %cond_next212.i
- ret i32 0
-
-bb102.i: ; preds = %cond_next212.i
- br i1 false, label %cond_true110.i, label %cond_next123.i
-
-cond_true110.i: ; preds = %bb102.i
- %tmp116.i = getelementptr i8** %argv_addr.2321.0.i, i32 2 ; <i8**> [#uses=1]
- %tmp117.i = load i8** %tmp116.i ; <i8*> [#uses=1]
- %tmp126425.i = call %struct.FILE* @fopen( i8* %tmp117.i, i8* getelementptr ([2 x i8]* @.str44, i32 0, i32 0) ) ; <%struct.FILE*> [#uses=0]
- ret i32 0
-
-cond_next123.i: ; preds = %bb102.i
- %tmp122.i = getelementptr i8* %tmp215.i, i32 2 ; <i8*> [#uses=0]
- ret i32 0
-
-bb162.i: ; preds = %cond_next212.i
- ret i32 0
-
-C_addcmd.exit120.i: ; preds = %cond_next212.i
- %tmp3.i.i.i.i105.i = call i8* @calloc( i32 15, i32 1 ) ; <i8*> [#uses=1]
- %tmp1.i108.i = getelementptr [100 x i8*]* @_C_cmds, i32 0, i32 0 ; <i8**> [#uses=1]
- store i8* %tmp3.i.i.i.i105.i, i8** %tmp1.i108.i, align 4
- %tmp.i91.i = load i32* @_C_nextcmd, align 4 ; <i32> [#uses=1]
- store i32 0, i32* @_C_nextcmd, align 4
- %tmp3.i.i.i.i95.i = call i8* @calloc( i32 15, i32 1 ) ; <i8*> [#uses=1]
- %tmp1.i98.i = getelementptr [100 x i8*]* @_C_cmds, i32 0, i32 %tmp.i91.i ; <i8**> [#uses=1]
- store i8* %tmp3.i.i.i.i95.i, i8** %tmp1.i98.i, align 4
- br label %cond_next212.i
-
-bb174.i: ; preds = %cond_next212.i
- ret i32 0
-
-bb192.i: ; preds = %cond_next212.i
- br label %cond_next212.i
-
-cond_next212.i: ; preds = %cond_next212.i, %cond_next212.i, %cond_next212.i, %cond_next212.i, %bb192.i, %C_addcmd.exit120.i, %bb30.i, %bb21.i, %entry
- %max_d.3 = phi i32 [ -1, %entry ], [ %max_d.3, %bb30.i ], [ %max_d.3, %bb21.i ], [ %max_d.3, %C_addcmd.exit120.i ], [ 0, %bb192.i ], [ %max_d.3, %cond_next212.i ], [ %max_d.3, %cond_next212.i ], [ %max_d.3, %cond_next212.i ], [ %max_d.3, %cond_next212.i ] ; <i32> [#uses=7]
- %argv_addr.2321.0.i = phi i8** [ %argv, %entry ], [ %tmp214.i, %bb192.i ], [ %tmp214.i, %C_addcmd.exit120.i ], [ %tmp214.i, %bb30.i ], [ %tmp214.i, %bb21.i ], [ %tmp214.i, %cond_next212.i ], [ %tmp214.i, %cond_next212.i ], [ %tmp214.i, %cond_next212.i ], [ %tmp214.i, %cond_next212.i ] ; <i8**> [#uses=2]
- %argc_addr.2358.0.i = phi i32 [ %argc, %entry ], [ %tmp205399.i, %bb30.i ], [ 0, %bb21.i ], [ 0, %C_addcmd.exit120.i ], [ 0, %bb192.i ], [ 0, %cond_next212.i ], [ 0, %cond_next212.i ], [ 0, %cond_next212.i ], [ 0, %cond_next212.i ] ; <i32> [#uses=1]
- %tmp214.i = getelementptr i8** %argv_addr.2321.0.i, i32 1 ; <i8**> [#uses=9]
- %tmp215.i = load i8** %tmp214.i ; <i8*> [#uses=1]
- %tmp1314.i = sext i8 0 to i32 ; <i32> [#uses=1]
- switch i32 %tmp1314.i, label %bb192.i [
- i32 76, label %C_addcmd.exit120.i
- i32 77, label %bb174.i
- i32 83, label %bb162.i
- i32 97, label %bb33.i
- i32 98, label %bb21.i
- i32 99, label %bb24.i
- i32 100, label %bb27.i
- i32 101, label %cond_next212.i
- i32 102, label %bb102.i
- i32 105, label %bb75.i
- i32 109, label %bb30.i
- i32 113, label %cond_next212.i
- i32 114, label %cond_next73.i
- i32 115, label %bb79.i
- i32 116, label %cond_next212.i
- i32 118, label %bb77.i
- i32 119, label %cond_next212.i
- ]
-}
-
-declare %struct.FILE* @fopen(i8*, i8*)
-
-declare i8* @calloc(i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2008-02-04-LocalRegAllocBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2008-02-04-LocalRegAllocBug.ll
deleted file mode 100644
index ff01506..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2008-02-04-LocalRegAllocBug.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi -regalloc=local
-; PR1925
-
- %struct.encode_aux_nearestmatch = type { i32*, i32*, i32*, i32*, i32, i32 }
- %struct.encode_aux_pigeonhole = type { float, float, i32, i32, i32*, i32, i32*, i32*, i32* }
- %struct.encode_aux_threshmatch = type { float*, i32*, i32, i32 }
- %struct.oggpack_buffer = type { i32, i32, i8*, i8*, i32 }
- %struct.static_codebook = type { i32, i32, i32*, i32, i32, i32, i32, i32, i32*, %struct.encode_aux_nearestmatch*, %struct.encode_aux_threshmatch*, %struct.encode_aux_pigeonhole*, i32 }
-
-define i32 @vorbis_staticbook_pack(%struct.static_codebook* %c, %struct.oggpack_buffer* %opb) {
-entry:
- %opb_addr = alloca %struct.oggpack_buffer* ; <%struct.oggpack_buffer**> [#uses=1]
- %tmp1 = load %struct.oggpack_buffer** %opb_addr, align 4 ; <%struct.oggpack_buffer*> [#uses=1]
- call void @oggpack_write( %struct.oggpack_buffer* %tmp1, i32 5653314, i32 24 ) nounwind
- call void @oggpack_write( %struct.oggpack_buffer* null, i32 0, i32 24 ) nounwind
- unreachable
-}
-
-declare void @oggpack_write(%struct.oggpack_buffer*, i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2008-02-29-RegAllocLocal.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2008-02-29-RegAllocLocal.ll
deleted file mode 100644
index 06bc987..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2008-02-29-RegAllocLocal.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -regalloc=local
-; PR1925
-
- %"struct.kc::impl_Ccode_option" = type { %"struct.kc::impl_abstract_phylum" }
- %"struct.kc::impl_ID" = type { %"struct.kc::impl_abstract_phylum", %"struct.kc::impl_Ccode_option"*, %"struct.kc::impl_casestring__Str"*, i32, %"struct.kc::impl_casestring__Str"* }
- %"struct.kc::impl_abstract_phylum" = type { i32 (...)** }
- %"struct.kc::impl_casestring__Str" = type { %"struct.kc::impl_abstract_phylum", i8* }
-
-define %"struct.kc::impl_ID"* @_ZN2kc18f_typeofunpsubtermEPNS_15impl_unpsubtermEPNS_7impl_IDE(%"struct.kc::impl_Ccode_option"* %a_unpsubterm, %"struct.kc::impl_ID"* %a_operator) {
-entry:
- %tmp8 = getelementptr %"struct.kc::impl_Ccode_option"* %a_unpsubterm, i32 0, i32 0, i32 0 ; <i32 (...)***> [#uses=0]
- br i1 false, label %bb41, label %bb55
-
-bb41: ; preds = %entry
- ret %"struct.kc::impl_ID"* null
-
-bb55: ; preds = %entry
- %tmp67 = tail call i32 null( %"struct.kc::impl_abstract_phylum"* null ) ; <i32> [#uses=0]
- %tmp97 = tail call i32 null( %"struct.kc::impl_abstract_phylum"* null ) ; <i32> [#uses=0]
- ret %"struct.kc::impl_ID"* null
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2008-03-05-SxtInRegBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2008-03-05-SxtInRegBug.ll
deleted file mode 100644
index a604c5c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2008-03-05-SxtInRegBug.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6 | not grep 255
-
-define i32 @main(i32 %argc, i8** %argv) {
-entry:
- br label %bb1
-bb1: ; preds = %entry
- %tmp3.i.i = load i8* null, align 1 ; <i8> [#uses=1]
- %tmp4.i.i = icmp slt i8 %tmp3.i.i, 0 ; <i1> [#uses=1]
- br i1 %tmp4.i.i, label %bb2, label %bb3
-bb2: ; preds = %bb1
- ret i32 1
-bb3: ; preds = %bb1
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll
deleted file mode 100644
index 78c6222..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -mattr=+v6,+vfp2
-
-@accum = external global { double, double } ; <{ double, double }*> [#uses=1]
-@.str = external constant [4 x i8] ; <[4 x i8]*> [#uses=1]
-
-define i32 @main() {
-entry:
- br label %bb74.i
-bb74.i: ; preds = %bb88.i, %bb74.i, %entry
- br i1 false, label %bb88.i, label %bb74.i
-bb88.i: ; preds = %bb74.i
- br i1 false, label %mandel.exit, label %bb74.i
-mandel.exit: ; preds = %bb88.i
- %tmp2 = volatile load double* getelementptr ({ double, double }* @accum, i32 0, i32 0), align 8 ; <double> [#uses=1]
- %tmp23 = fptosi double %tmp2 to i32 ; <i32> [#uses=1]
- %tmp5 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @.str, i32 0, i32 0), i32 %tmp23 ) ; <i32> [#uses=0]
- ret i32 0
-}
-
-declare i32 @printf(i8*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2008-04-04-ScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2008-04-04-ScavengerAssert.ll
deleted file mode 100644
index 234c7b6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2008-04-04-ScavengerAssert.ll
+++ /dev/null
@@ -1,60 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin
-
-@numBinsY = external global i32 ; <i32*> [#uses=1]
-
-declare double @pow(double, double)
-
-define void @main(i32 %argc, i8** %argv) noreturn nounwind {
-entry:
- br i1 false, label %bb34.outer.i.i.i, label %cond_false674
-bb34.outer.i.i.i: ; preds = %entry
- br i1 false, label %bb2.i.i.i, label %bb47.i.i.i
-bb2.i.i.i: ; preds = %bb34.outer.i.i.i
- %tmp24.i.i.i = call double @pow( double 0.000000e+00, double 2.000000e+00 ) ; <double> [#uses=0]
- ret void
-bb47.i.i.i: ; preds = %bb34.outer.i.i.i
- br i1 false, label %bb220.i.i.i, label %bb62.preheader.i.i.i
-bb62.preheader.i.i.i: ; preds = %bb47.i.i.i
- ret void
-bb220.i.i.i: ; preds = %bb47.i.i.i
- br i1 false, label %bb248.i.i.i, label %cond_next232.i.i.i
-cond_next232.i.i.i: ; preds = %bb220.i.i.i
- ret void
-bb248.i.i.i: ; preds = %bb220.i.i.i
- br i1 false, label %bb300.i.i.i, label %cond_false256.i.i.i
-cond_false256.i.i.i: ; preds = %bb248.i.i.i
- ret void
-bb300.i.i.i: ; preds = %bb248.i.i.i
- store i32 undef, i32* @numBinsY, align 4
- ret void
-cond_false674: ; preds = %entry
- ret void
-}
-
- %struct.anon = type { %struct.rnode*, %struct.rnode* }
- %struct.ch_set = type { { i8, i8 }*, %struct.ch_set* }
- %struct.pat_list = type { i32, %struct.pat_list* }
- %struct.rnode = type { i16, { %struct.anon }, i16, %struct.pat_list*, %struct.pat_list* }
-
-define fastcc { i16, %struct.rnode* }* @get_token(i8** %s) nounwind {
-entry:
- br i1 false, label %bb42, label %bb78
-bb42: ; preds = %entry
- br label %cond_next119.i
-bb17.i: ; preds = %cond_next119.i
- br i1 false, label %cond_true53.i, label %cond_false99.i
-cond_true53.i: ; preds = %bb17.i
- ret { i16, %struct.rnode* }* null
-cond_false99.i: ; preds = %bb17.i
- %tmp106.i = malloc %struct.ch_set ; <%struct.ch_set*> [#uses=1]
- br i1 false, label %bb126.i, label %cond_next119.i
-cond_next119.i: ; preds = %cond_false99.i, %bb42
- %curr_ptr.0.reg2mem.0.i = phi %struct.ch_set* [ %tmp106.i, %cond_false99.i ], [ null, %bb42 ] ; <%struct.ch_set*> [#uses=2]
- %prev_ptr.0.reg2mem.0.i = phi %struct.ch_set* [ %curr_ptr.0.reg2mem.0.i, %cond_false99.i ], [ undef, %bb42 ] ; <%struct.ch_set*> [#uses=1]
- br i1 false, label %bb126.i, label %bb17.i
-bb126.i: ; preds = %cond_next119.i, %cond_false99.i
- %prev_ptr.0.reg2mem.1.i = phi %struct.ch_set* [ %prev_ptr.0.reg2mem.0.i, %cond_next119.i ], [ %curr_ptr.0.reg2mem.0.i, %cond_false99.i ] ; <%struct.ch_set*> [#uses=0]
- ret { i16, %struct.rnode* }* null
-bb78: ; preds = %entry
- ret { i16, %struct.rnode* }* null
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2008-04-10-ScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2008-04-10-ScavengerAssert.ll
deleted file mode 100644
index 77418be..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2008-04-10-ScavengerAssert.ll
+++ /dev/null
@@ -1,258 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin
-
- %struct.CONTENTBOX = type { i32, i32, i32, i32, i32 }
- %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
- %struct.LOCBOX = type { i32, i32, i32, i32 }
- %struct.SIDEBOX = type { i32, i32 }
- %struct.UNCOMBOX = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
- %struct.cellbox = type { i8*, i32, i32, i32, [9 x i32], i32, i32, i32, i32, i32, i32, i32, double, double, double, double, double, i32, i32, %struct.CONTENTBOX*, %struct.UNCOMBOX*, [8 x %struct.tilebox*], %struct.SIDEBOX* }
- %struct.termbox = type { %struct.termbox*, i32, i32, i32, i32, i32 }
- %struct.tilebox = type { %struct.tilebox*, double, double, double, double, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.termbox*, %struct.LOCBOX* }
-@.str127 = external constant [2 x i8] ; <[2 x i8]*> [#uses=1]
-@.str584 = external constant [5 x i8] ; <[5 x i8]*> [#uses=1]
-@.str8115 = external constant [9 x i8] ; <[9 x i8]*> [#uses=1]
-
-declare %struct.FILE* @fopen(i8*, i8*)
-
-declare i32 @strcmp(i8*, i8*)
-
-declare i32 @fscanf(%struct.FILE*, i8*, ...)
-
-define void @main(i32 %argc, i8** %argv) noreturn {
-entry:
- br i1 false, label %cond_next48, label %cond_false674
-cond_next48: ; preds = %entry
- %tmp61 = call %struct.FILE* @fopen( i8* null, i8* getelementptr ([2 x i8]* @.str127, i32 0, i32 0) ) ; <%struct.FILE*> [#uses=2]
- br i1 false, label %bb220.i.i.i, label %bb62.preheader.i.i.i
-bb62.preheader.i.i.i: ; preds = %cond_next48
- ret void
-bb220.i.i.i: ; preds = %cond_next48
- br i1 false, label %bb248.i.i.i, label %cond_next232.i.i.i
-cond_next232.i.i.i: ; preds = %bb220.i.i.i
- ret void
-bb248.i.i.i: ; preds = %bb220.i.i.i
- br i1 false, label %bb300.i.i.i, label %cond_false256.i.i.i
-cond_false256.i.i.i: ; preds = %bb248.i.i.i
- ret void
-bb300.i.i.i: ; preds = %bb248.i.i.i
- br label %bb.i.i347.i
-bb.i.i347.i: ; preds = %bb.i.i347.i, %bb300.i.i.i
- br i1 false, label %bb894.loopexit.i.i, label %bb.i.i347.i
-bb.i350.i: ; preds = %bb894.i.i
- br i1 false, label %bb24.i.i, label %cond_false373.i.i
-bb24.i.i: ; preds = %bb24.i.i, %bb.i350.i
- br i1 false, label %bb40.i.i, label %bb24.i.i
-bb40.i.i: ; preds = %bb24.i.i
- br i1 false, label %bb177.i393.i, label %bb82.i.i
-bb82.i.i: ; preds = %bb40.i.i
- ret void
-bb177.i393.i: ; preds = %bb40.i.i
- br i1 false, label %bb894.i.i, label %bb192.i.i
-bb192.i.i: ; preds = %bb177.i393.i
- ret void
-cond_false373.i.i: ; preds = %bb.i350.i
- %tmp376.i.i = call i32 @strcmp( i8* null, i8* getelementptr ([9 x i8]* @.str8115, i32 0, i32 0) ) ; <i32> [#uses=0]
- br i1 false, label %cond_true380.i.i, label %cond_next602.i.i
-cond_true380.i.i: ; preds = %cond_false373.i.i
- %tmp394.i418.i = add i32 %cell.0.i.i, 1 ; <i32> [#uses=1]
- %tmp397.i420.i = load %struct.cellbox** null, align 4 ; <%struct.cellbox*> [#uses=1]
- br label %bb398.i.i
-bb398.i.i: ; preds = %bb398.i.i, %cond_true380.i.i
- br i1 false, label %bb414.i.i, label %bb398.i.i
-bb414.i.i: ; preds = %bb398.i.i
- br i1 false, label %bb581.i.i, label %bb455.i442.i
-bb455.i442.i: ; preds = %bb414.i.i
- ret void
-bb581.i.i: ; preds = %bb581.i.i, %bb414.i.i
- br i1 false, label %bb894.i.i, label %bb581.i.i
-cond_next602.i.i: ; preds = %cond_false373.i.i
- br i1 false, label %bb609.i.i, label %bb661.i.i
-bb609.i.i: ; preds = %cond_next602.i.i
- br label %bb620.i.i
-bb620.i.i: ; preds = %bb620.i.i, %bb609.i.i
- %indvar166.i465.i = phi i32 [ %indvar.next167.i.i, %bb620.i.i ], [ 0, %bb609.i.i ] ; <i32> [#uses=1]
- %tmp640.i.i = call i32 (%struct.FILE*, i8*, ...)* @fscanf( %struct.FILE* %tmp61, i8* getelementptr ([5 x i8]* @.str584, i32 0, i32 0), [1024 x i8]* null ) ; <i32> [#uses=0]
- %tmp648.i.i = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp650.i468.i = icmp sgt i32 0, %tmp648.i.i ; <i1> [#uses=1]
- %tmp624.i469.i = call i32 (%struct.FILE*, i8*, ...)* @fscanf( %struct.FILE* %tmp61, i8* getelementptr ([5 x i8]* @.str584, i32 0, i32 0), [1024 x i8]* null ) ; <i32> [#uses=0]
- %indvar.next167.i.i = add i32 %indvar166.i465.i, 1 ; <i32> [#uses=1]
- br i1 %tmp650.i468.i, label %bb653.i.i.loopexit, label %bb620.i.i
-bb653.i.i.loopexit: ; preds = %bb620.i.i
- %tmp642.i466.i = add i32 0, 1 ; <i32> [#uses=1]
- br label %bb894.i.i
-bb661.i.i: ; preds = %cond_next602.i.i
- ret void
-bb894.loopexit.i.i: ; preds = %bb.i.i347.i
- br label %bb894.i.i
-bb894.i.i: ; preds = %bb894.loopexit.i.i, %bb653.i.i.loopexit, %bb581.i.i, %bb177.i393.i
- %pinctr.0.i.i = phi i32 [ 0, %bb894.loopexit.i.i ], [ %tmp642.i466.i, %bb653.i.i.loopexit ], [ %pinctr.0.i.i, %bb177.i393.i ], [ %pinctr.0.i.i, %bb581.i.i ] ; <i32> [#uses=2]
- %soft.0.i.i = phi i32 [ undef, %bb894.loopexit.i.i ], [ %soft.0.i.i, %bb653.i.i.loopexit ], [ 0, %bb177.i393.i ], [ 1, %bb581.i.i ] ; <i32> [#uses=1]
- %cell.0.i.i = phi i32 [ 0, %bb894.loopexit.i.i ], [ %cell.0.i.i, %bb653.i.i.loopexit ], [ 0, %bb177.i393.i ], [ %tmp394.i418.i, %bb581.i.i ] ; <i32> [#uses=2]
- %ptr.0.i.i = phi %struct.cellbox* [ undef, %bb894.loopexit.i.i ], [ %ptr.0.i.i, %bb653.i.i.loopexit ], [ null, %bb177.i393.i ], [ %tmp397.i420.i, %bb581.i.i ] ; <%struct.cellbox*> [#uses=1]
- br i1 false, label %bb.i350.i, label %bb902.i502.i
-bb902.i502.i: ; preds = %bb894.i.i
- ret void
-cond_false674: ; preds = %entry
- ret void
-}
-
- %struct.III_psy_xmin = type { [22 x double], [13 x [3 x double]] }
- %struct.III_scalefac_t = type { [22 x i32], [13 x [3 x i32]] }
- %struct.gr_info = type { i32, i32, i32, i32, i32, i32, i32, i32, [3 x i32], [3 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32*, [4 x i32] }
- %struct.lame_global_flags = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, float, float, float, float, i32, i32, i32, i32, i32, i32, i32, i32 }
-@scalefac_band.1 = external global [14 x i32] ; <[14 x i32]*> [#uses=2]
-
-declare fastcc i32 @init_outer_loop(%struct.lame_global_flags*, double*, %struct.gr_info*)
-
-define fastcc void @outer_loop(%struct.lame_global_flags* %gfp, double* %xr, i32 %targ_bits, double* %best_noise, %struct.III_psy_xmin* %l3_xmin, i32* %l3_enc, %struct.III_scalefac_t* %scalefac, %struct.gr_info* %cod_info, i32 %ch) {
-entry:
- %cod_info.182 = getelementptr %struct.gr_info* %cod_info, i32 0, i32 1 ; <i32*> [#uses=1]
- br label %bb
-bb: ; preds = %bb226, %entry
- %save_cod_info.1.1 = phi i32 [ undef, %entry ], [ %save_cod_info.1.1, %bb226 ] ; <i32> [#uses=2]
- br i1 false, label %cond_next, label %cond_true
-cond_true: ; preds = %bb
- ret void
-cond_next: ; preds = %bb
- br i1 false, label %cond_next144, label %cond_false
-cond_false: ; preds = %cond_next
- ret void
-cond_next144: ; preds = %cond_next
- br i1 false, label %cond_next205, label %cond_true163
-cond_true163: ; preds = %cond_next144
- br i1 false, label %bb34.i, label %bb.i53
-bb.i53: ; preds = %cond_true163
- ret void
-bb34.i: ; preds = %cond_true163
- %tmp37.i55 = load i32* null, align 4 ; <i32> [#uses=1]
- br i1 false, label %bb65.preheader.i, label %bb78.i
-bb65.preheader.i: ; preds = %bb34.i
- br label %bb65.outer.us.i
-bb65.outer.us.i: ; preds = %bb65.outer.us.i, %bb65.preheader.i
- br i1 false, label %bb78.i, label %bb65.outer.us.i
-bb78.i: ; preds = %bb65.outer.us.i, %bb34.i
- br i1 false, label %bb151.i.preheader, label %bb90.i
-bb90.i: ; preds = %bb78.i
- ret void
-bb151.i.preheader: ; preds = %bb78.i
- br label %bb151.i
-bb151.i: ; preds = %bb226.backedge.i, %bb151.i.preheader
- %i.154.i = phi i32 [ %tmp15747.i, %bb226.backedge.i ], [ 0, %bb151.i.preheader ] ; <i32> [#uses=2]
- %tmp15747.i = add i32 %i.154.i, 1 ; <i32> [#uses=3]
- br i1 false, label %bb155.i, label %bb226.backedge.i
-bb226.backedge.i: ; preds = %cond_next215.i, %bb151.i
- %tmp228.i71 = icmp slt i32 %tmp15747.i, 3 ; <i1> [#uses=1]
- br i1 %tmp228.i71, label %bb151.i, label %amp_scalefac_bands.exit
-bb155.i: ; preds = %cond_next215.i, %bb151.i
- %indvar90.i = phi i32 [ %indvar.next91.i, %cond_next215.i ], [ 0, %bb151.i ] ; <i32> [#uses=2]
- %sfb.3.reg2mem.0.i = add i32 %indvar90.i, %tmp37.i55 ; <i32> [#uses=4]
- %tmp161.i = getelementptr [4 x [21 x double]]* null, i32 0, i32 %tmp15747.i, i32 %sfb.3.reg2mem.0.i ; <double*> [#uses=1]
- %tmp162.i74 = load double* %tmp161.i, align 4 ; <double> [#uses=0]
- br i1 false, label %cond_true167.i, label %cond_next215.i
-cond_true167.i: ; preds = %bb155.i
- %tmp173.i = getelementptr %struct.III_scalefac_t* null, i32 0, i32 1, i32 %sfb.3.reg2mem.0.i, i32 %i.154.i ; <i32*> [#uses=1]
- store i32 0, i32* %tmp173.i, align 4
- %tmp182.1.i = getelementptr [14 x i32]* @scalefac_band.1, i32 0, i32 %sfb.3.reg2mem.0.i ; <i32*> [#uses=0]
- %tmp185.i78 = add i32 %sfb.3.reg2mem.0.i, 1 ; <i32> [#uses=1]
- %tmp187.1.i = getelementptr [14 x i32]* @scalefac_band.1, i32 0, i32 %tmp185.i78 ; <i32*> [#uses=1]
- %tmp188.i = load i32* %tmp187.1.i, align 4 ; <i32> [#uses=1]
- %tmp21153.i = icmp slt i32 0, %tmp188.i ; <i1> [#uses=1]
- br i1 %tmp21153.i, label %bb190.preheader.i, label %cond_next215.i
-bb190.preheader.i: ; preds = %cond_true167.i
- ret void
-cond_next215.i: ; preds = %cond_true167.i, %bb155.i
- %indvar.next91.i = add i32 %indvar90.i, 1 ; <i32> [#uses=2]
- %exitcond99.i87 = icmp eq i32 %indvar.next91.i, 0 ; <i1> [#uses=1]
- br i1 %exitcond99.i87, label %bb226.backedge.i, label %bb155.i
-amp_scalefac_bands.exit: ; preds = %bb226.backedge.i
- br i1 false, label %bb19.i, label %bb.i16
-bb.i16: ; preds = %amp_scalefac_bands.exit
- ret void
-bb19.i: ; preds = %amp_scalefac_bands.exit
- br i1 false, label %bb40.outer.i, label %cond_next205
-bb40.outer.i: ; preds = %bb19.i
- ret void
-cond_next205: ; preds = %bb19.i, %cond_next144
- br i1 false, label %bb226, label %cond_true210
-cond_true210: ; preds = %cond_next205
- br i1 false, label %bb226, label %cond_true217
-cond_true217: ; preds = %cond_true210
- %tmp221 = call fastcc i32 @init_outer_loop( %struct.lame_global_flags* %gfp, double* %xr, %struct.gr_info* %cod_info ) ; <i32> [#uses=0]
- ret void
-bb226: ; preds = %cond_true210, %cond_next205
- br i1 false, label %bb231, label %bb
-bb231: ; preds = %bb226
- store i32 %save_cod_info.1.1, i32* %cod_info.182
- ret void
-}
-
- %struct.III_psy_xmin = type { [22 x double], [13 x [3 x double]] }
- %struct.III_scalefac_t = type { [22 x i32], [13 x [3 x i32]] }
- %struct.gr_info = type { i32, i32, i32, i32, i32, i32, i32, i32, [3 x i32], [3 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32*, [4 x i32] }
- %struct.lame_global_flags = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, float, float, float, float, i32, i32, i32, i32, i32, i32, i32, i32 }
-
-define fastcc void @outer_loop2(%struct.lame_global_flags* %gfp, double* %xr, i32 %targ_bits, double* %best_noise, %struct.III_psy_xmin* %l3_xmin, i32* %l3_enc, %struct.III_scalefac_t* %scalefac, %struct.gr_info* %cod_info, i32 %ch) {
-entry:
- %cod_info.20128.1 = getelementptr %struct.gr_info* %cod_info, i32 0, i32 20, i32 1 ; <i32*> [#uses=1]
- %cod_info.20128.2 = getelementptr %struct.gr_info* %cod_info, i32 0, i32 20, i32 2 ; <i32*> [#uses=1]
- %cod_info.20128.3 = getelementptr %struct.gr_info* %cod_info, i32 0, i32 20, i32 3 ; <i32*> [#uses=1]
- br label %bb
-bb: ; preds = %bb226, %entry
- %save_cod_info.19.1 = phi i32* [ undef, %entry ], [ %save_cod_info.19.0, %bb226 ] ; <i32*> [#uses=1]
- %save_cod_info.0.1 = phi i32 [ undef, %entry ], [ %save_cod_info.0.0, %bb226 ] ; <i32> [#uses=1]
- br i1 false, label %cond_next144, label %cond_false
-cond_false: ; preds = %bb
- br i1 false, label %cond_true56, label %cond_false78
-cond_true56: ; preds = %cond_false
- br i1 false, label %inner_loop.exit, label %cond_next85
-inner_loop.exit: ; preds = %cond_true56
- br i1 false, label %cond_next104, label %cond_false96
-cond_false78: ; preds = %cond_false
- ret void
-cond_next85: ; preds = %cond_true56
- ret void
-cond_false96: ; preds = %inner_loop.exit
- ret void
-cond_next104: ; preds = %inner_loop.exit
- br i1 false, label %cond_next144, label %cond_false110
-cond_false110: ; preds = %cond_next104
- ret void
-cond_next144: ; preds = %cond_next104, %bb
- %save_cod_info.19.0 = phi i32* [ %save_cod_info.19.1, %bb ], [ null, %cond_next104 ] ; <i32*> [#uses=1]
- %save_cod_info.4.0 = phi i32 [ 0, %bb ], [ 0, %cond_next104 ] ; <i32> [#uses=1]
- %save_cod_info.3.0 = phi i32 [ 0, %bb ], [ 0, %cond_next104 ] ; <i32> [#uses=1]
- %save_cod_info.2.0 = phi i32 [ 0, %bb ], [ 0, %cond_next104 ] ; <i32> [#uses=1]
- %save_cod_info.1.0 = phi i32 [ 0, %bb ], [ 0, %cond_next104 ] ; <i32> [#uses=1]
- %save_cod_info.0.0 = phi i32 [ %save_cod_info.0.1, %bb ], [ 0, %cond_next104 ] ; <i32> [#uses=1]
- %over.1 = phi i32 [ 0, %bb ], [ 0, %cond_next104 ] ; <i32> [#uses=1]
- %best_over.0 = phi i32 [ 0, %bb ], [ 0, %cond_next104 ] ; <i32> [#uses=1]
- %notdone.0 = phi i32 [ 0, %bb ], [ 0, %cond_next104 ] ; <i32> [#uses=1]
- %tmp147 = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp148 = icmp eq i32 %tmp147, 0 ; <i1> [#uses=1]
- %tmp153 = icmp eq i32 %over.1, 0 ; <i1> [#uses=1]
- %bothcond = and i1 %tmp148, %tmp153 ; <i1> [#uses=1]
- %notdone.2 = select i1 %bothcond, i32 0, i32 %notdone.0 ; <i32> [#uses=1]
- br i1 false, label %cond_next205, label %cond_true163
-cond_true163: ; preds = %cond_next144
- ret void
-cond_next205: ; preds = %cond_next144
- br i1 false, label %bb226, label %cond_true210
-cond_true210: ; preds = %cond_next205
- ret void
-bb226: ; preds = %cond_next205
- %tmp228 = icmp eq i32 %notdone.2, 0 ; <i1> [#uses=1]
- br i1 %tmp228, label %bb231, label %bb
-bb231: ; preds = %bb226
- store i32 %save_cod_info.1.0, i32* null
- store i32 %save_cod_info.2.0, i32* null
- store i32 %save_cod_info.3.0, i32* null
- store i32 %save_cod_info.4.0, i32* null
- store i32 0, i32* %cod_info.20128.1
- store i32 0, i32* %cod_info.20128.2
- store i32 0, i32* %cod_info.20128.3
- %tmp244245 = sitofp i32 %best_over.0 to double ; <double> [#uses=1]
- store double %tmp244245, double* %best_noise, align 4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2008-04-11-PHIofImpDef.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2008-04-11-PHIofImpDef.ll
deleted file mode 100644
index 33bd4de..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2008-04-11-PHIofImpDef.ll
+++ /dev/null
@@ -1,3544 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin
-
-declare void @foo(i8*, i8*, i32, i32, i32, i32, i32, i32, i32)
-
-define void @t() nounwind {
- br label %1
-; <label>:1 ; preds = %0
- br label %bb4351.i
-bb4351.i: ; preds = %1
- switch i32 0, label %bb4411.i [
- i32 1, label %bb4354.i
- i32 2, label %bb4369.i
- ]
-bb4354.i: ; preds = %bb4351.i
- br label %t.exit
-bb4369.i: ; preds = %bb4351.i
- br label %bb4374.i
-bb4374.i: ; preds = %bb4369.i
- br label %bb4411.i
-bb4411.i: ; preds = %bb4374.i, %bb4351.i
- %sf4083.0.i = phi i32 [ 0, %bb4374.i ], [ 6, %bb4351.i ] ; <i32> [#uses=8]
- br label %bb4498.i
-bb4498.i: ; preds = %bb4411.i
- %sfComp4077.1.i = phi i32 [ undef, %bb4411.i ] ; <i32> [#uses=2]
- %stComp4075.1.i = phi i32 [ undef, %bb4411.i ] ; <i32> [#uses=1]
- switch i32 0, label %bb4553.i [
- i32 1, label %bb4501.i
- i32 2, label %bb4521.i
- ]
-bb4501.i: ; preds = %bb4498.i
- %sfComp4077.1.reg2mem.0.i = phi i32 [ %sfComp4077.1.i, %bb4498.i ] ; <i32> [#uses=1]
- call void @foo( i8* null, i8* null, i32 %sfComp4077.1.reg2mem.0.i, i32 0, i32 8, i32 0, i32 0, i32 0, i32 0 ) nounwind
- br i1 false, label %UnifiedReturnBlock.i, label %bb4517.i
-bb4517.i: ; preds = %bb4501.i
- br label %t.exit
-bb4521.i: ; preds = %bb4498.i
- br label %bb4526.i
-bb4526.i: ; preds = %bb4521.i
- switch i32 0, label %bb4529.i [
- i32 6, label %bb4530.i
- i32 7, label %bb4530.i
- ]
-bb4529.i: ; preds = %bb4526.i
- br label %bb4530.i
-bb4530.i: ; preds = %bb4529.i, %bb4526.i, %bb4526.i
- br label %bb4553.i
-bb4553.i: ; preds = %bb4530.i, %bb4498.i
- %dt4080.0.i = phi i32 [ %stComp4075.1.i, %bb4530.i ], [ 7, %bb4498.i ] ; <i32> [#uses=32]
- %df4081.0.i = phi i32 [ %sfComp4077.1.i, %bb4530.i ], [ 8, %bb4498.i ] ; <i32> [#uses=17]
- switch i32 %sf4083.0.i, label %bb4559.i [
- i32 0, label %bb4558.i
- i32 1, label %bb4558.i
- i32 2, label %bb4558.i
- i32 5, label %bb4561.i
- i32 6, label %bb4561.i
- i32 7, label %bb4561.i
- i32 9, label %bb4557.i
- ]
-bb4557.i: ; preds = %bb4553.i
- switch i32 %df4081.0.i, label %bb4569.i [
- i32 0, label %bb4568.i
- i32 1, label %bb4568.i
- i32 2, label %bb4568.i
- i32 5, label %bb4571.i
- i32 6, label %bb4571.i
- i32 7, label %bb4571.i
- i32 9, label %bb4567.i
- ]
-bb4558.i: ; preds = %bb4553.i, %bb4553.i, %bb4553.i
- switch i32 %df4081.0.i, label %bb4569.i [
- i32 0, label %bb4568.i
- i32 1, label %bb4568.i
- i32 2, label %bb4568.i
- i32 5, label %bb4571.i
- i32 6, label %bb4571.i
- i32 7, label %bb4571.i
- i32 9, label %bb4567.i
- ]
-bb4559.i: ; preds = %bb4553.i
- br label %bb4561.i
-bb4561.i: ; preds = %bb4559.i, %bb4553.i, %bb4553.i, %bb4553.i
- switch i32 %df4081.0.i, label %bb4569.i [
- i32 0, label %bb4568.i
- i32 1, label %bb4568.i
- i32 2, label %bb4568.i
- i32 5, label %bb4571.i
- i32 6, label %bb4571.i
- i32 7, label %bb4571.i
- i32 9, label %bb4567.i
- ]
-bb4567.i: ; preds = %bb4561.i, %bb4558.i, %bb4557.i
- br label %bb4580.i
-bb4568.i: ; preds = %bb4561.i, %bb4561.i, %bb4561.i, %bb4558.i, %bb4558.i, %bb4558.i, %bb4557.i, %bb4557.i, %bb4557.i
- br label %bb4580.i
-bb4569.i: ; preds = %bb4561.i, %bb4558.i, %bb4557.i
- br label %bb4571.i
-bb4571.i: ; preds = %bb4569.i, %bb4561.i, %bb4561.i, %bb4561.i, %bb4558.i, %bb4558.i, %bb4558.i, %bb4557.i, %bb4557.i, %bb4557.i
- br label %bb4580.i
-bb4580.i: ; preds = %bb4571.i, %bb4568.i, %bb4567.i
- br i1 false, label %bb4611.i, label %bb4593.i
-bb4593.i: ; preds = %bb4580.i
- br i1 false, label %bb4610.i, label %bb4611.i
-bb4610.i: ; preds = %bb4593.i
- br label %bb4611.i
-bb4611.i: ; preds = %bb4610.i, %bb4593.i, %bb4580.i
- br i1 false, label %bb4776.i, label %bb4620.i
-bb4620.i: ; preds = %bb4611.i
- switch i32 0, label %bb4776.i [
- i32 0, label %bb4691.i
- i32 2, label %bb4740.i
- i32 4, label %bb4755.i
- i32 8, label %bb4622.i
- i32 9, label %bb4622.i
- i32 10, label %bb4629.i
- i32 11, label %bb4629.i
- i32 12, label %bb4651.i
- i32 13, label %bb4651.i
- i32 14, label %bb4665.i
- i32 15, label %bb4665.i
- i32 16, label %bb4691.i
- i32 17, label %bb4691.i
- i32 18, label %bb4712.i
- i32 19, label %bb4712.i
- i32 22, label %bb4733.i
- i32 23, label %bb4733.i
- ]
-bb4622.i: ; preds = %bb4620.i, %bb4620.i
- br i1 false, label %bb4628.i, label %bb4776.i
-bb4628.i: ; preds = %bb4622.i
- br label %bb4776.i
-bb4629.i: ; preds = %bb4620.i, %bb4620.i
- br i1 false, label %bb4776.i, label %bb4644.i
-bb4644.i: ; preds = %bb4629.i
- br i1 false, label %bb4650.i, label %bb4776.i
-bb4650.i: ; preds = %bb4644.i
- br label %bb4776.i
-bb4651.i: ; preds = %bb4620.i, %bb4620.i
- br i1 false, label %bb4776.i, label %bb4658.i
-bb4658.i: ; preds = %bb4651.i
- br i1 false, label %bb4664.i, label %bb4776.i
-bb4664.i: ; preds = %bb4658.i
- br label %bb4776.i
-bb4665.i: ; preds = %bb4620.i, %bb4620.i
- br i1 false, label %bb4776.i, label %bb4684.i
-bb4684.i: ; preds = %bb4665.i
- br i1 false, label %bb4690.i, label %bb4776.i
-bb4690.i: ; preds = %bb4684.i
- br label %bb4776.i
-bb4691.i: ; preds = %bb4620.i, %bb4620.i, %bb4620.i
- br i1 false, label %bb4776.i, label %bb4698.i
-bb4698.i: ; preds = %bb4691.i
- br i1 false, label %bb4711.i, label %bb4776.i
-bb4711.i: ; preds = %bb4698.i
- br label %bb4776.i
-bb4712.i: ; preds = %bb4620.i, %bb4620.i
- br i1 false, label %bb4776.i, label %bb4726.i
-bb4726.i: ; preds = %bb4712.i
- br i1 false, label %bb4732.i, label %bb4776.i
-bb4732.i: ; preds = %bb4726.i
- br label %bb4776.i
-bb4733.i: ; preds = %bb4620.i, %bb4620.i
- br i1 false, label %bb4739.i, label %bb4776.i
-bb4739.i: ; preds = %bb4733.i
- br label %bb4776.i
-bb4740.i: ; preds = %bb4620.i
- br i1 false, label %bb4776.i, label %bb4754.i
-bb4754.i: ; preds = %bb4740.i
- br label %bb4776.i
-bb4755.i: ; preds = %bb4620.i
- br i1 false, label %bb4776.i, label %bb4774.i
-bb4774.i: ; preds = %bb4755.i
- br label %bb4776.i
-bb4776.i: ; preds = %bb4774.i, %bb4755.i, %bb4754.i, %bb4740.i, %bb4739.i, %bb4733.i, %bb4732.i, %bb4726.i, %bb4712.i, %bb4711.i, %bb4698.i, %bb4691.i, %bb4690.i, %bb4684.i, %bb4665.i, %bb4664.i, %bb4658.i, %bb4651.i, %bb4650.i, %bb4644.i, %bb4629.i, %bb4628.i, %bb4622.i, %bb4620.i, %bb4611.i
- switch i32 0, label %bb4790.i [
- i32 0, label %bb4786.i
- i32 1, label %bb4784.i
- i32 3, label %bb4784.i
- i32 5, label %bb4784.i
- i32 6, label %bb4785.i
- i32 7, label %bb4785.i
- i32 8, label %bb4791.i
- i32 9, label %bb4791.i
- i32 10, label %bb4791.i
- i32 11, label %bb4791.i
- i32 12, label %bb4791.i
- i32 13, label %bb4791.i
- i32 14, label %bb4791.i
- i32 15, label %bb4791.i
- i32 16, label %bb4791.i
- i32 17, label %bb4791.i
- i32 18, label %bb4791.i
- i32 19, label %bb4791.i
- ]
-bb4784.i: ; preds = %bb4776.i, %bb4776.i, %bb4776.i
- br label %bb4791.i
-bb4785.i: ; preds = %bb4776.i, %bb4776.i
- br label %bb4791.i
-bb4786.i: ; preds = %bb4776.i
- br label %bb4791.i
-bb4790.i: ; preds = %bb4776.i
- br label %bb4791.i
-bb4791.i: ; preds = %bb4790.i, %bb4786.i, %bb4785.i, %bb4784.i, %bb4776.i, %bb4776.i, %bb4776.i, %bb4776.i, %bb4776.i, %bb4776.i, %bb4776.i, %bb4776.i, %bb4776.i, %bb4776.i, %bb4776.i, %bb4776.i
- switch i32 %dt4080.0.i, label %bb4803.i [
- i32 0, label %bb4799.i
- i32 6, label %bb4794.i
- i32 7, label %bb4794.i
- i32 8, label %bb4804.i
- i32 9, label %bb4804.i
- i32 10, label %bb4804.i
- i32 11, label %bb4804.i
- i32 12, label %bb4804.i
- i32 13, label %bb4804.i
- i32 14, label %bb4804.i
- i32 15, label %bb4804.i
- i32 16, label %bb4804.i
- i32 17, label %bb4804.i
- i32 18, label %bb4804.i
- i32 19, label %bb4804.i
- ]
-bb4794.i: ; preds = %bb4791.i, %bb4791.i
- br i1 false, label %bb4809.i, label %bb4819.i
-bb4799.i: ; preds = %bb4791.i
- br i1 false, label %bb4809.i, label %bb4819.i
-bb4803.i: ; preds = %bb4791.i
- br label %bb4804.i
-bb4804.i: ; preds = %bb4803.i, %bb4791.i, %bb4791.i, %bb4791.i, %bb4791.i, %bb4791.i, %bb4791.i, %bb4791.i, %bb4791.i, %bb4791.i, %bb4791.i, %bb4791.i, %bb4791.i
- br i1 false, label %bb4809.i, label %bb4819.i
-bb4809.i: ; preds = %bb4804.i, %bb4799.i, %bb4794.i
- switch i32 %df4081.0.i, label %bb71.i.i [
- i32 3, label %bb61.i.i
- i32 4, label %bb.i.i
- i32 5, label %bb.i.i
- i32 6, label %bb.i.i
- i32 7, label %bb.i.i
- i32 8, label %bb38.i.i
- i32 9, label %bb38.i.i
- i32 10, label %bb50.i.i
- i32 11, label %bb40.i.i
- i32 16, label %bb38.i.i
- ]
-bb.i.i: ; preds = %bb4809.i, %bb4809.i, %bb4809.i, %bb4809.i
- br label %bb403.i.i
-bb38.i.i: ; preds = %bb4809.i, %bb4809.i, %bb4809.i
- br label %bb403.i.i
-bb40.i.i: ; preds = %bb4809.i
- br label %bb403.i.i
-bb50.i.i: ; preds = %bb4809.i
- br label %bb403.i.i
-bb61.i.i: ; preds = %bb4809.i
- br label %bb403.i.i
-bb71.i.i: ; preds = %bb4809.i
- br label %bb403.i.i
-bb403.i.i: ; preds = %bb71.i.i, %bb61.i.i, %bb50.i.i, %bb40.i.i, %bb38.i.i, %bb.i.i
- br i1 false, label %bb408.i.i, label %bb502.i.i
-bb408.i.i: ; preds = %bb403.i.i
- br label %bb708.i.i
-bb502.i.i: ; preds = %bb403.i.i
- br label %bb708.i.i
-bb708.i.i: ; preds = %bb502.i.i, %bb408.i.i
- switch i32 0, label %bb758.i.i [
- i32 0, label %bb710.i.i
- i32 1, label %bb713.i.i
- i32 2, label %bb718.i.i
- i32 3, label %bb721.i.i
- i32 4, label %bb726.i.i
- i32 5, label %bb729.i.i
- i32 8, label %bb732.i.i
- i32 9, label %bb732.i.i
- i32 10, label %bb737.i.i
- i32 11, label %bb737.i.i
- i32 12, label %bb742.i.i
- i32 13, label %bb742.i.i
- i32 14, label %bb745.i.i
- i32 15, label %bb745.i.i
- i32 16, label %bb750.i.i
- i32 17, label %bb750.i.i
- i32 18, label %bb753.i.i
- i32 19, label %bb753.i.i
- i32 22, label %bb750.i.i
- i32 23, label %bb750.i.i
- ]
-bb710.i.i: ; preds = %bb708.i.i
- br label %bb758.i.i
-bb713.i.i: ; preds = %bb708.i.i
- br label %bb758.i.i
-bb718.i.i: ; preds = %bb708.i.i
- br label %bb758.i.i
-bb721.i.i: ; preds = %bb708.i.i
- br label %bb758.i.i
-bb726.i.i: ; preds = %bb708.i.i
- br label %bb758.i.i
-bb729.i.i: ; preds = %bb708.i.i
- br label %bb758.i.i
-bb732.i.i: ; preds = %bb708.i.i, %bb708.i.i
- br label %bb758.i.i
-bb737.i.i: ; preds = %bb708.i.i, %bb708.i.i
- br label %bb758.i.i
-bb742.i.i: ; preds = %bb708.i.i, %bb708.i.i
- br label %bb758.i.i
-bb745.i.i: ; preds = %bb708.i.i, %bb708.i.i
- br label %bb758.i.i
-bb750.i.i: ; preds = %bb708.i.i, %bb708.i.i, %bb708.i.i, %bb708.i.i
- br label %bb758.i.i
-bb753.i.i: ; preds = %bb708.i.i, %bb708.i.i
- br label %bb758.i.i
-bb758.i.i: ; preds = %bb753.i.i, %bb750.i.i, %bb745.i.i, %bb742.i.i, %bb737.i.i, %bb732.i.i, %bb729.i.i, %bb726.i.i, %bb721.i.i, %bb718.i.i, %bb713.i.i, %bb710.i.i, %bb708.i.i
- switch i32 %dt4080.0.i, label %bb808.i.i [
- i32 0, label %bb760.i.i
- i32 1, label %bb763.i.i
- i32 2, label %bb768.i.i
- i32 3, label %bb771.i.i
- i32 4, label %bb776.i.i
- i32 5, label %bb779.i.i
- i32 8, label %bb782.i.i
- i32 9, label %bb782.i.i
- i32 10, label %bb787.i.i
- i32 11, label %bb787.i.i
- i32 12, label %bb792.i.i
- i32 13, label %bb792.i.i
- i32 14, label %bb795.i.i
- i32 15, label %bb795.i.i
- i32 16, label %bb800.i.i
- i32 17, label %bb800.i.i
- i32 18, label %bb803.i.i
- i32 19, label %bb803.i.i
- i32 22, label %bb800.i.i
- i32 23, label %bb800.i.i
- ]
-bb760.i.i: ; preds = %bb758.i.i
- br label %bb811.i.i
-bb763.i.i: ; preds = %bb758.i.i
- br label %bb811.i.i
-bb768.i.i: ; preds = %bb758.i.i
- br label %bb811.i.i
-bb771.i.i: ; preds = %bb758.i.i
- br label %bb811.i.i
-bb776.i.i: ; preds = %bb758.i.i
- br label %bb811.i.i
-bb779.i.i: ; preds = %bb758.i.i
- br label %bb811.i.i
-bb782.i.i: ; preds = %bb758.i.i, %bb758.i.i
- br label %bb811.i.i
-bb787.i.i: ; preds = %bb758.i.i, %bb758.i.i
- br label %bb811.i.i
-bb792.i.i: ; preds = %bb758.i.i, %bb758.i.i
- br label %bb811.i.i
-bb795.i.i: ; preds = %bb758.i.i, %bb758.i.i
- br label %bb811.i.i
-bb800.i.i: ; preds = %bb758.i.i, %bb758.i.i, %bb758.i.i, %bb758.i.i
- br label %bb811.i.i
-bb803.i.i: ; preds = %bb758.i.i, %bb758.i.i
- br label %bb808.i.i
-bb808.i.i: ; preds = %bb803.i.i, %bb758.i.i
- br label %bb811.i.i
-bb811.i.i: ; preds = %bb808.i.i, %bb800.i.i, %bb795.i.i, %bb792.i.i, %bb787.i.i, %bb782.i.i, %bb779.i.i, %bb776.i.i, %bb771.i.i, %bb768.i.i, %bb763.i.i, %bb760.i.i
- switch i32 0, label %bb928.i.i [
- i32 0, label %bb813.i.i
- i32 1, label %bb833.i.i
- i32 2, label %bb813.i.i
- i32 3, label %bb833.i.i
- i32 4, label %bb813.i.i
- i32 5, label %bb813.i.i
- i32 8, label %bb872.i.i
- i32 9, label %bb872.i.i
- i32 10, label %bb890.i.i
- i32 11, label %bb890.i.i
- i32 12, label %bb813.i.i
- i32 13, label %bb813.i.i
- i32 14, label %bb908.i.i
- i32 15, label %bb908.i.i
- i32 16, label %bb813.i.i
- i32 17, label %bb813.i.i
- i32 18, label %bb908.i.i
- i32 19, label %bb908.i.i
- i32 22, label %bb813.i.i
- i32 23, label %bb813.i.i
- ]
-bb813.i.i: ; preds = %bb811.i.i, %bb811.i.i, %bb811.i.i, %bb811.i.i, %bb811.i.i, %bb811.i.i, %bb811.i.i, %bb811.i.i, %bb811.i.i, %bb811.i.i
- switch i32 %dt4080.0.i, label %bb1065.i.i [
- i32 0, label %bb930.i.i
- i32 1, label %bb950.i.i
- i32 2, label %bb930.i.i
- i32 3, label %bb950.i.i
- i32 4, label %bb989.i.i
- i32 5, label %bb989.i.i
- i32 8, label %bb1009.i.i
- i32 9, label %bb1009.i.i
- i32 10, label %bb1027.i.i
- i32 11, label %bb1027.i.i
- i32 12, label %bb930.i.i
- i32 13, label %bb930.i.i
- i32 14, label %bb1045.i.i
- i32 15, label %bb1045.i.i
- i32 16, label %bb930.i.i
- i32 17, label %bb930.i.i
- i32 18, label %bb1045.i.i
- i32 19, label %bb1045.i.i
- i32 22, label %bb930.i.i
- i32 23, label %bb930.i.i
- ]
-bb833.i.i: ; preds = %bb811.i.i, %bb811.i.i
- switch i32 %dt4080.0.i, label %bb1065.i.i [
- i32 0, label %bb930.i.i
- i32 1, label %bb950.i.i
- i32 2, label %bb930.i.i
- i32 3, label %bb950.i.i
- i32 4, label %bb989.i.i
- i32 5, label %bb989.i.i
- i32 8, label %bb1009.i.i
- i32 9, label %bb1009.i.i
- i32 10, label %bb1027.i.i
- i32 11, label %bb1027.i.i
- i32 12, label %bb930.i.i
- i32 13, label %bb930.i.i
- i32 14, label %bb1045.i.i
- i32 15, label %bb1045.i.i
- i32 16, label %bb930.i.i
- i32 17, label %bb930.i.i
- i32 18, label %bb1045.i.i
- i32 19, label %bb1045.i.i
- i32 22, label %bb930.i.i
- i32 23, label %bb930.i.i
- ]
-bb872.i.i: ; preds = %bb811.i.i, %bb811.i.i
- switch i32 %dt4080.0.i, label %bb1065.i.i [
- i32 0, label %bb930.i.i
- i32 1, label %bb950.i.i
- i32 2, label %bb930.i.i
- i32 3, label %bb950.i.i
- i32 4, label %bb989.i.i
- i32 5, label %bb989.i.i
- i32 8, label %bb1009.i.i
- i32 9, label %bb1009.i.i
- i32 10, label %bb1027.i.i
- i32 11, label %bb1027.i.i
- i32 12, label %bb930.i.i
- i32 13, label %bb930.i.i
- i32 14, label %bb1045.i.i
- i32 15, label %bb1045.i.i
- i32 16, label %bb930.i.i
- i32 17, label %bb930.i.i
- i32 18, label %bb1045.i.i
- i32 19, label %bb1045.i.i
- i32 22, label %bb930.i.i
- i32 23, label %bb930.i.i
- ]
-bb890.i.i: ; preds = %bb811.i.i, %bb811.i.i
- switch i32 %dt4080.0.i, label %bb1065.i.i [
- i32 0, label %bb930.i.i
- i32 1, label %bb950.i.i
- i32 2, label %bb930.i.i
- i32 3, label %bb950.i.i
- i32 4, label %bb989.i.i
- i32 5, label %bb989.i.i
- i32 8, label %bb1009.i.i
- i32 9, label %bb1009.i.i
- i32 10, label %bb1027.i.i
- i32 11, label %bb1027.i.i
- i32 12, label %bb930.i.i
- i32 13, label %bb930.i.i
- i32 14, label %bb1045.i.i
- i32 15, label %bb1045.i.i
- i32 16, label %bb930.i.i
- i32 17, label %bb930.i.i
- i32 18, label %bb1045.i.i
- i32 19, label %bb1045.i.i
- i32 22, label %bb930.i.i
- i32 23, label %bb930.i.i
- ]
-bb908.i.i: ; preds = %bb811.i.i, %bb811.i.i, %bb811.i.i, %bb811.i.i
- br label %bb928.i.i
-bb928.i.i: ; preds = %bb908.i.i, %bb811.i.i
- switch i32 %dt4080.0.i, label %bb1065.i.i [
- i32 0, label %bb930.i.i
- i32 1, label %bb950.i.i
- i32 2, label %bb930.i.i
- i32 3, label %bb950.i.i
- i32 4, label %bb989.i.i
- i32 5, label %bb989.i.i
- i32 8, label %bb1009.i.i
- i32 9, label %bb1009.i.i
- i32 10, label %bb1027.i.i
- i32 11, label %bb1027.i.i
- i32 12, label %bb930.i.i
- i32 13, label %bb930.i.i
- i32 14, label %bb1045.i.i
- i32 15, label %bb1045.i.i
- i32 16, label %bb930.i.i
- i32 17, label %bb930.i.i
- i32 18, label %bb1045.i.i
- i32 19, label %bb1045.i.i
- i32 22, label %bb930.i.i
- i32 23, label %bb930.i.i
- ]
-bb930.i.i: ; preds = %bb928.i.i, %bb928.i.i, %bb928.i.i, %bb928.i.i, %bb928.i.i, %bb928.i.i, %bb928.i.i, %bb928.i.i, %bb890.i.i, %bb890.i.i, %bb890.i.i, %bb890.i.i, %bb890.i.i, %bb890.i.i, %bb890.i.i, %bb890.i.i, %bb872.i.i, %bb872.i.i, %bb872.i.i, %bb872.i.i, %bb872.i.i, %bb872.i.i, %bb872.i.i, %bb872.i.i, %bb833.i.i, %bb833.i.i, %bb833.i.i, %bb833.i.i, %bb833.i.i, %bb833.i.i, %bb833.i.i, %bb833.i.i, %bb813.i.i, %bb813.i.i, %bb813.i.i, %bb813.i.i, %bb813.i.i, %bb813.i.i, %bb813.i.i, %bb813.i.i
- br label %bb5235.i
-bb950.i.i: ; preds = %bb928.i.i, %bb928.i.i, %bb890.i.i, %bb890.i.i, %bb872.i.i, %bb872.i.i, %bb833.i.i, %bb833.i.i, %bb813.i.i, %bb813.i.i
- br label %bb5235.i
-bb989.i.i: ; preds = %bb928.i.i, %bb928.i.i, %bb890.i.i, %bb890.i.i, %bb872.i.i, %bb872.i.i, %bb833.i.i, %bb833.i.i, %bb813.i.i, %bb813.i.i
- br label %bb5235.i
-bb1009.i.i: ; preds = %bb928.i.i, %bb928.i.i, %bb890.i.i, %bb890.i.i, %bb872.i.i, %bb872.i.i, %bb833.i.i, %bb833.i.i, %bb813.i.i, %bb813.i.i
- br label %bb5235.i
-bb1027.i.i: ; preds = %bb928.i.i, %bb928.i.i, %bb890.i.i, %bb890.i.i, %bb872.i.i, %bb872.i.i, %bb833.i.i, %bb833.i.i, %bb813.i.i, %bb813.i.i
- br label %bb5235.i
-bb1045.i.i: ; preds = %bb928.i.i, %bb928.i.i, %bb928.i.i, %bb928.i.i, %bb890.i.i, %bb890.i.i, %bb890.i.i, %bb890.i.i, %bb872.i.i, %bb872.i.i, %bb872.i.i, %bb872.i.i, %bb833.i.i, %bb833.i.i, %bb833.i.i, %bb833.i.i, %bb813.i.i, %bb813.i.i, %bb813.i.i, %bb813.i.i
- br label %bb1065.i.i
-bb1065.i.i: ; preds = %bb1045.i.i, %bb928.i.i, %bb890.i.i, %bb872.i.i, %bb833.i.i, %bb813.i.i
- br label %bb5235.i
-bb4819.i: ; preds = %bb4804.i, %bb4799.i, %bb4794.i
- br i1 false, label %bb5208.i, label %bb5011.i
-bb5011.i: ; preds = %bb4819.i
- switch i32 0, label %bb5039.i [
- i32 10, label %bb5016.i
- i32 3, label %bb5103.i
- ]
-bb5016.i: ; preds = %bb5011.i
- br i1 false, label %bb5103.i, label %bb5039.i
-bb5039.i: ; preds = %bb5016.i, %bb5011.i
- switch i32 0, label %bb5052.i [
- i32 3, label %bb5103.i
- i32 10, label %bb5103.i
- ]
-bb5052.i: ; preds = %bb5039.i
- br i1 false, label %bb5103.i, label %bb5065.i
-bb5065.i: ; preds = %bb5052.i
- br i1 false, label %bb5078.i, label %bb5103.i
-bb5078.i: ; preds = %bb5065.i
- br i1 false, label %bb5103.i, label %bb5084.i
-bb5084.i: ; preds = %bb5078.i
- br i1 false, label %bb5103.i, label %bb5090.i
-bb5090.i: ; preds = %bb5084.i
- br i1 false, label %bb5103.i, label %bb5096.i
-bb5096.i: ; preds = %bb5090.i
- br i1 false, label %bb5103.i, label %bb5102.i
-bb5102.i: ; preds = %bb5096.i
- br label %bb5103.i
-bb5103.i: ; preds = %bb5102.i, %bb5096.i, %bb5090.i, %bb5084.i, %bb5078.i, %bb5065.i, %bb5052.i, %bb5039.i, %bb5039.i, %bb5016.i, %bb5011.i
- switch i32 0, label %bb5208.i [
- i32 0, label %bb5133.i
- i32 2, label %bb5162.i
- i32 4, label %bb5182.i
- i32 10, label %bb5113.i
- i32 11, label %bb5113.i
- i32 12, label %bb5121.i
- i32 13, label %bb5121.i
- i32 14, label %bb5125.i
- i32 15, label %bb5125.i
- i32 16, label %bb5133.i
- i32 17, label %bb5133.i
- i32 18, label %bb5146.i
- i32 19, label %bb5146.i
- ]
-bb5113.i: ; preds = %bb5103.i, %bb5103.i
- switch i32 %dt4080.0.i, label %bb5208.i [
- i32 8, label %bb5115.i
- i32 9, label %bb5115.i
- i32 12, label %bb5117.i
- i32 13, label %bb5117.i
- i32 14, label %bb5119.i
- i32 15, label %bb5119.i
- ]
-bb5115.i: ; preds = %bb5113.i, %bb5113.i
- br label %bb5208.i
-bb5117.i: ; preds = %bb5113.i, %bb5113.i
- br label %bb5208.i
-bb5119.i: ; preds = %bb5113.i, %bb5113.i
- br label %bb5208.i
-bb5121.i: ; preds = %bb5103.i, %bb5103.i
- switch i32 %dt4080.0.i, label %bb5208.i [
- i32 8, label %bb5123.i
- i32 9, label %bb5123.i
- ]
-bb5123.i: ; preds = %bb5121.i, %bb5121.i
- br label %bb5208.i
-bb5125.i: ; preds = %bb5103.i, %bb5103.i
- switch i32 %dt4080.0.i, label %bb5208.i [
- i32 8, label %bb5127.i
- i32 9, label %bb5127.i
- i32 12, label %bb5129.i
- i32 13, label %bb5129.i
- ]
-bb5127.i: ; preds = %bb5125.i, %bb5125.i
- br label %bb5208.i
-bb5129.i: ; preds = %bb5125.i, %bb5125.i
- br label %bb5208.i
-bb5133.i: ; preds = %bb5103.i, %bb5103.i, %bb5103.i
- switch i32 %dt4080.0.i, label %bb5208.i [
- i32 8, label %bb5135.i
- i32 9, label %bb5135.i
- i32 10, label %bb5137.i
- i32 11, label %bb5137.i
- i32 12, label %bb5139.i
- i32 13, label %bb5139.i
- i32 14, label %bb5143.i
- i32 15, label %bb5143.i
- ]
-bb5135.i: ; preds = %bb5133.i, %bb5133.i
- br label %bb5208.i
-bb5137.i: ; preds = %bb5133.i, %bb5133.i
- br label %bb5208.i
-bb5139.i: ; preds = %bb5133.i, %bb5133.i
- br label %bb5208.i
-bb5143.i: ; preds = %bb5133.i, %bb5133.i
- br label %bb5208.i
-bb5146.i: ; preds = %bb5103.i, %bb5103.i
- switch i32 %dt4080.0.i, label %bb5208.i [
- i32 0, label %bb5158.i
- i32 8, label %bb5148.i
- i32 9, label %bb5148.i
- i32 10, label %bb5150.i
- i32 11, label %bb5150.i
- i32 12, label %bb5152.i
- i32 13, label %bb5152.i
- i32 14, label %bb5155.i
- i32 15, label %bb5155.i
- i32 16, label %bb5158.i
- i32 17, label %bb5158.i
- ]
-bb5148.i: ; preds = %bb5146.i, %bb5146.i
- br label %bb5208.i
-bb5150.i: ; preds = %bb5146.i, %bb5146.i
- br label %bb5208.i
-bb5152.i: ; preds = %bb5146.i, %bb5146.i
- br label %bb5208.i
-bb5155.i: ; preds = %bb5146.i, %bb5146.i
- br label %bb5208.i
-bb5158.i: ; preds = %bb5146.i, %bb5146.i, %bb5146.i
- br label %bb5208.i
-bb5162.i: ; preds = %bb5103.i
- switch i32 %dt4080.0.i, label %bb5208.i [
- i32 0, label %bb5175.i
- i32 8, label %bb5164.i
- i32 9, label %bb5164.i
- i32 10, label %bb5166.i
- i32 11, label %bb5166.i
- i32 12, label %bb5168.i
- i32 13, label %bb5168.i
- i32 14, label %bb5172.i
- i32 15, label %bb5172.i
- i32 16, label %bb5175.i
- i32 17, label %bb5175.i
- i32 18, label %bb5179.i
- i32 19, label %bb5179.i
- ]
-bb5164.i: ; preds = %bb5162.i, %bb5162.i
- br label %bb5208.i
-bb5166.i: ; preds = %bb5162.i, %bb5162.i
- br label %bb5208.i
-bb5168.i: ; preds = %bb5162.i, %bb5162.i
- br label %bb5208.i
-bb5172.i: ; preds = %bb5162.i, %bb5162.i
- br label %bb5208.i
-bb5175.i: ; preds = %bb5162.i, %bb5162.i, %bb5162.i
- br label %bb5208.i
-bb5179.i: ; preds = %bb5162.i, %bb5162.i
- br label %bb5208.i
-bb5182.i: ; preds = %bb5103.i
- switch i32 %dt4080.0.i, label %bb5208.i [
- i32 0, label %bb5195.i
- i32 2, label %bb5202.i
- i32 8, label %bb5184.i
- i32 9, label %bb5184.i
- i32 10, label %bb5186.i
- i32 11, label %bb5186.i
- i32 12, label %bb5188.i
- i32 13, label %bb5188.i
- i32 14, label %bb5192.i
- i32 15, label %bb5192.i
- i32 16, label %bb5195.i
- i32 17, label %bb5195.i
- i32 18, label %bb5199.i
- i32 19, label %bb5199.i
- ]
-bb5184.i: ; preds = %bb5182.i, %bb5182.i
- br label %bb5208.i
-bb5186.i: ; preds = %bb5182.i, %bb5182.i
- br label %bb5208.i
-bb5188.i: ; preds = %bb5182.i, %bb5182.i
- br label %bb5208.i
-bb5192.i: ; preds = %bb5182.i, %bb5182.i
- br label %bb5208.i
-bb5195.i: ; preds = %bb5182.i, %bb5182.i, %bb5182.i
- br label %bb5208.i
-bb5199.i: ; preds = %bb5182.i, %bb5182.i
- br label %bb5208.i
-bb5202.i: ; preds = %bb5182.i
- br label %bb5208.i
-bb5208.i: ; preds = %bb5202.i, %bb5199.i, %bb5195.i, %bb5192.i, %bb5188.i, %bb5186.i, %bb5184.i, %bb5182.i, %bb5179.i, %bb5175.i, %bb5172.i, %bb5168.i, %bb5166.i, %bb5164.i, %bb5162.i, %bb5158.i, %bb5155.i, %bb5152.i, %bb5150.i, %bb5148.i, %bb5146.i, %bb5143.i, %bb5139.i, %bb5137.i, %bb5135.i, %bb5133.i, %bb5129.i, %bb5127.i, %bb5125.i, %bb5123.i, %bb5121.i, %bb5119.i, %bb5117.i, %bb5115.i, %bb5113.i, %bb5103.i, %bb4819.i
- switch i32 0, label %bb5221.i [
- i32 0, label %bb5210.i
- i32 1, label %bb5211.i
- i32 2, label %bb5212.i
- i32 3, label %bb5213.i
- i32 4, label %bb5214.i
- i32 5, label %bb5215.i
- i32 6, label %bb5217.i
- i32 7, label %bb5216.i
- i32 12, label %bb5218.i
- i32 13, label %bb5218.i
- i32 14, label %bb5219.i
- i32 15, label %bb5219.i
- i32 16, label %bb5210.i
- i32 17, label %bb5210.i
- i32 22, label %bb5210.i
- i32 23, label %bb5210.i
- ]
-bb5210.i: ; preds = %bb5208.i, %bb5208.i, %bb5208.i, %bb5208.i, %bb5208.i
- br label %bb5224.i
-bb5211.i: ; preds = %bb5208.i
- br label %bb5224.i
-bb5212.i: ; preds = %bb5208.i
- br label %bb5224.i
-bb5213.i: ; preds = %bb5208.i
- br label %bb5224.i
-bb5214.i: ; preds = %bb5208.i
- br label %bb5224.i
-bb5215.i: ; preds = %bb5208.i
- br label %bb5224.i
-bb5216.i: ; preds = %bb5208.i
- br label %bb5224.i
-bb5217.i: ; preds = %bb5208.i
- br label %bb5224.i
-bb5218.i: ; preds = %bb5208.i, %bb5208.i
- br label %bb5224.i
-bb5219.i: ; preds = %bb5208.i, %bb5208.i
- br label %bb5224.i
-bb5221.i: ; preds = %bb5208.i
- br label %bb5224.i
-bb5224.i: ; preds = %bb5221.i, %bb5219.i, %bb5218.i, %bb5217.i, %bb5216.i, %bb5215.i, %bb5214.i, %bb5213.i, %bb5212.i, %bb5211.i, %bb5210.i
- br label %bb5235.i
-bb5235.i: ; preds = %bb5224.i, %bb1065.i.i, %bb1027.i.i, %bb1009.i.i, %bb989.i.i, %bb950.i.i, %bb930.i.i
- br label %bb5272.i
-bb5272.i: ; preds = %bb5235.i
- br label %bb5276.i
-bb5276.i: ; preds = %bb19808.i, %bb5272.i
- br label %bb16607.i
-bb5295.i: ; preds = %bb5295.preheader.i, %storeVecColor_RGB_UI.exit
- br label %loadVecColor_BGRA_UI8888R.exit
-loadVecColor_BGRA_UI8888R.exit: ; preds = %bb5295.i
- br i1 false, label %bb5325.i, label %bb5351.i
-bb5325.i: ; preds = %loadVecColor_BGRA_UI8888R.exit
- br i1 false, label %bb4527.i, label %bb.i
-bb.i: ; preds = %bb5325.i
- switch i32 0, label %bb4527.i [
- i32 4, label %bb4362.i
- i32 8, label %bb4448.i
- ]
-bb4362.i: ; preds = %bb.i
- br i1 false, label %bb4532.i, label %bb5556.i
-bb4448.i: ; preds = %bb.i
- br label %bb4527.i
-bb4527.i: ; preds = %bb4448.i, %bb.i, %bb5325.i
- br i1 false, label %bb4532.i, label %bb5556.i
-bb4532.i: ; preds = %bb4527.i, %bb4362.i
- switch i32 0, label %bb4997.i [
- i32 6, label %bb4534.i
- i32 7, label %bb4982.i
- ]
-bb4534.i: ; preds = %bb4532.i
- br i1 false, label %bb4875.i, label %bb4619.i
-bb4619.i: ; preds = %bb4534.i
- br i1 false, label %bb4875.i, label %bb4663.i
-bb4663.i: ; preds = %bb4619.i
- br label %bb4855.i
-bb4759.i: ; preds = %bb4855.i
- br label %bb4855.i
-bb4855.i: ; preds = %bb4759.i, %bb4663.i
- br i1 false, label %bb4866.i, label %bb4759.i
-bb4866.i: ; preds = %bb4855.i
- br label %bb4875.i
-bb4875.i: ; preds = %bb4866.i, %bb4619.i, %bb4534.i
- br i1 false, label %bb4973.i, label %bb4922.i
-bb4922.i: ; preds = %bb4875.i
- br label %bb4973.i
-bb4973.i: ; preds = %bb4922.i, %bb4875.i
- br label %bb4982.i
-bb4982.i: ; preds = %bb4973.i, %bb4532.i
- br label %bb5041.i
-bb4997.i: ; preds = %bb4532.i
- br label %bb5041.i
-bb5041.i: ; preds = %bb4997.i, %bb4982.i
- switch i32 0, label %bb5464.i [
- i32 0, label %bb5344.i
- i32 1, label %bb5374.i
- i32 2, label %bb5404.i
- i32 3, label %bb5434.i
- i32 11, label %bb5263.i
- ]
-bb5263.i: ; preds = %bb5041.i
- br i1 false, label %bb12038.i, label %bb5467.i
-bb5344.i: ; preds = %bb5041.i
- br i1 false, label %bb12038.i, label %bb5467.i
-bb5374.i: ; preds = %bb5041.i
- br i1 false, label %bb12038.i, label %bb5467.i
-bb5404.i: ; preds = %bb5041.i
- br i1 false, label %bb12038.i, label %bb5467.i
-bb5434.i: ; preds = %bb5041.i
- br label %bb5464.i
-bb5464.i: ; preds = %bb5434.i, %bb5041.i
- br i1 false, label %bb12038.i, label %bb5467.i
-bb5467.i: ; preds = %bb5464.i, %bb5404.i, %bb5374.i, %bb5344.i, %bb5263.i
- switch i32 0, label %bb15866.i [
- i32 3, label %bb13016.i
- i32 4, label %bb12040.i
- i32 8, label %bb12514.i
- i32 10, label %bb12903.i
- i32 11, label %bb12553.i
- i32 16, label %bb12514.i
- ]
-bb5556.i: ; preds = %bb4527.i, %bb4362.i
- switch i32 0, label %bb8990.i [
- i32 3, label %bb6403.i
- i32 4, label %bb6924.i
- i32 8, label %bb6924.i
- i32 10, label %bb6403.i
- i32 11, label %bb5882.i
- i32 16, label %bb5558.i
- ]
-bb5558.i: ; preds = %bb5556.i
- br label %bb8990.i
-bb5882.i: ; preds = %bb5556.i
- switch i32 0, label %bb6387.i [
- i32 1, label %bb6332.i
- i32 3, label %bb6332.i
- i32 4, label %bb6352.i
- i32 6, label %bb5884.i
- i32 7, label %bb8990.i
- ]
-bb5884.i: ; preds = %bb5882.i
- br i1 false, label %bb6225.i, label %bb5969.i
-bb5969.i: ; preds = %bb5884.i
- br i1 false, label %bb6225.i, label %bb6013.i
-bb6013.i: ; preds = %bb5969.i
- br label %bb6205.i
-bb6109.i: ; preds = %bb6205.i
- br label %bb6205.i
-bb6205.i: ; preds = %bb6109.i, %bb6013.i
- br i1 false, label %bb6216.i, label %bb6109.i
-bb6216.i: ; preds = %bb6205.i
- br label %bb6225.i
-bb6225.i: ; preds = %bb6216.i, %bb5969.i, %bb5884.i
- br i1 false, label %bb6323.i, label %bb6272.i
-bb6272.i: ; preds = %bb6225.i
- switch i32 0, label %bb6908.i [
- i32 1, label %bb6853.i48
- i32 3, label %bb6853.i48
- i32 4, label %bb6873.i
- i32 6, label %bb6405.i
- i32 7, label %bb8990.i
- ]
-bb6323.i: ; preds = %bb6225.i
- switch i32 0, label %bb6908.i [
- i32 1, label %bb6853.i48
- i32 3, label %bb6853.i48
- i32 4, label %bb6873.i
- i32 6, label %bb6405.i
- i32 7, label %bb8990.i
- ]
-bb6332.i: ; preds = %bb5882.i, %bb5882.i
- switch i32 0, label %bb6908.i [
- i32 1, label %bb6853.i48
- i32 3, label %bb6853.i48
- i32 4, label %bb6873.i
- i32 6, label %bb6405.i
- i32 7, label %bb8990.i
- ]
-bb6352.i: ; preds = %bb5882.i
- br label %bb6873.i
-bb6387.i: ; preds = %bb5882.i
- br label %bb6403.i
-bb6403.i: ; preds = %bb6387.i, %bb5556.i, %bb5556.i
- switch i32 0, label %bb6908.i [
- i32 1, label %bb6853.i48
- i32 3, label %bb6853.i48
- i32 4, label %bb6873.i
- i32 6, label %bb6405.i
- i32 7, label %bb8990.i
- ]
-bb6405.i: ; preds = %bb6403.i, %bb6332.i, %bb6323.i, %bb6272.i
- br i1 false, label %bb6746.i, label %bb6490.i
-bb6490.i: ; preds = %bb6405.i
- br i1 false, label %bb6746.i, label %bb6534.i
-bb6534.i: ; preds = %bb6490.i
- br label %bb6726.i
-bb6630.i: ; preds = %bb6726.i
- br label %bb6726.i
-bb6726.i: ; preds = %bb6630.i, %bb6534.i
- br i1 false, label %bb6737.i, label %bb6630.i
-bb6737.i: ; preds = %bb6726.i
- br label %bb6746.i
-bb6746.i: ; preds = %bb6737.i, %bb6490.i, %bb6405.i
- br i1 false, label %bb6844.i, label %bb6793.i
-bb6793.i: ; preds = %bb6746.i
- br label %bb8990.i
-bb6844.i: ; preds = %bb6746.i
- br label %bb8990.i
-bb6853.i48: ; preds = %bb6403.i, %bb6403.i, %bb6332.i, %bb6332.i, %bb6323.i, %bb6323.i, %bb6272.i, %bb6272.i
- br label %bb8990.i
-bb6873.i: ; preds = %bb6403.i, %bb6352.i, %bb6332.i, %bb6323.i, %bb6272.i
- br label %bb8990.i
-bb6908.i: ; preds = %bb6403.i, %bb6332.i, %bb6323.i, %bb6272.i
- br label %bb8990.i
-bb6924.i: ; preds = %bb5556.i, %bb5556.i
- switch i32 0, label %bb8929.i [
- i32 1, label %bb8715.i
- i32 3, label %bb8715.i
- i32 4, label %bb8792.i
- i32 6, label %bb6926.i
- i32 7, label %bb8990.i
- ]
-bb6926.i: ; preds = %bb6924.i
- br i1 false, label %bb7267.i, label %bb7011.i
-bb7011.i: ; preds = %bb6926.i
- br i1 false, label %bb7267.i, label %bb7055.i
-bb7055.i: ; preds = %bb7011.i
- br label %bb7247.i
-bb7151.i: ; preds = %bb7247.i
- br label %bb7247.i
-bb7247.i: ; preds = %bb7151.i, %bb7055.i
- br i1 false, label %bb7258.i, label %bb7151.i
-bb7258.i: ; preds = %bb7247.i
- br label %bb7267.i
-bb7267.i: ; preds = %bb7258.i, %bb7011.i, %bb6926.i
- br i1 false, label %bb7365.i, label %bb7314.i
-bb7314.i: ; preds = %bb7267.i
- br label %bb7365.i
-bb7365.i: ; preds = %bb7314.i, %bb7267.i
- br i1 false, label %bb7714.i, label %bb7458.i
-bb7458.i: ; preds = %bb7365.i
- br i1 false, label %bb7714.i, label %bb7502.i
-bb7502.i: ; preds = %bb7458.i
- br label %bb7694.i
-bb7598.i: ; preds = %bb7694.i
- br label %bb7694.i
-bb7694.i: ; preds = %bb7598.i, %bb7502.i
- br i1 false, label %bb7705.i, label %bb7598.i
-bb7705.i: ; preds = %bb7694.i
- br label %bb7714.i
-bb7714.i: ; preds = %bb7705.i, %bb7458.i, %bb7365.i
- br i1 false, label %bb7812.i, label %bb7761.i
-bb7761.i: ; preds = %bb7714.i
- br label %bb7812.i
-bb7812.i: ; preds = %bb7761.i, %bb7714.i
- br i1 false, label %bb8161.i, label %bb7905.i
-bb7905.i: ; preds = %bb7812.i
- br i1 false, label %bb8161.i, label %bb7949.i
-bb7949.i: ; preds = %bb7905.i
- br label %bb8141.i
-bb8045.i: ; preds = %bb8141.i
- br label %bb8141.i
-bb8141.i: ; preds = %bb8045.i, %bb7949.i
- br i1 false, label %bb8152.i, label %bb8045.i
-bb8152.i: ; preds = %bb8141.i
- br label %bb8161.i
-bb8161.i: ; preds = %bb8152.i, %bb7905.i, %bb7812.i
- br i1 false, label %bb8259.i, label %bb8208.i
-bb8208.i: ; preds = %bb8161.i
- br label %bb8259.i
-bb8259.i: ; preds = %bb8208.i, %bb8161.i
- br i1 false, label %bb8608.i, label %bb8352.i
-bb8352.i: ; preds = %bb8259.i
- br i1 false, label %bb8608.i, label %bb8396.i
-bb8396.i: ; preds = %bb8352.i
- br label %bb8588.i63
-bb8492.i: ; preds = %bb8588.i63
- br label %bb8588.i63
-bb8588.i63: ; preds = %bb8492.i, %bb8396.i
- br i1 false, label %bb8599.i, label %bb8492.i
-bb8599.i: ; preds = %bb8588.i63
- br label %bb8608.i
-bb8608.i: ; preds = %bb8599.i, %bb8352.i, %bb8259.i
- br i1 false, label %bb8706.i, label %bb8655.i
-bb8655.i: ; preds = %bb8608.i
- br label %bb8990.i
-bb8706.i: ; preds = %bb8608.i
- br label %bb8990.i
-bb8715.i: ; preds = %bb6924.i, %bb6924.i
- br label %bb8990.i
-bb8792.i: ; preds = %bb6924.i
- br label %bb8990.i
-bb8929.i: ; preds = %bb6924.i
- br label %bb8990.i
-bb8990.i: ; preds = %bb8929.i, %bb8792.i, %bb8715.i, %bb8706.i, %bb8655.i, %bb6924.i, %bb6908.i, %bb6873.i, %bb6853.i48, %bb6844.i, %bb6793.i, %bb6403.i, %bb6332.i, %bb6323.i, %bb6272.i, %bb5882.i, %bb5558.i, %bb5556.i
- switch i32 %sf4083.0.i, label %bb11184.i [
- i32 0, label %bb10372.i
- i32 1, label %bb10609.i
- i32 2, label %bb10811.i
- i32 3, label %bb11013.i
- i32 4, label %bb8992.i
- i32 5, label %bb8992.i
- i32 6, label %bb8992.i
- i32 7, label %bb8992.i
- i32 8, label %bb9195.i
- i32 9, label %bb9195.i
- i32 10, label %bb9965.i
- i32 11, label %bb9585.i
- i32 16, label %bb9195.i
- ]
-bb8992.i: ; preds = %bb8990.i, %bb8990.i, %bb8990.i, %bb8990.i
- switch i32 0, label %bb11184.i [
- i32 0, label %bb9075.i
- i32 1, label %bb9105.i
- i32 2, label %bb9135.i
- i32 3, label %bb9165.i
- i32 11, label %bb8994.i
- ]
-bb8994.i: ; preds = %bb8992.i
- br label %bb11247.i
-bb9075.i: ; preds = %bb8992.i
- br label %bb11247.i
-bb9105.i: ; preds = %bb8992.i
- br label %bb11247.i
-bb9135.i: ; preds = %bb8992.i
- br label %bb11247.i
-bb9165.i: ; preds = %bb8992.i
- br label %bb11247.i
-bb9195.i: ; preds = %bb8990.i, %bb8990.i, %bb8990.i
- switch i32 0, label %bb11184.i [
- i32 0, label %bb9491.i
- i32 1, label %bb9521.i
- i32 2, label %bb9551.i
- i32 3, label %bb9581.i
- i32 4, label %bb9197.i
- i32 11, label %bb9342.i
- ]
-bb9197.i: ; preds = %bb9195.i
- br label %bb11247.i
-bb9342.i: ; preds = %bb9195.i
- br label %bb11247.i
-bb9491.i: ; preds = %bb9195.i
- br label %bb11247.i
-bb9521.i: ; preds = %bb9195.i
- br label %bb11247.i
-bb9551.i: ; preds = %bb9195.i
- br label %bb11247.i
-bb9581.i: ; preds = %bb9195.i
- br label %bb11247.i
-bb9585.i: ; preds = %bb8990.i
- switch i32 0, label %bb11184.i [
- i32 0, label %bb9879.i
- i32 1, label %bb9920.i
- i32 2, label %bb9920.i
- i32 3, label %bb9924.i
- i32 4, label %bb9587.i
- i32 8, label %bb9587.i
- ]
-bb9587.i: ; preds = %bb9585.i, %bb9585.i
- br label %bb11247.i
-bb9879.i: ; preds = %bb9585.i
- br label %bb11247.i
-bb9920.i: ; preds = %bb9585.i, %bb9585.i
- br label %bb11247.i
-bb9924.i: ; preds = %bb9585.i
- br label %bb11247.i
-bb9965.i: ; preds = %bb8990.i
- switch i32 0, label %bb11184.i [
- i32 1, label %bb10368.i
- i32 2, label %bb10368.i
- i32 3, label %bb10364.i
- i32 4, label %bb9967.i
- i32 8, label %bb10127.i
- i32 11, label %bb10287.i
- ]
-bb9967.i: ; preds = %bb9965.i
- br label %bb11247.i
-bb10127.i: ; preds = %bb9965.i
- br label %bb11247.i
-bb10287.i: ; preds = %bb9965.i
- br label %bb11247.i
-bb10364.i: ; preds = %bb9965.i
- br label %bb11247.i
-bb10368.i: ; preds = %bb9965.i, %bb9965.i
- br label %bb11247.i
-bb10372.i: ; preds = %bb8990.i
- switch i32 0, label %bb11184.i [
- i32 1, label %bb10605.i
- i32 2, label %bb10605.i
- i32 3, label %bb10601.i
- i32 4, label %bb10374.i
- i32 8, label %bb10449.i
- i32 11, label %bb10524.i
- ]
-bb10374.i: ; preds = %bb10372.i
- br label %bb11247.i
-bb10449.i: ; preds = %bb10372.i
- br label %bb11247.i
-bb10524.i: ; preds = %bb10372.i
- br label %bb11247.i
-bb10601.i: ; preds = %bb10372.i
- br label %bb11247.i
-bb10605.i: ; preds = %bb10372.i, %bb10372.i
- br label %bb11247.i
-bb10609.i: ; preds = %bb8990.i
- switch i32 0, label %bb11184.i [
- i32 0, label %bb10807.i
- i32 2, label %bb10807.i
- i32 3, label %bb10803.i
- i32 4, label %bb10611.i
- i32 8, label %bb10686.i
- i32 11, label %bb10761.i
- ]
-bb10611.i: ; preds = %bb10609.i
- br label %bb11247.i
-bb10686.i: ; preds = %bb10609.i
- br label %bb11247.i
-bb10761.i: ; preds = %bb10609.i
- br label %bb11247.i
-bb10803.i: ; preds = %bb10609.i
- br label %bb11247.i
-bb10807.i: ; preds = %bb10609.i, %bb10609.i
- br label %bb11247.i
-bb10811.i: ; preds = %bb8990.i
- switch i32 0, label %bb11184.i [
- i32 0, label %bb11009.i
- i32 1, label %bb11009.i
- i32 3, label %bb11005.i
- i32 4, label %bb10813.i
- i32 8, label %bb10888.i
- i32 11, label %bb10963.i
- ]
-bb10813.i: ; preds = %bb10811.i
- br label %bb11247.i
-bb10888.i: ; preds = %bb10811.i
- br label %bb11247.i
-bb10963.i: ; preds = %bb10811.i
- br label %bb11247.i
-bb11005.i: ; preds = %bb10811.i
- br label %bb11247.i
-bb11009.i: ; preds = %bb10811.i, %bb10811.i
- br label %bb11247.i
-bb11013.i: ; preds = %bb8990.i
- switch i32 0, label %bb11184.i [
- i32 0, label %bb11180.i
- i32 1, label %bb11180.i
- i32 2, label %bb11180.i
- i32 4, label %bb11015.i
- i32 8, label %bb11090.i
- i32 11, label %bb11103.i
- ]
-bb11015.i: ; preds = %bb11013.i
- br label %bb11247.i
-bb11090.i: ; preds = %bb11013.i
- br label %bb11247.i
-bb11103.i: ; preds = %bb11013.i
- br label %bb11247.i
-bb11180.i: ; preds = %bb11013.i, %bb11013.i, %bb11013.i
- br label %bb11184.i
-bb11184.i: ; preds = %bb11180.i, %bb11013.i, %bb10811.i, %bb10609.i, %bb10372.i, %bb9965.i, %bb9585.i, %bb9195.i, %bb8992.i, %bb8990.i
- br label %bb11247.i
-bb11247.i: ; preds = %bb11184.i, %bb11103.i, %bb11090.i, %bb11015.i, %bb11009.i, %bb11005.i, %bb10963.i, %bb10888.i, %bb10813.i, %bb10807.i, %bb10803.i, %bb10761.i, %bb10686.i, %bb10611.i, %bb10605.i, %bb10601.i, %bb10524.i, %bb10449.i, %bb10374.i, %bb10368.i, %bb10364.i, %bb10287.i, %bb10127.i, %bb9967.i, %bb9924.i, %bb9920.i, %bb9879.i, %bb9587.i, %bb9581.i, %bb9551.i, %bb9521.i, %bb9491.i, %bb9342.i, %bb9197.i, %bb9165.i, %bb9135.i, %bb9105.i, %bb9075.i, %bb8994.i
- br i1 false, label %bb11250.i, label %bb11256.i
-bb11250.i: ; preds = %bb11247.i
- br label %bb11378.i
-bb11256.i: ; preds = %bb11247.i
- switch i32 0, label %bb11348.i [
- i32 4, label %bb11258.i
- i32 8, label %bb11258.i
- i32 11, label %bb11318.i
- ]
-bb11258.i: ; preds = %bb11256.i, %bb11256.i
- br i1 false, label %bb11273.i, label %bb11261.i
-bb11261.i: ; preds = %bb11258.i
- br label %bb11273.i
-bb11273.i: ; preds = %bb11261.i, %bb11258.i
- br i1 false, label %bb11288.i, label %bb11276.i
-bb11276.i: ; preds = %bb11273.i
- br label %bb11288.i
-bb11288.i: ; preds = %bb11276.i, %bb11273.i
- br i1 false, label %bb11303.i, label %bb11291.i
-bb11291.i: ; preds = %bb11288.i
- br label %bb11303.i
-bb11303.i: ; preds = %bb11291.i, %bb11288.i
- br i1 false, label %bb11318.i, label %bb11306.i
-bb11306.i: ; preds = %bb11303.i
- br label %bb11318.i
-bb11318.i: ; preds = %bb11306.i, %bb11303.i, %bb11256.i
- br i1 false, label %bb11333.i, label %bb11321.i
-bb11321.i: ; preds = %bb11318.i
- br label %bb11333.i
-bb11333.i: ; preds = %bb11321.i, %bb11318.i
- br i1 false, label %bb11348.i, label %bb11336.i
-bb11336.i: ; preds = %bb11333.i
- br label %bb11348.i
-bb11348.i: ; preds = %bb11336.i, %bb11333.i, %bb11256.i
- br i1 false, label %bb11363.i, label %bb11351.i
-bb11351.i: ; preds = %bb11348.i
- br label %bb11363.i
-bb11363.i: ; preds = %bb11351.i, %bb11348.i
- br i1 false, label %bb11378.i, label %bb11366.i
-bb11366.i: ; preds = %bb11363.i
- br label %bb11378.i
-bb11378.i: ; preds = %bb11366.i, %bb11363.i, %bb11250.i
- br label %bb12038.i
-bb12038.i: ; preds = %bb11378.i, %bb5464.i, %bb5404.i, %bb5374.i, %bb5344.i, %bb5263.i
- switch i32 0, label %bb15866.i [
- i32 3, label %bb13016.i
- i32 4, label %bb12040.i
- i32 8, label %bb12514.i
- i32 10, label %bb12903.i
- i32 11, label %bb12553.i
- i32 16, label %bb12514.i
- ]
-bb12040.i: ; preds = %bb12038.i, %bb5467.i
- br label %bb13026.i
-bb12514.i: ; preds = %bb12038.i, %bb12038.i, %bb5467.i, %bb5467.i
- br label %bb13026.i
-bb12553.i: ; preds = %bb12038.i, %bb5467.i
- br i1 false, label %bb12558.i, label %bb12747.i
-bb12558.i: ; preds = %bb12553.i
- br i1 false, label %bb12666.i, label %bb12654.i
-bb12654.i: ; preds = %bb12558.i
- br label %bb12666.i
-bb12666.i: ; preds = %bb12654.i, %bb12558.i
- br label %bb12747.i
-bb12747.i: ; preds = %bb12666.i, %bb12553.i
- br label %bb13026.i
-bb12903.i: ; preds = %bb12038.i, %bb5467.i
- br i1 false, label %bb12908.i, label %bb13026.i
-bb12908.i: ; preds = %bb12903.i
- br i1 false, label %bb13026.i, label %bb13004.i
-bb13004.i: ; preds = %bb12908.i
- switch i32 0, label %bb15866.i [
- i32 3, label %bb13752.i
- i32 4, label %bb14197.i
- i32 8, label %bb14197.i
- i32 10, label %bb13752.i
- i32 11, label %bb13307.i
- i32 16, label %bb13028.i
- ]
-bb13016.i: ; preds = %bb12038.i, %bb5467.i
- br label %bb13026.i
-bb13026.i: ; preds = %bb13016.i, %bb12908.i, %bb12903.i, %bb12747.i, %bb12514.i, %bb12040.i
- switch i32 0, label %bb15866.i [
- i32 3, label %bb13752.i
- i32 4, label %bb14197.i
- i32 8, label %bb14197.i
- i32 10, label %bb13752.i
- i32 11, label %bb13307.i
- i32 16, label %bb13028.i
- ]
-bb13028.i: ; preds = %bb13026.i, %bb13004.i
- br i1 false, label %UnifiedReturnBlock.i177, label %bb15869.i
-bb13307.i: ; preds = %bb13026.i, %bb13004.i
- switch i32 %dt4080.0.i, label %bb13736.i [
- i32 6, label %bb13312.i
- i32 1, label %bb13624.i
- i32 3, label %bb13624.i
- i32 5, label %bb13649.i
- i32 4, label %bb13688.i
- i32 7, label %bb15866.i
- ]
-bb13312.i: ; preds = %bb13307.i
- br i1 false, label %bb13483.i, label %bb13400.i
-bb13400.i: ; preds = %bb13312.i
- br label %bb13483.i
-bb13483.i: ; preds = %bb13400.i, %bb13312.i
- br i1 false, label %bb13593.i, label %bb13505.i
-bb13505.i: ; preds = %bb13483.i
- switch i32 %dt4080.0.i, label %bb14181.i [
- i32 6, label %bb13757.i
- i32 1, label %bb14069.i
- i32 3, label %bb14069.i
- i32 5, label %bb14094.i
- i32 4, label %bb14133.i
- i32 7, label %bb15866.i
- ]
-bb13593.i: ; preds = %bb13483.i
- switch i32 %dt4080.0.i, label %bb14181.i [
- i32 6, label %bb13757.i
- i32 1, label %bb14069.i
- i32 3, label %bb14069.i
- i32 5, label %bb14094.i
- i32 4, label %bb14133.i
- i32 7, label %bb15866.i
- ]
-bb13624.i: ; preds = %bb13307.i, %bb13307.i
- switch i32 %dt4080.0.i, label %bb14181.i [
- i32 6, label %bb13757.i
- i32 1, label %bb14069.i
- i32 3, label %bb14069.i
- i32 5, label %bb14094.i
- i32 4, label %bb14133.i
- i32 7, label %bb15866.i
- ]
-bb13649.i: ; preds = %bb13307.i
- br label %bb14094.i
-bb13688.i: ; preds = %bb13307.i
- br label %bb14133.i
-bb13736.i: ; preds = %bb13307.i
- br label %bb13752.i
-bb13752.i: ; preds = %bb13736.i, %bb13026.i, %bb13026.i, %bb13004.i, %bb13004.i
- switch i32 %dt4080.0.i, label %bb14181.i [
- i32 6, label %bb13757.i
- i32 1, label %bb14069.i
- i32 3, label %bb14069.i
- i32 5, label %bb14094.i
- i32 4, label %bb14133.i
- i32 7, label %bb15866.i
- ]
-bb13757.i: ; preds = %bb13752.i, %bb13624.i, %bb13593.i, %bb13505.i
- br i1 false, label %bb13928.i, label %bb13845.i
-bb13845.i: ; preds = %bb13757.i
- br label %bb13928.i
-bb13928.i: ; preds = %bb13845.i, %bb13757.i
- br i1 false, label %bb14038.i, label %bb13950.i
-bb13950.i: ; preds = %bb13928.i
- br i1 false, label %UnifiedReturnBlock.i177, label %bb15869.i
-bb14038.i: ; preds = %bb13928.i
- br i1 false, label %UnifiedReturnBlock.i177, label %bb15869.i
-bb14069.i: ; preds = %bb13752.i, %bb13752.i, %bb13624.i, %bb13624.i, %bb13593.i, %bb13593.i, %bb13505.i, %bb13505.i
- br i1 false, label %UnifiedReturnBlock.i177, label %bb15869.i
-bb14094.i: ; preds = %bb13752.i, %bb13649.i, %bb13624.i, %bb13593.i, %bb13505.i
- br i1 false, label %UnifiedReturnBlock.i177, label %bb15869.i
-bb14133.i: ; preds = %bb13752.i, %bb13688.i, %bb13624.i, %bb13593.i, %bb13505.i
- br i1 false, label %UnifiedReturnBlock.i177, label %bb15869.i
-bb14181.i: ; preds = %bb13752.i, %bb13624.i, %bb13593.i, %bb13505.i
- br i1 false, label %UnifiedReturnBlock.i177, label %bb15869.i
-bb14197.i: ; preds = %bb13026.i, %bb13026.i, %bb13004.i, %bb13004.i
- switch i32 %dt4080.0.i, label %bb15805.i [
- i32 6, label %bb14202.i
- i32 1, label %bb15411.i
- i32 3, label %bb15411.i
- i32 5, label %bb15493.i
- i32 4, label %bb15631.i
- i32 7, label %bb15866.i
- ]
-bb14202.i: ; preds = %bb14197.i
- br i1 false, label %bb14373.i, label %bb14290.i
-bb14290.i: ; preds = %bb14202.i
- br label %bb14373.i
-bb14373.i: ; preds = %bb14290.i, %bb14202.i
- br i1 false, label %bb14483.i, label %bb14395.i
-bb14395.i: ; preds = %bb14373.i
- br label %bb14483.i
-bb14483.i: ; preds = %bb14395.i, %bb14373.i
- br i1 false, label %bb14672.i, label %bb14589.i
-bb14589.i: ; preds = %bb14483.i
- br label %bb14672.i
-bb14672.i: ; preds = %bb14589.i, %bb14483.i
- br i1 false, label %bb14782.i, label %bb14694.i
-bb14694.i: ; preds = %bb14672.i
- br label %bb14782.i
-bb14782.i: ; preds = %bb14694.i, %bb14672.i
- br i1 false, label %bb14971.i, label %bb14888.i
-bb14888.i: ; preds = %bb14782.i
- br label %bb14971.i
-bb14971.i: ; preds = %bb14888.i, %bb14782.i
- br i1 false, label %bb15081.i, label %bb14993.i
-bb14993.i: ; preds = %bb14971.i
- br label %bb15081.i
-bb15081.i: ; preds = %bb14993.i, %bb14971.i
- br i1 false, label %bb15270.i, label %bb15187.i
-bb15187.i: ; preds = %bb15081.i
- br label %bb15270.i
-bb15270.i: ; preds = %bb15187.i, %bb15081.i
- br i1 false, label %bb15380.i, label %bb15292.i
-bb15292.i: ; preds = %bb15270.i
- br i1 false, label %UnifiedReturnBlock.i177, label %bb15869.i
-bb15380.i: ; preds = %bb15270.i
- br i1 false, label %UnifiedReturnBlock.i177, label %bb15869.i
-bb15411.i: ; preds = %bb14197.i, %bb14197.i
- br i1 false, label %UnifiedReturnBlock.i177, label %bb15869.i
-bb15493.i: ; preds = %bb14197.i
- br i1 false, label %UnifiedReturnBlock.i177, label %bb15869.i
-bb15631.i: ; preds = %bb14197.i
- br i1 false, label %UnifiedReturnBlock.i177, label %bb15869.i
-bb15805.i: ; preds = %bb14197.i
- br label %bb15866.i
-bb15866.i: ; preds = %bb15805.i, %bb14197.i, %bb13752.i, %bb13624.i, %bb13593.i, %bb13505.i, %bb13307.i, %bb13026.i, %bb13004.i, %bb12038.i, %bb5467.i
- br i1 false, label %UnifiedReturnBlock.i177, label %bb15869.i
-bb15869.i: ; preds = %bb15866.i, %bb15631.i, %bb15493.i, %bb15411.i, %bb15380.i, %bb15292.i, %bb14181.i, %bb14133.i, %bb14094.i, %bb14069.i, %bb14038.i, %bb13950.i, %bb13028.i
- switch i32 0, label %UnifiedReturnBlock.i177 [
- i32 4, label %bb15874.i
- i32 8, label %bb15960.i
- ]
-bb15874.i: ; preds = %bb15869.i
- br label %glgVectorFloatConversion.exit
-bb15960.i: ; preds = %bb15869.i
- br label %glgVectorFloatConversion.exit
-UnifiedReturnBlock.i177: ; preds = %bb15869.i, %bb15866.i, %bb15631.i, %bb15493.i, %bb15411.i, %bb15380.i, %bb15292.i, %bb14181.i, %bb14133.i, %bb14094.i, %bb14069.i, %bb14038.i, %bb13950.i, %bb13028.i
- br label %glgVectorFloatConversion.exit
-glgVectorFloatConversion.exit: ; preds = %UnifiedReturnBlock.i177, %bb15960.i, %bb15874.i
- br label %bb16581.i
-bb5351.i: ; preds = %loadVecColor_BGRA_UI8888R.exit
- br i1 false, label %bb5359.i, label %bb5586.i
-bb5359.i: ; preds = %bb5351.i
- switch i32 0, label %bb5586.i [
- i32 0, label %bb5361.i
- i32 1, label %bb5511.i
- i32 2, label %bb5511.i
- ]
-bb5361.i: ; preds = %bb5359.i
- br i1 false, label %bb5366.i, label %bb5379.i
-bb5366.i: ; preds = %bb5361.i
- br label %bb7230.i
-bb5379.i: ; preds = %bb5361.i
- switch i32 %sf4083.0.i, label %bb5415.i [
- i32 1, label %bb5384.i
- i32 2, label %bb5402.i
- ]
-bb5384.i: ; preds = %bb5379.i
- switch i32 0, label %bb7230.i [
- i32 4, label %bb5445.i
- i32 8, label %bb5445.i
- i32 11, label %bb5445.i
- ]
-bb5402.i: ; preds = %bb5379.i
- switch i32 0, label %bb7230.i [
- i32 4, label %bb5445.i
- i32 8, label %bb5445.i
- i32 11, label %bb5445.i
- ]
-bb5415.i: ; preds = %bb5379.i
- switch i32 0, label %bb7230.i [
- i32 4, label %bb5445.i
- i32 8, label %bb5445.i
- i32 11, label %bb5445.i
- ]
-bb5445.i: ; preds = %bb5415.i, %bb5415.i, %bb5415.i, %bb5402.i, %bb5402.i, %bb5402.i, %bb5384.i, %bb5384.i, %bb5384.i
- switch i32 0, label %bb7230.i [
- i32 4, label %bb5470.i
- i32 8, label %bb5470.i
- i32 11, label %bb6853.i
- ]
-bb5470.i: ; preds = %bb5445.i, %bb5445.i
- switch i32 0, label %bb7230.i [
- i32 4, label %bb5498.i
- i32 8, label %bb5493.i
- i32 11, label %bb6853.i
- ]
-bb5493.i: ; preds = %bb5470.i
- br i1 false, label %bb5498.i, label %bb5586.i
-bb5498.i: ; preds = %bb5493.i, %bb5470.i
- switch i32 0, label %bb7230.i [
- i32 4, label %bb5591.i
- i32 8, label %bb6153.i
- i32 11, label %bb6853.i
- ]
-bb5511.i: ; preds = %bb5359.i, %bb5359.i
- br i1 false, label %bb5568.i, label %bb5586.i
-bb5568.i: ; preds = %bb5511.i
- br label %bb5586.i
-bb5586.i: ; preds = %bb5568.i, %bb5511.i, %bb5493.i, %bb5359.i, %bb5351.i
- switch i32 0, label %bb7230.i [
- i32 4, label %bb5591.i
- i32 8, label %bb6153.i
- i32 11, label %bb6853.i
- ]
-bb5591.i: ; preds = %bb5586.i, %bb5498.i
- switch i32 0, label %bb5995.i [
- i32 4, label %bb5596.i
- i32 8, label %bb5680.i
- i32 11, label %bb5842.i
- ]
-bb5596.i: ; preds = %bb5591.i
- br i1 false, label %bb8428.i, label %bb5602.i
-bb5602.i: ; preds = %bb5596.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb5680.i: ; preds = %bb5591.i
- br i1 false, label %bb5692.i, label %bb5764.i
-bb5692.i: ; preds = %bb5680.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb5764.i: ; preds = %bb5680.i
- br i1 false, label %bb8428.i, label %bb5772.i
-bb5772.i: ; preds = %bb5764.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb5842.i: ; preds = %bb5591.i
- br i1 false, label %bb5920.i, label %bb5845.i
-bb5845.i: ; preds = %bb5842.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb5920.i: ; preds = %bb5842.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb5995.i: ; preds = %bb5591.i
- switch i32 %df4081.0.i, label %bb8428.i [
- i32 0, label %bb6007.i
- i32 10, label %bb6007.i
- i32 1, label %bb6042.i
- i32 2, label %bb6079.i
- i32 3, label %bb6116.i
- ]
-bb6007.i: ; preds = %bb5995.i, %bb5995.i
- br i1 false, label %bb6012.i, label %bb8428.i
-bb6012.i: ; preds = %bb6007.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6042.i: ; preds = %bb5995.i
- br i1 false, label %bb6049.i, label %bb6045.i
-bb6045.i: ; preds = %bb6042.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6049.i: ; preds = %bb6042.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6079.i: ; preds = %bb5995.i
- br i1 false, label %bb6086.i, label %bb6082.i
-bb6082.i: ; preds = %bb6079.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6086.i: ; preds = %bb6079.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6116.i: ; preds = %bb5995.i
- br i1 false, label %bb6123.i, label %bb6119.i
-bb6119.i: ; preds = %bb6116.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6123.i: ; preds = %bb6116.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6153.i: ; preds = %bb5586.i, %bb5498.i
- switch i32 0, label %bb6724.i [
- i32 4, label %bb6158.i
- i32 8, label %bb6459.i
- i32 11, label %bb6621.i
- ]
-bb6158.i: ; preds = %bb6153.i
- br i1 false, label %bb6242.i, label %bb6161.i
-bb6161.i: ; preds = %bb6158.i
- br i1 false, label %bb6239.i, label %bb6166.i
-bb6166.i: ; preds = %bb6161.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6239.i: ; preds = %bb6161.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6242.i: ; preds = %bb6158.i
- br i1 false, label %bb6245.i, label %bb6317.i
-bb6245.i: ; preds = %bb6242.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6317.i: ; preds = %bb6242.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6459.i: ; preds = %bb6153.i
- br i1 false, label %bb6471.i, label %bb6543.i
-bb6471.i: ; preds = %bb6459.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6543.i: ; preds = %bb6459.i
- br i1 false, label %bb8428.i, label %bb6551.i
-bb6551.i: ; preds = %bb6543.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6621.i: ; preds = %bb6153.i
- br i1 false, label %bb6626.i, label %bb6651.i
-bb6626.i: ; preds = %bb6621.i
- br label %bb6651.i
-bb6651.i: ; preds = %bb6626.i, %bb6621.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6724.i: ; preds = %bb6153.i
- switch i32 %df4081.0.i, label %bb8428.i [
- i32 0, label %bb6736.i
- i32 10, label %bb6736.i
- i32 1, label %bb6771.i
- i32 2, label %bb6808.i
- i32 3, label %bb6845.i
- ]
-bb6736.i: ; preds = %bb6724.i, %bb6724.i
- br i1 false, label %bb6741.i, label %bb8428.i
-bb6741.i: ; preds = %bb6736.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6771.i: ; preds = %bb6724.i
- br i1 false, label %bb6778.i, label %bb6774.i
-bb6774.i: ; preds = %bb6771.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6778.i: ; preds = %bb6771.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6808.i: ; preds = %bb6724.i
- br i1 false, label %bb6815.i, label %bb6811.i
-bb6811.i: ; preds = %bb6808.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6815.i: ; preds = %bb6808.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6845.i: ; preds = %bb6724.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6853.i: ; preds = %bb5586.i, %bb5498.i, %bb5470.i, %bb5445.i
- switch i32 0, label %bb8428.i [
- i32 4, label %bb6858.i
- i32 8, label %bb7072.i
- i32 10, label %bb7149.i
- i32 3, label %bb7192.i
- ]
-bb6858.i: ; preds = %bb6853.i
- br i1 false, label %bb6942.i, label %bb6861.i
-bb6861.i: ; preds = %bb6858.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb6942.i: ; preds = %bb6858.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7072.i: ; preds = %bb6853.i
- br i1 false, label %bb7119.i, label %bb7075.i
-bb7075.i: ; preds = %bb7072.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7119.i: ; preds = %bb7072.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7149.i: ; preds = %bb6853.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7192.i: ; preds = %bb6853.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7230.i: ; preds = %bb5586.i, %bb5498.i, %bb5470.i, %bb5445.i, %bb5415.i, %bb5402.i, %bb5384.i, %bb5366.i
- switch i32 %sf4083.0.i, label %bb8428.i [
- i32 10, label %bb7235.i
- i32 0, label %bb7455.i
- i32 1, label %bb7725.i
- i32 2, label %bb7978.i
- i32 3, label %bb8231.i
- ]
-bb7235.i: ; preds = %bb7230.i
- switch i32 0, label %bb7442.i [
- i32 4, label %bb7240.i
- i32 8, label %bb7329.i
- i32 11, label %bb7369.i
- ]
-bb7240.i: ; preds = %bb7235.i
- br i1 false, label %bb7252.i, label %bb7243.i
-bb7243.i: ; preds = %bb7240.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7252.i: ; preds = %bb7240.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7329.i: ; preds = %bb7235.i
- br i1 false, label %bb7339.i, label %bb7332.i
-bb7332.i: ; preds = %bb7329.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7339.i: ; preds = %bb7329.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7369.i: ; preds = %bb7235.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7442.i: ; preds = %bb7235.i
- br i1 false, label %bb7447.i, label %bb8428.i
-bb7447.i: ; preds = %bb7442.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7455.i: ; preds = %bb7230.i
- switch i32 0, label %bb7703.i [
- i32 4, label %bb7460.i
- i32 8, label %bb7546.i
- i32 11, label %bb7630.i
- ]
-bb7460.i: ; preds = %bb7455.i
- br i1 false, label %bb7471.i, label %bb7463.i
-bb7463.i: ; preds = %bb7460.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7471.i: ; preds = %bb7460.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7546.i: ; preds = %bb7455.i
- br i1 false, label %bb7555.i, label %bb7549.i
-bb7549.i: ; preds = %bb7546.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7555.i: ; preds = %bb7546.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7630.i: ; preds = %bb7455.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7703.i: ; preds = %bb7455.i
- br i1 false, label %bb7709.i, label %bb7712.i
-bb7709.i: ; preds = %bb7703.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7712.i: ; preds = %bb7703.i
- br i1 false, label %bb7717.i, label %bb8428.i
-bb7717.i: ; preds = %bb7712.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7725.i: ; preds = %bb7230.i
- switch i32 0, label %bb7945.i [
- i32 4, label %bb7730.i
- i32 8, label %bb7819.i
- i32 11, label %bb7906.i
- ]
-bb7730.i: ; preds = %bb7725.i
- br i1 false, label %bb7744.i, label %bb7733.i
-bb7733.i: ; preds = %bb7730.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7744.i: ; preds = %bb7730.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7819.i: ; preds = %bb7725.i
- br i1 false, label %bb7831.i, label %bb7822.i
-bb7822.i: ; preds = %bb7819.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7831.i: ; preds = %bb7819.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7906.i: ; preds = %bb7725.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7945.i: ; preds = %bb7725.i
- switch i32 %df4081.0.i, label %bb8428.i [
- i32 0, label %bb7962.i
- i32 2, label %bb7962.i
- i32 10, label %bb7962.i
- i32 3, label %bb7970.i
- ]
-bb7962.i: ; preds = %bb7945.i, %bb7945.i, %bb7945.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7970.i: ; preds = %bb7945.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7978.i: ; preds = %bb7230.i
- switch i32 0, label %bb8198.i [
- i32 4, label %bb7983.i
- i32 8, label %bb8072.i
- i32 11, label %bb8159.i
- ]
-bb7983.i: ; preds = %bb7978.i
- br i1 false, label %bb7997.i, label %bb7986.i
-bb7986.i: ; preds = %bb7983.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb7997.i: ; preds = %bb7983.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb8072.i: ; preds = %bb7978.i
- br i1 false, label %bb8084.i, label %bb8075.i
-bb8075.i: ; preds = %bb8072.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb8084.i: ; preds = %bb8072.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb8159.i: ; preds = %bb7978.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb8198.i: ; preds = %bb7978.i
- switch i32 %df4081.0.i, label %bb8428.i [
- i32 0, label %bb8215.i
- i32 1, label %bb8215.i
- i32 10, label %bb8215.i
- i32 3, label %bb8223.i
- ]
-bb8215.i: ; preds = %bb8198.i, %bb8198.i, %bb8198.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb8223.i: ; preds = %bb8198.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb8231.i: ; preds = %bb7230.i
- switch i32 0, label %bb8428.i [
- i32 4, label %bb8236.i
- i32 8, label %bb8326.i
- i32 11, label %bb8347.i
- i32 10, label %bb8425.i
- ]
-bb8236.i: ; preds = %bb8231.i
- br i1 false, label %bb8251.i, label %bb8239.i
-bb8239.i: ; preds = %bb8236.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb8251.i: ; preds = %bb8236.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb8326.i: ; preds = %bb8231.i
- br i1 false, label %bb8339.i, label %bb8428.i
-bb8339.i: ; preds = %bb8326.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb8347.i: ; preds = %bb8231.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb8425.i: ; preds = %bb8231.i
- br label %bb8428.i
-bb8428.i: ; preds = %bb8425.i, %bb8326.i, %bb8231.i, %bb8198.i, %bb7945.i, %bb7712.i, %bb7442.i, %bb7230.i, %bb6853.i, %bb6736.i, %bb6724.i, %bb6543.i, %bb6007.i, %bb5995.i, %bb5764.i, %bb5596.i
- br i1 false, label %bb8668.i, label %bb8434.i
-bb8434.i: ; preds = %bb8428.i, %bb8347.i, %bb8339.i, %bb8251.i, %bb8239.i, %bb8223.i, %bb8215.i, %bb8159.i, %bb8084.i, %bb8075.i, %bb7997.i, %bb7986.i, %bb7970.i, %bb7962.i, %bb7906.i, %bb7831.i, %bb7822.i, %bb7744.i, %bb7733.i, %bb7717.i, %bb7709.i, %bb7630.i, %bb7555.i, %bb7549.i, %bb7471.i, %bb7463.i, %bb7447.i, %bb7369.i, %bb7339.i, %bb7332.i, %bb7252.i, %bb7243.i, %bb7192.i, %bb7149.i, %bb7119.i, %bb7075.i, %bb6942.i, %bb6861.i, %bb6845.i, %bb6815.i, %bb6811.i, %bb6778.i, %bb6774.i, %bb6741.i, %bb6651.i, %bb6551.i, %bb6471.i, %bb6317.i, %bb6245.i, %bb6239.i, %bb6166.i, %bb6123.i, %bb6119.i, %bb6086.i, %bb6082.i, %bb6049.i, %bb6045.i, %bb6012.i, %bb5920.i, %bb5845.i, %bb5772.i, %bb5692.i, %bb5602.i
- switch i32 0, label %bb8668.i [
- i32 0, label %bb8436.i
- i32 1, label %bb8531.i
- i32 2, label %bb8531.i
- ]
-bb8436.i: ; preds = %bb8434.i
- switch i32 0, label %bb9310.i [
- i32 4, label %bb8465.i
- i32 8, label %bb8465.i
- i32 11, label %bb8465.i
- i32 3, label %bb9301.i
- ]
-bb8465.i: ; preds = %bb8436.i, %bb8436.i, %bb8436.i
- switch i32 0, label %bb9310.i [
- i32 4, label %bb8490.i
- i32 8, label %bb8490.i
- i32 3, label %bb9301.i
- i32 11, label %bb9153.i
- ]
-bb8490.i: ; preds = %bb8465.i, %bb8465.i
- switch i32 0, label %bb9310.i [
- i32 4, label %bb8518.i
- i32 8, label %bb8513.i
- i32 3, label %bb9301.i
- i32 11, label %bb9153.i
- ]
-bb8513.i: ; preds = %bb8490.i
- br i1 false, label %bb8518.i, label %bb8668.i
-bb8518.i: ; preds = %bb8513.i, %bb8490.i
- switch i32 0, label %bb9310.i [
- i32 3, label %bb9301.i
- i32 4, label %bb8670.i
- i32 8, label %bb9112.i
- i32 11, label %bb9153.i
- ]
-bb8531.i: ; preds = %bb8434.i, %bb8434.i
- br i1 false, label %bb8536.i, label %bb8575.i
-bb8536.i: ; preds = %bb8531.i
- br i1 false, label %bb8557.i, label %bb8588.i
-bb8557.i: ; preds = %bb8536.i
- switch i32 0, label %bb9310.i [
- i32 4, label %bb8600.i
- i32 8, label %bb8600.i
- i32 3, label %bb9301.i
- i32 11, label %bb9153.i
- ]
-bb8575.i: ; preds = %bb8531.i
- br label %bb8588.i
-bb8588.i: ; preds = %bb8575.i, %bb8536.i
- switch i32 0, label %bb9310.i [
- i32 4, label %bb8600.i
- i32 8, label %bb8600.i
- i32 3, label %bb9301.i
- i32 11, label %bb9153.i
- ]
-bb8600.i: ; preds = %bb8588.i, %bb8588.i, %bb8557.i, %bb8557.i
- switch i32 0, label %bb9310.i [
- i32 4, label %bb8629.i
- i32 3, label %bb9301.i
- i32 8, label %bb9112.i
- i32 11, label %bb9153.i
- ]
-bb8629.i: ; preds = %bb8600.i
- br i1 false, label %bb8650.i, label %bb8668.i
-bb8650.i: ; preds = %bb8629.i
- br label %bb8668.i
-bb8668.i: ; preds = %bb8650.i, %bb8629.i, %bb8513.i, %bb8434.i, %bb8428.i, %bb8347.i, %bb8339.i, %bb8251.i, %bb8239.i, %bb8223.i, %bb8215.i, %bb8159.i, %bb8084.i, %bb8075.i, %bb7997.i, %bb7986.i, %bb7970.i, %bb7962.i, %bb7906.i, %bb7831.i, %bb7822.i, %bb7744.i, %bb7733.i, %bb7717.i, %bb7709.i, %bb7630.i, %bb7555.i, %bb7549.i, %bb7471.i, %bb7463.i, %bb7447.i, %bb7369.i, %bb7339.i, %bb7332.i, %bb7252.i, %bb7243.i, %bb7192.i, %bb7149.i, %bb7119.i, %bb7075.i, %bb6942.i, %bb6861.i, %bb6845.i, %bb6815.i, %bb6811.i, %bb6778.i, %bb6774.i, %bb6741.i, %bb6651.i, %bb6551.i, %bb6471.i, %bb6317.i, %bb6245.i, %bb6239.i, %bb6166.i, %bb6123.i, %bb6119.i, %bb6086.i, %bb6082.i, %bb6049.i, %bb6045.i, %bb6012.i, %bb5920.i, %bb5845.i, %bb5772.i, %bb5692.i, %bb5602.i
- switch i32 0, label %bb9310.i [
- i32 3, label %bb9301.i
- i32 4, label %bb8670.i
- i32 8, label %bb9112.i
- i32 11, label %bb9153.i
- ]
-bb8670.i: ; preds = %bb8668.i, %bb8518.i
- br label %bb9310.i
-bb9112.i: ; preds = %bb8668.i, %bb8600.i, %bb8518.i
- br label %bb9310.i
-bb9153.i: ; preds = %bb8668.i, %bb8600.i, %bb8588.i, %bb8557.i, %bb8518.i, %bb8490.i, %bb8465.i
- br label %bb9310.i
-bb9301.i: ; preds = %bb8668.i, %bb8600.i, %bb8588.i, %bb8557.i, %bb8518.i, %bb8490.i, %bb8465.i, %bb8436.i
- br label %bb9310.i
-bb9310.i: ; preds = %bb9301.i, %bb9153.i, %bb9112.i, %bb8670.i, %bb8668.i, %bb8600.i, %bb8588.i, %bb8557.i, %bb8518.i, %bb8490.i, %bb8465.i, %bb8436.i
- br i1 false, label %bb16581.i, label %bb9313.i
-bb9313.i: ; preds = %bb9310.i
- switch i32 %dt4080.0.i, label %bb16578.i [
- i32 0, label %bb9315.i
- i32 1, label %bb9890.i
- i32 2, label %bb10465.i
- i32 3, label %bb11040.i
- i32 4, label %bb11615.i
- i32 5, label %bb11823.i
- i32 8, label %bb12398.i
- i32 9, label %bb12833.i
- i32 10, label %bb13268.i
- i32 11, label %bb13268.i
- i32 12, label %bb13703.i
- i32 13, label %bb13703.i
- i32 14, label %bb14278.i
- i32 15, label %bb14853.i
- i32 16, label %bb9315.i
- i32 17, label %bb9315.i
- i32 18, label %bb15428.i
- i32 19, label %bb16003.i
- ]
-bb9315.i: ; preds = %bb9313.i, %bb9313.i, %bb9313.i
- br i1 false, label %bb9535.i, label %bb9323.i
-bb9323.i: ; preds = %bb9315.i
- br label %bb9535.i
-bb9535.i: ; preds = %bb9323.i, %bb9315.i
- br label %bb16581.i
-bb9890.i: ; preds = %bb9313.i
- br i1 false, label %bb10255.i, label %bb9898.i
-bb9898.i: ; preds = %bb9890.i
- br label %bb10255.i
-bb10255.i: ; preds = %bb9898.i, %bb9890.i
- br label %bb16581.i
-bb10465.i: ; preds = %bb9313.i
- br i1 false, label %bb10685.i, label %bb10473.i
-bb10473.i: ; preds = %bb10465.i
- br label %bb10685.i
-bb10685.i: ; preds = %bb10473.i, %bb10465.i
- br label %bb16581.i
-bb11040.i: ; preds = %bb9313.i
- br i1 false, label %bb11405.i, label %bb11048.i
-bb11048.i: ; preds = %bb11040.i
- br label %bb11405.i
-bb11405.i: ; preds = %bb11048.i, %bb11040.i
- br label %bb16581.i
-bb11615.i: ; preds = %bb9313.i
- br i1 false, label %bb16581.i, label %bb11618.i
-bb11618.i: ; preds = %bb11615.i
- br label %bb16581.i
-bb11823.i: ; preds = %bb9313.i
- br i1 false, label %bb12188.i, label %bb11831.i
-bb11831.i: ; preds = %bb11823.i
- br label %bb12188.i
-bb12188.i: ; preds = %bb11831.i, %bb11823.i
- br label %bb16581.i
-bb12398.i: ; preds = %bb9313.i
- br i1 false, label %bb12566.i, label %bb12406.i
-bb12406.i: ; preds = %bb12398.i
- br label %bb12566.i
-bb12566.i: ; preds = %bb12406.i, %bb12398.i
- br label %bb16581.i
-bb12833.i: ; preds = %bb9313.i
- br i1 false, label %bb13001.i, label %bb12841.i
-bb12841.i: ; preds = %bb12833.i
- br label %bb13001.i
-bb13001.i: ; preds = %bb12841.i, %bb12833.i
- br label %bb16581.i
-bb13268.i: ; preds = %bb9313.i, %bb9313.i
- br i1 false, label %bb13436.i, label %bb13276.i
-bb13276.i: ; preds = %bb13268.i
- br label %bb13436.i
-bb13436.i: ; preds = %bb13276.i, %bb13268.i
- br label %bb16581.i
-bb13703.i: ; preds = %bb9313.i, %bb9313.i
- br i1 false, label %bb13923.i, label %bb13711.i
-bb13711.i: ; preds = %bb13703.i
- br label %bb13923.i
-bb13923.i: ; preds = %bb13711.i, %bb13703.i
- br label %bb16581.i
-bb14278.i: ; preds = %bb9313.i
- br i1 false, label %bb14498.i, label %bb14286.i
-bb14286.i: ; preds = %bb14278.i
- br label %bb14498.i
-bb14498.i: ; preds = %bb14286.i, %bb14278.i
- br label %bb16581.i
-bb14853.i: ; preds = %bb9313.i
- br i1 false, label %bb15073.i, label %bb14861.i
-bb14861.i: ; preds = %bb14853.i
- br label %bb15073.i
-bb15073.i: ; preds = %bb14861.i, %bb14853.i
- br label %bb16581.i
-bb15428.i: ; preds = %bb9313.i
- br i1 false, label %bb15648.i, label %bb15436.i
-bb15436.i: ; preds = %bb15428.i
- br label %bb15648.i
-bb15648.i: ; preds = %bb15436.i, %bb15428.i
- br label %bb16581.i
-bb16003.i: ; preds = %bb9313.i
- br i1 false, label %bb16223.i, label %bb16011.i
-bb16011.i: ; preds = %bb16003.i
- br label %bb16223.i
-bb16223.i: ; preds = %bb16011.i, %bb16003.i
- br label %bb16581.i
-bb16578.i: ; preds = %bb9313.i
- unreachable
-bb16581.i: ; preds = %bb16223.i, %bb15648.i, %bb15073.i, %bb14498.i, %bb13923.i, %bb13436.i, %bb13001.i, %bb12566.i, %bb12188.i, %bb11618.i, %bb11615.i, %bb11405.i, %bb10685.i, %bb10255.i, %bb9535.i, %bb9310.i, %glgVectorFloatConversion.exit
- br label %storeVecColor_RGB_UI.exit
-storeVecColor_RGB_UI.exit: ; preds = %bb16581.i
- br i1 false, label %bb5295.i, label %bb16621.i
-bb16607.i: ; preds = %bb5276.i
- br i1 false, label %bb5295.preheader.i, label %bb16621.i
-bb5295.preheader.i: ; preds = %bb16607.i
- br label %bb5295.i
-bb16621.i: ; preds = %bb16607.i, %storeVecColor_RGB_UI.exit
- br label %bb16650.outer.i
-bb16650.outer.i: ; preds = %bb16621.i
- br label %bb16650.i
-bb16650.i: ; preds = %storeColor_RGB_UI.exit, %bb16650.outer.i
- br label %loadColor_BGRA_UI8888R.exit
-loadColor_BGRA_UI8888R.exit: ; preds = %bb16650.i
- br i1 false, label %bb16671.i, label %bb16697.i
-bb16671.i: ; preds = %loadColor_BGRA_UI8888R.exit
- br i1 false, label %bb.i179, label %bb662.i
-bb.i179: ; preds = %bb16671.i
- switch i32 0, label %bb513.i [
- i32 7, label %bb418.i
- i32 6, label %bb433.i
- ]
-bb418.i: ; preds = %bb.i179
- br label %bb559.i
-bb433.i: ; preds = %bb.i179
- switch i32 0, label %bb493.i [
- i32 31744, label %bb455.i
- i32 0, label %bb471.i
- ]
-bb455.i: ; preds = %bb433.i
- br i1 false, label %bb463.i, label %bb504.i
-bb463.i: ; preds = %bb455.i
- br label %bb559.i
-bb471.i: ; preds = %bb433.i
- br i1 false, label %bb497.i, label %bb484.preheader.i
-bb484.preheader.i: ; preds = %bb471.i
- br i1 false, label %bb479.i, label %bb490.i
-bb479.i: ; preds = %bb479.i, %bb484.preheader.i
- br i1 false, label %bb479.i, label %bb490.i
-bb490.i: ; preds = %bb479.i, %bb484.preheader.i
- br label %bb559.i
-bb493.i: ; preds = %bb433.i
- br label %bb497.i
-bb497.i: ; preds = %bb493.i, %bb471.i
- br label %bb504.i
-bb504.i: ; preds = %bb497.i, %bb455.i
- br label %bb513.i
-bb513.i: ; preds = %bb504.i, %bb.i179
- br label %bb559.i
-bb559.i: ; preds = %bb513.i, %bb490.i, %bb463.i, %bb418.i
- br i1 false, label %bb2793.i, label %bb614.i
-bb614.i: ; preds = %bb559.i
- br i1 false, label %bb626.i, label %bb620.i
-bb620.i: ; preds = %bb614.i
- br i1 false, label %bb625.i, label %bb626.i
-bb625.i: ; preds = %bb620.i
- br label %bb626.i
-bb626.i: ; preds = %bb625.i, %bb620.i, %bb614.i
- br i1 false, label %bb638.i, label %bb632.i
-bb632.i: ; preds = %bb626.i
- br i1 false, label %bb637.i, label %bb638.i
-bb637.i: ; preds = %bb632.i
- br label %bb638.i
-bb638.i: ; preds = %bb637.i, %bb632.i, %bb626.i
- br i1 false, label %bb650.i, label %bb644.i
-bb644.i: ; preds = %bb638.i
- br i1 false, label %bb649.i, label %bb650.i
-bb649.i: ; preds = %bb644.i
- br label %bb650.i
-bb650.i: ; preds = %bb649.i, %bb644.i, %bb638.i
- br i1 false, label %bb2793.i, label %bb656.i
-bb656.i: ; preds = %bb650.i
- br i1 false, label %bb661.i, label %bb2793.i
-bb661.i: ; preds = %bb656.i
- switch i32 0, label %bb2883.i [
- i32 3, label %bb2874.i
- i32 4, label %bb2795.i
- i32 8, label %bb2810.i
- i32 10, label %bb2834.i
- i32 11, label %bb2819.i
- i32 16, label %bb2810.i
- ]
-bb662.i: ; preds = %bb16671.i
- switch i32 0, label %bb1937.i [
- i32 3, label %bb902.i
- i32 4, label %bb1416.i
- i32 8, label %bb1020.i
- i32 10, label %bb902.i
- i32 11, label %bb784.i
- i32 16, label %bb664.i
- ]
-bb664.i: ; preds = %bb662.i
- br i1 false, label %bb682.i, label %bb669.i
-bb669.i: ; preds = %bb664.i
- br label %bb710.i
-bb682.i: ; preds = %bb664.i
- br label %bb710.i
-bb710.i: ; preds = %bb682.i, %bb669.i
- br i1 false, label %bb760.i, label %bb754.i
-bb754.i: ; preds = %bb710.i
- br i1 false, label %bb759.i, label %bb760.i
-bb759.i: ; preds = %bb754.i
- br label %bb760.i
-bb760.i: ; preds = %bb759.i, %bb754.i, %bb710.i
- br i1 false, label %bb772.i, label %bb766.i
-bb766.i: ; preds = %bb760.i
- br i1 false, label %bb771.i, label %bb772.i
-bb771.i: ; preds = %bb766.i
- br label %bb772.i
-bb772.i: ; preds = %bb771.i, %bb766.i, %bb760.i
- br i1 false, label %bb1937.i, label %bb778.i
-bb778.i: ; preds = %bb772.i
- br i1 false, label %bb783.i, label %bb1937.i
-bb783.i: ; preds = %bb778.i
- br label %bb1937.i
-bb784.i: ; preds = %bb662.i
- switch i32 0, label %bb892.i [
- i32 1, label %bb868.i
- i32 3, label %bb868.i
- i32 4, label %bb882.i
- i32 6, label %bb792.i
- i32 7, label %bb786.i
- ]
-bb786.i: ; preds = %bb784.i
- br label %bb904.i
-bb792.i: ; preds = %bb784.i
- switch i32 0, label %bb852.i [
- i32 31744, label %bb814.i
- i32 0, label %bb830.i
- ]
-bb814.i: ; preds = %bb792.i
- br i1 false, label %bb822.i, label %bb863.i
-bb822.i: ; preds = %bb814.i
- switch i32 0, label %bb1010.i [
- i32 1, label %bb986.i
- i32 3, label %bb986.i
- i32 4, label %bb1000.i
- i32 6, label %bb910.i
- i32 7, label %bb904.i
- ]
-bb830.i: ; preds = %bb792.i
- br i1 false, label %bb856.i, label %bb843.preheader.i
-bb843.preheader.i: ; preds = %bb830.i
- br i1 false, label %bb838.i, label %bb849.i
-bb838.i: ; preds = %bb838.i, %bb843.preheader.i
- br i1 false, label %bb838.i, label %bb849.i
-bb849.i: ; preds = %bb838.i, %bb843.preheader.i
- switch i32 0, label %bb1010.i [
- i32 1, label %bb986.i
- i32 3, label %bb986.i
- i32 4, label %bb1000.i
- i32 6, label %bb910.i
- i32 7, label %bb904.i
- ]
-bb852.i: ; preds = %bb792.i
- br label %bb856.i
-bb856.i: ; preds = %bb852.i, %bb830.i
- switch i32 0, label %bb1010.i [
- i32 1, label %bb986.i
- i32 3, label %bb986.i
- i32 4, label %bb1000.i
- i32 6, label %bb910.i
- i32 7, label %bb904.i
- ]
-bb863.i: ; preds = %bb814.i
- switch i32 0, label %bb1010.i [
- i32 1, label %bb986.i
- i32 3, label %bb986.i
- i32 4, label %bb1000.i
- i32 6, label %bb910.i
- i32 7, label %bb904.i
- ]
-bb868.i: ; preds = %bb784.i, %bb784.i
- switch i32 0, label %bb1010.i [
- i32 1, label %bb986.i
- i32 3, label %bb986.i
- i32 4, label %bb1000.i
- i32 6, label %bb910.i
- i32 7, label %bb904.i
- ]
-bb882.i: ; preds = %bb784.i
- br label %bb1000.i
-bb892.i: ; preds = %bb784.i
- br label %bb902.i
-bb902.i: ; preds = %bb892.i, %bb662.i, %bb662.i
- switch i32 0, label %bb1010.i [
- i32 1, label %bb986.i
- i32 3, label %bb986.i
- i32 4, label %bb1000.i
- i32 6, label %bb910.i
- i32 7, label %bb904.i
- ]
-bb904.i: ; preds = %bb902.i, %bb868.i, %bb863.i, %bb856.i, %bb849.i, %bb822.i, %bb786.i
- br label %bb1937.i
-bb910.i: ; preds = %bb902.i, %bb868.i, %bb863.i, %bb856.i, %bb849.i, %bb822.i
- switch i32 0, label %bb970.i [
- i32 31744, label %bb932.i
- i32 0, label %bb948.i
- ]
-bb932.i: ; preds = %bb910.i
- br i1 false, label %bb940.i, label %bb981.i
-bb940.i: ; preds = %bb932.i
- br label %bb1937.i
-bb948.i: ; preds = %bb910.i
- br i1 false, label %bb974.i, label %bb961.preheader.i
-bb961.preheader.i: ; preds = %bb948.i
- br i1 false, label %bb956.i, label %bb967.i
-bb956.i: ; preds = %bb956.i, %bb961.preheader.i
- br i1 false, label %bb956.i, label %bb967.i
-bb967.i: ; preds = %bb956.i, %bb961.preheader.i
- br label %bb1937.i
-bb970.i: ; preds = %bb910.i
- br label %bb974.i
-bb974.i: ; preds = %bb970.i, %bb948.i
- br label %bb1937.i
-bb981.i: ; preds = %bb932.i
- br label %bb1937.i
-bb986.i: ; preds = %bb902.i, %bb902.i, %bb868.i, %bb868.i, %bb863.i, %bb863.i, %bb856.i, %bb856.i, %bb849.i, %bb849.i, %bb822.i, %bb822.i
- br label %bb1937.i
-bb1000.i: ; preds = %bb902.i, %bb882.i, %bb868.i, %bb863.i, %bb856.i, %bb849.i, %bb822.i
- br label %bb1937.i
-bb1010.i: ; preds = %bb902.i, %bb868.i, %bb863.i, %bb856.i, %bb849.i, %bb822.i
- br label %bb1937.i
-bb1020.i: ; preds = %bb662.i
- switch i32 0, label %bb1388.i [
- i32 1, label %bb1264.i
- i32 3, label %bb1264.i
- i32 4, label %bb1304.i
- i32 6, label %bb1038.i
- i32 7, label %bb1022.i
- i32 8, label %bb1332.i
- i32 9, label %bb1332.i
- i32 10, label %bb1360.i
- i32 11, label %bb1360.i
- ]
-bb1022.i: ; preds = %bb1020.i
- br label %bb1937.i
-bb1038.i: ; preds = %bb1020.i
- switch i32 0, label %bb1098.i [
- i32 31744, label %bb1060.i
- i32 0, label %bb1076.i
- ]
-bb1060.i: ; preds = %bb1038.i
- br i1 false, label %bb1068.i, label %bb1109.i
-bb1068.i: ; preds = %bb1060.i
- br label %bb1109.i
-bb1076.i: ; preds = %bb1038.i
- br i1 false, label %bb1102.i, label %bb1089.preheader.i
-bb1089.preheader.i: ; preds = %bb1076.i
- br i1 false, label %bb1084.i, label %bb1095.i
-bb1084.i: ; preds = %bb1084.i, %bb1089.preheader.i
- br i1 false, label %bb1084.i, label %bb1095.i
-bb1095.i: ; preds = %bb1084.i, %bb1089.preheader.i
- br label %bb1109.i
-bb1098.i: ; preds = %bb1038.i
- br label %bb1102.i
-bb1102.i: ; preds = %bb1098.i, %bb1076.i
- br label %bb1109.i
-bb1109.i: ; preds = %bb1102.i, %bb1095.i, %bb1068.i, %bb1060.i
- switch i32 0, label %bb1173.i [
- i32 31744, label %bb1135.i
- i32 0, label %bb1151.i
- ]
-bb1135.i: ; preds = %bb1109.i
- br i1 false, label %bb1143.i, label %bb1184.i
-bb1143.i: ; preds = %bb1135.i
- br label %bb1184.i
-bb1151.i: ; preds = %bb1109.i
- br i1 false, label %bb1177.i, label %bb1164.preheader.i
-bb1164.preheader.i: ; preds = %bb1151.i
- br i1 false, label %bb1159.i, label %bb1170.i
-bb1159.i: ; preds = %bb1159.i, %bb1164.preheader.i
- br i1 false, label %bb1159.i, label %bb1170.i
-bb1170.i: ; preds = %bb1159.i, %bb1164.preheader.i
- br label %bb1184.i
-bb1173.i: ; preds = %bb1109.i
- br label %bb1177.i
-bb1177.i: ; preds = %bb1173.i, %bb1151.i
- br label %bb1184.i
-bb1184.i: ; preds = %bb1177.i, %bb1170.i, %bb1143.i, %bb1135.i
- switch i32 0, label %bb1248.i [
- i32 31744, label %bb1210.i
- i32 0, label %bb1226.i
- ]
-bb1210.i: ; preds = %bb1184.i
- br i1 false, label %bb1218.i, label %bb1259.i
-bb1218.i: ; preds = %bb1210.i
- br label %bb1937.i
-bb1226.i: ; preds = %bb1184.i
- br i1 false, label %bb1252.i, label %bb1239.preheader.i
-bb1239.preheader.i: ; preds = %bb1226.i
- br i1 false, label %bb1234.i, label %bb1245.i
-bb1234.i: ; preds = %bb1234.i, %bb1239.preheader.i
- br i1 false, label %bb1234.i, label %bb1245.i
-bb1245.i: ; preds = %bb1234.i, %bb1239.preheader.i
- br label %bb1937.i
-bb1248.i: ; preds = %bb1184.i
- br label %bb1252.i
-bb1252.i: ; preds = %bb1248.i, %bb1226.i
- br label %bb1937.i
-bb1259.i: ; preds = %bb1210.i
- br label %bb1937.i
-bb1264.i: ; preds = %bb1020.i, %bb1020.i
- br label %bb1937.i
-bb1304.i: ; preds = %bb1020.i
- br label %bb1937.i
-bb1332.i: ; preds = %bb1020.i, %bb1020.i
- br label %bb1937.i
-bb1360.i: ; preds = %bb1020.i, %bb1020.i
- br label %bb1937.i
-bb1388.i: ; preds = %bb1020.i
- br label %bb1937.i
-bb1416.i: ; preds = %bb662.i
- switch i32 0, label %bb1900.i [
- i32 1, label %bb1740.i
- i32 3, label %bb1740.i
- i32 4, label %bb1793.i
- i32 6, label %bb1439.i
- i32 7, label %bb1418.i
- i32 14, label %bb1830.i
- i32 15, label %bb1830.i
- i32 18, label %bb1863.i
- i32 19, label %bb1863.i
- ]
-bb1418.i: ; preds = %bb1416.i
- br label %bb1937.i
-bb1439.i: ; preds = %bb1416.i
- switch i32 0, label %bb1499.i [
- i32 31744, label %bb1461.i
- i32 0, label %bb1477.i
- ]
-bb1461.i: ; preds = %bb1439.i
- br i1 false, label %bb1469.i, label %bb1510.i
-bb1469.i: ; preds = %bb1461.i
- br label %bb1510.i
-bb1477.i: ; preds = %bb1439.i
- br i1 false, label %bb1503.i, label %bb1490.preheader.i
-bb1490.preheader.i: ; preds = %bb1477.i
- br i1 false, label %bb1485.i, label %bb1496.i
-bb1485.i: ; preds = %bb1485.i, %bb1490.preheader.i
- br i1 false, label %bb1485.i, label %bb1496.i
-bb1496.i: ; preds = %bb1485.i, %bb1490.preheader.i
- br label %bb1510.i
-bb1499.i: ; preds = %bb1439.i
- br label %bb1503.i
-bb1503.i: ; preds = %bb1499.i, %bb1477.i
- br label %bb1510.i
-bb1510.i: ; preds = %bb1503.i, %bb1496.i, %bb1469.i, %bb1461.i
- switch i32 0, label %bb1574.i [
- i32 31744, label %bb1536.i
- i32 0, label %bb1552.i
- ]
-bb1536.i: ; preds = %bb1510.i
- br i1 false, label %bb1544.i, label %bb1585.i
-bb1544.i: ; preds = %bb1536.i
- br label %bb1585.i
-bb1552.i: ; preds = %bb1510.i
- br i1 false, label %bb1578.i, label %bb1565.preheader.i
-bb1565.preheader.i: ; preds = %bb1552.i
- br i1 false, label %bb1560.i, label %bb1571.i
-bb1560.i: ; preds = %bb1560.i, %bb1565.preheader.i
- br i1 false, label %bb1560.i, label %bb1571.i
-bb1571.i: ; preds = %bb1560.i, %bb1565.preheader.i
- br label %bb1585.i
-bb1574.i: ; preds = %bb1510.i
- br label %bb1578.i
-bb1578.i: ; preds = %bb1574.i, %bb1552.i
- br label %bb1585.i
-bb1585.i: ; preds = %bb1578.i, %bb1571.i, %bb1544.i, %bb1536.i
- switch i32 0, label %bb1649.i [
- i32 31744, label %bb1611.i
- i32 0, label %bb1627.i
- ]
-bb1611.i: ; preds = %bb1585.i
- br i1 false, label %bb1619.i, label %bb1660.i
-bb1619.i: ; preds = %bb1611.i
- br label %bb1660.i
-bb1627.i: ; preds = %bb1585.i
- br i1 false, label %bb1653.i, label %bb1640.preheader.i
-bb1640.preheader.i: ; preds = %bb1627.i
- br i1 false, label %bb1635.i, label %bb1646.i
-bb1635.i: ; preds = %bb1635.i, %bb1640.preheader.i
- br i1 false, label %bb1635.i, label %bb1646.i
-bb1646.i: ; preds = %bb1635.i, %bb1640.preheader.i
- br label %bb1660.i
-bb1649.i: ; preds = %bb1585.i
- br label %bb1653.i
-bb1653.i: ; preds = %bb1649.i, %bb1627.i
- br label %bb1660.i
-bb1660.i: ; preds = %bb1653.i, %bb1646.i, %bb1619.i, %bb1611.i
- switch i32 0, label %bb1724.i [
- i32 31744, label %bb1686.i
- i32 0, label %bb1702.i
- ]
-bb1686.i: ; preds = %bb1660.i
- br i1 false, label %bb1694.i, label %bb1735.i
-bb1694.i: ; preds = %bb1686.i
- br label %bb1937.i
-bb1702.i: ; preds = %bb1660.i
- br i1 false, label %bb1728.i, label %bb1715.preheader.i
-bb1715.preheader.i: ; preds = %bb1702.i
- br i1 false, label %bb1710.i, label %bb1721.i
-bb1710.i: ; preds = %bb1710.i, %bb1715.preheader.i
- br i1 false, label %bb1710.i, label %bb1721.i
-bb1721.i: ; preds = %bb1710.i, %bb1715.preheader.i
- br label %bb1937.i
-bb1724.i: ; preds = %bb1660.i
- br label %bb1728.i
-bb1728.i: ; preds = %bb1724.i, %bb1702.i
- br label %bb1937.i
-bb1735.i: ; preds = %bb1686.i
- br label %bb1937.i
-bb1740.i: ; preds = %bb1416.i, %bb1416.i
- br label %bb1937.i
-bb1793.i: ; preds = %bb1416.i
- br label %bb1937.i
-bb1830.i: ; preds = %bb1416.i, %bb1416.i
- br label %bb1937.i
-bb1863.i: ; preds = %bb1416.i, %bb1416.i
- br label %bb1937.i
-bb1900.i: ; preds = %bb1416.i
- br label %bb1937.i
-bb1937.i: ; preds = %bb1900.i, %bb1863.i, %bb1830.i, %bb1793.i, %bb1740.i, %bb1735.i, %bb1728.i, %bb1721.i, %bb1694.i, %bb1418.i, %bb1388.i, %bb1360.i, %bb1332.i, %bb1304.i, %bb1264.i, %bb1259.i, %bb1252.i, %bb1245.i, %bb1218.i, %bb1022.i, %bb1010.i, %bb1000.i, %bb986.i, %bb981.i, %bb974.i, %bb967.i, %bb940.i, %bb904.i, %bb783.i, %bb778.i, %bb772.i, %bb662.i
- switch i32 %sf4083.0.i, label %bb2321.i [
- i32 0, label %bb2027.i
- i32 1, label %bb2081.i
- i32 2, label %bb2161.i
- i32 3, label %bb2241.i
- i32 8, label %bb1939.i
- i32 9, label %bb1939.i
- i32 10, label %bb1957.i
- i32 11, label %bb1975.i
- i32 16, label %bb1939.i
- ]
-bb1939.i: ; preds = %bb1937.i, %bb1937.i, %bb1937.i
- switch i32 0, label %bb2321.i [
- i32 3, label %bb1956.i
- i32 4, label %bb1956.i
- i32 11, label %bb1956.i
- ]
-bb1956.i: ; preds = %bb1939.i, %bb1939.i, %bb1939.i
- br label %bb2337.i
-bb1957.i: ; preds = %bb1937.i
- switch i32 0, label %bb1975.i [
- i32 3, label %bb1974.i
- i32 4, label %bb1974.i
- i32 11, label %bb1974.i
- ]
-bb1974.i: ; preds = %bb1957.i, %bb1957.i, %bb1957.i
- br label %bb1975.i
-bb1975.i: ; preds = %bb1974.i, %bb1957.i, %bb1937.i
- switch i32 0, label %bb2001.i [
- i32 1, label %bb1992.i
- i32 4, label %bb1992.i
- i32 8, label %bb1992.i
- ]
-bb1992.i: ; preds = %bb1975.i, %bb1975.i, %bb1975.i
- br label %bb2001.i
-bb2001.i: ; preds = %bb1992.i, %bb1975.i
- switch i32 0, label %bb2321.i [
- i32 2, label %bb2018.i
- i32 4, label %bb2018.i
- i32 8, label %bb2018.i
- ]
-bb2018.i: ; preds = %bb2001.i, %bb2001.i, %bb2001.i
- br label %bb2321.i
-bb2027.i: ; preds = %bb1937.i
- switch i32 0, label %bb2045.i [
- i32 1, label %bb2044.i
- i32 4, label %bb2044.i
- i32 8, label %bb2044.i
- ]
-bb2044.i: ; preds = %bb2027.i, %bb2027.i, %bb2027.i
- br label %bb2045.i
-bb2045.i: ; preds = %bb2044.i, %bb2027.i
- switch i32 0, label %bb2063.i [
- i32 2, label %bb2062.i
- i32 4, label %bb2062.i
- i32 8, label %bb2062.i
- ]
-bb2062.i: ; preds = %bb2045.i, %bb2045.i, %bb2045.i
- br label %bb2063.i
-bb2063.i: ; preds = %bb2062.i, %bb2045.i
- switch i32 0, label %bb2321.i [
- i32 3, label %bb2080.i
- i32 4, label %bb2080.i
- i32 11, label %bb2080.i
- ]
-bb2080.i: ; preds = %bb2063.i, %bb2063.i, %bb2063.i
- br label %bb2321.i
-bb2081.i: ; preds = %bb1937.i
- switch i32 0, label %bb2100.i [
- i32 1, label %bb2098.i
- i32 4, label %bb2098.i
- i32 8, label %bb2098.i
- ]
-bb2098.i: ; preds = %bb2081.i, %bb2081.i, %bb2081.i
- br label %bb2100.i
-bb2100.i: ; preds = %bb2098.i, %bb2081.i
- switch i32 0, label %bb2125.i [
- i32 4, label %bb2124.i
- i32 8, label %bb2124.i
- i32 0, label %bb2124.i
- i32 11, label %bb2124.i
- ]
-bb2124.i: ; preds = %bb2100.i, %bb2100.i, %bb2100.i, %bb2100.i
- br label %bb2125.i
-bb2125.i: ; preds = %bb2124.i, %bb2100.i
- switch i32 0, label %bb2143.i [
- i32 2, label %bb2142.i
- i32 4, label %bb2142.i
- i32 8, label %bb2142.i
- ]
-bb2142.i: ; preds = %bb2125.i, %bb2125.i, %bb2125.i
- br label %bb2143.i
-bb2143.i: ; preds = %bb2142.i, %bb2125.i
- switch i32 0, label %bb2321.i [
- i32 3, label %bb2160.i
- i32 4, label %bb2160.i
- i32 11, label %bb2160.i
- ]
-bb2160.i: ; preds = %bb2143.i, %bb2143.i, %bb2143.i
- br label %bb2321.i
-bb2161.i: ; preds = %bb1937.i
- switch i32 0, label %bb2180.i [
- i32 2, label %bb2178.i
- i32 4, label %bb2178.i
- i32 8, label %bb2178.i
- ]
-bb2178.i: ; preds = %bb2161.i, %bb2161.i, %bb2161.i
- br label %bb2180.i
-bb2180.i: ; preds = %bb2178.i, %bb2161.i
- switch i32 0, label %bb2205.i [
- i32 4, label %bb2204.i
- i32 8, label %bb2204.i
- i32 0, label %bb2204.i
- i32 11, label %bb2204.i
- ]
-bb2204.i: ; preds = %bb2180.i, %bb2180.i, %bb2180.i, %bb2180.i
- br label %bb2205.i
-bb2205.i: ; preds = %bb2204.i, %bb2180.i
- switch i32 0, label %bb2223.i [
- i32 1, label %bb2222.i
- i32 4, label %bb2222.i
- i32 8, label %bb2222.i
- ]
-bb2222.i: ; preds = %bb2205.i, %bb2205.i, %bb2205.i
- br label %bb2223.i
-bb2223.i: ; preds = %bb2222.i, %bb2205.i
- switch i32 0, label %bb2321.i [
- i32 3, label %bb2240.i
- i32 4, label %bb2240.i
- i32 11, label %bb2240.i
- ]
-bb2240.i: ; preds = %bb2223.i, %bb2223.i, %bb2223.i
- br label %bb2321.i
-bb2241.i: ; preds = %bb1937.i
- switch i32 0, label %bb2260.i [
- i32 3, label %bb2258.i
- i32 4, label %bb2258.i
- i32 11, label %bb2258.i
- ]
-bb2258.i: ; preds = %bb2241.i, %bb2241.i, %bb2241.i
- br label %bb2260.i
-bb2260.i: ; preds = %bb2258.i, %bb2241.i
- switch i32 0, label %bb2285.i [
- i32 4, label %bb2284.i
- i32 11, label %bb2284.i
- i32 0, label %bb2284.i
- i32 8, label %bb2284.i
- ]
-bb2284.i: ; preds = %bb2260.i, %bb2260.i, %bb2260.i, %bb2260.i
- br label %bb2285.i
-bb2285.i: ; preds = %bb2284.i, %bb2260.i
- switch i32 0, label %bb2303.i [
- i32 1, label %bb2302.i
- i32 4, label %bb2302.i
- i32 8, label %bb2302.i
- ]
-bb2302.i: ; preds = %bb2285.i, %bb2285.i, %bb2285.i
- br label %bb2303.i
-bb2303.i: ; preds = %bb2302.i, %bb2285.i
- switch i32 0, label %bb2321.i [
- i32 2, label %bb2320.i
- i32 4, label %bb2320.i
- i32 8, label %bb2320.i
- ]
-bb2320.i: ; preds = %bb2303.i, %bb2303.i, %bb2303.i
- br label %bb2321.i
-bb2321.i: ; preds = %bb2320.i, %bb2303.i, %bb2240.i, %bb2223.i, %bb2160.i, %bb2143.i, %bb2080.i, %bb2063.i, %bb2018.i, %bb2001.i, %bb1939.i, %bb1937.i
- br label %bb2337.i
-bb2337.i: ; preds = %bb2321.i, %bb1956.i
- br label %bb2353.i
-bb2353.i: ; preds = %bb2337.i
- br label %bb2369.i
-bb2369.i: ; preds = %bb2353.i
- br label %bb2385.i
-bb2385.i: ; preds = %bb2369.i
- br i1 false, label %bb2388.i, label %bb2394.i
-bb2388.i: ; preds = %bb2385.i
- br label %bb2600.i
-bb2394.i: ; preds = %bb2385.i
- switch i32 0, label %bb2600.i [
- i32 0, label %bb2504.i
- i32 1, label %bb2528.i
- i32 2, label %bb2552.i
- i32 3, label %bb2576.i
- i32 4, label %bb2396.i
- i32 8, label %bb2420.i
- i32 11, label %bb2480.i
- ]
-bb2396.i: ; preds = %bb2394.i
- br i1 false, label %bb2411.i, label %bb2399.i
-bb2399.i: ; preds = %bb2396.i
- br i1 false, label %bb2420.i, label %bb2405.i
-bb2405.i: ; preds = %bb2399.i
- br i1 false, label %bb2410.i, label %bb2420.i
-bb2410.i: ; preds = %bb2405.i
- br i1 false, label %bb2459.i, label %bb2423.i
-bb2411.i: ; preds = %bb2396.i
- br i1 false, label %bb2420.i, label %bb2414.i
-bb2414.i: ; preds = %bb2411.i
- br i1 false, label %bb2419.i, label %bb2420.i
-bb2419.i: ; preds = %bb2414.i
- br label %bb2420.i
-bb2420.i: ; preds = %bb2419.i, %bb2414.i, %bb2411.i, %bb2405.i, %bb2399.i, %bb2394.i
- br i1 false, label %bb2459.i, label %bb2423.i
-bb2423.i: ; preds = %bb2420.i, %bb2410.i
- br i1 false, label %bb2435.i, label %bb2429.i
-bb2429.i: ; preds = %bb2423.i
- br i1 false, label %bb2434.i, label %bb2435.i
-bb2434.i: ; preds = %bb2429.i
- br label %bb2435.i
-bb2435.i: ; preds = %bb2434.i, %bb2429.i, %bb2423.i
- br i1 false, label %bb2447.i, label %bb2441.i
-bb2441.i: ; preds = %bb2435.i
- br i1 false, label %bb2446.i, label %bb2447.i
-bb2446.i: ; preds = %bb2441.i
- br label %bb2447.i
-bb2447.i: ; preds = %bb2446.i, %bb2441.i, %bb2435.i
- br i1 false, label %bb2600.i, label %bb2453.i
-bb2453.i: ; preds = %bb2447.i
- br i1 false, label %bb2458.i, label %bb2600.i
-bb2458.i: ; preds = %bb2453.i
- br label %bb2793.i
-bb2459.i: ; preds = %bb2420.i, %bb2410.i
- br i1 false, label %bb2600.i, label %bb2462.i
-bb2462.i: ; preds = %bb2459.i
- br i1 false, label %bb2479.i, label %bb2600.i
-bb2479.i: ; preds = %bb2462.i
- br label %bb2600.i
-bb2480.i: ; preds = %bb2394.i
- br i1 false, label %bb2495.i, label %bb2483.i
-bb2483.i: ; preds = %bb2480.i
- br i1 false, label %bb2504.i, label %bb2489.i
-bb2489.i: ; preds = %bb2483.i
- br i1 false, label %bb2494.i, label %bb2504.i
-bb2494.i: ; preds = %bb2489.i
- br i1 false, label %bb2519.i, label %bb2507.i
-bb2495.i: ; preds = %bb2480.i
- br i1 false, label %bb2504.i, label %bb2498.i
-bb2498.i: ; preds = %bb2495.i
- br i1 false, label %bb2503.i, label %bb2504.i
-bb2503.i: ; preds = %bb2498.i
- br label %bb2504.i
-bb2504.i: ; preds = %bb2503.i, %bb2498.i, %bb2495.i, %bb2489.i, %bb2483.i, %bb2394.i
- br i1 false, label %bb2519.i, label %bb2507.i
-bb2507.i: ; preds = %bb2504.i, %bb2494.i
- br i1 false, label %bb2600.i, label %bb2513.i
-bb2513.i: ; preds = %bb2507.i
- br i1 false, label %bb2518.i, label %bb2600.i
-bb2518.i: ; preds = %bb2513.i
- br label %bb2600.i
-bb2519.i: ; preds = %bb2504.i, %bb2494.i
- br i1 false, label %bb2600.i, label %bb2522.i
-bb2522.i: ; preds = %bb2519.i
- br i1 false, label %bb2527.i, label %bb2600.i
-bb2527.i: ; preds = %bb2522.i
- br label %bb2600.i
-bb2528.i: ; preds = %bb2394.i
- br i1 false, label %bb2543.i, label %bb2531.i
-bb2531.i: ; preds = %bb2528.i
- br i1 false, label %bb2600.i, label %bb2537.i
-bb2537.i: ; preds = %bb2531.i
- br i1 false, label %bb2542.i, label %bb2600.i
-bb2542.i: ; preds = %bb2537.i
- br label %bb2600.i
-bb2543.i: ; preds = %bb2528.i
- br i1 false, label %bb2600.i, label %bb2546.i
-bb2546.i: ; preds = %bb2543.i
- br i1 false, label %bb2551.i, label %bb2600.i
-bb2551.i: ; preds = %bb2546.i
- br label %bb2600.i
-bb2552.i: ; preds = %bb2394.i
- br i1 false, label %bb2567.i, label %bb2555.i
-bb2555.i: ; preds = %bb2552.i
- br i1 false, label %bb2600.i, label %bb2561.i
-bb2561.i: ; preds = %bb2555.i
- br i1 false, label %bb2566.i, label %bb2600.i
-bb2566.i: ; preds = %bb2561.i
- br label %bb2600.i
-bb2567.i: ; preds = %bb2552.i
- br i1 false, label %bb2600.i, label %bb2570.i
-bb2570.i: ; preds = %bb2567.i
- br i1 false, label %bb2575.i, label %bb2600.i
-bb2575.i: ; preds = %bb2570.i
- br label %bb2600.i
-bb2576.i: ; preds = %bb2394.i
- br i1 false, label %bb2591.i, label %bb2579.i
-bb2579.i: ; preds = %bb2576.i
- br i1 false, label %bb2600.i, label %bb2585.i
-bb2585.i: ; preds = %bb2579.i
- br i1 false, label %bb2590.i, label %bb2600.i
-bb2590.i: ; preds = %bb2585.i
- br label %bb2600.i
-bb2591.i: ; preds = %bb2576.i
- br i1 false, label %bb2600.i, label %bb2594.i
-bb2594.i: ; preds = %bb2591.i
- br i1 false, label %bb2599.i, label %bb2600.i
-bb2599.i: ; preds = %bb2594.i
- br label %bb2600.i
-bb2600.i: ; preds = %bb2599.i, %bb2594.i, %bb2591.i, %bb2590.i, %bb2585.i, %bb2579.i, %bb2575.i, %bb2570.i, %bb2567.i, %bb2566.i, %bb2561.i, %bb2555.i, %bb2551.i, %bb2546.i, %bb2543.i, %bb2542.i, %bb2537.i, %bb2531.i, %bb2527.i, %bb2522.i, %bb2519.i, %bb2518.i, %bb2513.i, %bb2507.i, %bb2479.i, %bb2462.i, %bb2459.i, %bb2453.i, %bb2447.i, %bb2394.i, %bb2388.i
- br label %bb2793.i
-bb2793.i: ; preds = %bb2600.i, %bb2458.i, %bb656.i, %bb650.i, %bb559.i
- switch i32 0, label %bb2883.i [
- i32 3, label %bb2874.i
- i32 4, label %bb2795.i
- i32 8, label %bb2810.i
- i32 10, label %bb2834.i
- i32 11, label %bb2819.i
- i32 16, label %bb2810.i
- ]
-bb2795.i: ; preds = %bb2793.i, %bb661.i
- br label %bb2810.i
-bb2810.i: ; preds = %bb2795.i, %bb2793.i, %bb2793.i, %bb661.i, %bb661.i
- br label %bb2883.i
-bb2819.i: ; preds = %bb2793.i, %bb661.i
- br label %bb2834.i
-bb2834.i: ; preds = %bb2819.i, %bb2793.i, %bb661.i
- switch i32 0, label %bb2860.i [
- i32 4, label %bb2846.i
- i32 8, label %bb2846.i
- ]
-bb2846.i: ; preds = %bb2834.i, %bb2834.i
- br i1 false, label %bb2859.i, label %bb2860.i
-bb2859.i: ; preds = %bb2846.i
- br label %bb2860.i
-bb2860.i: ; preds = %bb2859.i, %bb2846.i, %bb2834.i
- switch i32 %df4081.0.i, label %bb2867.bb2883_crit_edge.i [
- i32 1, label %bb2883.i
- i32 2, label %bb2872.i
- ]
-bb2867.bb2883_crit_edge.i: ; preds = %bb2860.i
- br label %bb2883.i
-bb2872.i: ; preds = %bb2860.i
- switch i32 0, label %UnifiedReturnBlock.i235 [
- i32 3, label %bb3253.i
- i32 4, label %bb4173.i
- i32 8, label %bb3485.i
- i32 10, label %bb3253.i
- i32 11, label %bb3021.i
- i32 16, label %bb2885.i
- ]
-bb2874.i: ; preds = %bb2793.i, %bb661.i
- br label %bb2883.i
-bb2883.i: ; preds = %bb2874.i, %bb2867.bb2883_crit_edge.i, %bb2860.i, %bb2810.i, %bb2793.i, %bb661.i
- %f_alpha.1.i = phi i32 [ 0, %bb2867.bb2883_crit_edge.i ], [ 0, %bb2874.i ], [ 1065353216, %bb661.i ], [ 0, %bb2793.i ], [ 0, %bb2810.i ], [ 0, %bb2860.i ] ; <i32> [#uses=1]
- switch i32 0, label %UnifiedReturnBlock.i235 [
- i32 3, label %bb3253.i
- i32 4, label %bb4173.i
- i32 8, label %bb3485.i
- i32 10, label %bb3253.i
- i32 11, label %bb3021.i
- i32 16, label %bb2885.i
- ]
-bb2885.i: ; preds = %bb2883.i, %bb2872.i
- br i1 false, label %bb3011.i, label %bb2890.i
-bb2890.i: ; preds = %bb2885.i
- br i1 false, label %bb2960.i, label %bb2954.i
-bb2954.i: ; preds = %bb2890.i
- br i1 false, label %bb2959.i, label %bb2960.i
-bb2959.i: ; preds = %bb2954.i
- br label %bb2960.i
-bb2960.i: ; preds = %bb2959.i, %bb2954.i, %bb2890.i
- br i1 false, label %bb2972.i, label %bb2966.i
-bb2966.i: ; preds = %bb2960.i
- br i1 false, label %bb2971.i, label %bb2972.i
-bb2971.i: ; preds = %bb2966.i
- br label %bb2972.i
-bb2972.i: ; preds = %bb2971.i, %bb2966.i, %bb2960.i
- br label %glgScalarFloatConversion.exit
-bb3011.i: ; preds = %bb2885.i
- br label %glgScalarFloatConversion.exit
-bb3021.i: ; preds = %bb2883.i, %bb2872.i
- switch i32 %dt4080.0.i, label %bb3192.i [
- i32 7, label %bb3026.i
- i32 6, label %bb3037.i
- i32 1, label %bb3125.i
- i32 3, label %bb3125.i
- i32 5, label %bb3144.i
- ]
-bb3026.i: ; preds = %bb3021.i
- br label %bb3258.i
-bb3037.i: ; preds = %bb3021.i
- br i1 false, label %bb3052.i, label %bb3074.i
-bb3052.i: ; preds = %bb3037.i
- br i1 false, label %bb3105.i, label %bb3069.i
-bb3069.i: ; preds = %bb3052.i
- switch i32 %dt4080.0.i, label %bb3424.i [
- i32 7, label %bb3258.i
- i32 6, label %bb3269.i
- i32 1, label %bb3357.i
- i32 3, label %bb3357.i
- i32 5, label %bb3376.i
- ]
-bb3074.i: ; preds = %bb3037.i
- br i1 false, label %bb3079.i, label %bb3092.i
-bb3079.i: ; preds = %bb3074.i
- switch i32 %dt4080.0.i, label %bb3424.i [
- i32 7, label %bb3258.i
- i32 6, label %bb3269.i
- i32 1, label %bb3357.i
- i32 3, label %bb3357.i
- i32 5, label %bb3376.i
- ]
-bb3092.i: ; preds = %bb3074.i
- switch i32 %dt4080.0.i, label %bb3424.i [
- i32 7, label %bb3258.i
- i32 6, label %bb3269.i
- i32 1, label %bb3357.i
- i32 3, label %bb3357.i
- i32 5, label %bb3376.i
- ]
-bb3105.i: ; preds = %bb3052.i
- switch i32 %dt4080.0.i, label %bb3424.i [
- i32 7, label %bb3258.i
- i32 6, label %bb3269.i
- i32 1, label %bb3357.i
- i32 3, label %bb3357.i
- i32 5, label %bb3376.i
- ]
-bb3125.i: ; preds = %bb3021.i, %bb3021.i
- switch i32 %dt4080.0.i, label %bb3424.i [
- i32 7, label %bb3258.i
- i32 6, label %bb3269.i
- i32 1, label %bb3357.i
- i32 3, label %bb3357.i
- i32 5, label %bb3376.i
- ]
-bb3144.i: ; preds = %bb3021.i
- br label %bb3376.i
-bb3192.i: ; preds = %bb3021.i
- br i1 false, label %bb3197.i, label %bb3243.i
-bb3197.i: ; preds = %bb3192.i
- br label %bb3424.i
-bb3243.i: ; preds = %bb3192.i
- br label %bb3253.i
-bb3253.i: ; preds = %bb3243.i, %bb2883.i, %bb2883.i, %bb2872.i, %bb2872.i
- switch i32 %dt4080.0.i, label %bb3424.i [
- i32 7, label %bb3258.i
- i32 6, label %bb3269.i
- i32 1, label %bb3357.i
- i32 3, label %bb3357.i
- i32 5, label %bb3376.i
- ]
-bb3258.i: ; preds = %bb3253.i, %bb3125.i, %bb3105.i, %bb3092.i, %bb3079.i, %bb3069.i, %bb3026.i
- br label %glgScalarFloatConversion.exit
-bb3269.i: ; preds = %bb3253.i, %bb3125.i, %bb3105.i, %bb3092.i, %bb3079.i, %bb3069.i
- br i1 false, label %bb3284.i, label %bb3306.i
-bb3284.i: ; preds = %bb3269.i
- br i1 false, label %bb3337.i, label %bb3301.i
-bb3301.i: ; preds = %bb3284.i
- br label %glgScalarFloatConversion.exit
-bb3306.i: ; preds = %bb3269.i
- br i1 false, label %bb3311.i, label %bb3324.i
-bb3311.i: ; preds = %bb3306.i
- br label %glgScalarFloatConversion.exit
-bb3324.i: ; preds = %bb3306.i
- br label %glgScalarFloatConversion.exit
-bb3337.i: ; preds = %bb3284.i
- br label %glgScalarFloatConversion.exit
-bb3357.i: ; preds = %bb3253.i, %bb3253.i, %bb3125.i, %bb3125.i, %bb3105.i, %bb3105.i, %bb3092.i, %bb3092.i, %bb3079.i, %bb3079.i, %bb3069.i, %bb3069.i
- br label %glgScalarFloatConversion.exit
-bb3376.i: ; preds = %bb3253.i, %bb3144.i, %bb3125.i, %bb3105.i, %bb3092.i, %bb3079.i, %bb3069.i
- br label %glgScalarFloatConversion.exit
-bb3424.i: ; preds = %bb3253.i, %bb3197.i, %bb3125.i, %bb3105.i, %bb3092.i, %bb3079.i, %bb3069.i
- br i1 false, label %bb3429.i, label %bb3475.i
-bb3429.i: ; preds = %bb3424.i
- br label %glgScalarFloatConversion.exit
-bb3475.i: ; preds = %bb3424.i
- br label %glgScalarFloatConversion.exit
-bb3485.i: ; preds = %bb2883.i, %bb2872.i
- switch i32 %dt4080.0.i, label %bb4077.i [
- i32 7, label %bb3490.i
- i32 6, label %bb3511.i
- i32 1, label %bb3749.i
- i32 3, label %bb3749.i
- i32 5, label %bb3794.i
- i32 4, label %bb3941.i
- ]
-bb3490.i: ; preds = %bb3485.i
- br label %glgScalarFloatConversion.exit
-bb3511.i: ; preds = %bb3485.i
- br i1 false, label %bb3526.i, label %bb3548.i
-bb3526.i: ; preds = %bb3511.i
- br i1 false, label %bb3579.i, label %bb3543.i
-bb3543.i: ; preds = %bb3526.i
- br label %bb3579.i
-bb3548.i: ; preds = %bb3511.i
- br i1 false, label %bb3553.i, label %bb3566.i
-bb3553.i: ; preds = %bb3548.i
- br label %bb3579.i
-bb3566.i: ; preds = %bb3548.i
- br label %bb3579.i
-bb3579.i: ; preds = %bb3566.i, %bb3553.i, %bb3543.i, %bb3526.i
- br i1 false, label %bb3601.i, label %bb3623.i
-bb3601.i: ; preds = %bb3579.i
- br i1 false, label %bb3654.i, label %bb3618.i
-bb3618.i: ; preds = %bb3601.i
- br label %bb3654.i
-bb3623.i: ; preds = %bb3579.i
- br i1 false, label %bb3628.i, label %bb3641.i
-bb3628.i: ; preds = %bb3623.i
- br label %bb3654.i
-bb3641.i: ; preds = %bb3623.i
- br label %bb3654.i
-bb3654.i: ; preds = %bb3641.i, %bb3628.i, %bb3618.i, %bb3601.i
- br i1 false, label %bb3676.i, label %bb3698.i
-bb3676.i: ; preds = %bb3654.i
- br i1 false, label %bb3729.i, label %bb3693.i
-bb3693.i: ; preds = %bb3676.i
- br label %glgScalarFloatConversion.exit
-bb3698.i: ; preds = %bb3654.i
- br i1 false, label %bb3703.i, label %bb3716.i
-bb3703.i: ; preds = %bb3698.i
- br label %glgScalarFloatConversion.exit
-bb3716.i: ; preds = %bb3698.i
- br label %glgScalarFloatConversion.exit
-bb3729.i: ; preds = %bb3676.i
- br label %glgScalarFloatConversion.exit
-bb3749.i: ; preds = %bb3485.i, %bb3485.i
- br label %glgScalarFloatConversion.exit
-bb3794.i: ; preds = %bb3485.i
- br label %glgScalarFloatConversion.exit
-bb3941.i: ; preds = %bb3485.i
- br label %glgScalarFloatConversion.exit
-bb4077.i: ; preds = %bb3485.i
- br i1 false, label %bb4083.i, label %bb4111.i
-bb4083.i: ; preds = %bb4077.i
- br label %glgScalarFloatConversion.exit
-bb4111.i: ; preds = %bb4077.i
- br i1 false, label %bb4117.i, label %bb4145.i
-bb4117.i: ; preds = %bb4111.i
- br label %glgScalarFloatConversion.exit
-bb4145.i: ; preds = %bb4111.i
- br label %glgScalarFloatConversion.exit
-bb4173.i: ; preds = %bb2883.i, %bb2872.i
- %f_red.0.reg2mem.4.i = phi i32 [ 0, %bb2872.i ], [ 0, %bb2883.i ] ; <i32> [#uses=2]
- %f_green.0.reg2mem.2.i = phi i32 [ 0, %bb2872.i ], [ 0, %bb2883.i ] ; <i32> [#uses=1]
- %f_blue.0.reg2mem.2.i = phi i32 [ 0, %bb2872.i ], [ 0, %bb2883.i ] ; <i32> [#uses=1]
- %f_alpha.1.reg2mem.1.i = phi i32 [ 0, %bb2872.i ], [ %f_alpha.1.i, %bb2883.i ] ; <i32> [#uses=1]
- switch i32 %dt4080.0.i, label %bb4950.i [
- i32 7, label %bb4178.i
- i32 6, label %bb4204.i
- i32 1, label %bb4517.i202
- i32 3, label %bb4517.i202
- i32 5, label %bb4575.i
- i32 4, label %bb4769.i
- ]
-bb4178.i: ; preds = %bb4173.i
- br label %glgScalarFloatConversion.exit
-bb4204.i: ; preds = %bb4173.i
- %tmp4210.i = and i32 0, 32768 ; <i32> [#uses=4]
- %tmp4212.i = and i32 %f_red.0.reg2mem.4.i, 2139095040 ; <i32> [#uses=1]
- %tmp4214.i = and i32 %f_red.0.reg2mem.4.i, 8388607 ; <i32> [#uses=1]
- br i1 false, label %bb4219.i, label %bb4241.i
-bb4219.i: ; preds = %bb4204.i
- br i1 false, label %bb4272.i, label %bb4236.i
-bb4236.i: ; preds = %bb4219.i
- br label %bb4272.i
-bb4241.i: ; preds = %bb4204.i
- br i1 false, label %bb4246.i, label %bb4259.i
-bb4246.i: ; preds = %bb4241.i
- %tmp4253.i = lshr i32 %tmp4214.i, 0 ; <i32> [#uses=1]
- %tmp4253.masked.i = and i32 %tmp4253.i, 65535 ; <i32> [#uses=1]
- br label %bb4272.i
-bb4259.i: ; preds = %bb4241.i
- %tmp4261.i187 = add i32 %tmp4212.i, 134217728 ; <i32> [#uses=1]
- %tmp4262.i188 = lshr i32 %tmp4261.i187, 13 ; <i32> [#uses=1]
- %tmp4262.masked.i = and i32 %tmp4262.i188, 64512 ; <i32> [#uses=1]
- %tmp42665693.masked.i = or i32 %tmp4262.masked.i, %tmp4210.i ; <i32> [#uses=1]
- br label %bb4272.i
-bb4272.i: ; preds = %bb4259.i, %bb4246.i, %bb4236.i, %bb4219.i
- %tmp42665693.masked.pn.i = phi i32 [ %tmp42665693.masked.i, %bb4259.i ], [ %tmp4253.masked.i, %bb4246.i ], [ %tmp4210.i, %bb4236.i ], [ %tmp4210.i, %bb4219.i ] ; <i32> [#uses=1]
- %tmp4268.pn.i = phi i32 [ 0, %bb4259.i ], [ %tmp4210.i, %bb4246.i ], [ 31744, %bb4236.i ], [ 32767, %bb4219.i ] ; <i32> [#uses=1]
- %tmp100.0.i = or i32 %tmp4268.pn.i, %tmp42665693.masked.pn.i ; <i32> [#uses=0]
- %tmp4289.i = and i32 %f_green.0.reg2mem.2.i, 8388607 ; <i32> [#uses=1]
- br i1 false, label %bb4294.i, label %bb4316.i
-bb4294.i: ; preds = %bb4272.i
- br i1 false, label %bb4347.i, label %bb4311.i
-bb4311.i: ; preds = %bb4294.i
- br label %bb4347.i
-bb4316.i: ; preds = %bb4272.i
- br i1 false, label %bb4321.i, label %bb4334.i
-bb4321.i: ; preds = %bb4316.i
- br label %bb4347.i
-bb4334.i: ; preds = %bb4316.i
- %tmp4343.i = lshr i32 %tmp4289.i, 13 ; <i32> [#uses=0]
- br label %bb4347.i
-bb4347.i: ; preds = %bb4334.i, %bb4321.i, %bb4311.i, %bb4294.i
- %tmp4364.i190 = and i32 %f_blue.0.reg2mem.2.i, 8388607 ; <i32> [#uses=1]
- br i1 false, label %bb4369.i192, label %bb4391.i
-bb4369.i192: ; preds = %bb4347.i
- br i1 false, label %bb4422.i, label %bb4386.i
-bb4386.i: ; preds = %bb4369.i192
- br label %bb4422.i
-bb4391.i: ; preds = %bb4347.i
- br i1 false, label %bb4396.i, label %bb4409.i
-bb4396.i: ; preds = %bb4391.i
- br label %bb4422.i
-bb4409.i: ; preds = %bb4391.i
- %tmp4418.i = lshr i32 %tmp4364.i190, 13 ; <i32> [#uses=0]
- br label %bb4422.i
-bb4422.i: ; preds = %bb4409.i, %bb4396.i, %bb4386.i, %bb4369.i192
- %tmp4439.i194 = and i32 %f_alpha.1.reg2mem.1.i, 8388607 ; <i32> [#uses=1]
- br i1 false, label %bb4444.i, label %bb4466.i
-bb4444.i: ; preds = %bb4422.i
- br i1 false, label %bb4497.i, label %bb4461.i
-bb4461.i: ; preds = %bb4444.i
- br label %glgScalarFloatConversion.exit
-bb4466.i: ; preds = %bb4422.i
- br i1 false, label %bb4471.i, label %bb4484.i
-bb4471.i: ; preds = %bb4466.i
- br label %glgScalarFloatConversion.exit
-bb4484.i: ; preds = %bb4466.i
- %tmp4493.i = lshr i32 %tmp4439.i194, 13 ; <i32> [#uses=0]
- br label %glgScalarFloatConversion.exit
-bb4497.i: ; preds = %bb4444.i
- br label %glgScalarFloatConversion.exit
-bb4517.i202: ; preds = %bb4173.i, %bb4173.i
- br label %glgScalarFloatConversion.exit
-bb4575.i: ; preds = %bb4173.i
- br label %glgScalarFloatConversion.exit
-bb4769.i: ; preds = %bb4173.i
- br label %glgScalarFloatConversion.exit
-bb4950.i: ; preds = %bb4173.i
- br i1 false, label %bb4956.i, label %bb4993.i
-bb4956.i: ; preds = %bb4950.i
- br label %glgScalarFloatConversion.exit
-bb4993.i: ; preds = %bb4950.i
- br i1 false, label %bb4999.i, label %bb5036.i
-bb4999.i: ; preds = %bb4993.i
- br label %glgScalarFloatConversion.exit
-bb5036.i: ; preds = %bb4993.i
- br label %glgScalarFloatConversion.exit
-UnifiedReturnBlock.i235: ; preds = %bb2883.i, %bb2872.i
- br label %glgScalarFloatConversion.exit
-glgScalarFloatConversion.exit: ; preds = %UnifiedReturnBlock.i235, %bb5036.i, %bb4999.i, %bb4956.i, %bb4769.i, %bb4575.i, %bb4517.i202, %bb4497.i, %bb4484.i, %bb4471.i, %bb4461.i, %bb4178.i, %bb4145.i, %bb4117.i, %bb4083.i, %bb3941.i, %bb3794.i, %bb3749.i, %bb3729.i, %bb3716.i, %bb3703.i, %bb3693.i, %bb3490.i, %bb3475.i, %bb3429.i, %bb3376.i, %bb3357.i, %bb3337.i, %bb3324.i, %bb3311.i, %bb3301.i, %bb3258.i, %bb3011.i, %bb2972.i
- br label %bb18851.i
-bb16697.i: ; preds = %loadColor_BGRA_UI8888R.exit
- br i1 false, label %bb17749.i, label %bb16700.i
-bb16700.i: ; preds = %bb16697.i
- switch i32 0, label %bb16829.i [
- i32 4, label %bb16705.i
- i32 8, label %bb16743.i
- i32 11, label %bb16795.i
- ]
-bb16705.i: ; preds = %bb16700.i
- switch i32 %df4081.0.i, label %bb17183.i [
- i32 1, label %bb16710.i
- i32 2, label %bb16721.i
- i32 3, label %bb16732.i
- ]
-bb16710.i: ; preds = %bb16705.i
- br label %bb17195.i
-bb16721.i: ; preds = %bb16705.i
- br label %bb17195.i
-bb16732.i: ; preds = %bb16705.i
- br label %bb17195.i
-bb16743.i: ; preds = %bb16700.i
- switch i32 0, label %bb16759.i [
- i32 4, label %bb16755.i
- i32 11, label %bb16755.i
- ]
-bb16755.i: ; preds = %bb16743.i, %bb16743.i
- br label %bb17195.i
-bb16759.i: ; preds = %bb16743.i
- switch i32 %df4081.0.i, label %bb17183.i [
- i32 1, label %bb16764.i
- i32 2, label %bb16775.i
- i32 3, label %bb16786.i
- ]
-bb16764.i: ; preds = %bb16759.i
- br label %bb17195.i
-bb16775.i: ; preds = %bb16759.i
- br label %bb17195.i
-bb16786.i: ; preds = %bb16759.i
- br label %bb17195.i
-bb16795.i: ; preds = %bb16700.i
- switch i32 0, label %bb17183.i [
- i32 4, label %bb16807.i
- i32 8, label %bb16807.i
- i32 3, label %bb16823.i
- ]
-bb16807.i: ; preds = %bb16795.i, %bb16795.i
- br label %bb17195.i
-bb16823.i: ; preds = %bb16795.i
- br label %bb17195.i
-bb16829.i: ; preds = %bb16700.i
- switch i32 %sf4083.0.i, label %bb17183.i [
- i32 10, label %bb16834.i
- i32 0, label %bb16892.i
- i32 1, label %bb16953.i
- i32 2, label %bb17037.i
- i32 3, label %bb17121.i
- ]
-bb16834.i: ; preds = %bb16829.i
- switch i32 0, label %bb16878.i [
- i32 4, label %bb16839.i
- i32 8, label %bb16858.i
- i32 11, label %bb16874.i
- ]
-bb16839.i: ; preds = %bb16834.i
- br label %bb17195.i
-bb16858.i: ; preds = %bb16834.i
- br label %bb17195.i
-bb16874.i: ; preds = %bb16834.i
- br label %bb17195.i
-bb16878.i: ; preds = %bb16834.i
- br i1 false, label %bb16883.i, label %bb17183.i
-bb16883.i: ; preds = %bb16878.i
- br label %bb17195.i
-bb16892.i: ; preds = %bb16829.i
- switch i32 0, label %bb16930.i [
- i32 4, label %bb16897.i
- i32 8, label %bb16913.i
- i32 11, label %bb16926.i
- ]
-bb16897.i: ; preds = %bb16892.i
- br label %bb17195.i
-bb16913.i: ; preds = %bb16892.i
- br label %bb17195.i
-bb16926.i: ; preds = %bb16892.i
- br label %bb17195.i
-bb16930.i: ; preds = %bb16892.i
- br i1 false, label %bb16936.i, label %bb16939.i
-bb16936.i: ; preds = %bb16930.i
- br label %bb17195.i
-bb16939.i: ; preds = %bb16930.i
- br i1 false, label %bb16944.i, label %bb17183.i
-bb16944.i: ; preds = %bb16939.i
- br label %bb17195.i
-bb16953.i: ; preds = %bb16829.i
- switch i32 0, label %bb17003.i [
- i32 4, label %bb16958.i
- i32 8, label %bb16979.i
- i32 11, label %bb16997.i
- ]
-bb16958.i: ; preds = %bb16953.i
- br label %bb17195.i
-bb16979.i: ; preds = %bb16953.i
- br label %bb17195.i
-bb16997.i: ; preds = %bb16953.i
- br label %bb17195.i
-bb17003.i: ; preds = %bb16953.i
- switch i32 %df4081.0.i, label %bb17183.i [
- i32 0, label %bb17020.i
- i32 2, label %bb17020.i
- i32 10, label %bb17020.i
- i32 3, label %bb17028.i
- ]
-bb17020.i: ; preds = %bb17003.i, %bb17003.i, %bb17003.i
- br label %bb17195.i
-bb17028.i: ; preds = %bb17003.i
- br label %bb17195.i
-bb17037.i: ; preds = %bb16829.i
- switch i32 0, label %bb17087.i [
- i32 4, label %bb17042.i
- i32 8, label %bb17063.i
- i32 11, label %bb17081.i
- ]
-bb17042.i: ; preds = %bb17037.i
- br label %bb17195.i
-bb17063.i: ; preds = %bb17037.i
- br label %bb17195.i
-bb17081.i: ; preds = %bb17037.i
- br label %bb17195.i
-bb17087.i: ; preds = %bb17037.i
- switch i32 %df4081.0.i, label %bb17183.i [
- i32 0, label %bb17104.i
- i32 1, label %bb17104.i
- i32 10, label %bb17104.i
- i32 3, label %bb17112.i
- ]
-bb17104.i: ; preds = %bb17087.i, %bb17087.i, %bb17087.i
- br label %bb17195.i
-bb17112.i: ; preds = %bb17087.i
- br label %bb17195.i
-bb17121.i: ; preds = %bb16829.i
- switch i32 0, label %bb17183.i [
- i32 4, label %bb17126.i
- i32 8, label %bb17149.i
- i32 11, label %bb17167.i
- i32 10, label %bb17180.i
- ]
-bb17126.i: ; preds = %bb17121.i
- br label %bb17195.i
-bb17149.i: ; preds = %bb17121.i
- br label %bb17195.i
-bb17167.i: ; preds = %bb17121.i
- br label %bb17195.i
-bb17180.i: ; preds = %bb17121.i
- br label %bb17183.i
-bb17183.i: ; preds = %bb17180.i, %bb17121.i, %bb17087.i, %bb17003.i, %bb16939.i, %bb16878.i, %bb16829.i, %bb16795.i, %bb16759.i, %bb16705.i
- br label %bb17195.i
-bb17195.i: ; preds = %bb17183.i, %bb17167.i, %bb17149.i, %bb17126.i, %bb17112.i, %bb17104.i, %bb17081.i, %bb17063.i, %bb17042.i, %bb17028.i, %bb17020.i, %bb16997.i, %bb16979.i, %bb16958.i, %bb16944.i, %bb16936.i, %bb16926.i, %bb16913.i, %bb16897.i, %bb16883.i, %bb16874.i, %bb16858.i, %bb16839.i, %bb16823.i, %bb16807.i, %bb16786.i, %bb16775.i, %bb16764.i, %bb16755.i, %bb16732.i, %bb16721.i, %bb16710.i
- br i1 false, label %bb18845.i, label %bb17225.i
-bb17225.i: ; preds = %bb17195.i
- switch i32 %dt4080.0.i, label %bb17677.i [
- i32 4, label %bb17227.i
- i32 8, label %bb17259.i
- i32 9, label %bb17309.i
- i32 10, label %bb17359.i
- i32 11, label %bb17359.i
- i32 14, label %bb17409.i
- i32 15, label %bb17474.i
- i32 18, label %bb17539.i
- i32 19, label %bb17604.i
- i32 0, label %bb17680.i
- i32 1, label %bb17672.i
- i32 2, label %bb17673.i
- i32 3, label %bb17674.i
- i32 5, label %bb17675.i
- i32 12, label %bb17676.i
- i32 13, label %bb17676.i
- i32 16, label %bb17680.i
- i32 17, label %bb17680.i
- ]
-bb17227.i: ; preds = %bb17225.i
- br i1 false, label %bb18845.i, label %bb17230.i
-bb17230.i: ; preds = %bb17227.i
- br label %bb18851.i
-bb17259.i: ; preds = %bb17225.i
- br i1 false, label %bb17284.i, label %bb17262.i
-bb17262.i: ; preds = %bb17259.i
- br label %bb17284.i
-bb17284.i: ; preds = %bb17262.i, %bb17259.i
- br label %bb18851.i
-bb17309.i: ; preds = %bb17225.i
- br i1 false, label %bb17334.i, label %bb17312.i
-bb17312.i: ; preds = %bb17309.i
- br label %bb17334.i
-bb17334.i: ; preds = %bb17312.i, %bb17309.i
- br label %bb18851.i
-bb17359.i: ; preds = %bb17225.i, %bb17225.i
- br i1 false, label %bb17384.i, label %bb17362.i
-bb17362.i: ; preds = %bb17359.i
- br label %bb17384.i
-bb17384.i: ; preds = %bb17362.i, %bb17359.i
- br label %bb18851.i
-bb17409.i: ; preds = %bb17225.i
- br i1 false, label %bb17441.i, label %bb17412.i
-bb17412.i: ; preds = %bb17409.i
- br label %bb17441.i
-bb17441.i: ; preds = %bb17412.i, %bb17409.i
- br label %bb18851.i
-bb17474.i: ; preds = %bb17225.i
- br i1 false, label %bb17506.i, label %bb17477.i
-bb17477.i: ; preds = %bb17474.i
- br label %bb17506.i
-bb17506.i: ; preds = %bb17477.i, %bb17474.i
- br label %bb18851.i
-bb17539.i: ; preds = %bb17225.i
- br i1 false, label %bb17571.i, label %bb17542.i
-bb17542.i: ; preds = %bb17539.i
- br label %bb17571.i
-bb17571.i: ; preds = %bb17542.i, %bb17539.i
- br label %bb18851.i
-bb17604.i: ; preds = %bb17225.i
- br i1 false, label %bb17636.i, label %bb17607.i
-bb17607.i: ; preds = %bb17604.i
- br label %bb17636.i
-bb17636.i: ; preds = %bb17607.i, %bb17604.i
- br label %bb18851.i
-bb17672.i: ; preds = %bb17225.i
- br i1 false, label %bb17716.i, label %bb17683.i
-bb17673.i: ; preds = %bb17225.i
- br i1 false, label %bb17716.i, label %bb17683.i
-bb17674.i: ; preds = %bb17225.i
- br i1 false, label %bb17716.i, label %bb17683.i
-bb17675.i: ; preds = %bb17225.i
- br i1 false, label %bb17716.i, label %bb17683.i
-bb17676.i: ; preds = %bb17225.i, %bb17225.i
- br i1 false, label %bb17716.i, label %bb17683.i
-bb17677.i: ; preds = %bb17225.i
- unreachable
-bb17680.i: ; preds = %bb17225.i, %bb17225.i, %bb17225.i
- br i1 false, label %bb17716.i, label %bb17683.i
-bb17683.i: ; preds = %bb17680.i, %bb17676.i, %bb17675.i, %bb17674.i, %bb17673.i, %bb17672.i
- br label %bb17716.i
-bb17716.i: ; preds = %bb17683.i, %bb17680.i, %bb17676.i, %bb17675.i, %bb17674.i, %bb17673.i, %bb17672.i
- br label %bb18851.i
-bb17749.i: ; preds = %bb16697.i
- br i1 false, label %bb17757.i, label %bb17903.i
-bb17757.i: ; preds = %bb17749.i
- switch i32 0, label %bb17903.i [
- i32 0, label %bb17759.i
- i32 1, label %bb17853.i
- i32 2, label %bb17853.i
- ]
-bb17759.i: ; preds = %bb17757.i
- br i1 false, label %bb17764.i, label %bb17772.i
-bb17764.i: ; preds = %bb17759.i
- br label %bb18032.i
-bb17772.i: ; preds = %bb17759.i
- switch i32 %sf4083.0.i, label %bb17798.i [
- i32 1, label %bb17777.i
- i32 2, label %bb17790.i
- ]
-bb17777.i: ; preds = %bb17772.i
- switch i32 0, label %bb18032.i [
- i32 4, label %bb17818.i
- i32 8, label %bb17818.i
- i32 11, label %bb17845.i
- ]
-bb17790.i: ; preds = %bb17772.i
- switch i32 0, label %bb18032.i [
- i32 4, label %bb17818.i
- i32 8, label %bb17818.i
- i32 11, label %bb17845.i
- ]
-bb17798.i: ; preds = %bb17772.i
- switch i32 0, label %bb18032.i [
- i32 4, label %bb17818.i
- i32 8, label %bb17818.i
- i32 11, label %bb17845.i
- ]
-bb17818.i: ; preds = %bb17798.i, %bb17798.i, %bb17790.i, %bb17790.i, %bb17777.i, %bb17777.i
- switch i32 0, label %bb18032.i [
- i32 4, label %bb17845.i
- i32 11, label %bb17845.i
- i32 8, label %bb17946.i
- ]
-bb17845.i: ; preds = %bb17818.i, %bb17818.i, %bb17798.i, %bb17790.i, %bb17777.i
- switch i32 0, label %bb18032.i [
- i32 4, label %bb17908.i
- i32 8, label %bb17946.i
- i32 11, label %bb17998.i
- ]
-bb17853.i: ; preds = %bb17757.i, %bb17757.i
- br i1 false, label %bb17890.i, label %bb17903.i
-bb17890.i: ; preds = %bb17853.i
- br label %bb17903.i
-bb17903.i: ; preds = %bb17890.i, %bb17853.i, %bb17757.i, %bb17749.i
- switch i32 0, label %bb18032.i [
- i32 4, label %bb17908.i
- i32 8, label %bb17946.i
- i32 11, label %bb17998.i
- ]
-bb17908.i: ; preds = %bb17903.i, %bb17845.i
- switch i32 %df4081.0.i, label %bb18386.i [
- i32 1, label %bb17913.i
- i32 2, label %bb17924.i
- i32 3, label %bb17935.i
- ]
-bb17913.i: ; preds = %bb17908.i
- br label %bb18398.i
-bb17924.i: ; preds = %bb17908.i
- br label %bb18398.i
-bb17935.i: ; preds = %bb17908.i
- br label %bb18398.i
-bb17946.i: ; preds = %bb17903.i, %bb17845.i, %bb17818.i
- switch i32 0, label %bb17962.i [
- i32 4, label %bb17958.i
- i32 11, label %bb17958.i
- ]
-bb17958.i: ; preds = %bb17946.i, %bb17946.i
- br label %bb18398.i
-bb17962.i: ; preds = %bb17946.i
- switch i32 %df4081.0.i, label %bb18386.i [
- i32 1, label %bb17967.i
- i32 2, label %bb17978.i
- i32 3, label %bb17989.i
- ]
-bb17967.i: ; preds = %bb17962.i
- br label %bb18398.i
-bb17978.i: ; preds = %bb17962.i
- br label %bb18398.i
-bb17989.i: ; preds = %bb17962.i
- br label %bb18398.i
-bb17998.i: ; preds = %bb17903.i, %bb17845.i
- switch i32 0, label %bb18386.i [
- i32 4, label %bb18010.i
- i32 8, label %bb18010.i
- i32 3, label %bb18026.i
- ]
-bb18010.i: ; preds = %bb17998.i, %bb17998.i
- br label %bb18398.i
-bb18026.i: ; preds = %bb17998.i
- br label %bb18398.i
-bb18032.i: ; preds = %bb17903.i, %bb17845.i, %bb17818.i, %bb17798.i, %bb17790.i, %bb17777.i, %bb17764.i
- switch i32 %sf4083.0.i, label %bb18386.i [
- i32 10, label %bb18037.i
- i32 0, label %bb18095.i
- i32 1, label %bb18156.i
- i32 2, label %bb18240.i
- i32 3, label %bb18324.i
- ]
-bb18037.i: ; preds = %bb18032.i
- switch i32 0, label %bb18081.i [
- i32 4, label %bb18042.i
- i32 8, label %bb18061.i
- i32 11, label %bb18077.i
- ]
-bb18042.i: ; preds = %bb18037.i
- br label %bb18398.i
-bb18061.i: ; preds = %bb18037.i
- br label %bb18398.i
-bb18077.i: ; preds = %bb18037.i
- br label %bb18398.i
-bb18081.i: ; preds = %bb18037.i
- br i1 false, label %bb18086.i, label %bb18386.i
-bb18086.i: ; preds = %bb18081.i
- br label %bb18398.i
-bb18095.i: ; preds = %bb18032.i
- switch i32 0, label %bb18133.i [
- i32 4, label %bb18100.i
- i32 8, label %bb18116.i
- i32 11, label %bb18129.i
- ]
-bb18100.i: ; preds = %bb18095.i
- br label %bb18398.i
-bb18116.i: ; preds = %bb18095.i
- br label %bb18398.i
-bb18129.i: ; preds = %bb18095.i
- br label %bb18398.i
-bb18133.i: ; preds = %bb18095.i
- br i1 false, label %bb18139.i, label %bb18142.i
-bb18139.i: ; preds = %bb18133.i
- br label %bb18398.i
-bb18142.i: ; preds = %bb18133.i
- br i1 false, label %bb18147.i, label %bb18386.i
-bb18147.i: ; preds = %bb18142.i
- br label %bb18398.i
-bb18156.i: ; preds = %bb18032.i
- switch i32 0, label %bb18206.i [
- i32 4, label %bb18161.i
- i32 8, label %bb18182.i
- i32 11, label %bb18200.i
- ]
-bb18161.i: ; preds = %bb18156.i
- br label %bb18398.i
-bb18182.i: ; preds = %bb18156.i
- br label %bb18398.i
-bb18200.i: ; preds = %bb18156.i
- br label %bb18398.i
-bb18206.i: ; preds = %bb18156.i
- switch i32 %df4081.0.i, label %bb18386.i [
- i32 0, label %bb18223.i
- i32 2, label %bb18223.i
- i32 10, label %bb18223.i
- i32 3, label %bb18231.i
- ]
-bb18223.i: ; preds = %bb18206.i, %bb18206.i, %bb18206.i
- br label %bb18398.i
-bb18231.i: ; preds = %bb18206.i
- br label %bb18398.i
-bb18240.i: ; preds = %bb18032.i
- switch i32 0, label %bb18290.i [
- i32 4, label %bb18245.i
- i32 8, label %bb18266.i
- i32 11, label %bb18284.i
- ]
-bb18245.i: ; preds = %bb18240.i
- br label %bb18398.i
-bb18266.i: ; preds = %bb18240.i
- br label %bb18398.i
-bb18284.i: ; preds = %bb18240.i
- br label %bb18398.i
-bb18290.i: ; preds = %bb18240.i
- switch i32 %df4081.0.i, label %bb18386.i [
- i32 0, label %bb18307.i
- i32 1, label %bb18307.i
- i32 10, label %bb18307.i
- i32 3, label %bb18315.i
- ]
-bb18307.i: ; preds = %bb18290.i, %bb18290.i, %bb18290.i
- br label %bb18398.i
-bb18315.i: ; preds = %bb18290.i
- br label %bb18398.i
-bb18324.i: ; preds = %bb18032.i
- switch i32 0, label %bb18386.i [
- i32 4, label %bb18329.i
- i32 8, label %bb18352.i
- i32 11, label %bb18370.i
- i32 10, label %bb18383.i
- ]
-bb18329.i: ; preds = %bb18324.i
- br label %bb18398.i
-bb18352.i: ; preds = %bb18324.i
- br label %bb18398.i
-bb18370.i: ; preds = %bb18324.i
- br label %bb18398.i
-bb18383.i: ; preds = %bb18324.i
- br label %bb18386.i
-bb18386.i: ; preds = %bb18383.i, %bb18324.i, %bb18290.i, %bb18206.i, %bb18142.i, %bb18081.i, %bb18032.i, %bb17998.i, %bb17962.i, %bb17908.i
- br label %bb18398.i
-bb18398.i: ; preds = %bb18386.i, %bb18370.i, %bb18352.i, %bb18329.i, %bb18315.i, %bb18307.i, %bb18284.i, %bb18266.i, %bb18245.i, %bb18231.i, %bb18223.i, %bb18200.i, %bb18182.i, %bb18161.i, %bb18147.i, %bb18139.i, %bb18129.i, %bb18116.i, %bb18100.i, %bb18086.i, %bb18077.i, %bb18061.i, %bb18042.i, %bb18026.i, %bb18010.i, %bb17989.i, %bb17978.i, %bb17967.i, %bb17958.i, %bb17935.i, %bb17924.i, %bb17913.i
- br i1 false, label %bb18589.i, label %bb18431.i
-bb18431.i: ; preds = %bb18398.i
- switch i32 0, label %bb18589.i [
- i32 0, label %bb18433.i
- i32 1, label %bb18487.i
- i32 2, label %bb18487.i
- ]
-bb18433.i: ; preds = %bb18431.i
- switch i32 0, label %bb18589.i [
- i32 4, label %bb18452.i
- i32 8, label %bb18452.i
- i32 11, label %bb18479.i
- ]
-bb18452.i: ; preds = %bb18433.i, %bb18433.i
- switch i32 0, label %bb18589.i [
- i32 4, label %bb18479.i
- i32 11, label %bb18479.i
- ]
-bb18479.i: ; preds = %bb18452.i, %bb18452.i, %bb18433.i
- br i1 false, label %bb18845.i, label %bb18592.i
-bb18487.i: ; preds = %bb18431.i, %bb18431.i
- br i1 false, label %bb18492.i, label %bb18521.i
-bb18492.i: ; preds = %bb18487.i
- br i1 false, label %bb18508.i, label %bb18529.i
-bb18508.i: ; preds = %bb18492.i
- switch i32 0, label %bb18589.i [
- i32 4, label %bb18541.i
- i32 8, label %bb18541.i
- ]
-bb18521.i: ; preds = %bb18487.i
- br label %bb18529.i
-bb18529.i: ; preds = %bb18521.i, %bb18492.i
- switch i32 0, label %bb18589.i [
- i32 4, label %bb18541.i
- i32 8, label %bb18541.i
- ]
-bb18541.i: ; preds = %bb18529.i, %bb18529.i, %bb18508.i, %bb18508.i
- br i1 false, label %bb18560.i, label %bb18589.i
-bb18560.i: ; preds = %bb18541.i
- br i1 false, label %bb18576.i, label %bb18589.i
-bb18576.i: ; preds = %bb18560.i
- br label %bb18589.i
-bb18589.i: ; preds = %bb18576.i, %bb18560.i, %bb18541.i, %bb18529.i, %bb18508.i, %bb18452.i, %bb18433.i, %bb18431.i, %bb18398.i
- br i1 false, label %bb18845.i, label %bb18592.i
-bb18592.i: ; preds = %bb18589.i, %bb18479.i
- switch i32 %dt4080.0.i, label %bb18809.i [
- i32 4, label %bb18845.i
- i32 8, label %bb18594.i
- i32 9, label %bb18619.i
- i32 10, label %bb18644.i
- i32 11, label %bb18644.i
- i32 14, label %bb18669.i
- i32 15, label %bb18702.i
- i32 18, label %bb18735.i
- i32 19, label %bb18768.i
- i32 0, label %bb18812.i
- i32 1, label %bb18804.i
- i32 2, label %bb18805.i
- i32 3, label %bb18806.i
- i32 5, label %bb18807.i
- i32 12, label %bb18808.i
- i32 13, label %bb18808.i
- i32 16, label %bb18812.i
- i32 17, label %bb18812.i
- ]
-bb18594.i: ; preds = %bb18592.i
- br label %bb18851.i
-bb18619.i: ; preds = %bb18592.i
- br label %bb18851.i
-bb18644.i: ; preds = %bb18592.i, %bb18592.i
- br label %bb18851.i
-bb18669.i: ; preds = %bb18592.i
- br label %bb18851.i
-bb18702.i: ; preds = %bb18592.i
- br label %bb18851.i
-bb18735.i: ; preds = %bb18592.i
- br label %bb18851.i
-bb18768.i: ; preds = %bb18592.i
- br label %bb18851.i
-bb18804.i: ; preds = %bb18592.i
- br label %bb18812.i
-bb18805.i: ; preds = %bb18592.i
- br label %bb18812.i
-bb18806.i: ; preds = %bb18592.i
- br label %bb18812.i
-bb18807.i: ; preds = %bb18592.i
- br label %bb18812.i
-bb18808.i: ; preds = %bb18592.i, %bb18592.i
- br label %bb18812.i
-bb18809.i: ; preds = %bb18592.i
- unreachable
-bb18812.i: ; preds = %bb18808.i, %bb18807.i, %bb18806.i, %bb18805.i, %bb18804.i, %bb18592.i, %bb18592.i, %bb18592.i
- br label %bb18845.i
-bb18845.i: ; preds = %bb18812.i, %bb18592.i, %bb18589.i, %bb18479.i, %bb17227.i, %bb17195.i
- br label %bb18851.i
-bb18851.i: ; preds = %bb18845.i, %bb18768.i, %bb18735.i, %bb18702.i, %bb18669.i, %bb18644.i, %bb18619.i, %bb18594.i, %bb17716.i, %bb17636.i, %bb17571.i, %bb17506.i, %bb17441.i, %bb17384.i, %bb17334.i, %bb17284.i, %bb17230.i, %glgScalarFloatConversion.exit
- br label %storeColor_RGB_UI.exit
-storeColor_RGB_UI.exit: ; preds = %bb18851.i
- br i1 false, label %bb19786.i, label %bb16650.i
-bb19786.i: ; preds = %storeColor_RGB_UI.exit
- br label %bb19808.i
-bb19808.i: ; preds = %bb19786.i
- br i1 false, label %bb19818.i, label %bb5276.i
-bb19818.i: ; preds = %bb19808.i
- br i1 false, label %bb19840.i, label %bb19821.i
-bb19821.i: ; preds = %bb19818.i
- br label %bb19840.i
-bb19840.i: ; preds = %bb19821.i, %bb19818.i
- br i1 false, label %UnifiedReturnBlock.i, label %bb19843.i
-bb19843.i: ; preds = %bb19840.i
- br label %t.exit
-UnifiedReturnBlock.i: ; preds = %bb19840.i, %bb4501.i
- br label %t.exit
-t.exit: ; preds = %UnifiedReturnBlock.i, %bb19843.i, %bb4517.i, %bb4354.i
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2008-05-19-LiveIntervalsBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2008-05-19-LiveIntervalsBug.ll
deleted file mode 100644
index 71aa603..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2008-05-19-LiveIntervalsBug.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin
-
- %struct.BiContextType = type { i16, i8, i32 }
- %struct.Bitstream = type { i32, i32, i8, i32, i32, i8, i8, i32, i32, i8*, i32 }
- %struct.DataPartition = type { %struct.Bitstream*, %struct.EncodingEnvironment, %struct.EncodingEnvironment }
- %struct.DecRefPicMarking_t = type { i32, i32, i32, i32, i32, %struct.DecRefPicMarking_t* }
- %struct.EncodingEnvironment = type { i32, i32, i32, i32, i32, i8*, i32*, i32, i32 }
- %struct.ImageParameters = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8**, i8**, i32, i32***, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [9 x [16 x [16 x i16]]], [5 x [16 x [16 x i16]]], [9 x [8 x [8 x i16]]], [2 x [4 x [16 x [16 x i16]]]], [16 x [16 x i16]], [16 x [16 x i32]], i32****, i32***, i32***, i32***, i32****, i32****, %struct.Picture*, %struct.Slice*, %struct.Macroblock*, i32*, i32*, i32, i32, i32, i32, [4 x [4 x i32]], i32, i32, i32, i32, i32, double, i32, i32, i32, i32, i16******, i16******, i16******, i16******, [15 x i16], i32, i32, i32, i32, i32, i32, i32, i32, [6 x [32 x i32]], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [1 x i32], i32, i32, [2 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.DecRefPicMarking_t*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, double**, double***, i32***, double**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [3 x [2 x i32]], [2 x i32], i32, i32, i16, i32, i32, i32, i32, i32 }
- %struct.Macroblock = type { i32, i32, i32, [2 x i32], i32, [8 x i32], %struct.Macroblock*, %struct.Macroblock*, i32, [2 x [4 x [4 x [2 x i32]]]], [16 x i8], [16 x i8], i32, i64, [4 x i32], [4 x i32], i64, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, double, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.MotionInfoContexts = type { [3 x [11 x %struct.BiContextType]], [2 x [9 x %struct.BiContextType]], [2 x [10 x %struct.BiContextType]], [2 x [6 x %struct.BiContextType]], [4 x %struct.BiContextType], [4 x %struct.BiContextType], [3 x %struct.BiContextType] }
- %struct.Picture = type { i32, i32, [100 x %struct.Slice*], i32, float, float, float }
- %struct.Slice = type { i32, i32, i32, i32, i32, i32, %struct.DataPartition*, %struct.MotionInfoContexts*, %struct.TextureInfoContexts*, i32, i32*, i32*, i32*, i32, i32*, i32*, i32*, i32 (i32)*, [3 x [2 x i32]] }
- %struct.TextureInfoContexts = type { [2 x %struct.BiContextType], [4 x %struct.BiContextType], [3 x [4 x %struct.BiContextType]], [10 x [4 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [5 x %struct.BiContextType]], [10 x [5 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]] }
-@images = external global %struct.ImageParameters ; <%struct.ImageParameters*> [#uses=2]
-
-declare i8* @calloc(i32, i32)
-
-define fastcc void @init_global_buffers() nounwind {
-entry:
- %tmp50.i.i = mul i32 0, 0 ; <i32> [#uses=2]
- br i1 false, label %init_orig_buffers.exit, label %cond_true.i29
-
-cond_true.i29: ; preds = %entry
- %tmp17.i = load i32* getelementptr (%struct.ImageParameters* @images, i32 0, i32 20), align 8 ; <i32> [#uses=1]
- %tmp20.i27 = load i32* getelementptr (%struct.ImageParameters* @images, i32 0, i32 16), align 8 ; <i32> [#uses=1]
- %tmp8.i.i = select i1 false, i32 1, i32 0 ; <i32> [#uses=1]
- br label %bb.i8.us.i
-
-bb.i8.us.i: ; preds = %get_mem2Dpel.exit.i.us.i, %cond_true.i29
- %j.04.i.us.i = phi i32 [ %indvar.next39.i, %get_mem2Dpel.exit.i.us.i ], [ 0, %cond_true.i29 ] ; <i32> [#uses=2]
- %tmp13.i.us.i = getelementptr i16*** null, i32 %j.04.i.us.i ; <i16***> [#uses=0]
- %tmp15.i.i.us.i = tail call i8* @calloc( i32 0, i32 2 ) ; <i8*> [#uses=0]
- store i16* null, i16** null, align 4
- br label %bb.i.i.us.i
-
-get_mem2Dpel.exit.i.us.i: ; preds = %bb.i.i.us.i
- %indvar.next39.i = add i32 %j.04.i.us.i, 1 ; <i32> [#uses=2]
- %exitcond40.i = icmp eq i32 %indvar.next39.i, 2 ; <i1> [#uses=1]
- br i1 %exitcond40.i, label %get_mem3Dpel.exit.split.i, label %bb.i8.us.i
-
-bb.i.i.us.i: ; preds = %bb.i.i.us.i, %bb.i8.us.i
- %exitcond.i = icmp eq i32 0, %tmp8.i.i ; <i1> [#uses=1]
- br i1 %exitcond.i, label %get_mem2Dpel.exit.i.us.i, label %bb.i.i.us.i
-
-get_mem3Dpel.exit.split.i: ; preds = %get_mem2Dpel.exit.i.us.i
- %tmp30.i.i = shl i32 %tmp17.i, 2 ; <i32> [#uses=1]
- %tmp31.i.i = mul i32 %tmp30.i.i, %tmp20.i27 ; <i32> [#uses=1]
- %tmp23.i31 = add i32 %tmp31.i.i, %tmp50.i.i ; <i32> [#uses=1]
- br label %init_orig_buffers.exit
-
-init_orig_buffers.exit: ; preds = %get_mem3Dpel.exit.split.i, %entry
- %memory_size.0.i = phi i32 [ %tmp23.i31, %get_mem3Dpel.exit.split.i ], [ %tmp50.i.i, %entry ] ; <i32> [#uses=1]
- %tmp41 = add i32 0, %memory_size.0.i ; <i32> [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2008-05-19-ScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2008-05-19-ScavengerAssert.ll
deleted file mode 100644
index aa61d86..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2008-05-19-ScavengerAssert.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin
-
- %struct.Decoders = type { i32**, i16***, i16****, i16***, i16**, i8**, i8** }
-@decoders = external global %struct.Decoders ; <%struct.Decoders*> [#uses=1]
-
-declare i8* @calloc(i32, i32)
-
-declare fastcc i32 @get_mem2Dint(i32***, i32, i32)
-
-define fastcc void @init_global_buffers() nounwind {
-entry:
- %tmp151 = tail call fastcc i32 @get_mem2Dint( i32*** getelementptr (%struct.Decoders* @decoders, i32 0, i32 0), i32 16, i32 16 ) ; <i32> [#uses=1]
- %tmp158 = tail call i8* @calloc( i32 0, i32 4 ) ; <i8*> [#uses=0]
- br i1 false, label %cond_true166, label %bb190.preheader
-
-bb190.preheader: ; preds = %entry
- %memory_size.3555 = add i32 0, %tmp151 ; <i32> [#uses=0]
- unreachable
-
-cond_true166: ; preds = %entry
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2008-07-17-Fdiv.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2008-07-17-Fdiv.ll
deleted file mode 100644
index 4cb768e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2008-07-17-Fdiv.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=arm
-
-define float @f(float %a, float %b) nounwind {
- %tmp = fdiv float %a, %b
- ret float %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2008-07-24-CodeGenPrepCrash.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2008-07-24-CodeGenPrepCrash.ll
deleted file mode 100644
index 83fde07..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2008-07-24-CodeGenPrepCrash.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=arm
-; PR2589
-
-define void @main({ i32 }*) {
-entry:
- %sret1 = alloca { i32 } ; <{ i32 }*> [#uses=1]
- load { i32 }* %sret1 ; <{ i32 }>:1 [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2008-08-07-AsmPrintBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2008-08-07-AsmPrintBug.ll
deleted file mode 100644
index adb0112..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2008-08-07-AsmPrintBug.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -mattr=+v6 -relocation-model=pic | grep comm
-
- %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
- %struct.__gcov_var = type { %struct.FILE*, i32, i32, i32, i32, i32, i32, [1025 x i32] }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
-@__gcov_var = common global %struct.__gcov_var zeroinitializer ; <%struct.__gcov_var*> [#uses=1]
-
-define i32 @__gcov_close() nounwind {
-entry:
- load i32* getelementptr (%struct.__gcov_var* @__gcov_var, i32 0, i32 5), align 4 ; <i32>:0 [#uses=1]
- ret i32 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2008-09-14-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2008-09-14-CoalescerBug.ll
deleted file mode 100644
index 5f9d9ae..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2008-09-14-CoalescerBug.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin
-
-@"\01LC1" = external constant [288 x i8] ; <[288 x i8]*> [#uses=1]
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32) nounwind
-
-define i32 @main(i32 %argc, i8** %argv) nounwind {
-entry:
- br label %bb.i
-
-bb.i: ; preds = %bb.i, %entry
- %i.01.i = phi i32 [ 0, %entry ], [ %indvar.next52, %bb.i ] ; <i32> [#uses=1]
- %indvar.next52 = add i32 %i.01.i, 1 ; <i32> [#uses=2]
- %exitcond53 = icmp eq i32 %indvar.next52, 15 ; <i1> [#uses=1]
- br i1 %exitcond53, label %bb.i33.loopexit, label %bb.i
-
-bb.i33.loopexit: ; preds = %bb.i
- %0 = malloc [347 x i8] ; <[347 x i8]*> [#uses=2]
- %.sub = getelementptr [347 x i8]* %0, i32 0, i32 0 ; <i8*> [#uses=1]
- call void @llvm.memcpy.i32( i8* %.sub, i8* getelementptr ([288 x i8]* @"\01LC1", i32 0, i32 0), i32 287, i32 1 ) nounwind
- br label %bb.i28
-
-bb.i28: ; preds = %bb.i28, %bb.i33.loopexit
- br i1 false, label %repeat_fasta.exit, label %bb.i28
-
-repeat_fasta.exit: ; preds = %bb.i28
- free [347 x i8]* %0
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2008-09-17-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2008-09-17-CoalescerBug.ll
deleted file mode 100644
index d3bc3e1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2008-09-17-CoalescerBug.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin
-
-define void @gcov_exit() nounwind {
-entry:
- br i1 false, label %bb24, label %bb33.thread
-
-bb24: ; preds = %entry
- br label %bb39
-
-bb33.thread: ; preds = %entry
- %0 = alloca i8, i32 0 ; <i8*> [#uses=1]
- br label %bb39
-
-bb39: ; preds = %bb33.thread, %bb24
- %.reg2mem.0 = phi i8* [ %0, %bb33.thread ], [ null, %bb24 ] ; <i8*> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll
deleted file mode 100644
index 601a516..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2008-11-18-ScavengerAssert.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2
-
-define hidden i64 @__muldi3(i64 %u, i64 %v) nounwind {
-entry:
- %0 = trunc i64 %u to i32 ; <i32> [#uses=1]
- %asmtmp = tail call { i32, i32, i32, i32, i32 } asm "@ Inlined umul_ppmm\0A\09mov\09$2, $5, lsr #16\0A\09mov\09$0, $6, lsr #16\0A\09bic\09$3, $5, $2, lsl #16\0A\09bic\09$4, $6, $0, lsl #16\0A\09mul\09$1, $3, $4\0A\09mul\09$4, $2, $4\0A\09mul\09$3, $0, $3\0A\09mul\09$0, $2, $0\0A\09adds\09$3, $4, $3\0A\09addcs\09$0, $0, #65536\0A\09adds\09$1, $1, $3, lsl #16\0A\09adc\09$0, $0, $3, lsr #16", "=&r,=r,=&r,=&r,=r,r,r,~{cc}"(i32 %0, i32 0) nounwind ; <{ i32, i32, i32, i32, i32 }> [#uses=1]
- %asmresult1 = extractvalue { i32, i32, i32, i32, i32 } %asmtmp, 1 ; <i32> [#uses=1]
- %asmresult116 = zext i32 %asmresult1 to i64 ; <i64> [#uses=1]
- %asmresult116.ins = or i64 0, %asmresult116 ; <i64> [#uses=1]
- %1 = lshr i64 %v, 32 ; <i64> [#uses=1]
- %2 = mul i64 %1, %u ; <i64> [#uses=1]
- %3 = add i64 %2, 0 ; <i64> [#uses=1]
- %4 = shl i64 %3, 32 ; <i64> [#uses=1]
- %5 = add i64 %asmresult116.ins, %4 ; <i64> [#uses=1]
- ret i64 %5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-02-16-SpillerBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-02-16-SpillerBug.ll
deleted file mode 100644
index 4c0c59c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-02-16-SpillerBug.ll
+++ /dev/null
@@ -1,117 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2
-
-target triple = "arm-apple-darwin9"
- %struct.FILE_POS = type { i8, i8, i16, i32 }
- %struct.FIRST_UNION = type { %struct.FILE_POS }
- %struct.FOURTH_UNION = type { %struct.STYLE }
- %struct.GAP = type { i8, i8, i16 }
- %struct.LIST = type { %struct.rec*, %struct.rec* }
- %struct.SECOND_UNION = type { { i16, i8, i8 } }
- %struct.STYLE = type { { %struct.GAP }, { %struct.GAP }, i16, i16, i32 }
- %struct.THIRD_UNION = type { { [2 x i32], [2 x i32] } }
- %struct.head_type = type { [2 x %struct.LIST], %struct.FIRST_UNION, %struct.SECOND_UNION, %struct.THIRD_UNION, %struct.FOURTH_UNION, %struct.rec*, { %struct.rec* }, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, i32 }
- %struct.rec = type { %struct.head_type }
-@no_file_pos = external global %struct.FILE_POS ; <%struct.FILE_POS*> [#uses=1]
-@"\01LC13423" = external constant [23 x i8] ; <[23 x i8]*> [#uses=1]
-@"\01LC18972" = external constant [13 x i8] ; <[13 x i8]*> [#uses=1]
-
-define fastcc void @FlushGalley(%struct.rec* %hd) nounwind {
-entry:
- br label %RESUME
-
-RESUME: ; preds = %bb520.preheader, %entry
- br label %bb396
-
-bb122: ; preds = %bb396
- switch i32 0, label %bb394 [
- i32 1, label %bb131
- i32 2, label %bb244
- i32 4, label %bb244
- i32 5, label %bb244
- i32 6, label %bb244
- i32 7, label %bb244
- i32 11, label %bb244
- i32 12, label %bb244
- i32 15, label %bb244
- i32 17, label %bb244
- i32 18, label %bb244
- i32 19, label %bb244
- i32 20, label %bb396
- i32 21, label %bb396
- i32 22, label %bb396
- i32 23, label %bb396
- i32 24, label %bb244
- i32 25, label %bb244
- i32 26, label %bb244
- i32 27, label %bb244
- i32 28, label %bb244
- i32 29, label %bb244
- i32 30, label %bb244
- i32 31, label %bb244
- i32 32, label %bb244
- i32 33, label %bb244
- i32 34, label %bb244
- i32 35, label %bb244
- i32 36, label %bb244
- i32 37, label %bb244
- i32 38, label %bb244
- i32 39, label %bb244
- i32 40, label %bb244
- i32 41, label %bb244
- i32 42, label %bb244
- i32 43, label %bb244
- i32 44, label %bb244
- i32 45, label %bb244
- i32 46, label %bb244
- i32 50, label %bb244
- i32 51, label %bb244
- i32 94, label %bb244
- i32 95, label %bb244
- i32 96, label %bb244
- i32 97, label %bb244
- i32 98, label %bb244
- i32 99, label %bb244
- ]
-
-bb131: ; preds = %bb122
- br label %bb396
-
-bb244: ; preds = %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122, %bb122
- %0 = icmp eq %struct.rec* %stop_link.3, null ; <i1> [#uses=1]
- br i1 %0, label %bb435, label %bb433
-
-bb394: ; preds = %bb122
- call void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 1, i32 3, i8* getelementptr ([23 x i8]* @"\01LC13423", i32 0, i32 0), i32 0, %struct.FILE_POS* @no_file_pos, i8* getelementptr ([13 x i8]* @"\01LC18972", i32 0, i32 0), i8* null) nounwind
- br label %bb396
-
-bb396: ; preds = %bb394, %bb131, %bb122, %bb122, %bb122, %bb122, %RESUME
- %stop_link.3 = phi %struct.rec* [ null, %RESUME ], [ %stop_link.3, %bb394 ], [ %stop_link.3, %bb122 ], [ %stop_link.3, %bb122 ], [ %stop_link.3, %bb122 ], [ %stop_link.3, %bb122 ], [ %link.1, %bb131 ] ; <%struct.rec*> [#uses=7]
- %headers_seen.1 = phi i32 [ 0, %RESUME ], [ %headers_seen.1, %bb394 ], [ 1, %bb122 ], [ 1, %bb122 ], [ 1, %bb122 ], [ 1, %bb122 ], [ %headers_seen.1, %bb131 ] ; <i32> [#uses=2]
- %link.1 = load %struct.rec** null ; <%struct.rec*> [#uses=2]
- %1 = icmp eq %struct.rec* %link.1, %hd ; <i1> [#uses=1]
- br i1 %1, label %bb398, label %bb122
-
-bb398: ; preds = %bb396
- unreachable
-
-bb433: ; preds = %bb244
- call fastcc void @Promote(%struct.rec* %hd, %struct.rec* %stop_link.3, %struct.rec* null, i32 1) nounwind
- br label %bb435
-
-bb435: ; preds = %bb433, %bb244
- br i1 false, label %bb491, label %bb499
-
-bb491: ; preds = %bb435
- br label %bb499
-
-bb499: ; preds = %bb499, %bb491, %bb435
- %2 = icmp eq %struct.rec* null, null ; <i1> [#uses=1]
- br i1 %2, label %bb520.preheader, label %bb499
-
-bb520.preheader: ; preds = %bb499
- br label %RESUME
-}
-
-declare fastcc void @Promote(%struct.rec*, %struct.rec*, %struct.rec* nocapture, i32) nounwind
-
-declare void @Error(i32, i32, i8*, i32, %struct.FILE_POS*, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-02-22-SoftenFloatVaArg.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-02-22-SoftenFloatVaArg.ll
deleted file mode 100644
index a48f003..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-02-22-SoftenFloatVaArg.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s
-; PR3610
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-s0:0:64-f80:32:32"
-target triple = "arm-elf"
-
-define i32 @main(i8*) nounwind {
-entry:
- %ap = alloca i8* ; <i8**> [#uses=2]
- store i8* %0, i8** %ap
- %retval = alloca i32 ; <i32*> [#uses=2]
- store i32 0, i32* %retval
- %tmp = alloca float ; <float*> [#uses=1]
- %1 = va_arg i8** %ap, float ; <float> [#uses=1]
- store float %1, float* %tmp
- br label %return
-
-return: ; preds = %entry
- %2 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-02-27-SpillerBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-02-27-SpillerBug.ll
deleted file mode 100644
index bc5e602..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-02-27-SpillerBug.ll
+++ /dev/null
@@ -1,229 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2
-
-target triple = "arm-apple-darwin9"
-@a = external global double ; <double*> [#uses=1]
-@N = external global double ; <double*> [#uses=1]
-
-declare double @llvm.exp.f64(double) nounwind readonly
-
-define fastcc void @findratio(double* nocapture %res1, double* nocapture %res2) nounwind {
-bb.thread:
- br label %bb52
-
-bb32: ; preds = %bb52
- %0 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %1 = add i32 %j.1, 1 ; <i32> [#uses=1]
- br label %bb52
-
-bb52: ; preds = %bb53, %bb32, %bb.thread
- %i.3494 = phi i32 [ 0, %bb.thread ], [ %3, %bb53 ], [ %i.3494, %bb32 ] ; <i32> [#uses=2]
- %k.4 = phi double [ %0, %bb32 ], [ 0.000000e+00, %bb53 ], [ 0.000000e+00, %bb.thread ] ; <double> [#uses=2]
- %j.1 = phi i32 [ %1, %bb32 ], [ 0, %bb53 ], [ 0, %bb.thread ] ; <i32> [#uses=2]
- %2 = icmp sgt i32 %j.1, 99 ; <i1> [#uses=1]
- br i1 %2, label %bb53, label %bb32
-
-bb53: ; preds = %bb52
- %3 = add i32 %i.3494, 1 ; <i32> [#uses=2]
- %phitmp = icmp sgt i32 %3, 999999 ; <i1> [#uses=1]
- br i1 %phitmp, label %bb55, label %bb52
-
-bb55: ; preds = %bb53
- %4 = load double* @a, align 4 ; <double> [#uses=10]
- %5 = fadd double %4, 0.000000e+00 ; <double> [#uses=16]
- %6 = fcmp ogt double %k.4, 0.000000e+00 ; <i1> [#uses=1]
- %.pn404 = fmul double %4, %4 ; <double> [#uses=4]
- %.pn402 = fmul double %5, %5 ; <double> [#uses=5]
- %.pn165.in = load double* @N ; <double> [#uses=5]
- %.pn198 = fmul double 0.000000e+00, %5 ; <double> [#uses=1]
- %.pn185 = fsub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %.pn147 = fsub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %.pn141 = fdiv double 0.000000e+00, %4 ; <double> [#uses=1]
- %.pn142 = fdiv double 0.000000e+00, %5 ; <double> [#uses=1]
- %.pn136 = fdiv double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %.pn132 = fdiv double 0.000000e+00, %5 ; <double> [#uses=1]
- %.pn123 = fdiv double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %.pn124 = fdiv double 0.000000e+00, %.pn198 ; <double> [#uses=1]
- %.pn120 = fdiv double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %.pn117 = fdiv double 0.000000e+00, %4 ; <double> [#uses=1]
- %.pn118 = fdiv double %.pn185, %5 ; <double> [#uses=1]
- %.pn88 = fdiv double %.pn147, %5 ; <double> [#uses=1]
- %.pn81 = fsub double %.pn141, %.pn142 ; <double> [#uses=1]
- %.pn77 = fsub double 0.000000e+00, %.pn136 ; <double> [#uses=1]
- %.pn75 = fsub double 0.000000e+00, %.pn132 ; <double> [#uses=1]
- %.pn69 = fsub double %.pn123, %.pn124 ; <double> [#uses=1]
- %.pn67 = fsub double 0.000000e+00, %.pn120 ; <double> [#uses=1]
- %.pn56 = fsub double %.pn117, %.pn118 ; <double> [#uses=1]
- %.pn42 = fsub double 0.000000e+00, %.pn88 ; <double> [#uses=1]
- %.pn60 = fmul double %.pn81, 0.000000e+00 ; <double> [#uses=1]
- %.pn57 = fadd double %.pn77, 0.000000e+00 ; <double> [#uses=1]
- %.pn58 = fmul double %.pn75, %.pn165.in ; <double> [#uses=1]
- %.pn32 = fadd double %.pn69, 0.000000e+00 ; <double> [#uses=1]
- %.pn33 = fmul double %.pn67, %.pn165.in ; <double> [#uses=1]
- %.pn17 = fsub double 0.000000e+00, %.pn60 ; <double> [#uses=1]
- %.pn9 = fadd double %.pn57, %.pn58 ; <double> [#uses=1]
- %.pn30 = fmul double 0.000000e+00, %.pn56 ; <double> [#uses=1]
- %.pn24 = fmul double 0.000000e+00, %.pn42 ; <double> [#uses=1]
- %.pn1 = fadd double %.pn32, %.pn33 ; <double> [#uses=1]
- %.pn28 = fsub double %.pn30, 0.000000e+00 ; <double> [#uses=1]
- %.pn26 = fadd double %.pn28, 0.000000e+00 ; <double> [#uses=1]
- %.pn22 = fsub double %.pn26, 0.000000e+00 ; <double> [#uses=1]
- %.pn20 = fsub double %.pn24, 0.000000e+00 ; <double> [#uses=1]
- %.pn18 = fadd double %.pn22, 0.000000e+00 ; <double> [#uses=1]
- %.pn16 = fadd double %.pn20, 0.000000e+00 ; <double> [#uses=1]
- %.pn14 = fsub double %.pn18, 0.000000e+00 ; <double> [#uses=1]
- %.pn12 = fsub double %.pn16, %.pn17 ; <double> [#uses=1]
- %.pn10 = fadd double %.pn14, 0.000000e+00 ; <double> [#uses=1]
- %.pn8 = fadd double %.pn12, 0.000000e+00 ; <double> [#uses=1]
- %.pn6 = fsub double %.pn10, 0.000000e+00 ; <double> [#uses=1]
- %.pn4 = fsub double %.pn8, %.pn9 ; <double> [#uses=1]
- %.pn2 = fadd double %.pn6, 0.000000e+00 ; <double> [#uses=1]
- %.pn = fadd double %.pn4, 0.000000e+00 ; <double> [#uses=1]
- %N1.0 = fsub double %.pn2, 0.000000e+00 ; <double> [#uses=2]
- %D1.0 = fsub double %.pn, %.pn1 ; <double> [#uses=2]
- br i1 %6, label %bb62, label %bb64
-
-bb62: ; preds = %bb55
- %7 = fmul double 0.000000e+00, %4 ; <double> [#uses=1]
- %8 = fsub double -0.000000e+00, %7 ; <double> [#uses=3]
- %9 = fmul double 0.000000e+00, %5 ; <double> [#uses=1]
- %10 = fsub double -0.000000e+00, %9 ; <double> [#uses=3]
- %11 = fmul double %.pn404, %4 ; <double> [#uses=5]
- %12 = fmul double %.pn402, %5 ; <double> [#uses=5]
- %13 = fmul double 0.000000e+00, -2.000000e+00 ; <double> [#uses=1]
- %14 = fdiv double 0.000000e+00, %.pn402 ; <double> [#uses=1]
- %15 = fsub double 0.000000e+00, %14 ; <double> [#uses=1]
- %16 = fmul double 0.000000e+00, %15 ; <double> [#uses=1]
- %17 = fadd double %13, %16 ; <double> [#uses=1]
- %18 = fmul double %.pn165.in, -2.000000e+00 ; <double> [#uses=5]
- %19 = fmul double %18, 0.000000e+00 ; <double> [#uses=1]
- %20 = fadd double %17, %19 ; <double> [#uses=1]
- %21 = fmul double 0.000000e+00, %20 ; <double> [#uses=1]
- %22 = fadd double 0.000000e+00, %21 ; <double> [#uses=1]
- %23 = fdiv double 0.000000e+00, %12 ; <double> [#uses=1]
- %24 = fsub double 0.000000e+00, %23 ; <double> [#uses=0]
- %25 = fmul double %18, 0.000000e+00 ; <double> [#uses=1]
- %26 = fadd double 0.000000e+00, %25 ; <double> [#uses=1]
- %27 = fmul double 0.000000e+00, %26 ; <double> [#uses=1]
- %28 = fsub double %22, %27 ; <double> [#uses=1]
- %29 = fmul double %11, %4 ; <double> [#uses=1]
- %30 = fmul double %12, %5 ; <double> [#uses=3]
- %31 = fmul double %.pn165.in, -4.000000e+00 ; <double> [#uses=1]
- %32 = fmul double %.pn165.in, 0x3FF5555555555555 ; <double> [#uses=1]
- %33 = fmul double %32, 0.000000e+00 ; <double> [#uses=2]
- %34 = fadd double %28, 0.000000e+00 ; <double> [#uses=1]
- %35 = fsub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %36 = fdiv double %35, %11 ; <double> [#uses=1]
- %37 = fdiv double 0.000000e+00, %12 ; <double> [#uses=1]
- %38 = fsub double %36, %37 ; <double> [#uses=1]
- %39 = fmul double 0.000000e+00, %38 ; <double> [#uses=1]
- %40 = fadd double 0.000000e+00, %39 ; <double> [#uses=1]
- %41 = fadd double %40, 0.000000e+00 ; <double> [#uses=1]
- %42 = fadd double %41, 0.000000e+00 ; <double> [#uses=1]
- %43 = fmul double %42, 0.000000e+00 ; <double> [#uses=1]
- %44 = fsub double %34, %43 ; <double> [#uses=1]
- %45 = tail call double @llvm.exp.f64(double %8) nounwind ; <double> [#uses=1]
- %46 = fsub double -0.000000e+00, %45 ; <double> [#uses=2]
- %47 = fdiv double %46, 0.000000e+00 ; <double> [#uses=1]
- %48 = fmul double %30, %5 ; <double> [#uses=1]
- %49 = fdiv double 0.000000e+00, %48 ; <double> [#uses=1]
- %50 = fsub double %47, %49 ; <double> [#uses=1]
- %51 = fmul double %50, -4.000000e+00 ; <double> [#uses=1]
- %52 = fadd double %51, 0.000000e+00 ; <double> [#uses=1]
- %53 = fdiv double %46, %11 ; <double> [#uses=1]
- %54 = fsub double %53, 0.000000e+00 ; <double> [#uses=1]
- %55 = fmul double %31, %54 ; <double> [#uses=1]
- %56 = fadd double %52, %55 ; <double> [#uses=1]
- %57 = fadd double %56, 0.000000e+00 ; <double> [#uses=1]
- %58 = fadd double %44, %57 ; <double> [#uses=1]
- %59 = fsub double %58, 0.000000e+00 ; <double> [#uses=1]
- %60 = tail call double @llvm.exp.f64(double 0.000000e+00) nounwind ; <double> [#uses=1]
- %61 = fsub double -0.000000e+00, %60 ; <double> [#uses=1]
- %62 = fdiv double 0.000000e+00, -6.000000e+00 ; <double> [#uses=1]
- %63 = fdiv double %61, %5 ; <double> [#uses=1]
- %64 = fsub double 0.000000e+00, %63 ; <double> [#uses=1]
- %65 = fmul double %62, %64 ; <double> [#uses=1]
- %66 = fsub double 0.000000e+00, %65 ; <double> [#uses=1]
- %67 = fsub double -0.000000e+00, 0.000000e+00 ; <double> [#uses=2]
- %68 = tail call double @llvm.exp.f64(double %10) nounwind ; <double> [#uses=1]
- %69 = fsub double -0.000000e+00, %68 ; <double> [#uses=2]
- %70 = fdiv double %67, %.pn404 ; <double> [#uses=1]
- %71 = fdiv double %69, %.pn402 ; <double> [#uses=1]
- %72 = fsub double %70, %71 ; <double> [#uses=1]
- %73 = fmul double %72, -5.000000e-01 ; <double> [#uses=1]
- %74 = fdiv double %67, %4 ; <double> [#uses=1]
- %75 = fdiv double %69, %5 ; <double> [#uses=1]
- %76 = fsub double %74, %75 ; <double> [#uses=1]
- %77 = fmul double %76, 0.000000e+00 ; <double> [#uses=1]
- %78 = fadd double %73, %77 ; <double> [#uses=1]
- %79 = fmul double 0.000000e+00, %78 ; <double> [#uses=1]
- %80 = fadd double %66, %79 ; <double> [#uses=1]
- %81 = fdiv double 0.000000e+00, %.pn404 ; <double> [#uses=1]
- %82 = fdiv double 0.000000e+00, %.pn402 ; <double> [#uses=1]
- %83 = fsub double %81, %82 ; <double> [#uses=1]
- %84 = fmul double %83, -5.000000e-01 ; <double> [#uses=1]
- %85 = fdiv double 0.000000e+00, %4 ; <double> [#uses=1]
- %86 = fdiv double 0.000000e+00, %5 ; <double> [#uses=1]
- %87 = fsub double %85, %86 ; <double> [#uses=1]
- %88 = fmul double %87, 0.000000e+00 ; <double> [#uses=1]
- %89 = fadd double %84, %88 ; <double> [#uses=1]
- %90 = fmul double 0.000000e+00, %89 ; <double> [#uses=1]
- %91 = fsub double %80, %90 ; <double> [#uses=1]
- %92 = tail call double @llvm.exp.f64(double %8) nounwind ; <double> [#uses=1]
- %93 = fsub double -0.000000e+00, %92 ; <double> [#uses=1]
- %94 = tail call double @llvm.exp.f64(double %10) nounwind ; <double> [#uses=1]
- %95 = fsub double -0.000000e+00, %94 ; <double> [#uses=3]
- %96 = fdiv double %95, %.pn402 ; <double> [#uses=1]
- %97 = fsub double 0.000000e+00, %96 ; <double> [#uses=1]
- %98 = fmul double 0.000000e+00, %97 ; <double> [#uses=1]
- %99 = fdiv double %93, %11 ; <double> [#uses=1]
- %100 = fdiv double %95, %12 ; <double> [#uses=1]
- %101 = fsub double %99, %100 ; <double> [#uses=1]
- %102 = fsub double %98, %101 ; <double> [#uses=1]
- %103 = fdiv double %95, %5 ; <double> [#uses=1]
- %104 = fsub double 0.000000e+00, %103 ; <double> [#uses=1]
- %105 = fmul double %18, %104 ; <double> [#uses=1]
- %106 = fadd double %102, %105 ; <double> [#uses=1]
- %107 = fmul double %106, %k.4 ; <double> [#uses=1]
- %108 = fadd double %91, %107 ; <double> [#uses=1]
- %109 = fsub double %108, 0.000000e+00 ; <double> [#uses=1]
- %110 = tail call double @llvm.exp.f64(double %8) nounwind ; <double> [#uses=1]
- %111 = fsub double -0.000000e+00, %110 ; <double> [#uses=2]
- %112 = tail call double @llvm.exp.f64(double %10) nounwind ; <double> [#uses=1]
- %113 = fsub double -0.000000e+00, %112 ; <double> [#uses=2]
- %114 = fdiv double %111, %11 ; <double> [#uses=1]
- %115 = fdiv double %113, %12 ; <double> [#uses=1]
- %116 = fsub double %114, %115 ; <double> [#uses=1]
- %117 = fmul double 0.000000e+00, %116 ; <double> [#uses=1]
- %118 = fdiv double %111, %29 ; <double> [#uses=1]
- %119 = fdiv double %113, %30 ; <double> [#uses=1]
- %120 = fsub double %118, %119 ; <double> [#uses=1]
- %121 = fsub double %117, %120 ; <double> [#uses=1]
- %122 = fmul double %18, 0.000000e+00 ; <double> [#uses=1]
- %123 = fadd double %121, %122 ; <double> [#uses=1]
- %124 = fmul double %33, 0.000000e+00 ; <double> [#uses=1]
- %125 = fadd double %123, %124 ; <double> [#uses=1]
- %126 = fadd double %109, %125 ; <double> [#uses=1]
- %127 = tail call double @llvm.exp.f64(double 0.000000e+00) nounwind ; <double> [#uses=1]
- %128 = fsub double -0.000000e+00, %127 ; <double> [#uses=2]
- %129 = fdiv double %128, %30 ; <double> [#uses=1]
- %130 = fsub double 0.000000e+00, %129 ; <double> [#uses=1]
- %131 = fsub double 0.000000e+00, %130 ; <double> [#uses=1]
- %132 = fdiv double 0.000000e+00, %.pn404 ; <double> [#uses=1]
- %133 = fsub double %132, 0.000000e+00 ; <double> [#uses=1]
- %134 = fmul double %18, %133 ; <double> [#uses=1]
- %135 = fadd double %131, %134 ; <double> [#uses=1]
- %136 = fdiv double %128, %5 ; <double> [#uses=1]
- %137 = fsub double 0.000000e+00, %136 ; <double> [#uses=1]
- %138 = fmul double %33, %137 ; <double> [#uses=1]
- %139 = fadd double %135, %138 ; <double> [#uses=1]
- %140 = fsub double %126, %139 ; <double> [#uses=1]
- %141 = fadd double %N1.0, %59 ; <double> [#uses=1]
- %142 = fadd double %D1.0, %140 ; <double> [#uses=1]
- br label %bb64
-
-bb64: ; preds = %bb62, %bb55
- %N1.0.pn = phi double [ %141, %bb62 ], [ %N1.0, %bb55 ] ; <double> [#uses=1]
- %D1.0.pn = phi double [ %142, %bb62 ], [ %D1.0, %bb55 ] ; <double> [#uses=1]
- %x.1 = fdiv double %N1.0.pn, %D1.0.pn ; <double> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-03-07-SpillerBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-03-07-SpillerBug.ll
deleted file mode 100644
index 0ec17ae..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-03-07-SpillerBug.ll
+++ /dev/null
@@ -1,78 +0,0 @@
-; RUN: llc < %s -mtriple=armv6-apple-darwin9 -mattr=+vfp2
-; rdar://6653182
-
- %struct.ggBRDF = type { i32 (...)** }
- %struct.ggPoint2 = type { [2 x double] }
- %struct.ggPoint3 = type { [3 x double] }
- %struct.ggSpectrum = type { [8 x float] }
- %struct.ggSphere = type { %struct.ggPoint3, double }
- %struct.mrDiffuseAreaSphereLuminaire = type { %struct.mrSphere, %struct.ggSpectrum }
- %struct.mrDiffuseCosineSphereLuminaire = type { %struct.mrDiffuseAreaSphereLuminaire }
- %struct.mrSphere = type { %struct.ggBRDF, %struct.ggSphere }
-
-declare void @llvm.memcpy.i32(i8* nocapture, i8* nocapture, i32, i32) nounwind
-
-declare double @llvm.sqrt.f64(double) nounwind readonly
-
-declare double @sin(double) nounwind readonly
-
-declare double @acos(double) nounwind readonly
-
-define i32 @_ZNK34mrDiffuseSolidAngleSphereLuminaire18selectVisiblePointERK8ggPoint3RK9ggVector3RK8ggPoint2dRS0_Rd(%struct.mrDiffuseCosineSphereLuminaire* nocapture %this, %struct.ggPoint3* nocapture %x, %struct.ggPoint3* nocapture %unnamed_arg, %struct.ggPoint2* nocapture %uv, double %unnamed_arg2, %struct.ggPoint3* nocapture %on_light, double* nocapture %invProb) nounwind {
-entry:
- %0 = call double @llvm.sqrt.f64(double 0.000000e+00) nounwind ; <double> [#uses=4]
- %1 = fcmp ult double 0.000000e+00, %0 ; <i1> [#uses=1]
- br i1 %1, label %bb3, label %bb7
-
-bb3: ; preds = %entry
- %2 = fdiv double 1.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %3 = fmul double 0.000000e+00, %2 ; <double> [#uses=2]
- %4 = call double @llvm.sqrt.f64(double 0.000000e+00) nounwind ; <double> [#uses=1]
- %5 = fdiv double 1.000000e+00, %4 ; <double> [#uses=2]
- %6 = fmul double %3, %5 ; <double> [#uses=2]
- %7 = fmul double 0.000000e+00, %5 ; <double> [#uses=2]
- %8 = fmul double %3, %7 ; <double> [#uses=1]
- %9 = fsub double %8, 0.000000e+00 ; <double> [#uses=1]
- %10 = fmul double 0.000000e+00, %6 ; <double> [#uses=1]
- %11 = fsub double 0.000000e+00, %10 ; <double> [#uses=1]
- %12 = fsub double -0.000000e+00, %11 ; <double> [#uses=1]
- %13 = fmul double %0, %0 ; <double> [#uses=2]
- %14 = fsub double %13, 0.000000e+00 ; <double> [#uses=1]
- %15 = call double @llvm.sqrt.f64(double %14) ; <double> [#uses=1]
- %16 = fmul double 0.000000e+00, %15 ; <double> [#uses=1]
- %17 = fdiv double %16, %0 ; <double> [#uses=1]
- %18 = fadd double 0.000000e+00, %17 ; <double> [#uses=1]
- %19 = call double @acos(double %18) nounwind readonly ; <double> [#uses=1]
- %20 = load double* null, align 4 ; <double> [#uses=1]
- %21 = fmul double %20, 0x401921FB54442D18 ; <double> [#uses=1]
- %22 = call double @sin(double %19) nounwind readonly ; <double> [#uses=2]
- %23 = fmul double %22, 0.000000e+00 ; <double> [#uses=2]
- %24 = fmul double %6, %23 ; <double> [#uses=1]
- %25 = fmul double %7, %23 ; <double> [#uses=1]
- %26 = call double @sin(double %21) nounwind readonly ; <double> [#uses=1]
- %27 = fmul double %22, %26 ; <double> [#uses=2]
- %28 = fmul double %9, %27 ; <double> [#uses=1]
- %29 = fmul double %27, %12 ; <double> [#uses=1]
- %30 = fadd double %24, %28 ; <double> [#uses=1]
- %31 = fadd double 0.000000e+00, %29 ; <double> [#uses=1]
- %32 = fadd double %25, 0.000000e+00 ; <double> [#uses=1]
- %33 = fadd double %30, 0.000000e+00 ; <double> [#uses=1]
- %34 = fadd double %31, 0.000000e+00 ; <double> [#uses=1]
- %35 = fadd double %32, 0.000000e+00 ; <double> [#uses=1]
- %36 = bitcast %struct.ggPoint3* %x to i8* ; <i8*> [#uses=1]
- call void @llvm.memcpy.i32(i8* null, i8* %36, i32 24, i32 4) nounwind
- store double %33, double* null, align 8
- br i1 false, label %_Z20ggRaySphereIntersectRK6ggRay3RK8ggSphereddRd.exit, label %bb5.i.i.i
-
-bb5.i.i.i: ; preds = %bb3
- unreachable
-
-_Z20ggRaySphereIntersectRK6ggRay3RK8ggSphereddRd.exit: ; preds = %bb3
- %37 = fsub double %13, 0.000000e+00 ; <double> [#uses=0]
- %38 = fsub double -0.000000e+00, %34 ; <double> [#uses=0]
- %39 = fsub double -0.000000e+00, %35 ; <double> [#uses=0]
- ret i32 1
-
-bb7: ; preds = %entry
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-03-09-AddrModeBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-03-09-AddrModeBug.ll
deleted file mode 100644
index a1ce384..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-03-09-AddrModeBug.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=arm
-
- %struct.hit_t = type { %struct.v_t, double }
- %struct.node_t = type { %struct.hit_t, %struct.hit_t, i32 }
- %struct.v_t = type { double, double, double }
-
-define fastcc %struct.node_t* @_ZL6createP6node_tii3v_tS1_d(%struct.node_t* %n, i32 %lvl, i32 %dist, i64 %c.0.0, i64 %c.0.1, i64 %c.0.2, i64 %d.0.0, i64 %d.0.1, i64 %d.0.2, double %r) nounwind {
-entry:
- %0 = getelementptr %struct.node_t* %n, i32 0, i32 1 ; <%struct.hit_t*> [#uses=1]
- %1 = bitcast %struct.hit_t* %0 to i256* ; <i256*> [#uses=1]
- store i256 0, i256* %1, align 4
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-04-06-AsmModifier.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-04-06-AsmModifier.ll
deleted file mode 100644
index 3526722..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-04-06-AsmModifier.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=arm | grep {swi 107}
-
-define i32 @_swilseek(i32) nounwind {
-entry:
- %ptr = alloca i32 ; <i32*> [#uses=2]
- store i32 %0, i32* %ptr
- %retval = alloca i32 ; <i32*> [#uses=2]
- store i32 0, i32* %retval
- %res = alloca i32 ; <i32*> [#uses=0]
- %fh = alloca i32 ; <i32*> [#uses=1]
- %1 = load i32* %fh ; <i32> [#uses=1]
- %2 = load i32* %ptr ; <i32> [#uses=1]
- %3 = call i32 asm "mov r0, $2; mov r1, $3; swi ${1:a}; mov $0, r0", "=r,i,r,r,~{r0},~{r1}"(i32 107, i32 %1, i32 %2) nounwind ; <i32> [#uses=1]
- store i32 %3, i32* %retval
- br label %return
-
-return: ; preds = %entry
- %4 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-04-08-AggregateAddr.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-04-08-AggregateAddr.ll
deleted file mode 100644
index f6b3d2c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-04-08-AggregateAddr.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=arm
-; PR3795
-
-define fastcc void @_D3foo3fooFAriZv({ i32, { double, double }* } %d_arg, i32 %x_arg) {
-entry:
- %d = alloca { i32, { double, double }* } ; <{ i32, { double, double }* }*> [#uses=2]
- %x = alloca i32 ; <i32*> [#uses=2]
- %b = alloca { double, double } ; <{ double, double }*> [#uses=1]
- store { i32, { double, double }* } %d_arg, { i32, { double, double }* }* %d
- store i32 %x_arg, i32* %x
- %tmp = load i32* %x ; <i32> [#uses=1]
- %tmp1 = getelementptr { i32, { double, double }* }* %d, i32 0, i32 1 ; <{ double, double }**> [#uses=1]
- %.ptr = load { double, double }** %tmp1 ; <{ double, double }*> [#uses=1]
- %tmp2 = getelementptr { double, double }* %.ptr, i32 %tmp ; <{ double, double }*> [#uses=1]
- %tmp3 = load { double, double }* %tmp2 ; <{ double, double }> [#uses=1]
- store { double, double } %tmp3, { double, double }* %b
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-04-08-FREM.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-04-08-FREM.ll
deleted file mode 100644
index 99907fc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-04-08-FREM.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=arm
-
-declare i32 @printf(i8*, ...)
-
-define i32 @main() {
- %rem_r = frem double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %1 = call i32 (i8*, ...)* @printf(i8* null, double %rem_r) ; <i32> [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-04-08-FloatUndef.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-04-08-FloatUndef.ll
deleted file mode 100644
index 05d2f26..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-04-08-FloatUndef.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=arm
-
-define void @execute_shader(<4 x float>* %OUT, <4 x float>* %IN, <4 x float>* %CONST) {
-entry:
- %input2 = load <4 x float>* null, align 16 ; <<4 x float>> [#uses=2]
- %shuffle7 = shufflevector <4 x float> %input2, <4 x float> <float 0.000000e+00, float 1.000000e+00, float 0.000000e+00, float 1.000000e+00>, <4 x i32> <i32 2, i32 2, i32 2, i32 2> ; <<4 x float>> [#uses=1]
- %mul1 = fmul <4 x float> %shuffle7, zeroinitializer ; <<4 x float>> [#uses=1]
- %add2 = fadd <4 x float> %mul1, %input2 ; <<4 x float>> [#uses=1]
- store <4 x float> %add2, <4 x float>* null, align 16
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-04-09-RegScavengerAsm.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-04-09-RegScavengerAsm.ll
deleted file mode 100644
index deb092b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-04-09-RegScavengerAsm.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=arm
-; PR3954
-
-define void @foo(...) nounwind {
-entry:
- %rr = alloca i32 ; <i32*> [#uses=2]
- %0 = load i32* %rr ; <i32> [#uses=1]
- %1 = call i32 asm "nop", "=r,0"(i32 %0) nounwind ; <i32> [#uses=1]
- store i32 %1, i32* %rr
- br label %return
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-05-05-DAGCombineBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-05-05-DAGCombineBug.ll
deleted file mode 100644
index 670d204..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-05-05-DAGCombineBug.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linuxeabi-unknown-gnu -mattr=+v6
-; PR4166
-
- %"byte[]" = type { i32, i8* }
- %tango.time.Time.Time = type { i64 }
-
-define fastcc void @t() {
-entry:
- %tmp28 = call fastcc i1 null(i32* null, %"byte[]" undef, %"byte[]" undef, %tango.time.Time.Time* byval null) ; <i1> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-05-07-RegAllocLocal.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-05-07-RegAllocLocal.ll
deleted file mode 100644
index 75610ff..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-05-07-RegAllocLocal.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -mtriple=armv5-unknown-linux-gnueabi -O0 -regalloc=local
-; PR4100
-@.str = external constant [30 x i8] ; <[30 x i8]*> [#uses=1]
-
-define i16 @fn16(i16 %arg0.0, <2 x i16> %arg1, i16 %arg2.0) nounwind {
-entry:
- store <2 x i16> %arg1, <2 x i16>* null
- %0 = call i32 (i8*, ...)* @printf(i8* getelementptr ([30 x i8]* @.str, i32 0, i32 0), i32 0) nounwind ; <i32> [#uses=0]
- ret i16 0
-}
-
-declare i32 @printf(i8*, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-05-11-CodePlacementCrash.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-05-11-CodePlacementCrash.ll
deleted file mode 100644
index 7046fcc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-05-11-CodePlacementCrash.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=arm
- %struct.List = type { %struct.List*, i32 }
-@Node5 = external constant %struct.List ; <%struct.List*> [#uses=1]
-@"\01LC" = external constant [7 x i8] ; <[7 x i8]*> [#uses=1]
-
-define i32 @main() nounwind {
-entry:
- br label %bb
-
-bb: ; preds = %bb3, %entry
- %CurL.02 = phi %struct.List* [ @Node5, %entry ], [ %2, %bb3 ] ; <%struct.List*> [#uses=1]
- %PrevL.01 = phi %struct.List* [ null, %entry ], [ %CurL.02, %bb3 ] ; <%struct.List*> [#uses=1]
- %0 = icmp eq %struct.List* %PrevL.01, null ; <i1> [#uses=1]
- br i1 %0, label %bb3, label %bb1
-
-bb1: ; preds = %bb
- br label %bb3
-
-bb3: ; preds = %bb1, %bb
- %iftmp.0.0 = phi i32 [ 0, %bb1 ], [ -1, %bb ] ; <i32> [#uses=1]
- %1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([7 x i8]* @"\01LC", i32 0, i32 0), i32 0, i32 %iftmp.0.0) nounwind ; <i32> [#uses=0]
- %2 = load %struct.List** null, align 4 ; <%struct.List*> [#uses=2]
- %phitmp = icmp eq %struct.List* %2, null ; <i1> [#uses=1]
- br i1 %phitmp, label %bb5, label %bb
-
-bb5: ; preds = %bb3
- ret i32 0
-}
-
-declare i32 @printf(i8* nocapture, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll
deleted file mode 100644
index 1e2707f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-05-18-InlineAsmMem.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-; RUN: llc < %s -march=thumb | FileCheck %s
-; PR4091
-
-define void @foo(i32 %i, i32* %p) nounwind {
-;CHECK: swp r2, r0, [r1]
- %asmtmp = call i32 asm sideeffect "swp $0, $2, $3", "=&r,=*m,r,*m,~{memory}"(i32* %p, i32 %i, i32* %p) nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-02-ISelCrash.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-02-ISelCrash.ll
deleted file mode 100644
index 403e3f6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-02-ISelCrash.ll
+++ /dev/null
@@ -1,62 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=pic -mattr=+v6,+vfp2
-
-@"\01LC" = external constant [15 x i8] ; <[15 x i8]*> [#uses=1]
-
-declare i32 @printf(i8* nocapture, ...) nounwind
-
-define i32 @main() nounwind {
-entry:
- br label %bb.i1.i
-
-bb.i1.i: ; preds = %Cos.exit.i.i, %entry
- br label %bb.i.i.i
-
-bb.i.i.i: ; preds = %bb.i.i.i, %bb.i1.i
- br i1 undef, label %Cos.exit.i.i, label %bb.i.i.i
-
-Cos.exit.i.i: ; preds = %bb.i.i.i
- br i1 undef, label %bb2.i.i, label %bb.i1.i
-
-bb2.i.i: ; preds = %Cos.exit.i.i
- br label %bb3.i.i
-
-bb3.i.i: ; preds = %bb5.i.i, %bb2.i.i
- br label %bb4.i.i
-
-bb4.i.i: ; preds = %bb4.i.i, %bb3.i.i
- br i1 undef, label %bb5.i.i, label %bb4.i.i
-
-bb5.i.i: ; preds = %bb4.i.i
- br i1 undef, label %bb.i, label %bb3.i.i
-
-bb.i: ; preds = %bb.i, %bb5.i.i
- br i1 undef, label %bb1.outer2.i.i.outer, label %bb.i
-
-bb1.outer2.i.i.outer: ; preds = %Fft.exit.i, %bb5.i12.i, %bb.i
- br label %bb1.outer2.i.i
-
-bb1.outer2.i.i: ; preds = %bb2.i9.i, %bb1.outer2.i.i.outer
- br label %bb1.i.i
-
-bb1.i.i: ; preds = %bb1.i.i, %bb1.outer2.i.i
- br i1 undef, label %bb2.i9.i, label %bb1.i.i
-
-bb2.i9.i: ; preds = %bb1.i.i
- br i1 undef, label %bb4.i11.i, label %bb1.outer2.i.i
-
-bb4.i11.i: ; preds = %bb4.i11.i, %bb2.i9.i
- br i1 undef, label %bb5.i12.i, label %bb4.i11.i
-
-bb5.i12.i: ; preds = %bb4.i11.i
- br i1 undef, label %bb7.i.i, label %bb1.outer2.i.i.outer
-
-bb7.i.i: ; preds = %bb7.i.i, %bb5.i12.i
- br i1 undef, label %Fft.exit.i, label %bb7.i.i
-
-Fft.exit.i: ; preds = %bb7.i.i
- br i1 undef, label %bb5.i, label %bb1.outer2.i.i.outer
-
-bb5.i: ; preds = %Fft.exit.i
- %0 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([15 x i8]* @"\01LC", i32 0, i32 0), double undef, double undef) nounwind ; <i32> [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-04-MissingLiveIn.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-04-MissingLiveIn.ll
deleted file mode 100644
index 98e0023..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-04-MissingLiveIn.ll
+++ /dev/null
@@ -1,263 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -mattr=+v6
-
- %struct.anon = type { i16, i16 }
- %struct.cab_archive = type { i32, i16, i16, i16, i16, i8, %struct.cab_folder*, %struct.cab_file* }
- %struct.cab_file = type { i32, i16, i64, i8*, i32, i32, i32, %struct.cab_folder*, %struct.cab_file*, %struct.cab_archive*, %struct.cab_state* }
- %struct.cab_folder = type { i16, i16, %struct.cab_archive*, i64, %struct.cab_folder* }
- %struct.cab_state = type { i8*, i8*, [38912 x i8], i16, i16, i8*, i16 }
- %struct.qtm_model = type { i32, i32, %struct.anon* }
- %struct.qtm_stream = type { i32, i32, i8, i8*, i32, i32, i32, i16, i16, i16, i8, i32, i8*, i8*, i8*, i8*, i8*, i32, i32, i8, [42 x i32], [42 x i8], [27 x i8], [27 x i8], %struct.qtm_model, %struct.qtm_model, %struct.qtm_model, %struct.qtm_model, %struct.qtm_model, %struct.qtm_model, %struct.qtm_model, %struct.qtm_model, %struct.qtm_model, [65 x %struct.anon], [65 x %struct.anon], [65 x %struct.anon], [65 x %struct.anon], [25 x %struct.anon], [37 x %struct.anon], [43 x %struct.anon], [28 x %struct.anon], [8 x %struct.anon], %struct.cab_file*, i32 (%struct.cab_file*, i8*, i32)* }
-
-declare fastcc i32 @qtm_read_input(%struct.qtm_stream* nocapture) nounwind
-
-define fastcc i32 @qtm_decompress(%struct.qtm_stream* %qtm, i64 %out_bytes) nounwind {
-entry:
- br i1 undef, label %bb245, label %bb3
-
-bb3: ; preds = %entry
- br i1 undef, label %bb5, label %bb4
-
-bb4: ; preds = %bb3
- ret i32 undef
-
-bb5: ; preds = %bb3
- br i1 undef, label %bb245, label %bb14
-
-bb14: ; preds = %bb5
- br label %bb238
-
-bb28: ; preds = %bb215
- br label %bb31
-
-bb29: ; preds = %bb31
- br i1 undef, label %bb31, label %bb32
-
-bb31: ; preds = %bb29, %bb28
- br i1 undef, label %bb29, label %bb32
-
-bb32: ; preds = %bb31, %bb29
- br label %bb33
-
-bb33: ; preds = %bb33, %bb32
- br i1 undef, label %bb34, label %bb33
-
-bb34: ; preds = %bb33
- br i1 undef, label %bb35, label %bb36
-
-bb35: ; preds = %bb34
- br label %bb36
-
-bb36: ; preds = %bb46, %bb35, %bb34
- br i1 undef, label %bb40, label %bb37
-
-bb37: ; preds = %bb36
- br i1 undef, label %bb77, label %bb60
-
-bb40: ; preds = %bb36
- br i1 undef, label %bb46, label %bb41
-
-bb41: ; preds = %bb40
- br i1 undef, label %bb45, label %bb42
-
-bb42: ; preds = %bb41
- ret i32 undef
-
-bb45: ; preds = %bb41
- br label %bb46
-
-bb46: ; preds = %bb45, %bb40
- br label %bb36
-
-bb60: ; preds = %bb60, %bb37
- br label %bb60
-
-bb77: ; preds = %bb37
- switch i32 undef, label %bb197 [
- i32 5, label %bb108
- i32 6, label %bb138
- ]
-
-bb108: ; preds = %bb77
- br label %bb111
-
-bb109: ; preds = %bb111
- br i1 undef, label %bb111, label %bb112
-
-bb111: ; preds = %bb109, %bb108
- br i1 undef, label %bb109, label %bb112
-
-bb112: ; preds = %bb111, %bb109
- br label %bb113
-
-bb113: ; preds = %bb113, %bb112
- br i1 undef, label %bb114, label %bb113
-
-bb114: ; preds = %bb113
- br i1 undef, label %bb115, label %bb116
-
-bb115: ; preds = %bb114
- br label %bb116
-
-bb116: ; preds = %bb115, %bb114
- br i1 undef, label %bb120, label %bb117
-
-bb117: ; preds = %bb116
- br label %bb136
-
-bb120: ; preds = %bb116
- ret i32 undef
-
-bb128: ; preds = %bb136
- br i1 undef, label %bb134, label %bb129
-
-bb129: ; preds = %bb128
- br i1 undef, label %bb133, label %bb130
-
-bb130: ; preds = %bb129
- br i1 undef, label %bb132, label %bb131
-
-bb131: ; preds = %bb130
- ret i32 undef
-
-bb132: ; preds = %bb130
- br label %bb133
-
-bb133: ; preds = %bb132, %bb129
- br label %bb134
-
-bb134: ; preds = %bb133, %bb128
- br label %bb136
-
-bb136: ; preds = %bb134, %bb117
- br i1 undef, label %bb198, label %bb128
-
-bb138: ; preds = %bb77
- %0 = trunc i32 undef to i16 ; <i16> [#uses=1]
- br label %bb141
-
-bb139: ; preds = %bb141
- %scevgep441442881 = load i16* undef ; <i16> [#uses=1]
- %1 = icmp ugt i16 %scevgep441442881, %0 ; <i1> [#uses=1]
- br i1 %1, label %bb141, label %bb142
-
-bb141: ; preds = %bb139, %bb138
- br i1 undef, label %bb139, label %bb142
-
-bb142: ; preds = %bb141, %bb139
- br label %bb143
-
-bb143: ; preds = %bb143, %bb142
- br i1 undef, label %bb144, label %bb143
-
-bb144: ; preds = %bb143
- br i1 undef, label %bb145, label %bb146
-
-bb145: ; preds = %bb144
- unreachable
-
-bb146: ; preds = %bb156, %bb144
- br i1 undef, label %bb150, label %bb147
-
-bb147: ; preds = %bb146
- br i1 undef, label %bb157, label %bb148
-
-bb148: ; preds = %bb147
- br i1 undef, label %bb149, label %bb157
-
-bb149: ; preds = %bb148
- br label %bb150
-
-bb150: ; preds = %bb149, %bb146
- br i1 undef, label %bb156, label %bb152
-
-bb152: ; preds = %bb150
- unreachable
-
-bb156: ; preds = %bb150
- br label %bb146
-
-bb157: ; preds = %bb148, %bb147
- br i1 undef, label %bb167, label %bb160
-
-bb160: ; preds = %bb157
- ret i32 undef
-
-bb167: ; preds = %bb157
- br label %bb170
-
-bb168: ; preds = %bb170
- br i1 undef, label %bb170, label %bb171
-
-bb170: ; preds = %bb168, %bb167
- br i1 undef, label %bb168, label %bb171
-
-bb171: ; preds = %bb170, %bb168
- br label %bb172
-
-bb172: ; preds = %bb172, %bb171
- br i1 undef, label %bb173, label %bb172
-
-bb173: ; preds = %bb172
- br i1 undef, label %bb174, label %bb175
-
-bb174: ; preds = %bb173
- unreachable
-
-bb175: ; preds = %bb179, %bb173
- br i1 undef, label %bb179, label %bb176
-
-bb176: ; preds = %bb175
- br i1 undef, label %bb186, label %bb177
-
-bb177: ; preds = %bb176
- br i1 undef, label %bb178, label %bb186
-
-bb178: ; preds = %bb177
- br label %bb179
-
-bb179: ; preds = %bb178, %bb175
- br label %bb175
-
-bb186: ; preds = %bb177, %bb176
- br label %bb195
-
-bb187: ; preds = %bb195
- br i1 undef, label %bb193, label %bb189
-
-bb189: ; preds = %bb187
- %2 = tail call fastcc i32 @qtm_read_input(%struct.qtm_stream* %qtm) nounwind ; <i32> [#uses=0]
- ret i32 undef
-
-bb193: ; preds = %bb187
- br label %bb195
-
-bb195: ; preds = %bb193, %bb186
- br i1 undef, label %bb198, label %bb187
-
-bb197: ; preds = %bb77
- ret i32 -124
-
-bb198: ; preds = %bb195, %bb136
- br i1 undef, label %bb211.preheader, label %bb214
-
-bb211.preheader: ; preds = %bb198
- br label %bb211
-
-bb211: ; preds = %bb211, %bb211.preheader
- br i1 undef, label %bb214, label %bb211
-
-bb214: ; preds = %bb211, %bb198
- br label %bb215
-
-bb215: ; preds = %bb238, %bb214
- br i1 undef, label %bb28, label %bb216
-
-bb216: ; preds = %bb215
- br label %bb238
-
-bb238: ; preds = %bb216, %bb14
- br label %bb215
-
-bb245: ; preds = %bb5, %entry
- ret i32 undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-12-RegScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-12-RegScavengerAssert.ll
deleted file mode 100644
index 27888d7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-12-RegScavengerAssert.ll
+++ /dev/null
@@ -1,77 +0,0 @@
-; RUN: llc < %s -mtriple=armv6-apple-darwin
-
- type { i32, i32, %struct.D_Sym**, [3 x %struct.D_Sym*] } ; type %0
- type { i32, %struct.D_Reduction** } ; type %1
- type { i32, %struct.D_RightEpsilonHint* } ; type %2
- type { i32, %struct.D_ErrorRecoveryHint* } ; type %3
- type { i32, i32, %struct.D_Reduction**, [3 x %struct.D_Reduction*] } ; type %4
- %struct.D_ErrorRecoveryHint = type { i16, i16, i8* }
- %struct.D_ParseNode = type { i32, %struct.d_loc_t, i8*, i8*, %struct.D_Scope*, void (%struct.D_Parser*, %struct.d_loc_t*, i8**)*, i8*, i8* }
- %struct.D_Parser = type { i8*, void (%struct.D_Parser*, %struct.d_loc_t*, i8**)*, %struct.D_Scope*, void (%struct.D_Parser*)*, %struct.D_ParseNode* (%struct.D_Parser*, i32, %struct.D_ParseNode**)*, void (%struct.D_ParseNode*)*, %struct.d_loc_t, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.D_ParserTables = type { i32, %struct.D_State*, i16*, i32, i32, %struct.D_Symbol*, void (%struct.D_Parser*, %struct.d_loc_t*, i8**)*, i32, %struct.D_Pass*, i32 }
- %struct.D_Pass = type { i8*, i32, i32, i32 }
- %struct.D_Reduction = type { i16, i16, i32 (i8*, i8**, i32, i32, %struct.D_Parser*)*, i32 (i8*, i8**, i32, i32, %struct.D_Parser*)*, i16, i16, i32, i32, i32, i32, i32 (i8*, i8**, i32, i32, %struct.D_Parser*)** }
- %struct.D_RightEpsilonHint = type { i16, i16, %struct.D_Reduction* }
- %struct.D_Scope = type { i8, %struct.D_Sym*, %struct.D_SymHash*, %struct.D_Sym*, %struct.D_Scope*, %struct.D_Scope*, %struct.D_Scope*, %struct.D_Scope*, %struct.D_Scope* }
- %struct.D_Shift = type { i16, i8, i8, i32, i32, i32 (i8*, i8**, i32, i32, %struct.D_Parser*)* }
- %struct.D_State = type { i8*, i32, %1, %2, %3, %struct.D_Shift**, i32 (i8**, i32*, i32*, i16*, i32*, i8*, i32*)*, i8*, i8, i8, i8, i8*, %struct.D_Shift***, i32 }
- %struct.D_Sym = type { i8*, i32, i32, %struct.D_Sym*, %struct.D_Sym*, i32 }
- %struct.D_SymHash = type { i32, i32, %0 }
- %struct.D_Symbol = type { i32, i8*, i32 }
- %struct.PNode = type { i32, i32, i32, i32, %struct.D_Reduction*, %struct.D_Shift*, i32, %struct.VecPNode, i32, i8, i8, %struct.PNode*, %struct.PNode*, %struct.PNode*, %struct.PNode*, i8*, i8*, %struct.D_Scope*, i8*, %struct.D_ParseNode }
- %struct.PNodeHash = type { %struct.PNode**, i32, i32, i32, %struct.PNode* }
- %struct.Parser = type { %struct.D_Parser, i8*, i8*, %struct.D_ParserTables*, i32, i32, i32, i32, i32, i32, i32, %struct.PNodeHash, %struct.SNodeHash, %struct.Reduction*, %struct.Shift*, %struct.D_Scope*, %struct.SNode*, i32, %struct.Reduction*, %struct.Shift*, i32, %struct.PNode*, %struct.SNode*, %struct.ZNode*, %4, %struct.ShiftResult*, %struct.D_Shift, %struct.Parser*, i8* }
- %struct.Reduction = type { %struct.ZNode*, %struct.SNode*, %struct.D_Reduction*, %struct.SNode*, i32, %struct.Reduction* }
- %struct.SNode = type { %struct.D_State*, %struct.D_Scope*, i8*, %struct.d_loc_t, i32, %struct.PNode*, %struct.VecZNode, i32, %struct.SNode*, %struct.SNode* }
- %struct.SNodeHash = type { %struct.SNode**, i32, i32, i32, %struct.SNode*, %struct.SNode* }
- %struct.Shift = type { %struct.SNode*, %struct.Shift* }
- %struct.ShiftResult = type { %struct.D_Shift*, %struct.d_loc_t }
- %struct.VecPNode = type { i32, i32, %struct.PNode**, [3 x %struct.PNode*] }
- %struct.VecSNode = type { i32, i32, %struct.SNode**, [3 x %struct.SNode*] }
- %struct.VecZNode = type { i32, i32, %struct.ZNode**, [3 x %struct.ZNode*] }
- %struct.ZNode = type { %struct.PNode*, %struct.VecSNode }
- %struct.d_loc_t = type { i8*, i8*, i32, i32, i32 }
-
-declare void @llvm.memcpy.i32(i8* nocapture, i8* nocapture, i32, i32) nounwind
-
-define fastcc i32 @exhaustive_parse(%struct.Parser* %p, i32 %state) nounwind {
-entry:
- store i8* undef, i8** undef, align 4
- %0 = getelementptr %struct.Parser* %p, i32 0, i32 0, i32 6 ; <%struct.d_loc_t*> [#uses=1]
- %1 = bitcast %struct.d_loc_t* %0 to i8* ; <i8*> [#uses=1]
- call void @llvm.memcpy.i32(i8* undef, i8* %1, i32 20, i32 4)
- br label %bb10
-
-bb10: ; preds = %bb30, %bb29, %bb26, %entry
- br i1 undef, label %bb18, label %bb20
-
-bb18: ; preds = %bb10
- br i1 undef, label %bb20, label %bb19
-
-bb19: ; preds = %bb18
- br label %bb20
-
-bb20: ; preds = %bb19, %bb18, %bb10
- br i1 undef, label %bb21, label %bb22
-
-bb21: ; preds = %bb20
- unreachable
-
-bb22: ; preds = %bb20
- br i1 undef, label %bb24, label %bb26
-
-bb24: ; preds = %bb22
- unreachable
-
-bb26: ; preds = %bb22
- br i1 undef, label %bb10, label %bb29
-
-bb29: ; preds = %bb26
- br i1 undef, label %bb10, label %bb30
-
-bb30: ; preds = %bb29
- br i1 undef, label %bb31, label %bb10
-
-bb31: ; preds = %bb30
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-15-RegScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-15-RegScavengerAssert.ll
deleted file mode 100644
index a0f903b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-15-RegScavengerAssert.ll
+++ /dev/null
@@ -1,344 +0,0 @@
-; RUN: llc < %s -mtriple=armv6-apple-darwin
-
- %struct.term = type { i32, i32, i32 }
-
-declare fastcc i8* @memory_Malloc(i32) nounwind
-
-define fastcc %struct.term* @t1() nounwind {
-entry:
- br i1 undef, label %bb, label %bb1
-
-bb: ; preds = %entry
- ret %struct.term* undef
-
-bb1: ; preds = %entry
- %0 = tail call fastcc i8* @memory_Malloc(i32 12) nounwind ; <i8*> [#uses=0]
- %1 = tail call fastcc i8* @memory_Malloc(i32 12) nounwind ; <i8*> [#uses=0]
- ret %struct.term* undef
-}
-
-
-define i32 @t2(i32 %argc, i8** nocapture %argv) nounwind {
-entry:
- br label %bb6.i8
-
-bb6.i8: ; preds = %memory_CalculateRealBlockSize1374.exit.i, %entry
- br i1 undef, label %memory_CalculateRealBlockSize1374.exit.i, label %bb.i.i9
-
-bb.i.i9: ; preds = %bb6.i8
- br label %memory_CalculateRealBlockSize1374.exit.i
-
-memory_CalculateRealBlockSize1374.exit.i: ; preds = %bb.i.i9, %bb6.i8
- %0 = phi i32 [ undef, %bb.i.i9 ], [ undef, %bb6.i8 ] ; <i32> [#uses=2]
- store i32 %0, i32* undef, align 4
- %1 = urem i32 8184, %0 ; <i32> [#uses=1]
- %2 = sub i32 8188, %1 ; <i32> [#uses=1]
- store i32 %2, i32* undef, align 4
- br i1 undef, label %memory_Init.exit, label %bb6.i8
-
-memory_Init.exit: ; preds = %memory_CalculateRealBlockSize1374.exit.i
- br label %bb.i.i
-
-bb.i.i: ; preds = %bb.i.i, %memory_Init.exit
- br i1 undef, label %symbol_Init.exit, label %bb.i.i
-
-symbol_Init.exit: ; preds = %bb.i.i
- br label %bb.i.i67
-
-bb.i.i67: ; preds = %bb.i.i67, %symbol_Init.exit
- br i1 undef, label %symbol_CreatePrecedence3522.exit, label %bb.i.i67
-
-symbol_CreatePrecedence3522.exit: ; preds = %bb.i.i67
- br label %bb.i.i8.i
-
-bb.i.i8.i: ; preds = %bb.i.i8.i, %symbol_CreatePrecedence3522.exit
- br i1 undef, label %cont_Create.exit9.i, label %bb.i.i8.i
-
-cont_Create.exit9.i: ; preds = %bb.i.i8.i
- br label %bb.i.i.i72
-
-bb.i.i.i72: ; preds = %bb.i.i.i72, %cont_Create.exit9.i
- br i1 undef, label %cont_Init.exit, label %bb.i.i.i72
-
-cont_Init.exit: ; preds = %bb.i.i.i72
- br label %bb.i103
-
-bb.i103: ; preds = %bb.i103, %cont_Init.exit
- br i1 undef, label %subs_Init.exit, label %bb.i103
-
-subs_Init.exit: ; preds = %bb.i103
- br i1 undef, label %bb1.i.i.i80, label %cc_Init.exit
-
-bb1.i.i.i80: ; preds = %subs_Init.exit
- unreachable
-
-cc_Init.exit: ; preds = %subs_Init.exit
- br label %bb.i.i375
-
-bb.i.i375: ; preds = %bb.i.i375, %cc_Init.exit
- br i1 undef, label %bb.i439, label %bb.i.i375
-
-bb.i439: ; preds = %bb.i439, %bb.i.i375
- br i1 undef, label %opts_DeclareSPASSFlagsAsOptions.exit, label %bb.i439
-
-opts_DeclareSPASSFlagsAsOptions.exit: ; preds = %bb.i439
- br i1 undef, label %opts_TranslateShortOptDeclarations.exit.i, label %bb.i.i82
-
-bb.i.i82: ; preds = %opts_DeclareSPASSFlagsAsOptions.exit
- unreachable
-
-opts_TranslateShortOptDeclarations.exit.i: ; preds = %opts_DeclareSPASSFlagsAsOptions.exit
- br i1 undef, label %list_Length.exit.i.thread.i, label %bb.i.i4.i
-
-list_Length.exit.i.thread.i: ; preds = %opts_TranslateShortOptDeclarations.exit.i
- br i1 undef, label %bb18.i.i.i, label %bb26.i.i.i
-
-bb.i.i4.i: ; preds = %opts_TranslateShortOptDeclarations.exit.i
- unreachable
-
-bb18.i.i.i: ; preds = %list_Length.exit.i.thread.i
- unreachable
-
-bb26.i.i.i: ; preds = %list_Length.exit.i.thread.i
- br i1 undef, label %bb27.i142, label %opts_GetOptLongOnly.exit.thread97.i
-
-opts_GetOptLongOnly.exit.thread97.i: ; preds = %bb26.i.i.i
- br label %bb27.i142
-
-bb27.i142: ; preds = %opts_GetOptLongOnly.exit.thread97.i, %bb26.i.i.i
- br label %bb1.i3.i
-
-bb1.i3.i: ; preds = %bb1.i3.i, %bb27.i142
- br i1 undef, label %opts_FreeLongOptsArray.exit.i, label %bb1.i3.i
-
-opts_FreeLongOptsArray.exit.i: ; preds = %bb1.i3.i
- br label %bb.i443
-
-bb.i443: ; preds = %bb.i443, %opts_FreeLongOptsArray.exit.i
- br i1 undef, label %flag_InitStoreByDefaults3542.exit, label %bb.i443
-
-flag_InitStoreByDefaults3542.exit: ; preds = %bb.i443
- br i1 undef, label %bb6.i449, label %bb.i503
-
-bb6.i449: ; preds = %flag_InitStoreByDefaults3542.exit
- unreachable
-
-bb.i503: ; preds = %bb.i503, %flag_InitStoreByDefaults3542.exit
- br i1 undef, label %flag_CleanStore3464.exit, label %bb.i503
-
-flag_CleanStore3464.exit: ; preds = %bb.i503
- br i1 undef, label %bb1.i81.i.preheader, label %bb.i173
-
-bb.i173: ; preds = %flag_CleanStore3464.exit
- unreachable
-
-bb1.i81.i.preheader: ; preds = %flag_CleanStore3464.exit
- br i1 undef, label %bb1.i64.i.preheader, label %bb5.i179
-
-bb5.i179: ; preds = %bb1.i81.i.preheader
- unreachable
-
-bb1.i64.i.preheader: ; preds = %bb1.i81.i.preheader
- br i1 undef, label %dfg_DeleteProofList.exit.i, label %bb.i9.i
-
-bb.i9.i: ; preds = %bb1.i64.i.preheader
- unreachable
-
-dfg_DeleteProofList.exit.i: ; preds = %bb1.i64.i.preheader
- br i1 undef, label %term_DeleteTermList621.exit.i, label %bb.i.i62.i
-
-bb.i.i62.i: ; preds = %bb.i.i62.i, %dfg_DeleteProofList.exit.i
- br i1 undef, label %term_DeleteTermList621.exit.i, label %bb.i.i62.i
-
-term_DeleteTermList621.exit.i: ; preds = %bb.i.i62.i, %dfg_DeleteProofList.exit.i
- br i1 undef, label %dfg_DFGParser.exit, label %bb.i.i211
-
-bb.i.i211: ; preds = %term_DeleteTermList621.exit.i
- unreachable
-
-dfg_DFGParser.exit: ; preds = %term_DeleteTermList621.exit.i
- br label %bb.i513
-
-bb.i513: ; preds = %bb2.i516, %dfg_DFGParser.exit
- br i1 undef, label %bb2.i516, label %bb1.i514
-
-bb1.i514: ; preds = %bb.i513
- unreachable
-
-bb2.i516: ; preds = %bb.i513
- br i1 undef, label %bb.i509, label %bb.i513
-
-bb.i509: ; preds = %bb.i509, %bb2.i516
- br i1 undef, label %symbol_TransferPrecedence3468.exit511, label %bb.i509
-
-symbol_TransferPrecedence3468.exit511: ; preds = %bb.i509
- br i1 undef, label %bb20, label %bb21
-
-bb20: ; preds = %symbol_TransferPrecedence3468.exit511
- unreachable
-
-bb21: ; preds = %symbol_TransferPrecedence3468.exit511
- br i1 undef, label %cnf_Init.exit, label %bb.i498
-
-bb.i498: ; preds = %bb21
- unreachable
-
-cnf_Init.exit: ; preds = %bb21
- br i1 undef, label %bb23, label %bb22
-
-bb22: ; preds = %cnf_Init.exit
- br i1 undef, label %bb2.i.i496, label %bb.i.i494
-
-bb.i.i494: ; preds = %bb22
- unreachable
-
-bb2.i.i496: ; preds = %bb22
- unreachable
-
-bb23: ; preds = %cnf_Init.exit
- br i1 undef, label %bb28, label %bb24
-
-bb24: ; preds = %bb23
- unreachable
-
-bb28: ; preds = %bb23
- br i1 undef, label %bb31, label %bb29
-
-bb29: ; preds = %bb28
- unreachable
-
-bb31: ; preds = %bb28
- br i1 undef, label %bb34, label %bb32
-
-bb32: ; preds = %bb31
- unreachable
-
-bb34: ; preds = %bb31
- br i1 undef, label %bb83, label %bb66
-
-bb66: ; preds = %bb34
- unreachable
-
-bb83: ; preds = %bb34
- br i1 undef, label %bb2.i1668, label %bb.i1667
-
-bb.i1667: ; preds = %bb83
- unreachable
-
-bb2.i1668: ; preds = %bb83
- br i1 undef, label %bb5.i205, label %bb3.i204
-
-bb3.i204: ; preds = %bb2.i1668
- unreachable
-
-bb5.i205: ; preds = %bb2.i1668
- br i1 undef, label %bb.i206.i, label %ana_AnalyzeSortStructure.exit.i
-
-bb.i206.i: ; preds = %bb5.i205
- br i1 undef, label %bb1.i207.i, label %ana_AnalyzeSortStructure.exit.i
-
-bb1.i207.i: ; preds = %bb.i206.i
- br i1 undef, label %bb25.i1801.thread, label %bb.i1688
-
-bb.i1688: ; preds = %bb1.i207.i
- unreachable
-
-bb25.i1801.thread: ; preds = %bb1.i207.i
- unreachable
-
-ana_AnalyzeSortStructure.exit.i: ; preds = %bb.i206.i, %bb5.i205
- br i1 undef, label %bb7.i207, label %bb.i1806
-
-bb.i1806: ; preds = %ana_AnalyzeSortStructure.exit.i
- br i1 undef, label %bb2.i.i.i1811, label %bb.i.i.i1809
-
-bb.i.i.i1809: ; preds = %bb.i1806
- unreachable
-
-bb2.i.i.i1811: ; preds = %bb.i1806
- unreachable
-
-bb7.i207: ; preds = %ana_AnalyzeSortStructure.exit.i
- br i1 undef, label %bb9.i, label %bb8.i
-
-bb8.i: ; preds = %bb7.i207
- unreachable
-
-bb9.i: ; preds = %bb7.i207
- br i1 undef, label %bb23.i, label %bb26.i
-
-bb23.i: ; preds = %bb9.i
- br i1 undef, label %bb25.i, label %bb24.i
-
-bb24.i: ; preds = %bb23.i
- br i1 undef, label %sort_SortTheoryIsTrivial.exit.i, label %bb.i2093
-
-bb.i2093: ; preds = %bb.i2093, %bb24.i
- br label %bb.i2093
-
-sort_SortTheoryIsTrivial.exit.i: ; preds = %bb24.i
- br i1 undef, label %bb3.i2141, label %bb4.i2143
-
-bb3.i2141: ; preds = %sort_SortTheoryIsTrivial.exit.i
- unreachable
-
-bb4.i2143: ; preds = %sort_SortTheoryIsTrivial.exit.i
- br i1 undef, label %bb8.i2178, label %bb5.i2144
-
-bb5.i2144: ; preds = %bb4.i2143
- br i1 undef, label %bb7.i2177, label %bb1.i28.i
-
-bb1.i28.i: ; preds = %bb5.i2144
- br i1 undef, label %bb4.i43.i, label %bb2.i.i2153
-
-bb2.i.i2153: ; preds = %bb1.i28.i
- br i1 undef, label %bb4.i.i33.i, label %bb.i.i30.i
-
-bb.i.i30.i: ; preds = %bb2.i.i2153
- unreachable
-
-bb4.i.i33.i: ; preds = %bb2.i.i2153
- br i1 undef, label %bb9.i.i36.i, label %bb5.i.i34.i
-
-bb5.i.i34.i: ; preds = %bb4.i.i33.i
- unreachable
-
-bb9.i.i36.i: ; preds = %bb4.i.i33.i
- br i1 undef, label %bb14.i.i.i2163, label %bb10.i.i37.i
-
-bb10.i.i37.i: ; preds = %bb9.i.i36.i
- unreachable
-
-bb14.i.i.i2163: ; preds = %bb9.i.i36.i
- br i1 undef, label %sort_LinkPrint.exit.i.i, label %bb15.i.i.i2164
-
-bb15.i.i.i2164: ; preds = %bb14.i.i.i2163
- unreachable
-
-sort_LinkPrint.exit.i.i: ; preds = %bb14.i.i.i2163
- unreachable
-
-bb4.i43.i: ; preds = %bb1.i28.i
- unreachable
-
-bb7.i2177: ; preds = %bb5.i2144
- unreachable
-
-bb8.i2178: ; preds = %bb4.i2143
- br i1 undef, label %sort_ApproxStaticSortTheory.exit, label %bb.i5.i2185.preheader
-
-bb.i5.i2185.preheader: ; preds = %bb8.i2178
- br label %bb.i5.i2185
-
-bb.i5.i2185: ; preds = %bb.i5.i2185, %bb.i5.i2185.preheader
- br i1 undef, label %sort_ApproxStaticSortTheory.exit, label %bb.i5.i2185
-
-sort_ApproxStaticSortTheory.exit: ; preds = %bb.i5.i2185, %bb8.i2178
- br label %bb25.i
-
-bb25.i: ; preds = %sort_ApproxStaticSortTheory.exit, %bb23.i
- unreachable
-
-bb26.i: ; preds = %bb9.i
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-19-RegScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-19-RegScavengerAssert.ll
deleted file mode 100644
index b56b684..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-19-RegScavengerAssert.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -mtriple=armv6-eabi -mattr=+vfp2 -float-abi=hard
-; PR4419
-
-define float @__ieee754_acosf(float %x) nounwind {
-entry:
- br i1 undef, label %bb, label %bb4
-
-bb: ; preds = %entry
- ret float undef
-
-bb4: ; preds = %entry
- br i1 undef, label %bb5, label %bb6
-
-bb5: ; preds = %bb4
- ret float undef
-
-bb6: ; preds = %bb4
- br i1 undef, label %bb11, label %bb12
-
-bb11: ; preds = %bb6
- %0 = tail call float @__ieee754_sqrtf(float undef) nounwind ; <float> [#uses=1]
- %1 = fmul float %0, -2.000000e+00 ; <float> [#uses=1]
- %2 = fadd float %1, 0x400921FB40000000 ; <float> [#uses=1]
- ret float %2
-
-bb12: ; preds = %bb6
- ret float undef
-}
-
-declare float @__ieee754_sqrtf(float)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-22-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-22-CoalescerBug.ll
deleted file mode 100644
index e068be7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-22-CoalescerBug.ll
+++ /dev/null
@@ -1,43 +0,0 @@
-; RUN: llc < %s -mtriple=armv6-apple-darwin
-
- %struct.rtunion = type { i64 }
- %struct.rtx_def = type { i16, i8, i8, [1 x %struct.rtunion] }
-
-define arm_apcscc void @simplify_unary_real(i8* nocapture %p) nounwind {
-entry:
- %tmp121 = load i64* null, align 4 ; <i64> [#uses=1]
- %0 = getelementptr %struct.rtx_def* null, i32 0, i32 3, i32 3, i32 0 ; <i64*> [#uses=1]
- %tmp122 = load i64* %0, align 4 ; <i64> [#uses=1]
- %1 = zext i64 undef to i192 ; <i192> [#uses=2]
- %2 = zext i64 %tmp121 to i192 ; <i192> [#uses=1]
- %3 = shl i192 %2, 64 ; <i192> [#uses=2]
- %4 = zext i64 %tmp122 to i192 ; <i192> [#uses=1]
- %5 = shl i192 %4, 128 ; <i192> [#uses=1]
- %6 = or i192 %3, %1 ; <i192> [#uses=1]
- %7 = or i192 %6, %5 ; <i192> [#uses=2]
- switch i32 undef, label %bb82 [
- i32 77, label %bb38
- i32 129, label %bb21
- i32 130, label %bb20
- ]
-
-bb20: ; preds = %entry
- ret void
-
-bb21: ; preds = %entry
- br i1 undef, label %bb82, label %bb29
-
-bb29: ; preds = %bb21
- %tmp18.i = and i192 %3, 1208907372870555465154560 ; <i192> [#uses=1]
- %mask.i = or i192 %tmp18.i, %1 ; <i192> [#uses=1]
- %mask41.i = or i192 %mask.i, 0 ; <i192> [#uses=1]
- br label %bb82
-
-bb38: ; preds = %entry
- br label %bb82
-
-bb82: ; preds = %bb38, %bb29, %bb21, %entry
- %d.0 = phi i192 [ %mask41.i, %bb29 ], [ undef, %bb38 ], [ %7, %entry ], [ %7, %bb21 ] ; <i192> [#uses=1]
- %tmp51 = trunc i192 %d.0 to i64 ; <i64> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-30-RegScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-30-RegScavengerAssert.ll
deleted file mode 100644
index 17efe00..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-30-RegScavengerAssert.ll
+++ /dev/null
@@ -1,122 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=armv6-apple-darwin9
-
-@nn = external global i32 ; <i32*> [#uses=1]
-@al_len = external global i32 ; <i32*> [#uses=2]
-@no_mat = external global i32 ; <i32*> [#uses=2]
-@no_mis = external global i32 ; <i32*> [#uses=2]
-@"\01LC12" = external constant [29 x i8], align 1 ; <[29 x i8]*> [#uses=1]
-@"\01LC16" = external constant [33 x i8], align 1 ; <[33 x i8]*> [#uses=1]
-@"\01LC17" = external constant [47 x i8], align 1 ; <[47 x i8]*> [#uses=1]
-
-declare arm_apcscc i32 @printf(i8* nocapture, ...) nounwind
-
-declare arm_apcscc void @diff(i8*, i8*, i32, i32, i32, i32) nounwind
-
-define arm_apcscc void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
-entry:
- br i1 undef, label %bb5, label %bb
-
-bb: ; preds = %bb, %entry
- br label %bb
-
-bb5: ; preds = %entry
- br i1 undef, label %bb6, label %bb8
-
-bb6: ; preds = %bb6, %bb5
- br i1 undef, label %bb8, label %bb6
-
-bb8: ; preds = %bb6, %bb5
- br label %bb15
-
-bb9: ; preds = %bb15
- br i1 undef, label %bb10, label %bb11
-
-bb10: ; preds = %bb9
- unreachable
-
-bb11: ; preds = %bb9
- %0 = load i32* undef, align 4 ; <i32> [#uses=2]
- %1 = add i32 %0, 1 ; <i32> [#uses=2]
- store i32 %1, i32* undef, align 4
- %2 = load i32* undef, align 4 ; <i32> [#uses=1]
- store i32 %2, i32* @nn, align 4
- store i32 0, i32* @al_len, align 4
- store i32 0, i32* @no_mat, align 4
- store i32 0, i32* @no_mis, align 4
- %3 = getelementptr i8* %B, i32 %0 ; <i8*> [#uses=1]
- tail call arm_apcscc void @diff(i8* undef, i8* %3, i32 undef, i32 undef, i32 undef, i32 undef) nounwind
- %4 = sitofp i32 undef to double ; <double> [#uses=1]
- %5 = fdiv double %4, 1.000000e+01 ; <double> [#uses=1]
- %6 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([29 x i8]* @"\01LC12", i32 0, i32 0), double %5) nounwind ; <i32> [#uses=0]
- %7 = load i32* @al_len, align 4 ; <i32> [#uses=1]
- %8 = load i32* @no_mat, align 4 ; <i32> [#uses=1]
- %9 = load i32* @no_mis, align 4 ; <i32> [#uses=1]
- %10 = sub i32 %7, %8 ; <i32> [#uses=1]
- %11 = sub i32 %10, %9 ; <i32> [#uses=1]
- %12 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([33 x i8]* @"\01LC16", i32 0, i32 0), i32 %11) nounwind ; <i32> [#uses=0]
- %13 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([47 x i8]* @"\01LC17", i32 0, i32 0), i32 undef, i32 %1, i32 undef, i32 undef) nounwind ; <i32> [#uses=0]
- br i1 undef, label %bb15, label %bb12
-
-bb12: ; preds = %bb11
- br label %bb228.i
-
-bb74.i: ; preds = %bb228.i
- br i1 undef, label %bb138.i, label %bb145.i
-
-bb138.i: ; preds = %bb74.i
- br label %bb145.i
-
-bb145.i: ; preds = %bb228.i, %bb138.i, %bb74.i
- br i1 undef, label %bb146.i, label %bb151.i
-
-bb146.i: ; preds = %bb145.i
- br i1 undef, label %bb228.i, label %bb151.i
-
-bb151.i: ; preds = %bb146.i, %bb145.i
- br i1 undef, label %bb153.i, label %bb228.i
-
-bb153.i: ; preds = %bb151.i
- br i1 undef, label %bb220.i, label %bb.nph.i98
-
-bb.nph.i98: ; preds = %bb153.i
- br label %bb158.i
-
-bb158.i: ; preds = %bb218.i, %bb.nph.i98
- br i1 undef, label %bb168.i, label %bb160.i
-
-bb160.i: ; preds = %bb158.i
- br i1 undef, label %bb161.i, label %bb168.i
-
-bb161.i: ; preds = %bb160.i
- br i1 undef, label %bb168.i, label %bb163.i
-
-bb163.i: ; preds = %bb161.i
- br i1 undef, label %bb167.i, label %bb168.i
-
-bb167.i: ; preds = %bb163.i
- br label %bb168.i
-
-bb168.i: ; preds = %bb167.i, %bb163.i, %bb161.i, %bb160.i, %bb158.i
- br i1 undef, label %bb211.i, label %bb218.i
-
-bb211.i: ; preds = %bb168.i
- br label %bb218.i
-
-bb218.i: ; preds = %bb211.i, %bb168.i
- br i1 undef, label %bb220.i, label %bb158.i
-
-bb220.i: ; preds = %bb218.i, %bb153.i
- br i1 undef, label %bb221.i, label %bb228.i
-
-bb221.i: ; preds = %bb220.i
- br label %bb228.i
-
-bb228.i: ; preds = %bb221.i, %bb220.i, %bb151.i, %bb146.i, %bb12
- br i1 undef, label %bb74.i, label %bb145.i
-
-bb15: ; preds = %bb11, %bb8
- br i1 undef, label %return, label %bb9
-
-return: ; preds = %bb15
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-30-RegScavengerAssert2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-30-RegScavengerAssert2.ll
deleted file mode 100644
index f520be3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-30-RegScavengerAssert2.ll
+++ /dev/null
@@ -1,116 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=armv6-apple-darwin9
-
-@no_mat = external global i32 ; <i32*> [#uses=1]
-@no_mis = external global i32 ; <i32*> [#uses=2]
-@"\01LC11" = external constant [33 x i8], align 1 ; <[33 x i8]*> [#uses=1]
-@"\01LC15" = external constant [33 x i8], align 1 ; <[33 x i8]*> [#uses=1]
-@"\01LC17" = external constant [47 x i8], align 1 ; <[47 x i8]*> [#uses=1]
-
-declare arm_apcscc i32 @printf(i8* nocapture, ...) nounwind
-
-declare arm_apcscc void @diff(i8*, i8*, i32, i32, i32, i32) nounwind
-
-define arm_apcscc void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
-entry:
- br i1 undef, label %bb5, label %bb
-
-bb: ; preds = %bb, %entry
- br label %bb
-
-bb5: ; preds = %entry
- br i1 undef, label %bb6, label %bb8
-
-bb6: ; preds = %bb6, %bb5
- br i1 undef, label %bb8, label %bb6
-
-bb8: ; preds = %bb6, %bb5
- br label %bb15
-
-bb9: ; preds = %bb15
- br i1 undef, label %bb10, label %bb11
-
-bb10: ; preds = %bb9
- unreachable
-
-bb11: ; preds = %bb9
- %0 = load i32* undef, align 4 ; <i32> [#uses=3]
- %1 = add i32 %0, 1 ; <i32> [#uses=2]
- store i32 %1, i32* undef, align 4
- %2 = load i32* undef, align 4 ; <i32> [#uses=2]
- %3 = sub i32 %2, %0 ; <i32> [#uses=1]
- store i32 0, i32* @no_mat, align 4
- store i32 0, i32* @no_mis, align 4
- %4 = getelementptr i8* %B, i32 %0 ; <i8*> [#uses=1]
- tail call arm_apcscc void @diff(i8* undef, i8* %4, i32 undef, i32 %3, i32 undef, i32 undef) nounwind
- %5 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([33 x i8]* @"\01LC11", i32 0, i32 0), i32 %tmp13) nounwind ; <i32> [#uses=0]
- %6 = load i32* @no_mis, align 4 ; <i32> [#uses=1]
- %7 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([33 x i8]* @"\01LC15", i32 0, i32 0), i32 %6) nounwind ; <i32> [#uses=0]
- %8 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([47 x i8]* @"\01LC17", i32 0, i32 0), i32 undef, i32 %1, i32 undef, i32 %2) nounwind ; <i32> [#uses=0]
- br i1 undef, label %bb15, label %bb12
-
-bb12: ; preds = %bb11
- br label %bb228.i
-
-bb74.i: ; preds = %bb228.i
- br i1 undef, label %bb138.i, label %bb145.i
-
-bb138.i: ; preds = %bb74.i
- br label %bb145.i
-
-bb145.i: ; preds = %bb228.i, %bb138.i, %bb74.i
- br i1 undef, label %bb146.i, label %bb151.i
-
-bb146.i: ; preds = %bb145.i
- br i1 undef, label %bb228.i, label %bb151.i
-
-bb151.i: ; preds = %bb146.i, %bb145.i
- br i1 undef, label %bb153.i, label %bb228.i
-
-bb153.i: ; preds = %bb151.i
- br i1 undef, label %bb220.i, label %bb.nph.i98
-
-bb.nph.i98: ; preds = %bb153.i
- br label %bb158.i
-
-bb158.i: ; preds = %bb218.i, %bb.nph.i98
- br i1 undef, label %bb168.i, label %bb160.i
-
-bb160.i: ; preds = %bb158.i
- br i1 undef, label %bb161.i, label %bb168.i
-
-bb161.i: ; preds = %bb160.i
- br i1 undef, label %bb168.i, label %bb163.i
-
-bb163.i: ; preds = %bb161.i
- br i1 undef, label %bb167.i, label %bb168.i
-
-bb167.i: ; preds = %bb163.i
- br label %bb168.i
-
-bb168.i: ; preds = %bb167.i, %bb163.i, %bb161.i, %bb160.i, %bb158.i
- br i1 undef, label %bb211.i, label %bb218.i
-
-bb211.i: ; preds = %bb168.i
- br label %bb218.i
-
-bb218.i: ; preds = %bb211.i, %bb168.i
- br i1 undef, label %bb220.i, label %bb158.i
-
-bb220.i: ; preds = %bb218.i, %bb153.i
- br i1 undef, label %bb221.i, label %bb228.i
-
-bb221.i: ; preds = %bb220.i
- br label %bb228.i
-
-bb228.i: ; preds = %bb221.i, %bb220.i, %bb151.i, %bb146.i, %bb12
- br i1 undef, label %bb74.i, label %bb145.i
-
-bb15: ; preds = %bb11, %bb8
- %indvar11 = phi i32 [ 0, %bb8 ], [ %tmp13, %bb11 ] ; <i32> [#uses=2]
- %tmp13 = add i32 %indvar11, 1 ; <i32> [#uses=2]
- %count.0 = sub i32 undef, %indvar11 ; <i32> [#uses=0]
- br i1 undef, label %return, label %bb9
-
-return: ; preds = %bb15
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-30-RegScavengerAssert3.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-30-RegScavengerAssert3.ll
deleted file mode 100644
index eee6ff9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-30-RegScavengerAssert3.ll
+++ /dev/null
@@ -1,128 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=armv6-apple-darwin9
-
-@JJ = external global i32* ; <i32**> [#uses=1]
-
-define arm_apcscc void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
-entry:
- br i1 undef, label %bb5, label %bb
-
-bb: ; preds = %bb, %entry
- br label %bb
-
-bb5: ; preds = %entry
- br i1 undef, label %bb6, label %bb8
-
-bb6: ; preds = %bb6, %bb5
- br i1 undef, label %bb8, label %bb6
-
-bb8: ; preds = %bb6, %bb5
- br label %bb15
-
-bb9: ; preds = %bb15
- br i1 undef, label %bb10, label %bb11
-
-bb10: ; preds = %bb9
- unreachable
-
-bb11: ; preds = %bb9
- br i1 undef, label %bb15, label %bb12
-
-bb12: ; preds = %bb11
- %0 = load i32** @JJ, align 4 ; <i32*> [#uses=1]
- br label %bb228.i
-
-bb74.i: ; preds = %bb228.i
- br i1 undef, label %bb138.i, label %bb145.i
-
-bb138.i: ; preds = %bb74.i
- br label %bb145.i
-
-bb145.i: ; preds = %bb228.i, %bb138.i, %bb74.i
- %cflag.0.i = phi i16 [ 0, %bb228.i ], [ 0, %bb74.i ], [ 1, %bb138.i ] ; <i16> [#uses=1]
- br i1 undef, label %bb146.i, label %bb151.i
-
-bb146.i: ; preds = %bb145.i
- br i1 undef, label %bb228.i, label %bb151.i
-
-bb151.i: ; preds = %bb146.i, %bb145.i
- %.not297 = icmp ne i16 %cflag.0.i, 0 ; <i1> [#uses=1]
- %or.cond298 = and i1 undef, %.not297 ; <i1> [#uses=1]
- br i1 %or.cond298, label %bb153.i, label %bb228.i
-
-bb153.i: ; preds = %bb151.i
- br i1 undef, label %bb220.i, label %bb.nph.i98
-
-bb.nph.i98: ; preds = %bb153.i
- br label %bb158.i
-
-bb158.i: ; preds = %bb218.i, %bb.nph.i98
- %c.1020.i = phi i32 [ 0, %bb.nph.i98 ], [ %c.14.i, %bb218.i ] ; <i32> [#uses=1]
- %cflag.418.i = phi i16 [ 0, %bb.nph.i98 ], [ %cflag.3.i, %bb218.i ] ; <i16> [#uses=1]
- %pj.317.i = phi i32 [ undef, %bb.nph.i98 ], [ %8, %bb218.i ] ; <i32> [#uses=1]
- %pi.316.i = phi i32 [ undef, %bb.nph.i98 ], [ %7, %bb218.i ] ; <i32> [#uses=1]
- %fj.515.i = phi i32 [ undef, %bb.nph.i98 ], [ %fj.4.i, %bb218.i ] ; <i32> [#uses=3]
- %ci.910.i = phi i32 [ undef, %bb.nph.i98 ], [ %ci.12.i, %bb218.i ] ; <i32> [#uses=2]
- %i.121.i = sub i32 undef, undef ; <i32> [#uses=3]
- %tmp105.i = sub i32 undef, undef ; <i32> [#uses=1]
- %1 = sub i32 %c.1020.i, undef ; <i32> [#uses=0]
- br i1 undef, label %bb168.i, label %bb160.i
-
-bb160.i: ; preds = %bb158.i
- br i1 undef, label %bb161.i, label %bb168.i
-
-bb161.i: ; preds = %bb160.i
- br i1 undef, label %bb168.i, label %bb163.i
-
-bb163.i: ; preds = %bb161.i
- %2 = icmp slt i32 %fj.515.i, undef ; <i1> [#uses=1]
- %3 = and i1 %2, undef ; <i1> [#uses=1]
- br i1 %3, label %bb167.i, label %bb168.i
-
-bb167.i: ; preds = %bb163.i
- br label %bb168.i
-
-bb168.i: ; preds = %bb167.i, %bb163.i, %bb161.i, %bb160.i, %bb158.i
- %fi.5.i = phi i32 [ undef, %bb167.i ], [ %ci.910.i, %bb158.i ], [ undef, %bb160.i ], [ %ci.910.i, %bb161.i ], [ undef, %bb163.i ] ; <i32> [#uses=1]
- %fj.4.i = phi i32 [ undef, %bb167.i ], [ undef, %bb158.i ], [ %fj.515.i, %bb160.i ], [ undef, %bb161.i ], [ %fj.515.i, %bb163.i ] ; <i32> [#uses=2]
- %scevgep88.i = getelementptr i32* null, i32 %i.121.i ; <i32*> [#uses=3]
- %4 = load i32* %scevgep88.i, align 4 ; <i32> [#uses=2]
- %scevgep89.i = getelementptr i32* %0, i32 %i.121.i ; <i32*> [#uses=3]
- %5 = load i32* %scevgep89.i, align 4 ; <i32> [#uses=1]
- %ci.10.i = select i1 undef, i32 %pi.316.i, i32 %i.121.i ; <i32> [#uses=0]
- %cj.9.i = select i1 undef, i32 %pj.317.i, i32 undef ; <i32> [#uses=0]
- %6 = icmp slt i32 undef, 0 ; <i1> [#uses=3]
- %ci.12.i = select i1 %6, i32 %fi.5.i, i32 %4 ; <i32> [#uses=2]
- %cj.11.i100 = select i1 %6, i32 %fj.4.i, i32 %5 ; <i32> [#uses=1]
- %c.14.i = select i1 %6, i32 0, i32 undef ; <i32> [#uses=2]
- store i32 %c.14.i, i32* undef, align 4
- %7 = load i32* %scevgep88.i, align 4 ; <i32> [#uses=1]
- %8 = load i32* %scevgep89.i, align 4 ; <i32> [#uses=1]
- store i32 %ci.12.i, i32* %scevgep88.i, align 4
- store i32 %cj.11.i100, i32* %scevgep89.i, align 4
- store i32 %4, i32* undef, align 4
- br i1 undef, label %bb211.i, label %bb218.i
-
-bb211.i: ; preds = %bb168.i
- br label %bb218.i
-
-bb218.i: ; preds = %bb211.i, %bb168.i
- %cflag.3.i = phi i16 [ %cflag.418.i, %bb168.i ], [ 1, %bb211.i ] ; <i16> [#uses=2]
- %9 = icmp slt i32 %tmp105.i, undef ; <i1> [#uses=1]
- br i1 %9, label %bb220.i, label %bb158.i
-
-bb220.i: ; preds = %bb218.i, %bb153.i
- %cflag.4.lcssa.i = phi i16 [ 0, %bb153.i ], [ %cflag.3.i, %bb218.i ] ; <i16> [#uses=0]
- br i1 undef, label %bb221.i, label %bb228.i
-
-bb221.i: ; preds = %bb220.i
- br label %bb228.i
-
-bb228.i: ; preds = %bb221.i, %bb220.i, %bb151.i, %bb146.i, %bb12
- br i1 undef, label %bb74.i, label %bb145.i
-
-bb15: ; preds = %bb11, %bb8
- br i1 undef, label %return, label %bb9
-
-return: ; preds = %bb15
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-30-RegScavengerAssert4.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-30-RegScavengerAssert4.ll
deleted file mode 100644
index 93c92b1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-30-RegScavengerAssert4.ll
+++ /dev/null
@@ -1,128 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=armv6-apple-darwin9
-
-@r = external global i32 ; <i32*> [#uses=1]
-@qr = external global i32 ; <i32*> [#uses=1]
-@II = external global i32* ; <i32**> [#uses=1]
-@no_mis = external global i32 ; <i32*> [#uses=1]
-@name1 = external global i8* ; <i8**> [#uses=1]
-
-declare arm_apcscc void @diff(i8*, i8*, i32, i32, i32, i32) nounwind
-
-define arm_apcscc void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
-entry:
- br i1 undef, label %bb5, label %bb
-
-bb: ; preds = %bb, %entry
- br label %bb
-
-bb5: ; preds = %entry
- br i1 undef, label %bb6, label %bb8
-
-bb6: ; preds = %bb6, %bb5
- br i1 undef, label %bb8, label %bb6
-
-bb8: ; preds = %bb6, %bb5
- %0 = load i8** @name1, align 4 ; <i8*> [#uses=0]
- br label %bb15
-
-bb9: ; preds = %bb15
- br i1 undef, label %bb10, label %bb11
-
-bb10: ; preds = %bb9
- unreachable
-
-bb11: ; preds = %bb9
- store i32 0, i32* @no_mis, align 4
- %1 = getelementptr i8* %A, i32 0 ; <i8*> [#uses=1]
- %2 = getelementptr i8* %B, i32 0 ; <i8*> [#uses=1]
- tail call arm_apcscc void @diff(i8* %1, i8* %2, i32 undef, i32 undef, i32 undef, i32 undef) nounwind
- br i1 undef, label %bb15, label %bb12
-
-bb12: ; preds = %bb11
- %3 = load i32** @II, align 4 ; <i32*> [#uses=1]
- %4 = load i32* @r, align 4 ; <i32> [#uses=1]
- %5 = load i32* @qr, align 4 ; <i32> [#uses=1]
- br label %bb228.i
-
-bb74.i: ; preds = %bb228.i
- br i1 undef, label %bb138.i, label %bb145.i
-
-bb138.i: ; preds = %bb74.i
- br label %bb145.i
-
-bb145.i: ; preds = %bb228.i, %bb138.i, %bb74.i
- br i1 undef, label %bb146.i, label %bb151.i
-
-bb146.i: ; preds = %bb145.i
- br i1 undef, label %bb228.i, label %bb151.i
-
-bb151.i: ; preds = %bb146.i, %bb145.i
- br i1 undef, label %bb153.i, label %bb228.i
-
-bb153.i: ; preds = %bb151.i
- %6 = add i32 undef, -1 ; <i32> [#uses=3]
- br i1 undef, label %bb220.i, label %bb.nph.i98
-
-bb.nph.i98: ; preds = %bb153.i
- br label %bb158.i
-
-bb158.i: ; preds = %bb218.i, %bb.nph.i98
- %c.1020.i = phi i32 [ 0, %bb.nph.i98 ], [ %c.14.i, %bb218.i ] ; <i32> [#uses=1]
- %f.419.i = phi i32 [ undef, %bb.nph.i98 ], [ %f.5.i, %bb218.i ] ; <i32> [#uses=1]
- %pi.316.i = phi i32 [ undef, %bb.nph.i98 ], [ %10, %bb218.i ] ; <i32> [#uses=1]
- %fj.515.i = phi i32 [ %6, %bb.nph.i98 ], [ %fj.4.i, %bb218.i ] ; <i32> [#uses=2]
- %fi.614.i = phi i32 [ undef, %bb.nph.i98 ], [ %fi.5.i, %bb218.i ] ; <i32> [#uses=3]
- %cj.811.i = phi i32 [ %6, %bb.nph.i98 ], [ %cj.11.i100, %bb218.i ] ; <i32> [#uses=3]
- %ci.910.i = phi i32 [ undef, %bb.nph.i98 ], [ %ci.12.i, %bb218.i ] ; <i32> [#uses=2]
- %7 = sub i32 %f.419.i, %4 ; <i32> [#uses=5]
- %8 = sub i32 %c.1020.i, %5 ; <i32> [#uses=2]
- %9 = icmp slt i32 %7, %8 ; <i1> [#uses=1]
- br i1 %9, label %bb168.i, label %bb160.i
-
-bb160.i: ; preds = %bb158.i
- br i1 undef, label %bb161.i, label %bb168.i
-
-bb161.i: ; preds = %bb160.i
- br i1 undef, label %bb168.i, label %bb163.i
-
-bb163.i: ; preds = %bb161.i
- br i1 undef, label %bb167.i, label %bb168.i
-
-bb167.i: ; preds = %bb163.i
- br label %bb168.i
-
-bb168.i: ; preds = %bb167.i, %bb163.i, %bb161.i, %bb160.i, %bb158.i
- %fi.5.i = phi i32 [ %fi.614.i, %bb167.i ], [ %ci.910.i, %bb158.i ], [ %fi.614.i, %bb160.i ], [ %ci.910.i, %bb161.i ], [ %fi.614.i, %bb163.i ] ; <i32> [#uses=2]
- %fj.4.i = phi i32 [ %cj.811.i, %bb167.i ], [ %cj.811.i, %bb158.i ], [ %fj.515.i, %bb160.i ], [ %cj.811.i, %bb161.i ], [ %fj.515.i, %bb163.i ] ; <i32> [#uses=2]
- %f.5.i = phi i32 [ %7, %bb167.i ], [ %8, %bb158.i ], [ %7, %bb160.i ], [ %7, %bb161.i ], [ %7, %bb163.i ] ; <i32> [#uses=2]
- %scevgep88.i = getelementptr i32* %3, i32 undef ; <i32*> [#uses=1]
- %ci.10.i = select i1 undef, i32 %pi.316.i, i32 undef ; <i32> [#uses=0]
- %ci.12.i = select i1 undef, i32 %fi.5.i, i32 undef ; <i32> [#uses=1]
- %cj.11.i100 = select i1 undef, i32 %fj.4.i, i32 undef ; <i32> [#uses=1]
- %c.14.i = select i1 undef, i32 %f.5.i, i32 undef ; <i32> [#uses=1]
- %10 = load i32* %scevgep88.i, align 4 ; <i32> [#uses=1]
- br i1 undef, label %bb211.i, label %bb218.i
-
-bb211.i: ; preds = %bb168.i
- br label %bb218.i
-
-bb218.i: ; preds = %bb211.i, %bb168.i
- br i1 undef, label %bb220.i, label %bb158.i
-
-bb220.i: ; preds = %bb218.i, %bb153.i
- %11 = getelementptr i32* null, i32 %6 ; <i32*> [#uses=1]
- store i32 undef, i32* %11, align 4
- br i1 undef, label %bb221.i, label %bb228.i
-
-bb221.i: ; preds = %bb220.i
- br label %bb228.i
-
-bb228.i: ; preds = %bb221.i, %bb220.i, %bb151.i, %bb146.i, %bb12
- br i1 undef, label %bb74.i, label %bb145.i
-
-bb15: ; preds = %bb11, %bb8
- br i1 undef, label %return, label %bb9
-
-return: ; preds = %bb15
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-30-RegScavengerAssert5.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-30-RegScavengerAssert5.ll
deleted file mode 100644
index 277283d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-06-30-RegScavengerAssert5.ll
+++ /dev/null
@@ -1,99 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=armv6-apple-darwin9
-
-@XX = external global i32* ; <i32**> [#uses=1]
-
-define arm_apcscc void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
-entry:
- br i1 undef, label %bb5, label %bb
-
-bb: ; preds = %bb, %entry
- br label %bb
-
-bb5: ; preds = %entry
- br i1 undef, label %bb6, label %bb8
-
-bb6: ; preds = %bb6, %bb5
- br i1 undef, label %bb8, label %bb6
-
-bb8: ; preds = %bb6, %bb5
- br label %bb15
-
-bb9: ; preds = %bb15
- br i1 undef, label %bb10, label %bb11
-
-bb10: ; preds = %bb9
- unreachable
-
-bb11: ; preds = %bb9
- br i1 undef, label %bb15, label %bb12
-
-bb12: ; preds = %bb11
- %0 = load i32** @XX, align 4 ; <i32*> [#uses=0]
- br label %bb228.i
-
-bb74.i: ; preds = %bb228.i
- br i1 undef, label %bb138.i, label %bb145.i
-
-bb138.i: ; preds = %bb74.i
- br label %bb145.i
-
-bb145.i: ; preds = %bb228.i, %bb138.i, %bb74.i
- br i1 undef, label %bb146.i, label %bb151.i
-
-bb146.i: ; preds = %bb145.i
- br i1 undef, label %bb228.i, label %bb151.i
-
-bb151.i: ; preds = %bb146.i, %bb145.i
- br i1 undef, label %bb153.i, label %bb228.i
-
-bb153.i: ; preds = %bb151.i
- br i1 undef, label %bb220.i, label %bb.nph.i98
-
-bb.nph.i98: ; preds = %bb153.i
- br label %bb158.i
-
-bb158.i: ; preds = %bb218.i, %bb.nph.i98
- %1 = sub i32 undef, undef ; <i32> [#uses=4]
- %2 = sub i32 undef, undef ; <i32> [#uses=1]
- br i1 undef, label %bb168.i, label %bb160.i
-
-bb160.i: ; preds = %bb158.i
- br i1 undef, label %bb161.i, label %bb168.i
-
-bb161.i: ; preds = %bb160.i
- br i1 undef, label %bb168.i, label %bb163.i
-
-bb163.i: ; preds = %bb161.i
- br i1 undef, label %bb167.i, label %bb168.i
-
-bb167.i: ; preds = %bb163.i
- br label %bb168.i
-
-bb168.i: ; preds = %bb167.i, %bb163.i, %bb161.i, %bb160.i, %bb158.i
- %f.5.i = phi i32 [ %1, %bb167.i ], [ %2, %bb158.i ], [ %1, %bb160.i ], [ %1, %bb161.i ], [ %1, %bb163.i ] ; <i32> [#uses=1]
- %c.14.i = select i1 undef, i32 %f.5.i, i32 undef ; <i32> [#uses=1]
- store i32 %c.14.i, i32* undef, align 4
- store i32 undef, i32* null, align 4
- br i1 undef, label %bb211.i, label %bb218.i
-
-bb211.i: ; preds = %bb168.i
- br label %bb218.i
-
-bb218.i: ; preds = %bb211.i, %bb168.i
- br i1 undef, label %bb220.i, label %bb158.i
-
-bb220.i: ; preds = %bb218.i, %bb153.i
- br i1 undef, label %bb221.i, label %bb228.i
-
-bb221.i: ; preds = %bb220.i
- br label %bb228.i
-
-bb228.i: ; preds = %bb221.i, %bb220.i, %bb151.i, %bb146.i, %bb12
- br i1 undef, label %bb74.i, label %bb145.i
-
-bb15: ; preds = %bb11, %bb8
- br i1 undef, label %return, label %bb9
-
-return: ; preds = %bb15
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-01-CommuteBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-01-CommuteBug.ll
deleted file mode 100644
index 5c0e5fa..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-01-CommuteBug.ll
+++ /dev/null
@@ -1,130 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=armv6-apple-darwin9
-
-@qr = external global i32 ; <i32*> [#uses=1]
-@II = external global i32* ; <i32**> [#uses=1]
-@JJ = external global i32* ; <i32**> [#uses=1]
-
-define arm_apcscc void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
-entry:
- br i1 undef, label %bb5, label %bb
-
-bb: ; preds = %bb, %entry
- br label %bb
-
-bb5: ; preds = %entry
- br i1 undef, label %bb6, label %bb8
-
-bb6: ; preds = %bb6, %bb5
- br i1 undef, label %bb8, label %bb6
-
-bb8: ; preds = %bb6, %bb5
- br label %bb15
-
-bb9: ; preds = %bb15
- br i1 undef, label %bb10, label %bb11
-
-bb10: ; preds = %bb9
- unreachable
-
-bb11: ; preds = %bb9
- br i1 undef, label %bb15, label %bb12
-
-bb12: ; preds = %bb11
- %0 = load i32** @II, align 4 ; <i32*> [#uses=1]
- %1 = load i32** @JJ, align 4 ; <i32*> [#uses=1]
- %2 = load i32* @qr, align 4 ; <i32> [#uses=1]
- br label %bb228.i
-
-bb74.i: ; preds = %bb228.i
- br i1 undef, label %bb138.i, label %bb145.i
-
-bb138.i: ; preds = %bb74.i
- br label %bb145.i
-
-bb145.i: ; preds = %bb228.i, %bb138.i, %bb74.i
- %cflag.0.i = phi i16 [ %cflag.1.i, %bb228.i ], [ %cflag.1.i, %bb74.i ], [ 1, %bb138.i ] ; <i16> [#uses=2]
- br i1 undef, label %bb146.i, label %bb151.i
-
-bb146.i: ; preds = %bb145.i
- br i1 undef, label %bb228.i, label %bb151.i
-
-bb151.i: ; preds = %bb146.i, %bb145.i
- %.not297 = icmp ne i16 %cflag.0.i, 0 ; <i1> [#uses=1]
- %or.cond298 = and i1 undef, %.not297 ; <i1> [#uses=1]
- br i1 %or.cond298, label %bb153.i, label %bb228.i
-
-bb153.i: ; preds = %bb151.i
- br i1 undef, label %bb220.i, label %bb.nph.i98
-
-bb.nph.i98: ; preds = %bb153.i
- br label %bb158.i
-
-bb158.i: ; preds = %bb218.i, %bb.nph.i98
- %c.1020.i = phi i32 [ 0, %bb.nph.i98 ], [ %c.14.i, %bb218.i ] ; <i32> [#uses=1]
- %f.419.i = phi i32 [ undef, %bb.nph.i98 ], [ %f.5.i, %bb218.i ] ; <i32> [#uses=1]
- %cflag.418.i = phi i16 [ 0, %bb.nph.i98 ], [ %cflag.3.i, %bb218.i ] ; <i16> [#uses=1]
- %pj.317.i = phi i32 [ undef, %bb.nph.i98 ], [ %7, %bb218.i ] ; <i32> [#uses=1]
- %pi.316.i = phi i32 [ undef, %bb.nph.i98 ], [ %6, %bb218.i ] ; <i32> [#uses=1]
- %fj.515.i = phi i32 [ undef, %bb.nph.i98 ], [ %fj.4.i, %bb218.i ] ; <i32> [#uses=2]
- %fi.614.i = phi i32 [ undef, %bb.nph.i98 ], [ %fi.5.i, %bb218.i ] ; <i32> [#uses=3]
- %cj.811.i = phi i32 [ undef, %bb.nph.i98 ], [ %cj.11.i100, %bb218.i ] ; <i32> [#uses=3]
- %ci.910.i = phi i32 [ undef, %bb.nph.i98 ], [ %ci.12.i, %bb218.i ] ; <i32> [#uses=2]
- %3 = sub i32 %f.419.i, 0 ; <i32> [#uses=5]
- %4 = sub i32 %c.1020.i, %2 ; <i32> [#uses=2]
- %5 = icmp slt i32 %3, %4 ; <i1> [#uses=1]
- br i1 %5, label %bb168.i, label %bb160.i
-
-bb160.i: ; preds = %bb158.i
- br i1 undef, label %bb161.i, label %bb168.i
-
-bb161.i: ; preds = %bb160.i
- br i1 undef, label %bb168.i, label %bb163.i
-
-bb163.i: ; preds = %bb161.i
- br i1 undef, label %bb167.i, label %bb168.i
-
-bb167.i: ; preds = %bb163.i
- br label %bb168.i
-
-bb168.i: ; preds = %bb167.i, %bb163.i, %bb161.i, %bb160.i, %bb158.i
- %fi.5.i = phi i32 [ %fi.614.i, %bb167.i ], [ %ci.910.i, %bb158.i ], [ %fi.614.i, %bb160.i ], [ %ci.910.i, %bb161.i ], [ %fi.614.i, %bb163.i ] ; <i32> [#uses=2]
- %fj.4.i = phi i32 [ %cj.811.i, %bb167.i ], [ %cj.811.i, %bb158.i ], [ %fj.515.i, %bb160.i ], [ %cj.811.i, %bb161.i ], [ %fj.515.i, %bb163.i ] ; <i32> [#uses=2]
- %f.5.i = phi i32 [ %3, %bb167.i ], [ %4, %bb158.i ], [ %3, %bb160.i ], [ %3, %bb161.i ], [ %3, %bb163.i ] ; <i32> [#uses=2]
- %scevgep88.i = getelementptr i32* %0, i32 undef ; <i32*> [#uses=2]
- %scevgep89.i = getelementptr i32* %1, i32 undef ; <i32*> [#uses=2]
- %ci.10.i = select i1 undef, i32 %pi.316.i, i32 undef ; <i32> [#uses=0]
- %cj.9.i = select i1 undef, i32 %pj.317.i, i32 undef ; <i32> [#uses=0]
- %ci.12.i = select i1 undef, i32 %fi.5.i, i32 undef ; <i32> [#uses=2]
- %cj.11.i100 = select i1 undef, i32 %fj.4.i, i32 undef ; <i32> [#uses=2]
- %c.14.i = select i1 undef, i32 %f.5.i, i32 undef ; <i32> [#uses=1]
- %6 = load i32* %scevgep88.i, align 4 ; <i32> [#uses=1]
- %7 = load i32* %scevgep89.i, align 4 ; <i32> [#uses=1]
- store i32 %ci.12.i, i32* %scevgep88.i, align 4
- store i32 %cj.11.i100, i32* %scevgep89.i, align 4
- br i1 undef, label %bb211.i, label %bb218.i
-
-bb211.i: ; preds = %bb168.i
- br label %bb218.i
-
-bb218.i: ; preds = %bb211.i, %bb168.i
- %cflag.3.i = phi i16 [ %cflag.418.i, %bb168.i ], [ 1, %bb211.i ] ; <i16> [#uses=2]
- %8 = icmp slt i32 undef, undef ; <i1> [#uses=1]
- br i1 %8, label %bb220.i, label %bb158.i
-
-bb220.i: ; preds = %bb218.i, %bb153.i
- %cflag.4.lcssa.i = phi i16 [ 0, %bb153.i ], [ %cflag.3.i, %bb218.i ] ; <i16> [#uses=2]
- br i1 undef, label %bb221.i, label %bb228.i
-
-bb221.i: ; preds = %bb220.i
- br label %bb228.i
-
-bb228.i: ; preds = %bb221.i, %bb220.i, %bb151.i, %bb146.i, %bb12
- %cflag.1.i = phi i16 [ 0, %bb146.i ], [ %cflag.0.i, %bb151.i ], [ %cflag.4.lcssa.i, %bb220.i ], [ 1, %bb12 ], [ %cflag.4.lcssa.i, %bb221.i ] ; <i16> [#uses=2]
- br i1 false, label %bb74.i, label %bb145.i
-
-bb15: ; preds = %bb11, %bb8
- br i1 false, label %return, label %bb9
-
-return: ; preds = %bb15
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-09-asm-p-constraint.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-09-asm-p-constraint.ll
deleted file mode 100644
index e1e94b6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-09-asm-p-constraint.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6
-
-define void @test(i8* %x) nounwind {
-entry:
- call void asm sideeffect "pld\09${0:a}", "r,~{cc}"(i8* %x) nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-18-RewriterBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-18-RewriterBug.ll
deleted file mode 100644
index 2b7ccd8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-18-RewriterBug.ll
+++ /dev/null
@@ -1,1323 +0,0 @@
-; RUN: llc < %s -mtriple=armv6-apple-darwin10 -mattr=+vfp2 | grep vcmpe | count 13
-
- %struct.EDGE_PAIR = type { %struct.edge_rec*, %struct.edge_rec* }
- %struct.VEC2 = type { double, double, double }
- %struct.VERTEX = type { %struct.VEC2, %struct.VERTEX*, %struct.VERTEX* }
- %struct.edge_rec = type { %struct.VERTEX*, %struct.edge_rec*, i32, i8* }
-@avail_edge = internal global %struct.edge_rec* null ; <%struct.edge_rec**> [#uses=6]
-@_2E_str7 = internal constant [21 x i8] c"ERROR: Only 1 point!\00", section "__TEXT,__cstring,cstring_literals", align 1 ; <[21 x i8]*> [#uses=1]
-@llvm.used = appending global [1 x i8*] [i8* bitcast (void (%struct.EDGE_PAIR*, %struct.VERTEX*, %struct.VERTEX*)* @build_delaunay to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define arm_apcscc void @build_delaunay(%struct.EDGE_PAIR* noalias nocapture sret %agg.result, %struct.VERTEX* %tree, %struct.VERTEX* %extra) nounwind {
-entry:
- %delright = alloca %struct.EDGE_PAIR, align 8 ; <%struct.EDGE_PAIR*> [#uses=3]
- %delleft = alloca %struct.EDGE_PAIR, align 8 ; <%struct.EDGE_PAIR*> [#uses=3]
- %0 = icmp eq %struct.VERTEX* %tree, null ; <i1> [#uses=1]
- br i1 %0, label %bb8, label %bb
-
-bb: ; preds = %entry
- %1 = getelementptr %struct.VERTEX* %tree, i32 0, i32 2 ; <%struct.VERTEX**> [#uses=1]
- %2 = load %struct.VERTEX** %1, align 4 ; <%struct.VERTEX*> [#uses=2]
- %3 = icmp eq %struct.VERTEX* %2, null ; <i1> [#uses=1]
- br i1 %3, label %bb7, label %bb1.i
-
-bb1.i: ; preds = %bb1.i, %bb
- %tree_addr.0.i = phi %struct.VERTEX* [ %5, %bb1.i ], [ %tree, %bb ] ; <%struct.VERTEX*> [#uses=3]
- %4 = getelementptr %struct.VERTEX* %tree_addr.0.i, i32 0, i32 1 ; <%struct.VERTEX**> [#uses=1]
- %5 = load %struct.VERTEX** %4, align 4 ; <%struct.VERTEX*> [#uses=2]
- %6 = icmp eq %struct.VERTEX* %5, null ; <i1> [#uses=1]
- br i1 %6, label %get_low.exit, label %bb1.i
-
-get_low.exit: ; preds = %bb1.i
- call arm_apcscc void @build_delaunay(%struct.EDGE_PAIR* noalias sret %delright, %struct.VERTEX* %2, %struct.VERTEX* %extra) nounwind
- %7 = getelementptr %struct.VERTEX* %tree, i32 0, i32 1 ; <%struct.VERTEX**> [#uses=1]
- %8 = load %struct.VERTEX** %7, align 4 ; <%struct.VERTEX*> [#uses=1]
- call arm_apcscc void @build_delaunay(%struct.EDGE_PAIR* noalias sret %delleft, %struct.VERTEX* %8, %struct.VERTEX* %tree) nounwind
- %9 = getelementptr %struct.EDGE_PAIR* %delleft, i32 0, i32 0 ; <%struct.edge_rec**> [#uses=1]
- %10 = load %struct.edge_rec** %9, align 8 ; <%struct.edge_rec*> [#uses=2]
- %11 = getelementptr %struct.EDGE_PAIR* %delleft, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %12 = load %struct.edge_rec** %11, align 4 ; <%struct.edge_rec*> [#uses=1]
- %13 = getelementptr %struct.EDGE_PAIR* %delright, i32 0, i32 0 ; <%struct.edge_rec**> [#uses=1]
- %14 = load %struct.edge_rec** %13, align 8 ; <%struct.edge_rec*> [#uses=1]
- %15 = getelementptr %struct.EDGE_PAIR* %delright, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %16 = load %struct.edge_rec** %15, align 4 ; <%struct.edge_rec*> [#uses=2]
- br label %bb.i
-
-bb.i: ; preds = %bb4.i, %get_low.exit
- %rdi_addr.0.i = phi %struct.edge_rec* [ %14, %get_low.exit ], [ %72, %bb4.i ] ; <%struct.edge_rec*> [#uses=2]
- %ldi_addr.1.i = phi %struct.edge_rec* [ %12, %get_low.exit ], [ %ldi_addr.0.i, %bb4.i ] ; <%struct.edge_rec*> [#uses=3]
- %17 = getelementptr %struct.edge_rec* %rdi_addr.0.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %18 = load %struct.VERTEX** %17, align 4 ; <%struct.VERTEX*> [#uses=3]
- %19 = ptrtoint %struct.edge_rec* %ldi_addr.1.i to i32 ; <i32> [#uses=1]
- %20 = getelementptr %struct.VERTEX* %18, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %21 = load double* %20, align 4 ; <double> [#uses=3]
- %22 = getelementptr %struct.VERTEX* %18, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %23 = load double* %22, align 4 ; <double> [#uses=3]
- br label %bb2.i
-
-bb1.i1: ; preds = %bb2.i
- %24 = ptrtoint %struct.edge_rec* %ldi_addr.0.i to i32 ; <i32> [#uses=2]
- %25 = add i32 %24, 48 ; <i32> [#uses=1]
- %26 = and i32 %25, 63 ; <i32> [#uses=1]
- %27 = and i32 %24, -64 ; <i32> [#uses=1]
- %28 = or i32 %26, %27 ; <i32> [#uses=1]
- %29 = inttoptr i32 %28 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %30 = getelementptr %struct.edge_rec* %29, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %31 = load %struct.edge_rec** %30, align 4 ; <%struct.edge_rec*> [#uses=1]
- %32 = ptrtoint %struct.edge_rec* %31 to i32 ; <i32> [#uses=2]
- %33 = add i32 %32, 16 ; <i32> [#uses=1]
- %34 = and i32 %33, 63 ; <i32> [#uses=1]
- %35 = and i32 %32, -64 ; <i32> [#uses=1]
- %36 = or i32 %34, %35 ; <i32> [#uses=2]
- %37 = inttoptr i32 %36 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- br label %bb2.i
-
-bb2.i: ; preds = %bb1.i1, %bb.i
- %ldi_addr.1.pn.i = phi %struct.edge_rec* [ %ldi_addr.1.i, %bb.i ], [ %37, %bb1.i1 ] ; <%struct.edge_rec*> [#uses=1]
- %.pn6.in.in.i = phi i32 [ %19, %bb.i ], [ %36, %bb1.i1 ] ; <i32> [#uses=1]
- %ldi_addr.0.i = phi %struct.edge_rec* [ %ldi_addr.1.i, %bb.i ], [ %37, %bb1.i1 ] ; <%struct.edge_rec*> [#uses=4]
- %.pn6.in.i = xor i32 %.pn6.in.in.i, 32 ; <i32> [#uses=1]
- %.pn6.i = inttoptr i32 %.pn6.in.i to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %t1.0.in.i = getelementptr %struct.edge_rec* %ldi_addr.1.pn.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %t2.0.in.i = getelementptr %struct.edge_rec* %.pn6.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %t1.0.i = load %struct.VERTEX** %t1.0.in.i ; <%struct.VERTEX*> [#uses=2]
- %t2.0.i = load %struct.VERTEX** %t2.0.in.i ; <%struct.VERTEX*> [#uses=2]
- %38 = getelementptr %struct.VERTEX* %t1.0.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %39 = load double* %38, align 4 ; <double> [#uses=3]
- %40 = getelementptr %struct.VERTEX* %t1.0.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %41 = load double* %40, align 4 ; <double> [#uses=3]
- %42 = getelementptr %struct.VERTEX* %t2.0.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %43 = load double* %42, align 4 ; <double> [#uses=1]
- %44 = getelementptr %struct.VERTEX* %t2.0.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %45 = load double* %44, align 4 ; <double> [#uses=1]
- %46 = fsub double %39, %21 ; <double> [#uses=1]
- %47 = fsub double %45, %23 ; <double> [#uses=1]
- %48 = fmul double %46, %47 ; <double> [#uses=1]
- %49 = fsub double %43, %21 ; <double> [#uses=1]
- %50 = fsub double %41, %23 ; <double> [#uses=1]
- %51 = fmul double %49, %50 ; <double> [#uses=1]
- %52 = fsub double %48, %51 ; <double> [#uses=1]
- %53 = fcmp ogt double %52, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %53, label %bb1.i1, label %bb3.i
-
-bb3.i: ; preds = %bb2.i
- %54 = ptrtoint %struct.edge_rec* %rdi_addr.0.i to i32 ; <i32> [#uses=1]
- %55 = xor i32 %54, 32 ; <i32> [#uses=3]
- %56 = inttoptr i32 %55 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %57 = getelementptr %struct.edge_rec* %56, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %58 = load %struct.VERTEX** %57, align 4 ; <%struct.VERTEX*> [#uses=2]
- %59 = getelementptr %struct.VERTEX* %58, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %60 = load double* %59, align 4 ; <double> [#uses=1]
- %61 = getelementptr %struct.VERTEX* %58, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %62 = load double* %61, align 4 ; <double> [#uses=1]
- %63 = fsub double %60, %39 ; <double> [#uses=1]
- %64 = fsub double %23, %41 ; <double> [#uses=1]
- %65 = fmul double %63, %64 ; <double> [#uses=1]
- %66 = fsub double %21, %39 ; <double> [#uses=1]
- %67 = fsub double %62, %41 ; <double> [#uses=1]
- %68 = fmul double %66, %67 ; <double> [#uses=1]
- %69 = fsub double %65, %68 ; <double> [#uses=1]
- %70 = fcmp ogt double %69, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %70, label %bb4.i, label %bb5.i
-
-bb4.i: ; preds = %bb3.i
- %71 = getelementptr %struct.edge_rec* %56, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %72 = load %struct.edge_rec** %71, align 4 ; <%struct.edge_rec*> [#uses=1]
- br label %bb.i
-
-bb5.i: ; preds = %bb3.i
- %73 = add i32 %55, 48 ; <i32> [#uses=1]
- %74 = and i32 %73, 63 ; <i32> [#uses=1]
- %75 = and i32 %55, -64 ; <i32> [#uses=1]
- %76 = or i32 %74, %75 ; <i32> [#uses=1]
- %77 = inttoptr i32 %76 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %78 = getelementptr %struct.edge_rec* %77, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %79 = load %struct.edge_rec** %78, align 4 ; <%struct.edge_rec*> [#uses=1]
- %80 = ptrtoint %struct.edge_rec* %79 to i32 ; <i32> [#uses=2]
- %81 = add i32 %80, 16 ; <i32> [#uses=1]
- %82 = and i32 %81, 63 ; <i32> [#uses=1]
- %83 = and i32 %80, -64 ; <i32> [#uses=1]
- %84 = or i32 %82, %83 ; <i32> [#uses=1]
- %85 = inttoptr i32 %84 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %86 = getelementptr %struct.edge_rec* %ldi_addr.0.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %87 = load %struct.VERTEX** %86, align 4 ; <%struct.VERTEX*> [#uses=1]
- %88 = call arm_apcscc %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=6]
- %89 = getelementptr %struct.edge_rec* %88, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=4]
- store %struct.edge_rec* %88, %struct.edge_rec** %89, align 4
- %90 = getelementptr %struct.edge_rec* %88, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=2]
- store %struct.VERTEX* %18, %struct.VERTEX** %90, align 4
- %91 = ptrtoint %struct.edge_rec* %88 to i32 ; <i32> [#uses=5]
- %92 = add i32 %91, 16 ; <i32> [#uses=2]
- %93 = inttoptr i32 %92 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %94 = add i32 %91, 48 ; <i32> [#uses=1]
- %95 = inttoptr i32 %94 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %96 = getelementptr %struct.edge_rec* %93, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %95, %struct.edge_rec** %96, align 4
- %97 = add i32 %91, 32 ; <i32> [#uses=1]
- %98 = inttoptr i32 %97 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %99 = getelementptr %struct.edge_rec* %98, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %98, %struct.edge_rec** %99, align 4
- %100 = getelementptr %struct.edge_rec* %98, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- store %struct.VERTEX* %87, %struct.VERTEX** %100, align 4
- %101 = getelementptr %struct.edge_rec* %95, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %93, %struct.edge_rec** %101, align 4
- %102 = load %struct.edge_rec** %89, align 4 ; <%struct.edge_rec*> [#uses=1]
- %103 = ptrtoint %struct.edge_rec* %102 to i32 ; <i32> [#uses=2]
- %104 = add i32 %103, 16 ; <i32> [#uses=1]
- %105 = and i32 %104, 63 ; <i32> [#uses=1]
- %106 = and i32 %103, -64 ; <i32> [#uses=1]
- %107 = or i32 %105, %106 ; <i32> [#uses=1]
- %108 = inttoptr i32 %107 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %109 = getelementptr %struct.edge_rec* %85, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %110 = load %struct.edge_rec** %109, align 4 ; <%struct.edge_rec*> [#uses=1]
- %111 = ptrtoint %struct.edge_rec* %110 to i32 ; <i32> [#uses=2]
- %112 = add i32 %111, 16 ; <i32> [#uses=1]
- %113 = and i32 %112, 63 ; <i32> [#uses=1]
- %114 = and i32 %111, -64 ; <i32> [#uses=1]
- %115 = or i32 %113, %114 ; <i32> [#uses=1]
- %116 = inttoptr i32 %115 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %117 = getelementptr %struct.edge_rec* %116, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %118 = load %struct.edge_rec** %117, align 4 ; <%struct.edge_rec*> [#uses=1]
- %119 = getelementptr %struct.edge_rec* %108, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %120 = load %struct.edge_rec** %119, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %118, %struct.edge_rec** %119, align 4
- store %struct.edge_rec* %120, %struct.edge_rec** %117, align 4
- %121 = load %struct.edge_rec** %89, align 4 ; <%struct.edge_rec*> [#uses=1]
- %122 = load %struct.edge_rec** %109, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %121, %struct.edge_rec** %109, align 4
- store %struct.edge_rec* %122, %struct.edge_rec** %89, align 4
- %123 = xor i32 %91, 32 ; <i32> [#uses=1]
- %124 = inttoptr i32 %123 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %125 = getelementptr %struct.edge_rec* %124, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %126 = load %struct.edge_rec** %125, align 4 ; <%struct.edge_rec*> [#uses=1]
- %127 = ptrtoint %struct.edge_rec* %126 to i32 ; <i32> [#uses=2]
- %128 = add i32 %127, 16 ; <i32> [#uses=1]
- %129 = and i32 %128, 63 ; <i32> [#uses=1]
- %130 = and i32 %127, -64 ; <i32> [#uses=1]
- %131 = or i32 %129, %130 ; <i32> [#uses=1]
- %132 = inttoptr i32 %131 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %133 = getelementptr %struct.edge_rec* %ldi_addr.0.i, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %134 = load %struct.edge_rec** %133, align 4 ; <%struct.edge_rec*> [#uses=1]
- %135 = ptrtoint %struct.edge_rec* %134 to i32 ; <i32> [#uses=2]
- %136 = add i32 %135, 16 ; <i32> [#uses=1]
- %137 = and i32 %136, 63 ; <i32> [#uses=1]
- %138 = and i32 %135, -64 ; <i32> [#uses=1]
- %139 = or i32 %137, %138 ; <i32> [#uses=1]
- %140 = inttoptr i32 %139 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %141 = getelementptr %struct.edge_rec* %140, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %142 = load %struct.edge_rec** %141, align 4 ; <%struct.edge_rec*> [#uses=1]
- %143 = getelementptr %struct.edge_rec* %132, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %144 = load %struct.edge_rec** %143, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %142, %struct.edge_rec** %143, align 4
- store %struct.edge_rec* %144, %struct.edge_rec** %141, align 4
- %145 = load %struct.edge_rec** %125, align 4 ; <%struct.edge_rec*> [#uses=1]
- %146 = load %struct.edge_rec** %133, align 4 ; <%struct.edge_rec*> [#uses=2]
- store %struct.edge_rec* %145, %struct.edge_rec** %133, align 4
- store %struct.edge_rec* %146, %struct.edge_rec** %125, align 4
- %147 = and i32 %92, 63 ; <i32> [#uses=1]
- %148 = and i32 %91, -64 ; <i32> [#uses=1]
- %149 = or i32 %147, %148 ; <i32> [#uses=1]
- %150 = inttoptr i32 %149 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %151 = getelementptr %struct.edge_rec* %150, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %152 = load %struct.edge_rec** %151, align 4 ; <%struct.edge_rec*> [#uses=1]
- %153 = ptrtoint %struct.edge_rec* %152 to i32 ; <i32> [#uses=2]
- %154 = add i32 %153, 16 ; <i32> [#uses=1]
- %155 = and i32 %154, 63 ; <i32> [#uses=1]
- %156 = and i32 %153, -64 ; <i32> [#uses=1]
- %157 = or i32 %155, %156 ; <i32> [#uses=1]
- %158 = inttoptr i32 %157 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %159 = load %struct.VERTEX** %90, align 4 ; <%struct.VERTEX*> [#uses=1]
- %160 = getelementptr %struct.edge_rec* %124, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %161 = load %struct.VERTEX** %160, align 4 ; <%struct.VERTEX*> [#uses=1]
- %162 = getelementptr %struct.edge_rec* %16, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %163 = load %struct.VERTEX** %162, align 4 ; <%struct.VERTEX*> [#uses=1]
- %164 = icmp eq %struct.VERTEX* %163, %159 ; <i1> [#uses=1]
- %rdo_addr.0.i = select i1 %164, %struct.edge_rec* %88, %struct.edge_rec* %16 ; <%struct.edge_rec*> [#uses=3]
- %165 = getelementptr %struct.edge_rec* %10, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %166 = load %struct.VERTEX** %165, align 4 ; <%struct.VERTEX*> [#uses=1]
- %167 = icmp eq %struct.VERTEX* %166, %161 ; <i1> [#uses=1]
- %ldo_addr.0.ph.i = select i1 %167, %struct.edge_rec* %124, %struct.edge_rec* %10 ; <%struct.edge_rec*> [#uses=3]
- br label %bb9.i
-
-bb9.i: ; preds = %bb25.i, %bb24.i, %bb5.i
- %lcand.2.i = phi %struct.edge_rec* [ %146, %bb5.i ], [ %lcand.1.i, %bb24.i ], [ %739, %bb25.i ] ; <%struct.edge_rec*> [#uses=5]
- %rcand.2.i = phi %struct.edge_rec* [ %158, %bb5.i ], [ %666, %bb24.i ], [ %rcand.1.i, %bb25.i ] ; <%struct.edge_rec*> [#uses=5]
- %basel.0.i = phi %struct.edge_rec* [ %88, %bb5.i ], [ %595, %bb24.i ], [ %716, %bb25.i ] ; <%struct.edge_rec*> [#uses=2]
- %168 = getelementptr %struct.edge_rec* %lcand.2.i, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %169 = load %struct.edge_rec** %168, align 4 ; <%struct.edge_rec*> [#uses=3]
- %170 = getelementptr %struct.edge_rec* %basel.0.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=3]
- %171 = load %struct.VERTEX** %170, align 4 ; <%struct.VERTEX*> [#uses=4]
- %172 = ptrtoint %struct.edge_rec* %basel.0.i to i32 ; <i32> [#uses=3]
- %173 = xor i32 %172, 32 ; <i32> [#uses=1]
- %174 = inttoptr i32 %173 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %175 = getelementptr %struct.edge_rec* %174, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=3]
- %176 = load %struct.VERTEX** %175, align 4 ; <%struct.VERTEX*> [#uses=3]
- %177 = ptrtoint %struct.edge_rec* %169 to i32 ; <i32> [#uses=1]
- %178 = xor i32 %177, 32 ; <i32> [#uses=1]
- %179 = inttoptr i32 %178 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %180 = getelementptr %struct.edge_rec* %179, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %181 = load %struct.VERTEX** %180, align 4 ; <%struct.VERTEX*> [#uses=2]
- %182 = getelementptr %struct.VERTEX* %171, i32 0, i32 0, i32 0 ; <double*> [#uses=2]
- %183 = load double* %182, align 4 ; <double> [#uses=2]
- %184 = getelementptr %struct.VERTEX* %171, i32 0, i32 0, i32 1 ; <double*> [#uses=2]
- %185 = load double* %184, align 4 ; <double> [#uses=2]
- %186 = getelementptr %struct.VERTEX* %181, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %187 = load double* %186, align 4 ; <double> [#uses=1]
- %188 = getelementptr %struct.VERTEX* %181, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %189 = load double* %188, align 4 ; <double> [#uses=1]
- %190 = getelementptr %struct.VERTEX* %176, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %191 = load double* %190, align 4 ; <double> [#uses=2]
- %192 = getelementptr %struct.VERTEX* %176, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %193 = load double* %192, align 4 ; <double> [#uses=2]
- %194 = fsub double %183, %191 ; <double> [#uses=1]
- %195 = fsub double %189, %193 ; <double> [#uses=1]
- %196 = fmul double %194, %195 ; <double> [#uses=1]
- %197 = fsub double %187, %191 ; <double> [#uses=1]
- %198 = fsub double %185, %193 ; <double> [#uses=1]
- %199 = fmul double %197, %198 ; <double> [#uses=1]
- %200 = fsub double %196, %199 ; <double> [#uses=1]
- %201 = fcmp ogt double %200, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %201, label %bb10.i, label %bb13.i
-
-bb10.i: ; preds = %bb9.i
- %202 = getelementptr %struct.VERTEX* %171, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- %avail_edge.promoted25 = load %struct.edge_rec** @avail_edge ; <%struct.edge_rec*> [#uses=1]
- br label %bb12.i
-
-bb11.i: ; preds = %bb12.i
- %203 = ptrtoint %struct.edge_rec* %lcand.0.i to i32 ; <i32> [#uses=3]
- %204 = add i32 %203, 16 ; <i32> [#uses=1]
- %205 = and i32 %204, 63 ; <i32> [#uses=1]
- %206 = and i32 %203, -64 ; <i32> [#uses=3]
- %207 = or i32 %205, %206 ; <i32> [#uses=1]
- %208 = inttoptr i32 %207 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %209 = getelementptr %struct.edge_rec* %208, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %210 = load %struct.edge_rec** %209, align 4 ; <%struct.edge_rec*> [#uses=1]
- %211 = ptrtoint %struct.edge_rec* %210 to i32 ; <i32> [#uses=2]
- %212 = add i32 %211, 16 ; <i32> [#uses=1]
- %213 = and i32 %212, 63 ; <i32> [#uses=1]
- %214 = and i32 %211, -64 ; <i32> [#uses=1]
- %215 = or i32 %213, %214 ; <i32> [#uses=1]
- %216 = inttoptr i32 %215 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %217 = getelementptr %struct.edge_rec* %lcand.0.i, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %218 = load %struct.edge_rec** %217, align 4 ; <%struct.edge_rec*> [#uses=1]
- %219 = ptrtoint %struct.edge_rec* %218 to i32 ; <i32> [#uses=2]
- %220 = add i32 %219, 16 ; <i32> [#uses=1]
- %221 = and i32 %220, 63 ; <i32> [#uses=1]
- %222 = and i32 %219, -64 ; <i32> [#uses=1]
- %223 = or i32 %221, %222 ; <i32> [#uses=1]
- %224 = inttoptr i32 %223 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %225 = getelementptr %struct.edge_rec* %216, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %226 = load %struct.edge_rec** %225, align 4 ; <%struct.edge_rec*> [#uses=1]
- %227 = ptrtoint %struct.edge_rec* %226 to i32 ; <i32> [#uses=2]
- %228 = add i32 %227, 16 ; <i32> [#uses=1]
- %229 = and i32 %228, 63 ; <i32> [#uses=1]
- %230 = and i32 %227, -64 ; <i32> [#uses=1]
- %231 = or i32 %229, %230 ; <i32> [#uses=1]
- %232 = inttoptr i32 %231 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %233 = getelementptr %struct.edge_rec* %232, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %234 = load %struct.edge_rec** %233, align 4 ; <%struct.edge_rec*> [#uses=1]
- %235 = getelementptr %struct.edge_rec* %224, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %236 = load %struct.edge_rec** %235, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %234, %struct.edge_rec** %235, align 4
- store %struct.edge_rec* %236, %struct.edge_rec** %233, align 4
- %237 = load %struct.edge_rec** %217, align 4 ; <%struct.edge_rec*> [#uses=1]
- %238 = load %struct.edge_rec** %225, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %237, %struct.edge_rec** %225, align 4
- store %struct.edge_rec* %238, %struct.edge_rec** %217, align 4
- %239 = xor i32 %203, 32 ; <i32> [#uses=2]
- %240 = add i32 %239, 16 ; <i32> [#uses=1]
- %241 = and i32 %240, 63 ; <i32> [#uses=1]
- %242 = or i32 %241, %206 ; <i32> [#uses=1]
- %243 = inttoptr i32 %242 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %244 = getelementptr %struct.edge_rec* %243, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %245 = load %struct.edge_rec** %244, align 4 ; <%struct.edge_rec*> [#uses=1]
- %246 = ptrtoint %struct.edge_rec* %245 to i32 ; <i32> [#uses=2]
- %247 = add i32 %246, 16 ; <i32> [#uses=1]
- %248 = and i32 %247, 63 ; <i32> [#uses=1]
- %249 = and i32 %246, -64 ; <i32> [#uses=1]
- %250 = or i32 %248, %249 ; <i32> [#uses=1]
- %251 = inttoptr i32 %250 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %252 = inttoptr i32 %239 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %253 = getelementptr %struct.edge_rec* %252, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %254 = load %struct.edge_rec** %253, align 4 ; <%struct.edge_rec*> [#uses=1]
- %255 = ptrtoint %struct.edge_rec* %254 to i32 ; <i32> [#uses=2]
- %256 = add i32 %255, 16 ; <i32> [#uses=1]
- %257 = and i32 %256, 63 ; <i32> [#uses=1]
- %258 = and i32 %255, -64 ; <i32> [#uses=1]
- %259 = or i32 %257, %258 ; <i32> [#uses=1]
- %260 = inttoptr i32 %259 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %261 = getelementptr %struct.edge_rec* %251, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %262 = load %struct.edge_rec** %261, align 4 ; <%struct.edge_rec*> [#uses=1]
- %263 = ptrtoint %struct.edge_rec* %262 to i32 ; <i32> [#uses=2]
- %264 = add i32 %263, 16 ; <i32> [#uses=1]
- %265 = and i32 %264, 63 ; <i32> [#uses=1]
- %266 = and i32 %263, -64 ; <i32> [#uses=1]
- %267 = or i32 %265, %266 ; <i32> [#uses=1]
- %268 = inttoptr i32 %267 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %269 = getelementptr %struct.edge_rec* %268, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %270 = load %struct.edge_rec** %269, align 4 ; <%struct.edge_rec*> [#uses=1]
- %271 = getelementptr %struct.edge_rec* %260, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %272 = load %struct.edge_rec** %271, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %270, %struct.edge_rec** %271, align 4
- store %struct.edge_rec* %272, %struct.edge_rec** %269, align 4
- %273 = load %struct.edge_rec** %253, align 4 ; <%struct.edge_rec*> [#uses=1]
- %274 = load %struct.edge_rec** %261, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %273, %struct.edge_rec** %261, align 4
- store %struct.edge_rec* %274, %struct.edge_rec** %253, align 4
- %275 = inttoptr i32 %206 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %276 = getelementptr %struct.edge_rec* %275, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %avail_edge.tmp.026, %struct.edge_rec** %276, align 4
- %277 = getelementptr %struct.edge_rec* %t.0.i, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %278 = load %struct.edge_rec** %277, align 4 ; <%struct.edge_rec*> [#uses=2]
- %.pre.i = load double* %182, align 4 ; <double> [#uses=1]
- %.pre22.i = load double* %184, align 4 ; <double> [#uses=1]
- br label %bb12.i
-
-bb12.i: ; preds = %bb11.i, %bb10.i
- %avail_edge.tmp.026 = phi %struct.edge_rec* [ %avail_edge.promoted25, %bb10.i ], [ %275, %bb11.i ] ; <%struct.edge_rec*> [#uses=2]
- %279 = phi double [ %.pre22.i, %bb11.i ], [ %185, %bb10.i ] ; <double> [#uses=3]
- %280 = phi double [ %.pre.i, %bb11.i ], [ %183, %bb10.i ] ; <double> [#uses=3]
- %lcand.0.i = phi %struct.edge_rec* [ %lcand.2.i, %bb10.i ], [ %t.0.i, %bb11.i ] ; <%struct.edge_rec*> [#uses=3]
- %t.0.i = phi %struct.edge_rec* [ %169, %bb10.i ], [ %278, %bb11.i ] ; <%struct.edge_rec*> [#uses=4]
- %.pn5.in.in.in.i = phi %struct.edge_rec* [ %lcand.2.i, %bb10.i ], [ %t.0.i, %bb11.i ] ; <%struct.edge_rec*> [#uses=1]
- %.pn4.in.in.in.i = phi %struct.edge_rec* [ %169, %bb10.i ], [ %278, %bb11.i ] ; <%struct.edge_rec*> [#uses=1]
- %lcand.2.pn.i = phi %struct.edge_rec* [ %lcand.2.i, %bb10.i ], [ %t.0.i, %bb11.i ] ; <%struct.edge_rec*> [#uses=1]
- %.pn5.in.in.i = ptrtoint %struct.edge_rec* %.pn5.in.in.in.i to i32 ; <i32> [#uses=1]
- %.pn4.in.in.i = ptrtoint %struct.edge_rec* %.pn4.in.in.in.i to i32 ; <i32> [#uses=1]
- %.pn5.in.i = xor i32 %.pn5.in.in.i, 32 ; <i32> [#uses=1]
- %.pn4.in.i = xor i32 %.pn4.in.in.i, 32 ; <i32> [#uses=1]
- %.pn5.i = inttoptr i32 %.pn5.in.i to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %.pn4.i = inttoptr i32 %.pn4.in.i to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %v1.0.in.i = getelementptr %struct.edge_rec* %.pn5.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %v2.0.in.i = getelementptr %struct.edge_rec* %.pn4.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %v3.0.in.i = getelementptr %struct.edge_rec* %lcand.2.pn.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %v1.0.i = load %struct.VERTEX** %v1.0.in.i ; <%struct.VERTEX*> [#uses=3]
- %v2.0.i = load %struct.VERTEX** %v2.0.in.i ; <%struct.VERTEX*> [#uses=3]
- %v3.0.i = load %struct.VERTEX** %v3.0.in.i ; <%struct.VERTEX*> [#uses=3]
- %281 = load double* %202, align 4 ; <double> [#uses=3]
- %282 = getelementptr %struct.VERTEX* %v1.0.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %283 = load double* %282, align 4 ; <double> [#uses=1]
- %284 = fsub double %283, %280 ; <double> [#uses=2]
- %285 = getelementptr %struct.VERTEX* %v1.0.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %286 = load double* %285, align 4 ; <double> [#uses=1]
- %287 = fsub double %286, %279 ; <double> [#uses=2]
- %288 = getelementptr %struct.VERTEX* %v1.0.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- %289 = load double* %288, align 4 ; <double> [#uses=1]
- %290 = getelementptr %struct.VERTEX* %v2.0.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %291 = load double* %290, align 4 ; <double> [#uses=1]
- %292 = fsub double %291, %280 ; <double> [#uses=2]
- %293 = getelementptr %struct.VERTEX* %v2.0.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %294 = load double* %293, align 4 ; <double> [#uses=1]
- %295 = fsub double %294, %279 ; <double> [#uses=2]
- %296 = getelementptr %struct.VERTEX* %v2.0.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- %297 = load double* %296, align 4 ; <double> [#uses=1]
- %298 = getelementptr %struct.VERTEX* %v3.0.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %299 = load double* %298, align 4 ; <double> [#uses=1]
- %300 = fsub double %299, %280 ; <double> [#uses=2]
- %301 = getelementptr %struct.VERTEX* %v3.0.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %302 = load double* %301, align 4 ; <double> [#uses=1]
- %303 = fsub double %302, %279 ; <double> [#uses=2]
- %304 = getelementptr %struct.VERTEX* %v3.0.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- %305 = load double* %304, align 4 ; <double> [#uses=1]
- %306 = fsub double %289, %281 ; <double> [#uses=1]
- %307 = fmul double %292, %303 ; <double> [#uses=1]
- %308 = fmul double %295, %300 ; <double> [#uses=1]
- %309 = fsub double %307, %308 ; <double> [#uses=1]
- %310 = fmul double %306, %309 ; <double> [#uses=1]
- %311 = fsub double %297, %281 ; <double> [#uses=1]
- %312 = fmul double %300, %287 ; <double> [#uses=1]
- %313 = fmul double %303, %284 ; <double> [#uses=1]
- %314 = fsub double %312, %313 ; <double> [#uses=1]
- %315 = fmul double %311, %314 ; <double> [#uses=1]
- %316 = fadd double %315, %310 ; <double> [#uses=1]
- %317 = fsub double %305, %281 ; <double> [#uses=1]
- %318 = fmul double %284, %295 ; <double> [#uses=1]
- %319 = fmul double %287, %292 ; <double> [#uses=1]
- %320 = fsub double %318, %319 ; <double> [#uses=1]
- %321 = fmul double %317, %320 ; <double> [#uses=1]
- %322 = fadd double %321, %316 ; <double> [#uses=1]
- %323 = fcmp ogt double %322, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %323, label %bb11.i, label %bb13.loopexit.i
-
-bb13.loopexit.i: ; preds = %bb12.i
- store %struct.edge_rec* %avail_edge.tmp.026, %struct.edge_rec** @avail_edge
- %.pre23.i = load %struct.VERTEX** %170, align 4 ; <%struct.VERTEX*> [#uses=1]
- %.pre24.i = load %struct.VERTEX** %175, align 4 ; <%struct.VERTEX*> [#uses=1]
- br label %bb13.i
-
-bb13.i: ; preds = %bb13.loopexit.i, %bb9.i
- %324 = phi %struct.VERTEX* [ %.pre24.i, %bb13.loopexit.i ], [ %176, %bb9.i ] ; <%struct.VERTEX*> [#uses=4]
- %325 = phi %struct.VERTEX* [ %.pre23.i, %bb13.loopexit.i ], [ %171, %bb9.i ] ; <%struct.VERTEX*> [#uses=3]
- %lcand.1.i = phi %struct.edge_rec* [ %lcand.0.i, %bb13.loopexit.i ], [ %lcand.2.i, %bb9.i ] ; <%struct.edge_rec*> [#uses=3]
- %326 = ptrtoint %struct.edge_rec* %rcand.2.i to i32 ; <i32> [#uses=2]
- %327 = add i32 %326, 16 ; <i32> [#uses=1]
- %328 = and i32 %327, 63 ; <i32> [#uses=1]
- %329 = and i32 %326, -64 ; <i32> [#uses=1]
- %330 = or i32 %328, %329 ; <i32> [#uses=1]
- %331 = inttoptr i32 %330 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %332 = getelementptr %struct.edge_rec* %331, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %333 = load %struct.edge_rec** %332, align 4 ; <%struct.edge_rec*> [#uses=1]
- %334 = ptrtoint %struct.edge_rec* %333 to i32 ; <i32> [#uses=2]
- %335 = add i32 %334, 16 ; <i32> [#uses=1]
- %336 = and i32 %335, 63 ; <i32> [#uses=1]
- %337 = and i32 %334, -64 ; <i32> [#uses=1]
- %338 = or i32 %336, %337 ; <i32> [#uses=3]
- %339 = xor i32 %338, 32 ; <i32> [#uses=1]
- %340 = inttoptr i32 %339 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %341 = getelementptr %struct.edge_rec* %340, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %342 = load %struct.VERTEX** %341, align 4 ; <%struct.VERTEX*> [#uses=2]
- %343 = getelementptr %struct.VERTEX* %325, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %344 = load double* %343, align 4 ; <double> [#uses=1]
- %345 = getelementptr %struct.VERTEX* %325, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %346 = load double* %345, align 4 ; <double> [#uses=1]
- %347 = getelementptr %struct.VERTEX* %342, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %348 = load double* %347, align 4 ; <double> [#uses=1]
- %349 = getelementptr %struct.VERTEX* %342, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %350 = load double* %349, align 4 ; <double> [#uses=1]
- %351 = getelementptr %struct.VERTEX* %324, i32 0, i32 0, i32 0 ; <double*> [#uses=2]
- %352 = load double* %351, align 4 ; <double> [#uses=3]
- %353 = getelementptr %struct.VERTEX* %324, i32 0, i32 0, i32 1 ; <double*> [#uses=2]
- %354 = load double* %353, align 4 ; <double> [#uses=3]
- %355 = fsub double %344, %352 ; <double> [#uses=1]
- %356 = fsub double %350, %354 ; <double> [#uses=1]
- %357 = fmul double %355, %356 ; <double> [#uses=1]
- %358 = fsub double %348, %352 ; <double> [#uses=1]
- %359 = fsub double %346, %354 ; <double> [#uses=1]
- %360 = fmul double %358, %359 ; <double> [#uses=1]
- %361 = fsub double %357, %360 ; <double> [#uses=1]
- %362 = fcmp ogt double %361, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %362, label %bb14.i, label %bb17.i
-
-bb14.i: ; preds = %bb13.i
- %363 = getelementptr %struct.VERTEX* %324, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- %avail_edge.promoted = load %struct.edge_rec** @avail_edge ; <%struct.edge_rec*> [#uses=1]
- br label %bb16.i
-
-bb15.i: ; preds = %bb16.i
- %364 = ptrtoint %struct.edge_rec* %rcand.0.i to i32 ; <i32> [#uses=3]
- %365 = add i32 %364, 16 ; <i32> [#uses=1]
- %366 = and i32 %365, 63 ; <i32> [#uses=1]
- %367 = and i32 %364, -64 ; <i32> [#uses=3]
- %368 = or i32 %366, %367 ; <i32> [#uses=1]
- %369 = inttoptr i32 %368 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %370 = getelementptr %struct.edge_rec* %369, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %371 = load %struct.edge_rec** %370, align 4 ; <%struct.edge_rec*> [#uses=1]
- %372 = ptrtoint %struct.edge_rec* %371 to i32 ; <i32> [#uses=2]
- %373 = add i32 %372, 16 ; <i32> [#uses=1]
- %374 = and i32 %373, 63 ; <i32> [#uses=1]
- %375 = and i32 %372, -64 ; <i32> [#uses=1]
- %376 = or i32 %374, %375 ; <i32> [#uses=1]
- %377 = inttoptr i32 %376 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %378 = getelementptr %struct.edge_rec* %rcand.0.i, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %379 = load %struct.edge_rec** %378, align 4 ; <%struct.edge_rec*> [#uses=1]
- %380 = ptrtoint %struct.edge_rec* %379 to i32 ; <i32> [#uses=2]
- %381 = add i32 %380, 16 ; <i32> [#uses=1]
- %382 = and i32 %381, 63 ; <i32> [#uses=1]
- %383 = and i32 %380, -64 ; <i32> [#uses=1]
- %384 = or i32 %382, %383 ; <i32> [#uses=1]
- %385 = inttoptr i32 %384 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %386 = getelementptr %struct.edge_rec* %377, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %387 = load %struct.edge_rec** %386, align 4 ; <%struct.edge_rec*> [#uses=1]
- %388 = ptrtoint %struct.edge_rec* %387 to i32 ; <i32> [#uses=2]
- %389 = add i32 %388, 16 ; <i32> [#uses=1]
- %390 = and i32 %389, 63 ; <i32> [#uses=1]
- %391 = and i32 %388, -64 ; <i32> [#uses=1]
- %392 = or i32 %390, %391 ; <i32> [#uses=1]
- %393 = inttoptr i32 %392 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %394 = getelementptr %struct.edge_rec* %393, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %395 = load %struct.edge_rec** %394, align 4 ; <%struct.edge_rec*> [#uses=1]
- %396 = getelementptr %struct.edge_rec* %385, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %397 = load %struct.edge_rec** %396, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %395, %struct.edge_rec** %396, align 4
- store %struct.edge_rec* %397, %struct.edge_rec** %394, align 4
- %398 = load %struct.edge_rec** %378, align 4 ; <%struct.edge_rec*> [#uses=1]
- %399 = load %struct.edge_rec** %386, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %398, %struct.edge_rec** %386, align 4
- store %struct.edge_rec* %399, %struct.edge_rec** %378, align 4
- %400 = xor i32 %364, 32 ; <i32> [#uses=2]
- %401 = add i32 %400, 16 ; <i32> [#uses=1]
- %402 = and i32 %401, 63 ; <i32> [#uses=1]
- %403 = or i32 %402, %367 ; <i32> [#uses=1]
- %404 = inttoptr i32 %403 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %405 = getelementptr %struct.edge_rec* %404, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %406 = load %struct.edge_rec** %405, align 4 ; <%struct.edge_rec*> [#uses=1]
- %407 = ptrtoint %struct.edge_rec* %406 to i32 ; <i32> [#uses=2]
- %408 = add i32 %407, 16 ; <i32> [#uses=1]
- %409 = and i32 %408, 63 ; <i32> [#uses=1]
- %410 = and i32 %407, -64 ; <i32> [#uses=1]
- %411 = or i32 %409, %410 ; <i32> [#uses=1]
- %412 = inttoptr i32 %411 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %413 = inttoptr i32 %400 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %414 = getelementptr %struct.edge_rec* %413, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %415 = load %struct.edge_rec** %414, align 4 ; <%struct.edge_rec*> [#uses=1]
- %416 = ptrtoint %struct.edge_rec* %415 to i32 ; <i32> [#uses=2]
- %417 = add i32 %416, 16 ; <i32> [#uses=1]
- %418 = and i32 %417, 63 ; <i32> [#uses=1]
- %419 = and i32 %416, -64 ; <i32> [#uses=1]
- %420 = or i32 %418, %419 ; <i32> [#uses=1]
- %421 = inttoptr i32 %420 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %422 = getelementptr %struct.edge_rec* %412, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %423 = load %struct.edge_rec** %422, align 4 ; <%struct.edge_rec*> [#uses=1]
- %424 = ptrtoint %struct.edge_rec* %423 to i32 ; <i32> [#uses=2]
- %425 = add i32 %424, 16 ; <i32> [#uses=1]
- %426 = and i32 %425, 63 ; <i32> [#uses=1]
- %427 = and i32 %424, -64 ; <i32> [#uses=1]
- %428 = or i32 %426, %427 ; <i32> [#uses=1]
- %429 = inttoptr i32 %428 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %430 = getelementptr %struct.edge_rec* %429, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %431 = load %struct.edge_rec** %430, align 4 ; <%struct.edge_rec*> [#uses=1]
- %432 = getelementptr %struct.edge_rec* %421, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %433 = load %struct.edge_rec** %432, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %431, %struct.edge_rec** %432, align 4
- store %struct.edge_rec* %433, %struct.edge_rec** %430, align 4
- %434 = load %struct.edge_rec** %414, align 4 ; <%struct.edge_rec*> [#uses=1]
- %435 = load %struct.edge_rec** %422, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %434, %struct.edge_rec** %422, align 4
- store %struct.edge_rec* %435, %struct.edge_rec** %414, align 4
- %436 = inttoptr i32 %367 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %437 = getelementptr %struct.edge_rec* %436, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %avail_edge.tmp.0, %struct.edge_rec** %437, align 4
- %438 = add i32 %t.1.in.i, 16 ; <i32> [#uses=1]
- %439 = and i32 %438, 63 ; <i32> [#uses=1]
- %440 = and i32 %t.1.in.i, -64 ; <i32> [#uses=1]
- %441 = or i32 %439, %440 ; <i32> [#uses=1]
- %442 = inttoptr i32 %441 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %443 = getelementptr %struct.edge_rec* %442, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %444 = load %struct.edge_rec** %443, align 4 ; <%struct.edge_rec*> [#uses=1]
- %445 = ptrtoint %struct.edge_rec* %444 to i32 ; <i32> [#uses=2]
- %446 = add i32 %445, 16 ; <i32> [#uses=1]
- %447 = and i32 %446, 63 ; <i32> [#uses=1]
- %448 = and i32 %445, -64 ; <i32> [#uses=1]
- %449 = or i32 %447, %448 ; <i32> [#uses=2]
- %.pre25.i = load double* %351, align 4 ; <double> [#uses=1]
- %.pre26.i = load double* %353, align 4 ; <double> [#uses=1]
- br label %bb16.i
-
-bb16.i: ; preds = %bb15.i, %bb14.i
- %avail_edge.tmp.0 = phi %struct.edge_rec* [ %avail_edge.promoted, %bb14.i ], [ %436, %bb15.i ] ; <%struct.edge_rec*> [#uses=2]
- %450 = phi double [ %.pre26.i, %bb15.i ], [ %354, %bb14.i ] ; <double> [#uses=3]
- %451 = phi double [ %.pre25.i, %bb15.i ], [ %352, %bb14.i ] ; <double> [#uses=3]
- %rcand.0.i = phi %struct.edge_rec* [ %rcand.2.i, %bb14.i ], [ %t.1.i, %bb15.i ] ; <%struct.edge_rec*> [#uses=3]
- %t.1.in.i = phi i32 [ %338, %bb14.i ], [ %449, %bb15.i ] ; <i32> [#uses=3]
- %.pn3.in.in.i = phi i32 [ %338, %bb14.i ], [ %449, %bb15.i ] ; <i32> [#uses=1]
- %.pn.in.in.in.i = phi %struct.edge_rec* [ %rcand.2.i, %bb14.i ], [ %t.1.i, %bb15.i ] ; <%struct.edge_rec*> [#uses=1]
- %rcand.2.pn.i = phi %struct.edge_rec* [ %rcand.2.i, %bb14.i ], [ %t.1.i, %bb15.i ] ; <%struct.edge_rec*> [#uses=1]
- %t.1.i = inttoptr i32 %t.1.in.i to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %.pn.in.in.i = ptrtoint %struct.edge_rec* %.pn.in.in.in.i to i32 ; <i32> [#uses=1]
- %.pn3.in.i = xor i32 %.pn3.in.in.i, 32 ; <i32> [#uses=1]
- %.pn.in.i = xor i32 %.pn.in.in.i, 32 ; <i32> [#uses=1]
- %.pn3.i = inttoptr i32 %.pn3.in.i to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %.pn.i = inttoptr i32 %.pn.in.i to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %v1.1.in.i = getelementptr %struct.edge_rec* %.pn3.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %v2.1.in.i = getelementptr %struct.edge_rec* %.pn.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %v3.1.in.i = getelementptr %struct.edge_rec* %rcand.2.pn.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %v1.1.i = load %struct.VERTEX** %v1.1.in.i ; <%struct.VERTEX*> [#uses=3]
- %v2.1.i = load %struct.VERTEX** %v2.1.in.i ; <%struct.VERTEX*> [#uses=3]
- %v3.1.i = load %struct.VERTEX** %v3.1.in.i ; <%struct.VERTEX*> [#uses=3]
- %452 = load double* %363, align 4 ; <double> [#uses=3]
- %453 = getelementptr %struct.VERTEX* %v1.1.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %454 = load double* %453, align 4 ; <double> [#uses=1]
- %455 = fsub double %454, %451 ; <double> [#uses=2]
- %456 = getelementptr %struct.VERTEX* %v1.1.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %457 = load double* %456, align 4 ; <double> [#uses=1]
- %458 = fsub double %457, %450 ; <double> [#uses=2]
- %459 = getelementptr %struct.VERTEX* %v1.1.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- %460 = load double* %459, align 4 ; <double> [#uses=1]
- %461 = getelementptr %struct.VERTEX* %v2.1.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %462 = load double* %461, align 4 ; <double> [#uses=1]
- %463 = fsub double %462, %451 ; <double> [#uses=2]
- %464 = getelementptr %struct.VERTEX* %v2.1.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %465 = load double* %464, align 4 ; <double> [#uses=1]
- %466 = fsub double %465, %450 ; <double> [#uses=2]
- %467 = getelementptr %struct.VERTEX* %v2.1.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- %468 = load double* %467, align 4 ; <double> [#uses=1]
- %469 = getelementptr %struct.VERTEX* %v3.1.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %470 = load double* %469, align 4 ; <double> [#uses=1]
- %471 = fsub double %470, %451 ; <double> [#uses=2]
- %472 = getelementptr %struct.VERTEX* %v3.1.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %473 = load double* %472, align 4 ; <double> [#uses=1]
- %474 = fsub double %473, %450 ; <double> [#uses=2]
- %475 = getelementptr %struct.VERTEX* %v3.1.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- %476 = load double* %475, align 4 ; <double> [#uses=1]
- %477 = fsub double %460, %452 ; <double> [#uses=1]
- %478 = fmul double %463, %474 ; <double> [#uses=1]
- %479 = fmul double %466, %471 ; <double> [#uses=1]
- %480 = fsub double %478, %479 ; <double> [#uses=1]
- %481 = fmul double %477, %480 ; <double> [#uses=1]
- %482 = fsub double %468, %452 ; <double> [#uses=1]
- %483 = fmul double %471, %458 ; <double> [#uses=1]
- %484 = fmul double %474, %455 ; <double> [#uses=1]
- %485 = fsub double %483, %484 ; <double> [#uses=1]
- %486 = fmul double %482, %485 ; <double> [#uses=1]
- %487 = fadd double %486, %481 ; <double> [#uses=1]
- %488 = fsub double %476, %452 ; <double> [#uses=1]
- %489 = fmul double %455, %466 ; <double> [#uses=1]
- %490 = fmul double %458, %463 ; <double> [#uses=1]
- %491 = fsub double %489, %490 ; <double> [#uses=1]
- %492 = fmul double %488, %491 ; <double> [#uses=1]
- %493 = fadd double %492, %487 ; <double> [#uses=1]
- %494 = fcmp ogt double %493, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %494, label %bb15.i, label %bb17.loopexit.i
-
-bb17.loopexit.i: ; preds = %bb16.i
- store %struct.edge_rec* %avail_edge.tmp.0, %struct.edge_rec** @avail_edge
- %.pre27.i = load %struct.VERTEX** %170, align 4 ; <%struct.VERTEX*> [#uses=1]
- %.pre28.i = load %struct.VERTEX** %175, align 4 ; <%struct.VERTEX*> [#uses=1]
- br label %bb17.i
-
-bb17.i: ; preds = %bb17.loopexit.i, %bb13.i
- %495 = phi %struct.VERTEX* [ %.pre28.i, %bb17.loopexit.i ], [ %324, %bb13.i ] ; <%struct.VERTEX*> [#uses=3]
- %496 = phi %struct.VERTEX* [ %.pre27.i, %bb17.loopexit.i ], [ %325, %bb13.i ] ; <%struct.VERTEX*> [#uses=3]
- %rcand.1.i = phi %struct.edge_rec* [ %rcand.0.i, %bb17.loopexit.i ], [ %rcand.2.i, %bb13.i ] ; <%struct.edge_rec*> [#uses=3]
- %497 = ptrtoint %struct.edge_rec* %lcand.1.i to i32 ; <i32> [#uses=1]
- %498 = xor i32 %497, 32 ; <i32> [#uses=1]
- %499 = inttoptr i32 %498 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %500 = getelementptr %struct.edge_rec* %499, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %501 = load %struct.VERTEX** %500, align 4 ; <%struct.VERTEX*> [#uses=4]
- %502 = getelementptr %struct.VERTEX* %496, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %503 = load double* %502, align 4 ; <double> [#uses=1]
- %504 = getelementptr %struct.VERTEX* %496, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %505 = load double* %504, align 4 ; <double> [#uses=1]
- %506 = getelementptr %struct.VERTEX* %501, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %507 = load double* %506, align 4 ; <double> [#uses=2]
- %508 = getelementptr %struct.VERTEX* %501, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %509 = load double* %508, align 4 ; <double> [#uses=2]
- %510 = getelementptr %struct.VERTEX* %495, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %511 = load double* %510, align 4 ; <double> [#uses=3]
- %512 = getelementptr %struct.VERTEX* %495, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %513 = load double* %512, align 4 ; <double> [#uses=3]
- %514 = fsub double %503, %511 ; <double> [#uses=2]
- %515 = fsub double %509, %513 ; <double> [#uses=1]
- %516 = fmul double %514, %515 ; <double> [#uses=1]
- %517 = fsub double %507, %511 ; <double> [#uses=1]
- %518 = fsub double %505, %513 ; <double> [#uses=2]
- %519 = fmul double %517, %518 ; <double> [#uses=1]
- %520 = fsub double %516, %519 ; <double> [#uses=1]
- %521 = fcmp ogt double %520, 0.000000e+00 ; <i1> [#uses=2]
- %522 = ptrtoint %struct.edge_rec* %rcand.1.i to i32 ; <i32> [#uses=3]
- %523 = xor i32 %522, 32 ; <i32> [#uses=1]
- %524 = inttoptr i32 %523 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %525 = getelementptr %struct.edge_rec* %524, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %526 = load %struct.VERTEX** %525, align 4 ; <%struct.VERTEX*> [#uses=4]
- %527 = getelementptr %struct.VERTEX* %526, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %528 = load double* %527, align 4 ; <double> [#uses=4]
- %529 = getelementptr %struct.VERTEX* %526, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %530 = load double* %529, align 4 ; <double> [#uses=4]
- %531 = fsub double %530, %513 ; <double> [#uses=1]
- %532 = fmul double %514, %531 ; <double> [#uses=1]
- %533 = fsub double %528, %511 ; <double> [#uses=1]
- %534 = fmul double %533, %518 ; <double> [#uses=1]
- %535 = fsub double %532, %534 ; <double> [#uses=1]
- %536 = fcmp ogt double %535, 0.000000e+00 ; <i1> [#uses=2]
- %537 = or i1 %536, %521 ; <i1> [#uses=1]
- br i1 %537, label %bb21.i, label %do_merge.exit
-
-bb21.i: ; preds = %bb17.i
- %538 = getelementptr %struct.edge_rec* %lcand.1.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %539 = load %struct.VERTEX** %538, align 4 ; <%struct.VERTEX*> [#uses=3]
- %540 = getelementptr %struct.edge_rec* %rcand.1.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %541 = load %struct.VERTEX** %540, align 4 ; <%struct.VERTEX*> [#uses=3]
- br i1 %521, label %bb22.i, label %bb24.i
-
-bb22.i: ; preds = %bb21.i
- br i1 %536, label %bb23.i, label %bb25.i
-
-bb23.i: ; preds = %bb22.i
- %542 = getelementptr %struct.VERTEX* %526, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- %543 = load double* %542, align 4 ; <double> [#uses=3]
- %544 = fsub double %507, %528 ; <double> [#uses=2]
- %545 = fsub double %509, %530 ; <double> [#uses=2]
- %546 = getelementptr %struct.VERTEX* %501, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- %547 = load double* %546, align 4 ; <double> [#uses=1]
- %548 = getelementptr %struct.VERTEX* %539, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %549 = load double* %548, align 4 ; <double> [#uses=1]
- %550 = fsub double %549, %528 ; <double> [#uses=2]
- %551 = getelementptr %struct.VERTEX* %539, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %552 = load double* %551, align 4 ; <double> [#uses=1]
- %553 = fsub double %552, %530 ; <double> [#uses=2]
- %554 = getelementptr %struct.VERTEX* %539, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- %555 = load double* %554, align 4 ; <double> [#uses=1]
- %556 = getelementptr %struct.VERTEX* %541, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %557 = load double* %556, align 4 ; <double> [#uses=1]
- %558 = fsub double %557, %528 ; <double> [#uses=2]
- %559 = getelementptr %struct.VERTEX* %541, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %560 = load double* %559, align 4 ; <double> [#uses=1]
- %561 = fsub double %560, %530 ; <double> [#uses=2]
- %562 = getelementptr %struct.VERTEX* %541, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- %563 = load double* %562, align 4 ; <double> [#uses=1]
- %564 = fsub double %547, %543 ; <double> [#uses=1]
- %565 = fmul double %550, %561 ; <double> [#uses=1]
- %566 = fmul double %553, %558 ; <double> [#uses=1]
- %567 = fsub double %565, %566 ; <double> [#uses=1]
- %568 = fmul double %564, %567 ; <double> [#uses=1]
- %569 = fsub double %555, %543 ; <double> [#uses=1]
- %570 = fmul double %558, %545 ; <double> [#uses=1]
- %571 = fmul double %561, %544 ; <double> [#uses=1]
- %572 = fsub double %570, %571 ; <double> [#uses=1]
- %573 = fmul double %569, %572 ; <double> [#uses=1]
- %574 = fadd double %573, %568 ; <double> [#uses=1]
- %575 = fsub double %563, %543 ; <double> [#uses=1]
- %576 = fmul double %544, %553 ; <double> [#uses=1]
- %577 = fmul double %545, %550 ; <double> [#uses=1]
- %578 = fsub double %576, %577 ; <double> [#uses=1]
- %579 = fmul double %575, %578 ; <double> [#uses=1]
- %580 = fadd double %579, %574 ; <double> [#uses=1]
- %581 = fcmp ogt double %580, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %581, label %bb24.i, label %bb25.i
-
-bb24.i: ; preds = %bb23.i, %bb21.i
- %582 = add i32 %522, 48 ; <i32> [#uses=1]
- %583 = and i32 %582, 63 ; <i32> [#uses=1]
- %584 = and i32 %522, -64 ; <i32> [#uses=1]
- %585 = or i32 %583, %584 ; <i32> [#uses=1]
- %586 = inttoptr i32 %585 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %587 = getelementptr %struct.edge_rec* %586, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %588 = load %struct.edge_rec** %587, align 4 ; <%struct.edge_rec*> [#uses=1]
- %589 = ptrtoint %struct.edge_rec* %588 to i32 ; <i32> [#uses=2]
- %590 = add i32 %589, 16 ; <i32> [#uses=1]
- %591 = and i32 %590, 63 ; <i32> [#uses=1]
- %592 = and i32 %589, -64 ; <i32> [#uses=1]
- %593 = or i32 %591, %592 ; <i32> [#uses=1]
- %594 = inttoptr i32 %593 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %595 = call arm_apcscc %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=5]
- %596 = getelementptr %struct.edge_rec* %595, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=4]
- store %struct.edge_rec* %595, %struct.edge_rec** %596, align 4
- %597 = getelementptr %struct.edge_rec* %595, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- store %struct.VERTEX* %526, %struct.VERTEX** %597, align 4
- %598 = ptrtoint %struct.edge_rec* %595 to i32 ; <i32> [#uses=5]
- %599 = add i32 %598, 16 ; <i32> [#uses=1]
- %600 = inttoptr i32 %599 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %601 = add i32 %598, 48 ; <i32> [#uses=1]
- %602 = inttoptr i32 %601 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %603 = getelementptr %struct.edge_rec* %600, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %602, %struct.edge_rec** %603, align 4
- %604 = add i32 %598, 32 ; <i32> [#uses=1]
- %605 = inttoptr i32 %604 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %606 = getelementptr %struct.edge_rec* %605, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %605, %struct.edge_rec** %606, align 4
- %607 = getelementptr %struct.edge_rec* %605, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- store %struct.VERTEX* %495, %struct.VERTEX** %607, align 4
- %608 = getelementptr %struct.edge_rec* %602, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %600, %struct.edge_rec** %608, align 4
- %609 = load %struct.edge_rec** %596, align 4 ; <%struct.edge_rec*> [#uses=1]
- %610 = ptrtoint %struct.edge_rec* %609 to i32 ; <i32> [#uses=2]
- %611 = add i32 %610, 16 ; <i32> [#uses=1]
- %612 = and i32 %611, 63 ; <i32> [#uses=1]
- %613 = and i32 %610, -64 ; <i32> [#uses=1]
- %614 = or i32 %612, %613 ; <i32> [#uses=1]
- %615 = inttoptr i32 %614 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %616 = getelementptr %struct.edge_rec* %594, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %617 = load %struct.edge_rec** %616, align 4 ; <%struct.edge_rec*> [#uses=1]
- %618 = ptrtoint %struct.edge_rec* %617 to i32 ; <i32> [#uses=2]
- %619 = add i32 %618, 16 ; <i32> [#uses=1]
- %620 = and i32 %619, 63 ; <i32> [#uses=1]
- %621 = and i32 %618, -64 ; <i32> [#uses=1]
- %622 = or i32 %620, %621 ; <i32> [#uses=1]
- %623 = inttoptr i32 %622 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %624 = getelementptr %struct.edge_rec* %623, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %625 = load %struct.edge_rec** %624, align 4 ; <%struct.edge_rec*> [#uses=1]
- %626 = getelementptr %struct.edge_rec* %615, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %627 = load %struct.edge_rec** %626, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %625, %struct.edge_rec** %626, align 4
- store %struct.edge_rec* %627, %struct.edge_rec** %624, align 4
- %628 = load %struct.edge_rec** %596, align 4 ; <%struct.edge_rec*> [#uses=1]
- %629 = load %struct.edge_rec** %616, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %628, %struct.edge_rec** %616, align 4
- store %struct.edge_rec* %629, %struct.edge_rec** %596, align 4
- %630 = xor i32 %598, 32 ; <i32> [#uses=2]
- %631 = inttoptr i32 %630 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %632 = getelementptr %struct.edge_rec* %631, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %633 = load %struct.edge_rec** %632, align 4 ; <%struct.edge_rec*> [#uses=1]
- %634 = ptrtoint %struct.edge_rec* %633 to i32 ; <i32> [#uses=2]
- %635 = add i32 %634, 16 ; <i32> [#uses=1]
- %636 = and i32 %635, 63 ; <i32> [#uses=1]
- %637 = and i32 %634, -64 ; <i32> [#uses=1]
- %638 = or i32 %636, %637 ; <i32> [#uses=1]
- %639 = inttoptr i32 %638 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %640 = getelementptr %struct.edge_rec* %174, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %641 = load %struct.edge_rec** %640, align 4 ; <%struct.edge_rec*> [#uses=1]
- %642 = ptrtoint %struct.edge_rec* %641 to i32 ; <i32> [#uses=2]
- %643 = add i32 %642, 16 ; <i32> [#uses=1]
- %644 = and i32 %643, 63 ; <i32> [#uses=1]
- %645 = and i32 %642, -64 ; <i32> [#uses=1]
- %646 = or i32 %644, %645 ; <i32> [#uses=1]
- %647 = inttoptr i32 %646 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %648 = getelementptr %struct.edge_rec* %647, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %649 = load %struct.edge_rec** %648, align 4 ; <%struct.edge_rec*> [#uses=1]
- %650 = getelementptr %struct.edge_rec* %639, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %651 = load %struct.edge_rec** %650, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %649, %struct.edge_rec** %650, align 4
- store %struct.edge_rec* %651, %struct.edge_rec** %648, align 4
- %652 = load %struct.edge_rec** %632, align 4 ; <%struct.edge_rec*> [#uses=1]
- %653 = load %struct.edge_rec** %640, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %652, %struct.edge_rec** %640, align 4
- store %struct.edge_rec* %653, %struct.edge_rec** %632, align 4
- %654 = add i32 %630, 48 ; <i32> [#uses=1]
- %655 = and i32 %654, 63 ; <i32> [#uses=1]
- %656 = and i32 %598, -64 ; <i32> [#uses=1]
- %657 = or i32 %655, %656 ; <i32> [#uses=1]
- %658 = inttoptr i32 %657 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %659 = getelementptr %struct.edge_rec* %658, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %660 = load %struct.edge_rec** %659, align 4 ; <%struct.edge_rec*> [#uses=1]
- %661 = ptrtoint %struct.edge_rec* %660 to i32 ; <i32> [#uses=2]
- %662 = add i32 %661, 16 ; <i32> [#uses=1]
- %663 = and i32 %662, 63 ; <i32> [#uses=1]
- %664 = and i32 %661, -64 ; <i32> [#uses=1]
- %665 = or i32 %663, %664 ; <i32> [#uses=1]
- %666 = inttoptr i32 %665 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- br label %bb9.i
-
-bb25.i: ; preds = %bb23.i, %bb22.i
- %667 = add i32 %172, 16 ; <i32> [#uses=1]
- %668 = and i32 %667, 63 ; <i32> [#uses=1]
- %669 = and i32 %172, -64 ; <i32> [#uses=1]
- %670 = or i32 %668, %669 ; <i32> [#uses=1]
- %671 = inttoptr i32 %670 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %672 = getelementptr %struct.edge_rec* %671, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %673 = load %struct.edge_rec** %672, align 4 ; <%struct.edge_rec*> [#uses=1]
- %674 = ptrtoint %struct.edge_rec* %673 to i32 ; <i32> [#uses=2]
- %675 = add i32 %674, 16 ; <i32> [#uses=1]
- %676 = and i32 %675, 63 ; <i32> [#uses=1]
- %677 = and i32 %674, -64 ; <i32> [#uses=1]
- %678 = or i32 %676, %677 ; <i32> [#uses=1]
- %679 = inttoptr i32 %678 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %680 = call arm_apcscc %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=4]
- %681 = getelementptr %struct.edge_rec* %680, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=5]
- store %struct.edge_rec* %680, %struct.edge_rec** %681, align 4
- %682 = getelementptr %struct.edge_rec* %680, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- store %struct.VERTEX* %501, %struct.VERTEX** %682, align 4
- %683 = ptrtoint %struct.edge_rec* %680 to i32 ; <i32> [#uses=4]
- %684 = add i32 %683, 16 ; <i32> [#uses=1]
- %685 = inttoptr i32 %684 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %686 = add i32 %683, 48 ; <i32> [#uses=1]
- %687 = inttoptr i32 %686 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %688 = getelementptr %struct.edge_rec* %685, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %687, %struct.edge_rec** %688, align 4
- %689 = add i32 %683, 32 ; <i32> [#uses=1]
- %690 = inttoptr i32 %689 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %691 = getelementptr %struct.edge_rec* %690, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %690, %struct.edge_rec** %691, align 4
- %692 = getelementptr %struct.edge_rec* %690, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- store %struct.VERTEX* %496, %struct.VERTEX** %692, align 4
- %693 = getelementptr %struct.edge_rec* %687, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %685, %struct.edge_rec** %693, align 4
- %694 = load %struct.edge_rec** %681, align 4 ; <%struct.edge_rec*> [#uses=1]
- %695 = ptrtoint %struct.edge_rec* %694 to i32 ; <i32> [#uses=2]
- %696 = add i32 %695, 16 ; <i32> [#uses=1]
- %697 = and i32 %696, 63 ; <i32> [#uses=1]
- %698 = and i32 %695, -64 ; <i32> [#uses=1]
- %699 = or i32 %697, %698 ; <i32> [#uses=1]
- %700 = inttoptr i32 %699 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %701 = getelementptr %struct.edge_rec* %499, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %702 = load %struct.edge_rec** %701, align 4 ; <%struct.edge_rec*> [#uses=1]
- %703 = ptrtoint %struct.edge_rec* %702 to i32 ; <i32> [#uses=2]
- %704 = add i32 %703, 16 ; <i32> [#uses=1]
- %705 = and i32 %704, 63 ; <i32> [#uses=1]
- %706 = and i32 %703, -64 ; <i32> [#uses=1]
- %707 = or i32 %705, %706 ; <i32> [#uses=1]
- %708 = inttoptr i32 %707 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %709 = getelementptr %struct.edge_rec* %708, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %710 = load %struct.edge_rec** %709, align 4 ; <%struct.edge_rec*> [#uses=1]
- %711 = getelementptr %struct.edge_rec* %700, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %712 = load %struct.edge_rec** %711, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %710, %struct.edge_rec** %711, align 4
- store %struct.edge_rec* %712, %struct.edge_rec** %709, align 4
- %713 = load %struct.edge_rec** %681, align 4 ; <%struct.edge_rec*> [#uses=1]
- %714 = load %struct.edge_rec** %701, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %713, %struct.edge_rec** %701, align 4
- store %struct.edge_rec* %714, %struct.edge_rec** %681, align 4
- %715 = xor i32 %683, 32 ; <i32> [#uses=1]
- %716 = inttoptr i32 %715 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %717 = getelementptr %struct.edge_rec* %716, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %718 = load %struct.edge_rec** %717, align 4 ; <%struct.edge_rec*> [#uses=1]
- %719 = ptrtoint %struct.edge_rec* %718 to i32 ; <i32> [#uses=2]
- %720 = add i32 %719, 16 ; <i32> [#uses=1]
- %721 = and i32 %720, 63 ; <i32> [#uses=1]
- %722 = and i32 %719, -64 ; <i32> [#uses=1]
- %723 = or i32 %721, %722 ; <i32> [#uses=1]
- %724 = inttoptr i32 %723 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %725 = getelementptr %struct.edge_rec* %679, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %726 = load %struct.edge_rec** %725, align 4 ; <%struct.edge_rec*> [#uses=1]
- %727 = ptrtoint %struct.edge_rec* %726 to i32 ; <i32> [#uses=2]
- %728 = add i32 %727, 16 ; <i32> [#uses=1]
- %729 = and i32 %728, 63 ; <i32> [#uses=1]
- %730 = and i32 %727, -64 ; <i32> [#uses=1]
- %731 = or i32 %729, %730 ; <i32> [#uses=1]
- %732 = inttoptr i32 %731 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %733 = getelementptr %struct.edge_rec* %732, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %734 = load %struct.edge_rec** %733, align 4 ; <%struct.edge_rec*> [#uses=1]
- %735 = getelementptr %struct.edge_rec* %724, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %736 = load %struct.edge_rec** %735, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %734, %struct.edge_rec** %735, align 4
- store %struct.edge_rec* %736, %struct.edge_rec** %733, align 4
- %737 = load %struct.edge_rec** %717, align 4 ; <%struct.edge_rec*> [#uses=1]
- %738 = load %struct.edge_rec** %725, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %737, %struct.edge_rec** %725, align 4
- store %struct.edge_rec* %738, %struct.edge_rec** %717, align 4
- %739 = load %struct.edge_rec** %681, align 4 ; <%struct.edge_rec*> [#uses=1]
- br label %bb9.i
-
-do_merge.exit: ; preds = %bb17.i
- %740 = getelementptr %struct.edge_rec* %ldo_addr.0.ph.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %741 = load %struct.VERTEX** %740, align 4 ; <%struct.VERTEX*> [#uses=1]
- %742 = icmp eq %struct.VERTEX* %741, %tree_addr.0.i ; <i1> [#uses=1]
- br i1 %742, label %bb5.loopexit, label %bb2
-
-bb2: ; preds = %bb2, %do_merge.exit
- %ldo.07 = phi %struct.edge_rec* [ %747, %bb2 ], [ %ldo_addr.0.ph.i, %do_merge.exit ] ; <%struct.edge_rec*> [#uses=1]
- %743 = ptrtoint %struct.edge_rec* %ldo.07 to i32 ; <i32> [#uses=1]
- %744 = xor i32 %743, 32 ; <i32> [#uses=1]
- %745 = inttoptr i32 %744 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %746 = getelementptr %struct.edge_rec* %745, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %747 = load %struct.edge_rec** %746, align 4 ; <%struct.edge_rec*> [#uses=3]
- %748 = getelementptr %struct.edge_rec* %747, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %749 = load %struct.VERTEX** %748, align 4 ; <%struct.VERTEX*> [#uses=1]
- %750 = icmp eq %struct.VERTEX* %749, %tree_addr.0.i ; <i1> [#uses=1]
- br i1 %750, label %bb5.loopexit, label %bb2
-
-bb4: ; preds = %bb5.loopexit, %bb4
- %rdo.05 = phi %struct.edge_rec* [ %755, %bb4 ], [ %rdo_addr.0.i, %bb5.loopexit ] ; <%struct.edge_rec*> [#uses=1]
- %751 = getelementptr %struct.edge_rec* %rdo.05, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %752 = load %struct.edge_rec** %751, align 4 ; <%struct.edge_rec*> [#uses=1]
- %753 = ptrtoint %struct.edge_rec* %752 to i32 ; <i32> [#uses=1]
- %754 = xor i32 %753, 32 ; <i32> [#uses=1]
- %755 = inttoptr i32 %754 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %756 = getelementptr %struct.edge_rec* %755, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %757 = load %struct.VERTEX** %756, align 4 ; <%struct.VERTEX*> [#uses=1]
- %758 = icmp eq %struct.VERTEX* %757, %extra ; <i1> [#uses=1]
- br i1 %758, label %bb6, label %bb4
-
-bb5.loopexit: ; preds = %bb2, %do_merge.exit
- %ldo.0.lcssa = phi %struct.edge_rec* [ %ldo_addr.0.ph.i, %do_merge.exit ], [ %747, %bb2 ] ; <%struct.edge_rec*> [#uses=1]
- %759 = getelementptr %struct.edge_rec* %rdo_addr.0.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %760 = load %struct.VERTEX** %759, align 4 ; <%struct.VERTEX*> [#uses=1]
- %761 = icmp eq %struct.VERTEX* %760, %extra ; <i1> [#uses=1]
- br i1 %761, label %bb6, label %bb4
-
-bb6: ; preds = %bb5.loopexit, %bb4
- %rdo.0.lcssa = phi %struct.edge_rec* [ %rdo_addr.0.i, %bb5.loopexit ], [ %755, %bb4 ] ; <%struct.edge_rec*> [#uses=1]
- %tmp16 = ptrtoint %struct.edge_rec* %ldo.0.lcssa to i32 ; <i32> [#uses=1]
- %tmp4 = ptrtoint %struct.edge_rec* %rdo.0.lcssa to i32 ; <i32> [#uses=1]
- br label %bb15
-
-bb7: ; preds = %bb
- %762 = getelementptr %struct.VERTEX* %tree, i32 0, i32 1 ; <%struct.VERTEX**> [#uses=1]
- %763 = load %struct.VERTEX** %762, align 4 ; <%struct.VERTEX*> [#uses=4]
- %764 = icmp eq %struct.VERTEX* %763, null ; <i1> [#uses=1]
- %765 = call arm_apcscc %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=5]
- %766 = getelementptr %struct.edge_rec* %765, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=4]
- store %struct.edge_rec* %765, %struct.edge_rec** %766, align 4
- %767 = getelementptr %struct.edge_rec* %765, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=3]
- br i1 %764, label %bb10, label %bb11
-
-bb8: ; preds = %entry
- %768 = call arm_apcscc i32 @puts(i8* getelementptr ([21 x i8]* @_2E_str7, i32 0, i32 0)) nounwind ; <i32> [#uses=0]
- call arm_apcscc void @exit(i32 -1) noreturn nounwind
- unreachable
-
-bb10: ; preds = %bb7
- store %struct.VERTEX* %tree, %struct.VERTEX** %767, align 4
- %769 = ptrtoint %struct.edge_rec* %765 to i32 ; <i32> [#uses=5]
- %770 = add i32 %769, 16 ; <i32> [#uses=1]
- %771 = inttoptr i32 %770 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %772 = add i32 %769, 48 ; <i32> [#uses=1]
- %773 = inttoptr i32 %772 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %774 = getelementptr %struct.edge_rec* %771, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %773, %struct.edge_rec** %774, align 4
- %775 = add i32 %769, 32 ; <i32> [#uses=1]
- %776 = inttoptr i32 %775 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %777 = getelementptr %struct.edge_rec* %776, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %776, %struct.edge_rec** %777, align 4
- %778 = getelementptr %struct.edge_rec* %776, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- store %struct.VERTEX* %extra, %struct.VERTEX** %778, align 4
- %779 = getelementptr %struct.edge_rec* %773, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %771, %struct.edge_rec** %779, align 4
- %780 = xor i32 %769, 32 ; <i32> [#uses=1]
- br label %bb15
-
-bb11: ; preds = %bb7
- store %struct.VERTEX* %763, %struct.VERTEX** %767, align 4
- %781 = ptrtoint %struct.edge_rec* %765 to i32 ; <i32> [#uses=6]
- %782 = add i32 %781, 16 ; <i32> [#uses=1]
- %783 = inttoptr i32 %782 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %784 = add i32 %781, 48 ; <i32> [#uses=1]
- %785 = inttoptr i32 %784 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %786 = getelementptr %struct.edge_rec* %783, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %785, %struct.edge_rec** %786, align 4
- %787 = add i32 %781, 32 ; <i32> [#uses=1]
- %788 = inttoptr i32 %787 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %789 = getelementptr %struct.edge_rec* %788, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %788, %struct.edge_rec** %789, align 4
- %790 = getelementptr %struct.edge_rec* %788, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- store %struct.VERTEX* %tree, %struct.VERTEX** %790, align 4
- %791 = getelementptr %struct.edge_rec* %785, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %783, %struct.edge_rec** %791, align 4
- %792 = call arm_apcscc %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=4]
- %793 = getelementptr %struct.edge_rec* %792, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=4]
- store %struct.edge_rec* %792, %struct.edge_rec** %793, align 4
- %794 = getelementptr %struct.edge_rec* %792, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- store %struct.VERTEX* %tree, %struct.VERTEX** %794, align 4
- %795 = ptrtoint %struct.edge_rec* %792 to i32 ; <i32> [#uses=5]
- %796 = add i32 %795, 16 ; <i32> [#uses=1]
- %797 = inttoptr i32 %796 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %798 = add i32 %795, 48 ; <i32> [#uses=2]
- %799 = inttoptr i32 %798 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %800 = getelementptr %struct.edge_rec* %797, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %799, %struct.edge_rec** %800, align 4
- %801 = add i32 %795, 32 ; <i32> [#uses=1]
- %802 = inttoptr i32 %801 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %803 = getelementptr %struct.edge_rec* %802, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %802, %struct.edge_rec** %803, align 4
- %804 = getelementptr %struct.edge_rec* %802, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- store %struct.VERTEX* %extra, %struct.VERTEX** %804, align 4
- %805 = getelementptr %struct.edge_rec* %799, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %797, %struct.edge_rec** %805, align 4
- %806 = xor i32 %781, 32 ; <i32> [#uses=1]
- %807 = inttoptr i32 %806 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %808 = getelementptr %struct.edge_rec* %807, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %809 = load %struct.edge_rec** %808, align 4 ; <%struct.edge_rec*> [#uses=1]
- %810 = ptrtoint %struct.edge_rec* %809 to i32 ; <i32> [#uses=2]
- %811 = add i32 %810, 16 ; <i32> [#uses=1]
- %812 = and i32 %811, 63 ; <i32> [#uses=1]
- %813 = and i32 %810, -64 ; <i32> [#uses=1]
- %814 = or i32 %812, %813 ; <i32> [#uses=1]
- %815 = inttoptr i32 %814 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %816 = load %struct.edge_rec** %793, align 4 ; <%struct.edge_rec*> [#uses=1]
- %817 = ptrtoint %struct.edge_rec* %816 to i32 ; <i32> [#uses=2]
- %818 = add i32 %817, 16 ; <i32> [#uses=1]
- %819 = and i32 %818, 63 ; <i32> [#uses=1]
- %820 = and i32 %817, -64 ; <i32> [#uses=1]
- %821 = or i32 %819, %820 ; <i32> [#uses=1]
- %822 = inttoptr i32 %821 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %823 = getelementptr %struct.edge_rec* %822, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %824 = load %struct.edge_rec** %823, align 4 ; <%struct.edge_rec*> [#uses=1]
- %825 = getelementptr %struct.edge_rec* %815, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %826 = load %struct.edge_rec** %825, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %824, %struct.edge_rec** %825, align 4
- store %struct.edge_rec* %826, %struct.edge_rec** %823, align 4
- %827 = load %struct.edge_rec** %808, align 4 ; <%struct.edge_rec*> [#uses=1]
- %828 = load %struct.edge_rec** %793, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %827, %struct.edge_rec** %793, align 4
- store %struct.edge_rec* %828, %struct.edge_rec** %808, align 4
- %829 = xor i32 %795, 32 ; <i32> [#uses=3]
- %830 = inttoptr i32 %829 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %831 = getelementptr %struct.edge_rec* %830, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %832 = load %struct.VERTEX** %831, align 4 ; <%struct.VERTEX*> [#uses=1]
- %833 = and i32 %798, 63 ; <i32> [#uses=1]
- %834 = and i32 %795, -64 ; <i32> [#uses=1]
- %835 = or i32 %833, %834 ; <i32> [#uses=1]
- %836 = inttoptr i32 %835 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %837 = getelementptr %struct.edge_rec* %836, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %838 = load %struct.edge_rec** %837, align 4 ; <%struct.edge_rec*> [#uses=1]
- %839 = ptrtoint %struct.edge_rec* %838 to i32 ; <i32> [#uses=2]
- %840 = add i32 %839, 16 ; <i32> [#uses=1]
- %841 = and i32 %840, 63 ; <i32> [#uses=1]
- %842 = and i32 %839, -64 ; <i32> [#uses=1]
- %843 = or i32 %841, %842 ; <i32> [#uses=1]
- %844 = inttoptr i32 %843 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %845 = load %struct.VERTEX** %767, align 4 ; <%struct.VERTEX*> [#uses=1]
- %846 = call arm_apcscc %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=4]
- %847 = getelementptr %struct.edge_rec* %846, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=7]
- store %struct.edge_rec* %846, %struct.edge_rec** %847, align 4
- %848 = getelementptr %struct.edge_rec* %846, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- store %struct.VERTEX* %832, %struct.VERTEX** %848, align 4
- %849 = ptrtoint %struct.edge_rec* %846 to i32 ; <i32> [#uses=6]
- %850 = add i32 %849, 16 ; <i32> [#uses=2]
- %851 = inttoptr i32 %850 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %852 = add i32 %849, 48 ; <i32> [#uses=1]
- %853 = inttoptr i32 %852 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %854 = getelementptr %struct.edge_rec* %851, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %853, %struct.edge_rec** %854, align 4
- %855 = add i32 %849, 32 ; <i32> [#uses=1]
- %856 = inttoptr i32 %855 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %857 = getelementptr %struct.edge_rec* %856, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %856, %struct.edge_rec** %857, align 4
- %858 = getelementptr %struct.edge_rec* %856, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- store %struct.VERTEX* %845, %struct.VERTEX** %858, align 4
- %859 = getelementptr %struct.edge_rec* %853, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %851, %struct.edge_rec** %859, align 4
- %860 = load %struct.edge_rec** %847, align 4 ; <%struct.edge_rec*> [#uses=1]
- %861 = ptrtoint %struct.edge_rec* %860 to i32 ; <i32> [#uses=2]
- %862 = add i32 %861, 16 ; <i32> [#uses=1]
- %863 = and i32 %862, 63 ; <i32> [#uses=1]
- %864 = and i32 %861, -64 ; <i32> [#uses=1]
- %865 = or i32 %863, %864 ; <i32> [#uses=1]
- %866 = inttoptr i32 %865 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %867 = getelementptr %struct.edge_rec* %844, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %868 = load %struct.edge_rec** %867, align 4 ; <%struct.edge_rec*> [#uses=1]
- %869 = ptrtoint %struct.edge_rec* %868 to i32 ; <i32> [#uses=2]
- %870 = add i32 %869, 16 ; <i32> [#uses=1]
- %871 = and i32 %870, 63 ; <i32> [#uses=1]
- %872 = and i32 %869, -64 ; <i32> [#uses=1]
- %873 = or i32 %871, %872 ; <i32> [#uses=1]
- %874 = inttoptr i32 %873 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %875 = getelementptr %struct.edge_rec* %874, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %876 = load %struct.edge_rec** %875, align 4 ; <%struct.edge_rec*> [#uses=1]
- %877 = getelementptr %struct.edge_rec* %866, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %878 = load %struct.edge_rec** %877, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %876, %struct.edge_rec** %877, align 4
- store %struct.edge_rec* %878, %struct.edge_rec** %875, align 4
- %879 = load %struct.edge_rec** %847, align 4 ; <%struct.edge_rec*> [#uses=1]
- %880 = load %struct.edge_rec** %867, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %879, %struct.edge_rec** %867, align 4
- store %struct.edge_rec* %880, %struct.edge_rec** %847, align 4
- %881 = xor i32 %849, 32 ; <i32> [#uses=3]
- %882 = inttoptr i32 %881 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %883 = getelementptr %struct.edge_rec* %882, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=6]
- %884 = load %struct.edge_rec** %883, align 4 ; <%struct.edge_rec*> [#uses=1]
- %885 = ptrtoint %struct.edge_rec* %884 to i32 ; <i32> [#uses=2]
- %886 = add i32 %885, 16 ; <i32> [#uses=1]
- %887 = and i32 %886, 63 ; <i32> [#uses=1]
- %888 = and i32 %885, -64 ; <i32> [#uses=1]
- %889 = or i32 %887, %888 ; <i32> [#uses=1]
- %890 = inttoptr i32 %889 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %891 = load %struct.edge_rec** %766, align 4 ; <%struct.edge_rec*> [#uses=1]
- %892 = ptrtoint %struct.edge_rec* %891 to i32 ; <i32> [#uses=2]
- %893 = add i32 %892, 16 ; <i32> [#uses=1]
- %894 = and i32 %893, 63 ; <i32> [#uses=1]
- %895 = and i32 %892, -64 ; <i32> [#uses=1]
- %896 = or i32 %894, %895 ; <i32> [#uses=1]
- %897 = inttoptr i32 %896 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %898 = getelementptr %struct.edge_rec* %897, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %899 = load %struct.edge_rec** %898, align 4 ; <%struct.edge_rec*> [#uses=1]
- %900 = getelementptr %struct.edge_rec* %890, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %901 = load %struct.edge_rec** %900, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %899, %struct.edge_rec** %900, align 4
- store %struct.edge_rec* %901, %struct.edge_rec** %898, align 4
- %902 = load %struct.edge_rec** %883, align 4 ; <%struct.edge_rec*> [#uses=1]
- %903 = load %struct.edge_rec** %766, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %902, %struct.edge_rec** %766, align 4
- store %struct.edge_rec* %903, %struct.edge_rec** %883, align 4
- %904 = getelementptr %struct.VERTEX* %763, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %905 = load double* %904, align 4 ; <double> [#uses=2]
- %906 = getelementptr %struct.VERTEX* %763, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %907 = load double* %906, align 4 ; <double> [#uses=2]
- %908 = getelementptr %struct.VERTEX* %extra, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %909 = load double* %908, align 4 ; <double> [#uses=3]
- %910 = getelementptr %struct.VERTEX* %extra, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %911 = load double* %910, align 4 ; <double> [#uses=3]
- %912 = getelementptr %struct.VERTEX* %tree, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %913 = load double* %912, align 4 ; <double> [#uses=3]
- %914 = getelementptr %struct.VERTEX* %tree, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %915 = load double* %914, align 4 ; <double> [#uses=3]
- %916 = fsub double %905, %913 ; <double> [#uses=1]
- %917 = fsub double %911, %915 ; <double> [#uses=1]
- %918 = fmul double %916, %917 ; <double> [#uses=1]
- %919 = fsub double %909, %913 ; <double> [#uses=1]
- %920 = fsub double %907, %915 ; <double> [#uses=1]
- %921 = fmul double %919, %920 ; <double> [#uses=1]
- %922 = fsub double %918, %921 ; <double> [#uses=1]
- %923 = fcmp ogt double %922, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %923, label %bb15, label %bb13
-
-bb13: ; preds = %bb11
- %924 = fsub double %905, %909 ; <double> [#uses=1]
- %925 = fsub double %915, %911 ; <double> [#uses=1]
- %926 = fmul double %924, %925 ; <double> [#uses=1]
- %927 = fsub double %913, %909 ; <double> [#uses=1]
- %928 = fsub double %907, %911 ; <double> [#uses=1]
- %929 = fmul double %927, %928 ; <double> [#uses=1]
- %930 = fsub double %926, %929 ; <double> [#uses=1]
- %931 = fcmp ogt double %930, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %931, label %bb15, label %bb14
-
-bb14: ; preds = %bb13
- %932 = and i32 %850, 63 ; <i32> [#uses=1]
- %933 = and i32 %849, -64 ; <i32> [#uses=3]
- %934 = or i32 %932, %933 ; <i32> [#uses=1]
- %935 = inttoptr i32 %934 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %936 = getelementptr %struct.edge_rec* %935, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %937 = load %struct.edge_rec** %936, align 4 ; <%struct.edge_rec*> [#uses=1]
- %938 = ptrtoint %struct.edge_rec* %937 to i32 ; <i32> [#uses=2]
- %939 = add i32 %938, 16 ; <i32> [#uses=1]
- %940 = and i32 %939, 63 ; <i32> [#uses=1]
- %941 = and i32 %938, -64 ; <i32> [#uses=1]
- %942 = or i32 %940, %941 ; <i32> [#uses=1]
- %943 = inttoptr i32 %942 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %944 = load %struct.edge_rec** %847, align 4 ; <%struct.edge_rec*> [#uses=1]
- %945 = ptrtoint %struct.edge_rec* %944 to i32 ; <i32> [#uses=2]
- %946 = add i32 %945, 16 ; <i32> [#uses=1]
- %947 = and i32 %946, 63 ; <i32> [#uses=1]
- %948 = and i32 %945, -64 ; <i32> [#uses=1]
- %949 = or i32 %947, %948 ; <i32> [#uses=1]
- %950 = inttoptr i32 %949 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %951 = getelementptr %struct.edge_rec* %943, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %952 = load %struct.edge_rec** %951, align 4 ; <%struct.edge_rec*> [#uses=1]
- %953 = ptrtoint %struct.edge_rec* %952 to i32 ; <i32> [#uses=2]
- %954 = add i32 %953, 16 ; <i32> [#uses=1]
- %955 = and i32 %954, 63 ; <i32> [#uses=1]
- %956 = and i32 %953, -64 ; <i32> [#uses=1]
- %957 = or i32 %955, %956 ; <i32> [#uses=1]
- %958 = inttoptr i32 %957 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %959 = getelementptr %struct.edge_rec* %958, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %960 = load %struct.edge_rec** %959, align 4 ; <%struct.edge_rec*> [#uses=1]
- %961 = getelementptr %struct.edge_rec* %950, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %962 = load %struct.edge_rec** %961, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %960, %struct.edge_rec** %961, align 4
- store %struct.edge_rec* %962, %struct.edge_rec** %959, align 4
- %963 = load %struct.edge_rec** %847, align 4 ; <%struct.edge_rec*> [#uses=1]
- %964 = load %struct.edge_rec** %951, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %963, %struct.edge_rec** %951, align 4
- store %struct.edge_rec* %964, %struct.edge_rec** %847, align 4
- %965 = add i32 %881, 16 ; <i32> [#uses=1]
- %966 = and i32 %965, 63 ; <i32> [#uses=1]
- %967 = or i32 %966, %933 ; <i32> [#uses=1]
- %968 = inttoptr i32 %967 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %969 = getelementptr %struct.edge_rec* %968, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- %970 = load %struct.edge_rec** %969, align 4 ; <%struct.edge_rec*> [#uses=1]
- %971 = ptrtoint %struct.edge_rec* %970 to i32 ; <i32> [#uses=2]
- %972 = add i32 %971, 16 ; <i32> [#uses=1]
- %973 = and i32 %972, 63 ; <i32> [#uses=1]
- %974 = and i32 %971, -64 ; <i32> [#uses=1]
- %975 = or i32 %973, %974 ; <i32> [#uses=1]
- %976 = inttoptr i32 %975 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %977 = load %struct.edge_rec** %883, align 4 ; <%struct.edge_rec*> [#uses=1]
- %978 = ptrtoint %struct.edge_rec* %977 to i32 ; <i32> [#uses=2]
- %979 = add i32 %978, 16 ; <i32> [#uses=1]
- %980 = and i32 %979, 63 ; <i32> [#uses=1]
- %981 = and i32 %978, -64 ; <i32> [#uses=1]
- %982 = or i32 %980, %981 ; <i32> [#uses=1]
- %983 = inttoptr i32 %982 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %984 = getelementptr %struct.edge_rec* %976, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
- %985 = load %struct.edge_rec** %984, align 4 ; <%struct.edge_rec*> [#uses=1]
- %986 = ptrtoint %struct.edge_rec* %985 to i32 ; <i32> [#uses=2]
- %987 = add i32 %986, 16 ; <i32> [#uses=1]
- %988 = and i32 %987, 63 ; <i32> [#uses=1]
- %989 = and i32 %986, -64 ; <i32> [#uses=1]
- %990 = or i32 %988, %989 ; <i32> [#uses=1]
- %991 = inttoptr i32 %990 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %992 = getelementptr %struct.edge_rec* %991, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %993 = load %struct.edge_rec** %992, align 4 ; <%struct.edge_rec*> [#uses=1]
- %994 = getelementptr %struct.edge_rec* %983, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
- %995 = load %struct.edge_rec** %994, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %993, %struct.edge_rec** %994, align 4
- store %struct.edge_rec* %995, %struct.edge_rec** %992, align 4
- %996 = load %struct.edge_rec** %883, align 4 ; <%struct.edge_rec*> [#uses=1]
- %997 = load %struct.edge_rec** %984, align 4 ; <%struct.edge_rec*> [#uses=1]
- store %struct.edge_rec* %996, %struct.edge_rec** %984, align 4
- store %struct.edge_rec* %997, %struct.edge_rec** %883, align 4
- %998 = inttoptr i32 %933 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %999 = load %struct.edge_rec** @avail_edge, align 4 ; <%struct.edge_rec*> [#uses=1]
- %1000 = getelementptr %struct.edge_rec* %998, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
- store %struct.edge_rec* %999, %struct.edge_rec** %1000, align 4
- store %struct.edge_rec* %998, %struct.edge_rec** @avail_edge, align 4
- br label %bb15
-
-bb15: ; preds = %bb14, %bb13, %bb11, %bb10, %bb6
- %retval.1.0 = phi i32 [ %780, %bb10 ], [ %829, %bb13 ], [ %829, %bb14 ], [ %tmp4, %bb6 ], [ %849, %bb11 ] ; <i32> [#uses=1]
- %retval.0.0 = phi i32 [ %769, %bb10 ], [ %781, %bb13 ], [ %781, %bb14 ], [ %tmp16, %bb6 ], [ %881, %bb11 ] ; <i32> [#uses=1]
- %agg.result162 = bitcast %struct.EDGE_PAIR* %agg.result to i64* ; <i64*> [#uses=1]
- %1001 = zext i32 %retval.0.0 to i64 ; <i64> [#uses=1]
- %1002 = zext i32 %retval.1.0 to i64 ; <i64> [#uses=1]
- %1003 = shl i64 %1002, 32 ; <i64> [#uses=1]
- %1004 = or i64 %1003, %1001 ; <i64> [#uses=1]
- store i64 %1004, i64* %agg.result162, align 4
- ret void
-}
-
-declare arm_apcscc i32 @puts(i8* nocapture) nounwind
-
-declare arm_apcscc void @exit(i32) noreturn nounwind
-
-declare arm_apcscc %struct.edge_rec* @alloc_edge() nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-22-ScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-22-ScavengerAssert.ll
deleted file mode 100644
index b4b989b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-22-ScavengerAssert.ll
+++ /dev/null
@@ -1,94 +0,0 @@
-; RUN: llc < %s -mtriple=armv6-apple-darwin10
-
- %struct.cli_ac_alt = type { i8, i8*, i16, i16, %struct.cli_ac_alt* }
- %struct.cli_ac_node = type { i8, i8, %struct.cli_ac_patt*, %struct.cli_ac_node**, %struct.cli_ac_node* }
- %struct.cli_ac_patt = type { i16*, i16*, i16, i16, i8, i32, i32, i8*, i8*, i32, i16, i16, i16, i16, %struct.cli_ac_alt**, i8, i16, %struct.cli_ac_patt*, %struct.cli_ac_patt* }
- %struct.cli_bm_patt = type { i8*, i8*, i16, i16, i8*, i8*, i8, %struct.cli_bm_patt*, i16 }
- %struct.cli_matcher = type { i16, i8, i8*, %struct.cli_bm_patt**, i32*, i32, i8, i8, %struct.cli_ac_node*, %struct.cli_ac_node**, %struct.cli_ac_patt**, i32, i32, i32 }
-
-declare arm_apcscc i32 @strlen(i8* nocapture) nounwind readonly
-
-define arm_apcscc i32 @cli_ac_addsig(%struct.cli_matcher* nocapture %root, i8* %virname, i8* %hexsig, i32 %sigid, i16 zeroext %parts, i16 zeroext %partno, i16 zeroext %type, i32 %mindist, i32 %maxdist, i8* %offset, i8 zeroext %target) nounwind {
-entry:
- br i1 undef, label %bb126, label %bb1
-
-bb1: ; preds = %entry
- br i1 undef, label %cli_calloc.exit.thread, label %cli_calloc.exit
-
-cli_calloc.exit.thread: ; preds = %bb1
- ret i32 -114
-
-cli_calloc.exit: ; preds = %bb1
- store i16 %parts, i16* undef, align 4
- br i1 undef, label %bb52, label %bb4
-
-bb4: ; preds = %cli_calloc.exit
- br i1 undef, label %bb.i, label %bb1.i3
-
-bb.i: ; preds = %bb4
- unreachable
-
-bb1.i3: ; preds = %bb4
- br i1 undef, label %bb2.i4, label %cli_strdup.exit
-
-bb2.i4: ; preds = %bb1.i3
- ret i32 -114
-
-cli_strdup.exit: ; preds = %bb1.i3
- br i1 undef, label %cli_calloc.exit54.thread, label %cli_calloc.exit54
-
-cli_calloc.exit54.thread: ; preds = %cli_strdup.exit
- ret i32 -114
-
-cli_calloc.exit54: ; preds = %cli_strdup.exit
- br label %bb45
-
-cli_calloc.exit70.thread: ; preds = %bb45
- unreachable
-
-cli_calloc.exit70: ; preds = %bb45
- br i1 undef, label %bb.i83, label %bb1.i84
-
-bb.i83: ; preds = %cli_calloc.exit70
- unreachable
-
-bb1.i84: ; preds = %cli_calloc.exit70
- br i1 undef, label %bb2.i85, label %bb17
-
-bb2.i85: ; preds = %bb1.i84
- unreachable
-
-bb17: ; preds = %bb1.i84
- br i1 undef, label %bb22, label %bb.nph
-
-bb.nph: ; preds = %bb17
- br label %bb18
-
-bb18: ; preds = %bb18, %bb.nph
- br i1 undef, label %bb18, label %bb22
-
-bb22: ; preds = %bb18, %bb17
- br i1 undef, label %bb25, label %bb43.preheader
-
-bb43.preheader: ; preds = %bb22
- br i1 undef, label %bb28, label %bb45
-
-bb25: ; preds = %bb22
- unreachable
-
-bb28: ; preds = %bb43.preheader
- unreachable
-
-bb45: ; preds = %bb43.preheader, %cli_calloc.exit54
- br i1 undef, label %cli_calloc.exit70.thread, label %cli_calloc.exit70
-
-bb52: ; preds = %cli_calloc.exit
- %0 = load i16* undef, align 4 ; <i16> [#uses=1]
- %1 = icmp eq i16 %0, 0 ; <i1> [#uses=1]
- %iftmp.20.0 = select i1 %1, i8* %hexsig, i8* null ; <i8*> [#uses=1]
- %2 = tail call arm_apcscc i32 @strlen(i8* %iftmp.20.0) nounwind readonly ; <i32> [#uses=0]
- unreachable
-
-bb126: ; preds = %entry
- ret i32 -117
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll
deleted file mode 100644
index 24f4990..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll
+++ /dev/null
@@ -1,95 +0,0 @@
-; RUN: llc < %s -march=arm
-
- %struct.cli_ac_alt = type { i8, i8*, i16, i16, %struct.cli_ac_alt* }
- %struct.cli_ac_node = type { i8, i8, %struct.cli_ac_patt*, %struct.cli_ac_node**, %struct.cli_ac_node* }
- %struct.cli_ac_patt = type { i16*, i16*, i16, i16, i8, i32, i32, i8*, i8*, i32, i16, i16, i16, i16, %struct.cli_ac_alt**, i8, i16, %struct.cli_ac_patt*, %struct.cli_ac_patt* }
- %struct.cli_bm_patt = type { i8*, i8*, i16, i16, i8*, i8*, i8, %struct.cli_bm_patt*, i16 }
- %struct.cli_matcher = type { i16, i8, i8*, %struct.cli_bm_patt**, i32*, i32, i8, i8, %struct.cli_ac_node*, %struct.cli_ac_node**, %struct.cli_ac_patt**, i32, i32, i32 }
-
-define arm_apcscc i32 @cli_ac_addsig(%struct.cli_matcher* nocapture %root, i8* %virname, i8* %hexsig, i32 %sigid, i16 zeroext %parts, i16 zeroext %partno, i16 zeroext %type, i32 %mindist, i32 %maxdist, i8* %offset, i8 zeroext %target) nounwind {
-entry:
- br i1 undef, label %bb126, label %bb1
-
-bb1: ; preds = %entry
- br i1 undef, label %cli_calloc.exit.thread, label %cli_calloc.exit
-
-cli_calloc.exit.thread: ; preds = %bb1
- ret i32 -114
-
-cli_calloc.exit: ; preds = %bb1
- br i1 undef, label %bb52, label %bb4
-
-bb4: ; preds = %cli_calloc.exit
- br i1 undef, label %bb.i, label %bb1.i3
-
-bb.i: ; preds = %bb4
- unreachable
-
-bb1.i3: ; preds = %bb4
- br i1 undef, label %bb2.i4, label %cli_strdup.exit
-
-bb2.i4: ; preds = %bb1.i3
- ret i32 -114
-
-cli_strdup.exit: ; preds = %bb1.i3
- br i1 undef, label %cli_calloc.exit54.thread, label %cli_calloc.exit54
-
-cli_calloc.exit54.thread: ; preds = %cli_strdup.exit
- ret i32 -114
-
-cli_calloc.exit54: ; preds = %cli_strdup.exit
- br label %bb45
-
-cli_calloc.exit70.thread: ; preds = %bb45
- unreachable
-
-cli_calloc.exit70: ; preds = %bb45
- br i1 undef, label %bb.i83, label %bb1.i84
-
-bb.i83: ; preds = %cli_calloc.exit70
- unreachable
-
-bb1.i84: ; preds = %cli_calloc.exit70
- br i1 undef, label %bb2.i85, label %bb17
-
-bb2.i85: ; preds = %bb1.i84
- unreachable
-
-bb17: ; preds = %bb1.i84
- br i1 undef, label %bb22, label %bb.nph
-
-bb.nph: ; preds = %bb17
- br label %bb18
-
-bb18: ; preds = %bb18, %bb.nph
- br i1 undef, label %bb18, label %bb22
-
-bb22: ; preds = %bb18, %bb17
- %0 = getelementptr i8* null, i32 10 ; <i8*> [#uses=1]
- %1 = bitcast i8* %0 to i16* ; <i16*> [#uses=1]
- %2 = load i16* %1, align 2 ; <i16> [#uses=1]
- %3 = add i16 %2, 1 ; <i16> [#uses=1]
- %4 = zext i16 %3 to i32 ; <i32> [#uses=1]
- %5 = mul i32 %4, 3 ; <i32> [#uses=1]
- %6 = add i32 %5, -1 ; <i32> [#uses=1]
- %7 = icmp eq i32 %6, undef ; <i1> [#uses=1]
- br i1 %7, label %bb25, label %bb43.preheader
-
-bb43.preheader: ; preds = %bb22
- br i1 undef, label %bb28, label %bb45
-
-bb25: ; preds = %bb22
- unreachable
-
-bb28: ; preds = %bb43.preheader
- unreachable
-
-bb45: ; preds = %bb43.preheader, %cli_calloc.exit54
- br i1 undef, label %cli_calloc.exit70.thread, label %cli_calloc.exit70
-
-bb52: ; preds = %cli_calloc.exit
- unreachable
-
-bb126: ; preds = %entry
- ret i32 -117
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-29-VFP3Registers.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-29-VFP3Registers.ll
deleted file mode 100644
index e1d19d1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-07-29-VFP3Registers.ll
+++ /dev/null
@@ -1,108 +0,0 @@
-; RUN: llc < %s -mtriple=armv7-apple-darwin10 -mattr=+vfp3
-
-@a = external global double ; <double*> [#uses=1]
-
-declare double @llvm.exp.f64(double) nounwind readonly
-
-define arm_apcscc void @findratio(double* nocapture %res1, double* nocapture %res2) nounwind {
-entry:
- br label %bb
-
-bb: ; preds = %bb, %entry
- br i1 undef, label %bb28, label %bb
-
-bb28: ; preds = %bb
- %0 = load double* @a, align 4 ; <double> [#uses=2]
- %1 = fadd double %0, undef ; <double> [#uses=2]
- br i1 undef, label %bb59, label %bb60
-
-bb59: ; preds = %bb28
- %2 = fsub double -0.000000e+00, undef ; <double> [#uses=2]
- br label %bb61
-
-bb60: ; preds = %bb28
- %3 = tail call double @llvm.exp.f64(double undef) nounwind ; <double> [#uses=1]
- %4 = fsub double -0.000000e+00, %3 ; <double> [#uses=2]
- %5 = fsub double -0.000000e+00, undef ; <double> [#uses=1]
- %6 = fsub double -0.000000e+00, undef ; <double> [#uses=1]
- br label %bb61
-
-bb61: ; preds = %bb60, %bb59
- %.pn201 = phi double [ undef, %bb59 ], [ undef, %bb60 ] ; <double> [#uses=1]
- %.pn111 = phi double [ undef, %bb59 ], [ undef, %bb60 ] ; <double> [#uses=1]
- %.pn452 = phi double [ undef, %bb59 ], [ undef, %bb60 ] ; <double> [#uses=1]
- %.pn85 = phi double [ undef, %bb59 ], [ undef, %bb60 ] ; <double> [#uses=1]
- %.pn238 = phi double [ 0.000000e+00, %bb59 ], [ 0.000000e+00, %bb60 ] ; <double> [#uses=1]
- %.pn39 = phi double [ undef, %bb59 ], [ undef, %bb60 ] ; <double> [#uses=1]
- %.pn230 = phi double [ undef, %bb59 ], [ undef, %bb60 ] ; <double> [#uses=1]
- %.pn228 = phi double [ 0.000000e+00, %bb59 ], [ undef, %bb60 ] ; <double> [#uses=1]
- %.pn224 = phi double [ undef, %bb59 ], [ undef, %bb60 ] ; <double> [#uses=1]
- %.pn222 = phi double [ 0.000000e+00, %bb59 ], [ undef, %bb60 ] ; <double> [#uses=1]
- %.pn218 = phi double [ %2, %bb59 ], [ %4, %bb60 ] ; <double> [#uses=1]
- %.pn214 = phi double [ 0.000000e+00, %bb59 ], [ undef, %bb60 ] ; <double> [#uses=1]
- %.pn212 = phi double [ %2, %bb59 ], [ %4, %bb60 ] ; <double> [#uses=1]
- %.pn213 = phi double [ undef, %bb59 ], [ undef, %bb60 ] ; <double> [#uses=1]
- %.pn210 = phi double [ undef, %bb59 ], [ %5, %bb60 ] ; <double> [#uses=1]
- %.pn202 = phi double [ undef, %bb59 ], [ %6, %bb60 ] ; <double> [#uses=0]
- %.pn390 = fdiv double %.pn452, undef ; <double> [#uses=0]
- %.pn145 = fdiv double %.pn238, %1 ; <double> [#uses=0]
- %.pn138 = fdiv double %.pn230, undef ; <double> [#uses=1]
- %.pn139 = fdiv double %.pn228, undef ; <double> [#uses=1]
- %.pn134 = fdiv double %.pn224, %0 ; <double> [#uses=1]
- %.pn135 = fdiv double %.pn222, %1 ; <double> [#uses=1]
- %.pn133 = fdiv double %.pn218, undef ; <double> [#uses=0]
- %.pn128 = fdiv double %.pn214, undef ; <double> [#uses=1]
- %.pn129 = fdiv double %.pn212, %.pn213 ; <double> [#uses=1]
- %.pn126 = fdiv double %.pn210, undef ; <double> [#uses=0]
- %.pn54.in = fmul double undef, %.pn201 ; <double> [#uses=1]
- %.pn42.in = fmul double undef, undef ; <double> [#uses=1]
- %.pn76 = fsub double %.pn138, %.pn139 ; <double> [#uses=1]
- %.pn74 = fsub double %.pn134, %.pn135 ; <double> [#uses=1]
- %.pn70 = fsub double %.pn128, %.pn129 ; <double> [#uses=1]
- %.pn54 = fdiv double %.pn54.in, 6.000000e+00 ; <double> [#uses=1]
- %.pn64 = fmul double undef, 0x3FE5555555555555 ; <double> [#uses=1]
- %.pn65 = fmul double undef, undef ; <double> [#uses=1]
- %.pn50 = fmul double undef, %.pn111 ; <double> [#uses=0]
- %.pn42 = fdiv double %.pn42.in, 6.000000e+00 ; <double> [#uses=1]
- %.pn40 = fmul double undef, %.pn85 ; <double> [#uses=0]
- %.pn56 = fadd double %.pn76, undef ; <double> [#uses=1]
- %.pn57 = fmul double %.pn74, undef ; <double> [#uses=1]
- %.pn36 = fadd double undef, undef ; <double> [#uses=1]
- %.pn37 = fmul double %.pn70, undef ; <double> [#uses=1]
- %.pn33 = fmul double undef, 0x3FC5555555555555 ; <double> [#uses=1]
- %.pn29 = fsub double %.pn64, %.pn65 ; <double> [#uses=1]
- %.pn21 = fadd double undef, undef ; <double> [#uses=1]
- %.pn27 = fmul double undef, 0x3FC5555555555555 ; <double> [#uses=1]
- %.pn11 = fadd double %.pn56, %.pn57 ; <double> [#uses=1]
- %.pn32 = fmul double %.pn54, undef ; <double> [#uses=1]
- %.pn26 = fmul double %.pn42, undef ; <double> [#uses=1]
- %.pn15 = fmul double 0.000000e+00, %.pn39 ; <double> [#uses=1]
- %.pn7 = fadd double %.pn36, %.pn37 ; <double> [#uses=1]
- %.pn30 = fsub double %.pn32, %.pn33 ; <double> [#uses=1]
- %.pn28 = fadd double %.pn30, 0.000000e+00 ; <double> [#uses=1]
- %.pn24 = fsub double %.pn28, %.pn29 ; <double> [#uses=1]
- %.pn22 = fsub double %.pn26, %.pn27 ; <double> [#uses=1]
- %.pn20 = fadd double %.pn24, undef ; <double> [#uses=1]
- %.pn18 = fadd double %.pn22, 0.000000e+00 ; <double> [#uses=1]
- %.pn16 = fsub double %.pn20, %.pn21 ; <double> [#uses=1]
- %.pn14 = fsub double %.pn18, undef ; <double> [#uses=1]
- %.pn12 = fadd double %.pn16, undef ; <double> [#uses=1]
- %.pn10 = fadd double %.pn14, %.pn15 ; <double> [#uses=1]
- %.pn8 = fsub double %.pn12, undef ; <double> [#uses=1]
- %.pn6 = fsub double %.pn10, %.pn11 ; <double> [#uses=1]
- %.pn4 = fadd double %.pn8, undef ; <double> [#uses=1]
- %.pn2 = fadd double %.pn6, %.pn7 ; <double> [#uses=1]
- %N1.0 = fsub double %.pn4, undef ; <double> [#uses=1]
- %D1.0 = fsub double %.pn2, undef ; <double> [#uses=2]
- br i1 undef, label %bb62, label %bb64
-
-bb62: ; preds = %bb61
- %7 = fadd double %D1.0, undef ; <double> [#uses=1]
- br label %bb64
-
-bb64: ; preds = %bb62, %bb61
- %.pn = phi double [ undef, %bb62 ], [ %N1.0, %bb61 ] ; <double> [#uses=1]
- %.pn1 = phi double [ %7, %bb62 ], [ %D1.0, %bb61 ] ; <double> [#uses=1]
- %x.1 = fdiv double %.pn, %.pn1 ; <double> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-02-RegScavengerAssert-Neon.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-02-RegScavengerAssert-Neon.ll
deleted file mode 100644
index 2d4e58d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-02-RegScavengerAssert-Neon.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon
-; PR4657
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
-target triple = "armv7-apple-darwin9"
-
-define arm_apcscc <4 x i32> @scale(<4 x i32> %v, i32 %f) nounwind {
-entry:
- %v_addr = alloca <4 x i32> ; <<4 x i32>*> [#uses=2]
- %f_addr = alloca i32 ; <i32*> [#uses=2]
- %retval = alloca <4 x i32> ; <<4 x i32>*> [#uses=2]
- %0 = alloca <4 x i32> ; <<4 x i32>*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store <4 x i32> %v, <4 x i32>* %v_addr
- store i32 %f, i32* %f_addr
- %1 = load <4 x i32>* %v_addr, align 16 ; <<4 x i32>> [#uses=1]
- %2 = load i32* %f_addr, align 4 ; <i32> [#uses=1]
- %3 = insertelement <4 x i32> undef, i32 %2, i32 0 ; <<4 x i32>> [#uses=1]
- %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>> [#uses=1]
- %5 = mul <4 x i32> %1, %4 ; <<4 x i32>> [#uses=1]
- store <4 x i32> %5, <4 x i32>* %0, align 16
- %6 = load <4 x i32>* %0, align 16 ; <<4 x i32>> [#uses=1]
- store <4 x i32> %6, <4 x i32>* %retval, align 16
- br label %return
-
-return: ; preds = %entry
- %retval1 = load <4 x i32>* %retval ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %retval1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-04-RegScavengerAssert-2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-04-RegScavengerAssert-2.ll
deleted file mode 100644
index 65ffed2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-04-RegScavengerAssert-2.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -mtriple=armv6-elf
-; PR4528
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
-target triple = "armv6-elf"
-
-define arm_aapcscc i32 @file_read_actor(i32* nocapture %desc, i32* %page, i32 %offset, i32 %size) nounwind optsize {
-entry:
- br i1 undef, label %fault_in_pages_writeable.exit, label %bb5.i
-
-bb5.i: ; preds = %entry
- %asmtmp.i = tail call i32 asm sideeffect "1:\09strbt\09$1,[$2]\0A2:\0A\09.section .fixup,\22ax\22\0A\09.align\092\0A3:\09mov\09$0, $3\0A\09b\092b\0A\09.previous\0A\09.section __ex_table,\22a\22\0A\09.align\093\0A\09.long\091b, 3b\0A\09.previous", "=r,r,r,i,0,~{cc}"(i8 0, i32 undef, i32 -14, i32 0) nounwind ; <i32> [#uses=1]
- %0 = icmp eq i32 %asmtmp.i, 0 ; <i1> [#uses=1]
- br i1 %0, label %bb6.i, label %fault_in_pages_writeable.exit
-
-bb6.i: ; preds = %bb5.i
- br i1 undef, label %fault_in_pages_writeable.exit, label %bb7.i
-
-bb7.i: ; preds = %bb6.i
- unreachable
-
-fault_in_pages_writeable.exit: ; preds = %bb6.i, %bb5.i, %entry
- br i1 undef, label %bb2, label %bb3
-
-bb2: ; preds = %fault_in_pages_writeable.exit
- unreachable
-
-bb3: ; preds = %fault_in_pages_writeable.exit
- %1 = tail call arm_aapcscc i32 @__copy_to_user(i8* undef, i8* undef, i32 undef) nounwind ; <i32> [#uses=0]
- unreachable
-}
-
-declare arm_aapcscc i32 @__copy_to_user(i8*, i8*, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-04-RegScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-04-RegScavengerAssert.ll
deleted file mode 100644
index 9e5372a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-04-RegScavengerAssert.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -mtriple=armv6-elf
-; PR4528
-
-define arm_aapcscc i32 @file_read_actor(i32 %desc, i32 %page, i32 %offset, i32 %size) nounwind optsize {
-entry:
- br i1 undef, label %fault_in_pages_writeable.exit, label %bb5.i
-
-bb5.i: ; preds = %entry
- %asmtmp.i = tail call i32 asm sideeffect "1:\09strbt\09$1,[$2]\0A2:\0A\09.section .fixup,\22ax\22\0A\09.align\092\0A3:\09mov\09$0, $3\0A\09b\092b\0A\09.previous\0A\09.section __ex_table,\22a\22\0A\09.align\093\0A\09.long\091b, 3b\0A\09.previous", "=r,r,r,i,0,~{cc}"(i8 0, i32 undef, i32 -14, i32 0) nounwind ; <i32> [#uses=1]
- br label %fault_in_pages_writeable.exit
-
-fault_in_pages_writeable.exit: ; preds = %bb5.i, %entry
- %0 = phi i32 [ 0, %entry ], [ %asmtmp.i, %bb5.i ] ; <i32> [#uses=1]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %bb2, label %bb3
-
-bb2: ; preds = %fault_in_pages_writeable.exit
- unreachable
-
-bb3: ; preds = %fault_in_pages_writeable.exit
- %2 = tail call arm_aapcscc i32 @__copy_to_user(i8* undef, i8* undef, i32 undef) nounwind ; <i32> [#uses=0]
- unreachable
-}
-
-declare arm_aapcscc i32 @__copy_to_user(i8*, i8*, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-15-RegScavenger-EarlyClobber.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-15-RegScavenger-EarlyClobber.ll
deleted file mode 100644
index 18d68f7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-15-RegScavenger-EarlyClobber.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -march=arm
-; PR4528
-
-; Inline asm is allowed to contain operands "=&r", "0".
-
-%struct.device_dma_parameters = type { i32, i32 }
-%struct.iovec = type { i8*, i32 }
-
-define arm_aapcscc i32 @generic_segment_checks(%struct.iovec* nocapture %iov, i32* nocapture %nr_segs, i32* nocapture %count, i32 %access_flags) nounwind optsize {
-entry:
- br label %bb8
-
-bb: ; preds = %bb8
- br i1 undef, label %bb10, label %bb2
-
-bb2: ; preds = %bb
- %asmtmp = tail call %struct.device_dma_parameters asm "adds $1, $2, $3; sbcccs $1, $1, $0; movcc $0, #0", "=&r,=&r,r,Ir,0,~{cc}"(i8* undef, i32 undef, i32 0) nounwind; <%struct.device_dma_parameters> [#uses=1]
- %asmresult = extractvalue %struct.device_dma_parameters %asmtmp, 0; <i32> [#uses=1]
- %0 = icmp eq i32 %asmresult, 0 ; <i1> [#uses=1]
- br i1 %0, label %bb7, label %bb4
-
-bb4: ; preds = %bb2
- br i1 undef, label %bb10, label %bb9
-
-bb7: ; preds = %bb2
- %1 = add i32 %2, 1 ; <i32> [#uses=1]
- br label %bb8
-
-bb8: ; preds = %bb7, %entry
- %2 = phi i32 [ 0, %entry ], [ %1, %bb7 ] ; <i32> [#uses=3]
- %scevgep22 = getelementptr %struct.iovec* %iov, i32 %2, i32 0; <i8**> [#uses=0]
- %3 = load i32* %nr_segs, align 4 ; <i32> [#uses=1]
- %4 = icmp ult i32 %2, %3 ; <i1> [#uses=1]
- br i1 %4, label %bb, label %bb9
-
-bb9: ; preds = %bb8, %bb4
- store i32 undef, i32* %count, align 4
- ret i32 0
-
-bb10: ; preds = %bb4, %bb
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-15-RegScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-15-RegScavengerAssert.ll
deleted file mode 100644
index a46482c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-15-RegScavengerAssert.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=arm
-; PR4716
-
-define arm_aapcscc void @_start() nounwind naked {
-entry:
- tail call arm_aapcscc void @exit(i32 undef) noreturn nounwind
- unreachable
-}
-
-declare arm_aapcscc void @exit(i32) noreturn nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-21-PostRAKill.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-21-PostRAKill.ll
deleted file mode 100644
index 84915c4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-21-PostRAKill.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 -post-RA-scheduler -mcpu=cortex-a8
-
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
-target triple = "armv7-apple-darwin9"
-
-%struct.tree = type { i32, double, double, %struct.tree*, %struct.tree*, %struct.tree*, %struct.tree* }
-@g = common global %struct.tree* null
-
-define arm_apcscc %struct.tree* @tsp(%struct.tree* %t, i32 %nproc) nounwind {
-entry:
- %t.idx51.val.i = load double* null ; <double> [#uses=1]
- br i1 undef, label %bb4.i, label %bb.i
-
-bb.i: ; preds = %entry
- unreachable
-
-bb4.i: ; preds = %entry
- %0 = load %struct.tree** @g, align 4 ; <%struct.tree*> [#uses=2]
- %.idx45.i = getelementptr %struct.tree* %0, i32 0, i32 1 ; <double*> [#uses=1]
- %.idx45.val.i = load double* %.idx45.i ; <double> [#uses=1]
- %.idx46.i = getelementptr %struct.tree* %0, i32 0, i32 2 ; <double*> [#uses=1]
- %.idx46.val.i = load double* %.idx46.i ; <double> [#uses=1]
- %1 = fsub double 0.000000e+00, %.idx45.val.i ; <double> [#uses=2]
- %2 = fmul double %1, %1 ; <double> [#uses=1]
- %3 = fsub double %t.idx51.val.i, %.idx46.val.i ; <double> [#uses=2]
- %4 = fmul double %3, %3 ; <double> [#uses=1]
- %5 = fadd double %2, %4 ; <double> [#uses=1]
- %6 = tail call double @llvm.sqrt.f64(double %5) nounwind ; <double> [#uses=1]
- br i1 undef, label %bb7.i4, label %bb6.i
-
-bb6.i: ; preds = %bb4.i
- br label %bb7.i4
-
-bb7.i4: ; preds = %bb6.i, %bb4.i
- %tton1.0.i = phi double [ %6, %bb6.i ], [ undef, %bb4.i ] ; <double> [#uses=0]
- unreachable
-}
-
-declare double @llvm.sqrt.f64(double) nounwind readonly
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-21-PostRAKill2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-21-PostRAKill2.ll
deleted file mode 100644
index a21ffc3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-21-PostRAKill2.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llc < %s -asm-verbose=false -O3 -relocation-model=pic -disable-fp-elim -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -post-RA-scheduler
-
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
-target triple = "armv7-apple-darwin9"
-
-%struct.anon = type { [3 x double], double, %struct.node*, [64 x %struct.bnode*], [64 x %struct.bnode*] }
-%struct.bnode = type { i16, double, [3 x double], i32, i32, [3 x double], [3 x double], [3 x double], double, %struct.bnode*, %struct.bnode* }
-%struct.icstruct = type { [3 x i32], i16 }
-%struct.node = type { i16, double, [3 x double], i32, i32 }
-
-declare arm_apcscc double @floor(double) nounwind readnone
-
-define void @intcoord(%struct.icstruct* noalias nocapture sret %agg.result, i1 %a, double %b) {
-entry:
- br i1 %a, label %bb3, label %bb1
-
-bb1: ; preds = %entry
- unreachable
-
-bb3: ; preds = %entry
- br i1 %a, label %bb7, label %bb5
-
-bb5: ; preds = %bb3
- unreachable
-
-bb7: ; preds = %bb3
- br i1 %a, label %bb11, label %bb9
-
-bb9: ; preds = %bb7
- %0 = tail call arm_apcscc double @floor(double %b) nounwind readnone ; <double> [#uses=0]
- br label %bb11
-
-bb11: ; preds = %bb9, %bb7
- %1 = getelementptr %struct.icstruct* %agg.result, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
- store i32 0, i32* %1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-21-PostRAKill3.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-21-PostRAKill3.ll
deleted file mode 100644
index e3d8ea6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-21-PostRAKill3.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -asm-verbose=false -O3 -relocation-model=pic -disable-fp-elim -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -post-RA-scheduler
-
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
-target triple = "armv7-apple-darwin9"
-
-%struct.Hosp = type { i32, i32, i32, %struct.List, %struct.List, %struct.List, %struct.List }
-%struct.List = type { %struct.List*, %struct.Patient*, %struct.List* }
-%struct.Patient = type { i32, i32, i32, %struct.Village* }
-%struct.Village = type { [4 x %struct.Village*], %struct.Village*, %struct.List, %struct.Hosp, i32, i32 }
-
-define arm_apcscc %struct.Village* @alloc_tree(i32 %level, i32 %label, %struct.Village* %back, i1 %p) nounwind {
-entry:
- br i1 %p, label %bb8, label %bb1
-
-bb1: ; preds = %entry
- %0 = malloc %struct.Village ; <%struct.Village*> [#uses=3]
- %exp2 = call double @ldexp(double 1.000000e+00, i32 %level) nounwind ; <double> [#uses=1]
- %.c = fptosi double %exp2 to i32 ; <i32> [#uses=1]
- store i32 %.c, i32* null
- %1 = getelementptr %struct.Village* %0, i32 0, i32 3, i32 6, i32 0 ; <%struct.List**> [#uses=1]
- store %struct.List* null, %struct.List** %1
- %2 = getelementptr %struct.Village* %0, i32 0, i32 3, i32 6, i32 2 ; <%struct.List**> [#uses=1]
- store %struct.List* null, %struct.List** %2
- ret %struct.Village* %0
-
-bb8: ; preds = %entry
- ret %struct.Village* null
-}
-
-declare double @ldexp(double, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-21-PostRAKill4.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-21-PostRAKill4.ll
deleted file mode 100644
index 9123377..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-21-PostRAKill4.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -asm-verbose=false -O3 -relocation-model=pic -disable-fp-elim -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -post-RA-scheduler
-
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
-target triple = "armv7-apple-darwin9"
-
-@.str = external constant [36 x i8], align 1 ; <[36 x i8]*> [#uses=0]
-@.str1 = external constant [31 x i8], align 1 ; <[31 x i8]*> [#uses=1]
-@.str2 = external constant [4 x i8], align 1 ; <[4 x i8]*> [#uses=1]
-
-declare arm_apcscc i32 @getUnknown(i32, ...) nounwind
-
-declare void @llvm.va_start(i8*) nounwind
-
-declare void @llvm.va_end(i8*) nounwind
-
-declare arm_apcscc i32 @printf(i8* nocapture, ...) nounwind
-
-define arm_apcscc i32 @main() nounwind {
-entry:
- %0 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([31 x i8]* @.str1, i32 0, i32 0), i32 1, i32 1, i32 1, i32 1, i32 1, i32 1) nounwind ; <i32> [#uses=0]
- %1 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([31 x i8]* @.str1, i32 0, i32 0), i32 -128, i32 116, i32 116, i32 -3852, i32 -31232, i32 -1708916736) nounwind ; <i32> [#uses=0]
- %2 = tail call arm_apcscc i32 (i32, ...)* @getUnknown(i32 undef, i32 116, i32 116, i32 -3852, i32 -31232, i32 30556, i32 -1708916736) nounwind ; <i32> [#uses=1]
- %3 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @.str2, i32 0, i32 0), i32 %2) nounwind ; <i32> [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-23-linkerprivate.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-23-linkerprivate.ll
deleted file mode 100644
index 0fad533..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-23-linkerprivate.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=arm-apple-darwin | FileCheck %s
-
-; ModuleID = '/Volumes/MacOS9/tests/WebKit/JavaScriptCore/profiler/ProfilerServer.mm'
-
-@"\01l_objc_msgSend_fixup_alloc" = linker_private hidden global i32 0, section "__DATA, __objc_msgrefs, coalesced", align 16 ; <i32*> [#uses=0]
-
-; CHECK: .globl l_objc_msgSend_fixup_alloc
-; CHECK: .weak_definition l_objc_msgSend_fixup_alloc
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-26-ScalarToVector.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-26-ScalarToVector.ll
deleted file mode 100644
index c6ef256..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-26-ScalarToVector.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -mattr=+neon | not grep fldmfdd
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
-target triple = "thumbv7-elf"
-
-%bar = type { float, float, float }
-%baz = type { i32, [16 x %bar], [16 x float], [16 x i32], i8 }
-%foo = type { <4 x float> }
-%quux = type { i32 (...)**, %baz*, i32 }
-%quuz = type { %quux, i32, %bar, [128 x i8], [16 x %foo], %foo, %foo, %foo }
-
-declare <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-define arm_apcscc void @_ZN6squish10ClusterFit9Compress3EPv(%quuz* %this, i8* %block) {
-entry:
- %0 = lshr <4 x i32> zeroinitializer, <i32 31, i32 31, i32 31, i32 31> ; <<4 x i32>> [#uses=1]
- %1 = shufflevector <4 x i32> %0, <4 x i32> undef, <2 x i32> <i32 2, i32 3> ; <<2 x i32>> [#uses=1]
- %2 = call <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32> undef, <2 x i32> %1) nounwind ; <<2 x i32>> [#uses=1]
- %3 = extractelement <2 x i32> %2, i32 0 ; <i32> [#uses=1]
- %not..i = icmp eq i32 %3, undef ; <i1> [#uses=1]
- br i1 %not..i, label %return, label %bb221
-
-bb221: ; preds = %bb221, %entry
- br label %bb221
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-27-ScalarToVector.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-27-ScalarToVector.ll
deleted file mode 100644
index bc5bfe9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-27-ScalarToVector.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s -mattr=+neon | not grep fldmfdd
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
-target triple = "thumbv7-elf"
-
-%bar = type { float, float, float }
-%baz = type { i32, [16 x %bar], [16 x float], [16 x i32], i8 }
-%foo = type { <4 x float> }
-%quux = type { i32 (...)**, %baz*, i32 }
-%quuz = type { %quux, i32, %bar, [128 x i8], [16 x %foo], %foo, %foo, %foo }
-
-define arm_apcscc void @aaaa(%quuz* %this, i8* %block) {
-entry:
- br i1 undef, label %bb.nph269, label %bb201
-
-bb.nph269: ; preds = %entry
- br label %bb12
-
-bb12: ; preds = %bb194, %bb.nph269
- %0 = fmul <4 x float> undef, undef ; <<4 x float>> [#uses=1]
- %1 = shufflevector <4 x float> %0, <4 x float> undef, <2 x i32> <i32 2, i32 3> ; <<2 x float>> [#uses=1]
- %2 = shufflevector <2 x float> %1, <2 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
- %3 = fadd <4 x float> undef, %2 ; <<4 x float>> [#uses=1]
- br i1 undef, label %bb194, label %bb186
-
-bb186: ; preds = %bb12
- br label %bb194
-
-bb194: ; preds = %bb186, %bb12
- %besterror.0.0 = phi <4 x float> [ %3, %bb186 ], [ undef, %bb12 ] ; <<4 x float>> [#uses=0]
- %indvar.next294 = add i32 undef, 1 ; <i32> [#uses=0]
- br label %bb12
-
-bb201: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-29-ExtractEltf32.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-29-ExtractEltf32.ll
deleted file mode 100644
index d5178b4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-29-ExtractEltf32.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -mattr=+neon
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
-target triple = "thumbv7-elf"
-
-define arm_apcscc void @foo() nounwind {
-entry:
- %0 = tail call <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float> undef, <2 x float> undef) nounwind ; <<2 x float>> [#uses=1]
- %tmp28 = extractelement <2 x float> %0, i32 0 ; <float> [#uses=1]
- %1 = fcmp une float %tmp28, 4.900000e+01 ; <i1> [#uses=1]
- br i1 %1, label %bb, label %bb7
-
-bb: ; preds = %entry
- unreachable
-
-bb7: ; preds = %entry
- br i1 undef, label %bb8, label %bb9
-
-bb8: ; preds = %bb7
- unreachable
-
-bb9: ; preds = %bb7
- ret void
-}
-
-declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-29-TooLongSplat.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-29-TooLongSplat.ll
deleted file mode 100644
index 266fce6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-29-TooLongSplat.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -mattr=+neon
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
-target triple = "thumbv7-elf"
-
-define arm_apcscc void @aaa() nounwind {
-entry:
- %0 = fmul <4 x float> undef, <float 1.000000e+00, float 1.000000e+01, float 1.000000e+02, float 0x3EB0C6F7A0000000> ; <<4 x float>> [#uses=1]
- %tmp31 = extractelement <4 x float> %0, i32 0 ; <float> [#uses=1]
- %1 = fpext float %tmp31 to double ; <double> [#uses=1]
- %2 = fsub double 1.000000e+00, %1 ; <double> [#uses=1]
- %3 = fdiv double %2, 1.000000e+00 ; <double> [#uses=1]
- %4 = tail call double @fabs(double %3) nounwind readnone ; <double> [#uses=1]
- %5 = fcmp ogt double %4, 1.000000e-05 ; <i1> [#uses=1]
- br i1 %5, label %bb, label %bb7
-
-bb: ; preds = %entry
- unreachable
-
-bb7: ; preds = %entry
- unreachable
-}
-
-declare double @fabs(double)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-31-LSDA-Name.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-31-LSDA-Name.ll
deleted file mode 100644
index b6cf880..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-31-LSDA-Name.ll
+++ /dev/null
@@ -1,103 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin9 -march=arm | FileCheck %s
-
-%struct.A = type { i32* }
-
-define arm_apcscc void @"\01-[MyFunction Name:]"() {
-entry:
- %save_filt.1 = alloca i32 ; <i32*> [#uses=2]
- %save_eptr.0 = alloca i8* ; <i8**> [#uses=2]
- %a = alloca %struct.A ; <%struct.A*> [#uses=3]
- %eh_exception = alloca i8* ; <i8**> [#uses=5]
- %eh_selector = alloca i32 ; <i32*> [#uses=3]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- call arm_apcscc void @_ZN1AC1Ev(%struct.A* %a)
- invoke arm_apcscc void @_Z3barv()
- to label %invcont unwind label %lpad
-
-invcont: ; preds = %entry
- call arm_apcscc void @_ZN1AD1Ev(%struct.A* %a) nounwind
- br label %return
-
-bb: ; preds = %ppad
- %eh_select = load i32* %eh_selector ; <i32> [#uses=1]
- store i32 %eh_select, i32* %save_filt.1, align 4
- %eh_value = load i8** %eh_exception ; <i8*> [#uses=1]
- store i8* %eh_value, i8** %save_eptr.0, align 4
- call arm_apcscc void @_ZN1AD1Ev(%struct.A* %a) nounwind
- %0 = load i8** %save_eptr.0, align 4 ; <i8*> [#uses=1]
- store i8* %0, i8** %eh_exception, align 4
- %1 = load i32* %save_filt.1, align 4 ; <i32> [#uses=1]
- store i32 %1, i32* %eh_selector, align 4
- br label %Unwind
-
-return: ; preds = %invcont
- ret void
-
-lpad: ; preds = %entry
- %eh_ptr = call i8* @llvm.eh.exception() ; <i8*> [#uses=1]
- store i8* %eh_ptr, i8** %eh_exception
- %eh_ptr1 = load i8** %eh_exception ; <i8*> [#uses=1]
- %eh_select2 = call i32 (i8*, i8*, ...)* @llvm.eh.selector.i32(i8* %eh_ptr1, i8* bitcast (i32 (...)* @__gxx_personality_sj0 to i8*), i32 0) ; <i32> [#uses=1]
- store i32 %eh_select2, i32* %eh_selector
- br label %ppad
-
-ppad: ; preds = %lpad
- br label %bb
-
-Unwind: ; preds = %bb
- %eh_ptr3 = load i8** %eh_exception ; <i8*> [#uses=1]
- call arm_apcscc void @_Unwind_SjLj_Resume(i8* %eh_ptr3)
- unreachable
-}
-
-define linkonce_odr arm_apcscc void @_ZN1AC1Ev(%struct.A* %this) {
-entry:
- %this_addr = alloca %struct.A* ; <%struct.A**> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store %struct.A* %this, %struct.A** %this_addr
- %0 = call arm_apcscc i8* @_Znwm(i32 4) ; <i8*> [#uses=1]
- %1 = bitcast i8* %0 to i32* ; <i32*> [#uses=1]
- %2 = load %struct.A** %this_addr, align 4 ; <%struct.A*> [#uses=1]
- %3 = getelementptr inbounds %struct.A* %2, i32 0, i32 0 ; <i32**> [#uses=1]
- store i32* %1, i32** %3, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare arm_apcscc i8* @_Znwm(i32)
-
-define linkonce_odr arm_apcscc void @_ZN1AD1Ev(%struct.A* %this) nounwind {
-entry:
- %this_addr = alloca %struct.A* ; <%struct.A**> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store %struct.A* %this, %struct.A** %this_addr
- %0 = load %struct.A** %this_addr, align 4 ; <%struct.A*> [#uses=1]
- %1 = getelementptr inbounds %struct.A* %0, i32 0, i32 0 ; <i32**> [#uses=1]
- %2 = load i32** %1, align 4 ; <i32*> [#uses=1]
- %3 = bitcast i32* %2 to i8* ; <i8*> [#uses=1]
- call arm_apcscc void @_ZdlPv(i8* %3) nounwind
- br label %bb
-
-bb: ; preds = %entry
- br label %return
-
-return: ; preds = %bb
- ret void
-}
-;CHECK: L_LSDA_1:
-
-declare arm_apcscc void @_ZdlPv(i8*) nounwind
-
-declare arm_apcscc void @_Z3barv()
-
-declare i8* @llvm.eh.exception() nounwind
-
-declare i32 @llvm.eh.selector.i32(i8*, i8*, ...) nounwind
-
-declare i32 @llvm.eh.typeid.for.i32(i8*) nounwind
-
-declare arm_apcscc i32 @__gxx_personality_sj0(...)
-
-declare arm_apcscc void @_Unwind_SjLj_Resume(i8*)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll
deleted file mode 100644
index e1e60e6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-; pr4843
-define <4 x i16> @v2regbug(<4 x i16>* %B) nounwind {
-;CHECK: v2regbug:
-;CHECK: vzip.16
- %tmp1 = load <4 x i16>* %B
- %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32><i32 0, i32 0, i32 1, i32 1>
- ret <4 x i16> %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-01-PostRAProlog.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-01-PostRAProlog.ll
deleted file mode 100644
index bf91fe0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-01-PostRAProlog.ll
+++ /dev/null
@@ -1,106 +0,0 @@
-; RUN: llc -asm-verbose=false -O3 -relocation-model=pic -disable-fp-elim -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 < %s | FileCheck %s
-
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
-target triple = "thumbv7-apple-darwin9"
-
-@history = internal global [2 x [56 x i32]] [[56 x i32] [i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0], [56 x i32] [i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 2, i32 5, i32 8, i32 10, i32 8, i32 5, i32 2, i32 -1, i32 1, i32 3, i32 5, i32 7, i32 5, i32 3, i32 1, i32 -1, i32 0, i32 1, i32 2, i32 4, i32 2, i32 1, i32 0]] ; <[2 x [56 x i32]]*> [#uses=3]
-@nodes = internal global i64 0 ; <i64*> [#uses=4]
-@.str = private constant [9 x i8] c"##-<=>+#\00", align 1 ; <[9 x i8]*> [#uses=2]
-@.str1 = private constant [6 x i8] c"%c%d\0A\00", align 1 ; <[6 x i8]*> [#uses=1]
-@.str2 = private constant [16 x i8] c"Fhourstones 2.0\00", align 1 ; <[16 x i8]*> [#uses=1]
-@.str3 = private constant [54 x i8] c"Using %d transposition table entries with %d probes.\0A\00", align 1 ; <[54 x i8]*> [#uses=1]
-@.str4 = private constant [31 x i8] c"Solving %d-ply position after \00", align 1 ; <[31 x i8]*> [#uses=1]
-@.str5 = private constant [7 x i8] c" . . .\00", align 1 ; <[7 x i8]*> [#uses=1]
-@.str6 = private constant [28 x i8] c"score = %d (%c) work = %d\0A\00", align 1 ; <[28 x i8]*> [#uses=1]
-@.str7 = private constant [36 x i8] c"%lu pos / %lu msec = %.1f Kpos/sec\0A\00", align 1 ; <[36 x i8]*> [#uses=1]
-@plycnt = internal global i32 0 ; <i32*> [#uses=21]
-@dias = internal global [19 x i32] zeroinitializer ; <[19 x i32]*> [#uses=43]
-@columns = internal global [128 x i32] zeroinitializer ; <[128 x i32]*> [#uses=18]
-@height = internal global [128 x i32] zeroinitializer ; <[128 x i32]*> [#uses=21]
-@rows = internal global [8 x i32] zeroinitializer ; <[8 x i32]*> [#uses=20]
-@colthr = internal global [128 x i32] zeroinitializer ; <[128 x i32]*> [#uses=5]
-@moves = internal global [44 x i32] zeroinitializer ; <[44 x i32]*> [#uses=9]
-@.str8 = private constant [3 x i8] c"%d\00", align 1 ; <[3 x i8]*> [#uses=1]
-@he = internal global i8* null ; <i8**> [#uses=9]
-@hits = internal global i64 0 ; <i64*> [#uses=8]
-@posed = internal global i64 0 ; <i64*> [#uses=7]
-@ht = internal global i32* null ; <i32**> [#uses=5]
-@.str16 = private constant [19 x i8] c"store rate = %.3f\0A\00", align 1 ; <[19 x i8]*> [#uses=1]
-@.str117 = private constant [45 x i8] c"- %5.3f < %5.3f = %5.3f > %5.3f + %5.3f\0A\00", align 1 ; <[45 x i8]*> [#uses=1]
-@.str218 = private constant [6 x i8] c"%7d%c\00", align 1 ; <[6 x i8]*> [#uses=1]
-@.str319 = private constant [30 x i8] c"Failed to allocate %u bytes.\0A\00", align 1 ; <[30 x i8]*> [#uses=1]
-
-declare arm_apcscc i32 @puts(i8* nocapture) nounwind
-
-declare arm_apcscc i32 @getchar() nounwind
-
-define internal arm_apcscc i32 @transpose() nounwind readonly {
-; CHECK: push
-entry:
- %0 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 1), align 4 ; <i32> [#uses=1]
- %1 = shl i32 %0, 7 ; <i32> [#uses=1]
- %2 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 2), align 4 ; <i32> [#uses=1]
- %3 = or i32 %1, %2 ; <i32> [#uses=1]
- %4 = shl i32 %3, 7 ; <i32> [#uses=1]
- %5 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 3), align 4 ; <i32> [#uses=1]
- %6 = or i32 %4, %5 ; <i32> [#uses=3]
- %7 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 7), align 4 ; <i32> [#uses=1]
- %8 = shl i32 %7, 7 ; <i32> [#uses=1]
- %9 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 6), align 4 ; <i32> [#uses=1]
- %10 = or i32 %8, %9 ; <i32> [#uses=1]
- %11 = shl i32 %10, 7 ; <i32> [#uses=1]
- %12 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 5), align 4 ; <i32> [#uses=1]
- %13 = or i32 %11, %12 ; <i32> [#uses=3]
- %14 = icmp ugt i32 %6, %13 ; <i1> [#uses=2]
- %.pn2.in.i = select i1 %14, i32 %6, i32 %13 ; <i32> [#uses=1]
- %.pn1.in.i = select i1 %14, i32 %13, i32 %6 ; <i32> [#uses=1]
- %.pn2.i = shl i32 %.pn2.in.i, 7 ; <i32> [#uses=1]
- %.pn3.i = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 4) ; <i32> [#uses=1]
- %.pn.in.in.i = or i32 %.pn2.i, %.pn3.i ; <i32> [#uses=1]
- %.pn.in.i = zext i32 %.pn.in.in.i to i64 ; <i64> [#uses=1]
- %.pn.i = shl i64 %.pn.in.i, 21 ; <i64> [#uses=1]
- %.pn1.i = zext i32 %.pn1.in.i to i64 ; <i64> [#uses=1]
- %iftmp.22.0.i = or i64 %.pn.i, %.pn1.i ; <i64> [#uses=2]
- %15 = lshr i64 %iftmp.22.0.i, 17 ; <i64> [#uses=1]
- %16 = trunc i64 %15 to i32 ; <i32> [#uses=2]
- %17 = urem i64 %iftmp.22.0.i, 1050011 ; <i64> [#uses=1]
- %18 = trunc i64 %17 to i32 ; <i32> [#uses=1]
- %19 = urem i32 %16, 179 ; <i32> [#uses=1]
- %20 = or i32 %19, 131072 ; <i32> [#uses=1]
- %21 = load i32** @ht, align 4 ; <i32*> [#uses=1]
- br label %bb5
-
-bb: ; preds = %bb5
- %22 = getelementptr inbounds i32* %21, i32 %x.0 ; <i32*> [#uses=1]
- %23 = load i32* %22, align 4 ; <i32> [#uses=1]
- %24 = icmp eq i32 %23, %16 ; <i1> [#uses=1]
- br i1 %24, label %bb1, label %bb2
-
-bb1: ; preds = %bb
- %25 = load i8** @he, align 4 ; <i8*> [#uses=1]
- %26 = getelementptr inbounds i8* %25, i32 %x.0 ; <i8*> [#uses=1]
- %27 = load i8* %26, align 1 ; <i8> [#uses=1]
- %28 = sext i8 %27 to i32 ; <i32> [#uses=1]
- ret i32 %28
-
-bb2: ; preds = %bb
- %29 = add nsw i32 %20, %x.0 ; <i32> [#uses=3]
- %30 = add i32 %29, -1050011 ; <i32> [#uses=1]
- %31 = icmp sgt i32 %29, 1050010 ; <i1> [#uses=1]
- %. = select i1 %31, i32 %30, i32 %29 ; <i32> [#uses=1]
- %32 = add i32 %33, 1 ; <i32> [#uses=1]
- br label %bb5
-
-bb5: ; preds = %bb2, %entry
- %33 = phi i32 [ 0, %entry ], [ %32, %bb2 ] ; <i32> [#uses=2]
- %x.0 = phi i32 [ %18, %entry ], [ %., %bb2 ] ; <i32> [#uses=3]
- %34 = icmp sgt i32 %33, 7 ; <i1> [#uses=1]
- br i1 %34, label %bb7, label %bb
-
-bb7: ; preds = %bb5
- ret i32 -128
-}
-
-declare arm_apcscc noalias i8* @calloc(i32, i32) nounwind
-
-declare void @llvm.memset.i64(i8* nocapture, i8, i64, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-09-AllOnes.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-09-AllOnes.ll
deleted file mode 100644
index f654a16..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-09-AllOnes.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc -mattr=+neon < %s
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
-target triple = "thumbv7-elf"
-
-define arm_apcscc void @foo() {
-entry:
- %0 = insertelement <4 x i32> undef, i32 -1, i32 3
- store <4 x i32> %0, <4 x i32>* undef, align 16
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-09-fpcmp-ole.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-09-fpcmp-ole.ll
deleted file mode 100644
index 3909c6a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-09-fpcmp-ole.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc -O1 -march=arm -mattr=+vfp2 < %s | FileCheck %s
-; pr4939
-
-define void @test(double* %x, double* %y) nounwind {
- %1 = load double* %x, align 4
- %2 = load double* %y, align 4
- %3 = fsub double -0.000000e+00, %1
- %4 = fcmp ugt double %2, %3
- br i1 %4, label %bb1, label %bb2
-
-bb1:
-;CHECK: vstrhi.64
- store double %1, double* %y, align 4
- br label %bb2
-
-bb2:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-10-postdec.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-10-postdec.ll
deleted file mode 100644
index 10653b5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-10-postdec.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc -march=arm < %s | FileCheck %s
-; Radar 7213850
-
-define i32 @test(i8* %d, i32 %x, i32 %y) nounwind {
- %1 = ptrtoint i8* %d to i32
-;CHECK: sub
- %2 = sub i32 %x, %1
- %3 = add nsw i32 %2, %y
- store i8 0, i8* %d, align 1
- ret i32 %3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-13-InvalidSubreg.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-13-InvalidSubreg.ll
deleted file mode 100644
index 13adb24..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-13-InvalidSubreg.ll
+++ /dev/null
@@ -1,61 +0,0 @@
-; RUN: llc -mattr=+neon < %s
-; PR4965
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "armv7-eabi"
-
-%struct.fr = type { [6 x %struct.pl] }
-%struct.obb = type { %"struct.m4", %"struct.p3" }
-%struct.pl = type { %"struct.p3" }
-%"struct.m4" = type { %"struct.p3", %"struct.p3", %"struct.p3", %"struct.p3" }
-%"struct.p3" = type { <4 x float> }
-
-declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>) nounwind readnone
-
-define arm_aapcs_vfpcc i8 @foo(%struct.fr* nocapture %this, %struct.obb* %box) nounwind {
-entry:
- %val.i.i = load <4 x float>* undef ; <<4 x float>> [#uses=1]
- %val2.i.i = load <4 x float>* null ; <<4 x float>> [#uses=1]
- %elt3.i.i = getelementptr inbounds %struct.obb* %box, i32 0, i32 0, i32 2, i32 0 ; <<4 x float>*> [#uses=1]
- %val4.i.i = load <4 x float>* %elt3.i.i ; <<4 x float>> [#uses=1]
- %0 = shufflevector <2 x float> undef, <2 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
- %1 = fadd <4 x float> undef, zeroinitializer ; <<4 x float>> [#uses=1]
- br label %bb33
-
-bb: ; preds = %bb33
- %2 = fmul <4 x float> %val.i.i, undef ; <<4 x float>> [#uses=1]
- %3 = fmul <4 x float> %val2.i.i, undef ; <<4 x float>> [#uses=1]
- %4 = fadd <4 x float> %3, %2 ; <<4 x float>> [#uses=1]
- %5 = fmul <4 x float> %val4.i.i, undef ; <<4 x float>> [#uses=1]
- %6 = fadd <4 x float> %5, %4 ; <<4 x float>> [#uses=1]
- %7 = bitcast <4 x float> %6 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %8 = and <4 x i32> %7, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648> ; <<4 x i32>> [#uses=1]
- %9 = or <4 x i32> %8, undef ; <<4 x i32>> [#uses=1]
- %10 = bitcast <4 x i32> %9 to <4 x float> ; <<4 x float>> [#uses=1]
- %11 = shufflevector <4 x float> %10, <4 x float> undef, <2 x i32> <i32 0, i32 1> ; <<2 x float>> [#uses=1]
- %12 = shufflevector <2 x float> %11, <2 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
- %13 = fmul <4 x float> undef, %12 ; <<4 x float>> [#uses=1]
- %14 = fmul <4 x float> %0, undef ; <<4 x float>> [#uses=1]
- %15 = fadd <4 x float> %14, %13 ; <<4 x float>> [#uses=1]
- %16 = fadd <4 x float> undef, %15 ; <<4 x float>> [#uses=1]
- %17 = fadd <4 x float> %1, %16 ; <<4 x float>> [#uses=1]
- %18 = fmul <4 x float> zeroinitializer, %17 ; <<4 x float>> [#uses=1]
- %19 = insertelement <4 x float> %18, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=2]
- %20 = shufflevector <4 x float> %19, <4 x float> undef, <2 x i32> <i32 0, i32 1> ; <<2 x float>> [#uses=1]
- %21 = shufflevector <4 x float> %19, <4 x float> undef, <2 x i32> <i32 2, i32 3> ; <<2 x float>> [#uses=1]
- %22 = tail call <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float> %20, <2 x float> %21) nounwind ; <<2 x float>> [#uses=2]
- %23 = tail call <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float> %22, <2 x float> %22) nounwind ; <<2 x float>> [#uses=2]
- %24 = shufflevector <2 x float> %23, <2 x float> %23, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
- %25 = fadd <4 x float> %24, zeroinitializer ; <<4 x float>> [#uses=1]
- %tmp46 = extractelement <4 x float> %25, i32 0 ; <float> [#uses=1]
- %26 = fcmp olt float %tmp46, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %26, label %bb41, label %bb33
-
-bb33: ; preds = %bb, %entry
- br i1 undef, label %bb34, label %bb
-
-bb34: ; preds = %bb33
- ret i8 undef
-
-bb41: ; preds = %bb
- ret i8 1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-13-InvalidSuperReg.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-13-InvalidSuperReg.ll
deleted file mode 100644
index 758b59a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-13-InvalidSuperReg.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon -mcpu=cortex-a9
-
-define arm_aapcs_vfpcc <4 x float> @foo(i8* nocapture %pBuffer, i32 %numItems) nounwind {
- %1 = ptrtoint i8* %pBuffer to i32
-
- %lsr.iv2641 = inttoptr i32 %1 to float*
- %tmp29 = add i32 %1, 4
- %tmp2930 = inttoptr i32 %tmp29 to float*
- %tmp31 = add i32 %1, 8
- %tmp3132 = inttoptr i32 %tmp31 to float*
- %tmp33 = add i32 %1, 12
- %tmp3334 = inttoptr i32 %tmp33 to float*
- %tmp35 = add i32 %1, 16
- %tmp3536 = inttoptr i32 %tmp35 to float*
- %tmp37 = add i32 %1, 20
- %tmp3738 = inttoptr i32 %tmp37 to float*
- %tmp39 = add i32 %1, 24
- %tmp3940 = inttoptr i32 %tmp39 to float*
- %2 = load float* %lsr.iv2641, align 4
- %3 = load float* %tmp2930, align 4
- %4 = load float* %tmp3132, align 4
- %5 = load float* %tmp3334, align 4
- %6 = load float* %tmp3536, align 4
- %7 = load float* %tmp3738, align 4
- %8 = load float* %tmp3940, align 4
- %9 = insertelement <4 x float> undef, float %6, i32 0
- %10 = shufflevector <4 x float> %9, <4 x float> undef, <4 x i32> zeroinitializer
- %11 = insertelement <4 x float> %10, float %7, i32 1
- %12 = insertelement <4 x float> %11, float %8, i32 2
- %13 = insertelement <4 x float> undef, float %2, i32 0
- %14 = shufflevector <4 x float> %13, <4 x float> undef, <4 x i32> zeroinitializer
- %15 = insertelement <4 x float> %14, float %3, i32 1
- %16 = insertelement <4 x float> %15, float %4, i32 2
- %17 = insertelement <4 x float> %16, float %5, i32 3
- %18 = fsub <4 x float> zeroinitializer, %12
- %19 = shufflevector <4 x float> %18, <4 x float> undef, <4 x i32> zeroinitializer
- %20 = shufflevector <4 x float> %17, <4 x float> undef, <2 x i32> <i32 0, i32 1>
- %21 = shufflevector <2 x float> %20, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
-
- ret <4 x float> %21
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-20-LiveIntervalsBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-20-LiveIntervalsBug.ll
deleted file mode 100644
index 980f8ce..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-20-LiveIntervalsBug.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -mtriple=arm-eabi -mattr=+neon -mcpu=cortex-a9
-
-; PR4986
-
-define arm_aapcs_vfpcc void @foo(i8* nocapture %pBuffer, i32 %numItems) nounwind {
-entry:
- br i1 undef, label %return, label %bb.preheader
-
-bb.preheader: ; preds = %entry
- br label %bb
-
-bb: ; preds = %bb, %bb.preheader
- %0 = shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
- %1 = insertelement <4 x float> %0, float undef, i32 1 ; <<4 x float>> [#uses=1]
- %2 = insertelement <4 x float> %1, float undef, i32 2 ; <<4 x float>> [#uses=1]
- %3 = insertelement <4 x float> %2, float undef, i32 3 ; <<4 x float>> [#uses=1]
- %4 = fmul <4 x float> undef, %3 ; <<4 x float>> [#uses=1]
- %5 = extractelement <4 x float> %4, i32 3 ; <float> [#uses=1]
- store float %5, float* undef, align 4
- br i1 undef, label %return, label %bb
-
-return: ; preds = %bb, %entry
- ret void
-}
-
-define arm_aapcs_vfpcc <4 x float> @bar(i8* nocapture %pBuffer, i32 %numItems) nounwind {
- %1 = shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
- %2 = insertelement <4 x float> %1, float undef, i32 1 ; <<4 x float>> [#uses=1]
- %3 = insertelement <4 x float> %2, float undef, i32 2 ; <<4 x float>> [#uses=1]
- %4 = insertelement <4 x float> %3, float undef, i32 3 ; <<4 x float>> [#uses=1]
- %5 = shufflevector <4 x float> %4, <4 x float> undef, <2 x i32> <i32 0, i32 1> ; <<2 x float>> [#uses=1]
- %6 = shufflevector <2 x float> %5, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x float>> [#uses=1]
- ret <4 x float> %6
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-21-LiveVariablesBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-21-LiveVariablesBug.ll
deleted file mode 100644
index aace475..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-21-LiveVariablesBug.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mattr=+neon
-
-; PR5024
-
-%bar = type { <4 x float> }
-%foo = type { %bar, %bar, %bar, %bar }
-
-declare arm_aapcs_vfpcc <4 x float> @bbb(%bar*) nounwind
-
-define arm_aapcs_vfpcc void @aaa(%foo* noalias sret %agg.result, %foo* %tfrm) nounwind {
-entry:
- %0 = call arm_aapcs_vfpcc <4 x float> @bbb(%bar* undef) nounwind ; <<4 x float>> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-22-LiveVariablesBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-22-LiveVariablesBug.ll
deleted file mode 100644
index 30931a2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-22-LiveVariablesBug.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mattr=+neon
-
-; PR5024
-
-%bar = type { %foo, %foo }
-%foo = type { <4 x float> }
-
-declare arm_aapcs_vfpcc float @aaa(%foo* nocapture) nounwind readonly
-
-declare arm_aapcs_vfpcc %bar* @bbb(%bar*, <4 x float>, <4 x float>) nounwind
-
-define arm_aapcs_vfpcc void @ccc(i8* nocapture %pBuffer, i32 %numItems) nounwind {
-entry:
- br i1 undef, label %return, label %bb.nph
-
-bb.nph: ; preds = %entry
- %0 = call arm_aapcs_vfpcc %bar* @bbb(%bar* undef, <4 x float> undef, <4 x float> undef) nounwind ; <%bar*> [#uses=0]
- %1 = call arm_aapcs_vfpcc float @aaa(%foo* undef) nounwind ; <float> [#uses=0]
- unreachable
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-23-LiveVariablesBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-23-LiveVariablesBug.ll
deleted file mode 100644
index 2ff479b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-23-LiveVariablesBug.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mattr=+neon
-
-; PR5024
-
-%struct.1 = type { %struct.4, %struct.4 }
-%struct.4 = type { <4 x float> }
-
-define arm_aapcs_vfpcc %struct.1* @hhh3(%struct.1* %this, <4 x float> %lenation.0, <4 x float> %legalation.0) nounwind {
-entry:
- %0 = call arm_aapcs_vfpcc %struct.4* @sss1(%struct.4* undef, float 0.000000e+00) nounwind ; <%struct.4*> [#uses=0]
- %1 = call arm_aapcs_vfpcc %struct.4* @qqq1(%struct.4* null, float 5.000000e-01) nounwind ; <%struct.4*> [#uses=0]
- %val92 = load <4 x float>* null ; <<4 x float>> [#uses=1]
- %2 = call arm_aapcs_vfpcc %struct.4* @zzz2(%struct.4* undef, <4 x float> %val92) nounwind ; <%struct.4*> [#uses=0]
- ret %struct.1* %this
-}
-
-declare arm_aapcs_vfpcc %struct.4* @qqq1(%struct.4*, float) nounwind
-
-declare arm_aapcs_vfpcc %struct.4* @sss1(%struct.4*, float) nounwind
-
-declare arm_aapcs_vfpcc %struct.4* @zzz2(%struct.4*, <4 x float>) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-24-spill-align.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-24-spill-align.ll
deleted file mode 100644
index 5476d5f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-24-spill-align.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-; pr4926
-
-define arm_apcscc void @test_vget_lanep16() nounwind {
-entry:
- %arg0_poly16x4_t = alloca <4 x i16> ; <<4 x i16>*> [#uses=1]
- %out_poly16_t = alloca i16 ; <i16*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
-; CHECK: vldr.64
- %0 = load <4 x i16>* %arg0_poly16x4_t, align 8 ; <<4 x i16>> [#uses=1]
- %1 = extractelement <4 x i16> %0, i32 1 ; <i16> [#uses=1]
- store i16 %1, i16* %out_poly16_t, align 2
- br label %return
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-27-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-27-CoalescerBug.ll
deleted file mode 100644
index ea2693a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-27-CoalescerBug.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -mtriple=armv7-eabi -mcpu=cortex-a8
-; PR5055
-
-module asm ".globl\09__aeabi_f2lz"
-module asm ".set\09__aeabi_f2lz, __fixsfdi"
-module asm ""
-
-define arm_aapcs_vfpcc i64 @__fixsfdi(float %a) nounwind {
-entry:
- %0 = fcmp olt float %a, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %0, label %bb, label %bb1
-
-bb: ; preds = %entry
- %1 = fsub float -0.000000e+00, %a ; <float> [#uses=1]
- %2 = tail call arm_aapcs_vfpcc i64 @__fixunssfdi(float %1) nounwind ; <i64> [#uses=1]
- %3 = sub i64 0, %2 ; <i64> [#uses=1]
- ret i64 %3
-
-bb1: ; preds = %entry
- %4 = tail call arm_aapcs_vfpcc i64 @__fixunssfdi(float %a) nounwind ; <i64> [#uses=1]
- ret i64 %4
-}
-
-declare arm_aapcs_vfpcc i64 @__fixunssfdi(float)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll
deleted file mode 100644
index 53bd668..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -mtriple=armv5-unknown-linux-gnueabi -mcpu=arm10tdmi | FileCheck %s
-; PR4687
-
-%0 = type { double, double }
-
-define arm_aapcscc void @foo(%0* noalias nocapture sret %agg.result, double %x.0, double %y.0) nounwind {
-; CHECK: foo:
-; CHECK: bl __adddf3
-; CHECK-NOT: strd
-; CHECK: mov
- %x76 = fmul double %y.0, 0.000000e+00 ; <double> [#uses=1]
- %x77 = fadd double %y.0, 0.000000e+00 ; <double> [#uses=1]
- %tmpr = fadd double %x.0, %x76 ; <double> [#uses=1]
- %agg.result.0 = getelementptr %0* %agg.result, i32 0, i32 0 ; <double*> [#uses=1]
- store double %tmpr, double* %agg.result.0, align 8
- %agg.result.1 = getelementptr %0* %agg.result, i32 0, i32 1 ; <double*> [#uses=1]
- store double %x77, double* %agg.result.1, align 8
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll
deleted file mode 100644
index 465368b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll
+++ /dev/null
@@ -1,63 +0,0 @@
-; RUN: llc -mtriple=armv7-eabi -mcpu=cortex-a8 -enable-unsafe-fp-math < %s
-; PR5367
-
-define arm_aapcs_vfpcc void @_Z27Benchmark_SceDualQuaternionPvm(i8* nocapture %pBuffer, i32 %numItems) nounwind {
-entry:
- br i1 undef, label %return, label %bb
-
-bb: ; preds = %bb, %entry
- %0 = load float* undef, align 4 ; <float> [#uses=1]
- %1 = load float* null, align 4 ; <float> [#uses=1]
- %2 = insertelement <4 x float> undef, float undef, i32 1 ; <<4 x float>> [#uses=1]
- %3 = insertelement <4 x float> %2, float %1, i32 2 ; <<4 x float>> [#uses=2]
- %4 = insertelement <4 x float> undef, float %0, i32 2 ; <<4 x float>> [#uses=1]
- %5 = insertelement <4 x float> %4, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=4]
- %6 = fsub <4 x float> zeroinitializer, %3 ; <<4 x float>> [#uses=1]
- %7 = shufflevector <4 x float> %6, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=2]
- %8 = shufflevector <4 x float> %5, <4 x float> undef, <2 x i32> <i32 0, i32 1> ; <<2 x float>> [#uses=1]
- %9 = shufflevector <2 x float> %8, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x float>> [#uses=2]
- %10 = fmul <4 x float> %7, %9 ; <<4 x float>> [#uses=1]
- %11 = shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
- %12 = shufflevector <4 x float> %5, <4 x float> undef, <2 x i32> <i32 2, i32 3> ; <<2 x float>> [#uses=2]
- %13 = shufflevector <2 x float> %12, <2 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
- %14 = fmul <4 x float> %11, %13 ; <<4 x float>> [#uses=1]
- %15 = fadd <4 x float> %10, %14 ; <<4 x float>> [#uses=1]
- %16 = shufflevector <2 x float> %12, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x float>> [#uses=1]
- %17 = fadd <4 x float> %15, zeroinitializer ; <<4 x float>> [#uses=1]
- %18 = shufflevector <4 x float> %17, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 undef, i32 undef> ; <<4 x float>> [#uses=1]
- %19 = fmul <4 x float> %7, %16 ; <<4 x float>> [#uses=1]
- %20 = fadd <4 x float> %19, zeroinitializer ; <<4 x float>> [#uses=1]
- %21 = shufflevector <4 x float> %3, <4 x float> undef, <4 x i32> <i32 2, i32 undef, i32 undef, i32 undef> ; <<4 x float>> [#uses=1]
- %22 = shufflevector <4 x float> %21, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
- %23 = fmul <4 x float> %22, %9 ; <<4 x float>> [#uses=1]
- %24 = fadd <4 x float> %20, %23 ; <<4 x float>> [#uses=1]
- %25 = shufflevector <4 x float> %18, <4 x float> %24, <4 x i32> <i32 0, i32 1, i32 6, i32 undef> ; <<4 x float>> [#uses=1]
- %26 = shufflevector <4 x float> %25, <4 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 7> ; <<4 x float>> [#uses=1]
- %27 = fmul <4 x float> %26, <float 5.000000e-01, float 5.000000e-01, float 5.000000e-01, float 5.000000e-01> ; <<4 x float>> [#uses=1]
- %28 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %5 ; <<4 x float>> [#uses=1]
- %29 = tail call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> zeroinitializer) nounwind ; <<4 x float>> [#uses=1]
- %30 = fmul <4 x float> zeroinitializer, %29 ; <<4 x float>> [#uses=1]
- %31 = fmul <4 x float> %30, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00> ; <<4 x float>> [#uses=1]
- %32 = shufflevector <4 x float> %27, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
- %33 = shufflevector <4 x float> %28, <4 x float> undef, <2 x i32> <i32 2, i32 3> ; <<2 x float>> [#uses=1]
- %34 = shufflevector <2 x float> %33, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x float>> [#uses=1]
- %35 = fmul <4 x float> %32, %34 ; <<4 x float>> [#uses=1]
- %36 = fadd <4 x float> %35, zeroinitializer ; <<4 x float>> [#uses=1]
- %37 = shufflevector <4 x float> %5, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef> ; <<4 x float>> [#uses=1]
- %38 = shufflevector <4 x float> %37, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
- %39 = fmul <4 x float> zeroinitializer, %38 ; <<4 x float>> [#uses=1]
- %40 = fadd <4 x float> %36, %39 ; <<4 x float>> [#uses=1]
- %41 = fadd <4 x float> %40, zeroinitializer ; <<4 x float>> [#uses=1]
- %42 = shufflevector <4 x float> undef, <4 x float> %41, <4 x i32> <i32 0, i32 1, i32 6, i32 3> ; <<4 x float>> [#uses=1]
- %43 = fmul <4 x float> %42, %31 ; <<4 x float>> [#uses=1]
- store float undef, float* undef, align 4
- store float 0.000000e+00, float* null, align 4
- %44 = extractelement <4 x float> %43, i32 1 ; <float> [#uses=1]
- store float %44, float* undef, align 4
- br i1 undef, label %return, label %bb
-
-return: ; preds = %bb, %entry
- ret void
-}
-
-declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-10-21-InvalidFNeg.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-10-21-InvalidFNeg.ll
deleted file mode 100644
index 0f021d2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-10-21-InvalidFNeg.ll
+++ /dev/null
@@ -1,48 +0,0 @@
-; RUN: llc -mcpu=cortex-a8 -mattr=+neon < %s | grep vneg
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "armv7-eabi"
-
-%aaa = type { %fff, %fff }
-%bbb = type { [6 x %ddd] }
-%ccc = type { %eee, %fff }
-%ddd = type { %fff }
-%eee = type { %fff, %fff, %fff, %fff }
-%fff = type { %struct.vec_float4 }
-%struct.vec_float4 = type { <4 x float> }
-
-define linkonce_odr arm_aapcs_vfpcc void @foo(%eee* noalias sret %agg.result, i64 %tfrm.0.0, i64 %tfrm.0.1, i64 %tfrm.0.2, i64 %tfrm.0.3, i64 %tfrm.0.4, i64 %tfrm.0.5, i64 %tfrm.0.6, i64 %tfrm.0.7) nounwind noinline {
-entry:
- %tmp104 = zext i64 %tfrm.0.2 to i512 ; <i512> [#uses=1]
- %tmp105 = shl i512 %tmp104, 128 ; <i512> [#uses=1]
- %tmp118 = zext i64 %tfrm.0.3 to i512 ; <i512> [#uses=1]
- %tmp119 = shl i512 %tmp118, 192 ; <i512> [#uses=1]
- %ins121 = or i512 %tmp119, %tmp105 ; <i512> [#uses=1]
- %tmp99 = zext i64 %tfrm.0.4 to i512 ; <i512> [#uses=1]
- %tmp100 = shl i512 %tmp99, 256 ; <i512> [#uses=1]
- %tmp123 = zext i64 %tfrm.0.5 to i512 ; <i512> [#uses=1]
- %tmp124 = shl i512 %tmp123, 320 ; <i512> [#uses=1]
- %tmp96 = zext i64 %tfrm.0.6 to i512 ; <i512> [#uses=1]
- %tmp97 = shl i512 %tmp96, 384 ; <i512> [#uses=1]
- %tmp128 = zext i64 %tfrm.0.7 to i512 ; <i512> [#uses=1]
- %tmp129 = shl i512 %tmp128, 448 ; <i512> [#uses=1]
- %mask.masked = or i512 %tmp124, %tmp100 ; <i512> [#uses=1]
- %ins131 = or i512 %tmp129, %tmp97 ; <i512> [#uses=1]
- %tmp109132 = zext i64 %tfrm.0.0 to i128 ; <i128> [#uses=1]
- %tmp113134 = zext i64 %tfrm.0.1 to i128 ; <i128> [#uses=1]
- %tmp114133 = shl i128 %tmp113134, 64 ; <i128> [#uses=1]
- %tmp94 = or i128 %tmp114133, %tmp109132 ; <i128> [#uses=1]
- %tmp95 = bitcast i128 %tmp94 to <4 x float> ; <<4 x float>> [#uses=0]
- %tmp82 = lshr i512 %ins121, 128 ; <i512> [#uses=1]
- %tmp83 = trunc i512 %tmp82 to i128 ; <i128> [#uses=1]
- %tmp84 = bitcast i128 %tmp83 to <4 x float> ; <<4 x float>> [#uses=0]
- %tmp86 = lshr i512 %mask.masked, 256 ; <i512> [#uses=1]
- %tmp87 = trunc i512 %tmp86 to i128 ; <i128> [#uses=1]
- %tmp88 = bitcast i128 %tmp87 to <4 x float> ; <<4 x float>> [#uses=0]
- %tmp90 = lshr i512 %ins131, 384 ; <i512> [#uses=1]
- %tmp91 = trunc i512 %tmp90 to i128 ; <i128> [#uses=1]
- %tmp92 = bitcast i128 %tmp91 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %tmp92 ; <<4 x float>> [#uses=1]
- %tmp28 = getelementptr inbounds %eee* %agg.result, i32 0, i32 3, i32 0, i32 0 ; <<4 x float>*> [#uses=1]
- store <4 x float> %tmp, <4 x float>* %tmp28, align 16
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-10-27-double-align.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-10-27-double-align.ll
deleted file mode 100644
index a4e7685..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-10-27-double-align.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi | FileCheck %s
-
- at .str = private constant [1 x i8] zeroinitializer, align 1
-
-define arm_aapcscc void @g() {
-entry:
-;CHECK: [sp, #+8]
-;CHECK: [sp, #+12]
-;CHECK: [sp]
- tail call arm_aapcscc void (i8*, ...)* @f(i8* getelementptr ([1 x i8]* @.str, i32 0, i32 0), i32 1, double 2.000000e+00, i32 3, double 4.000000e+00)
- ret void
-}
-
-declare arm_aapcscc void @f(i8*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-10-30.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-10-30.ll
deleted file mode 100644
index 90a5bd2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-10-30.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi | FileCheck %s
-; This test checks that the address of the varg arguments is correctly
-; computed when there are 5 or more regular arguments.
-
-define void @f(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, ...) {
-entry:
-;CHECK: sub sp, sp, #4
-;CHECK: add r{{[0-9]+}}, sp, #8
-;CHECK: str r{{[0-9]+}}, [sp], #+4
-;CHECK: bx lr
- %ap = alloca i8*, align 4
- %ap1 = bitcast i8** %ap to i8*
- call void @llvm.va_start(i8* %ap1)
- ret void
-}
-
-declare void @llvm.va_start(i8*) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-01-NeonMoves.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-01-NeonMoves.ll
deleted file mode 100644
index 62f3786..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-01-NeonMoves.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc -mcpu=cortex-a8 < %s | FileCheck %s
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "armv7-eabi"
-
-%foo = type { <4 x float> }
-
-define arm_aapcs_vfpcc void @bar(%foo* noalias sret %agg.result, <4 x float> %quat.0) nounwind {
-entry:
- %quat_addr = alloca %foo, align 16 ; <%foo*> [#uses=2]
- %0 = getelementptr inbounds %foo* %quat_addr, i32 0, i32 0 ; <<4 x float>*> [#uses=1]
- store <4 x float> %quat.0, <4 x float>* %0
- %1 = call arm_aapcs_vfpcc <4 x float> @quux(%foo* %quat_addr) nounwind ; <<4 x float>> [#uses=3]
-;CHECK: vmov.f32
-;CHECK: vmov.f32
- %2 = fmul <4 x float> %1, %1 ; <<4 x float>> [#uses=2]
- %3 = shufflevector <4 x float> %2, <4 x float> undef, <2 x i32> <i32 0, i32 1> ; <<2 x float>> [#uses=1]
- %4 = shufflevector <4 x float> %2, <4 x float> undef, <2 x i32> <i32 2, i32 3> ; <<2 x float>> [#uses=1]
- %5 = call <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float> %3, <2 x float> %4) nounwind ; <<2 x float>> [#uses=2]
- %6 = call <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float> %5, <2 x float> %5) nounwind ; <<2 x float>> [#uses=2]
- %7 = shufflevector <2 x float> %6, <2 x float> %6, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=2]
-;CHECK: vmov
- %8 = call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %7) nounwind ; <<4 x float>> [#uses=3]
- %9 = fmul <4 x float> %8, %8 ; <<4 x float>> [#uses=1]
- %10 = call <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float> %9, <4 x float> %7) nounwind ; <<4 x float>> [#uses=1]
- %11 = fmul <4 x float> %10, %8 ; <<4 x float>> [#uses=1]
- %12 = fmul <4 x float> %11, %1 ; <<4 x float>> [#uses=1]
- %13 = call arm_aapcs_vfpcc %foo* @baz(%foo* %agg.result, <4 x float> %12) nounwind ; <%foo*> [#uses=0]
- ret void
-}
-
-declare arm_aapcs_vfpcc %foo* @baz(%foo*, <4 x float>) nounwind
-
-declare arm_aapcs_vfpcc <4 x float> @quux(%foo* nocapture) nounwind readonly
-
-declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>) nounwind readnone
-
-declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone
-
-declare <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float>, <4 x float>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-02-NegativeLane.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-02-NegativeLane.ll
deleted file mode 100644
index f2288c3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-02-NegativeLane.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc -mcpu=cortex-a8 < %s | grep vdup.32
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "armv7-eabi"
-
-define arm_aapcs_vfpcc void @foo(i8* nocapture %pBuffer, i32 %numItems) nounwind {
-entry:
- br i1 undef, label %return, label %bb
-
-bb: ; preds = %bb, %entry
- %0 = load float* undef, align 4 ; <float> [#uses=1]
- %1 = insertelement <4 x float> undef, float %0, i32 2 ; <<4 x float>> [#uses=1]
- %2 = insertelement <4 x float> %1, float undef, i32 3 ; <<4 x float>> [#uses=1]
- %3 = fmul <4 x float> undef, %2 ; <<4 x float>> [#uses=1]
- %4 = extractelement <4 x float> %3, i32 1 ; <float> [#uses=1]
- store float %4, float* undef, align 4
- br i1 undef, label %return, label %bb
-
-return: ; preds = %bb, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll
deleted file mode 100644
index 7aae3ac..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll
+++ /dev/null
@@ -1,66 +0,0 @@
-; RUN: llc -mcpu=cortex-a8 < %s | FileCheck %s
-; PR5423
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "armv7-eabi"
-
-define arm_aapcs_vfpcc void @foo() {
-entry:
- %0 = load float* null, align 4 ; <float> [#uses=2]
- %1 = fmul float %0, undef ; <float> [#uses=2]
- %2 = fmul float 0.000000e+00, %1 ; <float> [#uses=2]
- %3 = fmul float %0, %1 ; <float> [#uses=1]
- %4 = fadd float 0.000000e+00, %3 ; <float> [#uses=1]
- %5 = fsub float 1.000000e+00, %4 ; <float> [#uses=1]
-; CHECK: foo:
-; CHECK: vmov.f32 s{{[0-9]+}}, #1.000000e+00
- %6 = fsub float 1.000000e+00, undef ; <float> [#uses=2]
- %7 = fsub float %2, undef ; <float> [#uses=1]
- %8 = fsub float 0.000000e+00, undef ; <float> [#uses=3]
- %9 = fadd float %2, undef ; <float> [#uses=3]
- %10 = load float* undef, align 8 ; <float> [#uses=3]
- %11 = fmul float %8, %10 ; <float> [#uses=1]
- %12 = fadd float undef, %11 ; <float> [#uses=2]
- %13 = fmul float undef, undef ; <float> [#uses=1]
- %14 = fmul float %6, 0.000000e+00 ; <float> [#uses=1]
- %15 = fadd float %13, %14 ; <float> [#uses=1]
- %16 = fmul float %9, %10 ; <float> [#uses=1]
- %17 = fadd float %15, %16 ; <float> [#uses=2]
- %18 = fmul float 0.000000e+00, undef ; <float> [#uses=1]
- %19 = fadd float %18, 0.000000e+00 ; <float> [#uses=1]
- %20 = fmul float undef, %10 ; <float> [#uses=1]
- %21 = fadd float %19, %20 ; <float> [#uses=1]
- %22 = load float* undef, align 8 ; <float> [#uses=1]
- %23 = fmul float %5, %22 ; <float> [#uses=1]
- %24 = fadd float %23, undef ; <float> [#uses=1]
- %25 = load float* undef, align 8 ; <float> [#uses=2]
- %26 = fmul float %8, %25 ; <float> [#uses=1]
- %27 = fadd float %24, %26 ; <float> [#uses=1]
- %28 = fmul float %9, %25 ; <float> [#uses=1]
- %29 = fadd float undef, %28 ; <float> [#uses=1]
- %30 = fmul float %8, undef ; <float> [#uses=1]
- %31 = fadd float undef, %30 ; <float> [#uses=1]
- %32 = fmul float %6, undef ; <float> [#uses=1]
- %33 = fadd float undef, %32 ; <float> [#uses=1]
- %34 = fmul float %9, undef ; <float> [#uses=1]
- %35 = fadd float %33, %34 ; <float> [#uses=1]
- %36 = fmul float 0.000000e+00, undef ; <float> [#uses=1]
- %37 = fmul float %7, undef ; <float> [#uses=1]
- %38 = fadd float %36, %37 ; <float> [#uses=1]
- %39 = fmul float undef, undef ; <float> [#uses=1]
- %40 = fadd float %38, %39 ; <float> [#uses=1]
- store float %12, float* undef, align 8
- store float %17, float* undef, align 4
- store float %21, float* undef, align 8
- store float %27, float* undef, align 8
- store float %29, float* undef, align 4
- store float %31, float* undef, align 8
- store float %40, float* undef, align 8
- store float %12, float* null, align 8
- %41 = fmul float %17, undef ; <float> [#uses=1]
- %42 = fadd float %41, undef ; <float> [#uses=1]
- %43 = fmul float %35, undef ; <float> [#uses=1]
- %44 = fadd float %42, %43 ; <float> [#uses=1]
- store float %44, float* null, align 4
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-13-CoalescerCrash.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-13-CoalescerCrash.ll
deleted file mode 100644
index efc4be1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-13-CoalescerCrash.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc -mtriple=armv7-eabi -mcpu=cortex-a8 < %s
-; PR5410
-
-%0 = type { float, float, float, float }
-%pln = type { %vec, float }
-%vec = type { [4 x float] }
-
-define arm_aapcs_vfpcc float @aaa(%vec* nocapture %ustart, %vec* nocapture %udir, %vec* nocapture %vstart, %vec* nocapture %vdir, %vec* %upoint, %vec* %vpoint) {
-entry:
- br i1 undef, label %bb81, label %bb48
-
-bb48: ; preds = %entry
- %0 = call arm_aapcs_vfpcc %0 @bbb(%pln* undef, %vec* %vstart, %vec* undef) nounwind ; <%0> [#uses=0]
- ret float 0.000000e+00
-
-bb81: ; preds = %entry
- ret float 0.000000e+00
-}
-
-declare arm_aapcs_vfpcc %0 @bbb(%pln* nocapture, %vec* nocapture, %vec* nocapture) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-13-ScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-13-ScavengerAssert.ll
deleted file mode 100644
index 6cce02d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-13-ScavengerAssert.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc -mtriple=armv7-eabi -mcpu=cortex-a8 < %s
-; PR5411
-
-%bar = type { %quad, float, float, [3 x %quux*], [3 x %bar*], [2 x %bar*], [3 x i8], i8 }
-%baz = type { %bar*, i32 }
-%foo = type { i8, %quuz, %quad, float, [64 x %quux], [128 x %bar], i32, %baz, %baz }
-%quad = type { [4 x float] }
-%quux = type { %quad, %quad }
-%quuz = type { [4 x %quux*], [4 x float], i32 }
-
-define arm_aapcs_vfpcc %bar* @aaa(%foo* nocapture %this, %quux* %a, %quux* %b, %quux* %c, i8 zeroext %forced) {
-entry:
- br i1 undef, label %bb85, label %bb
-
-bb: ; preds = %entry
- %0 = getelementptr inbounds %bar* null, i32 0, i32 0, i32 0, i32 2 ; <float*> [#uses=2]
- %1 = load float* undef, align 4 ; <float> [#uses=1]
- %2 = fsub float 0.000000e+00, undef ; <float> [#uses=2]
- %3 = fmul float 0.000000e+00, undef ; <float> [#uses=1]
- %4 = load float* %0, align 4 ; <float> [#uses=3]
- %5 = fmul float %4, %2 ; <float> [#uses=1]
- %6 = fsub float %3, %5 ; <float> [#uses=1]
- %7 = fmul float %4, undef ; <float> [#uses=1]
- %8 = fsub float %7, undef ; <float> [#uses=1]
- %9 = fmul float undef, %2 ; <float> [#uses=1]
- %10 = fmul float 0.000000e+00, undef ; <float> [#uses=1]
- %11 = fsub float %9, %10 ; <float> [#uses=1]
- %12 = fmul float undef, %6 ; <float> [#uses=1]
- %13 = fmul float 0.000000e+00, %8 ; <float> [#uses=1]
- %14 = fadd float %12, %13 ; <float> [#uses=1]
- %15 = fmul float %1, %11 ; <float> [#uses=1]
- %16 = fadd float %14, %15 ; <float> [#uses=1]
- %17 = select i1 undef, float undef, float %16 ; <float> [#uses=1]
- %18 = fdiv float %17, 0.000000e+00 ; <float> [#uses=1]
- store float %18, float* undef, align 4
- %19 = fmul float %4, undef ; <float> [#uses=1]
- store float %19, float* %0, align 4
- ret %bar* null
-
-bb85: ; preds = %entry
- ret %bar* null
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-13-ScavengerAssert2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-13-ScavengerAssert2.ll
deleted file mode 100644
index 3ff6631..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-13-ScavengerAssert2.ll
+++ /dev/null
@@ -1,123 +0,0 @@
-; RUN: llc -mtriple=armv7-eabi -mcpu=cortex-a8 < %s
-; PR5412
-
-%bar = type { %quad, float, float, [3 x %quuz*], [3 x %bar*], [2 x %bar*], [3 x i8], i8 }
-%baz = type { %bar*, i32 }
-%foo = type { i8, %quux, %quad, float, [64 x %quuz], [128 x %bar], i32, %baz, %baz }
-%quad = type { [4 x float] }
-%quux = type { [4 x %quuz*], [4 x float], i32 }
-%quuz = type { %quad, %quad }
-
-define arm_aapcs_vfpcc %bar* @aaa(%foo* nocapture %this, %quuz* %a, %quuz* %b, %quuz* %c, i8 zeroext %forced) {
-entry:
- br i1 undef, label %bb85, label %bb
-
-bb: ; preds = %entry
- br i1 undef, label %bb3.i, label %bb2.i
-
-bb2.i: ; preds = %bb
- br label %bb3.i
-
-bb3.i: ; preds = %bb2.i, %bb
- %0 = getelementptr inbounds %quuz* %a, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=0]
- %1 = fsub float 0.000000e+00, undef ; <float> [#uses=1]
- %2 = getelementptr inbounds %quuz* %b, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=2]
- %3 = load float* %2, align 4 ; <float> [#uses=1]
- %4 = getelementptr inbounds %quuz* %a, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=1]
- %5 = fsub float %3, undef ; <float> [#uses=2]
- %6 = getelementptr inbounds %quuz* %b, i32 0, i32 1, i32 0, i32 2 ; <float*> [#uses=2]
- %7 = load float* %6, align 4 ; <float> [#uses=1]
- %8 = fsub float %7, undef ; <float> [#uses=1]
- %9 = getelementptr inbounds %quuz* %c, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=2]
- %10 = load float* %9, align 4 ; <float> [#uses=1]
- %11 = fsub float %10, undef ; <float> [#uses=2]
- %12 = getelementptr inbounds %quuz* %c, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=2]
- %13 = load float* %12, align 4 ; <float> [#uses=1]
- %14 = fsub float %13, undef ; <float> [#uses=1]
- %15 = load float* undef, align 4 ; <float> [#uses=1]
- %16 = fsub float %15, undef ; <float> [#uses=1]
- %17 = fmul float %5, %16 ; <float> [#uses=1]
- %18 = fsub float %17, 0.000000e+00 ; <float> [#uses=5]
- %19 = fmul float %8, %11 ; <float> [#uses=1]
- %20 = fsub float %19, undef ; <float> [#uses=3]
- %21 = fmul float %1, %14 ; <float> [#uses=1]
- %22 = fmul float %5, %11 ; <float> [#uses=1]
- %23 = fsub float %21, %22 ; <float> [#uses=2]
- store float %18, float* undef
- %24 = getelementptr inbounds %bar* null, i32 0, i32 0, i32 0, i32 1 ; <float*> [#uses=2]
- store float %20, float* %24
- store float %23, float* undef
- %25 = getelementptr inbounds %bar* null, i32 0, i32 0, i32 0, i32 3 ; <float*> [#uses=0]
- %26 = fmul float %18, %18 ; <float> [#uses=1]
- %27 = fadd float %26, undef ; <float> [#uses=1]
- %28 = fadd float %27, undef ; <float> [#uses=1]
- %29 = call arm_aapcs_vfpcc float @sqrtf(float %28) readnone ; <float> [#uses=1]
- %30 = load float* null, align 4 ; <float> [#uses=2]
- %31 = load float* %4, align 4 ; <float> [#uses=2]
- %32 = load float* %2, align 4 ; <float> [#uses=2]
- %33 = load float* null, align 4 ; <float> [#uses=3]
- %34 = load float* %6, align 4 ; <float> [#uses=2]
- %35 = fsub float %33, %34 ; <float> [#uses=2]
- %36 = fmul float %20, %35 ; <float> [#uses=1]
- %37 = fsub float %36, undef ; <float> [#uses=1]
- %38 = fmul float %23, 0.000000e+00 ; <float> [#uses=1]
- %39 = fmul float %18, %35 ; <float> [#uses=1]
- %40 = fsub float %38, %39 ; <float> [#uses=1]
- %41 = fmul float %18, 0.000000e+00 ; <float> [#uses=1]
- %42 = fmul float %20, 0.000000e+00 ; <float> [#uses=1]
- %43 = fsub float %41, %42 ; <float> [#uses=1]
- %44 = fmul float 0.000000e+00, %37 ; <float> [#uses=1]
- %45 = fmul float %31, %40 ; <float> [#uses=1]
- %46 = fadd float %44, %45 ; <float> [#uses=1]
- %47 = fmul float %33, %43 ; <float> [#uses=1]
- %48 = fadd float %46, %47 ; <float> [#uses=2]
- %49 = load float* %9, align 4 ; <float> [#uses=2]
- %50 = fsub float %30, %49 ; <float> [#uses=1]
- %51 = load float* %12, align 4 ; <float> [#uses=3]
- %52 = fsub float %32, %51 ; <float> [#uses=2]
- %53 = load float* undef, align 4 ; <float> [#uses=2]
- %54 = load float* %24, align 4 ; <float> [#uses=2]
- %55 = fmul float %54, undef ; <float> [#uses=1]
- %56 = fmul float undef, %52 ; <float> [#uses=1]
- %57 = fsub float %55, %56 ; <float> [#uses=1]
- %58 = fmul float undef, %52 ; <float> [#uses=1]
- %59 = fmul float %54, %50 ; <float> [#uses=1]
- %60 = fsub float %58, %59 ; <float> [#uses=1]
- %61 = fmul float %30, %57 ; <float> [#uses=1]
- %62 = fmul float %32, 0.000000e+00 ; <float> [#uses=1]
- %63 = fadd float %61, %62 ; <float> [#uses=1]
- %64 = fmul float %34, %60 ; <float> [#uses=1]
- %65 = fadd float %63, %64 ; <float> [#uses=2]
- %66 = fcmp olt float %48, %65 ; <i1> [#uses=1]
- %67 = fsub float %49, 0.000000e+00 ; <float> [#uses=1]
- %68 = fsub float %51, %31 ; <float> [#uses=1]
- %69 = fsub float %53, %33 ; <float> [#uses=1]
- %70 = fmul float undef, %67 ; <float> [#uses=1]
- %71 = load float* undef, align 4 ; <float> [#uses=2]
- %72 = fmul float %71, %69 ; <float> [#uses=1]
- %73 = fsub float %70, %72 ; <float> [#uses=1]
- %74 = fmul float %71, %68 ; <float> [#uses=1]
- %75 = fsub float %74, 0.000000e+00 ; <float> [#uses=1]
- %76 = fmul float %51, %73 ; <float> [#uses=1]
- %77 = fadd float undef, %76 ; <float> [#uses=1]
- %78 = fmul float %53, %75 ; <float> [#uses=1]
- %79 = fadd float %77, %78 ; <float> [#uses=1]
- %80 = select i1 %66, float %48, float %65 ; <float> [#uses=1]
- %81 = select i1 undef, float %80, float %79 ; <float> [#uses=1]
- %iftmp.164.0 = select i1 undef, float %29, float 1.000000e+00 ; <float> [#uses=1]
- %82 = fdiv float %81, %iftmp.164.0 ; <float> [#uses=1]
- %iftmp.165.0 = select i1 undef, float %82, float 0.000000e+00 ; <float> [#uses=1]
- store float %iftmp.165.0, float* undef, align 4
- br i1 false, label %bb4.i97, label %ccc.exit98
-
-bb4.i97: ; preds = %bb3.i
- br label %ccc.exit98
-
-ccc.exit98: ; preds = %bb4.i97, %bb3.i
- ret %bar* null
-
-bb85: ; preds = %entry
- ret %bar* null
-}
-
-declare arm_aapcs_vfpcc float @sqrtf(float) readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-13-VRRewriterCrash.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-13-VRRewriterCrash.ll
deleted file mode 100644
index 832ff4f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-13-VRRewriterCrash.ll
+++ /dev/null
@@ -1,113 +0,0 @@
-; RUN: llc -mtriple=armv7-eabi -mcpu=cortex-a8 < %s
-; PR5412
-; rdar://7384107
-
-%bar = type { %quad, float, float, [3 x %quuz*], [3 x %bar*], [2 x %bar*], [3 x i8], i8 }
-%baz = type { %bar*, i32 }
-%foo = type { i8, %quux, %quad, float, [64 x %quuz], [128 x %bar], i32, %baz, %baz }
-%quad = type { [4 x float] }
-%quux = type { [4 x %quuz*], [4 x float], i32 }
-%quuz = type { %quad, %quad }
-
-define arm_aapcs_vfpcc %bar* @aaa(%foo* nocapture %this, %quuz* %a, %quuz* %b, %quuz* %c, i8 zeroext %forced) {
-entry:
- %0 = load %bar** undef, align 4 ; <%bar*> [#uses=2]
- br i1 false, label %bb85, label %bb
-
-bb: ; preds = %entry
- br i1 undef, label %bb3.i, label %bb2.i
-
-bb2.i: ; preds = %bb
- br label %bb3.i
-
-bb3.i: ; preds = %bb2.i, %bb
- %1 = getelementptr inbounds %quuz* %a, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=1]
- %2 = fsub float 0.000000e+00, undef ; <float> [#uses=1]
- %3 = getelementptr inbounds %quuz* %b, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=1]
- %4 = getelementptr inbounds %quuz* %b, i32 0, i32 1, i32 0, i32 2 ; <float*> [#uses=1]
- %5 = fsub float 0.000000e+00, undef ; <float> [#uses=1]
- %6 = getelementptr inbounds %quuz* %c, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=1]
- %7 = getelementptr inbounds %quuz* %c, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=1]
- %8 = fsub float undef, undef ; <float> [#uses=1]
- %9 = fmul float 0.000000e+00, %8 ; <float> [#uses=1]
- %10 = fmul float %5, 0.000000e+00 ; <float> [#uses=1]
- %11 = fsub float %9, %10 ; <float> [#uses=3]
- %12 = fmul float %2, 0.000000e+00 ; <float> [#uses=1]
- %13 = fmul float 0.000000e+00, undef ; <float> [#uses=1]
- %14 = fsub float %12, %13 ; <float> [#uses=2]
- store float %14, float* undef
- %15 = getelementptr inbounds %bar* %0, i32 0, i32 0, i32 0, i32 3 ; <float*> [#uses=1]
- store float 0.000000e+00, float* %15
- %16 = fmul float %11, %11 ; <float> [#uses=1]
- %17 = fadd float %16, 0.000000e+00 ; <float> [#uses=1]
- %18 = fadd float %17, undef ; <float> [#uses=1]
- %19 = call arm_aapcs_vfpcc float @sqrtf(float %18) readnone ; <float> [#uses=2]
- %20 = fcmp ogt float %19, 0x3F1A36E2E0000000 ; <i1> [#uses=1]
- %21 = load float* %1, align 4 ; <float> [#uses=2]
- %22 = load float* %3, align 4 ; <float> [#uses=2]
- %23 = load float* undef, align 4 ; <float> [#uses=2]
- %24 = load float* %4, align 4 ; <float> [#uses=2]
- %25 = fsub float %23, %24 ; <float> [#uses=2]
- %26 = fmul float 0.000000e+00, %25 ; <float> [#uses=1]
- %27 = fsub float %26, undef ; <float> [#uses=1]
- %28 = fmul float %14, 0.000000e+00 ; <float> [#uses=1]
- %29 = fmul float %11, %25 ; <float> [#uses=1]
- %30 = fsub float %28, %29 ; <float> [#uses=1]
- %31 = fsub float undef, 0.000000e+00 ; <float> [#uses=1]
- %32 = fmul float %21, %27 ; <float> [#uses=1]
- %33 = fmul float undef, %30 ; <float> [#uses=1]
- %34 = fadd float %32, %33 ; <float> [#uses=1]
- %35 = fmul float %23, %31 ; <float> [#uses=1]
- %36 = fadd float %34, %35 ; <float> [#uses=1]
- %37 = load float* %6, align 4 ; <float> [#uses=2]
- %38 = load float* %7, align 4 ; <float> [#uses=2]
- %39 = fsub float %22, %38 ; <float> [#uses=2]
- %40 = load float* undef, align 4 ; <float> [#uses=1]
- %41 = load float* null, align 4 ; <float> [#uses=2]
- %42 = fmul float %41, undef ; <float> [#uses=1]
- %43 = fmul float undef, %39 ; <float> [#uses=1]
- %44 = fsub float %42, %43 ; <float> [#uses=1]
- %45 = fmul float undef, %39 ; <float> [#uses=1]
- %46 = fmul float %41, 0.000000e+00 ; <float> [#uses=1]
- %47 = fsub float %45, %46 ; <float> [#uses=1]
- %48 = fmul float 0.000000e+00, %44 ; <float> [#uses=1]
- %49 = fmul float %22, undef ; <float> [#uses=1]
- %50 = fadd float %48, %49 ; <float> [#uses=1]
- %51 = fmul float %24, %47 ; <float> [#uses=1]
- %52 = fadd float %50, %51 ; <float> [#uses=1]
- %53 = fsub float %37, %21 ; <float> [#uses=2]
- %54 = fmul float undef, undef ; <float> [#uses=1]
- %55 = fmul float undef, undef ; <float> [#uses=1]
- %56 = fsub float %54, %55 ; <float> [#uses=1]
- %57 = fmul float undef, %53 ; <float> [#uses=1]
- %58 = load float* undef, align 4 ; <float> [#uses=2]
- %59 = fmul float %58, undef ; <float> [#uses=1]
- %60 = fsub float %57, %59 ; <float> [#uses=1]
- %61 = fmul float %58, undef ; <float> [#uses=1]
- %62 = fmul float undef, %53 ; <float> [#uses=1]
- %63 = fsub float %61, %62 ; <float> [#uses=1]
- %64 = fmul float %37, %56 ; <float> [#uses=1]
- %65 = fmul float %38, %60 ; <float> [#uses=1]
- %66 = fadd float %64, %65 ; <float> [#uses=1]
- %67 = fmul float %40, %63 ; <float> [#uses=1]
- %68 = fadd float %66, %67 ; <float> [#uses=1]
- %69 = select i1 undef, float %36, float %52 ; <float> [#uses=1]
- %70 = select i1 undef, float %69, float %68 ; <float> [#uses=1]
- %iftmp.164.0 = select i1 %20, float %19, float 1.000000e+00 ; <float> [#uses=1]
- %71 = fdiv float %70, %iftmp.164.0 ; <float> [#uses=1]
- store float %71, float* null, align 4
- %72 = icmp eq %bar* null, %0 ; <i1> [#uses=1]
- br i1 %72, label %bb4.i97, label %ccc.exit98
-
-bb4.i97: ; preds = %bb3.i
- %73 = load %bar** undef, align 4 ; <%bar*> [#uses=0]
- br label %ccc.exit98
-
-ccc.exit98: ; preds = %bb4.i97, %bb3.i
- ret %bar* null
-
-bb85: ; preds = %entry
- ret %bar* null
-}
-
-declare arm_aapcs_vfpcc float @sqrtf(float) readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-30-LiveVariablesBug.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-30-LiveVariablesBug.ll
deleted file mode 100644
index efe74cf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-11-30-LiveVariablesBug.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc -mtriple=armv7-eabi -mcpu=cortex-a8 < %s
-; PR5614
-
-%"als" = type { i32 (...)** }
-%"av" = type { %"als" }
-%"c" = type { %"lsm", %"Vec3", %"av"*, float, i8, float, %"lsm", i8, %"Vec3", %"Vec3", %"Vec3", float, float, float, %"Vec3", %"Vec3" }
-%"lsm" = type { %"als", %"Vec3", %"Vec3", %"Vec3", %"Vec3" }
-%"Vec3" = type { float, float, float }
-
-define arm_aapcs_vfpcc void @foo(%"c"* %this, %"Vec3"* nocapture %adjustment) {
-entry:
- switch i32 undef, label %return [
- i32 1, label %bb
- i32 2, label %bb72
- i32 3, label %bb31
- i32 4, label %bb79
- i32 5, label %bb104
- ]
-
-bb: ; preds = %entry
- ret void
-
-bb31: ; preds = %entry
- %0 = call arm_aapcs_vfpcc %"Vec3" undef(%"lsm"* undef) ; <%"Vec3"> [#uses=1]
- %mrv_gr69 = extractvalue %"Vec3" %0, 1 ; <float> [#uses=1]
- %1 = fsub float %mrv_gr69, undef ; <float> [#uses=1]
- store float %1, float* undef, align 4
- ret void
-
-bb72: ; preds = %entry
- ret void
-
-bb79: ; preds = %entry
- ret void
-
-bb104: ; preds = %entry
- ret void
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2009-12-02-vtrn-undef.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2009-12-02-vtrn-undef.ll
deleted file mode 100644
index a737591..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2009-12-02-vtrn-undef.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc -mcpu=cortex-a8 < %s | FileCheck %s
-
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
-target triple = "armv7-apple-darwin10"
-
-%struct.int16x8_t = type { <8 x i16> }
-%struct.int16x8x2_t = type { [2 x %struct.int16x8_t] }
-
-define arm_apcscc void @t(%struct.int16x8x2_t* noalias nocapture sret %agg.result, <8 x i16> %tmp.0, %struct.int16x8x2_t* nocapture %dst) nounwind {
-entry:
-;CHECK: vtrn.16
- %0 = shufflevector <8 x i16> %tmp.0, <8 x i16> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
- %1 = shufflevector <8 x i16> %tmp.0, <8 x i16> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
- %agg.result1218.0 = getelementptr %struct.int16x8x2_t* %agg.result, i32 0, i32 0, i32 0, i32 0 ; <<8 x i16>*>
- store <8 x i16> %0, <8 x i16>* %agg.result1218.0, align 16
- %agg.result12.1.0 = getelementptr %struct.int16x8x2_t* %agg.result, i32 0, i32 0, i32 1, i32 0 ; <<8 x i16>*>
- store <8 x i16> %1, <8 x i16>* %agg.result12.1.0, align 16
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2010-03-04-eabi-fp-spill.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2010-03-04-eabi-fp-spill.ll
deleted file mode 100644
index f7adf73..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2010-03-04-eabi-fp-spill.ll
+++ /dev/null
@@ -1,65 +0,0 @@
-; RUN: llc < %s -mtriple=arm-unknown-linux-gnueabi
-
-define void @"java.lang.String::getChars"([84 x i8]* %method, i32 %base_pc, [788 x i8]* %thread) {
- %1 = load i32* undef ; <i32> [#uses=1]
- %2 = sub i32 %1, 48 ; <i32> [#uses=1]
- br i1 undef, label %stack_overflow, label %no_overflow
-
-stack_overflow: ; preds = %0
- unreachable
-
-no_overflow: ; preds = %0
- %frame = inttoptr i32 %2 to [17 x i32]* ; <[17 x i32]*> [#uses=4]
- %3 = load i32* undef ; <i32> [#uses=1]
- %4 = load i32* null ; <i32> [#uses=1]
- %5 = getelementptr inbounds [17 x i32]* %frame, i32 0, i32 13 ; <i32*> [#uses=1]
- %6 = bitcast i32* %5 to [8 x i8]** ; <[8 x i8]**> [#uses=1]
- %7 = load [8 x i8]** %6 ; <[8 x i8]*> [#uses=1]
- %8 = getelementptr inbounds [17 x i32]* %frame, i32 0, i32 12 ; <i32*> [#uses=1]
- %9 = load i32* %8 ; <i32> [#uses=1]
- br i1 undef, label %bci_13, label %bci_4
-
-bci_13: ; preds = %no_overflow
- br i1 undef, label %bci_30, label %bci_21
-
-bci_30: ; preds = %bci_13
- br i1 undef, label %bci_46, label %bci_35
-
-bci_46: ; preds = %bci_30
- %10 = sub i32 %4, %3 ; <i32> [#uses=1]
- %11 = load [8 x i8]** null ; <[8 x i8]*> [#uses=1]
- %callee = bitcast [8 x i8]* %11 to [84 x i8]* ; <[84 x i8]*> [#uses=1]
- %12 = bitcast i8* undef to i32* ; <i32*> [#uses=1]
- %base_pc7 = load i32* %12 ; <i32> [#uses=2]
- %13 = add i32 %base_pc7, 0 ; <i32> [#uses=1]
- %14 = inttoptr i32 %13 to void ([84 x i8]*, i32, [788 x i8]*)** ; <void ([84 x i8]*, i32, [788 x i8]*)**> [#uses=1]
- %entry_point = load void ([84 x i8]*, i32, [788 x i8]*)** %14 ; <void ([84 x i8]*, i32, [788 x i8]*)*> [#uses=1]
- %15 = getelementptr inbounds [17 x i32]* %frame, i32 0, i32 1 ; <i32*> [#uses=1]
- %16 = ptrtoint i32* %15 to i32 ; <i32> [#uses=1]
- %stack_pointer_addr9 = bitcast i8* undef to i32* ; <i32*> [#uses=1]
- store i32 %16, i32* %stack_pointer_addr9
- %17 = getelementptr inbounds [17 x i32]* %frame, i32 0, i32 2 ; <i32*> [#uses=1]
- store i32 %9, i32* %17
- store i32 %10, i32* undef
- store [84 x i8]* %method, [84 x i8]** undef
- %18 = add i32 %base_pc, 20 ; <i32> [#uses=1]
- store i32 %18, i32* undef
- store [8 x i8]* %7, [8 x i8]** undef
- call void %entry_point([84 x i8]* %callee, i32 %base_pc7, [788 x i8]* %thread)
- br i1 undef, label %no_exception, label %exception
-
-exception: ; preds = %bci_46
- ret void
-
-no_exception: ; preds = %bci_46
- ret void
-
-bci_35: ; preds = %bci_30
- ret void
-
-bci_21: ; preds = %bci_13
- ret void
-
-bci_4: ; preds = %no_overflow
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/2010-03-04-stm-undef-addr.ll b/libclamav/c++/llvm/test/CodeGen/ARM/2010-03-04-stm-undef-addr.ll
deleted file mode 100644
index b0b4cb3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/2010-03-04-stm-undef-addr.ll
+++ /dev/null
@@ -1,54 +0,0 @@
-; RUN: llc < %s -march=arm
-
-define void @"java.lang.String::getChars"([84 x i8]* %method, i32 %base_pc, [788 x i8]* %thread) {
- %1 = sub i32 undef, 48 ; <i32> [#uses=1]
- br i1 undef, label %stack_overflow, label %no_overflow
-
-stack_overflow: ; preds = %0
- unreachable
-
-no_overflow: ; preds = %0
- %frame = inttoptr i32 %1 to [17 x i32]* ; <[17 x i32]*> [#uses=4]
- %2 = load i32* null ; <i32> [#uses=2]
- %3 = getelementptr inbounds [17 x i32]* %frame, i32 0, i32 14 ; <i32*> [#uses=1]
- %4 = load i32* %3 ; <i32> [#uses=2]
- %5 = load [8 x i8]** undef ; <[8 x i8]*> [#uses=2]
- br i1 undef, label %bci_13, label %bci_4
-
-bci_13: ; preds = %no_overflow
- br i1 undef, label %bci_30, label %bci_21
-
-bci_30: ; preds = %bci_13
- %6 = icmp sle i32 %2, %4 ; <i1> [#uses=1]
- br i1 %6, label %bci_46, label %bci_35
-
-bci_46: ; preds = %bci_30
- store [84 x i8]* %method, [84 x i8]** undef
- br i1 false, label %no_exception, label %exception
-
-exception: ; preds = %bci_46
- ret void
-
-no_exception: ; preds = %bci_46
- ret void
-
-bci_35: ; preds = %bci_30
- %7 = getelementptr inbounds [17 x i32]* %frame, i32 0, i32 15 ; <i32*> [#uses=1]
- store i32 %2, i32* %7
- %8 = getelementptr inbounds [17 x i32]* %frame, i32 0, i32 14 ; <i32*> [#uses=1]
- store i32 %4, i32* %8
- %9 = getelementptr inbounds [17 x i32]* %frame, i32 0, i32 13 ; <i32*> [#uses=1]
- %10 = bitcast i32* %9 to [8 x i8]** ; <[8 x i8]**> [#uses=1]
- store [8 x i8]* %5, [8 x i8]** %10
- call void inttoptr (i32 13839116 to void ([788 x i8]*, i32)*)([788 x i8]* %thread, i32 7)
- ret void
-
-bci_21: ; preds = %bci_13
- ret void
-
-bci_4: ; preds = %no_overflow
- store [8 x i8]* %5, [8 x i8]** undef
- store i32 undef, i32* undef
- call void inttoptr (i32 13839116 to void ([788 x i8]*, i32)*)([788 x i8]* %thread, i32 7)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/addrmode.ll b/libclamav/c++/llvm/test/CodeGen/ARM/addrmode.ll
deleted file mode 100644
index 9ccff07..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/addrmode.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=arm -stats |& grep asm-printer | grep 4
-
-define i32 @t1(i32 %a) {
- %b = mul i32 %a, 9
- %c = inttoptr i32 %b to i32*
- %d = load i32* %c
- ret i32 %d
-}
-
-define i32 @t2(i32 %a) {
- %b = mul i32 %a, -7
- %c = inttoptr i32 %b to i32*
- %d = load i32* %c
- ret i32 %d
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/aliases.ll b/libclamav/c++/llvm/test/CodeGen/ARM/aliases.ll
deleted file mode 100644
index 31c5007..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/aliases.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi -o %t
-; RUN: grep { = } %t | count 5
-; RUN: grep globl %t | count 4
-; RUN: grep weak %t | count 1
-
- at bar = external global i32
- at foo1 = alias i32* @bar
- at foo2 = alias i32* @bar
-
-%FunTy = type i32()
-
-declare i32 @foo_f()
- at bar_f = alias weak %FunTy* @foo_f
-
- at bar_i = alias internal i32* @bar
-
- at A = alias bitcast (i32* @bar to i64*)
-
-define i32 @test() {
-entry:
- %tmp = load i32* @foo1
- %tmp1 = load i32* @foo2
- %tmp0 = load i32* @bar_i
- %tmp2 = call i32 @foo_f()
- %tmp3 = add i32 %tmp, %tmp2
- %tmp4 = call %FunTy* @bar_f()
- %tmp5 = add i32 %tmp3, %tmp4
- %tmp6 = add i32 %tmp1, %tmp5
- %tmp7 = add i32 %tmp6, %tmp0
- ret i32 %tmp7
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/align.ll b/libclamav/c++/llvm/test/CodeGen/ARM/align.ll
deleted file mode 100644
index d4d0128..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/align.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi | FileCheck %s -check-prefix=ELF
-; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s -check-prefix=DARWIN
-
- at a = global i1 true
-; no alignment
-
- at b = global i8 1
-; no alignment
-
- at c = global i16 2
-;ELF: .align 1
-;ELF: c:
-;DARWIN: .align 1
-;DARWIN: _c:
-
- at d = global i32 3
-;ELF: .align 2
-;ELF: d:
-;DARWIN: .align 2
-;DARWIN: _d:
-
- at e = global i64 4
-;ELF: .align 3
-;ELF: e
-;DARWIN: .align 2
-;DARWIN: _e:
-
- at f = global float 5.0
-;ELF: .align 2
-;ELF: f:
-;DARWIN: .align 2
-;DARWIN: _f:
-
- at g = global double 6.0
-;ELF: .align 3
-;ELF: g:
-;DARWIN: .align 2
-;DARWIN: _g:
-
- at bar = common global [75 x i8] zeroinitializer, align 128
-;ELF: .comm bar,75,128
-;DARWIN: .comm _bar,75,7
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/alloca.ll b/libclamav/c++/llvm/test/CodeGen/ARM/alloca.ll
deleted file mode 100644
index 82a8c98..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/alloca.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnu | FileCheck %s
-
-define void @f(i32 %a) {
-entry:
-; CHECK: mov r11, sp
- %tmp = alloca i8, i32 %a ; <i8*> [#uses=1]
- call void @g( i8* %tmp, i32 %a, i32 1, i32 2, i32 3 )
- ret void
-; CHECK: mov sp, r11
-}
-
-declare void @g(i8*, i32, i32, i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/argaddr.ll b/libclamav/c++/llvm/test/CodeGen/ARM/argaddr.ll
deleted file mode 100644
index 116a32f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/argaddr.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=arm
-
-define void @f(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
-entry:
- %a_addr = alloca i32 ; <i32*> [#uses=2]
- %b_addr = alloca i32 ; <i32*> [#uses=2]
- %c_addr = alloca i32 ; <i32*> [#uses=2]
- %d_addr = alloca i32 ; <i32*> [#uses=2]
- %e_addr = alloca i32 ; <i32*> [#uses=2]
- store i32 %a, i32* %a_addr
- store i32 %b, i32* %b_addr
- store i32 %c, i32* %c_addr
- store i32 %d, i32* %d_addr
- store i32 %e, i32* %e_addr
- call void @g( i32* %a_addr, i32* %b_addr, i32* %c_addr, i32* %d_addr, i32* %e_addr )
- ret void
-}
-
-declare void @g(i32*, i32*, i32*, i32*, i32*)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/arguments-nosplit-double.ll b/libclamav/c++/llvm/test/CodeGen/ARM/arguments-nosplit-double.ll
deleted file mode 100644
index 770e41d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/arguments-nosplit-double.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi | not grep r3
-; PR4059
-
-define i32 @f(i64 %z, i32 %a, double %b) {
- %tmp = call i32 @g(double %b)
- ret i32 %tmp
-}
-
-declare i32 @g(double)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/arguments-nosplit-i64.ll b/libclamav/c++/llvm/test/CodeGen/ARM/arguments-nosplit-i64.ll
deleted file mode 100644
index 815edfd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/arguments-nosplit-i64.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi | not grep r3
-; PR4058
-
-define i32 @f(i64 %z, i32 %a, i64 %b) {
- %tmp = call i32 @g(i64 %b)
- ret i32 %tmp
-}
-
-declare i32 @g(i64)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/arguments.ll b/libclamav/c++/llvm/test/CodeGen/ARM/arguments.ll
deleted file mode 100644
index cc71839..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/arguments.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi | FileCheck %s -check-prefix=ELF
-; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s -check-prefix=DARWIN
-
-define i32 @f(i32 %a, i64 %b) {
-; ELF: mov r0, r2
-; DARWIN: mov r0, r1
- %tmp = call i32 @g(i64 %b)
- ret i32 %tmp
-}
-
-declare i32 @g(i64)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/arguments2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/arguments2.ll
deleted file mode 100644
index a515ad7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/arguments2.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi
-; RUN: llc < %s -mtriple=arm-apple-darwin
-
-define i32 @f(i32 %a, i128 %b) {
- %tmp = call i32 @g(i128 %b)
- ret i32 %tmp
-}
-
-declare i32 @g(i128)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/arguments3.ll b/libclamav/c++/llvm/test/CodeGen/ARM/arguments3.ll
deleted file mode 100644
index 58f64c6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/arguments3.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi
-; RUN: llc < %s -mtriple=arm-apple-darwin
-
-define i64 @f(i32 %a, i128 %b) {
- %tmp = call i64 @g(i128 %b)
- ret i64 %tmp
-}
-
-declare i64 @g(i128)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/arguments4.ll b/libclamav/c++/llvm/test/CodeGen/ARM/arguments4.ll
deleted file mode 100644
index f5f4207..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/arguments4.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi
-; RUN: llc < %s -mtriple=arm-apple-darwin
-
-define float @f(i32 %a, i128 %b) {
- %tmp = call float @g(i128 %b)
- ret float %tmp
-}
-
-declare float @g(i128)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/arguments5.ll b/libclamav/c++/llvm/test/CodeGen/ARM/arguments5.ll
deleted file mode 100644
index 388a8eb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/arguments5.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi
-; RUN: llc < %s -mtriple=arm-apple-darwin
-
-define double @f(i32 %a, i128 %b) {
- %tmp = call double @g(i128 %b)
- ret double %tmp
-}
-
-declare double @g(i128)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/arguments6.ll b/libclamav/c++/llvm/test/CodeGen/ARM/arguments6.ll
deleted file mode 100644
index 3f757fe..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/arguments6.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi
-; RUN: llc < %s -mtriple=arm-apple-darwin
-
-define i128 @f(i32 %a, i128 %b) {
- %tmp = call i128 @g(i128 %b)
- ret i128 %tmp
-}
-
-declare i128 @g(i128)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/arguments7.ll b/libclamav/c++/llvm/test/CodeGen/ARM/arguments7.ll
deleted file mode 100644
index 038e417..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/arguments7.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi
-; RUN: llc < %s -mtriple=arm-apple-darwin
-
-define double @f(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, double %b) {
- %tmp = call double @g(i32 %a2, i32 %a3, i32 %a4, i32 %a5, double %b)
- ret double %tmp
-}
-
-declare double @g(double)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/arguments8.ll b/libclamav/c++/llvm/test/CodeGen/ARM/arguments8.ll
deleted file mode 100644
index 6999a4d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/arguments8.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi
-; RUN: llc < %s -mtriple=arm-apple-darwin
-
-define i64 @f(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i64 %b) {
- %tmp = call i64 @g(i32 %a2, i32 %a3, i32 %a4, i32 %a5, i64 %b)
- ret i64 %tmp
-}
-
-declare i64 @g(i64)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/arguments_f64_backfill.ll b/libclamav/c++/llvm/test/CodeGen/ARM/arguments_f64_backfill.ll
deleted file mode 100644
index 062133e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/arguments_f64_backfill.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi -mattr=+vfp2 -float-abi=hard | FileCheck %s
-
-define float @f(float %z, double %a, float %b) {
-; CHECK: vmov.f32 s0, s1
- %tmp = call float @g(float %b)
- ret float %tmp
-}
-
-declare float @g(float)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/arm-asm.ll b/libclamav/c++/llvm/test/CodeGen/ARM/arm-asm.ll
deleted file mode 100644
index 2e35e39..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/arm-asm.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=arm
-
-define void @frame_dummy() {
-entry:
- %tmp1 = tail call void (i8*)* (void (i8*)*)* asm "", "=r,0,~{dirflag},~{fpsr},~{flags}"( void (i8*)* null ) ; <void (i8*)*> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/arm-frameaddr.ll b/libclamav/c++/llvm/test/CodeGen/ARM/arm-frameaddr.ll
deleted file mode 100644
index 2739860..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/arm-frameaddr.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin | grep mov | grep r7
-; RUN: llc < %s -mtriple=arm-linux-gnueabi | grep mov | grep r11
-; PR4344
-; PR4416
-
-define arm_aapcscc i8* @t() nounwind {
-entry:
- %0 = call i8* @llvm.frameaddress(i32 0)
- ret i8* %0
-}
-
-declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/arm-negative-stride.ll b/libclamav/c++/llvm/test/CodeGen/ARM/arm-negative-stride.ll
deleted file mode 100644
index 52ab871..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/arm-negative-stride.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-
-; This loop is rewritten with an indvar which counts down, which
-; frees up a register from holding the trip count.
-
-define void @test(i32* %P, i32 %A, i32 %i) nounwind {
-entry:
-; CHECK: str r1, [{{r.*}}, +{{r.*}}, lsl #2]
- icmp eq i32 %i, 0 ; <i1>:0 [#uses=1]
- br i1 %0, label %return, label %bb
-
-bb: ; preds = %bb, %entry
- %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
- %i_addr.09.0 = sub i32 %i, %indvar ; <i32> [#uses=1]
- %tmp2 = getelementptr i32* %P, i32 %i_addr.09.0 ; <i32*> [#uses=1]
- store i32 %A, i32* %tmp2
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
- icmp eq i32 %indvar.next, %i ; <i1>:1 [#uses=1]
- br i1 %1, label %return, label %bb
-
-return: ; preds = %bb, %entry
- ret void
-}
-
-; This loop has a non-address use of the count-up indvar, so
-; it'll remain. Now the original store uses a negative-stride address.
-
-define void @test_with_forced_iv(i32* %P, i32 %A, i32 %i) nounwind {
-entry:
-; CHECK: str r1, [{{r.*}}, -{{r.*}}, lsl #2]
- icmp eq i32 %i, 0 ; <i1>:0 [#uses=1]
- br i1 %0, label %return, label %bb
-
-bb: ; preds = %bb, %entry
- %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
- %i_addr.09.0 = sub i32 %i, %indvar ; <i32> [#uses=1]
- %tmp2 = getelementptr i32* %P, i32 %i_addr.09.0 ; <i32*> [#uses=1]
- store i32 %A, i32* %tmp2
- store i32 %indvar, i32* null
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
- icmp eq i32 %indvar.next, %i ; <i1>:1 [#uses=1]
- br i1 %1, label %return, label %bb
-
-return: ; preds = %bb, %entry
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/armv4.ll b/libclamav/c++/llvm/test/CodeGen/ARM/armv4.ll
deleted file mode 100644
index 49b129d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/armv4.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -mtriple=arm-unknown-eabi | FileCheck %s -check-prefix=THUMB
-; RUN: llc < %s -mtriple=arm-unknown-eabi -mcpu=strongarm | FileCheck %s -check-prefix=ARM
-; RUN: llc < %s -mtriple=arm-unknown-eabi -mcpu=cortex-a8 | FileCheck %s -check-prefix=THUMB
-; RUN: llc < %s -mtriple=arm-unknown-eabi -mattr=+v6 | FileCheck %s -check-prefix=THUMB
-; RUN: llc < %s -mtriple=armv4-unknown-eabi | FileCheck %s -check-prefix=ARM
-; RUN: llc < %s -mtriple=armv4t-unknown-eabi | FileCheck %s -check-prefix=THUMB
-
-define arm_aapcscc i32 @test(i32 %a) nounwind readnone {
-entry:
-; ARM: mov pc
-; THUMB: bx
- ret i32 %a
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/bfc.ll b/libclamav/c++/llvm/test/CodeGen/ARM/bfc.ll
deleted file mode 100644
index c4a44b4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/bfc.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6t2 | FileCheck %s
-
-; 4278190095 = 0xff00000f
-define i32 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: bfc
- %tmp = and i32 %a, 4278190095
- ret i32 %tmp
-}
-
-; 4286578688 = 0xff800000
-define i32 @f2(i32 %a) {
-; CHECK: f2:
-; CHECK: bfc
- %tmp = and i32 %a, 4286578688
- ret i32 %tmp
-}
-
-; 4095 = 0x00000fff
-define i32 @f3(i32 %a) {
-; CHECK: f3:
-; CHECK: bfc
- %tmp = and i32 %a, 4095
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/bic.ll b/libclamav/c++/llvm/test/CodeGen/ARM/bic.ll
deleted file mode 100644
index 1dfd627..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/bic.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-
-define i32 @f1(i32 %a, i32 %b) {
- %tmp = xor i32 %b, 4294967295
- %tmp1 = and i32 %a, %tmp
- ret i32 %tmp1
-}
-
-; CHECK: bic r0, r0, r1
-
-define i32 @f2(i32 %a, i32 %b) {
- %tmp = xor i32 %b, 4294967295
- %tmp1 = and i32 %tmp, %a
- ret i32 %tmp1
-}
-
-; CHECK: bic r0, r0, r1
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/bits.ll b/libclamav/c++/llvm/test/CodeGen/ARM/bits.ll
deleted file mode 100644
index 9e94efe..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/bits.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -march=arm > %t
-; RUN: grep and %t | count 1
-; RUN: grep orr %t | count 1
-; RUN: grep eor %t | count 1
-; RUN: grep mov.*lsl %t | count 1
-; RUN: grep mov.*asr %t | count 1
-
-define i32 @f1(i32 %a, i32 %b) {
-entry:
- %tmp2 = and i32 %b, %a ; <i32> [#uses=1]
- ret i32 %tmp2
-}
-
-define i32 @f2(i32 %a, i32 %b) {
-entry:
- %tmp2 = or i32 %b, %a ; <i32> [#uses=1]
- ret i32 %tmp2
-}
-
-define i32 @f3(i32 %a, i32 %b) {
-entry:
- %tmp2 = xor i32 %b, %a ; <i32> [#uses=1]
- ret i32 %tmp2
-}
-
-define i32 @f4(i32 %a, i32 %b) {
-entry:
- %tmp3 = shl i32 %a, %b ; <i32> [#uses=1]
- ret i32 %tmp3
-}
-
-define i32 @f5(i32 %a, i32 %b) {
-entry:
- %tmp3 = ashr i32 %a, %b ; <i32> [#uses=1]
- ret i32 %tmp3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/bx_fold.ll b/libclamav/c++/llvm/test/CodeGen/ARM/bx_fold.ll
deleted file mode 100644
index 0e3e070..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/bx_fold.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=arm
-; RUN: llc < %s -march=arm | not grep bx
-
-define void @test(i32 %Ptr, i8* %L) {
-entry:
- br label %bb1
-
-bb: ; preds = %bb1
- %gep.upgrd.1 = zext i32 %indvar to i64 ; <i64> [#uses=1]
- %tmp7 = getelementptr i8* %L, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
- store i8 0, i8* %tmp7
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
- br label %bb1
-
-bb1: ; preds = %bb, %entry
- %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
- %i.0 = bitcast i32 %indvar to i32 ; <i32> [#uses=2]
- %tmp = tail call i32 (...)* @bar( ) ; <i32> [#uses=1]
- %tmp2 = add i32 %i.0, %tmp ; <i32> [#uses=1]
- %Ptr_addr.0 = sub i32 %Ptr, %tmp2 ; <i32> [#uses=0]
- %tmp12 = icmp eq i32 %i.0, %Ptr ; <i1> [#uses=1]
- %tmp12.not = xor i1 %tmp12, true ; <i1> [#uses=1]
- %bothcond = and i1 %tmp12.not, false ; <i1> [#uses=1]
- br i1 %bothcond, label %bb, label %bb18
-
-bb18: ; preds = %bb1
- ret void
-}
-
-declare i32 @bar(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/call.ll b/libclamav/c++/llvm/test/CodeGen/ARM/call.ll
deleted file mode 100644
index c60b75b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/call.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s -check-prefix=CHECKV4
-; RUN: llc < %s -march=arm -mattr=+v5t | FileCheck %s -check-prefix=CHECKV5
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi\
-; RUN: -relocation-model=pic | FileCheck %s -check-prefix=CHECKELF
-
-@t = weak global i32 ()* null ; <i32 ()**> [#uses=1]
-
-declare void @g(i32, i32, i32, i32)
-
-define void @f() {
-; CHECKV4: mov lr, pc
-; CHECKV5: blx
-; CHECKELF: PLT
- call void @g( i32 1, i32 2, i32 3, i32 4 )
- ret void
-}
-
-define void @g.upgrd.1() {
- %tmp = load i32 ()** @t ; <i32 ()*> [#uses=1]
- %tmp.upgrd.2 = tail call i32 %tmp( ) ; <i32> [#uses=0]
- ret void
-}
-
-define i32* @m_231b(i32, i32, i32*, i32*, i32*) nounwind {
-; CHECKV4: m_231b
-; CHECKV4: bx r{{.*}}
-BB0:
- %5 = inttoptr i32 %0 to i32* ; <i32*> [#uses=1]
- %t35 = volatile load i32* %5 ; <i32> [#uses=1]
- %6 = inttoptr i32 %t35 to i32** ; <i32**> [#uses=1]
- %7 = getelementptr i32** %6, i32 86 ; <i32**> [#uses=1]
- %8 = load i32** %7 ; <i32*> [#uses=1]
- %9 = bitcast i32* %8 to i32* (i32, i32*, i32, i32*, i32*, i32*)* ; <i32* (i32, i32*, i32, i32*, i32*, i32*)*> [#uses=1]
- %10 = call i32* %9(i32 %0, i32* null, i32 %1, i32* %2, i32* %3, i32* %4) ; <i32*> [#uses=1]
- ret i32* %10
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/call_nolink.ll b/libclamav/c++/llvm/test/CodeGen/ARM/call_nolink.ll
deleted file mode 100644
index efe29d8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/call_nolink.ll
+++ /dev/null
@@ -1,52 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi | \
-; RUN: not grep {bx lr}
-
- %struct.anon = type { i32 (i32, i32, i32)*, i32, i32, [3 x i32], i8*, i8*, i8* }
-@r = external global [14 x i32] ; <[14 x i32]*> [#uses=4]
-@isa = external global [13 x %struct.anon] ; <[13 x %struct.anon]*> [#uses=1]
-@pgm = external global [2 x { i32, [3 x i32] }] ; <[2 x { i32, [3 x i32] }]*> [#uses=4]
-@numi = external global i32 ; <i32*> [#uses=1]
-@counter = external global [2 x i32] ; <[2 x i32]*> [#uses=1]
-
-
-define void @main_bb_2E_i_bb205_2E_i_2E_i_bb115_2E_i_2E_i() {
-newFuncRoot:
- br label %bb115.i.i
-
-bb115.i.i.bb170.i.i_crit_edge.exitStub: ; preds = %bb115.i.i
- ret void
-
-bb115.i.i.bb115.i.i_crit_edge: ; preds = %bb115.i.i
- br label %bb115.i.i
-
-bb115.i.i: ; preds = %bb115.i.i.bb115.i.i_crit_edge, %newFuncRoot
- %i_addr.3210.0.i.i = phi i32 [ %tmp166.i.i, %bb115.i.i.bb115.i.i_crit_edge ], [ 0, %newFuncRoot ] ; <i32> [#uses=7]
- %tmp124.i.i = getelementptr [2 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %i_addr.3210.0.i.i, i32 1, i32 0 ; <i32*> [#uses=1]
- %tmp125.i.i = load i32* %tmp124.i.i ; <i32> [#uses=1]
- %tmp126.i.i = getelementptr [14 x i32]* @r, i32 0, i32 %tmp125.i.i ; <i32*> [#uses=1]
- %tmp127.i.i = load i32* %tmp126.i.i ; <i32> [#uses=1]
- %tmp131.i.i = getelementptr [2 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %i_addr.3210.0.i.i, i32 1, i32 1 ; <i32*> [#uses=1]
- %tmp132.i.i = load i32* %tmp131.i.i ; <i32> [#uses=1]
- %tmp133.i.i = getelementptr [14 x i32]* @r, i32 0, i32 %tmp132.i.i ; <i32*> [#uses=1]
- %tmp134.i.i = load i32* %tmp133.i.i ; <i32> [#uses=1]
- %tmp138.i.i = getelementptr [2 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %i_addr.3210.0.i.i, i32 1, i32 2 ; <i32*> [#uses=1]
- %tmp139.i.i = load i32* %tmp138.i.i ; <i32> [#uses=1]
- %tmp140.i.i = getelementptr [14 x i32]* @r, i32 0, i32 %tmp139.i.i ; <i32*> [#uses=1]
- %tmp141.i.i = load i32* %tmp140.i.i ; <i32> [#uses=1]
- %tmp143.i.i = add i32 %i_addr.3210.0.i.i, 12 ; <i32> [#uses=1]
- %tmp146.i.i = getelementptr [2 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %i_addr.3210.0.i.i, i32 0 ; <i32*> [#uses=1]
- %tmp147.i.i = load i32* %tmp146.i.i ; <i32> [#uses=1]
- %tmp149.i.i = getelementptr [13 x %struct.anon]* @isa, i32 0, i32 %tmp147.i.i, i32 0 ; <i32 (i32, i32, i32)**> [#uses=1]
- %tmp150.i.i = load i32 (i32, i32, i32)** %tmp149.i.i ; <i32 (i32, i32, i32)*> [#uses=1]
- %tmp154.i.i = tail call i32 %tmp150.i.i( i32 %tmp127.i.i, i32 %tmp134.i.i, i32 %tmp141.i.i ) ; <i32> [#uses=1]
- %tmp155.i.i = getelementptr [14 x i32]* @r, i32 0, i32 %tmp143.i.i ; <i32*> [#uses=1]
- store i32 %tmp154.i.i, i32* %tmp155.i.i
- %tmp159.i.i = getelementptr [2 x i32]* @counter, i32 0, i32 %i_addr.3210.0.i.i ; <i32*> [#uses=2]
- %tmp160.i.i = load i32* %tmp159.i.i ; <i32> [#uses=1]
- %tmp161.i.i = add i32 %tmp160.i.i, 1 ; <i32> [#uses=1]
- store i32 %tmp161.i.i, i32* %tmp159.i.i
- %tmp166.i.i = add i32 %i_addr.3210.0.i.i, 1 ; <i32> [#uses=2]
- %tmp168.i.i = load i32* @numi ; <i32> [#uses=1]
- icmp slt i32 %tmp166.i.i, %tmp168.i.i ; <i1>:0 [#uses=1]
- br i1 %0, label %bb115.i.i.bb115.i.i_crit_edge, label %bb115.i.i.bb170.i.i_crit_edge.exitStub
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/carry.ll b/libclamav/c++/llvm/test/CodeGen/ARM/carry.ll
deleted file mode 100644
index a6a7ed6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/carry.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-
-define i64 @f1(i64 %a, i64 %b) {
-; CHECK: f1:
-; CHECK: subs r
-; CHECK: sbc r
-entry:
- %tmp = sub i64 %a, %b
- ret i64 %tmp
-}
-
-define i64 @f2(i64 %a, i64 %b) {
-; CHECK: f2:
-; CHECK: adc r
-; CHECK: subs r
-; CHECK: sbc r
-entry:
- %tmp1 = shl i64 %a, 1
- %tmp2 = sub i64 %tmp1, %b
- ret i64 %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/clz.ll b/libclamav/c++/llvm/test/CodeGen/ARM/clz.ll
deleted file mode 100644
index d2235c9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/clz.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v5t | grep clz
-
-declare i32 @llvm.ctlz.i32(i32)
-
-define i32 @test(i32 %x) {
- %tmp.1 = call i32 @llvm.ctlz.i32( i32 %x ) ; <i32> [#uses=1]
- ret i32 %tmp.1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/compare-call.ll b/libclamav/c++/llvm/test/CodeGen/ARM/compare-call.ll
deleted file mode 100644
index fac2bc5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/compare-call.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2 | \
-; RUN: grep vcmpe.f32
-
-define void @test3(float* %glob, i32 %X) {
-entry:
- %tmp = load float* %glob ; <float> [#uses=1]
- %tmp2 = getelementptr float* %glob, i32 2 ; <float*> [#uses=1]
- %tmp3 = load float* %tmp2 ; <float> [#uses=1]
- %tmp.upgrd.1 = fcmp ogt float %tmp, %tmp3 ; <i1> [#uses=1]
- br i1 %tmp.upgrd.1, label %cond_true, label %UnifiedReturnBlock
-
-cond_true: ; preds = %entry
- %tmp.upgrd.2 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-declare i32 @bar(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/constants.ll b/libclamav/c++/llvm/test/CodeGen/ARM/constants.ll
deleted file mode 100644
index ce91936..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/constants.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-
-define i32 @f1() {
-; CHECK: f1
-; CHECK: mov r0, #0
- ret i32 0
-}
-
-define i32 @f2() {
-; CHECK: f2
-; CHECK: mov r0, #255
- ret i32 255
-}
-
-define i32 @f3() {
-; CHECK: f3
-; CHECK: mov r0{{.*}}256
- ret i32 256
-}
-
-define i32 @f4() {
-; CHECK: f4
-; CHECK: orr{{.*}}256
- ret i32 257
-}
-
-define i32 @f5() {
-; CHECK: f5
-; CHECK: mov r0, {{.*}}-1073741761
- ret i32 -1073741761
-}
-
-define i32 @f6() {
-; CHECK: f6
-; CHECK: mov r0, {{.*}}1008
- ret i32 1008
-}
-
-define void @f7(i32 %a) {
-; CHECK: f7
-; CHECK: cmp r0, #1, 16
- %b = icmp ugt i32 %a, 65536 ; <i1> [#uses=1]
- br i1 %b, label %r, label %r
-
-r: ; preds = %0, %0
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/cse-libcalls.ll b/libclamav/c++/llvm/test/CodeGen/ARM/cse-libcalls.ll
deleted file mode 100644
index 0dcf9dd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/cse-libcalls.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=arm | grep {bl.\*__ltdf} | count 1
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
-
-; Without CSE of libcalls, there are two calls in the output instead of one.
-
-define i32 @u_f_nonbon(double %lambda) nounwind {
-entry:
- %tmp19.i.i = load double* null, align 4 ; <double> [#uses=2]
- %tmp6.i = fcmp olt double %tmp19.i.i, 1.000000e+00 ; <i1> [#uses=1]
- %dielectric.0.i = select i1 %tmp6.i, double 1.000000e+00, double %tmp19.i.i ; <double> [#uses=1]
- %tmp10.i4 = fdiv double 0x4074C2D71F36262D, %dielectric.0.i ; <double> [#uses=1]
- br i1 false, label %bb28.i, label %bb508.i
-
-bb28.i: ; preds = %bb28.i, %entry
- br i1 false, label %bb502.loopexit.i, label %bb28.i
-
-bb.nph53.i: ; preds = %bb502.loopexit.i
- %tmp354.i = fsub double -0.000000e+00, %tmp10.i4 ; <double> [#uses=0]
- br label %bb244.i
-
-bb244.i: ; preds = %bb244.i, %bb.nph53.i
- br label %bb244.i
-
-bb502.loopexit.i: ; preds = %bb28.i
- br i1 false, label %bb.nph53.i, label %bb508.i
-
-bb508.i: ; preds = %bb502.loopexit.i, %entry
- ret i32 1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ctors_dtors.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ctors_dtors.ll
deleted file mode 100644
index fb94626..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ctors_dtors.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s -check-prefix=DARWIN
-; RUN: llc < %s -mtriple=arm-linux-gnu | FileCheck %s -check-prefix=ELF
-; RUN: llc < %s -mtriple=arm-linux-gnueabi | FileCheck %s -check-prefix=GNUEABI
-
-; DARWIN: .section __DATA,__mod_init_func,mod_init_funcs
-; DARWIN: .section __DATA,__mod_term_func,mod_term_funcs
-
-; ELF: .section .ctors,"aw",%progbits
-; ELF: .section .dtors,"aw",%progbits
-
-; GNUEABI: .section .init_array,"aw",%init_array
-; GNUEABI: .section .fini_array,"aw",%fini_array
-
-@llvm.global_ctors = appending global [1 x { i32, void ()* }] [ { i32, void ()* } { i32 65535, void ()* @__mf_init } ] ; <[1 x { i32, void ()* }]*> [#uses=0]
-@llvm.global_dtors = appending global [1 x { i32, void ()* }] [ { i32, void ()* } { i32 65535, void ()* @__mf_fini } ] ; <[1 x { i32, void ()* }]*> [#uses=0]
-
-define void @__mf_init() {
-entry:
- ret void
-}
-
-define void @__mf_fini() {
-entry:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ctz.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ctz.ll
deleted file mode 100644
index 1d2ced3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ctz.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6t2 | FileCheck %s
-
-declare i32 @llvm.cttz.i32(i32)
-
-define i32 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: rbit
-; CHECK: clz
- %tmp = call i32 @llvm.cttz.i32( i32 %a )
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/dg.exp b/libclamav/c++/llvm/test/CodeGen/ARM/dg.exp
deleted file mode 100644
index 3ff359a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/dg.exp
+++ /dev/null
@@ -1,5 +0,0 @@
-load_lib llvm.exp
-
-if { [llvm_supports_target ARM] } {
- RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/div.ll b/libclamav/c++/llvm/test/CodeGen/ARM/div.ll
deleted file mode 100644
index 2f724e7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/div.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=arm > %t
-; RUN: grep __divsi3 %t
-; RUN: grep __udivsi3 %t
-; RUN: grep __modsi3 %t
-; RUN: grep __umodsi3 %t
-
-define i32 @f1(i32 %a, i32 %b) {
-entry:
- %tmp1 = sdiv i32 %a, %b ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
-define i32 @f2(i32 %a, i32 %b) {
-entry:
- %tmp1 = udiv i32 %a, %b ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
-define i32 @f3(i32 %a, i32 %b) {
-entry:
- %tmp1 = srem i32 %a, %b ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
-define i32 @f4(i32 %a, i32 %b) {
-entry:
- %tmp1 = urem i32 %a, %b ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/dyn-stackalloc.ll b/libclamav/c++/llvm/test/CodeGen/ARM/dyn-stackalloc.ll
deleted file mode 100644
index 92e2d13..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/dyn-stackalloc.ll
+++ /dev/null
@@ -1,56 +0,0 @@
-; RUN: llc < %s -march=arm
-
- %struct.state = type { i32, %struct.info*, float**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i64, i64, i64, i64, i8* }
- %struct.info = type { i32, i32, i32, i32, i32, i32, i32, i8* }
-
-define void @t1(%struct.state* %v) {
- %tmp6 = load i32* null
- %tmp8 = alloca float, i32 %tmp6
- store i32 1, i32* null
- br i1 false, label %bb123.preheader, label %return
-
-bb123.preheader:
- br i1 false, label %bb43, label %return
-
-bb43:
- call fastcc void @f1( float* %tmp8, float* null, i32 0 )
- %tmp70 = load i32* null
- %tmp85 = getelementptr float* %tmp8, i32 0
- call fastcc void @f2( float* null, float* null, float* %tmp85, i32 %tmp70 )
- ret void
-
-return:
- ret void
-}
-
-declare fastcc void @f1(float*, float*, i32)
-
-declare fastcc void @f2(float*, float*, float*, i32)
-
- %struct.comment = type { i8**, i32*, i32, i8* }
-@str215 = external global [2 x i8]
-
-define void @t2(%struct.comment* %vc, i8* %tag, i8* %contents) {
- %tmp1 = call i32 @strlen( i8* %tag )
- %tmp3 = call i32 @strlen( i8* %contents )
- %tmp4 = add i32 %tmp1, 2
- %tmp5 = add i32 %tmp4, %tmp3
- %tmp6 = alloca i8, i32 %tmp5
- %tmp9 = call i8* @strcpy( i8* %tmp6, i8* %tag )
- %tmp6.len = call i32 @strlen( i8* %tmp6 )
- %tmp6.indexed = getelementptr i8* %tmp6, i32 %tmp6.len
- call void @llvm.memcpy.i32( i8* %tmp6.indexed, i8* getelementptr ([2 x i8]* @str215, i32 0, i32 0), i32 2, i32 1 )
- %tmp15 = call i8* @strcat( i8* %tmp6, i8* %contents )
- call fastcc void @comment_add( %struct.comment* %vc, i8* %tmp6 )
- ret void
-}
-
-declare i32 @strlen(i8*)
-
-declare i8* @strcat(i8*, i8*)
-
-declare fastcc void @comment_add(%struct.comment*, i8*)
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
-
-declare i8* @strcpy(i8*, i8*)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/extloadi1.ll b/libclamav/c++/llvm/test/CodeGen/ARM/extloadi1.ll
deleted file mode 100644
index dc45ce7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/extloadi1.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=arm
-@handler_installed.6144.b = external global i1 ; <i1*> [#uses=1]
-
-define void @__mf_sigusr1_respond() {
-entry:
- %tmp8.b = load i1* @handler_installed.6144.b ; <i1> [#uses=1]
- br i1 false, label %cond_true7, label %cond_next
-
-cond_next: ; preds = %entry
- br i1 %tmp8.b, label %bb, label %cond_next3
-
-cond_next3: ; preds = %cond_next
- ret void
-
-bb: ; preds = %cond_next
- ret void
-
-cond_true7: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fabss.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fabss.ll
deleted file mode 100644
index e5b5791..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fabss.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=1 | FileCheck %s -check-prefix=NFP1
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=0 | FileCheck %s -check-prefix=NFP0
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=CORTEXA9
-
-define float @test(float %a, float %b) {
-entry:
- %dum = fadd float %a, %b
- %0 = tail call float @fabsf(float %dum)
- %dum1 = fadd float %0, %b
- ret float %dum1
-}
-
-declare float @fabsf(float)
-
-; VFP2: test:
-; VFP2: vabs.f32 s1, s1
-
-; NFP1: test:
-; NFP1: vabs.f32 d1, d1
-; NFP0: test:
-; NFP0: vabs.f32 s1, s1
-
-; CORTEXA8: test:
-; CORTEXA8: vabs.f32 d1, d1
-; CORTEXA9: test:
-; CORTEXA9: vabs.f32 s1, s1
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fadds.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fadds.ll
deleted file mode 100644
index db18a86..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fadds.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=1 | FileCheck %s -check-prefix=NFP1
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=0 | FileCheck %s -check-prefix=NFP0
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=CORTEXA9
-
-define float @test(float %a, float %b) {
-entry:
- %0 = fadd float %a, %b
- ret float %0
-}
-
-; VFP2: test:
-; VFP2: vadd.f32 s0, s1, s0
-
-; NFP1: test:
-; NFP1: vadd.f32 d0, d1, d0
-; NFP0: test:
-; NFP0: vadd.f32 s0, s1, s0
-
-; CORTEXA8: test:
-; CORTEXA8: vadd.f32 d0, d1, d0
-; CORTEXA9: test:
-; CORTEXA9: vadd.f32 s0, s1, s0
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fcopysign.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fcopysign.ll
deleted file mode 100644
index a6d7410..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fcopysign.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=arm | grep bic | count 2
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2 | \
-; RUN: grep vneg | count 2
-
-define float @test1(float %x, double %y) {
- %tmp = fpext float %x to double
- %tmp2 = tail call double @copysign( double %tmp, double %y )
- %tmp3 = fptrunc double %tmp2 to float
- ret float %tmp3
-}
-
-define double @test2(double %x, float %y) {
- %tmp = fpext float %y to double
- %tmp2 = tail call double @copysign( double %x, double %tmp )
- ret double %tmp2
-}
-
-declare double @copysign(double, double)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fdivs.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fdivs.ll
deleted file mode 100644
index a5c86bf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fdivs.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=1 | FileCheck %s -check-prefix=NFP1
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=0 | FileCheck %s -check-prefix=NFP0
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=CORTEXA9
-
-define float @test(float %a, float %b) {
-entry:
- %0 = fdiv float %a, %b
- ret float %0
-}
-
-; VFP2: test:
-; VFP2: vdiv.f32 s0, s1, s0
-
-; NFP1: test:
-; NFP1: vdiv.f32 s0, s1, s0
-; NFP0: test:
-; NFP0: vdiv.f32 s0, s1, s0
-
-; CORTEXA8: test:
-; CORTEXA8: vdiv.f32 s0, s1, s0
-; CORTEXA9: test:
-; CORTEXA9: vdiv.f32 s0, s1, s0
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fixunsdfdi.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fixunsdfdi.ll
deleted file mode 100644
index 6db2385..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fixunsdfdi.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
-; RUN: llc < %s -march=arm -mattr=vfp2 | not grep vstr.64
-
-define hidden i64 @__fixunsdfdi(double %x) nounwind readnone {
-entry:
- %x14 = bitcast double %x to i64 ; <i64> [#uses=1]
- br i1 true, label %bb3, label %bb10
-
-bb3: ; preds = %entry
- br i1 true, label %bb5, label %bb7
-
-bb5: ; preds = %bb3
- %u.in.mask = and i64 %x14, -4294967296 ; <i64> [#uses=1]
- %.ins = or i64 0, %u.in.mask ; <i64> [#uses=1]
- %0 = bitcast i64 %.ins to double ; <double> [#uses=1]
- %1 = fsub double %x, %0 ; <double> [#uses=1]
- %2 = fptosi double %1 to i32 ; <i32> [#uses=1]
- %3 = add i32 %2, 0 ; <i32> [#uses=1]
- %4 = zext i32 %3 to i64 ; <i64> [#uses=1]
- %5 = shl i64 %4, 32 ; <i64> [#uses=1]
- %6 = or i64 %5, 0 ; <i64> [#uses=1]
- ret i64 %6
-
-bb7: ; preds = %bb3
- ret i64 0
-
-bb10: ; preds = %entry
- ret i64 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fmacs.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fmacs.ll
deleted file mode 100644
index 904a587..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fmacs.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=1 | FileCheck %s -check-prefix=NFP1
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=0 | FileCheck %s -check-prefix=NFP0
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=CORTEXA9
-
-define float @test(float %acc, float %a, float %b) {
-entry:
- %0 = fmul float %a, %b
- %1 = fadd float %acc, %0
- ret float %1
-}
-
-; VFP2: test:
-; VFP2: vmla.f32 s2, s1, s0
-
-; NFP1: test:
-; NFP1: vmul.f32 d0, d1, d0
-; NFP0: test:
-; NFP0: vmla.f32 s2, s1, s0
-
-; CORTEXA8: test:
-; CORTEXA8: vmul.f32 d0, d1, d0
-; CORTEXA9: test:
-; CORTEXA9: vmla.f32 s2, s1, s0
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fmdrr-fmrrd.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fmdrr-fmrrd.ll
deleted file mode 100644
index eb72faf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fmdrr-fmrrd.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=vfp2 | not grep fmdrr
-; RUN: llc < %s -march=arm -mattr=vfp2 | not grep fmrrd
-
-; naive codegen for this is:
-; _i:
-; fmdrr d0, r0, r1
-; fmrrd r0, r1, d0
-; bx lr
-
-define i64 @test(double %X) {
- %Y = bitcast double %X to i64
- ret i64 %Y
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fmscs.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fmscs.ll
deleted file mode 100644
index 7b9e029..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fmscs.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=1 | FileCheck %s -check-prefix=NFP1
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=0 | FileCheck %s -check-prefix=NFP0
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=CORTEXA9
-
-define float @test(float %acc, float %a, float %b) {
-entry:
- %0 = fmul float %a, %b
- %1 = fsub float %0, %acc
- ret float %1
-}
-
-; VFP2: test:
-; VFP2: vnmls.f32 s2, s1, s0
-
-; NFP1: test:
-; NFP1: vnmls.f32 s2, s1, s0
-; NFP0: test:
-; NFP0: vnmls.f32 s2, s1, s0
-
-; CORTEXA8: test:
-; CORTEXA8: vnmls.f32 s2, s1, s0
-; CORTEXA9: test:
-; CORTEXA9: vnmls.f32 s2, s1, s0
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fmuls.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fmuls.ll
deleted file mode 100644
index d3c9c82..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fmuls.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=1 | FileCheck %s -check-prefix=NFP1
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=0 | FileCheck %s -check-prefix=NFP0
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=CORTEXA9
-
-define float @test(float %a, float %b) {
-entry:
- %0 = fmul float %a, %b
- ret float %0
-}
-
-; VFP2: test:
-; VFP2: vmul.f32 s0, s1, s0
-
-; NFP1: test:
-; NFP1: vmul.f32 d0, d1, d0
-; NFP0: test:
-; NFP0: vmul.f32 s0, s1, s0
-
-; CORTEXA8: test:
-; CORTEXA8: vmul.f32 d0, d1, d0
-; CORTEXA9: test:
-; CORTEXA9: vmul.f32 s0, s1, s0
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fnegs.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fnegs.ll
deleted file mode 100644
index d6c22f1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fnegs.ll
+++ /dev/null
@@ -1,54 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=1 | FileCheck %s -check-prefix=NFP1
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=0 | FileCheck %s -check-prefix=NFP0
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=CORTEXA8
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=CORTEXA9
-
-define float @test1(float* %a) {
-entry:
- %0 = load float* %a, align 4 ; <float> [#uses=2]
- %1 = fsub float -0.000000e+00, %0 ; <float> [#uses=2]
- %2 = fpext float %1 to double ; <double> [#uses=1]
- %3 = fcmp olt double %2, 1.234000e+00 ; <i1> [#uses=1]
- %retval = select i1 %3, float %1, float %0 ; <float> [#uses=1]
- ret float %retval
-}
-; VFP2: test1:
-; VFP2: vneg.f32 s1, s0
-
-; NFP1: test1:
-; NFP1: vneg.f32 d1, d0
-
-; NFP0: test1:
-; NFP0: vneg.f32 s1, s0
-
-; CORTEXA8: test1:
-; CORTEXA8: vneg.f32 d1, d0
-
-; CORTEXA9: test1:
-; CORTEXA9: vneg.f32 s1, s0
-
-define float @test2(float* %a) {
-entry:
- %0 = load float* %a, align 4 ; <float> [#uses=2]
- %1 = fmul float -1.000000e+00, %0 ; <float> [#uses=2]
- %2 = fpext float %1 to double ; <double> [#uses=1]
- %3 = fcmp olt double %2, 1.234000e+00 ; <i1> [#uses=1]
- %retval = select i1 %3, float %1, float %0 ; <float> [#uses=1]
- ret float %retval
-}
-; VFP2: test2:
-; VFP2: vneg.f32 s1, s0
-
-; NFP1: test2:
-; NFP1: vneg.f32 d1, d0
-
-; NFP0: test2:
-; NFP0: vneg.f32 s1, s0
-
-; CORTEXA8: test2:
-; CORTEXA8: vneg.f32 d1, d0
-
-; CORTEXA9: test2:
-; CORTEXA9: vneg.f32 s1, s0
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fnmacs.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fnmacs.ll
deleted file mode 100644
index 724947e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fnmacs.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=0 | FileCheck %s -check-prefix=NEON
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=1 | FileCheck %s -check-prefix=NEONFP
-
-define float @test(float %acc, float %a, float %b) {
-entry:
-; VFP2: vmls.f32
-; NEON: vmls.f32
-
-; NEONFP-NOT: vmls
-; NEONFP-NOT: vmov.f32
-; NEONFP: vmul.f32
-; NEONFP: vsub.f32
-; NEONFP: vmov
-
- %0 = fmul float %a, %b
- %1 = fsub float %acc, %0
- ret float %1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fnmscs.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fnmscs.ll
deleted file mode 100644
index ad21882..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fnmscs.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=1 | FileCheck %s
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=0 | FileCheck %s
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s
-
-define float @test1(float %acc, float %a, float %b) nounwind {
-; CHECK: vnmla.f32 s2, s1, s0
-entry:
- %0 = fmul float %a, %b
- %1 = fsub float -0.0, %0
- %2 = fsub float %1, %acc
- ret float %2
-}
-
-define float @test2(float %acc, float %a, float %b) nounwind {
-; CHECK: vnmla.f32 s2, s1, s0
-entry:
- %0 = fmul float %a, %b
- %1 = fmul float -1.0, %0
- %2 = fsub float %1, %acc
- ret float %2
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fnmul.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fnmul.ll
deleted file mode 100644
index 6d7bc05..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fnmul.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2 | grep vnmul.f64
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2 -enable-sign-dependent-rounding-fp-math | grep vmul.f64
-
-
-define double @t1(double %a, double %b) {
-entry:
- %tmp2 = fsub double -0.000000e+00, %a ; <double> [#uses=1]
- %tmp4 = fmul double %tmp2, %b ; <double> [#uses=1]
- ret double %tmp4
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fnmuls.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fnmuls.ll
deleted file mode 100644
index efd87d2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fnmuls.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; XFAIL: *
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=1 | FileCheck %s
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=0 | FileCheck %s
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s
-
-define float @test1(float %a, float %b) nounwind {
-; CHECK: fnmscs s2, s1, s0
-entry:
- %0 = fmul float %a, %b
- %1 = fsub float -0.0, %0
- ret float %1
-}
-
-define float @test2(float %a, float %b) nounwind {
-; CHECK: fnmscs s2, s1, s0
-entry:
- %0 = fmul float %a, %b
- %1 = fmul float -1.0, %0
- ret float %1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/formal.ll b/libclamav/c++/llvm/test/CodeGen/ARM/formal.ll
deleted file mode 100644
index 4ac10ba..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/formal.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
-
-declare void @bar(i64 %x, i64 %y)
-
-define void @foo() {
- call void @bar(i64 2, i64 3)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fp.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fp.ll
deleted file mode 100644
index 8fbd45b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fp.ll
+++ /dev/null
@@ -1,78 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
-
-define float @f(i32 %a) {
-;CHECK: f:
-;CHECK: vmov
-;CHECK-NEXT: vcvt.f32.s32
-;CHECK-NEXT: vmov
-entry:
- %tmp = sitofp i32 %a to float ; <float> [#uses=1]
- ret float %tmp
-}
-
-define double @g(i32 %a) {
-;CHECK: g:
-;CHECK: vmov
-;CHECK-NEXT: vcvt.f64.s32
-;CHECK-NEXT: vmov
-entry:
- %tmp = sitofp i32 %a to double ; <double> [#uses=1]
- ret double %tmp
-}
-
-define double @uint_to_double(i32 %a) {
-;CHECK: uint_to_double:
-;CHECK: vmov
-;CHECK-NEXT: vcvt.f64.u32
-;CHECK-NEXT: vmov
-entry:
- %tmp = uitofp i32 %a to double ; <double> [#uses=1]
- ret double %tmp
-}
-
-define float @uint_to_float(i32 %a) {
-;CHECK: uint_to_float:
-;CHECK: vmov
-;CHECK-NEXT: vcvt.f32.u32
-;CHECK-NEXT: vmov
-entry:
- %tmp = uitofp i32 %a to float ; <float> [#uses=1]
- ret float %tmp
-}
-
-define double @h(double* %v) {
-;CHECK: h:
-;CHECK: vldr.64
-;CHECK-NEXT: vmov
-entry:
- %tmp = load double* %v ; <double> [#uses=1]
- ret double %tmp
-}
-
-define float @h2() {
-;CHECK: h2:
-;CHECK: 1065353216
-entry:
- ret float 1.000000e+00
-}
-
-define double @f2(double %a) {
-;CHECK: f2:
-;CHECK-NOT: vmov
- ret double %a
-}
-
-define void @f3() {
-;CHECK: f3:
-;CHECK-NOT: vmov
-;CHECK: f4
-entry:
- %tmp = call double @f5( ) ; <double> [#uses=1]
- call void @f4( double %tmp )
- ret void
-}
-
-declare void @f4(double)
-
-declare double @f5()
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fp_convert.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fp_convert.ll
deleted file mode 100644
index 2adac78..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fp_convert.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=1 | FileCheck %s -check-prefix=NEON
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=0 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s -check-prefix=NEON
-; RUN: llc < %s -march=arm -mcpu=cortex-a9 | FileCheck %s -check-prefix=VFP2
-
-define i32 @test1(float %a, float %b) {
-; VFP2: test1:
-; VFP2: vcvt.s32.f32 s0, s0
-; NEON: test1:
-; NEON: vcvt.s32.f32 d0, d0
-entry:
- %0 = fadd float %a, %b
- %1 = fptosi float %0 to i32
- ret i32 %1
-}
-
-define i32 @test2(float %a, float %b) {
-; VFP2: test2:
-; VFP2: vcvt.u32.f32 s0, s0
-; NEON: test2:
-; NEON: vcvt.u32.f32 d0, d0
-entry:
- %0 = fadd float %a, %b
- %1 = fptoui float %0 to i32
- ret i32 %1
-}
-
-define float @test3(i32 %a, i32 %b) {
-; VFP2: test3:
-; VFP2: vcvt.f32.u32 s0, s0
-; NEON: test3:
-; NEON: vcvt.f32.u32 d0, d0
-entry:
- %0 = add i32 %a, %b
- %1 = uitofp i32 %0 to float
- ret float %1
-}
-
-define float @test4(i32 %a, i32 %b) {
-; VFP2: test4:
-; VFP2: vcvt.f32.s32 s0, s0
-; NEON: test4:
-; NEON: vcvt.f32.s32 d0, d0
-entry:
- %0 = add i32 %a, %b
- %1 = sitofp i32 %0 to float
- ret float %1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fparith.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fparith.ll
deleted file mode 100644
index ce6d6b2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fparith.ll
+++ /dev/null
@@ -1,101 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
-
-define float @f1(float %a, float %b) {
-;CHECK: f1:
-;CHECK: vadd.f32
-entry:
- %tmp = fadd float %a, %b ; <float> [#uses=1]
- ret float %tmp
-}
-
-define double @f2(double %a, double %b) {
-;CHECK: f2:
-;CHECK: vadd.f64
-entry:
- %tmp = fadd double %a, %b ; <double> [#uses=1]
- ret double %tmp
-}
-
-define float @f3(float %a, float %b) {
-;CHECK: f3:
-;CHECK: vmul.f32
-entry:
- %tmp = fmul float %a, %b ; <float> [#uses=1]
- ret float %tmp
-}
-
-define double @f4(double %a, double %b) {
-;CHECK: f4:
-;CHECK: vmul.f64
-entry:
- %tmp = fmul double %a, %b ; <double> [#uses=1]
- ret double %tmp
-}
-
-define float @f5(float %a, float %b) {
-;CHECK: f5:
-;CHECK: vsub.f32
-entry:
- %tmp = fsub float %a, %b ; <float> [#uses=1]
- ret float %tmp
-}
-
-define double @f6(double %a, double %b) {
-;CHECK: f6:
-;CHECK: vsub.f64
-entry:
- %tmp = fsub double %a, %b ; <double> [#uses=1]
- ret double %tmp
-}
-
-define float @f7(float %a) {
-;CHECK: f7:
-;CHECK: eor
-entry:
- %tmp1 = fsub float -0.000000e+00, %a ; <float> [#uses=1]
- ret float %tmp1
-}
-
-define double @f8(double %a) {
-;CHECK: f8:
-;CHECK: vneg.f64
-entry:
- %tmp1 = fsub double -0.000000e+00, %a ; <double> [#uses=1]
- ret double %tmp1
-}
-
-define float @f9(float %a, float %b) {
-;CHECK: f9:
-;CHECK: vdiv.f32
-entry:
- %tmp1 = fdiv float %a, %b ; <float> [#uses=1]
- ret float %tmp1
-}
-
-define double @f10(double %a, double %b) {
-;CHECK: f10:
-;CHECK: vdiv.f64
-entry:
- %tmp1 = fdiv double %a, %b ; <double> [#uses=1]
- ret double %tmp1
-}
-
-define float @f11(float %a) {
-;CHECK: f11:
-;CHECK: bic
-entry:
- %tmp1 = call float @fabsf( float %a ) ; <float> [#uses=1]
- ret float %tmp1
-}
-
-declare float @fabsf(float)
-
-define double @f12(double %a) {
-;CHECK: f12:
-;CHECK: vabs.f64
-entry:
- %tmp1 = call double @fabs( double %a ) ; <double> [#uses=1]
- ret double %tmp1
-}
-
-declare double @fabs(double)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fpcmp.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fpcmp.ll
deleted file mode 100644
index 260ec49..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fpcmp.ll
+++ /dev/null
@@ -1,71 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
-
-define i32 @f1(float %a) {
-;CHECK: f1:
-;CHECK: vcmpe.f32
-;CHECK: movmi
-entry:
- %tmp = fcmp olt float %a, 1.000000e+00 ; <i1> [#uses=1]
- %tmp1 = zext i1 %tmp to i32 ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
-define i32 @f2(float %a) {
-;CHECK: f2:
-;CHECK: vcmpe.f32
-;CHECK: moveq
-entry:
- %tmp = fcmp oeq float %a, 1.000000e+00 ; <i1> [#uses=1]
- %tmp2 = zext i1 %tmp to i32 ; <i32> [#uses=1]
- ret i32 %tmp2
-}
-
-define i32 @f3(float %a) {
-;CHECK: f3:
-;CHECK: vcmpe.f32
-;CHECK: movgt
-entry:
- %tmp = fcmp ogt float %a, 1.000000e+00 ; <i1> [#uses=1]
- %tmp3 = zext i1 %tmp to i32 ; <i32> [#uses=1]
- ret i32 %tmp3
-}
-
-define i32 @f4(float %a) {
-;CHECK: f4:
-;CHECK: vcmpe.f32
-;CHECK: movge
-entry:
- %tmp = fcmp oge float %a, 1.000000e+00 ; <i1> [#uses=1]
- %tmp4 = zext i1 %tmp to i32 ; <i32> [#uses=1]
- ret i32 %tmp4
-}
-
-define i32 @f5(float %a) {
-;CHECK: f5:
-;CHECK: vcmpe.f32
-;CHECK: movls
-entry:
- %tmp = fcmp ole float %a, 1.000000e+00 ; <i1> [#uses=1]
- %tmp5 = zext i1 %tmp to i32 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
-define i32 @f6(float %a) {
-;CHECK: f6:
-;CHECK: vcmpe.f32
-;CHECK: movne
-entry:
- %tmp = fcmp une float %a, 1.000000e+00 ; <i1> [#uses=1]
- %tmp6 = zext i1 %tmp to i32 ; <i32> [#uses=1]
- ret i32 %tmp6
-}
-
-define i32 @g1(double %a) {
-;CHECK: g1:
-;CHECK: vcmpe.f64
-;CHECK: movmi
-entry:
- %tmp = fcmp olt double %a, 1.000000e+00 ; <i1> [#uses=1]
- %tmp7 = zext i1 %tmp to i32 ; <i32> [#uses=1]
- ret i32 %tmp7
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fpcmp_ueq.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fpcmp_ueq.ll
deleted file mode 100644
index 67f70e9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fpcmp_ueq.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=arm | grep moveq
-; RUN: llc < %s -march=arm -mattr=+vfp2 | grep movvs
-
-define i32 @f7(float %a, float %b) {
-entry:
- %tmp = fcmp ueq float %a,%b
- %retval = select i1 %tmp, i32 666, i32 42
- ret i32 %retval
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fpconsts.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fpconsts.ll
deleted file mode 100644
index 710994d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fpconsts.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp3 | FileCheck %s
-
-define arm_apcscc float @t1(float %x) nounwind readnone optsize {
-entry:
-; CHECK: t1:
-; CHECK: vmov.f32 s1, #4.000000e+00
- %0 = fadd float %x, 4.000000e+00
- ret float %0
-}
-
-define arm_apcscc double @t2(double %x) nounwind readnone optsize {
-entry:
-; CHECK: t2:
-; CHECK: vmov.f64 d1, #3.000000e+00
- %0 = fadd double %x, 3.000000e+00
- ret double %0
-}
-
-define arm_apcscc double @t3(double %x) nounwind readnone optsize {
-entry:
-; CHECK: t3:
-; CHECK: vmov.f64 d1, #-1.300000e+01
- %0 = fmul double %x, -1.300000e+01
- ret double %0
-}
-
-define arm_apcscc float @t4(float %x) nounwind readnone optsize {
-entry:
-; CHECK: t4:
-; CHECK: vmov.f32 s1, #-2.400000e+01
- %0 = fmul float %x, -2.400000e+01
- ret float %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fpconv.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fpconv.ll
deleted file mode 100644
index bf197a4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fpconv.ll
+++ /dev/null
@@ -1,102 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s --check-prefix=CHECK-VFP
-; RUN: llc < %s -march=arm | FileCheck %s
-
-define float @f1(double %x) {
-;CHECK-VFP: f1:
-;CHECK-VFP: vcvt.f32.f64
-;CHECK: f1:
-;CHECK: truncdfsf2
-entry:
- %tmp1 = fptrunc double %x to float ; <float> [#uses=1]
- ret float %tmp1
-}
-
-define double @f2(float %x) {
-;CHECK-VFP: f2:
-;CHECK-VFP: vcvt.f64.f32
-;CHECK: f2:
-;CHECK: extendsfdf2
-entry:
- %tmp1 = fpext float %x to double ; <double> [#uses=1]
- ret double %tmp1
-}
-
-define i32 @f3(float %x) {
-;CHECK-VFP: f3:
-;CHECK-VFP: vcvt.s32.f32
-;CHECK: f3:
-;CHECK: fixsfsi
-entry:
- %tmp = fptosi float %x to i32 ; <i32> [#uses=1]
- ret i32 %tmp
-}
-
-define i32 @f4(float %x) {
-;CHECK-VFP: f4:
-;CHECK-VFP: vcvt.u32.f32
-;CHECK: f4:
-;CHECK: fixunssfsi
-entry:
- %tmp = fptoui float %x to i32 ; <i32> [#uses=1]
- ret i32 %tmp
-}
-
-define i32 @f5(double %x) {
-;CHECK-VFP: f5:
-;CHECK-VFP: vcvt.s32.f64
-;CHECK: f5:
-;CHECK: fixdfsi
-entry:
- %tmp = fptosi double %x to i32 ; <i32> [#uses=1]
- ret i32 %tmp
-}
-
-define i32 @f6(double %x) {
-;CHECK-VFP: f6:
-;CHECK-VFP: vcvt.u32.f64
-;CHECK: f6:
-;CHECK: fixunsdfsi
-entry:
- %tmp = fptoui double %x to i32 ; <i32> [#uses=1]
- ret i32 %tmp
-}
-
-define float @f7(i32 %a) {
-;CHECK-VFP: f7:
-;CHECK-VFP: vcvt.f32.s32
-;CHECK: f7:
-;CHECK: floatsisf
-entry:
- %tmp = sitofp i32 %a to float ; <float> [#uses=1]
- ret float %tmp
-}
-
-define double @f8(i32 %a) {
-;CHECK-VFP: f8:
-;CHECK-VFP: vcvt.f64.s32
-;CHECK: f8:
-;CHECK: floatsidf
-entry:
- %tmp = sitofp i32 %a to double ; <double> [#uses=1]
- ret double %tmp
-}
-
-define float @f9(i32 %a) {
-;CHECK-VFP: f9:
-;CHECK-VFP: vcvt.f32.u32
-;CHECK: f9:
-;CHECK: floatunsisf
-entry:
- %tmp = uitofp i32 %a to float ; <float> [#uses=1]
- ret float %tmp
-}
-
-define double @f10(i32 %a) {
-;CHECK-VFP: f10:
-;CHECK-VFP: vcvt.f64.u32
-;CHECK: f10:
-;CHECK: floatunsidf
-entry:
- %tmp = uitofp i32 %a to double ; <double> [#uses=1]
- ret double %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fpmem.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fpmem.ll
deleted file mode 100644
index c3cff18..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fpmem.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
-
-define float @f1(float %a) {
-; CHECK: f1:
-; CHECK: mov r0, #0
- ret float 0.000000e+00
-}
-
-define float @f2(float* %v, float %u) {
-; CHECK: f2:
-; CHECK: vldr.32{{.*}}[
- %tmp = load float* %v ; <float> [#uses=1]
- %tmp1 = fadd float %tmp, %u ; <float> [#uses=1]
- ret float %tmp1
-}
-
-define void @f3(float %a, float %b, float* %v) {
-; CHECK: f3:
-; CHECK: vstr.32{{.*}}[
- %tmp = fadd float %a, %b ; <float> [#uses=1]
- store float %tmp, float* %v
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fpow.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fpow.ll
deleted file mode 100644
index 6d48792..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fpow.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=arm
-
-define double @t(double %x, double %y) nounwind optsize {
-entry:
- %0 = tail call double @llvm.pow.f64( double %x, double %y ) ; <double> [#uses=1]
- ret double %0
-}
-
-declare double @llvm.pow.f64(double, double) nounwind readonly
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fpowi.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fpowi.ll
deleted file mode 100644
index 7f9d62a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fpowi.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi | grep powidf2
-; PR1287
-
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "arm-linux-gnueabi"
-
-define double @_ZSt3powdi(double %__x, i32 %__i) {
-entry:
- %tmp3 = call double @llvm.powi.f64( double %__x, i32 %__i )
- ret double %tmp3
-}
-
-declare double @llvm.powi.f64(double, i32)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fptoint.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fptoint.ll
deleted file mode 100644
index 299cb8f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fptoint.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2 | FileCheck %s
-
-@i = weak global i32 0 ; <i32*> [#uses=2]
-@u = weak global i32 0 ; <i32*> [#uses=2]
-
-define i32 @foo1(float *%x) {
- %tmp1 = load float* %x
- %tmp2 = bitcast float %tmp1 to i32
- ret i32 %tmp2
-}
-
-define i64 @foo2(double *%x) {
- %tmp1 = load double* %x
- %tmp2 = bitcast double %tmp1 to i64
- ret i64 %tmp2
-}
-
-define void @foo5(float %x) {
- %tmp1 = fptosi float %x to i32
- store i32 %tmp1, i32* @i
- ret void
-}
-
-define void @foo6(float %x) {
- %tmp1 = fptoui float %x to i32
- store i32 %tmp1, i32* @u
- ret void
-}
-
-define void @foo7(double %x) {
- %tmp1 = fptosi double %x to i32
- store i32 %tmp1, i32* @i
- ret void
-}
-
-define void @foo8(double %x) {
- %tmp1 = fptoui double %x to i32
- store i32 %tmp1, i32* @u
- ret void
-}
-
-define void @foo9(double %x) {
- %tmp = fptoui double %x to i16
- store i16 %tmp, i16* null
- ret void
-}
-; CHECK: foo9:
-; CHECK: vmov r0, s0
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/fsubs.ll b/libclamav/c++/llvm/test/CodeGen/ARM/fsubs.ll
deleted file mode 100644
index ae98be3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/fsubs.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s -check-prefix=VFP2
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=1 | FileCheck %s -check-prefix=NFP1
-; RUN: llc < %s -march=arm -mattr=+neon -arm-use-neon-fp=0 | FileCheck %s -check-prefix=NFP0
-
-define float @test(float %a, float %b) {
-entry:
- %0 = fsub float %a, %b
- ret float %0
-}
-
-; VFP2: vsub.f32 s0, s1, s0
-; NFP1: vsub.f32 d0, d1, d0
-; NFP0: vsub.f32 s0, s1, s0
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/globals.ll b/libclamav/c++/llvm/test/CodeGen/ARM/globals.ll
deleted file mode 100644
index 886c0d5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/globals.ll
+++ /dev/null
@@ -1,75 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=static | FileCheck %s -check-prefix=DarwinStatic
-; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=dynamic-no-pic | FileCheck %s -check-prefix=DarwinDynamic
-; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=pic | FileCheck %s -check-prefix=DarwinPIC
-; RUN: llc < %s -mtriple=arm-linux-gnueabi -relocation-model=pic | FileCheck %s -check-prefix=LinuxPIC
-
-@G = external global i32
-
-define i32 @test1() {
- %tmp = load i32* @G
- ret i32 %tmp
-}
-
-; DarwinStatic: _test1:
-; DarwinStatic: ldr r0, LCPI1_0
-; DarwinStatic: ldr r0, [r0]
-; DarwinStatic: bx lr
-
-; DarwinStatic: .align 2
-; DarwinStatic: LCPI1_0:
-; DarwinStatic: .long {{_G$}}
-
-
-; DarwinDynamic: _test1:
-; DarwinDynamic: ldr r0, LCPI1_0
-; DarwinDynamic: ldr r0, [r0]
-; DarwinDynamic: ldr r0, [r0]
-; DarwinDynamic: bx lr
-
-; DarwinDynamic: .align 2
-; DarwinDynamic: LCPI1_0:
-; DarwinDynamic: .long L_G$non_lazy_ptr
-
-; DarwinDynamic: .section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
-; DarwinDynamic: .align 2
-; DarwinDynamic: L_G$non_lazy_ptr:
-; DarwinDynamic: .indirect_symbol _G
-; DarwinDynamic: .long 0
-
-
-
-; DarwinPIC: _test1:
-; DarwinPIC: ldr r0, LCPI1_0
-; DarwinPIC: LPC1_0:
-; DarwinPIC: ldr r0, [pc, +r0]
-; DarwinPIC: ldr r0, [r0]
-; DarwinPIC: bx lr
-
-; DarwinPIC: .align 2
-; DarwinPIC: LCPI1_0:
-; DarwinPIC: .long L_G$non_lazy_ptr-(LPC1_0+8)
-
-; DarwinPIC: .section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
-; DarwinPIC: .align 2
-; DarwinPIC: L_G$non_lazy_ptr:
-; DarwinPIC: .indirect_symbol _G
-; DarwinPIC: .long 0
-
-
-
-; LinuxPIC: test1:
-; LinuxPIC: ldr r0, .LCPI1_0
-; LinuxPIC: ldr r1, .LCPI1_1
-
-; LinuxPIC: .LPC1_0:
-; LinuxPIC: add r0, pc, r0
-; LinuxPIC: ldr r0, [r1, +r0]
-; LinuxPIC: ldr r0, [r0]
-; LinuxPIC: bx lr
-
-; LinuxPIC: .align 2
-; LinuxPIC: .LCPI1_0:
-; LinuxPIC: .long _GLOBAL_OFFSET_TABLE_-(.LPC1_0+8)
-; LinuxPIC: .align 2
-; LinuxPIC: .LCPI1_1:
-; LinuxPIC: .long G(GOT)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/hardfloat_neon.ll b/libclamav/c++/llvm/test/CodeGen/ARM/hardfloat_neon.ll
deleted file mode 100644
index 4abf04b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/hardfloat_neon.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi -mattr=+neon -float-abi=hard
-
-define <16 x i8> @vmulQi8_reg(<16 x i8> %A, <16 x i8> %B) nounwind {
- %tmp1 = mul <16 x i8> %A, %B
- ret <16 x i8> %tmp1
-}
-
-define <16 x i8> @f(<16 x i8> %a, <16 x i8> %b) {
- %tmp = call <16 x i8> @g(<16 x i8> %b)
- ret <16 x i8> %tmp
-}
-
-declare <16 x i8> @g(<16 x i8>)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/hello.ll b/libclamav/c++/llvm/test/CodeGen/ARM/hello.ll
deleted file mode 100644
index ccdc7bf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/hello.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=arm
-; RUN: llc < %s -mtriple=arm-linux-gnueabi | grep mov | count 1
-; RUN: llc < %s -mtriple=arm-linux-gnu --disable-fp-elim | \
-; RUN: grep mov | count 3
-; RUN: llc < %s -mtriple=arm-apple-darwin | grep mov | count 2
-
-@str = internal constant [12 x i8] c"Hello World\00"
-
-define i32 @main() {
- %tmp = call i32 @puts( i8* getelementptr ([12 x i8]* @str, i32 0, i64 0) ) ; <i32> [#uses=0]
- ret i32 0
-}
-
-declare i32 @puts(i8*)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/hidden-vis-2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/hidden-vis-2.ll
deleted file mode 100644
index 90f5308..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/hidden-vis-2.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s
-
-@x = weak hidden global i32 0 ; <i32*> [#uses=1]
-
-define i32 @t() nounwind readonly {
-entry:
-; CHECK: t:
-; CHECK: ldr
-; CHECK-NEXT: ldr
- %0 = load i32* @x, align 4 ; <i32> [#uses=1]
- ret i32 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/hidden-vis-3.ll b/libclamav/c++/llvm/test/CodeGen/ARM/hidden-vis-3.ll
deleted file mode 100644
index 3bd710a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/hidden-vis-3.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin9 | FileCheck %s
-
-@x = external hidden global i32 ; <i32*> [#uses=1]
-@y = extern_weak hidden global i32 ; <i32*> [#uses=1]
-
-define i32 @t() nounwind readonly {
-entry:
-; CHECK: LCPI1_0:
-; CHECK-NEXT: .long _x
-; CHECK: LCPI1_1:
-; CHECK-NEXT: .long _y
-
- %0 = load i32* @x, align 4 ; <i32> [#uses=1]
- %1 = load i32* @y, align 4 ; <i32> [#uses=1]
- %2 = add i32 %1, %0 ; <i32> [#uses=1]
- ret i32 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/hidden-vis.ll b/libclamav/c++/llvm/test/CodeGen/ARM/hidden-vis.ll
deleted file mode 100644
index 3544ae8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/hidden-vis.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux | FileCheck %s -check-prefix=LINUX
-; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s -check-prefix=DARWIN
-
-@a = hidden global i32 0
-@b = external global i32
-
-define weak hidden void @t1() nounwind {
-; LINUX: .hidden t1
-; LINUX: t1:
-
-; DARWIN: .private_extern _t1
-; DARWIN: t1:
- ret void
-}
-
-define weak void @t2() nounwind {
-; LINUX: t2:
-; LINUX: .hidden a
-
-; DARWIN: t2:
-; DARWIN: .private_extern _a
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/iabs.ll b/libclamav/c++/llvm/test/CodeGen/ARM/iabs.ll
deleted file mode 100644
index 63808b2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/iabs.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-
-;; Integer absolute value, should produce something as good as: ARM:
-;; add r3, r0, r0, asr #31
-;; eor r0, r3, r0, asr #31
-;; bx lr
-
-define i32 @test(i32 %a) {
- %tmp1neg = sub i32 0, %a
- %b = icmp sgt i32 %a, -1
- %abs = select i1 %b, i32 %a, i32 %tmp1neg
- ret i32 %abs
-; CHECK: add r1, r0, r0, asr #31
-; CHECK: eor r0, r1, r0, asr #31
-; CHECK: bx lr
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt1.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt1.ll
deleted file mode 100644
index e6aa044..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt1.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=arm
-; RUN: llc < %s -march=arm | grep bx | count 1
-
-define i32 @t1(i32 %a, i32 %b) {
- %tmp2 = icmp eq i32 %a, 0
- br i1 %tmp2, label %cond_false, label %cond_true
-
-cond_true:
- %tmp5 = add i32 %b, 1
- ret i32 %tmp5
-
-cond_false:
- %tmp7 = add i32 %b, -1
- ret i32 %tmp7
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt2.ll
deleted file mode 100644
index ce57d73..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt2.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -march=arm
-; RUN: llc < %s -march=arm | grep bxlt | count 1
-; RUN: llc < %s -march=arm | grep bxgt | count 1
-; RUN: llc < %s -march=arm | grep bxge | count 1
-
-define i32 @t1(i32 %a, i32 %b, i32 %c, i32 %d) {
- %tmp2 = icmp sgt i32 %c, 10
- %tmp5 = icmp slt i32 %d, 4
- %tmp8 = or i1 %tmp5, %tmp2
- %tmp13 = add i32 %b, %a
- br i1 %tmp8, label %cond_true, label %UnifiedReturnBlock
-
-cond_true:
- %tmp15 = add i32 %tmp13, %c
- %tmp1821 = sub i32 %tmp15, %d
- ret i32 %tmp1821
-
-UnifiedReturnBlock:
- ret i32 %tmp13
-}
-
-define i32 @t2(i32 %a, i32 %b, i32 %c, i32 %d) {
- %tmp2 = icmp sgt i32 %c, 10
- %tmp5 = icmp slt i32 %d, 4
- %tmp8 = and i1 %tmp5, %tmp2
- %tmp13 = add i32 %b, %a
- br i1 %tmp8, label %cond_true, label %UnifiedReturnBlock
-
-cond_true:
- %tmp15 = add i32 %tmp13, %c
- %tmp1821 = sub i32 %tmp15, %d
- ret i32 %tmp1821
-
-UnifiedReturnBlock:
- ret i32 %tmp13
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt3.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt3.ll
deleted file mode 100644
index f7ebac6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt3.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=arm
-; RUN: llc < %s -march=arm | grep cmpne | count 1
-; RUN: llc < %s -march=arm | grep bx | count 2
-
-define i32 @t1(i32 %a, i32 %b, i32 %c, i32 %d) {
- switch i32 %c, label %cond_next [
- i32 1, label %cond_true
- i32 7, label %cond_true
- ]
-
-cond_true:
- %tmp12 = add i32 %a, 1
- %tmp1518 = add i32 %tmp12, %b
- ret i32 %tmp1518
-
-cond_next:
- %tmp15 = add i32 %b, %a
- ret i32 %tmp15
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt4.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt4.ll
deleted file mode 100644
index f28c61b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt4.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llc < %s -march=arm
-; RUN: llc < %s -march=arm | grep subgt | count 1
-; RUN: llc < %s -march=arm | grep suble | count 1
-; FIXME: Check for # of unconditional branch after adding branch folding post ifcvt.
-
-define i32 @t(i32 %a, i32 %b) {
-entry:
- %tmp1434 = icmp eq i32 %a, %b ; <i1> [#uses=1]
- br i1 %tmp1434, label %bb17, label %bb.outer
-
-bb.outer: ; preds = %cond_false, %entry
- %b_addr.021.0.ph = phi i32 [ %b, %entry ], [ %tmp10, %cond_false ] ; <i32> [#uses=5]
- %a_addr.026.0.ph = phi i32 [ %a, %entry ], [ %a_addr.026.0, %cond_false ] ; <i32> [#uses=1]
- br label %bb
-
-bb: ; preds = %cond_true, %bb.outer
- %indvar = phi i32 [ 0, %bb.outer ], [ %indvar.next, %cond_true ] ; <i32> [#uses=2]
- %tmp. = sub i32 0, %b_addr.021.0.ph ; <i32> [#uses=1]
- %tmp.40 = mul i32 %indvar, %tmp. ; <i32> [#uses=1]
- %a_addr.026.0 = add i32 %tmp.40, %a_addr.026.0.ph ; <i32> [#uses=6]
- %tmp3 = icmp sgt i32 %a_addr.026.0, %b_addr.021.0.ph ; <i1> [#uses=1]
- br i1 %tmp3, label %cond_true, label %cond_false
-
-cond_true: ; preds = %bb
- %tmp7 = sub i32 %a_addr.026.0, %b_addr.021.0.ph ; <i32> [#uses=2]
- %tmp1437 = icmp eq i32 %tmp7, %b_addr.021.0.ph ; <i1> [#uses=1]
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
- br i1 %tmp1437, label %bb17, label %bb
-
-cond_false: ; preds = %bb
- %tmp10 = sub i32 %b_addr.021.0.ph, %a_addr.026.0 ; <i32> [#uses=2]
- %tmp14 = icmp eq i32 %a_addr.026.0, %tmp10 ; <i1> [#uses=1]
- br i1 %tmp14, label %bb17, label %bb.outer
-
-bb17: ; preds = %cond_false, %cond_true, %entry
- %a_addr.026.1 = phi i32 [ %a, %entry ], [ %tmp7, %cond_true ], [ %a_addr.026.0, %cond_false ] ; <i32> [#uses=1]
- ret i32 %a_addr.026.1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt5.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt5.ll
deleted file mode 100644
index 623f2cb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt5.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
-
-@x = external global i32* ; <i32**> [#uses=1]
-
-define void @foo(i32 %a) {
-entry:
- %tmp = load i32** @x ; <i32*> [#uses=1]
- store i32 %a, i32* %tmp
- ret void
-}
-
-define void @t1(i32 %a, i32 %b) {
-; CHECK: t1:
-; CHECK: ldmfdlt sp!, {r7, pc}
-entry:
- %tmp1 = icmp sgt i32 %a, 10 ; <i1> [#uses=1]
- br i1 %tmp1, label %cond_true, label %UnifiedReturnBlock
-
-cond_true: ; preds = %entry
- tail call void @foo( i32 %b )
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt6.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt6.ll
deleted file mode 100644
index d7fcf7d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt6.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=arm-apple-darwin | \
-; RUN: grep cmpne | count 1
-; RUN: llc < %s -march=arm -mtriple=arm-apple-darwin | \
-; RUN: grep ldmfdhi | count 1
-
-define void @foo(i32 %X, i32 %Y) {
-entry:
- %tmp1 = icmp ult i32 %X, 4 ; <i1> [#uses=1]
- %tmp4 = icmp eq i32 %Y, 0 ; <i1> [#uses=1]
- %tmp7 = or i1 %tmp4, %tmp1 ; <i1> [#uses=1]
- br i1 %tmp7, label %cond_true, label %UnifiedReturnBlock
-
-cond_true: ; preds = %entry
- %tmp10 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-declare i32 @bar(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt7.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt7.ll
deleted file mode 100644
index c60ad93..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt7.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=arm-apple-darwin | \
-; RUN: grep cmpeq | count 1
-; RUN: llc < %s -march=arm -mtriple=arm-apple-darwin | \
-; RUN: grep moveq | count 1
-; RUN: llc < %s -march=arm -mtriple=arm-apple-darwin | \
-; RUN: grep ldmfdeq | count 1
-; FIXME: Need post-ifcvt branch folding to get rid of the extra br at end of BB1.
-
- %struct.quad_struct = type { i32, i32, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct* }
-
-define fastcc i32 @CountTree(%struct.quad_struct* %tree) {
-entry:
- br label %tailrecurse
-
-tailrecurse: ; preds = %bb, %entry
- %tmp6 = load %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=1]
- %tmp9 = load %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=2]
- %tmp12 = load %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=1]
- %tmp14 = icmp eq %struct.quad_struct* null, null ; <i1> [#uses=1]
- %tmp17 = icmp eq %struct.quad_struct* %tmp6, null ; <i1> [#uses=1]
- %tmp23 = icmp eq %struct.quad_struct* %tmp9, null ; <i1> [#uses=1]
- %tmp29 = icmp eq %struct.quad_struct* %tmp12, null ; <i1> [#uses=1]
- %bothcond = and i1 %tmp17, %tmp14 ; <i1> [#uses=1]
- %bothcond1 = and i1 %bothcond, %tmp23 ; <i1> [#uses=1]
- %bothcond2 = and i1 %bothcond1, %tmp29 ; <i1> [#uses=1]
- br i1 %bothcond2, label %return, label %bb
-
-bb: ; preds = %tailrecurse
- %tmp41 = tail call fastcc i32 @CountTree( %struct.quad_struct* %tmp9 ) ; <i32> [#uses=0]
- br label %tailrecurse
-
-return: ; preds = %tailrecurse
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt8.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt8.ll
deleted file mode 100644
index a7da834..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt8.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=arm-apple-darwin | \
-; RUN: grep ldmfdne | count 1
-
- %struct.SString = type { i8*, i32, i32 }
-
-declare void @abort()
-
-define fastcc void @t(%struct.SString* %word, i8 signext %c) {
-entry:
- %tmp1 = icmp eq %struct.SString* %word, null ; <i1> [#uses=1]
- br i1 %tmp1, label %cond_true, label %cond_false
-
-cond_true: ; preds = %entry
- tail call void @abort( )
- unreachable
-
-cond_false: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt9.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt9.ll
deleted file mode 100644
index 05bdc45..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ifcvt9.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=arm
-
-define fastcc void @t() nounwind {
-entry:
- br i1 undef, label %bb.i.i3, label %growMapping.exit
-
-bb.i.i3: ; preds = %entry
- unreachable
-
-growMapping.exit: ; preds = %entry
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/illegal-vector-bitcast.ll b/libclamav/c++/llvm/test/CodeGen/ARM/illegal-vector-bitcast.ll
deleted file mode 100644
index febe6f5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/illegal-vector-bitcast.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=arm
-; RUN: llc < %s -mtriple=arm-linux
-
-define void @foo(<8 x float>* %f, <8 x float>* %g, <4 x i64>* %y)
-{
- %h = load <8 x float>* %f
- %i = fmul <8 x float> %h, <float 0x3FF19999A0000000, float 0x400A666660000000, float 0x40119999A0000000, float 0x40159999A0000000, float 0.5, float 0x3FE3333340000000, float 0x3FE6666660000000, float 0x3FE99999A0000000>
- %m = bitcast <8 x float> %i to <4 x i64>
- %z = load <4 x i64>* %y
- %n = mul <4 x i64> %z, %m
- %p = bitcast <4 x i64> %n to <8 x float>
- store <8 x float> %p, <8 x float>* %g
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/imm.ll b/libclamav/c++/llvm/test/CodeGen/ARM/imm.ll
deleted file mode 100644
index 6f25f9d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/imm.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=arm | not grep CPI
-
-define i32 @test1(i32 %A) {
- %B = add i32 %A, -268435441 ; <i32> [#uses=1]
- ret i32 %B
-}
-
-define i32 @test2() {
- ret i32 65533
-}
-
-define i32 @test3(i32 %A) {
- %B = or i32 %A, 65533 ; <i32> [#uses=1]
- ret i32 %B
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/indirectbr.ll b/libclamav/c++/llvm/test/CodeGen/ARM/indirectbr.ll
deleted file mode 100644
index 5135d03..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/indirectbr.ll
+++ /dev/null
@@ -1,64 +0,0 @@
-; RUN: llc < %s -relocation-model=pic -mtriple=arm-apple-darwin | FileCheck %s -check-prefix=ARM
-; RUN: llc < %s -relocation-model=pic -mtriple=thumb-apple-darwin | FileCheck %s -check-prefix=THUMB
-; RUN: llc < %s -relocation-model=static -mtriple=thumbv7-apple-darwin | FileCheck %s -check-prefix=THUMB2
-
-@nextaddr = global i8* null ; <i8**> [#uses=2]
-@C.0.2070 = private constant [5 x i8*] [i8* blockaddress(@foo, %L1), i8* blockaddress(@foo, %L2), i8* blockaddress(@foo, %L3), i8* blockaddress(@foo, %L4), i8* blockaddress(@foo, %L5)] ; <[5 x i8*]*> [#uses=1]
-
-define internal arm_apcscc i32 @foo(i32 %i) nounwind {
-; ARM: foo:
-; THUMB: foo:
-; THUMB2: foo:
-entry:
- %0 = load i8** @nextaddr, align 4 ; <i8*> [#uses=2]
- %1 = icmp eq i8* %0, null ; <i1> [#uses=1]
-; indirect branch gets duplicated here
-; ARM: bx
-; THUMB: mov pc, r1
-; THUMB2: mov pc, r1
- br i1 %1, label %bb3, label %bb2
-
-bb2: ; preds = %entry, %bb3
- %gotovar.4.0 = phi i8* [ %gotovar.4.0.pre, %bb3 ], [ %0, %entry ] ; <i8*> [#uses=1]
-; ARM: bx
-; THUMB: mov pc, r1
-; THUMB2: mov pc, r1
- indirectbr i8* %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
-
-bb3: ; preds = %entry
- %2 = getelementptr inbounds [5 x i8*]* @C.0.2070, i32 0, i32 %i ; <i8**> [#uses=1]
- %gotovar.4.0.pre = load i8** %2, align 4 ; <i8*> [#uses=1]
- br label %bb2
-
-L5: ; preds = %bb2
- br label %L4
-
-L4: ; preds = %L5, %bb2
- %res.0 = phi i32 [ 385, %L5 ], [ 35, %bb2 ] ; <i32> [#uses=1]
- br label %L3
-
-L3: ; preds = %L4, %bb2
- %res.1 = phi i32 [ %res.0, %L4 ], [ 5, %bb2 ] ; <i32> [#uses=1]
- br label %L2
-
-L2: ; preds = %L3, %bb2
- %res.2 = phi i32 [ %res.1, %L3 ], [ 1, %bb2 ] ; <i32> [#uses=1]
- %phitmp = mul i32 %res.2, 6 ; <i32> [#uses=1]
- br label %L1
-
-L1: ; preds = %L2, %bb2
- %res.3 = phi i32 [ %phitmp, %L2 ], [ 2, %bb2 ] ; <i32> [#uses=1]
-; ARM: ldr r1, LCPI
-; ARM: add r1, pc, r1
-; ARM: str r1
-; THUMB: ldr.n r2, LCPI
-; THUMB: add r2, pc
-; THUMB: str r2
-; THUMB2: ldr.n r2, LCPI
-; THUMB2-NEXT: str r2
- store i8* blockaddress(@foo, %L5), i8** @nextaddr, align 4
- ret i32 %res.3
-}
-; ARM: .long L_BA4__foo_L5-(LPC{{.*}}+8)
-; THUMB: .long L_BA4__foo_L5-(LPC{{.*}}+4)
-; THUMB2: .long L_BA4__foo_L5
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/inlineasm-imm-arm.ll b/libclamav/c++/llvm/test/CodeGen/ARM/inlineasm-imm-arm.ll
deleted file mode 100644
index 45dfcf0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/inlineasm-imm-arm.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -march=arm
-
-; Test ARM-mode "I" constraint, for any Data Processing immediate.
-define i32 @testI(i32 %x) {
- %y = call i32 asm "add $0, $1, $2", "=r,r,I"( i32 %x, i32 65280 ) nounwind
- ret i32 %y
-}
-
-; Test ARM-mode "J" constraint, for compatibility with unknown use in GCC.
-define void @testJ() {
- tail call void asm sideeffect ".word $0", "J"( i32 4080 ) nounwind
- ret void
-}
-
-; Test ARM-mode "K" constraint, for bitwise inverted Data Processing immediates.
-define void @testK() {
- tail call void asm sideeffect ".word $0", "K"( i32 16777215 ) nounwind
- ret void
-}
-
-; Test ARM-mode "L" constraint, for negated Data Processing immediates.
-define void @testL() {
- tail call void asm sideeffect ".word $0", "L"( i32 -65280 ) nounwind
- ret void
-}
-
-; Test ARM-mode "M" constraint, for value between 0 and 32.
-define i32 @testM(i32 %x) {
- %y = call i32 asm "lsl $0, $1, $2", "=r,r,M"( i32 %x, i32 31 ) nounwind
- ret i32 %y
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/inlineasm.ll b/libclamav/c++/llvm/test/CodeGen/ARM/inlineasm.ll
deleted file mode 100644
index d522348..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/inlineasm.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6
-
-define i32 @test1(i32 %tmp54) {
- %tmp56 = tail call i32 asm "uxtb16 $0,$1", "=r,r"( i32 %tmp54 ) ; <i32> [#uses=1]
- ret i32 %tmp56
-}
-
-define void @test2() {
- %tmp1 = call i64 asm "ldmia $1!, {$0, ${0:H}}", "=r,=*r,1"( i32** null, i32* null ) ; <i64> [#uses=2]
- %tmp2 = lshr i64 %tmp1, 32 ; <i64> [#uses=1]
- %tmp3 = trunc i64 %tmp2 to i32 ; <i32> [#uses=1]
- %tmp4 = call i32 asm "pkhbt $0, $1, $2, lsl #16", "=r,r,r"( i32 0, i32 %tmp3 ) ; <i32> [#uses=0]
- ret void
-}
-
-define void @test3() {
- tail call void asm sideeffect "/* number: ${0:c} */", "i"( i32 1 )
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/inlineasm2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/inlineasm2.ll
deleted file mode 100644
index a99bccf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/inlineasm2.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
-
-define double @__ieee754_sqrt(double %x) {
- %tmp2 = tail call double asm "fsqrtd ${0:P}, ${1:P}", "=w,w"( double %x )
- ret double %tmp2
-}
-
-define float @__ieee754_sqrtf(float %x) {
- %tmp2 = tail call float asm "fsqrts $0, $1", "=w,w"( float %x )
- ret float %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/inlineasm3.ll b/libclamav/c++/llvm/test/CodeGen/ARM/inlineasm3.ll
deleted file mode 100644
index f062772..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/inlineasm3.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-; Radar 7449043
-%struct.int32x4_t = type { <4 x i32> }
-
-define arm_apcscc void @t() nounwind {
-entry:
-; CHECK: vmov.I64 q15, #0
-; CHECK: vmov.32 d30[0], r0
-; CHECK: vmov q0, q15
- %tmp = alloca %struct.int32x4_t, align 16
- call void asm sideeffect "vmov.I64 q15, #0\0Avmov.32 d30[0], $1\0Avmov ${0:q}, q15\0A", "=*w,r,~{d31},~{d30}"(%struct.int32x4_t* %tmp, i32 8192) nounwind
- ret void
-}
-
-; Radar 7457110
-%struct.int32x2_t = type { <4 x i32> }
-
-define arm_apcscc void @t2() nounwind {
-entry:
-; CHECK: vmov d30, d0
-; CHECK: vmov.32 r0, d30[0]
- %asmtmp2 = tail call i32 asm sideeffect "vmov d30, $1\0Avmov.32 $0, d30[0]\0A", "=r,w,~{d30}"(<2 x i32> undef) nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/insn-sched1.ll b/libclamav/c++/llvm/test/CodeGen/ARM/insn-sched1.ll
deleted file mode 100644
index 59f0d53..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/insn-sched1.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6
-; RUN: llc < %s -mtriple=arm-apple-darwin -mattr=+v6 |\
-; RUN: grep mov | count 3
-
-define i32 @test(i32 %x) {
- %tmp = trunc i32 %x to i16 ; <i16> [#uses=1]
- %tmp2 = tail call i32 @f( i32 1, i16 %tmp ) ; <i32> [#uses=1]
- ret i32 %tmp2
-}
-
-declare i32 @f(i32, i16)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ispositive.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ispositive.ll
deleted file mode 100644
index 245ed51..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ispositive.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-
-define i32 @test1(i32 %X) {
-; CHECK: mov r0, r0, lsr #31
-entry:
- icmp slt i32 %X, 0 ; <i1>:0 [#uses=1]
- zext i1 %0 to i32 ; <i32>:1 [#uses=1]
- ret i32 %1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/large-stack.ll b/libclamav/c++/llvm/test/CodeGen/ARM/large-stack.ll
deleted file mode 100644
index ddf0f0e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/large-stack.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=arm
-
-define void @test1() {
- %tmp = alloca [ 64 x i32 ] , align 4
- ret void
-}
-
-define void @test2() {
- %tmp = alloca [ 4168 x i8 ] , align 4
- ret void
-}
-
-define i32 @test3() {
- %retval = alloca i32, align 4
- %tmp = alloca i32, align 4
- %a = alloca [805306369 x i8], align 16
- store i32 0, i32* %tmp
- %tmp1 = load i32* %tmp
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ldm.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ldm.ll
deleted file mode 100644
index 1a016a0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ldm.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s
-
-@X = external global [0 x i32] ; <[0 x i32]*> [#uses=5]
-
-define i32 @t1() {
-; CHECK: t1:
-; CHECK: ldmia
- %tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 0) ; <i32> [#uses=1]
- %tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 1) ; <i32> [#uses=1]
- %tmp4 = tail call i32 @f1( i32 %tmp, i32 %tmp3 ) ; <i32> [#uses=1]
- ret i32 %tmp4
-}
-
-define i32 @t2() {
-; CHECK: t2:
-; CHECK: ldmia
- %tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 2) ; <i32> [#uses=1]
- %tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 3) ; <i32> [#uses=1]
- %tmp5 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 4) ; <i32> [#uses=1]
- %tmp6 = tail call i32 @f2( i32 %tmp, i32 %tmp3, i32 %tmp5 ) ; <i32> [#uses=1]
- ret i32 %tmp6
-}
-
-define i32 @t3() {
-; CHECK: t3:
-; CHECK: ldmib
-; CHECK: ldmfd sp!
- %tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 1) ; <i32> [#uses=1]
- %tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 2) ; <i32> [#uses=1]
- %tmp5 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 3) ; <i32> [#uses=1]
- %tmp6 = tail call i32 @f2( i32 %tmp, i32 %tmp3, i32 %tmp5 ) ; <i32> [#uses=1]
- ret i32 %tmp6
-}
-
-declare i32 @f1(i32, i32)
-
-declare i32 @f2(i32, i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ldr.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ldr.ll
deleted file mode 100644
index 011e61c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ldr.ll
+++ /dev/null
@@ -1,71 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-
-define i32 @f1(i32* %v) {
-; CHECK: f1:
-; CHECK: ldr r0
-entry:
- %tmp = load i32* %v
- ret i32 %tmp
-}
-
-define i32 @f2(i32* %v) {
-; CHECK: f2:
-; CHECK: ldr r0
-entry:
- %tmp2 = getelementptr i32* %v, i32 1023
- %tmp = load i32* %tmp2
- ret i32 %tmp
-}
-
-define i32 @f3(i32* %v) {
-; CHECK: f3:
-; CHECK: mov
-; CHECK: ldr r0
-entry:
- %tmp2 = getelementptr i32* %v, i32 1024
- %tmp = load i32* %tmp2
- ret i32 %tmp
-}
-
-define i32 @f4(i32 %base) {
-; CHECK: f4:
-; CHECK-NOT: mvn
-; CHECK: ldr r0
-entry:
- %tmp1 = sub i32 %base, 128
- %tmp2 = inttoptr i32 %tmp1 to i32*
- %tmp3 = load i32* %tmp2
- ret i32 %tmp3
-}
-
-define i32 @f5(i32 %base, i32 %offset) {
-; CHECK: f5:
-; CHECK: ldr r0
-entry:
- %tmp1 = add i32 %base, %offset
- %tmp2 = inttoptr i32 %tmp1 to i32*
- %tmp3 = load i32* %tmp2
- ret i32 %tmp3
-}
-
-define i32 @f6(i32 %base, i32 %offset) {
-; CHECK: f6:
-; CHECK: ldr r0{{.*}}lsl{{.*}}
-entry:
- %tmp1 = shl i32 %offset, 2
- %tmp2 = add i32 %base, %tmp1
- %tmp3 = inttoptr i32 %tmp2 to i32*
- %tmp4 = load i32* %tmp3
- ret i32 %tmp4
-}
-
-define i32 @f7(i32 %base, i32 %offset) {
-; CHECK: f7:
-; CHECK: ldr r0{{.*}}lsr{{.*}}
-entry:
- %tmp1 = lshr i32 %offset, 2
- %tmp2 = add i32 %base, %tmp1
- %tmp3 = inttoptr i32 %tmp2 to i32*
- %tmp4 = load i32* %tmp3
- ret i32 %tmp4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ldr_ext.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ldr_ext.ll
deleted file mode 100644
index d29eb02..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ldr_ext.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-
-define i32 @test1(i8* %t1) nounwind {
-; CHECK: ldrb
- %tmp.u = load i8* %t1
- %tmp1.s = zext i8 %tmp.u to i32
- ret i32 %tmp1.s
-}
-
-define i32 @test2(i16* %t1) nounwind {
-; CHECK: ldrh
- %tmp.u = load i16* %t1
- %tmp1.s = zext i16 %tmp.u to i32
- ret i32 %tmp1.s
-}
-
-define i32 @test3(i8* %t0) nounwind {
-; CHECK: ldrsb
- %tmp.s = load i8* %t0
- %tmp1.s = sext i8 %tmp.s to i32
- ret i32 %tmp1.s
-}
-
-define i32 @test4(i16* %t0) nounwind {
-; CHECK: ldrsh
- %tmp.s = load i16* %t0
- %tmp1.s = sext i16 %tmp.s to i32
- ret i32 %tmp1.s
-}
-
-define i32 @test5() nounwind {
-; CHECK: mov r0, #0
-; CHECK: ldrsh
- %tmp.s = load i16* null
- %tmp1.s = sext i16 %tmp.s to i32
- ret i32 %tmp1.s
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ldr_frame.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ldr_frame.ll
deleted file mode 100644
index a3abdb6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ldr_frame.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -march=arm | not grep mov
-
-define i32 @f1() {
- %buf = alloca [32 x i32], align 4
- %tmp = getelementptr [32 x i32]* %buf, i32 0, i32 0
- %tmp1 = load i32* %tmp
- ret i32 %tmp1
-}
-
-define i32 @f2() {
- %buf = alloca [32 x i8], align 4
- %tmp = getelementptr [32 x i8]* %buf, i32 0, i32 0
- %tmp1 = load i8* %tmp
- %tmp2 = zext i8 %tmp1 to i32
- ret i32 %tmp2
-}
-
-define i32 @f3() {
- %buf = alloca [32 x i32], align 4
- %tmp = getelementptr [32 x i32]* %buf, i32 0, i32 32
- %tmp1 = load i32* %tmp
- ret i32 %tmp1
-}
-
-define i32 @f4() {
- %buf = alloca [32 x i8], align 4
- %tmp = getelementptr [32 x i8]* %buf, i32 0, i32 2
- %tmp1 = load i8* %tmp
- %tmp2 = zext i8 %tmp1 to i32
- ret i32 %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ldr_post.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ldr_post.ll
deleted file mode 100644
index 97a48e1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ldr_post.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=arm | \
-; RUN: grep {ldr.*\\\[.*\],} | count 1
-
-define i32 @test(i32 %a, i32 %b, i32 %c) {
- %tmp1 = mul i32 %a, %b ; <i32> [#uses=2]
- %tmp2 = inttoptr i32 %tmp1 to i32* ; <i32*> [#uses=1]
- %tmp3 = load i32* %tmp2 ; <i32> [#uses=1]
- %tmp4 = sub i32 %tmp1, %c ; <i32> [#uses=1]
- %tmp5 = mul i32 %tmp4, %tmp3 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ldr_pre.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ldr_pre.ll
deleted file mode 100644
index 7c44284..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ldr_pre.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=arm | \
-; RUN: grep {ldr.*\\!} | count 2
-
-define i32* @test1(i32* %X, i32* %dest) {
- %Y = getelementptr i32* %X, i32 4 ; <i32*> [#uses=2]
- %A = load i32* %Y ; <i32> [#uses=1]
- store i32 %A, i32* %dest
- ret i32* %Y
-}
-
-define i32 @test2(i32 %a, i32 %b, i32 %c) {
- %tmp1 = sub i32 %a, %b ; <i32> [#uses=2]
- %tmp2 = inttoptr i32 %tmp1 to i32* ; <i32*> [#uses=1]
- %tmp3 = load i32* %tmp2 ; <i32> [#uses=1]
- %tmp4 = sub i32 %tmp1, %c ; <i32> [#uses=1]
- %tmp5 = add i32 %tmp4, %tmp3 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ldrd.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ldrd.ll
deleted file mode 100644
index c366e2d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ldrd.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -mtriple=armv6-apple-darwin | FileCheck %s -check-prefix=V6
-; RUN: llc < %s -mtriple=armv5-apple-darwin | FileCheck %s -check-prefix=V5
-; RUN: llc < %s -mtriple=armv6-eabi | FileCheck %s -check-prefix=EABI
-; rdar://r6949835
-
-@b = external global i64*
-
-define i64 @t(i64 %a) nounwind readonly {
-entry:
-;V6: ldrd r2, [r2]
-
-;V5: ldr r3, [r2]
-;V5: ldr r2, [r2, #+4]
-
-;EABI: ldr r3, [r2]
-;EABI: ldr r2, [r2, #+4]
-
- %0 = load i64** @b, align 4
- %1 = load i64* %0, align 4
- %2 = mul i64 %1, %a
- ret i64 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/load.ll b/libclamav/c++/llvm/test/CodeGen/ARM/load.ll
deleted file mode 100644
index 253b0e1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/load.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -march=arm > %t
-; RUN: grep ldrsb %t
-; RUN: grep ldrb %t
-; RUN: grep ldrsh %t
-; RUN: grep ldrh %t
-
-
-define i32 @f1(i8* %p) {
-entry:
- %tmp = load i8* %p ; <i8> [#uses=1]
- %tmp1 = sext i8 %tmp to i32 ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
-define i32 @f2(i8* %p) {
-entry:
- %tmp = load i8* %p ; <i8> [#uses=1]
- %tmp2 = zext i8 %tmp to i32 ; <i32> [#uses=1]
- ret i32 %tmp2
-}
-
-define i32 @f3(i16* %p) {
-entry:
- %tmp = load i16* %p ; <i16> [#uses=1]
- %tmp3 = sext i16 %tmp to i32 ; <i32> [#uses=1]
- ret i32 %tmp3
-}
-
-define i32 @f4(i16* %p) {
-entry:
- %tmp = load i16* %p ; <i16> [#uses=1]
- %tmp4 = zext i16 %tmp to i32 ; <i32> [#uses=1]
- ret i32 %tmp4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/long-setcc.ll b/libclamav/c++/llvm/test/CodeGen/ARM/long-setcc.ll
deleted file mode 100644
index c76a5e4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/long-setcc.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=arm | grep cmp | count 1
-
-
-define i1 @t1(i64 %x) {
- %B = icmp slt i64 %x, 0
- ret i1 %B
-}
-
-define i1 @t2(i64 %x) {
- %tmp = icmp ult i64 %x, 4294967296
- ret i1 %tmp
-}
-
-define i1 @t3(i32 %x) {
- %tmp = icmp ugt i32 %x, -1
- ret i1 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/long.ll b/libclamav/c++/llvm/test/CodeGen/ARM/long.ll
deleted file mode 100644
index 16ef7cc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/long.ll
+++ /dev/null
@@ -1,90 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-
-define i64 @f1() {
-; CHECK: f1:
-entry:
- ret i64 0
-}
-
-define i64 @f2() {
-; CHECK: f2:
-entry:
- ret i64 1
-}
-
-define i64 @f3() {
-; CHECK: f3:
-; CHECK: mvn{{.*}}-2147483648
-entry:
- ret i64 2147483647
-}
-
-define i64 @f4() {
-; CHECK: f4:
-; CHECK: -2147483648
-entry:
- ret i64 2147483648
-}
-
-define i64 @f5() {
-; CHECK: f5:
-; CHECK: mvn
-; CHECK: mvn{{.*}}-2147483648
-entry:
- ret i64 9223372036854775807
-}
-
-define i64 @f6(i64 %x, i64 %y) {
-; CHECK: f6:
-; CHECK: adds
-; CHECK: adc
-entry:
- %tmp1 = add i64 %y, 1 ; <i64> [#uses=1]
- ret i64 %tmp1
-}
-
-define void @f7() {
-; CHECK: f7:
-entry:
- %tmp = call i64 @f8( ) ; <i64> [#uses=0]
- ret void
-}
-
-declare i64 @f8()
-
-define i64 @f9(i64 %a, i64 %b) {
-; CHECK: f9:
-; CHECK: subs r
-; CHECK: sbc
-entry:
- %tmp = sub i64 %a, %b ; <i64> [#uses=1]
- ret i64 %tmp
-}
-
-define i64 @f(i32 %a, i32 %b) {
-; CHECK: f:
-; CHECK: smull
-entry:
- %tmp = sext i32 %a to i64 ; <i64> [#uses=1]
- %tmp1 = sext i32 %b to i64 ; <i64> [#uses=1]
- %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1]
- ret i64 %tmp2
-}
-
-define i64 @g(i32 %a, i32 %b) {
-; CHECK: g:
-; CHECK: umull
-entry:
- %tmp = zext i32 %a to i64 ; <i64> [#uses=1]
- %tmp1 = zext i32 %b to i64 ; <i64> [#uses=1]
- %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1]
- ret i64 %tmp2
-}
-
-define i64 @f10() {
-; CHECK: f10:
-entry:
- %a = alloca i64, align 8 ; <i64*> [#uses=1]
- %retval = load i64* %a ; <i64> [#uses=1]
- ret i64 %retval
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/long_shift.ll b/libclamav/c++/llvm/test/CodeGen/ARM/long_shift.ll
deleted file mode 100644
index 76332cc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/long_shift.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-
-define i64 @f0(i64 %A, i64 %B) {
-; CHECK: f0
-; CHECK: movs r3, r3, lsr #1
-; CHECK-NEXT: mov r2, r2, rrx
-; CHECK-NEXT: subs r0, r0, r2
-; CHECK-NEXT: sbc r1, r1, r3
- %tmp = bitcast i64 %A to i64
- %tmp2 = lshr i64 %B, 1
- %tmp3 = sub i64 %tmp, %tmp2
- ret i64 %tmp3
-}
-
-define i32 @f1(i64 %x, i64 %y) {
-; CHECK: f1
-; CHECK: mov r0, r0, lsl r2
- %a = shl i64 %x, %y
- %b = trunc i64 %a to i32
- ret i32 %b
-}
-
-define i32 @f2(i64 %x, i64 %y) {
-; CHECK: f2
-; CHECK: mov r0, r0, lsr r2
-; CHECK-NEXT: rsb r12, r2, #32
-; CHECK-NEXT: sub r2, r2, #32
-; CHECK-NEXT: cmp r2, #0
-; CHECK-NEXT: orr r0, r0, r1, lsl r12
-; CHECK-NEXT: movge r0, r1, asr r2
- %a = ashr i64 %x, %y
- %b = trunc i64 %a to i32
- ret i32 %b
-}
-
-define i32 @f3(i64 %x, i64 %y) {
-; CHECK: f3
-; CHECK: mov r0, r0, lsr r2
-; CHECK-NEXT: rsb r12, r2, #32
-; CHECK-NEXT: sub r2, r2, #32
-; CHECK-NEXT: cmp r2, #0
-; CHECK-NEXT: orr r0, r0, r1, lsl r12
-; CHECK-NEXT: movge r0, r1, lsr r2
- %a = lshr i64 %x, %y
- %b = trunc i64 %a to i32
- ret i32 %b
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/lsr-code-insertion.ll b/libclamav/c++/llvm/test/CodeGen/ARM/lsr-code-insertion.ll
deleted file mode 100644
index 1bbb96d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/lsr-code-insertion.ll
+++ /dev/null
@@ -1,60 +0,0 @@
-; RUN: llc < %s -stats |& grep {39.*Number of machine instrs printed}
-; RUN: llc < %s -stats |& not grep {.*Number of re-materialization}
-; This test really wants to check that the resultant "cond_true" block only
-; has a single store in it, and that cond_true55 only has code to materialize
-; the constant and do a store. We do *not* want something like this:
-;
-;LBB1_3: @cond_true
-; add r8, r0, r6
-; str r10, [r8, #+4]
-;
-target triple = "arm-apple-darwin8"
-
-define void @foo(i32* %mc, i32* %mpp, i32* %ip, i32* %dpp, i32* %tpmm, i32 %M, i32* %tpim, i32* %tpdm, i32* %bp, i32* %ms, i32 %xmb) {
-entry:
- %tmp6584 = icmp slt i32 %M, 1 ; <i1> [#uses=1]
- br i1 %tmp6584, label %return, label %bb
-
-bb: ; preds = %cond_next59, %entry
- %indvar = phi i32 [ 0, %entry ], [ %k.069.0, %cond_next59 ] ; <i32> [#uses=6]
- %k.069.0 = add i32 %indvar, 1 ; <i32> [#uses=3]
- %tmp3 = getelementptr i32* %mpp, i32 %indvar ; <i32*> [#uses=1]
- %tmp4 = load i32* %tmp3 ; <i32> [#uses=1]
- %tmp8 = getelementptr i32* %tpmm, i32 %indvar ; <i32*> [#uses=1]
- %tmp9 = load i32* %tmp8 ; <i32> [#uses=1]
- %tmp10 = add i32 %tmp9, %tmp4 ; <i32> [#uses=2]
- %tmp13 = getelementptr i32* %mc, i32 %k.069.0 ; <i32*> [#uses=5]
- store i32 %tmp10, i32* %tmp13
- %tmp17 = getelementptr i32* %ip, i32 %indvar ; <i32*> [#uses=1]
- %tmp18 = load i32* %tmp17 ; <i32> [#uses=1]
- %tmp22 = getelementptr i32* %tpim, i32 %indvar ; <i32*> [#uses=1]
- %tmp23 = load i32* %tmp22 ; <i32> [#uses=1]
- %tmp24 = add i32 %tmp23, %tmp18 ; <i32> [#uses=2]
- %tmp30 = icmp sgt i32 %tmp24, %tmp10 ; <i1> [#uses=1]
- br i1 %tmp30, label %cond_true, label %cond_next
-
-cond_true: ; preds = %bb
- store i32 %tmp24, i32* %tmp13
- br label %cond_next
-
-cond_next: ; preds = %cond_true, %bb
- %tmp39 = load i32* %tmp13 ; <i32> [#uses=1]
- %tmp42 = getelementptr i32* %ms, i32 %k.069.0 ; <i32*> [#uses=1]
- %tmp43 = load i32* %tmp42 ; <i32> [#uses=1]
- %tmp44 = add i32 %tmp43, %tmp39 ; <i32> [#uses=2]
- store i32 %tmp44, i32* %tmp13
- %tmp52 = icmp slt i32 %tmp44, -987654321 ; <i1> [#uses=1]
- br i1 %tmp52, label %cond_true55, label %cond_next59
-
-cond_true55: ; preds = %cond_next
- store i32 -987654321, i32* %tmp13
- br label %cond_next59
-
-cond_next59: ; preds = %cond_true55, %cond_next
- %tmp61 = add i32 %indvar, 2 ; <i32> [#uses=1]
- %tmp65 = icmp sgt i32 %tmp61, %M ; <i1> [#uses=1]
- br i1 %tmp65, label %return, label %bb
-
-return: ; preds = %cond_next59, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/lsr-scale-addr-mode.ll b/libclamav/c++/llvm/test/CodeGen/ARM/lsr-scale-addr-mode.ll
deleted file mode 100644
index 8130019..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/lsr-scale-addr-mode.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=arm | grep lsl | grep -F {lsl #2\]}
-; Should use scaled addressing mode.
-
-define void @sintzero(i32* %a) nounwind {
-entry:
- store i32 0, i32* %a
- br label %cond_next
-
-cond_next: ; preds = %cond_next, %entry
- %indvar = phi i32 [ 0, %entry ], [ %tmp25, %cond_next ] ; <i32> [#uses=1]
- %tmp25 = add i32 %indvar, 1 ; <i32> [#uses=3]
- %tmp36 = getelementptr i32* %a, i32 %tmp25 ; <i32*> [#uses=1]
- store i32 0, i32* %tmp36
- icmp eq i32 %tmp25, -1 ; <i1>:0 [#uses=1]
- br i1 %0, label %return, label %cond_next
-
-return: ; preds = %cond_next
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/mem.ll b/libclamav/c++/llvm/test/CodeGen/ARM/mem.ll
deleted file mode 100644
index f46c7a5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/mem.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=arm | grep strb
-; RUN: llc < %s -march=arm | grep strh
-
-define void @f1() {
-entry:
- store i8 0, i8* null
- ret void
-}
-
-define void @f2() {
-entry:
- store i16 0, i16* null
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/memcpy-inline.ll b/libclamav/c++/llvm/test/CodeGen/ARM/memcpy-inline.ll
deleted file mode 100644
index ed20c32..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/memcpy-inline.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin | grep ldmia
-; RUN: llc < %s -mtriple=arm-apple-darwin | grep stmia
-; RUN: llc < %s -mtriple=arm-apple-darwin | grep ldrb
-; RUN: llc < %s -mtriple=arm-apple-darwin | grep ldrh
-
- %struct.x = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
-@src = external global %struct.x
-@dst = external global %struct.x
-
-define i32 @t() {
-entry:
- call void @llvm.memcpy.i32( i8* getelementptr (%struct.x* @dst, i32 0, i32 0), i8* getelementptr (%struct.x* @src, i32 0, i32 0), i32 11, i32 8 )
- ret i32 0
-}
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/memfunc.ll b/libclamav/c++/llvm/test/CodeGen/ARM/memfunc.ll
deleted file mode 100644
index 41d5944..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/memfunc.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=arm
-
-define void @f() {
-entry:
- call void @llvm.memmove.i32( i8* null, i8* null, i32 64, i32 0 )
- call void @llvm.memcpy.i32( i8* null, i8* null, i32 64, i32 0 )
- call void @llvm.memset.i32( i8* null, i8 64, i32 0, i32 0 )
- unreachable
-}
-
-declare void @llvm.memmove.i32(i8*, i8*, i32, i32)
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
-
-declare void @llvm.memset.i32(i8*, i8, i32, i32)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/mls.ll b/libclamav/c++/llvm/test/CodeGen/ARM/mls.ll
deleted file mode 100644
index a6cdba4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/mls.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6t2 | FileCheck %s
-
-define i32 @f1(i32 %a, i32 %b, i32 %c) {
- %tmp1 = mul i32 %a, %b
- %tmp2 = sub i32 %c, %tmp1
- ret i32 %tmp2
-}
-
-; sub doesn't commute, so no mls for this one
-define i32 @f2(i32 %a, i32 %b, i32 %c) {
- %tmp1 = mul i32 %a, %b
- %tmp2 = sub i32 %tmp1, %c
- ret i32 %tmp2
-}
-
-; CHECK: mls r0, r0, r1, r2
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/movt-movw-global.ll b/libclamav/c++/llvm/test/CodeGen/ARM/movt-movw-global.ll
deleted file mode 100644
index 886ff3f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/movt-movw-global.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s | FileCheck %s
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "armv7-eabi"
-
-@foo = common global i32 0 ; <i32*> [#uses=1]
-
-define arm_aapcs_vfpcc i32* @bar1() nounwind readnone {
-entry:
-; CHECK: movw r0, :lower16:foo
-; CHECK-NEXT: movt r0, :upper16:foo
- ret i32* @foo
-}
-
-define arm_aapcs_vfpcc void @bar2(i32 %baz) nounwind {
-entry:
-; CHECK: movw r1, :lower16:foo
-; CHECK-NEXT: movt r1, :upper16:foo
- store i32 %baz, i32* @foo, align 4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/movt.ll b/libclamav/c++/llvm/test/CodeGen/ARM/movt.ll
deleted file mode 100644
index e82aca0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/movt.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+thumb2 | FileCheck %s
-; rdar://7317664
-
-define i32 @t(i32 %X) nounwind {
-; CHECK: t:
-; CHECK: movt r0, #65535
-entry:
- %0 = or i32 %X, -65536
- ret i32 %0
-}
-
-define i32 @t2(i32 %X) nounwind {
-; CHECK: t2:
-; CHECK: movt r0, #65534
-entry:
- %0 = or i32 %X, -131072
- %1 = and i32 %0, -65537
- ret i32 %1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/mul.ll b/libclamav/c++/llvm/test/CodeGen/ARM/mul.ll
deleted file mode 100644
index 466a802..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/mul.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=arm | grep mul | count 2
-; RUN: llc < %s -march=arm | grep lsl | count 2
-
-define i32 @f1(i32 %u) {
- %tmp = mul i32 %u, %u
- ret i32 %tmp
-}
-
-define i32 @f2(i32 %u, i32 %v) {
- %tmp = mul i32 %u, %v
- ret i32 %tmp
-}
-
-define i32 @f3(i32 %u) {
- %tmp = mul i32 %u, 5
- ret i32 %tmp
-}
-
-define i32 @f4(i32 %u) {
- %tmp = mul i32 %u, 4
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/mul_const.ll b/libclamav/c++/llvm/test/CodeGen/ARM/mul_const.ll
deleted file mode 100644
index 93188cd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/mul_const.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-
-define i32 @t1(i32 %v) nounwind readnone {
-entry:
-; CHECK: t1:
-; CHECK: add r0, r0, r0, lsl #3
- %0 = mul i32 %v, 9
- ret i32 %0
-}
-
-define i32 @t2(i32 %v) nounwind readnone {
-entry:
-; CHECK: t2:
-; CHECK: rsb r0, r0, r0, lsl #3
- %0 = mul i32 %v, 7
- ret i32 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/mulhi.ll b/libclamav/c++/llvm/test/CodeGen/ARM/mulhi.ll
deleted file mode 100644
index 148f291..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/mulhi.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6
-; RUN: llc < %s -march=arm -mattr=+v6 | \
-; RUN: grep smmul | count 1
-; RUN: llc < %s -march=arm | grep umull | count 1
-
-define i32 @smulhi(i32 %x, i32 %y) {
- %tmp = sext i32 %x to i64 ; <i64> [#uses=1]
- %tmp1 = sext i32 %y to i64 ; <i64> [#uses=1]
- %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1]
- %tmp3 = lshr i64 %tmp2, 32 ; <i64> [#uses=1]
- %tmp3.upgrd.1 = trunc i64 %tmp3 to i32 ; <i32> [#uses=1]
- ret i32 %tmp3.upgrd.1
-}
-
-define i32 @umulhi(i32 %x, i32 %y) {
- %tmp = zext i32 %x to i64 ; <i64> [#uses=1]
- %tmp1 = zext i32 %y to i64 ; <i64> [#uses=1]
- %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1]
- %tmp3 = lshr i64 %tmp2, 32 ; <i64> [#uses=1]
- %tmp3.upgrd.2 = trunc i64 %tmp3 to i32 ; <i32> [#uses=1]
- ret i32 %tmp3.upgrd.2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/mvn.ll b/libclamav/c++/llvm/test/CodeGen/ARM/mvn.ll
deleted file mode 100644
index 571c21a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/mvn.ll
+++ /dev/null
@@ -1,74 +0,0 @@
-; RUN: llc < %s -march=arm | grep mvn | count 8
-
-define i32 @f1() {
-entry:
- ret i32 -1
-}
-
-define i32 @f2(i32 %a) {
-entry:
- %tmpnot = xor i32 %a, -1 ; <i32> [#uses=1]
- ret i32 %tmpnot
-}
-
-define i32 @f3(i32 %a) {
-entry:
- %tmp1 = shl i32 %a, 2 ; <i32> [#uses=1]
- %tmp1not = xor i32 %tmp1, -1 ; <i32> [#uses=1]
- ret i32 %tmp1not
-}
-
-define i32 @f4(i32 %a, i8 %b) {
-entry:
- %shift.upgrd.1 = zext i8 %b to i32 ; <i32> [#uses=1]
- %tmp3 = shl i32 %a, %shift.upgrd.1 ; <i32> [#uses=1]
- %tmp3not = xor i32 %tmp3, -1 ; <i32> [#uses=1]
- ret i32 %tmp3not
-}
-
-define i32 @f5(i32 %a) {
-entry:
- %tmp1 = lshr i32 %a, 2 ; <i32> [#uses=1]
- %tmp1not = xor i32 %tmp1, -1 ; <i32> [#uses=1]
- ret i32 %tmp1not
-}
-
-define i32 @f6(i32 %a, i8 %b) {
-entry:
- %shift.upgrd.2 = zext i8 %b to i32 ; <i32> [#uses=1]
- %tmp2 = lshr i32 %a, %shift.upgrd.2 ; <i32> [#uses=1]
- %tmp2not = xor i32 %tmp2, -1 ; <i32> [#uses=1]
- ret i32 %tmp2not
-}
-
-define i32 @f7(i32 %a) {
-entry:
- %tmp1 = ashr i32 %a, 2 ; <i32> [#uses=1]
- %tmp1not = xor i32 %tmp1, -1 ; <i32> [#uses=1]
- ret i32 %tmp1not
-}
-
-define i32 @f8(i32 %a, i8 %b) {
-entry:
- %shift.upgrd.3 = zext i8 %b to i32 ; <i32> [#uses=1]
- %tmp3 = ashr i32 %a, %shift.upgrd.3 ; <i32> [#uses=1]
- %tmp3not = xor i32 %tmp3, -1 ; <i32> [#uses=1]
- ret i32 %tmp3not
-}
-
-define i32 @f9() {
-entry:
- %tmp4845 = add i32 0, 0 ; <i32> [#uses=1]
- br label %cond_true4848
-
-cond_true4848: ; preds = %entry
- %tmp4851 = sub i32 -3, 0 ; <i32> [#uses=1]
- %abc = add i32 %tmp4851, %tmp4845 ; <i32> [#uses=1]
- ret i32 %abc
-}
-
-define i1 @f10(i32 %a) {
-entry:
- %tmp102 = icmp eq i32 -2, %a ; <i1> [#uses=1]
- ret i1 %tmp102
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/neon_arith1.ll b/libclamav/c++/llvm/test/CodeGen/ARM/neon_arith1.ll
deleted file mode 100644
index 5892737..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/neon_arith1.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | grep vadd
-
-define <8 x i8> @t_i8x8(<8 x i8> %a, <8 x i8> %b) nounwind {
-entry:
- %0 = add <8 x i8> %a, %b
- ret <8 x i8> %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/neon_ld1.ll b/libclamav/c++/llvm/test/CodeGen/ARM/neon_ld1.ll
deleted file mode 100644
index c78872a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/neon_ld1.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | grep vldr.64 | count 4
-; RUN: llc < %s -march=arm -mattr=+neon | grep vstr.64
-; RUN: llc < %s -march=arm -mattr=+neon | grep vmov
-
-define void @t1(<2 x i32>* %r, <4 x i16>* %a, <4 x i16>* %b) nounwind {
-entry:
- %0 = load <4 x i16>* %a, align 8 ; <<4 x i16>> [#uses=1]
- %1 = load <4 x i16>* %b, align 8 ; <<4 x i16>> [#uses=1]
- %2 = add <4 x i16> %0, %1 ; <<4 x i16>> [#uses=1]
- %3 = bitcast <4 x i16> %2 to <2 x i32> ; <<2 x i32>> [#uses=1]
- store <2 x i32> %3, <2 x i32>* %r, align 8
- ret void
-}
-
-define <2 x i32> @t2(<4 x i16>* %a, <4 x i16>* %b) nounwind readonly {
-entry:
- %0 = load <4 x i16>* %a, align 8 ; <<4 x i16>> [#uses=1]
- %1 = load <4 x i16>* %b, align 8 ; <<4 x i16>> [#uses=1]
- %2 = sub <4 x i16> %0, %1 ; <<4 x i16>> [#uses=1]
- %3 = bitcast <4 x i16> %2 to <2 x i32> ; <<2 x i32>> [#uses=1]
- ret <2 x i32> %3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/neon_ld2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/neon_ld2.ll
deleted file mode 100644
index 130277b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/neon_ld2.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | grep vldmia | count 4
-; RUN: llc < %s -march=arm -mattr=+neon | grep vstmia | count 1
-; RUN: llc < %s -march=arm -mattr=+neon | grep vmov | count 2
-
-define void @t1(<4 x i32>* %r, <2 x i64>* %a, <2 x i64>* %b) nounwind {
-entry:
- %0 = load <2 x i64>* %a, align 16 ; <<2 x i64>> [#uses=1]
- %1 = load <2 x i64>* %b, align 16 ; <<2 x i64>> [#uses=1]
- %2 = add <2 x i64> %0, %1 ; <<2 x i64>> [#uses=1]
- %3 = bitcast <2 x i64> %2 to <4 x i32> ; <<4 x i32>> [#uses=1]
- store <4 x i32> %3, <4 x i32>* %r, align 16
- ret void
-}
-
-define <4 x i32> @t2(<2 x i64>* %a, <2 x i64>* %b) nounwind readonly {
-entry:
- %0 = load <2 x i64>* %a, align 16 ; <<2 x i64>> [#uses=1]
- %1 = load <2 x i64>* %b, align 16 ; <<2 x i64>> [#uses=1]
- %2 = sub <2 x i64> %0, %1 ; <<2 x i64>> [#uses=1]
- %3 = bitcast <2 x i64> %2 to <4 x i32> ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %3
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/neon_minmax.ll b/libclamav/c++/llvm/test/CodeGen/ARM/neon_minmax.ll
deleted file mode 100644
index d301c6a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/neon_minmax.ll
+++ /dev/null
@@ -1,81 +0,0 @@
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
-
-define float @fmin_ole(float %x) nounwind {
-;CHECK: fmin_ole:
-;CHECK: vmin.f32
- %cond = fcmp ole float 1.0, %x
- %min1 = select i1 %cond, float 1.0, float %x
- ret float %min1
-}
-
-define float @fmin_ole_zero(float %x) nounwind {
-;CHECK: fmin_ole_zero:
-;CHECK-NOT: vmin.f32
- %cond = fcmp ole float 0.0, %x
- %min1 = select i1 %cond, float 0.0, float %x
- ret float %min1
-}
-
-define float @fmin_ult(float %x) nounwind {
-;CHECK: fmin_ult:
-;CHECK: vmin.f32
- %cond = fcmp ult float %x, 1.0
- %min1 = select i1 %cond, float %x, float 1.0
- ret float %min1
-}
-
-define float @fmax_ogt(float %x) nounwind {
-;CHECK: fmax_ogt:
-;CHECK: vmax.f32
- %cond = fcmp ogt float 1.0, %x
- %max1 = select i1 %cond, float 1.0, float %x
- ret float %max1
-}
-
-define float @fmax_uge(float %x) nounwind {
-;CHECK: fmax_uge:
-;CHECK: vmax.f32
- %cond = fcmp uge float %x, 1.0
- %max1 = select i1 %cond, float %x, float 1.0
- ret float %max1
-}
-
-define float @fmax_uge_zero(float %x) nounwind {
-;CHECK: fmax_uge_zero:
-;CHECK-NOT: vmax.f32
- %cond = fcmp uge float %x, 0.0
- %max1 = select i1 %cond, float %x, float 0.0
- ret float %max1
-}
-
-define float @fmax_olt_reverse(float %x) nounwind {
-;CHECK: fmax_olt_reverse:
-;CHECK: vmax.f32
- %cond = fcmp olt float %x, 1.0
- %max1 = select i1 %cond, float 1.0, float %x
- ret float %max1
-}
-
-define float @fmax_ule_reverse(float %x) nounwind {
-;CHECK: fmax_ule_reverse:
-;CHECK: vmax.f32
- %cond = fcmp ult float 1.0, %x
- %max1 = select i1 %cond, float %x, float 1.0
- ret float %max1
-}
-
-define float @fmin_oge_reverse(float %x) nounwind {
-;CHECK: fmin_oge_reverse:
-;CHECK: vmin.f32
- %cond = fcmp oge float %x, 1.0
- %min1 = select i1 %cond, float 1.0, float %x
- ret float %min1
-}
-
-define float @fmin_ugt_reverse(float %x) nounwind {
-;CHECK: fmin_ugt_reverse:
-;CHECK: vmin.f32
- %cond = fcmp ugt float 1.0, %x
- %min1 = select i1 %cond, float %x, float 1.0
- ret float %min1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/pack.ll b/libclamav/c++/llvm/test/CodeGen/ARM/pack.ll
deleted file mode 100644
index 1e2e7aa..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/pack.ll
+++ /dev/null
@@ -1,73 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6 | \
-; RUN: grep pkhbt | count 5
-; RUN: llc < %s -march=arm -mattr=+v6 | \
-; RUN: grep pkhtb | count 4
-
-define i32 @test1(i32 %X, i32 %Y) {
- %tmp1 = and i32 %X, 65535 ; <i32> [#uses=1]
- %tmp4 = shl i32 %Y, 16 ; <i32> [#uses=1]
- %tmp5 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
-define i32 @test1a(i32 %X, i32 %Y) {
- %tmp19 = and i32 %X, 65535 ; <i32> [#uses=1]
- %tmp37 = shl i32 %Y, 16 ; <i32> [#uses=1]
- %tmp5 = or i32 %tmp37, %tmp19 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
-define i32 @test2(i32 %X, i32 %Y) {
- %tmp1 = and i32 %X, 65535 ; <i32> [#uses=1]
- %tmp3 = shl i32 %Y, 12 ; <i32> [#uses=1]
- %tmp4 = and i32 %tmp3, -65536 ; <i32> [#uses=1]
- %tmp57 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
- ret i32 %tmp57
-}
-
-define i32 @test3(i32 %X, i32 %Y) {
- %tmp19 = and i32 %X, 65535 ; <i32> [#uses=1]
- %tmp37 = shl i32 %Y, 18 ; <i32> [#uses=1]
- %tmp5 = or i32 %tmp37, %tmp19 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
-define i32 @test4(i32 %X, i32 %Y) {
- %tmp1 = and i32 %X, 65535 ; <i32> [#uses=1]
- %tmp3 = and i32 %Y, -65536 ; <i32> [#uses=1]
- %tmp46 = or i32 %tmp3, %tmp1 ; <i32> [#uses=1]
- ret i32 %tmp46
-}
-
-define i32 @test5(i32 %X, i32 %Y) {
- %tmp17 = and i32 %X, -65536 ; <i32> [#uses=1]
- %tmp2 = bitcast i32 %Y to i32 ; <i32> [#uses=1]
- %tmp4 = lshr i32 %tmp2, 16 ; <i32> [#uses=2]
- %tmp5 = or i32 %tmp4, %tmp17 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
-define i32 @test5a(i32 %X, i32 %Y) {
- %tmp110 = and i32 %X, -65536 ; <i32> [#uses=1]
- %tmp37 = lshr i32 %Y, 16 ; <i32> [#uses=1]
- %tmp39 = bitcast i32 %tmp37 to i32 ; <i32> [#uses=1]
- %tmp5 = or i32 %tmp39, %tmp110 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
-define i32 @test6(i32 %X, i32 %Y) {
- %tmp1 = and i32 %X, -65536 ; <i32> [#uses=1]
- %tmp37 = lshr i32 %Y, 12 ; <i32> [#uses=1]
- %tmp38 = bitcast i32 %tmp37 to i32 ; <i32> [#uses=1]
- %tmp4 = and i32 %tmp38, 65535 ; <i32> [#uses=1]
- %tmp59 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
- ret i32 %tmp59
-}
-
-define i32 @test7(i32 %X, i32 %Y) {
- %tmp1 = and i32 %X, -65536 ; <i32> [#uses=1]
- %tmp3 = ashr i32 %Y, 18 ; <i32> [#uses=1]
- %tmp4 = and i32 %tmp3, 65535 ; <i32> [#uses=1]
- %tmp57 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
- ret i32 %tmp57
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/pr3502.ll b/libclamav/c++/llvm/test/CodeGen/ARM/pr3502.ll
deleted file mode 100644
index 606d969..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/pr3502.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -mtriple=arm-none-linux-gnueabi
-;pr3502
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
- %struct.ArmPTD = type { i32 }
- %struct.RegisterSave = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.SHARED_AREA = type { i32, %struct.SHARED_AREA*, %struct.SHARED_AREA*, %struct.SHARED_AREA*, %struct.ArmPTD, void (%struct.RegisterSave*)*, void (%struct.RegisterSave*)*, i32, [1024 x i8], i32, i32, i32, i32, i32, i8, i8, i16, i32, i32, i32, i32, [16 x i8], i32, i32, i32, i8, i8, i8, i32, i16, i32, i64, i32, i32, i32, i32, i32, i32, i8*, i32, [256 x i8], i32, i32, i32, [20 x i8], %struct.RegisterSave, { %struct.WorldSwitchV5 }, [4 x i32] }
- %struct.WorldSwitchV5 = type { i32, i32, i32, i32, i32, i32, i32 }
-
-define void @SomeCall(i32 %num) nounwind {
-entry:
- tail call void asm sideeffect "mcr p15, 0, $0, c7, c10, 4 \0A\09", "r,~{memory}"(i32 0) nounwind
- tail call void asm sideeffect "mcr p15,0,$0,c7,c14,0", "r,~{memory}"(i32 0) nounwind
- %0 = load %struct.SHARED_AREA** null, align 4 ; <%struct.SHARED_AREA*> [#uses=1]
- %1 = ptrtoint %struct.SHARED_AREA* %0 to i32 ; <i32> [#uses=1]
- %2 = lshr i32 %1, 20 ; <i32> [#uses=1]
- %3 = tail call i32 @SetCurrEntry(i32 %2, i32 0) nounwind ; <i32> [#uses=0]
- tail call void @ClearStuff(i32 0) nounwind
- ret void
-}
-
-declare i32 @SetCurrEntry(i32, i32)
-
-declare void @ClearStuff(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/private.ll b/libclamav/c++/llvm/test/CodeGen/ARM/private.ll
deleted file mode 100644
index d60ee3b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/private.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; Test to make sure that the 'private' is used correctly.
-;
-; RUN: llc < %s -mtriple=arm-linux-gnueabi > %t
-; RUN: grep .Lfoo: %t
-; RUN: grep -E bl.*\.Lfoo %t
-; RUN: grep .Lbaz: %t
-; RUN: grep long.*\.Lbaz %t
-
-declare void @foo()
-
-define private void @foo() {
- ret void
-}
-
-@baz = private global i32 4
-
-define i32 @bar() {
- call void @foo()
- %1 = load i32* @baz, align 4
- ret i32 %1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/remat.ll b/libclamav/c++/llvm/test/CodeGen/ARM/remat.ll
deleted file mode 100644
index 92c1cf1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/remat.ll
+++ /dev/null
@@ -1,65 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6,+vfp2 -stats -info-output-file - | grep "Number of re-materialization"
-
-define arm_apcscc i32 @main(i32 %argc, i8** nocapture %argv, double %d1, double %d2) nounwind {
-entry:
- br i1 undef, label %smvp.exit, label %bb.i3
-
-bb.i3: ; preds = %bb.i3, %bb134
- br i1 undef, label %smvp.exit, label %bb.i3
-
-smvp.exit: ; preds = %bb.i3
- %0 = fmul double %d1, 2.400000e-03 ; <double> [#uses=2]
- br i1 undef, label %bb138.preheader, label %bb159
-
-bb138.preheader: ; preds = %smvp.exit
- br label %bb138
-
-bb138: ; preds = %bb138, %bb138.preheader
- br i1 undef, label %bb138, label %bb145.loopexit
-
-bb142: ; preds = %bb.nph218.bb.nph218.split_crit_edge, %phi0.exit
- %1 = fmul double %d1, -1.200000e-03 ; <double> [#uses=1]
- %2 = fadd double %d2, %1 ; <double> [#uses=1]
- %3 = fmul double %2, %d2 ; <double> [#uses=1]
- %4 = fsub double 0.000000e+00, %3 ; <double> [#uses=1]
- br i1 %14, label %phi1.exit, label %bb.i35
-
-bb.i35: ; preds = %bb142
- %5 = call arm_apcscc double @sin(double %15) nounwind readonly ; <double> [#uses=1]
- %6 = fmul double %5, 0x4031740AFA84AD8A ; <double> [#uses=1]
- %7 = fsub double 1.000000e+00, undef ; <double> [#uses=1]
- %8 = fdiv double %7, 6.000000e-01 ; <double> [#uses=1]
- br label %phi1.exit
-
-phi1.exit: ; preds = %bb.i35, %bb142
- %.pn = phi double [ %6, %bb.i35 ], [ 0.000000e+00, %bb142 ] ; <double> [#uses=1]
- %9 = phi double [ %8, %bb.i35 ], [ 0.000000e+00, %bb142 ] ; <double> [#uses=1]
- %10 = fmul double %.pn, %9 ; <double> [#uses=1]
- br i1 %14, label %phi0.exit, label %bb.i
-
-bb.i: ; preds = %phi1.exit
- unreachable
-
-phi0.exit: ; preds = %phi1.exit
- %11 = fsub double %4, %10 ; <double> [#uses=1]
- %12 = fadd double 0.000000e+00, %11 ; <double> [#uses=1]
- store double %12, double* undef, align 4
- br label %bb142
-
-bb145.loopexit: ; preds = %bb138
- br i1 undef, label %bb.nph218.bb.nph218.split_crit_edge, label %bb159
-
-bb.nph218.bb.nph218.split_crit_edge: ; preds = %bb145.loopexit
- %13 = fmul double %0, 0x401921FB54442D18 ; <double> [#uses=1]
- %14 = fcmp ugt double %0, 6.000000e-01 ; <i1> [#uses=2]
- %15 = fdiv double %13, 6.000000e-01 ; <double> [#uses=1]
- br label %bb142
-
-bb159: ; preds = %bb145.loopexit, %smvp.exit, %bb134
- unreachable
-
-bb166: ; preds = %bb127
- unreachable
-}
-
-declare arm_apcscc double @sin(double) nounwind readonly
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret0.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret0.ll
deleted file mode 100644
index 5c312eb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret0.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s -march=arm
-
-define i32 @test() {
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret_arg1.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret_arg1.ll
deleted file mode 100644
index 1ab947b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret_arg1.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s -march=arm
-
-define i32 @test(i32 %a1) {
- ret i32 %a1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret_arg2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret_arg2.ll
deleted file mode 100644
index 84477d0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret_arg2.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=arm
-
-define i32 @test(i32 %a1, i32 %a2) {
- ret i32 %a2
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret_arg3.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret_arg3.ll
deleted file mode 100644
index f7f9057..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret_arg3.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s -march=arm
-define i32 @test(i32 %a1, i32 %a2, i32 %a3) {
- ret i32 %a3
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret_arg4.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret_arg4.ll
deleted file mode 100644
index f7b3e4a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret_arg4.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s -march=arm
-
-define i32 @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
- ret i32 %a4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret_arg5.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret_arg5.ll
deleted file mode 100644
index c4f9fb5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret_arg5.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s -march=arm
-
-define i32 @test(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5) {
- ret i32 %a5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret_f32_arg2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret_f32_arg2.ll
deleted file mode 100644
index 2bafea6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret_f32_arg2.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
-
-define float @test_f32(float %a1, float %a2) {
- ret float %a2
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret_f32_arg5.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret_f32_arg5.ll
deleted file mode 100644
index c6ce60e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret_f32_arg5.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
-
-define float @test_f32_arg5(float %a1, float %a2, float %a3, float %a4, float %a5) {
- ret float %a5
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret_f64_arg2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret_f64_arg2.ll
deleted file mode 100644
index 386e85f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret_f64_arg2.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
-
-define double @test_f64(double %a1, double %a2) {
- ret double %a2
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret_f64_arg_reg_split.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret_f64_arg_reg_split.ll
deleted file mode 100644
index bdb0a60..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret_f64_arg_reg_split.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=arm -mcpu=arm8 -mattr=+vfp2
-
-define double @test_double_arg_reg_split(i32 %a1, double %a2) {
- ret double %a2
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret_f64_arg_split.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret_f64_arg_split.ll
deleted file mode 100644
index 4f841a3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret_f64_arg_split.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
-
-define double @test_double_arg_split(i64 %a1, i32 %a2, double %a3) {
- ret double %a3
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret_f64_arg_stack.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret_f64_arg_stack.ll
deleted file mode 100644
index 2144317..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret_f64_arg_stack.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
-
-define double @test_double_arg_stack(i64 %a1, i32 %a2, i32 %a3, double %a4) {
- ret double %a4
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret_i128_arg2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret_i128_arg2.ll
deleted file mode 100644
index 908c34f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret_i128_arg2.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
-
-define i128 @test_i128(i128 %a1, i128 %a2, i128 %a3) {
- ret i128 %a3
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret_i64_arg2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret_i64_arg2.ll
deleted file mode 100644
index b1a1024..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret_i64_arg2.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
-
-define i64 @test_i64(i64 %a1, i64 %a2) {
- ret i64 %a2
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret_i64_arg3.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret_i64_arg3.ll
deleted file mode 100644
index ffc1d2f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret_i64_arg3.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
-
-define i64 @test_i64_arg3(i64 %a1, i64 %a2, i64 %a3) {
- ret i64 %a3
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret_i64_arg_split.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret_i64_arg_split.ll
deleted file mode 100644
index 956bce5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret_i64_arg_split.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2
-
-define i64 @test_i64_arg_split(i64 %a1, i32 %a2, i64 %a3) {
- ret i64 %a3
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/ret_void.ll b/libclamav/c++/llvm/test/CodeGen/ARM/ret_void.ll
deleted file mode 100644
index 2b7ae05..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/ret_void.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=arm
-
-define void @test() {
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/rev.ll b/libclamav/c++/llvm/test/CodeGen/ARM/rev.ll
deleted file mode 100644
index 1c12268..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/rev.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6 | grep rev16
-; RUN: llc < %s -march=arm -mattr=+v6 | grep revsh
-
-define i32 @test1(i32 %X) {
- %tmp1 = lshr i32 %X, 8 ; <i32> [#uses=3]
- %X15 = bitcast i32 %X to i32 ; <i32> [#uses=1]
- %tmp4 = shl i32 %X15, 8 ; <i32> [#uses=2]
- %tmp2 = and i32 %tmp1, 16711680 ; <i32> [#uses=1]
- %tmp5 = and i32 %tmp4, -16777216 ; <i32> [#uses=1]
- %tmp9 = and i32 %tmp1, 255 ; <i32> [#uses=1]
- %tmp13 = and i32 %tmp4, 65280 ; <i32> [#uses=1]
- %tmp6 = or i32 %tmp5, %tmp2 ; <i32> [#uses=1]
- %tmp10 = or i32 %tmp6, %tmp13 ; <i32> [#uses=1]
- %tmp14 = or i32 %tmp10, %tmp9 ; <i32> [#uses=1]
- ret i32 %tmp14
-}
-
-define i32 @test2(i32 %X) {
- %tmp1 = lshr i32 %X, 8 ; <i32> [#uses=1]
- %tmp1.upgrd.1 = trunc i32 %tmp1 to i16 ; <i16> [#uses=1]
- %tmp3 = trunc i32 %X to i16 ; <i16> [#uses=1]
- %tmp2 = and i16 %tmp1.upgrd.1, 255 ; <i16> [#uses=1]
- %tmp4 = shl i16 %tmp3, 8 ; <i16> [#uses=1]
- %tmp5 = or i16 %tmp2, %tmp4 ; <i16> [#uses=1]
- %tmp5.upgrd.2 = sext i16 %tmp5 to i32 ; <i32> [#uses=1]
- ret i32 %tmp5.upgrd.2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/sbfx.ll b/libclamav/c++/llvm/test/CodeGen/ARM/sbfx.ll
deleted file mode 100644
index 6f1d87d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/sbfx.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6t2 | FileCheck %s
-
-define i32 @f1(i32 %a) {
-entry:
-; CHECK: f1:
-; CHECK: sbfx r0, r0, #0, #20
- %tmp = shl i32 %a, 12
- %tmp2 = ashr i32 %tmp, 12
- ret i32 %tmp2
-}
-
-define i32 @f2(i32 %a) {
-entry:
-; CHECK: f2:
-; CHECK: ubfx r0, r0, #0, #20
- %tmp = shl i32 %a, 12
- %tmp2 = lshr i32 %tmp, 12
- ret i32 %tmp2
-}
-
-define i32 @f3(i32 %a) {
-entry:
-; CHECK: f3:
-; CHECK: sbfx r0, r0, #5, #3
- %tmp = shl i32 %a, 24
- %tmp2 = ashr i32 %tmp, 29
- ret i32 %tmp2
-}
-
-define i32 @f4(i32 %a) {
-entry:
-; CHECK: f4:
-; CHECK: ubfx r0, r0, #5, #3
- %tmp = shl i32 %a, 24
- %tmp2 = lshr i32 %tmp, 29
- ret i32 %tmp2
-}
-
-define i32 @f5(i32 %a) {
-entry:
-; CHECK: f5:
-; CHECK-NOT: sbfx
-; CHECK: bx
- %tmp = shl i32 %a, 3
- %tmp2 = ashr i32 %tmp, 1
- ret i32 %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/section.ll b/libclamav/c++/llvm/test/CodeGen/ARM/section.ll
deleted file mode 100644
index 7a566d4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/section.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux | \
-; RUN: grep {__DTOR_END__:}
-; RUN: llc < %s -mtriple=arm-linux | \
-; RUN: grep {\\.section.\\.dtors,"aw",.progbits}
-
-@__DTOR_END__ = internal global [1 x i32] zeroinitializer, section ".dtors" ; <[1 x i32]*> [#uses=0]
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/select-imm.ll b/libclamav/c++/llvm/test/CodeGen/ARM/select-imm.ll
deleted file mode 100644
index 07edc91..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/select-imm.ll
+++ /dev/null
@@ -1,48 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s --check-prefix=ARM
-; RUN: llc < %s -march=arm -mattr=+thumb2 | FileCheck %s --check-prefix=T2
-
-define arm_apcscc i32 @t1(i32 %c) nounwind readnone {
-entry:
-; ARM: t1:
-; ARM: mov r1, #101
-; ARM: orr r1, r1, #1, 24
-; ARM: movgt r0, #123
-
-; T2: t1:
-; T2: movw r0, #357
-; T2: movgt r0, #123
-
- %0 = icmp sgt i32 %c, 1
- %1 = select i1 %0, i32 123, i32 357
- ret i32 %1
-}
-
-define arm_apcscc i32 @t2(i32 %c) nounwind readnone {
-entry:
-; ARM: t2:
-; ARM: mov r1, #101
-; ARM: orr r1, r1, #1, 24
-; ARM: movle r0, #123
-
-; T2: t2:
-; T2: movw r0, #357
-; T2: movle r0, #123
-
- %0 = icmp sgt i32 %c, 1
- %1 = select i1 %0, i32 357, i32 123
- ret i32 %1
-}
-
-define arm_apcscc i32 @t3(i32 %a) nounwind readnone {
-entry:
-; ARM: t3:
-; ARM: mov r0, #0
-; ARM: moveq r0, #1
-
-; T2: t3:
-; T2: mov r0, #0
-; T2: moveq r0, #1
- %0 = icmp eq i32 %a, 160
- %1 = zext i1 %0 to i32
- ret i32 %1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/select.ll b/libclamav/c++/llvm/test/CodeGen/ARM/select.ll
deleted file mode 100644
index 29c55c6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/select.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s --check-prefix=CHECK-VFP
-
-define i32 @f1(i32 %a.s) {
-;CHECK: f1:
-;CHECK: moveq
-entry:
- %tmp = icmp eq i32 %a.s, 4
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define i32 @f2(i32 %a.s) {
-;CHECK: f2:
-;CHECK: movgt
-entry:
- %tmp = icmp sgt i32 %a.s, 4
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define i32 @f3(i32 %a.s, i32 %b.s) {
-;CHECK: f3:
-;CHECK: movlt
-entry:
- %tmp = icmp slt i32 %a.s, %b.s
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define i32 @f4(i32 %a.s, i32 %b.s) {
-;CHECK: f4:
-;CHECK: movle
-entry:
- %tmp = icmp sle i32 %a.s, %b.s
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define i32 @f5(i32 %a.u, i32 %b.u) {
-;CHECK: f5:
-;CHECK: movls
-entry:
- %tmp = icmp ule i32 %a.u, %b.u
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define i32 @f6(i32 %a.u, i32 %b.u) {
-;CHECK: f6:
-;CHECK: movhi
-entry:
- %tmp = icmp ugt i32 %a.u, %b.u
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define double @f7(double %a, double %b) {
-;CHECK: f7:
-;CHECK: movlt
-;CHECK: movlt
-;CHECK-VFP: f7:
-;CHECK-VFP: vmovmi
- %tmp = fcmp olt double %a, 1.234e+00
- %tmp1 = select i1 %tmp, double -1.000e+00, double %b
- ret double %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/select_xform.ll b/libclamav/c++/llvm/test/CodeGen/ARM/select_xform.ll
deleted file mode 100644
index 7fd91ce..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/select_xform.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=arm | grep mov | count 2
-
-define i32 @t1(i32 %a, i32 %b, i32 %c) nounwind {
- %tmp1 = icmp sgt i32 %c, 10
- %tmp2 = select i1 %tmp1, i32 0, i32 2147483647
- %tmp3 = add i32 %tmp2, %b
- ret i32 %tmp3
-}
-
-define i32 @t2(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
- %tmp1 = icmp sgt i32 %c, 10
- %tmp2 = select i1 %tmp1, i32 0, i32 10
- %tmp3 = sub i32 %b, %tmp2
- ret i32 %tmp3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/shifter_operand.ll b/libclamav/c++/llvm/test/CodeGen/ARM/shifter_operand.ll
deleted file mode 100644
index 2bbe9fd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/shifter_operand.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=arm | grep add | grep lsl
-; RUN: llc < %s -march=arm | grep bic | grep asr
-
-
-define i32 @test1(i32 %X, i32 %Y, i8 %sh) {
- %shift.upgrd.1 = zext i8 %sh to i32 ; <i32> [#uses=1]
- %A = shl i32 %Y, %shift.upgrd.1 ; <i32> [#uses=1]
- %B = add i32 %X, %A ; <i32> [#uses=1]
- ret i32 %B
-}
-
-define i32 @test2(i32 %X, i32 %Y, i8 %sh) {
- %shift.upgrd.2 = zext i8 %sh to i32 ; <i32> [#uses=1]
- %A = ashr i32 %Y, %shift.upgrd.2 ; <i32> [#uses=1]
- %B = xor i32 %A, -1 ; <i32> [#uses=1]
- %C = and i32 %X, %B ; <i32> [#uses=1]
- ret i32 %C
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/smul.ll b/libclamav/c++/llvm/test/CodeGen/ARM/smul.ll
deleted file mode 100644
index b7ab2e7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/smul.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -march=arm
-; RUN: llc < %s -march=arm -mattr=+v5TE
-; RUN: llc < %s -march=arm -mattr=+v5TE | \
-; RUN: grep smulbt | count 1
-; RUN: llc < %s -march=arm -mattr=+v5TE | \
-; RUN: grep smultt | count 1
-; RUN: llc < %s -march=arm -mattr=+v5TE | \
-; RUN: grep smlabt | count 1
-
-@x = weak global i16 0 ; <i16*> [#uses=1]
-@y = weak global i16 0 ; <i16*> [#uses=0]
-
-define i32 @f1(i32 %y) {
- %tmp = load i16* @x ; <i16> [#uses=1]
- %tmp1 = add i16 %tmp, 2 ; <i16> [#uses=1]
- %tmp2 = sext i16 %tmp1 to i32 ; <i32> [#uses=1]
- %tmp3 = ashr i32 %y, 16 ; <i32> [#uses=1]
- %tmp4 = mul i32 %tmp2, %tmp3 ; <i32> [#uses=1]
- ret i32 %tmp4
-}
-
-define i32 @f2(i32 %x, i32 %y) {
- %tmp1 = ashr i32 %x, 16 ; <i32> [#uses=1]
- %tmp3 = ashr i32 %y, 16 ; <i32> [#uses=1]
- %tmp4 = mul i32 %tmp3, %tmp1 ; <i32> [#uses=1]
- ret i32 %tmp4
-}
-
-define i32 @f3(i32 %a, i16 %x, i32 %y) {
- %tmp = sext i16 %x to i32 ; <i32> [#uses=1]
- %tmp2 = ashr i32 %y, 16 ; <i32> [#uses=1]
- %tmp3 = mul i32 %tmp2, %tmp ; <i32> [#uses=1]
- %tmp5 = add i32 %tmp3, %a ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/spill-q.ll b/libclamav/c++/llvm/test/CodeGen/ARM/spill-q.ll
deleted file mode 100644
index 5ad7ecc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/spill-q.ll
+++ /dev/null
@@ -1,58 +0,0 @@
-; RUN: llc < %s -mtriple=armv7-elf -mattr=+neon | FileCheck %s
-; PR4789
-
-%bar = type { float, float, float }
-%baz = type { i32, [16 x %bar], [16 x float], [16 x i32], i8 }
-%foo = type { <4 x float> }
-%quux = type { i32 (...)**, %baz*, i32 }
-%quuz = type { %quux, i32, %bar, [128 x i8], [16 x %foo], %foo, %foo, %foo }
-
-declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*) nounwind readonly
-
-define arm_apcscc void @aaa(%quuz* %this, i8* %block) {
-; CHECK: aaa:
-; CHECK: bic sp, sp, #15
-; CHECK: vst1.64 {{.*}}sp, :128
-; CHECK: vld1.64 {{.*}}sp, :128
-entry:
- %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef) nounwind ; <<4 x float>> [#uses=1]
- store float 6.300000e+01, float* undef, align 4
- %1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef) nounwind ; <<4 x float>> [#uses=1]
- store float 0.000000e+00, float* undef, align 4
- %2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef) nounwind ; <<4 x float>> [#uses=1]
- %val173 = load <4 x float>* undef ; <<4 x float>> [#uses=1]
- br label %bb4
-
-bb4: ; preds = %bb193, %entry
- %besterror.0.2264 = phi <4 x float> [ undef, %entry ], [ %besterror.0.0, %bb193 ] ; <<4 x float>> [#uses=2]
- %part0.0.0261 = phi <4 x float> [ zeroinitializer, %entry ], [ %23, %bb193 ] ; <<4 x float>> [#uses=2]
- %3 = fmul <4 x float> zeroinitializer, %0 ; <<4 x float>> [#uses=2]
- %4 = fadd <4 x float> %3, %part0.0.0261 ; <<4 x float>> [#uses=1]
- %5 = shufflevector <4 x float> %3, <4 x float> undef, <2 x i32> <i32 2, i32 3> ; <<2 x float>> [#uses=1]
- %6 = shufflevector <2 x float> %5, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x float>> [#uses=1]
- %7 = fmul <4 x float> %1, undef ; <<4 x float>> [#uses=1]
- %8 = fadd <4 x float> %7, <float 5.000000e-01, float 5.000000e-01, float 5.000000e-01, float 5.000000e-01> ; <<4 x float>> [#uses=1]
- %9 = fptosi <4 x float> %8 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %10 = sitofp <4 x i32> %9 to <4 x float> ; <<4 x float>> [#uses=1]
- %11 = fmul <4 x float> %10, %2 ; <<4 x float>> [#uses=1]
- %12 = fmul <4 x float> undef, %6 ; <<4 x float>> [#uses=1]
- %13 = fmul <4 x float> %11, %4 ; <<4 x float>> [#uses=1]
- %14 = fsub <4 x float> %12, %13 ; <<4 x float>> [#uses=1]
- %15 = fsub <4 x float> %14, undef ; <<4 x float>> [#uses=1]
- %16 = fmul <4 x float> %15, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00> ; <<4 x float>> [#uses=1]
- %17 = fadd <4 x float> %16, undef ; <<4 x float>> [#uses=1]
- %18 = fmul <4 x float> %17, %val173 ; <<4 x float>> [#uses=1]
- %19 = shufflevector <4 x float> %18, <4 x float> undef, <2 x i32> <i32 2, i32 3> ; <<2 x float>> [#uses=1]
- %20 = shufflevector <2 x float> %19, <2 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
- %21 = fadd <4 x float> zeroinitializer, %20 ; <<4 x float>> [#uses=2]
- %22 = fcmp ogt <4 x float> %besterror.0.2264, %21 ; <<4 x i1>> [#uses=0]
- br i1 undef, label %bb193, label %bb186
-
-bb186: ; preds = %bb4
- br label %bb193
-
-bb193: ; preds = %bb186, %bb4
- %besterror.0.0 = phi <4 x float> [ %21, %bb186 ], [ %besterror.0.2264, %bb4 ] ; <<4 x float>> [#uses=1]
- %23 = fadd <4 x float> %part0.0.0261, zeroinitializer ; <<4 x float>> [#uses=1]
- br label %bb4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/stack-frame.ll b/libclamav/c++/llvm/test/CodeGen/ARM/stack-frame.ll
deleted file mode 100644
index 1dd57dd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/stack-frame.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=arm
-; RUN: llc < %s -march=arm | grep add | count 1
-
-define void @f1() {
- %c = alloca i8, align 1
- ret void
-}
-
-define i32 @f2() {
- ret i32 1
-}
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/stm.ll b/libclamav/c++/llvm/test/CodeGen/ARM/stm.ll
deleted file mode 100644
index 22a7ecb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/stm.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -mattr=+v6,+vfp2 | grep stm | count 2
-
-@"\01LC" = internal constant [32 x i8] c"Boolean Not: %d %d %d %d %d %d\0A\00", section "__TEXT,__cstring,cstring_literals" ; <[32 x i8]*> [#uses=1]
-@"\01LC1" = internal constant [26 x i8] c"Bitwise Not: %d %d %d %d\0A\00", section "__TEXT,__cstring,cstring_literals" ; <[26 x i8]*> [#uses=1]
-
-declare i32 @printf(i8* nocapture, ...) nounwind
-
-define i32 @main() nounwind {
-entry:
- %0 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([26 x i8]* @"\01LC1", i32 0, i32 0), i32 -2, i32 -3, i32 2, i32 -6) nounwind ; <i32> [#uses=0]
- %1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([32 x i8]* @"\01LC", i32 0, i32 0), i32 0, i32 1, i32 0, i32 1, i32 0, i32 1) nounwind ; <i32> [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/str_post.ll b/libclamav/c++/llvm/test/CodeGen/ARM/str_post.ll
deleted file mode 100644
index 97916f1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/str_post.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s
-
-define i16 @test1(i32* %X, i16* %A) {
-; CHECK: test1:
-; CHECK: strh {{.*}}[{{.*}}], #-4
- %Y = load i32* %X ; <i32> [#uses=1]
- %tmp1 = trunc i32 %Y to i16 ; <i16> [#uses=1]
- store i16 %tmp1, i16* %A
- %tmp2 = ptrtoint i16* %A to i16 ; <i16> [#uses=1]
- %tmp3 = sub i16 %tmp2, 4 ; <i16> [#uses=1]
- ret i16 %tmp3
-}
-
-define i32 @test2(i32* %X, i32* %A) {
-; CHECK: test2:
-; CHECK: str {{.*}}[{{.*}}],
- %Y = load i32* %X ; <i32> [#uses=1]
- store i32 %Y, i32* %A
- %tmp1 = ptrtoint i32* %A to i32 ; <i32> [#uses=1]
- %tmp2 = sub i32 %tmp1, 4 ; <i32> [#uses=1]
- ret i32 %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/str_pre-2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/str_pre-2.ll
deleted file mode 100644
index f8d3df2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/str_pre-2.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -mtriple=arm-linux-gnu | grep {str.*\\!}
-; RUN: llc < %s -mtriple=arm-linux-gnu | grep {ldr.*\\\[.*\], #+4}
-
-@b = external global i64*
-
-define i64 @t(i64 %a) nounwind readonly {
-entry:
- %0 = load i64** @b, align 4
- %1 = load i64* %0, align 4
- %2 = mul i64 %1, %a
- ret i64 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/str_pre.ll b/libclamav/c++/llvm/test/CodeGen/ARM/str_pre.ll
deleted file mode 100644
index e56e3f2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/str_pre.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=arm | \
-; RUN: grep {str.*\\!} | count 2
-
-define void @test1(i32* %X, i32* %A, i32** %dest) {
- %B = load i32* %A ; <i32> [#uses=1]
- %Y = getelementptr i32* %X, i32 4 ; <i32*> [#uses=2]
- store i32 %B, i32* %Y
- store i32* %Y, i32** %dest
- ret void
-}
-
-define i16* @test2(i16* %X, i32* %A) {
- %B = load i32* %A ; <i32> [#uses=1]
- %Y = getelementptr i16* %X, i32 4 ; <i16*> [#uses=2]
- %tmp = trunc i32 %B to i16 ; <i16> [#uses=1]
- store i16 %tmp, i16* %Y
- ret i16* %Y
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/str_trunc.ll b/libclamav/c++/llvm/test/CodeGen/ARM/str_trunc.ll
deleted file mode 100644
index 2f1166b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/str_trunc.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=arm | \
-; RUN: grep strb | count 1
-; RUN: llc < %s -march=arm | \
-; RUN: grep strh | count 1
-
-define void @test1(i32 %v, i16* %ptr) {
- %tmp = trunc i32 %v to i16 ; <i16> [#uses=1]
- store i16 %tmp, i16* %ptr
- ret void
-}
-
-define void @test2(i32 %v, i8* %ptr) {
- %tmp = trunc i32 %v to i8 ; <i8> [#uses=1]
- store i8 %tmp, i8* %ptr
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/sxt_rot.ll b/libclamav/c++/llvm/test/CodeGen/ARM/sxt_rot.ll
deleted file mode 100644
index 4752f17..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/sxt_rot.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6 | \
-; RUN: grep sxtb | count 2
-; RUN: llc < %s -march=arm -mattr=+v6 | \
-; RUN: grep sxtb | grep ror | count 1
-; RUN: llc < %s -march=arm -mattr=+v6 | \
-; RUN: grep sxtab | count 1
-
-define i32 @test0(i8 %A) {
- %B = sext i8 %A to i32
- ret i32 %B
-}
-
-define i8 @test1(i32 %A) signext {
- %B = lshr i32 %A, 8
- %C = shl i32 %A, 24
- %D = or i32 %B, %C
- %E = trunc i32 %D to i8
- ret i8 %E
-}
-
-define i32 @test2(i32 %A, i32 %X) signext {
- %B = lshr i32 %A, 8
- %C = shl i32 %A, 24
- %D = or i32 %B, %C
- %E = trunc i32 %D to i8
- %F = sext i8 %E to i32
- %G = add i32 %F, %X
- ret i32 %G
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/t2-imm.ll b/libclamav/c++/llvm/test/CodeGen/ARM/t2-imm.ll
deleted file mode 100644
index 848a4df..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/t2-imm.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+thumb2 | FileCheck %s
-
-define i32 @f6(i32 %a) {
-; CHECK:f6
-; CHECK: movw r0, #:lower16:65537123
-; CHECK: movt r0, #:upper16:65537123
- %tmp = add i32 0, 65537123
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/tail-opts.ll b/libclamav/c++/llvm/test/CodeGen/ARM/tail-opts.ll
deleted file mode 100644
index 17c8bae..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/tail-opts.ll
+++ /dev/null
@@ -1,64 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -mcpu=cortex-a8 -asm-verbose=false | FileCheck %s
-
-declare void @bar(i32)
-declare void @car(i32)
-declare void @dar(i32)
-declare void @ear(i32)
-declare void @far(i32)
-declare i1 @qux()
-
-@GHJK = global i32 0
-
-declare i8* @choose(i8*, i8*)
-
-; BranchFolding should tail-duplicate the indirect jump to avoid
-; redundant branching.
-
-; CHECK: tail_duplicate_me:
-; CHECK: qux
-; CHECK: qux
-; CHECK: ldr r{{.}}, LCPI
-; CHECK: str r
-; CHECK-NEXT: bx r
-; CHECK: ldr r{{.}}, LCPI
-; CHECK: str r
-; CHECK-NEXT: bx r
-; CHECK: ldr r{{.}}, LCPI
-; CHECK: str r
-; CHECK-NEXT: bx r
-
-define void @tail_duplicate_me() nounwind {
-entry:
- %a = call i1 @qux()
- %c = call i8* @choose(i8* blockaddress(@tail_duplicate_me, %return),
- i8* blockaddress(@tail_duplicate_me, %altret))
- br i1 %a, label %A, label %next
-next:
- %b = call i1 @qux()
- br i1 %b, label %B, label %C
-
-A:
- call void @bar(i32 0)
- store i32 0, i32* @GHJK
- br label %M
-
-B:
- call void @car(i32 1)
- store i32 0, i32* @GHJK
- br label %M
-
-C:
- call void @dar(i32 2)
- store i32 0, i32* @GHJK
- br label %M
-
-M:
- indirectbr i8* %c, [label %return, label %altret]
-
-return:
- call void @ear(i32 1000)
- ret void
-altret:
- call void @far(i32 1001)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/thread_pointer.ll b/libclamav/c++/llvm/test/CodeGen/ARM/thread_pointer.ll
deleted file mode 100644
index 3143387..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/thread_pointer.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi | \
-; RUN: grep {__aeabi_read_tp}
-
-define i8* @test() {
-entry:
- %tmp1 = call i8* @llvm.arm.thread.pointer( ) ; <i8*> [#uses=0]
- ret i8* %tmp1
-}
-
-declare i8* @llvm.arm.thread.pointer()
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/tls1.ll b/libclamav/c++/llvm/test/CodeGen/ARM/tls1.ll
deleted file mode 100644
index 1087094..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/tls1.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi | \
-; RUN: grep {i(tpoff)}
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi | \
-; RUN: grep {__aeabi_read_tp}
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi \
-; RUN: -relocation-model=pic | grep {__tls_get_addr}
-
-
-@i = thread_local global i32 15 ; <i32*> [#uses=2]
-
-define i32 @f() {
-entry:
- %tmp1 = load i32* @i ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
-define i32* @g() {
-entry:
- ret i32* @i
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/tls2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/tls2.ll
deleted file mode 100644
index d932f90..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/tls2.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi \
-; RUN: | FileCheck %s -check-prefix=CHECK-NONPIC
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi \
-; RUN: -relocation-model=pic | FileCheck %s -check-prefix=CHECK-PIC
-
-@i = external thread_local global i32 ; <i32*> [#uses=2]
-
-define i32 @f() {
-; CHECK-NONPIC: f:
-; CHECK-NONPIC: ldr {{r.}}, [pc, +{{r.}}]
-; CHECK-NONPIC: i(gottpoff)
-; CHECK-PIC: f:
-; CHECK-PIC: __tls_get_addr
-entry:
- %tmp1 = load i32* @i ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
-define i32* @g() {
-; CHECK-NONPIC: g:
-; CHECK-NONPIC: ldr {{r.}}, [pc, +{{r.}}]
-; CHECK-NONPIC: i(gottpoff)
-; CHECK-PIC: g:
-; CHECK-PIC: __tls_get_addr
-entry:
- ret i32* @i
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/tls3.ll b/libclamav/c++/llvm/test/CodeGen/ARM/tls3.ll
deleted file mode 100644
index df7a4ca..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/tls3.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi | \
-; RUN: grep {tbss}
-
-%struct.anon = type { i32, i32 }
-@teste = internal thread_local global %struct.anon zeroinitializer ; <%struct.anon*> [#uses=1]
-
-define i32 @main() {
-entry:
- %tmp2 = load i32* getelementptr (%struct.anon* @teste, i32 0, i32 0), align 8 ; <i32> [#uses=1]
- ret i32 %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/trunc_ldr.ll b/libclamav/c++/llvm/test/CodeGen/ARM/trunc_ldr.ll
deleted file mode 100644
index 3033c2b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/trunc_ldr.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=arm | grep ldrb.*7 | count 1
-; RUN: llc < %s -march=arm | grep ldrsb.*7 | count 1
-
- %struct.A = type { i8, i8, i8, i8, i16, i8, i8, %struct.B** }
- %struct.B = type { float, float, i32, i32, i32, [0 x i8] }
-
-define i8 @f1(%struct.A* %d) {
- %tmp2 = getelementptr %struct.A* %d, i32 0, i32 4
- %tmp23 = bitcast i16* %tmp2 to i32*
- %tmp4 = load i32* %tmp23
- %tmp512 = lshr i32 %tmp4, 24
- %tmp56 = trunc i32 %tmp512 to i8
- ret i8 %tmp56
-}
-
-define i32 @f2(%struct.A* %d) {
- %tmp2 = getelementptr %struct.A* %d, i32 0, i32 4
- %tmp23 = bitcast i16* %tmp2 to i32*
- %tmp4 = load i32* %tmp23
- %tmp512 = lshr i32 %tmp4, 24
- %tmp56 = trunc i32 %tmp512 to i8
- %tmp57 = sext i8 %tmp56 to i32
- ret i32 %tmp57
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/truncstore-dag-combine.ll b/libclamav/c++/llvm/test/CodeGen/ARM/truncstore-dag-combine.ll
deleted file mode 100644
index 2da08b6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/truncstore-dag-combine.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=arm | not grep orr
-; RUN: llc < %s -march=arm | not grep mov
-
-define void @bar(i8* %P, i16* %Q) {
-entry:
- %P1 = bitcast i8* %P to i16* ; <i16*> [#uses=1]
- %tmp = load i16* %Q, align 1 ; <i16> [#uses=1]
- store i16 %tmp, i16* %P1, align 1
- ret void
-}
-
-define void @foo(i8* %P, i32* %Q) {
-entry:
- %P1 = bitcast i8* %P to i32* ; <i32*> [#uses=1]
- %tmp = load i32* %Q, align 1 ; <i32> [#uses=1]
- store i32 %tmp, i32* %P1, align 1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/tst_teq.ll b/libclamav/c++/llvm/test/CodeGen/ARM/tst_teq.ll
deleted file mode 100644
index c83111e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/tst_teq.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=arm | grep tst
-; RUN: llc < %s -march=arm | grep teq
-
-define i32 @f(i32 %a) {
-entry:
- %tmp2 = and i32 %a, 255 ; <i32> [#uses=1]
- icmp eq i32 %tmp2, 0 ; <i1>:0 [#uses=1]
- %retval = select i1 %0, i32 20, i32 10 ; <i32> [#uses=1]
- ret i32 %retval
-}
-
-define i32 @g(i32 %a) {
-entry:
- %tmp2 = xor i32 %a, 255
- icmp eq i32 %tmp2, 0 ; <i1>:0 [#uses=1]
- %retval = select i1 %0, i32 20, i32 10 ; <i32> [#uses=1]
- ret i32 %retval
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/uint64tof64.ll b/libclamav/c++/llvm/test/CodeGen/ARM/uint64tof64.ll
deleted file mode 100644
index 32eb225..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/uint64tof64.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -mattr=+vfp2
-
- %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
-@"\01LC10" = external constant [54 x i8] ; <[54 x i8]*> [#uses=1]
-
-define fastcc void @t() {
-entry:
- %0 = load i64* null, align 4 ; <i64> [#uses=1]
- %1 = uitofp i64 %0 to double ; <double> [#uses=1]
- %2 = fdiv double 0.000000e+00, %1 ; <double> [#uses=1]
- %3 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* null, i8* getelementptr ([54 x i8]* @"\01LC10", i32 0, i32 0), i64 0, double %2) ; <i32> [#uses=0]
- ret void
-}
-
-declare i32 @fprintf(%struct.FILE*, i8*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/unaligned_load_store.ll b/libclamav/c++/llvm/test/CodeGen/ARM/unaligned_load_store.ll
deleted file mode 100644
index a4494f3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/unaligned_load_store.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -march=arm | FileCheck %s -check-prefix=GENERIC
-; RUN: llc < %s -mtriple=armv6-apple-darwin | FileCheck %s -check-prefix=DARWIN_V6
-; RUN: llc < %s -mtriple=armv6-linux | FileCheck %s -check-prefix=GENERIC
-
-; rdar://7113725
-
-define arm_apcscc void @t(i8* nocapture %a, i8* nocapture %b) nounwind {
-entry:
-; GENERIC: t:
-; GENERIC: ldrb r2
-; GENERIC: ldrb r3
-; GENERIC: ldrb r12
-; GENERIC: ldrb r1
-; GENERIC: strb r1
-; GENERIC: strb r12
-; GENERIC: strb r3
-; GENERIC: strb r2
-
-; DARWIN_V6: t:
-; DARWIN_V6: ldr r1
-; DARWIN_V6: str r1
-
- %__src1.i = bitcast i8* %b to i32* ; <i32*> [#uses=1]
- %__dest2.i = bitcast i8* %a to i32* ; <i32*> [#uses=1]
- %tmp.i = load i32* %__src1.i, align 1 ; <i32> [#uses=1]
- store i32 %tmp.i, i32* %__dest2.i, align 1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/unord.ll b/libclamav/c++/llvm/test/CodeGen/ARM/unord.ll
deleted file mode 100644
index bd28034..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/unord.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=arm | grep movne | count 1
-; RUN: llc < %s -march=arm | grep moveq | count 1
-
-define i32 @f1(float %X, float %Y) {
- %tmp = fcmp uno float %X, %Y
- %retval = select i1 %tmp, i32 1, i32 -1
- ret i32 %retval
-}
-
-define i32 @f2(float %X, float %Y) {
- %tmp = fcmp ord float %X, %Y
- %retval = select i1 %tmp, i32 1, i32 -1
- ret i32 %retval
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/uxt_rot.ll b/libclamav/c++/llvm/test/CodeGen/ARM/uxt_rot.ll
deleted file mode 100644
index 6307795..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/uxt_rot.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+v6 | grep uxtb | count 1
-; RUN: llc < %s -march=arm -mattr=+v6 | grep uxtab | count 1
-; RUN: llc < %s -march=arm -mattr=+v6 | grep uxth | count 1
-
-define i8 @test1(i32 %A.u) zeroext {
- %B.u = trunc i32 %A.u to i8
- ret i8 %B.u
-}
-
-define i32 @test2(i32 %A.u, i32 %B.u) zeroext {
- %C.u = trunc i32 %B.u to i8
- %D.u = zext i8 %C.u to i32
- %E.u = add i32 %A.u, %D.u
- ret i32 %E.u
-}
-
-define i32 @test3(i32 %A.u) zeroext {
- %B.u = lshr i32 %A.u, 8
- %C.u = shl i32 %A.u, 24
- %D.u = or i32 %B.u, %C.u
- %E.u = trunc i32 %D.u to i16
- %F.u = zext i16 %E.u to i32
- ret i32 %F.u
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/uxtb.ll b/libclamav/c++/llvm/test/CodeGen/ARM/uxtb.ll
deleted file mode 100644
index 9d6e4bd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/uxtb.ll
+++ /dev/null
@@ -1,74 +0,0 @@
-; RUN: llc < %s -mtriple=armv6-apple-darwin | \
-; RUN: grep uxt | count 10
-
-define i32 @test1(i32 %x) {
- %tmp1 = and i32 %x, 16711935 ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
-define i32 @test2(i32 %x) {
- %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
- ret i32 %tmp2
-}
-
-define i32 @test3(i32 %x) {
- %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
- ret i32 %tmp2
-}
-
-define i32 @test4(i32 %x) {
- %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
- %tmp6 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
- ret i32 %tmp6
-}
-
-define i32 @test5(i32 %x) {
- %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
- ret i32 %tmp2
-}
-
-define i32 @test6(i32 %x) {
- %tmp1 = lshr i32 %x, 16 ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 255 ; <i32> [#uses=1]
- %tmp4 = shl i32 %x, 16 ; <i32> [#uses=1]
- %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
- %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
- ret i32 %tmp6
-}
-
-define i32 @test7(i32 %x) {
- %tmp1 = lshr i32 %x, 16 ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 255 ; <i32> [#uses=1]
- %tmp4 = shl i32 %x, 16 ; <i32> [#uses=1]
- %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
- %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
- ret i32 %tmp6
-}
-
-define i32 @test8(i32 %x) {
- %tmp1 = shl i32 %x, 8 ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 16711680 ; <i32> [#uses=1]
- %tmp5 = lshr i32 %x, 24 ; <i32> [#uses=1]
- %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
- ret i32 %tmp6
-}
-
-define i32 @test9(i32 %x) {
- %tmp1 = lshr i32 %x, 24 ; <i32> [#uses=1]
- %tmp4 = shl i32 %x, 8 ; <i32> [#uses=1]
- %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
- %tmp6 = or i32 %tmp5, %tmp1 ; <i32> [#uses=1]
- ret i32 %tmp6
-}
-
-define i32 @test10(i32 %p0) {
- %tmp1 = lshr i32 %p0, 7 ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 16253176 ; <i32> [#uses=2]
- %tmp4 = lshr i32 %tmp2, 5 ; <i32> [#uses=1]
- %tmp5 = and i32 %tmp4, 458759 ; <i32> [#uses=1]
- %tmp7 = or i32 %tmp5, %tmp2 ; <i32> [#uses=1]
- ret i32 %tmp7
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vaba.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vaba.ll
deleted file mode 100644
index e2dca46..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vaba.ll
+++ /dev/null
@@ -1,205 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vabas8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vabas8:
-;CHECK: vaba.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i8>* %C
- %tmp4 = call <8 x i8> @llvm.arm.neon.vabas.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @vabas16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vabas16:
-;CHECK: vaba.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
- %tmp4 = call <4 x i16> @llvm.arm.neon.vabas.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @vabas32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vabas32:
-;CHECK: vaba.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
- %tmp4 = call <2 x i32> @llvm.arm.neon.vabas.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
- ret <2 x i32> %tmp4
-}
-
-define <8 x i8> @vabau8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vabau8:
-;CHECK: vaba.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i8>* %C
- %tmp4 = call <8 x i8> @llvm.arm.neon.vabau.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @vabau16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vabau16:
-;CHECK: vaba.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
- %tmp4 = call <4 x i16> @llvm.arm.neon.vabau.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @vabau32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vabau32:
-;CHECK: vaba.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
- %tmp4 = call <2 x i32> @llvm.arm.neon.vabau.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
- ret <2 x i32> %tmp4
-}
-
-define <16 x i8> @vabaQs8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
-;CHECK: vabaQs8:
-;CHECK: vaba.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = load <16 x i8>* %C
- %tmp4 = call <16 x i8> @llvm.arm.neon.vabas.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> %tmp3)
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @vabaQs16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
-;CHECK: vabaQs16:
-;CHECK: vaba.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = load <8 x i16>* %C
- %tmp4 = call <8 x i16> @llvm.arm.neon.vabas.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i16> %tmp3)
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vabaQs32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
-;CHECK: vabaQs32:
-;CHECK: vaba.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = load <4 x i32>* %C
- %tmp4 = call <4 x i32> @llvm.arm.neon.vabas.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> %tmp3)
- ret <4 x i32> %tmp4
-}
-
-define <16 x i8> @vabaQu8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
-;CHECK: vabaQu8:
-;CHECK: vaba.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = load <16 x i8>* %C
- %tmp4 = call <16 x i8> @llvm.arm.neon.vabau.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> %tmp3)
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @vabaQu16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
-;CHECK: vabaQu16:
-;CHECK: vaba.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = load <8 x i16>* %C
- %tmp4 = call <8 x i16> @llvm.arm.neon.vabau.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i16> %tmp3)
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vabaQu32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
-;CHECK: vabaQu32:
-;CHECK: vaba.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = load <4 x i32>* %C
- %tmp4 = call <4 x i32> @llvm.arm.neon.vabau.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> %tmp3)
- ret <4 x i32> %tmp4
-}
-
-declare <8 x i8> @llvm.arm.neon.vabas.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vabas.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vabas.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vabau.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vabau.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vabau.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vabas.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vabas.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vabas.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vabau.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vabau.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vabau.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
-
-define <8 x i16> @vabals8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vabals8:
-;CHECK: vabal.s8
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i8>* %C
- %tmp4 = call <8 x i16> @llvm.arm.neon.vabals.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vabals16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vabals16:
-;CHECK: vabal.s16
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
- %tmp4 = call <4 x i32> @llvm.arm.neon.vabals.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
- ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @vabals32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vabals32:
-;CHECK: vabal.s32
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
- %tmp4 = call <2 x i64> @llvm.arm.neon.vabals.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
- ret <2 x i64> %tmp4
-}
-
-define <8 x i16> @vabalu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vabalu8:
-;CHECK: vabal.u8
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i8>* %C
- %tmp4 = call <8 x i16> @llvm.arm.neon.vabalu.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vabalu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vabalu16:
-;CHECK: vabal.u16
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
- %tmp4 = call <4 x i32> @llvm.arm.neon.vabalu.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
- ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @vabalu32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vabalu32:
-;CHECK: vabal.u32
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
- %tmp4 = call <2 x i64> @llvm.arm.neon.vabalu.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
- ret <2 x i64> %tmp4
-}
-
-declare <8 x i16> @llvm.arm.neon.vabals.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vabals.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vabals.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vabalu.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vabalu.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vabalu.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vabd.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vabd.ll
deleted file mode 100644
index 2b45393..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vabd.ll
+++ /dev/null
@@ -1,209 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vabds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vabds8:
-;CHECK: vabd.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vabds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vabds16:
-;CHECK: vabd.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vabds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vabds32:
-;CHECK: vabd.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <8 x i8> @vabdu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vabdu8:
-;CHECK: vabd.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vabdu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vabdu16:
-;CHECK: vabd.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vabdu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vabdu32:
-;CHECK: vabd.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <2 x float> @vabdf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vabdf32:
-;CHECK: vabd.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = call <2 x float> @llvm.arm.neon.vabds.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
- ret <2 x float> %tmp3
-}
-
-define <16 x i8> @vabdQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vabdQs8:
-;CHECK: vabd.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vabds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vabdQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vabdQs16:
-;CHECK: vabd.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vabds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vabdQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vabdQs32:
-;CHECK: vabd.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vabds.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <16 x i8> @vabdQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vabdQu8:
-;CHECK: vabd.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vabdu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vabdQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vabdQu16:
-;CHECK: vabd.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vabdu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vabdQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vabdQu32:
-;CHECK: vabd.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vabdu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <4 x float> @vabdQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vabdQf32:
-;CHECK: vabd.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = call <4 x float> @llvm.arm.neon.vabds.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
- ret <4 x float> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vabds.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vabds.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vabds.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vabdu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vabdu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vabdu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <2 x float> @llvm.arm.neon.vabds.v2f32(<2 x float>, <2 x float>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vabds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vabds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vabds.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vabdu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vabdu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vabdu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-
-declare <4 x float> @llvm.arm.neon.vabds.v4f32(<4 x float>, <4 x float>) nounwind readnone
-
-define <8 x i16> @vabdls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vabdls8:
-;CHECK: vabdl.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vabdls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vabdls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vabdls16:
-;CHECK: vabdl.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vabdls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vabdls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vabdls32:
-;CHECK: vabdl.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vabdls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define <8 x i16> @vabdlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vabdlu8:
-;CHECK: vabdl.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vabdlu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vabdlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vabdlu16:
-;CHECK: vabdl.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vabdlu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vabdlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vabdlu32:
-;CHECK: vabdl.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vabdlu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-declare <8 x i16> @llvm.arm.neon.vabdls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vabdls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vabdls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vabdlu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vabdlu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vabdlu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vabs.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vabs.ll
deleted file mode 100644
index 18ba61f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vabs.ll
+++ /dev/null
@@ -1,131 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vabss8(<8 x i8>* %A) nounwind {
-;CHECK: vabss8:
-;CHECK: vabs.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8> %tmp1)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vabss16(<4 x i16>* %A) nounwind {
-;CHECK: vabss16:
-;CHECK: vabs.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16> %tmp1)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vabss32(<2 x i32>* %A) nounwind {
-;CHECK: vabss32:
-;CHECK: vabs.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32> %tmp1)
- ret <2 x i32> %tmp2
-}
-
-define <2 x float> @vabsf32(<2 x float>* %A) nounwind {
-;CHECK: vabsf32:
-;CHECK: vabs.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = call <2 x float> @llvm.arm.neon.vabs.v2f32(<2 x float> %tmp1)
- ret <2 x float> %tmp2
-}
-
-define <16 x i8> @vabsQs8(<16 x i8>* %A) nounwind {
-;CHECK: vabsQs8:
-;CHECK: vabs.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = call <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8> %tmp1)
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vabsQs16(<8 x i16>* %A) nounwind {
-;CHECK: vabsQs16:
-;CHECK: vabs.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %tmp1)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vabsQs32(<4 x i32>* %A) nounwind {
-;CHECK: vabsQs32:
-;CHECK: vabs.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vabs.v4i32(<4 x i32> %tmp1)
- ret <4 x i32> %tmp2
-}
-
-define <4 x float> @vabsQf32(<4 x float>* %A) nounwind {
-;CHECK: vabsQf32:
-;CHECK: vabs.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = call <4 x float> @llvm.arm.neon.vabs.v4f32(<4 x float> %tmp1)
- ret <4 x float> %tmp2
-}
-
-declare <8 x i8> @llvm.arm.neon.vabs.v8i8(<8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vabs.v4i16(<4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vabs.v2i32(<2 x i32>) nounwind readnone
-declare <2 x float> @llvm.arm.neon.vabs.v2f32(<2 x float>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vabs.v16i8(<16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vabs.v4i32(<4 x i32>) nounwind readnone
-declare <4 x float> @llvm.arm.neon.vabs.v4f32(<4 x float>) nounwind readnone
-
-define <8 x i8> @vqabss8(<8 x i8>* %A) nounwind {
-;CHECK: vqabss8:
-;CHECK: vqabs.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vqabs.v8i8(<8 x i8> %tmp1)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vqabss16(<4 x i16>* %A) nounwind {
-;CHECK: vqabss16:
-;CHECK: vqabs.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vqabs.v4i16(<4 x i16> %tmp1)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vqabss32(<2 x i32>* %A) nounwind {
-;CHECK: vqabss32:
-;CHECK: vqabs.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vqabs.v2i32(<2 x i32> %tmp1)
- ret <2 x i32> %tmp2
-}
-
-define <16 x i8> @vqabsQs8(<16 x i8>* %A) nounwind {
-;CHECK: vqabsQs8:
-;CHECK: vqabs.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = call <16 x i8> @llvm.arm.neon.vqabs.v16i8(<16 x i8> %tmp1)
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vqabsQs16(<8 x i16>* %A) nounwind {
-;CHECK: vqabsQs16:
-;CHECK: vqabs.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vqabs.v8i16(<8 x i16> %tmp1)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vqabsQs32(<4 x i32>* %A) nounwind {
-;CHECK: vqabsQs32:
-;CHECK: vqabs.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vqabs.v4i32(<4 x i32> %tmp1)
- ret <4 x i32> %tmp2
-}
-
-declare <8 x i8> @llvm.arm.neon.vqabs.v8i8(<8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqabs.v4i16(<4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqabs.v2i32(<2 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vqabs.v16i8(<16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vqabs.v8i16(<8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vqabs.v4i32(<4 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vadd.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vadd.ll
deleted file mode 100644
index 9fa5307..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vadd.ll
+++ /dev/null
@@ -1,277 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vaddi8:
-;CHECK: vadd.i8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = add <8 x i8> %tmp1, %tmp2
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vaddi16:
-;CHECK: vadd.i16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = add <4 x i16> %tmp1, %tmp2
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vaddi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vaddi32:
-;CHECK: vadd.i32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = add <2 x i32> %tmp1, %tmp2
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vaddi64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vaddi64:
-;CHECK: vadd.i64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = add <1 x i64> %tmp1, %tmp2
- ret <1 x i64> %tmp3
-}
-
-define <2 x float> @vaddf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vaddf32:
-;CHECK: vadd.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = add <2 x float> %tmp1, %tmp2
- ret <2 x float> %tmp3
-}
-
-define <16 x i8> @vaddQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vaddQi8:
-;CHECK: vadd.i8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = add <16 x i8> %tmp1, %tmp2
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vaddQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vaddQi16:
-;CHECK: vadd.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = add <8 x i16> %tmp1, %tmp2
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vaddQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vaddQi32:
-;CHECK: vadd.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = add <4 x i32> %tmp1, %tmp2
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vaddQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vaddQi64:
-;CHECK: vadd.i64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = add <2 x i64> %tmp1, %tmp2
- ret <2 x i64> %tmp3
-}
-
-define <4 x float> @vaddQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vaddQf32:
-;CHECK: vadd.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = add <4 x float> %tmp1, %tmp2
- ret <4 x float> %tmp3
-}
-
-define <8 x i8> @vaddhni16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vaddhni16:
-;CHECK: vaddhn.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vaddhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vaddhni32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vaddhni32:
-;CHECK: vaddhn.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vaddhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vaddhni64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vaddhni64:
-;CHECK: vaddhn.i64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vaddhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vaddhn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vaddhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vaddhn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
-
-define <8 x i8> @vraddhni16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vraddhni16:
-;CHECK: vraddhn.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vraddhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vraddhni32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vraddhni32:
-;CHECK: vraddhn.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vraddhni64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vraddhni64:
-;CHECK: vraddhn.i64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vraddhn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vraddhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vraddhn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
-
-define <8 x i16> @vaddls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vaddls8:
-;CHECK: vaddl.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vaddls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vaddls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vaddls16:
-;CHECK: vaddl.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vaddls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vaddls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vaddls32:
-;CHECK: vaddl.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vaddls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define <8 x i16> @vaddlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vaddlu8:
-;CHECK: vaddl.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vaddlu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vaddlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vaddlu16:
-;CHECK: vaddl.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vaddlu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vaddlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vaddlu32:
-;CHECK: vaddl.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vaddlu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-declare <8 x i16> @llvm.arm.neon.vaddls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vaddls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vaddls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vaddlu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vaddlu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vaddlu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
-define <8 x i16> @vaddws8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vaddws8:
-;CHECK: vaddw.s8
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vaddws.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vaddws16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vaddws16:
-;CHECK: vaddw.s16
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vaddws.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vaddws32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vaddws32:
-;CHECK: vaddw.s32
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vaddws.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define <8 x i16> @vaddwu8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vaddwu8:
-;CHECK: vaddw.u8
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vaddwu.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vaddwu16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vaddwu16:
-;CHECK: vaddw.u16
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vaddwu.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vaddwu32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vaddwu32:
-;CHECK: vaddw.u32
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vaddwu.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-declare <8 x i16> @llvm.arm.neon.vaddws.v8i16(<8 x i16>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vaddws.v4i32(<4 x i32>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vaddws.v2i64(<2 x i64>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vaddwu.v8i16(<8 x i16>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vaddwu.v4i32(<4 x i32>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vaddwu.v2i64(<2 x i64>, <2 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vargs.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vargs.ll
deleted file mode 100644
index 5f3536c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vargs.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=arm
-@str = internal constant [43 x i8] c"Hello World %d %d %d %d %d %d %d %d %d %d\0A\00" ; <[43 x i8]*> [#uses=1]
-
-define i32 @main() {
-entry:
- %tmp = call i32 (i8*, ...)* @printf( i8* getelementptr ([43 x i8]* @str, i32 0, i64 0), i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10 ) ; <i32> [#uses=0]
- %tmp2 = call i32 (i8*, ...)* @printf( i8* getelementptr ([43 x i8]* @str, i32 0, i64 0), i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1 ) ; <i32> [#uses=0]
- ret i32 11
-}
-
-declare i32 @printf(i8*, ...)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vargs_align.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vargs_align.ll
deleted file mode 100644
index e4ef9e3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vargs_align.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnueabi | FileCheck %s -check-prefix=EABI
-; RUN: llc < %s -march=arm -mtriple=arm-linux-gnu | FileCheck %s -check-prefix=OABI
-
-define i32 @f(i32 %a, ...) {
-entry:
- %a_addr = alloca i32 ; <i32*> [#uses=1]
- %retval = alloca i32, align 4 ; <i32*> [#uses=2]
- %tmp = alloca i32, align 4 ; <i32*> [#uses=2]
- "alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %a, i32* %a_addr
- store i32 0, i32* %tmp
- %tmp1 = load i32* %tmp ; <i32> [#uses=1]
- store i32 %tmp1, i32* %retval
- br label %return
-
-return: ; preds = %entry
- %retval2 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval2
-; EABI: add sp, sp, #12
-; EABI: add sp, sp, #16
-; OABI: add sp, sp, #12
-; OABI: add sp, sp, #12
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vbits.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vbits.ll
deleted file mode 100644
index 293d229..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vbits.ll
+++ /dev/null
@@ -1,507 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @v_andi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: v_andi8:
-;CHECK: vand
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = and <8 x i8> %tmp1, %tmp2
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @v_andi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: v_andi16:
-;CHECK: vand
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = and <4 x i16> %tmp1, %tmp2
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @v_andi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: v_andi32:
-;CHECK: vand
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = and <2 x i32> %tmp1, %tmp2
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @v_andi64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: v_andi64:
-;CHECK: vand
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = and <1 x i64> %tmp1, %tmp2
- ret <1 x i64> %tmp3
-}
-
-define <16 x i8> @v_andQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: v_andQi8:
-;CHECK: vand
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = and <16 x i8> %tmp1, %tmp2
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @v_andQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: v_andQi16:
-;CHECK: vand
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = and <8 x i16> %tmp1, %tmp2
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @v_andQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: v_andQi32:
-;CHECK: vand
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = and <4 x i32> %tmp1, %tmp2
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @v_andQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: v_andQi64:
-;CHECK: vand
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = and <2 x i64> %tmp1, %tmp2
- ret <2 x i64> %tmp3
-}
-
-define <8 x i8> @v_bici8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: v_bici8:
-;CHECK: vbic
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = xor <8 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
- %tmp4 = and <8 x i8> %tmp1, %tmp3
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @v_bici16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: v_bici16:
-;CHECK: vbic
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = xor <4 x i16> %tmp2, < i16 -1, i16 -1, i16 -1, i16 -1 >
- %tmp4 = and <4 x i16> %tmp1, %tmp3
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @v_bici32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: v_bici32:
-;CHECK: vbic
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = xor <2 x i32> %tmp2, < i32 -1, i32 -1 >
- %tmp4 = and <2 x i32> %tmp1, %tmp3
- ret <2 x i32> %tmp4
-}
-
-define <1 x i64> @v_bici64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: v_bici64:
-;CHECK: vbic
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = xor <1 x i64> %tmp2, < i64 -1 >
- %tmp4 = and <1 x i64> %tmp1, %tmp3
- ret <1 x i64> %tmp4
-}
-
-define <16 x i8> @v_bicQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: v_bicQi8:
-;CHECK: vbic
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = xor <16 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
- %tmp4 = and <16 x i8> %tmp1, %tmp3
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @v_bicQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: v_bicQi16:
-;CHECK: vbic
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = xor <8 x i16> %tmp2, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
- %tmp4 = and <8 x i16> %tmp1, %tmp3
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @v_bicQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: v_bicQi32:
-;CHECK: vbic
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = xor <4 x i32> %tmp2, < i32 -1, i32 -1, i32 -1, i32 -1 >
- %tmp4 = and <4 x i32> %tmp1, %tmp3
- ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @v_bicQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: v_bicQi64:
-;CHECK: vbic
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = xor <2 x i64> %tmp2, < i64 -1, i64 -1 >
- %tmp4 = and <2 x i64> %tmp1, %tmp3
- ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @v_eori8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: v_eori8:
-;CHECK: veor
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = xor <8 x i8> %tmp1, %tmp2
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @v_eori16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: v_eori16:
-;CHECK: veor
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = xor <4 x i16> %tmp1, %tmp2
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @v_eori32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: v_eori32:
-;CHECK: veor
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = xor <2 x i32> %tmp1, %tmp2
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @v_eori64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: v_eori64:
-;CHECK: veor
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = xor <1 x i64> %tmp1, %tmp2
- ret <1 x i64> %tmp3
-}
-
-define <16 x i8> @v_eorQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: v_eorQi8:
-;CHECK: veor
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = xor <16 x i8> %tmp1, %tmp2
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @v_eorQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: v_eorQi16:
-;CHECK: veor
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = xor <8 x i16> %tmp1, %tmp2
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @v_eorQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: v_eorQi32:
-;CHECK: veor
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = xor <4 x i32> %tmp1, %tmp2
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @v_eorQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: v_eorQi64:
-;CHECK: veor
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = xor <2 x i64> %tmp1, %tmp2
- ret <2 x i64> %tmp3
-}
-
-define <8 x i8> @v_mvni8(<8 x i8>* %A) nounwind {
-;CHECK: v_mvni8:
-;CHECK: vmvn
- %tmp1 = load <8 x i8>* %A
- %tmp2 = xor <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @v_mvni16(<4 x i16>* %A) nounwind {
-;CHECK: v_mvni16:
-;CHECK: vmvn
- %tmp1 = load <4 x i16>* %A
- %tmp2 = xor <4 x i16> %tmp1, < i16 -1, i16 -1, i16 -1, i16 -1 >
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @v_mvni32(<2 x i32>* %A) nounwind {
-;CHECK: v_mvni32:
-;CHECK: vmvn
- %tmp1 = load <2 x i32>* %A
- %tmp2 = xor <2 x i32> %tmp1, < i32 -1, i32 -1 >
- ret <2 x i32> %tmp2
-}
-
-define <1 x i64> @v_mvni64(<1 x i64>* %A) nounwind {
-;CHECK: v_mvni64:
-;CHECK: vmvn
- %tmp1 = load <1 x i64>* %A
- %tmp2 = xor <1 x i64> %tmp1, < i64 -1 >
- ret <1 x i64> %tmp2
-}
-
-define <16 x i8> @v_mvnQi8(<16 x i8>* %A) nounwind {
-;CHECK: v_mvnQi8:
-;CHECK: vmvn
- %tmp1 = load <16 x i8>* %A
- %tmp2 = xor <16 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @v_mvnQi16(<8 x i16>* %A) nounwind {
-;CHECK: v_mvnQi16:
-;CHECK: vmvn
- %tmp1 = load <8 x i16>* %A
- %tmp2 = xor <8 x i16> %tmp1, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @v_mvnQi32(<4 x i32>* %A) nounwind {
-;CHECK: v_mvnQi32:
-;CHECK: vmvn
- %tmp1 = load <4 x i32>* %A
- %tmp2 = xor <4 x i32> %tmp1, < i32 -1, i32 -1, i32 -1, i32 -1 >
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @v_mvnQi64(<2 x i64>* %A) nounwind {
-;CHECK: v_mvnQi64:
-;CHECK: vmvn
- %tmp1 = load <2 x i64>* %A
- %tmp2 = xor <2 x i64> %tmp1, < i64 -1, i64 -1 >
- ret <2 x i64> %tmp2
-}
-
-define <8 x i8> @v_orri8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: v_orri8:
-;CHECK: vorr
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = or <8 x i8> %tmp1, %tmp2
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @v_orri16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: v_orri16:
-;CHECK: vorr
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = or <4 x i16> %tmp1, %tmp2
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @v_orri32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: v_orri32:
-;CHECK: vorr
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = or <2 x i32> %tmp1, %tmp2
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @v_orri64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: v_orri64:
-;CHECK: vorr
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = or <1 x i64> %tmp1, %tmp2
- ret <1 x i64> %tmp3
-}
-
-define <16 x i8> @v_orrQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: v_orrQi8:
-;CHECK: vorr
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = or <16 x i8> %tmp1, %tmp2
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @v_orrQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: v_orrQi16:
-;CHECK: vorr
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = or <8 x i16> %tmp1, %tmp2
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @v_orrQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: v_orrQi32:
-;CHECK: vorr
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = or <4 x i32> %tmp1, %tmp2
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @v_orrQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: v_orrQi64:
-;CHECK: vorr
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = or <2 x i64> %tmp1, %tmp2
- ret <2 x i64> %tmp3
-}
-
-define <8 x i8> @v_orni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: v_orni8:
-;CHECK: vorn
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = xor <8 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
- %tmp4 = or <8 x i8> %tmp1, %tmp3
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @v_orni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: v_orni16:
-;CHECK: vorn
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = xor <4 x i16> %tmp2, < i16 -1, i16 -1, i16 -1, i16 -1 >
- %tmp4 = or <4 x i16> %tmp1, %tmp3
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @v_orni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: v_orni32:
-;CHECK: vorn
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = xor <2 x i32> %tmp2, < i32 -1, i32 -1 >
- %tmp4 = or <2 x i32> %tmp1, %tmp3
- ret <2 x i32> %tmp4
-}
-
-define <1 x i64> @v_orni64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: v_orni64:
-;CHECK: vorn
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = xor <1 x i64> %tmp2, < i64 -1 >
- %tmp4 = or <1 x i64> %tmp1, %tmp3
- ret <1 x i64> %tmp4
-}
-
-define <16 x i8> @v_ornQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: v_ornQi8:
-;CHECK: vorn
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = xor <16 x i8> %tmp2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
- %tmp4 = or <16 x i8> %tmp1, %tmp3
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @v_ornQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: v_ornQi16:
-;CHECK: vorn
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = xor <8 x i16> %tmp2, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
- %tmp4 = or <8 x i16> %tmp1, %tmp3
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @v_ornQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: v_ornQi32:
-;CHECK: vorn
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = xor <4 x i32> %tmp2, < i32 -1, i32 -1, i32 -1, i32 -1 >
- %tmp4 = or <4 x i32> %tmp1, %tmp3
- ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @v_ornQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: v_ornQi64:
-;CHECK: vorn
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = xor <2 x i64> %tmp2, < i64 -1, i64 -1 >
- %tmp4 = or <2 x i64> %tmp1, %tmp3
- ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @vtsti8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vtsti8:
-;CHECK: vtst.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = and <8 x i8> %tmp1, %tmp2
- %tmp4 = icmp ne <8 x i8> %tmp3, zeroinitializer
- %tmp5 = sext <8 x i1> %tmp4 to <8 x i8>
- ret <8 x i8> %tmp5
-}
-
-define <4 x i16> @vtsti16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vtsti16:
-;CHECK: vtst.16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = and <4 x i16> %tmp1, %tmp2
- %tmp4 = icmp ne <4 x i16> %tmp3, zeroinitializer
- %tmp5 = sext <4 x i1> %tmp4 to <4 x i16>
- ret <4 x i16> %tmp5
-}
-
-define <2 x i32> @vtsti32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vtsti32:
-;CHECK: vtst.32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = and <2 x i32> %tmp1, %tmp2
- %tmp4 = icmp ne <2 x i32> %tmp3, zeroinitializer
- %tmp5 = sext <2 x i1> %tmp4 to <2 x i32>
- ret <2 x i32> %tmp5
-}
-
-define <16 x i8> @vtstQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vtstQi8:
-;CHECK: vtst.8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = and <16 x i8> %tmp1, %tmp2
- %tmp4 = icmp ne <16 x i8> %tmp3, zeroinitializer
- %tmp5 = sext <16 x i1> %tmp4 to <16 x i8>
- ret <16 x i8> %tmp5
-}
-
-define <8 x i16> @vtstQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vtstQi16:
-;CHECK: vtst.16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = and <8 x i16> %tmp1, %tmp2
- %tmp4 = icmp ne <8 x i16> %tmp3, zeroinitializer
- %tmp5 = sext <8 x i1> %tmp4 to <8 x i16>
- ret <8 x i16> %tmp5
-}
-
-define <4 x i32> @vtstQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vtstQi32:
-;CHECK: vtst.32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = and <4 x i32> %tmp1, %tmp2
- %tmp4 = icmp ne <4 x i32> %tmp3, zeroinitializer
- %tmp5 = sext <4 x i1> %tmp4 to <4 x i32>
- ret <4 x i32> %tmp5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vbsl.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vbsl.ll
deleted file mode 100644
index 9f3bb4e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vbsl.ll
+++ /dev/null
@@ -1,105 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @v_bsli8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: v_bsli8:
-;CHECK: vbsl
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i8>* %C
- %tmp4 = and <8 x i8> %tmp1, %tmp2
- %tmp5 = xor <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
- %tmp6 = and <8 x i8> %tmp5, %tmp3
- %tmp7 = or <8 x i8> %tmp4, %tmp6
- ret <8 x i8> %tmp7
-}
-
-define <4 x i16> @v_bsli16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: v_bsli16:
-;CHECK: vbsl
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
- %tmp4 = and <4 x i16> %tmp1, %tmp2
- %tmp5 = xor <4 x i16> %tmp1, < i16 -1, i16 -1, i16 -1, i16 -1 >
- %tmp6 = and <4 x i16> %tmp5, %tmp3
- %tmp7 = or <4 x i16> %tmp4, %tmp6
- ret <4 x i16> %tmp7
-}
-
-define <2 x i32> @v_bsli32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: v_bsli32:
-;CHECK: vbsl
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
- %tmp4 = and <2 x i32> %tmp1, %tmp2
- %tmp5 = xor <2 x i32> %tmp1, < i32 -1, i32 -1 >
- %tmp6 = and <2 x i32> %tmp5, %tmp3
- %tmp7 = or <2 x i32> %tmp4, %tmp6
- ret <2 x i32> %tmp7
-}
-
-define <1 x i64> @v_bsli64(<1 x i64>* %A, <1 x i64>* %B, <1 x i64>* %C) nounwind {
-;CHECK: v_bsli64:
-;CHECK: vbsl
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = load <1 x i64>* %C
- %tmp4 = and <1 x i64> %tmp1, %tmp2
- %tmp5 = xor <1 x i64> %tmp1, < i64 -1 >
- %tmp6 = and <1 x i64> %tmp5, %tmp3
- %tmp7 = or <1 x i64> %tmp4, %tmp6
- ret <1 x i64> %tmp7
-}
-
-define <16 x i8> @v_bslQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
-;CHECK: v_bslQi8:
-;CHECK: vbsl
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = load <16 x i8>* %C
- %tmp4 = and <16 x i8> %tmp1, %tmp2
- %tmp5 = xor <16 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 >
- %tmp6 = and <16 x i8> %tmp5, %tmp3
- %tmp7 = or <16 x i8> %tmp4, %tmp6
- ret <16 x i8> %tmp7
-}
-
-define <8 x i16> @v_bslQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
-;CHECK: v_bslQi16:
-;CHECK: vbsl
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = load <8 x i16>* %C
- %tmp4 = and <8 x i16> %tmp1, %tmp2
- %tmp5 = xor <8 x i16> %tmp1, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
- %tmp6 = and <8 x i16> %tmp5, %tmp3
- %tmp7 = or <8 x i16> %tmp4, %tmp6
- ret <8 x i16> %tmp7
-}
-
-define <4 x i32> @v_bslQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
-;CHECK: v_bslQi32:
-;CHECK: vbsl
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = load <4 x i32>* %C
- %tmp4 = and <4 x i32> %tmp1, %tmp2
- %tmp5 = xor <4 x i32> %tmp1, < i32 -1, i32 -1, i32 -1, i32 -1 >
- %tmp6 = and <4 x i32> %tmp5, %tmp3
- %tmp7 = or <4 x i32> %tmp4, %tmp6
- ret <4 x i32> %tmp7
-}
-
-define <2 x i64> @v_bslQi64(<2 x i64>* %A, <2 x i64>* %B, <2 x i64>* %C) nounwind {
-;CHECK: v_bslQi64:
-;CHECK: vbsl
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = load <2 x i64>* %C
- %tmp4 = and <2 x i64> %tmp1, %tmp2
- %tmp5 = xor <2 x i64> %tmp1, < i64 -1, i64 -1 >
- %tmp6 = and <2 x i64> %tmp5, %tmp3
- %tmp7 = or <2 x i64> %tmp4, %tmp6
- ret <2 x i64> %tmp7
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vceq.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vceq.ll
deleted file mode 100644
index e478751..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vceq.ll
+++ /dev/null
@@ -1,81 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vceqi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vceqi8:
-;CHECK: vceq.i8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = icmp eq <8 x i8> %tmp1, %tmp2
- %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @vceqi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vceqi16:
-;CHECK: vceq.i16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = icmp eq <4 x i16> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @vceqi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vceqi32:
-;CHECK: vceq.i32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = icmp eq <2 x i32> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-define <2 x i32> @vceqf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vceqf32:
-;CHECK: vceq.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = fcmp oeq <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-define <16 x i8> @vceqQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vceqQi8:
-;CHECK: vceq.i8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = icmp eq <16 x i8> %tmp1, %tmp2
- %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @vceqQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vceqQi16:
-;CHECK: vceq.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = icmp eq <8 x i16> %tmp1, %tmp2
- %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vceqQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vceqQi32:
-;CHECK: vceq.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = icmp eq <4 x i32> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
- ret <4 x i32> %tmp4
-}
-
-define <4 x i32> @vceqQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vceqQf32:
-;CHECK: vceq.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = fcmp oeq <4 x float> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
- ret <4 x i32> %tmp4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vcge.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vcge.ll
deleted file mode 100644
index 2c16111..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vcge.ll
+++ /dev/null
@@ -1,162 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vcges8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vcges8:
-;CHECK: vcge.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = icmp sge <8 x i8> %tmp1, %tmp2
- %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @vcges16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vcges16:
-;CHECK: vcge.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = icmp sge <4 x i16> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @vcges32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vcges32:
-;CHECK: vcge.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = icmp sge <2 x i32> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-define <8 x i8> @vcgeu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vcgeu8:
-;CHECK: vcge.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = icmp uge <8 x i8> %tmp1, %tmp2
- %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @vcgeu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vcgeu16:
-;CHECK: vcge.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = icmp uge <4 x i16> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @vcgeu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vcgeu32:
-;CHECK: vcge.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = icmp uge <2 x i32> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-define <2 x i32> @vcgef32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcgef32:
-;CHECK: vcge.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = fcmp oge <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-define <16 x i8> @vcgeQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vcgeQs8:
-;CHECK: vcge.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = icmp sge <16 x i8> %tmp1, %tmp2
- %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @vcgeQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vcgeQs16:
-;CHECK: vcge.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = icmp sge <8 x i16> %tmp1, %tmp2
- %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vcgeQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vcgeQs32:
-;CHECK: vcge.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = icmp sge <4 x i32> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
- ret <4 x i32> %tmp4
-}
-
-define <16 x i8> @vcgeQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vcgeQu8:
-;CHECK: vcge.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = icmp uge <16 x i8> %tmp1, %tmp2
- %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @vcgeQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vcgeQu16:
-;CHECK: vcge.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = icmp uge <8 x i16> %tmp1, %tmp2
- %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vcgeQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vcgeQu32:
-;CHECK: vcge.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = icmp uge <4 x i32> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
- ret <4 x i32> %tmp4
-}
-
-define <4 x i32> @vcgeQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vcgeQf32:
-;CHECK: vcge.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = fcmp oge <4 x float> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
- ret <4 x i32> %tmp4
-}
-
-define <2 x i32> @vacgef32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vacgef32:
-;CHECK: vacge.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vacged(<2 x float> %tmp1, <2 x float> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <4 x i32> @vacgeQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vacgeQf32:
-;CHECK: vacge.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vacgeq(<4 x float> %tmp1, <4 x float> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-declare <2 x i32> @llvm.arm.neon.vacged(<2 x float>, <2 x float>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vacgeq(<4 x float>, <4 x float>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vcgt.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vcgt.ll
deleted file mode 100644
index 6b11ba5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vcgt.ll
+++ /dev/null
@@ -1,162 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vcgts8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vcgts8:
-;CHECK: vcgt.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = icmp sgt <8 x i8> %tmp1, %tmp2
- %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @vcgts16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vcgts16:
-;CHECK: vcgt.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = icmp sgt <4 x i16> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @vcgts32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vcgts32:
-;CHECK: vcgt.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = icmp sgt <2 x i32> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-define <8 x i8> @vcgtu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vcgtu8:
-;CHECK: vcgt.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = icmp ugt <8 x i8> %tmp1, %tmp2
- %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @vcgtu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vcgtu16:
-;CHECK: vcgt.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = icmp ugt <4 x i16> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @vcgtu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vcgtu32:
-;CHECK: vcgt.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = icmp ugt <2 x i32> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-define <2 x i32> @vcgtf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcgtf32:
-;CHECK: vcgt.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = fcmp ogt <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-define <16 x i8> @vcgtQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vcgtQs8:
-;CHECK: vcgt.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = icmp sgt <16 x i8> %tmp1, %tmp2
- %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @vcgtQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vcgtQs16:
-;CHECK: vcgt.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = icmp sgt <8 x i16> %tmp1, %tmp2
- %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vcgtQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vcgtQs32:
-;CHECK: vcgt.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = icmp sgt <4 x i32> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
- ret <4 x i32> %tmp4
-}
-
-define <16 x i8> @vcgtQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vcgtQu8:
-;CHECK: vcgt.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = icmp ugt <16 x i8> %tmp1, %tmp2
- %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @vcgtQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vcgtQu16:
-;CHECK: vcgt.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = icmp ugt <8 x i16> %tmp1, %tmp2
- %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vcgtQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vcgtQu32:
-;CHECK: vcgt.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = icmp ugt <4 x i32> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
- ret <4 x i32> %tmp4
-}
-
-define <4 x i32> @vcgtQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vcgtQf32:
-;CHECK: vcgt.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = fcmp ogt <4 x float> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
- ret <4 x i32> %tmp4
-}
-
-define <2 x i32> @vacgtf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vacgtf32:
-;CHECK: vacgt.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vacgtd(<2 x float> %tmp1, <2 x float> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <4 x i32> @vacgtQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vacgtQf32:
-;CHECK: vacgt.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vacgtq(<4 x float> %tmp1, <4 x float> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-declare <2 x i32> @llvm.arm.neon.vacgtd(<2 x float>, <2 x float>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vacgtq(<4 x float>, <4 x float>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vcnt.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vcnt.ll
deleted file mode 100644
index 450f90d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vcnt.ll
+++ /dev/null
@@ -1,132 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {
-;CHECK: vcnt8:
-;CHECK: vcnt.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8> %tmp1)
- ret <8 x i8> %tmp2
-}
-
-define <16 x i8> @vcntQ8(<16 x i8>* %A) nounwind {
-;CHECK: vcntQ8:
-;CHECK: vcnt.8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = call <16 x i8> @llvm.arm.neon.vcnt.v16i8(<16 x i8> %tmp1)
- ret <16 x i8> %tmp2
-}
-
-declare <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8>) nounwind readnone
-declare <16 x i8> @llvm.arm.neon.vcnt.v16i8(<16 x i8>) nounwind readnone
-
-define <8 x i8> @vclz8(<8 x i8>* %A) nounwind {
-;CHECK: vclz8:
-;CHECK: vclz.i8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vclz.v8i8(<8 x i8> %tmp1)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
-;CHECK: vclz16:
-;CHECK: vclz.i16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vclz.v4i16(<4 x i16> %tmp1)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vclz32(<2 x i32>* %A) nounwind {
-;CHECK: vclz32:
-;CHECK: vclz.i32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vclz.v2i32(<2 x i32> %tmp1)
- ret <2 x i32> %tmp2
-}
-
-define <16 x i8> @vclzQ8(<16 x i8>* %A) nounwind {
-;CHECK: vclzQ8:
-;CHECK: vclz.i8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = call <16 x i8> @llvm.arm.neon.vclz.v16i8(<16 x i8> %tmp1)
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vclzQ16(<8 x i16>* %A) nounwind {
-;CHECK: vclzQ16:
-;CHECK: vclz.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vclz.v8i16(<8 x i16> %tmp1)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vclzQ32(<4 x i32>* %A) nounwind {
-;CHECK: vclzQ32:
-;CHECK: vclz.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vclz.v4i32(<4 x i32> %tmp1)
- ret <4 x i32> %tmp2
-}
-
-declare <8 x i8> @llvm.arm.neon.vclz.v8i8(<8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vclz.v4i16(<4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vclz.v2i32(<2 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vclz.v16i8(<16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vclz.v8i16(<8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vclz.v4i32(<4 x i32>) nounwind readnone
-
-define <8 x i8> @vclss8(<8 x i8>* %A) nounwind {
-;CHECK: vclss8:
-;CHECK: vcls.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vcls.v8i8(<8 x i8> %tmp1)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vclss16(<4 x i16>* %A) nounwind {
-;CHECK: vclss16:
-;CHECK: vcls.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vcls.v4i16(<4 x i16> %tmp1)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vclss32(<2 x i32>* %A) nounwind {
-;CHECK: vclss32:
-;CHECK: vcls.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vcls.v2i32(<2 x i32> %tmp1)
- ret <2 x i32> %tmp2
-}
-
-define <16 x i8> @vclsQs8(<16 x i8>* %A) nounwind {
-;CHECK: vclsQs8:
-;CHECK: vcls.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = call <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8> %tmp1)
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vclsQs16(<8 x i16>* %A) nounwind {
-;CHECK: vclsQs16:
-;CHECK: vcls.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16> %tmp1)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vclsQs32(<4 x i32>* %A) nounwind {
-;CHECK: vclsQs32:
-;CHECK: vcls.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vcls.v4i32(<4 x i32> %tmp1)
- ret <4 x i32> %tmp2
-}
-
-declare <8 x i8> @llvm.arm.neon.vcls.v8i8(<8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vcls.v4i16(<4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vcls.v2i32(<2 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vcls.v4i32(<4 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vcombine.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vcombine.ll
deleted file mode 100644
index e673305..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vcombine.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon
-
-define <16 x i8> @vcombine8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vcombine16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vcombine32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- ret <4 x i32> %tmp3
-}
-
-define <4 x float> @vcombinefloat(<2 x float>* %A, <2 x float>* %B) nounwind {
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- ret <4 x float> %tmp3
-}
-
-define <2 x i64> @vcombine64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = shufflevector <1 x i64> %tmp1, <1 x i64> %tmp2, <2 x i32> <i32 0, i32 1>
- ret <2 x i64> %tmp3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vcvt.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vcvt.ll
deleted file mode 100644
index f4cc536..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vcvt.ll
+++ /dev/null
@@ -1,140 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <2 x i32> @vcvt_f32tos32(<2 x float>* %A) nounwind {
-;CHECK: vcvt_f32tos32:
-;CHECK: vcvt.s32.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = fptosi <2 x float> %tmp1 to <2 x i32>
- ret <2 x i32> %tmp2
-}
-
-define <2 x i32> @vcvt_f32tou32(<2 x float>* %A) nounwind {
-;CHECK: vcvt_f32tou32:
-;CHECK: vcvt.u32.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = fptoui <2 x float> %tmp1 to <2 x i32>
- ret <2 x i32> %tmp2
-}
-
-define <2 x float> @vcvt_s32tof32(<2 x i32>* %A) nounwind {
-;CHECK: vcvt_s32tof32:
-;CHECK: vcvt.f32.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = sitofp <2 x i32> %tmp1 to <2 x float>
- ret <2 x float> %tmp2
-}
-
-define <2 x float> @vcvt_u32tof32(<2 x i32>* %A) nounwind {
-;CHECK: vcvt_u32tof32:
-;CHECK: vcvt.f32.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = uitofp <2 x i32> %tmp1 to <2 x float>
- ret <2 x float> %tmp2
-}
-
-define <4 x i32> @vcvtQ_f32tos32(<4 x float>* %A) nounwind {
-;CHECK: vcvtQ_f32tos32:
-;CHECK: vcvt.s32.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = fptosi <4 x float> %tmp1 to <4 x i32>
- ret <4 x i32> %tmp2
-}
-
-define <4 x i32> @vcvtQ_f32tou32(<4 x float>* %A) nounwind {
-;CHECK: vcvtQ_f32tou32:
-;CHECK: vcvt.u32.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = fptoui <4 x float> %tmp1 to <4 x i32>
- ret <4 x i32> %tmp2
-}
-
-define <4 x float> @vcvtQ_s32tof32(<4 x i32>* %A) nounwind {
-;CHECK: vcvtQ_s32tof32:
-;CHECK: vcvt.f32.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = sitofp <4 x i32> %tmp1 to <4 x float>
- ret <4 x float> %tmp2
-}
-
-define <4 x float> @vcvtQ_u32tof32(<4 x i32>* %A) nounwind {
-;CHECK: vcvtQ_u32tof32:
-;CHECK: vcvt.f32.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = uitofp <4 x i32> %tmp1 to <4 x float>
- ret <4 x float> %tmp2
-}
-
-define <2 x i32> @vcvt_n_f32tos32(<2 x float>* %A) nounwind {
-;CHECK: vcvt_n_f32tos32:
-;CHECK: vcvt.s32.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32(<2 x float> %tmp1, i32 1)
- ret <2 x i32> %tmp2
-}
-
-define <2 x i32> @vcvt_n_f32tou32(<2 x float>* %A) nounwind {
-;CHECK: vcvt_n_f32tou32:
-;CHECK: vcvt.u32.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32(<2 x float> %tmp1, i32 1)
- ret <2 x i32> %tmp2
-}
-
-define <2 x float> @vcvt_n_s32tof32(<2 x i32>* %A) nounwind {
-;CHECK: vcvt_n_s32tof32:
-;CHECK: vcvt.f32.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x float> @llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32> %tmp1, i32 1)
- ret <2 x float> %tmp2
-}
-
-define <2 x float> @vcvt_n_u32tof32(<2 x i32>* %A) nounwind {
-;CHECK: vcvt_n_u32tof32:
-;CHECK: vcvt.f32.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x float> @llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32> %tmp1, i32 1)
- ret <2 x float> %tmp2
-}
-
-declare <2 x i32> @llvm.arm.neon.vcvtfp2fxs.v2i32.v2f32(<2 x float>, i32) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vcvtfp2fxu.v2i32.v2f32(<2 x float>, i32) nounwind readnone
-declare <2 x float> @llvm.arm.neon.vcvtfxs2fp.v2f32.v2i32(<2 x i32>, i32) nounwind readnone
-declare <2 x float> @llvm.arm.neon.vcvtfxu2fp.v2f32.v2i32(<2 x i32>, i32) nounwind readnone
-
-define <4 x i32> @vcvtQ_n_f32tos32(<4 x float>* %A) nounwind {
-;CHECK: vcvtQ_n_f32tos32:
-;CHECK: vcvt.s32.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float> %tmp1, i32 1)
- ret <4 x i32> %tmp2
-}
-
-define <4 x i32> @vcvtQ_n_f32tou32(<4 x float>* %A) nounwind {
-;CHECK: vcvtQ_n_f32tou32:
-;CHECK: vcvt.u32.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float> %tmp1, i32 1)
- ret <4 x i32> %tmp2
-}
-
-define <4 x float> @vcvtQ_n_s32tof32(<4 x i32>* %A) nounwind {
-;CHECK: vcvtQ_n_s32tof32:
-;CHECK: vcvt.f32.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x float> @llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32> %tmp1, i32 1)
- ret <4 x float> %tmp2
-}
-
-define <4 x float> @vcvtQ_n_u32tof32(<4 x i32>* %A) nounwind {
-;CHECK: vcvtQ_n_u32tof32:
-;CHECK: vcvt.f32.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x float> @llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32> %tmp1, i32 1)
- ret <4 x float> %tmp2
-}
-
-declare <4 x i32> @llvm.arm.neon.vcvtfp2fxs.v4i32.v4f32(<4 x float>, i32) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vcvtfp2fxu.v4i32.v4f32(<4 x float>, i32) nounwind readnone
-declare <4 x float> @llvm.arm.neon.vcvtfxs2fp.v4f32.v4i32(<4 x i32>, i32) nounwind readnone
-declare <4 x float> @llvm.arm.neon.vcvtfxu2fp.v4f32.v4i32(<4 x i32>, i32) nounwind readnone
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vdup.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vdup.ll
deleted file mode 100644
index c9a68ca..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vdup.ll
+++ /dev/null
@@ -1,269 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @v_dup8(i8 %A) nounwind {
-;CHECK: v_dup8:
-;CHECK: vdup.8
- %tmp1 = insertelement <8 x i8> zeroinitializer, i8 %A, i32 0
- %tmp2 = insertelement <8 x i8> %tmp1, i8 %A, i32 1
- %tmp3 = insertelement <8 x i8> %tmp2, i8 %A, i32 2
- %tmp4 = insertelement <8 x i8> %tmp3, i8 %A, i32 3
- %tmp5 = insertelement <8 x i8> %tmp4, i8 %A, i32 4
- %tmp6 = insertelement <8 x i8> %tmp5, i8 %A, i32 5
- %tmp7 = insertelement <8 x i8> %tmp6, i8 %A, i32 6
- %tmp8 = insertelement <8 x i8> %tmp7, i8 %A, i32 7
- ret <8 x i8> %tmp8
-}
-
-define <4 x i16> @v_dup16(i16 %A) nounwind {
-;CHECK: v_dup16:
-;CHECK: vdup.16
- %tmp1 = insertelement <4 x i16> zeroinitializer, i16 %A, i32 0
- %tmp2 = insertelement <4 x i16> %tmp1, i16 %A, i32 1
- %tmp3 = insertelement <4 x i16> %tmp2, i16 %A, i32 2
- %tmp4 = insertelement <4 x i16> %tmp3, i16 %A, i32 3
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @v_dup32(i32 %A) nounwind {
-;CHECK: v_dup32:
-;CHECK: vdup.32
- %tmp1 = insertelement <2 x i32> zeroinitializer, i32 %A, i32 0
- %tmp2 = insertelement <2 x i32> %tmp1, i32 %A, i32 1
- ret <2 x i32> %tmp2
-}
-
-define <2 x float> @v_dupfloat(float %A) nounwind {
-;CHECK: v_dupfloat:
-;CHECK: vdup.32
- %tmp1 = insertelement <2 x float> zeroinitializer, float %A, i32 0
- %tmp2 = insertelement <2 x float> %tmp1, float %A, i32 1
- ret <2 x float> %tmp2
-}
-
-define <16 x i8> @v_dupQ8(i8 %A) nounwind {
-;CHECK: v_dupQ8:
-;CHECK: vdup.8
- %tmp1 = insertelement <16 x i8> zeroinitializer, i8 %A, i32 0
- %tmp2 = insertelement <16 x i8> %tmp1, i8 %A, i32 1
- %tmp3 = insertelement <16 x i8> %tmp2, i8 %A, i32 2
- %tmp4 = insertelement <16 x i8> %tmp3, i8 %A, i32 3
- %tmp5 = insertelement <16 x i8> %tmp4, i8 %A, i32 4
- %tmp6 = insertelement <16 x i8> %tmp5, i8 %A, i32 5
- %tmp7 = insertelement <16 x i8> %tmp6, i8 %A, i32 6
- %tmp8 = insertelement <16 x i8> %tmp7, i8 %A, i32 7
- %tmp9 = insertelement <16 x i8> %tmp8, i8 %A, i32 8
- %tmp10 = insertelement <16 x i8> %tmp9, i8 %A, i32 9
- %tmp11 = insertelement <16 x i8> %tmp10, i8 %A, i32 10
- %tmp12 = insertelement <16 x i8> %tmp11, i8 %A, i32 11
- %tmp13 = insertelement <16 x i8> %tmp12, i8 %A, i32 12
- %tmp14 = insertelement <16 x i8> %tmp13, i8 %A, i32 13
- %tmp15 = insertelement <16 x i8> %tmp14, i8 %A, i32 14
- %tmp16 = insertelement <16 x i8> %tmp15, i8 %A, i32 15
- ret <16 x i8> %tmp16
-}
-
-define <8 x i16> @v_dupQ16(i16 %A) nounwind {
-;CHECK: v_dupQ16:
-;CHECK: vdup.16
- %tmp1 = insertelement <8 x i16> zeroinitializer, i16 %A, i32 0
- %tmp2 = insertelement <8 x i16> %tmp1, i16 %A, i32 1
- %tmp3 = insertelement <8 x i16> %tmp2, i16 %A, i32 2
- %tmp4 = insertelement <8 x i16> %tmp3, i16 %A, i32 3
- %tmp5 = insertelement <8 x i16> %tmp4, i16 %A, i32 4
- %tmp6 = insertelement <8 x i16> %tmp5, i16 %A, i32 5
- %tmp7 = insertelement <8 x i16> %tmp6, i16 %A, i32 6
- %tmp8 = insertelement <8 x i16> %tmp7, i16 %A, i32 7
- ret <8 x i16> %tmp8
-}
-
-define <4 x i32> @v_dupQ32(i32 %A) nounwind {
-;CHECK: v_dupQ32:
-;CHECK: vdup.32
- %tmp1 = insertelement <4 x i32> zeroinitializer, i32 %A, i32 0
- %tmp2 = insertelement <4 x i32> %tmp1, i32 %A, i32 1
- %tmp3 = insertelement <4 x i32> %tmp2, i32 %A, i32 2
- %tmp4 = insertelement <4 x i32> %tmp3, i32 %A, i32 3
- ret <4 x i32> %tmp4
-}
-
-define <4 x float> @v_dupQfloat(float %A) nounwind {
-;CHECK: v_dupQfloat:
-;CHECK: vdup.32
- %tmp1 = insertelement <4 x float> zeroinitializer, float %A, i32 0
- %tmp2 = insertelement <4 x float> %tmp1, float %A, i32 1
- %tmp3 = insertelement <4 x float> %tmp2, float %A, i32 2
- %tmp4 = insertelement <4 x float> %tmp3, float %A, i32 3
- ret <4 x float> %tmp4
-}
-
-; Check to make sure it works with shuffles, too.
-
-define <8 x i8> @v_shuffledup8(i8 %A) nounwind {
-;CHECK: v_shuffledup8:
-;CHECK: vdup.8
- %tmp1 = insertelement <8 x i8> undef, i8 %A, i32 0
- %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> zeroinitializer
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @v_shuffledup16(i16 %A) nounwind {
-;CHECK: v_shuffledup16:
-;CHECK: vdup.16
- %tmp1 = insertelement <4 x i16> undef, i16 %A, i32 0
- %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @v_shuffledup32(i32 %A) nounwind {
-;CHECK: v_shuffledup32:
-;CHECK: vdup.32
- %tmp1 = insertelement <2 x i32> undef, i32 %A, i32 0
- %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
- ret <2 x i32> %tmp2
-}
-
-define <2 x float> @v_shuffledupfloat(float %A) nounwind {
-;CHECK: v_shuffledupfloat:
-;CHECK: vdup.32
- %tmp1 = insertelement <2 x float> undef, float %A, i32 0
- %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
- ret <2 x float> %tmp2
-}
-
-define <16 x i8> @v_shuffledupQ8(i8 %A) nounwind {
-;CHECK: v_shuffledupQ8:
-;CHECK: vdup.8
- %tmp1 = insertelement <16 x i8> undef, i8 %A, i32 0
- %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> zeroinitializer
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @v_shuffledupQ16(i16 %A) nounwind {
-;CHECK: v_shuffledupQ16:
-;CHECK: vdup.16
- %tmp1 = insertelement <8 x i16> undef, i16 %A, i32 0
- %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> zeroinitializer
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @v_shuffledupQ32(i32 %A) nounwind {
-;CHECK: v_shuffledupQ32:
-;CHECK: vdup.32
- %tmp1 = insertelement <4 x i32> undef, i32 %A, i32 0
- %tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> zeroinitializer
- ret <4 x i32> %tmp2
-}
-
-define <4 x float> @v_shuffledupQfloat(float %A) nounwind {
-;CHECK: v_shuffledupQfloat:
-;CHECK: vdup.32
- %tmp1 = insertelement <4 x float> undef, float %A, i32 0
- %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer
- ret <4 x float> %tmp2
-}
-
-define <2 x float> @v_shuffledupfloat2(float* %A) nounwind {
-;CHECK: v_shuffledupfloat2:
-;CHECK: vdup.32
- %tmp0 = load float* %A
- %tmp1 = insertelement <2 x float> undef, float %tmp0, i32 0
- %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
- ret <2 x float> %tmp2
-}
-
-define <4 x float> @v_shuffledupQfloat2(float* %A) nounwind {
-;CHECK: v_shuffledupQfloat2:
-;CHECK: vdup.32
- %tmp0 = load float* %A
- %tmp1 = insertelement <4 x float> undef, float %tmp0, i32 0
- %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer
- ret <4 x float> %tmp2
-}
-
-define <8 x i8> @vduplane8(<8 x i8>* %A) nounwind {
-;CHECK: vduplane8:
-;CHECK: vdup.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vduplane16(<4 x i16>* %A) nounwind {
-;CHECK: vduplane16:
-;CHECK: vdup.16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vduplane32(<2 x i32>* %A) nounwind {
-;CHECK: vduplane32:
-;CHECK: vdup.32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> < i32 1, i32 1 >
- ret <2 x i32> %tmp2
-}
-
-define <2 x float> @vduplanefloat(<2 x float>* %A) nounwind {
-;CHECK: vduplanefloat:
-;CHECK: vdup.32
- %tmp1 = load <2 x float>* %A
- %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> < i32 1, i32 1 >
- ret <2 x float> %tmp2
-}
-
-define <16 x i8> @vduplaneQ8(<8 x i8>* %A) nounwind {
-;CHECK: vduplaneQ8:
-;CHECK: vdup.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <16 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vduplaneQ16(<4 x i16>* %A) nounwind {
-;CHECK: vduplaneQ16:
-;CHECK: vdup.16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vduplaneQ32(<2 x i32>* %A) nounwind {
-;CHECK: vduplaneQ32:
-;CHECK: vdup.32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
- ret <4 x i32> %tmp2
-}
-
-define <4 x float> @vduplaneQfloat(<2 x float>* %A) nounwind {
-;CHECK: vduplaneQfloat:
-;CHECK: vdup.32
- %tmp1 = load <2 x float>* %A
- %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
- ret <4 x float> %tmp2
-}
-
-define arm_apcscc <2 x i64> @foo(<2 x i64> %arg0_int64x1_t) nounwind readnone {
-entry:
- %0 = shufflevector <2 x i64> %arg0_int64x1_t, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
- ret <2 x i64> %0
-}
-
-define arm_apcscc <2 x i64> @bar(<2 x i64> %arg0_int64x1_t) nounwind readnone {
-entry:
- %0 = shufflevector <2 x i64> %arg0_int64x1_t, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
- ret <2 x i64> %0
-}
-
-define arm_apcscc <2 x double> @baz(<2 x double> %arg0_int64x1_t) nounwind readnone {
-entry:
- %0 = shufflevector <2 x double> %arg0_int64x1_t, <2 x double> undef, <2 x i32> <i32 1, i32 1>
- ret <2 x double> %0
-}
-
-define arm_apcscc <2 x double> @qux(<2 x double> %arg0_int64x1_t) nounwind readnone {
-entry:
- %0 = shufflevector <2 x double> %arg0_int64x1_t, <2 x double> undef, <2 x i32> <i32 0, i32 0>
- ret <2 x double> %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vext.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vext.ll
deleted file mode 100644
index 20d953b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vext.ll
+++ /dev/null
@@ -1,56 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define arm_apcscc <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: test_vextd:
-;CHECK: vext
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
- ret <8 x i8> %tmp3
-}
-
-define arm_apcscc <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: test_vextRd:
-;CHECK: vext
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
- ret <8 x i8> %tmp3
-}
-
-define arm_apcscc <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: test_vextq:
-;CHECK: vext
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
- ret <16 x i8> %tmp3
-}
-
-define arm_apcscc <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: test_vextRq:
-;CHECK: vext
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
- ret <16 x i8> %tmp3
-}
-
-define arm_apcscc <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: test_vextd16:
-;CHECK: vext
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
- ret <4 x i16> %tmp3
-}
-
-define arm_apcscc <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: test_vextq32:
-;CHECK: vext
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
- ret <4 x i32> %tmp3
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vfcmp.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vfcmp.ll
deleted file mode 100644
index 6946d02..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vfcmp.ll
+++ /dev/null
@@ -1,139 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-; This tests fcmp operations that do not map directly to NEON instructions.
-
-; une is implemented with VCEQ/VMVN
-define <2 x i32> @vcunef32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcunef32:
-;CHECK: vceq.f32
-;CHECK-NEXT: vmvn
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = fcmp une <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-; olt is implemented with VCGT
-define <2 x i32> @vcoltf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcoltf32:
-;CHECK: vcgt.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = fcmp olt <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-; ole is implemented with VCGE
-define <2 x i32> @vcolef32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcolef32:
-;CHECK: vcge.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = fcmp ole <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-; uge is implemented with VCGT/VMVN
-define <2 x i32> @vcugef32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcugef32:
-;CHECK: vcgt.f32
-;CHECK-NEXT: vmvn
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = fcmp uge <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-; ule is implemented with VCGT/VMVN
-define <2 x i32> @vculef32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vculef32:
-;CHECK: vcgt.f32
-;CHECK-NEXT: vmvn
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = fcmp ule <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-; ugt is implemented with VCGE/VMVN
-define <2 x i32> @vcugtf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcugtf32:
-;CHECK: vcge.f32
-;CHECK-NEXT: vmvn
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = fcmp ugt <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-; ult is implemented with VCGE/VMVN
-define <2 x i32> @vcultf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcultf32:
-;CHECK: vcge.f32
-;CHECK-NEXT: vmvn
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = fcmp ult <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-; ueq is implemented with VCGT/VCGT/VORR/VMVN
-define <2 x i32> @vcueqf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcueqf32:
-;CHECK: vcgt.f32
-;CHECK-NEXT: vcgt.f32
-;CHECK-NEXT: vorr
-;CHECK-NEXT: vmvn
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = fcmp ueq <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-; one is implemented with VCGT/VCGT/VORR
-define <2 x i32> @vconef32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vconef32:
-;CHECK: vcgt.f32
-;CHECK-NEXT: vcgt.f32
-;CHECK-NEXT: vorr
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = fcmp one <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-; uno is implemented with VCGT/VCGE/VORR/VMVN
-define <2 x i32> @vcunof32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcunof32:
-;CHECK: vcge.f32
-;CHECK-NEXT: vcgt.f32
-;CHECK-NEXT: vorr
-;CHECK-NEXT: vmvn
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = fcmp uno <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-; ord is implemented with VCGT/VCGE/VORR
-define <2 x i32> @vcordf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vcordf32:
-;CHECK: vcge.f32
-;CHECK-NEXT: vcgt.f32
-;CHECK-NEXT: vorr
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = fcmp ord <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vfp.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vfp.ll
deleted file mode 100644
index 44a44af..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vfp.ll
+++ /dev/null
@@ -1,155 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+vfp2 | FileCheck %s
-
-define void @test(float* %P, double* %D) {
- %A = load float* %P ; <float> [#uses=1]
- %B = load double* %D ; <double> [#uses=1]
- store float %A, float* %P
- store double %B, double* %D
- ret void
-}
-
-declare float @fabsf(float)
-
-declare double @fabs(double)
-
-define void @test_abs(float* %P, double* %D) {
-;CHECK: test_abs:
- %a = load float* %P ; <float> [#uses=1]
-;CHECK: vabs.f32
- %b = call float @fabsf( float %a ) ; <float> [#uses=1]
- store float %b, float* %P
- %A = load double* %D ; <double> [#uses=1]
-;CHECK: vabs.f64
- %B = call double @fabs( double %A ) ; <double> [#uses=1]
- store double %B, double* %D
- ret void
-}
-
-define void @test_add(float* %P, double* %D) {
-;CHECK: test_add:
- %a = load float* %P ; <float> [#uses=2]
- %b = fadd float %a, %a ; <float> [#uses=1]
- store float %b, float* %P
- %A = load double* %D ; <double> [#uses=2]
- %B = fadd double %A, %A ; <double> [#uses=1]
- store double %B, double* %D
- ret void
-}
-
-define void @test_ext_round(float* %P, double* %D) {
-;CHECK: test_ext_round:
- %a = load float* %P ; <float> [#uses=1]
-;CHECK: vcvt.f64.f32
- %b = fpext float %a to double ; <double> [#uses=1]
- %A = load double* %D ; <double> [#uses=1]
-;CHECK: vcvt.f32.f64
- %B = fptrunc double %A to float ; <float> [#uses=1]
- store double %b, double* %D
- store float %B, float* %P
- ret void
-}
-
-define void @test_fma(float* %P1, float* %P2, float* %P3) {
-;CHECK: test_fma:
- %a1 = load float* %P1 ; <float> [#uses=1]
- %a2 = load float* %P2 ; <float> [#uses=1]
- %a3 = load float* %P3 ; <float> [#uses=1]
-;CHECK: vnmls.f32
- %X = fmul float %a1, %a2 ; <float> [#uses=1]
- %Y = fsub float %X, %a3 ; <float> [#uses=1]
- store float %Y, float* %P1
- ret void
-}
-
-define i32 @test_ftoi(float* %P1) {
-;CHECK: test_ftoi:
- %a1 = load float* %P1 ; <float> [#uses=1]
-;CHECK: vcvt.s32.f32
- %b1 = fptosi float %a1 to i32 ; <i32> [#uses=1]
- ret i32 %b1
-}
-
-define i32 @test_ftou(float* %P1) {
-;CHECK: test_ftou:
- %a1 = load float* %P1 ; <float> [#uses=1]
-;CHECK: vcvt.u32.f32
- %b1 = fptoui float %a1 to i32 ; <i32> [#uses=1]
- ret i32 %b1
-}
-
-define i32 @test_dtoi(double* %P1) {
-;CHECK: test_dtoi:
- %a1 = load double* %P1 ; <double> [#uses=1]
-;CHECK: vcvt.s32.f64
- %b1 = fptosi double %a1 to i32 ; <i32> [#uses=1]
- ret i32 %b1
-}
-
-define i32 @test_dtou(double* %P1) {
-;CHECK: test_dtou:
- %a1 = load double* %P1 ; <double> [#uses=1]
-;CHECK: vcvt.u32.f64
- %b1 = fptoui double %a1 to i32 ; <i32> [#uses=1]
- ret i32 %b1
-}
-
-define void @test_utod(double* %P1, i32 %X) {
-;CHECK: test_utod:
-;CHECK: vcvt.f64.u32
- %b1 = uitofp i32 %X to double ; <double> [#uses=1]
- store double %b1, double* %P1
- ret void
-}
-
-define void @test_utod2(double* %P1, i8 %X) {
-;CHECK: test_utod2:
-;CHECK: vcvt.f64.u32
- %b1 = uitofp i8 %X to double ; <double> [#uses=1]
- store double %b1, double* %P1
- ret void
-}
-
-define void @test_cmp(float* %glob, i32 %X) {
-;CHECK: test_cmp:
-entry:
- %tmp = load float* %glob ; <float> [#uses=2]
- %tmp3 = getelementptr float* %glob, i32 2 ; <float*> [#uses=1]
- %tmp4 = load float* %tmp3 ; <float> [#uses=2]
- %tmp.upgrd.1 = fcmp oeq float %tmp, %tmp4 ; <i1> [#uses=1]
- %tmp5 = fcmp uno float %tmp, %tmp4 ; <i1> [#uses=1]
- %tmp6 = or i1 %tmp.upgrd.1, %tmp5 ; <i1> [#uses=1]
-;CHECK: bmi
-;CHECK-NEXT: bgt
- br i1 %tmp6, label %cond_true, label %cond_false
-
-cond_true: ; preds = %entry
- %tmp.upgrd.2 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
- ret void
-
-cond_false: ; preds = %entry
- %tmp7 = tail call i32 (...)* @baz( ) ; <i32> [#uses=0]
- ret void
-}
-
-declare i1 @llvm.isunordered.f32(float, float)
-
-declare i32 @bar(...)
-
-declare i32 @baz(...)
-
-define void @test_cmpfp0(float* %glob, i32 %X) {
-;CHECK: test_cmpfp0:
-entry:
- %tmp = load float* %glob ; <float> [#uses=1]
-;CHECK: vcmpe.f32
- %tmp.upgrd.3 = fcmp ogt float %tmp, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %tmp.upgrd.3, label %cond_true, label %cond_false
-
-cond_true: ; preds = %entry
- %tmp.upgrd.4 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
- ret void
-
-cond_false: ; preds = %entry
- %tmp1 = tail call i32 (...)* @baz( ) ; <i32> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vget_lane.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vget_lane.ll
deleted file mode 100644
index 5dd87d6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vget_lane.ll
+++ /dev/null
@@ -1,212 +0,0 @@
-; RUN: llc < %s -mattr=+neon | FileCheck %s
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
-target triple = "thumbv7-elf"
-
-define i32 @vget_lanes8(<8 x i8>* %A) nounwind {
-;CHECK: vget_lanes8:
-;CHECK: vmov.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = extractelement <8 x i8> %tmp1, i32 1
- %tmp3 = sext i8 %tmp2 to i32
- ret i32 %tmp3
-}
-
-define i32 @vget_lanes16(<4 x i16>* %A) nounwind {
-;CHECK: vget_lanes16:
-;CHECK: vmov.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = extractelement <4 x i16> %tmp1, i32 1
- %tmp3 = sext i16 %tmp2 to i32
- ret i32 %tmp3
-}
-
-define i32 @vget_laneu8(<8 x i8>* %A) nounwind {
-;CHECK: vget_laneu8:
-;CHECK: vmov.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = extractelement <8 x i8> %tmp1, i32 1
- %tmp3 = zext i8 %tmp2 to i32
- ret i32 %tmp3
-}
-
-define i32 @vget_laneu16(<4 x i16>* %A) nounwind {
-;CHECK: vget_laneu16:
-;CHECK: vmov.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = extractelement <4 x i16> %tmp1, i32 1
- %tmp3 = zext i16 %tmp2 to i32
- ret i32 %tmp3
-}
-
-; Do a vector add to keep the extraction from being done directly from memory.
-define i32 @vget_lanei32(<2 x i32>* %A) nounwind {
-;CHECK: vget_lanei32:
-;CHECK: vmov.32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = add <2 x i32> %tmp1, %tmp1
- %tmp3 = extractelement <2 x i32> %tmp2, i32 1
- ret i32 %tmp3
-}
-
-define i32 @vgetQ_lanes8(<16 x i8>* %A) nounwind {
-;CHECK: vgetQ_lanes8:
-;CHECK: vmov.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = extractelement <16 x i8> %tmp1, i32 1
- %tmp3 = sext i8 %tmp2 to i32
- ret i32 %tmp3
-}
-
-define i32 @vgetQ_lanes16(<8 x i16>* %A) nounwind {
-;CHECK: vgetQ_lanes16:
-;CHECK: vmov.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = extractelement <8 x i16> %tmp1, i32 1
- %tmp3 = sext i16 %tmp2 to i32
- ret i32 %tmp3
-}
-
-define i32 @vgetQ_laneu8(<16 x i8>* %A) nounwind {
-;CHECK: vgetQ_laneu8:
-;CHECK: vmov.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = extractelement <16 x i8> %tmp1, i32 1
- %tmp3 = zext i8 %tmp2 to i32
- ret i32 %tmp3
-}
-
-define i32 @vgetQ_laneu16(<8 x i16>* %A) nounwind {
-;CHECK: vgetQ_laneu16:
-;CHECK: vmov.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = extractelement <8 x i16> %tmp1, i32 1
- %tmp3 = zext i16 %tmp2 to i32
- ret i32 %tmp3
-}
-
-; Do a vector add to keep the extraction from being done directly from memory.
-define i32 @vgetQ_lanei32(<4 x i32>* %A) nounwind {
-;CHECK: vgetQ_lanei32:
-;CHECK: vmov.32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = add <4 x i32> %tmp1, %tmp1
- %tmp3 = extractelement <4 x i32> %tmp2, i32 1
- ret i32 %tmp3
-}
-
-define arm_aapcs_vfpcc void @test_vget_laneu16() nounwind {
-entry:
-; CHECK: vmov.u16 r0, d0[1]
- %arg0_uint16x4_t = alloca <4 x i16> ; <<4 x i16>*> [#uses=1]
- %out_uint16_t = alloca i16 ; <i16*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = load <4 x i16>* %arg0_uint16x4_t, align 8 ; <<4 x i16>> [#uses=1]
- %1 = extractelement <4 x i16> %0, i32 1 ; <i16> [#uses=1]
- store i16 %1, i16* %out_uint16_t, align 2
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define arm_aapcs_vfpcc void @test_vget_laneu8() nounwind {
-entry:
-; CHECK: vmov.u8 r0, d0[1]
- %arg0_uint8x8_t = alloca <8 x i8> ; <<8 x i8>*> [#uses=1]
- %out_uint8_t = alloca i8 ; <i8*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = load <8 x i8>* %arg0_uint8x8_t, align 8 ; <<8 x i8>> [#uses=1]
- %1 = extractelement <8 x i8> %0, i32 1 ; <i8> [#uses=1]
- store i8 %1, i8* %out_uint8_t, align 1
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define arm_aapcs_vfpcc void @test_vgetQ_laneu16() nounwind {
-entry:
-; CHECK: vmov.u16 r0, d0[1]
- %arg0_uint16x8_t = alloca <8 x i16> ; <<8 x i16>*> [#uses=1]
- %out_uint16_t = alloca i16 ; <i16*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = load <8 x i16>* %arg0_uint16x8_t, align 16 ; <<8 x i16>> [#uses=1]
- %1 = extractelement <8 x i16> %0, i32 1 ; <i16> [#uses=1]
- store i16 %1, i16* %out_uint16_t, align 2
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define arm_aapcs_vfpcc void @test_vgetQ_laneu8() nounwind {
-entry:
-; CHECK: vmov.u8 r0, d0[1]
- %arg0_uint8x16_t = alloca <16 x i8> ; <<16 x i8>*> [#uses=1]
- %out_uint8_t = alloca i8 ; <i8*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = load <16 x i8>* %arg0_uint8x16_t, align 16 ; <<16 x i8>> [#uses=1]
- %1 = extractelement <16 x i8> %0, i32 1 ; <i8> [#uses=1]
- store i8 %1, i8* %out_uint8_t, align 1
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define <8 x i8> @vset_lane8(<8 x i8>* %A, i8 %B) nounwind {
-;CHECK: vset_lane8:
-;CHECK: vmov.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = insertelement <8 x i8> %tmp1, i8 %B, i32 1
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vset_lane16(<4 x i16>* %A, i16 %B) nounwind {
-;CHECK: vset_lane16:
-;CHECK: vmov.16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = insertelement <4 x i16> %tmp1, i16 %B, i32 1
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vset_lane32(<2 x i32>* %A, i32 %B) nounwind {
-;CHECK: vset_lane32:
-;CHECK: vmov.32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = insertelement <2 x i32> %tmp1, i32 %B, i32 1
- ret <2 x i32> %tmp2
-}
-
-define <16 x i8> @vsetQ_lane8(<16 x i8>* %A, i8 %B) nounwind {
-;CHECK: vsetQ_lane8:
-;CHECK: vmov.8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = insertelement <16 x i8> %tmp1, i8 %B, i32 1
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vsetQ_lane16(<8 x i16>* %A, i16 %B) nounwind {
-;CHECK: vsetQ_lane16:
-;CHECK: vmov.16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = insertelement <8 x i16> %tmp1, i16 %B, i32 1
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vsetQ_lane32(<4 x i32>* %A, i32 %B) nounwind {
-;CHECK: vsetQ_lane32:
-;CHECK: vmov.32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = insertelement <4 x i32> %tmp1, i32 %B, i32 1
- ret <4 x i32> %tmp2
-}
-
-define arm_aapcs_vfpcc <2 x float> @test_vset_lanef32(float %arg0_float32_t, <2 x float> %arg1_float32x2_t) nounwind {
-;CHECK: test_vset_lanef32:
-;CHECK: vmov.f32
-;CHECK: vmov.f32
-entry:
- %0 = insertelement <2 x float> %arg1_float32x2_t, float %arg0_float32_t, i32 1 ; <<2 x float>> [#uses=1]
- ret <2 x float> %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vhadd.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vhadd.ll
deleted file mode 100644
index 379e062..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vhadd.ll
+++ /dev/null
@@ -1,249 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vhadds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vhadds8:
-;CHECK: vhadd.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vhadds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vhadds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vhadds16:
-;CHECK: vhadd.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vhadds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vhadds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vhadds32:
-;CHECK: vhadd.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vhadds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <8 x i8> @vhaddu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vhaddu8:
-;CHECK: vhadd.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vhaddu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vhaddu16:
-;CHECK: vhadd.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vhaddu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vhaddu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vhaddu32:
-;CHECK: vhadd.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vhaddu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <16 x i8> @vhaddQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vhaddQs8:
-;CHECK: vhadd.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vhaddQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vhaddQs16:
-;CHECK: vhadd.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vhaddQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vhaddQs32:
-;CHECK: vhadd.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vhadds.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <16 x i8> @vhaddQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vhaddQu8:
-;CHECK: vhadd.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vhaddQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vhaddQu16:
-;CHECK: vhadd.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vhaddQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vhaddQu32:
-;CHECK: vhadd.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vhaddu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vhadds.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vhadds.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vhadds.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vhaddu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vhaddu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vhadds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vhadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vhadds.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vhaddu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vhaddu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vhaddu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-
-define <8 x i8> @vrhadds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vrhadds8:
-;CHECK: vrhadd.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vrhadds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vrhadds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vrhadds16:
-;CHECK: vrhadd.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vrhadds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vrhadds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vrhadds32:
-;CHECK: vrhadd.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vrhadds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <8 x i8> @vrhaddu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vrhaddu8:
-;CHECK: vrhadd.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vrhaddu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vrhaddu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vrhaddu16:
-;CHECK: vrhadd.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vrhaddu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vrhaddu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vrhaddu32:
-;CHECK: vrhadd.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vrhaddu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <16 x i8> @vrhaddQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vrhaddQs8:
-;CHECK: vrhadd.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vrhaddQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vrhaddQs16:
-;CHECK: vrhadd.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vrhaddQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vrhaddQs32:
-;CHECK: vrhadd.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vrhadds.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <16 x i8> @vrhaddQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vrhaddQu8:
-;CHECK: vrhadd.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vrhaddQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vrhaddQu16:
-;CHECK: vrhadd.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vrhaddQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vrhaddQu32:
-;CHECK: vrhadd.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vrhaddu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vrhadds.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vrhadds.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vrhadds.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vrhaddu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vrhaddu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vrhaddu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vrhadds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vrhadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vrhadds.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vrhaddu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vrhaddu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vrhaddu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vhsub.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vhsub.ll
deleted file mode 100644
index 0f0d027..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vhsub.ll
+++ /dev/null
@@ -1,125 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vhsubs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vhsubs8:
-;CHECK: vhsub.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vhsubs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vhsubs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vhsubs16:
-;CHECK: vhsub.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vhsubs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vhsubs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vhsubs32:
-;CHECK: vhsub.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vhsubs.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <8 x i8> @vhsubu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vhsubu8:
-;CHECK: vhsub.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vhsubu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vhsubu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vhsubu16:
-;CHECK: vhsub.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vhsubu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vhsubu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vhsubu32:
-;CHECK: vhsub.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vhsubu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <16 x i8> @vhsubQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vhsubQs8:
-;CHECK: vhsub.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vhsubs.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vhsubQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vhsubQs16:
-;CHECK: vhsub.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vhsubs.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vhsubQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vhsubQs32:
-;CHECK: vhsub.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vhsubs.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <16 x i8> @vhsubQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vhsubQu8:
-;CHECK: vhsub.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vhsubu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vhsubQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vhsubQu16:
-;CHECK: vhsub.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vhsubu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vhsubQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vhsubQu32:
-;CHECK: vhsub.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vhsubu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vhsubs.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vhsubs.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vhsubs.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vhsubu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vhsubu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vhsubu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vhsubs.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vhsubs.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vhsubs.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vhsubu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vhsubu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vhsubu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vicmp.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vicmp.ll
deleted file mode 100644
index 2d8cb89..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vicmp.ll
+++ /dev/null
@@ -1,113 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-; This tests icmp operations that do not map directly to NEON instructions.
-; Not-equal (ne) operations are implemented by VCEQ/VMVN. Less-than (lt/ult)
-; and less-than-or-equal (le/ule) are implemented by swapping the arguments
-; to VCGT and VCGE. Test all the operand types for not-equal but only sample
-; the other operations.
-
-define <8 x i8> @vcnei8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vcnei8:
-;CHECK: vceq.i8
-;CHECK-NEXT: vmvn
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = icmp ne <8 x i8> %tmp1, %tmp2
- %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @vcnei16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vcnei16:
-;CHECK: vceq.i16
-;CHECK-NEXT: vmvn
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = icmp ne <4 x i16> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @vcnei32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vcnei32:
-;CHECK: vceq.i32
-;CHECK-NEXT: vmvn
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = icmp ne <2 x i32> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
-}
-
-define <16 x i8> @vcneQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vcneQi8:
-;CHECK: vceq.i8
-;CHECK-NEXT: vmvn
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = icmp ne <16 x i8> %tmp1, %tmp2
- %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @vcneQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vcneQi16:
-;CHECK: vceq.i16
-;CHECK-NEXT: vmvn
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = icmp ne <8 x i16> %tmp1, %tmp2
- %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vcneQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vcneQi32:
-;CHECK: vceq.i32
-;CHECK-NEXT: vmvn
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = icmp ne <4 x i32> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
- ret <4 x i32> %tmp4
-}
-
-define <16 x i8> @vcltQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vcltQs8:
-;CHECK: vcgt.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = icmp slt <16 x i8> %tmp1, %tmp2
- %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
- ret <16 x i8> %tmp4
-}
-
-define <4 x i16> @vcles16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vcles16:
-;CHECK: vcge.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = icmp sle <4 x i16> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
- ret <4 x i16> %tmp4
-}
-
-define <4 x i16> @vcltu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vcltu16:
-;CHECK: vcgt.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = icmp ult <4 x i16> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
- ret <4 x i16> %tmp4
-}
-
-define <4 x i32> @vcleQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vcleQu32:
-;CHECK: vcge.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = icmp ule <4 x i32> %tmp1, %tmp2
- %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
- ret <4 x i32> %tmp4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vld1.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vld1.ll
deleted file mode 100644
index f5383aa..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vld1.ll
+++ /dev/null
@@ -1,83 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vld1i8(i8* %A) nounwind {
-;CHECK: vld1i8:
-;CHECK: vld1.8
- %tmp1 = call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %A)
- ret <8 x i8> %tmp1
-}
-
-define <4 x i16> @vld1i16(i16* %A) nounwind {
-;CHECK: vld1i16:
-;CHECK: vld1.16
- %tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i16* %A)
- ret <4 x i16> %tmp1
-}
-
-define <2 x i32> @vld1i32(i32* %A) nounwind {
-;CHECK: vld1i32:
-;CHECK: vld1.32
- %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i32* %A)
- ret <2 x i32> %tmp1
-}
-
-define <2 x float> @vld1f(float* %A) nounwind {
-;CHECK: vld1f:
-;CHECK: vld1.32
- %tmp1 = call <2 x float> @llvm.arm.neon.vld1.v2f32(float* %A)
- ret <2 x float> %tmp1
-}
-
-define <1 x i64> @vld1i64(i64* %A) nounwind {
-;CHECK: vld1i64:
-;CHECK: vld1.64
- %tmp1 = call <1 x i64> @llvm.arm.neon.vld1.v1i64(i64* %A)
- ret <1 x i64> %tmp1
-}
-
-define <16 x i8> @vld1Qi8(i8* %A) nounwind {
-;CHECK: vld1Qi8:
-;CHECK: vld1.8
- %tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A)
- ret <16 x i8> %tmp1
-}
-
-define <8 x i16> @vld1Qi16(i16* %A) nounwind {
-;CHECK: vld1Qi16:
-;CHECK: vld1.16
- %tmp1 = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i16* %A)
- ret <8 x i16> %tmp1
-}
-
-define <4 x i32> @vld1Qi32(i32* %A) nounwind {
-;CHECK: vld1Qi32:
-;CHECK: vld1.32
- %tmp1 = call <4 x i32> @llvm.arm.neon.vld1.v4i32(i32* %A)
- ret <4 x i32> %tmp1
-}
-
-define <4 x float> @vld1Qf(float* %A) nounwind {
-;CHECK: vld1Qf:
-;CHECK: vld1.32
- %tmp1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(float* %A)
- ret <4 x float> %tmp1
-}
-
-define <2 x i64> @vld1Qi64(i64* %A) nounwind {
-;CHECK: vld1Qi64:
-;CHECK: vld1.64
- %tmp1 = call <2 x i64> @llvm.arm.neon.vld1.v2i64(i64* %A)
- ret <2 x i64> %tmp1
-}
-
-declare <8 x i8> @llvm.arm.neon.vld1.v8i8(i8*) nounwind readonly
-declare <4 x i16> @llvm.arm.neon.vld1.v4i16(i8*) nounwind readonly
-declare <2 x i32> @llvm.arm.neon.vld1.v2i32(i8*) nounwind readonly
-declare <2 x float> @llvm.arm.neon.vld1.v2f32(i8*) nounwind readonly
-declare <1 x i64> @llvm.arm.neon.vld1.v1i64(i8*) nounwind readonly
-
-declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8*) nounwind readonly
-declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*) nounwind readonly
-declare <4 x i32> @llvm.arm.neon.vld1.v4i32(i8*) nounwind readonly
-declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*) nounwind readonly
-declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*) nounwind readonly
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vld2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vld2.ll
deleted file mode 100644
index 23f7d2c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vld2.ll
+++ /dev/null
@@ -1,113 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
-%struct.__neon_int16x4x2_t = type { <4 x i16>, <4 x i16> }
-%struct.__neon_int32x2x2_t = type { <2 x i32>, <2 x i32> }
-%struct.__neon_float32x2x2_t = type { <2 x float>, <2 x float> }
-%struct.__neon_int64x1x2_t = type { <1 x i64>, <1 x i64> }
-
-%struct.__neon_int8x16x2_t = type { <16 x i8>, <16 x i8> }
-%struct.__neon_int16x8x2_t = type { <8 x i16>, <8 x i16> }
-%struct.__neon_int32x4x2_t = type { <4 x i32>, <4 x i32> }
-%struct.__neon_float32x4x2_t = type { <4 x float>, <4 x float> }
-
-define <8 x i8> @vld2i8(i8* %A) nounwind {
-;CHECK: vld2i8:
-;CHECK: vld2.8
- %tmp1 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2.v8i8(i8* %A)
- %tmp2 = extractvalue %struct.__neon_int8x8x2_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp1, 1
- %tmp4 = add <8 x i8> %tmp2, %tmp3
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @vld2i16(i16* %A) nounwind {
-;CHECK: vld2i16:
-;CHECK: vld2.16
- %tmp1 = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2.v4i16(i16* %A)
- %tmp2 = extractvalue %struct.__neon_int16x4x2_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int16x4x2_t %tmp1, 1
- %tmp4 = add <4 x i16> %tmp2, %tmp3
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @vld2i32(i32* %A) nounwind {
-;CHECK: vld2i32:
-;CHECK: vld2.32
- %tmp1 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2.v2i32(i32* %A)
- %tmp2 = extractvalue %struct.__neon_int32x2x2_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int32x2x2_t %tmp1, 1
- %tmp4 = add <2 x i32> %tmp2, %tmp3
- ret <2 x i32> %tmp4
-}
-
-define <2 x float> @vld2f(float* %A) nounwind {
-;CHECK: vld2f:
-;CHECK: vld2.32
- %tmp1 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2.v2f32(float* %A)
- %tmp2 = extractvalue %struct.__neon_float32x2x2_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_float32x2x2_t %tmp1, 1
- %tmp4 = add <2 x float> %tmp2, %tmp3
- ret <2 x float> %tmp4
-}
-
-define <1 x i64> @vld2i64(i64* %A) nounwind {
-;CHECK: vld2i64:
-;CHECK: vld1.64
- %tmp1 = call %struct.__neon_int64x1x2_t @llvm.arm.neon.vld2.v1i64(i64* %A)
- %tmp2 = extractvalue %struct.__neon_int64x1x2_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int64x1x2_t %tmp1, 1
- %tmp4 = add <1 x i64> %tmp2, %tmp3
- ret <1 x i64> %tmp4
-}
-
-define <16 x i8> @vld2Qi8(i8* %A) nounwind {
-;CHECK: vld2Qi8:
-;CHECK: vld2.8
- %tmp1 = call %struct.__neon_int8x16x2_t @llvm.arm.neon.vld2.v16i8(i8* %A)
- %tmp2 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 1
- %tmp4 = add <16 x i8> %tmp2, %tmp3
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @vld2Qi16(i16* %A) nounwind {
-;CHECK: vld2Qi16:
-;CHECK: vld2.16
- %tmp1 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2.v8i16(i16* %A)
- %tmp2 = extractvalue %struct.__neon_int16x8x2_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int16x8x2_t %tmp1, 1
- %tmp4 = add <8 x i16> %tmp2, %tmp3
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vld2Qi32(i32* %A) nounwind {
-;CHECK: vld2Qi32:
-;CHECK: vld2.32
- %tmp1 = call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i32* %A)
- %tmp2 = extractvalue %struct.__neon_int32x4x2_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int32x4x2_t %tmp1, 1
- %tmp4 = add <4 x i32> %tmp2, %tmp3
- ret <4 x i32> %tmp4
-}
-
-define <4 x float> @vld2Qf(float* %A) nounwind {
-;CHECK: vld2Qf:
-;CHECK: vld2.32
- %tmp1 = call %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2.v4f32(float* %A)
- %tmp2 = extractvalue %struct.__neon_float32x4x2_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_float32x4x2_t %tmp1, 1
- %tmp4 = add <4 x float> %tmp2, %tmp3
- ret <4 x float> %tmp4
-}
-
-declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2.v8i8(i8*) nounwind readonly
-declare %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2.v4i16(i8*) nounwind readonly
-declare %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2.v2i32(i8*) nounwind readonly
-declare %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2.v2f32(i8*) nounwind readonly
-declare %struct.__neon_int64x1x2_t @llvm.arm.neon.vld2.v1i64(i8*) nounwind readonly
-
-declare %struct.__neon_int8x16x2_t @llvm.arm.neon.vld2.v16i8(i8*) nounwind readonly
-declare %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2.v8i16(i8*) nounwind readonly
-declare %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8*) nounwind readonly
-declare %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2.v4f32(i8*) nounwind readonly
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vld3.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vld3.ll
deleted file mode 100644
index 207dc6a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vld3.ll
+++ /dev/null
@@ -1,117 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-%struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
-%struct.__neon_int16x4x3_t = type { <4 x i16>, <4 x i16>, <4 x i16> }
-%struct.__neon_int32x2x3_t = type { <2 x i32>, <2 x i32>, <2 x i32> }
-%struct.__neon_float32x2x3_t = type { <2 x float>, <2 x float>, <2 x float> }
-%struct.__neon_int64x1x3_t = type { <1 x i64>, <1 x i64>, <1 x i64> }
-
-%struct.__neon_int8x16x3_t = type { <16 x i8>, <16 x i8>, <16 x i8> }
-%struct.__neon_int16x8x3_t = type { <8 x i16>, <8 x i16>, <8 x i16> }
-%struct.__neon_int32x4x3_t = type { <4 x i32>, <4 x i32>, <4 x i32> }
-%struct.__neon_float32x4x3_t = type { <4 x float>, <4 x float>, <4 x float> }
-
-define <8 x i8> @vld3i8(i8* %A) nounwind {
-;CHECK: vld3i8:
-;CHECK: vld3.8
- %tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A)
- %tmp2 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 2
- %tmp4 = add <8 x i8> %tmp2, %tmp3
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @vld3i16(i16* %A) nounwind {
-;CHECK: vld3i16:
-;CHECK: vld3.16
- %tmp1 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3.v4i16(i16* %A)
- %tmp2 = extractvalue %struct.__neon_int16x4x3_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int16x4x3_t %tmp1, 2
- %tmp4 = add <4 x i16> %tmp2, %tmp3
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @vld3i32(i32* %A) nounwind {
-;CHECK: vld3i32:
-;CHECK: vld3.32
- %tmp1 = call %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3.v2i32(i32* %A)
- %tmp2 = extractvalue %struct.__neon_int32x2x3_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int32x2x3_t %tmp1, 2
- %tmp4 = add <2 x i32> %tmp2, %tmp3
- ret <2 x i32> %tmp4
-}
-
-define <2 x float> @vld3f(float* %A) nounwind {
-;CHECK: vld3f:
-;CHECK: vld3.32
- %tmp1 = call %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3.v2f32(float* %A)
- %tmp2 = extractvalue %struct.__neon_float32x2x3_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_float32x2x3_t %tmp1, 2
- %tmp4 = add <2 x float> %tmp2, %tmp3
- ret <2 x float> %tmp4
-}
-
-define <1 x i64> @vld3i64(i64* %A) nounwind {
-;CHECK: vld3i64:
-;CHECK: vld1.64
- %tmp1 = call %struct.__neon_int64x1x3_t @llvm.arm.neon.vld3.v1i64(i64* %A)
- %tmp2 = extractvalue %struct.__neon_int64x1x3_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int64x1x3_t %tmp1, 2
- %tmp4 = add <1 x i64> %tmp2, %tmp3
- ret <1 x i64> %tmp4
-}
-
-define <16 x i8> @vld3Qi8(i8* %A) nounwind {
-;CHECK: vld3Qi8:
-;CHECK: vld3.8
-;CHECK: vld3.8
- %tmp1 = call %struct.__neon_int8x16x3_t @llvm.arm.neon.vld3.v16i8(i8* %A)
- %tmp2 = extractvalue %struct.__neon_int8x16x3_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int8x16x3_t %tmp1, 2
- %tmp4 = add <16 x i8> %tmp2, %tmp3
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @vld3Qi16(i16* %A) nounwind {
-;CHECK: vld3Qi16:
-;CHECK: vld3.16
-;CHECK: vld3.16
- %tmp1 = call %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3.v8i16(i16* %A)
- %tmp2 = extractvalue %struct.__neon_int16x8x3_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int16x8x3_t %tmp1, 2
- %tmp4 = add <8 x i16> %tmp2, %tmp3
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vld3Qi32(i32* %A) nounwind {
-;CHECK: vld3Qi32:
-;CHECK: vld3.32
-;CHECK: vld3.32
- %tmp1 = call %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3.v4i32(i32* %A)
- %tmp2 = extractvalue %struct.__neon_int32x4x3_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int32x4x3_t %tmp1, 2
- %tmp4 = add <4 x i32> %tmp2, %tmp3
- ret <4 x i32> %tmp4
-}
-
-define <4 x float> @vld3Qf(float* %A) nounwind {
-;CHECK: vld3Qf:
-;CHECK: vld3.32
-;CHECK: vld3.32
- %tmp1 = call %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3.v4f32(float* %A)
- %tmp2 = extractvalue %struct.__neon_float32x4x3_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_float32x4x3_t %tmp1, 2
- %tmp4 = add <4 x float> %tmp2, %tmp3
- ret <4 x float> %tmp4
-}
-
-declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8*) nounwind readonly
-declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3.v4i16(i8*) nounwind readonly
-declare %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3.v2i32(i8*) nounwind readonly
-declare %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3.v2f32(i8*) nounwind readonly
-declare %struct.__neon_int64x1x3_t @llvm.arm.neon.vld3.v1i64(i8*) nounwind readonly
-
-declare %struct.__neon_int8x16x3_t @llvm.arm.neon.vld3.v16i8(i8*) nounwind readonly
-declare %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3.v8i16(i8*) nounwind readonly
-declare %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3.v4i32(i8*) nounwind readonly
-declare %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3.v4f32(i8*) nounwind readonly
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vld4.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vld4.ll
deleted file mode 100644
index 0624f29..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vld4.ll
+++ /dev/null
@@ -1,117 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-%struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
-%struct.__neon_int16x4x4_t = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }
-%struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }
-%struct.__neon_float32x2x4_t = type { <2 x float>, <2 x float>, <2 x float>, <2 x float> }
-%struct.__neon_int64x1x4_t = type { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> }
-
-%struct.__neon_int8x16x4_t = type { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> }
-%struct.__neon_int16x8x4_t = type { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> }
-%struct.__neon_int32x4x4_t = type { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> }
-%struct.__neon_float32x4x4_t = type { <4 x float>, <4 x float>, <4 x float>, <4 x float> }
-
-define <8 x i8> @vld4i8(i8* %A) nounwind {
-;CHECK: vld4i8:
-;CHECK: vld4.8
- %tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8* %A)
- %tmp2 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 2
- %tmp4 = add <8 x i8> %tmp2, %tmp3
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @vld4i16(i16* %A) nounwind {
-;CHECK: vld4i16:
-;CHECK: vld4.16
- %tmp1 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4.v4i16(i16* %A)
- %tmp2 = extractvalue %struct.__neon_int16x4x4_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int16x4x4_t %tmp1, 2
- %tmp4 = add <4 x i16> %tmp2, %tmp3
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @vld4i32(i32* %A) nounwind {
-;CHECK: vld4i32:
-;CHECK: vld4.32
- %tmp1 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i32* %A)
- %tmp2 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 2
- %tmp4 = add <2 x i32> %tmp2, %tmp3
- ret <2 x i32> %tmp4
-}
-
-define <2 x float> @vld4f(float* %A) nounwind {
-;CHECK: vld4f:
-;CHECK: vld4.32
- %tmp1 = call %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4.v2f32(float* %A)
- %tmp2 = extractvalue %struct.__neon_float32x2x4_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_float32x2x4_t %tmp1, 2
- %tmp4 = add <2 x float> %tmp2, %tmp3
- ret <2 x float> %tmp4
-}
-
-define <1 x i64> @vld4i64(i64* %A) nounwind {
-;CHECK: vld4i64:
-;CHECK: vld1.64
- %tmp1 = call %struct.__neon_int64x1x4_t @llvm.arm.neon.vld4.v1i64(i64* %A)
- %tmp2 = extractvalue %struct.__neon_int64x1x4_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int64x1x4_t %tmp1, 2
- %tmp4 = add <1 x i64> %tmp2, %tmp3
- ret <1 x i64> %tmp4
-}
-
-define <16 x i8> @vld4Qi8(i8* %A) nounwind {
-;CHECK: vld4Qi8:
-;CHECK: vld4.8
-;CHECK: vld4.8
- %tmp1 = call %struct.__neon_int8x16x4_t @llvm.arm.neon.vld4.v16i8(i8* %A)
- %tmp2 = extractvalue %struct.__neon_int8x16x4_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int8x16x4_t %tmp1, 2
- %tmp4 = add <16 x i8> %tmp2, %tmp3
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @vld4Qi16(i16* %A) nounwind {
-;CHECK: vld4Qi16:
-;CHECK: vld4.16
-;CHECK: vld4.16
- %tmp1 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4.v8i16(i16* %A)
- %tmp2 = extractvalue %struct.__neon_int16x8x4_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int16x8x4_t %tmp1, 2
- %tmp4 = add <8 x i16> %tmp2, %tmp3
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vld4Qi32(i32* %A) nounwind {
-;CHECK: vld4Qi32:
-;CHECK: vld4.32
-;CHECK: vld4.32
- %tmp1 = call %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4.v4i32(i32* %A)
- %tmp2 = extractvalue %struct.__neon_int32x4x4_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_int32x4x4_t %tmp1, 2
- %tmp4 = add <4 x i32> %tmp2, %tmp3
- ret <4 x i32> %tmp4
-}
-
-define <4 x float> @vld4Qf(float* %A) nounwind {
-;CHECK: vld4Qf:
-;CHECK: vld4.32
-;CHECK: vld4.32
- %tmp1 = call %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4.v4f32(float* %A)
- %tmp2 = extractvalue %struct.__neon_float32x4x4_t %tmp1, 0
- %tmp3 = extractvalue %struct.__neon_float32x4x4_t %tmp1, 2
- %tmp4 = add <4 x float> %tmp2, %tmp3
- ret <4 x float> %tmp4
-}
-
-declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8*) nounwind readonly
-declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4.v4i16(i8*) nounwind readonly
-declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i8*) nounwind readonly
-declare %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4.v2f32(i8*) nounwind readonly
-declare %struct.__neon_int64x1x4_t @llvm.arm.neon.vld4.v1i64(i8*) nounwind readonly
-
-declare %struct.__neon_int8x16x4_t @llvm.arm.neon.vld4.v16i8(i8*) nounwind readonly
-declare %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4.v8i16(i8*) nounwind readonly
-declare %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4.v4i32(i8*) nounwind readonly
-declare %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4.v4f32(i8*) nounwind readonly
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vldlane.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vldlane.ll
deleted file mode 100644
index 53881a3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vldlane.ll
+++ /dev/null
@@ -1,328 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
-%struct.__neon_int16x4x2_t = type { <4 x i16>, <4 x i16> }
-%struct.__neon_int32x2x2_t = type { <2 x i32>, <2 x i32> }
-%struct.__neon_float32x2x2_t = type { <2 x float>, <2 x float> }
-
-%struct.__neon_int16x8x2_t = type { <8 x i16>, <8 x i16> }
-%struct.__neon_int32x4x2_t = type { <4 x i32>, <4 x i32> }
-%struct.__neon_float32x4x2_t = type { <4 x float>, <4 x float> }
-
-define <8 x i8> @vld2lanei8(i8* %A, <8 x i8>* %B) nounwind {
-;CHECK: vld2lanei8:
-;CHECK: vld2.8
- %tmp1 = load <8 x i8>* %B
- %tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
- %tmp5 = add <8 x i8> %tmp3, %tmp4
- ret <8 x i8> %tmp5
-}
-
-define <4 x i16> @vld2lanei16(i16* %A, <4 x i16>* %B) nounwind {
-;CHECK: vld2lanei16:
-;CHECK: vld2.16
- %tmp1 = load <4 x i16>* %B
- %tmp2 = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2lane.v4i16(i16* %A, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_int16x4x2_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int16x4x2_t %tmp2, 1
- %tmp5 = add <4 x i16> %tmp3, %tmp4
- ret <4 x i16> %tmp5
-}
-
-define <2 x i32> @vld2lanei32(i32* %A, <2 x i32>* %B) nounwind {
-;CHECK: vld2lanei32:
-;CHECK: vld2.32
- %tmp1 = load <2 x i32>* %B
- %tmp2 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2lane.v2i32(i32* %A, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 1
- %tmp5 = add <2 x i32> %tmp3, %tmp4
- ret <2 x i32> %tmp5
-}
-
-define <2 x float> @vld2lanef(float* %A, <2 x float>* %B) nounwind {
-;CHECK: vld2lanef:
-;CHECK: vld2.32
- %tmp1 = load <2 x float>* %B
- %tmp2 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2lane.v2f32(float* %A, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_float32x2x2_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_float32x2x2_t %tmp2, 1
- %tmp5 = add <2 x float> %tmp3, %tmp4
- ret <2 x float> %tmp5
-}
-
-define <8 x i16> @vld2laneQi16(i16* %A, <8 x i16>* %B) nounwind {
-;CHECK: vld2laneQi16:
-;CHECK: vld2.16
- %tmp1 = load <8 x i16>* %B
- %tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 1
- %tmp5 = add <8 x i16> %tmp3, %tmp4
- ret <8 x i16> %tmp5
-}
-
-define <4 x i32> @vld2laneQi32(i32* %A, <4 x i32>* %B) nounwind {
-;CHECK: vld2laneQi32:
-;CHECK: vld2.32
- %tmp1 = load <4 x i32>* %B
- %tmp2 = call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2lane.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2)
- %tmp3 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 1
- %tmp5 = add <4 x i32> %tmp3, %tmp4
- ret <4 x i32> %tmp5
-}
-
-define <4 x float> @vld2laneQf(float* %A, <4 x float>* %B) nounwind {
-;CHECK: vld2laneQf:
-;CHECK: vld2.32
- %tmp1 = load <4 x float>* %B
- %tmp2 = call %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2lane.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_float32x4x2_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_float32x4x2_t %tmp2, 1
- %tmp5 = add <4 x float> %tmp3, %tmp4
- ret <4 x float> %tmp5
-}
-
-declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32) nounwind readonly
-declare %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32) nounwind readonly
-declare %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32) nounwind readonly
-declare %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2lane.v2f32(i8*, <2 x float>, <2 x float>, i32) nounwind readonly
-
-declare %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32) nounwind readonly
-declare %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2lane.v4i32(i8*, <4 x i32>, <4 x i32>, i32) nounwind readonly
-declare %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2lane.v4f32(i8*, <4 x float>, <4 x float>, i32) nounwind readonly
-
-%struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
-%struct.__neon_int16x4x3_t = type { <4 x i16>, <4 x i16>, <4 x i16> }
-%struct.__neon_int32x2x3_t = type { <2 x i32>, <2 x i32>, <2 x i32> }
-%struct.__neon_float32x2x3_t = type { <2 x float>, <2 x float>, <2 x float> }
-
-%struct.__neon_int16x8x3_t = type { <8 x i16>, <8 x i16>, <8 x i16> }
-%struct.__neon_int32x4x3_t = type { <4 x i32>, <4 x i32>, <4 x i32> }
-%struct.__neon_float32x4x3_t = type { <4 x float>, <4 x float>, <4 x float> }
-
-define <8 x i8> @vld3lanei8(i8* %A, <8 x i8>* %B) nounwind {
-;CHECK: vld3lanei8:
-;CHECK: vld3.8
- %tmp1 = load <8 x i8>* %B
- %tmp2 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 2
- %tmp6 = add <8 x i8> %tmp3, %tmp4
- %tmp7 = add <8 x i8> %tmp5, %tmp6
- ret <8 x i8> %tmp7
-}
-
-define <4 x i16> @vld3lanei16(i16* %A, <4 x i16>* %B) nounwind {
-;CHECK: vld3lanei16:
-;CHECK: vld3.16
- %tmp1 = load <4 x i16>* %B
- %tmp2 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i16* %A, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_int16x4x3_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int16x4x3_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_int16x4x3_t %tmp2, 2
- %tmp6 = add <4 x i16> %tmp3, %tmp4
- %tmp7 = add <4 x i16> %tmp5, %tmp6
- ret <4 x i16> %tmp7
-}
-
-define <2 x i32> @vld3lanei32(i32* %A, <2 x i32>* %B) nounwind {
-;CHECK: vld3lanei32:
-;CHECK: vld3.32
- %tmp1 = load <2 x i32>* %B
- %tmp2 = call %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3lane.v2i32(i32* %A, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_int32x2x3_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int32x2x3_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_int32x2x3_t %tmp2, 2
- %tmp6 = add <2 x i32> %tmp3, %tmp4
- %tmp7 = add <2 x i32> %tmp5, %tmp6
- ret <2 x i32> %tmp7
-}
-
-define <2 x float> @vld3lanef(float* %A, <2 x float>* %B) nounwind {
-;CHECK: vld3lanef:
-;CHECK: vld3.32
- %tmp1 = load <2 x float>* %B
- %tmp2 = call %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3lane.v2f32(float* %A, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_float32x2x3_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_float32x2x3_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_float32x2x3_t %tmp2, 2
- %tmp6 = add <2 x float> %tmp3, %tmp4
- %tmp7 = add <2 x float> %tmp5, %tmp6
- ret <2 x float> %tmp7
-}
-
-define <8 x i16> @vld3laneQi16(i16* %A, <8 x i16>* %B) nounwind {
-;CHECK: vld3laneQi16:
-;CHECK: vld3.16
- %tmp1 = load <8 x i16>* %B
- %tmp2 = call %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3lane.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 2
- %tmp6 = add <8 x i16> %tmp3, %tmp4
- %tmp7 = add <8 x i16> %tmp5, %tmp6
- ret <8 x i16> %tmp7
-}
-
-define <4 x i32> @vld3laneQi32(i32* %A, <4 x i32>* %B) nounwind {
-;CHECK: vld3laneQi32:
-;CHECK: vld3.32
- %tmp1 = load <4 x i32>* %B
- %tmp2 = call %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3lane.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 3)
- %tmp3 = extractvalue %struct.__neon_int32x4x3_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int32x4x3_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_int32x4x3_t %tmp2, 2
- %tmp6 = add <4 x i32> %tmp3, %tmp4
- %tmp7 = add <4 x i32> %tmp5, %tmp6
- ret <4 x i32> %tmp7
-}
-
-define <4 x float> @vld3laneQf(float* %A, <4 x float>* %B) nounwind {
-;CHECK: vld3laneQf:
-;CHECK: vld3.32
- %tmp1 = load <4 x float>* %B
- %tmp2 = call %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3lane.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_float32x4x3_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_float32x4x3_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_float32x4x3_t %tmp2, 2
- %tmp6 = add <4 x float> %tmp3, %tmp4
- %tmp7 = add <4 x float> %tmp5, %tmp6
- ret <4 x float> %tmp7
-}
-
-declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind readonly
-declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind readonly
-declare %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind readonly
-declare %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32) nounwind readonly
-
-declare %struct.__neon_int16x8x3_t @llvm.arm.neon.vld3lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind readonly
-declare %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind readonly
-declare %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32) nounwind readonly
-
-%struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
-%struct.__neon_int16x4x4_t = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }
-%struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }
-%struct.__neon_float32x2x4_t = type { <2 x float>, <2 x float>, <2 x float>, <2 x float> }
-
-%struct.__neon_int16x8x4_t = type { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> }
-%struct.__neon_int32x4x4_t = type { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> }
-%struct.__neon_float32x4x4_t = type { <4 x float>, <4 x float>, <4 x float>, <4 x float> }
-
-define <8 x i8> @vld4lanei8(i8* %A, <8 x i8>* %B) nounwind {
-;CHECK: vld4lanei8:
-;CHECK: vld4.8
- %tmp1 = load <8 x i8>* %B
- %tmp2 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 2
- %tmp6 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 3
- %tmp7 = add <8 x i8> %tmp3, %tmp4
- %tmp8 = add <8 x i8> %tmp5, %tmp6
- %tmp9 = add <8 x i8> %tmp7, %tmp8
- ret <8 x i8> %tmp9
-}
-
-define <4 x i16> @vld4lanei16(i16* %A, <4 x i16>* %B) nounwind {
-;CHECK: vld4lanei16:
-;CHECK: vld4.16
- %tmp1 = load <4 x i16>* %B
- %tmp2 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i16* %A, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 2
- %tmp6 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 3
- %tmp7 = add <4 x i16> %tmp3, %tmp4
- %tmp8 = add <4 x i16> %tmp5, %tmp6
- %tmp9 = add <4 x i16> %tmp7, %tmp8
- ret <4 x i16> %tmp9
-}
-
-define <2 x i32> @vld4lanei32(i32* %A, <2 x i32>* %B) nounwind {
-;CHECK: vld4lanei32:
-;CHECK: vld4.32
- %tmp1 = load <2 x i32>* %B
- %tmp2 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i32* %A, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 2
- %tmp6 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 3
- %tmp7 = add <2 x i32> %tmp3, %tmp4
- %tmp8 = add <2 x i32> %tmp5, %tmp6
- %tmp9 = add <2 x i32> %tmp7, %tmp8
- ret <2 x i32> %tmp9
-}
-
-define <2 x float> @vld4lanef(float* %A, <2 x float>* %B) nounwind {
-;CHECK: vld4lanef:
-;CHECK: vld4.32
- %tmp1 = load <2 x float>* %B
- %tmp2 = call %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4lane.v2f32(float* %A, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_float32x2x4_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_float32x2x4_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_float32x2x4_t %tmp2, 2
- %tmp6 = extractvalue %struct.__neon_float32x2x4_t %tmp2, 3
- %tmp7 = add <2 x float> %tmp3, %tmp4
- %tmp8 = add <2 x float> %tmp5, %tmp6
- %tmp9 = add <2 x float> %tmp7, %tmp8
- ret <2 x float> %tmp9
-}
-
-define <8 x i16> @vld4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
-;CHECK: vld4laneQi16:
-;CHECK: vld4.16
- %tmp1 = load <8 x i16>* %B
- %tmp2 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4lane.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 2
- %tmp6 = extractvalue %struct.__neon_int16x8x4_t %tmp2, 3
- %tmp7 = add <8 x i16> %tmp3, %tmp4
- %tmp8 = add <8 x i16> %tmp5, %tmp6
- %tmp9 = add <8 x i16> %tmp7, %tmp8
- ret <8 x i16> %tmp9
-}
-
-define <4 x i32> @vld4laneQi32(i32* %A, <4 x i32>* %B) nounwind {
-;CHECK: vld4laneQi32:
-;CHECK: vld4.32
- %tmp1 = load <4 x i32>* %B
- %tmp2 = call %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4lane.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 2
- %tmp6 = extractvalue %struct.__neon_int32x4x4_t %tmp2, 3
- %tmp7 = add <4 x i32> %tmp3, %tmp4
- %tmp8 = add <4 x i32> %tmp5, %tmp6
- %tmp9 = add <4 x i32> %tmp7, %tmp8
- ret <4 x i32> %tmp9
-}
-
-define <4 x float> @vld4laneQf(float* %A, <4 x float>* %B) nounwind {
-;CHECK: vld4laneQf:
-;CHECK: vld4.32
- %tmp1 = load <4 x float>* %B
- %tmp2 = call %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4lane.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 2
- %tmp6 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 3
- %tmp7 = add <4 x float> %tmp3, %tmp4
- %tmp8 = add <4 x float> %tmp5, %tmp6
- %tmp9 = add <4 x float> %tmp7, %tmp8
- ret <4 x float> %tmp9
-}
-
-declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind readonly
-declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind readonly
-declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind readonly
-declare %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32) nounwind readonly
-
-declare %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind readonly
-declare %struct.__neon_int32x4x4_t @llvm.arm.neon.vld4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind readonly
-declare %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) nounwind readonly
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vminmax.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vminmax.ll
deleted file mode 100644
index e3527c1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vminmax.ll
+++ /dev/null
@@ -1,293 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vmins8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmins8:
-;CHECK: vmin.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vmins16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vmins16:
-;CHECK: vmin.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vmins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vmins32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vmins32:
-;CHECK: vmin.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <8 x i8> @vminu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vminu8:
-;CHECK: vmin.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vminu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vminu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vminu16:
-;CHECK: vmin.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vminu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vminu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vminu32:
-;CHECK: vmin.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <2 x float> @vminf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vminf32:
-;CHECK: vmin.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = call <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
- ret <2 x float> %tmp3
-}
-
-define <16 x i8> @vminQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vminQs8:
-;CHECK: vmin.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vmins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vminQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vminQs16:
-;CHECK: vmin.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vmins.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vminQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vminQs32:
-;CHECK: vmin.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <16 x i8> @vminQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vminQu8:
-;CHECK: vmin.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vminu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vminQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vminQu16:
-;CHECK: vmin.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vminu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vminQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vminQu32:
-;CHECK: vmin.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <4 x float> @vminQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vminQf32:
-;CHECK: vmin.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = call <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
- ret <4 x float> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vmins.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vmins.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vminu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vminu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vminu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <2 x float> @llvm.arm.neon.vmins.v2f32(<2 x float>, <2 x float>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vmins.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vmins.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmins.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vminu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vminu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vminu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-
-declare <4 x float> @llvm.arm.neon.vmins.v4f32(<4 x float>, <4 x float>) nounwind readnone
-
-define <8 x i8> @vmaxs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmaxs8:
-;CHECK: vmax.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vmaxs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vmaxs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vmaxs16:
-;CHECK: vmax.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vmaxs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vmaxs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vmaxs32:
-;CHECK: vmax.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <8 x i8> @vmaxu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmaxu8:
-;CHECK: vmax.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vmaxu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vmaxu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vmaxu16:
-;CHECK: vmax.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vmaxu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vmaxu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vmaxu32:
-;CHECK: vmax.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <2 x float> @vmaxf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vmaxf32:
-;CHECK: vmax.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = call <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
- ret <2 x float> %tmp3
-}
-
-define <16 x i8> @vmaxQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vmaxQs8:
-;CHECK: vmax.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vmaxs.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vmaxQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vmaxQs16:
-;CHECK: vmax.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vmaxs.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vmaxQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vmaxQs32:
-;CHECK: vmax.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <16 x i8> @vmaxQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vmaxQu8:
-;CHECK: vmax.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vmaxu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vmaxQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vmaxQu16:
-;CHECK: vmax.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vmaxQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vmaxQu32:
-;CHECK: vmax.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <4 x float> @vmaxQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vmaxQf32:
-;CHECK: vmax.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
- ret <4 x float> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vmaxs.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vmaxs.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vmaxs.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vmaxu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vmaxu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vmaxu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <2 x float> @llvm.arm.neon.vmaxs.v2f32(<2 x float>, <2 x float>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vmaxs.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vmaxs.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmaxs.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vmaxu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmaxu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-
-declare <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float>, <4 x float>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vmla.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vmla.ll
deleted file mode 100644
index 8405218..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vmla.ll
+++ /dev/null
@@ -1,193 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vmlai8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8> * %C) nounwind {
-;CHECK: vmlai8:
-;CHECK: vmla.i8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i8>* %C
- %tmp4 = mul <8 x i8> %tmp2, %tmp3
- %tmp5 = add <8 x i8> %tmp1, %tmp4
- ret <8 x i8> %tmp5
-}
-
-define <4 x i16> @vmlai16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vmlai16:
-;CHECK: vmla.i16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
- %tmp4 = mul <4 x i16> %tmp2, %tmp3
- %tmp5 = add <4 x i16> %tmp1, %tmp4
- ret <4 x i16> %tmp5
-}
-
-define <2 x i32> @vmlai32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vmlai32:
-;CHECK: vmla.i32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
- %tmp4 = mul <2 x i32> %tmp2, %tmp3
- %tmp5 = add <2 x i32> %tmp1, %tmp4
- ret <2 x i32> %tmp5
-}
-
-define <2 x float> @vmlaf32(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
-;CHECK: vmlaf32:
-;CHECK: vmla.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = load <2 x float>* %C
- %tmp4 = mul <2 x float> %tmp2, %tmp3
- %tmp5 = add <2 x float> %tmp1, %tmp4
- ret <2 x float> %tmp5
-}
-
-define <16 x i8> @vmlaQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8> * %C) nounwind {
-;CHECK: vmlaQi8:
-;CHECK: vmla.i8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = load <16 x i8>* %C
- %tmp4 = mul <16 x i8> %tmp2, %tmp3
- %tmp5 = add <16 x i8> %tmp1, %tmp4
- ret <16 x i8> %tmp5
-}
-
-define <8 x i16> @vmlaQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
-;CHECK: vmlaQi16:
-;CHECK: vmla.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = load <8 x i16>* %C
- %tmp4 = mul <8 x i16> %tmp2, %tmp3
- %tmp5 = add <8 x i16> %tmp1, %tmp4
- ret <8 x i16> %tmp5
-}
-
-define <4 x i32> @vmlaQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
-;CHECK: vmlaQi32:
-;CHECK: vmla.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = load <4 x i32>* %C
- %tmp4 = mul <4 x i32> %tmp2, %tmp3
- %tmp5 = add <4 x i32> %tmp1, %tmp4
- ret <4 x i32> %tmp5
-}
-
-define <4 x float> @vmlaQf32(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
-;CHECK: vmlaQf32:
-;CHECK: vmla.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = load <4 x float>* %C
- %tmp4 = mul <4 x float> %tmp2, %tmp3
- %tmp5 = add <4 x float> %tmp1, %tmp4
- ret <4 x float> %tmp5
-}
-
-define <8 x i16> @vmlals8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vmlals8:
-;CHECK: vmlal.s8
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i8>* %C
- %tmp4 = call <8 x i16> @llvm.arm.neon.vmlals.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vmlals16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vmlals16:
-;CHECK: vmlal.s16
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
- %tmp4 = call <4 x i32> @llvm.arm.neon.vmlals.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
- ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @vmlals32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vmlals32:
-;CHECK: vmlal.s32
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
- %tmp4 = call <2 x i64> @llvm.arm.neon.vmlals.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
- ret <2 x i64> %tmp4
-}
-
-define <8 x i16> @vmlalu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vmlalu8:
-;CHECK: vmlal.u8
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i8>* %C
- %tmp4 = call <8 x i16> @llvm.arm.neon.vmlalu.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vmlalu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vmlalu16:
-;CHECK: vmlal.u16
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
- %tmp4 = call <4 x i32> @llvm.arm.neon.vmlalu.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
- ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @vmlalu32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vmlalu32:
-;CHECK: vmlal.u32
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
- %tmp4 = call <2 x i64> @llvm.arm.neon.vmlalu.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
- ret <2 x i64> %tmp4
-}
-
-define arm_aapcs_vfpcc <4 x i32> @test_vmlal_lanes16(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %arg2_int16x4_t) nounwind readnone {
-entry:
-; CHECK: test_vmlal_lanes16
-; CHECK: vmlal.s16 q0, d2, d3[1]
- %0 = shufflevector <4 x i16> %arg2_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
- %1 = tail call <4 x i32> @llvm.arm.neon.vmlals.v4i32(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %1
-}
-
-define arm_aapcs_vfpcc <2 x i64> @test_vmlal_lanes32(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %arg2_int32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vmlal_lanes32
-; CHECK: vmlal.s32 q0, d2, d3[1]
- %0 = shufflevector <2 x i32> %arg2_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
- %1 = tail call <2 x i64> @llvm.arm.neon.vmlals.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %1
-}
-
-define arm_aapcs_vfpcc <4 x i32> @test_vmlal_laneu16(<4 x i32> %arg0_uint32x4_t, <4 x i16> %arg1_uint16x4_t, <4 x i16> %arg2_uint16x4_t) nounwind readnone {
-entry:
-; CHECK: test_vmlal_laneu16
-; CHECK: vmlal.u16 q0, d2, d3[1]
- %0 = shufflevector <4 x i16> %arg2_uint16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
- %1 = tail call <4 x i32> @llvm.arm.neon.vmlalu.v4i32(<4 x i32> %arg0_uint32x4_t, <4 x i16> %arg1_uint16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %1
-}
-
-define arm_aapcs_vfpcc <2 x i64> @test_vmlal_laneu32(<2 x i64> %arg0_uint64x2_t, <2 x i32> %arg1_uint32x2_t, <2 x i32> %arg2_uint32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vmlal_laneu32
-; CHECK: vmlal.u32 q0, d2, d3[1]
- %0 = shufflevector <2 x i32> %arg2_uint32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
- %1 = tail call <2 x i64> @llvm.arm.neon.vmlalu.v2i64(<2 x i64> %arg0_uint64x2_t, <2 x i32> %arg1_uint32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %1
-}
-
-declare <8 x i16> @llvm.arm.neon.vmlals.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmlals.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmlals.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vmlalu.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmlalu.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmlalu.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vmls.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vmls.ll
deleted file mode 100644
index c89552e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vmls.ll
+++ /dev/null
@@ -1,193 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vmlsi8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8> * %C) nounwind {
-;CHECK: vmlsi8:
-;CHECK: vmls.i8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i8>* %C
- %tmp4 = mul <8 x i8> %tmp2, %tmp3
- %tmp5 = sub <8 x i8> %tmp1, %tmp4
- ret <8 x i8> %tmp5
-}
-
-define <4 x i16> @vmlsi16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vmlsi16:
-;CHECK: vmls.i16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
- %tmp4 = mul <4 x i16> %tmp2, %tmp3
- %tmp5 = sub <4 x i16> %tmp1, %tmp4
- ret <4 x i16> %tmp5
-}
-
-define <2 x i32> @vmlsi32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vmlsi32:
-;CHECK: vmls.i32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
- %tmp4 = mul <2 x i32> %tmp2, %tmp3
- %tmp5 = sub <2 x i32> %tmp1, %tmp4
- ret <2 x i32> %tmp5
-}
-
-define <2 x float> @vmlsf32(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
-;CHECK: vmlsf32:
-;CHECK: vmls.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = load <2 x float>* %C
- %tmp4 = mul <2 x float> %tmp2, %tmp3
- %tmp5 = sub <2 x float> %tmp1, %tmp4
- ret <2 x float> %tmp5
-}
-
-define <16 x i8> @vmlsQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8> * %C) nounwind {
-;CHECK: vmlsQi8:
-;CHECK: vmls.i8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = load <16 x i8>* %C
- %tmp4 = mul <16 x i8> %tmp2, %tmp3
- %tmp5 = sub <16 x i8> %tmp1, %tmp4
- ret <16 x i8> %tmp5
-}
-
-define <8 x i16> @vmlsQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
-;CHECK: vmlsQi16:
-;CHECK: vmls.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = load <8 x i16>* %C
- %tmp4 = mul <8 x i16> %tmp2, %tmp3
- %tmp5 = sub <8 x i16> %tmp1, %tmp4
- ret <8 x i16> %tmp5
-}
-
-define <4 x i32> @vmlsQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
-;CHECK: vmlsQi32:
-;CHECK: vmls.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = load <4 x i32>* %C
- %tmp4 = mul <4 x i32> %tmp2, %tmp3
- %tmp5 = sub <4 x i32> %tmp1, %tmp4
- ret <4 x i32> %tmp5
-}
-
-define <4 x float> @vmlsQf32(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
-;CHECK: vmlsQf32:
-;CHECK: vmls.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = load <4 x float>* %C
- %tmp4 = mul <4 x float> %tmp2, %tmp3
- %tmp5 = sub <4 x float> %tmp1, %tmp4
- ret <4 x float> %tmp5
-}
-
-define <8 x i16> @vmlsls8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vmlsls8:
-;CHECK: vmlsl.s8
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i8>* %C
- %tmp4 = call <8 x i16> @llvm.arm.neon.vmlsls.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vmlsls16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vmlsls16:
-;CHECK: vmlsl.s16
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
- %tmp4 = call <4 x i32> @llvm.arm.neon.vmlsls.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
- ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @vmlsls32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vmlsls32:
-;CHECK: vmlsl.s32
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
- %tmp4 = call <2 x i64> @llvm.arm.neon.vmlsls.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
- ret <2 x i64> %tmp4
-}
-
-define <8 x i16> @vmlslu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vmlslu8:
-;CHECK: vmlsl.u8
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i8>* %C
- %tmp4 = call <8 x i16> @llvm.arm.neon.vmlslu.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vmlslu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vmlslu16:
-;CHECK: vmlsl.u16
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
- %tmp4 = call <4 x i32> @llvm.arm.neon.vmlslu.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
- ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @vmlslu32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vmlslu32:
-;CHECK: vmlsl.u32
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
- %tmp4 = call <2 x i64> @llvm.arm.neon.vmlslu.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
- ret <2 x i64> %tmp4
-}
-
-define arm_aapcs_vfpcc <4 x i32> @test_vmlsl_lanes16(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %arg2_int16x4_t) nounwind readnone {
-entry:
-; CHECK: test_vmlsl_lanes16
-; CHECK: vmlsl.s16 q0, d2, d3[1]
- %0 = shufflevector <4 x i16> %arg2_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
- %1 = tail call <4 x i32> @llvm.arm.neon.vmlsls.v4i32(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %1
-}
-
-define arm_aapcs_vfpcc <2 x i64> @test_vmlsl_lanes32(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %arg2_int32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vmlsl_lanes32
-; CHECK: vmlsl.s32 q0, d2, d3[1]
- %0 = shufflevector <2 x i32> %arg2_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
- %1 = tail call <2 x i64> @llvm.arm.neon.vmlsls.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %1
-}
-
-define arm_aapcs_vfpcc <4 x i32> @test_vmlsl_laneu16(<4 x i32> %arg0_uint32x4_t, <4 x i16> %arg1_uint16x4_t, <4 x i16> %arg2_uint16x4_t) nounwind readnone {
-entry:
-; CHECK: test_vmlsl_laneu16
-; CHECK: vmlsl.u16 q0, d2, d3[1]
- %0 = shufflevector <4 x i16> %arg2_uint16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
- %1 = tail call <4 x i32> @llvm.arm.neon.vmlslu.v4i32(<4 x i32> %arg0_uint32x4_t, <4 x i16> %arg1_uint16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %1
-}
-
-define arm_aapcs_vfpcc <2 x i64> @test_vmlsl_laneu32(<2 x i64> %arg0_uint64x2_t, <2 x i32> %arg1_uint32x2_t, <2 x i32> %arg2_uint32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vmlsl_laneu32
-; CHECK: vmlsl.u32 q0, d2, d3[1]
- %0 = shufflevector <2 x i32> %arg2_uint32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
- %1 = tail call <2 x i64> @llvm.arm.neon.vmlslu.v2i64(<2 x i64> %arg0_uint64x2_t, <2 x i32> %arg1_uint32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %1
-}
-
-declare <8 x i16> @llvm.arm.neon.vmlsls.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmlsls.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmlsls.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vmlslu.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmlslu.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmlslu.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vmov.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vmov.ll
deleted file mode 100644
index e4368d6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vmov.ll
+++ /dev/null
@@ -1,323 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @v_movi8() nounwind {
-;CHECK: v_movi8:
-;CHECK: vmov.i8
- ret <8 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
-}
-
-define <4 x i16> @v_movi16a() nounwind {
-;CHECK: v_movi16a:
-;CHECK: vmov.i16
- ret <4 x i16> < i16 16, i16 16, i16 16, i16 16 >
-}
-
-; 0x1000 = 4096
-define <4 x i16> @v_movi16b() nounwind {
-;CHECK: v_movi16b:
-;CHECK: vmov.i16
- ret <4 x i16> < i16 4096, i16 4096, i16 4096, i16 4096 >
-}
-
-define <2 x i32> @v_movi32a() nounwind {
-;CHECK: v_movi32a:
-;CHECK: vmov.i32
- ret <2 x i32> < i32 32, i32 32 >
-}
-
-; 0x2000 = 8192
-define <2 x i32> @v_movi32b() nounwind {
-;CHECK: v_movi32b:
-;CHECK: vmov.i32
- ret <2 x i32> < i32 8192, i32 8192 >
-}
-
-; 0x200000 = 2097152
-define <2 x i32> @v_movi32c() nounwind {
-;CHECK: v_movi32c:
-;CHECK: vmov.i32
- ret <2 x i32> < i32 2097152, i32 2097152 >
-}
-
-; 0x20000000 = 536870912
-define <2 x i32> @v_movi32d() nounwind {
-;CHECK: v_movi32d:
-;CHECK: vmov.i32
- ret <2 x i32> < i32 536870912, i32 536870912 >
-}
-
-; 0x20ff = 8447
-define <2 x i32> @v_movi32e() nounwind {
-;CHECK: v_movi32e:
-;CHECK: vmov.i32
- ret <2 x i32> < i32 8447, i32 8447 >
-}
-
-; 0x20ffff = 2162687
-define <2 x i32> @v_movi32f() nounwind {
-;CHECK: v_movi32f:
-;CHECK: vmov.i32
- ret <2 x i32> < i32 2162687, i32 2162687 >
-}
-
-; 0xff0000ff0000ffff = 18374687574888349695
-define <1 x i64> @v_movi64() nounwind {
-;CHECK: v_movi64:
-;CHECK: vmov.i64
- ret <1 x i64> < i64 18374687574888349695 >
-}
-
-define <16 x i8> @v_movQi8() nounwind {
-;CHECK: v_movQi8:
-;CHECK: vmov.i8
- ret <16 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
-}
-
-define <8 x i16> @v_movQi16a() nounwind {
-;CHECK: v_movQi16a:
-;CHECK: vmov.i16
- ret <8 x i16> < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
-}
-
-; 0x1000 = 4096
-define <8 x i16> @v_movQi16b() nounwind {
-;CHECK: v_movQi16b:
-;CHECK: vmov.i16
- ret <8 x i16> < i16 4096, i16 4096, i16 4096, i16 4096, i16 4096, i16 4096, i16 4096, i16 4096 >
-}
-
-define <4 x i32> @v_movQi32a() nounwind {
-;CHECK: v_movQi32a:
-;CHECK: vmov.i32
- ret <4 x i32> < i32 32, i32 32, i32 32, i32 32 >
-}
-
-; 0x2000 = 8192
-define <4 x i32> @v_movQi32b() nounwind {
-;CHECK: v_movQi32b:
-;CHECK: vmov.i32
- ret <4 x i32> < i32 8192, i32 8192, i32 8192, i32 8192 >
-}
-
-; 0x200000 = 2097152
-define <4 x i32> @v_movQi32c() nounwind {
-;CHECK: v_movQi32c:
-;CHECK: vmov.i32
- ret <4 x i32> < i32 2097152, i32 2097152, i32 2097152, i32 2097152 >
-}
-
-; 0x20000000 = 536870912
-define <4 x i32> @v_movQi32d() nounwind {
-;CHECK: v_movQi32d:
-;CHECK: vmov.i32
- ret <4 x i32> < i32 536870912, i32 536870912, i32 536870912, i32 536870912 >
-}
-
-; 0x20ff = 8447
-define <4 x i32> @v_movQi32e() nounwind {
-;CHECK: v_movQi32e:
-;CHECK: vmov.i32
- ret <4 x i32> < i32 8447, i32 8447, i32 8447, i32 8447 >
-}
-
-; 0x20ffff = 2162687
-define <4 x i32> @v_movQi32f() nounwind {
-;CHECK: v_movQi32f:
-;CHECK: vmov.i32
- ret <4 x i32> < i32 2162687, i32 2162687, i32 2162687, i32 2162687 >
-}
-
-; 0xff0000ff0000ffff = 18374687574888349695
-define <2 x i64> @v_movQi64() nounwind {
-;CHECK: v_movQi64:
-;CHECK: vmov.i64
- ret <2 x i64> < i64 18374687574888349695, i64 18374687574888349695 >
-}
-
-; Check for correct assembler printing for immediate values.
-%struct.int8x8_t = type { <8 x i8> }
-define arm_apcscc void @vdupn128(%struct.int8x8_t* noalias nocapture sret %agg.result) nounwind {
-entry:
-;CHECK: vdupn128:
-;CHECK: vmov.i8 d0, #0x80
- %0 = getelementptr inbounds %struct.int8x8_t* %agg.result, i32 0, i32 0 ; <<8 x i8>*> [#uses=1]
- store <8 x i8> <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>, <8 x i8>* %0, align 8
- ret void
-}
-
-define arm_apcscc void @vdupnneg75(%struct.int8x8_t* noalias nocapture sret %agg.result) nounwind {
-entry:
-;CHECK: vdupnneg75:
-;CHECK: vmov.i8 d0, #0xB5
- %0 = getelementptr inbounds %struct.int8x8_t* %agg.result, i32 0, i32 0 ; <<8 x i8>*> [#uses=1]
- store <8 x i8> <i8 -75, i8 -75, i8 -75, i8 -75, i8 -75, i8 -75, i8 -75, i8 -75>, <8 x i8>* %0, align 8
- ret void
-}
-
-define <8 x i16> @vmovls8(<8 x i8>* %A) nounwind {
-;CHECK: vmovls8:
-;CHECK: vmovl.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vmovls.v8i16(<8 x i8> %tmp1)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vmovls16(<4 x i16>* %A) nounwind {
-;CHECK: vmovls16:
-;CHECK: vmovl.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16> %tmp1)
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vmovls32(<2 x i32>* %A) nounwind {
-;CHECK: vmovls32:
-;CHECK: vmovl.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vmovls.v2i64(<2 x i32> %tmp1)
- ret <2 x i64> %tmp2
-}
-
-define <8 x i16> @vmovlu8(<8 x i8>* %A) nounwind {
-;CHECK: vmovlu8:
-;CHECK: vmovl.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vmovlu.v8i16(<8 x i8> %tmp1)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vmovlu16(<4 x i16>* %A) nounwind {
-;CHECK: vmovlu16:
-;CHECK: vmovl.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vmovlu.v4i32(<4 x i16> %tmp1)
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vmovlu32(<2 x i32>* %A) nounwind {
-;CHECK: vmovlu32:
-;CHECK: vmovl.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vmovlu.v2i64(<2 x i32> %tmp1)
- ret <2 x i64> %tmp2
-}
-
-declare <8 x i16> @llvm.arm.neon.vmovls.v8i16(<8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmovls.v2i64(<2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vmovlu.v8i16(<8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmovlu.v4i32(<4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmovlu.v2i64(<2 x i32>) nounwind readnone
-
-define <8 x i8> @vmovni16(<8 x i16>* %A) nounwind {
-;CHECK: vmovni16:
-;CHECK: vmovn.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vmovn.v8i8(<8 x i16> %tmp1)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vmovni32(<4 x i32>* %A) nounwind {
-;CHECK: vmovni32:
-;CHECK: vmovn.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vmovn.v4i16(<4 x i32> %tmp1)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vmovni64(<2 x i64>* %A) nounwind {
-;CHECK: vmovni64:
-;CHECK: vmovn.i64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vmovn.v2i32(<2 x i64> %tmp1)
- ret <2 x i32> %tmp2
-}
-
-declare <8 x i8> @llvm.arm.neon.vmovn.v8i8(<8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vmovn.v4i16(<4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vmovn.v2i32(<2 x i64>) nounwind readnone
-
-define <8 x i8> @vqmovns16(<8 x i16>* %A) nounwind {
-;CHECK: vqmovns16:
-;CHECK: vqmovn.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vqmovns.v8i8(<8 x i16> %tmp1)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vqmovns32(<4 x i32>* %A) nounwind {
-;CHECK: vqmovns32:
-;CHECK: vqmovn.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vqmovns.v4i16(<4 x i32> %tmp1)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vqmovns64(<2 x i64>* %A) nounwind {
-;CHECK: vqmovns64:
-;CHECK: vqmovn.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vqmovns.v2i32(<2 x i64> %tmp1)
- ret <2 x i32> %tmp2
-}
-
-define <8 x i8> @vqmovnu16(<8 x i16>* %A) nounwind {
-;CHECK: vqmovnu16:
-;CHECK: vqmovn.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16> %tmp1)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vqmovnu32(<4 x i32>* %A) nounwind {
-;CHECK: vqmovnu32:
-;CHECK: vqmovn.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vqmovnu.v4i16(<4 x i32> %tmp1)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vqmovnu64(<2 x i64>* %A) nounwind {
-;CHECK: vqmovnu64:
-;CHECK: vqmovn.u64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vqmovnu.v2i32(<2 x i64> %tmp1)
- ret <2 x i32> %tmp2
-}
-
-define <8 x i8> @vqmovuns16(<8 x i16>* %A) nounwind {
-;CHECK: vqmovuns16:
-;CHECK: vqmovun.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vqmovnsu.v8i8(<8 x i16> %tmp1)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vqmovuns32(<4 x i32>* %A) nounwind {
-;CHECK: vqmovuns32:
-;CHECK: vqmovun.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vqmovnsu.v4i16(<4 x i32> %tmp1)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vqmovuns64(<2 x i64>* %A) nounwind {
-;CHECK: vqmovuns64:
-;CHECK: vqmovun.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vqmovnsu.v2i32(<2 x i64> %tmp1)
- ret <2 x i32> %tmp2
-}
-
-declare <8 x i8> @llvm.arm.neon.vqmovns.v8i8(<8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqmovns.v4i16(<4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqmovns.v2i32(<2 x i64>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqmovnu.v4i16(<4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqmovnu.v2i32(<2 x i64>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vqmovnsu.v8i8(<8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqmovnsu.v4i16(<4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqmovnsu.v2i32(<2 x i64>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vmul.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vmul.ll
deleted file mode 100644
index 325da5d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vmul.ll
+++ /dev/null
@@ -1,257 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vmuli8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmuli8:
-;CHECK: vmul.i8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = mul <8 x i8> %tmp1, %tmp2
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vmuli16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vmuli16:
-;CHECK: vmul.i16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = mul <4 x i16> %tmp1, %tmp2
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vmuli32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vmuli32:
-;CHECK: vmul.i32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = mul <2 x i32> %tmp1, %tmp2
- ret <2 x i32> %tmp3
-}
-
-define <2 x float> @vmulf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vmulf32:
-;CHECK: vmul.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = mul <2 x float> %tmp1, %tmp2
- ret <2 x float> %tmp3
-}
-
-define <8 x i8> @vmulp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmulp8:
-;CHECK: vmul.p8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vmulp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <16 x i8> @vmulQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vmulQi8:
-;CHECK: vmul.i8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = mul <16 x i8> %tmp1, %tmp2
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vmulQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vmulQi16:
-;CHECK: vmul.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = mul <8 x i16> %tmp1, %tmp2
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vmulQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vmulQi32:
-;CHECK: vmul.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = mul <4 x i32> %tmp1, %tmp2
- ret <4 x i32> %tmp3
-}
-
-define <4 x float> @vmulQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vmulQf32:
-;CHECK: vmul.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = mul <4 x float> %tmp1, %tmp2
- ret <4 x float> %tmp3
-}
-
-define <16 x i8> @vmulQp8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vmulQp8:
-;CHECK: vmul.p8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vmulp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vmulp.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <16 x i8> @llvm.arm.neon.vmulp.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-
-define arm_aapcs_vfpcc <2 x float> @test_vmul_lanef32(<2 x float> %arg0_float32x2_t, <2 x float> %arg1_float32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vmul_lanef32:
-; CHECK: vmul.f32 d0, d0, d1[0]
- %0 = shufflevector <2 x float> %arg1_float32x2_t, <2 x float> undef, <2 x i32> zeroinitializer ; <<2 x float>> [#uses=1]
- %1 = fmul <2 x float> %0, %arg0_float32x2_t ; <<2 x float>> [#uses=1]
- ret <2 x float> %1
-}
-
-define arm_aapcs_vfpcc <4 x i16> @test_vmul_lanes16(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
-entry:
-; CHECK: test_vmul_lanes16:
-; CHECK: vmul.i16 d0, d0, d1[1]
- %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
- %1 = mul <4 x i16> %0, %arg0_int16x4_t ; <<4 x i16>> [#uses=1]
- ret <4 x i16> %1
-}
-
-define arm_aapcs_vfpcc <2 x i32> @test_vmul_lanes32(<2 x i32> %arg0_int32x2_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vmul_lanes32:
-; CHECK: vmul.i32 d0, d0, d1[1]
- %0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
- %1 = mul <2 x i32> %0, %arg0_int32x2_t ; <<2 x i32>> [#uses=1]
- ret <2 x i32> %1
-}
-
-define arm_aapcs_vfpcc <4 x float> @test_vmulQ_lanef32(<4 x float> %arg0_float32x4_t, <2 x float> %arg1_float32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vmulQ_lanef32:
-; CHECK: vmul.f32 q0, q0, d2[1]
- %0 = shufflevector <2 x float> %arg1_float32x2_t, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x float>> [#uses=1]
- %1 = fmul <4 x float> %0, %arg0_float32x4_t ; <<4 x float>> [#uses=1]
- ret <4 x float> %1
-}
-
-define arm_aapcs_vfpcc <8 x i16> @test_vmulQ_lanes16(<8 x i16> %arg0_int16x8_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
-entry:
-; CHECK: test_vmulQ_lanes16:
-; CHECK: vmul.i16 q0, q0, d2[1]
- %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
- %1 = mul <8 x i16> %0, %arg0_int16x8_t ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %1
-}
-
-define arm_aapcs_vfpcc <4 x i32> @test_vmulQ_lanes32(<4 x i32> %arg0_int32x4_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vmulQ_lanes32:
-; CHECK: vmul.i32 q0, q0, d2[1]
- %0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i32>> [#uses=1]
- %1 = mul <4 x i32> %0, %arg0_int32x4_t ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %1
-}
-
-define <8 x i16> @vmulls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmulls8:
-;CHECK: vmull.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vmulls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vmulls16:
-;CHECK: vmull.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vmulls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vmulls32:
-;CHECK: vmull.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define <8 x i16> @vmullu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmullu8:
-;CHECK: vmull.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vmullu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vmullu16:
-;CHECK: vmull.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vmullu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vmullu32:
-;CHECK: vmull.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define <8 x i16> @vmullp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vmullp8:
-;CHECK: vmull.p8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define arm_aapcs_vfpcc <4 x i32> @test_vmull_lanes16(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
-entry:
-; CHECK: test_vmull_lanes16
-; CHECK: vmull.s16 q0, d0, d1[1]
- %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
- %1 = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %arg0_int16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %1
-}
-
-define arm_aapcs_vfpcc <2 x i64> @test_vmull_lanes32(<2 x i32> %arg0_int32x2_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vmull_lanes32
-; CHECK: vmull.s32 q0, d0, d1[1]
- %0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
- %1 = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %arg0_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %1
-}
-
-define arm_aapcs_vfpcc <4 x i32> @test_vmull_laneu16(<4 x i16> %arg0_uint16x4_t, <4 x i16> %arg1_uint16x4_t) nounwind readnone {
-entry:
-; CHECK: test_vmull_laneu16
-; CHECK: vmull.u16 q0, d0, d1[1]
- %0 = shufflevector <4 x i16> %arg1_uint16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
- %1 = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %arg0_uint16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %1
-}
-
-define arm_aapcs_vfpcc <2 x i64> @test_vmull_laneu32(<2 x i32> %arg0_uint32x2_t, <2 x i32> %arg1_uint32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vmull_laneu32
-; CHECK: vmull.u32 q0, d0, d1[1]
- %0 = shufflevector <2 x i32> %arg1_uint32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
- %1 = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %arg0_uint32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %1
-}
-
-declare <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vneg.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vneg.ll
deleted file mode 100644
index 7764e87..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vneg.ll
+++ /dev/null
@@ -1,121 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vnegs8(<8 x i8>* %A) nounwind {
-;CHECK: vnegs8:
-;CHECK: vneg.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = sub <8 x i8> zeroinitializer, %tmp1
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vnegs16(<4 x i16>* %A) nounwind {
-;CHECK: vnegs16:
-;CHECK: vneg.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = sub <4 x i16> zeroinitializer, %tmp1
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vnegs32(<2 x i32>* %A) nounwind {
-;CHECK: vnegs32:
-;CHECK: vneg.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = sub <2 x i32> zeroinitializer, %tmp1
- ret <2 x i32> %tmp2
-}
-
-define <2 x float> @vnegf32(<2 x float>* %A) nounwind {
-;CHECK: vnegf32:
-;CHECK: vneg.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = sub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
- ret <2 x float> %tmp2
-}
-
-define <16 x i8> @vnegQs8(<16 x i8>* %A) nounwind {
-;CHECK: vnegQs8:
-;CHECK: vneg.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = sub <16 x i8> zeroinitializer, %tmp1
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vnegQs16(<8 x i16>* %A) nounwind {
-;CHECK: vnegQs16:
-;CHECK: vneg.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = sub <8 x i16> zeroinitializer, %tmp1
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vnegQs32(<4 x i32>* %A) nounwind {
-;CHECK: vnegQs32:
-;CHECK: vneg.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = sub <4 x i32> zeroinitializer, %tmp1
- ret <4 x i32> %tmp2
-}
-
-define <4 x float> @vnegQf32(<4 x float>* %A) nounwind {
-;CHECK: vnegQf32:
-;CHECK: vneg.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = sub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp1
- ret <4 x float> %tmp2
-}
-
-define <8 x i8> @vqnegs8(<8 x i8>* %A) nounwind {
-;CHECK: vqnegs8:
-;CHECK: vqneg.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vqneg.v8i8(<8 x i8> %tmp1)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vqnegs16(<4 x i16>* %A) nounwind {
-;CHECK: vqnegs16:
-;CHECK: vqneg.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vqneg.v4i16(<4 x i16> %tmp1)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vqnegs32(<2 x i32>* %A) nounwind {
-;CHECK: vqnegs32:
-;CHECK: vqneg.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vqneg.v2i32(<2 x i32> %tmp1)
- ret <2 x i32> %tmp2
-}
-
-define <16 x i8> @vqnegQs8(<16 x i8>* %A) nounwind {
-;CHECK: vqnegQs8:
-;CHECK: vqneg.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = call <16 x i8> @llvm.arm.neon.vqneg.v16i8(<16 x i8> %tmp1)
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vqnegQs16(<8 x i16>* %A) nounwind {
-;CHECK: vqnegQs16:
-;CHECK: vqneg.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vqneg.v8i16(<8 x i16> %tmp1)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vqnegQs32(<4 x i32>* %A) nounwind {
-;CHECK: vqnegQs32:
-;CHECK: vqneg.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vqneg.v4i32(<4 x i32> %tmp1)
- ret <4 x i32> %tmp2
-}
-
-declare <8 x i8> @llvm.arm.neon.vqneg.v8i8(<8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqneg.v4i16(<4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqneg.v2i32(<2 x i32>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vqneg.v16i8(<16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vqneg.v8i16(<8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vqneg.v4i32(<4 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vpadal.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vpadal.ll
deleted file mode 100644
index 7296e93..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vpadal.ll
+++ /dev/null
@@ -1,125 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <4 x i16> @vpadals8(<4 x i16>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vpadals8:
-;CHECK: vpadal.s8
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16> %tmp1, <8 x i8> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vpadals16(<2 x i32>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vpadals16:
-;CHECK: vpadal.s16
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32> %tmp1, <4 x i16> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vpadals32(<1 x i64>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vpadals32:
-;CHECK: vpadal.s32
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vpadals.v1i64.v2i32(<1 x i64> %tmp1, <2 x i32> %tmp2)
- ret <1 x i64> %tmp3
-}
-
-define <4 x i16> @vpadalu8(<4 x i16>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vpadalu8:
-;CHECK: vpadal.u8
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16> %tmp1, <8 x i8> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vpadalu16(<2 x i32>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vpadalu16:
-;CHECK: vpadal.u16
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32> %tmp1, <4 x i16> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vpadalu32(<1 x i64>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vpadalu32:
-;CHECK: vpadal.u32
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vpadalu.v1i64.v2i32(<1 x i64> %tmp1, <2 x i32> %tmp2)
- ret <1 x i64> %tmp3
-}
-
-define <8 x i16> @vpadalQs8(<8 x i16>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vpadalQs8:
-;CHECK: vpadal.s8
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vpadals.v8i16.v16i8(<8 x i16> %tmp1, <16 x i8> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vpadalQs16(<4 x i32>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vpadalQs16:
-;CHECK: vpadal.s16
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vpadals.v4i32.v8i16(<4 x i32> %tmp1, <8 x i16> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vpadalQs32(<2 x i64>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vpadalQs32:
-;CHECK: vpadal.s32
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64> %tmp1, <4 x i32> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define <8 x i16> @vpadalQu8(<8 x i16>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vpadalQu8:
-;CHECK: vpadal.u8
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vpadalu.v8i16.v16i8(<8 x i16> %tmp1, <16 x i8> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vpadalQu16(<4 x i32>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vpadalQu16:
-;CHECK: vpadal.u16
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vpadalu.v4i32.v8i16(<4 x i32> %tmp1, <8 x i16> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vpadalQu32(<2 x i64>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vpadalQu32:
-;CHECK: vpadal.u32
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64> %tmp1, <4 x i32> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-declare <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16>, <8 x i8>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32>, <4 x i16>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vpadals.v1i64.v2i32(<1 x i64>, <2 x i32>) nounwind readnone
-
-declare <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16>, <8 x i8>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32>, <4 x i16>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vpadalu.v1i64.v2i32(<1 x i64>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vpadals.v8i16.v16i8(<8 x i16>, <16 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vpadals.v4i32.v8i16(<4 x i32>, <8 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64>, <4 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vpadalu.v8i16.v16i8(<8 x i16>, <16 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vpadalu.v4i32.v8i16(<4 x i32>, <8 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64>, <4 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vpadd.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vpadd.ll
deleted file mode 100644
index 2125573..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vpadd.ll
+++ /dev/null
@@ -1,155 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vpaddi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vpaddi8:
-;CHECK: vpadd.i8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vpadd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vpaddi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vpaddi16:
-;CHECK: vpadd.i16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vpaddi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vpaddi32:
-;CHECK: vpadd.i32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <2 x float> @vpaddf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vpaddf32:
-;CHECK: vpadd.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = call <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
- ret <2 x float> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vpadd.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vpadd.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float>, <2 x float>) nounwind readnone
-
-define <4 x i16> @vpaddls8(<8 x i8>* %A) nounwind {
-;CHECK: vpaddls8:
-;CHECK: vpaddl.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8> %tmp1)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vpaddls16(<4 x i16>* %A) nounwind {
-;CHECK: vpaddls16:
-;CHECK: vpaddl.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16> %tmp1)
- ret <2 x i32> %tmp2
-}
-
-define <1 x i64> @vpaddls32(<2 x i32>* %A) nounwind {
-;CHECK: vpaddls32:
-;CHECK: vpaddl.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <1 x i64> @llvm.arm.neon.vpaddls.v1i64.v2i32(<2 x i32> %tmp1)
- ret <1 x i64> %tmp2
-}
-
-define <4 x i16> @vpaddlu8(<8 x i8>* %A) nounwind {
-;CHECK: vpaddlu8:
-;CHECK: vpaddl.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vpaddlu.v4i16.v8i8(<8 x i8> %tmp1)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vpaddlu16(<4 x i16>* %A) nounwind {
-;CHECK: vpaddlu16:
-;CHECK: vpaddl.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16> %tmp1)
- ret <2 x i32> %tmp2
-}
-
-define <1 x i64> @vpaddlu32(<2 x i32>* %A) nounwind {
-;CHECK: vpaddlu32:
-;CHECK: vpaddl.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <1 x i64> @llvm.arm.neon.vpaddlu.v1i64.v2i32(<2 x i32> %tmp1)
- ret <1 x i64> %tmp2
-}
-
-define <8 x i16> @vpaddlQs8(<16 x i8>* %A) nounwind {
-;CHECK: vpaddlQs8:
-;CHECK: vpaddl.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8> %tmp1)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vpaddlQs16(<8 x i16>* %A) nounwind {
-;CHECK: vpaddlQs16:
-;CHECK: vpaddl.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16> %tmp1)
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vpaddlQs32(<4 x i32>* %A) nounwind {
-;CHECK: vpaddlQs32:
-;CHECK: vpaddl.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32> %tmp1)
- ret <2 x i64> %tmp2
-}
-
-define <8 x i16> @vpaddlQu8(<16 x i8>* %A) nounwind {
-;CHECK: vpaddlQu8:
-;CHECK: vpaddl.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8> %tmp1)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vpaddlQu16(<8 x i16>* %A) nounwind {
-;CHECK: vpaddlQu16:
-;CHECK: vpaddl.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16> %tmp1)
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vpaddlQu32(<4 x i32>* %A) nounwind {
-;CHECK: vpaddlQu32:
-;CHECK: vpaddl.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %tmp1)
- ret <2 x i64> %tmp2
-}
-
-declare <4 x i16> @llvm.arm.neon.vpaddls.v4i16.v8i8(<8 x i8>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vpaddls.v2i32.v4i16(<4 x i16>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vpaddls.v1i64.v2i32(<2 x i32>) nounwind readnone
-
-declare <4 x i16> @llvm.arm.neon.vpaddlu.v4i16.v8i8(<8 x i8>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vpaddlu.v2i32.v4i16(<4 x i16>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vpaddlu.v1i64.v2i32(<2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vpaddls.v8i16.v16i8(<16 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vpaddls.v4i32.v8i16(<8 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vpaddlu.v8i16.v16i8(<16 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vpaddlu.v4i32.v8i16(<8 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vpminmax.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vpminmax.ll
deleted file mode 100644
index b75bcc9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vpminmax.ll
+++ /dev/null
@@ -1,147 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vpmins8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vpmins8:
-;CHECK: vpmin.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vpmins16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vpmins16:
-;CHECK: vpmin.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vpmins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vpmins32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vpmins32:
-;CHECK: vpmin.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vpmins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <8 x i8> @vpminu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vpminu8:
-;CHECK: vpmin.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vpminu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vpminu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vpminu16:
-;CHECK: vpmin.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vpminu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vpminu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vpminu32:
-;CHECK: vpmin.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vpminu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <2 x float> @vpminf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vpminf32:
-;CHECK: vpmin.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = call <2 x float> @llvm.arm.neon.vpmins.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
- ret <2 x float> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vpmins.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vpmins.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vpmins.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vpminu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vpminu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vpminu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <2 x float> @llvm.arm.neon.vpmins.v2f32(<2 x float>, <2 x float>) nounwind readnone
-
-define <8 x i8> @vpmaxs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vpmaxs8:
-;CHECK: vpmax.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vpmaxs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vpmaxs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vpmaxs16:
-;CHECK: vpmax.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vpmaxs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vpmaxs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vpmaxs32:
-;CHECK: vpmax.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vpmaxs.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <8 x i8> @vpmaxu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vpmaxu8:
-;CHECK: vpmax.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vpmaxu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vpmaxu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vpmaxu16:
-;CHECK: vpmax.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vpmaxu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vpmaxu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vpmaxu32:
-;CHECK: vpmax.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vpmaxu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <2 x float> @vpmaxf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vpmaxf32:
-;CHECK: vpmax.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = call <2 x float> @llvm.arm.neon.vpmaxs.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
- ret <2 x float> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vpmaxs.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vpmaxs.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vpmaxs.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vpmaxu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vpmaxu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vpmaxu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <2 x float> @llvm.arm.neon.vpmaxs.v2f32(<2 x float>, <2 x float>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vqadd.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vqadd.ll
deleted file mode 100644
index a1669b6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vqadd.ll
+++ /dev/null
@@ -1,165 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vqadds8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vqadds8:
-;CHECK: vqadd.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vqadds.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vqadds16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqadds16:
-;CHECK: vqadd.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vqadds32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqadds32:
-;CHECK: vqadd.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vqadds.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vqadds64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vqadds64:
-;CHECK: vqadd.s64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vqadds.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
- ret <1 x i64> %tmp3
-}
-
-define <8 x i8> @vqaddu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vqaddu8:
-;CHECK: vqadd.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vqaddu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vqaddu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqaddu16:
-;CHECK: vqadd.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vqaddu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vqaddu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqaddu32:
-;CHECK: vqadd.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vqaddu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vqaddu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vqaddu64:
-;CHECK: vqadd.u64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vqaddu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
- ret <1 x i64> %tmp3
-}
-
-define <16 x i8> @vqaddQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vqaddQs8:
-;CHECK: vqadd.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vqadds.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vqaddQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqaddQs16:
-;CHECK: vqadd.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vqaddQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqaddQs32:
-;CHECK: vqadd.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vqaddQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vqaddQs64:
-;CHECK: vqadd.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define <16 x i8> @vqaddQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vqaddQu8:
-;CHECK: vqadd.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vqaddu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vqaddQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqaddQu16:
-;CHECK: vqadd.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vqaddu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vqaddQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqaddQu32:
-;CHECK: vqadd.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vqaddu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vqaddQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vqaddQu64:
-;CHECK: vqadd.u64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vqadds.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqadds.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vqadds.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vqaddu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqaddu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqaddu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vqaddu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vqadds.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vqaddu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vqaddu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vqaddu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vqdmul.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vqdmul.ll
deleted file mode 100644
index 8dcc7f7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vqdmul.ll
+++ /dev/null
@@ -1,281 +0,0 @@
-; RUN: llc -mattr=+neon < %s | FileCheck %s
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
-target triple = "thumbv7-elf"
-
-define <4 x i16> @vqdmulhs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqdmulhs16:
-;CHECK: vqdmulh.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vqdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vqdmulhs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqdmulhs32:
-;CHECK: vqdmulh.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vqdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <8 x i16> @vqdmulhQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqdmulhQs16:
-;CHECK: vqdmulh.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vqdmulh.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vqdmulhQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqdmulhQs32:
-;CHECK: vqdmulh.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vqdmulh.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define arm_aapcs_vfpcc <8 x i16> @test_vqdmulhQ_lanes16(<8 x i16> %arg0_int16x8_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
-entry:
-; CHECK: test_vqdmulhQ_lanes16
-; CHECK: vqdmulh.s16 q0, q0, d2[1]
- %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> ; <<8 x i16>> [#uses=1]
- %1 = tail call <8 x i16> @llvm.arm.neon.vqdmulh.v8i16(<8 x i16> %arg0_int16x8_t, <8 x i16> %0) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %1
-}
-
-define arm_aapcs_vfpcc <4 x i32> @test_vqdmulhQ_lanes32(<4 x i32> %arg0_int32x4_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vqdmulhQ_lanes32
-; CHECK: vqdmulh.s32 q0, q0, d2[1]
- %0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i32>> [#uses=1]
- %1 = tail call <4 x i32> @llvm.arm.neon.vqdmulh.v4i32(<4 x i32> %arg0_int32x4_t, <4 x i32> %0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %1
-}
-
-define arm_aapcs_vfpcc <4 x i16> @test_vqdmulh_lanes16(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
-entry:
-; CHECK: test_vqdmulh_lanes16
-; CHECK: vqdmulh.s16 d0, d0, d1[1]
- %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
- %1 = tail call <4 x i16> @llvm.arm.neon.vqdmulh.v4i16(<4 x i16> %arg0_int16x4_t, <4 x i16> %0) ; <<4 x i16>> [#uses=1]
- ret <4 x i16> %1
-}
-
-define arm_aapcs_vfpcc <2 x i32> @test_vqdmulh_lanes32(<2 x i32> %arg0_int32x2_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vqdmulh_lanes32
-; CHECK: vqdmulh.s32 d0, d0, d1[1]
- %0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
- %1 = tail call <2 x i32> @llvm.arm.neon.vqdmulh.v2i32(<2 x i32> %arg0_int32x2_t, <2 x i32> %0) ; <<2 x i32>> [#uses=1]
- ret <2 x i32> %1
-}
-
-declare <4 x i16> @llvm.arm.neon.vqdmulh.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqdmulh.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vqdmulh.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vqdmulh.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-
-define <4 x i16> @vqrdmulhs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqrdmulhs16:
-;CHECK: vqrdmulh.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vqrdmulhs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqrdmulhs32:
-;CHECK: vqrdmulh.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <8 x i16> @vqrdmulhQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqrdmulhQs16:
-;CHECK: vqrdmulh.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vqrdmulhQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqrdmulhQs32:
-;CHECK: vqrdmulh.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define arm_aapcs_vfpcc <8 x i16> @test_vqRdmulhQ_lanes16(<8 x i16> %arg0_int16x8_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
-entry:
-; CHECK: test_vqRdmulhQ_lanes16
-; CHECK: vqrdmulh.s16 q0, q0, d2[1]
- %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> ; <<8 x i16>> [#uses=1]
- %1 = tail call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> %arg0_int16x8_t, <8 x i16> %0) ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %1
-}
-
-define arm_aapcs_vfpcc <4 x i32> @test_vqRdmulhQ_lanes32(<4 x i32> %arg0_int32x4_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vqRdmulhQ_lanes32
-; CHECK: vqrdmulh.s32 q0, q0, d2[1]
- %0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i32>> [#uses=1]
- %1 = tail call <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32> %arg0_int32x4_t, <4 x i32> %0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %1
-}
-
-define arm_aapcs_vfpcc <4 x i16> @test_vqRdmulh_lanes16(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
-entry:
-; CHECK: test_vqRdmulh_lanes16
-; CHECK: vqrdmulh.s16 d0, d0, d1[1]
- %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
- %1 = tail call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> %arg0_int16x4_t, <4 x i16> %0) ; <<4 x i16>> [#uses=1]
- ret <4 x i16> %1
-}
-
-define arm_aapcs_vfpcc <2 x i32> @test_vqRdmulh_lanes32(<2 x i32> %arg0_int32x2_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vqRdmulh_lanes32
-; CHECK: vqrdmulh.s32 d0, d0, d1[1]
- %0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
- %1 = tail call <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32> %arg0_int32x2_t, <2 x i32> %0) ; <<2 x i32>> [#uses=1]
- ret <2 x i32> %1
-}
-
-declare <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-
-define <4 x i32> @vqdmulls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqdmulls16:
-;CHECK: vqdmull.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vqdmulls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqdmulls32:
-;CHECK: vqdmull.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define arm_aapcs_vfpcc <4 x i32> @test_vqdmull_lanes16(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
-entry:
-; CHECK: test_vqdmull_lanes16
-; CHECK: vqdmull.s16 q0, d0, d1[1]
- %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
- %1 = tail call <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16> %arg0_int16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %1
-}
-
-define arm_aapcs_vfpcc <2 x i64> @test_vqdmull_lanes32(<2 x i32> %arg0_int32x2_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vqdmull_lanes32
-; CHECK: vqdmull.s32 q0, d0, d1[1]
- %0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
- %1 = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %arg0_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %1
-}
-
-declare <4 x i32> @llvm.arm.neon.vqdmull.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
-define <4 x i32> @vqdmlals16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vqdmlals16:
-;CHECK: vqdmlal.s16
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
- %tmp4 = call <4 x i32> @llvm.arm.neon.vqdmlal.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
- ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @vqdmlals32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vqdmlals32:
-;CHECK: vqdmlal.s32
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
- %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
- ret <2 x i64> %tmp4
-}
-
-define arm_aapcs_vfpcc <4 x i32> @test_vqdmlal_lanes16(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %arg2_int16x4_t) nounwind readnone {
-entry:
-; CHECK: test_vqdmlal_lanes16
-; CHECK: vqdmlal.s16 q0, d2, d3[1]
- %0 = shufflevector <4 x i16> %arg2_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
- %1 = tail call <4 x i32> @llvm.arm.neon.vqdmlal.v4i32(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %1
-}
-
-define arm_aapcs_vfpcc <2 x i64> @test_vqdmlal_lanes32(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %arg2_int32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vqdmlal_lanes32
-; CHECK: vqdmlal.s32 q0, d2, d3[1]
- %0 = shufflevector <2 x i32> %arg2_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
- %1 = tail call <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %1
-}
-
-declare <4 x i32> @llvm.arm.neon.vqdmlal.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
-
-define <4 x i32> @vqdmlsls16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
-;CHECK: vqdmlsls16:
-;CHECK: vqdmlsl.s16
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
- %tmp4 = call <4 x i32> @llvm.arm.neon.vqdmlsl.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
- ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @vqdmlsls32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
-;CHECK: vqdmlsls32:
-;CHECK: vqdmlsl.s32
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
- %tmp4 = call <2 x i64> @llvm.arm.neon.vqdmlsl.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
- ret <2 x i64> %tmp4
-}
-
-define arm_aapcs_vfpcc <4 x i32> @test_vqdmlsl_lanes16(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %arg2_int16x4_t) nounwind readnone {
-entry:
-; CHECK: test_vqdmlsl_lanes16
-; CHECK: vqdmlsl.s16 q0, d2, d3[1]
- %0 = shufflevector <4 x i16> %arg2_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
- %1 = tail call <4 x i32> @llvm.arm.neon.vqdmlsl.v4i32(<4 x i32> %arg0_int32x4_t, <4 x i16> %arg1_int16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %1
-}
-
-define arm_aapcs_vfpcc <2 x i64> @test_vqdmlsl_lanes32(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %arg2_int32x2_t) nounwind readnone {
-entry:
-; CHECK: test_vqdmlsl_lanes32
-; CHECK: vqdmlsl.s32 q0, d2, d3[1]
- %0 = shufflevector <2 x i32> %arg2_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
- %1 = tail call <2 x i64> @llvm.arm.neon.vqdmlsl.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %1
-}
-
-declare <4 x i32> @llvm.arm.neon.vqdmlsl.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vqdmlsl.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vqshl.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vqshl.ll
deleted file mode 100644
index e4d29a3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vqshl.ll
+++ /dev/null
@@ -1,531 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vqshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vqshls8:
-;CHECK: vqshl.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vqshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqshls16:
-;CHECK: vqshl.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vqshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqshls32:
-;CHECK: vqshl.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vqshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vqshls64:
-;CHECK: vqshl.s64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
- ret <1 x i64> %tmp3
-}
-
-define <8 x i8> @vqshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vqshlu8:
-;CHECK: vqshl.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vqshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqshlu16:
-;CHECK: vqshl.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vqshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqshlu32:
-;CHECK: vqshl.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vqshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vqshlu64:
-;CHECK: vqshl.u64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
- ret <1 x i64> %tmp3
-}
-
-define <16 x i8> @vqshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vqshlQs8:
-;CHECK: vqshl.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vqshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqshlQs16:
-;CHECK: vqshl.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vqshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqshlQs32:
-;CHECK: vqshl.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vqshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vqshlQs64:
-;CHECK: vqshl.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define <16 x i8> @vqshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vqshlQu8:
-;CHECK: vqshl.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vqshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqshlQu16:
-;CHECK: vqshl.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vqshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqshlQu32:
-;CHECK: vqshl.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vqshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vqshlQu64:
-;CHECK: vqshl.u64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define <8 x i8> @vqshls_n8(<8 x i8>* %A) nounwind {
-;CHECK: vqshls_n8:
-;CHECK: vqshl.s8{{.*#7}}
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vqshls_n16(<4 x i16>* %A) nounwind {
-;CHECK: vqshls_n16:
-;CHECK: vqshl.s16{{.*#15}}
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vqshls_n32(<2 x i32>* %A) nounwind {
-;CHECK: vqshls_n32:
-;CHECK: vqshl.s32{{.*#31}}
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
- ret <2 x i32> %tmp2
-}
-
-define <1 x i64> @vqshls_n64(<1 x i64>* %A) nounwind {
-;CHECK: vqshls_n64:
-;CHECK: vqshl.s64{{.*#63}}
- %tmp1 = load <1 x i64>* %A
- %tmp2 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
- ret <1 x i64> %tmp2
-}
-
-define <8 x i8> @vqshlu_n8(<8 x i8>* %A) nounwind {
-;CHECK: vqshlu_n8:
-;CHECK: vqshl.u8{{.*#7}}
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vqshlu_n16(<4 x i16>* %A) nounwind {
-;CHECK: vqshlu_n16:
-;CHECK: vqshl.u16{{.*#15}}
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vqshlu_n32(<2 x i32>* %A) nounwind {
-;CHECK: vqshlu_n32:
-;CHECK: vqshl.u32{{.*#31}}
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
- ret <2 x i32> %tmp2
-}
-
-define <1 x i64> @vqshlu_n64(<1 x i64>* %A) nounwind {
-;CHECK: vqshlu_n64:
-;CHECK: vqshl.u64{{.*#63}}
- %tmp1 = load <1 x i64>* %A
- %tmp2 = call <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
- ret <1 x i64> %tmp2
-}
-
-define <8 x i8> @vqshlsu_n8(<8 x i8>* %A) nounwind {
-;CHECK: vqshlsu_n8:
-;CHECK: vqshlu.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftsu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vqshlsu_n16(<4 x i16>* %A) nounwind {
-;CHECK: vqshlsu_n16:
-;CHECK: vqshlu.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftsu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vqshlsu_n32(<2 x i32>* %A) nounwind {
-;CHECK: vqshlsu_n32:
-;CHECK: vqshlu.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftsu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
- ret <2 x i32> %tmp2
-}
-
-define <1 x i64> @vqshlsu_n64(<1 x i64>* %A) nounwind {
-;CHECK: vqshlsu_n64:
-;CHECK: vqshlu.s64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = call <1 x i64> @llvm.arm.neon.vqshiftsu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
- ret <1 x i64> %tmp2
-}
-
-define <16 x i8> @vqshlQs_n8(<16 x i8>* %A) nounwind {
-;CHECK: vqshlQs_n8:
-;CHECK: vqshl.s8{{.*#7}}
- %tmp1 = load <16 x i8>* %A
- %tmp2 = call <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vqshlQs_n16(<8 x i16>* %A) nounwind {
-;CHECK: vqshlQs_n16:
-;CHECK: vqshl.s16{{.*#15}}
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vqshlQs_n32(<4 x i32>* %A) nounwind {
-;CHECK: vqshlQs_n32:
-;CHECK: vqshl.s32{{.*#31}}
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vqshlQs_n64(<2 x i64>* %A) nounwind {
-;CHECK: vqshlQs_n64:
-;CHECK: vqshl.s64{{.*#63}}
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
- ret <2 x i64> %tmp2
-}
-
-define <16 x i8> @vqshlQu_n8(<16 x i8>* %A) nounwind {
-;CHECK: vqshlQu_n8:
-;CHECK: vqshl.u8{{.*#7}}
- %tmp1 = load <16 x i8>* %A
- %tmp2 = call <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vqshlQu_n16(<8 x i16>* %A) nounwind {
-;CHECK: vqshlQu_n16:
-;CHECK: vqshl.u16{{.*#15}}
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vqshlQu_n32(<4 x i32>* %A) nounwind {
-;CHECK: vqshlQu_n32:
-;CHECK: vqshl.u32{{.*#31}}
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vqshlQu_n64(<2 x i64>* %A) nounwind {
-;CHECK: vqshlQu_n64:
-;CHECK: vqshl.u64{{.*#63}}
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
- ret <2 x i64> %tmp2
-}
-
-define <16 x i8> @vqshlQsu_n8(<16 x i8>* %A) nounwind {
-;CHECK: vqshlQsu_n8:
-;CHECK: vqshlu.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = call <16 x i8> @llvm.arm.neon.vqshiftsu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vqshlQsu_n16(<8 x i16>* %A) nounwind {
-;CHECK: vqshlQsu_n16:
-;CHECK: vqshlu.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vqshiftsu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vqshlQsu_n32(<4 x i32>* %A) nounwind {
-;CHECK: vqshlQsu_n32:
-;CHECK: vqshlu.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vqshiftsu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vqshlQsu_n64(<2 x i64>* %A) nounwind {
-;CHECK: vqshlQsu_n64:
-;CHECK: vqshlu.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vqshiftsu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
- ret <2 x i64> %tmp2
-}
-
-declare <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vqshiftsu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqshiftsu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqshiftsu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vqshiftsu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vqshiftsu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vqshiftsu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vqshiftsu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vqshiftsu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
-
-define <8 x i8> @vqrshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vqrshls8:
-;CHECK: vqrshl.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vqrshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vqrshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqrshls16:
-;CHECK: vqrshl.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vqrshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vqrshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqrshls32:
-;CHECK: vqrshl.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vqrshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vqrshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vqrshls64:
-;CHECK: vqrshl.s64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vqrshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
- ret <1 x i64> %tmp3
-}
-
-define <8 x i8> @vqrshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vqrshlu8:
-;CHECK: vqrshl.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vqrshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vqrshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqrshlu16:
-;CHECK: vqrshl.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vqrshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vqrshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqrshlu32:
-;CHECK: vqrshl.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vqrshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vqrshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vqrshlu64:
-;CHECK: vqrshl.u64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vqrshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
- ret <1 x i64> %tmp3
-}
-
-define <16 x i8> @vqrshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vqrshlQs8:
-;CHECK: vqrshl.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vqrshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vqrshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqrshlQs16:
-;CHECK: vqrshl.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vqrshifts.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vqrshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqrshlQs32:
-;CHECK: vqrshl.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vqrshifts.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vqrshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vqrshlQs64:
-;CHECK: vqrshl.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vqrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define <16 x i8> @vqrshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vqrshlQu8:
-;CHECK: vqrshl.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vqrshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vqrshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqrshlQu16:
-;CHECK: vqrshl.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vqrshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vqrshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqrshlQu32:
-;CHECK: vqrshl.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vqrshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vqrshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vqrshlQu64:
-;CHECK: vqrshl.u64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vqrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vqrshifts.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqrshifts.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqrshifts.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vqrshifts.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vqrshiftu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqrshiftu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqrshiftu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vqrshiftu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vqrshifts.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vqrshifts.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vqrshifts.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vqrshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vqrshiftu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vqrshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vqrshiftu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vqrshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vqshrn.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vqshrn.ll
deleted file mode 100644
index 5da7943..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vqshrn.ll
+++ /dev/null
@@ -1,169 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vqshrns8(<8 x i16>* %A) nounwind {
-;CHECK: vqshrns8:
-;CHECK: vqshrn.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vqshrns16(<4 x i32>* %A) nounwind {
-;CHECK: vqshrns16:
-;CHECK: vqshrn.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftns.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vqshrns32(<2 x i64>* %A) nounwind {
-;CHECK: vqshrns32:
-;CHECK: vqshrn.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftns.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
- ret <2 x i32> %tmp2
-}
-
-define <8 x i8> @vqshrnu8(<8 x i16>* %A) nounwind {
-;CHECK: vqshrnu8:
-;CHECK: vqshrn.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vqshrnu16(<4 x i32>* %A) nounwind {
-;CHECK: vqshrnu16:
-;CHECK: vqshrn.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vqshrnu32(<2 x i64>* %A) nounwind {
-;CHECK: vqshrnu32:
-;CHECK: vqshrn.u64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftnu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
- ret <2 x i32> %tmp2
-}
-
-define <8 x i8> @vqshruns8(<8 x i16>* %A) nounwind {
-;CHECK: vqshruns8:
-;CHECK: vqshrun.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vqshruns16(<4 x i32>* %A) nounwind {
-;CHECK: vqshruns16:
-;CHECK: vqshrun.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vqshruns32(<2 x i64>* %A) nounwind {
-;CHECK: vqshruns32:
-;CHECK: vqshrun.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftnsu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
- ret <2 x i32> %tmp2
-}
-
-declare <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqshiftns.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqshiftns.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqshiftnu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqshiftnsu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
-
-define <8 x i8> @vqrshrns8(<8 x i16>* %A) nounwind {
-;CHECK: vqrshrns8:
-;CHECK: vqrshrn.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vqrshrns16(<4 x i32>* %A) nounwind {
-;CHECK: vqrshrns16:
-;CHECK: vqrshrn.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftns.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vqrshrns32(<2 x i64>* %A) nounwind {
-;CHECK: vqrshrns32:
-;CHECK: vqrshrn.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vqrshiftns.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
- ret <2 x i32> %tmp2
-}
-
-define <8 x i8> @vqrshrnu8(<8 x i16>* %A) nounwind {
-;CHECK: vqrshrnu8:
-;CHECK: vqrshrn.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vqrshrnu16(<4 x i32>* %A) nounwind {
-;CHECK: vqrshrnu16:
-;CHECK: vqrshrn.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftnu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vqrshrnu32(<2 x i64>* %A) nounwind {
-;CHECK: vqrshrnu32:
-;CHECK: vqrshrn.u64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vqrshiftnu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
- ret <2 x i32> %tmp2
-}
-
-define <8 x i8> @vqrshruns8(<8 x i16>* %A) nounwind {
-;CHECK: vqrshruns8:
-;CHECK: vqrshrun.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vqrshruns16(<4 x i32>* %A) nounwind {
-;CHECK: vqrshruns16:
-;CHECK: vqrshrun.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftnsu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vqrshruns32(<2 x i64>* %A) nounwind {
-;CHECK: vqrshruns32:
-;CHECK: vqrshrun.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vqrshiftnsu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
- ret <2 x i32> %tmp2
-}
-
-declare <8 x i8> @llvm.arm.neon.vqrshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqrshiftns.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqrshiftns.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vqrshiftnu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqrshiftnu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqrshiftnu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vqrshiftnsu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqrshiftnsu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqrshiftnsu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vqsub.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vqsub.ll
deleted file mode 100644
index 4231fca..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vqsub.ll
+++ /dev/null
@@ -1,165 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vqsubs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vqsubs8:
-;CHECK: vqsub.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vqsubs.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vqsubs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqsubs16:
-;CHECK: vqsub.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vqsubs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqsubs32:
-;CHECK: vqsub.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vqsubs.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vqsubs64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vqsubs64:
-;CHECK: vqsub.s64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vqsubs.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
- ret <1 x i64> %tmp3
-}
-
-define <8 x i8> @vqsubu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vqsubu8:
-;CHECK: vqsub.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vqsubu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vqsubu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vqsubu16:
-;CHECK: vqsub.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vqsubu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vqsubu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vqsubu32:
-;CHECK: vqsub.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vqsubu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vqsubu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vqsubu64:
-;CHECK: vqsub.u64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vqsubu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
- ret <1 x i64> %tmp3
-}
-
-define <16 x i8> @vqsubQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vqsubQs8:
-;CHECK: vqsub.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vqsubs.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vqsubQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqsubQs16:
-;CHECK: vqsub.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vqsubs.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vqsubQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqsubQs32:
-;CHECK: vqsub.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vqsubQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vqsubQs64:
-;CHECK: vqsub.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define <16 x i8> @vqsubQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vqsubQu8:
-;CHECK: vqsub.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vqsubu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vqsubQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vqsubQu16:
-;CHECK: vqsub.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vqsubu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vqsubQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vqsubQu32:
-;CHECK: vqsub.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vqsubu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vqsubQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vqsubQu64:
-;CHECK: vqsub.u64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vqsubs.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqsubs.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vqsubs.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vqsubu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vqsubu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vqsubu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vqsubu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vqsubs.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vqsubs.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vqsubu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vqsubu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vqsubu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vrec.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vrec.ll
deleted file mode 100644
index 99989e9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vrec.ll
+++ /dev/null
@@ -1,119 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <2 x i32> @vrecpei32(<2 x i32>* %A) nounwind {
-;CHECK: vrecpei32:
-;CHECK: vrecpe.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vrecpe.v2i32(<2 x i32> %tmp1)
- ret <2 x i32> %tmp2
-}
-
-define <4 x i32> @vrecpeQi32(<4 x i32>* %A) nounwind {
-;CHECK: vrecpeQi32:
-;CHECK: vrecpe.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vrecpe.v4i32(<4 x i32> %tmp1)
- ret <4 x i32> %tmp2
-}
-
-define <2 x float> @vrecpef32(<2 x float>* %A) nounwind {
-;CHECK: vrecpef32:
-;CHECK: vrecpe.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = call <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float> %tmp1)
- ret <2 x float> %tmp2
-}
-
-define <4 x float> @vrecpeQf32(<4 x float>* %A) nounwind {
-;CHECK: vrecpeQf32:
-;CHECK: vrecpe.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = call <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float> %tmp1)
- ret <4 x float> %tmp2
-}
-
-declare <2 x i32> @llvm.arm.neon.vrecpe.v2i32(<2 x i32>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vrecpe.v4i32(<4 x i32>) nounwind readnone
-
-declare <2 x float> @llvm.arm.neon.vrecpe.v2f32(<2 x float>) nounwind readnone
-declare <4 x float> @llvm.arm.neon.vrecpe.v4f32(<4 x float>) nounwind readnone
-
-define <2 x float> @vrecpsf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vrecpsf32:
-;CHECK: vrecps.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = call <2 x float> @llvm.arm.neon.vrecps.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
- ret <2 x float> %tmp3
-}
-
-define <4 x float> @vrecpsQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vrecpsQf32:
-;CHECK: vrecps.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = call <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
- ret <4 x float> %tmp3
-}
-
-declare <2 x float> @llvm.arm.neon.vrecps.v2f32(<2 x float>, <2 x float>) nounwind readnone
-declare <4 x float> @llvm.arm.neon.vrecps.v4f32(<4 x float>, <4 x float>) nounwind readnone
-
-define <2 x i32> @vrsqrtei32(<2 x i32>* %A) nounwind {
-;CHECK: vrsqrtei32:
-;CHECK: vrsqrte.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vrsqrte.v2i32(<2 x i32> %tmp1)
- ret <2 x i32> %tmp2
-}
-
-define <4 x i32> @vrsqrteQi32(<4 x i32>* %A) nounwind {
-;CHECK: vrsqrteQi32:
-;CHECK: vrsqrte.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vrsqrte.v4i32(<4 x i32> %tmp1)
- ret <4 x i32> %tmp2
-}
-
-define <2 x float> @vrsqrtef32(<2 x float>* %A) nounwind {
-;CHECK: vrsqrtef32:
-;CHECK: vrsqrte.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = call <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float> %tmp1)
- ret <2 x float> %tmp2
-}
-
-define <4 x float> @vrsqrteQf32(<4 x float>* %A) nounwind {
-;CHECK: vrsqrteQf32:
-;CHECK: vrsqrte.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %tmp1)
- ret <4 x float> %tmp2
-}
-
-declare <2 x i32> @llvm.arm.neon.vrsqrte.v2i32(<2 x i32>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vrsqrte.v4i32(<4 x i32>) nounwind readnone
-
-declare <2 x float> @llvm.arm.neon.vrsqrte.v2f32(<2 x float>) nounwind readnone
-declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone
-
-define <2 x float> @vrsqrtsf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vrsqrtsf32:
-;CHECK: vrsqrts.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = call <2 x float> @llvm.arm.neon.vrsqrts.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
- ret <2 x float> %tmp3
-}
-
-define <4 x float> @vrsqrtsQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vrsqrtsQf32:
-;CHECK: vrsqrts.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = call <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
- ret <4 x float> %tmp3
-}
-
-declare <2 x float> @llvm.arm.neon.vrsqrts.v2f32(<2 x float>, <2 x float>) nounwind readnone
-declare <4 x float> @llvm.arm.neon.vrsqrts.v4f32(<4 x float>, <4 x float>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vrev.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vrev.ll
deleted file mode 100644
index f0a04a4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vrev.ll
+++ /dev/null
@@ -1,113 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define arm_apcscc <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
-;CHECK: test_vrev64D8:
-;CHECK: vrev64.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
- ret <8 x i8> %tmp2
-}
-
-define arm_apcscc <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
-;CHECK: test_vrev64D16:
-;CHECK: vrev64.16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- ret <4 x i16> %tmp2
-}
-
-define arm_apcscc <2 x i32> @test_vrev64D32(<2 x i32>* %A) nounwind {
-;CHECK: test_vrev64D32:
-;CHECK: vrev64.32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
- ret <2 x i32> %tmp2
-}
-
-define arm_apcscc <2 x float> @test_vrev64Df(<2 x float>* %A) nounwind {
-;CHECK: test_vrev64Df:
-;CHECK: vrev64.32
- %tmp1 = load <2 x float>* %A
- %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
- ret <2 x float> %tmp2
-}
-
-define arm_apcscc <16 x i8> @test_vrev64Q8(<16 x i8>* %A) nounwind {
-;CHECK: test_vrev64Q8:
-;CHECK: vrev64.8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
- ret <16 x i8> %tmp2
-}
-
-define arm_apcscc <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
-;CHECK: test_vrev64Q16:
-;CHECK: vrev64.16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
- ret <8 x i16> %tmp2
-}
-
-define arm_apcscc <4 x i32> @test_vrev64Q32(<4 x i32>* %A) nounwind {
-;CHECK: test_vrev64Q32:
-;CHECK: vrev64.32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
- ret <4 x i32> %tmp2
-}
-
-define arm_apcscc <4 x float> @test_vrev64Qf(<4 x float>* %A) nounwind {
-;CHECK: test_vrev64Qf:
-;CHECK: vrev64.32
- %tmp1 = load <4 x float>* %A
- %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
- ret <4 x float> %tmp2
-}
-
-define arm_apcscc <8 x i8> @test_vrev32D8(<8 x i8>* %A) nounwind {
-;CHECK: test_vrev32D8:
-;CHECK: vrev32.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
- ret <8 x i8> %tmp2
-}
-
-define arm_apcscc <4 x i16> @test_vrev32D16(<4 x i16>* %A) nounwind {
-;CHECK: test_vrev32D16:
-;CHECK: vrev32.16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
- ret <4 x i16> %tmp2
-}
-
-define arm_apcscc <16 x i8> @test_vrev32Q8(<16 x i8>* %A) nounwind {
-;CHECK: test_vrev32Q8:
-;CHECK: vrev32.8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
- ret <16 x i8> %tmp2
-}
-
-define arm_apcscc <8 x i16> @test_vrev32Q16(<8 x i16>* %A) nounwind {
-;CHECK: test_vrev32Q16:
-;CHECK: vrev32.16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
- ret <8 x i16> %tmp2
-}
-
-define arm_apcscc <8 x i8> @test_vrev16D8(<8 x i8>* %A) nounwind {
-;CHECK: test_vrev16D8:
-;CHECK: vrev16.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
- ret <8 x i8> %tmp2
-}
-
-define arm_apcscc <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
-;CHECK: test_vrev16Q8:
-;CHECK: vrev16.8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
- ret <16 x i8> %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vshift.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vshift.ll
deleted file mode 100644
index f3cbec7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vshift.ll
+++ /dev/null
@@ -1,432 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vshls8:
-;CHECK: vshl.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = shl <8 x i8> %tmp1, %tmp2
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vshls16:
-;CHECK: vshl.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = shl <4 x i16> %tmp1, %tmp2
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vshls32:
-;CHECK: vshl.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = shl <2 x i32> %tmp1, %tmp2
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vshls64:
-;CHECK: vshl.u64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = shl <1 x i64> %tmp1, %tmp2
- ret <1 x i64> %tmp3
-}
-
-define <8 x i8> @vshli8(<8 x i8>* %A) nounwind {
-;CHECK: vshli8:
-;CHECK: vshl.i8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = shl <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vshli16(<4 x i16>* %A) nounwind {
-;CHECK: vshli16:
-;CHECK: vshl.i16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = shl <4 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15 >
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vshli32(<2 x i32>* %A) nounwind {
-;CHECK: vshli32:
-;CHECK: vshl.i32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = shl <2 x i32> %tmp1, < i32 31, i32 31 >
- ret <2 x i32> %tmp2
-}
-
-define <1 x i64> @vshli64(<1 x i64>* %A) nounwind {
-;CHECK: vshli64:
-;CHECK: vshl.i64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = shl <1 x i64> %tmp1, < i64 63 >
- ret <1 x i64> %tmp2
-}
-
-define <16 x i8> @vshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vshlQs8:
-;CHECK: vshl.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = shl <16 x i8> %tmp1, %tmp2
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vshlQs16:
-;CHECK: vshl.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = shl <8 x i16> %tmp1, %tmp2
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vshlQs32:
-;CHECK: vshl.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = shl <4 x i32> %tmp1, %tmp2
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vshlQs64:
-;CHECK: vshl.u64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = shl <2 x i64> %tmp1, %tmp2
- ret <2 x i64> %tmp3
-}
-
-define <16 x i8> @vshlQi8(<16 x i8>* %A) nounwind {
-;CHECK: vshlQi8:
-;CHECK: vshl.i8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = shl <16 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vshlQi16(<8 x i16>* %A) nounwind {
-;CHECK: vshlQi16:
-;CHECK: vshl.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = shl <8 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vshlQi32(<4 x i32>* %A) nounwind {
-;CHECK: vshlQi32:
-;CHECK: vshl.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = shl <4 x i32> %tmp1, < i32 31, i32 31, i32 31, i32 31 >
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vshlQi64(<2 x i64>* %A) nounwind {
-;CHECK: vshlQi64:
-;CHECK: vshl.i64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = shl <2 x i64> %tmp1, < i64 63, i64 63 >
- ret <2 x i64> %tmp2
-}
-
-define <8 x i8> @vlshru8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vlshru8:
-;CHECK: vneg.s8
-;CHECK: vshl.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = lshr <8 x i8> %tmp1, %tmp2
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vlshru16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vlshru16:
-;CHECK: vneg.s16
-;CHECK: vshl.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = lshr <4 x i16> %tmp1, %tmp2
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vlshru32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vlshru32:
-;CHECK: vneg.s32
-;CHECK: vshl.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = lshr <2 x i32> %tmp1, %tmp2
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vlshru64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vlshru64:
-;CHECK: vsub.i64
-;CHECK: vshl.u64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = lshr <1 x i64> %tmp1, %tmp2
- ret <1 x i64> %tmp3
-}
-
-define <8 x i8> @vlshri8(<8 x i8>* %A) nounwind {
-;CHECK: vlshri8:
-;CHECK: vshr.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = lshr <8 x i8> %tmp1, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vlshri16(<4 x i16>* %A) nounwind {
-;CHECK: vlshri16:
-;CHECK: vshr.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = lshr <4 x i16> %tmp1, < i16 16, i16 16, i16 16, i16 16 >
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vlshri32(<2 x i32>* %A) nounwind {
-;CHECK: vlshri32:
-;CHECK: vshr.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = lshr <2 x i32> %tmp1, < i32 32, i32 32 >
- ret <2 x i32> %tmp2
-}
-
-define <1 x i64> @vlshri64(<1 x i64>* %A) nounwind {
-;CHECK: vlshri64:
-;CHECK: vshr.u64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = lshr <1 x i64> %tmp1, < i64 64 >
- ret <1 x i64> %tmp2
-}
-
-define <16 x i8> @vlshrQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vlshrQu8:
-;CHECK: vneg.s8
-;CHECK: vshl.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = lshr <16 x i8> %tmp1, %tmp2
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vlshrQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vlshrQu16:
-;CHECK: vneg.s16
-;CHECK: vshl.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = lshr <8 x i16> %tmp1, %tmp2
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vlshrQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vlshrQu32:
-;CHECK: vneg.s32
-;CHECK: vshl.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = lshr <4 x i32> %tmp1, %tmp2
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vlshrQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vlshrQu64:
-;CHECK: vsub.i64
-;CHECK: vshl.u64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = lshr <2 x i64> %tmp1, %tmp2
- ret <2 x i64> %tmp3
-}
-
-define <16 x i8> @vlshrQi8(<16 x i8>* %A) nounwind {
-;CHECK: vlshrQi8:
-;CHECK: vshr.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = lshr <16 x i8> %tmp1, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vlshrQi16(<8 x i16>* %A) nounwind {
-;CHECK: vlshrQi16:
-;CHECK: vshr.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = lshr <8 x i16> %tmp1, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vlshrQi32(<4 x i32>* %A) nounwind {
-;CHECK: vlshrQi32:
-;CHECK: vshr.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = lshr <4 x i32> %tmp1, < i32 32, i32 32, i32 32, i32 32 >
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vlshrQi64(<2 x i64>* %A) nounwind {
-;CHECK: vlshrQi64:
-;CHECK: vshr.u64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = lshr <2 x i64> %tmp1, < i64 64, i64 64 >
- ret <2 x i64> %tmp2
-}
-
-; Example that requires splitting and expanding a vector shift.
-define <2 x i64> @update(<2 x i64> %val) nounwind readnone {
-entry:
- %shr = lshr <2 x i64> %val, < i64 2, i64 2 > ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %shr
-}
-
-define <8 x i8> @vashrs8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vashrs8:
-;CHECK: vneg.s8
-;CHECK: vshl.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = ashr <8 x i8> %tmp1, %tmp2
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vashrs16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vashrs16:
-;CHECK: vneg.s16
-;CHECK: vshl.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = ashr <4 x i16> %tmp1, %tmp2
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vashrs32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vashrs32:
-;CHECK: vneg.s32
-;CHECK: vshl.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = ashr <2 x i32> %tmp1, %tmp2
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vashrs64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vashrs64:
-;CHECK: vsub.i64
-;CHECK: vshl.s64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = ashr <1 x i64> %tmp1, %tmp2
- ret <1 x i64> %tmp3
-}
-
-define <8 x i8> @vashri8(<8 x i8>* %A) nounwind {
-;CHECK: vashri8:
-;CHECK: vshr.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = ashr <8 x i8> %tmp1, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vashri16(<4 x i16>* %A) nounwind {
-;CHECK: vashri16:
-;CHECK: vshr.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = ashr <4 x i16> %tmp1, < i16 16, i16 16, i16 16, i16 16 >
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vashri32(<2 x i32>* %A) nounwind {
-;CHECK: vashri32:
-;CHECK: vshr.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = ashr <2 x i32> %tmp1, < i32 32, i32 32 >
- ret <2 x i32> %tmp2
-}
-
-define <1 x i64> @vashri64(<1 x i64>* %A) nounwind {
-;CHECK: vashri64:
-;CHECK: vshr.s64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = ashr <1 x i64> %tmp1, < i64 64 >
- ret <1 x i64> %tmp2
-}
-
-define <16 x i8> @vashrQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vashrQs8:
-;CHECK: vneg.s8
-;CHECK: vshl.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = ashr <16 x i8> %tmp1, %tmp2
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vashrQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vashrQs16:
-;CHECK: vneg.s16
-;CHECK: vshl.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = ashr <8 x i16> %tmp1, %tmp2
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vashrQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vashrQs32:
-;CHECK: vneg.s32
-;CHECK: vshl.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = ashr <4 x i32> %tmp1, %tmp2
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vashrQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vashrQs64:
-;CHECK: vsub.i64
-;CHECK: vshl.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = ashr <2 x i64> %tmp1, %tmp2
- ret <2 x i64> %tmp3
-}
-
-define <16 x i8> @vashrQi8(<16 x i8>* %A) nounwind {
-;CHECK: vashrQi8:
-;CHECK: vshr.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = ashr <16 x i8> %tmp1, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vashrQi16(<8 x i16>* %A) nounwind {
-;CHECK: vashrQi16:
-;CHECK: vshr.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = ashr <8 x i16> %tmp1, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vashrQi32(<4 x i32>* %A) nounwind {
-;CHECK: vashrQi32:
-;CHECK: vshr.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = ashr <4 x i32> %tmp1, < i32 32, i32 32, i32 32, i32 32 >
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vashrQi64(<2 x i64>* %A) nounwind {
-;CHECK: vashrQi64:
-;CHECK: vshr.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = ashr <2 x i64> %tmp1, < i64 64, i64 64 >
- ret <2 x i64> %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vshiftins.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vshiftins.ll
deleted file mode 100644
index 3a4f857..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vshiftins.ll
+++ /dev/null
@@ -1,155 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vsli8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vsli8:
-;CHECK: vsli.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vsli16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vsli16:
-;CHECK: vsli.16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vsli32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vsli32:
-;CHECK: vsli.32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vshiftins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> < i32 31, i32 31 >)
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vsli64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vsli64:
-;CHECK: vsli.64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vshiftins.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2, <1 x i64> < i64 63 >)
- ret <1 x i64> %tmp3
-}
-
-define <16 x i8> @vsliQ8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vsliQ8:
-;CHECK: vsli.8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vshiftins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vsliQ16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vsliQ16:
-;CHECK: vsli.16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vshiftins.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vsliQ32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vsliQ32:
-;CHECK: vsli.32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vshiftins.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vsliQ64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vsliQ64:
-;CHECK: vsli.64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vshiftins.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2, <2 x i64> < i64 63, i64 63 >)
- ret <2 x i64> %tmp3
-}
-
-define <8 x i8> @vsri8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vsri8:
-;CHECK: vsri.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vsri16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vsri16:
-;CHECK: vsri.16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vsri32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vsri32:
-;CHECK: vsri.32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vshiftins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> < i32 -32, i32 -32 >)
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vsri64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vsri64:
-;CHECK: vsri.64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vshiftins.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2, <1 x i64> < i64 -64 >)
- ret <1 x i64> %tmp3
-}
-
-define <16 x i8> @vsriQ8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vsriQ8:
-;CHECK: vsri.8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vshiftins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vsriQ16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vsriQ16:
-;CHECK: vsri.16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vshiftins.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vsriQ32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vsriQ32:
-;CHECK: vsri.32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vshiftins.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vsriQ64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vsriQ64:
-;CHECK: vsri.64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vshiftins.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
- ret <2 x i64> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vshiftins.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vshiftins.v1i64(<1 x i64>, <1 x i64>, <1 x i64>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vshiftins.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vshiftins.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vshiftins.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vshiftins.v2i64(<2 x i64>, <2 x i64>, <2 x i64>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vshl.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vshl.ll
deleted file mode 100644
index 818e71b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vshl.ll
+++ /dev/null
@@ -1,654 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vshls8:
-;CHECK: vshl.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vshls16:
-;CHECK: vshl.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vshls32:
-;CHECK: vshl.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vshls64:
-;CHECK: vshl.s64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
- ret <1 x i64> %tmp3
-}
-
-define <8 x i8> @vshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vshlu8:
-;CHECK: vshl.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vshlu16:
-;CHECK: vshl.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vshlu32:
-;CHECK: vshl.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vshlu64:
-;CHECK: vshl.u64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
- ret <1 x i64> %tmp3
-}
-
-define <16 x i8> @vshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vshlQs8:
-;CHECK: vshl.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vshlQs16:
-;CHECK: vshl.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vshifts.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vshlQs32:
-;CHECK: vshl.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vshlQs64:
-;CHECK: vshl.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define <16 x i8> @vshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vshlQu8:
-;CHECK: vshl.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vshlQu16:
-;CHECK: vshl.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vshlQu32:
-;CHECK: vshl.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vshlQu64:
-;CHECK: vshl.u64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-; For left shifts by immediates, the signedness is irrelevant.
-; Test a mix of both signed and unsigned intrinsics.
-
-define <8 x i8> @vshli8(<8 x i8>* %A) nounwind {
-;CHECK: vshli8:
-;CHECK: vshl.i8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vshli16(<4 x i16>* %A) nounwind {
-;CHECK: vshli16:
-;CHECK: vshl.i16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vshli32(<2 x i32>* %A) nounwind {
-;CHECK: vshli32:
-;CHECK: vshl.i32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
- ret <2 x i32> %tmp2
-}
-
-define <1 x i64> @vshli64(<1 x i64>* %A) nounwind {
-;CHECK: vshli64:
-;CHECK: vshl.i64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
- ret <1 x i64> %tmp2
-}
-
-define <16 x i8> @vshlQi8(<16 x i8>* %A) nounwind {
-;CHECK: vshlQi8:
-;CHECK: vshl.i8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vshlQi16(<8 x i16>* %A) nounwind {
-;CHECK: vshlQi16:
-;CHECK: vshl.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vshlQi32(<4 x i32>* %A) nounwind {
-;CHECK: vshlQi32:
-;CHECK: vshl.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vshlQi64(<2 x i64>* %A) nounwind {
-;CHECK: vshlQi64:
-;CHECK: vshl.i64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
- ret <2 x i64> %tmp2
-}
-
-; Right shift by immediate:
-
-define <8 x i8> @vshrs8(<8 x i8>* %A) nounwind {
-;CHECK: vshrs8:
-;CHECK: vshr.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vshrs16(<4 x i16>* %A) nounwind {
-;CHECK: vshrs16:
-;CHECK: vshr.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vshrs32(<2 x i32>* %A) nounwind {
-;CHECK: vshrs32:
-;CHECK: vshr.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
- ret <2 x i32> %tmp2
-}
-
-define <1 x i64> @vshrs64(<1 x i64>* %A) nounwind {
-;CHECK: vshrs64:
-;CHECK: vshr.s64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
- ret <1 x i64> %tmp2
-}
-
-define <8 x i8> @vshru8(<8 x i8>* %A) nounwind {
-;CHECK: vshru8:
-;CHECK: vshr.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vshru16(<4 x i16>* %A) nounwind {
-;CHECK: vshru16:
-;CHECK: vshr.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vshru32(<2 x i32>* %A) nounwind {
-;CHECK: vshru32:
-;CHECK: vshr.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
- ret <2 x i32> %tmp2
-}
-
-define <1 x i64> @vshru64(<1 x i64>* %A) nounwind {
-;CHECK: vshru64:
-;CHECK: vshr.u64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
- ret <1 x i64> %tmp2
-}
-
-define <16 x i8> @vshrQs8(<16 x i8>* %A) nounwind {
-;CHECK: vshrQs8:
-;CHECK: vshr.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vshrQs16(<8 x i16>* %A) nounwind {
-;CHECK: vshrQs16:
-;CHECK: vshr.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vshifts.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vshrQs32(<4 x i32>* %A) nounwind {
-;CHECK: vshrQs32:
-;CHECK: vshr.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vshrQs64(<2 x i64>* %A) nounwind {
-;CHECK: vshrQs64:
-;CHECK: vshr.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
- ret <2 x i64> %tmp2
-}
-
-define <16 x i8> @vshrQu8(<16 x i8>* %A) nounwind {
-;CHECK: vshrQu8:
-;CHECK: vshr.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = call <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vshrQu16(<8 x i16>* %A) nounwind {
-;CHECK: vshrQu16:
-;CHECK: vshr.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vshrQu32(<4 x i32>* %A) nounwind {
-;CHECK: vshrQu32:
-;CHECK: vshr.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vshrQu64(<2 x i64>* %A) nounwind {
-;CHECK: vshrQu64:
-;CHECK: vshr.u64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
- ret <2 x i64> %tmp2
-}
-
-declare <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vshiftu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vshifts.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vshiftu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
-
-define <8 x i8> @vrshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vrshls8:
-;CHECK: vrshl.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vrshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vrshls16:
-;CHECK: vrshl.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vrshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vrshls32:
-;CHECK: vrshl.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vrshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vrshls64:
-;CHECK: vrshl.s64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
- ret <1 x i64> %tmp3
-}
-
-define <8 x i8> @vrshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vrshlu8:
-;CHECK: vrshl.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vrshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vrshlu16:
-;CHECK: vrshl.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vrshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vrshlu32:
-;CHECK: vrshl.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vrshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vrshlu64:
-;CHECK: vrshl.u64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
- ret <1 x i64> %tmp3
-}
-
-define <16 x i8> @vrshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vrshlQs8:
-;CHECK: vrshl.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vrshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vrshlQs16:
-;CHECK: vrshl.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vrshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vrshlQs32:
-;CHECK: vrshl.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vrshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vrshlQs64:
-;CHECK: vrshl.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define <16 x i8> @vrshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vrshlQu8:
-;CHECK: vrshl.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vrshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vrshlQu16:
-;CHECK: vrshl.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vrshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vrshlQu32:
-;CHECK: vrshl.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vrshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vrshlQu64:
-;CHECK: vrshl.u64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define <8 x i8> @vrshrs8(<8 x i8>* %A) nounwind {
-;CHECK: vrshrs8:
-;CHECK: vrshr.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vrshrs16(<4 x i16>* %A) nounwind {
-;CHECK: vrshrs16:
-;CHECK: vrshr.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vrshrs32(<2 x i32>* %A) nounwind {
-;CHECK: vrshrs32:
-;CHECK: vrshr.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
- ret <2 x i32> %tmp2
-}
-
-define <1 x i64> @vrshrs64(<1 x i64>* %A) nounwind {
-;CHECK: vrshrs64:
-;CHECK: vrshr.s64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
- ret <1 x i64> %tmp2
-}
-
-define <8 x i8> @vrshru8(<8 x i8>* %A) nounwind {
-;CHECK: vrshru8:
-;CHECK: vrshr.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vrshru16(<4 x i16>* %A) nounwind {
-;CHECK: vrshru16:
-;CHECK: vrshr.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vrshru32(<2 x i32>* %A) nounwind {
-;CHECK: vrshru32:
-;CHECK: vrshr.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
- ret <2 x i32> %tmp2
-}
-
-define <1 x i64> @vrshru64(<1 x i64>* %A) nounwind {
-;CHECK: vrshru64:
-;CHECK: vrshr.u64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
- ret <1 x i64> %tmp2
-}
-
-define <16 x i8> @vrshrQs8(<16 x i8>* %A) nounwind {
-;CHECK: vrshrQs8:
-;CHECK: vrshr.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vrshrQs16(<8 x i16>* %A) nounwind {
-;CHECK: vrshrQs16:
-;CHECK: vrshr.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vrshrQs32(<4 x i32>* %A) nounwind {
-;CHECK: vrshrQs32:
-;CHECK: vrshr.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vrshrQs64(<2 x i64>* %A) nounwind {
-;CHECK: vrshrQs64:
-;CHECK: vrshr.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
- ret <2 x i64> %tmp2
-}
-
-define <16 x i8> @vrshrQu8(<16 x i8>* %A) nounwind {
-;CHECK: vrshrQu8:
-;CHECK: vrshr.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
- ret <16 x i8> %tmp2
-}
-
-define <8 x i16> @vrshrQu16(<8 x i16>* %A) nounwind {
-;CHECK: vrshrQu16:
-;CHECK: vrshr.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vrshrQu32(<4 x i32>* %A) nounwind {
-;CHECK: vrshrQu32:
-;CHECK: vrshr.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vrshrQu64(<2 x i64>* %A) nounwind {
-;CHECK: vrshrQu64:
-;CHECK: vrshr.u64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
- ret <2 x i64> %tmp2
-}
-
-declare <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vshll.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vshll.ll
deleted file mode 100644
index 8e85b98..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vshll.ll
+++ /dev/null
@@ -1,83 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i16> @vshlls8(<8 x i8>* %A) nounwind {
-;CHECK: vshlls8:
-;CHECK: vshll.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vshlls16(<4 x i16>* %A) nounwind {
-;CHECK: vshlls16:
-;CHECK: vshll.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftls.v4i32(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vshlls32(<2 x i32>* %A) nounwind {
-;CHECK: vshlls32:
-;CHECK: vshll.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
- ret <2 x i64> %tmp2
-}
-
-define <8 x i16> @vshllu8(<8 x i8>* %A) nounwind {
-;CHECK: vshllu8:
-;CHECK: vshll.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftlu.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vshllu16(<4 x i16>* %A) nounwind {
-;CHECK: vshllu16:
-;CHECK: vshll.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftlu.v4i32(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vshllu32(<2 x i32>* %A) nounwind {
-;CHECK: vshllu32:
-;CHECK: vshll.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftlu.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
- ret <2 x i64> %tmp2
-}
-
-; The following tests use the maximum shift count, so the signedness is
-; irrelevant. Test both signed and unsigned versions.
-define <8 x i16> @vshlli8(<8 x i8>* %A) nounwind {
-;CHECK: vshlli8:
-;CHECK: vshll.i8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >)
- ret <8 x i16> %tmp2
-}
-
-define <4 x i32> @vshlli16(<4 x i16>* %A) nounwind {
-;CHECK: vshlli16:
-;CHECK: vshll.i16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftlu.v4i32(<4 x i16> %tmp1, <4 x i16> < i16 16, i16 16, i16 16, i16 16 >)
- ret <4 x i32> %tmp2
-}
-
-define <2 x i64> @vshlli32(<2 x i32>* %A) nounwind {
-;CHECK: vshlli32:
-;CHECK: vshll.i32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 32, i32 32 >)
- ret <2 x i64> %tmp2
-}
-
-declare <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vshiftls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vshiftlu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vshiftlu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vshiftlu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vshrn.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vshrn.ll
deleted file mode 100644
index e2544f4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vshrn.ll
+++ /dev/null
@@ -1,57 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vshrns8(<8 x i16>* %A) nounwind {
-;CHECK: vshrns8:
-;CHECK: vshrn.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vshiftn.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vshrns16(<4 x i32>* %A) nounwind {
-;CHECK: vshrns16:
-;CHECK: vshrn.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vshrns32(<2 x i64>* %A) nounwind {
-;CHECK: vshrns32:
-;CHECK: vshrn.i64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vshiftn.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
- ret <2 x i32> %tmp2
-}
-
-declare <8 x i8> @llvm.arm.neon.vshiftn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vshiftn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
-
-define <8 x i8> @vrshrns8(<8 x i16>* %A) nounwind {
-;CHECK: vrshrns8:
-;CHECK: vrshrn.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = call <8 x i8> @llvm.arm.neon.vrshiftn.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
- ret <8 x i8> %tmp2
-}
-
-define <4 x i16> @vrshrns16(<4 x i32>* %A) nounwind {
-;CHECK: vrshrns16:
-;CHECK: vrshrn.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = call <4 x i16> @llvm.arm.neon.vrshiftn.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
- ret <4 x i16> %tmp2
-}
-
-define <2 x i32> @vrshrns32(<2 x i64>* %A) nounwind {
-;CHECK: vrshrns32:
-;CHECK: vrshrn.i64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = call <2 x i32> @llvm.arm.neon.vrshiftn.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
- ret <2 x i32> %tmp2
-}
-
-declare <8 x i8> @llvm.arm.neon.vrshiftn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vrshiftn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vrshiftn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vsra.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vsra.ll
deleted file mode 100644
index acb672d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vsra.ll
+++ /dev/null
@@ -1,341 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vsras8:
-;CHECK: vsra.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = ashr <8 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
- %tmp4 = add <8 x i8> %tmp1, %tmp3
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @vsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vsras16:
-;CHECK: vsra.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = ashr <4 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16 >
- %tmp4 = add <4 x i16> %tmp1, %tmp3
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @vsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vsras32:
-;CHECK: vsra.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = ashr <2 x i32> %tmp2, < i32 32, i32 32 >
- %tmp4 = add <2 x i32> %tmp1, %tmp3
- ret <2 x i32> %tmp4
-}
-
-define <1 x i64> @vsras64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vsras64:
-;CHECK: vsra.s64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = ashr <1 x i64> %tmp2, < i64 64 >
- %tmp4 = add <1 x i64> %tmp1, %tmp3
- ret <1 x i64> %tmp4
-}
-
-define <16 x i8> @vsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vsraQs8:
-;CHECK: vsra.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = ashr <16 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
- %tmp4 = add <16 x i8> %tmp1, %tmp3
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @vsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vsraQs16:
-;CHECK: vsra.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = ashr <8 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
- %tmp4 = add <8 x i16> %tmp1, %tmp3
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vsraQs32:
-;CHECK: vsra.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = ashr <4 x i32> %tmp2, < i32 32, i32 32, i32 32, i32 32 >
- %tmp4 = add <4 x i32> %tmp1, %tmp3
- ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @vsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vsraQs64:
-;CHECK: vsra.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = ashr <2 x i64> %tmp2, < i64 64, i64 64 >
- %tmp4 = add <2 x i64> %tmp1, %tmp3
- ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @vsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vsrau8:
-;CHECK: vsra.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = lshr <8 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
- %tmp4 = add <8 x i8> %tmp1, %tmp3
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @vsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vsrau16:
-;CHECK: vsra.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = lshr <4 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16 >
- %tmp4 = add <4 x i16> %tmp1, %tmp3
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @vsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vsrau32:
-;CHECK: vsra.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = lshr <2 x i32> %tmp2, < i32 32, i32 32 >
- %tmp4 = add <2 x i32> %tmp1, %tmp3
- ret <2 x i32> %tmp4
-}
-
-define <1 x i64> @vsrau64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vsrau64:
-;CHECK: vsra.u64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = lshr <1 x i64> %tmp2, < i64 64 >
- %tmp4 = add <1 x i64> %tmp1, %tmp3
- ret <1 x i64> %tmp4
-}
-
-define <16 x i8> @vsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vsraQu8:
-;CHECK: vsra.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = lshr <16 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
- %tmp4 = add <16 x i8> %tmp1, %tmp3
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @vsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vsraQu16:
-;CHECK: vsra.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = lshr <8 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
- %tmp4 = add <8 x i16> %tmp1, %tmp3
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vsraQu32:
-;CHECK: vsra.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = lshr <4 x i32> %tmp2, < i32 32, i32 32, i32 32, i32 32 >
- %tmp4 = add <4 x i32> %tmp1, %tmp3
- ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @vsraQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vsraQu64:
-;CHECK: vsra.u64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = lshr <2 x i64> %tmp2, < i64 64, i64 64 >
- %tmp4 = add <2 x i64> %tmp1, %tmp3
- ret <2 x i64> %tmp4
-}
-
-define <8 x i8> @vrsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vrsras8:
-;CHECK: vrsra.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %tmp2, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
- %tmp4 = add <8 x i8> %tmp1, %tmp3
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @vrsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vrsras16:
-;CHECK: vrsra.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %tmp2, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
- %tmp4 = add <4 x i16> %tmp1, %tmp3
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @vrsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vrsras32:
-;CHECK: vrsra.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %tmp2, <2 x i32> < i32 -32, i32 -32 >)
- %tmp4 = add <2 x i32> %tmp1, %tmp3
- ret <2 x i32> %tmp4
-}
-
-define <1 x i64> @vrsras64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vrsras64:
-;CHECK: vrsra.s64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp2, <1 x i64> < i64 -64 >)
- %tmp4 = add <1 x i64> %tmp1, %tmp3
- ret <1 x i64> %tmp4
-}
-
-define <8 x i8> @vrsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vrsrau8:
-;CHECK: vrsra.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %tmp2, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
- %tmp4 = add <8 x i8> %tmp1, %tmp3
- ret <8 x i8> %tmp4
-}
-
-define <4 x i16> @vrsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vrsrau16:
-;CHECK: vrsra.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %tmp2, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
- %tmp4 = add <4 x i16> %tmp1, %tmp3
- ret <4 x i16> %tmp4
-}
-
-define <2 x i32> @vrsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vrsrau32:
-;CHECK: vrsra.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %tmp2, <2 x i32> < i32 -32, i32 -32 >)
- %tmp4 = add <2 x i32> %tmp1, %tmp3
- ret <2 x i32> %tmp4
-}
-
-define <1 x i64> @vrsrau64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vrsrau64:
-;CHECK: vrsra.u64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %tmp2, <1 x i64> < i64 -64 >)
- %tmp4 = add <1 x i64> %tmp1, %tmp3
- ret <1 x i64> %tmp4
-}
-
-define <16 x i8> @vrsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vrsraQs8:
-;CHECK: vrsra.s8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp2, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
- %tmp4 = add <16 x i8> %tmp1, %tmp3
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @vrsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vrsraQs16:
-;CHECK: vrsra.s16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %tmp2, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
- %tmp4 = add <8 x i16> %tmp1, %tmp3
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vrsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vrsraQs32:
-;CHECK: vrsra.s32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %tmp2, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
- %tmp4 = add <4 x i32> %tmp1, %tmp3
- ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @vrsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vrsraQs64:
-;CHECK: vrsra.s64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
- %tmp4 = add <2 x i64> %tmp1, %tmp3
- ret <2 x i64> %tmp4
-}
-
-define <16 x i8> @vrsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vrsraQu8:
-;CHECK: vrsra.u8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %tmp2, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
- %tmp4 = add <16 x i8> %tmp1, %tmp3
- ret <16 x i8> %tmp4
-}
-
-define <8 x i16> @vrsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vrsraQu16:
-;CHECK: vrsra.u16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %tmp2, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
- %tmp4 = add <8 x i16> %tmp1, %tmp3
- ret <8 x i16> %tmp4
-}
-
-define <4 x i32> @vrsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vrsraQu32:
-;CHECK: vrsra.u32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %tmp2, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
- %tmp4 = add <4 x i32> %tmp1, %tmp3
- ret <4 x i32> %tmp4
-}
-
-define <2 x i64> @vrsraQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vrsraQu64:
-;CHECK: vrsra.u64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
- %tmp4 = add <2 x i64> %tmp1, %tmp3
- ret <2 x i64> %tmp4
-}
-
-declare <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-declare <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
-
-declare <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vst1.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vst1.ll
deleted file mode 100644
index 602b124..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vst1.ll
+++ /dev/null
@@ -1,93 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define void @vst1i8(i8* %A, <8 x i8>* %B) nounwind {
-;CHECK: vst1i8:
-;CHECK: vst1.8
- %tmp1 = load <8 x i8>* %B
- call void @llvm.arm.neon.vst1.v8i8(i8* %A, <8 x i8> %tmp1)
- ret void
-}
-
-define void @vst1i16(i16* %A, <4 x i16>* %B) nounwind {
-;CHECK: vst1i16:
-;CHECK: vst1.16
- %tmp1 = load <4 x i16>* %B
- call void @llvm.arm.neon.vst1.v4i16(i16* %A, <4 x i16> %tmp1)
- ret void
-}
-
-define void @vst1i32(i32* %A, <2 x i32>* %B) nounwind {
-;CHECK: vst1i32:
-;CHECK: vst1.32
- %tmp1 = load <2 x i32>* %B
- call void @llvm.arm.neon.vst1.v2i32(i32* %A, <2 x i32> %tmp1)
- ret void
-}
-
-define void @vst1f(float* %A, <2 x float>* %B) nounwind {
-;CHECK: vst1f:
-;CHECK: vst1.32
- %tmp1 = load <2 x float>* %B
- call void @llvm.arm.neon.vst1.v2f32(float* %A, <2 x float> %tmp1)
- ret void
-}
-
-define void @vst1i64(i64* %A, <1 x i64>* %B) nounwind {
-;CHECK: vst1i64:
-;CHECK: vst1.64
- %tmp1 = load <1 x i64>* %B
- call void @llvm.arm.neon.vst1.v1i64(i64* %A, <1 x i64> %tmp1)
- ret void
-}
-
-define void @vst1Qi8(i8* %A, <16 x i8>* %B) nounwind {
-;CHECK: vst1Qi8:
-;CHECK: vst1.8
- %tmp1 = load <16 x i8>* %B
- call void @llvm.arm.neon.vst1.v16i8(i8* %A, <16 x i8> %tmp1)
- ret void
-}
-
-define void @vst1Qi16(i16* %A, <8 x i16>* %B) nounwind {
-;CHECK: vst1Qi16:
-;CHECK: vst1.16
- %tmp1 = load <8 x i16>* %B
- call void @llvm.arm.neon.vst1.v8i16(i16* %A, <8 x i16> %tmp1)
- ret void
-}
-
-define void @vst1Qi32(i32* %A, <4 x i32>* %B) nounwind {
-;CHECK: vst1Qi32:
-;CHECK: vst1.32
- %tmp1 = load <4 x i32>* %B
- call void @llvm.arm.neon.vst1.v4i32(i32* %A, <4 x i32> %tmp1)
- ret void
-}
-
-define void @vst1Qf(float* %A, <4 x float>* %B) nounwind {
-;CHECK: vst1Qf:
-;CHECK: vst1.32
- %tmp1 = load <4 x float>* %B
- call void @llvm.arm.neon.vst1.v4f32(float* %A, <4 x float> %tmp1)
- ret void
-}
-
-define void @vst1Qi64(i64* %A, <2 x i64>* %B) nounwind {
-;CHECK: vst1Qi64:
-;CHECK: vst1.64
- %tmp1 = load <2 x i64>* %B
- call void @llvm.arm.neon.vst1.v2i64(i64* %A, <2 x i64> %tmp1)
- ret void
-}
-
-declare void @llvm.arm.neon.vst1.v8i8(i8*, <8 x i8>) nounwind
-declare void @llvm.arm.neon.vst1.v4i16(i8*, <4 x i16>) nounwind
-declare void @llvm.arm.neon.vst1.v2i32(i8*, <2 x i32>) nounwind
-declare void @llvm.arm.neon.vst1.v2f32(i8*, <2 x float>) nounwind
-declare void @llvm.arm.neon.vst1.v1i64(i8*, <1 x i64>) nounwind
-
-declare void @llvm.arm.neon.vst1.v16i8(i8*, <16 x i8>) nounwind
-declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>) nounwind
-declare void @llvm.arm.neon.vst1.v4i32(i8*, <4 x i32>) nounwind
-declare void @llvm.arm.neon.vst1.v4f32(i8*, <4 x float>) nounwind
-declare void @llvm.arm.neon.vst1.v2i64(i8*, <2 x i64>) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vst2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vst2.ll
deleted file mode 100644
index 17d6bee..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vst2.ll
+++ /dev/null
@@ -1,84 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define void @vst2i8(i8* %A, <8 x i8>* %B) nounwind {
-;CHECK: vst2i8:
-;CHECK: vst2.8
- %tmp1 = load <8 x i8>* %B
- call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1)
- ret void
-}
-
-define void @vst2i16(i16* %A, <4 x i16>* %B) nounwind {
-;CHECK: vst2i16:
-;CHECK: vst2.16
- %tmp1 = load <4 x i16>* %B
- call void @llvm.arm.neon.vst2.v4i16(i16* %A, <4 x i16> %tmp1, <4 x i16> %tmp1)
- ret void
-}
-
-define void @vst2i32(i32* %A, <2 x i32>* %B) nounwind {
-;CHECK: vst2i32:
-;CHECK: vst2.32
- %tmp1 = load <2 x i32>* %B
- call void @llvm.arm.neon.vst2.v2i32(i32* %A, <2 x i32> %tmp1, <2 x i32> %tmp1)
- ret void
-}
-
-define void @vst2f(float* %A, <2 x float>* %B) nounwind {
-;CHECK: vst2f:
-;CHECK: vst2.32
- %tmp1 = load <2 x float>* %B
- call void @llvm.arm.neon.vst2.v2f32(float* %A, <2 x float> %tmp1, <2 x float> %tmp1)
- ret void
-}
-
-define void @vst2i64(i64* %A, <1 x i64>* %B) nounwind {
-;CHECK: vst2i64:
-;CHECK: vst1.64
- %tmp1 = load <1 x i64>* %B
- call void @llvm.arm.neon.vst2.v1i64(i64* %A, <1 x i64> %tmp1, <1 x i64> %tmp1)
- ret void
-}
-
-define void @vst2Qi8(i8* %A, <16 x i8>* %B) nounwind {
-;CHECK: vst2Qi8:
-;CHECK: vst2.8
- %tmp1 = load <16 x i8>* %B
- call void @llvm.arm.neon.vst2.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1)
- ret void
-}
-
-define void @vst2Qi16(i16* %A, <8 x i16>* %B) nounwind {
-;CHECK: vst2Qi16:
-;CHECK: vst2.16
- %tmp1 = load <8 x i16>* %B
- call void @llvm.arm.neon.vst2.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1)
- ret void
-}
-
-define void @vst2Qi32(i32* %A, <4 x i32>* %B) nounwind {
-;CHECK: vst2Qi32:
-;CHECK: vst2.32
- %tmp1 = load <4 x i32>* %B
- call void @llvm.arm.neon.vst2.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1)
- ret void
-}
-
-define void @vst2Qf(float* %A, <4 x float>* %B) nounwind {
-;CHECK: vst2Qf:
-;CHECK: vst2.32
- %tmp1 = load <4 x float>* %B
- call void @llvm.arm.neon.vst2.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1)
- ret void
-}
-
-declare void @llvm.arm.neon.vst2.v8i8(i8*, <8 x i8>, <8 x i8>) nounwind
-declare void @llvm.arm.neon.vst2.v4i16(i8*, <4 x i16>, <4 x i16>) nounwind
-declare void @llvm.arm.neon.vst2.v2i32(i8*, <2 x i32>, <2 x i32>) nounwind
-declare void @llvm.arm.neon.vst2.v2f32(i8*, <2 x float>, <2 x float>) nounwind
-declare void @llvm.arm.neon.vst2.v1i64(i8*, <1 x i64>, <1 x i64>) nounwind
-
-declare void @llvm.arm.neon.vst2.v16i8(i8*, <16 x i8>, <16 x i8>) nounwind
-declare void @llvm.arm.neon.vst2.v8i16(i8*, <8 x i16>, <8 x i16>) nounwind
-declare void @llvm.arm.neon.vst2.v4i32(i8*, <4 x i32>, <4 x i32>) nounwind
-declare void @llvm.arm.neon.vst2.v4f32(i8*, <4 x float>, <4 x float>) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vst3.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vst3.ll
deleted file mode 100644
index a831a0c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vst3.ll
+++ /dev/null
@@ -1,88 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define void @vst3i8(i8* %A, <8 x i8>* %B) nounwind {
-;CHECK: vst3i8:
-;CHECK: vst3.8
- %tmp1 = load <8 x i8>* %B
- call void @llvm.arm.neon.vst3.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1)
- ret void
-}
-
-define void @vst3i16(i16* %A, <4 x i16>* %B) nounwind {
-;CHECK: vst3i16:
-;CHECK: vst3.16
- %tmp1 = load <4 x i16>* %B
- call void @llvm.arm.neon.vst3.v4i16(i16* %A, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1)
- ret void
-}
-
-define void @vst3i32(i32* %A, <2 x i32>* %B) nounwind {
-;CHECK: vst3i32:
-;CHECK: vst3.32
- %tmp1 = load <2 x i32>* %B
- call void @llvm.arm.neon.vst3.v2i32(i32* %A, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1)
- ret void
-}
-
-define void @vst3f(float* %A, <2 x float>* %B) nounwind {
-;CHECK: vst3f:
-;CHECK: vst3.32
- %tmp1 = load <2 x float>* %B
- call void @llvm.arm.neon.vst3.v2f32(float* %A, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1)
- ret void
-}
-
-define void @vst3i64(i64* %A, <1 x i64>* %B) nounwind {
-;CHECK: vst3i64:
-;CHECK: vst1.64
- %tmp1 = load <1 x i64>* %B
- call void @llvm.arm.neon.vst3.v1i64(i64* %A, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1)
- ret void
-}
-
-define void @vst3Qi8(i8* %A, <16 x i8>* %B) nounwind {
-;CHECK: vst3Qi8:
-;CHECK: vst3.8
-;CHECK: vst3.8
- %tmp1 = load <16 x i8>* %B
- call void @llvm.arm.neon.vst3.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1)
- ret void
-}
-
-define void @vst3Qi16(i16* %A, <8 x i16>* %B) nounwind {
-;CHECK: vst3Qi16:
-;CHECK: vst3.16
-;CHECK: vst3.16
- %tmp1 = load <8 x i16>* %B
- call void @llvm.arm.neon.vst3.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1)
- ret void
-}
-
-define void @vst3Qi32(i32* %A, <4 x i32>* %B) nounwind {
-;CHECK: vst3Qi32:
-;CHECK: vst3.32
-;CHECK: vst3.32
- %tmp1 = load <4 x i32>* %B
- call void @llvm.arm.neon.vst3.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1)
- ret void
-}
-
-define void @vst3Qf(float* %A, <4 x float>* %B) nounwind {
-;CHECK: vst3Qf:
-;CHECK: vst3.32
-;CHECK: vst3.32
- %tmp1 = load <4 x float>* %B
- call void @llvm.arm.neon.vst3.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1)
- ret void
-}
-
-declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>) nounwind
-declare void @llvm.arm.neon.vst3.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>) nounwind
-declare void @llvm.arm.neon.vst3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>) nounwind
-declare void @llvm.arm.neon.vst3.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>) nounwind
-declare void @llvm.arm.neon.vst3.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>) nounwind
-
-declare void @llvm.arm.neon.vst3.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>) nounwind
-declare void @llvm.arm.neon.vst3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>) nounwind
-declare void @llvm.arm.neon.vst3.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>) nounwind
-declare void @llvm.arm.neon.vst3.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vst4.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vst4.ll
deleted file mode 100644
index d92c017..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vst4.ll
+++ /dev/null
@@ -1,88 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define void @vst4i8(i8* %A, <8 x i8>* %B) nounwind {
-;CHECK: vst4i8:
-;CHECK: vst4.8
- %tmp1 = load <8 x i8>* %B
- call void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1)
- ret void
-}
-
-define void @vst4i16(i16* %A, <4 x i16>* %B) nounwind {
-;CHECK: vst4i16:
-;CHECK: vst4.16
- %tmp1 = load <4 x i16>* %B
- call void @llvm.arm.neon.vst4.v4i16(i16* %A, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1)
- ret void
-}
-
-define void @vst4i32(i32* %A, <2 x i32>* %B) nounwind {
-;CHECK: vst4i32:
-;CHECK: vst4.32
- %tmp1 = load <2 x i32>* %B
- call void @llvm.arm.neon.vst4.v2i32(i32* %A, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1)
- ret void
-}
-
-define void @vst4f(float* %A, <2 x float>* %B) nounwind {
-;CHECK: vst4f:
-;CHECK: vst4.32
- %tmp1 = load <2 x float>* %B
- call void @llvm.arm.neon.vst4.v2f32(float* %A, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1)
- ret void
-}
-
-define void @vst4i64(i64* %A, <1 x i64>* %B) nounwind {
-;CHECK: vst4i64:
-;CHECK: vst1.64
- %tmp1 = load <1 x i64>* %B
- call void @llvm.arm.neon.vst4.v1i64(i64* %A, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1)
- ret void
-}
-
-define void @vst4Qi8(i8* %A, <16 x i8>* %B) nounwind {
-;CHECK: vst4Qi8:
-;CHECK: vst4.8
-;CHECK: vst4.8
- %tmp1 = load <16 x i8>* %B
- call void @llvm.arm.neon.vst4.v16i8(i8* %A, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1, <16 x i8> %tmp1)
- ret void
-}
-
-define void @vst4Qi16(i16* %A, <8 x i16>* %B) nounwind {
-;CHECK: vst4Qi16:
-;CHECK: vst4.16
-;CHECK: vst4.16
- %tmp1 = load <8 x i16>* %B
- call void @llvm.arm.neon.vst4.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1)
- ret void
-}
-
-define void @vst4Qi32(i32* %A, <4 x i32>* %B) nounwind {
-;CHECK: vst4Qi32:
-;CHECK: vst4.32
-;CHECK: vst4.32
- %tmp1 = load <4 x i32>* %B
- call void @llvm.arm.neon.vst4.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1)
- ret void
-}
-
-define void @vst4Qf(float* %A, <4 x float>* %B) nounwind {
-;CHECK: vst4Qf:
-;CHECK: vst4.32
-;CHECK: vst4.32
- %tmp1 = load <4 x float>* %B
- call void @llvm.arm.neon.vst4.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1)
- ret void
-}
-
-declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>) nounwind
-declare void @llvm.arm.neon.vst4.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>) nounwind
-declare void @llvm.arm.neon.vst4.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>) nounwind
-declare void @llvm.arm.neon.vst4.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>) nounwind
-declare void @llvm.arm.neon.vst4.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>) nounwind
-
-declare void @llvm.arm.neon.vst4.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>) nounwind
-declare void @llvm.arm.neon.vst4.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>) nounwind
-declare void @llvm.arm.neon.vst4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>) nounwind
-declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vstlane.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vstlane.ll
deleted file mode 100644
index 3bfb14f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vstlane.ll
+++ /dev/null
@@ -1,197 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define void @vst2lanei8(i8* %A, <8 x i8>* %B) nounwind {
-;CHECK: vst2lanei8:
-;CHECK: vst2.8
- %tmp1 = load <8 x i8>* %B
- call void @llvm.arm.neon.vst2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
- ret void
-}
-
-define void @vst2lanei16(i16* %A, <4 x i16>* %B) nounwind {
-;CHECK: vst2lanei16:
-;CHECK: vst2.16
- %tmp1 = load <4 x i16>* %B
- call void @llvm.arm.neon.vst2lane.v4i16(i16* %A, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
- ret void
-}
-
-define void @vst2lanei32(i32* %A, <2 x i32>* %B) nounwind {
-;CHECK: vst2lanei32:
-;CHECK: vst2.32
- %tmp1 = load <2 x i32>* %B
- call void @llvm.arm.neon.vst2lane.v2i32(i32* %A, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
- ret void
-}
-
-define void @vst2lanef(float* %A, <2 x float>* %B) nounwind {
-;CHECK: vst2lanef:
-;CHECK: vst2.32
- %tmp1 = load <2 x float>* %B
- call void @llvm.arm.neon.vst2lane.v2f32(float* %A, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
- ret void
-}
-
-define void @vst2laneQi16(i16* %A, <8 x i16>* %B) nounwind {
-;CHECK: vst2laneQi16:
-;CHECK: vst2.16
- %tmp1 = load <8 x i16>* %B
- call void @llvm.arm.neon.vst2lane.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
- ret void
-}
-
-define void @vst2laneQi32(i32* %A, <4 x i32>* %B) nounwind {
-;CHECK: vst2laneQi32:
-;CHECK: vst2.32
- %tmp1 = load <4 x i32>* %B
- call void @llvm.arm.neon.vst2lane.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2)
- ret void
-}
-
-define void @vst2laneQf(float* %A, <4 x float>* %B) nounwind {
-;CHECK: vst2laneQf:
-;CHECK: vst2.32
- %tmp1 = load <4 x float>* %B
- call void @llvm.arm.neon.vst2lane.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, i32 3)
- ret void
-}
-
-declare void @llvm.arm.neon.vst2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32) nounwind
-declare void @llvm.arm.neon.vst2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32) nounwind
-declare void @llvm.arm.neon.vst2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32) nounwind
-declare void @llvm.arm.neon.vst2lane.v2f32(i8*, <2 x float>, <2 x float>, i32) nounwind
-
-declare void @llvm.arm.neon.vst2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32) nounwind
-declare void @llvm.arm.neon.vst2lane.v4i32(i8*, <4 x i32>, <4 x i32>, i32) nounwind
-declare void @llvm.arm.neon.vst2lane.v4f32(i8*, <4 x float>, <4 x float>, i32) nounwind
-
-define void @vst3lanei8(i8* %A, <8 x i8>* %B) nounwind {
-;CHECK: vst3lanei8:
-;CHECK: vst3.8
- %tmp1 = load <8 x i8>* %B
- call void @llvm.arm.neon.vst3lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
- ret void
-}
-
-define void @vst3lanei16(i16* %A, <4 x i16>* %B) nounwind {
-;CHECK: vst3lanei16:
-;CHECK: vst3.16
- %tmp1 = load <4 x i16>* %B
- call void @llvm.arm.neon.vst3lane.v4i16(i16* %A, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
- ret void
-}
-
-define void @vst3lanei32(i32* %A, <2 x i32>* %B) nounwind {
-;CHECK: vst3lanei32:
-;CHECK: vst3.32
- %tmp1 = load <2 x i32>* %B
- call void @llvm.arm.neon.vst3lane.v2i32(i32* %A, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
- ret void
-}
-
-define void @vst3lanef(float* %A, <2 x float>* %B) nounwind {
-;CHECK: vst3lanef:
-;CHECK: vst3.32
- %tmp1 = load <2 x float>* %B
- call void @llvm.arm.neon.vst3lane.v2f32(float* %A, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
- ret void
-}
-
-define void @vst3laneQi16(i16* %A, <8 x i16>* %B) nounwind {
-;CHECK: vst3laneQi16:
-;CHECK: vst3.16
- %tmp1 = load <8 x i16>* %B
- call void @llvm.arm.neon.vst3lane.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 6)
- ret void
-}
-
-define void @vst3laneQi32(i32* %A, <4 x i32>* %B) nounwind {
-;CHECK: vst3laneQi32:
-;CHECK: vst3.32
- %tmp1 = load <4 x i32>* %B
- call void @llvm.arm.neon.vst3lane.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 0)
- ret void
-}
-
-define void @vst3laneQf(float* %A, <4 x float>* %B) nounwind {
-;CHECK: vst3laneQf:
-;CHECK: vst3.32
- %tmp1 = load <4 x float>* %B
- call void @llvm.arm.neon.vst3lane.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
- ret void
-}
-
-declare void @llvm.arm.neon.vst3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
-declare void @llvm.arm.neon.vst3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind
-declare void @llvm.arm.neon.vst3lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind
-declare void @llvm.arm.neon.vst3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32) nounwind
-
-declare void @llvm.arm.neon.vst3lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind
-declare void @llvm.arm.neon.vst3lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind
-declare void @llvm.arm.neon.vst3lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32) nounwind
-
-
-define void @vst4lanei8(i8* %A, <8 x i8>* %B) nounwind {
-;CHECK: vst4lanei8:
-;CHECK: vst4.8
- %tmp1 = load <8 x i8>* %B
- call void @llvm.arm.neon.vst4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
- ret void
-}
-
-define void @vst4lanei16(i16* %A, <4 x i16>* %B) nounwind {
-;CHECK: vst4lanei16:
-;CHECK: vst4.16
- %tmp1 = load <4 x i16>* %B
- call void @llvm.arm.neon.vst4lane.v4i16(i16* %A, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
- ret void
-}
-
-define void @vst4lanei32(i32* %A, <2 x i32>* %B) nounwind {
-;CHECK: vst4lanei32:
-;CHECK: vst4.32
- %tmp1 = load <2 x i32>* %B
- call void @llvm.arm.neon.vst4lane.v2i32(i32* %A, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
- ret void
-}
-
-define void @vst4lanef(float* %A, <2 x float>* %B) nounwind {
-;CHECK: vst4lanef:
-;CHECK: vst4.32
- %tmp1 = load <2 x float>* %B
- call void @llvm.arm.neon.vst4lane.v2f32(float* %A, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
- ret void
-}
-
-define void @vst4laneQi16(i16* %A, <8 x i16>* %B) nounwind {
-;CHECK: vst4laneQi16:
-;CHECK: vst4.16
- %tmp1 = load <8 x i16>* %B
- call void @llvm.arm.neon.vst4lane.v8i16(i16* %A, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 7)
- ret void
-}
-
-define void @vst4laneQi32(i32* %A, <4 x i32>* %B) nounwind {
-;CHECK: vst4laneQi32:
-;CHECK: vst4.32
- %tmp1 = load <4 x i32>* %B
- call void @llvm.arm.neon.vst4lane.v4i32(i32* %A, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 2)
- ret void
-}
-
-define void @vst4laneQf(float* %A, <4 x float>* %B) nounwind {
-;CHECK: vst4laneQf:
-;CHECK: vst4.32
- %tmp1 = load <4 x float>* %B
- call void @llvm.arm.neon.vst4lane.v4f32(float* %A, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
- ret void
-}
-
-declare void @llvm.arm.neon.vst4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
-declare void @llvm.arm.neon.vst4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind
-declare void @llvm.arm.neon.vst4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind
-declare void @llvm.arm.neon.vst4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32) nounwind
-
-declare void @llvm.arm.neon.vst4lane.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32) nounwind
-declare void @llvm.arm.neon.vst4lane.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32) nounwind
-declare void @llvm.arm.neon.vst4lane.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vsub.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vsub.ll
deleted file mode 100644
index 8f0055f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vsub.ll
+++ /dev/null
@@ -1,277 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vsubi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vsubi8:
-;CHECK: vsub.i8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = sub <8 x i8> %tmp1, %tmp2
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vsubi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vsubi16:
-;CHECK: vsub.i16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = sub <4 x i16> %tmp1, %tmp2
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vsubi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vsubi32:
-;CHECK: vsub.i32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = sub <2 x i32> %tmp1, %tmp2
- ret <2 x i32> %tmp3
-}
-
-define <1 x i64> @vsubi64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
-;CHECK: vsubi64:
-;CHECK: vsub.i64
- %tmp1 = load <1 x i64>* %A
- %tmp2 = load <1 x i64>* %B
- %tmp3 = sub <1 x i64> %tmp1, %tmp2
- ret <1 x i64> %tmp3
-}
-
-define <2 x float> @vsubf32(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vsubf32:
-;CHECK: vsub.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = sub <2 x float> %tmp1, %tmp2
- ret <2 x float> %tmp3
-}
-
-define <16 x i8> @vsubQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vsubQi8:
-;CHECK: vsub.i8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = sub <16 x i8> %tmp1, %tmp2
- ret <16 x i8> %tmp3
-}
-
-define <8 x i16> @vsubQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vsubQi16:
-;CHECK: vsub.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = sub <8 x i16> %tmp1, %tmp2
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vsubQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vsubQi32:
-;CHECK: vsub.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = sub <4 x i32> %tmp1, %tmp2
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vsubQi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vsubQi64:
-;CHECK: vsub.i64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = sub <2 x i64> %tmp1, %tmp2
- ret <2 x i64> %tmp3
-}
-
-define <4 x float> @vsubQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vsubQf32:
-;CHECK: vsub.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = sub <4 x float> %tmp1, %tmp2
- ret <4 x float> %tmp3
-}
-
-define <8 x i8> @vsubhni16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vsubhni16:
-;CHECK: vsubhn.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vsubhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vsubhni32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vsubhni32:
-;CHECK: vsubhn.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vsubhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vsubhni64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vsubhni64:
-;CHECK: vsubhn.i64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vsubhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vsubhn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vsubhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vsubhn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
-
-define <8 x i8> @vrsubhni16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vrsubhni16:
-;CHECK: vrsubhn.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vrsubhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <4 x i16> @vrsubhni32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vrsubhni32:
-;CHECK: vrsubhn.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = call <4 x i16> @llvm.arm.neon.vrsubhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
- ret <4 x i16> %tmp3
-}
-
-define <2 x i32> @vrsubhni64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
-;CHECK: vrsubhni64:
-;CHECK: vrsubhn.i64
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
- %tmp3 = call <2 x i32> @llvm.arm.neon.vrsubhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
- ret <2 x i32> %tmp3
-}
-
-declare <8 x i8> @llvm.arm.neon.vrsubhn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i16> @llvm.arm.neon.vrsubhn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
-declare <2 x i32> @llvm.arm.neon.vrsubhn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
-
-define <8 x i16> @vsubls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vsubls8:
-;CHECK: vsubl.s8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vsubls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vsubls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vsubls16:
-;CHECK: vsubl.s16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vsubls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vsubls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vsubls32:
-;CHECK: vsubl.s32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vsubls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define <8 x i16> @vsublu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vsublu8:
-;CHECK: vsubl.u8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vsublu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vsublu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vsublu16:
-;CHECK: vsubl.u16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vsublu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vsublu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vsublu32:
-;CHECK: vsubl.u32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vsublu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-declare <8 x i16> @llvm.arm.neon.vsubls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vsubls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vsubls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vsublu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vsublu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vsublu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
-
-define <8 x i16> @vsubws8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vsubws8:
-;CHECK: vsubw.s8
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vsubws.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vsubws16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vsubws16:
-;CHECK: vsubw.s16
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vsubws.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vsubws32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vsubws32:
-;CHECK: vsubw.s32
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vsubws.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-define <8 x i16> @vsubwu8(<8 x i16>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vsubwu8:
-;CHECK: vsubw.u8
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i16> @llvm.arm.neon.vsubwu.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2)
- ret <8 x i16> %tmp3
-}
-
-define <4 x i32> @vsubwu16(<4 x i32>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vsubwu16:
-;CHECK: vsubw.u16
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = call <4 x i32> @llvm.arm.neon.vsubwu.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2)
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @vsubwu32(<2 x i64>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vsubwu32:
-;CHECK: vsubw.u32
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = call <2 x i64> @llvm.arm.neon.vsubwu.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2)
- ret <2 x i64> %tmp3
-}
-
-declare <8 x i16> @llvm.arm.neon.vsubws.v8i16(<8 x i16>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vsubws.v4i32(<4 x i32>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vsubws.v2i64(<2 x i64>, <2 x i32>) nounwind readnone
-
-declare <8 x i16> @llvm.arm.neon.vsubwu.v8i16(<8 x i16>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vsubwu.v4i32(<4 x i32>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vsubwu.v2i64(<2 x i64>, <2 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vtbl.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vtbl.ll
deleted file mode 100644
index 9264987..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vtbl.ll
+++ /dev/null
@@ -1,109 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
-%struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
-%struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
-
-define <8 x i8> @vtbl1(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vtbl1:
-;CHECK: vtbl.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = call <8 x i8> @llvm.arm.neon.vtbl1(<8 x i8> %tmp1, <8 x i8> %tmp2)
- ret <8 x i8> %tmp3
-}
-
-define <8 x i8> @vtbl2(<8 x i8>* %A, %struct.__neon_int8x8x2_t* %B) nounwind {
-;CHECK: vtbl2:
-;CHECK: vtbl.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load %struct.__neon_int8x8x2_t* %B
- %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
- %tmp5 = call <8 x i8> @llvm.arm.neon.vtbl2(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4)
- ret <8 x i8> %tmp5
-}
-
-define <8 x i8> @vtbl3(<8 x i8>* %A, %struct.__neon_int8x8x3_t* %B) nounwind {
-;CHECK: vtbl3:
-;CHECK: vtbl.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load %struct.__neon_int8x8x3_t* %B
- %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 2
- %tmp6 = call <8 x i8> @llvm.arm.neon.vtbl3(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5)
- ret <8 x i8> %tmp6
-}
-
-define <8 x i8> @vtbl4(<8 x i8>* %A, %struct.__neon_int8x8x4_t* %B) nounwind {
-;CHECK: vtbl4:
-;CHECK: vtbl.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load %struct.__neon_int8x8x4_t* %B
- %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 2
- %tmp6 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 3
- %tmp7 = call <8 x i8> @llvm.arm.neon.vtbl4(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6)
- ret <8 x i8> %tmp7
-}
-
-define <8 x i8> @vtbx1(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
-;CHECK: vtbx1:
-;CHECK: vtbx.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i8>* %C
- %tmp4 = call <8 x i8> @llvm.arm.neon.vtbx1(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
- ret <8 x i8> %tmp4
-}
-
-define <8 x i8> @vtbx2(<8 x i8>* %A, %struct.__neon_int8x8x2_t* %B, <8 x i8>* %C) nounwind {
-;CHECK: vtbx2:
-;CHECK: vtbx.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load %struct.__neon_int8x8x2_t* %B
- %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
- %tmp5 = load <8 x i8>* %C
- %tmp6 = call <8 x i8> @llvm.arm.neon.vtbx2(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5)
- ret <8 x i8> %tmp6
-}
-
-define <8 x i8> @vtbx3(<8 x i8>* %A, %struct.__neon_int8x8x3_t* %B, <8 x i8>* %C) nounwind {
-;CHECK: vtbx3:
-;CHECK: vtbx.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load %struct.__neon_int8x8x3_t* %B
- %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 2
- %tmp6 = load <8 x i8>* %C
- %tmp7 = call <8 x i8> @llvm.arm.neon.vtbx3(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6)
- ret <8 x i8> %tmp7
-}
-
-define <8 x i8> @vtbx4(<8 x i8>* %A, %struct.__neon_int8x8x4_t* %B, <8 x i8>* %C) nounwind {
-;CHECK: vtbx4:
-;CHECK: vtbx.8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load %struct.__neon_int8x8x4_t* %B
- %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
- %tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
- %tmp5 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 2
- %tmp6 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 3
- %tmp7 = load <8 x i8>* %C
- %tmp8 = call <8 x i8> @llvm.arm.neon.vtbx4(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6, <8 x i8> %tmp7)
- ret <8 x i8> %tmp8
-}
-
-declare <8 x i8> @llvm.arm.neon.vtbl1(<8 x i8>, <8 x i8>) nounwind readnone
-declare <8 x i8> @llvm.arm.neon.vtbl2(<8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <8 x i8> @llvm.arm.neon.vtbl3(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <8 x i8> @llvm.arm.neon.vtbl4(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
-
-declare <8 x i8> @llvm.arm.neon.vtbx1(<8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <8 x i8> @llvm.arm.neon.vtbx2(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <8 x i8> @llvm.arm.neon.vtbx3(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
-declare <8 x i8> @llvm.arm.neon.vtbx4(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vtrn.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vtrn.ll
deleted file mode 100644
index 5122b09..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vtrn.ll
+++ /dev/null
@@ -1,97 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vtrni8:
-;CHECK: vtrn.8
-;CHECK-NEXT: vadd.i8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
- %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
- %tmp5 = add <8 x i8> %tmp3, %tmp4
- ret <8 x i8> %tmp5
-}
-
-define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vtrni16:
-;CHECK: vtrn.16
-;CHECK-NEXT: vadd.i16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
- %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
- %tmp5 = add <4 x i16> %tmp3, %tmp4
- ret <4 x i16> %tmp5
-}
-
-define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
-;CHECK: vtrni32:
-;CHECK: vtrn.32
-;CHECK-NEXT: vadd.i32
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
- %tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
- %tmp5 = add <2 x i32> %tmp3, %tmp4
- ret <2 x i32> %tmp5
-}
-
-define <2 x float> @vtrnf(<2 x float>* %A, <2 x float>* %B) nounwind {
-;CHECK: vtrnf:
-;CHECK: vtrn.32
-;CHECK-NEXT: vadd.f32
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 0, i32 2>
- %tmp4 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 1, i32 3>
- %tmp5 = add <2 x float> %tmp3, %tmp4
- ret <2 x float> %tmp5
-}
-
-define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vtrnQi8:
-;CHECK: vtrn.8
-;CHECK-NEXT: vadd.i8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
- %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
- %tmp5 = add <16 x i8> %tmp3, %tmp4
- ret <16 x i8> %tmp5
-}
-
-define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vtrnQi16:
-;CHECK: vtrn.16
-;CHECK-NEXT: vadd.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
- %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
- %tmp5 = add <8 x i16> %tmp3, %tmp4
- ret <8 x i16> %tmp5
-}
-
-define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vtrnQi32:
-;CHECK: vtrn.32
-;CHECK-NEXT: vadd.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
- %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
- %tmp5 = add <4 x i32> %tmp3, %tmp4
- ret <4 x i32> %tmp5
-}
-
-define <4 x float> @vtrnQf(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vtrnQf:
-;CHECK: vtrn.32
-;CHECK-NEXT: vadd.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
- %tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
- %tmp5 = add <4 x float> %tmp3, %tmp4
- ret <4 x float> %tmp5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vuzp.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vuzp.ll
deleted file mode 100644
index e531718..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vuzp.ll
+++ /dev/null
@@ -1,75 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vuzpi8:
-;CHECK: vuzp.8
-;CHECK-NEXT: vadd.i8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
- %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
- %tmp5 = add <8 x i8> %tmp3, %tmp4
- ret <8 x i8> %tmp5
-}
-
-define <4 x i16> @vuzpi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vuzpi16:
-;CHECK: vuzp.16
-;CHECK-NEXT: vadd.i16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
- %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
- %tmp5 = add <4 x i16> %tmp3, %tmp4
- ret <4 x i16> %tmp5
-}
-
-; VUZP.32 is equivalent to VTRN.32 for 64-bit vectors.
-
-define <16 x i8> @vuzpQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vuzpQi8:
-;CHECK: vuzp.8
-;CHECK-NEXT: vadd.i8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
- %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
- %tmp5 = add <16 x i8> %tmp3, %tmp4
- ret <16 x i8> %tmp5
-}
-
-define <8 x i16> @vuzpQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vuzpQi16:
-;CHECK: vuzp.16
-;CHECK-NEXT: vadd.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
- %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
- %tmp5 = add <8 x i16> %tmp3, %tmp4
- ret <8 x i16> %tmp5
-}
-
-define <4 x i32> @vuzpQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vuzpQi32:
-;CHECK: vuzp.32
-;CHECK-NEXT: vadd.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
- %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
- %tmp5 = add <4 x i32> %tmp3, %tmp4
- ret <4 x i32> %tmp5
-}
-
-define <4 x float> @vuzpQf(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vuzpQf:
-;CHECK: vuzp.32
-;CHECK-NEXT: vadd.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
- %tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
- %tmp5 = add <4 x float> %tmp3, %tmp4
- ret <4 x float> %tmp5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/vzip.ll b/libclamav/c++/llvm/test/CodeGen/ARM/vzip.ll
deleted file mode 100644
index 32f7e0d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/vzip.ll
+++ /dev/null
@@ -1,75 +0,0 @@
-; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-
-define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
-;CHECK: vzipi8:
-;CHECK: vzip.8
-;CHECK-NEXT: vadd.i8
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
- %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
- %tmp5 = add <8 x i8> %tmp3, %tmp4
- ret <8 x i8> %tmp5
-}
-
-define <4 x i16> @vzipi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
-;CHECK: vzipi16:
-;CHECK: vzip.16
-;CHECK-NEXT: vadd.i16
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
- %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
- %tmp5 = add <4 x i16> %tmp3, %tmp4
- ret <4 x i16> %tmp5
-}
-
-; VZIP.32 is equivalent to VTRN.32 for 64-bit vectors.
-
-define <16 x i8> @vzipQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
-;CHECK: vzipQi8:
-;CHECK: vzip.8
-;CHECK-NEXT: vadd.i8
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
- %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
- %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
- %tmp5 = add <16 x i8> %tmp3, %tmp4
- ret <16 x i8> %tmp5
-}
-
-define <8 x i16> @vzipQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
-;CHECK: vzipQi16:
-;CHECK: vzip.16
-;CHECK-NEXT: vadd.i16
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
- %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
- %tmp5 = add <8 x i16> %tmp3, %tmp4
- ret <8 x i16> %tmp5
-}
-
-define <4 x i32> @vzipQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
-;CHECK: vzipQi32:
-;CHECK: vzip.32
-;CHECK-NEXT: vadd.i32
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
- %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
- %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
- %tmp5 = add <4 x i32> %tmp3, %tmp4
- ret <4 x i32> %tmp5
-}
-
-define <4 x float> @vzipQf(<4 x float>* %A, <4 x float>* %B) nounwind {
-;CHECK: vzipQf:
-;CHECK: vzip.32
-;CHECK-NEXT: vadd.f32
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
- %tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
- %tmp5 = add <4 x float> %tmp3, %tmp4
- ret <4 x float> %tmp5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/weak.ll b/libclamav/c++/llvm/test/CodeGen/ARM/weak.ll
deleted file mode 100644
index 5ac4b8c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/weak.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=arm | grep .weak.*f
-; RUN: llc < %s -march=arm | grep .weak.*h
-
-define weak i32 @f() {
-entry:
- unreachable
-}
-
-define void @g() {
-entry:
- tail call void @h( )
- ret void
-}
-
-declare extern_weak void @h()
-
diff --git a/libclamav/c++/llvm/test/CodeGen/ARM/weak2.ll b/libclamav/c++/llvm/test/CodeGen/ARM/weak2.ll
deleted file mode 100644
index cf327bb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/ARM/weak2.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=arm | grep .weak
-
-define i32 @f(i32 %a) {
-entry:
- %tmp2 = icmp eq i32 %a, 0 ; <i1> [#uses=1]
- %t.0 = select i1 %tmp2, i32 (...)* null, i32 (...)* @test_weak ; <i32 (...)*> [#uses=2]
- %tmp5 = icmp eq i32 (...)* %t.0, null ; <i1> [#uses=1]
- br i1 %tmp5, label %UnifiedReturnBlock, label %cond_true8
-
-cond_true8: ; preds = %entry
- %tmp10 = tail call i32 (...)* %t.0( ) ; <i32> [#uses=1]
- ret i32 %tmp10
-
-UnifiedReturnBlock: ; preds = %entry
- ret i32 250
-}
-
-declare extern_weak i32 @test_weak(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2002-04-14-UnexpectedUnsignedType.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2002-04-14-UnexpectedUnsignedType.ll
deleted file mode 100644
index dd382cf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2002-04-14-UnexpectedUnsignedType.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s
-
-; This caused the backend to assert out with:
-; SparcInstrInfo.cpp:103: failed assertion `0 && "Unexpected unsigned type"'
-;
-
-declare void @bar(i8*)
-
-define void @foo() {
- %cast225 = inttoptr i64 123456 to i8* ; <i8*> [#uses=1]
- call void @bar( i8* %cast225 )
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2002-04-16-StackFrameSizeAlignment.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2002-04-16-StackFrameSizeAlignment.ll
deleted file mode 100644
index 751ed40..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2002-04-16-StackFrameSizeAlignment.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s
-
-; Compiling this file produces:
-; Sparc.cpp:91: failed assertion `(offset - OFFSET) % getStackFrameSizeAlignment() == 0'
-;
-declare i32 @SIM(i8*, i8*, i32, i32, i32, [256 x i32]*, i32, i32, i32)
-
-define void @foo() {
-bb0:
- %V = alloca [256 x i32], i32 256 ; <[256 x i32]*> [#uses=1]
- call i32 @SIM( i8* null, i8* null, i32 0, i32 0, i32 0, [256 x i32]* %V, i32 0, i32 0, i32 2 ) ; <i32>:0 [#uses=0]
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-27-phifcmpd.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-27-phifcmpd.ll
deleted file mode 100644
index 6fb1799..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-27-phifcmpd.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s
-
-define void @QRiterate(i32 %p.1, double %tmp.212) {
-entry:
- %tmp.184 = icmp sgt i32 %p.1, 0 ; <i1> [#uses=1]
- br i1 %tmp.184, label %shortcirc_next.1, label %shortcirc_done.1
-
-shortcirc_next.1: ; preds = %shortcirc_done.1, %entry
- %tmp.213 = fcmp une double %tmp.212, 0.000000e+00 ; <i1> [#uses=1]
- br label %shortcirc_done.1
-
-shortcirc_done.1: ; preds = %shortcirc_next.1, %entry
- %val.1 = phi i1 [ false, %entry ], [ %tmp.213, %shortcirc_next.1 ] ; <i1> [#uses=1]
- br i1 %val.1, label %shortcirc_next.1, label %exit.1
-
-exit.1: ; preds = %shortcirc_done.1
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-27-useboolinotherbb.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-27-useboolinotherbb.ll
deleted file mode 100644
index 14bb000..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-27-useboolinotherbb.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s
-
-define void @QRiterate(double %tmp.212) {
- %tmp.213 = fcmp une double %tmp.212, 0.000000e+00 ; <i1> [#uses=1]
- br label %shortcirc_next.1
-
-shortcirc_next.1: ; preds = %shortcirc_next.1, %0
- br i1 %tmp.213, label %shortcirc_next.1, label %exit.1
-
-exit.1: ; preds = %shortcirc_next.1
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-27-usefsubasbool.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-27-usefsubasbool.ll
deleted file mode 100644
index cc0eb5c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-27-usefsubasbool.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s
-
-define void @QRiterate(double %tmp.212) {
-entry:
- br label %shortcirc_next.1
-
-shortcirc_next.1: ; preds = %shortcirc_next.1, %entry
- %tmp.213 = fcmp une double %tmp.212, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %tmp.213, label %shortcirc_next.1, label %exit.1
-
-exit.1: ; preds = %shortcirc_next.1
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-28-ManyArgs.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-28-ManyArgs.ll
deleted file mode 100644
index c6fbdae..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-28-ManyArgs.ll
+++ /dev/null
@@ -1,153 +0,0 @@
-; RUN: llc < %s
-
-;; Date: May 28, 2003.
-;; From: test/Programs/External/SPEC/CINT2000/175.vpr.llvm.bc
-;; Function: int %main(int %argc.1, sbyte** %argv.1)
-;;
-;; Error: A function call with about 56 arguments causes an assertion failure
-;; in llc because the register allocator cannot find a register
-;; not used explicitly by the call instruction.
-;;
-;; Cause: Regalloc was not keeping track of free registers correctly.
-;; It was counting the registers allocated to all outgoing arguments,
-;; even though most of those are copied to the stack (so those
-;; registers are not actually used by the call instruction).
-;;
-;; Fixed: By rewriting selection and allocation so that selection explicitly
-;; inserts all copy operations required for passing arguments and
-;; for the return value of a call, copying to/from registers
-;; and/or to stack locations as needed.
-;;
- %struct..s_annealing_sched = type { i32, float, float, float, float }
- %struct..s_chan = type { i32, float, float, float, float }
- %struct..s_det_routing_arch = type { i32, float, float, float, i32, i32, i16, i16, i16, float, float }
- %struct..s_placer_opts = type { i32, float, i32, i32, i8*, i32, i32 }
- %struct..s_router_opts = type { float, float, float, float, float, i32, i32, i32, i32 }
- %struct..s_segment_inf = type { float, i32, i16, i16, float, float, i32, float, float }
- %struct..s_switch_inf = type { i32, float, float, float, float }
-
-define i32 @main(i32 %argc.1, i8** %argv.1) {
-entry:
- %net_file = alloca [300 x i8] ; <[300 x i8]*> [#uses=1]
- %place_file = alloca [300 x i8] ; <[300 x i8]*> [#uses=1]
- %arch_file = alloca [300 x i8] ; <[300 x i8]*> [#uses=1]
- %route_file = alloca [300 x i8] ; <[300 x i8]*> [#uses=1]
- %full_stats = alloca i32 ; <i32*> [#uses=1]
- %operation = alloca i32 ; <i32*> [#uses=1]
- %verify_binary_search = alloca i32 ; <i32*> [#uses=1]
- %show_graphics = alloca i32 ; <i32*> [#uses=1]
- %annealing_sched = alloca %struct..s_annealing_sched ; <%struct..s_annealing_sched*> [#uses=5]
- %placer_opts = alloca %struct..s_placer_opts ; <%struct..s_placer_opts*> [#uses=7]
- %router_opts = alloca %struct..s_router_opts ; <%struct..s_router_opts*> [#uses=9]
- %det_routing_arch = alloca %struct..s_det_routing_arch ; <%struct..s_det_routing_arch*> [#uses=11]
- %segment_inf = alloca %struct..s_segment_inf* ; <%struct..s_segment_inf**> [#uses=1]
- %timing_inf = alloca { i32, float, float, float, float, float, float, float, float, float, float } ; <{ i32, float, float, float, float, float, float, float, float, float, float }*> [#uses=11]
- %tmp.101 = getelementptr %struct..s_placer_opts* %placer_opts, i64 0, i32 4 ; <i8**> [#uses=1]
- %tmp.105 = getelementptr [300 x i8]* %net_file, i64 0, i64 0 ; <i8*> [#uses=1]
- %tmp.106 = getelementptr [300 x i8]* %arch_file, i64 0, i64 0 ; <i8*> [#uses=1]
- %tmp.107 = getelementptr [300 x i8]* %place_file, i64 0, i64 0 ; <i8*> [#uses=1]
- %tmp.108 = getelementptr [300 x i8]* %route_file, i64 0, i64 0 ; <i8*> [#uses=1]
- %tmp.109 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 0 ; <i32*> [#uses=1]
- %tmp.112 = getelementptr %struct..s_placer_opts* %placer_opts, i64 0, i32 0 ; <i32*> [#uses=1]
- %tmp.114 = getelementptr %struct..s_placer_opts* %placer_opts, i64 0, i32 6 ; <i32*> [#uses=1]
- %tmp.118 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 7 ; <i32*> [#uses=1]
- %tmp.135 = load i32* %operation ; <i32> [#uses=1]
- %tmp.137 = load i32* %tmp.112 ; <i32> [#uses=1]
- %tmp.138 = getelementptr %struct..s_placer_opts* %placer_opts, i64 0, i32 1 ; <float*> [#uses=1]
- %tmp.139 = load float* %tmp.138 ; <float> [#uses=1]
- %tmp.140 = getelementptr %struct..s_placer_opts* %placer_opts, i64 0, i32 2 ; <i32*> [#uses=1]
- %tmp.141 = load i32* %tmp.140 ; <i32> [#uses=1]
- %tmp.142 = getelementptr %struct..s_placer_opts* %placer_opts, i64 0, i32 3 ; <i32*> [#uses=1]
- %tmp.143 = load i32* %tmp.142 ; <i32> [#uses=1]
- %tmp.145 = load i8** %tmp.101 ; <i8*> [#uses=1]
- %tmp.146 = getelementptr %struct..s_placer_opts* %placer_opts, i64 0, i32 5 ; <i32*> [#uses=1]
- %tmp.147 = load i32* %tmp.146 ; <i32> [#uses=1]
- %tmp.149 = load i32* %tmp.114 ; <i32> [#uses=1]
- %tmp.154 = load i32* %full_stats ; <i32> [#uses=1]
- %tmp.155 = load i32* %verify_binary_search ; <i32> [#uses=1]
- %tmp.156 = getelementptr %struct..s_annealing_sched* %annealing_sched, i64 0, i32 0 ; <i32*> [#uses=1]
- %tmp.157 = load i32* %tmp.156 ; <i32> [#uses=1]
- %tmp.158 = getelementptr %struct..s_annealing_sched* %annealing_sched, i64 0, i32 1 ; <float*> [#uses=1]
- %tmp.159 = load float* %tmp.158 ; <float> [#uses=1]
- %tmp.160 = getelementptr %struct..s_annealing_sched* %annealing_sched, i64 0, i32 2 ; <float*> [#uses=1]
- %tmp.161 = load float* %tmp.160 ; <float> [#uses=1]
- %tmp.162 = getelementptr %struct..s_annealing_sched* %annealing_sched, i64 0, i32 3 ; <float*> [#uses=1]
- %tmp.163 = load float* %tmp.162 ; <float> [#uses=1]
- %tmp.164 = getelementptr %struct..s_annealing_sched* %annealing_sched, i64 0, i32 4 ; <float*> [#uses=1]
- %tmp.165 = load float* %tmp.164 ; <float> [#uses=1]
- %tmp.166 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 0 ; <float*> [#uses=1]
- %tmp.167 = load float* %tmp.166 ; <float> [#uses=1]
- %tmp.168 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 1 ; <float*> [#uses=1]
- %tmp.169 = load float* %tmp.168 ; <float> [#uses=1]
- %tmp.170 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 2 ; <float*> [#uses=1]
- %tmp.171 = load float* %tmp.170 ; <float> [#uses=1]
- %tmp.172 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 3 ; <float*> [#uses=1]
- %tmp.173 = load float* %tmp.172 ; <float> [#uses=1]
- %tmp.174 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 4 ; <float*> [#uses=1]
- %tmp.175 = load float* %tmp.174 ; <float> [#uses=1]
- %tmp.176 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 5 ; <i32*> [#uses=1]
- %tmp.177 = load i32* %tmp.176 ; <i32> [#uses=1]
- %tmp.178 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 6 ; <i32*> [#uses=1]
- %tmp.179 = load i32* %tmp.178 ; <i32> [#uses=1]
- %tmp.181 = load i32* %tmp.118 ; <i32> [#uses=1]
- %tmp.182 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 8 ; <i32*> [#uses=1]
- %tmp.183 = load i32* %tmp.182 ; <i32> [#uses=1]
- %tmp.184 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 0 ; <i32*> [#uses=1]
- %tmp.185 = load i32* %tmp.184 ; <i32> [#uses=1]
- %tmp.186 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 1 ; <float*> [#uses=1]
- %tmp.187 = load float* %tmp.186 ; <float> [#uses=1]
- %tmp.188 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 2 ; <float*> [#uses=1]
- %tmp.189 = load float* %tmp.188 ; <float> [#uses=1]
- %tmp.190 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 3 ; <float*> [#uses=1]
- %tmp.191 = load float* %tmp.190 ; <float> [#uses=1]
- %tmp.192 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 4 ; <i32*> [#uses=1]
- %tmp.193 = load i32* %tmp.192 ; <i32> [#uses=1]
- %tmp.194 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 5 ; <i32*> [#uses=1]
- %tmp.195 = load i32* %tmp.194 ; <i32> [#uses=1]
- %tmp.196 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 6 ; <i16*> [#uses=1]
- %tmp.197 = load i16* %tmp.196 ; <i16> [#uses=1]
- %tmp.198 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 7 ; <i16*> [#uses=1]
- %tmp.199 = load i16* %tmp.198 ; <i16> [#uses=1]
- %tmp.200 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 8 ; <i16*> [#uses=1]
- %tmp.201 = load i16* %tmp.200 ; <i16> [#uses=1]
- %tmp.202 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 9 ; <float*> [#uses=1]
- %tmp.203 = load float* %tmp.202 ; <float> [#uses=1]
- %tmp.204 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 10 ; <float*> [#uses=1]
- %tmp.205 = load float* %tmp.204 ; <float> [#uses=1]
- %tmp.206 = load %struct..s_segment_inf** %segment_inf ; <%struct..s_segment_inf*> [#uses=1]
- %tmp.208 = load i32* %tmp.109 ; <i32> [#uses=1]
- %tmp.209 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 1 ; <float*> [#uses=1]
- %tmp.210 = load float* %tmp.209 ; <float> [#uses=1]
- %tmp.211 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 2 ; <float*> [#uses=1]
- %tmp.212 = load float* %tmp.211 ; <float> [#uses=1]
- %tmp.213 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 3 ; <float*> [#uses=1]
- %tmp.214 = load float* %tmp.213 ; <float> [#uses=1]
- %tmp.215 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 4 ; <float*> [#uses=1]
- %tmp.216 = load float* %tmp.215 ; <float> [#uses=1]
- %tmp.217 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 5 ; <float*> [#uses=1]
- %tmp.218 = load float* %tmp.217 ; <float> [#uses=1]
- %tmp.219 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 6 ; <float*> [#uses=1]
- %tmp.220 = load float* %tmp.219 ; <float> [#uses=1]
- %tmp.221 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 7 ; <float*> [#uses=1]
- %tmp.222 = load float* %tmp.221 ; <float> [#uses=1]
- %tmp.223 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 8 ; <float*> [#uses=1]
- %tmp.224 = load float* %tmp.223 ; <float> [#uses=1]
- %tmp.225 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 9 ; <float*> [#uses=1]
- %tmp.226 = load float* %tmp.225 ; <float> [#uses=1]
- %tmp.227 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 10 ; <float*> [#uses=1]
- %tmp.228 = load float* %tmp.227 ; <float> [#uses=1]
- call void @place_and_route( i32 %tmp.135, i32 %tmp.137, float %tmp.139, i32 %tmp.141, i32 %tmp.143, i8* %tmp.145, i32 %tmp.147, i32 %tmp.149, i8* %tmp.107, i8* %tmp.105, i8* %tmp.106, i8* %tmp.108, i32 %tmp.154, i32 %tmp.155, i32 %tmp.157, float %tmp.159, float %tmp.161, float %tmp.163, float %tmp.165, float %tmp.167, float %tmp.169, float %tmp.171, float %tmp.173, float %tmp.175, i32 %tmp.177, i32 %tmp.179, i32 %tmp.181, i32 %tmp.183, i32 %tmp.185, float %tmp.187, float %tmp.189, float %tmp.191, i32 %tmp.193, i32 %tmp.195, i16 %tmp.197, i16 %tmp.199, i16 %tmp.201, float %tmp.203, float %tmp.205, %struct..s_segment_inf* %tmp.206, i32 %tmp.208, float %tmp.210, float %tmp.212, float %tmp.214, float %tmp.216, float %tmp.218, float %tmp.220, float %tmp.222, float %tmp.224, float %tmp.226, float %tmp.228 )
- %tmp.231 = load i32* %show_graphics ; <i32> [#uses=1]
- %tmp.232 = icmp ne i32 %tmp.231, 0 ; <i1> [#uses=1]
- br i1 %tmp.232, label %then.2, label %endif.2
-
-then.2: ; preds = %entry
- br label %endif.2
-
-endif.2: ; preds = %then.2, %entry
- ret i32 0
-}
-
-declare i32 @printf(i8*, ...)
-
-declare void @place_and_route(i32, i32, float, i32, i32, i8*, i32, i32, i8*, i8*, i8*, i8*, i32, i32, i32, float, float, float, float, float, float, float, float, float, i32, i32, i32, i32, i32, float, float, float, i32, i32, i16, i16, i16, float, float, %struct..s_segment_inf*, i32, float, float, float, float, float, float, float, float, float, float)
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-30-BadFoldGEP.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-30-BadFoldGEP.ll
deleted file mode 100644
index 10d3a11..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-30-BadFoldGEP.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; RUN: llc < %s
-
-;; Date: May 28, 2003.
-;; From: test/Programs/External/SPEC/CINT2000/254.gap.llvm.bc
-;; Function: int %OpenOutput(sbyte* %filename.1)
-;;
-;; Error: A sequence of GEPs is folded incorrectly by llc during selection
-;; causing an assertion about a dynamic casting error.
-;; This code sequence was produced (correctly) by preselection
-;; from a nested pair of ConstantExpr getelementptrs.
-;; The code below is the output of preselection.
-;; The original ConstantExprs are included in a comment.
-;;
-;; Cause: FoldGetElemChain() was inserting an extra leading 0 even though
-;; the first instruction in the sequence contributes no indices.
-;; The next instruction contributes a leading non-zero so another
-;; zero should not be added before it!
-;;
- %FileType = type { i32, [256 x i8], i32, i32, i32, i32 }
-@OutputFiles = external global [16 x %FileType] ; <[16 x %FileType]*> [#uses=1]
-@Output = internal global %FileType* null ; <%FileType**> [#uses=1]
-
-define internal i32 @OpenOutput(i8* %filename.1) {
-entry:
- %tmp.0 = load %FileType** @Output ; <%FileType*> [#uses=1]
- %tmp.4 = getelementptr %FileType* %tmp.0, i64 1 ; <%FileType*> [#uses=1]
- %addrOfGlobal = getelementptr [16 x %FileType]* @OutputFiles, i64 0 ; <[16 x %FileType]*> [#uses=1]
- %constantGEP = getelementptr [16 x %FileType]* %addrOfGlobal, i64 1 ; <[16 x %FileType]*> [#uses=1]
- %constantGEP.upgrd.1 = getelementptr [16 x %FileType]* %constantGEP, i64 0, i64 0 ; <%FileType*> [#uses=1]
- %tmp.10 = icmp eq %FileType* %tmp.4, %constantGEP.upgrd.1 ; <i1> [#uses=1]
- br i1 %tmp.10, label %return, label %endif.0
-
-endif.0: ; preds = %entry
- ret i32 0
-
-return: ; preds = %entry
- ret i32 1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-30-BadPreselectPhi.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-30-BadPreselectPhi.ll
deleted file mode 100644
index f7c3e42..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2003-05-30-BadPreselectPhi.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s
-
-;; Date: May 28, 2003.
-;; From: test/Programs/SingleSource/richards_benchmark.c
-;; Function: struct task *handlerfn(struct packet *pkt)
-;;
-;; Error: PreSelection puts the arguments of the Phi just before
-;; the Phi instead of in predecessor blocks. This later
-;; causes llc to produces an invalid register <NULL VALUE>
-;; for the phi arguments.
-
- %struct..packet = type { %struct..packet*, i32, i32, i32, [4 x i8] }
- %struct..task = type { %struct..task*, i32, i32, %struct..packet*, i32, %struct..task* (%struct..packet*)*, i32, i32 }
-@v1 = external global i32 ; <i32*> [#uses=1]
-@v2 = external global i32 ; <i32*> [#uses=1]
-
-define %struct..task* @handlerfn(%struct..packet* %pkt.2) {
-entry:
- %tmp.1 = icmp ne %struct..packet* %pkt.2, null ; <i1> [#uses=1]
- br i1 %tmp.1, label %cond_false, label %cond_continue
-
-cond_false: ; preds = %entry
- br label %cond_continue
-
-cond_continue: ; preds = %cond_false, %entry
- %mem_tmp.0 = phi i32* [ @v2, %cond_false ], [ @v1, %entry ] ; <i32*> [#uses=1]
- %tmp.12 = bitcast i32* %mem_tmp.0 to %struct..packet* ; <%struct..packet*> [#uses=1]
- call void @append( %struct..packet* %pkt.2, %struct..packet* %tmp.12 )
- ret %struct..task* null
-}
-
-declare void @append(%struct..packet*, %struct..packet*)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2003-07-06-BadIntCmp.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2003-07-06-BadIntCmp.ll
deleted file mode 100644
index 1d1aad5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2003-07-06-BadIntCmp.ll
+++ /dev/null
@@ -1,51 +0,0 @@
-; RUN: llc < %s
-
-;; Date: May 28, 2003.
-;; From: test/Programs/MultiSource/Olden-perimeter/maketree.c
-;; Function: int CheckOutside(int x, int y)
-;;
-;; Note: The .ll code below for this regression test has identical
-;; behavior to the above function up to the error, but then prints
-;; true/false on the two branches.
-;;
-;; Error: llc generates a branch-on-xcc instead of branch-on-icc, which
-;; is wrong because the value being compared (int euclid = x*x + y*y)
-;; overflows, so that the 64-bit and 32-bit compares are not equal.
-
-@.str_1 = internal constant [6 x i8] c"true\0A\00" ; <[6 x i8]*> [#uses=1]
-@.str_2 = internal constant [7 x i8] c"false\0A\00" ; <[7 x i8]*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-define internal void @__main() {
-entry:
- ret void
-}
-
-define internal void @CheckOutside(i32 %x.1, i32 %y.1) {
-entry:
- %tmp.2 = mul i32 %x.1, %x.1 ; <i32> [#uses=1]
- %tmp.5 = mul i32 %y.1, %y.1 ; <i32> [#uses=1]
- %tmp.6 = add i32 %tmp.2, %tmp.5 ; <i32> [#uses=1]
- %tmp.8 = icmp sle i32 %tmp.6, 4194304 ; <i1> [#uses=1]
- br i1 %tmp.8, label %then, label %else
-
-then: ; preds = %entry
- %tmp.11 = call i32 (i8*, ...)* @printf( i8* getelementptr ([6 x i8]* @.str_1, i64 0, i64 0) ) ; <i32> [#uses=0]
- br label %UnifiedExitNode
-
-else: ; preds = %entry
- %tmp.13 = call i32 (i8*, ...)* @printf( i8* getelementptr ([7 x i8]* @.str_2, i64 0, i64 0) ) ; <i32> [#uses=0]
- br label %UnifiedExitNode
-
-UnifiedExitNode: ; preds = %else, %then
- ret void
-}
-
-define i32 @main() {
-entry:
- call void @__main( )
- call void @CheckOutside( i32 2097152, i32 2097152 )
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2003-07-07-BadLongConst.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2003-07-07-BadLongConst.ll
deleted file mode 100644
index 64312ba..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2003-07-07-BadLongConst.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s
-
-@.str_1 = internal constant [42 x i8] c" ui = %u (0x%x)\09\09UL-ui = %lld (0x%llx)\0A\00" ; <[42 x i8]*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-define internal i64 @getL() {
-entry:
- ret i64 -5787213826675591005
-}
-
-define i32 @main(i32 %argc.1, i8** %argv.1) {
-entry:
- %tmp.11 = call i64 @getL( ) ; <i64> [#uses=2]
- %tmp.5 = trunc i64 %tmp.11 to i32 ; <i32> [#uses=2]
- %tmp.23 = and i64 %tmp.11, -4294967296 ; <i64> [#uses=2]
- %tmp.16 = call i32 (i8*, ...)* @printf( i8* getelementptr ([42 x i8]* @.str_1, i64 0, i64 0), i32 %tmp.5, i32 %tmp.5, i64 %tmp.23, i64 %tmp.23 ) ; <i32> [#uses=0]
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2003-07-08-BadCastToBool.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2003-07-08-BadCastToBool.ll
deleted file mode 100644
index 8019caa..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2003-07-08-BadCastToBool.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s
-
-;; Date: Jul 8, 2003.
-;; From: test/Programs/MultiSource/Olden-perimeter
-;; Function: int %adj(uint %d.1, uint %ct.1)
-;;
-;; Errors: (1) cast-int-to-bool was being treated as a NOP (i.e., the int
-;; register was treated as effectively true if non-zero).
-;; This cannot be used for later boolean operations.
-;; (2) (A or NOT(B)) was being folded into A orn B, which is ok
-;; for bitwise operations but not booleans! For booleans,
-;; the result has to be compared with 0.
-
-@.str_1 = internal constant [30 x i8] c"d = %d, ct = %d, d ^ ct = %d\0A\00"
-
-declare i32 @printf(i8*, ...)
-
-define i32 @adj(i32 %d.1, i32 %ct.1) {
-entry:
- %tmp.19 = icmp eq i32 %ct.1, 2 ; <i1> [#uses=1]
- %tmp.22.not = trunc i32 %ct.1 to i1 ; <i1> [#uses=1]
- %tmp.221 = xor i1 %tmp.22.not, true ; <i1> [#uses=1]
- %tmp.26 = or i1 %tmp.19, %tmp.221 ; <i1> [#uses=1]
- %tmp.27 = zext i1 %tmp.26 to i32 ; <i32> [#uses=1]
- ret i32 %tmp.27
-}
-
-define i32 @main() {
-entry:
- %result = call i32 @adj( i32 3, i32 2 ) ; <i32> [#uses=1]
- %tmp.0 = call i32 (i8*, ...)* @printf( i8* getelementptr ([30 x i8]* @.str_1, i64 0, i64 0), i32 3, i32 2, i32 %result ) ; <i32> [#uses=0]
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll
deleted file mode 100644
index 4e6fe1c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc < %s
-
-;; Date: Jul 29, 2003.
-;; From: test/Programs/MultiSource/Ptrdist-bc
-;; Function: ---
-;; Global: %yy_ec = internal constant [256 x sbyte] ...
-;; A subset of this array is used in the test below.
-;;
-;; Error: Character '\07' was being emitted as '\a', at yy_ec[38].
-;; When loaded, this returned the value 97 ('a'), instead of 7.
-;;
-;; Incorrect LLC Output for the array yy_ec was:
-;; yy_ec_1094:
-;; .ascii "\000\001\001\001\001\001\001\001\001\002\003\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\002\004\005\001\001\006\a\001\b\t\n\v\f\r\016\017\020\020\020\020\020\020\020\020\020\020\001\021\022\023\024\001\001\025\025\025\025\025\025\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\026\027\030\031\032\001\033\034\035\036\037 !\"#$%&'()*+,-./$0$1$234\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001\001"
-;;
-
-@yy_ec = internal constant [6 x i8] c"\06\07\01\08\01\09" ; <[6 x i8]*> [#uses=1]
-@.str_3 = internal constant [8 x i8] c"[%d] = \00" ; <[8 x i8]*> [#uses=1]
-@.str_4 = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-define i32 @main() {
-entry:
- br label %loopentry
-
-loopentry: ; preds = %loopentry, %entry
- %i = phi i64 [ 0, %entry ], [ %inc.i, %loopentry ] ; <i64> [#uses=3]
- %cptr = getelementptr [6 x i8]* @yy_ec, i64 0, i64 %i ; <i8*> [#uses=1]
- %c = load i8* %cptr ; <i8> [#uses=1]
- %ignore = call i32 (i8*, ...)* @printf( i8* getelementptr ([8 x i8]* @.str_3, i64 0, i64 0), i64 %i ) ; <i32> [#uses=0]
- %ignore2 = call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @.str_4, i64 0, i64 0), i8 %c ) ; <i32> [#uses=0]
- %inc.i = add i64 %i, 1 ; <i64> [#uses=2]
- %done = icmp sle i64 %inc.i, 5 ; <i1> [#uses=1]
- br i1 %done, label %loopentry, label %exit.1
-
-exit.1: ; preds = %loopentry
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2004-02-08-UnwindSupport.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2004-02-08-UnwindSupport.ll
deleted file mode 100644
index 393062a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2004-02-08-UnwindSupport.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -enable-correct-eh-support
-
-define i32 @test() {
- unwind
-}
-
-define i32 @main() {
- %X = invoke i32 @test( )
- to label %cont unwind label %EH ; <i32> [#uses=0]
-
-cont: ; preds = %0
- ret i32 1
-
-EH: ; preds = %0
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2004-05-09-LiveVarPartialRegister.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2004-05-09-LiveVarPartialRegister.ll
deleted file mode 100644
index d4a4cf8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2004-05-09-LiveVarPartialRegister.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s
-@global_long_1 = linkonce global i64 7 ; <i64*> [#uses=1]
-@global_long_2 = linkonce global i64 49 ; <i64*> [#uses=1]
-
-define i32 @main() {
- %l1 = load i64* @global_long_1 ; <i64> [#uses=1]
- %l2 = load i64* @global_long_2 ; <i64> [#uses=1]
- %cond = icmp sle i64 %l1, %l2 ; <i1> [#uses=1]
- %cast2 = zext i1 %cond to i32 ; <i32> [#uses=1]
- %RV = sub i32 1, %cast2 ; <i32> [#uses=1]
- ret i32 %RV
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2005-01-18-SetUO-InfLoop.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2005-01-18-SetUO-InfLoop.ll
deleted file mode 100644
index 7fd2361..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2005-01-18-SetUO-InfLoop.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s
-
-define void @intersect_pixel() {
-entry:
- %tmp125 = fcmp uno double 0.000000e+00, 0.000000e+00 ; <i1> [#uses=1]
- %tmp126 = or i1 %tmp125, false ; <i1> [#uses=1]
- %tmp126.not = xor i1 %tmp126, true ; <i1> [#uses=1]
- %brmerge1 = or i1 %tmp126.not, false ; <i1> [#uses=1]
- br i1 %brmerge1, label %bb154, label %cond_false133
-
-cond_false133: ; preds = %entry
- ret void
-
-bb154: ; preds = %entry
- %tmp164 = icmp eq i32 0, 0 ; <i1> [#uses=0]
- ret void
-}
-
-declare i1 @llvm.isunordered.f64(double, double)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2005-04-09-GlobalInPHI.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2005-04-09-GlobalInPHI.ll
deleted file mode 100644
index 353e411..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2005-04-09-GlobalInPHI.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s
- %struct.TypHeader = type { i32, %struct.TypHeader**, [3 x i8], i8 }
-@.str_67 = external global [4 x i8] ; <[4 x i8]*> [#uses=1]
-@.str_87 = external global [17 x i8] ; <[17 x i8]*> [#uses=1]
-
-define void @PrBinop() {
-entry:
- br i1 false, label %cond_true, label %else.0
-
-cond_true: ; preds = %entry
- br label %else.0
-
-else.0: ; preds = %cond_true, %entry
- %tmp.167.1 = phi i32 [ ptrtoint ([17 x i8]* @.str_87 to i32), %entry ], [ 0, %cond_true ] ; <i32> [#uses=0]
- call void @Pr( i8* getelementptr ([4 x i8]* @.str_67, i32 0, i32 0), i32 0, i32 0 )
- ret void
-}
-
-declare void @Pr(i8*, i32, i32)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2005-07-12-memcpy-i64-length.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2005-07-12-memcpy-i64-length.ll
deleted file mode 100644
index 733202c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2005-07-12-memcpy-i64-length.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s
-; Test that llvm.memcpy works with a i64 length operand on all targets.
-
-declare void @llvm.memcpy.i64(i8*, i8*, i64, i32)
-
-define void @l12_l94_bc_divide_endif_2E_3_2E_ce() {
-newFuncRoot:
- tail call void @llvm.memcpy.i64( i8* null, i8* null, i64 0, i32 1 )
- unreachable
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2005-10-18-ZeroSizeStackObject.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2005-10-18-ZeroSizeStackObject.ll
deleted file mode 100644
index 08060bf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2005-10-18-ZeroSizeStackObject.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s
-
-define void @test() {
- %X = alloca { } ; <{ }*> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2005-10-21-longlonggtu.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2005-10-21-longlonggtu.ll
deleted file mode 100644
index 53a9cd0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2005-10-21-longlonggtu.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s
-
-define float @t(i64 %u_arg) {
- %u = bitcast i64 %u_arg to i64 ; <i64> [#uses=1]
- %tmp5 = add i64 %u, 9007199254740991 ; <i64> [#uses=1]
- %tmp = icmp ugt i64 %tmp5, 18014398509481982 ; <i1> [#uses=1]
- br i1 %tmp, label %T, label %F
-
-T: ; preds = %0
- ret float 1.000000e+00
-
-F: ; preds = %0
- call float @t( i64 0 ) ; <float>:1 [#uses=0]
- ret float 0.000000e+00
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2005-12-01-Crash.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2005-12-01-Crash.ll
deleted file mode 100644
index a9eedde..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2005-12-01-Crash.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s
-@str = external global [36 x i8] ; <[36 x i8]*> [#uses=0]
-@str.upgrd.1 = external global [29 x i8] ; <[29 x i8]*> [#uses=0]
-@str1 = external global [29 x i8] ; <[29 x i8]*> [#uses=0]
-@str2 = external global [29 x i8] ; <[29 x i8]*> [#uses=1]
-@str.upgrd.2 = external global [2 x i8] ; <[2 x i8]*> [#uses=0]
-@str3 = external global [2 x i8] ; <[2 x i8]*> [#uses=0]
-@str4 = external global [2 x i8] ; <[2 x i8]*> [#uses=0]
-@str5 = external global [2 x i8] ; <[2 x i8]*> [#uses=0]
-
-define void @printArgsNoRet(i32 %a1, float %a2, i8 %a3, double %a4, i8* %a5, i32 %a6, float %a7, i8 %a8, double %a9, i8* %a10, i32 %a11, float %a12, i8 %a13, double %a14, i8* %a15) {
-entry:
- %tmp17 = sext i8 %a13 to i32 ; <i32> [#uses=1]
- %tmp23 = call i32 (i8*, ...)* @printf( i8* getelementptr ([29 x i8]* @str2, i32 0, i64 0), i32 %a11, double 0.000000e+00, i32 %tmp17, double %a14, i32 0 ) ; <i32> [#uses=0]
- ret void
-}
-
-declare i32 @printf(i8*, ...)
-
-declare i32 @main(i32, i8**)
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2005-12-12-ExpandSextInreg.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2005-12-12-ExpandSextInreg.ll
deleted file mode 100644
index 349540f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2005-12-12-ExpandSextInreg.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s
-
-define i64 @test(i64 %A) {
- %B = trunc i64 %A to i8 ; <i8> [#uses=1]
- %C = sext i8 %B to i64 ; <i64> [#uses=1]
- ret i64 %C
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-01-12-BadSetCCFold.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-01-12-BadSetCCFold.ll
deleted file mode 100644
index 42e8ed0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-01-12-BadSetCCFold.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s
-; ModuleID = '2006-01-12-BadSetCCFold.ll'
- %struct.node_t = type { double*, %struct.node_t*, %struct.node_t**, double**, double*, i32, i32 }
-
-define void @main() {
-entry:
- br i1 false, label %then.2.i, label %endif.2.i
-
-then.2.i: ; preds = %entry
- br label %dealwithargs.exit
-
-endif.2.i: ; preds = %entry
- br i1 false, label %then.3.i, label %dealwithargs.exit
-
-then.3.i: ; preds = %endif.2.i
- br label %dealwithargs.exit
-
-dealwithargs.exit: ; preds = %then.3.i, %endif.2.i, %then.2.i
- %n_nodes.4 = phi i32 [ 64, %then.3.i ], [ 64, %then.2.i ], [ 64, %endif.2.i ] ; <i32> [#uses=1]
- %tmp.14.i1134.i.i = icmp sgt i32 %n_nodes.4, 1 ; <i1> [#uses=2]
- br i1 %tmp.14.i1134.i.i, label %no_exit.i12.i.i, label %fill_table.exit22.i.i
-
-no_exit.i12.i.i: ; preds = %no_exit.i12.i.i, %dealwithargs.exit
- br i1 false, label %fill_table.exit22.i.i, label %no_exit.i12.i.i
-
-fill_table.exit22.i.i: ; preds = %no_exit.i12.i.i, %dealwithargs.exit
- %cur_node.0.i8.1.i.i = phi %struct.node_t* [ undef, %dealwithargs.exit ], [ null, %no_exit.i12.i.i ] ; <%struct.node_t*> [#uses=0]
- br i1 %tmp.14.i1134.i.i, label %no_exit.i.preheader.i.i, label %make_tables.exit.i
-
-no_exit.i.preheader.i.i: ; preds = %fill_table.exit22.i.i
- ret void
-
-make_tables.exit.i: ; preds = %fill_table.exit22.i.i
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-01-18-InvalidBranchOpcodeAssert.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-01-18-InvalidBranchOpcodeAssert.ll
deleted file mode 100644
index f06d341..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-01-18-InvalidBranchOpcodeAssert.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s
-; This crashed the PPC backend.
-
-define void @test() {
- %tmp125 = fcmp uno double 0.000000e+00, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %tmp125, label %bb154, label %cond_false133
-
-cond_false133: ; preds = %0
- ret void
-
-bb154: ; preds = %0
- %tmp164 = icmp eq i32 0, 0 ; <i1> [#uses=0]
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-02-12-InsertLibcall.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-02-12-InsertLibcall.ll
deleted file mode 100644
index 5508272..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-02-12-InsertLibcall.ll
+++ /dev/null
@@ -1,60 +0,0 @@
-; RUN: llc < %s
-@G = external global i32 ; <i32*> [#uses=1]
-
-define void @encode_one_frame(i64 %tmp.2i) {
-entry:
- %tmp.9 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- br i1 %tmp.9, label %endif.0, label %shortcirc_next.0
-
-then.5.i: ; preds = %shortcirc_next.i
- %tmp.114.i = sdiv i64 %tmp.2i, 3 ; <i64> [#uses=1]
- %tmp.111.i = call i64 @lseek( i32 0, i64 %tmp.114.i, i32 1 ) ; <i64> [#uses=0]
- ret void
-
-shortcirc_next.0: ; preds = %entry
- ret void
-
-endif.0: ; preds = %entry
- %tmp.324.i = icmp eq i32 0, 0 ; <i1> [#uses=2]
- %tmp.362.i = icmp slt i32 0, 0 ; <i1> [#uses=1]
- br i1 %tmp.324.i, label %else.4.i, label %then.11.i37
-
-then.11.i37: ; preds = %endif.0
- ret void
-
-else.4.i: ; preds = %endif.0
- br i1 %tmp.362.i, label %else.5.i, label %then.12.i
-
-then.12.i: ; preds = %else.4.i
- ret void
-
-else.5.i: ; preds = %else.4.i
- br i1 %tmp.324.i, label %then.0.i40, label %then.17.i
-
-then.17.i: ; preds = %else.5.i
- ret void
-
-then.0.i40: ; preds = %else.5.i
- %tmp.8.i42 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- br i1 %tmp.8.i42, label %else.1.i56, label %then.1.i52
-
-then.1.i52: ; preds = %then.0.i40
- ret void
-
-else.1.i56: ; preds = %then.0.i40
- %tmp.28.i = load i32* @G ; <i32> [#uses=1]
- %tmp.29.i = icmp eq i32 %tmp.28.i, 1 ; <i1> [#uses=1]
- br i1 %tmp.29.i, label %shortcirc_next.i, label %shortcirc_done.i
-
-shortcirc_next.i: ; preds = %else.1.i56
- %tmp.34.i = icmp eq i32 0, 3 ; <i1> [#uses=1]
- br i1 %tmp.34.i, label %then.5.i, label %endif.5.i
-
-shortcirc_done.i: ; preds = %else.1.i56
- ret void
-
-endif.5.i: ; preds = %shortcirc_next.i
- ret void
-}
-
-declare i64 @lseek(i32, i64, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-03-01-dagcombineinfloop.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-03-01-dagcombineinfloop.ll
deleted file mode 100644
index 2a6cc0c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-03-01-dagcombineinfloop.ll
+++ /dev/null
@@ -1,95 +0,0 @@
-; RUN: llc < %s
-; Infinite loop in the dag combiner, reduced from 176.gcc.
-%struct._obstack_chunk = type { i8*, %struct._obstack_chunk*, [4 x i8] }
- %struct.anon = type { i32 }
- %struct.lang_decl = type opaque
- %struct.lang_type = type { i32, [1 x %struct.tree_node*] }
- %struct.obstack = type { i32, %struct._obstack_chunk*, i8*, i8*, i8*, i32, i32, %struct._obstack_chunk* (...)*, void (...)*, i8*, i8 }
- %struct.rtx_def = type { i16, i8, i8, [1 x %struct.anon] }
- %struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, i8, i8, i8, i8 }
- %struct.tree_decl = type { [12 x i8], i8*, i32, %struct.tree_node*, i32, i8, i8, i8, i8, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct.anon, { %struct.rtx_def* }, %struct.tree_node*, %struct.lang_decl* }
- %struct.tree_list = type { [12 x i8], %struct.tree_node*, %struct.tree_node* }
- %struct.tree_node = type { %struct.tree_decl }
- %struct.tree_type = type { [12 x i8], %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i32, i8, i8, i8, i8, i32, %struct.tree_node*, %struct.tree_node*, %struct.anon, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.obstack*, %struct.lang_type* }
-@void_type_node = external global %struct.tree_node* ; <%struct.tree_node**> [#uses=1]
-@char_type_node = external global %struct.tree_node* ; <%struct.tree_node**> [#uses=1]
-@short_integer_type_node = external global %struct.tree_node* ; <%struct.tree_node**> [#uses=1]
-@short_unsigned_type_node = external global %struct.tree_node* ; <%struct.tree_node**> [#uses=1]
-@float_type_node = external global %struct.tree_node* ; <%struct.tree_node**> [#uses=1]
-@signed_char_type_node = external global %struct.tree_node* ; <%struct.tree_node**> [#uses=1]
-@unsigned_char_type_node = external global %struct.tree_node* ; <%struct.tree_node**> [#uses=1]
-
-define fastcc i32 @self_promoting_args_p(%struct.tree_node* %parms) {
-entry:
- %tmp915 = icmp eq %struct.tree_node* %parms, null ; <i1> [#uses=1]
- br i1 %tmp915, label %return, label %cond_true92.preheader
-
-cond_true: ; preds = %cond_true92
- %tmp9.not = icmp ne %struct.tree_node* %tmp2, %tmp7 ; <i1> [#uses=1]
- %tmp14 = icmp eq %struct.tree_node* %tmp2, null ; <i1> [#uses=1]
- %bothcond = or i1 %tmp9.not, %tmp14 ; <i1> [#uses=1]
- br i1 %bothcond, label %return, label %cond_next18
-
-cond_next12: ; preds = %cond_true92
- %tmp14.old = icmp eq %struct.tree_node* %tmp2, null ; <i1> [#uses=1]
- br i1 %tmp14.old, label %return, label %cond_next18
-
-cond_next18: ; preds = %cond_next12, %cond_true
- %tmp20 = bitcast %struct.tree_node* %tmp2 to %struct.tree_type* ; <%struct.tree_type*> [#uses=1]
- %tmp21 = getelementptr %struct.tree_type* %tmp20, i32 0, i32 17 ; <%struct.tree_node**> [#uses=1]
- %tmp22 = load %struct.tree_node** %tmp21 ; <%struct.tree_node*> [#uses=6]
- %tmp24 = icmp eq %struct.tree_node* %tmp22, %tmp23 ; <i1> [#uses=1]
- br i1 %tmp24, label %return, label %cond_next28
-
-cond_next28: ; preds = %cond_next18
- %tmp30 = bitcast %struct.tree_node* %tmp2 to %struct.tree_common* ; <%struct.tree_common*> [#uses=1]
- %tmp = getelementptr %struct.tree_common* %tmp30, i32 0, i32 2 ; <i8*> [#uses=1]
- %tmp.upgrd.1 = bitcast i8* %tmp to i32* ; <i32*> [#uses=1]
- %tmp.upgrd.2 = load i32* %tmp.upgrd.1 ; <i32> [#uses=1]
- %tmp32 = trunc i32 %tmp.upgrd.2 to i8 ; <i8> [#uses=1]
- %tmp33 = icmp eq i8 %tmp32, 7 ; <i1> [#uses=1]
- br i1 %tmp33, label %cond_true34, label %cond_next84
-
-cond_true34: ; preds = %cond_next28
- %tmp40 = icmp eq %struct.tree_node* %tmp22, %tmp39 ; <i1> [#uses=1]
- %tmp49 = icmp eq %struct.tree_node* %tmp22, %tmp48 ; <i1> [#uses=1]
- %bothcond6 = or i1 %tmp40, %tmp49 ; <i1> [#uses=1]
- %tmp58 = icmp eq %struct.tree_node* %tmp22, %tmp57 ; <i1> [#uses=1]
- %bothcond7 = or i1 %bothcond6, %tmp58 ; <i1> [#uses=1]
- %tmp67 = icmp eq %struct.tree_node* %tmp22, %tmp66 ; <i1> [#uses=1]
- %bothcond8 = or i1 %bothcond7, %tmp67 ; <i1> [#uses=1]
- %tmp76 = icmp eq %struct.tree_node* %tmp22, %tmp75 ; <i1> [#uses=1]
- %bothcond9 = or i1 %bothcond8, %tmp76 ; <i1> [#uses=2]
- %brmerge = or i1 %bothcond9, %tmp.upgrd.6 ; <i1> [#uses=1]
- %bothcond9.upgrd.3 = zext i1 %bothcond9 to i32 ; <i32> [#uses=1]
- %.mux = xor i32 %bothcond9.upgrd.3, 1 ; <i32> [#uses=1]
- br i1 %brmerge, label %return, label %cond_true92
-
-cond_next84: ; preds = %cond_next28
- br i1 %tmp.upgrd.6, label %return, label %cond_true92
-
-cond_true92.preheader: ; preds = %entry
- %tmp7 = load %struct.tree_node** @void_type_node ; <%struct.tree_node*> [#uses=1]
- %tmp23 = load %struct.tree_node** @float_type_node ; <%struct.tree_node*> [#uses=1]
- %tmp39 = load %struct.tree_node** @char_type_node ; <%struct.tree_node*> [#uses=1]
- %tmp48 = load %struct.tree_node** @signed_char_type_node ; <%struct.tree_node*> [#uses=1]
- %tmp57 = load %struct.tree_node** @unsigned_char_type_node ; <%struct.tree_node*> [#uses=1]
- %tmp66 = load %struct.tree_node** @short_integer_type_node ; <%struct.tree_node*> [#uses=1]
- %tmp75 = load %struct.tree_node** @short_unsigned_type_node ; <%struct.tree_node*> [#uses=1]
- br label %cond_true92
-
-cond_true92: ; preds = %cond_true92.preheader, %cond_next84, %cond_true34
- %t.0.0 = phi %struct.tree_node* [ %parms, %cond_true92.preheader ], [ %tmp6, %cond_true34 ], [ %tmp6, %cond_next84 ] ; <%struct.tree_node*> [#uses=2]
- %tmp.upgrd.4 = bitcast %struct.tree_node* %t.0.0 to %struct.tree_list* ; <%struct.tree_list*> [#uses=1]
- %tmp.upgrd.5 = getelementptr %struct.tree_list* %tmp.upgrd.4, i32 0, i32 2 ; <%struct.tree_node**> [#uses=1]
- %tmp2 = load %struct.tree_node** %tmp.upgrd.5 ; <%struct.tree_node*> [#uses=5]
- %tmp4 = bitcast %struct.tree_node* %t.0.0 to %struct.tree_common* ; <%struct.tree_common*> [#uses=1]
- %tmp5 = getelementptr %struct.tree_common* %tmp4, i32 0, i32 0 ; <%struct.tree_node**> [#uses=1]
- %tmp6 = load %struct.tree_node** %tmp5 ; <%struct.tree_node*> [#uses=3]
- %tmp.upgrd.6 = icmp eq %struct.tree_node* %tmp6, null ; <i1> [#uses=3]
- br i1 %tmp.upgrd.6, label %cond_true, label %cond_next12
-
-return: ; preds = %cond_next84, %cond_true34, %cond_next18, %cond_next12, %cond_true, %entry
- %retval.0 = phi i32 [ 1, %entry ], [ 1, %cond_next84 ], [ %.mux, %cond_true34 ], [ 0, %cond_next18 ], [ 0, %cond_next12 ], [ 0, %cond_true ] ; <i32> [#uses=1]
- ret i32 %retval.0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-04-26-SetCCAnd.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-04-26-SetCCAnd.ll
deleted file mode 100644
index 8465b82..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-04-26-SetCCAnd.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc < %s
-; PR748
-@G = external global i16 ; <i16*> [#uses=1]
-
-define void @OmNewObjHdr() {
-entry:
- br i1 false, label %endif.4, label %then.0
-
-then.0: ; preds = %entry
- ret void
-
-endif.4: ; preds = %entry
- br i1 false, label %else.3, label %shortcirc_next.3
-
-shortcirc_next.3: ; preds = %endif.4
- ret void
-
-else.3: ; preds = %endif.4
- switch i32 0, label %endif.10 [
- i32 5001, label %then.10
- i32 -5008, label %then.10
- ]
-
-then.10: ; preds = %else.3, %else.3
- %tmp.112 = load i16* null ; <i16> [#uses=2]
- %tmp.113 = load i16* @G ; <i16> [#uses=2]
- %tmp.114 = icmp ugt i16 %tmp.112, %tmp.113 ; <i1> [#uses=1]
- %tmp.120 = icmp ult i16 %tmp.112, %tmp.113 ; <i1> [#uses=1]
- %bothcond = and i1 %tmp.114, %tmp.120 ; <i1> [#uses=1]
- br i1 %bothcond, label %else.4, label %then.11
-
-then.11: ; preds = %then.10
- ret void
-
-else.4: ; preds = %then.10
- ret void
-
-endif.10: ; preds = %else.3
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-04-28-Sign-extend-bool.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-04-28-Sign-extend-bool.ll
deleted file mode 100644
index 22d8f99..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-04-28-Sign-extend-bool.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s
-
-define i32 @test(i32 %tmp93) {
- %tmp98 = shl i32 %tmp93, 31 ; <i32> [#uses=1]
- %tmp99 = ashr i32 %tmp98, 31 ; <i32> [#uses=1]
- %tmp99.upgrd.1 = trunc i32 %tmp99 to i8 ; <i8> [#uses=1]
- %tmp99100 = sext i8 %tmp99.upgrd.1 to i32 ; <i32> [#uses=1]
- ret i32 %tmp99100
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-05-06-GEP-Cast-Sink-Crash.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-05-06-GEP-Cast-Sink-Crash.ll
deleted file mode 100644
index 1a9fa9f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-05-06-GEP-Cast-Sink-Crash.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s
-%struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
- %struct.SYMBOL_TABLE_ENTRY = type { [9 x i8], [9 x i8], i32, i32, i32, %struct.SYMBOL_TABLE_ENTRY* }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
-@str14 = external global [6 x i8] ; <[6 x i8]*> [#uses=0]
-
-declare void @fprintf(i32, ...)
-
-define void @OUTPUT_TABLE(%struct.SYMBOL_TABLE_ENTRY* %SYM_TAB) {
-entry:
- %tmp11 = getelementptr %struct.SYMBOL_TABLE_ENTRY* %SYM_TAB, i32 0, i32 1, i32 0 ; <i8*> [#uses=2]
- %tmp.i = bitcast i8* %tmp11 to i8* ; <i8*> [#uses=1]
- br label %bb.i
-
-bb.i: ; preds = %cond_next.i, %entry
- %s1.0.i = phi i8* [ %tmp.i, %entry ], [ null, %cond_next.i ] ; <i8*> [#uses=0]
- br i1 false, label %cond_true.i31, label %cond_next.i
-
-cond_true.i31: ; preds = %bb.i
- call void (i32, ...)* @fprintf( i32 0, i8* %tmp11, i8* null )
- ret void
-
-cond_next.i: ; preds = %bb.i
- br i1 false, label %bb.i, label %bb19.i
-
-bb19.i: ; preds = %cond_next.i
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-06-12-LowerSwitchCrash.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-06-12-LowerSwitchCrash.ll
deleted file mode 100644
index a3720a9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-06-12-LowerSwitchCrash.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -O0
-
-define float @test(i32 %tmp12771278) {
- switch i32 %tmp12771278, label %bb1279 [
- ]
-
-bb1279: ; preds = %0
- ret float 1.000000e+00
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-06-13-ComputeMaskedBitsCrash.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-06-13-ComputeMaskedBitsCrash.ll
deleted file mode 100644
index bd922b3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-06-13-ComputeMaskedBitsCrash.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s -O0
-
-%struct.cl_perfunc_opts = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i32, i32, i32, i32, i32, i32 }
-@cl_pf_opts = external global %struct.cl_perfunc_opts ; <%struct.cl_perfunc_opts*> [#uses=2]
-
-define void @set_flags_from_O() {
-entry:
- %tmp22 = icmp sgt i32 0, 0 ; <i1> [#uses=1]
- br i1 %tmp22, label %cond_true23, label %cond_next159
-
-cond_true23: ; preds = %entry
- %tmp138 = getelementptr %struct.cl_perfunc_opts* @cl_pf_opts, i32 0, i32 8 ; <i8*> [#uses=1]
- %tmp138.upgrd.1 = bitcast i8* %tmp138 to i32* ; <i32*> [#uses=2]
- %tmp139 = load i32* %tmp138.upgrd.1 ; <i32> [#uses=1]
- %tmp140 = shl i32 1, 27 ; <i32> [#uses=1]
- %tmp141 = and i32 %tmp140, 134217728 ; <i32> [#uses=1]
- %tmp142 = and i32 %tmp139, -134217729 ; <i32> [#uses=1]
- %tmp143 = or i32 %tmp142, %tmp141 ; <i32> [#uses=1]
- store i32 %tmp143, i32* %tmp138.upgrd.1
- %tmp144 = getelementptr %struct.cl_perfunc_opts* @cl_pf_opts, i32 0, i32 8 ; <i8*> [#uses=1]
- %tmp144.upgrd.2 = bitcast i8* %tmp144 to i32* ; <i32*> [#uses=1]
- %tmp145 = load i32* %tmp144.upgrd.2 ; <i32> [#uses=1]
- %tmp146 = shl i32 %tmp145, 22 ; <i32> [#uses=1]
- %tmp147 = lshr i32 %tmp146, 31 ; <i32> [#uses=1]
- %tmp147.upgrd.3 = trunc i32 %tmp147 to i8 ; <i8> [#uses=1]
- %tmp148 = icmp eq i8 %tmp147.upgrd.3, 0 ; <i1> [#uses=1]
- br i1 %tmp148, label %cond_true149, label %cond_next159
-
-cond_true149: ; preds = %cond_true23
- %tmp150 = bitcast i8* null to i32* ; <i32*> [#uses=0]
- ret void
-
-cond_next159: ; preds = %cond_true23, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-06-28-SimplifySetCCCrash.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-06-28-SimplifySetCCCrash.ll
deleted file mode 100644
index c4f2fb0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-06-28-SimplifySetCCCrash.ll
+++ /dev/null
@@ -1,279 +0,0 @@
-; RUN: llc < %s
-%struct.rtunion = type { i64 }
- %struct.rtx_def = type { i16, i8, i8, [1 x %struct.rtunion] }
-@ix86_cpu = external global i32 ; <i32*> [#uses=1]
-@which_alternative = external global i32 ; <i32*> [#uses=3]
-
-declare fastcc i32 @recog()
-
-define void @athlon_fp_unit_ready_cost() {
-entry:
- %tmp = icmp slt i32 0, 0 ; <i1> [#uses=1]
- br i1 %tmp, label %cond_true.i, label %cond_true
-
-cond_true: ; preds = %entry
- ret void
-
-cond_true.i: ; preds = %entry
- %tmp8.i = tail call fastcc i32 @recog( ) ; <i32> [#uses=1]
- switch i32 %tmp8.i, label %UnifiedReturnBlock [
- i32 -1, label %bb2063
- i32 19, label %bb2035
- i32 20, label %bb2035
- i32 21, label %bb2035
- i32 23, label %bb2035
- i32 24, label %bb2035
- i32 27, label %bb2035
- i32 32, label %bb2035
- i32 33, label %bb1994
- i32 35, label %bb2035
- i32 36, label %bb1994
- i32 90, label %bb1948
- i32 94, label %bb1948
- i32 95, label %bb1948
- i32 101, label %bb1648
- i32 102, label %bb1648
- i32 103, label %bb1648
- i32 104, label %bb1648
- i32 133, label %bb1419
- i32 135, label %bb1238
- i32 136, label %bb1238
- i32 137, label %bb1238
- i32 138, label %bb1238
- i32 139, label %bb1201
- i32 140, label %bb1201
- i32 141, label %bb1154
- i32 142, label %bb1126
- i32 144, label %bb1201
- i32 145, label %bb1126
- i32 146, label %bb1201
- i32 147, label %bb1126
- i32 148, label %bb1201
- i32 149, label %bb1126
- i32 150, label %bb1201
- i32 151, label %bb1126
- i32 152, label %bb1096
- i32 153, label %bb1096
- i32 154, label %bb1096
- i32 157, label %bb1096
- i32 158, label %bb1096
- i32 159, label %bb1096
- i32 162, label %bb1096
- i32 163, label %bb1096
- i32 164, label %bb1096
- i32 167, label %bb1201
- i32 168, label %bb1201
- i32 170, label %bb1201
- i32 171, label %bb1201
- i32 173, label %bb1201
- i32 174, label %bb1201
- i32 176, label %bb1201
- i32 177, label %bb1201
- i32 179, label %bb993
- i32 180, label %bb993
- i32 181, label %bb993
- i32 182, label %bb993
- i32 183, label %bb993
- i32 184, label %bb993
- i32 365, label %bb1126
- i32 366, label %bb1126
- i32 367, label %bb1126
- i32 368, label %bb1126
- i32 369, label %bb1126
- i32 370, label %bb1126
- i32 371, label %bb1126
- i32 372, label %bb1126
- i32 373, label %bb1126
- i32 384, label %bb1126
- i32 385, label %bb1126
- i32 386, label %bb1126
- i32 387, label %bb1126
- i32 388, label %bb1126
- i32 389, label %bb1126
- i32 390, label %bb1126
- i32 391, label %bb1126
- i32 392, label %bb1126
- i32 525, label %bb919
- i32 526, label %bb839
- i32 528, label %bb919
- i32 529, label %bb839
- i32 531, label %cond_next6.i119
- i32 532, label %cond_next6.i97
- i32 533, label %cond_next6.i81
- i32 534, label %bb495
- i32 536, label %cond_next6.i81
- i32 537, label %cond_next6.i81
- i32 538, label %bb396
- i32 539, label %bb288
- i32 541, label %bb396
- i32 542, label %bb396
- i32 543, label %bb396
- i32 544, label %bb396
- i32 545, label %bb189
- i32 546, label %cond_next6.i
- i32 547, label %bb189
- i32 548, label %cond_next6.i
- i32 549, label %bb189
- i32 550, label %cond_next6.i
- i32 551, label %bb189
- i32 552, label %cond_next6.i
- i32 553, label %bb189
- i32 554, label %cond_next6.i
- i32 555, label %bb189
- i32 556, label %cond_next6.i
- i32 557, label %bb189
- i32 558, label %cond_next6.i
- i32 618, label %bb40
- i32 619, label %bb18
- i32 620, label %bb40
- i32 621, label %bb10
- i32 622, label %bb10
- ]
-
-bb10: ; preds = %cond_true.i, %cond_true.i
- ret void
-
-bb18: ; preds = %cond_true.i
- ret void
-
-bb40: ; preds = %cond_true.i, %cond_true.i
- ret void
-
-cond_next6.i: ; preds = %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i
- ret void
-
-bb189: ; preds = %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i
- ret void
-
-bb288: ; preds = %cond_true.i
- ret void
-
-bb396: ; preds = %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i
- ret void
-
-bb495: ; preds = %cond_true.i
- ret void
-
-cond_next6.i81: ; preds = %cond_true.i, %cond_true.i, %cond_true.i
- ret void
-
-cond_next6.i97: ; preds = %cond_true.i
- ret void
-
-cond_next6.i119: ; preds = %cond_true.i
- %tmp.i126 = icmp eq i16 0, 78 ; <i1> [#uses=1]
- br i1 %tmp.i126, label %cond_next778, label %bb802
-
-cond_next778: ; preds = %cond_next6.i119
- %tmp781 = icmp eq i32 0, 1 ; <i1> [#uses=1]
- br i1 %tmp781, label %cond_next784, label %bb790
-
-cond_next784: ; preds = %cond_next778
- %tmp785 = load i32* @ix86_cpu ; <i32> [#uses=1]
- %tmp786 = icmp eq i32 %tmp785, 5 ; <i1> [#uses=1]
- br i1 %tmp786, label %UnifiedReturnBlock, label %bb790
-
-bb790: ; preds = %cond_next784, %cond_next778
- %tmp793 = icmp eq i32 0, 1 ; <i1> [#uses=0]
- ret void
-
-bb802: ; preds = %cond_next6.i119
- ret void
-
-bb839: ; preds = %cond_true.i, %cond_true.i
- ret void
-
-bb919: ; preds = %cond_true.i, %cond_true.i
- ret void
-
-bb993: ; preds = %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i
- ret void
-
-bb1096: ; preds = %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i
- ret void
-
-bb1126: ; preds = %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i
- ret void
-
-bb1154: ; preds = %cond_true.i
- ret void
-
-bb1201: ; preds = %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i
- ret void
-
-bb1238: ; preds = %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i
- ret void
-
-bb1419: ; preds = %cond_true.i
- ret void
-
-bb1648: ; preds = %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i
- %tmp1650 = load i32* @which_alternative ; <i32> [#uses=1]
- switch i32 %tmp1650, label %bb1701 [
- i32 0, label %cond_next1675
- i32 1, label %cond_next1675
- i32 2, label %cond_next1675
- ]
-
-cond_next1675: ; preds = %bb1648, %bb1648, %bb1648
- ret void
-
-bb1701: ; preds = %bb1648
- %tmp1702 = load i32* @which_alternative ; <i32> [#uses=1]
- switch i32 %tmp1702, label %bb1808 [
- i32 0, label %cond_next1727
- i32 1, label %cond_next1727
- i32 2, label %cond_next1727
- ]
-
-cond_next1727: ; preds = %bb1701, %bb1701, %bb1701
- ret void
-
-bb1808: ; preds = %bb1701
- %bothcond696 = or i1 false, false ; <i1> [#uses=1]
- br i1 %bothcond696, label %bb1876, label %cond_next1834
-
-cond_next1834: ; preds = %bb1808
- ret void
-
-bb1876: ; preds = %bb1808
- %tmp1877signed = load i32* @which_alternative ; <i32> [#uses=4]
- %tmp1877 = bitcast i32 %tmp1877signed to i32 ; <i32> [#uses=1]
- %bothcond699 = icmp ult i32 %tmp1877, 2 ; <i1> [#uses=1]
- %tmp1888 = icmp eq i32 %tmp1877signed, 2 ; <i1> [#uses=1]
- %bothcond700 = or i1 %bothcond699, %tmp1888 ; <i1> [#uses=1]
- %bothcond700.not = xor i1 %bothcond700, true ; <i1> [#uses=1]
- %tmp1894 = icmp eq i32 %tmp1877signed, 3 ; <i1> [#uses=1]
- %bothcond701 = or i1 %tmp1894, %bothcond700.not ; <i1> [#uses=1]
- %bothcond702 = or i1 %bothcond701, false ; <i1> [#uses=1]
- br i1 %bothcond702, label %UnifiedReturnBlock, label %cond_next1902
-
-cond_next1902: ; preds = %bb1876
- switch i32 %tmp1877signed, label %cond_next1937 [
- i32 0, label %bb1918
- i32 1, label %bb1918
- i32 2, label %bb1918
- ]
-
-bb1918: ; preds = %cond_next1902, %cond_next1902, %cond_next1902
- ret void
-
-cond_next1937: ; preds = %cond_next1902
- ret void
-
-bb1948: ; preds = %cond_true.i, %cond_true.i, %cond_true.i
- ret void
-
-bb1994: ; preds = %cond_true.i, %cond_true.i
- ret void
-
-bb2035: ; preds = %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i, %cond_true.i
- ret void
-
-bb2063: ; preds = %cond_true.i
- ret void
-
-UnifiedReturnBlock: ; preds = %bb1876, %cond_next784, %cond_true.i
- %UnifiedRetVal = phi i32 [ 100, %bb1876 ], [ 100, %cond_true.i ], [ 4, %cond_next784 ] ; <i32> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-07-03-schedulers.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-07-03-schedulers.ll
deleted file mode 100644
index 756bd5d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-07-03-schedulers.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc < %s -pre-RA-sched=default
-; RUN: llc < %s -pre-RA-sched=list-burr
-; RUN: llc < %s -pre-RA-sched=fast
-; PR859
-
-; The top-down schedulers are excluded here because they don't yet support
-; targets that use physreg defs.
-
-declare i32 @printf(i8*, i32, float)
-
-define i32 @testissue(i32 %i, float %x, float %y) {
- br label %bb1
-
-bb1: ; preds = %bb1, %0
- %x1 = fmul float %x, %y ; <float> [#uses=1]
- %y1 = fmul float %y, 7.500000e-01 ; <float> [#uses=1]
- %z1 = fadd float %x1, %y1 ; <float> [#uses=1]
- %x2 = fmul float %x, 5.000000e-01 ; <float> [#uses=1]
- %y2 = fmul float %y, 0x3FECCCCCC0000000 ; <float> [#uses=1]
- %z2 = fadd float %x2, %y2 ; <float> [#uses=1]
- %z3 = fadd float %z1, %z2 ; <float> [#uses=1]
- %i1 = shl i32 %i, 3 ; <i32> [#uses=1]
- %j1 = add i32 %i, 7 ; <i32> [#uses=1]
- %m1 = add i32 %i1, %j1 ; <i32> [#uses=2]
- %b = icmp sle i32 %m1, 6 ; <i1> [#uses=1]
- br i1 %b, label %bb1, label %bb2
-
-bb2: ; preds = %bb1
- %Msg = inttoptr i64 0 to i8* ; <i8*> [#uses=1]
- call i32 @printf( i8* %Msg, i32 %m1, float %z3 ) ; <i32>:1 [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-08-30-CoalescerCrash.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-08-30-CoalescerCrash.ll
deleted file mode 100644
index cbe8b15..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-08-30-CoalescerCrash.ll
+++ /dev/null
@@ -1,112 +0,0 @@
-; RUN: llc < %s
-%struct.CUMULATIVE_ARGS = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.VEC_edge = type { i32, i32, [1 x %struct.edge_def*] }
- %struct._obstack_chunk = type { i8*, %struct._obstack_chunk*, [4 x i8] }
- %struct.basic_block_def = type { %struct.rtx_def*, %struct.rtx_def*, %struct.tree_node*, %struct.VEC_edge*, %struct.VEC_edge*, %struct.bitmap_head_def*, %struct.bitmap_head_def*, i8*, %struct.loop*, [2 x %struct.et_node*], %struct.basic_block_def*, %struct.basic_block_def*, %struct.reorder_block_def*, %struct.bb_ann_d*, i64, i32, i32, i32, i32 }
- %struct.bb_ann_d = type { %struct.tree_node*, i8, %struct.edge_prediction* }
- %struct.bitmap_element_def = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, i32, [4 x i32] }
- %struct.bitmap_head_def = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, i32, %struct.bitmap_obstack* }
- %struct.bitmap_obstack = type { %struct.bitmap_element_def*, %struct.bitmap_head_def*, %struct.obstack }
- %struct.cost_pair = type { %struct.iv_cand*, i32, %struct.bitmap_head_def* }
- %struct.dataflow_d = type { %struct.varray_head_tag*, [2 x %struct.tree_node*] }
- %struct.def_operand_ptr = type { %struct.tree_node** }
- %struct.def_optype_d = type { i32, [1 x %struct.def_operand_ptr] }
- %struct.edge_def = type { %struct.basic_block_def*, %struct.basic_block_def*, %struct.edge_def_insns, i8*, %struct.location_t*, i32, i32, i64, i32 }
- %struct.edge_def_insns = type { %struct.rtx_def* }
- %struct.edge_prediction = type { %struct.edge_prediction*, %struct.edge_def*, i32, i32 }
- %struct.eh_status = type opaque
- %struct.emit_status = type { i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack*, i32, %struct.location_t, i32, i8*, %struct.rtx_def** }
- %struct.et_node = type opaque
- %struct.expr_status = type { i32, i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
- %struct.function = type { %struct.eh_status*, %struct.expr_status*, %struct.emit_status*, %struct.varasm_status*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.function*, i32, i32, i32, i32, %struct.rtx_def*, %struct.CUMULATIVE_ARGS, %struct.rtx_def*, %struct.rtx_def*, %struct.initial_value_struct*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, i8, i32, i64, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct.varray_head_tag*, %struct.temp_slot*, i32, %struct.var_refs_queue*, i32, i32, %struct.rtvec_def*, %struct.tree_node*, i32, i32, i32, %struct.machine_function*, i32, i32, i1, i1, %struct.language_function*, %struct.rtx_def*, i32, i32, i32, i32, %struct.location_t, %struct.varray_head_tag*, %struct.tree_node*, i8, i8, i8 }
- %struct.htab = type { i32 (i8*)*, i32 (i8*, i8*)*, void (i8*)*, i8**, i32, i32, i32, i32, i32, i8* (i32, i32)*, void (i8*)*, i8*, i8* (i8*, i32, i32)*, void (i8*, i8*)*, i32 }
- %struct.initial_value_struct = type opaque
- %struct.iv = type { %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i1, i1, i32 }
- %struct.iv_cand = type { i32, i1, i32, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.iv*, i32 }
- %struct.iv_use = type { i32, i32, %struct.iv*, %struct.tree_node*, %struct.tree_node**, %struct.bitmap_head_def*, i32, %struct.cost_pair*, %struct.iv_cand* }
- %struct.ivopts_data = type { %struct.loop*, %struct.htab*, i32, %struct.version_info*, %struct.bitmap_head_def*, i32, %struct.varray_head_tag*, %struct.varray_head_tag*, %struct.bitmap_head_def*, i1 }
- %struct.lang_decl = type opaque
- %struct.language_function = type opaque
- %struct.location_t = type { i8*, i32 }
- %struct.loop = type { i32, %struct.basic_block_def*, %struct.basic_block_def*, %struct.basic_block_def*, %struct.lpt_decision, i32, i32, %struct.edge_def**, i32, %struct.basic_block_def*, %struct.basic_block_def*, i32, %struct.edge_def**, i32, %struct.edge_def**, i32, %struct.simple_bitmap_def*, i32, %struct.loop**, i32, %struct.loop*, %struct.loop*, %struct.loop*, %struct.loop*, i32, i8*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, i32, %struct.tree_node*, %struct.tree_node*, %struct.nb_iter_bound*, %struct.edge_def*, i1 }
- %struct.lpt_decision = type { i32, i32 }
- %struct.machine_function = type { %struct.stack_local_entry*, i8*, %struct.rtx_def*, i32, i32, i32, i32, i32 }
- %struct.nb_iter_bound = type { %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.nb_iter_bound* }
- %struct.obstack = type { i32, %struct._obstack_chunk*, i8*, i8*, i8*, i32, i32, %struct._obstack_chunk* (i8*, i32)*, void (i8*, %struct._obstack_chunk*)*, i8*, i8 }
- %struct.reorder_block_def = type { %struct.rtx_def*, %struct.rtx_def*, %struct.basic_block_def*, %struct.basic_block_def*, %struct.basic_block_def*, i32, i32, i32 }
- %struct.rtvec_def = type { i32, [1 x %struct.rtx_def*] }
- %struct.rtx_def = type { i16, i8, i8, %struct.u }
- %struct.sequence_stack = type { %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack* }
- %struct.simple_bitmap_def = type { i32, i32, i32, [1 x i64] }
- %struct.stack_local_entry = type opaque
- %struct.stmt_ann_d = type { %struct.tree_ann_common_d, i8, %struct.basic_block_def*, %struct.stmt_operands_d, %struct.dataflow_d*, %struct.bitmap_head_def*, i32 }
- %struct.stmt_operands_d = type { %struct.def_optype_d*, %struct.def_optype_d*, %struct.v_may_def_optype_d*, %struct.vuse_optype_d*, %struct.v_may_def_optype_d* }
- %struct.temp_slot = type opaque
- %struct.tree_ann_common_d = type { i32, i8*, %struct.tree_node* }
- %struct.tree_ann_d = type { %struct.stmt_ann_d }
- %struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, %struct.tree_ann_d*, i8, i8, i8, i8, i8 }
- %struct.tree_decl = type { %struct.tree_common, %struct.location_t, i32, %struct.tree_node*, i8, i8, i8, i8, i8, i8, i8, i32, %struct.tree_decl_u1, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, i32, %struct.tree_decl_u2, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_decl* }
- %struct.tree_decl_u1 = type { i64 }
- %struct.tree_decl_u2 = type { %struct.function* }
- %struct.tree_node = type { %struct.tree_decl }
- %struct.u = type { [1 x i64] }
- %struct.v_def_use_operand_type_t = type { %struct.tree_node*, %struct.tree_node* }
- %struct.v_may_def_optype_d = type { i32, [1 x %struct.v_def_use_operand_type_t] }
- %struct.var_refs_queue = type { %struct.rtx_def*, i32, i32, %struct.var_refs_queue* }
- %struct.varasm_status = type opaque
- %struct.varray_head_tag = type { i32, i32, i32, i8*, %struct.u }
- %struct.version_info = type { %struct.tree_node*, %struct.iv*, i1, i32, i1 }
- %struct.vuse_optype_d = type { i32, [1 x %struct.tree_node*] }
-
-define i1 @determine_use_iv_cost(%struct.ivopts_data* %data, %struct.iv_use* %use, %struct.iv_cand* %cand) {
-entry:
- switch i32 0, label %bb91 [
- i32 0, label %bb
- i32 1, label %bb6
- i32 3, label %cond_next135
- ]
-
-bb: ; preds = %entry
- ret i1 false
-
-bb6: ; preds = %entry
- br i1 false, label %bb87, label %cond_next27
-
-cond_next27: ; preds = %bb6
- br i1 false, label %cond_true30, label %cond_next55
-
-cond_true30: ; preds = %cond_next27
- br i1 false, label %cond_next41, label %cond_true35
-
-cond_true35: ; preds = %cond_true30
- ret i1 false
-
-cond_next41: ; preds = %cond_true30
- %tmp44 = call i32 @force_var_cost( %struct.ivopts_data* %data, %struct.tree_node* null, %struct.bitmap_head_def** null ) ; <i32> [#uses=2]
- %tmp46 = udiv i32 %tmp44, 5 ; <i32> [#uses=1]
- call void @set_use_iv_cost( %struct.ivopts_data* %data, %struct.iv_use* %use, %struct.iv_cand* %cand, i32 %tmp46, %struct.bitmap_head_def* null )
- %tmp44.off = add i32 %tmp44, -50000000 ; <i32> [#uses=1]
- %tmp52 = icmp ugt i32 %tmp44.off, 4 ; <i1> [#uses=1]
- %tmp52.upgrd.1 = zext i1 %tmp52 to i32 ; <i32> [#uses=1]
- br label %bb87
-
-cond_next55: ; preds = %cond_next27
- ret i1 false
-
-bb87: ; preds = %cond_next41, %bb6
- %tmp2.0 = phi i32 [ %tmp52.upgrd.1, %cond_next41 ], [ 1, %bb6 ] ; <i32> [#uses=0]
- ret i1 false
-
-bb91: ; preds = %entry
- ret i1 false
-
-cond_next135: ; preds = %entry
- %tmp193 = call i1 @determine_use_iv_cost_generic( %struct.ivopts_data* %data, %struct.iv_use* %use, %struct.iv_cand* %cand ) ; <i1> [#uses=0]
- ret i1 false
-}
-
-declare void @set_use_iv_cost(%struct.ivopts_data*, %struct.iv_use*, %struct.iv_cand*, i32, %struct.bitmap_head_def*)
-
-declare i32 @force_var_cost(%struct.ivopts_data*, %struct.tree_node*, %struct.bitmap_head_def**)
-
-declare i1 @determine_use_iv_cost_generic(%struct.ivopts_data*, %struct.iv_use*, %struct.iv_cand*)
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-09-02-LocalAllocCrash.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-09-02-LocalAllocCrash.ll
deleted file mode 100644
index 4b332b3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-09-02-LocalAllocCrash.ll
+++ /dev/null
@@ -1,117 +0,0 @@
-; RUN: llc < %s -regalloc=local
-
-%struct.CHESS_POSITION = type { i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i32, i32, i8, i8, [64 x i8], i8, i8, i8, i8, i8 }
-@search = external global %struct.CHESS_POSITION ; <%struct.CHESS_POSITION*> [#uses=2]
-@bishop_shift_rl45 = external global [64 x i32] ; <[64 x i32]*> [#uses=1]
-@bishop_shift_rr45 = external global [64 x i32] ; <[64 x i32]*> [#uses=1]
-@black_outpost = external global [64 x i8] ; <[64 x i8]*> [#uses=1]
-@bishop_mobility_rl45 = external global [64 x [256 x i32]] ; <[64 x [256 x i32]]*> [#uses=1]
-@bishop_mobility_rr45 = external global [64 x [256 x i32]] ; <[64 x [256 x i32]]*> [#uses=1]
-
-declare fastcc i32 @FirstOne()
-
-define fastcc void @Evaluate() {
-entry:
- br i1 false, label %cond_false186, label %cond_true
-
-cond_true: ; preds = %entry
- ret void
-
-cond_false186: ; preds = %entry
- br i1 false, label %cond_true293, label %bb203
-
-bb203: ; preds = %cond_false186
- ret void
-
-cond_true293: ; preds = %cond_false186
- br i1 false, label %cond_true298, label %cond_next317
-
-cond_true298: ; preds = %cond_true293
- br i1 false, label %cond_next518, label %cond_true397.preheader
-
-cond_next317: ; preds = %cond_true293
- ret void
-
-cond_true397.preheader: ; preds = %cond_true298
- ret void
-
-cond_next518: ; preds = %cond_true298
- br i1 false, label %bb1069, label %cond_true522
-
-cond_true522: ; preds = %cond_next518
- ret void
-
-bb1069: ; preds = %cond_next518
- br i1 false, label %cond_next1131, label %bb1096
-
-bb1096: ; preds = %bb1069
- ret void
-
-cond_next1131: ; preds = %bb1069
- br i1 false, label %cond_next1207, label %cond_true1150
-
-cond_true1150: ; preds = %cond_next1131
- ret void
-
-cond_next1207: ; preds = %cond_next1131
- br i1 false, label %cond_next1219, label %cond_true1211
-
-cond_true1211: ; preds = %cond_next1207
- ret void
-
-cond_next1219: ; preds = %cond_next1207
- br i1 false, label %cond_true1223, label %cond_next1283
-
-cond_true1223: ; preds = %cond_next1219
- br i1 false, label %cond_true1254, label %cond_true1264
-
-cond_true1254: ; preds = %cond_true1223
- br i1 false, label %bb1567, label %cond_true1369.preheader
-
-cond_true1264: ; preds = %cond_true1223
- ret void
-
-cond_next1283: ; preds = %cond_next1219
- ret void
-
-cond_true1369.preheader: ; preds = %cond_true1254
- ret void
-
-bb1567: ; preds = %cond_true1254
- %tmp1580 = load i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 3) ; <i64> [#uses=1]
- %tmp1591 = load i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 4) ; <i64> [#uses=1]
- %tmp1572 = tail call fastcc i32 @FirstOne( ) ; <i32> [#uses=5]
- %tmp1582 = getelementptr [64 x i32]* @bishop_shift_rl45, i32 0, i32 %tmp1572 ; <i32*> [#uses=1]
- %tmp1583 = load i32* %tmp1582 ; <i32> [#uses=1]
- %tmp1583.upgrd.1 = trunc i32 %tmp1583 to i8 ; <i8> [#uses=1]
- %shift.upgrd.2 = zext i8 %tmp1583.upgrd.1 to i64 ; <i64> [#uses=1]
- %tmp1584 = lshr i64 %tmp1580, %shift.upgrd.2 ; <i64> [#uses=1]
- %tmp1584.upgrd.3 = trunc i64 %tmp1584 to i32 ; <i32> [#uses=1]
- %tmp1585 = and i32 %tmp1584.upgrd.3, 255 ; <i32> [#uses=1]
- %gep.upgrd.4 = zext i32 %tmp1585 to i64 ; <i64> [#uses=1]
- %tmp1587 = getelementptr [64 x [256 x i32]]* @bishop_mobility_rl45, i32 0, i32 %tmp1572, i64 %gep.upgrd.4 ; <i32*> [#uses=1]
- %tmp1588 = load i32* %tmp1587 ; <i32> [#uses=1]
- %tmp1593 = getelementptr [64 x i32]* @bishop_shift_rr45, i32 0, i32 %tmp1572 ; <i32*> [#uses=1]
- %tmp1594 = load i32* %tmp1593 ; <i32> [#uses=1]
- %tmp1594.upgrd.5 = trunc i32 %tmp1594 to i8 ; <i8> [#uses=1]
- %shift.upgrd.6 = zext i8 %tmp1594.upgrd.5 to i64 ; <i64> [#uses=1]
- %tmp1595 = lshr i64 %tmp1591, %shift.upgrd.6 ; <i64> [#uses=1]
- %tmp1595.upgrd.7 = trunc i64 %tmp1595 to i32 ; <i32> [#uses=1]
- %tmp1596 = and i32 %tmp1595.upgrd.7, 255 ; <i32> [#uses=1]
- %gep.upgrd.8 = zext i32 %tmp1596 to i64 ; <i64> [#uses=1]
- %tmp1598 = getelementptr [64 x [256 x i32]]* @bishop_mobility_rr45, i32 0, i32 %tmp1572, i64 %gep.upgrd.8 ; <i32*> [#uses=1]
- %tmp1599 = load i32* %tmp1598 ; <i32> [#uses=1]
- %tmp1600.neg = sub i32 0, %tmp1588 ; <i32> [#uses=1]
- %tmp1602 = sub i32 %tmp1600.neg, %tmp1599 ; <i32> [#uses=1]
- %tmp1604 = getelementptr [64 x i8]* @black_outpost, i32 0, i32 %tmp1572 ; <i8*> [#uses=1]
- %tmp1605 = load i8* %tmp1604 ; <i8> [#uses=1]
- %tmp1606 = icmp eq i8 %tmp1605, 0 ; <i1> [#uses=1]
- br i1 %tmp1606, label %cond_next1637, label %cond_true1607
-
-cond_true1607: ; preds = %bb1567
- ret void
-
-cond_next1637: ; preds = %bb1567
- %tmp1662 = sub i32 %tmp1602, 0 ; <i32> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-09-06-SwitchLowering.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-09-06-SwitchLowering.ll
deleted file mode 100644
index 3d592b3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-09-06-SwitchLowering.ll
+++ /dev/null
@@ -1,96 +0,0 @@
-; RUN: llc < %s
-
-define void @foo() {
- br label %cond_true813.i
-
-cond_true813.i: ; preds = %0
- br i1 false, label %cond_true818.i, label %cond_next1146.i
-
-cond_true818.i: ; preds = %cond_true813.i
- br i1 false, label %recog_memoized.exit52, label %cond_next1146.i
-
-recog_memoized.exit52: ; preds = %cond_true818.i
- switch i32 0, label %bb886.i.preheader [
- i32 0, label %bb907.i
- i32 44, label %bb866.i
- i32 103, label %bb874.i
- i32 114, label %bb874.i
- ]
-
-bb857.i: ; preds = %bb886.i, %bb866.i
- %tmp862.i494.24 = phi i8* [ null, %bb866.i ], [ %tmp862.i494.26, %bb886.i ] ; <i8*> [#uses=4]
- switch i32 0, label %bb886.i.preheader [
- i32 0, label %bb907.i
- i32 44, label %bb866.i
- i32 103, label %bb874.i
- i32 114, label %bb874.i
- ]
-
-bb866.i.loopexit: ; preds = %bb874.i
- br label %bb866.i
-
-bb866.i.loopexit31: ; preds = %cond_true903.i
- br label %bb866.i
-
-bb866.i: ; preds = %bb866.i.loopexit31, %bb866.i.loopexit, %bb857.i, %recog_memoized.exit52
- br i1 false, label %bb907.i, label %bb857.i
-
-bb874.i.preheader.loopexit: ; preds = %cond_true903.i, %cond_true903.i
- ret void
-
-bb874.i: ; preds = %bb857.i, %bb857.i, %recog_memoized.exit52, %recog_memoized.exit52
- %tmp862.i494.25 = phi i8* [ %tmp862.i494.24, %bb857.i ], [ %tmp862.i494.24, %bb857.i ], [ undef, %recog_memoized.exit52 ], [ undef, %recog_memoized.exit52 ] ; <i8*> [#uses=1]
- switch i32 0, label %bb886.i.preheader.loopexit [
- i32 0, label %bb907.i
- i32 44, label %bb866.i.loopexit
- i32 103, label %bb874.i.backedge
- i32 114, label %bb874.i.backedge
- ]
-
-bb874.i.backedge: ; preds = %bb874.i, %bb874.i
- ret void
-
-bb886.i.preheader.loopexit: ; preds = %bb874.i
- ret void
-
-bb886.i.preheader: ; preds = %bb857.i, %recog_memoized.exit52
- %tmp862.i494.26 = phi i8* [ undef, %recog_memoized.exit52 ], [ %tmp862.i494.24, %bb857.i ] ; <i8*> [#uses=1]
- br label %bb886.i
-
-bb886.i: ; preds = %cond_true903.i, %bb886.i.preheader
- br i1 false, label %bb857.i, label %cond_true903.i
-
-cond_true903.i: ; preds = %bb886.i
- switch i32 0, label %bb886.i [
- i32 0, label %bb907.i
- i32 44, label %bb866.i.loopexit31
- i32 103, label %bb874.i.preheader.loopexit
- i32 114, label %bb874.i.preheader.loopexit
- ]
-
-bb907.i: ; preds = %cond_true903.i, %bb874.i, %bb866.i, %bb857.i, %recog_memoized.exit52
- %tmp862.i494.0 = phi i8* [ %tmp862.i494.24, %bb857.i ], [ null, %bb866.i ], [ undef, %recog_memoized.exit52 ], [ %tmp862.i494.25, %bb874.i ], [ null, %cond_true903.i ] ; <i8*> [#uses=1]
- br i1 false, label %cond_next1146.i, label %cond_true910.i
-
-cond_true910.i: ; preds = %bb907.i
- ret void
-
-cond_next1146.i: ; preds = %bb907.i, %cond_true818.i, %cond_true813.i
- %tmp862.i494.1 = phi i8* [ %tmp862.i494.0, %bb907.i ], [ undef, %cond_true818.i ], [ undef, %cond_true813.i ] ; <i8*> [#uses=0]
- ret void
-
-bb2060.i: ; No predecessors!
- br i1 false, label %cond_true2064.i, label %bb2067.i
-
-cond_true2064.i: ; preds = %bb2060.i
- unreachable
-
-bb2067.i: ; preds = %bb2060.i
- ret void
-
-cond_next3473: ; No predecessors!
- ret void
-
-cond_next3521: ; No predecessors!
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-10-27-CondFolding.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-10-27-CondFolding.ll
deleted file mode 100644
index 51902c8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-10-27-CondFolding.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s
-
-define void @start_pass_huff(i32 %gather_statistics) {
-entry:
- %tmp = icmp eq i32 %gather_statistics, 0 ; <i1> [#uses=1]
- br i1 false, label %cond_next22, label %bb166
-
-cond_next22: ; preds = %entry
- %bothcond = and i1 false, %tmp ; <i1> [#uses=1]
- br i1 %bothcond, label %bb34, label %bb46
-
-bb34: ; preds = %cond_next22
- ret void
-
-bb46: ; preds = %cond_next22
- ret void
-
-bb166: ; preds = %entry
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-10-29-Crash.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-10-29-Crash.ll
deleted file mode 100644
index 7dcb52c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-10-29-Crash.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s
-
-define void @form_component_prediction(i32 %dy) {
-entry:
- %tmp7 = and i32 %dy, 1 ; <i32> [#uses=1]
- %tmp27 = icmp eq i32 %tmp7, 0 ; <i1> [#uses=1]
- br i1 false, label %cond_next30, label %bb115
-
-cond_next30: ; preds = %entry
- ret void
-
-bb115: ; preds = %entry
- %bothcond1 = or i1 %tmp27, false ; <i1> [#uses=1]
- br i1 %bothcond1, label %bb228, label %cond_next125
-
-cond_next125: ; preds = %bb115
- ret void
-
-bb228: ; preds = %bb115
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2006-11-20-DAGCombineCrash.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2006-11-20-DAGCombineCrash.ll
deleted file mode 100644
index 26d0f4f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2006-11-20-DAGCombineCrash.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s
-; PR1011
-%struct.mng_data = type { i8* (%struct.mng_data*, i32)*, i32, i32, i32, i8, i8, i32, i32, i32, i32, i32 }
-
-define void @mng_display_bgr565() {
-entry:
- br i1 false, label %bb.preheader, label %return
-
-bb.preheader: ; preds = %entry
- br i1 false, label %cond_true48, label %cond_next80
-
-cond_true48: ; preds = %bb.preheader
- %tmp = load i8* null ; <i8> [#uses=1]
- %tmp51 = zext i8 %tmp to i16 ; <i16> [#uses=1]
- %tmp99 = load i8* null ; <i8> [#uses=1]
- %tmp54 = bitcast i8 %tmp99 to i8 ; <i8> [#uses=1]
- %tmp54.upgrd.1 = zext i8 %tmp54 to i32 ; <i32> [#uses=1]
- %tmp55 = lshr i32 %tmp54.upgrd.1, 3 ; <i32> [#uses=1]
- %tmp55.upgrd.2 = trunc i32 %tmp55 to i16 ; <i16> [#uses=1]
- %tmp52 = shl i16 %tmp51, 5 ; <i16> [#uses=1]
- %tmp56 = and i16 %tmp55.upgrd.2, 28 ; <i16> [#uses=1]
- %tmp57 = or i16 %tmp56, %tmp52 ; <i16> [#uses=1]
- %tmp60 = zext i16 %tmp57 to i32 ; <i32> [#uses=1]
- %tmp62 = xor i32 0, 65535 ; <i32> [#uses=1]
- %tmp63 = mul i32 %tmp60, %tmp62 ; <i32> [#uses=1]
- %tmp65 = add i32 0, %tmp63 ; <i32> [#uses=1]
- %tmp69 = add i32 0, %tmp65 ; <i32> [#uses=1]
- %tmp70 = lshr i32 %tmp69, 16 ; <i32> [#uses=1]
- %tmp70.upgrd.3 = trunc i32 %tmp70 to i16 ; <i16> [#uses=1]
- %tmp75 = lshr i16 %tmp70.upgrd.3, 8 ; <i16> [#uses=1]
- %tmp75.upgrd.4 = trunc i16 %tmp75 to i8 ; <i8> [#uses=1]
- %tmp76 = lshr i8 %tmp75.upgrd.4, 5 ; <i8> [#uses=1]
- store i8 %tmp76, i8* null
- ret void
-
-cond_next80: ; preds = %bb.preheader
- ret void
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2007-01-15-LoadSelectCycle.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2007-01-15-LoadSelectCycle.ll
deleted file mode 100644
index 255b120..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2007-01-15-LoadSelectCycle.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s
-; PR1114
-
-declare i1 @foo()
-
-define i32 @test(i32* %A, i32* %B) {
- %a = load i32* %A
- %b = load i32* %B
- %cond = call i1 @foo()
- %c = select i1 %cond, i32 %a, i32 %b
- ret i32 %c
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2007-02-25-invoke.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2007-02-25-invoke.ll
deleted file mode 100644
index 6e20eaa..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2007-02-25-invoke.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s
-
-; PR1224
-
-declare i32 @test()
-define i32 @test2() {
- %A = invoke i32 @test() to label %invcont unwind label %blat
-invcont:
- ret i32 %A
-blat:
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2007-04-08-MultipleFrameIndices.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2007-04-08-MultipleFrameIndices.ll
deleted file mode 100644
index 339f0f7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2007-04-08-MultipleFrameIndices.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s
-; XFAIL: sparc-sun-solaris2
-; PR1308
-; PR1557
-
-define i32 @stuff(i32, ...) {
- %foo = alloca i8*
- %bar = alloca i32*
- %A = call i32 asm sideeffect "inline asm $0 $2 $3 $4", "=r,0,i,m,m"( i32 0, i32 1, i8** %foo, i32** %bar )
- ret i32 %A
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2007-04-17-lsr-crash.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2007-04-17-lsr-crash.ll
deleted file mode 100644
index 98f87e5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2007-04-17-lsr-crash.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s
-
-define void @foo(i32 %inTextSize) {
-entry:
- br label %bb236.outer
-
-cond_next193: ; preds = %bb236
- %tmp211 = add i32 %inTextSize_addr.1.ph17, -2 ; <i32> [#uses=1]
- br i1 false, label %cond_next232, label %cond_true227
-
-cond_true227: ; preds = %cond_next193
- ret void
-
-cond_next232: ; preds = %cond_next193
- %indvar.next49 = add i32 %indvar48, 1 ; <i32> [#uses=1]
- br label %bb236.outer
-
-bb236.outer: ; preds = %cond_next232, %entry
- %indvar48 = phi i32 [ %indvar.next49, %cond_next232 ], [ 0, %entry ] ; <i32> [#uses=2]
- %inTextSize_addr.1.ph17 = phi i32 [ %tmp211, %cond_next232 ], [ %inTextSize, %entry ] ; <i32> [#uses=3]
- %tmp.50 = sub i32 0, %indvar48 ; <i32> [#uses=1]
- %tmp219 = icmp eq i32 %tmp.50, 0 ; <i1> [#uses=1]
- br i1 %tmp219, label %bb236.us, label %bb236
-
-bb236.us: ; preds = %bb236.outer
- %inTextSize_addr.1.us = add i32 0, %inTextSize_addr.1.ph17 ; <i32> [#uses=0]
- ret void
-
-bb236: ; preds = %bb236.outer
- %tmp238 = icmp eq i32 %inTextSize_addr.1.ph17, 0 ; <i1> [#uses=1]
- br i1 %tmp238, label %exit, label %cond_next193
-
-exit: ; preds = %bb236
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2007-04-27-InlineAsm-X-Dest.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2007-04-27-InlineAsm-X-Dest.ll
deleted file mode 100644
index af522dc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2007-04-27-InlineAsm-X-Dest.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s
-
-; Test that we can have an "X" output constraint.
-
-define void @test(i16 * %t) {
- call void asm sideeffect "foo $0", "=*X,~{dirflag},~{fpsr},~{flags},~{memory}"( i16* %t )
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2007-04-27-LargeMemObject.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2007-04-27-LargeMemObject.ll
deleted file mode 100644
index f2c9b7f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2007-04-27-LargeMemObject.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s
-
- %struct..0anon = type { [100 x i32] }
-
-define void @test() {
-entry:
- %currfpu = alloca %struct..0anon, align 16 ; <%struct..0anon*> [#uses=2]
- %mxcsr = alloca %struct..0anon, align 16 ; <%struct..0anon*> [#uses=1]
- call void asm sideeffect "fnstenv $0", "=*m,~{dirflag},~{fpsr},~{flags}"( %struct..0anon* %currfpu )
- call void asm sideeffect "$0 $1", "=*m,*m,~{dirflag},~{fpsr},~{flags}"( %struct..0anon* %mxcsr, %struct..0anon* %currfpu )
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2007-04-30-LandingPadBranchFolding.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2007-04-30-LandingPadBranchFolding.ll
deleted file mode 100644
index 568b88f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2007-04-30-LandingPadBranchFolding.ll
+++ /dev/null
@@ -1,59 +0,0 @@
-; RUN: llc < %s
-; PR1228
-
- "struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >::_Alloc_hider" = type { i8* }
- "struct.std::locale" = type { "struct.std::locale::_Impl"* }
- "struct.std::locale::_Impl" = type { i32, "struct.std::locale::facet"**, i32, "struct.std::locale::facet"**, i8** }
- "struct.std::locale::facet" = type { i32 (...)**, i32 }
- "struct.std::string" = type { "struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >::_Alloc_hider" }
-
-define void @_ZNKSt6locale4nameEv("struct.std::string"* %agg.result) {
-entry:
- %tmp105 = icmp eq i8* null, null ; <i1> [#uses=1]
- br i1 %tmp105, label %cond_true, label %cond_true222
-
-cond_true: ; preds = %entry
- invoke void @_ZNSs14_M_replace_auxEjjjc( )
- to label %cond_next1328 unwind label %cond_true1402
-
-cond_true222: ; preds = %cond_true222, %entry
- %tmp207 = call i32 @strcmp( ) ; <i32> [#uses=1]
- %tmp208 = icmp eq i32 %tmp207, 0 ; <i1> [#uses=2]
- %bothcond1480 = and i1 %tmp208, false ; <i1> [#uses=1]
- br i1 %bothcond1480, label %cond_true222, label %cond_next226.loopexit
-
-cond_next226.loopexit: ; preds = %cond_true222
- %phitmp = xor i1 %tmp208, true ; <i1> [#uses=1]
- br i1 %phitmp, label %cond_false280, label %cond_true235
-
-cond_true235: ; preds = %cond_next226.loopexit
- invoke void @_ZNSs6assignEPKcj( )
- to label %cond_next1328 unwind label %cond_true1402
-
-cond_false280: ; preds = %cond_next226.loopexit
- invoke void @_ZNSs7reserveEj( )
- to label %invcont282 unwind label %cond_true1402
-
-invcont282: ; preds = %cond_false280
- invoke void @_ZNSs6appendEPKcj( )
- to label %invcont317 unwind label %cond_true1402
-
-invcont317: ; preds = %invcont282
- ret void
-
-cond_next1328: ; preds = %cond_true235, %cond_true
- ret void
-
-cond_true1402: ; preds = %invcont282, %cond_false280, %cond_true235, %cond_true
- ret void
-}
-
-declare void @_ZNSs14_M_replace_auxEjjjc()
-
-declare i32 @strcmp()
-
-declare void @_ZNSs6assignEPKcj()
-
-declare void @_ZNSs7reserveEj()
-
-declare void @_ZNSs6appendEPKcj()
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2007-05-03-EHTypeInfo.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2007-05-03-EHTypeInfo.ll
deleted file mode 100644
index bb774b4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2007-05-03-EHTypeInfo.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -enable-eh
-
- %struct.exception = type { i8, i8, i32, i8*, i8*, i32, i8* }
-@program_error = external global %struct.exception ; <%struct.exception*> [#uses=1]
-
-define void @typeinfo() {
-entry:
- %eh_typeid = tail call i32 @llvm.eh.typeid.for.i32( i8* getelementptr (%struct.exception* @program_error, i32 0, i32 0) ) ; <i32> [#uses=0]
- ret void
-}
-
-declare i32 @llvm.eh.typeid.for.i32(i8*)
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2007-05-15-InfiniteRecursion.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2007-05-15-InfiniteRecursion.ll
deleted file mode 100644
index b989819..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2007-05-15-InfiniteRecursion.ll
+++ /dev/null
@@ -1,90 +0,0 @@
-; RUN: llc < %s
-
- %struct.AVClass = type { i8*, i8* (i8*)*, %struct.AVOption* }
- %struct.AVCodec = type { i8*, i32, i32, i32, i32 (%struct.AVCodecContext*)*, i32 (%struct.AVCodecContext*, i8*, i32, i8*)*, i32 (%struct.AVCodecContext*)*, i32 (%struct.AVCodecContext*, i8*, i32*, i8*, i32)*, i32, %struct.AVCodec*, void (%struct.AVCodecContext*)*, %struct.AVRational*, i32* }
- %struct.AVCodecContext = type { %struct.AVClass*, i32, i32, i32, i32, i32, i8*, i32, %struct.AVRational, i32, i32, i32, i32, i32, void (%struct.AVCodecContext*, %struct.AVFrame*, i32*, i32, i32, i32)*, i32, i32, i32, i32, i32, i32, i32, float, float, i32, i32, i32, i32, float, i32, i32, i32, %struct.AVCodec*, i8*, i32, i32, void (%struct.AVCodecContext*, i8*, i32, i32)*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, [32 x i8], i32, i32, i32, i32, i32, i32, i32, float, i32, i32 (%struct.AVCodecContext*, %struct.AVFrame*)*, void (%struct.AVCodecContext*, %struct.AVFrame*)*, i32, i32, i32, i32, i8*, i8*, float, float, i32, %struct.RcOverride*, i32, i8*, i32, i32, i32, float, float, float, float, i32, float, float, float, float, float, i32, i32, i32, i32*, i32, i32, i32, i32, %struct.AVRational, %struct.AVFrame*, i32, i32, [4 x i64], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 (%struct.AVCodecContext*, i32*)*, i32, i32, i32, i32, i32, i32, i8*, i32, i32, i32, i32, i32, i32, i16*, i16*, i32, i32, i32, i32, %struct.AVPaletteControl*, i32, i32 (%struct.AVCodecContext*, %struct.AVFrame*)*, i32, i32, i32, i32, i32, i32, i32, i32 (%struct.AVCodecContext*, i32 (%struct.AVCodecContext*, i8*)*, i8**, i32*, i32)*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i64 }
- %struct.AVEvalExpr = type opaque
- %struct.AVFrame = type { [4 x i8*], [4 x i32], [4 x i8*], i32, i32, i64, i32, i32, i32, i32, i32, i8*, i32, i8*, [2 x [2 x i16]*], i32*, i8, i8*, [4 x i64], i32, i32, i32, i32, i32, %struct.AVPanScan*, i32, i32, i16*, [2 x i8*] }
- %struct.AVOption = type opaque
- %struct.AVPaletteControl = type { i32, [256 x i32] }
- %struct.AVPanScan = type { i32, i32, i32, [3 x [2 x i16]] }
- %struct.AVRational = type { i32, i32 }
- %struct.DSPContext = type { void (i16*, i8*, i32)*, void (i16*, i8*, i8*, i32)*, void (i16*, i8*, i32)*, void (i16*, i8*, i32)*, void (i16*, i8*, i32)*, void (i8*, i16*, i32)*, void (i8*, i16*, i32)*, void (i8*, i8*, i32, i32, i32, i32, i32)*, void (i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)*, void (i16*)*, i32 (i8*, i32)*, i32 (i8*, i32)*, [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], [5 x i32 (i8*, i8*, i8*, i32, i32)*], i32 (i8*, i16*, i32)*, [4 x [4 x void (i8*, i8*, i32, i32)*]], [4 x [4 x void (i8*, i8*, i32, i32)*]], [4 x [4 x void (i8*, i8*, i32, i32)*]], [4 x [4 x void (i8*, i8*, i32, i32)*]], [2 x void (i8*, i8*, i8*, i32, i32)*], [11 x void (i8*, i8*, i32, i32, i32)*], [11 x void (i8*, i8*, i32, i32, i32)*], [2 x [16 x void (i8*, i8*, i32)*]], [2 x [16 x void (i8*, i8*, i32)*]], [2 x [16 x void (i8*, i8*, i32)*]], [2 x [16 x void (i8*, i8*, i32)*]], [8 x void (i8*, i8*, i32)*], [3 x void (i8*, i8*, i32, i32, i32, i32)*], [3 x void (i8*, i8*, i32, i32, i32, i32)*], [3 x void (i8*, i8*, i32, i32, i32, i32)*], [4 x [16 x void (i8*, i8*, i32)*]], [4 x [16 x void (i8*, i8*, i32)*]], [4 x [16 x void (i8*, i8*, i32)*]], [4 x [16 x void (i8*, i8*, i32)*]], [10 x void (i8*, i32, i32, i32, i32)*], [10 x void (i8*, i8*, i32, i32, i32, i32, i32)*], [2 x [16 x void (i8*, i8*, i32)*]], [2 x [16 x void (i8*, i8*, i32)*]], void (i8*, i32, i32, i32, i32, i32, i32)*, void (i8*, i32, i32, i32, i32, i32, i32)*, void (i8*, i32, i32, i32, i32, i32, i32)*, void (i8*, i32, i32, i32, i32, i32, i32)*, void (i8*, i16*, i32)*, [2 x [4 x i32 (i8*, i8*, i8*, i32, i32)*]], void (i8*, i8*, i32)*, void (i8*, i8*, i8*, i32)*, void (i8*, i8*, i8*, i32, i32*, i32*)*, void (i32*, i32*, i32)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32, i8*)*, void (i8*, i32, i32, i32)*, void (i8*, i32, i32, i32)*, void ([4 x [4 x i16]]*, i8*, [40 x i8]*, [40 x [2 x i16]]*, i32, i32, i32, i32, i32)*, void (i8*, i32, i32)*, void (i8*, i32, i32)*, void (i8*, i32)*, void (float*, float*, i32)*, void (float*, float*, i32)*, void (float*, float*, float*, i32)*, void (float*, float*, float*, float*, i32, i32, i32)*, void (i16*, float*, i32)*, void (i16*)*, void (i16*)*, void (i16*)*, void (i8*, i32, i16*)*, void (i8*, i32, i16*)*, [64 x i8], i32, i32 (i16*, i16*, i16*, i32)*, void (i16*, i16*, i32)*, void (i8*, i16*, i32)*, void (i8*, i16*, i32)*, void (i8*, i16*, i32)*, void (i8*, i16*, i32)*, void ([4 x i16]*)*, void (i32*, i32*, i32*, i32*, i32*, i32*, i32)*, void (i32*, i32)*, void (i8*, i32, i8**, i32, i32, i32, i32, i32, %struct.slice_buffer*, i32, i8*)*, void (i8*, i32, i32)*, [4 x void (i8*, i32, i8*, i32, i32, i32)*], void (i16*)*, void (i16*, i32)*, void (i16*, i32)*, void (i16*, i32)*, void (i8*, i32)*, void (i8*, i32)*, [16 x void (i8*, i8*, i32, i32)*] }
- %struct.FILE = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker*, %struct.FILE*, i32, i32, i32, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i8*, i8*, i32, i32, [40 x i8] }
- %struct.GetBitContext = type { i8*, i8*, i32*, i32, i32, i32, i32 }
- %struct.MJpegContext = type opaque
- %struct.MotionEstContext = type { %struct.AVCodecContext*, i32, [4 x [2 x i32]], [4 x [2 x i32]], i8*, i8*, [2 x i8*], i8*, i32, i32*, i32*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [4 x [4 x i8*]], [4 x [4 x i8*]], i32, i32, i32, i32, i32, [4 x void (i8*, i8*, i32, i32)*]*, [4 x void (i8*, i8*, i32, i32)*]*, [16 x void (i8*, i8*, i32)*]*, [16 x void (i8*, i8*, i32)*]*, [4097 x i8]*, i8*, i32 (%struct.MpegEncContext*, i32*, i32*, i32, i32, i32, i32, i32)* }
- %struct.MpegEncContext = type { %struct.AVCodecContext*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.PutBitContext, i32, i32, i32, i32, i32, i32, i64, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.Picture*, %struct.Picture**, %struct.Picture**, i32, i32, [8 x %struct.MpegEncContext*], %struct.Picture, %struct.Picture, %struct.Picture, %struct.Picture, %struct.Picture*, %struct.Picture*, %struct.Picture*, [3 x i8*], [3 x i32], i16*, [3 x i16*], [20 x i16], i32, i32, i8*, i8*, i8*, i8*, i8*, [16 x i16]*, [3 x [16 x i16]*], i32, i8*, i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i32, i32, i32, i32, i32*, i32, i32, i32, i32, i32, i32, i32, [5 x i32], i32, i32, i32, i32, %struct.DSPContext, i32, i32, [2 x i16]*, [2 x i16]*, [2 x i16]*, [2 x i16]*, [2 x i16]*, [2 x i16]*, [2 x [2 x [2 x i16]*]], [2 x [2 x [2 x [2 x i16]*]]], [2 x i16]*, [2 x i16]*, [2 x i16]*, [2 x i16]*, [2 x i16]*, [2 x i16]*, [2 x [2 x [2 x i16]*]], [2 x [2 x [2 x [2 x i16]*]]], [2 x i8*], [2 x [2 x i8*]], i32, i32, i32, [2 x [4 x [2 x i32]]], [2 x [2 x i32]], [2 x [2 x [2 x i32]]], i8*, [2 x [64 x i16]], %struct.MotionEstContext, i32, i32, i32, i32, i32, i32, i16*, [6 x i32], [6 x i32], [3 x i8*], i32*, [64 x i16], [64 x i16], [64 x i16], [64 x i16], i32, i32, i32, i32, i32, i8*, i8*, i8*, i8*, i8*, i8*, [8 x i32], [64 x i32]*, [64 x i32]*, [2 x [64 x i16]]*, [2 x [64 x i16]]*, [12 x i32], %struct.ScanTable, %struct.ScanTable, %struct.ScanTable, %struct.ScanTable, [64 x i32]*, [2 x i32], [64 x i16]*, i8*, i64, i64, i32, i32, %struct.RateControlContext, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i32, i32, %struct.GetBitContext, i32, i32, i32, %struct.ParseContext, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i16, i16, i16, i16, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x [2 x i32]], [2 x [2 x i32]], [2 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.PutBitContext, %struct.PutBitContext, i32, i32, i32, i32, i32, i32, i8*, i32, i32, i32, i32, i32, [3 x i32], %struct.MJpegContext*, [3 x i32], [3 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x [65 x [65 x [2 x i32]]]]*, i32, i32, %struct.GetBitContext, i32, i32, i32, i8*, i32, [2 x [2 x i32]], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x i32], i32, i32, i32, i32, i8*, i32, [12 x i16*], [64 x i16]*, [8 x [64 x i16]]*, i32 (%struct.MpegEncContext*, [64 x i16]*)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, void (%struct.MpegEncContext*, i16*, i32, i32)*, i32 (%struct.MpegEncContext*, i16*, i32, i32, i32*)*, i32 (%struct.MpegEncContext*, i16*, i32, i32, i32*)*, void (%struct.MpegEncContext*, i16*)* }
- %struct.ParseContext = type { i8*, i32, i32, i32, i32, i32, i32, i32 }
- %struct.Picture = type { [4 x i8*], [4 x i32], [4 x i8*], i32, i32, i64, i32, i32, i32, i32, i32, i8*, i32, i8*, [2 x [2 x i16]*], i32*, i8, i8*, [4 x i64], i32, i32, i32, i32, i32, %struct.AVPanScan*, i32, i32, i16*, [2 x i8*], [3 x i8*], [2 x [2 x i16]*], i32*, [2 x i32], i32, i32, i32, i32, [2 x [16 x i32]], [2 x i32], i32, i32, i16*, i16*, i8*, i32*, i32 }
- %struct.Predictor = type { double, double, double }
- %struct.PutBitContext = type { i32, i32, i8*, i8*, i8* }
- %struct.RateControlContext = type { %struct.FILE*, i32, %struct.RateControlEntry*, double, [5 x %struct.Predictor], double, double, double, double, double, [5 x double], i32, i32, [5 x i64], [5 x i64], [5 x i64], [5 x i64], [5 x i32], i32, i8*, float, i32, %struct.AVEvalExpr* }
- %struct.RateControlEntry = type { i32, float, i32, i32, i32, i32, i32, i64, i32, float, i32, i32, i32, i32, i32, i32 }
- %struct.RcOverride = type { i32, i32, i32, float }
- %struct.ScanTable = type { i8*, [64 x i8], [64 x i8] }
- %struct._IO_marker = type { %struct._IO_marker*, %struct.FILE*, i32 }
- %struct.slice_buffer = type opaque
-
-define float @ff_rate_estimate_qscale(%struct.MpegEncContext* %s, i32 %dry_run) {
-entry:
- br i1 false, label %cond_false163, label %cond_true135
-
-cond_true135: ; preds = %entry
- ret float 0.000000e+00
-
-cond_false163: ; preds = %entry
- br i1 false, label %cond_true203, label %cond_next211
-
-cond_true203: ; preds = %cond_false163
- ret float 0.000000e+00
-
-cond_next211: ; preds = %cond_false163
- br i1 false, label %cond_false243, label %cond_true220
-
-cond_true220: ; preds = %cond_next211
- br i1 false, label %cond_next237, label %cond_true225
-
-cond_true225: ; preds = %cond_true220
- ret float 0.000000e+00
-
-cond_next237: ; preds = %cond_true220
- br i1 false, label %cond_false785, label %cond_true735
-
-cond_false243: ; preds = %cond_next211
- ret float 0.000000e+00
-
-cond_true735: ; preds = %cond_next237
- ret float 0.000000e+00
-
-cond_false785: ; preds = %cond_next237
- br i1 false, label %cond_true356.i.preheader, label %bb359.i
-
-cond_true356.i.preheader: ; preds = %cond_false785
- %tmp116117.i = zext i8 0 to i32 ; <i32> [#uses=1]
- br i1 false, label %cond_false.i, label %cond_next159.i
-
-cond_false.i: ; preds = %cond_true356.i.preheader
- ret float 0.000000e+00
-
-cond_next159.i: ; preds = %cond_true356.i.preheader
- %tmp178.i = add i32 %tmp116117.i, -128 ; <i32> [#uses=2]
- %tmp181.i = mul i32 %tmp178.i, %tmp178.i ; <i32> [#uses=1]
- %tmp181182.i = sitofp i32 %tmp181.i to float ; <float> [#uses=1]
- %tmp199200.pn.in.i = fmul float %tmp181182.i, 0.000000e+00 ; <float> [#uses=1]
- %tmp199200.pn.i = fpext float %tmp199200.pn.in.i to double ; <double> [#uses=1]
- %tmp201.pn.i = fsub double 1.000000e+00, %tmp199200.pn.i ; <double> [#uses=1]
- %factor.2.in.i = fmul double 0.000000e+00, %tmp201.pn.i ; <double> [#uses=1]
- %factor.2.i = fptrunc double %factor.2.in.i to float ; <float> [#uses=1]
- br i1 false, label %cond_next312.i, label %cond_false222.i
-
-cond_false222.i: ; preds = %cond_next159.i
- ret float 0.000000e+00
-
-cond_next312.i: ; preds = %cond_next159.i
- %tmp313314.i = fpext float %factor.2.i to double ; <double> [#uses=0]
- ret float 0.000000e+00
-
-bb359.i: ; preds = %cond_false785
- ret float 0.000000e+00
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2008-01-25-dag-combine-mul.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2008-01-25-dag-combine-mul.ll
deleted file mode 100644
index 314bb05..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2008-01-25-dag-combine-mul.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s
-; rdar://5707064
-
-define i32 @f(i16* %pc) {
-entry:
- %acc = alloca i64, align 8 ; <i64*> [#uses=4]
- %tmp97 = load i64* %acc, align 8 ; <i64> [#uses=1]
- %tmp98 = and i64 %tmp97, 4294967295 ; <i64> [#uses=1]
- %tmp99 = load i64* null, align 8 ; <i64> [#uses=1]
- %tmp100 = and i64 %tmp99, 4294967295 ; <i64> [#uses=1]
- %tmp101 = mul i64 %tmp98, %tmp100 ; <i64> [#uses=1]
- %tmp103 = lshr i64 %tmp101, 0 ; <i64> [#uses=1]
- %tmp104 = load i64* %acc, align 8 ; <i64> [#uses=1]
- %.cast105 = zext i32 32 to i64 ; <i64> [#uses=1]
- %tmp106 = lshr i64 %tmp104, %.cast105 ; <i64> [#uses=1]
- %tmp107 = load i64* null, align 8 ; <i64> [#uses=1]
- %tmp108 = and i64 %tmp107, 4294967295 ; <i64> [#uses=1]
- %tmp109 = mul i64 %tmp106, %tmp108 ; <i64> [#uses=1]
- %tmp112 = add i64 %tmp109, 0 ; <i64> [#uses=1]
- %tmp116 = add i64 %tmp112, 0 ; <i64> [#uses=1]
- %tmp117 = add i64 %tmp103, %tmp116 ; <i64> [#uses=1]
- %tmp118 = load i64* %acc, align 8 ; <i64> [#uses=1]
- %tmp120 = lshr i64 %tmp118, 0 ; <i64> [#uses=1]
- %tmp121 = load i64* null, align 8 ; <i64> [#uses=1]
- %tmp123 = lshr i64 %tmp121, 0 ; <i64> [#uses=1]
- %tmp124 = mul i64 %tmp120, %tmp123 ; <i64> [#uses=1]
- %tmp126 = shl i64 %tmp124, 0 ; <i64> [#uses=1]
- %tmp127 = add i64 %tmp117, %tmp126 ; <i64> [#uses=1]
- store i64 %tmp127, i64* %acc, align 8
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2008-01-30-LoadCrash.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2008-01-30-LoadCrash.ll
deleted file mode 100644
index 70c3aaa..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2008-01-30-LoadCrash.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s
-
-@letters.3100 = external constant [63 x i8] ; <[63 x i8]*> [#uses=2]
-
-define i32 @mkstemps(i8* %pattern, i32 %suffix_len, i64 %tmp42.rle) nounwind {
-bb20:
- br label %bb41
-
-bb41: ; preds = %bb20
- %tmp8182 = trunc i64 %tmp42.rle to i32 ; <i32> [#uses=1]
- %tmp83 = getelementptr [63 x i8]* @letters.3100, i32 0, i32 %tmp8182 ; <i8*> [#uses=1]
- %tmp84 = load i8* %tmp83, align 1 ; <i8> [#uses=1]
- store i8 %tmp84, i8* null, align 1
- %tmp90 = urem i64 %tmp42.rle, 62 ; <i64> [#uses=1]
- %tmp9091 = trunc i64 %tmp90 to i32 ; <i32> [#uses=1]
- %tmp92 = getelementptr [63 x i8]* @letters.3100, i32 0, i32 %tmp9091 ; <i8*> [#uses=1]
- store i8* %tmp92, i8** null, align 1
- ret i32 -1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2008-02-04-Ctlz.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2008-02-04-Ctlz.ll
deleted file mode 100644
index 288bfd2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2008-02-04-Ctlz.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s
-
-@.str = internal constant [14 x i8] c"%lld %d %d %d\00"
-
-define i32 @main(i64 %arg) nounwind {
-entry:
- %tmp37 = tail call i64 @llvm.ctlz.i64( i64 %arg ) ; <i64> [#uses=1]
- %tmp47 = tail call i64 @llvm.cttz.i64( i64 %arg ) ; <i64> [#uses=1]
- %tmp57 = tail call i64 @llvm.ctpop.i64( i64 %arg ) ; <i64> [#uses=1]
- %tmp38 = trunc i64 %tmp37 to i32 ; <i32>:0 [#uses=1]
- %tmp48 = trunc i64 %tmp47 to i32 ; <i32>:0 [#uses=1]
- %tmp58 = trunc i64 %tmp57 to i32 ; <i32>:0 [#uses=1]
- %tmp40 = tail call i32 (i8*, ...)* @printf( i8* noalias getelementptr ([14 x i8]* @.str, i32 0, i32 0), i64 %arg, i32 %tmp38, i32 %tmp48, i32 %tmp58 ) nounwind ; <i32> [#uses=0]
- ret i32 0
-}
-
-declare i32 @printf(i8* noalias , ...) nounwind
-
-declare i64 @llvm.ctlz.i64(i64) nounwind readnone
-declare i64 @llvm.cttz.i64(i64) nounwind readnone
-declare i64 @llvm.ctpop.i64(i64) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2008-02-04-ExtractSubvector.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2008-02-04-ExtractSubvector.ll
deleted file mode 100644
index 8bf82df..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2008-02-04-ExtractSubvector.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s
-
-define i32 @main() nounwind {
-entry:
- br label %bb15
-
-bb15: ; preds = %bb15, %entry
- %tmp21 = fadd <8 x double> zeroinitializer, zeroinitializer ; <<8 x double>> [#uses=1]
- br i1 false, label %bb30, label %bb15
-
-bb30: ; preds = %bb15
- store <8 x double> %tmp21, <8 x double>* null, align 64
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2008-02-20-MatchingMem.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2008-02-20-MatchingMem.ll
deleted file mode 100644
index da1aeb5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2008-02-20-MatchingMem.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s
-; PR1133
-define void @test(i32* %X) nounwind {
-entry:
- %tmp1 = getelementptr i32* %X, i32 10 ; <i32*> [#uses=2]
- tail call void asm sideeffect " $0 $1 ", "=*im,*im,~{memory}"( i32* %tmp1, i32* %tmp1 ) nounwind
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2008-02-25-NegateZero.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2008-02-25-NegateZero.ll
deleted file mode 100644
index 97db667..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2008-02-25-NegateZero.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s
-; rdar://5763967
-
-define void @test() {
-entry:
- %tmp98 = load float* null, align 4 ; <float> [#uses=1]
- %tmp106 = load float* null, align 4 ; <float> [#uses=1]
- %tmp113 = fadd float %tmp98, %tmp106 ; <float> [#uses=1]
- %tmp119 = fsub float %tmp113, 0.000000e+00 ; <float> [#uses=1]
- call void (i32, ...)* @foo( i32 0, float 0.000000e+00, float %tmp119 ) nounwind
- ret void
-}
-
-declare void @foo(i32, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2008-02-26-NegatableCrash.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2008-02-26-NegatableCrash.ll
deleted file mode 100644
index 10b3d44..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2008-02-26-NegatableCrash.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; RUN: llc < %s
-; PR2096
- %struct.AVClass = type { i8*, i8* (i8*)*, %struct.AVOption* }
- %struct.AVCodec = type { i8*, i32, i32, i32, i32 (%struct.AVCodecContext*)*, i32 (%struct.AVCodecContext*, i8*, i32, i8*)*, i32 (%struct.AVCodecContext*)*, i32 (%struct.AVCodecContext*, i8*, i32*, i8*, i32)*, i32, %struct.AVCodec*, void (%struct.AVCodecContext*)*, %struct.AVRational*, i32* }
- %struct.AVCodecContext = type { %struct.AVClass*, i32, i32, i32, i32, i32, i8*, i32, %struct.AVRational, i32, i32, i32, i32, i32, void (%struct.AVCodecContext*, %struct.AVFrame*, i32*, i32, i32, i32)*, i32, i32, i32, i32, i32, i32, i32, float, float, i32, i32, i32, i32, float, i32, i32, i32, %struct.AVCodec*, i8*, i32, i32, void (%struct.AVCodecContext*, i8*, i32, i32)*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, [32 x i8], i32, i32, i32, i32, i32, i32, i32, float, i32, i32 (%struct.AVCodecContext*, %struct.AVFrame*)*, void (%struct.AVCodecContext*, %struct.AVFrame*)*, i32, i32, i32, i32, i8*, i8*, float, float, i32, %struct.RcOverride*, i32, i8*, i32, i32, i32, float, float, float, float, i32, float, float, float, float, float, i32, i32, i32, i32*, i32, i32, i32, i32, %struct.AVRational, %struct.AVFrame*, i32, i32, [4 x i64], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 (%struct.AVCodecContext*, i32*)*, i32, i32, i32, i32, i32, i32, i8*, i32, i32, i32, i32, i32, i32, i16*, i16*, i32, i32, i32, i32, %struct.AVPaletteControl*, i32, i32 (%struct.AVCodecContext*, %struct.AVFrame*)*, i32, i32, i32, i32, i32, i32, i32, i32 (%struct.AVCodecContext*, i32 (%struct.AVCodecContext*, i8*)*, i8**, i32*, i32)*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i64, i32, float }
- %struct.AVFrame = type { [4 x i8*], [4 x i32], [4 x i8*], i32, i32, i64, i32, i32, i32, i32, i32, i8*, i32, i8*, [2 x [2 x i16]*], i32*, i8, i8*, [4 x i64], i32, i32, i32, i32, i32, %struct.AVPanScan*, i32, i32, i16*, [2 x i8*] }
- %struct.AVOption = type opaque
- %struct.AVPaletteControl = type { i32, [256 x i32] }
- %struct.AVPanScan = type { i32, i32, i32, [3 x [2 x i16]] }
- %struct.AVRational = type { i32, i32 }
- %struct.RcOverride = type { i32, i32, i32, float }
-
-define i32 @sonic_encode_frame(%struct.AVCodecContext* %avctx, i8* %buf, i32 %buf_size, i8* %data) {
-entry:
- switch i32 0, label %bb429 [
- i32 0, label %bb244.preheader
- i32 1, label %bb279.preheader
- ]
-
-bb279.preheader: ; preds = %entry
- ret i32 0
-
-bb244.preheader: ; preds = %entry
- ret i32 0
-
-bb429: ; preds = %entry
- br i1 false, label %bb.nph1770, label %bb627
-
-bb.nph1770: ; preds = %bb429
- br i1 false, label %bb471, label %bb505
-
-bb471: ; preds = %bb471, %bb.nph1770
- %tmp487 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- br i1 false, label %bb505, label %bb471
-
-bb505: ; preds = %bb471, %bb.nph1770
- %xy.0.lcssa = phi double [ 0.000000e+00, %bb.nph1770 ], [ %tmp487, %bb471 ] ; <double> [#uses=1]
- %tmp507 = fsub double -0.000000e+00, %xy.0.lcssa ; <double> [#uses=1]
- %tmp509 = fdiv double %tmp507, 0.000000e+00 ; <double> [#uses=1]
- %tmp510 = fmul double %tmp509, 1.024000e+03 ; <double> [#uses=1]
- %tmp516 = fdiv double %tmp510, 0.000000e+00 ; <double> [#uses=1]
- %tmp517 = fadd double %tmp516, 5.000000e-01 ; <double> [#uses=1]
- %tmp518 = tail call double @floor( double %tmp517 ) nounwind readnone ; <double> [#uses=0]
- ret i32 0
-
-bb627: ; preds = %bb429
- ret i32 0
-}
-
-declare double @floor(double) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2009-03-17-LSR-APInt.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2009-03-17-LSR-APInt.ll
deleted file mode 100644
index 6281ada..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2009-03-17-LSR-APInt.ll
+++ /dev/null
@@ -1,92 +0,0 @@
-; RUN: llc < %s
-; PR3806
-
- %struct..0__pthread_mutex_s = type { i32, i32, i32, i32, i32, i32, %struct.__pthread_list_t }
- %struct.Alignment = type { i32 }
- %struct.QDesignerFormWindowInterface = type { %struct.QWidget }
- %struct.QFont = type { %struct.QFontPrivate*, i32 }
- %struct.QFontPrivate = type opaque
- %"struct.QHash<QString,QList<QAbstractExtensionFactory*> >" = type { %"struct.QHash<QString,QList<QAbstractExtensionFactory*> >::._120" }
- %"struct.QHash<QString,QList<QAbstractExtensionFactory*> >::._120" = type { %struct.QHashData* }
- %struct.QHashData = type { %"struct.QHashData::Node"*, %"struct.QHashData::Node"**, %struct.Alignment, i32, i32, i16, i16, i32, i8 }
- %"struct.QHashData::Node" = type { %"struct.QHashData::Node"*, i32 }
- %"struct.QList<QAbstractExtensionFactory*>" = type { %"struct.QList<QAbstractExtensionFactory*>::._101" }
- %"struct.QList<QAbstractExtensionFactory*>::._101" = type { %struct.QListData }
- %struct.QListData = type { %"struct.QListData::Data"* }
- %"struct.QListData::Data" = type { %struct.Alignment, i32, i32, i32, i8, [1 x i8*] }
- %struct.QObject = type { i32 (...)**, %struct.QObjectData* }
- %struct.QObjectData = type { i32 (...)**, %struct.QObject*, %struct.QObject*, %"struct.QList<QAbstractExtensionFactory*>", i32, i32 }
- %struct.QPaintDevice.base = type { i32 (...)**, i16 }
- %"struct.QPair<int,int>" = type { i32, i32 }
- %struct.QPalette = type { %struct.QPalettePrivate*, i32 }
- %struct.QPalettePrivate = type opaque
- %struct.QRect = type { i32, i32, i32, i32 }
- %struct.QWidget = type { %struct.QObject, %struct.QPaintDevice.base, %struct.QWidgetData* }
- %struct.QWidgetData = type { i64, i32, %struct.Alignment, i8, i8, i16, %struct.QRect, %struct.QPalette, %struct.QFont, %struct.QRect }
- %struct.__pthread_list_t = type { %struct.__pthread_list_t*, %struct.__pthread_list_t* }
- %struct.pthread_attr_t = type { i64, [48 x i8] }
- %struct.pthread_mutex_t = type { %struct..0__pthread_mutex_s }
- %"struct.qdesigner_internal::Grid" = type { i32, i32, %struct.QWidget**, i8*, i8* }
- %"struct.qdesigner_internal::GridLayout" = type { %"struct.qdesigner_internal::Layout", %"struct.QPair<int,int>", %"struct.qdesigner_internal::Grid"* }
- %"struct.qdesigner_internal::Layout" = type { %struct.QObject, %"struct.QList<QAbstractExtensionFactory*>", %struct.QWidget*, %"struct.QHash<QString,QList<QAbstractExtensionFactory*> >", %struct.QWidget*, %struct.QDesignerFormWindowInterface*, i8, %"struct.QPair<int,int>", %struct.QRect, i8 }
-
-@_ZL20__gthrw_pthread_oncePiPFvvE = alias weak i32 (i32*, void ()*)* @pthread_once ; <i32 (i32*, void ()*)*> [#uses=0]
-@_ZL27__gthrw_pthread_getspecificj = alias weak i8* (i32)* @pthread_getspecific ; <i8* (i32)*> [#uses=0]
-@_ZL27__gthrw_pthread_setspecificjPKv = alias weak i32 (i32, i8*)* @pthread_setspecific ; <i32 (i32, i8*)*> [#uses=0]
-@_ZL22__gthrw_pthread_createPmPK14pthread_attr_tPFPvS3_ES3_ = alias weak i32 (i64*, %struct.pthread_attr_t*, i8* (i8*)*, i8*)* @pthread_create ; <i32 (i64*, %struct.pthread_attr_t*, i8* (i8*)*, i8*)*> [#uses=0]
-@_ZL22__gthrw_pthread_cancelm = alias weak i32 (i64)* @pthread_cancel ; <i32 (i64)*> [#uses=0]
-@_ZL26__gthrw_pthread_mutex_lockP15pthread_mutex_t = alias weak i32 (%struct.pthread_mutex_t*)* @pthread_mutex_lock ; <i32 (%struct.pthread_mutex_t*)*> [#uses=0]
-@_ZL29__gthrw_pthread_mutex_trylockP15pthread_mutex_t = alias weak i32 (%struct.pthread_mutex_t*)* @pthread_mutex_trylock ; <i32 (%struct.pthread_mutex_t*)*> [#uses=0]
-@_ZL28__gthrw_pthread_mutex_unlockP15pthread_mutex_t = alias weak i32 (%struct.pthread_mutex_t*)* @pthread_mutex_unlock ; <i32 (%struct.pthread_mutex_t*)*> [#uses=0]
-@_ZL26__gthrw_pthread_mutex_initP15pthread_mutex_tPK19pthread_mutexattr_t = alias weak i32 (%struct.pthread_mutex_t*, %struct.Alignment*)* @pthread_mutex_init ; <i32 (%struct.pthread_mutex_t*, %struct.Alignment*)*> [#uses=0]
-@_ZL26__gthrw_pthread_key_createPjPFvPvE = alias weak i32 (i32*, void (i8*)*)* @pthread_key_create ; <i32 (i32*, void (i8*)*)*> [#uses=0]
-@_ZL26__gthrw_pthread_key_deletej = alias weak i32 (i32)* @pthread_key_delete ; <i32 (i32)*> [#uses=0]
-@_ZL30__gthrw_pthread_mutexattr_initP19pthread_mutexattr_t = alias weak i32 (%struct.Alignment*)* @pthread_mutexattr_init ; <i32 (%struct.Alignment*)*> [#uses=0]
-@_ZL33__gthrw_pthread_mutexattr_settypeP19pthread_mutexattr_ti = alias weak i32 (%struct.Alignment*, i32)* @pthread_mutexattr_settype ; <i32 (%struct.Alignment*, i32)*> [#uses=0]
-@_ZL33__gthrw_pthread_mutexattr_destroyP19pthread_mutexattr_t = alias weak i32 (%struct.Alignment*)* @pthread_mutexattr_destroy ; <i32 (%struct.Alignment*)*> [#uses=0]
-
-define void @_ZN18qdesigner_internal10GridLayout9buildGridEv(%"struct.qdesigner_internal::GridLayout"* %this) nounwind {
-entry:
- br label %bb44
-
-bb44: ; preds = %bb47, %entry
- %indvar = phi i128 [ %indvar.next144, %bb47 ], [ 0, %entry ] ; <i128> [#uses=2]
- br i1 false, label %bb46, label %bb47
-
-bb46: ; preds = %bb44
- %tmp = shl i128 %indvar, 64 ; <i128> [#uses=1]
- %tmp96 = and i128 %tmp, 79228162495817593519834398720 ; <i128> [#uses=0]
- br label %bb47
-
-bb47: ; preds = %bb46, %bb44
- %indvar.next144 = add i128 %indvar, 1 ; <i128> [#uses=1]
- br label %bb44
-}
-
-declare i32 @pthread_once(i32*, void ()*)
-
-declare i8* @pthread_getspecific(i32)
-
-declare i32 @pthread_setspecific(i32, i8*)
-
-declare i32 @pthread_create(i64*, %struct.pthread_attr_t*, i8* (i8*)*, i8*)
-
-declare i32 @pthread_cancel(i64)
-
-declare i32 @pthread_mutex_lock(%struct.pthread_mutex_t*)
-
-declare i32 @pthread_mutex_trylock(%struct.pthread_mutex_t*)
-
-declare i32 @pthread_mutex_unlock(%struct.pthread_mutex_t*)
-
-declare i32 @pthread_mutex_init(%struct.pthread_mutex_t*, %struct.Alignment*)
-
-declare i32 @pthread_key_create(i32*, void (i8*)*)
-
-declare i32 @pthread_key_delete(i32)
-
-declare i32 @pthread_mutexattr_init(%struct.Alignment*)
-
-declare i32 @pthread_mutexattr_settype(%struct.Alignment*, i32)
-
-declare i32 @pthread_mutexattr_destroy(%struct.Alignment*)
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2009-03-29-SoftFloatVectorExtract.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2009-03-29-SoftFloatVectorExtract.ll
deleted file mode 100644
index 45b561a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2009-03-29-SoftFloatVectorExtract.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -soft-float
-; PR3899
-
-@m = external global <2 x double>
-
-define double @vector_ex() nounwind {
- %v = load <2 x double>* @m
- %x = extractelement <2 x double> %v, i32 1
- ret double %x
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2009-04-10-SinkCrash.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2009-04-10-SinkCrash.ll
deleted file mode 100644
index 125f875..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2009-04-10-SinkCrash.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s
-
-define void @QRiterate(i32 %p.1, double %tmp.212) nounwind {
-entry:
- br i1 false, label %shortcirc_next.1, label %exit.1.critedge
-
-shortcirc_next.1: ; preds = %shortcirc_next.1, %entry
- %tmp.213 = fcmp une double %tmp.212, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %tmp.213, label %shortcirc_next.1, label %exit.1
-
-exit.1.critedge: ; preds = %entry
- ret void
-
-exit.1: ; preds = %shortcirc_next.1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2009-04-28-i128-cmp-crash.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2009-04-28-i128-cmp-crash.ll
deleted file mode 100644
index b62f811..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2009-04-28-i128-cmp-crash.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc < %s
-; rdar://6836460
-; rdar://7516906
-; PR5963
-
-define i32 @test(i128* %P) nounwind {
-entry:
- %tmp48 = load i128* %P
- %and49 = and i128 %tmp48, 18446744073709551616 ; <i128> [#uses=1]
- %tobool = icmp ne i128 %and49, 0 ; <i1> [#uses=1]
- br i1 %tobool, label %if.then50, label %if.end61
-
-if.then50: ; preds = %if.then20
- ret i32 1241
-
-if.end61: ; preds = %if.then50, %if.then20, %entry
- ret i32 123
-}
-
-define i32 @test2(i320* %P) nounwind {
-entry:
- %tmp48 = load i320* %P
- %and49 = and i320 %tmp48, 25108406941546723055343157692830665664409421777856138051584
- %tobool = icmp ne i320 %and49, 0 ; <i1> [#uses=1]
- br i1 %tobool, label %if.then50, label %if.end61
-
-if.then50: ; preds = %if.then20
- ret i32 1241
-
-if.end61: ; preds = %if.then50, %if.then20, %entry
- ret i32 123
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/2009-06-03-UnreachableSplitPad.ll b/libclamav/c++/llvm/test/CodeGen/Generic/2009-06-03-UnreachableSplitPad.ll
deleted file mode 100644
index 112cac4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/2009-06-03-UnreachableSplitPad.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s
-; PR4317
-
-declare i32 @b()
-
-define void @a() {
-entry:
- ret void
-
-dummy:
- invoke i32 @b() to label %reg unwind label %reg
-
-reg:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/APIntLoadStore.ll b/libclamav/c++/llvm/test/CodeGen/Generic/APIntLoadStore.ll
deleted file mode 100644
index 7c71a33..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/APIntLoadStore.ll
+++ /dev/null
@@ -1,2049 +0,0 @@
-; RUN: llc < %s > %t
-@i1_l = external global i1 ; <i1*> [#uses=1]
-@i1_s = external global i1 ; <i1*> [#uses=1]
-@i2_l = external global i2 ; <i2*> [#uses=1]
-@i2_s = external global i2 ; <i2*> [#uses=1]
-@i3_l = external global i3 ; <i3*> [#uses=1]
-@i3_s = external global i3 ; <i3*> [#uses=1]
-@i4_l = external global i4 ; <i4*> [#uses=1]
-@i4_s = external global i4 ; <i4*> [#uses=1]
-@i5_l = external global i5 ; <i5*> [#uses=1]
-@i5_s = external global i5 ; <i5*> [#uses=1]
-@i6_l = external global i6 ; <i6*> [#uses=1]
-@i6_s = external global i6 ; <i6*> [#uses=1]
-@i7_l = external global i7 ; <i7*> [#uses=1]
-@i7_s = external global i7 ; <i7*> [#uses=1]
-@i8_l = external global i8 ; <i8*> [#uses=1]
-@i8_s = external global i8 ; <i8*> [#uses=1]
-@i9_l = external global i9 ; <i9*> [#uses=1]
-@i9_s = external global i9 ; <i9*> [#uses=1]
-@i10_l = external global i10 ; <i10*> [#uses=1]
-@i10_s = external global i10 ; <i10*> [#uses=1]
-@i11_l = external global i11 ; <i11*> [#uses=1]
-@i11_s = external global i11 ; <i11*> [#uses=1]
-@i12_l = external global i12 ; <i12*> [#uses=1]
-@i12_s = external global i12 ; <i12*> [#uses=1]
-@i13_l = external global i13 ; <i13*> [#uses=1]
-@i13_s = external global i13 ; <i13*> [#uses=1]
-@i14_l = external global i14 ; <i14*> [#uses=1]
-@i14_s = external global i14 ; <i14*> [#uses=1]
-@i15_l = external global i15 ; <i15*> [#uses=1]
-@i15_s = external global i15 ; <i15*> [#uses=1]
-@i16_l = external global i16 ; <i16*> [#uses=1]
-@i16_s = external global i16 ; <i16*> [#uses=1]
-@i17_l = external global i17 ; <i17*> [#uses=1]
-@i17_s = external global i17 ; <i17*> [#uses=1]
-@i18_l = external global i18 ; <i18*> [#uses=1]
-@i18_s = external global i18 ; <i18*> [#uses=1]
-@i19_l = external global i19 ; <i19*> [#uses=1]
-@i19_s = external global i19 ; <i19*> [#uses=1]
-@i20_l = external global i20 ; <i20*> [#uses=1]
-@i20_s = external global i20 ; <i20*> [#uses=1]
-@i21_l = external global i21 ; <i21*> [#uses=1]
-@i21_s = external global i21 ; <i21*> [#uses=1]
-@i22_l = external global i22 ; <i22*> [#uses=1]
-@i22_s = external global i22 ; <i22*> [#uses=1]
-@i23_l = external global i23 ; <i23*> [#uses=1]
-@i23_s = external global i23 ; <i23*> [#uses=1]
-@i24_l = external global i24 ; <i24*> [#uses=1]
-@i24_s = external global i24 ; <i24*> [#uses=1]
-@i25_l = external global i25 ; <i25*> [#uses=1]
-@i25_s = external global i25 ; <i25*> [#uses=1]
-@i26_l = external global i26 ; <i26*> [#uses=1]
-@i26_s = external global i26 ; <i26*> [#uses=1]
-@i27_l = external global i27 ; <i27*> [#uses=1]
-@i27_s = external global i27 ; <i27*> [#uses=1]
-@i28_l = external global i28 ; <i28*> [#uses=1]
-@i28_s = external global i28 ; <i28*> [#uses=1]
-@i29_l = external global i29 ; <i29*> [#uses=1]
-@i29_s = external global i29 ; <i29*> [#uses=1]
-@i30_l = external global i30 ; <i30*> [#uses=1]
-@i30_s = external global i30 ; <i30*> [#uses=1]
-@i31_l = external global i31 ; <i31*> [#uses=1]
-@i31_s = external global i31 ; <i31*> [#uses=1]
-@i32_l = external global i32 ; <i32*> [#uses=1]
-@i32_s = external global i32 ; <i32*> [#uses=1]
-@i33_l = external global i33 ; <i33*> [#uses=1]
-@i33_s = external global i33 ; <i33*> [#uses=1]
-@i34_l = external global i34 ; <i34*> [#uses=1]
-@i34_s = external global i34 ; <i34*> [#uses=1]
-@i35_l = external global i35 ; <i35*> [#uses=1]
-@i35_s = external global i35 ; <i35*> [#uses=1]
-@i36_l = external global i36 ; <i36*> [#uses=1]
-@i36_s = external global i36 ; <i36*> [#uses=1]
-@i37_l = external global i37 ; <i37*> [#uses=1]
-@i37_s = external global i37 ; <i37*> [#uses=1]
-@i38_l = external global i38 ; <i38*> [#uses=1]
-@i38_s = external global i38 ; <i38*> [#uses=1]
-@i39_l = external global i39 ; <i39*> [#uses=1]
-@i39_s = external global i39 ; <i39*> [#uses=1]
-@i40_l = external global i40 ; <i40*> [#uses=1]
-@i40_s = external global i40 ; <i40*> [#uses=1]
-@i41_l = external global i41 ; <i41*> [#uses=1]
-@i41_s = external global i41 ; <i41*> [#uses=1]
-@i42_l = external global i42 ; <i42*> [#uses=1]
-@i42_s = external global i42 ; <i42*> [#uses=1]
-@i43_l = external global i43 ; <i43*> [#uses=1]
-@i43_s = external global i43 ; <i43*> [#uses=1]
-@i44_l = external global i44 ; <i44*> [#uses=1]
-@i44_s = external global i44 ; <i44*> [#uses=1]
-@i45_l = external global i45 ; <i45*> [#uses=1]
-@i45_s = external global i45 ; <i45*> [#uses=1]
-@i46_l = external global i46 ; <i46*> [#uses=1]
-@i46_s = external global i46 ; <i46*> [#uses=1]
-@i47_l = external global i47 ; <i47*> [#uses=1]
-@i47_s = external global i47 ; <i47*> [#uses=1]
-@i48_l = external global i48 ; <i48*> [#uses=1]
-@i48_s = external global i48 ; <i48*> [#uses=1]
-@i49_l = external global i49 ; <i49*> [#uses=1]
-@i49_s = external global i49 ; <i49*> [#uses=1]
-@i50_l = external global i50 ; <i50*> [#uses=1]
-@i50_s = external global i50 ; <i50*> [#uses=1]
-@i51_l = external global i51 ; <i51*> [#uses=1]
-@i51_s = external global i51 ; <i51*> [#uses=1]
-@i52_l = external global i52 ; <i52*> [#uses=1]
-@i52_s = external global i52 ; <i52*> [#uses=1]
-@i53_l = external global i53 ; <i53*> [#uses=1]
-@i53_s = external global i53 ; <i53*> [#uses=1]
-@i54_l = external global i54 ; <i54*> [#uses=1]
-@i54_s = external global i54 ; <i54*> [#uses=1]
-@i55_l = external global i55 ; <i55*> [#uses=1]
-@i55_s = external global i55 ; <i55*> [#uses=1]
-@i56_l = external global i56 ; <i56*> [#uses=1]
-@i56_s = external global i56 ; <i56*> [#uses=1]
-@i57_l = external global i57 ; <i57*> [#uses=1]
-@i57_s = external global i57 ; <i57*> [#uses=1]
-@i58_l = external global i58 ; <i58*> [#uses=1]
-@i58_s = external global i58 ; <i58*> [#uses=1]
-@i59_l = external global i59 ; <i59*> [#uses=1]
-@i59_s = external global i59 ; <i59*> [#uses=1]
-@i60_l = external global i60 ; <i60*> [#uses=1]
-@i60_s = external global i60 ; <i60*> [#uses=1]
-@i61_l = external global i61 ; <i61*> [#uses=1]
-@i61_s = external global i61 ; <i61*> [#uses=1]
-@i62_l = external global i62 ; <i62*> [#uses=1]
-@i62_s = external global i62 ; <i62*> [#uses=1]
-@i63_l = external global i63 ; <i63*> [#uses=1]
-@i63_s = external global i63 ; <i63*> [#uses=1]
-@i64_l = external global i64 ; <i64*> [#uses=1]
-@i64_s = external global i64 ; <i64*> [#uses=1]
-@i65_l = external global i65 ; <i65*> [#uses=1]
-@i65_s = external global i65 ; <i65*> [#uses=1]
-@i66_l = external global i66 ; <i66*> [#uses=1]
-@i66_s = external global i66 ; <i66*> [#uses=1]
-@i67_l = external global i67 ; <i67*> [#uses=1]
-@i67_s = external global i67 ; <i67*> [#uses=1]
-@i68_l = external global i68 ; <i68*> [#uses=1]
-@i68_s = external global i68 ; <i68*> [#uses=1]
-@i69_l = external global i69 ; <i69*> [#uses=1]
-@i69_s = external global i69 ; <i69*> [#uses=1]
-@i70_l = external global i70 ; <i70*> [#uses=1]
-@i70_s = external global i70 ; <i70*> [#uses=1]
-@i71_l = external global i71 ; <i71*> [#uses=1]
-@i71_s = external global i71 ; <i71*> [#uses=1]
-@i72_l = external global i72 ; <i72*> [#uses=1]
-@i72_s = external global i72 ; <i72*> [#uses=1]
-@i73_l = external global i73 ; <i73*> [#uses=1]
-@i73_s = external global i73 ; <i73*> [#uses=1]
-@i74_l = external global i74 ; <i74*> [#uses=1]
-@i74_s = external global i74 ; <i74*> [#uses=1]
-@i75_l = external global i75 ; <i75*> [#uses=1]
-@i75_s = external global i75 ; <i75*> [#uses=1]
-@i76_l = external global i76 ; <i76*> [#uses=1]
-@i76_s = external global i76 ; <i76*> [#uses=1]
-@i77_l = external global i77 ; <i77*> [#uses=1]
-@i77_s = external global i77 ; <i77*> [#uses=1]
-@i78_l = external global i78 ; <i78*> [#uses=1]
-@i78_s = external global i78 ; <i78*> [#uses=1]
-@i79_l = external global i79 ; <i79*> [#uses=1]
-@i79_s = external global i79 ; <i79*> [#uses=1]
-@i80_l = external global i80 ; <i80*> [#uses=1]
-@i80_s = external global i80 ; <i80*> [#uses=1]
-@i81_l = external global i81 ; <i81*> [#uses=1]
-@i81_s = external global i81 ; <i81*> [#uses=1]
-@i82_l = external global i82 ; <i82*> [#uses=1]
-@i82_s = external global i82 ; <i82*> [#uses=1]
-@i83_l = external global i83 ; <i83*> [#uses=1]
-@i83_s = external global i83 ; <i83*> [#uses=1]
-@i84_l = external global i84 ; <i84*> [#uses=1]
-@i84_s = external global i84 ; <i84*> [#uses=1]
-@i85_l = external global i85 ; <i85*> [#uses=1]
-@i85_s = external global i85 ; <i85*> [#uses=1]
-@i86_l = external global i86 ; <i86*> [#uses=1]
-@i86_s = external global i86 ; <i86*> [#uses=1]
-@i87_l = external global i87 ; <i87*> [#uses=1]
-@i87_s = external global i87 ; <i87*> [#uses=1]
-@i88_l = external global i88 ; <i88*> [#uses=1]
-@i88_s = external global i88 ; <i88*> [#uses=1]
-@i89_l = external global i89 ; <i89*> [#uses=1]
-@i89_s = external global i89 ; <i89*> [#uses=1]
-@i90_l = external global i90 ; <i90*> [#uses=1]
-@i90_s = external global i90 ; <i90*> [#uses=1]
-@i91_l = external global i91 ; <i91*> [#uses=1]
-@i91_s = external global i91 ; <i91*> [#uses=1]
-@i92_l = external global i92 ; <i92*> [#uses=1]
-@i92_s = external global i92 ; <i92*> [#uses=1]
-@i93_l = external global i93 ; <i93*> [#uses=1]
-@i93_s = external global i93 ; <i93*> [#uses=1]
-@i94_l = external global i94 ; <i94*> [#uses=1]
-@i94_s = external global i94 ; <i94*> [#uses=1]
-@i95_l = external global i95 ; <i95*> [#uses=1]
-@i95_s = external global i95 ; <i95*> [#uses=1]
-@i96_l = external global i96 ; <i96*> [#uses=1]
-@i96_s = external global i96 ; <i96*> [#uses=1]
-@i97_l = external global i97 ; <i97*> [#uses=1]
-@i97_s = external global i97 ; <i97*> [#uses=1]
-@i98_l = external global i98 ; <i98*> [#uses=1]
-@i98_s = external global i98 ; <i98*> [#uses=1]
-@i99_l = external global i99 ; <i99*> [#uses=1]
-@i99_s = external global i99 ; <i99*> [#uses=1]
-@i100_l = external global i100 ; <i100*> [#uses=1]
-@i100_s = external global i100 ; <i100*> [#uses=1]
-@i101_l = external global i101 ; <i101*> [#uses=1]
-@i101_s = external global i101 ; <i101*> [#uses=1]
-@i102_l = external global i102 ; <i102*> [#uses=1]
-@i102_s = external global i102 ; <i102*> [#uses=1]
-@i103_l = external global i103 ; <i103*> [#uses=1]
-@i103_s = external global i103 ; <i103*> [#uses=1]
-@i104_l = external global i104 ; <i104*> [#uses=1]
-@i104_s = external global i104 ; <i104*> [#uses=1]
-@i105_l = external global i105 ; <i105*> [#uses=1]
-@i105_s = external global i105 ; <i105*> [#uses=1]
-@i106_l = external global i106 ; <i106*> [#uses=1]
-@i106_s = external global i106 ; <i106*> [#uses=1]
-@i107_l = external global i107 ; <i107*> [#uses=1]
-@i107_s = external global i107 ; <i107*> [#uses=1]
-@i108_l = external global i108 ; <i108*> [#uses=1]
-@i108_s = external global i108 ; <i108*> [#uses=1]
-@i109_l = external global i109 ; <i109*> [#uses=1]
-@i109_s = external global i109 ; <i109*> [#uses=1]
-@i110_l = external global i110 ; <i110*> [#uses=1]
-@i110_s = external global i110 ; <i110*> [#uses=1]
-@i111_l = external global i111 ; <i111*> [#uses=1]
-@i111_s = external global i111 ; <i111*> [#uses=1]
-@i112_l = external global i112 ; <i112*> [#uses=1]
-@i112_s = external global i112 ; <i112*> [#uses=1]
-@i113_l = external global i113 ; <i113*> [#uses=1]
-@i113_s = external global i113 ; <i113*> [#uses=1]
-@i114_l = external global i114 ; <i114*> [#uses=1]
-@i114_s = external global i114 ; <i114*> [#uses=1]
-@i115_l = external global i115 ; <i115*> [#uses=1]
-@i115_s = external global i115 ; <i115*> [#uses=1]
-@i116_l = external global i116 ; <i116*> [#uses=1]
-@i116_s = external global i116 ; <i116*> [#uses=1]
-@i117_l = external global i117 ; <i117*> [#uses=1]
-@i117_s = external global i117 ; <i117*> [#uses=1]
-@i118_l = external global i118 ; <i118*> [#uses=1]
-@i118_s = external global i118 ; <i118*> [#uses=1]
-@i119_l = external global i119 ; <i119*> [#uses=1]
-@i119_s = external global i119 ; <i119*> [#uses=1]
-@i120_l = external global i120 ; <i120*> [#uses=1]
-@i120_s = external global i120 ; <i120*> [#uses=1]
-@i121_l = external global i121 ; <i121*> [#uses=1]
-@i121_s = external global i121 ; <i121*> [#uses=1]
-@i122_l = external global i122 ; <i122*> [#uses=1]
-@i122_s = external global i122 ; <i122*> [#uses=1]
-@i123_l = external global i123 ; <i123*> [#uses=1]
-@i123_s = external global i123 ; <i123*> [#uses=1]
-@i124_l = external global i124 ; <i124*> [#uses=1]
-@i124_s = external global i124 ; <i124*> [#uses=1]
-@i125_l = external global i125 ; <i125*> [#uses=1]
-@i125_s = external global i125 ; <i125*> [#uses=1]
-@i126_l = external global i126 ; <i126*> [#uses=1]
-@i126_s = external global i126 ; <i126*> [#uses=1]
-@i127_l = external global i127 ; <i127*> [#uses=1]
-@i127_s = external global i127 ; <i127*> [#uses=1]
-@i128_l = external global i128 ; <i128*> [#uses=1]
-@i128_s = external global i128 ; <i128*> [#uses=1]
-@i129_l = external global i129 ; <i129*> [#uses=1]
-@i129_s = external global i129 ; <i129*> [#uses=1]
-@i130_l = external global i130 ; <i130*> [#uses=1]
-@i130_s = external global i130 ; <i130*> [#uses=1]
-@i131_l = external global i131 ; <i131*> [#uses=1]
-@i131_s = external global i131 ; <i131*> [#uses=1]
-@i132_l = external global i132 ; <i132*> [#uses=1]
-@i132_s = external global i132 ; <i132*> [#uses=1]
-@i133_l = external global i133 ; <i133*> [#uses=1]
-@i133_s = external global i133 ; <i133*> [#uses=1]
-@i134_l = external global i134 ; <i134*> [#uses=1]
-@i134_s = external global i134 ; <i134*> [#uses=1]
-@i135_l = external global i135 ; <i135*> [#uses=1]
-@i135_s = external global i135 ; <i135*> [#uses=1]
-@i136_l = external global i136 ; <i136*> [#uses=1]
-@i136_s = external global i136 ; <i136*> [#uses=1]
-@i137_l = external global i137 ; <i137*> [#uses=1]
-@i137_s = external global i137 ; <i137*> [#uses=1]
-@i138_l = external global i138 ; <i138*> [#uses=1]
-@i138_s = external global i138 ; <i138*> [#uses=1]
-@i139_l = external global i139 ; <i139*> [#uses=1]
-@i139_s = external global i139 ; <i139*> [#uses=1]
-@i140_l = external global i140 ; <i140*> [#uses=1]
-@i140_s = external global i140 ; <i140*> [#uses=1]
-@i141_l = external global i141 ; <i141*> [#uses=1]
-@i141_s = external global i141 ; <i141*> [#uses=1]
-@i142_l = external global i142 ; <i142*> [#uses=1]
-@i142_s = external global i142 ; <i142*> [#uses=1]
-@i143_l = external global i143 ; <i143*> [#uses=1]
-@i143_s = external global i143 ; <i143*> [#uses=1]
-@i144_l = external global i144 ; <i144*> [#uses=1]
-@i144_s = external global i144 ; <i144*> [#uses=1]
-@i145_l = external global i145 ; <i145*> [#uses=1]
-@i145_s = external global i145 ; <i145*> [#uses=1]
-@i146_l = external global i146 ; <i146*> [#uses=1]
-@i146_s = external global i146 ; <i146*> [#uses=1]
-@i147_l = external global i147 ; <i147*> [#uses=1]
-@i147_s = external global i147 ; <i147*> [#uses=1]
-@i148_l = external global i148 ; <i148*> [#uses=1]
-@i148_s = external global i148 ; <i148*> [#uses=1]
-@i149_l = external global i149 ; <i149*> [#uses=1]
-@i149_s = external global i149 ; <i149*> [#uses=1]
-@i150_l = external global i150 ; <i150*> [#uses=1]
-@i150_s = external global i150 ; <i150*> [#uses=1]
-@i151_l = external global i151 ; <i151*> [#uses=1]
-@i151_s = external global i151 ; <i151*> [#uses=1]
-@i152_l = external global i152 ; <i152*> [#uses=1]
-@i152_s = external global i152 ; <i152*> [#uses=1]
-@i153_l = external global i153 ; <i153*> [#uses=1]
-@i153_s = external global i153 ; <i153*> [#uses=1]
-@i154_l = external global i154 ; <i154*> [#uses=1]
-@i154_s = external global i154 ; <i154*> [#uses=1]
-@i155_l = external global i155 ; <i155*> [#uses=1]
-@i155_s = external global i155 ; <i155*> [#uses=1]
-@i156_l = external global i156 ; <i156*> [#uses=1]
-@i156_s = external global i156 ; <i156*> [#uses=1]
-@i157_l = external global i157 ; <i157*> [#uses=1]
-@i157_s = external global i157 ; <i157*> [#uses=1]
-@i158_l = external global i158 ; <i158*> [#uses=1]
-@i158_s = external global i158 ; <i158*> [#uses=1]
-@i159_l = external global i159 ; <i159*> [#uses=1]
-@i159_s = external global i159 ; <i159*> [#uses=1]
-@i160_l = external global i160 ; <i160*> [#uses=1]
-@i160_s = external global i160 ; <i160*> [#uses=1]
-@i161_l = external global i161 ; <i161*> [#uses=1]
-@i161_s = external global i161 ; <i161*> [#uses=1]
-@i162_l = external global i162 ; <i162*> [#uses=1]
-@i162_s = external global i162 ; <i162*> [#uses=1]
-@i163_l = external global i163 ; <i163*> [#uses=1]
-@i163_s = external global i163 ; <i163*> [#uses=1]
-@i164_l = external global i164 ; <i164*> [#uses=1]
-@i164_s = external global i164 ; <i164*> [#uses=1]
-@i165_l = external global i165 ; <i165*> [#uses=1]
-@i165_s = external global i165 ; <i165*> [#uses=1]
-@i166_l = external global i166 ; <i166*> [#uses=1]
-@i166_s = external global i166 ; <i166*> [#uses=1]
-@i167_l = external global i167 ; <i167*> [#uses=1]
-@i167_s = external global i167 ; <i167*> [#uses=1]
-@i168_l = external global i168 ; <i168*> [#uses=1]
-@i168_s = external global i168 ; <i168*> [#uses=1]
-@i169_l = external global i169 ; <i169*> [#uses=1]
-@i169_s = external global i169 ; <i169*> [#uses=1]
-@i170_l = external global i170 ; <i170*> [#uses=1]
-@i170_s = external global i170 ; <i170*> [#uses=1]
-@i171_l = external global i171 ; <i171*> [#uses=1]
-@i171_s = external global i171 ; <i171*> [#uses=1]
-@i172_l = external global i172 ; <i172*> [#uses=1]
-@i172_s = external global i172 ; <i172*> [#uses=1]
-@i173_l = external global i173 ; <i173*> [#uses=1]
-@i173_s = external global i173 ; <i173*> [#uses=1]
-@i174_l = external global i174 ; <i174*> [#uses=1]
-@i174_s = external global i174 ; <i174*> [#uses=1]
-@i175_l = external global i175 ; <i175*> [#uses=1]
-@i175_s = external global i175 ; <i175*> [#uses=1]
-@i176_l = external global i176 ; <i176*> [#uses=1]
-@i176_s = external global i176 ; <i176*> [#uses=1]
-@i177_l = external global i177 ; <i177*> [#uses=1]
-@i177_s = external global i177 ; <i177*> [#uses=1]
-@i178_l = external global i178 ; <i178*> [#uses=1]
-@i178_s = external global i178 ; <i178*> [#uses=1]
-@i179_l = external global i179 ; <i179*> [#uses=1]
-@i179_s = external global i179 ; <i179*> [#uses=1]
-@i180_l = external global i180 ; <i180*> [#uses=1]
-@i180_s = external global i180 ; <i180*> [#uses=1]
-@i181_l = external global i181 ; <i181*> [#uses=1]
-@i181_s = external global i181 ; <i181*> [#uses=1]
-@i182_l = external global i182 ; <i182*> [#uses=1]
-@i182_s = external global i182 ; <i182*> [#uses=1]
-@i183_l = external global i183 ; <i183*> [#uses=1]
-@i183_s = external global i183 ; <i183*> [#uses=1]
-@i184_l = external global i184 ; <i184*> [#uses=1]
-@i184_s = external global i184 ; <i184*> [#uses=1]
-@i185_l = external global i185 ; <i185*> [#uses=1]
-@i185_s = external global i185 ; <i185*> [#uses=1]
-@i186_l = external global i186 ; <i186*> [#uses=1]
-@i186_s = external global i186 ; <i186*> [#uses=1]
-@i187_l = external global i187 ; <i187*> [#uses=1]
-@i187_s = external global i187 ; <i187*> [#uses=1]
-@i188_l = external global i188 ; <i188*> [#uses=1]
-@i188_s = external global i188 ; <i188*> [#uses=1]
-@i189_l = external global i189 ; <i189*> [#uses=1]
-@i189_s = external global i189 ; <i189*> [#uses=1]
-@i190_l = external global i190 ; <i190*> [#uses=1]
-@i190_s = external global i190 ; <i190*> [#uses=1]
-@i191_l = external global i191 ; <i191*> [#uses=1]
-@i191_s = external global i191 ; <i191*> [#uses=1]
-@i192_l = external global i192 ; <i192*> [#uses=1]
-@i192_s = external global i192 ; <i192*> [#uses=1]
-@i193_l = external global i193 ; <i193*> [#uses=1]
-@i193_s = external global i193 ; <i193*> [#uses=1]
-@i194_l = external global i194 ; <i194*> [#uses=1]
-@i194_s = external global i194 ; <i194*> [#uses=1]
-@i195_l = external global i195 ; <i195*> [#uses=1]
-@i195_s = external global i195 ; <i195*> [#uses=1]
-@i196_l = external global i196 ; <i196*> [#uses=1]
-@i196_s = external global i196 ; <i196*> [#uses=1]
-@i197_l = external global i197 ; <i197*> [#uses=1]
-@i197_s = external global i197 ; <i197*> [#uses=1]
-@i198_l = external global i198 ; <i198*> [#uses=1]
-@i198_s = external global i198 ; <i198*> [#uses=1]
-@i199_l = external global i199 ; <i199*> [#uses=1]
-@i199_s = external global i199 ; <i199*> [#uses=1]
-@i200_l = external global i200 ; <i200*> [#uses=1]
-@i200_s = external global i200 ; <i200*> [#uses=1]
-@i201_l = external global i201 ; <i201*> [#uses=1]
-@i201_s = external global i201 ; <i201*> [#uses=1]
-@i202_l = external global i202 ; <i202*> [#uses=1]
-@i202_s = external global i202 ; <i202*> [#uses=1]
-@i203_l = external global i203 ; <i203*> [#uses=1]
-@i203_s = external global i203 ; <i203*> [#uses=1]
-@i204_l = external global i204 ; <i204*> [#uses=1]
-@i204_s = external global i204 ; <i204*> [#uses=1]
-@i205_l = external global i205 ; <i205*> [#uses=1]
-@i205_s = external global i205 ; <i205*> [#uses=1]
-@i206_l = external global i206 ; <i206*> [#uses=1]
-@i206_s = external global i206 ; <i206*> [#uses=1]
-@i207_l = external global i207 ; <i207*> [#uses=1]
-@i207_s = external global i207 ; <i207*> [#uses=1]
-@i208_l = external global i208 ; <i208*> [#uses=1]
-@i208_s = external global i208 ; <i208*> [#uses=1]
-@i209_l = external global i209 ; <i209*> [#uses=1]
-@i209_s = external global i209 ; <i209*> [#uses=1]
-@i210_l = external global i210 ; <i210*> [#uses=1]
-@i210_s = external global i210 ; <i210*> [#uses=1]
-@i211_l = external global i211 ; <i211*> [#uses=1]
-@i211_s = external global i211 ; <i211*> [#uses=1]
-@i212_l = external global i212 ; <i212*> [#uses=1]
-@i212_s = external global i212 ; <i212*> [#uses=1]
-@i213_l = external global i213 ; <i213*> [#uses=1]
-@i213_s = external global i213 ; <i213*> [#uses=1]
-@i214_l = external global i214 ; <i214*> [#uses=1]
-@i214_s = external global i214 ; <i214*> [#uses=1]
-@i215_l = external global i215 ; <i215*> [#uses=1]
-@i215_s = external global i215 ; <i215*> [#uses=1]
-@i216_l = external global i216 ; <i216*> [#uses=1]
-@i216_s = external global i216 ; <i216*> [#uses=1]
-@i217_l = external global i217 ; <i217*> [#uses=1]
-@i217_s = external global i217 ; <i217*> [#uses=1]
-@i218_l = external global i218 ; <i218*> [#uses=1]
-@i218_s = external global i218 ; <i218*> [#uses=1]
-@i219_l = external global i219 ; <i219*> [#uses=1]
-@i219_s = external global i219 ; <i219*> [#uses=1]
-@i220_l = external global i220 ; <i220*> [#uses=1]
-@i220_s = external global i220 ; <i220*> [#uses=1]
-@i221_l = external global i221 ; <i221*> [#uses=1]
-@i221_s = external global i221 ; <i221*> [#uses=1]
-@i222_l = external global i222 ; <i222*> [#uses=1]
-@i222_s = external global i222 ; <i222*> [#uses=1]
-@i223_l = external global i223 ; <i223*> [#uses=1]
-@i223_s = external global i223 ; <i223*> [#uses=1]
-@i224_l = external global i224 ; <i224*> [#uses=1]
-@i224_s = external global i224 ; <i224*> [#uses=1]
-@i225_l = external global i225 ; <i225*> [#uses=1]
-@i225_s = external global i225 ; <i225*> [#uses=1]
-@i226_l = external global i226 ; <i226*> [#uses=1]
-@i226_s = external global i226 ; <i226*> [#uses=1]
-@i227_l = external global i227 ; <i227*> [#uses=1]
-@i227_s = external global i227 ; <i227*> [#uses=1]
-@i228_l = external global i228 ; <i228*> [#uses=1]
-@i228_s = external global i228 ; <i228*> [#uses=1]
-@i229_l = external global i229 ; <i229*> [#uses=1]
-@i229_s = external global i229 ; <i229*> [#uses=1]
-@i230_l = external global i230 ; <i230*> [#uses=1]
-@i230_s = external global i230 ; <i230*> [#uses=1]
-@i231_l = external global i231 ; <i231*> [#uses=1]
-@i231_s = external global i231 ; <i231*> [#uses=1]
-@i232_l = external global i232 ; <i232*> [#uses=1]
-@i232_s = external global i232 ; <i232*> [#uses=1]
-@i233_l = external global i233 ; <i233*> [#uses=1]
-@i233_s = external global i233 ; <i233*> [#uses=1]
-@i234_l = external global i234 ; <i234*> [#uses=1]
-@i234_s = external global i234 ; <i234*> [#uses=1]
-@i235_l = external global i235 ; <i235*> [#uses=1]
-@i235_s = external global i235 ; <i235*> [#uses=1]
-@i236_l = external global i236 ; <i236*> [#uses=1]
-@i236_s = external global i236 ; <i236*> [#uses=1]
-@i237_l = external global i237 ; <i237*> [#uses=1]
-@i237_s = external global i237 ; <i237*> [#uses=1]
-@i238_l = external global i238 ; <i238*> [#uses=1]
-@i238_s = external global i238 ; <i238*> [#uses=1]
-@i239_l = external global i239 ; <i239*> [#uses=1]
-@i239_s = external global i239 ; <i239*> [#uses=1]
-@i240_l = external global i240 ; <i240*> [#uses=1]
-@i240_s = external global i240 ; <i240*> [#uses=1]
-@i241_l = external global i241 ; <i241*> [#uses=1]
-@i241_s = external global i241 ; <i241*> [#uses=1]
-@i242_l = external global i242 ; <i242*> [#uses=1]
-@i242_s = external global i242 ; <i242*> [#uses=1]
-@i243_l = external global i243 ; <i243*> [#uses=1]
-@i243_s = external global i243 ; <i243*> [#uses=1]
-@i244_l = external global i244 ; <i244*> [#uses=1]
-@i244_s = external global i244 ; <i244*> [#uses=1]
-@i245_l = external global i245 ; <i245*> [#uses=1]
-@i245_s = external global i245 ; <i245*> [#uses=1]
-@i246_l = external global i246 ; <i246*> [#uses=1]
-@i246_s = external global i246 ; <i246*> [#uses=1]
-@i247_l = external global i247 ; <i247*> [#uses=1]
-@i247_s = external global i247 ; <i247*> [#uses=1]
-@i248_l = external global i248 ; <i248*> [#uses=1]
-@i248_s = external global i248 ; <i248*> [#uses=1]
-@i249_l = external global i249 ; <i249*> [#uses=1]
-@i249_s = external global i249 ; <i249*> [#uses=1]
-@i250_l = external global i250 ; <i250*> [#uses=1]
-@i250_s = external global i250 ; <i250*> [#uses=1]
-@i251_l = external global i251 ; <i251*> [#uses=1]
-@i251_s = external global i251 ; <i251*> [#uses=1]
-@i252_l = external global i252 ; <i252*> [#uses=1]
-@i252_s = external global i252 ; <i252*> [#uses=1]
-@i253_l = external global i253 ; <i253*> [#uses=1]
-@i253_s = external global i253 ; <i253*> [#uses=1]
-@i254_l = external global i254 ; <i254*> [#uses=1]
-@i254_s = external global i254 ; <i254*> [#uses=1]
-@i255_l = external global i255 ; <i255*> [#uses=1]
-@i255_s = external global i255 ; <i255*> [#uses=1]
-@i256_l = external global i256 ; <i256*> [#uses=1]
-@i256_s = external global i256 ; <i256*> [#uses=1]
-
-define void @i1_ls() nounwind {
- %tmp = load i1* @i1_l ; <i1> [#uses=1]
- store i1 %tmp, i1* @i1_s
- ret void
-}
-
-define void @i2_ls() nounwind {
- %tmp = load i2* @i2_l ; <i2> [#uses=1]
- store i2 %tmp, i2* @i2_s
- ret void
-}
-
-define void @i3_ls() nounwind {
- %tmp = load i3* @i3_l ; <i3> [#uses=1]
- store i3 %tmp, i3* @i3_s
- ret void
-}
-
-define void @i4_ls() nounwind {
- %tmp = load i4* @i4_l ; <i4> [#uses=1]
- store i4 %tmp, i4* @i4_s
- ret void
-}
-
-define void @i5_ls() nounwind {
- %tmp = load i5* @i5_l ; <i5> [#uses=1]
- store i5 %tmp, i5* @i5_s
- ret void
-}
-
-define void @i6_ls() nounwind {
- %tmp = load i6* @i6_l ; <i6> [#uses=1]
- store i6 %tmp, i6* @i6_s
- ret void
-}
-
-define void @i7_ls() nounwind {
- %tmp = load i7* @i7_l ; <i7> [#uses=1]
- store i7 %tmp, i7* @i7_s
- ret void
-}
-
-define void @i8_ls() nounwind {
- %tmp = load i8* @i8_l ; <i8> [#uses=1]
- store i8 %tmp, i8* @i8_s
- ret void
-}
-
-define void @i9_ls() nounwind {
- %tmp = load i9* @i9_l ; <i9> [#uses=1]
- store i9 %tmp, i9* @i9_s
- ret void
-}
-
-define void @i10_ls() nounwind {
- %tmp = load i10* @i10_l ; <i10> [#uses=1]
- store i10 %tmp, i10* @i10_s
- ret void
-}
-
-define void @i11_ls() nounwind {
- %tmp = load i11* @i11_l ; <i11> [#uses=1]
- store i11 %tmp, i11* @i11_s
- ret void
-}
-
-define void @i12_ls() nounwind {
- %tmp = load i12* @i12_l ; <i12> [#uses=1]
- store i12 %tmp, i12* @i12_s
- ret void
-}
-
-define void @i13_ls() nounwind {
- %tmp = load i13* @i13_l ; <i13> [#uses=1]
- store i13 %tmp, i13* @i13_s
- ret void
-}
-
-define void @i14_ls() nounwind {
- %tmp = load i14* @i14_l ; <i14> [#uses=1]
- store i14 %tmp, i14* @i14_s
- ret void
-}
-
-define void @i15_ls() nounwind {
- %tmp = load i15* @i15_l ; <i15> [#uses=1]
- store i15 %tmp, i15* @i15_s
- ret void
-}
-
-define void @i16_ls() nounwind {
- %tmp = load i16* @i16_l ; <i16> [#uses=1]
- store i16 %tmp, i16* @i16_s
- ret void
-}
-
-define void @i17_ls() nounwind {
- %tmp = load i17* @i17_l ; <i17> [#uses=1]
- store i17 %tmp, i17* @i17_s
- ret void
-}
-
-define void @i18_ls() nounwind {
- %tmp = load i18* @i18_l ; <i18> [#uses=1]
- store i18 %tmp, i18* @i18_s
- ret void
-}
-
-define void @i19_ls() nounwind {
- %tmp = load i19* @i19_l ; <i19> [#uses=1]
- store i19 %tmp, i19* @i19_s
- ret void
-}
-
-define void @i20_ls() nounwind {
- %tmp = load i20* @i20_l ; <i20> [#uses=1]
- store i20 %tmp, i20* @i20_s
- ret void
-}
-
-define void @i21_ls() nounwind {
- %tmp = load i21* @i21_l ; <i21> [#uses=1]
- store i21 %tmp, i21* @i21_s
- ret void
-}
-
-define void @i22_ls() nounwind {
- %tmp = load i22* @i22_l ; <i22> [#uses=1]
- store i22 %tmp, i22* @i22_s
- ret void
-}
-
-define void @i23_ls() nounwind {
- %tmp = load i23* @i23_l ; <i23> [#uses=1]
- store i23 %tmp, i23* @i23_s
- ret void
-}
-
-define void @i24_ls() nounwind {
- %tmp = load i24* @i24_l ; <i24> [#uses=1]
- store i24 %tmp, i24* @i24_s
- ret void
-}
-
-define void @i25_ls() nounwind {
- %tmp = load i25* @i25_l ; <i25> [#uses=1]
- store i25 %tmp, i25* @i25_s
- ret void
-}
-
-define void @i26_ls() nounwind {
- %tmp = load i26* @i26_l ; <i26> [#uses=1]
- store i26 %tmp, i26* @i26_s
- ret void
-}
-
-define void @i27_ls() nounwind {
- %tmp = load i27* @i27_l ; <i27> [#uses=1]
- store i27 %tmp, i27* @i27_s
- ret void
-}
-
-define void @i28_ls() nounwind {
- %tmp = load i28* @i28_l ; <i28> [#uses=1]
- store i28 %tmp, i28* @i28_s
- ret void
-}
-
-define void @i29_ls() nounwind {
- %tmp = load i29* @i29_l ; <i29> [#uses=1]
- store i29 %tmp, i29* @i29_s
- ret void
-}
-
-define void @i30_ls() nounwind {
- %tmp = load i30* @i30_l ; <i30> [#uses=1]
- store i30 %tmp, i30* @i30_s
- ret void
-}
-
-define void @i31_ls() nounwind {
- %tmp = load i31* @i31_l ; <i31> [#uses=1]
- store i31 %tmp, i31* @i31_s
- ret void
-}
-
-define void @i32_ls() nounwind {
- %tmp = load i32* @i32_l ; <i32> [#uses=1]
- store i32 %tmp, i32* @i32_s
- ret void
-}
-
-define void @i33_ls() nounwind {
- %tmp = load i33* @i33_l ; <i33> [#uses=1]
- store i33 %tmp, i33* @i33_s
- ret void
-}
-
-define void @i34_ls() nounwind {
- %tmp = load i34* @i34_l ; <i34> [#uses=1]
- store i34 %tmp, i34* @i34_s
- ret void
-}
-
-define void @i35_ls() nounwind {
- %tmp = load i35* @i35_l ; <i35> [#uses=1]
- store i35 %tmp, i35* @i35_s
- ret void
-}
-
-define void @i36_ls() nounwind {
- %tmp = load i36* @i36_l ; <i36> [#uses=1]
- store i36 %tmp, i36* @i36_s
- ret void
-}
-
-define void @i37_ls() nounwind {
- %tmp = load i37* @i37_l ; <i37> [#uses=1]
- store i37 %tmp, i37* @i37_s
- ret void
-}
-
-define void @i38_ls() nounwind {
- %tmp = load i38* @i38_l ; <i38> [#uses=1]
- store i38 %tmp, i38* @i38_s
- ret void
-}
-
-define void @i39_ls() nounwind {
- %tmp = load i39* @i39_l ; <i39> [#uses=1]
- store i39 %tmp, i39* @i39_s
- ret void
-}
-
-define void @i40_ls() nounwind {
- %tmp = load i40* @i40_l ; <i40> [#uses=1]
- store i40 %tmp, i40* @i40_s
- ret void
-}
-
-define void @i41_ls() nounwind {
- %tmp = load i41* @i41_l ; <i41> [#uses=1]
- store i41 %tmp, i41* @i41_s
- ret void
-}
-
-define void @i42_ls() nounwind {
- %tmp = load i42* @i42_l ; <i42> [#uses=1]
- store i42 %tmp, i42* @i42_s
- ret void
-}
-
-define void @i43_ls() nounwind {
- %tmp = load i43* @i43_l ; <i43> [#uses=1]
- store i43 %tmp, i43* @i43_s
- ret void
-}
-
-define void @i44_ls() nounwind {
- %tmp = load i44* @i44_l ; <i44> [#uses=1]
- store i44 %tmp, i44* @i44_s
- ret void
-}
-
-define void @i45_ls() nounwind {
- %tmp = load i45* @i45_l ; <i45> [#uses=1]
- store i45 %tmp, i45* @i45_s
- ret void
-}
-
-define void @i46_ls() nounwind {
- %tmp = load i46* @i46_l ; <i46> [#uses=1]
- store i46 %tmp, i46* @i46_s
- ret void
-}
-
-define void @i47_ls() nounwind {
- %tmp = load i47* @i47_l ; <i47> [#uses=1]
- store i47 %tmp, i47* @i47_s
- ret void
-}
-
-define void @i48_ls() nounwind {
- %tmp = load i48* @i48_l ; <i48> [#uses=1]
- store i48 %tmp, i48* @i48_s
- ret void
-}
-
-define void @i49_ls() nounwind {
- %tmp = load i49* @i49_l ; <i49> [#uses=1]
- store i49 %tmp, i49* @i49_s
- ret void
-}
-
-define void @i50_ls() nounwind {
- %tmp = load i50* @i50_l ; <i50> [#uses=1]
- store i50 %tmp, i50* @i50_s
- ret void
-}
-
-define void @i51_ls() nounwind {
- %tmp = load i51* @i51_l ; <i51> [#uses=1]
- store i51 %tmp, i51* @i51_s
- ret void
-}
-
-define void @i52_ls() nounwind {
- %tmp = load i52* @i52_l ; <i52> [#uses=1]
- store i52 %tmp, i52* @i52_s
- ret void
-}
-
-define void @i53_ls() nounwind {
- %tmp = load i53* @i53_l ; <i53> [#uses=1]
- store i53 %tmp, i53* @i53_s
- ret void
-}
-
-define void @i54_ls() nounwind {
- %tmp = load i54* @i54_l ; <i54> [#uses=1]
- store i54 %tmp, i54* @i54_s
- ret void
-}
-
-define void @i55_ls() nounwind {
- %tmp = load i55* @i55_l ; <i55> [#uses=1]
- store i55 %tmp, i55* @i55_s
- ret void
-}
-
-define void @i56_ls() nounwind {
- %tmp = load i56* @i56_l ; <i56> [#uses=1]
- store i56 %tmp, i56* @i56_s
- ret void
-}
-
-define void @i57_ls() nounwind {
- %tmp = load i57* @i57_l ; <i57> [#uses=1]
- store i57 %tmp, i57* @i57_s
- ret void
-}
-
-define void @i58_ls() nounwind {
- %tmp = load i58* @i58_l ; <i58> [#uses=1]
- store i58 %tmp, i58* @i58_s
- ret void
-}
-
-define void @i59_ls() nounwind {
- %tmp = load i59* @i59_l ; <i59> [#uses=1]
- store i59 %tmp, i59* @i59_s
- ret void
-}
-
-define void @i60_ls() nounwind {
- %tmp = load i60* @i60_l ; <i60> [#uses=1]
- store i60 %tmp, i60* @i60_s
- ret void
-}
-
-define void @i61_ls() nounwind {
- %tmp = load i61* @i61_l ; <i61> [#uses=1]
- store i61 %tmp, i61* @i61_s
- ret void
-}
-
-define void @i62_ls() nounwind {
- %tmp = load i62* @i62_l ; <i62> [#uses=1]
- store i62 %tmp, i62* @i62_s
- ret void
-}
-
-define void @i63_ls() nounwind {
- %tmp = load i63* @i63_l ; <i63> [#uses=1]
- store i63 %tmp, i63* @i63_s
- ret void
-}
-
-define void @i64_ls() nounwind {
- %tmp = load i64* @i64_l ; <i64> [#uses=1]
- store i64 %tmp, i64* @i64_s
- ret void
-}
-
-define void @i65_ls() nounwind {
- %tmp = load i65* @i65_l ; <i65> [#uses=1]
- store i65 %tmp, i65* @i65_s
- ret void
-}
-
-define void @i66_ls() nounwind {
- %tmp = load i66* @i66_l ; <i66> [#uses=1]
- store i66 %tmp, i66* @i66_s
- ret void
-}
-
-define void @i67_ls() nounwind {
- %tmp = load i67* @i67_l ; <i67> [#uses=1]
- store i67 %tmp, i67* @i67_s
- ret void
-}
-
-define void @i68_ls() nounwind {
- %tmp = load i68* @i68_l ; <i68> [#uses=1]
- store i68 %tmp, i68* @i68_s
- ret void
-}
-
-define void @i69_ls() nounwind {
- %tmp = load i69* @i69_l ; <i69> [#uses=1]
- store i69 %tmp, i69* @i69_s
- ret void
-}
-
-define void @i70_ls() nounwind {
- %tmp = load i70* @i70_l ; <i70> [#uses=1]
- store i70 %tmp, i70* @i70_s
- ret void
-}
-
-define void @i71_ls() nounwind {
- %tmp = load i71* @i71_l ; <i71> [#uses=1]
- store i71 %tmp, i71* @i71_s
- ret void
-}
-
-define void @i72_ls() nounwind {
- %tmp = load i72* @i72_l ; <i72> [#uses=1]
- store i72 %tmp, i72* @i72_s
- ret void
-}
-
-define void @i73_ls() nounwind {
- %tmp = load i73* @i73_l ; <i73> [#uses=1]
- store i73 %tmp, i73* @i73_s
- ret void
-}
-
-define void @i74_ls() nounwind {
- %tmp = load i74* @i74_l ; <i74> [#uses=1]
- store i74 %tmp, i74* @i74_s
- ret void
-}
-
-define void @i75_ls() nounwind {
- %tmp = load i75* @i75_l ; <i75> [#uses=1]
- store i75 %tmp, i75* @i75_s
- ret void
-}
-
-define void @i76_ls() nounwind {
- %tmp = load i76* @i76_l ; <i76> [#uses=1]
- store i76 %tmp, i76* @i76_s
- ret void
-}
-
-define void @i77_ls() nounwind {
- %tmp = load i77* @i77_l ; <i77> [#uses=1]
- store i77 %tmp, i77* @i77_s
- ret void
-}
-
-define void @i78_ls() nounwind {
- %tmp = load i78* @i78_l ; <i78> [#uses=1]
- store i78 %tmp, i78* @i78_s
- ret void
-}
-
-define void @i79_ls() nounwind {
- %tmp = load i79* @i79_l ; <i79> [#uses=1]
- store i79 %tmp, i79* @i79_s
- ret void
-}
-
-define void @i80_ls() nounwind {
- %tmp = load i80* @i80_l ; <i80> [#uses=1]
- store i80 %tmp, i80* @i80_s
- ret void
-}
-
-define void @i81_ls() nounwind {
- %tmp = load i81* @i81_l ; <i81> [#uses=1]
- store i81 %tmp, i81* @i81_s
- ret void
-}
-
-define void @i82_ls() nounwind {
- %tmp = load i82* @i82_l ; <i82> [#uses=1]
- store i82 %tmp, i82* @i82_s
- ret void
-}
-
-define void @i83_ls() nounwind {
- %tmp = load i83* @i83_l ; <i83> [#uses=1]
- store i83 %tmp, i83* @i83_s
- ret void
-}
-
-define void @i84_ls() nounwind {
- %tmp = load i84* @i84_l ; <i84> [#uses=1]
- store i84 %tmp, i84* @i84_s
- ret void
-}
-
-define void @i85_ls() nounwind {
- %tmp = load i85* @i85_l ; <i85> [#uses=1]
- store i85 %tmp, i85* @i85_s
- ret void
-}
-
-define void @i86_ls() nounwind {
- %tmp = load i86* @i86_l ; <i86> [#uses=1]
- store i86 %tmp, i86* @i86_s
- ret void
-}
-
-define void @i87_ls() nounwind {
- %tmp = load i87* @i87_l ; <i87> [#uses=1]
- store i87 %tmp, i87* @i87_s
- ret void
-}
-
-define void @i88_ls() nounwind {
- %tmp = load i88* @i88_l ; <i88> [#uses=1]
- store i88 %tmp, i88* @i88_s
- ret void
-}
-
-define void @i89_ls() nounwind {
- %tmp = load i89* @i89_l ; <i89> [#uses=1]
- store i89 %tmp, i89* @i89_s
- ret void
-}
-
-define void @i90_ls() nounwind {
- %tmp = load i90* @i90_l ; <i90> [#uses=1]
- store i90 %tmp, i90* @i90_s
- ret void
-}
-
-define void @i91_ls() nounwind {
- %tmp = load i91* @i91_l ; <i91> [#uses=1]
- store i91 %tmp, i91* @i91_s
- ret void
-}
-
-define void @i92_ls() nounwind {
- %tmp = load i92* @i92_l ; <i92> [#uses=1]
- store i92 %tmp, i92* @i92_s
- ret void
-}
-
-define void @i93_ls() nounwind {
- %tmp = load i93* @i93_l ; <i93> [#uses=1]
- store i93 %tmp, i93* @i93_s
- ret void
-}
-
-define void @i94_ls() nounwind {
- %tmp = load i94* @i94_l ; <i94> [#uses=1]
- store i94 %tmp, i94* @i94_s
- ret void
-}
-
-define void @i95_ls() nounwind {
- %tmp = load i95* @i95_l ; <i95> [#uses=1]
- store i95 %tmp, i95* @i95_s
- ret void
-}
-
-define void @i96_ls() nounwind {
- %tmp = load i96* @i96_l ; <i96> [#uses=1]
- store i96 %tmp, i96* @i96_s
- ret void
-}
-
-define void @i97_ls() nounwind {
- %tmp = load i97* @i97_l ; <i97> [#uses=1]
- store i97 %tmp, i97* @i97_s
- ret void
-}
-
-define void @i98_ls() nounwind {
- %tmp = load i98* @i98_l ; <i98> [#uses=1]
- store i98 %tmp, i98* @i98_s
- ret void
-}
-
-define void @i99_ls() nounwind {
- %tmp = load i99* @i99_l ; <i99> [#uses=1]
- store i99 %tmp, i99* @i99_s
- ret void
-}
-
-define void @i100_ls() nounwind {
- %tmp = load i100* @i100_l ; <i100> [#uses=1]
- store i100 %tmp, i100* @i100_s
- ret void
-}
-
-define void @i101_ls() nounwind {
- %tmp = load i101* @i101_l ; <i101> [#uses=1]
- store i101 %tmp, i101* @i101_s
- ret void
-}
-
-define void @i102_ls() nounwind {
- %tmp = load i102* @i102_l ; <i102> [#uses=1]
- store i102 %tmp, i102* @i102_s
- ret void
-}
-
-define void @i103_ls() nounwind {
- %tmp = load i103* @i103_l ; <i103> [#uses=1]
- store i103 %tmp, i103* @i103_s
- ret void
-}
-
-define void @i104_ls() nounwind {
- %tmp = load i104* @i104_l ; <i104> [#uses=1]
- store i104 %tmp, i104* @i104_s
- ret void
-}
-
-define void @i105_ls() nounwind {
- %tmp = load i105* @i105_l ; <i105> [#uses=1]
- store i105 %tmp, i105* @i105_s
- ret void
-}
-
-define void @i106_ls() nounwind {
- %tmp = load i106* @i106_l ; <i106> [#uses=1]
- store i106 %tmp, i106* @i106_s
- ret void
-}
-
-define void @i107_ls() nounwind {
- %tmp = load i107* @i107_l ; <i107> [#uses=1]
- store i107 %tmp, i107* @i107_s
- ret void
-}
-
-define void @i108_ls() nounwind {
- %tmp = load i108* @i108_l ; <i108> [#uses=1]
- store i108 %tmp, i108* @i108_s
- ret void
-}
-
-define void @i109_ls() nounwind {
- %tmp = load i109* @i109_l ; <i109> [#uses=1]
- store i109 %tmp, i109* @i109_s
- ret void
-}
-
-define void @i110_ls() nounwind {
- %tmp = load i110* @i110_l ; <i110> [#uses=1]
- store i110 %tmp, i110* @i110_s
- ret void
-}
-
-define void @i111_ls() nounwind {
- %tmp = load i111* @i111_l ; <i111> [#uses=1]
- store i111 %tmp, i111* @i111_s
- ret void
-}
-
-define void @i112_ls() nounwind {
- %tmp = load i112* @i112_l ; <i112> [#uses=1]
- store i112 %tmp, i112* @i112_s
- ret void
-}
-
-define void @i113_ls() nounwind {
- %tmp = load i113* @i113_l ; <i113> [#uses=1]
- store i113 %tmp, i113* @i113_s
- ret void
-}
-
-define void @i114_ls() nounwind {
- %tmp = load i114* @i114_l ; <i114> [#uses=1]
- store i114 %tmp, i114* @i114_s
- ret void
-}
-
-define void @i115_ls() nounwind {
- %tmp = load i115* @i115_l ; <i115> [#uses=1]
- store i115 %tmp, i115* @i115_s
- ret void
-}
-
-define void @i116_ls() nounwind {
- %tmp = load i116* @i116_l ; <i116> [#uses=1]
- store i116 %tmp, i116* @i116_s
- ret void
-}
-
-define void @i117_ls() nounwind {
- %tmp = load i117* @i117_l ; <i117> [#uses=1]
- store i117 %tmp, i117* @i117_s
- ret void
-}
-
-define void @i118_ls() nounwind {
- %tmp = load i118* @i118_l ; <i118> [#uses=1]
- store i118 %tmp, i118* @i118_s
- ret void
-}
-
-define void @i119_ls() nounwind {
- %tmp = load i119* @i119_l ; <i119> [#uses=1]
- store i119 %tmp, i119* @i119_s
- ret void
-}
-
-define void @i120_ls() nounwind {
- %tmp = load i120* @i120_l ; <i120> [#uses=1]
- store i120 %tmp, i120* @i120_s
- ret void
-}
-
-define void @i121_ls() nounwind {
- %tmp = load i121* @i121_l ; <i121> [#uses=1]
- store i121 %tmp, i121* @i121_s
- ret void
-}
-
-define void @i122_ls() nounwind {
- %tmp = load i122* @i122_l ; <i122> [#uses=1]
- store i122 %tmp, i122* @i122_s
- ret void
-}
-
-define void @i123_ls() nounwind {
- %tmp = load i123* @i123_l ; <i123> [#uses=1]
- store i123 %tmp, i123* @i123_s
- ret void
-}
-
-define void @i124_ls() nounwind {
- %tmp = load i124* @i124_l ; <i124> [#uses=1]
- store i124 %tmp, i124* @i124_s
- ret void
-}
-
-define void @i125_ls() nounwind {
- %tmp = load i125* @i125_l ; <i125> [#uses=1]
- store i125 %tmp, i125* @i125_s
- ret void
-}
-
-define void @i126_ls() nounwind {
- %tmp = load i126* @i126_l ; <i126> [#uses=1]
- store i126 %tmp, i126* @i126_s
- ret void
-}
-
-define void @i127_ls() nounwind {
- %tmp = load i127* @i127_l ; <i127> [#uses=1]
- store i127 %tmp, i127* @i127_s
- ret void
-}
-
-define void @i128_ls() nounwind {
- %tmp = load i128* @i128_l ; <i128> [#uses=1]
- store i128 %tmp, i128* @i128_s
- ret void
-}
-
-define void @i129_ls() nounwind {
- %tmp = load i129* @i129_l ; <i129> [#uses=1]
- store i129 %tmp, i129* @i129_s
- ret void
-}
-
-define void @i130_ls() nounwind {
- %tmp = load i130* @i130_l ; <i130> [#uses=1]
- store i130 %tmp, i130* @i130_s
- ret void
-}
-
-define void @i131_ls() nounwind {
- %tmp = load i131* @i131_l ; <i131> [#uses=1]
- store i131 %tmp, i131* @i131_s
- ret void
-}
-
-define void @i132_ls() nounwind {
- %tmp = load i132* @i132_l ; <i132> [#uses=1]
- store i132 %tmp, i132* @i132_s
- ret void
-}
-
-define void @i133_ls() nounwind {
- %tmp = load i133* @i133_l ; <i133> [#uses=1]
- store i133 %tmp, i133* @i133_s
- ret void
-}
-
-define void @i134_ls() nounwind {
- %tmp = load i134* @i134_l ; <i134> [#uses=1]
- store i134 %tmp, i134* @i134_s
- ret void
-}
-
-define void @i135_ls() nounwind {
- %tmp = load i135* @i135_l ; <i135> [#uses=1]
- store i135 %tmp, i135* @i135_s
- ret void
-}
-
-define void @i136_ls() nounwind {
- %tmp = load i136* @i136_l ; <i136> [#uses=1]
- store i136 %tmp, i136* @i136_s
- ret void
-}
-
-define void @i137_ls() nounwind {
- %tmp = load i137* @i137_l ; <i137> [#uses=1]
- store i137 %tmp, i137* @i137_s
- ret void
-}
-
-define void @i138_ls() nounwind {
- %tmp = load i138* @i138_l ; <i138> [#uses=1]
- store i138 %tmp, i138* @i138_s
- ret void
-}
-
-define void @i139_ls() nounwind {
- %tmp = load i139* @i139_l ; <i139> [#uses=1]
- store i139 %tmp, i139* @i139_s
- ret void
-}
-
-define void @i140_ls() nounwind {
- %tmp = load i140* @i140_l ; <i140> [#uses=1]
- store i140 %tmp, i140* @i140_s
- ret void
-}
-
-define void @i141_ls() nounwind {
- %tmp = load i141* @i141_l ; <i141> [#uses=1]
- store i141 %tmp, i141* @i141_s
- ret void
-}
-
-define void @i142_ls() nounwind {
- %tmp = load i142* @i142_l ; <i142> [#uses=1]
- store i142 %tmp, i142* @i142_s
- ret void
-}
-
-define void @i143_ls() nounwind {
- %tmp = load i143* @i143_l ; <i143> [#uses=1]
- store i143 %tmp, i143* @i143_s
- ret void
-}
-
-define void @i144_ls() nounwind {
- %tmp = load i144* @i144_l ; <i144> [#uses=1]
- store i144 %tmp, i144* @i144_s
- ret void
-}
-
-define void @i145_ls() nounwind {
- %tmp = load i145* @i145_l ; <i145> [#uses=1]
- store i145 %tmp, i145* @i145_s
- ret void
-}
-
-define void @i146_ls() nounwind {
- %tmp = load i146* @i146_l ; <i146> [#uses=1]
- store i146 %tmp, i146* @i146_s
- ret void
-}
-
-define void @i147_ls() nounwind {
- %tmp = load i147* @i147_l ; <i147> [#uses=1]
- store i147 %tmp, i147* @i147_s
- ret void
-}
-
-define void @i148_ls() nounwind {
- %tmp = load i148* @i148_l ; <i148> [#uses=1]
- store i148 %tmp, i148* @i148_s
- ret void
-}
-
-define void @i149_ls() nounwind {
- %tmp = load i149* @i149_l ; <i149> [#uses=1]
- store i149 %tmp, i149* @i149_s
- ret void
-}
-
-define void @i150_ls() nounwind {
- %tmp = load i150* @i150_l ; <i150> [#uses=1]
- store i150 %tmp, i150* @i150_s
- ret void
-}
-
-define void @i151_ls() nounwind {
- %tmp = load i151* @i151_l ; <i151> [#uses=1]
- store i151 %tmp, i151* @i151_s
- ret void
-}
-
-define void @i152_ls() nounwind {
- %tmp = load i152* @i152_l ; <i152> [#uses=1]
- store i152 %tmp, i152* @i152_s
- ret void
-}
-
-define void @i153_ls() nounwind {
- %tmp = load i153* @i153_l ; <i153> [#uses=1]
- store i153 %tmp, i153* @i153_s
- ret void
-}
-
-define void @i154_ls() nounwind {
- %tmp = load i154* @i154_l ; <i154> [#uses=1]
- store i154 %tmp, i154* @i154_s
- ret void
-}
-
-define void @i155_ls() nounwind {
- %tmp = load i155* @i155_l ; <i155> [#uses=1]
- store i155 %tmp, i155* @i155_s
- ret void
-}
-
-define void @i156_ls() nounwind {
- %tmp = load i156* @i156_l ; <i156> [#uses=1]
- store i156 %tmp, i156* @i156_s
- ret void
-}
-
-define void @i157_ls() nounwind {
- %tmp = load i157* @i157_l ; <i157> [#uses=1]
- store i157 %tmp, i157* @i157_s
- ret void
-}
-
-define void @i158_ls() nounwind {
- %tmp = load i158* @i158_l ; <i158> [#uses=1]
- store i158 %tmp, i158* @i158_s
- ret void
-}
-
-define void @i159_ls() nounwind {
- %tmp = load i159* @i159_l ; <i159> [#uses=1]
- store i159 %tmp, i159* @i159_s
- ret void
-}
-
-define void @i160_ls() nounwind {
- %tmp = load i160* @i160_l ; <i160> [#uses=1]
- store i160 %tmp, i160* @i160_s
- ret void
-}
-
-define void @i161_ls() nounwind {
- %tmp = load i161* @i161_l ; <i161> [#uses=1]
- store i161 %tmp, i161* @i161_s
- ret void
-}
-
-define void @i162_ls() nounwind {
- %tmp = load i162* @i162_l ; <i162> [#uses=1]
- store i162 %tmp, i162* @i162_s
- ret void
-}
-
-define void @i163_ls() nounwind {
- %tmp = load i163* @i163_l ; <i163> [#uses=1]
- store i163 %tmp, i163* @i163_s
- ret void
-}
-
-define void @i164_ls() nounwind {
- %tmp = load i164* @i164_l ; <i164> [#uses=1]
- store i164 %tmp, i164* @i164_s
- ret void
-}
-
-define void @i165_ls() nounwind {
- %tmp = load i165* @i165_l ; <i165> [#uses=1]
- store i165 %tmp, i165* @i165_s
- ret void
-}
-
-define void @i166_ls() nounwind {
- %tmp = load i166* @i166_l ; <i166> [#uses=1]
- store i166 %tmp, i166* @i166_s
- ret void
-}
-
-define void @i167_ls() nounwind {
- %tmp = load i167* @i167_l ; <i167> [#uses=1]
- store i167 %tmp, i167* @i167_s
- ret void
-}
-
-define void @i168_ls() nounwind {
- %tmp = load i168* @i168_l ; <i168> [#uses=1]
- store i168 %tmp, i168* @i168_s
- ret void
-}
-
-define void @i169_ls() nounwind {
- %tmp = load i169* @i169_l ; <i169> [#uses=1]
- store i169 %tmp, i169* @i169_s
- ret void
-}
-
-define void @i170_ls() nounwind {
- %tmp = load i170* @i170_l ; <i170> [#uses=1]
- store i170 %tmp, i170* @i170_s
- ret void
-}
-
-define void @i171_ls() nounwind {
- %tmp = load i171* @i171_l ; <i171> [#uses=1]
- store i171 %tmp, i171* @i171_s
- ret void
-}
-
-define void @i172_ls() nounwind {
- %tmp = load i172* @i172_l ; <i172> [#uses=1]
- store i172 %tmp, i172* @i172_s
- ret void
-}
-
-define void @i173_ls() nounwind {
- %tmp = load i173* @i173_l ; <i173> [#uses=1]
- store i173 %tmp, i173* @i173_s
- ret void
-}
-
-define void @i174_ls() nounwind {
- %tmp = load i174* @i174_l ; <i174> [#uses=1]
- store i174 %tmp, i174* @i174_s
- ret void
-}
-
-define void @i175_ls() nounwind {
- %tmp = load i175* @i175_l ; <i175> [#uses=1]
- store i175 %tmp, i175* @i175_s
- ret void
-}
-
-define void @i176_ls() nounwind {
- %tmp = load i176* @i176_l ; <i176> [#uses=1]
- store i176 %tmp, i176* @i176_s
- ret void
-}
-
-define void @i177_ls() nounwind {
- %tmp = load i177* @i177_l ; <i177> [#uses=1]
- store i177 %tmp, i177* @i177_s
- ret void
-}
-
-define void @i178_ls() nounwind {
- %tmp = load i178* @i178_l ; <i178> [#uses=1]
- store i178 %tmp, i178* @i178_s
- ret void
-}
-
-define void @i179_ls() nounwind {
- %tmp = load i179* @i179_l ; <i179> [#uses=1]
- store i179 %tmp, i179* @i179_s
- ret void
-}
-
-define void @i180_ls() nounwind {
- %tmp = load i180* @i180_l ; <i180> [#uses=1]
- store i180 %tmp, i180* @i180_s
- ret void
-}
-
-define void @i181_ls() nounwind {
- %tmp = load i181* @i181_l ; <i181> [#uses=1]
- store i181 %tmp, i181* @i181_s
- ret void
-}
-
-define void @i182_ls() nounwind {
- %tmp = load i182* @i182_l ; <i182> [#uses=1]
- store i182 %tmp, i182* @i182_s
- ret void
-}
-
-define void @i183_ls() nounwind {
- %tmp = load i183* @i183_l ; <i183> [#uses=1]
- store i183 %tmp, i183* @i183_s
- ret void
-}
-
-define void @i184_ls() nounwind {
- %tmp = load i184* @i184_l ; <i184> [#uses=1]
- store i184 %tmp, i184* @i184_s
- ret void
-}
-
-define void @i185_ls() nounwind {
- %tmp = load i185* @i185_l ; <i185> [#uses=1]
- store i185 %tmp, i185* @i185_s
- ret void
-}
-
-define void @i186_ls() nounwind {
- %tmp = load i186* @i186_l ; <i186> [#uses=1]
- store i186 %tmp, i186* @i186_s
- ret void
-}
-
-define void @i187_ls() nounwind {
- %tmp = load i187* @i187_l ; <i187> [#uses=1]
- store i187 %tmp, i187* @i187_s
- ret void
-}
-
-define void @i188_ls() nounwind {
- %tmp = load i188* @i188_l ; <i188> [#uses=1]
- store i188 %tmp, i188* @i188_s
- ret void
-}
-
-define void @i189_ls() nounwind {
- %tmp = load i189* @i189_l ; <i189> [#uses=1]
- store i189 %tmp, i189* @i189_s
- ret void
-}
-
-define void @i190_ls() nounwind {
- %tmp = load i190* @i190_l ; <i190> [#uses=1]
- store i190 %tmp, i190* @i190_s
- ret void
-}
-
-define void @i191_ls() nounwind {
- %tmp = load i191* @i191_l ; <i191> [#uses=1]
- store i191 %tmp, i191* @i191_s
- ret void
-}
-
-define void @i192_ls() nounwind {
- %tmp = load i192* @i192_l ; <i192> [#uses=1]
- store i192 %tmp, i192* @i192_s
- ret void
-}
-
-define void @i193_ls() nounwind {
- %tmp = load i193* @i193_l ; <i193> [#uses=1]
- store i193 %tmp, i193* @i193_s
- ret void
-}
-
-define void @i194_ls() nounwind {
- %tmp = load i194* @i194_l ; <i194> [#uses=1]
- store i194 %tmp, i194* @i194_s
- ret void
-}
-
-define void @i195_ls() nounwind {
- %tmp = load i195* @i195_l ; <i195> [#uses=1]
- store i195 %tmp, i195* @i195_s
- ret void
-}
-
-define void @i196_ls() nounwind {
- %tmp = load i196* @i196_l ; <i196> [#uses=1]
- store i196 %tmp, i196* @i196_s
- ret void
-}
-
-define void @i197_ls() nounwind {
- %tmp = load i197* @i197_l ; <i197> [#uses=1]
- store i197 %tmp, i197* @i197_s
- ret void
-}
-
-define void @i198_ls() nounwind {
- %tmp = load i198* @i198_l ; <i198> [#uses=1]
- store i198 %tmp, i198* @i198_s
- ret void
-}
-
-define void @i199_ls() nounwind {
- %tmp = load i199* @i199_l ; <i199> [#uses=1]
- store i199 %tmp, i199* @i199_s
- ret void
-}
-
-define void @i200_ls() nounwind {
- %tmp = load i200* @i200_l ; <i200> [#uses=1]
- store i200 %tmp, i200* @i200_s
- ret void
-}
-
-define void @i201_ls() nounwind {
- %tmp = load i201* @i201_l ; <i201> [#uses=1]
- store i201 %tmp, i201* @i201_s
- ret void
-}
-
-define void @i202_ls() nounwind {
- %tmp = load i202* @i202_l ; <i202> [#uses=1]
- store i202 %tmp, i202* @i202_s
- ret void
-}
-
-define void @i203_ls() nounwind {
- %tmp = load i203* @i203_l ; <i203> [#uses=1]
- store i203 %tmp, i203* @i203_s
- ret void
-}
-
-define void @i204_ls() nounwind {
- %tmp = load i204* @i204_l ; <i204> [#uses=1]
- store i204 %tmp, i204* @i204_s
- ret void
-}
-
-define void @i205_ls() nounwind {
- %tmp = load i205* @i205_l ; <i205> [#uses=1]
- store i205 %tmp, i205* @i205_s
- ret void
-}
-
-define void @i206_ls() nounwind {
- %tmp = load i206* @i206_l ; <i206> [#uses=1]
- store i206 %tmp, i206* @i206_s
- ret void
-}
-
-define void @i207_ls() nounwind {
- %tmp = load i207* @i207_l ; <i207> [#uses=1]
- store i207 %tmp, i207* @i207_s
- ret void
-}
-
-define void @i208_ls() nounwind {
- %tmp = load i208* @i208_l ; <i208> [#uses=1]
- store i208 %tmp, i208* @i208_s
- ret void
-}
-
-define void @i209_ls() nounwind {
- %tmp = load i209* @i209_l ; <i209> [#uses=1]
- store i209 %tmp, i209* @i209_s
- ret void
-}
-
-define void @i210_ls() nounwind {
- %tmp = load i210* @i210_l ; <i210> [#uses=1]
- store i210 %tmp, i210* @i210_s
- ret void
-}
-
-define void @i211_ls() nounwind {
- %tmp = load i211* @i211_l ; <i211> [#uses=1]
- store i211 %tmp, i211* @i211_s
- ret void
-}
-
-define void @i212_ls() nounwind {
- %tmp = load i212* @i212_l ; <i212> [#uses=1]
- store i212 %tmp, i212* @i212_s
- ret void
-}
-
-define void @i213_ls() nounwind {
- %tmp = load i213* @i213_l ; <i213> [#uses=1]
- store i213 %tmp, i213* @i213_s
- ret void
-}
-
-define void @i214_ls() nounwind {
- %tmp = load i214* @i214_l ; <i214> [#uses=1]
- store i214 %tmp, i214* @i214_s
- ret void
-}
-
-define void @i215_ls() nounwind {
- %tmp = load i215* @i215_l ; <i215> [#uses=1]
- store i215 %tmp, i215* @i215_s
- ret void
-}
-
-define void @i216_ls() nounwind {
- %tmp = load i216* @i216_l ; <i216> [#uses=1]
- store i216 %tmp, i216* @i216_s
- ret void
-}
-
-define void @i217_ls() nounwind {
- %tmp = load i217* @i217_l ; <i217> [#uses=1]
- store i217 %tmp, i217* @i217_s
- ret void
-}
-
-define void @i218_ls() nounwind {
- %tmp = load i218* @i218_l ; <i218> [#uses=1]
- store i218 %tmp, i218* @i218_s
- ret void
-}
-
-define void @i219_ls() nounwind {
- %tmp = load i219* @i219_l ; <i219> [#uses=1]
- store i219 %tmp, i219* @i219_s
- ret void
-}
-
-define void @i220_ls() nounwind {
- %tmp = load i220* @i220_l ; <i220> [#uses=1]
- store i220 %tmp, i220* @i220_s
- ret void
-}
-
-define void @i221_ls() nounwind {
- %tmp = load i221* @i221_l ; <i221> [#uses=1]
- store i221 %tmp, i221* @i221_s
- ret void
-}
-
-define void @i222_ls() nounwind {
- %tmp = load i222* @i222_l ; <i222> [#uses=1]
- store i222 %tmp, i222* @i222_s
- ret void
-}
-
-define void @i223_ls() nounwind {
- %tmp = load i223* @i223_l ; <i223> [#uses=1]
- store i223 %tmp, i223* @i223_s
- ret void
-}
-
-define void @i224_ls() nounwind {
- %tmp = load i224* @i224_l ; <i224> [#uses=1]
- store i224 %tmp, i224* @i224_s
- ret void
-}
-
-define void @i225_ls() nounwind {
- %tmp = load i225* @i225_l ; <i225> [#uses=1]
- store i225 %tmp, i225* @i225_s
- ret void
-}
-
-define void @i226_ls() nounwind {
- %tmp = load i226* @i226_l ; <i226> [#uses=1]
- store i226 %tmp, i226* @i226_s
- ret void
-}
-
-define void @i227_ls() nounwind {
- %tmp = load i227* @i227_l ; <i227> [#uses=1]
- store i227 %tmp, i227* @i227_s
- ret void
-}
-
-define void @i228_ls() nounwind {
- %tmp = load i228* @i228_l ; <i228> [#uses=1]
- store i228 %tmp, i228* @i228_s
- ret void
-}
-
-define void @i229_ls() nounwind {
- %tmp = load i229* @i229_l ; <i229> [#uses=1]
- store i229 %tmp, i229* @i229_s
- ret void
-}
-
-define void @i230_ls() nounwind {
- %tmp = load i230* @i230_l ; <i230> [#uses=1]
- store i230 %tmp, i230* @i230_s
- ret void
-}
-
-define void @i231_ls() nounwind {
- %tmp = load i231* @i231_l ; <i231> [#uses=1]
- store i231 %tmp, i231* @i231_s
- ret void
-}
-
-define void @i232_ls() nounwind {
- %tmp = load i232* @i232_l ; <i232> [#uses=1]
- store i232 %tmp, i232* @i232_s
- ret void
-}
-
-define void @i233_ls() nounwind {
- %tmp = load i233* @i233_l ; <i233> [#uses=1]
- store i233 %tmp, i233* @i233_s
- ret void
-}
-
-define void @i234_ls() nounwind {
- %tmp = load i234* @i234_l ; <i234> [#uses=1]
- store i234 %tmp, i234* @i234_s
- ret void
-}
-
-define void @i235_ls() nounwind {
- %tmp = load i235* @i235_l ; <i235> [#uses=1]
- store i235 %tmp, i235* @i235_s
- ret void
-}
-
-define void @i236_ls() nounwind {
- %tmp = load i236* @i236_l ; <i236> [#uses=1]
- store i236 %tmp, i236* @i236_s
- ret void
-}
-
-define void @i237_ls() nounwind {
- %tmp = load i237* @i237_l ; <i237> [#uses=1]
- store i237 %tmp, i237* @i237_s
- ret void
-}
-
-define void @i238_ls() nounwind {
- %tmp = load i238* @i238_l ; <i238> [#uses=1]
- store i238 %tmp, i238* @i238_s
- ret void
-}
-
-define void @i239_ls() nounwind {
- %tmp = load i239* @i239_l ; <i239> [#uses=1]
- store i239 %tmp, i239* @i239_s
- ret void
-}
-
-define void @i240_ls() nounwind {
- %tmp = load i240* @i240_l ; <i240> [#uses=1]
- store i240 %tmp, i240* @i240_s
- ret void
-}
-
-define void @i241_ls() nounwind {
- %tmp = load i241* @i241_l ; <i241> [#uses=1]
- store i241 %tmp, i241* @i241_s
- ret void
-}
-
-define void @i242_ls() nounwind {
- %tmp = load i242* @i242_l ; <i242> [#uses=1]
- store i242 %tmp, i242* @i242_s
- ret void
-}
-
-define void @i243_ls() nounwind {
- %tmp = load i243* @i243_l ; <i243> [#uses=1]
- store i243 %tmp, i243* @i243_s
- ret void
-}
-
-define void @i244_ls() nounwind {
- %tmp = load i244* @i244_l ; <i244> [#uses=1]
- store i244 %tmp, i244* @i244_s
- ret void
-}
-
-define void @i245_ls() nounwind {
- %tmp = load i245* @i245_l ; <i245> [#uses=1]
- store i245 %tmp, i245* @i245_s
- ret void
-}
-
-define void @i246_ls() nounwind {
- %tmp = load i246* @i246_l ; <i246> [#uses=1]
- store i246 %tmp, i246* @i246_s
- ret void
-}
-
-define void @i247_ls() nounwind {
- %tmp = load i247* @i247_l ; <i247> [#uses=1]
- store i247 %tmp, i247* @i247_s
- ret void
-}
-
-define void @i248_ls() nounwind {
- %tmp = load i248* @i248_l ; <i248> [#uses=1]
- store i248 %tmp, i248* @i248_s
- ret void
-}
-
-define void @i249_ls() nounwind {
- %tmp = load i249* @i249_l ; <i249> [#uses=1]
- store i249 %tmp, i249* @i249_s
- ret void
-}
-
-define void @i250_ls() nounwind {
- %tmp = load i250* @i250_l ; <i250> [#uses=1]
- store i250 %tmp, i250* @i250_s
- ret void
-}
-
-define void @i251_ls() nounwind {
- %tmp = load i251* @i251_l ; <i251> [#uses=1]
- store i251 %tmp, i251* @i251_s
- ret void
-}
-
-define void @i252_ls() nounwind {
- %tmp = load i252* @i252_l ; <i252> [#uses=1]
- store i252 %tmp, i252* @i252_s
- ret void
-}
-
-define void @i253_ls() nounwind {
- %tmp = load i253* @i253_l ; <i253> [#uses=1]
- store i253 %tmp, i253* @i253_s
- ret void
-}
-
-define void @i254_ls() nounwind {
- %tmp = load i254* @i254_l ; <i254> [#uses=1]
- store i254 %tmp, i254* @i254_s
- ret void
-}
-
-define void @i255_ls() nounwind {
- %tmp = load i255* @i255_l ; <i255> [#uses=1]
- store i255 %tmp, i255* @i255_s
- ret void
-}
-
-define void @i256_ls() nounwind {
- %tmp = load i256* @i256_l ; <i256> [#uses=1]
- store i256 %tmp, i256* @i256_s
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/APIntParam.ll b/libclamav/c++/llvm/test/CodeGen/Generic/APIntParam.ll
deleted file mode 100644
index 8aa0b49..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/APIntParam.ll
+++ /dev/null
@@ -1,1537 +0,0 @@
-; RUN: llc < %s > %t
-@i1_s = external global i1 ; <i1*> [#uses=1]
-@i2_s = external global i2 ; <i2*> [#uses=1]
-@i3_s = external global i3 ; <i3*> [#uses=1]
-@i4_s = external global i4 ; <i4*> [#uses=1]
-@i5_s = external global i5 ; <i5*> [#uses=1]
-@i6_s = external global i6 ; <i6*> [#uses=1]
-@i7_s = external global i7 ; <i7*> [#uses=1]
-@i8_s = external global i8 ; <i8*> [#uses=1]
-@i9_s = external global i9 ; <i9*> [#uses=1]
-@i10_s = external global i10 ; <i10*> [#uses=1]
-@i11_s = external global i11 ; <i11*> [#uses=1]
-@i12_s = external global i12 ; <i12*> [#uses=1]
-@i13_s = external global i13 ; <i13*> [#uses=1]
-@i14_s = external global i14 ; <i14*> [#uses=1]
-@i15_s = external global i15 ; <i15*> [#uses=1]
-@i16_s = external global i16 ; <i16*> [#uses=1]
-@i17_s = external global i17 ; <i17*> [#uses=1]
-@i18_s = external global i18 ; <i18*> [#uses=1]
-@i19_s = external global i19 ; <i19*> [#uses=1]
-@i20_s = external global i20 ; <i20*> [#uses=1]
-@i21_s = external global i21 ; <i21*> [#uses=1]
-@i22_s = external global i22 ; <i22*> [#uses=1]
-@i23_s = external global i23 ; <i23*> [#uses=1]
-@i24_s = external global i24 ; <i24*> [#uses=1]
-@i25_s = external global i25 ; <i25*> [#uses=1]
-@i26_s = external global i26 ; <i26*> [#uses=1]
-@i27_s = external global i27 ; <i27*> [#uses=1]
-@i28_s = external global i28 ; <i28*> [#uses=1]
-@i29_s = external global i29 ; <i29*> [#uses=1]
-@i30_s = external global i30 ; <i30*> [#uses=1]
-@i31_s = external global i31 ; <i31*> [#uses=1]
-@i32_s = external global i32 ; <i32*> [#uses=1]
-@i33_s = external global i33 ; <i33*> [#uses=1]
-@i34_s = external global i34 ; <i34*> [#uses=1]
-@i35_s = external global i35 ; <i35*> [#uses=1]
-@i36_s = external global i36 ; <i36*> [#uses=1]
-@i37_s = external global i37 ; <i37*> [#uses=1]
-@i38_s = external global i38 ; <i38*> [#uses=1]
-@i39_s = external global i39 ; <i39*> [#uses=1]
-@i40_s = external global i40 ; <i40*> [#uses=1]
-@i41_s = external global i41 ; <i41*> [#uses=1]
-@i42_s = external global i42 ; <i42*> [#uses=1]
-@i43_s = external global i43 ; <i43*> [#uses=1]
-@i44_s = external global i44 ; <i44*> [#uses=1]
-@i45_s = external global i45 ; <i45*> [#uses=1]
-@i46_s = external global i46 ; <i46*> [#uses=1]
-@i47_s = external global i47 ; <i47*> [#uses=1]
-@i48_s = external global i48 ; <i48*> [#uses=1]
-@i49_s = external global i49 ; <i49*> [#uses=1]
-@i50_s = external global i50 ; <i50*> [#uses=1]
-@i51_s = external global i51 ; <i51*> [#uses=1]
-@i52_s = external global i52 ; <i52*> [#uses=1]
-@i53_s = external global i53 ; <i53*> [#uses=1]
-@i54_s = external global i54 ; <i54*> [#uses=1]
-@i55_s = external global i55 ; <i55*> [#uses=1]
-@i56_s = external global i56 ; <i56*> [#uses=1]
-@i57_s = external global i57 ; <i57*> [#uses=1]
-@i58_s = external global i58 ; <i58*> [#uses=1]
-@i59_s = external global i59 ; <i59*> [#uses=1]
-@i60_s = external global i60 ; <i60*> [#uses=1]
-@i61_s = external global i61 ; <i61*> [#uses=1]
-@i62_s = external global i62 ; <i62*> [#uses=1]
-@i63_s = external global i63 ; <i63*> [#uses=1]
-@i64_s = external global i64 ; <i64*> [#uses=1]
-@i65_s = external global i65 ; <i65*> [#uses=1]
-@i66_s = external global i66 ; <i66*> [#uses=1]
-@i67_s = external global i67 ; <i67*> [#uses=1]
-@i68_s = external global i68 ; <i68*> [#uses=1]
-@i69_s = external global i69 ; <i69*> [#uses=1]
-@i70_s = external global i70 ; <i70*> [#uses=1]
-@i71_s = external global i71 ; <i71*> [#uses=1]
-@i72_s = external global i72 ; <i72*> [#uses=1]
-@i73_s = external global i73 ; <i73*> [#uses=1]
-@i74_s = external global i74 ; <i74*> [#uses=1]
-@i75_s = external global i75 ; <i75*> [#uses=1]
-@i76_s = external global i76 ; <i76*> [#uses=1]
-@i77_s = external global i77 ; <i77*> [#uses=1]
-@i78_s = external global i78 ; <i78*> [#uses=1]
-@i79_s = external global i79 ; <i79*> [#uses=1]
-@i80_s = external global i80 ; <i80*> [#uses=1]
-@i81_s = external global i81 ; <i81*> [#uses=1]
-@i82_s = external global i82 ; <i82*> [#uses=1]
-@i83_s = external global i83 ; <i83*> [#uses=1]
-@i84_s = external global i84 ; <i84*> [#uses=1]
-@i85_s = external global i85 ; <i85*> [#uses=1]
-@i86_s = external global i86 ; <i86*> [#uses=1]
-@i87_s = external global i87 ; <i87*> [#uses=1]
-@i88_s = external global i88 ; <i88*> [#uses=1]
-@i89_s = external global i89 ; <i89*> [#uses=1]
-@i90_s = external global i90 ; <i90*> [#uses=1]
-@i91_s = external global i91 ; <i91*> [#uses=1]
-@i92_s = external global i92 ; <i92*> [#uses=1]
-@i93_s = external global i93 ; <i93*> [#uses=1]
-@i94_s = external global i94 ; <i94*> [#uses=1]
-@i95_s = external global i95 ; <i95*> [#uses=1]
-@i96_s = external global i96 ; <i96*> [#uses=1]
-@i97_s = external global i97 ; <i97*> [#uses=1]
-@i98_s = external global i98 ; <i98*> [#uses=1]
-@i99_s = external global i99 ; <i99*> [#uses=1]
-@i100_s = external global i100 ; <i100*> [#uses=1]
-@i101_s = external global i101 ; <i101*> [#uses=1]
-@i102_s = external global i102 ; <i102*> [#uses=1]
-@i103_s = external global i103 ; <i103*> [#uses=1]
-@i104_s = external global i104 ; <i104*> [#uses=1]
-@i105_s = external global i105 ; <i105*> [#uses=1]
-@i106_s = external global i106 ; <i106*> [#uses=1]
-@i107_s = external global i107 ; <i107*> [#uses=1]
-@i108_s = external global i108 ; <i108*> [#uses=1]
-@i109_s = external global i109 ; <i109*> [#uses=1]
-@i110_s = external global i110 ; <i110*> [#uses=1]
-@i111_s = external global i111 ; <i111*> [#uses=1]
-@i112_s = external global i112 ; <i112*> [#uses=1]
-@i113_s = external global i113 ; <i113*> [#uses=1]
-@i114_s = external global i114 ; <i114*> [#uses=1]
-@i115_s = external global i115 ; <i115*> [#uses=1]
-@i116_s = external global i116 ; <i116*> [#uses=1]
-@i117_s = external global i117 ; <i117*> [#uses=1]
-@i118_s = external global i118 ; <i118*> [#uses=1]
-@i119_s = external global i119 ; <i119*> [#uses=1]
-@i120_s = external global i120 ; <i120*> [#uses=1]
-@i121_s = external global i121 ; <i121*> [#uses=1]
-@i122_s = external global i122 ; <i122*> [#uses=1]
-@i123_s = external global i123 ; <i123*> [#uses=1]
-@i124_s = external global i124 ; <i124*> [#uses=1]
-@i125_s = external global i125 ; <i125*> [#uses=1]
-@i126_s = external global i126 ; <i126*> [#uses=1]
-@i127_s = external global i127 ; <i127*> [#uses=1]
-@i128_s = external global i128 ; <i128*> [#uses=1]
-@i129_s = external global i129 ; <i129*> [#uses=1]
-@i130_s = external global i130 ; <i130*> [#uses=1]
-@i131_s = external global i131 ; <i131*> [#uses=1]
-@i132_s = external global i132 ; <i132*> [#uses=1]
-@i133_s = external global i133 ; <i133*> [#uses=1]
-@i134_s = external global i134 ; <i134*> [#uses=1]
-@i135_s = external global i135 ; <i135*> [#uses=1]
-@i136_s = external global i136 ; <i136*> [#uses=1]
-@i137_s = external global i137 ; <i137*> [#uses=1]
-@i138_s = external global i138 ; <i138*> [#uses=1]
-@i139_s = external global i139 ; <i139*> [#uses=1]
-@i140_s = external global i140 ; <i140*> [#uses=1]
-@i141_s = external global i141 ; <i141*> [#uses=1]
-@i142_s = external global i142 ; <i142*> [#uses=1]
-@i143_s = external global i143 ; <i143*> [#uses=1]
-@i144_s = external global i144 ; <i144*> [#uses=1]
-@i145_s = external global i145 ; <i145*> [#uses=1]
-@i146_s = external global i146 ; <i146*> [#uses=1]
-@i147_s = external global i147 ; <i147*> [#uses=1]
-@i148_s = external global i148 ; <i148*> [#uses=1]
-@i149_s = external global i149 ; <i149*> [#uses=1]
-@i150_s = external global i150 ; <i150*> [#uses=1]
-@i151_s = external global i151 ; <i151*> [#uses=1]
-@i152_s = external global i152 ; <i152*> [#uses=1]
-@i153_s = external global i153 ; <i153*> [#uses=1]
-@i154_s = external global i154 ; <i154*> [#uses=1]
-@i155_s = external global i155 ; <i155*> [#uses=1]
-@i156_s = external global i156 ; <i156*> [#uses=1]
-@i157_s = external global i157 ; <i157*> [#uses=1]
-@i158_s = external global i158 ; <i158*> [#uses=1]
-@i159_s = external global i159 ; <i159*> [#uses=1]
-@i160_s = external global i160 ; <i160*> [#uses=1]
-@i161_s = external global i161 ; <i161*> [#uses=1]
-@i162_s = external global i162 ; <i162*> [#uses=1]
-@i163_s = external global i163 ; <i163*> [#uses=1]
-@i164_s = external global i164 ; <i164*> [#uses=1]
-@i165_s = external global i165 ; <i165*> [#uses=1]
-@i166_s = external global i166 ; <i166*> [#uses=1]
-@i167_s = external global i167 ; <i167*> [#uses=1]
-@i168_s = external global i168 ; <i168*> [#uses=1]
-@i169_s = external global i169 ; <i169*> [#uses=1]
-@i170_s = external global i170 ; <i170*> [#uses=1]
-@i171_s = external global i171 ; <i171*> [#uses=1]
-@i172_s = external global i172 ; <i172*> [#uses=1]
-@i173_s = external global i173 ; <i173*> [#uses=1]
-@i174_s = external global i174 ; <i174*> [#uses=1]
-@i175_s = external global i175 ; <i175*> [#uses=1]
-@i176_s = external global i176 ; <i176*> [#uses=1]
-@i177_s = external global i177 ; <i177*> [#uses=1]
-@i178_s = external global i178 ; <i178*> [#uses=1]
-@i179_s = external global i179 ; <i179*> [#uses=1]
-@i180_s = external global i180 ; <i180*> [#uses=1]
-@i181_s = external global i181 ; <i181*> [#uses=1]
-@i182_s = external global i182 ; <i182*> [#uses=1]
-@i183_s = external global i183 ; <i183*> [#uses=1]
-@i184_s = external global i184 ; <i184*> [#uses=1]
-@i185_s = external global i185 ; <i185*> [#uses=1]
-@i186_s = external global i186 ; <i186*> [#uses=1]
-@i187_s = external global i187 ; <i187*> [#uses=1]
-@i188_s = external global i188 ; <i188*> [#uses=1]
-@i189_s = external global i189 ; <i189*> [#uses=1]
-@i190_s = external global i190 ; <i190*> [#uses=1]
-@i191_s = external global i191 ; <i191*> [#uses=1]
-@i192_s = external global i192 ; <i192*> [#uses=1]
-@i193_s = external global i193 ; <i193*> [#uses=1]
-@i194_s = external global i194 ; <i194*> [#uses=1]
-@i195_s = external global i195 ; <i195*> [#uses=1]
-@i196_s = external global i196 ; <i196*> [#uses=1]
-@i197_s = external global i197 ; <i197*> [#uses=1]
-@i198_s = external global i198 ; <i198*> [#uses=1]
-@i199_s = external global i199 ; <i199*> [#uses=1]
-@i200_s = external global i200 ; <i200*> [#uses=1]
-@i201_s = external global i201 ; <i201*> [#uses=1]
-@i202_s = external global i202 ; <i202*> [#uses=1]
-@i203_s = external global i203 ; <i203*> [#uses=1]
-@i204_s = external global i204 ; <i204*> [#uses=1]
-@i205_s = external global i205 ; <i205*> [#uses=1]
-@i206_s = external global i206 ; <i206*> [#uses=1]
-@i207_s = external global i207 ; <i207*> [#uses=1]
-@i208_s = external global i208 ; <i208*> [#uses=1]
-@i209_s = external global i209 ; <i209*> [#uses=1]
-@i210_s = external global i210 ; <i210*> [#uses=1]
-@i211_s = external global i211 ; <i211*> [#uses=1]
-@i212_s = external global i212 ; <i212*> [#uses=1]
-@i213_s = external global i213 ; <i213*> [#uses=1]
-@i214_s = external global i214 ; <i214*> [#uses=1]
-@i215_s = external global i215 ; <i215*> [#uses=1]
-@i216_s = external global i216 ; <i216*> [#uses=1]
-@i217_s = external global i217 ; <i217*> [#uses=1]
-@i218_s = external global i218 ; <i218*> [#uses=1]
-@i219_s = external global i219 ; <i219*> [#uses=1]
-@i220_s = external global i220 ; <i220*> [#uses=1]
-@i221_s = external global i221 ; <i221*> [#uses=1]
-@i222_s = external global i222 ; <i222*> [#uses=1]
-@i223_s = external global i223 ; <i223*> [#uses=1]
-@i224_s = external global i224 ; <i224*> [#uses=1]
-@i225_s = external global i225 ; <i225*> [#uses=1]
-@i226_s = external global i226 ; <i226*> [#uses=1]
-@i227_s = external global i227 ; <i227*> [#uses=1]
-@i228_s = external global i228 ; <i228*> [#uses=1]
-@i229_s = external global i229 ; <i229*> [#uses=1]
-@i230_s = external global i230 ; <i230*> [#uses=1]
-@i231_s = external global i231 ; <i231*> [#uses=1]
-@i232_s = external global i232 ; <i232*> [#uses=1]
-@i233_s = external global i233 ; <i233*> [#uses=1]
-@i234_s = external global i234 ; <i234*> [#uses=1]
-@i235_s = external global i235 ; <i235*> [#uses=1]
-@i236_s = external global i236 ; <i236*> [#uses=1]
-@i237_s = external global i237 ; <i237*> [#uses=1]
-@i238_s = external global i238 ; <i238*> [#uses=1]
-@i239_s = external global i239 ; <i239*> [#uses=1]
-@i240_s = external global i240 ; <i240*> [#uses=1]
-@i241_s = external global i241 ; <i241*> [#uses=1]
-@i242_s = external global i242 ; <i242*> [#uses=1]
-@i243_s = external global i243 ; <i243*> [#uses=1]
-@i244_s = external global i244 ; <i244*> [#uses=1]
-@i245_s = external global i245 ; <i245*> [#uses=1]
-@i246_s = external global i246 ; <i246*> [#uses=1]
-@i247_s = external global i247 ; <i247*> [#uses=1]
-@i248_s = external global i248 ; <i248*> [#uses=1]
-@i249_s = external global i249 ; <i249*> [#uses=1]
-@i250_s = external global i250 ; <i250*> [#uses=1]
-@i251_s = external global i251 ; <i251*> [#uses=1]
-@i252_s = external global i252 ; <i252*> [#uses=1]
-@i253_s = external global i253 ; <i253*> [#uses=1]
-@i254_s = external global i254 ; <i254*> [#uses=1]
-@i255_s = external global i255 ; <i255*> [#uses=1]
-@i256_s = external global i256 ; <i256*> [#uses=1]
-
-define void @i1_ls(i1 %x) nounwind {
- store i1 %x, i1* @i1_s
- ret void
-}
-
-define void @i2_ls(i2 %x) nounwind {
- store i2 %x, i2* @i2_s
- ret void
-}
-
-define void @i3_ls(i3 %x) nounwind {
- store i3 %x, i3* @i3_s
- ret void
-}
-
-define void @i4_ls(i4 %x) nounwind {
- store i4 %x, i4* @i4_s
- ret void
-}
-
-define void @i5_ls(i5 %x) nounwind {
- store i5 %x, i5* @i5_s
- ret void
-}
-
-define void @i6_ls(i6 %x) nounwind {
- store i6 %x, i6* @i6_s
- ret void
-}
-
-define void @i7_ls(i7 %x) nounwind {
- store i7 %x, i7* @i7_s
- ret void
-}
-
-define void @i8_ls(i8 %x) nounwind {
- store i8 %x, i8* @i8_s
- ret void
-}
-
-define void @i9_ls(i9 %x) nounwind {
- store i9 %x, i9* @i9_s
- ret void
-}
-
-define void @i10_ls(i10 %x) nounwind {
- store i10 %x, i10* @i10_s
- ret void
-}
-
-define void @i11_ls(i11 %x) nounwind {
- store i11 %x, i11* @i11_s
- ret void
-}
-
-define void @i12_ls(i12 %x) nounwind {
- store i12 %x, i12* @i12_s
- ret void
-}
-
-define void @i13_ls(i13 %x) nounwind {
- store i13 %x, i13* @i13_s
- ret void
-}
-
-define void @i14_ls(i14 %x) nounwind {
- store i14 %x, i14* @i14_s
- ret void
-}
-
-define void @i15_ls(i15 %x) nounwind {
- store i15 %x, i15* @i15_s
- ret void
-}
-
-define void @i16_ls(i16 %x) nounwind {
- store i16 %x, i16* @i16_s
- ret void
-}
-
-define void @i17_ls(i17 %x) nounwind {
- store i17 %x, i17* @i17_s
- ret void
-}
-
-define void @i18_ls(i18 %x) nounwind {
- store i18 %x, i18* @i18_s
- ret void
-}
-
-define void @i19_ls(i19 %x) nounwind {
- store i19 %x, i19* @i19_s
- ret void
-}
-
-define void @i20_ls(i20 %x) nounwind {
- store i20 %x, i20* @i20_s
- ret void
-}
-
-define void @i21_ls(i21 %x) nounwind {
- store i21 %x, i21* @i21_s
- ret void
-}
-
-define void @i22_ls(i22 %x) nounwind {
- store i22 %x, i22* @i22_s
- ret void
-}
-
-define void @i23_ls(i23 %x) nounwind {
- store i23 %x, i23* @i23_s
- ret void
-}
-
-define void @i24_ls(i24 %x) nounwind {
- store i24 %x, i24* @i24_s
- ret void
-}
-
-define void @i25_ls(i25 %x) nounwind {
- store i25 %x, i25* @i25_s
- ret void
-}
-
-define void @i26_ls(i26 %x) nounwind {
- store i26 %x, i26* @i26_s
- ret void
-}
-
-define void @i27_ls(i27 %x) nounwind {
- store i27 %x, i27* @i27_s
- ret void
-}
-
-define void @i28_ls(i28 %x) nounwind {
- store i28 %x, i28* @i28_s
- ret void
-}
-
-define void @i29_ls(i29 %x) nounwind {
- store i29 %x, i29* @i29_s
- ret void
-}
-
-define void @i30_ls(i30 %x) nounwind {
- store i30 %x, i30* @i30_s
- ret void
-}
-
-define void @i31_ls(i31 %x) nounwind {
- store i31 %x, i31* @i31_s
- ret void
-}
-
-define void @i32_ls(i32 %x) nounwind {
- store i32 %x, i32* @i32_s
- ret void
-}
-
-define void @i33_ls(i33 %x) nounwind {
- store i33 %x, i33* @i33_s
- ret void
-}
-
-define void @i34_ls(i34 %x) nounwind {
- store i34 %x, i34* @i34_s
- ret void
-}
-
-define void @i35_ls(i35 %x) nounwind {
- store i35 %x, i35* @i35_s
- ret void
-}
-
-define void @i36_ls(i36 %x) nounwind {
- store i36 %x, i36* @i36_s
- ret void
-}
-
-define void @i37_ls(i37 %x) nounwind {
- store i37 %x, i37* @i37_s
- ret void
-}
-
-define void @i38_ls(i38 %x) nounwind {
- store i38 %x, i38* @i38_s
- ret void
-}
-
-define void @i39_ls(i39 %x) nounwind {
- store i39 %x, i39* @i39_s
- ret void
-}
-
-define void @i40_ls(i40 %x) nounwind {
- store i40 %x, i40* @i40_s
- ret void
-}
-
-define void @i41_ls(i41 %x) nounwind {
- store i41 %x, i41* @i41_s
- ret void
-}
-
-define void @i42_ls(i42 %x) nounwind {
- store i42 %x, i42* @i42_s
- ret void
-}
-
-define void @i43_ls(i43 %x) nounwind {
- store i43 %x, i43* @i43_s
- ret void
-}
-
-define void @i44_ls(i44 %x) nounwind {
- store i44 %x, i44* @i44_s
- ret void
-}
-
-define void @i45_ls(i45 %x) nounwind {
- store i45 %x, i45* @i45_s
- ret void
-}
-
-define void @i46_ls(i46 %x) nounwind {
- store i46 %x, i46* @i46_s
- ret void
-}
-
-define void @i47_ls(i47 %x) nounwind {
- store i47 %x, i47* @i47_s
- ret void
-}
-
-define void @i48_ls(i48 %x) nounwind {
- store i48 %x, i48* @i48_s
- ret void
-}
-
-define void @i49_ls(i49 %x) nounwind {
- store i49 %x, i49* @i49_s
- ret void
-}
-
-define void @i50_ls(i50 %x) nounwind {
- store i50 %x, i50* @i50_s
- ret void
-}
-
-define void @i51_ls(i51 %x) nounwind {
- store i51 %x, i51* @i51_s
- ret void
-}
-
-define void @i52_ls(i52 %x) nounwind {
- store i52 %x, i52* @i52_s
- ret void
-}
-
-define void @i53_ls(i53 %x) nounwind {
- store i53 %x, i53* @i53_s
- ret void
-}
-
-define void @i54_ls(i54 %x) nounwind {
- store i54 %x, i54* @i54_s
- ret void
-}
-
-define void @i55_ls(i55 %x) nounwind {
- store i55 %x, i55* @i55_s
- ret void
-}
-
-define void @i56_ls(i56 %x) nounwind {
- store i56 %x, i56* @i56_s
- ret void
-}
-
-define void @i57_ls(i57 %x) nounwind {
- store i57 %x, i57* @i57_s
- ret void
-}
-
-define void @i58_ls(i58 %x) nounwind {
- store i58 %x, i58* @i58_s
- ret void
-}
-
-define void @i59_ls(i59 %x) nounwind {
- store i59 %x, i59* @i59_s
- ret void
-}
-
-define void @i60_ls(i60 %x) nounwind {
- store i60 %x, i60* @i60_s
- ret void
-}
-
-define void @i61_ls(i61 %x) nounwind {
- store i61 %x, i61* @i61_s
- ret void
-}
-
-define void @i62_ls(i62 %x) nounwind {
- store i62 %x, i62* @i62_s
- ret void
-}
-
-define void @i63_ls(i63 %x) nounwind {
- store i63 %x, i63* @i63_s
- ret void
-}
-
-define void @i64_ls(i64 %x) nounwind {
- store i64 %x, i64* @i64_s
- ret void
-}
-
-define void @i65_ls(i65 %x) nounwind {
- store i65 %x, i65* @i65_s
- ret void
-}
-
-define void @i66_ls(i66 %x) nounwind {
- store i66 %x, i66* @i66_s
- ret void
-}
-
-define void @i67_ls(i67 %x) nounwind {
- store i67 %x, i67* @i67_s
- ret void
-}
-
-define void @i68_ls(i68 %x) nounwind {
- store i68 %x, i68* @i68_s
- ret void
-}
-
-define void @i69_ls(i69 %x) nounwind {
- store i69 %x, i69* @i69_s
- ret void
-}
-
-define void @i70_ls(i70 %x) nounwind {
- store i70 %x, i70* @i70_s
- ret void
-}
-
-define void @i71_ls(i71 %x) nounwind {
- store i71 %x, i71* @i71_s
- ret void
-}
-
-define void @i72_ls(i72 %x) nounwind {
- store i72 %x, i72* @i72_s
- ret void
-}
-
-define void @i73_ls(i73 %x) nounwind {
- store i73 %x, i73* @i73_s
- ret void
-}
-
-define void @i74_ls(i74 %x) nounwind {
- store i74 %x, i74* @i74_s
- ret void
-}
-
-define void @i75_ls(i75 %x) nounwind {
- store i75 %x, i75* @i75_s
- ret void
-}
-
-define void @i76_ls(i76 %x) nounwind {
- store i76 %x, i76* @i76_s
- ret void
-}
-
-define void @i77_ls(i77 %x) nounwind {
- store i77 %x, i77* @i77_s
- ret void
-}
-
-define void @i78_ls(i78 %x) nounwind {
- store i78 %x, i78* @i78_s
- ret void
-}
-
-define void @i79_ls(i79 %x) nounwind {
- store i79 %x, i79* @i79_s
- ret void
-}
-
-define void @i80_ls(i80 %x) nounwind {
- store i80 %x, i80* @i80_s
- ret void
-}
-
-define void @i81_ls(i81 %x) nounwind {
- store i81 %x, i81* @i81_s
- ret void
-}
-
-define void @i82_ls(i82 %x) nounwind {
- store i82 %x, i82* @i82_s
- ret void
-}
-
-define void @i83_ls(i83 %x) nounwind {
- store i83 %x, i83* @i83_s
- ret void
-}
-
-define void @i84_ls(i84 %x) nounwind {
- store i84 %x, i84* @i84_s
- ret void
-}
-
-define void @i85_ls(i85 %x) nounwind {
- store i85 %x, i85* @i85_s
- ret void
-}
-
-define void @i86_ls(i86 %x) nounwind {
- store i86 %x, i86* @i86_s
- ret void
-}
-
-define void @i87_ls(i87 %x) nounwind {
- store i87 %x, i87* @i87_s
- ret void
-}
-
-define void @i88_ls(i88 %x) nounwind {
- store i88 %x, i88* @i88_s
- ret void
-}
-
-define void @i89_ls(i89 %x) nounwind {
- store i89 %x, i89* @i89_s
- ret void
-}
-
-define void @i90_ls(i90 %x) nounwind {
- store i90 %x, i90* @i90_s
- ret void
-}
-
-define void @i91_ls(i91 %x) nounwind {
- store i91 %x, i91* @i91_s
- ret void
-}
-
-define void @i92_ls(i92 %x) nounwind {
- store i92 %x, i92* @i92_s
- ret void
-}
-
-define void @i93_ls(i93 %x) nounwind {
- store i93 %x, i93* @i93_s
- ret void
-}
-
-define void @i94_ls(i94 %x) nounwind {
- store i94 %x, i94* @i94_s
- ret void
-}
-
-define void @i95_ls(i95 %x) nounwind {
- store i95 %x, i95* @i95_s
- ret void
-}
-
-define void @i96_ls(i96 %x) nounwind {
- store i96 %x, i96* @i96_s
- ret void
-}
-
-define void @i97_ls(i97 %x) nounwind {
- store i97 %x, i97* @i97_s
- ret void
-}
-
-define void @i98_ls(i98 %x) nounwind {
- store i98 %x, i98* @i98_s
- ret void
-}
-
-define void @i99_ls(i99 %x) nounwind {
- store i99 %x, i99* @i99_s
- ret void
-}
-
-define void @i100_ls(i100 %x) nounwind {
- store i100 %x, i100* @i100_s
- ret void
-}
-
-define void @i101_ls(i101 %x) nounwind {
- store i101 %x, i101* @i101_s
- ret void
-}
-
-define void @i102_ls(i102 %x) nounwind {
- store i102 %x, i102* @i102_s
- ret void
-}
-
-define void @i103_ls(i103 %x) nounwind {
- store i103 %x, i103* @i103_s
- ret void
-}
-
-define void @i104_ls(i104 %x) nounwind {
- store i104 %x, i104* @i104_s
- ret void
-}
-
-define void @i105_ls(i105 %x) nounwind {
- store i105 %x, i105* @i105_s
- ret void
-}
-
-define void @i106_ls(i106 %x) nounwind {
- store i106 %x, i106* @i106_s
- ret void
-}
-
-define void @i107_ls(i107 %x) nounwind {
- store i107 %x, i107* @i107_s
- ret void
-}
-
-define void @i108_ls(i108 %x) nounwind {
- store i108 %x, i108* @i108_s
- ret void
-}
-
-define void @i109_ls(i109 %x) nounwind {
- store i109 %x, i109* @i109_s
- ret void
-}
-
-define void @i110_ls(i110 %x) nounwind {
- store i110 %x, i110* @i110_s
- ret void
-}
-
-define void @i111_ls(i111 %x) nounwind {
- store i111 %x, i111* @i111_s
- ret void
-}
-
-define void @i112_ls(i112 %x) nounwind {
- store i112 %x, i112* @i112_s
- ret void
-}
-
-define void @i113_ls(i113 %x) nounwind {
- store i113 %x, i113* @i113_s
- ret void
-}
-
-define void @i114_ls(i114 %x) nounwind {
- store i114 %x, i114* @i114_s
- ret void
-}
-
-define void @i115_ls(i115 %x) nounwind {
- store i115 %x, i115* @i115_s
- ret void
-}
-
-define void @i116_ls(i116 %x) nounwind {
- store i116 %x, i116* @i116_s
- ret void
-}
-
-define void @i117_ls(i117 %x) nounwind {
- store i117 %x, i117* @i117_s
- ret void
-}
-
-define void @i118_ls(i118 %x) nounwind {
- store i118 %x, i118* @i118_s
- ret void
-}
-
-define void @i119_ls(i119 %x) nounwind {
- store i119 %x, i119* @i119_s
- ret void
-}
-
-define void @i120_ls(i120 %x) nounwind {
- store i120 %x, i120* @i120_s
- ret void
-}
-
-define void @i121_ls(i121 %x) nounwind {
- store i121 %x, i121* @i121_s
- ret void
-}
-
-define void @i122_ls(i122 %x) nounwind {
- store i122 %x, i122* @i122_s
- ret void
-}
-
-define void @i123_ls(i123 %x) nounwind {
- store i123 %x, i123* @i123_s
- ret void
-}
-
-define void @i124_ls(i124 %x) nounwind {
- store i124 %x, i124* @i124_s
- ret void
-}
-
-define void @i125_ls(i125 %x) nounwind {
- store i125 %x, i125* @i125_s
- ret void
-}
-
-define void @i126_ls(i126 %x) nounwind {
- store i126 %x, i126* @i126_s
- ret void
-}
-
-define void @i127_ls(i127 %x) nounwind {
- store i127 %x, i127* @i127_s
- ret void
-}
-
-define void @i128_ls(i128 %x) nounwind {
- store i128 %x, i128* @i128_s
- ret void
-}
-
-define void @i129_ls(i129 %x) nounwind {
- store i129 %x, i129* @i129_s
- ret void
-}
-
-define void @i130_ls(i130 %x) nounwind {
- store i130 %x, i130* @i130_s
- ret void
-}
-
-define void @i131_ls(i131 %x) nounwind {
- store i131 %x, i131* @i131_s
- ret void
-}
-
-define void @i132_ls(i132 %x) nounwind {
- store i132 %x, i132* @i132_s
- ret void
-}
-
-define void @i133_ls(i133 %x) nounwind {
- store i133 %x, i133* @i133_s
- ret void
-}
-
-define void @i134_ls(i134 %x) nounwind {
- store i134 %x, i134* @i134_s
- ret void
-}
-
-define void @i135_ls(i135 %x) nounwind {
- store i135 %x, i135* @i135_s
- ret void
-}
-
-define void @i136_ls(i136 %x) nounwind {
- store i136 %x, i136* @i136_s
- ret void
-}
-
-define void @i137_ls(i137 %x) nounwind {
- store i137 %x, i137* @i137_s
- ret void
-}
-
-define void @i138_ls(i138 %x) nounwind {
- store i138 %x, i138* @i138_s
- ret void
-}
-
-define void @i139_ls(i139 %x) nounwind {
- store i139 %x, i139* @i139_s
- ret void
-}
-
-define void @i140_ls(i140 %x) nounwind {
- store i140 %x, i140* @i140_s
- ret void
-}
-
-define void @i141_ls(i141 %x) nounwind {
- store i141 %x, i141* @i141_s
- ret void
-}
-
-define void @i142_ls(i142 %x) nounwind {
- store i142 %x, i142* @i142_s
- ret void
-}
-
-define void @i143_ls(i143 %x) nounwind {
- store i143 %x, i143* @i143_s
- ret void
-}
-
-define void @i144_ls(i144 %x) nounwind {
- store i144 %x, i144* @i144_s
- ret void
-}
-
-define void @i145_ls(i145 %x) nounwind {
- store i145 %x, i145* @i145_s
- ret void
-}
-
-define void @i146_ls(i146 %x) nounwind {
- store i146 %x, i146* @i146_s
- ret void
-}
-
-define void @i147_ls(i147 %x) nounwind {
- store i147 %x, i147* @i147_s
- ret void
-}
-
-define void @i148_ls(i148 %x) nounwind {
- store i148 %x, i148* @i148_s
- ret void
-}
-
-define void @i149_ls(i149 %x) nounwind {
- store i149 %x, i149* @i149_s
- ret void
-}
-
-define void @i150_ls(i150 %x) nounwind {
- store i150 %x, i150* @i150_s
- ret void
-}
-
-define void @i151_ls(i151 %x) nounwind {
- store i151 %x, i151* @i151_s
- ret void
-}
-
-define void @i152_ls(i152 %x) nounwind {
- store i152 %x, i152* @i152_s
- ret void
-}
-
-define void @i153_ls(i153 %x) nounwind {
- store i153 %x, i153* @i153_s
- ret void
-}
-
-define void @i154_ls(i154 %x) nounwind {
- store i154 %x, i154* @i154_s
- ret void
-}
-
-define void @i155_ls(i155 %x) nounwind {
- store i155 %x, i155* @i155_s
- ret void
-}
-
-define void @i156_ls(i156 %x) nounwind {
- store i156 %x, i156* @i156_s
- ret void
-}
-
-define void @i157_ls(i157 %x) nounwind {
- store i157 %x, i157* @i157_s
- ret void
-}
-
-define void @i158_ls(i158 %x) nounwind {
- store i158 %x, i158* @i158_s
- ret void
-}
-
-define void @i159_ls(i159 %x) nounwind {
- store i159 %x, i159* @i159_s
- ret void
-}
-
-define void @i160_ls(i160 %x) nounwind {
- store i160 %x, i160* @i160_s
- ret void
-}
-
-define void @i161_ls(i161 %x) nounwind {
- store i161 %x, i161* @i161_s
- ret void
-}
-
-define void @i162_ls(i162 %x) nounwind {
- store i162 %x, i162* @i162_s
- ret void
-}
-
-define void @i163_ls(i163 %x) nounwind {
- store i163 %x, i163* @i163_s
- ret void
-}
-
-define void @i164_ls(i164 %x) nounwind {
- store i164 %x, i164* @i164_s
- ret void
-}
-
-define void @i165_ls(i165 %x) nounwind {
- store i165 %x, i165* @i165_s
- ret void
-}
-
-define void @i166_ls(i166 %x) nounwind {
- store i166 %x, i166* @i166_s
- ret void
-}
-
-define void @i167_ls(i167 %x) nounwind {
- store i167 %x, i167* @i167_s
- ret void
-}
-
-define void @i168_ls(i168 %x) nounwind {
- store i168 %x, i168* @i168_s
- ret void
-}
-
-define void @i169_ls(i169 %x) nounwind {
- store i169 %x, i169* @i169_s
- ret void
-}
-
-define void @i170_ls(i170 %x) nounwind {
- store i170 %x, i170* @i170_s
- ret void
-}
-
-define void @i171_ls(i171 %x) nounwind {
- store i171 %x, i171* @i171_s
- ret void
-}
-
-define void @i172_ls(i172 %x) nounwind {
- store i172 %x, i172* @i172_s
- ret void
-}
-
-define void @i173_ls(i173 %x) nounwind {
- store i173 %x, i173* @i173_s
- ret void
-}
-
-define void @i174_ls(i174 %x) nounwind {
- store i174 %x, i174* @i174_s
- ret void
-}
-
-define void @i175_ls(i175 %x) nounwind {
- store i175 %x, i175* @i175_s
- ret void
-}
-
-define void @i176_ls(i176 %x) nounwind {
- store i176 %x, i176* @i176_s
- ret void
-}
-
-define void @i177_ls(i177 %x) nounwind {
- store i177 %x, i177* @i177_s
- ret void
-}
-
-define void @i178_ls(i178 %x) nounwind {
- store i178 %x, i178* @i178_s
- ret void
-}
-
-define void @i179_ls(i179 %x) nounwind {
- store i179 %x, i179* @i179_s
- ret void
-}
-
-define void @i180_ls(i180 %x) nounwind {
- store i180 %x, i180* @i180_s
- ret void
-}
-
-define void @i181_ls(i181 %x) nounwind {
- store i181 %x, i181* @i181_s
- ret void
-}
-
-define void @i182_ls(i182 %x) nounwind {
- store i182 %x, i182* @i182_s
- ret void
-}
-
-define void @i183_ls(i183 %x) nounwind {
- store i183 %x, i183* @i183_s
- ret void
-}
-
-define void @i184_ls(i184 %x) nounwind {
- store i184 %x, i184* @i184_s
- ret void
-}
-
-define void @i185_ls(i185 %x) nounwind {
- store i185 %x, i185* @i185_s
- ret void
-}
-
-define void @i186_ls(i186 %x) nounwind {
- store i186 %x, i186* @i186_s
- ret void
-}
-
-define void @i187_ls(i187 %x) nounwind {
- store i187 %x, i187* @i187_s
- ret void
-}
-
-define void @i188_ls(i188 %x) nounwind {
- store i188 %x, i188* @i188_s
- ret void
-}
-
-define void @i189_ls(i189 %x) nounwind {
- store i189 %x, i189* @i189_s
- ret void
-}
-
-define void @i190_ls(i190 %x) nounwind {
- store i190 %x, i190* @i190_s
- ret void
-}
-
-define void @i191_ls(i191 %x) nounwind {
- store i191 %x, i191* @i191_s
- ret void
-}
-
-define void @i192_ls(i192 %x) nounwind {
- store i192 %x, i192* @i192_s
- ret void
-}
-
-define void @i193_ls(i193 %x) nounwind {
- store i193 %x, i193* @i193_s
- ret void
-}
-
-define void @i194_ls(i194 %x) nounwind {
- store i194 %x, i194* @i194_s
- ret void
-}
-
-define void @i195_ls(i195 %x) nounwind {
- store i195 %x, i195* @i195_s
- ret void
-}
-
-define void @i196_ls(i196 %x) nounwind {
- store i196 %x, i196* @i196_s
- ret void
-}
-
-define void @i197_ls(i197 %x) nounwind {
- store i197 %x, i197* @i197_s
- ret void
-}
-
-define void @i198_ls(i198 %x) nounwind {
- store i198 %x, i198* @i198_s
- ret void
-}
-
-define void @i199_ls(i199 %x) nounwind {
- store i199 %x, i199* @i199_s
- ret void
-}
-
-define void @i200_ls(i200 %x) nounwind {
- store i200 %x, i200* @i200_s
- ret void
-}
-
-define void @i201_ls(i201 %x) nounwind {
- store i201 %x, i201* @i201_s
- ret void
-}
-
-define void @i202_ls(i202 %x) nounwind {
- store i202 %x, i202* @i202_s
- ret void
-}
-
-define void @i203_ls(i203 %x) nounwind {
- store i203 %x, i203* @i203_s
- ret void
-}
-
-define void @i204_ls(i204 %x) nounwind {
- store i204 %x, i204* @i204_s
- ret void
-}
-
-define void @i205_ls(i205 %x) nounwind {
- store i205 %x, i205* @i205_s
- ret void
-}
-
-define void @i206_ls(i206 %x) nounwind {
- store i206 %x, i206* @i206_s
- ret void
-}
-
-define void @i207_ls(i207 %x) nounwind {
- store i207 %x, i207* @i207_s
- ret void
-}
-
-define void @i208_ls(i208 %x) nounwind {
- store i208 %x, i208* @i208_s
- ret void
-}
-
-define void @i209_ls(i209 %x) nounwind {
- store i209 %x, i209* @i209_s
- ret void
-}
-
-define void @i210_ls(i210 %x) nounwind {
- store i210 %x, i210* @i210_s
- ret void
-}
-
-define void @i211_ls(i211 %x) nounwind {
- store i211 %x, i211* @i211_s
- ret void
-}
-
-define void @i212_ls(i212 %x) nounwind {
- store i212 %x, i212* @i212_s
- ret void
-}
-
-define void @i213_ls(i213 %x) nounwind {
- store i213 %x, i213* @i213_s
- ret void
-}
-
-define void @i214_ls(i214 %x) nounwind {
- store i214 %x, i214* @i214_s
- ret void
-}
-
-define void @i215_ls(i215 %x) nounwind {
- store i215 %x, i215* @i215_s
- ret void
-}
-
-define void @i216_ls(i216 %x) nounwind {
- store i216 %x, i216* @i216_s
- ret void
-}
-
-define void @i217_ls(i217 %x) nounwind {
- store i217 %x, i217* @i217_s
- ret void
-}
-
-define void @i218_ls(i218 %x) nounwind {
- store i218 %x, i218* @i218_s
- ret void
-}
-
-define void @i219_ls(i219 %x) nounwind {
- store i219 %x, i219* @i219_s
- ret void
-}
-
-define void @i220_ls(i220 %x) nounwind {
- store i220 %x, i220* @i220_s
- ret void
-}
-
-define void @i221_ls(i221 %x) nounwind {
- store i221 %x, i221* @i221_s
- ret void
-}
-
-define void @i222_ls(i222 %x) nounwind {
- store i222 %x, i222* @i222_s
- ret void
-}
-
-define void @i223_ls(i223 %x) nounwind {
- store i223 %x, i223* @i223_s
- ret void
-}
-
-define void @i224_ls(i224 %x) nounwind {
- store i224 %x, i224* @i224_s
- ret void
-}
-
-define void @i225_ls(i225 %x) nounwind {
- store i225 %x, i225* @i225_s
- ret void
-}
-
-define void @i226_ls(i226 %x) nounwind {
- store i226 %x, i226* @i226_s
- ret void
-}
-
-define void @i227_ls(i227 %x) nounwind {
- store i227 %x, i227* @i227_s
- ret void
-}
-
-define void @i228_ls(i228 %x) nounwind {
- store i228 %x, i228* @i228_s
- ret void
-}
-
-define void @i229_ls(i229 %x) nounwind {
- store i229 %x, i229* @i229_s
- ret void
-}
-
-define void @i230_ls(i230 %x) nounwind {
- store i230 %x, i230* @i230_s
- ret void
-}
-
-define void @i231_ls(i231 %x) nounwind {
- store i231 %x, i231* @i231_s
- ret void
-}
-
-define void @i232_ls(i232 %x) nounwind {
- store i232 %x, i232* @i232_s
- ret void
-}
-
-define void @i233_ls(i233 %x) nounwind {
- store i233 %x, i233* @i233_s
- ret void
-}
-
-define void @i234_ls(i234 %x) nounwind {
- store i234 %x, i234* @i234_s
- ret void
-}
-
-define void @i235_ls(i235 %x) nounwind {
- store i235 %x, i235* @i235_s
- ret void
-}
-
-define void @i236_ls(i236 %x) nounwind {
- store i236 %x, i236* @i236_s
- ret void
-}
-
-define void @i237_ls(i237 %x) nounwind {
- store i237 %x, i237* @i237_s
- ret void
-}
-
-define void @i238_ls(i238 %x) nounwind {
- store i238 %x, i238* @i238_s
- ret void
-}
-
-define void @i239_ls(i239 %x) nounwind {
- store i239 %x, i239* @i239_s
- ret void
-}
-
-define void @i240_ls(i240 %x) nounwind {
- store i240 %x, i240* @i240_s
- ret void
-}
-
-define void @i241_ls(i241 %x) nounwind {
- store i241 %x, i241* @i241_s
- ret void
-}
-
-define void @i242_ls(i242 %x) nounwind {
- store i242 %x, i242* @i242_s
- ret void
-}
-
-define void @i243_ls(i243 %x) nounwind {
- store i243 %x, i243* @i243_s
- ret void
-}
-
-define void @i244_ls(i244 %x) nounwind {
- store i244 %x, i244* @i244_s
- ret void
-}
-
-define void @i245_ls(i245 %x) nounwind {
- store i245 %x, i245* @i245_s
- ret void
-}
-
-define void @i246_ls(i246 %x) nounwind {
- store i246 %x, i246* @i246_s
- ret void
-}
-
-define void @i247_ls(i247 %x) nounwind {
- store i247 %x, i247* @i247_s
- ret void
-}
-
-define void @i248_ls(i248 %x) nounwind {
- store i248 %x, i248* @i248_s
- ret void
-}
-
-define void @i249_ls(i249 %x) nounwind {
- store i249 %x, i249* @i249_s
- ret void
-}
-
-define void @i250_ls(i250 %x) nounwind {
- store i250 %x, i250* @i250_s
- ret void
-}
-
-define void @i251_ls(i251 %x) nounwind {
- store i251 %x, i251* @i251_s
- ret void
-}
-
-define void @i252_ls(i252 %x) nounwind {
- store i252 %x, i252* @i252_s
- ret void
-}
-
-define void @i253_ls(i253 %x) nounwind {
- store i253 %x, i253* @i253_s
- ret void
-}
-
-define void @i254_ls(i254 %x) nounwind {
- store i254 %x, i254* @i254_s
- ret void
-}
-
-define void @i255_ls(i255 %x) nounwind {
- store i255 %x, i255* @i255_s
- ret void
-}
-
-define void @i256_ls(i256 %x) nounwind {
- store i256 %x, i256* @i256_s
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/APIntSextParam.ll b/libclamav/c++/llvm/test/CodeGen/Generic/APIntSextParam.ll
deleted file mode 100644
index acc0eeb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/APIntSextParam.ll
+++ /dev/null
@@ -1,1537 +0,0 @@
-; RUN: llc < %s > %t
-@i1_s = external global i1 ; <i1*> [#uses=1]
-@i2_s = external global i2 ; <i2*> [#uses=1]
-@i3_s = external global i3 ; <i3*> [#uses=1]
-@i4_s = external global i4 ; <i4*> [#uses=1]
-@i5_s = external global i5 ; <i5*> [#uses=1]
-@i6_s = external global i6 ; <i6*> [#uses=1]
-@i7_s = external global i7 ; <i7*> [#uses=1]
-@i8_s = external global i8 ; <i8*> [#uses=1]
-@i9_s = external global i9 ; <i9*> [#uses=1]
-@i10_s = external global i10 ; <i10*> [#uses=1]
-@i11_s = external global i11 ; <i11*> [#uses=1]
-@i12_s = external global i12 ; <i12*> [#uses=1]
-@i13_s = external global i13 ; <i13*> [#uses=1]
-@i14_s = external global i14 ; <i14*> [#uses=1]
-@i15_s = external global i15 ; <i15*> [#uses=1]
-@i16_s = external global i16 ; <i16*> [#uses=1]
-@i17_s = external global i17 ; <i17*> [#uses=1]
-@i18_s = external global i18 ; <i18*> [#uses=1]
-@i19_s = external global i19 ; <i19*> [#uses=1]
-@i20_s = external global i20 ; <i20*> [#uses=1]
-@i21_s = external global i21 ; <i21*> [#uses=1]
-@i22_s = external global i22 ; <i22*> [#uses=1]
-@i23_s = external global i23 ; <i23*> [#uses=1]
-@i24_s = external global i24 ; <i24*> [#uses=1]
-@i25_s = external global i25 ; <i25*> [#uses=1]
-@i26_s = external global i26 ; <i26*> [#uses=1]
-@i27_s = external global i27 ; <i27*> [#uses=1]
-@i28_s = external global i28 ; <i28*> [#uses=1]
-@i29_s = external global i29 ; <i29*> [#uses=1]
-@i30_s = external global i30 ; <i30*> [#uses=1]
-@i31_s = external global i31 ; <i31*> [#uses=1]
-@i32_s = external global i32 ; <i32*> [#uses=1]
-@i33_s = external global i33 ; <i33*> [#uses=1]
-@i34_s = external global i34 ; <i34*> [#uses=1]
-@i35_s = external global i35 ; <i35*> [#uses=1]
-@i36_s = external global i36 ; <i36*> [#uses=1]
-@i37_s = external global i37 ; <i37*> [#uses=1]
-@i38_s = external global i38 ; <i38*> [#uses=1]
-@i39_s = external global i39 ; <i39*> [#uses=1]
-@i40_s = external global i40 ; <i40*> [#uses=1]
-@i41_s = external global i41 ; <i41*> [#uses=1]
-@i42_s = external global i42 ; <i42*> [#uses=1]
-@i43_s = external global i43 ; <i43*> [#uses=1]
-@i44_s = external global i44 ; <i44*> [#uses=1]
-@i45_s = external global i45 ; <i45*> [#uses=1]
-@i46_s = external global i46 ; <i46*> [#uses=1]
-@i47_s = external global i47 ; <i47*> [#uses=1]
-@i48_s = external global i48 ; <i48*> [#uses=1]
-@i49_s = external global i49 ; <i49*> [#uses=1]
-@i50_s = external global i50 ; <i50*> [#uses=1]
-@i51_s = external global i51 ; <i51*> [#uses=1]
-@i52_s = external global i52 ; <i52*> [#uses=1]
-@i53_s = external global i53 ; <i53*> [#uses=1]
-@i54_s = external global i54 ; <i54*> [#uses=1]
-@i55_s = external global i55 ; <i55*> [#uses=1]
-@i56_s = external global i56 ; <i56*> [#uses=1]
-@i57_s = external global i57 ; <i57*> [#uses=1]
-@i58_s = external global i58 ; <i58*> [#uses=1]
-@i59_s = external global i59 ; <i59*> [#uses=1]
-@i60_s = external global i60 ; <i60*> [#uses=1]
-@i61_s = external global i61 ; <i61*> [#uses=1]
-@i62_s = external global i62 ; <i62*> [#uses=1]
-@i63_s = external global i63 ; <i63*> [#uses=1]
-@i64_s = external global i64 ; <i64*> [#uses=1]
-@i65_s = external global i65 ; <i65*> [#uses=1]
-@i66_s = external global i66 ; <i66*> [#uses=1]
-@i67_s = external global i67 ; <i67*> [#uses=1]
-@i68_s = external global i68 ; <i68*> [#uses=1]
-@i69_s = external global i69 ; <i69*> [#uses=1]
-@i70_s = external global i70 ; <i70*> [#uses=1]
-@i71_s = external global i71 ; <i71*> [#uses=1]
-@i72_s = external global i72 ; <i72*> [#uses=1]
-@i73_s = external global i73 ; <i73*> [#uses=1]
-@i74_s = external global i74 ; <i74*> [#uses=1]
-@i75_s = external global i75 ; <i75*> [#uses=1]
-@i76_s = external global i76 ; <i76*> [#uses=1]
-@i77_s = external global i77 ; <i77*> [#uses=1]
-@i78_s = external global i78 ; <i78*> [#uses=1]
-@i79_s = external global i79 ; <i79*> [#uses=1]
-@i80_s = external global i80 ; <i80*> [#uses=1]
-@i81_s = external global i81 ; <i81*> [#uses=1]
-@i82_s = external global i82 ; <i82*> [#uses=1]
-@i83_s = external global i83 ; <i83*> [#uses=1]
-@i84_s = external global i84 ; <i84*> [#uses=1]
-@i85_s = external global i85 ; <i85*> [#uses=1]
-@i86_s = external global i86 ; <i86*> [#uses=1]
-@i87_s = external global i87 ; <i87*> [#uses=1]
-@i88_s = external global i88 ; <i88*> [#uses=1]
-@i89_s = external global i89 ; <i89*> [#uses=1]
-@i90_s = external global i90 ; <i90*> [#uses=1]
-@i91_s = external global i91 ; <i91*> [#uses=1]
-@i92_s = external global i92 ; <i92*> [#uses=1]
-@i93_s = external global i93 ; <i93*> [#uses=1]
-@i94_s = external global i94 ; <i94*> [#uses=1]
-@i95_s = external global i95 ; <i95*> [#uses=1]
-@i96_s = external global i96 ; <i96*> [#uses=1]
-@i97_s = external global i97 ; <i97*> [#uses=1]
-@i98_s = external global i98 ; <i98*> [#uses=1]
-@i99_s = external global i99 ; <i99*> [#uses=1]
-@i100_s = external global i100 ; <i100*> [#uses=1]
-@i101_s = external global i101 ; <i101*> [#uses=1]
-@i102_s = external global i102 ; <i102*> [#uses=1]
-@i103_s = external global i103 ; <i103*> [#uses=1]
-@i104_s = external global i104 ; <i104*> [#uses=1]
-@i105_s = external global i105 ; <i105*> [#uses=1]
-@i106_s = external global i106 ; <i106*> [#uses=1]
-@i107_s = external global i107 ; <i107*> [#uses=1]
-@i108_s = external global i108 ; <i108*> [#uses=1]
-@i109_s = external global i109 ; <i109*> [#uses=1]
-@i110_s = external global i110 ; <i110*> [#uses=1]
-@i111_s = external global i111 ; <i111*> [#uses=1]
-@i112_s = external global i112 ; <i112*> [#uses=1]
-@i113_s = external global i113 ; <i113*> [#uses=1]
-@i114_s = external global i114 ; <i114*> [#uses=1]
-@i115_s = external global i115 ; <i115*> [#uses=1]
-@i116_s = external global i116 ; <i116*> [#uses=1]
-@i117_s = external global i117 ; <i117*> [#uses=1]
-@i118_s = external global i118 ; <i118*> [#uses=1]
-@i119_s = external global i119 ; <i119*> [#uses=1]
-@i120_s = external global i120 ; <i120*> [#uses=1]
-@i121_s = external global i121 ; <i121*> [#uses=1]
-@i122_s = external global i122 ; <i122*> [#uses=1]
-@i123_s = external global i123 ; <i123*> [#uses=1]
-@i124_s = external global i124 ; <i124*> [#uses=1]
-@i125_s = external global i125 ; <i125*> [#uses=1]
-@i126_s = external global i126 ; <i126*> [#uses=1]
-@i127_s = external global i127 ; <i127*> [#uses=1]
-@i128_s = external global i128 ; <i128*> [#uses=1]
-@i129_s = external global i129 ; <i129*> [#uses=1]
-@i130_s = external global i130 ; <i130*> [#uses=1]
-@i131_s = external global i131 ; <i131*> [#uses=1]
-@i132_s = external global i132 ; <i132*> [#uses=1]
-@i133_s = external global i133 ; <i133*> [#uses=1]
-@i134_s = external global i134 ; <i134*> [#uses=1]
-@i135_s = external global i135 ; <i135*> [#uses=1]
-@i136_s = external global i136 ; <i136*> [#uses=1]
-@i137_s = external global i137 ; <i137*> [#uses=1]
-@i138_s = external global i138 ; <i138*> [#uses=1]
-@i139_s = external global i139 ; <i139*> [#uses=1]
-@i140_s = external global i140 ; <i140*> [#uses=1]
-@i141_s = external global i141 ; <i141*> [#uses=1]
-@i142_s = external global i142 ; <i142*> [#uses=1]
-@i143_s = external global i143 ; <i143*> [#uses=1]
-@i144_s = external global i144 ; <i144*> [#uses=1]
-@i145_s = external global i145 ; <i145*> [#uses=1]
-@i146_s = external global i146 ; <i146*> [#uses=1]
-@i147_s = external global i147 ; <i147*> [#uses=1]
-@i148_s = external global i148 ; <i148*> [#uses=1]
-@i149_s = external global i149 ; <i149*> [#uses=1]
-@i150_s = external global i150 ; <i150*> [#uses=1]
-@i151_s = external global i151 ; <i151*> [#uses=1]
-@i152_s = external global i152 ; <i152*> [#uses=1]
-@i153_s = external global i153 ; <i153*> [#uses=1]
-@i154_s = external global i154 ; <i154*> [#uses=1]
-@i155_s = external global i155 ; <i155*> [#uses=1]
-@i156_s = external global i156 ; <i156*> [#uses=1]
-@i157_s = external global i157 ; <i157*> [#uses=1]
-@i158_s = external global i158 ; <i158*> [#uses=1]
-@i159_s = external global i159 ; <i159*> [#uses=1]
-@i160_s = external global i160 ; <i160*> [#uses=1]
-@i161_s = external global i161 ; <i161*> [#uses=1]
-@i162_s = external global i162 ; <i162*> [#uses=1]
-@i163_s = external global i163 ; <i163*> [#uses=1]
-@i164_s = external global i164 ; <i164*> [#uses=1]
-@i165_s = external global i165 ; <i165*> [#uses=1]
-@i166_s = external global i166 ; <i166*> [#uses=1]
-@i167_s = external global i167 ; <i167*> [#uses=1]
-@i168_s = external global i168 ; <i168*> [#uses=1]
-@i169_s = external global i169 ; <i169*> [#uses=1]
-@i170_s = external global i170 ; <i170*> [#uses=1]
-@i171_s = external global i171 ; <i171*> [#uses=1]
-@i172_s = external global i172 ; <i172*> [#uses=1]
-@i173_s = external global i173 ; <i173*> [#uses=1]
-@i174_s = external global i174 ; <i174*> [#uses=1]
-@i175_s = external global i175 ; <i175*> [#uses=1]
-@i176_s = external global i176 ; <i176*> [#uses=1]
-@i177_s = external global i177 ; <i177*> [#uses=1]
-@i178_s = external global i178 ; <i178*> [#uses=1]
-@i179_s = external global i179 ; <i179*> [#uses=1]
-@i180_s = external global i180 ; <i180*> [#uses=1]
-@i181_s = external global i181 ; <i181*> [#uses=1]
-@i182_s = external global i182 ; <i182*> [#uses=1]
-@i183_s = external global i183 ; <i183*> [#uses=1]
-@i184_s = external global i184 ; <i184*> [#uses=1]
-@i185_s = external global i185 ; <i185*> [#uses=1]
-@i186_s = external global i186 ; <i186*> [#uses=1]
-@i187_s = external global i187 ; <i187*> [#uses=1]
-@i188_s = external global i188 ; <i188*> [#uses=1]
-@i189_s = external global i189 ; <i189*> [#uses=1]
-@i190_s = external global i190 ; <i190*> [#uses=1]
-@i191_s = external global i191 ; <i191*> [#uses=1]
-@i192_s = external global i192 ; <i192*> [#uses=1]
-@i193_s = external global i193 ; <i193*> [#uses=1]
-@i194_s = external global i194 ; <i194*> [#uses=1]
-@i195_s = external global i195 ; <i195*> [#uses=1]
-@i196_s = external global i196 ; <i196*> [#uses=1]
-@i197_s = external global i197 ; <i197*> [#uses=1]
-@i198_s = external global i198 ; <i198*> [#uses=1]
-@i199_s = external global i199 ; <i199*> [#uses=1]
-@i200_s = external global i200 ; <i200*> [#uses=1]
-@i201_s = external global i201 ; <i201*> [#uses=1]
-@i202_s = external global i202 ; <i202*> [#uses=1]
-@i203_s = external global i203 ; <i203*> [#uses=1]
-@i204_s = external global i204 ; <i204*> [#uses=1]
-@i205_s = external global i205 ; <i205*> [#uses=1]
-@i206_s = external global i206 ; <i206*> [#uses=1]
-@i207_s = external global i207 ; <i207*> [#uses=1]
-@i208_s = external global i208 ; <i208*> [#uses=1]
-@i209_s = external global i209 ; <i209*> [#uses=1]
-@i210_s = external global i210 ; <i210*> [#uses=1]
-@i211_s = external global i211 ; <i211*> [#uses=1]
-@i212_s = external global i212 ; <i212*> [#uses=1]
-@i213_s = external global i213 ; <i213*> [#uses=1]
-@i214_s = external global i214 ; <i214*> [#uses=1]
-@i215_s = external global i215 ; <i215*> [#uses=1]
-@i216_s = external global i216 ; <i216*> [#uses=1]
-@i217_s = external global i217 ; <i217*> [#uses=1]
-@i218_s = external global i218 ; <i218*> [#uses=1]
-@i219_s = external global i219 ; <i219*> [#uses=1]
-@i220_s = external global i220 ; <i220*> [#uses=1]
-@i221_s = external global i221 ; <i221*> [#uses=1]
-@i222_s = external global i222 ; <i222*> [#uses=1]
-@i223_s = external global i223 ; <i223*> [#uses=1]
-@i224_s = external global i224 ; <i224*> [#uses=1]
-@i225_s = external global i225 ; <i225*> [#uses=1]
-@i226_s = external global i226 ; <i226*> [#uses=1]
-@i227_s = external global i227 ; <i227*> [#uses=1]
-@i228_s = external global i228 ; <i228*> [#uses=1]
-@i229_s = external global i229 ; <i229*> [#uses=1]
-@i230_s = external global i230 ; <i230*> [#uses=1]
-@i231_s = external global i231 ; <i231*> [#uses=1]
-@i232_s = external global i232 ; <i232*> [#uses=1]
-@i233_s = external global i233 ; <i233*> [#uses=1]
-@i234_s = external global i234 ; <i234*> [#uses=1]
-@i235_s = external global i235 ; <i235*> [#uses=1]
-@i236_s = external global i236 ; <i236*> [#uses=1]
-@i237_s = external global i237 ; <i237*> [#uses=1]
-@i238_s = external global i238 ; <i238*> [#uses=1]
-@i239_s = external global i239 ; <i239*> [#uses=1]
-@i240_s = external global i240 ; <i240*> [#uses=1]
-@i241_s = external global i241 ; <i241*> [#uses=1]
-@i242_s = external global i242 ; <i242*> [#uses=1]
-@i243_s = external global i243 ; <i243*> [#uses=1]
-@i244_s = external global i244 ; <i244*> [#uses=1]
-@i245_s = external global i245 ; <i245*> [#uses=1]
-@i246_s = external global i246 ; <i246*> [#uses=1]
-@i247_s = external global i247 ; <i247*> [#uses=1]
-@i248_s = external global i248 ; <i248*> [#uses=1]
-@i249_s = external global i249 ; <i249*> [#uses=1]
-@i250_s = external global i250 ; <i250*> [#uses=1]
-@i251_s = external global i251 ; <i251*> [#uses=1]
-@i252_s = external global i252 ; <i252*> [#uses=1]
-@i253_s = external global i253 ; <i253*> [#uses=1]
-@i254_s = external global i254 ; <i254*> [#uses=1]
-@i255_s = external global i255 ; <i255*> [#uses=1]
-@i256_s = external global i256 ; <i256*> [#uses=1]
-
-define void @i1_ls(i1 signext %x) nounwind {
- store i1 %x, i1* @i1_s
- ret void
-}
-
-define void @i2_ls(i2 signext %x) nounwind {
- store i2 %x, i2* @i2_s
- ret void
-}
-
-define void @i3_ls(i3 signext %x) nounwind {
- store i3 %x, i3* @i3_s
- ret void
-}
-
-define void @i4_ls(i4 signext %x) nounwind {
- store i4 %x, i4* @i4_s
- ret void
-}
-
-define void @i5_ls(i5 signext %x) nounwind {
- store i5 %x, i5* @i5_s
- ret void
-}
-
-define void @i6_ls(i6 signext %x) nounwind {
- store i6 %x, i6* @i6_s
- ret void
-}
-
-define void @i7_ls(i7 signext %x) nounwind {
- store i7 %x, i7* @i7_s
- ret void
-}
-
-define void @i8_ls(i8 signext %x) nounwind {
- store i8 %x, i8* @i8_s
- ret void
-}
-
-define void @i9_ls(i9 signext %x) nounwind {
- store i9 %x, i9* @i9_s
- ret void
-}
-
-define void @i10_ls(i10 signext %x) nounwind {
- store i10 %x, i10* @i10_s
- ret void
-}
-
-define void @i11_ls(i11 signext %x) nounwind {
- store i11 %x, i11* @i11_s
- ret void
-}
-
-define void @i12_ls(i12 signext %x) nounwind {
- store i12 %x, i12* @i12_s
- ret void
-}
-
-define void @i13_ls(i13 signext %x) nounwind {
- store i13 %x, i13* @i13_s
- ret void
-}
-
-define void @i14_ls(i14 signext %x) nounwind {
- store i14 %x, i14* @i14_s
- ret void
-}
-
-define void @i15_ls(i15 signext %x) nounwind {
- store i15 %x, i15* @i15_s
- ret void
-}
-
-define void @i16_ls(i16 signext %x) nounwind {
- store i16 %x, i16* @i16_s
- ret void
-}
-
-define void @i17_ls(i17 signext %x) nounwind {
- store i17 %x, i17* @i17_s
- ret void
-}
-
-define void @i18_ls(i18 signext %x) nounwind {
- store i18 %x, i18* @i18_s
- ret void
-}
-
-define void @i19_ls(i19 signext %x) nounwind {
- store i19 %x, i19* @i19_s
- ret void
-}
-
-define void @i20_ls(i20 signext %x) nounwind {
- store i20 %x, i20* @i20_s
- ret void
-}
-
-define void @i21_ls(i21 signext %x) nounwind {
- store i21 %x, i21* @i21_s
- ret void
-}
-
-define void @i22_ls(i22 signext %x) nounwind {
- store i22 %x, i22* @i22_s
- ret void
-}
-
-define void @i23_ls(i23 signext %x) nounwind {
- store i23 %x, i23* @i23_s
- ret void
-}
-
-define void @i24_ls(i24 signext %x) nounwind {
- store i24 %x, i24* @i24_s
- ret void
-}
-
-define void @i25_ls(i25 signext %x) nounwind {
- store i25 %x, i25* @i25_s
- ret void
-}
-
-define void @i26_ls(i26 signext %x) nounwind {
- store i26 %x, i26* @i26_s
- ret void
-}
-
-define void @i27_ls(i27 signext %x) nounwind {
- store i27 %x, i27* @i27_s
- ret void
-}
-
-define void @i28_ls(i28 signext %x) nounwind {
- store i28 %x, i28* @i28_s
- ret void
-}
-
-define void @i29_ls(i29 signext %x) nounwind {
- store i29 %x, i29* @i29_s
- ret void
-}
-
-define void @i30_ls(i30 signext %x) nounwind {
- store i30 %x, i30* @i30_s
- ret void
-}
-
-define void @i31_ls(i31 signext %x) nounwind {
- store i31 %x, i31* @i31_s
- ret void
-}
-
-define void @i32_ls(i32 signext %x) nounwind {
- store i32 %x, i32* @i32_s
- ret void
-}
-
-define void @i33_ls(i33 signext %x) nounwind {
- store i33 %x, i33* @i33_s
- ret void
-}
-
-define void @i34_ls(i34 signext %x) nounwind {
- store i34 %x, i34* @i34_s
- ret void
-}
-
-define void @i35_ls(i35 signext %x) nounwind {
- store i35 %x, i35* @i35_s
- ret void
-}
-
-define void @i36_ls(i36 signext %x) nounwind {
- store i36 %x, i36* @i36_s
- ret void
-}
-
-define void @i37_ls(i37 signext %x) nounwind {
- store i37 %x, i37* @i37_s
- ret void
-}
-
-define void @i38_ls(i38 signext %x) nounwind {
- store i38 %x, i38* @i38_s
- ret void
-}
-
-define void @i39_ls(i39 signext %x) nounwind {
- store i39 %x, i39* @i39_s
- ret void
-}
-
-define void @i40_ls(i40 signext %x) nounwind {
- store i40 %x, i40* @i40_s
- ret void
-}
-
-define void @i41_ls(i41 signext %x) nounwind {
- store i41 %x, i41* @i41_s
- ret void
-}
-
-define void @i42_ls(i42 signext %x) nounwind {
- store i42 %x, i42* @i42_s
- ret void
-}
-
-define void @i43_ls(i43 signext %x) nounwind {
- store i43 %x, i43* @i43_s
- ret void
-}
-
-define void @i44_ls(i44 signext %x) nounwind {
- store i44 %x, i44* @i44_s
- ret void
-}
-
-define void @i45_ls(i45 signext %x) nounwind {
- store i45 %x, i45* @i45_s
- ret void
-}
-
-define void @i46_ls(i46 signext %x) nounwind {
- store i46 %x, i46* @i46_s
- ret void
-}
-
-define void @i47_ls(i47 signext %x) nounwind {
- store i47 %x, i47* @i47_s
- ret void
-}
-
-define void @i48_ls(i48 signext %x) nounwind {
- store i48 %x, i48* @i48_s
- ret void
-}
-
-define void @i49_ls(i49 signext %x) nounwind {
- store i49 %x, i49* @i49_s
- ret void
-}
-
-define void @i50_ls(i50 signext %x) nounwind {
- store i50 %x, i50* @i50_s
- ret void
-}
-
-define void @i51_ls(i51 signext %x) nounwind {
- store i51 %x, i51* @i51_s
- ret void
-}
-
-define void @i52_ls(i52 signext %x) nounwind {
- store i52 %x, i52* @i52_s
- ret void
-}
-
-define void @i53_ls(i53 signext %x) nounwind {
- store i53 %x, i53* @i53_s
- ret void
-}
-
-define void @i54_ls(i54 signext %x) nounwind {
- store i54 %x, i54* @i54_s
- ret void
-}
-
-define void @i55_ls(i55 signext %x) nounwind {
- store i55 %x, i55* @i55_s
- ret void
-}
-
-define void @i56_ls(i56 signext %x) nounwind {
- store i56 %x, i56* @i56_s
- ret void
-}
-
-define void @i57_ls(i57 signext %x) nounwind {
- store i57 %x, i57* @i57_s
- ret void
-}
-
-define void @i58_ls(i58 signext %x) nounwind {
- store i58 %x, i58* @i58_s
- ret void
-}
-
-define void @i59_ls(i59 signext %x) nounwind {
- store i59 %x, i59* @i59_s
- ret void
-}
-
-define void @i60_ls(i60 signext %x) nounwind {
- store i60 %x, i60* @i60_s
- ret void
-}
-
-define void @i61_ls(i61 signext %x) nounwind {
- store i61 %x, i61* @i61_s
- ret void
-}
-
-define void @i62_ls(i62 signext %x) nounwind {
- store i62 %x, i62* @i62_s
- ret void
-}
-
-define void @i63_ls(i63 signext %x) nounwind {
- store i63 %x, i63* @i63_s
- ret void
-}
-
-define void @i64_ls(i64 signext %x) nounwind {
- store i64 %x, i64* @i64_s
- ret void
-}
-
-define void @i65_ls(i65 signext %x) nounwind {
- store i65 %x, i65* @i65_s
- ret void
-}
-
-define void @i66_ls(i66 signext %x) nounwind {
- store i66 %x, i66* @i66_s
- ret void
-}
-
-define void @i67_ls(i67 signext %x) nounwind {
- store i67 %x, i67* @i67_s
- ret void
-}
-
-define void @i68_ls(i68 signext %x) nounwind {
- store i68 %x, i68* @i68_s
- ret void
-}
-
-define void @i69_ls(i69 signext %x) nounwind {
- store i69 %x, i69* @i69_s
- ret void
-}
-
-define void @i70_ls(i70 signext %x) nounwind {
- store i70 %x, i70* @i70_s
- ret void
-}
-
-define void @i71_ls(i71 signext %x) nounwind {
- store i71 %x, i71* @i71_s
- ret void
-}
-
-define void @i72_ls(i72 signext %x) nounwind {
- store i72 %x, i72* @i72_s
- ret void
-}
-
-define void @i73_ls(i73 signext %x) nounwind {
- store i73 %x, i73* @i73_s
- ret void
-}
-
-define void @i74_ls(i74 signext %x) nounwind {
- store i74 %x, i74* @i74_s
- ret void
-}
-
-define void @i75_ls(i75 signext %x) nounwind {
- store i75 %x, i75* @i75_s
- ret void
-}
-
-define void @i76_ls(i76 signext %x) nounwind {
- store i76 %x, i76* @i76_s
- ret void
-}
-
-define void @i77_ls(i77 signext %x) nounwind {
- store i77 %x, i77* @i77_s
- ret void
-}
-
-define void @i78_ls(i78 signext %x) nounwind {
- store i78 %x, i78* @i78_s
- ret void
-}
-
-define void @i79_ls(i79 signext %x) nounwind {
- store i79 %x, i79* @i79_s
- ret void
-}
-
-define void @i80_ls(i80 signext %x) nounwind {
- store i80 %x, i80* @i80_s
- ret void
-}
-
-define void @i81_ls(i81 signext %x) nounwind {
- store i81 %x, i81* @i81_s
- ret void
-}
-
-define void @i82_ls(i82 signext %x) nounwind {
- store i82 %x, i82* @i82_s
- ret void
-}
-
-define void @i83_ls(i83 signext %x) nounwind {
- store i83 %x, i83* @i83_s
- ret void
-}
-
-define void @i84_ls(i84 signext %x) nounwind {
- store i84 %x, i84* @i84_s
- ret void
-}
-
-define void @i85_ls(i85 signext %x) nounwind {
- store i85 %x, i85* @i85_s
- ret void
-}
-
-define void @i86_ls(i86 signext %x) nounwind {
- store i86 %x, i86* @i86_s
- ret void
-}
-
-define void @i87_ls(i87 signext %x) nounwind {
- store i87 %x, i87* @i87_s
- ret void
-}
-
-define void @i88_ls(i88 signext %x) nounwind {
- store i88 %x, i88* @i88_s
- ret void
-}
-
-define void @i89_ls(i89 signext %x) nounwind {
- store i89 %x, i89* @i89_s
- ret void
-}
-
-define void @i90_ls(i90 signext %x) nounwind {
- store i90 %x, i90* @i90_s
- ret void
-}
-
-define void @i91_ls(i91 signext %x) nounwind {
- store i91 %x, i91* @i91_s
- ret void
-}
-
-define void @i92_ls(i92 signext %x) nounwind {
- store i92 %x, i92* @i92_s
- ret void
-}
-
-define void @i93_ls(i93 signext %x) nounwind {
- store i93 %x, i93* @i93_s
- ret void
-}
-
-define void @i94_ls(i94 signext %x) nounwind {
- store i94 %x, i94* @i94_s
- ret void
-}
-
-define void @i95_ls(i95 signext %x) nounwind {
- store i95 %x, i95* @i95_s
- ret void
-}
-
-define void @i96_ls(i96 signext %x) nounwind {
- store i96 %x, i96* @i96_s
- ret void
-}
-
-define void @i97_ls(i97 signext %x) nounwind {
- store i97 %x, i97* @i97_s
- ret void
-}
-
-define void @i98_ls(i98 signext %x) nounwind {
- store i98 %x, i98* @i98_s
- ret void
-}
-
-define void @i99_ls(i99 signext %x) nounwind {
- store i99 %x, i99* @i99_s
- ret void
-}
-
-define void @i100_ls(i100 signext %x) nounwind {
- store i100 %x, i100* @i100_s
- ret void
-}
-
-define void @i101_ls(i101 signext %x) nounwind {
- store i101 %x, i101* @i101_s
- ret void
-}
-
-define void @i102_ls(i102 signext %x) nounwind {
- store i102 %x, i102* @i102_s
- ret void
-}
-
-define void @i103_ls(i103 signext %x) nounwind {
- store i103 %x, i103* @i103_s
- ret void
-}
-
-define void @i104_ls(i104 signext %x) nounwind {
- store i104 %x, i104* @i104_s
- ret void
-}
-
-define void @i105_ls(i105 signext %x) nounwind {
- store i105 %x, i105* @i105_s
- ret void
-}
-
-define void @i106_ls(i106 signext %x) nounwind {
- store i106 %x, i106* @i106_s
- ret void
-}
-
-define void @i107_ls(i107 signext %x) nounwind {
- store i107 %x, i107* @i107_s
- ret void
-}
-
-define void @i108_ls(i108 signext %x) nounwind {
- store i108 %x, i108* @i108_s
- ret void
-}
-
-define void @i109_ls(i109 signext %x) nounwind {
- store i109 %x, i109* @i109_s
- ret void
-}
-
-define void @i110_ls(i110 signext %x) nounwind {
- store i110 %x, i110* @i110_s
- ret void
-}
-
-define void @i111_ls(i111 signext %x) nounwind {
- store i111 %x, i111* @i111_s
- ret void
-}
-
-define void @i112_ls(i112 signext %x) nounwind {
- store i112 %x, i112* @i112_s
- ret void
-}
-
-define void @i113_ls(i113 signext %x) nounwind {
- store i113 %x, i113* @i113_s
- ret void
-}
-
-define void @i114_ls(i114 signext %x) nounwind {
- store i114 %x, i114* @i114_s
- ret void
-}
-
-define void @i115_ls(i115 signext %x) nounwind {
- store i115 %x, i115* @i115_s
- ret void
-}
-
-define void @i116_ls(i116 signext %x) nounwind {
- store i116 %x, i116* @i116_s
- ret void
-}
-
-define void @i117_ls(i117 signext %x) nounwind {
- store i117 %x, i117* @i117_s
- ret void
-}
-
-define void @i118_ls(i118 signext %x) nounwind {
- store i118 %x, i118* @i118_s
- ret void
-}
-
-define void @i119_ls(i119 signext %x) nounwind {
- store i119 %x, i119* @i119_s
- ret void
-}
-
-define void @i120_ls(i120 signext %x) nounwind {
- store i120 %x, i120* @i120_s
- ret void
-}
-
-define void @i121_ls(i121 signext %x) nounwind {
- store i121 %x, i121* @i121_s
- ret void
-}
-
-define void @i122_ls(i122 signext %x) nounwind {
- store i122 %x, i122* @i122_s
- ret void
-}
-
-define void @i123_ls(i123 signext %x) nounwind {
- store i123 %x, i123* @i123_s
- ret void
-}
-
-define void @i124_ls(i124 signext %x) nounwind {
- store i124 %x, i124* @i124_s
- ret void
-}
-
-define void @i125_ls(i125 signext %x) nounwind {
- store i125 %x, i125* @i125_s
- ret void
-}
-
-define void @i126_ls(i126 signext %x) nounwind {
- store i126 %x, i126* @i126_s
- ret void
-}
-
-define void @i127_ls(i127 signext %x) nounwind {
- store i127 %x, i127* @i127_s
- ret void
-}
-
-define void @i128_ls(i128 signext %x) nounwind {
- store i128 %x, i128* @i128_s
- ret void
-}
-
-define void @i129_ls(i129 signext %x) nounwind {
- store i129 %x, i129* @i129_s
- ret void
-}
-
-define void @i130_ls(i130 signext %x) nounwind {
- store i130 %x, i130* @i130_s
- ret void
-}
-
-define void @i131_ls(i131 signext %x) nounwind {
- store i131 %x, i131* @i131_s
- ret void
-}
-
-define void @i132_ls(i132 signext %x) nounwind {
- store i132 %x, i132* @i132_s
- ret void
-}
-
-define void @i133_ls(i133 signext %x) nounwind {
- store i133 %x, i133* @i133_s
- ret void
-}
-
-define void @i134_ls(i134 signext %x) nounwind {
- store i134 %x, i134* @i134_s
- ret void
-}
-
-define void @i135_ls(i135 signext %x) nounwind {
- store i135 %x, i135* @i135_s
- ret void
-}
-
-define void @i136_ls(i136 signext %x) nounwind {
- store i136 %x, i136* @i136_s
- ret void
-}
-
-define void @i137_ls(i137 signext %x) nounwind {
- store i137 %x, i137* @i137_s
- ret void
-}
-
-define void @i138_ls(i138 signext %x) nounwind {
- store i138 %x, i138* @i138_s
- ret void
-}
-
-define void @i139_ls(i139 signext %x) nounwind {
- store i139 %x, i139* @i139_s
- ret void
-}
-
-define void @i140_ls(i140 signext %x) nounwind {
- store i140 %x, i140* @i140_s
- ret void
-}
-
-define void @i141_ls(i141 signext %x) nounwind {
- store i141 %x, i141* @i141_s
- ret void
-}
-
-define void @i142_ls(i142 signext %x) nounwind {
- store i142 %x, i142* @i142_s
- ret void
-}
-
-define void @i143_ls(i143 signext %x) nounwind {
- store i143 %x, i143* @i143_s
- ret void
-}
-
-define void @i144_ls(i144 signext %x) nounwind {
- store i144 %x, i144* @i144_s
- ret void
-}
-
-define void @i145_ls(i145 signext %x) nounwind {
- store i145 %x, i145* @i145_s
- ret void
-}
-
-define void @i146_ls(i146 signext %x) nounwind {
- store i146 %x, i146* @i146_s
- ret void
-}
-
-define void @i147_ls(i147 signext %x) nounwind {
- store i147 %x, i147* @i147_s
- ret void
-}
-
-define void @i148_ls(i148 signext %x) nounwind {
- store i148 %x, i148* @i148_s
- ret void
-}
-
-define void @i149_ls(i149 signext %x) nounwind {
- store i149 %x, i149* @i149_s
- ret void
-}
-
-define void @i150_ls(i150 signext %x) nounwind {
- store i150 %x, i150* @i150_s
- ret void
-}
-
-define void @i151_ls(i151 signext %x) nounwind {
- store i151 %x, i151* @i151_s
- ret void
-}
-
-define void @i152_ls(i152 signext %x) nounwind {
- store i152 %x, i152* @i152_s
- ret void
-}
-
-define void @i153_ls(i153 signext %x) nounwind {
- store i153 %x, i153* @i153_s
- ret void
-}
-
-define void @i154_ls(i154 signext %x) nounwind {
- store i154 %x, i154* @i154_s
- ret void
-}
-
-define void @i155_ls(i155 signext %x) nounwind {
- store i155 %x, i155* @i155_s
- ret void
-}
-
-define void @i156_ls(i156 signext %x) nounwind {
- store i156 %x, i156* @i156_s
- ret void
-}
-
-define void @i157_ls(i157 signext %x) nounwind {
- store i157 %x, i157* @i157_s
- ret void
-}
-
-define void @i158_ls(i158 signext %x) nounwind {
- store i158 %x, i158* @i158_s
- ret void
-}
-
-define void @i159_ls(i159 signext %x) nounwind {
- store i159 %x, i159* @i159_s
- ret void
-}
-
-define void @i160_ls(i160 signext %x) nounwind {
- store i160 %x, i160* @i160_s
- ret void
-}
-
-define void @i161_ls(i161 signext %x) nounwind {
- store i161 %x, i161* @i161_s
- ret void
-}
-
-define void @i162_ls(i162 signext %x) nounwind {
- store i162 %x, i162* @i162_s
- ret void
-}
-
-define void @i163_ls(i163 signext %x) nounwind {
- store i163 %x, i163* @i163_s
- ret void
-}
-
-define void @i164_ls(i164 signext %x) nounwind {
- store i164 %x, i164* @i164_s
- ret void
-}
-
-define void @i165_ls(i165 signext %x) nounwind {
- store i165 %x, i165* @i165_s
- ret void
-}
-
-define void @i166_ls(i166 signext %x) nounwind {
- store i166 %x, i166* @i166_s
- ret void
-}
-
-define void @i167_ls(i167 signext %x) nounwind {
- store i167 %x, i167* @i167_s
- ret void
-}
-
-define void @i168_ls(i168 signext %x) nounwind {
- store i168 %x, i168* @i168_s
- ret void
-}
-
-define void @i169_ls(i169 signext %x) nounwind {
- store i169 %x, i169* @i169_s
- ret void
-}
-
-define void @i170_ls(i170 signext %x) nounwind {
- store i170 %x, i170* @i170_s
- ret void
-}
-
-define void @i171_ls(i171 signext %x) nounwind {
- store i171 %x, i171* @i171_s
- ret void
-}
-
-define void @i172_ls(i172 signext %x) nounwind {
- store i172 %x, i172* @i172_s
- ret void
-}
-
-define void @i173_ls(i173 signext %x) nounwind {
- store i173 %x, i173* @i173_s
- ret void
-}
-
-define void @i174_ls(i174 signext %x) nounwind {
- store i174 %x, i174* @i174_s
- ret void
-}
-
-define void @i175_ls(i175 signext %x) nounwind {
- store i175 %x, i175* @i175_s
- ret void
-}
-
-define void @i176_ls(i176 signext %x) nounwind {
- store i176 %x, i176* @i176_s
- ret void
-}
-
-define void @i177_ls(i177 signext %x) nounwind {
- store i177 %x, i177* @i177_s
- ret void
-}
-
-define void @i178_ls(i178 signext %x) nounwind {
- store i178 %x, i178* @i178_s
- ret void
-}
-
-define void @i179_ls(i179 signext %x) nounwind {
- store i179 %x, i179* @i179_s
- ret void
-}
-
-define void @i180_ls(i180 signext %x) nounwind {
- store i180 %x, i180* @i180_s
- ret void
-}
-
-define void @i181_ls(i181 signext %x) nounwind {
- store i181 %x, i181* @i181_s
- ret void
-}
-
-define void @i182_ls(i182 signext %x) nounwind {
- store i182 %x, i182* @i182_s
- ret void
-}
-
-define void @i183_ls(i183 signext %x) nounwind {
- store i183 %x, i183* @i183_s
- ret void
-}
-
-define void @i184_ls(i184 signext %x) nounwind {
- store i184 %x, i184* @i184_s
- ret void
-}
-
-define void @i185_ls(i185 signext %x) nounwind {
- store i185 %x, i185* @i185_s
- ret void
-}
-
-define void @i186_ls(i186 signext %x) nounwind {
- store i186 %x, i186* @i186_s
- ret void
-}
-
-define void @i187_ls(i187 signext %x) nounwind {
- store i187 %x, i187* @i187_s
- ret void
-}
-
-define void @i188_ls(i188 signext %x) nounwind {
- store i188 %x, i188* @i188_s
- ret void
-}
-
-define void @i189_ls(i189 signext %x) nounwind {
- store i189 %x, i189* @i189_s
- ret void
-}
-
-define void @i190_ls(i190 signext %x) nounwind {
- store i190 %x, i190* @i190_s
- ret void
-}
-
-define void @i191_ls(i191 signext %x) nounwind {
- store i191 %x, i191* @i191_s
- ret void
-}
-
-define void @i192_ls(i192 signext %x) nounwind {
- store i192 %x, i192* @i192_s
- ret void
-}
-
-define void @i193_ls(i193 signext %x) nounwind {
- store i193 %x, i193* @i193_s
- ret void
-}
-
-define void @i194_ls(i194 signext %x) nounwind {
- store i194 %x, i194* @i194_s
- ret void
-}
-
-define void @i195_ls(i195 signext %x) nounwind {
- store i195 %x, i195* @i195_s
- ret void
-}
-
-define void @i196_ls(i196 signext %x) nounwind {
- store i196 %x, i196* @i196_s
- ret void
-}
-
-define void @i197_ls(i197 signext %x) nounwind {
- store i197 %x, i197* @i197_s
- ret void
-}
-
-define void @i198_ls(i198 signext %x) nounwind {
- store i198 %x, i198* @i198_s
- ret void
-}
-
-define void @i199_ls(i199 signext %x) nounwind {
- store i199 %x, i199* @i199_s
- ret void
-}
-
-define void @i200_ls(i200 signext %x) nounwind {
- store i200 %x, i200* @i200_s
- ret void
-}
-
-define void @i201_ls(i201 signext %x) nounwind {
- store i201 %x, i201* @i201_s
- ret void
-}
-
-define void @i202_ls(i202 signext %x) nounwind {
- store i202 %x, i202* @i202_s
- ret void
-}
-
-define void @i203_ls(i203 signext %x) nounwind {
- store i203 %x, i203* @i203_s
- ret void
-}
-
-define void @i204_ls(i204 signext %x) nounwind {
- store i204 %x, i204* @i204_s
- ret void
-}
-
-define void @i205_ls(i205 signext %x) nounwind {
- store i205 %x, i205* @i205_s
- ret void
-}
-
-define void @i206_ls(i206 signext %x) nounwind {
- store i206 %x, i206* @i206_s
- ret void
-}
-
-define void @i207_ls(i207 signext %x) nounwind {
- store i207 %x, i207* @i207_s
- ret void
-}
-
-define void @i208_ls(i208 signext %x) nounwind {
- store i208 %x, i208* @i208_s
- ret void
-}
-
-define void @i209_ls(i209 signext %x) nounwind {
- store i209 %x, i209* @i209_s
- ret void
-}
-
-define void @i210_ls(i210 signext %x) nounwind {
- store i210 %x, i210* @i210_s
- ret void
-}
-
-define void @i211_ls(i211 signext %x) nounwind {
- store i211 %x, i211* @i211_s
- ret void
-}
-
-define void @i212_ls(i212 signext %x) nounwind {
- store i212 %x, i212* @i212_s
- ret void
-}
-
-define void @i213_ls(i213 signext %x) nounwind {
- store i213 %x, i213* @i213_s
- ret void
-}
-
-define void @i214_ls(i214 signext %x) nounwind {
- store i214 %x, i214* @i214_s
- ret void
-}
-
-define void @i215_ls(i215 signext %x) nounwind {
- store i215 %x, i215* @i215_s
- ret void
-}
-
-define void @i216_ls(i216 signext %x) nounwind {
- store i216 %x, i216* @i216_s
- ret void
-}
-
-define void @i217_ls(i217 signext %x) nounwind {
- store i217 %x, i217* @i217_s
- ret void
-}
-
-define void @i218_ls(i218 signext %x) nounwind {
- store i218 %x, i218* @i218_s
- ret void
-}
-
-define void @i219_ls(i219 signext %x) nounwind {
- store i219 %x, i219* @i219_s
- ret void
-}
-
-define void @i220_ls(i220 signext %x) nounwind {
- store i220 %x, i220* @i220_s
- ret void
-}
-
-define void @i221_ls(i221 signext %x) nounwind {
- store i221 %x, i221* @i221_s
- ret void
-}
-
-define void @i222_ls(i222 signext %x) nounwind {
- store i222 %x, i222* @i222_s
- ret void
-}
-
-define void @i223_ls(i223 signext %x) nounwind {
- store i223 %x, i223* @i223_s
- ret void
-}
-
-define void @i224_ls(i224 signext %x) nounwind {
- store i224 %x, i224* @i224_s
- ret void
-}
-
-define void @i225_ls(i225 signext %x) nounwind {
- store i225 %x, i225* @i225_s
- ret void
-}
-
-define void @i226_ls(i226 signext %x) nounwind {
- store i226 %x, i226* @i226_s
- ret void
-}
-
-define void @i227_ls(i227 signext %x) nounwind {
- store i227 %x, i227* @i227_s
- ret void
-}
-
-define void @i228_ls(i228 signext %x) nounwind {
- store i228 %x, i228* @i228_s
- ret void
-}
-
-define void @i229_ls(i229 signext %x) nounwind {
- store i229 %x, i229* @i229_s
- ret void
-}
-
-define void @i230_ls(i230 signext %x) nounwind {
- store i230 %x, i230* @i230_s
- ret void
-}
-
-define void @i231_ls(i231 signext %x) nounwind {
- store i231 %x, i231* @i231_s
- ret void
-}
-
-define void @i232_ls(i232 signext %x) nounwind {
- store i232 %x, i232* @i232_s
- ret void
-}
-
-define void @i233_ls(i233 signext %x) nounwind {
- store i233 %x, i233* @i233_s
- ret void
-}
-
-define void @i234_ls(i234 signext %x) nounwind {
- store i234 %x, i234* @i234_s
- ret void
-}
-
-define void @i235_ls(i235 signext %x) nounwind {
- store i235 %x, i235* @i235_s
- ret void
-}
-
-define void @i236_ls(i236 signext %x) nounwind {
- store i236 %x, i236* @i236_s
- ret void
-}
-
-define void @i237_ls(i237 signext %x) nounwind {
- store i237 %x, i237* @i237_s
- ret void
-}
-
-define void @i238_ls(i238 signext %x) nounwind {
- store i238 %x, i238* @i238_s
- ret void
-}
-
-define void @i239_ls(i239 signext %x) nounwind {
- store i239 %x, i239* @i239_s
- ret void
-}
-
-define void @i240_ls(i240 signext %x) nounwind {
- store i240 %x, i240* @i240_s
- ret void
-}
-
-define void @i241_ls(i241 signext %x) nounwind {
- store i241 %x, i241* @i241_s
- ret void
-}
-
-define void @i242_ls(i242 signext %x) nounwind {
- store i242 %x, i242* @i242_s
- ret void
-}
-
-define void @i243_ls(i243 signext %x) nounwind {
- store i243 %x, i243* @i243_s
- ret void
-}
-
-define void @i244_ls(i244 signext %x) nounwind {
- store i244 %x, i244* @i244_s
- ret void
-}
-
-define void @i245_ls(i245 signext %x) nounwind {
- store i245 %x, i245* @i245_s
- ret void
-}
-
-define void @i246_ls(i246 signext %x) nounwind {
- store i246 %x, i246* @i246_s
- ret void
-}
-
-define void @i247_ls(i247 signext %x) nounwind {
- store i247 %x, i247* @i247_s
- ret void
-}
-
-define void @i248_ls(i248 signext %x) nounwind {
- store i248 %x, i248* @i248_s
- ret void
-}
-
-define void @i249_ls(i249 signext %x) nounwind {
- store i249 %x, i249* @i249_s
- ret void
-}
-
-define void @i250_ls(i250 signext %x) nounwind {
- store i250 %x, i250* @i250_s
- ret void
-}
-
-define void @i251_ls(i251 signext %x) nounwind {
- store i251 %x, i251* @i251_s
- ret void
-}
-
-define void @i252_ls(i252 signext %x) nounwind {
- store i252 %x, i252* @i252_s
- ret void
-}
-
-define void @i253_ls(i253 signext %x) nounwind {
- store i253 %x, i253* @i253_s
- ret void
-}
-
-define void @i254_ls(i254 signext %x) nounwind {
- store i254 %x, i254* @i254_s
- ret void
-}
-
-define void @i255_ls(i255 signext %x) nounwind {
- store i255 %x, i255* @i255_s
- ret void
-}
-
-define void @i256_ls(i256 signext %x) nounwind {
- store i256 %x, i256* @i256_s
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/APIntZextParam.ll b/libclamav/c++/llvm/test/CodeGen/Generic/APIntZextParam.ll
deleted file mode 100644
index 173b9fd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/APIntZextParam.ll
+++ /dev/null
@@ -1,1537 +0,0 @@
-; RUN: llc < %s > %t
-@i1_s = external global i1 ; <i1*> [#uses=1]
-@i2_s = external global i2 ; <i2*> [#uses=1]
-@i3_s = external global i3 ; <i3*> [#uses=1]
-@i4_s = external global i4 ; <i4*> [#uses=1]
-@i5_s = external global i5 ; <i5*> [#uses=1]
-@i6_s = external global i6 ; <i6*> [#uses=1]
-@i7_s = external global i7 ; <i7*> [#uses=1]
-@i8_s = external global i8 ; <i8*> [#uses=1]
-@i9_s = external global i9 ; <i9*> [#uses=1]
-@i10_s = external global i10 ; <i10*> [#uses=1]
-@i11_s = external global i11 ; <i11*> [#uses=1]
-@i12_s = external global i12 ; <i12*> [#uses=1]
-@i13_s = external global i13 ; <i13*> [#uses=1]
-@i14_s = external global i14 ; <i14*> [#uses=1]
-@i15_s = external global i15 ; <i15*> [#uses=1]
-@i16_s = external global i16 ; <i16*> [#uses=1]
-@i17_s = external global i17 ; <i17*> [#uses=1]
-@i18_s = external global i18 ; <i18*> [#uses=1]
-@i19_s = external global i19 ; <i19*> [#uses=1]
-@i20_s = external global i20 ; <i20*> [#uses=1]
-@i21_s = external global i21 ; <i21*> [#uses=1]
-@i22_s = external global i22 ; <i22*> [#uses=1]
-@i23_s = external global i23 ; <i23*> [#uses=1]
-@i24_s = external global i24 ; <i24*> [#uses=1]
-@i25_s = external global i25 ; <i25*> [#uses=1]
-@i26_s = external global i26 ; <i26*> [#uses=1]
-@i27_s = external global i27 ; <i27*> [#uses=1]
-@i28_s = external global i28 ; <i28*> [#uses=1]
-@i29_s = external global i29 ; <i29*> [#uses=1]
-@i30_s = external global i30 ; <i30*> [#uses=1]
-@i31_s = external global i31 ; <i31*> [#uses=1]
-@i32_s = external global i32 ; <i32*> [#uses=1]
-@i33_s = external global i33 ; <i33*> [#uses=1]
-@i34_s = external global i34 ; <i34*> [#uses=1]
-@i35_s = external global i35 ; <i35*> [#uses=1]
-@i36_s = external global i36 ; <i36*> [#uses=1]
-@i37_s = external global i37 ; <i37*> [#uses=1]
-@i38_s = external global i38 ; <i38*> [#uses=1]
-@i39_s = external global i39 ; <i39*> [#uses=1]
-@i40_s = external global i40 ; <i40*> [#uses=1]
-@i41_s = external global i41 ; <i41*> [#uses=1]
-@i42_s = external global i42 ; <i42*> [#uses=1]
-@i43_s = external global i43 ; <i43*> [#uses=1]
-@i44_s = external global i44 ; <i44*> [#uses=1]
-@i45_s = external global i45 ; <i45*> [#uses=1]
-@i46_s = external global i46 ; <i46*> [#uses=1]
-@i47_s = external global i47 ; <i47*> [#uses=1]
-@i48_s = external global i48 ; <i48*> [#uses=1]
-@i49_s = external global i49 ; <i49*> [#uses=1]
-@i50_s = external global i50 ; <i50*> [#uses=1]
-@i51_s = external global i51 ; <i51*> [#uses=1]
-@i52_s = external global i52 ; <i52*> [#uses=1]
-@i53_s = external global i53 ; <i53*> [#uses=1]
-@i54_s = external global i54 ; <i54*> [#uses=1]
-@i55_s = external global i55 ; <i55*> [#uses=1]
-@i56_s = external global i56 ; <i56*> [#uses=1]
-@i57_s = external global i57 ; <i57*> [#uses=1]
-@i58_s = external global i58 ; <i58*> [#uses=1]
-@i59_s = external global i59 ; <i59*> [#uses=1]
-@i60_s = external global i60 ; <i60*> [#uses=1]
-@i61_s = external global i61 ; <i61*> [#uses=1]
-@i62_s = external global i62 ; <i62*> [#uses=1]
-@i63_s = external global i63 ; <i63*> [#uses=1]
-@i64_s = external global i64 ; <i64*> [#uses=1]
-@i65_s = external global i65 ; <i65*> [#uses=1]
-@i66_s = external global i66 ; <i66*> [#uses=1]
-@i67_s = external global i67 ; <i67*> [#uses=1]
-@i68_s = external global i68 ; <i68*> [#uses=1]
-@i69_s = external global i69 ; <i69*> [#uses=1]
-@i70_s = external global i70 ; <i70*> [#uses=1]
-@i71_s = external global i71 ; <i71*> [#uses=1]
-@i72_s = external global i72 ; <i72*> [#uses=1]
-@i73_s = external global i73 ; <i73*> [#uses=1]
-@i74_s = external global i74 ; <i74*> [#uses=1]
-@i75_s = external global i75 ; <i75*> [#uses=1]
-@i76_s = external global i76 ; <i76*> [#uses=1]
-@i77_s = external global i77 ; <i77*> [#uses=1]
-@i78_s = external global i78 ; <i78*> [#uses=1]
-@i79_s = external global i79 ; <i79*> [#uses=1]
-@i80_s = external global i80 ; <i80*> [#uses=1]
-@i81_s = external global i81 ; <i81*> [#uses=1]
-@i82_s = external global i82 ; <i82*> [#uses=1]
-@i83_s = external global i83 ; <i83*> [#uses=1]
-@i84_s = external global i84 ; <i84*> [#uses=1]
-@i85_s = external global i85 ; <i85*> [#uses=1]
-@i86_s = external global i86 ; <i86*> [#uses=1]
-@i87_s = external global i87 ; <i87*> [#uses=1]
-@i88_s = external global i88 ; <i88*> [#uses=1]
-@i89_s = external global i89 ; <i89*> [#uses=1]
-@i90_s = external global i90 ; <i90*> [#uses=1]
-@i91_s = external global i91 ; <i91*> [#uses=1]
-@i92_s = external global i92 ; <i92*> [#uses=1]
-@i93_s = external global i93 ; <i93*> [#uses=1]
-@i94_s = external global i94 ; <i94*> [#uses=1]
-@i95_s = external global i95 ; <i95*> [#uses=1]
-@i96_s = external global i96 ; <i96*> [#uses=1]
-@i97_s = external global i97 ; <i97*> [#uses=1]
-@i98_s = external global i98 ; <i98*> [#uses=1]
-@i99_s = external global i99 ; <i99*> [#uses=1]
-@i100_s = external global i100 ; <i100*> [#uses=1]
-@i101_s = external global i101 ; <i101*> [#uses=1]
-@i102_s = external global i102 ; <i102*> [#uses=1]
-@i103_s = external global i103 ; <i103*> [#uses=1]
-@i104_s = external global i104 ; <i104*> [#uses=1]
-@i105_s = external global i105 ; <i105*> [#uses=1]
-@i106_s = external global i106 ; <i106*> [#uses=1]
-@i107_s = external global i107 ; <i107*> [#uses=1]
-@i108_s = external global i108 ; <i108*> [#uses=1]
-@i109_s = external global i109 ; <i109*> [#uses=1]
-@i110_s = external global i110 ; <i110*> [#uses=1]
-@i111_s = external global i111 ; <i111*> [#uses=1]
-@i112_s = external global i112 ; <i112*> [#uses=1]
-@i113_s = external global i113 ; <i113*> [#uses=1]
-@i114_s = external global i114 ; <i114*> [#uses=1]
-@i115_s = external global i115 ; <i115*> [#uses=1]
-@i116_s = external global i116 ; <i116*> [#uses=1]
-@i117_s = external global i117 ; <i117*> [#uses=1]
-@i118_s = external global i118 ; <i118*> [#uses=1]
-@i119_s = external global i119 ; <i119*> [#uses=1]
-@i120_s = external global i120 ; <i120*> [#uses=1]
-@i121_s = external global i121 ; <i121*> [#uses=1]
-@i122_s = external global i122 ; <i122*> [#uses=1]
-@i123_s = external global i123 ; <i123*> [#uses=1]
-@i124_s = external global i124 ; <i124*> [#uses=1]
-@i125_s = external global i125 ; <i125*> [#uses=1]
-@i126_s = external global i126 ; <i126*> [#uses=1]
-@i127_s = external global i127 ; <i127*> [#uses=1]
-@i128_s = external global i128 ; <i128*> [#uses=1]
-@i129_s = external global i129 ; <i129*> [#uses=1]
-@i130_s = external global i130 ; <i130*> [#uses=1]
-@i131_s = external global i131 ; <i131*> [#uses=1]
-@i132_s = external global i132 ; <i132*> [#uses=1]
-@i133_s = external global i133 ; <i133*> [#uses=1]
-@i134_s = external global i134 ; <i134*> [#uses=1]
-@i135_s = external global i135 ; <i135*> [#uses=1]
-@i136_s = external global i136 ; <i136*> [#uses=1]
-@i137_s = external global i137 ; <i137*> [#uses=1]
-@i138_s = external global i138 ; <i138*> [#uses=1]
-@i139_s = external global i139 ; <i139*> [#uses=1]
-@i140_s = external global i140 ; <i140*> [#uses=1]
-@i141_s = external global i141 ; <i141*> [#uses=1]
-@i142_s = external global i142 ; <i142*> [#uses=1]
-@i143_s = external global i143 ; <i143*> [#uses=1]
-@i144_s = external global i144 ; <i144*> [#uses=1]
-@i145_s = external global i145 ; <i145*> [#uses=1]
-@i146_s = external global i146 ; <i146*> [#uses=1]
-@i147_s = external global i147 ; <i147*> [#uses=1]
-@i148_s = external global i148 ; <i148*> [#uses=1]
-@i149_s = external global i149 ; <i149*> [#uses=1]
-@i150_s = external global i150 ; <i150*> [#uses=1]
-@i151_s = external global i151 ; <i151*> [#uses=1]
-@i152_s = external global i152 ; <i152*> [#uses=1]
-@i153_s = external global i153 ; <i153*> [#uses=1]
-@i154_s = external global i154 ; <i154*> [#uses=1]
-@i155_s = external global i155 ; <i155*> [#uses=1]
-@i156_s = external global i156 ; <i156*> [#uses=1]
-@i157_s = external global i157 ; <i157*> [#uses=1]
-@i158_s = external global i158 ; <i158*> [#uses=1]
-@i159_s = external global i159 ; <i159*> [#uses=1]
-@i160_s = external global i160 ; <i160*> [#uses=1]
-@i161_s = external global i161 ; <i161*> [#uses=1]
-@i162_s = external global i162 ; <i162*> [#uses=1]
-@i163_s = external global i163 ; <i163*> [#uses=1]
-@i164_s = external global i164 ; <i164*> [#uses=1]
-@i165_s = external global i165 ; <i165*> [#uses=1]
-@i166_s = external global i166 ; <i166*> [#uses=1]
-@i167_s = external global i167 ; <i167*> [#uses=1]
-@i168_s = external global i168 ; <i168*> [#uses=1]
-@i169_s = external global i169 ; <i169*> [#uses=1]
-@i170_s = external global i170 ; <i170*> [#uses=1]
-@i171_s = external global i171 ; <i171*> [#uses=1]
-@i172_s = external global i172 ; <i172*> [#uses=1]
-@i173_s = external global i173 ; <i173*> [#uses=1]
-@i174_s = external global i174 ; <i174*> [#uses=1]
-@i175_s = external global i175 ; <i175*> [#uses=1]
-@i176_s = external global i176 ; <i176*> [#uses=1]
-@i177_s = external global i177 ; <i177*> [#uses=1]
-@i178_s = external global i178 ; <i178*> [#uses=1]
-@i179_s = external global i179 ; <i179*> [#uses=1]
-@i180_s = external global i180 ; <i180*> [#uses=1]
-@i181_s = external global i181 ; <i181*> [#uses=1]
-@i182_s = external global i182 ; <i182*> [#uses=1]
-@i183_s = external global i183 ; <i183*> [#uses=1]
-@i184_s = external global i184 ; <i184*> [#uses=1]
-@i185_s = external global i185 ; <i185*> [#uses=1]
-@i186_s = external global i186 ; <i186*> [#uses=1]
-@i187_s = external global i187 ; <i187*> [#uses=1]
-@i188_s = external global i188 ; <i188*> [#uses=1]
-@i189_s = external global i189 ; <i189*> [#uses=1]
-@i190_s = external global i190 ; <i190*> [#uses=1]
-@i191_s = external global i191 ; <i191*> [#uses=1]
-@i192_s = external global i192 ; <i192*> [#uses=1]
-@i193_s = external global i193 ; <i193*> [#uses=1]
-@i194_s = external global i194 ; <i194*> [#uses=1]
-@i195_s = external global i195 ; <i195*> [#uses=1]
-@i196_s = external global i196 ; <i196*> [#uses=1]
-@i197_s = external global i197 ; <i197*> [#uses=1]
-@i198_s = external global i198 ; <i198*> [#uses=1]
-@i199_s = external global i199 ; <i199*> [#uses=1]
-@i200_s = external global i200 ; <i200*> [#uses=1]
-@i201_s = external global i201 ; <i201*> [#uses=1]
-@i202_s = external global i202 ; <i202*> [#uses=1]
-@i203_s = external global i203 ; <i203*> [#uses=1]
-@i204_s = external global i204 ; <i204*> [#uses=1]
-@i205_s = external global i205 ; <i205*> [#uses=1]
-@i206_s = external global i206 ; <i206*> [#uses=1]
-@i207_s = external global i207 ; <i207*> [#uses=1]
-@i208_s = external global i208 ; <i208*> [#uses=1]
-@i209_s = external global i209 ; <i209*> [#uses=1]
-@i210_s = external global i210 ; <i210*> [#uses=1]
-@i211_s = external global i211 ; <i211*> [#uses=1]
-@i212_s = external global i212 ; <i212*> [#uses=1]
-@i213_s = external global i213 ; <i213*> [#uses=1]
-@i214_s = external global i214 ; <i214*> [#uses=1]
-@i215_s = external global i215 ; <i215*> [#uses=1]
-@i216_s = external global i216 ; <i216*> [#uses=1]
-@i217_s = external global i217 ; <i217*> [#uses=1]
-@i218_s = external global i218 ; <i218*> [#uses=1]
-@i219_s = external global i219 ; <i219*> [#uses=1]
-@i220_s = external global i220 ; <i220*> [#uses=1]
-@i221_s = external global i221 ; <i221*> [#uses=1]
-@i222_s = external global i222 ; <i222*> [#uses=1]
-@i223_s = external global i223 ; <i223*> [#uses=1]
-@i224_s = external global i224 ; <i224*> [#uses=1]
-@i225_s = external global i225 ; <i225*> [#uses=1]
-@i226_s = external global i226 ; <i226*> [#uses=1]
-@i227_s = external global i227 ; <i227*> [#uses=1]
-@i228_s = external global i228 ; <i228*> [#uses=1]
-@i229_s = external global i229 ; <i229*> [#uses=1]
-@i230_s = external global i230 ; <i230*> [#uses=1]
-@i231_s = external global i231 ; <i231*> [#uses=1]
- at i232_s = external global i232 ; <i232*> [#uses=1]
- at i233_s = external global i233 ; <i233*> [#uses=1]
- at i234_s = external global i234 ; <i234*> [#uses=1]
- at i235_s = external global i235 ; <i235*> [#uses=1]
- at i236_s = external global i236 ; <i236*> [#uses=1]
- at i237_s = external global i237 ; <i237*> [#uses=1]
- at i238_s = external global i238 ; <i238*> [#uses=1]
- at i239_s = external global i239 ; <i239*> [#uses=1]
- at i240_s = external global i240 ; <i240*> [#uses=1]
- at i241_s = external global i241 ; <i241*> [#uses=1]
- at i242_s = external global i242 ; <i242*> [#uses=1]
- at i243_s = external global i243 ; <i243*> [#uses=1]
- at i244_s = external global i244 ; <i244*> [#uses=1]
- at i245_s = external global i245 ; <i245*> [#uses=1]
- at i246_s = external global i246 ; <i246*> [#uses=1]
- at i247_s = external global i247 ; <i247*> [#uses=1]
- at i248_s = external global i248 ; <i248*> [#uses=1]
- at i249_s = external global i249 ; <i249*> [#uses=1]
- at i250_s = external global i250 ; <i250*> [#uses=1]
- at i251_s = external global i251 ; <i251*> [#uses=1]
- at i252_s = external global i252 ; <i252*> [#uses=1]
- at i253_s = external global i253 ; <i253*> [#uses=1]
- at i254_s = external global i254 ; <i254*> [#uses=1]
- at i255_s = external global i255 ; <i255*> [#uses=1]
- at i256_s = external global i256 ; <i256*> [#uses=1]
-
-define void @i1_ls(i1 zeroext %x) nounwind {
- store i1 %x, i1* @i1_s
- ret void
-}
-
-define void @i2_ls(i2 zeroext %x) nounwind {
- store i2 %x, i2* @i2_s
- ret void
-}
-
-define void @i3_ls(i3 zeroext %x) nounwind {
- store i3 %x, i3* @i3_s
- ret void
-}
-
-define void @i4_ls(i4 zeroext %x) nounwind {
- store i4 %x, i4* @i4_s
- ret void
-}
-
-define void @i5_ls(i5 zeroext %x) nounwind {
- store i5 %x, i5* @i5_s
- ret void
-}
-
-define void @i6_ls(i6 zeroext %x) nounwind {
- store i6 %x, i6* @i6_s
- ret void
-}
-
-define void @i7_ls(i7 zeroext %x) nounwind {
- store i7 %x, i7* @i7_s
- ret void
-}
-
-define void @i8_ls(i8 zeroext %x) nounwind {
- store i8 %x, i8* @i8_s
- ret void
-}
-
-define void @i9_ls(i9 zeroext %x) nounwind {
- store i9 %x, i9* @i9_s
- ret void
-}
-
-define void @i10_ls(i10 zeroext %x) nounwind {
- store i10 %x, i10* @i10_s
- ret void
-}
-
-define void @i11_ls(i11 zeroext %x) nounwind {
- store i11 %x, i11* @i11_s
- ret void
-}
-
-define void @i12_ls(i12 zeroext %x) nounwind {
- store i12 %x, i12* @i12_s
- ret void
-}
-
-define void @i13_ls(i13 zeroext %x) nounwind {
- store i13 %x, i13* @i13_s
- ret void
-}
-
-define void @i14_ls(i14 zeroext %x) nounwind {
- store i14 %x, i14* @i14_s
- ret void
-}
-
-define void @i15_ls(i15 zeroext %x) nounwind {
- store i15 %x, i15* @i15_s
- ret void
-}
-
-define void @i16_ls(i16 zeroext %x) nounwind {
- store i16 %x, i16* @i16_s
- ret void
-}
-
-define void @i17_ls(i17 zeroext %x) nounwind {
- store i17 %x, i17* @i17_s
- ret void
-}
-
-define void @i18_ls(i18 zeroext %x) nounwind {
- store i18 %x, i18* @i18_s
- ret void
-}
-
-define void @i19_ls(i19 zeroext %x) nounwind {
- store i19 %x, i19* @i19_s
- ret void
-}
-
-define void @i20_ls(i20 zeroext %x) nounwind {
- store i20 %x, i20* @i20_s
- ret void
-}
-
-define void @i21_ls(i21 zeroext %x) nounwind {
- store i21 %x, i21* @i21_s
- ret void
-}
-
-define void @i22_ls(i22 zeroext %x) nounwind {
- store i22 %x, i22* @i22_s
- ret void
-}
-
-define void @i23_ls(i23 zeroext %x) nounwind {
- store i23 %x, i23* @i23_s
- ret void
-}
-
-define void @i24_ls(i24 zeroext %x) nounwind {
- store i24 %x, i24* @i24_s
- ret void
-}
-
-define void @i25_ls(i25 zeroext %x) nounwind {
- store i25 %x, i25* @i25_s
- ret void
-}
-
-define void @i26_ls(i26 zeroext %x) nounwind {
- store i26 %x, i26* @i26_s
- ret void
-}
-
-define void @i27_ls(i27 zeroext %x) nounwind {
- store i27 %x, i27* @i27_s
- ret void
-}
-
-define void @i28_ls(i28 zeroext %x) nounwind {
- store i28 %x, i28* @i28_s
- ret void
-}
-
-define void @i29_ls(i29 zeroext %x) nounwind {
- store i29 %x, i29* @i29_s
- ret void
-}
-
-define void @i30_ls(i30 zeroext %x) nounwind {
- store i30 %x, i30* @i30_s
- ret void
-}
-
-define void @i31_ls(i31 zeroext %x) nounwind {
- store i31 %x, i31* @i31_s
- ret void
-}
-
-define void @i32_ls(i32 zeroext %x) nounwind {
- store i32 %x, i32* @i32_s
- ret void
-}
-
-define void @i33_ls(i33 zeroext %x) nounwind {
- store i33 %x, i33* @i33_s
- ret void
-}
-
-define void @i34_ls(i34 zeroext %x) nounwind {
- store i34 %x, i34* @i34_s
- ret void
-}
-
-define void @i35_ls(i35 zeroext %x) nounwind {
- store i35 %x, i35* @i35_s
- ret void
-}
-
-define void @i36_ls(i36 zeroext %x) nounwind {
- store i36 %x, i36* @i36_s
- ret void
-}
-
-define void @i37_ls(i37 zeroext %x) nounwind {
- store i37 %x, i37* @i37_s
- ret void
-}
-
-define void @i38_ls(i38 zeroext %x) nounwind {
- store i38 %x, i38* @i38_s
- ret void
-}
-
-define void @i39_ls(i39 zeroext %x) nounwind {
- store i39 %x, i39* @i39_s
- ret void
-}
-
-define void @i40_ls(i40 zeroext %x) nounwind {
- store i40 %x, i40* @i40_s
- ret void
-}
-
-define void @i41_ls(i41 zeroext %x) nounwind {
- store i41 %x, i41* @i41_s
- ret void
-}
-
-define void @i42_ls(i42 zeroext %x) nounwind {
- store i42 %x, i42* @i42_s
- ret void
-}
-
-define void @i43_ls(i43 zeroext %x) nounwind {
- store i43 %x, i43* @i43_s
- ret void
-}
-
-define void @i44_ls(i44 zeroext %x) nounwind {
- store i44 %x, i44* @i44_s
- ret void
-}
-
-define void @i45_ls(i45 zeroext %x) nounwind {
- store i45 %x, i45* @i45_s
- ret void
-}
-
-define void @i46_ls(i46 zeroext %x) nounwind {
- store i46 %x, i46* @i46_s
- ret void
-}
-
-define void @i47_ls(i47 zeroext %x) nounwind {
- store i47 %x, i47* @i47_s
- ret void
-}
-
-define void @i48_ls(i48 zeroext %x) nounwind {
- store i48 %x, i48* @i48_s
- ret void
-}
-
-define void @i49_ls(i49 zeroext %x) nounwind {
- store i49 %x, i49* @i49_s
- ret void
-}
-
-define void @i50_ls(i50 zeroext %x) nounwind {
- store i50 %x, i50* @i50_s
- ret void
-}
-
-define void @i51_ls(i51 zeroext %x) nounwind {
- store i51 %x, i51* @i51_s
- ret void
-}
-
-define void @i52_ls(i52 zeroext %x) nounwind {
- store i52 %x, i52* @i52_s
- ret void
-}
-
-define void @i53_ls(i53 zeroext %x) nounwind {
- store i53 %x, i53* @i53_s
- ret void
-}
-
-define void @i54_ls(i54 zeroext %x) nounwind {
- store i54 %x, i54* @i54_s
- ret void
-}
-
-define void @i55_ls(i55 zeroext %x) nounwind {
- store i55 %x, i55* @i55_s
- ret void
-}
-
-define void @i56_ls(i56 zeroext %x) nounwind {
- store i56 %x, i56* @i56_s
- ret void
-}
-
-define void @i57_ls(i57 zeroext %x) nounwind {
- store i57 %x, i57* @i57_s
- ret void
-}
-
-define void @i58_ls(i58 zeroext %x) nounwind {
- store i58 %x, i58* @i58_s
- ret void
-}
-
-define void @i59_ls(i59 zeroext %x) nounwind {
- store i59 %x, i59* @i59_s
- ret void
-}
-
-define void @i60_ls(i60 zeroext %x) nounwind {
- store i60 %x, i60* @i60_s
- ret void
-}
-
-define void @i61_ls(i61 zeroext %x) nounwind {
- store i61 %x, i61* @i61_s
- ret void
-}
-
-define void @i62_ls(i62 zeroext %x) nounwind {
- store i62 %x, i62* @i62_s
- ret void
-}
-
-define void @i63_ls(i63 zeroext %x) nounwind {
- store i63 %x, i63* @i63_s
- ret void
-}
-
-define void @i64_ls(i64 zeroext %x) nounwind {
- store i64 %x, i64* @i64_s
- ret void
-}
-
-define void @i65_ls(i65 zeroext %x) nounwind {
- store i65 %x, i65* @i65_s
- ret void
-}
-
-define void @i66_ls(i66 zeroext %x) nounwind {
- store i66 %x, i66* @i66_s
- ret void
-}
-
-define void @i67_ls(i67 zeroext %x) nounwind {
- store i67 %x, i67* @i67_s
- ret void
-}
-
-define void @i68_ls(i68 zeroext %x) nounwind {
- store i68 %x, i68* @i68_s
- ret void
-}
-
-define void @i69_ls(i69 zeroext %x) nounwind {
- store i69 %x, i69* @i69_s
- ret void
-}
-
-define void @i70_ls(i70 zeroext %x) nounwind {
- store i70 %x, i70* @i70_s
- ret void
-}
-
-define void @i71_ls(i71 zeroext %x) nounwind {
- store i71 %x, i71* @i71_s
- ret void
-}
-
-define void @i72_ls(i72 zeroext %x) nounwind {
- store i72 %x, i72* @i72_s
- ret void
-}
-
-define void @i73_ls(i73 zeroext %x) nounwind {
- store i73 %x, i73* @i73_s
- ret void
-}
-
-define void @i74_ls(i74 zeroext %x) nounwind {
- store i74 %x, i74* @i74_s
- ret void
-}
-
-define void @i75_ls(i75 zeroext %x) nounwind {
- store i75 %x, i75* @i75_s
- ret void
-}
-
-define void @i76_ls(i76 zeroext %x) nounwind {
- store i76 %x, i76* @i76_s
- ret void
-}
-
-define void @i77_ls(i77 zeroext %x) nounwind {
- store i77 %x, i77* @i77_s
- ret void
-}
-
-define void @i78_ls(i78 zeroext %x) nounwind {
- store i78 %x, i78* @i78_s
- ret void
-}
-
-define void @i79_ls(i79 zeroext %x) nounwind {
- store i79 %x, i79* @i79_s
- ret void
-}
-
-define void @i80_ls(i80 zeroext %x) nounwind {
- store i80 %x, i80* @i80_s
- ret void
-}
-
-define void @i81_ls(i81 zeroext %x) nounwind {
- store i81 %x, i81* @i81_s
- ret void
-}
-
-define void @i82_ls(i82 zeroext %x) nounwind {
- store i82 %x, i82* @i82_s
- ret void
-}
-
-define void @i83_ls(i83 zeroext %x) nounwind {
- store i83 %x, i83* @i83_s
- ret void
-}
-
-define void @i84_ls(i84 zeroext %x) nounwind {
- store i84 %x, i84* @i84_s
- ret void
-}
-
-define void @i85_ls(i85 zeroext %x) nounwind {
- store i85 %x, i85* @i85_s
- ret void
-}
-
-define void @i86_ls(i86 zeroext %x) nounwind {
- store i86 %x, i86* @i86_s
- ret void
-}
-
-define void @i87_ls(i87 zeroext %x) nounwind {
- store i87 %x, i87* @i87_s
- ret void
-}
-
-define void @i88_ls(i88 zeroext %x) nounwind {
- store i88 %x, i88* @i88_s
- ret void
-}
-
-define void @i89_ls(i89 zeroext %x) nounwind {
- store i89 %x, i89* @i89_s
- ret void
-}
-
-define void @i90_ls(i90 zeroext %x) nounwind {
- store i90 %x, i90* @i90_s
- ret void
-}
-
-define void @i91_ls(i91 zeroext %x) nounwind {
- store i91 %x, i91* @i91_s
- ret void
-}
-
-define void @i92_ls(i92 zeroext %x) nounwind {
- store i92 %x, i92* @i92_s
- ret void
-}
-
-define void @i93_ls(i93 zeroext %x) nounwind {
- store i93 %x, i93* @i93_s
- ret void
-}
-
-define void @i94_ls(i94 zeroext %x) nounwind {
- store i94 %x, i94* @i94_s
- ret void
-}
-
-define void @i95_ls(i95 zeroext %x) nounwind {
- store i95 %x, i95* @i95_s
- ret void
-}
-
-define void @i96_ls(i96 zeroext %x) nounwind {
- store i96 %x, i96* @i96_s
- ret void
-}
-
-define void @i97_ls(i97 zeroext %x) nounwind {
- store i97 %x, i97* @i97_s
- ret void
-}
-
-define void @i98_ls(i98 zeroext %x) nounwind {
- store i98 %x, i98* @i98_s
- ret void
-}
-
-define void @i99_ls(i99 zeroext %x) nounwind {
- store i99 %x, i99* @i99_s
- ret void
-}
-
-define void @i100_ls(i100 zeroext %x) nounwind {
- store i100 %x, i100* @i100_s
- ret void
-}
-
-define void @i101_ls(i101 zeroext %x) nounwind {
- store i101 %x, i101* @i101_s
- ret void
-}
-
-define void @i102_ls(i102 zeroext %x) nounwind {
- store i102 %x, i102* @i102_s
- ret void
-}
-
-define void @i103_ls(i103 zeroext %x) nounwind {
- store i103 %x, i103* @i103_s
- ret void
-}
-
-define void @i104_ls(i104 zeroext %x) nounwind {
- store i104 %x, i104* @i104_s
- ret void
-}
-
-define void @i105_ls(i105 zeroext %x) nounwind {
- store i105 %x, i105* @i105_s
- ret void
-}
-
-define void @i106_ls(i106 zeroext %x) nounwind {
- store i106 %x, i106* @i106_s
- ret void
-}
-
-define void @i107_ls(i107 zeroext %x) nounwind {
- store i107 %x, i107* @i107_s
- ret void
-}
-
-define void @i108_ls(i108 zeroext %x) nounwind {
- store i108 %x, i108* @i108_s
- ret void
-}
-
-define void @i109_ls(i109 zeroext %x) nounwind {
- store i109 %x, i109* @i109_s
- ret void
-}
-
-define void @i110_ls(i110 zeroext %x) nounwind {
- store i110 %x, i110* @i110_s
- ret void
-}
-
-define void @i111_ls(i111 zeroext %x) nounwind {
- store i111 %x, i111* @i111_s
- ret void
-}
-
-define void @i112_ls(i112 zeroext %x) nounwind {
- store i112 %x, i112* @i112_s
- ret void
-}
-
-define void @i113_ls(i113 zeroext %x) nounwind {
- store i113 %x, i113* @i113_s
- ret void
-}
-
-define void @i114_ls(i114 zeroext %x) nounwind {
- store i114 %x, i114* @i114_s
- ret void
-}
-
-define void @i115_ls(i115 zeroext %x) nounwind {
- store i115 %x, i115* @i115_s
- ret void
-}
-
-define void @i116_ls(i116 zeroext %x) nounwind {
- store i116 %x, i116* @i116_s
- ret void
-}
-
-define void @i117_ls(i117 zeroext %x) nounwind {
- store i117 %x, i117* @i117_s
- ret void
-}
-
-define void @i118_ls(i118 zeroext %x) nounwind {
- store i118 %x, i118* @i118_s
- ret void
-}
-
-define void @i119_ls(i119 zeroext %x) nounwind {
- store i119 %x, i119* @i119_s
- ret void
-}
-
-define void @i120_ls(i120 zeroext %x) nounwind {
- store i120 %x, i120* @i120_s
- ret void
-}
-
-define void @i121_ls(i121 zeroext %x) nounwind {
- store i121 %x, i121* @i121_s
- ret void
-}
-
-define void @i122_ls(i122 zeroext %x) nounwind {
- store i122 %x, i122* @i122_s
- ret void
-}
-
-define void @i123_ls(i123 zeroext %x) nounwind {
- store i123 %x, i123* @i123_s
- ret void
-}
-
-define void @i124_ls(i124 zeroext %x) nounwind {
- store i124 %x, i124* @i124_s
- ret void
-}
-
-define void @i125_ls(i125 zeroext %x) nounwind {
- store i125 %x, i125* @i125_s
- ret void
-}
-
-define void @i126_ls(i126 zeroext %x) nounwind {
- store i126 %x, i126* @i126_s
- ret void
-}
-
-define void @i127_ls(i127 zeroext %x) nounwind {
- store i127 %x, i127* @i127_s
- ret void
-}
-
-define void @i128_ls(i128 zeroext %x) nounwind {
- store i128 %x, i128* @i128_s
- ret void
-}
-
-define void @i129_ls(i129 zeroext %x) nounwind {
- store i129 %x, i129* @i129_s
- ret void
-}
-
-define void @i130_ls(i130 zeroext %x) nounwind {
- store i130 %x, i130* @i130_s
- ret void
-}
-
-define void @i131_ls(i131 zeroext %x) nounwind {
- store i131 %x, i131* @i131_s
- ret void
-}
-
-define void @i132_ls(i132 zeroext %x) nounwind {
- store i132 %x, i132* @i132_s
- ret void
-}
-
-define void @i133_ls(i133 zeroext %x) nounwind {
- store i133 %x, i133* @i133_s
- ret void
-}
-
-define void @i134_ls(i134 zeroext %x) nounwind {
- store i134 %x, i134* @i134_s
- ret void
-}
-
-define void @i135_ls(i135 zeroext %x) nounwind {
- store i135 %x, i135* @i135_s
- ret void
-}
-
-define void @i136_ls(i136 zeroext %x) nounwind {
- store i136 %x, i136* @i136_s
- ret void
-}
-
-define void @i137_ls(i137 zeroext %x) nounwind {
- store i137 %x, i137* @i137_s
- ret void
-}
-
-define void @i138_ls(i138 zeroext %x) nounwind {
- store i138 %x, i138* @i138_s
- ret void
-}
-
-define void @i139_ls(i139 zeroext %x) nounwind {
- store i139 %x, i139* @i139_s
- ret void
-}
-
-define void @i140_ls(i140 zeroext %x) nounwind {
- store i140 %x, i140* @i140_s
- ret void
-}
-
-define void @i141_ls(i141 zeroext %x) nounwind {
- store i141 %x, i141* @i141_s
- ret void
-}
-
-define void @i142_ls(i142 zeroext %x) nounwind {
- store i142 %x, i142* @i142_s
- ret void
-}
-
-define void @i143_ls(i143 zeroext %x) nounwind {
- store i143 %x, i143* @i143_s
- ret void
-}
-
-define void @i144_ls(i144 zeroext %x) nounwind {
- store i144 %x, i144* @i144_s
- ret void
-}
-
-define void @i145_ls(i145 zeroext %x) nounwind {
- store i145 %x, i145* @i145_s
- ret void
-}
-
-define void @i146_ls(i146 zeroext %x) nounwind {
- store i146 %x, i146* @i146_s
- ret void
-}
-
-define void @i147_ls(i147 zeroext %x) nounwind {
- store i147 %x, i147* @i147_s
- ret void
-}
-
-define void @i148_ls(i148 zeroext %x) nounwind {
- store i148 %x, i148* @i148_s
- ret void
-}
-
-define void @i149_ls(i149 zeroext %x) nounwind {
- store i149 %x, i149* @i149_s
- ret void
-}
-
-define void @i150_ls(i150 zeroext %x) nounwind {
- store i150 %x, i150* @i150_s
- ret void
-}
-
-define void @i151_ls(i151 zeroext %x) nounwind {
- store i151 %x, i151* @i151_s
- ret void
-}
-
-define void @i152_ls(i152 zeroext %x) nounwind {
- store i152 %x, i152* @i152_s
- ret void
-}
-
-define void @i153_ls(i153 zeroext %x) nounwind {
- store i153 %x, i153* @i153_s
- ret void
-}
-
-define void @i154_ls(i154 zeroext %x) nounwind {
- store i154 %x, i154* @i154_s
- ret void
-}
-
-define void @i155_ls(i155 zeroext %x) nounwind {
- store i155 %x, i155* @i155_s
- ret void
-}
-
-define void @i156_ls(i156 zeroext %x) nounwind {
- store i156 %x, i156* @i156_s
- ret void
-}
-
-define void @i157_ls(i157 zeroext %x) nounwind {
- store i157 %x, i157* @i157_s
- ret void
-}
-
-define void @i158_ls(i158 zeroext %x) nounwind {
- store i158 %x, i158* @i158_s
- ret void
-}
-
-define void @i159_ls(i159 zeroext %x) nounwind {
- store i159 %x, i159* @i159_s
- ret void
-}
-
-define void @i160_ls(i160 zeroext %x) nounwind {
- store i160 %x, i160* @i160_s
- ret void
-}
-
-define void @i161_ls(i161 zeroext %x) nounwind {
- store i161 %x, i161* @i161_s
- ret void
-}
-
-define void @i162_ls(i162 zeroext %x) nounwind {
- store i162 %x, i162* @i162_s
- ret void
-}
-
-define void @i163_ls(i163 zeroext %x) nounwind {
- store i163 %x, i163* @i163_s
- ret void
-}
-
-define void @i164_ls(i164 zeroext %x) nounwind {
- store i164 %x, i164* @i164_s
- ret void
-}
-
-define void @i165_ls(i165 zeroext %x) nounwind {
- store i165 %x, i165* @i165_s
- ret void
-}
-
-define void @i166_ls(i166 zeroext %x) nounwind {
- store i166 %x, i166* @i166_s
- ret void
-}
-
-define void @i167_ls(i167 zeroext %x) nounwind {
- store i167 %x, i167* @i167_s
- ret void
-}
-
-define void @i168_ls(i168 zeroext %x) nounwind {
- store i168 %x, i168* @i168_s
- ret void
-}
-
-define void @i169_ls(i169 zeroext %x) nounwind {
- store i169 %x, i169* @i169_s
- ret void
-}
-
-define void @i170_ls(i170 zeroext %x) nounwind {
- store i170 %x, i170* @i170_s
- ret void
-}
-
-define void @i171_ls(i171 zeroext %x) nounwind {
- store i171 %x, i171* @i171_s
- ret void
-}
-
-define void @i172_ls(i172 zeroext %x) nounwind {
- store i172 %x, i172* @i172_s
- ret void
-}
-
-define void @i173_ls(i173 zeroext %x) nounwind {
- store i173 %x, i173* @i173_s
- ret void
-}
-
-define void @i174_ls(i174 zeroext %x) nounwind {
- store i174 %x, i174* @i174_s
- ret void
-}
-
-define void @i175_ls(i175 zeroext %x) nounwind {
- store i175 %x, i175* @i175_s
- ret void
-}
-
-define void @i176_ls(i176 zeroext %x) nounwind {
- store i176 %x, i176* @i176_s
- ret void
-}
-
-define void @i177_ls(i177 zeroext %x) nounwind {
- store i177 %x, i177* @i177_s
- ret void
-}
-
-define void @i178_ls(i178 zeroext %x) nounwind {
- store i178 %x, i178* @i178_s
- ret void
-}
-
-define void @i179_ls(i179 zeroext %x) nounwind {
- store i179 %x, i179* @i179_s
- ret void
-}
-
-define void @i180_ls(i180 zeroext %x) nounwind {
- store i180 %x, i180* @i180_s
- ret void
-}
-
-define void @i181_ls(i181 zeroext %x) nounwind {
- store i181 %x, i181* @i181_s
- ret void
-}
-
-define void @i182_ls(i182 zeroext %x) nounwind {
- store i182 %x, i182* @i182_s
- ret void
-}
-
-define void @i183_ls(i183 zeroext %x) nounwind {
- store i183 %x, i183* @i183_s
- ret void
-}
-
-define void @i184_ls(i184 zeroext %x) nounwind {
- store i184 %x, i184* @i184_s
- ret void
-}
-
-define void @i185_ls(i185 zeroext %x) nounwind {
- store i185 %x, i185* @i185_s
- ret void
-}
-
-define void @i186_ls(i186 zeroext %x) nounwind {
- store i186 %x, i186* @i186_s
- ret void
-}
-
-define void @i187_ls(i187 zeroext %x) nounwind {
- store i187 %x, i187* @i187_s
- ret void
-}
-
-define void @i188_ls(i188 zeroext %x) nounwind {
- store i188 %x, i188* @i188_s
- ret void
-}
-
-define void @i189_ls(i189 zeroext %x) nounwind {
- store i189 %x, i189* @i189_s
- ret void
-}
-
-define void @i190_ls(i190 zeroext %x) nounwind {
- store i190 %x, i190* @i190_s
- ret void
-}
-
-define void @i191_ls(i191 zeroext %x) nounwind {
- store i191 %x, i191* @i191_s
- ret void
-}
-
-define void @i192_ls(i192 zeroext %x) nounwind {
- store i192 %x, i192* @i192_s
- ret void
-}
-
-define void @i193_ls(i193 zeroext %x) nounwind {
- store i193 %x, i193* @i193_s
- ret void
-}
-
-define void @i194_ls(i194 zeroext %x) nounwind {
- store i194 %x, i194* @i194_s
- ret void
-}
-
-define void @i195_ls(i195 zeroext %x) nounwind {
- store i195 %x, i195* @i195_s
- ret void
-}
-
-define void @i196_ls(i196 zeroext %x) nounwind {
- store i196 %x, i196* @i196_s
- ret void
-}
-
-define void @i197_ls(i197 zeroext %x) nounwind {
- store i197 %x, i197* @i197_s
- ret void
-}
-
-define void @i198_ls(i198 zeroext %x) nounwind {
- store i198 %x, i198* @i198_s
- ret void
-}
-
-define void @i199_ls(i199 zeroext %x) nounwind {
- store i199 %x, i199* @i199_s
- ret void
-}
-
-define void @i200_ls(i200 zeroext %x) nounwind {
- store i200 %x, i200* @i200_s
- ret void
-}
-
-define void @i201_ls(i201 zeroext %x) nounwind {
- store i201 %x, i201* @i201_s
- ret void
-}
-
-define void @i202_ls(i202 zeroext %x) nounwind {
- store i202 %x, i202* @i202_s
- ret void
-}
-
-define void @i203_ls(i203 zeroext %x) nounwind {
- store i203 %x, i203* @i203_s
- ret void
-}
-
-define void @i204_ls(i204 zeroext %x) nounwind {
- store i204 %x, i204* @i204_s
- ret void
-}
-
-define void @i205_ls(i205 zeroext %x) nounwind {
- store i205 %x, i205* @i205_s
- ret void
-}
-
-define void @i206_ls(i206 zeroext %x) nounwind {
- store i206 %x, i206* @i206_s
- ret void
-}
-
-define void @i207_ls(i207 zeroext %x) nounwind {
- store i207 %x, i207* @i207_s
- ret void
-}
-
-define void @i208_ls(i208 zeroext %x) nounwind {
- store i208 %x, i208* @i208_s
- ret void
-}
-
-define void @i209_ls(i209 zeroext %x) nounwind {
- store i209 %x, i209* @i209_s
- ret void
-}
-
-define void @i210_ls(i210 zeroext %x) nounwind {
- store i210 %x, i210* @i210_s
- ret void
-}
-
-define void @i211_ls(i211 zeroext %x) nounwind {
- store i211 %x, i211* @i211_s
- ret void
-}
-
-define void @i212_ls(i212 zeroext %x) nounwind {
- store i212 %x, i212* @i212_s
- ret void
-}
-
-define void @i213_ls(i213 zeroext %x) nounwind {
- store i213 %x, i213* @i213_s
- ret void
-}
-
-define void @i214_ls(i214 zeroext %x) nounwind {
- store i214 %x, i214* @i214_s
- ret void
-}
-
-define void @i215_ls(i215 zeroext %x) nounwind {
- store i215 %x, i215* @i215_s
- ret void
-}
-
-define void @i216_ls(i216 zeroext %x) nounwind {
- store i216 %x, i216* @i216_s
- ret void
-}
-
-define void @i217_ls(i217 zeroext %x) nounwind {
- store i217 %x, i217* @i217_s
- ret void
-}
-
-define void @i218_ls(i218 zeroext %x) nounwind {
- store i218 %x, i218* @i218_s
- ret void
-}
-
-define void @i219_ls(i219 zeroext %x) nounwind {
- store i219 %x, i219* @i219_s
- ret void
-}
-
-define void @i220_ls(i220 zeroext %x) nounwind {
- store i220 %x, i220* @i220_s
- ret void
-}
-
-define void @i221_ls(i221 zeroext %x) nounwind {
- store i221 %x, i221* @i221_s
- ret void
-}
-
-define void @i222_ls(i222 zeroext %x) nounwind {
- store i222 %x, i222* @i222_s
- ret void
-}
-
-define void @i223_ls(i223 zeroext %x) nounwind {
- store i223 %x, i223* @i223_s
- ret void
-}
-
-define void @i224_ls(i224 zeroext %x) nounwind {
- store i224 %x, i224* @i224_s
- ret void
-}
-
-define void @i225_ls(i225 zeroext %x) nounwind {
- store i225 %x, i225* @i225_s
- ret void
-}
-
-define void @i226_ls(i226 zeroext %x) nounwind {
- store i226 %x, i226* @i226_s
- ret void
-}
-
-define void @i227_ls(i227 zeroext %x) nounwind {
- store i227 %x, i227* @i227_s
- ret void
-}
-
-define void @i228_ls(i228 zeroext %x) nounwind {
- store i228 %x, i228* @i228_s
- ret void
-}
-
-define void @i229_ls(i229 zeroext %x) nounwind {
- store i229 %x, i229* @i229_s
- ret void
-}
-
-define void @i230_ls(i230 zeroext %x) nounwind {
- store i230 %x, i230* @i230_s
- ret void
-}
-
-define void @i231_ls(i231 zeroext %x) nounwind {
- store i231 %x, i231* @i231_s
- ret void
-}
-
-define void @i232_ls(i232 zeroext %x) nounwind {
- store i232 %x, i232* @i232_s
- ret void
-}
-
-define void @i233_ls(i233 zeroext %x) nounwind {
- store i233 %x, i233* @i233_s
- ret void
-}
-
-define void @i234_ls(i234 zeroext %x) nounwind {
- store i234 %x, i234* @i234_s
- ret void
-}
-
-define void @i235_ls(i235 zeroext %x) nounwind {
- store i235 %x, i235* @i235_s
- ret void
-}
-
-define void @i236_ls(i236 zeroext %x) nounwind {
- store i236 %x, i236* @i236_s
- ret void
-}
-
-define void @i237_ls(i237 zeroext %x) nounwind {
- store i237 %x, i237* @i237_s
- ret void
-}
-
-define void @i238_ls(i238 zeroext %x) nounwind {
- store i238 %x, i238* @i238_s
- ret void
-}
-
-define void @i239_ls(i239 zeroext %x) nounwind {
- store i239 %x, i239* @i239_s
- ret void
-}
-
-define void @i240_ls(i240 zeroext %x) nounwind {
- store i240 %x, i240* @i240_s
- ret void
-}
-
-define void @i241_ls(i241 zeroext %x) nounwind {
- store i241 %x, i241* @i241_s
- ret void
-}
-
-define void @i242_ls(i242 zeroext %x) nounwind {
- store i242 %x, i242* @i242_s
- ret void
-}
-
-define void @i243_ls(i243 zeroext %x) nounwind {
- store i243 %x, i243* @i243_s
- ret void
-}
-
-define void @i244_ls(i244 zeroext %x) nounwind {
- store i244 %x, i244* @i244_s
- ret void
-}
-
-define void @i245_ls(i245 zeroext %x) nounwind {
- store i245 %x, i245* @i245_s
- ret void
-}
-
-define void @i246_ls(i246 zeroext %x) nounwind {
- store i246 %x, i246* @i246_s
- ret void
-}
-
-define void @i247_ls(i247 zeroext %x) nounwind {
- store i247 %x, i247* @i247_s
- ret void
-}
-
-define void @i248_ls(i248 zeroext %x) nounwind {
- store i248 %x, i248* @i248_s
- ret void
-}
-
-define void @i249_ls(i249 zeroext %x) nounwind {
- store i249 %x, i249* @i249_s
- ret void
-}
-
-define void @i250_ls(i250 zeroext %x) nounwind {
- store i250 %x, i250* @i250_s
- ret void
-}
-
-define void @i251_ls(i251 zeroext %x) nounwind {
- store i251 %x, i251* @i251_s
- ret void
-}
-
-define void @i252_ls(i252 zeroext %x) nounwind {
- store i252 %x, i252* @i252_s
- ret void
-}
-
-define void @i253_ls(i253 zeroext %x) nounwind {
- store i253 %x, i253* @i253_s
- ret void
-}
-
-define void @i254_ls(i254 zeroext %x) nounwind {
- store i254 %x, i254* @i254_s
- ret void
-}
-
-define void @i255_ls(i255 zeroext %x) nounwind {
- store i255 %x, i255* @i255_s
- ret void
-}
-
-define void @i256_ls(i256 zeroext %x) nounwind {
- store i256 %x, i256* @i256_s
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/BasicInstrs.ll b/libclamav/c++/llvm/test/CodeGen/Generic/BasicInstrs.ll
deleted file mode 100644
index 578431e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/BasicInstrs.ll
+++ /dev/null
@@ -1,54 +0,0 @@
-; New testcase, this contains a bunch of simple instructions that should be
-; handled by a code generator.
-
-; RUN: llc < %s
-
-define i32 @add(i32 %A, i32 %B) {
- %R = add i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @sub(i32 %A, i32 %B) {
- %R = sub i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @mul(i32 %A, i32 %B) {
- %R = mul i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @sdiv(i32 %A, i32 %B) {
- %R = sdiv i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @udiv(i32 %A, i32 %B) {
- %R = udiv i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @srem(i32 %A, i32 %B) {
- %R = srem i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @urem(i32 %A, i32 %B) {
- %R = urem i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @and(i32 %A, i32 %B) {
- %R = and i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @or(i32 %A, i32 %B) {
- %R = or i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @xor(i32 %A, i32 %B) {
- %R = xor i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/BurgBadRegAlloc.ll b/libclamav/c++/llvm/test/CodeGen/Generic/BurgBadRegAlloc.ll
deleted file mode 100644
index 99d856a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/BurgBadRegAlloc.ll
+++ /dev/null
@@ -1,829 +0,0 @@
-; RUN: llc < %s
-
-;; Register allocation is doing a very poor job on this routine from yyparse
-;; in Burg:
-;; -- at least two long-lived values are being allocated to %o? registers
-;; -- even worse, those registers are being saved and restored repeatedly
-;; at function calls, even though there are no intervening uses.
-;; -- outgoing args of some function calls have to be swapped, causing
-;; another write/read from stack to do the exchange (use -dregalloc=y).
-;;
-%Arity = type %struct.arity*
- %Binding = type %struct.binding*
- %DeltaCost = type [4 x i16]
- %Dimension = type %struct.dimension*
- %Index_Map = type { i32, %Item_Set* }
- %IntList = type %struct.intlist*
- %Item = type { %DeltaCost, %Rule }
- %ItemArray = type %Item*
- %Item_Set = type %struct.item_set*
- %List = type %struct.list*
- %Mapping = type %struct.mapping*
- %NonTerminal = type %struct.nonterminal*
- %Operator = type %struct.operator*
- %Pattern = type %struct.pattern*
- %PatternAST = type %struct.patternAST*
- %Plank = type %struct.plank*
- %PlankMap = type %struct.plankMap*
- %ReadFn = type i32 ()*
- %Rule = type %struct.rule*
- %RuleAST = type %struct.ruleAST*
- %StateMap = type %struct.stateMap*
- %StrTableElement = type %struct.strTableElement*
- %Symbol = type %struct.symbol*
- %Table = type %struct.table*
- %YYSTYPE = type { %IntList }
- %struct.arity = type { i32, %List }
- %struct.binding = type { i8*, i32 }
- %struct.dimension = type { i16*, %Index_Map, %Mapping, i32, %PlankMap }
- %struct.index_map = type { i32, %Item_Set* }
- %struct.intlist = type { i32, %IntList }
- %struct.item = type { %DeltaCost, %Rule }
- %struct.item_set = type { i32, i32, %Operator, [2 x %Item_Set], %Item_Set, i16*, %ItemArray, %ItemArray }
- %struct.list = type { i8*, %List }
- %struct.mapping = type { %List*, i32, i32, i32, %Item_Set* }
- %struct.nonterminal = type { i8*, i32, i32, i32, %PlankMap, %Rule }
- %struct.operator = type { i8*, i32, i32, i32, i32, i32, %Table }
- %struct.pattern = type { %NonTerminal, %Operator, [2 x %NonTerminal] }
- %struct.patternAST = type { %Symbol, i8*, %List }
- %struct.plank = type { i8*, %List, i32 }
- %struct.plankMap = type { %List, i32, %StateMap }
- %struct.rule = type { %DeltaCost, i32, i32, i32, %NonTerminal, %Pattern, i32 }
- %struct.ruleAST = type { i8*, %PatternAST, i32, %IntList, %Rule, %StrTableElement, %StrTableElement }
- %struct.stateMap = type { i8*, %Plank, i32, i16* }
- %struct.strTableElement = type { i8*, %IntList, i8* }
- %struct.symbol = type { i8*, i32, { %Operator } }
- %struct.table = type { %Operator, %List, i16*, [2 x %Dimension], %Item_Set* }
-@yylval = external global %YYSTYPE ; <%YYSTYPE*> [#uses=1]
-@yylhs = external global [25 x i16] ; <[25 x i16]*> [#uses=1]
-@yylen = external global [25 x i16] ; <[25 x i16]*> [#uses=1]
-@yydefred = external global [43 x i16] ; <[43 x i16]*> [#uses=1]
-@yydgoto = external global [12 x i16] ; <[12 x i16]*> [#uses=1]
-@yysindex = external global [43 x i16] ; <[43 x i16]*> [#uses=2]
-@yyrindex = external global [43 x i16] ; <[43 x i16]*> [#uses=1]
-@yygindex = external global [12 x i16] ; <[12 x i16]*> [#uses=1]
-@yytable = external global [263 x i16] ; <[263 x i16]*> [#uses=4]
-@yycheck = external global [263 x i16] ; <[263 x i16]*> [#uses=4]
-@yynerrs = external global i32 ; <i32*> [#uses=3]
-@yyerrflag = external global i32 ; <i32*> [#uses=6]
-@yychar = external global i32 ; <i32*> [#uses=15]
-@yyssp = external global i16* ; <i16**> [#uses=15]
-@yyvsp = external global %YYSTYPE* ; <%YYSTYPE**> [#uses=30]
-@yyval = external global %YYSTYPE ; <%YYSTYPE*> [#uses=1]
-@yyss = external global i16* ; <i16**> [#uses=3]
-@yysslim = external global i16* ; <i16**> [#uses=3]
-@yyvs = external global %YYSTYPE* ; <%YYSTYPE**> [#uses=1]
-@.LC01 = external global [13 x i8] ; <[13 x i8]*> [#uses=1]
-@.LC1 = external global [20 x i8] ; <[20 x i8]*> [#uses=1]
-
-define i32 @yyparse() {
-bb0:
- store i32 0, i32* @yynerrs
- store i32 0, i32* @yyerrflag
- store i32 -1, i32* @yychar
- %reg113 = load i16** @yyss ; <i16*> [#uses=1]
- %cond581 = icmp ne i16* %reg113, null ; <i1> [#uses=1]
- br i1 %cond581, label %bb3, label %bb2
-
-bb2: ; preds = %bb0
- %reg584 = call i32 @yygrowstack( ) ; <i32> [#uses=1]
- %cond584 = icmp ne i32 %reg584, 0 ; <i1> [#uses=1]
- br i1 %cond584, label %bb113, label %bb3
-
-bb3: ; preds = %bb2, %bb0
- %reg115 = load i16** @yyss ; <i16*> [#uses=1]
- store i16* %reg115, i16** @yyssp
- %reg116 = load %YYSTYPE** @yyvs ; <%YYSTYPE*> [#uses=1]
- store %YYSTYPE* %reg116, %YYSTYPE** @yyvsp
- %reg117 = load i16** @yyssp ; <i16*> [#uses=1]
- store i16 0, i16* %reg117
- br label %bb4
-
-bb4: ; preds = %bb112, %bb102, %bb35, %bb31, %bb15, %bb14, %bb3
- %reg458 = phi i32 [ %reg476, %bb112 ], [ 1, %bb102 ], [ %reg458, %bb35 ], [ %cast768, %bb31 ], [ %cast658, %bb15 ], [ %cast658, %bb14 ], [ 0, %bb3 ] ; <i32> [#uses=2]
- %reg458-idxcast = zext i32 %reg458 to i64 ; <i64> [#uses=3]
- %reg594 = getelementptr [43 x i16]* @yydefred, i64 0, i64 %reg458-idxcast ; <i16*> [#uses=1]
- %reg125 = load i16* %reg594 ; <i16> [#uses=1]
- %cast599 = sext i16 %reg125 to i32 ; <i32> [#uses=2]
- %cond600 = icmp ne i32 %cast599, 0 ; <i1> [#uses=1]
- br i1 %cond600, label %bb36, label %bb5
-
-bb5: ; preds = %bb4
- %reg127 = load i32* @yychar ; <i32> [#uses=1]
- %cond603 = icmp sge i32 %reg127, 0 ; <i1> [#uses=1]
- br i1 %cond603, label %bb8, label %bb6
-
-bb6: ; preds = %bb5
- %reg607 = call i32 @yylex( ) ; <i32> [#uses=1]
- store i32 %reg607, i32* @yychar
- %reg129 = load i32* @yychar ; <i32> [#uses=1]
- %cond609 = icmp sge i32 %reg129, 0 ; <i1> [#uses=1]
- br i1 %cond609, label %bb8, label %bb7
-
-bb7: ; preds = %bb6
- store i32 0, i32* @yychar
- br label %bb8
-
-bb8: ; preds = %bb7, %bb6, %bb5
- %reg615 = getelementptr [43 x i16]* @yysindex, i64 0, i64 %reg458-idxcast ; <i16*> [#uses=1]
- %reg137 = load i16* %reg615 ; <i16> [#uses=1]
- %cast620 = sext i16 %reg137 to i32 ; <i32> [#uses=2]
- %cond621 = icmp eq i32 %cast620, 0 ; <i1> [#uses=1]
- br i1 %cond621, label %bb16, label %bb9
-
-bb9: ; preds = %bb8
- %reg139 = load i32* @yychar ; <i32> [#uses=2]
- %reg460 = add i32 %cast620, %reg139 ; <i32> [#uses=3]
- %cond624 = icmp slt i32 %reg460, 0 ; <i1> [#uses=1]
- br i1 %cond624, label %bb16, label %bb10
-
-bb10: ; preds = %bb9
- %cond627 = icmp sgt i32 %reg460, 262 ; <i1> [#uses=1]
- br i1 %cond627, label %bb16, label %bb11
-
-bb11: ; preds = %bb10
- %reg460-idxcast = sext i32 %reg460 to i64 ; <i64> [#uses=2]
- %reg632 = getelementptr [263 x i16]* @yycheck, i64 0, i64 %reg460-idxcast ; <i16*> [#uses=1]
- %reg148 = load i16* %reg632 ; <i16> [#uses=1]
- %cast637 = sext i16 %reg148 to i32 ; <i32> [#uses=1]
- %cond639 = icmp ne i32 %cast637, %reg139 ; <i1> [#uses=1]
- br i1 %cond639, label %bb16, label %bb12
-
-bb12: ; preds = %bb11
- %reg150 = load i16** @yyssp ; <i16*> [#uses=1]
- %cast640 = bitcast i16* %reg150 to i8* ; <i8*> [#uses=1]
- %reg151 = load i16** @yysslim ; <i16*> [#uses=1]
- %cast641 = bitcast i16* %reg151 to i8* ; <i8*> [#uses=1]
- %cond642 = icmp ult i8* %cast640, %cast641 ; <i1> [#uses=1]
- br i1 %cond642, label %bb14, label %bb13
-
-bb13: ; preds = %bb12
- %reg644 = call i32 @yygrowstack( ) ; <i32> [#uses=1]
- %cond644 = icmp ne i32 %reg644, 0 ; <i1> [#uses=1]
- br i1 %cond644, label %bb113, label %bb14
-
-bb14: ; preds = %bb13, %bb12
- %reg153 = load i16** @yyssp ; <i16*> [#uses=1]
- %reg647 = getelementptr i16* %reg153, i64 1 ; <i16*> [#uses=2]
- store i16* %reg647, i16** @yyssp
- %reg653 = getelementptr [263 x i16]* @yytable, i64 0, i64 %reg460-idxcast ; <i16*> [#uses=1]
- %reg162 = load i16* %reg653 ; <i16> [#uses=2]
- %cast658 = sext i16 %reg162 to i32 ; <i32> [#uses=2]
- store i16 %reg162, i16* %reg647
- %reg164 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=2]
- %reg661 = getelementptr %YYSTYPE* %reg164, i64 1 ; <%YYSTYPE*> [#uses=1]
- store %YYSTYPE* %reg661, %YYSTYPE** @yyvsp
- %reg167 = load %IntList* getelementptr (%YYSTYPE* @yylval, i64 0, i32 0) ; <%IntList> [#uses=1]
- %reg661.idx1 = getelementptr %YYSTYPE* %reg164, i64 1, i32 0 ; <%IntList*> [#uses=1]
- store %IntList %reg167, %IntList* %reg661.idx1
- store i32 -1, i32* @yychar
- %reg169 = load i32* @yyerrflag ; <i32> [#uses=2]
- %cond669 = icmp sle i32 %reg169, 0 ; <i1> [#uses=1]
- br i1 %cond669, label %bb4, label %bb15
-
-bb15: ; preds = %bb14
- %reg171 = add i32 %reg169, -1 ; <i32> [#uses=1]
- store i32 %reg171, i32* @yyerrflag
- br label %bb4
-
-bb16: ; preds = %bb11, %bb10, %bb9, %bb8
- %reg677 = getelementptr [43 x i16]* @yyrindex, i64 0, i64 %reg458-idxcast ; <i16*> [#uses=1]
- %reg178 = load i16* %reg677 ; <i16> [#uses=1]
- %cast682 = sext i16 %reg178 to i32 ; <i32> [#uses=2]
- %cond683 = icmp eq i32 %cast682, 0 ; <i1> [#uses=1]
- br i1 %cond683, label %bb21, label %bb17
-
-bb17: ; preds = %bb16
- %reg180 = load i32* @yychar ; <i32> [#uses=2]
- %reg463 = add i32 %cast682, %reg180 ; <i32> [#uses=3]
- %cond686 = icmp slt i32 %reg463, 0 ; <i1> [#uses=1]
- br i1 %cond686, label %bb21, label %bb18
-
-bb18: ; preds = %bb17
- %cond689 = icmp sgt i32 %reg463, 262 ; <i1> [#uses=1]
- br i1 %cond689, label %bb21, label %bb19
-
-bb19: ; preds = %bb18
- %reg463-idxcast = sext i32 %reg463 to i64 ; <i64> [#uses=2]
- %reg694 = getelementptr [263 x i16]* @yycheck, i64 0, i64 %reg463-idxcast ; <i16*> [#uses=1]
- %reg189 = load i16* %reg694 ; <i16> [#uses=1]
- %cast699 = sext i16 %reg189 to i32 ; <i32> [#uses=1]
- %cond701 = icmp ne i32 %cast699, %reg180 ; <i1> [#uses=1]
- br i1 %cond701, label %bb21, label %bb20
-
-bb20: ; preds = %bb19
- %reg704 = getelementptr [263 x i16]* @yytable, i64 0, i64 %reg463-idxcast ; <i16*> [#uses=1]
- %reg197 = load i16* %reg704 ; <i16> [#uses=1]
- %cast709 = sext i16 %reg197 to i32 ; <i32> [#uses=1]
- br label %bb36
-
-bb21: ; preds = %bb19, %bb18, %bb17, %bb16
- %reg198 = load i32* @yyerrflag ; <i32> [#uses=1]
- %cond711 = icmp ne i32 %reg198, 0 ; <i1> [#uses=1]
- br i1 %cond711, label %bb23, label %bb22
-
-bb22: ; preds = %bb21
- call void @yyerror( i8* getelementptr ([13 x i8]* @.LC01, i64 0, i64 0) )
- %reg200 = load i32* @yynerrs ; <i32> [#uses=1]
- %reg201 = add i32 %reg200, 1 ; <i32> [#uses=1]
- store i32 %reg201, i32* @yynerrs
- br label %bb23
-
-bb23: ; preds = %bb22, %bb21
- %reg202 = load i32* @yyerrflag ; <i32> [#uses=1]
- %cond719 = icmp sgt i32 %reg202, 2 ; <i1> [#uses=1]
- br i1 %cond719, label %bb34, label %bb24
-
-bb24: ; preds = %bb23
- store i32 3, i32* @yyerrflag
- %reg241 = load i16** @yyss ; <i16*> [#uses=1]
- %cast778 = bitcast i16* %reg241 to i8* ; <i8*> [#uses=1]
- br label %bb25
-
-bb25: ; preds = %bb33, %bb24
- %reg204 = load i16** @yyssp ; <i16*> [#uses=4]
- %reg206 = load i16* %reg204 ; <i16> [#uses=1]
- %reg206-idxcast = sext i16 %reg206 to i64 ; <i64> [#uses=1]
- %reg727 = getelementptr [43 x i16]* @yysindex, i64 0, i64 %reg206-idxcast ; <i16*> [#uses=1]
- %reg212 = load i16* %reg727 ; <i16> [#uses=2]
- %cast732 = sext i16 %reg212 to i32 ; <i32> [#uses=2]
- %cond733 = icmp eq i32 %cast732, 0 ; <i1> [#uses=1]
- br i1 %cond733, label %bb32, label %bb26
-
-bb26: ; preds = %bb25
- %reg466 = add i32 %cast732, 256 ; <i32> [#uses=2]
- %cond736 = icmp slt i32 %reg466, 0 ; <i1> [#uses=1]
- br i1 %cond736, label %bb32, label %bb27
-
-bb27: ; preds = %bb26
- %cond739 = icmp sgt i32 %reg466, 262 ; <i1> [#uses=1]
- br i1 %cond739, label %bb32, label %bb28
-
-bb28: ; preds = %bb27
- %reg212-idxcast = sext i16 %reg212 to i64 ; <i64> [#uses=1]
- %reg212-idxcast-offset = add i64 %reg212-idxcast, 256 ; <i64> [#uses=2]
- %reg744 = getelementptr [263 x i16]* @yycheck, i64 0, i64 %reg212-idxcast-offset ; <i16*> [#uses=1]
- %reg221 = load i16* %reg744 ; <i16> [#uses=1]
- %cond748 = icmp ne i16 %reg221, 256 ; <i1> [#uses=1]
- br i1 %cond748, label %bb32, label %bb29
-
-bb29: ; preds = %bb28
- %cast750 = bitcast i16* %reg204 to i8* ; <i8*> [#uses=1]
- %reg223 = load i16** @yysslim ; <i16*> [#uses=1]
- %cast751 = bitcast i16* %reg223 to i8* ; <i8*> [#uses=1]
- %cond752 = icmp ult i8* %cast750, %cast751 ; <i1> [#uses=1]
- br i1 %cond752, label %bb31, label %bb30
-
-bb30: ; preds = %bb29
- %reg754 = call i32 @yygrowstack( ) ; <i32> [#uses=1]
- %cond754 = icmp ne i32 %reg754, 0 ; <i1> [#uses=1]
- br i1 %cond754, label %bb113, label %bb31
-
-bb31: ; preds = %bb30, %bb29
- %reg225 = load i16** @yyssp ; <i16*> [#uses=1]
- %reg757 = getelementptr i16* %reg225, i64 1 ; <i16*> [#uses=2]
- store i16* %reg757, i16** @yyssp
- %reg763 = getelementptr [263 x i16]* @yytable, i64 0, i64 %reg212-idxcast-offset ; <i16*> [#uses=1]
- %reg234 = load i16* %reg763 ; <i16> [#uses=2]
- %cast768 = sext i16 %reg234 to i32 ; <i32> [#uses=1]
- store i16 %reg234, i16* %reg757
- %reg236 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=2]
- %reg771 = getelementptr %YYSTYPE* %reg236, i64 1 ; <%YYSTYPE*> [#uses=1]
- store %YYSTYPE* %reg771, %YYSTYPE** @yyvsp
- %reg239 = load %IntList* getelementptr (%YYSTYPE* @yylval, i64 0, i32 0) ; <%IntList> [#uses=1]
- %reg771.idx1 = getelementptr %YYSTYPE* %reg236, i64 1, i32 0 ; <%IntList*> [#uses=1]
- store %IntList %reg239, %IntList* %reg771.idx1
- br label %bb4
-
-bb32: ; preds = %bb28, %bb27, %bb26, %bb25
- %cast777 = bitcast i16* %reg204 to i8* ; <i8*> [#uses=1]
- %cond779 = icmp ule i8* %cast777, %cast778 ; <i1> [#uses=1]
- br i1 %cond779, label %UnifiedExitNode, label %bb33
-
-bb33: ; preds = %bb32
- %reg781 = getelementptr i16* %reg204, i64 -1 ; <i16*> [#uses=1]
- store i16* %reg781, i16** @yyssp
- %reg244 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=1]
- %reg786 = getelementptr %YYSTYPE* %reg244, i64 -1 ; <%YYSTYPE*> [#uses=1]
- store %YYSTYPE* %reg786, %YYSTYPE** @yyvsp
- br label %bb25
-
-bb34: ; preds = %bb23
- %reg246 = load i32* @yychar ; <i32> [#uses=1]
- %cond791 = icmp eq i32 %reg246, 0 ; <i1> [#uses=1]
- br i1 %cond791, label %UnifiedExitNode, label %bb35
-
-bb35: ; preds = %bb34
- store i32 -1, i32* @yychar
- br label %bb4
-
-bb36: ; preds = %bb20, %bb4
- %reg468 = phi i32 [ %cast709, %bb20 ], [ %cast599, %bb4 ] ; <i32> [#uses=31]
- %reg468-idxcast = sext i32 %reg468 to i64 ; <i64> [#uses=2]
- %reg796 = getelementptr [25 x i16]* @yylen, i64 0, i64 %reg468-idxcast ; <i16*> [#uses=1]
- %reg254 = load i16* %reg796 ; <i16> [#uses=2]
- %reg259 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=1]
- %reg254-idxcast = sext i16 %reg254 to i64 ; <i64> [#uses=1]
- %reg254-idxcast-scale = mul i64 %reg254-idxcast, -1 ; <i64> [#uses=1]
- %reg254-idxcast-scale-offset = add i64 %reg254-idxcast-scale, 1 ; <i64> [#uses=1]
- %reg261.idx1 = getelementptr %YYSTYPE* %reg259, i64 %reg254-idxcast-scale-offset, i32 0 ; <%IntList*> [#uses=1]
- %reg261 = load %IntList* %reg261.idx1 ; <%IntList> [#uses=1]
- store %IntList %reg261, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- %cond812 = icmp eq i32 %reg468, 13 ; <i1> [#uses=1]
- br i1 %cond812, label %bb85, label %bb37
-
-bb37: ; preds = %bb36
- %cond814 = icmp sgt i32 %reg468, 13 ; <i1> [#uses=1]
- br i1 %cond814, label %bb56, label %bb38
-
-bb38: ; preds = %bb37
- %cond817 = icmp eq i32 %reg468, 7 ; <i1> [#uses=1]
- br i1 %cond817, label %bb79, label %bb39
-
-bb39: ; preds = %bb38
- %cond819 = icmp sgt i32 %reg468, 7 ; <i1> [#uses=1]
- br i1 %cond819, label %bb48, label %bb40
-
-bb40: ; preds = %bb39
- %cond822 = icmp eq i32 %reg468, 4 ; <i1> [#uses=1]
- br i1 %cond822, label %bb76, label %bb41
-
-bb41: ; preds = %bb40
- %cond824 = icmp sgt i32 %reg468, 4 ; <i1> [#uses=1]
- br i1 %cond824, label %bb45, label %bb42
-
-bb42: ; preds = %bb41
- %cond827 = icmp eq i32 %reg468, 2 ; <i1> [#uses=1]
- br i1 %cond827, label %bb74, label %bb43
-
-bb43: ; preds = %bb42
- %cond829 = icmp eq i32 %reg468, 3 ; <i1> [#uses=1]
- br i1 %cond829, label %bb75, label %bb97
-
-bb45: ; preds = %bb41
- %cond831 = icmp eq i32 %reg468, 5 ; <i1> [#uses=1]
- br i1 %cond831, label %bb77, label %bb46
-
-bb46: ; preds = %bb45
- %cond833 = icmp eq i32 %reg468, 6 ; <i1> [#uses=1]
- br i1 %cond833, label %bb78, label %bb97
-
-bb48: ; preds = %bb39
- %cond835 = icmp eq i32 %reg468, 10 ; <i1> [#uses=1]
- br i1 %cond835, label %bb82, label %bb49
-
-bb49: ; preds = %bb48
- %cond837 = icmp sgt i32 %reg468, 10 ; <i1> [#uses=1]
- br i1 %cond837, label %bb53, label %bb50
-
-bb50: ; preds = %bb49
- %cond840 = icmp eq i32 %reg468, 8 ; <i1> [#uses=1]
- br i1 %cond840, label %bb80, label %bb51
-
-bb51: ; preds = %bb50
- %cond842 = icmp eq i32 %reg468, 9 ; <i1> [#uses=1]
- br i1 %cond842, label %bb81, label %bb97
-
-bb53: ; preds = %bb49
- %cond844 = icmp eq i32 %reg468, 11 ; <i1> [#uses=1]
- br i1 %cond844, label %bb83, label %bb54
-
-bb54: ; preds = %bb53
- %cond846 = icmp eq i32 %reg468, 12 ; <i1> [#uses=1]
- br i1 %cond846, label %bb84, label %bb97
-
-bb56: ; preds = %bb37
- %cond848 = icmp eq i32 %reg468, 19 ; <i1> [#uses=1]
- br i1 %cond848, label %bb91, label %bb57
-
-bb57: ; preds = %bb56
- %cond850 = icmp sgt i32 %reg468, 19 ; <i1> [#uses=1]
- br i1 %cond850, label %bb66, label %bb58
-
-bb58: ; preds = %bb57
- %cond853 = icmp eq i32 %reg468, 16 ; <i1> [#uses=1]
- br i1 %cond853, label %bb88, label %bb59
-
-bb59: ; preds = %bb58
- %cond855 = icmp sgt i32 %reg468, 16 ; <i1> [#uses=1]
- br i1 %cond855, label %bb63, label %bb60
-
-bb60: ; preds = %bb59
- %cond858 = icmp eq i32 %reg468, 14 ; <i1> [#uses=1]
- br i1 %cond858, label %bb86, label %bb61
-
-bb61: ; preds = %bb60
- %cond860 = icmp eq i32 %reg468, 15 ; <i1> [#uses=1]
- br i1 %cond860, label %bb87, label %bb97
-
-bb63: ; preds = %bb59
- %cond862 = icmp eq i32 %reg468, 17 ; <i1> [#uses=1]
- br i1 %cond862, label %bb89, label %bb64
-
-bb64: ; preds = %bb63
- %cond864 = icmp eq i32 %reg468, 18 ; <i1> [#uses=1]
- br i1 %cond864, label %bb90, label %bb97
-
-bb66: ; preds = %bb57
- %cond866 = icmp eq i32 %reg468, 22 ; <i1> [#uses=1]
- br i1 %cond866, label %bb94, label %bb67
-
-bb67: ; preds = %bb66
- %cond868 = icmp sgt i32 %reg468, 22 ; <i1> [#uses=1]
- br i1 %cond868, label %bb71, label %bb68
-
-bb68: ; preds = %bb67
- %cond871 = icmp eq i32 %reg468, 20 ; <i1> [#uses=1]
- br i1 %cond871, label %bb92, label %bb69
-
-bb69: ; preds = %bb68
- %cond873 = icmp eq i32 %reg468, 21 ; <i1> [#uses=1]
- br i1 %cond873, label %bb93, label %bb97
-
-bb71: ; preds = %bb67
- %cond875 = icmp eq i32 %reg468, 23 ; <i1> [#uses=1]
- br i1 %cond875, label %bb95, label %bb72
-
-bb72: ; preds = %bb71
- %cond877 = icmp eq i32 %reg468, 24 ; <i1> [#uses=1]
- br i1 %cond877, label %bb96, label %bb97
-
-bb74: ; preds = %bb42
- call void @yyfinished( )
- br label %bb97
-
-bb75: ; preds = %bb43
- %reg262 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=2]
- %reg264.idx1 = getelementptr %YYSTYPE* %reg262, i64 -2, i32 0 ; <%IntList*> [#uses=1]
- %reg264 = load %IntList* %reg264.idx1 ; <%IntList> [#uses=1]
- %reg265.idx = getelementptr %YYSTYPE* %reg262, i64 0, i32 0 ; <%IntList*> [#uses=1]
- %reg265 = load %IntList* %reg265.idx ; <%IntList> [#uses=1]
- %cast889 = bitcast %IntList %reg265 to %List ; <%List> [#uses=1]
- %cast890 = bitcast %IntList %reg264 to %List ; <%List> [#uses=1]
- call void @doSpec( %List %cast890, %List %cast889 )
- br label %bb97
-
-bb76: ; preds = %bb40
- store %IntList null, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb77: ; preds = %bb45
- %reg269 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=2]
- %cast894 = getelementptr %YYSTYPE* %reg269, i64 0, i32 0 ; <%IntList*> [#uses=1]
- %reg271 = load %IntList* %cast894 ; <%IntList> [#uses=1]
- %reg271.upgrd.1 = bitcast %IntList %reg271 to i8* ; <i8*> [#uses=1]
- %reg272.idx1 = getelementptr %YYSTYPE* %reg269, i64 -1, i32 0 ; <%IntList*> [#uses=1]
- %reg272 = load %IntList* %reg272.idx1 ; <%IntList> [#uses=1]
- %cast901 = bitcast %IntList %reg272 to %List ; <%List> [#uses=1]
- %reg901 = call %List @newList( i8* %reg271.upgrd.1, %List %cast901 ) ; <%List> [#uses=1]
- bitcast %List %reg901 to %IntList ; <%IntList>:0 [#uses=1]
- store %IntList %0, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb78: ; preds = %bb46
- %reg275 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=1]
- %reg277.idx = getelementptr %YYSTYPE* %reg275, i64 0, i32 0 ; <%IntList*> [#uses=1]
- %reg277 = load %IntList* %reg277.idx ; <%IntList> [#uses=1]
- %cast907 = bitcast %IntList %reg277 to %List ; <%List> [#uses=1]
- %reg907 = call %Arity @newArity( i32 -1, %List %cast907 ) ; <%Arity> [#uses=1]
- bitcast %Arity %reg907 to %IntList ; <%IntList>:1 [#uses=1]
- store %IntList %1, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb79: ; preds = %bb38
- store %IntList null, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- %reg281 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=1]
- %cast912 = getelementptr %YYSTYPE* %reg281, i64 0, i32 0 ; <%IntList*> [#uses=1]
- %reg282 = load %IntList* %cast912 ; <%IntList> [#uses=1]
- %reg282.upgrd.2 = bitcast %IntList %reg282 to %List ; <%List> [#uses=1]
- call void @doGram( %List %reg282.upgrd.2 )
- br label %bb97
-
-bb80: ; preds = %bb50
- store %IntList null, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- %reg285 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=1]
- %cast917 = getelementptr %YYSTYPE* %reg285, i64 0, i32 0 ; <%IntList*> [#uses=1]
- %reg286 = load %IntList* %cast917 ; <%IntList> [#uses=1]
- %reg286.upgrd.3 = bitcast %IntList %reg286 to i8* ; <i8*> [#uses=1]
- call void @doStart( i8* %reg286.upgrd.3 )
- br label %bb97
-
-bb81: ; preds = %bb51
- store %IntList null, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb82: ; preds = %bb48
- %reg290 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=2]
- %cast923 = getelementptr %YYSTYPE* %reg290, i64 0, i32 0 ; <%IntList*> [#uses=1]
- %reg292 = load %IntList* %cast923 ; <%IntList> [#uses=1]
- %reg292.upgrd.4 = bitcast %IntList %reg292 to i8* ; <i8*> [#uses=1]
- %reg293.idx1 = getelementptr %YYSTYPE* %reg290, i64 -1, i32 0 ; <%IntList*> [#uses=1]
- %reg293 = load %IntList* %reg293.idx1 ; <%IntList> [#uses=1]
- %cast930 = bitcast %IntList %reg293 to %List ; <%List> [#uses=1]
- %reg930 = call %List @newList( i8* %reg292.upgrd.4, %List %cast930 ) ; <%List> [#uses=1]
- bitcast %List %reg930 to %IntList ; <%IntList>:2 [#uses=1]
- store %IntList %2, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb83: ; preds = %bb53
- store %IntList null, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb84: ; preds = %bb54
- %reg298 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=2]
- %cast936 = getelementptr %YYSTYPE* %reg298, i64 0, i32 0 ; <%IntList*> [#uses=1]
- %reg300 = load %IntList* %cast936 ; <%IntList> [#uses=1]
- %reg300.upgrd.5 = bitcast %IntList %reg300 to i8* ; <i8*> [#uses=1]
- %reg301.idx1 = getelementptr %YYSTYPE* %reg298, i64 -1, i32 0 ; <%IntList*> [#uses=1]
- %reg301 = load %IntList* %reg301.idx1 ; <%IntList> [#uses=1]
- %cast943 = bitcast %IntList %reg301 to %List ; <%List> [#uses=1]
- %reg943 = call %List @newList( i8* %reg300.upgrd.5, %List %cast943 ) ; <%List> [#uses=1]
- bitcast %List %reg943 to %IntList ; <%IntList>:3 [#uses=1]
- store %IntList %3, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb85: ; preds = %bb36
- %reg304 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=2]
- %cast9521 = getelementptr %YYSTYPE* %reg304, i64 -2, i32 0 ; <%IntList*> [#uses=1]
- %reg306 = load %IntList* %cast9521 ; <%IntList> [#uses=1]
- %reg306.upgrd.6 = bitcast %IntList %reg306 to i8* ; <i8*> [#uses=1]
- %cast953 = bitcast %YYSTYPE* %reg304 to i32* ; <i32*> [#uses=1]
- %reg307 = load i32* %cast953 ; <i32> [#uses=1]
- %reg955 = call %Binding @newBinding( i8* %reg306.upgrd.6, i32 %reg307 ) ; <%Binding> [#uses=1]
- bitcast %Binding %reg955 to %IntList ; <%IntList>:4 [#uses=1]
- store %IntList %4, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb86: ; preds = %bb60
- store %IntList null, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb87: ; preds = %bb61
- %reg312 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=2]
- %cast961 = getelementptr %YYSTYPE* %reg312, i64 0, i32 0 ; <%IntList*> [#uses=1]
- %reg314 = load %IntList* %cast961 ; <%IntList> [#uses=1]
- %reg314.upgrd.7 = bitcast %IntList %reg314 to i8* ; <i8*> [#uses=1]
- %reg315.idx1 = getelementptr %YYSTYPE* %reg312, i64 -1, i32 0 ; <%IntList*> [#uses=1]
- %reg315 = load %IntList* %reg315.idx1 ; <%IntList> [#uses=1]
- %cast968 = bitcast %IntList %reg315 to %List ; <%List> [#uses=1]
- %reg968 = call %List @newList( i8* %reg314.upgrd.7, %List %cast968 ) ; <%List> [#uses=1]
- bitcast %List %reg968 to %IntList ; <%IntList>:5 [#uses=1]
- store %IntList %5, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb88: ; preds = %bb58
- %reg318 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=4]
- %cast9791 = getelementptr %YYSTYPE* %reg318, i64 -6, i32 0 ; <%IntList*> [#uses=1]
- %reg322 = load %IntList* %cast9791 ; <%IntList> [#uses=1]
- %reg322.upgrd.8 = bitcast %IntList %reg322 to i8* ; <i8*> [#uses=1]
- %reg323.idx1 = getelementptr %YYSTYPE* %reg318, i64 -4, i32 0 ; <%IntList*> [#uses=1]
- %reg323 = load %IntList* %reg323.idx1 ; <%IntList> [#uses=1]
- %reg987 = getelementptr %YYSTYPE* %reg318, i64 -2 ; <%YYSTYPE*> [#uses=1]
- %cast989 = bitcast %YYSTYPE* %reg987 to i32* ; <i32*> [#uses=1]
- %reg324 = load i32* %cast989 ; <i32> [#uses=1]
- %reg325.idx1 = getelementptr %YYSTYPE* %reg318, i64 -1, i32 0 ; <%IntList*> [#uses=1]
- %reg325 = load %IntList* %reg325.idx1 ; <%IntList> [#uses=1]
- %cast998 = bitcast %IntList %reg323 to %PatternAST ; <%PatternAST> [#uses=1]
- %reg996 = call %RuleAST @newRuleAST( i8* %reg322.upgrd.8, %PatternAST %cast998, i32 %reg324, %IntList %reg325 ) ; <%RuleAST> [#uses=1]
- bitcast %RuleAST %reg996 to %IntList ; <%IntList>:6 [#uses=1]
- store %IntList %6, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb89: ; preds = %bb63
- %reg328 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=1]
- %cast1002 = getelementptr %YYSTYPE* %reg328, i64 0, i32 0 ; <%IntList*> [#uses=1]
- %reg329 = load %IntList* %cast1002 ; <%IntList> [#uses=1]
- %reg329.upgrd.9 = bitcast %IntList %reg329 to i8* ; <i8*> [#uses=1]
- %reg1004 = call %PatternAST @newPatternAST( i8* %reg329.upgrd.9, %List null ) ; <%PatternAST> [#uses=1]
- bitcast %PatternAST %reg1004 to %IntList ; <%IntList>:7 [#uses=1]
- store %IntList %7, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb90: ; preds = %bb64
- %reg333 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=2]
- %cast10131 = getelementptr %YYSTYPE* %reg333, i64 -1, i32 0 ; <%IntList*> [#uses=1]
- %reg335 = load %IntList* %cast10131 ; <%IntList> [#uses=1]
- %reg335.upgrd.10 = bitcast %IntList %reg335 to i8* ; <i8*> [#uses=1]
- %reg1015 = call %List @newList( i8* %reg335.upgrd.10, %List null ) ; <%List> [#uses=1]
- %cast10211 = getelementptr %YYSTYPE* %reg333, i64 -3, i32 0 ; <%IntList*> [#uses=1]
- %reg338 = load %IntList* %cast10211 ; <%IntList> [#uses=1]
- %reg338.upgrd.11 = bitcast %IntList %reg338 to i8* ; <i8*> [#uses=1]
- %reg1023 = call %PatternAST @newPatternAST( i8* %reg338.upgrd.11, %List %reg1015 ) ; <%PatternAST> [#uses=1]
- bitcast %PatternAST %reg1023 to %IntList ; <%IntList>:8 [#uses=1]
- store %IntList %8, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb91: ; preds = %bb56
- %reg341 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=3]
- %cast10331 = getelementptr %YYSTYPE* %reg341, i64 -1, i32 0 ; <%IntList*> [#uses=1]
- %reg344 = load %IntList* %cast10331 ; <%IntList> [#uses=1]
- %reg344.upgrd.12 = bitcast %IntList %reg344 to i8* ; <i8*> [#uses=1]
- %reg1035 = call %List @newList( i8* %reg344.upgrd.12, %List null ) ; <%List> [#uses=1]
- %cast10411 = getelementptr %YYSTYPE* %reg341, i64 -3, i32 0 ; <%IntList*> [#uses=1]
- %reg347 = load %IntList* %cast10411 ; <%IntList> [#uses=1]
- %reg347.upgrd.13 = bitcast %IntList %reg347 to i8* ; <i8*> [#uses=1]
- %reg1043 = call %List @newList( i8* %reg347.upgrd.13, %List %reg1035 ) ; <%List> [#uses=1]
- %cast10491 = getelementptr %YYSTYPE* %reg341, i64 -5, i32 0 ; <%IntList*> [#uses=1]
- %reg349 = load %IntList* %cast10491 ; <%IntList> [#uses=1]
- %reg349.upgrd.14 = bitcast %IntList %reg349 to i8* ; <i8*> [#uses=1]
- %reg1051 = call %PatternAST @newPatternAST( i8* %reg349.upgrd.14, %List %reg1043 ) ; <%PatternAST> [#uses=1]
- bitcast %PatternAST %reg1051 to %IntList ; <%IntList>:9 [#uses=1]
- store %IntList %9, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb92: ; preds = %bb68
- store %IntList null, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb93: ; preds = %bb69
- %reg354 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=2]
- %reg1059 = getelementptr %YYSTYPE* %reg354, i64 -2 ; <%YYSTYPE*> [#uses=1]
- %cast1061 = bitcast %YYSTYPE* %reg1059 to i32* ; <i32*> [#uses=1]
- %reg356 = load i32* %cast1061 ; <i32> [#uses=1]
- %reg357.idx1 = getelementptr %YYSTYPE* %reg354, i64 -1, i32 0 ; <%IntList*> [#uses=1]
- %reg357 = load %IntList* %reg357.idx1 ; <%IntList> [#uses=1]
- %reg1068 = call %IntList @newIntList( i32 %reg356, %IntList %reg357 ) ; <%IntList> [#uses=1]
- store %IntList %reg1068, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb94: ; preds = %bb66
- store %IntList null, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb95: ; preds = %bb71
- %reg362 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=2]
- %reg1076 = getelementptr %YYSTYPE* %reg362, i64 -1 ; <%YYSTYPE*> [#uses=1]
- %cast1078 = bitcast %YYSTYPE* %reg1076 to i32* ; <i32*> [#uses=1]
- %reg364 = load i32* %cast1078 ; <i32> [#uses=1]
- %reg365.idx = getelementptr %YYSTYPE* %reg362, i64 0, i32 0 ; <%IntList*> [#uses=1]
- %reg365 = load %IntList* %reg365.idx ; <%IntList> [#uses=1]
- %reg1081 = call %IntList @newIntList( i32 %reg364, %IntList %reg365 ) ; <%IntList> [#uses=1]
- store %IntList %reg1081, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb96: ; preds = %bb72
- %reg368 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=2]
- %reg1088 = getelementptr %YYSTYPE* %reg368, i64 -1 ; <%YYSTYPE*> [#uses=1]
- %cast1090 = bitcast %YYSTYPE* %reg1088 to i32* ; <i32*> [#uses=1]
- %reg370 = load i32* %cast1090 ; <i32> [#uses=1]
- %reg371.idx = getelementptr %YYSTYPE* %reg368, i64 0, i32 0 ; <%IntList*> [#uses=1]
- %reg371 = load %IntList* %reg371.idx ; <%IntList> [#uses=1]
- %reg1093 = call %IntList @newIntList( i32 %reg370, %IntList %reg371 ) ; <%IntList> [#uses=1]
- store %IntList %reg1093, %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0)
- br label %bb97
-
-bb97: ; preds = %bb96, %bb95, %bb94, %bb93, %bb92, %bb91, %bb90, %bb89, %bb88, %bb87, %bb86, %bb85, %bb84, %bb83, %bb82, %bb81, %bb80, %bb79, %bb78, %bb77, %bb76, %bb75, %bb74, %bb72, %bb69, %bb64, %bb61, %bb54, %bb51, %bb46, %bb43
- %cast1097 = sext i16 %reg254 to i64 ; <i64> [#uses=3]
- %reg375 = add i64 %cast1097, %cast1097 ; <i64> [#uses=1]
- %reg377 = load i16** @yyssp ; <i16*> [#uses=1]
- %cast379 = ptrtoint i16* %reg377 to i64 ; <i64> [#uses=1]
- %reg381 = sub i64 %cast379, %reg375 ; <i64> [#uses=1]
- %cast1099 = inttoptr i64 %reg381 to i16* ; <i16*> [#uses=1]
- store i16* %cast1099, i16** @yyssp
- %reg382 = load i16** @yyssp ; <i16*> [#uses=3]
- %reg383 = load i16* %reg382 ; <i16> [#uses=1]
- %cast1103 = sext i16 %reg383 to i32 ; <i32> [#uses=3]
- %reg385 = mul i64 %cast1097, 8 ; <i64> [#uses=1]
- %reg387 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=1]
- %cast389 = ptrtoint %YYSTYPE* %reg387 to i64 ; <i64> [#uses=1]
- %reg391 = sub i64 %cast389, %reg385 ; <i64> [#uses=1]
- %cast1108 = inttoptr i64 %reg391 to %YYSTYPE* ; <%YYSTYPE*> [#uses=1]
- store %YYSTYPE* %cast1108, %YYSTYPE** @yyvsp
- %reg1111 = getelementptr [25 x i16]* @yylhs, i64 0, i64 %reg468-idxcast ; <i16*> [#uses=1]
- %reg398 = load i16* %reg1111 ; <i16> [#uses=2]
- %cast1116 = sext i16 %reg398 to i32 ; <i32> [#uses=1]
- %cond1117 = icmp ne i32 %cast1103, 0 ; <i1> [#uses=1]
- br i1 %cond1117, label %bb104, label %bb98
-
-bb98: ; preds = %bb97
- %cond1119 = icmp ne i32 %cast1116, 0 ; <i1> [#uses=1]
- br i1 %cond1119, label %bb104, label %bb99
-
-bb99: ; preds = %bb98
- %reg1122 = getelementptr i16* %reg382, i64 1 ; <i16*> [#uses=2]
- store i16* %reg1122, i16** @yyssp
- store i16 1, i16* %reg1122
- %reg403 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=2]
- %reg1128 = getelementptr %YYSTYPE* %reg403, i64 1 ; <%YYSTYPE*> [#uses=1]
- store %YYSTYPE* %reg1128, %YYSTYPE** @yyvsp
- %reg406 = load %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0) ; <%IntList> [#uses=1]
- %reg1128.idx1 = getelementptr %YYSTYPE* %reg403, i64 1, i32 0 ; <%IntList*> [#uses=1]
- store %IntList %reg406, %IntList* %reg1128.idx1
- %reg407 = load i32* @yychar ; <i32> [#uses=1]
- %cond1135 = icmp sge i32 %reg407, 0 ; <i1> [#uses=1]
- br i1 %cond1135, label %bb102, label %bb100
-
-bb100: ; preds = %bb99
- %reg1139 = call i32 @yylex( ) ; <i32> [#uses=1]
- store i32 %reg1139, i32* @yychar
- %reg409 = load i32* @yychar ; <i32> [#uses=1]
- %cond1141 = icmp sge i32 %reg409, 0 ; <i1> [#uses=1]
- br i1 %cond1141, label %bb102, label %bb101
-
-bb101: ; preds = %bb100
- store i32 0, i32* @yychar
- br label %bb102
-
-bb102: ; preds = %bb101, %bb100, %bb99
- %reg411 = load i32* @yychar ; <i32> [#uses=1]
- %cond1146 = icmp ne i32 %reg411, 0 ; <i1> [#uses=1]
- br i1 %cond1146, label %bb4, label %UnifiedExitNode
-
-bb104: ; preds = %bb98, %bb97
- %reg398-idxcast = sext i16 %reg398 to i64 ; <i64> [#uses=2]
- %reg1150 = getelementptr [12 x i16]* @yygindex, i64 0, i64 %reg398-idxcast ; <i16*> [#uses=1]
- %reg418 = load i16* %reg1150 ; <i16> [#uses=1]
- %cast1155 = sext i16 %reg418 to i32 ; <i32> [#uses=2]
- %cond1156 = icmp eq i32 %cast1155, 0 ; <i1> [#uses=1]
- br i1 %cond1156, label %bb109, label %bb105
-
-bb105: ; preds = %bb104
- %reg473 = add i32 %cast1155, %cast1103 ; <i32> [#uses=3]
- %cond1158 = icmp slt i32 %reg473, 0 ; <i1> [#uses=1]
- br i1 %cond1158, label %bb109, label %bb106
-
-bb106: ; preds = %bb105
- %cond1161 = icmp sgt i32 %reg473, 262 ; <i1> [#uses=1]
- br i1 %cond1161, label %bb109, label %bb107
-
-bb107: ; preds = %bb106
- %reg473-idxcast = sext i32 %reg473 to i64 ; <i64> [#uses=2]
- %reg1166 = getelementptr [263 x i16]* @yycheck, i64 0, i64 %reg473-idxcast ; <i16*> [#uses=1]
- %reg428 = load i16* %reg1166 ; <i16> [#uses=1]
- %cast1171 = sext i16 %reg428 to i32 ; <i32> [#uses=1]
- %cond1172 = icmp ne i32 %cast1171, %cast1103 ; <i1> [#uses=1]
- br i1 %cond1172, label %bb109, label %bb108
-
-bb108: ; preds = %bb107
- %reg1175 = getelementptr [263 x i16]* @yytable, i64 0, i64 %reg473-idxcast ; <i16*> [#uses=1]
- %reg435 = load i16* %reg1175 ; <i16> [#uses=1]
- %cast1180 = sext i16 %reg435 to i32 ; <i32> [#uses=1]
- br label %bb110
-
-bb109: ; preds = %bb107, %bb106, %bb105, %bb104
- %reg1183 = getelementptr [12 x i16]* @yydgoto, i64 0, i64 %reg398-idxcast ; <i16*> [#uses=1]
- %reg442 = load i16* %reg1183 ; <i16> [#uses=1]
- %cast1188 = sext i16 %reg442 to i32 ; <i32> [#uses=1]
- br label %bb110
-
-bb110: ; preds = %bb109, %bb108
- %reg476 = phi i32 [ %cast1188, %bb109 ], [ %cast1180, %bb108 ] ; <i32> [#uses=2]
- %cast1189 = bitcast i16* %reg382 to i8* ; <i8*> [#uses=1]
- %reg444 = load i16** @yysslim ; <i16*> [#uses=1]
- %cast1190 = bitcast i16* %reg444 to i8* ; <i8*> [#uses=1]
- %cond1191 = icmp ult i8* %cast1189, %cast1190 ; <i1> [#uses=1]
- br i1 %cond1191, label %bb112, label %bb111
-
-bb111: ; preds = %bb110
- %reg1193 = call i32 @yygrowstack( ) ; <i32> [#uses=1]
- %cond1193 = icmp ne i32 %reg1193, 0 ; <i1> [#uses=1]
- br i1 %cond1193, label %bb113, label %bb112
-
-bb112: ; preds = %bb111, %bb110
- %reg446 = load i16** @yyssp ; <i16*> [#uses=1]
- %reg1196 = getelementptr i16* %reg446, i64 1 ; <i16*> [#uses=2]
- store i16* %reg1196, i16** @yyssp
- %cast1357 = trunc i32 %reg476 to i16 ; <i16> [#uses=1]
- store i16 %cast1357, i16* %reg1196
- %reg449 = load %YYSTYPE** @yyvsp ; <%YYSTYPE*> [#uses=2]
- %reg1202 = getelementptr %YYSTYPE* %reg449, i64 1 ; <%YYSTYPE*> [#uses=1]
- store %YYSTYPE* %reg1202, %YYSTYPE** @yyvsp
- %reg452 = load %IntList* getelementptr (%YYSTYPE* @yyval, i64 0, i32 0) ; <%IntList> [#uses=1]
- %reg1202.idx1 = getelementptr %YYSTYPE* %reg449, i64 1, i32 0 ; <%IntList*> [#uses=1]
- store %IntList %reg452, %IntList* %reg1202.idx1
- br label %bb4
-
-bb113: ; preds = %bb111, %bb30, %bb13, %bb2
- call void @yyerror( i8* getelementptr ([20 x i8]* @.LC1, i64 0, i64 0) )
- br label %UnifiedExitNode
-
-UnifiedExitNode: ; preds = %bb113, %bb102, %bb34, %bb32
- %UnifiedRetVal = phi i32 [ 1, %bb113 ], [ 1, %bb34 ], [ 1, %bb32 ], [ 0, %bb102 ] ; <i32> [#uses=1]
- ret i32 %UnifiedRetVal
-}
-
-declare %List @newList(i8*, %List)
-
-declare %IntList @newIntList(i32, %IntList)
-
-declare void @doStart(i8*)
-
-declare void @yyerror(i8*)
-
-declare void @doSpec(%List, %List)
-
-declare %Arity @newArity(i32, %List)
-
-declare %Binding @newBinding(i8*, i32)
-
-declare %PatternAST @newPatternAST(i8*, %List)
-
-declare %RuleAST @newRuleAST(i8*, %PatternAST, i32, %IntList)
-
-declare void @yyfinished()
-
-declare i32 @yylex()
-
-declare void @doGram(%List)
-
-declare i32 @yygrowstack()
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/ConstantExprLowering.ll b/libclamav/c++/llvm/test/CodeGen/Generic/ConstantExprLowering.ll
deleted file mode 100644
index 428d712..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/ConstantExprLowering.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s
-
-@.str_1 = internal constant [16 x i8] c"%d %d %d %d %d\0A\00" ; <[16 x i8]*> [#uses=1]
-@XA = external global i32 ; <i32*> [#uses=1]
-@XB = external global i32 ; <i32*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-define void @test(i32 %A, i32 %B, i32 %C, i32 %D) {
-entry:
- %t1 = icmp slt i32 %A, 0 ; <i1> [#uses=1]
- br i1 %t1, label %less, label %not_less
-
-less: ; preds = %entry
- br label %not_less
-
-not_less: ; preds = %less, %entry
- %t2 = phi i32 [ sub (i32 ptrtoint (i32* @XA to i32), i32 ptrtoint (i32* @XB to i32)), %less ], [ sub (i32 ptrtoint (i32* @XA to i32), i32 ptrtoint (i32* @XB to i32)), %entry ] ; <i32> [#uses=1]
- %tmp.39 = call i32 (i8*, ...)* @printf( i8* getelementptr ([16 x i8]* @.str_1, i64 0, i64 0), i32 %t2 ) ; <i32> [#uses=0]
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/Makefile b/libclamav/c++/llvm/test/CodeGen/Generic/Makefile
deleted file mode 100644
index 26ebc31..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-# Makefile for running ad-hoc custom LLVM tests
-#
-%.bc: %.ll
- llvm-as $<
-
-%.llc.s: %.bc
- llc $< -o $@
-
-%.gcc.s: %.c
- gcc -O0 -S $< -o $@
-
-%.nat: %.s
- gcc -O0 -lm $< -o $@
-
-%.cbe.out: %.cbe.nat
- ./$< > $@
-
-%.out: %.nat
- ./$< > $@
-
-%.clean:
- rm -f $(patsubst %.clean,%.bc,$@) $(patsubst %.clean,%.*.s,$@) \
- $(patsubst %.clean,%.*.nat,$@) $(patsubst %.clean,%.*.out,$@)
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/add-with-overflow-24.ll b/libclamav/c++/llvm/test/CodeGen/Generic/add-with-overflow-24.ll
deleted file mode 100644
index 63f5a22..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/add-with-overflow-24.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s
-
-@ok = internal constant [4 x i8] c"%d\0A\00"
-@no = internal constant [4 x i8] c"no\0A\00"
-
-define i1 @func1(i24 signext %v1, i24 signext %v2) nounwind {
-entry:
- %t = call {i24, i1} @llvm.sadd.with.overflow.i24(i24 %v1, i24 %v2)
- %sum = extractvalue {i24, i1} %t, 0
- %sum32 = sext i24 %sum to i32
- %obit = extractvalue {i24, i1} %t, 1
- br i1 %obit, label %overflow, label %normal
-
-normal:
- %t1 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @ok, i32 0, i32 0), i32 %sum32 ) nounwind
- ret i1 true
-
-overflow:
- %t2 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @no, i32 0, i32 0) ) nounwind
- ret i1 false
-}
-
-define i1 @func2(i24 zeroext %v1, i24 zeroext %v2) nounwind {
-entry:
- %t = call {i24, i1} @llvm.uadd.with.overflow.i24(i24 %v1, i24 %v2)
- %sum = extractvalue {i24, i1} %t, 0
- %sum32 = zext i24 %sum to i32
- %obit = extractvalue {i24, i1} %t, 1
- br i1 %obit, label %carry, label %normal
-
-normal:
- %t1 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @ok, i32 0, i32 0), i32 %sum32 ) nounwind
- ret i1 true
-
-carry:
- %t2 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @no, i32 0, i32 0) ) nounwind
- ret i1 false
-}
-
-declare i32 @printf(i8*, ...) nounwind
-declare {i24, i1} @llvm.sadd.with.overflow.i24(i24, i24)
-declare {i24, i1} @llvm.uadd.with.overflow.i24(i24, i24)
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/add-with-overflow.ll b/libclamav/c++/llvm/test/CodeGen/Generic/add-with-overflow.ll
deleted file mode 100644
index 0c2c960..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/add-with-overflow.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s
-; RUN: llc < %s -fast-isel
-
-@ok = internal constant [4 x i8] c"%d\0A\00"
-@no = internal constant [4 x i8] c"no\0A\00"
-
-define i1 @func1(i32 %v1, i32 %v2) nounwind {
-entry:
- %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
- %sum = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %overflow, label %normal
-
-normal:
- %t1 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @ok, i32 0, i32 0), i32 %sum ) nounwind
- ret i1 true
-
-overflow:
- %t2 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @no, i32 0, i32 0) ) nounwind
- ret i1 false
-}
-
-define i1 @func2(i32 %v1, i32 %v2) nounwind {
-entry:
- %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
- %sum = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %overflow, label %normal
-
-normal:
- %t1 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @ok, i32 0, i32 0), i32 %sum ) nounwind
- ret i1 true
-
-overflow:
- %t2 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @no, i32 0, i32 0) ) nounwind
- ret i1 false
-}
-
-declare i32 @printf(i8*, ...) nounwind
-declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32)
-declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32)
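The two add-with-overflow tests removed above exercise the llvm.sadd.with.overflow and llvm.uadd.with.overflow intrinsics. As a rough C-level sketch of the same checked-add pattern (illustrative only, assuming a compiler that provides GCC/Clang's __builtin_sadd_overflow and __builtin_uadd_overflow; none of this is part of the removed test suite):

#include <stdio.h>

/* Checked signed add: prints the sum and returns 1, or prints "no"
   and returns 0 if the addition overflows (mirrors func1 above). */
static int checked_sadd(int v1, int v2)
{
    int sum;
    if (__builtin_sadd_overflow(v1, v2, &sum)) {
        printf("no\n");
        return 0;
    }
    printf("%d\n", sum);
    return 1;
}

/* Checked unsigned add (mirrors func2 above). */
static int checked_uadd(unsigned v1, unsigned v2)
{
    unsigned sum;
    if (__builtin_uadd_overflow(v1, v2, &sum)) {
        printf("no\n");
        return 0;
    }
    printf("%u\n", sum);
    return 1;
}

int main(void)
{
    checked_sadd(2147483647, 1); /* overflows: prints "no" */
    checked_uadd(1u, 2u);        /* prints "3" */
    return 0;
}

The builtins return nonzero when the mathematically exact sum does not fit in the result type, which corresponds to the i1 overflow flag the deleted tests branch on.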
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/asm-large-immediate.ll b/libclamav/c++/llvm/test/CodeGen/Generic/asm-large-immediate.ll
deleted file mode 100644
index 605665b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/asm-large-immediate.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s | grep 68719476738
-
-define void @test() {
-entry:
- tail call void asm sideeffect "/* result: ${0:c} */", "i,~{dirflag},~{fpsr},~{flags}"( i64 68719476738 )
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/badCallArgLRLLVM.ll b/libclamav/c++/llvm/test/CodeGen/Generic/badCallArgLRLLVM.ll
deleted file mode 100644
index 4ed88df..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/badCallArgLRLLVM.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s
-
-; This caused a problem because the argument of a call was defined by
-; the return value of another call that appears later in the code.
-; When processing the first call, the second call has not yet been processed
-; so no LiveRange has been created for its return value.
-;
-; llc dies in UltraSparcRegInfo::suggestRegs4CallArgs() with:
-; ERROR: In call instr, no LR for arg: 0x1009e0740
-;
-
-declare i32 @getInt(i32)
-
-define i32 @main(i32 %argc, i8** %argv) {
-bb0:
- br label %bb2
-
-bb1: ; preds = %bb2
- %reg222 = call i32 @getInt( i32 %reg218 ) ; <i32> [#uses=1]
- %reg110 = add i32 %reg222, 1 ; <i32> [#uses=2]
- %b = icmp sle i32 %reg110, 0 ; <i1> [#uses=1]
- br i1 %b, label %bb2, label %bb3
-
-bb2: ; preds = %bb1, %bb0
- %reg218 = call i32 @getInt( i32 %argc ) ; <i32> [#uses=1]
- br label %bb1
-
-bb3: ; preds = %bb1
- ret i32 %reg110
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/badFoldGEP.ll b/libclamav/c++/llvm/test/CodeGen/Generic/badFoldGEP.ll
deleted file mode 100644
index 2d4474b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/badFoldGEP.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s
-
-;; GetMemInstArgs() folded the two getElementPtr instructions together,
-;; producing an illegal getElementPtr. That's because the type generated
-;; by the last index for the first one is a structure field, not an array
-;; element, and the second one indexes off that structure field.
-;; The code is legal but not type-safe and the two GEPs should not be folded.
-;;
-;; This code fragment is from Spec/CINT2000/197.parser/197.parser.bc,
-;; file post_process.c, function build_domain().
-;; (Modified to replace store with load and return load value.)
-;;
- %Domain = type { i8*, i32, i32*, i32, i32, i32*, %Domain* }
-@domain_array = external global [497 x %Domain] ; <[497 x %Domain]*> [#uses=2]
-
-declare void @opaque([497 x %Domain]*)
-
-define i32 @main(i32 %argc, i8** %argv) {
-bb0:
- call void @opaque( [497 x %Domain]* @domain_array )
- %cann-indvar-idxcast = sext i32 %argc to i64 ; <i64> [#uses=1]
- %reg841 = getelementptr [497 x %Domain]* @domain_array, i64 0, i64 %cann-indvar-idxcast, i32 3 ; <i32*> [#uses=1]
- %reg846 = getelementptr i32* %reg841, i64 1 ; <i32*> [#uses=1]
- %reg820 = load i32* %reg846 ; <i32> [#uses=1]
- ret i32 %reg820
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/badarg6.ll b/libclamav/c++/llvm/test/CodeGen/Generic/badarg6.ll
deleted file mode 100644
index d6e5ac5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/badarg6.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc < %s
-
-; On this code, llc did not pass the sixth argument (%reg321) to printf.
-; It passed the first five in %o0 - %o4, but never initialized %o5.
-@.LC12 = internal global [44 x i8] c"\09\09M = %g, I = %g, V = %g\0A\09\09O = %g, E = %g\0A\0A\00" ; <[44 x i8]*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-declare double @opaque(double)
-
-define i32 @main(i32 %argc, i8** %argv) {
-bb25:
- %b = icmp sle i32 %argc, 2 ; <i1> [#uses=1]
- br i1 %b, label %bb42, label %bb43
-
-bb42: ; preds = %bb25
- %reg315 = call double @opaque( double 3.000000e+00 ) ; <double> [#uses=1]
- %reg316 = call double @opaque( double 3.100000e+00 ) ; <double> [#uses=1]
- %reg317 = call double @opaque( double 3.200000e+00 ) ; <double> [#uses=1]
- %reg318 = call double @opaque( double 3.300000e+00 ) ; <double> [#uses=1]
- %reg319 = call double @opaque( double 3.400000e+00 ) ; <double> [#uses=1]
- br label %bb43
-
-bb43: ; preds = %bb42, %bb25
- %reg321 = phi double [ 2.000000e-01, %bb25 ], [ %reg315, %bb42 ] ; <double> [#uses=1]
- %reg322 = phi double [ 6.000000e+00, %bb25 ], [ %reg316, %bb42 ] ; <double> [#uses=1]
- %reg323 = phi double [ -1.000000e+00, %bb25 ], [ %reg317, %bb42 ] ; <double> [#uses=1]
- %reg324 = phi double [ -1.000000e+00, %bb25 ], [ %reg318, %bb42 ] ; <double> [#uses=1]
- %reg325 = phi double [ 1.000000e+00, %bb25 ], [ %reg319, %bb42 ] ; <double> [#uses=1]
- %reg609 = call i32 (i8*, ...)* @printf( i8* getelementptr ([44 x i8]* @.LC12, i64 0, i64 0), double %reg325, double %reg324, double %reg323, double %reg322, double %reg321 ) ; <i32> [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/badlive.ll b/libclamav/c++/llvm/test/CodeGen/Generic/badlive.ll
deleted file mode 100644
index 43b03e3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/badlive.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s
-
-define i32 @main() {
-bb0:
- %reg109 = malloc i32, i32 100 ; <i32*> [#uses=2]
- br label %bb2
-
-bb2: ; preds = %bb2, %bb0
- %cann-indvar1 = phi i32 [ 0, %bb0 ], [ %add1-indvar1, %bb2 ] ; <i32> [#uses=2]
- %reg127 = mul i32 %cann-indvar1, 2 ; <i32> [#uses=1]
- %add1-indvar1 = add i32 %cann-indvar1, 1 ; <i32> [#uses=1]
- store i32 999, i32* %reg109
- %cond1015 = icmp sle i32 1, 99 ; <i1> [#uses=1]
- %reg128 = add i32 %reg127, 2 ; <i32> [#uses=0]
- br i1 %cond1015, label %bb2, label %bb4
-
-bb4: ; preds = %bb4, %bb2
- %cann-indvar = phi i32 [ %add1-indvar, %bb4 ], [ 0, %bb2 ] ; <i32> [#uses=1]
- %add1-indvar = add i32 %cann-indvar, 1 ; <i32> [#uses=2]
- store i32 333, i32* %reg109
- %reg131 = add i32 %add1-indvar, 3 ; <i32> [#uses=1]
- %cond1017 = icmp ule i32 %reg131, 99 ; <i1> [#uses=1]
- br i1 %cond1017, label %bb4, label %bb5
-
-bb5: ; preds = %bb4
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/bool-to-double.ll b/libclamav/c++/llvm/test/CodeGen/Generic/bool-to-double.ll
deleted file mode 100644
index 81350a4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/bool-to-double.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s
-define double @test(i1 %X) {
- %Y = uitofp i1 %X to double ; <double> [#uses=1]
- ret double %Y
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/bool-vector.ll b/libclamav/c++/llvm/test/CodeGen/Generic/bool-vector.ll
deleted file mode 100644
index 4758697..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/bool-vector.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s
-; PR1845
-
-define void @boolVectorSelect(<4 x i1>* %boolVectorPtr) {
-Body:
- %castPtr = bitcast <4 x i1>* %boolVectorPtr to <4 x i1>*
- %someBools = load <4 x i1>* %castPtr, align 1 ; <<4 x i1>>
- %internal = alloca <4 x i1>, align 16 ; <<4 x i1>*> [#uses=1]
- store <4 x i1> %someBools, <4 x i1>* %internal, align 1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/call-ret0.ll b/libclamav/c++/llvm/test/CodeGen/Generic/call-ret0.ll
deleted file mode 100644
index a8e00cd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/call-ret0.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s
-define i32 @foo(i32 %x) {
- ret i32 %x
-}
-
-define i32 @main() {
- %r = call i32 @foo( i32 0 ) ; <i32> [#uses=1]
- ret i32 %r
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/call-ret42.ll b/libclamav/c++/llvm/test/CodeGen/Generic/call-ret42.ll
deleted file mode 100644
index 95cc286..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/call-ret42.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s
-
-define i32 @foo(i32 %x) {
- ret i32 42
-}
-
-define i32 @main() {
- %r = call i32 @foo( i32 15 ) ; <i32> [#uses=1]
- ret i32 %r
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/call-void.ll b/libclamav/c++/llvm/test/CodeGen/Generic/call-void.ll
deleted file mode 100644
index 9ed4179..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/call-void.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s
-
-define void @foo() {
- ret void
-}
-
-define i32 @main() {
- call void @foo( )
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/call2-ret0.ll b/libclamav/c++/llvm/test/CodeGen/Generic/call2-ret0.ll
deleted file mode 100644
index 4e57ef8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/call2-ret0.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s
-
-define i32 @bar(i32 %x) {
- ret i32 0
-}
-
-define i32 @foo(i32 %x) {
- %q = call i32 @bar( i32 1 ) ; <i32> [#uses=1]
- ret i32 %q
-}
-
-define i32 @main() {
- %r = call i32 @foo( i32 2 ) ; <i32> [#uses=1]
- ret i32 %r
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/cast-fp.ll b/libclamav/c++/llvm/test/CodeGen/Generic/cast-fp.ll
deleted file mode 100644
index 590b7ce..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/cast-fp.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s
-@a_fstr = internal constant [8 x i8] c"a = %f\0A\00" ; <[8 x i8]*> [#uses=1]
-@a_lstr = internal constant [10 x i8] c"a = %lld\0A\00" ; <[10 x i8]*> [#uses=1]
-@a_dstr = internal constant [8 x i8] c"a = %d\0A\00" ; <[8 x i8]*> [#uses=1]
-@b_dstr = internal constant [8 x i8] c"b = %d\0A\00" ; <[8 x i8]*> [#uses=1]
-@b_fstr = internal constant [8 x i8] c"b = %f\0A\00" ; <[8 x i8]*> [#uses=1]
-@A = global double 2.000000e+00 ; <double*> [#uses=1]
-@B = global i32 2 ; <i32*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-define i32 @main() {
- %a = load double* @A ; <double> [#uses=4]
- %a_fs = getelementptr [8 x i8]* @a_fstr, i64 0, i64 0 ; <i8*> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %a_fs, double %a ) ; <i32>:1 [#uses=0]
- %a_d2l = fptosi double %a to i64 ; <i64> [#uses=1]
- %a_ls = getelementptr [10 x i8]* @a_lstr, i64 0, i64 0 ; <i8*> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %a_ls, i64 %a_d2l ) ; <i32>:2 [#uses=0]
- %a_d2i = fptosi double %a to i32 ; <i32> [#uses=2]
- %a_ds = getelementptr [8 x i8]* @a_dstr, i64 0, i64 0 ; <i8*> [#uses=3]
- call i32 (i8*, ...)* @printf( i8* %a_ds, i32 %a_d2i ) ; <i32>:3 [#uses=0]
- %a_d2sb = fptosi double %a to i8 ; <i8> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %a_ds, i8 %a_d2sb ) ; <i32>:4 [#uses=0]
- %a_d2i2sb = trunc i32 %a_d2i to i8 ; <i8> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %a_ds, i8 %a_d2i2sb ) ; <i32>:5 [#uses=0]
- %b = load i32* @B ; <i32> [#uses=2]
- %b_ds = getelementptr [8 x i8]* @b_dstr, i64 0, i64 0 ; <i8*> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %b_ds, i32 %b ) ; <i32>:6 [#uses=0]
- %b_i2d = sitofp i32 %b to double ; <double> [#uses=1]
- %b_fs = getelementptr [8 x i8]* @b_fstr, i64 0, i64 0 ; <i8*> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %b_fs, double %b_i2d ) ; <i32>:7 [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/constindices.ll b/libclamav/c++/llvm/test/CodeGen/Generic/constindices.ll
deleted file mode 100644
index 7deb30f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/constindices.ll
+++ /dev/null
@@ -1,44 +0,0 @@
-; RUN: llc < %s
-
-; Test that a sequence of constant indices are folded correctly
-; into the equivalent offset at compile-time.
-
- %MixedA = type { float, [15 x i32], i8, float }
- %MixedB = type { float, %MixedA, float }
-@fmtArg = internal global [44 x i8] c"sqrt(2) = %g\0Aexp(1) = %g\0Api = %g\0Afive = %g\0A\00" ; <[44 x i8]*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-define i32 @main() {
- %ScalarA = alloca %MixedA ; <%MixedA*> [#uses=1]
- %ScalarB = alloca %MixedB ; <%MixedB*> [#uses=1]
- %ArrayA = alloca %MixedA, i32 4 ; <%MixedA*> [#uses=3]
- %ArrayB = alloca %MixedB, i32 3 ; <%MixedB*> [#uses=2]
- %I1 = getelementptr %MixedA* %ScalarA, i64 0, i32 0 ; <float*> [#uses=2]
- store float 0x3FF6A09020000000, float* %I1
- %I2 = getelementptr %MixedB* %ScalarB, i64 0, i32 1, i32 0 ; <float*> [#uses=2]
- store float 0x4005BF1420000000, float* %I2
- %fptrA = getelementptr %MixedA* %ArrayA, i64 1, i32 0 ; <float*> [#uses=1]
- %fptrB = getelementptr %MixedB* %ArrayB, i64 2, i32 1, i32 0 ; <float*> [#uses=1]
- store float 0x400921CAC0000000, float* %fptrA
- store float 5.000000e+00, float* %fptrB
-
- ;; Test that a sequence of GEPs with constant indices are folded right
- %fptrA1 = getelementptr %MixedA* %ArrayA, i64 3 ; <%MixedA*> [#uses=1]
- %fptrA2 = getelementptr %MixedA* %fptrA1, i64 0, i32 1 ; <[15 x i32]*> [#uses=1]
- %fptrA3 = getelementptr [15 x i32]* %fptrA2, i64 0, i64 8 ; <i32*> [#uses=1]
- store i32 5, i32* %fptrA3
- %sqrtTwo = load float* %I1 ; <float> [#uses=1]
- %exp = load float* %I2 ; <float> [#uses=1]
- %I3 = getelementptr %MixedA* %ArrayA, i64 1, i32 0 ; <float*> [#uses=1]
- %pi = load float* %I3 ; <float> [#uses=1]
- %I4 = getelementptr %MixedB* %ArrayB, i64 2, i32 1, i32 0 ; <float*> [#uses=1]
- %five = load float* %I4 ; <float> [#uses=1]
- %dsqrtTwo = fpext float %sqrtTwo to double ; <double> [#uses=1]
- %dexp = fpext float %exp to double ; <double> [#uses=1]
- %dpi = fpext float %pi to double ; <double> [#uses=1]
- %dfive = fpext float %five to double ; <double> [#uses=1]
- %castFmt = getelementptr [44 x i8]* @fmtArg, i64 0, i64 0 ; <i8*> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %castFmt, double %dsqrtTwo, double %dexp, double %dpi, double %dfive ) ; <i32>:1 [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/dg.exp b/libclamav/c++/llvm/test/CodeGen/Generic/dg.exp
deleted file mode 100644
index f200589..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/dg.exp
+++ /dev/null
@@ -1,3 +0,0 @@
-load_lib llvm.exp
-
-RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/div-neg-power-2.ll b/libclamav/c++/llvm/test/CodeGen/Generic/div-neg-power-2.ll
deleted file mode 100644
index 246cd03..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/div-neg-power-2.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s
-
-define i32 @test(i32 %X) {
- %Y = sdiv i32 %X, -2 ; <i32> [#uses=1]
- ret i32 %Y
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/empty-load-store.ll b/libclamav/c++/llvm/test/CodeGen/Generic/empty-load-store.ll
deleted file mode 100644
index bca7305..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/empty-load-store.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s
-; PR2612
-
-@current_foo = internal global { } zeroinitializer
-
-define i32 @foo() {
-entry:
- %retval = alloca i32
- store i32 0, i32* %retval
- %local_foo = alloca { }
- load { }* @current_foo
- store { } %0, { }* %local_foo
- br label %return
-
-return:
- load i32* %retval
- ret i32 %1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/externally_available.ll b/libclamav/c++/llvm/test/CodeGen/Generic/externally_available.ll
deleted file mode 100644
index 7976cc9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/externally_available.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s | not grep test_
-
-; test_function should not be emitted to the .s file.
-define available_externally i32 @test_function() {
- ret i32 4
-}
-
-; test_global should not be emitted to the .s file.
-@test_global = available_externally global i32 4
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/fastcall.ll b/libclamav/c++/llvm/test/CodeGen/Generic/fastcall.ll
deleted file mode 100644
index 35e04f1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/fastcall.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; Test fastcc works. Test from bug 2770.
-; RUN: llc < %s -relocation-model=pic
-
-
-%struct.__gcov_var = type { i32 }
-@__gcov_var = external global %struct.__gcov_var
-
-define fastcc void @gcov_read_words(i32 %words) {
-entry:
- store i32 %words, i32* getelementptr (%struct.__gcov_var*
-@__gcov_var,
-i32 0, i32 0)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/fneg-fabs.ll b/libclamav/c++/llvm/test/CodeGen/Generic/fneg-fabs.ll
deleted file mode 100644
index 2f2f597..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/fneg-fabs.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s
-
-define double @fneg(double %X) {
- %Y = fsub double -0.000000e+00, %X ; <double> [#uses=1]
- ret double %Y
-}
-
-define float @fnegf(float %X) {
- %Y = fsub float -0.000000e+00, %X ; <float> [#uses=1]
- ret float %Y
-}
-
-declare double @fabs(double)
-
-declare float @fabsf(float)
-
-define double @fabstest(double %X) {
- %Y = call double @fabs( double %X ) ; <double> [#uses=1]
- ret double %Y
-}
-
-define float @fabsftest(float %X) {
- %Y = call float @fabsf( float %X ) ; <float> [#uses=1]
- ret float %Y
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/fp-to-int-invalid.ll b/libclamav/c++/llvm/test/CodeGen/Generic/fp-to-int-invalid.ll
deleted file mode 100644
index cdcc3a2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/fp-to-int-invalid.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s
-; PR4057
-define void @test_cast_float_to_char(i8* %result) nounwind {
-entry:
- %result_addr = alloca i8* ; <i8**> [#uses=2]
- %test = alloca float ; <float*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i8* %result, i8** %result_addr
- store float 0x40B2AFA160000000, float* %test, align 4
- %0 = load float* %test, align 4 ; <float> [#uses=1]
- %1 = fptosi float %0 to i8 ; <i8> [#uses=1]
- %2 = load i8** %result_addr, align 4 ; <i8*> [#uses=1]
- store i8 %1, i8* %2, align 1
- br label %return
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/fp_to_int.ll b/libclamav/c++/llvm/test/CodeGen/Generic/fp_to_int.ll
deleted file mode 100644
index ad94413..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/fp_to_int.ll
+++ /dev/null
@@ -1,81 +0,0 @@
-; RUN: llc < %s
-
-define i8 @test1(double %X) {
- %tmp.1 = fptosi double %X to i8 ; <i8> [#uses=1]
- ret i8 %tmp.1
-}
-
-define i16 @test2(double %X) {
- %tmp.1 = fptosi double %X to i16 ; <i16> [#uses=1]
- ret i16 %tmp.1
-}
-
-define i32 @test3(double %X) {
- %tmp.1 = fptosi double %X to i32 ; <i32> [#uses=1]
- ret i32 %tmp.1
-}
-
-define i64 @test4(double %X) {
- %tmp.1 = fptosi double %X to i64 ; <i64> [#uses=1]
- ret i64 %tmp.1
-}
-
-define i8 @test1u(double %X) {
- %tmp.1 = fptoui double %X to i8 ; <i8> [#uses=1]
- ret i8 %tmp.1
-}
-
-define i16 @test2u(double %X) {
- %tmp.1 = fptoui double %X to i16 ; <i16> [#uses=1]
- ret i16 %tmp.1
-}
-
-define i32 @test3u(double %X) {
- %tmp.1 = fptoui double %X to i32 ; <i32> [#uses=1]
- ret i32 %tmp.1
-}
-
-define i64 @test4u(double %X) {
- %tmp.1 = fptoui double %X to i64 ; <i64> [#uses=1]
- ret i64 %tmp.1
-}
-
-define i8 @test1f(float %X) {
- %tmp.1 = fptosi float %X to i8 ; <i8> [#uses=1]
- ret i8 %tmp.1
-}
-
-define i16 @test2f(float %X) {
- %tmp.1 = fptosi float %X to i16 ; <i16> [#uses=1]
- ret i16 %tmp.1
-}
-
-define i32 @test3f(float %X) {
- %tmp.1 = fptosi float %X to i32 ; <i32> [#uses=1]
- ret i32 %tmp.1
-}
-
-define i64 @test4f(float %X) {
- %tmp.1 = fptosi float %X to i64 ; <i64> [#uses=1]
- ret i64 %tmp.1
-}
-
-define i8 @test1uf(float %X) {
- %tmp.1 = fptoui float %X to i8 ; <i8> [#uses=1]
- ret i8 %tmp.1
-}
-
-define i16 @test2uf(float %X) {
- %tmp.1 = fptoui float %X to i16 ; <i16> [#uses=1]
- ret i16 %tmp.1
-}
-
-define i32 @test3uf(float %X) {
- %tmp.1 = fptoui float %X to i32 ; <i32> [#uses=1]
- ret i32 %tmp.1
-}
-
-define i64 @test4uf(float %X) {
- %tmp.1 = fptoui float %X to i64 ; <i64> [#uses=1]
- ret i64 %tmp.1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/fpowi-promote.ll b/libclamav/c++/llvm/test/CodeGen/Generic/fpowi-promote.ll
deleted file mode 100644
index 8dacebe..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/fpowi-promote.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s
-
-; PR1239
-
-define float @test(float %tmp23302331, i32 %tmp23282329 ) {
-
-%tmp2339 = call float @llvm.powi.f32( float %tmp23302331, i32 %tmp23282329 )
- ret float %tmp2339
-}
-
-declare float @llvm.powi.f32(float,i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/fwdtwice.ll b/libclamav/c++/llvm/test/CodeGen/Generic/fwdtwice.ll
deleted file mode 100644
index 6b38f04..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/fwdtwice.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s
-
-;;
-;; Test the sequence:
-;; cast -> setle 0, %cast -> br %cond
-;; This sequence should cause the cast value to be forwarded twice,
-;; i.e., cast is forwarded to the setle and the setle is forwarded
-;; to the branch.
-;; register argument of the "branch-on-register" instruction, i.e.,
-;;
-;; This produces the bogus output instruction:
-;; brlez <NULL VALUE>, .L_SumArray_bb3.
-;; This came from %bb1 of sumarrray.ll generated from sumarray.c.
-
-define i32 @SumArray(i32 %Num) {
- %Num.upgrd.1 = alloca i32 ; <i32*> [#uses=2]
- br label %Top
-
-Top: ; preds = %Top, %0
- store i32 %Num, i32* %Num.upgrd.1
- %reg108 = load i32* %Num.upgrd.1 ; <i32> [#uses=1]
- %cast1006 = bitcast i32 %reg108 to i32 ; <i32> [#uses=1]
- %cond1001 = icmp ule i32 %cast1006, 0 ; <i1> [#uses=1]
- br i1 %cond1001, label %bb6, label %Top
-
-bb6: ; preds = %Top
- ret i32 42
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/getresult-undef.ll b/libclamav/c++/llvm/test/CodeGen/Generic/getresult-undef.ll
deleted file mode 100644
index c675535..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/getresult-undef.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s
-
-define double @foo() {
- %t = getresult {double, double} undef, 1
- ret double %t
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/global-ret0.ll b/libclamav/c++/llvm/test/CodeGen/Generic/global-ret0.ll
deleted file mode 100644
index 74bff87..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/global-ret0.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s
-
-@g = global i32 0 ; <i32*> [#uses=1]
-
-define i32 @main() {
- %h = load i32* @g ; <i32> [#uses=1]
- ret i32 %h
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/hello.ll b/libclamav/c++/llvm/test/CodeGen/Generic/hello.ll
deleted file mode 100644
index 705945c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/hello.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s
-
-@.str_1 = internal constant [7 x i8] c"hello\0A\00" ; <[7 x i8]*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-define i32 @main() {
- %s = getelementptr [7 x i8]* @.str_1, i64 0, i64 0 ; <i8*> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %s ) ; <i32>:1 [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/i128-addsub.ll b/libclamav/c++/llvm/test/CodeGen/Generic/i128-addsub.ll
deleted file mode 100644
index e7cbf4a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/i128-addsub.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; RUN: llc < %s
-
-define void @test_add(i64 %AL, i64 %AH, i64 %BL, i64 %BH, i64* %RL, i64* %RH) {
-entry:
- %tmp1 = zext i64 %AL to i128 ; <i128> [#uses=1]
- %tmp23 = zext i64 %AH to i128 ; <i128> [#uses=1]
- %tmp4 = shl i128 %tmp23, 64 ; <i128> [#uses=1]
- %tmp5 = or i128 %tmp4, %tmp1 ; <i128> [#uses=1]
- %tmp67 = zext i64 %BL to i128 ; <i128> [#uses=1]
- %tmp89 = zext i64 %BH to i128 ; <i128> [#uses=1]
- %tmp11 = shl i128 %tmp89, 64 ; <i128> [#uses=1]
- %tmp12 = or i128 %tmp11, %tmp67 ; <i128> [#uses=1]
- %tmp15 = add i128 %tmp12, %tmp5 ; <i128> [#uses=2]
- %tmp1617 = trunc i128 %tmp15 to i64 ; <i64> [#uses=1]
- store i64 %tmp1617, i64* %RL
- %tmp21 = lshr i128 %tmp15, 64 ; <i128> [#uses=1]
- %tmp2122 = trunc i128 %tmp21 to i64 ; <i64> [#uses=1]
- store i64 %tmp2122, i64* %RH
- ret void
-}
-
-define void @test_sub(i64 %AL, i64 %AH, i64 %BL, i64 %BH, i64* %RL, i64* %RH) {
-entry:
- %tmp1 = zext i64 %AL to i128 ; <i128> [#uses=1]
- %tmp23 = zext i64 %AH to i128 ; <i128> [#uses=1]
- %tmp4 = shl i128 %tmp23, 64 ; <i128> [#uses=1]
- %tmp5 = or i128 %tmp4, %tmp1 ; <i128> [#uses=1]
- %tmp67 = zext i64 %BL to i128 ; <i128> [#uses=1]
- %tmp89 = zext i64 %BH to i128 ; <i128> [#uses=1]
- %tmp11 = shl i128 %tmp89, 64 ; <i128> [#uses=1]
- %tmp12 = or i128 %tmp11, %tmp67 ; <i128> [#uses=1]
- %tmp15 = sub i128 %tmp5, %tmp12 ; <i128> [#uses=2]
- %tmp1617 = trunc i128 %tmp15 to i64 ; <i64> [#uses=1]
- store i64 %tmp1617, i64* %RL
- %tmp21 = lshr i128 %tmp15, 64 ; <i128> [#uses=1]
- %tmp2122 = trunc i128 %tmp21 to i64 ; <i64> [#uses=1]
- store i64 %tmp2122, i64* %RH
- ret void
-}
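The i128-addsub.ll test removed above glues two 64-bit halves into an i128 value, adds or subtracts them, and splits the result back into halves. A minimal C sketch of the same idea, assuming a 64-bit GCC/Clang target where the unsigned __int128 extension is available (the helper below is hypothetical, not code from the test):

#include <stdint.h>

/* Combine two 64-bit halves into a 128-bit value, add, and split
   the sum back into low and high halves (cf. test_add above). */
static void add128(uint64_t al, uint64_t ah, uint64_t bl, uint64_t bh,
                   uint64_t *rl, uint64_t *rh)
{
    unsigned __int128 a = ((unsigned __int128)ah << 64) | al;
    unsigned __int128 b = ((unsigned __int128)bh << 64) | bl;
    unsigned __int128 r = a + b;

    *rl = (uint64_t)r;           /* low 64 bits */
    *rh = (uint64_t)(r >> 64);   /* high 64 bits, including the carry */
}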
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/i128-arith.ll b/libclamav/c++/llvm/test/CodeGen/Generic/i128-arith.ll
deleted file mode 100644
index cf10463..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/i128-arith.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s
-
-define i64 @foo(i64 %x, i64 %y, i32 %amt) {
- %tmp0 = zext i64 %x to i128
- %tmp1 = sext i64 %y to i128
- %tmp2 = or i128 %tmp0, %tmp1
- %tmp7 = zext i32 13 to i128
- %tmp3 = lshr i128 %tmp2, %tmp7
- %tmp4 = trunc i128 %tmp3 to i64
- ret i64 %tmp4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/inline-asm-special-strings.ll b/libclamav/c++/llvm/test/CodeGen/Generic/inline-asm-special-strings.ll
deleted file mode 100644
index d18221e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/inline-asm-special-strings.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s | grep "foo 0 0"
-
-define void @bar() nounwind {
- tail call void asm sideeffect "foo ${:uid} ${:uid}", ""() nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/intrinsics.ll b/libclamav/c++/llvm/test/CodeGen/Generic/intrinsics.ll
deleted file mode 100644
index 29bc499..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/intrinsics.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc < %s
-
-;; SQRT
-declare float @llvm.sqrt.f32(float)
-
-declare double @llvm.sqrt.f64(double)
-
-define double @test_sqrt(float %F) {
- %G = call float @llvm.sqrt.f32( float %F ) ; <float> [#uses=1]
- %H = fpext float %G to double ; <double> [#uses=1]
- %I = call double @llvm.sqrt.f64( double %H ) ; <double> [#uses=1]
- ret double %I
-}
-
-
-; SIN
-declare float @sinf(float) readonly
-
-declare double @sin(double) readonly
-
-define double @test_sin(float %F) {
- %G = call float @sinf( float %F ) ; <float> [#uses=1]
- %H = fpext float %G to double ; <double> [#uses=1]
- %I = call double @sin( double %H ) ; <double> [#uses=1]
- ret double %I
-}
-
-
-; COS
-declare float @cosf(float) readonly
-
-declare double @cos(double) readonly
-
-define double @test_cos(float %F) {
- %G = call float @cosf( float %F ) ; <float> [#uses=1]
- %H = fpext float %G to double ; <double> [#uses=1]
- %I = call double @cos( double %H ) ; <double> [#uses=1]
- ret double %I
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/invalid-memcpy.ll b/libclamav/c++/llvm/test/CodeGen/Generic/invalid-memcpy.ll
deleted file mode 100644
index 8448565..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/invalid-memcpy.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s
-
-; This testcase is invalid (the alignment specified for memcpy is
-; greater than the alignment guaranteed for Qux or C.0.1173), but it
-; should compile, not crash the code generator.
-
-@C.0.1173 = external constant [33 x i8] ; <[33 x i8]*> [#uses=1]
-
-define void @Bork() {
-entry:
- %Qux = alloca [33 x i8] ; <[33 x i8]*> [#uses=1]
- %Qux1 = bitcast [33 x i8]* %Qux to i8* ; <i8*> [#uses=1]
- call void @llvm.memcpy.i64( i8* %Qux1, i8* getelementptr ([33 x i8]* @C.0.1173, i32 0, i32 0), i64 33, i32 8 )
- ret void
-}
-
-declare void @llvm.memcpy.i64(i8*, i8*, i64, i32)
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/isunord.ll b/libclamav/c++/llvm/test/CodeGen/Generic/isunord.ll
deleted file mode 100644
index ebbba01..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/isunord.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s
-
-declare i1 @llvm.isunordered.f64(double, double)
-
-define i1 @test(double %X, double %Y) {
- %tmp27 = fcmp uno double %X, %Y ; <i1> [#uses=1]
- ret i1 %tmp27
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/llvm-ct-intrinsics.ll b/libclamav/c++/llvm/test/CodeGen/Generic/llvm-ct-intrinsics.ll
deleted file mode 100644
index 1db7549..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/llvm-ct-intrinsics.ll
+++ /dev/null
@@ -1,62 +0,0 @@
-; Make sure this testcase is supported by all code generators
-; RUN: llc < %s
-
-declare i64 @llvm.ctpop.i64(i64)
-
-declare i32 @llvm.ctpop.i32(i32)
-
-declare i16 @llvm.ctpop.i16(i16)
-
-declare i8 @llvm.ctpop.i8(i8)
-
-define void @ctpoptest(i8 %A, i16 %B, i32 %C, i64 %D, i8* %AP, i16* %BP, i32* %CP, i64* %DP) {
- %a = call i8 @llvm.ctpop.i8( i8 %A ) ; <i8> [#uses=1]
- %b = call i16 @llvm.ctpop.i16( i16 %B ) ; <i16> [#uses=1]
- %c = call i32 @llvm.ctpop.i32( i32 %C ) ; <i32> [#uses=1]
- %d = call i64 @llvm.ctpop.i64( i64 %D ) ; <i64> [#uses=1]
- store i8 %a, i8* %AP
- store i16 %b, i16* %BP
- store i32 %c, i32* %CP
- store i64 %d, i64* %DP
- ret void
-}
-
-declare i64 @llvm.ctlz.i64(i64)
-
-declare i32 @llvm.ctlz.i32(i32)
-
-declare i16 @llvm.ctlz.i16(i16)
-
-declare i8 @llvm.ctlz.i8(i8)
-
-define void @ctlztest(i8 %A, i16 %B, i32 %C, i64 %D, i8* %AP, i16* %BP, i32* %CP, i64* %DP) {
- %a = call i8 @llvm.ctlz.i8( i8 %A ) ; <i8> [#uses=1]
- %b = call i16 @llvm.ctlz.i16( i16 %B ) ; <i16> [#uses=1]
- %c = call i32 @llvm.ctlz.i32( i32 %C ) ; <i32> [#uses=1]
- %d = call i64 @llvm.ctlz.i64( i64 %D ) ; <i64> [#uses=1]
- store i8 %a, i8* %AP
- store i16 %b, i16* %BP
- store i32 %c, i32* %CP
- store i64 %d, i64* %DP
- ret void
-}
-
-declare i64 @llvm.cttz.i64(i64)
-
-declare i32 @llvm.cttz.i32(i32)
-
-declare i16 @llvm.cttz.i16(i16)
-
-declare i8 @llvm.cttz.i8(i8)
-
-define void @cttztest(i8 %A, i16 %B, i32 %C, i64 %D, i8* %AP, i16* %BP, i32* %CP, i64* %DP) {
- %a = call i8 @llvm.cttz.i8( i8 %A ) ; <i8> [#uses=1]
- %b = call i16 @llvm.cttz.i16( i16 %B ) ; <i16> [#uses=1]
- %c = call i32 @llvm.cttz.i32( i32 %C ) ; <i32> [#uses=1]
- %d = call i64 @llvm.cttz.i64( i64 %D ) ; <i64> [#uses=1]
- store i8 %a, i8* %AP
- store i16 %b, i16* %BP
- store i32 %c, i32* %CP
- store i64 %d, i64* %DP
- ret void
-}
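llvm-ct-intrinsics.ll, removed above, covers the ctpop, ctlz and cttz bit-counting intrinsics at widths i8 through i64. A small C sketch of the same operations, assuming GCC/Clang's __builtin_popcount, __builtin_clz and __builtin_ctz are available (the clz/ctz builtins are undefined for a zero argument, so the sketch guards against it):

#include <stdio.h>

/* Population count, leading zeros and trailing zeros of a 32-bit value. */
static void count_bits(unsigned x)
{
    printf("popcount(0x%x) = %d\n", x, __builtin_popcount(x));
    if (x != 0) { /* clz/ctz are undefined for a zero argument */
        printf("clz(0x%x) = %d\n", x, __builtin_clz(x));
        printf("ctz(0x%x) = %d\n", x, __builtin_ctz(x));
    }
}

int main(void)
{
    count_bits(0xf0u); /* popcount 4, clz 24, ctz 4 */
    return 0;
}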
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/multiple-return-values-cross-block-with-invoke.ll b/libclamav/c++/llvm/test/CodeGen/Generic/multiple-return-values-cross-block-with-invoke.ll
deleted file mode 100644
index 282e973..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/multiple-return-values-cross-block-with-invoke.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s
-
-declare { i64, double } @wild()
-
-define void @foo(i64* %p, double* %q) nounwind {
- %t = invoke { i64, double } @wild() to label %normal unwind label %handler
-
-normal:
- %mrv_gr = getresult { i64, double } %t, 0
- store i64 %mrv_gr, i64* %p
- %mrv_gr12681 = getresult { i64, double } %t, 1
- store double %mrv_gr12681, double* %q
- ret void
-
-handler:
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/negintconst.ll b/libclamav/c++/llvm/test/CodeGen/Generic/negintconst.ll
deleted file mode 100644
index 67d775e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/negintconst.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; RUN: llc < %s
-
-; Test that a negative constant smaller than 64 bits (e.g., int)
-; is correctly implemented with sign-extension.
-; In particular, the current code generated is:
-;
-; main:
-; .L_main_LL_0:
-; save %o6, -224, %o6
-; setx .G_fmtArg_1, %o1, %o0
-; setuw 1, %o1 ! i = 1
-; setuw 4294967295, %o3 ! THE BUG: 0x00000000ffffffff
-; setsw 0, %i0
-; add %i6, 1999, %o2 ! fval
-; add %o1, %g0, %o1
-; add %o0, 0, %o0
-; mulx %o1, %o3, %o1 ! ERROR: 0xffffffff; should be -1
-; add %o1, 3, %o1 ! ERROR: 0x100000002; should be 0x2
-; mulx %o1, 12, %o3 !
-; add %o2, %o3, %o3 ! produces bad address!
-; call printf
-; nop
-; jmpl %i7+8, %g0
-; restore %g0, 0, %g0
-;
-; llc produces:
-; ioff = 2 fval = 0xffffffff7fffec90 &fval[2] = 0xb7fffeca8
-; instead of:
-; ioff = 2 fval = 0xffffffff7fffec90 &fval[2] = 0xffffffff7fffeca8
-;
- %Results = type { float, float, float }
-@fmtArg = internal global [39 x i8] c"ioff = %u\09fval = 0x%p\09&fval[2] = 0x%p\0A\00" ; <[39 x i8]*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-define i32 @main() {
- %fval = alloca %Results, i32 4 ; <%Results*> [#uses=2]
- %i = add i32 1, 0 ; <i32> [#uses=1]
- %iscale = mul i32 %i, -1 ; <i32> [#uses=1]
- %ioff = add i32 %iscale, 3 ; <i32> [#uses=2]
- %ioff.upgrd.1 = zext i32 %ioff to i64 ; <i64> [#uses=1]
- %fptr = getelementptr %Results* %fval, i64 %ioff.upgrd.1 ; <%Results*> [#uses=1]
- %castFmt = getelementptr [39 x i8]* @fmtArg, i64 0, i64 0 ; <i8*> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %castFmt, i32 %ioff, %Results* %fval, %Results* %fptr ) ; <i32>:1 [#uses=0]
- ret i32 0
-}
-
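The comment block in negintconst.ll describes a back-end bug where a negative 32-bit constant was zero-extended rather than sign-extended during address arithmetic. The C-level requirement it encodes is simply that widening a negative int to 64 bits must preserve the sign; a minimal illustration (not taken from the test):

#include <stdio.h>

int main(void)
{
    int i = 1;
    int iscale = i * -1;        /* -1, stored as the 32-bit pattern 0xffffffff */
    long long widened = iscale; /* sign-extends to -1, not 4294967295 */
    long long ioff = widened + 3;
    printf("ioff = %lld\n", ioff); /* prints 2; a zero-extending bug would yield 0x100000002 */
    return 0;
}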
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/nested-select.ll b/libclamav/c++/llvm/test/CodeGen/Generic/nested-select.ll
deleted file mode 100644
index f81fed3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/nested-select.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -o /dev/null
-
-; Test that select of a select works
-
-%typedef.tree = type opaque
-
-define i32 @ic_test(double %p.0.2.0.val, double %p.0.2.1.val, double %p.0.2.2.val, %typedef.tree* %t) {
- %result.1.0 = zext i1 false to i32 ; <i32> [#uses=1]
- %tmp.55 = fcmp oge double 0.000000e+00, 1.000000e+00 ; <i1> [#uses=1]
- %tmp.66 = fdiv double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- br label %N
-
-N: ; preds = %0
- %result.1.1 = select i1 %tmp.55, i32 0, i32 %result.1.0 ; <i32> [#uses=1]
- %tmp.75 = fcmp oge double %tmp.66, 1.000000e+00 ; <i1> [#uses=1]
- %retval1 = select i1 %tmp.75, i32 0, i32 %result.1.1 ; <i32> [#uses=1]
- ret i32 %retval1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/pr2625.ll b/libclamav/c++/llvm/test/CodeGen/Generic/pr2625.ll
deleted file mode 100644
index 3e3dc4b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/pr2625.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s
-; PR2625
-
-define i32 @main({ i32, { i32 } }*) {
-entry:
- %state = alloca { i32, { i32 } }* ; <{ i32, { i32 } }**> [#uses=2]
- store { i32, { i32 } }* %0, { i32, { i32 } }** %state
- %retval = alloca i32 ; <i32*> [#uses=2]
- store i32 0, i32* %retval
- load { i32, { i32 } }** %state ; <{ i32, { i32 } }*>:1 [#uses=1]
- store { i32, { i32 } } zeroinitializer, { i32, { i32 } }* %1
- br label %return
-
-return: ; preds = %entry
- load i32* %retval ; <i32>:2 [#uses=1]
- ret i32 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/pr3288.ll b/libclamav/c++/llvm/test/CodeGen/Generic/pr3288.ll
deleted file mode 100644
index b62710f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/pr3288.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; RUN: llc < %s
-; PR3288
-
-define void @a() {
- %i = insertvalue [2 x [2 x i32]] undef, [2 x i32] undef, 1
- ret void
-}
-define void @b() {
- %i = insertvalue {{i32,float},{i16,double}} undef, {i16,double} undef, 1
- ret void
-}
-define void @c() {
- %i = insertvalue [2 x [2 x i32]] zeroinitializer, [2 x i32] zeroinitializer, 1
- ret void
-}
-define void @d() {
- %i = insertvalue {{i32,float},{i16,double}} zeroinitializer, {i16,double} zeroinitializer, 1
- ret void
-}
-define void @e() {
- %i = insertvalue [2 x [2 x i32]] undef, [2 x i32] undef, 0
- ret void
-}
-define void @f() {
- %i = insertvalue {{i32,float},{i16,double}} undef, {i32,float} undef, 0
- ret void
-}
-define void @g() {
- %i = insertvalue [2 x [2 x i32]] zeroinitializer, [2 x i32] zeroinitializer, 0
- ret void
-}
-define void @h() {
- %i = insertvalue {{i32,float},{i16,double}} zeroinitializer, {i32,float} zeroinitializer, 0
- ret void
-}
-define void @ax() {
- %i = insertvalue [2 x [2 x i32]] undef, i32 undef, 1, 1
- ret void
-}
-define void @bx() {
- %i = insertvalue {{i32,float},{i16,double}} undef, double undef, 1, 1
- ret void
-}
-define void @cx() {
- %i = insertvalue [2 x [2 x i32]] zeroinitializer, i32 zeroinitializer, 1, 1
- ret void
-}
-define void @dx() {
- %i = insertvalue {{i32,float},{i16,double}} zeroinitializer, double zeroinitializer, 1, 1
- ret void
-}
-define void @ex() {
- %i = insertvalue [2 x [2 x i32]] undef, i32 undef, 0, 1
- ret void
-}
-define void @fx() {
- %i = insertvalue {{i32,float},{i16,double}} undef, float undef, 0, 1
- ret void
-}
-define void @gx() {
- %i = insertvalue [2 x [2 x i32]] zeroinitializer, i32 zeroinitializer, 0, 1
- ret void
-}
-define void @hx() {
- %i = insertvalue {{i32,float},{i16,double}} zeroinitializer, float zeroinitializer, 0, 1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/print-add.ll b/libclamav/c++/llvm/test/CodeGen/Generic/print-add.ll
deleted file mode 100644
index 95608dc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/print-add.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s
-
-@.str_1 = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-define i32 @main() {
- %f = getelementptr [4 x i8]* @.str_1, i64 0, i64 0 ; <i8*> [#uses=3]
- %d = add i32 1, 0 ; <i32> [#uses=3]
- call i32 (i8*, ...)* @printf( i8* %f, i32 %d ) ; <i32>:1 [#uses=0]
- %e = add i32 38, 2 ; <i32> [#uses=2]
- call i32 (i8*, ...)* @printf( i8* %f, i32 %e ) ; <i32>:2 [#uses=0]
- %g = add i32 %d, %d ; <i32> [#uses=1]
- %h = add i32 %e, %g ; <i32> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %f, i32 %h ) ; <i32>:3 [#uses=0]
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/print-arith-fp.ll b/libclamav/c++/llvm/test/CodeGen/Generic/print-arith-fp.ll
deleted file mode 100644
index d129ff8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/print-arith-fp.ll
+++ /dev/null
@@ -1,61 +0,0 @@
-; RUN: llc < %s
-@a_str = internal constant [8 x i8] c"a = %f\0A\00" ; <[8 x i8]*> [#uses=1]
-@b_str = internal constant [8 x i8] c"b = %f\0A\00" ; <[8 x i8]*> [#uses=1]
-@add_str = internal constant [12 x i8] c"a + b = %f\0A\00" ; <[12 x i8]*> [#uses=1]
-@sub_str = internal constant [12 x i8] c"a - b = %f\0A\00" ; <[12 x i8]*> [#uses=1]
-@mul_str = internal constant [12 x i8] c"a * b = %f\0A\00" ; <[12 x i8]*> [#uses=1]
-@div_str = internal constant [12 x i8] c"b / a = %f\0A\00" ; <[12 x i8]*> [#uses=1]
-@rem_str = internal constant [13 x i8] c"b %% a = %f\0A\00" ; <[13 x i8]*> [#uses=1]
-@lt_str = internal constant [12 x i8] c"a < b = %d\0A\00" ; <[12 x i8]*> [#uses=1]
-@le_str = internal constant [13 x i8] c"a <= b = %d\0A\00" ; <[13 x i8]*> [#uses=1]
-@gt_str = internal constant [12 x i8] c"a > b = %d\0A\00" ; <[12 x i8]*> [#uses=1]
-@ge_str = internal constant [13 x i8] c"a >= b = %d\0A\00" ; <[13 x i8]*> [#uses=1]
-@eq_str = internal constant [13 x i8] c"a == b = %d\0A\00" ; <[13 x i8]*> [#uses=1]
-@ne_str = internal constant [13 x i8] c"a != b = %d\0A\00" ; <[13 x i8]*> [#uses=1]
-@A = global double 2.000000e+00 ; <double*> [#uses=1]
-@B = global double 5.000000e+00 ; <double*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-define i32 @main() {
- %a = load double* @A ; <double> [#uses=12]
- %b = load double* @B ; <double> [#uses=12]
- %a_s = getelementptr [8 x i8]* @a_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %b_s = getelementptr [8 x i8]* @b_str, i64 0, i64 0 ; <i8*> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %a_s, double %a ) ; <i32>:1 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %b_s, double %b ) ; <i32>:2 [#uses=0]
- %add_r = fadd double %a, %b ; <double> [#uses=1]
- %sub_r = fsub double %a, %b ; <double> [#uses=1]
- %mul_r = fmul double %a, %b ; <double> [#uses=1]
- %div_r = fdiv double %b, %a ; <double> [#uses=1]
- %rem_r = frem double %b, %a ; <double> [#uses=1]
- %add_s = getelementptr [12 x i8]* @add_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %sub_s = getelementptr [12 x i8]* @sub_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %mul_s = getelementptr [12 x i8]* @mul_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %div_s = getelementptr [12 x i8]* @div_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %rem_s = getelementptr [13 x i8]* @rem_str, i64 0, i64 0 ; <i8*> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %add_s, double %add_r ) ; <i32>:3 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %sub_s, double %sub_r ) ; <i32>:4 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %mul_s, double %mul_r ) ; <i32>:5 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %div_s, double %div_r ) ; <i32>:6 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %rem_s, double %rem_r ) ; <i32>:7 [#uses=0]
- %lt_r = fcmp olt double %a, %b ; <i1> [#uses=1]
- %le_r = fcmp ole double %a, %b ; <i1> [#uses=1]
- %gt_r = fcmp ogt double %a, %b ; <i1> [#uses=1]
- %ge_r = fcmp oge double %a, %b ; <i1> [#uses=1]
- %eq_r = fcmp oeq double %a, %b ; <i1> [#uses=1]
- %ne_r = fcmp une double %a, %b ; <i1> [#uses=1]
- %lt_s = getelementptr [12 x i8]* @lt_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %le_s = getelementptr [13 x i8]* @le_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %gt_s = getelementptr [12 x i8]* @gt_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %ge_s = getelementptr [13 x i8]* @ge_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %eq_s = getelementptr [13 x i8]* @eq_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %ne_s = getelementptr [13 x i8]* @ne_str, i64 0, i64 0 ; <i8*> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %lt_s, i1 %lt_r ) ; <i32>:8 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %le_s, i1 %le_r ) ; <i32>:9 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %gt_s, i1 %gt_r ) ; <i32>:10 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %ge_s, i1 %ge_r ) ; <i32>:11 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %eq_s, i1 %eq_r ) ; <i32>:12 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %ne_s, i1 %ne_r ) ; <i32>:13 [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/print-arith-int.ll b/libclamav/c++/llvm/test/CodeGen/Generic/print-arith-int.ll
deleted file mode 100644
index ce938cf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/print-arith-int.ll
+++ /dev/null
@@ -1,84 +0,0 @@
-; RUN: llc < %s
-@a_str = internal constant [8 x i8] c"a = %d\0A\00" ; <[8 x i8]*> [#uses=1]
-@b_str = internal constant [8 x i8] c"b = %d\0A\00" ; <[8 x i8]*> [#uses=1]
-@add_str = internal constant [12 x i8] c"a + b = %d\0A\00" ; <[12 x i8]*> [#uses=1]
-@sub_str = internal constant [12 x i8] c"a - b = %d\0A\00" ; <[12 x i8]*> [#uses=1]
-@mul_str = internal constant [12 x i8] c"a * b = %d\0A\00" ; <[12 x i8]*> [#uses=1]
-@div_str = internal constant [12 x i8] c"b / a = %d\0A\00" ; <[12 x i8]*> [#uses=1]
-@rem_str = internal constant [13 x i8] c"b \5C% a = %d\0A\00" ; <[13 x i8]*> [#uses=1]
-@lt_str = internal constant [12 x i8] c"a < b = %d\0A\00" ; <[12 x i8]*> [#uses=1]
-@le_str = internal constant [13 x i8] c"a <= b = %d\0A\00" ; <[13 x i8]*> [#uses=1]
-@gt_str = internal constant [12 x i8] c"a > b = %d\0A\00" ; <[12 x i8]*> [#uses=1]
-@ge_str = internal constant [13 x i8] c"a >= b = %d\0A\00" ; <[13 x i8]*> [#uses=1]
-@eq_str = internal constant [13 x i8] c"a == b = %d\0A\00" ; <[13 x i8]*> [#uses=1]
-@ne_str = internal constant [13 x i8] c"a != b = %d\0A\00" ; <[13 x i8]*> [#uses=1]
-@and_str = internal constant [12 x i8] c"a & b = %d\0A\00" ; <[12 x i8]*> [#uses=1]
-@or_str = internal constant [12 x i8] c"a | b = %d\0A\00" ; <[12 x i8]*> [#uses=1]
-@xor_str = internal constant [12 x i8] c"a ^ b = %d\0A\00" ; <[12 x i8]*> [#uses=1]
-@shl_str = internal constant [13 x i8] c"b << a = %d\0A\00" ; <[13 x i8]*> [#uses=1]
-@shr_str = internal constant [13 x i8] c"b >> a = %d\0A\00" ; <[13 x i8]*> [#uses=1]
-@A = global i32 2 ; <i32*> [#uses=1]
-@B = global i32 5 ; <i32*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-define i32 @main() {
- %a = load i32* @A ; <i32> [#uses=16]
- %b = load i32* @B ; <i32> [#uses=17]
- %a_s = getelementptr [8 x i8]* @a_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %b_s = getelementptr [8 x i8]* @b_str, i64 0, i64 0 ; <i8*> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %a_s, i32 %a ) ; <i32>:1 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %b_s, i32 %b ) ; <i32>:2 [#uses=0]
- %add_r = add i32 %a, %b ; <i32> [#uses=1]
- %sub_r = sub i32 %a, %b ; <i32> [#uses=1]
- %mul_r = mul i32 %a, %b ; <i32> [#uses=1]
- %div_r = sdiv i32 %b, %a ; <i32> [#uses=1]
- %rem_r = srem i32 %b, %a ; <i32> [#uses=1]
- %add_s = getelementptr [12 x i8]* @add_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %sub_s = getelementptr [12 x i8]* @sub_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %mul_s = getelementptr [12 x i8]* @mul_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %div_s = getelementptr [12 x i8]* @div_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %rem_s = getelementptr [13 x i8]* @rem_str, i64 0, i64 0 ; <i8*> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %add_s, i32 %add_r ) ; <i32>:3 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %sub_s, i32 %sub_r ) ; <i32>:4 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %mul_s, i32 %mul_r ) ; <i32>:5 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %div_s, i32 %div_r ) ; <i32>:6 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %rem_s, i32 %rem_r ) ; <i32>:7 [#uses=0]
- %lt_r = icmp slt i32 %a, %b ; <i1> [#uses=1]
- %le_r = icmp sle i32 %a, %b ; <i1> [#uses=1]
- %gt_r = icmp sgt i32 %a, %b ; <i1> [#uses=1]
- %ge_r = icmp sge i32 %a, %b ; <i1> [#uses=1]
- %eq_r = icmp eq i32 %a, %b ; <i1> [#uses=1]
- %ne_r = icmp ne i32 %a, %b ; <i1> [#uses=1]
- %lt_s = getelementptr [12 x i8]* @lt_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %le_s = getelementptr [13 x i8]* @le_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %gt_s = getelementptr [12 x i8]* @gt_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %ge_s = getelementptr [13 x i8]* @ge_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %eq_s = getelementptr [13 x i8]* @eq_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %ne_s = getelementptr [13 x i8]* @ne_str, i64 0, i64 0 ; <i8*> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %lt_s, i1 %lt_r ) ; <i32>:8 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %le_s, i1 %le_r ) ; <i32>:9 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %gt_s, i1 %gt_r ) ; <i32>:10 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %ge_s, i1 %ge_r ) ; <i32>:11 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %eq_s, i1 %eq_r ) ; <i32>:12 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %ne_s, i1 %ne_r ) ; <i32>:13 [#uses=0]
- %and_r = and i32 %a, %b ; <i32> [#uses=1]
- %or_r = or i32 %a, %b ; <i32> [#uses=1]
- %xor_r = xor i32 %a, %b ; <i32> [#uses=1]
- %u = trunc i32 %a to i8 ; <i8> [#uses=2]
- %shift.upgrd.1 = zext i8 %u to i32 ; <i32> [#uses=1]
- %shl_r = shl i32 %b, %shift.upgrd.1 ; <i32> [#uses=1]
- %shift.upgrd.2 = zext i8 %u to i32 ; <i32> [#uses=1]
- %shr_r = ashr i32 %b, %shift.upgrd.2 ; <i32> [#uses=1]
- %and_s = getelementptr [12 x i8]* @and_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %or_s = getelementptr [12 x i8]* @or_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %xor_s = getelementptr [12 x i8]* @xor_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %shl_s = getelementptr [13 x i8]* @shl_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %shr_s = getelementptr [13 x i8]* @shr_str, i64 0, i64 0 ; <i8*> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %and_s, i32 %and_r ) ; <i32>:14 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %or_s, i32 %or_r ) ; <i32>:15 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %xor_s, i32 %xor_r ) ; <i32>:16 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %shl_s, i32 %shl_r ) ; <i32>:17 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %shr_s, i32 %shr_r ) ; <i32>:18 [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/print-int.ll b/libclamav/c++/llvm/test/CodeGen/Generic/print-int.ll
deleted file mode 100644
index 7ca4b3d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/print-int.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s
-
-@.str_1 = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-define i32 @main() {
- %f = getelementptr [4 x i8]* @.str_1, i64 0, i64 0 ; <i8*> [#uses=1]
- %d = add i32 0, 0 ; <i32> [#uses=1]
- %tmp.0 = call i32 (i8*, ...)* @printf( i8* %f, i32 %d ) ; <i32> [#uses=0]
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/print-mul-exp.ll b/libclamav/c++/llvm/test/CodeGen/Generic/print-mul-exp.ll
deleted file mode 100644
index 90fc55b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/print-mul-exp.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; RUN: llc < %s
-
-@a_str = internal constant [8 x i8] c"a = %d\0A\00" ; <[8 x i8]*> [#uses=1]
-@a_mul_str = internal constant [13 x i8] c"a * %d = %d\0A\00" ; <[13 x i8]*> [#uses=1]
-@A = global i32 2 ; <i32*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-define i32 @main() {
- %a = load i32* @A ; <i32> [#uses=21]
- %a_s = getelementptr [8 x i8]* @a_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %a_mul_s = getelementptr [13 x i8]* @a_mul_str, i64 0, i64 0 ; <i8*> [#uses=20]
- call i32 (i8*, ...)* @printf( i8* %a_s, i32 %a ) ; <i32>:1 [#uses=0]
- %r_0 = mul i32 %a, 0 ; <i32> [#uses=1]
- %r_1 = mul i32 %a, 1 ; <i32> [#uses=1]
- %r_2 = mul i32 %a, 2 ; <i32> [#uses=1]
- %r_3 = mul i32 %a, 3 ; <i32> [#uses=1]
- %r_4 = mul i32 %a, 4 ; <i32> [#uses=1]
- %r_5 = mul i32 %a, 5 ; <i32> [#uses=1]
- %r_6 = mul i32 %a, 6 ; <i32> [#uses=1]
- %r_7 = mul i32 %a, 7 ; <i32> [#uses=1]
- %r_8 = mul i32 %a, 8 ; <i32> [#uses=1]
- %r_9 = mul i32 %a, 9 ; <i32> [#uses=1]
- %r_10 = mul i32 %a, 10 ; <i32> [#uses=1]
- %r_11 = mul i32 %a, 11 ; <i32> [#uses=1]
- %r_12 = mul i32 %a, 12 ; <i32> [#uses=1]
- %r_13 = mul i32 %a, 13 ; <i32> [#uses=1]
- %r_14 = mul i32 %a, 14 ; <i32> [#uses=1]
- %r_15 = mul i32 %a, 15 ; <i32> [#uses=1]
- %r_16 = mul i32 %a, 16 ; <i32> [#uses=1]
- %r_17 = mul i32 %a, 17 ; <i32> [#uses=1]
- %r_18 = mul i32 %a, 18 ; <i32> [#uses=1]
- %r_19 = mul i32 %a, 19 ; <i32> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 0, i32 %r_0 ) ; <i32>:2 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 1, i32 %r_1 ) ; <i32>:3 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 2, i32 %r_2 ) ; <i32>:4 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 3, i32 %r_3 ) ; <i32>:5 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 4, i32 %r_4 ) ; <i32>:6 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 5, i32 %r_5 ) ; <i32>:7 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 6, i32 %r_6 ) ; <i32>:8 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 7, i32 %r_7 ) ; <i32>:9 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 8, i32 %r_8 ) ; <i32>:10 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 9, i32 %r_9 ) ; <i32>:11 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 10, i32 %r_10 ) ; <i32>:12 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 11, i32 %r_11 ) ; <i32>:13 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 12, i32 %r_12 ) ; <i32>:14 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 13, i32 %r_13 ) ; <i32>:15 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 14, i32 %r_14 ) ; <i32>:16 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 15, i32 %r_15 ) ; <i32>:17 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 16, i32 %r_16 ) ; <i32>:18 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 17, i32 %r_17 ) ; <i32>:19 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 18, i32 %r_18 ) ; <i32>:20 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 19, i32 %r_19 ) ; <i32>:21 [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/print-mul.ll b/libclamav/c++/llvm/test/CodeGen/Generic/print-mul.ll
deleted file mode 100644
index 0707f3c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/print-mul.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc < %s
-
-@a_str = internal constant [8 x i8] c"a = %d\0A\00" ; <[8 x i8]*> [#uses=1]
-@b_str = internal constant [8 x i8] c"b = %d\0A\00" ; <[8 x i8]*> [#uses=1]
-@a_mul_str = internal constant [13 x i8] c"a * %d = %d\0A\00" ; <[13 x i8]*> [#uses=1]
-@A = global i32 2 ; <i32*> [#uses=1]
-@B = global i32 5 ; <i32*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-define i32 @main() {
-entry:
- %a = load i32* @A ; <i32> [#uses=2]
- %b = load i32* @B ; <i32> [#uses=1]
- %a_s = getelementptr [8 x i8]* @a_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %b_s = getelementptr [8 x i8]* @b_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %a_mul_s = getelementptr [13 x i8]* @a_mul_str, i64 0, i64 0 ; <i8*> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %a_s, i32 %a ) ; <i32>:0 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %b_s, i32 %b ) ; <i32>:1 [#uses=0]
- br label %shl_test
-
-shl_test: ; preds = %shl_test, %entry
- %s = phi i32 [ 0, %entry ], [ %s_inc, %shl_test ] ; <i32> [#uses=4]
- %result = mul i32 %a, %s ; <i32> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %a_mul_s, i32 %s, i32 %result ) ; <i32>:2 [#uses=0]
- %s_inc = add i32 %s, 1 ; <i32> [#uses=1]
- %done = icmp eq i32 %s, 256 ; <i1> [#uses=1]
- br i1 %done, label %fini, label %shl_test
-
-fini: ; preds = %shl_test
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/print-shift.ll b/libclamav/c++/llvm/test/CodeGen/Generic/print-shift.ll
deleted file mode 100644
index 6c5d222..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/print-shift.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s
-
-@a_str = internal constant [8 x i8] c"a = %d\0A\00" ; <[8 x i8]*> [#uses=1]
-@b_str = internal constant [8 x i8] c"b = %d\0A\00" ; <[8 x i8]*> [#uses=1]
-@a_shl_str = internal constant [14 x i8] c"a << %d = %d\0A\00" ; <[14 x i8]*> [#uses=1]
-@A = global i32 2 ; <i32*> [#uses=1]
-@B = global i32 5 ; <i32*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-define i32 @main() {
-entry:
- %a = load i32* @A ; <i32> [#uses=2]
- %b = load i32* @B ; <i32> [#uses=1]
- %a_s = getelementptr [8 x i8]* @a_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %b_s = getelementptr [8 x i8]* @b_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %a_shl_s = getelementptr [14 x i8]* @a_shl_str, i64 0, i64 0 ; <i8*> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %a_s, i32 %a ) ; <i32>:0 [#uses=0]
- call i32 (i8*, ...)* @printf( i8* %b_s, i32 %b ) ; <i32>:1 [#uses=0]
- br label %shl_test
-
-shl_test: ; preds = %shl_test, %entry
- %s = phi i8 [ 0, %entry ], [ %s_inc, %shl_test ] ; <i8> [#uses=4]
- %shift.upgrd.1 = zext i8 %s to i32 ; <i32> [#uses=1]
- %result = shl i32 %a, %shift.upgrd.1 ; <i32> [#uses=1]
- call i32 (i8*, ...)* @printf( i8* %a_shl_s, i8 %s, i32 %result ) ; <i32>:2 [#uses=0]
- %s_inc = add i8 %s, 1 ; <i8> [#uses=1]
- %done = icmp eq i8 %s, 32 ; <i1> [#uses=1]
- br i1 %done, label %fini, label %shl_test
-
-fini: ; preds = %shl_test
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/ret0.ll b/libclamav/c++/llvm/test/CodeGen/Generic/ret0.ll
deleted file mode 100644
index 9e628a1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/ret0.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s
-
-define i32 @main() {
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/ret42.ll b/libclamav/c++/llvm/test/CodeGen/Generic/ret42.ll
deleted file mode 100644
index f5cd33d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/ret42.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s
-
-define i32 @main() {
- ret i32 42
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/select-cc.ll b/libclamav/c++/llvm/test/CodeGen/Generic/select-cc.ll
deleted file mode 100644
index b653e2a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/select-cc.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s
-; PR2504
-
-define <2 x double> @vector_select(<2 x double> %x, <2 x double> %y) nounwind {
- %x.lo = extractelement <2 x double> %x, i32 0 ; <double> [#uses=1]
- %x.lo.ge = fcmp oge double %x.lo, 0.000000e+00 ; <i1> [#uses=1]
- %a.d = select i1 %x.lo.ge, <2 x double> %y, <2 x double> %x ; <<2 x double>> [#uses=1]
- ret <2 x double> %a.d
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/select.ll b/libclamav/c++/llvm/test/CodeGen/Generic/select.ll
deleted file mode 100644
index 63052c1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/select.ll
+++ /dev/null
@@ -1,187 +0,0 @@
-; RUN: llc < %s
-
-%Domain = type { i8*, i32, i32*, i32, i32, i32*, %Domain* }
-@AConst = constant i32 123 ; <i32*> [#uses=1]
-
-; Test setting values of different constants in registers.
-;
-define void @testConsts(i32 %N, float %X) {
- %a = add i32 %N, 1 ; <i32> [#uses=0]
- %i = add i32 %N, 12345678 ; <i32> [#uses=0]
- %b = add i16 4, 3 ; <i16> [#uses=0]
- %c = fadd float %X, 0.000000e+00 ; <float> [#uses=0]
- %d = fadd float %X, 0x400921CAC0000000 ; <float> [#uses=0]
- %f = add i32 -1, 10 ; <i32> [#uses=0]
- %g = add i16 20, -1 ; <i16> [#uses=0]
- %j = add i16 -1, 30 ; <i16> [#uses=0]
- %h = add i8 40, -1 ; <i8> [#uses=0]
- %k = add i8 -1, 50 ; <i8> [#uses=0]
- ret void
-}
-
-; A SetCC whose result is used should produce instructions to
-; compute the boolean value in a register. One whose result
-; is unused will only generate the condition code but not
-; the boolean result.
-;
-define void @unusedBool(i32* %x, i32* %y) {
- icmp eq i32* %x, %y ; <i1>:1 [#uses=1]
- xor i1 %1, true ; <i1>:2 [#uses=0]
- icmp ne i32* %x, %y ; <i1>:3 [#uses=0]
- ret void
-}
-
-; A constant argument to a Phi produces a Cast instruction in the
-; corresponding predecessor basic block. This checks a few things:
-; -- phi arguments coming from the bottom of the same basic block
-; (they should not be forward substituted in the machine code!)
-; -- code generation for casts of various types
-; -- use of immediate fields for integral constants of different sizes
-; -- branch on a constant condition
-;
-define void @mergeConstants(i32* %x, i32* %y) {
-; <label>:0
- br label %Top
-
-Top: ; preds = %Next, %Top, %0
- phi i32 [ 0, %0 ], [ 1, %Top ], [ 524288, %Next ] ; <i32>:1 [#uses=0]
- phi float [ 0.000000e+00, %0 ], [ 1.000000e+00, %Top ], [ 2.000000e+00, %Next ] ; <float>:2 [#uses=0]
- phi double [ 5.000000e-01, %0 ], [ 1.500000e+00, %Top ], [ 2.500000e+00, %Next ]
- phi i1 [ true, %0 ], [ false, %Top ], [ true, %Next ] ; <i1>:4 [#uses=0]
- br i1 true, label %Top, label %Next
-
-Next: ; preds = %Top
- br label %Top
-}
-
-
-
-; A constant argument to a cast used only once should be forward substituted
-; and loaded where needed, which happens is:
-; -- User of cast has no immediate field
-; -- User of cast has immediate field but constant is too large to fit
-; or constant is not resolved until later (e.g., global address)
-; -- User of cast uses it as a call arg. or return value so it is an implicit
-; use but has to be loaded into a virtual register so that the reg.
-; allocator can allocate the appropriate phys. reg. for it
-;
-define i32* @castconst(float) {
- %castbig = trunc i64 99999999 to i32 ; <i32> [#uses=1]
- %castsmall = trunc i64 1 to i32 ; <i32> [#uses=1]
- %usebig = add i32 %castbig, %castsmall ; <i32> [#uses=0]
- %castglob = bitcast i32* @AConst to i64* ; <i64*> [#uses=1]
- %dummyl = load i64* %castglob ; <i64> [#uses=0]
- %castnull = inttoptr i64 0 to i32* ; <i32*> [#uses=1]
- ret i32* %castnull
-}
-
-; Test branch-on-comparison-with-zero, in two ways:
-; 1. can be folded
-; 2. cannot be folded because result of comparison is used twice
-;
-define void @testbool(i32 %A, i32 %B) {
- br label %Top
-
-Top: ; preds = %loop, %0
- %D = add i32 %A, %B ; <i32> [#uses=2]
- %E = sub i32 %D, -4 ; <i32> [#uses=1]
- %C = icmp sle i32 %E, 0 ; <i1> [#uses=1]
- br i1 %C, label %retlbl, label %loop
-
-loop: ; preds = %loop, %Top
- %F = add i32 %A, %B ; <i32> [#uses=0]
- %G = sub i32 %D, -4 ; <i32> [#uses=1]
- %D.upgrd.1 = icmp sle i32 %G, 0 ; <i1> [#uses=1]
- %E.upgrd.2 = xor i1 %D.upgrd.1, true ; <i1> [#uses=1]
- br i1 %E.upgrd.2, label %loop, label %Top
-
-retlbl: ; preds = %Top
- ret void
-}
-
-
-;; Test use of a boolean result in cast operations.
-;; Requires converting a condition code result into a 0/1 value in a reg.
-;;
-define i32 @castbool(i32 %A, i32 %B) {
-bb0:
- %cond213 = icmp slt i32 %A, %B ; <i1> [#uses=1]
- %cast110 = zext i1 %cond213 to i8 ; <i8> [#uses=1]
- %cast109 = zext i8 %cast110 to i32 ; <i32> [#uses=1]
- ret i32 %cast109
-}
-
-;; Test use of a boolean result in arithmetic and logical operations.
-;; Requires converting a condition code result into a 0/1 value in a reg.
-;;
-define i1 @boolexpr(i1 %b, i32 %N) {
- %b2 = icmp sge i32 %N, 0 ; <i1> [#uses=1]
- %b3 = and i1 %b, %b2 ; <i1> [#uses=1]
- ret i1 %b3
-}
-
-; Test branch on floating point comparison
-;
-define void @testfloatbool(float %x, float %y) {
- br label %Top
-
-Top: ; preds = %Top, %0
- %p = fadd float %x, %y ; <float> [#uses=1]
- %z = fsub float %x, %y ; <float> [#uses=1]
- %b = fcmp ole float %p, %z ; <i1> [#uses=2]
- %c = xor i1 %b, true ; <i1> [#uses=0]
- br i1 %b, label %Top, label %goon
-
-goon: ; preds = %Top
- ret void
-}
-
-
-; Test cases where an LLVM instruction requires no machine
-; instructions (e.g., cast int* to long). But there are 2 cases:
-; 1. If the result register has only a single use and the use is in the
-; same basic block, the operand will be copy-propagated during
-; instruction selection.
-; 2. If the result register has multiple uses or is in a different
-; basic block, it cannot (or will not) be copy propagated during
-; instruction selection. It will generate a
-; copy instruction (add-with-0), but this copy should get coalesced
-; away by the register allocator.
-;
-define i32 @checkForward(i32 %N, i32* %A) {
-bb2:
- %reg114 = shl i32 %N, 2 ; <i32> [#uses=1]
- %cast115 = sext i32 %reg114 to i64 ; <i64> [#uses=1]
- %cast116 = ptrtoint i32* %A to i64 ; <i64> [#uses=1]
- %reg116 = add i64 %cast116, %cast115 ; <i64> [#uses=1]
- %castPtr = inttoptr i64 %reg116 to i32* ; <i32*> [#uses=1]
- %reg118 = load i32* %castPtr ; <i32> [#uses=1]
- %cast117 = sext i32 %reg118 to i64 ; <i64> [#uses=2]
- %reg159 = add i64 1234567, %cast117 ; <i64> [#uses=0]
- %reg160 = add i64 7654321, %cast117 ; <i64> [#uses=0]
- ret i32 0
-}
-
-
-; Test case for unary NOT operation constructed from XOR.
-;
-define void @checkNot(i1 %b, i32 %i) {
- %notB = xor i1 %b, true ; <i1> [#uses=1]
- %notI = xor i32 %i, -1 ; <i32> [#uses=2]
- %F = icmp sge i32 %notI, 100 ; <i1> [#uses=1]
- %J = add i32 %i, %i ; <i32> [#uses=1]
- %andNotB = and i1 %F, %notB ; <i1> [#uses=0]
- %andNotI = and i32 %J, %notI ; <i32> [#uses=0]
- %notB2 = xor i1 true, %b ; <i1> [#uses=0]
- %notI2 = xor i32 -1, %i ; <i32> [#uses=0]
- ret void
-}
-
-; Test case for folding getelementptr into a load/store
-;
-define i32 @checkFoldGEP(%Domain* %D, i64 %idx) {
- %reg841 = getelementptr %Domain* %D, i64 0, i32 1 ; <i32*> [#uses=1]
- %reg820 = load i32* %reg841 ; <i32> [#uses=1]
- ret i32 %reg820
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/shift-int64.ll b/libclamav/c++/llvm/test/CodeGen/Generic/shift-int64.ll
deleted file mode 100644
index 670ef20..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/shift-int64.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s
-
-define i64 @test_imm(i64 %X) {
- %Y = ashr i64 %X, 17 ; <i64> [#uses=1]
- ret i64 %Y
-}
-
-define i64 @test_variable(i64 %X, i8 %Amt) {
- %shift.upgrd.1 = zext i8 %Amt to i64 ; <i64> [#uses=1]
- %Y = ashr i64 %X, %shift.upgrd.1 ; <i64> [#uses=1]
- ret i64 %Y
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/spillccr.ll b/libclamav/c++/llvm/test/CodeGen/Generic/spillccr.ll
deleted file mode 100644
index 0a774c6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/spillccr.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s
-
-; July 6, 2002 -- LLC Regression test
-; This test case checks if the integer CC register %xcc (or %ccr)
-; is correctly spilled. The code fragment came from function
-; MakeGraph in Olden-mst.
-; The original code made all comparisons with 0, so that the %xcc
-; register is not needed for the branch in the first basic block.
-; Replace 0 with 1 in the first comparson so that the
-; branch-on-register instruction cannot be used directly, i.e.,
-; the %xcc register is needed for the first branch.
-;
-
- %Graph = type %struct.graph_st*
- %Hash = type %struct.hash*
- %HashEntry = type %struct.hash_entry*
- %Vertex = type %struct.vert_st*
- %struct.graph_st = type { [1 x %Vertex] }
- %struct.hash = type { %HashEntry*, i32 (i32)*, i32 }
- %struct.hash_entry = type { i32, i8*, %HashEntry }
- %struct.vert_st = type { i32, %Vertex, %Hash }
-@HashRange = external global i32 ; <i32*> [#uses=0]
-@.LC0 = internal global [13 x i8] c"Make phase 2\00" ; <[13 x i8]*> [#uses=0]
-@.LC1 = internal global [13 x i8] c"Make phase 3\00" ; <[13 x i8]*> [#uses=0]
-@.LC2 = internal global [13 x i8] c"Make phase 4\00" ; <[13 x i8]*> [#uses=0]
-@.LC3 = internal global [15 x i8] c"Make returning\00" ; <[15 x i8]*> [#uses=0]
-
-define %Graph @MakeGraph(i32 %numvert, i32 %numproc) {
-bb1:
- %reg111 = add i32 %numproc, -1 ; <i32> [#uses=2]
- %cond275 = icmp slt i32 %reg111, 1 ; <i1> [#uses=1]
- %cond276 = icmp sle i32 %reg111, 0 ; <i1> [#uses=1]
- %cond277 = icmp sge i32 %numvert, 0 ; <i1> [#uses=1]
- %reg162 = add i32 %numvert, 3 ; <i32> [#uses=0]
- br i1 %cond275, label %bb7, label %bb4
-
-bb4: ; preds = %bb1
- br i1 %cond276, label %bb7, label %bb5
-
-bb5: ; preds = %bb4
- br i1 %cond277, label %bb7, label %bb6
-
-bb6: ; preds = %bb5
- ret %Graph null
-
-bb7: ; preds = %bb5, %bb4, %bb1
- ret %Graph null
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/stack-protector.ll b/libclamav/c++/llvm/test/CodeGen/Generic/stack-protector.ll
deleted file mode 100644
index a59c649..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/stack-protector.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -o - | grep {__stack_chk_guard}
-; RUN: llc < %s -o - | grep {__stack_chk_fail}
-
-@"\01LC" = internal constant [11 x i8] c"buf == %s\0A\00" ; <[11 x i8]*> [#uses=1]
-
-define void @test(i8* %a) nounwind ssp {
-entry:
- %a_addr = alloca i8* ; <i8**> [#uses=2]
- %buf = alloca [8 x i8] ; <[8 x i8]*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i8* %a, i8** %a_addr
- %buf1 = bitcast [8 x i8]* %buf to i8* ; <i8*> [#uses=1]
- %0 = load i8** %a_addr, align 4 ; <i8*> [#uses=1]
- %1 = call i8* @strcpy(i8* %buf1, i8* %0) nounwind ; <i8*> [#uses=0]
- %buf2 = bitcast [8 x i8]* %buf to i8* ; <i8*> [#uses=1]
- %2 = call i32 (i8*, ...)* @printf(i8* getelementptr ([11 x i8]* @"\01LC", i32 0, i32 0), i8* %buf2) nounwind ; <i32> [#uses=0]
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8* @strcpy(i8*, i8*) nounwind
-
-declare i32 @printf(i8*, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/stacksave-restore.ll b/libclamav/c++/llvm/test/CodeGen/Generic/stacksave-restore.ll
deleted file mode 100644
index b124b5f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/stacksave-restore.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s
-
-declare i8* @llvm.stacksave()
-
-declare void @llvm.stackrestore(i8*)
-
-define i32* @test(i32 %N) {
- %tmp = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
- %P = alloca i32, i32 %N ; <i32*> [#uses=1]
- call void @llvm.stackrestore( i8* %tmp )
- %Q = alloca i32, i32 %N ; <i32*> [#uses=0]
- ret i32* %P
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/storetrunc-fp.ll b/libclamav/c++/llvm/test/CodeGen/Generic/storetrunc-fp.ll
deleted file mode 100644
index 7f7c7f7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/storetrunc-fp.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s
-
-define void @foo(double %a, double %b, float* %fp) {
- %c = fadd double %a, %b
- %d = fptrunc double %c to float
- store float %d, float* %fp
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/switch-lower-feature.ll b/libclamav/c++/llvm/test/CodeGen/Generic/switch-lower-feature.ll
deleted file mode 100644
index 1e9dbee..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/switch-lower-feature.ll
+++ /dev/null
@@ -1,63 +0,0 @@
-; RUN: llc < %s
-
-define i32 @test(i32 %tmp158) {
-entry:
- switch i32 %tmp158, label %bb336 [
- i32 120, label %bb338
- i32 121, label %bb338
- i32 122, label %bb338
- i32 123, label %bb338
- i32 124, label %bb338
- i32 125, label %bb338
- i32 126, label %bb338
- i32 1024, label %bb338
- i32 0, label %bb338
- i32 1, label %bb338
- i32 2, label %bb338
- i32 3, label %bb338
- i32 4, label %bb338
- i32 5, label %bb338
- ]
-bb336:
- ret i32 10
-bb338:
- ret i32 11
-}
-
-define i32 @test2(i32 %tmp158) {
-entry:
- switch i32 %tmp158, label %bb336 [
- i32 -2147483648, label %bb338
- i32 -2147483647, label %bb338
- i32 -2147483646, label %bb338
- i32 120, label %bb338
- i32 121, label %bb339
- i32 122, label %bb340
- i32 123, label %bb341
- i32 124, label %bb342
- i32 125, label %bb343
- i32 126, label %bb336
- i32 1024, label %bb338
- i32 0, label %bb338
- i32 1, label %bb338
- i32 2, label %bb338
- i32 3, label %bb338
- i32 4, label %bb338
- i32 5, label %bb338
- ]
-bb336:
- ret i32 10
-bb338:
- ret i32 11
-bb339:
- ret i32 12
-bb340:
- ret i32 13
-bb341:
- ret i32 14
-bb342:
- ret i32 15
-bb343:
- ret i32 18
-
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/switch-lower.ll b/libclamav/c++/llvm/test/CodeGen/Generic/switch-lower.ll
deleted file mode 100644
index 1cefe82..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/switch-lower.ll
+++ /dev/null
@@ -1,348 +0,0 @@
-; RUN: llc < %s
-
-
-; PR5421
-define void @test1() {
-entry:
- switch i128 undef, label %exit [
- i128 55340232221128654848, label %exit
- i128 92233720368547758080, label %exit
- i128 73786976294838206464, label %exit
- i128 147573952589676412928, label %exit
- ]
-exit:
- unreachable
-}
-
-
-; PR1197
-define void @test2() {
-entry:
- br i1 false, label %cond_next954, label %cond_true924
-
-cond_true924: ; preds = %entry
- ret void
-
-cond_next954: ; preds = %entry
- switch i8 0, label %cleanup7419 [
- i8 1, label %bb956
- i8 2, label %bb1069
- i8 4, label %bb7328
- i8 5, label %bb1267
- i8 8, label %bb1348
- i8 9, label %bb7328
- i8 11, label %bb1439
- i8 12, label %bb1484
- i8 13, label %bb1706
- i8 14, label %bb1783
- i8 17, label %bb1925
- i8 18, label %bb1929
- i8 19, label %bb2240
- i8 25, label %bb2447
- i8 27, label %bb2480
- i8 29, label %bb2590
- i8 30, label %bb2594
- i8 31, label %bb2621
- i8 32, label %bb2664
- i8 33, label %bb2697
- i8 34, label %bb2735
- i8 37, label %bb2786
- i8 38, label %bb2849
- i8 39, label %bb3269
- i8 41, label %bb3303
- i8 42, label %bb3346
- i8 43, label %bb3391
- i8 44, label %bb3395
- i8 50, label %bb3673
- i8 52, label %bb3677
- i8 53, label %bb3693
- i8 54, label %bb7328
- i8 56, label %bb3758
- i8 57, label %bb3787
- i8 64, label %bb5019
- i8 68, label %cond_true4235
- i8 69, label %bb4325
- i8 70, label %bb4526
- i8 72, label %bb4618
- i8 73, label %bb4991
- i8 80, label %bb5012
- i8 82, label %bb5019
- i8 84, label %bb5518
- i8 86, label %bb5752
- i8 87, label %bb5953
- i8 89, label %bb6040
- i8 90, label %bb6132
- i8 92, label %bb6186
- i8 93, label %bb6151
- i8 94, label %bb6155
- i8 97, label %bb6355
- i8 98, label %bb5019
- i8 99, label %bb6401
- i8 101, label %bb5019
- i8 102, label %bb1484
- i8 104, label %bb7064
- i8 105, label %bb7068
- i8 106, label %bb7072
- i8 108, label %bb1065
- i8 109, label %bb1702
- i8 110, label %bb2200
- i8 111, label %bb2731
- i8 112, label %bb2782
- i8 113, label %bb2845
- i8 114, label %bb2875
- i8 115, label %bb3669
- i8 116, label %bb7316
- i8 117, label %bb7316
- i8 118, label %bb3875
- i8 119, label %bb4359
- i8 120, label %bb4987
- i8 121, label %bb5008
- i8 122, label %bb5786
- i8 123, label %bb6147
- i8 124, label %bb6916
- i8 125, label %bb6920
- i8 126, label %bb6955
- i8 127, label %bb6990
- i8 -128, label %bb7027
- i8 -127, label %bb3879
- i8 -126, label %bb4700
- i8 -125, label %bb7076
- i8 -124, label %bb2366
- i8 -123, label %bb2366
- i8 -122, label %bb5490
- ]
-
-bb956: ; preds = %cond_next954
- ret void
-
-bb1065: ; preds = %cond_next954
- ret void
-
-bb1069: ; preds = %cond_next954
- ret void
-
-bb1267: ; preds = %cond_next954
- ret void
-
-bb1348: ; preds = %cond_next954
- ret void
-
-bb1439: ; preds = %cond_next954
- ret void
-
-bb1484: ; preds = %cond_next954, %cond_next954
- ret void
-
-bb1702: ; preds = %cond_next954
- ret void
-
-bb1706: ; preds = %cond_next954
- ret void
-
-bb1783: ; preds = %cond_next954
- ret void
-
-bb1925: ; preds = %cond_next954
- ret void
-
-bb1929: ; preds = %cond_next954
- ret void
-
-bb2200: ; preds = %cond_next954
- ret void
-
-bb2240: ; preds = %cond_next954
- ret void
-
-bb2366: ; preds = %cond_next954, %cond_next954
- ret void
-
-bb2447: ; preds = %cond_next954
- ret void
-
-bb2480: ; preds = %cond_next954
- ret void
-
-bb2590: ; preds = %cond_next954
- ret void
-
-bb2594: ; preds = %cond_next954
- ret void
-
-bb2621: ; preds = %cond_next954
- ret void
-
-bb2664: ; preds = %cond_next954
- ret void
-
-bb2697: ; preds = %cond_next954
- ret void
-
-bb2731: ; preds = %cond_next954
- ret void
-
-bb2735: ; preds = %cond_next954
- ret void
-
-bb2782: ; preds = %cond_next954
- ret void
-
-bb2786: ; preds = %cond_next954
- ret void
-
-bb2845: ; preds = %cond_next954
- ret void
-
-bb2849: ; preds = %cond_next954
- ret void
-
-bb2875: ; preds = %cond_next954
- ret void
-
-bb3269: ; preds = %cond_next954
- ret void
-
-bb3303: ; preds = %cond_next954
- ret void
-
-bb3346: ; preds = %cond_next954
- ret void
-
-bb3391: ; preds = %cond_next954
- ret void
-
-bb3395: ; preds = %cond_next954
- ret void
-
-bb3669: ; preds = %cond_next954
- ret void
-
-bb3673: ; preds = %cond_next954
- ret void
-
-bb3677: ; preds = %cond_next954
- ret void
-
-bb3693: ; preds = %cond_next954
- ret void
-
-bb3758: ; preds = %cond_next954
- ret void
-
-bb3787: ; preds = %cond_next954
- ret void
-
-bb3875: ; preds = %cond_next954
- ret void
-
-bb3879: ; preds = %cond_next954
- ret void
-
-cond_true4235: ; preds = %cond_next954
- ret void
-
-bb4325: ; preds = %cond_next954
- ret void
-
-bb4359: ; preds = %cond_next954
- ret void
-
-bb4526: ; preds = %cond_next954
- ret void
-
-bb4618: ; preds = %cond_next954
- ret void
-
-bb4700: ; preds = %cond_next954
- ret void
-
-bb4987: ; preds = %cond_next954
- ret void
-
-bb4991: ; preds = %cond_next954
- ret void
-
-bb5008: ; preds = %cond_next954
- ret void
-
-bb5012: ; preds = %cond_next954
- ret void
-
-bb5019: ; preds = %cond_next954, %cond_next954, %cond_next954, %cond_next954
- ret void
-
-bb5490: ; preds = %cond_next954
- ret void
-
-bb5518: ; preds = %cond_next954
- ret void
-
-bb5752: ; preds = %cond_next954
- ret void
-
-bb5786: ; preds = %cond_next954
- ret void
-
-bb5953: ; preds = %cond_next954
- ret void
-
-bb6040: ; preds = %cond_next954
- ret void
-
-bb6132: ; preds = %cond_next954
- ret void
-
-bb6147: ; preds = %cond_next954
- ret void
-
-bb6151: ; preds = %cond_next954
- ret void
-
-bb6155: ; preds = %cond_next954
- ret void
-
-bb6186: ; preds = %cond_next954
- ret void
-
-bb6355: ; preds = %cond_next954
- ret void
-
-bb6401: ; preds = %cond_next954
- ret void
-
-bb6916: ; preds = %cond_next954
- ret void
-
-bb6920: ; preds = %cond_next954
- ret void
-
-bb6955: ; preds = %cond_next954
- ret void
-
-bb6990: ; preds = %cond_next954
- ret void
-
-bb7027: ; preds = %cond_next954
- ret void
-
-bb7064: ; preds = %cond_next954
- ret void
-
-bb7068: ; preds = %cond_next954
- ret void
-
-bb7072: ; preds = %cond_next954
- ret void
-
-bb7076: ; preds = %cond_next954
- ret void
-
-bb7316: ; preds = %cond_next954, %cond_next954
- ret void
-
-bb7328: ; preds = %cond_next954, %cond_next954, %cond_next954
- ret void
-
-cleanup7419: ; preds = %cond_next954
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/trap.ll b/libclamav/c++/llvm/test/CodeGen/Generic/trap.ll
deleted file mode 100644
index 67d1a7a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/trap.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s
-define i32 @test() noreturn nounwind {
-entry:
- tail call void @llvm.trap( )
- unreachable
-}
-
-declare void @llvm.trap() nounwind
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/v-split.ll b/libclamav/c++/llvm/test/CodeGen/Generic/v-split.ll
deleted file mode 100644
index 634b562..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/v-split.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s
-%f8 = type <8 x float>
-
-define void @test_f8(%f8 *%P, %f8* %Q, %f8 *%S) {
- %p = load %f8* %P
- %q = load %f8* %Q
- %R = fadd %f8 %p, %q
- store %f8 %R, %f8 *%S
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/vector-casts.ll b/libclamav/c++/llvm/test/CodeGen/Generic/vector-casts.ll
deleted file mode 100644
index a26918b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/vector-casts.ll
+++ /dev/null
@@ -1,45 +0,0 @@
-; RUN: llc < %s
-; PR2671
-
-define void @a(<2 x double>* %p, <2 x i8>* %q) {
- %t = load <2 x double>* %p
- %r = fptosi <2 x double> %t to <2 x i8>
- store <2 x i8> %r, <2 x i8>* %q
- ret void
-}
-define void @b(<2 x double>* %p, <2 x i8>* %q) {
- %t = load <2 x double>* %p
- %r = fptoui <2 x double> %t to <2 x i8>
- store <2 x i8> %r, <2 x i8>* %q
- ret void
-}
-define void @c(<2 x i8>* %p, <2 x double>* %q) {
- %t = load <2 x i8>* %p
- %r = sitofp <2 x i8> %t to <2 x double>
- store <2 x double> %r, <2 x double>* %q
- ret void
-}
-define void @d(<2 x i8>* %p, <2 x double>* %q) {
- %t = load <2 x i8>* %p
- %r = uitofp <2 x i8> %t to <2 x double>
- store <2 x double> %r, <2 x double>* %q
- ret void
-}
-define void @e(<2 x i8>* %p, <2 x i16>* %q) {
- %t = load <2 x i8>* %p
- %r = sext <2 x i8> %t to <2 x i16>
- store <2 x i16> %r, <2 x i16>* %q
- ret void
-}
-define void @f(<2 x i8>* %p, <2 x i16>* %q) {
- %t = load <2 x i8>* %p
- %r = zext <2 x i8> %t to <2 x i16>
- store <2 x i16> %r, <2 x i16>* %q
- ret void
-}
-define void @g(<2 x i16>* %p, <2 x i8>* %q) {
- %t = load <2 x i16>* %p
- %r = trunc <2 x i16> %t to <2 x i8>
- store <2 x i8> %r, <2 x i8>* %q
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/vector-constantexpr.ll b/libclamav/c++/llvm/test/CodeGen/Generic/vector-constantexpr.ll
deleted file mode 100644
index d8e0258..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/vector-constantexpr.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s
-
-define void @""(float* %inregs, float* %outregs) {
- %a_addr.i = alloca <4 x float> ; <<4 x float>*> [#uses=1]
- store <4 x float> < float undef, float undef, float undef, float undef >, <4 x float>* %a_addr.i
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/vector-identity-shuffle.ll b/libclamav/c++/llvm/test/CodeGen/Generic/vector-identity-shuffle.ll
deleted file mode 100644
index 332d6d8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/vector-identity-shuffle.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s
-
-
-define void @test(<4 x float>* %tmp2.i) {
- %tmp2.i.upgrd.1 = load <4 x float>* %tmp2.i ; <<4 x float>> [#uses=4]
- %xFloat0.48 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 0 ; <float> [#uses=1]
- %inFloat0.49 = insertelement <4 x float> undef, float %xFloat0.48, i32 0 ; <<4 x float>> [#uses=1]
- %xFloat1.50 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 1 ; <float> [#uses=1]
- %inFloat1.52 = insertelement <4 x float> %inFloat0.49, float %xFloat1.50, i32 1 ; <<4 x float>> [#uses=1]
- %xFloat2.53 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 2 ; <float> [#uses=1]
- %inFloat2.55 = insertelement <4 x float> %inFloat1.52, float %xFloat2.53, i32 2 ; <<4 x float>> [#uses=1]
- %xFloat3.56 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 3 ; <float> [#uses=1]
- %inFloat3.58 = insertelement <4 x float> %inFloat2.55, float %xFloat3.56, i32 3 ; <<4 x float>> [#uses=1]
- store <4 x float> %inFloat3.58, <4 x float>* %tmp2.i
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Generic/vector.ll b/libclamav/c++/llvm/test/CodeGen/Generic/vector.ll
deleted file mode 100644
index a0f9a02..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Generic/vector.ll
+++ /dev/null
@@ -1,154 +0,0 @@
-; Test that vectors are scalarized/lowered correctly.
-; RUN: llc < %s
-
-
-%d8 = type <8 x double>
-%f1 = type <1 x float>
-%f2 = type <2 x float>
-%f4 = type <4 x float>
-%f8 = type <8 x float>
-%i4 = type <4 x i32>
-
-;;; TEST HANDLING OF VARIOUS VECTOR SIZES
-
-define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) {
- %p = load %f1* %P ; <%f1> [#uses=1]
- %q = load %f1* %Q ; <%f1> [#uses=1]
- %R = fadd %f1 %p, %q ; <%f1> [#uses=1]
- store %f1 %R, %f1* %S
- ret void
-}
-
-define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) {
- %p = load %f2* %P ; <%f2> [#uses=1]
- %q = load %f2* %Q ; <%f2> [#uses=1]
- %R = fadd %f2 %p, %q ; <%f2> [#uses=1]
- store %f2 %R, %f2* %S
- ret void
-}
-
-define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
- %q = load %f4* %Q ; <%f4> [#uses=1]
- %R = fadd %f4 %p, %q ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %q = load %f8* %Q ; <%f8> [#uses=1]
- %R = fadd %f8 %p, %q ; <%f8> [#uses=1]
- store %f8 %R, %f8* %S
- ret void
-}
-
-define void @test_fmul(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %q = load %f8* %Q ; <%f8> [#uses=1]
- %R = fmul %f8 %p, %q ; <%f8> [#uses=1]
- store %f8 %R, %f8* %S
- ret void
-}
-
-define void @test_div(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %q = load %f8* %Q ; <%f8> [#uses=1]
- %R = fdiv %f8 %p, %q ; <%f8> [#uses=1]
- store %f8 %R, %f8* %S
- ret void
-}
-
-;;; TEST VECTOR CONSTRUCTS
-
-
-define void @test_cst(%f4* %P, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
- %R = fadd %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float 2.000000e+00, float 4.500000e+00 > ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_zero(%f4* %P, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
- %R = fadd %f4 %p, zeroinitializer ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_undef(%f4* %P, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
- %R = fadd %f4 %p, undef ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_constant_insert(%f4* %S) {
- %R = insertelement %f4 zeroinitializer, float 1.000000e+01, i32 0 ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_variable_buildvector(float %F, %f4* %S) {
- %R = insertelement %f4 zeroinitializer, float %F, i32 0 ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_scalar_to_vector(float %F, %f4* %S) {
- %R = insertelement %f4 undef, float %F, i32 0 ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define float @test_extract_elt(%f8* %P) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %R = extractelement %f8 %p, i32 3 ; <float> [#uses=1]
- ret float %R
-}
-
-define double @test_extract_elt2(%d8* %P) {
- %p = load %d8* %P ; <%d8> [#uses=1]
- %R = extractelement %d8 %p, i32 3 ; <double> [#uses=1]
- ret double %R
-}
-
-define void @test_cast_1(%f4* %b, %i4* %a) {
- %tmp = load %f4* %b ; <%f4> [#uses=1]
- %tmp2 = fadd %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 > ; <%f4> [#uses=1]
- %tmp3 = bitcast %f4 %tmp2 to %i4 ; <%i4> [#uses=1]
- %tmp4 = add %i4 %tmp3, < i32 1, i32 2, i32 3, i32 4 > ; <%i4> [#uses=1]
- store %i4 %tmp4, %i4* %a
- ret void
-}
-
-define void @test_cast_2(%f8* %a, <8 x i32>* %b) {
- %T = load %f8* %a ; <%f8> [#uses=1]
- %T2 = bitcast %f8 %T to <8 x i32> ; <<8 x i32>> [#uses=1]
- store <8 x i32> %T2, <8 x i32>* %b
- ret void
-}
-
-;;; TEST IMPORTANT IDIOMS
-
-define void @splat(%f4* %P, %f4* %Q, float %X) {
- %tmp = insertelement %f4 undef, float %X, i32 0 ; <%f4> [#uses=1]
- %tmp2 = insertelement %f4 %tmp, float %X, i32 1 ; <%f4> [#uses=1]
- %tmp4 = insertelement %f4 %tmp2, float %X, i32 2 ; <%f4> [#uses=1]
- %tmp6 = insertelement %f4 %tmp4, float %X, i32 3 ; <%f4> [#uses=1]
- %q = load %f4* %Q ; <%f4> [#uses=1]
- %R = fadd %f4 %q, %tmp6 ; <%f4> [#uses=1]
- store %f4 %R, %f4* %P
- ret void
-}
-
-define void @splat_i4(%i4* %P, %i4* %Q, i32 %X) {
- %tmp = insertelement %i4 undef, i32 %X, i32 0 ; <%i4> [#uses=1]
- %tmp2 = insertelement %i4 %tmp, i32 %X, i32 1 ; <%i4> [#uses=1]
- %tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2 ; <%i4> [#uses=1]
- %tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3 ; <%i4> [#uses=1]
- %q = load %i4* %Q ; <%i4> [#uses=1]
- %R = add %i4 %q, %tmp6 ; <%i4> [#uses=1]
- store %i4 %R, %i4* %P
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2004-11-29-ShrCrash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2004-11-29-ShrCrash.ll
deleted file mode 100644
index f95465c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2004-11-29-ShrCrash.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=ppc32
-define void @test() {
- %tr1 = lshr i32 1, 0 ; <i32> [#uses=0]
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2004-11-30-shift-crash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2004-11-30-shift-crash.ll
deleted file mode 100644
index c3bfa49..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2004-11-30-shift-crash.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=ppc32
-
-define void @main() {
- %tr4 = shl i64 1, 0 ; <i64> [#uses=0]
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2004-11-30-shr-var-crash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2004-11-30-shr-var-crash.ll
deleted file mode 100644
index dea654a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2004-11-30-shr-var-crash.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=ppc32
-
-define void @main() {
- %shamt = add i8 0, 1 ; <i8> [#uses=1]
- %shift.upgrd.1 = zext i8 %shamt to i64 ; <i64> [#uses=1]
- %tr2 = ashr i64 1, %shift.upgrd.1 ; <i64> [#uses=0]
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2004-12-12-ZeroSizeCommon.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2004-12-12-ZeroSizeCommon.ll
deleted file mode 100644
index fc190a4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2004-12-12-ZeroSizeCommon.ll
+++ /dev/null
@@ -1,4 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep .comm.*X,0
-
-@X = linkonce global { } zeroinitializer ; <{ }*> [#uses=0]
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-01-14-SetSelectCrash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-01-14-SetSelectCrash.ll
deleted file mode 100644
index ad02ece..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-01-14-SetSelectCrash.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=ppc32
-
-define i32 @main() {
- %setle = icmp sle i64 1, 0 ; <i1> [#uses=1]
- %select = select i1 true, i1 %setle, i1 true ; <i1> [#uses=0]
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-01-14-UndefLong.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-01-14-UndefLong.ll
deleted file mode 100644
index 671bf80..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-01-14-UndefLong.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s -march=ppc32
-
-define i64 @test() {
- ret i64 undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-08-12-rlwimi-crash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-08-12-rlwimi-crash.ll
deleted file mode 100644
index 95012c3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-08-12-rlwimi-crash.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; this should not crash the ppc backend
-
-; RUN: llc < %s -march=ppc32
-
-
-define i32 @test(i32 %j.0.0.i) {
- %tmp.85.i = and i32 %j.0.0.i, 7 ; <i32> [#uses=1]
- %tmp.161278.i = bitcast i32 %tmp.85.i to i32 ; <i32> [#uses=1]
- %tmp.5.i77.i = lshr i32 %tmp.161278.i, 3 ; <i32> [#uses=1]
- ret i32 %tmp.5.i77.i
-}
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-09-02-LegalizeDuplicatesCalls.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-09-02-LegalizeDuplicatesCalls.ll
deleted file mode 100644
index 5d1df46..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-09-02-LegalizeDuplicatesCalls.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; This function should have exactly one call to fixdfdi, no more!
-
-; RUN: llc < %s -march=ppc32 -mattr=-64bit | \
-; RUN: grep {bl .*fixdfdi} | count 1
-
-define double @test2(double %tmp.7705) {
- %mem_tmp.2.0.in = fptosi double %tmp.7705 to i64 ; <i64> [#uses=1]
- %mem_tmp.2.0 = sitofp i64 %mem_tmp.2.0.in to double ; <double> [#uses=1]
- ret double %mem_tmp.2.0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-10-08-ArithmeticRotate.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-10-08-ArithmeticRotate.ll
deleted file mode 100644
index 8a5d3b0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-10-08-ArithmeticRotate.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; This was erroneously being turned into an rlwinm instruction.
-; The sign bit does matter in this case.
-
-; RUN: llc < %s -march=ppc32 | grep srawi
-
-define i32 @test(i32 %X) {
- %Y = and i32 %X, -2 ; <i32> [#uses=1]
- %Z = ashr i32 %Y, 11 ; <i32> [#uses=1]
- ret i32 %Z
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-11-30-vastart-crash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-11-30-vastart-crash.ll
deleted file mode 100644
index 047a12b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2005-11-30-vastart-crash.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s
-
-target datalayout = "E-p:32:32"
-target triple = "powerpc-apple-darwin8.2.0"
-
-define void @bar(i32 %G, i32 %E, i32 %F, i32 %A, i32 %B, i32 %C, i32 %D, i8* %fmt, ...) {
- %ap = alloca i8* ; <i8**> [#uses=2]
- %va.upgrd.1 = bitcast i8** %ap to i8* ; <i8*> [#uses=1]
- call void @llvm.va_start( i8* %va.upgrd.1 )
- %tmp.1 = load i8** %ap ; <i8*> [#uses=1]
- %tmp.0 = call double @foo( i8* %tmp.1 ) ; <double> [#uses=0]
- ret void
-}
-
-declare void @llvm.va_start(i8*)
-
-declare double @foo(i8*)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-01-11-darwin-fp-argument.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-01-11-darwin-fp-argument.ll
deleted file mode 100644
index 97bb48e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-01-11-darwin-fp-argument.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s | not grep {, f1}
-
-target datalayout = "E-p:32:32"
-target triple = "powerpc-apple-darwin8.2.0"
-
-; Dead argument should reserve an FP register.
-define double @bar(double %DEAD, double %X, double %Y) {
- %tmp.2 = fadd double %X, %Y ; <double> [#uses=1]
- ret double %tmp.2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-01-20-ShiftPartsCrash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-01-20-ShiftPartsCrash.ll
deleted file mode 100644
index fbf2540..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-01-20-ShiftPartsCrash.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s
-
-define void @iterative_hash_host_wide_int() {
- %zero = alloca i32 ; <i32*> [#uses=2]
- %b = alloca i32 ; <i32*> [#uses=1]
- store i32 0, i32* %zero
- %tmp = load i32* %zero ; <i32> [#uses=1]
- %tmp5 = bitcast i32 %tmp to i32 ; <i32> [#uses=1]
- %tmp6.u = add i32 %tmp5, 32 ; <i32> [#uses=1]
- %tmp6 = bitcast i32 %tmp6.u to i32 ; <i32> [#uses=1]
- %tmp7 = load i64* null ; <i64> [#uses=1]
- %tmp6.upgrd.1 = trunc i32 %tmp6 to i8 ; <i8> [#uses=1]
- %shift.upgrd.2 = zext i8 %tmp6.upgrd.1 to i64 ; <i64> [#uses=1]
- %tmp8 = ashr i64 %tmp7, %shift.upgrd.2 ; <i64> [#uses=1]
- %tmp8.upgrd.3 = trunc i64 %tmp8 to i32 ; <i32> [#uses=1]
- store i32 %tmp8.upgrd.3, i32* %b
- unreachable
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-04-01-FloatDoubleExtend.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-04-01-FloatDoubleExtend.ll
deleted file mode 100644
index 172e348..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-04-01-FloatDoubleExtend.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=ppc32
-
-
-define double @CalcSpeed(float %tmp127) {
- %tmp145 = fpext float %tmp127 to double ; <double> [#uses=1]
- %tmp150 = call double asm "frsqrte $0,$1", "=f,f"( double %tmp145 ) ; <double> [#uses=1]
- ret double %tmp150
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-04-05-splat-ish.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-04-05-splat-ish.ll
deleted file mode 100644
index 969772e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-04-05-splat-ish.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -mcpu=g5 | \
-; RUN: grep {vspltish v.*, 10}
-
-define void @test(<8 x i16>* %P) {
- %tmp = load <8 x i16>* %P ; <<8 x i16>> [#uses=1]
- %tmp1 = add <8 x i16> %tmp, < i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10 > ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp1, <8 x i16>* %P
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-04-19-vmaddfp-crash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-04-19-vmaddfp-crash.ll
deleted file mode 100644
index d225664..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-04-19-vmaddfp-crash.ll
+++ /dev/null
@@ -1,58 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5
-; END.
-
-define void @test(i8* %stack) {
-entry:
- %tmp9 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- %tmp30 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- br i1 %tmp30, label %cond_next54, label %cond_true31
-cond_true860: ; preds = %bb855
- %tmp879 = tail call <4 x float> @llvm.ppc.altivec.vmaddfp( <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x float> zeroinitializer ) ; <<4 x float>> [#uses=1]
- %tmp880 = bitcast <4 x float> %tmp879 to <4 x i32> ; <<4 x i32>> [#uses=2]
- %tmp883 = shufflevector <4 x i32> %tmp880, <4 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x i32>> [#uses=1]
- %tmp883.upgrd.1 = bitcast <4 x i32> %tmp883 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp885 = shufflevector <4 x i32> %tmp880, <4 x i32> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x i32>> [#uses=1]
- %tmp885.upgrd.2 = bitcast <4 x i32> %tmp885 to <4 x float> ; <<4 x float>> [#uses=1]
- br label %cond_next905
-cond_true31: ; preds = %entry
- ret void
-cond_next54: ; preds = %entry
- br i1 %tmp9, label %cond_false385, label %bb279
-bb279: ; preds = %cond_next54
- ret void
-cond_false385: ; preds = %cond_next54
- %tmp388 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- br i1 %tmp388, label %cond_next463, label %cond_true389
-cond_true389: ; preds = %cond_false385
- ret void
-cond_next463: ; preds = %cond_false385
- %tmp1208107 = icmp ugt i8* null, %stack ; <i1> [#uses=1]
- br i1 %tmp1208107, label %cond_true1209.preheader, label %bb1212
-cond_true498: ; preds = %cond_true1209.preheader
- ret void
-cond_true519: ; preds = %cond_true1209.preheader
- %bothcond = or i1 false, false ; <i1> [#uses=1]
- br i1 %bothcond, label %bb855, label %bb980
-cond_false548: ; preds = %cond_true1209.preheader
- ret void
-bb855: ; preds = %cond_true519
- %tmp859 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- br i1 %tmp859, label %cond_true860, label %cond_next905
-cond_next905: ; preds = %bb855, %cond_true860
- %vfpw2.4 = phi <4 x float> [ %tmp885.upgrd.2, %cond_true860 ], [ undef, %bb855 ] ; <<4 x float>> [#uses=0]
- %vfpw1.4 = phi <4 x float> [ %tmp883.upgrd.1, %cond_true860 ], [ undef, %bb855 ] ; <<4 x float>> [#uses=0]
- %tmp930 = bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>> [#uses=0]
- ret void
-bb980: ; preds = %cond_true519
- ret void
-cond_true1209.preheader: ; preds = %cond_next463
- %tmp496 = and i32 0, 12288 ; <i32> [#uses=1]
- switch i32 %tmp496, label %cond_false548 [
- i32 0, label %cond_true498
- i32 4096, label %cond_true519
- ]
-bb1212: ; preds = %cond_next463
- ret void
-}
-
-declare <4 x float> @llvm.ppc.altivec.vmaddfp(<4 x float>, <4 x float>, <4 x float>)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-05-12-rlwimi-crash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-05-12-rlwimi-crash.ll
deleted file mode 100644
index 0205d10..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-05-12-rlwimi-crash.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; RUN: llc < %s -march=ppc32
-; END.
-
- %struct.attr_desc = type { i8*, %struct.attr_desc*, %struct.attr_value*, %struct.attr_value*, i32 }
- %struct.attr_value = type { %struct.rtx_def*, %struct.attr_value*, %struct.insn_ent*, i32, i32 }
- %struct.insn_def = type { %struct.insn_def*, %struct.rtx_def*, i32, i32, i32, i32, i32 }
- %struct.insn_ent = type { %struct.insn_ent*, %struct.insn_def* }
- %struct.rtx_def = type { i16, i8, i8, %struct.u }
- %struct.u = type { [1 x i64] }
-
-define void @find_attr() {
-entry:
- %tmp26 = icmp eq %struct.attr_desc* null, null ; <i1> [#uses=1]
- br i1 %tmp26, label %bb30, label %cond_true27
-cond_true27: ; preds = %entry
- ret void
-bb30: ; preds = %entry
- %tmp67 = icmp eq %struct.attr_desc* null, null ; <i1> [#uses=1]
- br i1 %tmp67, label %cond_next92, label %cond_true68
-cond_true68: ; preds = %bb30
- ret void
-cond_next92: ; preds = %bb30
- %tmp173 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
- %tmp174 = load i32* %tmp173 ; <i32> [#uses=1]
- %tmp177 = and i32 %tmp174, -9 ; <i32> [#uses=1]
- store i32 %tmp177, i32* %tmp173
- %tmp180 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
- %tmp181 = load i32* %tmp180 ; <i32> [#uses=1]
- %tmp185 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
- %tmp186 = load i32* %tmp185 ; <i32> [#uses=1]
- %tmp183187 = shl i32 %tmp181, 1 ; <i32> [#uses=1]
- %tmp188 = and i32 %tmp183187, 16 ; <i32> [#uses=1]
- %tmp190 = and i32 %tmp186, -17 ; <i32> [#uses=1]
- %tmp191 = or i32 %tmp190, %tmp188 ; <i32> [#uses=1]
- store i32 %tmp191, i32* %tmp185
- %tmp193 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
- %tmp194 = load i32* %tmp193 ; <i32> [#uses=1]
- %tmp198 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
- %tmp199 = load i32* %tmp198 ; <i32> [#uses=1]
- %tmp196200 = shl i32 %tmp194, 2 ; <i32> [#uses=1]
- %tmp201 = and i32 %tmp196200, 64 ; <i32> [#uses=1]
- %tmp203 = and i32 %tmp199, -65 ; <i32> [#uses=1]
- %tmp204 = or i32 %tmp203, %tmp201 ; <i32> [#uses=1]
- store i32 %tmp204, i32* %tmp198
- %tmp206 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
- %tmp207 = load i32* %tmp206 ; <i32> [#uses=1]
- %tmp211 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
- %tmp212 = load i32* %tmp211 ; <i32> [#uses=1]
- %tmp209213 = shl i32 %tmp207, 1 ; <i32> [#uses=1]
- %tmp214 = and i32 %tmp209213, 128 ; <i32> [#uses=1]
- %tmp216 = and i32 %tmp212, -129 ; <i32> [#uses=1]
- %tmp217 = or i32 %tmp216, %tmp214 ; <i32> [#uses=1]
- store i32 %tmp217, i32* %tmp211
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll
deleted file mode 100644
index 1b8b064..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc64-apple-darwin | grep extsw | count 2
-
-@lens = external global i8* ; <i8**> [#uses=1]
-@vals = external global i32* ; <i32**> [#uses=1]
-
-define i32 @test(i32 %i) {
- %tmp = load i8** @lens ; <i8*> [#uses=1]
- %tmp1 = getelementptr i8* %tmp, i32 %i ; <i8*> [#uses=1]
- %tmp.upgrd.1 = load i8* %tmp1 ; <i8> [#uses=1]
- %tmp2 = zext i8 %tmp.upgrd.1 to i32 ; <i32> [#uses=1]
- %tmp3 = load i32** @vals ; <i32*> [#uses=1]
- %tmp5 = sub i32 1, %tmp2 ; <i32> [#uses=1]
- %tmp6 = getelementptr i32* %tmp3, i32 %tmp5 ; <i32*> [#uses=1]
- %tmp7 = load i32* %tmp6 ; <i32> [#uses=1]
- ret i32 %tmp7
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-07-19-stwbrx-crash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-07-19-stwbrx-crash.ll
deleted file mode 100644
index 65dd568..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-07-19-stwbrx-crash.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=ppc32
-
-define void @img2buf(i32 %symbol_size_in_bytes, i16* %ui16) nounwind {
- %tmp93 = load i16* null ; <i16> [#uses=1]
- %tmp99 = call i16 @llvm.bswap.i16( i16 %tmp93 ) ; <i16> [#uses=1]
- store i16 %tmp99, i16* %ui16
- ret void
-}
-
-declare i16 @llvm.bswap.i16(i16)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-08-11-RetVector.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-08-11-RetVector.ll
deleted file mode 100644
index a947e5c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-08-11-RetVector.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep vsldoi
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | not grep vor
-
-define <4 x float> @func(<4 x float> %fp0, <4 x float> %fp1) {
- %tmp76 = shufflevector <4 x float> %fp0, <4 x float> %fp1, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp76
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-08-15-SelectionCrash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-08-15-SelectionCrash.ll
deleted file mode 100644
index cb76b5c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-08-15-SelectionCrash.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s
-
- %struct..0anon = type { i32 }
- %struct.rtx_def = type { i16, i8, i8, [1 x %struct..0anon] }
-
-define fastcc void @immed_double_const(i32 %i0, i32 %i1) {
-entry:
- %tmp1 = load i32* null ; <i32> [#uses=1]
- switch i32 %tmp1, label %bb103 [
- i32 1, label %bb
- i32 3, label %bb
- ]
-bb: ; preds = %entry, %entry
- %tmp14 = icmp sgt i32 0, 31 ; <i1> [#uses=1]
- br i1 %tmp14, label %cond_next77, label %cond_next17
-cond_next17: ; preds = %bb
- ret void
-cond_next77: ; preds = %bb
- %tmp79.not = icmp ne i32 %i1, 0 ; <i1> [#uses=1]
- %tmp84 = icmp slt i32 %i0, 0 ; <i1> [#uses=2]
- %bothcond1 = or i1 %tmp79.not, %tmp84 ; <i1> [#uses=1]
- br i1 %bothcond1, label %bb88, label %bb99
-bb88: ; preds = %cond_next77
- %bothcond2 = and i1 false, %tmp84 ; <i1> [#uses=0]
- ret void
-bb99: ; preds = %cond_next77
- ret void
-bb103: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-09-28-shift_64.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-09-28-shift_64.ll
deleted file mode 100644
index f748a8b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-09-28-shift_64.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=ppc64
-
-target datalayout = "E-p:64:64"
-target triple = "powerpc64-apple-darwin8"
-
-define void @glArrayElement_CompExec() {
-entry:
- %tmp3 = and i64 0, -8388609 ; <i64> [#uses=1]
- br label %cond_true24
-cond_false: ; preds = %cond_true24
- ret void
-cond_true24: ; preds = %cond_true24, %entry
- %indvar.ph = phi i32 [ 0, %entry ], [ %indvar.next, %cond_true24 ] ; <i32> [#uses=1]
- %indvar = add i32 0, %indvar.ph ; <i32> [#uses=2]
- %code.0 = trunc i32 %indvar to i8 ; <i8> [#uses=1]
- %tmp5 = add i8 %code.0, 16 ; <i8> [#uses=1]
- %shift.upgrd.1 = zext i8 %tmp5 to i64 ; <i64> [#uses=1]
- %tmp7 = lshr i64 %tmp3, %shift.upgrd.1 ; <i64> [#uses=1]
- %tmp7.upgrd.2 = trunc i64 %tmp7 to i32 ; <i32> [#uses=1]
- %tmp8 = and i32 %tmp7.upgrd.2, 1 ; <i32> [#uses=1]
- %tmp8.upgrd.3 = icmp eq i32 %tmp8, 0 ; <i1> [#uses=1]
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
- br i1 %tmp8.upgrd.3, label %cond_false, label %cond_true24
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-10-11-combiner-aa-regression.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-10-11-combiner-aa-regression.ll
deleted file mode 100644
index 57ed250..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-10-11-combiner-aa-regression.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=ppc32 -combiner-alias-analysis | grep f5
-
-target datalayout = "E-p:32:32"
-target triple = "powerpc-apple-darwin8.2.0"
- %struct.Point = type { double, double, double }
-
-define void @offset(%struct.Point* %pt, double %x, double %y, double %z) {
-entry:
- %tmp = getelementptr %struct.Point* %pt, i32 0, i32 0 ; <double*> [#uses=2]
- %tmp.upgrd.1 = load double* %tmp ; <double> [#uses=1]
- %tmp2 = fadd double %tmp.upgrd.1, %x ; <double> [#uses=1]
- store double %tmp2, double* %tmp
- %tmp6 = getelementptr %struct.Point* %pt, i32 0, i32 1 ; <double*> [#uses=2]
- %tmp7 = load double* %tmp6 ; <double> [#uses=1]
- %tmp9 = fadd double %tmp7, %y ; <double> [#uses=1]
- store double %tmp9, double* %tmp6
- %tmp13 = getelementptr %struct.Point* %pt, i32 0, i32 2 ; <double*> [#uses=2]
- %tmp14 = load double* %tmp13 ; <double> [#uses=1]
- %tmp16 = fadd double %tmp14, %z ; <double> [#uses=1]
- store double %tmp16, double* %tmp13
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-10-13-Miscompile.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-10-13-Miscompile.ll
deleted file mode 100644
index 002a064..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-10-13-Miscompile.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep IMPLICIT_DEF
-
-define void @foo(i64 %X) {
-entry:
- %tmp1 = and i64 %X, 3 ; <i64> [#uses=1]
- %tmp = icmp sgt i64 %tmp1, 2 ; <i1> [#uses=1]
- br i1 %tmp, label %UnifiedReturnBlock, label %cond_true
-cond_true: ; preds = %entry
- %tmp.upgrd.1 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
- ret void
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-declare i32 @bar(...)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-10-17-brcc-miscompile.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-10-17-brcc-miscompile.ll
deleted file mode 100644
index 3d462b4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-10-17-brcc-miscompile.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep xor
-
-target datalayout = "E-p:32:32"
-target triple = "powerpc-apple-darwin8.7.0"
-
-define void @foo(i32 %X) {
-entry:
- %tmp1 = and i32 %X, 3 ; <i32> [#uses=1]
- %tmp2 = xor i32 %tmp1, 1 ; <i32> [#uses=1]
- %tmp = icmp eq i32 %tmp2, 0 ; <i1> [#uses=1]
- br i1 %tmp, label %UnifiedReturnBlock, label %cond_true
-cond_true: ; preds = %entry
- tail call i32 (...)* @bar( ) ; <i32>:0 [#uses=0]
- ret void
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-declare i32 @bar(...)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-10-17-ppc64-alloca.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-10-17-ppc64-alloca.ll
deleted file mode 100644
index 3284f0a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-10-17-ppc64-alloca.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=ppc64
-
-define i32* @foo(i32 %n) {
- %A = alloca i32, i32 %n ; <i32*> [#uses=1]
- ret i32* %A
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-11-10-DAGCombineMiscompile.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-11-10-DAGCombineMiscompile.ll
deleted file mode 100644
index 49b3b9d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-11-10-DAGCombineMiscompile.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep rlwimi
-
-define void @test(i16 %div.0.i.i.i.i, i32 %L_num.0.i.i.i.i, i32 %tmp1.i.i206.i.i, i16* %P) {
- %X = shl i16 %div.0.i.i.i.i, 1 ; <i16> [#uses=1]
- %tmp28.i.i.i.i = shl i32 %L_num.0.i.i.i.i, 1 ; <i32> [#uses=1]
- %tmp31.i.i.i.i = icmp slt i32 %tmp28.i.i.i.i, %tmp1.i.i206.i.i ; <i1> [#uses=1]
- %tmp31.i.i.i.i.upgrd.1 = zext i1 %tmp31.i.i.i.i to i16 ; <i16> [#uses=1]
- %tmp371.i.i.i.i1 = or i16 %tmp31.i.i.i.i.upgrd.1, %X ; <i16> [#uses=1]
- %div.0.be.i.i.i.i = xor i16 %tmp371.i.i.i.i1, 1 ; <i16> [#uses=1]
- store i16 %div.0.be.i.i.i.i, i16* %P
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-11-29-AltivecFPSplat.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-11-29-AltivecFPSplat.ll
deleted file mode 100644
index 61b9967..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-11-29-AltivecFPSplat.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5
-
-define void @glgRunProcessor15() {
- %tmp26355.i = shufflevector <4 x float> zeroinitializer, <4 x float> < float 0x379FFFE000000000, float 0x379FFFE000000000, float 0x379FFFE000000000, float 0x379FFFE000000000 >, <4 x i32> < i32 0, i32 1, i32 2, i32 7 >; <<4 x float>> [#uses=1]
- %tmp3030030304.i = bitcast <4 x float> %tmp26355.i to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp30305.i = shufflevector <8 x i16> zeroinitializer, <8 x i16> %tmp3030030304.i, <8 x i32> < i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15 > ; <<8 x i16>> [#uses=1]
- %tmp30305.i.upgrd.1 = bitcast <8 x i16> %tmp30305.i to <4 x i32> ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp30305.i.upgrd.1, <4 x i32>* null
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-12-07-LargeAlloca.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-12-07-LargeAlloca.ll
deleted file mode 100644
index ba86304..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-12-07-LargeAlloca.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=ppc64
-; RUN: llc < %s -march=ppc32
-; RUN: llc < %s
-
-define void @bitap() {
-entry:
- %RMask.i = alloca [256 x i32], align 16 ; <[256 x i32]*> [#uses=1]
- %buffer = alloca [147456 x i8], align 16 ; <[147456 x i8]*> [#uses=0]
- br i1 false, label %bb19, label %bb.preheader
-bb.preheader: ; preds = %entry
- ret void
-bb19: ; preds = %entry
- br i1 false, label %bb12.i, label %cond_next39
-bb12.i: ; preds = %bb12.i, %bb19
- %i.0.i = phi i32 [ %tmp11.i, %bb12.i ], [ 0, %bb19 ] ; <i32> [#uses=2]
- %gep.upgrd.1 = zext i32 %i.0.i to i64 ; <i64> [#uses=1]
- %tmp9.i = getelementptr [256 x i32]* %RMask.i, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
- store i32 0, i32* %tmp9.i
- %tmp11.i = add i32 %i.0.i, 1 ; <i32> [#uses=1]
- br label %bb12.i
-cond_next39: ; preds = %bb19
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-12-07-SelectCrash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-12-07-SelectCrash.ll
deleted file mode 100644
index 6d9a3fa..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2006-12-07-SelectCrash.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=ppc64
-; RUN: llc < %s -march=ppc32
-; RUN: llc < %s
-
-@qsz.b = external global i1 ; <i1*> [#uses=1]
-
-define fastcc void @qst() {
-entry:
- br i1 true, label %cond_next71, label %cond_true
-cond_true: ; preds = %entry
- ret void
-cond_next71: ; preds = %entry
- %tmp73.b = load i1* @qsz.b ; <i1> [#uses=1]
- %ii.4.ph = select i1 %tmp73.b, i64 4, i64 0 ; <i64> [#uses=1]
- br label %bb139
-bb82: ; preds = %bb139
- ret void
-bb139: ; preds = %bb139, %cond_next71
- %exitcond89 = icmp eq i64 0, %ii.4.ph ; <i1> [#uses=1]
- br i1 %exitcond89, label %bb82, label %bb139
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-01-04-ArgExtension.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-01-04-ArgExtension.ll
deleted file mode 100644
index 805528c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-01-04-ArgExtension.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep extsb
-; RUN: llc < %s -march=ppc32 | grep extsh
-
-define i32 @p1(i8 %c, i16 %s) {
-entry:
- %tmp = sext i8 %c to i32 ; <i32> [#uses=1]
- %tmp1 = sext i16 %s to i32 ; <i32> [#uses=1]
- %tmp2 = add i32 %tmp1, %tmp ; <i32> [#uses=1]
- ret i32 %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-01-15-AsmDialect.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-01-15-AsmDialect.ll
deleted file mode 100644
index 7b00ac6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-01-15-AsmDialect.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 | \
-; RUN: grep cntlzw
-
-define i32 @foo() nounwind {
-entry:
- %retval = alloca i32, align 4 ; <i32*> [#uses=2]
- %temp = alloca i32, align 4 ; <i32*> [#uses=2]
- %ctz_x = alloca i32, align 4 ; <i32*> [#uses=3]
- %ctz_c = alloca i32, align 4 ; <i32*> [#uses=2]
- "alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 61440, i32* %ctz_x
- %tmp = load i32* %ctz_x ; <i32> [#uses=1]
- %tmp1 = sub i32 0, %tmp ; <i32> [#uses=1]
- %tmp2 = load i32* %ctz_x ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp1, %tmp2 ; <i32> [#uses=1]
- %tmp4 = call i32 asm "$(cntlz$|cntlzw$) $0,$1", "=r,r,~{dirflag},~{fpsr},~{flags}"( i32 %tmp3 ) ; <i32> [#uses=1]
- store i32 %tmp4, i32* %ctz_c
- %tmp5 = load i32* %ctz_c ; <i32> [#uses=1]
- store i32 %tmp5, i32* %temp
- %tmp6 = load i32* %temp ; <i32> [#uses=1]
- store i32 %tmp6, i32* %retval
- br label %return
-
-return: ; preds = %entry
- %retval2 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-01-29-lbrx-asm.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-01-29-lbrx-asm.ll
deleted file mode 100644
index 0c45472..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-01-29-lbrx-asm.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=ppc32
-; RUN: llc < %s -march=ppc64
-
-define i16 @test(i8* %d1, i16* %d2) {
- %tmp237 = call i16 asm "lhbrx $0, $2, $1", "=r,r,bO,m"( i8* %d1, i32 0, i16* %d2 ) ; <i16> [#uses=1]
- ret i16 %tmp237
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-01-31-InlineAsmAddrMode.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-01-31-InlineAsmAddrMode.ll
deleted file mode 100644
index fe5145d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-01-31-InlineAsmAddrMode.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=ppc32
-; RUN: llc < %s -march=ppc64
-
-; Test two things: 1) that a frameidx can be rewritten in an inline asm
-; 2) that inline asms can handle reg+imm addr modes.
-
- %struct.A = type { i32, i32 }
-
-
-define void @test1() {
-entry:
- %Out = alloca %struct.A, align 4 ; <%struct.A*> [#uses=1]
- %tmp2 = getelementptr %struct.A* %Out, i32 0, i32 1
- %tmp5 = call i32 asm "lwbrx $0, $1", "=r,m"(i32* %tmp2 )
- ret void
-}
-
-define void @test2() {
-entry:
- %Out = alloca %struct.A, align 4 ; <%struct.A*> [#uses=1]
- %tmp2 = getelementptr %struct.A* %Out, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp5 = call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,m"( i8* null, i32 0, i32* %tmp2 ) ; <i32> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-02-16-AlignPacked.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-02-16-AlignPacked.ll
deleted file mode 100644
index 621d43b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-02-16-AlignPacked.ll
+++ /dev/null
@@ -1,4 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8.8.0 | \
-; RUN: grep align.*3
-
-@X = global <{i32, i32}> <{ i32 1, i32 123 }>
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-02-16-InlineAsmNConstraint.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-02-16-InlineAsmNConstraint.ll
deleted file mode 100644
index f48f365..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-02-16-InlineAsmNConstraint.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s
-
-target datalayout = "E-p:32:32"
-target triple = "powerpc-apple-darwin8.8.0"
-
-
-define void @blargh() {
-entry:
- %tmp4 = call i32 asm "rlwimi $0,$2,$3,$4,$5", "=r,0,r,n,n,n"( i32 0, i32 0, i32 0, i32 24, i32 31 ) ; <i32> [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-02-23-lr-saved-twice.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-02-23-lr-saved-twice.ll
deleted file mode 100644
index 0473857..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-02-23-lr-saved-twice.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s | grep mflr | count 1
-
-target datalayout = "e-p:32:32"
-target triple = "powerpc-apple-darwin8"
-@str = internal constant [18 x i8] c"hello world!, %d\0A\00" ; <[18 x i8]*> [#uses=1]
-
-
-define i32 @main() {
-entry:
- %tmp = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([18 x i8]* @str, i32 0, i32 0) ) ; <i32> [#uses=0]
- ret i32 0
-}
-
-declare i32 @printf(i8*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
deleted file mode 100644
index e93395a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=ppc64 -mcpu=g5 | grep cntlzd
-
-define i32 @_ZNK4llvm5APInt17countLeadingZerosEv(i64 *%t) {
- %tmp19 = load i64* %t
- %tmp22 = tail call i64 @llvm.ctlz.i64( i64 %tmp19 ) ; <i64> [#uses=1]
- %tmp23 = trunc i64 %tmp22 to i32
- %tmp89 = add i32 %tmp23, -64 ; <i32> [#uses=1]
- %tmp90 = add i32 %tmp89, 0 ; <i32> [#uses=1]
- ret i32 %tmp90
-}
-
-declare i64 @llvm.ctlz.i64(i64)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll
deleted file mode 100644
index d43916d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll
+++ /dev/null
@@ -1,1801 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5
-
-define void @test(<4 x float>*, { { i16, i16, i32 } }*) {
-xOperationInitMasks.exit:
- %.sub7896 = getelementptr [4 x <4 x i32>]* null, i32 0, i32 0 ; <<4 x i32>*> [#uses=24]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 175, i32 3 ; <<4 x float>*>:2 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 174, i32 2 ; <<4 x float>*>:3 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 174, i32 3 ; <<4 x float>*>:4 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 1 ; <<4 x float>*>:5 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 2 ; <<4 x float>*>:6 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 3 ; <<4 x float>*>:7 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 1 ; <<4 x float>*>:8 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 2 ; <<4 x float>*>:9 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 3 ; <<4 x float>*>:10 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 1 ; <<4 x float>*>:11 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 2 ; <<4 x float>*>:12 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 3 ; <<4 x float>*>:13 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 1 ; <<4 x float>*>:14 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 2 ; <<4 x float>*>:15 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 3 ; <<4 x float>*>:16 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 1 ; <<4 x float>*>:17 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 2 ; <<4 x float>*>:18 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 3 ; <<4 x float>*>:19 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 1 ; <<4 x float>*>:20 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 2 ; <<4 x float>*>:21 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 3 ; <<4 x float>*>:22 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 1 ; <<4 x float>*>:23 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 2 ; <<4 x float>*>:24 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 3 ; <<4 x float>*>:25 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 1 ; <<4 x float>*>:26 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 2 ; <<4 x float>*>:27 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 3 ; <<4 x float>*>:28 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 1 ; <<4 x float>*>:29 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 2 ; <<4 x float>*>:30 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 3 ; <<4 x float>*>:31 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 1 ; <<4 x float>*>:32 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 2 ; <<4 x float>*>:33 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 3 ; <<4 x float>*>:34 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 1 ; <<4 x float>*>:35 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 2 ; <<4 x float>*>:36 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 3 ; <<4 x float>*>:37 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 1 ; <<4 x float>*>:38 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 2 ; <<4 x float>*>:39 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 3 ; <<4 x float>*>:40 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 1 ; <<4 x float>*>:41 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 2 ; <<4 x float>*>:42 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 3 ; <<4 x float>*>:43 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 1 ; <<4 x float>*>:44 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 2 ; <<4 x float>*>:45 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 3 ; <<4 x float>*>:46 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 1 ; <<4 x float>*>:47 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 2 ; <<4 x float>*>:48 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 3 ; <<4 x float>*>:49 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 1 ; <<4 x float>*>:50 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 2 ; <<4 x float>*>:51 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 3 ; <<4 x float>*>:52 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 1 ; <<4 x float>*>:53 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 2 ; <<4 x float>*>:54 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 3 ; <<4 x float>*>:55 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 1 ; <<4 x float>*>:56 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 2 ; <<4 x float>*>:57 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 3 ; <<4 x float>*>:58 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 1 ; <<4 x float>*>:59 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 2 ; <<4 x float>*>:60 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 3 ; <<4 x float>*>:61 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 1 ; <<4 x float>*>:62 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 2 ; <<4 x float>*>:63 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 3 ; <<4 x float>*>:64 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 1 ; <<4 x float>*>:65 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 2 ; <<4 x float>*>:66 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 3 ; <<4 x float>*>:67 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 1 ; <<4 x float>*>:68 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 2 ; <<4 x float>*>:69 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 3 ; <<4 x float>*>:70 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 1 ; <<4 x float>*>:71 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 2 ; <<4 x float>*>:72 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 3 ; <<4 x float>*>:73 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 1 ; <<4 x float>*>:74 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 2 ; <<4 x float>*>:75 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 3 ; <<4 x float>*>:76 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 1 ; <<4 x float>*>:77 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 2 ; <<4 x float>*>:78 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 3 ; <<4 x float>*>:79 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 1 ; <<4 x float>*>:80 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 2 ; <<4 x float>*>:81 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 3 ; <<4 x float>*>:82 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 1 ; <<4 x float>*>:83 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 2 ; <<4 x float>*>:84 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 3 ; <<4 x float>*>:85 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 1 ; <<4 x float>*>:86 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 2 ; <<4 x float>*>:87 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 3 ; <<4 x float>*>:88 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 1 ; <<4 x float>*>:89 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 2 ; <<4 x float>*>:90 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 3 ; <<4 x float>*>:91 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 1 ; <<4 x float>*>:92 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 2 ; <<4 x float>*>:93 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 3 ; <<4 x float>*>:94 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 1 ; <<4 x float>*>:95 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 2 ; <<4 x float>*>:96 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 3 ; <<4 x float>*>:97 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 1 ; <<4 x float>*>:98 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 2 ; <<4 x float>*>:99 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 3 ; <<4 x float>*>:100 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 1 ; <<4 x float>*>:101 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 2 ; <<4 x float>*>:102 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 3 ; <<4 x float>*>:103 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 1 ; <<4 x float>*>:104 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 2 ; <<4 x float>*>:105 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 3 ; <<4 x float>*>:106 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 1 ; <<4 x float>*>:107 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 2 ; <<4 x float>*>:108 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 3 ; <<4 x float>*>:109 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 1 ; <<4 x float>*>:110 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 2 ; <<4 x float>*>:111 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 3 ; <<4 x float>*>:112 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 1 ; <<4 x float>*>:113 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 2 ; <<4 x float>*>:114 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 3 ; <<4 x float>*>:115 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 1 ; <<4 x float>*>:116 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 2 ; <<4 x float>*>:117 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 3 ; <<4 x float>*>:118 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 1 ; <<4 x float>*>:119 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 2 ; <<4 x float>*>:120 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 3 ; <<4 x float>*>:121 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 1 ; <<4 x float>*>:122 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 2 ; <<4 x float>*>:123 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 3 ; <<4 x float>*>:124 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 1 ; <<4 x float>*>:125 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 2 ; <<4 x float>*>:126 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 3 ; <<4 x float>*>:127 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 1 ; <<4 x float>*>:128 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 2 ; <<4 x float>*>:129 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 3 ; <<4 x float>*>:130 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 1 ; <<4 x float>*>:131 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 2 ; <<4 x float>*>:132 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 3 ; <<4 x float>*>:133 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 1 ; <<4 x float>*>:134 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 2 ; <<4 x float>*>:135 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 3 ; <<4 x float>*>:136 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 1 ; <<4 x float>*>:137 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 2 ; <<4 x float>*>:138 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 3 ; <<4 x float>*>:139 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 1 ; <<4 x float>*>:140 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 2 ; <<4 x float>*>:141 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 3 ; <<4 x float>*>:142 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 1 ; <<4 x float>*>:143 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 2 ; <<4 x float>*>:144 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 3 ; <<4 x float>*>:145 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 1 ; <<4 x float>*>:146 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 2 ; <<4 x float>*>:147 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 3 ; <<4 x float>*>:148 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 1 ; <<4 x float>*>:149 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 2 ; <<4 x float>*>:150 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 3 ; <<4 x float>*>:151 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 1 ; <<4 x float>*>:152 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 2 ; <<4 x float>*>:153 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 3 ; <<4 x float>*>:154 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 1 ; <<4 x float>*>:155 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 2 ; <<4 x float>*>:156 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 3 ; <<4 x float>*>:157 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 1 ; <<4 x float>*>:158 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 2 ; <<4 x float>*>:159 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 3 ; <<4 x float>*>:160 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 1 ; <<4 x float>*>:161 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 2 ; <<4 x float>*>:162 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 3 ; <<4 x float>*>:163 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 1 ; <<4 x float>*>:164 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 2 ; <<4 x float>*>:165 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 3 ; <<4 x float>*>:166 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 1 ; <<4 x float>*>:167 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 2 ; <<4 x float>*>:168 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 3 ; <<4 x float>*>:169 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 1 ; <<4 x float>*>:170 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 2 ; <<4 x float>*>:171 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 3 ; <<4 x float>*>:172 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 1 ; <<4 x float>*>:173 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 2 ; <<4 x float>*>:174 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 3 ; <<4 x float>*>:175 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 1 ; <<4 x float>*>:176 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 2 ; <<4 x float>*>:177 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 3 ; <<4 x float>*>:178 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 1 ; <<4 x float>*>:179 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 2 ; <<4 x float>*>:180 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 3 ; <<4 x float>*>:181 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 1 ; <<4 x float>*>:182 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 2 ; <<4 x float>*>:183 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 3 ; <<4 x float>*>:184 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 1 ; <<4 x float>*>:185 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 2 ; <<4 x float>*>:186 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 3 ; <<4 x float>*>:187 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 1 ; <<4 x float>*>:188 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 2 ; <<4 x float>*>:189 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 3 ; <<4 x float>*>:190 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 1 ; <<4 x float>*>:191 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 2 ; <<4 x float>*>:192 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 3 ; <<4 x float>*>:193 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 1 ; <<4 x float>*>:194 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 2 ; <<4 x float>*>:195 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 3 ; <<4 x float>*>:196 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 1 ; <<4 x float>*>:197 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 2 ; <<4 x float>*>:198 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 3 ; <<4 x float>*>:199 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 1 ; <<4 x float>*>:200 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 2 ; <<4 x float>*>:201 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 3 ; <<4 x float>*>:202 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 1 ; <<4 x float>*>:203 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 2 ; <<4 x float>*>:204 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 3 ; <<4 x float>*>:205 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 1 ; <<4 x float>*>:206 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 2 ; <<4 x float>*>:207 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 3 ; <<4 x float>*>:208 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 1 ; <<4 x float>*>:209 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 2 ; <<4 x float>*>:210 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 3 ; <<4 x float>*>:211 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 1 ; <<4 x float>*>:212 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 2 ; <<4 x float>*>:213 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 3 ; <<4 x float>*>:214 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 1 ; <<4 x float>*>:215 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 2 ; <<4 x float>*>:216 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 3 ; <<4 x float>*>:217 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 1 ; <<4 x float>*>:218 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 2 ; <<4 x float>*>:219 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 3 ; <<4 x float>*>:220 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 1 ; <<4 x float>*>:221 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 2 ; <<4 x float>*>:222 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 3 ; <<4 x float>*>:223 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 1 ; <<4 x float>*>:224 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 2 ; <<4 x float>*>:225 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 3 ; <<4 x float>*>:226 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 1 ; <<4 x float>*>:227 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 2 ; <<4 x float>*>:228 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 3 ; <<4 x float>*>:229 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 1 ; <<4 x float>*>:230 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 2 ; <<4 x float>*>:231 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 3 ; <<4 x float>*>:232 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 1 ; <<4 x float>*>:233 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 2 ; <<4 x float>*>:234 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 3 ; <<4 x float>*>:235 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 1 ; <<4 x float>*>:236 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 2 ; <<4 x float>*>:237 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 3 ; <<4 x float>*>:238 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 1 ; <<4 x float>*>:239 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 2 ; <<4 x float>*>:240 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 3 ; <<4 x float>*>:241 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 1 ; <<4 x float>*>:242 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 2 ; <<4 x float>*>:243 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 3 ; <<4 x float>*>:244 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 1 ; <<4 x float>*>:245 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 2 ; <<4 x float>*>:246 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 3 ; <<4 x float>*>:247 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 1 ; <<4 x float>*>:248 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 2 ; <<4 x float>*>:249 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 3 ; <<4 x float>*>:250 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 1 ; <<4 x float>*>:251 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 2 ; <<4 x float>*>:252 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 3 ; <<4 x float>*>:253 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 1 ; <<4 x float>*>:254 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 2 ; <<4 x float>*>:255 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 3 ; <<4 x float>*>:256 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 1 ; <<4 x float>*>:257 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 2 ; <<4 x float>*>:258 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 3 ; <<4 x float>*>:259 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 1 ; <<4 x float>*>:260 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 2 ; <<4 x float>*>:261 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 3 ; <<4 x float>*>:262 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 1 ; <<4 x float>*>:263 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 2 ; <<4 x float>*>:264 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 3 ; <<4 x float>*>:265 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 1 ; <<4 x float>*>:266 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 2 ; <<4 x float>*>:267 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 3 ; <<4 x float>*>:268 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 1 ; <<4 x float>*>:269 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 2 ; <<4 x float>*>:270 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 3 ; <<4 x float>*>:271 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 1 ; <<4 x float>*>:272 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 2 ; <<4 x float>*>:273 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 3 ; <<4 x float>*>:274 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 1 ; <<4 x float>*>:275 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 2 ; <<4 x float>*>:276 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 3 ; <<4 x float>*>:277 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 1 ; <<4 x float>*>:278 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 2 ; <<4 x float>*>:279 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 3 ; <<4 x float>*>:280 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 1 ; <<4 x float>*>:281 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 2 ; <<4 x float>*>:282 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 3 ; <<4 x float>*>:283 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 1 ; <<4 x float>*>:284 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 2 ; <<4 x float>*>:285 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 3 ; <<4 x float>*>:286 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 1 ; <<4 x float>*>:287 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 2 ; <<4 x float>*>:288 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 3 ; <<4 x float>*>:289 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 1 ; <<4 x float>*>:290 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 2 ; <<4 x float>*>:291 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 3 ; <<4 x float>*>:292 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 1 ; <<4 x float>*>:293 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 2 ; <<4 x float>*>:294 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 3 ; <<4 x float>*>:295 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 1 ; <<4 x float>*>:296 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 2 ; <<4 x float>*>:297 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 3 ; <<4 x float>*>:298 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 1 ; <<4 x float>*>:299 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 2 ; <<4 x float>*>:300 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 3 ; <<4 x float>*>:301 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 1 ; <<4 x float>*>:302 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 2 ; <<4 x float>*>:303 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 3 ; <<4 x float>*>:304 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 1 ; <<4 x float>*>:305 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 2 ; <<4 x float>*>:306 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 3 ; <<4 x float>*>:307 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 1 ; <<4 x float>*>:308 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 2 ; <<4 x float>*>:309 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 3 ; <<4 x float>*>:310 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 1 ; <<4 x float>*>:311 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 2 ; <<4 x float>*>:312 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 3 ; <<4 x float>*>:313 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 1 ; <<4 x float>*>:314 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 2 ; <<4 x float>*>:315 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 3 ; <<4 x float>*>:316 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 1 ; <<4 x float>*>:317 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 2 ; <<4 x float>*>:318 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 3 ; <<4 x float>*>:319 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 1 ; <<4 x float>*>:320 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 2 ; <<4 x float>*>:321 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 3 ; <<4 x float>*>:322 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 1 ; <<4 x float>*>:323 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 2 ; <<4 x float>*>:324 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 3 ; <<4 x float>*>:325 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 1 ; <<4 x float>*>:326 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 2 ; <<4 x float>*>:327 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 3 ; <<4 x float>*>:328 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 1 ; <<4 x float>*>:329 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 2 ; <<4 x float>*>:330 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 3 ; <<4 x float>*>:331 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 1 ; <<4 x float>*>:332 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 2 ; <<4 x float>*>:333 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 3 ; <<4 x float>*>:334 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 1 ; <<4 x float>*>:335 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 2 ; <<4 x float>*>:336 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 3 ; <<4 x float>*>:337 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 1 ; <<4 x float>*>:338 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 2 ; <<4 x float>*>:339 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 3 ; <<4 x float>*>:340 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 1 ; <<4 x float>*>:341 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 2 ; <<4 x float>*>:342 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 3 ; <<4 x float>*>:343 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 1 ; <<4 x float>*>:344 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 2 ; <<4 x float>*>:345 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 3 ; <<4 x float>*>:346 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 1 ; <<4 x float>*>:347 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 2 ; <<4 x float>*>:348 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 3 ; <<4 x float>*>:349 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 1 ; <<4 x float>*>:350 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 2 ; <<4 x float>*>:351 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 3 ; <<4 x float>*>:352 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 1 ; <<4 x float>*>:353 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 2 ; <<4 x float>*>:354 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 3 ; <<4 x float>*>:355 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 1 ; <<4 x float>*>:356 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 2 ; <<4 x float>*>:357 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 3 ; <<4 x float>*>:358 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 1 ; <<4 x float>*>:359 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 2 ; <<4 x float>*>:360 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 3 ; <<4 x float>*>:361 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 1 ; <<4 x float>*>:362 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 2 ; <<4 x float>*>:363 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 3 ; <<4 x float>*>:364 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 1 ; <<4 x float>*>:365 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 2 ; <<4 x float>*>:366 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 3 ; <<4 x float>*>:367 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 1 ; <<4 x float>*>:368 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 2 ; <<4 x float>*>:369 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 3 ; <<4 x float>*>:370 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 1 ; <<4 x float>*>:371 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 2 ; <<4 x float>*>:372 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 3 ; <<4 x float>*>:373 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 1 ; <<4 x float>*>:374 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 2 ; <<4 x float>*>:375 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 3 ; <<4 x float>*>:376 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 1 ; <<4 x float>*>:377 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 2 ; <<4 x float>*>:378 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 3 ; <<4 x float>*>:379 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 1 ; <<4 x float>*>:380 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 2 ; <<4 x float>*>:381 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 3 ; <<4 x float>*>:382 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 1 ; <<4 x float>*>:383 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 2 ; <<4 x float>*>:384 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 3 ; <<4 x float>*>:385 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 1 ; <<4 x float>*>:386 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 2 ; <<4 x float>*>:387 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 3 ; <<4 x float>*>:388 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 1 ; <<4 x float>*>:389 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 2 ; <<4 x float>*>:390 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 3 ; <<4 x float>*>:391 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 1 ; <<4 x float>*>:392 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 2 ; <<4 x float>*>:393 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 3 ; <<4 x float>*>:394 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 1 ; <<4 x float>*>:395 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 2 ; <<4 x float>*>:396 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 3 ; <<4 x float>*>:397 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 1 ; <<4 x float>*>:398 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 2 ; <<4 x float>*>:399 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 3 ; <<4 x float>*>:400 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 1 ; <<4 x float>*>:401 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 2 ; <<4 x float>*>:402 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 3 ; <<4 x float>*>:403 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 1 ; <<4 x float>*>:404 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 2 ; <<4 x float>*>:405 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 3 ; <<4 x float>*>:406 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 1 ; <<4 x float>*>:407 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 2 ; <<4 x float>*>:408 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 3 ; <<4 x float>*>:409 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 1 ; <<4 x float>*>:410 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 2 ; <<4 x float>*>:411 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 3 ; <<4 x float>*>:412 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 1 ; <<4 x float>*>:413 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 2 ; <<4 x float>*>:414 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 3 ; <<4 x float>*>:415 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 1 ; <<4 x float>*>:416 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 2 ; <<4 x float>*>:417 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 3 ; <<4 x float>*>:418 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 1 ; <<4 x float>*>:419 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 2 ; <<4 x float>*>:420 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 3 ; <<4 x float>*>:421 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 1 ; <<4 x float>*>:422 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 2 ; <<4 x float>*>:423 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 3 ; <<4 x float>*>:424 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 1 ; <<4 x float>*>:425 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 2 ; <<4 x float>*>:426 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 3 ; <<4 x float>*>:427 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 1 ; <<4 x float>*>:428 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 2 ; <<4 x float>*>:429 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 3 ; <<4 x float>*>:430 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 1 ; <<4 x float>*>:431 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 2 ; <<4 x float>*>:432 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 3 ; <<4 x float>*>:433 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 1 ; <<4 x float>*>:434 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 2 ; <<4 x float>*>:435 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 3 ; <<4 x float>*>:436 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 1 ; <<4 x float>*>:437 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 2 ; <<4 x float>*>:438 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 3 ; <<4 x float>*>:439 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 1 ; <<4 x float>*>:440 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 2 ; <<4 x float>*>:441 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 3 ; <<4 x float>*>:442 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 1 ; <<4 x float>*>:443 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 2 ; <<4 x float>*>:444 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 3 ; <<4 x float>*>:445 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 1 ; <<4 x float>*>:446 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 2 ; <<4 x float>*>:447 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 3 ; <<4 x float>*>:448 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 1 ; <<4 x float>*>:449 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 2 ; <<4 x float>*>:450 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 3 ; <<4 x float>*>:451 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 1 ; <<4 x float>*>:452 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 2 ; <<4 x float>*>:453 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 3 ; <<4 x float>*>:454 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 1 ; <<4 x float>*>:455 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 2 ; <<4 x float>*>:456 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 3 ; <<4 x float>*>:457 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 1 ; <<4 x float>*>:458 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 2 ; <<4 x float>*>:459 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 3 ; <<4 x float>*>:460 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 1 ; <<4 x float>*>:461 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 2 ; <<4 x float>*>:462 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 3 ; <<4 x float>*>:463 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 1 ; <<4 x float>*>:464 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 2 ; <<4 x float>*>:465 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 3 ; <<4 x float>*>:466 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 1 ; <<4 x float>*>:467 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 2 ; <<4 x float>*>:468 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 3 ; <<4 x float>*>:469 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 1 ; <<4 x float>*>:470 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 2 ; <<4 x float>*>:471 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 3 ; <<4 x float>*>:472 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 1 ; <<4 x float>*>:473 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 2 ; <<4 x float>*>:474 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 3 ; <<4 x float>*>:475 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 1 ; <<4 x float>*>:476 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 2 ; <<4 x float>*>:477 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 3 ; <<4 x float>*>:478 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 1 ; <<4 x float>*>:479 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 2 ; <<4 x float>*>:480 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 3 ; <<4 x float>*>:481 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 1 ; <<4 x float>*>:482 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 2 ; <<4 x float>*>:483 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 3 ; <<4 x float>*>:484 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:485 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:486 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:487 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:488 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 2 ; <<4 x float>*>:489 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 3 ; <<4 x float>*>:490 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 1 ; <<4 x float>*>:491 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 2 ; <<4 x float>*>:492 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 3 ; <<4 x float>*>:493 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 1 ; <<4 x float>*>:494 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 2 ; <<4 x float>*>:495 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 3 ; <<4 x float>*>:496 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 1 ; <<4 x float>*>:497 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 2 ; <<4 x float>*>:498 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 3 ; <<4 x float>*>:499 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 1 ; <<4 x float>*>:500 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 2 ; <<4 x float>*>:501 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 3 ; <<4 x float>*>:502 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 1 ; <<4 x float>*>:503 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 2 ; <<4 x float>*>:504 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 3 ; <<4 x float>*>:505 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 1 ; <<4 x float>*>:506 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 2 ; <<4 x float>*>:507 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 3 ; <<4 x float>*>:508 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 1 ; <<4 x float>*>:509 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 2 ; <<4 x float>*>:510 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 3 ; <<4 x float>*>:511 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 1 ; <<4 x float>*>:512 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 2 ; <<4 x float>*>:513 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 3 ; <<4 x float>*>:514 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 1 ; <<4 x float>*>:515 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 2 ; <<4 x float>*>:516 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 3 ; <<4 x float>*>:517 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 1 ; <<4 x float>*>:518 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 2 ; <<4 x float>*>:519 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 3 ; <<4 x float>*>:520 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 1 ; <<4 x float>*>:521 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 2 ; <<4 x float>*>:522 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 3 ; <<4 x float>*>:523 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 1 ; <<4 x float>*>:524 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 2 ; <<4 x float>*>:525 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 3 ; <<4 x float>*>:526 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:527 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:528 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:529 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:530 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:531 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:532 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:533 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:534 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:535 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 1 ; <<4 x float>*>:536 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 2 ; <<4 x float>*>:537 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 3 ; <<4 x float>*>:538 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 1 ; <<4 x float>*>:539 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 2 ; <<4 x float>*>:540 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 3 ; <<4 x float>*>:541 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:542 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:543 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:544 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 1 ; <<4 x float>*>:545 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 2 ; <<4 x float>*>:546 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 3 ; <<4 x float>*>:547 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 1 ; <<4 x float>*>:548 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 2 ; <<4 x float>*>:549 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 3 ; <<4 x float>*>:550 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:551 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 1 ; <<4 x float>*>:552 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 2 ; <<4 x float>*>:553 [#uses=1]
- load <4 x float>* %553 ; <<4 x float>>:554 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 3 ; <<4 x float>*>:555 [#uses=0]
- shufflevector <4 x float> %554, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:556 [#uses=1]
- call <4 x i32> @llvm.ppc.altivec.vcmpgtfp( <4 x float> zeroinitializer, <4 x float> %556 ) ; <<4 x i32>>:557 [#uses=0]
- bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>>:558 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:559 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 2 ; <<4 x float>*>:560 [#uses=1]
- store <4 x float> zeroinitializer, <4 x float>* %560
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:561 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:562 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 2 ; <<4 x float>*>:563 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:564 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:565 [#uses=1]
- store <4 x float> %565, <4 x float>* null
- icmp eq i32 0, 0 ; <i1>:566 [#uses=1]
- br i1 %566, label %.critedge, label %xPIF.exit
-
-.critedge: ; preds = %xOperationInitMasks.exit
- getelementptr [4 x <4 x i32>]* null, i32 0, i32 3 ; <<4 x i32>*>:567 [#uses=0]
- and <4 x i32> zeroinitializer, zeroinitializer ; <<4 x i32>>:568 [#uses=0]
- or <4 x i32> zeroinitializer, zeroinitializer ; <<4 x i32>>:569 [#uses=0]
- icmp eq i32 0, 0 ; <i1>:570 [#uses=1]
- br i1 %570, label %.critedge7898, label %xPBRK.exit
-
-.critedge7898: ; preds = %.critedge
- br label %xPIF.exit
-
-xPIF.exit: ; preds = %.critedge7898, %xOperationInitMasks.exit
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 1 ; <<4 x float>*>:571 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:572 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:573 [#uses=0]
- icmp eq i32 0, 0 ; <i1>:574 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:575 [#uses=0]
- load <4 x float>* %0 ; <<4 x float>>:576 [#uses=0]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:577 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 0 ; <<4 x float>*>:578 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:579 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:580 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:581 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:582 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:583 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:584 [#uses=1]
- load <4 x float>* %584 ; <<4 x float>>:585 [#uses=1]
- load <4 x float>* null ; <<4 x float>>:586 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:587 [#uses=1]
- load <4 x float>* %587 ; <<4 x float>>:588 [#uses=1]
- shufflevector <4 x float> %583, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:589 [#uses=1]
- shufflevector <4 x float> %585, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:590 [#uses=1]
- shufflevector <4 x float> %588, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:591 [#uses=1]
- fmul <4 x float> zeroinitializer, %589 ; <<4 x float>>:592 [#uses=0]
- fmul <4 x float> zeroinitializer, %590 ; <<4 x float>>:593 [#uses=0]
- fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:594 [#uses=1]
- fmul <4 x float> zeroinitializer, %591 ; <<4 x float>>:595 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:596 [#uses=2]
- load <4 x float>* %596 ; <<4 x float>>:597 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* %596
- load <4 x float>* null ; <<4 x float>>:598 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:599 [#uses=0]
- shufflevector <4 x float> %594, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:600 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:601 [#uses=2]
- load <4 x float>* %601 ; <<4 x float>>:602 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* %601
- load <4 x float>* null ; <<4 x float>>:603 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:604 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:605 [#uses=1]
- load <4 x float>* %605 ; <<4 x float>>:606 [#uses=1]
- fsub <4 x float> zeroinitializer, %604 ; <<4 x float>>:607 [#uses=2]
- fsub <4 x float> zeroinitializer, %606 ; <<4 x float>>:608 [#uses=2]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:609 [#uses=0]
- br i1 false, label %617, label %610
-
-; <label>:610 ; preds = %xPIF.exit
- load <4 x float>* null ; <<4 x float>>:611 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:612 [#uses=2]
- load <4 x float>* %612 ; <<4 x float>>:613 [#uses=1]
- shufflevector <4 x float> %607, <4 x float> %613, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:614 [#uses=1]
- store <4 x float> %614, <4 x float>* %612
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:615 [#uses=2]
- load <4 x float>* %615 ; <<4 x float>>:616 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* %615
- br label %xST.exit400
-
-; <label>:617 ; preds = %xPIF.exit
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:618 [#uses=0]
- shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x i32>>:619 [#uses=1]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %619, <4 x i32> zeroinitializer ) ; <i32>:620 [#uses=1]
- icmp eq i32 %620, 0 ; <i1>:621 [#uses=1]
- br i1 %621, label %625, label %622
-
-; <label>:622 ; preds = %617
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:623 [#uses=0]
- shufflevector <4 x float> %607, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:624 [#uses=0]
- br label %625
-
-; <label>:625 ; preds = %622, %617
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:626 [#uses=0]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:627 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:628 [#uses=1]
- load <4 x float>* %628 ; <<4 x float>>:629 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:630 [#uses=0]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:631 [#uses=1]
- icmp eq i32 %631, 0 ; <i1>:632 [#uses=1]
- br i1 %632, label %xST.exit400, label %633
-
-; <label>:633 ; preds = %625
- load <4 x float>* null ; <<4 x float>>:634 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %634, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:635 [#uses=1]
- store <4 x float> %635, <4 x float>* null
- br label %xST.exit400
-
-xST.exit400: ; preds = %633, %625, %610
- %.17218 = phi <4 x float> [ zeroinitializer, %610 ], [ %608, %633 ], [ %608, %625 ] ; <<4 x float>> [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 0 ; <<4 x float>*>:636 [#uses=1]
- load <4 x float>* %636 ; <<4 x float>>:637 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:638 [#uses=2]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:639 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:640 [#uses=2]
- fmul <4 x float> %638, %638 ; <<4 x float>>:641 [#uses=1]
- fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:642 [#uses=0]
- fmul <4 x float> %640, %640 ; <<4 x float>>:643 [#uses=2]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x float>>:644 [#uses=0]
- shufflevector <4 x float> %643, <4 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x float>>:645 [#uses=1]
- fadd <4 x float> %645, %643 ; <<4 x float>>:646 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>>:647 [#uses=1]
- shufflevector <4 x float> %641, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>>:648 [#uses=1]
- fadd <4 x float> zeroinitializer, %647 ; <<4 x float>>:649 [#uses=2]
- fadd <4 x float> zeroinitializer, %648 ; <<4 x float>>:650 [#uses=0]
- fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:651 [#uses=2]
- call <4 x float> @llvm.ppc.altivec.vrsqrtefp( <4 x float> %649 ) ; <<4 x float>>:652 [#uses=1]
- fmul <4 x float> %652, %649 ; <<4 x float>>:653 [#uses=1]
- call <4 x float> @llvm.ppc.altivec.vrsqrtefp( <4 x float> %651 ) ; <<4 x float>>:654 [#uses=1]
- fmul <4 x float> %654, %651 ; <<4 x float>>:655 [#uses=0]
- icmp eq i32 0, 0 ; <i1>:656 [#uses=1]
- br i1 %656, label %665, label %657
-
-; <label>:657 ; preds = %xST.exit400
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:658 [#uses=0]
- shufflevector <4 x float> %653, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:659 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:660 [#uses=1]
- load <4 x float>* %660 ; <<4 x float>>:661 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:662 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:663 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:664 [#uses=0]
- br label %xST.exit402
-
-; <label>:665 ; preds = %xST.exit400
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:666 [#uses=0]
- br i1 false, label %669, label %667
-
-; <label>:667 ; preds = %665
- load <4 x float>* null ; <<4 x float>>:668 [#uses=0]
- br label %669
-
-; <label>:669 ; preds = %667, %665
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:670 [#uses=0]
- br label %xST.exit402
-
-xST.exit402: ; preds = %669, %657
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:671 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:672 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:673 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:674 [#uses=1]
- load <4 x float>* %674 ; <<4 x float>>:675 [#uses=1]
- load <4 x float>* null ; <<4 x float>>:676 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:677 [#uses=1]
- shufflevector <4 x float> %675, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:678 [#uses=1]
- fmul <4 x float> zeroinitializer, %677 ; <<4 x float>>:679 [#uses=0]
- fmul <4 x float> zeroinitializer, %678 ; <<4 x float>>:680 [#uses=0]
- fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:681 [#uses=1]
- icmp eq i32 0, 0 ; <i1>:682 [#uses=1]
- br i1 %682, label %689, label %683
-
-; <label>:683 ; preds = %xST.exit402
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:684 [#uses=1]
- load <4 x float>* %684 ; <<4 x float>>:685 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:686 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:687 [#uses=0]
- shufflevector <4 x float> %681, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:688 [#uses=0]
- br label %xST.exit405
-
-; <label>:689 ; preds = %xST.exit402
- shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>>:690 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:691 [#uses=1]
- shufflevector <4 x i32> %691, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:692 [#uses=1]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %692, <4 x i32> zeroinitializer ) ; <i32>:693 [#uses=1]
- icmp eq i32 %693, 0 ; <i1>:694 [#uses=0]
- br label %xST.exit405
-
-xST.exit405: ; preds = %689, %683
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:695 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:696 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:697 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:698 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:699 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:700 [#uses=1]
- fadd <4 x float> zeroinitializer, %700 ; <<4 x float>>:701 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:702 [#uses=1]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %702, <4 x i32> zeroinitializer ) ; <i32>:703 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:704 [#uses=2]
- load <4 x float>* %704 ; <<4 x float>>:705 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* %704
- load <4 x float>* null ; <<4 x float>>:706 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* null
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:707 [#uses=2]
- load <4 x float>* %707 ; <<4 x float>>:708 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* %707
- load <4 x float>* null ; <<4 x float>>:709 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:710 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:711 [#uses=1]
- shufflevector <4 x float> %711, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>>:712 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:713 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:714 [#uses=1]
- load <4 x float>* %714 ; <<4 x float>>:715 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:716 [#uses=0]
- fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:717 [#uses=1]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:718 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 0 ; <<4 x float>*>:719 [#uses=1]
- store <4 x float> zeroinitializer, <4 x float>* %719
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:720 [#uses=1]
- shufflevector <4 x float> %717, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:721 [#uses=1]
- store <4 x float> %721, <4 x float>* %720
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:722 [#uses=1]
- load <4 x float>* %722 ; <<4 x float>>:723 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %723, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:724 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:725 [#uses=1]
- store <4 x float> zeroinitializer, <4 x float>* %725
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:726 [#uses=1]
- load <4 x float>* %726 ; <<4 x float>>:727 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:728 [#uses=1]
- load <4 x float>* %728 ; <<4 x float>>:729 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:730 [#uses=1]
- load <4 x float>* %730 ; <<4 x float>>:731 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:732 [#uses=1]
- load <4 x float>* %732 ; <<4 x float>>:733 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:734 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:735 [#uses=1]
- fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:736 [#uses=1]
- fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:737 [#uses=1]
- fmul <4 x float> zeroinitializer, %735 ; <<4 x float>>:738 [#uses=1]
- fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:739 [#uses=1]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:740 [#uses=1]
- icmp eq i32 %740, 0 ; <i1>:741 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:742 [#uses=2]
- load <4 x float>* %742 ; <<4 x float>>:743 [#uses=1]
- shufflevector <4 x float> %736, <4 x float> %743, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:744 [#uses=1]
- store <4 x float> %744, <4 x float>* %742
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:745 [#uses=1]
- load <4 x float>* %745 ; <<4 x float>>:746 [#uses=1]
- shufflevector <4 x float> %737, <4 x float> %746, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:747 [#uses=0]
- shufflevector <4 x float> %738, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:748 [#uses=1]
- store <4 x float> %748, <4 x float>* null
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:749 [#uses=1]
- load <4 x float>* %749 ; <<4 x float>>:750 [#uses=1]
- shufflevector <4 x float> %739, <4 x float> %750, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:751 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:752 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:753 [#uses=1]
- load <4 x float>* %753 ; <<4 x float>>:754 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:755 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:756 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:757 [#uses=1]
- shufflevector <4 x float> %756, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:758 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:759 [#uses=1]
- load <4 x float>* %759 ; <<4 x float>>:760 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:761 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:762 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:763 [#uses=1]
- fadd <4 x float> %757, zeroinitializer ; <<4 x float>>:764 [#uses=0]
- fadd <4 x float> %758, %763 ; <<4 x float>>:765 [#uses=0]
- fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:766 [#uses=1]
- br i1 false, label %773, label %767
-
-; <label>:767 ; preds = %xST.exit405
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:768 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:769 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %769, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:770 [#uses=1]
- store <4 x float> %770, <4 x float>* null
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:771 [#uses=1]
- load <4 x float>* %771 ; <<4 x float>>:772 [#uses=0]
- br label %xST.exit422
-
-; <label>:773 ; preds = %xST.exit405
- br label %xST.exit422
-
-xST.exit422: ; preds = %773, %767
- %.07267 = phi <4 x float> [ %766, %767 ], [ undef, %773 ] ; <<4 x float>> [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:774 [#uses=0]
- fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:775 [#uses=0]
- icmp eq i32 0, 0 ; <i1>:776 [#uses=1]
- br i1 %776, label %780, label %777
-
-; <label>:777 ; preds = %xST.exit422
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:778 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:779 [#uses=0]
- br label %xST.exit431
-
-; <label>:780 ; preds = %xST.exit422
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:781 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:782 [#uses=2]
- load <4 x float>* %782 ; <<4 x float>>:783 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* %782
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:784 [#uses=1]
- shufflevector <4 x i32> %784, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:785 [#uses=0]
- icmp eq i32 0, 0 ; <i1>:786 [#uses=0]
- br label %xST.exit431
-
-xST.exit431: ; preds = %780, %777
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:787 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:788 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:789 [#uses=2]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %789, <4 x i32> zeroinitializer ) ; <i32>:790 [#uses=1]
- icmp eq i32 %790, 0 ; <i1>:791 [#uses=0]
- shufflevector <4 x i32> %789, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>>:792 [#uses=1]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %792, <4 x i32> zeroinitializer ) ; <i32>:793 [#uses=1]
- icmp eq i32 %793, 0 ; <i1>:794 [#uses=1]
- br i1 %794, label %797, label %795
-
-; <label>:795 ; preds = %xST.exit431
- load <4 x float>* null ; <<4 x float>>:796 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* null
- br label %797
-
-; <label>:797 ; preds = %795, %xST.exit431
- %.07332 = phi <4 x float> [ zeroinitializer, %795 ], [ undef, %xST.exit431 ] ; <<4 x float>> [#uses=0]
- shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x i32>>:798 [#uses=0]
- br i1 false, label %xST.exit434, label %799
-
-; <label>:799 ; preds = %797
- load <4 x float>* null ; <<4 x float>>:800 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* null
- br label %xST.exit434
-
-xST.exit434: ; preds = %799, %797
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:801 [#uses=1]
- shufflevector <4 x i32> %801, <4 x i32> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x i32>>:802 [#uses=0]
- shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:803 [#uses=0]
- icmp eq i32 0, 0 ; <i1>:804 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 0 ; <<4 x float>*>:805 [#uses=1]
- load <4 x float>* %805 ; <<4 x float>>:806 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:807 [#uses=1]
- load <4 x float>* %807 ; <<4 x float>>:808 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:809 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:810 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:811 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:812 [#uses=1]
- load <4 x float>* %812 ; <<4 x float>>:813 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:814 [#uses=1]
- load <4 x float>* %814 ; <<4 x float>>:815 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:816 [#uses=0]
- unreachable
-
-xPBRK.exit: ; preds = %.critedge
- store <4 x i32> < i32 -1, i32 -1, i32 -1, i32 -1 >, <4 x i32>* %.sub7896
- store <4 x i32> zeroinitializer, <4 x i32>* null
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:817 [#uses=1]
- load <4 x float>* %817 ; <<4 x float>>:818 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:819 [#uses=1]
- load <4 x float>* %819 ; <<4 x float>>:820 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:821 [#uses=1]
- load <4 x float>* %821 ; <<4 x float>>:822 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:823 [#uses=1]
- shufflevector <4 x float> %818, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:824 [#uses=1]
- shufflevector <4 x float> %820, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:825 [#uses=1]
- shufflevector <4 x float> %822, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:826 [#uses=1]
- shufflevector <4 x float> %823, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:827 [#uses=0]
- shufflevector <4 x float> %824, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:828 [#uses=1]
- store <4 x float> %828, <4 x float>* null
- load <4 x float>* null ; <<4 x float>>:829 [#uses=1]
- shufflevector <4 x float> %825, <4 x float> %829, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:830 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:831 [#uses=2]
- load <4 x float>* %831 ; <<4 x float>>:832 [#uses=1]
- shufflevector <4 x float> %826, <4 x float> %832, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:833 [#uses=1]
- store <4 x float> %833, <4 x float>* %831
- br label %xLS.exit449
-
-xLS.exit449: ; preds = %1215, %xPBRK.exit
- %.27464 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.17463, %1215 ] ; <<4 x float>> [#uses=2]
- %.27469 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.17468, %1215 ] ; <<4 x float>> [#uses=2]
- %.27474 = phi <4 x float> [ undef, %xPBRK.exit ], [ zeroinitializer, %1215 ] ; <<4 x float>> [#uses=1]
- %.17482 = phi <4 x float> [ undef, %xPBRK.exit ], [ zeroinitializer, %1215 ] ; <<4 x float>> [#uses=0]
- %.17486 = phi <4 x float> [ undef, %xPBRK.exit ], [ zeroinitializer, %1215 ] ; <<4 x float>> [#uses=0]
- %.17490 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07489, %1215 ] ; <<4 x float>> [#uses=2]
- %.17494 = phi <4 x float> [ undef, %xPBRK.exit ], [ zeroinitializer, %1215 ] ; <<4 x float>> [#uses=0]
- %.27504 = phi <4 x float> [ undef, %xPBRK.exit ], [ zeroinitializer, %1215 ] ; <<4 x float>> [#uses=0]
- %.17513 = phi <4 x float> [ undef, %xPBRK.exit ], [ zeroinitializer, %1215 ] ; <<4 x float>> [#uses=0]
- %.17517 = phi <4 x float> [ undef, %xPBRK.exit ], [ zeroinitializer, %1215 ] ; <<4 x float>> [#uses=0]
- %.17552 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07551, %1215 ] ; <<4 x float>> [#uses=2]
- %.17556 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07555, %1215 ] ; <<4 x float>> [#uses=2]
- %.17560 = phi <4 x float> [ undef, %xPBRK.exit ], [ zeroinitializer, %1215 ] ; <<4 x float>> [#uses=0]
- %.17583 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07582, %1215 ] ; <<4 x float>> [#uses=2]
- %.17591 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07590, %1215 ] ; <<4 x float>> [#uses=2]
- %.17599 = phi <4 x float> [ undef, %xPBRK.exit ], [ zeroinitializer, %1215 ] ; <<4 x float>> [#uses=0]
- %.17618 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07617, %1215 ] ; <<4 x float>> [#uses=2]
- %.17622 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07621, %1215 ] ; <<4 x float>> [#uses=2]
- %.17626 = phi <4 x float> [ undef, %xPBRK.exit ], [ zeroinitializer, %1215 ] ; <<4 x float>> [#uses=0]
- %.17653 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07652, %1215 ] ; <<4 x float>> [#uses=2]
- %.17657 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07656, %1215 ] ; <<4 x float>> [#uses=2]
- %.17661 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07660, %1215 ] ; <<4 x float>> [#uses=2]
- %.17665 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07664, %1215 ] ; <<4 x float>> [#uses=2]
- %.17723 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07722, %1215 ] ; <<4 x float>> [#uses=2]
- %.17727 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07726, %1215 ] ; <<4 x float>> [#uses=2]
- %.17731 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07730, %1215 ] ; <<4 x float>> [#uses=2]
- %.17735 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07734, %1215 ] ; <<4 x float>> [#uses=2]
- %.17770 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07769, %1215 ] ; <<4 x float>> [#uses=2]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 0 ; <<4 x float>*>:834 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:835 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:836 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:837 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:838 [#uses=0]
- shufflevector <4 x float> %835, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:839 [#uses=1]
- getelementptr <4 x float>* null, i32 878 ; <<4 x float>*>:840 [#uses=1]
- load <4 x float>* %840 ; <<4 x float>>:841 [#uses=0]
- call <4 x float> @llvm.ppc.altivec.vcfsx( <4 x i32> zeroinitializer, i32 0 ) ; <<4 x float>>:842 [#uses=1]
- shufflevector <4 x float> %842, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:843 [#uses=2]
- call <4 x i32> @llvm.ppc.altivec.vcmpgtfp( <4 x float> %843, <4 x float> %839 ) ; <<4 x i32>>:844 [#uses=1]
- bitcast <4 x i32> %844 to <4 x float> ; <<4 x float>>:845 [#uses=1]
- call <4 x i32> @llvm.ppc.altivec.vcmpgtfp( <4 x float> %843, <4 x float> zeroinitializer ) ; <<4 x i32>>:846 [#uses=0]
- bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>>:847 [#uses=1]
- icmp eq i32 0, 0 ; <i1>:848 [#uses=1]
- br i1 %848, label %854, label %849
-
-; <label>:849 ; preds = %xLS.exit449
- shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:850 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:851 [#uses=1]
- store <4 x float> zeroinitializer, <4 x float>* %851
- shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:852 [#uses=1]
- store <4 x float> %852, <4 x float>* null
- shufflevector <4 x float> %847, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:853 [#uses=0]
- br label %xST.exit451
-
-; <label>:854 ; preds = %xLS.exit449
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:855 [#uses=0]
- br i1 false, label %859, label %856
-
-; <label>:856 ; preds = %854
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:857 [#uses=2]
- load <4 x float>* %857 ; <<4 x float>>:858 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* %857
- br label %859
-
-; <label>:859 ; preds = %856, %854
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:860 [#uses=0]
- br i1 false, label %864, label %861
-
-; <label>:861 ; preds = %859
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:862 [#uses=1]
- shufflevector <4 x float> %845, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:863 [#uses=1]
- store <4 x float> %863, <4 x float>* %862
- br label %864
-
-; <label>:864 ; preds = %861, %859
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:865 [#uses=1]
- shufflevector <4 x i32> %865, <4 x i32> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x i32>>:866 [#uses=0]
- br i1 false, label %868, label %867
-
-; <label>:867 ; preds = %864
- store <4 x float> zeroinitializer, <4 x float>* null
- br label %868
-
-; <label>:868 ; preds = %867, %864
- shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:869 [#uses=0]
- br label %xST.exit451
-
-xST.exit451: ; preds = %868, %849
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:870 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:871 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:872 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:873 [#uses=1]
- bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:874 [#uses=1]
- xor <4 x i32> %874, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>>:875 [#uses=0]
- bitcast <4 x float> %873 to <4 x i32> ; <<4 x i32>>:876 [#uses=1]
- xor <4 x i32> %876, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>>:877 [#uses=0]
- bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:878 [#uses=1]
- xor <4 x i32> %878, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>>:879 [#uses=1]
- bitcast <4 x i32> %879 to <4 x float> ; <<4 x float>>:880 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:881 [#uses=1]
- icmp eq i32 0, 0 ; <i1>:882 [#uses=1]
- br i1 %882, label %888, label %883
-
-; <label>:883 ; preds = %xST.exit451
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:884 [#uses=1]
- store <4 x float> zeroinitializer, <4 x float>* %884
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:885 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:886 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:887 [#uses=0]
- br label %xST.exit453
-
-; <label>:888 ; preds = %xST.exit451
- shufflevector <4 x i32> %881, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>>:889 [#uses=0]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:890 [#uses=0]
- br i1 false, label %894, label %891
-
-; <label>:891 ; preds = %888
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:892 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:893 [#uses=1]
- store <4 x float> %893, <4 x float>* %892
- br label %894
-
-; <label>:894 ; preds = %891, %888
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:895 [#uses=1]
- icmp eq i32 %895, 0 ; <i1>:896 [#uses=1]
- br i1 %896, label %898, label %897
-
-; <label>:897 ; preds = %894
- br label %898
-
-; <label>:898 ; preds = %897, %894
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:899 [#uses=0]
- br i1 false, label %xST.exit453, label %900
-
-; <label>:900 ; preds = %898
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:901 [#uses=1]
- load <4 x float>* %901 ; <<4 x float>>:902 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %902, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:903 [#uses=0]
- br label %xST.exit453
-
-xST.exit453: ; preds = %900, %898, %883
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:904 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:905 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:906 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:907 [#uses=1]
- shufflevector <4 x float> %905, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:908 [#uses=1]
- bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:909 [#uses=0]
- bitcast <4 x float> %908 to <4 x i32> ; <<4 x i32>>:910 [#uses=0]
- bitcast <4 x float> %907 to <4 x i32> ; <<4 x i32>>:911 [#uses=0]
- bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:912 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:913 [#uses=0]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 2, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:914 [#uses=0]
- br i1 false, label %915, label %xPIF.exit455
-
-; <label>:915 ; preds = %xST.exit453
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:916 [#uses=0]
- getelementptr [4 x <4 x i32>]* null, i32 0, i32 3 ; <<4 x i32>*>:917 [#uses=1]
- store <4 x i32> zeroinitializer, <4 x i32>* %917
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:918 [#uses=1]
- and <4 x i32> %918, zeroinitializer ; <<4 x i32>>:919 [#uses=0]
- br label %.critedge7899
-
-.critedge7899: ; preds = %.critedge7899, %915
- or <4 x i32> zeroinitializer, zeroinitializer ; <<4 x i32>>:920 [#uses=1]
- br i1 false, label %.critedge7899, label %xPBRK.exit456
-
-xPBRK.exit456: ; preds = %.critedge7899
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 2, <4 x i32> %920, <4 x i32> zeroinitializer ) ; <i32>:921 [#uses=0]
- unreachable
-
-xPIF.exit455: ; preds = %xST.exit453
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 0 ; <<4 x float>*>:922 [#uses=1]
- load <4 x float>* %922 ; <<4 x float>>:923 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:924 [#uses=1]
- load <4 x float>* %924 ; <<4 x float>>:925 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:926 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:927 [#uses=0]
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:928 [#uses=0]
- bitcast { { i16, i16, i32 } }* %1 to <4 x float>* ; <<4 x float>*>:929 [#uses=0]
- bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:930 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:931 [#uses=0]
- icmp eq i32 0, 0 ; <i1>:932 [#uses=1]
- br i1 %932, label %934, label %933
-
-; <label>:933 ; preds = %xPIF.exit455
- store <4 x float> zeroinitializer, <4 x float>* null
- br label %934
-
-; <label>:934 ; preds = %933, %xPIF.exit455
- shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x i32>>:935 [#uses=0]
- icmp eq i32 0, 0 ; <i1>:936 [#uses=1]
- br i1 %936, label %xST.exit459, label %937
-
-; <label>:937 ; preds = %934
- br label %xST.exit459
-
-xST.exit459: ; preds = %937, %934
- shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x i32>>:938 [#uses=1]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %938, <4 x i32> zeroinitializer ) ; <i32>:939 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 2 ; <<4 x float>*>:940 [#uses=1]
- store <4 x float> zeroinitializer, <4 x float>* %940
- load <4 x float>* null ; <<4 x float>>:941 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %941, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:942 [#uses=1]
- store <4 x float> %942, <4 x float>* null
- shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:943 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:944 [#uses=0]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:945 [#uses=0]
- br i1 false, label %947, label %946
-
-; <label>:946 ; preds = %xST.exit459
- br label %947
-
-; <label>:947 ; preds = %946, %xST.exit459
- shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x i32>>:948 [#uses=0]
- icmp eq i32 0, 0 ; <i1>:949 [#uses=1]
- br i1 %949, label %952, label %950
-
-; <label>:950 ; preds = %947
- shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>>:951 [#uses=1]
- call void @llvm.ppc.altivec.stvewx( <4 x i32> %951, i8* null )
- br label %952
-
-; <label>:952 ; preds = %950, %947
- br i1 false, label %955, label %953
-
-; <label>:953 ; preds = %952
- getelementptr [4 x <4 x i32>]* null, i32 0, i32 2 ; <<4 x i32>*>:954 [#uses=0]
- br label %955
-
-; <label>:955 ; preds = %953, %952
- shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:956 [#uses=0]
- icmp eq i32 0, 0 ; <i1>:957 [#uses=1]
- br i1 %957, label %xStoreDestAddressWithMask.exit461, label %958
-
-; <label>:958 ; preds = %955
- shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>>:959 [#uses=1]
- call void @llvm.ppc.altivec.stvewx( <4 x i32> %959, i8* null )
- br label %xStoreDestAddressWithMask.exit461
-
-xStoreDestAddressWithMask.exit461: ; preds = %958, %955
- load <4 x float>* %0 ; <<4 x float>>:960 [#uses=0]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:961 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 0 ; <<4 x float>*>:962 [#uses=0]
- br i1 false, label %968, label %xST.exit463
-
-xST.exit463: ; preds = %xStoreDestAddressWithMask.exit461
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:963 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 2 ; <<4 x float>*>:964 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 3 ; <<4 x float>*>:965 [#uses=0]
- load <4 x float>* %0 ; <<4 x float>>:966 [#uses=3]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:967 [#uses=0]
- br i1 false, label %972, label %969
-
-; <label>:968 ; preds = %xStoreDestAddressWithMask.exit461
- unreachable
-
-; <label>:969 ; preds = %xST.exit463
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:970 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:971 [#uses=1]
- store <4 x float> %966, <4 x float>* %971
- store <4 x float> %966, <4 x float>* null
- br label %xST.exit465
-
-; <label>:972 ; preds = %xST.exit463
- call <4 x i32> @llvm.ppc.altivec.vsel( <4 x i32> zeroinitializer, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <<4 x i32>>:973 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* null
- store <4 x float> zeroinitializer, <4 x float>* null
- load <4 x float>* null ; <<4 x float>>:974 [#uses=0]
- bitcast <4 x float> %966 to <4 x i32> ; <<4 x i32>>:975 [#uses=1]
- call <4 x i32> @llvm.ppc.altivec.vsel( <4 x i32> zeroinitializer, <4 x i32> %975, <4 x i32> zeroinitializer ) ; <<4 x i32>>:976 [#uses=1]
- bitcast <4 x i32> %976 to <4 x float> ; <<4 x float>>:977 [#uses=1]
- store <4 x float> %977, <4 x float>* null
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:978 [#uses=0]
- bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:979 [#uses=1]
- call <4 x i32> @llvm.ppc.altivec.vsel( <4 x i32> %979, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <<4 x i32>>:980 [#uses=1]
- bitcast <4 x i32> %980 to <4 x float> ; <<4 x float>>:981 [#uses=0]
- br label %xST.exit465
-
-xST.exit465: ; preds = %972, %969
- load <4 x float>* %0 ; <<4 x float>>:982 [#uses=3]
- icmp eq i32 0, 0 ; <i1>:983 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:984 [#uses=1]
- br i1 %983, label %989, label %985
-
-; <label>:985 ; preds = %xST.exit465
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:986 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:987 [#uses=1]
- store <4 x float> %982, <4 x float>* %987
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:988 [#uses=0]
- br label %xST.exit467
-
-; <label>:989 ; preds = %xST.exit465
- bitcast <4 x float> %982 to <4 x i32> ; <<4 x i32>>:990 [#uses=0]
- shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>>:991 [#uses=0]
- store <4 x float> zeroinitializer, <4 x float>* %984
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:992 [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:993 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:994 [#uses=0]
- bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>>:995 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:996 [#uses=0]
- bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:997 [#uses=1]
- bitcast <4 x float> %982 to <4 x i32> ; <<4 x i32>>:998 [#uses=1]
- shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:999 [#uses=1]
- call <4 x i32> @llvm.ppc.altivec.vsel( <4 x i32> %997, <4 x i32> %998, <4 x i32> %999 ) ; <<4 x i32>>:1000 [#uses=1]
- bitcast <4 x i32> %1000 to <4 x float> ; <<4 x float>>:1001 [#uses=0]
- br label %xST.exit467
-
-xST.exit467: ; preds = %989, %985
- load <4 x float>* %0 ; <<4 x float>>:1002 [#uses=5]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:1003 [#uses=2]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %1003, <4 x i32> zeroinitializer ) ; <i32>:1004 [#uses=0]
- br i1 false, label %1011, label %1005
-
-; <label>:1005 ; preds = %xST.exit467
- load <4 x float>* null ; <<4 x float>>:1006 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:1007 [#uses=1]
- load <4 x float>* %1007 ; <<4 x float>>:1008 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:1009 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1010 [#uses=0]
- br label %xST.exit469
-
-; <label>:1011 ; preds = %xST.exit467
- shufflevector <4 x i32> %1003, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>>:1012 [#uses=0]
- icmp eq i32 0, 0 ; <i1>:1013 [#uses=1]
- br i1 %1013, label %1015, label %1014
-
-; <label>:1014 ; preds = %1011
- br label %1015
-
-; <label>:1015 ; preds = %1014, %1011
- %.07472 = phi <4 x float> [ %1002, %1014 ], [ %.27474, %1011 ] ; <<4 x float>> [#uses=0]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:1016 [#uses=1]
- icmp eq i32 %1016, 0 ; <i1>:1017 [#uses=1]
- br i1 %1017, label %1021, label %1018
-
-; <label>:1018 ; preds = %1015
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:1019 [#uses=0]
- shufflevector <4 x float> %1002, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:1020 [#uses=0]
- br label %1021
-
-; <label>:1021 ; preds = %1018, %1015
- %.07467 = phi <4 x float> [ %1002, %1018 ], [ %.27469, %1015 ] ; <<4 x float>> [#uses=2]
- icmp eq i32 0, 0 ; <i1>:1022 [#uses=1]
- br i1 %1022, label %1025, label %1023
-
-; <label>:1023 ; preds = %1021
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1024 [#uses=1]
- store <4 x float> zeroinitializer, <4 x float>* %1024
- br label %1025
-
-; <label>:1025 ; preds = %1023, %1021
- %.07462 = phi <4 x float> [ %1002, %1023 ], [ %.27464, %1021 ] ; <<4 x float>> [#uses=2]
- icmp eq i32 0, 0 ; <i1>:1026 [#uses=1]
- br i1 %1026, label %xST.exit469, label %1027
-
-; <label>:1027 ; preds = %1025
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1028 [#uses=0]
- br label %xST.exit469
-
-xST.exit469: ; preds = %1027, %1025, %1005
- %.17463 = phi <4 x float> [ %.27464, %1005 ], [ %.07462, %1027 ], [ %.07462, %1025 ] ; <<4 x float>> [#uses=1]
- %.17468 = phi <4 x float> [ %.27469, %1005 ], [ %.07467, %1027 ], [ %.07467, %1025 ] ; <<4 x float>> [#uses=1]
- %.07489 = phi <4 x float> [ %1002, %1005 ], [ %.17490, %1027 ], [ %.17490, %1025 ] ; <<4 x float>> [#uses=1]
- load <4 x float>* null ; <<4 x float>>:1029 [#uses=0]
- load <4 x float>* null ; <<4 x float>>:1030 [#uses=0]
- fsub <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1031 [#uses=1]
- br i1 false, label %1037, label %1032
-
-; <label>:1032 ; preds = %xST.exit469
- load <4 x float>* null ; <<4 x float>>:1033 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:1034 [#uses=1]
- load <4 x float>* %1034 ; <<4 x float>>:1035 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:1036 [#uses=0]
- br label %xST.exit472
-
-; <label>:1037 ; preds = %xST.exit469
- icmp eq i32 0, 0 ; <i1>:1038 [#uses=1]
- br i1 %1038, label %1040, label %1039
-
-; <label>:1039 ; preds = %1037
- br label %1040
-
-; <label>:1040 ; preds = %1039, %1037
- %.07507 = phi <4 x float> [ zeroinitializer, %1039 ], [ zeroinitializer, %1037 ] ; <<4 x float>> [#uses=0]
- icmp eq i32 0, 0 ; <i1>:1041 [#uses=1]
- br i1 %1041, label %1045, label %1042
-
-; <label>:1042 ; preds = %1040
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:1043 [#uses=1]
- load <4 x float>* %1043 ; <<4 x float>>:1044 [#uses=0]
- br label %1045
-
-; <label>:1045 ; preds = %1042, %1040
- br i1 false, label %1048, label %1046
-
-; <label>:1046 ; preds = %1045
- shufflevector <4 x float> %1031, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:1047 [#uses=0]
- br label %1048
-
-; <label>:1048 ; preds = %1046, %1045
- icmp eq i32 0, 0 ; <i1>:1049 [#uses=1]
- br i1 %1049, label %xST.exit472, label %1050
-
-; <label>:1050 ; preds = %1048
- br label %xST.exit472
-
-xST.exit472: ; preds = %1050, %1048, %1032
- br i1 false, label %1052, label %1051
-
-; <label>:1051 ; preds = %xST.exit472
- br label %xST.exit474
-
-; <label>:1052 ; preds = %xST.exit472
- br i1 false, label %1054, label %1053
-
-; <label>:1053 ; preds = %1052
- br label %1054
-
-; <label>:1054 ; preds = %1053, %1052
- br i1 false, label %1056, label %1055
-
-; <label>:1055 ; preds = %1054
- br label %1056
-
-; <label>:1056 ; preds = %1055, %1054
- br i1 false, label %1058, label %1057
-
-; <label>:1057 ; preds = %1056
- br label %1058
-
-; <label>:1058 ; preds = %1057, %1056
- br i1 false, label %xST.exit474, label %1059
-
-; <label>:1059 ; preds = %1058
- br label %xST.exit474
-
-xST.exit474: ; preds = %1059, %1058, %1051
- load <4 x float>* null ; <<4 x float>>:1060 [#uses=1]
- fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1061 [#uses=1]
- fmul <4 x float> %1060, zeroinitializer ; <<4 x float>>:1062 [#uses=2]
- br i1 false, label %1065, label %1063
-
-; <label>:1063 ; preds = %xST.exit474
- shufflevector <4 x float> %1062, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:1064 [#uses=1]
- store <4 x float> %1064, <4 x float>* null
- br label %xST.exit476
-
-; <label>:1065 ; preds = %xST.exit474
- br i1 false, label %1067, label %1066
-
-; <label>:1066 ; preds = %1065
- br label %1067
-
-; <label>:1067 ; preds = %1066, %1065
- shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x i32>>:1068 [#uses=0]
- br i1 false, label %1070, label %1069
-
-; <label>:1069 ; preds = %1067
- br label %1070
-
-; <label>:1070 ; preds = %1069, %1067
- br i1 false, label %1072, label %1071
-
-; <label>:1071 ; preds = %1070
- br label %1072
-
-; <label>:1072 ; preds = %1071, %1070
- br i1 false, label %xST.exit476, label %1073
-
-; <label>:1073 ; preds = %1072
- br label %xST.exit476
-
-xST.exit476: ; preds = %1073, %1072, %1063
- %.07551 = phi <4 x float> [ %1062, %1063 ], [ %.17552, %1073 ], [ %.17552, %1072 ] ; <<4 x float>> [#uses=1]
- %.07555 = phi <4 x float> [ %1061, %1063 ], [ %.17556, %1073 ], [ %.17556, %1072 ] ; <<4 x float>> [#uses=1]
- br i1 false, label %1075, label %1074
-
-; <label>:1074 ; preds = %xST.exit476
- br label %xST.exit479
-
-; <label>:1075 ; preds = %xST.exit476
- br i1 false, label %1077, label %1076
-
-; <label>:1076 ; preds = %1075
- br label %1077
-
-; <label>:1077 ; preds = %1076, %1075
- br i1 false, label %1079, label %1078
-
-; <label>:1078 ; preds = %1077
- br label %1079
-
-; <label>:1079 ; preds = %1078, %1077
- br i1 false, label %1081, label %1080
-
-; <label>:1080 ; preds = %1079
- br label %1081
-
-; <label>:1081 ; preds = %1080, %1079
- br i1 false, label %xST.exit479, label %1082
-
-; <label>:1082 ; preds = %1081
- br label %xST.exit479
-
-xST.exit479: ; preds = %1082, %1081, %1074
- br i1 false, label %1084, label %1083
-
-; <label>:1083 ; preds = %xST.exit479
- br label %xST.exit482
-
-; <label>:1084 ; preds = %xST.exit479
- br i1 false, label %1086, label %1085
-
-; <label>:1085 ; preds = %1084
- br label %1086
-
-; <label>:1086 ; preds = %1085, %1084
- br i1 false, label %1088, label %1087
-
-; <label>:1087 ; preds = %1086
- br label %1088
-
-; <label>:1088 ; preds = %1087, %1086
- br i1 false, label %1090, label %1089
-
-; <label>:1089 ; preds = %1088
- br label %1090
-
-; <label>:1090 ; preds = %1089, %1088
- br i1 false, label %xST.exit482, label %1091
-
-; <label>:1091 ; preds = %1090
- br label %xST.exit482
-
-xST.exit482: ; preds = %1091, %1090, %1083
- br i1 false, label %1093, label %1092
-
-; <label>:1092 ; preds = %xST.exit482
- br label %xST.exit486
-
-; <label>:1093 ; preds = %xST.exit482
- br i1 false, label %1095, label %1094
-
-; <label>:1094 ; preds = %1093
- br label %1095
-
-; <label>:1095 ; preds = %1094, %1093
- br i1 false, label %1097, label %1096
-
-; <label>:1096 ; preds = %1095
- br label %1097
-
-; <label>:1097 ; preds = %1096, %1095
- br i1 false, label %1099, label %1098
-
-; <label>:1098 ; preds = %1097
- br label %1099
-
-; <label>:1099 ; preds = %1098, %1097
- br i1 false, label %xST.exit486, label %1100
-
-; <label>:1100 ; preds = %1099
- br label %xST.exit486
-
-xST.exit486: ; preds = %1100, %1099, %1092
- br i1 false, label %1102, label %1101
-
-; <label>:1101 ; preds = %xST.exit486
- br label %xST.exit489
-
-; <label>:1102 ; preds = %xST.exit486
- br i1 false, label %1104, label %1103
-
-; <label>:1103 ; preds = %1102
- br label %1104
-
-; <label>:1104 ; preds = %1103, %1102
- br i1 false, label %1106, label %1105
-
-; <label>:1105 ; preds = %1104
- br label %1106
-
-; <label>:1106 ; preds = %1105, %1104
- br i1 false, label %1108, label %1107
-
-; <label>:1107 ; preds = %1106
- br label %1108
-
-; <label>:1108 ; preds = %1107, %1106
- br i1 false, label %xST.exit489, label %1109
-
-; <label>:1109 ; preds = %1108
- br label %xST.exit489
-
-xST.exit489: ; preds = %1109, %1108, %1101
- br i1 false, label %1111, label %1110
-
-; <label>:1110 ; preds = %xST.exit489
- br label %xST.exit492
-
-; <label>:1111 ; preds = %xST.exit489
- br i1 false, label %1113, label %1112
-
-; <label>:1112 ; preds = %1111
- br label %1113
-
-; <label>:1113 ; preds = %1112, %1111
- br i1 false, label %1115, label %1114
-
-; <label>:1114 ; preds = %1113
- br label %1115
-
-; <label>:1115 ; preds = %1114, %1113
- br i1 false, label %1117, label %1116
-
-; <label>:1116 ; preds = %1115
- br label %1117
-
-; <label>:1117 ; preds = %1116, %1115
- br i1 false, label %xST.exit492, label %1118
-
-; <label>:1118 ; preds = %1117
- br label %xST.exit492
-
-xST.exit492: ; preds = %1118, %1117, %1110
- load <4 x float>* null ; <<4 x float>>:1119 [#uses=1]
- fmul <4 x float> %1119, zeroinitializer ; <<4 x float>>:1120 [#uses=1]
- fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1121 [#uses=1]
- br i1 false, label %1123, label %1122
-
-; <label>:1122 ; preds = %xST.exit492
- br label %xST.exit495
-
-; <label>:1123 ; preds = %xST.exit492
- br i1 false, label %1125, label %1124
-
-; <label>:1124 ; preds = %1123
- br label %1125
-
-; <label>:1125 ; preds = %1124, %1123
- br i1 false, label %1127, label %1126
-
-; <label>:1126 ; preds = %1125
- br label %1127
-
-; <label>:1127 ; preds = %1126, %1125
- br i1 false, label %1129, label %1128
-
-; <label>:1128 ; preds = %1127
- br label %1129
-
-; <label>:1129 ; preds = %1128, %1127
- br i1 false, label %xST.exit495, label %1130
-
-; <label>:1130 ; preds = %1129
- br label %xST.exit495
-
-xST.exit495: ; preds = %1130, %1129, %1122
- %.07582 = phi <4 x float> [ %1121, %1122 ], [ %.17583, %1130 ], [ %.17583, %1129 ] ; <<4 x float>> [#uses=1]
- %.07590 = phi <4 x float> [ %1120, %1122 ], [ %.17591, %1130 ], [ %.17591, %1129 ] ; <<4 x float>> [#uses=1]
- load <4 x float>* null ; <<4 x float>>:1131 [#uses=1]
- fadd <4 x float> %1131, zeroinitializer ; <<4 x float>>:1132 [#uses=1]
- fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1133 [#uses=1]
- br i1 false, label %1135, label %1134
-
-; <label>:1134 ; preds = %xST.exit495
- br label %xST.exit498
-
-; <label>:1135 ; preds = %xST.exit495
- br i1 false, label %1137, label %1136
-
-; <label>:1136 ; preds = %1135
- br label %1137
-
-; <label>:1137 ; preds = %1136, %1135
- br i1 false, label %1139, label %1138
-
-; <label>:1138 ; preds = %1137
- br label %1139
-
-; <label>:1139 ; preds = %1138, %1137
- br i1 false, label %1141, label %1140
-
-; <label>:1140 ; preds = %1139
- br label %1141
-
-; <label>:1141 ; preds = %1140, %1139
- br i1 false, label %xST.exit498, label %1142
-
-; <label>:1142 ; preds = %1141
- br label %xST.exit498
-
-xST.exit498: ; preds = %1142, %1141, %1134
- %.07617 = phi <4 x float> [ %1133, %1134 ], [ %.17618, %1142 ], [ %.17618, %1141 ] ; <<4 x float>> [#uses=1]
- %.07621 = phi <4 x float> [ %1132, %1134 ], [ %.17622, %1142 ], [ %.17622, %1141 ] ; <<4 x float>> [#uses=1]
- load <4 x float>* null ; <<4 x float>>:1143 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1144 [#uses=1]
- load <4 x float>* %1144 ; <<4 x float>>:1145 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1146 [#uses=1]
- load <4 x float>* %1146 ; <<4 x float>>:1147 [#uses=1]
- shufflevector <4 x float> %1143, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1148 [#uses=1]
- shufflevector <4 x float> %1145, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1149 [#uses=1]
- shufflevector <4 x float> %1147, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1150 [#uses=1]
- fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1151 [#uses=1]
- fmul <4 x float> zeroinitializer, %1148 ; <<4 x float>>:1152 [#uses=1]
- fmul <4 x float> zeroinitializer, %1149 ; <<4 x float>>:1153 [#uses=1]
- fmul <4 x float> zeroinitializer, %1150 ; <<4 x float>>:1154 [#uses=1]
- br i1 false, label %1156, label %1155
-
-; <label>:1155 ; preds = %xST.exit498
- br label %xST.exit501
-
-; <label>:1156 ; preds = %xST.exit498
- br i1 false, label %1158, label %1157
-
-; <label>:1157 ; preds = %1156
- br label %1158
-
-; <label>:1158 ; preds = %1157, %1156
- br i1 false, label %1160, label %1159
-
-; <label>:1159 ; preds = %1158
- br label %1160
-
-; <label>:1160 ; preds = %1159, %1158
- br i1 false, label %1162, label %1161
-
-; <label>:1161 ; preds = %1160
- br label %1162
-
-; <label>:1162 ; preds = %1161, %1160
- br i1 false, label %xST.exit501, label %1163
-
-; <label>:1163 ; preds = %1162
- br label %xST.exit501
-
-xST.exit501: ; preds = %1163, %1162, %1155
- %.07652 = phi <4 x float> [ %1154, %1155 ], [ %.17653, %1163 ], [ %.17653, %1162 ] ; <<4 x float>> [#uses=1]
- %.07656 = phi <4 x float> [ %1153, %1155 ], [ %.17657, %1163 ], [ %.17657, %1162 ] ; <<4 x float>> [#uses=1]
- %.07660 = phi <4 x float> [ %1152, %1155 ], [ %.17661, %1163 ], [ %.17661, %1162 ] ; <<4 x float>> [#uses=1]
- %.07664 = phi <4 x float> [ %1151, %1155 ], [ %.17665, %1163 ], [ %.17665, %1162 ] ; <<4 x float>> [#uses=1]
- load <4 x float>* null ; <<4 x float>>:1164 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1165 [#uses=1]
- load <4 x float>* %1165 ; <<4 x float>>:1166 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1167 [#uses=1]
- load <4 x float>* %1167 ; <<4 x float>>:1168 [#uses=1]
- fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1169 [#uses=1]
- fadd <4 x float> zeroinitializer, %1164 ; <<4 x float>>:1170 [#uses=1]
- fadd <4 x float> zeroinitializer, %1166 ; <<4 x float>>:1171 [#uses=1]
- fadd <4 x float> zeroinitializer, %1168 ; <<4 x float>>:1172 [#uses=1]
- br i1 false, label %1174, label %1173
-
-; <label>:1173 ; preds = %xST.exit501
- br label %xST.exit504
-
-; <label>:1174 ; preds = %xST.exit501
- br i1 false, label %1176, label %1175
-
-; <label>:1175 ; preds = %1174
- br label %1176
-
-; <label>:1176 ; preds = %1175, %1174
- br i1 false, label %1178, label %1177
-
-; <label>:1177 ; preds = %1176
- br label %1178
-
-; <label>:1178 ; preds = %1177, %1176
- br i1 false, label %1180, label %1179
-
-; <label>:1179 ; preds = %1178
- br label %1180
-
-; <label>:1180 ; preds = %1179, %1178
- br i1 false, label %xST.exit504, label %1181
-
-; <label>:1181 ; preds = %1180
- br label %xST.exit504
-
-xST.exit504: ; preds = %1181, %1180, %1173
- %.07722 = phi <4 x float> [ %1172, %1173 ], [ %.17723, %1181 ], [ %.17723, %1180 ] ; <<4 x float>> [#uses=1]
- %.07726 = phi <4 x float> [ %1171, %1173 ], [ %.17727, %1181 ], [ %.17727, %1180 ] ; <<4 x float>> [#uses=1]
- %.07730 = phi <4 x float> [ %1170, %1173 ], [ %.17731, %1181 ], [ %.17731, %1180 ] ; <<4 x float>> [#uses=1]
- %.07734 = phi <4 x float> [ %1169, %1173 ], [ %.17735, %1181 ], [ %.17735, %1180 ] ; <<4 x float>> [#uses=1]
- fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1182 [#uses=1]
- br i1 false, label %1184, label %1183
-
-; <label>:1183 ; preds = %xST.exit504
- br label %xST.exit507
-
-; <label>:1184 ; preds = %xST.exit504
- br i1 false, label %1186, label %1185
-
-; <label>:1185 ; preds = %1184
- br label %1186
-
-; <label>:1186 ; preds = %1185, %1184
- br i1 false, label %1188, label %1187
-
-; <label>:1187 ; preds = %1186
- store <4 x float> zeroinitializer, <4 x float>* null
- br label %1188
-
-; <label>:1188 ; preds = %1187, %1186
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:1189 [#uses=1]
- shufflevector <4 x i32> %1189, <4 x i32> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x i32>>:1190 [#uses=1]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %1190, <4 x i32> zeroinitializer ) ; <i32>:1191 [#uses=1]
- icmp eq i32 %1191, 0 ; <i1>:1192 [#uses=1]
- br i1 %1192, label %1196, label %1193
-
-; <label>:1193 ; preds = %1188
- load <4 x float>* null ; <<4 x float>>:1194 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %1194, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:1195 [#uses=1]
- store <4 x float> %1195, <4 x float>* null
- br label %1196
-
-; <label>:1196 ; preds = %1193, %1188
- %.07742 = phi <4 x float> [ zeroinitializer, %1193 ], [ zeroinitializer, %1188 ] ; <<4 x float>> [#uses=0]
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:1197 [#uses=1]
- shufflevector <4 x i32> %1197, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:1198 [#uses=1]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %1198, <4 x i32> zeroinitializer ) ; <i32>:1199 [#uses=1]
- icmp eq i32 %1199, 0 ; <i1>:1200 [#uses=1]
- br i1 %1200, label %xST.exit507, label %1201
-
-; <label>:1201 ; preds = %1196
- store <4 x float> zeroinitializer, <4 x float>* null
- br label %xST.exit507
-
-xST.exit507: ; preds = %1201, %1196, %1183
- %.07769 = phi <4 x float> [ %1182, %1183 ], [ %.17770, %1201 ], [ %.17770, %1196 ] ; <<4 x float>> [#uses=1]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:1202 [#uses=1]
- icmp eq i32 %1202, 0 ; <i1>:1203 [#uses=1]
- br i1 %1203, label %1207, label %1204
-
-; <label>:1204 ; preds = %xST.exit507
- load <4 x float>* null ; <<4 x float>>:1205 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %1205, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:1206 [#uses=1]
- store <4 x float> %1206, <4 x float>* null
- br label %1207
-
-; <label>:1207 ; preds = %1204, %xST.exit507
- load <4 x i32>* %.sub7896 ; <<4 x i32>>:1208 [#uses=1]
- shufflevector <4 x i32> %1208, <4 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x i32>>:1209 [#uses=1]
- call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %1209, <4 x i32> zeroinitializer ) ; <i32>:1210 [#uses=1]
- icmp eq i32 %1210, 0 ; <i1>:1211 [#uses=1]
- br i1 %1211, label %1215, label %1212
-
-; <label>:1212 ; preds = %1207
- load <4 x float>* null ; <<4 x float>>:1213 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %1213, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:1214 [#uses=1]
- store <4 x float> %1214, <4 x float>* null
- br label %1215
-
-; <label>:1215 ; preds = %1212, %1207
- store <4 x float> zeroinitializer, <4 x float>* null
- br label %xLS.exit449
-}
-
-declare <4 x i32> @llvm.ppc.altivec.vsel(<4 x i32>, <4 x i32>, <4 x i32>)
-
-declare void @llvm.ppc.altivec.stvewx(<4 x i32>, i8*)
-
-declare <4 x float> @llvm.ppc.altivec.vrsqrtefp(<4 x float>)
-
-declare <4 x float> @llvm.ppc.altivec.vcfsx(<4 x i32>, i32)
-
-declare i32 @llvm.ppc.altivec.vcmpequw.p(i32, <4 x i32>, <4 x i32>)
-
-declare <4 x i32> @llvm.ppc.altivec.vcmpgtfp(<4 x float>, <4 x float>)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-04-24-InlineAsm-I-Modifier.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-04-24-InlineAsm-I-Modifier.ll
deleted file mode 100644
index 86fd947..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-04-24-InlineAsm-I-Modifier.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8.8.0 | grep {foo r3, r4}
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8.8.0 | grep {bari r3, 47}
-
-; PR1351
-
-define i32 @test1(i32 %Y, i32 %X) nounwind {
- %tmp1 = tail call i32 asm "foo${1:I} $0, $1", "=r,rI"( i32 %X )
- ret i32 %tmp1
-}
-
-define i32 @test2(i32 %Y, i32 %X) nounwind {
- %tmp1 = tail call i32 asm "bar${1:I} $0, $1", "=r,rI"( i32 47 )
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll
deleted file mode 100644
index be28a9a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s | grep {subfc r3,r5,r4}
-; RUN: llc < %s | grep {subfze r4,r6}
-; RUN: llc < %s -regalloc=local | grep {subfc r6,r5,r4}
-; RUN: llc < %s -regalloc=local | grep {subfze r3,r3}
-; The first argument of subfc must not be the same as any other register.
-
-; PR1357
-
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "powerpc-apple-darwin8.8.0"
-
-;long long test(int A, int B, int C) {
-; unsigned X, Y;
-; __asm__ ("subf%I3c %1,%4,%3\n\tsubfze %0,%2"
-; : "=r" (X), "=&r" (Y)
-; : "r" (A), "rI" (B), "r" (C));
-; return ((long long)Y << 32) | X;
-;}
-
-define i64 @test(i32 %A, i32 %B, i32 %C) nounwind {
-entry:
- %Y = alloca i32, align 4 ; <i32*> [#uses=2]
- %tmp4 = call i32 asm "subf${3:I}c $1,$4,$3\0A\09subfze $0,$2", "=r,=*&r,r,rI,r"( i32* %Y, i32 %A, i32 %B, i32 %C ) ; <i32> [#uses=1]
- %tmp5 = load i32* %Y ; <i32> [#uses=1]
- %tmp56 = zext i32 %tmp5 to i64 ; <i64> [#uses=1]
- %tmp7 = shl i64 %tmp56, 32 ; <i64> [#uses=1]
- %tmp89 = zext i32 %tmp4 to i64 ; <i64> [#uses=1]
- %tmp10 = or i64 %tmp7, %tmp89 ; <i64> [#uses=1]
- ret i64 %tmp10
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-05-03-InlineAsm-S-Constraint.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-05-03-InlineAsm-S-Constraint.ll
deleted file mode 100644
index 1df5140..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-05-03-InlineAsm-S-Constraint.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s
-; PR1382
-
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "powerpc-apple-darwin8.8.0"
-@x = global [2 x i32] [ i32 1, i32 2 ] ; <[2 x i32]*> [#uses=1]
-
-define void @foo() {
-entry:
- tail call void asm sideeffect "$0 $1", "s,i"( i8* bitcast (i32* getelementptr ([2 x i32]* @x, i32 0, i32 1) to i8*), i8* bitcast (i32* getelementptr ([2 x i32]* @x, i32 0, i32 1) to i8*) )
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-05-14-InlineAsmSelectCrash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-05-14-InlineAsmSelectCrash.ll
deleted file mode 100644
index e4e9314..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-05-14-InlineAsmSelectCrash.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=ppc32
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "powerpc-apple-darwin8.8.0"
- %struct..0anon = type { i32 }
- %struct.A = type { %struct.anon }
- %struct.anon = type <{ }>
-
-define void @bork(%struct.A* %In0P) {
-entry:
- %tmp56 = bitcast %struct.A* %In0P to float* ; <float*> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb, %entry
- %i.035.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
- %tmp8 = getelementptr float* %tmp56, i32 %i.035.0 ; <float*> [#uses=2]
- %tmp101112 = bitcast float* %tmp8 to i8* ; <i8*> [#uses=1]
- %tmp1617 = bitcast float* %tmp8 to i32* ; <i32*> [#uses=1]
- %tmp21 = tail call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,*m"( i8* %tmp101112, i32 0, i32* %tmp1617 ) ; <i32> [#uses=0]
- %indvar.next = add i32 %i.035.0, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, 4 ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb
-
-return: ; preds = %bb
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-05-22-tailmerge-3.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-05-22-tailmerge-3.ll
deleted file mode 100644
index 42f2152..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-05-22-tailmerge-3.ll
+++ /dev/null
@@ -1,68 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep bl.*baz | count 2
-; RUN: llc < %s -march=ppc32 | grep bl.*quux | count 2
-; RUN: llc < %s -march=ppc32 -enable-tail-merge | grep bl.*baz | count 1
-; RUN: llc < %s -march=ppc32 -enable-tail-merge=1 | grep bl.*quux | count 1
-; Check that tail merging is not the default on ppc, and that -enable-tail-merge works.
-
-; ModuleID = 'tail.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
-
-define i32 @f(i32 %i, i32 %q) {
-entry:
- %i_addr = alloca i32 ; <i32*> [#uses=2]
- %q_addr = alloca i32 ; <i32*> [#uses=2]
- %retval = alloca i32, align 4 ; <i32*> [#uses=1]
- "alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %i, i32* %i_addr
- store i32 %q, i32* %q_addr
- %tmp = load i32* %i_addr ; <i32> [#uses=1]
- %tmp1 = icmp ne i32 %tmp, 0 ; <i1> [#uses=1]
- %tmp12 = zext i1 %tmp1 to i8 ; <i8> [#uses=1]
- %toBool = icmp ne i8 %tmp12, 0 ; <i1> [#uses=1]
- br i1 %toBool, label %cond_true, label %cond_false
-
-cond_true: ; preds = %entry
- %tmp3 = call i32 (...)* @bar( ) ; <i32> [#uses=0]
- %tmp4 = call i32 (...)* @baz( i32 5, i32 6 ) ; <i32> [#uses=0]
- %tmp7 = load i32* %q_addr ; <i32> [#uses=1]
- %tmp8 = icmp ne i32 %tmp7, 0 ; <i1> [#uses=1]
- %tmp89 = zext i1 %tmp8 to i8 ; <i8> [#uses=1]
- %toBool10 = icmp ne i8 %tmp89, 0 ; <i1> [#uses=1]
- br i1 %toBool10, label %cond_true11, label %cond_false15
-
-cond_false: ; preds = %entry
- %tmp5 = call i32 (...)* @foo( ) ; <i32> [#uses=0]
- %tmp6 = call i32 (...)* @baz( i32 5, i32 6 ) ; <i32> [#uses=0]
- %tmp27 = load i32* %q_addr ; <i32> [#uses=1]
- %tmp28 = icmp ne i32 %tmp27, 0 ; <i1> [#uses=1]
- %tmp289 = zext i1 %tmp28 to i8 ; <i8> [#uses=1]
- %toBool210 = icmp ne i8 %tmp289, 0 ; <i1> [#uses=1]
- br i1 %toBool210, label %cond_true11, label %cond_false15
-
-cond_true11: ; preds = %cond_next
- %tmp13 = call i32 (...)* @foo( ) ; <i32> [#uses=0]
- %tmp14 = call i32 (...)* @quux( i32 3, i32 4 ) ; <i32> [#uses=0]
- br label %cond_next18
-
-cond_false15: ; preds = %cond_next
- %tmp16 = call i32 (...)* @bar( ) ; <i32> [#uses=0]
- %tmp17 = call i32 (...)* @quux( i32 3, i32 4 ) ; <i32> [#uses=0]
- br label %cond_next18
-
-cond_next18: ; preds = %cond_false15, %cond_true11
- %tmp19 = call i32 (...)* @bar( ) ; <i32> [#uses=0]
- br label %return
-
-return: ; preds = %cond_next18
- %retval20 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval20
-}
-
-declare i32 @bar(...)
-
-declare i32 @baz(...)
-
-declare i32 @foo(...)
-
-declare i32 @quux(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-05-30-dagcombine-miscomp.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-05-30-dagcombine-miscomp.ll
deleted file mode 100644
index 2938c70..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-05-30-dagcombine-miscomp.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "powerpc-apple-darwin8.8.0"
-
-; RUN: llc < %s -march=ppc32 | grep {rlwinm r3, r3, 23, 30, 30}
-; PR1473
-
-define i8 @foo(i16 zeroext %a) zeroext {
- %tmp2 = lshr i16 %a, 10 ; <i16> [#uses=1]
- %tmp23 = trunc i16 %tmp2 to i8 ; <i8> [#uses=1]
- %tmp4 = shl i8 %tmp23, 1 ; <i8> [#uses=1]
- %tmp5 = and i8 %tmp4, 2 ; <i8> [#uses=1]
- ret i8 %tmp5
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-06-28-BCCISelBug.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-06-28-BCCISelBug.ll
deleted file mode 100644
index 6de7a09..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-06-28-BCCISelBug.ll
+++ /dev/null
@@ -1,85 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mattr=+altivec
-
- %struct.XATest = type { float, i16, i8, i8 }
- %struct.XArrayRange = type { i8, i8, i8, i8 }
- %struct.XBlendMode = type { i16, i16, i16, i16, %struct.GIC4, i16, i16, i8, i8, i8, i8 }
- %struct.XClearC = type { double, %struct.GIC4, %struct.GIC4, float, i32 }
- %struct.XClipPlane = type { i32, [6 x %struct.GIC4] }
- %struct.XCBuffer = type { i16, i16, [8 x i16] }
- %struct.XCMatrix = type { [16 x float]*, %struct.XICSS }
- %struct.XConvolution = type { %struct.GIC4, %struct.XICSS, i16, i16, float*, i32, i32 }
- %struct.XDepthTest = type { i16, i16, i8, i8, i8, i8, double, double }
- %struct.XFixedFunctionProgram = type { %struct.PPSToken* }
- %struct.XFogMode = type { %struct.GIC4, float, float, float, float, float, i16, i16, i16, i8, i8 }
- %struct.XFramebufferAttachment = type { i32, i32, i32, i32 }
- %struct.XHintMode = type { i16, i16, i16, i16, i16, i16, i16, i16, i16, i16 }
- %struct.XHistogram = type { %struct.XFramebufferAttachment*, i32, i16, i8, i8 }
- %struct.XICSS = type { %struct.GTCoord2, %struct.GTCoord2, %struct.GTCoord2, %struct.GTCoord2 }
- %struct.XISubset = type { %struct.XConvolution, %struct.XConvolution, %struct.XConvolution, %struct.XCMatrix, %struct.XMinmax, %struct.XHistogram, %struct.XICSS, %struct.XICSS, %struct.XICSS, %struct.XICSS, i32 }
- %struct.XLight = type { %struct.GIC4, %struct.GIC4, %struct.GIC4, %struct.GIC4, %struct.XPointLineLimits, float, float, float, float, float, %struct.XPointLineLimits, float, float, float, float, float }
- %struct.XLightModel = type { %struct.GIC4, [8 x %struct.XLight], [2 x %struct.XMaterial], i32, i16, i16, i16, i8, i8, i8, i8, i8, i8 }
- %struct.XLightProduct = type { %struct.GIC4, %struct.GIC4, %struct.GIC4 }
- %struct.XLineMode = type { float, i32, i16, i16, i8, i8, i8, i8 }
- %struct.XLogicOp = type { i16, i8, i8 }
- %struct.XMaskMode = type { i32, [3 x i32], i8, i8, i8, i8, i8, i8, i8, i8 }
- %struct.XMaterial = type { %struct.GIC4, %struct.GIC4, %struct.GIC4, %struct.GIC4, float, float, float, float, [8 x %struct.XLightProduct], %struct.GIC4, [6 x i32], [2 x i32] }
- %struct.XMinmax = type { %struct.XMinmaxTable*, i16, i8, i8 }
- %struct.XMinmaxTable = type { %struct.GIC4, %struct.GIC4 }
- %struct.XMipmaplevel = type { [4 x i32], [4 x i32], [4 x float], [4 x i32], i32, i32, float*, i8*, i16, i16, i16, i16, [2 x float] }
- %struct.XMultisample = type { float, i8, i8, i8, i8, i8, i8, i8, i8 }
- %struct.XPipelineProgramState = type { i8, i8, i8, i8, %struct.GIC4* }
- %struct.XPMap = type { i32*, float*, float*, float*, float*, float*, float*, float*, float*, i32*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.XPMode = type { float, float, %struct.XPStore, %struct.XPTransfer, %struct.XPMap, %struct.XISubset, i32, i32 }
- %struct.XPPack = type { i32, i32, i32, i32, i32, i32, i32, i32, i8, i8, i8, i8 }
- %struct.XPStore = type { %struct.XPPack, %struct.XPPack }
- %struct.XPTransfer = type { float, float, float, float, float, float, float, float, float, float, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float }
- %struct.XPointLineLimits = type { float, float, float }
- %struct.XPointMode = type { float, float, float, float, %struct.XPointLineLimits, float, i8, i8, i8, i8, i16, i16, i32, i16, i16 }
- %struct.XPGMode = type { [128 x i8], float, float, i16, i16, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8 }
- %struct.XRegisterCCs = type { i8, i8, i8, i8, i32, [2 x %struct.GIC4], [8 x %struct.XRegisterCCsPerStageState], %struct.XRegisterCCsFinalStageState }
- %struct.XRegisterCCsFinalStageState = type { i8, i8, i8, i8, [7 x %struct.XRegisterCCsPerVariableState] }
- %struct.XRegisterCCsPerPortionState = type { [4 x %struct.XRegisterCCsPerVariableState], i8, i8, i8, i8, i16, i16, i16, i16, i16, i16 }
- %struct.XRegisterCCsPerStageState = type { [2 x %struct.XRegisterCCsPerPortionState], [2 x %struct.GIC4] }
- %struct.XRegisterCCsPerVariableState = type { i16, i16, i16, i16 }
- %struct.XScissorTest = type { %struct.XFramebufferAttachment, i8, i8, i8, i8 }
- %struct.XState = type { i16, i16, i16, i16, i32, i32, [256 x %struct.GIC4], [128 x %struct.GIC4], %struct.XViewport, %struct.XXF, %struct.XLightModel, %struct.XATest, %struct.XBlendMode, %struct.XClearC, %struct.XCBuffer, %struct.XDepthTest, %struct.XArrayRange, %struct.XFogMode, %struct.XHintMode, %struct.XLineMode, %struct.XLogicOp, %struct.XMaskMode, %struct.XPMode, %struct.XPointMode, %struct.XPGMode, %struct.XScissorTest, i32, %struct.XStencilTest, [16 x %struct.XTMode], %struct.XArrayRange, [8 x %struct.XTCoordGen], %struct.XClipPlane, %struct.XMultisample, %struct.XRegisterCCs, %struct.XArrayRange, %struct.XArrayRange, [3 x %struct.XPipelineProgramState], %struct.XXFFeedback, i32*, %struct.XFixedFunctionProgram, [3 x i32] }
- %struct.XStencilTest = type { [3 x { i32, i32, i16, i16, i16, i16 }], i32, [4 x i8] }
- %struct.XTCoordGen = type { { i16, i16, %struct.GIC4, %struct.GIC4 }, { i16, i16, %struct.GIC4, %struct.GIC4 }, { i16, i16, %struct.GIC4, %struct.GIC4 }, { i16, i16, %struct.GIC4, %struct.GIC4 }, i8, i8, i8, i8 }
- %struct.XTGeomState = type { i16, i16, i16, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, [6 x i16], [6 x i16] }
- %struct.XTLevel = type { i32, i32, i16, i16, i16, i8, i8, i16, i16, i16, i16, i8* }
- %struct.XTMode = type { %struct.GIC4, i32, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, float, float, float, i16, i16, i16, i16, i16, i16, [4 x i16], i8, i8, i8, i8, [3 x float], [4 x float], float, float }
- %struct.XTParamState = type { i16, i16, i16, i16, i16, i16, %struct.GIC4, float, float, float, float, i16, i16, i16, i16, float, i16, i8, i8, i32, i8* }
- %struct.XTRec = type { %struct.XTState*, float, float, float, float, %struct.XMipmaplevel*, %struct.XMipmaplevel*, i32, i32, i32, i32, i32, i32, i32, [2 x %struct.PPSToken] }
- %struct.XTState = type { i16, i8, i8, i16, i16, float, i32, %struct.GISWRSurface*, %struct.XTParamState, %struct.XTGeomState, %struct.XTLevel, [6 x [15 x %struct.XTLevel]] }
- %struct.XXF = type { [24 x [16 x float]], [24 x [16 x float]], [16 x float], float, float, float, float, float, i8, i8, i8, i8, i32, i32, i32, i16, i16, i8, i8, i8, i8, i32 }
- %struct.XXFFeedback = type { i8, i8, i8, i8, [16 x i32], [16 x i32] }
- %struct.XViewport = type { float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, double, double, i32, i32, i32, i32, float, float, float, float }
- %struct.GIC4 = type { float, float, float, float }
- %struct.GISWRSurface = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i8*, [4 x i8*], i32 }
- %struct.GTCoord2 = type { float, float }
- %struct.GVMFPContext = type { float, i32, i32, i32, float, [3 x float] }
- %struct.GVMFPStack = type { [8 x i8*], i8*, i8*, i32, i32, { <4 x float> }, { <4 x float> }, <4 x i32> }
- %struct.GVMFGAttrib = type { <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, [8 x <4 x float>] }
- %struct.GVMTs = type { [16 x %struct.XTRec*] }
- %struct.PPSToken = type { { i16, i16, i32 } }
- %struct._GVMConstants = type { <4 x i32>, <4 x i32>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, float, float, float, float, float, float, float, float, float, float, float, float, [256 x float], [528 x i8] }
-
-declare <4 x i32> @llvm.ppc.altivec.lvewx(i8*)
-
-declare i32 @llvm.ppc.altivec.vcmpequw.p(i32, <4 x i32>, <4 x i32>)
-
-define void @test(%struct.XState* %gldst, <4 x float>* %prgrm, <4 x float>** %buffs, %struct._GVMConstants* %cnstn, %struct.PPSToken* %pstrm, %struct.GVMFPContext* %vmctx, %struct.GVMTs* %txtrs, %struct.GVMFPStack* %fpstk, %struct.GVMFGAttrib* %start, %struct.GVMFGAttrib* %deriv, i32 %fragx, i32 %fragy) {
-bb58.i:
- %tmp3405.i = getelementptr %struct.XTRec* null, i32 0, i32 1 ; <float*> [#uses=1]
- %tmp34053406.i = bitcast float* %tmp3405.i to i8* ; <i8*> [#uses=1]
- %tmp3407.i = call <4 x i32> @llvm.ppc.altivec.lvewx( i8* %tmp34053406.i ) ; <<4 x i32>> [#uses=0]
- %tmp4146.i = call i32 @llvm.ppc.altivec.vcmpequw.p( i32 3, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32> [#uses=1]
- %tmp4147.i = icmp eq i32 %tmp4146.i, 0 ; <i1> [#uses=1]
- br i1 %tmp4147.i, label %bb8799.i, label %bb4150.i
-
-bb4150.i: ; preds = %bb58.i
- br label %bb8799.i
-
-bb8799.i: ; preds = %bb4150.i, %bb58.i
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-08-04-CoalescerAssert.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-08-04-CoalescerAssert.ll
deleted file mode 100644
index 06f40d9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-08-04-CoalescerAssert.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -march=ppc64
-; PR1596
-
- %struct._obstack_chunk = type { i8* }
- %struct.obstack = type { i8*, %struct._obstack_chunk* (i8*, i64)*, i8*, i8 }
-
-define i32 @_obstack_newchunk(%struct.obstack* %h, i32 %length) {
-entry:
- br i1 false, label %cond_false, label %cond_true
-
-cond_true: ; preds = %entry
- br i1 false, label %cond_true28, label %cond_next30
-
-cond_false: ; preds = %entry
- %tmp22 = tail call %struct._obstack_chunk* null( i64 undef ) ; <%struct._obstack_chunk*> [#uses=2]
- br i1 false, label %cond_true28, label %cond_next30
-
-cond_true28: ; preds = %cond_false, %cond_true
- %iftmp.0.043.0 = phi %struct._obstack_chunk* [ null, %cond_true ], [ %tmp22, %cond_false ] ; <%struct._obstack_chunk*> [#uses=1]
- tail call void null( )
- br label %cond_next30
-
-cond_next30: ; preds = %cond_true28, %cond_false, %cond_true
- %iftmp.0.043.1 = phi %struct._obstack_chunk* [ %iftmp.0.043.0, %cond_true28 ], [ null, %cond_true ], [ %tmp22, %cond_false ] ; <%struct._obstack_chunk*> [#uses=1]
- %tmp41 = getelementptr %struct._obstack_chunk* %iftmp.0.043.1, i32 0, i32 0 ; <i8**> [#uses=1]
- store i8* null, i8** %tmp41, align 8
- ret i32 undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-09-04-AltivecDST.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-09-04-AltivecDST.ll
deleted file mode 100644
index 82ef2b8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-09-04-AltivecDST.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=ppc64 | grep dst | count 4
-
-define hidden void @_Z4borkPc(i8* %image) {
-entry:
- tail call void @llvm.ppc.altivec.dst( i8* %image, i32 8, i32 0 )
- tail call void @llvm.ppc.altivec.dstt( i8* %image, i32 8, i32 0 )
- tail call void @llvm.ppc.altivec.dstst( i8* %image, i32 8, i32 0 )
- tail call void @llvm.ppc.altivec.dststt( i8* %image, i32 8, i32 0 )
- ret void
-}
-
-declare void @llvm.ppc.altivec.dst(i8*, i32, i32)
-declare void @llvm.ppc.altivec.dstt(i8*, i32, i32)
-declare void @llvm.ppc.altivec.dstst(i8*, i32, i32)
-declare void @llvm.ppc.altivec.dststt(i8*, i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-09-07-LoadStoreIdxForms.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-09-07-LoadStoreIdxForms.ll
deleted file mode 100644
index ea7de98..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-09-07-LoadStoreIdxForms.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=ppc64 | grep lwzx
-
- %struct.__db_region = type { %struct.__mutex_t, [4 x i8], %struct.anon, i32, [1 x i32] }
- %struct.__mutex_t = type { i32 }
- %struct.anon = type { i64, i64 }
-
-define void @foo() {
-entry:
- %ttype = alloca i32, align 4 ; <i32*> [#uses=1]
- %regs = alloca [1024 x %struct.__db_region], align 16 ; <[1024 x %struct.__db_region]*> [#uses=0]
- %tmp = load i32* %ttype, align 4 ; <i32> [#uses=1]
- %tmp1 = call i32 (...)* @bork( i32 %tmp ) ; <i32> [#uses=0]
- ret void
-}
-
-declare i32 @bork(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-09-08-unaligned.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-09-08-unaligned.ll
deleted file mode 100644
index 898c470..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-09-08-unaligned.ll
+++ /dev/null
@@ -1,53 +0,0 @@
-; RUN: llc < %s | grep stfd | count 3
-; RUN: llc < %s | grep stfs | count 1
-; RUN: llc < %s | grep lfd | count 2
-; RUN: llc < %s | grep lfs | count 2
-; ModuleID = 'foo.c'
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc-apple-darwin8"
- %struct.anon = type <{ i8, float }>
-@s = global %struct.anon <{ i8 3, float 0x4014666660000000 }> ; <%struct.anon*> [#uses=1]
-@u = global <{ i8, double }> <{ i8 3, double 5.100000e+00 }> ; <<{ i8, double }>*> [#uses=1]
-@t = weak global %struct.anon zeroinitializer ; <%struct.anon*> [#uses=2]
-@v = weak global <{ i8, double }> zeroinitializer ; <<{ i8, double }>*> [#uses=2]
-@.str = internal constant [8 x i8] c"%f %lf\0A\00" ; <[8 x i8]*> [#uses=1]
-
-define i32 @foo() {
-entry:
- %retval = alloca i32, align 4 ; <i32*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = getelementptr %struct.anon* @s, i32 0, i32 1 ; <float*> [#uses=1]
- %tmp1 = load float* %tmp, align 1 ; <float> [#uses=1]
- %tmp2 = getelementptr %struct.anon* @t, i32 0, i32 1 ; <float*> [#uses=1]
- store float %tmp1, float* %tmp2, align 1
- %tmp3 = getelementptr <{ i8, double }>* @u, i32 0, i32 1 ; <double*> [#uses=1]
- %tmp4 = load double* %tmp3, align 1 ; <double> [#uses=1]
- %tmp5 = getelementptr <{ i8, double }>* @v, i32 0, i32 1 ; <double*> [#uses=1]
- store double %tmp4, double* %tmp5, align 1
- br label %return
-
-return: ; preds = %entry
- %retval6 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval6
-}
-
-define i32 @main() {
-entry:
- %retval = alloca i32, align 4 ; <i32*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = call i32 @foo( ) ; <i32> [#uses=0]
- %tmp1 = getelementptr %struct.anon* @t, i32 0, i32 1 ; <float*> [#uses=1]
- %tmp2 = load float* %tmp1, align 1 ; <float> [#uses=1]
- %tmp23 = fpext float %tmp2 to double ; <double> [#uses=1]
- %tmp4 = getelementptr <{ i8, double }>* @v, i32 0, i32 1 ; <double*> [#uses=1]
- %tmp5 = load double* %tmp4, align 1 ; <double> [#uses=1]
- %tmp6 = getelementptr [8 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
- %tmp7 = call i32 (i8*, ...)* @printf( i8* %tmp6, double %tmp23, double %tmp5 ) ; <i32> [#uses=0]
- br label %return
-
-return: ; preds = %entry
- %retval8 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval8
-}
-
-declare i32 @printf(i8*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-09-11-RegCoalescerAssert.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-09-11-RegCoalescerAssert.ll
deleted file mode 100644
index d12698b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-09-11-RegCoalescerAssert.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=ppc64
-
- %struct.TCMalloc_SpinLock = type { i32 }
-
-define void @_ZN17TCMalloc_SpinLock4LockEv(%struct.TCMalloc_SpinLock* %this) {
-entry:
- %tmp3 = call i32 asm sideeffect "1: lwarx $0, 0, $1\0A\09stwcx. $2, 0, $1\0A\09bne- 1b\0A\09isync", "=&r,=*r,r,1,~{dirflag},~{fpsr},~{flags},~{memory}"( i32** null, i32 1, i32* null ) ; <i32> [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-09-12-LiveIntervalsAssert.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-09-12-LiveIntervalsAssert.ll
deleted file mode 100644
index 5cfe54e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-09-12-LiveIntervalsAssert.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc64-apple-darwin
-
-declare void @cxa_atexit_check_1(i8*)
-
-define i32 @check_cxa_atexit(i32 (void (i8*)*, i8*, i8*)* %cxa_atexit, void (i8*)* %cxa_finalize) {
-entry:
- %tmp7 = call i32 null( void (i8*)* @cxa_atexit_check_1, i8* null, i8* null ) ; <i32> [#uses=0]
- br i1 false, label %cond_true, label %cond_next
-
-cond_true: ; preds = %entry
- ret i32 0
-
-cond_next: ; preds = %entry
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-10-16-InlineAsmFrameOffset.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-10-16-InlineAsmFrameOffset.ll
deleted file mode 100644
index c4152b4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-10-16-InlineAsmFrameOffset.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=ppc32
-; rdar://5538377
-
- %struct.disk_unsigned = type { i32 }
- %struct._StorePageMax = type { %struct.disk_unsigned, %struct.disk_unsigned, [65536 x i8] }
-
-define i32 @test() {
-entry:
- %data = alloca i32 ; <i32*> [#uses=1]
- %compressedPage = alloca %struct._StorePageMax ; <%struct._StorePageMax*> [#uses=0]
- %tmp107 = call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,*m"( i8* null, i32 0, i32* %data ) ; <i32> [#uses=0]
- unreachable
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-10-18-PtrArithmetic.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-10-18-PtrArithmetic.ll
deleted file mode 100644
index 84fadd1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-10-18-PtrArithmetic.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=ppc64 -mattr=+altivec
- %struct.inoutprops = type <{ i8, [3 x i8] }>
-
-define void @bork(float* %argA, float* %argB, float* %res, i8 %inoutspec.0) {
-entry:
- %.mask = and i8 %inoutspec.0, -16 ; <i8> [#uses=1]
- %tmp6 = icmp eq i8 %.mask, 16 ; <i1> [#uses=1]
- br i1 %tmp6, label %cond_true, label %UnifiedReturnBlock
-
-cond_true: ; preds = %entry
- %tmp89 = bitcast float* %res to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %tmp1011 = bitcast float* %argA to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %tmp14 = load <4 x i32>* %tmp1011, align 16 ; <<4 x i32>> [#uses=1]
- %tmp1516 = bitcast float* %argB to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %tmp18 = load <4 x i32>* %tmp1516, align 16 ; <<4 x i32>> [#uses=1]
- %tmp19 = sdiv <4 x i32> %tmp14, %tmp18 ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp19, <4 x i32>* %tmp89, align 16
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert.ll
deleted file mode 100644
index ee61478..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc64-apple-darwin9 -regalloc=local -relocation-model=pic
-
- %struct.NSError = type opaque
- %struct.NSManagedObjectContext = type opaque
- %struct.NSPersistentStoreCoordinator = type opaque
- %struct.NSString = type opaque
- %struct.NSURL = type opaque
- %struct._message_ref_t = type { %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*, %struct.objc_selector* }
- %struct.objc_object = type { }
- %struct.objc_selector = type opaque
-@"\01L_OBJC_MESSAGE_REF_2" = external global %struct._message_ref_t ; <%struct._message_ref_t*> [#uses=1]
-@"\01L_OBJC_MESSAGE_REF_6" = external global %struct._message_ref_t ; <%struct._message_ref_t*> [#uses=1]
-@NSXMLStoreType = external constant %struct.NSString* ; <%struct.NSString**> [#uses=1]
-@"\01L_OBJC_MESSAGE_REF_5" = external global %struct._message_ref_t ; <%struct._message_ref_t*> [#uses=2]
-@"\01L_OBJC_MESSAGE_REF_4" = external global %struct._message_ref_t ; <%struct._message_ref_t*> [#uses=1]
-
-define %struct.NSManagedObjectContext* @"+[ListGenerator(Private) managedObjectContextWithModelURL:storeURL:]"(%struct.objc_object* %self, %struct._message_ref_t* %_cmd, %struct.NSURL* %modelURL, %struct.NSURL* %storeURL) {
-entry:
- %storeCoordinator = alloca %struct.NSPersistentStoreCoordinator* ; <%struct.NSPersistentStoreCoordinator**> [#uses=0]
- %tmp29 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)* null( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_2" ) ; <%struct.objc_object*> [#uses=0]
- %tmp34 = load %struct.NSString** @NSXMLStoreType, align 8 ; <%struct.NSString*> [#uses=1]
- %tmp37 = load %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)** getelementptr (%struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_5", i32 0, i32 0), align 8 ; <%struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*> [#uses=1]
- %tmp42 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)* null( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_4", i32 1 ) ; <%struct.objc_object*> [#uses=1]
- %tmp45 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)* %tmp37( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_5", %struct.objc_object* %tmp42, %struct.NSString* null ) ; <%struct.objc_object*> [#uses=1]
- %tmp48 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)* null( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_6", %struct.NSString* %tmp34, i8* null, %struct.NSURL* null, %struct.objc_object* %tmp45, %struct.NSError** null ) ; <%struct.objc_object*> [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert2.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert2.ll
deleted file mode 100644
index 5a07a9b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert2.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc64-apple-darwin9 -regalloc=local -relocation-model=pic
-
- %struct.NSError = type opaque
- %struct.NSManagedObjectContext = type opaque
- %struct.NSString = type opaque
- %struct.NSURL = type opaque
- %struct._message_ref_t = type { %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*, %struct.objc_selector* }
- %struct.objc_object = type { }
- %struct.objc_selector = type opaque
-@"\01L_OBJC_MESSAGE_REF_2" = external global %struct._message_ref_t ; <%struct._message_ref_t*> [#uses=2]
-@"\01L_OBJC_MESSAGE_REF_6" = external global %struct._message_ref_t ; <%struct._message_ref_t*> [#uses=2]
-@NSXMLStoreType = external constant %struct.NSString* ; <%struct.NSString**> [#uses=1]
-@"\01L_OBJC_MESSAGE_REF_4" = external global %struct._message_ref_t ; <%struct._message_ref_t*> [#uses=2]
-
-define %struct.NSManagedObjectContext* @"+[ListGenerator(Private) managedObjectContextWithModelURL:storeURL:]"(%struct.objc_object* %self, %struct._message_ref_t* %_cmd, %struct.NSURL* %modelURL, %struct.NSURL* %storeURL) {
-entry:
- %tmp27 = load %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)** getelementptr (%struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_2", i32 0, i32 0), align 8 ; <%struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*> [#uses=1]
- %tmp29 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)* %tmp27( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_2" ) ; <%struct.objc_object*> [#uses=0]
- %tmp33 = load %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)** getelementptr (%struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_6", i32 0, i32 0), align 8 ; <%struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*> [#uses=1]
- %tmp34 = load %struct.NSString** @NSXMLStoreType, align 8 ; <%struct.NSString*> [#uses=1]
- %tmp40 = load %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)** getelementptr (%struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_4", i32 0, i32 0), align 8 ; <%struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)*> [#uses=1]
- %tmp42 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)* %tmp40( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_4", i32 1 ) ; <%struct.objc_object*> [#uses=0]
- %tmp48 = call %struct.objc_object* (%struct.objc_object*, %struct._message_ref_t*, ...)* %tmp33( %struct.objc_object* null, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_6", %struct.NSString* %tmp34, i8* null, %struct.NSURL* null, %struct.objc_object* null, %struct.NSError** null ) ; <%struct.objc_object*> [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-11-04-CoalescerCrash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-11-04-CoalescerCrash.ll
deleted file mode 100644
index a9f242b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-11-04-CoalescerCrash.ll
+++ /dev/null
@@ -1,148 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin
-
- %struct.HDescriptor = type <{ i32, i32 }>
-
-declare void @bcopy(i8*, i8*, i32)
-
-define i32 @main(i32 %argc, i8** %argv) {
-entry:
- br i1 false, label %bb31, label %bb
-
-bb: ; preds = %entry
- ret i32 -6
-
-bb31: ; preds = %entry
- switch i32 0, label %bb189 [
- i32 73, label %cond_next209
- i32 74, label %bb74
- i32 77, label %bb57
- i32 78, label %cond_next209
- i32 85, label %cond_next209
- i32 97, label %cond_next209
- i32 100, label %cond_next209
- i32 107, label %cond_next209
- i32 109, label %bb57
- i32 112, label %bb43
- i32 115, label %cond_next209
- i32 117, label %bb51
- ]
-
-bb43: ; preds = %bb31
- br i1 false, label %cond_true48, label %cond_true200.critedge2117
-
-cond_true48: ; preds = %bb43
- br i1 false, label %cond_next372, label %AllDone
-
-bb51: ; preds = %bb31
- ret i32 0
-
-bb57: ; preds = %bb31, %bb31
- ret i32 0
-
-bb74: ; preds = %bb31
- ret i32 0
-
-bb189: ; preds = %bb31
- ret i32 0
-
-cond_true200.critedge2117: ; preds = %bb43
- ret i32 0
-
-cond_next209: ; preds = %bb31, %bb31, %bb31, %bb31, %bb31, %bb31, %bb31
- ret i32 0
-
-cond_next372: ; preds = %cond_true48
- switch i32 0, label %bb1728 [
- i32 73, label %bb1723
- i32 74, label %cond_true1700
- i32 78, label %bb1718
- i32 85, label %bb1713
- i32 97, label %bb1620
- i32 107, label %AllDone
- i32 112, label %cond_next423
- i32 117, label %cond_next1453
- ]
-
-cond_next423: ; preds = %cond_next372
- switch i16 0, label %cond_next691 [
- i16 18475, label %cond_next807
- i16 18520, label %cond_next807
- ]
-
-cond_next691: ; preds = %cond_next423
- ret i32 0
-
-cond_next807: ; preds = %cond_next423, %cond_next423
- switch i16 0, label %cond_true1192 [
- i16 18475, label %cond_next21.i
- i16 18520, label %cond_next21.i
- ]
-
-cond_next21.i: ; preds = %cond_next807, %cond_next807
- br i1 false, label %cond_next934, label %free.i
-
-free.i: ; preds = %cond_next21.i
- ret i32 0
-
-cond_next934: ; preds = %bb1005, %cond_next21.i
- %listsize.1 = phi i32 [ 0, %bb1005 ], [ 64, %cond_next21.i ] ; <i32> [#uses=1]
- %catalogExtents.2 = phi %struct.HDescriptor* [ %catalogExtents.1.reg2mem.1, %bb1005 ], [ null, %cond_next21.i ] ; <%struct.HDescriptor*> [#uses=3]
- br i1 false, label %cond_next942, label %Return1020
-
-cond_next942: ; preds = %cond_next934
- br i1 false, label %bb1005, label %bb947
-
-bb947: ; preds = %cond_next971, %cond_next942
- %indvar = phi i32 [ 0, %cond_next942 ], [ %indvar.next2140, %cond_next971 ] ; <i32> [#uses=2]
- %catalogExtents.1.reg2mem.0 = phi %struct.HDescriptor* [ %catalogExtents.2, %cond_next942 ], [ %tmp977978, %cond_next971 ] ; <%struct.HDescriptor*> [#uses=1]
- %extents.0.reg2mem.0 = phi %struct.HDescriptor* [ null, %cond_next942 ], [ %tmp977978, %cond_next971 ] ; <%struct.HDescriptor*> [#uses=1]
- br i1 false, label %cond_next971, label %Return1020
-
-cond_next971: ; preds = %bb947
- %tmp = shl i32 %indvar, 6 ; <i32> [#uses=1]
- %listsize.0.reg2mem.0 = add i32 %tmp, %listsize.1 ; <i32> [#uses=1]
- %tmp973 = add i32 %listsize.0.reg2mem.0, 64 ; <i32> [#uses=1]
- %tmp974975 = bitcast %struct.HDescriptor* %extents.0.reg2mem.0 to i8* ; <i8*> [#uses=1]
- %tmp977 = call i8* @realloc( i8* %tmp974975, i32 %tmp973 ) ; <i8*> [#uses=1]
- %tmp977978 = bitcast i8* %tmp977 to %struct.HDescriptor* ; <%struct.HDescriptor*> [#uses=3]
- call void @bcopy( i8* null, i8* null, i32 64 )
- %indvar.next2140 = add i32 %indvar, 1 ; <i32> [#uses=1]
- br i1 false, label %bb1005, label %bb947
-
-bb1005: ; preds = %cond_next971, %cond_next942
- %catalogExtents.1.reg2mem.1 = phi %struct.HDescriptor* [ %catalogExtents.2, %cond_next942 ], [ %tmp977978, %cond_next971 ] ; <%struct.HDescriptor*> [#uses=2]
- br i1 false, label %Return1020, label %cond_next934
-
-Return1020: ; preds = %bb1005, %bb947, %cond_next934
- %catalogExtents.3 = phi %struct.HDescriptor* [ %catalogExtents.1.reg2mem.0, %bb947 ], [ %catalogExtents.2, %cond_next934 ], [ %catalogExtents.1.reg2mem.1, %bb1005 ] ; <%struct.HDescriptor*> [#uses=0]
- ret i32 0
-
-cond_true1192: ; preds = %cond_next807
- ret i32 0
-
-cond_next1453: ; preds = %cond_next372
- ret i32 0
-
-bb1620: ; preds = %cond_next372
- ret i32 0
-
-cond_true1700: ; preds = %cond_next372
- ret i32 0
-
-bb1713: ; preds = %cond_next372
- ret i32 0
-
-bb1718: ; preds = %cond_next372
- ret i32 0
-
-bb1723: ; preds = %cond_next372
- ret i32 0
-
-bb1728: ; preds = %cond_next372
- ret i32 -6
-
-AllDone: ; preds = %cond_next372, %cond_true48
- ret i32 0
-}
-
-declare i8* @realloc(i8*, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
deleted file mode 100644
index 439ef14..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
+++ /dev/null
@@ -1,59 +0,0 @@
-; RUN: llc < %s -enable-eh
-;; Formerly crashed, see PR 1508
-target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc64-apple-darwin8"
- %struct.Range = type { i64, i64 }
-
-define void @Bork(i64 %range.0.0, i64 %range.0.1, i64 %size) {
-entry:
- %effectiveRange = alloca %struct.Range, align 8 ; <%struct.Range*> [#uses=2]
- %tmp4 = call i8* @llvm.stacksave() ; <i8*> [#uses=1]
- %size1 = trunc i64 %size to i32 ; <i32> [#uses=1]
- %tmp17 = alloca i8*, i32 %size1 ; <i8**> [#uses=1]
- invoke void @Foo(i8** %tmp17)
- to label %bb30.preheader unwind label %unwind
-
-bb30.preheader: ; preds = %entry
- %tmp26 = getelementptr %struct.Range* %effectiveRange, i64 0, i32 1 ; <i64*> [#uses=1]
- br label %bb30
-
-unwind: ; preds = %cond_true, %entry
- %eh_ptr = call i8* @llvm.eh.exception() ; <i8*> [#uses=2]
- %eh_select = call i64 (i8*, i8*, ...)* @llvm.eh.selector.i64(i8* %eh_ptr, i8* bitcast (void ()* @__gxx_personality_v0 to i8*), i8* null) ; <i64> [#uses=0]
- call void @llvm.stackrestore(i8* %tmp4)
- call void @_Unwind_Resume(i8* %eh_ptr)
- unreachable
-
-invcont23: ; preds = %cond_true
- %tmp27 = load i64* %tmp26, align 8 ; <i64> [#uses=1]
- %tmp28 = sub i64 %range_addr.1.0, %tmp27 ; <i64> [#uses=1]
- br label %bb30
-
-bb30: ; preds = %invcont23, %bb30.preheader
- %range_addr.1.0 = phi i64 [ %tmp28, %invcont23 ], [ %range.0.1, %bb30.preheader ] ; <i64> [#uses=2]
- %tmp33 = icmp eq i64 %range_addr.1.0, 0 ; <i1> [#uses=1]
- br i1 %tmp33, label %cleanup, label %cond_true
-
-cond_true: ; preds = %bb30
- invoke void @Bar(i64 %range.0.0, %struct.Range* %effectiveRange)
- to label %invcont23 unwind label %unwind
-
-cleanup: ; preds = %bb30
- ret void
-}
-
-declare i8* @llvm.stacksave() nounwind
-
-declare void @Foo(i8**)
-
-declare i8* @llvm.eh.exception() nounwind
-
-declare i64 @llvm.eh.selector.i64(i8*, i8*, ...) nounwind
-
-declare void @__gxx_personality_v0()
-
-declare void @_Unwind_Resume(i8*)
-
-declare void @Bar(i64, %struct.Range*)
-
-declare void @llvm.stackrestore(i8*) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll
deleted file mode 100644
index d1f0285..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s
-; RUN: llc < %s -march=ppc32 -mcpu=g3
-; RUN: llc < %s -march=ppc32 -mcpu=g5
-; PR1811
-
-define void @execute_shader(<4 x float>* %OUT, <4 x float>* %IN, <4 x float>*
-%CONST) {
-entry:
- %input2 = load <4 x float>* null, align 16 ; <<4 x float>>
- %shuffle7 = shufflevector <4 x float> %input2, <4 x float> < float 0.000000e+00, float 1.000000e+00, float 0.000000e+00, float 1.000000e+00 >, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>> [#uses=1]
-
- %mul1 = fmul <4 x float> %shuffle7, zeroinitializer ; <<4 x
- %add2 = fadd <4 x float> %mul1, %input2 ; <<4 x float>>
- store <4 x float> %add2, <4 x float>* null, align 16
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-01-25-EmptyFunction.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-01-25-EmptyFunction.ll
deleted file mode 100644
index a05245d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-01-25-EmptyFunction.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep .byte
-target triple = "powerpc-apple-darwin8"
-
-
-define void @bork() noreturn nounwind {
-entry:
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-02-05-LiveIntervalsAssert.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-02-05-LiveIntervalsAssert.ll
deleted file mode 100644
index 791e9e6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-02-05-LiveIntervalsAssert.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin
-
- %struct.Handle = type { %struct.oopDesc** }
- %struct.JNI_ArgumentPusher = type { %struct.SignatureIterator, %struct.JavaCallArguments* }
- %struct.JNI_ArgumentPusherArray = type { %struct.JNI_ArgumentPusher, %struct.JvmtiEventEnabled* }
- %struct.JavaCallArguments = type { [9 x i32], [9 x i32], i32*, i32*, i32, i32, i32 }
- %struct.JvmtiEventEnabled = type { i64 }
- %struct.KlassHandle = type { %struct.Handle }
- %struct.SignatureIterator = type { i32 (...)**, %struct.KlassHandle, i32, i32, i32 }
- %struct.instanceOopDesc = type { %struct.oopDesc }
- %struct.oopDesc = type { %struct.instanceOopDesc*, %struct.instanceOopDesc* }
-@.str = external constant [44 x i8] ; <[44 x i8]*> [#uses=1]
-
-define void @_ZN23JNI_ArgumentPusherArray7iterateEy(%struct.JNI_ArgumentPusherArray* %this, i64 %fingerprint) nounwind {
-entry:
- br label %bb113
-
-bb22.preheader: ; preds = %bb113
- ret void
-
-bb32.preheader: ; preds = %bb113
- ret void
-
-bb42.preheader: ; preds = %bb113
- ret void
-
-bb52: ; preds = %bb113
- br label %bb113
-
-bb62.preheader: ; preds = %bb113
- ret void
-
-bb72.preheader: ; preds = %bb113
- ret void
-
-bb82: ; preds = %bb113
- br label %bb113
-
-bb93: ; preds = %bb113
- br label %bb113
-
-bb103.preheader: ; preds = %bb113
- ret void
-
-bb113: ; preds = %bb113, %bb93, %bb82, %bb52, %entry
- %fingerprint_addr.0.reg2mem.9 = phi i64 [ 0, %entry ], [ 0, %bb52 ], [ 0, %bb82 ], [ 0, %bb93 ], [ %tmp118, %bb113 ] ; <i64> [#uses=1]
- tail call void @_Z28report_should_not_reach_herePKci( i8* getelementptr ([44 x i8]* @.str, i32 0, i32 0), i32 817 ) nounwind
- %tmp118 = lshr i64 %fingerprint_addr.0.reg2mem.9, 4 ; <i64> [#uses=2]
- %tmp21158 = and i64 %tmp118, 15 ; <i64> [#uses=1]
- switch i64 %tmp21158, label %bb113 [
- i64 1, label %bb22.preheader
- i64 2, label %bb52
- i64 3, label %bb32.preheader
- i64 4, label %bb42.preheader
- i64 5, label %bb62.preheader
- i64 6, label %bb82
- i64 7, label %bb93
- i64 8, label %bb103.preheader
- i64 9, label %bb72.preheader
- i64 10, label %UnifiedReturnBlock
- ]
-
-UnifiedReturnBlock: ; preds = %bb113
- ret void
-}
-
-declare void @_Z28report_should_not_reach_herePKci(i8*, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-02-09-LocalRegAllocAssert.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-02-09-LocalRegAllocAssert.ll
deleted file mode 100644
index cfa1b10..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-02-09-LocalRegAllocAssert.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin -regalloc=local
-
-define i32 @bork(i64 %foo, i64 %bar) {
-entry:
- %tmp = load i64* null, align 8 ; <i64> [#uses=2]
- %tmp2 = icmp ule i64 %tmp, 0 ; <i1> [#uses=1]
- %min = select i1 %tmp2, i64 %tmp, i64 0 ; <i64> [#uses=1]
- store i64 %min, i64* null, align 8
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-05-RegScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-05-RegScavengerAssert.ll
deleted file mode 100644
index e50fac4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-05-RegScavengerAssert.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin -enable-ppc32-regscavenger
-
-declare i8* @bar(i32)
-
-define void @foo(i8* %pp) nounwind {
-entry:
- %tmp2 = tail call i8* @bar( i32 14 ) nounwind ; <i8*> [#uses=0]
- %tmp28 = bitcast i8* %pp to void ()** ; <void ()**> [#uses=1]
- %tmp38 = load void ()** %tmp28, align 4 ; <void ()*> [#uses=2]
- br i1 false, label %bb34, label %bb25
-bb25: ; preds = %entry
- %tmp30 = bitcast void ()* %tmp38 to void (i8*)* ; <void (i8*)*> [#uses=1]
- tail call void %tmp30( i8* null ) nounwind
- ret void
-bb34: ; preds = %entry
- tail call void %tmp38( ) nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-06-KillInfo.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-06-KillInfo.ll
deleted file mode 100644
index 222dde4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-06-KillInfo.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=ppc64 -enable-ppc64-regscavenger
-@.str242 = external constant [3 x i8] ; <[3 x i8]*> [#uses=1]
-
-define fastcc void @ParseContent(i8* %buf, i32 %bufsize) {
-entry:
- %items = alloca [10000 x i8*], align 16 ; <[10000 x i8*]*> [#uses=0]
- %tmp86 = add i32 0, -1 ; <i32> [#uses=1]
- br i1 false, label %cond_true94, label %cond_next99
-cond_true94: ; preds = %entry
- %tmp98 = call i32 (i8*, ...)* @printf( i8* getelementptr ([3 x i8]* @.str242, i32 0, i32 0), i8* null ) ; <i32> [#uses=0]
- %tmp20971 = icmp sgt i32 %tmp86, 0 ; <i1> [#uses=1]
- br i1 %tmp20971, label %bb101, label %bb212
-cond_next99: ; preds = %entry
- ret void
-bb101: ; preds = %cond_true94
- ret void
-bb212: ; preds = %cond_true94
- ret void
-}
-
-declare i32 @printf(i8*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-17-RegScavengerCrash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-17-RegScavengerCrash.ll
deleted file mode 100644
index 9f35b83..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-17-RegScavengerCrash.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -march=ppc32 -enable-ppc32-regscavenger
-
- %struct._cpp_strbuf = type { i8*, i32, i32 }
- %struct.cpp_string = type { i32, i8* }
-
-declare fastcc void @emit_numeric_escape(i32, i32, %struct._cpp_strbuf*, i32) nounwind
-
-define i32 @cpp_interpret_string(i32 %pfile, %struct.cpp_string* %from, i32 %wide) nounwind {
-entry:
- %tmp61 = load i32* null, align 4 ; <i32> [#uses=1]
- %toBool = icmp eq i32 %wide, 0 ; <i1> [#uses=2]
- %iftmp.87.0 = select i1 %toBool, i32 %tmp61, i32 0 ; <i32> [#uses=2]
- %tmp69 = icmp ult i32 %iftmp.87.0, 33 ; <i1> [#uses=1]
- %min = select i1 %tmp69, i32 %iftmp.87.0, i32 32 ; <i32> [#uses=1]
- %tmp71 = icmp ugt i32 %min, 31 ; <i1> [#uses=1]
- br i1 %tmp71, label %bb79, label %bb75
-bb75: ; preds = %entry
- ret i32 0
-bb79: ; preds = %entry
- br i1 %toBool, label %bb103, label %bb94
-bb94: ; preds = %bb79
- br i1 false, label %bb729, label %bb130.preheader
-bb103: ; preds = %bb79
- ret i32 0
-bb130.preheader: ; preds = %bb94
- %tmp134 = getelementptr %struct.cpp_string* %from, i32 0, i32 1 ; <i8**> [#uses=0]
- ret i32 0
-bb729: ; preds = %bb94
- call fastcc void @emit_numeric_escape( i32 %pfile, i32 0, %struct._cpp_strbuf* null, i32 %wide ) nounwind
- ret i32 1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-18-RegScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-18-RegScavengerAssert.ll
deleted file mode 100644
index dd425f5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-18-RegScavengerAssert.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=ppc64 -enable-ppc64-regscavenger
-
-define i16 @test(i8* %d1, i16* %d2) {
- %tmp237 = call i16 asm "lhbrx $0, $2, $1", "=r,r,bO,m"( i8* %d1, i32 0, i16* %d2 )
- ret i16 %tmp237
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-24-AddressRegImm.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-24-AddressRegImm.ll
deleted file mode 100644
index a8fef05..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-24-AddressRegImm.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=ppc64
-
-define fastcc i8* @page_rec_get_next(i8* %rec) nounwind {
-entry:
- %tmp2627 = ptrtoint i8* %rec to i64 ; <i64> [#uses=2]
- %tmp28 = and i64 %tmp2627, -16384 ; <i64> [#uses=2]
- %tmp2829 = inttoptr i64 %tmp28 to i8* ; <i8*> [#uses=1]
- %tmp37 = getelementptr i8* %tmp2829, i64 42 ; <i8*> [#uses=1]
- %tmp40 = load i8* %tmp37, align 1 ; <i8> [#uses=1]
- %tmp4041 = zext i8 %tmp40 to i64 ; <i64> [#uses=1]
- %tmp42 = shl i64 %tmp4041, 8 ; <i64> [#uses=1]
- %tmp47 = add i64 %tmp42, 0 ; <i64> [#uses=1]
- %tmp52 = and i64 %tmp47, 32768 ; <i64> [#uses=1]
- %tmp72 = icmp eq i64 %tmp52, 0 ; <i1> [#uses=1]
- br i1 %tmp72, label %bb91, label %bb
-bb: ; preds = %entry
- ret i8* null
-bb91: ; preds = %entry
- br i1 false, label %bb100, label %bb185
-bb100: ; preds = %bb91
- %tmp106 = sub i64 %tmp2627, %tmp28 ; <i64> [#uses=0]
- ret i8* null
-bb185: ; preds = %bb91
- ret i8* null
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-24-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-24-CoalescerBug.ll
deleted file mode 100644
index 8776d9a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-24-CoalescerBug.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=ppc32
-
- %struct..0objc_object = type { %struct.objc_class* }
- %struct.NSArray = type { %struct..0objc_object }
- %struct.NSMutableArray = type { %struct.NSArray }
- %struct.PFTPersistentSymbols = type { %struct..0objc_object, %struct.VMUSymbolicator*, %struct.NSMutableArray*, %struct.__CFDictionary*, %struct.__CFDictionary*, %struct.__CFDictionary*, %struct.__CFDictionary*, %struct.NSMutableArray*, i8, %struct.pthread_mutex_t, %struct.NSMutableArray*, %struct.pthread_rwlock_t }
- %struct.VMUMachTaskContainer = type { %struct..0objc_object, i32, i32 }
- %struct.VMUSymbolicator = type { %struct..0objc_object, %struct.NSMutableArray*, %struct.NSArray*, %struct.NSArray*, %struct.VMUMachTaskContainer*, i8 }
- %struct.__CFDictionary = type opaque
- %struct.__builtin_CFString = type { i32*, i32, i8*, i32 }
- %struct.objc_class = type opaque
- %struct.objc_selector = type opaque
- %struct.pthread_mutex_t = type { i32, [40 x i8] }
- %struct.pthread_rwlock_t = type { i32, [124 x i8] }
-external constant %struct.__builtin_CFString ; <%struct.__builtin_CFString*>:0 [#uses=1]
-
-define void @"-[PFTPersistentSymbols saveSymbolWithName:address:path:lineNumber:flags:owner:]"(%struct.PFTPersistentSymbols* %self, %struct.objc_selector* %_cmd, %struct.NSArray* %name, i64 %address, %struct.NSArray* %path, i32 %lineNumber, i64 %flags, %struct..0objc_object* %owner) nounwind {
-entry:
- br i1 false, label %bb12, label %bb21
-bb12: ; preds = %entry
- %tmp17 = tail call i8 inttoptr (i64 4294901504 to i8 (%struct..0objc_object*, %struct.objc_selector*, %struct.NSArray*)*)( %struct..0objc_object* null, %struct.objc_selector* null, %struct.NSArray* bitcast (%struct.__builtin_CFString* @0 to %struct.NSArray*) ) signext nounwind ; <i8> [#uses=0]
- br i1 false, label %bb25, label %bb21
-bb21: ; preds = %bb12, %entry
- %tmp24 = or i64 %flags, 4 ; <i64> [#uses=1]
- br label %bb25
-bb25: ; preds = %bb21, %bb12
- %flags_addr.0 = phi i64 [ %tmp24, %bb21 ], [ %flags, %bb12 ] ; <i64> [#uses=1]
- %tmp3233 = trunc i64 %flags_addr.0 to i32 ; <i32> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-26-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-26-CoalescerBug.ll
deleted file mode 100644
index 8e5bf56..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-03-26-CoalescerBug.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin
-
-define i32 @t(i64 %byteStart, i32 %activeIndex) nounwind {
-entry:
- %tmp50 = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp5051 = zext i32 %tmp50 to i64 ; <i64> [#uses=3]
- %tmp53 = udiv i64 %byteStart, %tmp5051 ; <i64> [#uses=1]
- %tmp5354 = trunc i64 %tmp53 to i32 ; <i32> [#uses=1]
- %tmp62 = urem i64 %byteStart, %tmp5051 ; <i64> [#uses=1]
- %tmp94 = add i32 0, 1 ; <i32> [#uses=1]
- %tmp100 = urem i32 %tmp94, 0 ; <i32> [#uses=2]
- %tmp108 = add i32 0, %activeIndex ; <i32> [#uses=1]
- %tmp110 = sub i32 %tmp108, 0 ; <i32> [#uses=1]
- %tmp112 = urem i32 %tmp110, 0 ; <i32> [#uses=2]
- %tmp122 = icmp ult i32 %tmp112, %tmp100 ; <i1> [#uses=1]
- %iftmp.175.0 = select i1 %tmp122, i32 %tmp112, i32 %tmp100 ; <i32> [#uses=1]
- %tmp119 = add i32 %tmp5354, 0 ; <i32> [#uses=1]
- %tmp131 = add i32 %tmp119, %iftmp.175.0 ; <i32> [#uses=1]
- %tmp131132 = zext i32 %tmp131 to i64 ; <i64> [#uses=1]
- %tmp147 = mul i64 %tmp131132, %tmp5051 ; <i64> [#uses=1]
- br i1 false, label %bb164, label %bb190
-bb164: ; preds = %entry
- %tmp171172 = and i64 %tmp62, 4294967295 ; <i64> [#uses=1]
- %tmp173 = add i64 %tmp171172, %tmp147 ; <i64> [#uses=0]
- ret i32 0
-bb190: ; preds = %entry
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-04-10-LiveIntervalCrash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-04-10-LiveIntervalCrash.ll
deleted file mode 100644
index 2706337..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-04-10-LiveIntervalCrash.ll
+++ /dev/null
@@ -1,100 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin
-
-define fastcc i64 @nonzero_bits1() nounwind {
-entry:
- switch i32 0, label %bb1385 [
- i32 28, label %bb235
- i32 35, label %bb153
- i32 37, label %bb951
- i32 40, label %bb289
- i32 44, label %bb1344
- i32 46, label %bb651
- i32 47, label %bb651
- i32 48, label %bb322
- i32 49, label %bb651
- i32 50, label %bb651
- i32 51, label %bb651
- i32 52, label %bb651
- i32 53, label %bb651
- i32 54, label %bb535
- i32 55, label %bb565
- i32 56, label %bb565
- i32 58, label %bb1100
- i32 59, label %bb1100
- i32 60, label %bb1100
- i32 61, label %bb1100
- i32 63, label %bb565
- i32 64, label %bb565
- i32 65, label %bb565
- i32 66, label %bb565
- i32 73, label %bb302
- i32 74, label %bb302
- i32 75, label %bb302
- i32 76, label %bb302
- i32 77, label %bb302
- i32 78, label %bb302
- i32 79, label %bb302
- i32 80, label %bb302
- i32 81, label %bb302
- i32 82, label %bb302
- i32 83, label %bb302
- i32 84, label %bb302
- i32 85, label %bb302
- i32 86, label %bb302
- i32 87, label %bb302
- i32 88, label %bb302
- i32 89, label %bb302
- i32 90, label %bb302
- i32 91, label %bb507
- i32 92, label %bb375
- i32 93, label %bb355
- i32 103, label %bb1277
- i32 104, label %bb1310
- i32 105, label %UnifiedReturnBlock
- i32 106, label %bb1277
- i32 107, label %bb1343
- ]
-bb153: ; preds = %entry
- ret i64 0
-bb235: ; preds = %entry
- br i1 false, label %bb245, label %UnifiedReturnBlock
-bb245: ; preds = %bb235
- ret i64 0
-bb289: ; preds = %entry
- ret i64 0
-bb302: ; preds = %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry
- ret i64 0
-bb322: ; preds = %entry
- ret i64 0
-bb355: ; preds = %entry
- ret i64 0
-bb375: ; preds = %entry
- ret i64 0
-bb507: ; preds = %entry
- ret i64 0
-bb535: ; preds = %entry
- ret i64 0
-bb565: ; preds = %entry, %entry, %entry, %entry, %entry, %entry
- ret i64 0
-bb651: ; preds = %entry, %entry, %entry, %entry, %entry, %entry, %entry
- ret i64 0
-bb951: ; preds = %entry
- ret i64 0
-bb1100: ; preds = %entry, %entry, %entry, %entry
- ret i64 0
-bb1277: ; preds = %entry, %entry
- br i1 false, label %UnifiedReturnBlock, label %bb1284
-bb1284: ; preds = %bb1277
- ret i64 0
-bb1310: ; preds = %entry
- ret i64 0
-bb1343: ; preds = %entry
- ret i64 1
-bb1344: ; preds = %entry
- ret i64 0
-bb1385: ; preds = %entry
- ret i64 0
-UnifiedReturnBlock: ; preds = %bb1277, %bb235, %entry
- %UnifiedRetVal = phi i64 [ 0, %bb235 ], [ undef, %bb1277 ], [ -1, %entry ] ; <i64> [#uses=1]
- ret i64 %UnifiedRetVal
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-04-16-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-04-16-CoalescerBug.ll
deleted file mode 100644
index 839098e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-04-16-CoalescerBug.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin
-; Avoid reading memory that's already freed.
-
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (i32 (i64)* @_Z13GetSectorSizey to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define i32 @_Z13GetSectorSizey(i64 %Base) nounwind {
-entry:
- br i1 false, label %bb, label %UnifiedReturnBlock
-bb: ; preds = %entry
- %tmp10 = and i64 0, %Base ; <i64> [#uses=0]
- ret i32 0
-UnifiedReturnBlock: ; preds = %entry
- ret i32 131072
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll
deleted file mode 100644
index 7b6d491..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll
+++ /dev/null
@@ -1,89 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin
-
-@_ZL10DeviceCode = internal global i16 0 ; <i16*> [#uses=1]
-@.str19 = internal constant [64 x i8] c"unlock_then_erase_sector: failed to erase block (status= 0x%x)\0A\00" ; <[64 x i8]*> [#uses=1]
-@.str34 = internal constant [68 x i8] c"ProgramByWords - Erasing sector 0x%llx to 0x%llx (size 0x%x bytes)\0A\00" ; <[68 x i8]*> [#uses=1]
-@.str35 = internal constant [37 x i8] c"ProgramByWords - Done erasing flash\0A\00" ; <[37 x i8]*> [#uses=1]
-@.str36 = internal constant [48 x i8] c"ProgramByWords - Starting to write to FLASH...\0A\00" ; <[48 x i8]*> [#uses=1]
-
-declare void @IOLog(i8*, ...)
-
-declare void @IODelay(i32)
-
-define i32 @_Z14ProgramByWordsPvyy(i8* %buffer, i64 %Offset, i64 %bufferSize) nounwind {
-entry:
- volatile store i8 -1, i8* null, align 1
- %tmp28 = icmp eq i8 0, 0 ; <i1> [#uses=1]
- br i1 %tmp28, label %bb107, label %bb
-
-bb: ; preds = %entry
- %tmp9596430 = zext i32 0 to i64 ; <i64> [#uses=1]
- %tmp98431 = add i64 %tmp9596430, %Offset ; <i64> [#uses=1]
- %tmp100433 = icmp ugt i64 %tmp98431, %Offset ; <i1> [#uses=1]
- br i1 %tmp100433, label %bb31, label %bb103
-
-bb31: ; preds = %_Z24unlock_then_erase_sectory.exit, %bb
- %Pos.0.reg2mem.0 = phi i64 [ %tmp93, %_Z24unlock_then_erase_sectory.exit ], [ %Offset, %bb ] ; <i64> [#uses=3]
- %tmp35 = load i16* @_ZL10DeviceCode, align 2 ; <i16> [#uses=1]
- %tmp3536 = zext i16 %tmp35 to i32 ; <i32> [#uses=2]
- %tmp37 = and i32 %tmp3536, 65520 ; <i32> [#uses=1]
- %tmp38 = icmp eq i32 %tmp37, 35008 ; <i1> [#uses=1]
- %tmp34 = sub i64 %Pos.0.reg2mem.0, %Offset ; <i64> [#uses=2]
- br i1 %tmp38, label %bb41, label %bb68
-
-bb41: ; preds = %bb31
- %tmp43 = add i32 0, -1 ; <i32> [#uses=1]
- %tmp4344 = zext i32 %tmp43 to i64 ; <i64> [#uses=1]
- %tmp46 = and i64 %tmp4344, %tmp34 ; <i64> [#uses=0]
- %tmp49 = and i32 %tmp3536, 1 ; <i32> [#uses=0]
- ret i32 0
-
-bb68: ; preds = %bb31
- tail call void (i8*, ...)* @IOLog( i8* getelementptr ([68 x i8]* @.str34, i32 0, i32 0), i64 %tmp34, i64 0, i32 131072 ) nounwind
- %tmp2021.i = trunc i64 %Pos.0.reg2mem.0 to i32 ; <i32> [#uses=1]
- %tmp202122.i = inttoptr i32 %tmp2021.i to i8* ; <i8*> [#uses=1]
- tail call void @IODelay( i32 500 ) nounwind
- %tmp53.i = volatile load i16* null, align 2 ; <i16> [#uses=2]
- %tmp5455.i = zext i16 %tmp53.i to i32 ; <i32> [#uses=1]
- br i1 false, label %bb.i, label %bb65.i
-
-bb.i: ; preds = %bb68
- ret i32 0
-
-bb65.i: ; preds = %bb68
- %tmp67.i = icmp eq i16 %tmp53.i, 128 ; <i1> [#uses=1]
- br i1 %tmp67.i, label %_Z24unlock_then_erase_sectory.exit, label %bb70.i
-
-bb70.i: ; preds = %bb65.i
- tail call void (i8*, ...)* @IOLog( i8* getelementptr ([64 x i8]* @.str19, i32 0, i32 0), i32 %tmp5455.i ) nounwind
- ret i32 0
-
-_Z24unlock_then_erase_sectory.exit: ; preds = %bb65.i
- volatile store i8 -1, i8* %tmp202122.i, align 1
- %tmp93 = add i64 0, %Pos.0.reg2mem.0 ; <i64> [#uses=2]
- %tmp98 = add i64 0, %Offset ; <i64> [#uses=1]
- %tmp100 = icmp ugt i64 %tmp98, %tmp93 ; <i1> [#uses=1]
- br i1 %tmp100, label %bb31, label %bb103
-
-bb103: ; preds = %_Z24unlock_then_erase_sectory.exit, %bb
- tail call void (i8*, ...)* @IOLog( i8* getelementptr ([37 x i8]* @.str35, i32 0, i32 0) ) nounwind
- ret i32 0
-
-bb107: ; preds = %entry
- tail call void (i8*, ...)* @IOLog( i8* getelementptr ([48 x i8]* @.str36, i32 0, i32 0) ) nounwind
- %tmp114115 = bitcast i8* %buffer to i16* ; <i16*> [#uses=1]
- %tmp256 = lshr i64 %bufferSize, 1 ; <i64> [#uses=1]
- %tmp256257 = trunc i64 %tmp256 to i32 ; <i32> [#uses=1]
- %tmp258 = getelementptr i16* %tmp114115, i32 %tmp256257 ; <i16*> [#uses=0]
- ret i32 0
-}
-
-define i32 @_Z17program_64B_blockyPm(i64 %Base, i32* %pData) nounwind {
-entry:
- unreachable
-}
-
-define i32 @_Z15ProgramByBlocksyy(i64 %Offset, i64 %bufferSize) nounwind {
-entry:
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-05-01-ppc_fp128.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-05-01-ppc_fp128.ll
deleted file mode 100644
index d42c814..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-05-01-ppc_fp128.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=ppc32
-target triple = "powerpc-apple-darwin9.2.2"
-
-define i256 @func(ppc_fp128 %a, ppc_fp128 %b, ppc_fp128 %c, ppc_fp128 %d) nounwind readnone {
-entry:
- br i1 false, label %bb36, label %bb484
-
-bb36: ; preds = %entry
- %tmp124 = fcmp ord ppc_fp128 %b, 0xM00000000000000000000000000000000 ; <i1> [#uses=1]
- %tmp140 = and i1 %tmp124, fcmp une (ppc_fp128 0xM00000000000000000000000000000000, ppc_fp128 0xM00000000000000000000000000000000) ; <i1> [#uses=0]
- unreachable
-
-bb484: ; preds = %entry
- ret i256 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-06-19-LegalizerCrash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-06-19-LegalizerCrash.ll
deleted file mode 100644
index 6b40b24..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-06-19-LegalizerCrash.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=ppc32
-
-define void @t() nounwind {
- call void null( ppc_fp128 undef )
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-06-21-F128LoadStore.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-06-21-F128LoadStore.ll
deleted file mode 100644
index 862559b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-06-21-F128LoadStore.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=ppc32
-
-@g = external global ppc_fp128
-@h = external global ppc_fp128
-
-define void @f() {
- %tmp = load ppc_fp128* @g
- store ppc_fp128 %tmp, ppc_fp128* @h
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-06-23-LiveVariablesCrash.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-06-23-LiveVariablesCrash.ll
deleted file mode 100644
index 83c5511..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-06-23-LiveVariablesCrash.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=ppc32
-; <rdar://problem/6020042>
-
-define i32 @bork() nounwind {
-entry:
- br i1 true, label %bb1, label %bb3
-
-bb1:
- %tmp1 = load i8* null, align 1
- %tmp2 = icmp eq i8 %tmp1, 0
- br label %bb2
-
-bb2:
- %val1 = phi i32 [ 0, %bb1 ], [ %val2, %bb2 ]
- %val2 = select i1 %tmp2, i32 -1, i32 %val1
- switch i32 %val2, label %bb2 [
- i32 -1, label %bb3
- i32 0, label %bb1
- i32 1, label %bb3
- i32 2, label %bb1
- ]
-
-bb3:
- ret i32 -1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-10-SplatMiscompile.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-10-SplatMiscompile.ll
deleted file mode 100644
index 8802b97..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-10-SplatMiscompile.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep vadduhm
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep vsubuhm
-
-define <4 x i32> @test() nounwind {
- ret <4 x i32> < i32 4293066722, i32 4293066722, i32 4293066722, i32 4293066722>
-}
-
-define <4 x i32> @test2() nounwind {
- ret <4 x i32> < i32 1114129, i32 1114129, i32 1114129, i32 1114129>
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-15-Bswap.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-15-Bswap.ll
deleted file mode 100644
index 4a834f9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-15-Bswap.ll
+++ /dev/null
@@ -1,386 +0,0 @@
-; RUN: llc < %s
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc-apple-darwin9"
- %struct.BiPartSrcDescriptor = type <{ %"struct.BiPartSrcDescriptor::$_105" }>
- %"struct.BiPartSrcDescriptor::$_105" = type { %struct.BiPartSrcDescriptor_NO_VECTOR_ALIGNMENT_size_is_16 }
- %struct.BiPartSrcDescriptor_NO_VECTOR_ALIGNMENT_size_is_16 = type { [2 x %struct.MotionVectors], [2 x i8], %struct.Map4x4ToPartIdx, [2 x i8], i8, i8 }
- %struct.Condv = type opaque
- %struct.DHBFLayerId = type { i8 }
- %struct.DecodeComplexityInfo = type { i32, i32, i32, i32, %"struct.DecodeComplexityInfo::IntraStats", %"struct.DecodeComplexityInfo::InterStats" }
- %"struct.DecodeComplexityInfo::InterStats" = type { i32, i32, i32, i32, [5 x i32], [3 x i32], [4 x [4 x i32]], [4 x i32], i32, %struct.MotionVectors, %struct.MotionVectors }
- %"struct.DecodeComplexityInfo::IntraStats" = type { i32, i32, i32, [5 x i32], [3 x i32], [4 x i32], [3 x i32] }
- %struct.DecodeComplexityOptions = type { i8, i8, i32, double, i8, float, i8, float, i8, i8, i8, i8, i8 }
- %struct.DescriptorAllocator = type { %struct.Mutex*, %struct.Mutex*, i8**, i32, i32, i8**, i32, i32, i8**, i32, i32 }
- %struct.DetailsFromSliceType = type <{ i8 }>
- %struct.FlatnessAnalysis = type { i16, i16, i32, i32*, i8*, [512 x i32], [256 x i32] }
- %struct.Frame = type <{ i8, i8, i8, i8, i8, [3 x i8], i32, i32, %struct.Mutex*, %struct.Condv*, [8 x i8], %struct.FramePixels, %struct.FrameMotionVectorCache, %struct.FrameIndex, i32, i8*, i8*, i8*, i8*, i16*, %struct.FlatnessAnalysis, %struct.NoiseAnalysis, %struct.VisualActivity, %struct.FrameMotionInfo, %struct.FrameMotionAnalysis, %struct.FrameDataRateParameters, %struct.FrameEncoderTags, %struct.DecodeComplexityInfo, %struct.DecodeComplexityOptions, %struct.MotionInfoFor16x16_FasterSP*, [1 x i32] }>
- %struct.FrameDataRateParameters = type { i32, float, i8, i8 }
- %struct.FrameEncoderTags = type { i8, i8, i32, i8, i8, float }
- %struct.FrameIndex = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i8, i8, i32, i32, %struct.Frame*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %struct.DHBFLayerId }
- %struct.FrameMotionAnalysis = type { i32, i32, i32, %struct.MoEstMotion16x16*, %struct.MbAnalysis*, i32, i32, i16, i16, i32, i32, i32, i32, i8, i8 }
- %struct.FrameMotionInfo = type { i32, i32, %struct.MoEstMbMotionInfo*, i32, i32, i32, i32, i32 }
- %struct.FrameMotionVectorCache = type <{ %struct.ThreadAllocator**, i32, i32, i32, %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor, [3 x %struct.BiPartSrcDescriptor*], %struct.BiPartSrcDescriptor** }>
- %struct.FramePixels = type <{ i8, i8, i8, i8, i8, i8, i8, i8, i8*, i8*, i32, [4 x i8*], [4 x i8*], [2 x [4 x i32]], [2 x [4 x i32]], %struct.PixelData, %struct.InterpolationCache*, %struct.InterpolationCache*, %struct.InterpolationCache*, [16 x i16], [16 x i16], [12 x i8], %"struct.PortableSInt32Array<4>", %"struct.PortableSInt32Array<8>", %struct.ICOffsetArraysY, %struct.UVSrcOffsetEtcX_Struct*, i32*, i32*, [3 x i32] }>
- %struct.ICOffsetArraysY = type { [21 x i32], [21 x i32], [4 x [21 x i32]] }
- %struct.InterpolationCache = type opaque
- %struct.LoopFilterInfo = type { %struct.BiPartSrcDescriptor**, i32, i32, i32, i32, i32*, i32, %"struct.LoopFilterInfo::SliceInfoStruct"*, i32, %struct.Mutex*, i16*, %struct.FramePixels*, i8*, i8*, i8*, i8*, i8*, %struct.PerMacroblockBoundaryStrengths*, %struct.Mutex*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i8, i8, i8*, i8*, i8, void (i8*, i8*, i32, i32, i32, i32, i32, i8*, i32)*, void (i8*, i8*, i32, i32, i32, i32, i32, i8*, i32, i8*)*, i32 }
- %"struct.LoopFilterInfo::SliceInfoStruct" = type { %"struct.LoopFilterInfo::SliceInfoStruct::LFDisableStats", i8, i8, i8, i8, [17 x %struct.Frame*], [17 x %struct.Frame*] }
- %"struct.LoopFilterInfo::SliceInfoStruct::LFDisableStats" = type { i32, i32 }
- %struct.LoopFilterParam = type { i32, %struct.LoopFilterInfo*, %struct.FramePixels*, %struct.FrameMotionVectorCache* }
- %struct.Map4x4ToPartIdx = type { i16 }
- %struct.MbAnalysis = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %struct.RdCost, %struct.RdCost, i32 }
- %struct.MoEstMbMotionInfo = type { i32, i32, i32, i32, [16 x %struct.MoEstPartMotionInfo] }
- %struct.MoEstMotion16x16 = type { [2 x i8], [2 x %struct.MotionVectors], i8, [3 x %struct.MoEstPredCost] }
- %struct.MoEstPartMotionInfo = type { i32, %struct.PartGeom, i32, i32, [2 x %struct.MotionVectors], [2 x i8], i16 }
- %struct.MoEstPredCost = type { i32, i16, i16 }
- %struct.MotionInfoFor16x16_FasterSP = type { [2 x %struct.MotionVectors], [2 x i8], i8, [2 x i32], i32, i32 }
- %struct.MotionVectors = type { %"struct.MotionVectors::$_103" }
- %"struct.MotionVectors::$_103" = type { i32 }
- %struct.Mutex = type opaque
- %struct.NoiseAnalysis = type { i16, i16, i32, i8*, i8*, i8*, [512 x i32] }
- %struct.PartGeom = type { %struct.Map4x4ToPartIdx }
- %struct.PerMacroblockBoundaryStrengths = type { [16 x i8], [16 x i8], [4 x i8], [4 x i8], [2 x i32] }
- %struct.PixelData = type { i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8, i8 }
- %"struct.PortableSInt32Array<4>" = type { [4 x i32] }
- %"struct.PortableSInt32Array<8>" = type { [8 x i32] }
- %struct.RdCost = type { i32, i32, i32, double }
- %struct.ThreadAllocator = type { %struct.DescriptorAllocator*, %struct.BiPartSrcDescriptor*, [256 x %struct.BiPartSrcDescriptor*], i32, i32, i32 }
- %struct.ThreadedBatch = type opaque
- %struct.UVSrcOffsetEtcX_Struct = type <{ i16 }>
- %struct.VisualActivity = type { i16, i16, i32, i32, i32*, i32*, i32, i32, i32*, i32, i32, i32, i32, i32, i8*, i32, [2 x i32], i32, i32, i32, i16*, i16, i16, i16, i16, float, i8*, i32*, i32, i32, i8 }
-@_ZL33table_8_14_indexA_to_alpha_scalar = external constant [64 x i8] ; <[64 x i8]*> [#uses=0]
-@_ZL32table_8_14_indexB_to_beta_scalar = external constant [64 x i8] ; <[64 x i8]*> [#uses=0]
-@_ZL34table_8_15_indexA_bS_to_tc0_scalar = external constant [64 x [4 x i8]] ; <[64 x [4 x i8]]*> [#uses=0]
-@gkDummy = external global i32 ; <i32*> [#uses=0]
-@gkDetailsFromSliceTypeArray = external constant [10 x %struct.DetailsFromSliceType] ; <[10 x %struct.DetailsFromSliceType]*> [#uses=0]
-
-declare i32 @_Z20LoopFilter_ConstructP14LoopFilterInfojj(%struct.LoopFilterInfo*, i32, i32)
-
-declare i32 @_Z25LF_Threading2_assert_doneP14LoopFilterInfo(%struct.LoopFilterInfo*) nounwind
-
-declare i32 @_Z54S_CalcIfLargeMVDeltaForBMbBothPredictionsFromSameFramePK19BiPartSrcDescriptorS1_ijj(%struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor*, i32, i32, i32) nounwind
-
-declare void @_Z30LoopFilter_Internal_FilterLumaPhiiiiii(i8*, i32, i32, i32, i32, i32, i32) nounwind
-
-declare void @_Z33LoopFilter_Internal_FilterChromaVPhiiiiiiiiii(i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind
-
-declare void @_Z33LoopFilter_Internal_FilterChromaHPhiiiiii(i8*, i32, i32, i32, i32, i32, i32) nounwind
-
-declare void @_Z42LoopFilter_Internal_filter_macroblock_lumaPK14LoopFilterInfoPhS2_iiiPK30PerMacroblockBoundaryStrengthsjj(%struct.LoopFilterInfo*, i8*, i8*, i32, i32, i32, %struct.PerMacroblockBoundaryStrengths*, i32, i32) nounwind
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32) nounwind
-
-declare i32 @_Z40LoopFilter_Internal_FilterLumaPlaneMBAFFPK14LoopFilterInfojjj(%struct.LoopFilterInfo*, i32, i32, i32) nounwind
-
-declare void @_Z18LoopFilter_DestroyP14LoopFilterInfo(%struct.LoopFilterInfo*)
-
-declare void @MutexDispose(%struct.Mutex*)
-
-declare void @_ZdaPv(i8*) nounwind
-
-declare void @jvtDisposePTRVectorAligned(i8*)
-
-declare void @jvtDisposePTR(i8*)
-
-declare void @jvtDisposePTRMemAligned(i8*)
-
-declare void @_Z31LoopFilter_Internal_ResetTablesP14LoopFilterInfo(%struct.LoopFilterInfo*) nounwind
-
-declare void @llvm.memset.i32(i8*, i8, i32, i32) nounwind
-
-define i32 @_Z60LoopFilter_Internal_CalculateBoundaryStrengths_MbaffFramePicPK14LoopFilterInfoP22FrameMotionVectorCachejj(%struct.LoopFilterInfo* %lfiPtr, %struct.FrameMotionVectorCache* %frameMotionVectorCachePtr, i32 %mbY_min, i32 %mbY_maxPlus1) nounwind {
-entry:
- icmp ult i32 %mbY_min, %mbY_maxPlus1 ; <i1>:0 [#uses=1]
- br i1 %0, label %bb16, label %bb642
-
-bb16: ; preds = %entry
- bitcast %struct.PerMacroblockBoundaryStrengths* null to i32* ; <i32*>:1 [#uses=3]
- getelementptr i32* %1, i32 1 ; <i32*>:2 [#uses=0]
- getelementptr i32* %1, i32 2 ; <i32*>:3 [#uses=0]
- getelementptr i32* %1, i32 3 ; <i32*>:4 [#uses=0]
- bitcast [16 x i8]* null to i32* ; <i32*>:5 [#uses=3]
- getelementptr i32* %5, i32 1 ; <i32*>:6 [#uses=0]
- getelementptr i32* %5, i32 2 ; <i32*>:7 [#uses=0]
- getelementptr i32* %5, i32 3 ; <i32*>:8 [#uses=0]
- icmp eq i32 0, 0 ; <i1>:9 [#uses=0]
- lshr i32 0, 30 ; <i32>:10 [#uses=0]
- and i32 0, 268435455 ; <i32>:11 [#uses=0]
- lshr i32 0, 28 ; <i32>:12 [#uses=1]
- and i32 %12, 3 ; <i32>:13 [#uses=0]
- and i32 0, 1 ; <i32>:14 [#uses=1]
- icmp eq i32 %14, 0 ; <i1>:15 [#uses=0]
- zext i8 0 to i32 ; <i32>:16 [#uses=1]
- %.not656 = icmp ne i32 0, 0 ; <i1> [#uses=1]
- icmp eq i8 0, 0 ; <i1>:17 [#uses=0]
- trunc i32 0 to i8 ; <i8>:18 [#uses=2]
- add i32 0, 1 ; <i32>:19 [#uses=1]
- %.not658 = icmp ne i32 0, 0 ; <i1> [#uses=1]
- and i32 0, 268369920 ; <i32>:20 [#uses=1]
- icmp eq i32 %20, 268369920 ; <i1>:21 [#uses=2]
- getelementptr %struct.PerMacroblockBoundaryStrengths* null, i32 0, i32 2 ; <[4 x i8]*>:22 [#uses=1]
- getelementptr %struct.PerMacroblockBoundaryStrengths* null, i32 0, i32 2, i32 0 ; <i8*>:23 [#uses=0]
- and i32 0, -2 ; <i32>:24 [#uses=1]
- add i32 %24, -1 ; <i32>:25 [#uses=0]
- bitcast [4 x i8]* %22 to i32* ; <i32*>:26 [#uses=3]
- getelementptr i32* %26, i32 1 ; <i32*>:27 [#uses=0]
- getelementptr i32* %26, i32 2 ; <i32*>:28 [#uses=0]
- getelementptr i32* %26, i32 3 ; <i32*>:29 [#uses=0]
- br label %bb144
-
-bb144: ; preds = %bb395, %bb16
- %idxEachField11.0773 = phi i32 [ 0, %bb16 ], [ %162, %bb395 ] ; <i32> [#uses=3]
- %mbYLeft.2776 = phi i32 [ 0, %bb16 ], [ %mbYLeft.2776, %bb395 ] ; <i32> [#uses=3]
- %mbXYLeft.2775 = phi i32 [ 0, %bb16 ], [ %mbXYLeft.2775, %bb395 ] ; <i32> [#uses=1]
- %mixedModeLeftEdgeOfMbFlag.2774 = phi i32 [ 0, %bb16 ], [ 0, %bb395 ] ; <i32> [#uses=0]
- %mbIndexLeft.2772 = phi i32 [ 0, %bb16 ], [ %mbIndexLeft.2772, %bb395 ] ; <i32> [#uses=2]
- %boundaryStrengthsV.1771 = phi i8* [ null, %bb16 ], [ %158, %bb395 ] ; <i8*> [#uses=2]
- %numEdgesToTest.1770 = phi i32 [ 4, %bb16 ], [ %numEdgesToTest.2, %bb395 ] ; <i32> [#uses=1]
- icmp eq i32 %idxEachField11.0773, 0 ; <i1>:30 [#uses=0]
- getelementptr %struct.BiPartSrcDescriptor** null, i32 %mbIndexLeft.2772 ; <%struct.BiPartSrcDescriptor**>:31 [#uses=1]
- load %struct.BiPartSrcDescriptor** %31, align 4 ; <%struct.BiPartSrcDescriptor*>:32 [#uses=0]
- %fMacroblockHasNonZeroBS.4 = select i1 %21, i32 1, i32 0 ; <i32> [#uses=1]
- %numEdgesToTest.2 = select i1 %21, i32 1, i32 %numEdgesToTest.1770 ; <i32> [#uses=2]
- store i8 32, i8* %boundaryStrengthsV.1771, align 1
- br label %labelContinueEdgesLoopV
-
-bb200: ; preds = %labelContinueEdgesLoopV
- lshr i32 %159, 28 ; <i32>:33 [#uses=2]
- and i32 %160, %16 ; <i32>:34 [#uses=1]
- icmp eq i32 %34, 0 ; <i1>:35 [#uses=0]
- icmp eq i32 %160, 0 ; <i1>:36 [#uses=3]
- zext i1 %36 to i32 ; <i32>:37 [#uses=1]
- or i32 %37, -1 ; <i32>:38 [#uses=1]
- or i32 %38, %33 ; <i32>:39 [#uses=1]
- icmp eq i32 %39, 0 ; <i1>:40 [#uses=1]
- br i1 %40, label %bb205, label %bb206
-
-bb205: ; preds = %bb200
- store i8 32, i8* %158, align 1
- br label %labelContinueEdgesLoopV
-
-bb206: ; preds = %bb200
- icmp eq i32 %33, 15 ; <i1>:41 [#uses=1]
- br i1 %41, label %labelContinueEdgesLoopV, label %bb210.preheader
-
-bb210.preheader: ; preds = %bb206
- add i32 %160, 0 ; <i32>:42 [#uses=2]
- %bothcond657 = and i1 %36, %.not656 ; <i1> [#uses=0]
- shl i32 %idxEachField11.0773, 1 ; <i32>:43 [#uses=1]
- add i32 %43, 0 ; <i32>:44 [#uses=0]
- shl i32 %mbYLeft.2776, 2 ; <i32>:45 [#uses=0]
- add i32 %42, -1 ; <i32>:46 [#uses=1]
- icmp eq i32 0, 0 ; <i1>:47 [#uses=1]
- %brmerge689.not = and i1 %47, false ; <i1> [#uses=0]
- %bothcond659 = and i1 %36, %.not658 ; <i1> [#uses=0]
- shl i32 %mbYLeft.2776, 1 ; <i32>:48 [#uses=1]
- or i32 %48, 0 ; <i32>:49 [#uses=1]
- shl i32 %49, 1 ; <i32>:50 [#uses=0]
- add i32 0, 0 ; <i32>:51 [#uses=2]
- mul i32 %51, 0 ; <i32>:52 [#uses=1]
- add i32 %52, %42 ; <i32>:53 [#uses=1]
- mul i32 %51, 0 ; <i32>:54 [#uses=1]
- add i32 %46, %54 ; <i32>:55 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor** null, i32 %53 ; <%struct.BiPartSrcDescriptor**>:56 [#uses=1]
- load %struct.BiPartSrcDescriptor** %56, align 4 ; <%struct.BiPartSrcDescriptor*>:57 [#uses=7]
- getelementptr %struct.BiPartSrcDescriptor** null, i32 %55 ; <%struct.BiPartSrcDescriptor**>:58 [#uses=1]
- load %struct.BiPartSrcDescriptor** %58, align 4 ; <%struct.BiPartSrcDescriptor*>:59 [#uses=5]
- icmp slt i32 %159, 0 ; <i1>:60 [#uses=0]
- icmp eq %struct.BiPartSrcDescriptor* %57, %59 ; <i1>:61 [#uses=0]
- bitcast %struct.BiPartSrcDescriptor* %57 to i16* ; <i16*>:62 [#uses=5]
- load i16* %62, align 2 ; <i16>:63 [#uses=2]
- getelementptr i16* %62, i32 1 ; <i16*>:64 [#uses=1]
- load i16* %64, align 2 ; <i16>:65 [#uses=2]
- getelementptr i16* %62, i32 2 ; <i16*>:66 [#uses=1]
- load i16* %66, align 2 ; <i16>:67 [#uses=2]
- getelementptr i16* %62, i32 3 ; <i16*>:68 [#uses=1]
- load i16* %68, align 2 ; <i16>:69 [#uses=2]
- getelementptr i16* %62, i32 6 ; <i16*>:70 [#uses=1]
- load i16* %70, align 2 ; <i16>:71 [#uses=2]
- bitcast %struct.BiPartSrcDescriptor* %59 to i16* ; <i16*>:72 [#uses=5]
- load i16* %72, align 2 ; <i16>:73 [#uses=2]
- getelementptr i16* %72, i32 1 ; <i16*>:74 [#uses=1]
- load i16* %74, align 2 ; <i16>:75 [#uses=2]
- getelementptr i16* %72, i32 2 ; <i16*>:76 [#uses=1]
- load i16* %76, align 2 ; <i16>:77 [#uses=2]
- getelementptr i16* %72, i32 3 ; <i16*>:78 [#uses=1]
- load i16* %78, align 2 ; <i16>:79 [#uses=2]
- getelementptr i16* %72, i32 6 ; <i16*>:80 [#uses=1]
- load i16* %80, align 2 ; <i16>:81 [#uses=2]
- sub i16 %63, %73 ; <i16>:82 [#uses=3]
- sub i16 %65, %75 ; <i16>:83 [#uses=3]
- sub i16 %67, %77 ; <i16>:84 [#uses=3]
- sub i16 %69, %79 ; <i16>:85 [#uses=3]
- sub i16 %71, %81 ; <i16>:86 [#uses=3]
- sub i16 0, %82 ; <i16>:87 [#uses=1]
- icmp slt i16 %82, 0 ; <i1>:88 [#uses=1]
- %. = select i1 %88, i16 %87, i16 %82 ; <i16> [#uses=1]
- sub i16 0, %83 ; <i16>:89 [#uses=1]
- icmp slt i16 %83, 0 ; <i1>:90 [#uses=1]
- %.660 = select i1 %90, i16 %89, i16 %83 ; <i16> [#uses=1]
- sub i16 0, %84 ; <i16>:91 [#uses=1]
- icmp slt i16 %84, 0 ; <i1>:92 [#uses=1]
- %.661 = select i1 %92, i16 %91, i16 %84 ; <i16> [#uses=1]
- sub i16 0, %85 ; <i16>:93 [#uses=1]
- icmp slt i16 %85, 0 ; <i1>:94 [#uses=1]
- %.662 = select i1 %94, i16 %93, i16 %85 ; <i16> [#uses=1]
- sub i16 0, %86 ; <i16>:95 [#uses=1]
- icmp slt i16 %86, 0 ; <i1>:96 [#uses=1]
- %.663 = select i1 %96, i16 %95, i16 %86 ; <i16> [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 1, i32 0 ; <i8*>:97 [#uses=1]
- load i8* %97, align 1 ; <i8>:98 [#uses=1]
- zext i8 %98 to i32 ; <i32>:99 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 1, i32 1 ; <i8*>:100 [#uses=1]
- load i8* %100, align 1 ; <i8>:101 [#uses=1]
- zext i8 %101 to i32 ; <i32>:102 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 3, i32 0 ; <i8*>:103 [#uses=1]
- load i8* %103, align 1 ; <i8>:104 [#uses=2]
- zext i8 %104 to i32 ; <i32>:105 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %59, i32 0, i32 0, i32 0, i32 3, i32 0 ; <i8*>:106 [#uses=1]
- load i8* %106, align 1 ; <i8>:107 [#uses=2]
- zext i8 %107 to i32 ; <i32>:108 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 3, i32 1 ; <i8*>:109 [#uses=1]
- load i8* %109, align 1 ; <i8>:110 [#uses=1]
- zext i8 %110 to i32 ; <i32>:111 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %59, i32 0, i32 0, i32 0, i32 3, i32 1 ; <i8*>:112 [#uses=1]
- load i8* %112, align 1 ; <i8>:113 [#uses=1]
- zext i8 %113 to i32 ; <i32>:114 [#uses=1]
- lshr i32 %99, 4 ; <i32>:115 [#uses=1]
- and i32 %115, 2 ; <i32>:116 [#uses=1]
- lshr i32 %102, 5 ; <i32>:117 [#uses=1]
- or i32 %116, %117 ; <i32>:118 [#uses=3]
- icmp eq i32 %118, 0 ; <i1>:119 [#uses=0]
- icmp eq i32 %118, 1 ; <i1>:120 [#uses=1]
- br i1 %120, label %bb297, label %bb298
-
-bb297: ; preds = %bb210.preheader
- br label %bb298
-
-bb298: ; preds = %bb297, %bb210.preheader
- %vu8Mask_0.1 = phi i8 [ -1, %bb297 ], [ 0, %bb210.preheader ] ; <i8> [#uses=1]
- %vu8Mask_1.1 = phi i8 [ -1, %bb297 ], [ 0, %bb210.preheader ] ; <i8> [#uses=1]
- %vu8Mask_2.1 = phi i8 [ -1, %bb297 ], [ 0, %bb210.preheader ] ; <i8> [#uses=0]
- %vu8Mask_3.1 = phi i8 [ -1, %bb297 ], [ 0, %bb210.preheader ] ; <i8> [#uses=1]
- %vu8Mask_4.1 = phi i8 [ 0, %bb297 ], [ 0, %bb210.preheader ] ; <i8> [#uses=0]
- %vu8Mask_5.1 = phi i8 [ 0, %bb297 ], [ 0, %bb210.preheader ] ; <i8> [#uses=1]
- %vu8Mask_6.1 = phi i8 [ 0, %bb297 ], [ 0, %bb210.preheader ] ; <i8> [#uses=0]
- %vu8Mask_7.1 = phi i8 [ 0, %bb297 ], [ 0, %bb210.preheader ] ; <i8> [#uses=1]
- %vu8Mask_12.1 = phi i8 [ -1, %bb297 ], [ 0, %bb210.preheader ] ; <i8> [#uses=0]
- %vu8Mask_13.1 = phi i8 [ -1, %bb297 ], [ 0, %bb210.preheader ] ; <i8> [#uses=0]
- icmp eq i32 %118, 2 ; <i1>:121 [#uses=0]
- and i8 %vu8Mask_1.1, 3 ; <i8>:122 [#uses=0]
- and i8 %vu8Mask_5.1, 3 ; <i8>:123 [#uses=0]
- and i8 %vu8Mask_3.1, %18 ; <i8>:124 [#uses=0]
- and i8 %vu8Mask_7.1, %18 ; <i8>:125 [#uses=0]
- icmp eq i8 %104, %107 ; <i1>:126 [#uses=1]
- br i1 %126, label %bb328, label %bb303
-
-bb303: ; preds = %bb298
- call i16 @llvm.bswap.i16( i16 %81 ) ; <i16>:127 [#uses=1]
- sub i16 %63, %77 ; <i16>:128 [#uses=3]
- sub i16 %65, %79 ; <i16>:129 [#uses=3]
- sub i16 %67, %73 ; <i16>:130 [#uses=3]
- sub i16 %69, %75 ; <i16>:131 [#uses=3]
- sub i16 %71, %127 ; <i16>:132 [#uses=3]
- sub i16 0, %128 ; <i16>:133 [#uses=1]
- icmp slt i16 %128, 0 ; <i1>:134 [#uses=1]
- %.673 = select i1 %134, i16 %133, i16 %128 ; <i16> [#uses=1]
- sub i16 0, %129 ; <i16>:135 [#uses=1]
- icmp slt i16 %129, 0 ; <i1>:136 [#uses=1]
- %.674 = select i1 %136, i16 %135, i16 %129 ; <i16> [#uses=1]
- sub i16 0, %130 ; <i16>:137 [#uses=1]
- icmp slt i16 %130, 0 ; <i1>:138 [#uses=1]
- %.675 = select i1 %138, i16 %137, i16 %130 ; <i16> [#uses=1]
- sub i16 0, %131 ; <i16>:139 [#uses=1]
- icmp slt i16 %131, 0 ; <i1>:140 [#uses=1]
- %.676 = select i1 %140, i16 %139, i16 %131 ; <i16> [#uses=1]
- sub i16 0, %132 ; <i16>:141 [#uses=1]
- icmp slt i16 %132, 0 ; <i1>:142 [#uses=1]
- %.677 = select i1 %142, i16 %141, i16 %132 ; <i16> [#uses=1]
- br label %bb328
-
-bb328: ; preds = %bb303, %bb298
- %vu16Delta_0.0 = phi i16 [ %.673, %bb303 ], [ %., %bb298 ] ; <i16> [#uses=1]
- %vu16Delta_1.0 = phi i16 [ %.674, %bb303 ], [ %.660, %bb298 ] ; <i16> [#uses=0]
- %vu16Delta_2.0 = phi i16 [ %.675, %bb303 ], [ %.661, %bb298 ] ; <i16> [#uses=0]
- %vu16Delta_3.0 = phi i16 [ %.676, %bb303 ], [ %.662, %bb298 ] ; <i16> [#uses=0]
- %vu16Delta_6.0 = phi i16 [ %.677, %bb303 ], [ %.663, %bb298 ] ; <i16> [#uses=0]
- lshr i16 %vu16Delta_0.0, 8 ; <i16>:143 [#uses=1]
- trunc i16 %143 to i8 ; <i8>:144 [#uses=1]
- and i8 %144, %vu8Mask_0.1 ; <i8>:145 [#uses=1]
- icmp eq i8 %145, 0 ; <i1>:146 [#uses=0]
- sub i32 %105, %114 ; <i32>:147 [#uses=1]
- sub i32 %111, %108 ; <i32>:148 [#uses=1]
- or i32 %147, %148 ; <i32>:149 [#uses=1]
- icmp eq i32 %149, 0 ; <i1>:150 [#uses=0]
- call i32 @_Z54S_CalcIfLargeMVDeltaForBMbBothPredictionsFromSameFramePK19BiPartSrcDescriptorS1_ijj( %struct.BiPartSrcDescriptor* %57, %struct.BiPartSrcDescriptor* %59, i32 %19, i32 0, i32 0 ) nounwind ; <i32>:151 [#uses=0]
- unreachable
-
-labelContinueEdgesLoopV: ; preds = %bb206, %bb205, %bb144
- %fEdgeHasNonZeroBS.0 = phi i32 [ 0, %bb205 ], [ 0, %bb144 ], [ 1, %bb206 ] ; <i32> [#uses=2]
- %fMacroblockHasNonZeroBS.6 = phi i32 [ %152, %bb205 ], [ %fMacroblockHasNonZeroBS.4, %bb144 ], [ %152, %bb206 ] ; <i32> [#uses=1]
- %ixEdge.1 = phi i32 [ %160, %bb205 ], [ 0, %bb144 ], [ %160, %bb206 ] ; <i32> [#uses=1]
- %bfNZ12.2 = phi i32 [ %159, %bb205 ], [ 0, %bb144 ], [ %159, %bb206 ] ; <i32> [#uses=1]
- %boundaryStrengthsV.3 = phi i8* [ %158, %bb205 ], [ %boundaryStrengthsV.1771, %bb144 ], [ %158, %bb206 ] ; <i8*> [#uses=3]
- or i32 %fMacroblockHasNonZeroBS.6, %fEdgeHasNonZeroBS.0 ; <i32>:152 [#uses=2]
- load i8* %boundaryStrengthsV.3, align 1 ; <i8>:153 [#uses=1]
- trunc i32 %fEdgeHasNonZeroBS.0 to i8 ; <i8>:154 [#uses=1]
- shl i8 %154, 5 ; <i8>:155 [#uses=1]
- xor i8 %155, 32 ; <i8>:156 [#uses=1]
- or i8 %153, %156 ; <i8>:157 [#uses=1]
- store i8 %157, i8* %boundaryStrengthsV.3, align 1
- getelementptr i8* %boundaryStrengthsV.3, i32 4 ; <i8*>:158 [#uses=4]
- shl i32 %bfNZ12.2, 4 ; <i32>:159 [#uses=4]
- add i32 %ixEdge.1, 1 ; <i32>:160 [#uses=6]
- icmp ult i32 %160, %numEdgesToTest.2 ; <i1>:161 [#uses=1]
- br i1 %161, label %bb200, label %bb395
-
-bb395: ; preds = %labelContinueEdgesLoopV
- add i32 %idxEachField11.0773, 1 ; <i32>:162 [#uses=2]
- icmp ugt i32 %162, 0 ; <i1>:163 [#uses=1]
- br i1 %163, label %bb398, label %bb144
-
-bb398: ; preds = %bb395
- call void asm sideeffect "dcbt $0, $1", "b%,r,~{memory}"( i32 19, i32* null ) nounwind
- unreachable
-
-bb642: ; preds = %entry
- ret i32 0
-}
-
-declare i16 @llvm.bswap.i16(i16) nounwind readnone
-
-declare i8* @jvtNewPtrVectorAligned(i32)
-
-declare i8* @jvtNewPtr(i32)
-
-declare i8* @jvtNewPtrMemAligned(i32)
-
-declare %struct.Mutex* @MutexNew()
-
-declare i8* @_Znam(i32)
-
-declare i32 @_Z24LoopFilter_FilterMbGroupP14LoopFilterInfoP11FramePixelsP22FrameMotionVectorCacheP19ThreadedBatchStructjjij(%struct.LoopFilterInfo*, %struct.FramePixels*, %struct.FrameMotionVectorCache*, %struct.ThreadedBatch*, i32, i32, i32, i32)
-
-declare void @MutexLock(%struct.Mutex*)
-
-declare void @MutexUnlock(%struct.Mutex*)
-
-declare i32 @_Z35LoopFilter_Internal_FilterLumaPlanePK14LoopFilterInfojjjjj(%struct.LoopFilterInfo*, i32, i32, i32, i32, i32)
-
-declare i32 @_Z37LoopFilter_Internal_FilterChromaPlanePK14LoopFilterInfojjjjj(%struct.LoopFilterInfo*, i32, i32, i32, i32, i32)
-
-declare void @_Z44LoopFilter_Internal_filter_macroblock_chromaPK14LoopFilterInfoPhS2_iiiPK30PerMacroblockBoundaryStrengthsjj(%struct.LoopFilterInfo*, i8*, i8*, i32, i32, i32, %struct.PerMacroblockBoundaryStrengths*, i32, i32) nounwind
-
-declare i32 @_Z42LoopFilter_Internal_FilterChromaPlaneMBAFFPK14LoopFilterInfojjj(%struct.LoopFilterInfo*, i32, i32, i32) nounwind
-
-declare i32 @_Z26LF_Threading2_ProcessTasksP14LoopFilterInfoP11FramePixelsP22FrameMotionVectorCacheij(%struct.LoopFilterInfo*, %struct.FramePixels*, %struct.FrameMotionVectorCache*, i32, i32)
-
-declare i32 @_Z46LoopFilter_Internal_CalculateBoundaryStrengthsPK14LoopFilterInfoP22FrameMotionVectorCachejj(%struct.LoopFilterInfo*, %struct.FrameMotionVectorCache*, i32, i32)
-
-declare i32 @_Z44LoopFilter_Internal_FilterLumaChromaPlane_PPP14LoopFilterInfojjjjj(%struct.LoopFilterInfo*, i32, i32, i32, i32, i32)
-
-declare i32 @_Z22LoopFilter_FilterFrameP14LoopFilterInfoP11FramePixelsP22FrameMotionVectorCacheP19ThreadedBatchStructij(%struct.LoopFilterInfo*, %struct.FramePixels*, %struct.FrameMotionVectorCache*, %struct.ThreadedBatch*, i32, i32)
-
-declare void @_Z34LF_Threading2_ProcessTasks_WrapperPv(i8*)
-
-declare void @llvm.memset.i64(i8*, i8, i64, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-15-Fabs.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-15-Fabs.ll
deleted file mode 100644
index 17737d9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-15-Fabs.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc-apple-darwin9"
-
-define hidden i256 @__divtc3(ppc_fp128 %a, ppc_fp128 %b, ppc_fp128 %c, ppc_fp128 %d) nounwind readnone {
-entry:
- call ppc_fp128 @fabsl( ppc_fp128 %d ) nounwind readnone ; <ppc_fp128>:0 [#uses=1]
- fcmp olt ppc_fp128 0xM00000000000000000000000000000000, %0 ; <i1>:1 [#uses=1]
- %.pn106 = select i1 %1, ppc_fp128 %a, ppc_fp128 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1]
- %.pn = fsub ppc_fp128 0xM00000000000000000000000000000000, %.pn106 ; <ppc_fp128> [#uses=1]
- %y.0 = fdiv ppc_fp128 %.pn, 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1]
- fmul ppc_fp128 %y.0, 0xM3FF00000000000000000000000000000 ; <ppc_fp128>:2 [#uses=1]
- fadd ppc_fp128 %2, fmul (ppc_fp128 0xM00000000000000000000000000000000, ppc_fp128 0xM00000000000000000000000000000000) ; <ppc_fp128>:3 [#uses=1]
- %tmpi = fadd ppc_fp128 %3, 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmpi, ppc_fp128* null, align 16
- ret i256 0
-}
-
-declare ppc_fp128 @fabsl(ppc_fp128) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-15-SignExtendInreg.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-15-SignExtendInreg.ll
deleted file mode 100644
index 5cd8c34..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-15-SignExtendInreg.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc-apple-darwin9"
-
-define i16 @t(i16* %dct) signext nounwind {
-entry:
- load i16* null, align 2 ; <i16>:0 [#uses=2]
- lshr i16 %0, 11 ; <i16>:1 [#uses=0]
- trunc i16 %0 to i8 ; <i8>:2 [#uses=1]
- sext i8 %2 to i16 ; <i16>:3 [#uses=1]
- add i16 0, %3 ; <i16>:4 [#uses=1]
- sext i16 %4 to i32 ; <i32>:5 [#uses=1]
- %dcval.0.in = shl i32 %5, 0 ; <i32> [#uses=1]
- %dcval.0 = trunc i32 %dcval.0.in to i16 ; <i16> [#uses=1]
- store i16 %dcval.0, i16* %dct, align 2
- ret i16 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-17-Fneg.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-17-Fneg.ll
deleted file mode 100644
index dc1e936..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-17-Fneg.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc-apple-darwin9"
-
-define hidden i64 @__fixunstfdi(ppc_fp128 %a) nounwind {
-entry:
- br i1 false, label %bb3, label %bb4
-
-bb3: ; preds = %entry
- fsub ppc_fp128 0xM80000000000000000000000000000000, 0xM00000000000000000000000000000000 ; <ppc_fp128>:0 [#uses=1]
- fptoui ppc_fp128 %0 to i32 ; <i32>:1 [#uses=1]
- zext i32 %1 to i64 ; <i64>:2 [#uses=1]
- sub i64 0, %2 ; <i64>:3 [#uses=1]
- ret i64 %3
-
-bb4: ; preds = %entry
- ret i64 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-24-PPC64-CCBug.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-24-PPC64-CCBug.ll
deleted file mode 100644
index c9c05e1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-07-24-PPC64-CCBug.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc64-apple-darwin | grep lwz | grep 228
-
-@"\01LC" = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
-
-define void @llvm_static_func(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, i32 %a10, i32 %a11, i32 %a12, i32 %a13, i32 %a14, i32 %a15) nounwind {
-entry:
- tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i64 0), i32 %a8 ) nounwind ; <i32>:0 [#uses=0]
- ret void
-}
-
-declare i32 @printf(i8*, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll
deleted file mode 100644
index 97844dd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll
+++ /dev/null
@@ -1,254 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin
-
- %struct.CGLDI = type { %struct.cgli*, i32, i32, i32, i32, i32, i8*, i32, void (%struct.CGLSI*, i32, %struct.CGLDI*)*, i8*, %struct.vv_t }
- %struct.cgli = type { i32, %struct.cgli*, void (%struct.cgli*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32)*, i32, i8*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, i8*, i32*, %struct._cgro*, %struct._cgro*, float, float, float, float, i32, i8*, float, i8*, [16 x i32] }
- %struct.CGLSI = type { %struct.cgli*, i32, i8*, i8*, i32, i32, i8*, void (%struct.cgli*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32)*, %struct.vv_t, %struct.vv_t, %struct.xx_t* }
- %struct._cgro = type opaque
- %struct.xx_t = type { [3 x %struct.vv_t], [2 x %struct.vv_t], [2 x [3 x i8*]] }
- %struct.vv_t = type { <16 x i8> }
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (void (%struct.CGLSI*, i32, %struct.CGLDI*)* @lb to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define void @lb(%struct.CGLSI* %src, i32 %n, %struct.CGLDI* %dst) nounwind {
-entry:
- %0 = load i32* null, align 4 ; <i32> [#uses=1]
- %1 = icmp sgt i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %bb.nph4945, label %return
-
-bb.nph4945: ; preds = %entry
- %2 = bitcast [2 x %struct.vv_t]* null to i64* ; <i64*> [#uses=6]
- %3 = getelementptr [2 x i64]* null, i32 0, i32 1 ; <i64*> [#uses=6]
- %4 = bitcast %struct.vv_t* null to i64* ; <i64*> [#uses=5]
- %5 = getelementptr [2 x i64]* null, i32 0, i32 1 ; <i64*> [#uses=3]
- br label %bb2326
-
-bb2217: ; preds = %bb2326
- %6 = or i64 0, 0 ; <i64> [#uses=2]
- %7 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %8 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %9 = getelementptr float* null, i32 2 ; <float*> [#uses=1]
- %10 = load float* %9, align 4 ; <float> [#uses=1]
- %11 = getelementptr float* null, i32 3 ; <float*> [#uses=1]
- %12 = load float* %11, align 4 ; <float> [#uses=1]
- %13 = fmul float %10, 6.553500e+04 ; <float> [#uses=1]
- %14 = fadd float %13, 5.000000e-01 ; <float> [#uses=1]
- %15 = fmul float %12, 6.553500e+04 ; <float> [#uses=1]
- %16 = fadd float %15, 5.000000e-01 ; <float> [#uses=3]
- %17 = fcmp olt float %14, 0.000000e+00 ; <i1> [#uses=0]
- %18 = fcmp olt float %16, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %18, label %bb2265, label %bb2262
-
-bb2262: ; preds = %bb2217
- %19 = fcmp ogt float %16, 6.553500e+04 ; <i1> [#uses=1]
- br i1 %19, label %bb2264, label %bb2265
-
-bb2264: ; preds = %bb2262
- br label %bb2265
-
-bb2265: ; preds = %bb2264, %bb2262, %bb2217
- %f3596.0 = phi float [ 6.553500e+04, %bb2264 ], [ 0.000000e+00, %bb2217 ], [ %16, %bb2262 ] ; <float> [#uses=1]
- %20 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %21 = fptosi float %f3596.0 to i32 ; <i32> [#uses=1]
- %22 = zext i32 %7 to i64 ; <i64> [#uses=1]
- %23 = shl i64 %22, 48 ; <i64> [#uses=1]
- %24 = zext i32 %8 to i64 ; <i64> [#uses=1]
- %25 = shl i64 %24, 32 ; <i64> [#uses=1]
- %26 = sext i32 %20 to i64 ; <i64> [#uses=1]
- %27 = shl i64 %26, 16 ; <i64> [#uses=1]
- %28 = sext i32 %21 to i64 ; <i64> [#uses=1]
- %29 = or i64 %25, %23 ; <i64> [#uses=1]
- %30 = or i64 %29, %27 ; <i64> [#uses=1]
- %31 = or i64 %30, %28 ; <i64> [#uses=2]
- %32 = shl i64 %6, 48 ; <i64> [#uses=1]
- %33 = shl i64 %31, 32 ; <i64> [#uses=1]
- %34 = and i64 %33, 281470681743360 ; <i64> [#uses=1]
- store i64 %6, i64* %2, align 16
- store i64 %31, i64* %3, align 8
- %35 = getelementptr i8* null, i32 0 ; <i8*> [#uses=1]
- %36 = bitcast i8* %35 to float* ; <float*> [#uses=4]
- %37 = load float* %36, align 4 ; <float> [#uses=1]
- %38 = getelementptr float* %36, i32 1 ; <float*> [#uses=1]
- %39 = load float* %38, align 4 ; <float> [#uses=1]
- %40 = fmul float %37, 6.553500e+04 ; <float> [#uses=1]
- %41 = fadd float %40, 5.000000e-01 ; <float> [#uses=1]
- %42 = fmul float %39, 6.553500e+04 ; <float> [#uses=1]
- %43 = fadd float %42, 5.000000e-01 ; <float> [#uses=3]
- %44 = fcmp olt float %41, 0.000000e+00 ; <i1> [#uses=0]
- %45 = fcmp olt float %43, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %45, label %bb2277, label %bb2274
-
-bb2274: ; preds = %bb2265
- %46 = fcmp ogt float %43, 6.553500e+04 ; <i1> [#uses=0]
- br label %bb2277
-
-bb2277: ; preds = %bb2274, %bb2265
- %f1582.0 = phi float [ 0.000000e+00, %bb2265 ], [ %43, %bb2274 ] ; <float> [#uses=1]
- %47 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %48 = fptosi float %f1582.0 to i32 ; <i32> [#uses=1]
- %49 = getelementptr float* %36, i32 2 ; <float*> [#uses=1]
- %50 = load float* %49, align 4 ; <float> [#uses=1]
- %51 = getelementptr float* %36, i32 3 ; <float*> [#uses=1]
- %52 = load float* %51, align 4 ; <float> [#uses=1]
- %53 = fmul float %50, 6.553500e+04 ; <float> [#uses=1]
- %54 = fadd float %53, 5.000000e-01 ; <float> [#uses=1]
- %55 = fmul float %52, 6.553500e+04 ; <float> [#uses=1]
- %56 = fadd float %55, 5.000000e-01 ; <float> [#uses=1]
- %57 = fcmp olt float %54, 0.000000e+00 ; <i1> [#uses=0]
- %58 = fcmp olt float %56, 0.000000e+00 ; <i1> [#uses=0]
- %59 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %60 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %61 = zext i32 %47 to i64 ; <i64> [#uses=1]
- %62 = shl i64 %61, 48 ; <i64> [#uses=1]
- %63 = zext i32 %48 to i64 ; <i64> [#uses=1]
- %64 = shl i64 %63, 32 ; <i64> [#uses=1]
- %65 = sext i32 %59 to i64 ; <i64> [#uses=1]
- %66 = shl i64 %65, 16 ; <i64> [#uses=1]
- %67 = sext i32 %60 to i64 ; <i64> [#uses=1]
- %68 = or i64 %64, %62 ; <i64> [#uses=1]
- %69 = or i64 %68, %66 ; <i64> [#uses=1]
- %70 = or i64 %69, %67 ; <i64> [#uses=2]
- %71 = getelementptr i8* null, i32 0 ; <i8*> [#uses=1]
- %72 = bitcast i8* %71 to float* ; <float*> [#uses=4]
- %73 = load float* %72, align 4 ; <float> [#uses=1]
- %74 = getelementptr float* %72, i32 1 ; <float*> [#uses=1]
- %75 = load float* %74, align 4 ; <float> [#uses=1]
- %76 = fmul float %73, 6.553500e+04 ; <float> [#uses=1]
- %77 = fadd float %76, 5.000000e-01 ; <float> [#uses=3]
- %78 = fmul float %75, 6.553500e+04 ; <float> [#uses=1]
- %79 = fadd float %78, 5.000000e-01 ; <float> [#uses=1]
- %80 = fcmp olt float %77, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %80, label %bb2295, label %bb2292
-
-bb2292: ; preds = %bb2277
- %81 = fcmp ogt float %77, 6.553500e+04 ; <i1> [#uses=1]
- br i1 %81, label %bb2294, label %bb2295
-
-bb2294: ; preds = %bb2292
- br label %bb2295
-
-bb2295: ; preds = %bb2294, %bb2292, %bb2277
- %f0569.0 = phi float [ 6.553500e+04, %bb2294 ], [ 0.000000e+00, %bb2277 ], [ %77, %bb2292 ] ; <float> [#uses=1]
- %82 = fcmp olt float %79, 0.000000e+00 ; <i1> [#uses=0]
- %83 = fptosi float %f0569.0 to i32 ; <i32> [#uses=1]
- %84 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %85 = getelementptr float* %72, i32 2 ; <float*> [#uses=1]
- %86 = load float* %85, align 4 ; <float> [#uses=1]
- %87 = getelementptr float* %72, i32 3 ; <float*> [#uses=1]
- %88 = load float* %87, align 4 ; <float> [#uses=1]
- %89 = fmul float %86, 6.553500e+04 ; <float> [#uses=1]
- %90 = fadd float %89, 5.000000e-01 ; <float> [#uses=1]
- %91 = fmul float %88, 6.553500e+04 ; <float> [#uses=1]
- %92 = fadd float %91, 5.000000e-01 ; <float> [#uses=1]
- %93 = fcmp olt float %90, 0.000000e+00 ; <i1> [#uses=0]
- %94 = fcmp olt float %92, 0.000000e+00 ; <i1> [#uses=0]
- %95 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %96 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %97 = zext i32 %83 to i64 ; <i64> [#uses=1]
- %98 = shl i64 %97, 48 ; <i64> [#uses=1]
- %99 = zext i32 %84 to i64 ; <i64> [#uses=1]
- %100 = shl i64 %99, 32 ; <i64> [#uses=1]
- %101 = sext i32 %95 to i64 ; <i64> [#uses=1]
- %102 = shl i64 %101, 16 ; <i64> [#uses=1]
- %103 = sext i32 %96 to i64 ; <i64> [#uses=1]
- %104 = or i64 %100, %98 ; <i64> [#uses=1]
- %105 = or i64 %104, %102 ; <i64> [#uses=1]
- %106 = or i64 %105, %103 ; <i64> [#uses=2]
- %107 = shl i64 %70, 16 ; <i64> [#uses=1]
- %108 = and i64 %107, 4294901760 ; <i64> [#uses=1]
- %109 = and i64 %106, 65535 ; <i64> [#uses=1]
- %110 = or i64 %34, %32 ; <i64> [#uses=1]
- %111 = or i64 %110, %108 ; <i64> [#uses=1]
- %112 = or i64 %111, %109 ; <i64> [#uses=1]
- store i64 %70, i64* %4, align 16
- store i64 %106, i64* %5, align 8
- %113 = icmp eq i64 %112, 0 ; <i1> [#uses=1]
- br i1 %113, label %bb2325, label %bb2315
-
-bb2315: ; preds = %bb2295
- %114 = icmp eq %struct.xx_t* %159, null ; <i1> [#uses=1]
- br i1 %114, label %bb2318, label %bb2317
-
-bb2317: ; preds = %bb2315
- %115 = load i64* %2, align 16 ; <i64> [#uses=1]
- %116 = call i32 (...)* @_u16a_cm( i64 %115, %struct.xx_t* %159, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=1]
- %117 = sext i32 %116 to i64 ; <i64> [#uses=1]
- store i64 %117, i64* %2, align 16
- %118 = load i64* %3, align 8 ; <i64> [#uses=1]
- %119 = call i32 (...)* @_u16a_cm( i64 %118, %struct.xx_t* %159, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=1]
- %120 = sext i32 %119 to i64 ; <i64> [#uses=1]
- store i64 %120, i64* %3, align 8
- %121 = load i64* %4, align 16 ; <i64> [#uses=1]
- %122 = call i32 (...)* @_u16a_cm( i64 %121, %struct.xx_t* %159, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=1]
- %123 = sext i32 %122 to i64 ; <i64> [#uses=1]
- store i64 %123, i64* %4, align 16
- %124 = load i64* %5, align 8 ; <i64> [#uses=1]
- %125 = call i32 (...)* @_u16a_cm( i64 %124, %struct.xx_t* %159, double 0.000000e+00, double 1.047551e+06 ) nounwind ; <i32> [#uses=0]
- unreachable
-
-bb2318: ; preds = %bb2315
- %126 = getelementptr %struct.CGLSI* %src, i32 %indvar5021, i32 8 ; <%struct.vv_t*> [#uses=1]
- %127 = bitcast %struct.vv_t* %126 to i64* ; <i64*> [#uses=1]
- %128 = load i64* %127, align 8 ; <i64> [#uses=1]
- %129 = trunc i64 %128 to i32 ; <i32> [#uses=4]
- %130 = load i64* %2, align 16 ; <i64> [#uses=1]
- %131 = call i32 (...)* @_u16_ff( i64 %130, i32 %129 ) nounwind ; <i32> [#uses=1]
- %132 = sext i32 %131 to i64 ; <i64> [#uses=1]
- store i64 %132, i64* %2, align 16
- %133 = load i64* %3, align 8 ; <i64> [#uses=1]
- %134 = call i32 (...)* @_u16_ff( i64 %133, i32 %129 ) nounwind ; <i32> [#uses=1]
- %135 = sext i32 %134 to i64 ; <i64> [#uses=1]
- store i64 %135, i64* %3, align 8
- %136 = load i64* %4, align 16 ; <i64> [#uses=1]
- %137 = call i32 (...)* @_u16_ff( i64 %136, i32 %129 ) nounwind ; <i32> [#uses=1]
- %138 = sext i32 %137 to i64 ; <i64> [#uses=1]
- store i64 %138, i64* %4, align 16
- %139 = load i64* %5, align 8 ; <i64> [#uses=1]
- %140 = call i32 (...)* @_u16_ff( i64 %139, i32 %129 ) nounwind ; <i32> [#uses=0]
- unreachable
-
-bb2319: ; preds = %bb2326
- %141 = getelementptr %struct.CGLSI* %src, i32 %indvar5021, i32 2 ; <i8**> [#uses=1]
- %142 = load i8** %141, align 4 ; <i8*> [#uses=4]
- %143 = getelementptr i8* %142, i32 0 ; <i8*> [#uses=1]
- %144 = call i32 (...)* @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %143 ) nounwind ; <i32> [#uses=1]
- %145 = sext i32 %144 to i64 ; <i64> [#uses=2]
- %146 = getelementptr i8* %142, i32 0 ; <i8*> [#uses=1]
- %147 = call i32 (...)* @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %146 ) nounwind ; <i32> [#uses=1]
- %148 = sext i32 %147 to i64 ; <i64> [#uses=2]
- %149 = shl i64 %145, 48 ; <i64> [#uses=0]
- %150 = shl i64 %148, 32 ; <i64> [#uses=1]
- %151 = and i64 %150, 281470681743360 ; <i64> [#uses=0]
- store i64 %145, i64* %2, align 16
- store i64 %148, i64* %3, align 8
- %152 = getelementptr i8* %142, i32 0 ; <i8*> [#uses=1]
- %153 = call i32 (...)* @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %152 ) nounwind ; <i32> [#uses=1]
- %154 = sext i32 %153 to i64 ; <i64> [#uses=0]
- %155 = getelementptr i8* %142, i32 0 ; <i8*> [#uses=1]
- %156 = call i32 (...)* @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %155 ) nounwind ; <i32> [#uses=0]
- unreachable
-
-bb2325: ; preds = %bb2326, %bb2295
- %indvar.next5145 = add i32 %indvar5021, 1 ; <i32> [#uses=1]
- br label %bb2326
-
-bb2326: ; preds = %bb2325, %bb.nph4945
- %indvar5021 = phi i32 [ 0, %bb.nph4945 ], [ %indvar.next5145, %bb2325 ] ; <i32> [#uses=6]
- %157 = icmp slt i32 %indvar5021, %n ; <i1> [#uses=0]
- %158 = getelementptr %struct.CGLSI* %src, i32 %indvar5021, i32 10 ; <%struct.xx_t**> [#uses=1]
- %159 = load %struct.xx_t** %158, align 4 ; <%struct.xx_t*> [#uses=5]
- %160 = getelementptr %struct.CGLSI* %src, i32 %indvar5021, i32 1 ; <i32*> [#uses=1]
- %161 = load i32* %160, align 4 ; <i32> [#uses=1]
- %162 = and i32 %161, 255 ; <i32> [#uses=1]
- switch i32 %162, label %bb2325 [
- i32 59, label %bb2217
- i32 60, label %bb2319
- ]
-
-return: ; preds = %entry
- ret void
-}
-
-declare i32 @_u16_ff(...)
-
-declare i32 @_u16a_cm(...)
-
-declare i32 @_u16_sf32(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-10-17-AsmMatchingOperands.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-10-17-AsmMatchingOperands.ll
deleted file mode 100644
index 91c36ef..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-10-17-AsmMatchingOperands.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s
-; XFAIL: *
-; PR2356
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc-apple-darwin9"
-
-define i32 @test(i64 %x, i32* %p) nounwind {
- %asmtmp = call i32 asm "", "=r,0"(i64 0) nounwind ; <i32> [#uses=0]
- %y = add i32 %asmtmp, 1
- ret i32 %y
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll
deleted file mode 100644
index f474a6d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=ppc64
-
-define void @__divtc3({ ppc_fp128, ppc_fp128 }* noalias sret %agg.result, ppc_fp128 %a, ppc_fp128 %b, ppc_fp128 %c, ppc_fp128 %d) nounwind {
-entry:
- %imag59 = load ppc_fp128* null, align 8 ; <ppc_fp128> [#uses=1]
- %0 = fmul ppc_fp128 0xM00000000000000000000000000000000, %imag59 ; <ppc_fp128> [#uses=1]
- %1 = fmul ppc_fp128 0xM00000000000000000000000000000000, 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1]
- %2 = fadd ppc_fp128 %0, %1 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %2, ppc_fp128* null, align 16
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll
deleted file mode 100644
index f4c06fb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -march=ppc32 -o - | not grep fixunstfsi
-
-define i64 @__fixunstfdi(ppc_fp128 %a) nounwind readnone {
-entry:
- %0 = fcmp olt ppc_fp128 %a, 0xM00000000000000000000000000000000 ; <i1> [#uses=1]
- br i1 %0, label %bb5, label %bb1
-
-bb1: ; preds = %entry
- %1 = fmul ppc_fp128 %a, 0xM3DF00000000000000000000000000000 ; <ppc_fp128> [#uses=1]
- %2 = fptoui ppc_fp128 %1 to i32 ; <i32> [#uses=1]
- %3 = zext i32 %2 to i64 ; <i64> [#uses=1]
- %4 = shl i64 %3, 32 ; <i64> [#uses=3]
- %5 = uitofp i64 %4 to ppc_fp128 ; <ppc_fp128> [#uses=1]
- %6 = fsub ppc_fp128 %a, %5 ; <ppc_fp128> [#uses=3]
- %7 = fcmp olt ppc_fp128 %6, 0xM00000000000000000000000000000000 ; <i1> [#uses=1]
- br i1 %7, label %bb2, label %bb3
-
-bb2: ; preds = %bb1
- %8 = fsub ppc_fp128 0xM80000000000000000000000000000000, %6 ; <ppc_fp128> [#uses=1]
- %9 = fptoui ppc_fp128 %8 to i32 ; <i32> [#uses=1]
- %10 = zext i32 %9 to i64 ; <i64> [#uses=1]
- %11 = sub i64 %4, %10 ; <i64> [#uses=1]
- ret i64 %11
-
-bb3: ; preds = %bb1
- %12 = fptoui ppc_fp128 %6 to i32 ; <i32> [#uses=1]
- %13 = zext i32 %12 to i64 ; <i64> [#uses=1]
- %14 = or i64 %13, %4 ; <i64> [#uses=1]
- ret i64 %14
-
-bb5: ; preds = %entry
- ret i64 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-10-30-IllegalShift.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-10-30-IllegalShift.ll
deleted file mode 100644
index 83f3f6f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-10-30-IllegalShift.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=ppc32
-; PR2986
-@argc = external global i32 ; <i32*> [#uses=1]
-@buffer = external global [32 x i8], align 4 ; <[32 x i8]*> [#uses=1]
-
-define void @test1() nounwind noinline {
-entry:
- %0 = load i32* @argc, align 4 ; <i32> [#uses=1]
- %1 = trunc i32 %0 to i8 ; <i8> [#uses=1]
- tail call void @llvm.memset.i32(i8* getelementptr ([32 x i8]* @buffer, i32 0, i32 0), i8 %1, i32 17, i32 4)
- unreachable
-}
-
-declare void @llvm.memset.i32(i8*, i8, i32, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-10-31-PPCF128Libcalls.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-10-31-PPCF128Libcalls.ll
deleted file mode 100644
index 20683b9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-10-31-PPCF128Libcalls.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc < %s
-; PR2988
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc-apple-darwin10.0"
-@a = common global ppc_fp128 0xM00000000000000000000000000000000, align 16 ; <ppc_fp128*> [#uses=2]
-@b = common global ppc_fp128 0xM00000000000000000000000000000000, align 16 ; <ppc_fp128*> [#uses=2]
-@c = common global ppc_fp128 0xM00000000000000000000000000000000, align 16 ; <ppc_fp128*> [#uses=3]
-@d = common global ppc_fp128 0xM00000000000000000000000000000000, align 16 ; <ppc_fp128*> [#uses=2]
-
-define void @foo() nounwind {
-entry:
- %0 = load ppc_fp128* @a, align 16 ; <ppc_fp128> [#uses=1]
- %1 = call ppc_fp128 @llvm.sqrt.ppcf128(ppc_fp128 %0) ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %1, ppc_fp128* @a, align 16
- %2 = load ppc_fp128* @b, align 16 ; <ppc_fp128> [#uses=1]
- %3 = call ppc_fp128 @"\01_sinl$LDBL128"(ppc_fp128 %2) nounwind readonly ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %3, ppc_fp128* @b, align 16
- %4 = load ppc_fp128* @c, align 16 ; <ppc_fp128> [#uses=1]
- %5 = call ppc_fp128 @"\01_cosl$LDBL128"(ppc_fp128 %4) nounwind readonly ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %5, ppc_fp128* @c, align 16
- %6 = load ppc_fp128* @d, align 16 ; <ppc_fp128> [#uses=1]
- %7 = load ppc_fp128* @c, align 16 ; <ppc_fp128> [#uses=1]
- %8 = call ppc_fp128 @llvm.pow.ppcf128(ppc_fp128 %6, ppc_fp128 %7) ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %8, ppc_fp128* @d, align 16
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare ppc_fp128 @llvm.sqrt.ppcf128(ppc_fp128) nounwind readonly
-
-declare ppc_fp128 @"\01_sinl$LDBL128"(ppc_fp128) nounwind readonly
-
-declare ppc_fp128 @"\01_cosl$LDBL128"(ppc_fp128) nounwind readonly
-
-declare ppc_fp128 @llvm.pow.ppcf128(ppc_fp128, ppc_fp128) nounwind readonly
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-12-02-LegalizeTypeAssert.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-12-02-LegalizeTypeAssert.ll
deleted file mode 100644
index 9ed7f6f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-12-02-LegalizeTypeAssert.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc64-apple-darwin9.5
-
-define void @__multc3({ ppc_fp128, ppc_fp128 }* noalias sret %agg.result, ppc_fp128 %a, ppc_fp128 %b, ppc_fp128 %c, ppc_fp128 %d) nounwind {
-entry:
- %.pre139 = and i1 false, false ; <i1> [#uses=1]
- br i1 false, label %bb6, label %bb21
-
-bb6: ; preds = %entry
- %0 = tail call ppc_fp128 @copysignl(ppc_fp128 0xM00000000000000000000000000000000, ppc_fp128 %a) nounwind readnone ; <ppc_fp128> [#uses=0]
- %iftmp.1.0 = select i1 %.pre139, ppc_fp128 0xM3FF00000000000000000000000000000, ppc_fp128 0xM00000000000000000000000000000000 ; <ppc_fp128> [#uses=1]
- %1 = tail call ppc_fp128 @copysignl(ppc_fp128 %iftmp.1.0, ppc_fp128 %b) nounwind readnone ; <ppc_fp128> [#uses=0]
- unreachable
-
-bb21: ; preds = %entry
- unreachable
-}
-
-declare ppc_fp128 @copysignl(ppc_fp128, ppc_fp128) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-12-12-EH.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-12-12-EH.ll
deleted file mode 100644
index 2315e36..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2008-12-12-EH.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin9 | grep ^__Z1fv.eh
-
-define void @_Z1fv() {
-entry:
- br label %return
-
-return:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-01-16-DeclareISelBug.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-01-16-DeclareISelBug.ll
deleted file mode 100644
index d49d58d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-01-16-DeclareISelBug.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin9.5
-; rdar://6499616
-
- %llvm.dbg.anchor.type = type { i32, i32 }
- %llvm.dbg.compile_unit.type = type { i32, { }*, i32, i8*, i8*, i8* }
-@llvm.dbg.compile_units = linkonce constant %llvm.dbg.anchor.type { i32 458752, i32 17 } ; <%llvm.dbg.anchor.type*> [#uses=1]
-@.str = internal constant [11 x i8] c"testcase.c\00" ; <[11 x i8]*> [#uses=1]
-@.str1 = internal constant [30 x i8] c"/Volumes/SandBox/NightlyTest/\00" ; <[30 x i8]*> [#uses=1]
-@.str2 = internal constant [57 x i8] c"4.2.1 (Based on Apple Inc. build 5628) (LLVM build 9999)\00" ; <[57 x i8]*> [#uses=1]
-@llvm.dbg.compile_unit = internal constant %llvm.dbg.compile_unit.type { i32 458769, { }* bitcast (%llvm.dbg.anchor.type* @llvm.dbg.compile_units to { }*), i32 1, i8* getelementptr ([11 x i8]* @.str, i32 0, i32 0), i8* getelementptr ([30 x i8]* @.str1, i32 0, i32 0), i8* getelementptr ([57 x i8]* @.str2, i32 0, i32 0) } ; <%llvm.dbg.compile_unit.type*> [#uses=0]
-@"\01LC" = internal constant [13 x i8] c"conftest.val\00" ; <[13 x i8]*> [#uses=1]
-
-define i32 @main() nounwind {
-entry:
- %0 = call i8* @fopen(i8* getelementptr ([13 x i8]* @"\01LC", i32 0, i32 0), i8* null) nounwind ; <i8*> [#uses=0]
- unreachable
-}
-
-declare i8* @fopen(i8*, i8*)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-03-17-LSRBug.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-03-17-LSRBug.ll
deleted file mode 100644
index 172531e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-03-17-LSRBug.ll
+++ /dev/null
@@ -1,51 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin10
-; rdar://6692215
-
-define fastcc void @_qsort(i8* %a, i32 %n, i32 %es, i32 (i8*, i8*)* %cmp, i32 %depth_limit) nounwind optsize ssp {
-entry:
- br i1 false, label %bb21, label %bb20.loopexit
-
-bb20.loopexit: ; preds = %entry
- ret void
-
-bb21: ; preds = %entry
- %0 = getelementptr i8* %a, i32 0 ; <i8*> [#uses=2]
- br label %bb35
-
-bb29: ; preds = %bb35
- br i1 false, label %bb7.i252, label %bb34
-
-bb7.i252: ; preds = %bb7.i252, %bb29
- %pj.0.rec.i247 = phi i32 [ %indvar.next488, %bb7.i252 ], [ 0, %bb29 ] ; <i32> [#uses=2]
- %pi.0.i248 = getelementptr i8* %pa.1, i32 %pj.0.rec.i247 ; <i8*> [#uses=0]
- %indvar.next488 = add i32 %pj.0.rec.i247, 1 ; <i32> [#uses=1]
- br i1 false, label %bb34, label %bb7.i252
-
-bb34: ; preds = %bb7.i252, %bb29
- %indvar.next505 = add i32 %indvar504, 1 ; <i32> [#uses=1]
- br label %bb35
-
-bb35: ; preds = %bb34, %bb21
- %indvar504 = phi i32 [ %indvar.next505, %bb34 ], [ 0, %bb21 ] ; <i32> [#uses=2]
- %pa.1 = phi i8* [ null, %bb34 ], [ %0, %bb21 ] ; <i8*> [#uses=2]
- %pb.0.rec = mul i32 %indvar504, %es ; <i32> [#uses=1]
- br i1 false, label %bb43, label %bb29
-
-bb43: ; preds = %bb43, %bb35
- br i1 false, label %bb50, label %bb43
-
-bb50: ; preds = %bb43
- %1 = ptrtoint i8* %pa.1 to i32 ; <i32> [#uses=1]
- %2 = sub i32 %1, 0 ; <i32> [#uses=2]
- %3 = icmp sle i32 0, %2 ; <i1> [#uses=1]
- %min = select i1 %3, i32 0, i32 %2 ; <i32> [#uses=1]
- br label %bb7.i161
-
-bb7.i161: ; preds = %bb7.i161, %bb50
- %pj.0.rec.i156 = phi i32 [ %indvar.next394, %bb7.i161 ], [ 0, %bb50 ] ; <i32> [#uses=2]
- %.sum279 = sub i32 %pj.0.rec.i156, %min ; <i32> [#uses=1]
- %pb.0.sum542 = add i32 %pb.0.rec, %.sum279 ; <i32> [#uses=1]
- %pj.0.i158 = getelementptr i8* %0, i32 %pb.0.sum542 ; <i8*> [#uses=0]
- %indvar.next394 = add i32 %pj.0.rec.i156, 1 ; <i32> [#uses=1]
- br label %bb7.i161
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-05-28-LegalizeBRCC.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-05-28-LegalizeBRCC.ll
deleted file mode 100644
index 29d115d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-05-28-LegalizeBRCC.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin10
-; PR4280
-
-define i32 @__fixunssfsi(float %a) nounwind readnone {
-entry:
- %0 = fcmp ult float %a, 0x41E0000000000000 ; <i1> [#uses=1]
- br i1 %0, label %bb1, label %bb
-
-bb: ; preds = %entry
- ret i32 1
-
-bb1: ; preds = %entry
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-07-16-InlineAsm-M-Operand.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-07-16-InlineAsm-M-Operand.ll
deleted file mode 100644
index f64e3dc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-07-16-InlineAsm-M-Operand.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=ppc32 -verify-machineinstrs
-
-; Machine code verifier will call isRegTiedToDefOperand() on /all/ register use
-; operands. We must make sure that the operand flag is found correctly.
-
-; This test case is actually not specific to PowerPC, but the (imm, reg) format
-; of PowerPC "m" operands trigger this bug.
-
-define void @memory_asm_operand(i32 %a) {
- ; "m" operand will be represented as:
- ; INLINEASM <es:fake $0>, 10, %R2, 20, -4, %R1
- ; It is difficult to find the flag operand (20) when starting from %R1
- call i32 asm "lbzx $0, $1", "=r,m" (i32 %a)
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-08-17-inline-asm-addr-mode-breakage.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-08-17-inline-asm-addr-mode-breakage.ll
deleted file mode 100644
index 50a0278..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-08-17-inline-asm-addr-mode-breakage.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin10 -mcpu=g5 | FileCheck %s
-; ModuleID = '<stdin>'
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc-apple-darwin10.0"
-; It is wrong on powerpc to substitute reg+reg for $0; the stw opcode
-; would have to change.
-
-@x = external global [0 x i32] ; <[0 x i32]*> [#uses=1]
-
-define void @foo(i32 %y) nounwind ssp {
-entry:
-; CHECK: foo
-; CHECK: add r4
-; CHECK: 0(r4)
- %y_addr = alloca i32 ; <i32*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %y, i32* %y_addr
- %0 = load i32* %y_addr, align 4 ; <i32> [#uses=1]
- %1 = getelementptr inbounds [0 x i32]* @x, i32 0, i32 %0 ; <i32*> [#uses=1]
- call void asm sideeffect "isync\0A\09eieio\0A\09stw $1, $0", "=*o,r,~{memory}"(i32* %1, i32 0) nounwind
- br label %return
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-08-23-linkerprivate.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-08-23-linkerprivate.ll
deleted file mode 100644
index 12c4c99..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-08-23-linkerprivate.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=ppc-apple-darwin | FileCheck %s
-
-; ModuleID = '/Volumes/MacOS9/tests/WebKit/JavaScriptCore/profiler/ProfilerServer.mm'
-
-@"\01l_objc_msgSend_fixup_alloc" = linker_private hidden global i32 0, section "__DATA, __objc_msgrefs, coalesced", align 16 ; <i32*> [#uses=0]
-
-; CHECK: .globl l_objc_msgSend_fixup_alloc
-; CHECK: .weak_definition l_objc_msgSend_fixup_alloc
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-09-18-carrybit.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-09-18-carrybit.ll
deleted file mode 100644
index 6c23a61..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-09-18-carrybit.ll
+++ /dev/null
@@ -1,62 +0,0 @@
-; RUN: llc -march=ppc32 < %s | FileCheck %s
-; ModuleID = '<stdin>'
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc-apple-darwin9.6"
-
-define i64 @foo(i64 %r.0.ph, i64 %q.0.ph, i32 %sr1.1.ph) nounwind {
-entry:
-; CHECK: foo:
-; CHECK: subfc
-; CHECK: subfe
-; CHECK: subfc
-; CHECK: subfe
- %tmp0 = add i64 %r.0.ph, -1 ; <i64> [#uses=1]
- br label %bb40
-
-bb40: ; preds = %bb40, %entry
- %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb40 ] ; <i32> [#uses=1]
- %carry.0274 = phi i32 [ 0, %entry ], [%tmp122, %bb40 ] ; <i32> [#uses=1]
- %r.0273 = phi i64 [ %r.0.ph, %entry ], [ %tmp124, %bb40 ] ; <i64> [#uses=2]
- %q.0272 = phi i64 [ %q.0.ph, %entry ], [ %ins169, %bb40 ] ; <i64> [#uses=3]
- %tmp1 = lshr i64 %r.0273, 31 ; <i64> [#uses=1]
- %tmp2 = trunc i64 %tmp1 to i32 ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp2, -2 ; <i32> [#uses=1]
- %tmp213 = trunc i64 %r.0273 to i32 ; <i32> [#uses=2]
- %tmp106 = lshr i32 %tmp213, 31 ; <i32> [#uses=1]
- %tmp107 = or i32 %tmp3, %tmp106 ; <i32> [#uses=1]
- %tmp215 = zext i32 %tmp107 to i64 ; <i64> [#uses=1]
- %tmp216 = shl i64 %tmp215, 32 ; <i64> [#uses=1]
- %tmp108 = shl i32 %tmp213, 1 ; <i32> [#uses=1]
- %tmp109 = lshr i64 %q.0272, 63 ; <i64> [#uses=1]
- %tmp110 = trunc i64 %tmp109 to i32 ; <i32> [#uses=1]
- %tmp111 = or i32 %tmp108, %tmp110 ; <i32> [#uses=1]
- %tmp222 = zext i32 %tmp111 to i64 ; <i64> [#uses=1]
- %ins224 = or i64 %tmp216, %tmp222 ; <i64> [#uses=2]
- %tmp112 = lshr i64 %q.0272, 31 ; <i64> [#uses=1]
- %tmp113 = trunc i64 %tmp112 to i32 ; <i32> [#uses=1]
- %tmp114 = and i32 %tmp113, -2 ; <i32> [#uses=1]
- %tmp158 = trunc i64 %q.0272 to i32 ; <i32> [#uses=2]
- %tmp115 = lshr i32 %tmp158, 31 ; <i32> [#uses=1]
- %tmp116 = or i32 %tmp114, %tmp115 ; <i32> [#uses=1]
- %tmp160 = zext i32 %tmp116 to i64 ; <i64> [#uses=1]
- %tmp161 = shl i64 %tmp160, 32 ; <i64> [#uses=1]
- %tmp117 = shl i32 %tmp158, 1 ; <i32> [#uses=1]
- %tmp118 = or i32 %tmp117, %carry.0274 ; <i32> [#uses=1]
- %tmp167 = zext i32 %tmp118 to i64 ; <i64> [#uses=1]
- %ins169 = or i64 %tmp161, %tmp167 ; <i64> [#uses=2]
- %tmp119 = sub i64 %tmp0, %ins224 ; <i64> [#uses=1]
- %tmp120 = ashr i64 %tmp119, 63 ; <i64> [#uses=2]
- %tmp121 = trunc i64 %tmp120 to i32 ; <i32> [#uses=1]
- %tmp122 = and i32 %tmp121, 1 ; <i32> [#uses=2]
- %tmp123 = and i64 %tmp120, %q.0.ph ; <i64> [#uses=1]
- %tmp124 = sub i64 %ins224, %tmp123 ; <i64> [#uses=2]
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %sr1.1.ph ; <i1> [#uses=1]
- br i1 %exitcond, label %bb41.bb42_crit_edge, label %bb40
-
-bb41.bb42_crit_edge: ; preds = %bb40
- %phitmp278 = zext i32 %tmp122 to i64 ; <i64> [#uses=1]
- %tmp125 = shl i64 %ins169, 1 ; <i64> [#uses=1]
- %tmp126 = or i64 %phitmp278, %tmp125 ; <i64> [#uses=2]
- ret i64 %tmp126
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-11-15-ProcImpDefsBug.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-11-15-ProcImpDefsBug.ll
deleted file mode 100644
index 2d9d16a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-11-15-ProcImpDefsBug.ll
+++ /dev/null
@@ -1,105 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin8
-
-define void @gcov_exit() nounwind {
-entry:
- br i1 undef, label %return, label %bb.nph341
-
-bb.nph341: ; preds = %entry
- br label %bb25
-
-bb25: ; preds = %read_fatal, %bb.nph341
- br i1 undef, label %bb49.1, label %bb48
-
-bb48: ; preds = %bb25
- br label %bb49.1
-
-bb51: ; preds = %bb48.4, %bb49.3
- switch i32 undef, label %bb58 [
- i32 0, label %rewrite
- i32 1734567009, label %bb59
- ]
-
-bb58: ; preds = %bb51
- br label %read_fatal
-
-bb59: ; preds = %bb51
- br i1 undef, label %bb60, label %bb3.i156
-
-bb3.i156: ; preds = %bb59
- br label %read_fatal
-
-bb60: ; preds = %bb59
- br i1 undef, label %bb78.preheader, label %rewrite
-
-bb78.preheader: ; preds = %bb60
- br i1 undef, label %bb62, label %bb80
-
-bb62: ; preds = %bb78.preheader
- br i1 undef, label %bb64, label %read_mismatch
-
-bb64: ; preds = %bb62
- br i1 undef, label %bb65, label %read_mismatch
-
-bb65: ; preds = %bb64
- br i1 undef, label %bb75, label %read_mismatch
-
-read_mismatch: ; preds = %bb98, %bb119.preheader, %bb72, %bb71, %bb65, %bb64, %bb62
- br label %read_fatal
-
-bb71: ; preds = %bb75
- br i1 undef, label %bb72, label %read_mismatch
-
-bb72: ; preds = %bb71
- br i1 undef, label %bb73, label %read_mismatch
-
-bb73: ; preds = %bb72
- unreachable
-
-bb74: ; preds = %bb75
- br label %bb75
-
-bb75: ; preds = %bb74, %bb65
- br i1 undef, label %bb74, label %bb71
-
-bb80: ; preds = %bb78.preheader
- unreachable
-
-read_fatal: ; preds = %read_mismatch, %bb3.i156, %bb58
- br i1 undef, label %return, label %bb25
-
-rewrite: ; preds = %bb60, %bb51
- br i1 undef, label %bb94, label %bb119.preheader
-
-bb94: ; preds = %rewrite
- unreachable
-
-bb119.preheader: ; preds = %rewrite
- br i1 undef, label %read_mismatch, label %bb98
-
-bb98: ; preds = %bb119.preheader
- br label %read_mismatch
-
-return: ; preds = %read_fatal, %entry
- ret void
-
-bb49.1: ; preds = %bb48, %bb25
- br i1 undef, label %bb49.2, label %bb48.2
-
-bb49.2: ; preds = %bb48.2, %bb49.1
- br i1 undef, label %bb49.3, label %bb48.3
-
-bb48.2: ; preds = %bb49.1
- br label %bb49.2
-
-bb49.3: ; preds = %bb48.3, %bb49.2
- %c_ix.0.3 = phi i32 [ undef, %bb48.3 ], [ undef, %bb49.2 ] ; <i32> [#uses=1]
- br i1 undef, label %bb51, label %bb48.4
-
-bb48.3: ; preds = %bb49.2
- store i64* undef, i64** undef, align 4
- br label %bb49.3
-
-bb48.4: ; preds = %bb49.3
- %0 = getelementptr inbounds [5 x i64*]* undef, i32 0, i32 %c_ix.0.3 ; <i64**> [#uses=0]
- br label %bb51
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-11-15-ReMatBug.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-11-15-ReMatBug.ll
deleted file mode 100644
index 54f4b2e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-11-15-ReMatBug.ll
+++ /dev/null
@@ -1,155 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin8
-
-%struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
-%struct.__gcov_var = type { %struct.FILE*, i32, i32, i32, i32, i32, i32, [1025 x i32] }
-%struct.__sFILEX = type opaque
-%struct.__sbuf = type { i8*, i32 }
-%struct.gcov_ctr_info = type { i32, i64*, void (i64*, i32)* }
-%struct.gcov_ctr_summary = type { i32, i32, i64, i64, i64 }
-%struct.gcov_fn_info = type { i32, i32, [0 x i32] }
-%struct.gcov_info = type { i32, %struct.gcov_info*, i32, i8*, i32, %struct.gcov_fn_info*, i32, [0 x %struct.gcov_ctr_info] }
-%struct.gcov_summary = type { i32, [1 x %struct.gcov_ctr_summary] }
-
-@__gcov_var = external global %struct.__gcov_var ; <%struct.__gcov_var*> [#uses=1]
-@__sF = external global [0 x %struct.FILE] ; <[0 x %struct.FILE]*> [#uses=1]
-@.str = external constant [56 x i8], align 4 ; <[56 x i8]*> [#uses=1]
-@gcov_list = external global %struct.gcov_info* ; <%struct.gcov_info**> [#uses=1]
-@.str7 = external constant [35 x i8], align 4 ; <[35 x i8]*> [#uses=1]
-@.str8 = external constant [9 x i8], align 4 ; <[9 x i8]*> [#uses=1]
-@.str9 = external constant [10 x i8], align 4 ; <[10 x i8]*> [#uses=1]
-@.str10 = external constant [36 x i8], align 4 ; <[36 x i8]*> [#uses=1]
-
-declare i32 @"\01_fprintf$LDBL128"(%struct.FILE*, i8*, ...) nounwind
-
-define void @gcov_exit() nounwind {
-entry:
- %gi_ptr.0357 = load %struct.gcov_info** @gcov_list, align 4 ; <%struct.gcov_info*> [#uses=1]
- %0 = alloca i8, i32 undef, align 1 ; <i8*> [#uses=3]
- br i1 undef, label %return, label %bb.nph341
-
-bb.nph341: ; preds = %entry
- %object27 = bitcast %struct.gcov_summary* undef to i8* ; <i8*> [#uses=1]
- br label %bb25
-
-bb25: ; preds = %read_fatal, %bb.nph341
- %gi_ptr.1329 = phi %struct.gcov_info* [ %gi_ptr.0357, %bb.nph341 ], [ undef, %read_fatal ] ; <%struct.gcov_info*> [#uses=1]
- call void @llvm.memset.i32(i8* %object27, i8 0, i32 36, i32 8)
- br i1 undef, label %bb49.1, label %bb48
-
-bb48: ; preds = %bb25
- br label %bb49.1
-
-bb51: ; preds = %bb48.4, %bb49.3
- switch i32 undef, label %bb58 [
- i32 0, label %rewrite
- i32 1734567009, label %bb59
- ]
-
-bb58: ; preds = %bb51
- %1 = call i32 (%struct.FILE*, i8*, ...)* @"\01_fprintf$LDBL128"(%struct.FILE* getelementptr inbounds ([0 x %struct.FILE]* @__sF, i32 0, i32 2), i8* getelementptr inbounds ([35 x i8]* @.str7, i32 0, i32 0), i8* %0) nounwind ; <i32> [#uses=0]
- br label %read_fatal
-
-bb59: ; preds = %bb51
- br i1 undef, label %bb60, label %bb3.i156
-
-bb3.i156: ; preds = %bb59
- store i8 52, i8* undef, align 1
- store i8 42, i8* undef, align 1
- %2 = call i32 (%struct.FILE*, i8*, ...)* @"\01_fprintf$LDBL128"(%struct.FILE* getelementptr inbounds ([0 x %struct.FILE]* @__sF, i32 0, i32 2), i8* getelementptr inbounds ([56 x i8]* @.str, i32 0, i32 0), i8* %0, i8* undef, i8* undef) nounwind ; <i32> [#uses=0]
- br label %read_fatal
-
-bb60: ; preds = %bb59
- br i1 undef, label %bb78.preheader, label %rewrite
-
-bb78.preheader: ; preds = %bb60
- br i1 undef, label %bb62, label %bb80
-
-bb62: ; preds = %bb78.preheader
- br i1 undef, label %bb64, label %read_mismatch
-
-bb64: ; preds = %bb62
- br i1 undef, label %bb65, label %read_mismatch
-
-bb65: ; preds = %bb64
- br i1 undef, label %bb75, label %read_mismatch
-
-read_mismatch: ; preds = %bb98, %bb119.preheader, %bb72, %bb71, %bb65, %bb64, %bb62
- %3 = icmp eq i32 undef, -1 ; <i1> [#uses=1]
- %iftmp.11.0 = select i1 %3, i8* getelementptr inbounds ([10 x i8]* @.str9, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8]* @.str8, i32 0, i32 0) ; <i8*> [#uses=1]
- %4 = call i32 (%struct.FILE*, i8*, ...)* @"\01_fprintf$LDBL128"(%struct.FILE* getelementptr inbounds ([0 x %struct.FILE]* @__sF, i32 0, i32 2), i8* getelementptr inbounds ([36 x i8]* @.str10, i32 0, i32 0), i8* %0, i8* %iftmp.11.0) nounwind ; <i32> [#uses=0]
- br label %read_fatal
-
-bb71: ; preds = %bb75
- %5 = load i32* undef, align 4 ; <i32> [#uses=1]
- %6 = getelementptr inbounds %struct.gcov_info* %gi_ptr.1329, i32 0, i32 7, i32 undef, i32 2 ; <void (i64*, i32)**> [#uses=1]
- %7 = load void (i64*, i32)** %6, align 4 ; <void (i64*, i32)*> [#uses=1]
- %8 = call i32 @__gcov_read_unsigned() nounwind ; <i32> [#uses=1]
- %9 = call i32 @__gcov_read_unsigned() nounwind ; <i32> [#uses=1]
- %10 = icmp eq i32 %tmp386, %8 ; <i1> [#uses=1]
- br i1 %10, label %bb72, label %read_mismatch
-
-bb72: ; preds = %bb71
- %11 = icmp eq i32 undef, %9 ; <i1> [#uses=1]
- br i1 %11, label %bb73, label %read_mismatch
-
-bb73: ; preds = %bb72
- call void %7(i64* null, i32 %5) nounwind
- unreachable
-
-bb74: ; preds = %bb75
- %12 = add i32 %13, 1 ; <i32> [#uses=1]
- br label %bb75
-
-bb75: ; preds = %bb74, %bb65
- %13 = phi i32 [ %12, %bb74 ], [ 0, %bb65 ] ; <i32> [#uses=2]
- %tmp386 = add i32 0, 27328512 ; <i32> [#uses=1]
- %14 = shl i32 1, %13 ; <i32> [#uses=1]
- %15 = load i32* undef, align 4 ; <i32> [#uses=1]
- %16 = and i32 %15, %14 ; <i32> [#uses=1]
- %17 = icmp eq i32 %16, 0 ; <i1> [#uses=1]
- br i1 %17, label %bb74, label %bb71
-
-bb80: ; preds = %bb78.preheader
- unreachable
-
-read_fatal: ; preds = %read_mismatch, %bb3.i156, %bb58
- br i1 undef, label %return, label %bb25
-
-rewrite: ; preds = %bb60, %bb51
- store i32 -1, i32* getelementptr inbounds (%struct.__gcov_var* @__gcov_var, i32 0, i32 6), align 4
- br i1 undef, label %bb94, label %bb119.preheader
-
-bb94: ; preds = %rewrite
- unreachable
-
-bb119.preheader: ; preds = %rewrite
- br i1 undef, label %read_mismatch, label %bb98
-
-bb98: ; preds = %bb119.preheader
- br label %read_mismatch
-
-return: ; preds = %read_fatal, %entry
- ret void
-
-bb49.1: ; preds = %bb48, %bb25
- br i1 undef, label %bb49.2, label %bb48.2
-
-bb49.2: ; preds = %bb48.2, %bb49.1
- br i1 undef, label %bb49.3, label %bb48.3
-
-bb48.2: ; preds = %bb49.1
- br label %bb49.2
-
-bb49.3: ; preds = %bb48.3, %bb49.2
- br i1 undef, label %bb51, label %bb48.4
-
-bb48.3: ; preds = %bb49.2
- br label %bb49.3
-
-bb48.4: ; preds = %bb49.3
- br label %bb51
-}
-
-declare i32 @__gcov_read_unsigned() nounwind
-
-declare void @llvm.memset.i32(i8* nocapture, i8, i32, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-11-25-ImpDefBug.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-11-25-ImpDefBug.ll
deleted file mode 100644
index 9a22a6f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2009-11-25-ImpDefBug.ll
+++ /dev/null
@@ -1,56 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin9.5 -mcpu=g5
-; rdar://7422268
-
-%struct..0EdgeT = type { i32, i32, float, float, i32, i32, i32, float, i32, i32 }
-
-define void @smooth_color_z_triangle(i32 %v0, i32 %v1, i32 %v2, i32 %pv) nounwind {
-entry:
- br i1 undef, label %return, label %bb14
-
-bb14: ; preds = %entry
- br i1 undef, label %bb15, label %return
-
-bb15: ; preds = %bb14
- br i1 undef, label %bb16, label %bb17
-
-bb16: ; preds = %bb15
- br label %bb17
-
-bb17: ; preds = %bb16, %bb15
- %0 = fcmp olt float undef, 0.000000e+00 ; <i1> [#uses=2]
- %eTop.eMaj = select i1 %0, %struct..0EdgeT* undef, %struct..0EdgeT* null ; <%struct..0EdgeT*> [#uses=1]
- br label %bb69
-
-bb24: ; preds = %bb69
- br i1 undef, label %bb25, label %bb28
-
-bb25: ; preds = %bb24
- br label %bb33
-
-bb28: ; preds = %bb24
- br i1 undef, label %return, label %bb32
-
-bb32: ; preds = %bb28
- br i1 %0, label %bb38, label %bb33
-
-bb33: ; preds = %bb32, %bb25
- br i1 undef, label %bb34, label %bb38
-
-bb34: ; preds = %bb33
- br label %bb38
-
-bb38: ; preds = %bb34, %bb33, %bb32
- %eRight.08 = phi %struct..0EdgeT* [ %eTop.eMaj, %bb32 ], [ undef, %bb34 ], [ undef, %bb33 ] ; <%struct..0EdgeT*> [#uses=0]
- %fdgOuter.0 = phi i32 [ %fdgOuter.1, %bb32 ], [ undef, %bb34 ], [ %fdgOuter.1, %bb33 ] ; <i32> [#uses=1]
- %fz.3 = phi i32 [ %fz.2, %bb32 ], [ 2147483647, %bb34 ], [ %fz.2, %bb33 ] ; <i32> [#uses=1]
- %1 = add i32 undef, 1 ; <i32> [#uses=0]
- br label %bb69
-
-bb69: ; preds = %bb38, %bb17
- %fdgOuter.1 = phi i32 [ undef, %bb17 ], [ %fdgOuter.0, %bb38 ] ; <i32> [#uses=2]
- %fz.2 = phi i32 [ undef, %bb17 ], [ %fz.3, %bb38 ] ; <i32> [#uses=2]
- br i1 undef, label %bb24, label %return
-
-return: ; preds = %bb69, %bb28, %bb14, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2010-02-04-EmptyGlobal.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2010-02-04-EmptyGlobal.ll
deleted file mode 100644
index 32ddb34..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2010-02-04-EmptyGlobal.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin10 -relocation-model=pic -disable-fp-elim | FileCheck %s
-; <rdar://problem/7604010>
-
-%cmd.type = type { }
-
-@_cmd = constant %cmd.type zeroinitializer
-
-; CHECK: .globl __cmd
-; CHECK-NEXT: .align 3
-; CHECK-NEXT: __cmd:
-; CHECK-NEXT: .space 1
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2010-02-12-saveCR.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2010-02-12-saveCR.ll
deleted file mode 100644
index b73382e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2010-02-12-saveCR.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin | FileCheck %s
-; ModuleID = 'hh.c'
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128-n32"
-target triple = "powerpc-apple-darwin9.6"
-; This formerly used R0 for both the stack address and CR.
-
-define void @foo() nounwind {
-entry:
-;CHECK: mfcr r2
-;CHECK: rlwinm r2, r2, 8, 0, 31
-;CHECK: lis r0, 1
-;CHECK: ori r0, r0, 34540
-;CHECK: stwx r2, r1, r0
- %x = alloca [100000 x i8] ; <[100000 x i8]*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %x1 = bitcast [100000 x i8]* %x to i8* ; <i8*> [#uses=1]
- call void @bar(i8* %x1) nounwind
- call void asm sideeffect "", "~{cr2}"() nounwind
- br label %return
-
-return: ; preds = %entry
-;CHECK: lis r0, 1
-;CHECK: ori r0, r0, 34540
-;CHECK: lwzx r2, r1, r0
-;CHECK: rlwinm r2, r2, 24, 0, 31
-;CHECK: mtcrf 32, r2
- ret void
-}
-
-declare void @bar(i8*)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/2010-02-26-FoldFloats.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/2010-02-26-FoldFloats.ll
deleted file mode 100644
index f43f5ca..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/2010-02-26-FoldFloats.ll
+++ /dev/null
@@ -1,433 +0,0 @@
-; RUN: llc < %s -O3 | FileCheck %s
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128-n32"
-target triple = "powerpc-apple-darwin9.6"
-
-; There should be no stfs spills
-; CHECK: main:
-; CHECK-NOT: stfs
-; CHECK: .section
-
-@.str66 = external constant [3 x i8], align 4 ; <[3 x i8]*> [#uses=1]
-@.str31 = external constant [6 x i8], align 4 ; <[6 x i8]*> [#uses=1]
-@.str61 = external constant [21 x i8], align 4 ; <[21 x i8]*> [#uses=1]
-@.str101 = external constant [61 x i8], align 4 ; <[61 x i8]*> [#uses=1]
-@.str104 = external constant [31 x i8], align 4 ; <[31 x i8]*> [#uses=1]
-@.str105 = external constant [45 x i8], align 4 ; <[45 x i8]*> [#uses=1]
-@.str112 = external constant [38 x i8], align 4 ; <[38 x i8]*> [#uses=1]
-@.str121 = external constant [36 x i8], align 4 ; <[36 x i8]*> [#uses=1]
-@.str12293 = external constant [67 x i8], align 4 ; <[67 x i8]*> [#uses=1]
-@.str123 = external constant [68 x i8], align 4 ; <[68 x i8]*> [#uses=1]
-@.str124 = external constant [52 x i8], align 4 ; <[52 x i8]*> [#uses=1]
-@.str125 = external constant [51 x i8], align 4 ; <[51 x i8]*> [#uses=1]
-
-define i32 @main(i32 %argc, i8** %argv) noreturn nounwind {
-entry:
- br i1 undef, label %bb4.i1, label %my_fopen.exit
-
-bb4.i1: ; preds = %entry
- unreachable
-
-my_fopen.exit: ; preds = %entry
- br i1 undef, label %bb.i, label %bb1.i
-
-bb.i: ; preds = %my_fopen.exit
- unreachable
-
-bb1.i: ; preds = %my_fopen.exit
- br label %bb134.i
-
-bb2.i: ; preds = %bb134.i
- %0 = icmp eq i32 undef, 0 ; <i1> [#uses=1]
- br i1 %0, label %bb20.i, label %bb21.i
-
-bb20.i: ; preds = %bb2.i
- br label %bb134.i
-
-bb21.i: ; preds = %bb2.i
- %1 = call i32 @strcmp(i8* undef, i8* getelementptr inbounds ([6 x i8]* @.str31, i32 0, i32 0)) nounwind readonly ; <i32> [#uses=0]
- br i1 undef, label %bb30.i, label %bb31.i
-
-bb30.i: ; preds = %bb21.i
- br label %bb134.i
-
-bb31.i: ; preds = %bb21.i
- br i1 undef, label %bb41.i, label %bb44.i
-
-bb41.i: ; preds = %bb31.i
- %2 = icmp slt i32 undef, %argc ; <i1> [#uses=1]
- br i1 %2, label %bb1.i77.i, label %bb2.i78.i
-
-bb1.i77.i: ; preds = %bb41.i
- %3 = load float* undef, align 4 ; <float> [#uses=2]
- %4 = fcmp ugt float %3, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %4, label %bb43.i, label %bb42.i
-
-bb2.i78.i: ; preds = %bb41.i
- unreachable
-
-bb42.i: ; preds = %bb1.i77.i
- unreachable
-
-bb43.i: ; preds = %bb1.i77.i
- br label %bb134.i
-
-bb44.i: ; preds = %bb31.i
- br i1 undef, label %bb45.i, label %bb49.i
-
-bb45.i: ; preds = %bb44.i
- %5 = icmp slt i32 undef, %argc ; <i1> [#uses=1]
- br i1 %5, label %bb1.i72.i, label %bb2.i73.i
-
-bb1.i72.i: ; preds = %bb45.i
- %6 = load float* undef, align 4 ; <float> [#uses=3]
- %7 = fcmp ult float %6, 1.000000e+00 ; <i1> [#uses=1]
- %or.cond.i = and i1 undef, %7 ; <i1> [#uses=1]
- br i1 %or.cond.i, label %bb48.i, label %bb47.i
-
-bb2.i73.i: ; preds = %bb45.i
- unreachable
-
-bb47.i: ; preds = %bb1.i72.i
- unreachable
-
-bb48.i: ; preds = %bb1.i72.i
- br label %bb134.i
-
-bb49.i: ; preds = %bb44.i
- br i1 undef, label %bb50.i, label %bb53.i
-
-bb50.i: ; preds = %bb49.i
- br i1 false, label %bb1.i67.i, label %bb2.i68.i
-
-bb1.i67.i: ; preds = %bb50.i
- br i1 false, label %read_float_option.exit69.i, label %bb1.i67.bb2.i68_crit_edge.i
-
-bb1.i67.bb2.i68_crit_edge.i: ; preds = %bb1.i67.i
- br label %bb2.i68.i
-
-bb2.i68.i: ; preds = %bb1.i67.bb2.i68_crit_edge.i, %bb50.i
- unreachable
-
-read_float_option.exit69.i: ; preds = %bb1.i67.i
- br i1 undef, label %bb52.i, label %bb51.i
-
-bb51.i: ; preds = %read_float_option.exit69.i
- unreachable
-
-bb52.i: ; preds = %read_float_option.exit69.i
- br label %bb134.i
-
-bb53.i: ; preds = %bb49.i
- %8 = call i32 @strcmp(i8* undef, i8* getelementptr inbounds ([21 x i8]* @.str61, i32 0, i32 0)) nounwind readonly ; <i32> [#uses=0]
- br i1 false, label %bb89.i, label %bb92.i
-
-bb89.i: ; preds = %bb53.i
- br i1 undef, label %bb1.i27.i, label %bb2.i28.i
-
-bb1.i27.i: ; preds = %bb89.i
- unreachable
-
-bb2.i28.i: ; preds = %bb89.i
- unreachable
-
-bb92.i: ; preds = %bb53.i
- br i1 undef, label %bb93.i, label %bb96.i
-
-bb93.i: ; preds = %bb92.i
- br i1 undef, label %bb1.i22.i, label %bb2.i23.i
-
-bb1.i22.i: ; preds = %bb93.i
- br i1 undef, label %bb95.i, label %bb94.i
-
-bb2.i23.i: ; preds = %bb93.i
- unreachable
-
-bb94.i: ; preds = %bb1.i22.i
- unreachable
-
-bb95.i: ; preds = %bb1.i22.i
- br label %bb134.i
-
-bb96.i: ; preds = %bb92.i
- br i1 undef, label %bb97.i, label %bb100.i
-
-bb97.i: ; preds = %bb96.i
- %9 = icmp slt i32 undef, %argc ; <i1> [#uses=1]
- br i1 %9, label %bb1.i17.i, label %bb2.i18.i
-
-bb1.i17.i: ; preds = %bb97.i
- %10 = call i32 (i8*, i8*, ...)* @"\01_sscanf$LDBL128"(i8* undef, i8* getelementptr inbounds ([3 x i8]* @.str66, i32 0, i32 0), float* undef) nounwind ; <i32> [#uses=1]
- %phitmp.i16.i = icmp eq i32 %10, 1 ; <i1> [#uses=1]
- br i1 %phitmp.i16.i, label %read_float_option.exit19.i, label %bb1.i17.bb2.i18_crit_edge.i
-
-bb1.i17.bb2.i18_crit_edge.i: ; preds = %bb1.i17.i
- br label %bb2.i18.i
-
-bb2.i18.i: ; preds = %bb1.i17.bb2.i18_crit_edge.i, %bb97.i
- unreachable
-
-read_float_option.exit19.i: ; preds = %bb1.i17.i
- br i1 false, label %bb99.i, label %bb98.i
-
-bb98.i: ; preds = %read_float_option.exit19.i
- unreachable
-
-bb99.i: ; preds = %read_float_option.exit19.i
- br label %bb134.i
-
-bb100.i: ; preds = %bb96.i
- br i1 false, label %bb101.i, label %bb104.i
-
-bb101.i: ; preds = %bb100.i
- br i1 false, label %bb1.i12.i, label %bb2.i13.i
-
-bb1.i12.i: ; preds = %bb101.i
- br i1 undef, label %bb102.i, label %bb103.i
-
-bb2.i13.i: ; preds = %bb101.i
- unreachable
-
-bb102.i: ; preds = %bb1.i12.i
- unreachable
-
-bb103.i: ; preds = %bb1.i12.i
- br label %bb134.i
-
-bb104.i: ; preds = %bb100.i
- unreachable
-
-bb134.i: ; preds = %bb103.i, %bb99.i, %bb95.i, %bb52.i, %bb48.i, %bb43.i, %bb30.i, %bb20.i, %bb1.i
- %annealing_sched.1.0 = phi float [ 1.000000e+01, %bb1.i ], [ %annealing_sched.1.0, %bb20.i ], [ 1.000000e+00, %bb30.i ], [ %annealing_sched.1.0, %bb43.i ], [ %annealing_sched.1.0, %bb48.i ], [ %annealing_sched.1.0, %bb52.i ], [ %annealing_sched.1.0, %bb95.i ], [ %annealing_sched.1.0, %bb99.i ], [ %annealing_sched.1.0, %bb103.i ] ; <float> [#uses=8]
- %annealing_sched.2.0 = phi float [ 1.000000e+02, %bb1.i ], [ %annealing_sched.2.0, %bb20.i ], [ %annealing_sched.2.0, %bb30.i ], [ %3, %bb43.i ], [ %annealing_sched.2.0, %bb48.i ], [ %annealing_sched.2.0, %bb52.i ], [ %annealing_sched.2.0, %bb95.i ], [ %annealing_sched.2.0, %bb99.i ], [ %annealing_sched.2.0, %bb103.i ] ; <float> [#uses=8]
- %annealing_sched.3.0 = phi float [ 0x3FE99999A0000000, %bb1.i ], [ %annealing_sched.3.0, %bb20.i ], [ %annealing_sched.3.0, %bb30.i ], [ %annealing_sched.3.0, %bb43.i ], [ %6, %bb48.i ], [ %annealing_sched.3.0, %bb52.i ], [ %annealing_sched.3.0, %bb95.i ], [ %annealing_sched.3.0, %bb99.i ], [ %annealing_sched.3.0, %bb103.i ] ; <float> [#uses=8]
- %annealing_sched.4.0 = phi float [ 0x3F847AE140000000, %bb1.i ], [ %annealing_sched.4.0, %bb20.i ], [ %annealing_sched.4.0, %bb30.i ], [ %annealing_sched.4.0, %bb43.i ], [ %annealing_sched.4.0, %bb48.i ], [ 0.000000e+00, %bb52.i ], [ %annealing_sched.4.0, %bb95.i ], [ %annealing_sched.4.0, %bb99.i ], [ %annealing_sched.4.0, %bb103.i ] ; <float> [#uses=8]
- %router_opts.0.0 = phi float [ 0.000000e+00, %bb1.i ], [ %router_opts.0.0, %bb20.i ], [ 1.000000e+04, %bb30.i ], [ %router_opts.0.0, %bb43.i ], [ %router_opts.0.0, %bb48.i ], [ %router_opts.0.0, %bb52.i ], [ %router_opts.0.0, %bb95.i ], [ %router_opts.0.0, %bb99.i ], [ %router_opts.0.0, %bb103.i ] ; <float> [#uses=8]
- %router_opts.1.0 = phi float [ 5.000000e-01, %bb1.i ], [ %router_opts.1.0, %bb20.i ], [ 1.000000e+04, %bb30.i ], [ %router_opts.1.0, %bb43.i ], [ %router_opts.1.0, %bb48.i ], [ %router_opts.1.0, %bb52.i ], [ undef, %bb95.i ], [ %router_opts.1.0, %bb99.i ], [ %router_opts.1.0, %bb103.i ] ; <float> [#uses=7]
- %router_opts.2.0 = phi float [ 1.500000e+00, %bb1.i ], [ %router_opts.2.0, %bb20.i ], [ %router_opts.2.0, %bb30.i ], [ %router_opts.2.0, %bb43.i ], [ %router_opts.2.0, %bb48.i ], [ %router_opts.2.0, %bb52.i ], [ %router_opts.2.0, %bb95.i ], [ undef, %bb99.i ], [ %router_opts.2.0, %bb103.i ] ; <float> [#uses=8]
- %router_opts.3.0 = phi float [ 0x3FC99999A0000000, %bb1.i ], [ %router_opts.3.0, %bb20.i ], [ %router_opts.3.0, %bb30.i ], [ %router_opts.3.0, %bb43.i ], [ %router_opts.3.0, %bb48.i ], [ %router_opts.3.0, %bb52.i ], [ %router_opts.3.0, %bb95.i ], [ %router_opts.3.0, %bb99.i ], [ 0.000000e+00, %bb103.i ] ; <float> [#uses=8]
- %11 = phi float [ 0x3FC99999A0000000, %bb1.i ], [ %11, %bb20.i ], [ %11, %bb30.i ], [ %11, %bb43.i ], [ %11, %bb48.i ], [ %11, %bb52.i ], [ %11, %bb95.i ], [ %11, %bb99.i ], [ 0.000000e+00, %bb103.i ] ; <float> [#uses=8]
- %12 = phi float [ 1.500000e+00, %bb1.i ], [ %12, %bb20.i ], [ %12, %bb30.i ], [ %12, %bb43.i ], [ %12, %bb48.i ], [ %12, %bb52.i ], [ %12, %bb95.i ], [ undef, %bb99.i ], [ %12, %bb103.i ] ; <float> [#uses=8]
- %13 = phi float [ 5.000000e-01, %bb1.i ], [ %13, %bb20.i ], [ 1.000000e+04, %bb30.i ], [ %13, %bb43.i ], [ %13, %bb48.i ], [ %13, %bb52.i ], [ undef, %bb95.i ], [ %13, %bb99.i ], [ %13, %bb103.i ] ; <float> [#uses=7]
- %14 = phi float [ 0.000000e+00, %bb1.i ], [ %14, %bb20.i ], [ 1.000000e+04, %bb30.i ], [ %14, %bb43.i ], [ %14, %bb48.i ], [ %14, %bb52.i ], [ %14, %bb95.i ], [ %14, %bb99.i ], [ %14, %bb103.i ] ; <float> [#uses=8]
- %15 = phi float [ 0x3FE99999A0000000, %bb1.i ], [ %15, %bb20.i ], [ %15, %bb30.i ], [ %15, %bb43.i ], [ %6, %bb48.i ], [ %15, %bb52.i ], [ %15, %bb95.i ], [ %15, %bb99.i ], [ %15, %bb103.i ] ; <float> [#uses=8]
- %16 = phi float [ 0x3F847AE140000000, %bb1.i ], [ %16, %bb20.i ], [ %16, %bb30.i ], [ %16, %bb43.i ], [ %16, %bb48.i ], [ 0.000000e+00, %bb52.i ], [ %16, %bb95.i ], [ %16, %bb99.i ], [ %16, %bb103.i ] ; <float> [#uses=8]
- %17 = phi float [ 1.000000e+01, %bb1.i ], [ %17, %bb20.i ], [ 1.000000e+00, %bb30.i ], [ %17, %bb43.i ], [ %17, %bb48.i ], [ %17, %bb52.i ], [ %17, %bb95.i ], [ %17, %bb99.i ], [ %17, %bb103.i ] ; <float> [#uses=8]
- %18 = icmp slt i32 undef, %argc ; <i1> [#uses=1]
- br i1 %18, label %bb2.i, label %bb135.i
-
-bb135.i: ; preds = %bb134.i
- br i1 undef, label %bb141.i, label %bb142.i
-
-bb141.i: ; preds = %bb135.i
- unreachable
-
-bb142.i: ; preds = %bb135.i
- br i1 undef, label %bb145.i, label %bb144.i
-
-bb144.i: ; preds = %bb142.i
- unreachable
-
-bb145.i: ; preds = %bb142.i
- br i1 undef, label %bb146.i, label %bb147.i
-
-bb146.i: ; preds = %bb145.i
- unreachable
-
-bb147.i: ; preds = %bb145.i
- br i1 undef, label %bb148.i, label %bb155.i
-
-bb148.i: ; preds = %bb147.i
- br label %bb155.i
-
-bb155.i: ; preds = %bb148.i, %bb147.i
- br i1 undef, label %bb156.i, label %bb161.i
-
-bb156.i: ; preds = %bb155.i
- unreachable
-
-bb161.i: ; preds = %bb155.i
- br i1 undef, label %bb162.i, label %bb163.i
-
-bb162.i: ; preds = %bb161.i
- %19 = fpext float %17 to double ; <double> [#uses=1]
- %20 = call i32 (i8*, ...)* @"\01_printf$LDBL128"(i8* getelementptr inbounds ([61 x i8]* @.str101, i32 0, i32 0), double %19) nounwind ; <i32> [#uses=0]
- unreachable
-
-bb163.i: ; preds = %bb161.i
- %21 = fpext float %16 to double ; <double> [#uses=1]
- %22 = call i32 (i8*, ...)* @"\01_printf$LDBL128"(i8* getelementptr inbounds ([31 x i8]* @.str104, i32 0, i32 0), double %21) nounwind ; <i32> [#uses=0]
- %23 = fpext float %15 to double ; <double> [#uses=1]
- %24 = call i32 (i8*, ...)* @"\01_printf$LDBL128"(i8* getelementptr inbounds ([45 x i8]* @.str105, i32 0, i32 0), double %23) nounwind ; <i32> [#uses=0]
- %25 = call i32 (i8*, ...)* @"\01_printf$LDBL128"(i8* getelementptr inbounds ([38 x i8]* @.str112, i32 0, i32 0), double undef) nounwind ; <i32> [#uses=0]
- br i1 undef, label %parse_command.exit, label %bb176.i
-
-bb176.i: ; preds = %bb163.i
- br i1 undef, label %bb177.i, label %bb178.i
-
-bb177.i: ; preds = %bb176.i
- unreachable
-
-bb178.i: ; preds = %bb176.i
- %26 = call i32 (i8*, ...)* @"\01_printf$LDBL128"(i8* getelementptr inbounds ([36 x i8]* @.str121, i32 0, i32 0), double undef) nounwind ; <i32> [#uses=0]
- %27 = fpext float %14 to double ; <double> [#uses=1]
- %28 = call i32 (i8*, ...)* @"\01_printf$LDBL128"(i8* getelementptr inbounds ([67 x i8]* @.str12293, i32 0, i32 0), double %27) nounwind ; <i32> [#uses=0]
- %29 = fpext float %13 to double ; <double> [#uses=1]
- %30 = call i32 (i8*, ...)* @"\01_printf$LDBL128"(i8* getelementptr inbounds ([68 x i8]* @.str123, i32 0, i32 0), double %29) nounwind ; <i32> [#uses=0]
- %31 = fpext float %12 to double ; <double> [#uses=1]
- %32 = call i32 (i8*, ...)* @"\01_printf$LDBL128"(i8* getelementptr inbounds ([52 x i8]* @.str124, i32 0, i32 0), double %31) nounwind ; <i32> [#uses=0]
- %33 = fpext float %11 to double ; <double> [#uses=1]
- %34 = call i32 (i8*, ...)* @"\01_printf$LDBL128"(i8* getelementptr inbounds ([51 x i8]* @.str125, i32 0, i32 0), double %33) nounwind ; <i32> [#uses=0]
- unreachable
-
-parse_command.exit: ; preds = %bb163.i
- br i1 undef, label %bb4.i152.i, label %my_fopen.exit.i
-
-bb4.i152.i: ; preds = %parse_command.exit
- unreachable
-
-my_fopen.exit.i: ; preds = %parse_command.exit
- br i1 undef, label %bb.i6.i99, label %bb49.preheader.i.i
-
-bb.i6.i99: ; preds = %my_fopen.exit.i
- br i1 undef, label %bb3.i.i100, label %bb1.i8.i
-
-bb1.i8.i: ; preds = %bb.i6.i99
- unreachable
-
-bb3.i.i100: ; preds = %bb.i6.i99
- unreachable
-
-bb49.preheader.i.i: ; preds = %my_fopen.exit.i
- br i1 undef, label %bb7.i11.i, label %bb50.i.i
-
-bb7.i11.i: ; preds = %bb49.preheader.i.i
- unreachable
-
-bb50.i.i: ; preds = %bb49.preheader.i.i
- br i1 undef, label %bb.i.i.i20.i, label %my_calloc.exit.i.i.i
-
-bb.i.i.i20.i: ; preds = %bb50.i.i
- unreachable
-
-my_calloc.exit.i.i.i: ; preds = %bb50.i.i
- br i1 undef, label %bb.i.i37.i.i, label %alloc_hash_table.exit.i21.i
-
-bb.i.i37.i.i: ; preds = %my_calloc.exit.i.i.i
- unreachable
-
-alloc_hash_table.exit.i21.i: ; preds = %my_calloc.exit.i.i.i
- br i1 undef, label %bb51.i.i, label %bb3.i23.i.i
-
-bb51.i.i: ; preds = %alloc_hash_table.exit.i21.i
- unreachable
-
-bb3.i23.i.i: ; preds = %alloc_hash_table.exit.i21.i
- br i1 undef, label %bb.i8.i.i, label %bb.nph.i.i
-
-bb.nph.i.i: ; preds = %bb3.i23.i.i
- unreachable
-
-bb.i8.i.i: ; preds = %bb3.i.i34.i, %bb3.i23.i.i
- br i1 undef, label %bb3.i.i34.i, label %bb1.i.i32.i
-
-bb1.i.i32.i: ; preds = %bb.i8.i.i
- unreachable
-
-bb3.i.i34.i: ; preds = %bb.i8.i.i
- br i1 undef, label %free_hash_table.exit.i.i, label %bb.i8.i.i
-
-free_hash_table.exit.i.i: ; preds = %bb3.i.i34.i
- br i1 undef, label %check_netlist.exit.i, label %bb59.i.i
-
-bb59.i.i: ; preds = %free_hash_table.exit.i.i
- unreachable
-
-check_netlist.exit.i: ; preds = %free_hash_table.exit.i.i
- br label %bb.i.i3.i
-
-bb.i.i3.i: ; preds = %bb3.i.i4.i, %check_netlist.exit.i
- br i1 false, label %bb3.i.i4.i, label %bb1.i.i.i122
-
-bb1.i.i.i122: ; preds = %bb1.i.i.i122, %bb.i.i3.i
- br i1 false, label %bb3.i.i4.i, label %bb1.i.i.i122
-
-bb3.i.i4.i: ; preds = %bb1.i.i.i122, %bb.i.i3.i
- br i1 undef, label %read_net.exit, label %bb.i.i3.i
-
-read_net.exit: ; preds = %bb3.i.i4.i
- br i1 undef, label %bb.i44, label %bb3.i47
-
-bb.i44: ; preds = %read_net.exit
- unreachable
-
-bb3.i47: ; preds = %read_net.exit
- br i1 false, label %bb9.i50, label %bb8.i49
-
-bb8.i49: ; preds = %bb3.i47
- unreachable
-
-bb9.i50: ; preds = %bb3.i47
- br i1 undef, label %bb11.i51, label %bb12.i52
-
-bb11.i51: ; preds = %bb9.i50
- unreachable
-
-bb12.i52: ; preds = %bb9.i50
- br i1 undef, label %bb.i.i53, label %my_malloc.exit.i54
-
-bb.i.i53: ; preds = %bb12.i52
- unreachable
-
-my_malloc.exit.i54: ; preds = %bb12.i52
- br i1 undef, label %bb.i2.i55, label %my_malloc.exit3.i56
-
-bb.i2.i55: ; preds = %my_malloc.exit.i54
- unreachable
-
-my_malloc.exit3.i56: ; preds = %my_malloc.exit.i54
- br i1 undef, label %bb.i.i.i57, label %my_malloc.exit.i.i
-
-bb.i.i.i57: ; preds = %my_malloc.exit3.i56
- unreachable
-
-my_malloc.exit.i.i: ; preds = %my_malloc.exit3.i56
- br i1 undef, label %bb, label %bb10
-
-bb: ; preds = %my_malloc.exit.i.i
- unreachable
-
-bb10: ; preds = %my_malloc.exit.i.i
- br i1 false, label %bb12, label %bb11
-
-bb11: ; preds = %bb10
- unreachable
-
-bb12: ; preds = %bb10
- store float %annealing_sched.1.0, float* null, align 4
- store float %annealing_sched.2.0, float* undef, align 8
- store float %annealing_sched.3.0, float* undef, align 4
- store float %annealing_sched.4.0, float* undef, align 8
- store float %router_opts.0.0, float* undef, align 8
- store float %router_opts.1.0, float* undef, align 4
- store float %router_opts.2.0, float* null, align 8
- store float %router_opts.3.0, float* undef, align 4
- br i1 undef, label %place_and_route.exit, label %bb7.i22
-
-bb7.i22: ; preds = %bb12
- br i1 false, label %bb8.i23, label %bb9.i26
-
-bb8.i23: ; preds = %bb7.i22
- unreachable
-
-bb9.i26: ; preds = %bb7.i22
- unreachable
-
-place_and_route.exit: ; preds = %bb12
- unreachable
-}
-
-declare i32 @"\01_printf$LDBL128"(i8*, ...) nounwind
-
-declare i32 @strcmp(i8* nocapture, i8* nocapture) nounwind readonly
-
-declare i32 @"\01_sscanf$LDBL128"(i8*, i8*, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/Atomics-32.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/Atomics-32.ll
deleted file mode 100644
index 03905a3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/Atomics-32.ll
+++ /dev/null
@@ -1,749 +0,0 @@
-; RUN: llc < %s -march=ppc32
-; ModuleID = 'Atomics.c'
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc-apple-darwin9"
-@sc = common global i8 0 ; <i8*> [#uses=52]
-@uc = common global i8 0 ; <i8*> [#uses=100]
-@ss = common global i16 0 ; <i16*> [#uses=15]
-@us = common global i16 0 ; <i16*> [#uses=15]
-@si = common global i32 0 ; <i32*> [#uses=15]
-@ui = common global i32 0 ; <i32*> [#uses=23]
-@sl = common global i32 0 ; <i32*> [#uses=15]
-@ul = common global i32 0 ; <i32*> [#uses=15]
-@sll = common global i64 0, align 8 ; <i64*> [#uses=1]
-@ull = common global i64 0, align 8 ; <i64*> [#uses=1]
-
-define void @test_op_ignore() nounwind {
-entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=0]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %10, i32 1 ) ; <i32>:11 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %12, i32 1 ) ; <i32>:13 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:14 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:15 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:16 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 1 ) ; <i16>:17 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 1 ) ; <i16>:19 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 1 ) ; <i32>:21 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:22 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 1 ) ; <i32>:23 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 1 ) ; <i32>:25 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:26 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 1 ) ; <i32>:27 [#uses=0]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:28 [#uses=0]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:29 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:30 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 1 ) ; <i16>:31 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:32 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 1 ) ; <i16>:33 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:34 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 1 ) ; <i32>:35 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:36 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 1 ) ; <i32>:37 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:38 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %38, i32 1 ) ; <i32>:39 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:40 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %40, i32 1 ) ; <i32>:41 [#uses=0]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:42 [#uses=0]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:43 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 1 ) ; <i16>:45 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:46 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 1 ) ; <i16>:47 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:48 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 1 ) ; <i32>:49 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:50 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 1 ) ; <i32>:51 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:52 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %52, i32 1 ) ; <i32>:53 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:54 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %54, i32 1 ) ; <i32>:55 [#uses=0]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:56 [#uses=0]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:57 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 1 ) ; <i16>:59 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 1 ) ; <i16>:61 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:62 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 1 ) ; <i32>:63 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:64 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 1 ) ; <i32>:65 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:66 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %66, i32 1 ) ; <i32>:67 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:68 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %68, i32 1 ) ; <i32>:69 [#uses=0]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:70 [#uses=0]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:71 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:72 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 1 ) ; <i16>:73 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:74 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 1 ) ; <i16>:75 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 1 ) ; <i32>:77 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:78 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 1 ) ; <i32>:79 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:80 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %80, i32 1 ) ; <i32>:81 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:82 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %82, i32 1 ) ; <i32>:83 [#uses=0]
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind
-
-define void @test_fetch_and_op() nounwind {
-entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %10, i32 11 ) ; <i32>:11 [#uses=1]
- store i32 %11, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %12, i32 11 ) ; <i32>:13 [#uses=1]
- store i32 %13, i32* @ul, align 4
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:14 [#uses=1]
- store i8 %14, i8* @sc, align 1
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:15 [#uses=1]
- store i8 %15, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:16 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 11 ) ; <i16>:17 [#uses=1]
- store i16 %17, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 11 ) ; <i16>:19 [#uses=1]
- store i16 %19, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 11 ) ; <i32>:21 [#uses=1]
- store i32 %21, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:22 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 11 ) ; <i32>:23 [#uses=1]
- store i32 %23, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 11 ) ; <i32>:25 [#uses=1]
- store i32 %25, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:26 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 11 ) ; <i32>:27 [#uses=1]
- store i32 %27, i32* @ul, align 4
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:28 [#uses=1]
- store i8 %28, i8* @sc, align 1
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:29 [#uses=1]
- store i8 %29, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:30 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 11 ) ; <i16>:31 [#uses=1]
- store i16 %31, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:32 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 11 ) ; <i16>:33 [#uses=1]
- store i16 %33, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:34 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 11 ) ; <i32>:35 [#uses=1]
- store i32 %35, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:36 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 11 ) ; <i32>:37 [#uses=1]
- store i32 %37, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:38 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %38, i32 11 ) ; <i32>:39 [#uses=1]
- store i32 %39, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:40 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %40, i32 11 ) ; <i32>:41 [#uses=1]
- store i32 %41, i32* @ul, align 4
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:42 [#uses=1]
- store i8 %42, i8* @sc, align 1
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:43 [#uses=1]
- store i8 %43, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 11 ) ; <i16>:45 [#uses=1]
- store i16 %45, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:46 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 11 ) ; <i16>:47 [#uses=1]
- store i16 %47, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:48 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 11 ) ; <i32>:49 [#uses=1]
- store i32 %49, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:50 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 11 ) ; <i32>:51 [#uses=1]
- store i32 %51, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:52 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %52, i32 11 ) ; <i32>:53 [#uses=1]
- store i32 %53, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:54 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %54, i32 11 ) ; <i32>:55 [#uses=1]
- store i32 %55, i32* @ul, align 4
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:56 [#uses=1]
- store i8 %56, i8* @sc, align 1
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:57 [#uses=1]
- store i8 %57, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 11 ) ; <i16>:59 [#uses=1]
- store i16 %59, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 11 ) ; <i16>:61 [#uses=1]
- store i16 %61, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:62 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 11 ) ; <i32>:63 [#uses=1]
- store i32 %63, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:64 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 11 ) ; <i32>:65 [#uses=1]
- store i32 %65, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:66 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %66, i32 11 ) ; <i32>:67 [#uses=1]
- store i32 %67, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:68 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %68, i32 11 ) ; <i32>:69 [#uses=1]
- store i32 %69, i32* @ul, align 4
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:70 [#uses=1]
- store i8 %70, i8* @sc, align 1
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:71 [#uses=1]
- store i8 %71, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:72 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 11 ) ; <i16>:73 [#uses=1]
- store i16 %73, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:74 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 11 ) ; <i16>:75 [#uses=1]
- store i16 %75, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 11 ) ; <i32>:77 [#uses=1]
- store i32 %77, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:78 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 11 ) ; <i32>:79 [#uses=1]
- store i32 %79, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:80 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %80, i32 11 ) ; <i32>:81 [#uses=1]
- store i32 %81, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:82 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %82, i32 11 ) ; <i32>:83 [#uses=1]
- store i32 %83, i32* @ul, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define void @test_op_and_fetch() nounwind {
-entry:
- load i8* @uc, align 1 ; <i8>:0 [#uses=2]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %0 ) ; <i8>:1 [#uses=1]
- add i8 %1, %0 ; <i8>:2 [#uses=1]
- store i8 %2, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:3 [#uses=2]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %3 ) ; <i8>:4 [#uses=1]
- add i8 %4, %3 ; <i8>:5 [#uses=1]
- store i8 %5, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:6 [#uses=1]
- zext i8 %6 to i16 ; <i16>:7 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:8 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %8, i16 %7 ) ; <i16>:9 [#uses=1]
- add i16 %9, %7 ; <i16>:10 [#uses=1]
- store i16 %10, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:11 [#uses=1]
- zext i8 %11 to i16 ; <i16>:12 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:13 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %13, i16 %12 ) ; <i16>:14 [#uses=1]
- add i16 %14, %12 ; <i16>:15 [#uses=1]
- store i16 %15, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:16 [#uses=1]
- zext i8 %16 to i32 ; <i32>:17 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:18 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %18, i32 %17 ) ; <i32>:19 [#uses=1]
- add i32 %19, %17 ; <i32>:20 [#uses=1]
- store i32 %20, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:21 [#uses=1]
- zext i8 %21 to i32 ; <i32>:22 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:23 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %23, i32 %22 ) ; <i32>:24 [#uses=1]
- add i32 %24, %22 ; <i32>:25 [#uses=1]
- store i32 %25, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:26 [#uses=1]
- zext i8 %26 to i32 ; <i32>:27 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:28 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %28, i32 %27 ) ; <i32>:29 [#uses=1]
- add i32 %29, %27 ; <i32>:30 [#uses=1]
- store i32 %30, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:31 [#uses=1]
- zext i8 %31 to i32 ; <i32>:32 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:33 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %33, i32 %32 ) ; <i32>:34 [#uses=1]
- add i32 %34, %32 ; <i32>:35 [#uses=1]
- store i32 %35, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:36 [#uses=2]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %36 ) ; <i8>:37 [#uses=1]
- sub i8 %37, %36 ; <i8>:38 [#uses=1]
- store i8 %38, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:39 [#uses=2]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %39 ) ; <i8>:40 [#uses=1]
- sub i8 %40, %39 ; <i8>:41 [#uses=1]
- store i8 %41, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:42 [#uses=1]
- zext i8 %42 to i16 ; <i16>:43 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %44, i16 %43 ) ; <i16>:45 [#uses=1]
- sub i16 %45, %43 ; <i16>:46 [#uses=1]
- store i16 %46, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:47 [#uses=1]
- zext i8 %47 to i16 ; <i16>:48 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:49 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %49, i16 %48 ) ; <i16>:50 [#uses=1]
- sub i16 %50, %48 ; <i16>:51 [#uses=1]
- store i16 %51, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:52 [#uses=1]
- zext i8 %52 to i32 ; <i32>:53 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:54 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %54, i32 %53 ) ; <i32>:55 [#uses=1]
- sub i32 %55, %53 ; <i32>:56 [#uses=1]
- store i32 %56, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:57 [#uses=1]
- zext i8 %57 to i32 ; <i32>:58 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:59 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %59, i32 %58 ) ; <i32>:60 [#uses=1]
- sub i32 %60, %58 ; <i32>:61 [#uses=1]
- store i32 %61, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:62 [#uses=1]
- zext i8 %62 to i32 ; <i32>:63 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:64 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %64, i32 %63 ) ; <i32>:65 [#uses=1]
- sub i32 %65, %63 ; <i32>:66 [#uses=1]
- store i32 %66, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:67 [#uses=1]
- zext i8 %67 to i32 ; <i32>:68 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:69 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %69, i32 %68 ) ; <i32>:70 [#uses=1]
- sub i32 %70, %68 ; <i32>:71 [#uses=1]
- store i32 %71, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:72 [#uses=2]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %72 ) ; <i8>:73 [#uses=1]
- or i8 %73, %72 ; <i8>:74 [#uses=1]
- store i8 %74, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:75 [#uses=2]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %75 ) ; <i8>:76 [#uses=1]
- or i8 %76, %75 ; <i8>:77 [#uses=1]
- store i8 %77, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:78 [#uses=1]
- zext i8 %78 to i16 ; <i16>:79 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:80 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %80, i16 %79 ) ; <i16>:81 [#uses=1]
- or i16 %81, %79 ; <i16>:82 [#uses=1]
- store i16 %82, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:83 [#uses=1]
- zext i8 %83 to i16 ; <i16>:84 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:85 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %85, i16 %84 ) ; <i16>:86 [#uses=1]
- or i16 %86, %84 ; <i16>:87 [#uses=1]
- store i16 %87, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:88 [#uses=1]
- zext i8 %88 to i32 ; <i32>:89 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:90 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %90, i32 %89 ) ; <i32>:91 [#uses=1]
- or i32 %91, %89 ; <i32>:92 [#uses=1]
- store i32 %92, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:93 [#uses=1]
- zext i8 %93 to i32 ; <i32>:94 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:95 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %95, i32 %94 ) ; <i32>:96 [#uses=1]
- or i32 %96, %94 ; <i32>:97 [#uses=1]
- store i32 %97, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:98 [#uses=1]
- zext i8 %98 to i32 ; <i32>:99 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:100 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %100, i32 %99 ) ; <i32>:101 [#uses=1]
- or i32 %101, %99 ; <i32>:102 [#uses=1]
- store i32 %102, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:103 [#uses=1]
- zext i8 %103 to i32 ; <i32>:104 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:105 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %105, i32 %104 ) ; <i32>:106 [#uses=1]
- or i32 %106, %104 ; <i32>:107 [#uses=1]
- store i32 %107, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:108 [#uses=2]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %108 ) ; <i8>:109 [#uses=1]
- xor i8 %109, %108 ; <i8>:110 [#uses=1]
- store i8 %110, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:111 [#uses=2]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %111 ) ; <i8>:112 [#uses=1]
- xor i8 %112, %111 ; <i8>:113 [#uses=1]
- store i8 %113, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:114 [#uses=1]
- zext i8 %114 to i16 ; <i16>:115 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:116 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %116, i16 %115 ) ; <i16>:117 [#uses=1]
- xor i16 %117, %115 ; <i16>:118 [#uses=1]
- store i16 %118, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:119 [#uses=1]
- zext i8 %119 to i16 ; <i16>:120 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:121 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %121, i16 %120 ) ; <i16>:122 [#uses=1]
- xor i16 %122, %120 ; <i16>:123 [#uses=1]
- store i16 %123, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:124 [#uses=1]
- zext i8 %124 to i32 ; <i32>:125 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:126 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %126, i32 %125 ) ; <i32>:127 [#uses=1]
- xor i32 %127, %125 ; <i32>:128 [#uses=1]
- store i32 %128, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:129 [#uses=1]
- zext i8 %129 to i32 ; <i32>:130 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:131 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %131, i32 %130 ) ; <i32>:132 [#uses=1]
- xor i32 %132, %130 ; <i32>:133 [#uses=1]
- store i32 %133, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:134 [#uses=1]
- zext i8 %134 to i32 ; <i32>:135 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:136 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %136, i32 %135 ) ; <i32>:137 [#uses=1]
- xor i32 %137, %135 ; <i32>:138 [#uses=1]
- store i32 %138, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:139 [#uses=1]
- zext i8 %139 to i32 ; <i32>:140 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:141 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %141, i32 %140 ) ; <i32>:142 [#uses=1]
- xor i32 %142, %140 ; <i32>:143 [#uses=1]
- store i32 %143, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:144 [#uses=2]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %144 ) ; <i8>:145 [#uses=1]
- and i8 %145, %144 ; <i8>:146 [#uses=1]
- store i8 %146, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:147 [#uses=2]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %147 ) ; <i8>:148 [#uses=1]
- and i8 %148, %147 ; <i8>:149 [#uses=1]
- store i8 %149, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:150 [#uses=1]
- zext i8 %150 to i16 ; <i16>:151 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:152 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %152, i16 %151 ) ; <i16>:153 [#uses=1]
- and i16 %153, %151 ; <i16>:154 [#uses=1]
- store i16 %154, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:155 [#uses=1]
- zext i8 %155 to i16 ; <i16>:156 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:157 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %157, i16 %156 ) ; <i16>:158 [#uses=1]
- and i16 %158, %156 ; <i16>:159 [#uses=1]
- store i16 %159, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:160 [#uses=1]
- zext i8 %160 to i32 ; <i32>:161 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:162 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %162, i32 %161 ) ; <i32>:163 [#uses=1]
- and i32 %163, %161 ; <i32>:164 [#uses=1]
- store i32 %164, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:165 [#uses=1]
- zext i8 %165 to i32 ; <i32>:166 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:167 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %167, i32 %166 ) ; <i32>:168 [#uses=1]
- and i32 %168, %166 ; <i32>:169 [#uses=1]
- store i32 %169, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:170 [#uses=1]
- zext i8 %170 to i32 ; <i32>:171 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:172 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %172, i32 %171 ) ; <i32>:173 [#uses=1]
- and i32 %173, %171 ; <i32>:174 [#uses=1]
- store i32 %174, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:175 [#uses=1]
- zext i8 %175 to i32 ; <i32>:176 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:177 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %177, i32 %176 ) ; <i32>:178 [#uses=1]
- and i32 %178, %176 ; <i32>:179 [#uses=1]
- store i32 %179, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:180 [#uses=2]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %180 ) ; <i8>:181 [#uses=1]
- xor i8 %181, -1 ; <i8>:182 [#uses=1]
- and i8 %182, %180 ; <i8>:183 [#uses=1]
- store i8 %183, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:184 [#uses=2]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %184 ) ; <i8>:185 [#uses=1]
- xor i8 %185, -1 ; <i8>:186 [#uses=1]
- and i8 %186, %184 ; <i8>:187 [#uses=1]
- store i8 %187, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:188 [#uses=1]
- zext i8 %188 to i16 ; <i16>:189 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:190 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %190, i16 %189 ) ; <i16>:191 [#uses=1]
- xor i16 %191, -1 ; <i16>:192 [#uses=1]
- and i16 %192, %189 ; <i16>:193 [#uses=1]
- store i16 %193, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:194 [#uses=1]
- zext i8 %194 to i16 ; <i16>:195 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:196 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %196, i16 %195 ) ; <i16>:197 [#uses=1]
- xor i16 %197, -1 ; <i16>:198 [#uses=1]
- and i16 %198, %195 ; <i16>:199 [#uses=1]
- store i16 %199, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:200 [#uses=1]
- zext i8 %200 to i32 ; <i32>:201 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:202 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %202, i32 %201 ) ; <i32>:203 [#uses=1]
- xor i32 %203, -1 ; <i32>:204 [#uses=1]
- and i32 %204, %201 ; <i32>:205 [#uses=1]
- store i32 %205, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:206 [#uses=1]
- zext i8 %206 to i32 ; <i32>:207 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:208 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %208, i32 %207 ) ; <i32>:209 [#uses=1]
- xor i32 %209, -1 ; <i32>:210 [#uses=1]
- and i32 %210, %207 ; <i32>:211 [#uses=1]
- store i32 %211, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:212 [#uses=1]
- zext i8 %212 to i32 ; <i32>:213 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:214 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %214, i32 %213 ) ; <i32>:215 [#uses=1]
- xor i32 %215, -1 ; <i32>:216 [#uses=1]
- and i32 %216, %213 ; <i32>:217 [#uses=1]
- store i32 %217, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:218 [#uses=1]
- zext i8 %218 to i32 ; <i32>:219 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:220 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %220, i32 %219 ) ; <i32>:221 [#uses=1]
- xor i32 %221, -1 ; <i32>:222 [#uses=1]
- and i32 %222, %219 ; <i32>:223 [#uses=1]
- store i32 %223, i32* @ul, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define void @test_compare_and_swap() nounwind {
-entry:
- load i8* @uc, align 1 ; <i8>:0 [#uses=1]
- load i8* @sc, align 1 ; <i8>:1 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %0, i8 %1 ) ; <i8>:2 [#uses=1]
- store i8 %2, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:3 [#uses=1]
- load i8* @sc, align 1 ; <i8>:4 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %3, i8 %4 ) ; <i8>:5 [#uses=1]
- store i8 %5, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:6 [#uses=1]
- zext i8 %6 to i16 ; <i16>:7 [#uses=1]
- load i8* @sc, align 1 ; <i8>:8 [#uses=1]
- sext i8 %8 to i16 ; <i16>:9 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:10 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %10, i16 %7, i16 %9 ) ; <i16>:11 [#uses=1]
- store i16 %11, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:12 [#uses=1]
- zext i8 %12 to i16 ; <i16>:13 [#uses=1]
- load i8* @sc, align 1 ; <i8>:14 [#uses=1]
- sext i8 %14 to i16 ; <i16>:15 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:16 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %16, i16 %13, i16 %15 ) ; <i16>:17 [#uses=1]
- store i16 %17, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:18 [#uses=1]
- zext i8 %18 to i32 ; <i32>:19 [#uses=1]
- load i8* @sc, align 1 ; <i8>:20 [#uses=1]
- sext i8 %20 to i32 ; <i32>:21 [#uses=1]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:22 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %22, i32 %19, i32 %21 ) ; <i32>:23 [#uses=1]
- store i32 %23, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:24 [#uses=1]
- zext i8 %24 to i32 ; <i32>:25 [#uses=1]
- load i8* @sc, align 1 ; <i8>:26 [#uses=1]
- sext i8 %26 to i32 ; <i32>:27 [#uses=1]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:28 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %28, i32 %25, i32 %27 ) ; <i32>:29 [#uses=1]
- store i32 %29, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:30 [#uses=1]
- zext i8 %30 to i32 ; <i32>:31 [#uses=1]
- load i8* @sc, align 1 ; <i8>:32 [#uses=1]
- sext i8 %32 to i32 ; <i32>:33 [#uses=1]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:34 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %34, i32 %31, i32 %33 ) ; <i32>:35 [#uses=1]
- store i32 %35, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:36 [#uses=1]
- zext i8 %36 to i32 ; <i32>:37 [#uses=1]
- load i8* @sc, align 1 ; <i8>:38 [#uses=1]
- sext i8 %38 to i32 ; <i32>:39 [#uses=1]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:40 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %40, i32 %37, i32 %39 ) ; <i32>:41 [#uses=1]
- store i32 %41, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:42 [#uses=2]
- load i8* @sc, align 1 ; <i8>:43 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %42, i8 %43 ) ; <i8>:44 [#uses=1]
- icmp eq i8 %44, %42 ; <i1>:45 [#uses=1]
- zext i1 %45 to i32 ; <i32>:46 [#uses=1]
- store i32 %46, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:47 [#uses=2]
- load i8* @sc, align 1 ; <i8>:48 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %47, i8 %48 ) ; <i8>:49 [#uses=1]
- icmp eq i8 %49, %47 ; <i1>:50 [#uses=1]
- zext i1 %50 to i32 ; <i32>:51 [#uses=1]
- store i32 %51, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:52 [#uses=1]
- zext i8 %52 to i16 ; <i16>:53 [#uses=2]
- load i8* @sc, align 1 ; <i8>:54 [#uses=1]
- sext i8 %54 to i16 ; <i16>:55 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:56 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %56, i16 %53, i16 %55 ) ; <i16>:57 [#uses=1]
- icmp eq i16 %57, %53 ; <i1>:58 [#uses=1]
- zext i1 %58 to i32 ; <i32>:59 [#uses=1]
- store i32 %59, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:60 [#uses=1]
- zext i8 %60 to i16 ; <i16>:61 [#uses=2]
- load i8* @sc, align 1 ; <i8>:62 [#uses=1]
- sext i8 %62 to i16 ; <i16>:63 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:64 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %64, i16 %61, i16 %63 ) ; <i16>:65 [#uses=1]
- icmp eq i16 %65, %61 ; <i1>:66 [#uses=1]
- zext i1 %66 to i32 ; <i32>:67 [#uses=1]
- store i32 %67, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:68 [#uses=1]
- zext i8 %68 to i32 ; <i32>:69 [#uses=2]
- load i8* @sc, align 1 ; <i8>:70 [#uses=1]
- sext i8 %70 to i32 ; <i32>:71 [#uses=1]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:72 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %72, i32 %69, i32 %71 ) ; <i32>:73 [#uses=1]
- icmp eq i32 %73, %69 ; <i1>:74 [#uses=1]
- zext i1 %74 to i32 ; <i32>:75 [#uses=1]
- store i32 %75, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:76 [#uses=1]
- zext i8 %76 to i32 ; <i32>:77 [#uses=2]
- load i8* @sc, align 1 ; <i8>:78 [#uses=1]
- sext i8 %78 to i32 ; <i32>:79 [#uses=1]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:80 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %80, i32 %77, i32 %79 ) ; <i32>:81 [#uses=1]
- icmp eq i32 %81, %77 ; <i1>:82 [#uses=1]
- zext i1 %82 to i32 ; <i32>:83 [#uses=1]
- store i32 %83, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:84 [#uses=1]
- zext i8 %84 to i32 ; <i32>:85 [#uses=2]
- load i8* @sc, align 1 ; <i8>:86 [#uses=1]
- sext i8 %86 to i32 ; <i32>:87 [#uses=1]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:88 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %88, i32 %85, i32 %87 ) ; <i32>:89 [#uses=1]
- icmp eq i32 %89, %85 ; <i1>:90 [#uses=1]
- zext i1 %90 to i32 ; <i32>:91 [#uses=1]
- store i32 %91, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:92 [#uses=1]
- zext i8 %92 to i32 ; <i32>:93 [#uses=2]
- load i8* @sc, align 1 ; <i8>:94 [#uses=1]
- sext i8 %94 to i32 ; <i32>:95 [#uses=1]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:96 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %96, i32 %93, i32 %95 ) ; <i32>:97 [#uses=1]
- icmp eq i32 %97, %93 ; <i1>:98 [#uses=1]
- zext i1 %98 to i32 ; <i32>:99 [#uses=1]
- store i32 %99, i32* @ui, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind
-
-declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind
-
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
-
-define void @test_lock() nounwind {
-entry:
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %10, i32 1 ) ; <i32>:11 [#uses=1]
- store i32 %11, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %12, i32 1 ) ; <i32>:13 [#uses=1]
- store i32 %13, i32* @ul, align 4
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false )
- volatile store i8 0, i8* @sc, align 1
- volatile store i8 0, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:14 [#uses=1]
- volatile store i16 0, i16* %14, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:15 [#uses=1]
- volatile store i16 0, i16* %15, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:16 [#uses=1]
- volatile store i32 0, i32* %16, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:17 [#uses=1]
- volatile store i32 0, i32* %17, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:18 [#uses=1]
- volatile store i32 0, i32* %18, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:19 [#uses=1]
- volatile store i32 0, i32* %19, align 4
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:20 [#uses=1]
- volatile store i64 0, i64* %20, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:21 [#uses=1]
- volatile store i64 0, i64* %21, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/Atomics-64.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/Atomics-64.ll
deleted file mode 100644
index 1dc4310..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/Atomics-64.ll
+++ /dev/null
@@ -1,773 +0,0 @@
-; RUN: llc < %s -march=ppc64
-; ModuleID = 'Atomics.c'
-target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc64-apple-darwin9"
-@sc = common global i8 0 ; <i8*> [#uses=52]
-@uc = common global i8 0 ; <i8*> [#uses=100]
-@ss = common global i16 0 ; <i16*> [#uses=15]
-@us = common global i16 0 ; <i16*> [#uses=15]
-@si = common global i32 0 ; <i32*> [#uses=15]
-@ui = common global i32 0 ; <i32*> [#uses=23]
-@sl = common global i64 0, align 8 ; <i64*> [#uses=15]
-@ul = common global i64 0, align 8 ; <i64*> [#uses=15]
-@sll = common global i64 0, align 8 ; <i64*> [#uses=1]
-@ull = common global i64 0, align 8 ; <i64*> [#uses=1]
-
-define void @test_op_ignore() nounwind {
-entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=0]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:10 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 1 ) ; <i64>:11 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:12 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 1 ) ; <i64>:13 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:14 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:15 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:16 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 1 ) ; <i16>:17 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 1 ) ; <i16>:19 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 1 ) ; <i32>:21 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:22 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 1 ) ; <i32>:23 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:24 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %24, i64 1 ) ; <i64>:25 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:26 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %26, i64 1 ) ; <i64>:27 [#uses=0]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:28 [#uses=0]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:29 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:30 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 1 ) ; <i16>:31 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:32 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 1 ) ; <i16>:33 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:34 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 1 ) ; <i32>:35 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:36 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 1 ) ; <i32>:37 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:38 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %38, i64 1 ) ; <i64>:39 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:40 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %40, i64 1 ) ; <i64>:41 [#uses=0]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:42 [#uses=0]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:43 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 1 ) ; <i16>:45 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:46 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 1 ) ; <i16>:47 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:48 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 1 ) ; <i32>:49 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:50 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 1 ) ; <i32>:51 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:52 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %52, i64 1 ) ; <i64>:53 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:54 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %54, i64 1 ) ; <i64>:55 [#uses=0]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:56 [#uses=0]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:57 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 1 ) ; <i16>:59 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 1 ) ; <i16>:61 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:62 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 1 ) ; <i32>:63 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:64 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 1 ) ; <i32>:65 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:66 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %66, i64 1 ) ; <i64>:67 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:68 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %68, i64 1 ) ; <i64>:69 [#uses=0]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:70 [#uses=0]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:71 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:72 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 1 ) ; <i16>:73 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:74 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 1 ) ; <i16>:75 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 1 ) ; <i32>:77 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:78 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 1 ) ; <i32>:79 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:80 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %80, i64 1 ) ; <i64>:81 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:82 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %82, i64 1 ) ; <i64>:83 [#uses=0]
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.add.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.or.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.xor.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.and.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.nand.i64.p0i64(i64*, i64) nounwind
-
-define void @test_fetch_and_op() nounwind {
-entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:10 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 11 ) ; <i64>:11 [#uses=1]
- store i64 %11, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:12 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 11 ) ; <i64>:13 [#uses=1]
- store i64 %13, i64* @ul, align 8
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:14 [#uses=1]
- store i8 %14, i8* @sc, align 1
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:15 [#uses=1]
- store i8 %15, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:16 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 11 ) ; <i16>:17 [#uses=1]
- store i16 %17, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 11 ) ; <i16>:19 [#uses=1]
- store i16 %19, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 11 ) ; <i32>:21 [#uses=1]
- store i32 %21, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:22 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 11 ) ; <i32>:23 [#uses=1]
- store i32 %23, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:24 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %24, i64 11 ) ; <i64>:25 [#uses=1]
- store i64 %25, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:26 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %26, i64 11 ) ; <i64>:27 [#uses=1]
- store i64 %27, i64* @ul, align 8
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:28 [#uses=1]
- store i8 %28, i8* @sc, align 1
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:29 [#uses=1]
- store i8 %29, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:30 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 11 ) ; <i16>:31 [#uses=1]
- store i16 %31, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:32 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 11 ) ; <i16>:33 [#uses=1]
- store i16 %33, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:34 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 11 ) ; <i32>:35 [#uses=1]
- store i32 %35, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:36 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 11 ) ; <i32>:37 [#uses=1]
- store i32 %37, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:38 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %38, i64 11 ) ; <i64>:39 [#uses=1]
- store i64 %39, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:40 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %40, i64 11 ) ; <i64>:41 [#uses=1]
- store i64 %41, i64* @ul, align 8
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:42 [#uses=1]
- store i8 %42, i8* @sc, align 1
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:43 [#uses=1]
- store i8 %43, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 11 ) ; <i16>:45 [#uses=1]
- store i16 %45, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:46 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 11 ) ; <i16>:47 [#uses=1]
- store i16 %47, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:48 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 11 ) ; <i32>:49 [#uses=1]
- store i32 %49, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:50 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 11 ) ; <i32>:51 [#uses=1]
- store i32 %51, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:52 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %52, i64 11 ) ; <i64>:53 [#uses=1]
- store i64 %53, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:54 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %54, i64 11 ) ; <i64>:55 [#uses=1]
- store i64 %55, i64* @ul, align 8
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:56 [#uses=1]
- store i8 %56, i8* @sc, align 1
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:57 [#uses=1]
- store i8 %57, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 11 ) ; <i16>:59 [#uses=1]
- store i16 %59, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 11 ) ; <i16>:61 [#uses=1]
- store i16 %61, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:62 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 11 ) ; <i32>:63 [#uses=1]
- store i32 %63, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:64 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 11 ) ; <i32>:65 [#uses=1]
- store i32 %65, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:66 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %66, i64 11 ) ; <i64>:67 [#uses=1]
- store i64 %67, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:68 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %68, i64 11 ) ; <i64>:69 [#uses=1]
- store i64 %69, i64* @ul, align 8
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:70 [#uses=1]
- store i8 %70, i8* @sc, align 1
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:71 [#uses=1]
- store i8 %71, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:72 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 11 ) ; <i16>:73 [#uses=1]
- store i16 %73, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:74 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 11 ) ; <i16>:75 [#uses=1]
- store i16 %75, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 11 ) ; <i32>:77 [#uses=1]
- store i32 %77, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:78 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 11 ) ; <i32>:79 [#uses=1]
- store i32 %79, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:80 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %80, i64 11 ) ; <i64>:81 [#uses=1]
- store i64 %81, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:82 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %82, i64 11 ) ; <i64>:83 [#uses=1]
- store i64 %83, i64* @ul, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define void @test_op_and_fetch() nounwind {
-entry:
- load i8* @uc, align 1 ; <i8>:0 [#uses=2]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %0 ) ; <i8>:1 [#uses=1]
- add i8 %1, %0 ; <i8>:2 [#uses=1]
- store i8 %2, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:3 [#uses=2]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %3 ) ; <i8>:4 [#uses=1]
- add i8 %4, %3 ; <i8>:5 [#uses=1]
- store i8 %5, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:6 [#uses=1]
- zext i8 %6 to i16 ; <i16>:7 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:8 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %8, i16 %7 ) ; <i16>:9 [#uses=1]
- add i16 %9, %7 ; <i16>:10 [#uses=1]
- store i16 %10, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:11 [#uses=1]
- zext i8 %11 to i16 ; <i16>:12 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:13 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %13, i16 %12 ) ; <i16>:14 [#uses=1]
- add i16 %14, %12 ; <i16>:15 [#uses=1]
- store i16 %15, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:16 [#uses=1]
- zext i8 %16 to i32 ; <i32>:17 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:18 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %18, i32 %17 ) ; <i32>:19 [#uses=1]
- add i32 %19, %17 ; <i32>:20 [#uses=1]
- store i32 %20, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:21 [#uses=1]
- zext i8 %21 to i32 ; <i32>:22 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:23 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %23, i32 %22 ) ; <i32>:24 [#uses=1]
- add i32 %24, %22 ; <i32>:25 [#uses=1]
- store i32 %25, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:26 [#uses=1]
- zext i8 %26 to i64 ; <i64>:27 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:28 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %28, i64 %27 ) ; <i64>:29 [#uses=1]
- add i64 %29, %27 ; <i64>:30 [#uses=1]
- store i64 %30, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:31 [#uses=1]
- zext i8 %31 to i64 ; <i64>:32 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:33 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %33, i64 %32 ) ; <i64>:34 [#uses=1]
- add i64 %34, %32 ; <i64>:35 [#uses=1]
- store i64 %35, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:36 [#uses=2]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %36 ) ; <i8>:37 [#uses=1]
- sub i8 %37, %36 ; <i8>:38 [#uses=1]
- store i8 %38, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:39 [#uses=2]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %39 ) ; <i8>:40 [#uses=1]
- sub i8 %40, %39 ; <i8>:41 [#uses=1]
- store i8 %41, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:42 [#uses=1]
- zext i8 %42 to i16 ; <i16>:43 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %44, i16 %43 ) ; <i16>:45 [#uses=1]
- sub i16 %45, %43 ; <i16>:46 [#uses=1]
- store i16 %46, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:47 [#uses=1]
- zext i8 %47 to i16 ; <i16>:48 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:49 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %49, i16 %48 ) ; <i16>:50 [#uses=1]
- sub i16 %50, %48 ; <i16>:51 [#uses=1]
- store i16 %51, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:52 [#uses=1]
- zext i8 %52 to i32 ; <i32>:53 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:54 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %54, i32 %53 ) ; <i32>:55 [#uses=1]
- sub i32 %55, %53 ; <i32>:56 [#uses=1]
- store i32 %56, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:57 [#uses=1]
- zext i8 %57 to i32 ; <i32>:58 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:59 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %59, i32 %58 ) ; <i32>:60 [#uses=1]
- sub i32 %60, %58 ; <i32>:61 [#uses=1]
- store i32 %61, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:62 [#uses=1]
- zext i8 %62 to i64 ; <i64>:63 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:64 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %64, i64 %63 ) ; <i64>:65 [#uses=1]
- sub i64 %65, %63 ; <i64>:66 [#uses=1]
- store i64 %66, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:67 [#uses=1]
- zext i8 %67 to i64 ; <i64>:68 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:69 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %69, i64 %68 ) ; <i64>:70 [#uses=1]
- sub i64 %70, %68 ; <i64>:71 [#uses=1]
- store i64 %71, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:72 [#uses=2]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %72 ) ; <i8>:73 [#uses=1]
- or i8 %73, %72 ; <i8>:74 [#uses=1]
- store i8 %74, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:75 [#uses=2]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %75 ) ; <i8>:76 [#uses=1]
- or i8 %76, %75 ; <i8>:77 [#uses=1]
- store i8 %77, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:78 [#uses=1]
- zext i8 %78 to i16 ; <i16>:79 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:80 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %80, i16 %79 ) ; <i16>:81 [#uses=1]
- or i16 %81, %79 ; <i16>:82 [#uses=1]
- store i16 %82, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:83 [#uses=1]
- zext i8 %83 to i16 ; <i16>:84 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:85 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %85, i16 %84 ) ; <i16>:86 [#uses=1]
- or i16 %86, %84 ; <i16>:87 [#uses=1]
- store i16 %87, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:88 [#uses=1]
- zext i8 %88 to i32 ; <i32>:89 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:90 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %90, i32 %89 ) ; <i32>:91 [#uses=1]
- or i32 %91, %89 ; <i32>:92 [#uses=1]
- store i32 %92, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:93 [#uses=1]
- zext i8 %93 to i32 ; <i32>:94 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:95 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %95, i32 %94 ) ; <i32>:96 [#uses=1]
- or i32 %96, %94 ; <i32>:97 [#uses=1]
- store i32 %97, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:98 [#uses=1]
- zext i8 %98 to i64 ; <i64>:99 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:100 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %100, i64 %99 ) ; <i64>:101 [#uses=1]
- or i64 %101, %99 ; <i64>:102 [#uses=1]
- store i64 %102, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:103 [#uses=1]
- zext i8 %103 to i64 ; <i64>:104 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:105 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %105, i64 %104 ) ; <i64>:106 [#uses=1]
- or i64 %106, %104 ; <i64>:107 [#uses=1]
- store i64 %107, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:108 [#uses=2]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %108 ) ; <i8>:109 [#uses=1]
- xor i8 %109, %108 ; <i8>:110 [#uses=1]
- store i8 %110, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:111 [#uses=2]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %111 ) ; <i8>:112 [#uses=1]
- xor i8 %112, %111 ; <i8>:113 [#uses=1]
- store i8 %113, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:114 [#uses=1]
- zext i8 %114 to i16 ; <i16>:115 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:116 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %116, i16 %115 ) ; <i16>:117 [#uses=1]
- xor i16 %117, %115 ; <i16>:118 [#uses=1]
- store i16 %118, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:119 [#uses=1]
- zext i8 %119 to i16 ; <i16>:120 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:121 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %121, i16 %120 ) ; <i16>:122 [#uses=1]
- xor i16 %122, %120 ; <i16>:123 [#uses=1]
- store i16 %123, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:124 [#uses=1]
- zext i8 %124 to i32 ; <i32>:125 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:126 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %126, i32 %125 ) ; <i32>:127 [#uses=1]
- xor i32 %127, %125 ; <i32>:128 [#uses=1]
- store i32 %128, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:129 [#uses=1]
- zext i8 %129 to i32 ; <i32>:130 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:131 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %131, i32 %130 ) ; <i32>:132 [#uses=1]
- xor i32 %132, %130 ; <i32>:133 [#uses=1]
- store i32 %133, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:134 [#uses=1]
- zext i8 %134 to i64 ; <i64>:135 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:136 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %136, i64 %135 ) ; <i64>:137 [#uses=1]
- xor i64 %137, %135 ; <i64>:138 [#uses=1]
- store i64 %138, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:139 [#uses=1]
- zext i8 %139 to i64 ; <i64>:140 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:141 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %141, i64 %140 ) ; <i64>:142 [#uses=1]
- xor i64 %142, %140 ; <i64>:143 [#uses=1]
- store i64 %143, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:144 [#uses=2]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %144 ) ; <i8>:145 [#uses=1]
- and i8 %145, %144 ; <i8>:146 [#uses=1]
- store i8 %146, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:147 [#uses=2]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %147 ) ; <i8>:148 [#uses=1]
- and i8 %148, %147 ; <i8>:149 [#uses=1]
- store i8 %149, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:150 [#uses=1]
- zext i8 %150 to i16 ; <i16>:151 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:152 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %152, i16 %151 ) ; <i16>:153 [#uses=1]
- and i16 %153, %151 ; <i16>:154 [#uses=1]
- store i16 %154, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:155 [#uses=1]
- zext i8 %155 to i16 ; <i16>:156 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:157 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %157, i16 %156 ) ; <i16>:158 [#uses=1]
- and i16 %158, %156 ; <i16>:159 [#uses=1]
- store i16 %159, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:160 [#uses=1]
- zext i8 %160 to i32 ; <i32>:161 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:162 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %162, i32 %161 ) ; <i32>:163 [#uses=1]
- and i32 %163, %161 ; <i32>:164 [#uses=1]
- store i32 %164, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:165 [#uses=1]
- zext i8 %165 to i32 ; <i32>:166 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:167 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %167, i32 %166 ) ; <i32>:168 [#uses=1]
- and i32 %168, %166 ; <i32>:169 [#uses=1]
- store i32 %169, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:170 [#uses=1]
- zext i8 %170 to i64 ; <i64>:171 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:172 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %172, i64 %171 ) ; <i64>:173 [#uses=1]
- and i64 %173, %171 ; <i64>:174 [#uses=1]
- store i64 %174, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:175 [#uses=1]
- zext i8 %175 to i64 ; <i64>:176 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:177 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %177, i64 %176 ) ; <i64>:178 [#uses=1]
- and i64 %178, %176 ; <i64>:179 [#uses=1]
- store i64 %179, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:180 [#uses=2]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %180 ) ; <i8>:181 [#uses=1]
- xor i8 %181, -1 ; <i8>:182 [#uses=1]
- and i8 %182, %180 ; <i8>:183 [#uses=1]
- store i8 %183, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:184 [#uses=2]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %184 ) ; <i8>:185 [#uses=1]
- xor i8 %185, -1 ; <i8>:186 [#uses=1]
- and i8 %186, %184 ; <i8>:187 [#uses=1]
- store i8 %187, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:188 [#uses=1]
- zext i8 %188 to i16 ; <i16>:189 [#uses=2]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:190 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %190, i16 %189 ) ; <i16>:191 [#uses=1]
- xor i16 %191, -1 ; <i16>:192 [#uses=1]
- and i16 %192, %189 ; <i16>:193 [#uses=1]
- store i16 %193, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:194 [#uses=1]
- zext i8 %194 to i16 ; <i16>:195 [#uses=2]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:196 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %196, i16 %195 ) ; <i16>:197 [#uses=1]
- xor i16 %197, -1 ; <i16>:198 [#uses=1]
- and i16 %198, %195 ; <i16>:199 [#uses=1]
- store i16 %199, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:200 [#uses=1]
- zext i8 %200 to i32 ; <i32>:201 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:202 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %202, i32 %201 ) ; <i32>:203 [#uses=1]
- xor i32 %203, -1 ; <i32>:204 [#uses=1]
- and i32 %204, %201 ; <i32>:205 [#uses=1]
- store i32 %205, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:206 [#uses=1]
- zext i8 %206 to i32 ; <i32>:207 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:208 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %208, i32 %207 ) ; <i32>:209 [#uses=1]
- xor i32 %209, -1 ; <i32>:210 [#uses=1]
- and i32 %210, %207 ; <i32>:211 [#uses=1]
- store i32 %211, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:212 [#uses=1]
- zext i8 %212 to i64 ; <i64>:213 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:214 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %214, i64 %213 ) ; <i64>:215 [#uses=1]
- xor i64 %215, -1 ; <i64>:216 [#uses=1]
- and i64 %216, %213 ; <i64>:217 [#uses=1]
- store i64 %217, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:218 [#uses=1]
- zext i8 %218 to i64 ; <i64>:219 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:220 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %220, i64 %219 ) ; <i64>:221 [#uses=1]
- xor i64 %221, -1 ; <i64>:222 [#uses=1]
- and i64 %222, %219 ; <i64>:223 [#uses=1]
- store i64 %223, i64* @ul, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define void @test_compare_and_swap() nounwind {
-entry:
- load i8* @uc, align 1 ; <i8>:0 [#uses=1]
- load i8* @sc, align 1 ; <i8>:1 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %0, i8 %1 ) ; <i8>:2 [#uses=1]
- store i8 %2, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:3 [#uses=1]
- load i8* @sc, align 1 ; <i8>:4 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %3, i8 %4 ) ; <i8>:5 [#uses=1]
- store i8 %5, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:6 [#uses=1]
- zext i8 %6 to i16 ; <i16>:7 [#uses=1]
- load i8* @sc, align 1 ; <i8>:8 [#uses=1]
- sext i8 %8 to i16 ; <i16>:9 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:10 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %10, i16 %7, i16 %9 ) ; <i16>:11 [#uses=1]
- store i16 %11, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:12 [#uses=1]
- zext i8 %12 to i16 ; <i16>:13 [#uses=1]
- load i8* @sc, align 1 ; <i8>:14 [#uses=1]
- sext i8 %14 to i16 ; <i16>:15 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:16 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %16, i16 %13, i16 %15 ) ; <i16>:17 [#uses=1]
- store i16 %17, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:18 [#uses=1]
- zext i8 %18 to i32 ; <i32>:19 [#uses=1]
- load i8* @sc, align 1 ; <i8>:20 [#uses=1]
- sext i8 %20 to i32 ; <i32>:21 [#uses=1]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:22 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %22, i32 %19, i32 %21 ) ; <i32>:23 [#uses=1]
- store i32 %23, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:24 [#uses=1]
- zext i8 %24 to i32 ; <i32>:25 [#uses=1]
- load i8* @sc, align 1 ; <i8>:26 [#uses=1]
- sext i8 %26 to i32 ; <i32>:27 [#uses=1]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:28 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %28, i32 %25, i32 %27 ) ; <i32>:29 [#uses=1]
- store i32 %29, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:30 [#uses=1]
- zext i8 %30 to i64 ; <i64>:31 [#uses=1]
- load i8* @sc, align 1 ; <i8>:32 [#uses=1]
- sext i8 %32 to i64 ; <i64>:33 [#uses=1]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:34 [#uses=1]
- call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %34, i64 %31, i64 %33 ) ; <i64>:35 [#uses=1]
- store i64 %35, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:36 [#uses=1]
- zext i8 %36 to i64 ; <i64>:37 [#uses=1]
- load i8* @sc, align 1 ; <i8>:38 [#uses=1]
- sext i8 %38 to i64 ; <i64>:39 [#uses=1]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:40 [#uses=1]
- call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %40, i64 %37, i64 %39 ) ; <i64>:41 [#uses=1]
- store i64 %41, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:42 [#uses=2]
- load i8* @sc, align 1 ; <i8>:43 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %42, i8 %43 ) ; <i8>:44 [#uses=1]
- icmp eq i8 %44, %42 ; <i1>:45 [#uses=1]
- zext i1 %45 to i8 ; <i8>:46 [#uses=1]
- zext i8 %46 to i32 ; <i32>:47 [#uses=1]
- store i32 %47, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:48 [#uses=2]
- load i8* @sc, align 1 ; <i8>:49 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %48, i8 %49 ) ; <i8>:50 [#uses=1]
- icmp eq i8 %50, %48 ; <i1>:51 [#uses=1]
- zext i1 %51 to i8 ; <i8>:52 [#uses=1]
- zext i8 %52 to i32 ; <i32>:53 [#uses=1]
- store i32 %53, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:54 [#uses=1]
- zext i8 %54 to i16 ; <i16>:55 [#uses=2]
- load i8* @sc, align 1 ; <i8>:56 [#uses=1]
- sext i8 %56 to i16 ; <i16>:57 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %58, i16 %55, i16 %57 ) ; <i16>:59 [#uses=1]
- icmp eq i16 %59, %55 ; <i1>:60 [#uses=1]
- zext i1 %60 to i8 ; <i8>:61 [#uses=1]
- zext i8 %61 to i32 ; <i32>:62 [#uses=1]
- store i32 %62, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:63 [#uses=1]
- zext i8 %63 to i16 ; <i16>:64 [#uses=2]
- load i8* @sc, align 1 ; <i8>:65 [#uses=1]
- sext i8 %65 to i16 ; <i16>:66 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:67 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %67, i16 %64, i16 %66 ) ; <i16>:68 [#uses=1]
- icmp eq i16 %68, %64 ; <i1>:69 [#uses=1]
- zext i1 %69 to i8 ; <i8>:70 [#uses=1]
- zext i8 %70 to i32 ; <i32>:71 [#uses=1]
- store i32 %71, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:72 [#uses=1]
- zext i8 %72 to i32 ; <i32>:73 [#uses=2]
- load i8* @sc, align 1 ; <i8>:74 [#uses=1]
- sext i8 %74 to i32 ; <i32>:75 [#uses=1]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %76, i32 %73, i32 %75 ) ; <i32>:77 [#uses=1]
- icmp eq i32 %77, %73 ; <i1>:78 [#uses=1]
- zext i1 %78 to i8 ; <i8>:79 [#uses=1]
- zext i8 %79 to i32 ; <i32>:80 [#uses=1]
- store i32 %80, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:81 [#uses=1]
- zext i8 %81 to i32 ; <i32>:82 [#uses=2]
- load i8* @sc, align 1 ; <i8>:83 [#uses=1]
- sext i8 %83 to i32 ; <i32>:84 [#uses=1]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:85 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %85, i32 %82, i32 %84 ) ; <i32>:86 [#uses=1]
- icmp eq i32 %86, %82 ; <i1>:87 [#uses=1]
- zext i1 %87 to i8 ; <i8>:88 [#uses=1]
- zext i8 %88 to i32 ; <i32>:89 [#uses=1]
- store i32 %89, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:90 [#uses=1]
- zext i8 %90 to i64 ; <i64>:91 [#uses=2]
- load i8* @sc, align 1 ; <i8>:92 [#uses=1]
- sext i8 %92 to i64 ; <i64>:93 [#uses=1]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:94 [#uses=1]
- call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %94, i64 %91, i64 %93 ) ; <i64>:95 [#uses=1]
- icmp eq i64 %95, %91 ; <i1>:96 [#uses=1]
- zext i1 %96 to i8 ; <i8>:97 [#uses=1]
- zext i8 %97 to i32 ; <i32>:98 [#uses=1]
- store i32 %98, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:99 [#uses=1]
- zext i8 %99 to i64 ; <i64>:100 [#uses=2]
- load i8* @sc, align 1 ; <i8>:101 [#uses=1]
- sext i8 %101 to i64 ; <i64>:102 [#uses=1]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:103 [#uses=1]
- call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %103, i64 %100, i64 %102 ) ; <i64>:104 [#uses=1]
- icmp eq i64 %104, %100 ; <i1>:105 [#uses=1]
- zext i1 %105 to i8 ; <i8>:106 [#uses=1]
- zext i8 %106 to i32 ; <i32>:107 [#uses=1]
- store i32 %107, i32* @ui, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind
-
-declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind
-
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
-
-declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64*, i64, i64) nounwind
-
-define void @test_lock() nounwind {
-entry:
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:10 [#uses=1]
- call i64 @llvm.atomic.swap.i64.p0i64( i64* %10, i64 1 ) ; <i64>:11 [#uses=1]
- store i64 %11, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:12 [#uses=1]
- call i64 @llvm.atomic.swap.i64.p0i64( i64* %12, i64 1 ) ; <i64>:13 [#uses=1]
- store i64 %13, i64* @ul, align 8
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false )
- volatile store i8 0, i8* @sc, align 1
- volatile store i8 0, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:14 [#uses=1]
- volatile store i16 0, i16* %14, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:15 [#uses=1]
- volatile store i16 0, i16* %15, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:16 [#uses=1]
- volatile store i32 0, i32* %16, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:17 [#uses=1]
- volatile store i32 0, i32* %17, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:18 [#uses=1]
- volatile store i64 0, i64* %18, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:19 [#uses=1]
- volatile store i64 0, i64* %19, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:20 [#uses=1]
- volatile store i64 0, i64* %20, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:21 [#uses=1]
- volatile store i64 0, i64* %21, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.swap.i64.p0i64(i64*, i64) nounwind
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/Frames-alloca.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/Frames-alloca.ll
deleted file mode 100644
index 466ae80..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/Frames-alloca.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 | FileCheck %s -check-prefix=PPC32
-; RUN: llc < %s -march=ppc64 -mtriple=powerpc-apple-darwin8 | FileCheck %s -check-prefix=PPC64
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -disable-fp-elim | FileCheck %s -check-prefix=PPC32-NOFP
-; RUN: llc < %s -march=ppc64 -mtriple=powerpc-apple-darwin8 -disable-fp-elim | FileCheck %s -check-prefix=PPC64-NOFP
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -enable-ppc32-regscavenger | FileCheck %s -check-prefix=PPC32
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -enable-ppc32-regscavenger | FileCheck %s -check-prefix=PPC32-RS
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -disable-fp-elim -enable-ppc32-regscavenger | FileCheck %s -check-prefix=PPC32-RS-NOFP
-
-; CHECK-PPC32: stw r31, -4(r1)
-; CHECK-PPC32: lwz r1, 0(r1)
-; CHECK-PPC32: lwz r31, -4(r1)
-; CHECK-PPC32-NOFP: stw r31, -4(r1)
-; CHECK-PPC32-NOFP: lwz r1, 0(r1)
-; CHECK-PPC32-NOFP: lwz r31, -4(r1)
-; CHECK-PPC32-RS: stwu r1, -80(r1)
-; CHECK-PPC32-RS-NOFP: stwu r1, -80(r1)
-
-; CHECK-PPC64: std r31, -8(r1)
-; CHECK-PPC64: stdu r1, -128(r1)
-; CHECK-PPC64: ld r1, 0(r1)
-; CHECK-PPC64: ld r31, -8(r1)
-; CHECK-PPC64-NOFP: std r31, -8(r1)
-; CHECK-PPC64-NOFP: stdu r1, -128(r1)
-; CHECK-PPC64-NOFP: ld r1, 0(r1)
-; CHECK-PPC64-NOFP: ld r31, -8(r1)
-
-define i32* @f1(i32 %n) nounwind {
- %tmp = alloca i32, i32 %n ; <i32*> [#uses=1]
- ret i32* %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/Frames-large.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/Frames-large.ll
deleted file mode 100644
index 302d3df..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/Frames-large.ll
+++ /dev/null
@@ -1,52 +0,0 @@
-; RUN: llvm-as < %s > %t.bc
-; RUN: llc < %t.bc -march=ppc32 | FileCheck %s -check-prefix=PPC32-NOFP
-; RUN: llc < %t.bc -march=ppc32 -disable-fp-elim | FileCheck %s -check-prefix=PPC32-FP
-
-; RUN: llc < %t.bc -march=ppc64 | FileCheck %s -check-prefix=PPC64-NOFP
-; RUN: llc < %t.bc -march=ppc64 -disable-fp-elim | FileCheck %s -check-prefix=PPC64-FP
-
-
-target triple = "powerpc-apple-darwin8"
-
-define i32* @f1() nounwind {
- %tmp = alloca i32, i32 8191 ; <i32*> [#uses=1]
- ret i32* %tmp
-}
-
-; PPC32-NOFP: _f1:
-; PPC32-NOFP: lis r0, -1
-; PPC32-NOFP: ori r0, r0, 32704
-; PPC32-NOFP: stwux r1, r1, r0
-; PPC32-NOFP: addi r3, r1, 68
-; PPC32-NOFP: lwz r1, 0(r1)
-; PPC32-NOFP: blr
-
-; PPC32-FP: _f1:
-; PPC32-FP: stw r31, -4(r1)
-; PPC32-FP: lis r0, -1
-; PPC32-FP: ori r0, r0, 32704
-; PPC32-FP: stwux r1, r1, r0
-; ...
-; PPC32-FP: lwz r1, 0(r1)
-; PPC32-FP: lwz r31, -4(r1)
-; PPC32-FP: blr
-
-
-; PPC64-NOFP: _f1:
-; PPC64-NOFP: lis r0, -1
-; PPC64-NOFP: ori r0, r0, 32656
-; PPC64-NOFP: stdux r1, r1, r0
-; PPC64-NOFP: addi r3, r1, 116
-; PPC64-NOFP: ld r1, 0(r1)
-; PPC64-NOFP: blr
-
-
-; PPC64-FP: _f1:
-; PPC64-FP: std r31, -8(r1)
-; PPC64-FP: lis r0, -1
-; PPC64-FP: ori r0, r0, 32640
-; PPC64-FP: stdux r1, r1, r0
-; ...
-; PPC64-FP: ld r1, 0(r1)
-; PPC64-FP: ld r31, -8(r1)
-; PPC64-FP: blr
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/Frames-leaf.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/Frames-leaf.ll
deleted file mode 100644
index c2e1d6b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/Frames-leaf.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc < %s -march=ppc32 | \
-; RUN: not grep {stw r31, 20(r1)}
-; RUN: llc < %s -march=ppc32 | \
-; RUN: not grep {stwu r1, -.*(r1)}
-; RUN: llc < %s -march=ppc32 | \
-; RUN: not grep {addi r1, r1, }
-; RUN: llc < %s -march=ppc32 | \
-; RUN: not grep {lwz r31, 20(r1)}
-; RUN: llc < %s -march=ppc32 -disable-fp-elim | \
-; RUN: not grep {stw r31, 20(r1)}
-; RUN: llc < %s -march=ppc32 -disable-fp-elim | \
-; RUN: not grep {stwu r1, -.*(r1)}
-; RUN: llc < %s -march=ppc32 -disable-fp-elim | \
-; RUN: not grep {addi r1, r1, }
-; RUN: llc < %s -march=ppc32 -disable-fp-elim | \
-; RUN: not grep {lwz r31, 20(r1)}
-; RUN: llc < %s -march=ppc64 | \
-; RUN: not grep {std r31, 40(r1)}
-; RUN: llc < %s -march=ppc64 | \
-; RUN: not grep {stdu r1, -.*(r1)}
-; RUN: llc < %s -march=ppc64 | \
-; RUN: not grep {addi r1, r1, }
-; RUN: llc < %s -march=ppc64 | \
-; RUN: not grep {ld r31, 40(r1)}
-; RUN: llc < %s -march=ppc64 -disable-fp-elim | \
-; RUN: not grep {stw r31, 40(r1)}
-; RUN: llc < %s -march=ppc64 -disable-fp-elim | \
-; RUN: not grep {stdu r1, -.*(r1)}
-; RUN: llc < %s -march=ppc64 -disable-fp-elim | \
-; RUN: not grep {addi r1, r1, }
-; RUN: llc < %s -march=ppc64 -disable-fp-elim | \
-; RUN: not grep {ld r31, 40(r1)}
-
-define i32* @f1() {
- %tmp = alloca i32, i32 2 ; <i32*> [#uses=1]
- ret i32* %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/Frames-small.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/Frames-small.ll
deleted file mode 100644
index 404fdd0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/Frames-small.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -o %t1
-; RUN: not grep {stw r31, -4(r1)} %t1
-; RUN: grep {stwu r1, -16448(r1)} %t1
-; RUN: grep {addi r1, r1, 16448} %t1
-; RUN: llc < %s -march=ppc32 | \
-; RUN: not grep {lwz r31, -4(r1)}
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -disable-fp-elim \
-; RUN: -o %t2
-; RUN: grep {stw r31, -4(r1)} %t2
-; RUN: grep {stwu r1, -16448(r1)} %t2
-; RUN: grep {addi r1, r1, 16448} %t2
-; RUN: grep {lwz r31, -4(r1)} %t2
-; RUN: llc < %s -march=ppc64 -mtriple=powerpc-apple-darwin8 -o %t3
-; RUN: not grep {std r31, -8(r1)} %t3
-; RUN: grep {stdu r1, -16496(r1)} %t3
-; RUN: grep {addi r1, r1, 16496} %t3
-; RUN: not grep {ld r31, -8(r1)} %t3
-; RUN: llc < %s -march=ppc64 -mtriple=powerpc-apple-darwin8 -disable-fp-elim \
-; RUN: -o %t4
-; RUN: grep {std r31, -8(r1)} %t4
-; RUN: grep {stdu r1, -16512(r1)} %t4
-; RUN: grep {addi r1, r1, 16512} %t4
-; RUN: grep {ld r31, -8(r1)} %t4
-
-define i32* @f1() {
- %tmp = alloca i32, i32 4095 ; <i32*> [#uses=1]
- ret i32* %tmp
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/LargeAbsoluteAddr.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/LargeAbsoluteAddr.ll
deleted file mode 100644
index b10a996..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/LargeAbsoluteAddr.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin | \
-; RUN: grep {stw r3, 32751}
-; RUN: llc < %s -march=ppc64 -mtriple=powerpc-apple-darwin | \
-; RUN: grep {stw r3, 32751}
-; RUN: llc < %s -march=ppc64 -mtriple=powerpc-apple-darwin | \
-; RUN: grep {std r3, 9024}
-
-define void @test() nounwind {
- store i32 0, i32* inttoptr (i64 48725999 to i32*)
- ret void
-}
-
-define void @test2() nounwind {
- store i64 0, i64* inttoptr (i64 74560 to i64*)
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/addc.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/addc.ll
deleted file mode 100644
index 8c928ce..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/addc.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; All of these should be codegen'd without loading immediates
-; RUN: llc < %s -mtriple=powerpc-apple-darwin | FileCheck %s
-
-define i64 @add_ll(i64 %a, i64 %b) nounwind {
-entry:
- %tmp.2 = add i64 %b, %a ; <i64> [#uses=1]
- ret i64 %tmp.2
-; CHECK: add_ll:
-; CHECK: addc r4, r6, r4
-; CHECK: adde r3, r5, r3
-; CHECK: blr
-}
-
-define i64 @add_l_5(i64 %a) nounwind {
-entry:
- %tmp.1 = add i64 %a, 5 ; <i64> [#uses=1]
- ret i64 %tmp.1
-; CHECK: add_l_5:
-; CHECK: addic r4, r4, 5
-; CHECK: addze r3, r3
-; CHECK: blr
-}
-
-define i64 @add_l_m5(i64 %a) nounwind {
-entry:
- %tmp.1 = add i64 %a, -5 ; <i64> [#uses=1]
- ret i64 %tmp.1
-; CHECK: add_l_m5:
-; CHECK: addic r4, r4, -5
-; CHECK: addme r3, r3
-; CHECK: blr
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/addi-reassoc.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/addi-reassoc.ll
deleted file mode 100644
index 2b71ce6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/addi-reassoc.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep addi
-
- %struct.X = type { [5 x i8] }
-
-define i32 @test1([4 x i32]* %P, i32 %i) {
- %tmp.2 = add i32 %i, 2 ; <i32> [#uses=1]
- %tmp.4 = getelementptr [4 x i32]* %P, i32 %tmp.2, i32 1 ; <i32*> [#uses=1]
- %tmp.5 = load i32* %tmp.4 ; <i32> [#uses=1]
- ret i32 %tmp.5
-}
-
-define i32 @test2(%struct.X* %P, i32 %i) {
- %tmp.2 = add i32 %i, 2 ; <i32> [#uses=1]
- %tmp.5 = getelementptr %struct.X* %P, i32 %tmp.2, i32 0, i32 1 ; <i8*> [#uses=1]
- %tmp.6 = load i8* %tmp.5 ; <i8> [#uses=1]
- %tmp.7 = sext i8 %tmp.6 to i32 ; <i32> [#uses=1]
- ret i32 %tmp.7
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/align.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/align.ll
deleted file mode 100644
index 109a837..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/align.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-linux-gnu | FileCheck %s -check-prefix=ELF
-; RUN: llc < %s -mtriple=powerpc-apple-darwin9 | FileCheck %s -check-prefix=DARWIN
-
- at a = global i1 true
-; no alignment
-
- at b = global i8 1
-; no alignment
-
- at c = global i16 2
-;ELF: .align 1
-;ELF: c:
-;DARWIN: .align 1
-;DARWIN: _c:
-
- at d = global i32 3
-;ELF: .align 2
-;ELF: d:
-;DARWIN: .align 2
-;DARWIN: _d:
-
- at e = global i64 4
-;ELF: .align 3
-;ELF: e
-;DARWIN: .align 3
-;DARWIN: _e:
-
- at f = global float 5.0
-;ELF: .align 2
-;ELF: f:
-;DARWIN: .align 2
-;DARWIN: _f:
-
- at g = global double 6.0
-;ELF: .align 3
-;ELF: g:
-;DARWIN: .align 3
-;DARWIN: _g:
-
- at bar = common global [75 x i8] zeroinitializer, align 128
-;ELF: .comm bar,75,128
-;DARWIN: .comm _bar,75,7
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/and-branch.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/and-branch.ll
deleted file mode 100644
index 0484f88..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/and-branch.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep mfcr
-
-define void @foo(i32 %X, i32 %Y, i32 %Z) {
-entry:
- %tmp = icmp eq i32 %X, 0 ; <i1> [#uses=1]
- %tmp3 = icmp slt i32 %Y, 5 ; <i1> [#uses=1]
- %tmp4 = and i1 %tmp3, %tmp ; <i1> [#uses=1]
- br i1 %tmp4, label %cond_true, label %UnifiedReturnBlock
-cond_true: ; preds = %entry
- %tmp5 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
- ret void
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-declare i32 @bar(...)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/and-elim.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/and-elim.ll
deleted file mode 100644
index 3685361..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/and-elim.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep rlwin
-
-define void @test(i8* %P) {
- %W = load i8* %P
- %X = shl i8 %W, 1
- %Y = add i8 %X, 2
- %Z = and i8 %Y, 254 ; dead and
- store i8 %Z, i8* %P
- ret void
-}
-
-define i16 @test2(i16 zeroext %crc) zeroext {
- ; No and's should be needed for the i16s here.
- %tmp.1 = lshr i16 %crc, 1
- %tmp.7 = xor i16 %tmp.1, 40961
- ret i16 %tmp.7
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/and-imm.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/and-imm.ll
deleted file mode 100644
index 64a45e5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/and-imm.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep {ori\\|lis}
-
-; andi. r3, r3, 32769
-define i32 @test(i32 %X) {
- %Y = and i32 %X, 32769 ; <i32> [#uses=1]
- ret i32 %Y
-}
-
-; andis. r3, r3, 32769
-define i32 @test2(i32 %X) {
- %Y = and i32 %X, -2147418112 ; <i32> [#uses=1]
- ret i32 %Y
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/and_add.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/and_add.ll
deleted file mode 100644
index 517e775..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/and_add.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=ppc32 -o %t
-; RUN: grep slwi %t
-; RUN: not grep addi %t
-; RUN: not grep rlwinm %t
-
-define i32 @test(i32 %A) {
- ;; shift
- %B = mul i32 %A, 8 ; <i32> [#uses=1]
- ;; dead, no demanded bits.
- %C = add i32 %B, 7 ; <i32> [#uses=1]
- ;; dead once add is gone.
- %D = and i32 %C, -8 ; <i32> [#uses=1]
- ret i32 %D
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/and_sext.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/and_sext.ll
deleted file mode 100644
index c6d234e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/and_sext.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; These tests should not contain a sign extend.
-; RUN: llc < %s -march=ppc32 | not grep extsh
-; RUN: llc < %s -march=ppc32 | not grep extsb
-
-define i32 @test1(i32 %mode.0.i.0) {
- %tmp.79 = trunc i32 %mode.0.i.0 to i16
- %tmp.80 = sext i16 %tmp.79 to i32
- %tmp.81 = and i32 %tmp.80, 24
- ret i32 %tmp.81
-}
-
-define i16 @test2(i16 signext %X, i16 signext %x) signext {
- %tmp = sext i16 %X to i32
- %tmp1 = sext i16 %x to i32
- %tmp2 = add i32 %tmp, %tmp1
- %tmp4 = ashr i32 %tmp2, 1
- %tmp5 = trunc i32 %tmp4 to i16
- %tmp45 = sext i16 %tmp5 to i32
- %retval = trunc i32 %tmp45 to i16
- ret i16 %retval
-}
-
-define i16 @test3(i32 zeroext %X) signext {
- %tmp1 = lshr i32 %X, 16
- %tmp2 = trunc i32 %tmp1 to i16
- ret i16 %tmp2
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/and_sra.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/and_sra.ll
deleted file mode 100644
index e6c02d8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/and_sra.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; Neither of these functions should contain algebraic right shifts
-; RUN: llc < %s -march=ppc32 | not grep srawi
-
-define i32 @test1(i32 %mode.0.i.0) {
- %tmp.79 = bitcast i32 %mode.0.i.0 to i32 ; <i32> [#uses=1]
- %tmp.80 = ashr i32 %tmp.79, 15 ; <i32> [#uses=1]
- %tmp.81 = and i32 %tmp.80, 24 ; <i32> [#uses=1]
- ret i32 %tmp.81
-}
-
-define i32 @test2(i32 %mode.0.i.0) {
- %tmp.79 = bitcast i32 %mode.0.i.0 to i32 ; <i32> [#uses=1]
- %tmp.80 = ashr i32 %tmp.79, 15 ; <i32> [#uses=1]
- %tmp.81 = lshr i32 %mode.0.i.0, 16 ; <i32> [#uses=1]
- %tmp.82 = bitcast i32 %tmp.81 to i32 ; <i32> [#uses=1]
- %tmp.83 = and i32 %tmp.80, %tmp.82 ; <i32> [#uses=1]
- ret i32 %tmp.83
-}
-
-define i32 @test3(i32 %specbits.6.1) {
- %tmp.2540 = ashr i32 %specbits.6.1, 11 ; <i32> [#uses=1]
- %tmp.2541 = bitcast i32 %tmp.2540 to i32 ; <i32> [#uses=1]
- %tmp.2542 = shl i32 %tmp.2541, 13 ; <i32> [#uses=1]
- %tmp.2543 = and i32 %tmp.2542, 8192 ; <i32> [#uses=1]
- ret i32 %tmp.2543
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/atomic-1.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/atomic-1.ll
deleted file mode 100644
index ec4e42d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/atomic-1.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep lwarx | count 3
-; RUN: llc < %s -march=ppc32 | grep stwcx. | count 4
-
-define i32 @exchange_and_add(i32* %mem, i32 %val) nounwind {
- %tmp = call i32 @llvm.atomic.load.add.i32( i32* %mem, i32 %val )
- ret i32 %tmp
-}
-
-define i32 @exchange_and_cmp(i32* %mem) nounwind {
- %tmp = call i32 @llvm.atomic.cmp.swap.i32( i32* %mem, i32 0, i32 1 )
- ret i32 %tmp
-}
-
-define i32 @exchange(i32* %mem, i32 %val) nounwind {
- %tmp = call i32 @llvm.atomic.swap.i32( i32* %mem, i32 1 )
- ret i32 %tmp
-}
-
-declare i32 @llvm.atomic.load.add.i32(i32*, i32) nounwind
-declare i32 @llvm.atomic.cmp.swap.i32(i32*, i32, i32) nounwind
-declare i32 @llvm.atomic.swap.i32(i32*, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/atomic-2.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/atomic-2.ll
deleted file mode 100644
index 6d9daef..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/atomic-2.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=ppc64 | grep ldarx | count 3
-; RUN: llc < %s -march=ppc64 | grep stdcx. | count 4
-
-define i64 @exchange_and_add(i64* %mem, i64 %val) nounwind {
- %tmp = call i64 @llvm.atomic.load.add.i64( i64* %mem, i64 %val )
- ret i64 %tmp
-}
-
-define i64 @exchange_and_cmp(i64* %mem) nounwind {
- %tmp = call i64 @llvm.atomic.cmp.swap.i64( i64* %mem, i64 0, i64 1 )
- ret i64 %tmp
-}
-
-define i64 @exchange(i64* %mem, i64 %val) nounwind {
- %tmp = call i64 @llvm.atomic.swap.i64( i64* %mem, i64 1 )
- ret i64 %tmp
-}
-
-declare i64 @llvm.atomic.load.add.i64(i64*, i64) nounwind
-declare i64 @llvm.atomic.cmp.swap.i64(i64*, i64, i64) nounwind
-declare i64 @llvm.atomic.swap.i64(i64*, i64) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/available-externally.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/available-externally.ll
deleted file mode 100644
index fdead7d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/available-externally.ll
+++ /dev/null
@@ -1,71 +0,0 @@
-; RUN: llc < %s -relocation-model=static | FileCheck %s -check-prefix=STATIC
-; RUN: llc < %s -relocation-model=pic | FileCheck %s -check-prefix=PIC
-; RUN: llc < %s -relocation-model=dynamic-no-pic | FileCheck %s -check-prefix=DYNAMIC
-; PR4482
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "powerpc-apple-darwin8"
-
-define i32 @foo(i64 %x) nounwind {
-entry:
-; STATIC: _foo:
-; STATIC: bl _exact_log2
-; STATIC: blr
-; STATIC: .subsections_via_symbols
-
-; PIC: _foo:
-; PIC: bl L_exact_log2$stub
-; PIC: blr
-
-; DYNAMIC: _foo:
-; DYNAMIC: bl L_exact_log2$stub
-; DYNAMIC: blr
-
- %A = call i32 @exact_log2(i64 %x) nounwind
- ret i32 %A
-}
-
-define available_externally i32 @exact_log2(i64 %x) nounwind {
-entry:
- ret i32 42
-}
-
-
-; PIC: .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
-; PIC: L_exact_log2$stub:
-; PIC: .indirect_symbol _exact_log2
-; PIC: mflr r0
-; PIC: bcl 20,31,L_exact_log2$stub$tmp
-
-; PIC: L_exact_log2$stub$tmp:
-; PIC: mflr r11
-; PIC: addis r11,r11,ha16(L_exact_log2$lazy_ptr-L_exact_log2$stub$tmp)
-; PIC: mtlr r0
-; PIC: lwzu r12,lo16(L_exact_log2$lazy_ptr-L_exact_log2$stub$tmp)(r11)
-; PIC: mtctr r12
-; PIC: bctr
-
-; PIC: .section __DATA,__la_symbol_ptr,lazy_symbol_pointers
-; PIC: L_exact_log2$lazy_ptr:
-; PIC: .indirect_symbol _exact_log2
-; PIC: .long dyld_stub_binding_helper
-
-; PIC: .subsections_via_symbols
-
-
-; DYNAMIC: .section __TEXT,__symbol_stub1,symbol_stubs,pure_instructions,16
-; DYNAMIC: L_exact_log2$stub:
-; DYNAMIC: .indirect_symbol _exact_log2
-; DYNAMIC: lis r11,ha16(L_exact_log2$lazy_ptr)
-; DYNAMIC: lwzu r12,lo16(L_exact_log2$lazy_ptr)(r11)
-; DYNAMIC: mtctr r12
-; DYNAMIC: bctr
-
-; DYNAMIC: .section __DATA,__la_symbol_ptr,lazy_symbol_pointers
-; DYNAMIC: L_exact_log2$lazy_ptr:
-; DYNAMIC: .indirect_symbol _exact_log2
-; DYNAMIC: .long dyld_stub_binding_helper
-
-
-
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/big-endian-actual-args.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/big-endian-actual-args.ll
deleted file mode 100644
index 009f468..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/big-endian-actual-args.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-unknown-linux-gnu | \
-; RUN: grep {addc 4, 4, 6}
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-unknown-linux-gnu | \
-; RUN: grep {adde 3, 3, 5}
-
-define i64 @foo(i64 %x, i64 %y) {
- %z = add i64 %x, %y
- ret i64 %z
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/big-endian-call-result.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/big-endian-call-result.ll
deleted file mode 100644
index fe85404..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/big-endian-call-result.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-unknown-linux-gnu | \
-; RUN: grep {addic 4, 4, 1}
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-unknown-linux-gnu | \
-; RUN: grep {addze 3, 3}
-
-declare i64 @foo()
-
-define i64 @bar()
-{
- %t = call i64 @foo()
- %s = add i64 %t, 1
- ret i64 %s
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/big-endian-formal-args.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/big-endian-formal-args.ll
deleted file mode 100644
index e46e1ec..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/big-endian-formal-args.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-unknown-linux-gnu | \
-; RUN: grep {li 6, 3}
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-unknown-linux-gnu | \
-; RUN: grep {li 4, 2}
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-unknown-linux-gnu | \
-; RUN: grep {li 3, 0}
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-unknown-linux-gnu | \
-; RUN: grep {mr 5, 3}
-
-declare void @bar(i64 %x, i64 %y)
-
-define void @foo() {
- call void @bar(i64 2, i64 3)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/branch-opt.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/branch-opt.ll
deleted file mode 100644
index cc02e40..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/branch-opt.ll
+++ /dev/null
@@ -1,71 +0,0 @@
-; RUN: llc < %s -march=ppc32 | \
-; RUN: grep {b LBB.*} | count 4
-
-target datalayout = "E-p:32:32"
-target triple = "powerpc-apple-darwin8.7.0"
-
-define void @foo(i32 %W, i32 %X, i32 %Y, i32 %Z) {
-entry:
- %tmp1 = and i32 %W, 1 ; <i32> [#uses=1]
- %tmp1.upgrd.1 = icmp eq i32 %tmp1, 0 ; <i1> [#uses=1]
- br i1 %tmp1.upgrd.1, label %cond_false, label %bb5
-bb: ; preds = %bb5, %bb
- %indvar77 = phi i32 [ %indvar.next78, %bb ], [ 0, %bb5 ] ; <i32> [#uses=1]
- %tmp2 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
- %indvar.next78 = add i32 %indvar77, 1 ; <i32> [#uses=2]
- %exitcond79 = icmp eq i32 %indvar.next78, %X ; <i1> [#uses=1]
- br i1 %exitcond79, label %cond_next48, label %bb
-bb5: ; preds = %entry
- %tmp = icmp eq i32 %X, 0 ; <i1> [#uses=1]
- br i1 %tmp, label %cond_next48, label %bb
-cond_false: ; preds = %entry
- %tmp10 = and i32 %W, 2 ; <i32> [#uses=1]
- %tmp10.upgrd.2 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
- br i1 %tmp10.upgrd.2, label %cond_false20, label %bb16
-bb12: ; preds = %bb16, %bb12
- %indvar72 = phi i32 [ %indvar.next73, %bb12 ], [ 0, %bb16 ] ; <i32> [#uses=1]
- %tmp13 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
- %indvar.next73 = add i32 %indvar72, 1 ; <i32> [#uses=2]
- %exitcond74 = icmp eq i32 %indvar.next73, %Y ; <i1> [#uses=1]
- br i1 %exitcond74, label %cond_next48, label %bb12
-bb16: ; preds = %cond_false
- %tmp18 = icmp eq i32 %Y, 0 ; <i1> [#uses=1]
- br i1 %tmp18, label %cond_next48, label %bb12
-cond_false20: ; preds = %cond_false
- %tmp23 = and i32 %W, 4 ; <i32> [#uses=1]
- %tmp23.upgrd.3 = icmp eq i32 %tmp23, 0 ; <i1> [#uses=1]
- br i1 %tmp23.upgrd.3, label %cond_false33, label %bb29
-bb25: ; preds = %bb29, %bb25
- %indvar67 = phi i32 [ %indvar.next68, %bb25 ], [ 0, %bb29 ] ; <i32> [#uses=1]
- %tmp26 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
- %indvar.next68 = add i32 %indvar67, 1 ; <i32> [#uses=2]
- %exitcond69 = icmp eq i32 %indvar.next68, %Z ; <i1> [#uses=1]
- br i1 %exitcond69, label %cond_next48, label %bb25
-bb29: ; preds = %cond_false20
- %tmp31 = icmp eq i32 %Z, 0 ; <i1> [#uses=1]
- br i1 %tmp31, label %cond_next48, label %bb25
-cond_false33: ; preds = %cond_false20
- %tmp36 = and i32 %W, 8 ; <i32> [#uses=1]
- %tmp36.upgrd.4 = icmp eq i32 %tmp36, 0 ; <i1> [#uses=1]
- br i1 %tmp36.upgrd.4, label %cond_next48, label %bb42
-bb38: ; preds = %bb42
- %tmp39 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
- br label %bb42
-bb42: ; preds = %bb38, %cond_false33
- %indvar = phi i32 [ %indvar.next, %bb38 ], [ 0, %cond_false33 ] ; <i32> [#uses=4]
- %W_addr.0 = sub i32 %W, %indvar ; <i32> [#uses=1]
- %exitcond = icmp eq i32 %indvar, %W ; <i1> [#uses=1]
- br i1 %exitcond, label %cond_next48, label %bb38
-cond_next48: ; preds = %bb42, %cond_false33, %bb29, %bb25, %bb16, %bb12, %bb5, %bb
- %W_addr.1 = phi i32 [ %W, %bb5 ], [ %W, %bb16 ], [ %W, %bb29 ], [ %W, %cond_false33 ], [ %W_addr.0, %bb42 ], [ %W, %bb25 ], [ %W, %bb12 ], [ %W, %bb ] ; <i32> [#uses=1]
- %tmp50 = icmp eq i32 %W_addr.1, 0 ; <i1> [#uses=1]
- br i1 %tmp50, label %UnifiedReturnBlock, label %cond_true51
-cond_true51: ; preds = %cond_next48
- %tmp52 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
- ret void
-UnifiedReturnBlock: ; preds = %cond_next48
- ret void
-}
-
-declare i32 @bar(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/bswap-load-store.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/bswap-load-store.ll
deleted file mode 100644
index 4f6bfc7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/bswap-load-store.ll
+++ /dev/null
@@ -1,51 +0,0 @@
-; RUN: llc < %s -march=ppc32 | FileCheck %s -check-prefix=X32
-; RUN: llc < %s -march=ppc64 | FileCheck %s -check-prefix=X64
-
-
-define void @STWBRX(i32 %i, i8* %ptr, i32 %off) {
- %tmp1 = getelementptr i8* %ptr, i32 %off ; <i8*> [#uses=1]
- %tmp1.upgrd.1 = bitcast i8* %tmp1 to i32* ; <i32*> [#uses=1]
- %tmp13 = tail call i32 @llvm.bswap.i32( i32 %i ) ; <i32> [#uses=1]
- store i32 %tmp13, i32* %tmp1.upgrd.1
- ret void
-}
-
-define i32 @LWBRX(i8* %ptr, i32 %off) {
- %tmp1 = getelementptr i8* %ptr, i32 %off ; <i8*> [#uses=1]
- %tmp1.upgrd.2 = bitcast i8* %tmp1 to i32* ; <i32*> [#uses=1]
- %tmp = load i32* %tmp1.upgrd.2 ; <i32> [#uses=1]
- %tmp14 = tail call i32 @llvm.bswap.i32( i32 %tmp ) ; <i32> [#uses=1]
- ret i32 %tmp14
-}
-
-define void @STHBRX(i16 %s, i8* %ptr, i32 %off) {
- %tmp1 = getelementptr i8* %ptr, i32 %off ; <i8*> [#uses=1]
- %tmp1.upgrd.3 = bitcast i8* %tmp1 to i16* ; <i16*> [#uses=1]
- %tmp5 = call i16 @llvm.bswap.i16( i16 %s ) ; <i16> [#uses=1]
- store i16 %tmp5, i16* %tmp1.upgrd.3
- ret void
-}
-
-define i16 @LHBRX(i8* %ptr, i32 %off) {
- %tmp1 = getelementptr i8* %ptr, i32 %off ; <i8*> [#uses=1]
- %tmp1.upgrd.4 = bitcast i8* %tmp1 to i16* ; <i16*> [#uses=1]
- %tmp = load i16* %tmp1.upgrd.4 ; <i16> [#uses=1]
- %tmp6 = call i16 @llvm.bswap.i16( i16 %tmp ) ; <i16> [#uses=1]
- ret i16 %tmp6
-}
-
-declare i32 @llvm.bswap.i32(i32)
-
-declare i16 @llvm.bswap.i16(i16)
-
-
-; X32: stwbrx
-; X32: lwbrx
-; X32: sthbrx
-; X32: lhbrx
-
-; X64: stwbrx
-; X64: lwbrx
-; X64: sthbrx
-; X64: lhbrx
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll
deleted file mode 100644
index 0454c58..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; There should be exactly one vxor here.
-; RUN: llc < %s -march=ppc32 -mcpu=g5 --enable-unsafe-fp-math | \
-; RUN: grep vxor | count 1
-
-; There should be exactly one vsplti here.
-; RUN: llc < %s -march=ppc32 -mcpu=g5 --enable-unsafe-fp-math | \
-; RUN: grep vsplti | count 1
-
-define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
- %tmp = load <4 x float>* %P3 ; <<4 x float>> [#uses=1]
- %tmp3 = load <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %tmp4 = fmul <4 x float> %tmp, %tmp3 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp4, <4 x float>* %P3
- store <4 x float> zeroinitializer, <4 x float>* %P1
- store <4 x i32> zeroinitializer, <4 x i32>* %P2
- ret void
-}
-
-define void @VSPLTI(<4 x i32>* %P2, <8 x i16>* %P3) {
- store <4 x i32> bitcast (<16 x i8> < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > to <4 x i32>), <4 x i32>* %P2
- store <8 x i16> < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >, <8 x i16>* %P3
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/calls.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/calls.ll
deleted file mode 100644
index 0db184f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/calls.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; Test various forms of calls.
-
-; RUN: llc < %s -march=ppc32 | \
-; RUN: grep {bl } | count 2
-; RUN: llc < %s -march=ppc32 | \
-; RUN: grep {bctrl} | count 1
-; RUN: llc < %s -march=ppc32 | \
-; RUN: grep {bla } | count 1
-
-declare void @foo()
-
-define void @test_direct() {
- call void @foo( )
- ret void
-}
-
-define void @test_extsym(i8* %P) {
- free i8* %P
- ret void
-}
-
-define void @test_indirect(void ()* %fp) {
- call void %fp( )
- ret void
-}
-
-define void @test_abs() {
- %fp = inttoptr i32 400 to void ()* ; <void ()*> [#uses=1]
- call void %fp( )
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/cmp-cmp.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/cmp-cmp.ll
deleted file mode 100644
index 35a5e42..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/cmp-cmp.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep mfcr
-
-define void @test(i64 %X) {
- %tmp1 = and i64 %X, 3 ; <i64> [#uses=1]
- %tmp = icmp sgt i64 %tmp1, 2 ; <i1> [#uses=1]
- br i1 %tmp, label %UnifiedReturnBlock, label %cond_true
-cond_true: ; preds = %0
- tail call void @test( i64 0 )
- ret void
-UnifiedReturnBlock: ; preds = %0
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/compare-duplicate.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/compare-duplicate.ll
deleted file mode 100644
index f5108c3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/compare-duplicate.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin8 | not grep slwi
-
-define i32 @test(i32 %A, i32 %B) {
- %C = sub i32 %B, %A
- %D = icmp eq i32 %C, %A
- br i1 %D, label %T, label %F
-T:
- ret i32 19123
-F:
- ret i32 %C
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/compare-simm.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/compare-simm.ll
deleted file mode 100644
index 5ba0500..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/compare-simm.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 | \
-; RUN: grep {cmpwi cr0, r3, -1}
-
-define i32 @test(i32 %x) {
- %c = icmp eq i32 %x, -1
- br i1 %c, label %T, label %F
-T:
- %A = call i32 @test(i32 123)
- %B = add i32 %A, 43
- ret i32 %B
-F:
- %G = add i32 %x, 1234
- ret i32 %G
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/constants.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/constants.ll
deleted file mode 100644
index 8901e02..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/constants.ll
+++ /dev/null
@@ -1,52 +0,0 @@
-; All of these routines should be perform optimal load of constants.
-; RUN: llc < %s -march=ppc32 | \
-; RUN: grep lis | count 5
-; RUN: llc < %s -march=ppc32 | \
-; RUN: grep ori | count 3
-; RUN: llc < %s -march=ppc32 | \
-; RUN: grep {li } | count 4
-
-define i32 @f1() {
-entry:
- ret i32 1
-}
-
-define i32 @f2() {
-entry:
- ret i32 -1
-}
-
-define i32 @f3() {
-entry:
- ret i32 0
-}
-
-define i32 @f4() {
-entry:
- ret i32 32767
-}
-
-define i32 @f5() {
-entry:
- ret i32 65535
-}
-
-define i32 @f6() {
-entry:
- ret i32 65536
-}
-
-define i32 @f7() {
-entry:
- ret i32 131071
-}
-
-define i32 @f8() {
-entry:
- ret i32 2147483647
-}
-
-define i32 @f9() {
-entry:
- ret i32 -2147483648
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/cr_spilling.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/cr_spilling.ll
deleted file mode 100644
index b215868..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/cr_spilling.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=ppc32 -regalloc=local -O0 -relocation-model=pic -o -
-; PR1638
-
-@.str242 = external constant [3 x i8] ; <[3 x i8]*> [#uses=1]
-
-define fastcc void @ParseContent(i8* %buf, i32 %bufsize) {
-entry:
- %items = alloca [10000 x i8*], align 16 ; <[10000 x i8*]*> [#uses=0]
- %tmp86 = add i32 0, -1 ; <i32> [#uses=1]
- br i1 false, label %cond_true94, label %cond_next99
-
-cond_true94: ; preds = %entry
- %tmp98 = call i32 (i8*, ...)* @printf(i8* getelementptr ([3 x i8]* @.str242, i32 0, i32 0), i8* null) ; <i32> [#uses=0]
- %tmp20971 = icmp sgt i32 %tmp86, 0 ; <i1> [#uses=1]
- br i1 %tmp20971, label %bb101, label %bb212
-
-cond_next99: ; preds = %entry
- ret void
-
-bb101: ; preds = %cond_true94
- ret void
-
-bb212: ; preds = %cond_true94
- ret void
-}
-
-declare i32 @printf(i8*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/cttz.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/cttz.ll
deleted file mode 100644
index ab493a0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/cttz.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; Make sure this testcase does not use ctpop
-; RUN: llc < %s -march=ppc32 | grep -i cntlzw
-
-declare i32 @llvm.cttz.i32(i32)
-
-define i32 @bar(i32 %x) {
-entry:
- %tmp.1 = call i32 @llvm.cttz.i32( i32 %x ) ; <i32> [#uses=1]
- ret i32 %tmp.1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/darwin-labels.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/darwin-labels.ll
deleted file mode 100644
index af23369..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/darwin-labels.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s | grep {foo bar":}
-
-target datalayout = "E-p:32:32"
-target triple = "powerpc-apple-darwin8.2.0"
-@"foo bar" = global i32 4 ; <i32*> [#uses=0]
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/delete-node.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/delete-node.ll
deleted file mode 100644
index a26c211..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/delete-node.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=ppc32
-
-; The DAGCombiner leaves behind a dead node in this testcase. Currently
-; ISel is ignoring dead nodes, though it would be preferable for
-; DAGCombiner to be able to eliminate the dead node.
-
-define void @GrayATo32ARGBTabB(i8* %baseAddr, i16** %cmp, i32 %rowBytes) nounwind {
-entry:
- br label %bb1
-
-bb1: ; preds = %bb1, %entry
- %0 = load i16* null, align 2 ; <i16> [#uses=1]
- %1 = ashr i16 %0, 4 ; <i16> [#uses=1]
- %2 = sext i16 %1 to i32 ; <i32> [#uses=1]
- %3 = getelementptr i8* null, i32 %2 ; <i8*> [#uses=1]
- %4 = load i8* %3, align 1 ; <i8> [#uses=1]
- %5 = zext i8 %4 to i32 ; <i32> [#uses=1]
- %6 = shl i32 %5, 24 ; <i32> [#uses=1]
- %7 = or i32 0, %6 ; <i32> [#uses=1]
- store i32 %7, i32* null, align 4
- br label %bb1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/dg.exp b/libclamav/c++/llvm/test/CodeGen/PowerPC/dg.exp
deleted file mode 100644
index 9e50b55..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/dg.exp
+++ /dev/null
@@ -1,5 +0,0 @@
-load_lib llvm.exp
-
-if { [llvm_supports_target PowerPC] } {
- RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/div-2.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/div-2.ll
deleted file mode 100644
index 2fc916f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/div-2.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep srawi
-; RUN: llc < %s -march=ppc32 | grep blr
-
-define i32 @test1(i32 %X) {
- %Y = and i32 %X, 15 ; <i32> [#uses=1]
- %Z = sdiv i32 %Y, 4 ; <i32> [#uses=1]
- ret i32 %Z
-}
-
-define i32 @test2(i32 %W) {
- %X = and i32 %W, 15 ; <i32> [#uses=1]
- %Y = sub i32 16, %X ; <i32> [#uses=1]
- %Z = sdiv i32 %Y, 4 ; <i32> [#uses=1]
- ret i32 %Z
-}
-
-define i32 @test3(i32 %W) {
- %X = and i32 %W, 15 ; <i32> [#uses=1]
- %Y = sub i32 15, %X ; <i32> [#uses=1]
- %Z = sdiv i32 %Y, 4 ; <i32> [#uses=1]
- ret i32 %Z
-}
-
-define i32 @test4(i32 %W) {
- %X = and i32 %W, 2 ; <i32> [#uses=1]
- %Y = sub i32 5, %X ; <i32> [#uses=1]
- %Z = sdiv i32 %Y, 2 ; <i32> [#uses=1]
- ret i32 %Z
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/eqv-andc-orc-nor.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/eqv-andc-orc-nor.ll
deleted file mode 100644
index 558fd1b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/eqv-andc-orc-nor.ll
+++ /dev/null
@@ -1,93 +0,0 @@
-; RUN: llc < %s -march=ppc32 | \
-; RUN: grep eqv | count 3
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | \
-; RUN: grep andc | count 3
-; RUN: llc < %s -march=ppc32 | \
-; RUN: grep orc | count 2
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | \
-; RUN: grep nor | count 3
-; RUN: llc < %s -march=ppc32 | \
-; RUN: grep nand | count 1
-
-define i32 @EQV1(i32 %X, i32 %Y) {
- %A = xor i32 %X, %Y ; <i32> [#uses=1]
- %B = xor i32 %A, -1 ; <i32> [#uses=1]
- ret i32 %B
-}
-
-define i32 @EQV2(i32 %X, i32 %Y) {
- %A = xor i32 %X, -1 ; <i32> [#uses=1]
- %B = xor i32 %A, %Y ; <i32> [#uses=1]
- ret i32 %B
-}
-
-define i32 @EQV3(i32 %X, i32 %Y) {
- %A = xor i32 %X, -1 ; <i32> [#uses=1]
- %B = xor i32 %Y, %A ; <i32> [#uses=1]
- ret i32 %B
-}
-
-define i32 @ANDC1(i32 %X, i32 %Y) {
- %A = xor i32 %Y, -1 ; <i32> [#uses=1]
- %B = and i32 %X, %A ; <i32> [#uses=1]
- ret i32 %B
-}
-
-define i32 @ANDC2(i32 %X, i32 %Y) {
- %A = xor i32 %X, -1 ; <i32> [#uses=1]
- %B = and i32 %A, %Y ; <i32> [#uses=1]
- ret i32 %B
-}
-
-define i32 @ORC1(i32 %X, i32 %Y) {
- %A = xor i32 %Y, -1 ; <i32> [#uses=1]
- %B = or i32 %X, %A ; <i32> [#uses=1]
- ret i32 %B
-}
-
-define i32 @ORC2(i32 %X, i32 %Y) {
- %A = xor i32 %X, -1 ; <i32> [#uses=1]
- %B = or i32 %A, %Y ; <i32> [#uses=1]
- ret i32 %B
-}
-
-define i32 @NOR1(i32 %X) {
- %Y = xor i32 %X, -1 ; <i32> [#uses=1]
- ret i32 %Y
-}
-
-define i32 @NOR2(i32 %X, i32 %Y) {
- %Z = or i32 %X, %Y ; <i32> [#uses=1]
- %R = xor i32 %Z, -1 ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @NAND1(i32 %X, i32 %Y) {
- %Z = and i32 %X, %Y ; <i32> [#uses=1]
- %W = xor i32 %Z, -1 ; <i32> [#uses=1]
- ret i32 %W
-}
-
-define void @VNOR(<4 x float>* %P, <4 x float>* %Q) {
- %tmp = load <4 x float>* %P ; <<4 x float>> [#uses=1]
- %tmp.upgrd.1 = bitcast <4 x float> %tmp to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp2 = load <4 x float>* %Q ; <<4 x float>> [#uses=1]
- %tmp2.upgrd.2 = bitcast <4 x float> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp3 = or <4 x i32> %tmp.upgrd.1, %tmp2.upgrd.2 ; <<4 x i32>> [#uses=1]
- %tmp4 = xor <4 x i32> %tmp3, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
- %tmp4.upgrd.3 = bitcast <4 x i32> %tmp4 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp4.upgrd.3, <4 x float>* %P
- ret void
-}
-
-define void @VANDC(<4 x float>* %P, <4 x float>* %Q) {
- %tmp = load <4 x float>* %P ; <<4 x float>> [#uses=1]
- %tmp.upgrd.4 = bitcast <4 x float> %tmp to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp2 = load <4 x float>* %Q ; <<4 x float>> [#uses=1]
- %tmp2.upgrd.5 = bitcast <4 x float> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp4 = xor <4 x i32> %tmp2.upgrd.5, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
- %tmp3 = and <4 x i32> %tmp.upgrd.4, %tmp4 ; <<4 x i32>> [#uses=1]
- %tmp4.upgrd.6 = bitcast <4 x i32> %tmp3 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp4.upgrd.6, <4 x float>* %P
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/extsh.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/extsh.ll
deleted file mode 100644
index 506ff86..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/extsh.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; This should turn into a single extsh
-; RUN: llc < %s -march=ppc32 | grep extsh | count 1
-define i32 @test(i32 %X) {
- %tmp.81 = shl i32 %X, 16 ; <i32> [#uses=1]
- %tmp.82 = ashr i32 %tmp.81, 16 ; <i32> [#uses=1]
- ret i32 %tmp.82
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/fabs.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/fabs.ll
deleted file mode 100644
index 6ef740f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/fabs.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin | grep {fabs f1, f1}
-
-define double @fabs(double %f) {
-entry:
- %tmp2 = tail call double @fabs( double %f ) ; <double> [#uses=1]
- ret double %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/fma.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/fma.ll
deleted file mode 100644
index c47ae4b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/fma.ll
+++ /dev/null
@@ -1,54 +0,0 @@
-; RUN: llc < %s -march=ppc32 | \
-; RUN: grep -E {fn?madd|fn?msub} | count 8
-
-define double @test_FMADD1(double %A, double %B, double %C) {
- %D = fmul double %A, %B ; <double> [#uses=1]
- %E = fadd double %D, %C ; <double> [#uses=1]
- ret double %E
-}
-
-define double @test_FMADD2(double %A, double %B, double %C) {
- %D = fmul double %A, %B ; <double> [#uses=1]
- %E = fadd double %D, %C ; <double> [#uses=1]
- ret double %E
-}
-
-define double @test_FMSUB(double %A, double %B, double %C) {
- %D = fmul double %A, %B ; <double> [#uses=1]
- %E = fsub double %D, %C ; <double> [#uses=1]
- ret double %E
-}
-
-define double @test_FNMADD1(double %A, double %B, double %C) {
- %D = fmul double %A, %B ; <double> [#uses=1]
- %E = fadd double %D, %C ; <double> [#uses=1]
- %F = fsub double -0.000000e+00, %E ; <double> [#uses=1]
- ret double %F
-}
-
-define double @test_FNMADD2(double %A, double %B, double %C) {
- %D = fmul double %A, %B ; <double> [#uses=1]
- %E = fadd double %C, %D ; <double> [#uses=1]
- %F = fsub double -0.000000e+00, %E ; <double> [#uses=1]
- ret double %F
-}
-
-define double @test_FNMSUB1(double %A, double %B, double %C) {
- %D = fmul double %A, %B ; <double> [#uses=1]
- %E = fsub double %C, %D ; <double> [#uses=1]
- ret double %E
-}
-
-define double @test_FNMSUB2(double %A, double %B, double %C) {
- %D = fmul double %A, %B ; <double> [#uses=1]
- %E = fsub double %D, %C ; <double> [#uses=1]
- %F = fsub double -0.000000e+00, %E ; <double> [#uses=1]
- ret double %F
-}
-
-define float @test_FNMSUBS(float %A, float %B, float %C) {
- %D = fmul float %A, %B ; <float> [#uses=1]
- %E = fsub float %D, %C ; <float> [#uses=1]
- %F = fsub float -0.000000e+00, %E ; <float> [#uses=1]
- ret float %F
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/fnabs.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/fnabs.ll
deleted file mode 100644
index bbd5c71..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/fnabs.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep fnabs
-
-declare double @fabs(double)
-
-define double @test(double %X) {
- %Y = call double @fabs( double %X ) ; <double> [#uses=1]
- %Z = fsub double -0.000000e+00, %Y ; <double> [#uses=1]
- ret double %Z
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/fneg.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/fneg.ll
deleted file mode 100644
index 0bd31bb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/fneg.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep fneg
-
-define double @test1(double %a, double %b, double %c, double %d) {
-entry:
- %tmp2 = fsub double -0.000000e+00, %c ; <double> [#uses=1]
- %tmp4 = fmul double %tmp2, %d ; <double> [#uses=1]
- %tmp7 = fmul double %a, %b ; <double> [#uses=1]
- %tmp9 = fsub double %tmp7, %tmp4 ; <double> [#uses=1]
- ret double %tmp9
-}
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/fold-li.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/fold-li.ll
deleted file mode 100644
index 92d8da5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/fold-li.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=ppc32 | \
-; RUN: grep -v align | not grep li
-
-;; Test that immediates are folded into these instructions correctly.
-
-define i32 @ADD(i32 %X) nounwind {
- %Y = add i32 %X, 65537 ; <i32> [#uses=1]
- ret i32 %Y
-}
-
-define i32 @SUB(i32 %X) nounwind {
- %Y = sub i32 %X, 65537 ; <i32> [#uses=1]
- ret i32 %Y
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/fp-branch.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/fp-branch.ll
deleted file mode 100644
index 673da02..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/fp-branch.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep fcmp | count 1
-
-declare i1 @llvm.isunordered.f64(double, double)
-
-define i1 @intcoord_cond_next55(double %tmp48.reload) {
-newFuncRoot:
- br label %cond_next55
-
-bb72.exitStub: ; preds = %cond_next55
- ret i1 true
-
-cond_next62.exitStub: ; preds = %cond_next55
- ret i1 false
-
-cond_next55: ; preds = %newFuncRoot
- %tmp57 = fcmp oge double %tmp48.reload, 1.000000e+00 ; <i1> [#uses=1]
- %tmp58 = fcmp uno double %tmp48.reload, 1.000000e+00 ; <i1> [#uses=1]
- %tmp59 = or i1 %tmp57, %tmp58 ; <i1> [#uses=1]
- br i1 %tmp59, label %bb72.exitStub, label %cond_next62.exitStub
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/fp-int-fp.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/fp-int-fp.ll
deleted file mode 100644
index 18f7f83..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/fp-int-fp.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | not grep r1
-
-define double @test1(double %X) {
- %Y = fptosi double %X to i64 ; <i64> [#uses=1]
- %Z = sitofp i64 %Y to double ; <double> [#uses=1]
- ret double %Z
-}
-
-define float @test2(double %X) {
- %Y = fptosi double %X to i64 ; <i64> [#uses=1]
- %Z = sitofp i64 %Y to float ; <float> [#uses=1]
- ret float %Z
-}
-
-define double @test3(float %X) {
- %Y = fptosi float %X to i64 ; <i64> [#uses=1]
- %Z = sitofp i64 %Y to double ; <double> [#uses=1]
- ret double %Z
-}
-
-define float @test4(float %X) {
- %Y = fptosi float %X to i64 ; <i64> [#uses=1]
- %Z = sitofp i64 %Y to float ; <float> [#uses=1]
- ret float %Z
-}
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/fp_to_uint.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/fp_to_uint.ll
deleted file mode 100644
index 1360b62..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/fp_to_uint.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep fctiwz | count 1
-
-define i16 @foo(float %a) {
-entry:
- %tmp.1 = fptoui float %a to i16 ; <i16> [#uses=1]
- ret i16 %tmp.1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/fpcopy.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/fpcopy.ll
deleted file mode 100644
index 7b9446b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/fpcopy.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep fmr
-
-define double @test(float %F) {
- %F.upgrd.1 = fpext float %F to double ; <double> [#uses=1]
- ret double %F.upgrd.1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/frounds.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/frounds.ll
deleted file mode 100644
index 8eeadc3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/frounds.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=ppc32
-
-define i32 @foo() {
-entry:
- %retval = alloca i32 ; <i32*> [#uses=2]
- %tmp = alloca i32 ; <i32*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp1 = call i32 @llvm.flt.rounds( ) ; <i32> [#uses=1]
- store i32 %tmp1, i32* %tmp, align 4
- %tmp2 = load i32* %tmp, align 4 ; <i32> [#uses=1]
- store i32 %tmp2, i32* %retval, align 4
- br label %return
-
-return: ; preds = %entry
- %retval3 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval3
-}
-
-declare i32 @llvm.flt.rounds() nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/fsqrt.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/fsqrt.ll
deleted file mode 100644
index 74a8725..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/fsqrt.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; fsqrt should be generated when the fsqrt feature is enabled, but not
-; otherwise.
-
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -mattr=+fsqrt | \
-; RUN: grep {fsqrt f1, f1}
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -mcpu=g5 | \
-; RUN: grep {fsqrt f1, f1}
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -mattr=-fsqrt | \
-; RUN: not grep {fsqrt f1, f1}
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -mcpu=g4 | \
-; RUN: not grep {fsqrt f1, f1}
-
-declare double @llvm.sqrt.f64(double)
-
-define double @X(double %Y) {
- %Z = call double @llvm.sqrt.f64( double %Y ) ; <double> [#uses=1]
- ret double %Z
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/hello.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/hello.ll
deleted file mode 100644
index ea27e92..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/hello.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=ppc32
-; RUN: llc < %s -march=ppc64
-; PR1399
-
-@.str = internal constant [13 x i8] c"Hello World!\00"
-
-define i32 @main() {
- %tmp2 = tail call i32 @puts( i8* getelementptr ([13 x i8]* @.str, i32 0, i64 0) )
- ret i32 0
-}
-
-declare i32 @puts(i8*)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/hidden-vis-2.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/hidden-vis-2.ll
deleted file mode 100644
index e9e2c0a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/hidden-vis-2.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin9 | grep non_lazy_ptr | count 6
-
-@x = external hidden global i32 ; <i32*> [#uses=1]
-@y = extern_weak hidden global i32 ; <i32*> [#uses=1]
-
-define i32 @t() nounwind readonly {
-entry:
- %0 = load i32* @x, align 4 ; <i32> [#uses=1]
- %1 = load i32* @y, align 4 ; <i32> [#uses=1]
- %2 = add i32 %1, %0 ; <i32> [#uses=1]
- ret i32 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/hidden-vis.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/hidden-vis.ll
deleted file mode 100644
index b2cc143..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/hidden-vis.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin9 | not grep non_lazy_ptr
-
-@x = weak hidden global i32 0 ; <i32*> [#uses=1]
-
-define i32 @t() nounwind readonly {
-entry:
- %0 = load i32* @x, align 4 ; <i32> [#uses=1]
- ret i32 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/i128-and-beyond.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/i128-and-beyond.ll
deleted file mode 100644
index 51bcab2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/i128-and-beyond.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep 4294967295 | count 28
-
-; These static initializers are too big to hand off to assemblers
-; as monolithic blobs.
-
-@x = global i128 -1
-@y = global i256 -1
-@z = global i512 -1
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/i64_fp.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/i64_fp.ll
deleted file mode 100644
index d53c948..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/i64_fp.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; fcfid and fctid should be generated when the 64bit feature is enabled, but not
-; otherwise.
-
-; RUN: llc < %s -march=ppc32 -mattr=+64bit | \
-; RUN: grep fcfid
-; RUN: llc < %s -march=ppc32 -mattr=+64bit | \
-; RUN: grep fctidz
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | \
-; RUN: grep fcfid
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | \
-; RUN: grep fctidz
-; RUN: llc < %s -march=ppc32 -mattr=-64bit | \
-; RUN: not grep fcfid
-; RUN: llc < %s -march=ppc32 -mattr=-64bit | \
-; RUN: not grep fctidz
-; RUN: llc < %s -march=ppc32 -mcpu=g4 | \
-; RUN: not grep fcfid
-; RUN: llc < %s -march=ppc32 -mcpu=g4 | \
-; RUN: not grep fctidz
-
-define double @X(double %Y) {
- %A = fptosi double %Y to i64 ; <i64> [#uses=1]
- %B = sitofp i64 %A to double ; <double> [#uses=1]
- ret double %B
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/iabs.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/iabs.ll
deleted file mode 100644
index a43f09c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/iabs.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=ppc32 -stats |& \
-; RUN: grep {4 .*Number of machine instrs printed}
-
-;; Integer absolute value, should produce something as good as:
-;; srawi r2, r3, 31
-;; add r3, r3, r2
-;; xor r3, r3, r2
-;; blr
-define i32 @test(i32 %a) {
- %tmp1neg = sub i32 0, %a
- %b = icmp sgt i32 %a, -1
- %abs = select i1 %b, i32 %a, i32 %tmp1neg
- ret i32 %abs
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/illegal-element-type.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/illegal-element-type.ll
deleted file mode 100644
index 58bd055..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/illegal-element-type.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g3
-
-define void @foo() {
-entry:
- br label %bb
-
-bb: ; preds = %bb, %entry
- br i1 false, label %bb26, label %bb
-
-bb19: ; preds = %bb26
- ret void
-
-bb26: ; preds = %bb
- br i1 false, label %bb30, label %bb19
-
-bb30: ; preds = %bb26
- br label %bb45
-
-bb45: ; preds = %bb45, %bb30
- %V.0 = phi <8 x i16> [ %tmp42, %bb45 ], [ zeroinitializer, %bb30 ] ; <<8 x i16>> [#uses=1]
- %tmp42 = mul <8 x i16> zeroinitializer, %V.0 ; <<8 x i16>> [#uses=1]
- br label %bb45
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/indirectbr.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/indirectbr.ll
deleted file mode 100644
index 2094e10..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/indirectbr.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; RUN: llc < %s -relocation-model=pic -march=ppc32 -mtriple=powerpc-apple-darwin | FileCheck %s -check-prefix=PIC
-; RUN: llc < %s -relocation-model=static -march=ppc32 -mtriple=powerpc-apple-darwin | FileCheck %s -check-prefix=STATIC
-
-@nextaddr = global i8* null ; <i8**> [#uses=2]
-@C.0.2070 = private constant [5 x i8*] [i8* blockaddress(@foo, %L1), i8* blockaddress(@foo, %L2), i8* blockaddress(@foo, %L3), i8* blockaddress(@foo, %L4), i8* blockaddress(@foo, %L5)] ; <[5 x i8*]*> [#uses=1]
-
-define internal i32 @foo(i32 %i) nounwind {
-; PIC: foo:
-; STATIC: foo:
-entry:
- %0 = load i8** @nextaddr, align 4 ; <i8*> [#uses=2]
- %1 = icmp eq i8* %0, null ; <i1> [#uses=1]
- br i1 %1, label %bb3, label %bb2
-
-bb2: ; preds = %entry, %bb3
- %gotovar.4.0 = phi i8* [ %gotovar.4.0.pre, %bb3 ], [ %0, %entry ] ; <i8*> [#uses=1]
-; PIC: mtctr
-; PIC-NEXT: bctr
-; STATIC: mtctr
-; STATIC-NEXT: bctr
- indirectbr i8* %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
-
-bb3: ; preds = %entry
- %2 = getelementptr inbounds [5 x i8*]* @C.0.2070, i32 0, i32 %i ; <i8**> [#uses=1]
- %gotovar.4.0.pre = load i8** %2, align 4 ; <i8*> [#uses=1]
- br label %bb2
-
-L5: ; preds = %bb2
- br label %L4
-
-L4: ; preds = %L5, %bb2
- %res.0 = phi i32 [ 385, %L5 ], [ 35, %bb2 ] ; <i32> [#uses=1]
- br label %L3
-
-L3: ; preds = %L4, %bb2
- %res.1 = phi i32 [ %res.0, %L4 ], [ 5, %bb2 ] ; <i32> [#uses=1]
- br label %L2
-
-L2: ; preds = %L3, %bb2
- %res.2 = phi i32 [ %res.1, %L3 ], [ 1, %bb2 ] ; <i32> [#uses=1]
- %phitmp = mul i32 %res.2, 6 ; <i32> [#uses=1]
- br label %L1
-
-L1: ; preds = %L2, %bb2
- %res.3 = phi i32 [ %phitmp, %L2 ], [ 2, %bb2 ] ; <i32> [#uses=1]
-; PIC: addis r5, r4, ha16(L_BA4__foo_L5-"L1$pb")
-; PIC: li r6, lo16(L_BA4__foo_L5-"L1$pb")
-; PIC: add r5, r5, r6
-; PIC: stw r5
-; STATIC: li r4, lo16(L_BA4__foo_L5)
-; STATIC: addis r4, r4, ha16(L_BA4__foo_L5)
-; STATIC: stw r4
- store i8* blockaddress(@foo, %L5), i8** @nextaddr, align 4
- ret i32 %res.3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/inlineasm-copy.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/inlineasm-copy.ll
deleted file mode 100644
index e1ff82d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/inlineasm-copy.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep mr
-
-define i32 @test(i32 %Y, i32 %X) {
-entry:
- %tmp = tail call i32 asm "foo $0", "=r"( ) ; <i32> [#uses=1]
- ret i32 %tmp
-}
-
-define i32 @test2(i32 %Y, i32 %X) {
-entry:
- %tmp1 = tail call i32 asm "foo $0, $1", "=r,r"( i32 %X ) ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/int-fp-conv-0.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/int-fp-conv-0.ll
deleted file mode 100644
index 983d2b8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/int-fp-conv-0.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=ppc64 > %t
-; RUN: grep __floattitf %t
-; RUN: grep __fixunstfti %t
-
-target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc64-apple-darwin9.2.0"
-
-define ppc_fp128 @foo(i128 %a) nounwind {
-entry:
- %tmp2829 = uitofp i128 %a to ppc_fp128 ; <i64> [#uses=1]
- ret ppc_fp128 %tmp2829
-}
-define i128 @boo(ppc_fp128 %a) nounwind {
-entry:
- %tmp2829 = fptoui ppc_fp128 %a to i128 ; <i64> [#uses=1]
- ret i128 %tmp2829
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/int-fp-conv-1.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/int-fp-conv-1.ll
deleted file mode 100644
index 6c82723..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/int-fp-conv-1.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=ppc64 | grep __floatditf
-
-define i64 @__fixunstfdi(ppc_fp128 %a) nounwind {
-entry:
- %tmp1213 = uitofp i64 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
- %tmp15 = fsub ppc_fp128 %a, %tmp1213 ; <ppc_fp128> [#uses=1]
- %tmp2829 = fptoui ppc_fp128 %tmp15 to i32 ; <i32> [#uses=1]
- %tmp282930 = zext i32 %tmp2829 to i64 ; <i64> [#uses=1]
- %tmp32 = add i64 %tmp282930, 0 ; <i64> [#uses=1]
- ret i64 %tmp32
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/invalid-memcpy.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/invalid-memcpy.ll
deleted file mode 100644
index 3b1f306..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/invalid-memcpy.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=ppc32
-; RUN: llc < %s -march=ppc64
-
-; This testcase is invalid (the alignment specified for memcpy is
-; greater than the alignment guaranteed for Qux or C.0.1173, but it
-; should compile, not crash the code generator.
-
-@C.0.1173 = external constant [33 x i8] ; <[33 x i8]*> [#uses=1]
-
-define void @Bork() {
-entry:
- %Qux = alloca [33 x i8] ; <[33 x i8]*> [#uses=1]
- %Qux1 = bitcast [33 x i8]* %Qux to i8* ; <i8*> [#uses=1]
- call void @llvm.memcpy.i64( i8* %Qux1, i8* getelementptr ([33 x i8]* @C.0.1173, i32 0, i32 0), i64 33, i32 8 )
- ret void
-}
-
-declare void @llvm.memcpy.i64(i8*, i8*, i64, i32)
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/inverted-bool-compares.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/inverted-bool-compares.ll
deleted file mode 100644
index aa7e4d6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/inverted-bool-compares.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep xori
-
-define i32 @test(i1 %B, i32* %P) {
- br i1 %B, label %T, label %F
-
-T: ; preds = %0
- store i32 123, i32* %P
- ret i32 0
-
-F: ; preds = %0
- ret i32 17
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/ispositive.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/ispositive.ll
deleted file mode 100644
index 4161e34..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/ispositive.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 | \
-; RUN: grep {srwi r3, r3, 31}
-
-define i32 @test1(i32 %X) {
-entry:
- icmp slt i32 %X, 0 ; <i1>:0 [#uses=1]
- zext i1 %0 to i32 ; <i32>:1 [#uses=1]
- ret i32 %1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/itofp128.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/itofp128.ll
deleted file mode 100644
index 6d9ef95..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/itofp128.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=ppc64
-
-target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc64-apple-darwin9.2.0"
-
-define i128 @__fixunstfti(ppc_fp128 %a) nounwind {
-entry:
- %tmp1213 = uitofp i128 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
- %tmp15 = fsub ppc_fp128 %a, %tmp1213 ; <ppc_fp128> [#uses=1]
- %tmp2829 = fptoui ppc_fp128 %tmp15 to i64 ; <i64> [#uses=1]
- %tmp282930 = zext i64 %tmp2829 to i128 ; <i128> [#uses=1]
- %tmp32 = add i128 %tmp282930, 0 ; <i128> [#uses=1]
- ret i128 %tmp32
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/lha.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/lha.ll
deleted file mode 100644
index 3a100c1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/lha.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep lha
-
-define i32 @test(i16* %a) {
- %tmp.1 = load i16* %a ; <i16> [#uses=1]
- %tmp.2 = sext i16 %tmp.1 to i32 ; <i32> [#uses=1]
- ret i32 %tmp.2
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/load-constant-addr.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/load-constant-addr.ll
deleted file mode 100644
index f1d061c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/load-constant-addr.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; Should fold the ori into the lfs.
-; RUN: llc < %s -march=ppc32 | grep lfs
-; RUN: llc < %s -march=ppc32 | not grep ori
-
-define float @test() {
- %tmp.i = load float* inttoptr (i32 186018016 to float*) ; <float> [#uses=1]
- ret float %tmp.i
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/long-compare.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/long-compare.ll
deleted file mode 100644
index 94c2526..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/long-compare.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep cntlzw
-; RUN: llc < %s -march=ppc32 | not grep xori
-; RUN: llc < %s -march=ppc32 | not grep {li }
-; RUN: llc < %s -march=ppc32 | not grep {mr }
-
-define i1 @test(i64 %x) {
- %tmp = icmp ult i64 %x, 4294967296
- ret i1 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/longdbl-truncate.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/longdbl-truncate.ll
deleted file mode 100644
index e5f63c6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/longdbl-truncate.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc-apple-darwin8"
-
-define double @SolveCubic(ppc_fp128 %X) {
-entry:
- %Y = fptrunc ppc_fp128 %X to double
- ret double %Y
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/lsr-postinc-pos.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/lsr-postinc-pos.ll
deleted file mode 100644
index f441e42..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/lsr-postinc-pos.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc < %s -print-lsr-output |& FileCheck %s
-
-; The icmp is a post-inc use, and the increment is in %bb11, but the
-; scevgep needs to be inserted in %bb so that it is dominated by %t.
-
-; CHECK: %t = load i8** undef
-; CHECK: %scevgep = getelementptr i8* %t, i32 %lsr.iv.next
-; CHECK: %c1 = icmp ult i8* %scevgep, undef
-
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128-n32"
-target triple = "powerpc-apple-darwin9"
-
-define void @foo() nounwind {
-entry:
- br label %bb11
-
-bb11:
- %i = phi i32 [ 0, %entry ], [ %i.next, %bb ] ; <i32> [#uses=3]
- %ii = shl i32 %i, 2 ; <i32> [#uses=1]
- %c0 = icmp eq i32 %i, undef ; <i1> [#uses=1]
- br i1 %c0, label %bb13, label %bb
-
-bb:
- %t = load i8** undef, align 16 ; <i8*> [#uses=1]
- %p = getelementptr i8* %t, i32 %ii ; <i8*> [#uses=1]
- %c1 = icmp ult i8* %p, undef ; <i1> [#uses=1]
- %i.next = add i32 %i, 1 ; <i32> [#uses=1]
- br i1 %c1, label %bb11, label %bb13
-
-bb13:
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/mask64.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/mask64.ll
deleted file mode 100644
index 139621a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/mask64.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s
-
-target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc64-apple-darwin9.2.0"
- %struct.re_pattern_buffer = type <{ i8*, i64, i8, [7 x i8] }>
-
-define i32 @xre_search_2(%struct.re_pattern_buffer* %bufp, i32 %range) nounwind {
-entry:
- br i1 false, label %bb16, label %bb49
-
-bb16: ; preds = %entry
- %tmp19 = load i8** null, align 1 ; <i8*> [#uses=1]
- %tmp21 = load i8* %tmp19, align 1 ; <i8> [#uses=1]
- switch i8 %tmp21, label %bb49 [
- i8 0, label %bb45
- i8 1, label %bb34
- ]
-
-bb34: ; preds = %bb16
- ret i32 0
-
-bb45: ; preds = %bb16
- ret i32 -1
-
-bb49: ; preds = %bb16, %entry
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/mem-rr-addr-mode.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/mem-rr-addr-mode.ll
deleted file mode 100644
index 5661ef9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/mem-rr-addr-mode.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep li.*16
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | not grep addi
-
-; Codegen lvx (R+16) as t = li 16, lvx t,R
-; This shares the 16 between the two loads.
-
-define void @func(<4 x float>* %a, <4 x float>* %b) {
- %tmp1 = getelementptr <4 x float>* %b, i32 1 ; <<4 x float>*> [#uses=1]
- %tmp = load <4 x float>* %tmp1 ; <<4 x float>> [#uses=1]
- %tmp3 = getelementptr <4 x float>* %a, i32 1 ; <<4 x float>*> [#uses=1]
- %tmp4 = load <4 x float>* %tmp3 ; <<4 x float>> [#uses=1]
- %tmp5 = fmul <4 x float> %tmp, %tmp4 ; <<4 x float>> [#uses=1]
- %tmp8 = load <4 x float>* %b ; <<4 x float>> [#uses=1]
- %tmp9 = fadd <4 x float> %tmp5, %tmp8 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp9, <4 x float>* %a
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/mem_update.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/mem_update.ll
deleted file mode 100644
index 17e7e28..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/mem_update.ll
+++ /dev/null
@@ -1,68 +0,0 @@
-; RUN: llc < %s -march=ppc32 -enable-ppc-preinc | \
-; RUN: not grep addi
-; RUN: llc < %s -march=ppc64 -enable-ppc-preinc | \
-; RUN: not grep addi
-
-@Glob = global i64 4
-
-define i32* @test0(i32* %X, i32* %dest) nounwind {
- %Y = getelementptr i32* %X, i32 4
- %A = load i32* %Y
- store i32 %A, i32* %dest
- ret i32* %Y
-}
-
-define i32* @test1(i32* %X, i32* %dest) nounwind {
- %Y = getelementptr i32* %X, i32 4
- %A = load i32* %Y
- store i32 %A, i32* %dest
- ret i32* %Y
-}
-
-define i16* @test2(i16* %X, i32* %dest) nounwind {
- %Y = getelementptr i16* %X, i32 4
- %A = load i16* %Y
- %B = sext i16 %A to i32
- store i32 %B, i32* %dest
- ret i16* %Y
-}
-
-define i16* @test3(i16* %X, i32* %dest) nounwind {
- %Y = getelementptr i16* %X, i32 4
- %A = load i16* %Y
- %B = zext i16 %A to i32
- store i32 %B, i32* %dest
- ret i16* %Y
-}
-
-define i16* @test3a(i16* %X, i64* %dest) nounwind {
- %Y = getelementptr i16* %X, i32 4
- %A = load i16* %Y
- %B = sext i16 %A to i64
- store i64 %B, i64* %dest
- ret i16* %Y
-}
-
-define i64* @test4(i64* %X, i64* %dest) nounwind {
- %Y = getelementptr i64* %X, i32 4
- %A = load i64* %Y
- store i64 %A, i64* %dest
- ret i64* %Y
-}
-
-define i16* @test5(i16* %X) nounwind {
- %Y = getelementptr i16* %X, i32 4
- store i16 7, i16* %Y
- ret i16* %Y
-}
-
-define i64* @test6(i64* %X, i64 %A) nounwind {
- %Y = getelementptr i64* %X, i32 4
- store i64 %A, i64* %Y
- ret i64* %Y
-}
-
-define i64* @test7(i64* %X, i64 %A) nounwind {
- store i64 %A, i64* @Glob
- ret i64* @Glob
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/mul-neg-power-2.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/mul-neg-power-2.ll
deleted file mode 100644
index 9688d6e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/mul-neg-power-2.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep mul
-
-define i32 @test1(i32 %a) {
- %tmp.1 = mul i32 %a, -2 ; <i32> [#uses=1]
- %tmp.2 = add i32 %tmp.1, 63 ; <i32> [#uses=1]
- ret i32 %tmp.2
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/mul-with-overflow.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/mul-with-overflow.ll
deleted file mode 100644
index f03e3cb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/mul-with-overflow.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=ppc32
-
-declare {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
-define i1 @a(i32 %x) zeroext nounwind {
- %res = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 3)
- %obil = extractvalue {i32, i1} %res, 1
- ret i1 %obil
-}
-
-declare {i32, i1} @llvm.smul.with.overflow.i32(i32 %a, i32 %b)
-define i1 @b(i32 %x) zeroext nounwind {
- %res = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %x, i32 3)
- %obil = extractvalue {i32, i1} %res, 1
- ret i1 %obil
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/mulhs.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/mulhs.ll
deleted file mode 100644
index 9ab8d99..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/mulhs.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; All of these ands and shifts should be folded into rlwimi's
-; RUN: llc < %s -march=ppc32 -o %t
-; RUN: not grep mulhwu %t
-; RUN: not grep srawi %t
-; RUN: not grep add %t
-; RUN: grep mulhw %t | count 1
-
-define i32 @mulhs(i32 %a, i32 %b) {
-entry:
- %tmp.1 = sext i32 %a to i64 ; <i64> [#uses=1]
- %tmp.3 = sext i32 %b to i64 ; <i64> [#uses=1]
- %tmp.4 = mul i64 %tmp.3, %tmp.1 ; <i64> [#uses=1]
- %tmp.6 = lshr i64 %tmp.4, 32 ; <i64> [#uses=1]
- %tmp.7 = trunc i64 %tmp.6 to i32 ; <i32> [#uses=1]
- ret i32 %tmp.7
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/multiple-return-values.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/multiple-return-values.ll
deleted file mode 100644
index b9317f9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/multiple-return-values.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=ppc32
-; RUN: llc < %s -march=ppc64
-
-define {i64, float} @bar(i64 %a, float %b) {
- %y = add i64 %a, 7
- %z = fadd float %b, 7.0
- ret i64 %y, float %z
-}
-
-define i64 @foo() {
- %M = call {i64, float} @bar(i64 21, float 21.0)
- %N = getresult {i64, float} %M, 0
- %O = getresult {i64, float} %M, 1
- %P = fptosi float %O to i64
- %Q = add i64 %P, %N
- ret i64 %Q
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/neg.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/neg.ll
deleted file mode 100644
index c673912..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/neg.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep neg
-
-define i32 @test(i32 %X) {
- %Y = sub i32 0, %X ; <i32> [#uses=1]
- ret i32 %Y
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/no-dead-strip.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/no-dead-strip.ll
deleted file mode 100644
index 3459413..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/no-dead-strip.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s | grep {no_dead_strip.*_X}
-
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "powerpc-apple-darwin8.8.0"
-@X = weak global i32 0 ; <i32*> [#uses=1]
-@.str = internal constant [4 x i8] c"t.c\00", section "llvm.metadata" ; <[4 x i8]*> [#uses=1]
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (i32* @X to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/or-addressing-mode.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/or-addressing-mode.ll
deleted file mode 100644
index e50374e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/or-addressing-mode.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin8 | not grep ori
-; RUN: llc < %s -mtriple=powerpc-apple-darwin8 | not grep rlwimi
-
-define i32 @test1(i8* %P) {
- %tmp.2.i = ptrtoint i8* %P to i32 ; <i32> [#uses=2]
- %tmp.4.i = and i32 %tmp.2.i, -65536 ; <i32> [#uses=1]
- %tmp.10.i = lshr i32 %tmp.2.i, 5 ; <i32> [#uses=1]
- %tmp.11.i = and i32 %tmp.10.i, 2040 ; <i32> [#uses=1]
- %tmp.13.i = or i32 %tmp.11.i, %tmp.4.i ; <i32> [#uses=1]
- %tmp.14.i = inttoptr i32 %tmp.13.i to i32* ; <i32*> [#uses=1]
- %tmp.3 = load i32* %tmp.14.i ; <i32> [#uses=1]
- ret i32 %tmp.3
-}
-
-define i32 @test2(i32 %P) {
- %tmp.2 = shl i32 %P, 4 ; <i32> [#uses=1]
- %tmp.3 = or i32 %tmp.2, 2 ; <i32> [#uses=1]
- %tmp.4 = inttoptr i32 %tmp.3 to i32* ; <i32*> [#uses=1]
- %tmp.5 = load i32* %tmp.4 ; <i32> [#uses=1]
- ret i32 %tmp.5
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/ppc-prologue.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/ppc-prologue.ll
deleted file mode 100644
index e49dcb8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/ppc-prologue.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin8 -disable-fp-elim | FileCheck %s
-
-define i32 @_Z4funci(i32 %a) ssp {
-; CHECK: mflr r0
-; CHECK-NEXT: stw r31, -4(r1)
-; CHECK-NEXT: stw r0, 8(r1)
-; CHECK-NEXT: stwu r1, -80(r1)
-; CHECK-NEXT: Llabel1:
-; CHECK-NEXT: mr r31, r1
-; CHECK-NEXT: Llabel2:
-entry:
- %a_addr = alloca i32 ; <i32*> [#uses=2]
- %retval = alloca i32 ; <i32*> [#uses=2]
- %0 = alloca i32 ; <i32*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %a, i32* %a_addr
- %1 = call i32 @_Z3barPi(i32* %a_addr) ; <i32> [#uses=1]
- store i32 %1, i32* %0, align 4
- %2 = load i32* %0, align 4 ; <i32> [#uses=1]
- store i32 %2, i32* %retval, align 4
- br label %return
-
-return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval1
-}
-
-declare i32 @_Z3barPi(i32*)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/ppcf128-1-opt.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/ppcf128-1-opt.ll
deleted file mode 100644
index 2fc1720..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/ppcf128-1-opt.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s > %t
-; ModuleID = '<stdin>'
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc-apple-darwin8"
-
-define ppc_fp128 @plus(ppc_fp128 %x, ppc_fp128 %y) {
-entry:
- %tmp3 = fadd ppc_fp128 %x, %y ; <ppc_fp128> [#uses=1]
- ret ppc_fp128 %tmp3
-}
-
-define ppc_fp128 @minus(ppc_fp128 %x, ppc_fp128 %y) {
-entry:
- %tmp3 = fsub ppc_fp128 %x, %y ; <ppc_fp128> [#uses=1]
- ret ppc_fp128 %tmp3
-}
-
-define ppc_fp128 @times(ppc_fp128 %x, ppc_fp128 %y) {
-entry:
- %tmp3 = fmul ppc_fp128 %x, %y ; <ppc_fp128> [#uses=1]
- ret ppc_fp128 %tmp3
-}
-
-define ppc_fp128 @divide(ppc_fp128 %x, ppc_fp128 %y) {
-entry:
- %tmp3 = fdiv ppc_fp128 %x, %y ; <ppc_fp128> [#uses=1]
- ret ppc_fp128 %tmp3
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/ppcf128-1.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/ppcf128-1.ll
deleted file mode 100644
index 84ec588..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/ppcf128-1.ll
+++ /dev/null
@@ -1,93 +0,0 @@
-; RUN: opt < %s -std-compile-opts | llc > %t
-; XFAIL: *
-; ModuleID = 'ld3.c'
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc-apple-darwin8"
-
-define ppc_fp128 @plus(ppc_fp128 %x, ppc_fp128 %y) {
-entry:
- %x_addr = alloca ppc_fp128 ; <ppc_fp128*> [#uses=2]
- %y_addr = alloca ppc_fp128 ; <ppc_fp128*> [#uses=2]
- %retval = alloca ppc_fp128, align 16 ; <ppc_fp128*> [#uses=2]
- %tmp = alloca ppc_fp128, align 16 ; <ppc_fp128*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store ppc_fp128 %x, ppc_fp128* %x_addr
- store ppc_fp128 %y, ppc_fp128* %y_addr
- %tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp3 = fadd ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
- %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
- br label %return
-
-return: ; preds = %entry
- %retval5 = load ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
- ret ppc_fp128 %retval5
-}
-
-define ppc_fp128 @minus(ppc_fp128 %x, ppc_fp128 %y) {
-entry:
- %x_addr = alloca ppc_fp128 ; <ppc_fp128*> [#uses=2]
- %y_addr = alloca ppc_fp128 ; <ppc_fp128*> [#uses=2]
- %retval = alloca ppc_fp128, align 16 ; <ppc_fp128*> [#uses=2]
- %tmp = alloca ppc_fp128, align 16 ; <ppc_fp128*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store ppc_fp128 %x, ppc_fp128* %x_addr
- store ppc_fp128 %y, ppc_fp128* %y_addr
- %tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp3 = fsub ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
- %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
- br label %return
-
-return: ; preds = %entry
- %retval5 = load ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
- ret ppc_fp128 %retval5
-}
-
-define ppc_fp128 @times(ppc_fp128 %x, ppc_fp128 %y) {
-entry:
- %x_addr = alloca ppc_fp128 ; <ppc_fp128*> [#uses=2]
- %y_addr = alloca ppc_fp128 ; <ppc_fp128*> [#uses=2]
- %retval = alloca ppc_fp128, align 16 ; <ppc_fp128*> [#uses=2]
- %tmp = alloca ppc_fp128, align 16 ; <ppc_fp128*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store ppc_fp128 %x, ppc_fp128* %x_addr
- store ppc_fp128 %y, ppc_fp128* %y_addr
- %tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp3 = fmul ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
- %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
- br label %return
-
-return: ; preds = %entry
- %retval5 = load ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
- ret ppc_fp128 %retval5
-}
-
-define ppc_fp128 @divide(ppc_fp128 %x, ppc_fp128 %y) {
-entry:
- %x_addr = alloca ppc_fp128 ; <ppc_fp128*> [#uses=2]
- %y_addr = alloca ppc_fp128 ; <ppc_fp128*> [#uses=2]
- %retval = alloca ppc_fp128, align 16 ; <ppc_fp128*> [#uses=2]
- %tmp = alloca ppc_fp128, align 16 ; <ppc_fp128*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store ppc_fp128 %x, ppc_fp128* %x_addr
- store ppc_fp128 %y, ppc_fp128* %y_addr
- %tmp1 = load ppc_fp128* %x_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp2 = load ppc_fp128* %y_addr, align 16 ; <ppc_fp128> [#uses=1]
- %tmp3 = fdiv ppc_fp128 %tmp1, %tmp2 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp3, ppc_fp128* %tmp, align 16
- %tmp4 = load ppc_fp128* %tmp, align 16 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp4, ppc_fp128* %retval, align 16
- br label %return
-
-return: ; preds = %entry
- %retval5 = load ppc_fp128* %retval ; <ppc_fp128> [#uses=1]
- ret ppc_fp128 %retval5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/ppcf128-2.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/ppcf128-2.ll
deleted file mode 100644
index 7eee354..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/ppcf128-2.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=ppc64
-
-define i64 @__fixtfdi(ppc_fp128 %a) nounwind {
-entry:
- br i1 false, label %bb, label %bb8
-bb: ; preds = %entry
- %tmp5 = fsub ppc_fp128 0xM80000000000000000000000000000000, %a ; <ppc_fp128> [#uses=1]
- %tmp6 = tail call i64 @__fixunstfdi( ppc_fp128 %tmp5 ) nounwind ; <i64> [#uses=0]
- ret i64 0
-bb8: ; preds = %entry
- ret i64 0
-}
-
-declare i64 @__fixunstfdi(ppc_fp128)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/ppcf128-3.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/ppcf128-3.ll
deleted file mode 100644
index 5043b62..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/ppcf128-3.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc < %s -march=ppc32
- %struct.stp_sequence = type { double, double }
-
-define i32 @stp_sequence_set_short_data(%struct.stp_sequence* %sequence, i32 %count, i16* %data) {
-entry:
- %tmp1112 = sitofp i16 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
- %tmp13 = call i32 (...)* @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind ; <i32> [#uses=0]
- ret i32 0
-}
-
-define i32 @stp_sequence_set_short_data2(%struct.stp_sequence* %sequence, i32 %count, i16* %data) {
-entry:
- %tmp1112 = sitofp i8 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
- %tmp13 = call i32 (...)* @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind ; <i32> [#uses=0]
- ret i32 0
-}
-
-define i32 @stp_sequence_set_short_data3(%struct.stp_sequence* %sequence, i32 %count, i16* %data) {
-entry:
- %tmp1112 = uitofp i16 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
- %tmp13 = call i32 (...)* @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind ; <i32> [#uses=0]
- ret i32 0
-}
-
-define i32 @stp_sequence_set_short_data4(%struct.stp_sequence* %sequence, i32 %count, i16* %data) {
-entry:
- %tmp1112 = uitofp i8 0 to ppc_fp128 ; <ppc_fp128> [#uses=1]
- %tmp13 = call i32 (...)* @__inline_isfinite( ppc_fp128 %tmp1112 ) nounwind ; <i32> [#uses=0]
- ret i32 0
-}
-
-declare i32 @__inline_isfinite(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/ppcf128-4.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/ppcf128-4.ll
deleted file mode 100644
index 104a25e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/ppcf128-4.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=ppc32
-
-define ppc_fp128 @__floatditf(i64 %u) nounwind {
-entry:
- %tmp6 = fmul ppc_fp128 0xM00000000000000000000000000000000, 0xM41F00000000000000000000000000000
- %tmp78 = trunc i64 %u to i32
- %tmp789 = uitofp i32 %tmp78 to ppc_fp128
- %tmp11 = fadd ppc_fp128 %tmp789, %tmp6
- ret ppc_fp128 %tmp11
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/pr3711_widen_bit.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/pr3711_widen_bit.ll
deleted file mode 100644
index 7abdeda..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/pr3711_widen_bit.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5
-
-; Test that causes an abort in expanding a bit convert due to missing support
-; for widening.
-
-define i32 @main() nounwind {
-entry:
- br i1 icmp ne (i32 trunc (i64 bitcast (<2 x i32> <i32 2, i32 2> to i64) to i32), i32 2), label %bb, label %bb1
-
-bb: ; preds = %entry
- tail call void @abort() noreturn nounwind
- unreachable
-
-bb1: ; preds = %entry
- ret i32 0
-}
-
-declare void @abort() noreturn nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/private.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/private.ll
deleted file mode 100644
index f9405f6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/private.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; Test to make sure that the 'private' is used correctly.
-;
-; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu > %t
-; RUN: grep .Lfoo: %t
-; RUN: grep bl.*\.Lfoo %t
-; RUN: grep .Lbaz: %t
-; RUN: grep lis.*\.Lbaz %t
-; RUN: llc < %s -mtriple=powerpc-apple-darwin > %t
-; RUN: grep L_foo: %t
-; RUN: grep bl.*\L_foo %t
-; RUN: grep L_baz: %t
-; RUN: grep lis.*\L_baz %t
-
-define private void @foo() nounwind {
- ret void
-}
-
-@baz = private global i32 4
-
-define i32 @bar() nounwind {
- call void @foo()
- %1 = load i32* @baz, align 4
- ret i32 %1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/reg-coalesce-simple.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/reg-coalesce-simple.ll
deleted file mode 100644
index e0ddb42..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/reg-coalesce-simple.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep or
-
-%struct.foo = type { i32, i32, [0 x i8] }
-
-define i32 @test(%struct.foo* %X) nounwind {
- %tmp1 = getelementptr %struct.foo* %X, i32 0, i32 2, i32 100 ; <i8*> [#uses=1]
- %tmp = load i8* %tmp1 ; <i8> [#uses=1]
- %tmp2 = zext i8 %tmp to i32 ; <i32> [#uses=1]
- ret i32 %tmp2
-}
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/retaddr.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/retaddr.ll
deleted file mode 100644
index cf16b4c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/retaddr.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep mflr
-; RUN: llc < %s -march=ppc32 | grep lwz
-; RUN: llc < %s -march=ppc64 | grep {ld r., 16(r1)}
-
-target triple = "powerpc-apple-darwin8"
-
-define void @foo(i8** %X) nounwind {
-entry:
- %tmp = tail call i8* @llvm.returnaddress( i32 0 ) ; <i8*> [#uses=1]
- store i8* %tmp, i8** %X, align 4
- ret void
-}
-
-declare i8* @llvm.returnaddress(i32)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/return-val-i128.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/return-val-i128.ll
deleted file mode 100644
index e14a438..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/return-val-i128.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -march=ppc64
-
-define i128 @__fixsfdi(float %a) {
-entry:
- %a_addr = alloca float ; <float*> [#uses=4]
- %retval = alloca i128, align 16 ; <i128*> [#uses=2]
- %tmp = alloca i128, align 16 ; <i128*> [#uses=3]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store float %a, float* %a_addr
- %tmp1 = load float* %a_addr, align 4 ; <float> [#uses=1]
- %tmp2 = fcmp olt float %tmp1, 0.000000e+00 ; <i1> [#uses=1]
- %tmp23 = zext i1 %tmp2 to i8 ; <i8> [#uses=1]
- %toBool = icmp ne i8 %tmp23, 0 ; <i1> [#uses=1]
- br i1 %toBool, label %bb, label %bb8
-bb: ; preds = %entry
- %tmp4 = load float* %a_addr, align 4 ; <float> [#uses=1]
- %tmp5 = fsub float -0.000000e+00, %tmp4 ; <float> [#uses=1]
- %tmp6 = call i128 @__fixunssfDI( float %tmp5 ) nounwind ; <i128> [#uses=1]
- %tmp7 = sub i128 0, %tmp6 ; <i128> [#uses=1]
- store i128 %tmp7, i128* %tmp, align 16
- br label %bb11
-bb8: ; preds = %entry
- %tmp9 = load float* %a_addr, align 4 ; <float> [#uses=1]
- %tmp10 = call i128 @__fixunssfDI( float %tmp9 ) nounwind ; <i128> [#uses=1]
- store i128 %tmp10, i128* %tmp, align 16
- br label %bb11
-bb11: ; preds = %bb8, %bb
- %tmp12 = load i128* %tmp, align 16 ; <i128> [#uses=1]
- store i128 %tmp12, i128* %retval, align 16
- br label %return
-return: ; preds = %bb11
- %retval13 = load i128* %retval ; <i128> [#uses=1]
- ret i128 %retval13
-}
-
-declare i128 @__fixunssfDI(float)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwimi-commute.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwimi-commute.ll
deleted file mode 100644
index 6410c63..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwimi-commute.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep rlwimi
-; RUN: llc < %s -march=ppc32 | not grep {or }
-
-; Make sure there are no register-register copies here.
-
-define void @test1(i32* %A, i32* %B, i32* %D, i32* %E) {
- %A.upgrd.1 = load i32* %A ; <i32> [#uses=2]
- %B.upgrd.2 = load i32* %B ; <i32> [#uses=1]
- %X = and i32 %A.upgrd.1, 15 ; <i32> [#uses=1]
- %Y = and i32 %B.upgrd.2, -16 ; <i32> [#uses=1]
- %Z = or i32 %X, %Y ; <i32> [#uses=1]
- store i32 %Z, i32* %D
- store i32 %A.upgrd.1, i32* %E
- ret void
-}
-
-define void @test2(i32* %A, i32* %B, i32* %D, i32* %E) {
- %A.upgrd.3 = load i32* %A ; <i32> [#uses=1]
- %B.upgrd.4 = load i32* %B ; <i32> [#uses=2]
- %X = and i32 %A.upgrd.3, 15 ; <i32> [#uses=1]
- %Y = and i32 %B.upgrd.4, -16 ; <i32> [#uses=1]
- %Z = or i32 %X, %Y ; <i32> [#uses=1]
- store i32 %Z, i32* %D
- store i32 %B.upgrd.4, i32* %E
- ret void
-}
-
-define i32 @test3(i32 %a, i32 %b) {
- %tmp.1 = and i32 %a, 15 ; <i32> [#uses=1]
- %tmp.3 = and i32 %b, 240 ; <i32> [#uses=1]
- %tmp.4 = or i32 %tmp.3, %tmp.1 ; <i32> [#uses=1]
- ret i32 %tmp.4
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwimi-keep-rsh.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwimi-keep-rsh.ll
deleted file mode 100644
index 7bce01c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwimi-keep-rsh.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin | FileCheck %s
-; Formerly dropped the RHS of %tmp6 when constructing rlwimi.
-; 7346117
-
-@foo = external global i32
-
-define void @xxx(i32 %a, i32 %b, i32 %c, i32 %d) nounwind optsize {
-; CHECK: _xxx:
-; CHECK: or
-; CHECK: and
-; CHECK: rlwimi
-entry:
- %tmp0 = ashr i32 %d, 31
- %tmp1 = and i32 %tmp0, 255
- %tmp2 = xor i32 %tmp1, 255
- %tmp3 = ashr i32 %b, 31
- %tmp4 = ashr i32 %a, 4
- %tmp5 = or i32 %tmp3, %tmp4
- %tmp6 = and i32 %tmp2, %tmp5
- %tmp7 = shl i32 %c, 8
- %tmp8 = or i32 %tmp6, %tmp7
- store i32 %tmp8, i32* @foo, align 4
- br label %return
-
-return:
- ret void
-; CHECK: blr
-}
\ No newline at end of file
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwimi.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwimi.ll
deleted file mode 100644
index 556ca3d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwimi.ll
+++ /dev/null
@@ -1,70 +0,0 @@
-; All of these ands and shifts should be folded into rlwimi's
-; RUN: llc < %s -march=ppc32 | not grep and
-; RUN: llc < %s -march=ppc32 | grep rlwimi | count 8
-
-define i32 @test1(i32 %x, i32 %y) {
-entry:
- %tmp.3 = shl i32 %x, 16 ; <i32> [#uses=1]
- %tmp.7 = and i32 %y, 65535 ; <i32> [#uses=1]
- %tmp.9 = or i32 %tmp.7, %tmp.3 ; <i32> [#uses=1]
- ret i32 %tmp.9
-}
-
-define i32 @test2(i32 %x, i32 %y) {
-entry:
- %tmp.7 = and i32 %x, 65535 ; <i32> [#uses=1]
- %tmp.3 = shl i32 %y, 16 ; <i32> [#uses=1]
- %tmp.9 = or i32 %tmp.7, %tmp.3 ; <i32> [#uses=1]
- ret i32 %tmp.9
-}
-
-define i32 @test3(i32 %x, i32 %y) {
-entry:
- %tmp.3 = lshr i32 %x, 16 ; <i32> [#uses=1]
- %tmp.6 = and i32 %y, -65536 ; <i32> [#uses=1]
- %tmp.7 = or i32 %tmp.6, %tmp.3 ; <i32> [#uses=1]
- ret i32 %tmp.7
-}
-
-define i32 @test4(i32 %x, i32 %y) {
-entry:
- %tmp.6 = and i32 %x, -65536 ; <i32> [#uses=1]
- %tmp.3 = lshr i32 %y, 16 ; <i32> [#uses=1]
- %tmp.7 = or i32 %tmp.6, %tmp.3 ; <i32> [#uses=1]
- ret i32 %tmp.7
-}
-
-define i32 @test5(i32 %x, i32 %y) {
-entry:
- %tmp.3 = shl i32 %x, 1 ; <i32> [#uses=1]
- %tmp.4 = and i32 %tmp.3, -65536 ; <i32> [#uses=1]
- %tmp.7 = and i32 %y, 65535 ; <i32> [#uses=1]
- %tmp.9 = or i32 %tmp.4, %tmp.7 ; <i32> [#uses=1]
- ret i32 %tmp.9
-}
-
-define i32 @test6(i32 %x, i32 %y) {
-entry:
- %tmp.7 = and i32 %x, 65535 ; <i32> [#uses=1]
- %tmp.3 = shl i32 %y, 1 ; <i32> [#uses=1]
- %tmp.4 = and i32 %tmp.3, -65536 ; <i32> [#uses=1]
- %tmp.9 = or i32 %tmp.4, %tmp.7 ; <i32> [#uses=1]
- ret i32 %tmp.9
-}
-
-define i32 @test7(i32 %x, i32 %y) {
-entry:
- %tmp.2 = and i32 %x, -65536 ; <i32> [#uses=1]
- %tmp.5 = and i32 %y, 65535 ; <i32> [#uses=1]
- %tmp.7 = or i32 %tmp.5, %tmp.2 ; <i32> [#uses=1]
- ret i32 %tmp.7
-}
-
-define i32 @test8(i32 %bar) {
-entry:
- %tmp.3 = shl i32 %bar, 1 ; <i32> [#uses=1]
- %tmp.4 = and i32 %tmp.3, 2 ; <i32> [#uses=1]
- %tmp.6 = and i32 %bar, -3 ; <i32> [#uses=1]
- %tmp.7 = or i32 %tmp.4, %tmp.6 ; <i32> [#uses=1]
- ret i32 %tmp.7
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwimi2.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwimi2.ll
deleted file mode 100644
index 59a3655..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwimi2.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; All of these ands and shifts should be folded into rlwimi's
-; RUN: llc < %s -march=ppc32 -o %t
-; RUN: grep rlwimi %t | count 3
-; RUN: grep srwi %t | count 1
-; RUN: not grep slwi %t
-
-define i16 @test1(i32 %srcA, i32 %srcB, i32 %alpha) {
-entry:
- %tmp.1 = shl i32 %srcA, 15 ; <i32> [#uses=1]
- %tmp.4 = and i32 %tmp.1, 32505856 ; <i32> [#uses=1]
- %tmp.6 = and i32 %srcA, 31775 ; <i32> [#uses=1]
- %tmp.7 = or i32 %tmp.4, %tmp.6 ; <i32> [#uses=1]
- %tmp.9 = shl i32 %srcB, 15 ; <i32> [#uses=1]
- %tmp.12 = and i32 %tmp.9, 32505856 ; <i32> [#uses=1]
- %tmp.14 = and i32 %srcB, 31775 ; <i32> [#uses=1]
- %tmp.15 = or i32 %tmp.12, %tmp.14 ; <i32> [#uses=1]
- %tmp.18 = mul i32 %tmp.7, %alpha ; <i32> [#uses=1]
- %tmp.20 = sub i32 32, %alpha ; <i32> [#uses=1]
- %tmp.22 = mul i32 %tmp.15, %tmp.20 ; <i32> [#uses=1]
- %tmp.23 = add i32 %tmp.22, %tmp.18 ; <i32> [#uses=2]
- %tmp.27 = lshr i32 %tmp.23, 5 ; <i32> [#uses=1]
- %tmp.28 = trunc i32 %tmp.27 to i16 ; <i16> [#uses=1]
- %tmp.29 = and i16 %tmp.28, 31775 ; <i16> [#uses=1]
- %tmp.33 = lshr i32 %tmp.23, 20 ; <i32> [#uses=1]
- %tmp.34 = trunc i32 %tmp.33 to i16 ; <i16> [#uses=1]
- %tmp.35 = and i16 %tmp.34, 992 ; <i16> [#uses=1]
- %tmp.36 = or i16 %tmp.29, %tmp.35 ; <i16> [#uses=1]
- ret i16 %tmp.36
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwimi3.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwimi3.ll
deleted file mode 100644
index 05d37bf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwimi3.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=ppc32 -stats |& \
-; RUN: grep {Number of machine instrs printed} | grep 12
-
-define i16 @Trans16Bit(i32 %srcA, i32 %srcB, i32 %alpha) {
- %tmp1 = shl i32 %srcA, 15 ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 32505856 ; <i32> [#uses=1]
- %tmp4 = and i32 %srcA, 31775 ; <i32> [#uses=1]
- %tmp5 = or i32 %tmp2, %tmp4 ; <i32> [#uses=1]
- %tmp7 = shl i32 %srcB, 15 ; <i32> [#uses=1]
- %tmp8 = and i32 %tmp7, 32505856 ; <i32> [#uses=1]
- %tmp10 = and i32 %srcB, 31775 ; <i32> [#uses=1]
- %tmp11 = or i32 %tmp8, %tmp10 ; <i32> [#uses=1]
- %tmp14 = mul i32 %tmp5, %alpha ; <i32> [#uses=1]
- %tmp16 = sub i32 32, %alpha ; <i32> [#uses=1]
- %tmp18 = mul i32 %tmp11, %tmp16 ; <i32> [#uses=1]
- %tmp19 = add i32 %tmp18, %tmp14 ; <i32> [#uses=2]
- %tmp21 = lshr i32 %tmp19, 5 ; <i32> [#uses=1]
- %tmp21.upgrd.1 = trunc i32 %tmp21 to i16 ; <i16> [#uses=1]
- %tmp = and i16 %tmp21.upgrd.1, 31775 ; <i16> [#uses=1]
- %tmp23 = lshr i32 %tmp19, 20 ; <i32> [#uses=1]
- %tmp23.upgrd.2 = trunc i32 %tmp23 to i16 ; <i16> [#uses=1]
- %tmp24 = and i16 %tmp23.upgrd.2, 992 ; <i16> [#uses=1]
- %tmp25 = or i16 %tmp, %tmp24 ; <i16> [#uses=1]
- ret i16 %tmp25
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwinm.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwinm.ll
deleted file mode 100644
index 699f6e7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwinm.ll
+++ /dev/null
@@ -1,61 +0,0 @@
-; All of these ands and shifts should be folded into rlwimi's
-; RUN: llc < %s -march=ppc32 -o %t
-; RUN: not grep and %t
-; RUN: not grep srawi %t
-; RUN: not grep srwi %t
-; RUN: not grep slwi %t
-; RUN: grep rlwinm %t | count 8
-
-define i32 @test1(i32 %a) {
-entry:
- %tmp.1 = and i32 %a, 268431360 ; <i32> [#uses=1]
- ret i32 %tmp.1
-}
-
-define i32 @test2(i32 %a) {
-entry:
- %tmp.1 = and i32 %a, -268435441 ; <i32> [#uses=1]
- ret i32 %tmp.1
-}
-
-define i32 @test3(i32 %a) {
-entry:
- %tmp.2 = ashr i32 %a, 8 ; <i32> [#uses=1]
- %tmp.3 = and i32 %tmp.2, 255 ; <i32> [#uses=1]
- ret i32 %tmp.3
-}
-
-define i32 @test4(i32 %a) {
-entry:
- %tmp.3 = lshr i32 %a, 8 ; <i32> [#uses=1]
- %tmp.4 = and i32 %tmp.3, 255 ; <i32> [#uses=1]
- ret i32 %tmp.4
-}
-
-define i32 @test5(i32 %a) {
-entry:
- %tmp.2 = shl i32 %a, 8 ; <i32> [#uses=1]
- %tmp.3 = and i32 %tmp.2, -8388608 ; <i32> [#uses=1]
- ret i32 %tmp.3
-}
-
-define i32 @test6(i32 %a) {
-entry:
- %tmp.1 = and i32 %a, 65280 ; <i32> [#uses=1]
- %tmp.2 = ashr i32 %tmp.1, 8 ; <i32> [#uses=1]
- ret i32 %tmp.2
-}
-
-define i32 @test7(i32 %a) {
-entry:
- %tmp.1 = and i32 %a, 65280 ; <i32> [#uses=1]
- %tmp.2 = lshr i32 %tmp.1, 8 ; <i32> [#uses=1]
- ret i32 %tmp.2
-}
-
-define i32 @test8(i32 %a) {
-entry:
- %tmp.1 = and i32 %a, 16711680 ; <i32> [#uses=1]
- %tmp.2 = shl i32 %tmp.1, 8 ; <i32> [#uses=1]
- ret i32 %tmp.2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwinm2.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwinm2.ll
deleted file mode 100644
index 46542d8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/rlwinm2.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; All of these ands and shifts should be folded into rlw[i]nm instructions
-; RUN: llc < %s -march=ppc32 -o %t
-; RUN: not grep and %t
-; RUN: not grep srawi %t
-; RUN: not grep srwi %t
-; RUN: not grep slwi %t
-; RUN: grep rlwnm %t | count 1
-; RUN: grep rlwinm %t | count 1
-
-define i32 @test1(i32 %X, i32 %Y) {
-entry:
- %tmp = trunc i32 %Y to i8 ; <i8> [#uses=2]
- %tmp1 = shl i32 %X, %Y ; <i32> [#uses=1]
- %tmp2 = sub i32 32, %Y ; <i8> [#uses=1]
- %tmp3 = lshr i32 %X, %tmp2 ; <i32> [#uses=1]
- %tmp4 = or i32 %tmp1, %tmp3 ; <i32> [#uses=1]
- %tmp6 = and i32 %tmp4, 127 ; <i32> [#uses=1]
- ret i32 %tmp6
-}
-
-define i32 @test2(i32 %X) {
-entry:
- %tmp1 = lshr i32 %X, 27 ; <i32> [#uses=1]
- %tmp2 = shl i32 %X, 5 ; <i32> [#uses=1]
- %tmp2.masked = and i32 %tmp2, 96 ; <i32> [#uses=1]
- %tmp5 = or i32 %tmp1, %tmp2.masked ; <i32> [#uses=1]
- ret i32 %tmp5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/rotl-2.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/rotl-2.ll
deleted file mode 100644
index d32ef59..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/rotl-2.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep rlwinm | count 4
-; RUN: llc < %s -march=ppc32 | grep rlwnm | count 2
-; RUN: llc < %s -march=ppc32 | not grep or
-
-define i32 @rotl32(i32 %A, i8 %Amt) nounwind {
- %shift.upgrd.1 = zext i8 %Amt to i32 ; <i32> [#uses=1]
- %B = shl i32 %A, %shift.upgrd.1 ; <i32> [#uses=1]
- %Amt2 = sub i8 32, %Amt ; <i8> [#uses=1]
- %shift.upgrd.2 = zext i8 %Amt2 to i32 ; <i32> [#uses=1]
- %C = lshr i32 %A, %shift.upgrd.2 ; <i32> [#uses=1]
- %D = or i32 %B, %C ; <i32> [#uses=1]
- ret i32 %D
-}
-
-define i32 @rotr32(i32 %A, i8 %Amt) nounwind {
- %shift.upgrd.3 = zext i8 %Amt to i32 ; <i32> [#uses=1]
- %B = lshr i32 %A, %shift.upgrd.3 ; <i32> [#uses=1]
- %Amt2 = sub i8 32, %Amt ; <i8> [#uses=1]
- %shift.upgrd.4 = zext i8 %Amt2 to i32 ; <i32> [#uses=1]
- %C = shl i32 %A, %shift.upgrd.4 ; <i32> [#uses=1]
- %D = or i32 %B, %C ; <i32> [#uses=1]
- ret i32 %D
-}
-
-define i32 @rotli32(i32 %A) nounwind {
- %B = shl i32 %A, 5 ; <i32> [#uses=1]
- %C = lshr i32 %A, 27 ; <i32> [#uses=1]
- %D = or i32 %B, %C ; <i32> [#uses=1]
- ret i32 %D
-}
-
-define i32 @rotri32(i32 %A) nounwind {
- %B = lshr i32 %A, 5 ; <i32> [#uses=1]
- %C = shl i32 %A, 27 ; <i32> [#uses=1]
- %D = or i32 %B, %C ; <i32> [#uses=1]
- ret i32 %D
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/rotl-64.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/rotl-64.ll
deleted file mode 100644
index 674c9e4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/rotl-64.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=ppc64 | grep rldicl
-; RUN: llc < %s -march=ppc64 | grep rldcl
-; PR1613
-
-define i64 @t1(i64 %A) {
- %tmp1 = lshr i64 %A, 57
- %tmp2 = shl i64 %A, 7
- %tmp3 = or i64 %tmp1, %tmp2
- ret i64 %tmp3
-}
-
-define i64 @t2(i64 %A, i8 zeroext %Amt) {
- %Amt1 = zext i8 %Amt to i64
- %tmp1 = lshr i64 %A, %Amt1
- %Amt2 = sub i8 64, %Amt
- %Amt3 = zext i8 %Amt2 to i64
- %tmp2 = shl i64 %A, %Amt3
- %tmp3 = or i64 %tmp1, %tmp2
- ret i64 %tmp3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/rotl.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/rotl.ll
deleted file mode 100644
index 56fc4a8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/rotl.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep rlwnm | count 2
-; RUN: llc < %s -march=ppc32 | grep rlwinm | count 2
-
-define i32 @rotlw(i32 %x, i32 %sh) {
-entry:
- %tmp.7 = sub i32 32, %sh ; <i32> [#uses=1]
- %tmp.10 = lshr i32 %x, %tmp.7 ; <i32> [#uses=2]
- %tmp.4 = shl i32 %x, %sh ; <i32> [#uses=1]
- %tmp.12 = or i32 %tmp.10, %tmp.4 ; <i32> [#uses=1]
- ret i32 %tmp.12
-}
-
-define i32 @rotrw(i32 %x, i32 %sh) {
-entry:
- %tmp.3 = trunc i32 %sh to i8 ; <i8> [#uses=1]
- %tmp.4 = lshr i32 %x, %sh ; <i32> [#uses=2]
- %tmp.7 = sub i32 32, %sh ; <i32> [#uses=1]
- %tmp.10 = shl i32 %x, %tmp.7 ; <i32> [#uses=1]
- %tmp.12 = or i32 %tmp.4, %tmp.10 ; <i32> [#uses=1]
- ret i32 %tmp.12
-}
-
-define i32 @rotlwi(i32 %x) {
-entry:
- %tmp.7 = lshr i32 %x, 27 ; <i32> [#uses=2]
- %tmp.3 = shl i32 %x, 5 ; <i32> [#uses=1]
- %tmp.9 = or i32 %tmp.3, %tmp.7 ; <i32> [#uses=1]
- ret i32 %tmp.9
-}
-
-define i32 @rotrwi(i32 %x) {
-entry:
- %tmp.3 = lshr i32 %x, 5 ; <i32> [#uses=2]
- %tmp.7 = shl i32 %x, 27 ; <i32> [#uses=1]
- %tmp.9 = or i32 %tmp.3, %tmp.7 ; <i32> [#uses=1]
- ret i32 %tmp.9
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/sections.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/sections.ll
deleted file mode 100644
index 0ff4a89..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/sections.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; Test to make sure that bss sections are printed with the '.section' directive.
-; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu | FileCheck %s
-
-@A = global i32 0
-
-; CHECK: .section .bss,"aw",@nobits
-; CHECK: .globl A
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/select-cc.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/select-cc.ll
deleted file mode 100644
index ccc6489..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/select-cc.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=ppc32
-; PR3011
-
-define <2 x double> @vector_select(<2 x double> %x, <2 x double> %y) nounwind {
- %x.lo = extractelement <2 x double> %x, i32 0 ; <double> [#uses=1]
- %x.lo.ge = fcmp oge double %x.lo, 0.000000e+00 ; <i1> [#uses=1]
- %a.d = select i1 %x.lo.ge, <2 x double> %y, <2 x double> %x ; <<2 x double>> [#uses=1]
- ret <2 x double> %a.d
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/select_lt0.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/select_lt0.ll
deleted file mode 100644
index 95ba84a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/select_lt0.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep cmp
-
-define i32 @seli32_1(i32 %a) {
-entry:
- %tmp.1 = icmp slt i32 %a, 0 ; <i1> [#uses=1]
- %retval = select i1 %tmp.1, i32 5, i32 0 ; <i32> [#uses=1]
- ret i32 %retval
-}
-
-define i32 @seli32_2(i32 %a, i32 %b) {
-entry:
- %tmp.1 = icmp slt i32 %a, 0 ; <i1> [#uses=1]
- %retval = select i1 %tmp.1, i32 %b, i32 0 ; <i32> [#uses=1]
- ret i32 %retval
-}
-
-define i32 @seli32_3(i32 %a, i16 %b) {
-entry:
- %tmp.2 = sext i16 %b to i32 ; <i32> [#uses=1]
- %tmp.1 = icmp slt i32 %a, 0 ; <i1> [#uses=1]
- %retval = select i1 %tmp.1, i32 %tmp.2, i32 0 ; <i32> [#uses=1]
- ret i32 %retval
-}
-
-define i32 @seli32_4(i32 %a, i16 %b) {
-entry:
- %tmp.2 = zext i16 %b to i32 ; <i32> [#uses=1]
- %tmp.1 = icmp slt i32 %a, 0 ; <i1> [#uses=1]
- %retval = select i1 %tmp.1, i32 %tmp.2, i32 0 ; <i32> [#uses=1]
- ret i32 %retval
-}
-
-define i16 @seli16_1(i16 %a) {
-entry:
- %tmp.1 = icmp slt i16 %a, 0 ; <i1> [#uses=1]
- %retval = select i1 %tmp.1, i16 7, i16 0 ; <i16> [#uses=1]
- ret i16 %retval
-}
-
-define i16 @seli16_2(i32 %a, i16 %b) {
- %tmp.1 = icmp slt i32 %a, 0 ; <i1> [#uses=1]
- %retval = select i1 %tmp.1, i16 %b, i16 0 ; <i16> [#uses=1]
- ret i16 %retval
-}
-
-define i32 @seli32_a_a(i32 %a) {
- %tmp = icmp slt i32 %a, 1 ; <i1> [#uses=1]
- %min = select i1 %tmp, i32 %a, i32 0 ; <i32> [#uses=1]
- ret i32 %min
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/setcc_no_zext.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/setcc_no_zext.ll
deleted file mode 100644
index 9b2036e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/setcc_no_zext.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep rlwinm
-
-define i32 @setcc_one_or_zero(i32* %a) {
-entry:
- %tmp.1 = icmp ne i32* %a, null ; <i1> [#uses=1]
- %inc.1 = zext i1 %tmp.1 to i32 ; <i32> [#uses=1]
- ret i32 %inc.1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/seteq-0.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/seteq-0.ll
deleted file mode 100644
index 688b29a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/seteq-0.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 | \
-; RUN: grep {srwi r., r., 5}
-
-define i32 @eq0(i32 %a) {
- %tmp.1 = icmp eq i32 %a, 0 ; <i1> [#uses=1]
- %tmp.2 = zext i1 %tmp.1 to i32 ; <i32> [#uses=1]
- ret i32 %tmp.2
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/shift128.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/shift128.ll
deleted file mode 100644
index 8e518c1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/shift128.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=ppc64 | grep sld | count 5
-
-define i128 @foo_lshr(i128 %x, i128 %y) {
- %r = lshr i128 %x, %y
- ret i128 %r
-}
-define i128 @foo_ashr(i128 %x, i128 %y) {
- %r = ashr i128 %x, %y
- ret i128 %r
-}
-define i128 @foo_shl(i128 %x, i128 %y) {
- %r = shl i128 %x, %y
- ret i128 %r
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/shl_elim.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/shl_elim.ll
deleted file mode 100644
index f177c4a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/shl_elim.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep slwi
-
-define i32 @test1(i64 %a) {
- %tmp29 = lshr i64 %a, 24 ; <i64> [#uses=1]
- %tmp23 = trunc i64 %tmp29 to i32 ; <i32> [#uses=1]
- %tmp410 = lshr i32 %tmp23, 9 ; <i32> [#uses=1]
- %tmp45 = trunc i32 %tmp410 to i16 ; <i16> [#uses=1]
- %tmp456 = sext i16 %tmp45 to i32 ; <i32> [#uses=1]
- ret i32 %tmp456
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/shl_sext.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/shl_sext.ll
deleted file mode 100644
index 1f35eb4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/shl_sext.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; This test should not contain a sign extend
-; RUN: llc < %s -march=ppc32 | not grep extsb
-
-define i32 @test(i32 %mode.0.i.0) {
- %tmp.79 = trunc i32 %mode.0.i.0 to i8 ; <i8> [#uses=1]
- %tmp.80 = sext i8 %tmp.79 to i32 ; <i32> [#uses=1]
- %tmp.81 = shl i32 %tmp.80, 24 ; <i32> [#uses=1]
- ret i32 %tmp.81
-}
-
-define i32 @test2(i32 %mode.0.i.0) {
- %tmp.79 = trunc i32 %mode.0.i.0 to i8 ; <i8> [#uses=1]
- %tmp.80 = sext i8 %tmp.79 to i32 ; <i32> [#uses=1]
- %tmp.81 = shl i32 %tmp.80, 16 ; <i32> [#uses=1]
- %tmp.82 = and i32 %tmp.81, 16711680 ; <i32> [#uses=1]
- ret i32 %tmp.82
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/sign_ext_inreg1.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/sign_ext_inreg1.ll
deleted file mode 100644
index 2679c8e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/sign_ext_inreg1.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep srwi
-; RUN: llc < %s -march=ppc32 | not grep rlwimi
-
-define i32 @baz(i64 %a) {
- %tmp29 = lshr i64 %a, 24 ; <i64> [#uses=1]
- %tmp23 = trunc i64 %tmp29 to i32 ; <i32> [#uses=1]
- %tmp410 = lshr i32 %tmp23, 9 ; <i32> [#uses=1]
- %tmp45 = trunc i32 %tmp410 to i16 ; <i16> [#uses=1]
- %tmp456 = sext i16 %tmp45 to i32 ; <i32> [#uses=1]
- ret i32 %tmp456
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/small-arguments.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/small-arguments.ll
deleted file mode 100644
index 31bcee6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/small-arguments.ll
+++ /dev/null
@@ -1,52 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep {extsh\\|rlwinm}
-
-declare i16 @foo() signext
-
-define i32 @test1(i16 signext %X) {
- %Y = sext i16 %X to i32 ;; dead
- ret i32 %Y
-}
-
-define i32 @test2(i16 zeroext %X) {
- %Y = sext i16 %X to i32
- %Z = and i32 %Y, 65535 ;; dead
- ret i32 %Z
-}
-
-define void @test3() {
- %tmp.0 = call i16 @foo() signext ;; no extsh!
- %tmp.1 = icmp slt i16 %tmp.0, 1234
- br i1 %tmp.1, label %then, label %UnifiedReturnBlock
-
-then:
- call i32 @test1(i16 0 signext)
- ret void
-UnifiedReturnBlock:
- ret void
-}
-
-define i32 @test4(i16* %P) {
- %tmp.1 = load i16* %P
- %tmp.2 = zext i16 %tmp.1 to i32
- %tmp.3 = and i32 %tmp.2, 255
- ret i32 %tmp.3
-}
-
-define i32 @test5(i16* %P) {
- %tmp.1 = load i16* %P
- %tmp.2 = bitcast i16 %tmp.1 to i16
- %tmp.3 = zext i16 %tmp.2 to i32
- %tmp.4 = and i32 %tmp.3, 255
- ret i32 %tmp.4
-}
-
-define i32 @test6(i32* %P) {
- %tmp.1 = load i32* %P
- %tmp.2 = and i32 %tmp.1, 255
- ret i32 %tmp.2
-}
-
-define i16 @test7(float %a) zeroext {
- %tmp.1 = fptoui float %a to i16
- ret i16 %tmp.1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/stfiwx-2.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/stfiwx-2.ll
deleted file mode 100644
index c49b25c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/stfiwx-2.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; This cannot be a stfiwx
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep stb
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | not grep stfiwx
-
-define void @test(float %F, i8* %P) {
- %I = fptosi float %F to i32
- %X = trunc i32 %I to i8
- store i8 %X, i8* %P
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/stfiwx.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/stfiwx.ll
deleted file mode 100644
index d1c3f52..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/stfiwx.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -mattr=stfiwx -o %t1
-; RUN: grep stfiwx %t1
-; RUN: not grep r1 %t1
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin8 -mattr=-stfiwx \
-; RUN: -o %t2
-; RUN: not grep stfiwx %t2
-; RUN: grep r1 %t2
-
-define void @test(float %a, i32* %b) {
- %tmp.2 = fptosi float %a to i32 ; <i32> [#uses=1]
- store i32 %tmp.2, i32* %b
- ret void
-}
-
-define void @test2(float %a, i32* %b, i32 %i) {
- %tmp.2 = getelementptr i32* %b, i32 1 ; <i32*> [#uses=1]
- %tmp.5 = getelementptr i32* %b, i32 %i ; <i32*> [#uses=1]
- %tmp.7 = fptosi float %a to i32 ; <i32> [#uses=3]
- store i32 %tmp.7, i32* %tmp.5
- store i32 %tmp.7, i32* %tmp.2
- store i32 %tmp.7, i32* %b
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/store-load-fwd.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/store-load-fwd.ll
deleted file mode 100644
index 25663c1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/store-load-fwd.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=ppc32 | not grep lwz
-
-define i32 @test(i32* %P) {
- store i32 1, i32* %P
- %V = load i32* %P ; <i32> [#uses=1]
- ret i32 %V
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/stubs.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/stubs.ll
deleted file mode 100644
index 4889263..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/stubs.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc %s -o - -mtriple=powerpc-apple-darwin8 | FileCheck %s
-define ppc_fp128 @test1(i64 %X) nounwind readnone {
-entry:
- %0 = sitofp i64 %X to ppc_fp128
- ret ppc_fp128 %0
-}
-
-; CHECK: _test1:
-; CHECK: bl ___floatditf$stub
-; CHECK: .section __TEXT,__symbol_stub1,symbol_stubs,pure_instructions,16
-; CHECK: ___floatditf$stub:
-; CHECK: .indirect_symbol ___floatditf
-; CHECK: lis r11,ha16(___floatditf$lazy_ptr)
-; CHECK: lwzu r12,lo16(___floatditf$lazy_ptr)(r11)
-; CHECK: mtctr r12
-; CHECK: bctr
-; CHECK: .section __DATA,__la_symbol_ptr,lazy_symbol_pointers
-; CHECK: ___floatditf$lazy_ptr:
-; CHECK: .indirect_symbol ___floatditf
-; CHECK: .long dyld_stub_binding_helper
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/subc.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/subc.ll
deleted file mode 100644
index 5914dca..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/subc.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; All of these should be codegen'd without loading immediates
-; RUN: llc < %s -march=ppc32 -o %t
-; RUN: grep subfc %t | count 1
-; RUN: grep subfe %t | count 1
-; RUN: grep subfze %t | count 1
-; RUN: grep subfme %t | count 1
-; RUN: grep subfic %t | count 2
-
-define i64 @sub_ll(i64 %a, i64 %b) {
-entry:
- %tmp.2 = sub i64 %a, %b ; <i64> [#uses=1]
- ret i64 %tmp.2
-}
-
-define i64 @sub_l_5(i64 %a) {
-entry:
- %tmp.1 = sub i64 5, %a ; <i64> [#uses=1]
- ret i64 %tmp.1
-}
-
-define i64 @sub_l_m5(i64 %a) {
-entry:
- %tmp.1 = sub i64 -5, %a ; <i64> [#uses=1]
- ret i64 %tmp.1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/tailcall1-64.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/tailcall1-64.ll
deleted file mode 100644
index e9c83a5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/tailcall1-64.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=ppc64 -tailcallopt | grep TC_RETURNd8
-define fastcc i32 @tailcallee(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
-entry:
- ret i32 %a3
-}
-
-define fastcc i32 @tailcaller(i32 %in1, i32 %in2) {
-entry:
- %tmp11 = tail call fastcc i32 @tailcallee( i32 %in1, i32 %in2, i32 %in1, i32 %in2 ) ; <i32> [#uses=1]
- ret i32 %tmp11
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/tailcall1.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/tailcall1.ll
deleted file mode 100644
index 08f3392..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/tailcall1.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=ppc32 -tailcallopt | grep TC_RETURN
-define fastcc i32 @tailcallee(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
-entry:
- ret i32 %a3
-}
-
-define fastcc i32 @tailcaller(i32 %in1, i32 %in2) {
-entry:
- %tmp11 = tail call fastcc i32 @tailcallee( i32 %in1, i32 %in2, i32 %in1, i32 %in2 ) ; <i32> [#uses=1]
- ret i32 %tmp11
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/tailcallpic1.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/tailcallpic1.ll
deleted file mode 100644
index f3f5028..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/tailcallpic1.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -tailcallopt -mtriple=powerpc-apple-darwin -relocation-model=pic | grep TC_RETURN
-
-
-
-define protected fastcc i32 @tailcallee(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
-entry:
- ret i32 %a3
-}
-
-define fastcc i32 @tailcaller(i32 %in1, i32 %in2) {
-entry:
- %tmp11 = tail call fastcc i32 @tailcallee( i32 %in1, i32 %in2, i32 %in1, i32 %in2 ) ; <i32> [#uses=1]
- ret i32 %tmp11
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/tango.net.ftp.FtpClient.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/tango.net.ftp.FtpClient.ll
deleted file mode 100644
index 8a1288a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/tango.net.ftp.FtpClient.ll
+++ /dev/null
@@ -1,583 +0,0 @@
-; RUN: llc < %s
-; PR4534
-
-; ModuleID = 'tango.net.ftp.FtpClient.bc'
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc-apple-darwin9.6.0"
- %"byte[]" = type { i32, i8* }
-@.str167 = external constant [11 x i8] ; <[11 x i8]*> [#uses=1]
-@.str170 = external constant [11 x i8] ; <[11 x i8]*> [#uses=2]
-@.str171 = external constant [5 x i8] ; <[5 x i8]*> [#uses=1]
-@llvm.used = appending global [1 x i8*] [i8* bitcast (void (%"byte[]")* @foo to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define fastcc void @foo(%"byte[]" %line_arg) {
-entry:
- %line_arg830 = extractvalue %"byte[]" %line_arg, 0 ; <i32> [#uses=12]
- %line_arg831 = extractvalue %"byte[]" %line_arg, 1 ; <i8*> [#uses=17]
- %t5 = load i8* %line_arg831 ; <i8> [#uses=1]
- br label %forcondi
-
-forcondi: ; preds = %forbodyi, %entry
- %l.0i = phi i32 [ 10, %entry ], [ %t4i, %forbodyi ] ; <i32> [#uses=2]
- %p.0i = phi i8* [ getelementptr ([11 x i8]* @.str167, i32 0, i32 -1), %entry ], [ %t7i, %forbodyi ] ; <i8*> [#uses=1]
- %t4i = add i32 %l.0i, -1 ; <i32> [#uses=1]
- %t5i = icmp eq i32 %l.0i, 0 ; <i1> [#uses=1]
- br i1 %t5i, label %forcond.i, label %forbodyi
-
-forbodyi: ; preds = %forcondi
- %t7i = getelementptr i8* %p.0i, i32 1 ; <i8*> [#uses=2]
- %t8i = load i8* %t7i ; <i8> [#uses=1]
- %t12i = icmp eq i8 %t8i, %t5 ; <i1> [#uses=1]
- br i1 %t12i, label %forcond.i, label %forcondi
-
-forcond.i: ; preds = %forbody.i, %forbodyi, %forcondi
- %storemerge.i = phi i32 [ %t106.i, %forbody.i ], [ 1, %forcondi ], [ 1, %forbodyi ] ; <i32> [#uses=1]
- %t77.i286 = phi i1 [ %phit3, %forbody.i ], [ false, %forcondi ], [ false, %forbodyi ] ; <i1> [#uses=1]
- br i1 %t77.i286, label %forcond.i295, label %forbody.i
-
-forbody.i: ; preds = %forcond.i
- %t106.i = add i32 %storemerge.i, 1 ; <i32> [#uses=2]
- %phit3 = icmp ugt i32 %t106.i, 3 ; <i1> [#uses=1]
- br label %forcond.i
-
-forcond.i295: ; preds = %forbody.i301, %forcond.i
- %storemerge.i292 = phi i32 [ %t106.i325, %forbody.i301 ], [ 4, %forcond.i ] ; <i32> [#uses=1]
- %t77.i293 = phi i1 [ %phit2, %forbody.i301 ], [ false, %forcond.i ] ; <i1> [#uses=1]
- br i1 %t77.i293, label %forcond.i332, label %forbody.i301
-
-forbody.i301: ; preds = %forcond.i295
- %t106.i325 = add i32 %storemerge.i292, 1 ; <i32> [#uses=2]
- %phit2 = icmp ugt i32 %t106.i325, 6 ; <i1> [#uses=1]
- br label %forcond.i295
-
-forcond.i332: ; preds = %forbody.i338, %forcond.i295
- %storemerge.i329 = phi i32 [ %t106.i362, %forbody.i338 ], [ 7, %forcond.i295 ] ; <i32> [#uses=3]
- %t77.i330 = phi i1 [ %phit1, %forbody.i338 ], [ false, %forcond.i295 ] ; <i1> [#uses=1]
- br i1 %t77.i330, label %wcond.i370, label %forbody.i338
-
-forbody.i338: ; preds = %forcond.i332
- %t106.i362 = add i32 %storemerge.i329, 1 ; <i32> [#uses=2]
- %phit1 = icmp ugt i32 %t106.i362, 9 ; <i1> [#uses=1]
- br label %forcond.i332
-
-wcond.i370: ; preds = %wbody.i372, %forcond.i332
- %.frame.0.11 = phi i32 [ %t18.i371.c, %wbody.i372 ], [ %storemerge.i329, %forcond.i332 ] ; <i32> [#uses=2]
- %t3.i368 = phi i32 [ %t18.i371.c, %wbody.i372 ], [ %storemerge.i329, %forcond.i332 ] ; <i32> [#uses=5]
- %t4.i369 = icmp ult i32 %t3.i368, %line_arg830 ; <i1> [#uses=1]
- br i1 %t4.i369, label %andand.i378, label %wcond22.i383
-
-wbody.i372: ; preds = %andand.i378
- %t18.i371.c = add i32 %t3.i368, 1 ; <i32> [#uses=2]
- br label %wcond.i370
-
-andand.i378: ; preds = %wcond.i370
- %t11.i375 = getelementptr i8* %line_arg831, i32 %t3.i368 ; <i8*> [#uses=1]
- %t12.i376 = load i8* %t11.i375 ; <i8> [#uses=1]
- %t14.i377 = icmp eq i8 %t12.i376, 32 ; <i1> [#uses=1]
- br i1 %t14.i377, label %wbody.i372, label %wcond22.i383
-
-wcond22.i383: ; preds = %wbody23.i385, %andand.i378, %wcond.i370
- %.frame.0.10 = phi i32 [ %t50.i384, %wbody23.i385 ], [ %.frame.0.11, %wcond.i370 ], [ %.frame.0.11, %andand.i378 ] ; <i32> [#uses=2]
- %t49.i381 = phi i32 [ %t50.i384, %wbody23.i385 ], [ %t3.i368, %wcond.i370 ], [ %t3.i368, %andand.i378 ] ; <i32> [#uses=5]
- %t32.i382 = icmp ult i32 %t49.i381, %line_arg830 ; <i1> [#uses=1]
- br i1 %t32.i382, label %andand33.i391, label %wcond54.i396
-
-wbody23.i385: ; preds = %andand33.i391
- %t50.i384 = add i32 %t49.i381, 1 ; <i32> [#uses=2]
- br label %wcond22.i383
-
-andand33.i391: ; preds = %wcond22.i383
- %t42.i388 = getelementptr i8* %line_arg831, i32 %t49.i381 ; <i8*> [#uses=1]
- %t43.i389 = load i8* %t42.i388 ; <i8> [#uses=1]
- %t45.i390 = icmp eq i8 %t43.i389, 32 ; <i1> [#uses=1]
- br i1 %t45.i390, label %wcond54.i396, label %wbody23.i385
-
-wcond54.i396: ; preds = %wbody55.i401, %andand33.i391, %wcond22.i383
- %.frame.0.9 = phi i32 [ %t82.i400, %wbody55.i401 ], [ %.frame.0.10, %wcond22.i383 ], [ %.frame.0.10, %andand33.i391 ] ; <i32> [#uses=2]
- %t81.i394 = phi i32 [ %t82.i400, %wbody55.i401 ], [ %t49.i381, %wcond22.i383 ], [ %t49.i381, %andand33.i391 ] ; <i32> [#uses=3]
- %t64.i395 = icmp ult i32 %t81.i394, %line_arg830 ; <i1> [#uses=1]
- br i1 %t64.i395, label %andand65.i407, label %wcond.i716
-
-wbody55.i401: ; preds = %andand65.i407
- %t82.i400 = add i32 %t81.i394, 1 ; <i32> [#uses=2]
- br label %wcond54.i396
-
-andand65.i407: ; preds = %wcond54.i396
- %t74.i404 = getelementptr i8* %line_arg831, i32 %t81.i394 ; <i8*> [#uses=1]
- %t75.i405 = load i8* %t74.i404 ; <i8> [#uses=1]
- %t77.i406 = icmp eq i8 %t75.i405, 32 ; <i1> [#uses=1]
- br i1 %t77.i406, label %wbody55.i401, label %wcond.i716
-
-wcond.i716: ; preds = %wbody.i717, %andand65.i407, %wcond54.i396
- %.frame.0.0 = phi i32 [ %t18.i.c829, %wbody.i717 ], [ %.frame.0.9, %wcond54.i396 ], [ %.frame.0.9, %andand65.i407 ] ; <i32> [#uses=7]
- %t4.i715 = icmp ult i32 %.frame.0.0, %line_arg830 ; <i1> [#uses=1]
- br i1 %t4.i715, label %andand.i721, label %wcond22.i724
-
-wbody.i717: ; preds = %andand.i721
- %t18.i.c829 = add i32 %.frame.0.0, 1 ; <i32> [#uses=1]
- br label %wcond.i716
-
-andand.i721: ; preds = %wcond.i716
- %t11.i718 = getelementptr i8* %line_arg831, i32 %.frame.0.0 ; <i8*> [#uses=1]
- %t12.i719 = load i8* %t11.i718 ; <i8> [#uses=1]
- %t14.i720 = icmp eq i8 %t12.i719, 32 ; <i1> [#uses=1]
- br i1 %t14.i720, label %wbody.i717, label %wcond22.i724
-
-wcond22.i724: ; preds = %wbody23.i726, %andand.i721, %wcond.i716
- %.frame.0.1 = phi i32 [ %t50.i725, %wbody23.i726 ], [ %.frame.0.0, %wcond.i716 ], [ %.frame.0.0, %andand.i721 ] ; <i32> [#uses=2]
- %t49.i722 = phi i32 [ %t50.i725, %wbody23.i726 ], [ %.frame.0.0, %wcond.i716 ], [ %.frame.0.0, %andand.i721 ] ; <i32> [#uses=5]
- %t32.i723 = icmp ult i32 %t49.i722, %line_arg830 ; <i1> [#uses=1]
- br i1 %t32.i723, label %andand33.i731, label %wcond54.i734
-
-wbody23.i726: ; preds = %andand33.i731
- %t50.i725 = add i32 %t49.i722, 1 ; <i32> [#uses=2]
- br label %wcond22.i724
-
-andand33.i731: ; preds = %wcond22.i724
- %t42.i728 = getelementptr i8* %line_arg831, i32 %t49.i722 ; <i8*> [#uses=1]
- %t43.i729 = load i8* %t42.i728 ; <i8> [#uses=1]
- %t45.i730 = icmp eq i8 %t43.i729, 32 ; <i1> [#uses=1]
- br i1 %t45.i730, label %wcond54.i734, label %wbody23.i726
-
-wcond54.i734: ; preds = %wbody55.i736, %andand33.i731, %wcond22.i724
- %.frame.0.2 = phi i32 [ %t82.i735, %wbody55.i736 ], [ %.frame.0.1, %wcond22.i724 ], [ %.frame.0.1, %andand33.i731 ] ; <i32> [#uses=2]
- %t81.i732 = phi i32 [ %t82.i735, %wbody55.i736 ], [ %t49.i722, %wcond22.i724 ], [ %t49.i722, %andand33.i731 ] ; <i32> [#uses=3]
- %t64.i733 = icmp ult i32 %t81.i732, %line_arg830 ; <i1> [#uses=1]
- br i1 %t64.i733, label %andand65.i740, label %wcond.i750
-
-wbody55.i736: ; preds = %andand65.i740
- %t82.i735 = add i32 %t81.i732, 1 ; <i32> [#uses=2]
- br label %wcond54.i734
-
-andand65.i740: ; preds = %wcond54.i734
- %t74.i737 = getelementptr i8* %line_arg831, i32 %t81.i732 ; <i8*> [#uses=1]
- %t75.i738 = load i8* %t74.i737 ; <i8> [#uses=1]
- %t77.i739 = icmp eq i8 %t75.i738, 32 ; <i1> [#uses=1]
- br i1 %t77.i739, label %wbody55.i736, label %wcond.i750
-
-wcond.i750: ; preds = %wbody.i752, %andand65.i740, %wcond54.i734
- %.frame.0.3 = phi i32 [ %t18.i751.c, %wbody.i752 ], [ %.frame.0.2, %wcond54.i734 ], [ %.frame.0.2, %andand65.i740 ] ; <i32> [#uses=11]
- %t4.i749 = icmp ult i32 %.frame.0.3, %line_arg830 ; <i1> [#uses=1]
- br i1 %t4.i749, label %andand.i758, label %wcond22.i761
-
-wbody.i752: ; preds = %andand.i758
- %t18.i751.c = add i32 %.frame.0.3, 1 ; <i32> [#uses=1]
- br label %wcond.i750
-
-andand.i758: ; preds = %wcond.i750
- %t11.i755 = getelementptr i8* %line_arg831, i32 %.frame.0.3 ; <i8*> [#uses=1]
- %t12.i756 = load i8* %t11.i755 ; <i8> [#uses=1]
- %t14.i757 = icmp eq i8 %t12.i756, 32 ; <i1> [#uses=1]
- br i1 %t14.i757, label %wbody.i752, label %wcond22.i761
-
-wcond22.i761: ; preds = %wbody23.i763, %andand.i758, %wcond.i750
- %.frame.0.4 = phi i32 [ %t50.i762, %wbody23.i763 ], [ %.frame.0.3, %wcond.i750 ], [ %.frame.0.3, %andand.i758 ] ; <i32> [#uses=2]
- %t49.i759 = phi i32 [ %t50.i762, %wbody23.i763 ], [ %.frame.0.3, %wcond.i750 ], [ %.frame.0.3, %andand.i758 ] ; <i32> [#uses=7]
- %t32.i760 = icmp ult i32 %t49.i759, %line_arg830 ; <i1> [#uses=1]
- br i1 %t32.i760, label %andand33.i769, label %wcond54.i773
-
-wbody23.i763: ; preds = %andand33.i769
- %t50.i762 = add i32 %t49.i759, 1 ; <i32> [#uses=2]
- br label %wcond22.i761
-
-andand33.i769: ; preds = %wcond22.i761
- %t42.i766 = getelementptr i8* %line_arg831, i32 %t49.i759 ; <i8*> [#uses=1]
- %t43.i767 = load i8* %t42.i766 ; <i8> [#uses=1]
- %t45.i768 = icmp eq i8 %t43.i767, 32 ; <i1> [#uses=1]
- br i1 %t45.i768, label %wcond54.i773, label %wbody23.i763
-
-wcond54.i773: ; preds = %wbody55.i775, %andand33.i769, %wcond22.i761
- %.frame.0.5 = phi i32 [ %t82.i774, %wbody55.i775 ], [ %.frame.0.4, %wcond22.i761 ], [ %.frame.0.4, %andand33.i769 ] ; <i32> [#uses=1]
- %t81.i770 = phi i32 [ %t82.i774, %wbody55.i775 ], [ %t49.i759, %wcond22.i761 ], [ %t49.i759, %andand33.i769 ] ; <i32> [#uses=3]
- %t64.i771 = icmp ult i32 %t81.i770, %line_arg830 ; <i1> [#uses=1]
- br i1 %t64.i771, label %andand65.i780, label %Dt3net3ftp9FClient13FConnection13pListLineMFAaZS5t3net3ftp9FClient11FFileInfo10p_wordMFZAa.exit786
-
-wbody55.i775: ; preds = %andand65.i780
- %t82.i774 = add i32 %t81.i770, 1 ; <i32> [#uses=2]
- br label %wcond54.i773
-
-andand65.i780: ; preds = %wcond54.i773
- %t74.i777 = getelementptr i8* %line_arg831, i32 %t81.i770 ; <i8*> [#uses=1]
- %t75.i778 = load i8* %t74.i777 ; <i8> [#uses=1]
- %t77.i779 = icmp eq i8 %t75.i778, 32 ; <i1> [#uses=1]
- br i1 %t77.i779, label %wbody55.i775, label %Dt3net3ftp9FClient13FConnection13pListLineMFAaZS5t3net3ftp9FClient11FFileInfo10p_wordMFZAa.exit786
-
-Dt3net3ftp9FClient13FConnection13pListLineMFAaZS5t3net3ftp9FClient11FFileInfo10p_wordMFZAa.exit786: ; preds = %andand65.i780, %wcond54.i773
- %t89.i782 = getelementptr i8* %line_arg831, i32 %.frame.0.3 ; <i8*> [#uses=4]
- %t90.i783 = sub i32 %t49.i759, %.frame.0.3 ; <i32> [#uses=2]
- br label %wcond.i792
-
-wcond.i792: ; preds = %wbody.i794, %Dt3net3ftp9FClient13FConnection13pListLineMFAaZS5t3net3ftp9FClient11FFileInfo10p_wordMFZAa.exit786
- %.frame.0.6 = phi i32 [ %.frame.0.5, %Dt3net3ftp9FClient13FConnection13pListLineMFAaZS5t3net3ftp9FClient11FFileInfo10p_wordMFZAa.exit786 ], [ %t18.i793.c, %wbody.i794 ] ; <i32> [#uses=9]
- %t4.i791 = icmp ult i32 %.frame.0.6, %line_arg830 ; <i1> [#uses=1]
- br i1 %t4.i791, label %andand.i800, label %wcond22.i803
-
-wbody.i794: ; preds = %andand.i800
- %t18.i793.c = add i32 %.frame.0.6, 1 ; <i32> [#uses=1]
- br label %wcond.i792
-
-andand.i800: ; preds = %wcond.i792
- %t11.i797 = getelementptr i8* %line_arg831, i32 %.frame.0.6 ; <i8*> [#uses=1]
- %t12.i798 = load i8* %t11.i797 ; <i8> [#uses=1]
- %t14.i799 = icmp eq i8 %t12.i798, 32 ; <i1> [#uses=1]
- br i1 %t14.i799, label %wbody.i794, label %wcond22.i803
-
-wcond22.i803: ; preds = %wbody23.i805, %andand.i800, %wcond.i792
- %t49.i801 = phi i32 [ %t50.i804, %wbody23.i805 ], [ %.frame.0.6, %wcond.i792 ], [ %.frame.0.6, %andand.i800 ] ; <i32> [#uses=7]
- %t32.i802 = icmp ult i32 %t49.i801, %line_arg830 ; <i1> [#uses=1]
- br i1 %t32.i802, label %andand33.i811, label %wcond54.i815
-
-wbody23.i805: ; preds = %andand33.i811
- %t50.i804 = add i32 %t49.i801, 1 ; <i32> [#uses=1]
- br label %wcond22.i803
-
-andand33.i811: ; preds = %wcond22.i803
- %t42.i808 = getelementptr i8* %line_arg831, i32 %t49.i801 ; <i8*> [#uses=1]
- %t43.i809 = load i8* %t42.i808 ; <i8> [#uses=1]
- %t45.i810 = icmp eq i8 %t43.i809, 32 ; <i1> [#uses=1]
- br i1 %t45.i810, label %wcond54.i815, label %wbody23.i805
-
-wcond54.i815: ; preds = %wbody55.i817, %andand33.i811, %wcond22.i803
- %t81.i812 = phi i32 [ %t82.i816, %wbody55.i817 ], [ %t49.i801, %wcond22.i803 ], [ %t49.i801, %andand33.i811 ] ; <i32> [#uses=3]
- %t64.i813 = icmp ult i32 %t81.i812, %line_arg830 ; <i1> [#uses=1]
- br i1 %t64.i813, label %andand65.i822, label %Dt3net3ftp9FClient13FConnection13pListLineMFAaZS5t3net3ftp9FClient11FFileInfo10p_wordMFZAa.exit828
-
-wbody55.i817: ; preds = %andand65.i822
- %t82.i816 = add i32 %t81.i812, 1 ; <i32> [#uses=1]
- br label %wcond54.i815
-
-andand65.i822: ; preds = %wcond54.i815
- %t74.i819 = getelementptr i8* %line_arg831, i32 %t81.i812 ; <i8*> [#uses=1]
- %t75.i820 = load i8* %t74.i819 ; <i8> [#uses=1]
- %t77.i821 = icmp eq i8 %t75.i820, 32 ; <i1> [#uses=1]
- br i1 %t77.i821, label %wbody55.i817, label %Dt3net3ftp9FClient13FConnection13pListLineMFAaZS5t3net3ftp9FClient11FFileInfo10p_wordMFZAa.exit828
-
-Dt3net3ftp9FClient13FConnection13pListLineMFAaZS5t3net3ftp9FClient11FFileInfo10p_wordMFZAa.exit828: ; preds = %andand65.i822, %wcond54.i815
- %t89.i824 = getelementptr i8* %line_arg831, i32 %.frame.0.6 ; <i8*> [#uses=4]
- %t90.i825 = sub i32 %t49.i801, %.frame.0.6 ; <i32> [#uses=2]
- %t63 = load i8* %t89.i824 ; <i8> [#uses=2]
- br label %forcondi622
-
-forcondi622: ; preds = %forbodyi626, %Dt3net3ftp9FClient13FConnection13pListLineMFAaZS5t3net3ftp9FClient11FFileInfo10p_wordMFZAa.exit828
- %l.0i618 = phi i32 [ 10, %Dt3net3ftp9FClient13FConnection13pListLineMFAaZS5t3net3ftp9FClient11FFileInfo10p_wordMFZAa.exit828 ], [ %t4i620, %forbodyi626 ] ; <i32> [#uses=2]
- %p.0i619 = phi i8* [ getelementptr ([11 x i8]* @.str170, i32 0, i32 -1), %Dt3net3ftp9FClient13FConnection13pListLineMFAaZS5t3net3ftp9FClient11FFileInfo10p_wordMFZAa.exit828 ], [ %t7i623, %forbodyi626 ] ; <i8*> [#uses=1]
- %t4i620 = add i32 %l.0i618, -1 ; <i32> [#uses=1]
- %t5i621 = icmp eq i32 %l.0i618, 0 ; <i1> [#uses=1]
- br i1 %t5i621, label %if65, label %forbodyi626
-
-forbodyi626: ; preds = %forcondi622
- %t7i623 = getelementptr i8* %p.0i619, i32 1 ; <i8*> [#uses=3]
- %t8i624 = load i8* %t7i623 ; <i8> [#uses=1]
- %t12i625 = icmp eq i8 %t8i624, %t63 ; <i1> [#uses=1]
- br i1 %t12i625, label %ifi630, label %forcondi622
-
-ifi630: ; preds = %forbodyi626
- %t15i627 = ptrtoint i8* %t7i623 to i32 ; <i32> [#uses=1]
- %t17i629 = sub i32 %t15i627, ptrtoint ([11 x i8]* @.str170 to i32) ; <i32> [#uses=1]
- %phit636 = icmp eq i32 %t17i629, 10 ; <i1> [#uses=1]
- br i1 %phit636, label %if65, label %e67
-
-if65: ; preds = %ifi630, %forcondi622
- %t4i532 = icmp eq i32 %t49.i759, %.frame.0.3 ; <i1> [#uses=1]
- br i1 %t4i532, label %Dt4x7c7I11V4tTaZ4tFAaKbKkZk.exit.i576, label %forcondi539
-
-forcondi539: ; preds = %zi546, %if65
- %sign.1.i533 = phi i1 [ %sign.0.i543, %zi546 ], [ false, %if65 ] ; <i1> [#uses=2]
- %l.0i534 = phi i32 [ %t33i545, %zi546 ], [ %t90.i783, %if65 ] ; <i32> [#uses=3]
- %p.0i535 = phi i8* [ %t30i544, %zi546 ], [ %t89.i782, %if65 ] ; <i8*> [#uses=6]
- %c.0.ini536 = phi i8* [ %t30i544, %zi546 ], [ %t89.i782, %if65 ] ; <i8*> [#uses=1]
- %c.0i537 = load i8* %c.0.ini536 ; <i8> [#uses=2]
- %t8i538 = icmp eq i32 %l.0i534, 0 ; <i1> [#uses=1]
- br i1 %t8i538, label %endfori550, label %forbodyi540
-
-forbodyi540: ; preds = %forcondi539
- switch i8 %c.0i537, label %endfori550 [
- i8 32, label %zi546
- i8 9, label %zi546
- i8 45, label %if20i541
- i8 43, label %if26i542
- ]
-
-if20i541: ; preds = %forbodyi540
- br label %zi546
-
-if26i542: ; preds = %forbodyi540
- br label %zi546
-
-zi546: ; preds = %if26i542, %if20i541, %forbodyi540, %forbodyi540
- %sign.0.i543 = phi i1 [ false, %if26i542 ], [ true, %if20i541 ], [ %sign.1.i533, %forbodyi540 ], [ %sign.1.i533, %forbodyi540 ] ; <i1> [#uses=1]
- %t30i544 = getelementptr i8* %p.0i535, i32 1 ; <i8*> [#uses=2]
- %t33i545 = add i32 %l.0i534, -1 ; <i32> [#uses=1]
- br label %forcondi539
-
-endfori550: ; preds = %forbodyi540, %forcondi539
- %t37i547 = icmp eq i8 %c.0i537, 48 ; <i1> [#uses=1]
- %t39i548 = icmp sgt i32 %l.0i534, 1 ; <i1> [#uses=1]
- %or.condi549 = and i1 %t37i547, %t39i548 ; <i1> [#uses=1]
- br i1 %or.condi549, label %if40i554, label %endif41i564
-
-if40i554: ; preds = %endfori550
- %t43i551 = getelementptr i8* %p.0i535, i32 1 ; <i8*> [#uses=2]
- %t44i552 = load i8* %t43i551 ; <i8> [#uses=1]
- %t45i553 = zext i8 %t44i552 to i32 ; <i32> [#uses=1]
- switch i32 %t45i553, label %endif41i564 [
- i32 120, label %case46i556
- i32 88, label %case46i556
- i32 98, label %case51i558
- i32 66, label %case51i558
- i32 111, label %case56i560
- i32 79, label %case56i560
- ]
-
-case46i556: ; preds = %if40i554, %if40i554
- %t48i555 = getelementptr i8* %p.0i535, i32 2 ; <i8*> [#uses=1]
- br label %endif41i564
-
-case51i558: ; preds = %if40i554, %if40i554
- %t53i557 = getelementptr i8* %p.0i535, i32 2 ; <i8*> [#uses=1]
- br label %endif41i564
-
-case56i560: ; preds = %if40i554, %if40i554
- %t58i559 = getelementptr i8* %p.0i535, i32 2 ; <i8*> [#uses=1]
- br label %endif41i564
-
-endif41i564: ; preds = %case56i560, %case51i558, %case46i556, %if40i554, %endfori550
- %r.0i561 = phi i32 [ 0, %if40i554 ], [ 8, %case56i560 ], [ 2, %case51i558 ], [ 16, %case46i556 ], [ 0, %endfori550 ] ; <i32> [#uses=2]
- %p.2i562 = phi i8* [ %t43i551, %if40i554 ], [ %t58i559, %case56i560 ], [ %t53i557, %case51i558 ], [ %t48i555, %case46i556 ], [ %p.0i535, %endfori550 ] ; <i8*> [#uses=2]
- %t63i563 = icmp eq i32 %r.0i561, 0 ; <i1> [#uses=1]
- br i1 %t63i563, label %Dt4x7c7I11V4tTaZ4tFAaKbKkZk.exit.i576, label %if70i568
-
-if70i568: ; preds = %endif41i564
- br label %Dt4x7c7I11V4tTaZ4tFAaKbKkZk.exit.i576
-
-Dt4x7c7I11V4tTaZ4tFAaKbKkZk.exit.i576: ; preds = %if70i568, %endif41i564, %if65
- %radix.0.i570 = phi i32 [ 0, %if65 ], [ %r.0i561, %if70i568 ], [ 10, %endif41i564 ] ; <i32> [#uses=2]
- %p.1i571 = phi i8* [ %p.2i562, %if70i568 ], [ %t89.i782, %if65 ], [ %p.2i562, %endif41i564 ] ; <i8*> [#uses=1]
- %t84i572 = ptrtoint i8* %p.1i571 to i32 ; <i32> [#uses=1]
- %t85i573 = ptrtoint i8* %t89.i782 to i32 ; <i32> [#uses=1]
- %t86i574 = sub i32 %t84i572, %t85i573 ; <i32> [#uses=2]
- %t6.i575 = sub i32 %t90.i783, %t86i574 ; <i32> [#uses=1]
- %t59i604 = zext i32 %radix.0.i570 to i64 ; <i64> [#uses=1]
- br label %fcondi581
-
-fcondi581: ; preds = %if55i610, %Dt4x7c7I11V4tTaZ4tFAaKbKkZk.exit.i576
- %value.0i577 = phi i64 [ 0, %Dt4x7c7I11V4tTaZ4tFAaKbKkZk.exit.i576 ], [ %t65i607, %if55i610 ] ; <i64> [#uses=1]
- %fkey.0i579 = phi i32 [ 0, %Dt4x7c7I11V4tTaZ4tFAaKbKkZk.exit.i576 ], [ %t70i609, %if55i610 ] ; <i32> [#uses=3]
- %t3i580 = icmp ult i32 %fkey.0i579, %t6.i575 ; <i1> [#uses=1]
- br i1 %t3i580, label %fbodyi587, label %wcond.i422
-
-fbodyi587: ; preds = %fcondi581
- %t5.s.i582 = add i32 %t86i574, %fkey.0i579 ; <i32> [#uses=1]
- %t89.i782.s = add i32 %.frame.0.3, %t5.s.i582 ; <i32> [#uses=1]
- %t5i583 = getelementptr i8* %line_arg831, i32 %t89.i782.s ; <i8*> [#uses=1]
- %t6i584 = load i8* %t5i583 ; <i8> [#uses=6]
- %t6.off84i585 = add i8 %t6i584, -48 ; <i8> [#uses=1]
- %or.cond.i28.i586 = icmp ugt i8 %t6.off84i585, 9 ; <i1> [#uses=1]
- br i1 %or.cond.i28.i586, label %ei590, label %endifi603
-
-ei590: ; preds = %fbodyi587
- %t6.off83i588 = add i8 %t6i584, -97 ; <i8> [#uses=1]
- %or.cond81i589 = icmp ugt i8 %t6.off83i588, 25 ; <i1> [#uses=1]
- br i1 %or.cond81i589, label %e24i595, label %if22i592
-
-if22i592: ; preds = %ei590
- %t27i591 = add i8 %t6i584, -39 ; <i8> [#uses=1]
- br label %endifi603
-
-e24i595: ; preds = %ei590
- %t6.offi593 = add i8 %t6i584, -65 ; <i8> [#uses=1]
- %or.cond82i594 = icmp ugt i8 %t6.offi593, 25 ; <i1> [#uses=1]
- br i1 %or.cond82i594, label %wcond.i422, label %if39i597
-
-if39i597: ; preds = %e24i595
- %t44.i29.i596 = add i8 %t6i584, -7 ; <i8> [#uses=1]
- br label %endifi603
-
-endifi603: ; preds = %if39i597, %if22i592, %fbodyi587
- %c.0.i30.i598 = phi i8 [ %t27i591, %if22i592 ], [ %t44.i29.i596, %if39i597 ], [ %t6i584, %fbodyi587 ] ; <i8> [#uses=1]
- %t48.i31.i599 = zext i8 %c.0.i30.i598 to i32 ; <i32> [#uses=1]
- %t49i600 = add i32 %t48.i31.i599, 208 ; <i32> [#uses=1]
- %t52i601 = and i32 %t49i600, 255 ; <i32> [#uses=2]
- %t54i602 = icmp ult i32 %t52i601, %radix.0.i570 ; <i1> [#uses=1]
- br i1 %t54i602, label %if55i610, label %wcond.i422
-
-if55i610: ; preds = %endifi603
- %t61i605 = mul i64 %value.0i577, %t59i604 ; <i64> [#uses=1]
- %t64i606 = zext i32 %t52i601 to i64 ; <i64> [#uses=1]
- %t65i607 = add i64 %t61i605, %t64i606 ; <i64> [#uses=1]
- %t70i609 = add i32 %fkey.0i579, 1 ; <i32> [#uses=1]
- br label %fcondi581
-
-e67: ; preds = %ifi630
- %t4i447 = icmp eq i32 %t49.i801, %.frame.0.6 ; <i1> [#uses=1]
- br i1 %t4i447, label %Dt4x7c7I11V4tTaZ4tFAaKbKkZk.exit.i491, label %forcondi454
-
-forcondi454: ; preds = %zi461, %e67
- %c.0i452 = phi i8 [ %c.0i452.pre, %zi461 ], [ %t63, %e67 ] ; <i8> [#uses=2]
- %sign.1.i448 = phi i1 [ %sign.0.i458, %zi461 ], [ false, %e67 ] ; <i1> [#uses=2]
- %l.0i449 = phi i32 [ %t33i460, %zi461 ], [ %t90.i825, %e67 ] ; <i32> [#uses=3]
- %p.0i450 = phi i8* [ %t30i459, %zi461 ], [ %t89.i824, %e67 ] ; <i8*> [#uses=5]
- %t8i453 = icmp eq i32 %l.0i449, 0 ; <i1> [#uses=1]
- br i1 %t8i453, label %endfori465, label %forbodyi455
-
-forbodyi455: ; preds = %forcondi454
- switch i8 %c.0i452, label %endfori465 [
- i8 32, label %zi461
- i8 9, label %zi461
- i8 45, label %if20i456
- i8 43, label %if26i457
- ]
-
-if20i456: ; preds = %forbodyi455
- br label %zi461
-
-if26i457: ; preds = %forbodyi455
- br label %zi461
-
-zi461: ; preds = %if26i457, %if20i456, %forbodyi455, %forbodyi455
- %sign.0.i458 = phi i1 [ false, %if26i457 ], [ true, %if20i456 ], [ %sign.1.i448, %forbodyi455 ], [ %sign.1.i448, %forbodyi455 ] ; <i1> [#uses=1]
- %t30i459 = getelementptr i8* %p.0i450, i32 1 ; <i8*> [#uses=2]
- %t33i460 = add i32 %l.0i449, -1 ; <i32> [#uses=1]
- %c.0i452.pre = load i8* %t30i459 ; <i8> [#uses=1]
- br label %forcondi454
-
-endfori465: ; preds = %forbodyi455, %forcondi454
- %t37i462 = icmp eq i8 %c.0i452, 48 ; <i1> [#uses=1]
- %t39i463 = icmp sgt i32 %l.0i449, 1 ; <i1> [#uses=1]
- %or.condi464 = and i1 %t37i462, %t39i463 ; <i1> [#uses=1]
- br i1 %or.condi464, label %if40i469, label %endif41i479
-
-if40i469: ; preds = %endfori465
- %t43i466 = getelementptr i8* %p.0i450, i32 1 ; <i8*> [#uses=2]
- %t44i467 = load i8* %t43i466 ; <i8> [#uses=1]
- %t45i468 = zext i8 %t44i467 to i32 ; <i32> [#uses=1]
- switch i32 %t45i468, label %endif41i479 [
- i32 120, label %case46i471
- i32 111, label %case56i475
- ]
-
-case46i471: ; preds = %if40i469
- %t48i470 = getelementptr i8* %p.0i450, i32 2 ; <i8*> [#uses=1]
- br label %endif41i479
-
-case56i475: ; preds = %if40i469
- %t58i474 = getelementptr i8* %p.0i450, i32 2 ; <i8*> [#uses=1]
- br label %endif41i479
-
-endif41i479: ; preds = %case56i475, %case46i471, %if40i469, %endfori465
- %r.0i476 = phi i32 [ 0, %if40i469 ], [ 8, %case56i475 ], [ 16, %case46i471 ], [ 0, %endfori465 ] ; <i32> [#uses=2]
- %p.2i477 = phi i8* [ %t43i466, %if40i469 ], [ %t58i474, %case56i475 ], [ %t48i470, %case46i471 ], [ %p.0i450, %endfori465 ] ; <i8*> [#uses=2]
- %t63i478 = icmp eq i32 %r.0i476, 0 ; <i1> [#uses=1]
- br i1 %t63i478, label %Dt4x7c7I11V4tTaZ4tFAaKbKkZk.exit.i491, label %if70i483
-
-if70i483: ; preds = %endif41i479
- br label %Dt4x7c7I11V4tTaZ4tFAaKbKkZk.exit.i491
-
-Dt4x7c7I11V4tTaZ4tFAaKbKkZk.exit.i491: ; preds = %if70i483, %endif41i479, %e67
- %radix.0.i485 = phi i32 [ 0, %e67 ], [ %r.0i476, %if70i483 ], [ 10, %endif41i479 ] ; <i32> [#uses=2]
- %p.1i486 = phi i8* [ %p.2i477, %if70i483 ], [ %t89.i824, %e67 ], [ %p.2i477, %endif41i479 ] ; <i8*> [#uses=1]
- %t84i487 = ptrtoint i8* %p.1i486 to i32 ; <i32> [#uses=1]
- %t85i488 = ptrtoint i8* %t89.i824 to i32 ; <i32> [#uses=1]
- %t86i489 = sub i32 %t84i487, %t85i488 ; <i32> [#uses=2]
- %ttt = sub i32 %t90.i825, %t86i489 ; <i32> [#uses=1]
- %t59i519 = zext i32 %radix.0.i485 to i64 ; <i64> [#uses=1]
- br label %fcondi496
-
-fcondi496: ; preds = %if55i525, %Dt4x7c7I11V4tTaZ4tFAaKbKkZk.exit.i491
- %value.0i492 = phi i64 [ 0, %Dt4x7c7I11V4tTaZ4tFAaKbKkZk.exit.i491 ], [ %t65i522, %if55i525 ] ; <i64> [#uses=1]
- %fkey.0i494 = phi i32 [ 0, %Dt4x7c7I11V4tTaZ4tFAaKbKkZk.exit.i491 ], [ %t70i524, %if55i525 ] ; <i32> [#uses=3]
- %t3i495 = icmp ult i32 %fkey.0i494, %ttt ; <i1> [#uses=1]
- br i1 %t3i495, label %fbodyi502, label %wcond.i422
-
-fbodyi502: ; preds = %fcondi496
- %t5.s.i497 = add i32 %t86i489, %fkey.0i494 ; <i32> [#uses=1]
- %t89.i824.s = add i32 %.frame.0.6, %t5.s.i497 ; <i32> [#uses=1]
- %t5i498 = getelementptr i8* %line_arg831, i32 %t89.i824.s ; <i8*> [#uses=1]
- %t6i499 = load i8* %t5i498 ; <i8> [#uses=6]
- %t6.off84i500 = add i8 %t6i499, -48 ; <i8> [#uses=1]
- %or.cond.i28.i501 = icmp ugt i8 %t6.off84i500, 9 ; <i1> [#uses=1]
- br i1 %or.cond.i28.i501, label %ei505, label %endifi518
-
-ei505: ; preds = %fbodyi502
- %t6.off83i503 = add i8 %t6i499, -97 ; <i8> [#uses=1]
- %or.cond81i504 = icmp ugt i8 %t6.off83i503, 25 ; <i1> [#uses=1]
- br i1 %or.cond81i504, label %e24i510, label %if22i507
-
-if22i507: ; preds = %ei505
- %t27i506 = add i8 %t6i499, -39 ; <i8> [#uses=1]
- br label %endifi518
-
-e24i510: ; preds = %ei505
- %t6.offi508 = add i8 %t6i499, -65 ; <i8> [#uses=1]
- %or.cond82i509 = icmp ugt i8 %t6.offi508, 25 ; <i1> [#uses=1]
- br i1 %or.cond82i509, label %wcond.i422, label %if39i512
-
-if39i512: ; preds = %e24i510
- %t44.i29.i511 = add i8 %t6i499, -7 ; <i8> [#uses=1]
- br label %endifi518
-
-endifi518: ; preds = %if39i512, %if22i507, %fbodyi502
- %c.0.i30.i513 = phi i8 [ %t27i506, %if22i507 ], [ %t44.i29.i511, %if39i512 ], [ %t6i499, %fbodyi502 ] ; <i8> [#uses=1]
- %t48.i31.i514 = zext i8 %c.0.i30.i513 to i32 ; <i32> [#uses=1]
- %t49i515 = add i32 %t48.i31.i514, 208 ; <i32> [#uses=1]
- %t52i516 = and i32 %t49i515, 255 ; <i32> [#uses=2]
- %t54i517 = icmp ult i32 %t52i516, %radix.0.i485 ; <i1> [#uses=1]
- br i1 %t54i517, label %if55i525, label %wcond.i422
-
-if55i525: ; preds = %endifi518
- %t61i520 = mul i64 %value.0i492, %t59i519 ; <i64> [#uses=1]
- %t64i521 = zext i32 %t52i516 to i64 ; <i64> [#uses=1]
- %t65i522 = add i64 %t61i520, %t64i521 ; <i64> [#uses=1]
- %t70i524 = add i32 %fkey.0i494, 1 ; <i32> [#uses=1]
- br label %fcondi496
-
-wcond.i422: ; preds = %e40.i, %endifi518, %e24i510, %fcondi496, %endifi603, %e24i595, %fcondi581
- %sarg60.pn.i = phi i8* [ %p.0.i, %e40.i ], [ undef, %fcondi496 ], [ undef, %e24i510 ], [ undef, %endifi518 ], [ undef, %endifi603 ], [ undef, %e24i595 ], [ undef, %fcondi581 ] ; <i8*> [#uses=3]
- %start_arg.pn.i = phi i32 [ %t49.i443, %e40.i ], [ 0, %fcondi496 ], [ 0, %e24i510 ], [ 0, %endifi518 ], [ 0, %endifi603 ], [ 0, %e24i595 ], [ 0, %fcondi581 ] ; <i32> [#uses=3]
- %extent.0.i = phi i32 [ %t51.i, %e40.i ], [ undef, %fcondi496 ], [ undef, %e24i510 ], [ undef, %endifi518 ], [ undef, %endifi603 ], [ undef, %e24i595 ], [ undef, %fcondi581 ] ; <i32> [#uses=3]
- %p.0.i = getelementptr i8* %sarg60.pn.i, i32 %start_arg.pn.i ; <i8*> [#uses=2]
- %p.0.s63.i = add i32 %start_arg.pn.i, -1 ; <i32> [#uses=1]
- %t2i424 = getelementptr i8* %sarg60.pn.i, i32 %p.0.s63.i ; <i8*> [#uses=1]
- br label %forcondi430
-
-forcondi430: ; preds = %forbodyi434, %wcond.i422
- %l.0i426 = phi i32 [ %extent.0.i, %wcond.i422 ], [ %t4i428, %forbodyi434 ] ; <i32> [#uses=2]
- %p.0i427 = phi i8* [ %t2i424, %wcond.i422 ], [ %t7i431, %forbodyi434 ] ; <i8*> [#uses=1]
- %t4i428 = add i32 %l.0i426, -1 ; <i32> [#uses=1]
- %t5i429 = icmp eq i32 %l.0i426, 0 ; <i1> [#uses=1]
- br i1 %t5i429, label %e.i441, label %forbodyi434
-
-forbodyi434: ; preds = %forcondi430
- %t7i431 = getelementptr i8* %p.0i427, i32 1 ; <i8*> [#uses=3]
- %t8i432 = load i8* %t7i431 ; <i8> [#uses=1]
- %t12i433 = icmp eq i8 %t8i432, 32 ; <i1> [#uses=1]
- br i1 %t12i433, label %ifi438, label %forcondi430
-
-ifi438: ; preds = %forbodyi434
- %t15i435 = ptrtoint i8* %t7i431 to i32 ; <i32> [#uses=1]
- %t16i436 = ptrtoint i8* %p.0.i to i32 ; <i32> [#uses=1]
- %t17i437 = sub i32 %t15i435, %t16i436 ; <i32> [#uses=1]
- br label %e.i441
-
-e.i441: ; preds = %ifi438, %forcondi430
- %t2561.i = phi i32 [ %t17i437, %ifi438 ], [ %extent.0.i, %forcondi430 ] ; <i32> [#uses=2]
- %p.0.s.i = add i32 %start_arg.pn.i, %t2561.i ; <i32> [#uses=1]
- %t32.s.i = add i32 %p.0.s.i, -1 ; <i32> [#uses=1]
- %t2i.i = getelementptr i8* %sarg60.pn.i, i32 %t32.s.i ; <i8*> [#uses=1]
- br label %forbodyi.i
-
-forbodyi.i: ; preds = %forbodyi.i, %e.i441
- %p.0i.i = phi i8* [ %t2i.i, %e.i441 ], [ %t7i.i, %forbodyi.i ] ; <i8*> [#uses=1]
- %s2.0i.i = phi i8* [ getelementptr ([5 x i8]* @.str171, i32 0, i32 0), %e.i441 ], [ %t11i.i, %forbodyi.i ] ; <i8*> [#uses=2]
- %t7i.i = getelementptr i8* %p.0i.i, i32 1 ; <i8*> [#uses=2]
- %t8i.i = load i8* %t7i.i ; <i8> [#uses=1]
- %t11i.i = getelementptr i8* %s2.0i.i, i32 1 ; <i8*> [#uses=1]
- %t12i.i = load i8* %s2.0i.i ; <i8> [#uses=1]
- %t14i.i = icmp eq i8 %t8i.i, %t12i.i ; <i1> [#uses=1]
- br i1 %t14i.i, label %forbodyi.i, label %e40.i
-
-e40.i: ; preds = %forbodyi.i
- %t49.i443 = add i32 %t2561.i, 1 ; <i32> [#uses=2]
- %t51.i = sub i32 %extent.0.i, %t49.i443 ; <i32> [#uses=1]
- br label %wcond.i422
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/trampoline.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/trampoline.ll
deleted file mode 100644
index bc05bb1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/trampoline.ll
+++ /dev/null
@@ -1,166 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep {__trampoline_setup}
-
-module asm "\09.lazy_reference .objc_class_name_NSImageRep"
-module asm "\09.objc_class_name_NSBitmapImageRep=0"
-module asm "\09.globl .objc_class_name_NSBitmapImageRep"
- %struct.CGImage = type opaque
- %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]" = type { %struct.NSBitmapImageRep*, void (%struct.__block_1*, %struct.CGImage*)* }
- %struct.NSBitmapImageRep = type { %struct.NSImageRep }
- %struct.NSImageRep = type { }
- %struct.NSZone = type opaque
- %struct.__block_1 = type { %struct.__invoke_impl, %struct.NSZone*, %struct.NSBitmapImageRep** }
- %struct.__builtin_trampoline = type { [40 x i8] }
- %struct.__invoke_impl = type { i8*, i32, i32, i8* }
- %struct._objc__method_prototype_list = type opaque
- %struct._objc_class = type { %struct._objc_class*, %struct._objc_class*, i8*, i32, i32, i32, %struct._objc_ivar_list*, %struct._objc_method_list*, %struct.objc_cache*, %struct._objc_protocol**, i8*, %struct._objc_class_ext* }
- %struct._objc_class_ext = type opaque
- %struct._objc_ivar_list = type opaque
- %struct._objc_method = type { %struct.objc_selector*, i8*, i8* }
- %struct._objc_method_list = type opaque
- %struct._objc_module = type { i32, i32, i8*, %struct._objc_symtab* }
- %struct._objc_protocol = type { %struct._objc_protocol_extension*, i8*, %struct._objc_protocol**, %struct._objc__method_prototype_list*, %struct._objc__method_prototype_list* }
- %struct._objc_protocol_extension = type opaque
- %struct._objc_super = type { %struct.objc_object*, %struct._objc_class* }
- %struct._objc_symtab = type { i32, %struct.objc_selector**, i16, i16, [1 x i8*] }
- %struct.anon = type { %struct._objc__method_prototype_list*, i32, [1 x %struct._objc_method] }
- %struct.objc_cache = type opaque
- %struct.objc_object = type opaque
- %struct.objc_selector = type opaque
- %struct.objc_super = type opaque
-@_NSConcreteStackBlock = external global i8* ; <i8**> [#uses=1]
-@"\01L_OBJC_SELECTOR_REFERENCES_1" = internal global %struct.objc_selector* bitcast ([34 x i8]* @"\01L_OBJC_METH_VAR_NAME_1" to %struct.objc_selector*), section "__OBJC,__message_refs,literal_pointers,no_dead_strip" ; <%struct.objc_selector**> [#uses=2]
-@"\01L_OBJC_CLASS_NSBitmapImageRep" = internal global %struct._objc_class { %struct._objc_class* @"\01L_OBJC_METACLASS_NSBitmapImageRep", %struct._objc_class* bitcast ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1" to %struct._objc_class*), i8* getelementptr ([17 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i32 0, i32 1, i32 0, %struct._objc_ivar_list* null, %struct._objc_method_list* bitcast ({ i8*, i32, [1 x %struct._objc_method] }* @"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" to %struct._objc_method_list*), %struct.objc_cache* null, %struct._objc_protocol** null, i8* null, %struct._objc_class_ext* null }, section "__OBJC,__class,regular,no_dead_strip" ; <%struct._objc_class*> [#uses=3]
-@"\01L_OBJC_SELECTOR_REFERENCES_0" = internal global %struct.objc_selector* bitcast ([14 x i8]* @"\01L_OBJC_METH_VAR_NAME_0" to %struct.objc_selector*), section "__OBJC,__message_refs,literal_pointers,no_dead_strip" ; <%struct.objc_selector**> [#uses=2]
-@"\01L_OBJC_SYMBOLS" = internal global { i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] } { i32 0, %struct.objc_selector** null, i16 1, i16 0, [1 x %struct._objc_class*] [ %struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep" ] }, section "__OBJC,__symbols,regular,no_dead_strip" ; <{ i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] }*> [#uses=2]
-@"\01L_OBJC_METH_VAR_NAME_0" = internal global [14 x i8] c"copyWithZone:\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <[14 x i8]*> [#uses=2]
-@"\01L_OBJC_METH_VAR_TYPE_0" = internal global [20 x i8] c"@12 at 0:4^{_NSZone=}8\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <[20 x i8]*> [#uses=1]
-@"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" = internal global { i8*, i32, [1 x %struct._objc_method] } { i8* null, i32 1, [1 x %struct._objc_method] [ %struct._objc_method { %struct.objc_selector* bitcast ([14 x i8]* @"\01L_OBJC_METH_VAR_NAME_0" to %struct.objc_selector*), i8* getelementptr ([20 x i8]* @"\01L_OBJC_METH_VAR_TYPE_0", i32 0, i32 0), i8* bitcast (%struct.objc_object* (%struct.NSBitmapImageRep*, %struct.objc_selector*, %struct.NSZone*)* @"-[NSBitmapImageRep copyWithZone:]" to i8*) } ] }, section "__OBJC,__inst_meth,regular,no_dead_strip" ; <{ i8*, i32, [1 x %struct._objc_method] }*> [#uses=2]
-@"\01L_OBJC_CLASS_NAME_0" = internal global [17 x i8] c"NSBitmapImageRep\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <[17 x i8]*> [#uses=1]
-@"\01L_OBJC_CLASS_NAME_1" = internal global [11 x i8] c"NSImageRep\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <[11 x i8]*> [#uses=2]
-@"\01L_OBJC_METACLASS_NSBitmapImageRep" = internal global %struct._objc_class { %struct._objc_class* bitcast ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1" to %struct._objc_class*), %struct._objc_class* bitcast ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1" to %struct._objc_class*), i8* getelementptr ([17 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i32 0, i32 2, i32 48, %struct._objc_ivar_list* null, %struct._objc_method_list* null, %struct.objc_cache* null, %struct._objc_protocol** null, i8* null, %struct._objc_class_ext* null }, section "__OBJC,__meta_class,regular,no_dead_strip" ; <%struct._objc_class*> [#uses=2]
-@"\01L_OBJC_METH_VAR_NAME_1" = internal global [34 x i8] c"_performBlockUsingBackingCGImage:\00", section "__TEXT,__cstring,cstring_literals", align 4 ; <[34 x i8]*> [#uses=2]
-@"\01L_OBJC_IMAGE_INFO" = internal constant [2 x i32] zeroinitializer, section "__OBJC, __image_info,regular" ; <[2 x i32]*> [#uses=1]
-@"\01L_OBJC_CLASS_NAME_2" = internal global [1 x i8] zeroinitializer, section "__TEXT,__cstring,cstring_literals", align 4 ; <[1 x i8]*> [#uses=1]
-@"\01L_OBJC_MODULES" = internal global %struct._objc_module { i32 7, i32 16, i8* getelementptr ([1 x i8]* @"\01L_OBJC_CLASS_NAME_2", i32 0, i32 0), %struct._objc_symtab* bitcast ({ i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] }* @"\01L_OBJC_SYMBOLS" to %struct._objc_symtab*) }, section "__OBJC,__module_info,regular,no_dead_strip" ; <%struct._objc_module*> [#uses=1]
-@llvm.used = appending global [14 x i8*] [ i8* bitcast (%struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_1" to i8*), i8* bitcast (%struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep" to i8*), i8* bitcast (%struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_0" to i8*), i8* bitcast ({ i32, %struct.objc_selector**, i16, i16, [1 x %struct._objc_class*] }* @"\01L_OBJC_SYMBOLS" to i8*), i8* getelementptr ([14 x i8]* @"\01L_OBJC_METH_VAR_NAME_0", i32 0, i32 0), i8* getelementptr ([20 x i8]* @"\01L_OBJC_METH_VAR_TYPE_0", i32 0, i32 0), i8* bitcast ({ i8*, i32, [1 x %struct._objc_method] }* @"\01L_OBJC_INSTANCE_METHODS_NSBitmapImageRep" to i8*), i8* getelementptr ([17 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i8* getelementptr ([11 x i8]* @"\01L_OBJC_CLASS_NAME_1", i32 0, i32 0), i8* bitcast (%struct._objc_class* @"\01L_OBJC_METACLASS_NSBitmapImageRep" to i8*), i8* getelementptr ([34 x i8]* @"\01L_OBJC_METH_VAR_NAME_1", i32 0, i32 0), i8* bitcast ([2 x i32]* @"\01L_OBJC_IMAGE_INFO" to i8*), i8* getelementptr ([1 x i8]* @"\01L_OBJC_CLASS_NAME_2", i32 0, i32 0), i8* bitcast (%struct._objc_module* @"\01L_OBJC_MODULES" to i8*) ], section "llvm.metadata" ; <[14 x i8*]*> [#uses=0]
-
-define internal %struct.objc_object* @"-[NSBitmapImageRep copyWithZone:]"(%struct.NSBitmapImageRep* %self, %struct.objc_selector* %_cmd, %struct.NSZone* %zone) nounwind {
-entry:
- %self_addr = alloca %struct.NSBitmapImageRep* ; <%struct.NSBitmapImageRep**> [#uses=2]
- %_cmd_addr = alloca %struct.objc_selector* ; <%struct.objc_selector**> [#uses=1]
- %zone_addr = alloca %struct.NSZone* ; <%struct.NSZone**> [#uses=2]
- %retval = alloca %struct.objc_object* ; <%struct.objc_object**> [#uses=1]
- %__block_holder_tmp_1.0 = alloca %struct.__block_1 ; <%struct.__block_1*> [#uses=7]
- %new = alloca %struct.NSBitmapImageRep* ; <%struct.NSBitmapImageRep**> [#uses=2]
- %self.1 = alloca %struct.objc_object* ; <%struct.objc_object**> [#uses=2]
- %0 = alloca i8* ; <i8**> [#uses=2]
- %TRAMP.9 = alloca %struct.__builtin_trampoline, align 4 ; <%struct.__builtin_trampoline*> [#uses=1]
- %1 = alloca void (%struct.__block_1*, %struct.CGImage*)* ; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=2]
- %2 = alloca %struct.NSBitmapImageRep* ; <%struct.NSBitmapImageRep**> [#uses=2]
- %FRAME.7 = alloca %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]" ; <%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*> [#uses=5]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store %struct.NSBitmapImageRep* %self, %struct.NSBitmapImageRep** %self_addr
- store %struct.objc_selector* %_cmd, %struct.objc_selector** %_cmd_addr
- store %struct.NSZone* %zone, %struct.NSZone** %zone_addr
- %3 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
- %4 = load %struct.NSBitmapImageRep** %self_addr, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
- store %struct.NSBitmapImageRep* %4, %struct.NSBitmapImageRep** %3, align 4
- %TRAMP.91 = bitcast %struct.__builtin_trampoline* %TRAMP.9 to i8* ; <i8*> [#uses=1]
- %FRAME.72 = bitcast %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7 to i8* ; <i8*> [#uses=1]
- %tramp = call i8* @llvm.init.trampoline(i8* %TRAMP.91, i8* bitcast (void (%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*, %struct.__block_1*, %struct.CGImage*)* @__helper_1.1632 to i8*), i8* %FRAME.72) ; <i8*> [#uses=1]
- store i8* %tramp, i8** %0, align 4
- %5 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1 ; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
- %6 = load i8** %0, align 4 ; <i8*> [#uses=1]
- %7 = bitcast i8* %6 to void (%struct.__block_1*, %struct.CGImage*)* ; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
- store void (%struct.__block_1*, %struct.CGImage*)* %7, void (%struct.__block_1*, %struct.CGImage*)** %5, align 4
- store %struct.NSBitmapImageRep* null, %struct.NSBitmapImageRep** %new, align 4
- %8 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
- %9 = getelementptr %struct.__invoke_impl* %8, i32 0, i32 0 ; <i8**> [#uses=1]
- store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %9, align 4
- %10 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
- %11 = getelementptr %struct.__invoke_impl* %10, i32 0, i32 1 ; <i32*> [#uses=1]
- store i32 67108864, i32* %11, align 4
- %12 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
- %13 = getelementptr %struct.__invoke_impl* %12, i32 0, i32 2 ; <i32*> [#uses=1]
- store i32 24, i32* %13, align 4
- %14 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1 ; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
- %15 = load void (%struct.__block_1*, %struct.CGImage*)** %14, align 4 ; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
- store void (%struct.__block_1*, %struct.CGImage*)* %15, void (%struct.__block_1*, %struct.CGImage*)** %1, align 4
- %16 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
- %17 = getelementptr %struct.__invoke_impl* %16, i32 0, i32 3 ; <i8**> [#uses=1]
- %18 = load void (%struct.__block_1*, %struct.CGImage*)** %1, align 4 ; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
- %19 = bitcast void (%struct.__block_1*, %struct.CGImage*)* %18 to i8* ; <i8*> [#uses=1]
- store i8* %19, i8** %17, align 4
- %20 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 1 ; <%struct.NSZone**> [#uses=1]
- %21 = load %struct.NSZone** %zone_addr, align 4 ; <%struct.NSZone*> [#uses=1]
- store %struct.NSZone* %21, %struct.NSZone** %20, align 4
- %22 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 2 ; <%struct.NSBitmapImageRep***> [#uses=1]
- store %struct.NSBitmapImageRep** %new, %struct.NSBitmapImageRep*** %22, align 4
- %23 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
- %24 = load %struct.NSBitmapImageRep** %23, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
- store %struct.NSBitmapImageRep* %24, %struct.NSBitmapImageRep** %2, align 4
- %25 = load %struct.NSBitmapImageRep** %2, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
- %26 = bitcast %struct.NSBitmapImageRep* %25 to %struct.objc_object* ; <%struct.objc_object*> [#uses=1]
- store %struct.objc_object* %26, %struct.objc_object** %self.1, align 4
- %27 = load %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_1", align 4 ; <%struct.objc_selector*> [#uses=1]
- %__block_holder_tmp_1.03 = bitcast %struct.__block_1* %__block_holder_tmp_1.0 to void (%struct.CGImage*)* ; <void (%struct.CGImage*)*> [#uses=1]
- %28 = load %struct.objc_object** %self.1, align 4 ; <%struct.objc_object*> [#uses=1]
- %29 = call %struct.objc_object* (%struct.objc_object*, %struct.objc_selector*, ...)* inttoptr (i64 4294901504 to %struct.objc_object* (%struct.objc_object*, %struct.objc_selector*, ...)*)(%struct.objc_object* %28, %struct.objc_selector* %27, void (%struct.CGImage*)* %__block_holder_tmp_1.03) nounwind ; <%struct.objc_object*> [#uses=0]
- br label %return
-
-return: ; preds = %entry
- %retval5 = load %struct.objc_object** %retval ; <%struct.objc_object*> [#uses=1]
- ret %struct.objc_object* %retval5
-}
-
-declare i8* @llvm.init.trampoline(i8*, i8*, i8*) nounwind
-
-define internal void @__helper_1.1632(%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* nest %CHAIN.8, %struct.__block_1* %_self, %struct.CGImage* %cgImage) nounwind {
-entry:
- %CHAIN.8_addr = alloca %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* ; <%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"**> [#uses=2]
- %_self_addr = alloca %struct.__block_1* ; <%struct.__block_1**> [#uses=3]
- %cgImage_addr = alloca %struct.CGImage* ; <%struct.CGImage**> [#uses=1]
- %zone = alloca %struct.NSZone* ; <%struct.NSZone**> [#uses=2]
- %objc_super = alloca %struct._objc_super ; <%struct._objc_super*> [#uses=3]
- %new = alloca %struct.NSBitmapImageRep** ; <%struct.NSBitmapImageRep***> [#uses=2]
- %objc_super.5 = alloca %struct.objc_super* ; <%struct.objc_super**> [#uses=2]
- %0 = alloca %struct.NSBitmapImageRep* ; <%struct.NSBitmapImageRep**> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %CHAIN.8, %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"** %CHAIN.8_addr
- store %struct.__block_1* %_self, %struct.__block_1** %_self_addr
- store %struct.CGImage* %cgImage, %struct.CGImage** %cgImage_addr
- %1 = load %struct.__block_1** %_self_addr, align 4 ; <%struct.__block_1*> [#uses=1]
- %2 = getelementptr %struct.__block_1* %1, i32 0, i32 2 ; <%struct.NSBitmapImageRep***> [#uses=1]
- %3 = load %struct.NSBitmapImageRep*** %2, align 4 ; <%struct.NSBitmapImageRep**> [#uses=1]
- store %struct.NSBitmapImageRep** %3, %struct.NSBitmapImageRep*** %new, align 4
- %4 = load %struct.__block_1** %_self_addr, align 4 ; <%struct.__block_1*> [#uses=1]
- %5 = getelementptr %struct.__block_1* %4, i32 0, i32 1 ; <%struct.NSZone**> [#uses=1]
- %6 = load %struct.NSZone** %5, align 4 ; <%struct.NSZone*> [#uses=1]
- store %struct.NSZone* %6, %struct.NSZone** %zone, align 4
- %7 = load %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"** %CHAIN.8_addr, align 4 ; <%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*> [#uses=1]
- %8 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
- %9 = load %struct.NSBitmapImageRep** %8, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
- store %struct.NSBitmapImageRep* %9, %struct.NSBitmapImageRep** %0, align 4
- %10 = load %struct.NSBitmapImageRep** %0, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
- %11 = bitcast %struct.NSBitmapImageRep* %10 to %struct.objc_object* ; <%struct.objc_object*> [#uses=1]
- %12 = getelementptr %struct._objc_super* %objc_super, i32 0, i32 0 ; <%struct.objc_object**> [#uses=1]
- store %struct.objc_object* %11, %struct.objc_object** %12, align 4
- %13 = load %struct._objc_class** getelementptr (%struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep", i32 0, i32 1), align 4 ; <%struct._objc_class*> [#uses=1]
- %14 = getelementptr %struct._objc_super* %objc_super, i32 0, i32 1 ; <%struct._objc_class**> [#uses=1]
- store %struct._objc_class* %13, %struct._objc_class** %14, align 4
- %objc_super1 = bitcast %struct._objc_super* %objc_super to %struct.objc_super* ; <%struct.objc_super*> [#uses=1]
- store %struct.objc_super* %objc_super1, %struct.objc_super** %objc_super.5, align 4
- %15 = load %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_0", align 4 ; <%struct.objc_selector*> [#uses=1]
- %16 = load %struct.objc_super** %objc_super.5, align 4 ; <%struct.objc_super*> [#uses=1]
- %17 = load %struct.NSZone** %zone, align 4 ; <%struct.NSZone*> [#uses=1]
- %18 = call %struct.objc_object* (%struct.objc_super*, %struct.objc_selector*, ...)* @objc_msgSendSuper(%struct.objc_super* %16, %struct.objc_selector* %15, %struct.NSZone* %17) nounwind ; <%struct.objc_object*> [#uses=1]
- %19 = bitcast %struct.objc_object* %18 to %struct.NSBitmapImageRep* ; <%struct.NSBitmapImageRep*> [#uses=1]
- %20 = load %struct.NSBitmapImageRep*** %new, align 4 ; <%struct.NSBitmapImageRep**> [#uses=1]
- store %struct.NSBitmapImageRep* %19, %struct.NSBitmapImageRep** %20, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare %struct.objc_object* @objc_msgSendSuper(%struct.objc_super*, %struct.objc_selector*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/unsafe-math.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/unsafe-math.ll
deleted file mode 100644
index ef97912..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/unsafe-math.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=ppc32 | grep fmul | count 2
-; RUN: llc < %s -march=ppc32 -enable-unsafe-fp-math | \
-; RUN: grep fmul | count 1
-
-define double @foo(double %X) {
- %tmp1 = fmul double %X, 1.23
- %tmp2 = fmul double %tmp1, 4.124
- ret double %tmp2
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vcmp-fold.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vcmp-fold.ll
deleted file mode 100644
index 7a42c27..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vcmp-fold.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; This should fold the "vcmpbfp." and "vcmpbfp" instructions into a single
-; "vcmpbfp.".
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep vcmpbfp | count 1
-
-
-define void @test(<4 x float>* %x, <4 x float>* %y, i32* %P) {
-entry:
- %tmp = load <4 x float>* %x ; <<4 x float>> [#uses=1]
- %tmp2 = load <4 x float>* %y ; <<4 x float>> [#uses=1]
- %tmp.upgrd.1 = call i32 @llvm.ppc.altivec.vcmpbfp.p( i32 1, <4 x float> %tmp, <4 x float> %tmp2 ) ; <i32> [#uses=1]
- %tmp4 = load <4 x float>* %x ; <<4 x float>> [#uses=1]
- %tmp6 = load <4 x float>* %y ; <<4 x float>> [#uses=1]
- %tmp.upgrd.2 = call <4 x i32> @llvm.ppc.altivec.vcmpbfp( <4 x float> %tmp4, <4 x float> %tmp6 ) ; <<4 x i32>> [#uses=1]
- %tmp7 = bitcast <4 x i32> %tmp.upgrd.2 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp7, <4 x float>* %x
- store i32 %tmp.upgrd.1, i32* %P
- ret void
-}
-
-declare i32 @llvm.ppc.altivec.vcmpbfp.p(i32, <4 x float>, <4 x float>)
-
-declare <4 x i32> @llvm.ppc.altivec.vcmpbfp(<4 x float>, <4 x float>)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_auto_constant.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_auto_constant.ll
deleted file mode 100644
index 973f089..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_auto_constant.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin -mcpu=g5 | FileCheck %s
-; Formerly produced .long, 7320806 (partial)
-; CHECK: .byte 22
-; CHECK: .byte 21
-; CHECK: .byte 20
-; CHECK: .byte 3
-; CHECK: .byte 25
-; CHECK: .byte 24
-; CHECK: .byte 23
-; CHECK: .byte 3
-; CHECK: .byte 28
-; CHECK: .byte 27
-; CHECK: .byte 26
-; CHECK: .byte 3
-; CHECK: .byte 31
-; CHECK: .byte 30
-; CHECK: .byte 29
-; CHECK: .byte 3
-@baz = common global <16 x i8> zeroinitializer ; <<16 x i8>*> [#uses=1]
-
-define void @foo(<16 x i8> %x) nounwind ssp {
-entry:
- %x_addr = alloca <16 x i8> ; <<16 x i8>*> [#uses=2]
- %temp = alloca <16 x i8> ; <<16 x i8>*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store <16 x i8> %x, <16 x i8>* %x_addr
- store <16 x i8> <i8 22, i8 21, i8 20, i8 3, i8 25, i8 24, i8 23, i8 3, i8 28, i8 27, i8 26, i8 3, i8 31, i8 30, i8 29, i8 3>, <16 x i8>* %temp, align 16
- %0 = load <16 x i8>* %x_addr, align 16 ; <<16 x i8>> [#uses=1]
- %1 = load <16 x i8>* %temp, align 16 ; <<16 x i8>> [#uses=1]
- %tmp = add <16 x i8> %0, %1 ; <<16 x i8>> [#uses=1]
- store <16 x i8> %tmp, <16 x i8>* @baz, align 16
- br label %return
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_br_cmp.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_br_cmp.ll
deleted file mode 100644
index c34d850..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_br_cmp.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5 -o %t
-; RUN: grep vcmpeqfp. %t
-; RUN: not grep mfcr %t
-
-; A predicate compare used immediately by a branch should not generate an mfcr.
-
-define void @test(<4 x float>* %A, <4 x float>* %B) {
- %tmp = load <4 x float>* %A ; <<4 x float>> [#uses=1]
- %tmp3 = load <4 x float>* %B ; <<4 x float>> [#uses=1]
- %tmp.upgrd.1 = tail call i32 @llvm.ppc.altivec.vcmpeqfp.p( i32 1, <4 x float> %tmp, <4 x float> %tmp3 ) ; <i32> [#uses=1]
- %tmp.upgrd.2 = icmp eq i32 %tmp.upgrd.1, 0 ; <i1> [#uses=1]
- br i1 %tmp.upgrd.2, label %cond_true, label %UnifiedReturnBlock
-
-cond_true: ; preds = %0
- store <4 x float> zeroinitializer, <4 x float>* %B
- ret void
-
-UnifiedReturnBlock: ; preds = %0
- ret void
-}
-
-declare i32 @llvm.ppc.altivec.vcmpeqfp.p(i32, <4 x float>, <4 x float>)
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll
deleted file mode 100644
index 015c086..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin -mattr=+altivec | FileCheck %s
-; Formerly this did byte loads and word stores.
-@a = external global <16 x i8>
-@b = external global <16 x i8>
-@c = external global <16 x i8>
-
-define void @foo() nounwind ssp {
-; CHECK: _foo:
-; CHECK-NOT: stw
-entry:
- %tmp0 = load <16 x i8>* @a, align 16
- %tmp180.i = extractelement <16 x i8> %tmp0, i32 0 ; <i8> [#uses=1]
- %tmp181.i = insertelement <16 x i8> <i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp180.i, i32 2 ; <<16 x i8>> [#uses=1]
- %tmp182.i = extractelement <16 x i8> %tmp0, i32 1 ; <i8> [#uses=1]
- %tmp183.i = insertelement <16 x i8> %tmp181.i, i8 %tmp182.i, i32 3 ; <<16 x i8>> [#uses=1]
- %tmp184.i = insertelement <16 x i8> %tmp183.i, i8 0, i32 4 ; <<16 x i8>> [#uses=1]
- %tmp185.i = insertelement <16 x i8> %tmp184.i, i8 0, i32 5 ; <<16 x i8>> [#uses=1]
- %tmp186.i = extractelement <16 x i8> %tmp0, i32 4 ; <i8> [#uses=1]
- %tmp187.i = insertelement <16 x i8> %tmp185.i, i8 %tmp186.i, i32 6 ; <<16 x i8>> [#uses=1]
- %tmp188.i = extractelement <16 x i8> %tmp0, i32 5 ; <i8> [#uses=1]
- %tmp189.i = insertelement <16 x i8> %tmp187.i, i8 %tmp188.i, i32 7 ; <<16 x i8>> [#uses=1]
- %tmp190.i = insertelement <16 x i8> %tmp189.i, i8 0, i32 8 ; <<16 x i8>> [#uses=1]
- %tmp191.i = insertelement <16 x i8> %tmp190.i, i8 0, i32 9 ; <<16 x i8>> [#uses=1]
- %tmp192.i = extractelement <16 x i8> %tmp0, i32 8 ; <i8> [#uses=1]
- %tmp193.i = insertelement <16 x i8> %tmp191.i, i8 %tmp192.i, i32 10 ; <<16 x i8>> [#uses=1]
- %tmp194.i = extractelement <16 x i8> %tmp0, i32 9 ; <i8> [#uses=1]
- %tmp195.i = insertelement <16 x i8> %tmp193.i, i8 %tmp194.i, i32 11 ; <<16 x i8>> [#uses=1]
- %tmp196.i = insertelement <16 x i8> %tmp195.i, i8 0, i32 12 ; <<16 x i8>> [#uses=1]
- %tmp197.i = insertelement <16 x i8> %tmp196.i, i8 0, i32 13 ; <<16 x i8>> [#uses=1]
-%tmp201 = shufflevector <16 x i8> %tmp197.i, <16 x i8> %tmp0, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 28, i32 29>; ModuleID = 'try.c'
- store <16 x i8> %tmp201, <16 x i8>* @c, align 16
- br label %return
-
-return: ; preds = %bb2
- ret void
-; CHECK: blr
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_call.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_call.ll
deleted file mode 100644
index 4511315..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_call.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5
-
-define <4 x i32> @test_arg(<4 x i32> %A, <4 x i32> %B) {
- %C = add <4 x i32> %A, %B ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %C
-}
-
-define <4 x i32> @foo() {
- %X = call <4 x i32> @test_arg( <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %X
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_constants.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_constants.ll
deleted file mode 100644
index 32c6f48..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_constants.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | not grep CPI
-
-define void @test1(<4 x i32>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
- %tmp = load <4 x i32>* %P1 ; <<4 x i32>> [#uses=1]
- %tmp4 = and <4 x i32> %tmp, < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 > ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp4, <4 x i32>* %P1
- %tmp7 = load <4 x i32>* %P2 ; <<4 x i32>> [#uses=1]
- %tmp9 = and <4 x i32> %tmp7, < i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647 > ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp9, <4 x i32>* %P2
- %tmp.upgrd.1 = load <4 x float>* %P3 ; <<4 x float>> [#uses=1]
- %tmp11 = bitcast <4 x float> %tmp.upgrd.1 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp12 = and <4 x i32> %tmp11, < i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647 > ; <<4 x i32>> [#uses=1]
- %tmp13 = bitcast <4 x i32> %tmp12 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp13, <4 x float>* %P3
- ret void
-}
-
-define <4 x i32> @test_30() {
- ret <4 x i32> < i32 30, i32 30, i32 30, i32 30 >
-}
-
-define <4 x i32> @test_29() {
- ret <4 x i32> < i32 29, i32 29, i32 29, i32 29 >
-}
-
-define <8 x i16> @test_n30() {
- ret <8 x i16> < i16 -30, i16 -30, i16 -30, i16 -30, i16 -30, i16 -30, i16 -30, i16 -30 >
-}
-
-define <16 x i8> @test_n104() {
- ret <16 x i8> < i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104 >
-}
-
-define <4 x i32> @test_vsldoi() {
- ret <4 x i32> < i32 512, i32 512, i32 512, i32 512 >
-}
-
-define <4 x i32> @test_rol() {
- ret <4 x i32> < i32 -11534337, i32 -11534337, i32 -11534337, i32 -11534337 >
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_fneg.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_fneg.ll
deleted file mode 100644
index e01e659..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_fneg.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep vsubfp
-
-define void @t(<4 x float>* %A) {
- %tmp2 = load <4 x float>* %A
- %tmp3 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp2
- store <4 x float> %tmp3, <4 x float>* %A
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_insert.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_insert.ll
deleted file mode 100644
index 185454c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_insert.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep sth
-
-define <8 x i16> @insert(<8 x i16> %foo, i16 %a) nounwind {
-entry:
- %vecext = insertelement <8 x i16> %foo, i16 %a, i32 7 ; <i8> [#uses=1]
- ret <8 x i16> %vecext
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_misaligned.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_misaligned.ll
deleted file mode 100644
index d7ed64a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_misaligned.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5
-
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
-target triple = "powerpc-apple-darwin8"
- %struct.S2203 = type { %struct.u16qi }
- %struct.u16qi = type { <16 x i8> }
-@s = weak global %struct.S2203 zeroinitializer ; <%struct.S2203*> [#uses=1]
-
-define void @foo(i32 %x, ...) {
-entry:
- %x_addr = alloca i32 ; <i32*> [#uses=1]
- %ap = alloca i8* ; <i8**> [#uses=3]
- %ap.0 = alloca i8* ; <i8**> [#uses=3]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %x, i32* %x_addr
- %ap1 = bitcast i8** %ap to i8* ; <i8*> [#uses=1]
- call void @llvm.va_start( i8* %ap1 )
- %tmp = load i8** %ap, align 4 ; <i8*> [#uses=1]
- store i8* %tmp, i8** %ap.0, align 4
- %tmp2 = load i8** %ap.0, align 4 ; <i8*> [#uses=1]
- %tmp3 = getelementptr i8* %tmp2, i64 16 ; <i8*> [#uses=1]
- store i8* %tmp3, i8** %ap, align 4
- %tmp4 = load i8** %ap.0, align 4 ; <i8*> [#uses=1]
- %tmp45 = bitcast i8* %tmp4 to %struct.S2203* ; <%struct.S2203*> [#uses=1]
- %tmp6 = getelementptr %struct.S2203* @s, i32 0, i32 0 ; <%struct.u16qi*> [#uses=1]
- %tmp7 = getelementptr %struct.S2203* %tmp45, i32 0, i32 0 ; <%struct.u16qi*> [#uses=1]
- %tmp8 = getelementptr %struct.u16qi* %tmp6, i32 0, i32 0 ; <<16 x i8>*> [#uses=1]
- %tmp9 = getelementptr %struct.u16qi* %tmp7, i32 0, i32 0 ; <<16 x i8>*> [#uses=1]
- %tmp10 = load <16 x i8>* %tmp9, align 4 ; <<16 x i8>> [#uses=1]
- store <16 x i8> %tmp10, <16 x i8>* %tmp8, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare void @llvm.va_start(i8*) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_mul.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_mul.ll
deleted file mode 100644
index 80f4de4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_mul.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | not grep mullw
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep vmsumuhm
-
-define <4 x i32> @test_v4i32(<4 x i32>* %X, <4 x i32>* %Y) {
- %tmp = load <4 x i32>* %X ; <<4 x i32>> [#uses=1]
- %tmp2 = load <4 x i32>* %Y ; <<4 x i32>> [#uses=1]
- %tmp3 = mul <4 x i32> %tmp, %tmp2 ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %tmp3
-}
-
-define <8 x i16> @test_v8i16(<8 x i16>* %X, <8 x i16>* %Y) {
- %tmp = load <8 x i16>* %X ; <<8 x i16>> [#uses=1]
- %tmp2 = load <8 x i16>* %Y ; <<8 x i16>> [#uses=1]
- %tmp3 = mul <8 x i16> %tmp, %tmp2 ; <<8 x i16>> [#uses=1]
- ret <8 x i16> %tmp3
-}
-
-define <16 x i8> @test_v16i8(<16 x i8>* %X, <16 x i8>* %Y) {
- %tmp = load <16 x i8>* %X ; <<16 x i8>> [#uses=1]
- %tmp2 = load <16 x i8>* %Y ; <<16 x i8>> [#uses=1]
- %tmp3 = mul <16 x i8> %tmp, %tmp2 ; <<16 x i8>> [#uses=1]
- ret <16 x i8> %tmp3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_perf_shuffle.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_perf_shuffle.ll
deleted file mode 100644
index 2c3594d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_perf_shuffle.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | not grep vperm
-
-define <4 x float> @test_uu72(<4 x float>* %P1, <4 x float>* %P2) {
- %V1 = load <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %V2 = load <4 x float>* %P2 ; <<4 x float>> [#uses=1]
- %V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 undef, i32 undef, i32 7, i32 2 > ; <<4 x float>> [#uses=1]
- ret <4 x float> %V3
-}
-
-define <4 x float> @test_30u5(<4 x float>* %P1, <4 x float>* %P2) {
- %V1 = load <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %V2 = load <4 x float>* %P2 ; <<4 x float>> [#uses=1]
- %V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 3, i32 0, i32 undef, i32 5 > ; <<4 x float>> [#uses=1]
- ret <4 x float> %V3
-}
-
-define <4 x float> @test_3u73(<4 x float>* %P1, <4 x float>* %P2) {
- %V1 = load <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %V2 = load <4 x float>* %P2 ; <<4 x float>> [#uses=1]
- %V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 3, i32 undef, i32 7, i32 3 > ; <<4 x float>> [#uses=1]
- ret <4 x float> %V3
-}
-
-define <4 x float> @test_3774(<4 x float>* %P1, <4 x float>* %P2) {
- %V1 = load <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %V2 = load <4 x float>* %P2 ; <<4 x float>> [#uses=1]
- %V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 3, i32 7, i32 7, i32 4 > ; <<4 x float>> [#uses=1]
- ret <4 x float> %V3
-}
-
-define <4 x float> @test_4450(<4 x float>* %P1, <4 x float>* %P2) {
- %V1 = load <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %V2 = load <4 x float>* %P2 ; <<4 x float>> [#uses=1]
- %V3 = shufflevector <4 x float> %V1, <4 x float> %V2, <4 x i32> < i32 4, i32 4, i32 5, i32 0 > ; <<4 x float>> [#uses=1]
- ret <4 x float> %V3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_shift.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_shift.ll
deleted file mode 100644
index 646fb5f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_shift.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5
-; PR3628
-
-define void @update(<4 x i32> %val, <4 x i32>* %dst) nounwind {
-entry:
- %shl = shl <4 x i32> %val, < i32 4, i32 3, i32 2, i32 1 >
- %shr = ashr <4 x i32> %shl, < i32 1, i32 2, i32 3, i32 4 >
- store <4 x i32> %shr, <4 x i32>* %dst
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_shuffle.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_shuffle.ll
deleted file mode 100644
index d025bbc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_shuffle.ll
+++ /dev/null
@@ -1,505 +0,0 @@
-; RUN: opt < %s -instcombine | \
-; RUN: llc -march=ppc32 -mcpu=g5 | not grep vperm
-; RUN: llc < %s -march=ppc32 -mcpu=g5 > %t
-; RUN: grep vsldoi %t | count 2
-; RUN: grep vmrgh %t | count 7
-; RUN: grep vmrgl %t | count 6
-; RUN: grep vpkuhum %t | count 1
-; RUN: grep vpkuwum %t | count 1
-; XFAIL: *
-
-define void @VSLDOI_xy(<8 x i16>* %A, <8 x i16>* %B) {
-entry:
- %tmp = load <8 x i16>* %A ; <<8 x i16>> [#uses=1]
- %tmp2 = load <8 x i16>* %B ; <<8 x i16>> [#uses=1]
- %tmp.upgrd.1 = bitcast <8 x i16> %tmp to <16 x i8> ; <<16 x i8>> [#uses=11]
- %tmp2.upgrd.2 = bitcast <8 x i16> %tmp2 to <16 x i8> ; <<16 x i8>> [#uses=5]
- %tmp.upgrd.3 = extractelement <16 x i8> %tmp.upgrd.1, i32 5 ; <i8> [#uses=1]
- %tmp3 = extractelement <16 x i8> %tmp.upgrd.1, i32 6 ; <i8> [#uses=1]
- %tmp4 = extractelement <16 x i8> %tmp.upgrd.1, i32 7 ; <i8> [#uses=1]
- %tmp5 = extractelement <16 x i8> %tmp.upgrd.1, i32 8 ; <i8> [#uses=1]
- %tmp6 = extractelement <16 x i8> %tmp.upgrd.1, i32 9 ; <i8> [#uses=1]
- %tmp7 = extractelement <16 x i8> %tmp.upgrd.1, i32 10 ; <i8> [#uses=1]
- %tmp8 = extractelement <16 x i8> %tmp.upgrd.1, i32 11 ; <i8> [#uses=1]
- %tmp9 = extractelement <16 x i8> %tmp.upgrd.1, i32 12 ; <i8> [#uses=1]
- %tmp10 = extractelement <16 x i8> %tmp.upgrd.1, i32 13 ; <i8> [#uses=1]
- %tmp11 = extractelement <16 x i8> %tmp.upgrd.1, i32 14 ; <i8> [#uses=1]
- %tmp12 = extractelement <16 x i8> %tmp.upgrd.1, i32 15 ; <i8> [#uses=1]
- %tmp13 = extractelement <16 x i8> %tmp2.upgrd.2, i32 0 ; <i8> [#uses=1]
- %tmp14 = extractelement <16 x i8> %tmp2.upgrd.2, i32 1 ; <i8> [#uses=1]
- %tmp15 = extractelement <16 x i8> %tmp2.upgrd.2, i32 2 ; <i8> [#uses=1]
- %tmp16 = extractelement <16 x i8> %tmp2.upgrd.2, i32 3 ; <i8> [#uses=1]
- %tmp17 = extractelement <16 x i8> %tmp2.upgrd.2, i32 4 ; <i8> [#uses=1]
- %tmp18 = insertelement <16 x i8> undef, i8 %tmp.upgrd.3, i32 0 ; <<16 x i8>> [#uses=1]
- %tmp19 = insertelement <16 x i8> %tmp18, i8 %tmp3, i32 1 ; <<16 x i8>> [#uses=1]
- %tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
- %tmp21 = insertelement <16 x i8> %tmp20, i8 %tmp5, i32 3 ; <<16 x i8>> [#uses=1]
- %tmp22 = insertelement <16 x i8> %tmp21, i8 %tmp6, i32 4 ; <<16 x i8>> [#uses=1]
- %tmp23 = insertelement <16 x i8> %tmp22, i8 %tmp7, i32 5 ; <<16 x i8>> [#uses=1]
- %tmp24 = insertelement <16 x i8> %tmp23, i8 %tmp8, i32 6 ; <<16 x i8>> [#uses=1]
- %tmp25 = insertelement <16 x i8> %tmp24, i8 %tmp9, i32 7 ; <<16 x i8>> [#uses=1]
- %tmp26 = insertelement <16 x i8> %tmp25, i8 %tmp10, i32 8 ; <<16 x i8>> [#uses=1]
- %tmp27 = insertelement <16 x i8> %tmp26, i8 %tmp11, i32 9 ; <<16 x i8>> [#uses=1]
- %tmp28 = insertelement <16 x i8> %tmp27, i8 %tmp12, i32 10 ; <<16 x i8>> [#uses=1]
- %tmp29 = insertelement <16 x i8> %tmp28, i8 %tmp13, i32 11 ; <<16 x i8>> [#uses=1]
- %tmp30 = insertelement <16 x i8> %tmp29, i8 %tmp14, i32 12 ; <<16 x i8>> [#uses=1]
- %tmp31 = insertelement <16 x i8> %tmp30, i8 %tmp15, i32 13 ; <<16 x i8>> [#uses=1]
- %tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14 ; <<16 x i8>> [#uses=1]
- %tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15 ; <<16 x i8>> [#uses=1]
- %tmp33.upgrd.4 = bitcast <16 x i8> %tmp33 to <8 x i16> ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp33.upgrd.4, <8 x i16>* %A
- ret void
-}
-
-define void @VSLDOI_xx(<8 x i16>* %A, <8 x i16>* %B) {
- %tmp = load <8 x i16>* %A ; <<8 x i16>> [#uses=1]
- %tmp2 = load <8 x i16>* %A ; <<8 x i16>> [#uses=1]
- %tmp.upgrd.5 = bitcast <8 x i16> %tmp to <16 x i8> ; <<16 x i8>> [#uses=11]
- %tmp2.upgrd.6 = bitcast <8 x i16> %tmp2 to <16 x i8> ; <<16 x i8>> [#uses=5]
- %tmp.upgrd.7 = extractelement <16 x i8> %tmp.upgrd.5, i32 5 ; <i8> [#uses=1]
- %tmp3 = extractelement <16 x i8> %tmp.upgrd.5, i32 6 ; <i8> [#uses=1]
- %tmp4 = extractelement <16 x i8> %tmp.upgrd.5, i32 7 ; <i8> [#uses=1]
- %tmp5 = extractelement <16 x i8> %tmp.upgrd.5, i32 8 ; <i8> [#uses=1]
- %tmp6 = extractelement <16 x i8> %tmp.upgrd.5, i32 9 ; <i8> [#uses=1]
- %tmp7 = extractelement <16 x i8> %tmp.upgrd.5, i32 10 ; <i8> [#uses=1]
- %tmp8 = extractelement <16 x i8> %tmp.upgrd.5, i32 11 ; <i8> [#uses=1]
- %tmp9 = extractelement <16 x i8> %tmp.upgrd.5, i32 12 ; <i8> [#uses=1]
- %tmp10 = extractelement <16 x i8> %tmp.upgrd.5, i32 13 ; <i8> [#uses=1]
- %tmp11 = extractelement <16 x i8> %tmp.upgrd.5, i32 14 ; <i8> [#uses=1]
- %tmp12 = extractelement <16 x i8> %tmp.upgrd.5, i32 15 ; <i8> [#uses=1]
- %tmp13 = extractelement <16 x i8> %tmp2.upgrd.6, i32 0 ; <i8> [#uses=1]
- %tmp14 = extractelement <16 x i8> %tmp2.upgrd.6, i32 1 ; <i8> [#uses=1]
- %tmp15 = extractelement <16 x i8> %tmp2.upgrd.6, i32 2 ; <i8> [#uses=1]
- %tmp16 = extractelement <16 x i8> %tmp2.upgrd.6, i32 3 ; <i8> [#uses=1]
- %tmp17 = extractelement <16 x i8> %tmp2.upgrd.6, i32 4 ; <i8> [#uses=1]
- %tmp18 = insertelement <16 x i8> undef, i8 %tmp.upgrd.7, i32 0 ; <<16 x i8>> [#uses=1]
- %tmp19 = insertelement <16 x i8> %tmp18, i8 %tmp3, i32 1 ; <<16 x i8>> [#uses=1]
- %tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
- %tmp21 = insertelement <16 x i8> %tmp20, i8 %tmp5, i32 3 ; <<16 x i8>> [#uses=1]
- %tmp22 = insertelement <16 x i8> %tmp21, i8 %tmp6, i32 4 ; <<16 x i8>> [#uses=1]
- %tmp23 = insertelement <16 x i8> %tmp22, i8 %tmp7, i32 5 ; <<16 x i8>> [#uses=1]
- %tmp24 = insertelement <16 x i8> %tmp23, i8 %tmp8, i32 6 ; <<16 x i8>> [#uses=1]
- %tmp25 = insertelement <16 x i8> %tmp24, i8 %tmp9, i32 7 ; <<16 x i8>> [#uses=1]
- %tmp26 = insertelement <16 x i8> %tmp25, i8 %tmp10, i32 8 ; <<16 x i8>> [#uses=1]
- %tmp27 = insertelement <16 x i8> %tmp26, i8 %tmp11, i32 9 ; <<16 x i8>> [#uses=1]
- %tmp28 = insertelement <16 x i8> %tmp27, i8 %tmp12, i32 10 ; <<16 x i8>> [#uses=1]
- %tmp29 = insertelement <16 x i8> %tmp28, i8 %tmp13, i32 11 ; <<16 x i8>> [#uses=1]
- %tmp30 = insertelement <16 x i8> %tmp29, i8 %tmp14, i32 12 ; <<16 x i8>> [#uses=1]
- %tmp31 = insertelement <16 x i8> %tmp30, i8 %tmp15, i32 13 ; <<16 x i8>> [#uses=1]
- %tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14 ; <<16 x i8>> [#uses=1]
- %tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15 ; <<16 x i8>> [#uses=1]
- %tmp33.upgrd.8 = bitcast <16 x i8> %tmp33 to <8 x i16> ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp33.upgrd.8, <8 x i16>* %A
- ret void
-}
-
-define void @VPERM_promote(<8 x i16>* %A, <8 x i16>* %B) {
-entry:
- %tmp = load <8 x i16>* %A ; <<8 x i16>> [#uses=1]
- %tmp.upgrd.9 = bitcast <8 x i16> %tmp to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp2 = load <8 x i16>* %B ; <<8 x i16>> [#uses=1]
- %tmp2.upgrd.10 = bitcast <8 x i16> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp3 = call <4 x i32> @llvm.ppc.altivec.vperm( <4 x i32> %tmp.upgrd.9, <4 x i32> %tmp2.upgrd.10, <16 x i8> < i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14, i8 14 > ) ; <<4 x i32>> [#uses=1]
- %tmp3.upgrd.11 = bitcast <4 x i32> %tmp3 to <8 x i16> ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp3.upgrd.11, <8 x i16>* %A
- ret void
-}
-
-declare <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32>, <4 x i32>, <16 x i8>)
-
-define void @tb_l(<16 x i8>* %A, <16 x i8>* %B) {
-entry:
- %tmp = load <16 x i8>* %A ; <<16 x i8>> [#uses=8]
- %tmp2 = load <16 x i8>* %B ; <<16 x i8>> [#uses=8]
- %tmp.upgrd.12 = extractelement <16 x i8> %tmp, i32 8 ; <i8> [#uses=1]
- %tmp3 = extractelement <16 x i8> %tmp2, i32 8 ; <i8> [#uses=1]
- %tmp4 = extractelement <16 x i8> %tmp, i32 9 ; <i8> [#uses=1]
- %tmp5 = extractelement <16 x i8> %tmp2, i32 9 ; <i8> [#uses=1]
- %tmp6 = extractelement <16 x i8> %tmp, i32 10 ; <i8> [#uses=1]
- %tmp7 = extractelement <16 x i8> %tmp2, i32 10 ; <i8> [#uses=1]
- %tmp8 = extractelement <16 x i8> %tmp, i32 11 ; <i8> [#uses=1]
- %tmp9 = extractelement <16 x i8> %tmp2, i32 11 ; <i8> [#uses=1]
- %tmp10 = extractelement <16 x i8> %tmp, i32 12 ; <i8> [#uses=1]
- %tmp11 = extractelement <16 x i8> %tmp2, i32 12 ; <i8> [#uses=1]
- %tmp12 = extractelement <16 x i8> %tmp, i32 13 ; <i8> [#uses=1]
- %tmp13 = extractelement <16 x i8> %tmp2, i32 13 ; <i8> [#uses=1]
- %tmp14 = extractelement <16 x i8> %tmp, i32 14 ; <i8> [#uses=1]
- %tmp15 = extractelement <16 x i8> %tmp2, i32 14 ; <i8> [#uses=1]
- %tmp16 = extractelement <16 x i8> %tmp, i32 15 ; <i8> [#uses=1]
- %tmp17 = extractelement <16 x i8> %tmp2, i32 15 ; <i8> [#uses=1]
- %tmp18 = insertelement <16 x i8> undef, i8 %tmp.upgrd.12, i32 0 ; <<16 x i8>> [#uses=1]
- %tmp19 = insertelement <16 x i8> %tmp18, i8 %tmp3, i32 1 ; <<16 x i8>> [#uses=1]
- %tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
- %tmp21 = insertelement <16 x i8> %tmp20, i8 %tmp5, i32 3 ; <<16 x i8>> [#uses=1]
- %tmp22 = insertelement <16 x i8> %tmp21, i8 %tmp6, i32 4 ; <<16 x i8>> [#uses=1]
- %tmp23 = insertelement <16 x i8> %tmp22, i8 %tmp7, i32 5 ; <<16 x i8>> [#uses=1]
- %tmp24 = insertelement <16 x i8> %tmp23, i8 %tmp8, i32 6 ; <<16 x i8>> [#uses=1]
- %tmp25 = insertelement <16 x i8> %tmp24, i8 %tmp9, i32 7 ; <<16 x i8>> [#uses=1]
- %tmp26 = insertelement <16 x i8> %tmp25, i8 %tmp10, i32 8 ; <<16 x i8>> [#uses=1]
- %tmp27 = insertelement <16 x i8> %tmp26, i8 %tmp11, i32 9 ; <<16 x i8>> [#uses=1]
- %tmp28 = insertelement <16 x i8> %tmp27, i8 %tmp12, i32 10 ; <<16 x i8>> [#uses=1]
- %tmp29 = insertelement <16 x i8> %tmp28, i8 %tmp13, i32 11 ; <<16 x i8>> [#uses=1]
- %tmp30 = insertelement <16 x i8> %tmp29, i8 %tmp14, i32 12 ; <<16 x i8>> [#uses=1]
- %tmp31 = insertelement <16 x i8> %tmp30, i8 %tmp15, i32 13 ; <<16 x i8>> [#uses=1]
- %tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14 ; <<16 x i8>> [#uses=1]
- %tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15 ; <<16 x i8>> [#uses=1]
- store <16 x i8> %tmp33, <16 x i8>* %A
- ret void
-}
-
-define void @th_l(<8 x i16>* %A, <8 x i16>* %B) {
-entry:
- %tmp = load <8 x i16>* %A ; <<8 x i16>> [#uses=4]
- %tmp2 = load <8 x i16>* %B ; <<8 x i16>> [#uses=4]
- %tmp.upgrd.13 = extractelement <8 x i16> %tmp, i32 4 ; <i16> [#uses=1]
- %tmp3 = extractelement <8 x i16> %tmp2, i32 4 ; <i16> [#uses=1]
- %tmp4 = extractelement <8 x i16> %tmp, i32 5 ; <i16> [#uses=1]
- %tmp5 = extractelement <8 x i16> %tmp2, i32 5 ; <i16> [#uses=1]
- %tmp6 = extractelement <8 x i16> %tmp, i32 6 ; <i16> [#uses=1]
- %tmp7 = extractelement <8 x i16> %tmp2, i32 6 ; <i16> [#uses=1]
- %tmp8 = extractelement <8 x i16> %tmp, i32 7 ; <i16> [#uses=1]
- %tmp9 = extractelement <8 x i16> %tmp2, i32 7 ; <i16> [#uses=1]
- %tmp10 = insertelement <8 x i16> undef, i16 %tmp.upgrd.13, i32 0 ; <<8 x i16>> [#uses=1]
- %tmp11 = insertelement <8 x i16> %tmp10, i16 %tmp3, i32 1 ; <<8 x i16>> [#uses=1]
- %tmp12 = insertelement <8 x i16> %tmp11, i16 %tmp4, i32 2 ; <<8 x i16>> [#uses=1]
- %tmp13 = insertelement <8 x i16> %tmp12, i16 %tmp5, i32 3 ; <<8 x i16>> [#uses=1]
- %tmp14 = insertelement <8 x i16> %tmp13, i16 %tmp6, i32 4 ; <<8 x i16>> [#uses=1]
- %tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 5 ; <<8 x i16>> [#uses=1]
- %tmp16 = insertelement <8 x i16> %tmp15, i16 %tmp8, i32 6 ; <<8 x i16>> [#uses=1]
- %tmp17 = insertelement <8 x i16> %tmp16, i16 %tmp9, i32 7 ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp17, <8 x i16>* %A
- ret void
-}
-
-define void @tw_l(<4 x i32>* %A, <4 x i32>* %B) {
-entry:
- %tmp = load <4 x i32>* %A ; <<4 x i32>> [#uses=2]
- %tmp2 = load <4 x i32>* %B ; <<4 x i32>> [#uses=2]
- %tmp.upgrd.14 = extractelement <4 x i32> %tmp, i32 2 ; <i32> [#uses=1]
- %tmp3 = extractelement <4 x i32> %tmp2, i32 2 ; <i32> [#uses=1]
- %tmp4 = extractelement <4 x i32> %tmp, i32 3 ; <i32> [#uses=1]
- %tmp5 = extractelement <4 x i32> %tmp2, i32 3 ; <i32> [#uses=1]
- %tmp6 = insertelement <4 x i32> undef, i32 %tmp.upgrd.14, i32 0 ; <<4 x i32>> [#uses=1]
- %tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1 ; <<4 x i32>> [#uses=1]
- %tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2 ; <<4 x i32>> [#uses=1]
- %tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3 ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp9, <4 x i32>* %A
- ret void
-}
-
-define void @tb_h(<16 x i8>* %A, <16 x i8>* %B) {
-entry:
- %tmp = load <16 x i8>* %A ; <<16 x i8>> [#uses=8]
- %tmp2 = load <16 x i8>* %B ; <<16 x i8>> [#uses=8]
- %tmp.upgrd.15 = extractelement <16 x i8> %tmp, i32 0 ; <i8> [#uses=1]
- %tmp3 = extractelement <16 x i8> %tmp2, i32 0 ; <i8> [#uses=1]
- %tmp4 = extractelement <16 x i8> %tmp, i32 1 ; <i8> [#uses=1]
- %tmp5 = extractelement <16 x i8> %tmp2, i32 1 ; <i8> [#uses=1]
- %tmp6 = extractelement <16 x i8> %tmp, i32 2 ; <i8> [#uses=1]
- %tmp7 = extractelement <16 x i8> %tmp2, i32 2 ; <i8> [#uses=1]
- %tmp8 = extractelement <16 x i8> %tmp, i32 3 ; <i8> [#uses=1]
- %tmp9 = extractelement <16 x i8> %tmp2, i32 3 ; <i8> [#uses=1]
- %tmp10 = extractelement <16 x i8> %tmp, i32 4 ; <i8> [#uses=1]
- %tmp11 = extractelement <16 x i8> %tmp2, i32 4 ; <i8> [#uses=1]
- %tmp12 = extractelement <16 x i8> %tmp, i32 5 ; <i8> [#uses=1]
- %tmp13 = extractelement <16 x i8> %tmp2, i32 5 ; <i8> [#uses=1]
- %tmp14 = extractelement <16 x i8> %tmp, i32 6 ; <i8> [#uses=1]
- %tmp15 = extractelement <16 x i8> %tmp2, i32 6 ; <i8> [#uses=1]
- %tmp16 = extractelement <16 x i8> %tmp, i32 7 ; <i8> [#uses=1]
- %tmp17 = extractelement <16 x i8> %tmp2, i32 7 ; <i8> [#uses=1]
- %tmp18 = insertelement <16 x i8> undef, i8 %tmp.upgrd.15, i32 0 ; <<16 x i8>> [#uses=1]
- %tmp19 = insertelement <16 x i8> %tmp18, i8 %tmp3, i32 1 ; <<16 x i8>> [#uses=1]
- %tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
- %tmp21 = insertelement <16 x i8> %tmp20, i8 %tmp5, i32 3 ; <<16 x i8>> [#uses=1]
- %tmp22 = insertelement <16 x i8> %tmp21, i8 %tmp6, i32 4 ; <<16 x i8>> [#uses=1]
- %tmp23 = insertelement <16 x i8> %tmp22, i8 %tmp7, i32 5 ; <<16 x i8>> [#uses=1]
- %tmp24 = insertelement <16 x i8> %tmp23, i8 %tmp8, i32 6 ; <<16 x i8>> [#uses=1]
- %tmp25 = insertelement <16 x i8> %tmp24, i8 %tmp9, i32 7 ; <<16 x i8>> [#uses=1]
- %tmp26 = insertelement <16 x i8> %tmp25, i8 %tmp10, i32 8 ; <<16 x i8>> [#uses=1]
- %tmp27 = insertelement <16 x i8> %tmp26, i8 %tmp11, i32 9 ; <<16 x i8>> [#uses=1]
- %tmp28 = insertelement <16 x i8> %tmp27, i8 %tmp12, i32 10 ; <<16 x i8>> [#uses=1]
- %tmp29 = insertelement <16 x i8> %tmp28, i8 %tmp13, i32 11 ; <<16 x i8>> [#uses=1]
- %tmp30 = insertelement <16 x i8> %tmp29, i8 %tmp14, i32 12 ; <<16 x i8>> [#uses=1]
- %tmp31 = insertelement <16 x i8> %tmp30, i8 %tmp15, i32 13 ; <<16 x i8>> [#uses=1]
- %tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14 ; <<16 x i8>> [#uses=1]
- %tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15 ; <<16 x i8>> [#uses=1]
- store <16 x i8> %tmp33, <16 x i8>* %A
- ret void
-}
-
-define void @th_h(<8 x i16>* %A, <8 x i16>* %B) {
-entry:
- %tmp = load <8 x i16>* %A ; <<8 x i16>> [#uses=4]
- %tmp2 = load <8 x i16>* %B ; <<8 x i16>> [#uses=4]
- %tmp.upgrd.16 = extractelement <8 x i16> %tmp, i32 0 ; <i16> [#uses=1]
- %tmp3 = extractelement <8 x i16> %tmp2, i32 0 ; <i16> [#uses=1]
- %tmp4 = extractelement <8 x i16> %tmp, i32 1 ; <i16> [#uses=1]
- %tmp5 = extractelement <8 x i16> %tmp2, i32 1 ; <i16> [#uses=1]
- %tmp6 = extractelement <8 x i16> %tmp, i32 2 ; <i16> [#uses=1]
- %tmp7 = extractelement <8 x i16> %tmp2, i32 2 ; <i16> [#uses=1]
- %tmp8 = extractelement <8 x i16> %tmp, i32 3 ; <i16> [#uses=1]
- %tmp9 = extractelement <8 x i16> %tmp2, i32 3 ; <i16> [#uses=1]
- %tmp10 = insertelement <8 x i16> undef, i16 %tmp.upgrd.16, i32 0 ; <<8 x i16>> [#uses=1]
- %tmp11 = insertelement <8 x i16> %tmp10, i16 %tmp3, i32 1 ; <<8 x i16>> [#uses=1]
- %tmp12 = insertelement <8 x i16> %tmp11, i16 %tmp4, i32 2 ; <<8 x i16>> [#uses=1]
- %tmp13 = insertelement <8 x i16> %tmp12, i16 %tmp5, i32 3 ; <<8 x i16>> [#uses=1]
- %tmp14 = insertelement <8 x i16> %tmp13, i16 %tmp6, i32 4 ; <<8 x i16>> [#uses=1]
- %tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 5 ; <<8 x i16>> [#uses=1]
- %tmp16 = insertelement <8 x i16> %tmp15, i16 %tmp8, i32 6 ; <<8 x i16>> [#uses=1]
- %tmp17 = insertelement <8 x i16> %tmp16, i16 %tmp9, i32 7 ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp17, <8 x i16>* %A
- ret void
-}
-
-define void @tw_h(<4 x i32>* %A, <4 x i32>* %B) {
-entry:
- %tmp = load <4 x i32>* %A ; <<4 x i32>> [#uses=2]
- %tmp2 = load <4 x i32>* %B ; <<4 x i32>> [#uses=2]
- %tmp.upgrd.17 = extractelement <4 x i32> %tmp2, i32 0 ; <i32> [#uses=1]
- %tmp3 = extractelement <4 x i32> %tmp, i32 0 ; <i32> [#uses=1]
- %tmp4 = extractelement <4 x i32> %tmp2, i32 1 ; <i32> [#uses=1]
- %tmp5 = extractelement <4 x i32> %tmp, i32 1 ; <i32> [#uses=1]
- %tmp6 = insertelement <4 x i32> undef, i32 %tmp.upgrd.17, i32 0 ; <<4 x i32>> [#uses=1]
- %tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1 ; <<4 x i32>> [#uses=1]
- %tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2 ; <<4 x i32>> [#uses=1]
- %tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3 ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp9, <4 x i32>* %A
- ret void
-}
-
-define void @tw_h_flop(<4 x i32>* %A, <4 x i32>* %B) {
- %tmp = load <4 x i32>* %A ; <<4 x i32>> [#uses=2]
- %tmp2 = load <4 x i32>* %B ; <<4 x i32>> [#uses=2]
- %tmp.upgrd.18 = extractelement <4 x i32> %tmp, i32 0 ; <i32> [#uses=1]
- %tmp3 = extractelement <4 x i32> %tmp2, i32 0 ; <i32> [#uses=1]
- %tmp4 = extractelement <4 x i32> %tmp, i32 1 ; <i32> [#uses=1]
- %tmp5 = extractelement <4 x i32> %tmp2, i32 1 ; <i32> [#uses=1]
- %tmp6 = insertelement <4 x i32> undef, i32 %tmp.upgrd.18, i32 0 ; <<4 x i32>> [#uses=1]
- %tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1 ; <<4 x i32>> [#uses=1]
- %tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2 ; <<4 x i32>> [#uses=1]
- %tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3 ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp9, <4 x i32>* %A
- ret void
-}
-
-define void @VMRG_UNARY_tb_l(<16 x i8>* %A, <16 x i8>* %B) {
-entry:
- %tmp = load <16 x i8>* %A ; <<16 x i8>> [#uses=16]
- %tmp.upgrd.19 = extractelement <16 x i8> %tmp, i32 8 ; <i8> [#uses=1]
- %tmp3 = extractelement <16 x i8> %tmp, i32 8 ; <i8> [#uses=1]
- %tmp4 = extractelement <16 x i8> %tmp, i32 9 ; <i8> [#uses=1]
- %tmp5 = extractelement <16 x i8> %tmp, i32 9 ; <i8> [#uses=1]
- %tmp6 = extractelement <16 x i8> %tmp, i32 10 ; <i8> [#uses=1]
- %tmp7 = extractelement <16 x i8> %tmp, i32 10 ; <i8> [#uses=1]
- %tmp8 = extractelement <16 x i8> %tmp, i32 11 ; <i8> [#uses=1]
- %tmp9 = extractelement <16 x i8> %tmp, i32 11 ; <i8> [#uses=1]
- %tmp10 = extractelement <16 x i8> %tmp, i32 12 ; <i8> [#uses=1]
- %tmp11 = extractelement <16 x i8> %tmp, i32 12 ; <i8> [#uses=1]
- %tmp12 = extractelement <16 x i8> %tmp, i32 13 ; <i8> [#uses=1]
- %tmp13 = extractelement <16 x i8> %tmp, i32 13 ; <i8> [#uses=1]
- %tmp14 = extractelement <16 x i8> %tmp, i32 14 ; <i8> [#uses=1]
- %tmp15 = extractelement <16 x i8> %tmp, i32 14 ; <i8> [#uses=1]
- %tmp16 = extractelement <16 x i8> %tmp, i32 15 ; <i8> [#uses=1]
- %tmp17 = extractelement <16 x i8> %tmp, i32 15 ; <i8> [#uses=1]
- %tmp18 = insertelement <16 x i8> undef, i8 %tmp.upgrd.19, i32 0 ; <<16 x i8>> [#uses=1]
- %tmp19 = insertelement <16 x i8> %tmp18, i8 %tmp3, i32 1 ; <<16 x i8>> [#uses=1]
- %tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
- %tmp21 = insertelement <16 x i8> %tmp20, i8 %tmp5, i32 3 ; <<16 x i8>> [#uses=1]
- %tmp22 = insertelement <16 x i8> %tmp21, i8 %tmp6, i32 4 ; <<16 x i8>> [#uses=1]
- %tmp23 = insertelement <16 x i8> %tmp22, i8 %tmp7, i32 5 ; <<16 x i8>> [#uses=1]
- %tmp24 = insertelement <16 x i8> %tmp23, i8 %tmp8, i32 6 ; <<16 x i8>> [#uses=1]
- %tmp25 = insertelement <16 x i8> %tmp24, i8 %tmp9, i32 7 ; <<16 x i8>> [#uses=1]
- %tmp26 = insertelement <16 x i8> %tmp25, i8 %tmp10, i32 8 ; <<16 x i8>> [#uses=1]
- %tmp27 = insertelement <16 x i8> %tmp26, i8 %tmp11, i32 9 ; <<16 x i8>> [#uses=1]
- %tmp28 = insertelement <16 x i8> %tmp27, i8 %tmp12, i32 10 ; <<16 x i8>> [#uses=1]
- %tmp29 = insertelement <16 x i8> %tmp28, i8 %tmp13, i32 11 ; <<16 x i8>> [#uses=1]
- %tmp30 = insertelement <16 x i8> %tmp29, i8 %tmp14, i32 12 ; <<16 x i8>> [#uses=1]
- %tmp31 = insertelement <16 x i8> %tmp30, i8 %tmp15, i32 13 ; <<16 x i8>> [#uses=1]
- %tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14 ; <<16 x i8>> [#uses=1]
- %tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15 ; <<16 x i8>> [#uses=1]
- store <16 x i8> %tmp33, <16 x i8>* %A
- ret void
-}
-
-define void @VMRG_UNARY_th_l(<8 x i16>* %A, <8 x i16>* %B) {
-entry:
- %tmp = load <8 x i16>* %A ; <<8 x i16>> [#uses=8]
- %tmp.upgrd.20 = extractelement <8 x i16> %tmp, i32 4 ; <i16> [#uses=1]
- %tmp3 = extractelement <8 x i16> %tmp, i32 4 ; <i16> [#uses=1]
- %tmp4 = extractelement <8 x i16> %tmp, i32 5 ; <i16> [#uses=1]
- %tmp5 = extractelement <8 x i16> %tmp, i32 5 ; <i16> [#uses=1]
- %tmp6 = extractelement <8 x i16> %tmp, i32 6 ; <i16> [#uses=1]
- %tmp7 = extractelement <8 x i16> %tmp, i32 6 ; <i16> [#uses=1]
- %tmp8 = extractelement <8 x i16> %tmp, i32 7 ; <i16> [#uses=1]
- %tmp9 = extractelement <8 x i16> %tmp, i32 7 ; <i16> [#uses=1]
- %tmp10 = insertelement <8 x i16> undef, i16 %tmp.upgrd.20, i32 0 ; <<8 x i16>> [#uses=1]
- %tmp11 = insertelement <8 x i16> %tmp10, i16 %tmp3, i32 1 ; <<8 x i16>> [#uses=1]
- %tmp12 = insertelement <8 x i16> %tmp11, i16 %tmp4, i32 2 ; <<8 x i16>> [#uses=1]
- %tmp13 = insertelement <8 x i16> %tmp12, i16 %tmp5, i32 3 ; <<8 x i16>> [#uses=1]
- %tmp14 = insertelement <8 x i16> %tmp13, i16 %tmp6, i32 4 ; <<8 x i16>> [#uses=1]
- %tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 5 ; <<8 x i16>> [#uses=1]
- %tmp16 = insertelement <8 x i16> %tmp15, i16 %tmp8, i32 6 ; <<8 x i16>> [#uses=1]
- %tmp17 = insertelement <8 x i16> %tmp16, i16 %tmp9, i32 7 ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp17, <8 x i16>* %A
- ret void
-}
-
-define void @VMRG_UNARY_tw_l(<4 x i32>* %A, <4 x i32>* %B) {
-entry:
- %tmp = load <4 x i32>* %A ; <<4 x i32>> [#uses=4]
- %tmp.upgrd.21 = extractelement <4 x i32> %tmp, i32 2 ; <i32> [#uses=1]
- %tmp3 = extractelement <4 x i32> %tmp, i32 2 ; <i32> [#uses=1]
- %tmp4 = extractelement <4 x i32> %tmp, i32 3 ; <i32> [#uses=1]
- %tmp5 = extractelement <4 x i32> %tmp, i32 3 ; <i32> [#uses=1]
- %tmp6 = insertelement <4 x i32> undef, i32 %tmp.upgrd.21, i32 0 ; <<4 x i32>> [#uses=1]
- %tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1 ; <<4 x i32>> [#uses=1]
- %tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2 ; <<4 x i32>> [#uses=1]
- %tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3 ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp9, <4 x i32>* %A
- ret void
-}
-
-define void @VMRG_UNARY_tb_h(<16 x i8>* %A, <16 x i8>* %B) {
-entry:
- %tmp = load <16 x i8>* %A ; <<16 x i8>> [#uses=16]
- %tmp.upgrd.22 = extractelement <16 x i8> %tmp, i32 0 ; <i8> [#uses=1]
- %tmp3 = extractelement <16 x i8> %tmp, i32 0 ; <i8> [#uses=1]
- %tmp4 = extractelement <16 x i8> %tmp, i32 1 ; <i8> [#uses=1]
- %tmp5 = extractelement <16 x i8> %tmp, i32 1 ; <i8> [#uses=1]
- %tmp6 = extractelement <16 x i8> %tmp, i32 2 ; <i8> [#uses=1]
- %tmp7 = extractelement <16 x i8> %tmp, i32 2 ; <i8> [#uses=1]
- %tmp8 = extractelement <16 x i8> %tmp, i32 3 ; <i8> [#uses=1]
- %tmp9 = extractelement <16 x i8> %tmp, i32 3 ; <i8> [#uses=1]
- %tmp10 = extractelement <16 x i8> %tmp, i32 4 ; <i8> [#uses=1]
- %tmp11 = extractelement <16 x i8> %tmp, i32 4 ; <i8> [#uses=1]
- %tmp12 = extractelement <16 x i8> %tmp, i32 5 ; <i8> [#uses=1]
- %tmp13 = extractelement <16 x i8> %tmp, i32 5 ; <i8> [#uses=1]
- %tmp14 = extractelement <16 x i8> %tmp, i32 6 ; <i8> [#uses=1]
- %tmp15 = extractelement <16 x i8> %tmp, i32 6 ; <i8> [#uses=1]
- %tmp16 = extractelement <16 x i8> %tmp, i32 7 ; <i8> [#uses=1]
- %tmp17 = extractelement <16 x i8> %tmp, i32 7 ; <i8> [#uses=1]
- %tmp18 = insertelement <16 x i8> undef, i8 %tmp.upgrd.22, i32 0 ; <<16 x i8>> [#uses=1]
- %tmp19 = insertelement <16 x i8> %tmp18, i8 %tmp3, i32 1 ; <<16 x i8>> [#uses=1]
- %tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
- %tmp21 = insertelement <16 x i8> %tmp20, i8 %tmp5, i32 3 ; <<16 x i8>> [#uses=1]
- %tmp22 = insertelement <16 x i8> %tmp21, i8 %tmp6, i32 4 ; <<16 x i8>> [#uses=1]
- %tmp23 = insertelement <16 x i8> %tmp22, i8 %tmp7, i32 5 ; <<16 x i8>> [#uses=1]
- %tmp24 = insertelement <16 x i8> %tmp23, i8 %tmp8, i32 6 ; <<16 x i8>> [#uses=1]
- %tmp25 = insertelement <16 x i8> %tmp24, i8 %tmp9, i32 7 ; <<16 x i8>> [#uses=1]
- %tmp26 = insertelement <16 x i8> %tmp25, i8 %tmp10, i32 8 ; <<16 x i8>> [#uses=1]
- %tmp27 = insertelement <16 x i8> %tmp26, i8 %tmp11, i32 9 ; <<16 x i8>> [#uses=1]
- %tmp28 = insertelement <16 x i8> %tmp27, i8 %tmp12, i32 10 ; <<16 x i8>> [#uses=1]
- %tmp29 = insertelement <16 x i8> %tmp28, i8 %tmp13, i32 11 ; <<16 x i8>> [#uses=1]
- %tmp30 = insertelement <16 x i8> %tmp29, i8 %tmp14, i32 12 ; <<16 x i8>> [#uses=1]
- %tmp31 = insertelement <16 x i8> %tmp30, i8 %tmp15, i32 13 ; <<16 x i8>> [#uses=1]
- %tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 14 ; <<16 x i8>> [#uses=1]
- %tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 15 ; <<16 x i8>> [#uses=1]
- store <16 x i8> %tmp33, <16 x i8>* %A
- ret void
-}
-
-define void @VMRG_UNARY_th_h(<8 x i16>* %A, <8 x i16>* %B) {
-entry:
- %tmp = load <8 x i16>* %A ; <<8 x i16>> [#uses=8]
- %tmp.upgrd.23 = extractelement <8 x i16> %tmp, i32 0 ; <i16> [#uses=1]
- %tmp3 = extractelement <8 x i16> %tmp, i32 0 ; <i16> [#uses=1]
- %tmp4 = extractelement <8 x i16> %tmp, i32 1 ; <i16> [#uses=1]
- %tmp5 = extractelement <8 x i16> %tmp, i32 1 ; <i16> [#uses=1]
- %tmp6 = extractelement <8 x i16> %tmp, i32 2 ; <i16> [#uses=1]
- %tmp7 = extractelement <8 x i16> %tmp, i32 2 ; <i16> [#uses=1]
- %tmp8 = extractelement <8 x i16> %tmp, i32 3 ; <i16> [#uses=1]
- %tmp9 = extractelement <8 x i16> %tmp, i32 3 ; <i16> [#uses=1]
- %tmp10 = insertelement <8 x i16> undef, i16 %tmp.upgrd.23, i32 0 ; <<8 x i16>> [#uses=1]
- %tmp11 = insertelement <8 x i16> %tmp10, i16 %tmp3, i32 1 ; <<8 x i16>> [#uses=1]
- %tmp12 = insertelement <8 x i16> %tmp11, i16 %tmp4, i32 2 ; <<8 x i16>> [#uses=1]
- %tmp13 = insertelement <8 x i16> %tmp12, i16 %tmp5, i32 3 ; <<8 x i16>> [#uses=1]
- %tmp14 = insertelement <8 x i16> %tmp13, i16 %tmp6, i32 4 ; <<8 x i16>> [#uses=1]
- %tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 5 ; <<8 x i16>> [#uses=1]
- %tmp16 = insertelement <8 x i16> %tmp15, i16 %tmp8, i32 6 ; <<8 x i16>> [#uses=1]
- %tmp17 = insertelement <8 x i16> %tmp16, i16 %tmp9, i32 7 ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp17, <8 x i16>* %A
- ret void
-}
-
-define void @VMRG_UNARY_tw_h(<4 x i32>* %A, <4 x i32>* %B) {
-entry:
- %tmp = load <4 x i32>* %A ; <<4 x i32>> [#uses=4]
- %tmp.upgrd.24 = extractelement <4 x i32> %tmp, i32 0 ; <i32> [#uses=1]
- %tmp3 = extractelement <4 x i32> %tmp, i32 0 ; <i32> [#uses=1]
- %tmp4 = extractelement <4 x i32> %tmp, i32 1 ; <i32> [#uses=1]
- %tmp5 = extractelement <4 x i32> %tmp, i32 1 ; <i32> [#uses=1]
- %tmp6 = insertelement <4 x i32> undef, i32 %tmp.upgrd.24, i32 0 ; <<4 x i32>> [#uses=1]
- %tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1 ; <<4 x i32>> [#uses=1]
- %tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2 ; <<4 x i32>> [#uses=1]
- %tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3 ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp9, <4 x i32>* %A
- ret void
-}
-
-define void @VPCKUHUM_unary(<8 x i16>* %A, <8 x i16>* %B) {
-entry:
- %tmp = load <8 x i16>* %A ; <<8 x i16>> [#uses=2]
- %tmp.upgrd.25 = bitcast <8 x i16> %tmp to <16 x i8> ; <<16 x i8>> [#uses=8]
- %tmp3 = bitcast <8 x i16> %tmp to <16 x i8> ; <<16 x i8>> [#uses=8]
- %tmp.upgrd.26 = extractelement <16 x i8> %tmp.upgrd.25, i32 1 ; <i8> [#uses=1]
- %tmp4 = extractelement <16 x i8> %tmp.upgrd.25, i32 3 ; <i8> [#uses=1]
- %tmp5 = extractelement <16 x i8> %tmp.upgrd.25, i32 5 ; <i8> [#uses=1]
- %tmp6 = extractelement <16 x i8> %tmp.upgrd.25, i32 7 ; <i8> [#uses=1]
- %tmp7 = extractelement <16 x i8> %tmp.upgrd.25, i32 9 ; <i8> [#uses=1]
- %tmp8 = extractelement <16 x i8> %tmp.upgrd.25, i32 11 ; <i8> [#uses=1]
- %tmp9 = extractelement <16 x i8> %tmp.upgrd.25, i32 13 ; <i8> [#uses=1]
- %tmp10 = extractelement <16 x i8> %tmp.upgrd.25, i32 15 ; <i8> [#uses=1]
- %tmp11 = extractelement <16 x i8> %tmp3, i32 1 ; <i8> [#uses=1]
- %tmp12 = extractelement <16 x i8> %tmp3, i32 3 ; <i8> [#uses=1]
- %tmp13 = extractelement <16 x i8> %tmp3, i32 5 ; <i8> [#uses=1]
- %tmp14 = extractelement <16 x i8> %tmp3, i32 7 ; <i8> [#uses=1]
- %tmp15 = extractelement <16 x i8> %tmp3, i32 9 ; <i8> [#uses=1]
- %tmp16 = extractelement <16 x i8> %tmp3, i32 11 ; <i8> [#uses=1]
- %tmp17 = extractelement <16 x i8> %tmp3, i32 13 ; <i8> [#uses=1]
- %tmp18 = extractelement <16 x i8> %tmp3, i32 15 ; <i8> [#uses=1]
- %tmp19 = insertelement <16 x i8> undef, i8 %tmp.upgrd.26, i32 0 ; <<16 x i8>> [#uses=1]
- %tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 1 ; <<16 x i8>> [#uses=1]
- %tmp21 = insertelement <16 x i8> %tmp20, i8 %tmp5, i32 2 ; <<16 x i8>> [#uses=1]
- %tmp22 = insertelement <16 x i8> %tmp21, i8 %tmp6, i32 3 ; <<16 x i8>> [#uses=1]
- %tmp23 = insertelement <16 x i8> %tmp22, i8 %tmp7, i32 4 ; <<16 x i8>> [#uses=1]
- %tmp24 = insertelement <16 x i8> %tmp23, i8 %tmp8, i32 5 ; <<16 x i8>> [#uses=1]
- %tmp25 = insertelement <16 x i8> %tmp24, i8 %tmp9, i32 6 ; <<16 x i8>> [#uses=1]
- %tmp26 = insertelement <16 x i8> %tmp25, i8 %tmp10, i32 7 ; <<16 x i8>> [#uses=1]
- %tmp27 = insertelement <16 x i8> %tmp26, i8 %tmp11, i32 8 ; <<16 x i8>> [#uses=1]
- %tmp28 = insertelement <16 x i8> %tmp27, i8 %tmp12, i32 9 ; <<16 x i8>> [#uses=1]
- %tmp29 = insertelement <16 x i8> %tmp28, i8 %tmp13, i32 10 ; <<16 x i8>> [#uses=1]
- %tmp30 = insertelement <16 x i8> %tmp29, i8 %tmp14, i32 11 ; <<16 x i8>> [#uses=1]
- %tmp31 = insertelement <16 x i8> %tmp30, i8 %tmp15, i32 12 ; <<16 x i8>> [#uses=1]
- %tmp32 = insertelement <16 x i8> %tmp31, i8 %tmp16, i32 13 ; <<16 x i8>> [#uses=1]
- %tmp33 = insertelement <16 x i8> %tmp32, i8 %tmp17, i32 14 ; <<16 x i8>> [#uses=1]
- %tmp34 = insertelement <16 x i8> %tmp33, i8 %tmp18, i32 15 ; <<16 x i8>> [#uses=1]
- %tmp34.upgrd.27 = bitcast <16 x i8> %tmp34 to <8 x i16> ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp34.upgrd.27, <8 x i16>* %A
- ret void
-}
-
-define void @VPCKUWUM_unary(<4 x i32>* %A, <4 x i32>* %B) {
-entry:
- %tmp = load <4 x i32>* %A ; <<4 x i32>> [#uses=2]
- %tmp.upgrd.28 = bitcast <4 x i32> %tmp to <8 x i16> ; <<8 x i16>> [#uses=4]
- %tmp3 = bitcast <4 x i32> %tmp to <8 x i16> ; <<8 x i16>> [#uses=4]
- %tmp.upgrd.29 = extractelement <8 x i16> %tmp.upgrd.28, i32 1 ; <i16> [#uses=1]
- %tmp4 = extractelement <8 x i16> %tmp.upgrd.28, i32 3 ; <i16> [#uses=1]
- %tmp5 = extractelement <8 x i16> %tmp.upgrd.28, i32 5 ; <i16> [#uses=1]
- %tmp6 = extractelement <8 x i16> %tmp.upgrd.28, i32 7 ; <i16> [#uses=1]
- %tmp7 = extractelement <8 x i16> %tmp3, i32 1 ; <i16> [#uses=1]
- %tmp8 = extractelement <8 x i16> %tmp3, i32 3 ; <i16> [#uses=1]
- %tmp9 = extractelement <8 x i16> %tmp3, i32 5 ; <i16> [#uses=1]
- %tmp10 = extractelement <8 x i16> %tmp3, i32 7 ; <i16> [#uses=1]
- %tmp11 = insertelement <8 x i16> undef, i16 %tmp.upgrd.29, i32 0 ; <<8 x i16>> [#uses=1]
- %tmp12 = insertelement <8 x i16> %tmp11, i16 %tmp4, i32 1 ; <<8 x i16>> [#uses=1]
- %tmp13 = insertelement <8 x i16> %tmp12, i16 %tmp5, i32 2 ; <<8 x i16>> [#uses=1]
- %tmp14 = insertelement <8 x i16> %tmp13, i16 %tmp6, i32 3 ; <<8 x i16>> [#uses=1]
- %tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 4 ; <<8 x i16>> [#uses=1]
- %tmp16 = insertelement <8 x i16> %tmp15, i16 %tmp8, i32 5 ; <<8 x i16>> [#uses=1]
- %tmp17 = insertelement <8 x i16> %tmp16, i16 %tmp9, i32 6 ; <<8 x i16>> [#uses=1]
- %tmp18 = insertelement <8 x i16> %tmp17, i16 %tmp10, i32 7 ; <<8 x i16>> [#uses=1]
- %tmp18.upgrd.30 = bitcast <8 x i16> %tmp18 to <4 x i32> ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp18.upgrd.30, <4 x i32>* %A
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_splat.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_splat.ll
deleted file mode 100644
index 6123728..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_splat.ll
+++ /dev/null
@@ -1,71 +0,0 @@
-; Test that vectors are scalarized/lowered correctly.
-; RUN: llc < %s -march=ppc32 -mcpu=g3 | \
-; RUN: grep stfs | count 4
-; RUN: llc < %s -march=ppc32 -mcpu=g5 -o %t
-; RUN: grep vspltw %t | count 2
-; RUN: grep vsplti %t | count 3
-; RUN: grep vsplth %t | count 1
-
- %f4 = type <4 x float>
- %i4 = type <4 x i32>
-
-define void @splat(%f4* %P, %f4* %Q, float %X) nounwind {
- %tmp = insertelement %f4 undef, float %X, i32 0 ; <%f4> [#uses=1]
- %tmp2 = insertelement %f4 %tmp, float %X, i32 1 ; <%f4> [#uses=1]
- %tmp4 = insertelement %f4 %tmp2, float %X, i32 2 ; <%f4> [#uses=1]
- %tmp6 = insertelement %f4 %tmp4, float %X, i32 3 ; <%f4> [#uses=1]
- %q = load %f4* %Q ; <%f4> [#uses=1]
- %R = fadd %f4 %q, %tmp6 ; <%f4> [#uses=1]
- store %f4 %R, %f4* %P
- ret void
-}
-
-define void @splat_i4(%i4* %P, %i4* %Q, i32 %X) nounwind {
- %tmp = insertelement %i4 undef, i32 %X, i32 0 ; <%i4> [#uses=1]
- %tmp2 = insertelement %i4 %tmp, i32 %X, i32 1 ; <%i4> [#uses=1]
- %tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2 ; <%i4> [#uses=1]
- %tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3 ; <%i4> [#uses=1]
- %q = load %i4* %Q ; <%i4> [#uses=1]
- %R = add %i4 %q, %tmp6 ; <%i4> [#uses=1]
- store %i4 %R, %i4* %P
- ret void
-}
-
-define void @splat_imm_i32(%i4* %P, %i4* %Q, i32 %X) nounwind {
- %q = load %i4* %Q ; <%i4> [#uses=1]
- %R = add %i4 %q, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <%i4> [#uses=1]
- store %i4 %R, %i4* %P
- ret void
-}
-
-define void @splat_imm_i16(%i4* %P, %i4* %Q, i32 %X) nounwind {
- %q = load %i4* %Q ; <%i4> [#uses=1]
- %R = add %i4 %q, < i32 65537, i32 65537, i32 65537, i32 65537 > ; <%i4> [#uses=1]
- store %i4 %R, %i4* %P
- ret void
-}
-
-define void @splat_h(i16 %tmp, <16 x i8>* %dst) nounwind {
- %tmp.upgrd.1 = insertelement <8 x i16> undef, i16 %tmp, i32 0
- %tmp72 = insertelement <8 x i16> %tmp.upgrd.1, i16 %tmp, i32 1
- %tmp73 = insertelement <8 x i16> %tmp72, i16 %tmp, i32 2
- %tmp74 = insertelement <8 x i16> %tmp73, i16 %tmp, i32 3
- %tmp75 = insertelement <8 x i16> %tmp74, i16 %tmp, i32 4
- %tmp76 = insertelement <8 x i16> %tmp75, i16 %tmp, i32 5
- %tmp77 = insertelement <8 x i16> %tmp76, i16 %tmp, i32 6
- %tmp78 = insertelement <8 x i16> %tmp77, i16 %tmp, i32 7
- %tmp78.upgrd.2 = bitcast <8 x i16> %tmp78 to <16 x i8>
- store <16 x i8> %tmp78.upgrd.2, <16 x i8>* %dst
- ret void
-}
-
-define void @spltish(<16 x i8>* %A, <16 x i8>* %B) nounwind {
- %tmp = load <16 x i8>* %B ; <<16 x i8>> [#uses=1]
- %tmp.s = bitcast <16 x i8> %tmp to <16 x i8> ; <<16 x i8>> [#uses=1]
- %tmp4 = sub <16 x i8> %tmp.s, bitcast (<8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16
- 15, i16 15, i16 15 > to <16 x i8>) ; <<16 x i8>> [#uses=1]
- %tmp4.u = bitcast <16 x i8> %tmp4 to <16 x i8> ; <<16 x i8>> [#uses=1]
- store <16 x i8> %tmp4.u, <16 x i8>* %A
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_splat_constant.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_splat_constant.ll
deleted file mode 100644
index b227794..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_splat_constant.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mtriple=powerpc-apple-darwin -mcpu=g5 | FileCheck %s
-; Formerly incorrectly inserted vsldoi (endian confusion)
-
- at baz = common global <16 x i8> zeroinitializer ; <<16 x i8>*> [#uses=1]
-@baz = common global <16 x i8> zeroinitializer ; <<16 x i8>*> [#uses=1]
-
-define void @foo(<16 x i8> %x) nounwind ssp {
-entry:
-; CHECK: _foo:
-; CHECK-NOT: vsldoi
- %x_addr = alloca <16 x i8> ; <<16 x i8>*> [#uses=2]
- %temp = alloca <16 x i8> ; <<16 x i8>*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store <16 x i8> %x, <16 x i8>* %x_addr
- store <16 x i8> <i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14, i8 0, i8 0, i8 0, i8 14>, <16 x i8>* %temp, align 16
- %0 = load <16 x i8>* %x_addr, align 16 ; <<16 x i8>> [#uses=1]
- %1 = load <16 x i8>* %temp, align 16 ; <<16 x i8>> [#uses=1]
- %tmp = add <16 x i8> %0, %1 ; <<16 x i8>> [#uses=1]
- store <16 x i8> %tmp, <16 x i8>* @baz, align 16
- br label %return
-
-return: ; preds = %entry
- ret void
-; CHECK: blr
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_vrsave.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_vrsave.ll
deleted file mode 100644
index 2a03d58..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_vrsave.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5 -o %t
-; RUN: grep vrlw %t
-; RUN: not grep spr %t
-; RUN: not grep vrsave %t
-
-define <4 x i32> @test_rol() {
- ret <4 x i32> < i32 -11534337, i32 -11534337, i32 -11534337, i32 -11534337 >
-}
-
-define <4 x i32> @test_arg(<4 x i32> %A, <4 x i32> %B) {
- %C = add <4 x i32> %A, %B ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %C
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_zero.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_zero.ll
deleted file mode 100644
index f862b2c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vec_zero.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep vxor
-
-define void @foo(<4 x float>* %P) {
- %T = load <4 x float>* %P ; <<4 x float>> [#uses=1]
- %S = fadd <4 x float> zeroinitializer, %T ; <<4 x float>> [#uses=1]
- store <4 x float> %S, <4 x float>* %P
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vector-identity-shuffle.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vector-identity-shuffle.ll
deleted file mode 100644
index dfa2e35..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vector-identity-shuffle.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | grep test:
-; RUN: llc < %s -march=ppc32 -mcpu=g5 | not grep vperm
-
-define void @test(<4 x float>* %tmp2.i) {
- %tmp2.i.upgrd.1 = load <4 x float>* %tmp2.i ; <<4 x float>> [#uses=4]
- %xFloat0.48 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 0 ; <float> [#uses=1]
- %inFloat0.49 = insertelement <4 x float> undef, float %xFloat0.48, i32 0 ; <<4 x float>> [#uses=1]
- %xFloat1.50 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 1 ; <float> [#uses=1]
- %inFloat1.52 = insertelement <4 x float> %inFloat0.49, float %xFloat1.50, i32 1 ; <<4 x float>> [#uses=1]
- %xFloat2.53 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 2 ; <float> [#uses=1]
- %inFloat2.55 = insertelement <4 x float> %inFloat1.52, float %xFloat2.53, i32 2 ; <<4 x float>> [#uses=1]
- %xFloat3.56 = extractelement <4 x float> %tmp2.i.upgrd.1, i32 3 ; <float> [#uses=1]
- %inFloat3.58 = insertelement <4 x float> %inFloat2.55, float %xFloat3.56, i32 3 ; <<4 x float>> [#uses=1]
- store <4 x float> %inFloat3.58, <4 x float>* %tmp2.i
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/PowerPC/vector.ll b/libclamav/c++/llvm/test/CodeGen/PowerPC/vector.ll
deleted file mode 100644
index ee4da31..0000000
--- a/libclamav/c++/llvm/test/CodeGen/PowerPC/vector.ll
+++ /dev/null
@@ -1,158 +0,0 @@
-; Test that vectors are scalarized/lowered correctly.
-; RUN: llc < %s -march=ppc32 -mcpu=g5 > %t
-; RUN: llc < %s -march=ppc32 -mcpu=g3 > %t
-
-%d8 = type <8 x double>
-%f1 = type <1 x float>
-%f2 = type <2 x float>
-%f4 = type <4 x float>
-%f8 = type <8 x float>
-%i4 = type <4 x i32>
-
-;;; TEST HANDLING OF VARIOUS VECTOR SIZES
-
-define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) {
- %p = load %f1* %P ; <%f1> [#uses=1]
- %q = load %f1* %Q ; <%f1> [#uses=1]
- %R = fadd %f1 %p, %q ; <%f1> [#uses=1]
- store %f1 %R, %f1* %S
- ret void
-}
-
-define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) {
- %p = load %f2* %P ; <%f2> [#uses=1]
- %q = load %f2* %Q ; <%f2> [#uses=1]
- %R = fadd %f2 %p, %q ; <%f2> [#uses=1]
- store %f2 %R, %f2* %S
- ret void
-}
-
-define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
- %q = load %f4* %Q ; <%f4> [#uses=1]
- %R = fadd %f4 %p, %q ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %q = load %f8* %Q ; <%f8> [#uses=1]
- %R = fadd %f8 %p, %q ; <%f8> [#uses=1]
- store %f8 %R, %f8* %S
- ret void
-}
-
-define void @test_fmul(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %q = load %f8* %Q ; <%f8> [#uses=1]
- %R = fmul %f8 %p, %q ; <%f8> [#uses=1]
- store %f8 %R, %f8* %S
- ret void
-}
-
-define void @test_div(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %q = load %f8* %Q ; <%f8> [#uses=1]
- %R = fdiv %f8 %p, %q ; <%f8> [#uses=1]
- store %f8 %R, %f8* %S
- ret void
-}
-
-;;; TEST VECTOR CONSTRUCTS
-
-define void @test_cst(%f4* %P, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
- %R = fadd %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float
- 2.000000e+00, float 4.500000e+00 > ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_zero(%f4* %P, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
- %R = fadd %f4 %p, zeroinitializer ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_undef(%f4* %P, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
- %R = fadd %f4 %p, undef ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_constant_insert(%f4* %S) {
- %R = insertelement %f4 zeroinitializer, float 1.000000e+01, i32 0
- ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_variable_buildvector(float %F, %f4* %S) {
- %R = insertelement %f4 zeroinitializer, float %F, i32 0
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_scalar_to_vector(float %F, %f4* %S) {
- %R = insertelement %f4 undef, float %F, i32 0
- store %f4 %R, %f4* %S
- ret void
-}
-
-define float @test_extract_elt(%f8* %P) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %R = extractelement %f8 %p, i32 3 ; <float> [#uses=1]
- ret float %R
-}
-
-define double @test_extract_elt2(%d8* %P) {
- %p = load %d8* %P ; <%d8> [#uses=1]
- %R = extractelement %d8 %p, i32 3 ; <double> [#uses=1]
- ret double %R
-}
-
-define void @test_cast_1(%f4* %b, %i4* %a) {
- %tmp = load %f4* %b ; <%f4> [#uses=1]
- %tmp2 = fadd %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float
-3.000000e+00, float 4.000000e+00 > ; <%f4> [#uses=1]
- %tmp3 = bitcast %f4 %tmp2 to %i4 ; <%i4> [#uses=1]
- %tmp4 = add %i4 %tmp3, < i32 1, i32 2, i32 3, i32 4 >
- store %i4 %tmp4, %i4* %a
- ret void
-}
-
-define void @test_cast_2(%f8* %a, <8 x i32>* %b) {
- %T = load %f8* %a ; <%f8> [#uses=1]
- %T2 = bitcast %f8 %T to <8 x i32>
- store <8 x i32> %T2, <8 x i32>* %b
- ret void
-}
-
-
-;;; TEST IMPORTANT IDIOMS
-
-define void @splat(%f4* %P, %f4* %Q, float %X) {
- %tmp = insertelement %f4 undef, float %X, i32 0
- %tmp2 = insertelement %f4 %tmp, float %X, i32 1
- %tmp4 = insertelement %f4 %tmp2, float %X, i32 2
- %tmp6 = insertelement %f4 %tmp4, float %X, i32 3
- %q = load %f4* %Q ; <%f4> [#uses=1]
- %R = fadd %f4 %q, %tmp6 ; <%f4> [#uses=1]
- store %f4 %R, %f4* %P
- ret void
-}
-
-define void @splat_i4(%i4* %P, %i4* %Q, i32 %X) {
- %tmp = insertelement %i4 undef, i32 %X, i32 0
- %tmp2 = insertelement %i4 %tmp, i32 %X, i32 1
- %tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2
- %tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3
- %q = load %i4* %Q ; <%i4> [#uses=1]
- %R = add %i4 %q, %tmp6 ; <%i4> [#uses=1]
- store %i4 %R, %i4* %P
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/2007-01-31-RegInfoAssert.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/2007-01-31-RegInfoAssert.ll
deleted file mode 100644
index 1e61b23..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/2007-01-31-RegInfoAssert.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -mtriple=thumb-apple-darwin
-
-%struct.rtx_def = type { i8 }
- at str = external global [7 x i8]
-
-define void @f1() {
- %D = alloca %struct.rtx_def, align 1
- %tmp1 = bitcast %struct.rtx_def* %D to i32*
- %tmp7 = load i32* %tmp1
- %tmp14 = lshr i32 %tmp7, 1
- %tmp1415 = and i32 %tmp14, 1
- call void (i32, ...)* @printf( i32 undef, i32 0, i32 %tmp1415 )
- ret void
-}
-
-declare void @printf(i32, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/2007-02-02-JoinIntervalsCrash.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/2007-02-02-JoinIntervalsCrash.ll
deleted file mode 100644
index be2b839..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/2007-02-02-JoinIntervalsCrash.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -mtriple=thumb-apple-darwin
-
- %struct.color_sample = type { i32 }
- %struct.ref = type { %struct.color_sample, i16, i16 }
-
-define void @zcvrs() {
- br i1 false, label %bb22, label %UnifiedReturnBlock
-
-bb22:
- br i1 false, label %bb64, label %UnifiedReturnBlock
-
-bb64:
- %tmp67 = urem i32 0, 0
- %tmp69 = icmp slt i32 %tmp67, 10
- %iftmp.13.0 = select i1 %tmp69, i8 48, i8 55
- %tmp75 = add i8 %iftmp.13.0, 0
- store i8 %tmp75, i8* null
- %tmp81 = udiv i32 0, 0
- %tmp83 = icmp eq i32 %tmp81, 0
- br i1 %tmp83, label %bb85, label %bb64
-
-bb85:
- ret void
-
-UnifiedReturnBlock:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/2007-03-06-AddR7.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/2007-03-06-AddR7.ll
deleted file mode 100644
index 8d139e9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/2007-03-06-AddR7.ll
+++ /dev/null
@@ -1,117 +0,0 @@
-; RUN: llc < %s -march=thumb
-; RUN: llc < %s -mtriple=thumb-apple-darwin -relocation-model=pic \
-; RUN: -mattr=+v6,+vfp2 | not grep {add r., r7, #2 \\* 4}
-
- %struct.__fooAllocator = type opaque
- %struct.__fooY = type { %struct.fooXBase, %struct.__fooString*, %struct.__fooU*, %struct.__fooV*, i8** }
- %struct.__fooZ = type opaque
- %struct.__fooU = type opaque
- %struct.__fooString = type opaque
- %struct.__fooV = type opaque
- %struct.fooXBase = type { i32, [4 x i8] }
- %struct.fooXClass = type { i32, i8*, void (i8*)*, i8* (%struct.__fooAllocator*, i8*)*, void (i8*)*, i8 (i8*, i8*) zeroext *, i32 (i8*)*, %struct.__fooString* (i8*, %struct.__fooZ*)*, %struct.__fooString* (i8*)* }
- %struct.aa_cache = type { i32, i32, [1 x %struct.aa_method*] }
- %struct.aa_class = type { %struct.aa_class*, %struct.aa_class*, i8*, i32, i32, i32, %struct.aa_ivar_list*, %struct.aa_method_list**, %struct.aa_cache*, %struct.aa_protocol_list* }
- %struct.aa_ivar = type { i8*, i8*, i32 }
- %struct.aa_ivar_list = type { i32, [1 x %struct.aa_ivar] }
- %struct.aa_method = type { %struct.aa_ss*, i8*, %struct.aa_object* (%struct.aa_object*, %struct.aa_ss*, ...)* }
- %struct.aa_method_list = type { %struct.aa_method_list*, i32, [1 x %struct.aa_method] }
- %struct.aa_object = type { %struct.aa_class* }
- %struct.aa_protocol_list = type { %struct.aa_protocol_list*, i32, [1 x %struct.aa_object*] }
- %struct.aa_ss = type opaque
- at __kfooYTypeID = external global i32 ; <i32*> [#uses=3]
- at __fooYClass = external constant %struct.fooXClass ; <%struct.fooXClass*> [#uses=1]
- at __fooXClassTableSize = external global i32 ; <i32*> [#uses=1]
- at __fooXAaClassTable = external global i32* ; <i32**> [#uses=1]
- at s.10319 = external global %struct.aa_ss* ; <%struct.aa_ss**> [#uses=2]
- at str15 = external constant [24 x i8] ; <[24 x i8]*> [#uses=1]
-
-
-define i8 @test(%struct.__fooY* %calendar, double* %atp, i8* %componentDesc, ...) zeroext {
-entry:
- %args = alloca i8*, align 4 ; <i8**> [#uses=5]
- %args4 = bitcast i8** %args to i8* ; <i8*> [#uses=2]
- call void @llvm.va_start( i8* %args4 )
- %tmp6 = load i32* @__kfooYTypeID ; <i32> [#uses=1]
- icmp eq i32 %tmp6, 0 ; <i1>:0 [#uses=1]
- br i1 %0, label %cond_true, label %cond_next
-
-cond_true: ; preds = %entry
- %tmp7 = call i32 @_fooXRegisterClass( %struct.fooXClass* @__fooYClass ) ; <i32> [#uses=1]
- store i32 %tmp7, i32* @__kfooYTypeID
- br label %cond_next
-
-cond_next: ; preds = %cond_true, %entry
- %tmp8 = load i32* @__kfooYTypeID ; <i32> [#uses=2]
- %tmp15 = load i32* @__fooXClassTableSize ; <i32> [#uses=1]
- icmp ugt i32 %tmp15, %tmp8 ; <i1>:1 [#uses=1]
- br i1 %1, label %cond_next18, label %cond_true58
-
-cond_next18: ; preds = %cond_next
- %tmp21 = getelementptr %struct.__fooY* %calendar, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp22 = load i32* %tmp21 ; <i32> [#uses=2]
- %tmp29 = load i32** @__fooXAaClassTable ; <i32*> [#uses=1]
- %tmp31 = getelementptr i32* %tmp29, i32 %tmp8 ; <i32*> [#uses=1]
- %tmp32 = load i32* %tmp31 ; <i32> [#uses=1]
- icmp eq i32 %tmp22, %tmp32 ; <i1>:2 [#uses=1]
- %.not = xor i1 %2, true ; <i1> [#uses=1]
- icmp ugt i32 %tmp22, 4095 ; <i1>:3 [#uses=1]
- %bothcond = and i1 %.not, %3 ; <i1> [#uses=1]
- br i1 %bothcond, label %cond_true58, label %bb48
-
-bb48: ; preds = %cond_next18
- %tmp78 = call i32 @strlen( i8* %componentDesc ) ; <i32> [#uses=4]
- %tmp92 = alloca i32, i32 %tmp78 ; <i32*> [#uses=2]
- icmp sgt i32 %tmp78, 0 ; <i1>:4 [#uses=1]
- br i1 %4, label %cond_true111, label %bb114
-
-cond_true58: ; preds = %cond_next18, %cond_next
- %tmp59 = load %struct.aa_ss** @s.10319 ; <%struct.aa_ss*> [#uses=2]
- icmp eq %struct.aa_ss* %tmp59, null ; <i1>:5 [#uses=1]
- %tmp6869 = bitcast %struct.__fooY* %calendar to i8* ; <i8*> [#uses=2]
- br i1 %5, label %cond_true60, label %cond_next64
-
-cond_true60: ; preds = %cond_true58
- %tmp63 = call %struct.aa_ss* @sel_registerName( i8* getelementptr ([24 x i8]* @str15, i32 0, i32 0) ) ; <%struct.aa_ss*> [#uses=2]
- store %struct.aa_ss* %tmp63, %struct.aa_ss** @s.10319
- %tmp66137 = volatile load i8** %args ; <i8*> [#uses=1]
- %tmp73138 = call i8 (i8*, %struct.aa_ss*, ...) zeroext * bitcast (%struct.aa_object* (%struct.aa_object*, %struct.aa_ss*, ...)* @aa_mm to i8 (i8*, %struct.aa_ss*, ...) zeroext *)( i8* %tmp6869, %struct.aa_ss* %tmp63, double* %atp, i8* %componentDesc, i8* %tmp66137) zeroext ; <i8> [#uses=1]
- ret i8 %tmp73138
-
-cond_next64: ; preds = %cond_true58
- %tmp66 = volatile load i8** %args ; <i8*> [#uses=1]
- %tmp73 = call i8 (i8*, %struct.aa_ss*, ...) zeroext * bitcast (%struct.aa_object* (%struct.aa_object*, %struct.aa_ss*, ...)* @aa_mm to i8 (i8*, %struct.aa_ss*, ...) zeroext *)( i8* %tmp6869, %struct.aa_ss* %tmp59, double* %atp, i8* %componentDesc, i8* %tmp66 ) zeroext ; <i8> [#uses=1]
- ret i8 %tmp73
-
-cond_true111: ; preds = %cond_true111, %bb48
- %idx.2132.0 = phi i32 [ 0, %bb48 ], [ %indvar.next, %cond_true111 ] ; <i32> [#uses=2]
- %tmp95 = volatile load i8** %args ; <i8*> [#uses=2]
- %tmp97 = getelementptr i8* %tmp95, i32 4 ; <i8*> [#uses=1]
- volatile store i8* %tmp97, i8** %args
- %tmp9899 = bitcast i8* %tmp95 to i32* ; <i32*> [#uses=1]
- %tmp100 = load i32* %tmp9899 ; <i32> [#uses=1]
- %tmp104 = getelementptr i32* %tmp92, i32 %idx.2132.0 ; <i32*> [#uses=1]
- store i32 %tmp100, i32* %tmp104
- %indvar.next = add i32 %idx.2132.0, 1 ; <i32> [#uses=2]
- icmp eq i32 %indvar.next, %tmp78 ; <i1>:6 [#uses=1]
- br i1 %6, label %bb114, label %cond_true111
-
-bb114: ; preds = %cond_true111, %bb48
- call void @llvm.va_end( i8* %args4 )
- %tmp122 = call i8 @_fooYCCV( %struct.__fooY* %calendar, double* %atp, i8* %componentDesc, i32* %tmp92, i32 %tmp78 ) zeroext ; <i8> [#uses=1]
- ret i8 %tmp122
-}
-
-declare i32 @_fooXRegisterClass(%struct.fooXClass*)
-
-declare i8 @_fooYCCV(%struct.__fooY*, double*, i8*, i32*, i32) zeroext
-
-declare %struct.aa_object* @aa_mm(%struct.aa_object*, %struct.aa_ss*, ...)
-
-declare %struct.aa_ss* @sel_registerName(i8*)
-
-declare void @llvm.va_start(i8*)
-
-declare i32 @strlen(i8*)
-
-declare void @llvm.va_end(i8*)
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/2007-05-05-InvalidPushPop.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/2007-05-05-InvalidPushPop.ll
deleted file mode 100644
index 2074bfd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/2007-05-05-InvalidPushPop.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s | not grep r11
-
-target triple = "thumb-linux-gnueabi"
- %struct.__sched_param = type { i32 }
- %struct.pthread_attr_t = type { i32, i32, %struct.__sched_param, i32, i32, i32, i32, i8*, i32 }
- at i.1882 = internal global i32 1 ; <i32*> [#uses=2]
- at .str = internal constant [14 x i8] c"Thread 1: %d\0A\00" ; <[14 x i8]*> [#uses=1]
- at .str1 = internal constant [14 x i8] c"Thread 2: %d\0A\00" ; <[14 x i8]*> [#uses=1]
-
-define i8* @f(i8* %a) {
-entry:
- %tmp1 = load i32* @i.1882 ; <i32> [#uses=1]
- %tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=2]
- store i32 %tmp2, i32* @i.1882
- %tmp34 = inttoptr i32 %tmp2 to i8* ; <i8*> [#uses=1]
- ret i8* %tmp34
-}
-
-define i32 @main() {
-entry:
- %t = alloca i32, align 4 ; <i32*> [#uses=4]
- %ret = alloca i32, align 4 ; <i32*> [#uses=3]
- %tmp1 = call i32 @pthread_create( i32* %t, %struct.pthread_attr_t* null, i8* (i8*)* @f, i8* null ) ; <i32> [#uses=0]
- %tmp2 = load i32* %t ; <i32> [#uses=1]
- %ret3 = bitcast i32* %ret to i8** ; <i8**> [#uses=2]
- %tmp4 = call i32 @pthread_join( i32 %tmp2, i8** %ret3 ) ; <i32> [#uses=0]
- %tmp5 = load i32* %ret ; <i32> [#uses=1]
- %tmp7 = call i32 (i8*, ...)* @printf( i8* getelementptr ([14 x i8]* @.str, i32 0, i32 0), i32 %tmp5 ) ; <i32> [#uses=0]
- %tmp8 = call i32 @pthread_create( i32* %t, %struct.pthread_attr_t* null, i8* (i8*)* @f, i8* null ) ; <i32> [#uses=0]
- %tmp9 = load i32* %t ; <i32> [#uses=1]
- %tmp11 = call i32 @pthread_join( i32 %tmp9, i8** %ret3 ) ; <i32> [#uses=0]
- %tmp12 = load i32* %ret ; <i32> [#uses=1]
- %tmp14 = call i32 (i8*, ...)* @printf( i8* getelementptr ([14 x i8]* @.str1, i32 0, i32 0), i32 %tmp12 ) ; <i32> [#uses=0]
- ret i32 0
-}
-
-declare i32 @pthread_create(i32*, %struct.pthread_attr_t*, i8* (i8*)*, i8*)
-
-declare i32 @pthread_join(i32, i8**)
-
-declare i32 @printf(i8*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/2009-06-18-ThumbCommuteMul.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/2009-06-18-ThumbCommuteMul.ll
deleted file mode 100644
index 5c883b3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/2009-06-18-ThumbCommuteMul.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=thumb | grep r0 | count 1
-
-define i32 @a(i32 %x, i32 %y) nounwind readnone {
-entry:
- %mul = mul i32 %y, %x ; <i32> [#uses=1]
- ret i32 %mul
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/2009-07-19-SPDecBug.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/2009-07-19-SPDecBug.ll
deleted file mode 100644
index 471a82f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/2009-07-19-SPDecBug.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv6-elf | not grep "subs sp"
-; PR4567
-
-define arm_apcscc i8* @__gets_chk(i8* %s, i32 %slen) nounwind {
-entry:
- br i1 undef, label %bb, label %bb1
-
-bb: ; preds = %entry
- ret i8* undef
-
-bb1: ; preds = %entry
- br i1 undef, label %bb3, label %bb2
-
-bb2: ; preds = %bb1
- %0 = alloca i8, i32 undef, align 4 ; <i8*> [#uses=0]
- br label %bb4
-
-bb3: ; preds = %bb1
- %1 = malloc i8, i32 undef ; <i8*> [#uses=0]
- br label %bb4
-
-bb4: ; preds = %bb3, %bb2
- br i1 undef, label %bb5, label %bb6
-
-bb5: ; preds = %bb4
- %2 = call arm_apcscc i8* @gets(i8* %s) nounwind ; <i8*> [#uses=1]
- ret i8* %2
-
-bb6: ; preds = %bb4
- unreachable
-}
-
-declare arm_apcscc i8* @gets(i8*) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/2009-07-20-TwoAddrBug.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/2009-07-20-TwoAddrBug.ll
deleted file mode 100644
index 6e035d0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/2009-07-20-TwoAddrBug.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv6-apple-darwin10
-
- at Time.2535 = external global i64 ; <i64*> [#uses=2]
-
-define arm_apcscc i64 @millisecs() nounwind {
-entry:
- %0 = load i64* @Time.2535, align 4 ; <i64> [#uses=2]
- %1 = add i64 %0, 1 ; <i64> [#uses=1]
- store i64 %1, i64* @Time.2535, align 4
- ret i64 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/2009-07-27-PEIAssert.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/2009-07-27-PEIAssert.ll
deleted file mode 100644
index f195348..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/2009-07-27-PEIAssert.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv6-apple-darwin -relocation-model=pic -disable-fp-elim
-
- %struct.LinkList = type { i32, %struct.LinkList* }
- %struct.List = type { i32, i32* }
- at llvm.used = appending global [1 x i8*] [i8* bitcast (i32 ()* @main to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define arm_apcscc i32 @main() nounwind {
-entry:
- %ll = alloca %struct.LinkList*, align 4 ; <%struct.LinkList**> [#uses=1]
- %0 = call arm_apcscc i32 @ReadList(%struct.LinkList** %ll, %struct.List** null) nounwind ; <i32> [#uses=1]
- switch i32 %0, label %bb5 [
- i32 7, label %bb4
- i32 42, label %bb3
- ]
-
-bb3: ; preds = %entry
- ret i32 1
-
-bb4: ; preds = %entry
- ret i32 0
-
-bb5: ; preds = %entry
- ret i32 1
-}
-
-declare arm_apcscc i32 @ReadList(%struct.LinkList** nocapture, %struct.List** nocapture) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/2009-08-12-ConstIslandAssert.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/2009-08-12-ConstIslandAssert.ll
deleted file mode 100644
index ef4b5ce..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/2009-08-12-ConstIslandAssert.ll
+++ /dev/null
@@ -1,737 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv6-apple-darwin
-
- %struct.BF_KEY = type { [18 x i32], [1024 x i32] }
-
-define arm_apcscc void @BF_encrypt(i32* nocapture %data, %struct.BF_KEY* nocapture %key, i32 %encrypt) nounwind {
-entry:
- %0 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 0; <i32*> [#uses=2]
- %1 = load i32* %data, align 4 ; <i32> [#uses=2]
- %2 = load i32* undef, align 4 ; <i32> [#uses=2]
- br i1 undef, label %bb1, label %bb
-
-bb: ; preds = %entry
- %3 = load i32* %0, align 4 ; <i32> [#uses=1]
- %4 = xor i32 %3, %1 ; <i32> [#uses=4]
- %5 = load i32* null, align 4 ; <i32> [#uses=1]
- %6 = lshr i32 %4, 24 ; <i32> [#uses=1]
- %7 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %6; <i32*> [#uses=1]
- %8 = load i32* %7, align 4 ; <i32> [#uses=1]
- %9 = lshr i32 %4, 16 ; <i32> [#uses=1]
- %10 = or i32 %9, 256 ; <i32> [#uses=1]
- %11 = and i32 %10, 511 ; <i32> [#uses=1]
- %12 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %11; <i32*> [#uses=1]
- %13 = load i32* %12, align 4 ; <i32> [#uses=1]
- %14 = add i32 %13, %8 ; <i32> [#uses=1]
- %15 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 undef; <i32*> [#uses=1]
- %16 = load i32* %15, align 4 ; <i32> [#uses=1]
- %17 = xor i32 %14, %16 ; <i32> [#uses=1]
- %18 = or i32 %4, 768 ; <i32> [#uses=1]
- %19 = and i32 %18, 1023 ; <i32> [#uses=1]
- %20 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %19; <i32*> [#uses=1]
- %21 = load i32* %20, align 4 ; <i32> [#uses=1]
- %22 = add i32 %17, %21 ; <i32> [#uses=1]
- %23 = xor i32 %5, %2 ; <i32> [#uses=1]
- %24 = xor i32 %23, %22 ; <i32> [#uses=5]
- %25 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 2; <i32*> [#uses=1]
- %26 = load i32* %25, align 4 ; <i32> [#uses=1]
- %27 = lshr i32 %24, 24 ; <i32> [#uses=1]
- %28 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %27; <i32*> [#uses=1]
- %29 = load i32* %28, align 4 ; <i32> [#uses=1]
- %30 = lshr i32 %24, 16 ; <i32> [#uses=1]
- %31 = or i32 %30, 256 ; <i32> [#uses=1]
- %32 = and i32 %31, 511 ; <i32> [#uses=1]
- %33 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %32; <i32*> [#uses=1]
- %34 = load i32* %33, align 4 ; <i32> [#uses=1]
- %35 = add i32 %34, %29 ; <i32> [#uses=1]
- %36 = lshr i32 %24, 8 ; <i32> [#uses=1]
- %37 = or i32 %36, 512 ; <i32> [#uses=1]
- %38 = and i32 %37, 767 ; <i32> [#uses=1]
- %39 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %38; <i32*> [#uses=1]
- %40 = load i32* %39, align 4 ; <i32> [#uses=1]
- %41 = xor i32 %35, %40 ; <i32> [#uses=1]
- %42 = or i32 %24, 768 ; <i32> [#uses=1]
- %43 = and i32 %42, 1023 ; <i32> [#uses=1]
- %44 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %43; <i32*> [#uses=1]
- %45 = load i32* %44, align 4 ; <i32> [#uses=1]
- %46 = add i32 %41, %45 ; <i32> [#uses=1]
- %47 = xor i32 %26, %4 ; <i32> [#uses=1]
- %48 = xor i32 %47, %46 ; <i32> [#uses=5]
- %49 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 3; <i32*> [#uses=1]
- %50 = load i32* %49, align 4 ; <i32> [#uses=1]
- %51 = lshr i32 %48, 24 ; <i32> [#uses=1]
- %52 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %51; <i32*> [#uses=1]
- %53 = load i32* %52, align 4 ; <i32> [#uses=1]
- %54 = lshr i32 %48, 16 ; <i32> [#uses=1]
- %55 = or i32 %54, 256 ; <i32> [#uses=1]
- %56 = and i32 %55, 511 ; <i32> [#uses=1]
- %57 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %56; <i32*> [#uses=1]
- %58 = load i32* %57, align 4 ; <i32> [#uses=1]
- %59 = add i32 %58, %53 ; <i32> [#uses=1]
- %60 = lshr i32 %48, 8 ; <i32> [#uses=1]
- %61 = or i32 %60, 512 ; <i32> [#uses=1]
- %62 = and i32 %61, 767 ; <i32> [#uses=1]
- %63 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %62; <i32*> [#uses=1]
- %64 = load i32* %63, align 4 ; <i32> [#uses=1]
- %65 = xor i32 %59, %64 ; <i32> [#uses=1]
- %66 = or i32 %48, 768 ; <i32> [#uses=1]
- %67 = and i32 %66, 1023 ; <i32> [#uses=1]
- %68 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %67; <i32*> [#uses=1]
- %69 = load i32* %68, align 4 ; <i32> [#uses=1]
- %70 = add i32 %65, %69 ; <i32> [#uses=1]
- %71 = xor i32 %50, %24 ; <i32> [#uses=1]
- %72 = xor i32 %71, %70 ; <i32> [#uses=5]
- %73 = load i32* null, align 4 ; <i32> [#uses=1]
- %74 = lshr i32 %72, 24 ; <i32> [#uses=1]
- %75 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %74; <i32*> [#uses=1]
- %76 = load i32* %75, align 4 ; <i32> [#uses=1]
- %77 = lshr i32 %72, 16 ; <i32> [#uses=1]
- %78 = or i32 %77, 256 ; <i32> [#uses=1]
- %79 = and i32 %78, 511 ; <i32> [#uses=1]
- %80 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %79; <i32*> [#uses=1]
- %81 = load i32* %80, align 4 ; <i32> [#uses=1]
- %82 = add i32 %81, %76 ; <i32> [#uses=1]
- %83 = lshr i32 %72, 8 ; <i32> [#uses=1]
- %84 = or i32 %83, 512 ; <i32> [#uses=1]
- %85 = and i32 %84, 767 ; <i32> [#uses=1]
- %86 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %85; <i32*> [#uses=1]
- %87 = load i32* %86, align 4 ; <i32> [#uses=1]
- %88 = xor i32 %82, %87 ; <i32> [#uses=1]
- %89 = or i32 %72, 768 ; <i32> [#uses=1]
- %90 = and i32 %89, 1023 ; <i32> [#uses=1]
- %91 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %90; <i32*> [#uses=1]
- %92 = load i32* %91, align 4 ; <i32> [#uses=1]
- %93 = add i32 %88, %92 ; <i32> [#uses=1]
- %94 = xor i32 %73, %48 ; <i32> [#uses=1]
- %95 = xor i32 %94, %93 ; <i32> [#uses=5]
- %96 = load i32* undef, align 4 ; <i32> [#uses=1]
- %97 = lshr i32 %95, 24 ; <i32> [#uses=1]
- %98 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %97; <i32*> [#uses=1]
- %99 = load i32* %98, align 4 ; <i32> [#uses=1]
- %100 = lshr i32 %95, 16 ; <i32> [#uses=1]
- %101 = or i32 %100, 256 ; <i32> [#uses=1]
- %102 = and i32 %101, 511 ; <i32> [#uses=1]
- %103 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %102; <i32*> [#uses=1]
- %104 = load i32* %103, align 4 ; <i32> [#uses=1]
- %105 = add i32 %104, %99 ; <i32> [#uses=1]
- %106 = lshr i32 %95, 8 ; <i32> [#uses=1]
- %107 = or i32 %106, 512 ; <i32> [#uses=1]
- %108 = and i32 %107, 767 ; <i32> [#uses=1]
- %109 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %108; <i32*> [#uses=1]
- %110 = load i32* %109, align 4 ; <i32> [#uses=1]
- %111 = xor i32 %105, %110 ; <i32> [#uses=1]
- %112 = or i32 %95, 768 ; <i32> [#uses=1]
- %113 = and i32 %112, 1023 ; <i32> [#uses=1]
- %114 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %113; <i32*> [#uses=1]
- %115 = load i32* %114, align 4 ; <i32> [#uses=1]
- %116 = add i32 %111, %115 ; <i32> [#uses=1]
- %117 = xor i32 %96, %72 ; <i32> [#uses=1]
- %118 = xor i32 %117, %116 ; <i32> [#uses=5]
- %119 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 6; <i32*> [#uses=1]
- %120 = load i32* %119, align 4 ; <i32> [#uses=1]
- %121 = lshr i32 %118, 24 ; <i32> [#uses=1]
- %122 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %121; <i32*> [#uses=1]
- %123 = load i32* %122, align 4 ; <i32> [#uses=1]
- %124 = lshr i32 %118, 16 ; <i32> [#uses=1]
- %125 = or i32 %124, 256 ; <i32> [#uses=1]
- %126 = and i32 %125, 511 ; <i32> [#uses=1]
- %127 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %126; <i32*> [#uses=1]
- %128 = load i32* %127, align 4 ; <i32> [#uses=1]
- %129 = add i32 %128, %123 ; <i32> [#uses=1]
- %130 = lshr i32 %118, 8 ; <i32> [#uses=1]
- %131 = or i32 %130, 512 ; <i32> [#uses=1]
- %132 = and i32 %131, 767 ; <i32> [#uses=1]
- %133 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %132; <i32*> [#uses=1]
- %134 = load i32* %133, align 4 ; <i32> [#uses=1]
- %135 = xor i32 %129, %134 ; <i32> [#uses=1]
- %136 = or i32 %118, 768 ; <i32> [#uses=1]
- %137 = and i32 %136, 1023 ; <i32> [#uses=1]
- %138 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %137; <i32*> [#uses=1]
- %139 = load i32* %138, align 4 ; <i32> [#uses=1]
- %140 = add i32 %135, %139 ; <i32> [#uses=1]
- %141 = xor i32 %120, %95 ; <i32> [#uses=1]
- %142 = xor i32 %141, %140 ; <i32> [#uses=5]
- %143 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 7; <i32*> [#uses=1]
- %144 = load i32* %143, align 4 ; <i32> [#uses=1]
- %145 = lshr i32 %142, 24 ; <i32> [#uses=1]
- %146 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %145; <i32*> [#uses=1]
- %147 = load i32* %146, align 4 ; <i32> [#uses=1]
- %148 = lshr i32 %142, 16 ; <i32> [#uses=1]
- %149 = or i32 %148, 256 ; <i32> [#uses=1]
- %150 = and i32 %149, 511 ; <i32> [#uses=1]
- %151 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %150; <i32*> [#uses=1]
- %152 = load i32* %151, align 4 ; <i32> [#uses=1]
- %153 = add i32 %152, %147 ; <i32> [#uses=1]
- %154 = lshr i32 %142, 8 ; <i32> [#uses=1]
- %155 = or i32 %154, 512 ; <i32> [#uses=1]
- %156 = and i32 %155, 767 ; <i32> [#uses=1]
- %157 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %156; <i32*> [#uses=1]
- %158 = load i32* %157, align 4 ; <i32> [#uses=1]
- %159 = xor i32 %153, %158 ; <i32> [#uses=1]
- %160 = or i32 %142, 768 ; <i32> [#uses=1]
- %161 = and i32 %160, 1023 ; <i32> [#uses=1]
- %162 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %161; <i32*> [#uses=1]
- %163 = load i32* %162, align 4 ; <i32> [#uses=1]
- %164 = add i32 %159, %163 ; <i32> [#uses=1]
- %165 = xor i32 %144, %118 ; <i32> [#uses=1]
- %166 = xor i32 %165, %164 ; <i32> [#uses=5]
- %167 = load i32* undef, align 4 ; <i32> [#uses=1]
- %168 = lshr i32 %166, 24 ; <i32> [#uses=1]
- %169 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %168; <i32*> [#uses=1]
- %170 = load i32* %169, align 4 ; <i32> [#uses=1]
- %171 = lshr i32 %166, 16 ; <i32> [#uses=1]
- %172 = or i32 %171, 256 ; <i32> [#uses=1]
- %173 = and i32 %172, 511 ; <i32> [#uses=1]
- %174 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %173; <i32*> [#uses=1]
- %175 = load i32* %174, align 4 ; <i32> [#uses=1]
- %176 = add i32 %175, %170 ; <i32> [#uses=1]
- %177 = lshr i32 %166, 8 ; <i32> [#uses=1]
- %178 = or i32 %177, 512 ; <i32> [#uses=1]
- %179 = and i32 %178, 767 ; <i32> [#uses=1]
- %180 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %179; <i32*> [#uses=1]
- %181 = load i32* %180, align 4 ; <i32> [#uses=1]
- %182 = xor i32 %176, %181 ; <i32> [#uses=1]
- %183 = or i32 %166, 768 ; <i32> [#uses=1]
- %184 = and i32 %183, 1023 ; <i32> [#uses=1]
- %185 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %184; <i32*> [#uses=1]
- %186 = load i32* %185, align 4 ; <i32> [#uses=1]
- %187 = add i32 %182, %186 ; <i32> [#uses=1]
- %188 = xor i32 %167, %142 ; <i32> [#uses=1]
- %189 = xor i32 %188, %187 ; <i32> [#uses=5]
- %190 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 9; <i32*> [#uses=1]
- %191 = load i32* %190, align 4 ; <i32> [#uses=1]
- %192 = lshr i32 %189, 24 ; <i32> [#uses=1]
- %193 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %192; <i32*> [#uses=1]
- %194 = load i32* %193, align 4 ; <i32> [#uses=1]
- %195 = lshr i32 %189, 16 ; <i32> [#uses=1]
- %196 = or i32 %195, 256 ; <i32> [#uses=1]
- %197 = and i32 %196, 511 ; <i32> [#uses=1]
- %198 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %197; <i32*> [#uses=1]
- %199 = load i32* %198, align 4 ; <i32> [#uses=1]
- %200 = add i32 %199, %194 ; <i32> [#uses=1]
- %201 = lshr i32 %189, 8 ; <i32> [#uses=1]
- %202 = or i32 %201, 512 ; <i32> [#uses=1]
- %203 = and i32 %202, 767 ; <i32> [#uses=1]
- %204 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %203; <i32*> [#uses=1]
- %205 = load i32* %204, align 4 ; <i32> [#uses=1]
- %206 = xor i32 %200, %205 ; <i32> [#uses=1]
- %207 = or i32 %189, 768 ; <i32> [#uses=1]
- %208 = and i32 %207, 1023 ; <i32> [#uses=1]
- %209 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %208; <i32*> [#uses=1]
- %210 = load i32* %209, align 4 ; <i32> [#uses=1]
- %211 = add i32 %206, %210 ; <i32> [#uses=1]
- %212 = xor i32 %191, %166 ; <i32> [#uses=1]
- %213 = xor i32 %212, %211 ; <i32> [#uses=5]
- %214 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 10; <i32*> [#uses=1]
- %215 = load i32* %214, align 4 ; <i32> [#uses=1]
- %216 = lshr i32 %213, 24 ; <i32> [#uses=1]
- %217 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %216; <i32*> [#uses=1]
- %218 = load i32* %217, align 4 ; <i32> [#uses=1]
- %219 = lshr i32 %213, 16 ; <i32> [#uses=1]
- %220 = or i32 %219, 256 ; <i32> [#uses=1]
- %221 = and i32 %220, 511 ; <i32> [#uses=1]
- %222 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %221; <i32*> [#uses=1]
- %223 = load i32* %222, align 4 ; <i32> [#uses=1]
- %224 = add i32 %223, %218 ; <i32> [#uses=1]
- %225 = lshr i32 %213, 8 ; <i32> [#uses=1]
- %226 = or i32 %225, 512 ; <i32> [#uses=1]
- %227 = and i32 %226, 767 ; <i32> [#uses=1]
- %228 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %227; <i32*> [#uses=1]
- %229 = load i32* %228, align 4 ; <i32> [#uses=1]
- %230 = xor i32 %224, %229 ; <i32> [#uses=1]
- %231 = or i32 %213, 768 ; <i32> [#uses=1]
- %232 = and i32 %231, 1023 ; <i32> [#uses=1]
- %233 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %232; <i32*> [#uses=1]
- %234 = load i32* %233, align 4 ; <i32> [#uses=1]
- %235 = add i32 %230, %234 ; <i32> [#uses=1]
- %236 = xor i32 %215, %189 ; <i32> [#uses=1]
- %237 = xor i32 %236, %235 ; <i32> [#uses=5]
- %238 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 11; <i32*> [#uses=1]
- %239 = load i32* %238, align 4 ; <i32> [#uses=1]
- %240 = lshr i32 %237, 24 ; <i32> [#uses=1]
- %241 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %240; <i32*> [#uses=1]
- %242 = load i32* %241, align 4 ; <i32> [#uses=1]
- %243 = lshr i32 %237, 16 ; <i32> [#uses=1]
- %244 = or i32 %243, 256 ; <i32> [#uses=1]
- %245 = and i32 %244, 511 ; <i32> [#uses=1]
- %246 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %245; <i32*> [#uses=1]
- %247 = load i32* %246, align 4 ; <i32> [#uses=1]
- %248 = add i32 %247, %242 ; <i32> [#uses=1]
- %249 = lshr i32 %237, 8 ; <i32> [#uses=1]
- %250 = or i32 %249, 512 ; <i32> [#uses=1]
- %251 = and i32 %250, 767 ; <i32> [#uses=1]
- %252 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %251; <i32*> [#uses=1]
- %253 = load i32* %252, align 4 ; <i32> [#uses=1]
- %254 = xor i32 %248, %253 ; <i32> [#uses=1]
- %255 = or i32 %237, 768 ; <i32> [#uses=1]
- %256 = and i32 %255, 1023 ; <i32> [#uses=1]
- %257 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %256; <i32*> [#uses=1]
- %258 = load i32* %257, align 4 ; <i32> [#uses=1]
- %259 = add i32 %254, %258 ; <i32> [#uses=1]
- %260 = xor i32 %239, %213 ; <i32> [#uses=1]
- %261 = xor i32 %260, %259 ; <i32> [#uses=5]
- %262 = load i32* undef, align 4 ; <i32> [#uses=1]
- %263 = lshr i32 %261, 24 ; <i32> [#uses=1]
- %264 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %263; <i32*> [#uses=1]
- %265 = load i32* %264, align 4 ; <i32> [#uses=1]
- %266 = lshr i32 %261, 16 ; <i32> [#uses=1]
- %267 = or i32 %266, 256 ; <i32> [#uses=1]
- %268 = and i32 %267, 511 ; <i32> [#uses=1]
- %269 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %268; <i32*> [#uses=1]
- %270 = load i32* %269, align 4 ; <i32> [#uses=1]
- %271 = add i32 %270, %265 ; <i32> [#uses=1]
- %272 = lshr i32 %261, 8 ; <i32> [#uses=1]
- %273 = or i32 %272, 512 ; <i32> [#uses=1]
- %274 = and i32 %273, 767 ; <i32> [#uses=1]
- %275 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %274; <i32*> [#uses=1]
- %276 = load i32* %275, align 4 ; <i32> [#uses=1]
- %277 = xor i32 %271, %276 ; <i32> [#uses=1]
- %278 = or i32 %261, 768 ; <i32> [#uses=1]
- %279 = and i32 %278, 1023 ; <i32> [#uses=1]
- %280 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %279; <i32*> [#uses=1]
- %281 = load i32* %280, align 4 ; <i32> [#uses=1]
- %282 = add i32 %277, %281 ; <i32> [#uses=1]
- %283 = xor i32 %262, %237 ; <i32> [#uses=1]
- %284 = xor i32 %283, %282 ; <i32> [#uses=4]
- %285 = load i32* null, align 4 ; <i32> [#uses=1]
- %286 = lshr i32 %284, 24 ; <i32> [#uses=1]
- %287 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %286; <i32*> [#uses=1]
- %288 = load i32* %287, align 4 ; <i32> [#uses=1]
- %289 = lshr i32 %284, 16 ; <i32> [#uses=1]
- %290 = or i32 %289, 256 ; <i32> [#uses=1]
- %291 = and i32 %290, 511 ; <i32> [#uses=1]
- %292 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %291; <i32*> [#uses=1]
- %293 = load i32* %292, align 4 ; <i32> [#uses=1]
- %294 = add i32 %293, %288 ; <i32> [#uses=1]
- %295 = lshr i32 %284, 8 ; <i32> [#uses=1]
- %296 = or i32 %295, 512 ; <i32> [#uses=1]
- %297 = and i32 %296, 767 ; <i32> [#uses=1]
- %298 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %297; <i32*> [#uses=1]
- %299 = load i32* %298, align 4 ; <i32> [#uses=1]
- %300 = xor i32 %294, %299 ; <i32> [#uses=1]
- %301 = or i32 %284, 768 ; <i32> [#uses=1]
- %302 = and i32 %301, 1023 ; <i32> [#uses=1]
- %303 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %302; <i32*> [#uses=1]
- %304 = load i32* %303, align 4 ; <i32> [#uses=1]
- %305 = add i32 %300, %304 ; <i32> [#uses=1]
- %306 = xor i32 %285, %261 ; <i32> [#uses=1]
- %307 = xor i32 %306, %305 ; <i32> [#uses=1]
- %308 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 15; <i32*> [#uses=1]
- %309 = load i32* %308, align 4 ; <i32> [#uses=1]
- %310 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 0; <i32*> [#uses=1]
- %311 = load i32* %310, align 4 ; <i32> [#uses=1]
- %312 = or i32 0, 256 ; <i32> [#uses=1]
- %313 = and i32 %312, 511 ; <i32> [#uses=1]
- %314 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %313; <i32*> [#uses=1]
- %315 = load i32* %314, align 4 ; <i32> [#uses=1]
- %316 = add i32 %315, %311 ; <i32> [#uses=1]
- %317 = or i32 0, 512 ; <i32> [#uses=1]
- %318 = and i32 %317, 767 ; <i32> [#uses=1]
- %319 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %318; <i32*> [#uses=1]
- %320 = load i32* %319, align 4 ; <i32> [#uses=1]
- %321 = xor i32 %316, %320 ; <i32> [#uses=1]
- %322 = or i32 0, 768 ; <i32> [#uses=1]
- %323 = and i32 %322, 1023 ; <i32> [#uses=1]
- %324 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %323; <i32*> [#uses=1]
- %325 = load i32* %324, align 4 ; <i32> [#uses=1]
- %326 = add i32 %321, %325 ; <i32> [#uses=1]
- %327 = xor i32 %309, %307 ; <i32> [#uses=1]
- %328 = xor i32 %327, %326 ; <i32> [#uses=5]
- %329 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 17; <i32*> [#uses=1]
- br label %bb2
-
-bb1: ; preds = %entry
- %330 = load i32* null, align 4 ; <i32> [#uses=1]
- %331 = xor i32 %330, %1 ; <i32> [#uses=4]
- %332 = load i32* null, align 4 ; <i32> [#uses=1]
- %333 = lshr i32 %331, 24 ; <i32> [#uses=1]
- %334 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %333; <i32*> [#uses=1]
- %335 = load i32* %334, align 4 ; <i32> [#uses=1]
- %336 = load i32* null, align 4 ; <i32> [#uses=1]
- %337 = add i32 %336, %335 ; <i32> [#uses=1]
- %338 = lshr i32 %331, 8 ; <i32> [#uses=1]
- %339 = or i32 %338, 512 ; <i32> [#uses=1]
- %340 = and i32 %339, 767 ; <i32> [#uses=1]
- %341 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %340; <i32*> [#uses=1]
- %342 = load i32* %341, align 4 ; <i32> [#uses=1]
- %343 = xor i32 %337, %342 ; <i32> [#uses=1]
- %344 = or i32 %331, 768 ; <i32> [#uses=1]
- %345 = and i32 %344, 1023 ; <i32> [#uses=1]
- %346 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %345; <i32*> [#uses=1]
- %347 = load i32* %346, align 4 ; <i32> [#uses=1]
- %348 = add i32 %343, %347 ; <i32> [#uses=1]
- %349 = xor i32 %332, %2 ; <i32> [#uses=1]
- %350 = xor i32 %349, %348 ; <i32> [#uses=5]
- %351 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 15; <i32*> [#uses=1]
- %352 = load i32* %351, align 4 ; <i32> [#uses=1]
- %353 = lshr i32 %350, 24 ; <i32> [#uses=1]
- %354 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %353; <i32*> [#uses=1]
- %355 = load i32* %354, align 4 ; <i32> [#uses=1]
- %356 = lshr i32 %350, 16 ; <i32> [#uses=1]
- %357 = or i32 %356, 256 ; <i32> [#uses=1]
- %358 = and i32 %357, 511 ; <i32> [#uses=1]
- %359 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %358; <i32*> [#uses=1]
- %360 = load i32* %359, align 4 ; <i32> [#uses=1]
- %361 = add i32 %360, %355 ; <i32> [#uses=1]
- %362 = lshr i32 %350, 8 ; <i32> [#uses=1]
- %363 = or i32 %362, 512 ; <i32> [#uses=1]
- %364 = and i32 %363, 767 ; <i32> [#uses=1]
- %365 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %364; <i32*> [#uses=1]
- %366 = load i32* %365, align 4 ; <i32> [#uses=1]
- %367 = xor i32 %361, %366 ; <i32> [#uses=1]
- %368 = or i32 %350, 768 ; <i32> [#uses=1]
- %369 = and i32 %368, 1023 ; <i32> [#uses=1]
- %370 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %369; <i32*> [#uses=1]
- %371 = load i32* %370, align 4 ; <i32> [#uses=1]
- %372 = add i32 %367, %371 ; <i32> [#uses=1]
- %373 = xor i32 %352, %331 ; <i32> [#uses=1]
- %374 = xor i32 %373, %372 ; <i32> [#uses=5]
- %375 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 14; <i32*> [#uses=1]
- %376 = load i32* %375, align 4 ; <i32> [#uses=1]
- %377 = lshr i32 %374, 24 ; <i32> [#uses=1]
- %378 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %377; <i32*> [#uses=1]
- %379 = load i32* %378, align 4 ; <i32> [#uses=1]
- %380 = lshr i32 %374, 16 ; <i32> [#uses=1]
- %381 = or i32 %380, 256 ; <i32> [#uses=1]
- %382 = and i32 %381, 511 ; <i32> [#uses=1]
- %383 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %382; <i32*> [#uses=1]
- %384 = load i32* %383, align 4 ; <i32> [#uses=1]
- %385 = add i32 %384, %379 ; <i32> [#uses=1]
- %386 = lshr i32 %374, 8 ; <i32> [#uses=1]
- %387 = or i32 %386, 512 ; <i32> [#uses=1]
- %388 = and i32 %387, 767 ; <i32> [#uses=1]
- %389 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %388; <i32*> [#uses=1]
- %390 = load i32* %389, align 4 ; <i32> [#uses=1]
- %391 = xor i32 %385, %390 ; <i32> [#uses=1]
- %392 = or i32 %374, 768 ; <i32> [#uses=1]
- %393 = and i32 %392, 1023 ; <i32> [#uses=1]
- %394 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %393; <i32*> [#uses=1]
- %395 = load i32* %394, align 4 ; <i32> [#uses=1]
- %396 = add i32 %391, %395 ; <i32> [#uses=1]
- %397 = xor i32 %376, %350 ; <i32> [#uses=1]
- %398 = xor i32 %397, %396 ; <i32> [#uses=5]
- %399 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 13; <i32*> [#uses=1]
- %400 = load i32* %399, align 4 ; <i32> [#uses=1]
- %401 = lshr i32 %398, 24 ; <i32> [#uses=1]
- %402 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %401; <i32*> [#uses=1]
- %403 = load i32* %402, align 4 ; <i32> [#uses=1]
- %404 = lshr i32 %398, 16 ; <i32> [#uses=1]
- %405 = or i32 %404, 256 ; <i32> [#uses=1]
- %406 = and i32 %405, 511 ; <i32> [#uses=1]
- %407 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %406; <i32*> [#uses=1]
- %408 = load i32* %407, align 4 ; <i32> [#uses=1]
- %409 = add i32 %408, %403 ; <i32> [#uses=1]
- %410 = lshr i32 %398, 8 ; <i32> [#uses=1]
- %411 = or i32 %410, 512 ; <i32> [#uses=1]
- %412 = and i32 %411, 767 ; <i32> [#uses=1]
- %413 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %412; <i32*> [#uses=1]
- %414 = load i32* %413, align 4 ; <i32> [#uses=1]
- %415 = xor i32 %409, %414 ; <i32> [#uses=1]
- %416 = or i32 %398, 768 ; <i32> [#uses=1]
- %417 = and i32 %416, 1023 ; <i32> [#uses=1]
- %418 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %417; <i32*> [#uses=1]
- %419 = load i32* %418, align 4 ; <i32> [#uses=1]
- %420 = add i32 %415, %419 ; <i32> [#uses=1]
- %421 = xor i32 %400, %374 ; <i32> [#uses=1]
- %422 = xor i32 %421, %420 ; <i32> [#uses=5]
- %423 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 12; <i32*> [#uses=1]
- %424 = load i32* %423, align 4 ; <i32> [#uses=1]
- %425 = lshr i32 %422, 24 ; <i32> [#uses=1]
- %426 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %425; <i32*> [#uses=1]
- %427 = load i32* %426, align 4 ; <i32> [#uses=1]
- %428 = lshr i32 %422, 16 ; <i32> [#uses=1]
- %429 = or i32 %428, 256 ; <i32> [#uses=1]
- %430 = and i32 %429, 511 ; <i32> [#uses=1]
- %431 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %430; <i32*> [#uses=1]
- %432 = load i32* %431, align 4 ; <i32> [#uses=1]
- %433 = add i32 %432, %427 ; <i32> [#uses=1]
- %434 = lshr i32 %422, 8 ; <i32> [#uses=1]
- %435 = or i32 %434, 512 ; <i32> [#uses=1]
- %436 = and i32 %435, 767 ; <i32> [#uses=1]
- %437 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %436; <i32*> [#uses=1]
- %438 = load i32* %437, align 4 ; <i32> [#uses=1]
- %439 = xor i32 %433, %438 ; <i32> [#uses=1]
- %440 = or i32 %422, 768 ; <i32> [#uses=1]
- %441 = and i32 %440, 1023 ; <i32> [#uses=1]
- %442 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %441; <i32*> [#uses=1]
- %443 = load i32* %442, align 4 ; <i32> [#uses=1]
- %444 = add i32 %439, %443 ; <i32> [#uses=1]
- %445 = xor i32 %424, %398 ; <i32> [#uses=1]
- %446 = xor i32 %445, %444 ; <i32> [#uses=5]
- %447 = load i32* undef, align 4 ; <i32> [#uses=1]
- %448 = lshr i32 %446, 24 ; <i32> [#uses=1]
- %449 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %448; <i32*> [#uses=1]
- %450 = load i32* %449, align 4 ; <i32> [#uses=1]
- %451 = lshr i32 %446, 16 ; <i32> [#uses=1]
- %452 = or i32 %451, 256 ; <i32> [#uses=1]
- %453 = and i32 %452, 511 ; <i32> [#uses=1]
- %454 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %453; <i32*> [#uses=1]
- %455 = load i32* %454, align 4 ; <i32> [#uses=1]
- %456 = add i32 %455, %450 ; <i32> [#uses=1]
- %457 = lshr i32 %446, 8 ; <i32> [#uses=1]
- %458 = or i32 %457, 512 ; <i32> [#uses=1]
- %459 = and i32 %458, 767 ; <i32> [#uses=1]
- %460 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %459; <i32*> [#uses=1]
- %461 = load i32* %460, align 4 ; <i32> [#uses=1]
- %462 = xor i32 %456, %461 ; <i32> [#uses=1]
- %463 = or i32 %446, 768 ; <i32> [#uses=1]
- %464 = and i32 %463, 1023 ; <i32> [#uses=1]
- %465 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %464; <i32*> [#uses=1]
- %466 = load i32* %465, align 4 ; <i32> [#uses=1]
- %467 = add i32 %462, %466 ; <i32> [#uses=1]
- %468 = xor i32 %447, %422 ; <i32> [#uses=1]
- %469 = xor i32 %468, %467 ; <i32> [#uses=5]
- %470 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 10; <i32*> [#uses=1]
- %471 = load i32* %470, align 4 ; <i32> [#uses=1]
- %472 = lshr i32 %469, 24 ; <i32> [#uses=1]
- %473 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %472; <i32*> [#uses=1]
- %474 = load i32* %473, align 4 ; <i32> [#uses=1]
- %475 = lshr i32 %469, 16 ; <i32> [#uses=1]
- %476 = or i32 %475, 256 ; <i32> [#uses=1]
- %477 = and i32 %476, 511 ; <i32> [#uses=1]
- %478 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %477; <i32*> [#uses=1]
- %479 = load i32* %478, align 4 ; <i32> [#uses=1]
- %480 = add i32 %479, %474 ; <i32> [#uses=1]
- %481 = lshr i32 %469, 8 ; <i32> [#uses=1]
- %482 = or i32 %481, 512 ; <i32> [#uses=1]
- %483 = and i32 %482, 767 ; <i32> [#uses=1]
- %484 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %483; <i32*> [#uses=1]
- %485 = load i32* %484, align 4 ; <i32> [#uses=1]
- %486 = xor i32 %480, %485 ; <i32> [#uses=1]
- %487 = or i32 %469, 768 ; <i32> [#uses=1]
- %488 = and i32 %487, 1023 ; <i32> [#uses=1]
- %489 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %488; <i32*> [#uses=1]
- %490 = load i32* %489, align 4 ; <i32> [#uses=1]
- %491 = add i32 %486, %490 ; <i32> [#uses=1]
- %492 = xor i32 %471, %446 ; <i32> [#uses=1]
- %493 = xor i32 %492, %491 ; <i32> [#uses=5]
- %494 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 9; <i32*> [#uses=1]
- %495 = load i32* %494, align 4 ; <i32> [#uses=1]
- %496 = lshr i32 %493, 24 ; <i32> [#uses=1]
- %497 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %496; <i32*> [#uses=1]
- %498 = load i32* %497, align 4 ; <i32> [#uses=1]
- %499 = lshr i32 %493, 16 ; <i32> [#uses=1]
- %500 = or i32 %499, 256 ; <i32> [#uses=1]
- %501 = and i32 %500, 511 ; <i32> [#uses=1]
- %502 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %501; <i32*> [#uses=1]
- %503 = load i32* %502, align 4 ; <i32> [#uses=1]
- %504 = add i32 %503, %498 ; <i32> [#uses=1]
- %505 = lshr i32 %493, 8 ; <i32> [#uses=1]
- %506 = or i32 %505, 512 ; <i32> [#uses=1]
- %507 = and i32 %506, 767 ; <i32> [#uses=1]
- %508 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %507; <i32*> [#uses=1]
- %509 = load i32* %508, align 4 ; <i32> [#uses=1]
- %510 = xor i32 %504, %509 ; <i32> [#uses=1]
- %511 = or i32 %493, 768 ; <i32> [#uses=1]
- %512 = and i32 %511, 1023 ; <i32> [#uses=1]
- %513 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %512; <i32*> [#uses=1]
- %514 = load i32* %513, align 4 ; <i32> [#uses=1]
- %515 = add i32 %510, %514 ; <i32> [#uses=1]
- %516 = xor i32 %495, %469 ; <i32> [#uses=1]
- %517 = xor i32 %516, %515 ; <i32> [#uses=5]
- %518 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 8; <i32*> [#uses=1]
- %519 = load i32* %518, align 4 ; <i32> [#uses=1]
- %520 = lshr i32 %517, 24 ; <i32> [#uses=1]
- %521 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %520; <i32*> [#uses=1]
- %522 = load i32* %521, align 4 ; <i32> [#uses=1]
- %523 = lshr i32 %517, 16 ; <i32> [#uses=1]
- %524 = or i32 %523, 256 ; <i32> [#uses=1]
- %525 = and i32 %524, 511 ; <i32> [#uses=1]
- %526 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %525; <i32*> [#uses=1]
- %527 = load i32* %526, align 4 ; <i32> [#uses=1]
- %528 = add i32 %527, %522 ; <i32> [#uses=1]
- %529 = lshr i32 %517, 8 ; <i32> [#uses=1]
- %530 = or i32 %529, 512 ; <i32> [#uses=1]
- %531 = and i32 %530, 767 ; <i32> [#uses=1]
- %532 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %531; <i32*> [#uses=1]
- %533 = load i32* %532, align 4 ; <i32> [#uses=1]
- %534 = xor i32 %528, %533 ; <i32> [#uses=1]
- %535 = or i32 %517, 768 ; <i32> [#uses=1]
- %536 = and i32 %535, 1023 ; <i32> [#uses=1]
- %537 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %536; <i32*> [#uses=1]
- %538 = load i32* %537, align 4 ; <i32> [#uses=1]
- %539 = add i32 %534, %538 ; <i32> [#uses=1]
- %540 = xor i32 %519, %493 ; <i32> [#uses=1]
- %541 = xor i32 %540, %539 ; <i32> [#uses=5]
- %542 = load i32* undef, align 4 ; <i32> [#uses=1]
- %543 = lshr i32 %541, 24 ; <i32> [#uses=1]
- %544 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %543; <i32*> [#uses=1]
- %545 = load i32* %544, align 4 ; <i32> [#uses=1]
- %546 = lshr i32 %541, 16 ; <i32> [#uses=1]
- %547 = or i32 %546, 256 ; <i32> [#uses=1]
- %548 = and i32 %547, 511 ; <i32> [#uses=1]
- %549 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %548; <i32*> [#uses=1]
- %550 = load i32* %549, align 4 ; <i32> [#uses=1]
- %551 = add i32 %550, %545 ; <i32> [#uses=1]
- %552 = lshr i32 %541, 8 ; <i32> [#uses=1]
- %553 = or i32 %552, 512 ; <i32> [#uses=1]
- %554 = and i32 %553, 767 ; <i32> [#uses=1]
- %555 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %554; <i32*> [#uses=1]
- %556 = load i32* %555, align 4 ; <i32> [#uses=1]
- %557 = xor i32 %551, %556 ; <i32> [#uses=1]
- %558 = or i32 %541, 768 ; <i32> [#uses=1]
- %559 = and i32 %558, 1023 ; <i32> [#uses=1]
- %560 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %559; <i32*> [#uses=1]
- %561 = load i32* %560, align 4 ; <i32> [#uses=1]
- %562 = add i32 %557, %561 ; <i32> [#uses=1]
- %563 = xor i32 %542, %517 ; <i32> [#uses=1]
- %564 = xor i32 %563, %562 ; <i32> [#uses=5]
- %565 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 6; <i32*> [#uses=1]
- %566 = load i32* %565, align 4 ; <i32> [#uses=1]
- %567 = lshr i32 %564, 24 ; <i32> [#uses=1]
- %568 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %567; <i32*> [#uses=1]
- %569 = load i32* %568, align 4 ; <i32> [#uses=1]
- %570 = lshr i32 %564, 16 ; <i32> [#uses=1]
- %571 = or i32 %570, 256 ; <i32> [#uses=1]
- %572 = and i32 %571, 511 ; <i32> [#uses=1]
- %573 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %572; <i32*> [#uses=1]
- %574 = load i32* %573, align 4 ; <i32> [#uses=1]
- %575 = add i32 %574, %569 ; <i32> [#uses=1]
- %576 = lshr i32 %564, 8 ; <i32> [#uses=1]
- %577 = or i32 %576, 512 ; <i32> [#uses=1]
- %578 = and i32 %577, 767 ; <i32> [#uses=1]
- %579 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %578; <i32*> [#uses=1]
- %580 = load i32* %579, align 4 ; <i32> [#uses=1]
- %581 = xor i32 %575, %580 ; <i32> [#uses=1]
- %582 = or i32 %564, 768 ; <i32> [#uses=1]
- %583 = and i32 %582, 1023 ; <i32> [#uses=1]
- %584 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %583; <i32*> [#uses=1]
- %585 = load i32* %584, align 4 ; <i32> [#uses=1]
- %586 = add i32 %581, %585 ; <i32> [#uses=1]
- %587 = xor i32 %566, %541 ; <i32> [#uses=1]
- %588 = xor i32 %587, %586 ; <i32> [#uses=5]
- %589 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 5; <i32*> [#uses=1]
- %590 = load i32* %589, align 4 ; <i32> [#uses=1]
- %591 = lshr i32 %588, 24 ; <i32> [#uses=1]
- %592 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %591; <i32*> [#uses=1]
- %593 = load i32* %592, align 4 ; <i32> [#uses=1]
- %594 = lshr i32 %588, 16 ; <i32> [#uses=1]
- %595 = or i32 %594, 256 ; <i32> [#uses=1]
- %596 = and i32 %595, 511 ; <i32> [#uses=1]
- %597 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %596; <i32*> [#uses=1]
- %598 = load i32* %597, align 4 ; <i32> [#uses=1]
- %599 = add i32 %598, %593 ; <i32> [#uses=1]
- %600 = lshr i32 %588, 8 ; <i32> [#uses=1]
- %601 = or i32 %600, 512 ; <i32> [#uses=1]
- %602 = and i32 %601, 767 ; <i32> [#uses=1]
- %603 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %602; <i32*> [#uses=1]
- %604 = load i32* %603, align 4 ; <i32> [#uses=1]
- %605 = xor i32 %599, %604 ; <i32> [#uses=1]
- %606 = or i32 %588, 768 ; <i32> [#uses=1]
- %607 = and i32 %606, 1023 ; <i32> [#uses=1]
- %608 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %607; <i32*> [#uses=1]
- %609 = load i32* %608, align 4 ; <i32> [#uses=1]
- %610 = add i32 %605, %609 ; <i32> [#uses=1]
- %611 = xor i32 %590, %564 ; <i32> [#uses=1]
- %612 = xor i32 %611, %610 ; <i32> [#uses=5]
- %613 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 4; <i32*> [#uses=1]
- %614 = load i32* %613, align 4 ; <i32> [#uses=1]
- %615 = lshr i32 %612, 24 ; <i32> [#uses=1]
- %616 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %615; <i32*> [#uses=1]
- %617 = load i32* %616, align 4 ; <i32> [#uses=1]
- %618 = lshr i32 %612, 16 ; <i32> [#uses=1]
- %619 = or i32 %618, 256 ; <i32> [#uses=1]
- %620 = and i32 %619, 511 ; <i32> [#uses=1]
- %621 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %620; <i32*> [#uses=1]
- %622 = load i32* %621, align 4 ; <i32> [#uses=1]
- %623 = add i32 %622, %617 ; <i32> [#uses=1]
- %624 = lshr i32 %612, 8 ; <i32> [#uses=1]
- %625 = or i32 %624, 512 ; <i32> [#uses=1]
- %626 = and i32 %625, 767 ; <i32> [#uses=1]
- %627 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %626; <i32*> [#uses=1]
- %628 = load i32* %627, align 4 ; <i32> [#uses=1]
- %629 = xor i32 %623, %628 ; <i32> [#uses=1]
- %630 = or i32 %612, 768 ; <i32> [#uses=1]
- %631 = and i32 %630, 1023 ; <i32> [#uses=1]
- %632 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %631; <i32*> [#uses=1]
- %633 = load i32* %632, align 4 ; <i32> [#uses=1]
- %634 = add i32 %629, %633 ; <i32> [#uses=1]
- %635 = xor i32 %614, %588 ; <i32> [#uses=1]
- %636 = xor i32 %635, %634 ; <i32> [#uses=5]
- %637 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 3; <i32*> [#uses=1]
- %638 = load i32* %637, align 4 ; <i32> [#uses=1]
- %639 = lshr i32 %636, 24 ; <i32> [#uses=1]
- %640 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %639; <i32*> [#uses=1]
- %641 = load i32* %640, align 4 ; <i32> [#uses=1]
- %642 = lshr i32 %636, 16 ; <i32> [#uses=1]
- %643 = or i32 %642, 256 ; <i32> [#uses=1]
- %644 = and i32 %643, 511 ; <i32> [#uses=1]
- %645 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %644; <i32*> [#uses=1]
- %646 = load i32* %645, align 4 ; <i32> [#uses=1]
- %647 = add i32 %646, %641 ; <i32> [#uses=1]
- %648 = lshr i32 %636, 8 ; <i32> [#uses=1]
- %649 = or i32 %648, 512 ; <i32> [#uses=1]
- %650 = and i32 %649, 767 ; <i32> [#uses=1]
- %651 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %650; <i32*> [#uses=1]
- %652 = load i32* %651, align 4 ; <i32> [#uses=1]
- %653 = xor i32 %647, %652 ; <i32> [#uses=1]
- %654 = or i32 %636, 768 ; <i32> [#uses=1]
- %655 = and i32 %654, 1023 ; <i32> [#uses=1]
- %656 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %655; <i32*> [#uses=1]
- %657 = load i32* %656, align 4 ; <i32> [#uses=1]
- %658 = add i32 %653, %657 ; <i32> [#uses=1]
- %659 = xor i32 %638, %612 ; <i32> [#uses=1]
- %660 = xor i32 %659, %658 ; <i32> [#uses=5]
- %661 = load i32* undef, align 4 ; <i32> [#uses=1]
- %662 = lshr i32 %660, 24 ; <i32> [#uses=1]
- %663 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %662; <i32*> [#uses=1]
- %664 = load i32* %663, align 4 ; <i32> [#uses=1]
- %665 = lshr i32 %660, 16 ; <i32> [#uses=1]
- %666 = or i32 %665, 256 ; <i32> [#uses=1]
- %667 = and i32 %666, 511 ; <i32> [#uses=1]
- %668 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %667; <i32*> [#uses=1]
- %669 = load i32* %668, align 4 ; <i32> [#uses=1]
- %670 = add i32 %669, %664 ; <i32> [#uses=1]
- %671 = lshr i32 %660, 8 ; <i32> [#uses=1]
- %672 = or i32 %671, 512 ; <i32> [#uses=1]
- %673 = and i32 %672, 767 ; <i32> [#uses=1]
- %674 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %673; <i32*> [#uses=1]
- %675 = load i32* %674, align 4 ; <i32> [#uses=1]
- %676 = xor i32 %670, %675 ; <i32> [#uses=1]
- %677 = or i32 %660, 768 ; <i32> [#uses=1]
- %678 = and i32 %677, 1023 ; <i32> [#uses=1]
- %679 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %678; <i32*> [#uses=1]
- %680 = load i32* %679, align 4 ; <i32> [#uses=1]
- %681 = add i32 %676, %680 ; <i32> [#uses=1]
- %682 = xor i32 %661, %636 ; <i32> [#uses=1]
- %683 = xor i32 %682, %681 ; <i32> [#uses=5]
- %684 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 1; <i32*> [#uses=1]
- br label %bb2
-
-bb2: ; preds = %bb1, %bb
- %.pn2.in = phi i32* [ %329, %bb ], [ %0, %bb1 ]; <i32*> [#uses=1]
- %.pn3 = phi i32 [ %328, %bb ], [ %683, %bb1 ]; <i32> [#uses=1]
- %.pn15.in = phi i32 [ %328, %bb ], [ %683, %bb1 ]; <i32> [#uses=1]
- %.pn14.in.in.in = phi i32 [ %328, %bb ], [ %683, %bb1 ]; <i32> [#uses=1]
- %.pn13.in.in.in = phi i32 [ %328, %bb ], [ %683, %bb1 ]; <i32> [#uses=1]
- %.pn10.in.in = phi i32 [ %328, %bb ], [ %683, %bb1 ]; <i32> [#uses=1]
- %.pn4.in = phi i32* [ null, %bb ], [ %684, %bb1 ]; <i32*> [#uses=1]
- %.pn5 = phi i32 [ 0, %bb ], [ %660, %bb1 ]; <i32> [#uses=1]
- %.pn14.in.in = lshr i32 %.pn14.in.in.in, 16; <i32> [#uses=1]
- %.pn14.in = or i32 %.pn14.in.in, 256 ; <i32> [#uses=1]
- %.pn13.in.in = lshr i32 %.pn13.in.in.in, 8; <i32> [#uses=1]
- %.pn15 = lshr i32 %.pn15.in, 24 ; <i32> [#uses=1]
- %.pn14 = and i32 %.pn14.in, 511 ; <i32> [#uses=1]
- %.pn13.in = or i32 %.pn13.in.in, 512 ; <i32> [#uses=1]
- %.pn11.in = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %.pn15; <i32*> [#uses=1]
- %.pn12.in = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %.pn14; <i32*> [#uses=1]
- %.pn13 = and i32 %.pn13.in, 767 ; <i32> [#uses=1]
- %.pn10.in = or i32 %.pn10.in.in, 768 ; <i32> [#uses=1]
- %.pn11 = load i32* %.pn11.in ; <i32> [#uses=1]
- %.pn12 = load i32* %.pn12.in ; <i32> [#uses=1]
- %.pn9.in = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %.pn13; <i32*> [#uses=1]
- %.pn10 = and i32 %.pn10.in, 1023 ; <i32> [#uses=1]
- %.pn8 = add i32 %.pn12, %.pn11 ; <i32> [#uses=1]
- %.pn9 = load i32* %.pn9.in ; <i32> [#uses=1]
- %.pn7.in = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %.pn10; <i32*> [#uses=1]
- %.pn6 = xor i32 %.pn8, %.pn9 ; <i32> [#uses=1]
- %.pn7 = load i32* %.pn7.in ; <i32> [#uses=1]
- %.pn4 = load i32* %.pn4.in ; <i32> [#uses=1]
- %.pn2 = load i32* %.pn2.in ; <i32> [#uses=1]
- %.pn = add i32 %.pn6, %.pn7 ; <i32> [#uses=1]
- %r.0 = xor i32 %.pn2, %.pn3 ; <i32> [#uses=1]
- %.pn1 = xor i32 %.pn, %.pn5 ; <i32> [#uses=1]
- %l.0 = xor i32 %.pn1, %.pn4 ; <i32> [#uses=1]
- store i32 %l.0, i32* undef, align 4
- store i32 %r.0, i32* %data, align 4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/2009-08-12-RegInfoAssert.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/2009-08-12-RegInfoAssert.ll
deleted file mode 100644
index b6e67b1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/2009-08-12-RegInfoAssert.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv6-apple-darwin
-
- %struct.vorbis_comment = type { i8**, i32*, i32, i8* }
-@.str16 = external constant [2 x i8], align 1 ; <[2 x i8]*> [#uses=1]
-
-declare arm_apcscc i8* @__strcpy_chk(i8*, i8*, i32) nounwind
-
-declare arm_apcscc i8* @__strcat_chk(i8*, i8*, i32) nounwind
-
-define arm_apcscc i8* @vorbis_comment_query(%struct.vorbis_comment* nocapture %vc, i8* %tag, i32 %count) nounwind {
-entry:
- %0 = alloca i8, i32 undef, align 4 ; <i8*> [#uses=2]
- %1 = call arm_apcscc i8* @__strcpy_chk(i8* %0, i8* %tag, i32 -1) nounwind; <i8*> [#uses=0]
- %2 = call arm_apcscc i8* @__strcat_chk(i8* %0, i8* getelementptr ([2 x i8]* @.str16, i32 0, i32 0), i32 -1) nounwind; <i8*> [#uses=0]
- %3 = getelementptr %struct.vorbis_comment* %vc, i32 0, i32 0; <i8***> [#uses=1]
- br label %bb11
-
-bb6: ; preds = %bb11
- %4 = load i8*** %3, align 4 ; <i8**> [#uses=1]
- %scevgep = getelementptr i8** %4, i32 %8 ; <i8**> [#uses=1]
- %5 = load i8** %scevgep, align 4 ; <i8*> [#uses=1]
- br label %bb3.i
-
-bb3.i: ; preds = %bb3.i, %bb6
- %scevgep7.i = getelementptr i8* %5, i32 0 ; <i8*> [#uses=1]
- %6 = load i8* %scevgep7.i, align 1 ; <i8> [#uses=0]
- br i1 undef, label %bb3.i, label %bb10
-
-bb10: ; preds = %bb3.i
- %7 = add i32 %8, 1 ; <i32> [#uses=1]
- br label %bb11
-
-bb11: ; preds = %bb10, %entry
- %8 = phi i32 [ %7, %bb10 ], [ 0, %entry ] ; <i32> [#uses=3]
- %9 = icmp sgt i32 undef, %8 ; <i1> [#uses=1]
- br i1 %9, label %bb6, label %bb13
-
-bb13: ; preds = %bb11
- ret i8* null
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/2009-08-20-ISelBug.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/2009-08-20-ISelBug.ll
deleted file mode 100644
index c31b65b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/2009-08-20-ISelBug.ll
+++ /dev/null
@@ -1,66 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv6-apple-darwin -relocation-model=pic -disable-fp-elim -mattr=+v6 | FileCheck %s
-; rdar://7157006
-
-%struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
-%struct.__sFILEX = type opaque
-%struct.__sbuf = type { i8*, i32 }
-%struct.asl_file_t = type { i32, i32, i32, %struct.file_string_t*, i64, i64, i64, i64, i64, i64, i32, %struct.FILE*, i8*, i8* }
-%struct.file_string_t = type { i64, i32, %struct.file_string_t*, [0 x i8] }
-
-@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 (%struct.asl_file_t*, i64, i64*)* @t to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define arm_apcscc i32 @t(%struct.asl_file_t* %s, i64 %off, i64* %out) nounwind optsize {
-; CHECK: t:
-; CHECK: adds r3, #8
-entry:
- %val = alloca i64, align 4 ; <i64*> [#uses=3]
- %0 = icmp eq %struct.asl_file_t* %s, null ; <i1> [#uses=1]
- br i1 %0, label %bb13, label %bb1
-
-bb1: ; preds = %entry
- %1 = getelementptr inbounds %struct.asl_file_t* %s, i32 0, i32 11 ; <%struct.FILE**> [#uses=2]
- %2 = load %struct.FILE** %1, align 4 ; <%struct.FILE*> [#uses=2]
- %3 = icmp eq %struct.FILE* %2, null ; <i1> [#uses=1]
- br i1 %3, label %bb13, label %bb3
-
-bb3: ; preds = %bb1
- %4 = add nsw i64 %off, 8 ; <i64> [#uses=1]
- %5 = getelementptr inbounds %struct.asl_file_t* %s, i32 0, i32 10 ; <i32*> [#uses=1]
- %6 = load i32* %5, align 4 ; <i32> [#uses=1]
- %7 = zext i32 %6 to i64 ; <i64> [#uses=1]
- %8 = icmp sgt i64 %4, %7 ; <i1> [#uses=1]
- br i1 %8, label %bb13, label %bb5
-
-bb5: ; preds = %bb3
- %9 = call arm_apcscc i32 @fseeko(%struct.FILE* %2, i64 %off, i32 0) nounwind ; <i32> [#uses=1]
- %10 = icmp eq i32 %9, 0 ; <i1> [#uses=1]
- br i1 %10, label %bb7, label %bb13
-
-bb7: ; preds = %bb5
- store i64 0, i64* %val, align 4
- %11 = load %struct.FILE** %1, align 4 ; <%struct.FILE*> [#uses=1]
- %val8 = bitcast i64* %val to i8* ; <i8*> [#uses=1]
- %12 = call arm_apcscc i32 @fread(i8* noalias %val8, i32 8, i32 1, %struct.FILE* noalias %11) nounwind ; <i32> [#uses=1]
- %13 = icmp eq i32 %12, 1 ; <i1> [#uses=1]
- br i1 %13, label %bb10, label %bb13
-
-bb10: ; preds = %bb7
- %14 = icmp eq i64* %out, null ; <i1> [#uses=1]
- br i1 %14, label %bb13, label %bb11
-
-bb11: ; preds = %bb10
- %15 = load i64* %val, align 4 ; <i64> [#uses=1]
- %16 = call arm_apcscc i64 @asl_core_ntohq(i64 %15) nounwind ; <i64> [#uses=1]
- store i64 %16, i64* %out, align 4
- ret i32 0
-
-bb13: ; preds = %bb10, %bb7, %bb5, %bb3, %bb1, %entry
- %.0 = phi i32 [ 2, %entry ], [ 2, %bb1 ], [ 7, %bb3 ], [ 7, %bb5 ], [ 7, %bb7 ], [ 0, %bb10 ] ; <i32> [#uses=1]
- ret i32 %.0
-}
-
-declare arm_apcscc i32 @fseeko(%struct.FILE* nocapture, i64, i32) nounwind
-
-declare arm_apcscc i32 @fread(i8* noalias nocapture, i32, i32, %struct.FILE* noalias nocapture) nounwind
-
-declare arm_apcscc i64 @asl_core_ntohq(i64)
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/2009-12-17-pre-regalloc-taildup.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/2009-12-17-pre-regalloc-taildup.ll
deleted file mode 100644
index 2a5d9d6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/2009-12-17-pre-regalloc-taildup.ll
+++ /dev/null
@@ -1,66 +0,0 @@
-; RUN: llc -O3 < %s | FileCheck %s
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
-target triple = "thumbv7-apple-darwin10"
-
-; This test should not produce any spills, even when tail duplication creates lots of phi nodes.
-; CHECK-NOT: push
-; CHECK-NOT: pop
-; CHECK: bx lr
-
-@codetable.2928 = internal constant [5 x i8*] [i8* blockaddress(@interpret_threaded, %RETURN), i8* blockaddress(@interpret_threaded, %INCREMENT), i8* blockaddress(@interpret_threaded, %DECREMENT), i8* blockaddress(@interpret_threaded, %DOUBLE), i8* blockaddress(@interpret_threaded, %SWAPWORD)] ; <[5 x i8*]*> [#uses=5]
-@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 (i8*)* @interpret_threaded to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define arm_apcscc i32 @interpret_threaded(i8* nocapture %opcodes) nounwind readonly optsize {
-entry:
- %0 = load i8* %opcodes, align 1 ; <i8> [#uses=1]
- %1 = zext i8 %0 to i32 ; <i32> [#uses=1]
- %2 = getelementptr inbounds [5 x i8*]* @codetable.2928, i32 0, i32 %1 ; <i8**> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb.backedge, %entry
- %indvar = phi i32 [ %phitmp, %bb.backedge ], [ 1, %entry ] ; <i32> [#uses=2]
- %gotovar.22.0.in = phi i8** [ %gotovar.22.0.in.be, %bb.backedge ], [ %2, %entry ] ; <i8**> [#uses=1]
- %result.0 = phi i32 [ %result.0.be, %bb.backedge ], [ 0, %entry ] ; <i32> [#uses=6]
- %opcodes_addr.0 = getelementptr i8* %opcodes, i32 %indvar ; <i8*> [#uses=4]
- %gotovar.22.0 = load i8** %gotovar.22.0.in, align 4 ; <i8*> [#uses=1]
- indirectbr i8* %gotovar.22.0, [label %RETURN, label %INCREMENT, label %DECREMENT, label %DOUBLE, label %SWAPWORD]
-
-RETURN: ; preds = %bb
- ret i32 %result.0
-
-INCREMENT: ; preds = %bb
- %3 = add nsw i32 %result.0, 1 ; <i32> [#uses=1]
- %4 = load i8* %opcodes_addr.0, align 1 ; <i8> [#uses=1]
- %5 = zext i8 %4 to i32 ; <i32> [#uses=1]
- %6 = getelementptr inbounds [5 x i8*]* @codetable.2928, i32 0, i32 %5 ; <i8**> [#uses=1]
- br label %bb.backedge
-
-bb.backedge: ; preds = %SWAPWORD, %DOUBLE, %DECREMENT, %INCREMENT
- %gotovar.22.0.in.be = phi i8** [ %20, %SWAPWORD ], [ %14, %DOUBLE ], [ %10, %DECREMENT ], [ %6, %INCREMENT ] ; <i8**> [#uses=1]
- %result.0.be = phi i32 [ %17, %SWAPWORD ], [ %11, %DOUBLE ], [ %7, %DECREMENT ], [ %3, %INCREMENT ] ; <i32> [#uses=1]
- %phitmp = add i32 %indvar, 1 ; <i32> [#uses=1]
- br label %bb
-
-DECREMENT: ; preds = %bb
- %7 = add i32 %result.0, -1 ; <i32> [#uses=1]
- %8 = load i8* %opcodes_addr.0, align 1 ; <i8> [#uses=1]
- %9 = zext i8 %8 to i32 ; <i32> [#uses=1]
- %10 = getelementptr inbounds [5 x i8*]* @codetable.2928, i32 0, i32 %9 ; <i8**> [#uses=1]
- br label %bb.backedge
-
-DOUBLE: ; preds = %bb
- %11 = shl i32 %result.0, 1 ; <i32> [#uses=1]
- %12 = load i8* %opcodes_addr.0, align 1 ; <i8> [#uses=1]
- %13 = zext i8 %12 to i32 ; <i32> [#uses=1]
- %14 = getelementptr inbounds [5 x i8*]* @codetable.2928, i32 0, i32 %13 ; <i8**> [#uses=1]
- br label %bb.backedge
-
-SWAPWORD: ; preds = %bb
- %15 = shl i32 %result.0, 16 ; <i32> [#uses=1]
- %16 = ashr i32 %result.0, 16 ; <i32> [#uses=1]
- %17 = or i32 %15, %16 ; <i32> [#uses=1]
- %18 = load i8* %opcodes_addr.0, align 1 ; <i8> [#uses=1]
- %19 = zext i8 %18 to i32 ; <i32> [#uses=1]
- %20 = getelementptr inbounds [5 x i8*]* @codetable.2928, i32 0, i32 %19 ; <i8**> [#uses=1]
- br label %bb.backedge
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/2010-01-15-local-alloc-spill-physical.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/2010-01-15-local-alloc-spill-physical.ll
deleted file mode 100644
index d676369..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/2010-01-15-local-alloc-spill-physical.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -regalloc=local -relocation-model=pic | FileCheck %s
-
-target triple = "thumbv6-apple-darwin10"
-
-@fred = internal global i32 0 ; <i32*> [#uses=1]
-
-define arm_apcscc void @foo() nounwind {
-entry:
-; CHECK: str r0, [sp]
- %0 = call arm_apcscc i32 (...)* @bar() nounwind ; <i32> [#uses=1]
-; CHECK: blx _bar
-; CHECK: ldr r1, [sp]
- store i32 %0, i32* @fred, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare arm_apcscc i32 @bar(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/asmprinter-bug.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/asmprinter-bug.ll
deleted file mode 100644
index 1e3c070..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/asmprinter-bug.ll
+++ /dev/null
@@ -1,288 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv6-apple-darwin10 | grep rsbs | grep {#0}
-
- %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
- %struct.adpcm_state = type { i16, i8 }
-@stepsizeTable = internal constant [89 x i32] [i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 16, i32 17, i32 19, i32 21, i32 23, i32 25, i32 28, i32 31, i32 34, i32 37, i32 41, i32 45, i32 50, i32 55, i32 60, i32 66, i32 73, i32 80, i32 88, i32 97, i32 107, i32 118, i32 130, i32 143, i32 157, i32 173, i32 190, i32 209, i32 230, i32 253, i32 279, i32 307, i32 337, i32 371, i32 408, i32 449, i32 494, i32 544, i32 598, i32 658, i32 724, i32 796, i32 876, i32 963, i32 1060, i32 1166, i32 1282, i32 1411, i32 1552, i32 1707, i32 1878, i32 2066, i32 2272, i32 2499, i32 2749, i32 3024, i32 3327, i32 3660, i32 4026, i32 4428, i32 4871, i32 5358, i32 5894, i32 6484, i32 7132, i32 7845, i32 8630, i32 9493, i32 10442, i32 11487, i32 12635, i32 13899, i32 15289, i32 16818, i32 18500, i32 20350, i32 22385, i32 24623, i32 27086, i32 29794, i32 32767] ; <[89 x i32]*> [#uses=4]
-@indexTable = internal constant [16 x i32] [i32 -1, i32 -1, i32 -1, i32 -1, i32 2, i32 4, i32 6, i32 8, i32 -1, i32 -1, i32 -1, i32 -1, i32 2, i32 4, i32 6, i32 8] ; <[16 x i32]*> [#uses=2]
-@abuf = common global [500 x i8] zeroinitializer ; <[500 x i8]*> [#uses=1]
-@.str = private constant [11 x i8] c"input file\00", section "__TEXT,__cstring,cstring_literals", align 1 ; <[11 x i8]*> [#uses=1]
-@sbuf = common global [1000 x i16] zeroinitializer ; <[1000 x i16]*> [#uses=1]
-@state = common global %struct.adpcm_state zeroinitializer ; <%struct.adpcm_state*> [#uses=3]
-@__stderrp = external global %struct.FILE* ; <%struct.FILE**> [#uses=1]
-@.str1 = private constant [28 x i8] c"Final valprev=%d, index=%d\0A\00", section "__TEXT,__cstring,cstring_literals", align 1 ; <[28 x i8]*> [#uses=1]
-
-define arm_apcscc void @adpcm_coder(i16* nocapture %indata, i8* nocapture %outdata, i32 %len, %struct.adpcm_state* nocapture %state) nounwind {
-entry:
- %0 = getelementptr %struct.adpcm_state* %state, i32 0, i32 0 ; <i16*> [#uses=2]
- %1 = load i16* %0, align 2 ; <i16> [#uses=1]
- %2 = sext i16 %1 to i32 ; <i32> [#uses=2]
- %3 = getelementptr %struct.adpcm_state* %state, i32 0, i32 1 ; <i8*> [#uses=2]
- %4 = load i8* %3, align 2 ; <i8> [#uses=1]
- %5 = sext i8 %4 to i32 ; <i32> [#uses=3]
- %6 = getelementptr [89 x i32]* @stepsizeTable, i32 0, i32 %5 ; <i32*> [#uses=1]
- %7 = load i32* %6, align 4 ; <i32> [#uses=1]
- %8 = icmp sgt i32 %len, 0 ; <i1> [#uses=1]
- br i1 %8, label %bb, label %bb27
-
-bb: ; preds = %bb25, %entry
- %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb25 ] ; <i32> [#uses=2]
- %outp.136 = phi i8* [ %outdata, %entry ], [ %outp.0, %bb25 ] ; <i8*> [#uses=3]
- %bufferstep.035 = phi i32 [ 1, %entry ], [ %tmp, %bb25 ] ; <i32> [#uses=3]
- %outputbuffer.134 = phi i32 [ undef, %entry ], [ %outputbuffer.0, %bb25 ] ; <i32> [#uses=2]
- %index.033 = phi i32 [ %5, %entry ], [ %index.2, %bb25 ] ; <i32> [#uses=1]
- %valpred.132 = phi i32 [ %2, %entry ], [ %valpred.2, %bb25 ] ; <i32> [#uses=2]
- %step.031 = phi i32 [ %7, %entry ], [ %36, %bb25 ] ; <i32> [#uses=5]
- %inp.038 = getelementptr i16* %indata, i32 %indvar ; <i16*> [#uses=1]
- %9 = load i16* %inp.038, align 2 ; <i16> [#uses=1]
- %10 = sext i16 %9 to i32 ; <i32> [#uses=1]
- %11 = sub i32 %10, %valpred.132 ; <i32> [#uses=3]
- %12 = icmp slt i32 %11, 0 ; <i1> [#uses=1]
- %iftmp.1.0 = select i1 %12, i32 8, i32 0 ; <i32> [#uses=2]
- %13 = sub i32 0, %11 ; <i32> [#uses=1]
- %14 = icmp eq i32 %iftmp.1.0, 0 ; <i1> [#uses=2]
- %. = select i1 %14, i32 %11, i32 %13 ; <i32> [#uses=2]
- %15 = ashr i32 %step.031, 3 ; <i32> [#uses=1]
- %16 = icmp slt i32 %., %step.031 ; <i1> [#uses=2]
- %delta.0 = select i1 %16, i32 0, i32 4 ; <i32> [#uses=2]
- %17 = select i1 %16, i32 0, i32 %step.031 ; <i32> [#uses=2]
- %diff.1 = sub i32 %., %17 ; <i32> [#uses=2]
- %18 = ashr i32 %step.031, 1 ; <i32> [#uses=2]
- %19 = icmp slt i32 %diff.1, %18 ; <i1> [#uses=2]
- %20 = or i32 %delta.0, 2 ; <i32> [#uses=1]
- %21 = select i1 %19, i32 %delta.0, i32 %20 ; <i32> [#uses=1]
- %22 = select i1 %19, i32 0, i32 %18 ; <i32> [#uses=2]
- %diff.2 = sub i32 %diff.1, %22 ; <i32> [#uses=1]
- %23 = ashr i32 %step.031, 2 ; <i32> [#uses=2]
- %24 = icmp slt i32 %diff.2, %23 ; <i1> [#uses=2]
- %25 = zext i1 %24 to i32 ; <i32> [#uses=1]
- %26 = select i1 %24, i32 0, i32 %23 ; <i32> [#uses=1]
- %vpdiff.0 = add i32 %17, %15 ; <i32> [#uses=1]
- %vpdiff.1 = add i32 %vpdiff.0, %22 ; <i32> [#uses=1]
- %vpdiff.2 = add i32 %vpdiff.1, %26 ; <i32> [#uses=2]
- %tmp30 = sub i32 0, %vpdiff.2 ; <i32> [#uses=1]
- %valpred.0.p = select i1 %14, i32 %vpdiff.2, i32 %tmp30 ; <i32> [#uses=1]
- %valpred.0 = add i32 %valpred.0.p, %valpred.132 ; <i32> [#uses=3]
- %27 = icmp sgt i32 %valpred.0, 32767 ; <i1> [#uses=1]
- br i1 %27, label %bb18, label %bb16
-
-bb16: ; preds = %bb
- %28 = icmp slt i32 %valpred.0, -32768 ; <i1> [#uses=1]
- br i1 %28, label %bb17, label %bb18
-
-bb17: ; preds = %bb16
- br label %bb18
-
-bb18: ; preds = %bb17, %bb16, %bb
- %valpred.2 = phi i32 [ -32768, %bb17 ], [ 32767, %bb ], [ %valpred.0, %bb16 ] ; <i32> [#uses=2]
- %delta.1 = or i32 %21, %iftmp.1.0 ; <i32> [#uses=1]
- %delta.2 = or i32 %delta.1, %25 ; <i32> [#uses=1]
- %29 = xor i32 %delta.2, 1 ; <i32> [#uses=3]
- %30 = getelementptr [16 x i32]* @indexTable, i32 0, i32 %29 ; <i32*> [#uses=1]
- %31 = load i32* %30, align 4 ; <i32> [#uses=1]
- %32 = add i32 %31, %index.033 ; <i32> [#uses=2]
- %33 = icmp slt i32 %32, 0 ; <i1> [#uses=1]
- %index.1 = select i1 %33, i32 0, i32 %32 ; <i32> [#uses=2]
- %34 = icmp sgt i32 %index.1, 88 ; <i1> [#uses=1]
- %index.2 = select i1 %34, i32 88, i32 %index.1 ; <i32> [#uses=3]
- %35 = getelementptr [89 x i32]* @stepsizeTable, i32 0, i32 %index.2 ; <i32*> [#uses=1]
- %36 = load i32* %35, align 4 ; <i32> [#uses=1]
- %37 = icmp eq i32 %bufferstep.035, 0 ; <i1> [#uses=1]
- br i1 %37, label %bb24, label %bb23
-
-bb23: ; preds = %bb18
- %38 = shl i32 %29, 4 ; <i32> [#uses=1]
- %39 = and i32 %38, 240 ; <i32> [#uses=1]
- br label %bb25
-
-bb24: ; preds = %bb18
- %40 = trunc i32 %29 to i8 ; <i8> [#uses=1]
- %41 = and i8 %40, 15 ; <i8> [#uses=1]
- %42 = trunc i32 %outputbuffer.134 to i8 ; <i8> [#uses=1]
- %43 = or i8 %41, %42 ; <i8> [#uses=1]
- store i8 %43, i8* %outp.136, align 1
- %44 = getelementptr i8* %outp.136, i32 1 ; <i8*> [#uses=1]
- br label %bb25
-
-bb25: ; preds = %bb24, %bb23
- %outputbuffer.0 = phi i32 [ %39, %bb23 ], [ %outputbuffer.134, %bb24 ] ; <i32> [#uses=2]
- %outp.0 = phi i8* [ %outp.136, %bb23 ], [ %44, %bb24 ] ; <i8*> [#uses=2]
- %tmp = xor i32 %bufferstep.035, 1 ; <i32> [#uses=1]
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %len ; <i1> [#uses=1]
- br i1 %exitcond, label %bb26.bb27_crit_edge, label %bb
-
-bb26.bb27_crit_edge: ; preds = %bb25
- %phitmp44 = icmp eq i32 %bufferstep.035, 1 ; <i1> [#uses=1]
- br label %bb27
-
-bb27: ; preds = %bb26.bb27_crit_edge, %entry
- %outp.1.lcssa = phi i8* [ %outp.0, %bb26.bb27_crit_edge ], [ %outdata, %entry ] ; <i8*> [#uses=1]
- %bufferstep.0.lcssa = phi i1 [ %phitmp44, %bb26.bb27_crit_edge ], [ false, %entry ] ; <i1> [#uses=1]
- %outputbuffer.1.lcssa = phi i32 [ %outputbuffer.0, %bb26.bb27_crit_edge ], [ undef, %entry ] ; <i32> [#uses=1]
- %index.0.lcssa = phi i32 [ %index.2, %bb26.bb27_crit_edge ], [ %5, %entry ] ; <i32> [#uses=1]
- %valpred.1.lcssa = phi i32 [ %valpred.2, %bb26.bb27_crit_edge ], [ %2, %entry ] ; <i32> [#uses=1]
- br i1 %bufferstep.0.lcssa, label %bb28, label %bb29
-
-bb28: ; preds = %bb27
- %45 = trunc i32 %outputbuffer.1.lcssa to i8 ; <i8> [#uses=1]
- store i8 %45, i8* %outp.1.lcssa, align 1
- br label %bb29
-
-bb29: ; preds = %bb28, %bb27
- %46 = trunc i32 %valpred.1.lcssa to i16 ; <i16> [#uses=1]
- store i16 %46, i16* %0, align 2
- %47 = trunc i32 %index.0.lcssa to i8 ; <i8> [#uses=1]
- store i8 %47, i8* %3, align 2
- ret void
-}
-
-define arm_apcscc void @adpcm_decoder(i8* nocapture %indata, i16* nocapture %outdata, i32 %len, %struct.adpcm_state* nocapture %state) nounwind {
-entry:
- %0 = getelementptr %struct.adpcm_state* %state, i32 0, i32 0 ; <i16*> [#uses=2]
- %1 = load i16* %0, align 2 ; <i16> [#uses=1]
- %2 = sext i16 %1 to i32 ; <i32> [#uses=2]
- %3 = getelementptr %struct.adpcm_state* %state, i32 0, i32 1 ; <i8*> [#uses=2]
- %4 = load i8* %3, align 2 ; <i8> [#uses=1]
- %5 = sext i8 %4 to i32 ; <i32> [#uses=3]
- %6 = getelementptr [89 x i32]* @stepsizeTable, i32 0, i32 %5 ; <i32*> [#uses=1]
- %7 = load i32* %6, align 4 ; <i32> [#uses=1]
- %8 = icmp sgt i32 %len, 0 ; <i1> [#uses=1]
- br i1 %8, label %bb, label %bb22
-
-bb: ; preds = %bb20, %entry
- %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb20 ] ; <i32> [#uses=2]
- %inp.131 = phi i8* [ %indata, %entry ], [ %inp.0, %bb20 ] ; <i8*> [#uses=3]
- %bufferstep.028 = phi i32 [ 0, %entry ], [ %tmp, %bb20 ] ; <i32> [#uses=2]
- %inputbuffer.127 = phi i32 [ undef, %entry ], [ %inputbuffer.0, %bb20 ] ; <i32> [#uses=2]
- %index.026 = phi i32 [ %5, %entry ], [ %index.2, %bb20 ] ; <i32> [#uses=1]
- %valpred.125 = phi i32 [ %2, %entry ], [ %valpred.2, %bb20 ] ; <i32> [#uses=1]
- %step.024 = phi i32 [ %7, %entry ], [ %35, %bb20 ] ; <i32> [#uses=4]
- %outp.030 = getelementptr i16* %outdata, i32 %indvar ; <i16*> [#uses=1]
- %9 = icmp eq i32 %bufferstep.028, 0 ; <i1> [#uses=1]
- br i1 %9, label %bb2, label %bb3
-
-bb2: ; preds = %bb
- %10 = load i8* %inp.131, align 1 ; <i8> [#uses=1]
- %11 = sext i8 %10 to i32 ; <i32> [#uses=2]
- %12 = getelementptr i8* %inp.131, i32 1 ; <i8*> [#uses=1]
- %13 = ashr i32 %11, 4 ; <i32> [#uses=1]
- br label %bb3
-
-bb3: ; preds = %bb2, %bb
- %inputbuffer.0 = phi i32 [ %11, %bb2 ], [ %inputbuffer.127, %bb ] ; <i32> [#uses=1]
- %delta.0.in = phi i32 [ %13, %bb2 ], [ %inputbuffer.127, %bb ] ; <i32> [#uses=5]
- %inp.0 = phi i8* [ %12, %bb2 ], [ %inp.131, %bb ] ; <i8*> [#uses=1]
- %delta.0 = and i32 %delta.0.in, 15 ; <i32> [#uses=1]
- %tmp = xor i32 %bufferstep.028, 1 ; <i32> [#uses=1]
- %14 = getelementptr [16 x i32]* @indexTable, i32 0, i32 %delta.0 ; <i32*> [#uses=1]
- %15 = load i32* %14, align 4 ; <i32> [#uses=1]
- %16 = add i32 %15, %index.026 ; <i32> [#uses=2]
- %17 = icmp slt i32 %16, 0 ; <i1> [#uses=1]
- %index.1 = select i1 %17, i32 0, i32 %16 ; <i32> [#uses=2]
- %18 = icmp sgt i32 %index.1, 88 ; <i1> [#uses=1]
- %index.2 = select i1 %18, i32 88, i32 %index.1 ; <i32> [#uses=3]
- %19 = and i32 %delta.0.in, 8 ; <i32> [#uses=1]
- %20 = ashr i32 %step.024, 3 ; <i32> [#uses=1]
- %21 = and i32 %delta.0.in, 4 ; <i32> [#uses=1]
- %22 = icmp eq i32 %21, 0 ; <i1> [#uses=1]
- %23 = select i1 %22, i32 0, i32 %step.024 ; <i32> [#uses=1]
- %vpdiff.0 = add i32 %23, %20 ; <i32> [#uses=2]
- %24 = and i32 %delta.0.in, 2 ; <i32> [#uses=1]
- %25 = icmp eq i32 %24, 0 ; <i1> [#uses=1]
- br i1 %25, label %bb11, label %bb10
-
-bb10: ; preds = %bb3
- %26 = ashr i32 %step.024, 1 ; <i32> [#uses=1]
- %27 = add i32 %vpdiff.0, %26 ; <i32> [#uses=1]
- br label %bb11
-
-bb11: ; preds = %bb10, %bb3
- %vpdiff.1 = phi i32 [ %27, %bb10 ], [ %vpdiff.0, %bb3 ] ; <i32> [#uses=2]
- %28 = and i32 %delta.0.in, 1 ; <i32> [#uses=1]
- %toBool = icmp eq i32 %28, 0 ; <i1> [#uses=1]
- br i1 %toBool, label %bb13, label %bb12
-
-bb12: ; preds = %bb11
- %29 = ashr i32 %step.024, 2 ; <i32> [#uses=1]
- %30 = add i32 %vpdiff.1, %29 ; <i32> [#uses=1]
- br label %bb13
-
-bb13: ; preds = %bb12, %bb11
- %vpdiff.2 = phi i32 [ %30, %bb12 ], [ %vpdiff.1, %bb11 ] ; <i32> [#uses=2]
- %31 = icmp eq i32 %19, 0 ; <i1> [#uses=1]
- %tmp23 = sub i32 0, %vpdiff.2 ; <i32> [#uses=1]
- %valpred.0.p = select i1 %31, i32 %vpdiff.2, i32 %tmp23 ; <i32> [#uses=1]
- %valpred.0 = add i32 %valpred.0.p, %valpred.125 ; <i32> [#uses=3]
- %32 = icmp sgt i32 %valpred.0, 32767 ; <i1> [#uses=1]
- br i1 %32, label %bb20, label %bb18
-
-bb18: ; preds = %bb13
- %33 = icmp slt i32 %valpred.0, -32768 ; <i1> [#uses=1]
- br i1 %33, label %bb19, label %bb20
-
-bb19: ; preds = %bb18
- br label %bb20
-
-bb20: ; preds = %bb19, %bb18, %bb13
- %valpred.2 = phi i32 [ -32768, %bb19 ], [ 32767, %bb13 ], [ %valpred.0, %bb18 ] ; <i32> [#uses=3]
- %34 = getelementptr [89 x i32]* @stepsizeTable, i32 0, i32 %index.2 ; <i32*> [#uses=1]
- %35 = load i32* %34, align 4 ; <i32> [#uses=1]
- %36 = trunc i32 %valpred.2 to i16 ; <i16> [#uses=1]
- store i16 %36, i16* %outp.030, align 2
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %len ; <i1> [#uses=1]
- br i1 %exitcond, label %bb22, label %bb
-
-bb22: ; preds = %bb20, %entry
- %index.0.lcssa = phi i32 [ %5, %entry ], [ %index.2, %bb20 ] ; <i32> [#uses=1]
- %valpred.1.lcssa = phi i32 [ %2, %entry ], [ %valpred.2, %bb20 ] ; <i32> [#uses=1]
- %37 = trunc i32 %valpred.1.lcssa to i16 ; <i16> [#uses=1]
- store i16 %37, i16* %0, align 2
- %38 = trunc i32 %index.0.lcssa to i8 ; <i8> [#uses=1]
- store i8 %38, i8* %3, align 2
- ret void
-}
-
-define arm_apcscc i32 @main() nounwind {
-entry:
- br label %bb
-
-bb: ; preds = %bb3, %entry
- %0 = tail call arm_apcscc i32 (...)* @read(i32 0, i8* getelementptr ([500 x i8]* @abuf, i32 0, i32 0), i32 500) nounwind ; <i32> [#uses=4]
- %1 = icmp slt i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %bb1, label %bb2
-
-bb1: ; preds = %bb
- tail call arm_apcscc void @perror(i8* getelementptr ([11 x i8]* @.str, i32 0, i32 0)) nounwind
- ret i32 1
-
-bb2: ; preds = %bb
- %2 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %2, label %bb4, label %bb3
-
-bb3: ; preds = %bb2
- %3 = shl i32 %0, 1 ; <i32> [#uses=1]
- tail call arm_apcscc void @adpcm_decoder(i8* getelementptr ([500 x i8]* @abuf, i32 0, i32 0), i16* getelementptr ([1000 x i16]* @sbuf, i32 0, i32 0), i32 %3, %struct.adpcm_state* @state) nounwind
- %4 = shl i32 %0, 2 ; <i32> [#uses=1]
- %5 = tail call arm_apcscc i32 (...)* @write(i32 1, i16* getelementptr ([1000 x i16]* @sbuf, i32 0, i32 0), i32 %4) nounwind ; <i32> [#uses=0]
- br label %bb
-
-bb4: ; preds = %bb2
- %6 = load %struct.FILE** @__stderrp, align 4 ; <%struct.FILE*> [#uses=1]
- %7 = load i16* getelementptr (%struct.adpcm_state* @state, i32 0, i32 0), align 4 ; <i16> [#uses=1]
- %8 = sext i16 %7 to i32 ; <i32> [#uses=1]
- %9 = load i8* getelementptr (%struct.adpcm_state* @state, i32 0, i32 1), align 2 ; <i8> [#uses=1]
- %10 = sext i8 %9 to i32 ; <i32> [#uses=1]
- %11 = tail call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %6, i8* getelementptr ([28 x i8]* @.str1, i32 0, i32 0), i32 %8, i32 %10) nounwind ; <i32> [#uses=0]
- ret i32 0
-}
-
-declare arm_apcscc i32 @read(...)
-
-declare arm_apcscc void @perror(i8* nocapture) nounwind
-
-declare arm_apcscc i32 @write(...)
-
-declare arm_apcscc i32 @fprintf(%struct.FILE* nocapture, i8* nocapture, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/dg.exp b/libclamav/c++/llvm/test/CodeGen/Thumb/dg.exp
deleted file mode 100644
index 3ff359a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/dg.exp
+++ /dev/null
@@ -1,5 +0,0 @@
-load_lib llvm.exp
-
-if { [llvm_supports_target ARM] } {
- RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/dyn-stackalloc.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/dyn-stackalloc.ll
deleted file mode 100644
index acfdc91..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/dyn-stackalloc.ll
+++ /dev/null
@@ -1,59 +0,0 @@
-; RUN: llc < %s -march=thumb | not grep {ldr sp}
-; RUN: llc < %s -mtriple=thumb-apple-darwin | \
-; RUN: not grep {sub.*r7}
-; RUN: llc < %s -march=thumb | grep 4294967280
-
- %struct.state = type { i32, %struct.info*, float**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, i64, i64, i64, i64, i8* }
- %struct.info = type { i32, i32, i32, i32, i32, i32, i32, i8* }
-
-define void @t1(%struct.state* %v) {
- %tmp6 = load i32* null
- %tmp8 = alloca float, i32 %tmp6
- store i32 1, i32* null
- br i1 false, label %bb123.preheader, label %return
-
-bb123.preheader:
- br i1 false, label %bb43, label %return
-
-bb43:
- call fastcc void @f1( float* %tmp8, float* null, i32 0 )
- %tmp70 = load i32* null
- %tmp85 = getelementptr float* %tmp8, i32 0
- call fastcc void @f2( float* null, float* null, float* %tmp85, i32 %tmp70 )
- ret void
-
-return:
- ret void
-}
-
-declare fastcc void @f1(float*, float*, i32)
-
-declare fastcc void @f2(float*, float*, float*, i32)
-
- %struct.comment = type { i8**, i32*, i32, i8* }
-@str215 = external global [2 x i8]
-
-define void @t2(%struct.comment* %vc, i8* %tag, i8* %contents) {
- %tmp1 = call i32 @strlen( i8* %tag )
- %tmp3 = call i32 @strlen( i8* %contents )
- %tmp4 = add i32 %tmp1, 2
- %tmp5 = add i32 %tmp4, %tmp3
- %tmp6 = alloca i8, i32 %tmp5
- %tmp9 = call i8* @strcpy( i8* %tmp6, i8* %tag )
- %tmp6.len = call i32 @strlen( i8* %tmp6 )
- %tmp6.indexed = getelementptr i8* %tmp6, i32 %tmp6.len
- call void @llvm.memcpy.i32( i8* %tmp6.indexed, i8* getelementptr ([2 x i8]* @str215, i32 0, i32 0), i32 2, i32 1 )
- %tmp15 = call i8* @strcat( i8* %tmp6, i8* %contents )
- call fastcc void @comment_add( %struct.comment* %vc, i8* %tmp6 )
- ret void
-}
-
-declare i32 @strlen(i8*)
-
-declare i8* @strcat(i8*, i8*)
-
-declare fastcc void @comment_add(%struct.comment*, i8*)
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
-
-declare i8* @strcpy(i8*, i8*)
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/fpconv.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/fpconv.ll
deleted file mode 100644
index 7da36dd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/fpconv.ll
+++ /dev/null
@@ -1,61 +0,0 @@
-; RUN: llc < %s -march=thumb
-
-define float @f1(double %x) {
-entry:
- %tmp1 = fptrunc double %x to float ; <float> [#uses=1]
- ret float %tmp1
-}
-
-define double @f2(float %x) {
-entry:
- %tmp1 = fpext float %x to double ; <double> [#uses=1]
- ret double %tmp1
-}
-
-define i32 @f3(float %x) {
-entry:
- %tmp = fptosi float %x to i32 ; <i32> [#uses=1]
- ret i32 %tmp
-}
-
-define i32 @f4(float %x) {
-entry:
- %tmp = fptoui float %x to i32 ; <i32> [#uses=1]
- ret i32 %tmp
-}
-
-define i32 @f5(double %x) {
-entry:
- %tmp = fptosi double %x to i32 ; <i32> [#uses=1]
- ret i32 %tmp
-}
-
-define i32 @f6(double %x) {
-entry:
- %tmp = fptoui double %x to i32 ; <i32> [#uses=1]
- ret i32 %tmp
-}
-
-define float @f7(i32 %a) {
-entry:
- %tmp = sitofp i32 %a to float ; <float> [#uses=1]
- ret float %tmp
-}
-
-define double @f8(i32 %a) {
-entry:
- %tmp = sitofp i32 %a to double ; <double> [#uses=1]
- ret double %tmp
-}
-
-define float @f9(i32 %a) {
-entry:
- %tmp = uitofp i32 %a to float ; <float> [#uses=1]
- ret float %tmp
-}
-
-define double @f10(i32 %a) {
-entry:
- %tmp = uitofp i32 %a to double ; <double> [#uses=1]
- ret double %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/fpow.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/fpow.ll
deleted file mode 100644
index be3dc0b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/fpow.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=thumb
-
-define double @t(double %x, double %y) nounwind optsize {
-entry:
- %0 = tail call double @llvm.pow.f64( double %x, double %y ) ; <double> [#uses=1]
- ret double %0
-}
-
-declare double @llvm.pow.f64(double, double) nounwind readonly
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/frame_thumb.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/frame_thumb.ll
deleted file mode 100644
index 0cac755..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/frame_thumb.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=thumb-apple-darwin \
-; RUN: -disable-fp-elim | not grep {r11}
-; RUN: llc < %s -mtriple=thumb-linux-gnueabi \
-; RUN: -disable-fp-elim | not grep {r11}
-
-define i32 @f() {
-entry:
- ret i32 10
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/iabs.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/iabs.ll
deleted file mode 100644
index d7cdcd8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/iabs.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=thumb -stats |& \
-; RUN: grep {4 .*Number of machine instrs printed}
-
-;; Integer absolute value, should produce something as good as:
-;; Thumb:
-;; asr r2, r0, #31
-;; add r0, r0, r2
-;; eor r0, r2
-;; bx lr
-
-define i32 @test(i32 %a) {
- %tmp1neg = sub i32 0, %a
- %b = icmp sgt i32 %a, -1
- %abs = select i1 %b, i32 %a, i32 %tmp1neg
- ret i32 %abs
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/inlineasm-imm-thumb.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/inlineasm-imm-thumb.ll
deleted file mode 100644
index 5c8a52a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/inlineasm-imm-thumb.ll
+++ /dev/null
@@ -1,43 +0,0 @@
-; RUN: llc < %s -march=thumb
-
-; Test Thumb-mode "I" constraint, for ADD immediate.
-define i32 @testI(i32 %x) {
- %y = call i32 asm "add $0, $1, $2", "=r,r,I"( i32 %x, i32 255 ) nounwind
- ret i32 %y
-}
-
-; Test Thumb-mode "J" constraint, for negated ADD immediates.
-define void @testJ() {
- tail call void asm sideeffect ".word $0", "J"( i32 -255 ) nounwind
- ret void
-}
-
-; Test Thumb-mode "K" constraint, for compatibility with GCC's internal use.
-define void @testK() {
- tail call void asm sideeffect ".word $0", "K"( i32 65280 ) nounwind
- ret void
-}
-
-; Test Thumb-mode "L" constraint, for 3-operand ADD immediates.
-define i32 @testL(i32 %x) {
- %y = call i32 asm "add $0, $1, $2", "=r,r,L"( i32 %x, i32 -7 ) nounwind
- ret i32 %y
-}
-
-; Test Thumb-mode "M" constraint, for "ADD r = sp + imm".
-define i32 @testM() {
- %y = call i32 asm "add $0, sp, $1", "=r,M"( i32 1020 ) nounwind
- ret i32 %y
-}
-
-; Test Thumb-mode "N" constraint, for values between 0 and 31.
-define i32 @testN(i32 %x) {
- %y = call i32 asm "lsl $0, $1, $2", "=r,r,N"( i32 %x, i32 31 ) nounwind
- ret i32 %y
-}
-
-; Test Thumb-mode "O" constraint, for "ADD sp = sp + imm".
-define void @testO() {
- tail call void asm sideeffect "add sp, sp, $0; add sp, sp, $1", "O,O"( i32 -508, i32 508 ) nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/ispositive.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/ispositive.ll
deleted file mode 100644
index eac3ef2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/ispositive.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=thumb | FileCheck %s
-
-define i32 @test1(i32 %X) {
-entry:
-; CHECK: test1:
-; CHECK: lsrs r0, r0, #31
- icmp slt i32 %X, 0 ; <i1>:0 [#uses=1]
- zext i1 %0 to i32 ; <i32>:1 [#uses=1]
- ret i32 %1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/large-stack.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/large-stack.ll
deleted file mode 100644
index 02de36a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/large-stack.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=thumb | grep {ldr.*LCP} | count 5
-
-define void @test1() {
- %tmp = alloca [ 64 x i32 ] , align 4
- ret void
-}
-
-define void @test2() {
- %tmp = alloca [ 4168 x i8 ] , align 4
- ret void
-}
-
-define i32 @test3() {
- %retval = alloca i32, align 4
- %tmp = alloca i32, align 4
- %a = alloca [805306369 x i8], align 16
- store i32 0, i32* %tmp
- %tmp1 = load i32* %tmp
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/ldr_ext.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/ldr_ext.ll
deleted file mode 100644
index 9a28124..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/ldr_ext.ll
+++ /dev/null
@@ -1,57 +0,0 @@
-; RUN: llc < %s -march=thumb | FileCheck %s -check-prefix=V5
-; RUN: llc < %s -march=thumb -mattr=+v6 | FileCheck %s -check-prefix=V6
-
-; rdar://7176514
-
-define i32 @test1(i8* %t1) nounwind {
-; V5: ldrb
-
-; V6: ldrb
- %tmp.u = load i8* %t1
- %tmp1.s = zext i8 %tmp.u to i32
- ret i32 %tmp1.s
-}
-
-define i32 @test2(i16* %t1) nounwind {
-; V5: ldrh
-
-; V6: ldrh
- %tmp.u = load i16* %t1
- %tmp1.s = zext i16 %tmp.u to i32
- ret i32 %tmp1.s
-}
-
-define i32 @test3(i8* %t0) nounwind {
-; V5: ldrb
-; V5: lsls
-; V5: asrs
-
-; V6: ldrb
-; V6: sxtb
- %tmp.s = load i8* %t0
- %tmp1.s = sext i8 %tmp.s to i32
- ret i32 %tmp1.s
-}
-
-define i32 @test4(i16* %t0) nounwind {
-; V5: ldrh
-; V5: lsls
-; V5: asrs
-
-; V6: ldrh
-; V6: sxth
- %tmp.s = load i16* %t0
- %tmp1.s = sext i16 %tmp.s to i32
- ret i32 %tmp1.s
-}
-
-define i32 @test5() nounwind {
-; V5: movs r0, #0
-; V5: ldrsh
-
-; V6: movs r0, #0
-; V6: ldrsh
- %tmp.s = load i16* null
- %tmp1.s = sext i16 %tmp.s to i32
- ret i32 %tmp1.s
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/ldr_frame.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/ldr_frame.ll
deleted file mode 100644
index 81782cd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/ldr_frame.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s -march=thumb | FileCheck %s
-
-define i32 @f1() {
-; CHECK: f1:
-; CHECK: ldr r0
- %buf = alloca [32 x i32], align 4
- %tmp = getelementptr [32 x i32]* %buf, i32 0, i32 0
- %tmp1 = load i32* %tmp
- ret i32 %tmp1
-}
-
-define i32 @f2() {
-; CHECK: f2:
-; CHECK: mov r0
-; CHECK: ldrb
- %buf = alloca [32 x i8], align 4
- %tmp = getelementptr [32 x i8]* %buf, i32 0, i32 0
- %tmp1 = load i8* %tmp
- %tmp2 = zext i8 %tmp1 to i32
- ret i32 %tmp2
-}
-
-define i32 @f3() {
-; CHECK: f3:
-; CHECK: ldr r0
- %buf = alloca [32 x i32], align 4
- %tmp = getelementptr [32 x i32]* %buf, i32 0, i32 32
- %tmp1 = load i32* %tmp
- ret i32 %tmp1
-}
-
-define i32 @f4() {
-; CHECK: f4:
-; CHECK: mov r0
-; CHECK: ldrb
- %buf = alloca [32 x i8], align 4
- %tmp = getelementptr [32 x i8]* %buf, i32 0, i32 2
- %tmp1 = load i8* %tmp
- %tmp2 = zext i8 %tmp1 to i32
- ret i32 %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/long-setcc.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/long-setcc.ll
deleted file mode 100644
index 8f2d98f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/long-setcc.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=thumb | grep cmp | count 1
-
-
-define i1 @t1(i64 %x) {
- %B = icmp slt i64 %x, 0
- ret i1 %B
-}
-
-define i1 @t2(i64 %x) {
- %tmp = icmp ult i64 %x, 4294967296
- ret i1 %tmp
-}
-
-define i1 @t3(i32 %x) {
- %tmp = icmp ugt i32 %x, -1
- ret i1 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/long.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/long.ll
deleted file mode 100644
index e3ef44a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/long.ll
+++ /dev/null
@@ -1,76 +0,0 @@
-; RUN: llc < %s -march=thumb | \
-; RUN: grep mvn | count 1
-; RUN: llc < %s -march=thumb | \
-; RUN: grep adc | count 1
-; RUN: llc < %s -march=thumb | \
-; RUN: grep sbc | count 1
-; RUN: llc < %s -march=thumb | grep __muldi3
-
-define i64 @f1() {
-entry:
- ret i64 0
-}
-
-define i64 @f2() {
-entry:
- ret i64 1
-}
-
-define i64 @f3() {
-entry:
- ret i64 2147483647
-}
-
-define i64 @f4() {
-entry:
- ret i64 2147483648
-}
-
-define i64 @f5() {
-entry:
- ret i64 9223372036854775807
-}
-
-define i64 @f6(i64 %x, i64 %y) {
-entry:
- %tmp1 = add i64 %y, 1 ; <i64> [#uses=1]
- ret i64 %tmp1
-}
-
-define void @f7() {
-entry:
- %tmp = call i64 @f8( ) ; <i64> [#uses=0]
- ret void
-}
-
-declare i64 @f8()
-
-define i64 @f9(i64 %a, i64 %b) {
-entry:
- %tmp = sub i64 %a, %b ; <i64> [#uses=1]
- ret i64 %tmp
-}
-
-define i64 @f(i32 %a, i32 %b) {
-entry:
- %tmp = sext i32 %a to i64 ; <i64> [#uses=1]
- %tmp1 = sext i32 %b to i64 ; <i64> [#uses=1]
- %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1]
- ret i64 %tmp2
-}
-
-define i64 @g(i32 %a, i32 %b) {
-entry:
- %tmp = zext i32 %a to i64 ; <i64> [#uses=1]
- %tmp1 = zext i32 %b to i64 ; <i64> [#uses=1]
- %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1]
- ret i64 %tmp2
-}
-
-define i64 @f10() {
-entry:
- %a = alloca i64, align 8 ; <i64*> [#uses=1]
- %retval = load i64* %a ; <i64> [#uses=1]
- ret i64 %retval
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/long_shift.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/long_shift.ll
deleted file mode 100644
index 2431714..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/long_shift.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -march=thumb
-
-define i64 @f0(i64 %A, i64 %B) {
- %tmp = bitcast i64 %A to i64
- %tmp2 = lshr i64 %B, 1
- %tmp3 = sub i64 %tmp, %tmp2
- ret i64 %tmp3
-}
-
-define i32 @f1(i64 %x, i64 %y) {
- %a = shl i64 %x, %y
- %b = trunc i64 %a to i32
- ret i32 %b
-}
-
-define i32 @f2(i64 %x, i64 %y) {
- %a = ashr i64 %x, %y
- %b = trunc i64 %a to i32
- ret i32 %b
-}
-
-define i32 @f3(i64 %x, i64 %y) {
- %a = lshr i64 %x, %y
- %b = trunc i64 %a to i32
- ret i32 %b
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/machine-licm.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/machine-licm.ll
deleted file mode 100644
index dae1412..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/machine-licm.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s -mtriple=thumb-apple-darwin -relocation-model=pic -disable-fp-elim | FileCheck %s
-; rdar://7353541
-; rdar://7354376
-
-; The generated code is no where near ideal. It's not recognizing the two
-; constantpool entries being loaded can be merged into one.
-
- at GV = external global i32 ; <i32*> [#uses=2]
-
-define arm_apcscc void @t(i32* nocapture %vals, i32 %c) nounwind {
-entry:
-; CHECK: t:
- %0 = icmp eq i32 %c, 0 ; <i1> [#uses=1]
- br i1 %0, label %return, label %bb.nph
-
-bb.nph: ; preds = %entry
-; CHECK: BB#1
-; CHECK: ldr.n r2, LCPI1_0
-; CHECK: add r2, pc
-; CHECK: ldr r{{[0-9]+}}, [r2]
-; CHECK: LBB1_2
-; CHECK: LCPI1_0:
-; CHECK-NOT: LCPI1_1:
-; CHECK: .section
- %.pre = load i32* @GV, align 4 ; <i32> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb, %bb.nph
- %1 = phi i32 [ %.pre, %bb.nph ], [ %3, %bb ] ; <i32> [#uses=1]
- %i.03 = phi i32 [ 0, %bb.nph ], [ %4, %bb ] ; <i32> [#uses=2]
- %scevgep = getelementptr i32* %vals, i32 %i.03 ; <i32*> [#uses=1]
- %2 = load i32* %scevgep, align 4 ; <i32> [#uses=1]
- %3 = add nsw i32 %1, %2 ; <i32> [#uses=2]
- store i32 %3, i32* @GV, align 4
- %4 = add i32 %i.03, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %4, %c ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb
-
-return: ; preds = %bb, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/mul.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/mul.ll
deleted file mode 100644
index c1a2fb2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/mul.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=thumb | grep mul | count 3
-; RUN: llc < %s -march=thumb | grep lsl | count 1
-
-define i32 @f1(i32 %u) {
- %tmp = mul i32 %u, %u
- ret i32 %tmp
-}
-
-define i32 @f2(i32 %u, i32 %v) {
- %tmp = mul i32 %u, %v
- ret i32 %tmp
-}
-
-define i32 @f3(i32 %u) {
- %tmp = mul i32 %u, 5
- ret i32 %tmp
-}
-
-define i32 @f4(i32 %u) {
- %tmp = mul i32 %u, 4
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/pop.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/pop.ll
deleted file mode 100644
index 0e1b2e5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/pop.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -mtriple=thumb-apple-darwin | FileCheck %s
-; rdar://7268481
-
-define arm_apcscc void @t(i8* %a, ...) nounwind {
-; CHECK: t:
-; CHECK: pop {r3}
-; CHECK-NEXT: add sp, #12
-; CHECK-NEXT: bx r3
-entry:
- %a.addr = alloca i8*
- store i8* %a, i8** %a.addr
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/push.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/push.ll
deleted file mode 100644
index 63773c4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/push.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -mtriple=thumb-apple-darwin -disable-fp-elim | FileCheck %s
-; rdar://7268481
-
-define arm_apcscc void @t() nounwind {
-; CHECK: t:
-; CHECK-NEXT : push {r7}
-entry:
- call void asm sideeffect ".long 0xe7ffdefe", ""() nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/select.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/select.ll
deleted file mode 100644
index 7a183b0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/select.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; RUN: llc < %s -march=thumb | grep beq | count 1
-; RUN: llc < %s -march=thumb | grep bgt | count 1
-; RUN: llc < %s -march=thumb | grep blt | count 3
-; RUN: llc < %s -march=thumb | grep ble | count 1
-; RUN: llc < %s -march=thumb | grep bls | count 1
-; RUN: llc < %s -march=thumb | grep bhi | count 1
-; RUN: llc < %s -march=thumb | grep __ltdf2
-
-define i32 @f1(i32 %a.s) {
-entry:
- %tmp = icmp eq i32 %a.s, 4
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define i32 @f2(i32 %a.s) {
-entry:
- %tmp = icmp sgt i32 %a.s, 4
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define i32 @f3(i32 %a.s, i32 %b.s) {
-entry:
- %tmp = icmp slt i32 %a.s, %b.s
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define i32 @f4(i32 %a.s, i32 %b.s) {
-entry:
- %tmp = icmp sle i32 %a.s, %b.s
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define i32 @f5(i32 %a.u, i32 %b.u) {
-entry:
- %tmp = icmp ule i32 %a.u, %b.u
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define i32 @f6(i32 %a.u, i32 %b.u) {
-entry:
- %tmp = icmp ugt i32 %a.u, %b.u
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define double @f7(double %a, double %b) {
- %tmp = fcmp olt double %a, 1.234e+00
- %tmp1 = select i1 %tmp, double -1.000e+00, double %b
- ret double %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/stack-frame.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/stack-frame.ll
deleted file mode 100644
index b103b33..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/stack-frame.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=thumb
-; RUN: llc < %s -march=thumb | grep add | count 1
-
-define void @f1() {
- %c = alloca i8, align 1
- ret void
-}
-
-define i32 @f2() {
- ret i32 1
-}
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/thumb-imm.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/thumb-imm.ll
deleted file mode 100644
index 74a57ff..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/thumb-imm.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=thumb | not grep CPI
-
-
-define i32 @test1() {
- ret i32 1000
-}
-
-define i32 @test2() {
- ret i32 -256
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/tst_teq.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/tst_teq.ll
deleted file mode 100644
index 21ada3e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/tst_teq.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=thumb | grep tst
-
-define i32 @f(i32 %a) {
-entry:
- %tmp2 = and i32 %a, 255 ; <i32> [#uses=1]
- icmp eq i32 %tmp2, 0 ; <i1>:0 [#uses=1]
- %retval = select i1 %0, i32 20, i32 10 ; <i32> [#uses=1]
- ret i32 %retval
-}
-
-define i32 @g(i32 %a) {
-entry:
- %tmp2 = xor i32 %a, 255
- icmp eq i32 %tmp2, 0 ; <i1>:0 [#uses=1]
- %retval = select i1 %0, i32 20, i32 10 ; <i32> [#uses=1]
- ret i32 %retval
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/unord.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/unord.ll
deleted file mode 100644
index 39458ae..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/unord.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=thumb | grep bne | count 1
-; RUN: llc < %s -march=thumb | grep beq | count 1
-
-define i32 @f1(float %X, float %Y) {
- %tmp = fcmp uno float %X, %Y
- %retval = select i1 %tmp, i32 1, i32 -1
- ret i32 %retval
-}
-
-define i32 @f2(float %X, float %Y) {
- %tmp = fcmp ord float %X, %Y
- %retval = select i1 %tmp, i32 1, i32 -1
- ret i32 %retval
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb/vargs.ll b/libclamav/c++/llvm/test/CodeGen/Thumb/vargs.ll
deleted file mode 100644
index 16a9c44..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb/vargs.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -march=thumb
-; RUN: llc < %s -mtriple=thumb-linux | grep pop | count 1
-; RUN: llc < %s -mtriple=thumb-darwin | grep pop | count 2
-
- at str = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
-
-define void @f(i32 %a, ...) {
-entry:
- %va = alloca i8*, align 4 ; <i8**> [#uses=4]
- %va.upgrd.1 = bitcast i8** %va to i8* ; <i8*> [#uses=1]
- call void @llvm.va_start( i8* %va.upgrd.1 )
- br label %bb
-
-bb: ; preds = %bb, %entry
- %a_addr.0 = phi i32 [ %a, %entry ], [ %tmp5, %bb ] ; <i32> [#uses=2]
- %tmp = volatile load i8** %va ; <i8*> [#uses=2]
- %tmp2 = getelementptr i8* %tmp, i32 4 ; <i8*> [#uses=1]
- volatile store i8* %tmp2, i8** %va
- %tmp5 = add i32 %a_addr.0, -1 ; <i32> [#uses=1]
- %tmp.upgrd.2 = icmp eq i32 %a_addr.0, 1 ; <i1> [#uses=1]
- br i1 %tmp.upgrd.2, label %bb7, label %bb
-
-bb7: ; preds = %bb
- %tmp3 = bitcast i8* %tmp to i32* ; <i32*> [#uses=1]
- %tmp.upgrd.3 = load i32* %tmp3 ; <i32> [#uses=1]
- %tmp10 = call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @str, i32 0, i64 0), i32 %tmp.upgrd.3 ) ; <i32> [#uses=0]
- %va.upgrd.4 = bitcast i8** %va to i8* ; <i8*> [#uses=1]
- call void @llvm.va_end( i8* %va.upgrd.4 )
- ret void
-}
-
-declare void @llvm.va_start(i8*)
-
-declare i32 @printf(i8*, ...)
-
-declare void @llvm.va_end(i8*)
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-07-17-CrossRegClassCopy.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-07-17-CrossRegClassCopy.ll
deleted file mode 100644
index 8f2283f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-07-17-CrossRegClassCopy.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s
-
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
-target triple = "thumbv6t2-elf"
- %struct.dwarf_cie = type <{ i32, i32, i8, [0 x i8], [3 x i8] }>
-
-declare arm_apcscc i8* @read_sleb128(i8*, i32* nocapture) nounwind
-
-define arm_apcscc i32 @get_cie_encoding(%struct.dwarf_cie* %cie) nounwind {
-entry:
- br i1 undef, label %bb1, label %bb13
-
-bb1: ; preds = %entry
- %tmp38 = add i32 undef, 10 ; <i32> [#uses=1]
- br label %bb.i
-
-bb.i: ; preds = %bb.i, %bb1
- %indvar.i = phi i32 [ 0, %bb1 ], [ %2, %bb.i ] ; <i32> [#uses=3]
- %tmp39 = add i32 %indvar.i, %tmp38 ; <i32> [#uses=1]
- %p_addr.0.i = getelementptr i8* undef, i32 %tmp39 ; <i8*> [#uses=1]
- %0 = load i8* %p_addr.0.i, align 1 ; <i8> [#uses=1]
- %1 = icmp slt i8 %0, 0 ; <i1> [#uses=1]
- %2 = add i32 %indvar.i, 1 ; <i32> [#uses=1]
- br i1 %1, label %bb.i, label %read_uleb128.exit
-
-read_uleb128.exit: ; preds = %bb.i
- %.sum40 = add i32 %indvar.i, undef ; <i32> [#uses=1]
- %.sum31 = add i32 %.sum40, 2 ; <i32> [#uses=1]
- %scevgep.i = getelementptr %struct.dwarf_cie* %cie, i32 0, i32 3, i32 %.sum31 ; <i8*> [#uses=1]
- %3 = call arm_apcscc i8* @read_sleb128(i8* %scevgep.i, i32* undef) ; <i8*> [#uses=0]
- unreachable
-
-bb13: ; preds = %entry
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-07-21-ISelBug.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-07-21-ISelBug.ll
deleted file mode 100644
index ef076a4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-07-21-ISelBug.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mattr=+vfp2,+thumb2 | FileCheck %s
-; rdar://7076238
-
-@"\01LC" = external constant [36 x i8], align 1 ; <[36 x i8]*> [#uses=1]
-
-define arm_apcscc i32 @t(i32, ...) nounwind {
-entry:
-; CHECK: t:
-; CHECK: add r7, sp, #12
- %1 = load i8** undef, align 4 ; <i8*> [#uses=3]
- %2 = getelementptr i8* %1, i32 4 ; <i8*> [#uses=1]
- %3 = getelementptr i8* %1, i32 8 ; <i8*> [#uses=1]
- %4 = bitcast i8* %2 to i32* ; <i32*> [#uses=1]
- %5 = load i32* %4, align 4 ; <i32> [#uses=1]
- %6 = trunc i32 %5 to i8 ; <i8> [#uses=1]
- %7 = getelementptr i8* %1, i32 12 ; <i8*> [#uses=1]
- %8 = bitcast i8* %3 to i32* ; <i32*> [#uses=1]
- %9 = load i32* %8, align 4 ; <i32> [#uses=1]
- %10 = trunc i32 %9 to i16 ; <i16> [#uses=1]
- %11 = bitcast i8* %7 to i32* ; <i32*> [#uses=1]
- %12 = load i32* %11, align 4 ; <i32> [#uses=1]
- %13 = trunc i32 %12 to i16 ; <i16> [#uses=1]
- %14 = load i32* undef, align 4 ; <i32> [#uses=2]
- %15 = sext i8 %6 to i32 ; <i32> [#uses=2]
- %16 = sext i16 %10 to i32 ; <i32> [#uses=2]
- %17 = sext i16 %13 to i32 ; <i32> [#uses=2]
- %18 = call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([36 x i8]* @"\01LC", i32 0, i32 0), i32 -128, i32 0, i32 %15, i32 %16, i32 %17, i32 0, i32 %14) nounwind ; <i32> [#uses=0]
- %19 = add i32 0, %15 ; <i32> [#uses=1]
- %20 = add i32 %19, %16 ; <i32> [#uses=1]
- %21 = add i32 %20, %14 ; <i32> [#uses=1]
- %22 = add i32 %21, %17 ; <i32> [#uses=1]
- %23 = add i32 %22, 0 ; <i32> [#uses=1]
- ret i32 %23
-}
-
-declare arm_apcscc i32 @printf(i8* nocapture, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-07-23-CPIslandBug.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-07-23-CPIslandBug.ll
deleted file mode 100644
index 4d21f9b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-07-23-CPIslandBug.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mattr=+vfp2,+thumb2
-; rdar://7083961
-
-define arm_apcscc i32 @value(i64 %b1, i64 %b2) nounwind readonly {
-entry:
- %0 = icmp eq i32 undef, 0 ; <i1> [#uses=1]
- %mod.0.ph.ph = select i1 %0, float -1.000000e+00, float 1.000000e+00 ; <float> [#uses=1]
- br label %bb7
-
-bb7: ; preds = %bb7, %entry
- br i1 undef, label %bb86.preheader, label %bb7
-
-bb86.preheader: ; preds = %bb7
- %1 = fmul float %mod.0.ph.ph, 5.000000e+00 ; <float> [#uses=0]
- br label %bb79
-
-bb79: ; preds = %bb79, %bb86.preheader
- br i1 undef, label %bb119, label %bb79
-
-bb119: ; preds = %bb79
- ret i32 undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll
deleted file mode 100644
index f74d12e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll
+++ /dev/null
@@ -1,193 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim
-
- %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
- %struct.JHUFF_TBL = type { [17 x i8], [256 x i8], i32 }
- %struct.JQUANT_TBL = type { [64 x i16], i32 }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
- %struct.anon = type { [8 x i32], [48 x i8] }
- %struct.backing_store_info = type { void (%struct.jpeg_common_struct*, %struct.backing_store_info*, i8*, i32, i32)*, void (%struct.jpeg_common_struct*, %struct.backing_store_info*, i8*, i32, i32)*, void (%struct.jpeg_common_struct*, %struct.backing_store_info*)*, %struct.FILE*, [64 x i8] }
- %struct.jpeg_color_deconverter = type { void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*, i8***, i32, i8**, i32)* }
- %struct.jpeg_color_quantizer = type { void (%struct.jpeg_decompress_struct*, i32)*, void (%struct.jpeg_decompress_struct*, i8**, i8**, i32)*, void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)* }
- %struct.jpeg_common_struct = type { %struct.jpeg_error_mgr*, %struct.jpeg_memory_mgr*, %struct.jpeg_progress_mgr*, i32, i32 }
- %struct.jpeg_component_info = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.JQUANT_TBL*, i8* }
- %struct.jpeg_d_coef_controller = type { void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*, i8***)*, %struct.jvirt_barray_control** }
- %struct.jpeg_d_main_controller = type { void (%struct.jpeg_decompress_struct*, i32)*, void (%struct.jpeg_decompress_struct*, i8**, i32*, i32)* }
- %struct.jpeg_d_post_controller = type { void (%struct.jpeg_decompress_struct*, i32)*, void (%struct.jpeg_decompress_struct*, i8***, i32*, i32, i8**, i32*, i32)* }
- %struct.jpeg_decomp_master = type { void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, i32 }
- %struct.jpeg_decompress_struct = type { %struct.jpeg_error_mgr*, %struct.jpeg_memory_mgr*, %struct.jpeg_progress_mgr*, i32, i32, %struct.jpeg_source_mgr*, i32, i32, i32, i32, i32, i32, i32, double, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8**, i32, i32, i32, i32, i32, [64 x i32]*, [4 x %struct.JQUANT_TBL*], [4 x %struct.JHUFF_TBL*], [4 x %struct.JHUFF_TBL*], i32, %struct.jpeg_component_info*, i32, i32, [16 x i8], [16 x i8], [16 x i8], i32, i32, i8, i16, i16, i32, i8, i32, i32, i32, i32, i32, i8*, i32, [4 x %struct.jpeg_component_info*], i32, i32, i32, [10 x i32], i32, i32, i32, i32, i32, %struct.jpeg_decomp_master*, %struct.jpeg_d_main_controller*, %struct.jpeg_d_coef_controller*, %struct.jpeg_d_post_controller*, %struct.jpeg_input_controller*, %struct.jpeg_marker_reader*, %struct.jpeg_entropy_decoder*, %struct.jpeg_inverse_dct*, %struct.jpeg_upsampler*, %struct.jpeg_color_deconverter*, %struct.jpeg_color_quantizer* }
- %struct.jpeg_entropy_decoder = type { void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*, [64 x i16]**)* }
- %struct.jpeg_error_mgr = type { void (%struct.jpeg_common_struct*)*, void (%struct.jpeg_common_struct*, i32)*, void (%struct.jpeg_common_struct*)*, void (%struct.jpeg_common_struct*, i8*)*, void (%struct.jpeg_common_struct*)*, i32, %struct.anon, i32, i32, i8**, i32, i8**, i32, i32 }
- %struct.jpeg_input_controller = type { i32 (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, i32, i32 }
- %struct.jpeg_inverse_dct = type { void (%struct.jpeg_decompress_struct*)*, [10 x void (%struct.jpeg_decompress_struct*, %struct.jpeg_component_info*, i16*, i8**, i32)*] }
- %struct.jpeg_marker_reader = type { void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, [16 x i32 (%struct.jpeg_decompress_struct*)*], i32, i32, i32, i32 }
- %struct.jpeg_memory_mgr = type { i8* (%struct.jpeg_common_struct*, i32, i32)*, i8* (%struct.jpeg_common_struct*, i32, i32)*, i8** (%struct.jpeg_common_struct*, i32, i32, i32)*, [64 x i16]** (%struct.jpeg_common_struct*, i32, i32, i32)*, %struct.jvirt_sarray_control* (%struct.jpeg_common_struct*, i32, i32, i32, i32, i32)*, %struct.jvirt_barray_control* (%struct.jpeg_common_struct*, i32, i32, i32, i32, i32)*, void (%struct.jpeg_common_struct*)*, i8** (%struct.jpeg_common_struct*, %struct.jvirt_sarray_control*, i32, i32, i32)*, [64 x i16]** (%struct.jpeg_common_struct*, %struct.jvirt_barray_control*, i32, i32, i32)*, void (%struct.jpeg_common_struct*, i32)*, void (%struct.jpeg_common_struct*)*, i32 }
- %struct.jpeg_progress_mgr = type { void (%struct.jpeg_common_struct*)*, i32, i32, i32, i32 }
- %struct.jpeg_source_mgr = type { i8*, i32, void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*, i32)*, i32 (%struct.jpeg_decompress_struct*, i32)*, void (%struct.jpeg_decompress_struct*)* }
- %struct.jpeg_upsampler = type { void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*, i8***, i32*, i32, i8**, i32*, i32)*, i32 }
- %struct.jvirt_barray_control = type { [64 x i16]**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.jvirt_barray_control*, %struct.backing_store_info }
- %struct.jvirt_sarray_control = type { i8**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.jvirt_sarray_control*, %struct.backing_store_info }
-
-define arm_apcscc void @jpeg_idct_float(%struct.jpeg_decompress_struct* nocapture %cinfo, %struct.jpeg_component_info* nocapture %compptr, i16* nocapture %coef_block, i8** nocapture %output_buf, i32 %output_col) nounwind {
-entry:
- %workspace = alloca [64 x float], align 4 ; <[64 x float]*> [#uses=11]
- %0 = load i8** undef, align 4 ; <i8*> [#uses=5]
- br label %bb
-
-bb: ; preds = %bb, %entry
- %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=11]
- %tmp39 = add i32 %indvar, 8 ; <i32> [#uses=0]
- %tmp41 = add i32 %indvar, 16 ; <i32> [#uses=2]
- %scevgep42 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp41 ; <float*> [#uses=1]
- %tmp43 = add i32 %indvar, 24 ; <i32> [#uses=1]
- %scevgep44 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp43 ; <float*> [#uses=1]
- %tmp45 = add i32 %indvar, 32 ; <i32> [#uses=1]
- %scevgep46 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp45 ; <float*> [#uses=1]
- %tmp47 = add i32 %indvar, 40 ; <i32> [#uses=1]
- %scevgep48 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp47 ; <float*> [#uses=1]
- %tmp49 = add i32 %indvar, 48 ; <i32> [#uses=1]
- %scevgep50 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp49 ; <float*> [#uses=1]
- %tmp51 = add i32 %indvar, 56 ; <i32> [#uses=1]
- %scevgep52 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp51 ; <float*> [#uses=1]
- %wsptr.119 = getelementptr [64 x float]* %workspace, i32 0, i32 %indvar ; <float*> [#uses=1]
- %tmp54 = shl i32 %indvar, 2 ; <i32> [#uses=1]
- %scevgep76 = getelementptr i8* undef, i32 %tmp54 ; <i8*> [#uses=1]
- %quantptr.118 = bitcast i8* %scevgep76 to float* ; <float*> [#uses=1]
- %scevgep79 = getelementptr i16* %coef_block, i32 %tmp41 ; <i16*> [#uses=0]
- %inptr.117 = getelementptr i16* %coef_block, i32 %indvar ; <i16*> [#uses=1]
- %1 = load i16* null, align 2 ; <i16> [#uses=1]
- %2 = load i16* undef, align 2 ; <i16> [#uses=1]
- %3 = load i16* %inptr.117, align 2 ; <i16> [#uses=1]
- %4 = sitofp i16 %3 to float ; <float> [#uses=1]
- %5 = load float* %quantptr.118, align 4 ; <float> [#uses=1]
- %6 = fmul float %4, %5 ; <float> [#uses=1]
- %7 = fsub float %6, undef ; <float> [#uses=2]
- %8 = fmul float undef, 0x3FF6A09E60000000 ; <float> [#uses=1]
- %9 = fsub float %8, 0.000000e+00 ; <float> [#uses=2]
- %10 = fadd float undef, 0.000000e+00 ; <float> [#uses=2]
- %11 = fadd float %7, %9 ; <float> [#uses=2]
- %12 = fsub float %7, %9 ; <float> [#uses=2]
- %13 = sitofp i16 %1 to float ; <float> [#uses=1]
- %14 = fmul float %13, undef ; <float> [#uses=2]
- %15 = sitofp i16 %2 to float ; <float> [#uses=1]
- %16 = load float* undef, align 4 ; <float> [#uses=1]
- %17 = fmul float %15, %16 ; <float> [#uses=1]
- %18 = fadd float %14, undef ; <float> [#uses=2]
- %19 = fsub float %14, undef ; <float> [#uses=2]
- %20 = fadd float undef, %17 ; <float> [#uses=2]
- %21 = fadd float %20, %18 ; <float> [#uses=3]
- %22 = fsub float %20, %18 ; <float> [#uses=1]
- %23 = fmul float %22, 0x3FF6A09E60000000 ; <float> [#uses=1]
- %24 = fadd float %19, undef ; <float> [#uses=1]
- %25 = fmul float %24, 0x3FFD906BC0000000 ; <float> [#uses=2]
- %26 = fmul float undef, 0x3FF1517A80000000 ; <float> [#uses=1]
- %27 = fsub float %26, %25 ; <float> [#uses=1]
- %28 = fmul float %19, 0xC004E7AEA0000000 ; <float> [#uses=1]
- %29 = fadd float %28, %25 ; <float> [#uses=1]
- %30 = fsub float %29, %21 ; <float> [#uses=3]
- %31 = fsub float %23, %30 ; <float> [#uses=3]
- %32 = fadd float %27, %31 ; <float> [#uses=1]
- %33 = fadd float %10, %21 ; <float> [#uses=1]
- store float %33, float* %wsptr.119, align 4
- %34 = fsub float %10, %21 ; <float> [#uses=1]
- store float %34, float* %scevgep52, align 4
- %35 = fadd float %11, %30 ; <float> [#uses=1]
- store float %35, float* null, align 4
- %36 = fsub float %11, %30 ; <float> [#uses=1]
- store float %36, float* %scevgep50, align 4
- %37 = fadd float %12, %31 ; <float> [#uses=1]
- store float %37, float* %scevgep42, align 4
- %38 = fsub float %12, %31 ; <float> [#uses=1]
- store float %38, float* %scevgep48, align 4
- %39 = fadd float undef, %32 ; <float> [#uses=1]
- store float %39, float* %scevgep46, align 4
- store float undef, float* %scevgep44, align 4
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
- br i1 undef, label %bb6, label %bb
-
-bb6: ; preds = %bb
- %.sum10 = add i32 %output_col, 1 ; <i32> [#uses=1]
- %.sum8 = add i32 %output_col, 6 ; <i32> [#uses=1]
- %.sum6 = add i32 %output_col, 2 ; <i32> [#uses=1]
- %.sum = add i32 %output_col, 3 ; <i32> [#uses=1]
- br label %bb8
-
-bb8: ; preds = %bb8, %bb6
- %ctr.116 = phi i32 [ 0, %bb6 ], [ %88, %bb8 ] ; <i32> [#uses=3]
- %scevgep = getelementptr i8** %output_buf, i32 %ctr.116 ; <i8**> [#uses=1]
- %tmp = shl i32 %ctr.116, 3 ; <i32> [#uses=5]
- %tmp2392 = or i32 %tmp, 4 ; <i32> [#uses=1]
- %scevgep24 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp2392 ; <float*> [#uses=1]
- %tmp2591 = or i32 %tmp, 2 ; <i32> [#uses=1]
- %scevgep26 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp2591 ; <float*> [#uses=1]
- %tmp2790 = or i32 %tmp, 6 ; <i32> [#uses=1]
- %scevgep28 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp2790 ; <float*> [#uses=1]
- %tmp3586 = or i32 %tmp, 7 ; <i32> [#uses=0]
- %wsptr.215 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp ; <float*> [#uses=1]
- %40 = load i8** %scevgep, align 4 ; <i8*> [#uses=4]
- %41 = load float* %wsptr.215, align 4 ; <float> [#uses=1]
- %42 = load float* %scevgep24, align 4 ; <float> [#uses=1]
- %43 = fadd float %41, %42 ; <float> [#uses=1]
- %44 = load float* %scevgep26, align 4 ; <float> [#uses=1]
- %45 = load float* %scevgep28, align 4 ; <float> [#uses=1]
- %46 = fadd float %44, %45 ; <float> [#uses=1]
- %47 = fsub float %43, %46 ; <float> [#uses=2]
- %48 = fsub float undef, 0.000000e+00 ; <float> [#uses=1]
- %49 = fadd float 0.000000e+00, undef ; <float> [#uses=1]
- %50 = fptosi float %49 to i32 ; <i32> [#uses=1]
- %51 = add i32 %50, 4 ; <i32> [#uses=1]
- %52 = lshr i32 %51, 3 ; <i32> [#uses=1]
- %53 = and i32 %52, 1023 ; <i32> [#uses=1]
- %.sum14 = add i32 %53, 128 ; <i32> [#uses=1]
- %54 = getelementptr i8* %0, i32 %.sum14 ; <i8*> [#uses=1]
- %55 = load i8* %54, align 1 ; <i8> [#uses=1]
- store i8 %55, i8* null, align 1
- %56 = getelementptr i8* %40, i32 %.sum10 ; <i8*> [#uses=1]
- store i8 0, i8* %56, align 1
- %57 = load i8* null, align 1 ; <i8> [#uses=1]
- %58 = getelementptr i8* %40, i32 %.sum8 ; <i8*> [#uses=1]
- store i8 %57, i8* %58, align 1
- %59 = fadd float undef, %48 ; <float> [#uses=1]
- %60 = fptosi float %59 to i32 ; <i32> [#uses=1]
- %61 = add i32 %60, 4 ; <i32> [#uses=1]
- %62 = lshr i32 %61, 3 ; <i32> [#uses=1]
- %63 = and i32 %62, 1023 ; <i32> [#uses=1]
- %.sum7 = add i32 %63, 128 ; <i32> [#uses=1]
- %64 = getelementptr i8* %0, i32 %.sum7 ; <i8*> [#uses=1]
- %65 = load i8* %64, align 1 ; <i8> [#uses=1]
- %66 = getelementptr i8* %40, i32 %.sum6 ; <i8*> [#uses=1]
- store i8 %65, i8* %66, align 1
- %67 = fptosi float undef to i32 ; <i32> [#uses=1]
- %68 = add i32 %67, 4 ; <i32> [#uses=1]
- %69 = lshr i32 %68, 3 ; <i32> [#uses=1]
- %70 = and i32 %69, 1023 ; <i32> [#uses=1]
- %.sum5 = add i32 %70, 128 ; <i32> [#uses=1]
- %71 = getelementptr i8* %0, i32 %.sum5 ; <i8*> [#uses=1]
- %72 = load i8* %71, align 1 ; <i8> [#uses=1]
- store i8 %72, i8* undef, align 1
- %73 = fadd float %47, undef ; <float> [#uses=1]
- %74 = fptosi float %73 to i32 ; <i32> [#uses=1]
- %75 = add i32 %74, 4 ; <i32> [#uses=1]
- %76 = lshr i32 %75, 3 ; <i32> [#uses=1]
- %77 = and i32 %76, 1023 ; <i32> [#uses=1]
- %.sum3 = add i32 %77, 128 ; <i32> [#uses=1]
- %78 = getelementptr i8* %0, i32 %.sum3 ; <i8*> [#uses=1]
- %79 = load i8* %78, align 1 ; <i8> [#uses=1]
- store i8 %79, i8* undef, align 1
- %80 = fsub float %47, undef ; <float> [#uses=1]
- %81 = fptosi float %80 to i32 ; <i32> [#uses=1]
- %82 = add i32 %81, 4 ; <i32> [#uses=1]
- %83 = lshr i32 %82, 3 ; <i32> [#uses=1]
- %84 = and i32 %83, 1023 ; <i32> [#uses=1]
- %.sum1 = add i32 %84, 128 ; <i32> [#uses=1]
- %85 = getelementptr i8* %0, i32 %.sum1 ; <i8*> [#uses=1]
- %86 = load i8* %85, align 1 ; <i8> [#uses=1]
- %87 = getelementptr i8* %40, i32 %.sum ; <i8*> [#uses=1]
- store i8 %86, i8* %87, align 1
- %88 = add i32 %ctr.116, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %88, 8 ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb8
-
-return: ; preds = %bb8
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
deleted file mode 100644
index a8e86d5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
+++ /dev/null
@@ -1,85 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim | FileCheck %s
-
- at csize = external global [100 x [20 x [4 x i8]]] ; <[100 x [20 x [4 x i8]]]*> [#uses=1]
- at vsize = external global [100 x [20 x [4 x i8]]] ; <[100 x [20 x [4 x i8]]]*> [#uses=1]
- at cll = external global [20 x [10 x i8]] ; <[20 x [10 x i8]]*> [#uses=1]
- at lefline = external global [100 x [20 x i32]] ; <[100 x [20 x i32]]*> [#uses=1]
- at sep = external global [20 x i32] ; <[20 x i32]*> [#uses=1]
-
-define arm_apcscc void @main(i32 %argc, i8** %argv) noreturn nounwind {
-; CHECK: main:
-; CHECK: ldrb
-entry:
- %nb.i.i.i = alloca [25 x i8], align 1 ; <[25 x i8]*> [#uses=0]
- %line.i.i.i = alloca [200 x i8], align 1 ; <[200 x i8]*> [#uses=1]
- %line.i = alloca [1024 x i8], align 1 ; <[1024 x i8]*> [#uses=0]
- br i1 undef, label %bb.i.i, label %bb4.preheader.i
-
-bb.i.i: ; preds = %entry
- unreachable
-
-bb4.preheader.i: ; preds = %entry
- br i1 undef, label %tbl.exit, label %bb.i.preheader
-
-bb.i.preheader: ; preds = %bb4.preheader.i
- %line3.i.i.i = getelementptr [200 x i8]* %line.i.i.i, i32 0, i32 0 ; <i8*> [#uses=1]
- br label %bb.i
-
-bb.i: ; preds = %bb4.backedge.i, %bb.i.preheader
- br i1 undef, label %bb3.i, label %bb4.backedge.i
-
-bb3.i: ; preds = %bb.i
- br i1 undef, label %bb2.i184.i.i, label %bb.i183.i.i
-
-bb.i183.i.i: ; preds = %bb.i183.i.i, %bb3.i
- br i1 undef, label %bb2.i184.i.i, label %bb.i183.i.i
-
-bb2.i184.i.i: ; preds = %bb.i183.i.i, %bb3.i
- br i1 undef, label %bb5.i185.i.i, label %bb35.preheader.i.i.i
-
-bb35.preheader.i.i.i: ; preds = %bb2.i184.i.i
- %0 = load i8* %line3.i.i.i, align 1 ; <i8> [#uses=1]
- %1 = icmp eq i8 %0, 59 ; <i1> [#uses=1]
- br i1 %1, label %bb36.i.i.i, label %bb9.i186.i.i
-
-bb5.i185.i.i: ; preds = %bb2.i184.i.i
- br label %bb.i171.i.i
-
-bb9.i186.i.i: ; preds = %bb35.preheader.i.i.i
- unreachable
-
-bb36.i.i.i: ; preds = %bb35.preheader.i.i.i
- br label %bb.i171.i.i
-
-bb.i171.i.i: ; preds = %bb3.i176.i.i, %bb36.i.i.i, %bb5.i185.i.i
- %2 = phi i32 [ %4, %bb3.i176.i.i ], [ 0, %bb36.i.i.i ], [ 0, %bb5.i185.i.i ] ; <i32> [#uses=6]
- %scevgep16.i.i.i = getelementptr [20 x i32]* @sep, i32 0, i32 %2 ; <i32*> [#uses=1]
- %scevgep18.i.i.i = getelementptr [20 x [10 x i8]]* @cll, i32 0, i32 %2, i32 0 ; <i8*> [#uses=0]
- store i32 -1, i32* %scevgep16.i.i.i, align 4
- br label %bb1.i175.i.i
-
-bb1.i175.i.i: ; preds = %bb1.i175.i.i, %bb.i171.i.i
- %i.03.i172.i.i = phi i32 [ 0, %bb.i171.i.i ], [ %3, %bb1.i175.i.i ] ; <i32> [#uses=4]
- %scevgep11.i.i.i = getelementptr [100 x [20 x i32]]* @lefline, i32 0, i32 %i.03.i172.i.i, i32 %2 ; <i32*> [#uses=1]
- %scevgep12.i.i.i = getelementptr [100 x [20 x [4 x i8]]]* @vsize, i32 0, i32 %i.03.i172.i.i, i32 %2, i32 0 ; <i8*> [#uses=1]
- %scevgep13.i.i.i = getelementptr [100 x [20 x [4 x i8]]]* @csize, i32 0, i32 %i.03.i172.i.i, i32 %2, i32 0 ; <i8*> [#uses=0]
- store i8 0, i8* %scevgep12.i.i.i, align 1
- store i32 0, i32* %scevgep11.i.i.i, align 4
- store i32 108, i32* undef, align 4
- %3 = add i32 %i.03.i172.i.i, 1 ; <i32> [#uses=2]
- %exitcond.i174.i.i = icmp eq i32 %3, 100 ; <i1> [#uses=1]
- br i1 %exitcond.i174.i.i, label %bb3.i176.i.i, label %bb1.i175.i.i
-
-bb3.i176.i.i: ; preds = %bb1.i175.i.i
- %4 = add i32 %2, 1 ; <i32> [#uses=1]
- br i1 undef, label %bb5.i177.i.i, label %bb.i171.i.i
-
-bb5.i177.i.i: ; preds = %bb3.i176.i.i
- unreachable
-
-bb4.backedge.i: ; preds = %bb.i
- br i1 undef, label %tbl.exit, label %bb.i
-
-tbl.exit: ; preds = %bb4.backedge.i, %bb4.preheader.i
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll
deleted file mode 100644
index 6cbfd0d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll
+++ /dev/null
@@ -1,46 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim
-
- type { void (%"struct.xalanc_1_8::FormatterToXML"*, i16)*, i32 } ; type %0
- type { void (%"struct.xalanc_1_8::FormatterToXML"*, i16*)*, i32 } ; type %1
- type { void (%"struct.xalanc_1_8::FormatterToXML"*, %"struct.xalanc_1_8::XalanDOMString"*)*, i32 } ; type %2
- type { void (%"struct.xalanc_1_8::FormatterToXML"*, i16*, i32, i32)*, i32 } ; type %3
- type { void (%"struct.xalanc_1_8::FormatterToXML"*)*, i32 } ; type %4
- %"struct.std::CharVectorType" = type { %"struct.std::_Vector_base<char,std::allocator<char> >" }
- %"struct.std::_Bit_const_iterator" = type { %"struct.std::_Bit_iterator_base" }
- %"struct.std::_Bit_iterator_base" = type { i32*, i32 }
- %"struct.std::_Bvector_base<std::allocator<bool> >" = type { %"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl" }
- %"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl" = type { %"struct.std::_Bit_const_iterator", %"struct.std::_Bit_const_iterator", i32* }
- %"struct.std::_Vector_base<char,std::allocator<char> >" = type { %"struct.std::_Vector_base<char,std::allocator<char> >::_Vector_impl" }
- %"struct.std::_Vector_base<char,std::allocator<char> >::_Vector_impl" = type { i8*, i8*, i8* }
- %"struct.std::_Vector_base<short unsigned int,std::allocator<short unsigned int> >" = type { %"struct.std::_Vector_base<short unsigned int,std::allocator<short unsigned int> >::_Vector_impl" }
- %"struct.std::_Vector_base<short unsigned int,std::allocator<short unsigned int> >::_Vector_impl" = type { i16*, i16*, i16* }
- %"struct.std::basic_ostream<char,std::char_traits<char> >.base" = type { i32 (...)** }
- %"struct.std::vector<bool,std::allocator<bool> >" = type { %"struct.std::_Bvector_base<std::allocator<bool> >" }
- %"struct.std::vector<short unsigned int,std::allocator<short unsigned int> >" = type { %"struct.std::_Vector_base<short unsigned int,std::allocator<short unsigned int> >" }
- %"struct.xalanc_1_8::FormatterListener" = type { %"struct.std::basic_ostream<char,std::char_traits<char> >.base", %"struct.std::basic_ostream<char,std::char_traits<char> >.base"*, i32 }
- %"struct.xalanc_1_8::FormatterToXML" = type { %"struct.xalanc_1_8::FormatterListener", %"struct.std::basic_ostream<char,std::char_traits<char> >.base"*, %"struct.xalanc_1_8::XalanOutputStream"*, i16, [256 x i16], [256 x i16], i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %"struct.xalanc_1_8::XalanDOMString", %"struct.xalanc_1_8::XalanDOMString", %"struct.xalanc_1_8::XalanDOMString", i32, i32, %"struct.std::vector<bool,std::allocator<bool> >", %"struct.xalanc_1_8::XalanDOMString", i8, i8, i8, i8, i8, %"struct.xalanc_1_8::XalanDOMString", %"struct.xalanc_1_8::XalanDOMString", %"struct.xalanc_1_8::XalanDOMString", %"struct.xalanc_1_8::XalanDOMString", %"struct.std::vector<short unsigned int,std::allocator<short unsigned int> >", i32, %"struct.std::CharVectorType", %"struct.std::vector<bool,std::allocator<bool> >", %0, %1, %2, %3, %0, %1, %2, %3, %4, i16*, i32 }
- %"struct.xalanc_1_8::XalanDOMString" = type { %"struct.std::vector<short unsigned int,std::allocator<short unsigned int> >", i32 }
- %"struct.xalanc_1_8::XalanOutputStream" = type { i32 (...)**, i32, %"struct.std::basic_ostream<char,std::char_traits<char> >.base"*, i32, %"struct.std::vector<short unsigned int,std::allocator<short unsigned int> >", %"struct.xalanc_1_8::XalanDOMString", i8, i8, %"struct.std::CharVectorType" }
-
-declare arm_apcscc void @_ZN10xalanc_1_814FormatterToXML17writeParentTagEndEv(%"struct.xalanc_1_8::FormatterToXML"*)
-
-define arm_apcscc void @_ZN10xalanc_1_814FormatterToXML5cdataEPKtj(%"struct.xalanc_1_8::FormatterToXML"* %this, i16* %ch, i32 %length) {
-entry:
- %0 = getelementptr %"struct.xalanc_1_8::FormatterToXML"* %this, i32 0, i32 13 ; <i8*> [#uses=1]
- br i1 undef, label %bb4, label %bb
-
-bb: ; preds = %entry
- store i8 0, i8* %0, align 1
- %1 = getelementptr %"struct.xalanc_1_8::FormatterToXML"* %this, i32 0, i32 0, i32 0, i32 0 ; <i32 (...)***> [#uses=1]
- %2 = load i32 (...)*** %1, align 4 ; <i32 (...)**> [#uses=1]
- %3 = getelementptr i32 (...)** %2, i32 11 ; <i32 (...)**> [#uses=1]
- %4 = load i32 (...)** %3, align 4 ; <i32 (...)*> [#uses=1]
- %5 = bitcast i32 (...)* %4 to void (%"struct.xalanc_1_8::FormatterToXML"*, i16*, i32)* ; <void (%"struct.xalanc_1_8::FormatterToXML"*, i16*, i32)*> [#uses=1]
- tail call arm_apcscc void %5(%"struct.xalanc_1_8::FormatterToXML"* %this, i16* %ch, i32 %length)
- ret void
-
-bb4: ; preds = %entry
- tail call arm_apcscc void @_ZN10xalanc_1_814FormatterToXML17writeParentTagEndEv(%"struct.xalanc_1_8::FormatterToXML"* %this)
- tail call arm_apcscc void undef(%"struct.xalanc_1_8::FormatterToXML"* %this, i16* %ch, i32 0, i32 %length, i8 zeroext undef)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-CoalescerAssert.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-CoalescerAssert.ll
deleted file mode 100644
index ebe9d46..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-CoalescerAssert.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-none-linux-gnueabi
-; PR4681
-
- %struct.FILE = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker*, %struct.FILE*, i32, i32, i32, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i8*, i8*, i32, i32, [40 x i8] }
- %struct._IO_marker = type { %struct._IO_marker*, %struct.FILE*, i32 }
- at .str2 = external constant [30 x i8], align 1 ; <[30 x i8]*> [#uses=1]
-
-define arm_aapcscc i32 @__mf_heuristic_check(i32 %ptr, i32 %ptr_high) nounwind {
-entry:
- br i1 undef, label %bb1, label %bb
-
-bb: ; preds = %entry
- unreachable
-
-bb1: ; preds = %entry
- br i1 undef, label %bb9, label %bb2
-
-bb2: ; preds = %bb1
- %0 = call i8* @llvm.frameaddress(i32 0) ; <i8*> [#uses=1]
- %1 = call arm_aapcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* noalias undef, i8* noalias getelementptr ([30 x i8]* @.str2, i32 0, i32 0), i8* %0, i8* null) nounwind ; <i32> [#uses=0]
- unreachable
-
-bb9: ; preds = %bb1
- ret i32 undef
-}
-
-declare i8* @llvm.frameaddress(i32) nounwind readnone
-
-declare arm_aapcscc i32 @fprintf(%struct.FILE* noalias nocapture, i8* noalias nocapture, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll
deleted file mode 100644
index 319d29b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll
+++ /dev/null
@@ -1,153 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=+neon -arm-use-neon-fp -relocation-model=pic -disable-fp-elim
-
- type { %struct.GAP } ; type %0
- type { i16, i8, i8 } ; type %1
- type { [2 x i32], [2 x i32] } ; type %2
- type { %struct.rec* } ; type %3
- type { i8, i8, i16, i8, i8, i8, i8 } ; type %4
- %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
- %struct.FILE_POS = type { i8, i8, i16, i32 }
- %struct.FIRST_UNION = type { %struct.FILE_POS }
- %struct.FOURTH_UNION = type { %struct.STYLE }
- %struct.GAP = type { i8, i8, i16 }
- %struct.LIST = type { %struct.rec*, %struct.rec* }
- %struct.SECOND_UNION = type { %1 }
- %struct.STYLE = type { %0, %0, i16, i16, i32 }
- %struct.THIRD_UNION = type { %2 }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
- %struct.head_type = type { [2 x %struct.LIST], %struct.FIRST_UNION, %struct.SECOND_UNION, %struct.THIRD_UNION, %struct.FOURTH_UNION, %struct.rec*, %3, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, i32 }
- %struct.rec = type { %struct.head_type }
- at .str24239 = external constant [20 x i8], align 1 ; <[20 x i8]*> [#uses=1]
- at no_file_pos = external global %4 ; <%4*> [#uses=1]
- at zz_tmp = external global %struct.rec* ; <%struct.rec**> [#uses=1]
- at .str81872 = external constant [10 x i8], align 1 ; <[10 x i8]*> [#uses=1]
- at out_fp = external global %struct.FILE* ; <%struct.FILE**> [#uses=2]
- at cpexists = external global i32 ; <i32*> [#uses=2]
- at .str212784 = external constant [17 x i8], align 1 ; <[17 x i8]*> [#uses=1]
- at .str1822946 = external constant [8 x i8], align 1 ; <[8 x i8]*> [#uses=1]
- at .str1842948 = external constant [11 x i8], align 1 ; <[11 x i8]*> [#uses=1]
-
-declare arm_apcscc i32 @fprintf(%struct.FILE* nocapture, i8* nocapture, ...) nounwind
-
-declare arm_apcscc i32 @"\01_fwrite"(i8*, i32, i32, i8*)
-
-declare arm_apcscc %struct.FILE* @OpenIncGraphicFile(i8*, i8 zeroext, %struct.rec** nocapture, %struct.FILE_POS*, i32* nocapture) nounwind
-
-declare arm_apcscc void @Error(i32, i32, i8*, i32, %struct.FILE_POS*, ...) nounwind
-
-declare arm_apcscc i8* @fgets(i8*, i32, %struct.FILE* nocapture) nounwind
-
-define arm_apcscc void @PS_PrintGraphicInclude(%struct.rec* %x, i32 %colmark, i32 %rowmark) nounwind {
-entry:
- br label %bb5
-
-bb5: ; preds = %bb5, %entry
- %.pn = phi %struct.rec* [ %y.0, %bb5 ], [ undef, %entry ] ; <%struct.rec*> [#uses=1]
- %y.0.in = getelementptr %struct.rec* %.pn, i32 0, i32 0, i32 0, i32 1, i32 0 ; <%struct.rec**> [#uses=1]
- %y.0 = load %struct.rec** %y.0.in ; <%struct.rec*> [#uses=2]
- br i1 undef, label %bb5, label %bb6
-
-bb6: ; preds = %bb5
- %0 = call arm_apcscc %struct.FILE* @OpenIncGraphicFile(i8* undef, i8 zeroext 0, %struct.rec** undef, %struct.FILE_POS* null, i32* undef) nounwind ; <%struct.FILE*> [#uses=1]
- br i1 false, label %bb.i, label %FontHalfXHeight.exit
-
-bb.i: ; preds = %bb6
- br label %FontHalfXHeight.exit
-
-FontHalfXHeight.exit: ; preds = %bb.i, %bb6
- br i1 undef, label %bb.i1, label %FontSize.exit
-
-bb.i1: ; preds = %FontHalfXHeight.exit
- br label %FontSize.exit
-
-FontSize.exit: ; preds = %bb.i1, %FontHalfXHeight.exit
- %1 = load i32* undef, align 4 ; <i32> [#uses=1]
- %2 = icmp ult i32 0, undef ; <i1> [#uses=1]
- br i1 %2, label %bb.i5, label %FontName.exit
-
-bb.i5: ; preds = %FontSize.exit
- call arm_apcscc void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 1, i32 2, i8* getelementptr ([20 x i8]* @.str24239, i32 0, i32 0), i32 0, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*), i8* getelementptr ([10 x i8]* @.str81872, i32 0, i32 0)) nounwind
- br label %FontName.exit
-
-FontName.exit: ; preds = %bb.i5, %FontSize.exit
- %3 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* undef, i8* getelementptr ([8 x i8]* @.str1822946, i32 0, i32 0), i32 %1, i8* undef) nounwind ; <i32> [#uses=0]
- %4 = call arm_apcscc i32 @"\01_fwrite"(i8* getelementptr ([11 x i8]* @.str1842948, i32 0, i32 0), i32 1, i32 10, i8* undef) nounwind ; <i32> [#uses=0]
- %5 = sub i32 %colmark, undef ; <i32> [#uses=1]
- %6 = sub i32 %rowmark, undef ; <i32> [#uses=1]
- %7 = load %struct.FILE** @out_fp, align 4 ; <%struct.FILE*> [#uses=1]
- %8 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %7, i8* getelementptr ([17 x i8]* @.str212784, i32 0, i32 0), i32 %5, i32 %6) nounwind ; <i32> [#uses=0]
- store i32 0, i32* @cpexists, align 4
- %9 = getelementptr %struct.rec* %y.0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 1 ; <i32*> [#uses=1]
- %10 = load i32* %9, align 4 ; <i32> [#uses=1]
- %11 = sub i32 0, %10 ; <i32> [#uses=1]
- %12 = load %struct.FILE** @out_fp, align 4 ; <%struct.FILE*> [#uses=1]
- %13 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %12, i8* getelementptr ([17 x i8]* @.str212784, i32 0, i32 0), i32 undef, i32 %11) nounwind ; <i32> [#uses=0]
- store i32 0, i32* @cpexists, align 4
- br label %bb100.outer.outer
-
-bb100.outer.outer: ; preds = %bb79.critedge, %bb1.i3, %FontName.exit
- %x_addr.0.ph.ph = phi %struct.rec* [ %x, %FontName.exit ], [ null, %bb79.critedge ], [ null, %bb1.i3 ] ; <%struct.rec*> [#uses=1]
- %14 = getelementptr %struct.rec* %x_addr.0.ph.ph, i32 0, i32 0, i32 1, i32 0 ; <%struct.FILE_POS*> [#uses=0]
- br label %bb100.outer
-
-bb.i80: ; preds = %bb3.i85
- br i1 undef, label %bb2.i84, label %bb2.i51
-
-bb2.i84: ; preds = %bb100.outer, %bb.i80
- br i1 undef, label %bb3.i77, label %bb3.i85
-
-bb3.i85: ; preds = %bb2.i84
- br i1 false, label %StringBeginsWith.exit88, label %bb.i80
-
-StringBeginsWith.exit88: ; preds = %bb3.i85
- br i1 undef, label %bb3.i77, label %bb2.i51
-
-bb2.i.i68: ; preds = %bb3.i77
- br label %bb3.i77
-
-bb3.i77: ; preds = %bb2.i.i68, %StringBeginsWith.exit88, %bb2.i84
- br i1 false, label %bb1.i58, label %bb2.i.i68
-
-bb1.i58: ; preds = %bb3.i77
- unreachable
-
-bb.i47: ; preds = %bb3.i52
- br i1 undef, label %bb2.i51, label %bb2.i.i15.critedge
-
-bb2.i51: ; preds = %bb.i47, %StringBeginsWith.exit88, %bb.i80
- %15 = load i8* undef, align 1 ; <i8> [#uses=0]
- br i1 false, label %StringBeginsWith.exit55thread-split, label %bb3.i52
-
-bb3.i52: ; preds = %bb2.i51
- br i1 false, label %StringBeginsWith.exit55, label %bb.i47
-
-StringBeginsWith.exit55thread-split: ; preds = %bb2.i51
- br label %StringBeginsWith.exit55
-
-StringBeginsWith.exit55: ; preds = %StringBeginsWith.exit55thread-split, %bb3.i52
- br label %bb2.i41
-
-bb2.i41: ; preds = %bb2.i41, %StringBeginsWith.exit55
- br label %bb2.i41
-
-bb2.i.i15.critedge: ; preds = %bb.i47
- %16 = call arm_apcscc i8* @fgets(i8* undef, i32 512, %struct.FILE* %0) nounwind ; <i8*> [#uses=0]
- %iftmp.560.0 = select i1 undef, i32 2, i32 0 ; <i32> [#uses=1]
- br label %bb100.outer
-
-bb2.i8: ; preds = %bb100.outer
- br i1 undef, label %bb1.i3, label %bb79.critedge
-
-bb1.i3: ; preds = %bb2.i8
- br label %bb100.outer.outer
-
-bb79.critedge: ; preds = %bb2.i8
- store %struct.rec* null, %struct.rec** @zz_tmp, align 4
- br label %bb100.outer.outer
-
-bb100.outer: ; preds = %bb2.i.i15.critedge, %bb100.outer.outer
- %state.0.ph = phi i32 [ 0, %bb100.outer.outer ], [ %iftmp.560.0, %bb2.i.i15.critedge ] ; <i32> [#uses=1]
- %cond = icmp eq i32 %state.0.ph, 1 ; <i1> [#uses=1]
- br i1 %cond, label %bb2.i8, label %bb2.i84
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll
deleted file mode 100644
index a62b612..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll
+++ /dev/null
@@ -1,508 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=+neon -arm-use-neon-fp -relocation-model=pic -disable-fp-elim -O3
-
- type { i16, i8, i8 } ; type %0
- type { [2 x i32], [2 x i32] } ; type %1
- type { %struct.GAP } ; type %2
- type { %struct.rec* } ; type %3
- type { i8, i8, i16, i8, i8, i8, i8 } ; type %4
- type { i8, i8, i8, i8 } ; type %5
- %struct.COMPOSITE = type { i8, i16, i16 }
- %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
- %struct.FILE_POS = type { i8, i8, i16, i32 }
- %struct.FIRST_UNION = type { %struct.FILE_POS }
- %struct.FONT_INFO = type { %struct.metrics*, i8*, i16*, %struct.COMPOSITE*, i32, %struct.rec*, %struct.rec*, i16, i16, i16*, i8*, i8*, i16* }
- %struct.FOURTH_UNION = type { %struct.STYLE }
- %struct.GAP = type { i8, i8, i16 }
- %struct.LIST = type { %struct.rec*, %struct.rec* }
- %struct.SECOND_UNION = type { %0 }
- %struct.STYLE = type { %2, %2, i16, i16, i32 }
- %struct.THIRD_UNION = type { %1 }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
- %struct.head_type = type { [2 x %struct.LIST], %struct.FIRST_UNION, %struct.SECOND_UNION, %struct.THIRD_UNION, %struct.FOURTH_UNION, %struct.rec*, %3, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, i32 }
- %struct.metrics = type { i16, i16, i16, i16, i16 }
- %struct.rec = type { %struct.head_type }
- at .str24239 = external constant [20 x i8], align 1 ; <[20 x i8]*> [#uses=1]
- at no_file_pos = external global %4 ; <%4*> [#uses=1]
- at .str19294 = external constant [9 x i8], align 1 ; <[9 x i8]*> [#uses=1]
- at zz_lengths = external global [150 x i8] ; <[150 x i8]*> [#uses=1]
- at next_free.4772 = external global i8** ; <i8***> [#uses=3]
- at top_free.4773 = external global i8** ; <i8***> [#uses=2]
- at .str1575 = external constant [32 x i8], align 1 ; <[32 x i8]*> [#uses=1]
- at zz_free = external global [524 x %struct.rec*] ; <[524 x %struct.rec*]*> [#uses=2]
- at zz_hold = external global %struct.rec* ; <%struct.rec**> [#uses=5]
- at zz_tmp = external global %struct.rec* ; <%struct.rec**> [#uses=2]
- at zz_res = external global %struct.rec* ; <%struct.rec**> [#uses=2]
- at xx_link = external global %struct.rec* ; <%struct.rec**> [#uses=2]
- at font_count = external global i32 ; <i32*> [#uses=1]
- at .str81872 = external constant [10 x i8], align 1 ; <[10 x i8]*> [#uses=1]
- at .str101874 = external constant [30 x i8], align 1 ; <[30 x i8]*> [#uses=1]
- at .str111875 = external constant [17 x i8], align 1 ; <[17 x i8]*> [#uses=1]
- at .str141878 = external constant [27 x i8], align 1 ; <[27 x i8]*> [#uses=1]
- at out_fp = external global %struct.FILE* ; <%struct.FILE**> [#uses=3]
- at .str192782 = external constant [17 x i8], align 1 ; <[17 x i8]*> [#uses=1]
- at cpexists = external global i32 ; <i32*> [#uses=2]
- at .str212784 = external constant [17 x i8], align 1 ; <[17 x i8]*> [#uses=1]
- at currentfont = external global i32 ; <i32*> [#uses=3]
- at wordcount = external global i32 ; <i32*> [#uses=1]
- at needs = external global %struct.rec* ; <%struct.rec**> [#uses=1]
- at .str742838 = external constant [6 x i8], align 1 ; <[6 x i8]*> [#uses=1]
- at .str752839 = external constant [10 x i8], align 1 ; <[10 x i8]*> [#uses=1]
- at .str1802944 = external constant [40 x i8], align 1 ; <[40 x i8]*> [#uses=1]
- at .str1822946 = external constant [8 x i8], align 1 ; <[8 x i8]*> [#uses=1]
- at .str1842948 = external constant [11 x i8], align 1 ; <[11 x i8]*> [#uses=1]
- at .str1852949 = external constant [23 x i8], align 1 ; <[23 x i8]*> [#uses=1]
- at .str1872951 = external constant [17 x i8], align 1 ; <[17 x i8]*> [#uses=1]
- at .str1932957 = external constant [26 x i8], align 1 ; <[26 x i8]*> [#uses=1]
-
-declare arm_apcscc i32 @fprintf(%struct.FILE* nocapture, i8* nocapture, ...) nounwind
-
-declare arm_apcscc i32 @"\01_fwrite"(i8*, i32, i32, i8*)
-
-declare arm_apcscc i32 @remove(i8* nocapture) nounwind
-
-declare arm_apcscc %struct.FILE* @OpenIncGraphicFile(i8*, i8 zeroext, %struct.rec** nocapture, %struct.FILE_POS*, i32* nocapture) nounwind
-
-declare arm_apcscc %struct.rec* @MakeWord(i32, i8* nocapture, %struct.FILE_POS*) nounwind
-
-declare arm_apcscc void @Error(i32, i32, i8*, i32, %struct.FILE_POS*, ...) nounwind
-
-declare arm_apcscc i32 @"\01_fputs"(i8*, %struct.FILE*)
-
-declare arm_apcscc noalias i8* @calloc(i32, i32) nounwind
-
-declare arm_apcscc i8* @fgets(i8*, i32, %struct.FILE* nocapture) nounwind
-
-define arm_apcscc void @PS_PrintGraphicInclude(%struct.rec* %x, i32 %colmark, i32 %rowmark) nounwind {
-entry:
- %buff = alloca [512 x i8], align 4 ; <[512 x i8]*> [#uses=5]
- %0 = getelementptr %struct.rec* %x, i32 0, i32 0, i32 1, i32 0, i32 0 ; <i8*> [#uses=2]
- %1 = load i8* %0, align 4 ; <i8> [#uses=1]
- %2 = add i8 %1, -94 ; <i8> [#uses=1]
- %3 = icmp ugt i8 %2, 1 ; <i1> [#uses=1]
- br i1 %3, label %bb, label %bb1
-
-bb: ; preds = %entry
- br label %bb1
-
-bb1: ; preds = %bb, %entry
- %4 = getelementptr %struct.rec* %x, i32 0, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
- %5 = bitcast %struct.SECOND_UNION* %4 to %5* ; <%5*> [#uses=1]
- %6 = getelementptr %5* %5, i32 0, i32 1 ; <i8*> [#uses=1]
- %7 = load i8* %6, align 1 ; <i8> [#uses=1]
- %8 = icmp eq i8 %7, 0 ; <i1> [#uses=1]
- br i1 %8, label %bb2, label %bb3
-
-bb2: ; preds = %bb1
- call arm_apcscc void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 1, i32 2, i8* getelementptr ([20 x i8]* @.str24239, i32 0, i32 0), i32 0, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*), i8* getelementptr ([40 x i8]* @.str1802944, i32 0, i32 0)) nounwind
- br label %bb3
-
-bb3: ; preds = %bb2, %bb1
- %9 = load %struct.rec** undef, align 4 ; <%struct.rec*> [#uses=0]
- br label %bb5
-
-bb5: ; preds = %bb5, %bb3
- %y.0 = load %struct.rec** null ; <%struct.rec*> [#uses=2]
- br i1 false, label %bb5, label %bb6
-
-bb6: ; preds = %bb5
- %10 = load i8* %0, align 4 ; <i8> [#uses=1]
- %11 = getelementptr %struct.rec* %y.0, i32 0, i32 0, i32 1, i32 0 ; <%struct.FILE_POS*> [#uses=1]
- %12 = call arm_apcscc %struct.FILE* @OpenIncGraphicFile(i8* undef, i8 zeroext %10, %struct.rec** null, %struct.FILE_POS* %11, i32* undef) nounwind ; <%struct.FILE*> [#uses=4]
- br i1 false, label %bb7, label %bb8
-
-bb7: ; preds = %bb6
- unreachable
-
-bb8: ; preds = %bb6
- %13 = and i32 undef, 4095 ; <i32> [#uses=2]
- %14 = load i32* @currentfont, align 4 ; <i32> [#uses=0]
- br i1 false, label %bb10, label %bb9
-
-bb9: ; preds = %bb8
- %15 = icmp ult i32 0, %13 ; <i1> [#uses=1]
- br i1 %15, label %bb.i, label %FontHalfXHeight.exit
-
-bb.i: ; preds = %bb9
- call arm_apcscc void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 1, i32 2, i8* getelementptr ([20 x i8]* @.str24239, i32 0, i32 0), i32 0, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*), i8* getelementptr ([17 x i8]* @.str111875, i32 0, i32 0)) nounwind
- %.pre186 = load i32* @currentfont, align 4 ; <i32> [#uses=1]
- br label %FontHalfXHeight.exit
-
-FontHalfXHeight.exit: ; preds = %bb.i, %bb9
- %16 = phi i32 [ %.pre186, %bb.i ], [ %13, %bb9 ] ; <i32> [#uses=1]
- br i1 false, label %bb.i1, label %bb1.i
-
-bb.i1: ; preds = %FontHalfXHeight.exit
- br label %bb1.i
-
-bb1.i: ; preds = %bb.i1, %FontHalfXHeight.exit
- br i1 undef, label %bb2.i, label %FontSize.exit
-
-bb2.i: ; preds = %bb1.i
- call arm_apcscc void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 37, i32 61, i8* getelementptr ([30 x i8]* @.str101874, i32 0, i32 0), i32 1, %struct.FILE_POS* null) nounwind
- unreachable
-
-FontSize.exit: ; preds = %bb1.i
- %17 = getelementptr %struct.FONT_INFO* undef, i32 %16, i32 5 ; <%struct.rec**> [#uses=0]
- %18 = load i32* undef, align 4 ; <i32> [#uses=1]
- %19 = load i32* @currentfont, align 4 ; <i32> [#uses=2]
- %20 = load i32* @font_count, align 4 ; <i32> [#uses=1]
- %21 = icmp ult i32 %20, %19 ; <i1> [#uses=1]
- br i1 %21, label %bb.i5, label %FontName.exit
-
-bb.i5: ; preds = %FontSize.exit
- call arm_apcscc void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 1, i32 2, i8* getelementptr ([20 x i8]* @.str24239, i32 0, i32 0), i32 0, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*), i8* getelementptr ([10 x i8]* @.str81872, i32 0, i32 0)) nounwind
- br label %FontName.exit
-
-FontName.exit: ; preds = %bb.i5, %FontSize.exit
- %22 = phi %struct.FONT_INFO* [ undef, %bb.i5 ], [ undef, %FontSize.exit ] ; <%struct.FONT_INFO*> [#uses=1]
- %23 = getelementptr %struct.FONT_INFO* %22, i32 %19, i32 5 ; <%struct.rec**> [#uses=0]
- %24 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* undef, i8* getelementptr ([8 x i8]* @.str1822946, i32 0, i32 0), i32 %18, i8* null) nounwind ; <i32> [#uses=0]
- br label %bb10
-
-bb10: ; preds = %FontName.exit, %bb8
- %25 = call arm_apcscc i32 @"\01_fwrite"(i8* getelementptr ([11 x i8]* @.str1842948, i32 0, i32 0), i32 1, i32 10, i8* undef) nounwind ; <i32> [#uses=0]
- %26 = sub i32 %rowmark, undef ; <i32> [#uses=1]
- %27 = load %struct.FILE** @out_fp, align 4 ; <%struct.FILE*> [#uses=1]
- %28 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %27, i8* getelementptr ([17 x i8]* @.str212784, i32 0, i32 0), i32 undef, i32 %26) nounwind ; <i32> [#uses=0]
- store i32 0, i32* @cpexists, align 4
- %29 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* undef, i8* getelementptr ([17 x i8]* @.str192782, i32 0, i32 0), double 2.000000e+01, double 2.000000e+01) nounwind ; <i32> [#uses=0]
- %30 = getelementptr %struct.rec* %y.0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
- %31 = load i32* %30, align 4 ; <i32> [#uses=1]
- %32 = sub i32 0, %31 ; <i32> [#uses=1]
- %33 = load i32* undef, align 4 ; <i32> [#uses=1]
- %34 = sub i32 0, %33 ; <i32> [#uses=1]
- %35 = load %struct.FILE** @out_fp, align 4 ; <%struct.FILE*> [#uses=1]
- %36 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %35, i8* getelementptr ([17 x i8]* @.str212784, i32 0, i32 0), i32 %32, i32 %34) nounwind ; <i32> [#uses=0]
- store i32 0, i32* @cpexists, align 4
- %37 = load %struct.rec** null, align 4 ; <%struct.rec*> [#uses=1]
- %38 = getelementptr %struct.rec* %37, i32 0, i32 0, i32 4 ; <%struct.FOURTH_UNION*> [#uses=1]
- %39 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* undef, i8* getelementptr ([23 x i8]* @.str1852949, i32 0, i32 0), %struct.FOURTH_UNION* %38) nounwind ; <i32> [#uses=0]
- %buff14 = getelementptr [512 x i8]* %buff, i32 0, i32 0 ; <i8*> [#uses=5]
- %40 = call arm_apcscc i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind ; <i8*> [#uses=0]
- %iftmp.506.0 = select i1 undef, i32 2, i32 0 ; <i32> [#uses=1]
- %41 = getelementptr [512 x i8]* %buff, i32 0, i32 26 ; <i8*> [#uses=1]
- br label %bb100.outer.outer
-
-bb100.outer.outer: ; preds = %bb83, %bb10
- %state.0.ph.ph = phi i32 [ %iftmp.506.0, %bb10 ], [ undef, %bb83 ] ; <i32> [#uses=1]
- %x_addr.0.ph.ph = phi %struct.rec* [ %x, %bb10 ], [ %71, %bb83 ] ; <%struct.rec*> [#uses=1]
- %42 = getelementptr %struct.rec* %x_addr.0.ph.ph, i32 0, i32 0, i32 1, i32 0 ; <%struct.FILE_POS*> [#uses=0]
- br label %bb100.outer
-
-bb.i80: ; preds = %bb3.i85
- %43 = icmp eq i8 %44, %46 ; <i1> [#uses=1]
- %indvar.next.i79 = add i32 %indvar.i81, 1 ; <i32> [#uses=1]
- br i1 %43, label %bb2.i84, label %bb2.i51
-
-bb2.i84: ; preds = %bb100.outer, %bb.i80
- %indvar.i81 = phi i32 [ %indvar.next.i79, %bb.i80 ], [ 0, %bb100.outer ] ; <i32> [#uses=3]
- %pp.0.i82 = getelementptr [27 x i8]* @.str141878, i32 0, i32 %indvar.i81 ; <i8*> [#uses=2]
- %sp.0.i83 = getelementptr [512 x i8]* %buff, i32 0, i32 %indvar.i81 ; <i8*> [#uses=1]
- %44 = load i8* %sp.0.i83, align 1 ; <i8> [#uses=2]
- %45 = icmp eq i8 %44, 0 ; <i1> [#uses=1]
- br i1 %45, label %StringBeginsWith.exit88thread-split, label %bb3.i85
-
-bb3.i85: ; preds = %bb2.i84
- %46 = load i8* %pp.0.i82, align 1 ; <i8> [#uses=3]
- %47 = icmp eq i8 %46, 0 ; <i1> [#uses=1]
- br i1 %47, label %StringBeginsWith.exit88, label %bb.i80
-
-StringBeginsWith.exit88thread-split: ; preds = %bb2.i84
- %.pr = load i8* %pp.0.i82 ; <i8> [#uses=1]
- br label %StringBeginsWith.exit88
-
-StringBeginsWith.exit88: ; preds = %StringBeginsWith.exit88thread-split, %bb3.i85
- %48 = phi i8 [ %.pr, %StringBeginsWith.exit88thread-split ], [ %46, %bb3.i85 ] ; <i8> [#uses=1]
- %phitmp91 = icmp eq i8 %48, 0 ; <i1> [#uses=1]
- br i1 %phitmp91, label %bb3.i77, label %bb2.i51
-
-bb2.i.i68: ; preds = %bb3.i77
- br i1 false, label %bb2.i51, label %bb2.i75
-
-bb2.i75: ; preds = %bb2.i.i68
- br label %bb3.i77
-
-bb3.i77: ; preds = %bb2.i75, %StringBeginsWith.exit88
- %sp.0.i76 = getelementptr [512 x i8]* %buff, i32 0, i32 undef ; <i8*> [#uses=1]
- %49 = load i8* %sp.0.i76, align 1 ; <i8> [#uses=1]
- %50 = icmp eq i8 %49, 0 ; <i1> [#uses=1]
- br i1 %50, label %bb24, label %bb2.i.i68
-
-bb24: ; preds = %bb3.i77
- %51 = call arm_apcscc %struct.rec* @MakeWord(i32 11, i8* %41, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind ; <%struct.rec*> [#uses=0]
- %52 = load i8* getelementptr ([150 x i8]* @zz_lengths, i32 0, i32 0), align 4 ; <i8> [#uses=1]
- %53 = zext i8 %52 to i32 ; <i32> [#uses=2]
- %54 = getelementptr [524 x %struct.rec*]* @zz_free, i32 0, i32 %53 ; <%struct.rec**> [#uses=2]
- %55 = load %struct.rec** %54, align 4 ; <%struct.rec*> [#uses=3]
- %56 = icmp eq %struct.rec* %55, null ; <i1> [#uses=1]
- br i1 %56, label %bb27, label %bb28
-
-bb27: ; preds = %bb24
- br i1 undef, label %bb.i56, label %GetMemory.exit62
-
-bb.i56: ; preds = %bb27
- br i1 undef, label %bb1.i58, label %bb2.i60
-
-bb1.i58: ; preds = %bb.i56
- call arm_apcscc void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 31, i32 1, i8* getelementptr ([32 x i8]* @.str1575, i32 0, i32 0), i32 1, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind
- br label %bb2.i60
-
-bb2.i60: ; preds = %bb1.i58, %bb.i56
- %.pre1.i59 = phi i8** [ undef, %bb1.i58 ], [ undef, %bb.i56 ] ; <i8**> [#uses=1]
- store i8** undef, i8*** @top_free.4773, align 4
- br label %GetMemory.exit62
-
-GetMemory.exit62: ; preds = %bb2.i60, %bb27
- %57 = phi i8** [ %.pre1.i59, %bb2.i60 ], [ undef, %bb27 ] ; <i8**> [#uses=1]
- %58 = getelementptr i8** %57, i32 %53 ; <i8**> [#uses=1]
- store i8** %58, i8*** @next_free.4772, align 4
- store %struct.rec* undef, %struct.rec** @zz_hold, align 4
- br label %bb29
-
-bb28: ; preds = %bb24
- store %struct.rec* %55, %struct.rec** @zz_hold, align 4
- %59 = load %struct.rec** null, align 4 ; <%struct.rec*> [#uses=1]
- store %struct.rec* %59, %struct.rec** %54, align 4
- br label %bb29
-
-bb29: ; preds = %bb28, %GetMemory.exit62
- %.pre184 = phi %struct.rec* [ %55, %bb28 ], [ undef, %GetMemory.exit62 ] ; <%struct.rec*> [#uses=3]
- store i8 0, i8* undef
- store %struct.rec* %.pre184, %struct.rec** @xx_link, align 4
- br i1 undef, label %bb35, label %bb31
-
-bb31: ; preds = %bb29
- store %struct.rec* %.pre184, %struct.rec** undef
- br label %bb35
-
-bb35: ; preds = %bb31, %bb29
- br i1 undef, label %bb41, label %bb37
-
-bb37: ; preds = %bb35
- %60 = load %struct.rec** null, align 4 ; <%struct.rec*> [#uses=1]
- store %struct.rec* %60, %struct.rec** undef
- store %struct.rec* undef, %struct.rec** null
- store %struct.rec* %.pre184, %struct.rec** null, align 4
- br label %bb41
-
-bb41: ; preds = %bb37, %bb35
- %61 = call arm_apcscc i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind ; <i8*> [#uses=1]
- %62 = icmp eq i8* %61, null ; <i1> [#uses=1]
- %iftmp.554.0 = select i1 %62, i32 2, i32 1 ; <i32> [#uses=1]
- br label %bb100.outer
-
-bb.i47: ; preds = %bb3.i52
- %63 = icmp eq i8 %64, %65 ; <i1> [#uses=1]
- br i1 %63, label %bb2.i51, label %bb2.i41
-
-bb2.i51: ; preds = %bb.i47, %bb2.i.i68, %StringBeginsWith.exit88, %bb.i80
- %pp.0.i49 = getelementptr [17 x i8]* @.str1872951, i32 0, i32 0 ; <i8*> [#uses=1]
- %64 = load i8* null, align 1 ; <i8> [#uses=1]
- br i1 false, label %StringBeginsWith.exit55thread-split, label %bb3.i52
-
-bb3.i52: ; preds = %bb2.i51
- %65 = load i8* %pp.0.i49, align 1 ; <i8> [#uses=1]
- br i1 false, label %StringBeginsWith.exit55, label %bb.i47
-
-StringBeginsWith.exit55thread-split: ; preds = %bb2.i51
- br label %StringBeginsWith.exit55
-
-StringBeginsWith.exit55: ; preds = %StringBeginsWith.exit55thread-split, %bb3.i52
- br i1 false, label %bb49, label %bb2.i41
-
-bb49: ; preds = %StringBeginsWith.exit55
- br label %bb2.i41
-
-bb2.i41: ; preds = %bb2.i41, %bb49, %StringBeginsWith.exit55, %bb.i47
- br i1 false, label %bb2.i41, label %bb2.i.i15
-
-bb2.i.i15: ; preds = %bb2.i41
- %pp.0.i.i13 = getelementptr [6 x i8]* @.str742838, i32 0, i32 0 ; <i8*> [#uses=1]
- br i1 false, label %StringBeginsWith.exitthread-split.i18, label %bb3.i.i16
-
-bb3.i.i16: ; preds = %bb2.i.i15
- %66 = load i8* %pp.0.i.i13, align 1 ; <i8> [#uses=1]
- br label %StringBeginsWith.exit.i20
-
-StringBeginsWith.exitthread-split.i18: ; preds = %bb2.i.i15
- br label %StringBeginsWith.exit.i20
-
-StringBeginsWith.exit.i20: ; preds = %StringBeginsWith.exitthread-split.i18, %bb3.i.i16
- %67 = phi i8 [ undef, %StringBeginsWith.exitthread-split.i18 ], [ %66, %bb3.i.i16 ] ; <i8> [#uses=1]
- %phitmp.i19 = icmp eq i8 %67, 0 ; <i1> [#uses=1]
- br i1 %phitmp.i19, label %bb58, label %bb2.i6.i26
-
-bb2.i6.i26: ; preds = %bb2.i6.i26, %StringBeginsWith.exit.i20
- %indvar.i3.i23 = phi i32 [ %indvar.next.i1.i21, %bb2.i6.i26 ], [ 0, %StringBeginsWith.exit.i20 ] ; <i32> [#uses=3]
- %sp.0.i5.i25 = getelementptr [512 x i8]* %buff, i32 0, i32 %indvar.i3.i23 ; <i8*> [#uses=0]
- %pp.0.i4.i24 = getelementptr [10 x i8]* @.str752839, i32 0, i32 %indvar.i3.i23 ; <i8*> [#uses=1]
- %68 = load i8* %pp.0.i4.i24, align 1 ; <i8> [#uses=0]
- %indvar.next.i1.i21 = add i32 %indvar.i3.i23, 1 ; <i32> [#uses=1]
- br i1 undef, label %bb2.i6.i26, label %bb55
-
-bb55: ; preds = %bb2.i6.i26
- %69 = call arm_apcscc i32 @"\01_fputs"(i8* %buff14, %struct.FILE* undef) nounwind ; <i32> [#uses=0]
- unreachable
-
-bb58: ; preds = %StringBeginsWith.exit.i20
- %70 = call arm_apcscc i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind ; <i8*> [#uses=0]
- %iftmp.560.0 = select i1 undef, i32 2, i32 0 ; <i32> [#uses=1]
- br label %bb100.outer
-
-bb.i7: ; preds = %bb3.i
- br i1 false, label %bb2.i8, label %bb2.i.i
-
-bb2.i8: ; preds = %bb100.outer, %bb.i7
- br i1 undef, label %StringBeginsWith.exitthread-split, label %bb3.i
-
-bb3.i: ; preds = %bb2.i8
- br i1 undef, label %StringBeginsWith.exit, label %bb.i7
-
-StringBeginsWith.exitthread-split: ; preds = %bb2.i8
- br label %StringBeginsWith.exit
-
-StringBeginsWith.exit: ; preds = %StringBeginsWith.exitthread-split, %bb3.i
- %phitmp93 = icmp eq i8 undef, 0 ; <i1> [#uses=1]
- br i1 %phitmp93, label %bb66, label %bb2.i.i
-
-bb66: ; preds = %StringBeginsWith.exit
- %71 = call arm_apcscc %struct.rec* @MakeWord(i32 11, i8* undef, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind ; <%struct.rec*> [#uses=4]
- %72 = load i8* getelementptr ([150 x i8]* @zz_lengths, i32 0, i32 0), align 4 ; <i8> [#uses=1]
- %73 = zext i8 %72 to i32 ; <i32> [#uses=2]
- %74 = getelementptr [524 x %struct.rec*]* @zz_free, i32 0, i32 %73 ; <%struct.rec**> [#uses=2]
- %75 = load %struct.rec** %74, align 4 ; <%struct.rec*> [#uses=3]
- %76 = icmp eq %struct.rec* %75, null ; <i1> [#uses=1]
- br i1 %76, label %bb69, label %bb70
-
-bb69: ; preds = %bb66
- br i1 undef, label %bb.i2, label %GetMemory.exit
-
-bb.i2: ; preds = %bb69
- %77 = call arm_apcscc noalias i8* @calloc(i32 1020, i32 4) nounwind ; <i8*> [#uses=1]
- %78 = bitcast i8* %77 to i8** ; <i8**> [#uses=3]
- store i8** %78, i8*** @next_free.4772, align 4
- br i1 undef, label %bb1.i3, label %bb2.i4
-
-bb1.i3: ; preds = %bb.i2
- call arm_apcscc void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 31, i32 1, i8* getelementptr ([32 x i8]* @.str1575, i32 0, i32 0), i32 1, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind
- br label %bb2.i4
-
-bb2.i4: ; preds = %bb1.i3, %bb.i2
- %.pre1.i = phi i8** [ undef, %bb1.i3 ], [ %78, %bb.i2 ] ; <i8**> [#uses=1]
- %79 = phi i8** [ undef, %bb1.i3 ], [ %78, %bb.i2 ] ; <i8**> [#uses=1]
- %80 = getelementptr i8** %79, i32 1020 ; <i8**> [#uses=1]
- store i8** %80, i8*** @top_free.4773, align 4
- br label %GetMemory.exit
-
-GetMemory.exit: ; preds = %bb2.i4, %bb69
- %81 = phi i8** [ %.pre1.i, %bb2.i4 ], [ undef, %bb69 ] ; <i8**> [#uses=2]
- %82 = bitcast i8** %81 to %struct.rec* ; <%struct.rec*> [#uses=3]
- %83 = getelementptr i8** %81, i32 %73 ; <i8**> [#uses=1]
- store i8** %83, i8*** @next_free.4772, align 4
- store %struct.rec* %82, %struct.rec** @zz_hold, align 4
- br label %bb71
-
-bb70: ; preds = %bb66
- %84 = load %struct.rec** null, align 4 ; <%struct.rec*> [#uses=1]
- store %struct.rec* %84, %struct.rec** %74, align 4
- br label %bb71
-
-bb71: ; preds = %bb70, %GetMemory.exit
- %.pre185 = phi %struct.rec* [ %75, %bb70 ], [ %82, %GetMemory.exit ] ; <%struct.rec*> [#uses=8]
- %85 = phi %struct.rec* [ %75, %bb70 ], [ %82, %GetMemory.exit ] ; <%struct.rec*> [#uses=1]
- %86 = getelementptr %struct.rec* %85, i32 0, i32 0, i32 1, i32 0, i32 0 ; <i8*> [#uses=0]
- %87 = getelementptr %struct.rec* %.pre185, i32 0, i32 0, i32 0, i32 1, i32 1 ; <%struct.rec**> [#uses=0]
- %88 = getelementptr %struct.rec* %.pre185, i32 0, i32 0, i32 0, i32 1, i32 0 ; <%struct.rec**> [#uses=1]
- store %struct.rec* %.pre185, %struct.rec** @xx_link, align 4
- store %struct.rec* %.pre185, %struct.rec** @zz_res, align 4
- %89 = load %struct.rec** @needs, align 4 ; <%struct.rec*> [#uses=2]
- store %struct.rec* %89, %struct.rec** @zz_hold, align 4
- br i1 false, label %bb77, label %bb73
-
-bb73: ; preds = %bb71
- %90 = getelementptr %struct.rec* %89, i32 0, i32 0, i32 0, i32 0, i32 0 ; <%struct.rec**> [#uses=1]
- store %struct.rec* null, %struct.rec** @zz_tmp, align 4
- store %struct.rec* %.pre185, %struct.rec** %90
- store %struct.rec* %.pre185, %struct.rec** undef, align 4
- br label %bb77
-
-bb77: ; preds = %bb73, %bb71
- store %struct.rec* %.pre185, %struct.rec** @zz_res, align 4
- store %struct.rec* %71, %struct.rec** @zz_hold, align 4
- br i1 undef, label %bb83, label %bb79
-
-bb79: ; preds = %bb77
- %91 = getelementptr %struct.rec* %71, i32 0, i32 0, i32 0, i32 1, i32 0 ; <%struct.rec**> [#uses=1]
- store %struct.rec* null, %struct.rec** @zz_tmp, align 4
- %92 = load %struct.rec** %88, align 4 ; <%struct.rec*> [#uses=1]
- store %struct.rec* %92, %struct.rec** %91
- %93 = getelementptr %struct.rec* undef, i32 0, i32 0, i32 0, i32 1, i32 1 ; <%struct.rec**> [#uses=1]
- store %struct.rec* %71, %struct.rec** %93, align 4
- store %struct.rec* %.pre185, %struct.rec** undef, align 4
- br label %bb83
-
-bb83: ; preds = %bb79, %bb77
- br label %bb100.outer.outer
-
-bb.i.i: ; preds = %bb3.i.i
- br i1 undef, label %bb2.i.i, label %bb2.i6.i
-
-bb2.i.i: ; preds = %bb.i.i, %StringBeginsWith.exit, %bb.i7
- br i1 undef, label %StringBeginsWith.exitthread-split.i, label %bb3.i.i
-
-bb3.i.i: ; preds = %bb2.i.i
- br i1 undef, label %StringBeginsWith.exit.i, label %bb.i.i
-
-StringBeginsWith.exitthread-split.i: ; preds = %bb2.i.i
- br label %StringBeginsWith.exit.i
-
-StringBeginsWith.exit.i: ; preds = %StringBeginsWith.exitthread-split.i, %bb3.i.i
- br i1 false, label %bb94, label %bb2.i6.i
-
-bb.i2.i: ; preds = %bb3.i7.i
- br i1 false, label %bb2.i6.i, label %bb91
-
-bb2.i6.i: ; preds = %bb.i2.i, %StringBeginsWith.exit.i, %bb.i.i
- br i1 undef, label %strip_out.exitthread-split, label %bb3.i7.i
-
-bb3.i7.i: ; preds = %bb2.i6.i
- %94 = load i8* undef, align 1 ; <i8> [#uses=1]
- br i1 undef, label %strip_out.exit, label %bb.i2.i
-
-strip_out.exitthread-split: ; preds = %bb2.i6.i
- %.pr100 = load i8* undef ; <i8> [#uses=1]
- br label %strip_out.exit
-
-strip_out.exit: ; preds = %strip_out.exitthread-split, %bb3.i7.i
- %95 = phi i8 [ %.pr100, %strip_out.exitthread-split ], [ %94, %bb3.i7.i ] ; <i8> [#uses=0]
- br i1 undef, label %bb94, label %bb91
-
-bb91: ; preds = %strip_out.exit, %bb.i2.i
- unreachable
-
-bb94: ; preds = %strip_out.exit, %StringBeginsWith.exit.i
- %96 = call arm_apcscc i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind ; <i8*> [#uses=0]
- unreachable
-
-bb100.outer: ; preds = %bb58, %bb41, %bb100.outer.outer
- %state.0.ph = phi i32 [ %state.0.ph.ph, %bb100.outer.outer ], [ %iftmp.560.0, %bb58 ], [ %iftmp.554.0, %bb41 ] ; <i32> [#uses=1]
- switch i32 %state.0.ph, label %bb2.i84 [
- i32 2, label %bb101.split
- i32 1, label %bb2.i8
- ]
-
-bb101.split: ; preds = %bb100.outer
- %97 = icmp eq i32 undef, 0 ; <i1> [#uses=1]
- br i1 %97, label %bb103, label %bb102
-
-bb102: ; preds = %bb101.split
- %98 = call arm_apcscc i32 @remove(i8* getelementptr ([9 x i8]* @.str19294, i32 0, i32 0)) nounwind ; <i32> [#uses=0]
- unreachable
-
-bb103: ; preds = %bb101.split
- %99 = load %struct.FILE** @out_fp, align 4 ; <%struct.FILE*> [#uses=1]
- %100 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %99, i8* getelementptr ([26 x i8]* @.str1932957, i32 0, i32 0)) nounwind ; <i32> [#uses=0]
- store i32 0, i32* @wordcount, align 4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll
deleted file mode 100644
index 7647474..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mattr=+neon -arm-use-neon-fp
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mattr=+neon -arm-use-neon-fp | not grep fcpys
-; rdar://7117307
-
- %struct.Hosp = type { i32, i32, i32, %struct.List, %struct.List, %struct.List, %struct.List }
- %struct.List = type { %struct.List*, %struct.Patient*, %struct.List* }
- %struct.Patient = type { i32, i32, i32, %struct.Village* }
- %struct.Results = type { float, float, float }
- %struct.Village = type { [4 x %struct.Village*], %struct.Village*, %struct.List, %struct.Hosp, i32, i32 }
-
-define arm_apcscc void @get_results(%struct.Results* noalias nocapture sret %agg.result, %struct.Village* %village) nounwind {
-entry:
- br i1 undef, label %bb, label %bb6.preheader
-
-bb6.preheader: ; preds = %entry
- call void @llvm.memcpy.i32(i8* undef, i8* undef, i32 12, i32 4)
- br i1 undef, label %bb15, label %bb13
-
-bb: ; preds = %entry
- ret void
-
-bb13: ; preds = %bb13, %bb6.preheader
- %0 = fadd float undef, undef ; <float> [#uses=1]
- %1 = fadd float undef, 1.000000e+00 ; <float> [#uses=1]
- br i1 undef, label %bb15, label %bb13
-
-bb15: ; preds = %bb13, %bb6.preheader
- %r1.0.0.lcssa = phi float [ 0.000000e+00, %bb6.preheader ], [ %1, %bb13 ] ; <float> [#uses=1]
- %r1.1.0.lcssa = phi float [ undef, %bb6.preheader ], [ %0, %bb13 ] ; <float> [#uses=0]
- store float %r1.0.0.lcssa, float* undef, align 4
- ret void
-}
-
-declare void @llvm.memcpy.i32(i8* nocapture, i8* nocapture, i32, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug2.ll
deleted file mode 100644
index acf562c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug2.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mattr=+neon -arm-use-neon-fp
-; rdar://7117307
-
- %struct.Hosp = type { i32, i32, i32, %struct.List, %struct.List, %struct.List, %struct.List }
- %struct.List = type { %struct.List*, %struct.Patient*, %struct.List* }
- %struct.Patient = type { i32, i32, i32, %struct.Village* }
- %struct.Village = type { [4 x %struct.Village*], %struct.Village*, %struct.List, %struct.Hosp, i32, i32 }
-
-define arm_apcscc %struct.List* @sim(%struct.Village* %village) nounwind {
-entry:
- br i1 undef, label %bb14, label %bb3.preheader
-
-bb3.preheader: ; preds = %entry
- br label %bb5
-
-bb5: ; preds = %bb5, %bb3.preheader
- br i1 undef, label %bb11, label %bb5
-
-bb11: ; preds = %bb5
- %0 = fmul float undef, 0x41E0000000000000 ; <float> [#uses=1]
- %1 = fptosi float %0 to i32 ; <i32> [#uses=1]
- store i32 %1, i32* undef, align 4
- br i1 undef, label %generate_patient.exit, label %generate_patient.exit.thread
-
-generate_patient.exit.thread: ; preds = %bb11
- ret %struct.List* null
-
-generate_patient.exit: ; preds = %bb11
- br i1 undef, label %bb14, label %bb12
-
-bb12: ; preds = %generate_patient.exit
- br i1 undef, label %bb.i, label %bb1.i
-
-bb.i: ; preds = %bb12
- ret %struct.List* null
-
-bb1.i: ; preds = %bb12
- ret %struct.List* null
-
-bb14: ; preds = %generate_patient.exit, %entry
- ret %struct.List* undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll
deleted file mode 100644
index 3ada026..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll
+++ /dev/null
@@ -1,54 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mattr=+neon -arm-use-neon-fp
-; rdar://7117307
-
- %struct.Hosp = type { i32, i32, i32, %struct.List, %struct.List, %struct.List, %struct.List }
- %struct.List = type { %struct.List*, %struct.Patient*, %struct.List* }
- %struct.Patient = type { i32, i32, i32, %struct.Village* }
- %struct.Village = type { [4 x %struct.Village*], %struct.Village*, %struct.List, %struct.Hosp, i32, i32 }
-
-define arm_apcscc %struct.List* @sim(%struct.Village* %village) nounwind {
-entry:
- br i1 undef, label %bb14, label %bb3.preheader
-
-bb3.preheader: ; preds = %entry
- br label %bb5
-
-bb5: ; preds = %bb5, %bb3.preheader
- br i1 undef, label %bb11, label %bb5
-
-bb11: ; preds = %bb5
- %0 = load i32* undef, align 4 ; <i32> [#uses=1]
- %1 = xor i32 %0, 123459876 ; <i32> [#uses=1]
- %2 = sdiv i32 %1, 127773 ; <i32> [#uses=1]
- %3 = mul i32 %2, 2836 ; <i32> [#uses=1]
- %4 = sub i32 0, %3 ; <i32> [#uses=1]
- %5 = xor i32 %4, 123459876 ; <i32> [#uses=1]
- %idum_addr.0.i.i = select i1 undef, i32 undef, i32 %5 ; <i32> [#uses=1]
- %6 = sitofp i32 %idum_addr.0.i.i to double ; <double> [#uses=1]
- %7 = fmul double %6, 0x3E00000000200000 ; <double> [#uses=1]
- %8 = fptrunc double %7 to float ; <float> [#uses=2]
- %9 = fmul float %8, 0x41E0000000000000 ; <float> [#uses=1]
- %10 = fptosi float %9 to i32 ; <i32> [#uses=1]
- store i32 %10, i32* undef, align 4
- %11 = fpext float %8 to double ; <double> [#uses=1]
- %12 = fcmp ogt double %11, 6.660000e-01 ; <i1> [#uses=1]
- br i1 %12, label %generate_patient.exit, label %generate_patient.exit.thread
-
-generate_patient.exit.thread: ; preds = %bb11
- ret %struct.List* null
-
-generate_patient.exit: ; preds = %bb11
- br i1 undef, label %bb14, label %bb12
-
-bb12: ; preds = %generate_patient.exit
- br i1 undef, label %bb.i, label %bb1.i
-
-bb.i: ; preds = %bb12
- ret %struct.List* null
-
-bb1.i: ; preds = %bb12
- ret %struct.List* null
-
-bb14: ; preds = %generate_patient.exit, %entry
- ret %struct.List* undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
deleted file mode 100644
index 4077535..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-none-linux-gnueabi | FileCheck %s
-; PR4659
-; PR4682
-
-define hidden arm_aapcscc i32 @__gcov_execlp(i8* %path, i8* %arg, ...) nounwind {
-entry:
-; CHECK: __gcov_execlp:
-; CHECK: mov sp, r7
-; CHECK: sub sp, #4
- call arm_aapcscc void @__gcov_flush() nounwind
- br i1 undef, label %bb5, label %bb
-
-bb: ; preds = %bb, %entry
- br i1 undef, label %bb5, label %bb
-
-bb5: ; preds = %bb, %entry
- %0 = alloca i8*, i32 undef, align 4 ; <i8**> [#uses=1]
- %1 = call arm_aapcscc i32 @execvp(i8* %path, i8** %0) nounwind ; <i32> [#uses=1]
- ret i32 %1
-}
-
-declare hidden arm_aapcscc void @__gcov_flush()
-
-declare arm_aapcscc i32 @execvp(i8*, i8**) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-07-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-07-CoalescerBug.ll
deleted file mode 100644
index 93f5a0f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-07-CoalescerBug.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -mtriple=armv7-eabi -mattr=+vfp2
-; PR4686
-
- %a = type { i32 (...)** }
- %b = type { %a }
- %c = type { float, float, float, float }
-
-declare arm_aapcs_vfpcc float @bar(%c*)
-
-define arm_aapcs_vfpcc void @foo(%b* %x, %c* %y) {
-entry:
- %0 = call arm_aapcs_vfpcc float @bar(%c* %y) ; <float> [#uses=0]
- %1 = fadd float undef, undef ; <float> [#uses=1]
- store float %1, float* undef, align 8
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-07-NeonFPBug.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-07-NeonFPBug.ll
deleted file mode 100644
index 090ed2d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-07-NeonFPBug.ll
+++ /dev/null
@@ -1,80 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -mcpu=cortex-a8 -arm-use-neon-fp
-
- %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
- %struct.JHUFF_TBL = type { [17 x i8], [256 x i8], i32 }
- %struct.JQUANT_TBL = type { [64 x i16], i32 }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
- %struct.anon = type { [8 x i32], [48 x i8] }
- %struct.backing_store_info = type { void (%struct.jpeg_common_struct*, %struct.backing_store_info*, i8*, i32, i32)*, void (%struct.jpeg_common_struct*, %struct.backing_store_info*, i8*, i32, i32)*, void (%struct.jpeg_common_struct*, %struct.backing_store_info*)*, %struct.FILE*, [64 x i8] }
- %struct.jpeg_color_deconverter = type { void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*, i8***, i32, i8**, i32)* }
- %struct.jpeg_color_quantizer = type { void (%struct.jpeg_decompress_struct*, i32)*, void (%struct.jpeg_decompress_struct*, i8**, i8**, i32)*, void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)* }
- %struct.jpeg_common_struct = type { %struct.jpeg_error_mgr*, %struct.jpeg_memory_mgr*, %struct.jpeg_progress_mgr*, i32, i32 }
- %struct.jpeg_component_info = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.JQUANT_TBL*, i8* }
- %struct.jpeg_d_coef_controller = type { void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*, i8***)*, %struct.jvirt_barray_control** }
- %struct.jpeg_d_main_controller = type { void (%struct.jpeg_decompress_struct*, i32)*, void (%struct.jpeg_decompress_struct*, i8**, i32*, i32)* }
- %struct.jpeg_d_post_controller = type { void (%struct.jpeg_decompress_struct*, i32)*, void (%struct.jpeg_decompress_struct*, i8***, i32*, i32, i8**, i32*, i32)* }
- %struct.jpeg_decomp_master = type { void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, i32 }
- %struct.jpeg_decompress_struct = type { %struct.jpeg_error_mgr*, %struct.jpeg_memory_mgr*, %struct.jpeg_progress_mgr*, i32, i32, %struct.jpeg_source_mgr*, i32, i32, i32, i32, i32, i32, i32, double, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8**, i32, i32, i32, i32, i32, [64 x i32]*, [4 x %struct.JQUANT_TBL*], [4 x %struct.JHUFF_TBL*], [4 x %struct.JHUFF_TBL*], i32, %struct.jpeg_component_info*, i32, i32, [16 x i8], [16 x i8], [16 x i8], i32, i32, i8, i16, i16, i32, i8, i32, i32, i32, i32, i32, i8*, i32, [4 x %struct.jpeg_component_info*], i32, i32, i32, [10 x i32], i32, i32, i32, i32, i32, %struct.jpeg_decomp_master*, %struct.jpeg_d_main_controller*, %struct.jpeg_d_coef_controller*, %struct.jpeg_d_post_controller*, %struct.jpeg_input_controller*, %struct.jpeg_marker_reader*, %struct.jpeg_entropy_decoder*, %struct.jpeg_inverse_dct*, %struct.jpeg_upsampler*, %struct.jpeg_color_deconverter*, %struct.jpeg_color_quantizer* }
- %struct.jpeg_entropy_decoder = type { void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*, [64 x i16]**)* }
- %struct.jpeg_error_mgr = type { void (%struct.jpeg_common_struct*)*, void (%struct.jpeg_common_struct*, i32)*, void (%struct.jpeg_common_struct*)*, void (%struct.jpeg_common_struct*, i8*)*, void (%struct.jpeg_common_struct*)*, i32, %struct.anon, i32, i32, i8**, i32, i8**, i32, i32 }
- %struct.jpeg_input_controller = type { i32 (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*)*, i32, i32 }
- %struct.jpeg_inverse_dct = type { void (%struct.jpeg_decompress_struct*)*, [10 x void (%struct.jpeg_decompress_struct*, %struct.jpeg_component_info*, i16*, i8**, i32)*] }
- %struct.jpeg_marker_reader = type { void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, [16 x i32 (%struct.jpeg_decompress_struct*)*], i32, i32, i32, i32 }
- %struct.jpeg_memory_mgr = type { i8* (%struct.jpeg_common_struct*, i32, i32)*, i8* (%struct.jpeg_common_struct*, i32, i32)*, i8** (%struct.jpeg_common_struct*, i32, i32, i32)*, [64 x i16]** (%struct.jpeg_common_struct*, i32, i32, i32)*, %struct.jvirt_sarray_control* (%struct.jpeg_common_struct*, i32, i32, i32, i32, i32)*, %struct.jvirt_barray_control* (%struct.jpeg_common_struct*, i32, i32, i32, i32, i32)*, void (%struct.jpeg_common_struct*)*, i8** (%struct.jpeg_common_struct*, %struct.jvirt_sarray_control*, i32, i32, i32)*, [64 x i16]** (%struct.jpeg_common_struct*, %struct.jvirt_barray_control*, i32, i32, i32)*, void (%struct.jpeg_common_struct*, i32)*, void (%struct.jpeg_common_struct*)*, i32 }
- %struct.jpeg_progress_mgr = type { void (%struct.jpeg_common_struct*)*, i32, i32, i32, i32 }
- %struct.jpeg_source_mgr = type { i8*, i32, void (%struct.jpeg_decompress_struct*)*, i32 (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*, i32)*, i32 (%struct.jpeg_decompress_struct*, i32)*, void (%struct.jpeg_decompress_struct*)* }
- %struct.jpeg_upsampler = type { void (%struct.jpeg_decompress_struct*)*, void (%struct.jpeg_decompress_struct*, i8***, i32*, i32, i8**, i32*, i32)*, i32 }
- %struct.jvirt_barray_control = type { [64 x i16]**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.jvirt_barray_control*, %struct.backing_store_info }
- %struct.jvirt_sarray_control = type { i8**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.jvirt_sarray_control*, %struct.backing_store_info }
-
-define arm_apcscc void @jpeg_idct_float(%struct.jpeg_decompress_struct* nocapture %cinfo, %struct.jpeg_component_info* nocapture %compptr, i16* nocapture %coef_block, i8** nocapture %output_buf, i32 %output_col) nounwind {
-entry:
- br label %bb
-
-bb: ; preds = %bb, %entry
- %0 = load float* undef, align 4 ; <float> [#uses=1]
- %1 = fmul float undef, %0 ; <float> [#uses=2]
- %tmp73 = add i32 0, 224 ; <i32> [#uses=1]
- %scevgep74 = getelementptr i8* null, i32 %tmp73 ; <i8*> [#uses=1]
- %scevgep7475 = bitcast i8* %scevgep74 to float* ; <float*> [#uses=1]
- %2 = load float* null, align 4 ; <float> [#uses=1]
- %3 = fmul float 0.000000e+00, %2 ; <float> [#uses=2]
- %4 = fadd float %1, %3 ; <float> [#uses=1]
- %5 = fsub float %1, %3 ; <float> [#uses=2]
- %6 = fadd float undef, 0.000000e+00 ; <float> [#uses=2]
- %7 = fmul float undef, 0x3FF6A09E60000000 ; <float> [#uses=1]
- %8 = fsub float %7, %6 ; <float> [#uses=2]
- %9 = fsub float %4, %6 ; <float> [#uses=1]
- %10 = fadd float %5, %8 ; <float> [#uses=2]
- %11 = fsub float %5, %8 ; <float> [#uses=1]
- %12 = sitofp i16 undef to float ; <float> [#uses=1]
- %13 = fmul float %12, 0.000000e+00 ; <float> [#uses=2]
- %14 = sitofp i16 undef to float ; <float> [#uses=1]
- %15 = load float* %scevgep7475, align 4 ; <float> [#uses=1]
- %16 = fmul float %14, %15 ; <float> [#uses=2]
- %17 = fadd float undef, undef ; <float> [#uses=2]
- %18 = fadd float %13, %16 ; <float> [#uses=2]
- %19 = fsub float %13, %16 ; <float> [#uses=1]
- %20 = fadd float %18, %17 ; <float> [#uses=2]
- %21 = fsub float %18, %17 ; <float> [#uses=1]
- %22 = fmul float %21, 0x3FF6A09E60000000 ; <float> [#uses=1]
- %23 = fmul float undef, 0x3FFD906BC0000000 ; <float> [#uses=2]
- %24 = fmul float %19, 0x3FF1517A80000000 ; <float> [#uses=1]
- %25 = fsub float %24, %23 ; <float> [#uses=1]
- %26 = fadd float undef, %23 ; <float> [#uses=1]
- %27 = fsub float %26, %20 ; <float> [#uses=3]
- %28 = fsub float %22, %27 ; <float> [#uses=2]
- %29 = fadd float %25, %28 ; <float> [#uses=1]
- %30 = fadd float undef, %20 ; <float> [#uses=1]
- store float %30, float* undef, align 4
- %31 = fadd float %10, %27 ; <float> [#uses=1]
- store float %31, float* undef, align 4
- %32 = fsub float %10, %27 ; <float> [#uses=1]
- store float %32, float* undef, align 4
- %33 = fadd float %11, %28 ; <float> [#uses=1]
- store float %33, float* undef, align 4
- %34 = fsub float %9, %29 ; <float> [#uses=1]
- store float %34, float* undef, align 4
- br label %bb
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-08-ScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-08-ScavengerAssert.ll
deleted file mode 100644
index a0f9918..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-08-ScavengerAssert.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -mtriple=armv7-eabi -mattr=+vfp2
-; PR4686
-
- at g_d = external global double ; <double*> [#uses=1]
-
-define arm_aapcscc void @foo(float %yIncr) {
-entry:
- br i1 undef, label %bb, label %bb4
-
-bb: ; preds = %entry
- %0 = call arm_aapcs_vfpcc float @bar() ; <float> [#uses=1]
- %1 = fpext float %0 to double ; <double> [#uses=1]
- store double %1, double* @g_d, align 8
- br label %bb4
-
-bb4: ; preds = %bb, %entry
- unreachable
-}
-
-declare arm_aapcs_vfpcc float @bar()
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-10-ISelBug.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-10-ISelBug.ll
deleted file mode 100644
index cbe250b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-08-10-ISelBug.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=+vfp2
-
-define arm_apcscc float @t1(i32 %v0) nounwind {
-entry:
- store i32 undef, i32* undef, align 4
- %0 = load [4 x i8]** undef, align 4 ; <[4 x i8]*> [#uses=1]
- %1 = load i8* undef, align 1 ; <i8> [#uses=1]
- %2 = zext i8 %1 to i32 ; <i32> [#uses=1]
- %3 = getelementptr [4 x i8]* %0, i32 %v0, i32 0 ; <i8*> [#uses=1]
- %4 = load i8* %3, align 1 ; <i8> [#uses=1]
- %5 = zext i8 %4 to i32 ; <i32> [#uses=1]
- %6 = sub i32 %5, %2 ; <i32> [#uses=1]
- %7 = sitofp i32 %6 to float ; <float> [#uses=1]
- ret float %7
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll
deleted file mode 100644
index 8d03b52..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll
+++ /dev/null
@@ -1,152 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 | FileCheck %s
-
-%struct.pix_pos = type { i32, i32, i32, i32, i32, i32 }
-
- at getNeighbour = external global void (i32, i32, i32, i32, %struct.pix_pos*)*, align 4 ; <void (i32, i32, i32, i32, %struct.pix_pos*)**> [#uses=2]
-
-define arm_apcscc void @t() nounwind {
-; CHECK: t:
-; CHECK: it eq
-; CHECK-NEXT: cmpeq
-entry:
- %pix_a.i294 = alloca [4 x %struct.pix_pos], align 4 ; <[4 x %struct.pix_pos]*> [#uses=2]
- br i1 undef, label %land.rhs, label %lor.end
-
-land.rhs: ; preds = %entry
- br label %lor.end
-
-lor.end: ; preds = %land.rhs, %entry
- switch i32 0, label %if.end371 [
- i32 10, label %if.then366
- i32 14, label %if.then366
- ]
-
-if.then366: ; preds = %lor.end, %lor.end
- unreachable
-
-if.end371: ; preds = %lor.end
- %arrayidx56.2.i = getelementptr [4 x %struct.pix_pos]* %pix_a.i294, i32 0, i32 2 ; <%struct.pix_pos*> [#uses=1]
- %arrayidx56.3.i = getelementptr [4 x %struct.pix_pos]* %pix_a.i294, i32 0, i32 3 ; <%struct.pix_pos*> [#uses=1]
- br i1 undef, label %for.body1857, label %for.end4557
-
-for.body1857: ; preds = %if.end371
- br i1 undef, label %if.then1867, label %for.cond1933
-
-if.then1867: ; preds = %for.body1857
- unreachable
-
-for.cond1933: ; preds = %for.body1857
- br i1 undef, label %for.body1940, label %if.then4493
-
-for.body1940: ; preds = %for.cond1933
- %shl = shl i32 undef, 2 ; <i32> [#uses=1]
- %shl1959 = shl i32 undef, 2 ; <i32> [#uses=4]
- br i1 undef, label %if.then1992, label %if.else2003
-
-if.then1992: ; preds = %for.body1940
- %tmp14.i302 = load i32* undef ; <i32> [#uses=4]
- %add.i307452 = or i32 %shl1959, 1 ; <i32> [#uses=1]
- %sub.i308 = add i32 %shl, -1 ; <i32> [#uses=4]
- call arm_apcscc void undef(i32 %tmp14.i302, i32 %sub.i308, i32 %shl1959, i32 0, %struct.pix_pos* undef) nounwind
- %tmp49.i309 = load void (i32, i32, i32, i32, %struct.pix_pos*)** @getNeighbour ; <void (i32, i32, i32, i32, %struct.pix_pos*)*> [#uses=1]
- call arm_apcscc void %tmp49.i309(i32 %tmp14.i302, i32 %sub.i308, i32 %add.i307452, i32 0, %struct.pix_pos* null) nounwind
- %tmp49.1.i = load void (i32, i32, i32, i32, %struct.pix_pos*)** @getNeighbour ; <void (i32, i32, i32, i32, %struct.pix_pos*)*> [#uses=1]
- call arm_apcscc void %tmp49.1.i(i32 %tmp14.i302, i32 %sub.i308, i32 undef, i32 0, %struct.pix_pos* %arrayidx56.2.i) nounwind
- call arm_apcscc void undef(i32 %tmp14.i302, i32 %sub.i308, i32 undef, i32 0, %struct.pix_pos* %arrayidx56.3.i) nounwind
- unreachable
-
-if.else2003: ; preds = %for.body1940
- switch i32 undef, label %if.then2015 [
- i32 10, label %if.then4382
- i32 14, label %if.then4382
- ]
-
-if.then2015: ; preds = %if.else2003
- br i1 undef, label %if.else2298, label %if.then2019
-
-if.then2019: ; preds = %if.then2015
- br i1 undef, label %if.then2065, label %if.else2081
-
-if.then2065: ; preds = %if.then2019
- br label %if.end2128
-
-if.else2081: ; preds = %if.then2019
- br label %if.end2128
-
-if.end2128: ; preds = %if.else2081, %if.then2065
- unreachable
-
-if.else2298: ; preds = %if.then2015
- br i1 undef, label %land.lhs.true2813, label %cond.end2841
-
-land.lhs.true2813: ; preds = %if.else2298
- br i1 undef, label %cond.end2841, label %cond.true2824
-
-cond.true2824: ; preds = %land.lhs.true2813
- br label %cond.end2841
-
-cond.end2841: ; preds = %cond.true2824, %land.lhs.true2813, %if.else2298
- br i1 undef, label %for.cond2882.preheader, label %for.cond2940.preheader
-
-for.cond2882.preheader: ; preds = %cond.end2841
- %mul3693 = shl i32 undef, 1 ; <i32> [#uses=2]
- br i1 undef, label %if.then3689, label %if.else3728
-
-for.cond2940.preheader: ; preds = %cond.end2841
- br label %for.inc3040
-
-for.inc3040: ; preds = %for.inc3040, %for.cond2940.preheader
- br label %for.inc3040
-
-if.then3689: ; preds = %for.cond2882.preheader
- %add3695 = add nsw i32 %mul3693, %shl1959 ; <i32> [#uses=1]
- %mul3697 = shl i32 %add3695, 2 ; <i32> [#uses=2]
- %arrayidx3705 = getelementptr inbounds i16* undef, i32 1 ; <i16*> [#uses=1]
- %tmp3706 = load i16* %arrayidx3705 ; <i16> [#uses=1]
- %conv3707 = sext i16 %tmp3706 to i32 ; <i32> [#uses=1]
- %add3708 = add nsw i32 %conv3707, %mul3697 ; <i32> [#uses=1]
- %arrayidx3724 = getelementptr inbounds i16* null, i32 1 ; <i16*> [#uses=1]
- %tmp3725 = load i16* %arrayidx3724 ; <i16> [#uses=1]
- %conv3726 = sext i16 %tmp3725 to i32 ; <i32> [#uses=1]
- %add3727 = add nsw i32 %conv3726, %mul3697 ; <i32> [#uses=1]
- br label %if.end3770
-
-if.else3728: ; preds = %for.cond2882.preheader
- %mul3733 = add i32 %shl1959, 1073741816 ; <i32> [#uses=1]
- %add3735 = add nsw i32 %mul3733, %mul3693 ; <i32> [#uses=1]
- %mul3737 = shl i32 %add3735, 2 ; <i32> [#uses=2]
- %tmp3746 = load i16* undef ; <i16> [#uses=1]
- %conv3747 = sext i16 %tmp3746 to i32 ; <i32> [#uses=1]
- %add3748 = add nsw i32 %conv3747, %mul3737 ; <i32> [#uses=1]
- %arrayidx3765 = getelementptr inbounds i16* null, i32 1 ; <i16*> [#uses=1]
- %tmp3766 = load i16* %arrayidx3765 ; <i16> [#uses=1]
- %conv3767 = sext i16 %tmp3766 to i32 ; <i32> [#uses=1]
- %add3768 = add nsw i32 %conv3767, %mul3737 ; <i32> [#uses=1]
- br label %if.end3770
-
-if.end3770: ; preds = %if.else3728, %if.then3689
- %vec2_y.1 = phi i32 [ %add3727, %if.then3689 ], [ %add3768, %if.else3728 ] ; <i32> [#uses=0]
- %vec1_y.2 = phi i32 [ %add3708, %if.then3689 ], [ %add3748, %if.else3728 ] ; <i32> [#uses=0]
- unreachable
-
-if.then4382: ; preds = %if.else2003, %if.else2003
- switch i32 undef, label %if.then4394 [
- i32 10, label %if.else4400
- i32 14, label %if.else4400
- ]
-
-if.then4394: ; preds = %if.then4382
- unreachable
-
-if.else4400: ; preds = %if.then4382, %if.then4382
- br label %for.cond4451.preheader
-
-for.cond4451.preheader: ; preds = %for.cond4451.preheader, %if.else4400
- br label %for.cond4451.preheader
-
-if.then4493: ; preds = %for.cond1933
- unreachable
-
-for.end4557: ; preds = %if.end371
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll
deleted file mode 100644
index b4b6ed9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll
+++ /dev/null
@@ -1,44 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-eabi -mcpu=cortex-a8 -float-abi=hard | FileCheck %s
-
-; A fix for PR5204 will require this check to be changed.
-
-%"struct.__gnu_cxx::__normal_iterator<char*,std::basic_string<char, std::char_traits<char>, std::allocator<char> > >" = type { i8* }
-%"struct.__gnu_cxx::new_allocator<char>" = type <{ i8 }>
-%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >" = type { %"struct.__gnu_cxx::__normal_iterator<char*,std::basic_string<char, std::char_traits<char>, std::allocator<char> > >" }
-%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >::_Rep" = type { %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >::_Rep_base" }
-%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >::_Rep_base" = type { i32, i32, i32 }
-
-
-define weak arm_aapcs_vfpcc i32 @_ZNKSs7compareERKSs(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this, %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %__str) {
-; CHECK: _ZNKSs7compareERKSs:
-; CHECK: it ne
-; CHECK-NEXT: ldmfdne.w
-; CHECK-NEXT: itt eq
-; CHECK-NEXT: subeq.w
-; CHECK-NEXT: ldmfdeq.w
-entry:
- %0 = tail call arm_aapcs_vfpcc i32 @_ZNKSs4sizeEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this) ; <i32> [#uses=3]
- %1 = tail call arm_aapcs_vfpcc i32 @_ZNKSs4sizeEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %__str) ; <i32> [#uses=3]
- %2 = icmp ult i32 %1, %0 ; <i1> [#uses=1]
- %3 = select i1 %2, i32 %1, i32 %0 ; <i32> [#uses=1]
- %4 = tail call arm_aapcs_vfpcc i8* @_ZNKSs7_M_dataEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this) ; <i8*> [#uses=1]
- %5 = tail call arm_aapcs_vfpcc i8* @_ZNKSs4dataEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %__str) ; <i8*> [#uses=1]
- %6 = tail call arm_aapcs_vfpcc i32 @memcmp(i8* %4, i8* %5, i32 %3) nounwind readonly ; <i32> [#uses=2]
- %7 = icmp eq i32 %6, 0 ; <i1> [#uses=1]
- br i1 %7, label %bb, label %bb1
-
-bb: ; preds = %entry
- %8 = sub i32 %0, %1 ; <i32> [#uses=1]
- ret i32 %8
-
-bb1: ; preds = %entry
- ret i32 %6
-}
-
-declare arm_aapcs_vfpcc i32 @memcmp(i8* nocapture, i8* nocapture, i32) nounwind readonly
-
-declare arm_aapcs_vfpcc i32 @_ZNKSs4sizeEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this)
-
-declare arm_aapcs_vfpcc i8* @_ZNKSs7_M_dataEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this)
-
-declare arm_aapcs_vfpcc i8* @_ZNKSs4dataEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this)
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-11-01-CopyReg2RegBug.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-11-01-CopyReg2RegBug.ll
deleted file mode 100644
index 216f3e3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-11-01-CopyReg2RegBug.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8
-
-define arm_apcscc void @get_initial_mb16x16_cost() nounwind {
-entry:
- br i1 undef, label %bb4, label %bb1
-
-bb1: ; preds = %entry
- br label %bb7
-
-bb4: ; preds = %entry
- br i1 undef, label %bb7.thread, label %bb5
-
-bb5: ; preds = %bb4
- br label %bb7
-
-bb7.thread: ; preds = %bb4
- br label %bb8
-
-bb7: ; preds = %bb5, %bb1
- br i1 undef, label %bb8, label %bb10
-
-bb8: ; preds = %bb7, %bb7.thread
- %0 = phi double [ 5.120000e+02, %bb7.thread ], [ undef, %bb7 ] ; <double> [#uses=1]
- %1 = fdiv double %0, undef ; <double> [#uses=0]
- unreachable
-
-bb10: ; preds = %bb7
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-11-11-ScavengerAssert.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-11-11-ScavengerAssert.ll
deleted file mode 100644
index 9f2e399..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-11-11-ScavengerAssert.ll
+++ /dev/null
@@ -1,85 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin10
-
-%struct.OP = type { %struct.OP*, %struct.OP*, %struct.OP* ()*, i32, i16, i16, i8, i8 }
-%struct.SV = type { i8*, i32, i32 }
-
-declare arm_apcscc void @Perl_mg_set(%struct.SV*) nounwind
-
-define arm_apcscc %struct.OP* @Perl_pp_complement() nounwind {
-entry:
- %0 = load %struct.SV** null, align 4 ; <%struct.SV*> [#uses=2]
- br i1 undef, label %bb21, label %bb5
-
-bb5: ; preds = %entry
- br i1 undef, label %bb13, label %bb6
-
-bb6: ; preds = %bb5
- br i1 undef, label %bb8, label %bb7
-
-bb7: ; preds = %bb6
- %1 = getelementptr inbounds %struct.SV* %0, i32 0, i32 0 ; <i8**> [#uses=1]
- %2 = load i8** %1, align 4 ; <i8*> [#uses=1]
- %3 = getelementptr inbounds i8* %2, i32 12 ; <i8*> [#uses=1]
- %4 = bitcast i8* %3 to i32* ; <i32*> [#uses=1]
- %5 = load i32* %4, align 4 ; <i32> [#uses=1]
- %storemerge5 = xor i32 %5, -1 ; <i32> [#uses=1]
- call arm_apcscc void @Perl_sv_setiv(%struct.SV* undef, i32 %storemerge5) nounwind
- %6 = getelementptr inbounds %struct.SV* undef, i32 0, i32 2 ; <i32*> [#uses=1]
- %7 = load i32* %6, align 4 ; <i32> [#uses=1]
- %8 = and i32 %7, 16384 ; <i32> [#uses=1]
- %9 = icmp eq i32 %8, 0 ; <i1> [#uses=1]
- br i1 %9, label %bb12, label %bb11
-
-bb8: ; preds = %bb6
- unreachable
-
-bb11: ; preds = %bb7
- call arm_apcscc void @Perl_mg_set(%struct.SV* undef) nounwind
- br label %bb12
-
-bb12: ; preds = %bb11, %bb7
- store %struct.SV* undef, %struct.SV** null, align 4
- br label %bb44
-
-bb13: ; preds = %bb5
- %10 = call arm_apcscc i32 @Perl_sv_2uv(%struct.SV* %0) nounwind ; <i32> [#uses=0]
- br i1 undef, label %bb.i, label %bb1.i
-
-bb.i: ; preds = %bb13
- call arm_apcscc void @Perl_sv_setiv(%struct.SV* undef, i32 undef) nounwind
- br label %Perl_sv_setuv.exit
-
-bb1.i: ; preds = %bb13
- br label %Perl_sv_setuv.exit
-
-Perl_sv_setuv.exit: ; preds = %bb1.i, %bb.i
- %11 = getelementptr inbounds %struct.SV* undef, i32 0, i32 2 ; <i32*> [#uses=1]
- %12 = load i32* %11, align 4 ; <i32> [#uses=1]
- %13 = and i32 %12, 16384 ; <i32> [#uses=1]
- %14 = icmp eq i32 %13, 0 ; <i1> [#uses=1]
- br i1 %14, label %bb20, label %bb19
-
-bb19: ; preds = %Perl_sv_setuv.exit
- call arm_apcscc void @Perl_mg_set(%struct.SV* undef) nounwind
- br label %bb20
-
-bb20: ; preds = %bb19, %Perl_sv_setuv.exit
- store %struct.SV* undef, %struct.SV** null, align 4
- br label %bb44
-
-bb21: ; preds = %entry
- br i1 undef, label %bb23, label %bb22
-
-bb22: ; preds = %bb21
- unreachable
-
-bb23: ; preds = %bb21
- unreachable
-
-bb44: ; preds = %bb20, %bb12
- ret %struct.OP* undef
-}
-
-declare arm_apcscc void @Perl_sv_setiv(%struct.SV*, i32) nounwind
-
-declare arm_apcscc i32 @Perl_sv_2uv(%struct.SV*) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-11-13-STRDBug.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-11-13-STRDBug.ll
deleted file mode 100644
index 8a67bb1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-11-13-STRDBug.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin10
-; rdar://7394794
-
-define arm_apcscc void @lshift_double(i64 %l1, i64 %h1, i64 %count, i32 %prec, i64* nocapture %lv, i64* nocapture %hv, i32 %arith) nounwind {
-entry:
- %..i = select i1 false, i64 0, i64 0 ; <i64> [#uses=1]
- br i1 undef, label %bb11.i, label %bb6.i
-
-bb6.i: ; preds = %entry
- %0 = lshr i64 %h1, 0 ; <i64> [#uses=1]
- store i64 %0, i64* %hv, align 4
- %1 = lshr i64 %l1, 0 ; <i64> [#uses=1]
- %2 = or i64 0, %1 ; <i64> [#uses=1]
- store i64 %2, i64* %lv, align 4
- br label %bb11.i
-
-bb11.i: ; preds = %bb6.i, %entry
- store i64 %..i, i64* %lv, align 4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
deleted file mode 100644
index 61bd4a3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
+++ /dev/null
@@ -1,129 +0,0 @@
-; RUNX: opt < %s -std-compile-opts | \
-; RUNX: llc -mtriple=thumbv7-apple-darwin10 -mattr=+neon | FileCheck %s
-; RUN: true
-
-define arm_apcscc void @fred(i32 %three_by_three, i8* %in, double %dt1, i32 %x_size, i32 %y_size, i8* %bp) nounwind {
-entry:
-; -- The loop following the load should only use a single add-literation
-; instruction.
-; CHECK: ldr.64
-; CHECK: adds r{{[0-9]+}}, #1
-; CHECK-NOT: adds r{{[0-9]+}}, #1
-; CHECK: subsections_via_symbols
-
-
- %three_by_three_addr = alloca i32 ; <i32*> [#uses=2]
- %in_addr = alloca i8* ; <i8**> [#uses=2]
- %dt_addr = alloca float ; <float*> [#uses=4]
- %x_size_addr = alloca i32 ; <i32*> [#uses=2]
- %y_size_addr = alloca i32 ; <i32*> [#uses=1]
- %bp_addr = alloca i8* ; <i8**> [#uses=1]
- %tmp_image = alloca i8* ; <i8**> [#uses=0]
- %out = alloca i8* ; <i8**> [#uses=1]
- %cp = alloca i8* ; <i8**> [#uses=0]
- %dpt = alloca i8* ; <i8**> [#uses=4]
- %dp = alloca i8* ; <i8**> [#uses=2]
- %ip = alloca i8* ; <i8**> [#uses=0]
- %centre = alloca i32 ; <i32*> [#uses=0]
- %tmp = alloca i32 ; <i32*> [#uses=0]
- %brightness = alloca i32 ; <i32*> [#uses=0]
- %area = alloca i32 ; <i32*> [#uses=0]
- %y = alloca i32 ; <i32*> [#uses=0]
- %x = alloca i32 ; <i32*> [#uses=2]
- %j = alloca i32 ; <i32*> [#uses=6]
- %i = alloca i32 ; <i32*> [#uses=1]
- %mask_size = alloca i32 ; <i32*> [#uses=5]
- %increment = alloca i32 ; <i32*> [#uses=1]
- %n_max = alloca i32 ; <i32*> [#uses=4]
- %temp = alloca float ; <float*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %three_by_three, i32* %three_by_three_addr
- store i8* %in, i8** %in_addr
- %dt = fptrunc double %dt1 to float ; <float> [#uses=1]
- store float %dt, float* %dt_addr
- store i32 %x_size, i32* %x_size_addr
- store i32 %y_size, i32* %y_size_addr
- store i8* %bp, i8** %bp_addr
- %0 = load i8** %in_addr, align 4 ; <i8*> [#uses=1]
- store i8* %0, i8** %out, align 4
- %1 = call arm_apcscc i32 (...)* @foo() nounwind ; <i32> [#uses=1]
- store i32 %1, i32* %i, align 4
- %2 = load i32* %three_by_three_addr, align 4 ; <i32> [#uses=1]
- %3 = icmp eq i32 %2, 0 ; <i1> [#uses=1]
- br i1 %3, label %bb, label %bb2
-
-bb: ; preds = %entry
- %4 = load float* %dt_addr, align 4 ; <float> [#uses=1]
- %5 = fpext float %4 to double ; <double> [#uses=1]
- %6 = fmul double %5, 1.500000e+00 ; <double> [#uses=1]
- %7 = fptosi double %6 to i32 ; <i32> [#uses=1]
- %8 = add nsw i32 %7, 1 ; <i32> [#uses=1]
- store i32 %8, i32* %mask_size, align 4
- br label %bb3
-
-bb2: ; preds = %entry
- store i32 1, i32* %mask_size, align 4
- br label %bb3
-
-bb3: ; preds = %bb2, %bb
- %9 = load i32* %mask_size, align 4 ; <i32> [#uses=1]
- %10 = mul i32 %9, 2 ; <i32> [#uses=1]
- %11 = add nsw i32 %10, 1 ; <i32> [#uses=1]
- store i32 %11, i32* %n_max, align 4
- %12 = load i32* %x_size_addr, align 4 ; <i32> [#uses=1]
- %13 = load i32* %n_max, align 4 ; <i32> [#uses=1]
- %14 = sub i32 %12, %13 ; <i32> [#uses=1]
- store i32 %14, i32* %increment, align 4
- %15 = load i32* %n_max, align 4 ; <i32> [#uses=1]
- %16 = load i32* %n_max, align 4 ; <i32> [#uses=1]
- %17 = mul i32 %15, %16 ; <i32> [#uses=1]
- %18 = call arm_apcscc noalias i8* @malloc(i32 %17) nounwind ; <i8*> [#uses=1]
- store i8* %18, i8** %dp, align 4
- %19 = load i8** %dp, align 4 ; <i8*> [#uses=1]
- store i8* %19, i8** %dpt, align 4
- %20 = load float* %dt_addr, align 4 ; <float> [#uses=1]
- %21 = load float* %dt_addr, align 4 ; <float> [#uses=1]
- %22 = fmul float %20, %21 ; <float> [#uses=1]
- %23 = fsub float -0.000000e+00, %22 ; <float> [#uses=1]
- store float %23, float* %temp, align 4
- %24 = load i32* %mask_size, align 4 ; <i32> [#uses=1]
- %25 = sub i32 0, %24 ; <i32> [#uses=1]
- store i32 %25, i32* %j, align 4
- br label %bb5
-
-bb4: ; preds = %bb5
- %26 = load i32* %j, align 4 ; <i32> [#uses=1]
- %27 = load i32* %j, align 4 ; <i32> [#uses=1]
- %28 = mul i32 %26, %27 ; <i32> [#uses=1]
- %29 = sitofp i32 %28 to double ; <double> [#uses=1]
- %30 = fmul double %29, 1.234000e+00 ; <double> [#uses=1]
- %31 = fptosi double %30 to i32 ; <i32> [#uses=1]
- store i32 %31, i32* %x, align 4
- %32 = load i32* %x, align 4 ; <i32> [#uses=1]
- %33 = trunc i32 %32 to i8 ; <i8> [#uses=1]
- %34 = load i8** %dpt, align 4 ; <i8*> [#uses=1]
- store i8 %33, i8* %34, align 1
- %35 = load i8** %dpt, align 4 ; <i8*> [#uses=1]
- %36 = getelementptr inbounds i8* %35, i64 1 ; <i8*> [#uses=1]
- store i8* %36, i8** %dpt, align 4
- %37 = load i32* %j, align 4 ; <i32> [#uses=1]
- %38 = add nsw i32 %37, 1 ; <i32> [#uses=1]
- store i32 %38, i32* %j, align 4
- br label %bb5
-
-bb5: ; preds = %bb4, %bb3
- %39 = load i32* %j, align 4 ; <i32> [#uses=1]
- %40 = load i32* %mask_size, align 4 ; <i32> [#uses=1]
- %41 = icmp sle i32 %39, %40 ; <i1> [#uses=1]
- br i1 %41, label %bb4, label %bb6
-
-bb6: ; preds = %bb5
- br label %return
-
-return: ; preds = %bb6
- ret void
-}
-
-declare arm_apcscc i32 @foo(...)
-
-declare arm_apcscc noalias i8* @malloc(i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll
deleted file mode 100644
index 07a3527..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll
+++ /dev/null
@@ -1,89 +0,0 @@
-; RUN: llc -relocation-model=pic < %s | grep {:$} | sort | uniq -d | count 0
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
-target triple = "thumbv7-apple-darwin10"
-
-; This function produces a duplicate LPC label unless special care is taken when duplicating a t2LDRpci_pic instruction.
-
-%struct.PlatformMutex = type { i32, [40 x i8] }
-%struct.SpinLock = type { %struct.PlatformMutex }
-%"struct.WTF::TCMalloc_ThreadCache" = type { i32, %struct._opaque_pthread_t*, i8, [68 x %"struct.WTF::TCMalloc_ThreadCache_FreeList"], i32, i32, %"struct.WTF::TCMalloc_ThreadCache"*, %"struct.WTF::TCMalloc_ThreadCache"* }
-%"struct.WTF::TCMalloc_ThreadCache_FreeList" = type { i8*, i16, i16 }
-%struct.__darwin_pthread_handler_rec = type { void (i8*)*, i8*, %struct.__darwin_pthread_handler_rec* }
-%struct._opaque_pthread_t = type { i32, %struct.__darwin_pthread_handler_rec*, [596 x i8] }
-
-@_ZN3WTFL8heap_keyE = internal global i32 0 ; <i32*> [#uses=1]
-@_ZN3WTFL10tsd_initedE.b = internal global i1 false ; <i1*> [#uses=2]
-@_ZN3WTFL13pageheap_lockE = internal global %struct.SpinLock { %struct.PlatformMutex { i32 850045863, [40 x i8] zeroinitializer } } ; <%struct.SpinLock*> [#uses=1]
-@_ZN3WTFL12thread_heapsE = internal global %"struct.WTF::TCMalloc_ThreadCache"* null ; <%"struct.WTF::TCMalloc_ThreadCache"**> [#uses=1]
-@llvm.used = appending global [1 x i8*] [i8* bitcast (%"struct.WTF::TCMalloc_ThreadCache"* ()* @_ZN3WTF20TCMalloc_ThreadCache22CreateCacheIfNecessaryEv to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define arm_apcscc %"struct.WTF::TCMalloc_ThreadCache"* @_ZN3WTF20TCMalloc_ThreadCache22CreateCacheIfNecessaryEv() nounwind {
-entry:
- %0 = tail call arm_apcscc i32 @pthread_mutex_lock(%struct.PlatformMutex* getelementptr inbounds (%struct.SpinLock* @_ZN3WTFL13pageheap_lockE, i32 0, i32 0)) nounwind
- %.b24 = load i1* @_ZN3WTFL10tsd_initedE.b, align 4 ; <i1> [#uses=1]
- br i1 %.b24, label %bb5, label %bb6
-
-bb5: ; preds = %entry
- %1 = tail call arm_apcscc %struct._opaque_pthread_t* @pthread_self() nounwind
- br label %bb6
-
-bb6: ; preds = %bb5, %entry
- %me.0 = phi %struct._opaque_pthread_t* [ %1, %bb5 ], [ null, %entry ] ; <%struct._opaque_pthread_t*> [#uses=2]
- br label %bb11
-
-bb7: ; preds = %bb11
- %2 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache"* %h.0, i32 0, i32 1
- %3 = load %struct._opaque_pthread_t** %2, align 4
- %4 = tail call arm_apcscc i32 @pthread_equal(%struct._opaque_pthread_t* %3, %struct._opaque_pthread_t* %me.0) nounwind
- %5 = icmp eq i32 %4, 0
- br i1 %5, label %bb10, label %bb14
-
-bb10: ; preds = %bb7
- %6 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache"* %h.0, i32 0, i32 6
- br label %bb11
-
-bb11: ; preds = %bb10, %bb6
- %h.0.in = phi %"struct.WTF::TCMalloc_ThreadCache"** [ @_ZN3WTFL12thread_heapsE, %bb6 ], [ %6, %bb10 ] ; <%"struct.WTF::TCMalloc_ThreadCache"**> [#uses=1]
- %h.0 = load %"struct.WTF::TCMalloc_ThreadCache"** %h.0.in, align 4 ; <%"struct.WTF::TCMalloc_ThreadCache"*> [#uses=4]
- %7 = icmp eq %"struct.WTF::TCMalloc_ThreadCache"* %h.0, null
- br i1 %7, label %bb13, label %bb7
-
-bb13: ; preds = %bb11
- %8 = tail call arm_apcscc %"struct.WTF::TCMalloc_ThreadCache"* @_ZN3WTF20TCMalloc_ThreadCache7NewHeapEP17_opaque_pthread_t(%struct._opaque_pthread_t* %me.0) nounwind
- br label %bb14
-
-bb14: ; preds = %bb13, %bb7
- %heap.1 = phi %"struct.WTF::TCMalloc_ThreadCache"* [ %8, %bb13 ], [ %h.0, %bb7 ] ; <%"struct.WTF::TCMalloc_ThreadCache"*> [#uses=4]
- %9 = tail call arm_apcscc i32 @pthread_mutex_unlock(%struct.PlatformMutex* getelementptr inbounds (%struct.SpinLock* @_ZN3WTFL13pageheap_lockE, i32 0, i32 0)) nounwind
- %10 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache"* %heap.1, i32 0, i32 2
- %11 = load i8* %10, align 4
- %toBool15not = icmp eq i8 %11, 0 ; <i1> [#uses=1]
- br i1 %toBool15not, label %bb19, label %bb22
-
-bb19: ; preds = %bb14
- %.b = load i1* @_ZN3WTFL10tsd_initedE.b, align 4 ; <i1> [#uses=1]
- br i1 %.b, label %bb21, label %bb22
-
-bb21: ; preds = %bb19
- store i8 1, i8* %10, align 4
- %12 = load i32* @_ZN3WTFL8heap_keyE, align 4
- %13 = bitcast %"struct.WTF::TCMalloc_ThreadCache"* %heap.1 to i8*
- %14 = tail call arm_apcscc i32 @pthread_setspecific(i32 %12, i8* %13) nounwind
- ret %"struct.WTF::TCMalloc_ThreadCache"* %heap.1
-
-bb22: ; preds = %bb19, %bb14
- ret %"struct.WTF::TCMalloc_ThreadCache"* %heap.1
-}
-
-declare arm_apcscc i32 @pthread_mutex_lock(%struct.PlatformMutex*)
-
-declare arm_apcscc i32 @pthread_mutex_unlock(%struct.PlatformMutex*)
-
-declare hidden arm_apcscc %"struct.WTF::TCMalloc_ThreadCache"* @_ZN3WTF20TCMalloc_ThreadCache7NewHeapEP17_opaque_pthread_t(%struct._opaque_pthread_t*) nounwind
-
-declare arm_apcscc i32 @pthread_setspecific(i32, i8*)
-
-declare arm_apcscc %struct._opaque_pthread_t* @pthread_self()
-
-declare arm_apcscc i32 @pthread_equal(%struct._opaque_pthread_t*, %struct._opaque_pthread_t*)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2010-01-19-RemovePredicates.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2010-01-19-RemovePredicates.ll
deleted file mode 100644
index 41682c1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2010-01-19-RemovePredicates.ll
+++ /dev/null
@@ -1,53 +0,0 @@
-; RUN: llc -O3 -relocation-model=pic -mcpu=cortex-a8 -mattr=+thumb2 < %s
-;
-; This test creates a predicated t2ADDri instruction that is then turned into a t2MOVgpr2gpr instr.
-; Test that the predicate operands are removed properly.
-;
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
-target triple = "thumbv7-apple-darwin10"
-
-declare arm_apcscc void @etoe53(i16* nocapture, i16* nocapture) nounwind
-
-define arm_apcscc void @earith(double* nocapture %value, i32 %icode, double* nocapture %r1, double* nocapture %r2) nounwind {
-entry:
- %v = alloca [6 x i16], align 4 ; <[6 x i16]*> [#uses=1]
- br i1 undef, label %bb2.i, label %bb5
-
-bb2.i: ; preds = %entry
- %0 = bitcast double* %value to i16* ; <i16*> [#uses=1]
- call arm_apcscc void @etoe53(i16* null, i16* %0) nounwind
- ret void
-
-bb5: ; preds = %entry
- switch i32 %icode, label %bb10 [
- i32 57, label %bb14
- i32 58, label %bb18
- i32 67, label %bb22
- i32 76, label %bb26
- i32 77, label %bb35
- ]
-
-bb10: ; preds = %bb5
- br label %bb46
-
-bb14: ; preds = %bb5
- unreachable
-
-bb18: ; preds = %bb5
- unreachable
-
-bb22: ; preds = %bb5
- unreachable
-
-bb26: ; preds = %bb5
- br label %bb46
-
-bb35: ; preds = %bb5
- unreachable
-
-bb46: ; preds = %bb26, %bb10
- %1 = bitcast double* %value to i16* ; <i16*> [#uses=1]
- %v47 = getelementptr inbounds [6 x i16]* %v, i32 0, i32 0 ; <i16*> [#uses=1]
- call arm_apcscc void @etoe53(i16* %v47, i16* %1) nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2010-02-11-phi-cycle.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2010-02-11-phi-cycle.ll
deleted file mode 100644
index 363f571..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2010-02-11-phi-cycle.ll
+++ /dev/null
@@ -1,76 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin | FileCheck %s
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
-
-define arm_apcscc i32 @test(i32 %n) nounwind {
-; CHECK: test:
-; CHECK-NOT: mov
-; CHECK: return
-entry:
- %0 = icmp eq i32 %n, 1 ; <i1> [#uses=1]
- br i1 %0, label %return, label %bb.nph
-
-bb.nph: ; preds = %entry
- %tmp = add i32 %n, -1 ; <i32> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb.nph, %bb
- %indvar = phi i32 [ 0, %bb.nph ], [ %indvar.next, %bb ] ; <i32> [#uses=1]
- %u.05 = phi i64 [ undef, %bb.nph ], [ %ins, %bb ] ; <i64> [#uses=1]
- %1 = tail call arm_apcscc i32 @f() nounwind ; <i32> [#uses=1]
- %tmp4 = zext i32 %1 to i64 ; <i64> [#uses=1]
- %mask = and i64 %u.05, -4294967296 ; <i64> [#uses=1]
- %ins = or i64 %tmp4, %mask ; <i64> [#uses=2]
- tail call arm_apcscc void @g(i64 %ins) nounwind
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %tmp ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb
-
-return: ; preds = %bb, %entry
- ret i32 undef
-}
-
-define arm_apcscc i32 @test_dead_cycle(i32 %n) nounwind {
-; CHECK: test_dead_cycle:
-; CHECK: blx
-; CHECK-NOT: mov
-; CHECK: blx
-entry:
- %0 = icmp eq i32 %n, 1 ; <i1> [#uses=1]
- br i1 %0, label %return, label %bb.nph
-
-bb.nph: ; preds = %entry
- %tmp = add i32 %n, -1 ; <i32> [#uses=2]
- br label %bb
-
-bb: ; preds = %bb.nph, %bb2
- %indvar = phi i32 [ 0, %bb.nph ], [ %indvar.next, %bb2 ] ; <i32> [#uses=2]
- %u.17 = phi i64 [ undef, %bb.nph ], [ %u.0, %bb2 ] ; <i64> [#uses=2]
- %tmp9 = sub i32 %tmp, %indvar ; <i32> [#uses=1]
- %1 = icmp sgt i32 %tmp9, 1 ; <i1> [#uses=1]
- br i1 %1, label %bb1, label %bb2
-
-bb1: ; preds = %bb
- %2 = tail call arm_apcscc i32 @f() nounwind ; <i32> [#uses=1]
- %tmp6 = zext i32 %2 to i64 ; <i64> [#uses=1]
- %mask = and i64 %u.17, -4294967296 ; <i64> [#uses=1]
- %ins = or i64 %tmp6, %mask ; <i64> [#uses=1]
- tail call arm_apcscc void @g(i64 %ins) nounwind
- br label %bb2
-
-bb2: ; preds = %bb1, %bb
-; also check for duplicate induction variables (radar 7645034)
-; CHECK: subs r{{.*}}, #1
-; CHECK-NOT: subs r{{.*}}, #1
-; CHECK: pop
- %u.0 = phi i64 [ %ins, %bb1 ], [ %u.17, %bb ] ; <i64> [#uses=2]
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %tmp ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb
-
-return: ; preds = %bb2, %entry
- ret i32 undef
-}
-
-declare arm_apcscc i32 @f()
-
-declare arm_apcscc void @g(i64)
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/2010-02-24-BigStack.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/2010-02-24-BigStack.ll
deleted file mode 100644
index 533546b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/2010-02-24-BigStack.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -O0 -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8 -mattr=+vfp2
-; This test creates a big stack frame without spilling any callee-saved registers.
-; Make sure the whole stack frame is addressable without scavenger crashes.
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
-target triple = "thumbv7-apple-darwin3.0.0-iphoneos"
-
-define arm_apcscc void @FindMin(double* %panelTDEL, i8* %dclOfRow, i32 %numRows, i32 %numCols, double* %retMin_RES_TDEL) {
-entry:
- %panelTDEL.addr = alloca double*, align 4 ; <double**> [#uses=1]
- %panelResTDEL = alloca [2560 x double], align 4 ; <[2560 x double]*> [#uses=0]
- store double* %panelTDEL, double** %panelTDEL.addr
- store double* %retMin_RES_TDEL, double** undef
- store i32 0, i32* undef
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/carry.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/carry.ll
deleted file mode 100644
index de6f6e2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/carry.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i64 @f1(i64 %a, i64 %b) {
-entry:
-; CHECK: f1:
-; CHECK: subs r0, r0, r2
-; CHECK: sbcs r1, r3
- %tmp = sub i64 %a, %b
- ret i64 %tmp
-}
-
-define i64 @f2(i64 %a, i64 %b) {
-entry:
-; CHECK: f2:
-; CHECK: adds r0, r0, r0
-; CHECK: adcs r1, r1
-; CHECK: subs r0, r0, r2
-; CHECK: sbcs r1, r3
- %tmp1 = shl i64 %a, 1
- %tmp2 = sub i64 %tmp1, %b
- ret i64 %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/cross-rc-coalescing-1.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/cross-rc-coalescing-1.ll
deleted file mode 100644
index 572f1e8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/cross-rc-coalescing-1.ll
+++ /dev/null
@@ -1,52 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8
-
-%struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
-%struct.__sFILEX = type opaque
-%struct.__sbuf = type { i8*, i32 }
-
-declare arm_apcscc i32 @fgetc(%struct.FILE* nocapture) nounwind
-
-define arm_apcscc i32 @main(i32 %argc, i8** nocapture %argv) nounwind {
-entry:
- br i1 undef, label %bb, label %bb1
-
-bb: ; preds = %entry
- unreachable
-
-bb1: ; preds = %entry
- br i1 undef, label %bb.i1, label %bb1.i2
-
-bb.i1: ; preds = %bb1
- unreachable
-
-bb1.i2: ; preds = %bb1
- %0 = call arm_apcscc i32 @fgetc(%struct.FILE* undef) nounwind ; <i32> [#uses=0]
- br i1 undef, label %bb2.i3, label %bb3.i4
-
-bb2.i3: ; preds = %bb1.i2
- br i1 undef, label %bb4.i, label %bb3.i4
-
-bb3.i4: ; preds = %bb2.i3, %bb1.i2
- unreachable
-
-bb4.i: ; preds = %bb2.i3
- br i1 undef, label %bb5.i, label %get_image.exit
-
-bb5.i: ; preds = %bb4.i
- unreachable
-
-get_image.exit: ; preds = %bb4.i
- br i1 undef, label %bb28, label %bb27
-
-bb27: ; preds = %get_image.exit
- br label %bb.i
-
-bb.i: ; preds = %bb.i, %bb27
- %1 = fptrunc double undef to float ; <float> [#uses=1]
- %2 = fptoui float %1 to i8 ; <i8> [#uses=1]
- store i8 %2, i8* undef, align 1
- br label %bb.i
-
-bb28: ; preds = %get_image.exit
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll
deleted file mode 100644
index 2b20931..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 | grep vmov.f32 | count 3
-
-define arm_apcscc void @fht(float* nocapture %fz, i16 signext %n) nounwind {
-entry:
- br label %bb5
-
-bb5: ; preds = %bb5, %entry
- br i1 undef, label %bb5, label %bb.nph
-
-bb.nph: ; preds = %bb5
- br label %bb7
-
-bb7: ; preds = %bb9, %bb.nph
- %s1.02 = phi float [ undef, %bb.nph ], [ %35, %bb9 ] ; <float> [#uses=3]
- %tmp79 = add i32 undef, undef ; <i32> [#uses=1]
- %tmp53 = sub i32 undef, undef ; <i32> [#uses=1]
- %0 = fadd float 0.000000e+00, 1.000000e+00 ; <float> [#uses=2]
- %1 = fmul float 0.000000e+00, 0.000000e+00 ; <float> [#uses=2]
- br label %bb8
-
-bb8: ; preds = %bb8, %bb7
- %tmp54 = add i32 0, %tmp53 ; <i32> [#uses=0]
- %fi.1 = getelementptr float* %fz, i32 undef ; <float*> [#uses=2]
- %tmp80 = add i32 0, %tmp79 ; <i32> [#uses=1]
- %scevgep81 = getelementptr float* %fz, i32 %tmp80 ; <float*> [#uses=1]
- %2 = load float* undef, align 4 ; <float> [#uses=1]
- %3 = fmul float %2, %1 ; <float> [#uses=1]
- %4 = load float* null, align 4 ; <float> [#uses=2]
- %5 = fmul float %4, %0 ; <float> [#uses=1]
- %6 = fsub float %3, %5 ; <float> [#uses=1]
- %7 = fmul float %4, %1 ; <float> [#uses=1]
- %8 = fadd float undef, %7 ; <float> [#uses=2]
- %9 = load float* %fi.1, align 4 ; <float> [#uses=2]
- %10 = fsub float %9, %8 ; <float> [#uses=1]
- %11 = fadd float %9, %8 ; <float> [#uses=1]
- %12 = fsub float 0.000000e+00, %6 ; <float> [#uses=1]
- %13 = fsub float 0.000000e+00, undef ; <float> [#uses=2]
- %14 = fmul float undef, %0 ; <float> [#uses=1]
- %15 = fadd float %14, undef ; <float> [#uses=2]
- %16 = load float* %scevgep81, align 4 ; <float> [#uses=2]
- %17 = fsub float %16, %15 ; <float> [#uses=1]
- %18 = fadd float %16, %15 ; <float> [#uses=2]
- %19 = load float* undef, align 4 ; <float> [#uses=2]
- %20 = fsub float %19, %13 ; <float> [#uses=2]
- %21 = fadd float %19, %13 ; <float> [#uses=1]
- %22 = fmul float %s1.02, %18 ; <float> [#uses=1]
- %23 = fmul float 0.000000e+00, %20 ; <float> [#uses=1]
- %24 = fsub float %22, %23 ; <float> [#uses=1]
- %25 = fmul float 0.000000e+00, %18 ; <float> [#uses=1]
- %26 = fmul float %s1.02, %20 ; <float> [#uses=1]
- %27 = fadd float %25, %26 ; <float> [#uses=1]
- %28 = fadd float %11, %27 ; <float> [#uses=1]
- store float %28, float* %fi.1, align 4
- %29 = fadd float %12, %24 ; <float> [#uses=1]
- store float %29, float* null, align 4
- %30 = fmul float 0.000000e+00, %21 ; <float> [#uses=1]
- %31 = fmul float %s1.02, %17 ; <float> [#uses=1]
- %32 = fsub float %30, %31 ; <float> [#uses=1]
- %33 = fsub float %10, %32 ; <float> [#uses=1]
- store float %33, float* undef, align 4
- %34 = icmp slt i32 undef, undef ; <i1> [#uses=1]
- br i1 %34, label %bb8, label %bb9
-
-bb9: ; preds = %bb8
- %35 = fadd float 0.000000e+00, undef ; <float> [#uses=1]
- br label %bb7
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/dg.exp b/libclamav/c++/llvm/test/CodeGen/Thumb2/dg.exp
deleted file mode 100644
index 3ff359a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/dg.exp
+++ /dev/null
@@ -1,5 +0,0 @@
-load_lib llvm.exp
-
-if { [llvm_supports_target ARM] } {
- RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/frameless.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/frameless.ll
deleted file mode 100644
index c3c8cf1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/frameless.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -disable-fp-elim | not grep mov
-; RUN: llc < %s -mtriple=thumbv7-linux -disable-fp-elim | not grep mov
-
-define arm_apcscc void @t() nounwind readnone {
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/frameless2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/frameless2.ll
deleted file mode 100644
index 7cc7b19..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/frameless2.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -disable-fp-elim | not grep r7
-
-%struct.noise3 = type { [3 x [17 x i32]] }
-%struct.noiseguard = type { i32, i32, i32 }
-
-define arm_apcscc void @vorbis_encode_noisebias_setup(i8* nocapture %vi.0.7.val, double %s, i32 %block, i32* nocapture %suppress, %struct.noise3* nocapture %in, %struct.noiseguard* nocapture %guard, double %userbias) nounwind {
-entry:
- %0 = getelementptr %struct.noiseguard* %guard, i32 %block, i32 2; <i32*> [#uses=1]
- %1 = load i32* %0, align 4 ; <i32> [#uses=1]
- store i32 %1, i32* undef, align 4
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/ifcvt-neon.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/ifcvt-neon.ll
deleted file mode 100644
index c667909..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/ifcvt-neon.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=thumb -mcpu=cortex-a8 | FileCheck %s
-; rdar://7368193
-
-@a = common global float 0.000000e+00 ; <float*> [#uses=2]
-@b = common global float 0.000000e+00 ; <float*> [#uses=1]
-
-define arm_apcscc float @t(i32 %c) nounwind {
-entry:
- %0 = icmp sgt i32 %c, 1 ; <i1> [#uses=1]
- %1 = load float* @a, align 4 ; <float> [#uses=2]
- %2 = load float* @b, align 4 ; <float> [#uses=2]
- br i1 %0, label %bb, label %bb1
-
-bb: ; preds = %entry
-; CHECK: ite lt
-; CHECK: vsublt.f32
-; CHECK-NEXT: vaddge.f32
- %3 = fadd float %1, %2 ; <float> [#uses=1]
- br label %bb2
-
-bb1: ; preds = %entry
- %4 = fsub float %1, %2 ; <float> [#uses=1]
- br label %bb2
-
-bb2: ; preds = %bb1, %bb
- %storemerge = phi float [ %4, %bb1 ], [ %3, %bb ] ; <float> [#uses=2]
- store float %storemerge, float* @a
- ret float %storemerge
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/large-stack.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/large-stack.ll
deleted file mode 100644
index fe0e506..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/large-stack.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -mtriple=arm-apple-darwin | FileCheck %s -check-prefix=DARWIN
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -mtriple=arm-linux-gnueabi | FileCheck %s -check-prefix=LINUX
-
-define void @test1() {
-; DARWIN: test1:
-; DARWIN: sub sp, #256
-; LINUX: test1:
-; LINUX: sub sp, #256
- %tmp = alloca [ 64 x i32 ] , align 4
- ret void
-}
-
-define void @test2() {
-; DARWIN: test2:
-; DARWIN: sub.w sp, sp, #4160
-; DARWIN: sub sp, #8
-; LINUX: test2:
-; LINUX: sub.w sp, sp, #4160
-; LINUX: sub sp, #8
- %tmp = alloca [ 4168 x i8 ] , align 4
- ret void
-}
-
-define i32 @test3() {
-; DARWIN: test3:
-; DARWIN: push {r4, r7, lr}
-; DARWIN: sub.w sp, sp, #805306368
-; DARWIN: sub sp, #20
-; LINUX: test3:
-; LINUX: stmfd sp!, {r4, r7, r11, lr}
-; LINUX: sub.w sp, sp, #805306368
-; LINUX: sub sp, #16
- %retval = alloca i32, align 4
- %tmp = alloca i32, align 4
- %a = alloca [805306369 x i8], align 16
- store i32 0, i32* %tmp
- %tmp1 = load i32* %tmp
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/ldr-str-imm12.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/ldr-str-imm12.ll
deleted file mode 100644
index f007b5c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/ldr-str-imm12.ll
+++ /dev/null
@@ -1,79 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim | FileCheck %s
-; rdar://7352504
-; Make sure we use "str r9, [sp, #+28]" instead of "sub.w r4, r7, #256" followed by "str r9, [r4, #-32]".
-
-%0 = type { i16, i8, i8 }
-%1 = type { [2 x i32], [2 x i32] }
-%2 = type { %union.rec* }
-%struct.FILE_POS = type { i8, i8, i16, i32 }
-%struct.GAP = type { i8, i8, i16 }
-%struct.LIST = type { %union.rec*, %union.rec* }
-%struct.STYLE = type { %union.anon, %union.anon, i16, i16, i32 }
-%struct.head_type = type { [2 x %struct.LIST], %union.FIRST_UNION, %union.SECOND_UNION, %union.THIRD_UNION, %union.FOURTH_UNION, %union.rec*, %2, %union.rec*, %union.rec*, %union.rec*, %union.rec*, %union.rec*, %union.rec*, %union.rec*, %union.rec*, i32 }
-%union.FIRST_UNION = type { %struct.FILE_POS }
-%union.FOURTH_UNION = type { %struct.STYLE }
-%union.SECOND_UNION = type { %0 }
-%union.THIRD_UNION = type { %1 }
-%union.anon = type { %struct.GAP }
-%union.rec = type { %struct.head_type }
-
-@zz_hold = external global %union.rec* ; <%union.rec**> [#uses=2]
-@zz_res = external global %union.rec* ; <%union.rec**> [#uses=1]
-
-define arm_apcscc %union.rec* @Manifest(%union.rec* %x, %union.rec* %env, %struct.STYLE* %style, %union.rec** %bthr, %union.rec** %fthr, %union.rec** %target, %union.rec** %crs, i32 %ok, i32 %need_expand, %union.rec** %enclose, i32 %fcr) nounwind {
-entry:
-; CHECK: ldr.w r9, [r7, #+28]
- %xgaps.i = alloca [32 x %union.rec*], align 4 ; <[32 x %union.rec*]*> [#uses=0]
- %ycomp.i = alloca [32 x %union.rec*], align 4 ; <[32 x %union.rec*]*> [#uses=0]
- br i1 false, label %bb, label %bb20
-
-bb: ; preds = %entry
- unreachable
-
-bb20: ; preds = %entry
- switch i32 undef, label %bb1287 [
- i32 11, label %bb119
- i32 12, label %bb119
- i32 21, label %bb420
- i32 23, label %bb420
- i32 45, label %bb438
- i32 46, label %bb438
- i32 55, label %bb533
- i32 56, label %bb569
- i32 64, label %bb745
- i32 78, label %bb1098
- ]
-
-bb119: ; preds = %bb20, %bb20
- unreachable
-
-bb420: ; preds = %bb20, %bb20
-; CHECK: bb420
-; CHECK: str r{{[0-7]}}, [sp]
-; CHECK: str r{{[0-7]}}, [sp, #+4]
-; CHECK: str r{{[0-7]}}, [sp, #+8]
-; CHECK: str{{(.w)?}} r{{[0-9]+}}, [sp, #+24]
- store %union.rec* null, %union.rec** @zz_hold, align 4
- store %union.rec* null, %union.rec** @zz_res, align 4
- store %union.rec* %x, %union.rec** @zz_hold, align 4
- %0 = call arm_apcscc %union.rec* @Manifest(%union.rec* undef, %union.rec* %env, %struct.STYLE* %style, %union.rec** %bthr, %union.rec** %fthr, %union.rec** %target, %union.rec** %crs, i32 %ok, i32 %need_expand, %union.rec** %enclose, i32 %fcr) nounwind ; <%union.rec*> [#uses=0]
- unreachable
-
-bb438: ; preds = %bb20, %bb20
- unreachable
-
-bb533: ; preds = %bb20
- ret %union.rec* %x
-
-bb569: ; preds = %bb20
- unreachable
-
-bb745: ; preds = %bb20
- unreachable
-
-bb1098: ; preds = %bb20
- unreachable
-
-bb1287: ; preds = %bb20
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/load-global.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/load-global.ll
deleted file mode 100644
index 9286670..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/load-global.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -relocation-model=static | FileCheck %s -check-prefix=STATIC
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -relocation-model=dynamic-no-pic | FileCheck %s -check-prefix=DYNAMIC
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -relocation-model=pic | FileCheck %s -check-prefix=PIC
-; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi -relocation-model=pic | FileCheck %s -check-prefix=LINUX
-
-@G = external global i32
-
-define i32 @test1() {
-; STATIC: _test1:
-; STATIC: .long _G
-
-; DYNAMIC: _test1:
-; DYNAMIC: .long L_G$non_lazy_ptr
-
-; PIC: _test1
-; PIC: add r0, pc
-; PIC: .long L_G$non_lazy_ptr-(LPC1_0+4)
-
-; LINUX: test1
-; LINUX: .long G(GOT)
- %tmp = load i32* @G
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/lsr-deficiency.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/lsr-deficiency.ll
deleted file mode 100644
index ac2cd34..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/lsr-deficiency.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -relocation-model=pic | FileCheck %s
-; rdar://7387640
-
-; This now reduces to a single induction variable.
-
-; TODO: It still gets a GPR shuffle at the end of the loop
-; This is because something in instruction selection has decided
-; that comparing the pre-incremented value with zero is better
-; than comparing the post-incremented value with -4.
-
-@G = external global i32 ; <i32*> [#uses=2]
-@array = external global i32* ; <i32**> [#uses=1]
-
-define arm_apcscc void @t() nounwind optsize {
-; CHECK: t:
-; CHECK: mov.w r2, #1000
-entry:
- %.pre = load i32* @G, align 4 ; <i32> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb, %entry
-; CHECK: LBB1_1:
-; CHECK: cmp r2, #0
-; CHECK: sub.w r9, r2, #1
-; CHECK: mov r2, r9
-
- %0 = phi i32 [ %.pre, %entry ], [ %3, %bb ] ; <i32> [#uses=1]
- %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
- %tmp5 = sub i32 1000, %indvar ; <i32> [#uses=1]
- %1 = load i32** @array, align 4 ; <i32*> [#uses=1]
- %scevgep = getelementptr i32* %1, i32 %tmp5 ; <i32*> [#uses=1]
- %2 = load i32* %scevgep, align 4 ; <i32> [#uses=1]
- %3 = add nsw i32 %2, %0 ; <i32> [#uses=2]
- store i32 %3, i32* @G, align 4
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, 1001 ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb
-
-return: ; preds = %bb
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/machine-licm.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/machine-licm.ll
deleted file mode 100644
index 9ab19e9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/machine-licm.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -disable-fp-elim | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -relocation-model=pic -disable-fp-elim | FileCheck %s --check-prefix=PIC
-; rdar://7353541
-; rdar://7354376
-
-; The generated code is nowhere near ideal. It's not recognizing that the two
-; constantpool entries being loaded can be merged into one.
-
-@GV = external global i32 ; <i32*> [#uses=2]
-
-define arm_apcscc void @t(i32* nocapture %vals, i32 %c) nounwind {
-entry:
-; CHECK: t:
-; CHECK: cbz
- %0 = icmp eq i32 %c, 0 ; <i1> [#uses=1]
- br i1 %0, label %return, label %bb.nph
-
-bb.nph: ; preds = %entry
-; CHECK: BB#1
-; CHECK: ldr.n r2, LCPI1_0
-; CHECK: ldr r3, [r2]
-; CHECK: ldr r3, [r3]
-; CHECK: ldr r2, [r2]
-; CHECK: LBB1_2
-; CHECK: LCPI1_0:
-; CHECK-NOT: LCPI1_1:
-; CHECK: .section
-
-; PIC: BB#1
-; PIC: ldr.n r2, LCPI1_0
-; PIC: add r2, pc
-; PIC: ldr r3, [r2]
-; PIC: ldr r3, [r3]
-; PIC: ldr r2, [r2]
-; PIC: LBB1_2
-; PIC: LCPI1_0:
-; PIC-NOT: LCPI1_1:
-; PIC: .section
- %.pre = load i32* @GV, align 4 ; <i32> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb, %bb.nph
- %1 = phi i32 [ %.pre, %bb.nph ], [ %3, %bb ] ; <i32> [#uses=1]
- %i.03 = phi i32 [ 0, %bb.nph ], [ %4, %bb ] ; <i32> [#uses=2]
- %scevgep = getelementptr i32* %vals, i32 %i.03 ; <i32*> [#uses=1]
- %2 = load i32* %scevgep, align 4 ; <i32> [#uses=1]
- %3 = add nsw i32 %1, %2 ; <i32> [#uses=2]
- store i32 %3, i32* @GV, align 4
- %4 = add i32 %i.03, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %4, %c ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb
-
-return: ; preds = %bb, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/mul_const.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/mul_const.ll
deleted file mode 100644
index 9a2ec93..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/mul_const.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; rdar://7069502
-
-define i32 @t1(i32 %v) nounwind readnone {
-entry:
-; CHECK: t1:
-; CHECK: add.w r0, r0, r0, lsl #3
- %0 = mul i32 %v, 9
- ret i32 %0
-}
-
-define i32 @t2(i32 %v) nounwind readnone {
-entry:
-; CHECK: t2:
-; CHECK: rsb r0, r0, r0, lsl #3
- %0 = mul i32 %v, 7
- ret i32 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/pic-load.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/pic-load.ll
deleted file mode 100644
index 1f8aea9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/pic-load.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -relocation-model=pic | FileCheck %s
-
- %struct.anon = type { void ()* }
- %struct.one_atexit_routine = type { %struct.anon, i32, i8* }
-@__dso_handle = external global { } ; <{ }*> [#uses=1]
-@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 (void ()*)* @atexit to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define hidden arm_apcscc i32 @atexit(void ()* %func) nounwind {
-entry:
-; CHECK: atexit:
-; CHECK: add r0, pc
- %r = alloca %struct.one_atexit_routine, align 4 ; <%struct.one_atexit_routine*> [#uses=3]
- %0 = getelementptr %struct.one_atexit_routine* %r, i32 0, i32 0, i32 0 ; <void ()**> [#uses=1]
- store void ()* %func, void ()** %0, align 4
- %1 = getelementptr %struct.one_atexit_routine* %r, i32 0, i32 1 ; <i32*> [#uses=1]
- store i32 0, i32* %1, align 4
- %2 = call arm_apcscc i32 @atexit_common(%struct.one_atexit_routine* %r, i8* bitcast ({ }* @__dso_handle to i8*)) nounwind ; <i32> [#uses=1]
- ret i32 %2
-}
-
-declare arm_apcscc i32 @atexit_common(%struct.one_atexit_routine*, i8*) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-adc.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-adc.ll
deleted file mode 100644
index 702df91..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-adc.ll
+++ /dev/null
@@ -1,48 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-; 734439407618 = 0x000000ab00000002
-define i64 @f1(i64 %a) {
-; CHECK: f1:
-; CHECK: adds r0, #2
- %tmp = add i64 %a, 734439407618
- ret i64 %tmp
-}
-
-; 5066626890203138 = 0x0012001200000002
-define i64 @f2(i64 %a) {
-; CHECK: f2:
-; CHECK: adds r0, #2
- %tmp = add i64 %a, 5066626890203138
- ret i64 %tmp
-}
-
-; 3747052064576897026 = 0x3400340000000002
-define i64 @f3(i64 %a) {
-; CHECK: f3:
-; CHECK: adds r0, #2
- %tmp = add i64 %a, 3747052064576897026
- ret i64 %tmp
-}
-
-; 6221254862626095106 = 0x5656565600000002
-define i64 @f4(i64 %a) {
-; CHECK: f4:
-; CHECK: adds r0, #2
- %tmp = add i64 %a, 6221254862626095106
- ret i64 %tmp
-}
-
-; 287104476244869122 = 0x03fc000000000002
-define i64 @f5(i64 %a) {
-; CHECK: f5:
-; CHECK: adds r0, #2
- %tmp = add i64 %a, 287104476244869122
- ret i64 %tmp
-}
-
-define i64 @f6(i64 %a, i64 %b) {
-; CHECK: f6:
-; CHECK: adds r0, r0, r2
- %tmp = add i64 %a, %b
- ret i64 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add.ll
deleted file mode 100644
index 5e25cf6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep add | grep #255
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep add | grep #256
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep add | grep #257
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep add | grep #4094
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep add | grep #4095
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep add | grep #4096
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep add
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep add | grep lsl | grep #8
-
-define i32 @t2ADDrc_255(i32 %lhs) {
- %Rd = add i32 %lhs, 255
- ret i32 %Rd
-}
-
-define i32 @t2ADDrc_256(i32 %lhs) {
- %Rd = add i32 %lhs, 256
- ret i32 %Rd
-}
-
-define i32 @t2ADDrc_257(i32 %lhs) {
- %Rd = add i32 %lhs, 257
- ret i32 %Rd
-}
-
-define i32 @t2ADDrc_4094(i32 %lhs) {
- %Rd = add i32 %lhs, 4094
- ret i32 %Rd
-}
-
-define i32 @t2ADDrc_4095(i32 %lhs) {
- %Rd = add i32 %lhs, 4095
- ret i32 %Rd
-}
-
-define i32 @t2ADDrc_4096(i32 %lhs) {
- %Rd = add i32 %lhs, 4096
- ret i32 %Rd
-}
-
-define i32 @t2ADDrr(i32 %lhs, i32 %rhs) {
- %Rd = add i32 %lhs, %rhs
- ret i32 %Rd
-}
-
-define i32 @t2ADDrs(i32 %lhs, i32 %rhs) {
- %tmp = shl i32 %rhs, 8
- %Rd = add i32 %lhs, %tmp
- ret i32 %Rd
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add2.ll
deleted file mode 100644
index e496654..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add2.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-; 171 = 0x000000ab
-define i32 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: adds r0, #171
- %tmp = add i32 %a, 171
- ret i32 %tmp
-}
-
-; 1179666 = 0x00120012
-define i32 @f2(i32 %a) {
-; CHECK: f2:
-; CHECK: add.w r0, r0, #1179666
- %tmp = add i32 %a, 1179666
- ret i32 %tmp
-}
-
-; 872428544 = 0x34003400
-define i32 @f3(i32 %a) {
-; CHECK: f3:
-; CHECK: add.w r0, r0, #872428544
- %tmp = add i32 %a, 872428544
- ret i32 %tmp
-}
-
-; 1448498774 = 0x56565656
-define i32 @f4(i32 %a) {
-; CHECK: f4:
-; CHECK: add.w r0, r0, #1448498774
- %tmp = add i32 %a, 1448498774
- ret i32 %tmp
-}
-
-; 510 = 0x000001fe
-define i32 @f5(i32 %a) {
-; CHECK: f5:
-; CHECK: add.w r0, r0, #510
- %tmp = add i32 %a, 510
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add3.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add3.ll
deleted file mode 100644
index 58fc333..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add3.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a) {
- %tmp = add i32 %a, 4095
- ret i32 %tmp
-}
-
-; CHECK: f1:
-; CHECK: addw r0, r0, #4095
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add4.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add4.ll
deleted file mode 100644
index b94e84d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add4.ll
+++ /dev/null
@@ -1,46 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-; 171 = 0x000000ab
-define i64 @f1(i64 %a) {
-; CHECK: f1:
-; CHECK: adds r0, #171
-; CHECK: adc r1, r1, #0
- %tmp = add i64 %a, 171
- ret i64 %tmp
-}
-
-; 1179666 = 0x00120012
-define i64 @f2(i64 %a) {
-; CHECK: f2:
-; CHECK: adds.w r0, r0, #1179666
-; CHECK: adc r1, r1, #0
- %tmp = add i64 %a, 1179666
- ret i64 %tmp
-}
-
-; 872428544 = 0x34003400
-define i64 @f3(i64 %a) {
-; CHECK: f3:
-; CHECK: adds.w r0, r0, #872428544
-; CHECK: adc r1, r1, #0
- %tmp = add i64 %a, 872428544
- ret i64 %tmp
-}
-
-; 1448498774 = 0x56565656
-define i64 @f4(i64 %a) {
-; CHECK: f4:
-; CHECK: adds.w r0, r0, #1448498774
-; CHECK: adc r1, r1, #0
- %tmp = add i64 %a, 1448498774
- ret i64 %tmp
-}
-
-; 66846720 = 0x03fc0000
-define i64 @f5(i64 %a) {
-; CHECK: f5:
-; CHECK: adds.w r0, r0, #66846720
-; CHECK: adc r1, r1, #0
- %tmp = add i64 %a, 66846720
- ret i64 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add5.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add5.ll
deleted file mode 100644
index 8b3a4f6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add5.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a, i32 %b) {
-; CHECK: f1:
-; CHECK: add r0, r1
- %tmp = add i32 %a, %b
- ret i32 %tmp
-}
-
-define i32 @f2(i32 %a, i32 %b) {
-; CHECK: f2:
-; CHECK: add.w r0, r0, r1, lsl #5
- %tmp = shl i32 %b, 5
- %tmp1 = add i32 %a, %tmp
- ret i32 %tmp1
-}
-
-define i32 @f3(i32 %a, i32 %b) {
-; CHECK: f3:
-; CHECK: add.w r0, r0, r1, lsr #6
- %tmp = lshr i32 %b, 6
- %tmp1 = add i32 %a, %tmp
- ret i32 %tmp1
-}
-
-define i32 @f4(i32 %a, i32 %b) {
-; CHECK: f4:
-; CHECK: add.w r0, r0, r1, asr #7
- %tmp = ashr i32 %b, 7
- %tmp1 = add i32 %a, %tmp
- ret i32 %tmp1
-}
-
-define i32 @f5(i32 %a, i32 %b) {
-; CHECK: f5:
-; CHECK: add.w r0, r0, r0, ror #8
- %l8 = shl i32 %a, 24
- %r8 = lshr i32 %a, 8
- %tmp = or i32 %l8, %r8
- %tmp1 = add i32 %a, %tmp
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add6.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add6.ll
deleted file mode 100644
index 0ecaa79..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-add6.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i64 @f1(i64 %a, i64 %b) {
-; CHECK: f1:
-; CHECK: adds r0, r0, r2
-; CHECK: adcs r1, r3
- %tmp = add i64 %a, %b
- ret i64 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-and.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-and.ll
deleted file mode 100644
index 8e2245a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-and.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a, i32 %b) {
-; CHECK: f1:
-; CHECK: ands r0, r1
- %tmp = and i32 %a, %b
- ret i32 %tmp
-}
-
-define i32 @f2(i32 %a, i32 %b) {
-; CHECK: f2:
-; CHECK: and.w r0, r0, r1, lsl #5
- %tmp = shl i32 %b, 5
- %tmp1 = and i32 %a, %tmp
- ret i32 %tmp1
-}
-
-define i32 @f3(i32 %a, i32 %b) {
-; CHECK: f3:
-; CHECK: and.w r0, r0, r1, lsr #6
- %tmp = lshr i32 %b, 6
- %tmp1 = and i32 %a, %tmp
- ret i32 %tmp1
-}
-
-define i32 @f4(i32 %a, i32 %b) {
-; CHECK: f4:
-; CHECK: and.w r0, r0, r1, asr #7
- %tmp = ashr i32 %b, 7
- %tmp1 = and i32 %a, %tmp
- ret i32 %tmp1
-}
-
-define i32 @f5(i32 %a, i32 %b) {
-; CHECK: f5:
-; CHECK: and.w r0, r0, r0, ror #8
- %l8 = shl i32 %a, 24
- %r8 = lshr i32 %a, 8
- %tmp = or i32 %l8, %r8
- %tmp1 = and i32 %a, %tmp
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-and2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-and2.ll
deleted file mode 100644
index 76c56d0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-and2.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-; 171 = 0x000000ab
-define i32 @f1(i32 %a) {
- %tmp = and i32 %a, 171
- ret i32 %tmp
-}
-; CHECK: f1:
-; CHECK: and r0, r0, #171
-
-; 1179666 = 0x00120012
-define i32 @f2(i32 %a) {
- %tmp = and i32 %a, 1179666
- ret i32 %tmp
-}
-; CHECK: f2:
-; CHECK: and r0, r0, #1179666
-
-; 872428544 = 0x34003400
-define i32 @f3(i32 %a) {
- %tmp = and i32 %a, 872428544
- ret i32 %tmp
-}
-; CHECK: f3:
-; CHECK: and r0, r0, #872428544
-
-; 1448498774 = 0x56565656
-define i32 @f4(i32 %a) {
- %tmp = and i32 %a, 1448498774
- ret i32 %tmp
-}
-; CHECK: f4:
-; CHECK: and r0, r0, #1448498774
-
-; 66846720 = 0x03fc0000
-define i32 @f5(i32 %a) {
- %tmp = and i32 %a, 66846720
- ret i32 %tmp
-}
-; CHECK: f5:
-; CHECK: and r0, r0, #66846720
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-asr.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-asr.ll
deleted file mode 100644
index a0a60e6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-asr.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a, i32 %b) {
-; CHECK: f1:
-; CHECK: asrs r0, r1
- %tmp = ashr i32 %a, %b
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-asr2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-asr2.ll
deleted file mode 100644
index 9c8634f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-asr2.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: asrs r0, r0, #17
- %tmp = ashr i32 %a, 17
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-bcc.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-bcc.ll
deleted file mode 100644
index aae9f5c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-bcc.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | not grep it
-
-define i32 @t1(i32 %a, i32 %b, i32 %c) {
-; CHECK: t1:
-; CHECK: cbz
- %tmp2 = icmp eq i32 %a, 0
- br i1 %tmp2, label %cond_false, label %cond_true
-
-cond_true:
- %tmp5 = add i32 %b, 1
- %tmp6 = and i32 %tmp5, %c
- ret i32 %tmp6
-
-cond_false:
- %tmp7 = add i32 %b, -1
- %tmp8 = xor i32 %tmp7, %c
- ret i32 %tmp8
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-bfc.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-bfc.ll
deleted file mode 100644
index b486045..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-bfc.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-; 4278190095 = 0xff00000f
-define i32 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: bfc r
- %tmp = and i32 %a, 4278190095
- ret i32 %tmp
-}
-
-; 4286578688 = 0xff800000
-define i32 @f2(i32 %a) {
-; CHECK: f2:
-; CHECK: bfc r
- %tmp = and i32 %a, 4286578688
- ret i32 %tmp
-}
-
-; 4095 = 0x00000fff
-define i32 @f3(i32 %a) {
-; CHECK: f3:
-; CHECK: bfc r
- %tmp = and i32 %a, 4095
- ret i32 %tmp
-}
-
-; 2147483646 = 0x7ffffffe not implementable w/ BFC
-define i32 @f4(i32 %a) {
-; CHECK: f4:
- %tmp = and i32 %a, 2147483646
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-bic.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-bic.ll
deleted file mode 100644
index 4e35383..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-bic.ll
+++ /dev/null
@@ -1,105 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a, i32 %b) {
-; CHECK: f1:
-; CHECK: bics r0, r1
- %tmp = xor i32 %b, 4294967295
- %tmp1 = and i32 %a, %tmp
- ret i32 %tmp1
-}
-
-define i32 @f2(i32 %a, i32 %b) {
-; CHECK: f2:
-; CHECK: bics r0, r1
- %tmp = xor i32 %b, 4294967295
- %tmp1 = and i32 %tmp, %a
- ret i32 %tmp1
-}
-
-define i32 @f3(i32 %a, i32 %b) {
-; CHECK: f3:
-; CHECK: bics r0, r1
- %tmp = xor i32 4294967295, %b
- %tmp1 = and i32 %a, %tmp
- ret i32 %tmp1
-}
-
-define i32 @f4(i32 %a, i32 %b) {
-; CHECK: f4:
-; CHECK: bics r0, r1
- %tmp = xor i32 4294967295, %b
- %tmp1 = and i32 %tmp, %a
- ret i32 %tmp1
-}
-
-define i32 @f5(i32 %a, i32 %b) {
-; CHECK: f5:
-; CHECK: bic.w r0, r0, r1, lsl #5
- %tmp = shl i32 %b, 5
- %tmp1 = xor i32 4294967295, %tmp
- %tmp2 = and i32 %a, %tmp1
- ret i32 %tmp2
-}
-
-define i32 @f6(i32 %a, i32 %b) {
-; CHECK: f6:
-; CHECK: bic.w r0, r0, r1, lsr #6
- %tmp = lshr i32 %b, 6
- %tmp1 = xor i32 %tmp, 4294967295
- %tmp2 = and i32 %tmp1, %a
- ret i32 %tmp2
-}
-
-define i32 @f7(i32 %a, i32 %b) {
-; CHECK: f7:
-; CHECK: bic.w r0, r0, r1, asr #7
- %tmp = ashr i32 %b, 7
- %tmp1 = xor i32 %tmp, 4294967295
- %tmp2 = and i32 %a, %tmp1
- ret i32 %tmp2
-}
-
-define i32 @f8(i32 %a, i32 %b) {
-; CHECK: f8:
-; CHECK: bic.w r0, r0, r0, ror #8
- %l8 = shl i32 %a, 24
- %r8 = lshr i32 %a, 8
- %tmp = or i32 %l8, %r8
- %tmp1 = xor i32 4294967295, %tmp
- %tmp2 = and i32 %tmp1, %a
- ret i32 %tmp2
-}
-
-; ~0x000000bb = 4294967108
-define i32 @f9(i32 %a) {
- %tmp = and i32 %a, 4294967108
- ret i32 %tmp
-
-; CHECK: f9:
-; CHECK: bic r0, r0, #187
-}
-
-; ~0x00aa00aa = 4283826005
-define i32 @f10(i32 %a) {
- %tmp = and i32 %a, 4283826005
- ret i32 %tmp
-
-; CHECK: f10:
-; CHECK: bic r0, r0, #11141290
-}
-
-; ~0xcc00cc00 = 872363007
-define i32 @f11(i32 %a) {
- %tmp = and i32 %a, 872363007
- ret i32 %tmp
-; CHECK: f11:
-; CHECK: bic r0, r0, #-872363008
-}
-
-; ~0x00110000 = 4293853183
-define i32 @f12(i32 %a) {
- %tmp = and i32 %a, 4293853183
- ret i32 %tmp
-; CHECK: f12:
-; CHECK: bic r0, r0, #1114112
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-branch.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-branch.ll
deleted file mode 100644
index 1298384..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-branch.ll
+++ /dev/null
@@ -1,61 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=+thumb2 | FileCheck %s
-
-define void @f1(i32 %a, i32 %b, i32* %v) {
-entry:
-; CHECK: f1:
-; CHECK: bne LBB
- %tmp = icmp eq i32 %a, %b ; <i1> [#uses=1]
- br i1 %tmp, label %cond_true, label %return
-
-cond_true: ; preds = %entry
- store i32 0, i32* %v
- ret void
-
-return: ; preds = %entry
- ret void
-}
-
-define void @f2(i32 %a, i32 %b, i32* %v) {
-entry:
-; CHECK: f2:
-; CHECK: bge LBB
- %tmp = icmp slt i32 %a, %b ; <i1> [#uses=1]
- br i1 %tmp, label %cond_true, label %return
-
-cond_true: ; preds = %entry
- store i32 0, i32* %v
- ret void
-
-return: ; preds = %entry
- ret void
-}
-
-define void @f3(i32 %a, i32 %b, i32* %v) {
-entry:
-; CHECK: f3:
-; CHECK: bhs LBB
- %tmp = icmp ult i32 %a, %b ; <i1> [#uses=1]
- br i1 %tmp, label %cond_true, label %return
-
-cond_true: ; preds = %entry
- store i32 0, i32* %v
- ret void
-
-return: ; preds = %entry
- ret void
-}
-
-define void @f4(i32 %a, i32 %b, i32* %v) {
-entry:
-; CHECK: f4:
-; CHECK: blo LBB
- %tmp = icmp ult i32 %a, %b ; <i1> [#uses=1]
- br i1 %tmp, label %return, label %cond_true
-
-cond_true: ; preds = %entry
- store i32 0, i32* %v
- ret void
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-call.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-call.ll
deleted file mode 100644
index 7dc6b26..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-call.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=+thumb2 | FileCheck %s -check-prefix=DARWIN
-; RUN: llc < %s -mtriple=thumbv7-linux -mattr=+thumb2 | FileCheck %s -check-prefix=LINUX
-
-@t = weak global i32 ()* null ; <i32 ()**> [#uses=1]
-
-declare void @g(i32, i32, i32, i32)
-
-define void @f() {
-; DARWIN: f:
-; DARWIN: blx _g
-
-; LINUX: f:
-; LINUX: bl g
- call void @g( i32 1, i32 2, i32 3, i32 4 )
- ret void
-}
-
-define void @h() {
-; DARWIN: h:
-; DARWIN: blx r0
-
-; LINUX: h:
-; LINUX: blx r0
- %tmp = load i32 ()** @t ; <i32 ()*> [#uses=1]
- %tmp.upgrd.2 = tail call i32 %tmp( ) ; <i32> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-cbnz.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-cbnz.ll
deleted file mode 100644
index 0fc6899..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-cbnz.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 | FileCheck %s
-; rdar://7354379
-
-declare arm_apcscc double @floor(double) nounwind readnone
-
-define void @t(i1 %a, double %b) {
-entry:
- br i1 %a, label %bb3, label %bb1
-
-bb1: ; preds = %entry
- unreachable
-
-bb3: ; preds = %entry
- br i1 %a, label %bb7, label %bb5
-
-bb5: ; preds = %bb3
- unreachable
-
-bb7: ; preds = %bb3
- br i1 %a, label %bb11, label %bb9
-
-bb9: ; preds = %bb7
-; CHECK: cmp r0, #0
-; CHECK-NEXT: cmp r0, #0
-; CHECK-NEXT: cbnz
- %0 = tail call arm_apcscc double @floor(double %b) nounwind readnone ; <double> [#uses=0]
- br label %bb11
-
-bb11: ; preds = %bb9, %bb7
- %1 = getelementptr i32* undef, i32 0
- store i32 0, i32* %1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-clz.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-clz.ll
deleted file mode 100644
index 74728bf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-clz.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2,+v7a | FileCheck %s
-
-define i32 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: clz r
- %tmp = tail call i32 @llvm.ctlz.i32(i32 %a)
- ret i32 %tmp
-}
-
-declare i32 @llvm.ctlz.i32(i32) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-cmn.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-cmn.ll
deleted file mode 100644
index eeaaa7f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-cmn.ll
+++ /dev/null
@@ -1,72 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i1 @f1(i32 %a, i32 %b) {
- %nb = sub i32 0, %b
- %tmp = icmp ne i32 %a, %nb
- ret i1 %tmp
-}
-; CHECK: f1:
-; CHECK: cmn.w r0, r1
-
-define i1 @f2(i32 %a, i32 %b) {
- %nb = sub i32 0, %b
- %tmp = icmp ne i32 %nb, %a
- ret i1 %tmp
-}
-; CHECK: f2:
-; CHECK: cmn.w r0, r1
-
-define i1 @f3(i32 %a, i32 %b) {
- %nb = sub i32 0, %b
- %tmp = icmp eq i32 %a, %nb
- ret i1 %tmp
-}
-; CHECK: f3:
-; CHECK: cmn.w r0, r1
-
-define i1 @f4(i32 %a, i32 %b) {
- %nb = sub i32 0, %b
- %tmp = icmp eq i32 %nb, %a
- ret i1 %tmp
-}
-; CHECK: f4:
-; CHECK: cmn.w r0, r1
-
-define i1 @f5(i32 %a, i32 %b) {
- %tmp = shl i32 %b, 5
- %nb = sub i32 0, %tmp
- %tmp1 = icmp eq i32 %nb, %a
- ret i1 %tmp1
-}
-; CHECK: f5:
-; CHECK: cmn.w r0, r1, lsl #5
-
-define i1 @f6(i32 %a, i32 %b) {
- %tmp = lshr i32 %b, 6
- %nb = sub i32 0, %tmp
- %tmp1 = icmp ne i32 %nb, %a
- ret i1 %tmp1
-}
-; CHECK: f6:
-; CHECK: cmn.w r0, r1, lsr #6
-
-define i1 @f7(i32 %a, i32 %b) {
- %tmp = ashr i32 %b, 7
- %nb = sub i32 0, %tmp
- %tmp1 = icmp eq i32 %a, %nb
- ret i1 %tmp1
-}
-; CHECK: f7:
-; CHECK: cmn.w r0, r1, asr #7
-
-define i1 @f8(i32 %a, i32 %b) {
- %l8 = shl i32 %a, 24
- %r8 = lshr i32 %a, 8
- %tmp = or i32 %l8, %r8
- %nb = sub i32 0, %tmp
- %tmp1 = icmp ne i32 %a, %nb
- ret i1 %tmp1
-}
-; CHECK: f8:
-; CHECK: cmn.w r0, r0, ror #8
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-cmn2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-cmn2.ll
deleted file mode 100644
index c0e19f6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-cmn2.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-; -0x000000bb = 4294967109
-define i1 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: cmn.w {{r.*}}, #187
- %tmp = icmp ne i32 %a, 4294967109
- ret i1 %tmp
-}
-
-; -0x00aa00aa = 4283826006
-define i1 @f2(i32 %a) {
-; CHECK: f2:
-; CHECK: cmn.w {{r.*}}, #11141290
- %tmp = icmp eq i32 %a, 4283826006
- ret i1 %tmp
-}
-
-; -0xcc00cc00 = 872363008
-define i1 @f3(i32 %a) {
-; CHECK: f3:
-; CHECK: cmn.w {{r.*}}, #-872363008
- %tmp = icmp ne i32 %a, 872363008
- ret i1 %tmp
-}
-
-; -0x00110000 = 4293853184
-define i1 @f4(i32 %a) {
-; CHECK: f4:
-; CHECK: cmn.w {{r.*}}, #1114112
- %tmp = icmp eq i32 %a, 4293853184
- ret i1 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-cmp.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-cmp.ll
deleted file mode 100644
index d4773bb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-cmp.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-; 0x000000bb = 187
-define i1 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: cmp r0, #187
- %tmp = icmp ne i32 %a, 187
- ret i1 %tmp
-}
-
-; 0x00aa00aa = 11141290
-define i1 @f2(i32 %a) {
-; CHECK: f2:
-; CHECK: cmp.w r0, #11141290
- %tmp = icmp eq i32 %a, 11141290
- ret i1 %tmp
-}
-
-; 0xcc00cc00 = 3422604288
-define i1 @f3(i32 %a) {
-; CHECK: f3:
-; CHECK: cmp.w r0, #-872363008
- %tmp = icmp ne i32 %a, 3422604288
- ret i1 %tmp
-}
-
-; 0xdddddddd = 3722304989
-define i1 @f4(i32 %a) {
-; CHECK: f4:
-; CHECK: cmp.w r0, #-572662307
- %tmp = icmp ne i32 %a, 3722304989
- ret i1 %tmp
-}
-
-; 0x00110000 = 1114112
-define i1 @f5(i32 %a) {
-; CHECK: f5:
-; CHECK: cmp.w r0, #1114112
- %tmp = icmp eq i32 %a, 1114112
- ret i1 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-cmp2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-cmp2.ll
deleted file mode 100644
index 55c321d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-cmp2.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i1 @f1(i32 %a, i32 %b) {
-; CHECK: f1:
-; CHECK: cmp r0, r1
- %tmp = icmp ne i32 %a, %b
- ret i1 %tmp
-}
-
-define i1 @f2(i32 %a, i32 %b) {
-; CHECK: f2:
-; CHECK: cmp r0, r1
- %tmp = icmp eq i32 %a, %b
- ret i1 %tmp
-}
-
-define i1 @f6(i32 %a, i32 %b) {
-; CHECK: f6:
-; CHECK: cmp.w r0, r1, lsl #5
- %tmp = shl i32 %b, 5
- %tmp1 = icmp eq i32 %tmp, %a
- ret i1 %tmp1
-}
-
-define i1 @f7(i32 %a, i32 %b) {
-; CHECK: f7:
-; CHECK: cmp.w r0, r1, lsr #6
- %tmp = lshr i32 %b, 6
- %tmp1 = icmp ne i32 %tmp, %a
- ret i1 %tmp1
-}
-
-define i1 @f8(i32 %a, i32 %b) {
-; CHECK: f8:
-; CHECK: cmp.w r0, r1, asr #7
- %tmp = ashr i32 %b, 7
- %tmp1 = icmp eq i32 %a, %tmp
- ret i1 %tmp1
-}
-
-define i1 @f9(i32 %a, i32 %b) {
-; CHECK: f9:
-; CHECK: cmp.w r0, r0, ror #8
- %l8 = shl i32 %a, 24
- %r8 = lshr i32 %a, 8
- %tmp = or i32 %l8, %r8
- %tmp1 = icmp ne i32 %a, %tmp
- ret i1 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-eor.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-eor.ll
deleted file mode 100644
index b7e2766..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-eor.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a, i32 %b) {
-; CHECK: f1:
-; CHECK: eors r0, r1
- %tmp = xor i32 %a, %b
- ret i32 %tmp
-}
-
-define i32 @f2(i32 %a, i32 %b) {
-; CHECK: f2:
-; CHECK: eor.w r0, r1, r0
- %tmp = xor i32 %b, %a
- ret i32 %tmp
-}
-
-define i32 @f3(i32 %a, i32 %b) {
-; CHECK: f3:
-; CHECK: eor.w r0, r0, r1, lsl #5
- %tmp = shl i32 %b, 5
- %tmp1 = xor i32 %a, %tmp
- ret i32 %tmp1
-}
-
-define i32 @f4(i32 %a, i32 %b) {
-; CHECK: f4:
-; CHECK: eor.w r0, r0, r1, lsr #6
- %tmp = lshr i32 %b, 6
- %tmp1 = xor i32 %tmp, %a
- ret i32 %tmp1
-}
-
-define i32 @f5(i32 %a, i32 %b) {
-; CHECK: f5:
-; CHECK: eor.w r0, r0, r1, asr #7
- %tmp = ashr i32 %b, 7
- %tmp1 = xor i32 %a, %tmp
- ret i32 %tmp1
-}
-
-define i32 @f6(i32 %a, i32 %b) {
-; CHECK: f6:
-; CHECK: eor.w r0, r0, r0, ror #8
- %l8 = shl i32 %a, 24
- %r8 = lshr i32 %a, 8
- %tmp = or i32 %l8, %r8
- %tmp1 = xor i32 %tmp, %a
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-eor2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-eor2.ll
deleted file mode 100644
index 6b2e9dc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-eor2.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-; 0x000000bb = 187
-define i32 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: eor {{.*}}#187
- %tmp = xor i32 %a, 187
- ret i32 %tmp
-}
-
-; 0x00aa00aa = 11141290
-define i32 @f2(i32 %a) {
-; CHECK: f2:
-; CHECK: eor {{.*}}#11141290
- %tmp = xor i32 %a, 11141290
- ret i32 %tmp
-}
-
-; 0xcc00cc00 = 3422604288
-define i32 @f3(i32 %a) {
-; CHECK: f3:
-; CHECK: eor {{.*}}#-872363008
- %tmp = xor i32 %a, 3422604288
- ret i32 %tmp
-}
-
-; 0xdddddddd = 3722304989
-define i32 @f4(i32 %a) {
-; CHECK: f4:
-; CHECK: eor {{.*}}#-572662307
- %tmp = xor i32 %a, 3722304989
- ret i32 %tmp
-}
-
-; 0x00110000 = 1114112
-define i32 @f5(i32 %a) {
-; CHECK: f5:
-; CHECK: eor {{.*}}#1114112
- %tmp = xor i32 %a, 1114112
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ifcvt1.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ifcvt1.ll
deleted file mode 100644
index 1d26756..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ifcvt1.ll
+++ /dev/null
@@ -1,84 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin | FileCheck %s
-
-define i32 @t1(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
-; CHECK: t1:
-; CHECK: it ne
-; CHECK: cmpne
- switch i32 %c, label %cond_next [
- i32 1, label %cond_true
- i32 7, label %cond_true
- ]
-
-cond_true:
- %tmp12 = add i32 %a, 1
- %tmp1518 = add i32 %tmp12, %b
- ret i32 %tmp1518
-
-cond_next:
- %tmp15 = add i32 %b, %a
- ret i32 %tmp15
-}
-
-; FIXME: Check for # of unconditional branch after adding branch folding post ifcvt.
-define i32 @t2(i32 %a, i32 %b) nounwind {
-entry:
-; CHECK: t2:
-; CHECK: ite gt
-; CHECK: subgt
-; CHECK: suble
- %tmp1434 = icmp eq i32 %a, %b ; <i1> [#uses=1]
- br i1 %tmp1434, label %bb17, label %bb.outer
-
-bb.outer: ; preds = %cond_false, %entry
- %b_addr.021.0.ph = phi i32 [ %b, %entry ], [ %tmp10, %cond_false ] ; <i32> [#uses=5]
- %a_addr.026.0.ph = phi i32 [ %a, %entry ], [ %a_addr.026.0, %cond_false ] ; <i32> [#uses=1]
- br label %bb
-
-bb: ; preds = %cond_true, %bb.outer
- %indvar = phi i32 [ 0, %bb.outer ], [ %indvar.next, %cond_true ] ; <i32> [#uses=2]
- %tmp. = sub i32 0, %b_addr.021.0.ph ; <i32> [#uses=1]
- %tmp.40 = mul i32 %indvar, %tmp. ; <i32> [#uses=1]
- %a_addr.026.0 = add i32 %tmp.40, %a_addr.026.0.ph ; <i32> [#uses=6]
- %tmp3 = icmp sgt i32 %a_addr.026.0, %b_addr.021.0.ph ; <i1> [#uses=1]
- br i1 %tmp3, label %cond_true, label %cond_false
-
-cond_true: ; preds = %bb
- %tmp7 = sub i32 %a_addr.026.0, %b_addr.021.0.ph ; <i32> [#uses=2]
- %tmp1437 = icmp eq i32 %tmp7, %b_addr.021.0.ph ; <i1> [#uses=1]
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
- br i1 %tmp1437, label %bb17, label %bb
-
-cond_false: ; preds = %bb
- %tmp10 = sub i32 %b_addr.021.0.ph, %a_addr.026.0 ; <i32> [#uses=2]
- %tmp14 = icmp eq i32 %a_addr.026.0, %tmp10 ; <i1> [#uses=1]
- br i1 %tmp14, label %bb17, label %bb.outer
-
-bb17: ; preds = %cond_false, %cond_true, %entry
- %a_addr.026.1 = phi i32 [ %a, %entry ], [ %tmp7, %cond_true ], [ %a_addr.026.0, %cond_false ] ; <i32> [#uses=1]
- ret i32 %a_addr.026.1
-}
-
-@x = external global i32* ; <i32**> [#uses=1]
-
-define void @foo(i32 %a) nounwind {
-entry:
- %tmp = load i32** @x ; <i32*> [#uses=1]
- store i32 %a, i32* %tmp
- ret void
-}
-
-define void @t3(i32 %a, i32 %b) nounwind {
-entry:
-; CHECK: t3:
-; CHECK: it lt
-; CHECK: poplt {r7, pc}
- %tmp1 = icmp sgt i32 %a, 10 ; <i1> [#uses=1]
- br i1 %tmp1, label %cond_true, label %UnifiedReturnBlock
-
-cond_true: ; preds = %entry
- tail call void @foo( i32 %b )
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ifcvt2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
deleted file mode 100644
index d917ffe..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
+++ /dev/null
@@ -1,93 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin | FileCheck %s
-
-define void @foo(i32 %X, i32 %Y) {
-entry:
-; CHECK: foo:
-; CHECK: it ne
-; CHECK: cmpne
-; CHECK: it hi
-; CHECK: pophi {r7, pc}
- %tmp1 = icmp ult i32 %X, 4 ; <i1> [#uses=1]
- %tmp4 = icmp eq i32 %Y, 0 ; <i1> [#uses=1]
- %tmp7 = or i1 %tmp4, %tmp1 ; <i1> [#uses=1]
- br i1 %tmp7, label %cond_true, label %UnifiedReturnBlock
-
-cond_true: ; preds = %entry
- %tmp10 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-declare i32 @bar(...)
-
-; FIXME: Need post-ifcvt branch folding to get rid of the extra br at end of BB1.
-
- %struct.quad_struct = type { i32, i32, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct* }
-
-define fastcc i32 @CountTree(%struct.quad_struct* %tree) {
-entry:
-; CHECK: CountTree:
-; CHECK: it eq
-; CHECK: cmpeq
-; CHECK: bne
-; CHECK: itt eq
-; CHECK: moveq
-; CHECK: popeq
- br label %tailrecurse
-
-tailrecurse: ; preds = %bb, %entry
- %tmp6 = load %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=1]
- %tmp9 = load %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=2]
- %tmp12 = load %struct.quad_struct** null ; <%struct.quad_struct*> [#uses=1]
- %tmp14 = icmp eq %struct.quad_struct* null, null ; <i1> [#uses=1]
- %tmp17 = icmp eq %struct.quad_struct* %tmp6, null ; <i1> [#uses=1]
- %tmp23 = icmp eq %struct.quad_struct* %tmp9, null ; <i1> [#uses=1]
- %tmp29 = icmp eq %struct.quad_struct* %tmp12, null ; <i1> [#uses=1]
- %bothcond = and i1 %tmp17, %tmp14 ; <i1> [#uses=1]
- %bothcond1 = and i1 %bothcond, %tmp23 ; <i1> [#uses=1]
- %bothcond2 = and i1 %bothcond1, %tmp29 ; <i1> [#uses=1]
- br i1 %bothcond2, label %return, label %bb
-
-bb: ; preds = %tailrecurse
- %tmp41 = tail call fastcc i32 @CountTree( %struct.quad_struct* %tmp9 ) ; <i32> [#uses=0]
- br label %tailrecurse
-
-return: ; preds = %tailrecurse
- ret i32 0
-}
-
- %struct.SString = type { i8*, i32, i32 }
-
-declare void @abort()
-
-define fastcc void @t1(%struct.SString* %word, i8 signext %c) {
-entry:
-; CHECK: t1:
-; CHECK: it ne
-; CHECK: popne {r7, pc}
- %tmp1 = icmp eq %struct.SString* %word, null ; <i1> [#uses=1]
- br i1 %tmp1, label %cond_true, label %cond_false
-
-cond_true: ; preds = %entry
- tail call void @abort( )
- unreachable
-
-cond_false: ; preds = %entry
- ret void
-}
-
-define fastcc void @t2() nounwind {
-entry:
-; CHECK: t2:
-; CHECK: cmp r0, #0
-; CHECK: beq
- br i1 undef, label %bb.i.i3, label %growMapping.exit
-
-bb.i.i3: ; preds = %entry
- unreachable
-
-growMapping.exit: ; preds = %entry
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ifcvt3.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ifcvt3.ll
deleted file mode 100644
index 496158c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ifcvt3.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin | FileCheck %s
-
-; There shouldn't be an unconditional branch at the end of bb52.
-; rdar://7184787
-
-@posed = external global i64 ; <i64*> [#uses=1]
-
-define i1 @ab_bb52(i64 %.reload78, i64* %.out, i64* %.out1) nounwind {
-newFuncRoot:
- br label %bb52
-
-bb52.bb55_crit_edge.exitStub: ; preds = %bb52
- store i64 %0, i64* %.out
- store i64 %2, i64* %.out1
- ret i1 true
-
-bb52.bb53_crit_edge.exitStub: ; preds = %bb52
- store i64 %0, i64* %.out
- store i64 %2, i64* %.out1
- ret i1 false
-
-bb52: ; preds = %newFuncRoot
-; CHECK: movne
-; CHECK: moveq
-; CHECK: pop
-; CHECK-NEXT: LBB1_1:
- %0 = load i64* @posed, align 4 ; <i64> [#uses=3]
- %1 = sub i64 %0, %.reload78 ; <i64> [#uses=1]
- %2 = ashr i64 %1, 1 ; <i64> [#uses=3]
- %3 = icmp eq i64 %2, 0 ; <i1> [#uses=1]
- br i1 %3, label %bb52.bb55_crit_edge.exitStub, label %bb52.bb53_crit_edge.exitStub
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-jtb.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-jtb.ll
deleted file mode 100644
index f5a56e5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-jtb.ll
+++ /dev/null
@@ -1,120 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 -arm-adjust-jump-tables=0 | not grep tbb
-
-; Do not use tbb / tbh if any destination is before the jumptable.
-; rdar://7102917
-
-define i16 @main__getopt_internal_2E_exit_2E_ce(i32) nounwind {
-newFuncRoot:
- br label %_getopt_internal.exit.ce
-
-codeRepl127.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 0
-
-parse_options.exit.loopexit.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 1
-
-bb1.i.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 2
-
-bb90.i.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 3
-
-codeRepl104.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 4
-
-codeRepl113.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 5
-
-codeRepl51.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 6
-
-codeRepl70.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 7
-
-codeRepl119.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 8
-
-codeRepl93.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 9
-
-codeRepl101.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 10
-
-codeRepl120.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 11
-
-codeRepl89.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 12
-
-codeRepl45.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 13
-
-codeRepl58.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 14
-
-codeRepl46.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 15
-
-codeRepl50.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 16
-
-codeRepl52.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 17
-
-codeRepl53.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 18
-
-codeRepl61.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 19
-
-codeRepl85.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 20
-
-codeRepl97.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 21
-
-codeRepl79.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 22
-
-codeRepl102.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 23
-
-codeRepl54.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 24
-
-codeRepl57.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 25
-
-codeRepl103.exitStub: ; preds = %_getopt_internal.exit.ce
- ret i16 26
-
-_getopt_internal.exit.ce: ; preds = %newFuncRoot
- switch i32 %0, label %codeRepl127.exitStub [
- i32 -1, label %parse_options.exit.loopexit.exitStub
- i32 0, label %bb1.i.exitStub
- i32 63, label %bb90.i.exitStub
- i32 66, label %codeRepl104.exitStub
- i32 67, label %codeRepl113.exitStub
- i32 71, label %codeRepl51.exitStub
- i32 77, label %codeRepl70.exitStub
- i32 78, label %codeRepl119.exitStub
- i32 80, label %codeRepl93.exitStub
- i32 81, label %codeRepl101.exitStub
- i32 82, label %codeRepl120.exitStub
- i32 88, label %codeRepl89.exitStub
- i32 97, label %codeRepl45.exitStub
- i32 98, label %codeRepl58.exitStub
- i32 99, label %codeRepl46.exitStub
- i32 100, label %codeRepl50.exitStub
- i32 104, label %codeRepl52.exitStub
- i32 108, label %codeRepl53.exitStub
- i32 109, label %codeRepl61.exitStub
- i32 110, label %codeRepl85.exitStub
- i32 111, label %codeRepl97.exitStub
- i32 113, label %codeRepl79.exitStub
- i32 114, label %codeRepl102.exitStub
- i32 115, label %codeRepl54.exitStub
- i32 116, label %codeRepl57.exitStub
- i32 118, label %codeRepl103.exitStub
- ]
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldm.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldm.ll
deleted file mode 100644
index da2874d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldm.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=+thumb2 | FileCheck %s
-
-@X = external global [0 x i32] ; <[0 x i32]*> [#uses=5]
-
-define i32 @t1() {
-; CHECK: t1:
-; CHECK: push {r7, lr}
-; CHECK: pop {r7, pc}
- %tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 0) ; <i32> [#uses=1]
- %tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 1) ; <i32> [#uses=1]
- %tmp4 = tail call i32 @f1( i32 %tmp, i32 %tmp3 ) ; <i32> [#uses=1]
- ret i32 %tmp4
-}
-
-define i32 @t2() {
-; CHECK: t2:
-; CHECK: push {r7, lr}
-; CHECK: ldmia
-; CHECK: pop {r7, pc}
- %tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 2) ; <i32> [#uses=1]
- %tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 3) ; <i32> [#uses=1]
- %tmp5 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 4) ; <i32> [#uses=1]
- %tmp6 = tail call i32 @f2( i32 %tmp, i32 %tmp3, i32 %tmp5 ) ; <i32> [#uses=1]
- ret i32 %tmp6
-}
-
-define i32 @t3() {
-; CHECK: t3:
-; CHECK: push {r7, lr}
-; CHECK: pop {r7, pc}
- %tmp = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 1) ; <i32> [#uses=1]
- %tmp3 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 2) ; <i32> [#uses=1]
- %tmp5 = load i32* getelementptr ([0 x i32]* @X, i32 0, i32 3) ; <i32> [#uses=1]
- %tmp6 = tail call i32 @f2( i32 %tmp, i32 %tmp3, i32 %tmp5 ) ; <i32> [#uses=1]
- ret i32 %tmp6
-}
-
-declare i32 @f1(i32, i32)
-
-declare i32 @f2(i32, i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldr.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldr.ll
deleted file mode 100644
index 94888fd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldr.ll
+++ /dev/null
@@ -1,72 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32* %v) {
-entry:
-; CHECK: f1:
-; CHECK: ldr r0, [r0]
- %tmp = load i32* %v
- ret i32 %tmp
-}
-
-define i32 @f2(i32* %v) {
-entry:
-; CHECK: f2:
-; CHECK: ldr.w r0, [r0, #+4092]
- %tmp2 = getelementptr i32* %v, i32 1023
- %tmp = load i32* %tmp2
- ret i32 %tmp
-}
-
-define i32 @f3(i32* %v) {
-entry:
-; CHECK: f3:
-; CHECK: mov.w r1, #4096
-; CHECK: ldr r0, [r0, r1]
- %tmp2 = getelementptr i32* %v, i32 1024
- %tmp = load i32* %tmp2
- ret i32 %tmp
-}
-
-define i32 @f4(i32 %base) {
-entry:
-; CHECK: f4:
-; CHECK: ldr r0, [r0, #-128]
- %tmp1 = sub i32 %base, 128
- %tmp2 = inttoptr i32 %tmp1 to i32*
- %tmp3 = load i32* %tmp2
- ret i32 %tmp3
-}
-
-define i32 @f5(i32 %base, i32 %offset) {
-entry:
-; CHECK: f5:
-; CHECK: ldr r0, [r0, r1]
- %tmp1 = add i32 %base, %offset
- %tmp2 = inttoptr i32 %tmp1 to i32*
- %tmp3 = load i32* %tmp2
- ret i32 %tmp3
-}
-
-define i32 @f6(i32 %base, i32 %offset) {
-entry:
-; CHECK: f6:
-; CHECK: ldr.w r0, [r0, r1, lsl #2]
- %tmp1 = shl i32 %offset, 2
- %tmp2 = add i32 %base, %tmp1
- %tmp3 = inttoptr i32 %tmp2 to i32*
- %tmp4 = load i32* %tmp3
- ret i32 %tmp4
-}
-
-define i32 @f7(i32 %base, i32 %offset) {
-entry:
-; CHECK: f7:
-; CHECK: lsrs r1, r1, #2
-; CHECK: ldr r0, [r0, r1]
-
- %tmp1 = lshr i32 %offset, 2
- %tmp2 = add i32 %base, %tmp1
- %tmp3 = inttoptr i32 %tmp2 to i32*
- %tmp4 = load i32* %tmp3
- ret i32 %tmp4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldr_ext.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldr_ext.ll
deleted file mode 100644
index 9e6aef4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldr_ext.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep ldrb | count 1
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep ldrh | count 1
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep ldrsb | count 1
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep ldrsh | count 1
-
-define i32 @test1(i8* %v.pntr.s0.u1) {
- %tmp.u = load i8* %v.pntr.s0.u1
- %tmp1.s = zext i8 %tmp.u to i32
- ret i32 %tmp1.s
-}
-
-define i32 @test2(i16* %v.pntr.s0.u1) {
- %tmp.u = load i16* %v.pntr.s0.u1
- %tmp1.s = zext i16 %tmp.u to i32
- ret i32 %tmp1.s
-}
-
-define i32 @test3(i8* %v.pntr.s1.u0) {
- %tmp.s = load i8* %v.pntr.s1.u0
- %tmp1.s = sext i8 %tmp.s to i32
- ret i32 %tmp1.s
-}
-
-define i32 @test4() {
- %tmp.s = load i16* null
- %tmp1.s = sext i16 %tmp.s to i32
- ret i32 %tmp1.s
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldr_post.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldr_post.ll
deleted file mode 100644
index d1af4ba..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldr_post.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | \
-; RUN: grep {ldr.*\\\[.*\],} | count 1
-
-define i32 @test(i32 %a, i32 %b, i32 %c) {
- %tmp1 = mul i32 %a, %b ; <i32> [#uses=2]
- %tmp2 = inttoptr i32 %tmp1 to i32* ; <i32*> [#uses=1]
- %tmp3 = load i32* %tmp2 ; <i32> [#uses=1]
- %tmp4 = sub i32 %tmp1, 8 ; <i32> [#uses=1]
- %tmp5 = mul i32 %tmp4, %tmp3 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldr_pre.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldr_pre.ll
deleted file mode 100644
index 9cc3f4a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldr_pre.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | \
-; RUN: grep {ldr.*\\!} | count 3
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | \
-; RUN: grep {ldrsb.*\\!} | count 1
-
-define i32* @test1(i32* %X, i32* %dest) {
- %Y = getelementptr i32* %X, i32 4 ; <i32*> [#uses=2]
- %A = load i32* %Y ; <i32> [#uses=1]
- store i32 %A, i32* %dest
- ret i32* %Y
-}
-
-define i32 @test2(i32 %a, i32 %b) {
- %tmp1 = sub i32 %a, 64 ; <i32> [#uses=2]
- %tmp2 = inttoptr i32 %tmp1 to i32* ; <i32*> [#uses=1]
- %tmp3 = load i32* %tmp2 ; <i32> [#uses=1]
- %tmp4 = sub i32 %tmp1, %b ; <i32> [#uses=1]
- %tmp5 = add i32 %tmp4, %tmp3 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
-define i8* @test3(i8* %X, i32* %dest) {
- %tmp1 = getelementptr i8* %X, i32 4
- %tmp2 = load i8* %tmp1
- %tmp3 = sext i8 %tmp2 to i32
- store i32 %tmp3, i32* %dest
- ret i8* %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldrb.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldrb.ll
deleted file mode 100644
index bf10097..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldrb.ll
+++ /dev/null
@@ -1,72 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i8 @f1(i8* %v) {
-entry:
-; CHECK: f1:
-; CHECK: ldrb r0, [r0]
- %tmp = load i8* %v
- ret i8 %tmp
-}
-
-define i8 @f2(i8* %v) {
-entry:
-; CHECK: f2:
-; CHECK: ldrb r0, [r0, #-1]
- %tmp2 = getelementptr i8* %v, i8 1023
- %tmp = load i8* %tmp2
- ret i8 %tmp
-}
-
-define i8 @f3(i32 %base) {
-entry:
-; CHECK: f3:
-; CHECK: mov.w r1, #4096
-; CHECK: ldrb r0, [r0, r1]
- %tmp1 = add i32 %base, 4096
- %tmp2 = inttoptr i32 %tmp1 to i8*
- %tmp3 = load i8* %tmp2
- ret i8 %tmp3
-}
-
-define i8 @f4(i32 %base) {
-entry:
-; CHECK: f4:
-; CHECK: ldrb r0, [r0, #-128]
- %tmp1 = sub i32 %base, 128
- %tmp2 = inttoptr i32 %tmp1 to i8*
- %tmp3 = load i8* %tmp2
- ret i8 %tmp3
-}
-
-define i8 @f5(i32 %base, i32 %offset) {
-entry:
-; CHECK: f5:
-; CHECK: ldrb r0, [r0, r1]
- %tmp1 = add i32 %base, %offset
- %tmp2 = inttoptr i32 %tmp1 to i8*
- %tmp3 = load i8* %tmp2
- ret i8 %tmp3
-}
-
-define i8 @f6(i32 %base, i32 %offset) {
-entry:
-; CHECK: f6:
-; CHECK: ldrb.w r0, [r0, r1, lsl #2]
- %tmp1 = shl i32 %offset, 2
- %tmp2 = add i32 %base, %tmp1
- %tmp3 = inttoptr i32 %tmp2 to i8*
- %tmp4 = load i8* %tmp3
- ret i8 %tmp4
-}
-
-define i8 @f7(i32 %base, i32 %offset) {
-entry:
-; CHECK: f7:
-; CHECK: lsrs r1, r1, #2
-; CHECK: ldrb r0, [r0, r1]
- %tmp1 = lshr i32 %offset, 2
- %tmp2 = add i32 %base, %tmp1
- %tmp3 = inttoptr i32 %tmp2 to i8*
- %tmp4 = load i8* %tmp3
- ret i8 %tmp4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldrd.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldrd.ll
deleted file mode 100644
index 22d4e88..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldrd.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=+thumb2 | FileCheck %s
-
-@b = external global i64*
-
-define i64 @t(i64 %a) nounwind readonly {
-entry:
-;CHECK: ldrd r2, [r2]
- %0 = load i64** @b, align 4
- %1 = load i64* %0, align 4
- %2 = mul i64 %1, %a
- ret i64 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldrh.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldrh.ll
deleted file mode 100644
index f1fb79c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ldrh.ll
+++ /dev/null
@@ -1,71 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i16 @f1(i16* %v) {
-entry:
-; CHECK: f1:
-; CHECK: ldrh r0, [r0]
- %tmp = load i16* %v
- ret i16 %tmp
-}
-
-define i16 @f2(i16* %v) {
-entry:
-; CHECK: f2:
-; CHECK: ldrh.w r0, [r0, #+2046]
- %tmp2 = getelementptr i16* %v, i16 1023
- %tmp = load i16* %tmp2
- ret i16 %tmp
-}
-
-define i16 @f3(i16* %v) {
-entry:
-; CHECK: f3:
-; CHECK: mov.w r1, #4096
-; CHECK: ldrh r0, [r0, r1]
- %tmp2 = getelementptr i16* %v, i16 2048
- %tmp = load i16* %tmp2
- ret i16 %tmp
-}
-
-define i16 @f4(i32 %base) {
-entry:
-; CHECK: f4:
-; CHECK: ldrh r0, [r0, #-128]
- %tmp1 = sub i32 %base, 128
- %tmp2 = inttoptr i32 %tmp1 to i16*
- %tmp3 = load i16* %tmp2
- ret i16 %tmp3
-}
-
-define i16 @f5(i32 %base, i32 %offset) {
-entry:
-; CHECK: f5:
-; CHECK: ldrh r0, [r0, r1]
- %tmp1 = add i32 %base, %offset
- %tmp2 = inttoptr i32 %tmp1 to i16*
- %tmp3 = load i16* %tmp2
- ret i16 %tmp3
-}
-
-define i16 @f6(i32 %base, i32 %offset) {
-entry:
-; CHECK: f6:
-; CHECK: ldrh.w r0, [r0, r1, lsl #2]
- %tmp1 = shl i32 %offset, 2
- %tmp2 = add i32 %base, %tmp1
- %tmp3 = inttoptr i32 %tmp2 to i16*
- %tmp4 = load i16* %tmp3
- ret i16 %tmp4
-}
-
-define i16 @f7(i32 %base, i32 %offset) {
-entry:
-; CHECK: f7:
-; CHECK: lsrs r1, r1, #2
-; CHECK: ldrh r0, [r0, r1]
- %tmp1 = lshr i32 %offset, 2
- %tmp2 = add i32 %base, %tmp1
- %tmp3 = inttoptr i32 %tmp2 to i16*
- %tmp4 = load i16* %tmp3
- ret i16 %tmp4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-lsl.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-lsl.ll
deleted file mode 100644
index 6b0818a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-lsl.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: lsls r0, r0, #5
- %tmp = shl i32 %a, 5
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-lsl2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-lsl2.ll
deleted file mode 100644
index f283eef..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-lsl2.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a, i32 %b) {
-; CHECK: f1:
-; CHECK: lsls r0, r1
- %tmp = shl i32 %a, %b
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-lsr.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-lsr.ll
deleted file mode 100644
index 7cbee54..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-lsr.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: lsrs r0, r0, #13
- %tmp = lshr i32 %a, 13
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-lsr2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-lsr2.ll
deleted file mode 100644
index 87800f9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-lsr2.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a, i32 %b) {
-; CHECK: f1:
-; CHECK: lsrs r0, r1
- %tmp = lshr i32 %a, %b
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-lsr3.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-lsr3.ll
deleted file mode 100644
index 5cfd3f5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-lsr3.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2
-
-define i1 @test1(i64 %poscnt, i32 %work) {
-entry:
-; CHECK: rrx r0, r0
-; CHECK: lsrs.w r1, r1, #1
- %0 = lshr i64 %poscnt, 1
- %1 = icmp eq i64 %0, 0
- ret i1 %1
-}
-
-define i1 @test2(i64 %poscnt, i32 %work) {
-entry:
-; CHECK: rrx r0, r0
-; CHECK: asrs.w r1, r1, #1
- %0 = ashr i64 %poscnt, 1
- %1 = icmp eq i64 %0, 0
- ret i1 %1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mla.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mla.ll
deleted file mode 100644
index c4cc749..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mla.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a, i32 %b, i32 %c) {
- %tmp1 = mul i32 %a, %b
- %tmp2 = add i32 %c, %tmp1
- ret i32 %tmp2
-}
-; CHECK: f1:
-; CHECK: mla r0, r0, r1, r2
-
-define i32 @f2(i32 %a, i32 %b, i32 %c) {
- %tmp1 = mul i32 %a, %b
- %tmp2 = add i32 %tmp1, %c
- ret i32 %tmp2
-}
-; CHECK: f2:
-; CHECK: mla r0, r0, r1, r2
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mls.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mls.ll
deleted file mode 100644
index fc9e6ba..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mls.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a, i32 %b, i32 %c) {
- %tmp1 = mul i32 %a, %b
- %tmp2 = sub i32 %c, %tmp1
- ret i32 %tmp2
-}
-; CHECK: f1:
-; CHECK: mls r0, r0, r1, r2
-
-; sub doesn't commute, so no mls for this one
-define i32 @f2(i32 %a, i32 %b, i32 %c) {
- %tmp1 = mul i32 %a, %b
- %tmp2 = sub i32 %tmp1, %c
- ret i32 %tmp2
-}
-; CHECK: f2:
-; CHECK: muls r0, r1
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mov.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mov.ll
deleted file mode 100644
index 1dc3614..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mov.ll
+++ /dev/null
@@ -1,266 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-; Test #<const>
-
-; var 2.1 - 0x00ab00ab
-define i32 @t2_const_var2_1_ok_1(i32 %lhs) {
-;CHECK: t2_const_var2_1_ok_1:
-;CHECK: add.w r0, r0, #11206827
- %ret = add i32 %lhs, 11206827 ; 0x00ab00ab
- ret i32 %ret
-}
-
-define i32 @t2_const_var2_1_ok_2(i32 %lhs) {
-;CHECK: t2_const_var2_1_ok_2:
-;CHECK: add.w r0, r0, #11206656
-;CHECK: adds r0, #187
- %ret = add i32 %lhs, 11206843 ; 0x00ab00bb
- ret i32 %ret
-}
-
-define i32 @t2_const_var2_1_ok_3(i32 %lhs) {
-;CHECK: t2_const_var2_1_ok_3:
-;CHECK: add.w r0, r0, #11206827
-;CHECK: add.w r0, r0, #16777216
- %ret = add i32 %lhs, 27984043 ; 0x01ab00ab
- ret i32 %ret
-}
-
-define i32 @t2_const_var2_1_ok_4(i32 %lhs) {
-;CHECK: t2_const_var2_1_ok_4:
-;CHECK: add.w r0, r0, #16777472
-;CHECK: add.w r0, r0, #11206827
- %ret = add i32 %lhs, 27984299 ; 0x01ab01ab
- ret i32 %ret
-}
-
-define i32 @t2_const_var2_1_fail_1(i32 %lhs) {
-;CHECK: t2_const_var2_1_fail_1:
-;CHECK: movw r1, #43777
-;CHECK: movt r1, #427
-;CHECK: add r0, r1
- %ret = add i32 %lhs, 28027649 ; 0x01abab01
- ret i32 %ret
-}
-
-; var 2.2 - 0xab00ab00
-define i32 @t2_const_var2_2_ok_1(i32 %lhs) {
-;CHECK: t2_const_var2_2_ok_1:
-;CHECK: add.w r0, r0, #-1426019584
- %ret = add i32 %lhs, 2868947712 ; 0xab00ab00
- ret i32 %ret
-}
-
-define i32 @t2_const_var2_2_ok_2(i32 %lhs) {
-;CHECK: t2_const_var2_2_ok_2:
-;CHECK: add.w r0, r0, #-1426063360
-;CHECK: add.w r0, r0, #47616
- %ret = add i32 %lhs, 2868951552 ; 0xab00ba00
- ret i32 %ret
-}
-
-define i32 @t2_const_var2_2_ok_3(i32 %lhs) {
-;CHECK: t2_const_var2_2_ok_3:
-;CHECK: add.w r0, r0, #-1426019584
-;CHECK: adds r0, #16
- %ret = add i32 %lhs, 2868947728 ; 0xab00ab10
- ret i32 %ret
-}
-
-define i32 @t2_const_var2_2_ok_4(i32 %lhs) {
-;CHECK: t2_const_var2_2_ok_4:
-;CHECK: add.w r0, r0, #-1426019584
-;CHECK: add.w r0, r0, #1048592
- %ret = add i32 %lhs, 2869996304 ; 0xab10ab10
- ret i32 %ret
-}
-
-define i32 @t2_const_var2_2_fail_1(i32 %lhs) {
-;CHECK: t2_const_var2_2_fail_1:
-;CHECK: movw r1, #43792
-;CHECK: movt r1, #4267
-;CHECK: add r0, r1
- %ret = add i32 %lhs, 279685904 ; 0x10abab10
- ret i32 %ret
-}
-
-; var 2.3 - 0xabababab
-define i32 @t2_const_var2_3_ok_1(i32 %lhs) {
-;CHECK: t2_const_var2_3_ok_1:
-;CHECK: add.w r0, r0, #-1414812757
- %ret = add i32 %lhs, 2880154539 ; 0xabababab
- ret i32 %ret
-}
-
-define i32 @t2_const_var2_3_fail_1(i32 %lhs) {
-;CHECK: t2_const_var2_3_fail_1:
-;CHECK: movw r1, #43962
-;CHECK: movt r1, #43947
-;CHECK: add r0, r1
- %ret = add i32 %lhs, 2880154554 ; 0xabababba
- ret i32 %ret
-}
-
-define i32 @t2_const_var2_3_fail_2(i32 %lhs) {
-;CHECK: t2_const_var2_3_fail_2:
-;CHECK: movw r1, #47787
-;CHECK: movt r1, #43947
-;CHECK: add r0, r1
- %ret = add i32 %lhs, 2880158379 ; 0xababbaab
- ret i32 %ret
-}
-
-define i32 @t2_const_var2_3_fail_3(i32 %lhs) {
-;CHECK: t2_const_var2_3_fail_3:
-;CHECK: movw r1, #43947
-;CHECK: movt r1, #43962
-;CHECK: add r0, r1
- %ret = add i32 %lhs, 2881137579 ; 0xabbaabab
- ret i32 %ret
-}
-
-define i32 @t2_const_var2_3_fail_4(i32 %lhs) {
-;CHECK: t2_const_var2_3_fail_4:
-;CHECK: movw r1, #43947
-;CHECK: movt r1, #47787
-;CHECK: add r0, r1
- %ret = add i32 %lhs, 3131812779 ; 0xbaababab
- ret i32 %ret
-}
-
-; var 3 - 0x0F000000
-define i32 @t2_const_var3_1_ok_1(i32 %lhs) {
-;CHECK: t2_const_var3_1_ok_1:
-;CHECK: add.w r0, r0, #251658240
- %ret = add i32 %lhs, 251658240 ; 0x0F000000
- ret i32 %ret
-}
-
-define i32 @t2_const_var3_2_ok_1(i32 %lhs) {
-;CHECK: t2_const_var3_2_ok_1:
-;CHECK: add.w r0, r0, #3948544
- %ret = add i32 %lhs, 3948544 ; 0b00000000001111000100000000000000
- ret i32 %ret
-}
-
-define i32 @t2_const_var3_2_ok_2(i32 %lhs) {
-;CHECK: t2_const_var3_2_ok_2:
-;CHECK: add.w r0, r0, #2097152
-;CHECK: add.w r0, r0, #1843200
- %ret = add i32 %lhs, 3940352 ; 0b00000000001111000010000000000000
- ret i32 %ret
-}
-
-define i32 @t2_const_var3_3_ok_1(i32 %lhs) {
-;CHECK: t2_const_var3_3_ok_1:
-;CHECK: add.w r0, r0, #258
- %ret = add i32 %lhs, 258 ; 0b00000000000000000000000100000010
- ret i32 %ret
-}
-
-define i32 @t2_const_var3_4_ok_1(i32 %lhs) {
-;CHECK: t2_const_var3_4_ok_1:
-;CHECK: add.w r0, r0, #-268435456
- %ret = add i32 %lhs, 4026531840 ; 0xF0000000
- ret i32 %ret
-}
-
-define i32 @t2MOVTi16_ok_1(i32 %a) {
-; CHECK: t2MOVTi16_ok_1:
-; CHECK: movt r0, #1234
- %1 = and i32 %a, 65535
- %2 = shl i32 1234, 16
- %3 = or i32 %1, %2
-
- ret i32 %3
-}
-
-define i32 @t2MOVTi16_test_1(i32 %a) {
-; CHECK: t2MOVTi16_test_1:
-; CHECK: movt r0, #1234
- %1 = shl i32 255, 8
- %2 = shl i32 1234, 8
- %3 = or i32 %1, 255 ; This gives us 0xFFFF in %3
- %4 = shl i32 %2, 8 ; This gives us (1234 << 16) in %4
- %5 = and i32 %a, %3
- %6 = or i32 %4, %5
-
- ret i32 %6
-}
-
-define i32 @t2MOVTi16_test_2(i32 %a) {
-; CHECK: t2MOVTi16_test_2:
-; CHECK: movt r0, #1234
- %1 = shl i32 255, 8
- %2 = shl i32 1234, 8
- %3 = or i32 %1, 255 ; This gives us 0xFFFF in %3
- %4 = shl i32 %2, 6
- %5 = and i32 %a, %3
- %6 = shl i32 %4, 2 ; This gives us (1234 << 16) in %6
- %7 = or i32 %5, %6
-
- ret i32 %7
-}
-
-define i32 @t2MOVTi16_test_3(i32 %a) {
-; CHECK: t2MOVTi16_test_3:
-; CHECK: movt r0, #1234
- %1 = shl i32 255, 8
- %2 = shl i32 1234, 8
- %3 = or i32 %1, 255 ; This gives us 0xFFFF in %3
- %4 = shl i32 %2, 6
- %5 = and i32 %a, %3
- %6 = shl i32 %4, 2 ; This gives us (1234 << 16) in %6
- %7 = lshr i32 %6, 6
- %8 = shl i32 %7, 6
- %9 = or i32 %5, %8
-
- ret i32 %8
-}
-
-; 171 = 0x000000ab
-define i32 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: movs r0, #171
- %tmp = add i32 0, 171
- ret i32 %tmp
-}
-
-; 1179666 = 0x00120012
-define i32 @f2(i32 %a) {
-; CHECK: f2:
-; CHECK: mov.w r0, #1179666
- %tmp = add i32 0, 1179666
- ret i32 %tmp
-}
-
-; 872428544 = 0x34003400
-define i32 @f3(i32 %a) {
-; CHECK: f3:
-; CHECK: mov.w r0, #872428544
- %tmp = add i32 0, 872428544
- ret i32 %tmp
-}
-
-; 1448498774 = 0x56565656
-define i32 @f4(i32 %a) {
-; CHECK: f4:
-; CHECK: mov.w r0, #1448498774
- %tmp = add i32 0, 1448498774
- ret i32 %tmp
-}
-
-; 66846720 = 0x03fc0000
-define i32 @f5(i32 %a) {
-; CHECK: f5:
-; CHECK: mov.w r0, #66846720
- %tmp = add i32 0, 66846720
- ret i32 %tmp
-}
-
-define i32 @f6(i32 %a) {
-;CHECK: f6
-;CHECK: movw r0, #65535
- %tmp = add i32 0, 65535
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mul.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mul.ll
deleted file mode 100644
index b1515b5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mul.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a, i32 %b, i32 %c) {
-; CHECK: f1:
-; CHECK: muls r0, r1
- %tmp = mul i32 %a, %b
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mulhi.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mulhi.ll
deleted file mode 100644
index 5d47770..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mulhi.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep smmul | count 1
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep umull | count 1
-
-define i32 @smulhi(i32 %x, i32 %y) {
- %tmp = sext i32 %x to i64 ; <i64> [#uses=1]
- %tmp1 = sext i32 %y to i64 ; <i64> [#uses=1]
- %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1]
- %tmp3 = lshr i64 %tmp2, 32 ; <i64> [#uses=1]
- %tmp3.upgrd.1 = trunc i64 %tmp3 to i32 ; <i32> [#uses=1]
- ret i32 %tmp3.upgrd.1
-}
-
-define i32 @umulhi(i32 %x, i32 %y) {
- %tmp = zext i32 %x to i64 ; <i64> [#uses=1]
- %tmp1 = zext i32 %y to i64 ; <i64> [#uses=1]
- %tmp2 = mul i64 %tmp1, %tmp ; <i64> [#uses=1]
- %tmp3 = lshr i64 %tmp2, 32 ; <i64> [#uses=1]
- %tmp3.upgrd.2 = trunc i64 %tmp3 to i32 ; <i32> [#uses=1]
- ret i32 %tmp3.upgrd.2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mvn.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mvn.ll
deleted file mode 100644
index a8c8f83..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mvn.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin | FileCheck %s
-
-; 0x000000bb = 187
-define i32 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: mvn r0, #187
- %tmp = xor i32 4294967295, 187
- ret i32 %tmp
-}
-
-; 0x00aa00aa = 11141290
-define i32 @f2(i32 %a) {
-; CHECK: f2:
-; CHECK: mvn r0, #11141290
- %tmp = xor i32 4294967295, 11141290
- ret i32 %tmp
-}
-
-; 0xcc00cc00 = 3422604288
-define i32 @f3(i32 %a) {
-; CHECK: f3:
-; CHECK: mvn r0, #-872363008
- %tmp = xor i32 4294967295, 3422604288
- ret i32 %tmp
-}
-
-; 0x00110000 = 1114112
-define i32 @f5(i32 %a) {
-; CHECK: f5:
-; CHECK: mvn r0, #1114112
- %tmp = xor i32 4294967295, 1114112
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mvn2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mvn2.ll
deleted file mode 100644
index 375d0aa..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-mvn2.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: mvns r0, r0
- %tmp = xor i32 4294967295, %a
- ret i32 %tmp
-}
-
-define i32 @f2(i32 %a) {
-; CHECK: f2:
-; CHECK: mvns r0, r0
- %tmp = xor i32 %a, 4294967295
- ret i32 %tmp
-}
-
-define i32 @f5(i32 %a) {
-; CHECK: f5:
-; CHECK: mvn.w r0, r0, lsl #5
- %tmp = shl i32 %a, 5
- %tmp1 = xor i32 %tmp, 4294967295
- ret i32 %tmp1
-}
-
-define i32 @f6(i32 %a) {
-; CHECK: f6:
-; CHECK: mvn.w r0, r0, lsr #6
- %tmp = lshr i32 %a, 6
- %tmp1 = xor i32 %tmp, 4294967295
- ret i32 %tmp1
-}
-
-define i32 @f7(i32 %a) {
-; CHECK: f7:
-; CHECK: mvn.w r0, r0, asr #7
- %tmp = ashr i32 %a, 7
- %tmp1 = xor i32 %tmp, 4294967295
- ret i32 %tmp1
-}
-
-define i32 @f8(i32 %a) {
-; CHECK: f8:
-; CHECK: mvn.w r0, r0, ror #8
- %l8 = shl i32 %a, 24
- %r8 = lshr i32 %a, 8
- %tmp = or i32 %l8, %r8
- %tmp1 = xor i32 %tmp, 4294967295
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-neg.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-neg.ll
deleted file mode 100644
index 6bf11ec..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-neg.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: rsbs r0, r0, #0
- %tmp = sub i32 0, %a
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-orn.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-orn.ll
deleted file mode 100644
index 97a3fd7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-orn.ll
+++ /dev/null
@@ -1,72 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-
-define i32 @f1(i32 %a, i32 %b) {
- %tmp = xor i32 %b, 4294967295
- %tmp1 = or i32 %a, %tmp
- ret i32 %tmp1
-}
-; CHECK: f1:
-; CHECK: orn r0, r0, r1
-
-define i32 @f2(i32 %a, i32 %b) {
- %tmp = xor i32 %b, 4294967295
- %tmp1 = or i32 %tmp, %a
- ret i32 %tmp1
-}
-; CHECK: f2:
-; CHECK: orn r0, r0, r1
-
-define i32 @f3(i32 %a, i32 %b) {
- %tmp = xor i32 4294967295, %b
- %tmp1 = or i32 %a, %tmp
- ret i32 %tmp1
-}
-; CHECK: f3:
-; CHECK: orn r0, r0, r1
-
-define i32 @f4(i32 %a, i32 %b) {
- %tmp = xor i32 4294967295, %b
- %tmp1 = or i32 %tmp, %a
- ret i32 %tmp1
-}
-; CHECK: f4:
-; CHECK: orn r0, r0, r1
-
-define i32 @f5(i32 %a, i32 %b) {
- %tmp = shl i32 %b, 5
- %tmp1 = xor i32 4294967295, %tmp
- %tmp2 = or i32 %a, %tmp1
- ret i32 %tmp2
-}
-; CHECK: f5:
-; CHECK: orn r0, r0, r1, lsl #5
-
-define i32 @f6(i32 %a, i32 %b) {
- %tmp = lshr i32 %b, 6
- %tmp1 = xor i32 4294967295, %tmp
- %tmp2 = or i32 %a, %tmp1
- ret i32 %tmp2
-}
-; CHECK: f6:
-; CHECK: orn r0, r0, r1, lsr #6
-
-define i32 @f7(i32 %a, i32 %b) {
- %tmp = ashr i32 %b, 7
- %tmp1 = xor i32 4294967295, %tmp
- %tmp2 = or i32 %a, %tmp1
- ret i32 %tmp2
-}
-; CHECK: f7:
-; CHECK: orn r0, r0, r1, asr #7
-
-define i32 @f8(i32 %a, i32 %b) {
- %l8 = shl i32 %a, 24
- %r8 = lshr i32 %a, 8
- %tmp = or i32 %l8, %r8
- %tmp1 = xor i32 4294967295, %tmp
- %tmp2 = or i32 %a, %tmp1
- ret i32 %tmp2
-}
-; CHECK: f8:
-; CHECK: orn r0, r0, r0, ror #8
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-orn2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-orn2.ll
deleted file mode 100644
index 34ab3a5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-orn2.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-
-; 0x000000bb = 187
-define i32 @f1(i32 %a) {
- %tmp1 = xor i32 4294967295, 187
- %tmp2 = or i32 %a, %tmp1
- ret i32 %tmp2
-}
-; CHECK: f1:
-; CHECK: orn r0, r0, #187
-
-; 0x00aa00aa = 11141290
-define i32 @f2(i32 %a) {
- %tmp1 = xor i32 4294967295, 11141290
- %tmp2 = or i32 %a, %tmp1
- ret i32 %tmp2
-}
-; CHECK: f2:
-; CHECK: orn r0, r0, #11141290
-
-; 0xcc00cc00 = 3422604288
-define i32 @f3(i32 %a) {
- %tmp1 = xor i32 4294967295, 3422604288
- %tmp2 = or i32 %a, %tmp1
- ret i32 %tmp2
-}
-; CHECK: f3:
-; CHECK: orn r0, r0, #-872363008
-
-; 0x00110000 = 1114112
-define i32 @f5(i32 %a) {
- %tmp1 = xor i32 4294967295, 1114112
- %tmp2 = or i32 %a, %tmp1
- ret i32 %tmp2
-}
-; CHECK: f5:
-; CHECK: orn r0, r0, #1114112
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-orr.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-orr.ll
deleted file mode 100644
index 89ab7b1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-orr.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a, i32 %b) {
-; CHECK: f1:
-; CHECK: orrs r0, r1
- %tmp2 = or i32 %a, %b
- ret i32 %tmp2
-}
-
-define i32 @f5(i32 %a, i32 %b) {
-; CHECK: f5:
-; CHECK: orr.w r0, r0, r1, lsl #5
- %tmp = shl i32 %b, 5
- %tmp2 = or i32 %a, %tmp
- ret i32 %tmp2
-}
-
-define i32 @f6(i32 %a, i32 %b) {
-; CHECK: f6:
-; CHECK: orr.w r0, r0, r1, lsr #6
- %tmp = lshr i32 %b, 6
- %tmp2 = or i32 %a, %tmp
- ret i32 %tmp2
-}
-
-define i32 @f7(i32 %a, i32 %b) {
-; CHECK: f7:
-; CHECK: orr.w r0, r0, r1, asr #7
- %tmp = ashr i32 %b, 7
- %tmp2 = or i32 %a, %tmp
- ret i32 %tmp2
-}
-
-define i32 @f8(i32 %a, i32 %b) {
-; CHECK: f8:
-; CHECK: orr.w r0, r0, r0, ror #8
- %l8 = shl i32 %a, 24
- %r8 = lshr i32 %a, 8
- %tmp = or i32 %l8, %r8
- %tmp2 = or i32 %a, %tmp
- ret i32 %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-orr2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-orr2.ll
deleted file mode 100644
index 8f7a3c2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-orr2.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-
-; 0x000000bb = 187
-define i32 @f1(i32 %a) {
- %tmp2 = or i32 %a, 187
- ret i32 %tmp2
-}
-; CHECK: f1:
-; CHECK: orr r0, r0, #187
-
-; 0x00aa00aa = 11141290
-define i32 @f2(i32 %a) {
- %tmp2 = or i32 %a, 11141290
- ret i32 %tmp2
-}
-; CHECK: f2:
-; CHECK: orr r0, r0, #11141290
-
-; 0xcc00cc00 = 3422604288
-define i32 @f3(i32 %a) {
- %tmp2 = or i32 %a, 3422604288
- ret i32 %tmp2
-}
-; CHECK: f3:
-; CHECK: orr r0, r0, #-872363008
-
-; 0x44444444 = 1145324612
-define i32 @f4(i32 %a) {
- %tmp2 = or i32 %a, 1145324612
- ret i32 %tmp2
-}
-; CHECK: f4:
-; CHECK: orr r0, r0, #1145324612
-
-; 0x00110000 = 1114112
-define i32 @f5(i32 %a) {
- %tmp2 = or i32 %a, 1114112
- ret i32 %tmp2
-}
-; CHECK: f5:
-; CHECK: orr r0, r0, #1114112
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-pack.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-pack.ll
deleted file mode 100644
index a982249..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-pack.ll
+++ /dev/null
@@ -1,73 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | \
-; RUN: grep pkhbt | count 5
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | \
-; RUN: grep pkhtb | count 4
-
-define i32 @test1(i32 %X, i32 %Y) {
- %tmp1 = and i32 %X, 65535 ; <i32> [#uses=1]
- %tmp4 = shl i32 %Y, 16 ; <i32> [#uses=1]
- %tmp5 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
-define i32 @test1a(i32 %X, i32 %Y) {
- %tmp19 = and i32 %X, 65535 ; <i32> [#uses=1]
- %tmp37 = shl i32 %Y, 16 ; <i32> [#uses=1]
- %tmp5 = or i32 %tmp37, %tmp19 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
-define i32 @test2(i32 %X, i32 %Y) {
- %tmp1 = and i32 %X, 65535 ; <i32> [#uses=1]
- %tmp3 = shl i32 %Y, 12 ; <i32> [#uses=1]
- %tmp4 = and i32 %tmp3, -65536 ; <i32> [#uses=1]
- %tmp57 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
- ret i32 %tmp57
-}
-
-define i32 @test3(i32 %X, i32 %Y) {
- %tmp19 = and i32 %X, 65535 ; <i32> [#uses=1]
- %tmp37 = shl i32 %Y, 18 ; <i32> [#uses=1]
- %tmp5 = or i32 %tmp37, %tmp19 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
-define i32 @test4(i32 %X, i32 %Y) {
- %tmp1 = and i32 %X, 65535 ; <i32> [#uses=1]
- %tmp3 = and i32 %Y, -65536 ; <i32> [#uses=1]
- %tmp46 = or i32 %tmp3, %tmp1 ; <i32> [#uses=1]
- ret i32 %tmp46
-}
-
-define i32 @test5(i32 %X, i32 %Y) {
- %tmp17 = and i32 %X, -65536 ; <i32> [#uses=1]
- %tmp2 = bitcast i32 %Y to i32 ; <i32> [#uses=1]
- %tmp4 = lshr i32 %tmp2, 16 ; <i32> [#uses=2]
- %tmp5 = or i32 %tmp4, %tmp17 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
-define i32 @test5a(i32 %X, i32 %Y) {
- %tmp110 = and i32 %X, -65536 ; <i32> [#uses=1]
- %tmp37 = lshr i32 %Y, 16 ; <i32> [#uses=1]
- %tmp39 = bitcast i32 %tmp37 to i32 ; <i32> [#uses=1]
- %tmp5 = or i32 %tmp39, %tmp110 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
-define i32 @test6(i32 %X, i32 %Y) {
- %tmp1 = and i32 %X, -65536 ; <i32> [#uses=1]
- %tmp37 = lshr i32 %Y, 12 ; <i32> [#uses=1]
- %tmp38 = bitcast i32 %tmp37 to i32 ; <i32> [#uses=1]
- %tmp4 = and i32 %tmp38, 65535 ; <i32> [#uses=1]
- %tmp59 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
- ret i32 %tmp59
-}
-
-define i32 @test7(i32 %X, i32 %Y) {
- %tmp1 = and i32 %X, -65536 ; <i32> [#uses=1]
- %tmp3 = ashr i32 %Y, 18 ; <i32> [#uses=1]
- %tmp4 = and i32 %tmp3, 65535 ; <i32> [#uses=1]
- %tmp57 = or i32 %tmp4, %tmp1 ; <i32> [#uses=1]
- ret i32 %tmp57
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-rev.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-rev.ll
deleted file mode 100644
index 27b1672..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-rev.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2,+v7a | FileCheck %s
-
-define i32 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: rev r0, r0
- %tmp = tail call i32 @llvm.bswap.i32(i32 %a)
- ret i32 %tmp
-}
-
-declare i32 @llvm.bswap.i32(i32) nounwind readnone
-
-define i32 @f2(i32 %X) {
-; CHECK: f2:
-; CHECK: revsh r0, r0
- %tmp1 = lshr i32 %X, 8
- %tmp1.upgrd.1 = trunc i32 %tmp1 to i16
- %tmp3 = trunc i32 %X to i16
- %tmp2 = and i16 %tmp1.upgrd.1, 255
- %tmp4 = shl i16 %tmp3, 8
- %tmp5 = or i16 %tmp2, %tmp4
- %tmp5.upgrd.2 = sext i16 %tmp5 to i32
- ret i32 %tmp5.upgrd.2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-rev16.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-rev16.ll
deleted file mode 100644
index 39b6ac3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-rev16.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; XFAIL: *
-; fixme rev16 pattern is not matching
-
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | grep {rev16\\W*r\[0-9\]*,\\W*r\[0-9\]*} | count 1
-
-; 0xff00ff00 = 4278255360
-; 0x00ff00ff = 16711935
-define i32 @f1(i32 %a) {
- %l8 = shl i32 %a, 8
- %r8 = lshr i32 %a, 8
- %mask_l8 = and i32 %l8, 4278255360
- %mask_r8 = and i32 %r8, 16711935
- %tmp = or i32 %mask_l8, %mask_r8
- ret i32 %tmp
-}
-
-; 0xff000000 = 4278190080
-; 0x00ff0000 = 16711680
-; 0x0000ff00 = 65280
-; 0x000000ff = 255
-define i32 @f2(i32 %a) {
- %l8 = shl i32 %a, 8
- %r8 = lshr i32 %a, 8
- %masklo_l8 = and i32 %l8, 65280
- %maskhi_l8 = and i32 %l8, 4278190080
- %masklo_r8 = and i32 %r8, 255
- %maskhi_r8 = and i32 %r8, 16711680
- %tmp1 = or i32 %masklo_l8, %masklo_r8
- %tmp2 = or i32 %maskhi_l8, %maskhi_r8
- %tmp = or i32 %tmp1, %tmp2
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ror.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ror.ll
deleted file mode 100644
index 0200116..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ror.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-
-define i32 @f1(i32 %a) {
- %l8 = shl i32 %a, 10
- %r8 = lshr i32 %a, 22
- %tmp = or i32 %l8, %r8
- ret i32 %tmp
-}
-; CHECK: f1:
-; CHECK: ror.w r0, r0, #22
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ror2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ror2.ll
deleted file mode 100644
index ffd1dd7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-ror2.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a, i32 %b) {
-; CHECK: f1:
-; CHECK: rors r0, r1
- %db = sub i32 32, %b
- %l8 = shl i32 %a, %b
- %r8 = lshr i32 %a, %db
- %tmp = or i32 %l8, %r8
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-rsb.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-rsb.ll
deleted file mode 100644
index 15185be..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-rsb.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a, i32 %b) {
- %tmp = shl i32 %b, 5
- %tmp1 = sub i32 %tmp, %a
- ret i32 %tmp1
-}
-; CHECK: f1:
-; CHECK: rsb r0, r0, r1, lsl #5
-
-define i32 @f2(i32 %a, i32 %b) {
- %tmp = lshr i32 %b, 6
- %tmp1 = sub i32 %tmp, %a
- ret i32 %tmp1
-}
-; CHECK: f2:
-; CHECK: rsb r0, r0, r1, lsr #6
-
-define i32 @f3(i32 %a, i32 %b) {
- %tmp = ashr i32 %b, 7
- %tmp1 = sub i32 %tmp, %a
- ret i32 %tmp1
-}
-; CHECK: f3:
-; CHECK: rsb r0, r0, r1, asr #7
-
-define i32 @f4(i32 %a, i32 %b) {
- %l8 = shl i32 %a, 24
- %r8 = lshr i32 %a, 8
- %tmp = or i32 %l8, %r8
- %tmp1 = sub i32 %tmp, %a
- ret i32 %tmp1
-}
-; CHECK: f4:
-; CHECK: rsb r0, r0, r0, ror #8
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-rsb2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-rsb2.ll
deleted file mode 100644
index 61fb619..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-rsb2.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-; 171 = 0x000000ab
-define i32 @f1(i32 %a) {
- %tmp = sub i32 171, %a
- ret i32 %tmp
-}
-; CHECK: f1:
-; CHECK: rsb.w r0, r0, #171
-
-; 1179666 = 0x00120012
-define i32 @f2(i32 %a) {
- %tmp = sub i32 1179666, %a
- ret i32 %tmp
-}
-; CHECK: f2:
-; CHECK: rsb.w r0, r0, #1179666
-
-; 872428544 = 0x34003400
-define i32 @f3(i32 %a) {
- %tmp = sub i32 872428544, %a
- ret i32 %tmp
-}
-; CHECK: f3:
-; CHECK: rsb.w r0, r0, #872428544
-
-; 1448498774 = 0x56565656
-define i32 @f4(i32 %a) {
- %tmp = sub i32 1448498774, %a
- ret i32 %tmp
-}
-; CHECK: f4:
-; CHECK: rsb.w r0, r0, #1448498774
-
-; 66846720 = 0x03fc0000
-define i32 @f5(i32 %a) {
- %tmp = sub i32 66846720, %a
- ret i32 %tmp
-}
-; CHECK: f5:
-; CHECK: rsb.w r0, r0, #66846720
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sbc.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sbc.ll
deleted file mode 100644
index ad96291..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sbc.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i64 @f1(i64 %a, i64 %b) {
-; CHECK: f1:
-; CHECK: subs r0, r0, r2
- %tmp = sub i64 %a, %b
- ret i64 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-select.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-select.ll
deleted file mode 100644
index 2dcf8aa..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-select.ll
+++ /dev/null
@@ -1,98 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a.s) {
-entry:
-; CHECK: f1:
-; CHECK: it eq
-; CHECK: moveq
-
- %tmp = icmp eq i32 %a.s, 4
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define i32 @f2(i32 %a.s) {
-entry:
-; CHECK: f2:
-; CHECK: it gt
-; CHECK: movgt
- %tmp = icmp sgt i32 %a.s, 4
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define i32 @f3(i32 %a.s, i32 %b.s) {
-entry:
-; CHECK: f3:
-; CHECK: it lt
-; CHECK: movlt
- %tmp = icmp slt i32 %a.s, %b.s
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define i32 @f4(i32 %a.s, i32 %b.s) {
-entry:
-; CHECK: f4:
-; CHECK: it le
-; CHECK: movle
-
- %tmp = icmp sle i32 %a.s, %b.s
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define i32 @f5(i32 %a.u, i32 %b.u) {
-entry:
-; CHECK: f5:
-; CHECK: it ls
-; CHECK: movls
- %tmp = icmp ule i32 %a.u, %b.u
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define i32 @f6(i32 %a.u, i32 %b.u) {
-entry:
-; CHECK: f6:
-; CHECK: it hi
-; CHECK: movhi
- %tmp = icmp ugt i32 %a.u, %b.u
- %tmp1.s = select i1 %tmp, i32 2, i32 3
- ret i32 %tmp1.s
-}
-
-define i32 @f7(i32 %a, i32 %b, i32 %c) {
-entry:
-; CHECK: f7:
-; CHECK: it hi
-; CHECK: lsrhi.w
- %tmp1 = icmp ugt i32 %a, %b
- %tmp2 = udiv i32 %c, 3
- %tmp3 = select i1 %tmp1, i32 %tmp2, i32 3
- ret i32 %tmp3
-}
-
-define i32 @f8(i32 %a, i32 %b, i32 %c) {
-entry:
-; CHECK: f8:
-; CHECK: it lo
-; CHECK: lsllo.w
- %tmp1 = icmp ult i32 %a, %b
- %tmp2 = mul i32 %c, 4
- %tmp3 = select i1 %tmp1, i32 %tmp2, i32 3
- ret i32 %tmp3
-}
-
-define i32 @f9(i32 %a, i32 %b, i32 %c) {
-entry:
-; CHECK: f9:
-; CHECK: it ge
-; CHECK: rorge.w
- %tmp1 = icmp sge i32 %a, %b
- %tmp2 = shl i32 %c, 10
- %tmp3 = lshr i32 %c, 22
- %tmp4 = or i32 %tmp2, %tmp3
- %tmp5 = select i1 %tmp1, i32 %tmp4, i32 3
- ret i32 %tmp5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-select_xform.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-select_xform.ll
deleted file mode 100644
index 7fc2e2a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-select_xform.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @t1(i32 %a, i32 %b, i32 %c) nounwind {
-; CHECK: t1
-; CHECK: sub.w r0, r1, #-2147483648
-; CHECK: cmp r2, #10
-; CHECK: sub.w r0, r0, #1
-; CHECK: it gt
-; CHECK: movgt r0, r1
- %tmp1 = icmp sgt i32 %c, 10
- %tmp2 = select i1 %tmp1, i32 0, i32 2147483647
- %tmp3 = add i32 %tmp2, %b
- ret i32 %tmp3
-}
-
-define i32 @t2(i32 %a, i32 %b, i32 %c) nounwind {
-; CHECK: t2
-; CHECK: add.w r0, r1, #-2147483648
-; CHECK: cmp r2, #10
-; CHECK: it gt
-; CHECK: movgt r0, r1
-
- %tmp1 = icmp sgt i32 %c, 10
- %tmp2 = select i1 %tmp1, i32 0, i32 2147483648
- %tmp3 = add i32 %tmp2, %b
- ret i32 %tmp3
-}
-
-define i32 @t3(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
-; CHECK: t3
-; CHECK: sub.w r0, r1, #10
-; CHECK: cmp r2, #10
-; CHECK: it gt
-; CHECK: movgt r0, r1
- %tmp1 = icmp sgt i32 %c, 10
- %tmp2 = select i1 %tmp1, i32 0, i32 10
- %tmp3 = sub i32 %b, %tmp2
- ret i32 %tmp3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-shifter.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-shifter.ll
deleted file mode 100644
index b106ced..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-shifter.ll
+++ /dev/null
@@ -1,48 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @t2ADDrs_lsl(i32 %X, i32 %Y) {
-; CHECK: t2ADDrs_lsl
-; CHECK: add.w r0, r0, r1, lsl #16
- %A = shl i32 %Y, 16
- %B = add i32 %X, %A
- ret i32 %B
-}
-
-define i32 @t2ADDrs_lsr(i32 %X, i32 %Y) {
-; CHECK: t2ADDrs_lsr
-; CHECK: add.w r0, r0, r1, lsr #16
- %A = lshr i32 %Y, 16
- %B = add i32 %X, %A
- ret i32 %B
-}
-
-define i32 @t2ADDrs_asr(i32 %X, i32 %Y) {
-; CHECK: t2ADDrs_asr
-; CHECK: add.w r0, r0, r1, asr #16
- %A = ashr i32 %Y, 16
- %B = add i32 %X, %A
- ret i32 %B
-}
-
-; i32 ror(n) = (x >> n) | (x << (32 - n))
-define i32 @t2ADDrs_ror(i32 %X, i32 %Y) {
-; CHECK: t2ADDrs_ror
-; CHECK: add.w r0, r0, r1, ror #16
- %A = lshr i32 %Y, 16
- %B = shl i32 %Y, 16
- %C = or i32 %B, %A
- %R = add i32 %X, %C
- ret i32 %R
-}
-
-define i32 @t2ADDrs_noRegShift(i32 %X, i32 %Y, i8 %sh) {
-; CHECK: t2ADDrs_noRegShift
-; CHECK: uxtb r2, r2
-; CHECK: lsls r1, r2
-; CHECK: add r0, r1
- %shift.upgrd.1 = zext i8 %sh to i32
- %A = shl i32 %Y, %shift.upgrd.1
- %B = add i32 %X, %A
- ret i32 %B
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-smla.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-smla.ll
deleted file mode 100644
index 092ec27..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-smla.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f3(i32 %a, i16 %x, i32 %y) {
-; CHECK: f3
-; CHECK: smlabt r0, r1, r2, r0
- %tmp = sext i16 %x to i32 ; <i32> [#uses=1]
- %tmp2 = ashr i32 %y, 16 ; <i32> [#uses=1]
- %tmp3 = mul i32 %tmp2, %tmp ; <i32> [#uses=1]
- %tmp5 = add i32 %tmp3, %a ; <i32> [#uses=1]
- ret i32 %tmp5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-smul.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-smul.ll
deleted file mode 100644
index 16ea85d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-smul.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-@x = weak global i16 0 ; <i16*> [#uses=1]
-@y = weak global i16 0 ; <i16*> [#uses=0]
-
-define i32 @f1(i32 %y) {
-; CHECK: f1
-; CHECK: smulbt r0, r1, r0
- %tmp = load i16* @x ; <i16> [#uses=1]
- %tmp1 = add i16 %tmp, 2 ; <i16> [#uses=1]
- %tmp2 = sext i16 %tmp1 to i32 ; <i32> [#uses=1]
- %tmp3 = ashr i32 %y, 16 ; <i32> [#uses=1]
- %tmp4 = mul i32 %tmp2, %tmp3 ; <i32> [#uses=1]
- ret i32 %tmp4
-}
-
-define i32 @f2(i32 %x, i32 %y) {
-; CHECK: f2
-; CHECK: smultt r0, r1, r0
- %tmp1 = ashr i32 %x, 16 ; <i32> [#uses=1]
- %tmp3 = ashr i32 %y, 16 ; <i32> [#uses=1]
- %tmp4 = mul i32 %tmp3, %tmp1 ; <i32> [#uses=1]
- ret i32 %tmp4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-spill-q.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-spill-q.ll
deleted file mode 100644
index ff178b4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-spill-q.ll
+++ /dev/null
@@ -1,58 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-elf -mattr=+neon | FileCheck %s
-; PR4789
-
-%bar = type { float, float, float }
-%baz = type { i32, [16 x %bar], [16 x float], [16 x i32], i8 }
-%foo = type { <4 x float> }
-%quux = type { i32 (...)**, %baz*, i32 }
-%quuz = type { %quux, i32, %bar, [128 x i8], [16 x %foo], %foo, %foo, %foo }
-
-declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*) nounwind readonly
-
-define arm_apcscc void @aaa(%quuz* %this, i8* %block) {
-; CHECK: aaa:
-; CHECK: bic r4, r4, #15
-; CHECK: vst1.64 {{.*}}[{{.*}}, :128]
-; CHECK: vld1.64 {{.*}}[{{.*}}, :128]
-entry:
- %0 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef) nounwind ; <<4 x float>> [#uses=1]
- store float 6.300000e+01, float* undef, align 4
- %1 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef) nounwind ; <<4 x float>> [#uses=1]
- store float 0.000000e+00, float* undef, align 4
- %2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* undef) nounwind ; <<4 x float>> [#uses=1]
- %val173 = load <4 x float>* undef ; <<4 x float>> [#uses=1]
- br label %bb4
-
-bb4: ; preds = %bb193, %entry
- %besterror.0.2264 = phi <4 x float> [ undef, %entry ], [ %besterror.0.0, %bb193 ] ; <<4 x float>> [#uses=2]
- %part0.0.0261 = phi <4 x float> [ zeroinitializer, %entry ], [ %23, %bb193 ] ; <<4 x float>> [#uses=2]
- %3 = fmul <4 x float> zeroinitializer, %0 ; <<4 x float>> [#uses=2]
- %4 = fadd <4 x float> %3, %part0.0.0261 ; <<4 x float>> [#uses=1]
- %5 = shufflevector <4 x float> %3, <4 x float> undef, <2 x i32> <i32 2, i32 3> ; <<2 x float>> [#uses=1]
- %6 = shufflevector <2 x float> %5, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x float>> [#uses=1]
- %7 = fmul <4 x float> %1, undef ; <<4 x float>> [#uses=1]
- %8 = fadd <4 x float> %7, <float 5.000000e-01, float 5.000000e-01, float 5.000000e-01, float 5.000000e-01> ; <<4 x float>> [#uses=1]
- %9 = fptosi <4 x float> %8 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %10 = sitofp <4 x i32> %9 to <4 x float> ; <<4 x float>> [#uses=1]
- %11 = fmul <4 x float> %10, %2 ; <<4 x float>> [#uses=1]
- %12 = fmul <4 x float> undef, %6 ; <<4 x float>> [#uses=1]
- %13 = fmul <4 x float> %11, %4 ; <<4 x float>> [#uses=1]
- %14 = fsub <4 x float> %12, %13 ; <<4 x float>> [#uses=1]
- %15 = fsub <4 x float> %14, undef ; <<4 x float>> [#uses=1]
- %16 = fmul <4 x float> %15, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00> ; <<4 x float>> [#uses=1]
- %17 = fadd <4 x float> %16, undef ; <<4 x float>> [#uses=1]
- %18 = fmul <4 x float> %17, %val173 ; <<4 x float>> [#uses=1]
- %19 = shufflevector <4 x float> %18, <4 x float> undef, <2 x i32> <i32 2, i32 3> ; <<2 x float>> [#uses=1]
- %20 = shufflevector <2 x float> %19, <2 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
- %21 = fadd <4 x float> zeroinitializer, %20 ; <<4 x float>> [#uses=2]
- %22 = fcmp ogt <4 x float> %besterror.0.2264, %21 ; <<4 x i1>> [#uses=0]
- br i1 undef, label %bb193, label %bb186
-
-bb186: ; preds = %bb4
- br label %bb193
-
-bb193: ; preds = %bb186, %bb4
- %besterror.0.0 = phi <4 x float> [ %21, %bb186 ], [ %besterror.0.2264, %bb4 ] ; <<4 x float>> [#uses=1]
- %23 = fadd <4 x float> %part0.0.0261, zeroinitializer ; <<4 x float>> [#uses=1]
- br label %bb4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-str.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-str.ll
deleted file mode 100644
index 3eeec8c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-str.ll
+++ /dev/null
@@ -1,76 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a, i32* %v) {
-; CHECK: f1:
-; CHECK: str r0, [r1]
- store i32 %a, i32* %v
- ret i32 %a
-}
-
-define i32 @f2(i32 %a, i32* %v) {
-; CHECK: f2:
-; CHECK: str.w r0, [r1, #+4092]
- %tmp2 = getelementptr i32* %v, i32 1023
- store i32 %a, i32* %tmp2
- ret i32 %a
-}
-
-define i32 @f2a(i32 %a, i32* %v) {
-; CHECK: f2a:
-; CHECK: str r0, [r1, #-128]
- %tmp2 = getelementptr i32* %v, i32 -32
- store i32 %a, i32* %tmp2
- ret i32 %a
-}
-
-define i32 @f3(i32 %a, i32* %v) {
-; CHECK: f3:
-; CHECK: mov.w r2, #4096
-; CHECK: str r0, [r1, r2]
- %tmp2 = getelementptr i32* %v, i32 1024
- store i32 %a, i32* %tmp2
- ret i32 %a
-}
-
-define i32 @f4(i32 %a, i32 %base) {
-entry:
-; CHECK: f4:
-; CHECK: str r0, [r1, #-128]
- %tmp1 = sub i32 %base, 128
- %tmp2 = inttoptr i32 %tmp1 to i32*
- store i32 %a, i32* %tmp2
- ret i32 %a
-}
-
-define i32 @f5(i32 %a, i32 %base, i32 %offset) {
-entry:
-; CHECK: f5:
-; CHECK: str r0, [r1, r2]
- %tmp1 = add i32 %base, %offset
- %tmp2 = inttoptr i32 %tmp1 to i32*
- store i32 %a, i32* %tmp2
- ret i32 %a
-}
-
-define i32 @f6(i32 %a, i32 %base, i32 %offset) {
-entry:
-; CHECK: f6:
-; CHECK: str.w r0, [r1, r2, lsl #2]
- %tmp1 = shl i32 %offset, 2
- %tmp2 = add i32 %base, %tmp1
- %tmp3 = inttoptr i32 %tmp2 to i32*
- store i32 %a, i32* %tmp3
- ret i32 %a
-}
-
-define i32 @f7(i32 %a, i32 %base, i32 %offset) {
-entry:
-; CHECK: f7:
-; CHECK: lsrs r2, r2, #2
-; CHECK: str r0, [r1, r2]
- %tmp1 = lshr i32 %offset, 2
- %tmp2 = add i32 %base, %tmp1
- %tmp3 = inttoptr i32 %tmp2 to i32*
- store i32 %a, i32* %tmp3
- ret i32 %a
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-str_post.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-str_post.ll
deleted file mode 100644
index bbfb447..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-str_post.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i16 @test1(i32* %X, i16* %A) {
-; CHECK: test1:
-; CHECK: strh {{.*}}[{{.*}}], #-4
- %Y = load i32* %X ; <i32> [#uses=1]
- %tmp1 = trunc i32 %Y to i16 ; <i16> [#uses=1]
- store i16 %tmp1, i16* %A
- %tmp2 = ptrtoint i16* %A to i16 ; <i16> [#uses=1]
- %tmp3 = sub i16 %tmp2, 4 ; <i16> [#uses=1]
- ret i16 %tmp3
-}
-
-define i32 @test2(i32* %X, i32* %A) {
-; CHECK: test2:
-; CHECK: str {{.*}}[{{.*}}],
- %Y = load i32* %X ; <i32> [#uses=1]
- store i32 %Y, i32* %A
- %tmp1 = ptrtoint i32* %A to i32 ; <i32> [#uses=1]
- %tmp2 = sub i32 %tmp1, 4 ; <i32> [#uses=1]
- ret i32 %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-str_pre.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-str_pre.ll
deleted file mode 100644
index 9af960b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-str_pre.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define void @test1(i32* %X, i32* %A, i32** %dest) {
-; CHECK: test1
-; CHECK: str r1, [r0, #+16]!
- %B = load i32* %A ; <i32> [#uses=1]
- %Y = getelementptr i32* %X, i32 4 ; <i32*> [#uses=2]
- store i32 %B, i32* %Y
- store i32* %Y, i32** %dest
- ret void
-}
-
-define i16* @test2(i16* %X, i32* %A) {
-; CHECK: test2
-; CHECK: strh r1, [r0, #+8]!
- %B = load i32* %A ; <i32> [#uses=1]
- %Y = getelementptr i16* %X, i32 4 ; <i16*> [#uses=2]
- %tmp = trunc i32 %B to i16 ; <i16> [#uses=1]
- store i16 %tmp, i16* %Y
- ret i16* %Y
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-strb.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-strb.ll
deleted file mode 100644
index 1ebb938..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-strb.ll
+++ /dev/null
@@ -1,76 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i8 @f1(i8 %a, i8* %v) {
-; CHECK: f1:
-; CHECK: strb r0, [r1]
- store i8 %a, i8* %v
- ret i8 %a
-}
-
-define i8 @f2(i8 %a, i8* %v) {
-; CHECK: f2:
-; CHECK: strb.w r0, [r1, #+4092]
- %tmp2 = getelementptr i8* %v, i32 4092
- store i8 %a, i8* %tmp2
- ret i8 %a
-}
-
-define i8 @f2a(i8 %a, i8* %v) {
-; CHECK: f2a:
-; CHECK: strb r0, [r1, #-128]
- %tmp2 = getelementptr i8* %v, i32 -128
- store i8 %a, i8* %tmp2
- ret i8 %a
-}
-
-define i8 @f3(i8 %a, i8* %v) {
-; CHECK: f3:
-; CHECK: mov.w r2, #4096
-; CHECK: strb r0, [r1, r2]
- %tmp2 = getelementptr i8* %v, i32 4096
- store i8 %a, i8* %tmp2
- ret i8 %a
-}
-
-define i8 @f4(i8 %a, i32 %base) {
-entry:
-; CHECK: f4:
-; CHECK: strb r0, [r1, #-128]
- %tmp1 = sub i32 %base, 128
- %tmp2 = inttoptr i32 %tmp1 to i8*
- store i8 %a, i8* %tmp2
- ret i8 %a
-}
-
-define i8 @f5(i8 %a, i32 %base, i32 %offset) {
-entry:
-; CHECK: f5:
-; CHECK: strb r0, [r1, r2]
- %tmp1 = add i32 %base, %offset
- %tmp2 = inttoptr i32 %tmp1 to i8*
- store i8 %a, i8* %tmp2
- ret i8 %a
-}
-
-define i8 @f6(i8 %a, i32 %base, i32 %offset) {
-entry:
-; CHECK: f6:
-; CHECK: strb.w r0, [r1, r2, lsl #2]
- %tmp1 = shl i32 %offset, 2
- %tmp2 = add i32 %base, %tmp1
- %tmp3 = inttoptr i32 %tmp2 to i8*
- store i8 %a, i8* %tmp3
- ret i8 %a
-}
-
-define i8 @f7(i8 %a, i32 %base, i32 %offset) {
-entry:
-; CHECK: f7:
-; CHECK: lsrs r2, r2, #2
-; CHECK: strb r0, [r1, r2]
- %tmp1 = lshr i32 %offset, 2
- %tmp2 = add i32 %base, %tmp1
- %tmp3 = inttoptr i32 %tmp2 to i8*
- store i8 %a, i8* %tmp3
- ret i8 %a
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-strh.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-strh.ll
deleted file mode 100644
index b0eb8c1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-strh.ll
+++ /dev/null
@@ -1,76 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i16 @f1(i16 %a, i16* %v) {
-; CHECK: f1:
-; CHECK: strh r0, [r1]
- store i16 %a, i16* %v
- ret i16 %a
-}
-
-define i16 @f2(i16 %a, i16* %v) {
-; CHECK: f2:
-; CHECK: strh.w r0, [r1, #+4092]
- %tmp2 = getelementptr i16* %v, i32 2046
- store i16 %a, i16* %tmp2
- ret i16 %a
-}
-
-define i16 @f2a(i16 %a, i16* %v) {
-; CHECK: f2a:
-; CHECK: strh r0, [r1, #-128]
- %tmp2 = getelementptr i16* %v, i32 -64
- store i16 %a, i16* %tmp2
- ret i16 %a
-}
-
-define i16 @f3(i16 %a, i16* %v) {
-; CHECK: f3:
-; CHECK: mov.w r2, #4096
-; CHECK: strh r0, [r1, r2]
- %tmp2 = getelementptr i16* %v, i32 2048
- store i16 %a, i16* %tmp2
- ret i16 %a
-}
-
-define i16 @f4(i16 %a, i32 %base) {
-entry:
-; CHECK: f4:
-; CHECK: strh r0, [r1, #-128]
- %tmp1 = sub i32 %base, 128
- %tmp2 = inttoptr i32 %tmp1 to i16*
- store i16 %a, i16* %tmp2
- ret i16 %a
-}
-
-define i16 @f5(i16 %a, i32 %base, i32 %offset) {
-entry:
-; CHECK: f5:
-; CHECK: strh r0, [r1, r2]
- %tmp1 = add i32 %base, %offset
- %tmp2 = inttoptr i32 %tmp1 to i16*
- store i16 %a, i16* %tmp2
- ret i16 %a
-}
-
-define i16 @f6(i16 %a, i32 %base, i32 %offset) {
-entry:
-; CHECK: f6:
-; CHECK: strh.w r0, [r1, r2, lsl #2]
- %tmp1 = shl i32 %offset, 2
- %tmp2 = add i32 %base, %tmp1
- %tmp3 = inttoptr i32 %tmp2 to i16*
- store i16 %a, i16* %tmp3
- ret i16 %a
-}
-
-define i16 @f7(i16 %a, i32 %base, i32 %offset) {
-entry:
-; CHECK: f7:
-; CHECK: lsrs r2, r2, #2
-; CHECK: strh r0, [r1, r2]
- %tmp1 = lshr i32 %offset, 2
- %tmp2 = add i32 %base, %tmp1
- %tmp3 = inttoptr i32 %tmp2 to i16*
- store i16 %a, i16* %tmp3
- ret i16 %a
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sub.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sub.ll
deleted file mode 100644
index 95335a2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sub.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-; 171 = 0x000000ab
-define i32 @f1(i32 %a) {
-; CHECK: f1:
-; CHECK: subs r0, #171
- %tmp = sub i32 %a, 171
- ret i32 %tmp
-}
-
-; 1179666 = 0x00120012
-define i32 @f2(i32 %a) {
-; CHECK: f2:
-; CHECK: sub.w r0, r0, #1179666
- %tmp = sub i32 %a, 1179666
- ret i32 %tmp
-}
-
-; 872428544 = 0x34003400
-define i32 @f3(i32 %a) {
-; CHECK: f3:
-; CHECK: sub.w r0, r0, #872428544
- %tmp = sub i32 %a, 872428544
- ret i32 %tmp
-}
-
-; 1448498774 = 0x56565656
-define i32 @f4(i32 %a) {
-; CHECK: f4:
-; CHECK: sub.w r0, r0, #1448498774
- %tmp = sub i32 %a, 1448498774
- ret i32 %tmp
-}
-
-; 510 = 0x000001fe
-define i32 @f5(i32 %a) {
-; CHECK: f5:
-; CHECK: sub.w r0, r0, #510
- %tmp = sub i32 %a, 510
- ret i32 %tmp
-}
-
-; Don't change this to an add.
-define i32 @f6(i32 %a) {
-; CHECK: f6:
-; CHECK: subs r0, #1
- %tmp = sub i32 %a, 1
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sub2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sub2.ll
deleted file mode 100644
index bb99cbd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sub2.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a) {
- %tmp = sub i32 %a, 4095
- ret i32 %tmp
-}
-; CHECK: f1:
-; CHECK: subw r0, r0, #4095
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sub4.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sub4.ll
deleted file mode 100644
index a040d17..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sub4.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @f1(i32 %a, i32 %b) {
-; CHECK: f1:
-; CHECK: subs r0, r0, r1
- %tmp = sub i32 %a, %b
- ret i32 %tmp
-}
-
-define i32 @f2(i32 %a, i32 %b) {
-; CHECK: f2:
-; CHECK: sub.w r0, r0, r1, lsl #5
- %tmp = shl i32 %b, 5
- %tmp1 = sub i32 %a, %tmp
- ret i32 %tmp1
-}
-
-define i32 @f3(i32 %a, i32 %b) {
-; CHECK: f3:
-; CHECK: sub.w r0, r0, r1, lsr #6
- %tmp = lshr i32 %b, 6
- %tmp1 = sub i32 %a, %tmp
- ret i32 %tmp1
-}
-
-define i32 @f4(i32 %a, i32 %b) {
-; CHECK: f4:
-; CHECK: sub.w r0, r0, r1, asr #7
- %tmp = ashr i32 %b, 7
- %tmp1 = sub i32 %a, %tmp
- ret i32 %tmp1
-}
-
-define i32 @f5(i32 %a, i32 %b) {
-; CHECK: f5:
-; CHECK: sub.w r0, r0, r0, ror #8
- %l8 = shl i32 %a, 24
- %r8 = lshr i32 %a, 8
- %tmp = or i32 %l8, %r8
- %tmp1 = sub i32 %a, %tmp
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sub5.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sub5.ll
deleted file mode 100644
index c3b56bc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sub5.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i64 @f1(i64 %a, i64 %b) {
-; CHECK: f1:
-; CHECK: subs r0, r0, r2
-; CHECK: sbcs r1, r3
- %tmp = sub i64 %a, %b
- ret i64 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sxt_rot.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sxt_rot.ll
deleted file mode 100644
index 054d5df..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-sxt_rot.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @test0(i8 %A) {
-; CHECK: test0
-; CHECK: sxtb r0, r0
- %B = sext i8 %A to i32
- ret i32 %B
-}
-
-define i8 @test1(i32 %A) signext {
-; CHECK: test1
-; CHECK: sxtb.w r0, r0, ror #8
- %B = lshr i32 %A, 8
- %C = shl i32 %A, 24
- %D = or i32 %B, %C
- %E = trunc i32 %D to i8
- ret i8 %E
-}
-
-define i32 @test2(i32 %A, i32 %X) signext {
-; CHECK: test2
-; CHECK: lsrs r0, r0, #8
-; CHECK: sxtab r0, r1, r0
- %B = lshr i32 %A, 8
- %C = shl i32 %A, 24
- %D = or i32 %B, %C
- %E = trunc i32 %D to i8
- %F = sext i8 %E to i32
- %G = add i32 %F, %X
- ret i32 %G
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-tbb.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-tbb.ll
deleted file mode 100644
index 5dc3cc3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-tbb.ll
+++ /dev/null
@@ -1,57 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -relocation-model=pic | FileCheck %s
-
-define void @bar(i32 %n.u) {
-entry:
-; CHECK: bar:
-; CHECK: tbb
-; CHECK: .align 1
-
- switch i32 %n.u, label %bb12 [i32 1, label %bb i32 2, label %bb6 i32 4, label %bb7 i32 5, label %bb8 i32 6, label %bb10 i32 7, label %bb1 i32 8, label %bb3 i32 9, label %bb4 i32 10, label %bb9 i32 11, label %bb2 i32 12, label %bb5 i32 13, label %bb11 ]
-bb:
- tail call void(...)* @foo1()
- ret void
-bb1:
- tail call void(...)* @foo2()
- ret void
-bb2:
- tail call void(...)* @foo6()
- ret void
-bb3:
- tail call void(...)* @foo3()
- ret void
-bb4:
- tail call void(...)* @foo4()
- ret void
-bb5:
- tail call void(...)* @foo5()
- ret void
-bb6:
- tail call void(...)* @foo1()
- ret void
-bb7:
- tail call void(...)* @foo2()
- ret void
-bb8:
- tail call void(...)* @foo6()
- ret void
-bb9:
- tail call void(...)* @foo3()
- ret void
-bb10:
- tail call void(...)* @foo4()
- ret void
-bb11:
- tail call void(...)* @foo5()
- ret void
-bb12:
- tail call void(...)* @foo6()
- ret void
-}
-
-declare void @foo1(...)
-declare void @foo2(...)
-declare void @foo6(...)
-declare void @foo3(...)
-declare void @foo4(...)
-declare void @foo5(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-tbh.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-tbh.ll
deleted file mode 100644
index 2cf1d6a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-tbh.ll
+++ /dev/null
@@ -1,84 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -relocation-model=pic | FileCheck %s
-
-; Thumb2 target should reorder the bb's in order to use tbb / tbh.
-
- %struct.R_flstr = type { i32, i32, i8* }
- %struct._T_tstr = type { i32, %struct.R_flstr*, %struct._T_tstr* }
-@_C_nextcmd = external global i32 ; <i32*> [#uses=3]
-@.str31 = external constant [28 x i8], align 1 ; <[28 x i8]*> [#uses=1]
-@_T_gtol = external global %struct._T_tstr* ; <%struct._T_tstr**> [#uses=2]
-
-declare arm_apcscc i32 @strlen(i8* nocapture) nounwind readonly
-
-declare arm_apcscc void @Z_fatal(i8*) noreturn nounwind
-
-declare arm_apcscc noalias i8* @calloc(i32, i32) nounwind
-
-define arm_apcscc i32 @main(i32 %argc, i8** nocapture %argv) nounwind {
-; CHECK: main:
-; CHECK: tbb
-entry:
- br label %bb42.i
-
-bb1.i2: ; preds = %bb42.i
- br label %bb40.i
-
-bb5.i: ; preds = %bb42.i
- %0 = or i32 %argc, 32 ; <i32> [#uses=1]
- br label %bb40.i
-
-bb7.i: ; preds = %bb42.i
- call arm_apcscc void @_T_addtol(%struct._T_tstr** @_T_gtol, i32 0, i8* null) nounwind
- unreachable
-
-bb15.i: ; preds = %bb42.i
- call arm_apcscc void @_T_addtol(%struct._T_tstr** @_T_gtol, i32 2, i8* null) nounwind
- unreachable
-
-bb23.i: ; preds = %bb42.i
- %1 = call arm_apcscc i32 @strlen(i8* null) nounwind readonly ; <i32> [#uses=0]
- unreachable
-
-bb33.i: ; preds = %bb42.i
- store i32 0, i32* @_C_nextcmd, align 4
- %2 = call arm_apcscc noalias i8* @calloc(i32 21, i32 1) nounwind ; <i8*> [#uses=0]
- unreachable
-
-bb34.i: ; preds = %bb42.i
- %3 = load i32* @_C_nextcmd, align 4 ; <i32> [#uses=1]
- %4 = add i32 %3, 1 ; <i32> [#uses=1]
- store i32 %4, i32* @_C_nextcmd, align 4
- %5 = call arm_apcscc noalias i8* @calloc(i32 22, i32 1) nounwind ; <i8*> [#uses=0]
- unreachable
-
-bb35.i: ; preds = %bb42.i
- %6 = call arm_apcscc noalias i8* @calloc(i32 20, i32 1) nounwind ; <i8*> [#uses=0]
- unreachable
-
-bb37.i: ; preds = %bb42.i
- %7 = call arm_apcscc noalias i8* @calloc(i32 14, i32 1) nounwind ; <i8*> [#uses=0]
- unreachable
-
-bb39.i: ; preds = %bb42.i
- call arm_apcscc void @Z_fatal(i8* getelementptr ([28 x i8]* @.str31, i32 0, i32 0)) nounwind
- unreachable
-
-bb40.i: ; preds = %bb42.i, %bb5.i, %bb1.i2
- br label %bb42.i
-
-bb42.i: ; preds = %bb40.i, %entry
- switch i32 %argc, label %bb39.i [
- i32 67, label %bb33.i
- i32 70, label %bb35.i
- i32 77, label %bb37.i
- i32 83, label %bb34.i
- i32 97, label %bb7.i
- i32 100, label %bb5.i
- i32 101, label %bb40.i
- i32 102, label %bb23.i
- i32 105, label %bb15.i
- i32 116, label %bb1.i2
- ]
-}
-
-declare arm_apcscc void @_T_addtol(%struct._T_tstr** nocapture, i32, i8*) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-teq.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-teq.ll
deleted file mode 100644
index 69f0383..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-teq.ll
+++ /dev/null
@@ -1,93 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-
-; 0x000000bb = 187
-define i1 @f1(i32 %a) {
- %tmp = xor i32 %a, 187
- %tmp1 = icmp ne i32 %tmp, 0
- ret i1 %tmp1
-}
-; CHECK: f1:
-; CHECK: teq.w r0, #187
-
-; 0x000000bb = 187
-define i1 @f2(i32 %a) {
- %tmp = xor i32 %a, 187
- %tmp1 = icmp eq i32 0, %tmp
- ret i1 %tmp1
-}
-; CHECK: f2:
-; CHECK: teq.w r0, #187
-
-; 0x00aa00aa = 11141290
-define i1 @f3(i32 %a) {
- %tmp = xor i32 %a, 11141290
- %tmp1 = icmp eq i32 %tmp, 0
- ret i1 %tmp1
-}
-; CHECK: f3:
-; CHECK: teq.w r0, #11141290
-
-; 0x00aa00aa = 11141290
-define i1 @f4(i32 %a) {
- %tmp = xor i32 %a, 11141290
- %tmp1 = icmp ne i32 0, %tmp
- ret i1 %tmp1
-}
-; CHECK: f4:
-; CHECK: teq.w r0, #11141290
-
-; 0xcc00cc00 = 3422604288
-define i1 @f5(i32 %a) {
- %tmp = xor i32 %a, 3422604288
- %tmp1 = icmp ne i32 %tmp, 0
- ret i1 %tmp1
-}
-; CHECK: f5:
-; CHECK: teq.w r0, #-872363008
-
-; 0xcc00cc00 = 3422604288
-define i1 @f6(i32 %a) {
- %tmp = xor i32 %a, 3422604288
- %tmp1 = icmp eq i32 0, %tmp
- ret i1 %tmp1
-}
-; CHECK: f6:
-; CHECK: teq.w r0, #-872363008
-
-; 0xdddddddd = 3722304989
-define i1 @f7(i32 %a) {
- %tmp = xor i32 %a, 3722304989
- %tmp1 = icmp eq i32 %tmp, 0
- ret i1 %tmp1
-}
-; CHECK: f7:
-; CHECK: teq.w r0, #-572662307
-
-; 0xdddddddd = 3722304989
-define i1 @f8(i32 %a) {
- %tmp = xor i32 %a, 3722304989
- %tmp1 = icmp ne i32 0, %tmp
- ret i1 %tmp1
-}
-; CHECK: f8:
-; CHECK: teq.w r0, #-572662307
-
-; 0x00110000 = 1114112
-define i1 @f9(i32 %a) {
- %tmp = xor i32 %a, 1114112
- %tmp1 = icmp ne i32 %tmp, 0
- ret i1 %tmp1
-}
-; CHECK: f9:
-; CHECK: teq.w r0, #1114112
-
-; 0x00110000 = 1114112
-define i1 @f10(i32 %a) {
- %tmp = xor i32 %a, 1114112
- %tmp1 = icmp eq i32 0, %tmp
- ret i1 %tmp1
-}
-; CHECK: f10:
-; CHECK: teq.w r0, #1114112
-
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-teq2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-teq2.ll
deleted file mode 100644
index 0f122f2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-teq2.ll
+++ /dev/null
@@ -1,71 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i1 @f1(i32 %a, i32 %b) {
-; CHECK: f1
-; CHECK: teq.w r0, r1
- %tmp = xor i32 %a, %b
- %tmp1 = icmp ne i32 %tmp, 0
- ret i1 %tmp1
-}
-
-define i1 @f2(i32 %a, i32 %b) {
-; CHECK: f2
-; CHECK: teq.w r0, r1
- %tmp = xor i32 %a, %b
- %tmp1 = icmp eq i32 %tmp, 0
- ret i1 %tmp1
-}
-
-define i1 @f3(i32 %a, i32 %b) {
-; CHECK: f3
-; CHECK: teq.w r0, r1
- %tmp = xor i32 %a, %b
- %tmp1 = icmp ne i32 0, %tmp
- ret i1 %tmp1
-}
-
-define i1 @f4(i32 %a, i32 %b) {
-; CHECK: f4
-; CHECK: teq.w r0, r1
- %tmp = xor i32 %a, %b
- %tmp1 = icmp eq i32 0, %tmp
- ret i1 %tmp1
-}
-
-define i1 @f6(i32 %a, i32 %b) {
-; CHECK: f6
-; CHECK: teq.w r0, r1, lsl #5
- %tmp = shl i32 %b, 5
- %tmp1 = xor i32 %a, %tmp
- %tmp2 = icmp eq i32 %tmp1, 0
- ret i1 %tmp2
-}
-
-define i1 @f7(i32 %a, i32 %b) {
-; CHECK: f7
-; CHECK: teq.w r0, r1, lsr #6
- %tmp = lshr i32 %b, 6
- %tmp1 = xor i32 %a, %tmp
- %tmp2 = icmp eq i32 %tmp1, 0
- ret i1 %tmp2
-}
-
-define i1 @f8(i32 %a, i32 %b) {
-; CHECK: f8
-; CHECK: teq.w r0, r1, asr #7
- %tmp = ashr i32 %b, 7
- %tmp1 = xor i32 %a, %tmp
- %tmp2 = icmp eq i32 %tmp1, 0
- ret i1 %tmp2
-}
-
-define i1 @f9(i32 %a, i32 %b) {
-; CHECK: f9
-; CHECK: teq.w r0, r0, ror #8
- %l8 = shl i32 %a, 24
- %r8 = lshr i32 %a, 8
- %tmp = or i32 %l8, %r8
- %tmp1 = xor i32 %a, %tmp
- %tmp2 = icmp eq i32 %tmp1, 0
- ret i1 %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-tst.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-tst.ll
deleted file mode 100644
index d905217..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-tst.ll
+++ /dev/null
@@ -1,92 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-
-; 0x000000bb = 187
-define i1 @f1(i32 %a) {
- %tmp = and i32 %a, 187
- %tmp1 = icmp ne i32 %tmp, 0
- ret i1 %tmp1
-}
-; CHECK: f1:
-; CHECK: tst.w r0, #187
-
-; 0x000000bb = 187
-define i1 @f2(i32 %a) {
- %tmp = and i32 %a, 187
- %tmp1 = icmp eq i32 0, %tmp
- ret i1 %tmp1
-}
-; CHECK: f2:
-; CHECK: tst.w r0, #187
-
-; 0x00aa00aa = 11141290
-define i1 @f3(i32 %a) {
- %tmp = and i32 %a, 11141290
- %tmp1 = icmp eq i32 %tmp, 0
- ret i1 %tmp1
-}
-; CHECK: f3:
-; CHECK: tst.w r0, #11141290
-
-; 0x00aa00aa = 11141290
-define i1 @f4(i32 %a) {
- %tmp = and i32 %a, 11141290
- %tmp1 = icmp ne i32 0, %tmp
- ret i1 %tmp1
-}
-; CHECK: f4:
-; CHECK: tst.w r0, #11141290
-
-; 0xcc00cc00 = 3422604288
-define i1 @f5(i32 %a) {
- %tmp = and i32 %a, 3422604288
- %tmp1 = icmp ne i32 %tmp, 0
- ret i1 %tmp1
-}
-; CHECK: f5:
-; CHECK: tst.w r0, #-872363008
-
-; 0xcc00cc00 = 3422604288
-define i1 @f6(i32 %a) {
- %tmp = and i32 %a, 3422604288
- %tmp1 = icmp eq i32 0, %tmp
- ret i1 %tmp1
-}
-; CHECK: f6:
-; CHECK: tst.w r0, #-872363008
-
-; 0xdddddddd = 3722304989
-define i1 @f7(i32 %a) {
- %tmp = and i32 %a, 3722304989
- %tmp1 = icmp eq i32 %tmp, 0
- ret i1 %tmp1
-}
-; CHECK: f7:
-; CHECK: tst.w r0, #-572662307
-
-; 0xdddddddd = 3722304989
-define i1 @f8(i32 %a) {
- %tmp = and i32 %a, 3722304989
- %tmp1 = icmp ne i32 0, %tmp
- ret i1 %tmp1
-}
-; CHECK: f8:
-; CHECK: tst.w r0, #-572662307
-
-; 0x00110000 = 1114112
-define i1 @f9(i32 %a) {
- %tmp = and i32 %a, 1114112
- %tmp1 = icmp ne i32 %tmp, 0
- ret i1 %tmp1
-}
-; CHECK: f9:
-; CHECK: tst.w r0, #1114112
-
-; 0x00110000 = 1114112
-define i1 @f10(i32 %a) {
- %tmp = and i32 %a, 1114112
- %tmp1 = icmp eq i32 0, %tmp
- ret i1 %tmp1
-}
-; CHECK: f10:
-; CHECK: tst.w r0, #1114112
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-tst2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-tst2.ll
deleted file mode 100644
index db202dd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-tst2.ll
+++ /dev/null
@@ -1,71 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i1 @f1(i32 %a, i32 %b) {
-; CHECK: f1:
-; CHECK: tst r0, r1
- %tmp = and i32 %a, %b
- %tmp1 = icmp ne i32 %tmp, 0
- ret i1 %tmp1
-}
-
-define i1 @f2(i32 %a, i32 %b) {
-; CHECK: f2:
-; CHECK: tst r0, r1
- %tmp = and i32 %a, %b
- %tmp1 = icmp eq i32 %tmp, 0
- ret i1 %tmp1
-}
-
-define i1 @f3(i32 %a, i32 %b) {
-; CHECK: f3:
-; CHECK: tst r0, r1
- %tmp = and i32 %a, %b
- %tmp1 = icmp ne i32 0, %tmp
- ret i1 %tmp1
-}
-
-define i1 @f4(i32 %a, i32 %b) {
-; CHECK: f4:
-; CHECK: tst r0, r1
- %tmp = and i32 %a, %b
- %tmp1 = icmp eq i32 0, %tmp
- ret i1 %tmp1
-}
-
-define i1 @f6(i32 %a, i32 %b) {
-; CHECK: f6:
-; CHECK: tst.w r0, r1, lsl #5
- %tmp = shl i32 %b, 5
- %tmp1 = and i32 %a, %tmp
- %tmp2 = icmp eq i32 %tmp1, 0
- ret i1 %tmp2
-}
-
-define i1 @f7(i32 %a, i32 %b) {
-; CHECK: f7:
-; CHECK: tst.w r0, r1, lsr #6
- %tmp = lshr i32 %b, 6
- %tmp1 = and i32 %a, %tmp
- %tmp2 = icmp eq i32 %tmp1, 0
- ret i1 %tmp2
-}
-
-define i1 @f8(i32 %a, i32 %b) {
-; CHECK: f8:
-; CHECK: tst.w r0, r1, asr #7
- %tmp = ashr i32 %b, 7
- %tmp1 = and i32 %a, %tmp
- %tmp2 = icmp eq i32 %tmp1, 0
- ret i1 %tmp2
-}
-
-define i1 @f9(i32 %a, i32 %b) {
-; CHECK: f9:
-; CHECK: tst.w r0, r0, ror #8
- %l8 = shl i32 %a, 24
- %r8 = lshr i32 %a, 8
- %tmp = or i32 %l8, %r8
- %tmp1 = and i32 %a, %tmp
- %tmp2 = icmp eq i32 %tmp1, 0
- ret i1 %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-uxt_rot.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
deleted file mode 100644
index 75e1d70..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i8 @test1(i32 %A.u) zeroext {
-; CHECK: test1
-; CHECK: uxtb r0, r0
- %B.u = trunc i32 %A.u to i8
- ret i8 %B.u
-}
-
-define i32 @test2(i32 %A.u, i32 %B.u) zeroext {
-; CHECK: test2
-; CHECK: uxtab r0, r0, r1
- %C.u = trunc i32 %B.u to i8
- %D.u = zext i8 %C.u to i32
- %E.u = add i32 %A.u, %D.u
- ret i32 %E.u
-}
-
-define i32 @test3(i32 %A.u) zeroext {
-; CHECK: test3
-; CHECK: uxth.w r0, r0, ror #8
- %B.u = lshr i32 %A.u, 8
- %C.u = shl i32 %A.u, 24
- %D.u = or i32 %B.u, %C.u
- %E.u = trunc i32 %D.u to i16
- %F.u = zext i16 %E.u to i32
- ret i32 %F.u
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-uxtb.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-uxtb.ll
deleted file mode 100644
index 91598cd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/thumb2-uxtb.ll
+++ /dev/null
@@ -1,98 +0,0 @@
-; RUN: llc < %s -march=thumb -mattr=+thumb2 | FileCheck %s
-
-define i32 @test1(i32 %x) {
-; CHECK: test1
-; CHECK: uxtb16 r0, r0
- %tmp1 = and i32 %x, 16711935 ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
-define i32 @test2(i32 %x) {
-; CHECK: test2
-; CHECK: uxtb16 r0, r0, ror #8
- %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
- ret i32 %tmp2
-}
-
-define i32 @test3(i32 %x) {
-; CHECK: test3
-; CHECK: uxtb16 r0, r0, ror #8
- %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
- ret i32 %tmp2
-}
-
-define i32 @test4(i32 %x) {
-; CHECK: test4
-; CHECK: uxtb16 r0, r0, ror #8
- %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
- %tmp6 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
- ret i32 %tmp6
-}
-
-define i32 @test5(i32 %x) {
-; CHECK: test5
-; CHECK: uxtb16 r0, r0, ror #8
- %tmp1 = lshr i32 %x, 8 ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
- ret i32 %tmp2
-}
-
-define i32 @test6(i32 %x) {
-; CHECK: test6
-; CHECK: uxtb16 r0, r0, ror #16
- %tmp1 = lshr i32 %x, 16 ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 255 ; <i32> [#uses=1]
- %tmp4 = shl i32 %x, 16 ; <i32> [#uses=1]
- %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
- %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
- ret i32 %tmp6
-}
-
-define i32 @test7(i32 %x) {
-; CHECK: test7
-; CHECK: uxtb16 r0, r0, ror #16
- %tmp1 = lshr i32 %x, 16 ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 255 ; <i32> [#uses=1]
- %tmp4 = shl i32 %x, 16 ; <i32> [#uses=1]
- %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
- %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
- ret i32 %tmp6
-}
-
-define i32 @test8(i32 %x) {
-; CHECK: test8
-; CHECK: uxtb16 r0, r0, ror #24
- %tmp1 = shl i32 %x, 8 ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 16711680 ; <i32> [#uses=1]
- %tmp5 = lshr i32 %x, 24 ; <i32> [#uses=1]
- %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
- ret i32 %tmp6
-}
-
-define i32 @test9(i32 %x) {
-; CHECK: test9
-; CHECK: uxtb16 r0, r0, ror #24
- %tmp1 = lshr i32 %x, 24 ; <i32> [#uses=1]
- %tmp4 = shl i32 %x, 8 ; <i32> [#uses=1]
- %tmp5 = and i32 %tmp4, 16711680 ; <i32> [#uses=1]
- %tmp6 = or i32 %tmp5, %tmp1 ; <i32> [#uses=1]
- ret i32 %tmp6
-}
-
-define i32 @test10(i32 %p0) {
-; CHECK: test10
-; CHECK: mov.w r1, #16253176
-; CHECK: and.w r0, r1, r0, lsr #7
-; CHECK: lsrs r1, r0, #5
-; CHECK: uxtb16 r1, r1
-; CHECK: orr.w r0, r1, r0
-
- %tmp1 = lshr i32 %p0, 7 ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 16253176 ; <i32> [#uses=2]
- %tmp4 = lshr i32 %tmp2, 5 ; <i32> [#uses=1]
- %tmp5 = and i32 %tmp4, 458759 ; <i32> [#uses=1]
- %tmp7 = or i32 %tmp5, %tmp2 ; <i32> [#uses=1]
- ret i32 %tmp7
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/tls1.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/tls1.ll
deleted file mode 100644
index 1e55557..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/tls1.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi | \
-; RUN: grep {i(tpoff)}
-; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi | \
-; RUN: grep {__aeabi_read_tp}
-; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi \
-; RUN: -relocation-model=pic | grep {__tls_get_addr}
-
-
-@i = thread_local global i32 15 ; <i32*> [#uses=2]
-
-define i32 @f() {
-entry:
- %tmp1 = load i32* @i ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
-define i32* @g() {
-entry:
- ret i32* @i
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/Thumb2/tls2.ll b/libclamav/c++/llvm/test/CodeGen/Thumb2/tls2.ll
deleted file mode 100644
index b8a0657..0000000
--- a/libclamav/c++/llvm/test/CodeGen/Thumb2/tls2.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi | FileCheck %s -check-prefix=CHECK-NOT-PIC
-; RUN: llc < %s -mtriple=thumbv7-linux-gnueabi -relocation-model=pic | FileCheck %s -check-prefix=CHECK-PIC
-
-@i = external thread_local global i32 ; <i32*> [#uses=2]
-
-define i32 @f() {
-entry:
-; CHECK-NOT-PIC: f:
-; CHECK-NOT-PIC: add r0, pc
-; CHECK-NOT-PIC: ldr r1, [r0]
-; CHECK-NOT-PIC: i(gottpoff)
-
-; CHECK-PIC: f:
-; CHECK-PIC: bl __tls_get_addr(PLT)
- %tmp1 = load i32* @i ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
-define i32* @g() {
-entry:
-; CHECK-NOT-PIC: g:
-; CHECK-NOT-PIC: add r0, pc
-; CHECK-NOT-PIC: ldr r1, [r0]
-; CHECK-NOT-PIC: i(gottpoff)
-
-; CHECK-PIC: g:
-; CHECK-PIC: bl __tls_get_addr(PLT)
- ret i32* @i
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2003-08-03-CallArgLiveRanges.ll b/libclamav/c++/llvm/test/CodeGen/X86/2003-08-03-CallArgLiveRanges.ll
deleted file mode 100644
index 2484860..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2003-08-03-CallArgLiveRanges.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; The old instruction selector used to load all arguments to a call up in
-; registers, then start pushing them all onto the stack. This is bad news as
-; it makes a ton of annoying overlapping live ranges. This code should not
-; cause spills!
-;
-; RUN: llc < %s -march=x86 -stats |& not grep spilled
-
-target datalayout = "e-p:32:32"
-
-define i32 @test(i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) {
- ret i32 0
-}
-
-define i32 @main() {
- %X = call i32 @test( i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10 ) ; <i32> [#uses=1]
- ret i32 %X
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2003-08-23-DeadBlockTest.ll b/libclamav/c++/llvm/test/CodeGen/X86/2003-08-23-DeadBlockTest.ll
deleted file mode 100644
index 5c40eea..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2003-08-23-DeadBlockTest.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define i32 @test() {
-entry:
- ret i32 7
-Test: ; No predecessors!
- %A = call i32 @test( ) ; <i32> [#uses=1]
- %B = call i32 @test( ) ; <i32> [#uses=1]
- %C = add i32 %A, %B ; <i32> [#uses=1]
- ret i32 %C
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2003-11-03-GlobalBool.ll b/libclamav/c++/llvm/test/CodeGen/X86/2003-11-03-GlobalBool.ll
deleted file mode 100644
index 8b0a185..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2003-11-03-GlobalBool.ll
+++ /dev/null
@@ -1,4 +0,0 @@
-; RUN: llc < %s -march=x86 | \
-; RUN: not grep {.byte\[\[:space:\]\]*true}
-
-@X = global i1 true ; <i1*> [#uses=0]
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2004-02-12-Memcpy.ll b/libclamav/c++/llvm/test/CodeGen/X86/2004-02-12-Memcpy.ll
deleted file mode 100644
index f15a1b4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2004-02-12-Memcpy.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-pc-linux-gnu | grep movs | count 1
-
-@A = global [32 x i32] zeroinitializer
-@B = global [32 x i32] zeroinitializer
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
-
-define void @main() nounwind {
- ; dword copy
- call void @llvm.memcpy.i32(i8* bitcast ([32 x i32]* @A to i8*),
- i8* bitcast ([32 x i32]* @B to i8*),
- i32 128, i32 4 )
-
- ; word copy
- call void @llvm.memcpy.i32( i8* bitcast ([32 x i32]* @A to i8*),
- i8* bitcast ([32 x i32]* @B to i8*),
- i32 128, i32 2 )
-
- ; byte copy
- call void @llvm.memcpy.i32( i8* bitcast ([32 x i32]* @A to i8*),
- i8* bitcast ([32 x i32]* @B to i8*),
- i32 128, i32 1 )
-
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2004-02-13-FrameReturnAddress.ll b/libclamav/c++/llvm/test/CodeGen/X86/2004-02-13-FrameReturnAddress.ll
deleted file mode 100644
index fea2b54..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2004-02-13-FrameReturnAddress.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {(%esp}
-; RUN: llc < %s -march=x86 | grep {pushl %ebp} | count 1
-; RUN: llc < %s -march=x86 | grep {popl %ebp} | count 1
-
-declare i8* @llvm.returnaddress(i32)
-
-declare i8* @llvm.frameaddress(i32)
-
-define i8* @test1() {
- %X = call i8* @llvm.returnaddress( i32 0 ) ; <i8*> [#uses=1]
- ret i8* %X
-}
-
-define i8* @test2() {
- %X = call i8* @llvm.frameaddress( i32 0 ) ; <i8*> [#uses=1]
- ret i8* %X
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2004-02-14-InefficientStackPointer.ll b/libclamav/c++/llvm/test/CodeGen/X86/2004-02-14-InefficientStackPointer.ll
deleted file mode 100644
index f986ebd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2004-02-14-InefficientStackPointer.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s -march=x86 | grep -i ESP | not grep sub
-
-define i32 @test(i32 %X) {
- ret i32 %X
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2004-02-22-Casts.ll b/libclamav/c++/llvm/test/CodeGen/X86/2004-02-22-Casts.ll
deleted file mode 100644
index dabf7d3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2004-02-22-Casts.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86
-define i1 @test1(double %X) {
- %V = fcmp one double %X, 0.000000e+00 ; <i1> [#uses=1]
- ret i1 %V
-}
-
-define double @test2(i64 %X) {
- %V = uitofp i64 %X to double ; <double> [#uses=1]
- ret double %V
-}
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2004-03-30-Select-Max.ll b/libclamav/c++/llvm/test/CodeGen/X86/2004-03-30-Select-Max.ll
deleted file mode 100644
index b6631b6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2004-03-30-Select-Max.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep {j\[lgbe\]}
-
-define i32 @max(i32 %A, i32 %B) {
- %gt = icmp sgt i32 %A, %B ; <i1> [#uses=1]
- %R = select i1 %gt, i32 %A, i32 %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2004-04-09-SameValueCoalescing.ll b/libclamav/c++/llvm/test/CodeGen/X86/2004-04-09-SameValueCoalescing.ll
deleted file mode 100644
index c62fee1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2004-04-09-SameValueCoalescing.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; Linear scan does not currently coalesce any two variables that have
-; overlapping live intervals. When two overlapping intervals have the same
-; value, they can be joined though.
-;
-; RUN: llc < %s -march=x86 -regalloc=linearscan | \
-; RUN: not grep {mov %\[A-Z\]\\\{2,3\\\}, %\[A-Z\]\\\{2,3\\\}}
-
-define i64 @test(i64 %x) {
-entry:
- %tmp.1 = mul i64 %x, 4294967297 ; <i64> [#uses=1]
- ret i64 %tmp.1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2004-04-13-FPCMOV-Crash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2004-04-13-FPCMOV-Crash.ll
deleted file mode 100644
index f8ed016..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2004-04-13-FPCMOV-Crash.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define double @test(double %d) {
- %X = select i1 false, double %d, double %d ; <double> [#uses=1]
- ret double %X
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2004-06-10-StackifierCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2004-06-10-StackifierCrash.ll
deleted file mode 100644
index 036aa6a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2004-06-10-StackifierCrash.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define i1 @T(double %X) {
- %V = fcmp oeq double %X, %X ; <i1> [#uses=1]
- ret i1 %V
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2004-10-08-SelectSetCCFold.ll b/libclamav/c++/llvm/test/CodeGen/X86/2004-10-08-SelectSetCCFold.ll
deleted file mode 100644
index db3af01..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2004-10-08-SelectSetCCFold.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define i1 @test(i1 %C, i1 %D, i32 %X, i32 %Y) {
- %E = icmp slt i32 %X, %Y ; <i1> [#uses=1]
- %F = select i1 %C, i1 %D, i1 %E ; <i1> [#uses=1]
- ret i1 %F
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2005-01-17-CycleInDAG.ll b/libclamav/c++/llvm/test/CodeGen/X86/2005-01-17-CycleInDAG.ll
deleted file mode 100644
index fe6674d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2005-01-17-CycleInDAG.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; This testcase was distilled from 132.ijpeg. Basically we cannot fold the
-; load into the sub instruction here as it induces a cycle in the dag, which
-; is invalid code (there is no correct way to order the instruction). Check
-; that we do not fold the load into the sub.
-
-; RUN: llc < %s -march=x86 | not grep sub.*GLOBAL
-
-@GLOBAL = external global i32 ; <i32*> [#uses=1]
-
-define i32 @test(i32* %P1, i32* %P2, i32* %P3) nounwind {
- %L = load i32* @GLOBAL ; <i32> [#uses=1]
- store i32 12, i32* %P2
- %Y = load i32* %P3 ; <i32> [#uses=1]
- %Z = sub i32 %Y, %L ; <i32> [#uses=1]
- ret i32 %Z
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2005-02-14-IllegalAssembler.ll b/libclamav/c++/llvm/test/CodeGen/X86/2005-02-14-IllegalAssembler.ll
deleted file mode 100644
index 30a6ac6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2005-02-14-IllegalAssembler.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep 18446744073709551612
-
-@A = external global i32 ; <i32*> [#uses=1]
-@Y = global i32* getelementptr (i32* @A, i32 -1) ; <i32**> [#uses=0]
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2005-05-08-FPStackifierPHI.ll b/libclamav/c++/llvm/test/CodeGen/X86/2005-05-08-FPStackifierPHI.ll
deleted file mode 100644
index 5266009..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2005-05-08-FPStackifierPHI.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=generic
-; Make sure LLC doesn't crash in the stackifier due to FP PHI nodes.
-
-define void @radfg_() {
-entry:
- br i1 false, label %no_exit.16.preheader, label %loopentry.0
-loopentry.0: ; preds = %entry
- ret void
-no_exit.16.preheader: ; preds = %entry
- br label %no_exit.16
-no_exit.16: ; preds = %no_exit.16, %no_exit.16.preheader
- br i1 false, label %loopexit.16.loopexit, label %no_exit.16
-loopexit.16.loopexit: ; preds = %no_exit.16
- br label %no_exit.18
-no_exit.18: ; preds = %loopexit.20, %loopexit.16.loopexit
- %tmp.882 = fadd float 0.000000e+00, 0.000000e+00 ; <float> [#uses=2]
- br i1 false, label %loopexit.19, label %no_exit.19.preheader
-no_exit.19.preheader: ; preds = %no_exit.18
- ret void
-loopexit.19: ; preds = %no_exit.18
- br i1 false, label %loopexit.20, label %no_exit.20
-no_exit.20: ; preds = %loopexit.21, %loopexit.19
- %ai2.1122.tmp.3 = phi float [ %tmp.958, %loopexit.21 ], [ %tmp.882, %loopexit.19 ] ; <float> [#uses=1]
- %tmp.950 = fmul float %tmp.882, %ai2.1122.tmp.3 ; <float> [#uses=1]
- %tmp.951 = fsub float 0.000000e+00, %tmp.950 ; <float> [#uses=1]
- %tmp.958 = fadd float 0.000000e+00, 0.000000e+00 ; <float> [#uses=1]
- br i1 false, label %loopexit.21, label %no_exit.21.preheader
-no_exit.21.preheader: ; preds = %no_exit.20
- ret void
-loopexit.21: ; preds = %no_exit.20
- br i1 false, label %loopexit.20, label %no_exit.20
-loopexit.20: ; preds = %loopexit.21, %loopexit.19
- %ar2.1124.tmp.2 = phi float [ 0.000000e+00, %loopexit.19 ], [ %tmp.951, %loopexit.21 ] ; <float> [#uses=0]
- br i1 false, label %loopexit.18.loopexit, label %no_exit.18
-loopexit.18.loopexit: ; preds = %loopexit.20
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
deleted file mode 100644
index d906da4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86 | \
-; RUN: grep shld | count 1
-;
-; Check that the isel does not fold the shld, which already folds a load
-; and has two uses, into a store.
-
-@A = external global i32 ; <i32*> [#uses=2]
-
-define i32 @test5(i32 %B, i8 %C) {
- %tmp.1 = load i32* @A ; <i32> [#uses=1]
- %shift.upgrd.1 = zext i8 %C to i32 ; <i32> [#uses=1]
- %tmp.2 = shl i32 %tmp.1, %shift.upgrd.1 ; <i32> [#uses=1]
- %tmp.3 = sub i8 32, %C ; <i8> [#uses=1]
- %shift.upgrd.2 = zext i8 %tmp.3 to i32 ; <i32> [#uses=1]
- %tmp.4 = lshr i32 %B, %shift.upgrd.2 ; <i32> [#uses=1]
- %tmp.5 = or i32 %tmp.4, %tmp.2 ; <i32> [#uses=2]
- store i32 %tmp.5, i32* @A
- ret i32 %tmp.5
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll
deleted file mode 100644
index dc69ef8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-03-01-InstrSchedBug.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep {subl.*%esp}
-
-define i32 @f(i32 %a, i32 %b) {
- %tmp.2 = mul i32 %a, %a ; <i32> [#uses=1]
- %tmp.5 = shl i32 %a, 1 ; <i32> [#uses=1]
- %tmp.6 = mul i32 %tmp.5, %b ; <i32> [#uses=1]
- %tmp.10 = mul i32 %b, %b ; <i32> [#uses=1]
- %tmp.7 = add i32 %tmp.10, %tmp.2 ; <i32> [#uses=1]
- %tmp.11 = add i32 %tmp.7, %tmp.6 ; <i32> [#uses=1]
- ret i32 %tmp.11
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-03-02-InstrSchedBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-03-02-InstrSchedBug.ll
deleted file mode 100644
index 0421896..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-03-02-InstrSchedBug.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -stats |& \
-; RUN: grep asm-printer | grep 7
-
-define i32 @g(i32 %a, i32 %b) nounwind {
- %tmp.1 = shl i32 %b, 1 ; <i32> [#uses=1]
- %tmp.3 = add i32 %tmp.1, %a ; <i32> [#uses=1]
- %tmp.5 = mul i32 %tmp.3, %a ; <i32> [#uses=1]
- %tmp.8 = mul i32 %b, %b ; <i32> [#uses=1]
- %tmp.9 = add i32 %tmp.5, %tmp.8 ; <i32> [#uses=1]
- ret i32 %tmp.9
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-04-04-CrossBlockCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-04-04-CrossBlockCrash.ll
deleted file mode 100644
index 3f67097..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-04-04-CrossBlockCrash.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah
-; END.
-
-target datalayout = "e-p:32:32"
-target triple = "i686-apple-darwin8.6.1"
- %struct.GLTColor4 = type { float, float, float, float }
- %struct.GLTCoord3 = type { float, float, float }
- %struct.__GLIContextRec = type { { %struct.anon, { [24 x [16 x float]], [24 x [16 x float]] }, %struct.GLTColor4, { float, float, float, float, %struct.GLTCoord3, float } }, { float, float, float, float, float, float, float, float, [4 x i32], [4 x i32], [4 x i32] } }
- %struct.__GLvertex = type { %struct.GLTColor4, %struct.GLTColor4, %struct.GLTColor4, %struct.GLTColor4, %struct.GLTColor4, %struct.GLTCoord3, float, %struct.GLTColor4, float, float, float, i8, i8, i8, i8, [4 x float], [2 x i8*], i32, i32, [16 x %struct.GLTColor4] }
- %struct.anon = type { float, float, float, float, float, float, float, float }
-
-declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8)
-
-declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>)
-
-declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>)
-
-define void @gleLLVMVecInterpolateClip() {
-entry:
- br i1 false, label %cond_false, label %cond_false183
-cond_false: ; preds = %entry
- br i1 false, label %cond_false183, label %cond_true69
-cond_true69: ; preds = %cond_false
- ret void
-cond_false183: ; preds = %cond_false, %entry
- %vuizmsk.0.1 = phi <4 x i32> [ < i32 -1, i32 -1, i32 -1, i32 0 >, %entry ], [ < i32 -1, i32 0, i32 0, i32 0 >, %cond_false ] ; <<4 x i32>> [#uses=2]
- %tmp192 = extractelement <4 x i32> %vuizmsk.0.1, i32 2 ; <i32> [#uses=1]
- %tmp193 = extractelement <4 x i32> %vuizmsk.0.1, i32 3 ; <i32> [#uses=2]
- %tmp195 = insertelement <4 x i32> zeroinitializer, i32 %tmp192, i32 1 ; <<4 x i32>> [#uses=1]
- %tmp196 = insertelement <4 x i32> %tmp195, i32 %tmp193, i32 2 ; <<4 x i32>> [#uses=1]
- %tmp197 = insertelement <4 x i32> %tmp196, i32 %tmp193, i32 3 ; <<4 x i32>> [#uses=1]
- %tmp336 = and <4 x i32> zeroinitializer, %tmp197 ; <<4 x i32>> [#uses=1]
- %tmp337 = bitcast <4 x i32> %tmp336 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp378 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp337, <4 x float> zeroinitializer, i8 1 ) ; <<4 x float>> [#uses=1]
- %tmp379 = bitcast <4 x float> %tmp378 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp388 = tail call <8 x i16> @llvm.x86.sse2.packssdw.128( <4 x i32> zeroinitializer, <4 x i32> %tmp379 ) ; <<4 x i32>> [#uses=1]
- %tmp392 = bitcast <8 x i16> %tmp388 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp399 = extractelement <8 x i16> %tmp392, i32 7 ; <i16> [#uses=1]
- %tmp423 = insertelement <8 x i16> zeroinitializer, i16 %tmp399, i32 7 ; <<8 x i16>> [#uses=1]
- %tmp427 = bitcast <8 x i16> %tmp423 to <16 x i8> ; <<16 x i8>> [#uses=1]
- %tmp428 = tail call i32 @llvm.x86.sse2.pmovmskb.128( <16 x i8> %tmp427 ) ; <i32> [#uses=1]
- %tmp432 = trunc i32 %tmp428 to i8 ; <i8> [#uses=1]
- %tmp = and i8 %tmp432, 42 ; <i8> [#uses=1]
- %tmp436 = bitcast i8 %tmp to i8 ; <i8> [#uses=1]
- %tmp446 = zext i8 %tmp436 to i32 ; <i32> [#uses=1]
- %tmp447 = shl i32 %tmp446, 24 ; <i32> [#uses=1]
- %tmp449 = or i32 0, %tmp447 ; <i32> [#uses=1]
- store i32 %tmp449, i32* null
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll
deleted file mode 100644
index 8783a11..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-apple-darwin8 -relocation-model=static > %t
-; RUN: grep {movl _last} %t | count 1
-; RUN: grep {cmpl.*_last} %t | count 1
-
-@block = external global i8* ; <i8**> [#uses=1]
-@last = external global i32 ; <i32*> [#uses=3]
-
-define i1 @loadAndRLEsource_no_exit_2E_1_label_2E_0(i32 %tmp.21.reload, i32 %tmp.8) {
-newFuncRoot:
- br label %label.0
-label.0.no_exit.1_crit_edge.exitStub: ; preds = %label.0
- ret i1 true
-codeRepl5.exitStub: ; preds = %label.0
- ret i1 false
-label.0: ; preds = %newFuncRoot
- %tmp.35 = load i32* @last ; <i32> [#uses=1]
- %inc.1 = add i32 %tmp.35, 1 ; <i32> [#uses=2]
- store i32 %inc.1, i32* @last
- %tmp.36 = load i8** @block ; <i8*> [#uses=1]
- %tmp.38 = getelementptr i8* %tmp.36, i32 %inc.1 ; <i8*> [#uses=1]
- %tmp.40 = trunc i32 %tmp.21.reload to i8 ; <i8> [#uses=1]
- store i8 %tmp.40, i8* %tmp.38
- %tmp.910 = load i32* @last ; <i32> [#uses=1]
- %tmp.1111 = icmp slt i32 %tmp.910, %tmp.8 ; <i1> [#uses=1]
- %tmp.1412 = icmp ne i32 %tmp.21.reload, 257 ; <i1> [#uses=1]
- %tmp.1613 = and i1 %tmp.1111, %tmp.1412 ; <i1> [#uses=1]
- br i1 %tmp.1613, label %label.0.no_exit.1_crit_edge.exitStub, label %codeRepl5.exitStub
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-01-SchedCausingSpills.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-05-01-SchedCausingSpills.ll
deleted file mode 100644
index b045329..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-01-SchedCausingSpills.ll
+++ /dev/null
@@ -1,76 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah -stats |& \
-; RUN: not grep {Number of register spills}
-; END.
-
-
-define i32 @foo(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c, <4 x float>* %d) {
- %tmp44 = load <4 x float>* %a ; <<4 x float>> [#uses=9]
- %tmp46 = load <4 x float>* %b ; <<4 x float>> [#uses=1]
- %tmp48 = load <4 x float>* %c ; <<4 x float>> [#uses=1]
- %tmp50 = load <4 x float>* %d ; <<4 x float>> [#uses=1]
- %tmp51 = bitcast <4 x float> %tmp44 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp = shufflevector <4 x i32> %tmp51, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>> [#uses=2]
- %tmp52 = bitcast <4 x i32> %tmp to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp60 = xor <4 x i32> %tmp, < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 > ; <<4 x i32>> [#uses=1]
- %tmp61 = bitcast <4 x i32> %tmp60 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp74 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp52, <4 x float> %tmp44, i8 1 ) ; <<4 x float>> [#uses=1]
- %tmp75 = bitcast <4 x float> %tmp74 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp88 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp44, <4 x float> %tmp61, i8 1 ) ; <<4 x float>> [#uses=1]
- %tmp89 = bitcast <4 x float> %tmp88 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp98 = tail call <8 x i16> @llvm.x86.sse2.packssdw.128( <4 x i32> %tmp75, <4 x i32> %tmp89 ) ; <<4 x i32>> [#uses=1]
- %tmp102 = bitcast <8 x i16> %tmp98 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp.upgrd.1 = shufflevector <8 x i16> %tmp102, <8 x i16> undef, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 6, i32 5, i32 4, i32 7 > ; <<8 x i16>> [#uses=1]
- %tmp105 = shufflevector <8 x i16> %tmp.upgrd.1, <8 x i16> undef, <8 x i32> < i32 2, i32 1, i32 0, i32 3, i32 4, i32 5, i32 6, i32 7 > ; <<8 x i16>> [#uses=1]
- %tmp105.upgrd.2 = bitcast <8 x i16> %tmp105 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp105.upgrd.2, <4 x float>* %a
- %tmp108 = bitcast <4 x float> %tmp46 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp109 = shufflevector <4 x i32> %tmp108, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>> [#uses=2]
- %tmp109.upgrd.3 = bitcast <4 x i32> %tmp109 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp119 = xor <4 x i32> %tmp109, < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 > ; <<4 x i32>> [#uses=1]
- %tmp120 = bitcast <4 x i32> %tmp119 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp133 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp109.upgrd.3, <4 x float> %tmp44, i8 1 ) ; <<4 x float>> [#uses=1]
- %tmp134 = bitcast <4 x float> %tmp133 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp147 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp44, <4 x float> %tmp120, i8 1 ) ; <<4 x float>> [#uses=1]
- %tmp148 = bitcast <4 x float> %tmp147 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp159 = tail call <8 x i16> @llvm.x86.sse2.packssdw.128( <4 x i32> %tmp134, <4 x i32> %tmp148 ) ; <<4 x i32>> [#uses=1]
- %tmp163 = bitcast <8 x i16> %tmp159 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp164 = shufflevector <8 x i16> %tmp163, <8 x i16> undef, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 6, i32 5, i32 4, i32 7 > ; <<8 x i16>> [#uses=1]
- %tmp166 = shufflevector <8 x i16> %tmp164, <8 x i16> undef, <8 x i32> < i32 2, i32 1, i32 0, i32 3, i32 4, i32 5, i32 6, i32 7 > ; <<8 x i16>> [#uses=1]
- %tmp166.upgrd.4 = bitcast <8 x i16> %tmp166 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp166.upgrd.4, <4 x float>* %b
- %tmp169 = bitcast <4 x float> %tmp48 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp170 = shufflevector <4 x i32> %tmp169, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>> [#uses=2]
- %tmp170.upgrd.5 = bitcast <4 x i32> %tmp170 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp180 = xor <4 x i32> %tmp170, < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 > ; <<4 x i32>> [#uses=1]
- %tmp181 = bitcast <4 x i32> %tmp180 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp194 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp170.upgrd.5, <4 x float> %tmp44, i8 1 ) ; <<4 x float>> [#uses=1]
- %tmp195 = bitcast <4 x float> %tmp194 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp208 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp44, <4 x float> %tmp181, i8 1 ) ; <<4 x float>> [#uses=1]
- %tmp209 = bitcast <4 x float> %tmp208 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp220 = tail call <8 x i16> @llvm.x86.sse2.packssdw.128( <4 x i32> %tmp195, <4 x i32> %tmp209 ) ; <<4 x i32>> [#uses=1]
- %tmp224 = bitcast <8 x i16> %tmp220 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp225 = shufflevector <8 x i16> %tmp224, <8 x i16> undef, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 6, i32 5, i32 4, i32 7 > ; <<8 x i16>> [#uses=1]
- %tmp227 = shufflevector <8 x i16> %tmp225, <8 x i16> undef, <8 x i32> < i32 2, i32 1, i32 0, i32 3, i32 4, i32 5, i32 6, i32 7 > ; <<8 x i16>> [#uses=1]
- %tmp227.upgrd.6 = bitcast <8 x i16> %tmp227 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp227.upgrd.6, <4 x float>* %c
- %tmp230 = bitcast <4 x float> %tmp50 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp231 = shufflevector <4 x i32> %tmp230, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>> [#uses=2]
- %tmp231.upgrd.7 = bitcast <4 x i32> %tmp231 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp241 = xor <4 x i32> %tmp231, < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 > ; <<4 x i32>> [#uses=1]
- %tmp242 = bitcast <4 x i32> %tmp241 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp255 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp231.upgrd.7, <4 x float> %tmp44, i8 1 ) ; <<4 x float>> [#uses=1]
- %tmp256 = bitcast <4 x float> %tmp255 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp269 = tail call <4 x float> @llvm.x86.sse.cmp.ps( <4 x float> %tmp44, <4 x float> %tmp242, i8 1 ) ; <<4 x float>> [#uses=1]
- %tmp270 = bitcast <4 x float> %tmp269 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp281 = tail call <8 x i16> @llvm.x86.sse2.packssdw.128( <4 x i32> %tmp256, <4 x i32> %tmp270 ) ; <<4 x i32>> [#uses=1]
- %tmp285 = bitcast <8 x i16> %tmp281 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp286 = shufflevector <8 x i16> %tmp285, <8 x i16> undef, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 6, i32 5, i32 4, i32 7 > ; <<8 x i16>> [#uses=1]
- %tmp288 = shufflevector <8 x i16> %tmp286, <8 x i16> undef, <8 x i32> < i32 2, i32 1, i32 0, i32 3, i32 4, i32 5, i32 6, i32 7 > ; <<8 x i16>> [#uses=1]
- %tmp288.upgrd.8 = bitcast <8 x i16> %tmp288 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp288.upgrd.8, <4 x float>* %d
- ret i32 0
-}
-
-declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8)
-
-declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll
deleted file mode 100644
index 7d0a6ab..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86 -relocation-model=static -stats |& \
-; RUN: grep asm-printer | grep 14
-;
-@size20 = external global i32 ; <i32*> [#uses=1]
-@in5 = external global i8* ; <i8**> [#uses=1]
-
-define i32 @compare(i8* %a, i8* %b) nounwind {
- %tmp = bitcast i8* %a to i32* ; <i32*> [#uses=1]
- %tmp1 = bitcast i8* %b to i32* ; <i32*> [#uses=1]
- %tmp.upgrd.1 = load i32* @size20 ; <i32> [#uses=1]
- %tmp.upgrd.2 = load i8** @in5 ; <i8*> [#uses=2]
- %tmp3 = load i32* %tmp1 ; <i32> [#uses=1]
- %gep.upgrd.3 = zext i32 %tmp3 to i64 ; <i64> [#uses=1]
- %tmp4 = getelementptr i8* %tmp.upgrd.2, i64 %gep.upgrd.3 ; <i8*> [#uses=2]
- %tmp7 = load i32* %tmp ; <i32> [#uses=1]
- %gep.upgrd.4 = zext i32 %tmp7 to i64 ; <i64> [#uses=1]
- %tmp8 = getelementptr i8* %tmp.upgrd.2, i64 %gep.upgrd.4 ; <i8*> [#uses=2]
- %tmp.upgrd.5 = tail call i32 @memcmp( i8* %tmp8, i8* %tmp4, i32 %tmp.upgrd.1 ) ; <i32> [#uses=1]
- ret i32 %tmp.upgrd.5
-}
-
-declare i32 @memcmp(i8*, i8*, i32)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-02-InstrSched2.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-05-02-InstrSched2.ll
deleted file mode 100644
index 23954d7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-02-InstrSched2.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 -stats |& \
-; RUN: grep asm-printer | grep 13
-
-define void @_ZN9__gnu_cxx9hashtableISt4pairIKPKciES3_NS_4hashIS3_EESt10_Select1stIS5_E5eqstrSaIiEE14find_or_insertERKS5__cond_true456.i(i8* %tmp435.i, i32* %tmp449.i.out) nounwind {
-newFuncRoot:
- br label %cond_true456.i
-bb459.i.exitStub: ; preds = %cond_true456.i
- store i32 %tmp449.i, i32* %tmp449.i.out
- ret void
-cond_true456.i: ; preds = %cond_true456.i, %newFuncRoot
- %__s441.2.4.i = phi i8* [ %tmp451.i.upgrd.1, %cond_true456.i ], [ %tmp435.i, %newFuncRoot ] ; <i8*> [#uses=2]
- %__h.2.4.i = phi i32 [ %tmp449.i, %cond_true456.i ], [ 0, %newFuncRoot ] ; <i32> [#uses=1]
- %tmp446.i = mul i32 %__h.2.4.i, 5 ; <i32> [#uses=1]
- %tmp.i = load i8* %__s441.2.4.i ; <i8> [#uses=1]
- %tmp448.i = sext i8 %tmp.i to i32 ; <i32> [#uses=1]
- %tmp449.i = add i32 %tmp448.i, %tmp446.i ; <i32> [#uses=2]
- %tmp450.i = ptrtoint i8* %__s441.2.4.i to i32 ; <i32> [#uses=1]
- %tmp451.i = add i32 %tmp450.i, 1 ; <i32> [#uses=1]
- %tmp451.i.upgrd.1 = inttoptr i32 %tmp451.i to i8* ; <i8*> [#uses=2]
- %tmp45435.i = load i8* %tmp451.i.upgrd.1 ; <i8> [#uses=1]
- %tmp45536.i = icmp eq i8 %tmp45435.i, 0 ; <i1> [#uses=1]
- br i1 %tmp45536.i, label %bb459.i.exitStub, label %cond_true456.i
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-08-CoalesceSubRegClass.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-05-08-CoalesceSubRegClass.ll
deleted file mode 100644
index 8421483..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-08-CoalesceSubRegClass.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; Coalescing from R32 to a subset R32_. Once another register coalescer bug is
-; fixed, the movb should go away as well.
-
-; RUN: llc < %s -march=x86 -relocation-model=static | \
-; RUN: grep movl
-
-@B = external global i32 ; <i32*> [#uses=2]
-@C = external global i16* ; <i16**> [#uses=2]
-
-define void @test(i32 %A) {
- %A.upgrd.1 = trunc i32 %A to i8 ; <i8> [#uses=1]
- %tmp2 = load i32* @B ; <i32> [#uses=1]
- %tmp3 = and i8 %A.upgrd.1, 16 ; <i8> [#uses=1]
- %shift.upgrd.2 = zext i8 %tmp3 to i32 ; <i32> [#uses=1]
- %tmp4 = shl i32 %tmp2, %shift.upgrd.2 ; <i32> [#uses=1]
- store i32 %tmp4, i32* @B
- %tmp6 = lshr i32 %A, 3 ; <i32> [#uses=1]
- %tmp = load i16** @C ; <i16*> [#uses=1]
- %tmp8 = ptrtoint i16* %tmp to i32 ; <i32> [#uses=1]
- %tmp9 = add i32 %tmp8, %tmp6 ; <i32> [#uses=1]
- %tmp9.upgrd.3 = inttoptr i32 %tmp9 to i16* ; <i16*> [#uses=1]
- store i16* %tmp9.upgrd.3, i16** @C
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-08-InstrSched.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-05-08-InstrSched.ll
deleted file mode 100644
index d58d638..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-08-InstrSched.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86 -relocation-model=static | not grep {subl.*%esp}
-
-@A = external global i16* ; <i16**> [#uses=1]
-@B = external global i32 ; <i32*> [#uses=1]
-@C = external global i32 ; <i32*> [#uses=2]
-
-define void @test() {
- %tmp = load i16** @A ; <i16*> [#uses=1]
- %tmp1 = getelementptr i16* %tmp, i32 1 ; <i16*> [#uses=1]
- %tmp.upgrd.1 = load i16* %tmp1 ; <i16> [#uses=1]
- %tmp3 = zext i16 %tmp.upgrd.1 to i32 ; <i32> [#uses=1]
- %tmp.upgrd.2 = load i32* @B ; <i32> [#uses=1]
- %tmp4 = and i32 %tmp.upgrd.2, 16 ; <i32> [#uses=1]
- %tmp5 = load i32* @C ; <i32> [#uses=1]
- %tmp6 = trunc i32 %tmp4 to i8 ; <i8> [#uses=2]
- %shift.upgrd.3 = zext i8 %tmp6 to i32 ; <i32> [#uses=1]
- %tmp7 = shl i32 %tmp5, %shift.upgrd.3 ; <i32> [#uses=1]
- %tmp9 = xor i8 %tmp6, 16 ; <i8> [#uses=1]
- %shift.upgrd.4 = zext i8 %tmp9 to i32 ; <i32> [#uses=1]
- %tmp11 = lshr i32 %tmp3, %shift.upgrd.4 ; <i32> [#uses=1]
- %tmp12 = or i32 %tmp11, %tmp7 ; <i32> [#uses=1]
- store i32 %tmp12, i32* @C
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-11-InstrSched.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-05-11-InstrSched.ll
deleted file mode 100644
index 56d6aa9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-11-InstrSched.ll
+++ /dev/null
@@ -1,51 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -stats -realign-stack=0 |&\
-; RUN: grep {asm-printer} | grep 34
-
-target datalayout = "e-p:32:32"
-define void @foo(i32* %mc, i32* %bp, i32* %ms, i32* %xmb, i32* %mpp, i32* %tpmm, i32* %ip, i32* %tpim, i32* %dpp, i32* %tpdm, i32* %bpi, i32 %M) nounwind {
-entry:
- %tmp9 = icmp slt i32 %M, 5 ; <i1> [#uses=1]
- br i1 %tmp9, label %return, label %cond_true
-
-cond_true: ; preds = %cond_true, %entry
- %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %cond_true ] ; <i32> [#uses=2]
- %tmp. = shl i32 %indvar, 2 ; <i32> [#uses=1]
- %tmp.10 = add nsw i32 %tmp., 1 ; <i32> [#uses=2]
- %tmp31 = add nsw i32 %tmp.10, -1 ; <i32> [#uses=4]
- %tmp32 = getelementptr i32* %mpp, i32 %tmp31 ; <i32*> [#uses=1]
- %tmp34 = bitcast i32* %tmp32 to <16 x i8>* ; <i8*> [#uses=1]
- %tmp = load <16 x i8>* %tmp34, align 1
- %tmp42 = getelementptr i32* %tpmm, i32 %tmp31 ; <i32*> [#uses=1]
- %tmp42.upgrd.1 = bitcast i32* %tmp42 to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %tmp46 = load <4 x i32>* %tmp42.upgrd.1 ; <<4 x i32>> [#uses=1]
- %tmp54 = bitcast <16 x i8> %tmp to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp55 = add <4 x i32> %tmp54, %tmp46 ; <<4 x i32>> [#uses=2]
- %tmp55.upgrd.2 = bitcast <4 x i32> %tmp55 to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp62 = getelementptr i32* %ip, i32 %tmp31 ; <i32*> [#uses=1]
- %tmp65 = bitcast i32* %tmp62 to <16 x i8>* ; <i8*> [#uses=1]
- %tmp66 = load <16 x i8>* %tmp65, align 1
- %tmp73 = getelementptr i32* %tpim, i32 %tmp31 ; <i32*> [#uses=1]
- %tmp73.upgrd.3 = bitcast i32* %tmp73 to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %tmp77 = load <4 x i32>* %tmp73.upgrd.3 ; <<4 x i32>> [#uses=1]
- %tmp87 = bitcast <16 x i8> %tmp66 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp88 = add <4 x i32> %tmp87, %tmp77 ; <<4 x i32>> [#uses=2]
- %tmp88.upgrd.4 = bitcast <4 x i32> %tmp88 to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp99 = tail call <4 x i32> @llvm.x86.sse2.pcmpgt.d( <4 x i32> %tmp88, <4 x i32> %tmp55 ) ; <<4 x i32>> [#uses=1]
- %tmp99.upgrd.5 = bitcast <4 x i32> %tmp99 to <2 x i64> ; <<2 x i64>> [#uses=2]
- %tmp110 = xor <2 x i64> %tmp99.upgrd.5, < i64 -1, i64 -1 > ; <<2 x i64>> [#uses=1]
- %tmp111 = and <2 x i64> %tmp110, %tmp55.upgrd.2 ; <<2 x i64>> [#uses=1]
- %tmp121 = and <2 x i64> %tmp99.upgrd.5, %tmp88.upgrd.4 ; <<2 x i64>> [#uses=1]
- %tmp131 = or <2 x i64> %tmp121, %tmp111 ; <<2 x i64>> [#uses=1]
- %tmp137 = getelementptr i32* %mc, i32 %tmp.10 ; <i32*> [#uses=1]
- %tmp137.upgrd.7 = bitcast i32* %tmp137 to <2 x i64>* ; <<2 x i64>*> [#uses=1]
- store <2 x i64> %tmp131, <2 x i64>* %tmp137.upgrd.7
- %tmp147 = add nsw i32 %tmp.10, 8 ; <i32> [#uses=1]
- %tmp.upgrd.8 = icmp ne i32 %tmp147, %M ; <i1> [#uses=1]
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
- br i1 %tmp.upgrd.8, label %cond_true, label %return
-
-return: ; preds = %cond_true, %entry
- ret void
-}
-
-declare <4 x i32> @llvm.x86.sse2.pcmpgt.d(<4 x i32>, <4 x i32>)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-17-VectorArg.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-05-17-VectorArg.ll
deleted file mode 100644
index b36d61e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-17-VectorArg.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-
-define <4 x float> @opRSQ(<4 x float> %a) nounwind {
-entry:
- %tmp2 = extractelement <4 x float> %a, i32 3 ; <float> [#uses=2]
- %abscond = fcmp oge float %tmp2, -0.000000e+00 ; <i1> [#uses=1]
- %abs = select i1 %abscond, float %tmp2, float 0.000000e+00 ; <float> [#uses=1]
- %tmp3 = tail call float @llvm.sqrt.f32( float %abs ) ; <float> [#uses=1]
- %tmp4 = fdiv float 1.000000e+00, %tmp3 ; <float> [#uses=1]
- %tmp11 = insertelement <4 x float> zeroinitializer, float %tmp4, i32 3 ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp11
-}
-
-declare float @llvm.sqrt.f32(float)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
deleted file mode 100644
index 083d068..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 | grep setnp
-; RUN: llc < %s -march=x86 -enable-unsafe-fp-math | \
-; RUN: not grep setnp
-
-define i32 @test(float %f) {
- %tmp = fcmp oeq float %f, 0.000000e+00 ; <i1> [#uses=1]
- %tmp.upgrd.1 = zext i1 %tmp to i32 ; <i32> [#uses=1]
- ret i32 %tmp.upgrd.1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-25-CycleInDAG.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-05-25-CycleInDAG.ll
deleted file mode 100644
index 0288278..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-05-25-CycleInDAG.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define i32 @test() {
- br i1 false, label %cond_next33, label %cond_true12
-cond_true12: ; preds = %0
- ret i32 0
-cond_next33: ; preds = %0
- %tmp44.i = call double @foo( double 0.000000e+00, i32 32 ) ; <double> [#uses=1]
- %tmp61.i = load i8* null ; <i8> [#uses=1]
- %tmp61.i.upgrd.1 = zext i8 %tmp61.i to i32 ; <i32> [#uses=1]
- %tmp58.i = or i32 0, %tmp61.i.upgrd.1 ; <i32> [#uses=1]
- %tmp62.i = or i32 %tmp58.i, 0 ; <i32> [#uses=1]
- %tmp62.i.upgrd.2 = sitofp i32 %tmp62.i to double ; <double> [#uses=1]
- %tmp64.i = fadd double %tmp62.i.upgrd.2, %tmp44.i ; <double> [#uses=1]
- %tmp68.i = call double @foo( double %tmp64.i, i32 0 ) ; <double> [#uses=0]
- ret i32 0
-}
-
-declare double @foo(double, i32)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-07-10-InlineAsmAConstraint.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-07-10-InlineAsmAConstraint.ll
deleted file mode 100644
index 4ea364d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-07-10-InlineAsmAConstraint.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR825
-
-define i64 @test() {
- %tmp.i5 = call i64 asm sideeffect "rdtsc", "=A,~{dirflag},~{fpsr},~{flags}"( ) ; <i64> [#uses=1]
- ret i64 %tmp.i5
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-07-12-InlineAsmQConstraint.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-07-12-InlineAsmQConstraint.ll
deleted file mode 100644
index 568fbbc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-07-12-InlineAsmQConstraint.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR828
-
-target datalayout = "e-p:32:32"
-target triple = "i686-pc-linux-gnu"
-
-define void @_ZN5() {
-cond_true9:
- %tmp3.i.i = call i32 asm sideeffect "lock; cmpxchg $1,$2", "={ax},q,m,0,~{dirflag},~{fpsr},~{flags},~{memory}"( i32 0, i32* null, i32 0 ) ; <i32> [#uses=0]
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-07-19-ATTAsm.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-07-19-ATTAsm.ll
deleted file mode 100644
index c8fd10f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-07-19-ATTAsm.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=att
-; PR834
-; END.
-
-target datalayout = "e-p:32:32"
-target triple = "i386-unknown-freebsd6.1"
- %llvm.dbg.anchor.type = type { i32, i32 }
- %llvm.dbg.basictype.type = type { i32, { }*, i8*, { }*, i32, i64, i64, i64, i32, i32 }
- %llvm.dbg.compile_unit.type = type { i32, { }*, i32, i8*, i8*, i8* }
- %llvm.dbg.global_variable.type = type { i32, { }*, { }*, i8*, i8 *, i8*, { }*, i32, { }*, i1, i1, { }* }
-@x = global i32 0 ; <i32*> [#uses=1]
-@llvm.dbg.global_variable = internal constant %llvm.dbg.global_variable.type {
- i32 327732,
- { }* bitcast (%llvm.dbg.anchor.type* @llvm.dbg.global_variables to { }*),
- { }* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to { }*),
- i8* getelementptr ([2 x i8]* @str, i64 0, i64 0),
- i8* getelementptr ([2 x i8]* @str, i64 0, i64 0),
- i8* null,
- { }* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to { }*),
- i32 1,
- { }* bitcast (%llvm.dbg.basictype.type* @llvm.dbg.basictype to { }*),
- i1 false,
- i1 true,
- { }* bitcast (i32* @x to { }*) }, section "llvm.metadata" ; <%llvm.dbg.global_variable.type*> [#uses=0]
-@llvm.dbg.global_variables = linkonce constant %llvm.dbg.anchor.type { i32 327680, i32 52 }, section "llvm.metadata" ; <%llvm.dbg.anchor.type*> [#uses=1]
-@llvm.dbg.compile_unit = internal constant %llvm.dbg.compile_unit.type {
- i32 327697,
- { }* bitcast (%llvm.dbg.anchor.type* @llvm.dbg.compile_units to { }*),
- i32 4,
- i8* getelementptr ([10 x i8]* @str1, i64 0, i64 0),
- i8* getelementptr ([32 x i8]* @str2, i64 0, i64 0),
- i8* getelementptr ([45 x i8]* @str3, i64 0, i64 0) }, section "llvm.metadata" ; <%llvm.dbg.compile_unit.type*> [#uses=1]
-@llvm.dbg.compile_units = linkonce constant %llvm.dbg.anchor.type { i32 327680, i32 17 }, section "llvm.metadata" ; <%llvm.dbg.anchor.type*> [#uses=1]
-@str1 = internal constant [10 x i8] c"testb.cpp\00", section "llvm.metadata" ; <[10 x i8]*> [#uses=1]
-@str2 = internal constant [32 x i8] c"/Sources/Projects/DwarfTesting/\00", section "llvm.metadata" ; <[32 x i8]*> [#uses=1]
-@str3 = internal constant [45 x i8] c"4.0.1 LLVM (Apple Computer, Inc. build 5400)\00", section "llvm.metadata" ; <[45 x i8]*> [#uses=1]
-@str = internal constant [2 x i8] c"x\00", section "llvm.metadata" ; <[2 x i8]*> [#uses=1]
-@llvm.dbg.basictype = internal constant %llvm.dbg.basictype.type {
- i32 327716,
- { }* bitcast (%llvm.dbg.compile_unit.type* @llvm.dbg.compile_unit to { }*),
- i8* getelementptr ([4 x i8]* @str4, i64 0, i64 0),
- { }* null,
- i32 0,
- i64 32,
- i64 32,
- i64 0,
- i32 0,
- i32 5 }, section "llvm.metadata" ; <%llvm.dbg.basictype.type*> [#uses=1]
-@str4 = internal constant [4 x i8] c"int\00", section "llvm.metadata" ; <[4 x i8]*> [#uses=1]
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-07-20-InlineAsm.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-07-20-InlineAsm.ll
deleted file mode 100644
index cac47cd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-07-20-InlineAsm.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR833
-
-@G = weak global i32 0 ; <i32*> [#uses=3]
-
-define i32 @foo(i32 %X) {
-entry:
- %X_addr = alloca i32 ; <i32*> [#uses=3]
- store i32 %X, i32* %X_addr
- call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,m,1,~{dirflag},~{fpsr},~{flags}"( i32* @G, i32* %X_addr, i32* @G, i32 %X )
- %tmp1 = load i32* %X_addr ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
-define i32 @foo2(i32 %X) {
-entry:
- %X_addr = alloca i32 ; <i32*> [#uses=3]
- store i32 %X, i32* %X_addr
- call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,1,~{dirflag},~{fpsr},~{flags}"( i32* @G, i32* %X_addr, i32 %X )
- %tmp1 = load i32* %X_addr ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-07-28-AsmPrint-Long-As-Pointer.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-07-28-AsmPrint-Long-As-Pointer.ll
deleted file mode 100644
index deae086..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-07-28-AsmPrint-Long-As-Pointer.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s -march=x86 | grep -- 4294967240
-; PR853
-
-@X = global i32* inttoptr (i64 -56 to i32*) ; <i32**> [#uses=0]
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-07-31-SingleRegClass.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-07-31-SingleRegClass.ll
deleted file mode 100644
index 3159cec..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-07-31-SingleRegClass.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; PR850
-; RUN: llc < %s -march=x86 -x86-asm-syntax=att > %t
-; RUN: grep {movl 4(%eax),%ebp} %t
-; RUN: grep {movl 0(%eax), %ebx} %t
-
-define i32 @foo(i32 %__s.i.i, i32 %tmp5.i.i, i32 %tmp6.i.i, i32 %tmp7.i.i, i32 %tmp8.i.i) {
- %tmp9.i.i = call i32 asm sideeffect "push %ebp\0Apush %ebx\0Amovl 4($2),%ebp\0Amovl 0($2), %ebx\0Amovl $1,%eax\0Aint $$0x80\0Apop %ebx\0Apop %ebp", "={ax},i,0,{cx},{dx},{si},{di}"( i32 192, i32 %__s.i.i, i32 %tmp5.i.i, i32 %tmp6.i.i, i32 %tmp7.i.i, i32 %tmp8.i.i ) ; <i32> [#uses=1]
- ret i32 %tmp9.i.i
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-08-07-CycleInDAG.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-08-07-CycleInDAG.ll
deleted file mode 100644
index aea707e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-08-07-CycleInDAG.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
- %struct.foo = type opaque
-
-define fastcc i32 @test(%struct.foo* %v, %struct.foo* %vi) {
- br i1 false, label %ilog2.exit, label %cond_true.i
-
-cond_true.i: ; preds = %0
- ret i32 0
-
-ilog2.exit: ; preds = %0
- %tmp24.i = load i32* null ; <i32> [#uses=1]
- %tmp13.i12.i = tail call double @ldexp( double 0.000000e+00, i32 0 ) ; <double> [#uses=1]
- %tmp13.i13.i = fptrunc double %tmp13.i12.i to float ; <float> [#uses=1]
- %tmp11.s = load i32* null ; <i32> [#uses=1]
- %tmp11.i = bitcast i32 %tmp11.s to i32 ; <i32> [#uses=1]
- %n.i = bitcast i32 %tmp24.i to i32 ; <i32> [#uses=1]
- %tmp13.i7 = mul i32 %tmp11.i, %n.i ; <i32> [#uses=1]
- %tmp.i8 = tail call i8* @calloc( i32 %tmp13.i7, i32 4 ) ; <i8*> [#uses=0]
- br i1 false, label %bb224.preheader.i, label %bb.i
-
-bb.i: ; preds = %ilog2.exit
- ret i32 0
-
-bb224.preheader.i: ; preds = %ilog2.exit
- %tmp165.i = fpext float %tmp13.i13.i to double ; <double> [#uses=0]
- ret i32 0
-}
-
-declare i8* @calloc(i32, i32)
-
-declare double @ldexp(double, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-08-16-CycleInDAG.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-08-16-CycleInDAG.ll
deleted file mode 100644
index 5fee326..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-08-16-CycleInDAG.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86
- %struct.expr = type { %struct.rtx_def*, i32, %struct.expr*, %struct.occr*, %struct.occr*, %struct.rtx_def* }
- %struct.hash_table = type { %struct.expr**, i32, i32, i32 }
- %struct.occr = type { %struct.occr*, %struct.rtx_def*, i8, i8 }
- %struct.rtx_def = type { i16, i8, i8, %struct.u }
- %struct.u = type { [1 x i64] }
-
-define void @test() {
- %tmp = load i32* null ; <i32> [#uses=1]
- %tmp8 = call i32 @hash_rtx( ) ; <i32> [#uses=1]
- %tmp11 = urem i32 %tmp8, %tmp ; <i32> [#uses=1]
- br i1 false, label %cond_next, label %return
-
-cond_next: ; preds = %0
- %gep.upgrd.1 = zext i32 %tmp11 to i64 ; <i64> [#uses=1]
- %tmp17 = getelementptr %struct.expr** null, i64 %gep.upgrd.1 ; <%struct.expr**> [#uses=0]
- ret void
-
-return: ; preds = %0
- ret void
-}
-
-declare i32 @hash_rtx()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-08-21-ExtraMovInst.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-08-21-ExtraMovInst.ll
deleted file mode 100644
index a19d8f7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-08-21-ExtraMovInst.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=i386 | \
-; RUN: not grep {movl %eax, %edx}
-
-define i32 @foo(i32 %t, i32 %C) {
-entry:
- br label %cond_true
-
-cond_true: ; preds = %cond_true, %entry
- %t_addr.0.0 = phi i32 [ %t, %entry ], [ %tmp7, %cond_true ] ; <i32> [#uses=2]
- %tmp7 = add i32 %t_addr.0.0, 1 ; <i32> [#uses=1]
- %tmp = icmp sgt i32 %C, 39 ; <i1> [#uses=1]
- br i1 %tmp, label %bb12, label %cond_true
-
-bb12: ; preds = %cond_true
- ret i32 %t_addr.0.0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-09-01-CycleInDAG.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-09-01-CycleInDAG.ll
deleted file mode 100644
index 1e890bb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-09-01-CycleInDAG.ll
+++ /dev/null
@@ -1,131 +0,0 @@
-; RUN: llc < %s -march=x86
-target datalayout = "e-p:32:32"
-target triple = "i686-apple-darwin8"
- %struct.CUMULATIVE_ARGS = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
- %struct.VEC_edge = type { i32, i32, [1 x %struct.edge_def*] }
- %struct.VEC_tree = type { i32, i32, [1 x %struct.tree_node*] }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
- %struct._obstack_chunk = type { i8*, %struct._obstack_chunk*, [4 x i8] }
- %struct._var_map = type { %struct.partition_def*, i32*, i32*, %struct.tree_node**, i32, i32, i32* }
- %struct.basic_block_def = type { %struct.rtx_def*, %struct.rtx_def*, %struct.tree_node*, %struct.VEC_edge*, %struct.VEC_edge*, %struct.bitmap_head_def*, %struct.bitmap_head_def*, i8*, %struct.loop*, [2 x %struct.et_node*], %struct.basic_block_def*, %struct.basic_block_def*, %struct.reorder_block_def*, %struct.bb_ann_d*, i64, i32, i32, i32, i32 }
- %struct.bb_ann_d = type { %struct.tree_node*, i8, %struct.edge_prediction* }
- %struct.bitmap_element_def = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, i32, [4 x i32] }
- %struct.bitmap_head_def = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, i32, %struct.bitmap_obstack* }
- %struct.bitmap_iterator = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, i32, i32 }
- %struct.bitmap_obstack = type { %struct.bitmap_element_def*, %struct.bitmap_head_def*, %struct.obstack }
- %struct.block_stmt_iterator = type { %struct.tree_stmt_iterator, %struct.basic_block_def* }
- %struct.coalesce_list_d = type { %struct._var_map*, %struct.partition_pair_d**, i1 }
- %struct.conflict_graph_def = type opaque
- %struct.dataflow_d = type { %struct.varray_head_tag*, [2 x %struct.tree_node*] }
- %struct.def_operand_ptr = type { %struct.tree_node** }
- %struct.def_optype_d = type { i32, [1 x %struct.def_operand_ptr] }
- %struct.die_struct = type opaque
- %struct.edge_def = type { %struct.basic_block_def*, %struct.basic_block_def*, %struct.edge_def_insns, i8*, %struct.__sbuf*, i32, i32, i64, i32 }
- %struct.edge_def_insns = type { %struct.rtx_def* }
- %struct.edge_iterator = type { i32, %struct.VEC_edge** }
- %struct.edge_prediction = type { %struct.edge_prediction*, %struct.edge_def*, i32, i32 }
- %struct.eh_status = type opaque
- %struct.elt_list = type opaque
- %struct.emit_status = type { i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack*, i32, %struct.__sbuf, i32, i8*, %struct.rtx_def** }
- %struct.et_node = type opaque
- %struct.expr_status = type { i32, i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
- %struct.function = type { %struct.eh_status*, %struct.expr_status*, %struct.emit_status*, %struct.varasm_status*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.function*, i32, i32, i32, i32, %struct.rtx_def*, %struct.CUMULATIVE_ARGS, %struct.rtx_def*, %struct.rtx_def*, %struct.initial_value_struct*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, i8, i32, i64, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct.varray_head_tag*, %struct.temp_slot*, i32, %struct.var_refs_queue*, i32, i32, %struct.rtvec_def*, %struct.tree_node*, i32, i32, i32, %struct.machine_function*, i32, i32, i1, i1, %struct.language_function*, %struct.rtx_def*, i32, i32, i32, i32, %struct.__sbuf, %struct.varray_head_tag*, %struct.tree_node*, i8, i8, i8 }
- %struct.ht_identifier = type { i8*, i32, i32 }
- %struct.initial_value_struct = type opaque
- %struct.lang_decl = type opaque
- %struct.lang_type = type opaque
- %struct.language_function = type opaque
- %struct.location_t = type { i8*, i32 }
- %struct.loop = type opaque
- %struct.machine_function = type { i32, i32, i8*, i32, i32 }
- %struct.obstack = type { i32, %struct._obstack_chunk*, i8*, i8*, i8*, i32, i32, %struct._obstack_chunk* (i8*, i32)*, void (i8*, %struct._obstack_chunk*)*, i8*, i8 }
- %struct.partition_def = type { i32, [1 x %struct.partition_elem] }
- %struct.partition_elem = type { i32, %struct.partition_elem*, i32 }
- %struct.partition_pair_d = type { i32, i32, i32, %struct.partition_pair_d* }
- %struct.phi_arg_d = type { %struct.tree_node*, i1 }
- %struct.pointer_set_t = type opaque
- %struct.ptr_info_def = type { i8, %struct.bitmap_head_def*, %struct.tree_node* }
- %struct.real_value = type opaque
- %struct.reg_info_def = type opaque
- %struct.reorder_block_def = type { %struct.rtx_def*, %struct.rtx_def*, %struct.basic_block_def*, %struct.basic_block_def*, %struct.basic_block_def*, i32, i32, i32 }
- %struct.rtvec_def = type opaque
- %struct.rtx_def = type opaque
- %struct.sequence_stack = type { %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack* }
- %struct.simple_bitmap_def = type { i32, i32, i32, [1 x i64] }
- %struct.ssa_op_iter = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.stmt_operands_d*, i1 }
- %struct.stmt_ann_d = type { %struct.tree_ann_common_d, i8, %struct.basic_block_def*, %struct.stmt_operands_d, %struct.dataflow_d*, %struct.bitmap_head_def*, i32 }
- %struct.stmt_operands_d = type { %struct.def_optype_d*, %struct.def_optype_d*, %struct.v_may_def_optype_d*, %struct.vuse_optype_d*, %struct.v_may_def_optype_d* }
- %struct.temp_slot = type opaque
- %struct.tree_ann_common_d = type { i32, i8*, %struct.tree_node* }
- %struct.tree_ann_d = type { %struct.stmt_ann_d }
- %struct.tree_binfo = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.VEC_tree*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.VEC_tree }
- %struct.tree_block = type { %struct.tree_common, i8, [3 x i8], %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node* }
- %struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, %struct.tree_ann_d*, i8, i8, i8, i8, i8 }
- %struct.tree_complex = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node* }
- %struct.tree_decl = type { %struct.tree_common, %struct.__sbuf, i32, %struct.tree_node*, i8, i8, i8, i8, i8, i8, i8, i32, %struct.tree_decl_u1, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, i32, %struct.tree_decl_u2, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_decl* }
- %struct.tree_decl_u1 = type { i64 }
- %struct.tree_decl_u1_a = type { i32 }
- %struct.tree_decl_u2 = type { %struct.function* }
- %struct.tree_exp = type { %struct.tree_common, %struct.__sbuf*, i32, %struct.tree_node*, [1 x %struct.tree_node*] }
- %struct.tree_identifier = type { %struct.tree_common, %struct.ht_identifier }
- %struct.tree_int_cst = type { %struct.tree_common, %struct.tree_int_cst_lowhi }
- %struct.tree_int_cst_lowhi = type { i64, i64 }
- %struct.tree_list = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node* }
- %struct.tree_live_info_d = type { %struct._var_map*, %struct.bitmap_head_def*, %struct.bitmap_head_def**, i32, %struct.bitmap_head_def** }
- %struct.tree_node = type { %struct.tree_decl }
- %struct.tree_partition_associator_d = type { %struct.varray_head_tag*, %struct.varray_head_tag*, i32*, i32*, i32, i32, %struct._var_map* }
- %struct.tree_phi_node = type { %struct.tree_common, %struct.tree_node*, i32, i32, i32, %struct.basic_block_def*, %struct.dataflow_d*, [1 x %struct.phi_arg_d] }
- %struct.tree_real_cst = type { %struct.tree_common, %struct.real_value* }
- %struct.tree_ssa_name = type { %struct.tree_common, %struct.tree_node*, i32, %struct.ptr_info_def*, %struct.tree_node*, i8* }
- %struct.tree_statement_list = type { %struct.tree_common, %struct.tree_statement_list_node*, %struct.tree_statement_list_node* }
- %struct.tree_statement_list_node = type { %struct.tree_statement_list_node*, %struct.tree_statement_list_node*, %struct.tree_node* }
- %struct.tree_stmt_iterator = type { %struct.tree_statement_list_node*, %struct.tree_node* }
- %struct.tree_string = type { %struct.tree_common, i32, [1 x i8] }
- %struct.tree_type = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i32, i16, i8, i8, i32, %struct.tree_node*, %struct.tree_node*, %struct.tree_decl_u1_a, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_type* }
- %struct.tree_type_symtab = type { i32 }
- %struct.tree_value_handle = type { %struct.tree_common, %struct.value_set*, i32 }
- %struct.tree_vec = type { %struct.tree_common, i32, [1 x %struct.tree_node*] }
- %struct.tree_vector = type { %struct.tree_common, %struct.tree_node* }
- %struct.use_operand_ptr = type { %struct.tree_node** }
- %struct.use_optype_d = type { i32, [1 x %struct.def_operand_ptr] }
- %struct.v_def_use_operand_type_t = type { %struct.tree_node*, %struct.tree_node* }
- %struct.v_may_def_optype_d = type { i32, [1 x %struct.v_def_use_operand_type_t] }
- %struct.v_must_def_optype_d = type { i32, [1 x %struct.v_def_use_operand_type_t] }
- %struct.value_set = type opaque
- %struct.var_ann_d = type { %struct.tree_ann_common_d, i8, i8, %struct.tree_node*, %struct.varray_head_tag*, i32, i32, i32, %struct.tree_node*, %struct.tree_node* }
- %struct.var_refs_queue = type { %struct.rtx_def*, i32, i32, %struct.var_refs_queue* }
- %struct.varasm_status = type opaque
- %struct.varray_data = type { [1 x i64] }
- %struct.varray_head_tag = type { i32, i32, i32, i8*, %struct.varray_data }
- %struct.vuse_optype_d = type { i32, [1 x %struct.tree_node*] }
-@basic_block_info = external global %struct.varray_head_tag* ; <%struct.varray_head_tag**> [#uses=1]
-
-define void @calculate_live_on_entry_cond_true3632(%struct.varray_head_tag* %stack3023.6, i32* %tmp3629, %struct.VEC_edge*** %tmp3397.out) {
-newFuncRoot:
- br label %cond_true3632
-
-bb3502.exitStub: ; preds = %cond_true3632
- store %struct.VEC_edge** %tmp3397, %struct.VEC_edge*** %tmp3397.out
- ret void
-
-cond_true3632: ; preds = %newFuncRoot
- %tmp3378 = load i32* %tmp3629 ; <i32> [#uses=1]
- %tmp3379 = add i32 %tmp3378, -1 ; <i32> [#uses=1]
- %tmp3381 = getelementptr %struct.varray_head_tag* %stack3023.6, i32 0, i32 4 ; <%struct.varray_data*> [#uses=1]
- %tmp3382 = bitcast %struct.varray_data* %tmp3381 to [1 x i32]* ; <[1 x i32]*> [#uses=1]
- %gep.upgrd.1 = zext i32 %tmp3379 to i64 ; <i64> [#uses=1]
- %tmp3383 = getelementptr [1 x i32]* %tmp3382, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
- %tmp3384 = load i32* %tmp3383 ; <i32> [#uses=1]
- %tmp3387 = load i32* %tmp3629 ; <i32> [#uses=1]
- %tmp3388 = add i32 %tmp3387, -1 ; <i32> [#uses=1]
- store i32 %tmp3388, i32* %tmp3629
- %tmp3391 = load %struct.varray_head_tag** @basic_block_info ; <%struct.varray_head_tag*> [#uses=1]
- %tmp3393 = getelementptr %struct.varray_head_tag* %tmp3391, i32 0, i32 4 ; <%struct.varray_data*> [#uses=1]
- %tmp3394 = bitcast %struct.varray_data* %tmp3393 to [1 x %struct.basic_block_def*]* ; <[1 x %struct.basic_block_def*]*> [#uses=1]
- %tmp3395 = getelementptr [1 x %struct.basic_block_def*]* %tmp3394, i32 0, i32 %tmp3384 ; <%struct.basic_block_def**> [#uses=1]
- %tmp3396 = load %struct.basic_block_def** %tmp3395 ; <%struct.basic_block_def*> [#uses=1]
- %tmp3397 = getelementptr %struct.basic_block_def* %tmp3396, i32 0, i32 3 ; <%struct.VEC_edge**> [#uses=1]
- br label %bb3502.exitStub
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-10-02-BoolRetCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-10-02-BoolRetCrash.ll
deleted file mode 100644
index 795d464..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-10-02-BoolRetCrash.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s
-; PR933
-
-define fastcc i1 @test() {
- ret i1 true
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-10-07-ScalarSSEMiscompile.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-10-07-ScalarSSEMiscompile.ll
deleted file mode 100644
index d09d061..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-10-07-ScalarSSEMiscompile.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=sse | grep movaps
-; Test that the load is NOT folded into the intrinsic, which would zero the top
-; elts of the loaded vector.
-
-target datalayout = "e-p:32:32"
-target triple = "i686-apple-darwin8.7.2"
-
-define <4 x float> @test(<4 x float> %A, <4 x float>* %B) nounwind {
- %BV = load <4 x float>* %B ; <<4 x float>> [#uses=1]
- %tmp28 = tail call <4 x float> @llvm.x86.sse.sub.ss( <4 x float> %A, <4 x float> %BV ) ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp28
-}
-
-declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-10-09-CycleInDAG.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-10-09-CycleInDAG.ll
deleted file mode 100644
index fbb14ee..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-10-09-CycleInDAG.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define void @_ZN13QFSFileEngine4readEPcx() {
- %tmp201 = load i32* null ; <i32> [#uses=1]
- %tmp201.upgrd.1 = sext i32 %tmp201 to i64 ; <i64> [#uses=1]
- %tmp202 = load i64* null ; <i64> [#uses=1]
- %tmp203 = add i64 %tmp201.upgrd.1, %tmp202 ; <i64> [#uses=1]
- store i64 %tmp203, i64* null
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-10-10-FindModifiedNodeSlotBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-10-10-FindModifiedNodeSlotBug.ll
deleted file mode 100644
index b1f0451..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-10-10-FindModifiedNodeSlotBug.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -march=x86 | grep shrl
-; Bug in FindModifiedNodeSlot cause tmp14 load to become a zextload and shr 31
-; is then optimized away.
-@tree_code_type = external global [0 x i32] ; <[0 x i32]*> [#uses=1]
-
-define void @copy_if_shared_r() {
- %tmp = load i32* null ; <i32> [#uses=1]
- %tmp56 = and i32 %tmp, 255 ; <i32> [#uses=1]
- %gep.upgrd.1 = zext i32 %tmp56 to i64 ; <i64> [#uses=1]
- %tmp8 = getelementptr [0 x i32]* @tree_code_type, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
- %tmp9 = load i32* %tmp8 ; <i32> [#uses=1]
- %tmp10 = add i32 %tmp9, -1 ; <i32> [#uses=1]
- %tmp.upgrd.2 = icmp ugt i32 %tmp10, 2 ; <i1> [#uses=1]
- %tmp14 = load i32* null ; <i32> [#uses=1]
- %tmp15 = lshr i32 %tmp14, 31 ; <i32> [#uses=1]
- %tmp15.upgrd.3 = trunc i32 %tmp15 to i8 ; <i8> [#uses=1]
- %tmp16 = icmp ne i8 %tmp15.upgrd.3, 0 ; <i1> [#uses=1]
- br i1 %tmp.upgrd.2, label %cond_false25, label %cond_true
-cond_true: ; preds = %0
- br i1 %tmp16, label %cond_true17, label %cond_false
-cond_true17: ; preds = %cond_true
- ret void
-cond_false: ; preds = %cond_true
- ret void
-cond_false25: ; preds = %0
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-10-12-CycleInDAG.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-10-12-CycleInDAG.ll
deleted file mode 100644
index 3b987ac..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-10-12-CycleInDAG.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s -march=x86
- %struct.function = type opaque
- %struct.lang_decl = type opaque
- %struct.location_t = type { i8*, i32 }
- %struct.rtx_def = type opaque
- %struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, %union.tree_ann_d*, i8, i8, i8, i8, i8 }
- %struct.tree_decl = type { %struct.tree_common, %struct.location_t, i32, %struct.tree_node*, i8, i8, i8, i8, i8, i8, i8, i8, i32, %struct.tree_decl_u1, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, i32, %struct.tree_decl_u2, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_decl* }
- %struct.tree_decl_u1 = type { i64 }
- %struct.tree_decl_u2 = type { %struct.function* }
- %struct.tree_node = type { %struct.tree_decl }
- %union.tree_ann_d = type opaque
-
-define void @check_format_arg() {
- br i1 false, label %cond_next196, label %bb12.preheader
-
-bb12.preheader: ; preds = %0
- ret void
-
-cond_next196: ; preds = %0
- br i1 false, label %cond_next330, label %cond_true304
-
-cond_true304: ; preds = %cond_next196
- ret void
-
-cond_next330: ; preds = %cond_next196
- br i1 false, label %cond_next472, label %bb441
-
-bb441: ; preds = %cond_next330
- ret void
-
-cond_next472: ; preds = %cond_next330
- %tmp490 = load %struct.tree_node** null ; <%struct.tree_node*> [#uses=1]
- %tmp492 = getelementptr %struct.tree_node* %tmp490, i32 0, i32 0, i32 0, i32 3 ; <i8*> [#uses=1]
- %tmp492.upgrd.1 = bitcast i8* %tmp492 to i32* ; <i32*> [#uses=1]
- %tmp493 = load i32* %tmp492.upgrd.1 ; <i32> [#uses=1]
- %tmp495 = trunc i32 %tmp493 to i8 ; <i8> [#uses=1]
- %tmp496 = icmp eq i8 %tmp495, 11 ; <i1> [#uses=1]
- %tmp496.upgrd.2 = zext i1 %tmp496 to i8 ; <i8> [#uses=1]
- store i8 %tmp496.upgrd.2, i8* null
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-10-13-CycleInDAG.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-10-13-CycleInDAG.ll
deleted file mode 100644
index 6ed2e7b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-10-13-CycleInDAG.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86
-@str = external global [18 x i8] ; <[18 x i8]*> [#uses=1]
-
-define void @test() {
-bb.i:
- %tmp.i660 = load <4 x float>* null ; <<4 x float>> [#uses=1]
- call void (i32, ...)* @printf( i32 0, i8* getelementptr ([18 x i8]* @str, i32 0, i64 0), double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00 )
- %tmp152.i = load <4 x i32>* null ; <<4 x i32>> [#uses=1]
- %tmp156.i = bitcast <4 x i32> %tmp152.i to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp175.i = bitcast <4 x float> %tmp.i660 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp176.i = xor <4 x i32> %tmp156.i, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
- %tmp177.i = and <4 x i32> %tmp176.i, %tmp175.i ; <<4 x i32>> [#uses=1]
- %tmp190.i = or <4 x i32> %tmp177.i, zeroinitializer ; <<4 x i32>> [#uses=1]
- %tmp191.i = bitcast <4 x i32> %tmp190.i to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp191.i, <4 x float>* null
- ret void
-}
-
-declare void @printf(i32, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-10-19-SwitchUnnecessaryBranching.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-10-19-SwitchUnnecessaryBranching.ll
deleted file mode 100644
index 88e8b4a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-10-19-SwitchUnnecessaryBranching.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=x86 -asm-verbose | FileCheck %s
-
-@str = internal constant [14 x i8] c"Hello world!\0A\00" ; <[14 x i8]*> [#uses=1]
-@str.upgrd.1 = internal constant [13 x i8] c"Blah world!\0A\00" ; <[13 x i8]*> [#uses=1]
-
-define i32 @test(i32 %argc, i8** %argv) nounwind {
-entry:
-; CHECK: cmpl $2
-; CHECK-NEXT: je
-; CHECK-NEXT: %entry
-
- switch i32 %argc, label %UnifiedReturnBlock [
- i32 1, label %bb
- i32 2, label %bb2
- ]
-
-bb: ; preds = %entry
- %tmp1 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([14 x i8]* @str, i32 0, i64 0) ) ; <i32> [#uses=0]
- ret i32 0
-
-bb2: ; preds = %entry
- %tmp4 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([13 x i8]* @str.upgrd.1, i32 0, i64 0) ) ; <i32> [#uses=0]
- ret i32 0
-
-UnifiedReturnBlock: ; preds = %entry
- ret i32 0
-}
-
-declare i32 @printf(i8*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-11-12-CSRetCC.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-11-12-CSRetCC.ll
deleted file mode 100644
index 91210ea..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-11-12-CSRetCC.ll
+++ /dev/null
@@ -1,59 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {subl \$4, %esp}
-
-target triple = "i686-pc-linux-gnu"
-@str = internal constant [9 x i8] c"%f+%f*i\0A\00" ; <[9 x i8]*> [#uses=1]
-
-define i32 @main() {
-entry:
- %retval = alloca i32, align 4 ; <i32*> [#uses=1]
- %tmp = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=4]
- %tmp1 = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=4]
- %tmp2 = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=3]
- %pi = alloca double, align 8 ; <double*> [#uses=2]
- %z = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=4]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store double 0x400921FB54442D18, double* %pi
- %tmp.upgrd.1 = load double* %pi ; <double> [#uses=1]
- %real = getelementptr { double, double }* %tmp1, i64 0, i32 0 ; <double*> [#uses=1]
- store double 0.000000e+00, double* %real
- %real3 = getelementptr { double, double }* %tmp1, i64 0, i32 1 ; <double*> [#uses=1]
- store double %tmp.upgrd.1, double* %real3
- %tmp.upgrd.2 = getelementptr { double, double }* %tmp, i64 0, i32 0 ; <double*> [#uses=1]
- %tmp4 = getelementptr { double, double }* %tmp1, i64 0, i32 0 ; <double*> [#uses=1]
- %tmp5 = load double* %tmp4 ; <double> [#uses=1]
- store double %tmp5, double* %tmp.upgrd.2
- %tmp6 = getelementptr { double, double }* %tmp, i64 0, i32 1 ; <double*> [#uses=1]
- %tmp7 = getelementptr { double, double }* %tmp1, i64 0, i32 1 ; <double*> [#uses=1]
- %tmp8 = load double* %tmp7 ; <double> [#uses=1]
- store double %tmp8, double* %tmp6
- %tmp.upgrd.3 = bitcast { double, double }* %tmp to { i64, i64 }* ; <{ i64, i64 }*> [#uses=1]
- %tmp.upgrd.4 = getelementptr { i64, i64 }* %tmp.upgrd.3, i64 0, i32 0 ; <i64*> [#uses=1]
- %tmp.upgrd.5 = load i64* %tmp.upgrd.4 ; <i64> [#uses=1]
- %tmp9 = bitcast { double, double }* %tmp to { i64, i64 }* ; <{ i64, i64 }*> [#uses=1]
- %tmp10 = getelementptr { i64, i64 }* %tmp9, i64 0, i32 1 ; <i64*> [#uses=1]
- %tmp11 = load i64* %tmp10 ; <i64> [#uses=1]
- call void @cexp( { double, double }* sret %tmp2, i64 %tmp.upgrd.5, i64 %tmp11 )
- %tmp12 = getelementptr { double, double }* %z, i64 0, i32 0 ; <double*> [#uses=1]
- %tmp13 = getelementptr { double, double }* %tmp2, i64 0, i32 0 ; <double*> [#uses=1]
- %tmp14 = load double* %tmp13 ; <double> [#uses=1]
- store double %tmp14, double* %tmp12
- %tmp15 = getelementptr { double, double }* %z, i64 0, i32 1 ; <double*> [#uses=1]
- %tmp16 = getelementptr { double, double }* %tmp2, i64 0, i32 1 ; <double*> [#uses=1]
- %tmp17 = load double* %tmp16 ; <double> [#uses=1]
- store double %tmp17, double* %tmp15
- %tmp18 = getelementptr { double, double }* %z, i64 0, i32 1 ; <double*> [#uses=1]
- %tmp19 = load double* %tmp18 ; <double> [#uses=1]
- %tmp20 = getelementptr { double, double }* %z, i64 0, i32 0 ; <double*> [#uses=1]
- %tmp21 = load double* %tmp20 ; <double> [#uses=1]
- %tmp.upgrd.6 = getelementptr [9 x i8]* @str, i32 0, i64 0 ; <i8*> [#uses=1]
- %tmp.upgrd.7 = call i32 (i8*, ...)* @printf( i8* %tmp.upgrd.6, double %tmp21, double %tmp19 ) ; <i32> [#uses=0]
- br label %return
-return: ; preds = %entry
- %retval.upgrd.8 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval.upgrd.8
-}
-
-declare void @cexp({ double, double }* sret , i64, i64)
-
-declare i32 @printf(i8*, ...)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-11-17-IllegalMove.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-11-17-IllegalMove.ll
deleted file mode 100644
index e839d72..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-11-17-IllegalMove.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: grep movb %t | count 2
-; RUN: grep {movzb\[wl\]} %t
-
-
-define void @handle_vector_size_attribute() nounwind {
-entry:
- %tmp69 = load i32* null ; <i32> [#uses=1]
- switch i32 %tmp69, label %bb84 [
- i32 2, label %bb77
- i32 1, label %bb77
- ]
-
-bb77: ; preds = %entry, %entry
- %tmp99 = udiv i64 0, 0 ; <i64> [#uses=1]
- %tmp = load i8* null ; <i8> [#uses=1]
- %tmp114 = icmp eq i64 0, 0 ; <i1> [#uses=1]
- br i1 %tmp114, label %cond_true115, label %cond_next136
-
-bb84: ; preds = %entry
- ret void
-
-cond_true115: ; preds = %bb77
- %tmp118 = load i8* null ; <i8> [#uses=1]
- br i1 false, label %cond_next129, label %cond_true120
-
-cond_true120: ; preds = %cond_true115
- %tmp127 = udiv i8 %tmp, %tmp118 ; <i8> [#uses=1]
- %tmp127.upgrd.1 = zext i8 %tmp127 to i64 ; <i64> [#uses=1]
- br label %cond_next129
-
-cond_next129: ; preds = %cond_true120, %cond_true115
- %iftmp.30.0 = phi i64 [ %tmp127.upgrd.1, %cond_true120 ], [ 0, %cond_true115 ] ; <i64> [#uses=1]
- %tmp132 = icmp eq i64 %iftmp.30.0, %tmp99 ; <i1> [#uses=1]
- br i1 %tmp132, label %cond_false148, label %cond_next136
-
-cond_next136: ; preds = %cond_next129, %bb77
- ret void
-
-cond_false148: ; preds = %cond_next129
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-11-27-SelectLegalize.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-11-27-SelectLegalize.ll
deleted file mode 100644
index ea2e6db..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-11-27-SelectLegalize.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 | grep test.*1
-; PR1016
-
-define i32 @test(i32 %A, i32 %B, i32 %C) {
- %a = trunc i32 %A to i1 ; <i1> [#uses=1]
- %D = select i1 %a, i32 %B, i32 %C ; <i32> [#uses=1]
- ret i32 %D
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-11-28-Memcpy.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-11-28-Memcpy.ll
deleted file mode 100644
index 8c1573f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-11-28-Memcpy.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; PR1022, PR1023
-; RUN: llc < %s -march=x86 | grep -- -573785174 | count 2
-; RUN: llc < %s -march=x86 | grep -E {movl _?bytes2} | count 1
-
-@fmt = constant [4 x i8] c"%x\0A\00" ; <[4 x i8]*> [#uses=2]
-@bytes = constant [4 x i8] c"\AA\BB\CC\DD" ; <[4 x i8]*> [#uses=1]
-@bytes2 = global [4 x i8] c"\AA\BB\CC\DD" ; <[4 x i8]*> [#uses=1]
-
-define i32 @test1() nounwind {
- %y = alloca i32 ; <i32*> [#uses=2]
- %c = bitcast i32* %y to i8* ; <i8*> [#uses=1]
- %z = getelementptr [4 x i8]* @bytes, i32 0, i32 0 ; <i8*> [#uses=1]
- call void @llvm.memcpy.i32( i8* %c, i8* %z, i32 4, i32 1 )
- %r = load i32* %y ; <i32> [#uses=1]
- %t = bitcast [4 x i8]* @fmt to i8* ; <i8*> [#uses=1]
- %tmp = call i32 (i8*, ...)* @printf( i8* %t, i32 %r ) ; <i32> [#uses=0]
- ret i32 0
-}
-
-define void @test2() nounwind {
- %y = alloca i32 ; <i32*> [#uses=2]
- %c = bitcast i32* %y to i8* ; <i8*> [#uses=1]
- %z = getelementptr [4 x i8]* @bytes2, i32 0, i32 0 ; <i8*> [#uses=1]
- call void @llvm.memcpy.i32( i8* %c, i8* %z, i32 4, i32 1 )
- %r = load i32* %y ; <i32> [#uses=1]
- %t = bitcast [4 x i8]* @fmt to i8* ; <i8*> [#uses=1]
- %tmp = call i32 (i8*, ...)* @printf( i8* %t, i32 %r ) ; <i32> [#uses=0]
- ret void
-}
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
-
-declare i32 @printf(i8*, ...)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-12-16-InlineAsmCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-12-16-InlineAsmCrash.ll
deleted file mode 100644
index 50a244b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-12-16-InlineAsmCrash.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR1049
-target datalayout = "e-p:32:32"
-target triple = "i686-pc-linux-gnu"
- %struct.QBasicAtomic = type { i32 }
- %struct.QByteArray = type { %"struct.QByteArray::Data"* }
- %"struct.QByteArray::Data" = type { %struct.QBasicAtomic, i32, i32, i8*, [1 x i8] }
- %struct.QFactoryLoader = type { %struct.QObject }
- %struct.QImageIOHandler = type { i32 (...)**, %struct.QImageIOHandlerPrivate* }
- %struct.QImageIOHandlerPrivate = type opaque
- %struct.QImageWriter = type { %struct.QImageWriterPrivate* }
- %struct.QImageWriterPrivate = type { %struct.QByteArray, %struct.QFactoryLoader*, i1, %struct.QImageIOHandler*, i32, float, %struct.QString, %struct.QString, i32, %struct.QString, %struct.QImageWriter* }
- %"struct.QList<QByteArray>" = type { %"struct.QList<QByteArray>::._20" }
- %"struct.QList<QByteArray>::._20" = type { %struct.QListData }
- %struct.QListData = type { %"struct.QListData::Data"* }
- %"struct.QListData::Data" = type { %struct.QBasicAtomic, i32, i32, i32, i8, [1 x i8*] }
- %struct.QObject = type { i32 (...)**, %struct.QObjectData* }
- %struct.QObjectData = type { i32 (...)**, %struct.QObject*, %struct.QObject*, %"struct.QList<QByteArray>", i8, [3 x i8], i32, i32 }
- %struct.QString = type { %"struct.QString::Data"* }
- %"struct.QString::Data" = type { %struct.QBasicAtomic, i32, i32, i16*, i8, i8, [1 x i16] }
-
-define i1 @_ZNK12QImageWriter8canWriteEv() {
- %tmp62 = load %struct.QImageWriterPrivate** null ; <%struct.QImageWriterPrivate*> [#uses=1]
- %tmp = getelementptr %struct.QImageWriterPrivate* %tmp62, i32 0, i32 9 ; <%struct.QString*> [#uses=1]
- %tmp75 = call %struct.QString* @_ZN7QStringaSERKS_( %struct.QString* %tmp, %struct.QString* null ) ; <%struct.QString*> [#uses=0]
- call void asm sideeffect "lock\0Adecl $0\0Asetne 1", "=*m"( i32* null )
- ret i1 false
-}
-
-declare %struct.QString* @_ZN7QStringaSERKS_(%struct.QString*, %struct.QString*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2006-12-19-IntelSyntax.ll b/libclamav/c++/llvm/test/CodeGen/X86/2006-12-19-IntelSyntax.ll
deleted file mode 100644
index f81b303..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2006-12-19-IntelSyntax.ll
+++ /dev/null
@@ -1,86 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel
-; PR1061
-target datalayout = "e-p:32:32"
-target triple = "i686-pc-linux-gnu"
-
-define void @bar(i32 %n) {
-entry:
- switch i32 %n, label %bb12 [
- i32 1, label %bb
- i32 2, label %bb6
- i32 4, label %bb7
- i32 5, label %bb8
- i32 6, label %bb10
- i32 7, label %bb1
- i32 8, label %bb3
- i32 9, label %bb4
- i32 10, label %bb9
- i32 11, label %bb2
- i32 12, label %bb5
- i32 13, label %bb11
- ]
-
-bb: ; preds = %entry
- call void (...)* @foo1( )
- ret void
-
-bb1: ; preds = %entry
- call void (...)* @foo2( )
- ret void
-
-bb2: ; preds = %entry
- call void (...)* @foo6( )
- ret void
-
-bb3: ; preds = %entry
- call void (...)* @foo3( )
- ret void
-
-bb4: ; preds = %entry
- call void (...)* @foo4( )
- ret void
-
-bb5: ; preds = %entry
- call void (...)* @foo5( )
- ret void
-
-bb6: ; preds = %entry
- call void (...)* @foo1( )
- ret void
-
-bb7: ; preds = %entry
- call void (...)* @foo2( )
- ret void
-
-bb8: ; preds = %entry
- call void (...)* @foo6( )
- ret void
-
-bb9: ; preds = %entry
- call void (...)* @foo3( )
- ret void
-
-bb10: ; preds = %entry
- call void (...)* @foo4( )
- ret void
-
-bb11: ; preds = %entry
- call void (...)* @foo5( )
- ret void
-
-bb12: ; preds = %entry
- call void (...)* @foo6( )
- ret void
-}
-
-declare void @foo1(...)
-
-declare void @foo2(...)
-
-declare void @foo6(...)
-
-declare void @foo3(...)
-
-declare void @foo4(...)
-
-declare void @foo5(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-01-08-InstrSched.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-01-08-InstrSched.ll
deleted file mode 100644
index 317ed0a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-01-08-InstrSched.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; PR1075
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -O3 | FileCheck %s
-
-define float @foo(float %x) nounwind {
- %tmp1 = fmul float %x, 3.000000e+00
- %tmp3 = fmul float %x, 5.000000e+00
- %tmp5 = fmul float %x, 7.000000e+00
- %tmp7 = fmul float %x, 1.100000e+01
- %tmp10 = fadd float %tmp1, %tmp3
- %tmp12 = fadd float %tmp10, %tmp5
- %tmp14 = fadd float %tmp12, %tmp7
- ret float %tmp14
-
-; CHECK: mulss LCPI1_3(%rip)
-; CHECK-NEXT: mulss LCPI1_0(%rip)
-; CHECK-NEXT: mulss LCPI1_1(%rip)
-; CHECK-NEXT: mulss LCPI1_2(%rip)
-; CHECK-NEXT: addss
-; CHECK-NEXT: addss
-; CHECK-NEXT: addss
-; CHECK-NEXT: ret
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-01-08-X86-64-Pointer.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-01-08-X86-64-Pointer.ll
deleted file mode 100644
index de226a1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-01-08-X86-64-Pointer.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc %s -o - -march=x86-64 | grep {(%rdi,%rax,8)}
-; RUN: llc %s -o - -march=x86-64 | not grep {addq.*8}
-
-define void @foo(double* %y) nounwind {
-entry:
- br label %bb
-
-bb:
- %i = phi i64 [ 0, %entry ], [ %k, %bb ]
- %j = getelementptr double* %y, i64 %i
- store double 0.000000e+00, double* %j
- %k = add i64 %i, 1
- %n = icmp eq i64 %k, 0
- br i1 %n, label %return, label %bb
-
-return:
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll
deleted file mode 100644
index 5e7c0a7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll
+++ /dev/null
@@ -1,462 +0,0 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: grep leaq %t
-; RUN: not grep {,%rsp)} %t
-; PR1103
-
-target datalayout = "e-p:64:64"
-@i6000 = global [128 x i64] zeroinitializer, align 16
-
-
-define void @foo(i32* %a0, i32* %a1, i32* %a2, i32* %a3, i32* %a4, i32* %a5) {
-b:
- %r = load i32* %a0
- %r2 = load i32* %a1
- %r4 = load i32* %a2
- %r6 = load i32* %a3
- %r8 = load i32* %a4
- %r14 = load i32* %a5
- %rx = sext i32 %r2 to i64
- %r9 = sext i32 %r to i64
- %r11 = add i64 %rx, 0
- %ras = icmp slt i64 %r11, 0
- %r12 = select i1 %ras, i64 0, i64 %r11
- %r16 = sext i32 %r14 to i64
- %r17 = sext i32 %r8 to i64
- %r18 = sub i64 %r16, 0
- %r19 = add i64 %r18, 0
- %r20 = icmp slt i64 %r19, 0
- %r19h = add i64 %r18, 0
- %r22 = select i1 %r20, i64 1, i64 %r19h
- %r23 = mul i64 %r22, 0
- %r23a = trunc i64 %r23 to i32
- %r24 = shl i32 %r23a, 0
- %r25 = add i32 %r24, 0
- %ras2 = alloca i8, i32 %r25, align 16
- %r28 = getelementptr i8* %ras2, i32 0
- %r38 = shl i64 %r12, 0
- %s2013 = add i64 %r38, 0
- %c22012 = getelementptr i8* %ras2, i64 %s2013
- %r42 = shl i64 %r12, 0
- %s2011 = add i64 %r42, 16
- %c22010 = getelementptr i8* %ras2, i64 %s2011
- %r50 = add i64 %r16, 0
- %r51 = icmp slt i64 %r50, 0
- %r50sh = shl i64 %r50, 0
- %r50j = add i64 %r50sh, 0
- %r54 = select i1 %r51, i64 0, i64 %r50j
- %r56 = mul i64 %r54, %r12
- %r28s = add i64 %r56, 16
- %c2 = getelementptr i8* %ras2, i64 %r28s
- %r60 = sub i32 %r2, %r
- %r61 = icmp slt i32 %r60, 0
- br i1 %r61, label %a29b, label %b63
-a29b:
- %r155 = sub i32 %r6, %r4
- %r156 = icmp slt i32 %r155, 0
- br i1 %r156, label %a109b, label %b158
-b63:
- %r66 = sext i32 %r60 to i64
- %r67 = add i64 %r66, 0
- %r76 = mul i64 %r17, 0
- %r82 = add i64 %r76, 0
- %r84 = icmp slt i64 %r67, 0
- br i1 %r84, label %b85, label %a25b
-b85:
- %e641 = phi i64 [ 0, %b63 ], [ %r129, %a25b ]
- %r137 = icmp slt i64 %e641, 0
- br i1 %r137, label %a25b140q, label %a29b
-a25b140q:
- br label %a25b140
-a25b:
- %w1989 = phi i64 [ 0, %b63 ], [ %v1990, %a25b ]
- %e642 = shl i64 %w1989, 0
- %r129 = add i64 %e642, 0
- %r132 = add i64 %e642, 0
- %r134 = icmp slt i64 %r132, 0
- %v1990 = add i64 %w1989, 0
- br i1 %r134, label %b85, label %a25b
-a25b140:
- %w1982 = phi i64 [ 0, %a25b140q ], [ %v1983, %a25b140 ]
- %r145 = add i64 %r82, 0
- %v1983 = add i64 %w1982, 0
- %u1987 = icmp slt i64 %v1983, 0
- br i1 %u1987, label %a29b, label %a25b140
-b158:
- %r161 = sext i32 %r to i64
- %r163 = sext i32 %r4 to i64
- br label %a29b173
-a29b173:
- %w1964 = phi i64 [ 0, %b158 ], [ %v1973, %b1606 ]
- %b1974 = mul i64 %r163, 0
- %b1975 = add i64 %r161, 0
- %b1976 = mul i64 %w1964, 0
- %b1977 = add i64 %b1976, 0
- %s761 = bitcast i64 %b1977 to i64
- %b1980 = mul i64 %w1964, 0
- %s661 = add i64 %b1980, 0
- br i1 %r61, label %a33b, label %b179
-a33b:
- %r328 = icmp slt i32 %r14, 0
- %r335 = or i1 %r328, %r61
- br i1 %r335, label %a50b, label %b341
-b179:
- %r182 = sext i32 %r60 to i64
- %r183 = add i64 %r182, 0
- %r187 = icmp slt i64 %r183, 0
- br i1 %r187, label %b188, label %a30b
-b188:
- %e653 = phi i64 [ 0, %b179 ], [ %r283, %a30b ]
- %r291 = icmp slt i64 %e653, 0
- br i1 %r291, label %a30b294q, label %a33b
-a30b294q:
- br label %a30b294
-a30b:
- %w = phi i64 [ 0, %b179 ], [ %v, %a30b ]
- %b2 = shl i64 %w, 0
- %r283 = add i64 %b2, 0
- %r286 = add i64 %b2, 0
- %r288 = icmp slt i64 %r286, 0
- %v = add i64 %w, 0
- br i1 %r288, label %b188, label %a30b
-a30b294:
- %w1847 = phi i64 [ 0, %a30b294q ], [ %v1848, %a30b294 ]
- %v1848 = add i64 %w1847, 0
- %u = icmp slt i64 %v1848, 0
- br i1 %u, label %a33b, label %a30b294
-a50b:
- %r814 = add i32 %r14, 0
- %r815 = icmp slt i32 %r814, 0
- %r817 = or i1 %r61, %r815
- br i1 %r817, label %a57b, label %b820
-b341:
- %w1874 = phi i64 [ 0, %a33b ], [ %v1880, %b463 ]
- %d753 = bitcast i64 %w1874 to i64
- %r343 = add i64 %s661, 0
- %r346 = add i64 %r343, 0
- %r347 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r346
- %r348 = load float* %r347
- %r352 = add i64 %r343, 0
- %r353 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r352
- %r354 = load float* %r353
- %r362 = load float* bitcast ([128 x i64]* @i6000 to float*)
- %r363 = fadd float 0.000000e+00, %r362
- %r370 = load float* bitcast ([128 x i64]* @i6000 to float*)
- %r376 = icmp slt i64 %r16, 0
- br i1 %r376, label %b377, label %a35b
-b377:
- %d753p = phi i64 [ %d753, %b341 ], [ %r411, %a35b ]
- %s761p = phi i64 [ %s761, %b341 ], [ 322, %a35b ]
- %e784 = phi i64 [ 0, %b341 ], [ %r454, %a35b ]
- %s794 = add i64 %d753p, 0
- %r462 = icmp slt i64 %e784, 0
- br i1 %r462, label %a35b465, label %b463
-a35b:
- %w1865 = phi i64 [ 0, %b341 ], [ %v1866, %a35b ]
- %e785 = shl i64 %w1865, 0
- %b1877 = mul i64 %w1865, 0
- %s795 = add i64 %b1877, 0
- %r399 = fadd float %r354, 0.000000e+00
- %r402 = fadd float %r370, 0.000000e+00
- %r403 = fadd float %r348, 0.000000e+00
- %r411 = add i64 %s795, 0
- %r431 = fadd float %r362, 0.000000e+00
- %r454 = add i64 %e785, 0
- %r457 = add i64 %e785, 0
- %r459 = icmp slt i64 %r457, 0
- %v1866 = add i64 %w1865, 0
- br i1 %r459, label %b377, label %a35b
-b463:
- %r506 = add i64 %d753, 0
- %r511 = sext i32 %r60 to i64
- %r512 = add i64 %r511, 0
- %r513 = icmp slt i64 %r506, 0
- %v1880 = add i64 %w1874, 0
- br i1 %r513, label %b341, label %b514
-a35b465:
- %r469 = add i64 %s794, 0
- br label %b463
-b514:
- %r525 = mul i64 %r17, 0
- %r533 = add i64 %r525, 0
- br label %b535
-b535:
- %w1855 = phi i64 [ 0, %b514 ], [ %v1856, %b712 ]
- %s923 = phi i64 [ 0, %b514 ], [ %r799, %b712 ]
- %s933 = phi i64 [ %r533, %b514 ], [ %r795, %b712 ]
- %r538 = add i64 %w1855, 0
- %r539 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r538
- %r540 = load float* %r539
- %r551 = load float* bitcast ([128 x i64]* @i6000 to float*)
- %r562 = sub i64 %s933, 0
- %r564 = icmp slt i64 %r512, 0
- br i1 %r564, label %b565, label %a45b
-b565:
- %e944 = phi i64 [ 0, %b535 ], [ %r703, %a45b ]
- %r711 = icmp slt i64 %e944, 0
- br i1 %r711, label %a45b714, label %b712
-a45b:
- %w1852 = phi i64 [ 0, %b535 ], [ %v1853, %a45b ]
- %e945 = shl i64 %w1852, 0
- %r609 = add i64 %r562, 0
- %r703 = add i64 %e945, 0
- %r706 = add i64 %e945, 0
- %r708 = icmp slt i64 %r706, 0
- %v1853 = add i64 %w1852, 0
- br i1 %r708, label %b565, label %a45b
-b712:
- %r795 = add i64 %rx, 0
- %r799 = add i64 %s923, 0
- %r802 = add i64 %w1855, 0
- %r807 = icmp slt i64 %r802, 0
- %v1856 = add i64 %w1855, 0
- br i1 %r807, label %b535, label %a50b
-a45b714:
- %r717 = add i64 %e944, 0
- %r720 = add i64 %r717, 0
- %r721 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r720
- %r722 = load float* %r721
- %r726 = add i64 %r717, 0
- %r727 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r726
- %r728 = load float* %r727
- %r732 = add i64 %r717, 0
- %r733 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r732
- %r734 = load float* %r733
- %r738 = add i64 %r717, 0
- %r739 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r738
- %r740 = load float* %r739
- %r744 = add i64 %r717, 0
- %r745 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r744
- %r746 = load float* %r745
- %r750 = add i64 %r717, 0
- %r751 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r750
- %r752 = load float* %r751
- %r753 = fadd float %r752, %r746
- %r754 = fadd float %r728, %r722
- %r755 = fadd float %r734, %r754
- %r756 = fadd float %r755, %r740
- %r757 = fadd float %r753, %r756
- %r759 = fadd float %r757, %r540
- %r770 = add i64 %r717, 0
- %r771 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r770
- %r772 = load float* %r771
- %r776 = add i64 %r717, 0
- %r777 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r776
- %r778 = load float* %r777
- %r781 = fadd float %r363, %r772
- %r782 = fadd float %r781, %r778
- %r783 = fadd float %r551, %r782
- br label %b712
-a57b:
- br i1 %r335, label %a66b, label %b1086
-b820:
- %r823 = sext i32 %r2 to i64
- %r834 = sext i32 %r8 to i64
- %r844 = add i64 %r16, 0
- %r846 = sext i32 %r60 to i64
- %r847 = add i64 %r846, 0
- %r851 = load float* bitcast ([128 x i64]* @i6000 to float*)
- %r856 = sub i64 %rx, 0
- br label %b858
-b858:
- %w1891 = phi i64 [ 0, %b820 ], [ %v1892, %b1016 ]
- %s1193 = phi i64 [ 0, %b820 ], [ %r1068, %b1016 ]
- %b1894 = mul i64 %r834, 0
- %b1896 = shl i64 %r823, 0
- %b1902 = mul i64 %w1891, 0
- %s1173 = add i64 %b1902, 0
- %r859 = add i64 %r856, 0
- %r862 = add i64 %w1891, 0
- %r863 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r862
- %r864 = load float* %r863
- %r868 = add i64 %w1891, 0
- %r869 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r868
- %r870 = load float* %r869
- %r873 = sub i64 %r859, 0
- %r876 = sub i64 %s1173, 0
- %r878 = icmp slt i64 %r847, 0
- br i1 %r878, label %b879, label %a53b
-b879:
- %e1204 = phi i64 [ 0, %b858 ], [ %r1007, %a53b ]
- %r1015 = icmp slt i64 %e1204, 0
- br i1 %r1015, label %a53b1019q, label %b1016
-a53b1019q:
- %b1888 = sub i64 %r846, 0
- %b1889 = add i64 %b1888, 0
- br label %a53b1019
-a53b:
- %w1881 = phi i64 [ 0, %b858 ], [ %v1882, %a53b ]
- %e1205 = shl i64 %w1881, 0
- %r1007 = add i64 %e1205, 0
- %r1010 = add i64 %e1205, 0
- %r1012 = icmp slt i64 %r1010, 0
- %v1882 = add i64 %w1881, 0
- br i1 %r1012, label %b879, label %a53b
-b1016:
- %r1068 = add i64 %s1193, 0
- %r1071 = add i64 %w1891, 0
- %r1073 = icmp slt i64 %r1071, %r844
- %v1892 = add i64 %w1891, 0
- br i1 %r1073, label %b858, label %a57b
-a53b1019:
- %w1885 = phi i64 [ 0, %a53b1019q ], [ %v1886, %a53b1019 ]
- %r1022 = add i64 %r876, 0
- %r1024 = bitcast i8* %c2 to float*
- %r1025 = add i64 %r1022, 0
- %r1026 = getelementptr float* %r1024, i64 %r1025
- %r1027 = load float* %r1026
- %r1032 = add i64 %r873, 0
- %r1033 = add i64 %r1032, 0
- %r1034 = getelementptr float* %r1024, i64 %r1033
- %r1035 = load float* %r1034
- %r1037 = bitcast i8* %c22010 to float*
- %r1040 = getelementptr float* %r1037, i64 %r1025
- %r1044 = fadd float %r864, %r1035
- %r1046 = fadd float %r870, %r1027
- %r1047 = fadd float %r1044, %r1046
- %r1048 = fadd float %r851, %r1047
- %v1886 = add i64 %w1885, 0
- %u1890 = icmp slt i64 %v1886, %b1889
- br i1 %u1890, label %b1016, label %a53b1019
-a66b:
- br i1 %r817, label %a93b, label %b1321
-b1086:
- %r1089 = sext i32 %r2 to i64
- %r1090 = add i64 %rx, 0
- %r1096 = mul i64 %r9, 0
- %r1101 = sext i32 %r8 to i64
- %r1104 = add i64 %r1096, 0
- %r1108 = sub i64 %r1104, 0
- %r1110 = sext i32 %r60 to i64
- %r1111 = add i64 %r1110, 0
- %r1113 = sext i32 %r14 to i64
- %r1114 = add i64 %r16, 0
- br label %b1117
-b1117:
- %w1915 = phi i64 [ 0, %b1086 ], [ %v1957, %b1263 ]
- %d1353 = bitcast i64 %w1915 to i64
- %r1120 = add i64 %s661, 0
- %r1121 = add i64 %r1120, 0
- %r1122 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1121
- %r1123 = load float* %r1122
- %r1132 = bitcast i8* %c22012 to float*
- %r1134 = getelementptr float* %r1132, i64 %w1915
- %r1135 = load float* %r1134
- %r1136 = fadd float %r1123, %r1135
- %r1138 = icmp slt i64 %r1114, 0
- br i1 %r1138, label %b1139, label %a63b
-b1139:
- %e1364 = phi i64 [ 0, %b1117 ], [ %r1254, %a63b ]
- %p1998 = phi i64 [ %s761, %b1117 ], [ %r1216, %a63b ]
- %r1108p = phi i64 [ %r1108, %b1117 ], [ %r1219, %a63b ]
- %p2004 = phi i64 [ %d1353, %b1117 ], [ %r1090, %a63b ]
- %s1374 = phi i64 [ 0, %b1117 ], [ %r1251, %a63b ]
- %s1384 = add i64 %r1108p, 0
- %s1394 = add i64 %p1998, 0
- %r1262 = icmp slt i64 %e1364, %r1114
- br i1 %r1262, label %a63b1266q, label %b1263
-a63b1266q:
- %b1947 = sub i64 %r1113, 0
- %b1948 = add i64 %b1947, 0
- br label %a63b1266
-a63b:
- %w1904 = phi i64 [ 0, %b1117 ], [ %v1905, %a63b ]
- %s1375 = phi i64 [ 0, %b1117 ], [ %r1251, %a63b ]
- %b1906 = add i64 %r1089, 0
- %b1907 = mul i64 %r1101, 0
- %b1929 = mul i64 %w1904, 0
- %s1395 = add i64 %b1929, 0
- %e1365 = shl i64 %w1904, 0
- %r1163 = add i64 %r1090, 0
- %r1167 = add i64 %s1375, 0
- %r1191 = add i64 %r1163, 0
- %r1195 = add i64 %r1167, 0
- %r1216 = add i64 %s1395, 0
- %r1219 = add i64 %r1191, 0
- %r1223 = add i64 %r1195, 0
- %r1251 = add i64 %r1223, 0
- %r1254 = add i64 %e1365, 0
- %r1257 = add i64 %e1365, 0
- %r1259 = icmp slt i64 %r1257, %r1114
- %v1905 = add i64 %w1904, 0
- br i1 %r1259, label %b1139, label %a63b
-b1263:
- %r1306 = add i64 %d1353, 0
- %r1308 = icmp slt i64 %r1306, %r1111
- %v1957 = add i64 %w1915, 0
- br i1 %r1308, label %b1117, label %a66b
-a63b1266:
- %w1944 = phi i64 [ 0, %a63b1266q ], [ %v1945, %a63b1266 ]
- %s1377 = phi i64 [ %s1374, %a63b1266q ], [ %r1297, %a63b1266 ]
- %r1282 = fadd float %r1136, 0.000000e+00
- %r1297 = add i64 %s1377, 0
- %v1945 = add i64 %w1944, 0
- %u1949 = icmp slt i64 %v1945, %b1948
- br i1 %u1949, label %b1263, label %a63b1266
-a93b:
- br i1 %r61, label %b1606, label %a97b
-b1321:
- %r1331 = mul i64 %r17, 0
- %r1339 = add i64 %r1331, 0
- br label %b1342
-b1342:
- %w1960 = phi i64 [ 0, %b1321 ], [ %v1961, %b1582 ]
- %s1523 = phi i64 [ %r1339, %b1321 ], [ %r1587, %b1582 ]
- %s1563 = phi i64 [ 0, %b1321 ], [ %r1591, %b1582 ]
- %d1533 = bitcast i64 %w1960 to i64
- %b1968 = mul i64 %w1960, 0
- %s1543 = add i64 %b1968, 0
- %r1345 = add i64 %s1523, 0
- %r1348 = sub i64 %r1345, 0
- %r1352 = add i64 %s1523, 0
- %r1355 = sub i64 %r1352, 0
- %r1370 = add i64 %d1533, 0
- %r1371 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1370
- %r1372 = load float* %r1371
- br label %a74b
-a74b:
- %w1958 = phi i64 [ 0, %b1342 ], [ %v1959, %a74b ]
- %r1379 = add i64 %s1543, 0
- %r1403 = add i64 %r1355, 0
- %r1422 = add i64 %r1348, 0
- %r1526 = fadd float %r1372, 0.000000e+00
- %r1573 = add i64 %w1958, 0
- %r1581 = icmp slt i64 %r1573, 0
- %v1959 = add i64 %w1958, 0
- br i1 %r1581, label %a74b, label %b1582
-b1582:
- %r1587 = add i64 %rx, 0
- %r1591 = add i64 %s1563, 0
- %r1596 = add i64 %d1533, 0
- %r1601 = icmp slt i64 %r1596, 0
- %v1961 = add i64 %w1960, 0
- br i1 %r1601, label %b1342, label %a93b
-b1606:
- %r1833 = add i64 %w1964, 0
- %r1840 = icmp slt i64 %r1833, 0
- %v1973 = add i64 %w1964, 0
- br i1 %r1840, label %a29b173, label %a109b
-a97b:
- %w1970 = phi i64 [ 0, %a93b ], [ %v1971, %a97b ]
- %r1613 = add i64 %w1964, 0
- %r1614 = mul i64 %r1613, 0
- %r1622 = add i64 %r1614, 0
- %r1754 = bitcast i8* %r28 to float*
- %r1756 = getelementptr float* %r1754, i64 %w1970
- %r1757 = load float* %r1756
- %r1761 = add i64 %r1622, 0
- %r1762 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1761
- %r1763 = load float* %r1762
- %r1767 = add i64 %r1622, 0
- %r1768 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1767
- %r1772 = fadd float %r1763, 0.000000e+00
- %r1773 = fadd float %r1772, 0.000000e+00
- %r1809 = fadd float %r1757, 0.000000e+00
- %r1810 = fadd float %r1773, %r1809
- store float %r1810, float* %r1768
- %r1818 = add i64 %w1970, 0
- %r1826 = icmp slt i64 %r1818, 0
- %v1971 = add i64 %w1970, 0
- br i1 %r1826, label %a97b, label %b1606
-a109b:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-01-29-InlineAsm-ir.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-01-29-InlineAsm-ir.ll
deleted file mode 100644
index e83e2e5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-01-29-InlineAsm-ir.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86
-; Test 'ri' constraint.
-
-define void @run_init_process() {
- %tmp = call i32 asm sideeffect "push %ebx ; movl $2,%ebx ; int $$0x80 ; pop %ebx", "={ax},0,ri,{cx},{dx},~{dirflag},~{fpsr},~{flags},~{memory}"( i32 11, i32 0, i32 0, i32 0 )
- unreachable
- }
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-02-04-OrAddrMode.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-02-04-OrAddrMode.ll
deleted file mode 100644
index 10bbe74..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-02-04-OrAddrMode.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {orl \$1, %eax}
-; RUN: llc < %s -march=x86 | grep {leal 3(,%eax,8)}
-
-;; This example can't fold the or into an LEA.
-define i32 @test(float ** %tmp2, i32 %tmp12) nounwind {
- %tmp3 = load float** %tmp2
- %tmp132 = shl i32 %tmp12, 2 ; <i32> [#uses=1]
- %tmp4 = bitcast float* %tmp3 to i8* ; <i8*> [#uses=1]
- %ctg2 = getelementptr i8* %tmp4, i32 %tmp132 ; <i8*> [#uses=1]
- %tmp6 = ptrtoint i8* %ctg2 to i32 ; <i32> [#uses=1]
- %tmp14 = or i32 %tmp6, 1 ; <i32> [#uses=1]
- ret i32 %tmp14
-}
-
-
-;; This can!
-define i32 @test2(i32 %a, i32 %b) nounwind {
- %c = shl i32 %a, 3
- %d = or i32 %c, 3
- ret i32 %d
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-02-16-BranchFold.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-02-16-BranchFold.ll
deleted file mode 100644
index 6bf5631..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-02-16-BranchFold.ll
+++ /dev/null
@@ -1,95 +0,0 @@
-; PR 1200
-; RUN: llc < %s -enable-tail-merge=0 | not grep jmp
-
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32"
-target triple = "i686-apple-darwin8"
- %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
- %struct.Index_Map = type { i32, %struct.item_set** }
- %struct.Item = type { [4 x i16], %struct.rule* }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
- %struct.dimension = type { i16*, %struct.Index_Map, %struct.mapping*, i32, %struct.plankMap* }
- %struct.item_set = type { i32, i32, %struct.operator*, [2 x %struct.item_set*], %struct.item_set*, i16*, %struct.Item*, %struct.Item* }
- %struct.list = type { i8*, %struct.list* }
- %struct.mapping = type { %struct.list**, i32, i32, i32, %struct.item_set** }
- %struct.nonterminal = type { i8*, i32, i32, i32, %struct.plankMap*, %struct.rule* }
- %struct.operator = type { i8*, i8, i32, i32, i32, i32, %struct.table* }
- %struct.pattern = type { %struct.nonterminal*, %struct.operator*, [2 x %struct.nonterminal*] }
- %struct.plank = type { i8*, %struct.list*, i32 }
- %struct.plankMap = type { %struct.list*, i32, %struct.stateMap* }
- %struct.rule = type { [4 x i16], i32, i32, i32, %struct.nonterminal*, %struct.pattern*, i8 }
- %struct.stateMap = type { i8*, %struct.plank*, i32, i16* }
- %struct.table = type { %struct.operator*, %struct.list*, i16*, [2 x %struct.dimension*], %struct.item_set** }
-@outfile = external global %struct.FILE* ; <%struct.FILE**> [#uses=1]
-@str1 = external global [11 x i8] ; <[11 x i8]*> [#uses=1]
-
-declare i32 @fprintf(%struct.FILE*, i8*, ...)
-
-define i16 @main_bb_2E_i9_2E_i_2E_i932_2E_ce(%struct.list* %l_addr.01.0.i2.i.i929, %struct.operator** %tmp66.i62.i.out) {
-newFuncRoot:
- br label %bb.i9.i.i932.ce
-
-NewDefault: ; preds = %LeafBlock, %LeafBlock1, %LeafBlock2, %LeafBlock3
- br label %bb36.i.i.exitStub
-
-bb36.i.i.exitStub: ; preds = %NewDefault
- store %struct.operator* %tmp66.i62.i, %struct.operator** %tmp66.i62.i.out
- ret i16 0
-
-bb.i14.i.exitStub: ; preds = %LeafBlock
- store %struct.operator* %tmp66.i62.i, %struct.operator** %tmp66.i62.i.out
- ret i16 1
-
-bb12.i.i935.exitStub: ; preds = %LeafBlock1
- store %struct.operator* %tmp66.i62.i, %struct.operator** %tmp66.i62.i.out
- ret i16 2
-
-bb20.i.i937.exitStub: ; preds = %LeafBlock2
- store %struct.operator* %tmp66.i62.i, %struct.operator** %tmp66.i62.i.out
- ret i16 3
-
-bb28.i.i938.exitStub: ; preds = %LeafBlock3
- store %struct.operator* %tmp66.i62.i, %struct.operator** %tmp66.i62.i.out
- ret i16 4
-
-bb.i9.i.i932.ce: ; preds = %newFuncRoot
- %tmp1.i3.i.i930 = getelementptr %struct.list* %l_addr.01.0.i2.i.i929, i32 0, i32 0 ; <i8**> [#uses=1]
- %tmp2.i4.i.i931 = load i8** %tmp1.i3.i.i930 ; <i8*> [#uses=1]
- %tmp66.i62.i = bitcast i8* %tmp2.i4.i.i931 to %struct.operator* ; <%struct.operator*> [#uses=7]
- %tmp1.i6.i = getelementptr %struct.operator* %tmp66.i62.i, i32 0, i32 2 ; <i32*> [#uses=1]
- %tmp2.i7.i = load i32* %tmp1.i6.i ; <i32> [#uses=1]
- %tmp3.i8.i = load %struct.FILE** @outfile ; <%struct.FILE*> [#uses=1]
- %tmp5.i9.i = call i32 (%struct.FILE*, i8*, ...)* @fprintf( %struct.FILE* %tmp3.i8.i, i8* getelementptr ([11 x i8]* @str1, i32 0, i32 0), i32 %tmp2.i7.i ) ; <i32> [#uses=0]
- %tmp7.i10.i = getelementptr %struct.operator* %tmp66.i62.i, i32 0, i32 5 ; <i32*> [#uses=1]
- %tmp8.i11.i = load i32* %tmp7.i10.i ; <i32> [#uses=7]
- br label %NodeBlock5
-
-NodeBlock5: ; preds = %bb.i9.i.i932.ce
- icmp slt i32 %tmp8.i11.i, 1 ; <i1>:0 [#uses=1]
- br i1 %0, label %NodeBlock, label %NodeBlock4
-
-NodeBlock4: ; preds = %NodeBlock5
- icmp slt i32 %tmp8.i11.i, 2 ; <i1>:1 [#uses=1]
- br i1 %1, label %LeafBlock2, label %LeafBlock3
-
-LeafBlock3: ; preds = %NodeBlock4
- icmp eq i32 %tmp8.i11.i, 2 ; <i1>:2 [#uses=1]
- br i1 %2, label %bb28.i.i938.exitStub, label %NewDefault
-
-LeafBlock2: ; preds = %NodeBlock4
- icmp eq i32 %tmp8.i11.i, 1 ; <i1>:3 [#uses=1]
- br i1 %3, label %bb20.i.i937.exitStub, label %NewDefault
-
-NodeBlock: ; preds = %NodeBlock5
- icmp slt i32 %tmp8.i11.i, 0 ; <i1>:4 [#uses=1]
- br i1 %4, label %LeafBlock, label %LeafBlock1
-
-LeafBlock1: ; preds = %NodeBlock
- icmp eq i32 %tmp8.i11.i, 0 ; <i1>:5 [#uses=1]
- br i1 %5, label %bb12.i.i935.exitStub, label %NewDefault
-
-LeafBlock: ; preds = %NodeBlock
- icmp eq i32 %tmp8.i11.i, -1 ; <i1>:6 [#uses=1]
- br i1 %6, label %bb.i14.i.exitStub, label %NewDefault
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-02-19-LiveIntervalAssert.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-02-19-LiveIntervalAssert.ll
deleted file mode 100644
index 954c95d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-02-19-LiveIntervalAssert.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-pc-linux-gnu -relocation-model=pic
-; PR1027
-
- %struct._IO_FILE = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker*, %struct._IO_FILE*, i32, i32, i32, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i8*, i8*, i32, i32, [40 x i8] }
- %struct._IO_marker = type { %struct._IO_marker*, %struct._IO_FILE*, i32 }
-@stderr = external global %struct._IO_FILE*
-
-define void @__eprintf(i8* %string, i8* %expression, i32 %line, i8* %filename) {
- %tmp = load %struct._IO_FILE** @stderr
- %tmp5 = tail call i32 (%struct._IO_FILE*, i8*, ...)* @fprintf( %struct._IO_FILE* %tmp, i8* %string, i8* %expression, i32 %line, i8* %filename )
- %tmp6 = load %struct._IO_FILE** @stderr
- %tmp7 = tail call i32 @fflush( %struct._IO_FILE* %tmp6 )
- tail call void @abort( )
- unreachable
-}
-
-declare i32 @fprintf(%struct._IO_FILE*, i8*, ...)
-
-declare i32 @fflush(%struct._IO_FILE*)
-
-declare void @abort()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-02-23-DAGCombine-Miscompile.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-02-23-DAGCombine-Miscompile.ll
deleted file mode 100644
index a8f0e57..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-02-23-DAGCombine-Miscompile.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; PR1219
-; RUN: llc < %s -march=x86 | grep {movl \$1, %eax}
-
-define i32 @test(i1 %X) {
-old_entry1:
- %hvar2 = zext i1 %X to i32
- %C = icmp sgt i32 %hvar2, -1
- br i1 %C, label %cond_true15, label %cond_true
-cond_true15:
- ret i32 1
-cond_true:
- ret i32 2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-02-25-FastCCStack.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-02-25-FastCCStack.ll
deleted file mode 100644
index 2e2b56d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-02-25-FastCCStack.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=pentium3
-
-define internal fastcc double @ggc_rlimit_bound(double %limit) {
- ret double %limit
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-01-SpillerCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-03-01-SpillerCrash.ll
deleted file mode 100644
index 112d1ab..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-01-SpillerCrash.ll
+++ /dev/null
@@ -1,86 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin8 -mattr=+sse2
-; RUN: llc < %s -mtriple=x86_64-apple-darwin8 -mattr=+sse2 | not grep movhlps
-
-define void @test() nounwind {
-test.exit:
- fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:0 [#uses=4]
- load <4 x float>* null ; <<4 x float>>:1 [#uses=1]
- shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:2 [#uses=1]
- fmul <4 x float> %0, %2 ; <<4 x float>>:3 [#uses=1]
- fsub <4 x float> zeroinitializer, %3 ; <<4 x float>>:4 [#uses=1]
- fmul <4 x float> %4, zeroinitializer ; <<4 x float>>:5 [#uses=2]
- bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:6 [#uses=1]
- and <4 x i32> %6, < i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647 > ; <<4 x i32>>:7 [#uses=1]
- bitcast <4 x i32> %7 to <4 x float> ; <<4 x float>>:8 [#uses=2]
- extractelement <4 x float> %8, i32 0 ; <float>:9 [#uses=1]
- extractelement <4 x float> %8, i32 1 ; <float>:10 [#uses=2]
- br i1 false, label %11, label %19
-
-; <label>:11 ; preds = %test.exit
- br i1 false, label %17, label %12
-
-; <label>:12 ; preds = %11
- br i1 false, label %19, label %13
-
-; <label>:13 ; preds = %12
- fsub float -0.000000e+00, 0.000000e+00 ; <float>:14 [#uses=1]
- %tmp207 = extractelement <4 x float> zeroinitializer, i32 0 ; <float> [#uses=1]
- %tmp208 = extractelement <4 x float> zeroinitializer, i32 2 ; <float> [#uses=1]
- fsub float -0.000000e+00, %tmp208 ; <float>:15 [#uses=1]
- %tmp155 = extractelement <4 x float> zeroinitializer, i32 0 ; <float> [#uses=1]
- %tmp156 = extractelement <4 x float> zeroinitializer, i32 2 ; <float> [#uses=1]
- fsub float -0.000000e+00, %tmp156 ; <float>:16 [#uses=1]
- br label %19
-
-; <label>:17 ; preds = %11
- br i1 false, label %19, label %18
-
-; <label>:18 ; preds = %17
- br label %19
-
-; <label>:19 ; preds = %18, %17, %13, %12, %test.exit
- phi i32 [ 5, %18 ], [ 3, %13 ], [ 1, %test.exit ], [ 2, %12 ], [ 4, %17 ] ; <i32>:20 [#uses=0]
- phi float [ 0.000000e+00, %18 ], [ %16, %13 ], [ 0.000000e+00, %test.exit ], [ 0.000000e+00, %12 ], [ 0.000000e+00, %17 ] ; <float>:21 [#uses=1]
- phi float [ 0.000000e+00, %18 ], [ %tmp155, %13 ], [ 0.000000e+00, %test.exit ], [ 0.000000e+00, %12 ], [ 0.000000e+00, %17 ] ; <float>:22 [#uses=1]
- phi float [ 0.000000e+00, %18 ], [ %15, %13 ], [ 0.000000e+00, %test.exit ], [ 0.000000e+00, %12 ], [ 0.000000e+00, %17 ] ; <float>:23 [#uses=1]
- phi float [ 0.000000e+00, %18 ], [ %tmp207, %13 ], [ 0.000000e+00, %test.exit ], [ 0.000000e+00, %12 ], [ 0.000000e+00, %17 ] ; <float>:24 [#uses=1]
- phi float [ 0.000000e+00, %18 ], [ %10, %13 ], [ %9, %test.exit ], [ %10, %12 ], [ 0.000000e+00, %17 ] ; <float>:25 [#uses=2]
- phi float [ 0.000000e+00, %18 ], [ %14, %13 ], [ 0.000000e+00, %test.exit ], [ 0.000000e+00, %12 ], [ 0.000000e+00, %17 ] ; <float>:26 [#uses=1]
- phi float [ 0.000000e+00, %18 ], [ 0.000000e+00, %13 ], [ 0.000000e+00, %test.exit ], [ 0.000000e+00, %12 ], [ 0.000000e+00, %17 ] ; <float>:27 [#uses=1]
- insertelement <4 x float> undef, float %27, i32 0 ; <<4 x float>>:28 [#uses=1]
- insertelement <4 x float> %28, float %26, i32 1 ; <<4 x float>>:29 [#uses=0]
- insertelement <4 x float> undef, float %24, i32 0 ; <<4 x float>>:30 [#uses=1]
- insertelement <4 x float> %30, float %23, i32 1 ; <<4 x float>>:31 [#uses=1]
- insertelement <4 x float> %31, float %25, i32 2 ; <<4 x float>>:32 [#uses=1]
- insertelement <4 x float> %32, float %25, i32 3 ; <<4 x float>>:33 [#uses=1]
- fdiv <4 x float> %33, zeroinitializer ; <<4 x float>>:34 [#uses=1]
- fmul <4 x float> %34, < float 5.000000e-01, float 5.000000e-01, float 5.000000e-01, float 5.000000e-01 > ; <<4 x float>>:35 [#uses=1]
- insertelement <4 x float> undef, float %22, i32 0 ; <<4 x float>>:36 [#uses=1]
- insertelement <4 x float> %36, float %21, i32 1 ; <<4 x float>>:37 [#uses=0]
- br i1 false, label %foo.exit, label %38
-
-; <label>:38 ; preds = %19
- extractelement <4 x float> %0, i32 0 ; <float>:39 [#uses=1]
- fcmp ogt float %39, 0.000000e+00 ; <i1>:40 [#uses=1]
- extractelement <4 x float> %0, i32 2 ; <float>:41 [#uses=1]
- extractelement <4 x float> %0, i32 1 ; <float>:42 [#uses=1]
- fsub float -0.000000e+00, %42 ; <float>:43 [#uses=2]
- %tmp189 = extractelement <4 x float> %5, i32 2 ; <float> [#uses=1]
- br i1 %40, label %44, label %46
-
-; <label>:44 ; preds = %38
- fsub float -0.000000e+00, %tmp189 ; <float>:45 [#uses=0]
- br label %foo.exit
-
-; <label>:46 ; preds = %38
- %tmp192 = extractelement <4 x float> %5, i32 1 ; <float> [#uses=1]
- fsub float -0.000000e+00, %tmp192 ; <float>:47 [#uses=1]
- br label %foo.exit
-
-foo.exit: ; preds = %46, %44, %19
- phi float [ 0.000000e+00, %44 ], [ %47, %46 ], [ 0.000000e+00, %19 ] ; <float>:48 [#uses=0]
- phi float [ %43, %44 ], [ %43, %46 ], [ 0.000000e+00, %19 ] ; <float>:49 [#uses=0]
- phi float [ 0.000000e+00, %44 ], [ %41, %46 ], [ 0.000000e+00, %19 ] ; <float>:50 [#uses=0]
- shufflevector <4 x float> %35, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>>:51 [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll
deleted file mode 100644
index e1f8901..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll
+++ /dev/null
@@ -1,73 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-darwin | \
-; RUN: grep push | count 3
-
-define void @foo(i8** %buf, i32 %size, i32 %col, i8* %p) nounwind {
-entry:
- icmp sgt i32 %size, 0 ; <i1>:0 [#uses=1]
- br i1 %0, label %bb.preheader, label %return
-
-bb.preheader: ; preds = %entry
- %tmp5.sum72 = add i32 %col, 7 ; <i32> [#uses=1]
- %tmp5.sum71 = add i32 %col, 5 ; <i32> [#uses=1]
- %tmp5.sum70 = add i32 %col, 3 ; <i32> [#uses=1]
- %tmp5.sum69 = add i32 %col, 2 ; <i32> [#uses=1]
- %tmp5.sum68 = add i32 %col, 1 ; <i32> [#uses=1]
- %tmp5.sum66 = add i32 %col, 4 ; <i32> [#uses=1]
- %tmp5.sum = add i32 %col, 6 ; <i32> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb, %bb.preheader
- %i.073.0 = phi i32 [ 0, %bb.preheader ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
- %p_addr.076.0.rec = mul i32 %i.073.0, 9 ; <i32> [#uses=9]
- %p_addr.076.0 = getelementptr i8* %p, i32 %p_addr.076.0.rec ; <i8*> [#uses=1]
- %tmp2 = getelementptr i8** %buf, i32 %i.073.0 ; <i8**> [#uses=1]
- %tmp3 = load i8** %tmp2 ; <i8*> [#uses=8]
- %tmp5 = getelementptr i8* %tmp3, i32 %col ; <i8*> [#uses=1]
- %tmp7 = load i8* %p_addr.076.0 ; <i8> [#uses=1]
- store i8 %tmp7, i8* %tmp5
- %p_addr.076.0.sum93 = add i32 %p_addr.076.0.rec, 1 ; <i32> [#uses=1]
- %tmp11 = getelementptr i8* %p, i32 %p_addr.076.0.sum93 ; <i8*> [#uses=1]
- %tmp13 = load i8* %tmp11 ; <i8> [#uses=1]
- %tmp15 = getelementptr i8* %tmp3, i32 %tmp5.sum72 ; <i8*> [#uses=1]
- store i8 %tmp13, i8* %tmp15
- %p_addr.076.0.sum92 = add i32 %p_addr.076.0.rec, 2 ; <i32> [#uses=1]
- %tmp17 = getelementptr i8* %p, i32 %p_addr.076.0.sum92 ; <i8*> [#uses=1]
- %tmp19 = load i8* %tmp17 ; <i8> [#uses=1]
- %tmp21 = getelementptr i8* %tmp3, i32 %tmp5.sum71 ; <i8*> [#uses=1]
- store i8 %tmp19, i8* %tmp21
- %p_addr.076.0.sum91 = add i32 %p_addr.076.0.rec, 3 ; <i32> [#uses=1]
- %tmp23 = getelementptr i8* %p, i32 %p_addr.076.0.sum91 ; <i8*> [#uses=1]
- %tmp25 = load i8* %tmp23 ; <i8> [#uses=1]
- %tmp27 = getelementptr i8* %tmp3, i32 %tmp5.sum70 ; <i8*> [#uses=1]
- store i8 %tmp25, i8* %tmp27
- %p_addr.076.0.sum90 = add i32 %p_addr.076.0.rec, 4 ; <i32> [#uses=1]
- %tmp29 = getelementptr i8* %p, i32 %p_addr.076.0.sum90 ; <i8*> [#uses=1]
- %tmp31 = load i8* %tmp29 ; <i8> [#uses=1]
- %tmp33 = getelementptr i8* %tmp3, i32 %tmp5.sum69 ; <i8*> [#uses=2]
- store i8 %tmp31, i8* %tmp33
- %p_addr.076.0.sum89 = add i32 %p_addr.076.0.rec, 5 ; <i32> [#uses=1]
- %tmp35 = getelementptr i8* %p, i32 %p_addr.076.0.sum89 ; <i8*> [#uses=1]
- %tmp37 = load i8* %tmp35 ; <i8> [#uses=1]
- %tmp39 = getelementptr i8* %tmp3, i32 %tmp5.sum68 ; <i8*> [#uses=1]
- store i8 %tmp37, i8* %tmp39
- %p_addr.076.0.sum88 = add i32 %p_addr.076.0.rec, 6 ; <i32> [#uses=1]
- %tmp41 = getelementptr i8* %p, i32 %p_addr.076.0.sum88 ; <i8*> [#uses=1]
- %tmp43 = load i8* %tmp41 ; <i8> [#uses=1]
- store i8 %tmp43, i8* %tmp33
- %p_addr.076.0.sum87 = add i32 %p_addr.076.0.rec, 7 ; <i32> [#uses=1]
- %tmp47 = getelementptr i8* %p, i32 %p_addr.076.0.sum87 ; <i8*> [#uses=1]
- %tmp49 = load i8* %tmp47 ; <i8> [#uses=1]
- %tmp51 = getelementptr i8* %tmp3, i32 %tmp5.sum66 ; <i8*> [#uses=1]
- store i8 %tmp49, i8* %tmp51
- %p_addr.076.0.sum = add i32 %p_addr.076.0.rec, 8 ; <i32> [#uses=1]
- %tmp53 = getelementptr i8* %p, i32 %p_addr.076.0.sum ; <i8*> [#uses=1]
- %tmp55 = load i8* %tmp53 ; <i8> [#uses=1]
- %tmp57 = getelementptr i8* %tmp3, i32 %tmp5.sum ; <i8*> [#uses=1]
- store i8 %tmp55, i8* %tmp57
- %indvar.next = add i32 %i.073.0, 1 ; <i32> [#uses=2]
- icmp eq i32 %indvar.next, %size ; <i1>:1 [#uses=1]
- br i1 %1, label %return, label %bb
-
-return: ; preds = %bb, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-16-InlineAsm.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-03-16-InlineAsm.ll
deleted file mode 100644
index 9580726..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-16-InlineAsm.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=x86
-
-; ModuleID = 'a.bc'
-
-define i32 @foo(i32 %A, i32 %B) {
-entry:
- %A_addr = alloca i32 ; <i32*> [#uses=2]
- %B_addr = alloca i32 ; <i32*> [#uses=1]
- %retval = alloca i32, align 4 ; <i32*> [#uses=2]
- %tmp = alloca i32, align 4 ; <i32*> [#uses=2]
- %ret = alloca i32, align 4 ; <i32*> [#uses=2]
- "alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %A, i32* %A_addr
- store i32 %B, i32* %B_addr
- %tmp1 = load i32* %A_addr ; <i32> [#uses=1]
- %tmp2 = call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"( i32 7, i32 %tmp1 ) ; <i32> [#uses=1]
- store i32 %tmp2, i32* %ret
- %tmp3 = load i32* %ret ; <i32> [#uses=1]
- store i32 %tmp3, i32* %tmp
- %tmp4 = load i32* %tmp ; <i32> [#uses=1]
- store i32 %tmp4, i32* %retval
- br label %return
-
-return: ; preds = %entry
- %retval5 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-18-LiveIntervalAssert.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-03-18-LiveIntervalAssert.ll
deleted file mode 100644
index 70936fb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-18-LiveIntervalAssert.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR1259
-
-define void @test() {
- %tmp2 = call i32 asm "...", "=r,~{dirflag},~{fpsr},~{flags},~{dx},~{cx},~{ax}"( )
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-24-InlineAsmMultiRegConstraint.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-03-24-InlineAsmMultiRegConstraint.ll
deleted file mode 100644
index 44d68dd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-24-InlineAsmMultiRegConstraint.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define i32 @test(i16 %tmp40414244) {
- %tmp48 = call i32 asm sideeffect "inl ${1:w}, $0", "={ax},N{dx},~{dirflag},~{fpsr},~{flags}"( i16 %tmp40414244 )
- ret i32 %tmp48
-}
-
-define i32 @test2(i16 %tmp40414244) {
- %tmp48 = call i32 asm sideeffect "inl ${1:w}, $0", "={ax},N{dx},~{dirflag},~{fpsr},~{flags}"( i16 14 )
- ret i32 %tmp48
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-24-InlineAsmPModifier.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-03-24-InlineAsmPModifier.ll
deleted file mode 100644
index 3312e01..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-24-InlineAsmPModifier.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {mov %gs:72, %eax}
-target datalayout = "e-p:32:32"
-target triple = "i686-apple-darwin9"
-
-define void @test() {
- %tmp1 = tail call i32* asm sideeffect "mov %gs:${1:P}, $0", "=r,i,~{dirflag},~{fpsr},~{flags}"( i32 72 ) ; <%struct._pthread*> [#uses=1]
- ret void
-}
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-24-InlineAsmVectorOp.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-03-24-InlineAsmVectorOp.ll
deleted file mode 100644
index c1b1ad1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-24-InlineAsmVectorOp.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -mcpu=yonah -march=x86 | \
-; RUN: grep {cmpltsd %xmm0, %xmm0}
-target datalayout = "e-p:32:32"
-target triple = "i686-apple-darwin9"
-
-
-define void @acoshf() {
- %tmp19 = tail call <2 x double> asm sideeffect "pcmpeqd $0, $0 \0A\09 cmpltsd $0, $0", "=x,0,~{dirflag},~{fpsr},~{flags}"( <2 x double> zeroinitializer ) ; <<2 x double>> [#uses=0]
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-24-InlineAsmXConstraint.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-03-24-InlineAsmXConstraint.ll
deleted file mode 100644
index 30453d5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-24-InlineAsmXConstraint.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {psrlw \$8, %xmm0}
-target datalayout = "e-p:32:32"
-target triple = "i686-apple-darwin9"
-
-define void @test() {
- tail call void asm sideeffect "psrlw $0, %xmm0", "X,~{dirflag},~{fpsr},~{flags}"( i32 8 )
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-26-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-03-26-CoalescerBug.ll
deleted file mode 100644
index 9676f14..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-03-26-CoalescerBug.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -march=x86
-
-@data = external global [339 x i64]
-
-define void @foo(...) {
-bb1:
- %t43 = load i64* getelementptr ([339 x i64]* @data, i32 0, i64 212), align 4
- br i1 false, label %bb80, label %bb6
-bb6:
- br i1 false, label %bb38, label %bb265
-bb265:
- ret void
-bb38:
- br i1 false, label %bb80, label %bb49
-bb80:
- br i1 false, label %bb146, label %bb268
-bb49:
- ret void
-bb113:
- ret void
-bb268:
- %t1062 = shl i64 %t43, 3
- %t1066 = shl i64 0, 3
- br label %bb85
-bb85:
- %t1025 = phi i64 [ 0, %bb268 ], [ %t102.0, %bb234 ]
- %t1028 = phi i64 [ 0, %bb268 ], [ %t1066, %bb234 ]
- %t1031 = phi i64 [ 0, %bb268 ], [ %t103.0, %bb234 ]
- %t1034 = phi i64 [ 0, %bb268 ], [ %t1066, %bb234 ]
- %t102.0 = add i64 %t1028, %t1025
- %t103.0 = add i64 %t1034, %t1031
- br label %bb86
-bb86:
- %t108.0 = phi i64 [ %t102.0, %bb85 ], [ %t1139, %bb248 ]
- %t110.0 = phi i64 [ %t103.0, %bb85 ], [ %t1142, %bb248 ]
- br label %bb193
-bb193:
- %t1081 = add i64 %t110.0, -8
- %t1087 = add i64 %t108.0, -8
- br i1 false, label %bb193, label %bb248
-bb248:
- %t1139 = add i64 %t108.0, %t1062
- %t1142 = add i64 %t110.0, %t1062
- br i1 false, label %bb86, label %bb234
-bb234:
- br i1 false, label %bb85, label %bb113
-bb146:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-04-08-InlineAsmCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-04-08-InlineAsmCrash.ll
deleted file mode 100644
index 9f09e88..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-04-08-InlineAsmCrash.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s
-; PR1314
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "x86_64-unknown-linux-gnu"
- %struct.CycleCount = type { i64, i64 }
- %struct.bc_struct = type { i32, i32, i32, i32, %struct.bc_struct*, i8*, i8* }
-@_programStartTime = external global %struct.CycleCount ; <%struct.CycleCount*> [#uses=1]
-
-define fastcc i32 @bc_divide(%struct.bc_struct* %n1, %struct.bc_struct* %n2, %struct.bc_struct** %quot, i32 %scale) nounwind {
-entry:
- %tmp7.i46 = tail call i64 asm sideeffect ".byte 0x0f,0x31", "={dx},=*{ax},~{dirflag},~{fpsr},~{flags}"( i64* getelementptr (%struct.CycleCount* @_programStartTime, i32 0, i32 1) ) ; <i64> [#uses=0]
- %tmp221 = sdiv i32 10, 0 ; <i32> [#uses=1]
- tail call fastcc void @_one_mult( i8* null, i32 0, i32 %tmp221, i8* null )
- ret i32 0
-}
-
-declare fastcc void @_one_mult(i8*, i32, i32, i8*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-04-11-InlineAsmVectorResult.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-04-11-InlineAsmVectorResult.ll
deleted file mode 100644
index f48c132..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-04-11-InlineAsmVectorResult.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
-
-define void @test(<4 x float> %tmp42i) {
- %tmp42 = call <4 x float> asm "movss $1, $0", "=x,m,~{dirflag},~{fpsr},~{flags}"( float* null ) ; <<4 x float>> [#uses=1]
- %tmp49 = shufflevector <4 x float> %tmp42, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb, %cond_true10
- %tmp52 = bitcast <4 x float> %tmp49 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp53 = call <4 x i32> @llvm.x86.sse2.psll.d( <4 x i32> %tmp52, <4 x i32> < i32 8, i32 undef, i32 undef, i32 undef > ) ; <<4 x i32>> [#uses=1]
- %tmp105 = bitcast <4 x i32> %tmp53 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp108 = fsub <4 x float> zeroinitializer, %tmp105 ; <<4 x float>> [#uses=0]
- br label %bb
-
-return: ; preds = %entry
- ret void
-}
-
-declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-04-13-SwitchLowerBadPhi.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-04-13-SwitchLowerBadPhi.ll
deleted file mode 100644
index a0b1403..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-04-13-SwitchLowerBadPhi.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -O0
-; PR 1323
-
-; ModuleID = 'test.bc'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
- %struct.comp = type { i8*, i32, i8*, [3 x i8], i32 }
-
-define void @regbranch() {
-cond_next240.i:
- br i1 false, label %cond_true251.i, label %cond_next272.i
-
-cond_true251.i: ; preds = %cond_next240.i
- switch i8 0, label %cond_next272.i [
- i8 42, label %bb268.i
- i8 43, label %bb268.i
- i8 63, label %bb268.i
- ]
-
-bb268.i: ; preds = %cond_true251.i, %cond_true251.i, %cond_true251.i
- br label %cond_next272.i
-
-cond_next272.i: ; preds = %bb268.i, %cond_true251.i, %cond_next240.i
- %len.2.i = phi i32 [ 0, %bb268.i ], [ 0, %cond_next240.i ], [ 0, %cond_true251.i ] ; <i32> [#uses=1]
- %tmp278.i = icmp eq i32 %len.2.i, 1 ; <i1> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-04-17-LiveIntervalAssert.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-04-17-LiveIntervalAssert.ll
deleted file mode 100644
index 4604f46..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-04-17-LiveIntervalAssert.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin -relocation-model=pic --disable-fp-elim
-
- %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
- %struct.partition_def = type { i32, [1 x %struct.partition_elem] }
- %struct.partition_elem = type { i32, %struct.partition_elem*, i32 }
-
-define void @partition_print(%struct.partition_def* %part) {
-entry:
- br i1 false, label %bb.preheader, label %bb99
-
-bb.preheader: ; preds = %entry
- br i1 false, label %cond_true, label %cond_next90
-
-cond_true: ; preds = %bb.preheader
- br i1 false, label %bb32, label %bb87.critedge
-
-bb32: ; preds = %bb32, %cond_true
- %i.2115.0 = phi i32 [ 0, %cond_true ], [ %indvar.next127, %bb32 ] ; <i32> [#uses=1]
- %c.2112.0 = phi i32 [ 0, %cond_true ], [ %tmp49, %bb32 ] ; <i32> [#uses=1]
- %tmp43 = getelementptr %struct.partition_def* %part, i32 0, i32 1, i32 %c.2112.0, i32 1 ; <%struct.partition_elem**> [#uses=1]
- %tmp44 = load %struct.partition_elem** %tmp43 ; <%struct.partition_elem*> [#uses=1]
- %tmp4445 = ptrtoint %struct.partition_elem* %tmp44 to i32 ; <i32> [#uses=1]
- %tmp48 = sub i32 %tmp4445, 0 ; <i32> [#uses=1]
- %tmp49 = sdiv i32 %tmp48, 12 ; <i32> [#uses=1]
- %indvar.next127 = add i32 %i.2115.0, 1 ; <i32> [#uses=2]
- %exitcond128 = icmp eq i32 %indvar.next127, 0 ; <i1> [#uses=1]
- br i1 %exitcond128, label %bb58, label %bb32
-
-bb58: ; preds = %bb32
- ret void
-
-bb87.critedge: ; preds = %cond_true
- ret void
-
-cond_next90: ; preds = %bb.preheader
- ret void
-
-bb99: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-04-24-Huge-Stack.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-04-24-Huge-Stack.ll
deleted file mode 100644
index 7528129..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-04-24-Huge-Stack.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86-64 | not grep 4294967112
-; PR1348
-
- %struct.md5_ctx = type { i32, i32, i32, i32, [2 x i32], i32, [128 x i8], [4294967288 x i8] }
-
-define i8* @md5_buffer(i8* %buffer, i64 %len, i8* %resblock) {
-entry:
- %ctx = alloca %struct.md5_ctx, align 16 ; <%struct.md5_ctx*> [#uses=3]
- call void @md5_init_ctx( %struct.md5_ctx* %ctx )
- call void @md5_process_bytes( i8* %buffer, i64 %len, %struct.md5_ctx* %ctx )
- %tmp4 = call i8* @md5_finish_ctx( %struct.md5_ctx* %ctx, i8* %resblock ) ; <i8*> [#uses=1]
- ret i8* %tmp4
-}
-
-declare void @md5_init_ctx(%struct.md5_ctx*)
-
-declare i8* @md5_finish_ctx(%struct.md5_ctx*, i8*)
-
-declare void @md5_process_bytes(i8*, i64, %struct.md5_ctx*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-04-24-VectorCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-04-24-VectorCrash.ll
deleted file mode 100644
index e38992d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-04-24-VectorCrash.ll
+++ /dev/null
@@ -1,63 +0,0 @@
-; RUN: llc < %s -mcpu=yonah
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
-
-declare <4 x float> @llvm.x86.sse.add.ss(<4 x float>, <4 x float>)
-
-define void @test(float* %P) {
-entry:
- or <4 x i32> zeroinitializer, and (<4 x i32> bitcast (<4 x float> shufflevector (<4 x float> undef, <4 x float> undef, <4 x i32> zeroinitializer) to <4 x i32>), <4 x i32> < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 >) ; <<4 x i32>>:0 [#uses=1]
- bitcast <4 x i32> %0 to <4 x float> ; <<4 x float>>:1 [#uses=1]
- fsub <4 x float> %1, zeroinitializer ; <<4 x float>>:2 [#uses=1]
- fsub <4 x float> shufflevector (<4 x float> undef, <4 x float> undef, <4 x i32> zeroinitializer), %2 ; <<4 x float>>:3 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %3, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:4 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %4, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:5 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %5, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:6 [#uses=1]
- shufflevector <4 x float> %6, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:7 [#uses=1]
- shufflevector <4 x float> %7, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:8 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %8, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:9 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %9, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:10 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %10, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:11 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %11, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:12 [#uses=1]
- shufflevector <4 x float> %12, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:13 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %13, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:14 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %14, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:15 [#uses=1]
- shufflevector <4 x float> %15, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:16 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %16, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:17 [#uses=1]
- shufflevector <4 x float> %17, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:18 [#uses=1]
- shufflevector <4 x float> %18, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:19 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %19, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:20 [#uses=1]
- shufflevector <4 x float> %20, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:21 [#uses=1]
- shufflevector <4 x float> %21, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:22 [#uses=1]
- fmul <4 x float> %22, zeroinitializer ; <<4 x float>>:23 [#uses=1]
- shufflevector <4 x float> %23, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>>:24 [#uses=1]
- call <4 x float> @llvm.x86.sse.add.ss( <4 x float> zeroinitializer, <4 x float> %24 ) ; <<4 x float>>:25 [#uses=1]
- shufflevector <4 x float> %25, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:26 [#uses=1]
- shufflevector <4 x float> %26, <4 x float> zeroinitializer, <4 x i32> zeroinitializer ; <<4 x float>>:27 [#uses=1]
- shufflevector <4 x float> %27, <4 x float> zeroinitializer, <4 x i32> < i32 4, i32 1, i32 6, i32 7 > ; <<4 x float>>:28 [#uses=1]
- fmul <4 x float> zeroinitializer, %28 ; <<4 x float>>:29 [#uses=1]
- fadd <4 x float> %29, zeroinitializer ; <<4 x float>>:30 [#uses=1]
- fmul <4 x float> zeroinitializer, %30 ; <<4 x float>>:31 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %31, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:32 [#uses=1]
- fmul <4 x float> zeroinitializer, %32 ; <<4 x float>>:33 [#uses=1]
- shufflevector <4 x float> %33, <4 x float> zeroinitializer, <4 x i32> zeroinitializer ; <<4 x float>>:34 [#uses=1]
- fmul <4 x float> zeroinitializer, %34 ; <<4 x float>>:35 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %35, <4 x i32> < i32 0, i32 1, i32 6, i32 7 > ; <<4 x float>>:36 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %36, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:37 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %37, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:38 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %38, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:39 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %39, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:40 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %40, <4 x i32> < i32 4, i32 1, i32 6, i32 7 > ; <<4 x float>>:41 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %41, <4 x i32> < i32 4, i32 1, i32 6, i32 7 > ; <<4 x float>>:42 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %42, <4 x i32> < i32 4, i32 1, i32 6, i32 7 > ; <<4 x float>>:43 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %43, <4 x i32> < i32 4, i32 1, i32 6, i32 7 > ; <<4 x float>>:44 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %44, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:45 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %45, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:46 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %46, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:47 [#uses=1]
- shufflevector <4 x float> zeroinitializer, <4 x float> %47, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:48 [#uses=1]
- shufflevector <4 x float> %48, <4 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x float>>:49 [#uses=1]
- fadd <4 x float> %49, zeroinitializer ; <<4 x float>>:50 [#uses=1]
- %tmp5845 = extractelement <4 x float> %50, i32 2 ; <float> [#uses=1]
- store float %tmp5845, float* %P
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll
deleted file mode 100644
index 113d0eb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-04-25-MMX-PADDQ.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -o - -march=x86 -mattr=+mmx | grep paddq | count 2
-; RUN: llc < %s -o - -march=x86 -mattr=+mmx | grep movq | count 2
-
-define <1 x i64> @unsigned_add3(<1 x i64>* %a, <1 x i64>* %b, i32 %count) {
-entry:
- %tmp2942 = icmp eq i32 %count, 0 ; <i1> [#uses=1]
- br i1 %tmp2942, label %bb31, label %bb26
-
-bb26: ; preds = %bb26, %entry
- %i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ] ; <i32> [#uses=3]
- %sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ] ; <<1 x i64>> [#uses=1]
- %tmp13 = getelementptr <1 x i64>* %b, i32 %i.037.0 ; <<1 x i64>*> [#uses=1]
- %tmp14 = load <1 x i64>* %tmp13 ; <<1 x i64>> [#uses=1]
- %tmp18 = getelementptr <1 x i64>* %a, i32 %i.037.0 ; <<1 x i64>*> [#uses=1]
- %tmp19 = load <1 x i64>* %tmp18 ; <<1 x i64>> [#uses=1]
- %tmp21 = add <1 x i64> %tmp19, %tmp14 ; <<1 x i64>> [#uses=1]
- %tmp22 = add <1 x i64> %tmp21, %sum.035.0 ; <<1 x i64>> [#uses=2]
- %tmp25 = add i32 %i.037.0, 1 ; <i32> [#uses=2]
- %tmp29 = icmp ult i32 %tmp25, %count ; <i1> [#uses=1]
- br i1 %tmp29, label %bb26, label %bb31
-
-bb31: ; preds = %bb26, %entry
- %sum.035.1 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ] ; <<1 x i64>> [#uses=1]
- ret <1 x i64> %sum.035.1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-04-27-InlineAsm-IntMemInput.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-04-27-InlineAsm-IntMemInput.ll
deleted file mode 100644
index 85a2ecc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-04-27-InlineAsm-IntMemInput.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s | not grep {bsrl.*10}
-; PR1356
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
-
-define i32 @main() {
-entry:
- %tmp4 = tail call i32 asm "bsrl $1, $0", "=r,ro,~{dirflag},~{fpsr},~{flags},~{cc}"( i32 10 ) ; <i32> [#uses=1]
- ret i32 %tmp4
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-05-05-Personality.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-05-05-Personality.ll
deleted file mode 100644
index c92783e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-05-05-Personality.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s -mtriple=i686-pc-linux-gnu -enable-eh -o - | grep zPL
-
-@error = external global i8 ; <i8*> [#uses=2]
-
-define void @_ada_x() {
-entry:
- invoke void @raise( )
- to label %eh_then unwind label %unwind
-
-unwind: ; preds = %entry
- %eh_ptr = tail call i8* @llvm.eh.exception( ) ; <i8*> [#uses=2]
- %eh_select = tail call i32 (i8*, i8*, ...)* @llvm.eh.selector.i32( i8* %eh_ptr, i8* bitcast (i32 (...)* @__gnat_eh_personality to i8*), i8* @error ) ; <i32> [#uses=1]
- %eh_typeid = tail call i32 @llvm.eh.typeid.for.i32( i8* @error ) ; <i32> [#uses=1]
- %tmp2 = icmp eq i32 %eh_select, %eh_typeid ; <i1> [#uses=1]
- br i1 %tmp2, label %eh_then, label %Unwind
-
-eh_then: ; preds = %unwind, %entry
- ret void
-
-Unwind: ; preds = %unwind
- tail call i32 (...)* @_Unwind_Resume( i8* %eh_ptr ) ; <i32>:0 [#uses=0]
- unreachable
-}
-
-declare void @raise()
-
-declare i8* @llvm.eh.exception()
-
-declare i32 @llvm.eh.selector.i32(i8*, i8*, ...)
-
-declare i32 @llvm.eh.typeid.for.i32(i8*)
-
-declare i32 @__gnat_eh_personality(...)
-
-declare i32 @_Unwind_Resume(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-05-05-VecCastExpand.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-05-05-VecCastExpand.ll
deleted file mode 100644
index e58b193..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-05-05-VecCastExpand.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=i386 -mattr=+sse
-; PR1371
-
-@str = external global [18 x i8] ; <[18 x i8]*> [#uses=1]
-
-define void @test() {
-bb.i:
- %tmp.i660 = load <4 x float>* null ; <<4 x float>> [#uses=1]
- call void (i32, ...)* @printf( i32 0, i8* getelementptr ([18 x i8]* @str, i32 0, i64 0), double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00 )
- %tmp152.i = load <4 x i32>* null ; <<4 x i32>> [#uses=1]
- %tmp156.i = bitcast <4 x i32> %tmp152.i to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp175.i = bitcast <4 x float> %tmp.i660 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp176.i = xor <4 x i32> %tmp156.i, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
- %tmp177.i = and <4 x i32> %tmp176.i, %tmp175.i ; <<4 x i32>> [#uses=1]
- %tmp190.i = or <4 x i32> %tmp177.i, zeroinitializer ; <<4 x i32>> [#uses=1]
- %tmp191.i = bitcast <4 x i32> %tmp190.i to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp191.i, <4 x float>* null
- ret void
-}
-
-declare void @printf(i32, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-05-07-InvokeSRet.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-05-07-InvokeSRet.ll
deleted file mode 100644
index a3ff2f6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-05-07-InvokeSRet.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mtriple=i686-pc-linux-gnu -enable-eh -disable-fp-elim | not grep {addl .12, %esp}
-; PR1398
-
- %struct.S = type { i32, i32 }
-
-declare void @invokee(%struct.S* sret )
-
-define void @invoker(%struct.S* %name.0.0) {
-entry:
- invoke void @invokee( %struct.S* %name.0.0 sret )
- to label %return unwind label %return
-
-return: ; preds = %entry, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-05-14-LiveIntervalAssert.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-05-14-LiveIntervalAssert.ll
deleted file mode 100644
index 8ef2538..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-05-14-LiveIntervalAssert.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=x86-64
-
- %struct.XDesc = type <{ i32, %struct.OpaqueXDataStorageType** }>
- %struct.OpaqueXDataStorageType = type opaque
-
-declare i16 @GetParamDesc(%struct.XDesc*, i32, i32, %struct.XDesc*) signext
-
-declare void @r_raise(i64, i8*, ...)
-
-define i64 @app_send_event(i64 %self, i64 %event_class, i64 %event_id, i64 %params, i64 %need_retval) {
-entry:
- br i1 false, label %cond_true109, label %bb83.preheader
-
-bb83.preheader: ; preds = %entry
- ret i64 0
-
-cond_true109: ; preds = %entry
- br i1 false, label %cond_next164, label %cond_true239
-
-cond_next164: ; preds = %cond_true109
- %tmp176 = call i16 @GetParamDesc( %struct.XDesc* null, i32 1701999219, i32 1413830740, %struct.XDesc* null ) signext ; <i16> [#uses=0]
- call void (i64, i8*, ...)* @r_raise( i64 0, i8* null )
- unreachable
-
-cond_true239: ; preds = %cond_true109
- ret i64 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-05-15-maskmovq.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-05-15-maskmovq.ll
deleted file mode 100644
index 2093b8f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-05-15-maskmovq.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -mcpu=yonah
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
-
-define void @test(<1 x i64> %c64, <1 x i64> %mask1, i8* %P) {
-entry:
- %tmp4 = bitcast <1 x i64> %mask1 to <8 x i8> ; <<8 x i8>> [#uses=1]
- %tmp6 = bitcast <1 x i64> %c64 to <8 x i8> ; <<8 x i8>> [#uses=1]
- tail call void @llvm.x86.mmx.maskmovq( <8 x i8> %tmp6, <8 x i8> %tmp4, i8* %P )
- ret void
-}
-
-declare void @llvm.x86.mmx.maskmovq(<8 x i8>, <8 x i8>, i8*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-05-17-ShuffleISelBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-05-17-ShuffleISelBug.ll
deleted file mode 100644
index b27ef83..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-05-17-ShuffleISelBug.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep punpckhwd
-
-declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>)
-
-declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>)
-
-define fastcc void @test(i32* %src, i32 %sbpr, i32* %dst, i32 %dbpr, i32 %w, i32 %h, i32 %dstalpha, i32 %mask) {
- %tmp633 = shufflevector <8 x i16> zeroinitializer, <8 x i16> undef, <8 x i32> < i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7 >
- %tmp715 = mul <8 x i16> zeroinitializer, %tmp633
- %tmp776 = bitcast <8 x i16> %tmp715 to <4 x i32>
- %tmp777 = add <4 x i32> %tmp776, shufflevector (<4 x i32> < i32 65537, i32 0, i32 0, i32 0 >, <4 x i32> < i32 65537, i32 0, i32 0, i32 0 >, <4 x i32> zeroinitializer)
- %tmp805 = add <4 x i32> %tmp777, zeroinitializer
- %tmp832 = bitcast <4 x i32> %tmp805 to <8 x i16>
- %tmp838 = tail call <8 x i16> @llvm.x86.sse2.psrl.w( <8 x i16> %tmp832, <8 x i16> < i16 8, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef > )
- %tmp1020 = tail call <16 x i8> @llvm.x86.sse2.packuswb.128( <8 x i16> zeroinitializer, <8 x i16> %tmp838 )
- %tmp1030 = bitcast <16 x i8> %tmp1020 to <4 x i32>
- %tmp1033 = add <4 x i32> zeroinitializer, %tmp1030
- %tmp1048 = bitcast <4 x i32> %tmp1033 to <2 x i64>
- %tmp1049 = or <2 x i64> %tmp1048, zeroinitializer
- store <2 x i64> %tmp1049, <2 x i64>* null
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-06-04-X86-64-CtorAsmBugs.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-06-04-X86-64-CtorAsmBugs.ll
deleted file mode 100644
index 321e116..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-06-04-X86-64-CtorAsmBugs.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | not grep GOTPCREL
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | grep ".align.*3"
-
- %struct.A = type { [1024 x i8] }
-@_ZN1A1aE = global %struct.A zeroinitializer, align 32 ; <%struct.A*> [#uses=1]
-@llvm.global_ctors = appending global [1 x { i32, void ()* }] [ { i32, void ()* } { i32 65535, void ()* @_GLOBAL__I__ZN1A1aE } ] ; <[1 x { i32, void ()* }]*> [#uses=0]
-
-define internal void @_GLOBAL__I__ZN1A1aE() section "__TEXT,__StaticInit,regular,pure_instructions" {
-entry:
- br label %bb.i
-
-bb.i: ; preds = %bb.i, %entry
- %i.1.i1.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb.i ] ; <i32> [#uses=2]
- %tmp1012.i = sext i32 %i.1.i1.0 to i64 ; <i64> [#uses=1]
- %tmp13.i = getelementptr %struct.A* @_ZN1A1aE, i32 0, i32 0, i64 %tmp1012.i ; <i8*> [#uses=1]
- store i8 0, i8* %tmp13.i
- %indvar.next = add i32 %i.1.i1.0, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, 1024 ; <i1> [#uses=1]
- br i1 %exitcond, label %_Z41__static_initialization_and_destruction_0ii.exit, label %bb.i
-
-_Z41__static_initialization_and_destruction_0ii.exit: ; preds = %bb.i
- ret void
-}
-
-define i32 @main(i32 %argc, i8** %argv) {
-entry:
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-06-04-tailmerge4.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-06-04-tailmerge4.ll
deleted file mode 100644
index baf2377..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-06-04-tailmerge4.ll
+++ /dev/null
@@ -1,454 +0,0 @@
-; RUN: llc < %s -enable-eh -asm-verbose | grep invcont131
-; PR 1496: tail merge was incorrectly removing this block
-
-; ModuleID = 'report.1.bc'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-pc-linux-gnu"
- %struct.ALLOC = type { %struct.string___XUB, [2 x i8] }
- %struct.RETURN = type { i32, i32, i32, i64 }
- %struct.ada__streams__root_stream_type = type { %struct.ada__tags__dispatch_table* }
- %struct.ada__tags__dispatch_table = type { [1 x i8*] }
- %struct.ada__text_io__text_afcb = type { %struct.system__file_control_block__afcb, i32, i32, i32, i32, i32, %struct.ada__text_io__text_afcb*, i8, i8 }
- %struct.string___XUB = type { i32, i32 }
- %struct.string___XUP = type { i8*, %struct.string___XUB* }
- %struct.system__file_control_block__afcb = type { %struct.ada__streams__root_stream_type, i32, %struct.string___XUP, i32, %struct.string___XUP, i8, i8, i8, i8, i8, i8, i8, %struct.system__file_control_block__afcb*, %struct.system__file_control_block__afcb* }
- %struct.system__secondary_stack__mark_id = type { i8*, i32 }
- %struct.wide_string___XUP = type { i16*, %struct.string___XUB* }
-@report_E = global i8 0 ; <i8*> [#uses=0]
-@report__test_status = internal global i8 1 ; <i8*> [#uses=8]
-@report__test_name = internal global [15 x i8] zeroinitializer ; <[15 x i8]*> [#uses=10]
-@report__test_name_len = internal global i32 0 ; <i32*> [#uses=15]
-@.str = internal constant [12 x i8] c"report.adb\00\00" ; <[12 x i8]*> [#uses=1]
-@C.26.599 = internal constant %struct.string___XUB { i32 1, i32 1 } ; <%struct.string___XUB*> [#uses=1]
-@.str1 = internal constant [1 x i8] c":" ; <[1 x i8]*> [#uses=1]
-@.str2 = internal constant [1 x i8] c" " ; <[1 x i8]*> [#uses=1]
-@.str3 = internal constant [1 x i8] c"-" ; <[1 x i8]*> [#uses=1]
-@.str5 = internal constant [10 x i8] c"0123456789" ; <[10 x i8]*> [#uses=12]
-@C.59.855 = internal constant %struct.string___XUB { i32 1, i32 0 } ; <%struct.string___XUB*> [#uses=1]
-@C.69.876 = internal constant %struct.string___XUB { i32 1, i32 3 } ; <%struct.string___XUB*> [#uses=1]
-@C.70.879 = internal constant %struct.string___XUB { i32 1, i32 6 } ; <%struct.string___XUB*> [#uses=1]
-@C.81.900 = internal constant %struct.string___XUB { i32 1, i32 5 } ; <%struct.string___XUB*> [#uses=1]
-@.str6 = internal constant [0 x i8] zeroinitializer ; <[0 x i8]*> [#uses=1]
-@.str7 = internal constant [3 x i8] c"2.5" ; <[3 x i8]*> [#uses=1]
-@.str8 = internal constant [6 x i8] c"ACATS " ; <[6 x i8]*> [#uses=1]
-@.str9 = internal constant [5 x i8] c",.,. " ; <[5 x i8]*> [#uses=1]
-@.str10 = internal constant [1 x i8] c"." ; <[1 x i8]*> [#uses=1]
-@.str11 = internal constant [5 x i8] c"---- " ; <[5 x i8]*> [#uses=1]
-@.str12 = internal constant [5 x i8] c" - " ; <[5 x i8]*> [#uses=1]
-@.str13 = internal constant [5 x i8] c" * " ; <[5 x i8]*> [#uses=1]
-@.str14 = internal constant [5 x i8] c" + " ; <[5 x i8]*> [#uses=1]
-@.str15 = internal constant [5 x i8] c" ! " ; <[5 x i8]*> [#uses=1]
-@C.209.1380 = internal constant %struct.string___XUB { i32 1, i32 37 } ; <%struct.string___XUB*> [#uses=1]
-@.str16 = internal constant [37 x i8] c" PASSED ============================." ; <[37 x i8]*> [#uses=1]
-@.str17 = internal constant [5 x i8] c"==== " ; <[5 x i8]*> [#uses=1]
-@.str18 = internal constant [37 x i8] c" NOT-APPLICABLE ++++++++++++++++++++." ; <[37 x i8]*> [#uses=1]
-@.str19 = internal constant [5 x i8] c"++++ " ; <[5 x i8]*> [#uses=1]
-@.str20 = internal constant [37 x i8] c" TENTATIVELY PASSED !!!!!!!!!!!!!!!!." ; <[37 x i8]*> [#uses=1]
-@.str21 = internal constant [5 x i8] c"!!!! " ; <[5 x i8]*> [#uses=1]
-@.str22 = internal constant [37 x i8] c" SEE '!' COMMENTS FOR SPECIAL NOTES!!" ; <[37 x i8]*> [#uses=1]
-@.str23 = internal constant [37 x i8] c" FAILED ****************************." ; <[37 x i8]*> [#uses=1]
-@.str24 = internal constant [5 x i8] c"**** " ; <[5 x i8]*> [#uses=1]
-@__gnat_others_value = external constant i32 ; <i32*> [#uses=2]
-@system__soft_links__abort_undefer = external global void ()* ; <void ()**> [#uses=1]
-@C.320.1854 = internal constant %struct.string___XUB { i32 2, i32 6 } ; <%struct.string___XUB*> [#uses=1]
-
-declare void @report__put_msg(i64 %msg.0.0)
-
-declare void @__gnat_rcheck_05(i8*, i32)
-
-declare void @__gnat_rcheck_12(i8*, i32)
-
-declare %struct.ada__text_io__text_afcb* @ada__text_io__standard_output()
-
-declare void @ada__text_io__set_col(%struct.ada__text_io__text_afcb*, i32)
-
-declare void @ada__text_io__put_line(%struct.ada__text_io__text_afcb*, i64)
-
-declare void @report__time_stamp(%struct.string___XUP* sret %agg.result)
-
-declare i64 @ada__calendar__clock()
-
-declare void @ada__calendar__split(%struct.RETURN* sret , i64)
-
-declare void @system__string_ops_concat_5__str_concat_5(%struct.string___XUP* sret , i64, i64, i64, i64, i64)
-
-declare void @system__string_ops_concat_3__str_concat_3(%struct.string___XUP* sret , i64, i64, i64)
-
-declare i8* @system__secondary_stack__ss_allocate(i32)
-
-declare void @report__test(i64 %name.0.0, i64 %descr.0.0)
-
-declare void @system__secondary_stack__ss_mark(%struct.system__secondary_stack__mark_id* sret )
-
-declare i8* @llvm.eh.exception()
-
-declare i32 @llvm.eh.selector(i8*, i8*, ...)
-
-declare i32 @llvm.eh.typeid.for(i8*)
-
-declare i32 @__gnat_eh_personality(...)
-
-declare i32 @_Unwind_Resume(...)
-
-declare void @__gnat_rcheck_07(i8*, i32)
-
-declare void @system__secondary_stack__ss_release(i64)
-
-declare void @report__comment(i64 %descr.0.0)
-
-declare void @report__failed(i64 %descr.0.0)
-
-declare void @report__not_applicable(i64 %descr.0.0)
-
-declare void @report__special_action(i64 %descr.0.0)
-
-define void @report__result() {
-entry:
- %tmp = alloca %struct.system__secondary_stack__mark_id, align 8 ; <%struct.system__secondary_stack__mark_id*> [#uses=3]
- %A.210 = alloca %struct.string___XUB, align 8 ; <%struct.string___XUB*> [#uses=3]
- %tmp5 = alloca %struct.string___XUP, align 8 ; <%struct.string___XUP*> [#uses=3]
- %A.229 = alloca %struct.string___XUB, align 8 ; <%struct.string___XUB*> [#uses=3]
- %tmp10 = alloca %struct.string___XUP, align 8 ; <%struct.string___XUP*> [#uses=3]
- %A.248 = alloca %struct.string___XUB, align 8 ; <%struct.string___XUB*> [#uses=3]
- %tmp15 = alloca %struct.string___XUP, align 8 ; <%struct.string___XUP*> [#uses=3]
- %A.270 = alloca %struct.string___XUB, align 8 ; <%struct.string___XUB*> [#uses=3]
- %tmp20 = alloca %struct.string___XUP, align 8 ; <%struct.string___XUP*> [#uses=3]
- %A.284 = alloca %struct.string___XUB, align 8 ; <%struct.string___XUB*> [#uses=3]
- %tmp25 = alloca %struct.string___XUP, align 8 ; <%struct.string___XUP*> [#uses=3]
- call void @system__secondary_stack__ss_mark( %struct.system__secondary_stack__mark_id* %tmp sret )
- %tmp28 = getelementptr %struct.system__secondary_stack__mark_id* %tmp, i32 0, i32 0 ; <i8**> [#uses=1]
- %tmp29 = load i8** %tmp28 ; <i8*> [#uses=2]
- %tmp31 = getelementptr %struct.system__secondary_stack__mark_id* %tmp, i32 0, i32 1 ; <i32*> [#uses=1]
- %tmp32 = load i32* %tmp31 ; <i32> [#uses=2]
- %tmp33 = load i8* @report__test_status ; <i8> [#uses=1]
- switch i8 %tmp33, label %bb483 [
- i8 0, label %bb
- i8 2, label %bb143
- i8 3, label %bb261
- ]
-
-bb: ; preds = %entry
- %tmp34 = load i32* @report__test_name_len ; <i32> [#uses=4]
- %tmp35 = icmp sgt i32 %tmp34, 0 ; <i1> [#uses=2]
- %tmp40 = icmp sgt i32 %tmp34, 15 ; <i1> [#uses=1]
- %bothcond139 = and i1 %tmp35, %tmp40 ; <i1> [#uses=1]
- br i1 %bothcond139, label %cond_true43, label %cond_next44
-
-cond_true43: ; preds = %bb
- invoke void @__gnat_rcheck_12( i8* getelementptr ([12 x i8]* @.str, i32 0, i32 0), i32 212 )
- to label %UnifiedUnreachableBlock unwind label %unwind
-
-unwind: ; preds = %invcont589, %cond_next567, %bb555, %cond_true497, %invcont249, %cond_next227, %bb215, %cond_true157, %invcont131, %cond_next109, %bb97, %cond_true43
- %eh_ptr = call i8* @llvm.eh.exception( ) ; <i8*> [#uses=1]
- br label %cleanup717
-
-cond_next44: ; preds = %bb
- %tmp72 = getelementptr %struct.string___XUB* %A.210, i32 0, i32 0 ; <i32*> [#uses=1]
- store i32 1, i32* %tmp72
- %tmp73 = getelementptr %struct.string___XUB* %A.210, i32 0, i32 1 ; <i32*> [#uses=1]
- store i32 %tmp34, i32* %tmp73
- br i1 %tmp35, label %cond_true80, label %cond_next109
-
-cond_true80: ; preds = %cond_next44
- %tmp45.off = add i32 %tmp34, -1 ; <i32> [#uses=1]
- %bothcond = icmp ugt i32 %tmp45.off, 14 ; <i1> [#uses=1]
- br i1 %bothcond, label %bb97, label %cond_next109
-
-bb97: ; preds = %cond_true80
- invoke void @__gnat_rcheck_05( i8* getelementptr ([12 x i8]* @.str, i32 0, i32 0), i32 212 )
- to label %UnifiedUnreachableBlock unwind label %unwind
-
-cond_next109: ; preds = %cond_true80, %cond_next44
- %A.210128 = ptrtoint %struct.string___XUB* %A.210 to i32 ; <i32> [#uses=1]
- %A.210128129 = zext i32 %A.210128 to i64 ; <i64> [#uses=1]
- %A.210128129130 = shl i64 %A.210128129, 32 ; <i64> [#uses=1]
- %A.210128129130.ins = or i64 %A.210128129130, zext (i32 ptrtoint ([15 x i8]* @report__test_name to i32) to i64) ; <i64> [#uses=1]
- invoke void @system__string_ops_concat_3__str_concat_3( %struct.string___XUP* %tmp5 sret , i64 or (i64 zext (i32 ptrtoint ([5 x i8]* @.str17 to i32) to i64), i64 shl (i64 zext (i32 ptrtoint (%struct.string___XUB* @C.81.900 to i32) to i64), i64 32)), i64 %A.210128129130.ins, i64 or (i64 zext (i32 ptrtoint ([37 x i8]* @.str16 to i32) to i64), i64 shl (i64 zext (i32 ptrtoint (%struct.string___XUB* @C.209.1380 to i32) to i64), i64 32)) )
- to label %invcont131 unwind label %unwind
-
-invcont131: ; preds = %cond_next109
- %tmp133 = getelementptr %struct.string___XUP* %tmp5, i32 0, i32 0 ; <i8**> [#uses=1]
- %tmp134 = load i8** %tmp133 ; <i8*> [#uses=1]
- %tmp134120 = ptrtoint i8* %tmp134 to i32 ; <i32> [#uses=1]
- %tmp134120121 = zext i32 %tmp134120 to i64 ; <i64> [#uses=1]
- %tmp136 = getelementptr %struct.string___XUP* %tmp5, i32 0, i32 1 ; <%struct.string___XUB**> [#uses=1]
- %tmp137 = load %struct.string___XUB** %tmp136 ; <%struct.string___XUB*> [#uses=1]
- %tmp137116 = ptrtoint %struct.string___XUB* %tmp137 to i32 ; <i32> [#uses=1]
- %tmp137116117 = zext i32 %tmp137116 to i64 ; <i64> [#uses=1]
- %tmp137116117118 = shl i64 %tmp137116117, 32 ; <i64> [#uses=1]
- %tmp137116117118.ins = or i64 %tmp137116117118, %tmp134120121 ; <i64> [#uses=1]
- invoke fastcc void @report__put_msg( i64 %tmp137116117118.ins )
- to label %cond_next618 unwind label %unwind
-
-bb143: ; preds = %entry
- %tmp144 = load i32* @report__test_name_len ; <i32> [#uses=4]
- %tmp147 = icmp sgt i32 %tmp144, 0 ; <i1> [#uses=2]
- %tmp154 = icmp sgt i32 %tmp144, 15 ; <i1> [#uses=1]
- %bothcond140 = and i1 %tmp147, %tmp154 ; <i1> [#uses=1]
- br i1 %bothcond140, label %cond_true157, label %cond_next160
-
-cond_true157: ; preds = %bb143
- invoke void @__gnat_rcheck_12( i8* getelementptr ([12 x i8]* @.str, i32 0, i32 0), i32 215 )
- to label %UnifiedUnreachableBlock unwind label %unwind
-
-cond_next160: ; preds = %bb143
- %tmp189 = getelementptr %struct.string___XUB* %A.229, i32 0, i32 0 ; <i32*> [#uses=1]
- store i32 1, i32* %tmp189
- %tmp190 = getelementptr %struct.string___XUB* %A.229, i32 0, i32 1 ; <i32*> [#uses=1]
- store i32 %tmp144, i32* %tmp190
- br i1 %tmp147, label %cond_true197, label %cond_next227
-
-cond_true197: ; preds = %cond_next160
- %tmp161.off = add i32 %tmp144, -1 ; <i32> [#uses=1]
- %bothcond1 = icmp ugt i32 %tmp161.off, 14 ; <i1> [#uses=1]
- br i1 %bothcond1, label %bb215, label %cond_next227
-
-bb215: ; preds = %cond_true197
- invoke void @__gnat_rcheck_05( i8* getelementptr ([12 x i8]* @.str, i32 0, i32 0), i32 215 )
- to label %UnifiedUnreachableBlock unwind label %unwind
-
-cond_next227: ; preds = %cond_true197, %cond_next160
- %A.229105 = ptrtoint %struct.string___XUB* %A.229 to i32 ; <i32> [#uses=1]
- %A.229105106 = zext i32 %A.229105 to i64 ; <i64> [#uses=1]
- %A.229105106107 = shl i64 %A.229105106, 32 ; <i64> [#uses=1]
- %A.229105106107.ins = or i64 %A.229105106107, zext (i32 ptrtoint ([15 x i8]* @report__test_name to i32) to i64) ; <i64> [#uses=1]
- invoke void @system__string_ops_concat_3__str_concat_3( %struct.string___XUP* %tmp10 sret , i64 or (i64 zext (i32 ptrtoint ([5 x i8]* @.str19 to i32) to i64), i64 shl (i64 zext (i32 ptrtoint (%struct.string___XUB* @C.81.900 to i32) to i64), i64 32)), i64 %A.229105106107.ins, i64 or (i64 zext (i32 ptrtoint ([37 x i8]* @.str18 to i32) to i64), i64 shl (i64 zext (i32 ptrtoint (%struct.string___XUB* @C.209.1380 to i32) to i64), i64 32)) )
- to label %invcont249 unwind label %unwind
-
-invcont249: ; preds = %cond_next227
- %tmp251 = getelementptr %struct.string___XUP* %tmp10, i32 0, i32 0 ; <i8**> [#uses=1]
- %tmp252 = load i8** %tmp251 ; <i8*> [#uses=1]
- %tmp25297 = ptrtoint i8* %tmp252 to i32 ; <i32> [#uses=1]
- %tmp2529798 = zext i32 %tmp25297 to i64 ; <i64> [#uses=1]
- %tmp254 = getelementptr %struct.string___XUP* %tmp10, i32 0, i32 1 ; <%struct.string___XUB**> [#uses=1]
- %tmp255 = load %struct.string___XUB** %tmp254 ; <%struct.string___XUB*> [#uses=1]
- %tmp25593 = ptrtoint %struct.string___XUB* %tmp255 to i32 ; <i32> [#uses=1]
- %tmp2559394 = zext i32 %tmp25593 to i64 ; <i64> [#uses=1]
- %tmp255939495 = shl i64 %tmp2559394, 32 ; <i64> [#uses=1]
- %tmp255939495.ins = or i64 %tmp255939495, %tmp2529798 ; <i64> [#uses=1]
- invoke fastcc void @report__put_msg( i64 %tmp255939495.ins )
- to label %cond_next618 unwind label %unwind
-
-bb261: ; preds = %entry
- %tmp262 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=2]
- %tmp263 = load i32* @report__test_name_len ; <i32> [#uses=4]
- %tmp266 = icmp sgt i32 %tmp263, 0 ; <i1> [#uses=2]
- %tmp273 = icmp sgt i32 %tmp263, 15 ; <i1> [#uses=1]
- %bothcond141 = and i1 %tmp266, %tmp273 ; <i1> [#uses=1]
- br i1 %bothcond141, label %cond_true276, label %cond_next281
-
-cond_true276: ; preds = %bb261
- invoke void @__gnat_rcheck_12( i8* getelementptr ([12 x i8]* @.str, i32 0, i32 0), i32 218 )
- to label %UnifiedUnreachableBlock unwind label %unwind277
-
-unwind277: ; preds = %invcont467, %cond_next442, %invcont370, %cond_next348, %bb336, %cond_true276
- %eh_ptr278 = call i8* @llvm.eh.exception( ) ; <i8*> [#uses=1]
- call void @llvm.stackrestore( i8* %tmp262 )
- br label %cleanup717
-
-cond_next281: ; preds = %bb261
- %tmp310 = getelementptr %struct.string___XUB* %A.248, i32 0, i32 0 ; <i32*> [#uses=1]
- store i32 1, i32* %tmp310
- %tmp311 = getelementptr %struct.string___XUB* %A.248, i32 0, i32 1 ; <i32*> [#uses=1]
- store i32 %tmp263, i32* %tmp311
- br i1 %tmp266, label %cond_true318, label %cond_next348
-
-cond_true318: ; preds = %cond_next281
- %tmp282.off = add i32 %tmp263, -1 ; <i32> [#uses=1]
- %bothcond2 = icmp ugt i32 %tmp282.off, 14 ; <i1> [#uses=1]
- br i1 %bothcond2, label %bb336, label %cond_next348
-
-bb336: ; preds = %cond_true318
- invoke void @__gnat_rcheck_05( i8* getelementptr ([12 x i8]* @.str, i32 0, i32 0), i32 218 )
- to label %UnifiedUnreachableBlock unwind label %unwind277
-
-cond_next348: ; preds = %cond_true318, %cond_next281
- %A.24882 = ptrtoint %struct.string___XUB* %A.248 to i32 ; <i32> [#uses=1]
- %A.2488283 = zext i32 %A.24882 to i64 ; <i64> [#uses=1]
- %A.248828384 = shl i64 %A.2488283, 32 ; <i64> [#uses=1]
- %A.248828384.ins = or i64 %A.248828384, zext (i32 ptrtoint ([15 x i8]* @report__test_name to i32) to i64) ; <i64> [#uses=1]
- invoke void @system__string_ops_concat_3__str_concat_3( %struct.string___XUP* %tmp15 sret , i64 or (i64 zext (i32 ptrtoint ([5 x i8]* @.str21 to i32) to i64), i64 shl (i64 zext (i32 ptrtoint (%struct.string___XUB* @C.81.900 to i32) to i64), i64 32)), i64 %A.248828384.ins, i64 or (i64 zext (i32 ptrtoint ([37 x i8]* @.str20 to i32) to i64), i64 shl (i64 zext (i32 ptrtoint (%struct.string___XUB* @C.209.1380 to i32) to i64), i64 32)) )
- to label %invcont370 unwind label %unwind277
-
-invcont370: ; preds = %cond_next348
- %tmp372 = getelementptr %struct.string___XUP* %tmp15, i32 0, i32 0 ; <i8**> [#uses=1]
- %tmp373 = load i8** %tmp372 ; <i8*> [#uses=1]
- %tmp37374 = ptrtoint i8* %tmp373 to i32 ; <i32> [#uses=1]
- %tmp3737475 = zext i32 %tmp37374 to i64 ; <i64> [#uses=1]
- %tmp375 = getelementptr %struct.string___XUP* %tmp15, i32 0, i32 1 ; <%struct.string___XUB**> [#uses=1]
- %tmp376 = load %struct.string___XUB** %tmp375 ; <%struct.string___XUB*> [#uses=1]
- %tmp37670 = ptrtoint %struct.string___XUB* %tmp376 to i32 ; <i32> [#uses=1]
- %tmp3767071 = zext i32 %tmp37670 to i64 ; <i64> [#uses=1]
- %tmp376707172 = shl i64 %tmp3767071, 32 ; <i64> [#uses=1]
- %tmp376707172.ins = or i64 %tmp376707172, %tmp3737475 ; <i64> [#uses=1]
- invoke fastcc void @report__put_msg( i64 %tmp376707172.ins )
- to label %invcont381 unwind label %unwind277
-
-invcont381: ; preds = %invcont370
- %tmp382 = load i32* @report__test_name_len ; <i32> [#uses=6]
- %tmp415 = icmp sgt i32 %tmp382, -1 ; <i1> [#uses=1]
- %max416 = select i1 %tmp415, i32 %tmp382, i32 0 ; <i32> [#uses=1]
- %tmp417 = alloca i8, i32 %max416 ; <i8*> [#uses=3]
- %tmp423 = icmp sgt i32 %tmp382, 0 ; <i1> [#uses=1]
- br i1 %tmp423, label %bb427, label %cond_next442
-
-bb427: ; preds = %invcont381
- store i8 32, i8* %tmp417
- %tmp434 = icmp eq i32 %tmp382, 1 ; <i1> [#uses=1]
- br i1 %tmp434, label %cond_next442, label %cond_next438.preheader
-
-cond_next438.preheader: ; preds = %bb427
- %tmp. = add i32 %tmp382, -1 ; <i32> [#uses=1]
- br label %cond_next438
-
-cond_next438: ; preds = %cond_next438, %cond_next438.preheader
- %indvar = phi i32 [ 0, %cond_next438.preheader ], [ %J130b.513.5, %cond_next438 ] ; <i32> [#uses=1]
- %J130b.513.5 = add i32 %indvar, 1 ; <i32> [#uses=3]
- %tmp43118 = getelementptr i8* %tmp417, i32 %J130b.513.5 ; <i8*> [#uses=1]
- store i8 32, i8* %tmp43118
- %exitcond = icmp eq i32 %J130b.513.5, %tmp. ; <i1> [#uses=1]
- br i1 %exitcond, label %cond_next442, label %cond_next438
-
-cond_next442: ; preds = %cond_next438, %bb427, %invcont381
- %tmp448 = getelementptr %struct.string___XUB* %A.270, i32 0, i32 0 ; <i32*> [#uses=1]
- store i32 1, i32* %tmp448
- %tmp449 = getelementptr %struct.string___XUB* %A.270, i32 0, i32 1 ; <i32*> [#uses=1]
- store i32 %tmp382, i32* %tmp449
- %tmp41762 = ptrtoint i8* %tmp417 to i32 ; <i32> [#uses=1]
- %tmp4176263 = zext i32 %tmp41762 to i64 ; <i64> [#uses=1]
- %A.27058 = ptrtoint %struct.string___XUB* %A.270 to i32 ; <i32> [#uses=1]
- %A.2705859 = zext i32 %A.27058 to i64 ; <i64> [#uses=1]
- %A.270585960 = shl i64 %A.2705859, 32 ; <i64> [#uses=1]
- %A.270585960.ins = or i64 %tmp4176263, %A.270585960 ; <i64> [#uses=1]
- invoke void @system__string_ops_concat_3__str_concat_3( %struct.string___XUP* %tmp20 sret , i64 or (i64 zext (i32 ptrtoint ([5 x i8]* @.str21 to i32) to i64), i64 shl (i64 zext (i32 ptrtoint (%struct.string___XUB* @C.81.900 to i32) to i64), i64 32)), i64 %A.270585960.ins, i64 or (i64 zext (i32 ptrtoint ([37 x i8]* @.str22 to i32) to i64), i64 shl (i64 zext (i32 ptrtoint (%struct.string___XUB* @C.209.1380 to i32) to i64), i64 32)) )
- to label %invcont467 unwind label %unwind277
-
-invcont467: ; preds = %cond_next442
- %tmp469 = getelementptr %struct.string___XUP* %tmp20, i32 0, i32 0 ; <i8**> [#uses=1]
- %tmp470 = load i8** %tmp469 ; <i8*> [#uses=1]
- %tmp47050 = ptrtoint i8* %tmp470 to i32 ; <i32> [#uses=1]
- %tmp4705051 = zext i32 %tmp47050 to i64 ; <i64> [#uses=1]
- %tmp472 = getelementptr %struct.string___XUP* %tmp20, i32 0, i32 1 ; <%struct.string___XUB**> [#uses=1]
- %tmp473 = load %struct.string___XUB** %tmp472 ; <%struct.string___XUB*> [#uses=1]
- %tmp47346 = ptrtoint %struct.string___XUB* %tmp473 to i32 ; <i32> [#uses=1]
- %tmp4734647 = zext i32 %tmp47346 to i64 ; <i64> [#uses=1]
- %tmp473464748 = shl i64 %tmp4734647, 32 ; <i64> [#uses=1]
- %tmp473464748.ins = or i64 %tmp473464748, %tmp4705051 ; <i64> [#uses=1]
- invoke fastcc void @report__put_msg( i64 %tmp473464748.ins )
- to label %cleanup unwind label %unwind277
-
-cleanup: ; preds = %invcont467
- call void @llvm.stackrestore( i8* %tmp262 )
- br label %cond_next618
-
-bb483: ; preds = %entry
- %tmp484 = load i32* @report__test_name_len ; <i32> [#uses=4]
- %tmp487 = icmp sgt i32 %tmp484, 0 ; <i1> [#uses=2]
- %tmp494 = icmp sgt i32 %tmp484, 15 ; <i1> [#uses=1]
- %bothcond142 = and i1 %tmp487, %tmp494 ; <i1> [#uses=1]
- br i1 %bothcond142, label %cond_true497, label %cond_next500
-
-cond_true497: ; preds = %bb483
- invoke void @__gnat_rcheck_12( i8* getelementptr ([12 x i8]* @.str, i32 0, i32 0), i32 223 )
- to label %UnifiedUnreachableBlock unwind label %unwind
-
-cond_next500: ; preds = %bb483
- %tmp529 = getelementptr %struct.string___XUB* %A.284, i32 0, i32 0 ; <i32*> [#uses=1]
- store i32 1, i32* %tmp529
- %tmp530 = getelementptr %struct.string___XUB* %A.284, i32 0, i32 1 ; <i32*> [#uses=1]
- store i32 %tmp484, i32* %tmp530
- br i1 %tmp487, label %cond_true537, label %cond_next567
-
-cond_true537: ; preds = %cond_next500
- %tmp501.off = add i32 %tmp484, -1 ; <i32> [#uses=1]
- %bothcond3 = icmp ugt i32 %tmp501.off, 14 ; <i1> [#uses=1]
- br i1 %bothcond3, label %bb555, label %cond_next567
-
-bb555: ; preds = %cond_true537
- invoke void @__gnat_rcheck_05( i8* getelementptr ([12 x i8]* @.str, i32 0, i32 0), i32 223 )
- to label %UnifiedUnreachableBlock unwind label %unwind
-
-cond_next567: ; preds = %cond_true537, %cond_next500
- %A.28435 = ptrtoint %struct.string___XUB* %A.284 to i32 ; <i32> [#uses=1]
- %A.2843536 = zext i32 %A.28435 to i64 ; <i64> [#uses=1]
- %A.284353637 = shl i64 %A.2843536, 32 ; <i64> [#uses=1]
- %A.284353637.ins = or i64 %A.284353637, zext (i32 ptrtoint ([15 x i8]* @report__test_name to i32) to i64) ; <i64> [#uses=1]
- invoke void @system__string_ops_concat_3__str_concat_3( %struct.string___XUP* %tmp25 sret , i64 or (i64 zext (i32 ptrtoint ([5 x i8]* @.str24 to i32) to i64), i64 shl (i64 zext (i32 ptrtoint (%struct.string___XUB* @C.81.900 to i32) to i64), i64 32)), i64 %A.284353637.ins, i64 or (i64 zext (i32 ptrtoint ([37 x i8]* @.str23 to i32) to i64), i64 shl (i64 zext (i32 ptrtoint (%struct.string___XUB* @C.209.1380 to i32) to i64), i64 32)) )
- to label %invcont589 unwind label %unwind
-
-invcont589: ; preds = %cond_next567
- %tmp591 = getelementptr %struct.string___XUP* %tmp25, i32 0, i32 0 ; <i8**> [#uses=1]
- %tmp592 = load i8** %tmp591 ; <i8*> [#uses=1]
- %tmp59228 = ptrtoint i8* %tmp592 to i32 ; <i32> [#uses=1]
- %tmp5922829 = zext i32 %tmp59228 to i64 ; <i64> [#uses=1]
- %tmp594 = getelementptr %struct.string___XUP* %tmp25, i32 0, i32 1 ; <%struct.string___XUB**> [#uses=1]
- %tmp595 = load %struct.string___XUB** %tmp594 ; <%struct.string___XUB*> [#uses=1]
- %tmp59524 = ptrtoint %struct.string___XUB* %tmp595 to i32 ; <i32> [#uses=1]
- %tmp5952425 = zext i32 %tmp59524 to i64 ; <i64> [#uses=1]
- %tmp595242526 = shl i64 %tmp5952425, 32 ; <i64> [#uses=1]
- %tmp595242526.ins = or i64 %tmp595242526, %tmp5922829 ; <i64> [#uses=1]
- invoke fastcc void @report__put_msg( i64 %tmp595242526.ins )
- to label %cond_next618 unwind label %unwind
-
-cond_next618: ; preds = %invcont589, %cleanup, %invcont249, %invcont131
- store i8 1, i8* @report__test_status
- store i32 7, i32* @report__test_name_len
- store i8 78, i8* getelementptr ([15 x i8]* @report__test_name, i32 0, i32 0)
- store i8 79, i8* getelementptr ([15 x i8]* @report__test_name, i32 0, i32 1)
- store i8 95, i8* getelementptr ([15 x i8]* @report__test_name, i32 0, i32 2)
- store i8 78, i8* getelementptr ([15 x i8]* @report__test_name, i32 0, i32 3)
- store i8 65, i8* getelementptr ([15 x i8]* @report__test_name, i32 0, i32 4)
- store i8 77, i8* getelementptr ([15 x i8]* @report__test_name, i32 0, i32 5)
- store i8 69, i8* getelementptr ([15 x i8]* @report__test_name, i32 0, i32 6)
- %CHAIN.310.0.0.0.val5.i = ptrtoint i8* %tmp29 to i32 ; <i32> [#uses=1]
- %CHAIN.310.0.0.0.val56.i = zext i32 %CHAIN.310.0.0.0.val5.i to i64 ; <i64> [#uses=1]
- %CHAIN.310.0.0.1.val2.i = zext i32 %tmp32 to i64 ; <i64> [#uses=1]
- %CHAIN.310.0.0.1.val23.i = shl i64 %CHAIN.310.0.0.1.val2.i, 32 ; <i64> [#uses=1]
- %CHAIN.310.0.0.1.val23.ins.i = or i64 %CHAIN.310.0.0.1.val23.i, %CHAIN.310.0.0.0.val56.i ; <i64> [#uses=1]
- call void @system__secondary_stack__ss_release( i64 %CHAIN.310.0.0.1.val23.ins.i )
- ret void
-
-cleanup717: ; preds = %unwind277, %unwind
- %eh_exception.0 = phi i8* [ %eh_ptr278, %unwind277 ], [ %eh_ptr, %unwind ] ; <i8*> [#uses=1]
- %CHAIN.310.0.0.0.val5.i8 = ptrtoint i8* %tmp29 to i32 ; <i32> [#uses=1]
- %CHAIN.310.0.0.0.val56.i9 = zext i32 %CHAIN.310.0.0.0.val5.i8 to i64 ; <i64> [#uses=1]
- %CHAIN.310.0.0.1.val2.i10 = zext i32 %tmp32 to i64 ; <i64> [#uses=1]
- %CHAIN.310.0.0.1.val23.i11 = shl i64 %CHAIN.310.0.0.1.val2.i10, 32 ; <i64> [#uses=1]
- %CHAIN.310.0.0.1.val23.ins.i12 = or i64 %CHAIN.310.0.0.1.val23.i11, %CHAIN.310.0.0.0.val56.i9 ; <i64> [#uses=1]
- call void @system__secondary_stack__ss_release( i64 %CHAIN.310.0.0.1.val23.ins.i12 )
- call i32 (...)* @_Unwind_Resume( i8* %eh_exception.0 ) ; <i32>:0 [#uses=0]
- unreachable
-
-UnifiedUnreachableBlock: ; preds = %bb555, %cond_true497, %bb336, %cond_true276, %bb215, %cond_true157, %bb97, %cond_true43
- unreachable
-}
-
-declare i8* @llvm.stacksave()
-
-declare void @llvm.stackrestore(i8*)
-
-declare i32 @report__ident_int(i32 %x)
-
-declare i8 @report__equal(i32 %x, i32 %y)
-
-declare i8 @report__ident_char(i8 zeroext %x)
-
-declare i16 @report__ident_wide_char(i16 zeroext %x)
-
-declare i8 @report__ident_bool(i8 %x)
-
-declare void @report__ident_str(%struct.string___XUP* sret %agg.result, i64 %x.0.0)
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
-
-declare void @report__ident_wide_str(%struct.wide_string___XUP* sret %agg.result, i64 %x.0.0)
-
-declare void @__gnat_begin_handler(i8*)
-
-declare void @__gnat_end_handler(i8*)
-
-declare void @report__legal_file_name(%struct.string___XUP* sret %agg.result, i32 %x, i64 %nam.0.0)
-
-declare void @__gnat_rcheck_06(i8*, i32)
-
-declare void @system__string_ops__str_concat_cs(%struct.string___XUP* sret , i8 zeroext , i64)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-06-05-LSR-Dominator.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-06-05-LSR-Dominator.ll
deleted file mode 100644
index 36a97ef..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-06-05-LSR-Dominator.ll
+++ /dev/null
@@ -1,129 +0,0 @@
-; PR1495
-; RUN: llc < %s -march=x86
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-pc-linux-gnu"
- %struct.AVRational = type { i32, i32 }
- %struct.FFTComplex = type { float, float }
- %struct.FFTContext = type { i32, i32, i16*, %struct.FFTComplex*, %struct.FFTComplex*, void (%struct.FFTContext*, %struct.FFTComplex*)*, void (%struct.MDCTContext*, float*, float*, float*)* }
- %struct.MDCTContext = type { i32, i32, float*, float*, %struct.FFTContext }
- %struct.Minima = type { i32, i32, i32, i32 }
- %struct.codebook_t = type { i32, i8*, i32*, i32, float, float, i32, i32, i32*, float*, float* }
- %struct.floor_class_t = type { i32, i32, i32, i32* }
- %struct.floor_t = type { i32, i32*, i32, %struct.floor_class_t*, i32, i32, i32, %struct.Minima* }
- %struct.mapping_t = type { i32, i32*, i32*, i32*, i32, i32*, i32* }
- %struct.residue_t = type { i32, i32, i32, i32, i32, i32, [8 x i8]*, [2 x float]* }
- %struct.venc_context_t = type { i32, i32, [2 x i32], [2 x %struct.MDCTContext], [2 x float*], i32, float*, float*, float*, float*, float, i32, %struct.codebook_t*, i32, %struct.floor_t*, i32, %struct.residue_t*, i32, %struct.mapping_t*, i32, %struct.AVRational* }
-
-define fastcc i32 @put_main_header(%struct.venc_context_t* %venc, i8** %out) {
-entry:
- br i1 false, label %bb1820, label %bb288.bb148_crit_edge
-
-bb288.bb148_crit_edge: ; preds = %entry
- ret i32 0
-
-cond_next1712: ; preds = %bb1820.bb1680_crit_edge
- ret i32 0
-
-bb1817: ; preds = %bb1820.bb1680_crit_edge
- br label %bb1820
-
-bb1820: ; preds = %bb1817, %entry
- %pb.1.50 = phi i32 [ %tmp1693, %bb1817 ], [ 8, %entry ] ; <i32> [#uses=3]
- br i1 false, label %bb2093, label %bb1820.bb1680_crit_edge
-
-bb1820.bb1680_crit_edge: ; preds = %bb1820
- %tmp1693 = add i32 %pb.1.50, 8 ; <i32> [#uses=2]
- %tmp1702 = icmp slt i32 %tmp1693, 0 ; <i1> [#uses=1]
- br i1 %tmp1702, label %cond_next1712, label %bb1817
-
-bb2093: ; preds = %bb1820
- %tmp2102 = add i32 %pb.1.50, 65 ; <i32> [#uses=0]
- %tmp2236 = add i32 %pb.1.50, 72 ; <i32> [#uses=1]
- %tmp2237 = sdiv i32 %tmp2236, 8 ; <i32> [#uses=2]
- br i1 false, label %bb2543, label %bb2536.bb2396_crit_edge
-
-bb2536.bb2396_crit_edge: ; preds = %bb2093
- ret i32 0
-
-bb2543: ; preds = %bb2093
- br i1 false, label %cond_next2576, label %bb2690
-
-cond_next2576: ; preds = %bb2543
- ret i32 0
-
-bb2682: ; preds = %bb2690
- ret i32 0
-
-bb2690: ; preds = %bb2543
- br i1 false, label %bb2682, label %bb2698
-
-bb2698: ; preds = %bb2690
- br i1 false, label %cond_next2726, label %bb2831
-
-cond_next2726: ; preds = %bb2698
- ret i32 0
-
-bb2831: ; preds = %bb2698
- br i1 false, label %cond_next2859, label %bb2964
-
-cond_next2859: ; preds = %bb2831
- br i1 false, label %bb2943, label %cond_true2866
-
-cond_true2866: ; preds = %cond_next2859
- br i1 false, label %cond_true2874, label %cond_false2897
-
-cond_true2874: ; preds = %cond_true2866
- ret i32 0
-
-cond_false2897: ; preds = %cond_true2866
- ret i32 0
-
-bb2943: ; preds = %cond_next2859
- ret i32 0
-
-bb2964: ; preds = %bb2831
- br i1 false, label %cond_next2997, label %bb4589
-
-cond_next2997: ; preds = %bb2964
- ret i32 0
-
-bb3103: ; preds = %bb4589
- ret i32 0
-
-bb4589: ; preds = %bb2964
- br i1 false, label %bb3103, label %bb4597
-
-bb4597: ; preds = %bb4589
- br i1 false, label %cond_next4630, label %bb4744
-
-cond_next4630: ; preds = %bb4597
- br i1 false, label %bb4744, label %cond_true4724
-
-cond_true4724: ; preds = %cond_next4630
- br i1 false, label %bb4736, label %bb7531
-
-bb4736: ; preds = %cond_true4724
- ret i32 0
-
-bb4744: ; preds = %cond_next4630, %bb4597
- ret i32 0
-
-bb7531: ; preds = %cond_true4724
- %v_addr.023.0.i6 = add i32 %tmp2237, -255 ; <i32> [#uses=1]
- br label %bb.i14
-
-bb.i14: ; preds = %bb.i14, %bb7531
- %n.021.0.i8 = phi i32 [ 0, %bb7531 ], [ %indvar.next, %bb.i14 ] ; <i32> [#uses=2]
- %tmp..i9 = mul i32 %n.021.0.i8, -255 ; <i32> [#uses=1]
- %tmp5.i11 = add i32 %v_addr.023.0.i6, %tmp..i9 ; <i32> [#uses=1]
- %tmp10.i12 = icmp ugt i32 %tmp5.i11, 254 ; <i1> [#uses=1]
- %indvar.next = add i32 %n.021.0.i8, 1 ; <i32> [#uses=1]
- br i1 %tmp10.i12, label %bb.i14, label %bb12.loopexit.i18
-
-bb12.loopexit.i18: ; preds = %bb.i14
- call void @llvm.memcpy.i32( i8* null, i8* null, i32 %tmp2237, i32 1 )
- ret i32 0
-}
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-06-14-branchfold.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-06-14-branchfold.ll
deleted file mode 100644
index 2680b15..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-06-14-branchfold.ll
+++ /dev/null
@@ -1,133 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=i686 | not grep jmp
-; check that branch folding understands FP_REG_KILL is not a branch
-
-target triple = "i686-pc-linux-gnu"
- %struct.FRAME.c34003a = type { float, float }
-@report_E = global i8 0 ; <i8*> [#uses=0]
-
-define void @main() {
-entry:
- %FRAME.31 = alloca %struct.FRAME.c34003a, align 8 ; <%struct.FRAME.c34003a*> [#uses=4]
- %tmp20 = call i32 @report__ident_int( i32 -50 ) ; <i32> [#uses=1]
- %tmp2021 = sitofp i32 %tmp20 to float ; <float> [#uses=5]
- %tmp23 = fcmp ult float %tmp2021, 0xC7EFFFFFE0000000 ; <i1> [#uses=1]
- %tmp26 = fcmp ugt float %tmp2021, 0x47EFFFFFE0000000 ; <i1> [#uses=1]
- %bothcond = or i1 %tmp23, %tmp26 ; <i1> [#uses=1]
- br i1 %bothcond, label %bb, label %bb30
-
-bb: ; preds = %entry
- unwind
-
-bb30: ; preds = %entry
- %tmp35 = call i32 @report__ident_int( i32 50 ) ; <i32> [#uses=1]
- %tmp3536 = sitofp i32 %tmp35 to float ; <float> [#uses=4]
- %tmp38 = fcmp ult float %tmp3536, 0xC7EFFFFFE0000000 ; <i1> [#uses=1]
- %tmp44 = fcmp ugt float %tmp3536, 0x47EFFFFFE0000000 ; <i1> [#uses=1]
- %bothcond226 = or i1 %tmp38, %tmp44 ; <i1> [#uses=1]
- br i1 %bothcond226, label %bb47, label %bb49
-
-bb47: ; preds = %bb30
- unwind
-
-bb49: ; preds = %bb30
- %tmp60 = fcmp ult float %tmp3536, %tmp2021 ; <i1> [#uses=1]
- %tmp60.not = xor i1 %tmp60, true ; <i1> [#uses=1]
- %tmp65 = fcmp olt float %tmp2021, 0xC7EFFFFFE0000000 ; <i1> [#uses=1]
- %bothcond227 = and i1 %tmp65, %tmp60.not ; <i1> [#uses=1]
- br i1 %bothcond227, label %cond_true68, label %cond_next70
-
-cond_true68: ; preds = %bb49
- unwind
-
-cond_next70: ; preds = %bb49
- %tmp71 = call i32 @report__ident_int( i32 -30 ) ; <i32> [#uses=1]
- %tmp7172 = sitofp i32 %tmp71 to float ; <float> [#uses=3]
- %tmp74 = fcmp ult float %tmp7172, 0xC7EFFFFFE0000000 ; <i1> [#uses=1]
- %tmp80 = fcmp ugt float %tmp7172, 0x47EFFFFFE0000000 ; <i1> [#uses=1]
- %bothcond228 = or i1 %tmp74, %tmp80 ; <i1> [#uses=1]
- br i1 %bothcond228, label %bb83, label %bb85
-
-bb83: ; preds = %cond_next70
- unwind
-
-bb85: ; preds = %cond_next70
- %tmp90 = getelementptr %struct.FRAME.c34003a* %FRAME.31, i32 0, i32 1 ; <float*> [#uses=3]
- store float %tmp7172, float* %tmp90
- %tmp92 = call i32 @report__ident_int( i32 30 ) ; <i32> [#uses=1]
- %tmp9293 = sitofp i32 %tmp92 to float ; <float> [#uses=7]
- %tmp95 = fcmp ult float %tmp9293, 0xC7EFFFFFE0000000 ; <i1> [#uses=1]
- %tmp101 = fcmp ugt float %tmp9293, 0x47EFFFFFE0000000 ; <i1> [#uses=1]
- %bothcond229 = or i1 %tmp95, %tmp101 ; <i1> [#uses=1]
- br i1 %bothcond229, label %bb104, label %bb106
-
-bb104: ; preds = %bb85
- unwind
-
-bb106: ; preds = %bb85
- %tmp111 = getelementptr %struct.FRAME.c34003a* %FRAME.31, i32 0, i32 0 ; <float*> [#uses=2]
- store float %tmp9293, float* %tmp111
- %tmp123 = load float* %tmp90 ; <float> [#uses=4]
- %tmp125 = fcmp ult float %tmp9293, %tmp123 ; <i1> [#uses=1]
- br i1 %tmp125, label %cond_next147, label %cond_true128
-
-cond_true128: ; preds = %bb106
- %tmp133 = fcmp olt float %tmp123, %tmp2021 ; <i1> [#uses=1]
- %tmp142 = fcmp ogt float %tmp9293, %tmp3536 ; <i1> [#uses=1]
- %bothcond230 = or i1 %tmp133, %tmp142 ; <i1> [#uses=1]
- br i1 %bothcond230, label %bb145, label %cond_next147
-
-bb145: ; preds = %cond_true128
- unwind
-
-cond_next147: ; preds = %cond_true128, %bb106
- %tmp157 = fcmp ugt float %tmp123, -3.000000e+01 ; <i1> [#uses=1]
- %tmp165 = fcmp ult float %tmp9293, -3.000000e+01 ; <i1> [#uses=1]
- %bothcond231 = or i1 %tmp157, %tmp165 ; <i1> [#uses=1]
- br i1 %bothcond231, label %bb168, label %bb169
-
-bb168: ; preds = %cond_next147
- unwind
-
-bb169: ; preds = %cond_next147
- %tmp176 = fcmp ugt float %tmp123, 3.000000e+01 ; <i1> [#uses=1]
- %tmp184 = fcmp ult float %tmp9293, 3.000000e+01 ; <i1> [#uses=1]
- %bothcond232 = or i1 %tmp176, %tmp184 ; <i1> [#uses=1]
- br i1 %bothcond232, label %bb187, label %bb188
-
-bb187: ; preds = %bb169
- unwind
-
-bb188: ; preds = %bb169
- %tmp192 = call fastcc float @c34003a__ident.154( %struct.FRAME.c34003a* %FRAME.31, float 3.000000e+01 ) ; <float> [#uses=2]
- %tmp194 = load float* %tmp90 ; <float> [#uses=1]
- %tmp196 = fcmp ugt float %tmp194, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %tmp196, label %bb207, label %cond_next200
-
-cond_next200: ; preds = %bb188
- %tmp202 = load float* %tmp111 ; <float> [#uses=1]
- %tmp204 = fcmp ult float %tmp202, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %tmp204, label %bb207, label %bb208
-
-bb207: ; preds = %cond_next200, %bb188
- unwind
-
-bb208: ; preds = %cond_next200
- %tmp212 = call fastcc float @c34003a__ident.154( %struct.FRAME.c34003a* %FRAME.31, float 0.000000e+00 ) ; <float> [#uses=1]
- %tmp214 = fcmp oge float %tmp212, %tmp192 ; <i1> [#uses=1]
- %tmp217 = fcmp oge float %tmp192, 1.000000e+02 ; <i1> [#uses=1]
- %tmp221 = or i1 %tmp214, %tmp217 ; <i1> [#uses=1]
- br i1 %tmp221, label %cond_true224, label %UnifiedReturnBlock
-
-cond_true224: ; preds = %bb208
- call void @abort( ) noreturn
- ret void
-
-UnifiedReturnBlock: ; preds = %bb208
- ret void
-}
-
-declare fastcc float @c34003a__ident.154(%struct.FRAME.c34003a* %CHAIN.32, float %x)
-
-declare i32 @report__ident_int(i32 %x)
-
-declare void @abort() noreturn
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-06-15-IntToMMX.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-06-15-IntToMMX.ll
deleted file mode 100644
index 6128d8b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-06-15-IntToMMX.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep paddusw
-@R = external global <1 x i64> ; <<1 x i64>*> [#uses=1]
-
-define void @foo(<1 x i64> %A, <1 x i64> %B) {
-entry:
- %tmp4 = bitcast <1 x i64> %B to <4 x i16> ; <<4 x i16>> [#uses=1]
- %tmp6 = bitcast <1 x i64> %A to <4 x i16> ; <<4 x i16>> [#uses=1]
- %tmp7 = tail call <4 x i16> @llvm.x86.mmx.paddus.w( <4 x i16> %tmp6, <4 x i16> %tmp4 ) ; <<4 x i16>> [#uses=1]
- %tmp8 = bitcast <4 x i16> %tmp7 to <1 x i64> ; <<1 x i64>> [#uses=1]
- store <1 x i64> %tmp8, <1 x i64>* @R
- tail call void @llvm.x86.mmx.emms( )
- ret void
-}
-
-declare <4 x i16> @llvm.x86.mmx.paddus.w(<4 x i16>, <4 x i16>)
-
-declare void @llvm.x86.mmx.emms()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-06-28-X86-64-isel.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-06-28-X86-64-isel.ll
deleted file mode 100644
index 9d42c49..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-06-28-X86-64-isel.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse2
-
-define void @test() {
- %tmp1 = call <8 x i16> @llvm.x86.sse2.pmins.w( <8 x i16> zeroinitializer, <8 x i16> bitcast (<4 x i32> < i32 7, i32 7, i32 7, i32 7 > to <8 x i16>) )
- %tmp2 = bitcast <8 x i16> %tmp1 to <4 x i32>
- br i1 false, label %bb1, label %bb2
-
-bb2:
- %tmp38007.i = extractelement <4 x i32> %tmp2, i32 3
- ret void
-
-bb1:
- ret void
-}
-
-declare <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16>, <8 x i16>)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-06-29-DAGCombinerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-06-29-DAGCombinerBug.ll
deleted file mode 100644
index d2d6388..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-06-29-DAGCombinerBug.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-
-define void @test() {
-entry:
- br i1 false, label %bb13944.preheader, label %cond_true418
-
-cond_true418: ; preds = %entry
- ret void
-
-bb13944.preheader: ; preds = %entry
- br i1 false, label %bb3517, label %bb13968.preheader
-
-bb3517: ; preds = %bb13944.preheader
- br i1 false, label %cond_false7408, label %cond_next11422
-
-cond_false7408: ; preds = %bb3517
- switch i32 0, label %cond_false10578 [
- i32 7, label %cond_next11422
- i32 6, label %cond_true7828
- i32 1, label %cond_true10095
- i32 3, label %cond_true10095
- i32 5, label %cond_true10176
- i32 24, label %cond_true10176
- ]
-
-cond_true7828: ; preds = %cond_false7408
- br i1 false, label %cond_next8191, label %cond_true8045
-
-cond_true8045: ; preds = %cond_true7828
- ret void
-
-cond_next8191: ; preds = %cond_true7828
- %tmp8234 = sub <4 x i32> < i32 939524096, i32 939524096, i32 939524096, i32 939524096 >, zeroinitializer ; <<4 x i32>> [#uses=0]
- ret void
-
-cond_true10095: ; preds = %cond_false7408, %cond_false7408
- ret void
-
-cond_true10176: ; preds = %cond_false7408, %cond_false7408
- ret void
-
-cond_false10578: ; preds = %cond_false7408
- ret void
-
-cond_next11422: ; preds = %cond_false7408, %bb3517
- ret void
-
-bb13968.preheader: ; preds = %bb13944.preheader
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-06-29-VecFPConstantCSEBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-06-29-VecFPConstantCSEBug.ll
deleted file mode 100644
index dc11eec..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-06-29-VecFPConstantCSEBug.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-
-define void @test(<4 x float>* %arg) {
- %tmp89 = getelementptr <4 x float>* %arg, i64 3
- %tmp1144 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, zeroinitializer
- store <4 x float> %tmp1144, <4 x float>* null
- %tmp1149 = load <4 x float>* %tmp89
- %tmp1150 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp1149
- store <4 x float> %tmp1150, <4 x float>* %tmp89
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-07-03-GR64ToVR64.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-07-03-GR64ToVR64.ll
deleted file mode 100644
index 2c513f1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-07-03-GR64ToVR64.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx | grep {movd %rsi, %mm0}
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx | grep {movd %rdi, %mm1}
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx | grep {paddusw %mm0, %mm1}
-
-@R = external global <1 x i64> ; <<1 x i64>*> [#uses=1]
-
-define void @foo(<1 x i64> %A, <1 x i64> %B) nounwind {
-entry:
- %tmp4 = bitcast <1 x i64> %B to <4 x i16> ; <<4 x i16>> [#uses=1]
- %tmp6 = bitcast <1 x i64> %A to <4 x i16> ; <<4 x i16>> [#uses=1]
- %tmp7 = tail call <4 x i16> @llvm.x86.mmx.paddus.w( <4 x i16> %tmp6, <4 x i16> %tmp4 ) ; <<4 x i16>> [#uses=1]
- %tmp8 = bitcast <4 x i16> %tmp7 to <1 x i64> ; <<1 x i64>> [#uses=1]
- store <1 x i64> %tmp8, <1 x i64>* @R
- tail call void @llvm.x86.mmx.emms( )
- ret void
-}
-
-declare <4 x i16> @llvm.x86.mmx.paddus.w(<4 x i16>, <4 x i16>)
-
-declare void @llvm.x86.mmx.emms()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-07-10-StackerAssert.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-07-10-StackerAssert.ll
deleted file mode 100644
index d611677..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-07-10-StackerAssert.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s -mtriple=i686-pc-linux-gnu -mcpu=athlon -relocation-model=pic
-; PR1545
-
-@.str97 = external constant [56 x i8] ; <[56 x i8]*> [#uses=1]
-
-declare void @PR_LogPrint(i8*, ...)
-
-define i32 @_ZN13nsPrintEngine19SetupToPrintContentEP16nsIDeviceContextP12nsIDOMWindow() {
-entry:
- br i1 false, label %cond_true122, label %cond_next453
-
-cond_true122: ; preds = %entry
- br i1 false, label %bb164, label %cond_true136
-
-cond_true136: ; preds = %cond_true122
- ret i32 0
-
-bb164: ; preds = %cond_true122
- br i1 false, label %bb383, label %cond_true354
-
-cond_true354: ; preds = %bb164
- ret i32 0
-
-bb383: ; preds = %bb164
- %tmp408 = load float* null ; <float> [#uses=2]
- br i1 false, label %cond_true425, label %cond_next443
-
-cond_true425: ; preds = %bb383
- %tmp430 = load float* null ; <float> [#uses=1]
- %tmp432 = fsub float %tmp430, %tmp408 ; <float> [#uses=1]
- %tmp432433 = fpext float %tmp432 to double ; <double> [#uses=1]
- %tmp434435 = fpext float %tmp408 to double ; <double> [#uses=1]
- call void (i8*, ...)* @PR_LogPrint( i8* getelementptr ([56 x i8]* @.str97, i32 0, i32 0), double 0.000000e+00, double %tmp434435, double %tmp432433 )
- ret i32 0
-
-cond_next443: ; preds = %bb383
- ret i32 0
-
-cond_next453: ; preds = %entry
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-07-18-Vector-Extract.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-07-18-Vector-Extract.ll
deleted file mode 100644
index 8625b27..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-07-18-Vector-Extract.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse | grep {movq (%rdi), %rax}
-; RUN: llc < %s -march=x86-64 -mattr=+sse | grep {movq 8(%rdi), %rax}
-define i64 @foo_0(<2 x i64>* %val) {
-entry:
- %val12 = getelementptr <2 x i64>* %val, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp7 = load i64* %val12 ; <i64> [#uses=1]
- ret i64 %tmp7
-}
-
-define i64 @foo_1(<2 x i64>* %val) {
-entry:
- %tmp2.gep = getelementptr <2 x i64>* %val, i32 0, i32 1 ; <i64*> [#uses=1]
- %tmp4 = load i64* %tmp2.gep ; <i64> [#uses=1]
- ret i64 %tmp4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-08-01-LiveVariablesBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-08-01-LiveVariablesBug.ll
deleted file mode 100644
index 3cd8052..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-08-01-LiveVariablesBug.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep movl
-
-define i8 @t(i8 zeroext %x, i8 zeroext %y) zeroext {
- %tmp2 = add i8 %x, 2
- %tmp4 = add i8 %y, -2
- %tmp5 = mul i8 %tmp4, %tmp2
- ret i8 %tmp5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll
deleted file mode 100644
index 7768f36..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll
+++ /dev/null
@@ -1,235 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | not grep "movb %ah, %r"
-
- %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, [4 x i8], i64 }
- %struct.PyBoolScalarObject = type { i64, %struct._typeobject*, i8 }
- %struct.PyBufferProcs = type { i64 (%struct.PyObject*, i64, i8**)*, i64 (%struct.PyObject*, i64, i8**)*, i64 (%struct.PyObject*, i64*)*, i64 (%struct.PyObject*, i64, i8**)* }
- %struct.PyGetSetDef = type { i8*, %struct.PyObject* (%struct.PyObject*, i8*)*, i32 (%struct.PyObject*, %struct.PyObject*, i8*)*, i8*, i8* }
- %struct.PyMappingMethods = type { i64 (%struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, i32 (%struct.PyObject*, %struct.PyObject*, %struct.PyObject*)* }
- %struct.PyMemberDef = type opaque
- %struct.PyMethodDef = type { i8*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, i32, i8* }
- %struct.PyNumberMethods = type { %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*)*, i32 (%struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, i32 (%struct.PyObject**, %struct.PyObject**)*, %struct.PyObject* (%struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*)* }
- %struct.PyObject = type { i64, %struct._typeobject* }
- %struct.PySequenceMethods = type { i64 (%struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, i64)*, %struct.PyObject* (%struct.PyObject*, i64)*, %struct.PyObject* (%struct.PyObject*, i64, i64)*, i32 (%struct.PyObject*, i64, %struct.PyObject*)*, i32 (%struct.PyObject*, i64, i64, %struct.PyObject*)*, i32 (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, i64)* }
- %struct.PyTupleObject = type { i64, %struct._typeobject*, i64, [1 x %struct.PyObject*] }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
- %struct._typeobject = type { i64, %struct._typeobject*, i64, i8*, i64, i64, void (%struct.PyObject*)*, i32 (%struct.PyObject*, %struct.FILE*, i32)*, %struct.PyObject* (%struct.PyObject*, i8*)*, i32 (%struct.PyObject*, i8*, %struct.PyObject*)*, i32 (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*)*, %struct.PyNumberMethods*, %struct.PySequenceMethods*, %struct.PyMappingMethods*, i64 (%struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, i32 (%struct.PyObject*, %struct.PyObject*, %struct.PyObject*)*, %struct.PyBufferProcs*, i64, i8*, i32 (%struct.PyObject*, i32 (%struct.PyObject*, i8*)*, i8*)*, i32 (%struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*, i32)*, i64, %struct.PyObject* (%struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*)*, %struct.PyMethodDef*, %struct.PyMemberDef*, %struct.PyGetSetDef*, %struct._typeobject*, %struct.PyObject*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*, %struct.PyObject*)*, i32 (%struct.PyObject*, %struct.PyObject*, %struct.PyObject*)*, i64, i32 (%struct.PyObject*, %struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct._typeobject*, i64)*, %struct.PyObject* (%struct._typeobject*, %struct.PyObject*, %struct.PyObject*)*, void (i8*)*, i32 (%struct.PyObject*)*, %struct.PyObject*, %struct.PyObject*, %struct.PyObject*, %struct.PyObject*, %struct.PyObject*, void (%struct.PyObject*)* }
-@PyArray_API = external global i8** ; <i8***> [#uses=4]
-@PyUFunc_API = external global i8** ; <i8***> [#uses=4]
-@.str5 = external constant [14 x i8] ; <[14 x i8]*> [#uses=1]
-
-define %struct.PyObject* @ubyte_divmod(%struct.PyObject* %a, %struct.PyObject* %b) {
-entry:
- %arg1 = alloca i8, align 1 ; <i8*> [#uses=3]
- %arg2 = alloca i8, align 1 ; <i8*> [#uses=3]
- %first = alloca i32, align 4 ; <i32*> [#uses=2]
- %bufsize = alloca i32, align 4 ; <i32*> [#uses=1]
- %errmask = alloca i32, align 4 ; <i32*> [#uses=2]
- %errobj = alloca %struct.PyObject*, align 8 ; <%struct.PyObject**> [#uses=2]
- %tmp3.i = call fastcc i32 @_ubyte_convert_to_ctype( %struct.PyObject* %a, i8* %arg1 ) ; <i32> [#uses=2]
- %tmp5.i = icmp slt i32 %tmp3.i, 0 ; <i1> [#uses=1]
- br i1 %tmp5.i, label %_ubyte_convert2_to_ctypes.exit, label %cond_next.i
-
-cond_next.i: ; preds = %entry
- %tmp11.i = call fastcc i32 @_ubyte_convert_to_ctype( %struct.PyObject* %b, i8* %arg2 ) ; <i32> [#uses=2]
- %tmp13.i = icmp slt i32 %tmp11.i, 0 ; <i1> [#uses=1]
- %retval.i = select i1 %tmp13.i, i32 %tmp11.i, i32 0 ; <i32> [#uses=1]
- switch i32 %retval.i, label %bb35 [
- i32 -2, label %bb17
- i32 -1, label %bb4
- ]
-
-_ubyte_convert2_to_ctypes.exit: ; preds = %entry
- switch i32 %tmp3.i, label %bb35 [
- i32 -2, label %bb17
- i32 -1, label %bb4
- ]
-
-bb4: ; preds = %_ubyte_convert2_to_ctypes.exit, %cond_next.i
- %tmp5 = load i8*** @PyArray_API, align 8 ; <i8**> [#uses=1]
- %tmp6 = getelementptr i8** %tmp5, i64 2 ; <i8**> [#uses=1]
- %tmp7 = load i8** %tmp6 ; <i8*> [#uses=1]
- %tmp78 = bitcast i8* %tmp7 to %struct._typeobject* ; <%struct._typeobject*> [#uses=1]
- %tmp9 = getelementptr %struct._typeobject* %tmp78, i32 0, i32 12 ; <%struct.PyNumberMethods**> [#uses=1]
- %tmp10 = load %struct.PyNumberMethods** %tmp9 ; <%struct.PyNumberMethods*> [#uses=1]
- %tmp11 = getelementptr %struct.PyNumberMethods* %tmp10, i32 0, i32 5 ; <%struct.PyObject* (%struct.PyObject*, %struct.PyObject*)**> [#uses=1]
- %tmp12 = load %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)** %tmp11 ; <%struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*> [#uses=1]
- %tmp15 = call %struct.PyObject* %tmp12( %struct.PyObject* %a, %struct.PyObject* %b ) ; <%struct.PyObject*> [#uses=1]
- ret %struct.PyObject* %tmp15
-
-bb17: ; preds = %_ubyte_convert2_to_ctypes.exit, %cond_next.i
- %tmp18 = call %struct.PyObject* @PyErr_Occurred( ) ; <%struct.PyObject*> [#uses=1]
- %tmp19 = icmp eq %struct.PyObject* %tmp18, null ; <i1> [#uses=1]
- br i1 %tmp19, label %cond_next, label %UnifiedReturnBlock
-
-cond_next: ; preds = %bb17
- %tmp22 = load i8*** @PyArray_API, align 8 ; <i8**> [#uses=1]
- %tmp23 = getelementptr i8** %tmp22, i64 10 ; <i8**> [#uses=1]
- %tmp24 = load i8** %tmp23 ; <i8*> [#uses=1]
- %tmp2425 = bitcast i8* %tmp24 to %struct._typeobject* ; <%struct._typeobject*> [#uses=1]
- %tmp26 = getelementptr %struct._typeobject* %tmp2425, i32 0, i32 12 ; <%struct.PyNumberMethods**> [#uses=1]
- %tmp27 = load %struct.PyNumberMethods** %tmp26 ; <%struct.PyNumberMethods*> [#uses=1]
- %tmp28 = getelementptr %struct.PyNumberMethods* %tmp27, i32 0, i32 5 ; <%struct.PyObject* (%struct.PyObject*, %struct.PyObject*)**> [#uses=1]
- %tmp29 = load %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)** %tmp28 ; <%struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*> [#uses=1]
- %tmp32 = call %struct.PyObject* %tmp29( %struct.PyObject* %a, %struct.PyObject* %b ) ; <%struct.PyObject*> [#uses=1]
- ret %struct.PyObject* %tmp32
-
-bb35: ; preds = %_ubyte_convert2_to_ctypes.exit, %cond_next.i
- %tmp36 = load i8*** @PyUFunc_API, align 8 ; <i8**> [#uses=1]
- %tmp37 = getelementptr i8** %tmp36, i64 27 ; <i8**> [#uses=1]
- %tmp38 = load i8** %tmp37 ; <i8*> [#uses=1]
- %tmp3839 = bitcast i8* %tmp38 to void ()* ; <void ()*> [#uses=1]
- call void %tmp3839( )
- %tmp40 = load i8* %arg2, align 1 ; <i8> [#uses=4]
- %tmp1.i = icmp eq i8 %tmp40, 0 ; <i1> [#uses=2]
- br i1 %tmp1.i, label %cond_true.i, label %cond_false.i
-
-cond_true.i: ; preds = %bb35
- %tmp3.i196 = call i32 @feraiseexcept( i32 4 ) ; <i32> [#uses=0]
- %tmp46207 = load i8* %arg2, align 1 ; <i8> [#uses=3]
- %tmp48208 = load i8* %arg1, align 1 ; <i8> [#uses=2]
- %tmp1.i197210 = icmp eq i8 %tmp48208, 0 ; <i1> [#uses=1]
- %tmp4.i212 = icmp eq i8 %tmp46207, 0 ; <i1> [#uses=1]
- %tmp7.i198213 = or i1 %tmp1.i197210, %tmp4.i212 ; <i1> [#uses=1]
- br i1 %tmp7.i198213, label %cond_true.i200, label %cond_next17.i
-
-cond_false.i: ; preds = %bb35
- %tmp42 = load i8* %arg1, align 1 ; <i8> [#uses=3]
- %tmp7.i = udiv i8 %tmp42, %tmp40 ; <i8> [#uses=2]
- %tmp1.i197 = icmp eq i8 %tmp42, 0 ; <i1> [#uses=1]
- %tmp7.i198 = or i1 %tmp1.i197, %tmp1.i ; <i1> [#uses=1]
- br i1 %tmp7.i198, label %cond_true.i200, label %cond_next17.i
-
-cond_true.i200: ; preds = %cond_false.i, %cond_true.i
- %out.0 = phi i8 [ 0, %cond_true.i ], [ %tmp7.i, %cond_false.i ] ; <i8> [#uses=2]
- %tmp46202.0 = phi i8 [ %tmp46207, %cond_true.i ], [ %tmp40, %cond_false.i ] ; <i8> [#uses=1]
- %tmp11.i199 = icmp eq i8 %tmp46202.0, 0 ; <i1> [#uses=1]
- br i1 %tmp11.i199, label %cond_true14.i, label %ubyte_ctype_remainder.exit
-
-cond_true14.i: ; preds = %cond_true.i200
- %tmp15.i = call i32 @feraiseexcept( i32 4 ) ; <i32> [#uses=0]
- br label %ubyte_ctype_remainder.exit
-
-cond_next17.i: ; preds = %cond_false.i, %cond_true.i
- %out.1 = phi i8 [ 0, %cond_true.i ], [ %tmp7.i, %cond_false.i ] ; <i8> [#uses=1]
- %tmp46202.1 = phi i8 [ %tmp46207, %cond_true.i ], [ %tmp40, %cond_false.i ] ; <i8> [#uses=1]
- %tmp48205.1 = phi i8 [ %tmp48208, %cond_true.i ], [ %tmp42, %cond_false.i ] ; <i8> [#uses=1]
- %tmp20.i = urem i8 %tmp48205.1, %tmp46202.1 ; <i8> [#uses=1]
- br label %ubyte_ctype_remainder.exit
-
-ubyte_ctype_remainder.exit: ; preds = %cond_next17.i, %cond_true14.i, %cond_true.i200
- %out2.0 = phi i8 [ %tmp20.i, %cond_next17.i ], [ 0, %cond_true14.i ], [ 0, %cond_true.i200 ] ; <i8> [#uses=1]
- %out.2 = phi i8 [ %out.1, %cond_next17.i ], [ %out.0, %cond_true14.i ], [ %out.0, %cond_true.i200 ] ; <i8> [#uses=1]
- %tmp52 = load i8*** @PyUFunc_API, align 8 ; <i8**> [#uses=1]
- %tmp53 = getelementptr i8** %tmp52, i64 28 ; <i8**> [#uses=1]
- %tmp54 = load i8** %tmp53 ; <i8*> [#uses=1]
- %tmp5455 = bitcast i8* %tmp54 to i32 ()* ; <i32 ()*> [#uses=1]
- %tmp56 = call i32 %tmp5455( ) ; <i32> [#uses=2]
- %tmp58 = icmp eq i32 %tmp56, 0 ; <i1> [#uses=1]
- br i1 %tmp58, label %cond_next89, label %cond_true61
-
-cond_true61: ; preds = %ubyte_ctype_remainder.exit
- %tmp62 = load i8*** @PyUFunc_API, align 8 ; <i8**> [#uses=1]
- %tmp63 = getelementptr i8** %tmp62, i64 25 ; <i8**> [#uses=1]
- %tmp64 = load i8** %tmp63 ; <i8*> [#uses=1]
- %tmp6465 = bitcast i8* %tmp64 to i32 (i8*, i32*, i32*, %struct.PyObject**)* ; <i32 (i8*, i32*, i32*, %struct.PyObject**)*> [#uses=1]
- %tmp67 = call i32 %tmp6465( i8* getelementptr ([14 x i8]* @.str5, i32 0, i64 0), i32* %bufsize, i32* %errmask, %struct.PyObject** %errobj ) ; <i32> [#uses=1]
- %tmp68 = icmp slt i32 %tmp67, 0 ; <i1> [#uses=1]
- br i1 %tmp68, label %UnifiedReturnBlock, label %cond_next73
-
-cond_next73: ; preds = %cond_true61
- store i32 1, i32* %first, align 4
- %tmp74 = load i8*** @PyUFunc_API, align 8 ; <i8**> [#uses=1]
- %tmp75 = getelementptr i8** %tmp74, i64 29 ; <i8**> [#uses=1]
- %tmp76 = load i8** %tmp75 ; <i8*> [#uses=1]
- %tmp7677 = bitcast i8* %tmp76 to i32 (i32, %struct.PyObject*, i32, i32*)* ; <i32 (i32, %struct.PyObject*, i32, i32*)*> [#uses=1]
- %tmp79 = load %struct.PyObject** %errobj, align 8 ; <%struct.PyObject*> [#uses=1]
- %tmp80 = load i32* %errmask, align 4 ; <i32> [#uses=1]
- %tmp82 = call i32 %tmp7677( i32 %tmp80, %struct.PyObject* %tmp79, i32 %tmp56, i32* %first ) ; <i32> [#uses=1]
- %tmp83 = icmp eq i32 %tmp82, 0 ; <i1> [#uses=1]
- br i1 %tmp83, label %cond_next89, label %UnifiedReturnBlock
-
-cond_next89: ; preds = %cond_next73, %ubyte_ctype_remainder.exit
- %tmp90 = call %struct.PyObject* @PyTuple_New( i64 2 ) ; <%struct.PyObject*> [#uses=9]
- %tmp92 = icmp eq %struct.PyObject* %tmp90, null ; <i1> [#uses=1]
- br i1 %tmp92, label %UnifiedReturnBlock, label %cond_next97
-
-cond_next97: ; preds = %cond_next89
- %tmp98 = load i8*** @PyArray_API, align 8 ; <i8**> [#uses=1]
- %tmp99 = getelementptr i8** %tmp98, i64 25 ; <i8**> [#uses=1]
- %tmp100 = load i8** %tmp99 ; <i8*> [#uses=1]
- %tmp100101 = bitcast i8* %tmp100 to %struct._typeobject* ; <%struct._typeobject*> [#uses=2]
- %tmp102 = getelementptr %struct._typeobject* %tmp100101, i32 0, i32 38 ; <%struct.PyObject* (%struct._typeobject*, i64)**> [#uses=1]
- %tmp103 = load %struct.PyObject* (%struct._typeobject*, i64)** %tmp102 ; <%struct.PyObject* (%struct._typeobject*, i64)*> [#uses=1]
- %tmp108 = call %struct.PyObject* %tmp103( %struct._typeobject* %tmp100101, i64 0 ) ; <%struct.PyObject*> [#uses=3]
- %tmp110 = icmp eq %struct.PyObject* %tmp108, null ; <i1> [#uses=1]
- br i1 %tmp110, label %cond_true113, label %cond_next135
-
-cond_true113: ; preds = %cond_next97
- %tmp115 = getelementptr %struct.PyObject* %tmp90, i32 0, i32 0 ; <i64*> [#uses=2]
- %tmp116 = load i64* %tmp115 ; <i64> [#uses=1]
- %tmp117 = add i64 %tmp116, -1 ; <i64> [#uses=2]
- store i64 %tmp117, i64* %tmp115
- %tmp123 = icmp eq i64 %tmp117, 0 ; <i1> [#uses=1]
- br i1 %tmp123, label %cond_true126, label %UnifiedReturnBlock
-
-cond_true126: ; preds = %cond_true113
- %tmp128 = getelementptr %struct.PyObject* %tmp90, i32 0, i32 1 ; <%struct._typeobject**> [#uses=1]
- %tmp129 = load %struct._typeobject** %tmp128 ; <%struct._typeobject*> [#uses=1]
- %tmp130 = getelementptr %struct._typeobject* %tmp129, i32 0, i32 6 ; <void (%struct.PyObject*)**> [#uses=1]
- %tmp131 = load void (%struct.PyObject*)** %tmp130 ; <void (%struct.PyObject*)*> [#uses=1]
- call void %tmp131( %struct.PyObject* %tmp90 )
- ret %struct.PyObject* null
-
-cond_next135: ; preds = %cond_next97
- %tmp136137 = bitcast %struct.PyObject* %tmp108 to %struct.PyBoolScalarObject* ; <%struct.PyBoolScalarObject*> [#uses=1]
- %tmp139 = getelementptr %struct.PyBoolScalarObject* %tmp136137, i32 0, i32 2 ; <i8*> [#uses=1]
- store i8 %out.2, i8* %tmp139
- %tmp140141 = bitcast %struct.PyObject* %tmp90 to %struct.PyTupleObject* ; <%struct.PyTupleObject*> [#uses=2]
- %tmp143 = getelementptr %struct.PyTupleObject* %tmp140141, i32 0, i32 3, i64 0 ; <%struct.PyObject**> [#uses=1]
- store %struct.PyObject* %tmp108, %struct.PyObject** %tmp143
- %tmp145 = load i8*** @PyArray_API, align 8 ; <i8**> [#uses=1]
- %tmp146 = getelementptr i8** %tmp145, i64 25 ; <i8**> [#uses=1]
- %tmp147 = load i8** %tmp146 ; <i8*> [#uses=1]
- %tmp147148 = bitcast i8* %tmp147 to %struct._typeobject* ; <%struct._typeobject*> [#uses=2]
- %tmp149 = getelementptr %struct._typeobject* %tmp147148, i32 0, i32 38 ; <%struct.PyObject* (%struct._typeobject*, i64)**> [#uses=1]
- %tmp150 = load %struct.PyObject* (%struct._typeobject*, i64)** %tmp149 ; <%struct.PyObject* (%struct._typeobject*, i64)*> [#uses=1]
- %tmp155 = call %struct.PyObject* %tmp150( %struct._typeobject* %tmp147148, i64 0 ) ; <%struct.PyObject*> [#uses=3]
- %tmp157 = icmp eq %struct.PyObject* %tmp155, null ; <i1> [#uses=1]
- br i1 %tmp157, label %cond_true160, label %cond_next182
-
-cond_true160: ; preds = %cond_next135
- %tmp162 = getelementptr %struct.PyObject* %tmp90, i32 0, i32 0 ; <i64*> [#uses=2]
- %tmp163 = load i64* %tmp162 ; <i64> [#uses=1]
- %tmp164 = add i64 %tmp163, -1 ; <i64> [#uses=2]
- store i64 %tmp164, i64* %tmp162
- %tmp170 = icmp eq i64 %tmp164, 0 ; <i1> [#uses=1]
- br i1 %tmp170, label %cond_true173, label %UnifiedReturnBlock
-
-cond_true173: ; preds = %cond_true160
- %tmp175 = getelementptr %struct.PyObject* %tmp90, i32 0, i32 1 ; <%struct._typeobject**> [#uses=1]
- %tmp176 = load %struct._typeobject** %tmp175 ; <%struct._typeobject*> [#uses=1]
- %tmp177 = getelementptr %struct._typeobject* %tmp176, i32 0, i32 6 ; <void (%struct.PyObject*)**> [#uses=1]
- %tmp178 = load void (%struct.PyObject*)** %tmp177 ; <void (%struct.PyObject*)*> [#uses=1]
- call void %tmp178( %struct.PyObject* %tmp90 )
- ret %struct.PyObject* null
-
-cond_next182: ; preds = %cond_next135
- %tmp183184 = bitcast %struct.PyObject* %tmp155 to %struct.PyBoolScalarObject* ; <%struct.PyBoolScalarObject*> [#uses=1]
- %tmp186 = getelementptr %struct.PyBoolScalarObject* %tmp183184, i32 0, i32 2 ; <i8*> [#uses=1]
- store i8 %out2.0, i8* %tmp186
- %tmp190 = getelementptr %struct.PyTupleObject* %tmp140141, i32 0, i32 3, i64 1 ; <%struct.PyObject**> [#uses=1]
- store %struct.PyObject* %tmp155, %struct.PyObject** %tmp190
- ret %struct.PyObject* %tmp90
-
-UnifiedReturnBlock: ; preds = %cond_true160, %cond_true113, %cond_next89, %cond_next73, %cond_true61, %bb17
- ret %struct.PyObject* null
-}
-
-declare i32 @feraiseexcept(i32)
-
-declare fastcc i32 @_ubyte_convert_to_ctype(%struct.PyObject*, i8*)
-
-declare %struct.PyObject* @PyErr_Occurred()
-
-declare %struct.PyObject* @PyTuple_New(i64)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-08-10-SignExtSubreg.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-08-10-SignExtSubreg.ll
deleted file mode 100644
index e93092f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-08-10-SignExtSubreg.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {movsbl}
-
-@X = global i32 0 ; <i32*> [#uses=1]
-
-define i8 @_Z3fooi(i32 %x) signext {
-entry:
- store i32 %x, i32* @X, align 4
- %retval67 = trunc i32 %x to i8 ; <i8> [#uses=1]
- ret i8 %retval67
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-08-13-AppendingLinkage.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-08-13-AppendingLinkage.ll
deleted file mode 100644
index c90a85f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-08-13-AppendingLinkage.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep drectve
-; PR1607
-
-%hlvm_programs_element = type { i8*, i32 (i32, i8**)* }
-@hlvm_programs = appending constant [1 x %hlvm_programs_element]
-zeroinitializer
-
-define %hlvm_programs_element* @hlvm_get_programs() {
-entry:
- ret %hlvm_programs_element* getelementptr([1 x %hlvm_programs_element]*
- @hlvm_programs, i32 0, i32 0)
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-09-05-InvalidAsm.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-09-05-InvalidAsm.ll
deleted file mode 100644
index 5acb051..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-09-05-InvalidAsm.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -x86-asm-syntax=intel | not grep {lea\[\[:space:\]\]R}
-
- %struct.AGenericCall = type { %struct.AGenericManager*, %struct.ComponentParameters*, i32* }
- %struct.AGenericManager = type <{ i8 }>
- %struct.ComponentInstanceRecord = type opaque
- %struct.ComponentParameters = type { [1 x i64] }
-
-define i32 @_ZN12AGenericCall10MapIDPtrAtEsRP23ComponentInstanceRecord(%struct.AGenericCall* %this, i16 signext %param, %struct.ComponentInstanceRecord** %instance) {
-entry:
- %tmp4 = icmp slt i16 %param, 0 ; <i1> [#uses=1]
- br i1 %tmp4, label %cond_true, label %cond_next
-
-cond_true: ; preds = %entry
- %tmp1415 = shl i16 %param, 3 ; <i16> [#uses=1]
- %tmp17 = getelementptr %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
- %tmp18 = load %struct.ComponentParameters** %tmp17, align 8 ; <%struct.ComponentParameters*> [#uses=1]
- %tmp1920 = bitcast %struct.ComponentParameters* %tmp18 to i8* ; <i8*> [#uses=1]
- %tmp212223 = sext i16 %tmp1415 to i64 ; <i64> [#uses=1]
- %tmp24 = getelementptr i8* %tmp1920, i64 %tmp212223 ; <i8*> [#uses=1]
- %tmp2425 = bitcast i8* %tmp24 to i64* ; <i64*> [#uses=1]
- %tmp28 = load i64* %tmp2425, align 8 ; <i64> [#uses=1]
- %tmp2829 = inttoptr i64 %tmp28 to i32* ; <i32*> [#uses=1]
- %tmp31 = getelementptr %struct.AGenericCall* %this, i32 0, i32 2 ; <i32**> [#uses=1]
- store i32* %tmp2829, i32** %tmp31, align 8
- br label %cond_next
-
-cond_next: ; preds = %cond_true, %entry
- %tmp4243 = shl i16 %param, 3 ; <i16> [#uses=1]
- %tmp46 = getelementptr %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
- %tmp47 = load %struct.ComponentParameters** %tmp46, align 8 ; <%struct.ComponentParameters*> [#uses=1]
- %tmp4849 = bitcast %struct.ComponentParameters* %tmp47 to i8* ; <i8*> [#uses=1]
- %tmp505152 = sext i16 %tmp4243 to i64 ; <i64> [#uses=1]
- %tmp53 = getelementptr i8* %tmp4849, i64 %tmp505152 ; <i8*> [#uses=1]
- %tmp5354 = bitcast i8* %tmp53 to i64* ; <i64*> [#uses=1]
- %tmp58 = load i64* %tmp5354, align 8 ; <i64> [#uses=1]
- %tmp59 = icmp eq i64 %tmp58, 0 ; <i1> [#uses=1]
- br i1 %tmp59, label %UnifiedReturnBlock, label %cond_true63
-
-cond_true63: ; preds = %cond_next
- %tmp65 = getelementptr %struct.AGenericCall* %this, i32 0, i32 0 ; <%struct.AGenericManager**> [#uses=1]
- %tmp66 = load %struct.AGenericManager** %tmp65, align 8 ; <%struct.AGenericManager*> [#uses=1]
- %tmp69 = tail call i32 @_ZN15AGenericManager24DefaultComponentInstanceERP23ComponentInstanceRecord( %struct.AGenericManager* %tmp66, %struct.ComponentInstanceRecord** %instance ) ; <i32> [#uses=1]
- ret i32 %tmp69
-
-UnifiedReturnBlock: ; preds = %cond_next
- ret i32 undef
-}
-
-declare i32 @_ZN15AGenericManager24DefaultComponentInstanceERP23ComponentInstanceRecord(%struct.AGenericManager*, %struct.ComponentInstanceRecord**)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-09-06-ExtWeakAliasee.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-09-06-ExtWeakAliasee.ll
deleted file mode 100644
index c5d2a46..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-09-06-ExtWeakAliasee.ll
+++ /dev/null
@@ -1,4 +0,0 @@
-; RUN: llc < %s -march=x86 | grep weak | count 2
-@__gthrw_pthread_once = alias weak i32 (i32*, void ()*)* @pthread_once ; <i32 (i32*, void ()*)*> [#uses=0]
-
-declare extern_weak i32 @pthread_once(i32*, void ()*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-09-17-ObjcFrameEH.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-09-17-ObjcFrameEH.ll
deleted file mode 100644
index 56ee2a3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-09-17-ObjcFrameEH.ll
+++ /dev/null
@@ -1,65 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-apple-darwin -enable-eh | grep {isNullOrNil].eh"} | count 2
-
- %struct.NSString = type { }
- %struct._objc__method_prototype_list = type opaque
- %struct._objc_category = type { i8*, i8*, %struct._objc_method_list*, %struct._objc_method_list*, %struct._objc_protocol**, i32, %struct._prop_list_t* }
- %struct._objc_method = type { %struct.objc_selector*, i8*, i8* }
- %struct._objc_method_list = type opaque
- %struct._objc_module = type { i32, i32, i8*, %struct._objc_symtab* }
- %struct._objc_protocol = type { %struct._objc_protocol_extension*, i8*, %struct._objc_protocol**, %struct._objc__method_prototype_list*, %struct._objc__method_prototype_list* }
- %struct._objc_protocol_extension = type opaque
- %struct._objc_symtab = type { i32, %struct.objc_selector**, i16, i16, [1 x i8*] }
- %struct._prop_list_t = type opaque
- %struct.anon = type { %struct._objc__method_prototype_list*, i32, [1 x %struct._objc_method] }
- %struct.objc_selector = type opaque
-@"\01L_OBJC_SYMBOLS" = internal global { i32, i32, i16, i16, [1 x %struct._objc_category*] } {
- i32 0,
- i32 0,
- i16 0,
- i16 1,
- [1 x %struct._objc_category*] [ %struct._objc_category* bitcast ({ i8*, i8*, %struct._objc_method_list*, i32, i32, i32, i32 }* @"\01L_OBJC_CATEGORY_NSString_local" to %struct._objc_category*) ] }, section "__OBJC,__symbols,regular,no_dead_strip" ; <{ i32, i32, i16, i16, [1 x %struct._objc_category*] }*> [#uses=2]
-@"\01L_OBJC_CATEGORY_INSTANCE_METHODS_NSString_local" = internal global { i32, i32, [1 x %struct._objc_method] } {
- i32 0,
- i32 1,
- [1 x %struct._objc_method] [ %struct._objc_method {
- %struct.objc_selector* bitcast ([12 x i8]* @"\01L_OBJC_METH_VAR_NAME_0" to %struct.objc_selector*),
- i8* getelementptr ([7 x i8]* @"\01L_OBJC_METH_VAR_TYPE_0", i32 0, i32 0),
- i8* bitcast (i8 (%struct.NSString*, %struct.objc_selector*) signext * @"-[NSString(local) isNullOrNil]" to i8*) } ] }, section "__OBJC,__cat_inst_meth,regular,no_dead_strip" ; <{ i32, i32, [1 x %struct._objc_method] }*> [#uses=3]
-@"\01L_OBJC_CATEGORY_NSString_local" = internal global { i8*, i8*, %struct._objc_method_list*, i32, i32, i32, i32 } {
- i8* getelementptr ([6 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0),
- i8* getelementptr ([9 x i8]* @"\01L_OBJC_CLASS_NAME_1", i32 0, i32 0),
- %struct._objc_method_list* bitcast ({ i32, i32, [1 x %struct._objc_method] }* @"\01L_OBJC_CATEGORY_INSTANCE_METHODS_NSString_local" to %struct._objc_method_list*),
- i32 0,
- i32 0,
- i32 28,
- i32 0 }, section "__OBJC,__category,regular,no_dead_strip" ; <{ i8*, i8*, %struct._objc_method_list*, i32, i32, i32, i32 }*> [#uses=2]
-@"\01L_OBJC_IMAGE_INFO" = internal constant [2 x i32] zeroinitializer, section "__OBJC,__image_info,regular" ; <[2 x i32]*> [#uses=1]
-@"\01L_OBJC_MODULES" = internal global %struct._objc_module {
- i32 7,
- i32 16,
- i8* getelementptr ([1 x i8]* @"\01L_OBJC_CLASS_NAME_2", i32 0, i32 0),
- %struct._objc_symtab* bitcast ({ i32, i32, i16, i16, [1 x %struct._objc_category*] }* @"\01L_OBJC_SYMBOLS" to %struct._objc_symtab*) }, section "__OBJC,__module_info,regular,no_dead_strip" ; <%struct._objc_module*> [#uses=1]
-@"\01.objc_class_ref_NSString" = internal global i8* @"\01.objc_class_name_NSString" ; <i8**> [#uses=0]
-@"\01.objc_class_name_NSString" = external global i8 ; <i8*> [#uses=1]
-@"\01.objc_category_name_NSString_local" = constant i32 0 ; <i32*> [#uses=1]
-@"\01L_OBJC_CLASS_NAME_2" = internal global [1 x i8] zeroinitializer, section "__TEXT,__cstring,cstring_literals" ; <[1 x i8]*> [#uses=2]
-@"\01L_OBJC_CLASS_NAME_1" = internal global [9 x i8] c"NSString\00", section "__TEXT,__cstring,cstring_literals" ; <[9 x i8]*> [#uses=2]
-@"\01L_OBJC_CLASS_NAME_0" = internal global [6 x i8] c"local\00", section "__TEXT,__cstring,cstring_literals" ; <[6 x i8]*> [#uses=2]
-@"\01L_OBJC_METH_VAR_NAME_0" = internal global [12 x i8] c"isNullOrNil\00", section "__TEXT,__cstring,cstring_literals" ; <[12 x i8]*> [#uses=3]
-@"\01L_OBJC_METH_VAR_TYPE_0" = internal global [7 x i8] c"c8@0:4\00", section "__TEXT,__cstring,cstring_literals" ; <[7 x i8]*> [#uses=2]
-@llvm.used = appending global [11 x i8*] [ i8* bitcast ({ i32, i32, i16, i16, [1 x %struct._objc_category*] }* @"\01L_OBJC_SYMBOLS" to i8*), i8* bitcast ({ i32, i32, [1 x %struct._objc_method] }* @"\01L_OBJC_CATEGORY_INSTANCE_METHODS_NSString_local" to i8*), i8* bitcast ({ i8*, i8*, %struct._objc_method_list*, i32, i32, i32, i32 }* @"\01L_OBJC_CATEGORY_NSString_local" to i8*), i8* bitcast ([2 x i32]* @"\01L_OBJC_IMAGE_INFO" to i8*), i8* bitcast (%struct._objc_module* @"\01L_OBJC_MODULES" to i8*), i8* bitcast (i32* @"\01.objc_category_name_NSString_local" to i8*), i8* getelementptr ([1 x i8]* @"\01L_OBJC_CLASS_NAME_2", i32 0, i32 0), i8* getelementptr ([9 x i8]* @"\01L_OBJC_CLASS_NAME_1", i32 0, i32 0), i8* getelementptr ([6 x i8]* @"\01L_OBJC_CLASS_NAME_0", i32 0, i32 0), i8* getelementptr ([12 x i8]* @"\01L_OBJC_METH_VAR_NAME_0", i32 0, i32 0), i8* getelementptr ([7 x i8]* @"\01L_OBJC_METH_VAR_TYPE_0", i32 0, i32 0) ], section "llvm.metadata" ; <[11 x i8*]*> [#uses=0]
-
-define internal i8 @"-[NSString(local) isNullOrNil]"(%struct.NSString* %self, %struct.objc_selector* %_cmd) signext {
-entry:
- %self_addr = alloca %struct.NSString* ; <%struct.NSString**> [#uses=1]
- %_cmd_addr = alloca %struct.objc_selector* ; <%struct.objc_selector**> [#uses=1]
- %retval = alloca i8, align 1 ; <i8*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store %struct.NSString* %self, %struct.NSString** %self_addr
- store %struct.objc_selector* %_cmd, %struct.objc_selector** %_cmd_addr
- br label %return
-
-return: ; preds = %entry
- %retval1 = load i8* %retval ; <i8> [#uses=1]
- ret i8 %retval1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll
deleted file mode 100644
index 0ae1897..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-09-18-ShuffleXformBug.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep -- -86
-
-define i16 @f(<4 x float>* %tmp116117.i1061.i) nounwind {
-entry:
- alloca [4 x <4 x float>] ; <[4 x <4 x float>]*>:0 [#uses=167]
- alloca [4 x <4 x float>] ; <[4 x <4 x float>]*>:1 [#uses=170]
- alloca [4 x <4 x i32>] ; <[4 x <4 x i32>]*>:2 [#uses=12]
- %.sub6235.i = getelementptr [4 x <4 x float>]* %0, i32 0, i32 0 ; <<4 x float>*> [#uses=76]
- %.sub.i = getelementptr [4 x <4 x float>]* %1, i32 0, i32 0 ; <<4 x float>*> [#uses=59]
-
- %tmp124.i1062.i = getelementptr <4 x float>* %tmp116117.i1061.i, i32 63 ; <<4 x float>*> [#uses=1]
- %tmp125.i1063.i = load <4 x float>* %tmp124.i1062.i ; <<4 x float>> [#uses=5]
- %tmp828.i1077.i = shufflevector <4 x float> %tmp125.i1063.i, <4 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x float>> [#uses=4]
- %tmp704.i1085.i = load <4 x float>* %.sub6235.i ; <<4 x float>> [#uses=1]
- %tmp712.i1086.i = call <4 x float> @llvm.x86.sse.max.ps( <4 x float> %tmp704.i1085.i, <4 x float> %tmp828.i1077.i ) ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp712.i1086.i, <4 x float>* %.sub.i
-
- %tmp2587.i1145.gep.i = getelementptr [4 x <4 x float>]* %1, i32 0, i32 0, i32 2 ; <float*> [#uses=1]
- %tmp5334.i = load float* %tmp2587.i1145.gep.i ; <float> [#uses=5]
- %tmp2723.i1170.i = insertelement <4 x float> undef, float %tmp5334.i, i32 2 ; <<4 x float>> [#uses=5]
- store <4 x float> %tmp2723.i1170.i, <4 x float>* %.sub6235.i
-
- %tmp1406.i1367.i = shufflevector <4 x float> %tmp2723.i1170.i, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>> [#uses=1]
- %tmp84.i1413.i = load <4 x float>* %.sub6235.i ; <<4 x float>> [#uses=1]
- %tmp89.i1415.i = fmul <4 x float> %tmp84.i1413.i, %tmp1406.i1367.i ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp89.i1415.i, <4 x float>* %.sub.i
- ret i16 0
-}
-
-declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-09-27-LDIntrinsics.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-09-27-LDIntrinsics.ll
deleted file mode 100644
index 4d69715..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-09-27-LDIntrinsics.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s | FileCheck %s
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin8"
-
-define x86_fp80 @foo(x86_fp80 %x) nounwind{
-entry:
- %tmp2 = call x86_fp80 @llvm.sqrt.f80( x86_fp80 %x )
- ret x86_fp80 %tmp2
-
-; CHECK: foo:
-; CHECK: fldt 4(%esp)
-; CHECK-NEXT: fsqrt
-; CHECK-NEXT: ret
-}
-
-declare x86_fp80 @llvm.sqrt.f80(x86_fp80)
-
-define x86_fp80 @bar(x86_fp80 %x) nounwind {
-entry:
- %tmp2 = call x86_fp80 @llvm.powi.f80( x86_fp80 %x, i32 3 )
- ret x86_fp80 %tmp2
-; CHECK: bar:
-; CHECK: fldt 4(%esp)
-; CHECK-NEXT: fld %st(0)
-; CHECK-NEXT: fmul %st(1)
-; CHECK-NEXT: fmulp %st(1)
-; CHECK-NEXT: ret
-}
-
-declare x86_fp80 @llvm.powi.f80(x86_fp80, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-04-AvoidEFLAGSCopy.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-10-04-AvoidEFLAGSCopy.ll
deleted file mode 100644
index 6fc8ec9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-04-AvoidEFLAGSCopy.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep pushf
-
- %struct.gl_texture_image = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8* }
- %struct.gl_texture_object = type { i32, i32, i32, float, [4 x i32], i32, i32, i32, i32, i32, float, [11 x %struct.gl_texture_image*], [1024 x i8], i32, i32, i32, i8, i8*, i8, void (%struct.gl_texture_object*, i32, float*, float*, float*, float*, i8*, i8*, i8*, i8*)*, %struct.gl_texture_object* }
-
-define fastcc void @sample_3d_linear(%struct.gl_texture_object* %tObj, %struct.gl_texture_image* %img, float %s, float %t, float %r, i8* %red, i8* %green, i8* %blue, i8* %alpha) {
-entry:
- %tmp15 = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp16 = icmp eq i32 %tmp15, 10497 ; <i1> [#uses=1]
- %tmp2152 = call float @floorf( float 0.000000e+00 ) ; <float> [#uses=0]
- br i1 %tmp16, label %cond_true, label %cond_false
-
-cond_true: ; preds = %entry
- ret void
-
-cond_false: ; preds = %entry
- ret void
-}
-
-declare float @floorf(float)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-05-3AddrConvert.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-10-05-3AddrConvert.ll
deleted file mode 100644
index 2c2706d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-05-3AddrConvert.ll
+++ /dev/null
@@ -1,48 +0,0 @@
-; RUN: llc < %s -march=x86 | grep lea
-
- %struct.anon = type { [3 x double], double, %struct.node*, [64 x %struct.bnode*], [64 x %struct.bnode*] }
- %struct.bnode = type { i16, double, [3 x double], i32, i32, [3 x double], [3 x double], [3 x double], double, %struct.bnode*, %struct.bnode* }
- %struct.node = type { i16, double, [3 x double], i32, i32 }
-
-define i32 @main(i32 %argc, i8** nocapture %argv) nounwind {
-entry:
- %0 = malloc %struct.anon ; <%struct.anon*> [#uses=2]
- %1 = getelementptr %struct.anon* %0, i32 0, i32 2 ; <%struct.node**> [#uses=1]
- br label %bb14.i
-
-bb14.i: ; preds = %bb14.i, %entry
- %i8.0.reg2mem.0.i = phi i32 [ 0, %entry ], [ %2, %bb14.i ] ; <i32> [#uses=1]
- %2 = add i32 %i8.0.reg2mem.0.i, 1 ; <i32> [#uses=2]
- %exitcond74.i = icmp eq i32 %2, 32 ; <i1> [#uses=1]
- br i1 %exitcond74.i, label %bb32.i, label %bb14.i
-
-bb32.i: ; preds = %bb32.i, %bb14.i
- %tmp.0.reg2mem.0.i = phi i32 [ %indvar.next63.i, %bb32.i ], [ 0, %bb14.i ] ; <i32> [#uses=1]
- %indvar.next63.i = add i32 %tmp.0.reg2mem.0.i, 1 ; <i32> [#uses=2]
- %exitcond64.i = icmp eq i32 %indvar.next63.i, 64 ; <i1> [#uses=1]
- br i1 %exitcond64.i, label %bb47.loopexit.i, label %bb32.i
-
-bb.i.i: ; preds = %bb47.loopexit.i
- unreachable
-
-stepsystem.exit.i: ; preds = %bb47.loopexit.i
- store %struct.node* null, %struct.node** %1, align 4
- br label %bb.i6.i
-
-bb.i6.i: ; preds = %bb.i6.i, %stepsystem.exit.i
- %tmp.0.i.i = add i32 0, -1 ; <i32> [#uses=1]
- %3 = icmp slt i32 %tmp.0.i.i, 0 ; <i1> [#uses=1]
- br i1 %3, label %bb107.i.i, label %bb.i6.i
-
-bb107.i.i: ; preds = %bb107.i.i, %bb.i6.i
- %q_addr.0.i.i.in = phi %struct.bnode** [ null, %bb107.i.i ], [ %4, %bb.i6.i ] ; <%struct.bnode**> [#uses=1]
- %q_addr.0.i.i = load %struct.bnode** %q_addr.0.i.i.in ; <%struct.bnode*> [#uses=1]
- %q_addr.1 = getelementptr %struct.anon* %0, i32 0, i32 4, i32 1
- store %struct.bnode* %q_addr.0.i.i, %struct.bnode** %q_addr.1, align 4
- br label %bb107.i.i
-
-bb47.loopexit.i: ; preds = %bb32.i
- %4 = getelementptr %struct.anon* %0, i32 0, i32 4, i32 0 ; <%struct.bnode**> [#uses=1]
- %5 = icmp eq %struct.node* null, null ; <i1> [#uses=1]
- br i1 %5, label %stepsystem.exit.i, label %bb.i.i
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll
deleted file mode 100644
index fc11347..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep movb
-
-define i16 @f(i32* %bp, i32* %ss) signext {
-entry:
- br label %cond_next127
-
-cond_next127: ; preds = %cond_next391, %entry
- %v.1 = phi i32 [ undef, %entry ], [ %tmp411, %cond_next391 ] ; <i32> [#uses=1]
- %tmp149 = mul i32 0, %v.1 ; <i32> [#uses=0]
- %tmp254 = and i32 0, 15 ; <i32> [#uses=1]
- %tmp256 = and i32 0, 15 ; <i32> [#uses=2]
- br i1 false, label %cond_true267, label %cond_next391
-
-cond_true267: ; preds = %cond_next127
- ret i16 0
-
-cond_next391: ; preds = %cond_next127
- %tmp393 = load i32* %ss, align 4 ; <i32> [#uses=1]
- %tmp395 = load i32* %bp, align 4 ; <i32> [#uses=2]
- %tmp396 = shl i32 %tmp393, %tmp395 ; <i32> [#uses=2]
- %tmp398 = sub i32 32, %tmp256 ; <i32> [#uses=2]
- %tmp399 = lshr i32 %tmp396, %tmp398 ; <i32> [#uses=1]
- %tmp405 = lshr i32 %tmp396, 31 ; <i32> [#uses=1]
- %tmp406 = add i32 %tmp405, -1 ; <i32> [#uses=1]
- %tmp409 = lshr i32 %tmp406, %tmp398 ; <i32> [#uses=1]
- %tmp411 = sub i32 %tmp399, %tmp409 ; <i32> [#uses=1]
- %tmp422445 = add i32 %tmp254, 0 ; <i32> [#uses=1]
- %tmp426447 = add i32 %tmp395, %tmp256 ; <i32> [#uses=1]
- store i32 %tmp426447, i32* %bp, align 4
- %tmp429448 = icmp ult i32 %tmp422445, 63 ; <i1> [#uses=1]
- br i1 %tmp429448, label %cond_next127, label %UnifiedReturnBlock
-
-UnifiedReturnBlock: ; preds = %cond_next391
- ret i16 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold1.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold1.ll
deleted file mode 100644
index ea1bbc4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold1.ll
+++ /dev/null
@@ -1,45 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep addss | not grep esp
-
-define fastcc void @fht(float* %fz, i16 signext %n) {
-entry:
- br i1 true, label %bb171.preheader, label %bb431
-
-bb171.preheader: ; preds = %entry
- %tmp176 = fadd float 0.000000e+00, 1.000000e+00 ; <float> [#uses=2]
- %gi.1 = getelementptr float* %fz, i32 0 ; <float*> [#uses=2]
- %tmp240 = load float* %gi.1, align 4 ; <float> [#uses=1]
- %tmp242 = fsub float %tmp240, 0.000000e+00 ; <float> [#uses=2]
- %tmp251 = getelementptr float* %fz, i32 0 ; <float*> [#uses=1]
- %tmp252 = load float* %tmp251, align 4 ; <float> [#uses=1]
- %tmp258 = getelementptr float* %fz, i32 0 ; <float*> [#uses=2]
- %tmp259 = load float* %tmp258, align 4 ; <float> [#uses=2]
- %tmp261 = fmul float %tmp259, %tmp176 ; <float> [#uses=1]
- %tmp262 = fsub float 0.000000e+00, %tmp261 ; <float> [#uses=2]
- %tmp269 = fmul float %tmp252, %tmp176 ; <float> [#uses=1]
- %tmp276 = fmul float %tmp259, 0.000000e+00 ; <float> [#uses=1]
- %tmp277 = fadd float %tmp269, %tmp276 ; <float> [#uses=2]
- %tmp281 = getelementptr float* %fz, i32 0 ; <float*> [#uses=1]
- %tmp282 = load float* %tmp281, align 4 ; <float> [#uses=2]
- %tmp284 = fsub float %tmp282, %tmp277 ; <float> [#uses=1]
- %tmp291 = fadd float %tmp282, %tmp277 ; <float> [#uses=1]
- %tmp298 = fsub float 0.000000e+00, %tmp262 ; <float> [#uses=1]
- %tmp305 = fadd float 0.000000e+00, %tmp262 ; <float> [#uses=1]
- %tmp315 = fmul float 0.000000e+00, %tmp291 ; <float> [#uses=1]
- %tmp318 = fmul float 0.000000e+00, %tmp298 ; <float> [#uses=1]
- %tmp319 = fadd float %tmp315, %tmp318 ; <float> [#uses=1]
- %tmp329 = fadd float 0.000000e+00, %tmp319 ; <float> [#uses=1]
- store float %tmp329, float* null, align 4
- %tmp336 = fsub float %tmp242, 0.000000e+00 ; <float> [#uses=1]
- store float %tmp336, float* %tmp258, align 4
- %tmp343 = fadd float %tmp242, 0.000000e+00 ; <float> [#uses=1]
- store float %tmp343, float* null, align 4
- %tmp355 = fmul float 0.000000e+00, %tmp305 ; <float> [#uses=1]
- %tmp358 = fmul float 0.000000e+00, %tmp284 ; <float> [#uses=1]
- %tmp359 = fadd float %tmp355, %tmp358 ; <float> [#uses=1]
- %tmp369 = fadd float 0.000000e+00, %tmp359 ; <float> [#uses=1]
- store float %tmp369, float* %gi.1, align 4
- ret void
-
-bb431: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold2.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold2.ll
deleted file mode 100644
index a3872ad..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold2.ll
+++ /dev/null
@@ -1,57 +0,0 @@
-; RUN: llc < %s -march=x86 | grep sarl | not grep esp
-
-define i16 @t(i16* %qmatrix, i16* %dct, i16* %acBaseTable, i16* %acExtTable, i16 signext %acBaseRes, i16 signext %acMaskRes, i16 signext %acExtRes, i32* %bitptr, i32* %source, i32 %markerPrefix, i8** %byteptr, i32 %scale, i32 %round, i32 %bits) signext {
-entry:
- br label %cond_next127
-
-cond_next127: ; preds = %cond_next391, %entry
- %tmp151 = add i32 0, %round ; <i32> [#uses=1]
- %tmp153 = ashr i32 %tmp151, %scale ; <i32> [#uses=1]
- %tmp158 = xor i32 0, %tmp153 ; <i32> [#uses=1]
- %tmp160 = or i32 %tmp158, 0 ; <i32> [#uses=1]
- %tmp180181 = sext i16 0 to i32 ; <i32> [#uses=1]
- %tmp183 = add i32 %tmp160, 1 ; <i32> [#uses=1]
- br i1 false, label %cond_true188, label %cond_next245
-
-cond_true188: ; preds = %cond_next127
- ret i16 0
-
-cond_next245: ; preds = %cond_next127
- %tmp253444 = lshr i32 %tmp180181, 4 ; <i32> [#uses=1]
- %tmp254 = and i32 %tmp253444, 15 ; <i32> [#uses=1]
- br i1 false, label %cond_true267, label %cond_next391
-
-cond_true267: ; preds = %cond_next245
- %tmp269 = load i8** %byteptr, align 4 ; <i8*> [#uses=3]
- %tmp270 = load i8* %tmp269, align 1 ; <i8> [#uses=1]
- %tmp270271 = zext i8 %tmp270 to i32 ; <i32> [#uses=1]
- %tmp272 = getelementptr i8* %tmp269, i32 1 ; <i8*> [#uses=2]
- store i8* %tmp272, i8** %byteptr, align 4
- %tmp276 = load i8* %tmp272, align 1 ; <i8> [#uses=1]
- %tmp278 = getelementptr i8* %tmp269, i32 2 ; <i8*> [#uses=1]
- store i8* %tmp278, i8** %byteptr, align 4
- %tmp286 = icmp eq i32 %tmp270271, %markerPrefix ; <i1> [#uses=1]
- %cond = icmp eq i8 %tmp276, 0 ; <i1> [#uses=1]
- %bothcond = and i1 %tmp286, %cond ; <i1> [#uses=1]
- br i1 %bothcond, label %cond_true294, label %cond_next327
-
-cond_true294: ; preds = %cond_true267
- ret i16 0
-
-cond_next327: ; preds = %cond_true267
- br i1 false, label %cond_true343, label %cond_next391
-
-cond_true343: ; preds = %cond_next327
- %tmp345 = load i8** %byteptr, align 4 ; <i8*> [#uses=1]
- store i8* null, i8** %byteptr, align 4
- store i8* %tmp345, i8** %byteptr, align 4
- br label %cond_next391
-
-cond_next391: ; preds = %cond_true343, %cond_next327, %cond_next245
- %tmp422445 = add i32 %tmp254, %tmp183 ; <i32> [#uses=1]
- %tmp429448 = icmp ult i32 %tmp422445, 63 ; <i1> [#uses=1]
- br i1 %tmp429448, label %cond_next127, label %UnifiedReturnBlock
-
-UnifiedReturnBlock: ; preds = %cond_next391
- ret i16 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-14-CoalescerCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-10-14-CoalescerCrash.ll
deleted file mode 100644
index 8a55935..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-14-CoalescerCrash.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin
-
- %struct._Unwind_Context = type { }
-
-define i32 @execute_stack_op(i8* %op_ptr, i8* %op_end, %struct._Unwind_Context* %context, i64 %initial) {
-entry:
- br i1 false, label %bb, label %return
-
-bb: ; preds = %bb31, %entry
- br i1 false, label %bb6, label %bb31
-
-bb6: ; preds = %bb
- %tmp10 = load i64* null, align 8 ; <i64> [#uses=1]
- %tmp16 = load i64* null, align 8 ; <i64> [#uses=1]
- br i1 false, label %bb23, label %bb31
-
-bb23: ; preds = %bb6
- %tmp2526.cast = and i64 %tmp16, 4294967295 ; <i64> [#uses=1]
- %tmp27 = ashr i64 %tmp10, %tmp2526.cast ; <i64> [#uses=1]
- br label %bb31
-
-bb31: ; preds = %bb23, %bb6, %bb
- %result.0 = phi i64 [ %tmp27, %bb23 ], [ 0, %bb ], [ 0, %bb6 ] ; <i64> [#uses=0]
- br i1 false, label %bb, label %return
-
-return: ; preds = %bb31, %entry
- ret i32 undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-15-CoalescerCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-10-15-CoalescerCrash.ll
deleted file mode 100644
index 1e4ae84..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-15-CoalescerCrash.ll
+++ /dev/null
@@ -1,400 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-linux-gnu
-; PR1729
-
- %struct.CUMULATIVE_ARGS = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.VEC_edge = type { i32, i32, [1 x %struct.edge_def*] }
- %struct.VEC_tree = type { i32, i32, [1 x %struct.tree_node*] }
- %struct._IO_FILE = type { i32, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, %struct._IO_marker*, %struct._IO_FILE*, i32, i32, i64, i16, i8, [1 x i8], i8*, i64, i8*, i8*, i8*, i8*, i64, i32, [20 x i8] }
- %struct._IO_marker = type { %struct._IO_marker*, %struct._IO_FILE*, i32 }
- %struct._obstack_chunk = type { i8*, %struct._obstack_chunk*, [4 x i8] }
- %struct.addr_diff_vec_flags = type <{ i8, i8, i8, i8 }>
- %struct.alloc_pool_def = type { i8*, i64, i64, %struct.alloc_pool_list_def*, i64, i64, i64, %struct.alloc_pool_list_def*, i64, i64 }
- %struct.alloc_pool_list_def = type { %struct.alloc_pool_list_def* }
- %struct.basic_block_def = type { %struct.rtx_def*, %struct.rtx_def*, %struct.tree_node*, %struct.VEC_edge*, %struct.VEC_edge*, %struct.bitmap_head_def*, %struct.bitmap_head_def*, i8*, %struct.loop*, [2 x %struct.et_node*], %struct.basic_block_def*, %struct.basic_block_def*, %struct.reorder_block_def*, %struct.bb_ann_d*, i64, i32, i32, i32, i32 }
- %struct.bb_ann_d = type opaque
- %struct.bitmap_element_def = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, i32, [2 x i64] }
- %struct.bitmap_head_def = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, i32, %struct.bitmap_obstack* }
- %struct.bitmap_obstack = type { %struct.bitmap_element_def*, %struct.bitmap_head_def*, %struct.obstack }
- %struct.cselib_val_struct = type opaque
- %struct.dataflow_d = type opaque
- %struct.die_struct = type opaque
- %struct.edge_def = type { %struct.basic_block_def*, %struct.basic_block_def*, %struct.edge_def_insns, i8*, %struct.location_t*, i32, i32, i64, i32 }
- %struct.edge_def_insns = type { %struct.rtx_def* }
- %struct.edge_iterator = type { i32, %struct.VEC_edge** }
- %struct.eh_status = type opaque
- %struct.elt_list = type opaque
- %struct.emit_status = type { i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack*, i32, %struct.location_t, i32, i8*, %struct.rtx_def** }
- %struct.et_node = type opaque
- %struct.expr_status = type { i32, i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
- %struct.function = type { %struct.eh_status*, %struct.expr_status*, %struct.emit_status*, %struct.varasm_status*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.function*, i32, i32, i32, i32, %struct.rtx_def*, %struct.CUMULATIVE_ARGS, %struct.rtx_def*, %struct.rtx_def*, %struct.initial_value_struct*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, i8, i32, i64, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct.varray_head_tag*, %struct.temp_slot*, i32, %struct.var_refs_queue*, i32, i32, %struct.rtvec_def*, %struct.tree_node*, i32, i32, i32, %struct.machine_function*, i32, i32, i8, i8, %struct.language_function*, %struct.rtx_def*, i32, i32, i32, i32, %struct.location_t, %struct.varray_head_tag*, %struct.tree_node*, %struct.tree_node*, i8, i8, i8 }
- %struct.ht_identifier = type { i8*, i32, i32 }
- %struct.initial_value_struct = type opaque
- %struct.lang_decl = type opaque
- %struct.lang_type = type opaque
- %struct.language_function = type opaque
- %struct.location_t = type { i8*, i32 }
- %struct.loop = type opaque
- %struct.machine_function = type { %struct.stack_local_entry*, i8*, %struct.rtx_def*, i32, i32, i32, i32, i32 }
- %struct.mem_attrs = type { i64, %struct.tree_node*, %struct.rtx_def*, %struct.rtx_def*, i32 }
- %struct.obstack = type { i64, %struct._obstack_chunk*, i8*, i8*, i8*, i64, i32, %struct._obstack_chunk* (i8*, i64)*, void (i8*, %struct._obstack_chunk*)*, i8*, i8 }
- %struct.phi_arg_d = type { %struct.tree_node*, i8 }
- %struct.ptr_info_def = type opaque
- %struct.real_value = type opaque
- %struct.reg_attrs = type { %struct.tree_node*, i64 }
- %struct.reg_info_def = type { i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.reorder_block_def = type { %struct.rtx_def*, %struct.rtx_def*, %struct.basic_block_def*, %struct.basic_block_def*, %struct.basic_block_def*, i32, i32, i32 }
- %struct.rtunion = type { i8* }
- %struct.rtvec_def = type { i32, [1 x %struct.rtx_def*] }
- %struct.rtx_def = type { i16, i8, i8, %struct.u }
- %struct.sequence_stack = type { %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack* }
- %struct.simple_bitmap_def = type { i32, i32, i32, [1 x i64] }
- %struct.stack_local_entry = type opaque
- %struct.temp_slot = type opaque
- %struct.tree_binfo = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.VEC_tree*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.VEC_tree }
- %struct.tree_block = type { %struct.tree_common, i32, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node* }
- %struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, %union.tree_ann_d*, i8, i8, i8, i8, i8 }
- %struct.tree_complex = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node* }
- %struct.tree_decl = type { %struct.tree_common, %struct.location_t, i32, %struct.tree_node*, i8, i8, i8, i8, i8, i8, i8, i8, i32, %struct.tree_decl_u1, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, i32, %struct.tree_decl_u2, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_decl* }
- %struct.tree_decl_u1 = type { i64 }
- %struct.tree_decl_u1_a = type <{ i32 }>
- %struct.tree_decl_u2 = type { %struct.function* }
- %struct.tree_exp = type { %struct.tree_common, %struct.location_t*, i32, %struct.tree_node*, [1 x %struct.tree_node*] }
- %struct.tree_identifier = type { %struct.tree_common, %struct.ht_identifier }
- %struct.tree_int_cst = type { %struct.tree_common, %struct.tree_int_cst_lowhi }
- %struct.tree_int_cst_lowhi = type { i64, i64 }
- %struct.tree_list = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node* }
- %struct.tree_node = type { %struct.tree_decl }
- %struct.tree_phi_node = type { %struct.tree_common, %struct.tree_node*, i32, i32, i32, %struct.basic_block_def*, %struct.dataflow_d*, [1 x %struct.phi_arg_d] }
- %struct.tree_real_cst = type { %struct.tree_common, %struct.real_value* }
- %struct.tree_ssa_name = type { %struct.tree_common, %struct.tree_node*, i32, %struct.ptr_info_def*, %struct.tree_node*, i8* }
- %struct.tree_statement_list = type { %struct.tree_common, %struct.tree_statement_list_node*, %struct.tree_statement_list_node* }
- %struct.tree_statement_list_node = type { %struct.tree_statement_list_node*, %struct.tree_statement_list_node*, %struct.tree_node* }
- %struct.tree_string = type { %struct.tree_common, i32, [1 x i8] }
- %struct.tree_type = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i32, i16, i8, i8, i32, %struct.tree_node*, %struct.tree_node*, %struct.rtunion, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_type* }
- %struct.tree_type_symtab = type { i8* }
- %struct.tree_value_handle = type { %struct.tree_common, %struct.value_set*, i32 }
- %struct.tree_vec = type { %struct.tree_common, i32, [1 x %struct.tree_node*] }
- %struct.tree_vector = type { %struct.tree_common, %struct.tree_node* }
- %struct.u = type { [1 x %struct.rtunion] }
- %struct.value_set = type opaque
- %struct.var_refs_queue = type { %struct.rtx_def*, i32, i32, %struct.var_refs_queue* }
- %struct.varasm_status = type opaque
- %struct.varray_data = type { [1 x i64] }
- %struct.varray_head_tag = type { i64, i64, i32, i8*, %struct.varray_data }
- %union.tree_ann_d = type opaque
-@first_edge_aux_obj = external global i8* ; <i8**> [#uses=0]
-@first_block_aux_obj = external global i8* ; <i8**> [#uses=0]
-@n_edges = external global i32 ; <i32*> [#uses=0]
-@ENTRY_BLOCK_PTR = external global %struct.basic_block_def* ; <%struct.basic_block_def**> [#uses=0]
-@EXIT_BLOCK_PTR = external global %struct.basic_block_def* ; <%struct.basic_block_def**> [#uses=0]
-@n_basic_blocks = external global i32 ; <i32*> [#uses=0]
-@.str = external constant [9 x i8] ; <[9 x i8]*> [#uses=0]
-@rbi_pool = external global %struct.alloc_pool_def* ; <%struct.alloc_pool_def**> [#uses=0]
-@__FUNCTION__.19643 = external constant [18 x i8] ; <[18 x i8]*> [#uses=0]
-@.str1 = external constant [20 x i8] ; <[20 x i8]*> [#uses=0]
-@__FUNCTION__.19670 = external constant [15 x i8] ; <[15 x i8]*> [#uses=0]
-@basic_block_info = external global %struct.varray_head_tag* ; <%struct.varray_head_tag**> [#uses=0]
-@last_basic_block = external global i32 ; <i32*> [#uses=0]
-@__FUNCTION__.19696 = external constant [14 x i8] ; <[14 x i8]*> [#uses=0]
-@__FUNCTION__.20191 = external constant [20 x i8] ; <[20 x i8]*> [#uses=0]
-@block_aux_obstack = external global %struct.obstack ; <%struct.obstack*> [#uses=0]
-@__FUNCTION__.20301 = external constant [20 x i8] ; <[20 x i8]*> [#uses=0]
-@__FUNCTION__.20316 = external constant [19 x i8] ; <[19 x i8]*> [#uses=0]
-@edge_aux_obstack = external global %struct.obstack ; <%struct.obstack*> [#uses=0]
-@stderr = external global %struct._IO_FILE* ; <%struct._IO_FILE**> [#uses=0]
-@__FUNCTION__.20463 = external constant [11 x i8] ; <[11 x i8]*> [#uses=0]
-@.str2 = external constant [7 x i8] ; <[7 x i8]*> [#uses=0]
-@.str3 = external constant [6 x i8] ; <[6 x i8]*> [#uses=0]
-@.str4 = external constant [4 x i8] ; <[4 x i8]*> [#uses=0]
-@.str5 = external constant [11 x i8] ; <[11 x i8]*> [#uses=0]
-@.str6 = external constant [8 x i8] ; <[8 x i8]*> [#uses=0]
-@.str7 = external constant [4 x i8] ; <[4 x i8]*> [#uses=0]
-@bitnames.20157 = external constant [13 x i8*] ; <[13 x i8*]*> [#uses=0]
-@.str8 = external constant [9 x i8] ; <[9 x i8]*> [#uses=0]
-@.str9 = external constant [3 x i8] ; <[3 x i8]*> [#uses=0]
-@.str10 = external constant [7 x i8] ; <[7 x i8]*> [#uses=0]
-@.str11 = external constant [3 x i8] ; <[3 x i8]*> [#uses=0]
-@.str12 = external constant [5 x i8] ; <[5 x i8]*> [#uses=0]
-@.str13 = external constant [9 x i8] ; <[9 x i8]*> [#uses=0]
-@.str14 = external constant [13 x i8] ; <[13 x i8]*> [#uses=0]
-@.str15 = external constant [12 x i8] ; <[12 x i8]*> [#uses=0]
-@.str16 = external constant [8 x i8] ; <[8 x i8]*> [#uses=0]
-@.str17 = external constant [10 x i8] ; <[10 x i8]*> [#uses=0]
-@.str18 = external constant [5 x i8] ; <[5 x i8]*> [#uses=0]
-@.str19 = external constant [6 x i8] ; <[6 x i8]*> [#uses=0]
-@.str20 = external constant [5 x i8] ; <[5 x i8]*> [#uses=0]
-@.str21 = external constant [3 x i8] ; <[3 x i8]*> [#uses=0]
-@.str22 = external constant [3 x i8] ; <[3 x i8]*> [#uses=0]
-@__FUNCTION__.19709 = external constant [20 x i8] ; <[20 x i8]*> [#uses=0]
-@.str23 = external constant [5 x i8] ; <[5 x i8]*> [#uses=0]
-@.str24 = external constant [10 x i8] ; <[10 x i8]*> [#uses=0]
-@__FUNCTION__.19813 = external constant [19 x i8] ; <[19 x i8]*> [#uses=0]
-@.str25 = external constant [7 x i8] ; <[7 x i8]*> [#uses=0]
-@.str26 = external constant [6 x i8] ; <[6 x i8]*> [#uses=0]
-@initialized.20241.b = external global i1 ; <i1*> [#uses=0]
-@__FUNCTION__.20244 = external constant [21 x i8] ; <[21 x i8]*> [#uses=0]
-@__FUNCTION__.19601 = external constant [12 x i8] ; <[12 x i8]*> [#uses=0]
-@__FUNCTION__.14571 = external constant [8 x i8] ; <[8 x i8]*> [#uses=0]
-@__FUNCTION__.14535 = external constant [13 x i8] ; <[13 x i8]*> [#uses=0]
-@.str27 = external constant [28 x i8] ; <[28 x i8]*> [#uses=0]
-@__FUNCTION__.14589 = external constant [8 x i8] ; <[8 x i8]*> [#uses=0]
-@__FUNCTION__.19792 = external constant [12 x i8] ; <[12 x i8]*> [#uses=0]
-@__FUNCTION__.19851 = external constant [19 x i8] ; <[19 x i8]*> [#uses=0]
-@profile_status = external global i32 ; <i32*> [#uses=0]
-@.str29 = external constant [46 x i8] ; <[46 x i8]*> [#uses=0]
-@.str30 = external constant [49 x i8] ; <[49 x i8]*> [#uses=0]
-@.str31 = external constant [54 x i8] ; <[54 x i8]*> [#uses=0]
-@.str32 = external constant [49 x i8] ; <[49 x i8]*> [#uses=1]
-@__FUNCTION__.19948 = external constant [15 x i8] ; <[15 x i8]*> [#uses=0]
-@reg_n_info = external global %struct.varray_head_tag* ; <%struct.varray_head_tag**> [#uses=0]
-@reload_completed = external global i32 ; <i32*> [#uses=0]
-@.str33 = external constant [15 x i8] ; <[15 x i8]*> [#uses=0]
-@.str34 = external constant [43 x i8] ; <[43 x i8]*> [#uses=0]
-@.str35 = external constant [13 x i8] ; <[13 x i8]*> [#uses=0]
-@.str36 = external constant [1 x i8] ; <[1 x i8]*> [#uses=0]
-@.str37 = external constant [2 x i8] ; <[2 x i8]*> [#uses=0]
-@.str38 = external constant [16 x i8] ; <[16 x i8]*> [#uses=0]
-@cfun = external global %struct.function* ; <%struct.function**> [#uses=0]
-@.str39 = external constant [14 x i8] ; <[14 x i8]*> [#uses=0]
-@.str40 = external constant [11 x i8] ; <[11 x i8]*> [#uses=0]
-@.str41 = external constant [20 x i8] ; <[20 x i8]*> [#uses=0]
-@.str42 = external constant [17 x i8] ; <[17 x i8]*> [#uses=0]
-@.str43 = external constant [19 x i8] ; <[19 x i8]*> [#uses=0]
-@mode_size = external global [48 x i8] ; <[48 x i8]*> [#uses=0]
-@target_flags = external global i32 ; <i32*> [#uses=0]
-@.str44 = external constant [11 x i8] ; <[11 x i8]*> [#uses=0]
-@reg_class_names = external global [0 x i8*] ; <[0 x i8*]*> [#uses=0]
-@.str45 = external constant [10 x i8] ; <[10 x i8]*> [#uses=0]
-@.str46 = external constant [13 x i8] ; <[13 x i8]*> [#uses=0]
-@.str47 = external constant [19 x i8] ; <[19 x i8]*> [#uses=0]
-@.str48 = external constant [12 x i8] ; <[12 x i8]*> [#uses=0]
-@.str49 = external constant [10 x i8] ; <[10 x i8]*> [#uses=0]
-@.str50 = external constant [3 x i8] ; <[3 x i8]*> [#uses=0]
-@.str51 = external constant [29 x i8] ; <[29 x i8]*> [#uses=0]
-@.str52 = external constant [17 x i8] ; <[17 x i8]*> [#uses=0]
-@.str53 = external constant [19 x i8] ; <[19 x i8]*> [#uses=0]
-@.str54 = external constant [22 x i8] ; <[22 x i8]*> [#uses=0]
-@.str55 = external constant [10 x i8] ; <[10 x i8]*> [#uses=0]
-@.str56 = external constant [12 x i8] ; <[12 x i8]*> [#uses=0]
-@.str57 = external constant [26 x i8] ; <[26 x i8]*> [#uses=0]
-@.str58 = external constant [15 x i8] ; <[15 x i8]*> [#uses=0]
-@.str59 = external constant [14 x i8] ; <[14 x i8]*> [#uses=0]
-@.str60 = external constant [26 x i8] ; <[26 x i8]*> [#uses=0]
-@.str61 = external constant [24 x i8] ; <[24 x i8]*> [#uses=0]
-@initialized.20366.b = external global i1 ; <i1*> [#uses=0]
-@__FUNCTION__.20369 = external constant [20 x i8] ; <[20 x i8]*> [#uses=0]
-@__FUNCTION__.20442 = external constant [19 x i8] ; <[19 x i8]*> [#uses=0]
-@bb_bitnames.20476 = external constant [6 x i8*] ; <[6 x i8*]*> [#uses=0]
-@.str62 = external constant [6 x i8] ; <[6 x i8]*> [#uses=0]
-@.str63 = external constant [4 x i8] ; <[4 x i8]*> [#uses=0]
-@.str64 = external constant [10 x i8] ; <[10 x i8]*> [#uses=0]
-@.str65 = external constant [8 x i8] ; <[8 x i8]*> [#uses=0]
-@.str66 = external constant [17 x i8] ; <[17 x i8]*> [#uses=0]
-@.str67 = external constant [11 x i8] ; <[11 x i8]*> [#uses=0]
-@.str68 = external constant [15 x i8] ; <[15 x i8]*> [#uses=0]
-@.str69 = external constant [3 x i8] ; <[3 x i8]*> [#uses=0]
-@.str70 = external constant [3 x i8] ; <[3 x i8]*> [#uses=0]
-@__FUNCTION__.20520 = external constant [32 x i8] ; <[32 x i8]*> [#uses=0]
-@dump_file = external global %struct._IO_FILE* ; <%struct._IO_FILE**> [#uses=0]
-@.str71 = external constant [86 x i8] ; <[86 x i8]*> [#uses=0]
-@.str72 = external constant [94 x i8] ; <[94 x i8]*> [#uses=0]
-@reg_obstack = external global %struct.bitmap_obstack ; <%struct.bitmap_obstack*> [#uses=0]
-
-declare void @init_flow()
-
-declare i8* @ggc_alloc_cleared_stat(i64)
-
-declare fastcc void @free_edge(%struct.edge_def*)
-
-declare void @ggc_free(i8*)
-
-declare %struct.basic_block_def* @alloc_block()
-
-declare void @alloc_rbi_pool()
-
-declare %struct.alloc_pool_def* @create_alloc_pool(i8*, i64, i64)
-
-declare void @free_rbi_pool()
-
-declare void @free_alloc_pool(%struct.alloc_pool_def*)
-
-declare void @initialize_bb_rbi(%struct.basic_block_def*)
-
-declare void @fancy_abort(i8*, i32, i8*)
-
-declare i8* @pool_alloc(%struct.alloc_pool_def*)
-
-declare void @llvm.memset.i64(i8*, i8, i64, i32)
-
-declare void @link_block(%struct.basic_block_def*, %struct.basic_block_def*)
-
-declare void @unlink_block(%struct.basic_block_def*)
-
-declare void @compact_blocks()
-
-declare void @varray_check_failed(%struct.varray_head_tag*, i64, i8*, i32, i8*)
-
-declare void @expunge_block(%struct.basic_block_def*)
-
-declare void @clear_bb_flags()
-
-declare void @alloc_aux_for_block(%struct.basic_block_def*, i32)
-
-declare void @_obstack_newchunk(%struct.obstack*, i32)
-
-declare void @clear_aux_for_blocks()
-
-declare void @free_aux_for_blocks()
-
-declare void @obstack_free(%struct.obstack*, i8*)
-
-declare void @alloc_aux_for_edge(%struct.edge_def*, i32)
-
-declare void @debug_bb(%struct.basic_block_def*)
-
-declare void @dump_bb(%struct.basic_block_def*, %struct._IO_FILE*, i32)
-
-declare %struct.basic_block_def* @debug_bb_n(i32)
-
-declare void @dump_edge_info(%struct._IO_FILE*, %struct.edge_def*, i32)
-
-declare i32 @fputs_unlocked(i8* noalias , %struct._IO_FILE* noalias )
-
-declare i32 @fprintf(%struct._IO_FILE* noalias , i8* noalias , ...)
-
-declare i64 @fwrite(i8*, i64, i64, i8*)
-
-declare i32 @__overflow(%struct._IO_FILE*, i32)
-
-declare %struct.edge_def* @unchecked_make_edge(%struct.basic_block_def*, %struct.basic_block_def*, i32)
-
-declare i8* @vec_gc_p_reserve(i8*, i32)
-
-declare void @vec_assert_fail(i8*, i8*, i8*, i32, i8*)
-
-declare void @execute_on_growing_pred(%struct.edge_def*)
-
-declare %struct.edge_def* @make_edge(%struct.basic_block_def*, %struct.basic_block_def*, i32)
-
-declare %struct.edge_def* @find_edge(%struct.basic_block_def*, %struct.basic_block_def*)
-
-declare %struct.edge_def* @make_single_succ_edge(%struct.basic_block_def*, %struct.basic_block_def*, i32)
-
-declare %struct.edge_def* @cached_make_edge(%struct.simple_bitmap_def**, %struct.basic_block_def*, %struct.basic_block_def*, i32)
-
-declare void @redirect_edge_succ(%struct.edge_def*, %struct.basic_block_def*)
-
-declare void @execute_on_shrinking_pred(%struct.edge_def*)
-
-declare void @alloc_aux_for_blocks(i32)
-
-declare i8* @xmalloc(i64)
-
-declare i32 @_obstack_begin(%struct.obstack*, i32, i32, i8* (i64)*, void (i8*)*)
-
-declare void @free(i8*)
-
-declare void @clear_edges()
-
-declare void @remove_edge(%struct.edge_def*)
-
-declare %struct.edge_def* @redirect_edge_succ_nodup(%struct.edge_def*, %struct.basic_block_def*)
-
-declare void @redirect_edge_pred(%struct.edge_def*, %struct.basic_block_def*)
-
-define void @check_bb_profile(%struct.basic_block_def* %bb, %struct._IO_FILE* %file) {
-entry:
- br i1 false, label %cond_false759.preheader, label %cond_false149.preheader
-
-cond_false149.preheader: ; preds = %entry
- ret void
-
-cond_false759.preheader: ; preds = %entry
- br i1 false, label %cond_next873, label %cond_true794
-
-bb644: ; preds = %cond_next873
- ret void
-
-cond_true794: ; preds = %cond_false759.preheader
- ret void
-
-cond_next873: ; preds = %cond_false759.preheader
- br i1 false, label %bb882, label %bb644
-
-bb882: ; preds = %cond_next873
- br i1 false, label %cond_true893, label %cond_next901
-
-cond_true893: ; preds = %bb882
- br label %cond_false1036
-
-cond_next901: ; preds = %bb882
- ret void
-
-bb929: ; preds = %cond_next1150
- %tmp934 = add i64 0, %lsum.11225.0 ; <i64> [#uses=1]
- br i1 false, label %cond_next979, label %cond_true974
-
-cond_true974: ; preds = %bb929
- ret void
-
-cond_next979: ; preds = %bb929
- br label %cond_false1036
-
-cond_false1036: ; preds = %cond_next979, %cond_true893
- %lsum.11225.0 = phi i64 [ 0, %cond_true893 ], [ %tmp934, %cond_next979 ] ; <i64> [#uses=2]
- br i1 false, label %cond_next1056, label %cond_true1051
-
-cond_true1051: ; preds = %cond_false1036
- ret void
-
-cond_next1056: ; preds = %cond_false1036
- br i1 false, label %cond_next1150, label %cond_true1071
-
-cond_true1071: ; preds = %cond_next1056
- ret void
-
-cond_next1150: ; preds = %cond_next1056
- %tmp1156 = icmp eq %struct.edge_def* null, null ; <i1> [#uses=1]
- br i1 %tmp1156, label %bb1159, label %bb929
-
-bb1159: ; preds = %cond_next1150
- br i1 false, label %cond_true1169, label %UnifiedReturnBlock
-
-cond_true1169: ; preds = %bb1159
- %tmp11741175 = trunc i64 %lsum.11225.0 to i32 ; <i32> [#uses=1]
- %tmp1178 = tail call i32 (%struct._IO_FILE* noalias , i8* noalias , ...)* @fprintf( %struct._IO_FILE* %file noalias , i8* getelementptr ([49 x i8]* @.str32, i32 0, i64 0) noalias , i32 %tmp11741175, i32 0 ) ; <i32> [#uses=0]
- ret void
-
-UnifiedReturnBlock: ; preds = %bb1159
- ret void
-}
-
-declare void @dump_flow_info(%struct._IO_FILE*)
-
-declare i32 @max_reg_num()
-
-declare void @rtl_check_failed_flag(i8*, %struct.rtx_def*, i8*, i32, i8*)
-
-declare i32 @reg_preferred_class(i32)
-
-declare i32 @reg_alternate_class(i32)
-
-declare i8 @maybe_hot_bb_p(%struct.basic_block_def*) zeroext
-
-declare i8 @probably_never_executed_bb_p(%struct.basic_block_def*) zeroext
-
-declare void @dump_regset(%struct.bitmap_head_def*, %struct._IO_FILE*)
-
-declare void @debug_flow_info()
-
-declare void @alloc_aux_for_edges(i32)
-
-declare void @clear_aux_for_edges()
-
-declare void @free_aux_for_edges()
-
-declare void @brief_dump_cfg(%struct._IO_FILE*)
-
-declare i32 @fputc(i32, i8*)
-
-declare void @update_bb_profile_for_threading(%struct.basic_block_def*, i32, i64, %struct.edge_def*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-16-CoalescerCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-10-16-CoalescerCrash.ll
deleted file mode 100644
index fbcac50..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-16-CoalescerCrash.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin
-
-define i64 @__ashldi3(i64 %u, i64 %b) {
-entry:
- br i1 false, label %UnifiedReturnBlock, label %cond_next
-
-cond_next: ; preds = %entry
- %tmp9 = sub i64 32, %b ; <i64> [#uses=2]
- %tmp11 = icmp slt i64 %tmp9, 1 ; <i1> [#uses=1]
- %tmp2180 = trunc i64 %u to i32 ; <i32> [#uses=2]
- %tmp2223 = trunc i64 %tmp9 to i32 ; <i32> [#uses=2]
- br i1 %tmp11, label %cond_true14, label %cond_false
-
-cond_true14: ; preds = %cond_next
- %tmp24 = sub i32 0, %tmp2223 ; <i32> [#uses=1]
- %tmp25 = shl i32 %tmp2180, %tmp24 ; <i32> [#uses=1]
- %tmp2569 = zext i32 %tmp25 to i64 ; <i64> [#uses=1]
- %tmp256970 = shl i64 %tmp2569, 32 ; <i64> [#uses=1]
- ret i64 %tmp256970
-
-cond_false: ; preds = %cond_next
- %tmp35 = lshr i32 %tmp2180, %tmp2223 ; <i32> [#uses=1]
- %tmp54 = or i32 %tmp35, 0 ; <i32> [#uses=1]
- %tmp5464 = zext i32 %tmp54 to i64 ; <i64> [#uses=1]
- %tmp546465 = shl i64 %tmp5464, 32 ; <i64> [#uses=1]
- %tmp546465.ins = or i64 %tmp546465, 0 ; <i64> [#uses=1]
- ret i64 %tmp546465.ins
-
-UnifiedReturnBlock:
- ret i64 %u
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-16-IllegalAsm.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-10-16-IllegalAsm.ll
deleted file mode 100644
index 6d0cb47..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-16-IllegalAsm.ll
+++ /dev/null
@@ -1,272 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-linux-gnu | grep movb | not grep x
-; PR1734
-
- %struct.CUMULATIVE_ARGS = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.eh_status = type opaque
- %struct.emit_status = type { i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack*, i32, %struct.location_t, i32, i8*, %struct.rtx_def** }
- %struct.expr_status = type { i32, i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
- %struct.function = type { %struct.eh_status*, %struct.expr_status*, %struct.emit_status*, %struct.varasm_status*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.function*, i32, i32, i32, i32, %struct.rtx_def*, %struct.CUMULATIVE_ARGS, %struct.rtx_def*, %struct.rtx_def*, %struct.initial_value_struct*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, i8, i32, i64, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct.varray_head_tag*, %struct.temp_slot*, i32, %struct.var_refs_queue*, i32, i32, %struct.rtvec_def*, %struct.tree_node*, i32, i32, i32, %struct.machine_function*, i32, i32, i8, i8, %struct.language_function*, %struct.rtx_def*, i32, i32, i32, i32, %struct.location_t, %struct.varray_head_tag*, %struct.tree_node*, %struct.tree_node*, i8, i8, i8 }
- %struct.initial_value_struct = type opaque
- %struct.lang_decl = type opaque
- %struct.lang_type = type opaque
- %struct.language_function = type opaque
- %struct.location_t = type { i8*, i32 }
- %struct.machine_function = type { %struct.stack_local_entry*, i8*, %struct.rtx_def*, i32, i32, i32, i32, i32 }
- %struct.rtunion = type { i8* }
- %struct.rtvec_def = type { i32, [1 x %struct.rtx_def*] }
- %struct.rtx_def = type { i16, i8, i8, %struct.u }
- %struct.sequence_stack = type { %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack* }
- %struct.stack_local_entry = type opaque
- %struct.temp_slot = type opaque
- %struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, %union.tree_ann_d*, i8, i8, i8, i8, i8 }
- %struct.tree_decl = type { %struct.tree_common, %struct.location_t, i32, %struct.tree_node*, i8, i8, i8, i8, i8, i8, i8, i8, i32, %struct.tree_decl_u1, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, i32, %struct.tree_decl_u2, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_decl* }
- %struct.tree_decl_u1 = type { i64 }
- %struct.tree_decl_u2 = type { %struct.function* }
- %struct.tree_node = type { %struct.tree_decl }
- %struct.tree_type = type { %struct.tree_common, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i32, i16, i8, i8, i32, %struct.tree_node*, %struct.tree_node*, %struct.rtunion, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_type* }
- %struct.u = type { [1 x %struct.rtunion] }
- %struct.var_refs_queue = type { %struct.rtx_def*, i32, i32, %struct.var_refs_queue* }
- %struct.varasm_status = type opaque
- %struct.varray_data = type { [1 x i64] }
- %struct.varray_head_tag = type { i64, i64, i32, i8*, %struct.varray_data }
- %union.tree_ann_d = type opaque
-@.str = external constant [28 x i8] ; <[28 x i8]*> [#uses=1]
-@tree_code_type = external constant [0 x i32] ; <[0 x i32]*> [#uses=5]
-@global_trees = external global [47 x %struct.tree_node*] ; <[47 x %struct.tree_node*]*> [#uses=1]
-@mode_size = external global [48 x i8] ; <[48 x i8]*> [#uses=1]
-@__FUNCTION__.22683 = external constant [12 x i8] ; <[12 x i8]*> [#uses=1]
-
-define void @layout_type(%struct.tree_node* %type) {
-entry:
- %tmp15 = icmp eq %struct.tree_node* %type, null ; <i1> [#uses=1]
- br i1 %tmp15, label %cond_true, label %cond_false
-
-cond_true: ; preds = %entry
- tail call void @fancy_abort( i8* getelementptr ([28 x i8]* @.str, i32 0, i64 0), i32 1713, i8* getelementptr ([12 x i8]* @__FUNCTION__.22683, i32 0, i32 0) )
- unreachable
-
-cond_false: ; preds = %entry
- %tmp19 = load %struct.tree_node** getelementptr ([47 x %struct.tree_node*]* @global_trees, i32 0, i64 0), align 8 ; <%struct.tree_node*> [#uses=1]
- %tmp21 = icmp eq %struct.tree_node* %tmp19, %type ; <i1> [#uses=1]
- br i1 %tmp21, label %UnifiedReturnBlock, label %cond_next25
-
-cond_next25: ; preds = %cond_false
- %tmp30 = getelementptr %struct.tree_node* %type, i32 0, i32 0, i32 0, i32 3 ; <i8*> [#uses=1]
- %tmp3031 = bitcast i8* %tmp30 to i32* ; <i32*> [#uses=6]
- %tmp32 = load i32* %tmp3031, align 8 ; <i32> [#uses=3]
- %tmp3435 = trunc i32 %tmp32 to i8 ; <i8> [#uses=3]
- %tmp34353637 = zext i8 %tmp3435 to i64 ; <i64> [#uses=1]
- %tmp38 = getelementptr [0 x i32]* @tree_code_type, i32 0, i64 %tmp34353637 ; <i32*> [#uses=1]
- %tmp39 = load i32* %tmp38, align 4 ; <i32> [#uses=1]
- %tmp40 = icmp eq i32 %tmp39, 2 ; <i1> [#uses=4]
- br i1 %tmp40, label %cond_next46, label %cond_true43
-
-cond_true43: ; preds = %cond_next25
- tail call void @tree_class_check_failed( %struct.tree_node* %type, i32 2, i8* getelementptr ([28 x i8]* @.str, i32 0, i64 0), i32 1719, i8* getelementptr ([12 x i8]* @__FUNCTION__.22683, i32 0, i32 0) )
- unreachable
-
-cond_next46: ; preds = %cond_next25
- %tmp4950 = bitcast %struct.tree_node* %type to %struct.tree_type* ; <%struct.tree_type*> [#uses=2]
- %tmp51 = getelementptr %struct.tree_type* %tmp4950, i32 0, i32 2 ; <%struct.tree_node**> [#uses=2]
- %tmp52 = load %struct.tree_node** %tmp51, align 8 ; <%struct.tree_node*> [#uses=1]
- %tmp53 = icmp eq %struct.tree_node* %tmp52, null ; <i1> [#uses=1]
- br i1 %tmp53, label %cond_next57, label %UnifiedReturnBlock
-
-cond_next57: ; preds = %cond_next46
- %tmp65 = and i32 %tmp32, 255 ; <i32> [#uses=1]
- switch i32 %tmp65, label %UnifiedReturnBlock [
- i32 6, label %bb140
- i32 7, label %bb69
- i32 8, label %bb140
- i32 13, label %bb478
- i32 23, label %bb
- ]
-
-bb: ; preds = %cond_next57
- tail call void @fancy_abort( i8* getelementptr ([28 x i8]* @.str, i32 0, i64 0), i32 1727, i8* getelementptr ([12 x i8]* @__FUNCTION__.22683, i32 0, i32 0) )
- unreachable
-
-bb69: ; preds = %cond_next57
- br i1 %tmp40, label %cond_next91, label %cond_true88
-
-cond_true88: ; preds = %bb69
- tail call void @tree_class_check_failed( %struct.tree_node* %type, i32 2, i8* getelementptr ([28 x i8]* @.str, i32 0, i64 0), i32 1730, i8* getelementptr ([12 x i8]* @__FUNCTION__.22683, i32 0, i32 0) )
- unreachable
-
-cond_next91: ; preds = %bb69
- %tmp96 = getelementptr %struct.tree_node* %type, i32 0, i32 0, i32 8 ; <i8*> [#uses=1]
- %tmp9697 = bitcast i8* %tmp96 to i32* ; <i32*> [#uses=2]
- %tmp98 = load i32* %tmp9697, align 8 ; <i32> [#uses=2]
- %tmp100101552 = and i32 %tmp98, 511 ; <i32> [#uses=1]
- %tmp102 = icmp eq i32 %tmp100101552, 0 ; <i1> [#uses=1]
- br i1 %tmp102, label %cond_true105, label %bb140
-
-cond_true105: ; preds = %cond_next91
- br i1 %tmp40, label %cond_next127, label %cond_true124
-
-cond_true124: ; preds = %cond_true105
- tail call void @tree_class_check_failed( %struct.tree_node* %type, i32 2, i8* getelementptr ([28 x i8]* @.str, i32 0, i64 0), i32 1731, i8* getelementptr ([12 x i8]* @__FUNCTION__.22683, i32 0, i32 0) )
- unreachable
-
-cond_next127: ; preds = %cond_true105
- %tmp136 = or i32 %tmp98, 1 ; <i32> [#uses=1]
- %tmp137 = and i32 %tmp136, -511 ; <i32> [#uses=1]
- store i32 %tmp137, i32* %tmp9697, align 8
- br label %bb140
-
-bb140: ; preds = %cond_next127, %cond_next91, %cond_next57, %cond_next57
- switch i8 %tmp3435, label %cond_true202 [
- i8 6, label %cond_next208
- i8 9, label %cond_next208
- i8 7, label %cond_next208
- i8 8, label %cond_next208
- i8 10, label %cond_next208
- ]
-
-cond_true202: ; preds = %bb140
- tail call void (%struct.tree_node*, i8*, i32, i8*, ...)* @tree_check_failed( %struct.tree_node* %type, i8* getelementptr ([28 x i8]* @.str, i32 0, i64 0), i32 1738, i8* getelementptr ([12 x i8]* @__FUNCTION__.22683, i32 0, i32 0), i32 9, i32 6, i32 7, i32 8, i32 10, i32 0 )
- unreachable
-
-cond_next208: ; preds = %bb140, %bb140, %bb140, %bb140, %bb140
- %tmp213 = getelementptr %struct.tree_type* %tmp4950, i32 0, i32 14 ; <%struct.tree_node**> [#uses=1]
- %tmp214 = load %struct.tree_node** %tmp213, align 8 ; <%struct.tree_node*> [#uses=2]
- %tmp217 = getelementptr %struct.tree_node* %tmp214, i32 0, i32 0, i32 0, i32 3 ; <i8*> [#uses=1]
- %tmp217218 = bitcast i8* %tmp217 to i32* ; <i32*> [#uses=1]
- %tmp219 = load i32* %tmp217218, align 8 ; <i32> [#uses=1]
- %tmp221222 = trunc i32 %tmp219 to i8 ; <i8> [#uses=1]
- %tmp223 = icmp eq i8 %tmp221222, 24 ; <i1> [#uses=1]
- br i1 %tmp223, label %cond_true226, label %cond_next340
-
-cond_true226: ; preds = %cond_next208
- switch i8 %tmp3435, label %cond_true288 [
- i8 6, label %cond_next294
- i8 9, label %cond_next294
- i8 7, label %cond_next294
- i8 8, label %cond_next294
- i8 10, label %cond_next294
- ]
-
-cond_true288: ; preds = %cond_true226
- tail call void (%struct.tree_node*, i8*, i32, i8*, ...)* @tree_check_failed( %struct.tree_node* %type, i8* getelementptr ([28 x i8]* @.str, i32 0, i64 0), i32 1739, i8* getelementptr ([12 x i8]* @__FUNCTION__.22683, i32 0, i32 0), i32 9, i32 6, i32 7, i32 8, i32 10, i32 0 )
- unreachable
-
-cond_next294: ; preds = %cond_true226, %cond_true226, %cond_true226, %cond_true226, %cond_true226
- %tmp301 = tail call i32 @tree_int_cst_sgn( %struct.tree_node* %tmp214 ) ; <i32> [#uses=1]
- %tmp302 = icmp sgt i32 %tmp301, -1 ; <i1> [#uses=1]
- br i1 %tmp302, label %cond_true305, label %cond_next340
-
-cond_true305: ; preds = %cond_next294
- %tmp313 = load i32* %tmp3031, align 8 ; <i32> [#uses=2]
- %tmp315316 = trunc i32 %tmp313 to i8 ; <i8> [#uses=1]
- %tmp315316317318 = zext i8 %tmp315316 to i64 ; <i64> [#uses=1]
- %tmp319 = getelementptr [0 x i32]* @tree_code_type, i32 0, i64 %tmp315316317318 ; <i32*> [#uses=1]
- %tmp320 = load i32* %tmp319, align 4 ; <i32> [#uses=1]
- %tmp321 = icmp eq i32 %tmp320, 2 ; <i1> [#uses=1]
- br i1 %tmp321, label %cond_next327, label %cond_true324
-
-cond_true324: ; preds = %cond_true305
- tail call void @tree_class_check_failed( %struct.tree_node* %type, i32 2, i8* getelementptr ([28 x i8]* @.str, i32 0, i64 0), i32 1740, i8* getelementptr ([12 x i8]* @__FUNCTION__.22683, i32 0, i32 0) )
- unreachable
-
-cond_next327: ; preds = %cond_true305
- %tmp338 = or i32 %tmp313, 8192 ; <i32> [#uses=1]
- store i32 %tmp338, i32* %tmp3031, align 8
- br label %cond_next340
-
-cond_next340: ; preds = %cond_next327, %cond_next294, %cond_next208
- %tmp348 = load i32* %tmp3031, align 8 ; <i32> [#uses=1]
- %tmp350351 = trunc i32 %tmp348 to i8 ; <i8> [#uses=1]
- %tmp350351352353 = zext i8 %tmp350351 to i64 ; <i64> [#uses=1]
- %tmp354 = getelementptr [0 x i32]* @tree_code_type, i32 0, i64 %tmp350351352353 ; <i32*> [#uses=1]
- %tmp355 = load i32* %tmp354, align 4 ; <i32> [#uses=1]
- %tmp356 = icmp eq i32 %tmp355, 2 ; <i1> [#uses=1]
- br i1 %tmp356, label %cond_next385, label %cond_true359
-
-cond_true359: ; preds = %cond_next340
- tail call void @tree_class_check_failed( %struct.tree_node* %type, i32 2, i8* getelementptr ([28 x i8]* @.str, i32 0, i64 0), i32 1742, i8* getelementptr ([12 x i8]* @__FUNCTION__.22683, i32 0, i32 0) )
- unreachable
-
-cond_next385: ; preds = %cond_next340
- %tmp390 = getelementptr %struct.tree_node* %type, i32 0, i32 0, i32 8 ; <i8*> [#uses=1]
- %tmp390391 = bitcast i8* %tmp390 to i32* ; <i32*> [#uses=3]
- %tmp392 = load i32* %tmp390391, align 8 ; <i32> [#uses=1]
- %tmp394 = and i32 %tmp392, 511 ; <i32> [#uses=1]
- %tmp397 = tail call i32 @smallest_mode_for_size( i32 %tmp394, i32 2 ) ; <i32> [#uses=1]
- %tmp404 = load i32* %tmp390391, align 8 ; <i32> [#uses=1]
- %tmp397398405 = shl i32 %tmp397, 9 ; <i32> [#uses=1]
- %tmp407 = and i32 %tmp397398405, 65024 ; <i32> [#uses=1]
- %tmp408 = and i32 %tmp404, -65025 ; <i32> [#uses=1]
- %tmp409 = or i32 %tmp408, %tmp407 ; <i32> [#uses=2]
- store i32 %tmp409, i32* %tmp390391, align 8
- %tmp417 = load i32* %tmp3031, align 8 ; <i32> [#uses=1]
- %tmp419420 = trunc i32 %tmp417 to i8 ; <i8> [#uses=1]
- %tmp419420421422 = zext i8 %tmp419420 to i64 ; <i64> [#uses=1]
- %tmp423 = getelementptr [0 x i32]* @tree_code_type, i32 0, i64 %tmp419420421422 ; <i32*> [#uses=1]
- %tmp424 = load i32* %tmp423, align 4 ; <i32> [#uses=1]
- %tmp425 = icmp eq i32 %tmp424, 2 ; <i1> [#uses=1]
- br i1 %tmp425, label %cond_next454, label %cond_true428
-
-cond_true428: ; preds = %cond_next385
- tail call void @tree_class_check_failed( %struct.tree_node* %type, i32 2, i8* getelementptr ([28 x i8]* @.str, i32 0, i64 0), i32 1744, i8* getelementptr ([12 x i8]* @__FUNCTION__.22683, i32 0, i32 0) )
- unreachable
-
-cond_next454: ; preds = %cond_next385
- lshr i32 %tmp409, 9 ; <i32>:0 [#uses=1]
- trunc i32 %0 to i8 ; <i8>:1 [#uses=1]
- %tmp463464 = and i8 %1, 127 ; <i8> [#uses=1]
- %tmp463464465466 = zext i8 %tmp463464 to i64 ; <i64> [#uses=1]
- %tmp467 = getelementptr [48 x i8]* @mode_size, i32 0, i64 %tmp463464465466 ; <i8*> [#uses=1]
- %tmp468 = load i8* %tmp467, align 1 ; <i8> [#uses=1]
- %tmp468469553 = zext i8 %tmp468 to i16 ; <i16> [#uses=1]
- %tmp470471 = shl i16 %tmp468469553, 3 ; <i16> [#uses=1]
- %tmp470471472 = zext i16 %tmp470471 to i64 ; <i64> [#uses=1]
- %tmp473 = tail call %struct.tree_node* @size_int_kind( i64 %tmp470471472, i32 2 ) ; <%struct.tree_node*> [#uses=1]
- store %struct.tree_node* %tmp473, %struct.tree_node** %tmp51, align 8
- ret void
-
-bb478: ; preds = %cond_next57
- br i1 %tmp40, label %cond_next500, label %cond_true497
-
-cond_true497: ; preds = %bb478
- tail call void @tree_class_check_failed( %struct.tree_node* %type, i32 2, i8* getelementptr ([28 x i8]* @.str, i32 0, i64 0), i32 1755, i8* getelementptr ([12 x i8]* @__FUNCTION__.22683, i32 0, i32 0) )
- unreachable
-
-cond_next500: ; preds = %bb478
- %tmp506 = getelementptr %struct.tree_node* %type, i32 0, i32 0, i32 0, i32 1 ; <%struct.tree_node**> [#uses=1]
- %tmp507 = load %struct.tree_node** %tmp506, align 8 ; <%struct.tree_node*> [#uses=2]
- %tmp511 = getelementptr %struct.tree_node* %tmp507, i32 0, i32 0, i32 0, i32 3 ; <i8*> [#uses=1]
- %tmp511512 = bitcast i8* %tmp511 to i32* ; <i32*> [#uses=1]
- %tmp513 = load i32* %tmp511512, align 8 ; <i32> [#uses=2]
- %tmp515516 = trunc i32 %tmp513 to i8 ; <i8> [#uses=1]
- %tmp515516517518 = zext i8 %tmp515516 to i64 ; <i64> [#uses=1]
- %tmp519 = getelementptr [0 x i32]* @tree_code_type, i32 0, i64 %tmp515516517518 ; <i32*> [#uses=1]
- %tmp520 = load i32* %tmp519, align 4 ; <i32> [#uses=1]
- %tmp521 = icmp eq i32 %tmp520, 2 ; <i1> [#uses=1]
- br i1 %tmp521, label %cond_next527, label %cond_true524
-
-cond_true524: ; preds = %cond_next500
- tail call void @tree_class_check_failed( %struct.tree_node* %tmp507, i32 2, i8* getelementptr ([28 x i8]* @.str, i32 0, i64 0), i32 1755, i8* getelementptr ([12 x i8]* @__FUNCTION__.22683, i32 0, i32 0) )
- unreachable
-
-cond_next527: ; preds = %cond_next500
- %tmp545 = and i32 %tmp513, 8192 ; <i32> [#uses=1]
- %tmp547 = and i32 %tmp32, -8193 ; <i32> [#uses=1]
- %tmp548 = or i32 %tmp547, %tmp545 ; <i32> [#uses=1]
- store i32 %tmp548, i32* %tmp3031, align 8
- ret void
-
-UnifiedReturnBlock: ; preds = %cond_next57, %cond_next46, %cond_false
- ret void
-}
-
-declare void @fancy_abort(i8*, i32, i8*)
-
-declare void @tree_class_check_failed(%struct.tree_node*, i32, i8*, i32, i8*)
-
-declare i32 @smallest_mode_for_size(i32, i32)
-
-declare %struct.tree_node* @size_int_kind(i64, i32)
-
-declare void @tree_check_failed(%struct.tree_node*, i8*, i32, i8*, ...)
-
-declare i32 @tree_int_cst_sgn(%struct.tree_node*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-16-fp80_select.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-10-16-fp80_select.ll
deleted file mode 100644
index 3f9845c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-16-fp80_select.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86
-; ModuleID = 'bugpoint-reduced-simplified.bc'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin9"
- %struct.wxPoint2DInt = type { i32, i32 }
-
-define x86_fp80 @_ZNK12wxPoint2DInt14GetVectorAngleEv(%struct.wxPoint2DInt* %this) {
-entry:
- br i1 false, label %cond_true, label %UnifiedReturnBlock
-
-cond_true: ; preds = %entry
- %tmp8 = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp9 = icmp sgt i32 %tmp8, -1 ; <i1> [#uses=1]
- %retval = select i1 %tmp9, x86_fp80 0xK4005B400000000000000, x86_fp80 0xK40078700000000000000 ; <x86_fp80> [#uses=1]
- ret x86_fp80 %retval
-
-UnifiedReturnBlock: ; preds = %entry
- ret x86_fp80 0xK4005B400000000000000
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-17-IllegalAsm.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-10-17-IllegalAsm.ll
deleted file mode 100644
index c0bb55e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-17-IllegalAsm.ll
+++ /dev/null
@@ -1,87 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-linux-gnu | grep addb | not grep x
-; RUN: llc < %s -mtriple=x86_64-linux-gnu | grep cmpb | not grep x
-; PR1734
-
-target triple = "x86_64-unknown-linux-gnu"
- %struct.CUMULATIVE_ARGS = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.eh_status = type opaque
- %struct.emit_status = type { i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack*, i32, %struct.location_t, i32, i8*, %struct.rtx_def** }
- %struct.expr_status = type { i32, i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
- %struct.function = type { %struct.eh_status*, %struct.expr_status*, %struct.emit_status*, %struct.varasm_status*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.function*, i32, i32, i32, i32, %struct.rtx_def*, %struct.CUMULATIVE_ARGS, %struct.rtx_def*, %struct.rtx_def*, %struct.initial_value_struct*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, i8, i32, i64, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct.varray_head_tag*, %struct.temp_slot*, i32, %struct.var_refs_queue*, i32, i32, %struct.rtvec_def*, %struct.tree_node*, i32, i32, i32, %struct.machine_function*, i32, i32, i8, i8, %struct.language_function*, %struct.rtx_def*, i32, i32, i32, i32, %struct.location_t, %struct.varray_head_tag*, %struct.tree_node*, %struct.tree_node*, i8, i8, i8 }
- %struct.initial_value_struct = type opaque
- %struct.lang_decl = type opaque
- %struct.language_function = type opaque
- %struct.location_t = type { i8*, i32 }
- %struct.machine_function = type { %struct.stack_local_entry*, i8*, %struct.rtx_def*, i32, i32, i32, i32, i32 }
- %struct.rtunion = type { i8* }
- %struct.rtvec_def = type { i32, [1 x %struct.rtx_def*] }
- %struct.rtx_def = type { i16, i8, i8, %struct.u }
- %struct.sequence_stack = type { %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack* }
- %struct.stack_local_entry = type opaque
- %struct.temp_slot = type opaque
- %struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, %union.tree_ann_d*, i8, i8, i8, i8, i8 }
- %struct.tree_decl = type { %struct.tree_common, %struct.location_t, i32, %struct.tree_node*, i8, i8, i8, i8, i8, i8, i8, i8, i32, %struct.tree_decl_u1, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, i32, %struct.tree_decl_u2, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_decl* }
- %struct.tree_decl_u1 = type { i64 }
- %struct.tree_decl_u2 = type { %struct.function* }
- %struct.tree_node = type { %struct.tree_decl }
- %struct.u = type { [1 x %struct.rtunion] }
- %struct.var_refs_queue = type { %struct.rtx_def*, i32, i32, %struct.var_refs_queue* }
- %struct.varasm_status = type opaque
- %struct.varray_data = type { [1 x i64] }
- %struct.varray_head_tag = type { i64, i64, i32, i8*, %struct.varray_data }
- %union.tree_ann_d = type opaque
-
-define void @layout_type(%struct.tree_node* %type) {
-entry:
- %tmp32 = load i32* null, align 8 ; <i32> [#uses=3]
- %tmp3435 = trunc i32 %tmp32 to i8 ; <i8> [#uses=1]
- %tmp53 = icmp eq %struct.tree_node* null, null ; <i1> [#uses=1]
- br i1 %tmp53, label %cond_next57, label %UnifiedReturnBlock
-
-cond_next57: ; preds = %entry
- %tmp65 = and i32 %tmp32, 255 ; <i32> [#uses=1]
- switch i32 %tmp65, label %UnifiedReturnBlock [
- i32 6, label %bb140
- i32 7, label %bb140
- i32 8, label %bb140
- i32 13, label %bb478
- ]
-
-bb140: ; preds = %cond_next57, %cond_next57, %cond_next57
- %tmp219 = load i32* null, align 8 ; <i32> [#uses=1]
- %tmp221222 = trunc i32 %tmp219 to i8 ; <i8> [#uses=1]
- %tmp223 = icmp eq i8 %tmp221222, 24 ; <i1> [#uses=1]
- br i1 %tmp223, label %cond_true226, label %cond_next340
-
-cond_true226: ; preds = %bb140
- switch i8 %tmp3435, label %cond_true288 [
- i8 6, label %cond_next340
- i8 9, label %cond_next340
- i8 7, label %cond_next340
- i8 8, label %cond_next340
- i8 10, label %cond_next340
- ]
-
-cond_true288: ; preds = %cond_true226
- unreachable
-
-cond_next340: ; preds = %cond_true226, %cond_true226, %cond_true226, %cond_true226, %cond_true226, %bb140
- ret void
-
-bb478: ; preds = %cond_next57
- br i1 false, label %cond_next500, label %cond_true497
-
-cond_true497: ; preds = %bb478
- unreachable
-
-cond_next500: ; preds = %bb478
- %tmp513 = load i32* null, align 8 ; <i32> [#uses=1]
- %tmp545 = and i32 %tmp513, 8192 ; <i32> [#uses=1]
- %tmp547 = and i32 %tmp32, -8193 ; <i32> [#uses=1]
- %tmp548 = or i32 %tmp547, %tmp545 ; <i32> [#uses=1]
- store i32 %tmp548, i32* null, align 8
- ret void
-
-UnifiedReturnBlock: ; preds = %cond_next57, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-19-SpillerUnfold.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-10-19-SpillerUnfold.ll
deleted file mode 100644
index 600bd1f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-19-SpillerUnfold.ll
+++ /dev/null
@@ -1,84 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | grep inc | not grep PTR
-
-define i16 @t(i32* %bitptr, i32* %source, i8** %byteptr, i32 %scale, i32 %round) signext {
-entry:
- br label %bb
-
-bb: ; preds = %cond_next391, %entry
- %cnt.0 = phi i32 [ 0, %entry ], [ %tmp422445, %cond_next391 ] ; <i32> [#uses=1]
- %v.1 = phi i32 [ undef, %entry ], [ %tmp411, %cond_next391 ] ; <i32> [#uses=0]
- br i1 false, label %cond_true, label %cond_next127
-
-cond_true: ; preds = %bb
- store i8* null, i8** %byteptr, align 4
- store i8* null, i8** %byteptr, align 4
- br label %cond_next127
-
-cond_next127: ; preds = %cond_true, %bb
- %tmp151 = add i32 0, %round ; <i32> [#uses=1]
- %tmp153 = ashr i32 %tmp151, %scale ; <i32> [#uses=2]
- %tmp154155 = trunc i32 %tmp153 to i16 ; <i16> [#uses=1]
- %tmp154155156 = sext i16 %tmp154155 to i32 ; <i32> [#uses=1]
- %tmp158 = xor i32 %tmp154155156, %tmp153 ; <i32> [#uses=1]
- %tmp160 = or i32 %tmp158, %cnt.0 ; <i32> [#uses=1]
- %tmp171 = load i32* %bitptr, align 4 ; <i32> [#uses=1]
- %tmp180181 = sext i16 0 to i32 ; <i32> [#uses=3]
- %tmp183 = add i32 %tmp160, 1 ; <i32> [#uses=1]
- br i1 false, label %cond_true188, label %cond_next245
-
-cond_true188: ; preds = %cond_next127
- ret i16 0
-
-cond_next245: ; preds = %cond_next127
- %tmp249 = ashr i32 %tmp180181, 8 ; <i32> [#uses=1]
- %tmp250 = add i32 %tmp171, %tmp249 ; <i32> [#uses=1]
- %tmp253444 = lshr i32 %tmp180181, 4 ; <i32> [#uses=1]
- %tmp254 = and i32 %tmp253444, 15 ; <i32> [#uses=1]
- %tmp256 = and i32 %tmp180181, 15 ; <i32> [#uses=2]
- %tmp264 = icmp ugt i32 %tmp250, 15 ; <i1> [#uses=1]
- br i1 %tmp264, label %cond_true267, label %cond_next391
-
-cond_true267: ; preds = %cond_next245
- store i8* null, i8** %byteptr, align 4
- store i8* null, i8** %byteptr, align 4
- br i1 false, label %cond_true289, label %cond_next327
-
-cond_true289: ; preds = %cond_true267
- ret i16 0
-
-cond_next327: ; preds = %cond_true267
- br i1 false, label %cond_true343, label %cond_next385
-
-cond_true343: ; preds = %cond_next327
- %tmp345 = load i8** %byteptr, align 4 ; <i8*> [#uses=1]
- store i8* null, i8** %byteptr, align 4
- br i1 false, label %cond_next385, label %cond_true352
-
-cond_true352: ; preds = %cond_true343
- store i8* %tmp345, i8** %byteptr, align 4
- br i1 false, label %cond_true364, label %cond_next385
-
-cond_true364: ; preds = %cond_true352
- ret i16 0
-
-cond_next385: ; preds = %cond_true352, %cond_true343, %cond_next327
- br label %cond_next391
-
-cond_next391: ; preds = %cond_next385, %cond_next245
- %tmp393 = load i32* %source, align 4 ; <i32> [#uses=1]
- %tmp395 = load i32* %bitptr, align 4 ; <i32> [#uses=2]
- %tmp396 = shl i32 %tmp393, %tmp395 ; <i32> [#uses=1]
- %tmp398 = sub i32 32, %tmp256 ; <i32> [#uses=1]
- %tmp405 = lshr i32 %tmp396, 31 ; <i32> [#uses=1]
- %tmp406 = add i32 %tmp405, -1 ; <i32> [#uses=1]
- %tmp409 = lshr i32 %tmp406, %tmp398 ; <i32> [#uses=1]
- %tmp411 = sub i32 0, %tmp409 ; <i32> [#uses=1]
- %tmp422445 = add i32 %tmp254, %tmp183 ; <i32> [#uses=2]
- %tmp426447 = add i32 %tmp395, %tmp256 ; <i32> [#uses=1]
- store i32 %tmp426447, i32* %bitptr, align 4
- %tmp429448 = icmp ult i32 %tmp422445, 63 ; <i1> [#uses=1]
- br i1 %tmp429448, label %bb, label %UnifiedReturnBlock
-
-UnifiedReturnBlock: ; preds = %cond_next391
- ret i16 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-28-inlineasm-q-modifier.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-10-28-inlineasm-q-modifier.ll
deleted file mode 100644
index 984094d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-28-inlineasm-q-modifier.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s
-; PR1748
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define i32 @kernel_init(i8* %unused) {
-entry:
- call void asm sideeffect "foo ${0:q}", "=*imr"( i64* null )
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-29-ExtendSetCC.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-10-29-ExtendSetCC.ll
deleted file mode 100644
index 86d3bbf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-29-ExtendSetCC.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | count 1
-
-define i16 @t() signext {
-entry:
- %tmp180 = load i16* null, align 2 ; <i16> [#uses=3]
- %tmp180181 = sext i16 %tmp180 to i32 ; <i32> [#uses=1]
- %tmp185 = icmp slt i16 %tmp180, 0 ; <i1> [#uses=1]
- br i1 %tmp185, label %cond_true188, label %cond_next245
-
-cond_true188: ; preds = %entry
- %tmp195196 = trunc i16 %tmp180 to i8 ; <i8> [#uses=0]
- ret i16 0
-
-cond_next245: ; preds = %entry
- %tmp256 = and i32 %tmp180181, 15 ; <i32> [#uses=0]
- ret i16 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-30-LSRCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-10-30-LSRCrash.ll
deleted file mode 100644
index 42db98b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-30-LSRCrash.ll
+++ /dev/null
@@ -1,48 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define i32 @unique(i8* %full, i32 %p, i32 %len, i32 %mode, i32 %verbos, i32 %flags) {
-entry:
- br i1 false, label %cond_true15, label %cond_next107
-
-cond_true15: ; preds = %entry
- br i1 false, label %bb98.preheader, label %bb
-
-bb: ; preds = %cond_true15
- ret i32 0
-
-bb98.preheader: ; preds = %cond_true15
- br i1 false, label %bb103, label %bb69.outer
-
-bb76.split: ; preds = %bb69.outer.split.split, %bb69.us208
- br i1 false, label %bb103, label %bb69.outer
-
-bb69.outer: ; preds = %bb76.split, %bb98.preheader
- %from.0.reg2mem.0.ph.rec = phi i32 [ %tmp75.rec, %bb76.split ], [ 0, %bb98.preheader ] ; <i32> [#uses=1]
- %tmp75.rec = add i32 %from.0.reg2mem.0.ph.rec, 1 ; <i32> [#uses=2]
- %tmp75 = getelementptr i8* null, i32 %tmp75.rec ; <i8*> [#uses=6]
- br i1 false, label %bb69.us208, label %bb69.outer.split.split
-
-bb69.us208: ; preds = %bb69.outer
- switch i32 0, label %bb76.split [
- i32 47, label %bb89
- i32 58, label %bb89
- i32 92, label %bb89
- ]
-
-bb69.outer.split.split: ; preds = %bb69.outer
- switch i8 0, label %bb76.split [
- i8 47, label %bb89
- i8 58, label %bb89
- i8 92, label %bb89
- ]
-
-bb89: ; preds = %bb69.outer.split.split, %bb69.outer.split.split, %bb69.outer.split.split, %bb69.us208, %bb69.us208, %bb69.us208
- %tmp75.lcssa189 = phi i8* [ %tmp75, %bb69.us208 ], [ %tmp75, %bb69.us208 ], [ %tmp75, %bb69.us208 ], [ %tmp75, %bb69.outer.split.split ], [ %tmp75, %bb69.outer.split.split ], [ %tmp75, %bb69.outer.split.split ] ; <i8*> [#uses=0]
- ret i32 0
-
-bb103: ; preds = %bb76.split, %bb98.preheader
- ret i32 0
-
-cond_next107: ; preds = %entry
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-31-extractelement-i64.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-10-31-extractelement-i64.ll
deleted file mode 100644
index 1b8e67d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-10-31-extractelement-i64.ll
+++ /dev/null
@@ -1,82 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=sse2
-; ModuleID = 'yyy.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin8"
-
-define <1 x i64> @a(<2 x i64> %__A) {
-entry:
- %__A_addr = alloca <2 x i64> ; <<2 x i64>*> [#uses=2]
- %retval = alloca <1 x i64>, align 8 ; <<1 x i64>*> [#uses=3]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store <2 x i64> %__A, <2 x i64>* %__A_addr
- %tmp = load <2 x i64>* %__A_addr, align 16 ; <<2 x i64>> [#uses=1]
- %tmp1 = bitcast <2 x i64> %tmp to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp2 = extractelement <2 x i64> %tmp1, i32 0 ; <i64> [#uses=1]
- %tmp3 = bitcast i64 %tmp2 to <1 x i64> ; <<1 x i64>> [#uses=1]
- store <1 x i64> %tmp3, <1 x i64>* %retval, align 8
- %tmp4 = load <1 x i64>* %retval, align 8 ; <<1 x i64>> [#uses=0]
- br label %return
-
-return: ; preds = %entry
- %retval5 = load <1 x i64>* %retval ; <<1 x i64>> [#uses=1]
- ret <1 x i64> %retval5
-}
-
-define <1 x i64> @b(<2 x i64> %__A) {
-entry:
- %__A_addr = alloca <2 x i64> ; <<2 x i64>*> [#uses=2]
- %retval = alloca <1 x i64>, align 8 ; <<1 x i64>*> [#uses=3]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store <2 x i64> %__A, <2 x i64>* %__A_addr
- %tmp = load <2 x i64>* %__A_addr, align 16 ; <<2 x i64>> [#uses=1]
- %tmp1 = bitcast <2 x i64> %tmp to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp2 = extractelement <2 x i64> %tmp1, i32 1 ; <i64> [#uses=1]
- %tmp3 = bitcast i64 %tmp2 to <1 x i64> ; <<1 x i64>> [#uses=1]
- store <1 x i64> %tmp3, <1 x i64>* %retval, align 8
- %tmp4 = load <1 x i64>* %retval, align 8 ; <<1 x i64>> [#uses=0]
- br label %return
-
-return: ; preds = %entry
- %retval5 = load <1 x i64>* %retval ; <<1 x i64>> [#uses=1]
- ret <1 x i64> %retval5
-}
-
-define i64 @c(<2 x i64> %__A) {
-entry:
- %__A_addr = alloca <2 x i64> ; <<2 x i64>*> [#uses=2]
- %retval = alloca i64, align 8 ; <i64*> [#uses=2]
- %tmp = alloca i64, align 8 ; <i64*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store <2 x i64> %__A, <2 x i64>* %__A_addr
- %tmp1 = load <2 x i64>* %__A_addr, align 16 ; <<2 x i64>> [#uses=1]
- %tmp2 = bitcast <2 x i64> %tmp1 to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp3 = extractelement <2 x i64> %tmp2, i32 0 ; <i64> [#uses=1]
- store i64 %tmp3, i64* %tmp, align 8
- %tmp4 = load i64* %tmp, align 8 ; <i64> [#uses=1]
- store i64 %tmp4, i64* %retval, align 8
- br label %return
-
-return: ; preds = %entry
- %retval5 = load i64* %retval ; <i64> [#uses=1]
- ret i64 %retval5
-}
-
-define i64 @d(<2 x i64> %__A) {
-entry:
- %__A_addr = alloca <2 x i64> ; <<2 x i64>*> [#uses=2]
- %retval = alloca i64, align 8 ; <i64*> [#uses=2]
- %tmp = alloca i64, align 8 ; <i64*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store <2 x i64> %__A, <2 x i64>* %__A_addr
- %tmp1 = load <2 x i64>* %__A_addr, align 16 ; <<2 x i64>> [#uses=1]
- %tmp2 = bitcast <2 x i64> %tmp1 to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp3 = extractelement <2 x i64> %tmp2, i32 1 ; <i64> [#uses=1]
- store i64 %tmp3, i64* %tmp, align 8
- %tmp4 = load i64* %tmp, align 8 ; <i64> [#uses=1]
- store i64 %tmp4, i64* %retval, align 8
- br label %return
-
-return: ; preds = %entry
- %retval5 = load i64* %retval ; <i64> [#uses=1]
- ret i64 %retval5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-01-ISelCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-11-01-ISelCrash.ll
deleted file mode 100644
index 019c6a8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-01-ISelCrash.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86
-
- %"struct.K::JL" = type <{ i8 }>
- %struct.jv = type { i64 }
-
-declare fastcc i64 @f(i32, %"struct.K::JL"*, i8*, i8*, %struct.jv*)
-
-define void @t(%"struct.K::JL"* %obj, i8* %name, i8* %sig, %struct.jv* %args) {
-entry:
- %tmp5 = tail call fastcc i64 @f( i32 1, %"struct.K::JL"* %obj, i8* %name, i8* %sig, %struct.jv* %args ) ; <i64> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-02-BadAsm.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-11-02-BadAsm.ll
deleted file mode 100644
index 4e11cda..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-02-BadAsm.ll
+++ /dev/null
@@ -1,144 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | grep movl | not grep rax
-
- %struct.color_sample = type { i64 }
- %struct.gs_matrix = type { float, i64, float, i64, float, i64, float, i64, float, i64, float, i64 }
- %struct.ref = type { %struct.color_sample, i16, i16 }
- %struct.status = type { %struct.gs_matrix, i8*, i32, i32, i8*, i32, i32, i32, i32, i32, i32, i32 }
-
-define i32 @ztype1imagepath(%struct.ref* %op) {
-entry:
- br i1 false, label %cond_next, label %UnifiedReturnBlock
-
-cond_next: ; preds = %entry
- br i1 false, label %cond_next68, label %UnifiedReturnBlock
-
-cond_next68: ; preds = %cond_next
- %tmp5.i.i = malloc i8, i32 0 ; <i8*> [#uses=2]
- br i1 false, label %bb81.outer.i, label %xit.i
-
-bb81.outer.i: ; preds = %bb87.i, %cond_next68
- %tmp67.i = add i32 0, 1 ; <i32> [#uses=1]
- br label %bb81.i
-
-bb61.i: ; preds = %bb81.i
- %tmp71.i = getelementptr i8* %tmp5.i.i, i64 0 ; <i8*> [#uses=1]
- %tmp72.i = load i8* %tmp71.i, align 1 ; <i8> [#uses=1]
- %tmp73.i = icmp eq i8 %tmp72.i, 0 ; <i1> [#uses=1]
- br i1 %tmp73.i, label %bb81.i, label %xit.i
-
-bb81.i: ; preds = %bb61.i, %bb81.outer.i
- br i1 false, label %bb87.i, label %bb61.i
-
-bb87.i: ; preds = %bb81.i
- br i1 false, label %bb81.outer.i, label %xit.i
-
-xit.i: ; preds = %bb87.i, %bb61.i, %cond_next68
- %lsbx.0.reg2mem.1.i = phi i32 [ 0, %cond_next68 ], [ 0, %bb61.i ], [ %tmp67.i, %bb87.i ] ; <i32> [#uses=1]
- %tmp6162.i.i = fptrunc double 0.000000e+00 to float ; <float> [#uses=1]
- %tmp67.i15.i = fptrunc double 0.000000e+00 to float ; <float> [#uses=1]
- %tmp24.i27.i = icmp eq i64 0, 0 ; <i1> [#uses=1]
- br i1 %tmp24.i27.i, label %cond_next.i79.i, label %cond_true.i34.i
-
-cond_true.i34.i: ; preds = %xit.i
- ret i32 0
-
-cond_next.i79.i: ; preds = %xit.i
- %phitmp167.i = fptosi double 0.000000e+00 to i64 ; <i64> [#uses=1]
- %tmp142143.i = fpext float %tmp6162.i.i to double ; <double> [#uses=1]
- %tmp2.i139.i = fadd double %tmp142143.i, 5.000000e-01 ; <double> [#uses=1]
- %tmp23.i140.i = fptosi double %tmp2.i139.i to i64 ; <i64> [#uses=1]
- br i1 false, label %cond_true.i143.i, label %round_coord.exit148.i
-
-cond_true.i143.i: ; preds = %cond_next.i79.i
- %tmp8.i142.i = icmp sgt i64 %tmp23.i140.i, -32768 ; <i1> [#uses=1]
- br i1 %tmp8.i142.i, label %cond_true11.i145.i, label %round_coord.exit148.i
-
-cond_true11.i145.i: ; preds = %cond_true.i143.i
- ret i32 0
-
-round_coord.exit148.i: ; preds = %cond_true.i143.i, %cond_next.i79.i
- %tmp144149.i = phi i32 [ 32767, %cond_next.i79.i ], [ -32767, %cond_true.i143.i ] ; <i32> [#uses=1]
- store i32 %tmp144149.i, i32* null, align 8
- %tmp147148.i = fpext float %tmp67.i15.i to double ; <double> [#uses=1]
- %tmp2.i128.i = fadd double %tmp147148.i, 5.000000e-01 ; <double> [#uses=1]
- %tmp23.i129.i = fptosi double %tmp2.i128.i to i64 ; <i64> [#uses=2]
- %tmp5.i130.i = icmp slt i64 %tmp23.i129.i, 32768 ; <i1> [#uses=1]
- br i1 %tmp5.i130.i, label %cond_true.i132.i, label %round_coord.exit137.i
-
-cond_true.i132.i: ; preds = %round_coord.exit148.i
- %tmp8.i131.i = icmp sgt i64 %tmp23.i129.i, -32768 ; <i1> [#uses=1]
- br i1 %tmp8.i131.i, label %cond_true11.i134.i, label %round_coord.exit137.i
-
-cond_true11.i134.i: ; preds = %cond_true.i132.i
- br label %round_coord.exit137.i
-
-round_coord.exit137.i: ; preds = %cond_true11.i134.i, %cond_true.i132.i, %round_coord.exit148.i
- %tmp149138.i = phi i32 [ 0, %cond_true11.i134.i ], [ 32767, %round_coord.exit148.i ], [ -32767, %cond_true.i132.i ] ; <i32> [#uses=1]
- br i1 false, label %cond_true.i121.i, label %round_coord.exit126.i
-
-cond_true.i121.i: ; preds = %round_coord.exit137.i
- br i1 false, label %cond_true11.i123.i, label %round_coord.exit126.i
-
-cond_true11.i123.i: ; preds = %cond_true.i121.i
- br label %round_coord.exit126.i
-
-round_coord.exit126.i: ; preds = %cond_true11.i123.i, %cond_true.i121.i, %round_coord.exit137.i
- %tmp153127.i = phi i32 [ 0, %cond_true11.i123.i ], [ 32767, %round_coord.exit137.i ], [ -32767, %cond_true.i121.i ] ; <i32> [#uses=1]
- br i1 false, label %cond_true.i110.i, label %round_coord.exit115.i
-
-cond_true.i110.i: ; preds = %round_coord.exit126.i
- br i1 false, label %cond_true11.i112.i, label %round_coord.exit115.i
-
-cond_true11.i112.i: ; preds = %cond_true.i110.i
- br label %round_coord.exit115.i
-
-round_coord.exit115.i: ; preds = %cond_true11.i112.i, %cond_true.i110.i, %round_coord.exit126.i
- %tmp157116.i = phi i32 [ 0, %cond_true11.i112.i ], [ 32767, %round_coord.exit126.i ], [ -32767, %cond_true.i110.i ] ; <i32> [#uses=2]
- br i1 false, label %cond_true.i99.i, label %round_coord.exit104.i
-
-cond_true.i99.i: ; preds = %round_coord.exit115.i
- br i1 false, label %cond_true11.i101.i, label %round_coord.exit104.i
-
-cond_true11.i101.i: ; preds = %cond_true.i99.i
- %tmp1213.i100.i = trunc i64 %phitmp167.i to i32 ; <i32> [#uses=1]
- br label %cond_next172.i
-
-round_coord.exit104.i: ; preds = %cond_true.i99.i, %round_coord.exit115.i
- %UnifiedRetVal.i102.i = phi i32 [ 32767, %round_coord.exit115.i ], [ -32767, %cond_true.i99.i ] ; <i32> [#uses=1]
- %tmp164.i = call fastcc i32 @put_int( %struct.status* null, i32 %tmp157116.i ) ; <i32> [#uses=0]
- br label %cond_next172.i
-
-cond_next172.i: ; preds = %round_coord.exit104.i, %cond_true11.i101.i
- %tmp161105.reg2mem.0.i = phi i32 [ %tmp1213.i100.i, %cond_true11.i101.i ], [ %UnifiedRetVal.i102.i, %round_coord.exit104.i ] ; <i32> [#uses=1]
- %tmp174.i = icmp eq i32 %tmp153127.i, 0 ; <i1> [#uses=1]
- %bothcond.i = and i1 false, %tmp174.i ; <i1> [#uses=1]
- %tmp235.i = call fastcc i32 @put_int( %struct.status* null, i32 %tmp149138.i ) ; <i32> [#uses=0]
- %tmp245.i = load i8** null, align 8 ; <i8*> [#uses=2]
- %tmp246.i = getelementptr i8* %tmp245.i, i64 1 ; <i8*> [#uses=1]
- br i1 %bothcond.i, label %cond_next254.i, label %bb259.i
-
-cond_next254.i: ; preds = %cond_next172.i
- store i8 13, i8* %tmp245.i, align 1
- br label %bb259.i
-
-bb259.i: ; preds = %cond_next254.i, %cond_next172.i
- %storemerge.i = phi i8* [ %tmp246.i, %cond_next254.i ], [ null, %cond_next172.i ] ; <i8*> [#uses=0]
- %tmp261.i = shl i32 %lsbx.0.reg2mem.1.i, 2 ; <i32> [#uses=1]
- store i32 %tmp261.i, i32* null, align 8
- %tmp270.i = add i32 0, %tmp157116.i ; <i32> [#uses=1]
- store i32 %tmp270.i, i32* null, align 8
- %tmp275.i = add i32 0, %tmp161105.reg2mem.0.i ; <i32> [#uses=0]
- br i1 false, label %trace_cells.exit.i, label %bb.preheader.i.i
-
-bb.preheader.i.i: ; preds = %bb259.i
- ret i32 0
-
-trace_cells.exit.i: ; preds = %bb259.i
- free i8* %tmp5.i.i
- ret i32 0
-
-UnifiedReturnBlock: ; preds = %cond_next, %entry
- ret i32 -20
-}
-
-declare fastcc i32 @put_int(%struct.status*, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-03-x86-64-q-constraint.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-11-03-x86-64-q-constraint.ll
deleted file mode 100644
index 27ec826..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-03-x86-64-q-constraint.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s
-; PR1763
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define void @yield() {
- %tmp9 = call i64 asm sideeffect "xchgb ${0:b},$1", "=q,*m,0,~{dirflag},~{fpsr},~{flags},~{memory}"( i64* null, i64 0 ) ; <i64>
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-04-LiveIntervalCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-11-04-LiveIntervalCrash.ll
deleted file mode 100644
index 4045618..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-04-LiveIntervalCrash.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu
-; PR1766
-
- %struct.dentry = type { %struct.dentry_operations* }
- %struct.dentry_operations = type { i32 (%struct.dentry*, %struct.qstr*)* }
- %struct.qstr = type { i32, i32, i8* }
-
-define %struct.dentry* @d_hash_and_lookup(%struct.dentry* %dir, %struct.qstr* %name) {
-entry:
- br i1 false, label %bb37, label %bb
-
-bb: ; preds = %bb, %entry
- %name8.0.reg2mem.0.rec = phi i64 [ %indvar.next, %bb ], [ 0, %entry ] ; <i64> [#uses=1]
- %hash.0.reg2mem.0 = phi i64 [ %tmp27, %bb ], [ 0, %entry ] ; <i64> [#uses=1]
- %tmp13 = load i8* null, align 1 ; <i8> [#uses=1]
- %tmp1314 = zext i8 %tmp13 to i64 ; <i64> [#uses=1]
- %tmp25 = lshr i64 %tmp1314, 4 ; <i64> [#uses=1]
- %tmp22 = add i64 %tmp25, %hash.0.reg2mem.0 ; <i64> [#uses=1]
- %tmp26 = add i64 %tmp22, 0 ; <i64> [#uses=1]
- %tmp27 = mul i64 %tmp26, 11 ; <i64> [#uses=2]
- %indvar.next = add i64 %name8.0.reg2mem.0.rec, 1 ; <i64> [#uses=2]
- %exitcond = icmp eq i64 %indvar.next, 0 ; <i1> [#uses=1]
- br i1 %exitcond, label %bb37.loopexit, label %bb
-
-bb37.loopexit: ; preds = %bb
- %phitmp = trunc i64 %tmp27 to i32 ; <i32> [#uses=1]
- br label %bb37
-
-bb37: ; preds = %bb37.loopexit, %entry
- %hash.0.reg2mem.1 = phi i32 [ %phitmp, %bb37.loopexit ], [ 0, %entry ] ; <i32> [#uses=1]
- store i32 %hash.0.reg2mem.1, i32* null, align 8
- %tmp75 = tail call i32 null( %struct.dentry* %dir, %struct.qstr* %name ) ; <i32> [#uses=0]
- %tmp84 = tail call i32 (...)* @d_lookup( %struct.dentry* %dir, %struct.qstr* %name ) ; <i32> [#uses=0]
- ret %struct.dentry* null
-}
-
-declare i32 @d_lookup(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-04-LiveVariablesBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-11-04-LiveVariablesBug.ll
deleted file mode 100644
index 6b871aa..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-04-LiveVariablesBug.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu
-; PR1767
-
-define void @xor_sse_2(i64 %bytes, i64* %p1, i64* %p2) {
-entry:
- %p2_addr = alloca i64* ; <i64**> [#uses=2]
- %lines = alloca i32 ; <i32*> [#uses=2]
- store i64* %p2, i64** %p2_addr, align 8
- %tmp1 = lshr i64 %bytes, 8 ; <i64> [#uses=1]
- %tmp12 = trunc i64 %tmp1 to i32 ; <i32> [#uses=2]
- store i32 %tmp12, i32* %lines, align 4
- %tmp6 = call i64* asm sideeffect "foo",
-"=r,=*r,=*r,r,0,1,2,~{dirflag},~{fpsr},~{flags},~{memory}"( i64** %p2_addr,
-i32* %lines, i64 256, i64* %p1, i64* %p2, i32 %tmp12 ) ; <i64*> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-04-rip-immediate-constant.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-11-04-rip-immediate-constant.ll
deleted file mode 100644
index 8e586a7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-04-rip-immediate-constant.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -relocation-model=static | grep {foo _str$}
-; PR1761
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin8"
-@str = internal constant [12 x i8] c"init/main.c\00" ; <[12 x i8]*> [#uses=1]
-
-define i32 @unknown_bootoption() {
-entry:
- tail call void asm sideeffect "foo ${0:c}\0A", "i,~{dirflag},~{fpsr},~{flags}"( i8* getelementptr ([12 x i8]* @str, i32 0, i64 0) )
- ret i32 undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-06-InstrSched.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-11-06-InstrSched.ll
deleted file mode 100644
index f6db0d0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-06-InstrSched.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep lea
-
-define float @foo(i32* %x, float* %y, i32 %c) nounwind {
-entry:
- %tmp2132 = icmp eq i32 %c, 0 ; <i1> [#uses=1]
- br i1 %tmp2132, label %bb23, label %bb18
-
-bb18: ; preds = %bb18, %entry
- %i.0.reg2mem.0 = phi i32 [ 0, %entry ], [ %tmp17, %bb18 ] ; <i32> [#uses=3]
- %res.0.reg2mem.0 = phi float [ 0.000000e+00, %entry ], [ %tmp14, %bb18 ] ; <float> [#uses=1]
- %tmp3 = getelementptr i32* %x, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
- %tmp4 = load i32* %tmp3, align 4 ; <i32> [#uses=1]
- %tmp45 = sitofp i32 %tmp4 to float ; <float> [#uses=1]
- %tmp8 = getelementptr float* %y, i32 %i.0.reg2mem.0 ; <float*> [#uses=1]
- %tmp9 = load float* %tmp8, align 4 ; <float> [#uses=1]
- %tmp11 = fmul float %tmp9, %tmp45 ; <float> [#uses=1]
- %tmp14 = fadd float %tmp11, %res.0.reg2mem.0 ; <float> [#uses=2]
- %tmp17 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2]
- %tmp21 = icmp ult i32 %tmp17, %c ; <i1> [#uses=1]
- br i1 %tmp21, label %bb18, label %bb23
-
-bb23: ; preds = %bb18, %entry
- %res.0.reg2mem.1 = phi float [ 0.000000e+00, %entry ], [ %tmp14, %bb18 ] ; <float> [#uses=1]
- ret float %res.0.reg2mem.1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-07-MulBy4.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-11-07-MulBy4.ll
deleted file mode 100644
index d5b630b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-07-MulBy4.ll
+++ /dev/null
@@ -1,129 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep imul
-
- %struct.eebb = type { %struct.eebb*, i16* }
- %struct.hf = type { %struct.hf*, i16*, i8*, i32, i32, %struct.eebb*, i32, i32, i8*, i8*, i8*, i8*, i16*, i8*, i16*, %struct.ri, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [30 x i32], %struct.eebb, i32, i8* }
- %struct.foo_data = type { i32, i32, i32, i32*, i32, i32, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i16*, i16*, i16*, i16*, i32, i32, i32, %struct.ri*, i8*, %struct.hf* }
- %struct.ri = type { %struct.ri*, i32, i8*, i16*, i32*, i32 }
-
-define fastcc i32 @foo(i16* %eptr, i8* %ecode, %struct.foo_data* %md, i32 %ims) {
-entry:
- %tmp36 = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp37 = icmp ult i32 0, %tmp36 ; <i1> [#uses=1]
- br i1 %tmp37, label %cond_next79, label %cond_true
-
-cond_true: ; preds = %entry
- ret i32 0
-
-cond_next79: ; preds = %entry
- %tmp85 = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp86 = icmp ult i32 0, %tmp85 ; <i1> [#uses=1]
- br i1 %tmp86, label %cond_next130, label %cond_true89
-
-cond_true89: ; preds = %cond_next79
- ret i32 0
-
-cond_next130: ; preds = %cond_next79
- %tmp173 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- br i1 %tmp173, label %cond_next201, label %cond_true176
-
-cond_true176: ; preds = %cond_next130
- ret i32 0
-
-cond_next201: ; preds = %cond_next130
- switch i32 0, label %bb19955 [
- i32 0, label %bb1266
- i32 1, label %bb5018
- i32 2, label %bb5075
- i32 3, label %cond_true5534
- i32 4, label %cond_true5534
- i32 5, label %bb6039
- i32 6, label %bb6181
- i32 7, label %bb6323
- i32 8, label %bb6463
- i32 9, label %bb6605
- i32 10, label %bb6746
- i32 11, label %cond_next5871
- i32 16, label %bb5452
- i32 17, label %bb5395
- i32 19, label %bb4883
- i32 20, label %bb5136
- i32 23, label %bb12899
- i32 64, label %bb2162
- i32 69, label %bb1447
- i32 70, label %bb1737
- i32 71, label %bb1447
- i32 72, label %bb1737
- i32 73, label %cond_true1984
- i32 75, label %bb740
- i32 80, label %bb552
- ]
-
-bb552: ; preds = %cond_next201
- ret i32 0
-
-bb740: ; preds = %cond_next201
- ret i32 0
-
-bb1266: ; preds = %cond_next201
- ret i32 0
-
-bb1447: ; preds = %cond_next201, %cond_next201
- ret i32 0
-
-bb1737: ; preds = %cond_next201, %cond_next201
- ret i32 0
-
-cond_true1984: ; preds = %cond_next201
- ret i32 0
-
-bb2162: ; preds = %cond_next201
- ret i32 0
-
-bb4883: ; preds = %cond_next201
- ret i32 0
-
-bb5018: ; preds = %cond_next201
- ret i32 0
-
-bb5075: ; preds = %cond_next201
- ret i32 0
-
-bb5136: ; preds = %cond_next201
- ret i32 0
-
-bb5395: ; preds = %cond_next201
- ret i32 0
-
-bb5452: ; preds = %cond_next201
- ret i32 0
-
-cond_true5534: ; preds = %cond_next201, %cond_next201
- ret i32 0
-
-cond_next5871: ; preds = %cond_next201
- ret i32 0
-
-bb6039: ; preds = %cond_next201
- ret i32 0
-
-bb6181: ; preds = %cond_next201
- ret i32 0
-
-bb6323: ; preds = %cond_next201
- ret i32 0
-
-bb6463: ; preds = %cond_next201
- ret i32 0
-
-bb6605: ; preds = %cond_next201
- ret i32 0
-
-bb6746: ; preds = %cond_next201
- ret i32 0
-
-bb12899: ; preds = %cond_next201
- ret i32 0
-
-bb19955: ; preds = %cond_next201
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-14-Coalescer-Bug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-11-14-Coalescer-Bug.ll
deleted file mode 100644
index 9c004f9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-14-Coalescer-Bug.ll
+++ /dev/null
@@ -1,68 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=att | grep movl | count 2
-; RUN: llc < %s -march=x86 -x86-asm-syntax=att | not grep movb
-
- %struct.double_int = type { i64, i64 }
- %struct.tree_common = type <{ i8, [3 x i8] }>
- %struct.tree_int_cst = type { %struct.tree_common, %struct.double_int }
- %struct.tree_node = type { %struct.tree_int_cst }
-@tree_code_type = external constant [0 x i32] ; <[0 x i32]*> [#uses=1]
-
-define i32 @simple_cst_equal(%struct.tree_node* %t1, %struct.tree_node* %t2) nounwind {
-entry:
- %tmp2526 = bitcast %struct.tree_node* %t1 to i32* ; <i32*> [#uses=1]
- br i1 false, label %UnifiedReturnBlock, label %bb21
-
-bb21: ; preds = %entry
- %tmp27 = load i32* %tmp2526, align 4 ; <i32> [#uses=1]
- %tmp29 = and i32 %tmp27, 255 ; <i32> [#uses=3]
- %tmp2930 = trunc i32 %tmp29 to i8 ; <i8> [#uses=1]
- %tmp37 = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp39 = and i32 %tmp37, 255 ; <i32> [#uses=2]
- %tmp3940 = trunc i32 %tmp39 to i8 ; <i8> [#uses=1]
- %tmp43 = add i32 %tmp29, -3 ; <i32> [#uses=1]
- %tmp44 = icmp ult i32 %tmp43, 3 ; <i1> [#uses=1]
- br i1 %tmp44, label %bb47.split, label %bb76
-
-bb47.split: ; preds = %bb21
- ret i32 0
-
-bb76: ; preds = %bb21
- br i1 false, label %bb82, label %bb146.split
-
-bb82: ; preds = %bb76
- %tmp94 = getelementptr [0 x i32]* @tree_code_type, i32 0, i32 %tmp39 ; <i32*> [#uses=1]
- %tmp95 = load i32* %tmp94, align 4 ; <i32> [#uses=1]
- %tmp9596 = trunc i32 %tmp95 to i8 ; <i8> [#uses=1]
- %tmp98 = add i8 %tmp9596, -4 ; <i8> [#uses=1]
- %tmp99 = icmp ugt i8 %tmp98, 5 ; <i1> [#uses=1]
- br i1 %tmp99, label %bb102, label %bb106
-
-bb102: ; preds = %bb82
- ret i32 0
-
-bb106: ; preds = %bb82
- ret i32 0
-
-bb146.split: ; preds = %bb76
- %tmp149 = icmp eq i8 %tmp2930, %tmp3940 ; <i1> [#uses=1]
- br i1 %tmp149, label %bb153, label %UnifiedReturnBlock
-
-bb153: ; preds = %bb146.split
- switch i32 %tmp29, label %UnifiedReturnBlock [
- i32 0, label %bb155
- i32 1, label %bb187
- ]
-
-bb155: ; preds = %bb153
- ret i32 0
-
-bb187: ; preds = %bb153
- %tmp198 = icmp eq %struct.tree_node* %t1, %t2 ; <i1> [#uses=1]
- br i1 %tmp198, label %bb201, label %UnifiedReturnBlock
-
-bb201: ; preds = %bb187
- ret i32 0
-
-UnifiedReturnBlock: ; preds = %bb187, %bb153, %bb146.split, %entry
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-21-UndeadIllegalNode.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-11-21-UndeadIllegalNode.ll
deleted file mode 100644
index e220be6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-21-UndeadIllegalNode.ll
+++ /dev/null
@@ -1,159 +0,0 @@
-; RUN: llc < %s -o -
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i686-pc-linux-gnu"
- %struct.RETURN = type { i32, i32 }
- %struct.ada__finalization__controlled = type { %struct.system__finalization_root__root_controlled }
- %struct.ada__streams__root_stream_type = type { %struct.ada__tags__dispatch_table* }
- %struct.ada__strings__unbounded__string_access = type { i8*, %struct.RETURN* }
- %struct.ada__strings__unbounded__unbounded_string = type { %struct.ada__finalization__controlled, %struct.ada__strings__unbounded__string_access, i32 }
- %struct.ada__tags__dispatch_table = type { [1 x i32] }
- %struct.exception = type { i8, i8, i32, i8*, i8*, i32, i8* }
- %struct.system__finalization_root__root_controlled = type { %struct.ada__streams__root_stream_type, %struct.system__finalization_root__root_controlled*, %struct.system__finalization_root__root_controlled* }
- %struct.system__standard_library__exception_data = type { i8, i8, i32, i32, %struct.system__standard_library__exception_data*, i32, void ()* }
-@C.495.7639 = internal constant %struct.RETURN { i32 1, i32 16 } ; <%struct.RETURN*> [#uses=1]
-@ada__strings__index_error = external global %struct.exception ; <%struct.exception*> [#uses=1]
-@.str5 = internal constant [16 x i8] c"a-strunb.adb:690" ; <[16 x i8]*> [#uses=1]
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
-
-declare void @ada__strings__unbounded__realloc_for_chunk(%struct.ada__strings__unbounded__unbounded_string*, i32)
-
-declare void @__gnat_raise_exception(%struct.system__standard_library__exception_data*, i64)
-
-define void @ada__strings__unbounded__insert__2(%struct.ada__strings__unbounded__unbounded_string* %source, i32 %before, i64 %new_item.0.0) {
-entry:
- %tmp24636 = lshr i64 %new_item.0.0, 32 ; <i64> [#uses=1]
- %tmp24637 = trunc i64 %tmp24636 to i32 ; <i32> [#uses=1]
- %tmp24638 = inttoptr i32 %tmp24637 to %struct.RETURN* ; <%struct.RETURN*> [#uses=2]
- %tmp25 = getelementptr %struct.RETURN* %tmp24638, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp26 = load i32* %tmp25, align 4 ; <i32> [#uses=1]
- %tmp29 = getelementptr %struct.RETURN* %tmp24638, i32 0, i32 1 ; <i32*> [#uses=1]
- %tmp30 = load i32* %tmp29, align 4 ; <i32> [#uses=1]
- %tmp63 = getelementptr %struct.ada__strings__unbounded__unbounded_string* %source, i32 0, i32 1, i32 1 ; <%struct.RETURN**> [#uses=5]
- %tmp64 = load %struct.RETURN** %tmp63, align 4 ; <%struct.RETURN*> [#uses=1]
- %tmp65 = getelementptr %struct.RETURN* %tmp64, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp66 = load i32* %tmp65, align 4 ; <i32> [#uses=1]
- %tmp67 = icmp sgt i32 %tmp66, %before ; <i1> [#uses=1]
- br i1 %tmp67, label %bb77, label %bb
-
-bb: ; preds = %entry
- %tmp71 = getelementptr %struct.ada__strings__unbounded__unbounded_string* %source, i32 0, i32 2 ; <i32*> [#uses=4]
- %tmp72 = load i32* %tmp71, align 4 ; <i32> [#uses=1]
- %tmp73 = add i32 %tmp72, 1 ; <i32> [#uses=1]
- %tmp74 = icmp slt i32 %tmp73, %before ; <i1> [#uses=1]
- br i1 %tmp74, label %bb77, label %bb84
-
-bb77: ; preds = %bb, %entry
- tail call void @__gnat_raise_exception( %struct.system__standard_library__exception_data* bitcast (%struct.exception* @ada__strings__index_error to %struct.system__standard_library__exception_data*), i64 or (i64 zext (i32 ptrtoint ([16 x i8]* @.str5 to i32) to i64), i64 shl (i64 zext (i32 ptrtoint (%struct.RETURN* @C.495.7639 to i32) to i64), i64 32)) )
- unreachable
-
-bb84: ; preds = %bb
- %tmp93 = sub i32 %tmp30, %tmp26 ; <i32> [#uses=2]
- %tmp9394 = sext i32 %tmp93 to i36 ; <i36> [#uses=1]
- %tmp95 = shl i36 %tmp9394, 3 ; <i36> [#uses=1]
- %tmp96 = add i36 %tmp95, 8 ; <i36> [#uses=2]
- %tmp97 = icmp sgt i36 %tmp96, -1 ; <i1> [#uses=1]
- %tmp100 = select i1 %tmp97, i36 %tmp96, i36 0 ; <i36> [#uses=2]
- %tmp101 = icmp slt i36 %tmp100, 17179869177 ; <i1> [#uses=1]
- %tmp100.cast = trunc i36 %tmp100 to i32 ; <i32> [#uses=1]
- %min102 = select i1 %tmp101, i32 %tmp100.cast, i32 -8 ; <i32> [#uses=1]
- tail call void @ada__strings__unbounded__realloc_for_chunk( %struct.ada__strings__unbounded__unbounded_string* %source, i32 %min102 )
- %tmp148 = load i32* %tmp71, align 4 ; <i32> [#uses=4]
- %tmp152 = add i32 %tmp93, 1 ; <i32> [#uses=2]
- %tmp153 = icmp sgt i32 %tmp152, -1 ; <i1> [#uses=1]
- %max154 = select i1 %tmp153, i32 %tmp152, i32 0 ; <i32> [#uses=5]
- %tmp155 = add i32 %tmp148, %max154 ; <i32> [#uses=5]
- %tmp315 = getelementptr %struct.ada__strings__unbounded__unbounded_string* %source, i32 0, i32 1, i32 0 ; <i8**> [#uses=4]
- %tmp328 = load %struct.RETURN** %tmp63, align 4 ; <%struct.RETURN*> [#uses=1]
- %tmp329 = getelementptr %struct.RETURN* %tmp328, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp330 = load i32* %tmp329, align 4 ; <i32> [#uses=4]
- %tmp324 = add i32 %max154, %before ; <i32> [#uses=3]
- %tmp331 = sub i32 %tmp324, %tmp330 ; <i32> [#uses=1]
- %tmp349 = sub i32 %before, %tmp330 ; <i32> [#uses=1]
- %tmp356 = icmp sgt i32 %tmp331, %tmp349 ; <i1> [#uses=1]
- %tmp431 = icmp sgt i32 %tmp324, %tmp155 ; <i1> [#uses=2]
- br i1 %tmp356, label %bb420, label %bb359
-
-bb359: ; preds = %bb84
- br i1 %tmp431, label %bb481, label %bb382
-
-bb382: ; preds = %bb382, %bb359
- %indvar = phi i32 [ 0, %bb359 ], [ %indvar.next, %bb382 ] ; <i32> [#uses=2]
- %max379.pn = phi i32 [ %max154, %bb359 ], [ %L492b.0, %bb382 ] ; <i32> [#uses=1]
- %before.pn = phi i32 [ %before, %bb359 ], [ 1, %bb382 ] ; <i32> [#uses=1]
- %L492b.0 = add i32 %before.pn, %max379.pn ; <i32> [#uses=3]
- %tmp386 = load %struct.RETURN** %tmp63, align 4 ; <%struct.RETURN*> [#uses=1]
- %tmp387 = getelementptr %struct.RETURN* %tmp386, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp388 = load i32* %tmp387, align 4 ; <i32> [#uses=2]
- %tmp392 = load i8** %tmp315, align 4 ; <i8*> [#uses=2]
- %R493b.0 = add i32 %indvar, %before ; <i32> [#uses=1]
- %tmp405 = sub i32 %R493b.0, %tmp388 ; <i32> [#uses=1]
- %tmp406 = getelementptr i8* %tmp392, i32 %tmp405 ; <i8*> [#uses=1]
- %tmp407 = load i8* %tmp406, align 1 ; <i8> [#uses=1]
- %tmp408 = sub i32 %L492b.0, %tmp388 ; <i32> [#uses=1]
- %tmp409 = getelementptr i8* %tmp392, i32 %tmp408 ; <i8*> [#uses=1]
- store i8 %tmp407, i8* %tmp409, align 1
- %tmp414 = icmp eq i32 %L492b.0, %tmp155 ; <i1> [#uses=1]
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
- br i1 %tmp414, label %bb481, label %bb382
-
-bb420: ; preds = %bb84
- br i1 %tmp431, label %bb481, label %bb436.preheader
-
-bb436.preheader: ; preds = %bb420
- %tmp4468 = load i8** %tmp315, align 4 ; <i8*> [#uses=2]
- %tmp4599 = sub i32 %tmp148, %tmp330 ; <i32> [#uses=1]
- %tmp46010 = getelementptr i8* %tmp4468, i32 %tmp4599 ; <i8*> [#uses=1]
- %tmp46111 = load i8* %tmp46010, align 1 ; <i8> [#uses=1]
- %tmp46212 = sub i32 %tmp155, %tmp330 ; <i32> [#uses=1]
- %tmp46313 = getelementptr i8* %tmp4468, i32 %tmp46212 ; <i8*> [#uses=1]
- store i8 %tmp46111, i8* %tmp46313, align 1
- %exitcond14 = icmp eq i32 %tmp155, %tmp324 ; <i1> [#uses=1]
- br i1 %exitcond14, label %bb481, label %bb.nph
-
-bb.nph: ; preds = %bb436.preheader
- %tmp5 = sub i32 %tmp148, %before ; <i32> [#uses=1]
- br label %bb478
-
-bb478: ; preds = %bb478, %bb.nph
- %indvar6422 = phi i32 [ 0, %bb.nph ], [ %indvar.next643, %bb478 ] ; <i32> [#uses=1]
- %indvar.next643 = add i32 %indvar6422, 1 ; <i32> [#uses=4]
- %L490b.0 = sub i32 %tmp155, %indvar.next643 ; <i32> [#uses=1]
- %R491b.0 = sub i32 %tmp148, %indvar.next643 ; <i32> [#uses=1]
- %tmp440 = load %struct.RETURN** %tmp63, align 4 ; <%struct.RETURN*> [#uses=1]
- %tmp441 = getelementptr %struct.RETURN* %tmp440, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp442 = load i32* %tmp441, align 4 ; <i32> [#uses=2]
- %tmp446 = load i8** %tmp315, align 4 ; <i8*> [#uses=2]
- %tmp459 = sub i32 %R491b.0, %tmp442 ; <i32> [#uses=1]
- %tmp460 = getelementptr i8* %tmp446, i32 %tmp459 ; <i8*> [#uses=1]
- %tmp461 = load i8* %tmp460, align 1 ; <i8> [#uses=1]
- %tmp462 = sub i32 %L490b.0, %tmp442 ; <i32> [#uses=1]
- %tmp463 = getelementptr i8* %tmp446, i32 %tmp462 ; <i8*> [#uses=1]
- store i8 %tmp461, i8* %tmp463, align 1
- %exitcond = icmp eq i32 %indvar.next643, %tmp5 ; <i1> [#uses=1]
- br i1 %exitcond, label %bb481, label %bb478
-
-bb481: ; preds = %bb478, %bb436.preheader, %bb420, %bb382, %bb359
- %tmp577 = add i32 %before, -1 ; <i32> [#uses=3]
- %tmp578 = add i32 %max154, %tmp577 ; <i32> [#uses=2]
- %tmp581 = icmp sge i32 %tmp578, %tmp577 ; <i1> [#uses=1]
- %max582 = select i1 %tmp581, i32 %tmp578, i32 %tmp577 ; <i32> [#uses=1]
- %tmp584 = sub i32 %max582, %before ; <i32> [#uses=1]
- %tmp585 = add i32 %tmp584, 1 ; <i32> [#uses=2]
- %tmp586 = icmp sgt i32 %tmp585, -1 ; <i1> [#uses=1]
- %max587 = select i1 %tmp586, i32 %tmp585, i32 0 ; <i32> [#uses=1]
- %tmp591 = load %struct.RETURN** %tmp63, align 4 ; <%struct.RETURN*> [#uses=1]
- %tmp592 = getelementptr %struct.RETURN* %tmp591, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp593 = load i32* %tmp592, align 4 ; <i32> [#uses=1]
- %tmp597 = load i8** %tmp315, align 4 ; <i8*> [#uses=1]
- %tmp600621 = trunc i64 %new_item.0.0 to i32 ; <i32> [#uses=1]
- %tmp600622 = inttoptr i32 %tmp600621 to i8* ; <i8*> [#uses=1]
- %tmp601 = sub i32 %before, %tmp593 ; <i32> [#uses=1]
- %tmp602 = getelementptr i8* %tmp597, i32 %tmp601 ; <i8*> [#uses=1]
- tail call void @llvm.memcpy.i32( i8* %tmp602, i8* %tmp600622, i32 %max587, i32 1 )
- %tmp606 = load i32* %tmp71, align 4 ; <i32> [#uses=1]
- %tmp613 = add i32 %tmp606, %max154 ; <i32> [#uses=1]
- store i32 %tmp613, i32* %tmp71, align 4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-30-LoadFolding-Bug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-11-30-LoadFolding-Bug.ll
deleted file mode 100644
index 8e315f4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-11-30-LoadFolding-Bug.ll
+++ /dev/null
@@ -1,86 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
-; Increment in loop bb.i28.i adjusted to 2, to prevent loop reversal from
-; kicking in.
-
-declare fastcc void @rdft(i32, i32, double*, i32*, double*)
-
-define fastcc void @mp_sqrt(i32 %n, i32 %radix, i32* %in, i32* %out, i32* %tmp1, i32* %tmp2, i32 %nfft, double* %tmp1fft, double* %tmp2fft, i32* %ip, double* %w) nounwind {
-entry:
- br label %bb.i5
-
-bb.i5: ; preds = %bb.i5, %entry
- %nfft_init.0.i = phi i32 [ 1, %entry ], [ %tmp7.i3, %bb.i5 ] ; <i32> [#uses=1]
- %foo = phi i1 [1, %entry], [0, %bb.i5]
- %tmp7.i3 = shl i32 %nfft_init.0.i, 1 ; <i32> [#uses=2]
- br i1 %foo, label %bb.i5, label %mp_unexp_mp2d.exit.i
-
-mp_unexp_mp2d.exit.i: ; preds = %bb.i5
- br i1 %foo, label %cond_next.i, label %cond_true.i
-
-cond_true.i: ; preds = %mp_unexp_mp2d.exit.i
- ret void
-
-cond_next.i: ; preds = %mp_unexp_mp2d.exit.i
- %tmp22.i = sdiv i32 0, 2 ; <i32> [#uses=2]
- br i1 %foo, label %cond_true29.i, label %cond_next36.i
-
-cond_true29.i: ; preds = %cond_next.i
- ret void
-
-cond_next36.i: ; preds = %cond_next.i
- store i32 %tmp22.i, i32* null, align 4
- %tmp8.i14.i = select i1 %foo, i32 1, i32 0 ; <i32> [#uses=1]
- br label %bb.i28.i
-
-bb.i28.i: ; preds = %bb.i28.i, %cond_next36.i
-; CHECK: %bb.i28.i
-; CHECK: addl $2
-; CHECK: addl $-2
- %j.0.reg2mem.0.i16.i = phi i32 [ 0, %cond_next36.i ], [ %indvar.next39.i, %bb.i28.i ] ; <i32> [#uses=2]
- %din_addr.1.reg2mem.0.i17.i = phi double [ 0.000000e+00, %cond_next36.i ], [ %tmp16.i25.i, %bb.i28.i ] ; <double> [#uses=1]
- %tmp1.i18.i = fptosi double %din_addr.1.reg2mem.0.i17.i to i32 ; <i32> [#uses=2]
- %tmp4.i19.i = icmp slt i32 %tmp1.i18.i, %radix ; <i1> [#uses=1]
- %x.0.i21.i = select i1 %tmp4.i19.i, i32 %tmp1.i18.i, i32 0 ; <i32> [#uses=1]
- %tmp41.sum.i = add i32 %j.0.reg2mem.0.i16.i, 2 ; <i32> [#uses=0]
- %tmp1213.i23.i = sitofp i32 %x.0.i21.i to double ; <double> [#uses=1]
- %tmp15.i24.i = fsub double 0.000000e+00, %tmp1213.i23.i ; <double> [#uses=1]
- %tmp16.i25.i = fmul double 0.000000e+00, %tmp15.i24.i ; <double> [#uses=1]
- %indvar.next39.i = add i32 %j.0.reg2mem.0.i16.i, 2 ; <i32> [#uses=2]
- %exitcond40.i = icmp eq i32 %indvar.next39.i, %tmp8.i14.i ; <i1> [#uses=1]
- br i1 %exitcond40.i, label %mp_unexp_d2mp.exit29.i, label %bb.i28.i
-
-mp_unexp_d2mp.exit29.i: ; preds = %bb.i28.i
- %tmp46.i = sub i32 0, %tmp22.i ; <i32> [#uses=1]
- store i32 %tmp46.i, i32* null, align 4
- br i1 %exitcond40.i, label %bb.i.i, label %mp_sqrt_init.exit
-
-bb.i.i: ; preds = %bb.i.i, %mp_unexp_d2mp.exit29.i
- br label %bb.i.i
-
-mp_sqrt_init.exit: ; preds = %mp_unexp_d2mp.exit29.i
- tail call fastcc void @mp_mul_csqu( i32 0, double* %tmp1fft )
- tail call fastcc void @rdft( i32 0, i32 -1, double* null, i32* %ip, double* %w )
- tail call fastcc void @mp_mul_d2i( i32 0, i32 %radix, i32 0, double* %tmp1fft, i32* %tmp2 )
- br i1 %exitcond40.i, label %cond_false.i, label %cond_true36.i
-
-cond_true36.i: ; preds = %mp_sqrt_init.exit
- ret void
-
-cond_false.i: ; preds = %mp_sqrt_init.exit
- tail call fastcc void @mp_round( i32 0, i32 %radix, i32 0, i32* %out )
- tail call fastcc void @mp_add( i32 0, i32 %radix, i32* %tmp1, i32* %tmp2, i32* %tmp1 )
- tail call fastcc void @mp_sub( i32 0, i32 %radix, i32* %in, i32* %tmp2, i32* %tmp2 )
- tail call fastcc void @mp_round( i32 0, i32 %radix, i32 0, i32* %tmp1 )
- tail call fastcc void @mp_mul_d2i( i32 0, i32 %radix, i32 %tmp7.i3, double* %tmp2fft, i32* %tmp2 )
- ret void
-}
-
-declare fastcc void @mp_add(i32, i32, i32*, i32*, i32*)
-
-declare fastcc void @mp_sub(i32, i32, i32*, i32*, i32*)
-
-declare fastcc void @mp_round(i32, i32, i32, i32*)
-
-declare fastcc void @mp_mul_csqu(i32, double*)
-
-declare fastcc void @mp_mul_d2i(i32, i32, i32, double*, i32*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-12-11-FoldImpDefSpill.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-12-11-FoldImpDefSpill.ll
deleted file mode 100644
index ca995cc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-12-11-FoldImpDefSpill.ll
+++ /dev/null
@@ -1,680 +0,0 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin | not grep IMPLICIT_DEF
-
- %struct.__sbuf = type { i8*, i32 }
- %struct.ggBRDF = type { i32 (...)** }
- %"struct.ggBST<ggMaterial>" = type { %"struct.ggBSTNode<ggMaterial>"*, i32 }
- %"struct.ggBST<ggRasterSurfaceTexture>" = type { %"struct.ggBSTNode<ggRasterSurfaceTexture>"*, i32 }
- %"struct.ggBST<ggSolidTexture>" = type { %"struct.ggBSTNode<ggSolidTexture>"*, i32 }
- %"struct.ggBST<ggSpectrum>" = type { %"struct.ggBSTNode<ggSpectrum>"*, i32 }
- %"struct.ggBST<mrObjectRecord>" = type { %"struct.ggBSTNode<mrObjectRecord>"*, i32 }
- %"struct.ggBSTNode<ggMaterial>" = type { %"struct.ggBSTNode<ggMaterial>"*, %"struct.ggBSTNode<ggMaterial>"*, %struct.ggString, %struct.ggMaterial* }
- %"struct.ggBSTNode<ggRasterSurfaceTexture>" = type { %"struct.ggBSTNode<ggRasterSurfaceTexture>"*, %"struct.ggBSTNode<ggRasterSurfaceTexture>"*, %struct.ggString, %struct.ggRasterSurfaceTexture* }
- %"struct.ggBSTNode<ggSolidTexture>" = type { %"struct.ggBSTNode<ggSolidTexture>"*, %"struct.ggBSTNode<ggSolidTexture>"*, %struct.ggString, %struct.ggBRDF* }
- %"struct.ggBSTNode<ggSpectrum>" = type { %"struct.ggBSTNode<ggSpectrum>"*, %"struct.ggBSTNode<ggSpectrum>"*, %struct.ggString, %struct.ggSpectrum* }
- %"struct.ggBSTNode<mrObjectRecord>" = type { %"struct.ggBSTNode<mrObjectRecord>"*, %"struct.ggBSTNode<mrObjectRecord>"*, %struct.ggString, %struct.mrObjectRecord* }
- %"struct.ggDictionary<ggMaterial>" = type { %"struct.ggBST<ggMaterial>" }
- %"struct.ggDictionary<ggRasterSurfaceTexture>" = type { %"struct.ggBST<ggRasterSurfaceTexture>" }
- %"struct.ggDictionary<ggSolidTexture>" = type { %"struct.ggBST<ggSolidTexture>" }
- %"struct.ggDictionary<ggSpectrum>" = type { %"struct.ggBST<ggSpectrum>" }
- %"struct.ggDictionary<mrObjectRecord>" = type { %"struct.ggBST<mrObjectRecord>" }
- %struct.ggHAffineMatrix3 = type { %struct.ggHMatrix3 }
- %struct.ggHBoxMatrix3 = type { %struct.ggHAffineMatrix3 }
- %struct.ggHMatrix3 = type { [4 x [4 x double]] }
- %struct.ggMaterial = type { i32 (...)**, %struct.ggBRDF* }
- %struct.ggPoint3 = type { [3 x double] }
- %"struct.ggRGBPixel<char>" = type { [3 x i8], i8 }
- %"struct.ggRaster<ggRGBPixel<unsigned char> >" = type { i32, i32, %"struct.ggRGBPixel<char>"* }
- %struct.ggRasterSurfaceTexture = type { %"struct.ggRaster<ggRGBPixel<unsigned char> >"* }
- %struct.ggSolidNoise3 = type { i32, [256 x %struct.ggPoint3], [256 x i32] }
- %struct.ggSpectrum = type { [8 x float] }
- %struct.ggString = type { %"struct.ggString::StringRep"* }
- %"struct.ggString::StringRep" = type { i32, i32, [1 x i8] }
- %"struct.ggTrain<mrPixelRenderer*>" = type { %struct.ggBRDF**, i32, i32 }
- %struct.mrObjectRecord = type { %struct.ggHBoxMatrix3, %struct.ggHBoxMatrix3, %struct.mrSurfaceList, %struct.ggMaterial*, i32, %struct.ggRasterSurfaceTexture*, %struct.ggBRDF*, i32, i32 }
- %struct.mrScene = type { %struct.ggSpectrum, %struct.ggSpectrum, %struct.ggBRDF*, %struct.ggBRDF*, %struct.ggBRDF*, i32, double, %"struct.ggDictionary<mrObjectRecord>", %"struct.ggDictionary<ggRasterSurfaceTexture>", %"struct.ggDictionary<ggSolidTexture>", %"struct.ggDictionary<ggSpectrum>", %"struct.ggDictionary<ggMaterial>" }
- %struct.mrSurfaceList = type { %struct.ggBRDF, %"struct.ggTrain<mrPixelRenderer*>" }
- %"struct.std::__codecvt_abstract_base<char,char,__mbstate_t>" = type { %"struct.std::locale::facet" }
- %"struct.std::basic_ios<char,std::char_traits<char> >" = type { %"struct.std::ios_base", %"struct.std::basic_ostream<char,std::char_traits<char> >"*, i8, i8, %"struct.std::basic_streambuf<char,std::char_traits<char> >"*, %"struct.std::ctype<char>"*, %"struct.std::__codecvt_abstract_base<char,char,__mbstate_t>"*, %"struct.std::__codecvt_abstract_base<char,char,__mbstate_t>"* }
- %"struct.std::basic_istream<char,std::char_traits<char> >" = type { i32 (...)**, i32, %"struct.std::basic_ios<char,std::char_traits<char> >" }
- %"struct.std::basic_ostream<char,std::char_traits<char> >" = type { i32 (...)**, %"struct.std::basic_ios<char,std::char_traits<char> >" }
- %"struct.std::basic_streambuf<char,std::char_traits<char> >" = type { i32 (...)**, i8*, i8*, i8*, i8*, i8*, i8*, %"struct.std::locale" }
- %"struct.std::ctype<char>" = type { %"struct.std::locale::facet", i32*, i8, i32*, i32*, i32*, i8, [256 x i8], [256 x i8], i8 }
- %"struct.std::ios_base" = type { i32 (...)**, i32, i32, i32, i32, i32, %"struct.std::ios_base::_Callback_list"*, %struct.__sbuf, [8 x %struct.__sbuf], i32, %struct.__sbuf*, %"struct.std::locale" }
- %"struct.std::ios_base::_Callback_list" = type { %"struct.std::ios_base::_Callback_list"*, void (i32, %"struct.std::ios_base"*, i32)*, i32, i32 }
- %"struct.std::locale" = type { %"struct.std::locale::_Impl"* }
- %"struct.std::locale::_Impl" = type { i32, %"struct.std::locale::facet"**, i32, %"struct.std::locale::facet"**, i8** }
- %"struct.std::locale::facet" = type { i32 (...)**, i32 }
-@.str80 = external constant [7 x i8] ; <[7 x i8]*> [#uses=1]
-@.str81 = external constant [11 x i8] ; <[11 x i8]*> [#uses=1]
-
-define fastcc void @_ZN7mrScene4ReadERSi(%struct.mrScene* %this, %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces) {
-entry:
- %tmp6.i.i8288 = invoke i8* @_Znam( i32 12 )
- to label %_ZN8ggStringC1Ei.exit unwind label %lpad ; <i8*> [#uses=0]
-
-_ZN8ggStringC1Ei.exit: ; preds = %entry
- %tmp6.i.i8995 = invoke i8* @_Znam( i32 12 )
- to label %_ZN8ggStringC1Ei.exit96 unwind label %lpad3825 ; <i8*> [#uses=0]
-
-_ZN8ggStringC1Ei.exit96: ; preds = %_ZN8ggStringC1Ei.exit
- %tmp6.i.i97103 = invoke i8* @_Znam( i32 12 )
- to label %_ZN8ggStringC1Ei.exit104 unwind label %lpad3829 ; <i8*> [#uses=0]
-
-_ZN8ggStringC1Ei.exit104: ; preds = %_ZN8ggStringC1Ei.exit96
- %tmp6.i.i105111 = invoke i8* @_Znam( i32 12 )
- to label %_ZN8ggStringC1Ei.exit112 unwind label %lpad3833 ; <i8*> [#uses=0]
-
-_ZN8ggStringC1Ei.exit112: ; preds = %_ZN8ggStringC1Ei.exit104
- %tmp6.i.i122128 = invoke i8* @_Znam( i32 12 )
- to label %_ZN8ggStringC1Ei.exit129 unwind label %lpad3837 ; <i8*> [#uses=0]
-
-_ZN8ggStringC1Ei.exit129: ; preds = %_ZN8ggStringC1Ei.exit112
- %tmp6.i.i132138 = invoke i8* @_Znam( i32 12 )
- to label %_ZN8ggStringC1Ei.exit139 unwind label %lpad3841 ; <i8*> [#uses=0]
-
-_ZN8ggStringC1Ei.exit139: ; preds = %_ZN8ggStringC1Ei.exit129
- %tmp295 = invoke i8* @_Znwm( i32 16 )
- to label %invcont294 unwind label %lpad3845 ; <i8*> [#uses=0]
-
-invcont294: ; preds = %_ZN8ggStringC1Ei.exit139
- %tmp10.i.i141 = invoke i8* @_Znam( i32 16 )
- to label %_ZN13mrSurfaceListC1Ev.exit unwind label %lpad3849 ; <i8*> [#uses=0]
-
-_ZN13mrSurfaceListC1Ev.exit: ; preds = %invcont294
- %tmp3.i148 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, i8* null )
- to label %tmp3.i.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-tmp3.i.noexc: ; preds = %_ZN13mrSurfaceListC1Ev.exit
- %tmp15.i149 = invoke i8* @_ZNKSt9basic_iosIcSt11char_traitsIcEEcvPvEv( %"struct.std::basic_ios<char,std::char_traits<char> >"* null )
- to label %tmp15.i.noexc unwind label %lpad3845 ; <i8*> [#uses=0]
-
-tmp15.i.noexc: ; preds = %tmp3.i.noexc
- br i1 false, label %bb308, label %bb.i
-
-bb.i: ; preds = %tmp15.i.noexc
- ret void
-
-bb308: ; preds = %tmp15.i.noexc
- br i1 false, label %bb3743.preheader, label %bb315
-
-bb3743.preheader: ; preds = %bb308
- %tmp16.i3862 = getelementptr %struct.ggPoint3* null, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp16.i3859 = getelementptr %struct.ggPoint3* null, i32 0, i32 0, i32 0 ; <double*> [#uses=3]
- br label %bb3743
-
-bb315: ; preds = %bb308
- ret void
-
-bb333: ; preds = %invcont3758, %invcont335
- %tmp3.i167180 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, i8* null )
- to label %tmp3.i167.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-tmp3.i167.noexc: ; preds = %bb333
- %tmp15.i182 = invoke i8* @_ZNKSt9basic_iosIcSt11char_traitsIcEEcvPvEv( %"struct.std::basic_ios<char,std::char_traits<char> >"* null )
- to label %tmp15.i.noexc181 unwind label %lpad3845 ; <i8*> [#uses=0]
-
-tmp15.i.noexc181: ; preds = %tmp3.i167.noexc
- br i1 false, label %invcont335, label %bb.i178
-
-bb.i178: ; preds = %tmp15.i.noexc181
- ret void
-
-invcont335: ; preds = %tmp15.i.noexc181
- br i1 false, label %bb3743, label %bb333
-
-bb345: ; preds = %invcont3758
- br i1 false, label %bb353, label %bb360
-
-bb353: ; preds = %bb345
- %tmp356 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, double* null )
- to label %bb3743 unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-bb360: ; preds = %bb345
- br i1 false, label %bb368, label %bb374
-
-bb368: ; preds = %bb360
- %tmp373 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, double* null )
- to label %bb3743 unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-bb374: ; preds = %bb360
- br i1 false, label %bb396, label %bb421
-
-bb396: ; preds = %bb374
- ret void
-
-bb421: ; preds = %bb374
- br i1 false, label %bb429, label %bb530
-
-bb429: ; preds = %bb421
- ret void
-
-bb530: ; preds = %bb421
- br i1 false, label %bb538, label %bb673
-
-bb538: ; preds = %bb530
- ret void
-
-bb673: ; preds = %bb530
- br i1 false, label %bb681, label %bb778
-
-bb681: ; preds = %bb673
- ret void
-
-bb778: ; preds = %bb673
- br i1 false, label %bb786, label %bb891
-
-bb786: ; preds = %bb778
- ret void
-
-bb891: ; preds = %bb778
- br i1 false, label %bb899, label %bb998
-
-bb899: ; preds = %bb891
- ret void
-
-bb998: ; preds = %bb891
- br i1 false, label %bb1168, label %bb1190
-
-bb1168: ; preds = %bb998
- ret void
-
-bb1190: ; preds = %bb998
- br i1 false, label %bb1198, label %bb1220
-
-bb1198: ; preds = %bb1190
- ret void
-
-bb1220: ; preds = %bb1190
- br i1 false, label %bb1228, label %bb1250
-
-bb1228: ; preds = %bb1220
- ret void
-
-bb1250: ; preds = %bb1220
- br i1 false, label %bb1258, label %bb1303
-
-bb1258: ; preds = %bb1250
- ret void
-
-bb1303: ; preds = %bb1250
- br i1 false, label %bb1311, label %bb1366
-
-bb1311: ; preds = %bb1303
- ret void
-
-bb1366: ; preds = %bb1303
- br i1 false, label %bb1374, label %bb1432
-
-bb1374: ; preds = %bb1366
- ret void
-
-bb1432: ; preds = %bb1366
- br i1 false, label %bb1440, label %bb1495
-
-bb1440: ; preds = %bb1432
- ret void
-
-bb1495: ; preds = %bb1432
- br i1 false, label %bb1503, label %bb1561
-
-bb1503: ; preds = %bb1495
- ret void
-
-bb1561: ; preds = %bb1495
- br i1 false, label %bb1569, label %bb1624
-
-bb1569: ; preds = %bb1561
- ret void
-
-bb1624: ; preds = %bb1561
- br i1 false, label %bb1632, label %bb1654
-
-bb1632: ; preds = %bb1624
- store double 0.000000e+00, double* %tmp16.i3859, align 8
- %tmp3.i38383852 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, i8* null )
- to label %tmp3.i3838.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-tmp3.i3838.noexc: ; preds = %bb1632
- %tmp15.i38473853 = invoke i8* @_ZNKSt9basic_iosIcSt11char_traitsIcEEcvPvEv( %"struct.std::basic_ios<char,std::char_traits<char> >"* null )
- to label %tmp15.i3847.noexc unwind label %lpad3845 ; <i8*> [#uses=0]
-
-tmp15.i3847.noexc: ; preds = %tmp3.i3838.noexc
- br i1 false, label %invcont1634, label %bb.i3850
-
-bb.i3850: ; preds = %tmp15.i3847.noexc
- ret void
-
-invcont1634: ; preds = %tmp15.i3847.noexc
- %tmp3.i38173831 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, i8* null )
- to label %tmp3.i3817.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-tmp3.i3817.noexc: ; preds = %invcont1634
- %tmp15.i38263832 = invoke i8* @_ZNKSt9basic_iosIcSt11char_traitsIcEEcvPvEv( %"struct.std::basic_ios<char,std::char_traits<char> >"* null )
- to label %tmp15.i3826.noexc unwind label %lpad3845 ; <i8*> [#uses=0]
-
-tmp15.i3826.noexc: ; preds = %tmp3.i3817.noexc
- br i1 false, label %invcont1636, label %bb.i3829
-
-bb.i3829: ; preds = %tmp15.i3826.noexc
- ret void
-
-invcont1636: ; preds = %tmp15.i3826.noexc
- %tmp8.i38083811 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, double* %tmp16.i3862 )
- to label %tmp8.i3808.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=1]
-
-tmp8.i3808.noexc: ; preds = %invcont1636
- %tmp9.i38093812 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %tmp8.i38083811, double* null )
- to label %tmp9.i3809.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=1]
-
-tmp9.i3809.noexc: ; preds = %tmp8.i3808.noexc
- %tmp10.i38103813 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %tmp9.i38093812, double* null )
- to label %invcont1638 unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-invcont1638: ; preds = %tmp9.i3809.noexc
- %tmp8.i37983801 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, double* %tmp16.i3859 )
- to label %tmp8.i3798.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=1]
-
-tmp8.i3798.noexc: ; preds = %invcont1638
- %tmp9.i37993802 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %tmp8.i37983801, double* null )
- to label %tmp9.i3799.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=1]
-
-tmp9.i3799.noexc: ; preds = %tmp8.i3798.noexc
- %tmp10.i38003803 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %tmp9.i37993802, double* null )
- to label %invcont1640 unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-invcont1640: ; preds = %tmp9.i3799.noexc
- %tmp3.i3778 = load double* %tmp16.i3859, align 8 ; <double> [#uses=1]
- %tmp1643 = invoke i8* @_Znwm( i32 76 )
- to label %invcont1642 unwind label %lpad3845 ; <i8*> [#uses=0]
-
-invcont1642: ; preds = %invcont1640
- %tmp18.i3770 = fsub double %tmp3.i3778, 0.000000e+00 ; <double> [#uses=0]
- invoke fastcc void @_ZN7mrScene9AddObjectEP9mrSurfaceRK8ggStringS4_i( %struct.mrScene* %this, %struct.ggBRDF* null, %struct.ggString* null, %struct.ggString* null, i32 0 )
- to label %bb3743 unwind label %lpad3845
-
-bb1654: ; preds = %bb1624
- br i1 false, label %bb1662, label %bb1693
-
-bb1662: ; preds = %bb1654
- %tmp3.i37143728 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, i8* null )
- to label %tmp3.i3714.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-tmp3.i3714.noexc: ; preds = %bb1662
- %tmp15.i37233729 = invoke i8* @_ZNKSt9basic_iosIcSt11char_traitsIcEEcvPvEv( %"struct.std::basic_ios<char,std::char_traits<char> >"* null )
- to label %tmp15.i3723.noexc unwind label %lpad3845 ; <i8*> [#uses=0]
-
-tmp15.i3723.noexc: ; preds = %tmp3.i3714.noexc
- ret void
-
-bb1693: ; preds = %bb1654
- br i1 false, label %bb1701, label %bb1745
-
-bb1701: ; preds = %bb1693
- %tmp3.i36493663 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, i8* null )
- to label %tmp3.i3649.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-tmp3.i3649.noexc: ; preds = %bb1701
- ret void
-
-bb1745: ; preds = %bb1693
- br i1 false, label %bb1753, label %bb1797
-
-bb1753: ; preds = %bb1745
- ret void
-
-bb1797: ; preds = %bb1745
- br i1 false, label %bb1805, label %bb1847
-
-bb1805: ; preds = %bb1797
- ret void
-
-bb1847: ; preds = %bb1797
- br i1 false, label %bb1855, label %bb1897
-
-bb1855: ; preds = %bb1847
- %tmp3.i34633477 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, i8* null )
- to label %tmp3.i3463.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-tmp3.i3463.noexc: ; preds = %bb1855
- %tmp15.i34723478 = invoke i8* @_ZNKSt9basic_iosIcSt11char_traitsIcEEcvPvEv( %"struct.std::basic_ios<char,std::char_traits<char> >"* null )
- to label %tmp15.i3472.noexc unwind label %lpad3845 ; <i8*> [#uses=0]
-
-tmp15.i3472.noexc: ; preds = %tmp3.i3463.noexc
- br i1 false, label %invcont1857, label %bb.i3475
-
-bb.i3475: ; preds = %tmp15.i3472.noexc
- invoke fastcc void @_ZN8ggStringaSEPKc( %struct.ggString* null, i8* null )
- to label %invcont1857 unwind label %lpad3845
-
-invcont1857: ; preds = %bb.i3475, %tmp15.i3472.noexc
- %tmp1860 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, double* null )
- to label %invcont1859 unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=1]
-
-invcont1859: ; preds = %invcont1857
- %tmp1862 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %tmp1860, double* null )
- to label %invcont1861 unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=1]
-
-invcont1861: ; preds = %invcont1859
- %tmp1864 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %tmp1862, double* null )
- to label %invcont1863 unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=1]
-
-invcont1863: ; preds = %invcont1861
- %tmp1866 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %tmp1864, double* null )
- to label %invcont1865 unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=1]
-
-invcont1865: ; preds = %invcont1863
- %tmp1868 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %tmp1866, double* null )
- to label %invcont1867 unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-invcont1867: ; preds = %invcont1865
- %tmp1881 = invoke i8 @_ZNKSt9basic_iosIcSt11char_traitsIcEE4goodEv( %"struct.std::basic_ios<char,std::char_traits<char> >"* null ) zeroext
- to label %invcont1880 unwind label %lpad3845 ; <i8> [#uses=0]
-
-invcont1880: ; preds = %invcont1867
- %tmp1883 = invoke i8* @_Znwm( i32 24 )
- to label %invcont1882 unwind label %lpad3845 ; <i8*> [#uses=0]
-
-invcont1882: ; preds = %invcont1880
- invoke fastcc void @_ZN7mrScene9AddObjectEP9mrSurfaceRK8ggStringS4_i( %struct.mrScene* %this, %struct.ggBRDF* null, %struct.ggString* null, %struct.ggString* null, i32 0 )
- to label %bb3743 unwind label %lpad3845
-
-bb1897: ; preds = %bb1847
- br i1 false, label %bb1905, label %bb1947
-
-bb1905: ; preds = %bb1897
- ret void
-
-bb1947: ; preds = %bb1897
- br i1 false, label %bb1955, label %bb2000
-
-bb1955: ; preds = %bb1947
- ret void
-
-bb2000: ; preds = %bb1947
- br i1 false, label %bb2008, label %bb2053
-
-bb2008: ; preds = %bb2000
- ret void
-
-bb2053: ; preds = %bb2000
- br i1 false, label %bb2061, label %bb2106
-
-bb2061: ; preds = %bb2053
- %tmp3.i32433257 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, i8* null )
- to label %tmp3.i3243.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-tmp3.i3243.noexc: ; preds = %bb2061
- %tmp15.i32523258 = invoke i8* @_ZNKSt9basic_iosIcSt11char_traitsIcEEcvPvEv( %"struct.std::basic_ios<char,std::char_traits<char> >"* null )
- to label %bb.i3255 unwind label %lpad3845 ; <i8*> [#uses=0]
-
-bb.i3255: ; preds = %tmp3.i3243.noexc
- invoke fastcc void @_ZN8ggStringaSEPKc( %struct.ggString* null, i8* null )
- to label %invcont2063 unwind label %lpad3845
-
-invcont2063: ; preds = %bb.i3255
- ret void
-
-bb2106: ; preds = %bb2053
- %tmp7.i3214 = call i32 @strcmp( i8* %tmp5.i161, i8* getelementptr ([7 x i8]* @.str80, i32 0, i32 0) ) nounwind readonly ; <i32> [#uses=0]
- br i1 false, label %bb2114, label %bb2136
-
-bb2114: ; preds = %bb2106
- %tmp3.i31923206 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, i8* null )
- to label %tmp3.i3192.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-tmp3.i3192.noexc: ; preds = %bb2114
- %tmp15.i32013207 = invoke i8* @_ZNKSt9basic_iosIcSt11char_traitsIcEEcvPvEv( %"struct.std::basic_ios<char,std::char_traits<char> >"* null )
- to label %tmp15.i3201.noexc unwind label %lpad3845 ; <i8*> [#uses=0]
-
-tmp15.i3201.noexc: ; preds = %tmp3.i3192.noexc
- br i1 false, label %invcont2116, label %bb.i3204
-
-bb.i3204: ; preds = %tmp15.i3201.noexc
- ret void
-
-invcont2116: ; preds = %tmp15.i3201.noexc
- %tmp3.i31713185 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, i8* null )
- to label %tmp3.i3171.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-tmp3.i3171.noexc: ; preds = %invcont2116
- %tmp15.i31803186 = invoke i8* @_ZNKSt9basic_iosIcSt11char_traitsIcEEcvPvEv( %"struct.std::basic_ios<char,std::char_traits<char> >"* null )
- to label %tmp15.i3180.noexc unwind label %lpad3845 ; <i8*> [#uses=0]
-
-tmp15.i3180.noexc: ; preds = %tmp3.i3171.noexc
- br i1 false, label %invcont2118, label %bb.i3183
-
-bb.i3183: ; preds = %tmp15.i3180.noexc
- ret void
-
-invcont2118: ; preds = %tmp15.i3180.noexc
- %tmp8.i31623165 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, double* null )
- to label %tmp8.i3162.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=1]
-
-tmp8.i3162.noexc: ; preds = %invcont2118
- %tmp9.i31633166 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %tmp8.i31623165, double* null )
- to label %tmp9.i3163.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=1]
-
-tmp9.i3163.noexc: ; preds = %tmp8.i3162.noexc
- %tmp10.i31643167 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %tmp9.i31633166, double* null )
- to label %invcont2120 unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-invcont2120: ; preds = %tmp9.i3163.noexc
- %tmp2123 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, double* null )
- to label %invcont2122 unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-invcont2122: ; preds = %invcont2120
- %tmp2125 = invoke i8* @_Znwm( i32 36 )
- to label %invcont2124 unwind label %lpad3845 ; <i8*> [#uses=0]
-
-invcont2124: ; preds = %invcont2122
- invoke fastcc void @_ZN7mrScene9AddObjectEP9mrSurfaceRK8ggStringS4_i( %struct.mrScene* %this, %struct.ggBRDF* null, %struct.ggString* null, %struct.ggString* null, i32 0 )
- to label %bb3743 unwind label %lpad3845
-
-bb2136: ; preds = %bb2106
- %tmp7.i3128 = call i32 @strcmp( i8* %tmp5.i161, i8* getelementptr ([11 x i8]* @.str81, i32 0, i32 0) ) nounwind readonly ; <i32> [#uses=0]
- br i1 false, label %bb2144, label %bb3336
-
-bb2144: ; preds = %bb2136
- %tmp6.i.i31173123 = invoke i8* @_Znam( i32 12 )
- to label %_ZN8ggStringC1Ei.exit3124 unwind label %lpad3845 ; <i8*> [#uses=0]
-
-_ZN8ggStringC1Ei.exit3124: ; preds = %bb2144
- %tmp3.i30983112 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, i8* null )
- to label %tmp3.i3098.noexc unwind label %lpad3921 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-tmp3.i3098.noexc: ; preds = %_ZN8ggStringC1Ei.exit3124
- %tmp15.i31073113 = invoke i8* @_ZNKSt9basic_iosIcSt11char_traitsIcEEcvPvEv( %"struct.std::basic_ios<char,std::char_traits<char> >"* null )
- to label %tmp15.i3107.noexc unwind label %lpad3921 ; <i8*> [#uses=0]
-
-tmp15.i3107.noexc: ; preds = %tmp3.i3098.noexc
- br i1 false, label %invcont2147, label %bb.i3110
-
-bb.i3110: ; preds = %tmp15.i3107.noexc
- ret void
-
-invcont2147: ; preds = %tmp15.i3107.noexc
- %tmp2161 = invoke i8 @_ZNKSt9basic_iosIcSt11char_traitsIcEE4goodEv( %"struct.std::basic_ios<char,std::char_traits<char> >"* null ) zeroext
- to label %invcont2160 unwind label %lpad3921 ; <i8> [#uses=0]
-
-invcont2160: ; preds = %invcont2147
- %tmp4.i30933094 = invoke fastcc %struct.ggSpectrum* @_ZN5ggBSTI10ggSpectrumE4findERK8ggString3( %"struct.ggBSTNode<ggSpectrum>"* null, %struct.ggString* null )
- to label %invcont2164 unwind label %lpad3921 ; <%struct.ggSpectrum*> [#uses=0]
-
-invcont2164: ; preds = %invcont2160
- br i1 false, label %bb2170, label %bb2181
-
-bb2170: ; preds = %invcont2164
- ret void
-
-bb2181: ; preds = %invcont2164
- invoke fastcc void @_ZN8ggStringD1Ev( %struct.ggString* null )
- to label %bb3743 unwind label %lpad3845
-
-bb3336: ; preds = %bb2136
- br i1 false, label %bb3344, label %bb3734
-
-bb3344: ; preds = %bb3336
- %tmp6.i.i773779 = invoke i8* @_Znam( i32 12 )
- to label %_ZN8ggStringC1Ei.exit780 unwind label %lpad3845 ; <i8*> [#uses=0]
-
-_ZN8ggStringC1Ei.exit780: ; preds = %bb3344
- %tmp6.i.i765771 = invoke i8* @_Znam( i32 12 )
- to label %_ZN8ggStringC1Ei.exit772 unwind label %lpad4025 ; <i8*> [#uses=0]
-
-_ZN8ggStringC1Ei.exit772: ; preds = %_ZN8ggStringC1Ei.exit780
- %tmp3.i746760 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, i8* null )
- to label %tmp3.i746.noexc unwind label %lpad4029 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-tmp3.i746.noexc: ; preds = %_ZN8ggStringC1Ei.exit772
- %tmp15.i755761 = invoke i8* @_ZNKSt9basic_iosIcSt11char_traitsIcEEcvPvEv( %"struct.std::basic_ios<char,std::char_traits<char> >"* null )
- to label %tmp15.i755.noexc unwind label %lpad4029 ; <i8*> [#uses=0]
-
-tmp15.i755.noexc: ; preds = %tmp3.i746.noexc
- br i1 false, label %invcont3348, label %bb.i758
-
-bb.i758: ; preds = %tmp15.i755.noexc
- ret void
-
-invcont3348: ; preds = %tmp15.i755.noexc
- %tmp3.i726740 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, i8* null )
- to label %tmp3.i726.noexc unwind label %lpad4029 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-tmp3.i726.noexc: ; preds = %invcont3348
- %tmp15.i735741 = invoke i8* @_ZNKSt9basic_iosIcSt11char_traitsIcEEcvPvEv( %"struct.std::basic_ios<char,std::char_traits<char> >"* null )
- to label %tmp15.i735.noexc unwind label %lpad4029 ; <i8*> [#uses=0]
-
-tmp15.i735.noexc: ; preds = %tmp3.i726.noexc
- br i1 false, label %bb3458, label %bb.i738
-
-bb.i738: ; preds = %tmp15.i735.noexc
- ret void
-
-bb3458: ; preds = %tmp15.i735.noexc
- br i1 false, label %bb3466, label %bb3491
-
-bb3466: ; preds = %bb3458
- %tmp3469 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, double* null )
- to label %invcont3468 unwind label %lpad4029 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=1]
-
-invcont3468: ; preds = %bb3466
- %tmp3471 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd( %"struct.std::basic_istream<char,std::char_traits<char> >"* %tmp3469, double* null )
- to label %invcont3470 unwind label %lpad4029 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=1]
-
-invcont3470: ; preds = %invcont3468
- %tmp3473 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERi( %"struct.std::basic_istream<char,std::char_traits<char> >"* %tmp3471, i32* null )
- to label %invcont3472 unwind label %lpad4029 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-invcont3472: ; preds = %invcont3470
- %tmp3475 = invoke i8* @_Znwm( i32 7196 )
- to label %invcont3474 unwind label %lpad4029 ; <i8*> [#uses=1]
-
-invcont3474: ; preds = %invcont3472
- invoke fastcc void @_ZN13ggSolidNoise3C1Ev( %struct.ggSolidNoise3* null )
- to label %_ZN22ggCoverageSolidTextureC1Eddi.exit unwind label %lpad4045
-
-_ZN22ggCoverageSolidTextureC1Eddi.exit: ; preds = %invcont3474
- %tmp34823483 = bitcast i8* %tmp3475 to %struct.ggBRDF* ; <%struct.ggBRDF*> [#uses=2]
- invoke fastcc void @_ZN5ggBSTI14ggSolidTextureE17InsertIntoSubtreeERK8ggStringPS0_RP9ggBSTNodeIS0_E( %"struct.ggBST<ggSolidTexture>"* null, %struct.ggString* null, %struct.ggBRDF* %tmp34823483, %"struct.ggBSTNode<ggSolidTexture>"** null )
- to label %bb3662 unwind label %lpad4029
-
-bb3491: ; preds = %bb3458
- ret void
-
-bb3662: ; preds = %_ZN22ggCoverageSolidTextureC1Eddi.exit
- invoke fastcc void @_ZN8ggStringD1Ev( %struct.ggString* null )
- to label %invcont3663 unwind label %lpad4025
-
-invcont3663: ; preds = %bb3662
- invoke fastcc void @_ZN8ggStringD1Ev( %struct.ggString* null )
- to label %bb3743 unwind label %lpad3845
-
-bb3734: ; preds = %bb3336
- ret void
-
-bb3743: ; preds = %invcont3663, %bb2181, %invcont2124, %invcont1882, %invcont1642, %bb368, %bb353, %invcont335, %bb3743.preheader
- %tex1.3 = phi %struct.ggBRDF* [ undef, %bb3743.preheader ], [ %tex1.3, %bb368 ], [ %tex1.3, %invcont1642 ], [ %tex1.3, %invcont1882 ], [ %tex1.3, %invcont2124 ], [ %tex1.3, %bb2181 ], [ %tex1.3, %invcont335 ], [ %tmp34823483, %invcont3663 ], [ %tex1.3, %bb353 ] ; <%struct.ggBRDF*> [#uses=7]
- %tmp3.i312325 = invoke %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_( %"struct.std::basic_istream<char,std::char_traits<char> >"* %surfaces, i8* null )
- to label %tmp3.i312.noexc unwind label %lpad3845 ; <%"struct.std::basic_istream<char,std::char_traits<char> >"*> [#uses=0]
-
-tmp3.i312.noexc: ; preds = %bb3743
- %tmp15.i327 = invoke i8* @_ZNKSt9basic_iosIcSt11char_traitsIcEEcvPvEv( %"struct.std::basic_ios<char,std::char_traits<char> >"* null )
- to label %tmp15.i.noexc326 unwind label %lpad3845 ; <i8*> [#uses=0]
-
-tmp15.i.noexc326: ; preds = %tmp3.i312.noexc
- br i1 false, label %invcont3745, label %bb.i323
-
-bb.i323: ; preds = %tmp15.i.noexc326
- ret void
-
-invcont3745: ; preds = %tmp15.i.noexc326
- %tmp3759 = invoke i8* @_ZNKSt9basic_iosIcSt11char_traitsIcEEcvPvEv( %"struct.std::basic_ios<char,std::char_traits<char> >"* null )
- to label %invcont3758 unwind label %lpad3845 ; <i8*> [#uses=0]
-
-invcont3758: ; preds = %invcont3745
- %tmp5.i161 = getelementptr %"struct.ggString::StringRep"* null, i32 0, i32 2, i32 0 ; <i8*> [#uses=2]
- br i1 false, label %bb333, label %bb345
-
-lpad: ; preds = %entry
- ret void
-
-lpad3825: ; preds = %_ZN8ggStringC1Ei.exit
- ret void
-
-lpad3829: ; preds = %_ZN8ggStringC1Ei.exit96
- ret void
-
-lpad3833: ; preds = %_ZN8ggStringC1Ei.exit104
- ret void
-
-lpad3837: ; preds = %_ZN8ggStringC1Ei.exit112
- ret void
-
-lpad3841: ; preds = %_ZN8ggStringC1Ei.exit129
- ret void
-
-lpad3845: ; preds = %invcont3745, %tmp3.i312.noexc, %bb3743, %invcont3663, %bb3344, %bb2181, %bb2144, %invcont2124, %invcont2122, %invcont2120, %tmp9.i3163.noexc, %tmp8.i3162.noexc, %invcont2118, %tmp3.i3171.noexc, %invcont2116, %tmp3.i3192.noexc, %bb2114, %bb.i3255, %tmp3.i3243.noexc, %bb2061, %invcont1882, %invcont1880, %invcont1867, %invcont1865, %invcont1863, %invcont1861, %invcont1859, %invcont1857, %bb.i3475, %tmp3.i3463.noexc, %bb1855, %bb1701, %tmp3.i3714.noexc, %bb1662, %invcont1642, %invcont1640, %tmp9.i3799.noexc, %tmp8.i3798.noexc, %invcont1638, %tmp9.i3809.noexc, %tmp8.i3808.noexc, %invcont1636, %tmp3.i3817.noexc, %invcont1634, %tmp3.i3838.noexc, %bb1632, %bb368, %bb353, %tmp3.i167.noexc, %bb333, %tmp3.i.noexc, %_ZN13mrSurfaceListC1Ev.exit, %_ZN8ggStringC1Ei.exit139
- ret void
-
-lpad3849: ; preds = %invcont294
- ret void
-
-lpad3921: ; preds = %invcont2160, %invcont2147, %tmp3.i3098.noexc, %_ZN8ggStringC1Ei.exit3124
- ret void
-
-lpad4025: ; preds = %bb3662, %_ZN8ggStringC1Ei.exit780
- ret void
-
-lpad4029: ; preds = %_ZN22ggCoverageSolidTextureC1Eddi.exit, %invcont3472, %invcont3470, %invcont3468, %bb3466, %tmp3.i726.noexc, %invcont3348, %tmp3.i746.noexc, %_ZN8ggStringC1Ei.exit772
- ret void
-
-lpad4045: ; preds = %invcont3474
- ret void
-}
-
-declare fastcc void @_ZN8ggStringD1Ev(%struct.ggString*)
-
-declare i8* @_Znam(i32)
-
-declare fastcc void @_ZN8ggStringaSEPKc(%struct.ggString*, i8*)
-
-declare i32 @strcmp(i8*, i8*) nounwind readonly
-
-declare %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERi(%"struct.std::basic_istream<char,std::char_traits<char> >"*, i32*)
-
-declare i8* @_Znwm(i32)
-
-declare i8* @_ZNKSt9basic_iosIcSt11char_traitsIcEEcvPvEv(%"struct.std::basic_ios<char,std::char_traits<char> >"*)
-
-declare %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZNSirsERd(%"struct.std::basic_istream<char,std::char_traits<char> >"*, double*)
-
-declare %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_(%"struct.std::basic_istream<char,std::char_traits<char> >"*, i8*)
-
-declare fastcc void @_ZN13ggSolidNoise3C1Ev(%struct.ggSolidNoise3*)
-
-declare i8 @_ZNKSt9basic_iosIcSt11char_traitsIcEE4goodEv(%"struct.std::basic_ios<char,std::char_traits<char> >"*) zeroext
-
-declare fastcc %struct.ggSpectrum* @_ZN5ggBSTI10ggSpectrumE4findERK8ggString3(%"struct.ggBSTNode<ggSpectrum>"*, %struct.ggString*)
-
-declare fastcc void @_ZN5ggBSTI14ggSolidTextureE17InsertIntoSubtreeERK8ggStringPS0_RP9ggBSTNodeIS0_E(%"struct.ggBST<ggSolidTexture>"*, %struct.ggString*, %struct.ggBRDF*, %"struct.ggBSTNode<ggSolidTexture>"**)
-
-declare fastcc void @_ZN7mrScene9AddObjectEP9mrSurfaceRK8ggStringS4_i(%struct.mrScene*, %struct.ggBRDF*, %struct.ggString*, %struct.ggString*, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-12-16-BURRSchedCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-12-16-BURRSchedCrash.ll
deleted file mode 100644
index 455de91..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-12-16-BURRSchedCrash.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s -mtriple=i686-pc-linux-gnu
-; PR1799
-
- %struct.c34007g__designated___XUB = type { i32, i32, i32, i32 }
- %struct.c34007g__pkg__parent = type { i32*, %struct.c34007g__designated___XUB* }
-
-define void @_ada_c34007g() {
-entry:
- %x8 = alloca %struct.c34007g__pkg__parent, align 8 ; <%struct.c34007g__pkg__parent*> [#uses=2]
- br i1 true, label %bb1271, label %bb848
-
-bb848: ; preds = %entry
- ret void
-
-bb1271: ; preds = %bb898
- %tmp1272 = getelementptr %struct.c34007g__pkg__parent* %x8, i32 0, i32 0 ; <i32**> [#uses=1]
- %x82167 = bitcast %struct.c34007g__pkg__parent* %x8 to i64* ; <i64*> [#uses=1]
- br i1 true, label %bb4668, label %bb848
-
-bb4668: ; preds = %bb4648
- %tmp5464 = load i64* %x82167, align 8 ; <i64> [#uses=1]
- %tmp5467 = icmp ne i64 0, %tmp5464 ; <i1> [#uses=1]
- %tmp5470 = load i32** %tmp1272, align 8 ; <i32*> [#uses=1]
- %tmp5471 = icmp eq i32* %tmp5470, null ; <i1> [#uses=1]
- call fastcc void @c34007g__pkg__create.311( %struct.c34007g__pkg__parent* null, i32 7, i32 9, i32 2, i32 4, i32 1 )
- %tmp5475 = or i1 %tmp5471, %tmp5467 ; <i1> [#uses=1]
- %tmp5497 = or i1 %tmp5475, false ; <i1> [#uses=1]
- br i1 %tmp5497, label %bb848, label %bb5507
-
-bb5507: ; preds = %bb4668
- ret void
-
-}
-
-declare fastcc void @c34007g__pkg__create.311(%struct.c34007g__pkg__parent*, i32, i32, i32, i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-12-17-InvokeAsm.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-12-17-InvokeAsm.ll
deleted file mode 100644
index bd26481..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-12-17-InvokeAsm.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -enable-eh
-
-target triple = "i686-pc-linux-gnu"
-
-define fastcc void @bc__support__high_resolution_time__initialize_clock_rate() {
-entry:
- invoke void asm "rdtsc\0A\09movl %eax, $0\0A\09movl %edx, $1", "=*imr,=*imr,~{dirflag},~{fpsr},~{flags},~{dx},~{ax}"( i32* null, i32* null )
- to label %.noexc unwind label %cleanup144
-
-.noexc: ; preds = %entry
- ret void
-
-cleanup144: ; preds = %entry
- unwind
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-12-18-LoadCSEBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-12-18-LoadCSEBug.ll
deleted file mode 100644
index 265d968..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-12-18-LoadCSEBug.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {(%esp)} | count 2
-; PR1872
-
- %struct.c34007g__designated___XUB = type { i32, i32, i32, i32 }
- %struct.c34007g__pkg__parent = type { i32*, %struct.c34007g__designated___XUB* }
-
-define void @_ada_c34007g() {
-entry:
- %x8 = alloca %struct.c34007g__pkg__parent, align 8 ; <%struct.c34007g__pkg__parent*> [#uses=2]
- %tmp1272 = getelementptr %struct.c34007g__pkg__parent* %x8, i32 0, i32 0 ; <i32**> [#uses=1]
- %x82167 = bitcast %struct.c34007g__pkg__parent* %x8 to i64* ; <i64*> [#uses=1]
- br i1 true, label %bb4668, label %bb848
-
-bb4668: ; preds = %bb4648
- %tmp5464 = load i64* %x82167, align 8 ; <i64> [#uses=1]
- %tmp5467 = icmp ne i64 0, %tmp5464 ; <i1> [#uses=1]
- %tmp5470 = load i32** %tmp1272, align 8 ; <i32*> [#uses=1]
- %tmp5471 = icmp eq i32* %tmp5470, null ; <i1> [#uses=1]
- %tmp5475 = or i1 %tmp5471, %tmp5467 ; <i1> [#uses=1]
- %tmp5497 = or i1 %tmp5475, false ; <i1> [#uses=1]
- br i1 %tmp5497, label %bb848, label %bb5507
-
-bb848: ; preds = %entry
- ret void
-
-bb5507: ; preds = %bb4668
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2007-12-31-UnusedSelector.ll b/libclamav/c++/llvm/test/CodeGen/X86/2007-12-31-UnusedSelector.ll
deleted file mode 100644
index fc9164f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2007-12-31-UnusedSelector.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -enable-eh
-; PR1833
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i686-pc-linux-gnu"
- %struct.__class_type_info_pseudo = type { %struct.__type_info_pseudo }
- %struct.__type_info_pseudo = type { i8*, i8* }
-@_ZTI2e1 = external constant %struct.__class_type_info_pseudo ; <%struct.__class_type_info_pseudo*> [#uses=1]
-
-define void @_Z7ex_testv() {
-entry:
- invoke void @__cxa_throw( i8* null, i8* bitcast (%struct.__class_type_info_pseudo* @_ZTI2e1 to i8*), void (i8*)* null ) noreturn
- to label %UnifiedUnreachableBlock unwind label %lpad
-
-bb14: ; preds = %lpad
- unreachable
-
-lpad: ; preds = %entry
- invoke void @__cxa_end_catch( )
- to label %bb14 unwind label %lpad17
-
-lpad17: ; preds = %lpad
- %eh_select20 = tail call i32 (i8*, i8*, ...)* @llvm.eh.selector.i32( i8* null, i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*), i8* null ) ; <i32> [#uses=0]
- unreachable
-
-UnifiedUnreachableBlock: ; preds = %entry
- unreachable
-}
-
-declare void @__cxa_throw(i8*, i8*, void (i8*)*) noreturn
-
-declare i32 @llvm.eh.selector.i32(i8*, i8*, ...)
-
-declare void @__cxa_end_catch()
-
-declare i32 @__gxx_personality_v0(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-01-08-IllegalCMP.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-01-08-IllegalCMP.ll
deleted file mode 100644
index 7aec613..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-01-08-IllegalCMP.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i686-pc-linux-gnu"
-
-define i64 @__absvdi2(i64 %a) nounwind {
-entry:
- %w.0 = select i1 false, i64 0, i64 %a ; <i64> [#uses=2]
- %tmp9 = icmp slt i64 %w.0, 0 ; <i1> [#uses=1]
- br i1 %tmp9, label %bb12, label %bb13
-
-bb12: ; preds = %entry
- unreachable
-
-bb13: ; preds = %entry
- ret i64 %w.0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll
deleted file mode 100644
index b040095..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep pushf
-
- %struct.indexentry = type { i32, i8*, i8*, i8*, i8*, i8* }
-
-define i32 @_bfd_stab_section_find_nearest_line(i32 %offset) nounwind {
-entry:
- %tmp910 = add i32 0, %offset ; <i32> [#uses=1]
- br i1 true, label %bb951, label %bb917
-
-bb917: ; preds = %entry
- ret i32 0
-
-bb951: ; preds = %bb986, %entry
- %tmp955 = sdiv i32 0, 2 ; <i32> [#uses=3]
- %tmp961 = getelementptr %struct.indexentry* null, i32 %tmp955, i32 0 ; <i32*> [#uses=1]
- br i1 true, label %bb986, label %bb967
-
-bb967: ; preds = %bb951
- ret i32 0
-
-bb986: ; preds = %bb951
- %tmp993 = load i32* %tmp961, align 4 ; <i32> [#uses=1]
- %tmp995 = icmp ugt i32 %tmp993, %tmp910 ; <i1> [#uses=2]
- %tmp1002 = add i32 %tmp955, 1 ; <i32> [#uses=1]
- %low.0 = select i1 %tmp995, i32 0, i32 %tmp1002 ; <i32> [#uses=1]
- %high.0 = select i1 %tmp995, i32 %tmp955, i32 0 ; <i32> [#uses=1]
- %tmp1006 = icmp eq i32 %low.0, %high.0 ; <i1> [#uses=1]
- br i1 %tmp1006, label %UnifiedReturnBlock, label %bb951
-
-UnifiedReturnBlock: ; preds = %bb986
- ret i32 1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-01-09-LongDoubleSin.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-01-09-LongDoubleSin.ll
deleted file mode 100644
index 6997d53..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-01-09-LongDoubleSin.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -o - | grep sinl
-
-target triple = "i686-pc-linux-gnu"
-
-define x86_fp80 @f(x86_fp80 %x) nounwind {
-entry:
- %tmp2 = tail call x86_fp80 @sinl( x86_fp80 %x ) nounwind readonly ; <x86_fp80> [#uses=1]
- ret x86_fp80 %tmp2
-}
-
-declare x86_fp80 @sinl(x86_fp80) nounwind readonly
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll
deleted file mode 100644
index d795610..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -regalloc=local
-
-define void @SolveCubic(double %a, double %b, double %c, double %d, i32* %solutions, double* %x) {
-entry:
- %tmp71 = load x86_fp80* null, align 16 ; <x86_fp80> [#uses=1]
- %tmp72 = fdiv x86_fp80 %tmp71, 0xKC000C000000000000000 ; <x86_fp80> [#uses=1]
- %tmp73 = fadd x86_fp80 0xK00000000000000000000, %tmp72 ; <x86_fp80> [#uses=1]
- %tmp7374 = fptrunc x86_fp80 %tmp73 to double ; <double> [#uses=1]
- store double %tmp7374, double* null, align 8
- %tmp81 = load double* null, align 8 ; <double> [#uses=1]
- %tmp82 = fadd double %tmp81, 0x401921FB54442D18 ; <double> [#uses=1]
- %tmp83 = fdiv double %tmp82, 3.000000e+00 ; <double> [#uses=1]
- %tmp84 = call double @cos( double %tmp83 ) ; <double> [#uses=1]
- %tmp85 = fmul double 0.000000e+00, %tmp84 ; <double> [#uses=1]
- %tmp8586 = fpext double %tmp85 to x86_fp80 ; <x86_fp80> [#uses=1]
- %tmp87 = load x86_fp80* null, align 16 ; <x86_fp80> [#uses=1]
- %tmp88 = fdiv x86_fp80 %tmp87, 0xKC000C000000000000000 ; <x86_fp80> [#uses=1]
- %tmp89 = fadd x86_fp80 %tmp8586, %tmp88 ; <x86_fp80> [#uses=1]
- %tmp8990 = fptrunc x86_fp80 %tmp89 to double ; <double> [#uses=1]
- store double %tmp8990, double* null, align 8
- %tmp97 = load double* null, align 8 ; <double> [#uses=1]
- %tmp98 = fadd double %tmp97, 0x402921FB54442D18 ; <double> [#uses=1]
- %tmp99 = fdiv double %tmp98, 3.000000e+00 ; <double> [#uses=1]
- %tmp100 = call double @cos( double %tmp99 ) ; <double> [#uses=1]
- %tmp101 = fmul double 0.000000e+00, %tmp100 ; <double> [#uses=1]
- %tmp101102 = fpext double %tmp101 to x86_fp80 ; <x86_fp80> [#uses=1]
- %tmp103 = load x86_fp80* null, align 16 ; <x86_fp80> [#uses=1]
- %tmp104 = fdiv x86_fp80 %tmp103, 0xKC000C000000000000000 ; <x86_fp80> [#uses=1]
- %tmp105 = fadd x86_fp80 %tmp101102, %tmp104 ; <x86_fp80> [#uses=1]
- %tmp105106 = fptrunc x86_fp80 %tmp105 to double ; <double> [#uses=1]
- store double %tmp105106, double* null, align 8
- ret void
-}
-
-declare double @cos(double)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-01-16-InvalidDAGCombineXform.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-01-16-InvalidDAGCombineXform.ll
deleted file mode 100644
index e91f52e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-01-16-InvalidDAGCombineXform.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep IMPLICIT_DEF
-
- %struct.node_t = type { double*, %struct.node_t*, %struct.node_t**, double**, double*, i32, i32 }
-
-define void @localize_local_bb19_bb(%struct.node_t** %cur_node) {
-newFuncRoot:
- %tmp1 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
- %tmp2 = getelementptr %struct.node_t* %tmp1, i32 0, i32 4 ; <double**> [#uses=1]
- %tmp3 = load double** %tmp2, align 4 ; <double*> [#uses=1]
- %tmp4 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
- %tmp5 = getelementptr %struct.node_t* %tmp4, i32 0, i32 4 ; <double**> [#uses=1]
- store double* %tmp3, double** %tmp5, align 4
- %tmp6 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
- %tmp7 = getelementptr %struct.node_t* %tmp6, i32 0, i32 3 ; <double***> [#uses=1]
- %tmp8 = load double*** %tmp7, align 4 ; <double**> [#uses=1]
- %tmp9 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
- %tmp10 = getelementptr %struct.node_t* %tmp9, i32 0, i32 3 ; <double***> [#uses=1]
- store double** %tmp8, double*** %tmp10, align 4
- %tmp11 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
- %tmp12 = getelementptr %struct.node_t* %tmp11, i32 0, i32 0 ; <double**> [#uses=1]
- %tmp13 = load double** %tmp12, align 4 ; <double*> [#uses=1]
- %tmp14 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
- %tmp15 = getelementptr %struct.node_t* %tmp14, i32 0, i32 0 ; <double**> [#uses=1]
- store double* %tmp13, double** %tmp15, align 4
- %tmp16 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
- %tmp17 = getelementptr %struct.node_t* %tmp16, i32 0, i32 1 ; <%struct.node_t**> [#uses=1]
- %tmp18 = load %struct.node_t** %tmp17, align 4 ; <%struct.node_t*> [#uses=1]
- store %struct.node_t* %tmp18, %struct.node_t** %cur_node, align 4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-01-16-Trampoline.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-01-16-Trampoline.ll
deleted file mode 100644
index 704b2ba..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-01-16-Trampoline.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86
-; RUN: llc < %s -march=x86-64
-
- %struct.FRAME.gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets = type { i32, i32, void (i32, i32)*, i8 (i32, i32)* }
-
-define fastcc i32 @gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets.5146(i64 %table.0.0, i64 %table.0.1, i32 %last, i32 %pos) {
-entry:
- %tramp22 = call i8* @llvm.init.trampoline( i8* null, i8* bitcast (void (%struct.FRAME.gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets*, i32, i32)* @gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets__move.5177 to i8*), i8* null ) ; <i8*> [#uses=0]
- unreachable
-}
-
-declare void @gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets__move.5177(%struct.FRAME.gnat__perfect_hash_generators__select_char_position__build_identical_keys_sets* nest , i32, i32) nounwind
-
-declare i8* @llvm.init.trampoline(i8*, i8*, i8*) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-01-25-EmptyFunction.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-01-25-EmptyFunction.ll
deleted file mode 100644
index 387645f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-01-25-EmptyFunction.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {.byte 0}
-target triple = "i686-apple-darwin8"
-
-
-define void @bork() noreturn nounwind {
-entry:
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-05-ISelCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-02-05-ISelCrash.ll
deleted file mode 100644
index 443a32d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-05-ISelCrash.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR1975
-
-@nodes = external global i64 ; <i64*> [#uses=2]
-
-define fastcc i32 @ab(i32 %alpha, i32 %beta) nounwind {
-entry:
- %tmp1 = load i64* @nodes, align 8 ; <i64> [#uses=1]
- %tmp2 = add i64 %tmp1, 1 ; <i64> [#uses=1]
- store i64 %tmp2, i64* @nodes, align 8
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll
deleted file mode 100644
index d2d5149..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep xor | grep CPI
-
-define void @casin({ double, double }* sret %agg.result, double %z.0, double %z.1) nounwind {
-entry:
- %memtmp = alloca { double, double }, align 8 ; <{ double, double }*> [#uses=3]
- %tmp4 = fsub double -0.000000e+00, %z.1 ; <double> [#uses=1]
- call void @casinh( { double, double }* sret %memtmp, double %tmp4, double %z.0 ) nounwind
- %tmp19 = getelementptr { double, double }* %memtmp, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp20 = load double* %tmp19, align 8 ; <double> [#uses=1]
- %tmp22 = getelementptr { double, double }* %memtmp, i32 0, i32 1 ; <double*> [#uses=1]
- %tmp23 = load double* %tmp22, align 8 ; <double> [#uses=1]
- %tmp32 = fsub double -0.000000e+00, %tmp20 ; <double> [#uses=1]
- %tmp37 = getelementptr { double, double }* %agg.result, i32 0, i32 0 ; <double*> [#uses=1]
- store double %tmp23, double* %tmp37, align 8
- %tmp40 = getelementptr { double, double }* %agg.result, i32 0, i32 1 ; <double*> [#uses=1]
- store double %tmp32, double* %tmp40, align 8
- ret void
-}
-
-declare void @casinh({ double, double }* sret , double, double) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-08-LoadFoldingBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-02-08-LoadFoldingBug.ll
deleted file mode 100644
index b772d77..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-08-LoadFoldingBug.ll
+++ /dev/null
@@ -1,99 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep andpd | not grep esp
-
-declare double @llvm.sqrt.f64(double) nounwind readnone
-
-declare fastcc void @ApplyGivens(double**, double, double, i32, i32, i32, i32) nounwind
-
-declare double @fabs(double)
-
-define void @main_bb114_2E_outer_2E_i_bb3_2E_i27(double** %tmp12.sub.i.i, [51 x double*]* %tmp12.i.i.i, i32 %i.0.reg2mem.0.ph.i, i32 %tmp11688.i, i32 %tmp19.i, i32 %tmp24.i, [51 x double*]* %tmp12.i.i) {
-newFuncRoot:
- br label %bb3.i27
-
-bb111.i77.bb121.i_crit_edge.exitStub: ; preds = %bb111.i77
- ret void
-
-bb3.i27: ; preds = %bb111.i77.bb3.i27_crit_edge, %newFuncRoot
- %indvar94.i = phi i32 [ 0, %newFuncRoot ], [ %tmp113.i76, %bb111.i77.bb3.i27_crit_edge ] ; <i32> [#uses=6]
- %tmp6.i20 = getelementptr [51 x double*]* %tmp12.i.i, i32 0, i32 %indvar94.i ; <double**> [#uses=1]
- %tmp7.i21 = load double** %tmp6.i20, align 4 ; <double*> [#uses=2]
- %tmp10.i = add i32 %indvar94.i, %i.0.reg2mem.0.ph.i ; <i32> [#uses=5]
- %tmp11.i22 = getelementptr double* %tmp7.i21, i32 %tmp10.i ; <double*> [#uses=1]
- %tmp12.i23 = load double* %tmp11.i22, align 8 ; <double> [#uses=4]
- %tmp20.i24 = add i32 %tmp19.i, %indvar94.i ; <i32> [#uses=3]
- %tmp21.i = getelementptr double* %tmp7.i21, i32 %tmp20.i24 ; <double*> [#uses=1]
- %tmp22.i25 = load double* %tmp21.i, align 8 ; <double> [#uses=3]
- %tmp1.i.i26 = fcmp oeq double %tmp12.i23, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %tmp1.i.i26, label %bb3.i27.Givens.exit.i49_crit_edge, label %bb5.i.i31
-
-bb5.i.i31: ; preds = %bb3.i27
- %tmp7.i.i28 = call double @fabs( double %tmp12.i23 ) nounwind ; <double> [#uses=1]
- %tmp9.i.i29 = call double @fabs( double %tmp22.i25 ) nounwind ; <double> [#uses=1]
- %tmp10.i.i30 = fcmp ogt double %tmp7.i.i28, %tmp9.i.i29 ; <i1> [#uses=1]
- br i1 %tmp10.i.i30, label %bb13.i.i37, label %bb30.i.i43
-
-bb13.i.i37: ; preds = %bb5.i.i31
- %tmp15.i.i32 = fsub double -0.000000e+00, %tmp22.i25 ; <double> [#uses=1]
- %tmp17.i.i33 = fdiv double %tmp15.i.i32, %tmp12.i23 ; <double> [#uses=3]
- %tmp20.i4.i = fmul double %tmp17.i.i33, %tmp17.i.i33 ; <double> [#uses=1]
- %tmp21.i.i34 = fadd double %tmp20.i4.i, 1.000000e+00 ; <double> [#uses=1]
- %tmp22.i.i35 = call double @llvm.sqrt.f64( double %tmp21.i.i34 ) nounwind ; <double> [#uses=1]
- %tmp23.i5.i = fdiv double 1.000000e+00, %tmp22.i.i35 ; <double> [#uses=2]
- %tmp28.i.i36 = fmul double %tmp23.i5.i, %tmp17.i.i33 ; <double> [#uses=1]
- br label %Givens.exit.i49
-
-bb30.i.i43: ; preds = %bb5.i.i31
- %tmp32.i.i38 = fsub double -0.000000e+00, %tmp12.i23 ; <double> [#uses=1]
- %tmp34.i.i39 = fdiv double %tmp32.i.i38, %tmp22.i25 ; <double> [#uses=3]
- %tmp37.i6.i = fmul double %tmp34.i.i39, %tmp34.i.i39 ; <double> [#uses=1]
- %tmp38.i.i40 = fadd double %tmp37.i6.i, 1.000000e+00 ; <double> [#uses=1]
- %tmp39.i7.i = call double @llvm.sqrt.f64( double %tmp38.i.i40 ) nounwind ; <double> [#uses=1]
- %tmp40.i.i41 = fdiv double 1.000000e+00, %tmp39.i7.i ; <double> [#uses=2]
- %tmp45.i.i42 = fmul double %tmp40.i.i41, %tmp34.i.i39 ; <double> [#uses=1]
- br label %Givens.exit.i49
-
-Givens.exit.i49: ; preds = %bb3.i27.Givens.exit.i49_crit_edge, %bb30.i.i43, %bb13.i.i37
- %s.0.i44 = phi double [ %tmp45.i.i42, %bb30.i.i43 ], [ %tmp23.i5.i, %bb13.i.i37 ], [ 0.000000e+00, %bb3.i27.Givens.exit.i49_crit_edge ] ; <double> [#uses=2]
- %c.0.i45 = phi double [ %tmp40.i.i41, %bb30.i.i43 ], [ %tmp28.i.i36, %bb13.i.i37 ], [ 1.000000e+00, %bb3.i27.Givens.exit.i49_crit_edge ] ; <double> [#uses=2]
- %tmp26.i46 = add i32 %tmp24.i, %indvar94.i ; <i32> [#uses=2]
- %tmp27.i47 = icmp slt i32 %tmp26.i46, 51 ; <i1> [#uses=1]
- %min.i48 = select i1 %tmp27.i47, i32 %tmp26.i46, i32 50 ; <i32> [#uses=1]
- call fastcc void @ApplyGivens( double** %tmp12.sub.i.i, double %s.0.i44, double %c.0.i45, i32 %tmp20.i24, i32 %tmp10.i, i32 %indvar94.i, i32 %min.i48 ) nounwind
- br label %codeRepl
-
-codeRepl: ; preds = %Givens.exit.i49
- call void @main_bb114_2E_outer_2E_i_bb3_2E_i27_bb_2E_i48_2E_i( i32 %tmp10.i, i32 %tmp20.i24, double %s.0.i44, double %c.0.i45, [51 x double*]* %tmp12.i.i.i )
- br label %ApplyRGivens.exit49.i
-
-ApplyRGivens.exit49.i: ; preds = %codeRepl
- %tmp10986.i = icmp sgt i32 %tmp11688.i, %tmp10.i ; <i1> [#uses=1]
- br i1 %tmp10986.i, label %ApplyRGivens.exit49.i.bb52.i57_crit_edge, label %ApplyRGivens.exit49.i.bb111.i77_crit_edge
-
-codeRepl1: ; preds = %ApplyRGivens.exit49.i.bb52.i57_crit_edge
- call void @main_bb114_2E_outer_2E_i_bb3_2E_i27_bb52_2E_i57( i32 %tmp10.i, double** %tmp12.sub.i.i, [51 x double*]* %tmp12.i.i.i, i32 %i.0.reg2mem.0.ph.i, i32 %tmp11688.i, i32 %tmp19.i, i32 %tmp24.i, [51 x double*]* %tmp12.i.i )
- br label %bb105.i.bb111.i77_crit_edge
-
-bb111.i77: ; preds = %bb105.i.bb111.i77_crit_edge, %ApplyRGivens.exit49.i.bb111.i77_crit_edge
- %tmp113.i76 = add i32 %indvar94.i, 1 ; <i32> [#uses=2]
- %tmp118.i = icmp sgt i32 %tmp11688.i, %tmp113.i76 ; <i1> [#uses=1]
- br i1 %tmp118.i, label %bb111.i77.bb3.i27_crit_edge, label %bb111.i77.bb121.i_crit_edge.exitStub
-
-bb3.i27.Givens.exit.i49_crit_edge: ; preds = %bb3.i27
- br label %Givens.exit.i49
-
-ApplyRGivens.exit49.i.bb52.i57_crit_edge: ; preds = %ApplyRGivens.exit49.i
- br label %codeRepl1
-
-ApplyRGivens.exit49.i.bb111.i77_crit_edge: ; preds = %ApplyRGivens.exit49.i
- br label %bb111.i77
-
-bb105.i.bb111.i77_crit_edge: ; preds = %codeRepl1
- br label %bb111.i77
-
-bb111.i77.bb3.i27_crit_edge: ; preds = %bb111.i77
- br label %bb3.i27
-}
-
-declare void @main_bb114_2E_outer_2E_i_bb3_2E_i27_bb_2E_i48_2E_i(i32, i32, double, double, [51 x double*]*)
-
-declare void @main_bb114_2E_outer_2E_i_bb3_2E_i27_bb52_2E_i57(i32, double**, [51 x double*]*, i32, i32, i32, i32, [51 x double*]*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-14-BitMiscompile.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-02-14-BitMiscompile.ll
deleted file mode 100644
index 1983f1d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-14-BitMiscompile.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 | grep and
-define i32 @test(i1 %A) {
- %B = zext i1 %A to i32 ; <i32> [#uses=1]
- %C = sub i32 0, %B ; <i32> [#uses=1]
- %D = and i32 %C, 255 ; <i32> [#uses=1]
- ret i32 %D
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-18-TailMergingBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-02-18-TailMergingBug.ll
deleted file mode 100644
index 7463a0e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-18-TailMergingBug.ll
+++ /dev/null
@@ -1,219 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah -stats |& grep {Number of block tails merged} | grep 9
-; PR1909
-
-@.str = internal constant [48 x i8] c"transformed bounds: (%.2f, %.2f), (%.2f, %.2f)\0A\00" ; <[48 x i8]*> [#uses=1]
-
-define void @minmax(float* %result) nounwind optsize {
-entry:
- %tmp2 = load float* %result, align 4 ; <float> [#uses=6]
- %tmp4 = getelementptr float* %result, i32 2 ; <float*> [#uses=5]
- %tmp5 = load float* %tmp4, align 4 ; <float> [#uses=10]
- %tmp7 = getelementptr float* %result, i32 4 ; <float*> [#uses=5]
- %tmp8 = load float* %tmp7, align 4 ; <float> [#uses=8]
- %tmp10 = getelementptr float* %result, i32 6 ; <float*> [#uses=3]
- %tmp11 = load float* %tmp10, align 4 ; <float> [#uses=8]
- %tmp12 = fcmp olt float %tmp8, %tmp11 ; <i1> [#uses=5]
- br i1 %tmp12, label %bb, label %bb21
-
-bb: ; preds = %entry
- %tmp23469 = fcmp olt float %tmp5, %tmp8 ; <i1> [#uses=1]
- br i1 %tmp23469, label %bb26, label %bb30
-
-bb21: ; preds = %entry
- %tmp23 = fcmp olt float %tmp5, %tmp11 ; <i1> [#uses=1]
- br i1 %tmp23, label %bb26, label %bb30
-
-bb26: ; preds = %bb21, %bb
- %tmp52471 = fcmp olt float %tmp2, %tmp5 ; <i1> [#uses=1]
- br i1 %tmp52471, label %bb111, label %bb59
-
-bb30: ; preds = %bb21, %bb
- br i1 %tmp12, label %bb40, label %bb50
-
-bb40: ; preds = %bb30
- %tmp52473 = fcmp olt float %tmp2, %tmp8 ; <i1> [#uses=1]
- br i1 %tmp52473, label %bb111, label %bb59
-
-bb50: ; preds = %bb30
- %tmp52 = fcmp olt float %tmp2, %tmp11 ; <i1> [#uses=1]
- br i1 %tmp52, label %bb111, label %bb59
-
-bb59: ; preds = %bb50, %bb40, %bb26
- br i1 %tmp12, label %bb72, label %bb80
-
-bb72: ; preds = %bb59
- %tmp82475 = fcmp olt float %tmp5, %tmp8 ; <i1> [#uses=2]
- %brmerge786 = or i1 %tmp82475, %tmp12 ; <i1> [#uses=1]
- %tmp4.mux787 = select i1 %tmp82475, float* %tmp4, float* %tmp7 ; <float*> [#uses=1]
- br i1 %brmerge786, label %bb111, label %bb103
-
-bb80: ; preds = %bb59
- %tmp82 = fcmp olt float %tmp5, %tmp11 ; <i1> [#uses=2]
- %brmerge = or i1 %tmp82, %tmp12 ; <i1> [#uses=1]
- %tmp4.mux = select i1 %tmp82, float* %tmp4, float* %tmp7 ; <float*> [#uses=1]
- br i1 %brmerge, label %bb111, label %bb103
-
-bb103: ; preds = %bb80, %bb72
- br label %bb111
-
-bb111: ; preds = %bb103, %bb80, %bb72, %bb50, %bb40, %bb26
- %iftmp.0.0.in = phi float* [ %tmp10, %bb103 ], [ %result, %bb26 ], [ %result, %bb40 ], [ %result, %bb50 ], [ %tmp4.mux, %bb80 ], [ %tmp4.mux787, %bb72 ] ; <float*> [#uses=1]
- %iftmp.0.0 = load float* %iftmp.0.0.in ; <float> [#uses=1]
- %tmp125 = fcmp ogt float %tmp8, %tmp11 ; <i1> [#uses=5]
- br i1 %tmp125, label %bb128, label %bb136
-
-bb128: ; preds = %bb111
- %tmp138477 = fcmp ogt float %tmp5, %tmp8 ; <i1> [#uses=1]
- br i1 %tmp138477, label %bb141, label %bb145
-
-bb136: ; preds = %bb111
- %tmp138 = fcmp ogt float %tmp5, %tmp11 ; <i1> [#uses=1]
- br i1 %tmp138, label %bb141, label %bb145
-
-bb141: ; preds = %bb136, %bb128
- %tmp167479 = fcmp ogt float %tmp2, %tmp5 ; <i1> [#uses=1]
- br i1 %tmp167479, label %bb226, label %bb174
-
-bb145: ; preds = %bb136, %bb128
- br i1 %tmp125, label %bb155, label %bb165
-
-bb155: ; preds = %bb145
- %tmp167481 = fcmp ogt float %tmp2, %tmp8 ; <i1> [#uses=1]
- br i1 %tmp167481, label %bb226, label %bb174
-
-bb165: ; preds = %bb145
- %tmp167 = fcmp ogt float %tmp2, %tmp11 ; <i1> [#uses=1]
- br i1 %tmp167, label %bb226, label %bb174
-
-bb174: ; preds = %bb165, %bb155, %bb141
- br i1 %tmp125, label %bb187, label %bb195
-
-bb187: ; preds = %bb174
- %tmp197483 = fcmp ogt float %tmp5, %tmp8 ; <i1> [#uses=2]
- %brmerge790 = or i1 %tmp197483, %tmp125 ; <i1> [#uses=1]
- %tmp4.mux791 = select i1 %tmp197483, float* %tmp4, float* %tmp7 ; <float*> [#uses=1]
- br i1 %brmerge790, label %bb226, label %bb218
-
-bb195: ; preds = %bb174
- %tmp197 = fcmp ogt float %tmp5, %tmp11 ; <i1> [#uses=2]
- %brmerge788 = or i1 %tmp197, %tmp125 ; <i1> [#uses=1]
- %tmp4.mux789 = select i1 %tmp197, float* %tmp4, float* %tmp7 ; <float*> [#uses=1]
- br i1 %brmerge788, label %bb226, label %bb218
-
-bb218: ; preds = %bb195, %bb187
- br label %bb226
-
-bb226: ; preds = %bb218, %bb195, %bb187, %bb165, %bb155, %bb141
- %iftmp.7.0.in = phi float* [ %tmp10, %bb218 ], [ %result, %bb141 ], [ %result, %bb155 ], [ %result, %bb165 ], [ %tmp4.mux789, %bb195 ], [ %tmp4.mux791, %bb187 ] ; <float*> [#uses=1]
- %iftmp.7.0 = load float* %iftmp.7.0.in ; <float> [#uses=1]
- %tmp229 = getelementptr float* %result, i32 1 ; <float*> [#uses=7]
- %tmp230 = load float* %tmp229, align 4 ; <float> [#uses=6]
- %tmp232 = getelementptr float* %result, i32 3 ; <float*> [#uses=5]
- %tmp233 = load float* %tmp232, align 4 ; <float> [#uses=10]
- %tmp235 = getelementptr float* %result, i32 5 ; <float*> [#uses=5]
- %tmp236 = load float* %tmp235, align 4 ; <float> [#uses=8]
- %tmp238 = getelementptr float* %result, i32 7 ; <float*> [#uses=3]
- %tmp239 = load float* %tmp238, align 4 ; <float> [#uses=8]
- %tmp240 = fcmp olt float %tmp236, %tmp239 ; <i1> [#uses=5]
- br i1 %tmp240, label %bb243, label %bb251
-
-bb243: ; preds = %bb226
- %tmp253485 = fcmp olt float %tmp233, %tmp236 ; <i1> [#uses=1]
- br i1 %tmp253485, label %bb256, label %bb260
-
-bb251: ; preds = %bb226
- %tmp253 = fcmp olt float %tmp233, %tmp239 ; <i1> [#uses=1]
- br i1 %tmp253, label %bb256, label %bb260
-
-bb256: ; preds = %bb251, %bb243
- %tmp282487 = fcmp olt float %tmp230, %tmp233 ; <i1> [#uses=1]
- br i1 %tmp282487, label %bb341, label %bb289
-
-bb260: ; preds = %bb251, %bb243
- br i1 %tmp240, label %bb270, label %bb280
-
-bb270: ; preds = %bb260
- %tmp282489 = fcmp olt float %tmp230, %tmp236 ; <i1> [#uses=1]
- br i1 %tmp282489, label %bb341, label %bb289
-
-bb280: ; preds = %bb260
- %tmp282 = fcmp olt float %tmp230, %tmp239 ; <i1> [#uses=1]
- br i1 %tmp282, label %bb341, label %bb289
-
-bb289: ; preds = %bb280, %bb270, %bb256
- br i1 %tmp240, label %bb302, label %bb310
-
-bb302: ; preds = %bb289
- %tmp312491 = fcmp olt float %tmp233, %tmp236 ; <i1> [#uses=2]
- %brmerge793 = or i1 %tmp312491, %tmp240 ; <i1> [#uses=1]
- %tmp232.mux794 = select i1 %tmp312491, float* %tmp232, float* %tmp235 ; <float*> [#uses=1]
- br i1 %brmerge793, label %bb341, label %bb333
-
-bb310: ; preds = %bb289
- %tmp312 = fcmp olt float %tmp233, %tmp239 ; <i1> [#uses=2]
- %brmerge792 = or i1 %tmp312, %tmp240 ; <i1> [#uses=1]
- %tmp232.mux = select i1 %tmp312, float* %tmp232, float* %tmp235 ; <float*> [#uses=1]
- br i1 %brmerge792, label %bb341, label %bb333
-
-bb333: ; preds = %bb310, %bb302
- br label %bb341
-
-bb341: ; preds = %bb333, %bb310, %bb302, %bb280, %bb270, %bb256
- %iftmp.14.0.in = phi float* [ %tmp238, %bb333 ], [ %tmp229, %bb280 ], [ %tmp229, %bb270 ], [ %tmp229, %bb256 ], [ %tmp232.mux, %bb310 ], [ %tmp232.mux794, %bb302 ] ; <float*> [#uses=1]
- %iftmp.14.0 = load float* %iftmp.14.0.in ; <float> [#uses=1]
- %tmp355 = fcmp ogt float %tmp236, %tmp239 ; <i1> [#uses=5]
- br i1 %tmp355, label %bb358, label %bb366
-
-bb358: ; preds = %bb341
- %tmp368493 = fcmp ogt float %tmp233, %tmp236 ; <i1> [#uses=1]
- br i1 %tmp368493, label %bb371, label %bb375
-
-bb366: ; preds = %bb341
- %tmp368 = fcmp ogt float %tmp233, %tmp239 ; <i1> [#uses=1]
- br i1 %tmp368, label %bb371, label %bb375
-
-bb371: ; preds = %bb366, %bb358
- %tmp397495 = fcmp ogt float %tmp230, %tmp233 ; <i1> [#uses=1]
- br i1 %tmp397495, label %bb456, label %bb404
-
-bb375: ; preds = %bb366, %bb358
- br i1 %tmp355, label %bb385, label %bb395
-
-bb385: ; preds = %bb375
- %tmp397497 = fcmp ogt float %tmp230, %tmp236 ; <i1> [#uses=1]
- br i1 %tmp397497, label %bb456, label %bb404
-
-bb395: ; preds = %bb375
- %tmp397 = fcmp ogt float %tmp230, %tmp239 ; <i1> [#uses=1]
- br i1 %tmp397, label %bb456, label %bb404
-
-bb404: ; preds = %bb395, %bb385, %bb371
- br i1 %tmp355, label %bb417, label %bb425
-
-bb417: ; preds = %bb404
- %tmp427499 = fcmp ogt float %tmp233, %tmp236 ; <i1> [#uses=2]
- %brmerge797 = or i1 %tmp427499, %tmp355 ; <i1> [#uses=1]
- %tmp232.mux798 = select i1 %tmp427499, float* %tmp232, float* %tmp235 ; <float*> [#uses=1]
- br i1 %brmerge797, label %bb456, label %bb448
-
-bb425: ; preds = %bb404
- %tmp427 = fcmp ogt float %tmp233, %tmp239 ; <i1> [#uses=2]
- %brmerge795 = or i1 %tmp427, %tmp355 ; <i1> [#uses=1]
- %tmp232.mux796 = select i1 %tmp427, float* %tmp232, float* %tmp235 ; <float*> [#uses=1]
- br i1 %brmerge795, label %bb456, label %bb448
-
-bb448: ; preds = %bb425, %bb417
- br label %bb456
-
-bb456: ; preds = %bb448, %bb425, %bb417, %bb395, %bb385, %bb371
- %iftmp.21.0.in = phi float* [ %tmp238, %bb448 ], [ %tmp229, %bb395 ], [ %tmp229, %bb385 ], [ %tmp229, %bb371 ], [ %tmp232.mux796, %bb425 ], [ %tmp232.mux798, %bb417 ] ; <float*> [#uses=1]
- %iftmp.21.0 = load float* %iftmp.21.0.in ; <float> [#uses=1]
- %tmp458459 = fpext float %iftmp.21.0 to double ; <double> [#uses=1]
- %tmp460461 = fpext float %iftmp.7.0 to double ; <double> [#uses=1]
- %tmp462463 = fpext float %iftmp.14.0 to double ; <double> [#uses=1]
- %tmp464465 = fpext float %iftmp.0.0 to double ; <double> [#uses=1]
- %tmp467 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([48 x i8]* @.str, i32 0, i32 0), double %tmp464465, double %tmp462463, double %tmp460461, double %tmp458459 ) nounwind ; <i32> [#uses=0]
- ret void
-}
-
-declare i32 @printf(i8*, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll
deleted file mode 100644
index 5115e48..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s | grep {a:} | not grep ax
-; RUN: llc < %s | grep {b:} | not grep ax
-; PR2078
-; The clobber list says that "ax" is clobbered. Make sure that eax isn't
-; allocated to the input/output register.
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
-@pixels = weak global i32 0 ; <i32*> [#uses=2]
-
-define void @test() nounwind {
-entry:
- %tmp = load i32* @pixels, align 4 ; <i32> [#uses=1]
- %tmp1 = tail call i32 asm sideeffect "a: $0 $1", "=r,0,~{dirflag},~{fpsr},~{flags},~{ax}"( i32 %tmp ) nounwind ; <i32> [#uses=1]
- store i32 %tmp1, i32* @pixels, align 4
- ret void
-}
-
-define void @test2(i16* %block, i8* %pixels, i32 %line_size) nounwind {
-entry:
- %tmp1 = getelementptr i16* %block, i32 64 ; <i16*> [#uses=1]
- %tmp3 = tail call i8* asm sideeffect "b: $0 $1 $2", "=r,r,0,~{dirflag},~{fpsr},~{flags},~{ax}"( i16* %tmp1, i8* %pixels ) nounwind ; <i8*> [#uses=0]
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll
deleted file mode 100644
index 6b1eefe..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; RUN: llc < %s -regalloc=local -march=x86 -mattr=+mmx | grep esi
-; PR2082
-; Local register allocator was refusing to use ESI, EDI, and EBP so it ran out of
-; registers.
-define void @transpose4x4(i8* %dst, i8* %src, i32 %dst_stride, i32 %src_stride) {
-entry:
- %dst_addr = alloca i8* ; <i8**> [#uses=5]
- %src_addr = alloca i8* ; <i8**> [#uses=5]
- %dst_stride_addr = alloca i32 ; <i32*> [#uses=4]
- %src_stride_addr = alloca i32 ; <i32*> [#uses=4]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i8* %dst, i8** %dst_addr
- store i8* %src, i8** %src_addr
- store i32 %dst_stride, i32* %dst_stride_addr
- store i32 %src_stride, i32* %src_stride_addr
- %tmp = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
- %tmp1 = getelementptr i8* %tmp, i32 0 ; <i8*> [#uses=1]
- %tmp12 = bitcast i8* %tmp1 to i32* ; <i32*> [#uses=1]
- %tmp3 = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
- %tmp4 = load i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
- %tmp5 = getelementptr i8* %tmp3, i32 %tmp4 ; <i8*> [#uses=1]
- %tmp56 = bitcast i8* %tmp5 to i32* ; <i32*> [#uses=1]
- %tmp7 = load i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
- %tmp8 = mul i32 %tmp7, 2 ; <i32> [#uses=1]
- %tmp9 = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
- %tmp10 = getelementptr i8* %tmp9, i32 %tmp8 ; <i8*> [#uses=1]
- %tmp1011 = bitcast i8* %tmp10 to i32* ; <i32*> [#uses=1]
- %tmp13 = load i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
- %tmp14 = mul i32 %tmp13, 3 ; <i32> [#uses=1]
- %tmp15 = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
- %tmp16 = getelementptr i8* %tmp15, i32 %tmp14 ; <i8*> [#uses=1]
- %tmp1617 = bitcast i8* %tmp16 to i32* ; <i32*> [#uses=1]
- %tmp18 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
- %tmp19 = getelementptr i8* %tmp18, i32 0 ; <i8*> [#uses=1]
- %tmp1920 = bitcast i8* %tmp19 to i32* ; <i32*> [#uses=1]
- %tmp21 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
- %tmp22 = load i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
- %tmp23 = getelementptr i8* %tmp21, i32 %tmp22 ; <i8*> [#uses=1]
- %tmp2324 = bitcast i8* %tmp23 to i32* ; <i32*> [#uses=1]
- %tmp25 = load i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
- %tmp26 = mul i32 %tmp25, 2 ; <i32> [#uses=1]
- %tmp27 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
- %tmp28 = getelementptr i8* %tmp27, i32 %tmp26 ; <i8*> [#uses=1]
- %tmp2829 = bitcast i8* %tmp28 to i32* ; <i32*> [#uses=1]
- %tmp30 = load i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
- %tmp31 = mul i32 %tmp30, 3 ; <i32> [#uses=1]
- %tmp32 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
- %tmp33 = getelementptr i8* %tmp32, i32 %tmp31 ; <i8*> [#uses=1]
- %tmp3334 = bitcast i8* %tmp33 to i32* ; <i32*> [#uses=1]
- call void asm sideeffect "movd $4, %mm0 \0A\09movd $5, %mm1 \0A\09movd $6, %mm2 \0A\09movd $7, %mm3 \0A\09punpcklbw %mm1, %mm0 \0A\09punpcklbw %mm3, %mm2 \0A\09movq %mm0, %mm1 \0A\09punpcklwd %mm2, %mm0 \0A\09punpckhwd %mm2, %mm1 \0A\09movd %mm0, $0 \0A\09punpckhdq %mm0, %mm0 \0A\09movd %mm0, $1 \0A\09movd %mm1, $2 \0A\09punpckhdq %mm1, %mm1 \0A\09movd %mm1, $3 \0A\09", "=*m,=*m,=*m,=*m,*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"( i32* %tmp12, i32* %tmp56, i32* %tmp1011, i32* %tmp1617, i32* %tmp1920, i32* %tmp2324, i32* %tmp2829, i32* %tmp3334 ) nounwind
- br label %return
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-22-ReMatBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-02-22-ReMatBug.ll
deleted file mode 100644
index a91ac27..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-22-ReMatBug.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -march=x86 -stats |& grep {Number of re-materialization} | grep 2
-; rdar://5761454
-
- %struct.quad_struct = type { i32, i32, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct* }
-
-define %struct.quad_struct* @MakeTree(i32 %size, i32 %center_x, i32 %center_y, i32 %lo_proc, i32 %hi_proc, %struct.quad_struct* %parent, i32 %ct, i32 %level) nounwind {
-entry:
- br i1 true, label %bb43.i, label %bb.i
-
-bb.i: ; preds = %entry
- ret %struct.quad_struct* null
-
-bb43.i: ; preds = %entry
- br i1 true, label %CheckOutside.exit40.i, label %bb11.i38.i
-
-bb11.i38.i: ; preds = %bb43.i
- ret %struct.quad_struct* null
-
-CheckOutside.exit40.i: ; preds = %bb43.i
- br i1 true, label %CheckOutside.exit30.i, label %bb11.i28.i
-
-bb11.i28.i: ; preds = %CheckOutside.exit40.i
- ret %struct.quad_struct* null
-
-CheckOutside.exit30.i: ; preds = %CheckOutside.exit40.i
- br i1 true, label %CheckOutside.exit20.i, label %bb11.i18.i
-
-bb11.i18.i: ; preds = %CheckOutside.exit30.i
- ret %struct.quad_struct* null
-
-CheckOutside.exit20.i: ; preds = %CheckOutside.exit30.i
- br i1 true, label %bb34, label %bb11.i8.i
-
-bb11.i8.i: ; preds = %CheckOutside.exit20.i
- ret %struct.quad_struct* null
-
-bb34: ; preds = %CheckOutside.exit20.i
- %tmp15.reg2mem.0 = sdiv i32 %size, 2 ; <i32> [#uses=7]
- %tmp85 = sub i32 %center_y, %tmp15.reg2mem.0 ; <i32> [#uses=2]
- %tmp88 = sub i32 %center_x, %tmp15.reg2mem.0 ; <i32> [#uses=2]
- %tmp92 = tail call %struct.quad_struct* @MakeTree( i32 %tmp15.reg2mem.0, i32 %tmp88, i32 %tmp85, i32 0, i32 %hi_proc, %struct.quad_struct* null, i32 2, i32 0 ) nounwind ; <%struct.quad_struct*> [#uses=0]
- %tmp99 = add i32 0, %hi_proc ; <i32> [#uses=1]
- %tmp100 = sdiv i32 %tmp99, 2 ; <i32> [#uses=1]
- %tmp110 = tail call %struct.quad_struct* @MakeTree( i32 %tmp15.reg2mem.0, i32 0, i32 %tmp85, i32 0, i32 %tmp100, %struct.quad_struct* null, i32 3, i32 0 ) nounwind ; <%struct.quad_struct*> [#uses=0]
- %tmp122 = add i32 %tmp15.reg2mem.0, %center_y ; <i32> [#uses=2]
- %tmp129 = tail call %struct.quad_struct* @MakeTree( i32 %tmp15.reg2mem.0, i32 0, i32 %tmp122, i32 0, i32 0, %struct.quad_struct* null, i32 1, i32 0 ) nounwind ; <%struct.quad_struct*> [#uses=0]
- %tmp147 = tail call %struct.quad_struct* @MakeTree( i32 %tmp15.reg2mem.0, i32 %tmp88, i32 %tmp122, i32 %lo_proc, i32 0, %struct.quad_struct* null, i32 0, i32 0 ) nounwind ; <%struct.quad_struct*> [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-25-InlineAsmBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-02-25-InlineAsmBug.ll
deleted file mode 100644
index 1d31859..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-25-InlineAsmBug.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -mtriple=i686-pc-linux-gnu -mattr=+sse2
-; PR2076
-
-define void @h264_h_loop_filter_luma_mmx2(i8* %pix, i32 %stride, i32 %alpha, i32 %beta, i8* %tc0) nounwind {
-entry:
- %tmp164 = getelementptr [16 x i32]* null, i32 0, i32 11 ; <i32*> [#uses=1]
- %tmp169 = getelementptr [16 x i32]* null, i32 0, i32 13 ; <i32*> [#uses=1]
- %tmp174 = getelementptr [16 x i32]* null, i32 0, i32 15 ; <i32*> [#uses=1]
- %tmp154.sum317 = add i32 0, %stride ; <i32> [#uses=1]
- %tmp154.sum315 = mul i32 %stride, 6 ; <i32> [#uses=1]
- %tmp154.sum = mul i32 %stride, 7 ; <i32> [#uses=1]
- %pix_addr.0327.rec = mul i32 0, 0 ; <i32> [#uses=4]
- br i1 false, label %bb292, label %bb32
-
-bb32: ; preds = %entry
- %pix_addr.0327.sum340 = add i32 %pix_addr.0327.rec, 0 ; <i32> [#uses=1]
- %tmp154 = getelementptr i8* %pix, i32 %pix_addr.0327.sum340 ; <i8*> [#uses=1]
- %tmp177178 = bitcast i8* %tmp154 to i32* ; <i32*> [#uses=1]
- %pix_addr.0327.sum339 = add i32 %pix_addr.0327.rec, %tmp154.sum317 ; <i32> [#uses=1]
- %tmp181 = getelementptr i8* %pix, i32 %pix_addr.0327.sum339 ; <i8*> [#uses=1]
- %tmp181182 = bitcast i8* %tmp181 to i32* ; <i32*> [#uses=1]
- %pix_addr.0327.sum338 = add i32 %pix_addr.0327.rec, %tmp154.sum315 ; <i32> [#uses=1]
- %tmp186 = getelementptr i8* %pix, i32 %pix_addr.0327.sum338 ; <i8*> [#uses=1]
- %tmp186187 = bitcast i8* %tmp186 to i32* ; <i32*> [#uses=1]
- %pix_addr.0327.sum337 = add i32 %pix_addr.0327.rec, %tmp154.sum ; <i32> [#uses=1]
- %tmp191 = getelementptr i8* %pix, i32 %pix_addr.0327.sum337 ; <i8*> [#uses=1]
- %tmp191192 = bitcast i8* %tmp191 to i32* ; <i32*> [#uses=1]
- call void asm sideeffect "movd $4, %mm0 \0A\09movd $5, %mm1 \0A\09movd $6, %mm2 \0A\09movd $7, %mm3 \0A\09punpcklbw %mm1, %mm0 \0A\09punpcklbw %mm3, %mm2 \0A\09movq %mm0, %mm1 \0A\09punpcklwd %mm2, %mm0 \0A\09punpckhwd %mm2, %mm1 \0A\09movd %mm0, $0 \0A\09punpckhdq %mm0, %mm0 \0A\09movd %mm0, $1 \0A\09movd %mm1, $2 \0A\09punpckhdq %mm1, %mm1 \0A\09movd %mm1, $3 \0A\09", "=*m,=*m,=*m,=*m,*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"( i32* null, i32* %tmp164, i32* %tmp169, i32* %tmp174, i32* %tmp177178, i32* %tmp181182, i32* %tmp186187, i32* %tmp191192 ) nounwind
- unreachable
-
-bb292: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-25-X86-64-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-02-25-X86-64-CoalescerBug.ll
deleted file mode 100644
index 6615b8c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-25-X86-64-CoalescerBug.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; RUN: llc < %s -march=x86-64
-
- %struct.XX = type <{ i8 }>
- %struct.YY = type { i64 }
- %struct.ZZ = type opaque
-
-define i8 @f(%struct.XX*** %fontMap, %struct.XX* %uen) signext {
-entry:
- %tmp45 = add i16 0, 1 ; <i16> [#uses=2]
- br i1 false, label %bb124, label %bb53
-
-bb53: ; preds = %entry
- %tmp55 = call %struct.YY** @AA( i64 1, %struct.XX* %uen ) ; <%struct.YY**> [#uses=3]
- %tmp2728128 = load %struct.XX** null ; <%struct.XX*> [#uses=1]
- %tmp61 = load %struct.YY** %tmp55, align 8 ; <%struct.YY*> [#uses=1]
- %tmp62 = getelementptr %struct.YY* %tmp61, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp63 = load i64* %tmp62, align 8 ; <i64> [#uses=1]
- %tmp6566 = zext i16 %tmp45 to i64 ; <i64> [#uses=1]
- %tmp67 = shl i64 %tmp6566, 1 ; <i64> [#uses=1]
- call void @BB( %struct.YY** %tmp55, i64 %tmp67, i8 signext 0, %struct.XX* %uen )
- %tmp121131 = icmp eq i16 %tmp45, 1 ; <i1> [#uses=1]
- br i1 %tmp121131, label %bb124, label %bb70.preheader
-
-bb70.preheader: ; preds = %bb53
- %tmp72 = bitcast %struct.XX* %tmp2728128 to %struct.ZZ*** ; <%struct.ZZ***> [#uses=1]
- br label %bb70
-
-bb70: ; preds = %bb119, %bb70.preheader
- %indvar133 = phi i32 [ %indvar.next134, %bb119 ], [ 0, %bb70.preheader ] ; <i32> [#uses=2]
- %tmp.135 = trunc i64 %tmp63 to i32 ; <i32> [#uses=1]
- %tmp136 = shl i32 %indvar133, 1 ; <i32> [#uses=1]
- %DD = add i32 %tmp136, %tmp.135 ; <i32> [#uses=1]
- %tmp73 = load %struct.ZZ*** %tmp72, align 8 ; <%struct.ZZ**> [#uses=0]
- br i1 false, label %bb119, label %bb77
-
-bb77: ; preds = %bb70
- %tmp8384 = trunc i32 %DD to i16 ; <i16> [#uses=1]
- %tmp85 = sub i16 0, %tmp8384 ; <i16> [#uses=1]
- store i16 %tmp85, i16* null, align 8
- call void @CC( %struct.YY** %tmp55, i64 0, i64 2, i8* null, %struct.XX* %uen )
- ret i8 0
-
-bb119: ; preds = %bb70
- %indvar.next134 = add i32 %indvar133, 1 ; <i32> [#uses=1]
- br label %bb70
-
-bb124: ; preds = %bb53, %entry
- ret i8 undef
-}
-
-declare %struct.YY** @AA(i64, %struct.XX*)
-
-declare void @BB(%struct.YY**, i64, i8 signext , %struct.XX*)
-
-declare void @CC(%struct.YY**, i64, i64, i8*, %struct.XX*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-26-AsmDirectMemOp.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-02-26-AsmDirectMemOp.ll
deleted file mode 100644
index 0b4eb3a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-26-AsmDirectMemOp.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-linux-gnu"
-
-define void @dct_unquantize_h263_intra_mmx(i16* %block, i32 %n, i32 %qscale) nounwind {
-entry:
- %tmp1 = shl i32 %qscale, 1 ; <i32> [#uses=1]
- br i1 false, label %bb46, label %bb59
-
-bb46: ; preds = %entry
- ret void
-
-bb59: ; preds = %entry
- tail call void asm sideeffect "movd $1, %mm6 \0A\09packssdw %mm6, %mm6 \0A\09packssdw %mm6, %mm6 \0A\09movd $2, %mm5 \0A\09pxor %mm7, %mm7 \0A\09packssdw %mm5, %mm5 \0A\09packssdw %mm5, %mm5 \0A\09psubw %mm5, %mm7 \0A\09pxor %mm4, %mm4 \0A\09.align 1<<4\0A\091: \0A\09movq ($0, $3), %mm0 \0A\09movq 8($0, $3), %mm1 \0A\09pmullw %mm6, %mm0 \0A\09pmullw %mm6, %mm1 \0A\09movq ($0, $3), %mm2 \0A\09movq 8($0, $3), %mm3 \0A\09pcmpgtw %mm4, %mm2 \0A\09pcmpgtw %mm4, %mm3 \0A\09pxor %mm2, %mm0 \0A\09pxor %mm3, %mm1 \0A\09paddw %mm7, %mm0 \0A\09paddw %mm7, %mm1 \0A\09pxor %mm0, %mm2 \0A\09pxor %mm1, %mm3 \0A\09pcmpeqw %mm7, %mm0 \0A\09pcmpeqw %mm7, %mm1 \0A\09pandn %mm2, %mm0 \0A\09pandn %mm3, %mm1 \0A\09movq %mm0, ($0, $3) \0A\09movq %mm1, 8($0, $3) \0A\09add $$16, $3 \0A\09jng 1b \0A\09", "r,imr,imr,r,~{dirflag},~{fpsr},~{flags},~{memory}"( i16* null, i32 %tmp1, i32 0, i32 0 ) nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll
deleted file mode 100644
index ad7950c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll
+++ /dev/null
@@ -1,66 +0,0 @@
-; RUN: llc < %s -march=x86
-
- %struct.CompAtom = type <{ %struct.Position, float, i32 }>
- %struct.Lattice = type { %struct.Position, %struct.Position, %struct.Position, %struct.Position, %struct.Position, %struct.Position, %struct.Position, i32, i32, i32 }
- %struct.Position = type { double, double, double }
-
-define fastcc %struct.CompAtom* @_ZNK7Lattice6createEP8CompAtomii(%struct.Lattice* %this, %struct.CompAtom* %d, i32 %n, i32 %i) {
-entry:
- %tmp18 = tail call i8* @_Znam( i32 0 ) ; <i8*> [#uses=1]
- %tmp1819 = bitcast i8* %tmp18 to %struct.CompAtom* ; <%struct.CompAtom*> [#uses=4]
- %tmp3286 = icmp eq i32 %n, 0 ; <i1> [#uses=1]
- br i1 %tmp3286, label %bb35, label %bb24
-
-bb24: ; preds = %bb24, %entry
- %tmp9.0.reg2mem.0.rec = phi i32 [ %indvar.next, %bb24 ], [ 0, %entry ] ; <i32> [#uses=3]
- %tmp3.i.i = getelementptr %struct.CompAtom* %tmp1819, i32 %tmp9.0.reg2mem.0.rec, i32 0, i32 1 ; <double*> [#uses=0]
- %tmp5.i.i = getelementptr %struct.CompAtom* %tmp1819, i32 %tmp9.0.reg2mem.0.rec, i32 0, i32 2 ; <double*> [#uses=1]
- store double -9.999900e+04, double* %tmp5.i.i, align 4
- %indvar.next = add i32 %tmp9.0.reg2mem.0.rec, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %n ; <i1> [#uses=1]
- br i1 %exitcond, label %bb35, label %bb24
-
-bb35: ; preds = %bb24, %entry
- %tmp42 = sdiv i32 %i, 9 ; <i32> [#uses=1]
- %tmp43 = add i32 %tmp42, -1 ; <i32> [#uses=1]
- %tmp4344 = sitofp i32 %tmp43 to double ; <double> [#uses=1]
- %tmp17.i76 = fmul double %tmp4344, 0.000000e+00 ; <double> [#uses=1]
- %tmp48 = sdiv i32 %i, 3 ; <i32> [#uses=1]
- %tmp49 = srem i32 %tmp48, 3 ; <i32> [#uses=1]
- %tmp50 = add i32 %tmp49, -1 ; <i32> [#uses=1]
- %tmp5051 = sitofp i32 %tmp50 to double ; <double> [#uses=1]
- %tmp17.i63 = fmul double %tmp5051, 0.000000e+00 ; <double> [#uses=1]
- %tmp55 = srem i32 %i, 3 ; <i32> [#uses=1]
- %tmp56 = add i32 %tmp55, -1 ; <i32> [#uses=1]
- %tmp5657 = sitofp i32 %tmp56 to double ; <double> [#uses=1]
- %tmp15.i49 = getelementptr %struct.Lattice* %this, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp16.i50 = load double* %tmp15.i49, align 4 ; <double> [#uses=1]
- %tmp17.i = fmul double %tmp5657, %tmp16.i50 ; <double> [#uses=1]
- %tmp20.i39 = fadd double %tmp17.i, %tmp17.i63 ; <double> [#uses=1]
- %tmp20.i23 = fadd double %tmp20.i39, %tmp17.i76 ; <double> [#uses=1]
- br i1 false, label %bb58.preheader, label %bb81
-
-bb58.preheader: ; preds = %bb35
- %smax = select i1 false, i32 1, i32 %n ; <i32> [#uses=1]
- br label %bb58
-
-bb58: ; preds = %bb58, %bb58.preheader
- %tmp20.i7 = getelementptr %struct.CompAtom* %d, i32 0, i32 2 ; <i32*> [#uses=2]
- %tmp25.i = getelementptr %struct.CompAtom* %tmp1819, i32 0, i32 2 ; <i32*> [#uses=2]
- %tmp74.i = load i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
- %tmp82.i = and i32 %tmp74.i, 134217728 ; <i32> [#uses=1]
- %tmp85.i = or i32 0, %tmp82.i ; <i32> [#uses=1]
- store i32 %tmp85.i, i32* %tmp25.i, align 1
- %tmp88.i = load i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
- %tmp95.i = and i32 %tmp88.i, -268435456 ; <i32> [#uses=1]
- %tmp97.i = or i32 0, %tmp95.i ; <i32> [#uses=1]
- store i32 %tmp97.i, i32* %tmp25.i, align 1
- %tmp6.i = fadd double 0.000000e+00, %tmp20.i23 ; <double> [#uses=0]
- %exitcond96 = icmp eq i32 0, %smax ; <i1> [#uses=1]
- br i1 %exitcond96, label %bb81, label %bb58
-
-bb81: ; preds = %bb58, %bb35
- ret %struct.CompAtom* %tmp1819
-}
-
-declare i8* @_Znam(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-27-PEICrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-02-27-PEICrash.ll
deleted file mode 100644
index d842967..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-02-27-PEICrash.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-
-define i64 @__divsc3(float %a, float %b, float %c, float %d) nounwind readnone {
-entry:
- br i1 false, label %bb56, label %bb33
-
-bb33: ; preds = %entry
- br label %bb56
-
-bb56: ; preds = %bb33, %entry
- %tmp36.pn = phi float [ 0.000000e+00, %bb33 ], [ 0.000000e+00, %entry ] ; <float> [#uses=1]
- %b.pn509 = phi float [ %b, %bb33 ], [ %a, %entry ] ; <float> [#uses=1]
- %a.pn = phi float [ %a, %bb33 ], [ %b, %entry ] ; <float> [#uses=1]
- %tmp41.pn508 = phi float [ 0.000000e+00, %bb33 ], [ 0.000000e+00, %entry ] ; <float> [#uses=1]
- %tmp51.pn = phi float [ 0.000000e+00, %bb33 ], [ %a, %entry ] ; <float> [#uses=1]
- %tmp44.pn = fmul float %tmp36.pn, %b.pn509 ; <float> [#uses=1]
- %tmp46.pn = fadd float %tmp44.pn, %a.pn ; <float> [#uses=1]
- %tmp53.pn = fsub float 0.000000e+00, %tmp51.pn ; <float> [#uses=1]
- %x.0 = fdiv float %tmp46.pn, %tmp41.pn508 ; <float> [#uses=1]
- %y.0 = fdiv float %tmp53.pn, 0.000000e+00 ; <float> [#uses=1]
- br i1 false, label %bb433, label %bb98
-
-bb98: ; preds = %bb56
- %tmp102 = fmul float 0.000000e+00, %a ; <float> [#uses=1]
- %tmp106 = fmul float 0.000000e+00, %b ; <float> [#uses=1]
- br label %bb433
-
-bb433: ; preds = %bb98, %bb56
- %x.1 = phi float [ %tmp102, %bb98 ], [ %x.0, %bb56 ] ; <float> [#uses=0]
- %y.1 = phi float [ %tmp106, %bb98 ], [ %y.0, %bb56 ] ; <float> [#uses=1]
- %tmp460 = fadd float %y.1, 0.000000e+00 ; <float> [#uses=0]
- ret i64 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-06-frem-fpstack.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-03-06-frem-fpstack.ll
deleted file mode 100644
index 70a83b5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-06-frem-fpstack.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=i386
-; PR2122
-define float @func(float %a, float %b) nounwind {
-entry:
- %tmp3 = frem float %a, %b ; <float> [#uses=1]
- ret float %tmp3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-07-APIntBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-03-07-APIntBug.ll
deleted file mode 100644
index 84e4827..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-07-APIntBug.ll
+++ /dev/null
@@ -1,94 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=i386 | not grep 255
-
- %struct.CONSTRAINT = type { i32, i32, i32, i32 }
- %struct.FIRST_UNION = type { %struct.anon }
- %struct.FOURTH_UNION = type { %struct.CONSTRAINT }
- %struct.LIST = type { %struct.rec*, %struct.rec* }
- %struct.SECOND_UNION = type { { i16, i8, i8 } }
- %struct.THIRD_UNION = type { { [2 x i32], [2 x i32] } }
- %struct.anon = type { i8, i8, i32 }
- %struct.head_type = type { [2 x %struct.LIST], %struct.FIRST_UNION, %struct.SECOND_UNION, %struct.THIRD_UNION, %struct.FOURTH_UNION, %struct.rec*, { %struct.rec* }, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, i32 }
- %struct.rec = type { %struct.head_type }
- %struct.symbol_type = type <{ [2 x %struct.LIST], %struct.FIRST_UNION, %struct.SECOND_UNION, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, i16, i16, i8, i8, i8, i8 }>
- %struct.word_type = type { [2 x %struct.LIST], %struct.FIRST_UNION, %struct.SECOND_UNION, %struct.THIRD_UNION, [4 x i8] }
-
-define void @InsertSym_bb1163(%struct.rec** %s) {
-newFuncRoot:
- br label %bb1163
-bb1233.exitStub: ; preds = %bb1163
- ret void
-bb1163: ; preds = %newFuncRoot
- %tmp1164 = load %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
- %tmp1165 = getelementptr %struct.rec* %tmp1164, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
- %tmp11651166 = bitcast %struct.head_type* %tmp1165 to %struct.symbol_type* ; <%struct.symbol_type*> [#uses=1]
- %tmp1167 = getelementptr %struct.symbol_type* %tmp11651166, i32 0, i32 3 ; <%struct.rec**> [#uses=1]
- %tmp1168 = load %struct.rec** %tmp1167, align 1 ; <%struct.rec*> [#uses=2]
- %tmp1169 = load %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
- %tmp1170 = getelementptr %struct.rec* %tmp1169, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
- %tmp11701171 = bitcast %struct.head_type* %tmp1170 to %struct.symbol_type* ; <%struct.symbol_type*> [#uses=1]
- %tmp1172 = getelementptr %struct.symbol_type* %tmp11701171, i32 0, i32 3 ; <%struct.rec**> [#uses=1]
- %tmp1173 = load %struct.rec** %tmp1172, align 1 ; <%struct.rec*> [#uses=2]
- %tmp1174 = getelementptr %struct.rec* %tmp1173, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
- %tmp11741175 = bitcast %struct.head_type* %tmp1174 to %struct.word_type* ; <%struct.word_type*> [#uses=1]
- %tmp1176 = getelementptr %struct.word_type* %tmp11741175, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
- %tmp1177 = getelementptr %struct.SECOND_UNION* %tmp1176, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
- %tmp11771178 = bitcast { i16, i8, i8 }* %tmp1177 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
- %tmp1179 = getelementptr <{ i8, i8, i8, i8 }>* %tmp11771178, i32 0, i32 2 ; <i8*> [#uses=2]
- %mask1180 = and i8 1, 1 ; <i8> [#uses=2]
- %tmp1181 = load i8* %tmp1179, align 1 ; <i8> [#uses=1]
- %tmp1182 = shl i8 %mask1180, 7 ; <i8> [#uses=1]
- %tmp1183 = and i8 %tmp1181, 127 ; <i8> [#uses=1]
- %tmp1184 = or i8 %tmp1183, %tmp1182 ; <i8> [#uses=1]
- store i8 %tmp1184, i8* %tmp1179, align 1
- %mask1185 = and i8 %mask1180, 1 ; <i8> [#uses=0]
- %tmp1186 = getelementptr %struct.rec* %tmp1173, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
- %tmp11861187 = bitcast %struct.head_type* %tmp1186 to %struct.word_type* ; <%struct.word_type*> [#uses=1]
- %tmp1188 = getelementptr %struct.word_type* %tmp11861187, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
- %tmp1189 = getelementptr %struct.SECOND_UNION* %tmp1188, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
- %tmp11891190 = bitcast { i16, i8, i8 }* %tmp1189 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
- %tmp1191 = getelementptr <{ i8, i8, i8, i8 }>* %tmp11891190, i32 0, i32 2 ; <i8*> [#uses=1]
- %tmp1192 = load i8* %tmp1191, align 1 ; <i8> [#uses=1]
- %tmp1193 = lshr i8 %tmp1192, 7 ; <i8> [#uses=1]
- %mask1194 = and i8 %tmp1193, 1 ; <i8> [#uses=2]
- %mask1195 = and i8 %mask1194, 1 ; <i8> [#uses=0]
- %tmp1196 = getelementptr %struct.rec* %tmp1168, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
- %tmp11961197 = bitcast %struct.head_type* %tmp1196 to %struct.word_type* ; <%struct.word_type*> [#uses=1]
- %tmp1198 = getelementptr %struct.word_type* %tmp11961197, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
- %tmp1199 = getelementptr %struct.SECOND_UNION* %tmp1198, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
- %tmp11991200 = bitcast { i16, i8, i8 }* %tmp1199 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
- %tmp1201 = getelementptr <{ i8, i8, i8, i8 }>* %tmp11991200, i32 0, i32 1 ; <i8*> [#uses=2]
- %mask1202 = and i8 %mask1194, 1 ; <i8> [#uses=2]
- %tmp1203 = load i8* %tmp1201, align 1 ; <i8> [#uses=1]
- %tmp1204 = shl i8 %mask1202, 1 ; <i8> [#uses=1]
- %tmp1205 = and i8 %tmp1204, 2 ; <i8> [#uses=1]
- %tmp1206 = and i8 %tmp1203, -3 ; <i8> [#uses=1]
- %tmp1207 = or i8 %tmp1206, %tmp1205 ; <i8> [#uses=1]
- store i8 %tmp1207, i8* %tmp1201, align 1
- %mask1208 = and i8 %mask1202, 1 ; <i8> [#uses=0]
- %tmp1209 = getelementptr %struct.rec* %tmp1168, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
- %tmp12091210 = bitcast %struct.head_type* %tmp1209 to %struct.word_type* ; <%struct.word_type*> [#uses=1]
- %tmp1211 = getelementptr %struct.word_type* %tmp12091210, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
- %tmp1212 = getelementptr %struct.SECOND_UNION* %tmp1211, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
- %tmp12121213 = bitcast { i16, i8, i8 }* %tmp1212 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
- %tmp1214 = getelementptr <{ i8, i8, i8, i8 }>* %tmp12121213, i32 0, i32 1 ; <i8*> [#uses=1]
- %tmp1215 = load i8* %tmp1214, align 1 ; <i8> [#uses=1]
- %tmp1216 = shl i8 %tmp1215, 6 ; <i8> [#uses=1]
- %tmp1217 = lshr i8 %tmp1216, 7 ; <i8> [#uses=1]
- %mask1218 = and i8 %tmp1217, 1 ; <i8> [#uses=2]
- %mask1219 = and i8 %mask1218, 1 ; <i8> [#uses=0]
- %tmp1220 = load %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
- %tmp1221 = getelementptr %struct.rec* %tmp1220, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
- %tmp12211222 = bitcast %struct.head_type* %tmp1221 to %struct.word_type* ; <%struct.word_type*> [#uses=1]
- %tmp1223 = getelementptr %struct.word_type* %tmp12211222, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
- %tmp1224 = getelementptr %struct.SECOND_UNION* %tmp1223, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
- %tmp12241225 = bitcast { i16, i8, i8 }* %tmp1224 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
- %tmp1226 = getelementptr <{ i8, i8, i8, i8 }>* %tmp12241225, i32 0, i32 1 ; <i8*> [#uses=2]
- %mask1227 = and i8 %mask1218, 1 ; <i8> [#uses=2]
- %tmp1228 = load i8* %tmp1226, align 1 ; <i8> [#uses=1]
- %tmp1229 = and i8 %mask1227, 1 ; <i8> [#uses=1]
- %tmp1230 = and i8 %tmp1228, -2 ; <i8> [#uses=1]
- %tmp1231 = or i8 %tmp1230, %tmp1229 ; <i8> [#uses=1]
- store i8 %tmp1231, i8* %tmp1226, align 1
- %mask1232 = and i8 %mask1227, 1 ; <i8> [#uses=0]
- br label %bb1233.exitStub
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll
deleted file mode 100644
index cd2d609..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mtriple=i386-pc-linux-gnu -relocation-model=pic -disable-fp-elim
-; RUN: llc < %s -mtriple=i386-pc-linux-gnu -relocation-model=pic -disable-fp-elim -schedule-livein-copies | not grep {Number of register spills}
-; PR2134
-
-declare fastcc i8* @w_addchar(i8*, i32*, i32*, i8 signext ) nounwind
-
-define x86_stdcallcc i32 @parse_backslash(i8** inreg %word, i32* inreg %word_length, i32* inreg %max_length) nounwind {
-entry:
- %tmp6 = load i8* null, align 1 ; <i8> [#uses=1]
- br label %bb13
-bb13: ; preds = %entry
- %tmp26 = call fastcc i8* @w_addchar( i8* null, i32* %word_length, i32* %max_length, i8 signext %tmp6 ) nounwind ; <i8*> [#uses=1]
- store i8* %tmp26, i8** %word, align 4
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll
deleted file mode 100644
index e673d31..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc < %s -relocation-model=pic | grep TLSGD | count 2
-; PR2137
-
-; ModuleID = '1.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-linux-gnu"
- %struct.__res_state = type { i32 }
-@__resp = thread_local global %struct.__res_state* @_res ; <%struct.__res_state**> [#uses=1]
-@_res = global %struct.__res_state zeroinitializer, section ".bss" ; <%struct.__res_state*> [#uses=1]
-
-@__libc_resp = hidden alias %struct.__res_state** @__resp ; <%struct.__res_state**> [#uses=2]
-
-define i32 @foo() {
-entry:
- %retval = alloca i32 ; <i32*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = load %struct.__res_state** @__libc_resp, align 4 ; <%struct.__res_state*> [#uses=1]
- %tmp1 = getelementptr %struct.__res_state* %tmp, i32 0, i32 0 ; <i32*> [#uses=1]
- store i32 0, i32* %tmp1, align 4
- br label %return
-return: ; preds = %entry
- %retval2 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval2
-}
-
-define i32 @bar() {
-entry:
- %retval = alloca i32 ; <i32*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = load %struct.__res_state** @__libc_resp, align 4 ; <%struct.__res_state*> [#uses=1]
- %tmp1 = getelementptr %struct.__res_state* %tmp, i32 0, i32 0 ; <i32*> [#uses=1]
- store i32 1, i32* %tmp1, align 4
- br label %return
-return: ; preds = %entry
- %retval2 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-13-TwoAddrPassCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-03-13-TwoAddrPassCrash.ll
deleted file mode 100644
index c6ba22e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-13-TwoAddrPassCrash.ll
+++ /dev/null
@@ -1,68 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define i16 @t(i32 %depth) signext nounwind {
-entry:
- br i1 false, label %bb74, label %bb
-bb: ; preds = %entry
- ret i16 0
-bb74: ; preds = %entry
- switch i32 0, label %bail [
- i32 17, label %bb84
- i32 18, label %bb81
- i32 33, label %bb80
- i32 34, label %bb84
- ]
-bb80: ; preds = %bb74
- switch i32 %depth, label %bb103 [
- i32 16, label %bb96
- i32 32, label %bb91
- i32 846624121, label %bb96
- i32 1094862674, label %bb91
- i32 1096368963, label %bb91
- i32 1111970369, label %bb91
- i32 1278555445, label %bb96
- i32 1278555701, label %bb96
- i32 1380401729, label %bb91
- i32 1668118891, label %bb91
- i32 1916022840, label %bb91
- i32 1983131704, label %bb91
- i32 2037741171, label %bb96
- i32 2037741173, label %bb96
- ]
-bb81: ; preds = %bb74
- ret i16 0
-bb84: ; preds = %bb74, %bb74
- switch i32 %depth, label %bb103 [
- i32 16, label %bb96
- i32 32, label %bb91
- i32 846624121, label %bb96
- i32 1094862674, label %bb91
- i32 1096368963, label %bb91
- i32 1111970369, label %bb91
- i32 1278555445, label %bb96
- i32 1278555701, label %bb96
- i32 1380401729, label %bb91
- i32 1668118891, label %bb91
- i32 1916022840, label %bb91
- i32 1983131704, label %bb91
- i32 2037741171, label %bb96
- i32 2037741173, label %bb96
- ]
-bb91: ; preds = %bb84, %bb84, %bb84, %bb84, %bb84, %bb84, %bb84, %bb84, %bb80, %bb80, %bb80, %bb80, %bb80, %bb80, %bb80, %bb80
- %wMB.0.reg2mem.0 = phi i16 [ 16, %bb80 ], [ 16, %bb80 ], [ 16, %bb80 ], [ 16, %bb80 ], [ 16, %bb80 ], [ 16, %bb80 ], [ 16, %bb80 ], [ 16, %bb80 ], [ 0, %bb84 ], [ 0, %bb84 ], [ 0, %bb84 ], [ 0, %bb84 ], [ 0, %bb84 ], [ 0, %bb84 ], [ 0, %bb84 ], [ 0, %bb84 ] ; <i16> [#uses=2]
- %tmp941478 = shl i16 %wMB.0.reg2mem.0, 2 ; <i16> [#uses=1]
- br label %bb103
-bb96: ; preds = %bb84, %bb84, %bb84, %bb84, %bb84, %bb84, %bb80, %bb80, %bb80, %bb80, %bb80, %bb80
- ret i16 0
-bb103: ; preds = %bb91, %bb84, %bb80
- %wMB.0.reg2mem.2 = phi i16 [ %wMB.0.reg2mem.0, %bb91 ], [ 16, %bb80 ], [ 0, %bb84 ] ; <i16> [#uses=1]
- %bBump.0 = phi i16 [ %tmp941478, %bb91 ], [ 16, %bb80 ], [ 0, %bb84 ] ; <i16> [#uses=0]
- br i1 false, label %bb164, label %UnifiedReturnBlock
-bb164: ; preds = %bb103
- %tmp167168 = sext i16 %wMB.0.reg2mem.2 to i32 ; <i32> [#uses=0]
- ret i16 0
-bail: ; preds = %bb74
- ret i16 0
-UnifiedReturnBlock: ; preds = %bb103
- ret i16 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-14-SpillerCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-03-14-SpillerCrash.ll
deleted file mode 100644
index 8946415..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-14-SpillerCrash.ll
+++ /dev/null
@@ -1,48 +0,0 @@
-; RUN: llc < %s -mtriple=i386-pc-linux-gnu
-; PR2138
-
- %struct.__locale_struct = type { [13 x %struct.locale_data*], i16*, i32*, i32*, [13 x i8*] }
- %struct.anon = type { i8* }
- %struct.locale_data = type { i8*, i8*, i32, i32, { void (%struct.locale_data*)*, %struct.anon }, i32, i32, i32, [0 x %struct.locale_data_value] }
- %struct.locale_data_value = type { i32* }
-
-@wcstoll_l = alias i64 (i32*, i32**, i32, %struct.__locale_struct*)* @__wcstoll_l ; <i64 (i32*, i32**, i32, %struct.__locale_struct*)*> [#uses=0]
-
-define i64 @____wcstoll_l_internal(i32* %nptr, i32** %endptr, i32 %base, i32 %group, %struct.__locale_struct* %loc) nounwind {
-entry:
- %tmp27 = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp83 = getelementptr i32* %nptr, i32 1 ; <i32*> [#uses=1]
- %tmp233 = add i32 0, -48 ; <i32> [#uses=1]
- br label %bb271.us
-bb271.us: ; preds = %entry
- br label %bb374.outer
-bb311.split: ; preds = %bb305.us
- %tmp313 = add i32 %tmp378.us, -48 ; <i32> [#uses=1]
- br i1 false, label %bb374.outer, label %bb383
-bb327.split: ; preds = %bb314.us
- ret i64 0
-bb374.outer: ; preds = %bb311.split, %bb271.us
- %tmp370371552.pn.in = phi i32 [ %tmp233, %bb271.us ], [ %tmp313, %bb311.split ] ; <i32> [#uses=1]
- %tmp278279.pn = phi i64 [ 0, %bb271.us ], [ %tmp373.reg2mem.0.ph, %bb311.split ] ; <i64> [#uses=1]
- %s.5.ph = phi i32* [ null, %bb271.us ], [ %tmp376.us, %bb311.split ] ; <i32*> [#uses=1]
- %tmp366367550.pn = sext i32 %base to i64 ; <i64> [#uses=1]
- %tmp370371552.pn = zext i32 %tmp370371552.pn.in to i64 ; <i64> [#uses=1]
- %tmp369551.pn = mul i64 %tmp278279.pn, %tmp366367550.pn ; <i64> [#uses=1]
- %tmp373.reg2mem.0.ph = add i64 %tmp370371552.pn, %tmp369551.pn ; <i64> [#uses=1]
- br label %bb374.us
-bb374.us: ; preds = %bb314.us, %bb374.outer
- %tmp376.us = getelementptr i32* %s.5.ph, i32 0 ; <i32*> [#uses=3]
- %tmp378.us = load i32* %tmp376.us, align 4 ; <i32> [#uses=2]
- %tmp302.us = icmp eq i32* %tmp376.us, %tmp83 ; <i1> [#uses=1]
- %bothcond484.us = or i1 false, %tmp302.us ; <i1> [#uses=1]
- br i1 %bothcond484.us, label %bb383, label %bb305.us
-bb305.us: ; preds = %bb374.us
- br i1 false, label %bb311.split, label %bb314.us
-bb314.us: ; preds = %bb305.us
- %tmp320.us = icmp eq i32 %tmp378.us, %tmp27 ; <i1> [#uses=1]
- br i1 %tmp320.us, label %bb374.us, label %bb327.split
-bb383: ; preds = %bb374.us, %bb311.split
- ret i64 0
-}
-
-declare i64 @__wcstoll_l(i32*, i32**, i32, %struct.__locale_struct*) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-18-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-03-18-CoalescerBug.ll
deleted file mode 100644
index ccc4d75..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-18-CoalescerBug.ll
+++ /dev/null
@@ -1,51 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+sse2 -disable-fp-elim | grep movss | count 1
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+sse2 -disable-fp-elim -stats |& grep {Number of re-materialization} | grep 1
-
- %struct..0objc_object = type opaque
- %struct.OhBoy = type { }
- %struct.BooHoo = type { i32 }
- %struct.objc_selector = type opaque
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (void (%struct.OhBoy*, %struct.objc_selector*, i32, %struct.BooHoo*)* @"-[MessageHeaderDisplay adjustFontSizeBy:viewingState:]" to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define void @"-[MessageHeaderDisplay adjustFontSizeBy:viewingState:]"(%struct.OhBoy* %self, %struct.objc_selector* %_cmd, i32 %delta, %struct.BooHoo* %viewingState) nounwind {
-entry:
- %tmp19 = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp24 = tail call float bitcast (void (%struct..0objc_object*, ...)* @objc_msgSend_fpret to float (%struct..0objc_object*, %struct.objc_selector*)*)( %struct..0objc_object* null, %struct.objc_selector* null ) nounwind ; <float> [#uses=2]
- %tmp30 = icmp sgt i32 %delta, 0 ; <i1> [#uses=1]
- br i1 %tmp30, label %bb33, label %bb87.preheader
-bb33: ; preds = %entry
- %tmp28 = fadd float 0.000000e+00, %tmp24 ; <float> [#uses=1]
- %tmp35 = fcmp ogt float %tmp28, 1.800000e+01 ; <i1> [#uses=1]
- br i1 %tmp35, label %bb38, label %bb87.preheader
-bb38: ; preds = %bb33
- %tmp53 = add i32 %tmp19, %delta ; <i32> [#uses=2]
- br i1 false, label %bb50, label %bb43
-bb43: ; preds = %bb38
- store i32 %tmp53, i32* null, align 4
- ret void
-bb50: ; preds = %bb38
- %tmp56 = fsub float 1.800000e+01, %tmp24 ; <float> [#uses=1]
- %tmp57 = fcmp ugt float 0.000000e+00, %tmp56 ; <i1> [#uses=1]
- br i1 %tmp57, label %bb64, label %bb87.preheader
-bb64: ; preds = %bb50
- ret void
-bb87.preheader: ; preds = %bb50, %bb33, %entry
- %usableDelta.0 = phi i32 [ %delta, %entry ], [ %delta, %bb33 ], [ %tmp53, %bb50 ] ; <i32> [#uses=1]
- %tmp100 = tail call %struct..0objc_object* (%struct..0objc_object*, %struct.objc_selector*, ...)* @objc_msgSend( %struct..0objc_object* null, %struct.objc_selector* null, %struct..0objc_object* null ) nounwind ; <%struct..0objc_object*> [#uses=2]
- %tmp106 = tail call %struct..0objc_object* (%struct..0objc_object*, %struct.objc_selector*, ...)* @objc_msgSend( %struct..0objc_object* %tmp100, %struct.objc_selector* null ) nounwind ; <%struct..0objc_object*> [#uses=0]
- %umax = select i1 false, i32 1, i32 0 ; <i32> [#uses=1]
- br label %bb108
-bb108: ; preds = %bb108, %bb87.preheader
- %attachmentIndex.0.reg2mem.0 = phi i32 [ 0, %bb87.preheader ], [ %indvar.next, %bb108 ] ; <i32> [#uses=2]
- %tmp114 = tail call %struct..0objc_object* (%struct..0objc_object*, %struct.objc_selector*, ...)* @objc_msgSend( %struct..0objc_object* %tmp100, %struct.objc_selector* null, i32 %attachmentIndex.0.reg2mem.0 ) nounwind ; <%struct..0objc_object*> [#uses=1]
- %tmp121 = tail call %struct..0objc_object* (%struct..0objc_object*, %struct.objc_selector*, ...)* @objc_msgSend( %struct..0objc_object* %tmp114, %struct.objc_selector* null, i32 %usableDelta.0 ) nounwind ; <%struct..0objc_object*> [#uses=0]
- %indvar.next = add i32 %attachmentIndex.0.reg2mem.0, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %umax ; <i1> [#uses=1]
- br i1 %exitcond, label %bb130, label %bb108
-bb130: ; preds = %bb108
- ret void
-}
-
-declare %struct..0objc_object* @objc_msgSend(%struct..0objc_object*, %struct.objc_selector*, ...)
-
-declare void @objc_msgSend_fpret(%struct..0objc_object*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-19-DAGCombinerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-03-19-DAGCombinerBug.ll
deleted file mode 100644
index eaa883c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-19-DAGCombinerBug.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define i32 @t() nounwind {
-entry:
- %tmp54 = add i32 0, 1 ; <i32> [#uses=1]
- br i1 false, label %bb71, label %bb77
-bb71: ; preds = %entry
- %tmp74 = shl i32 %tmp54, 1 ; <i32> [#uses=1]
- %tmp76 = ashr i32 %tmp74, 3 ; <i32> [#uses=1]
- br label %bb77
-bb77: ; preds = %bb71, %entry
- %payLoadSize.0 = phi i32 [ %tmp76, %bb71 ], [ 0, %entry ] ; <i32> [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-23-DarwinAsmComments.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-03-23-DarwinAsmComments.ll
deleted file mode 100644
index 4dc3a10..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-23-DarwinAsmComments.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -asm-verbose | grep {#} | not grep -v {##}
-
- %struct.AGenericCall = type { %struct.AGenericManager*, %struct.ComponentParameters*, i32* }
- %struct.AGenericManager = type <{ i8 }>
- %struct.ComponentInstanceRecord = type opaque
- %struct.ComponentParameters = type { [1 x i64] }
-
-define i32 @_ZN12AGenericCall10MapIDPtrAtEsRP23ComponentInstanceRecord(%struct.AGenericCall* %this, i16 signext %param, %struct.ComponentInstanceRecord** %instance) {
-entry:
- %tmp4 = icmp slt i16 %param, 0 ; <i1> [#uses=1]
- br i1 %tmp4, label %cond_true, label %cond_next
-
-cond_true: ; preds = %entry
- %tmp1415 = shl i16 %param, 3 ; <i16> [#uses=1]
- %tmp17 = getelementptr %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
- %tmp18 = load %struct.ComponentParameters** %tmp17, align 8 ; <%struct.ComponentParameters*> [#uses=1]
- %tmp1920 = bitcast %struct.ComponentParameters* %tmp18 to i8* ; <i8*> [#uses=1]
- %tmp212223 = sext i16 %tmp1415 to i64 ; <i64> [#uses=1]
- %tmp24 = getelementptr i8* %tmp1920, i64 %tmp212223 ; <i8*> [#uses=1]
- %tmp2425 = bitcast i8* %tmp24 to i64* ; <i64*> [#uses=1]
- %tmp28 = load i64* %tmp2425, align 8 ; <i64> [#uses=1]
- %tmp2829 = inttoptr i64 %tmp28 to i32* ; <i32*> [#uses=1]
- %tmp31 = getelementptr %struct.AGenericCall* %this, i32 0, i32 2 ; <i32**> [#uses=1]
- store i32* %tmp2829, i32** %tmp31, align 8
- br label %cond_next
-
-cond_next: ; preds = %cond_true, %entry
- %tmp4243 = shl i16 %param, 3 ; <i16> [#uses=1]
- %tmp46 = getelementptr %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
- %tmp47 = load %struct.ComponentParameters** %tmp46, align 8 ; <%struct.ComponentParameters*> [#uses=1]
- %tmp4849 = bitcast %struct.ComponentParameters* %tmp47 to i8* ; <i8*> [#uses=1]
- %tmp505152 = sext i16 %tmp4243 to i64 ; <i64> [#uses=1]
- %tmp53 = getelementptr i8* %tmp4849, i64 %tmp505152 ; <i8*> [#uses=1]
- %tmp5354 = bitcast i8* %tmp53 to i64* ; <i64*> [#uses=1]
- %tmp58 = load i64* %tmp5354, align 8 ; <i64> [#uses=1]
- %tmp59 = icmp eq i64 %tmp58, 0 ; <i1> [#uses=1]
- br i1 %tmp59, label %UnifiedReturnBlock, label %cond_true63
-
-cond_true63: ; preds = %cond_next
- %tmp65 = getelementptr %struct.AGenericCall* %this, i32 0, i32 0 ; <%struct.AGenericManager**> [#uses=1]
- %tmp66 = load %struct.AGenericManager** %tmp65, align 8 ; <%struct.AGenericManager*> [#uses=1]
- %tmp69 = tail call i32 @_ZN15AGenericManager24DefaultComponentInstanceERP23ComponentInstanceRecord( %struct.AGenericManager* %tmp66, %struct.ComponentInstanceRecord** %instance ) ; <i32> [#uses=1]
- ret i32 %tmp69
-
-UnifiedReturnBlock: ; preds = %cond_next
- ret i32 undef
-}
-
-declare i32 @_ZN15AGenericManager24DefaultComponentInstanceERP23ComponentInstanceRecord(%struct.AGenericManager*, %struct.ComponentInstanceRecord**)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-25-TwoAddrPassBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-03-25-TwoAddrPassBug.ll
deleted file mode 100644
index 2d868e0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-25-TwoAddrPassBug.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-
-define void @t() {
-entry:
- %tmp455 = shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> < i32 1, i32 0, i32 3, i32 2 > ; <<4 x float>> [#uses=1]
- %tmp457 = fmul <4 x float> zeroinitializer, %tmp455 ; <<4 x float>> [#uses=2]
- %tmp461 = shufflevector <4 x float> %tmp457, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
- %tmp465 = shufflevector <4 x float> %tmp457, <4 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x float>> [#uses=1]
- %tmp466 = fsub <4 x float> %tmp461, %tmp465 ; <<4 x float>> [#uses=1]
- %tmp536 = shufflevector <4 x float> zeroinitializer, <4 x float> %tmp466, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>> [#uses=1]
- %tmp542 = shufflevector <4 x float> %tmp536, <4 x float> zeroinitializer, <4 x i32> < i32 6, i32 7, i32 2, i32 3 > ; <<4 x float>> [#uses=1]
- %tmp580 = bitcast <4 x float> %tmp542 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp582 = and <4 x i32> %tmp580, zeroinitializer ; <<4 x i32>> [#uses=1]
- %tmp591 = or <4 x i32> %tmp582, zeroinitializer ; <<4 x i32>> [#uses=1]
- %tmp592 = bitcast <4 x i32> %tmp591 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp609 = fdiv <4 x float> < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >, %tmp592 ; <<4 x float>> [#uses=1]
- %tmp652 = shufflevector <4 x float> %tmp609, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>> [#uses=1]
- %tmp662 = fmul <4 x float> zeroinitializer, %tmp652 ; <<4 x float>> [#uses=1]
- %tmp678 = shufflevector <4 x float> %tmp662, <4 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ; <<4 x float>> [#uses=1]
- %tmp753 = fmul <4 x float> zeroinitializer, %tmp678 ; <<4 x float>> [#uses=1]
- %tmp754 = fsub <4 x float> zeroinitializer, %tmp753 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp754, <4 x float>* null, align 16
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll
deleted file mode 100644
index 305968a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic -disable-fp-elim | grep add | grep 12 | not grep non_lazy_ptr
-; Don't fold re-materialized load into a two address instruction
-
- %"struct.Smarts::Runnable" = type { i32 (...)**, i32 }
- %struct.__sbuf = type { i8*, i32 }
- %"struct.std::ios_base" = type { i32 (...)**, i32, i32, i32, i32, i32, %"struct.std::ios_base::_Callback_list"*, %struct.__sbuf, [8 x %struct.__sbuf], i32, %struct.__sbuf*, %"struct.std::locale" }
- %"struct.std::ios_base::_Callback_list" = type { %"struct.std::ios_base::_Callback_list"*, void (i32, %"struct.std::ios_base"*, i32)*, i32, i32 }
- %"struct.std::locale" = type { %"struct.std::locale::_Impl"* }
- %"struct.std::locale::_Impl" = type { i32, %"struct.Smarts::Runnable"**, i32, %"struct.Smarts::Runnable"**, i8** }
-@_ZTVSt9basic_iosIcSt11char_traitsIcEE = external constant [4 x i32 (...)*] ; <[4 x i32 (...)*]*> [#uses=1]
-@_ZTTSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE = external constant [4 x i8*] ; <[4 x i8*]*> [#uses=1]
-@_ZTVSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE = external constant [10 x i32 (...)*] ; <[10 x i32 (...)*]*> [#uses=2]
-@_ZTVSt15basic_streambufIcSt11char_traitsIcEE = external constant [16 x i32 (...)*] ; <[16 x i32 (...)*]*> [#uses=1]
-@_ZTVSt15basic_stringbufIcSt11char_traitsIcESaIcEE = external constant [16 x i32 (...)*] ; <[16 x i32 (...)*]*> [#uses=1]
-
-define void @_GLOBAL__I__ZN5Pooma5pinfoE() nounwind {
-entry:
- store i32 (...)** getelementptr ([10 x i32 (...)*]* @_ZTVSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE, i32 0, i32 8), i32 (...)*** null, align 4
- %tmp96.i.i142.i = call i8* @_Znwm( i32 180 ) nounwind ; <i8*> [#uses=2]
- call void @_ZNSt8ios_baseC2Ev( %"struct.std::ios_base"* null ) nounwind
- store i32 (...)** getelementptr ([4 x i32 (...)*]* @_ZTVSt9basic_iosIcSt11char_traitsIcEE, i32 0, i32 2), i32 (...)*** null, align 4
- store i32 (...)** null, i32 (...)*** null, align 4
- %ctg2242.i.i163.i = getelementptr i8* %tmp96.i.i142.i, i32 0 ; <i8*> [#uses=1]
- %tmp150.i.i164.i = load i8** getelementptr ([4 x i8*]* @_ZTTSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE, i32 0, i64 2), align 4 ; <i8*> [#uses=1]
- %tmp150151.i.i165.i = bitcast i8* %tmp150.i.i164.i to i32 (...)** ; <i32 (...)**> [#uses=1]
- %tmp153.i.i166.i = bitcast i8* %ctg2242.i.i163.i to i32 (...)*** ; <i32 (...)***> [#uses=1]
- store i32 (...)** %tmp150151.i.i165.i, i32 (...)*** %tmp153.i.i166.i, align 4
- %tmp159.i.i167.i = bitcast i8* %tmp96.i.i142.i to i32 (...)*** ; <i32 (...)***> [#uses=1]
- store i32 (...)** getelementptr ([10 x i32 (...)*]* @_ZTVSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE, i32 0, i32 3), i32 (...)*** %tmp159.i.i167.i, align 4
- store i32 (...)** getelementptr ([16 x i32 (...)*]* @_ZTVSt15basic_streambufIcSt11char_traitsIcEE, i32 0, i32 2), i32 (...)*** null, align 4
- call void @_ZNSt6localeC1Ev( %"struct.std::locale"* null ) nounwind
- store i32 (...)** getelementptr ([16 x i32 (...)*]* @_ZTVSt15basic_stringbufIcSt11char_traitsIcESaIcEE, i32 0, i32 2), i32 (...)*** null, align 4
- unreachable
-}
-
-declare i8* @_Znwm(i32)
-
-declare void @_ZNSt8ios_baseC2Ev(%"struct.std::ios_base"*)
-
-declare void @_ZNSt6localeC1Ev(%"struct.std::locale"*) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-02-unnamedEH.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-04-02-unnamedEH.ll
deleted file mode 100644
index 27bbbaa..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-02-unnamedEH.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s | FileCheck %s
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
-
-define void @_Z3bazv() {
- call void @0( ) ; <i32>:1 [#uses=0]
- ret void
-}
-
-define internal void @""() {
- call i32 @_Z3barv( ) ; <i32>:4 [#uses=1]
- ret void
-}
-; CHECK: unnamed_1.eh
-
-declare i32 @_Z3barv()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-08-CoalescerCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-04-08-CoalescerCrash.ll
deleted file mode 100644
index dc8c097..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-08-CoalescerCrash.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx
-
-define i32 @t2() nounwind {
-entry:
- tail call void asm sideeffect "# top of block", "~{dirflag},~{fpsr},~{flags},~{di},~{si},~{dx},~{cx},~{ax}"( ) nounwind
- tail call void asm sideeffect ".file \224443946.c\22", "~{dirflag},~{fpsr},~{flags}"( ) nounwind
- tail call void asm sideeffect ".line 8", "~{dirflag},~{fpsr},~{flags}"( ) nounwind
- %tmp1 = tail call <2 x i32> asm sideeffect "movd $1, $0", "=={mm4},{bp},~{dirflag},~{fpsr},~{flags},~{memory}"( i32 undef ) nounwind ; <<2 x i32>> [#uses=1]
- tail call void asm sideeffect ".file \224443946.c\22", "~{dirflag},~{fpsr},~{flags}"( ) nounwind
- tail call void asm sideeffect ".line 9", "~{dirflag},~{fpsr},~{flags}"( ) nounwind
- %tmp3 = tail call i32 asm sideeffect "movd $1, $0", "=={bp},{mm3},~{dirflag},~{fpsr},~{flags},~{memory}"( <2 x i32> undef ) nounwind ; <i32> [#uses=1]
- tail call void asm sideeffect ".file \224443946.c\22", "~{dirflag},~{fpsr},~{flags}"( ) nounwind
- tail call void asm sideeffect ".line 10", "~{dirflag},~{fpsr},~{flags}"( ) nounwind
- tail call void asm sideeffect "movntq $0, 0($1,$2)", "{mm0},{di},{bp},~{dirflag},~{fpsr},~{flags},~{memory}"( <2 x i32> undef, i32 undef, i32 %tmp3 ) nounwind
- tail call void asm sideeffect ".file \224443946.c\22", "~{dirflag},~{fpsr},~{flags}"( ) nounwind
- tail call void asm sideeffect ".line 11", "~{dirflag},~{fpsr},~{flags}"( ) nounwind
- %tmp8 = tail call i32 asm sideeffect "movd $1, $0", "=={bp},{mm4},~{dirflag},~{fpsr},~{flags},~{memory}"( <2 x i32> %tmp1 ) nounwind ; <i32> [#uses=0]
- ret i32 undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-09-BranchFolding.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-04-09-BranchFolding.ll
deleted file mode 100644
index 41fbdd1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-09-BranchFolding.ll
+++ /dev/null
@@ -1,48 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep jmp
-
- %struct..0anon = type { i32 }
- %struct.binding_level = type { %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.binding_level*, i8, i8, i8, i8, i8, i32, %struct.tree_node* }
- %struct.lang_decl = type opaque
- %struct.rtx_def = type { i16, i8, i8, [1 x %struct..0anon] }
- %struct.tree_decl = type { [12 x i8], i8*, i32, %struct.tree_node*, i32, i8, i8, i8, i8, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct..0anon, { %struct.rtx_def* }, %struct.tree_node*, %struct.lang_decl* }
- %struct.tree_node = type { %struct.tree_decl }
-
-define fastcc %struct.tree_node* @pushdecl(%struct.tree_node* %x) nounwind {
-entry:
- %tmp3.i40 = icmp eq %struct.binding_level* null, null ; <i1> [#uses=2]
- br i1 false, label %bb143, label %bb140
-bb140: ; preds = %entry
- br i1 %tmp3.i40, label %bb160, label %bb17.i
-bb17.i: ; preds = %bb140
- ret %struct.tree_node* null
-bb143: ; preds = %entry
- %tmp8.i43 = load %struct.tree_node** null, align 4 ; <%struct.tree_node*> [#uses=1]
- br i1 %tmp3.i40, label %bb160, label %bb9.i48
-bb9.i48: ; preds = %bb143
- ret %struct.tree_node* null
-bb160: ; preds = %bb143, %bb140
- %t.0.reg2mem.0 = phi %struct.tree_node* [ null, %bb140 ], [ %tmp8.i43, %bb143 ] ; <%struct.tree_node*> [#uses=1]
- %tmp162 = icmp eq %struct.tree_node* %t.0.reg2mem.0, null ; <i1> [#uses=2]
- br i1 %tmp162, label %bb174, label %bb165
-bb165: ; preds = %bb160
- br label %bb174
-bb174: ; preds = %bb165, %bb160
- %line.0 = phi i32 [ 0, %bb165 ], [ undef, %bb160 ] ; <i32> [#uses=1]
- %file.0 = phi i8* [ null, %bb165 ], [ undef, %bb160 ] ; <i8*> [#uses=1]
- br i1 %tmp162, label %bb344, label %bb73.i
-bb73.i: ; preds = %bb174
- br i1 false, label %bb226.i, label %bb220.i
-bb220.i: ; preds = %bb73.i
- ret %struct.tree_node* null
-bb226.i: ; preds = %bb73.i
- br i1 false, label %bb260, label %bb273.i
-bb273.i: ; preds = %bb226.i
- ret %struct.tree_node* null
-bb260: ; preds = %bb226.i
- tail call void (i8*, i32, ...)* @pedwarn_with_file_and_line( i8* %file.0, i32 %line.0, i8* null ) nounwind
- ret %struct.tree_node* null
-bb344: ; preds = %bb174
- ret %struct.tree_node* null
-}
-
-declare void @pedwarn_with_file_and_line(i8*, i32, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll
deleted file mode 100644
index 2aea9c5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -relocation-model=pic -disable-fp-elim -O0 -regalloc=local
-; PR5534
-
- %struct.CGPoint = type { double, double }
- %struct.NSArray = type { %struct.NSObject }
- %struct.NSAssertionHandler = type { %struct.NSObject, i8* }
- %struct.NSDockTile = type { %struct.NSObject, %struct.NSObject*, i8*, %struct.NSView*, %struct.NSView*, %struct.NSView*, %struct.NSArray*, %struct._SPFlags, %struct.CGPoint, [5 x %struct.NSObject*] }
- %struct.NSDocument = type { %struct.NSObject, %struct.NSWindow*, %struct.NSObject*, %struct.NSURL*, %struct.NSArray*, %struct.NSPrintInfo*, i64, %struct.NSView*, %struct.NSObject*, %struct.NSObject*, %struct.NSUndoManager*, %struct._BCFlags2, %struct.NSArray* }
- %struct.AA = type { %struct.NSObject, %struct.NSDocument*, %struct.NSURL*, %struct.NSArray*, %struct.NSArray* }
- %struct.NSError = type { %struct.NSObject, i8*, i64, %struct.NSArray*, %struct.NSArray* }
- %struct.NSImage = type { %struct.NSObject, %struct.NSArray*, %struct.CGPoint, %struct._BCFlags2, %struct.NSObject*, %struct._NSImageAuxiliary* }
- %struct.NSMutableArray = type { %struct.NSArray }
- %struct.NSObject = type { %struct.NSObject* }
- %struct.NSPrintInfo = type { %struct.NSObject, %struct.NSMutableArray*, %struct.NSObject* }
- %struct.NSRect = type { %struct.CGPoint, %struct.CGPoint }
- %struct.NSRegion = type opaque
- %struct.NSResponder = type { %struct.NSObject, %struct.NSObject* }
- %struct.NSToolbar = type { %struct.NSObject, %struct.NSArray*, %struct.NSMutableArray*, %struct.NSMutableArray*, %struct.NSArray*, %struct.NSObject*, %struct.NSArray*, i8*, %struct.NSObject*, %struct.NSWindow*, %struct.NSObject*, %struct.NSObject*, i64, %struct._BCFlags2, i64, %struct.NSObject* }
- %struct.NSURL = type { %struct.NSObject, %struct.NSArray*, %struct.NSURL*, i8*, i8* }
- %struct.NSUndoManager = type { %struct.NSObject, %struct.NSObject*, %struct.NSObject*, %struct.NSArray*, i64, %struct._SPFlags, %struct.NSObject*, i8*, i8*, i8* }
- %struct.NSView = type { %struct.NSResponder, %struct.NSRect, %struct.NSRect, %struct.NSObject*, %struct.NSObject*, %struct.NSWindow*, %struct.NSObject*, %struct.NSObject*, %struct.NSObject*, %struct.NSObject*, %struct._NSViewAuxiliary*, %struct._BCFlags, %struct._SPFlags }
- %struct.NSWindow = type { %struct.NSResponder, %struct.NSRect, %struct.NSObject*, %struct.NSObject*, %struct.NSResponder*, %struct.NSView*, %struct.NSView*, %struct.NSObject*, %struct.NSObject*, i32, i64, i32, %struct.NSArray*, %struct.NSObject*, i8, i8, i8, i8, i8*, i8*, %struct.NSImage*, i32, %struct.NSMutableArray*, %struct.NSURL*, %struct.CGPoint*, %struct.NSArray*, %struct.NSArray*, %struct.__wFlags, %struct.NSObject*, %struct.NSView*, %struct.NSWindowAuxiliary* }
- %struct.NSWindowAuxiliary = type { %struct.NSObject, %struct.NSArray*, %struct.NSDockTile*, %struct._NSWindowAnimator*, %struct.NSRect, i32, %struct.NSAssertionHandler*, %struct.NSUndoManager*, %struct.NSWindowController*, %struct.NSAssertionHandler*, %struct.NSObject*, i32, %struct.__CFRunLoopObserver*, %struct.__CFRunLoopObserver*, %struct.NSArray*, %struct.NSArray*, %struct.NSView*, %struct.NSRegion*, %struct.NSWindow*, %struct.NSWindow*, %struct.NSArray*, %struct.NSMutableArray*, %struct.NSArray*, %struct.NSWindow*, %struct.CGPoint, %struct.NSObject*, i8*, i8*, i32, %struct.NSObject*, %struct.NSArray*, double, %struct.CGPoint, %struct.NSArray*, %struct.NSMutableArray*, %struct.NSMutableArray*, %struct.NSWindow*, %struct.NSView*, %struct.NSArray*, %struct.__auxWFlags, i32, i8*, double, %struct.NSObject*, %struct.NSObject*, %struct.__CFArray*, %struct.NSRegion*, %struct.NSArray*, %struct.NSRect, %struct.NSToolbar*, %struct.NSRect, %struct.NSMutableArray* }
- %struct.NSWindowController = type { %struct.NSResponder, %struct.NSWindow*, %struct.NSArray*, %struct.NSDocument*, %struct.NSArray*, %struct.NSObject*, %struct._SPFlags, %struct.NSArray*, %struct.NSObject* }
- %struct._BCFlags = type <{ i8, i8, i8, i8 }>
- %struct._BCFlags2 = type <{ i8, [3 x i8] }>
- %struct._NSImageAuxiliary = type opaque
- %struct._NSViewAuxiliary = type opaque
- %struct._NSWindowAnimator = type opaque
- %struct._SPFlags = type <{ i32 }>
- %struct.__CFArray = type opaque
- %struct.__CFRunLoopObserver = type opaque
- %struct.__auxWFlags = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i16 }
- %struct.__wFlags = type <{ i8, i8, i8, i8, i8, i8, i8, i8 }>
- %struct._message_ref_t = type { %struct.NSObject* (%struct.NSObject*, %struct._message_ref_t*, ...)*, %struct.objc_selector* }
- %struct.objc_selector = type opaque
-@"\01L_OBJC_MESSAGE_REF_228" = internal global %struct._message_ref_t zeroinitializer ; <%struct._message_ref_t*> [#uses=1]
-@llvm.used1 = appending global [1 x i8*] [ i8* bitcast (void (%struct.AA*, %struct._message_ref_t*, %struct.NSError*, i64, %struct.NSObject*, %struct.objc_selector*, i8*)* @"-[AA BB:optionIndex:delegate:CC:contextInfo:]" to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define void @"-[AA BB:optionIndex:delegate:CC:contextInfo:]"(%struct.AA* %self, %struct._message_ref_t* %_cmd, %struct.NSError* %inError, i64 %inOptionIndex, %struct.NSObject* %inDelegate, %struct.objc_selector* %inDidRecoverSelector, i8* %inContextInfo) {
-entry:
- %tmp105 = load %struct.NSArray** null, align 8 ; <%struct.NSArray*> [#uses=1]
- %tmp107 = load %struct.NSObject** null, align 8 ; <%struct.NSObject*> [#uses=1]
- call void null( %struct.NSObject* %tmp107, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_228", %struct.NSArray* %tmp105, i8 signext 0 )
- %tmp111 = call %struct.NSObject* (%struct.NSObject*, %struct.objc_selector*, ...)* @objc_msgSend( %struct.NSObject* null, %struct.objc_selector* null, i32 0, i8* null ) ; <%struct.NSObject*> [#uses=0]
- ret void
-}
-
-declare %struct.NSObject* @objc_msgSend(%struct.NSObject*, %struct.objc_selector*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-16-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-04-16-CoalescerBug.ll
deleted file mode 100644
index 3ccc0fe..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-16-CoalescerBug.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define void @Hubba(i8* %saveunder, i32 %firstBlob, i32 %select) nounwind {
-entry:
- br i1 false, label %bb53.us, label %bb53
-bb53.us: ; preds = %bb94.us, %bb53.us, %entry
- switch i8 1, label %bb71.us [
- i8 0, label %bb53.us
- i8 1, label %bb94.us
- ]
-bb94.us: ; preds = %bb71.us, %bb53.us
- %result.0.us = phi i32 [ %tmp93.us, %bb71.us ], [ 0, %bb53.us ] ; <i32> [#uses=2]
- %tmp101.us = lshr i32 %result.0.us, 3 ; <i32> [#uses=1]
- %result.0163.us = trunc i32 %result.0.us to i16 ; <i16> [#uses=2]
- shl i16 %result.0163.us, 7 ; <i16>:0 [#uses=1]
- %tmp106.us = and i16 %0, -1024 ; <i16> [#uses=1]
- shl i16 %result.0163.us, 2 ; <i16>:1 [#uses=1]
- %tmp109.us = and i16 %1, -32 ; <i16> [#uses=1]
- %tmp111112.us = trunc i32 %tmp101.us to i16 ; <i16> [#uses=1]
- %tmp110.us = or i16 %tmp109.us, %tmp111112.us ; <i16> [#uses=1]
- %tmp113.us = or i16 %tmp110.us, %tmp106.us ; <i16> [#uses=1]
- store i16 %tmp113.us, i16* null, align 2
- br label %bb53.us
-bb71.us: ; preds = %bb53.us
- %tmp80.us = load i8* null, align 1 ; <i8> [#uses=1]
- %tmp8081.us = zext i8 %tmp80.us to i32 ; <i32> [#uses=1]
- %tmp87.us = mul i32 %tmp8081.us, 0 ; <i32> [#uses=1]
- %tmp92.us = add i32 0, %tmp87.us ; <i32> [#uses=1]
- %tmp93.us = udiv i32 %tmp92.us, 255 ; <i32> [#uses=1]
- br label %bb94.us
-bb53: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-16-ReMatBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-04-16-ReMatBug.ll
deleted file mode 100644
index 6e8891b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-16-ReMatBug.ll
+++ /dev/null
@@ -1,46 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin | grep movw | not grep {, %e}
-
- %struct.DBC_t = type { i32, i8*, i16, %struct.DBC_t*, i8*, i8*, i8*, i8*, i8*, %struct.DBC_t*, i32, i32, i32, i32, i8*, i8*, i8*, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i16, i16, i32*, i8, i16, %struct.DRVOPT*, i16 }
- %struct.DRVOPT = type { i16, i32, i8, %struct.DRVOPT* }
- %struct.GENV_t = type { i32, i8*, i16, i8*, i8*, i32, i32, i32, i32, %struct.DBC_t*, i16 }
- %struct.pthread_mutex_t = type { i32, [40 x i8] }
-@iodbcdm_global_lock = external global %struct.pthread_mutex_t ; <%struct.pthread_mutex_t*> [#uses=1]
-
-define i16 @SQLDriversW(i8* %henv, i16 zeroext %fDir, i32* %szDrvDesc, i16 signext %cbDrvDescMax, i16* %pcbDrvDesc, i32* %szDrvAttr, i16 signext %cbDrvAttrMax, i16* %pcbDrvAttr) signext nounwind {
-entry:
- %tmp12 = bitcast i8* %henv to %struct.GENV_t* ; <%struct.GENV_t*> [#uses=1]
- br i1 true, label %bb28, label %bb
-bb: ; preds = %entry
- ret i16 0
-bb28: ; preds = %entry
- br i1 false, label %bb37, label %done
-bb37: ; preds = %bb28
- %tmp46 = getelementptr %struct.GENV_t* %tmp12, i32 0, i32 10 ; <i16*> [#uses=1]
- store i16 0, i16* %tmp46, align 4
- br i1 false, label %bb74, label %bb92
-bb74: ; preds = %bb37
- br label %bb92
-bb92: ; preds = %bb74, %bb37
- %tmp95180 = shl i16 %cbDrvAttrMax, 2 ; <i16> [#uses=1]
- %tmp100178 = shl i16 %cbDrvDescMax, 2 ; <i16> [#uses=1]
- %tmp113 = tail call i16 @SQLDrivers_Internal( i8* %henv, i16 zeroext %fDir, i8* null, i16 signext %tmp100178, i16* %pcbDrvDesc, i8* null, i16 signext %tmp95180, i16* %pcbDrvAttr, i8 zeroext 87 ) signext nounwind ; <i16> [#uses=1]
- br i1 false, label %done, label %bb137
-bb137: ; preds = %bb92
- ret i16 0
-done: ; preds = %bb92, %bb28
- %retcode.0 = phi i16 [ -2, %bb28 ], [ %tmp113, %bb92 ] ; <i16> [#uses=2]
- br i1 false, label %bb167, label %bb150
-bb150: ; preds = %done
- %tmp157158 = sext i16 %retcode.0 to i32 ; <i32> [#uses=1]
- tail call void @trace_SQLDriversW( i32 1, i32 %tmp157158, i8* %henv, i16 zeroext %fDir, i32* %szDrvDesc, i16 signext %cbDrvDescMax, i16* %pcbDrvDesc, i32* %szDrvAttr, i16 signext %cbDrvAttrMax, i16* %pcbDrvAttr ) nounwind
- ret i16 0
-bb167: ; preds = %done
- %tmp168 = tail call i32 @pthread_mutex_unlock( %struct.pthread_mutex_t* @iodbcdm_global_lock ) nounwind ; <i32> [#uses=0]
- ret i16 %retcode.0
-}
-
-declare i32 @pthread_mutex_unlock(%struct.pthread_mutex_t*)
-
-declare i16 @SQLDrivers_Internal(i8*, i16 zeroext , i8*, i16 signext , i16*, i8*, i16 signext , i16*, i8 zeroext ) signext nounwind
-
-declare void @trace_SQLDriversW(i32, i32, i8*, i16 zeroext , i32*, i16 signext , i16*, i32*, i16 signext , i16*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-17-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-04-17-CoalescerBug.ll
deleted file mode 100644
index ac48285..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-17-CoalescerBug.ll
+++ /dev/null
@@ -1,171 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin | grep xorl | grep {%e}
-; Make sure xorl operands are 32-bit registers.
-
- %struct.tm = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8* }
- %struct.wxDateTime = type { %struct.wxLongLong }
- %"struct.wxDateTime::TimeZone" = type { i32 }
- %struct.wxLongLong = type { i64 }
- %struct.wxString = type { %struct.wxStringBase }
- %struct.wxStringBase = type { i32* }
-@.str = external constant [27 x i32] ; <[27 x i32]*> [#uses=1]
-@.str4 = external constant [14 x i32] ; <[14 x i32]*> [#uses=1]
-@_ZZNK10wxDateTime5GetTmERKNS_8TimeZoneEE12__FUNCTION__ = external constant [6 x i8] ; <[6 x i8]*> [#uses=1]
-@.str33 = external constant [29 x i32] ; <[29 x i32]*> [#uses=1]
-@.str89 = external constant [5 x i32] ; <[5 x i32]*> [#uses=1]
-
-define void @_ZNK10wxDateTime6FormatEPKwRKNS_8TimeZoneE(%struct.wxString* noalias sret %agg.result, %struct.wxDateTime* %this, i32* %format, %"struct.wxDateTime::TimeZone"* %tz, i1 %foo) {
-entry:
- br i1 %foo, label %bb116.i, label %bb115.critedge.i
-bb115.critedge.i: ; preds = %entry
- ret void
-bb116.i: ; preds = %entry
- br i1 %foo, label %bb52.i.i, label %bb3118
-bb3118: ; preds = %bb116.i
- ret void
-bb52.i.i: ; preds = %bb116.i
- br i1 %foo, label %bb142.i, label %bb115.critedge.i.i
-bb115.critedge.i.i: ; preds = %bb52.i.i
- ret void
-bb142.i: ; preds = %bb52.i.i
- br i1 %foo, label %bb161.i, label %bb182.i
-bb161.i: ; preds = %bb142.i
- br label %bb3261
-bb182.i: ; preds = %bb142.i
- ret void
-bb3261: ; preds = %bb7834, %bb161.i
- %tmp3263 = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp3264 = icmp eq i32 %tmp3263, 37 ; <i1> [#uses=1]
- br i1 %tmp3264, label %bb3306, label %bb3267
-bb3267: ; preds = %bb3261
- ret void
-bb3306: ; preds = %bb3261
- %tmp3310 = invoke %struct.wxStringBase* @_ZN12wxStringBaseaSEPKw( %struct.wxStringBase* null, i32* getelementptr ([5 x i32]* @.str89, i32 0, i32 0) )
- to label %bb3314 unwind label %lpad ; <%struct.wxStringBase*> [#uses=0]
-bb3314: ; preds = %bb3306
- %tmp3316 = load i32* null, align 4 ; <i32> [#uses=1]
- switch i32 %tmp3316, label %bb7595 [
- i32 0, label %bb7819
- i32 37, label %bb7806
- i32 66, label %bb3477
- i32 72, label %bb5334
- i32 73, label %bb5484
- i32 77, label %bb6118
- i32 83, label %bb6406
- i32 85, label %bb6556
- i32 87, label %bb6708
- i32 89, label %bb7308
- i32 98, label %bb3477
- i32 99, label %bb3626
- i32 100, label %bb5184
- i32 106, label %bb5657
- i32 108, label %bb5809
- i32 109, label %bb5968
- i32 119, label %bb6860
- i32 120, label %bb3626
- i32 121, label %bb7158
- ]
-bb3477: ; preds = %bb3314, %bb3314
- ret void
-bb3626: ; preds = %bb3314, %bb3314
- ret void
-bb5184: ; preds = %bb3314
- ret void
-bb5334: ; preds = %bb3314
- ret void
-bb5484: ; preds = %bb3314
- ret void
-bb5657: ; preds = %bb3314
- %tmp5661 = invoke i16 @_ZNK10wxDateTime12GetDayOfYearERKNS_8TimeZoneE( %struct.wxDateTime* %this, %"struct.wxDateTime::TimeZone"* %tz ) zeroext
- to label %invcont5660 unwind label %lpad ; <i16> [#uses=0]
-invcont5660: ; preds = %bb5657
- ret void
-bb5809: ; preds = %bb3314
- %tmp61.i.i8486 = icmp sgt i64 0, -1 ; <i1> [#uses=1]
- %tmp95.i.i8490 = icmp slt i64 0, 2147483647000 ; <i1> [#uses=1]
- %bothcond9308 = and i1 %tmp61.i.i8486, %tmp95.i.i8490 ; <i1> [#uses=1]
- br i1 %bothcond9308, label %bb91.i8504, label %bb115.critedge.i.i8492
-bb115.critedge.i.i8492: ; preds = %bb5809
- ret void
-bb91.i8504: ; preds = %bb5809
- br i1 %foo, label %bb155.i8541, label %bb182.i8560
-bb155.i8541: ; preds = %bb91.i8504
- %tmp156.i85398700 = invoke %struct.tm* @gmtime_r( i32* null, %struct.tm* null )
- to label %bb182.i8560 unwind label %lpad ; <%struct.tm*> [#uses=1]
-bb182.i8560: ; preds = %bb155.i8541, %bb91.i8504
- %tm48.0.i8558 = phi %struct.tm* [ null, %bb91.i8504 ], [ %tmp156.i85398700, %bb155.i8541 ] ; <%struct.tm*> [#uses=0]
- br i1 %foo, label %bb278.i8617, label %bb187.i8591
-bb187.i8591: ; preds = %bb182.i8560
- %tmp245.i8588 = srem i64 0, 86400000 ; <i64> [#uses=1]
- br i1 %foo, label %bb264.i8592, label %bb265.i8606
-bb264.i8592: ; preds = %bb187.i8591
- ret void
-bb265.i8606: ; preds = %bb187.i8591
- %tmp268269.i8593 = trunc i64 %tmp245.i8588 to i32 ; <i32> [#uses=1]
- %tmp273.i8594 = srem i32 %tmp268269.i8593, 1000 ; <i32> [#uses=1]
- %tmp273274.i8595 = trunc i32 %tmp273.i8594 to i16 ; <i16> [#uses=1]
- br label %invcont5814
-bb278.i8617: ; preds = %bb182.i8560
- %timeOnly50.0.i8622 = add i32 0, 0 ; <i32> [#uses=1]
- br i1 %foo, label %bb440.i8663, label %bb448.i8694
-bb440.i8663: ; preds = %bb278.i8617
- invoke void @_Z10wxOnAssertPKwiPKcS0_S0_( i32* getelementptr ([27 x i32]* @.str, i32 0, i32 0), i32 1717, i8* getelementptr ([6 x i8]* @_ZZNK10wxDateTime5GetTmERKNS_8TimeZoneEE12__FUNCTION__, i32 0, i32 0), i32* getelementptr ([29 x i32]* @.str33, i32 0, i32 0), i32* getelementptr ([14 x i32]* @.str4, i32 0, i32 0) )
- to label %bb448.i8694 unwind label %lpad
-bb448.i8694: ; preds = %bb440.i8663, %bb278.i8617
- %tmp477.i8669 = srem i32 %timeOnly50.0.i8622, 1000 ; <i32> [#uses=1]
- %tmp477478.i8670 = trunc i32 %tmp477.i8669 to i16 ; <i16> [#uses=1]
- br label %invcont5814
-invcont5814: ; preds = %bb448.i8694, %bb265.i8606
- %tmp812.0.0 = phi i16 [ %tmp477478.i8670, %bb448.i8694 ], [ %tmp273274.i8595, %bb265.i8606 ] ; <i16> [#uses=1]
- %tmp58165817 = zext i16 %tmp812.0.0 to i32 ; <i32> [#uses=1]
- invoke void (%struct.wxString*, i32*, ...)* @_ZN8wxString6FormatEPKwz( %struct.wxString* noalias sret null, i32* null, i32 %tmp58165817 )
- to label %invcont5831 unwind label %lpad
-invcont5831: ; preds = %invcont5814
- %tmp5862 = invoke i8 @_ZN12wxStringBase10ConcatSelfEmPKwm( %struct.wxStringBase* null, i32 0, i32* null, i32 0 ) zeroext
- to label %bb7834 unwind label %lpad8185 ; <i8> [#uses=0]
-bb5968: ; preds = %bb3314
- invoke void (%struct.wxString*, i32*, ...)* @_ZN8wxString6FormatEPKwz( %struct.wxString* noalias sret null, i32* null, i32 0 )
- to label %invcont5981 unwind label %lpad
-invcont5981: ; preds = %bb5968
- ret void
-bb6118: ; preds = %bb3314
- ret void
-bb6406: ; preds = %bb3314
- ret void
-bb6556: ; preds = %bb3314
- ret void
-bb6708: ; preds = %bb3314
- ret void
-bb6860: ; preds = %bb3314
- ret void
-bb7158: ; preds = %bb3314
- ret void
-bb7308: ; preds = %bb3314
- ret void
-bb7595: ; preds = %bb3314
- ret void
-bb7806: ; preds = %bb3314
- %tmp7814 = invoke %struct.wxStringBase* @_ZN12wxStringBase6appendEmw( %struct.wxStringBase* null, i32 1, i32 0 )
- to label %bb7834 unwind label %lpad ; <%struct.wxStringBase*> [#uses=0]
-bb7819: ; preds = %bb3314
- ret void
-bb7834: ; preds = %bb7806, %invcont5831
- br label %bb3261
-lpad: ; preds = %bb7806, %bb5968, %invcont5814, %bb440.i8663, %bb155.i8541, %bb5657, %bb3306
- ret void
-lpad8185: ; preds = %invcont5831
- ret void
-}
-
-declare void @_Z10wxOnAssertPKwiPKcS0_S0_(i32*, i32, i8*, i32*, i32*)
-
-declare i8 @_ZN12wxStringBase10ConcatSelfEmPKwm(%struct.wxStringBase*, i32, i32*, i32) zeroext
-
-declare %struct.tm* @gmtime_r(i32*, %struct.tm*)
-
-declare i16 @_ZNK10wxDateTime12GetDayOfYearERKNS_8TimeZoneE(%struct.wxDateTime*, %"struct.wxDateTime::TimeZone"*) zeroext
-
-declare %struct.wxStringBase* @_ZN12wxStringBase6appendEmw(%struct.wxStringBase*, i32, i32)
-
-declare %struct.wxStringBase* @_ZN12wxStringBaseaSEPKw(%struct.wxStringBase*, i32*)
-
-declare void @_ZN8wxString6FormatEPKwz(%struct.wxString* noalias sret , i32*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-24-MemCpyBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-04-24-MemCpyBug.ll
deleted file mode 100644
index 6389267..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-24-MemCpyBug.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep 120
-; Don't accidentally add the offset twice for trailing bytes.
-
- %struct.S63 = type { [63 x i8] }
-@g1s63 = external global %struct.S63 ; <%struct.S63*> [#uses=1]
-
-declare void @test63(%struct.S63* byval align 4 ) nounwind
-
-define void @testit63_entry_2E_ce() nounwind {
- tail call void @test63( %struct.S63* byval align 4 @g1s63 ) nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-24-pblendw-fold-crash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-04-24-pblendw-fold-crash.ll
deleted file mode 100644
index 4eaca17..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-24-pblendw-fold-crash.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mattr=+sse41
-; rdar://5886601
-; gcc testsuite: gcc.target/i386/sse4_1-pblendw.c
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
-
-define i32 @main() nounwind {
-entry:
- %tmp122 = load <2 x i64>* null, align 16 ; <<2 x i64>> [#uses=1]
- %tmp126 = bitcast <2 x i64> %tmp122 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp129 = call <8 x i16> @llvm.x86.sse41.pblendw( <8 x i16> zeroinitializer, <8 x i16> %tmp126, i32 2 ) nounwind ; <<8 x i16>> [#uses=0]
- ret i32 0
-}
-
-declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-26-Asm-Optimize-Imm.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-04-26-Asm-Optimize-Imm.ll
deleted file mode 100644
index 38d6aa6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-26-Asm-Optimize-Imm.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s | grep {1 \$2 3}
-; rdar://5720231
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
-
-define void @test() nounwind {
-entry:
- tail call void asm sideeffect " ${0:c} $1 ${2:c} ", "imr,imr,i,~{dirflag},~{fpsr},~{flags}"( i32 1, i32 2, i32 3 ) nounwind
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll
deleted file mode 100644
index 5b97eb7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll
+++ /dev/null
@@ -1,167 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | grep movl > %t
-; RUN: not grep {r\[abcd\]x} %t
-; RUN: not grep {r\[ds\]i} %t
-; RUN: not grep {r\[bs\]p} %t
-
- %struct.BITMAP = type { i16, i16, i32, i32, i32, i32, i32, i32, i8*, i8* }
- %struct.BltData = type { float, float, float, float }
- %struct.BltDepth = type { i32, i8**, i32, %struct.BITMAP* (%struct.BltDepth**, %struct.BITMAP*, i32, i32, float*, float, i32)*, i32 (%struct.BltDepth**, %struct.BltOp*)*, i32 (%struct.BltDepth**, %struct.BltOp*, %struct.BltImg*)*, i32 (%struct.BltDepth**, %struct.BltOp*, %struct.BltSh*)*, [28 x [2 x [2 x i32]]]*, %struct.BltData* }
- %struct.BltImg = type { i32, i8, i8, i8, float, float*, float*, i32, i32, float*, i32 (i8*, i8*, i8**, i32*, i8**, i32*)*, i8* }
- %struct.BltOp = type { i8, i8, i8, i8, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i8* }
- %struct.BltSh = type { i8, i8, i8, i8, float, float*, float*, float*, float*, i32, i32, float*, float*, float* }
-
-define void @t(%struct.BltDepth* %depth, %struct.BltOp* %bop, i32 %mode) nounwind {
-entry:
- switch i32 %mode, label %return [
- i32 1, label %bb2898.us
- i32 18, label %bb13086.preheader
- ]
-
-bb13086.preheader: ; preds = %entry
- %tmp13098 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- %tmp13238 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- br label %bb13088
-
-bb2898.us: ; preds = %bb2898.us, %entry
- br label %bb2898.us
-
-bb13088: ; preds = %bb13572, %bb13567, %bb13107, %bb13086.preheader
- br i1 %tmp13098, label %bb13107, label %bb13101
-
-bb13101: ; preds = %bb13088
- br label %bb13107
-
-bb13107: ; preds = %bb13101, %bb13088
- %iftmp.684.0 = phi i32 [ 0, %bb13101 ], [ 65535, %bb13088 ] ; <i32> [#uses=2]
- %tmp13111 = load i64* null, align 8 ; <i64> [#uses=3]
- %tmp13116 = lshr i64 %tmp13111, 16 ; <i64> [#uses=1]
- %tmp1311613117 = trunc i64 %tmp13116 to i32 ; <i32> [#uses=1]
- %tmp13118 = and i32 %tmp1311613117, 65535 ; <i32> [#uses=1]
- %tmp13120 = lshr i64 %tmp13111, 32 ; <i64> [#uses=1]
- %tmp1312013121 = trunc i64 %tmp13120 to i32 ; <i32> [#uses=1]
- %tmp13122 = and i32 %tmp1312013121, 65535 ; <i32> [#uses=2]
- %tmp13124 = lshr i64 %tmp13111, 48 ; <i64> [#uses=1]
- %tmp1312413125 = trunc i64 %tmp13124 to i32 ; <i32> [#uses=2]
- %tmp1314013141not = xor i16 0, -1 ; <i16> [#uses=1]
- %tmp1314013141not13142 = zext i16 %tmp1314013141not to i32 ; <i32> [#uses=3]
- %tmp13151 = mul i32 %tmp13122, %tmp1314013141not13142 ; <i32> [#uses=1]
- %tmp13154 = mul i32 %tmp1312413125, %tmp1314013141not13142 ; <i32> [#uses=1]
- %tmp13157 = mul i32 %iftmp.684.0, %tmp1314013141not13142 ; <i32> [#uses=1]
- %tmp13171 = add i32 %tmp13151, 1 ; <i32> [#uses=1]
- %tmp13172 = add i32 %tmp13171, 0 ; <i32> [#uses=1]
- %tmp13176 = add i32 %tmp13154, 1 ; <i32> [#uses=1]
- %tmp13177 = add i32 %tmp13176, 0 ; <i32> [#uses=1]
- %tmp13181 = add i32 %tmp13157, 1 ; <i32> [#uses=1]
- %tmp13182 = add i32 %tmp13181, 0 ; <i32> [#uses=1]
- %tmp13188 = lshr i32 %tmp13172, 16 ; <i32> [#uses=1]
- %tmp13190 = lshr i32 %tmp13177, 16 ; <i32> [#uses=1]
- %tmp13192 = lshr i32 %tmp13182, 16 ; <i32> [#uses=1]
- %tmp13198 = sub i32 %tmp13118, 0 ; <i32> [#uses=1]
- %tmp13201 = sub i32 %tmp13122, %tmp13188 ; <i32> [#uses=1]
- %tmp13204 = sub i32 %tmp1312413125, %tmp13190 ; <i32> [#uses=1]
- %tmp13207 = sub i32 %iftmp.684.0, %tmp13192 ; <i32> [#uses=1]
- %tmp1320813209 = zext i32 %tmp13204 to i64 ; <i64> [#uses=1]
- %tmp13211 = shl i64 %tmp1320813209, 48 ; <i64> [#uses=1]
- %tmp1321213213 = zext i32 %tmp13201 to i64 ; <i64> [#uses=1]
- %tmp13214 = shl i64 %tmp1321213213, 32 ; <i64> [#uses=1]
- %tmp13215 = and i64 %tmp13214, 281470681743360 ; <i64> [#uses=1]
- %tmp1321713218 = zext i32 %tmp13198 to i64 ; <i64> [#uses=1]
- %tmp13219 = shl i64 %tmp1321713218, 16 ; <i64> [#uses=1]
- %tmp13220 = and i64 %tmp13219, 4294901760 ; <i64> [#uses=1]
- %tmp13216 = or i64 %tmp13211, 0 ; <i64> [#uses=1]
- %tmp13221 = or i64 %tmp13216, %tmp13215 ; <i64> [#uses=1]
- %tmp13225 = or i64 %tmp13221, %tmp13220 ; <i64> [#uses=4]
- %tmp1322713228 = trunc i32 %tmp13207 to i16 ; <i16> [#uses=4]
- %tmp13233 = icmp eq i16 %tmp1322713228, 0 ; <i1> [#uses=1]
- br i1 %tmp13233, label %bb13088, label %bb13236
-
-bb13236: ; preds = %bb13107
- br i1 false, label %bb13567, label %bb13252
-
-bb13252: ; preds = %bb13236
- %tmp1329013291 = zext i16 %tmp1322713228 to i64 ; <i64> [#uses=8]
- %tmp13296 = lshr i64 %tmp13225, 16 ; <i64> [#uses=1]
- %tmp13297 = and i64 %tmp13296, 65535 ; <i64> [#uses=1]
- %tmp13299 = lshr i64 %tmp13225, 32 ; <i64> [#uses=1]
- %tmp13300 = and i64 %tmp13299, 65535 ; <i64> [#uses=1]
- %tmp13302 = lshr i64 %tmp13225, 48 ; <i64> [#uses=1]
- %tmp13306 = sub i64 %tmp1329013291, 0 ; <i64> [#uses=0]
- %tmp13309 = sub i64 %tmp1329013291, %tmp13297 ; <i64> [#uses=1]
- %tmp13312 = sub i64 %tmp1329013291, %tmp13300 ; <i64> [#uses=1]
- %tmp13315 = sub i64 %tmp1329013291, %tmp13302 ; <i64> [#uses=1]
- %tmp13318 = mul i64 %tmp1329013291, %tmp1329013291 ; <i64> [#uses=1]
- br i1 false, label %bb13339, label %bb13324
-
-bb13324: ; preds = %bb13252
- br i1 false, label %bb13339, label %bb13330
-
-bb13330: ; preds = %bb13324
- %tmp13337 = sdiv i64 0, 0 ; <i64> [#uses=1]
- br label %bb13339
-
-bb13339: ; preds = %bb13330, %bb13324, %bb13252
- %r0120.0 = phi i64 [ %tmp13337, %bb13330 ], [ 0, %bb13252 ], [ 4294836225, %bb13324 ] ; <i64> [#uses=1]
- br i1 false, label %bb13360, label %bb13345
-
-bb13345: ; preds = %bb13339
- br i1 false, label %bb13360, label %bb13351
-
-bb13351: ; preds = %bb13345
- %tmp13354 = mul i64 0, %tmp13318 ; <i64> [#uses=1]
- %tmp13357 = sub i64 %tmp1329013291, %tmp13309 ; <i64> [#uses=1]
- %tmp13358 = sdiv i64 %tmp13354, %tmp13357 ; <i64> [#uses=1]
- br label %bb13360
-
-bb13360: ; preds = %bb13351, %bb13345, %bb13339
- %r1121.0 = phi i64 [ %tmp13358, %bb13351 ], [ 0, %bb13339 ], [ 4294836225, %bb13345 ] ; <i64> [#uses=1]
- br i1 false, label %bb13402, label %bb13387
-
-bb13387: ; preds = %bb13360
- br label %bb13402
-
-bb13402: ; preds = %bb13387, %bb13360
- %r3123.0 = phi i64 [ 0, %bb13360 ], [ 4294836225, %bb13387 ] ; <i64> [#uses=1]
- %tmp13404 = icmp eq i16 %tmp1322713228, -1 ; <i1> [#uses=1]
- br i1 %tmp13404, label %bb13435, label %bb13407
-
-bb13407: ; preds = %bb13402
- br label %bb13435
-
-bb13435: ; preds = %bb13407, %bb13402
- %r0120.1 = phi i64 [ 0, %bb13407 ], [ %r0120.0, %bb13402 ] ; <i64> [#uses=0]
- %r1121.1 = phi i64 [ 0, %bb13407 ], [ %r1121.0, %bb13402 ] ; <i64> [#uses=0]
- %r3123.1 = phi i64 [ 0, %bb13407 ], [ %r3123.0, %bb13402 ] ; <i64> [#uses=0]
- %tmp13450 = mul i64 0, %tmp13312 ; <i64> [#uses=0]
- %tmp13455 = mul i64 0, %tmp13315 ; <i64> [#uses=0]
- %tmp13461 = add i64 0, %tmp1329013291 ; <i64> [#uses=1]
- %tmp13462 = mul i64 %tmp13461, 65535 ; <i64> [#uses=1]
- %tmp13466 = sub i64 %tmp13462, 0 ; <i64> [#uses=1]
- %tmp13526 = add i64 %tmp13466, 1 ; <i64> [#uses=1]
- %tmp13527 = add i64 %tmp13526, 0 ; <i64> [#uses=1]
- %tmp13528 = ashr i64 %tmp13527, 16 ; <i64> [#uses=4]
- %tmp13536 = sub i64 %tmp13528, 0 ; <i64> [#uses=1]
- %tmp13537 = shl i64 %tmp13536, 32 ; <i64> [#uses=1]
- %tmp13538 = and i64 %tmp13537, 281470681743360 ; <i64> [#uses=1]
- %tmp13542 = sub i64 %tmp13528, 0 ; <i64> [#uses=1]
- %tmp13543 = shl i64 %tmp13542, 16 ; <i64> [#uses=1]
- %tmp13544 = and i64 %tmp13543, 4294901760 ; <i64> [#uses=1]
- %tmp13548 = sub i64 %tmp13528, 0 ; <i64> [#uses=1]
- %tmp13549 = and i64 %tmp13548, 65535 ; <i64> [#uses=1]
- %tmp13539 = or i64 %tmp13538, 0 ; <i64> [#uses=1]
- %tmp13545 = or i64 %tmp13539, %tmp13549 ; <i64> [#uses=1]
- %tmp13550 = or i64 %tmp13545, %tmp13544 ; <i64> [#uses=1]
- %tmp1355213553 = trunc i64 %tmp13528 to i16 ; <i16> [#uses=1]
- br label %bb13567
-
-bb13567: ; preds = %bb13435, %bb13236
- %tsp1040.0.0 = phi i64 [ %tmp13550, %bb13435 ], [ %tmp13225, %bb13236 ] ; <i64> [#uses=0]
- %tsp1040.1.0 = phi i16 [ %tmp1355213553, %bb13435 ], [ %tmp1322713228, %bb13236 ] ; <i16> [#uses=1]
- br i1 %tmp13238, label %bb13088, label %bb13572
-
-bb13572: ; preds = %bb13567
- store i16 %tsp1040.1.0, i16* null, align 2
- br label %bb13088
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-28-CyclicSchedUnit.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-04-28-CyclicSchedUnit.ll
deleted file mode 100644
index 6e8e98d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-04-28-CyclicSchedUnit.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define i64 @t(i64 %maxIdleDuration) nounwind {
- call void asm sideeffect "wrmsr", "{cx},A,~{dirflag},~{fpsr},~{flags}"( i32 416, i64 0 ) nounwind
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll
deleted file mode 100644
index a708224..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -enable-unsafe-fp-math -march=x86 | grep jnp
-; rdar://5902801
-
-declare void @test2()
-
-define i32 @test(double %p) nounwind {
- %tmp5 = fcmp uno double %p, 0.000000e+00
- br i1 %tmp5, label %bb, label %UnifiedReturnBlock
-bb:
- call void @test2()
- ret i32 17
-UnifiedReturnBlock:
- ret i32 42
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-05-09-PHIElimBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-05-09-PHIElimBug.ll
deleted file mode 100644
index cea0076..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-05-09-PHIElimBug.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86
-
- %struct.V = type { <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x i32>, float*, float*, float*, float*, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, i32, i32, i32, i32, i32, i32, i32, i32 }
-
-define fastcc void @t() nounwind {
-entry:
- br i1 false, label %bb23816.preheader, label %bb23821
-
-bb23816.preheader: ; preds = %entry
- %tmp23735 = and i32 0, 2 ; <i32> [#uses=0]
- br label %bb23830
-
-bb23821: ; preds = %entry
- br i1 false, label %bb23830, label %bb23827
-
-bb23827: ; preds = %bb23821
- %tmp23829 = getelementptr %struct.V* null, i32 0, i32 42 ; <i32*> [#uses=0]
- br label %bb23830
-
-bb23830: ; preds = %bb23827, %bb23821, %bb23816.preheader
- %scaledInDst.2.reg2mem.5 = phi i8 [ undef, %bb23827 ], [ undef, %bb23821 ], [ undef, %bb23816.preheader ] ; <i8> [#uses=1]
- %toBool35047 = icmp eq i8 %scaledInDst.2.reg2mem.5, 0 ; <i1> [#uses=1]
- %bothcond39107 = or i1 %toBool35047, false ; <i1> [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-05-09-ShuffleLoweringBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-05-09-ShuffleLoweringBug.ll
deleted file mode 100644
index 5ceb546..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-05-09-ShuffleLoweringBug.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-
-define fastcc void @glgVectorFloatConversion() nounwind {
- %tmp12745 = load <4 x float>* null, align 16 ; <<4 x float>> [#uses=1]
- %tmp12773 = insertelement <4 x float> %tmp12745, float 1.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
- %tmp12774 = insertelement <4 x float> %tmp12773, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
- %tmp12775 = insertelement <4 x float> %tmp12774, float 1.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp12775, <4 x float>* null, align 16
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-05-12-tailmerge-5.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-05-12-tailmerge-5.ll
deleted file mode 100644
index 4852e89..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-05-12-tailmerge-5.ll
+++ /dev/null
@@ -1,145 +0,0 @@
-; RUN: llc < %s | grep abort | count 1
-; Calls to abort should all be merged
-
-; ModuleID = '5898899.c'
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin8"
- %struct.BoundaryAlignment = type { [3 x i8], i8, i16, i16, i8, [2 x i8] }
-
-define void @passing2(i64 %str.0, i64 %str.1, i16 signext %s, i32 %j, i8 signext %c, i16 signext %t, i16 signext %u, i8 signext %d) nounwind optsize {
-entry:
- %str_addr = alloca %struct.BoundaryAlignment ; <%struct.BoundaryAlignment*> [#uses=7]
- %s_addr = alloca i16 ; <i16*> [#uses=1]
- %j_addr = alloca i32 ; <i32*> [#uses=2]
- %c_addr = alloca i8 ; <i8*> [#uses=2]
- %t_addr = alloca i16 ; <i16*> [#uses=2]
- %u_addr = alloca i16 ; <i16*> [#uses=2]
- %d_addr = alloca i8 ; <i8*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = bitcast %struct.BoundaryAlignment* %str_addr to { i64, i64 }* ; <{ i64, i64 }*> [#uses=1]
- %tmp1 = getelementptr { i64, i64 }* %tmp, i32 0, i32 0 ; <i64*> [#uses=1]
- store i64 %str.0, i64* %tmp1
- %tmp2 = bitcast %struct.BoundaryAlignment* %str_addr to { i64, i64 }* ; <{ i64, i64 }*> [#uses=1]
- %tmp3 = getelementptr { i64, i64 }* %tmp2, i32 0, i32 1 ; <i64*> [#uses=1]
- %bc = bitcast i64* %tmp3 to i8* ; <i8*> [#uses=2]
- %byte = trunc i64 %str.1 to i8 ; <i8> [#uses=1]
- store i8 %byte, i8* %bc
- %shft = lshr i64 %str.1, 8 ; <i64> [#uses=2]
- %Loc = getelementptr i8* %bc, i32 1 ; <i8*> [#uses=2]
- %byte4 = trunc i64 %shft to i8 ; <i8> [#uses=1]
- store i8 %byte4, i8* %Loc
- %shft5 = lshr i64 %shft, 8 ; <i64> [#uses=2]
- %Loc6 = getelementptr i8* %Loc, i32 1 ; <i8*> [#uses=2]
- %byte7 = trunc i64 %shft5 to i8 ; <i8> [#uses=1]
- store i8 %byte7, i8* %Loc6
- %shft8 = lshr i64 %shft5, 8 ; <i64> [#uses=2]
- %Loc9 = getelementptr i8* %Loc6, i32 1 ; <i8*> [#uses=2]
- %byte10 = trunc i64 %shft8 to i8 ; <i8> [#uses=1]
- store i8 %byte10, i8* %Loc9
- %shft11 = lshr i64 %shft8, 8 ; <i64> [#uses=0]
- %Loc12 = getelementptr i8* %Loc9, i32 1 ; <i8*> [#uses=0]
- store i16 %s, i16* %s_addr
- store i32 %j, i32* %j_addr
- store i8 %c, i8* %c_addr
- store i16 %t, i16* %t_addr
- store i16 %u, i16* %u_addr
- store i8 %d, i8* %d_addr
- %tmp13 = getelementptr %struct.BoundaryAlignment* %str_addr, i32 0, i32 0 ; <[3 x i8]*> [#uses=1]
- %tmp1314 = bitcast [3 x i8]* %tmp13 to i32* ; <i32*> [#uses=1]
- %tmp15 = load i32* %tmp1314, align 4 ; <i32> [#uses=1]
- %tmp16 = shl i32 %tmp15, 14 ; <i32> [#uses=1]
- %tmp17 = ashr i32 %tmp16, 23 ; <i32> [#uses=1]
- %tmp1718 = trunc i32 %tmp17 to i16 ; <i16> [#uses=1]
- %sextl = shl i16 %tmp1718, 7 ; <i16> [#uses=1]
- %sextr = ashr i16 %sextl, 7 ; <i16> [#uses=2]
- %sextl19 = shl i16 %sextr, 7 ; <i16> [#uses=1]
- %sextr20 = ashr i16 %sextl19, 7 ; <i16> [#uses=0]
- %sextl21 = shl i16 %sextr, 7 ; <i16> [#uses=1]
- %sextr22 = ashr i16 %sextl21, 7 ; <i16> [#uses=1]
- %sextr2223 = sext i16 %sextr22 to i32 ; <i32> [#uses=1]
- %tmp24 = load i32* %j_addr, align 4 ; <i32> [#uses=1]
- %tmp25 = icmp ne i32 %sextr2223, %tmp24 ; <i1> [#uses=1]
- %tmp2526 = zext i1 %tmp25 to i8 ; <i8> [#uses=1]
- %toBool = icmp ne i8 %tmp2526, 0 ; <i1> [#uses=1]
- br i1 %toBool, label %bb, label %bb27
-
-bb: ; preds = %entry
- call void (...)* @abort( ) noreturn nounwind
- unreachable
-
-bb27: ; preds = %entry
- %tmp28 = getelementptr %struct.BoundaryAlignment* %str_addr, i32 0, i32 1 ; <i8*> [#uses=1]
- %tmp29 = load i8* %tmp28, align 4 ; <i8> [#uses=1]
- %tmp30 = load i8* %c_addr, align 1 ; <i8> [#uses=1]
- %tmp31 = icmp ne i8 %tmp29, %tmp30 ; <i1> [#uses=1]
- %tmp3132 = zext i1 %tmp31 to i8 ; <i8> [#uses=1]
- %toBool33 = icmp ne i8 %tmp3132, 0 ; <i1> [#uses=1]
- br i1 %toBool33, label %bb34, label %bb35
-
-bb34: ; preds = %bb27
- call void (...)* @abort( ) noreturn nounwind
- unreachable
-
-bb35: ; preds = %bb27
- %tmp36 = getelementptr %struct.BoundaryAlignment* %str_addr, i32 0, i32 2 ; <i16*> [#uses=1]
- %tmp37 = load i16* %tmp36, align 4 ; <i16> [#uses=1]
- %tmp38 = shl i16 %tmp37, 7 ; <i16> [#uses=1]
- %tmp39 = ashr i16 %tmp38, 7 ; <i16> [#uses=1]
- %sextl40 = shl i16 %tmp39, 7 ; <i16> [#uses=1]
- %sextr41 = ashr i16 %sextl40, 7 ; <i16> [#uses=2]
- %sextl42 = shl i16 %sextr41, 7 ; <i16> [#uses=1]
- %sextr43 = ashr i16 %sextl42, 7 ; <i16> [#uses=0]
- %sextl44 = shl i16 %sextr41, 7 ; <i16> [#uses=1]
- %sextr45 = ashr i16 %sextl44, 7 ; <i16> [#uses=1]
- %tmp46 = load i16* %t_addr, align 2 ; <i16> [#uses=1]
- %tmp47 = icmp ne i16 %sextr45, %tmp46 ; <i1> [#uses=1]
- %tmp4748 = zext i1 %tmp47 to i8 ; <i8> [#uses=1]
- %toBool49 = icmp ne i8 %tmp4748, 0 ; <i1> [#uses=1]
- br i1 %toBool49, label %bb50, label %bb51
-
-bb50: ; preds = %bb35
- call void (...)* @abort( ) noreturn nounwind
- unreachable
-
-bb51: ; preds = %bb35
- %tmp52 = getelementptr %struct.BoundaryAlignment* %str_addr, i32 0, i32 3 ; <i16*> [#uses=1]
- %tmp53 = load i16* %tmp52, align 4 ; <i16> [#uses=1]
- %tmp54 = shl i16 %tmp53, 7 ; <i16> [#uses=1]
- %tmp55 = ashr i16 %tmp54, 7 ; <i16> [#uses=1]
- %sextl56 = shl i16 %tmp55, 7 ; <i16> [#uses=1]
- %sextr57 = ashr i16 %sextl56, 7 ; <i16> [#uses=2]
- %sextl58 = shl i16 %sextr57, 7 ; <i16> [#uses=1]
- %sextr59 = ashr i16 %sextl58, 7 ; <i16> [#uses=0]
- %sextl60 = shl i16 %sextr57, 7 ; <i16> [#uses=1]
- %sextr61 = ashr i16 %sextl60, 7 ; <i16> [#uses=1]
- %tmp62 = load i16* %u_addr, align 2 ; <i16> [#uses=1]
- %tmp63 = icmp ne i16 %sextr61, %tmp62 ; <i1> [#uses=1]
- %tmp6364 = zext i1 %tmp63 to i8 ; <i8> [#uses=1]
- %toBool65 = icmp ne i8 %tmp6364, 0 ; <i1> [#uses=1]
- br i1 %toBool65, label %bb66, label %bb67
-
-bb66: ; preds = %bb51
- call void (...)* @abort( ) noreturn nounwind
- unreachable
-
-bb67: ; preds = %bb51
- %tmp68 = getelementptr %struct.BoundaryAlignment* %str_addr, i32 0, i32 4 ; <i8*> [#uses=1]
- %tmp69 = load i8* %tmp68, align 4 ; <i8> [#uses=1]
- %tmp70 = load i8* %d_addr, align 1 ; <i8> [#uses=1]
- %tmp71 = icmp ne i8 %tmp69, %tmp70 ; <i1> [#uses=1]
- %tmp7172 = zext i1 %tmp71 to i8 ; <i8> [#uses=1]
- %toBool73 = icmp ne i8 %tmp7172, 0 ; <i1> [#uses=1]
- br i1 %toBool73, label %bb74, label %bb75
-
-bb74: ; preds = %bb67
- call void (...)* @abort( ) noreturn nounwind
- unreachable
-
-bb75: ; preds = %bb67
- br label %return
-
-return: ; preds = %bb75
- ret void
-}
-
-declare void @abort(...) noreturn nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-05-21-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-05-21-CoalescerBug.ll
deleted file mode 100644
index 9cf50f4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-05-21-CoalescerBug.ll
+++ /dev/null
@@ -1,98 +0,0 @@
-; RUN: llc < %s -march=x86 -O0 -fast-isel=false | grep mov | count 5
-; PR2343
-
- %llvm.dbg.anchor.type = type { i32, i32 }
- %struct.CUMULATIVE_ARGS = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.VEC_basic_block_base = type { i32, i32, [1 x %struct.basic_block_def*] }
- %struct.VEC_basic_block_gc = type { %struct.VEC_basic_block_base }
- %struct.VEC_edge_base = type { i32, i32, [1 x %struct.edge_def*] }
- %struct.VEC_edge_gc = type { %struct.VEC_edge_base }
- %struct.VEC_rtx_base = type { i32, i32, [1 x %struct.rtx_def*] }
- %struct.VEC_rtx_gc = type { %struct.VEC_rtx_base }
- %struct.VEC_temp_slot_p_base = type { i32, i32, [1 x %struct.temp_slot*] }
- %struct.VEC_temp_slot_p_gc = type { %struct.VEC_temp_slot_p_base }
- %struct.VEC_tree_base = type { i32, i32, [1 x %struct.tree_node*] }
- %struct.VEC_tree_gc = type { %struct.VEC_tree_base }
- %struct.__sbuf = type { i8*, i32 }
- %struct._obstack_chunk = type { i8*, %struct._obstack_chunk*, [4 x i8] }
- %struct.basic_block_def = type { %struct.tree_node*, %struct.VEC_edge_gc*, %struct.VEC_edge_gc*, i8*, %struct.loop*, [2 x %struct.et_node*], %struct.basic_block_def*, %struct.basic_block_def*, %struct.basic_block_il_dependent, %struct.tree_node*, %struct.edge_prediction*, i64, i32, i32, i32, i32 }
- %struct.basic_block_il_dependent = type { %struct.rtl_bb_info* }
- %struct.bitmap_element_def = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, i32, [4 x i32] }
- %struct.bitmap_head_def = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, i32, %struct.bitmap_obstack* }
- %struct.bitmap_obstack = type { %struct.bitmap_element_def*, %struct.bitmap_head_def*, %struct.obstack }
- %struct.block_symbol = type { [3 x %struct.cfg_stats_d], %struct.object_block*, i64 }
- %struct.cfg_stats_d = type { i32 }
- %struct.control_flow_graph = type { %struct.basic_block_def*, %struct.basic_block_def*, %struct.VEC_basic_block_gc*, i32, i32, i32, %struct.VEC_basic_block_gc*, i32 }
- %struct.def_optype_d = type { %struct.def_optype_d*, %struct.tree_node** }
- %struct.edge_def = type { %struct.basic_block_def*, %struct.basic_block_def*, %struct.edge_def_insns, i8*, %struct.__sbuf*, i32, i32, i64, i32 }
- %struct.edge_def_insns = type { %struct.rtx_def* }
- %struct.edge_prediction = type { %struct.edge_prediction*, %struct.edge_def*, i32, i32 }
- %struct.eh_status = type opaque
- %struct.emit_status = type { i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack*, i32, %struct.__sbuf, i32, i8*, %struct.rtx_def** }
- %struct.et_node = type opaque
- %struct.expr_status = type { i32, i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
- %struct.function = type { %struct.eh_status*, %struct.expr_status*, %struct.emit_status*, %struct.varasm_status*, %struct.control_flow_graph*, %struct.tree_node*, %struct.function*, i32, i32, i32, i32, %struct.rtx_def*, %struct.CUMULATIVE_ARGS, %struct.rtx_def*, %struct.rtx_def*, %struct.initial_value_struct*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, i8, i32, i64, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct.VEC_temp_slot_p_gc*, %struct.temp_slot*, %struct.var_refs_queue*, i32, i32, i32, i32, %struct.machine_function*, i32, i32, %struct.language_function*, %struct.htab*, %struct.rtx_def*, i32, i32, i32, %struct.__sbuf, %struct.VEC_tree_gc*, %struct.tree_node*, i8*, i8*, i8*, i8*, i8*, %struct.tree_node*, i8, i8, i8, i8, i8, i8 }
- %struct.htab = type { i32 (i8*)*, i32 (i8*, i8*)*, void (i8*)*, i8**, i32, i32, i32, i32, i32, i8* (i32, i32)*, void (i8*)*, i8*, i8* (i8*, i32, i32)*, void (i8*, i8*)*, i32 }
- %struct.initial_value_struct = type opaque
- %struct.lang_decl = type opaque
- %struct.language_function = type opaque
- %struct.loop = type { i32, %struct.basic_block_def*, %struct.basic_block_def*, %llvm.dbg.anchor.type, i32, i32, i32, i32, %struct.loop**, i32, %struct.loop*, %struct.loop*, %struct.loop*, %struct.loop*, i8*, %struct.tree_node*, %struct.tree_node*, %struct.nb_iter_bound*, %struct.edge_def*, i32 }
- %struct.machine_function = type opaque
- %struct.maydef_optype_d = type { %struct.maydef_optype_d*, %struct.tree_node*, %struct.tree_node*, %struct.ssa_use_operand_d }
- %struct.nb_iter_bound = type { %struct.tree_node*, %struct.tree_node*, %struct.nb_iter_bound* }
- %struct.object_block = type { %struct.section*, i32, i64, %struct.VEC_rtx_gc*, %struct.VEC_rtx_gc* }
- %struct.obstack = type { i32, %struct._obstack_chunk*, i8*, i8*, i8*, i32, i32, %struct._obstack_chunk* (i8*, i32)*, void (i8*, %struct._obstack_chunk*)*, i8*, i8 }
- %struct.rtl_bb_info = type { %struct.rtx_def*, %struct.rtx_def*, %struct.bitmap_head_def*, %struct.bitmap_head_def*, %struct.rtx_def*, %struct.rtx_def*, i32 }
- %struct.rtx_def = type { i16, i8, i8, %struct.u }
- %struct.section = type { %struct.unnamed_section }
- %struct.sequence_stack = type { %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack* }
- %struct.ssa_use_operand_d = type { %struct.ssa_use_operand_d*, %struct.ssa_use_operand_d*, %struct.tree_node*, %struct.tree_node** }
- %struct.stmt_ann_d = type { %struct.tree_ann_common_d, i8, %struct.basic_block_def*, %struct.stmt_operands_d, %struct.bitmap_head_def*, i32, i8* }
- %struct.stmt_operands_d = type { %struct.def_optype_d*, %struct.use_optype_d*, %struct.maydef_optype_d*, %struct.vuse_optype_d*, %struct.maydef_optype_d* }
- %struct.temp_slot = type opaque
- %struct.tree_ann_common_d = type { i32, i8*, %struct.tree_node* }
- %struct.tree_ann_d = type { %struct.stmt_ann_d }
- %struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, %struct.tree_ann_d*, i8, i8, i8, i8, i8 }
- %struct.tree_decl_common = type { %struct.tree_decl_minimal, %struct.tree_node*, i8, i8, i8, i8, i8, i32, %struct.tree_decl_u1, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_decl* }
- %struct.tree_decl_minimal = type { %struct.tree_common, %struct.__sbuf, i32, %struct.tree_node*, %struct.tree_node* }
- %struct.tree_decl_non_common = type { %struct.tree_decl_with_vis, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node* }
- %struct.tree_decl_u1 = type { i64 }
- %struct.tree_decl_with_rtl = type { %struct.tree_decl_common, %struct.rtx_def*, i32 }
- %struct.tree_decl_with_vis = type { %struct.tree_decl_with_rtl, %struct.tree_node*, %struct.tree_node*, i8, i8, i8 }
- %struct.tree_function_decl = type { %struct.tree_decl_non_common, i8, i8, i64, %struct.function* }
- %struct.tree_node = type { %struct.tree_function_decl }
- %struct.u = type { %struct.block_symbol }
- %struct.unnamed_section = type { %struct.cfg_stats_d, void (i8*)*, i8*, %struct.section* }
- %struct.use_optype_d = type { %struct.use_optype_d*, %struct.ssa_use_operand_d }
- %struct.var_refs_queue = type { %struct.rtx_def*, i32, i32, %struct.var_refs_queue* }
- %struct.varasm_status = type opaque
- %struct.vuse_optype_d = type { %struct.vuse_optype_d*, %struct.tree_node*, %struct.ssa_use_operand_d }
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (%struct.edge_def* (%struct.edge_def*, %struct.basic_block_def*)* @tree_redirect_edge_and_branch to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define %struct.edge_def* @tree_redirect_edge_and_branch(%struct.edge_def* %e1, %struct.basic_block_def* %dest2) nounwind {
-entry:
- br label %bb497
-
-bb483: ; preds = %bb497
- %tmp496 = load %struct.tree_node** null, align 4 ; <%struct.tree_node*> [#uses=1]
- br label %bb497
-
-bb497: ; preds = %bb483, %entry
- %cases.0 = phi %struct.tree_node* [ %tmp496, %bb483 ], [ null, %entry ] ; <%struct.tree_node*> [#uses=1]
- %last.0 = phi %struct.tree_node* [ %cases.0, %bb483 ], [ undef, %entry ] ; <%struct.tree_node*> [#uses=1]
- %foo = phi i1 [ 0, %bb483 ], [ 1, %entry ]
- br i1 %foo, label %bb483, label %bb502
-
-bb502: ; preds = %bb497
- br i1 %foo, label %bb507, label %bb841
-
-bb507: ; preds = %bb502
- %tmp517 = getelementptr %struct.tree_node* %last.0, i32 0, i32 0 ; <%struct.tree_function_decl*> [#uses=1]
- %tmp517518 = bitcast %struct.tree_function_decl* %tmp517 to %struct.tree_common* ; <%struct.tree_common*> [#uses=1]
- %tmp519 = getelementptr %struct.tree_common* %tmp517518, i32 0, i32 0 ; <%struct.tree_node**> [#uses=1]
- store %struct.tree_node* null, %struct.tree_node** %tmp519, align 4
- br label %bb841
-
-bb841: ; preds = %bb507, %bb502
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll
deleted file mode 100644
index 19a7354..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movups | count 2
-
-define void @a(<4 x float>* %x) nounwind {
-entry:
- %tmp2 = load <4 x float>* %x, align 1
- %inv = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %tmp2)
- store <4 x float> %inv, <4 x float>* %x, align 1
- ret void
-}
-
-declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-05-28-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-05-28-CoalescerBug.ll
deleted file mode 100644
index 32bf8d4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-05-28-CoalescerBug.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu
-; PR2289
-
-define void @_ada_ca11001() {
-entry:
- %tmp59 = call i16 @ca11001_0__cartesian_assign( i8 zeroext 0, i8 zeroext 0, i16 undef ) ; <i16> [#uses=0]
- unreachable
-}
-
-declare i16 @ca11001_0__cartesian_assign(i8 zeroext , i8 zeroext , i16)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll
deleted file mode 100644
index f1a19ec..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -regalloc=local
-
-@_ZTVN10Evaluation10GridOutputILi3EEE = external constant [5 x i32 (...)*] ; <[5 x i32 (...)*]*> [#uses=1]
-
-declare i8* @llvm.eh.exception() nounwind
-
-declare i8* @_Znwm(i32)
-
-declare i8* @__cxa_begin_catch(i8*) nounwind
-
-define i32 @main(i32 %argc, i8** %argv) {
-entry:
- br i1 false, label %bb37, label %bb34
-
-bb34: ; preds = %entry
- ret i32 1
-
-bb37: ; preds = %entry
- %tmp12.i.i.i.i.i66 = invoke i8* @_Znwm( i32 12 )
- to label %tmp12.i.i.i.i.i.noexc65 unwind label %lpad243 ; <i8*> [#uses=0]
-
-tmp12.i.i.i.i.i.noexc65: ; preds = %bb37
- unreachable
-
-lpad243: ; preds = %bb37
- %eh_ptr244 = call i8* @llvm.eh.exception( ) ; <i8*> [#uses=1]
- store i32 (...)** getelementptr ([5 x i32 (...)*]* @_ZTVN10Evaluation10GridOutputILi3EEE, i32 0, i32 2), i32 (...)*** null, align 8
- %tmp133 = call i8* @__cxa_begin_catch( i8* %eh_ptr244 ) nounwind ; <i8*> [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-06-04-MemCpyLoweringBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-06-04-MemCpyLoweringBug.ll
deleted file mode 100644
index 236b7cd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-06-04-MemCpyLoweringBug.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+sse2 -disable-fp-elim | grep subl | grep 24
-
- %struct.argument_t = type { i8*, %struct.argument_t*, i32, %struct.ipc_type_t*, i32, void (...)*, void (...)*, void (...)*, void (...)*, void (...)*, i8*, i8*, i8*, i8*, i8*, i32, i32, i32, %struct.routine*, %struct.argument_t*, %struct.argument_t*, %struct.argument_t*, %struct.argument_t*, %struct.argument_t*, %struct.argument_t*, %struct.argument_t*, i32, i32, i32, i32, i32, i32 }
- %struct.ipc_type_t = type { i8*, %struct.ipc_type_t*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i32, i32, i32, i32, i32, i32, %struct.ipc_type_t*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8* }
- %struct.routine = type opaque
-@"\01LC" = external constant [11 x i8] ; <[11 x i8]*> [#uses=1]
-
-define i8* @InArgMsgField(%struct.argument_t* %arg, i8* %str) nounwind {
-entry:
- %who = alloca [20 x i8] ; <[20 x i8]*> [#uses=1]
- %who1 = getelementptr [20 x i8]* %who, i32 0, i32 0 ; <i8*> [#uses=2]
- call void @llvm.memset.i32( i8* %who1, i8 0, i32 20, i32 1 )
- call void @llvm.memcpy.i32( i8* %who1, i8* getelementptr ([11 x i8]* @"\01LC", i32 0, i32 0), i32 11, i32 1 )
- unreachable
-}
-
-declare void @llvm.memset.i32(i8*, i8, i32, i32) nounwind
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll
deleted file mode 100644
index 90af387..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep movsd
-; RUN: llc < %s -march=x86 | grep movw
-; RUN: llc < %s -march=x86 | grep addw
-; These transforms are turned off for volatile loads and stores.
-; Check that they weren't turned off for all loads and stores!
-
-@atomic = global double 0.000000e+00 ; <double*> [#uses=1]
-@atomic2 = global double 0.000000e+00 ; <double*> [#uses=1]
-@ioport = global i32 0 ; <i32*> [#uses=1]
-@ioport2 = global i32 0 ; <i32*> [#uses=1]
-
-define i16 @f(i64 %x) {
- %b = bitcast i64 %x to double ; <double> [#uses=1]
- store double %b, double* @atomic
- store double 0.000000e+00, double* @atomic2
- %l = load i32* @ioport ; <i32> [#uses=1]
- %t = trunc i32 %l to i16 ; <i16> [#uses=1]
- %l2 = load i32* @ioport2 ; <i32> [#uses=1]
- %tmp = lshr i32 %l2, 16 ; <i32> [#uses=1]
- %t2 = trunc i32 %tmp to i16 ; <i16> [#uses=1]
- %f = add i16 %t, %t2 ; <i16> [#uses=1]
- ret i16 %f
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll
deleted file mode 100644
index 500cd1f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movsd | count 5
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movl | count 2
-
-@atomic = global double 0.000000e+00 ; <double*> [#uses=1]
-@atomic2 = global double 0.000000e+00 ; <double*> [#uses=1]
-@anything = global i64 0 ; <i64*> [#uses=1]
-@ioport = global i32 0 ; <i32*> [#uses=2]
-
-define i16 @f(i64 %x, double %y) {
- %b = bitcast i64 %x to double ; <double> [#uses=1]
- volatile store double %b, double* @atomic ; one processor operation only
- volatile store double 0.000000e+00, double* @atomic2 ; one processor operation only
- %b2 = bitcast double %y to i64 ; <i64> [#uses=1]
- volatile store i64 %b2, i64* @anything ; may transform to store of double
- %l = volatile load i32* @ioport ; must not narrow
- %t = trunc i32 %l to i16 ; <i16> [#uses=1]
- %l2 = volatile load i32* @ioport ; must not narrow
- %tmp = lshr i32 %l2, 16 ; <i32> [#uses=1]
- %t2 = trunc i32 %tmp to i16 ; <i16> [#uses=1]
- %f = add i16 %t, %t2 ; <i16> [#uses=1]
- ret i16 %f
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-06-16-SubregsBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-06-16-SubregsBug.ll
deleted file mode 100644
index 4d4819a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-06-16-SubregsBug.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin | grep mov | count 4
-
-define i16 @test(i16* %tmp179) nounwind {
- %tmp180 = load i16* %tmp179, align 2 ; <i16> [#uses=2]
- %tmp184 = and i16 %tmp180, -1024 ; <i16> [#uses=1]
- %tmp186 = icmp eq i16 %tmp184, -32768 ; <i1> [#uses=1]
- br i1 %tmp186, label %bb189, label %bb288
-
-bb189: ; preds = %0
- ret i16 %tmp180
-
-bb288: ; preds = %0
- ret i16 32
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-06-18-BadShuffle.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-06-18-BadShuffle.ll
deleted file mode 100644
index 66f9065..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-06-18-BadShuffle.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=i386 -mattr=+sse2 | grep pinsrw
-
-; Test to make sure we actually insert the bottom element of the vector
-define <8 x i16> @a(<8 x i16> %a) nounwind {
-entry:
- shufflevector <8 x i16> %a, <8 x i16> zeroinitializer, <8 x i32> < i32 0, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8 >
- %add = add <8 x i16> %0, %a
- ret <8 x i16> %add
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-06-25-VecISelBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-06-25-VecISelBug.ll
deleted file mode 100644
index 72d1907..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-06-25-VecISelBug.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep pslldq
-
-define void @t() nounwind {
-entry:
- %tmp1 = shufflevector <4 x float> zeroinitializer, <4 x float> < float 0.000000e+00, float 1.000000e+00, float 0.000000e+00, float 1.000000e+00 >, <4 x i32> < i32 0, i32 1, i32 4, i32 5 >
- %tmp2 = insertelement <4 x float> %tmp1, float 1.000000e+00, i32 3
- store <4 x float> %tmp2, <4 x float>* null, align 16
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-07-07-DanglingDeadInsts.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-07-07-DanglingDeadInsts.ll
deleted file mode 100644
index 46341fc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-07-07-DanglingDeadInsts.ll
+++ /dev/null
@@ -1,99 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9
-
- %struct.ogg_stream_state = type { i8*, i32, i32, i32, i32*, i64*, i32, i32, i32, i32, [282 x i8], i32, i32, i32, i32, i32, i64, i64 }
- %struct.res_state = type { i32, i32, i32, i32, float*, float*, i32, i32 }
- %struct.vorbis_comment = type { i8**, i32*, i32, i8* }
-
-declare i32 @strlen(i8*) nounwind readonly
-
-define i32 @res_init(%struct.res_state* %state, i32 %channels, i32 %outfreq, i32 %infreq, i32 %op1, ...) nounwind {
-entry:
- br i1 false, label %bb95, label %bb
-
-bb: ; preds = %entry
- br i1 false, label %bb95, label %bb24
-
-bb24: ; preds = %bb
- br i1 false, label %bb40.preheader, label %bb26
-
-bb26: ; preds = %bb24
- ret i32 -1
-
-bb40.preheader: ; preds = %bb24
- br i1 false, label %bb39, label %bb49.outer
-
-bb39: ; preds = %bb39, %bb40.preheader
- shl i32 0, 1 ; <i32>:0 [#uses=0]
- br i1 false, label %bb39, label %bb49.outer
-
-bb49.outer: ; preds = %bb39, %bb40.preheader
- getelementptr %struct.res_state* %state, i32 0, i32 3 ; <i32*>:1 [#uses=0]
- getelementptr %struct.res_state* %state, i32 0, i32 7 ; <i32*>:2 [#uses=0]
- %base10.1 = select i1 false, float* null, float* null ; <float*> [#uses=1]
- br label %bb74
-
-bb69: ; preds = %bb74
- br label %bb71
-
-bb71: ; preds = %bb74, %bb69
- store float 0.000000e+00, float* null, align 4
- add i32 0, 1 ; <i32>:3 [#uses=1]
- %indvar.next137 = add i32 %indvar136, 1 ; <i32> [#uses=1]
- br i1 false, label %bb74, label %bb73
-
-bb73: ; preds = %bb71
- %.rec = add i32 %base10.2.ph.rec, 1 ; <i32> [#uses=2]
- getelementptr float* %base10.1, i32 %.rec ; <float*>:4 [#uses=1]
- br label %bb74
-
-bb74: ; preds = %bb73, %bb71, %bb49.outer
- %N13.1.ph = phi i32 [ 0, %bb49.outer ], [ 0, %bb73 ], [ %N13.1.ph, %bb71 ] ; <i32> [#uses=1]
- %dest12.2.ph = phi float* [ null, %bb49.outer ], [ %4, %bb73 ], [ %dest12.2.ph, %bb71 ] ; <float*> [#uses=1]
- %x8.0.ph = phi i32 [ 0, %bb49.outer ], [ %3, %bb73 ], [ %x8.0.ph, %bb71 ] ; <i32> [#uses=1]
- %base10.2.ph.rec = phi i32 [ 0, %bb49.outer ], [ %.rec, %bb73 ], [ %base10.2.ph.rec, %bb71 ] ; <i32> [#uses=2]
- %indvar136 = phi i32 [ %indvar.next137, %bb71 ], [ 0, %bb73 ], [ 0, %bb49.outer ] ; <i32> [#uses=1]
- br i1 false, label %bb71, label %bb69
-
-bb95: ; preds = %bb, %entry
- ret i32 -1
-}
-
-define i32 @read_resampled(i8* %d, float** %buffer, i32 %samples) nounwind {
-entry:
- br i1 false, label %bb17.preheader, label %bb30
-
-bb17.preheader: ; preds = %entry
- load i32* null, align 4 ; <i32>:0 [#uses=0]
- br label %bb16
-
-bb16: ; preds = %bb16, %bb17.preheader
- %i1.036 = phi i32 [ 0, %bb17.preheader ], [ %1, %bb16 ] ; <i32> [#uses=1]
- add i32 %i1.036, 1 ; <i32>:1 [#uses=2]
- icmp ult i32 %1, 0 ; <i1>:2 [#uses=0]
- br label %bb16
-
-bb30: ; preds = %entry
- ret i32 0
-}
-
-define i32 @ogg_stream_reset_serialno(%struct.ogg_stream_state* %os, i32 %serialno) nounwind {
-entry:
- unreachable
-}
-
-define void @vorbis_lsp_to_curve(float* %curve, i32* %map, i32 %n, i32 %ln, float* %lsp, i32 %m, float %amp, float %ampoffset) nounwind {
-entry:
- unreachable
-}
-
-define i32 @vorbis_comment_query_count(%struct.vorbis_comment* %vc, i8* %tag) nounwind {
-entry:
- %strlen = call i32 @strlen( i8* null ) ; <i32> [#uses=1]
- %endptr = getelementptr i8* null, i32 %strlen ; <i8*> [#uses=0]
- unreachable
-}
-
-define fastcc i32 @push(%struct.res_state* %state, float* %pool, i32* %poolfill, i32* %offset, float* %dest, i32 %dststep, float* %source, i32 %srcstep, i32 %srclen) nounwind {
-entry:
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-07-09-ELFSectionAttributes.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-07-09-ELFSectionAttributes.ll
deleted file mode 100644
index 1a786ef..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-07-09-ELFSectionAttributes.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s | grep ax
-; PR2024
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-linux-gnu"
-
-define i32 @foo(i32 %A, i32 %B) nounwind section ".init.text" {
-entry:
- tail call i32 @bar( i32 %A, i32 %B ) nounwind ; <i32>:0 [#uses=1]
- ret i32 %0
-}
-
-declare i32 @bar(i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-07-11-SHLBy1.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-07-11-SHLBy1.ll
deleted file mode 100644
index ff2b05f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-07-11-SHLBy1.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s -march=x86-64 -o - | not grep shr
-define i128 @sl(i128 %x) {
- %t = shl i128 %x, 1
- ret i128 %t
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-07-11-SpillerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-07-11-SpillerBug.ll
deleted file mode 100644
index 548b44d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-07-11-SpillerBug.ll
+++ /dev/null
@@ -1,52 +0,0 @@
-; RUN: llc < %s -march=x86 -relocation-model=static -disable-fp-elim -post-RA-scheduler=false -asm-verbose=0 | FileCheck %s
-; PR2536
-
-; CHECK: andl $65534, %
-; CHECK-NEXT: movl %
-; CHECK-NEXT: movl $17
-
-@g_5 = external global i16 ; <i16*> [#uses=2]
-@g_107 = external global i16 ; <i16*> [#uses=1]
-@g_229 = external global i32 ; <i32*> [#uses=1]
-@g_227 = external global i16 ; <i16*> [#uses=1]
-
-define i32 @func_54(i32 %p_55, i16 zeroext %p_56) nounwind {
-entry:
- load i16* @g_5, align 2 ; <i16>:0 [#uses=1]
- zext i16 %0 to i32 ; <i32>:1 [#uses=1]
- %.mask = and i32 %1, 65534 ; <i32> [#uses=1]
- icmp eq i32 %.mask, 0 ; <i1>:2 [#uses=1]
- load i32* @g_229, align 4 ; <i32>:3 [#uses=1]
- load i16* @g_227, align 2 ; <i16>:4 [#uses=1]
- icmp eq i16 %4, 0 ; <i1>:5 [#uses=1]
- load i16* @g_5, align 2 ; <i16>:6 [#uses=1]
- br label %bb
-
-bb: ; preds = %bb7.preheader, %entry
- %indvar4 = phi i32 [ 0, %entry ], [ %indvar.next5, %bb7.preheader ] ; <i32> [#uses=1]
- %p_56_addr.1.reg2mem.0 = phi i16 [ %p_56, %entry ], [ %p_56_addr.0, %bb7.preheader ] ; <i16> [#uses=2]
- br i1 %2, label %bb7.preheader, label %bb5
-
-bb5: ; preds = %bb
- store i16 %6, i16* @g_107, align 2
- br label %bb7.preheader
-
-bb7.preheader: ; preds = %bb5, %bb
- icmp eq i16 %p_56_addr.1.reg2mem.0, 0 ; <i1>:7 [#uses=1]
- %.0 = select i1 %7, i32 1, i32 %3 ; <i32> [#uses=1]
- urem i32 1, %.0 ; <i32>:8 [#uses=1]
- icmp eq i32 %8, 0 ; <i1>:9 [#uses=1]
- %.not = xor i1 %9, true ; <i1> [#uses=1]
- %.not1 = xor i1 %5, true ; <i1> [#uses=1]
- %brmerge = or i1 %.not, %.not1 ; <i1> [#uses=1]
- %iftmp.6.0 = select i1 %brmerge, i32 3, i32 0 ; <i32> [#uses=1]
- mul i32 %iftmp.6.0, %3 ; <i32>:10 [#uses=1]
- icmp eq i32 %10, 0 ; <i1>:11 [#uses=1]
- %p_56_addr.0 = select i1 %11, i16 %p_56_addr.1.reg2mem.0, i16 1 ; <i16> [#uses=1]
- %indvar.next5 = add i32 %indvar4, 1 ; <i32> [#uses=2]
- %exitcond6 = icmp eq i32 %indvar.next5, 17 ; <i1> [#uses=1]
- br i1 %exitcond6, label %bb25, label %bb
-
-bb25: ; preds = %bb7.preheader
- ret i32 1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-07-16-CoalescerCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-07-16-CoalescerCrash.ll
deleted file mode 100644
index f56604b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-07-16-CoalescerCrash.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin
-
- %struct.SV = type { i8*, i64, i64 }
-@"\01LC25" = external constant [8 x i8] ; <[8 x i8]*> [#uses=1]
-
-declare void @Perl_sv_catpvf(%struct.SV*, i8*, ...) nounwind
-
-declare fastcc i64 @Perl_utf8n_to_uvuni(i8*, i64, i64*, i64) nounwind
-
-define fastcc i8* @Perl_pv_uni_display(%struct.SV* %dsv, i8* %spv, i64 %len, i64 %pvlim, i64 %flags) nounwind {
-entry:
- br i1 false, label %bb, label %bb40
-
-bb: ; preds = %entry
- tail call fastcc i64 @Perl_utf8n_to_uvuni( i8* null, i64 13, i64* null, i64 255 ) nounwind ; <i64>:0 [#uses=1]
- br i1 false, label %bb6, label %bb33
-
-bb6: ; preds = %bb
- br i1 false, label %bb30, label %bb31
-
-bb30: ; preds = %bb6
- unreachable
-
-bb31: ; preds = %bb6
- icmp eq i8 0, 0 ; <i1>:1 [#uses=0]
- br label %bb33
-
-bb33: ; preds = %bb31, %bb
- tail call void (%struct.SV*, i8*, ...)* @Perl_sv_catpvf( %struct.SV* %dsv, i8* getelementptr ([8 x i8]* @"\01LC25", i32 0, i64 0), i64 %0 ) nounwind
- unreachable
-
-bb40: ; preds = %entry
- ret i8* null
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-07-19-movups-spills.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-07-19-movups-spills.ll
deleted file mode 100644
index 98919ee..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-07-19-movups-spills.ll
+++ /dev/null
@@ -1,636 +0,0 @@
-; RUN: llc < %s -mtriple=i686-pc-linux -realign-stack=1 -mattr=sse2 | grep movaps | count 75
-; RUN: llc < %s -mtriple=i686-pc-linux -realign-stack=0 -mattr=sse2 | grep movaps | count 1
-; PR2539
-
-external global <4 x float>, align 1 ; <<4 x float>*>:0 [#uses=2]
-external global <4 x float>, align 1 ; <<4 x float>*>:1 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:2 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:3 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:4 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:5 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:6 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:7 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:8 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:9 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:10 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:11 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:12 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:13 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:14 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:15 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:16 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:17 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:18 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:19 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:20 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:21 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:22 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:23 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:24 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:25 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:26 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:27 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:28 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:29 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:30 [#uses=1]
-external global <4 x float>, align 1 ; <<4 x float>*>:31 [#uses=1]
-
-declare void @abort()
-
-define void @""() {
- load <4 x float>* @0, align 1 ; <<4 x float>>:1 [#uses=2]
- load <4 x float>* @1, align 1 ; <<4 x float>>:2 [#uses=3]
- load <4 x float>* @2, align 1 ; <<4 x float>>:3 [#uses=4]
- load <4 x float>* @3, align 1 ; <<4 x float>>:4 [#uses=5]
- load <4 x float>* @4, align 1 ; <<4 x float>>:5 [#uses=6]
- load <4 x float>* @5, align 1 ; <<4 x float>>:6 [#uses=7]
- load <4 x float>* @6, align 1 ; <<4 x float>>:7 [#uses=8]
- load <4 x float>* @7, align 1 ; <<4 x float>>:8 [#uses=9]
- load <4 x float>* @8, align 1 ; <<4 x float>>:9 [#uses=10]
- load <4 x float>* @9, align 1 ; <<4 x float>>:10 [#uses=11]
- load <4 x float>* @10, align 1 ; <<4 x float>>:11 [#uses=12]
- load <4 x float>* @11, align 1 ; <<4 x float>>:12 [#uses=13]
- load <4 x float>* @12, align 1 ; <<4 x float>>:13 [#uses=14]
- load <4 x float>* @13, align 1 ; <<4 x float>>:14 [#uses=15]
- load <4 x float>* @14, align 1 ; <<4 x float>>:15 [#uses=16]
- load <4 x float>* @15, align 1 ; <<4 x float>>:16 [#uses=17]
- load <4 x float>* @16, align 1 ; <<4 x float>>:17 [#uses=18]
- load <4 x float>* @17, align 1 ; <<4 x float>>:18 [#uses=19]
- load <4 x float>* @18, align 1 ; <<4 x float>>:19 [#uses=20]
- load <4 x float>* @19, align 1 ; <<4 x float>>:20 [#uses=21]
- load <4 x float>* @20, align 1 ; <<4 x float>>:21 [#uses=22]
- load <4 x float>* @21, align 1 ; <<4 x float>>:22 [#uses=23]
- load <4 x float>* @22, align 1 ; <<4 x float>>:23 [#uses=24]
- load <4 x float>* @23, align 1 ; <<4 x float>>:24 [#uses=25]
- load <4 x float>* @24, align 1 ; <<4 x float>>:25 [#uses=26]
- load <4 x float>* @25, align 1 ; <<4 x float>>:26 [#uses=27]
- load <4 x float>* @26, align 1 ; <<4 x float>>:27 [#uses=28]
- load <4 x float>* @27, align 1 ; <<4 x float>>:28 [#uses=29]
- load <4 x float>* @28, align 1 ; <<4 x float>>:29 [#uses=30]
- load <4 x float>* @29, align 1 ; <<4 x float>>:30 [#uses=31]
- load <4 x float>* @30, align 1 ; <<4 x float>>:31 [#uses=32]
- load <4 x float>* @31, align 1 ; <<4 x float>>:32 [#uses=33]
- fmul <4 x float> %1, %1 ; <<4 x float>>:33 [#uses=1]
- fmul <4 x float> %33, %2 ; <<4 x float>>:34 [#uses=1]
- fmul <4 x float> %34, %3 ; <<4 x float>>:35 [#uses=1]
- fmul <4 x float> %35, %4 ; <<4 x float>>:36 [#uses=1]
- fmul <4 x float> %36, %5 ; <<4 x float>>:37 [#uses=1]
- fmul <4 x float> %37, %6 ; <<4 x float>>:38 [#uses=1]
- fmul <4 x float> %38, %7 ; <<4 x float>>:39 [#uses=1]
- fmul <4 x float> %39, %8 ; <<4 x float>>:40 [#uses=1]
- fmul <4 x float> %40, %9 ; <<4 x float>>:41 [#uses=1]
- fmul <4 x float> %41, %10 ; <<4 x float>>:42 [#uses=1]
- fmul <4 x float> %42, %11 ; <<4 x float>>:43 [#uses=1]
- fmul <4 x float> %43, %12 ; <<4 x float>>:44 [#uses=1]
- fmul <4 x float> %44, %13 ; <<4 x float>>:45 [#uses=1]
- fmul <4 x float> %45, %14 ; <<4 x float>>:46 [#uses=1]
- fmul <4 x float> %46, %15 ; <<4 x float>>:47 [#uses=1]
- fmul <4 x float> %47, %16 ; <<4 x float>>:48 [#uses=1]
- fmul <4 x float> %48, %17 ; <<4 x float>>:49 [#uses=1]
- fmul <4 x float> %49, %18 ; <<4 x float>>:50 [#uses=1]
- fmul <4 x float> %50, %19 ; <<4 x float>>:51 [#uses=1]
- fmul <4 x float> %51, %20 ; <<4 x float>>:52 [#uses=1]
- fmul <4 x float> %52, %21 ; <<4 x float>>:53 [#uses=1]
- fmul <4 x float> %53, %22 ; <<4 x float>>:54 [#uses=1]
- fmul <4 x float> %54, %23 ; <<4 x float>>:55 [#uses=1]
- fmul <4 x float> %55, %24 ; <<4 x float>>:56 [#uses=1]
- fmul <4 x float> %56, %25 ; <<4 x float>>:57 [#uses=1]
- fmul <4 x float> %57, %26 ; <<4 x float>>:58 [#uses=1]
- fmul <4 x float> %58, %27 ; <<4 x float>>:59 [#uses=1]
- fmul <4 x float> %59, %28 ; <<4 x float>>:60 [#uses=1]
- fmul <4 x float> %60, %29 ; <<4 x float>>:61 [#uses=1]
- fmul <4 x float> %61, %30 ; <<4 x float>>:62 [#uses=1]
- fmul <4 x float> %62, %31 ; <<4 x float>>:63 [#uses=1]
- fmul <4 x float> %63, %32 ; <<4 x float>>:64 [#uses=3]
- fmul <4 x float> %2, %2 ; <<4 x float>>:65 [#uses=1]
- fmul <4 x float> %65, %3 ; <<4 x float>>:66 [#uses=1]
- fmul <4 x float> %66, %4 ; <<4 x float>>:67 [#uses=1]
- fmul <4 x float> %67, %5 ; <<4 x float>>:68 [#uses=1]
- fmul <4 x float> %68, %6 ; <<4 x float>>:69 [#uses=1]
- fmul <4 x float> %69, %7 ; <<4 x float>>:70 [#uses=1]
- fmul <4 x float> %70, %8 ; <<4 x float>>:71 [#uses=1]
- fmul <4 x float> %71, %9 ; <<4 x float>>:72 [#uses=1]
- fmul <4 x float> %72, %10 ; <<4 x float>>:73 [#uses=1]
- fmul <4 x float> %73, %11 ; <<4 x float>>:74 [#uses=1]
- fmul <4 x float> %74, %12 ; <<4 x float>>:75 [#uses=1]
- fmul <4 x float> %75, %13 ; <<4 x float>>:76 [#uses=1]
- fmul <4 x float> %76, %14 ; <<4 x float>>:77 [#uses=1]
- fmul <4 x float> %77, %15 ; <<4 x float>>:78 [#uses=1]
- fmul <4 x float> %78, %16 ; <<4 x float>>:79 [#uses=1]
- fmul <4 x float> %79, %17 ; <<4 x float>>:80 [#uses=1]
- fmul <4 x float> %80, %18 ; <<4 x float>>:81 [#uses=1]
- fmul <4 x float> %81, %19 ; <<4 x float>>:82 [#uses=1]
- fmul <4 x float> %82, %20 ; <<4 x float>>:83 [#uses=1]
- fmul <4 x float> %83, %21 ; <<4 x float>>:84 [#uses=1]
- fmul <4 x float> %84, %22 ; <<4 x float>>:85 [#uses=1]
- fmul <4 x float> %85, %23 ; <<4 x float>>:86 [#uses=1]
- fmul <4 x float> %86, %24 ; <<4 x float>>:87 [#uses=1]
- fmul <4 x float> %87, %25 ; <<4 x float>>:88 [#uses=1]
- fmul <4 x float> %88, %26 ; <<4 x float>>:89 [#uses=1]
- fmul <4 x float> %89, %27 ; <<4 x float>>:90 [#uses=1]
- fmul <4 x float> %90, %28 ; <<4 x float>>:91 [#uses=1]
- fmul <4 x float> %91, %29 ; <<4 x float>>:92 [#uses=1]
- fmul <4 x float> %92, %30 ; <<4 x float>>:93 [#uses=1]
- fmul <4 x float> %93, %31 ; <<4 x float>>:94 [#uses=1]
- fmul <4 x float> %94, %32 ; <<4 x float>>:95 [#uses=1]
- fmul <4 x float> %3, %3 ; <<4 x float>>:96 [#uses=1]
- fmul <4 x float> %96, %4 ; <<4 x float>>:97 [#uses=1]
- fmul <4 x float> %97, %5 ; <<4 x float>>:98 [#uses=1]
- fmul <4 x float> %98, %6 ; <<4 x float>>:99 [#uses=1]
- fmul <4 x float> %99, %7 ; <<4 x float>>:100 [#uses=1]
- fmul <4 x float> %100, %8 ; <<4 x float>>:101 [#uses=1]
- fmul <4 x float> %101, %9 ; <<4 x float>>:102 [#uses=1]
- fmul <4 x float> %102, %10 ; <<4 x float>>:103 [#uses=1]
- fmul <4 x float> %103, %11 ; <<4 x float>>:104 [#uses=1]
- fmul <4 x float> %104, %12 ; <<4 x float>>:105 [#uses=1]
- fmul <4 x float> %105, %13 ; <<4 x float>>:106 [#uses=1]
- fmul <4 x float> %106, %14 ; <<4 x float>>:107 [#uses=1]
- fmul <4 x float> %107, %15 ; <<4 x float>>:108 [#uses=1]
- fmul <4 x float> %108, %16 ; <<4 x float>>:109 [#uses=1]
- fmul <4 x float> %109, %17 ; <<4 x float>>:110 [#uses=1]
- fmul <4 x float> %110, %18 ; <<4 x float>>:111 [#uses=1]
- fmul <4 x float> %111, %19 ; <<4 x float>>:112 [#uses=1]
- fmul <4 x float> %112, %20 ; <<4 x float>>:113 [#uses=1]
- fmul <4 x float> %113, %21 ; <<4 x float>>:114 [#uses=1]
- fmul <4 x float> %114, %22 ; <<4 x float>>:115 [#uses=1]
- fmul <4 x float> %115, %23 ; <<4 x float>>:116 [#uses=1]
- fmul <4 x float> %116, %24 ; <<4 x float>>:117 [#uses=1]
- fmul <4 x float> %117, %25 ; <<4 x float>>:118 [#uses=1]
- fmul <4 x float> %118, %26 ; <<4 x float>>:119 [#uses=1]
- fmul <4 x float> %119, %27 ; <<4 x float>>:120 [#uses=1]
- fmul <4 x float> %120, %28 ; <<4 x float>>:121 [#uses=1]
- fmul <4 x float> %121, %29 ; <<4 x float>>:122 [#uses=1]
- fmul <4 x float> %122, %30 ; <<4 x float>>:123 [#uses=1]
- fmul <4 x float> %123, %31 ; <<4 x float>>:124 [#uses=1]
- fmul <4 x float> %124, %32 ; <<4 x float>>:125 [#uses=1]
- fmul <4 x float> %4, %4 ; <<4 x float>>:126 [#uses=1]
- fmul <4 x float> %126, %5 ; <<4 x float>>:127 [#uses=1]
- fmul <4 x float> %127, %6 ; <<4 x float>>:128 [#uses=1]
- fmul <4 x float> %128, %7 ; <<4 x float>>:129 [#uses=1]
- fmul <4 x float> %129, %8 ; <<4 x float>>:130 [#uses=1]
- fmul <4 x float> %130, %9 ; <<4 x float>>:131 [#uses=1]
- fmul <4 x float> %131, %10 ; <<4 x float>>:132 [#uses=1]
- fmul <4 x float> %132, %11 ; <<4 x float>>:133 [#uses=1]
- fmul <4 x float> %133, %12 ; <<4 x float>>:134 [#uses=1]
- fmul <4 x float> %134, %13 ; <<4 x float>>:135 [#uses=1]
- fmul <4 x float> %135, %14 ; <<4 x float>>:136 [#uses=1]
- fmul <4 x float> %136, %15 ; <<4 x float>>:137 [#uses=1]
- fmul <4 x float> %137, %16 ; <<4 x float>>:138 [#uses=1]
- fmul <4 x float> %138, %17 ; <<4 x float>>:139 [#uses=1]
- fmul <4 x float> %139, %18 ; <<4 x float>>:140 [#uses=1]
- fmul <4 x float> %140, %19 ; <<4 x float>>:141 [#uses=1]
- fmul <4 x float> %141, %20 ; <<4 x float>>:142 [#uses=1]
- fmul <4 x float> %142, %21 ; <<4 x float>>:143 [#uses=1]
- fmul <4 x float> %143, %22 ; <<4 x float>>:144 [#uses=1]
- fmul <4 x float> %144, %23 ; <<4 x float>>:145 [#uses=1]
- fmul <4 x float> %145, %24 ; <<4 x float>>:146 [#uses=1]
- fmul <4 x float> %146, %25 ; <<4 x float>>:147 [#uses=1]
- fmul <4 x float> %147, %26 ; <<4 x float>>:148 [#uses=1]
- fmul <4 x float> %148, %27 ; <<4 x float>>:149 [#uses=1]
- fmul <4 x float> %149, %28 ; <<4 x float>>:150 [#uses=1]
- fmul <4 x float> %150, %29 ; <<4 x float>>:151 [#uses=1]
- fmul <4 x float> %151, %30 ; <<4 x float>>:152 [#uses=1]
- fmul <4 x float> %152, %31 ; <<4 x float>>:153 [#uses=1]
- fmul <4 x float> %153, %32 ; <<4 x float>>:154 [#uses=1]
- fmul <4 x float> %5, %5 ; <<4 x float>>:155 [#uses=1]
- fmul <4 x float> %155, %6 ; <<4 x float>>:156 [#uses=1]
- fmul <4 x float> %156, %7 ; <<4 x float>>:157 [#uses=1]
- fmul <4 x float> %157, %8 ; <<4 x float>>:158 [#uses=1]
- fmul <4 x float> %158, %9 ; <<4 x float>>:159 [#uses=1]
- fmul <4 x float> %159, %10 ; <<4 x float>>:160 [#uses=1]
- fmul <4 x float> %160, %11 ; <<4 x float>>:161 [#uses=1]
- fmul <4 x float> %161, %12 ; <<4 x float>>:162 [#uses=1]
- fmul <4 x float> %162, %13 ; <<4 x float>>:163 [#uses=1]
- fmul <4 x float> %163, %14 ; <<4 x float>>:164 [#uses=1]
- fmul <4 x float> %164, %15 ; <<4 x float>>:165 [#uses=1]
- fmul <4 x float> %165, %16 ; <<4 x float>>:166 [#uses=1]
- fmul <4 x float> %166, %17 ; <<4 x float>>:167 [#uses=1]
- fmul <4 x float> %167, %18 ; <<4 x float>>:168 [#uses=1]
- fmul <4 x float> %168, %19 ; <<4 x float>>:169 [#uses=1]
- fmul <4 x float> %169, %20 ; <<4 x float>>:170 [#uses=1]
- fmul <4 x float> %170, %21 ; <<4 x float>>:171 [#uses=1]
- fmul <4 x float> %171, %22 ; <<4 x float>>:172 [#uses=1]
- fmul <4 x float> %172, %23 ; <<4 x float>>:173 [#uses=1]
- fmul <4 x float> %173, %24 ; <<4 x float>>:174 [#uses=1]
- fmul <4 x float> %174, %25 ; <<4 x float>>:175 [#uses=1]
- fmul <4 x float> %175, %26 ; <<4 x float>>:176 [#uses=1]
- fmul <4 x float> %176, %27 ; <<4 x float>>:177 [#uses=1]
- fmul <4 x float> %177, %28 ; <<4 x float>>:178 [#uses=1]
- fmul <4 x float> %178, %29 ; <<4 x float>>:179 [#uses=1]
- fmul <4 x float> %179, %30 ; <<4 x float>>:180 [#uses=1]
- fmul <4 x float> %180, %31 ; <<4 x float>>:181 [#uses=1]
- fmul <4 x float> %181, %32 ; <<4 x float>>:182 [#uses=1]
- fmul <4 x float> %6, %6 ; <<4 x float>>:183 [#uses=1]
- fmul <4 x float> %183, %7 ; <<4 x float>>:184 [#uses=1]
- fmul <4 x float> %184, %8 ; <<4 x float>>:185 [#uses=1]
- fmul <4 x float> %185, %9 ; <<4 x float>>:186 [#uses=1]
- fmul <4 x float> %186, %10 ; <<4 x float>>:187 [#uses=1]
- fmul <4 x float> %187, %11 ; <<4 x float>>:188 [#uses=1]
- fmul <4 x float> %188, %12 ; <<4 x float>>:189 [#uses=1]
- fmul <4 x float> %189, %13 ; <<4 x float>>:190 [#uses=1]
- fmul <4 x float> %190, %14 ; <<4 x float>>:191 [#uses=1]
- fmul <4 x float> %191, %15 ; <<4 x float>>:192 [#uses=1]
- fmul <4 x float> %192, %16 ; <<4 x float>>:193 [#uses=1]
- fmul <4 x float> %193, %17 ; <<4 x float>>:194 [#uses=1]
- fmul <4 x float> %194, %18 ; <<4 x float>>:195 [#uses=1]
- fmul <4 x float> %195, %19 ; <<4 x float>>:196 [#uses=1]
- fmul <4 x float> %196, %20 ; <<4 x float>>:197 [#uses=1]
- fmul <4 x float> %197, %21 ; <<4 x float>>:198 [#uses=1]
- fmul <4 x float> %198, %22 ; <<4 x float>>:199 [#uses=1]
- fmul <4 x float> %199, %23 ; <<4 x float>>:200 [#uses=1]
- fmul <4 x float> %200, %24 ; <<4 x float>>:201 [#uses=1]
- fmul <4 x float> %201, %25 ; <<4 x float>>:202 [#uses=1]
- fmul <4 x float> %202, %26 ; <<4 x float>>:203 [#uses=1]
- fmul <4 x float> %203, %27 ; <<4 x float>>:204 [#uses=1]
- fmul <4 x float> %204, %28 ; <<4 x float>>:205 [#uses=1]
- fmul <4 x float> %205, %29 ; <<4 x float>>:206 [#uses=1]
- fmul <4 x float> %206, %30 ; <<4 x float>>:207 [#uses=1]
- fmul <4 x float> %207, %31 ; <<4 x float>>:208 [#uses=1]
- fmul <4 x float> %208, %32 ; <<4 x float>>:209 [#uses=1]
- fmul <4 x float> %7, %7 ; <<4 x float>>:210 [#uses=1]
- fmul <4 x float> %210, %8 ; <<4 x float>>:211 [#uses=1]
- fmul <4 x float> %211, %9 ; <<4 x float>>:212 [#uses=1]
- fmul <4 x float> %212, %10 ; <<4 x float>>:213 [#uses=1]
- fmul <4 x float> %213, %11 ; <<4 x float>>:214 [#uses=1]
- fmul <4 x float> %214, %12 ; <<4 x float>>:215 [#uses=1]
- fmul <4 x float> %215, %13 ; <<4 x float>>:216 [#uses=1]
- fmul <4 x float> %216, %14 ; <<4 x float>>:217 [#uses=1]
- fmul <4 x float> %217, %15 ; <<4 x float>>:218 [#uses=1]
- fmul <4 x float> %218, %16 ; <<4 x float>>:219 [#uses=1]
- fmul <4 x float> %219, %17 ; <<4 x float>>:220 [#uses=1]
- fmul <4 x float> %220, %18 ; <<4 x float>>:221 [#uses=1]
- fmul <4 x float> %221, %19 ; <<4 x float>>:222 [#uses=1]
- fmul <4 x float> %222, %20 ; <<4 x float>>:223 [#uses=1]
- fmul <4 x float> %223, %21 ; <<4 x float>>:224 [#uses=1]
- fmul <4 x float> %224, %22 ; <<4 x float>>:225 [#uses=1]
- fmul <4 x float> %225, %23 ; <<4 x float>>:226 [#uses=1]
- fmul <4 x float> %226, %24 ; <<4 x float>>:227 [#uses=1]
- fmul <4 x float> %227, %25 ; <<4 x float>>:228 [#uses=1]
- fmul <4 x float> %228, %26 ; <<4 x float>>:229 [#uses=1]
- fmul <4 x float> %229, %27 ; <<4 x float>>:230 [#uses=1]
- fmul <4 x float> %230, %28 ; <<4 x float>>:231 [#uses=1]
- fmul <4 x float> %231, %29 ; <<4 x float>>:232 [#uses=1]
- fmul <4 x float> %232, %30 ; <<4 x float>>:233 [#uses=1]
- fmul <4 x float> %233, %31 ; <<4 x float>>:234 [#uses=1]
- fmul <4 x float> %234, %32 ; <<4 x float>>:235 [#uses=1]
- fmul <4 x float> %8, %8 ; <<4 x float>>:236 [#uses=1]
- fmul <4 x float> %236, %9 ; <<4 x float>>:237 [#uses=1]
- fmul <4 x float> %237, %10 ; <<4 x float>>:238 [#uses=1]
- fmul <4 x float> %238, %11 ; <<4 x float>>:239 [#uses=1]
- fmul <4 x float> %239, %12 ; <<4 x float>>:240 [#uses=1]
- fmul <4 x float> %240, %13 ; <<4 x float>>:241 [#uses=1]
- fmul <4 x float> %241, %14 ; <<4 x float>>:242 [#uses=1]
- fmul <4 x float> %242, %15 ; <<4 x float>>:243 [#uses=1]
- fmul <4 x float> %243, %16 ; <<4 x float>>:244 [#uses=1]
- fmul <4 x float> %244, %17 ; <<4 x float>>:245 [#uses=1]
- fmul <4 x float> %245, %18 ; <<4 x float>>:246 [#uses=1]
- fmul <4 x float> %246, %19 ; <<4 x float>>:247 [#uses=1]
- fmul <4 x float> %247, %20 ; <<4 x float>>:248 [#uses=1]
- fmul <4 x float> %248, %21 ; <<4 x float>>:249 [#uses=1]
- fmul <4 x float> %249, %22 ; <<4 x float>>:250 [#uses=1]
- fmul <4 x float> %250, %23 ; <<4 x float>>:251 [#uses=1]
- fmul <4 x float> %251, %24 ; <<4 x float>>:252 [#uses=1]
- fmul <4 x float> %252, %25 ; <<4 x float>>:253 [#uses=1]
- fmul <4 x float> %253, %26 ; <<4 x float>>:254 [#uses=1]
- fmul <4 x float> %254, %27 ; <<4 x float>>:255 [#uses=1]
- fmul <4 x float> %255, %28 ; <<4 x float>>:256 [#uses=1]
- fmul <4 x float> %256, %29 ; <<4 x float>>:257 [#uses=1]
- fmul <4 x float> %257, %30 ; <<4 x float>>:258 [#uses=1]
- fmul <4 x float> %258, %31 ; <<4 x float>>:259 [#uses=1]
- fmul <4 x float> %259, %32 ; <<4 x float>>:260 [#uses=1]
- fmul <4 x float> %9, %9 ; <<4 x float>>:261 [#uses=1]
- fmul <4 x float> %261, %10 ; <<4 x float>>:262 [#uses=1]
- fmul <4 x float> %262, %11 ; <<4 x float>>:263 [#uses=1]
- fmul <4 x float> %263, %12 ; <<4 x float>>:264 [#uses=1]
- fmul <4 x float> %264, %13 ; <<4 x float>>:265 [#uses=1]
- fmul <4 x float> %265, %14 ; <<4 x float>>:266 [#uses=1]
- fmul <4 x float> %266, %15 ; <<4 x float>>:267 [#uses=1]
- fmul <4 x float> %267, %16 ; <<4 x float>>:268 [#uses=1]
- fmul <4 x float> %268, %17 ; <<4 x float>>:269 [#uses=1]
- fmul <4 x float> %269, %18 ; <<4 x float>>:270 [#uses=1]
- fmul <4 x float> %270, %19 ; <<4 x float>>:271 [#uses=1]
- fmul <4 x float> %271, %20 ; <<4 x float>>:272 [#uses=1]
- fmul <4 x float> %272, %21 ; <<4 x float>>:273 [#uses=1]
- fmul <4 x float> %273, %22 ; <<4 x float>>:274 [#uses=1]
- fmul <4 x float> %274, %23 ; <<4 x float>>:275 [#uses=1]
- fmul <4 x float> %275, %24 ; <<4 x float>>:276 [#uses=1]
- fmul <4 x float> %276, %25 ; <<4 x float>>:277 [#uses=1]
- fmul <4 x float> %277, %26 ; <<4 x float>>:278 [#uses=1]
- fmul <4 x float> %278, %27 ; <<4 x float>>:279 [#uses=1]
- fmul <4 x float> %279, %28 ; <<4 x float>>:280 [#uses=1]
- fmul <4 x float> %280, %29 ; <<4 x float>>:281 [#uses=1]
- fmul <4 x float> %281, %30 ; <<4 x float>>:282 [#uses=1]
- fmul <4 x float> %282, %31 ; <<4 x float>>:283 [#uses=1]
- fmul <4 x float> %283, %32 ; <<4 x float>>:284 [#uses=1]
- fmul <4 x float> %10, %10 ; <<4 x float>>:285 [#uses=1]
- fmul <4 x float> %285, %11 ; <<4 x float>>:286 [#uses=1]
- fmul <4 x float> %286, %12 ; <<4 x float>>:287 [#uses=1]
- fmul <4 x float> %287, %13 ; <<4 x float>>:288 [#uses=1]
- fmul <4 x float> %288, %14 ; <<4 x float>>:289 [#uses=1]
- fmul <4 x float> %289, %15 ; <<4 x float>>:290 [#uses=1]
- fmul <4 x float> %290, %16 ; <<4 x float>>:291 [#uses=1]
- fmul <4 x float> %291, %17 ; <<4 x float>>:292 [#uses=1]
- fmul <4 x float> %292, %18 ; <<4 x float>>:293 [#uses=1]
- fmul <4 x float> %293, %19 ; <<4 x float>>:294 [#uses=1]
- fmul <4 x float> %294, %20 ; <<4 x float>>:295 [#uses=1]
- fmul <4 x float> %295, %21 ; <<4 x float>>:296 [#uses=1]
- fmul <4 x float> %296, %22 ; <<4 x float>>:297 [#uses=1]
- fmul <4 x float> %297, %23 ; <<4 x float>>:298 [#uses=1]
- fmul <4 x float> %298, %24 ; <<4 x float>>:299 [#uses=1]
- fmul <4 x float> %299, %25 ; <<4 x float>>:300 [#uses=1]
- fmul <4 x float> %300, %26 ; <<4 x float>>:301 [#uses=1]
- fmul <4 x float> %301, %27 ; <<4 x float>>:302 [#uses=1]
- fmul <4 x float> %302, %28 ; <<4 x float>>:303 [#uses=1]
- fmul <4 x float> %303, %29 ; <<4 x float>>:304 [#uses=1]
- fmul <4 x float> %304, %30 ; <<4 x float>>:305 [#uses=1]
- fmul <4 x float> %305, %31 ; <<4 x float>>:306 [#uses=1]
- fmul <4 x float> %306, %32 ; <<4 x float>>:307 [#uses=1]
- fmul <4 x float> %11, %11 ; <<4 x float>>:308 [#uses=1]
- fmul <4 x float> %308, %12 ; <<4 x float>>:309 [#uses=1]
- fmul <4 x float> %309, %13 ; <<4 x float>>:310 [#uses=1]
- fmul <4 x float> %310, %14 ; <<4 x float>>:311 [#uses=1]
- fmul <4 x float> %311, %15 ; <<4 x float>>:312 [#uses=1]
- fmul <4 x float> %312, %16 ; <<4 x float>>:313 [#uses=1]
- fmul <4 x float> %313, %17 ; <<4 x float>>:314 [#uses=1]
- fmul <4 x float> %314, %18 ; <<4 x float>>:315 [#uses=1]
- fmul <4 x float> %315, %19 ; <<4 x float>>:316 [#uses=1]
- fmul <4 x float> %316, %20 ; <<4 x float>>:317 [#uses=1]
- fmul <4 x float> %317, %21 ; <<4 x float>>:318 [#uses=1]
- fmul <4 x float> %318, %22 ; <<4 x float>>:319 [#uses=1]
- fmul <4 x float> %319, %23 ; <<4 x float>>:320 [#uses=1]
- fmul <4 x float> %320, %24 ; <<4 x float>>:321 [#uses=1]
- fmul <4 x float> %321, %25 ; <<4 x float>>:322 [#uses=1]
- fmul <4 x float> %322, %26 ; <<4 x float>>:323 [#uses=1]
- fmul <4 x float> %323, %27 ; <<4 x float>>:324 [#uses=1]
- fmul <4 x float> %324, %28 ; <<4 x float>>:325 [#uses=1]
- fmul <4 x float> %325, %29 ; <<4 x float>>:326 [#uses=1]
- fmul <4 x float> %326, %30 ; <<4 x float>>:327 [#uses=1]
- fmul <4 x float> %327, %31 ; <<4 x float>>:328 [#uses=1]
- fmul <4 x float> %328, %32 ; <<4 x float>>:329 [#uses=1]
- fmul <4 x float> %12, %12 ; <<4 x float>>:330 [#uses=1]
- fmul <4 x float> %330, %13 ; <<4 x float>>:331 [#uses=1]
- fmul <4 x float> %331, %14 ; <<4 x float>>:332 [#uses=1]
- fmul <4 x float> %332, %15 ; <<4 x float>>:333 [#uses=1]
- fmul <4 x float> %333, %16 ; <<4 x float>>:334 [#uses=1]
- fmul <4 x float> %334, %17 ; <<4 x float>>:335 [#uses=1]
- fmul <4 x float> %335, %18 ; <<4 x float>>:336 [#uses=1]
- fmul <4 x float> %336, %19 ; <<4 x float>>:337 [#uses=1]
- fmul <4 x float> %337, %20 ; <<4 x float>>:338 [#uses=1]
- fmul <4 x float> %338, %21 ; <<4 x float>>:339 [#uses=1]
- fmul <4 x float> %339, %22 ; <<4 x float>>:340 [#uses=1]
- fmul <4 x float> %340, %23 ; <<4 x float>>:341 [#uses=1]
- fmul <4 x float> %341, %24 ; <<4 x float>>:342 [#uses=1]
- fmul <4 x float> %342, %25 ; <<4 x float>>:343 [#uses=1]
- fmul <4 x float> %343, %26 ; <<4 x float>>:344 [#uses=1]
- fmul <4 x float> %344, %27 ; <<4 x float>>:345 [#uses=1]
- fmul <4 x float> %345, %28 ; <<4 x float>>:346 [#uses=1]
- fmul <4 x float> %346, %29 ; <<4 x float>>:347 [#uses=1]
- fmul <4 x float> %347, %30 ; <<4 x float>>:348 [#uses=1]
- fmul <4 x float> %348, %31 ; <<4 x float>>:349 [#uses=1]
- fmul <4 x float> %349, %32 ; <<4 x float>>:350 [#uses=1]
- fmul <4 x float> %13, %13 ; <<4 x float>>:351 [#uses=1]
- fmul <4 x float> %351, %14 ; <<4 x float>>:352 [#uses=1]
- fmul <4 x float> %352, %15 ; <<4 x float>>:353 [#uses=1]
- fmul <4 x float> %353, %16 ; <<4 x float>>:354 [#uses=1]
- fmul <4 x float> %354, %17 ; <<4 x float>>:355 [#uses=1]
- fmul <4 x float> %355, %18 ; <<4 x float>>:356 [#uses=1]
- fmul <4 x float> %356, %19 ; <<4 x float>>:357 [#uses=1]
- fmul <4 x float> %357, %20 ; <<4 x float>>:358 [#uses=1]
- fmul <4 x float> %358, %21 ; <<4 x float>>:359 [#uses=1]
- fmul <4 x float> %359, %22 ; <<4 x float>>:360 [#uses=1]
- fmul <4 x float> %360, %23 ; <<4 x float>>:361 [#uses=1]
- fmul <4 x float> %361, %24 ; <<4 x float>>:362 [#uses=1]
- fmul <4 x float> %362, %25 ; <<4 x float>>:363 [#uses=1]
- fmul <4 x float> %363, %26 ; <<4 x float>>:364 [#uses=1]
- fmul <4 x float> %364, %27 ; <<4 x float>>:365 [#uses=1]
- fmul <4 x float> %365, %28 ; <<4 x float>>:366 [#uses=1]
- fmul <4 x float> %366, %29 ; <<4 x float>>:367 [#uses=1]
- fmul <4 x float> %367, %30 ; <<4 x float>>:368 [#uses=1]
- fmul <4 x float> %368, %31 ; <<4 x float>>:369 [#uses=1]
- fmul <4 x float> %369, %32 ; <<4 x float>>:370 [#uses=1]
- fmul <4 x float> %14, %14 ; <<4 x float>>:371 [#uses=1]
- fmul <4 x float> %371, %15 ; <<4 x float>>:372 [#uses=1]
- fmul <4 x float> %372, %16 ; <<4 x float>>:373 [#uses=1]
- fmul <4 x float> %373, %17 ; <<4 x float>>:374 [#uses=1]
- fmul <4 x float> %374, %18 ; <<4 x float>>:375 [#uses=1]
- fmul <4 x float> %375, %19 ; <<4 x float>>:376 [#uses=1]
- fmul <4 x float> %376, %20 ; <<4 x float>>:377 [#uses=1]
- fmul <4 x float> %377, %21 ; <<4 x float>>:378 [#uses=1]
- fmul <4 x float> %378, %22 ; <<4 x float>>:379 [#uses=1]
- fmul <4 x float> %379, %23 ; <<4 x float>>:380 [#uses=1]
- fmul <4 x float> %380, %24 ; <<4 x float>>:381 [#uses=1]
- fmul <4 x float> %381, %25 ; <<4 x float>>:382 [#uses=1]
- fmul <4 x float> %382, %26 ; <<4 x float>>:383 [#uses=1]
- fmul <4 x float> %383, %27 ; <<4 x float>>:384 [#uses=1]
- fmul <4 x float> %384, %28 ; <<4 x float>>:385 [#uses=1]
- fmul <4 x float> %385, %29 ; <<4 x float>>:386 [#uses=1]
- fmul <4 x float> %386, %30 ; <<4 x float>>:387 [#uses=1]
- fmul <4 x float> %387, %31 ; <<4 x float>>:388 [#uses=1]
- fmul <4 x float> %388, %32 ; <<4 x float>>:389 [#uses=1]
- fmul <4 x float> %15, %15 ; <<4 x float>>:390 [#uses=1]
- fmul <4 x float> %390, %16 ; <<4 x float>>:391 [#uses=1]
- fmul <4 x float> %391, %17 ; <<4 x float>>:392 [#uses=1]
- fmul <4 x float> %392, %18 ; <<4 x float>>:393 [#uses=1]
- fmul <4 x float> %393, %19 ; <<4 x float>>:394 [#uses=1]
- fmul <4 x float> %394, %20 ; <<4 x float>>:395 [#uses=1]
- fmul <4 x float> %395, %21 ; <<4 x float>>:396 [#uses=1]
- fmul <4 x float> %396, %22 ; <<4 x float>>:397 [#uses=1]
- fmul <4 x float> %397, %23 ; <<4 x float>>:398 [#uses=1]
- fmul <4 x float> %398, %24 ; <<4 x float>>:399 [#uses=1]
- fmul <4 x float> %399, %25 ; <<4 x float>>:400 [#uses=1]
- fmul <4 x float> %400, %26 ; <<4 x float>>:401 [#uses=1]
- fmul <4 x float> %401, %27 ; <<4 x float>>:402 [#uses=1]
- fmul <4 x float> %402, %28 ; <<4 x float>>:403 [#uses=1]
- fmul <4 x float> %403, %29 ; <<4 x float>>:404 [#uses=1]
- fmul <4 x float> %404, %30 ; <<4 x float>>:405 [#uses=1]
- fmul <4 x float> %405, %31 ; <<4 x float>>:406 [#uses=1]
- fmul <4 x float> %406, %32 ; <<4 x float>>:407 [#uses=1]
- fmul <4 x float> %16, %16 ; <<4 x float>>:408 [#uses=1]
- fmul <4 x float> %408, %17 ; <<4 x float>>:409 [#uses=1]
- fmul <4 x float> %409, %18 ; <<4 x float>>:410 [#uses=1]
- fmul <4 x float> %410, %19 ; <<4 x float>>:411 [#uses=1]
- fmul <4 x float> %411, %20 ; <<4 x float>>:412 [#uses=1]
- fmul <4 x float> %412, %21 ; <<4 x float>>:413 [#uses=1]
- fmul <4 x float> %413, %22 ; <<4 x float>>:414 [#uses=1]
- fmul <4 x float> %414, %23 ; <<4 x float>>:415 [#uses=1]
- fmul <4 x float> %415, %24 ; <<4 x float>>:416 [#uses=1]
- fmul <4 x float> %416, %25 ; <<4 x float>>:417 [#uses=1]
- fmul <4 x float> %417, %26 ; <<4 x float>>:418 [#uses=1]
- fmul <4 x float> %418, %27 ; <<4 x float>>:419 [#uses=1]
- fmul <4 x float> %419, %28 ; <<4 x float>>:420 [#uses=1]
- fmul <4 x float> %420, %29 ; <<4 x float>>:421 [#uses=1]
- fmul <4 x float> %421, %30 ; <<4 x float>>:422 [#uses=1]
- fmul <4 x float> %422, %31 ; <<4 x float>>:423 [#uses=1]
- fmul <4 x float> %423, %32 ; <<4 x float>>:424 [#uses=1]
- fmul <4 x float> %17, %17 ; <<4 x float>>:425 [#uses=1]
- fmul <4 x float> %425, %18 ; <<4 x float>>:426 [#uses=1]
- fmul <4 x float> %426, %19 ; <<4 x float>>:427 [#uses=1]
- fmul <4 x float> %427, %20 ; <<4 x float>>:428 [#uses=1]
- fmul <4 x float> %428, %21 ; <<4 x float>>:429 [#uses=1]
- fmul <4 x float> %429, %22 ; <<4 x float>>:430 [#uses=1]
- fmul <4 x float> %430, %23 ; <<4 x float>>:431 [#uses=1]
- fmul <4 x float> %431, %24 ; <<4 x float>>:432 [#uses=1]
- fmul <4 x float> %432, %25 ; <<4 x float>>:433 [#uses=1]
- fmul <4 x float> %433, %26 ; <<4 x float>>:434 [#uses=1]
- fmul <4 x float> %434, %27 ; <<4 x float>>:435 [#uses=1]
- fmul <4 x float> %435, %28 ; <<4 x float>>:436 [#uses=1]
- fmul <4 x float> %436, %29 ; <<4 x float>>:437 [#uses=1]
- fmul <4 x float> %437, %30 ; <<4 x float>>:438 [#uses=1]
- fmul <4 x float> %438, %31 ; <<4 x float>>:439 [#uses=1]
- fmul <4 x float> %439, %32 ; <<4 x float>>:440 [#uses=1]
- fmul <4 x float> %18, %18 ; <<4 x float>>:441 [#uses=1]
- fmul <4 x float> %441, %19 ; <<4 x float>>:442 [#uses=1]
- fmul <4 x float> %442, %20 ; <<4 x float>>:443 [#uses=1]
- fmul <4 x float> %443, %21 ; <<4 x float>>:444 [#uses=1]
- fmul <4 x float> %444, %22 ; <<4 x float>>:445 [#uses=1]
- fmul <4 x float> %445, %23 ; <<4 x float>>:446 [#uses=1]
- fmul <4 x float> %446, %24 ; <<4 x float>>:447 [#uses=1]
- fmul <4 x float> %447, %25 ; <<4 x float>>:448 [#uses=1]
- fmul <4 x float> %448, %26 ; <<4 x float>>:449 [#uses=1]
- fmul <4 x float> %449, %27 ; <<4 x float>>:450 [#uses=1]
- fmul <4 x float> %450, %28 ; <<4 x float>>:451 [#uses=1]
- fmul <4 x float> %451, %29 ; <<4 x float>>:452 [#uses=1]
- fmul <4 x float> %452, %30 ; <<4 x float>>:453 [#uses=1]
- fmul <4 x float> %453, %31 ; <<4 x float>>:454 [#uses=1]
- fmul <4 x float> %454, %32 ; <<4 x float>>:455 [#uses=1]
- fmul <4 x float> %19, %19 ; <<4 x float>>:456 [#uses=1]
- fmul <4 x float> %456, %20 ; <<4 x float>>:457 [#uses=1]
- fmul <4 x float> %457, %21 ; <<4 x float>>:458 [#uses=1]
- fmul <4 x float> %458, %22 ; <<4 x float>>:459 [#uses=1]
- fmul <4 x float> %459, %23 ; <<4 x float>>:460 [#uses=1]
- fmul <4 x float> %460, %24 ; <<4 x float>>:461 [#uses=1]
- fmul <4 x float> %461, %25 ; <<4 x float>>:462 [#uses=1]
- fmul <4 x float> %462, %26 ; <<4 x float>>:463 [#uses=1]
- fmul <4 x float> %463, %27 ; <<4 x float>>:464 [#uses=1]
- fmul <4 x float> %464, %28 ; <<4 x float>>:465 [#uses=1]
- fmul <4 x float> %465, %29 ; <<4 x float>>:466 [#uses=1]
- fmul <4 x float> %466, %30 ; <<4 x float>>:467 [#uses=1]
- fmul <4 x float> %467, %31 ; <<4 x float>>:468 [#uses=1]
- fmul <4 x float> %468, %32 ; <<4 x float>>:469 [#uses=1]
- fmul <4 x float> %20, %20 ; <<4 x float>>:470 [#uses=1]
- fmul <4 x float> %470, %21 ; <<4 x float>>:471 [#uses=1]
- fmul <4 x float> %471, %22 ; <<4 x float>>:472 [#uses=1]
- fmul <4 x float> %472, %23 ; <<4 x float>>:473 [#uses=1]
- fmul <4 x float> %473, %24 ; <<4 x float>>:474 [#uses=1]
- fmul <4 x float> %474, %25 ; <<4 x float>>:475 [#uses=1]
- fmul <4 x float> %475, %26 ; <<4 x float>>:476 [#uses=1]
- fmul <4 x float> %476, %27 ; <<4 x float>>:477 [#uses=1]
- fmul <4 x float> %477, %28 ; <<4 x float>>:478 [#uses=1]
- fmul <4 x float> %478, %29 ; <<4 x float>>:479 [#uses=1]
- fmul <4 x float> %479, %30 ; <<4 x float>>:480 [#uses=1]
- fmul <4 x float> %480, %31 ; <<4 x float>>:481 [#uses=1]
- fmul <4 x float> %481, %32 ; <<4 x float>>:482 [#uses=1]
- fmul <4 x float> %21, %21 ; <<4 x float>>:483 [#uses=1]
- fmul <4 x float> %483, %22 ; <<4 x float>>:484 [#uses=1]
- fmul <4 x float> %484, %23 ; <<4 x float>>:485 [#uses=1]
- fmul <4 x float> %485, %24 ; <<4 x float>>:486 [#uses=1]
- fmul <4 x float> %486, %25 ; <<4 x float>>:487 [#uses=1]
- fmul <4 x float> %487, %26 ; <<4 x float>>:488 [#uses=1]
- fmul <4 x float> %488, %27 ; <<4 x float>>:489 [#uses=1]
- fmul <4 x float> %489, %28 ; <<4 x float>>:490 [#uses=1]
- fmul <4 x float> %490, %29 ; <<4 x float>>:491 [#uses=1]
- fmul <4 x float> %491, %30 ; <<4 x float>>:492 [#uses=1]
- fmul <4 x float> %492, %31 ; <<4 x float>>:493 [#uses=1]
- fmul <4 x float> %493, %32 ; <<4 x float>>:494 [#uses=1]
- fmul <4 x float> %22, %22 ; <<4 x float>>:495 [#uses=1]
- fmul <4 x float> %495, %23 ; <<4 x float>>:496 [#uses=1]
- fmul <4 x float> %496, %24 ; <<4 x float>>:497 [#uses=1]
- fmul <4 x float> %497, %25 ; <<4 x float>>:498 [#uses=1]
- fmul <4 x float> %498, %26 ; <<4 x float>>:499 [#uses=1]
- fmul <4 x float> %499, %27 ; <<4 x float>>:500 [#uses=1]
- fmul <4 x float> %500, %28 ; <<4 x float>>:501 [#uses=1]
- fmul <4 x float> %501, %29 ; <<4 x float>>:502 [#uses=1]
- fmul <4 x float> %502, %30 ; <<4 x float>>:503 [#uses=1]
- fmul <4 x float> %503, %31 ; <<4 x float>>:504 [#uses=1]
- fmul <4 x float> %504, %32 ; <<4 x float>>:505 [#uses=1]
- fmul <4 x float> %23, %23 ; <<4 x float>>:506 [#uses=1]
- fmul <4 x float> %506, %24 ; <<4 x float>>:507 [#uses=1]
- fmul <4 x float> %507, %25 ; <<4 x float>>:508 [#uses=1]
- fmul <4 x float> %508, %26 ; <<4 x float>>:509 [#uses=1]
- fmul <4 x float> %509, %27 ; <<4 x float>>:510 [#uses=1]
- fmul <4 x float> %510, %28 ; <<4 x float>>:511 [#uses=1]
- fmul <4 x float> %511, %29 ; <<4 x float>>:512 [#uses=1]
- fmul <4 x float> %512, %30 ; <<4 x float>>:513 [#uses=1]
- fmul <4 x float> %513, %31 ; <<4 x float>>:514 [#uses=1]
- fmul <4 x float> %514, %32 ; <<4 x float>>:515 [#uses=1]
- fmul <4 x float> %24, %24 ; <<4 x float>>:516 [#uses=1]
- fmul <4 x float> %516, %25 ; <<4 x float>>:517 [#uses=1]
- fmul <4 x float> %517, %26 ; <<4 x float>>:518 [#uses=1]
- fmul <4 x float> %518, %27 ; <<4 x float>>:519 [#uses=1]
- fmul <4 x float> %519, %28 ; <<4 x float>>:520 [#uses=1]
- fmul <4 x float> %520, %29 ; <<4 x float>>:521 [#uses=1]
- fmul <4 x float> %521, %30 ; <<4 x float>>:522 [#uses=1]
- fmul <4 x float> %522, %31 ; <<4 x float>>:523 [#uses=1]
- fmul <4 x float> %523, %32 ; <<4 x float>>:524 [#uses=1]
- fmul <4 x float> %25, %25 ; <<4 x float>>:525 [#uses=1]
- fmul <4 x float> %525, %26 ; <<4 x float>>:526 [#uses=1]
- fmul <4 x float> %526, %27 ; <<4 x float>>:527 [#uses=1]
- fmul <4 x float> %527, %28 ; <<4 x float>>:528 [#uses=1]
- fmul <4 x float> %528, %29 ; <<4 x float>>:529 [#uses=1]
- fmul <4 x float> %529, %30 ; <<4 x float>>:530 [#uses=1]
- fmul <4 x float> %530, %31 ; <<4 x float>>:531 [#uses=1]
- fmul <4 x float> %531, %32 ; <<4 x float>>:532 [#uses=1]
- fmul <4 x float> %26, %26 ; <<4 x float>>:533 [#uses=1]
- fmul <4 x float> %533, %27 ; <<4 x float>>:534 [#uses=1]
- fmul <4 x float> %534, %28 ; <<4 x float>>:535 [#uses=1]
- fmul <4 x float> %535, %29 ; <<4 x float>>:536 [#uses=1]
- fmul <4 x float> %536, %30 ; <<4 x float>>:537 [#uses=1]
- fmul <4 x float> %537, %31 ; <<4 x float>>:538 [#uses=1]
- fmul <4 x float> %538, %32 ; <<4 x float>>:539 [#uses=1]
- fmul <4 x float> %27, %27 ; <<4 x float>>:540 [#uses=1]
- fmul <4 x float> %540, %28 ; <<4 x float>>:541 [#uses=1]
- fmul <4 x float> %541, %29 ; <<4 x float>>:542 [#uses=1]
- fmul <4 x float> %542, %30 ; <<4 x float>>:543 [#uses=1]
- fmul <4 x float> %543, %31 ; <<4 x float>>:544 [#uses=1]
- fmul <4 x float> %544, %32 ; <<4 x float>>:545 [#uses=1]
- fmul <4 x float> %28, %28 ; <<4 x float>>:546 [#uses=1]
- fmul <4 x float> %546, %29 ; <<4 x float>>:547 [#uses=1]
- fmul <4 x float> %547, %30 ; <<4 x float>>:548 [#uses=1]
- fmul <4 x float> %548, %31 ; <<4 x float>>:549 [#uses=1]
- fmul <4 x float> %549, %32 ; <<4 x float>>:550 [#uses=1]
- fmul <4 x float> %29, %29 ; <<4 x float>>:551 [#uses=1]
- fmul <4 x float> %551, %30 ; <<4 x float>>:552 [#uses=1]
- fmul <4 x float> %552, %31 ; <<4 x float>>:553 [#uses=1]
- fmul <4 x float> %553, %32 ; <<4 x float>>:554 [#uses=1]
- fmul <4 x float> %30, %30 ; <<4 x float>>:555 [#uses=1]
- fmul <4 x float> %555, %31 ; <<4 x float>>:556 [#uses=1]
- fmul <4 x float> %556, %32 ; <<4 x float>>:557 [#uses=1]
- fmul <4 x float> %31, %31 ; <<4 x float>>:558 [#uses=1]
- fmul <4 x float> %558, %32 ; <<4 x float>>:559 [#uses=1]
- fmul <4 x float> %32, %32 ; <<4 x float>>:560 [#uses=1]
- fadd <4 x float> %64, %64 ; <<4 x float>>:561 [#uses=1]
- fadd <4 x float> %561, %64 ; <<4 x float>>:562 [#uses=1]
- fadd <4 x float> %562, %95 ; <<4 x float>>:563 [#uses=1]
- fadd <4 x float> %563, %125 ; <<4 x float>>:564 [#uses=1]
- fadd <4 x float> %564, %154 ; <<4 x float>>:565 [#uses=1]
- fadd <4 x float> %565, %182 ; <<4 x float>>:566 [#uses=1]
- fadd <4 x float> %566, %209 ; <<4 x float>>:567 [#uses=1]
- fadd <4 x float> %567, %235 ; <<4 x float>>:568 [#uses=1]
- fadd <4 x float> %568, %260 ; <<4 x float>>:569 [#uses=1]
- fadd <4 x float> %569, %284 ; <<4 x float>>:570 [#uses=1]
- fadd <4 x float> %570, %307 ; <<4 x float>>:571 [#uses=1]
- fadd <4 x float> %571, %329 ; <<4 x float>>:572 [#uses=1]
- fadd <4 x float> %572, %350 ; <<4 x float>>:573 [#uses=1]
- fadd <4 x float> %573, %370 ; <<4 x float>>:574 [#uses=1]
- fadd <4 x float> %574, %389 ; <<4 x float>>:575 [#uses=1]
- fadd <4 x float> %575, %407 ; <<4 x float>>:576 [#uses=1]
- fadd <4 x float> %576, %424 ; <<4 x float>>:577 [#uses=1]
- fadd <4 x float> %577, %440 ; <<4 x float>>:578 [#uses=1]
- fadd <4 x float> %578, %455 ; <<4 x float>>:579 [#uses=1]
- fadd <4 x float> %579, %469 ; <<4 x float>>:580 [#uses=1]
- fadd <4 x float> %580, %482 ; <<4 x float>>:581 [#uses=1]
- fadd <4 x float> %581, %494 ; <<4 x float>>:582 [#uses=1]
- fadd <4 x float> %582, %505 ; <<4 x float>>:583 [#uses=1]
- fadd <4 x float> %583, %515 ; <<4 x float>>:584 [#uses=1]
- fadd <4 x float> %584, %524 ; <<4 x float>>:585 [#uses=1]
- fadd <4 x float> %585, %532 ; <<4 x float>>:586 [#uses=1]
- fadd <4 x float> %586, %539 ; <<4 x float>>:587 [#uses=1]
- fadd <4 x float> %587, %545 ; <<4 x float>>:588 [#uses=1]
- fadd <4 x float> %588, %550 ; <<4 x float>>:589 [#uses=1]
- fadd <4 x float> %589, %554 ; <<4 x float>>:590 [#uses=1]
- fadd <4 x float> %590, %557 ; <<4 x float>>:591 [#uses=1]
- fadd <4 x float> %591, %559 ; <<4 x float>>:592 [#uses=1]
- fadd <4 x float> %592, %560 ; <<4 x float>>:593 [#uses=1]
- store <4 x float> %593, <4 x float>* @0, align 1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-07-22-CombinerCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-07-22-CombinerCrash.ll
deleted file mode 100644
index 0f67145..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-07-22-CombinerCrash.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-; PR2566
-
-external global i16 ; <i16*>:0 [#uses=1]
-external global <4 x i16> ; <<4 x i16>*>:1 [#uses=1]
-
-declare void @abort()
-
-define void @t() nounwind {
- load i16* @0 ; <i16>:1 [#uses=1]
- zext i16 %1 to i64 ; <i64>:2 [#uses=1]
- bitcast i64 %2 to <4 x i16> ; <<4 x i16>>:3 [#uses=1]
- shufflevector <4 x i16> %3, <4 x i16> undef, <4 x i32> zeroinitializer ; <<4 x i16>>:4 [#uses=1]
- store <4 x i16> %4, <4 x i16>* @1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-07-23-VSetCC.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-07-23-VSetCC.ll
deleted file mode 100644
index 684ca5c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-07-23-VSetCC.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=pentium
-; PR2575
-
-define void @entry(i32 %m_task_id, i32 %start_x, i32 %end_x) nounwind {
- br i1 false, label %bb.nph, label %._crit_edge
-
-bb.nph: ; preds = %bb.nph, %0
- %X = icmp sgt <4 x i32> zeroinitializer, < i32 -128, i32 -128, i32 -128, i32 -128 > ; <<4 x i32>>:1 [#uses=1]
- sext <4 x i1> %X to <4 x i32>
- extractelement <4 x i32> %1, i32 3 ; <i32>:2 [#uses=1]
- lshr i32 %2, 31 ; <i32>:3 [#uses=1]
- trunc i32 %3 to i1 ; <i1>:4 [#uses=1]
- select i1 %4, i32 -1, i32 0 ; <i32>:5 [#uses=1]
- insertelement <4 x i32> zeroinitializer, i32 %5, i32 3 ; <<4 x i32>>:6 [#uses=1]
- and <4 x i32> zeroinitializer, %6 ; <<4 x i32>>:7 [#uses=1]
- bitcast <4 x i32> %7 to <4 x float> ; <<4 x float>>:8 [#uses=1]
- fmul <4 x float> zeroinitializer, %8 ; <<4 x float>>:9 [#uses=1]
- bitcast <4 x float> %9 to <4 x i32> ; <<4 x i32>>:10 [#uses=1]
- or <4 x i32> %10, zeroinitializer ; <<4 x i32>>:11 [#uses=1]
- bitcast <4 x i32> %11 to <4 x float> ; <<4 x float>>:12 [#uses=1]
- fmul <4 x float> %12, < float 1.000000e+02, float 1.000000e+02, float 1.000000e+02, float 1.000000e+02 > ; <<4 x float>>:13 [#uses=1]
- fsub <4 x float> %13, < float 1.000000e+02, float 1.000000e+02, float 1.000000e+02, float 1.000000e+02 > ; <<4 x float>>:14 [#uses=1]
- extractelement <4 x float> %14, i32 3 ; <float>:15 [#uses=1]
- call float @fmaxf( float 0.000000e+00, float %15 ) ; <float>:16 [#uses=0]
- br label %bb.nph
-
-._crit_edge: ; preds = %0
- ret void
-}
-
-
-declare float @fmaxf(float, float)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-05-SpillerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-08-05-SpillerBug.ll
deleted file mode 100644
index 4c64934..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-05-SpillerBug.ll
+++ /dev/null
@@ -1,44 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -disable-fp-elim -stats |& grep asm-printer | grep 55
-; PR2568
-
-@g_3 = external global i16 ; <i16*> [#uses=1]
-@g_5 = external global i32 ; <i32*> [#uses=3]
-
-declare i32 @func_15(i16 signext , i16 signext , i32) nounwind
-
-define void @func_9_entry_2E_ce(i8 %p_11) nounwind {
-newFuncRoot:
- br label %entry.ce
-
-entry.ce.ret.exitStub: ; preds = %entry.ce
- ret void
-
-entry.ce: ; preds = %newFuncRoot
- load i16* @g_3, align 2 ; <i16>:0 [#uses=1]
- icmp sgt i16 %0, 0 ; <i1>:1 [#uses=1]
- zext i1 %1 to i32 ; <i32>:2 [#uses=1]
- load i32* @g_5, align 4 ; <i32>:3 [#uses=4]
- icmp ugt i32 %2, %3 ; <i1>:4 [#uses=1]
- zext i1 %4 to i32 ; <i32>:5 [#uses=1]
- icmp eq i32 %3, 0 ; <i1>:6 [#uses=1]
- %.0 = select i1 %6, i32 1, i32 %3 ; <i32> [#uses=1]
- urem i32 1, %.0 ; <i32>:7 [#uses=2]
- sext i8 %p_11 to i16 ; <i16>:8 [#uses=1]
- trunc i32 %3 to i16 ; <i16>:9 [#uses=1]
- tail call i32 @func_15( i16 signext %8, i16 signext %9, i32 1 ) nounwind ; <i32>:10 [#uses=0]
- load i32* @g_5, align 4 ; <i32>:11 [#uses=1]
- trunc i32 %11 to i16 ; <i16>:12 [#uses=1]
- tail call i32 @func_15( i16 signext %12, i16 signext 1, i32 %7 ) nounwind ; <i32>:13 [#uses=0]
- sext i8 %p_11 to i32 ; <i32>:14 [#uses=1]
- %p_11.lobit = lshr i8 %p_11, 7 ; <i8> [#uses=1]
- %tmp = zext i8 %p_11.lobit to i32 ; <i32> [#uses=1]
- %tmp.not = xor i32 %tmp, 1 ; <i32> [#uses=1]
- %.015 = ashr i32 %14, %tmp.not ; <i32> [#uses=2]
- icmp eq i32 %.015, 0 ; <i1>:15 [#uses=1]
- %.016 = select i1 %15, i32 1, i32 %.015 ; <i32> [#uses=1]
- udiv i32 %7, %.016 ; <i32>:16 [#uses=1]
- icmp ult i32 %5, %16 ; <i1>:17 [#uses=1]
- zext i1 %17 to i32 ; <i32>:18 [#uses=1]
- store i32 %18, i32* @g_5, align 4
- br label %entry.ce.ret.exitStub
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-06-RewriterBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-08-06-RewriterBug.ll
deleted file mode 100644
index 4428035..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-06-RewriterBug.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR2596
-
-@data = external global [400 x i64] ; <[400 x i64]*> [#uses=5]
-
-define void @foo(double* noalias, double* noalias) {
- load i64* getelementptr ([400 x i64]* @data, i32 0, i64 200), align 4 ; <i64>:3 [#uses=1]
- load i64* getelementptr ([400 x i64]* @data, i32 0, i64 199), align 4 ; <i64>:4 [#uses=1]
- load i64* getelementptr ([400 x i64]* @data, i32 0, i64 198), align 4 ; <i64>:5 [#uses=2]
- load i64* getelementptr ([400 x i64]* @data, i32 0, i64 197), align 4 ; <i64>:6 [#uses=1]
- br i1 false, label %28, label %7
-
-; <label>:7 ; preds = %2
- load double** getelementptr (double** bitcast ([400 x i64]* @data to double**), i64 180), align 8 ; <double*>:8 [#uses=1]
- bitcast double* %8 to double* ; <double*>:9 [#uses=1]
- ptrtoint double* %9 to i64 ; <i64>:10 [#uses=1]
- mul i64 %4, %3 ; <i64>:11 [#uses=1]
- add i64 0, %11 ; <i64>:12 [#uses=1]
- shl i64 %12, 3 ; <i64>:13 [#uses=1]
- sub i64 %10, %13 ; <i64>:14 [#uses=1]
- add i64 %5, 0 ; <i64>:15 [#uses=1]
- shl i64 %15, 3 ; <i64>:16 [#uses=1]
- bitcast i64 %16 to i64 ; <i64>:17 [#uses=1]
- mul i64 %6, %5 ; <i64>:18 [#uses=1]
- add i64 0, %18 ; <i64>:19 [#uses=1]
- shl i64 %19, 3 ; <i64>:20 [#uses=1]
- sub i64 %17, %20 ; <i64>:21 [#uses=1]
- add i64 0, %21 ; <i64>:22 [#uses=1]
- add i64 0, %14 ; <i64>:23 [#uses=1]
- br label %24
-
-; <label>:24 ; preds = %24, %7
- phi i64 [ 0, %24 ], [ %22, %7 ] ; <i64>:25 [#uses=1]
- phi i64 [ 0, %24 ], [ %23, %7 ] ; <i64>:26 [#uses=0]
- add i64 %25, 24 ; <i64>:27 [#uses=0]
- br label %24
-
-; <label>:28 ; preds = %2
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-07-PtrToInt-SmallerInt.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-08-07-PtrToInt-SmallerInt.ll
deleted file mode 100644
index 4f95dfe..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-07-PtrToInt-SmallerInt.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s
-; PR2603
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-linux-gnu"
- %struct.A = type { i8 }
- %struct.B = type { i8, [1 x i8] }
-@Foo = constant %struct.A { i8 ptrtoint (i8* getelementptr ([1 x i8]* inttoptr (i32 17 to [1 x i8]*), i32 0, i32 -16) to i8) } ; <%struct.A*> [#uses=0]
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-17-UComiCodeGenBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-08-17-UComiCodeGenBug.ll
deleted file mode 100644
index 32f6ca0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-17-UComiCodeGenBug.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | grep movzbl
-
-define i32 @foo(<4 x float> %a, <4 x float> %b) nounwind {
-entry:
- tail call i32 @llvm.x86.sse.ucomige.ss( <4 x float> %a, <4 x float> %b ) nounwind readnone
- ret i32 %0
-}
-
-declare i32 @llvm.x86.sse.ucomige.ss(<4 x float>, <4 x float>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-19-SubAndFetch.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-08-19-SubAndFetch.ll
deleted file mode 100644
index 8475e8d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-19-SubAndFetch.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
-@var = external global i64 ; <i64*> [#uses=1]
-
-define i32 @main() nounwind {
-entry:
-; CHECK: main:
-; CHECK: lock
-; CHECK: decq
- tail call i64 @llvm.atomic.load.sub.i64.p0i64( i64* @var, i64 1 ) ; <i64>:0 [#uses=0]
- unreachable
-}
-
-declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-23-64Bit-maskmovq.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-08-23-64Bit-maskmovq.ll
deleted file mode 100644
index c76dd7d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-23-64Bit-maskmovq.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=x86-64
-
- %struct.DrawHelper = type { void (i32, %struct.QT_FT_Span*, i8*)*, void (i32, %struct.QT_FT_Span*, i8*)*, void (%struct.QRasterBuffer*, i32, i32, i32, i8*, i32, i32, i32)*, void (%struct.QRasterBuffer*, i32, i32, i32, i8*, i32, i32, i32)*, void (%struct.QRasterBuffer*, i32, i32, i32, i32, i32)* }
- %struct.QBasicAtomic = type { i32 }
- %struct.QClipData = type { i32, %"struct.QClipData::ClipLine"*, i32, i32, %struct.QT_FT_Span*, i32, i32, i32, i32 }
- %"struct.QClipData::ClipLine" = type { i32, %struct.QT_FT_Span* }
- %struct.QRasterBuffer = type { %struct.QRect, %struct.QRect, %struct.QRegion, %struct.QRegion, %struct.QClipData*, %struct.QClipData*, i8, i8, i32, i32, i32, i32, %struct.DrawHelper*, i32, i32, i32, i8* }
- %struct.QRect = type { i32, i32, i32, i32 }
- %struct.QRegion = type { %"struct.QRegion::QRegionData"* }
- %"struct.QRegion::QRegionData" = type { %struct.QBasicAtomic, %struct._XRegion*, i8*, %struct.QRegionPrivate* }
- %struct.QRegionPrivate = type opaque
- %struct.QT_FT_Span = type { i16, i16, i16, i8 }
- %struct._XRegion = type opaque
-
-define hidden void @_Z24qt_bitmapblit16_sse3dnowP13QRasterBufferiijPKhiii(%struct.QRasterBuffer* %rasterBuffer, i32 %x, i32 %y, i32 %color, i8* %src, i32 %width, i32 %height, i32 %stride) nounwind {
-entry:
- br i1 false, label %bb.nph144.split, label %bb133
-
-bb.nph144.split: ; preds = %entry
- tail call void @llvm.x86.mmx.maskmovq( <8 x i8> zeroinitializer, <8 x i8> zeroinitializer, i8* null ) nounwind
- unreachable
-
-bb133: ; preds = %entry
- ret void
-}
-
-declare void @llvm.x86.mmx.maskmovq(<8 x i8>, <8 x i8>, i8*) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-23-X86-64AsmBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-08-23-X86-64AsmBug.ll
deleted file mode 100644
index eacb4a5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-23-X86-64AsmBug.ll
+++ /dev/null
@@ -1,59 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | grep movd | count 1
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | grep movq
-; PR2677
-
-
- %struct.Bigint = type { %struct.Bigint*, i32, i32, i32, i32, [1 x i32] }
-
-define double @_Z7qstrtodPKcPS0_Pb(i8* %s00, i8** %se, i8* %ok) nounwind {
-entry:
- br i1 false, label %bb151, label %bb163
-
-bb151: ; preds = %entry
- br label %bb163
-
-bb163: ; preds = %bb151, %entry
- %tmp366 = load double* null, align 8 ; <double> [#uses=1]
- %tmp368 = fmul double %tmp366, 0.000000e+00 ; <double> [#uses=1]
- %tmp368226 = bitcast double %tmp368 to i64 ; <i64> [#uses=1]
- br label %bb5.i
-
-bb5.i: ; preds = %bb5.i57.i, %bb163
- %b.0.i = phi %struct.Bigint* [ null, %bb163 ], [ %tmp9.i.i41.i, %bb5.i57.i ] ; <%struct.Bigint*> [#uses=1]
- %tmp3.i7.i728 = load i32* null, align 4 ; <i32> [#uses=1]
- br label %bb.i27.i
-
-bb.i27.i: ; preds = %bb.i27.i, %bb5.i
- %tmp23.i20.i = lshr i32 0, 16 ; <i32> [#uses=1]
- br i1 false, label %bb.i27.i, label %bb5.i57.i
-
-bb5.i57.i: ; preds = %bb.i27.i
- %tmp50.i35.i = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp51.i36.i = add i32 %tmp50.i35.i, 1 ; <i32> [#uses=2]
- %tmp2.i.i37.i = shl i32 1, %tmp51.i36.i ; <i32> [#uses=2]
- %tmp4.i.i38.i = shl i32 %tmp2.i.i37.i, 2 ; <i32> [#uses=1]
- %tmp7.i.i39.i = add i32 %tmp4.i.i38.i, 28 ; <i32> [#uses=1]
- %tmp8.i.i40.i = malloc i8, i32 %tmp7.i.i39.i ; <i8*> [#uses=1]
- %tmp9.i.i41.i = bitcast i8* %tmp8.i.i40.i to %struct.Bigint* ; <%struct.Bigint*> [#uses=2]
- store i32 %tmp51.i36.i, i32* null, align 8
- store i32 %tmp2.i.i37.i, i32* null, align 4
- free %struct.Bigint* %b.0.i
- store i32 %tmp23.i20.i, i32* null, align 4
- %tmp74.i61.i = add i32 %tmp3.i7.i728, 1 ; <i32> [#uses=1]
- store i32 %tmp74.i61.i, i32* null, align 4
- br i1 false, label %bb5.i, label %bb7.i
-
-bb7.i: ; preds = %bb5.i57.i
- %tmp514 = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp515 = sext i32 %tmp514 to i64 ; <i64> [#uses=1]
- %tmp516 = shl i64 %tmp515, 2 ; <i64> [#uses=1]
- %tmp517 = add i64 %tmp516, 8 ; <i64> [#uses=1]
- %tmp519 = getelementptr %struct.Bigint* %tmp9.i.i41.i, i32 0, i32 3 ; <i32*> [#uses=1]
- %tmp523 = bitcast i32* %tmp519 to i8* ; <i8*> [#uses=1]
- call void @llvm.memcpy.i64( i8* null, i8* %tmp523, i64 %tmp517, i32 1 )
- %tmp524136 = bitcast i64 %tmp368226 to double ; <double> [#uses=1]
- store double %tmp524136, double* null
- unreachable
-}
-
-declare void @llvm.memcpy.i64(i8*, i8*, i64, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-25-AsmRegTypeMismatch.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-08-25-AsmRegTypeMismatch.ll
deleted file mode 100644
index 101b3c5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-25-AsmRegTypeMismatch.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -mcpu=core2 | grep pxor | count 2
-; RUN: llc < %s -mcpu=core2 | not grep movapd
-; PR2715
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
- %struct.XPTTypeDescriptorPrefix = type { i8 }
- %struct.nsISupports = type { i32 (...)** }
- %struct.nsXPTCMiniVariant = type { %"struct.nsXPTCMiniVariant::._39" }
- %"struct.nsXPTCMiniVariant::._39" = type { i64 }
- %struct.nsXPTCVariant = type { %struct.nsXPTCMiniVariant, i8*, %struct.nsXPTType, i8 }
- %struct.nsXPTType = type { %struct.XPTTypeDescriptorPrefix }
-
-define i32 @XPTC_InvokeByIndex(%struct.nsISupports* %that, i32 %methodIndex, i32 %paramCount, %struct.nsXPTCVariant* %params) nounwind {
-entry:
- call void asm sideeffect "", "{xmm0},{xmm1},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},~{dirflag},~{fpsr},~{flags}"( double undef, double undef, double undef, double 1.0, double undef, double 0.0, double undef, double 0.0 ) nounwind
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-31-EH_RETURN32.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-08-31-EH_RETURN32.ll
deleted file mode 100644
index b92c789..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-31-EH_RETURN32.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; Check that eh_return & unwind_init were properly lowered
-; RUN: llc < %s | grep %ebp | count 7
-; RUN: llc < %s | grep %ecx | count 5
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i386-pc-linux"
-
-define i8* @test(i32 %a, i8* %b) {
-entry:
- call void @llvm.eh.unwind.init()
- %foo = alloca i32
- call void @llvm.eh.return.i32(i32 %a, i8* %b)
- unreachable
-}
-
-declare void @llvm.eh.return.i32(i32, i8*)
-declare void @llvm.eh.unwind.init()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll
deleted file mode 100644
index 00ab735..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; Check that eh_return & unwind_init were properly lowered
-; RUN: llc < %s | grep %rbp | count 5
-; RUN: llc < %s | grep %rcx | count 3
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define i8* @test(i64 %a, i8* %b) {
-entry:
- call void @llvm.eh.unwind.init()
- %foo = alloca i32
- call void @llvm.eh.return.i64(i64 %a, i8* %b)
- unreachable
-}
-
-declare void @llvm.eh.return.i64(i64, i8*)
-declare void @llvm.eh.unwind.init()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-05-sinttofp-2xi32.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-09-05-sinttofp-2xi32.ll
deleted file mode 100644
index 60be0d5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-05-sinttofp-2xi32.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep cvttpd2pi | count 1
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep cvtpi2pd | count 1
-; PR2687
-
-define <2 x double> @a(<2 x i32> %x) nounwind {
-entry:
- %y = sitofp <2 x i32> %x to <2 x double>
- ret <2 x double> %y
-}
-
-define <2 x i32> @b(<2 x double> %x) nounwind {
-entry:
- %y = fptosi <2 x double> %x to <2 x i32>
- ret <2 x i32> %y
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-09-LinearScanBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-09-09-LinearScanBug.ll
deleted file mode 100644
index b3312d9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-09-LinearScanBug.ll
+++ /dev/null
@@ -1,65 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin
-; PR2757
-
-@g_3 = external global i32 ; <i32*> [#uses=1]
-
-define i32 @func_125(i32 %p_126, i32 %p_128, i32 %p_129) nounwind {
-entry:
- %tmp2.i = load i32* @g_3 ; <i32> [#uses=2]
- %conv = trunc i32 %tmp2.i to i16 ; <i16> [#uses=3]
- br label %forcond1.preheader.i.i7
-
-forcond1.preheader.i.i7: ; preds = %forinc6.i.i25, %entry
- %p_86.addr.06.i.i4 = phi i32 [ 0, %entry ], [ %sub.i.i.i23, %forinc6.i.i25 ] ; <i32> [#uses=1]
- %p_87.addr.15.i.i5 = phi i32 [ 0, %entry ], [ %p_87.addr.0.lcssa.i.i21, %forinc6.i.i25 ] ; <i32> [#uses=2]
- br i1 false, label %forinc6.i.i25, label %forinc.i.i11
-
-forinc.i.i11: ; preds = %forcond1.backedge.i.i20, %forcond1.preheader.i.i7
- %p_87.addr.02.i.i8 = phi i32 [ %p_87.addr.15.i.i5, %forcond1.preheader.i.i7 ], [ %p_87.addr.0.be.i.i18, %forcond1.backedge.i.i20 ] ; <i32> [#uses=1]
- %conv.i.i9 = trunc i32 %p_87.addr.02.i.i8 to i8 ; <i8> [#uses=1]
- br i1 false, label %land_rhs3.i.i.i14, label %lor_rhs.i.i.i17
-
-land_rhs3.i.i.i14: ; preds = %forinc.i.i11
- br i1 false, label %forcond1.backedge.i.i20, label %lor_rhs.i.i.i17
-
-lor_rhs.i.i.i17: ; preds = %land_rhs3.i.i.i14, %forinc.i.i11
- %conv29.i.i.i15 = sext i8 %conv.i.i9 to i32 ; <i32> [#uses=1]
- %add.i.i.i16 = add i32 %conv29.i.i.i15, 1 ; <i32> [#uses=1]
- br label %forcond1.backedge.i.i20
-
-forcond1.backedge.i.i20: ; preds = %lor_rhs.i.i.i17, %land_rhs3.i.i.i14
- %p_87.addr.0.be.i.i18 = phi i32 [ %add.i.i.i16, %lor_rhs.i.i.i17 ], [ 0, %land_rhs3.i.i.i14 ] ; <i32> [#uses=3]
- %tobool3.i.i19 = icmp eq i32 %p_87.addr.0.be.i.i18, 0 ; <i1> [#uses=1]
- br i1 %tobool3.i.i19, label %forinc6.i.i25, label %forinc.i.i11
-
-forinc6.i.i25: ; preds = %forcond1.backedge.i.i20, %forcond1.preheader.i.i7
- %p_87.addr.0.lcssa.i.i21 = phi i32 [ %p_87.addr.15.i.i5, %forcond1.preheader.i.i7 ], [ %p_87.addr.0.be.i.i18, %forcond1.backedge.i.i20 ] ; <i32> [#uses=1]
- %conv.i.i.i22 = and i32 %p_86.addr.06.i.i4, 255 ; <i32> [#uses=1]
- %sub.i.i.i23 = add i32 %conv.i.i.i22, -1 ; <i32> [#uses=2]
- %phitmp.i.i24 = icmp eq i32 %sub.i.i.i23, 0 ; <i1> [#uses=1]
- br i1 %phitmp.i.i24, label %func_106.exit27, label %forcond1.preheader.i.i7
-
-func_106.exit27: ; preds = %forinc6.i.i25
- %cmp = icmp ne i32 %tmp2.i, 1 ; <i1> [#uses=3]
- %cmp.ext = zext i1 %cmp to i32 ; <i32> [#uses=1]
- br i1 %cmp, label %safe_mod_int16_t_s_s.exit, label %lor_rhs.i
-
-lor_rhs.i: ; preds = %func_106.exit27
- %tobool.i = xor i1 %cmp, true ; <i1> [#uses=1]
- %or.cond.i = or i1 false, %tobool.i ; <i1> [#uses=1]
- br i1 %or.cond.i, label %ifend.i, label %safe_mod_int16_t_s_s.exit
-
-ifend.i: ; preds = %lor_rhs.i
- %conv6.i = sext i16 %conv to i32 ; <i32> [#uses=1]
- %rem.i = urem i32 %conv6.i, %cmp.ext ; <i32> [#uses=1]
- %conv8.i = trunc i32 %rem.i to i16 ; <i16> [#uses=1]
- br label %safe_mod_int16_t_s_s.exit
-
-safe_mod_int16_t_s_s.exit: ; preds = %ifend.i, %lor_rhs.i, %func_106.exit27
- %call31 = phi i16 [ %conv8.i, %ifend.i ], [ %conv, %func_106.exit27 ], [ %conv, %lor_rhs.i ] ; <i16> [#uses=1]
- %conv4 = sext i16 %call31 to i32 ; <i32> [#uses=1]
- %call5 = tail call i32 (...)* @func_104( i32 %conv4 ) ; <i32> [#uses=0]
- ret i32 undef
-}
-
-declare i32 @func_104(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-11-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-09-11-CoalescerBug.ll
deleted file mode 100644
index 108f243..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-11-CoalescerBug.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR2783
-
-@g_15 = external global i16 ; <i16*> [#uses=2]
-
-define i32 @func_3(i32 %p_5) nounwind {
-entry:
- %0 = srem i32 1, 0 ; <i32> [#uses=2]
- %1 = load i16* @g_15, align 2 ; <i16> [#uses=1]
- %2 = zext i16 %1 to i32 ; <i32> [#uses=1]
- %3 = and i32 %2, 1 ; <i32> [#uses=1]
- %4 = tail call i32 (...)* @rshift_u_s( i32 1 ) nounwind ; <i32> [#uses=1]
- %5 = icmp slt i32 %4, 2 ; <i1> [#uses=1]
- %6 = zext i1 %5 to i32 ; <i32> [#uses=1]
- %7 = icmp sge i32 %3, %6 ; <i1> [#uses=1]
- %8 = zext i1 %7 to i32 ; <i32> [#uses=1]
- %9 = load i16* @g_15, align 2 ; <i16> [#uses=1]
- %10 = icmp eq i16 %9, 0 ; <i1> [#uses=1]
- %11 = zext i1 %10 to i32 ; <i32> [#uses=1]
- %12 = tail call i32 (...)* @func_20( i32 1 ) nounwind ; <i32> [#uses=1]
- %13 = icmp sge i32 %11, %12 ; <i1> [#uses=1]
- %14 = zext i1 %13 to i32 ; <i32> [#uses=1]
- %15 = sub i32 %8, %14 ; <i32> [#uses=1]
- %16 = icmp ult i32 %15, 2 ; <i1> [#uses=1]
- %17 = zext i1 %16 to i32 ; <i32> [#uses=1]
- %18 = icmp ugt i32 %0, 3 ; <i1> [#uses=1]
- %or.cond = or i1 false, %18 ; <i1> [#uses=1]
- %19 = select i1 %or.cond, i32 0, i32 %0 ; <i32> [#uses=1]
- %.0 = lshr i32 %17, %19 ; <i32> [#uses=1]
- %20 = tail call i32 (...)* @func_7( i32 %.0 ) nounwind ; <i32> [#uses=0]
- ret i32 undef
-}
-
-declare i32 @rshift_u_s(...)
-
-declare i32 @func_20(...)
-
-declare i32 @func_7(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-11-CoalescerBug2.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-09-11-CoalescerBug2.ll
deleted file mode 100644
index 534f990..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-11-CoalescerBug2.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR2748
-
-@g_73 = external global i32 ; <i32*> [#uses=1]
-@g_5 = external global i32 ; <i32*> [#uses=1]
-
-define i32 @func_44(i16 signext %p_46) nounwind {
-entry:
- %0 = load i32* @g_5, align 4 ; <i32> [#uses=1]
- %1 = ashr i32 %0, 1 ; <i32> [#uses=1]
- %2 = icmp sgt i32 %1, 1 ; <i1> [#uses=1]
- %3 = zext i1 %2 to i32 ; <i32> [#uses=1]
- %4 = load i32* @g_73, align 4 ; <i32> [#uses=1]
- %5 = zext i16 %p_46 to i64 ; <i64> [#uses=1]
- %6 = sub i64 0, %5 ; <i64> [#uses=1]
- %7 = trunc i64 %6 to i8 ; <i8> [#uses=2]
- %8 = trunc i32 %4 to i8 ; <i8> [#uses=2]
- %9 = icmp eq i8 %8, 0 ; <i1> [#uses=1]
- br i1 %9, label %bb11, label %bb12
-
-bb11: ; preds = %entry
- %10 = urem i8 %7, %8 ; <i8> [#uses=1]
- br label %bb12
-
-bb12: ; preds = %bb11, %entry
- %.014.in = phi i8 [ %10, %bb11 ], [ %7, %entry ] ; <i8> [#uses=1]
- %11 = icmp ne i8 %.014.in, 0 ; <i1> [#uses=1]
- %12 = zext i1 %11 to i32 ; <i32> [#uses=1]
- %13 = tail call i32 (...)* @func_48( i32 %12, i32 %3, i32 0 ) nounwind ; <i32> [#uses=0]
- ret i32 undef
-}
-
-declare i32 @func_48(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll
deleted file mode 100644
index 74429c3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep "movl %eax, %eax"
-; RUN: llc < %s -march=x86 | not grep "movl %edx, %edx"
-; RUN: llc < %s -march=x86 | not grep "movl (%eax), %eax"
-; RUN: llc < %s -march=x86 | not grep "movl (%edx), %edx"
-; RUN: llc < %s -march=x86 -regalloc=local | not grep "movl %eax, %eax"
-; RUN: llc < %s -march=x86 -regalloc=local | not grep "movl %edx, %edx"
-; RUN: llc < %s -march=x86 -regalloc=local | not grep "movl (%eax), %eax"
-; RUN: llc < %s -march=x86 -regalloc=local | not grep "movl (%edx), %edx"
-
-; %0 must not be put in EAX or EDX.
-; In the first asm, $0 and $2 must not be put in EAX.
-; In the second asm, $0 and $2 must not be put in EDX.
-; This is kind of hard to test thoroughly, but the things above should continue
-; to pass, I think.
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
-@x = common global i32 0 ; <i32*> [#uses=1]
-
-define i32 @aci(i32* %pw) nounwind {
-entry:
- %0 = load i32* @x, align 4 ; <i32> [#uses=1]
- %asmtmp = tail call { i32, i32 } asm "movl $0, %eax\0A\090:\0A\09test %eax, %eax\0A\09je 1f\0A\09movl %eax, $2\0A\09incl $2\0A\09lock\0A\09cmpxchgl $2, $0\0A\09jne 0b\0A\091:", "=*m,=&{ax},=&r,*m,~{dirflag},~{fpsr},~{flags},~{memory},~{cc}"(i32* %pw, i32* %pw) nounwind ; <{ i32, i32 }> [#uses=0]
- %asmtmp2 = tail call { i32, i32 } asm "movl $0, %edx\0A\090:\0A\09test %edx, %edx\0A\09je 1f\0A\09movl %edx, $2\0A\09incl $2\0A\09lock\0A\09cmpxchgl $2, $0\0A\09jne 0b\0A\091:", "=*m,=&{dx},=&r,*m,~{dirflag},~{fpsr},~{flags},~{memory},~{cc}"(i32* %pw, i32* %pw) nounwind ; <{ i32, i32 }> [#uses=1]
- %asmresult3 = extractvalue { i32, i32 } %asmtmp2, 0 ; <i32> [#uses=1]
- %1 = add i32 %asmresult3, %0 ; <i32> [#uses=1]
- ret i32 %1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-18-inline-asm-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
deleted file mode 100644
index e3b6fdf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -march=x86 | grep "#%ebp %esi %edi 8(%edx) %eax (%ebx)"
-; RUN: llc < %s -march=x86 -regalloc=local | grep "#%edi %ebp %edx 8(%ebx) %eax (%esi)"
-; The 1st, 2nd, 3rd and 5th registers above must all be different. The registers
-; referenced in the 4th and 6th operands must not be the same as the 1st or 5th
-; operand. There are many combinations that work; this is what llc puts out now.
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
- %struct.foo = type { i32, i32, i8* }
-
-define i32 @get(%struct.foo* %c, i8* %state) nounwind {
-entry:
- %0 = getelementptr %struct.foo* %c, i32 0, i32 0 ; <i32*> [#uses=2]
- %1 = getelementptr %struct.foo* %c, i32 0, i32 1 ; <i32*> [#uses=2]
- %2 = getelementptr %struct.foo* %c, i32 0, i32 2 ; <i8**> [#uses=2]
- %3 = load i32* %0, align 4 ; <i32> [#uses=1]
- %4 = load i32* %1, align 4 ; <i32> [#uses=1]
- %5 = load i8* %state, align 1 ; <i8> [#uses=1]
- %asmtmp = tail call { i32, i32, i32, i32 } asm sideeffect "#$0 $1 $2 $3 $4 $5", "=&r,=r,=r,=*m,=&q,=*imr,1,2,*m,5,~{dirflag},~{fpsr},~{flags},~{cx}"(i8** %2, i8* %state, i32 %3, i32 %4, i8** %2, i8 %5) nounwind ; <{ i32, i32, i32, i32 }> [#uses=3]
- %asmresult = extractvalue { i32, i32, i32, i32 } %asmtmp, 0 ; <i32> [#uses=1]
- %asmresult1 = extractvalue { i32, i32, i32, i32 } %asmtmp, 1 ; <i32> [#uses=1]
- store i32 %asmresult1, i32* %0
- %asmresult2 = extractvalue { i32, i32, i32, i32 } %asmtmp, 2 ; <i32> [#uses=1]
- store i32 %asmresult2, i32* %1
- ret i32 %asmresult
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-19-RegAllocBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-09-19-RegAllocBug.ll
deleted file mode 100644
index a8f2912..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-19-RegAllocBug.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin
-; PR2808
-
-@g_3 = external global i32 ; <i32*> [#uses=1]
-
-define i32 @func_4() nounwind {
-entry:
- %0 = load i32* @g_3, align 4 ; <i32> [#uses=2]
- %1 = trunc i32 %0 to i8 ; <i8> [#uses=1]
- %2 = sub i8 1, %1 ; <i8> [#uses=1]
- %3 = sext i8 %2 to i32 ; <i32> [#uses=1]
- %.0 = ashr i32 %3, select (i1 icmp ne (i8 zext (i1 icmp ugt (i32 ptrtoint (i32 ()* @func_4 to i32), i32 3) to i8), i8 0), i32 0, i32 ptrtoint (i32 ()* @func_4 to i32)) ; <i32> [#uses=1]
- %4 = urem i32 %0, %.0 ; <i32> [#uses=1]
- %5 = icmp eq i32 %4, 0 ; <i1> [#uses=1]
- br i1 %5, label %return, label %bb4
-
-bb4: ; preds = %entry
- ret i32 undef
-
-return: ; preds = %entry
- ret i32 undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-25-sseregparm-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-09-25-sseregparm-1.ll
deleted file mode 100644
index c92a8f4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-25-sseregparm-1.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movs | count 2
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep fld | count 2
-; check 'inreg' attribute for sse_regparm
-
-define double @foo1() inreg nounwind {
- ret double 1.0
-}
-
-define float @foo2() inreg nounwind {
- ret float 1.0
-}
-
-define double @bar() nounwind {
- ret double 1.0
-}
-
-define float @bar2() nounwind {
- ret float 1.0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-26-FrameAddrBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-09-26-FrameAddrBug.ll
deleted file mode 100644
index f1ada28..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-26-FrameAddrBug.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9
-
- %struct._Unwind_Context = type { [18 x i8*], i8*, i8*, i8*, %struct.dwarf_eh_bases, i32, i32, i32, [18 x i8] }
- %struct._Unwind_Exception = type { i64, void (i32, %struct._Unwind_Exception*)*, i32, i32, [3 x i32] }
- %struct.dwarf_eh_bases = type { i8*, i8*, i8* }
-
-declare fastcc void @uw_init_context_1(%struct._Unwind_Context*, i8*, i8*)
-
-declare i8* @llvm.eh.dwarf.cfa(i32) nounwind
-
-define hidden void @_Unwind_Resume(%struct._Unwind_Exception* %exc) noreturn noreturn {
-entry:
- %0 = call i8* @llvm.eh.dwarf.cfa(i32 0) ; <i8*> [#uses=1]
- call fastcc void @uw_init_context_1(%struct._Unwind_Context* null, i8* %0, i8* null)
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-29-ReMatBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-09-29-ReMatBug.ll
deleted file mode 100644
index c36cf39..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-29-ReMatBug.ll
+++ /dev/null
@@ -1,85 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic -disable-fp-elim
-
- %struct..0objc_selector = type opaque
- %struct.NSString = type opaque
- %struct.XCStringList = type { i32, %struct._XCStringListNode* }
- %struct._XCStringListNode = type { [3 x i8], [0 x i8], i8 }
- %struct.__builtin_CFString = type { i32*, i32, i8*, i32 }
-internal constant %struct.__builtin_CFString { i32* getelementptr ([0 x i32]* @__CFConstantStringClassReference, i32 0, i32 0), i32 1992, i8* getelementptr ([3 x i8]* @"\01LC", i32 0, i32 0), i32 2 } ; <%struct.__builtin_CFString*>:0 [#uses=1]
-@__CFConstantStringClassReference = external global [0 x i32] ; <[0 x i32]*> [#uses=1]
-@"\01LC" = internal constant [3 x i8] c"NO\00" ; <[3 x i8]*> [#uses=1]
-@"\01LC1" = internal constant [1 x i8] zeroinitializer ; <[1 x i8]*> [#uses=1]
-@llvm.used1 = appending global [1 x i8*] [ i8* bitcast (%struct.NSString* (%struct.XCStringList*, %struct..0objc_selector*)* @"-[XCStringList stringRepresentation]" to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define %struct.NSString* @"-[XCStringList stringRepresentation]"(%struct.XCStringList* %self, %struct..0objc_selector* %_cmd) nounwind {
-entry:
- %0 = load i32* null, align 4 ; <i32> [#uses=1]
- %1 = and i32 %0, 16777215 ; <i32> [#uses=1]
- %2 = icmp eq i32 %1, 0 ; <i1> [#uses=1]
- br i1 %2, label %bb44, label %bb4
-
-bb4: ; preds = %entry
- %3 = load %struct._XCStringListNode** null, align 4 ; <%struct._XCStringListNode*> [#uses=2]
- %4 = icmp eq %struct._XCStringListNode* %3, null ; <i1> [#uses=1]
- %5 = bitcast %struct._XCStringListNode* %3 to i32* ; <i32*> [#uses=1]
- br label %bb37.outer
-
-bb6: ; preds = %bb37
- br label %bb19
-
-bb19: ; preds = %bb37, %bb6
- %.rle = phi i32 [ 0, %bb6 ], [ %10, %bb37 ] ; <i32> [#uses=1]
- %bufptr.0.lcssa = phi i8* [ null, %bb6 ], [ null, %bb37 ] ; <i8*> [#uses=2]
- %6 = and i32 %.rle, 16777215 ; <i32> [#uses=1]
- %7 = icmp eq i32 %6, 0 ; <i1> [#uses=1]
- br i1 %7, label %bb25.split, label %bb37
-
-bb25.split: ; preds = %bb19
- call void @foo(i8* getelementptr ([1 x i8]* @"\01LC1", i32 0, i32 0)) nounwind nounwind
- br label %bb35.outer
-
-bb34: ; preds = %bb35, %bb35, %bb35, %bb35
- %8 = getelementptr i8* %bufptr.0.lcssa, i32 %totalLength.0.ph ; <i8*> [#uses=1]
- store i8 92, i8* %8, align 1
- br label %bb35.outer
-
-bb35.outer: ; preds = %bb34, %bb25.split
- %totalLength.0.ph = add i32 0, %totalLength.1.ph ; <i32> [#uses=2]
- br label %bb35
-
-bb35: ; preds = %bb35, %bb35.outer
- %9 = load i8* null, align 1 ; <i8> [#uses=1]
- switch i8 %9, label %bb35 [
- i8 0, label %bb37.outer
- i8 32, label %bb34
- i8 92, label %bb34
- i8 34, label %bb34
- i8 39, label %bb34
- ]
-
-bb37.outer: ; preds = %bb35, %bb4
- %totalLength.1.ph = phi i32 [ 0, %bb4 ], [ %totalLength.0.ph, %bb35 ] ; <i32> [#uses=1]
- %bufptr.1.ph = phi i8* [ null, %bb4 ], [ %bufptr.0.lcssa, %bb35 ] ; <i8*> [#uses=2]
- br i1 %4, label %bb39.split, label %bb37
-
-bb37: ; preds = %bb37.outer, %bb19
- %10 = load i32* %5, align 4 ; <i32> [#uses=1]
- br i1 false, label %bb6, label %bb19
-
-bb39.split: ; preds = %bb37.outer
- %11 = bitcast i8* null to %struct.NSString* ; <%struct.NSString*> [#uses=2]
- %12 = icmp eq i8* null, %bufptr.1.ph ; <i1> [#uses=1]
- br i1 %12, label %bb44, label %bb42
-
-bb42: ; preds = %bb39.split
- call void @quux(i8* %bufptr.1.ph) nounwind nounwind
- ret %struct.NSString* %11
-
-bb44: ; preds = %bb39.split, %entry
- %.0 = phi %struct.NSString* [ bitcast (%struct.__builtin_CFString* @0 to %struct.NSString*), %entry ], [ %11, %bb39.split ] ; <%struct.NSString*> [#uses=1]
- ret %struct.NSString* %.0
-}
-
-declare void @foo(i8*)
-
-declare void @quux(i8*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-29-VolatileBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-09-29-VolatileBug.ll
deleted file mode 100644
index 935c4c5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-09-29-VolatileBug.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep movz
-; PR2835
-
-@g_407 = internal global i32 0 ; <i32*> [#uses=1]
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (i32 ()* @main to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define i32 @main() nounwind {
-entry:
- %0 = volatile load i32* @g_407, align 4 ; <i32> [#uses=1]
- %1 = trunc i32 %0 to i8 ; <i8> [#uses=1]
- %2 = tail call i32 @func_45(i8 zeroext %1) nounwind ; <i32> [#uses=0]
- ret i32 0
-}
-
-declare i32 @func_45(i8 zeroext) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-02-Atomics32-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-10-02-Atomics32-2.ll
deleted file mode 100644
index b48c4ad..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-02-Atomics32-2.ll
+++ /dev/null
@@ -1,969 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-;; This version includes 64-bit version of binary operators (in 32-bit mode).
-;; Swap, cmp-and-swap not supported yet in this mode.
-; ModuleID = 'Atomics.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
-@sc = common global i8 0 ; <i8*> [#uses=52]
-@uc = common global i8 0 ; <i8*> [#uses=112]
-@ss = common global i16 0 ; <i16*> [#uses=15]
-@us = common global i16 0 ; <i16*> [#uses=15]
-@si = common global i32 0 ; <i32*> [#uses=15]
-@ui = common global i32 0 ; <i32*> [#uses=23]
-@sl = common global i32 0 ; <i32*> [#uses=15]
-@ul = common global i32 0 ; <i32*> [#uses=15]
-@sll = common global i64 0, align 8 ; <i64*> [#uses=13]
-@ull = common global i64 0, align 8 ; <i64*> [#uses=13]
-
-define void @test_op_ignore() nounwind {
-entry:
- %0 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
- %1 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
- %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %3 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %2, i16 1) ; <i16> [#uses=0]
- %4 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %5 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %4, i16 1) ; <i16> [#uses=0]
- %6 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %7 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %6, i32 1) ; <i32> [#uses=0]
- %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %9 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %8, i32 1) ; <i32> [#uses=0]
- %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %11 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %10, i32 1) ; <i32> [#uses=0]
- %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %13 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %12, i32 1) ; <i32> [#uses=0]
- %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %15 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %14, i64 1) ; <i64> [#uses=0]
- %16 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %17 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %16, i64 1) ; <i64> [#uses=0]
- %18 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
- %19 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
- %20 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %21 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %20, i16 1) ; <i16> [#uses=0]
- %22 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %23 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %22, i16 1) ; <i16> [#uses=0]
- %24 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %25 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %24, i32 1) ; <i32> [#uses=0]
- %26 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %27 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %26, i32 1) ; <i32> [#uses=0]
- %28 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %29 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %28, i32 1) ; <i32> [#uses=0]
- %30 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %31 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %30, i32 1) ; <i32> [#uses=0]
- %32 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %33 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %32, i64 1) ; <i64> [#uses=0]
- %34 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %35 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %34, i64 1) ; <i64> [#uses=0]
- %36 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
- %37 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
- %38 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %39 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %38, i16 1) ; <i16> [#uses=0]
- %40 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %41 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %40, i16 1) ; <i16> [#uses=0]
- %42 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %43 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %42, i32 1) ; <i32> [#uses=0]
- %44 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %45 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %44, i32 1) ; <i32> [#uses=0]
- %46 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %47 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %46, i32 1) ; <i32> [#uses=0]
- %48 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %49 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %48, i32 1) ; <i32> [#uses=0]
- %50 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %51 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %50, i64 1) ; <i64> [#uses=0]
- %52 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %53 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %52, i64 1) ; <i64> [#uses=0]
- %54 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
- %55 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
- %56 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %57 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %56, i16 1) ; <i16> [#uses=0]
- %58 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %59 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %58, i16 1) ; <i16> [#uses=0]
- %60 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %61 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %60, i32 1) ; <i32> [#uses=0]
- %62 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %63 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %62, i32 1) ; <i32> [#uses=0]
- %64 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %65 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %64, i32 1) ; <i32> [#uses=0]
- %66 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %67 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %66, i32 1) ; <i32> [#uses=0]
- %68 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %69 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %68, i64 1) ; <i64> [#uses=0]
- %70 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %71 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %70, i64 1) ; <i64> [#uses=0]
- %72 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
- %73 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
- %74 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %75 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %74, i16 1) ; <i16> [#uses=0]
- %76 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %77 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %76, i16 1) ; <i16> [#uses=0]
- %78 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %79 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %78, i32 1) ; <i32> [#uses=0]
- %80 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %81 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %80, i32 1) ; <i32> [#uses=0]
- %82 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %83 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %82, i32 1) ; <i32> [#uses=0]
- %84 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %85 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %84, i32 1) ; <i32> [#uses=0]
- %86 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %87 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %86, i64 1) ; <i64> [#uses=0]
- %88 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %89 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %88, i64 1) ; <i64> [#uses=0]
- %90 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
- %91 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
- %92 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %93 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %92, i16 1) ; <i16> [#uses=0]
- %94 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %95 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %94, i16 1) ; <i16> [#uses=0]
- %96 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %97 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %96, i32 1) ; <i32> [#uses=0]
- %98 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %99 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %98, i32 1) ; <i32> [#uses=0]
- %100 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %101 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %100, i32 1) ; <i32> [#uses=0]
- %102 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %103 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %102, i32 1) ; <i32> [#uses=0]
- %104 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %105 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %104, i64 1) ; <i64> [#uses=0]
- %106 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %107 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %106, i64 1) ; <i64> [#uses=0]
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.add.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.or.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.xor.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.and.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.nand.i64.p0i64(i64*, i64) nounwind
-
-define void @test_fetch_and_op() nounwind {
-entry:
- %0 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
- store i8 %0, i8* @sc, align 1
- %1 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
- store i8 %1, i8* @uc, align 1
- %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %3 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %2, i16 11) ; <i16> [#uses=1]
- store i16 %3, i16* @ss, align 2
- %4 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %5 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %4, i16 11) ; <i16> [#uses=1]
- store i16 %5, i16* @us, align 2
- %6 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %7 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %6, i32 11) ; <i32> [#uses=1]
- store i32 %7, i32* @si, align 4
- %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %9 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %8, i32 11) ; <i32> [#uses=1]
- store i32 %9, i32* @ui, align 4
- %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %11 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %10, i32 11) ; <i32> [#uses=1]
- store i32 %11, i32* @sl, align 4
- %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %13 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %12, i32 11) ; <i32> [#uses=1]
- store i32 %13, i32* @ul, align 4
- %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %15 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %14, i64 11) ; <i64> [#uses=1]
- store i64 %15, i64* @sll, align 8
- %16 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %17 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %16, i64 11) ; <i64> [#uses=1]
- store i64 %17, i64* @ull, align 8
- %18 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
- store i8 %18, i8* @sc, align 1
- %19 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
- store i8 %19, i8* @uc, align 1
- %20 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %21 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %20, i16 11) ; <i16> [#uses=1]
- store i16 %21, i16* @ss, align 2
- %22 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %23 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %22, i16 11) ; <i16> [#uses=1]
- store i16 %23, i16* @us, align 2
- %24 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %25 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %24, i32 11) ; <i32> [#uses=1]
- store i32 %25, i32* @si, align 4
- %26 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %27 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %26, i32 11) ; <i32> [#uses=1]
- store i32 %27, i32* @ui, align 4
- %28 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %29 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %28, i32 11) ; <i32> [#uses=1]
- store i32 %29, i32* @sl, align 4
- %30 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %31 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %30, i32 11) ; <i32> [#uses=1]
- store i32 %31, i32* @ul, align 4
- %32 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %33 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %32, i64 11) ; <i64> [#uses=1]
- store i64 %33, i64* @sll, align 8
- %34 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %35 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %34, i64 11) ; <i64> [#uses=1]
- store i64 %35, i64* @ull, align 8
- %36 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
- store i8 %36, i8* @sc, align 1
- %37 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
- store i8 %37, i8* @uc, align 1
- %38 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %39 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %38, i16 11) ; <i16> [#uses=1]
- store i16 %39, i16* @ss, align 2
- %40 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %41 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %40, i16 11) ; <i16> [#uses=1]
- store i16 %41, i16* @us, align 2
- %42 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %43 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %42, i32 11) ; <i32> [#uses=1]
- store i32 %43, i32* @si, align 4
- %44 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %45 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %44, i32 11) ; <i32> [#uses=1]
- store i32 %45, i32* @ui, align 4
- %46 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %47 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %46, i32 11) ; <i32> [#uses=1]
- store i32 %47, i32* @sl, align 4
- %48 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %49 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %48, i32 11) ; <i32> [#uses=1]
- store i32 %49, i32* @ul, align 4
- %50 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %51 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %50, i64 11) ; <i64> [#uses=1]
- store i64 %51, i64* @sll, align 8
- %52 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %53 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %52, i64 11) ; <i64> [#uses=1]
- store i64 %53, i64* @ull, align 8
- %54 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
- store i8 %54, i8* @sc, align 1
- %55 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
- store i8 %55, i8* @uc, align 1
- %56 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %57 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %56, i16 11) ; <i16> [#uses=1]
- store i16 %57, i16* @ss, align 2
- %58 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %59 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %58, i16 11) ; <i16> [#uses=1]
- store i16 %59, i16* @us, align 2
- %60 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %61 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %60, i32 11) ; <i32> [#uses=1]
- store i32 %61, i32* @si, align 4
- %62 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %63 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %62, i32 11) ; <i32> [#uses=1]
- store i32 %63, i32* @ui, align 4
- %64 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %65 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %64, i32 11) ; <i32> [#uses=1]
- store i32 %65, i32* @sl, align 4
- %66 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %67 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %66, i32 11) ; <i32> [#uses=1]
- store i32 %67, i32* @ul, align 4
- %68 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %69 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %68, i64 11) ; <i64> [#uses=1]
- store i64 %69, i64* @sll, align 8
- %70 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %71 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %70, i64 11) ; <i64> [#uses=1]
- store i64 %71, i64* @ull, align 8
- %72 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
- store i8 %72, i8* @sc, align 1
- %73 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
- store i8 %73, i8* @uc, align 1
- %74 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %75 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %74, i16 11) ; <i16> [#uses=1]
- store i16 %75, i16* @ss, align 2
- %76 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %77 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %76, i16 11) ; <i16> [#uses=1]
- store i16 %77, i16* @us, align 2
- %78 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %79 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %78, i32 11) ; <i32> [#uses=1]
- store i32 %79, i32* @si, align 4
- %80 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %81 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %80, i32 11) ; <i32> [#uses=1]
- store i32 %81, i32* @ui, align 4
- %82 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %83 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %82, i32 11) ; <i32> [#uses=1]
- store i32 %83, i32* @sl, align 4
- %84 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %85 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %84, i32 11) ; <i32> [#uses=1]
- store i32 %85, i32* @ul, align 4
- %86 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %87 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %86, i64 11) ; <i64> [#uses=1]
- store i64 %87, i64* @sll, align 8
- %88 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %89 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %88, i64 11) ; <i64> [#uses=1]
- store i64 %89, i64* @ull, align 8
- %90 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
- store i8 %90, i8* @sc, align 1
- %91 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
- store i8 %91, i8* @uc, align 1
- %92 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %93 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %92, i16 11) ; <i16> [#uses=1]
- store i16 %93, i16* @ss, align 2
- %94 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %95 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %94, i16 11) ; <i16> [#uses=1]
- store i16 %95, i16* @us, align 2
- %96 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %97 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %96, i32 11) ; <i32> [#uses=1]
- store i32 %97, i32* @si, align 4
- %98 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %99 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %98, i32 11) ; <i32> [#uses=1]
- store i32 %99, i32* @ui, align 4
- %100 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %101 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %100, i32 11) ; <i32> [#uses=1]
- store i32 %101, i32* @sl, align 4
- %102 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %103 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %102, i32 11) ; <i32> [#uses=1]
- store i32 %103, i32* @ul, align 4
- %104 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %105 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %104, i64 11) ; <i64> [#uses=1]
- store i64 %105, i64* @sll, align 8
- %106 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %107 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %106, i64 11) ; <i64> [#uses=1]
- store i64 %107, i64* @ull, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define void @test_op_and_fetch() nounwind {
-entry:
- %0 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %1 = zext i8 %0 to i32 ; <i32> [#uses=1]
- %2 = trunc i32 %1 to i8 ; <i8> [#uses=2]
- %3 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @sc, i8 %2) ; <i8> [#uses=1]
- %4 = add i8 %3, %2 ; <i8> [#uses=1]
- store i8 %4, i8* @sc, align 1
- %5 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %6 = zext i8 %5 to i32 ; <i32> [#uses=1]
- %7 = trunc i32 %6 to i8 ; <i8> [#uses=2]
- %8 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @uc, i8 %7) ; <i8> [#uses=1]
- %9 = add i8 %8, %7 ; <i8> [#uses=1]
- store i8 %9, i8* @uc, align 1
- %10 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %11 = zext i8 %10 to i32 ; <i32> [#uses=1]
- %12 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %13 = trunc i32 %11 to i16 ; <i16> [#uses=2]
- %14 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %12, i16 %13) ; <i16> [#uses=1]
- %15 = add i16 %14, %13 ; <i16> [#uses=1]
- store i16 %15, i16* @ss, align 2
- %16 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %17 = zext i8 %16 to i32 ; <i32> [#uses=1]
- %18 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %19 = trunc i32 %17 to i16 ; <i16> [#uses=2]
- %20 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %18, i16 %19) ; <i16> [#uses=1]
- %21 = add i16 %20, %19 ; <i16> [#uses=1]
- store i16 %21, i16* @us, align 2
- %22 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %23 = zext i8 %22 to i32 ; <i32> [#uses=2]
- %24 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %25 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %24, i32 %23) ; <i32> [#uses=1]
- %26 = add i32 %25, %23 ; <i32> [#uses=1]
- store i32 %26, i32* @si, align 4
- %27 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %28 = zext i8 %27 to i32 ; <i32> [#uses=2]
- %29 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %30 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %29, i32 %28) ; <i32> [#uses=1]
- %31 = add i32 %30, %28 ; <i32> [#uses=1]
- store i32 %31, i32* @ui, align 4
- %32 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %33 = zext i8 %32 to i32 ; <i32> [#uses=2]
- %34 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %35 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %34, i32 %33) ; <i32> [#uses=1]
- %36 = add i32 %35, %33 ; <i32> [#uses=1]
- store i32 %36, i32* @sl, align 4
- %37 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %38 = zext i8 %37 to i32 ; <i32> [#uses=2]
- %39 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %40 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %39, i32 %38) ; <i32> [#uses=1]
- %41 = add i32 %40, %38 ; <i32> [#uses=1]
- store i32 %41, i32* @ul, align 4
- %42 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %43 = zext i8 %42 to i64 ; <i64> [#uses=2]
- %44 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %45 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %44, i64 %43) ; <i64> [#uses=1]
- %46 = add i64 %45, %43 ; <i64> [#uses=1]
- store i64 %46, i64* @sll, align 8
- %47 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %48 = zext i8 %47 to i64 ; <i64> [#uses=2]
- %49 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %50 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %49, i64 %48) ; <i64> [#uses=1]
- %51 = add i64 %50, %48 ; <i64> [#uses=1]
- store i64 %51, i64* @ull, align 8
- %52 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %53 = zext i8 %52 to i32 ; <i32> [#uses=1]
- %54 = trunc i32 %53 to i8 ; <i8> [#uses=2]
- %55 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @sc, i8 %54) ; <i8> [#uses=1]
- %56 = sub i8 %55, %54 ; <i8> [#uses=1]
- store i8 %56, i8* @sc, align 1
- %57 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %58 = zext i8 %57 to i32 ; <i32> [#uses=1]
- %59 = trunc i32 %58 to i8 ; <i8> [#uses=2]
- %60 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @uc, i8 %59) ; <i8> [#uses=1]
- %61 = sub i8 %60, %59 ; <i8> [#uses=1]
- store i8 %61, i8* @uc, align 1
- %62 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %63 = zext i8 %62 to i32 ; <i32> [#uses=1]
- %64 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %65 = trunc i32 %63 to i16 ; <i16> [#uses=2]
- %66 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %64, i16 %65) ; <i16> [#uses=1]
- %67 = sub i16 %66, %65 ; <i16> [#uses=1]
- store i16 %67, i16* @ss, align 2
- %68 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %69 = zext i8 %68 to i32 ; <i32> [#uses=1]
- %70 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %71 = trunc i32 %69 to i16 ; <i16> [#uses=2]
- %72 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %70, i16 %71) ; <i16> [#uses=1]
- %73 = sub i16 %72, %71 ; <i16> [#uses=1]
- store i16 %73, i16* @us, align 2
- %74 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %75 = zext i8 %74 to i32 ; <i32> [#uses=2]
- %76 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %77 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %76, i32 %75) ; <i32> [#uses=1]
- %78 = sub i32 %77, %75 ; <i32> [#uses=1]
- store i32 %78, i32* @si, align 4
- %79 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %80 = zext i8 %79 to i32 ; <i32> [#uses=2]
- %81 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %82 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %81, i32 %80) ; <i32> [#uses=1]
- %83 = sub i32 %82, %80 ; <i32> [#uses=1]
- store i32 %83, i32* @ui, align 4
- %84 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %85 = zext i8 %84 to i32 ; <i32> [#uses=2]
- %86 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %87 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %86, i32 %85) ; <i32> [#uses=1]
- %88 = sub i32 %87, %85 ; <i32> [#uses=1]
- store i32 %88, i32* @sl, align 4
- %89 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %90 = zext i8 %89 to i32 ; <i32> [#uses=2]
- %91 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %92 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %91, i32 %90) ; <i32> [#uses=1]
- %93 = sub i32 %92, %90 ; <i32> [#uses=1]
- store i32 %93, i32* @ul, align 4
- %94 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %95 = zext i8 %94 to i64 ; <i64> [#uses=2]
- %96 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %97 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %96, i64 %95) ; <i64> [#uses=1]
- %98 = sub i64 %97, %95 ; <i64> [#uses=1]
- store i64 %98, i64* @sll, align 8
- %99 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %100 = zext i8 %99 to i64 ; <i64> [#uses=2]
- %101 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %102 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %101, i64 %100) ; <i64> [#uses=1]
- %103 = sub i64 %102, %100 ; <i64> [#uses=1]
- store i64 %103, i64* @ull, align 8
- %104 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %105 = zext i8 %104 to i32 ; <i32> [#uses=1]
- %106 = trunc i32 %105 to i8 ; <i8> [#uses=2]
- %107 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @sc, i8 %106) ; <i8> [#uses=1]
- %108 = or i8 %107, %106 ; <i8> [#uses=1]
- store i8 %108, i8* @sc, align 1
- %109 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %110 = zext i8 %109 to i32 ; <i32> [#uses=1]
- %111 = trunc i32 %110 to i8 ; <i8> [#uses=2]
- %112 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @uc, i8 %111) ; <i8> [#uses=1]
- %113 = or i8 %112, %111 ; <i8> [#uses=1]
- store i8 %113, i8* @uc, align 1
- %114 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %115 = zext i8 %114 to i32 ; <i32> [#uses=1]
- %116 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %117 = trunc i32 %115 to i16 ; <i16> [#uses=2]
- %118 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %116, i16 %117) ; <i16> [#uses=1]
- %119 = or i16 %118, %117 ; <i16> [#uses=1]
- store i16 %119, i16* @ss, align 2
- %120 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %121 = zext i8 %120 to i32 ; <i32> [#uses=1]
- %122 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %123 = trunc i32 %121 to i16 ; <i16> [#uses=2]
- %124 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %122, i16 %123) ; <i16> [#uses=1]
- %125 = or i16 %124, %123 ; <i16> [#uses=1]
- store i16 %125, i16* @us, align 2
- %126 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %127 = zext i8 %126 to i32 ; <i32> [#uses=2]
- %128 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %129 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %128, i32 %127) ; <i32> [#uses=1]
- %130 = or i32 %129, %127 ; <i32> [#uses=1]
- store i32 %130, i32* @si, align 4
- %131 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %132 = zext i8 %131 to i32 ; <i32> [#uses=2]
- %133 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %134 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %133, i32 %132) ; <i32> [#uses=1]
- %135 = or i32 %134, %132 ; <i32> [#uses=1]
- store i32 %135, i32* @ui, align 4
- %136 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %137 = zext i8 %136 to i32 ; <i32> [#uses=2]
- %138 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %139 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %138, i32 %137) ; <i32> [#uses=1]
- %140 = or i32 %139, %137 ; <i32> [#uses=1]
- store i32 %140, i32* @sl, align 4
- %141 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %142 = zext i8 %141 to i32 ; <i32> [#uses=2]
- %143 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %144 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %143, i32 %142) ; <i32> [#uses=1]
- %145 = or i32 %144, %142 ; <i32> [#uses=1]
- store i32 %145, i32* @ul, align 4
- %146 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %147 = zext i8 %146 to i64 ; <i64> [#uses=2]
- %148 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %149 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %148, i64 %147) ; <i64> [#uses=1]
- %150 = or i64 %149, %147 ; <i64> [#uses=1]
- store i64 %150, i64* @sll, align 8
- %151 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %152 = zext i8 %151 to i64 ; <i64> [#uses=2]
- %153 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %154 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %153, i64 %152) ; <i64> [#uses=1]
- %155 = or i64 %154, %152 ; <i64> [#uses=1]
- store i64 %155, i64* @ull, align 8
- %156 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %157 = zext i8 %156 to i32 ; <i32> [#uses=1]
- %158 = trunc i32 %157 to i8 ; <i8> [#uses=2]
- %159 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @sc, i8 %158) ; <i8> [#uses=1]
- %160 = xor i8 %159, %158 ; <i8> [#uses=1]
- store i8 %160, i8* @sc, align 1
- %161 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %162 = zext i8 %161 to i32 ; <i32> [#uses=1]
- %163 = trunc i32 %162 to i8 ; <i8> [#uses=2]
- %164 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @uc, i8 %163) ; <i8> [#uses=1]
- %165 = xor i8 %164, %163 ; <i8> [#uses=1]
- store i8 %165, i8* @uc, align 1
- %166 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %167 = zext i8 %166 to i32 ; <i32> [#uses=1]
- %168 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %169 = trunc i32 %167 to i16 ; <i16> [#uses=2]
- %170 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %168, i16 %169) ; <i16> [#uses=1]
- %171 = xor i16 %170, %169 ; <i16> [#uses=1]
- store i16 %171, i16* @ss, align 2
- %172 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %173 = zext i8 %172 to i32 ; <i32> [#uses=1]
- %174 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %175 = trunc i32 %173 to i16 ; <i16> [#uses=2]
- %176 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %174, i16 %175) ; <i16> [#uses=1]
- %177 = xor i16 %176, %175 ; <i16> [#uses=1]
- store i16 %177, i16* @us, align 2
- %178 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %179 = zext i8 %178 to i32 ; <i32> [#uses=2]
- %180 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %181 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %180, i32 %179) ; <i32> [#uses=1]
- %182 = xor i32 %181, %179 ; <i32> [#uses=1]
- store i32 %182, i32* @si, align 4
- %183 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %184 = zext i8 %183 to i32 ; <i32> [#uses=2]
- %185 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %186 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %185, i32 %184) ; <i32> [#uses=1]
- %187 = xor i32 %186, %184 ; <i32> [#uses=1]
- store i32 %187, i32* @ui, align 4
- %188 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %189 = zext i8 %188 to i32 ; <i32> [#uses=2]
- %190 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %191 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %190, i32 %189) ; <i32> [#uses=1]
- %192 = xor i32 %191, %189 ; <i32> [#uses=1]
- store i32 %192, i32* @sl, align 4
- %193 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %194 = zext i8 %193 to i32 ; <i32> [#uses=2]
- %195 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %196 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %195, i32 %194) ; <i32> [#uses=1]
- %197 = xor i32 %196, %194 ; <i32> [#uses=1]
- store i32 %197, i32* @ul, align 4
- %198 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %199 = zext i8 %198 to i64 ; <i64> [#uses=2]
- %200 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %201 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %200, i64 %199) ; <i64> [#uses=1]
- %202 = xor i64 %201, %199 ; <i64> [#uses=1]
- store i64 %202, i64* @sll, align 8
- %203 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %204 = zext i8 %203 to i64 ; <i64> [#uses=2]
- %205 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %206 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %205, i64 %204) ; <i64> [#uses=1]
- %207 = xor i64 %206, %204 ; <i64> [#uses=1]
- store i64 %207, i64* @ull, align 8
- %208 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %209 = zext i8 %208 to i32 ; <i32> [#uses=1]
- %210 = trunc i32 %209 to i8 ; <i8> [#uses=2]
- %211 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @sc, i8 %210) ; <i8> [#uses=1]
- %212 = and i8 %211, %210 ; <i8> [#uses=1]
- store i8 %212, i8* @sc, align 1
- %213 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %214 = zext i8 %213 to i32 ; <i32> [#uses=1]
- %215 = trunc i32 %214 to i8 ; <i8> [#uses=2]
- %216 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @uc, i8 %215) ; <i8> [#uses=1]
- %217 = and i8 %216, %215 ; <i8> [#uses=1]
- store i8 %217, i8* @uc, align 1
- %218 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %219 = zext i8 %218 to i32 ; <i32> [#uses=1]
- %220 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %221 = trunc i32 %219 to i16 ; <i16> [#uses=2]
- %222 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %220, i16 %221) ; <i16> [#uses=1]
- %223 = and i16 %222, %221 ; <i16> [#uses=1]
- store i16 %223, i16* @ss, align 2
- %224 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %225 = zext i8 %224 to i32 ; <i32> [#uses=1]
- %226 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %227 = trunc i32 %225 to i16 ; <i16> [#uses=2]
- %228 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %226, i16 %227) ; <i16> [#uses=1]
- %229 = and i16 %228, %227 ; <i16> [#uses=1]
- store i16 %229, i16* @us, align 2
- %230 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %231 = zext i8 %230 to i32 ; <i32> [#uses=2]
- %232 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %233 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %232, i32 %231) ; <i32> [#uses=1]
- %234 = and i32 %233, %231 ; <i32> [#uses=1]
- store i32 %234, i32* @si, align 4
- %235 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %236 = zext i8 %235 to i32 ; <i32> [#uses=2]
- %237 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %238 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %237, i32 %236) ; <i32> [#uses=1]
- %239 = and i32 %238, %236 ; <i32> [#uses=1]
- store i32 %239, i32* @ui, align 4
- %240 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %241 = zext i8 %240 to i32 ; <i32> [#uses=2]
- %242 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %243 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %242, i32 %241) ; <i32> [#uses=1]
- %244 = and i32 %243, %241 ; <i32> [#uses=1]
- store i32 %244, i32* @sl, align 4
- %245 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %246 = zext i8 %245 to i32 ; <i32> [#uses=2]
- %247 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %248 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %247, i32 %246) ; <i32> [#uses=1]
- %249 = and i32 %248, %246 ; <i32> [#uses=1]
- store i32 %249, i32* @ul, align 4
- %250 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %251 = zext i8 %250 to i64 ; <i64> [#uses=2]
- %252 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %253 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %252, i64 %251) ; <i64> [#uses=1]
- %254 = and i64 %253, %251 ; <i64> [#uses=1]
- store i64 %254, i64* @sll, align 8
- %255 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %256 = zext i8 %255 to i64 ; <i64> [#uses=2]
- %257 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %258 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %257, i64 %256) ; <i64> [#uses=1]
- %259 = and i64 %258, %256 ; <i64> [#uses=1]
- store i64 %259, i64* @ull, align 8
- %260 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %261 = zext i8 %260 to i32 ; <i32> [#uses=1]
- %262 = trunc i32 %261 to i8 ; <i8> [#uses=2]
- %263 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @sc, i8 %262) ; <i8> [#uses=1]
- %264 = xor i8 %263, -1 ; <i8> [#uses=1]
- %265 = and i8 %264, %262 ; <i8> [#uses=1]
- store i8 %265, i8* @sc, align 1
- %266 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %267 = zext i8 %266 to i32 ; <i32> [#uses=1]
- %268 = trunc i32 %267 to i8 ; <i8> [#uses=2]
- %269 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @uc, i8 %268) ; <i8> [#uses=1]
- %270 = xor i8 %269, -1 ; <i8> [#uses=1]
- %271 = and i8 %270, %268 ; <i8> [#uses=1]
- store i8 %271, i8* @uc, align 1
- %272 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %273 = zext i8 %272 to i32 ; <i32> [#uses=1]
- %274 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %275 = trunc i32 %273 to i16 ; <i16> [#uses=2]
- %276 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %274, i16 %275) ; <i16> [#uses=1]
- %277 = xor i16 %276, -1 ; <i16> [#uses=1]
- %278 = and i16 %277, %275 ; <i16> [#uses=1]
- store i16 %278, i16* @ss, align 2
- %279 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %280 = zext i8 %279 to i32 ; <i32> [#uses=1]
- %281 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %282 = trunc i32 %280 to i16 ; <i16> [#uses=2]
- %283 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %281, i16 %282) ; <i16> [#uses=1]
- %284 = xor i16 %283, -1 ; <i16> [#uses=1]
- %285 = and i16 %284, %282 ; <i16> [#uses=1]
- store i16 %285, i16* @us, align 2
- %286 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %287 = zext i8 %286 to i32 ; <i32> [#uses=2]
- %288 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %289 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %288, i32 %287) ; <i32> [#uses=1]
- %290 = xor i32 %289, -1 ; <i32> [#uses=1]
- %291 = and i32 %290, %287 ; <i32> [#uses=1]
- store i32 %291, i32* @si, align 4
- %292 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %293 = zext i8 %292 to i32 ; <i32> [#uses=2]
- %294 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %295 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %294, i32 %293) ; <i32> [#uses=1]
- %296 = xor i32 %295, -1 ; <i32> [#uses=1]
- %297 = and i32 %296, %293 ; <i32> [#uses=1]
- store i32 %297, i32* @ui, align 4
- %298 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %299 = zext i8 %298 to i32 ; <i32> [#uses=2]
- %300 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %301 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %300, i32 %299) ; <i32> [#uses=1]
- %302 = xor i32 %301, -1 ; <i32> [#uses=1]
- %303 = and i32 %302, %299 ; <i32> [#uses=1]
- store i32 %303, i32* @sl, align 4
- %304 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %305 = zext i8 %304 to i32 ; <i32> [#uses=2]
- %306 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %307 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %306, i32 %305) ; <i32> [#uses=1]
- %308 = xor i32 %307, -1 ; <i32> [#uses=1]
- %309 = and i32 %308, %305 ; <i32> [#uses=1]
- store i32 %309, i32* @ul, align 4
- %310 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %311 = zext i8 %310 to i64 ; <i64> [#uses=2]
- %312 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- %313 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %312, i64 %311) ; <i64> [#uses=1]
- %314 = xor i64 %313, -1 ; <i64> [#uses=1]
- %315 = and i64 %314, %311 ; <i64> [#uses=1]
- store i64 %315, i64* @sll, align 8
- %316 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %317 = zext i8 %316 to i64 ; <i64> [#uses=2]
- %318 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- %319 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %318, i64 %317) ; <i64> [#uses=1]
- %320 = xor i64 %319, -1 ; <i64> [#uses=1]
- %321 = and i64 %320, %317 ; <i64> [#uses=1]
- store i64 %321, i64* @ull, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define void @test_compare_and_swap() nounwind {
-entry:
- %0 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %1 = zext i8 %0 to i32 ; <i32> [#uses=1]
- %2 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %3 = zext i8 %2 to i32 ; <i32> [#uses=1]
- %4 = trunc i32 %3 to i8 ; <i8> [#uses=1]
- %5 = trunc i32 %1 to i8 ; <i8> [#uses=1]
- %6 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @sc, i8 %4, i8 %5) ; <i8> [#uses=1]
- store i8 %6, i8* @sc, align 1
- %7 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %8 = zext i8 %7 to i32 ; <i32> [#uses=1]
- %9 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %10 = zext i8 %9 to i32 ; <i32> [#uses=1]
- %11 = trunc i32 %10 to i8 ; <i8> [#uses=1]
- %12 = trunc i32 %8 to i8 ; <i8> [#uses=1]
- %13 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @uc, i8 %11, i8 %12) ; <i8> [#uses=1]
- store i8 %13, i8* @uc, align 1
- %14 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %15 = sext i8 %14 to i16 ; <i16> [#uses=1]
- %16 = zext i16 %15 to i32 ; <i32> [#uses=1]
- %17 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %18 = zext i8 %17 to i32 ; <i32> [#uses=1]
- %19 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %20 = trunc i32 %18 to i16 ; <i16> [#uses=1]
- %21 = trunc i32 %16 to i16 ; <i16> [#uses=1]
- %22 = call i16 @llvm.atomic.cmp.swap.i16.p0i16(i16* %19, i16 %20, i16 %21) ; <i16> [#uses=1]
- store i16 %22, i16* @ss, align 2
- %23 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %24 = sext i8 %23 to i16 ; <i16> [#uses=1]
- %25 = zext i16 %24 to i32 ; <i32> [#uses=1]
- %26 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %27 = zext i8 %26 to i32 ; <i32> [#uses=1]
- %28 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %29 = trunc i32 %27 to i16 ; <i16> [#uses=1]
- %30 = trunc i32 %25 to i16 ; <i16> [#uses=1]
- %31 = call i16 @llvm.atomic.cmp.swap.i16.p0i16(i16* %28, i16 %29, i16 %30) ; <i16> [#uses=1]
- store i16 %31, i16* @us, align 2
- %32 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %33 = sext i8 %32 to i32 ; <i32> [#uses=1]
- %34 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %35 = zext i8 %34 to i32 ; <i32> [#uses=1]
- %36 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %37 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %36, i32 %35, i32 %33) ; <i32> [#uses=1]
- store i32 %37, i32* @si, align 4
- %38 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %39 = sext i8 %38 to i32 ; <i32> [#uses=1]
- %40 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %41 = zext i8 %40 to i32 ; <i32> [#uses=1]
- %42 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %43 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %42, i32 %41, i32 %39) ; <i32> [#uses=1]
- store i32 %43, i32* @ui, align 4
- %44 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %45 = sext i8 %44 to i32 ; <i32> [#uses=1]
- %46 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %47 = zext i8 %46 to i32 ; <i32> [#uses=1]
- %48 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %49 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %48, i32 %47, i32 %45) ; <i32> [#uses=1]
- store i32 %49, i32* @sl, align 4
- %50 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %51 = sext i8 %50 to i32 ; <i32> [#uses=1]
- %52 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %53 = zext i8 %52 to i32 ; <i32> [#uses=1]
- %54 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %55 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %54, i32 %53, i32 %51) ; <i32> [#uses=1]
- store i32 %55, i32* @ul, align 4
- %56 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %57 = zext i8 %56 to i32 ; <i32> [#uses=1]
- %58 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %59 = zext i8 %58 to i32 ; <i32> [#uses=1]
- %60 = trunc i32 %59 to i8 ; <i8> [#uses=2]
- %61 = trunc i32 %57 to i8 ; <i8> [#uses=1]
- %62 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @sc, i8 %60, i8 %61) ; <i8> [#uses=1]
- %63 = icmp eq i8 %62, %60 ; <i1> [#uses=1]
- %64 = zext i1 %63 to i8 ; <i8> [#uses=1]
- %65 = zext i8 %64 to i32 ; <i32> [#uses=1]
- store i32 %65, i32* @ui, align 4
- %66 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %67 = zext i8 %66 to i32 ; <i32> [#uses=1]
- %68 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %69 = zext i8 %68 to i32 ; <i32> [#uses=1]
- %70 = trunc i32 %69 to i8 ; <i8> [#uses=2]
- %71 = trunc i32 %67 to i8 ; <i8> [#uses=1]
- %72 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @uc, i8 %70, i8 %71) ; <i8> [#uses=1]
- %73 = icmp eq i8 %72, %70 ; <i1> [#uses=1]
- %74 = zext i1 %73 to i8 ; <i8> [#uses=1]
- %75 = zext i8 %74 to i32 ; <i32> [#uses=1]
- store i32 %75, i32* @ui, align 4
- %76 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %77 = sext i8 %76 to i16 ; <i16> [#uses=1]
- %78 = zext i16 %77 to i32 ; <i32> [#uses=1]
- %79 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %80 = zext i8 %79 to i32 ; <i32> [#uses=1]
- %81 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %82 = trunc i32 %80 to i16 ; <i16> [#uses=2]
- %83 = trunc i32 %78 to i16 ; <i16> [#uses=1]
- %84 = call i16 @llvm.atomic.cmp.swap.i16.p0i16(i16* %81, i16 %82, i16 %83) ; <i16> [#uses=1]
- %85 = icmp eq i16 %84, %82 ; <i1> [#uses=1]
- %86 = zext i1 %85 to i8 ; <i8> [#uses=1]
- %87 = zext i8 %86 to i32 ; <i32> [#uses=1]
- store i32 %87, i32* @ui, align 4
- %88 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %89 = sext i8 %88 to i16 ; <i16> [#uses=1]
- %90 = zext i16 %89 to i32 ; <i32> [#uses=1]
- %91 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %92 = zext i8 %91 to i32 ; <i32> [#uses=1]
- %93 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %94 = trunc i32 %92 to i16 ; <i16> [#uses=2]
- %95 = trunc i32 %90 to i16 ; <i16> [#uses=1]
- %96 = call i16 @llvm.atomic.cmp.swap.i16.p0i16(i16* %93, i16 %94, i16 %95) ; <i16> [#uses=1]
- %97 = icmp eq i16 %96, %94 ; <i1> [#uses=1]
- %98 = zext i1 %97 to i8 ; <i8> [#uses=1]
- %99 = zext i8 %98 to i32 ; <i32> [#uses=1]
- store i32 %99, i32* @ui, align 4
- %100 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %101 = sext i8 %100 to i32 ; <i32> [#uses=1]
- %102 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %103 = zext i8 %102 to i32 ; <i32> [#uses=2]
- %104 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %105 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %104, i32 %103, i32 %101) ; <i32> [#uses=1]
- %106 = icmp eq i32 %105, %103 ; <i1> [#uses=1]
- %107 = zext i1 %106 to i8 ; <i8> [#uses=1]
- %108 = zext i8 %107 to i32 ; <i32> [#uses=1]
- store i32 %108, i32* @ui, align 4
- %109 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %110 = sext i8 %109 to i32 ; <i32> [#uses=1]
- %111 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %112 = zext i8 %111 to i32 ; <i32> [#uses=2]
- %113 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %114 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %113, i32 %112, i32 %110) ; <i32> [#uses=1]
- %115 = icmp eq i32 %114, %112 ; <i1> [#uses=1]
- %116 = zext i1 %115 to i8 ; <i8> [#uses=1]
- %117 = zext i8 %116 to i32 ; <i32> [#uses=1]
- store i32 %117, i32* @ui, align 4
- %118 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %119 = sext i8 %118 to i32 ; <i32> [#uses=1]
- %120 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %121 = zext i8 %120 to i32 ; <i32> [#uses=2]
- %122 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %123 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %122, i32 %121, i32 %119) ; <i32> [#uses=1]
- %124 = icmp eq i32 %123, %121 ; <i1> [#uses=1]
- %125 = zext i1 %124 to i8 ; <i8> [#uses=1]
- %126 = zext i8 %125 to i32 ; <i32> [#uses=1]
- store i32 %126, i32* @ui, align 4
- %127 = load i8* @sc, align 1 ; <i8> [#uses=1]
- %128 = sext i8 %127 to i32 ; <i32> [#uses=1]
- %129 = load i8* @uc, align 1 ; <i8> [#uses=1]
- %130 = zext i8 %129 to i32 ; <i32> [#uses=2]
- %131 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %132 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %131, i32 %130, i32 %128) ; <i32> [#uses=1]
- %133 = icmp eq i32 %132, %130 ; <i1> [#uses=1]
- %134 = zext i1 %133 to i8 ; <i8> [#uses=1]
- %135 = zext i8 %134 to i32 ; <i32> [#uses=1]
- store i32 %135, i32* @ui, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind
-
-declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind
-
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
-
-define void @test_lock() nounwind {
-entry:
- %0 = call i8 @llvm.atomic.swap.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=1]
- store i8 %0, i8* @sc, align 1
- %1 = call i8 @llvm.atomic.swap.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=1]
- store i8 %1, i8* @uc, align 1
- %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- %3 = call i16 @llvm.atomic.swap.i16.p0i16(i16* %2, i16 1) ; <i16> [#uses=1]
- store i16 %3, i16* @ss, align 2
- %4 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- %5 = call i16 @llvm.atomic.swap.i16.p0i16(i16* %4, i16 1) ; <i16> [#uses=1]
- store i16 %5, i16* @us, align 2
- %6 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- %7 = call i32 @llvm.atomic.swap.i32.p0i32(i32* %6, i32 1) ; <i32> [#uses=1]
- store i32 %7, i32* @si, align 4
- %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- %9 = call i32 @llvm.atomic.swap.i32.p0i32(i32* %8, i32 1) ; <i32> [#uses=1]
- store i32 %9, i32* @ui, align 4
- %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- %11 = call i32 @llvm.atomic.swap.i32.p0i32(i32* %10, i32 1) ; <i32> [#uses=1]
- store i32 %11, i32* @sl, align 4
- %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- %13 = call i32 @llvm.atomic.swap.i32.p0i32(i32* %12, i32 1) ; <i32> [#uses=1]
- store i32 %13, i32* @ul, align 4
- call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
- volatile store i8 0, i8* @sc, align 1
- volatile store i8 0, i8* @uc, align 1
- %14 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
- volatile store i16 0, i16* %14, align 2
- %15 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
- volatile store i16 0, i16* %15, align 2
- %16 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
- volatile store i32 0, i32* %16, align 4
- %17 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
- volatile store i32 0, i32* %17, align 4
- %18 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
- volatile store i32 0, i32* %18, align 4
- %19 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
- volatile store i32 0, i32* %19, align 4
- %20 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
- volatile store i64 0, i64* %20, align 8
- %21 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
- volatile store i64 0, i64* %21, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-06-MMXISelBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-10-06-MMXISelBug.ll
deleted file mode 100644
index 7f7b1a4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-06-MMXISelBug.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2
-; PR2850
-
-@tmp_V2i = common global <2 x i32> zeroinitializer ; <<2 x i32>*> [#uses=2]
-
-define void @f0() nounwind {
-entry:
- %0 = load <2 x i32>* @tmp_V2i, align 8 ; <<2 x i32>> [#uses=1]
- %1 = shufflevector <2 x i32> %0, <2 x i32> undef, <2 x i32> zeroinitializer ; <<2 x i32>> [#uses=1]
- store <2 x i32> %1, <2 x i32>* @tmp_V2i, align 8
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-06-x87ld-nan-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-10-06-x87ld-nan-1.ll
deleted file mode 100644
index a135cd4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-06-x87ld-nan-1.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; ModuleID = 'nan.bc'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-f80:32:32-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
-; RUN: llc < %s -march=x86 -mattr=-sse2,-sse3,-sse | grep fldl
-; This NaN should be shortened to a double (not a float).
-
-declare x86_stdcallcc void @_D3nan5printFeZv(x86_fp80 %f)
-
-define i32 @main() {
-entry_nan.main:
- call x86_stdcallcc void @_D3nan5printFeZv(x86_fp80 0xK7FFFC001234000000800)
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-06-x87ld-nan-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-10-06-x87ld-nan-2.ll
deleted file mode 100644
index bd48105..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-06-x87ld-nan-2.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; ModuleID = 'nan.bc'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-f80:32:32-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
-; RUN: llc < %s -march=x86 -mattr=-sse2,-sse3,-sse | grep fldt | count 3
-; it is not safe to shorten any of these NaNs.
-
-declare x86_stdcallcc void @_D3nan5printFeZv(x86_fp80 %f)
-
-@_D3nan4rvale = global x86_fp80 0xK7FFF8001234000000000 ; <x86_fp80*> [#uses=1]
-
-define i32 @main() {
-entry_nan.main:
- %tmp = load x86_fp80* @_D3nan4rvale ; <x86_fp80> [#uses=1]
- call x86_stdcallcc void @_D3nan5printFeZv(x86_fp80 %tmp)
- call x86_stdcallcc void @_D3nan5printFeZv(x86_fp80 0xK7FFF8001234000000000)
- call x86_stdcallcc void @_D3nan5printFeZv(x86_fp80 0xK7FFFC001234000000400)
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-07-SSEISelBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-10-07-SSEISelBug.ll
deleted file mode 100644
index bc57612..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-07-SSEISelBug.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse,-sse2
-
-define <4 x float> @f(float %w) nounwind {
-entry:
- %retval = alloca <4 x float> ; <<4 x float>*> [#uses=2]
- %w.addr = alloca float ; <float*> [#uses=2]
- %.compoundliteral = alloca <4 x float> ; <<4 x float>*> [#uses=2]
- store float %w, float* %w.addr
- %tmp = load float* %w.addr ; <float> [#uses=1]
- %0 = insertelement <4 x float> undef, float %tmp, i32 0 ; <<4 x float>> [#uses=1]
- %1 = insertelement <4 x float> %0, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
- %2 = insertelement <4 x float> %1, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
- %3 = insertelement <4 x float> %2, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
- store <4 x float> %3, <4 x float>* %.compoundliteral
- %tmp1 = load <4 x float>* %.compoundliteral ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp1, <4 x float>* %retval
- br label %return
-
-return: ; preds = %entry
- %4 = load <4 x float>* %retval ; <<4 x float>> [#uses=1]
- ret <4 x float> %4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-11-CallCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-10-11-CallCrash.ll
deleted file mode 100644
index efc6125..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-11-CallCrash.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s
-; PR2735
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin7"
-@g_385 = external global i32 ; <i32*> [#uses=1]
-
-define i32 @func_45(i64 %p_46, i32 %p_48) nounwind {
-entry:
- %0 = tail call i32 (...)* @lshift_s_u(i64 %p_46, i64 0) nounwind ; <i32> [#uses=0]
- %1 = load i32* @g_385, align 4 ; <i32> [#uses=1]
- %2 = shl i32 %1, 1 ; <i32> [#uses=1]
- %3 = and i32 %2, 32 ; <i32> [#uses=1]
- %4 = tail call i32 (...)* @func_87(i32 undef, i32 %p_48, i32 1) nounwind ; <i32> [#uses=1]
- %5 = add i32 %3, %4 ; <i32> [#uses=1]
- %6 = tail call i32 (...)* @div_rhs(i32 %5) nounwind ; <i32> [#uses=0]
- ret i32 undef
-}
-
-declare i32 @lshift_s_u(...)
-declare i32 @func_87(...)
-declare i32 @div_rhs(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-13-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-10-13-CoalescerBug.ll
deleted file mode 100644
index 4d3f8c2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-13-CoalescerBug.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR2775
-
-define i32 @func_77(i8 zeroext %p_79) nounwind {
-entry:
- %0 = tail call i32 (...)* @func_43(i32 1) nounwind ; <i32> [#uses=1]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %bb3, label %bb
-
-bb: ; preds = %entry
- br label %bb3
-
-bb3: ; preds = %bb, %entry
- %p_79_addr.0 = phi i8 [ 0, %bb ], [ %p_79, %entry ] ; <i8> [#uses=1]
- %2 = zext i8 %p_79_addr.0 to i32 ; <i32> [#uses=2]
- %3 = zext i1 false to i32 ; <i32> [#uses=2]
- %4 = tail call i32 (...)* @rshift_u_s(i32 1) nounwind ; <i32> [#uses=0]
- %5 = lshr i32 %2, %2 ; <i32> [#uses=3]
- %6 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- br i1 %6, label %bb6, label %bb9
-
-bb6: ; preds = %bb3
- %7 = ashr i32 %5, %3 ; <i32> [#uses=1]
- %8 = icmp eq i32 %7, 0 ; <i1> [#uses=1]
- %9 = select i1 %8, i32 %3, i32 0 ; <i32> [#uses=1]
- %. = shl i32 %5, %9 ; <i32> [#uses=1]
- br label %bb9
-
-bb9: ; preds = %bb6, %bb3
- %.0 = phi i32 [ %., %bb6 ], [ %5, %bb3 ] ; <i32> [#uses=0]
- br i1 false, label %return, label %bb10
-
-bb10: ; preds = %bb9
- ret i32 undef
-
-return: ; preds = %bb9
- ret i32 undef
-}
-
-declare i32 @func_43(...)
-
-declare i32 @rshift_u_s(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-16-SpillerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-10-16-SpillerBug.ll
deleted file mode 100644
index b8ca364..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-16-SpillerBug.ll
+++ /dev/null
@@ -1,155 +0,0 @@
-; RUN: llc < %s -relocation-model=pic -disable-fp-elim -mtriple=i386-apple-darwin | grep {andl.*7.*edi}
-
- %struct.XXDActiveTextureTargets = type { i64, i64, i64, i64, i64, i64 }
- %struct.XXDAlphaTest = type { float, i16, i8, i8 }
- %struct.XXDArrayRange = type { i8, i8, i8, i8 }
- %struct.XXDBlendMode = type { i16, i16, i16, i16, %struct.XXTColor4, i16, i16, i8, i8, i8, i8 }
- %struct.XXDClearColor = type { double, %struct.XXTColor4, %struct.XXTColor4, float, i32 }
- %struct.XXDClipPlane = type { i32, [6 x %struct.XXTColor4] }
- %struct.XXDColorBuffer = type { i16, i8, i8, [8 x i16], i8, i8, i8, i8 }
- %struct.XXDColorMatrix = type { [16 x float]*, %struct.XXDImagingCC }
- %struct.XXDConvolution = type { %struct.XXTColor4, %struct.XXDImagingCC, i16, i16, [0 x i32], float*, i32, i32 }
- %struct.XXDDepthTest = type { i16, i16, i8, i8, i8, i8, double, double }
- %struct.XXDFixedFunction = type { %struct.YYToken* }
- %struct.XXDFogMode = type { %struct.XXTColor4, float, float, float, float, float, i16, i16, i16, i8, i8 }
- %struct.XXDHintMode = type { i16, i16, i16, i16, i16, i16, i16, i16, i16, i16 }
- %struct.XXDHistogram = type { %struct.XXTFixedColor4*, i32, i16, i8, i8 }
- %struct.XXDImagingCC = type { { float, float }, { float, float }, { float, float }, { float, float } }
- %struct.XXDImagingSubset = type { %struct.XXDConvolution, %struct.XXDConvolution, %struct.XXDConvolution, %struct.XXDColorMatrix, %struct.XXDMinmax, %struct.XXDHistogram, %struct.XXDImagingCC, %struct.XXDImagingCC, %struct.XXDImagingCC, %struct.XXDImagingCC, i32, [0 x i32] }
- %struct.XXDLight = type { %struct.XXTColor4, %struct.XXTColor4, %struct.XXTColor4, %struct.XXTColor4, %struct.XXTCoord3, float, float, float, float, float, %struct.XXTCoord3, float, %struct.XXTCoord3, float, %struct.XXTCoord3, float, float, float, float, float }
- %struct.XXDLightModel = type { %struct.XXTColor4, [8 x %struct.XXDLight], [2 x %struct.XXDMaterial], i32, i16, i16, i16, i8, i8, i8, i8, i8, i8 }
- %struct.XXDLightProduct = type { %struct.XXTColor4, %struct.XXTColor4, %struct.XXTColor4 }
- %struct.XXDLineMode = type { float, i32, i16, i16, i8, i8, i8, i8 }
- %struct.XXDLogicOp = type { i16, i8, i8 }
- %struct.XXDMaskMode = type { i32, [3 x i32], i8, i8, i8, i8, i8, i8, i8, i8 }
- %struct.XXDMaterial = type { %struct.XXTColor4, %struct.XXTColor4, %struct.XXTColor4, %struct.XXTColor4, float, float, float, float, [8 x %struct.XXDLightProduct], %struct.XXTColor4, [8 x i32] }
- %struct.XXDMinmax = type { %struct.XXDMinmaxTable*, i16, i8, i8, [0 x i32] }
- %struct.XXDMinmaxTable = type { %struct.XXTColor4, %struct.XXTColor4 }
- %struct.XXDMultisample = type { float, i8, i8, i8, i8, i8, i8, i8, i8 }
- %struct.XXDPipelineProgramState = type { i8, i8, i8, i8, [0 x i32], %struct.XXTColor4* }
- %struct.XXDPixelMap = type { i32*, float*, float*, float*, float*, float*, float*, float*, float*, i32*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.XXDPixelMode = type { float, float, %struct.XXDPixelStore, %struct.XXDPixelTransfer, %struct.XXDPixelMap, %struct.XXDImagingSubset, i32, i32 }
- %struct.XXDPixelPack = type { i32, i32, i32, i32, i32, i32, i32, i32, i8, i8, i8, i8 }
- %struct.XXDPixelStore = type { %struct.XXDPixelPack, %struct.XXDPixelPack }
- %struct.XXDPixelTransfer = type { float, float, float, float, float, float, float, float, float, float, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float }
- %struct.XXDPointMode = type { float, float, float, float, %struct.XXTCoord3, float, i8, i8, i8, i8, i16, i16, i32, i16, i16 }
- %struct.XXDPolygonMode = type { [128 x i8], float, float, i16, i16, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8 }
- %struct.XXDRegisterCombiners = type { i8, i8, i8, i8, i32, [2 x %struct.XXTColor4], [8 x %struct.XXDRegisterCombinersPerStageState], %struct.XXDRegisterCombinersFinalStageState }
- %struct.XXDRegisterCombinersFinalStageState = type { i8, i8, i8, i8, [7 x %struct.XXDRegisterCombinersPerVariableState] }
- %struct.XXDRegisterCombinersPerPortionState = type { [4 x %struct.XXDRegisterCombinersPerVariableState], i8, i8, i8, i8, i16, i16, i16, i16, i16, i16 }
- %struct.XXDRegisterCombinersPerStageState = type { [2 x %struct.XXDRegisterCombinersPerPortionState], [2 x %struct.XXTColor4] }
- %struct.XXDRegisterCombinersPerVariableState = type { i16, i16, i16, i16 }
- %struct.XXDScissorTest = type { %struct.XXTFixedColor4, i8, i8, i8, i8 }
- %struct.XXDState = type <{ i16, i16, i16, i16, i32, i32, [256 x %struct.XXTColor4], [128 x %struct.XXTColor4], %struct.XXDViewport, %struct.XXDTransform, %struct.XXDLightModel, %struct.XXDActiveTextureTargets, %struct.XXDAlphaTest, %struct.XXDBlendMode, %struct.XXDClearColor, %struct.XXDColorBuffer, %struct.XXDDepthTest, %struct.XXDArrayRange, %struct.XXDFogMode, %struct.XXDHintMode, %struct.XXDLineMode, %struct.XXDLogicOp, %struct.XXDMaskMode, %struct.XXDPixelMode, %struct.XXDPointMode, %struct.XXDPolygonMode, %struct.XXDScissorTest, i32, %struct.XXDStencilTest, [8 x %struct.XXDTextureMode], [16 x %struct.XXDTextureImageMode], %struct.XXDArrayRange, [8 x %struct.XXDTextureCoordGen], %struct.XXDClipPlane, %struct.XXDMultisample, %struct.XXDRegisterCombiners, %struct.XXDArrayRange, %struct.XXDArrayRange, [3 x %struct.XXDPipelineProgramState], %struct.XXDArrayRange, %struct.XXDTransformFeedback, i32*, %struct.XXDFixedFunction, [3 x i32], [2 x i32] }>
- %struct.XXDStencilTest = type { [3 x { i32, i32, i16, i16, i16, i16 }], i32, [4 x i8] }
- %struct.XXDTextureCoordGen = type { { i16, i16, %struct.XXTColor4, %struct.XXTColor4 }, { i16, i16, %struct.XXTColor4, %struct.XXTColor4 }, { i16, i16, %struct.XXTColor4, %struct.XXTColor4 }, { i16, i16, %struct.XXTColor4, %struct.XXTColor4 }, i8, i8, i8, i8 }
- %struct.XXDTextureImageMode = type { float }
- %struct.XXDTextureMode = type { %struct.XXTColor4, i32, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, float, float, i16, i16, i16, i16, i16, i16, [4 x i16], i8, i8, i8, i8, [3 x float], [4 x float], float, float }
- %struct.XXDTextureRec = type opaque
- %struct.XXDTransform = type <{ [24 x [16 x float]], [24 x [16 x float]], [16 x float], float, float, float, float, float, i8, i8, i8, i8, i32, i32, i32, i16, i16, i8, i8, i8, i8, i32 }>
- %struct.XXDTransformFeedback = type { i8, i8, i8, i8, [0 x i32], [16 x i32], [16 x i32] }
- %struct.XXDViewport = type { float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, double, double, i32, i32, i32, i32, float, float, float, float }
- %struct.XXTColor4 = type { float, float, float, float }
- %struct.XXTCoord3 = type { float, float, float }
- %struct.XXTFixedColor4 = type { i32, i32, i32, i32 }
- %struct.XXVMTextures = type { [16 x %struct.XXDTextureRec*] }
- %struct.XXVMVPContext = type { i32 }
- %struct.XXVMVPStack = type { i32, i32 }
- %struct.YYToken = type { { i16, i16, i32 } }
- %struct._XXVMConstants = type { <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, float, float, float, float, float, float, float, float, float, float, float, float, [256 x float], [4096 x i8], [8 x float], [48 x float], [128 x float], [528 x i8], { void (i8*, i8*, i32, i8*)*, float (float)*, float (float)*, float (float)*, i32 (float)* } }
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (void (%struct.XXDState*, <4 x float>*, <4 x float>**, %struct._XXVMConstants*, %struct.YYToken*, %struct.XXVMVPContext*, %struct.XXVMTextures*, %struct.XXVMVPStack*, <4 x float>*, <4 x float>*, <4 x float>*, <4 x float>*, <4 x float>*, <4 x float>*, <4 x float>*, <4 x float>*, [4 x <4 x float>]*, i32*, <4 x i32>*, i64)* @t to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define void @t(%struct.XXDState* %gldst, <4 x float>* %prgrm, <4 x float>** %buffs, %struct._XXVMConstants* %cnstn, %struct.YYToken* %pstrm, %struct.XXVMVPContext* %vmctx, %struct.XXVMTextures* %txtrs, %struct.XXVMVPStack* %vpstk, <4 x float>* %atr0, <4 x float>* %atr1, <4 x float>* %atr2, <4 x float>* %atr3, <4 x float>* %vtx0, <4 x float>* %vtx1, <4 x float>* %vtx2, <4 x float>* %vtx3, [4 x <4 x float>]* %tmpGbl, i32* %oldMsk, <4 x i32>* %adrGbl, i64 %key_token) nounwind {
-entry:
- %0 = trunc i64 %key_token to i32 ; <i32> [#uses=1]
- %1 = getelementptr %struct.YYToken* %pstrm, i32 %0 ; <%struct.YYToken*> [#uses=5]
- br label %bb1132
-
-bb51: ; preds = %bb1132
- %2 = getelementptr %struct.YYToken* %1, i32 %operation.0.rec, i32 0, i32 0 ; <i16*> [#uses=1]
- %3 = load i16* %2, align 1 ; <i16> [#uses=3]
- %4 = lshr i16 %3, 6 ; <i16> [#uses=1]
- %5 = trunc i16 %4 to i8 ; <i8> [#uses=1]
- %6 = zext i8 %5 to i32 ; <i32> [#uses=1]
- %7 = trunc i16 %3 to i8 ; <i8> [#uses=1]
- %8 = and i8 %7, 7 ; <i8> [#uses=1]
- %mask5556 = zext i8 %8 to i32 ; <i32> [#uses=3]
- %.sum1324 = add i32 %mask5556, 2 ; <i32> [#uses=1]
- %.rec = add i32 %operation.0.rec, %.sum1324 ; <i32> [#uses=1]
- %9 = bitcast %struct.YYToken* %operation.0 to i32* ; <i32*> [#uses=1]
- %10 = load i32* %9, align 1 ; <i32> [#uses=1]
- %11 = lshr i32 %10, 16 ; <i32> [#uses=2]
- %12 = trunc i32 %11 to i8 ; <i8> [#uses=1]
- %13 = and i8 %12, 1 ; <i8> [#uses=1]
- %14 = lshr i16 %3, 15 ; <i16> [#uses=1]
- %15 = trunc i16 %14 to i8 ; <i8> [#uses=1]
- %16 = or i8 %13, %15 ; <i8> [#uses=1]
- %17 = icmp eq i8 %16, 0 ; <i1> [#uses=1]
- br i1 %17, label %bb94, label %bb75
-
-bb75: ; preds = %bb51
- %18 = getelementptr %struct.YYToken* %1, i32 0, i32 0, i32 0 ; <i16*> [#uses=1]
- %19 = load i16* %18, align 4 ; <i16> [#uses=1]
- %20 = load i16* null, align 2 ; <i16> [#uses=1]
- %21 = zext i16 %19 to i64 ; <i64> [#uses=1]
- %22 = zext i16 %20 to i64 ; <i64> [#uses=1]
- %23 = shl i64 %22, 16 ; <i64> [#uses=1]
- %.ins1177 = or i64 %23, %21 ; <i64> [#uses=1]
- %.ins1175 = or i64 %.ins1177, 0 ; <i64> [#uses=1]
- %24 = and i32 %11, 1 ; <i32> [#uses=1]
- %.neg1333 = sub i32 %mask5556, %24 ; <i32> [#uses=1]
- %.neg1335 = sub i32 %.neg1333, 0 ; <i32> [#uses=1]
- %25 = sub i32 %.neg1335, 0 ; <i32> [#uses=1]
- br label %bb94
-
-bb94: ; preds = %bb75, %bb51
- %extraToken.0 = phi i64 [ %.ins1175, %bb75 ], [ %extraToken.1, %bb51 ] ; <i64> [#uses=1]
- %argCount.0 = phi i32 [ %25, %bb75 ], [ %mask5556, %bb51 ] ; <i32> [#uses=1]
- %operation.0.sum1392 = add i32 %operation.0.rec, 1 ; <i32> [#uses=2]
- %26 = getelementptr %struct.YYToken* %1, i32 %operation.0.sum1392, i32 0, i32 0 ; <i16*> [#uses=1]
- %27 = load i16* %26, align 4 ; <i16> [#uses=1]
- %28 = getelementptr %struct.YYToken* %1, i32 %operation.0.sum1392, i32 0, i32 1 ; <i16*> [#uses=1]
- %29 = load i16* %28, align 2 ; <i16> [#uses=1]
- store i16 %27, i16* null, align 8
- store i16 %29, i16* null, align 2
- br i1 false, label %bb1132, label %bb110
-
-bb110: ; preds = %bb94
- switch i32 %6, label %bb1078 [
- i32 30, label %bb960
- i32 32, label %bb801
- i32 38, label %bb809
- i32 78, label %bb1066
- ]
-
-bb801: ; preds = %bb110
- unreachable
-
-bb809: ; preds = %bb110
- unreachable
-
-bb960: ; preds = %bb110
- %30 = icmp eq i32 %argCount.0, 1 ; <i1> [#uses=1]
- br i1 %30, label %bb962, label %bb965
-
-bb962: ; preds = %bb960
- unreachable
-
-bb965: ; preds = %bb960
- unreachable
-
-bb1066: ; preds = %bb110
- unreachable
-
-bb1078: ; preds = %bb110
- unreachable
-
-bb1132: ; preds = %bb94, %entry
- %extraToken.1 = phi i64 [ undef, %entry ], [ %extraToken.0, %bb94 ] ; <i64> [#uses=1]
- %operation.0.rec = phi i32 [ 0, %entry ], [ %.rec, %bb94 ] ; <i32> [#uses=4]
- %operation.0 = getelementptr %struct.YYToken* %1, i32 %operation.0.rec ; <%struct.YYToken*> [#uses=1]
- br i1 false, label %bb1134, label %bb51
-
-bb1134: ; preds = %bb1132
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-16-VecUnaryOp.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-10-16-VecUnaryOp.ll
deleted file mode 100644
index de4c1e7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-16-VecUnaryOp.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-; PR2762
-define void @foo(<4 x i32>* %p, <4 x double>* %q) {
- %n = load <4 x i32>* %p
- %z = sitofp <4 x i32> %n to <4 x double>
- store <4 x double> %z, <4 x double>* %q
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-17-Asm64bitRConstraint.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-10-17-Asm64bitRConstraint.ll
deleted file mode 100644
index b2e6061..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-17-Asm64bitRConstraint.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86
-; RUN: llc < %s -march=x86-64
-
-define void @test(i64 %x) nounwind {
-entry:
- tail call void asm sideeffect "ASM: $0", "r,~{dirflag},~{fpsr},~{flags}"(i64 %x) nounwind
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-20-AsmDoubleInI32.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-10-20-AsmDoubleInI32.ll
deleted file mode 100644
index 353d1c7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-20-AsmDoubleInI32.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86
-; RUN: llc < %s -march=x86-64
-
-; from gcc.c-torture/compile/920520-1.c
-
-define i32 @g() nounwind {
-entry:
- call void asm sideeffect "$0", "r"(double 1.500000e+00) nounwind
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-24-FlippedCompare.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-10-24-FlippedCompare.ll
deleted file mode 100644
index 421b931..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-24-FlippedCompare.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -o - | not grep {ucomiss\[^,\]*esp}
-
-define void @f(float %wt) {
-entry:
- %0 = fcmp ogt float %wt, 0.000000e+00 ; <i1> [#uses=1]
- %1 = tail call i32 @g(i32 44) ; <i32> [#uses=3]
- %2 = inttoptr i32 %1 to i8* ; <i8*> [#uses=2]
- br i1 %0, label %bb, label %bb1
-
-bb: ; preds = %entry
- ret void
-
-bb1: ; preds = %entry
- ret void
-}
-
-declare i32 @g(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-27-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-10-27-CoalescerBug.ll
deleted file mode 100644
index afeb358..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-27-CoalescerBug.ll
+++ /dev/null
@@ -1,44 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -stats |& not grep {Number of register spills}
-
-define fastcc void @fourn(double* %data, i32 %isign) nounwind {
-entry:
- br label %bb
-
-bb: ; preds = %bb, %entry
- %indvar93 = phi i32 [ 0, %entry ], [ %idim.030, %bb ] ; <i32> [#uses=2]
- %idim.030 = add i32 %indvar93, 1 ; <i32> [#uses=1]
- %0 = add i32 %indvar93, 2 ; <i32> [#uses=1]
- %1 = icmp sgt i32 %0, 2 ; <i1> [#uses=1]
- br i1 %1, label %bb30.loopexit, label %bb
-
-bb3: ; preds = %bb30.loopexit, %bb25, %bb3
- %2 = load i32* null, align 4 ; <i32> [#uses=1]
- %3 = mul i32 %2, 0 ; <i32> [#uses=1]
- %4 = icmp slt i32 0, %3 ; <i1> [#uses=1]
- br i1 %4, label %bb18, label %bb3
-
-bb18: ; preds = %bb3
- %5 = fdiv double %11, 0.000000e+00 ; <double> [#uses=1]
- %6 = tail call double @sin(double %5) nounwind readonly ; <double> [#uses=1]
- br label %bb24.preheader
-
-bb22.preheader: ; preds = %bb24.preheader, %bb22.preheader
- br label %bb22.preheader
-
-bb25: ; preds = %bb24.preheader
- %7 = fmul double 0.000000e+00, %6 ; <double> [#uses=0]
- %8 = add i32 %i3.122100, 0 ; <i32> [#uses=1]
- %9 = icmp sgt i32 %8, 0 ; <i1> [#uses=1]
- br i1 %9, label %bb3, label %bb24.preheader
-
-bb24.preheader: ; preds = %bb25, %bb18
- %i3.122100 = or i32 0, 1 ; <i32> [#uses=2]
- %10 = icmp slt i32 0, %i3.122100 ; <i1> [#uses=1]
- br i1 %10, label %bb25, label %bb22.preheader
-
-bb30.loopexit: ; preds = %bb
- %11 = fmul double 0.000000e+00, 0x401921FB54442D1C ; <double> [#uses=1]
- br label %bb3
-}
-
-declare double @sin(double) nounwind readonly
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-27-StackRealignment.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-10-27-StackRealignment.ll
deleted file mode 100644
index 784bc72..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-27-StackRealignment.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; Linux doesn't support stack realignment for functions with allocas (PR2888).
-; Until it does, we shouldn't use movaps to access the stack. On targets with
-; sufficiently aligned stack (e.g. darwin) we should.
-
-; RUN: llc < %s -mtriple=i386-pc-linux-gnu -mcpu=yonah | not grep movaps
-; RUN: llc < %s -mtriple=i686-apple-darwin9 -mcpu=yonah | grep movaps | count 2
-
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-linux-gnu"
-
-define void @foo(i32 %t) nounwind {
- %tmp1210 = alloca i8, i32 32, align 4
- call void @llvm.memset.i64(i8* %tmp1210, i8 0, i64 32, i32 4)
-
- %x = alloca i8, i32 %t
- call void @dummy(i8* %x)
- ret void
-}
-
-declare void @dummy(i8* %x)
-declare void @llvm.memset.i64(i8*, i8, i64, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-29-ExpandVAARG.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-10-29-ExpandVAARG.ll
deleted file mode 100644
index 7ad94f1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-10-29-ExpandVAARG.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR2977
-define i8* @ap_php_conv_p2(){
-entry:
- %ap.addr = alloca i8* ; <i8**> [#uses=36]
- br label %sw.bb301
-sw.bb301:
- %0 = va_arg i8** %ap.addr, i64 ; <i64> [#uses=1]
- br label %sw.bb301
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-11-03-F80VAARG.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-11-03-F80VAARG.ll
deleted file mode 100644
index 507799b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-11-03-F80VAARG.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86 -o - | not grep 10
-
-declare void @llvm.va_start(i8*) nounwind
-
-declare void @llvm.va_copy(i8*, i8*) nounwind
-
-declare void @llvm.va_end(i8*) nounwind
-
-define x86_fp80 @test(...) nounwind {
- %ap = alloca i8* ; <i8**> [#uses=3]
- %v1 = bitcast i8** %ap to i8* ; <i8*> [#uses=1]
- call void @llvm.va_start(i8* %v1)
- %t1 = va_arg i8** %ap, x86_fp80 ; <x86_fp80> [#uses=1]
- %t2 = va_arg i8** %ap, x86_fp80 ; <x86_fp80> [#uses=1]
- %t = fadd x86_fp80 %t1, %t2 ; <x86_fp80> [#uses=1]
- ret x86_fp80 %t
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-11-06-testb.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-11-06-testb.ll
deleted file mode 100644
index f8f317c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-11-06-testb.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin | grep testb
-
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.5"
- %struct.x = type <{ i8, i8, i16 }>
-
-define i32 @foo(%struct.x* %p) nounwind {
-entry:
- %0 = getelementptr %struct.x* %p, i32 0, i32 0 ; <i8*> [#uses=1]
- store i8 55, i8* %0, align 1
- %1 = bitcast %struct.x* %p to i32* ; <i32*> [#uses=1]
- %2 = load i32* %1, align 1 ; <i32> [#uses=1]
- %3 = and i32 %2, 512 ; <i32> [#uses=1]
- %4 = icmp eq i32 %3, 0 ; <i1> [#uses=1]
- br i1 %4, label %bb5, label %bb
-
-bb: ; preds = %entry
- %5 = tail call i32 (...)* @xx() nounwind ; <i32> [#uses=1]
- ret i32 %5
-
-bb5: ; preds = %entry
- ret i32 0
-}
-
-declare i32 @xx(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-11-13-inlineasm-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-11-13-inlineasm-3.ll
deleted file mode 100644
index 1dc97fc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-11-13-inlineasm-3.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -mtriple=i686-pc-linux-gnu
-; PR 1779
-; Using 'A' constraint and a tied constraint together used to crash.
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i686-pc-linux-gnu"
- %struct.linux_dirent64 = type { i64, i64, i16, i8, [0 x i8] }
-
-define i32 @sys_getdents64(i32 %fd, %struct.linux_dirent64* %dirent, i32 %count) {
-entry:
- br i1 true, label %cond_next29, label %UnifiedReturnBlock
-
-cond_next29: ; preds = %entry
- %tmp83 = call i32 asm sideeffect "1:\09movl %eax,0($2)\0A2:\09movl %edx,4($2)\0A3:\0A.section .fixup,\22ax\22\0A4:\09movl $3,$0\0A\09jmp 3b\0A.previous\0A .section __ex_table,\22a\22\0A .balign 4 \0A .long 1b,4b\0A .previous\0A .section __ex_table,\22a\22\0A .balign 4 \0A .long 2b,4b\0A .previous\0A", "=r,A,r,i,0,~{dirflag},~{fpsr},~{flags}"(i64 0, i64* null, i32 -14, i32 0) nounwind ; <i32> [#uses=0]
- br label %UnifiedReturnBlock
-
-UnifiedReturnBlock: ; preds = %entry
- ret i32 -14
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-11-29-DivideConstant16bit.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-11-29-DivideConstant16bit.ll
deleted file mode 100644
index 2e114ab..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-11-29-DivideConstant16bit.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=i686-pc-linux-gnu | grep -- -1985 | count 1
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i686-pc-linux-gnu"
-
-define zeroext i16 @a(i16 zeroext %x) nounwind {
-entry:
- %div = udiv i16 %x, 33 ; <i32> [#uses=1]
- ret i16 %div
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-11-29-DivideConstant16bitSigned.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-11-29-DivideConstant16bitSigned.ll
deleted file mode 100644
index 7c811af..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-11-29-DivideConstant16bitSigned.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=i686-pc-linux-gnu | grep -- -1985
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i686-pc-linux-gnu"
-
-define signext i16 @a(i16 signext %x) nounwind {
-entry:
- %div = sdiv i16 %x, 33 ; <i32> [#uses=1]
- ret i16 %div
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-11-29-ULT-Sign.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-11-29-ULT-Sign.ll
deleted file mode 100644
index 6dca141..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-11-29-ULT-Sign.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -mtriple=i686-pc-linux-gnu | grep "jns" | count 1
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i686-pc-linux-gnu"
-
-define i32 @a(i32 %x) nounwind {
-entry:
- %cmp = icmp ult i32 %x, -2147483648 ; <i1> [#uses=1]
- br i1 %cmp, label %if.end, label %if.then
-
-if.then: ; preds = %entry
- %call = call i32 (...)* @b() ; <i32> [#uses=0]
- br label %if.end
-
-if.end: ; preds = %if.then, %entry
- br label %return
-
-return: ; preds = %if.end
- ret i32 undef
-}
-
-declare i32 @b(...)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-01-SpillerAssert.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-12-01-SpillerAssert.ll
deleted file mode 100644
index d96d806..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-01-SpillerAssert.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu
-; PR3124
-
- %struct.cpuinfo_x86 = type { i8, i8, i8, i8, i32, i8, i8, i8, i32, i32, [9 x i32], [16 x i8], [64 x i8], i32, i32, i32, i64, %struct.cpumask_t, i16, i16, i16, i16, i16, i16, i16, i16, i32 }
- %struct.cpumask_t = type { [1 x i64] }
-@.str10 = external constant [70 x i8] ; <[70 x i8]*> [#uses=1]
-
-declare i32 @printk(i8*, ...)
-
-define void @display_cacheinfo(%struct.cpuinfo_x86* %c) nounwind section ".cpuinit.text" {
-entry:
- %asmtmp = tail call { i32, i32, i32, i32 } asm "cpuid", "={ax},={bx},={cx},={dx},0,2,~{dirflag},~{fpsr},~{flags}"(i32 -2147483643, i32 0) nounwind ; <{ i32, i32, i32, i32 }> [#uses=0]
- %0 = tail call i32 (i8*, ...)* @printk(i8* getelementptr ([70 x i8]* @.str10, i32 0, i64 0), i32 0, i32 0, i32 0, i32 0) nounwind ; <i32> [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll
deleted file mode 100644
index 1f8bd45..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin | not grep lea
-; The inner loop should use [reg] addressing, not [reg+reg] addressing.
-; rdar://6403965
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.5"
-
-define i8* @test(i8* %Q, i32* %L) nounwind {
-entry:
- br label %bb1
-
-bb: ; preds = %bb1, %bb1
- %indvar.next = add i32 %P.0.rec, 1 ; <i32> [#uses=1]
- br label %bb1
-
-bb1: ; preds = %bb, %entry
- %P.0.rec = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
- %P.0 = getelementptr i8* %Q, i32 %P.0.rec ; <i8*> [#uses=2]
- %0 = load i8* %P.0, align 1 ; <i8> [#uses=1]
- switch i8 %0, label %bb3 [
- i8 12, label %bb
- i8 42, label %bb
- ]
-
-bb3: ; preds = %bb1
- %P.0.sum = add i32 %P.0.rec, 2 ; <i32> [#uses=1]
- %1 = getelementptr i8* %Q, i32 %P.0.sum ; <i8*> [#uses=1]
- store i8 4, i8* %1, align 1
- ret i8* %P.0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-02-IllegalResultType.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-12-02-IllegalResultType.ll
deleted file mode 100644
index 4b72cb9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-02-IllegalResultType.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc < %s
-; PR3117
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-linux-gnu"
-@g_118 = external global i8 ; <i8*> [#uses=1]
-@g_7 = external global i32 ; <i32*> [#uses=1]
-
-define i32 @func_73(i32 %p_74) nounwind {
-entry:
- %0 = load i32* @g_7, align 4 ; <i32> [#uses=1]
- %1 = or i8 0, 118 ; <i8> [#uses=1]
- %2 = zext i8 %1 to i64 ; <i64> [#uses=1]
- %3 = icmp ne i32 %0, 0 ; <i1> [#uses=1]
- %4 = zext i1 %3 to i64 ; <i64> [#uses=1]
- %5 = or i64 %4, -758998846 ; <i64> [#uses=3]
- %6 = icmp sle i64 %2, %5 ; <i1> [#uses=1]
- %7 = zext i1 %6 to i8 ; <i8> [#uses=1]
- %8 = or i8 %7, 118 ; <i8> [#uses=1]
- %9 = zext i8 %8 to i64 ; <i64> [#uses=1]
- %10 = icmp sle i64 %9, 0 ; <i1> [#uses=1]
- %11 = zext i1 %10 to i8 ; <i8> [#uses=1]
- %12 = or i8 %11, 118 ; <i8> [#uses=1]
- %13 = zext i8 %12 to i64 ; <i64> [#uses=1]
- %14 = icmp sle i64 %13, %5 ; <i1> [#uses=1]
- %15 = zext i1 %14 to i8 ; <i8> [#uses=1]
- %16 = or i8 %15, 118 ; <i8> [#uses=1]
- %17 = zext i8 %16 to i64 ; <i64> [#uses=1]
- %18 = icmp sle i64 %17, 0 ; <i1> [#uses=1]
- %19 = zext i1 %18 to i8 ; <i8> [#uses=1]
- %20 = or i8 %19, 118 ; <i8> [#uses=1]
- %21 = zext i8 %20 to i64 ; <i64> [#uses=1]
- %22 = icmp sle i64 %21, %5 ; <i1> [#uses=1]
- %23 = zext i1 %22 to i8 ; <i8> [#uses=1]
- %24 = or i8 %23, 118 ; <i8> [#uses=1]
- store i8 %24, i8* @g_118, align 1
- ret i32 undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-02-dagcombine-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-12-02-dagcombine-1.ll
deleted file mode 100644
index fe5bff3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-02-dagcombine-1.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86 | grep "(%esp)" | count 2
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.5"
-; a - a should be found and removed, leaving refs to only L and P
-define i8* @test(i8* %a, i8* %L, i8* %P) nounwind {
-entry:
- %0 = ptrtoint i8* %a to i32
- %1 = sub i32 -2, %0
- %2 = ptrtoint i8* %P to i32
- %3 = sub i32 0, %2
- %4 = ptrtoint i8* %L to i32
- %5 = add i32 %4, %3
- %6 = add i32 %5, %1 ; <i32> [#uses=1]
- %7 = getelementptr i8* %a, i32 %6 ; <i8*> [#uses=1]
- br label %return
-
-return: ; preds = %bb3
- ret i8* %7
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-02-dagcombine-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-12-02-dagcombine-2.ll
deleted file mode 100644
index 4cb1b42..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-02-dagcombine-2.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86 | grep "(%esp)" | count 2
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.5"
-; a - a should be found and removed, leaving refs to only L and P
-define i8* @test(i8* %a, i8* %L, i8* %P) nounwind {
-entry:
- %0 = ptrtoint i8* %a to i32
- %1 = ptrtoint i8* %P to i32
- %2 = sub i32 %1, %0
- %3 = ptrtoint i8* %L to i32
- %4 = sub i32 %2, %3 ; <i32> [#uses=1]
- %5 = getelementptr i8* %a, i32 %4 ; <i8*> [#uses=1]
- br label %return
-
-return: ; preds = %bb3
- ret i8* %5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-02-dagcombine-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-12-02-dagcombine-3.ll
deleted file mode 100644
index d5a676a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-02-dagcombine-3.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 | grep add | count 2
-; RUN: llc < %s -march=x86 | grep sub | grep -v subsections | count 1
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.5"
-; this should be rearranged to have two +s and one -
-define i32 @test(i8* %a, i8* %L, i8* %P) nounwind {
-entry:
- %0 = ptrtoint i8* %P to i32
- %1 = sub i32 -2, %0
- %2 = ptrtoint i8* %L to i32
- %3 = ptrtoint i8* %a to i32
- %4 = sub i32 %2, %3 ; <i32> [#uses=1]
- %5 = add i32 %1, %4 ; <i32> [#uses=1]
- br label %return
-
-return: ; preds = %bb3
- ret i32 %5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-05-SpillerCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-12-05-SpillerCrash.ll
deleted file mode 100644
index 7fd2e6f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-05-SpillerCrash.ll
+++ /dev/null
@@ -1,237 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9.5 -mattr=+sse41 -relocation-model=pic
-
- %struct.XXActiveTextureTargets = type { i64, i64, i64, i64, i64, i64 }
- %struct.XXAlphaTest = type { float, i16, i8, i8 }
- %struct.XXArrayRange = type { i8, i8, i8, i8 }
- %struct.XXBlendMode = type { i16, i16, i16, i16, %struct.ZZIColor4, i16, i16, i8, i8, i8, i8 }
- %struct.XXBBRec = type opaque
- %struct.XXBBstate = type { %struct.ZZGTransformKey, %struct.ZZGTransformKey, %struct.XXProgramLimits, %struct.XXProgramLimits, i8, i8, i8, i8, %struct.ZZSBB, %struct.ZZSBB, [4 x %struct.ZZSBB], %struct.ZZSBB, %struct.ZZSBB, %struct.ZZSBB, [8 x %struct.ZZSBB], %struct.ZZSBB }
- %struct.XXClearColor = type { double, %struct.ZZIColor4, %struct.ZZIColor4, float, i32 }
- %struct.XXClipPlane = type { i32, [6 x %struct.ZZIColor4] }
- %struct.XXColorBB = type { i16, i8, i8, [8 x i16], i8, i8, i8, i8 }
- %struct.XXColorMatrix = type { [16 x float]*, %struct.XXImagingColorScale }
- %struct.XXConfig = type { i32, float, %struct.ZZGTransformKey, %struct.ZZGTransformKey, i8, i8, i8, i8, i8, i8, i16, i32, i32, i32, %struct.XXPixelFormatInfo, %struct.XXPointLineLimits, %struct.XXPointLineLimits, %struct.XXRenderFeatures, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.XXTextureLimits, [3 x %struct.XXPipelineProgramLimits], %struct.XXFragmentProgramLimits, %struct.XXVertexProgramLimits, %struct.XXGeometryShaderLimits, %struct.XXProgramLimits, %struct.XXGeometryShaderLimits, %struct.XXVertexDescriptor*, %struct.XXVertexDescriptor*, [3 x i32], [4 x i32], [0 x i32] }
- %struct.XXContextRec = type { float, float, float, float, float, float, float, float, %struct.ZZIColor4, %struct.ZZIColor4, %struct.YYFPContext, [16 x [2 x %struct.PPStreamToken]], %struct.ZZGProcessor, %struct._YYConstants*, void (%struct.XXContextRec*, i32, i32, %struct.YYFragmentAttrib*, %struct.YYFragmentAttrib*, i32)*, %struct._YYFunction*, %struct.PPStreamToken*, void (%struct.XXContextRec*, %struct.XXVertex*)*, void (%struct.XXContextRec*, %struct.XXVertex*, %struct.XXVertex*)*, void (%struct.XXContextRec*, %struct.XXVertex*, %struct.XXVertex*, %struct.XXVertex*)*, %struct._YYFunction*, %struct._YYFunction*, %struct._YYFunction*, [4 x i32], [3 x i32], [3 x i32], float, float, float, %struct.PPStreamToken, i32, %struct.ZZSDrawable, %struct.XXFramebufferRec*, %struct.XXFramebufferRec*, %struct.XXRect, %struct.XXFormat, %struct.XXFormat, %struct.XXFormat, %struct.XXConfig*, %struct.XXBBstate, %struct.XXBBstate, %struct.XXSharedRec*, %struct.XXState*, %struct.XXPluginState*, %struct.XXVertex*, %struct.YYFragmentAttrib*, %struct.YYFragmentAttrib*, %struct.YYFragmentAttrib*, %struct.XXProgramRec*, %struct.XXPipelineProgramRec*, %struct.YYTextures, %struct.XXStippleData, i8, i16, i8, i32, i32, i32, %struct.XXQueryRec*, %struct.XXQueryRec*, %struct.XXFallback, { void (i8*, i8*, i32, i8*)* } }
- %struct.XXConvolution = type { %struct.ZZIColor4, %struct.XXImagingColorScale, i16, i16, [0 x i32], float*, i32, i32 }
- %struct.XXCurrent16A = type { [8 x %struct.ZZIColor4], [16 x %struct.ZZIColor4], %struct.ZZIColor4, %struct.XXPointLineLimits, float, %struct.XXPointLineLimits, float, [4 x float], %struct.XXPointLineLimits, float, float, float, float, i8, i8, i8, i8 }
- %struct.XXDepthTest = type { i16, i16, i8, i8, i8, i8, double, double }
- %struct.XXDrawableWindow = type { i32, i32, i32 }
- %struct.XXFallback = type { float*, %struct.XXRenderDispatch*, %struct.XXConfig*, i8*, i8*, i32, i32 }
- %struct.XXFenceRec = type opaque
- %struct.XXFixedFunction = type { %struct.PPStreamToken* }
- %struct.XXFogMode = type { %struct.ZZIColor4, float, float, float, float, float, i16, i16, i16, i8, i8 }
- %struct.XXFormat = type { i32, i32, i32, i32, i32, i32, i32, i32, i8, i8, i8, i8, i32, i32, i32 }
- %struct.XXFragmentProgramLimits = type { i32, i32, i32, i16, i16, i32, i32 }
- %struct.XXFramebufferAttachment = type { i16, i16, i32, i32, i32 }
- %struct.XXFramebufferData = type { [10 x %struct.XXFramebufferAttachment], [8 x i16], i16, i16, i16, i8, i8, i32, i32 }
- %struct.XXFramebufferRec = type { %struct.XXFramebufferData*, %struct.XXPluginFramebufferData*, %struct.XXFormat, i8, i8, i8, i8 }
- %struct.XXGeometryShaderLimits = type { i32, i32, i32, i32, i32 }
- %struct.XXHintMode = type { i16, i16, i16, i16, i16, i16, i16, i16, i16, i16 }
- %struct.XXHistogram = type { %struct.XXProgramLimits*, i32, i16, i8, i8 }
- %struct.XXImagingColorScale = type { %struct.ZZTCoord2, %struct.ZZTCoord2, %struct.ZZTCoord2, %struct.ZZTCoord2 }
- %struct.XXImagingSubset = type { %struct.XXConvolution, %struct.XXConvolution, %struct.XXConvolution, %struct.XXColorMatrix, %struct.XXMinmax, %struct.XXHistogram, %struct.XXImagingColorScale, %struct.XXImagingColorScale, %struct.XXImagingColorScale, %struct.XXImagingColorScale, i32, [0 x i32] }
- %struct.XXLight = type { %struct.ZZIColor4, %struct.ZZIColor4, %struct.ZZIColor4, %struct.ZZIColor4, %struct.XXPointLineLimits, float, float, float, float, float, %struct.XXPointLineLimits, float, %struct.XXPointLineLimits, float, %struct.XXPointLineLimits, float, float, float, float, float }
- %struct.XXLightModel = type { %struct.ZZIColor4, [8 x %struct.XXLight], [2 x %struct.XXMaterial], i32, i16, i16, i16, i8, i8, i8, i8, i8, i8 }
- %struct.XXLightProduct = type { %struct.ZZIColor4, %struct.ZZIColor4, %struct.ZZIColor4 }
- %struct.XXLineMode = type { float, i32, i16, i16, i8, i8, i8, i8 }
- %struct.XXLogicOp = type { i16, i8, i8 }
- %struct.XXMaskMode = type { i32, [3 x i32], i8, i8, i8, i8, i8, i8, i8, i8 }
- %struct.XXMaterial = type { %struct.ZZIColor4, %struct.ZZIColor4, %struct.ZZIColor4, %struct.ZZIColor4, float, float, float, float, [8 x %struct.XXLightProduct], %struct.ZZIColor4, [8 x i32] }
- %struct.XXMinmax = type { %struct.XXMinmaxTable*, i16, i8, i8, [0 x i32] }
- %struct.XXMinmaxTable = type { %struct.ZZIColor4, %struct.ZZIColor4 }
- %struct.XXMipmaplevel = type { [4 x i32], [4 x i32], [4 x float], [4 x i32], i32, i32, float*, i8*, i16, i16, i16, i16, [2 x float] }
- %struct.XXMultisample = type { float, i8, i8, i8, i8, i8, i8, i8, i8 }
- %struct.XXPipelineProgramData = type { i16, i8, i8, i32, %struct.PPStreamToken*, i64, %struct.ZZIColor4*, i32, [0 x i32] }
- %struct.XXPipelineProgramLimits = type { i32, i16, i16, i32, i16, i16, i32, i32 }
- %struct.XXPipelineProgramRec = type { %struct.XXPipelineProgramData*, %struct.PPStreamToken*, %struct.XXContextRec*, { %struct._YYFunction*, \2, \2, [20 x i32], [64 x i32], i32, i32, i32 }*, i32, i32 }
- %struct.XXPipelineProgramState = type { i8, i8, i8, i8, [0 x i32], %struct.ZZIColor4* }
- %struct.XXPixelFormatInfo = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
- %struct.XXPixelMap = type { i32*, float*, float*, float*, float*, float*, float*, float*, float*, i32*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.XXPixelMode = type { float, float, %struct.XXPixelStore, %struct.XXPixelTransfer, %struct.XXPixelMap, %struct.XXImagingSubset, i32, i32 }
- %struct.XXPixelPack = type { i32, i32, i32, i32, i32, i32, i32, i32, i8, i8, i8, i8 }
- %struct.XXPixelStore = type { %struct.XXPixelPack, %struct.XXPixelPack }
- %struct.XXPixelTransfer = type { float, float, float, float, float, float, float, float, float, float, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float }
- %struct.XXPluginFramebufferData = type { [10 x %struct.XXTextureRec*], i8, i8, i8, i8 }
- %struct.XXPluginProgramData = type { [3 x %struct.XXPipelineProgramRec*], %struct.XXBBRec**, i32, [0 x i32] }
- %struct.XXPluginState = type { [16 x [5 x %struct.XXTextureRec*]], [3 x %struct.XXTextureRec*], [3 x %struct.XXPipelineProgramRec*], [3 x %struct.XXPipelineProgramRec*], %struct.XXProgramRec*, %struct.XXVertexArrayRec*, [16 x %struct.XXBBRec*], %struct.XXFramebufferRec*, %struct.XXFramebufferRec* }
- %struct.XXPointLineLimits = type { float, float, float }
- %struct.XXPointMode = type { float, float, float, float, %struct.XXPointLineLimits, float, i8, i8, i8, i8, i16, i16, i32, i16, i16 }
- %struct.XXPolygonMode = type { [128 x i8], float, float, i16, i16, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8 }
- %struct.XXProgramData = type { i32, i32, i32, i32, %struct.PPStreamToken*, i32*, i32, i32, i32, i32, i8, i8, i8, i8, [0 x i32] }
- %struct.XXProgramLimits = type { i32, i32, i32, i32 }
- %struct.XXProgramRec = type { %struct.XXProgramData*, %struct.XXPluginProgramData*, %struct.ZZIColor4**, i32 }
- %struct.XXQueryRec = type { i32, i32, %struct.XXQueryRec* }
- %struct.XXRect = type { i32, i32, i32, i32, i32, i32 }
- %struct.XXRegisterCombiners = type { i8, i8, i8, i8, i32, [2 x %struct.ZZIColor4], [8 x %struct.XXRegisterCombinersPerStageState], %struct.XXRegisterCombinersFinalStageState }
- %struct.XXRegisterCombinersFinalStageState = type { i8, i8, i8, i8, [7 x %struct.XXRegisterCombinersPerVariableState] }
- %struct.XXRegisterCombinersPerPortionState = type { [4 x %struct.XXRegisterCombinersPerVariableState], i8, i8, i8, i8, i16, i16, i16, i16, i16, i16 }
- %struct.XXRegisterCombinersPerStageState = type { [2 x %struct.XXRegisterCombinersPerPortionState], [2 x %struct.ZZIColor4] }
- %struct.XXRegisterCombinersPerVariableState = type { i16, i16, i16, i16 }
- %struct.XXRenderDispatch = type { void (%struct.XXContextRec*, i32, float)*, void (%struct.XXContextRec*, i32)*, i32 (%struct.XXContextRec*, i32, i32, i32, i32, i32, i32, i8*, i32, %struct.XXBBRec*)*, i32 (%struct.XXContextRec*, %struct.XXVertex*, i32, i32, i32, i32, i8*, i32, %struct.XXBBRec*)*, void (%struct.XXContextRec*, %struct.XXVertex*, i32, i32, i32, i32, i32)*, void (%struct.XXContextRec*, %struct.XXVertex*, i32, i32, float, float, i8*, i32)*, void (%struct.XXContextRec*, %struct.XXVertex*, i32, i32)*, void (%struct.XXContextRec*, %struct.XXVertex*, i32, i32)*, void (%struct.XXContextRec*, %struct.XXVertex*, i32, i32)*, void (%struct.XXContextRec*, %struct.XXVertex*, i32, i32)*, void (%struct.XXContextRec*, %struct.XXVertex*, i32, i32)*, void (%struct.XXContextRec*, %struct.XXVertex*, i32, i32)*, void (%struct.XXContextRec*, %struct.XXVertex*, %struct.XXVertex*, i32, i32)*, void (%struct.XXContextRec*, %struct.XXVertex*, i32, i32)*, void (%struct.XXContextRec*, %struct.XXVertex*, i32, i32)*, void (%struct.XXContextRec*, %struct.XXVertex*, i32, i32)*, void (%struct.XXContextRec*, %struct.XXVertex*, i32, i32)*, void (%struct.XXContextRec*, %struct.XXVertex*, i32, i32)*, void (%struct.XXContextRec*, %struct.XXVertex*, i32, i32)*, void (%struct.XXContextRec*, %struct.XXVertex*, i32, i32)*, void (%struct.XXContextRec*, %struct.XXVertex**, i32)*, void (%struct.XXContextRec*, %struct.XXVertex**, i32, i32)*, void (%struct.XXContextRec*, %struct.XXVertex**, i32, i32)*, i8* (%struct.XXContextRec*, i32, i32*)*, void (%struct.XXContextRec*, i32, i32, i32)*, i8* (%struct.XXContextRec*, i32, i32, i32, i32, i32)*, void (%struct.XXContextRec*, i32, i32, i32, i32, i32, i8*)*, void (%struct.XXContextRec*)*, void (%struct.XXContextRec*)*, void (%struct.XXContextRec*)*, void (%struct.XXContextRec*, %struct.XXFenceRec*)*, void (%struct.XXContextRec*, i32, %struct.XXQueryRec*)*, void (%struct.XXContextRec*, %struct.XXQueryRec*)*, i32 (%struct.XXContextRec*, i32, i32, i32, i32, i32, i8*, %struct.ZZIColor4*, %struct.XXCurrent16A*)*, i32 (%struct.XXContextRec*, %struct.XXTextureRec*, i32, i32, i32, i32, i32, i32, i32, i32, i32)*, i32 (%struct.XXContextRec*, %struct.XXTextureRec*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i32, %struct.XXBBRec*)*, i32 (%struct.XXContextRec*, %struct.XXTextureRec*, i32)*, i32 (%struct.XXContextRec*, %struct.XXBBRec*, i32, i32, i8*)*, void (%struct.XXContextRec*, i32)*, void (%struct.XXContextRec*)*, void (%struct.XXContextRec*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)*, i32 (%struct.XXContextRec*, %struct.XXQueryRec*)*, void (%struct.XXContextRec*)* }
- %struct.XXRenderFeatures = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
- %struct.XXSWRSurfaceRec = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i8*, [4 x i8*], i32 }
- %struct.XXScissorTest = type { %struct.XXProgramLimits, i8, i8, i8, i8 }
- %struct.XXSharedData = type { }
- %struct.XXSharedRec = type { %struct.__ZZarrayelementDrawInfoListType, %struct.XXSharedData*, i32, i8, i8, i8, i8 }
- %struct.XXState = type <{ i16, i16, i16, i16, i32, i32, [256 x %struct.ZZIColor4], [128 x %struct.ZZIColor4], %struct.XXViewport, %struct.XXTransform, %struct.XXLightModel, %struct.XXActiveTextureTargets, %struct.XXAlphaTest, %struct.XXBlendMode, %struct.XXClearColor, %struct.XXColorBB, %struct.XXDepthTest, %struct.XXArrayRange, %struct.XXFogMode, %struct.XXHintMode, %struct.XXLineMode, %struct.XXLogicOp, %struct.XXMaskMode, %struct.XXPixelMode, %struct.XXPointMode, %struct.XXPolygonMode, %struct.XXScissorTest, i32, %struct.XXStencilTest, [8 x %struct.XXTextureMode], [16 x %struct.XXTextureImageMode], %struct.XXArrayRange, [8 x %struct.XXTextureCoordGen], %struct.XXClipPlane, %struct.XXMultisample, %struct.XXRegisterCombiners, %struct.XXArrayRange, %struct.XXArrayRange, [3 x %struct.XXPipelineProgramState], %struct.XXArrayRange, %struct.XXTransformFeedback, i32*, %struct.XXFixedFunction, [1 x i32] }>
- %struct.XXStencilTest = type { [3 x { i32, i32, i16, i16, i16, i16 }], i32, [4 x i8] }
- %struct.XXStippleData = type { i32, i16, i16, [32 x [32 x i8]] }
- %struct.XXTextureCoordGen = type { { i16, i16, %struct.ZZIColor4, %struct.ZZIColor4 }, { i16, i16, %struct.ZZIColor4, %struct.ZZIColor4 }, { i16, i16, %struct.ZZIColor4, %struct.ZZIColor4 }, { i16, i16, %struct.ZZIColor4, %struct.ZZIColor4 }, i8, i8, i8, i8 }
- %struct.XXTextureGeomState = type { i16, i16, i16, i16, i16, i8, i8, i8, i8, i16, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, [6 x i16], [6 x i16] }
- %struct.XXTextureImageMode = type { float }
- %struct.XXTextureLevel = type { i32, i32, i16, i16, i16, i8, i8, i16, i16, i16, i16, i8* }
- %struct.XXTextureLimits = type { float, float, i16, i16, i16, i16, i16, i16, i16, i16, i16, i8, i8, [16 x i16], i32 }
- %struct.XXTextureMode = type { %struct.ZZIColor4, i32, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, float, float, i16, i16, i16, i16, i16, i16, [4 x i16], i8, i8, i8, i8, [3 x float], [4 x float], float, float }
- %struct.XXTextureParamState = type { i16, i16, i16, i16, i16, i16, %struct.ZZIColor4, float, float, float, float, i16, i16, i16, i16, float, i16, i8, i8, i32, i8* }
- %struct.XXTextureRec = type { [4 x float], %struct.XXTextureState*, %struct.XXMipmaplevel*, %struct.XXMipmaplevel*, float, float, float, float, i8, i8, i8, i8, i16, i16, i16, i16, i32, float, [2 x %struct.PPStreamToken] }
- %struct.XXTextureState = type { i16, i8, i8, i16, i16, float, i32, %struct.XXSWRSurfaceRec*, %struct.XXTextureParamState, %struct.XXTextureGeomState, i16, i16, i8*, %struct.XXTextureLevel, [1 x [15 x %struct.XXTextureLevel]] }
- %struct.XXTransform = type <{ [24 x [16 x float]], [24 x [16 x float]], [16 x float], float, float, float, float, float, i8, i8, i8, i8, i32, i32, i32, i16, i16, i8, i8, i8, i8, i32 }>
- %struct.XXTransformFeedback = type { i8, i8, i8, i8, [0 x i32], [16 x i32], [16 x i32] }
- %struct.XXVertex = type { %struct.ZZIColor4, %struct.ZZIColor4, %struct.ZZIColor4, %struct.ZZIColor4, %struct.ZZIColor4, %struct.XXPointLineLimits, float, %struct.ZZIColor4, float, i8, i8, i8, i8, float, float, i32, i32, i32, i32, [4 x float], [2 x %struct.XXMaterial*], [2 x i32], [8 x %struct.ZZIColor4] }
- %struct.XXVertexArrayRec = type opaque
- %struct.XXVertexDescriptor = type { i8, i8, i8, i8, [0 x i32] }
- %struct.XXVertexProgramLimits = type { i16, i16, i32, i32 }
- %struct.XXViewport = type { float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, double, double, i32, i32, i32, i32, float, float, float, float }
- %struct.ZZGColorTable = type { i32, i32, i32, i8* }
- %struct.ZZGOperation = type { i8*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, %struct.ZZGColorTable, %struct.ZZGColorTable, %struct.ZZGColorTable }
- %struct.ZZGProcessor = type { void (%struct.XXPixelMode*, %struct.ZZGOperation*, %struct._ZZGProcessorData*, %union._ZZGFunctionKey*)*, %struct._YYFunction*, %union._ZZGFunctionKey*, %struct._ZZGProcessorData* }
- %struct.ZZGTransformKey = type { i32, i32 }
- %struct.ZZIColor4 = type { float, float, float, float }
- %struct.ZZSBB = type { i8* }
- %struct.ZZSDrawable = type { %struct.ZZSWindowRec* }
- %struct.ZZSWindowRec = type { %struct.ZZGTransformKey, %struct.ZZGTransformKey, i32, i32, %struct.ZZSDrawable, i8*, i8*, i8*, i8*, i8*, [4 x i8*], i32, i16, i16, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8, %struct.XXDrawableWindow, i32, i32, i8*, i8* }
- %struct.ZZTCoord2 = type { float, float }
- %struct.YYFPContext = type { float, i32, i32, i32, float, [3 x float] }
- %struct.YYFragmentAttrib = type { <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, [8 x <4 x float>] }
- %struct.YYTextures = type { [16 x %struct.XXTextureRec*] }
- %struct.PPStreamToken = type { { i16, i16, i32 } }
- %struct._ZZGProcessorData = type { void (i8*, i8*, i32, i32, i32, i32, i32, i32, i32)*, void (i8*, i8*, i32, i32, i32, i32, i32, i32, i32)*, i8* (i32)*, void (i8*)* }
- %struct._YYConstants = type { <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, <4 x float>, float, float, float, float, float, float, float, float, float, float, float, float, [256 x float], [4096 x i8], [8 x float], [48 x float], [128 x float], [528 x i8], { void (i8*, i8*, i32, i8*)*, float (float)*, float (float)*, float (float)*, i32 (float)* } }
- %struct._YYFunction = type opaque
- %struct.__ZZarrayelementDrawInfoListType = type { i32, [40 x i8] }
- %union._ZZGFunctionKey = type opaque
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (void (%struct.XXContextRec*, i32, i32, %struct.YYFragmentAttrib*, %struct.YYFragmentAttrib*, i32)* @t to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define void @t(%struct.XXContextRec* %ctx, i32 %x, i32 %y, %struct.YYFragmentAttrib* %start, %struct.YYFragmentAttrib* %deriv, i32 %num_frags) nounwind {
-entry:
- %tmp7485.i.i.i = xor <4 x i32> zeroinitializer, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
- %tmp8382.i.i.i = extractelement <4 x i32> zeroinitializer, i32 1 ; <i32> [#uses=1]
- %tmp8383.i.i.i = extractelement <4 x i32> zeroinitializer, i32 2 ; <i32> [#uses=2]
- %tmp8384.i.i.i = extractelement <4 x i32> zeroinitializer, i32 3 ; <i32> [#uses=2]
- br label %bb7551.i.i.i
-
-bb4426.i.i.i: ; preds = %bb7551.i.i.i
- %0 = getelementptr %struct.XXMipmaplevel* null, i32 %tmp8383.i.i.i, i32 3 ; <[4 x i32]*> [#uses=1]
- %1 = bitcast [4 x i32]* %0 to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %2 = load <4 x i32>* %1, align 16 ; <<4 x i32>> [#uses=1]
- %3 = getelementptr %struct.XXMipmaplevel* null, i32 %tmp8384.i.i.i, i32 3 ; <[4 x i32]*> [#uses=1]
- %4 = bitcast [4 x i32]* %3 to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %5 = load <4 x i32>* %4, align 16 ; <<4 x i32>> [#uses=1]
- %6 = shufflevector <4 x i32> %2, <4 x i32> %5, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x i32>> [#uses=1]
- %7 = bitcast <4 x i32> %6 to <2 x i64> ; <<2 x i64>> [#uses=1]
- %8 = shufflevector <2 x i64> zeroinitializer, <2 x i64> %7, <2 x i32> < i32 1, i32 3 > ; <<2 x i64>> [#uses=1]
- %9 = getelementptr %struct.XXMipmaplevel* null, i32 %tmp8382.i.i.i, i32 6 ; <float**> [#uses=1]
- %10 = load float** %9, align 4 ; <float*> [#uses=1]
- %11 = bitcast float* %10 to i8* ; <i8*> [#uses=1]
- %12 = getelementptr %struct.XXMipmaplevel* null, i32 %tmp8383.i.i.i, i32 6 ; <float**> [#uses=1]
- %13 = load float** %12, align 4 ; <float*> [#uses=1]
- %14 = bitcast float* %13 to i8* ; <i8*> [#uses=1]
- %15 = getelementptr %struct.XXMipmaplevel* null, i32 %tmp8384.i.i.i, i32 6 ; <float**> [#uses=1]
- %16 = load float** %15, align 4 ; <float*> [#uses=1]
- %17 = bitcast float* %16 to i8* ; <i8*> [#uses=1]
- %tmp7308.i.i.i = and <2 x i64> zeroinitializer, %8 ; <<2 x i64>> [#uses=1]
- %18 = bitcast <2 x i64> %tmp7308.i.i.i to <4 x i32> ; <<4 x i32>> [#uses=1]
- %19 = mul <4 x i32> %18, zeroinitializer ; <<4 x i32>> [#uses=1]
- %20 = add <4 x i32> %19, zeroinitializer ; <<4 x i32>> [#uses=3]
- %21 = load i32* null, align 4 ; <i32> [#uses=0]
- %22 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> zeroinitializer) nounwind readnone ; <<4 x float>> [#uses=1]
- %23 = fmul <4 x float> %22, < float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000 > ; <<4 x float>> [#uses=1]
- %tmp2114.i119.i.i = extractelement <4 x i32> %20, i32 1 ; <i32> [#uses=1]
- %24 = shl i32 %tmp2114.i119.i.i, 2 ; <i32> [#uses=1]
- %25 = getelementptr i8* %11, i32 %24 ; <i8*> [#uses=1]
- %26 = bitcast i8* %25 to i32* ; <i32*> [#uses=1]
- %27 = load i32* %26, align 4 ; <i32> [#uses=1]
- %28 = or i32 %27, -16777216 ; <i32> [#uses=1]
- %tmp1927.i120.i.i = insertelement <4 x i32> undef, i32 %28, i32 0 ; <<4 x i32>> [#uses=1]
- %29 = bitcast <4 x i32> %tmp1927.i120.i.i to <16 x i8> ; <<16 x i8>> [#uses=1]
- %30 = shufflevector <16 x i8> %29, <16 x i8> < i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef >, <16 x i32> < i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23 > ; <<16 x i8>> [#uses=1]
- %31 = bitcast <16 x i8> %30 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %32 = shufflevector <8 x i16> %31, <8 x i16> < i16 0, i16 0, i16 0, i16 0, i16 undef, i16 undef, i16 undef, i16 undef >, <8 x i32> < i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11 > ; <<8 x i16>> [#uses=1]
- %33 = bitcast <8 x i16> %32 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %34 = shufflevector <4 x i32> %33, <4 x i32> undef, <4 x i32> < i32 2, i32 1, i32 0, i32 3 > ; <<4 x i32>> [#uses=1]
- %35 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %34) nounwind readnone ; <<4 x float>> [#uses=1]
- %36 = fmul <4 x float> %35, < float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000 > ; <<4 x float>> [#uses=1]
- %tmp2113.i124.i.i = extractelement <4 x i32> %20, i32 2 ; <i32> [#uses=1]
- %37 = shl i32 %tmp2113.i124.i.i, 2 ; <i32> [#uses=1]
- %38 = getelementptr i8* %14, i32 %37 ; <i8*> [#uses=1]
- %39 = bitcast i8* %38 to i32* ; <i32*> [#uses=1]
- %40 = load i32* %39, align 4 ; <i32> [#uses=1]
- %41 = or i32 %40, -16777216 ; <i32> [#uses=1]
- %tmp1963.i125.i.i = insertelement <4 x i32> undef, i32 %41, i32 0 ; <<4 x i32>> [#uses=1]
- %42 = bitcast <4 x i32> %tmp1963.i125.i.i to <16 x i8> ; <<16 x i8>> [#uses=1]
- %43 = shufflevector <16 x i8> %42, <16 x i8> < i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef >, <16 x i32> < i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23 > ; <<16 x i8>> [#uses=1]
- %44 = bitcast <16 x i8> %43 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %45 = shufflevector <8 x i16> %44, <8 x i16> < i16 0, i16 0, i16 0, i16 0, i16 undef, i16 undef, i16 undef, i16 undef >, <8 x i32> < i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11 > ; <<8 x i16>> [#uses=1]
- %46 = bitcast <8 x i16> %45 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %47 = shufflevector <4 x i32> %46, <4 x i32> undef, <4 x i32> < i32 2, i32 1, i32 0, i32 3 > ; <<4 x i32>> [#uses=1]
- %48 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %47) nounwind readnone ; <<4 x float>> [#uses=1]
- %49 = fmul <4 x float> %48, < float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000 > ; <<4 x float>> [#uses=1]
- %tmp2112.i129.i.i = extractelement <4 x i32> %20, i32 3 ; <i32> [#uses=1]
- %50 = shl i32 %tmp2112.i129.i.i, 2 ; <i32> [#uses=1]
- %51 = getelementptr i8* %17, i32 %50 ; <i8*> [#uses=1]
- %52 = bitcast i8* %51 to i32* ; <i32*> [#uses=1]
- %53 = load i32* %52, align 4 ; <i32> [#uses=1]
- %54 = or i32 %53, -16777216 ; <i32> [#uses=1]
- %tmp1999.i130.i.i = insertelement <4 x i32> undef, i32 %54, i32 0 ; <<4 x i32>> [#uses=1]
- %55 = bitcast <4 x i32> %tmp1999.i130.i.i to <16 x i8> ; <<16 x i8>> [#uses=1]
- %56 = shufflevector <16 x i8> %55, <16 x i8> < i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef >, <16 x i32> < i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23 > ; <<16 x i8>> [#uses=1]
- %57 = bitcast <16 x i8> %56 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %58 = shufflevector <8 x i16> %57, <8 x i16> < i16 0, i16 0, i16 0, i16 0, i16 undef, i16 undef, i16 undef, i16 undef >, <8 x i32> < i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11 > ; <<8 x i16>> [#uses=1]
- %59 = bitcast <8 x i16> %58 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %60 = shufflevector <4 x i32> %59, <4 x i32> undef, <4 x i32> < i32 2, i32 1, i32 0, i32 3 > ; <<4 x i32>> [#uses=1]
- %61 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %60) nounwind readnone ; <<4 x float>> [#uses=1]
- %62 = fmul <4 x float> %61, < float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000 > ; <<4 x float>> [#uses=1]
- %63 = fmul <4 x float> %23, zeroinitializer ; <<4 x float>> [#uses=1]
- %64 = fadd <4 x float> zeroinitializer, %63 ; <<4 x float>> [#uses=1]
- %65 = fmul <4 x float> %36, zeroinitializer ; <<4 x float>> [#uses=1]
- %66 = fadd <4 x float> zeroinitializer, %65 ; <<4 x float>> [#uses=1]
- %67 = fmul <4 x float> %49, zeroinitializer ; <<4 x float>> [#uses=1]
- %68 = fadd <4 x float> zeroinitializer, %67 ; <<4 x float>> [#uses=1]
- %69 = fmul <4 x float> %62, zeroinitializer ; <<4 x float>> [#uses=1]
- %70 = fadd <4 x float> zeroinitializer, %69 ; <<4 x float>> [#uses=1]
- %tmp7452.i.i.i = bitcast <4 x float> %64 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp7454.i.i.i = and <4 x i32> %tmp7452.i.i.i, zeroinitializer ; <<4 x i32>> [#uses=1]
- %tmp7459.i.i.i = or <4 x i32> %tmp7454.i.i.i, zeroinitializer ; <<4 x i32>> [#uses=1]
- %tmp7460.i.i.i = bitcast <4 x i32> %tmp7459.i.i.i to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp7468.i.i.i = bitcast <4 x float> %66 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp7470.i.i.i = and <4 x i32> %tmp7468.i.i.i, zeroinitializer ; <<4 x i32>> [#uses=1]
- %tmp7475.i.i.i = or <4 x i32> %tmp7470.i.i.i, zeroinitializer ; <<4 x i32>> [#uses=1]
- %tmp7476.i.i.i = bitcast <4 x i32> %tmp7475.i.i.i to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp7479.i.i.i = bitcast <4 x float> %.279.1.i to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp7480.i.i.i = and <4 x i32> zeroinitializer, %tmp7479.i.i.i ; <<4 x i32>> [#uses=1]
- %tmp7484.i.i.i = bitcast <4 x float> %68 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp7486.i.i.i = and <4 x i32> %tmp7484.i.i.i, %tmp7485.i.i.i ; <<4 x i32>> [#uses=1]
- %tmp7491.i.i.i = or <4 x i32> %tmp7486.i.i.i, %tmp7480.i.i.i ; <<4 x i32>> [#uses=1]
- %tmp7492.i.i.i = bitcast <4 x i32> %tmp7491.i.i.i to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp7495.i.i.i = bitcast <4 x float> %.380.1.i to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp7496.i.i.i = and <4 x i32> zeroinitializer, %tmp7495.i.i.i ; <<4 x i32>> [#uses=1]
- %tmp7500.i.i.i = bitcast <4 x float> %70 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp7502.i.i.i = and <4 x i32> %tmp7500.i.i.i, zeroinitializer ; <<4 x i32>> [#uses=1]
- %tmp7507.i.i.i = or <4 x i32> %tmp7502.i.i.i, %tmp7496.i.i.i ; <<4 x i32>> [#uses=1]
- %tmp7508.i.i.i = bitcast <4 x i32> %tmp7507.i.i.i to <4 x float> ; <<4 x float>> [#uses=1]
- %indvar.next.i.i.i = add i32 %aniso.0.i.i.i, 1 ; <i32> [#uses=1]
- br label %bb7551.i.i.i
-
-bb7551.i.i.i: ; preds = %bb4426.i.i.i, %entry
- %.077.1.i = phi <4 x float> [ undef, %entry ], [ %tmp7460.i.i.i, %bb4426.i.i.i ] ; <<4 x float>> [#uses=0]
- %.178.1.i = phi <4 x float> [ undef, %entry ], [ %tmp7476.i.i.i, %bb4426.i.i.i ] ; <<4 x float>> [#uses=0]
- %.279.1.i = phi <4 x float> [ undef, %entry ], [ %tmp7492.i.i.i, %bb4426.i.i.i ] ; <<4 x float>> [#uses=1]
- %.380.1.i = phi <4 x float> [ undef, %entry ], [ %tmp7508.i.i.i, %bb4426.i.i.i ] ; <<4 x float>> [#uses=1]
- %aniso.0.i.i.i = phi i32 [ 0, %entry ], [ %indvar.next.i.i.i, %bb4426.i.i.i ] ; <i32> [#uses=1]
- br i1 false, label %glvmInterpretFPTransformFour6.exit, label %bb4426.i.i.i
-
-glvmInterpretFPTransformFour6.exit: ; preds = %bb7551.i.i.i
- unreachable
-}
-
-declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-12-PrivateEHSymbol.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-12-12-PrivateEHSymbol.ll
deleted file mode 100644
index e97b63d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-12-PrivateEHSymbol.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu | grep ^.L_Z1fv.eh
-; RUN: llc < %s -march=x86 -mtriple=i686-unknown-linux-gnu | grep ^.L_Z1fv.eh
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin9 | grep ^__Z1fv.eh
-; RUN: llc < %s -march=x86 -mtriple=i386-apple-darwin9 | grep ^__Z1fv.eh
-
-define void @_Z1fv() {
-entry:
- br label %return
-
-return:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-16-BadShift.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-12-16-BadShift.ll
deleted file mode 100644
index 6c70c5b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-16-BadShift.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s | not grep shrl
-; Note: this test is really trying to make sure that the shift
-; returns the right result; shrl is most likely wrong,
-; but if CodeGen starts legitimately using an shrl here,
-; please adjust the test appropriately.
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-linux-gnu"
-@.str = internal constant [6 x i8] c"%lld\0A\00" ; <[6 x i8]*> [#uses=1]
-
-define i64 @mebbe_shift(i32 %xx, i32 %test) nounwind {
-entry:
- %conv = zext i32 %xx to i64 ; <i64> [#uses=1]
- %tobool = icmp ne i32 %test, 0 ; <i1> [#uses=1]
- %shl = select i1 %tobool, i64 3, i64 0 ; <i64> [#uses=1]
- %x.0 = shl i64 %conv, %shl ; <i64> [#uses=1]
- ret i64 %x.0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-16-dagcombine-4.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-12-16-dagcombine-4.ll
deleted file mode 100644
index 3080d08..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-16-dagcombine-4.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86 | grep "(%esp)" | count 2
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.5"
-; a - a should be found and removed, leaving refs to only L and P
-define i32 @test(i32 %a, i32 %L, i32 %P) nounwind {
-entry:
- %0 = sub i32 %a, %L
- %1 = add i32 %P, %0
- %2 = sub i32 %1, %a
- br label %return
-
-return: ; preds = %bb3
- ret i32 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-19-EarlyClobberBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-12-19-EarlyClobberBug.ll
deleted file mode 100644
index a6cabc4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-19-EarlyClobberBug.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -asm-verbose=0 | FileCheck %s
-; PR3149
-; Make sure the copy after inline asm is not coalesced away.
-
-; CHECK: ## InlineAsm End
-; CHECK-NEXT: BB1_2:
-; CHECK-NEXT: movl %esi, %eax
-
-
-@"\01LC" = internal constant [7 x i8] c"n0=%d\0A\00" ; <[7 x i8]*> [#uses=1]
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (i32 (i64, i64)* @umoddi3 to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define i32 @umoddi3(i64 %u, i64 %v) nounwind noinline {
-entry:
- %0 = trunc i64 %v to i32 ; <i32> [#uses=2]
- %1 = trunc i64 %u to i32 ; <i32> [#uses=4]
- %2 = lshr i64 %u, 32 ; <i64> [#uses=1]
- %3 = trunc i64 %2 to i32 ; <i32> [#uses=2]
- %4 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([7 x i8]* @"\01LC", i32 0, i32 0), i32 %1) nounwind ; <i32> [#uses=0]
- %5 = icmp ult i32 %1, %0 ; <i1> [#uses=1]
- br i1 %5, label %bb2, label %bb
-
-bb: ; preds = %entry
- %6 = lshr i64 %v, 32 ; <i64> [#uses=1]
- %7 = trunc i64 %6 to i32 ; <i32> [#uses=1]
- %asmtmp = tail call { i32, i32 } asm "subl $5,$1\0A\09sbbl $3,$0", "=r,=&r,0,imr,1,imr,~{dirflag},~{fpsr},~{flags}"(i32 %3, i32 %7, i32 %1, i32 %0) nounwind ; <{ i32, i32 }> [#uses=2]
- %asmresult = extractvalue { i32, i32 } %asmtmp, 0 ; <i32> [#uses=1]
- %asmresult1 = extractvalue { i32, i32 } %asmtmp, 1 ; <i32> [#uses=1]
- br label %bb2
-
-bb2: ; preds = %bb, %entry
- %n1.0 = phi i32 [ %asmresult, %bb ], [ %3, %entry ] ; <i32> [#uses=1]
- %n0.0 = phi i32 [ %asmresult1, %bb ], [ %1, %entry ] ; <i32> [#uses=1]
- %8 = add i32 %n0.0, %n1.0 ; <i32> [#uses=1]
- ret i32 %8
-}
-
-declare i32 @printf(i8*, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-22-dagcombine-5.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-12-22-dagcombine-5.ll
deleted file mode 100644
index 75773e0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-22-dagcombine-5.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86 | grep "(%esp)" | count 2
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.5"
-; -(-a) - a should be found and removed, leaving refs to only L and P
-define i32 @test(i32 %a, i32 %L, i32 %P) nounwind {
-entry:
- %0 = sub i32 %L, %a
- %1 = sub i32 %P, %0
- %2 = sub i32 %1, %a
- br label %return
-
-return: ; preds = %bb3
- ret i32 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-23-crazy-address.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-12-23-crazy-address.ll
deleted file mode 100644
index 2edcaea..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-23-crazy-address.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -march=x86 -relocation-model=static | grep {lea.*X.*esp} | count 2
-
-@X = external global [0 x i32]
-
-define void @foo() nounwind {
-entry:
- %Y = alloca i32
- call void @frob(i32* %Y) nounwind
- %Y3 = bitcast i32* %Y to i8*
- %ctg2 = getelementptr i8* %Y3, i32 ptrtoint ([0 x i32]* @X to i32)
- %0 = ptrtoint i8* %ctg2 to i32
- call void @borf(i32 %0) nounwind
- ret void
-}
-
-define void @bar(i32 %i) nounwind {
-entry:
- %Y = alloca [10 x i32]
- %0 = getelementptr [10 x i32]* %Y, i32 0, i32 0
- call void @frob(i32* %0) nounwind
- %1 = getelementptr [0 x i32]* @X, i32 0, i32 %i
- %2 = getelementptr [10 x i32]* %Y, i32 0, i32 0
- %3 = ptrtoint i32* %2 to i32
- %4 = bitcast i32* %1 to i8*
- %ctg2 = getelementptr i8* %4, i32 %3
- %5 = ptrtoint i8* %ctg2 to i32
- call void @borf(i32 %5) nounwind
- ret void
-}
-
-declare void @frob(i32*)
-
-declare void @borf(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-23-dagcombine-6.ll b/libclamav/c++/llvm/test/CodeGen/X86/2008-12-23-dagcombine-6.ll
deleted file mode 100644
index bae9283..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2008-12-23-dagcombine-6.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 | grep "(%esp)" | count 4
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.5"
-; a - a should be found and removed, leaving refs to only L and P
-define i32 @test(i32 %a, i32 %L, i32 %P) nounwind {
-entry:
- %0 = add i32 %a, %L
- %1 = add i32 %P, %0
- %2 = sub i32 %1, %a
- br label %return
-
-return: ; preds = %bb3
- ret i32 %2
-}
-define i32 @test2(i32 %a, i32 %L, i32 %P) nounwind {
-entry:
- %0 = add i32 %L, %a
- %1 = add i32 %P, %0
- %2 = sub i32 %1, %a
- br label %return
-
-return: ; preds = %bb3
- ret i32 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-12-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-01-12-CoalescerBug.ll
deleted file mode 100644
index 27a7113..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-12-CoalescerBug.ll
+++ /dev/null
@@ -1,84 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | grep movq | count 2
-; PR3311
-
- %struct.CUMULATIVE_ARGS = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.VEC_basic_block_base = type { i32, i32, [1 x %struct.basic_block_def*] }
- %struct.VEC_basic_block_gc = type { %struct.VEC_basic_block_base }
- %struct.VEC_edge_base = type { i32, i32, [1 x %struct.edge_def*] }
- %struct.VEC_edge_gc = type { %struct.VEC_edge_base }
- %struct.VEC_rtx_base = type { i32, i32, [1 x %struct.rtx_def*] }
- %struct.VEC_rtx_gc = type { %struct.VEC_rtx_base }
- %struct.VEC_temp_slot_p_base = type { i32, i32, [1 x %struct.temp_slot*] }
- %struct.VEC_temp_slot_p_gc = type { %struct.VEC_temp_slot_p_base }
- %struct.VEC_tree_base = type { i32, i32, [1 x %struct.tree_node*] }
- %struct.VEC_tree_gc = type { %struct.VEC_tree_base }
- %struct._obstack_chunk = type { i8*, %struct._obstack_chunk*, [4 x i8] }
- %struct.basic_block_def = type { %struct.tree_node*, %struct.VEC_edge_gc*, %struct.VEC_edge_gc*, i8*, %struct.loop*, [2 x %struct.et_node*], %struct.basic_block_def*, %struct.basic_block_def*, %struct.basic_block_il_dependent, %struct.tree_node*, %struct.edge_prediction*, i64, i32, i32, i32, i32 }
- %struct.basic_block_il_dependent = type { %struct.rtl_bb_info* }
- %struct.bitmap_element_def = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, i32, [2 x i64] }
- %struct.bitmap_head_def = type { %struct.bitmap_element_def*, %struct.bitmap_element_def*, i32, %struct.bitmap_obstack* }
- %struct.bitmap_obstack = type { %struct.bitmap_element_def*, %struct.bitmap_head_def*, %struct.obstack }
- %struct.block_symbol = type { [3 x %struct.rtunion], %struct.object_block*, i64 }
- %struct.c_arg_info = type { %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i8 }
- %struct.c_language_function = type { %struct.stmt_tree_s }
- %struct.c_switch = type opaque
- %struct.control_flow_graph = type { %struct.basic_block_def*, %struct.basic_block_def*, %struct.VEC_basic_block_gc*, i32, i32, i32, %struct.VEC_basic_block_gc*, i32 }
- %struct.edge_def = type { %struct.basic_block_def*, %struct.basic_block_def*, %struct.edge_def_insns, i8*, %struct.location_t*, i32, i32, i64, i32 }
- %struct.edge_def_insns = type { %struct.rtx_def* }
- %struct.edge_prediction = type opaque
- %struct.eh_status = type opaque
- %struct.emit_status = type { i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack*, i32, %struct.location_t, i32, i8*, %struct.rtx_def** }
- %struct.et_node = type opaque
- %struct.expr_status = type { i32, i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
- %struct.function = type { %struct.eh_status*, %struct.expr_status*, %struct.emit_status*, %struct.varasm_status*, %struct.control_flow_graph*, %struct.tree_node*, %struct.function*, i32, i32, i32, i32, %struct.rtx_def*, %struct.CUMULATIVE_ARGS, %struct.rtx_def*, %struct.rtx_def*, %struct.initial_value_struct*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, i8, i32, i64, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct.VEC_temp_slot_p_gc*, %struct.temp_slot*, %struct.var_refs_queue*, i32, i32, i32, i32, %struct.machine_function*, i32, i32, %struct.language_function*, %struct.htab*, %struct.rtx_def*, i32, i32, i32, %struct.location_t, %struct.VEC_tree_gc*, %struct.tree_node*, i8*, i8*, i8*, i8*, i8*, %struct.tree_node*, i8, i8, i8, i8, i8, i8 }
- %struct.htab = type { i32 (i8*)*, i32 (i8*, i8*)*, void (i8*)*, i8**, i64, i64, i64, i32, i32, i8* (i64, i64)*, void (i8*)*, i8*, i8* (i8*, i64, i64)*, void (i8*, i8*)*, i32 }
- %struct.initial_value_struct = type opaque
- %struct.lang_decl = type { i8 }
- %struct.language_function = type { %struct.c_language_function, %struct.tree_node*, %struct.tree_node*, %struct.c_switch*, %struct.c_arg_info*, i32, i32, i32, i32 }
- %struct.location_t = type { i8*, i32 }
- %struct.loop = type opaque
- %struct.machine_function = type { %struct.stack_local_entry*, i8*, %struct.rtx_def*, i32, i32, [4 x i32], i32, i32, i32 }
- %struct.object_block = type { %struct.section*, i32, i64, %struct.VEC_rtx_gc*, %struct.VEC_rtx_gc* }
- %struct.obstack = type { i64, %struct._obstack_chunk*, i8*, i8*, i8*, i64, i32, %struct._obstack_chunk* (i8*, i64)*, void (i8*, %struct._obstack_chunk*)*, i8*, i8 }
- %struct.omp_clause_subcode = type { i32 }
- %struct.rtl_bb_info = type { %struct.rtx_def*, %struct.rtx_def*, %struct.bitmap_head_def*, %struct.bitmap_head_def*, %struct.rtx_def*, %struct.rtx_def*, i32 }
- %struct.rtunion = type { i8* }
- %struct.rtx_def = type { i16, i8, i8, %struct.u }
- %struct.section = type { %struct.unnamed_section }
- %struct.sequence_stack = type { %struct.rtx_def*, %struct.rtx_def*, %struct.sequence_stack* }
- %struct.stack_local_entry = type opaque
- %struct.stmt_tree_s = type { %struct.tree_node*, i32 }
- %struct.temp_slot = type opaque
- %struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, %union.tree_ann_d*, i8, i8, i8, i8, i8 }
- %struct.tree_decl_common = type { %struct.tree_decl_minimal, %struct.tree_node*, i8, i8, i8, i8, i8, i32, i32, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_decl* }
- %struct.tree_decl_minimal = type { %struct.tree_common, %struct.location_t, i32, %struct.tree_node*, %struct.tree_node* }
- %struct.tree_decl_non_common = type { %struct.tree_decl_with_vis, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node* }
- %struct.tree_decl_with_rtl = type { %struct.tree_decl_common, %struct.rtx_def*, i32 }
- %struct.tree_decl_with_vis = type { %struct.tree_decl_with_rtl, %struct.tree_node*, %struct.tree_node*, i8, i8, i8, i8 }
- %struct.tree_function_decl = type { %struct.tree_decl_non_common, i32, i8, i8, i64, %struct.function* }
- %struct.tree_node = type { %struct.tree_function_decl }
- %struct.u = type { %struct.block_symbol }
- %struct.unnamed_section = type { %struct.omp_clause_subcode, void (i8*)*, i8*, %struct.section* }
- %struct.var_refs_queue = type { %struct.rtx_def*, i32, i32, %struct.var_refs_queue* }
- %struct.varasm_status = type opaque
- %union.tree_ann_d = type opaque
-@.str1 = external constant [31 x i8] ; <[31 x i8]*> [#uses=1]
-@integer_types = external global [11 x %struct.tree_node*] ; <[11 x %struct.tree_node*]*> [#uses=1]
-@__FUNCTION__.31164 = external constant [23 x i8], align 16 ; <[23 x i8]*> [#uses=1]
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (i32 (i32, i32)* @c_common_type_for_size to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define i32 @c_common_type_for_size(i32 %bits, i32 %unsignedp) nounwind {
-entry:
- %0 = load %struct.tree_node** getelementptr ([11 x %struct.tree_node*]* @integer_types, i32 0, i64 5), align 8 ; <%struct.tree_node*> [#uses=1]
- br i1 false, label %bb16, label %bb
-
-bb: ; preds = %entry
- tail call void @tree_class_check_failed(%struct.tree_node* %0, i32 2, i8* getelementptr ([31 x i8]* @.str1, i32 0, i64 0), i32 1785, i8* getelementptr ([23 x i8]* @__FUNCTION__.31164, i32 0, i32 0)) noreturn nounwind
- unreachable
-
-bb16: ; preds = %entry
- %tmp = add i32 %bits, %unsignedp ; <i32> [#uses=1]
- ret i32 %tmp
-}
-
-declare void @tree_class_check_failed(%struct.tree_node*, i32, i8*, i32, i8*) noreturn
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-13-DoubleUpdate.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-01-13-DoubleUpdate.ll
deleted file mode 100644
index 9c71469..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-13-DoubleUpdate.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -disable-mmx -enable-legalize-types-checking
-
-declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>) nounwind readnone
-
-define void @__mindd16(<16 x double>* sret %vec.result, <16 x double> %x, double %y) nounwind {
-entry:
- %tmp3.i = shufflevector <16 x double> zeroinitializer, <16 x double> undef, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7 > ; <<8 x double>> [#uses=1]
- %tmp10.i.i = shufflevector <8 x double> %tmp3.i, <8 x double> undef, <4 x i32> < i32 4, i32 5, i32 6, i32 7 > ; <<4 x double>> [#uses=1]
- %tmp3.i2.i.i = shufflevector <4 x double> %tmp10.i.i, <4 x double> undef, <2 x i32> < i32 0, i32 1 > ; <<2 x double>> [#uses=1]
- %0 = tail call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> zeroinitializer, <2 x double> %tmp3.i2.i.i) nounwind ; <<2 x double>> [#uses=1]
- %tmp5.i3.i.i = shufflevector <2 x double> %0, <2 x double> undef, <4 x i32> < i32 0, i32 1, i32 undef, i32 undef > ; <<4 x double>> [#uses=1]
- %tmp6.i4.i.i = shufflevector <4 x double> zeroinitializer, <4 x double> %tmp5.i3.i.i, <4 x i32> < i32 4, i32 5, i32 2, i32 3 > ; <<4 x double>> [#uses=1]
- %tmp14.i8.i.i = shufflevector <4 x double> %tmp6.i4.i.i, <4 x double> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 4, i32 5 > ; <<4 x double>> [#uses=1]
- %tmp13.i.i = shufflevector <4 x double> %tmp14.i8.i.i, <4 x double> undef, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef > ; <<8 x double>> [#uses=1]
- %tmp14.i.i = shufflevector <8 x double> zeroinitializer, <8 x double> %tmp13.i.i, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11 > ; <<8 x double>> [#uses=1]
- %tmp5.i = shufflevector <8 x double> %tmp14.i.i, <8 x double> undef, <16 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef > ; <<16 x double>> [#uses=1]
- %tmp6.i = shufflevector <16 x double> %x, <16 x double> %tmp5.i, <16 x i32> < i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15 > ; <<16 x double>> [#uses=1]
- %tmp14.i = shufflevector <16 x double> %tmp6.i, <16 x double> zeroinitializer, <16 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23 > ; <<16 x double>> [#uses=1]
- store <16 x double> %tmp14.i, <16 x double>* %vec.result
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-16-SchedulerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-01-16-SchedulerBug.ll
deleted file mode 100644
index 99bef6c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-16-SchedulerBug.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin
-; rdar://6501631
-
- %CF = type { %Register }
- %XXV = type { i32 (...)** }
- %Register = type { %"struct.XXC::BCFs", i32 }
- %"struct.XXC::BCFs" = type { i32 }
-
-declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind
-
-define fastcc %XXV* @bar(%CF* %call_frame, %XXV** %exception) nounwind {
-prologue:
- %param_x = load %XXV** null ; <%XXV*> [#uses=1]
- %unique_1.i = ptrtoint %XXV* %param_x to i1 ; <i1> [#uses=1]
- br i1 %unique_1.i, label %NextVerify42, label %FailedVerify
-
-NextVerify42: ; preds = %prologue
- %param_y = load %XXV** null ; <%XXV*> [#uses=1]
- %unique_1.i58 = ptrtoint %XXV* %param_y to i1 ; <i1> [#uses=1]
- br i1 %unique_1.i58, label %function_setup.cont, label %FailedVerify
-
-function_setup.cont: ; preds = %NextVerify42
- br i1 false, label %label13, label %label
-
-label: ; preds = %function_setup.cont
- %has_exn = icmp eq %XXV* null, null ; <i1> [#uses=1]
- br i1 %has_exn, label %kjsNumberLiteral.exit, label %handle_exception
-
-kjsNumberLiteral.exit: ; preds = %label
- %0 = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 0, i32 0) ; <{ i32, i1 }> [#uses=2]
- %intAdd = extractvalue { i32, i1 } %0, 0 ; <i32> [#uses=2]
- %intAddOverflow = extractvalue { i32, i1 } %0, 1 ; <i1> [#uses=1]
- %toint56 = ashr i32 %intAdd, 1 ; <i32> [#uses=1]
- %toFP57 = sitofp i32 %toint56 to double ; <double> [#uses=1]
- br i1 %intAddOverflow, label %rematerializeAdd, label %label13
-
-label13: ; preds = %kjsNumberLiteral.exit, %function_setup.cont
- %var_lr1.0 = phi double [ %toFP57, %kjsNumberLiteral.exit ], [ 0.000000e+00, %function_setup.cont ] ; <double> [#uses=0]
- unreachable
-
-FailedVerify: ; preds = %NextVerify42, %prologue
- ret %XXV* null
-
-rematerializeAdd: ; preds = %kjsNumberLiteral.exit
- %rematerializedInt = sub i32 %intAdd, 0 ; <i32> [#uses=0]
- ret %XXV* null
-
-handle_exception: ; preds = %label
- ret %XXV* undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-16-UIntToFP.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-01-16-UIntToFP.ll
deleted file mode 100644
index 2eab5f1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-16-UIntToFP.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -march=x86
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
-
-define hidden float @__floatundisf(i64 %u) nounwind readnone {
-entry:
- %0 = icmp ugt i64 %u, 9007199254740991 ; <i1> [#uses=1]
- br i1 %0, label %bb, label %bb2
-
-bb: ; preds = %entry
- %1 = and i64 %u, 2047 ; <i64> [#uses=1]
- %2 = icmp eq i64 %1, 0 ; <i1> [#uses=1]
- br i1 %2, label %bb2, label %bb1
-
-bb1: ; preds = %bb
- %3 = or i64 %u, 2048 ; <i64> [#uses=1]
- %4 = and i64 %3, -2048 ; <i64> [#uses=1]
- br label %bb2
-
-bb2: ; preds = %bb1, %bb, %entry
- %u_addr.0 = phi i64 [ %4, %bb1 ], [ %u, %entry ], [ %u, %bb ] ; <i64> [#uses=2]
- %5 = lshr i64 %u_addr.0, 32 ; <i64> [#uses=1]
- %6 = trunc i64 %5 to i32 ; <i32> [#uses=1]
- %7 = uitofp i32 %6 to double ; <double> [#uses=1]
- %8 = fmul double %7, 0x41F0000000000000 ; <double> [#uses=1]
- %9 = trunc i64 %u_addr.0 to i32 ; <i32> [#uses=1]
- %10 = uitofp i32 %9 to double ; <double> [#uses=1]
- %11 = fadd double %10, %8 ; <double> [#uses=1]
- %12 = fptrunc double %11 to float ; <float> [#uses=1]
- ret float %12
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-18-ConstantExprCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-01-18-ConstantExprCrash.ll
deleted file mode 100644
index f895336..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-18-ConstantExprCrash.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s
-; rdar://6505632
-; reduced from 483.xalancbmk
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin7"
- %"struct.std::basic_ostream<char,std::char_traits<char> >.base" = type { i32 (...)** }
- %"struct.xercesc_2_5::ASCIIRangeFactory" = type { %"struct.std::basic_ostream<char,std::char_traits<char> >.base", i8, i8 }
-@_ZN11xercesc_2_5L17gIdeographicCharsE = external constant [7 x i16] ; <[7 x i16]*> [#uses=3]
-
-define void @_ZN11xercesc_2_515XMLRangeFactory11buildRangesEv(%"struct.xercesc_2_5::ASCIIRangeFactory"* %this) {
-entry:
- br i1 false, label %bb5, label %return
-
-bb5: ; preds = %entry
- br label %bb4.i.i
-
-bb4.i.i: ; preds = %bb4.i.i, %bb5
- br i1 false, label %bb.i51, label %bb4.i.i
-
-bb.i51: ; preds = %bb.i51, %bb4.i.i
- br i1 false, label %bb4.i.i70, label %bb.i51
-
-bb4.i.i70: ; preds = %bb4.i.i70, %bb.i51
- br i1 false, label %_ZN11xercesc_2_59XMLString9stringLenEPKt.exit.i73, label %bb4.i.i70
-
-_ZN11xercesc_2_59XMLString9stringLenEPKt.exit.i73: ; preds = %bb4.i.i70
- %0 = load i16* getelementptr ([7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE, i32 0, i32 add (i32 ashr (i32 sub (i32 ptrtoint (i16* getelementptr ([7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE, i32 0, i32 4) to i32), i32 ptrtoint ([7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE to i32)), i32 1), i32 1)), align 4 ; <i16> [#uses=0]
- br label %bb4.i5.i141
-
-bb4.i5.i141: ; preds = %bb4.i5.i141, %_ZN11xercesc_2_59XMLString9stringLenEPKt.exit.i73
- br label %bb4.i5.i141
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-25-NoSSE.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-01-25-NoSSE.ll
deleted file mode 100644
index 0583ef1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-25-NoSSE.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=-sse,-sse2 | not grep xmm
-; PR3402
-target datalayout =
-"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
- %struct.ktermios = type { i32, i32, i32, i32, i8, [19 x i8], i32, i32 }
-
-define void @foo() nounwind {
-entry:
- %termios = alloca %struct.ktermios, align 8
- %termios1 = bitcast %struct.ktermios* %termios to i8*
- call void @llvm.memset.i64(i8* %termios1, i8 0, i64 44, i32 8)
- call void @bar(%struct.ktermios* %termios) nounwind
- ret void
-}
-
-declare void @llvm.memset.i64(i8* nocapture, i8, i64, i32) nounwind
-
-declare void @bar(%struct.ktermios*)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-26-WrongCheck.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-01-26-WrongCheck.ll
deleted file mode 100644
index 117ff47..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-26-WrongCheck.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=x86 -enable-legalize-types-checking
-; PR3393
-
-define void @foo(i32 inreg %x) {
- %t709 = select i1 false, i32 0, i32 %x ; <i32> [#uses=1]
- %t711 = add i32 %t709, 1 ; <i32> [#uses=4]
- %t801 = icmp slt i32 %t711, 0 ; <i1> [#uses=1]
- %t712 = zext i32 %t711 to i64 ; <i64> [#uses=1]
- %t804 = select i1 %t801, i64 0, i64 %t712 ; <i64> [#uses=1]
- store i64 %t804, i64* null
- %t815 = icmp slt i32 %t711, 0 ; <i1> [#uses=1]
- %t814 = sext i32 %t711 to i64 ; <i64> [#uses=1]
- %t816 = select i1 %t815, i64 0, i64 %t814 ; <i64> [#uses=1]
- store i64 %t816, i64* null
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-27-NullStrings.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-01-27-NullStrings.ll
deleted file mode 100644
index 8684f4a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-27-NullStrings.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin | FileCheck %s
-; CHECK: .section __TEXT,__cstring,cstring_literals
-
-@x = internal constant [1 x i8] zeroinitializer ; <[1 x i8]*> [#uses=1]
-
-@y = global [1 x i8]* @x
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-29-LocalRegAllocBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-01-29-LocalRegAllocBug.ll
deleted file mode 100644
index ce3ea82..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-29-LocalRegAllocBug.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9.6 -regalloc=local -disable-fp-elim
-; rdar://6538384
-
- %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
- %struct.Lit = type { i32 }
- %struct.StreamBuffer = type { %struct.FILE*, [1048576 x i8], i32, i32 }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
-
-declare fastcc i32 @_Z8parseIntI12StreamBufferEiRT_(%struct.StreamBuffer*)
-
-declare i8* @llvm.eh.exception() nounwind
-
-define i32 @main(i32 %argc, i8** nocapture %argv) noreturn {
-entry:
- %0 = invoke fastcc i32 @_Z8parseIntI12StreamBufferEiRT_(%struct.StreamBuffer* null)
- to label %bb1.i16.i.i unwind label %lpad.i.i ; <i32> [#uses=0]
-
-bb1.i16.i.i: ; preds = %entry
- br i1 false, label %bb.i.i.i.i, label %_ZN3vecI3LitE4pushERKS0_.exit.i.i.i
-
-bb.i.i.i.i: ; preds = %bb1.i16.i.i
- br label %_ZN3vecI3LitE4pushERKS0_.exit.i.i.i
-
-_ZN3vecI3LitE4pushERKS0_.exit.i.i.i: ; preds = %bb.i.i.i.i, %bb1.i16.i.i
- %lits.i.i.0.0 = phi %struct.Lit* [ null, %bb1.i16.i.i ], [ null, %bb.i.i.i.i ] ; <%struct.Lit*> [#uses=1]
- %1 = invoke fastcc i32 @_Z8parseIntI12StreamBufferEiRT_(%struct.StreamBuffer* null)
- to label %.noexc21.i.i unwind label %lpad.i.i ; <i32> [#uses=0]
-
-.noexc21.i.i: ; preds = %_ZN3vecI3LitE4pushERKS0_.exit.i.i.i
- unreachable
-
-lpad.i.i: ; preds = %_ZN3vecI3LitE4pushERKS0_.exit.i.i.i, %entry
- %lits.i.i.0.3 = phi %struct.Lit* [ %lits.i.i.0.0, %_ZN3vecI3LitE4pushERKS0_.exit.i.i.i ], [ null, %entry ] ; <%struct.Lit*> [#uses=1]
- %eh_ptr.i.i = call i8* @llvm.eh.exception() ; <i8*> [#uses=0]
- free %struct.Lit* %lits.i.i.0.3
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-31-BigShift.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-01-31-BigShift.ll
deleted file mode 100644
index 4eb0ec1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-31-BigShift.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep and
-; PR3401
-
-define void @x(i288 %i) nounwind {
- call void @add(i288 %i)
- ret void
-}
-
-declare void @add(i288)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-31-BigShift2.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-01-31-BigShift2.ll
deleted file mode 100644
index 9d24084..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-31-BigShift2.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {mov.*56}
-; PR3449
-
-define void @test(<8 x double>* %P, i64* %Q) nounwind {
- %A = load <8 x double>* %P ; <<8 x double>> [#uses=1]
- %B = bitcast <8 x double> %A to i512 ; <i512> [#uses=1]
- %C = lshr i512 %B, 448 ; <i512> [#uses=1]
- %D = trunc i512 %C to i64 ; <i64> [#uses=1]
- volatile store i64 %D, i64* %Q
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-31-BigShift3.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-01-31-BigShift3.ll
deleted file mode 100644
index 1b531e3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-01-31-BigShift3.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR3450
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin7"
- %struct.BitMap = type { i8* }
- %struct.BitMapListStruct = type { %struct.BitMap, %struct.BitMapListStruct*, %struct.BitMapListStruct* }
- %struct.Material = type { float, float, float, %struct.Material*, %struct.Material* }
- %struct.ObjPoint = type { double, double, double, double, double, double }
- %struct.ObjectStruct = type { [57 x i8], %struct.PointListStruct*, %struct.Poly3Struct*, %struct.Poly4Struct*, %struct.Texture*, %struct.Material*, %struct.Point, i32, i32, %struct.Point, %struct.Point, %struct.Point, %struct.ObjectStruct*, %struct.ObjectStruct*, i32, i32, i32, i32, i32, i32, i32, %struct.ObjectStruct*, %struct.ObjectStruct* }
- %struct.Point = type { double, double, double }
- %struct.PointListStruct = type { %struct.ObjPoint*, %struct.PointListStruct*, %struct.PointListStruct* }
- %struct.Poly3Struct = type { [3 x %struct.ObjPoint*], %struct.Material*, %struct.Texture*, %struct.Poly3Struct*, %struct.Poly3Struct* }
- %struct.Poly4Struct = type { [4 x %struct.ObjPoint*], %struct.Material*, %struct.Texture*, %struct.Poly4Struct*, %struct.Poly4Struct* }
- %struct.Texture = type { %struct.Point, %struct.BitMapListStruct*, %struct.Point, %struct.Point, %struct.Point, %struct.Texture*, %struct.Texture* }
-
-define fastcc void @ScaleObjectAdd(%struct.ObjectStruct* %o, double %sx, double %sy, double %sz) nounwind {
-entry:
- %sz101112.ins = or i960 0, 0 ; <i960> [#uses=1]
- br i1 false, label %return, label %bb1.preheader
-
-bb1.preheader: ; preds = %entry
- %0 = lshr i960 %sz101112.ins, 640 ; <i960> [#uses=0]
- br label %bb1
-
-bb1: ; preds = %bb1, %bb1.preheader
- br label %bb1
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-01-LargeMask.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-02-01-LargeMask.ll
deleted file mode 100644
index c4042e6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-01-LargeMask.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR3453
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-linux-gnu"
- %struct.cl_engine = type { i32, i16, i32, i8**, i8**, i8*, i8*, i8*, i8*, i8*, i8*, i8* }
- %struct.cl_limits = type { i32, i32, i32, i32, i16, i32 }
- %struct.cli_ac_alt = type { i8, i8*, i16, i16, %struct.cli_ac_alt* }
- %struct.cli_ac_node = type { i8, i8, %struct.cli_ac_patt*, %struct.cli_ac_node**, %struct.cli_ac_node* }
- %struct.cli_ac_patt = type { i16*, i16*, i16, i16, i8, i32, i32, i8*, i8*, i32, i16, i16, i16, i16, %struct.cli_ac_alt**, i8, i16, %struct.cli_ac_patt*, %struct.cli_ac_patt* }
- %struct.cli_bm_patt = type { i8*, i8*, i16, i16, i8*, i8*, i8, %struct.cli_bm_patt*, i16 }
- %struct.cli_ctx = type { i8**, i32*, %struct.cli_matcher*, %struct.cl_engine*, %struct.cl_limits*, i32, i32, i32, i32, %struct.cli_dconf* }
- %struct.cli_dconf = type { i32, i32, i32, i32, i32, i32, i32 }
- %struct.cli_matcher = type { i16, i8, i8*, %struct.cli_bm_patt**, i32*, i32, i8, i8, %struct.cli_ac_node*, %struct.cli_ac_node**, %struct.cli_ac_patt**, i32, i32, i32 }
-
-define fastcc i32 @cli_scanautoit(i32 %desc, %struct.cli_ctx* %ctx, i32 %offset) nounwind {
-entry:
- br i1 false, label %bb.i49.i72, label %bb14
-
-bb.i49.i72: ; preds = %bb.i49.i72, %entry
- %UNP.i1482.0 = phi i288 [ %.ins659, %bb.i49.i72 ], [ undef, %entry ] ; <i288> [#uses=1]
- %0 = load i32* null, align 4 ; <i32> [#uses=1]
- %1 = xor i32 %0, 17834 ; <i32> [#uses=1]
- %2 = zext i32 %1 to i288 ; <i288> [#uses=1]
- %3 = shl i288 %2, 160 ; <i288> [#uses=1]
- %UNP.i1482.in658.mask = and i288 %UNP.i1482.0, -6277101733925179126504886505003981583386072424808101969921 ; <i288> [#uses=1]
- %.ins659 = or i288 %3, %UNP.i1482.in658.mask ; <i288> [#uses=1]
- br label %bb.i49.i72
-
-bb14: ; preds = %entry
- ret i32 -123
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-03-AnalyzedTwice.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-02-03-AnalyzedTwice.ll
deleted file mode 100644
index e75af13..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-03-AnalyzedTwice.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR3411
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-linux-gnu"
-@g_3 = external global i32 ; <i32*> [#uses=1]
-
-define void @bar(i64 %p_66) nounwind {
-entry:
- br i1 false, label %bb, label %bb1
-
-bb: ; preds = %entry
- unreachable
-
-bb1: ; preds = %entry
- %0 = load i32* @g_3, align 4 ; <i32> [#uses=2]
- %1 = sext i32 %0 to i64 ; <i64> [#uses=1]
- %2 = or i64 %1, %p_66 ; <i64> [#uses=1]
- %3 = shl i64 %2, 0 ; <i64> [#uses=1]
- %4 = and i64 %3, %p_66 ; <i64> [#uses=1]
- %5 = icmp eq i64 %4, 1 ; <i1> [#uses=1]
- %6 = trunc i64 %p_66 to i32 ; <i32> [#uses=2]
- %7 = or i32 %0, %6 ; <i32> [#uses=2]
- %8 = sub i32 %7, %6 ; <i32> [#uses=1]
- %iftmp.0.0 = select i1 %5, i32 %8, i32 %7 ; <i32> [#uses=1]
- %9 = tail call i32 @foo(i32 %iftmp.0.0) nounwind ; <i32> [#uses=0]
- ret void
-}
-
-declare i32 @foo(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-04-sext-i64-gep.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-02-04-sext-i64-gep.ll
deleted file mode 100644
index 4880f62..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-04-sext-i64-gep.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s | grep p-92
-; PR3481
-; The offset should print as -92, not +17179869092
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-linux-gnu"
-@p = common global [10 x i32] zeroinitializer, align 4 ; <[10 x i32]*>
-@g = global [1 x i32*] [ i32* bitcast (i8* getelementptr (i8* bitcast
-([10 x i32]* @p to i8*), i64 17179869092) to i32*) ], align 4
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-05-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-02-05-CoalescerBug.ll
deleted file mode 100644
index 0ffa8fd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-05-CoalescerBug.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2,-sse41 | grep movss | count 2
-; RUN: llc < %s -march=x86 -mattr=+sse2,-sse41 | grep movaps | count 4
-
-define i1 @t([2 x float]* %y, [2 x float]* %w, i32, [2 x float]* %x.pn59, i32 %smax190, i32 %j.1180, <4 x float> %wu.2179, <4 x float> %wr.2178, <4 x float>* %tmp89.out, <4 x float>* %tmp107.out, i32* %indvar.next218.out) nounwind {
-newFuncRoot:
- %tmp82 = insertelement <4 x float> %wr.2178, float 0.000000e+00, i32 0 ; <<4 x float>> [#uses=1]
- %tmp85 = insertelement <4 x float> %tmp82, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
- %tmp87 = insertelement <4 x float> %tmp85, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
- %tmp89 = insertelement <4 x float> %tmp87, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp89, <4 x float>* %tmp89.out
- ret i1 false
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-08-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-02-08-CoalescerBug.ll
deleted file mode 100644
index 908cc08..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-08-CoalescerBug.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR3486
-
-define i32 @foo(i8 signext %p_26) nounwind {
-entry:
- %0 = icmp eq i8 %p_26, 0 ; <i1> [#uses=2]
- %or.cond = or i1 false, %0 ; <i1> [#uses=2]
- %iftmp.1.0 = zext i1 %or.cond to i16 ; <i16> [#uses=1]
- br i1 %0, label %bb.i, label %bar.exit
-
-bb.i: ; preds = %entry
- %1 = zext i1 %or.cond to i32 ; <i32> [#uses=1]
- %2 = sdiv i32 %1, 0 ; <i32> [#uses=1]
- %3 = trunc i32 %2 to i16 ; <i16> [#uses=1]
- br label %bar.exit
-
-bar.exit: ; preds = %bb.i, %entry
- %4 = phi i16 [ %3, %bb.i ], [ %iftmp.1.0, %entry ] ; <i16> [#uses=1]
- %5 = trunc i16 %4 to i8 ; <i8> [#uses=1]
- %6 = sext i8 %5 to i32 ; <i32> [#uses=1]
- ret i32 %6
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll
deleted file mode 100644
index 1284b0d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s
-; PR3537
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.6"
- %struct.GetBitContext = type <{ i8*, i8*, i32, i32 }>
-
-define i32 @alac_decode_frame() nounwind {
-entry:
- %tmp2 = load i8** null ; <i8*> [#uses=2]
- %tmp34 = getelementptr i8* %tmp2, i32 4 ; <i8*> [#uses=2]
- %tmp5.i424 = bitcast i8* %tmp34 to i8** ; <i8**> [#uses=2]
- %tmp15.i = getelementptr i8* %tmp2, i32 12 ; <i8*> [#uses=1]
- %0 = bitcast i8* %tmp15.i to i32* ; <i32*> [#uses=1]
- br i1 false, label %if.then43, label %if.end47
-
-if.then43: ; preds = %entry
- ret i32 0
-
-if.end47: ; preds = %entry
- %tmp5.i590 = load i8** %tmp5.i424 ; <i8*> [#uses=0]
- store i32 19, i32* %0
- %tmp6.i569 = load i8** %tmp5.i424 ; <i8*> [#uses=0]
- %1 = call i32 asm "bswap $0", "=r,0,~{dirflag},~{fpsr},~{flags}"(i32 0) nounwind ; <i32> [#uses=0]
- br i1 false, label %bb.nph, label %if.then63
-
-if.then63: ; preds = %if.end47
- unreachable
-
-bb.nph: ; preds = %if.end47
- %2 = bitcast i8* %tmp34 to %struct.GetBitContext* ; <%struct.GetBitContext*> [#uses=1]
- %call9.i = call fastcc i32 @decode_scalar(%struct.GetBitContext* %2, i32 0, i32 0, i32 0) nounwind ; <i32> [#uses=0]
- unreachable
-}
-
-declare fastcc i32 @decode_scalar(%struct.GetBitContext* nocapture, i32, i32, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll
deleted file mode 100644
index 0dca14d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll
+++ /dev/null
@@ -1,85 +0,0 @@
-; RUN: llc < %s
-; RUN: llc < %s -march=x86-64
-; PR3538
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9"
-define signext i8 @foo(i8* %s1) nounwind ssp {
-entry:
- %s1_addr = alloca i8* ; <i8**> [#uses=2]
- %retval = alloca i32 ; <i32*> [#uses=2]
- %saved_stack.1 = alloca i8* ; <i8**> [#uses=2]
- %0 = alloca i32 ; <i32*> [#uses=2]
- %str.0 = alloca [0 x i8]* ; <[0 x i8]**> [#uses=3]
- %1 = alloca i64 ; <i64*> [#uses=2]
- %2 = alloca i64 ; <i64*> [#uses=1]
- %3 = alloca i64 ; <i64*> [#uses=6]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- call void @llvm.dbg.declare(metadata !{i8** %s1_addr}, metadata !0), !dbg !7
- store i8* %s1, i8** %s1_addr
- call void @llvm.dbg.declare(metadata !{[0 x i8]** %str.0}, metadata !8), !dbg !7
- %4 = call i8* @llvm.stacksave(), !dbg !7 ; <i8*> [#uses=1]
- store i8* %4, i8** %saved_stack.1, align 8, !dbg !7
- %5 = load i8** %s1_addr, align 8, !dbg !13 ; <i8*> [#uses=1]
- %6 = call i64 @strlen(i8* %5) nounwind readonly, !dbg !13 ; <i64> [#uses=1]
- %7 = add i64 %6, 1, !dbg !13 ; <i64> [#uses=1]
- store i64 %7, i64* %3, align 8, !dbg !13
- %8 = load i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
- %9 = sub nsw i64 %8, 1, !dbg !13 ; <i64> [#uses=0]
- %10 = load i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
- %11 = mul i64 %10, 8, !dbg !13 ; <i64> [#uses=0]
- %12 = load i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
- store i64 %12, i64* %2, align 8, !dbg !13
- %13 = load i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
- %14 = mul i64 %13, 8, !dbg !13 ; <i64> [#uses=0]
- %15 = load i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
- store i64 %15, i64* %1, align 8, !dbg !13
- %16 = load i64* %1, align 8, !dbg !13 ; <i64> [#uses=1]
- %17 = trunc i64 %16 to i32, !dbg !13 ; <i32> [#uses=1]
- %18 = alloca i8, i32 %17, !dbg !13 ; <i8*> [#uses=1]
- %19 = bitcast i8* %18 to [0 x i8]*, !dbg !13 ; <[0 x i8]*> [#uses=1]
- store [0 x i8]* %19, [0 x i8]** %str.0, align 8, !dbg !13
- %20 = load [0 x i8]** %str.0, align 8, !dbg !15 ; <[0 x i8]*> [#uses=1]
- %21 = getelementptr inbounds [0 x i8]* %20, i64 0, i64 0, !dbg !15 ; <i8*> [#uses=1]
- store i8 0, i8* %21, align 1, !dbg !15
- %22 = load [0 x i8]** %str.0, align 8, !dbg !16 ; <[0 x i8]*> [#uses=1]
- %23 = getelementptr inbounds [0 x i8]* %22, i64 0, i64 0, !dbg !16 ; <i8*> [#uses=1]
- %24 = load i8* %23, align 1, !dbg !16 ; <i8> [#uses=1]
- %25 = sext i8 %24 to i32, !dbg !16 ; <i32> [#uses=1]
- store i32 %25, i32* %0, align 4, !dbg !16
- %26 = load i8** %saved_stack.1, align 8, !dbg !16 ; <i8*> [#uses=1]
- call void @llvm.stackrestore(i8* %26), !dbg !16
- %27 = load i32* %0, align 4, !dbg !16 ; <i32> [#uses=1]
- store i32 %27, i32* %retval, align 4, !dbg !16
- br label %return, !dbg !16
-
-return: ; preds = %entry
- %retval1 = load i32* %retval, !dbg !16 ; <i32> [#uses=1]
- %retval12 = trunc i32 %retval1 to i8, !dbg !16 ; <i8> [#uses=1]
- ret i8 %retval12, !dbg !16
-}
-
-declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
-
-declare i8* @llvm.stacksave() nounwind
-
-declare i64 @strlen(i8*) nounwind readonly
-
-declare void @llvm.stackrestore(i8*) nounwind
-
-!0 = metadata !{i32 459009, metadata !1, metadata !"s1", metadata !2, i32 2, metadata !6} ; [ DW_TAG_arg_variable ]
-!1 = metadata !{i32 458798, i32 0, metadata !2, metadata !"foo", metadata !"foo", metadata !"foo", metadata !2, i32 2, metadata !3, i1 false, i1 true, i32 0, i32 0, null, i1 false} ; [ DW_TAG_subprogram ]
-!2 = metadata !{i32 458769, i32 0, i32 1, metadata !"vla.c", metadata !"/tmp/", metadata !"4.2.1 (Based on Apple Inc. build 5658) (LLVM build)", i1 true, i1 false, metadata !"", i32 0} ; [ DW_TAG_compile_unit ]
-!3 = metadata !{i32 458773, metadata !2, metadata !"", metadata !2, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !4, i32 0, null} ; [ DW_TAG_subroutine_type ]
-!4 = metadata !{metadata !5, metadata !6}
-!5 = metadata !{i32 458788, metadata !2, metadata !"char", metadata !2, i32 0, i64 8, i64 8, i64 0, i32 0, i32 6} ; [ DW_TAG_base_type ]
-!6 = metadata !{i32 458767, metadata !2, metadata !"", metadata !2, i32 0, i64 64, i64 64, i64 0, i32 0, metadata !5} ; [ DW_TAG_pointer_type ]
-!7 = metadata !{i32 2, i32 0, metadata !1, null}
-!8 = metadata !{i32 459008, metadata !1, metadata !"str.0", metadata !2, i32 3, metadata !9} ; [ DW_TAG_auto_variable ]
-!9 = metadata !{i32 458767, metadata !2, metadata !"", metadata !2, i32 0, i64 64, i64 64, i64 0, i32 64, metadata !10} ; [ DW_TAG_pointer_type ]
-!10 = metadata !{i32 458753, metadata !2, metadata !"", metadata !2, i32 0, i64 8, i64 8, i64 0, i32 0, metadata !5, metadata !11, i32 0, null} ; [ DW_TAG_array_type ]
-!11 = metadata !{metadata !12}
-!12 = metadata !{i32 458785, i64 0, i64 0} ; [ DW_TAG_subrange_type ]
-!13 = metadata !{i32 3, i32 0, metadata !14, null}
-!14 = metadata !{i32 458763, metadata !1, i32 0, i32 0} ; [ DW_TAG_lexical_block ]
-!15 = metadata !{i32 4, i32 0, metadata !14, null}
-!16 = metadata !{i32 5, i32 0, metadata !14, null}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-12-InlineAsm-nieZ-constraints.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-02-12-InlineAsm-nieZ-constraints.ll
deleted file mode 100644
index 2e148ad..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-12-InlineAsm-nieZ-constraints.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {\$-81920} | count 3
-; RUN: llc < %s -march=x86 | grep {\$4294885376} | count 1
-
-; ModuleID = 'shant.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.6"
-
-define void @f() nounwind {
-entry:
- call void asm sideeffect "foo $0", "n,~{dirflag},~{fpsr},~{flags}"(i32 -81920) nounwind
- call void asm sideeffect "foo $0", "i,~{dirflag},~{fpsr},~{flags}"(i32 -81920) nounwind
- call void asm sideeffect "foo $0", "e,~{dirflag},~{fpsr},~{flags}"(i32 -81920) nounwind
- call void asm sideeffect "foo $0", "Z,~{dirflag},~{fpsr},~{flags}"(i64 4294885376) nounwind
- br label %return
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-12-SpillerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-02-12-SpillerBug.ll
deleted file mode 100644
index 4f8a5e7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-12-SpillerBug.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-apple-darwin8
-; PR3561
-
-define hidden void @__mulxc3({ x86_fp80, x86_fp80 }* noalias nocapture sret %agg.result, x86_fp80 %a, x86_fp80 %b, x86_fp80 %c, x86_fp80 %d) nounwind {
-entry:
- %0 = fmul x86_fp80 %b, %d ; <x86_fp80> [#uses=1]
- %1 = fsub x86_fp80 0xK00000000000000000000, %0 ; <x86_fp80> [#uses=1]
- %2 = fadd x86_fp80 0xK00000000000000000000, 0xK00000000000000000000 ; <x86_fp80> [#uses=1]
- %3 = fcmp uno x86_fp80 %1, 0xK00000000000000000000 ; <i1> [#uses=1]
- %4 = fcmp uno x86_fp80 %2, 0xK00000000000000000000 ; <i1> [#uses=1]
- %or.cond = and i1 %3, %4 ; <i1> [#uses=1]
- br i1 %or.cond, label %bb47, label %bb71
-
-bb47: ; preds = %entry
- %5 = fcmp uno x86_fp80 %a, 0xK00000000000000000000 ; <i1> [#uses=1]
- br i1 %5, label %bb60, label %bb62
-
-bb60: ; preds = %bb47
- %6 = tail call x86_fp80 @copysignl(x86_fp80 0xK00000000000000000000, x86_fp80 %a) nounwind readnone ; <x86_fp80> [#uses=0]
- br label %bb62
-
-bb62: ; preds = %bb60, %bb47
- unreachable
-
-bb71: ; preds = %entry
- ret void
-}
-
-declare x86_fp80 @copysignl(x86_fp80, x86_fp80) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-20-PreAllocSplit-Crash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-02-20-PreAllocSplit-Crash.ll
deleted file mode 100644
index 58a7f9f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-20-PreAllocSplit-Crash.ll
+++ /dev/null
@@ -1,71 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-apple-darwin8 -pre-alloc-split
-
-define i32 @main() nounwind {
-bb4.i.thread:
- br label %bb5.i4
-
-bb16: ; preds = %bb111.i
- %phitmp = add i32 %indvar.reg2mem.4, 1 ; <i32> [#uses=2]
- switch i32 %indvar.reg2mem.4, label %bb100.i [
- i32 0, label %bb5.i4
- i32 1, label %bb5.i4
- i32 2, label %bb5.i4
- i32 5, label %bb.i14.i
- i32 6, label %bb.i14.i
- i32 7, label %bb.i14.i
- ]
-
-bb5.i4: ; preds = %bb16, %bb16, %bb16, %bb4.i.thread
- br i1 false, label %bb102.i, label %bb103.i
-
-bb.i14.i: ; preds = %bb16, %bb16, %bb16
- %0 = malloc [600 x i32] ; <[600 x i32]*> [#uses=0]
- %1 = icmp eq i32 %phitmp, 7 ; <i1> [#uses=1]
- %tl.0.i = select i1 %1, float 1.000000e+02, float 1.000000e+00 ; <float> [#uses=1]
- %2 = icmp eq i32 %phitmp, 8 ; <i1> [#uses=1]
- %tu.0.i = select i1 %2, float 1.000000e+02, float 1.000000e+00 ; <float> [#uses=1]
- br label %bb30.i
-
-bb30.i: ; preds = %bb36.i, %bb.i14.i
- %i.1173.i = phi i32 [ 0, %bb.i14.i ], [ %indvar.next240.i, %bb36.i ] ; <i32> [#uses=3]
- %3 = icmp eq i32 0, %i.1173.i ; <i1> [#uses=1]
- br i1 %3, label %bb33.i, label %bb34.i
-
-bb33.i: ; preds = %bb30.i
- store float %tl.0.i, float* null, align 4
- br label %bb36.i
-
-bb34.i: ; preds = %bb30.i
- %4 = icmp eq i32 0, %i.1173.i ; <i1> [#uses=1]
- br i1 %4, label %bb35.i, label %bb36.i
-
-bb35.i: ; preds = %bb34.i
- store float %tu.0.i, float* null, align 4
- br label %bb36.i
-
-bb36.i: ; preds = %bb35.i, %bb34.i, %bb33.i
- %indvar.next240.i = add i32 %i.1173.i, 1 ; <i32> [#uses=1]
- br label %bb30.i
-
-bb100.i: ; preds = %bb16
- ret i32 0
-
-bb102.i: ; preds = %bb5.i4
- br label %bb103.i
-
-bb103.i: ; preds = %bb102.i, %bb5.i4
- %indvar.reg2mem.4 = phi i32 [ 0, %bb5.i4 ], [ 0, %bb102.i ] ; <i32> [#uses=2]
- %n.0.reg2mem.1.i = phi i32 [ 0, %bb102.i ], [ 0, %bb5.i4 ] ; <i32> [#uses=1]
- %5 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- br i1 %5, label %bb111.i, label %bb108.i
-
-bb108.i: ; preds = %bb103.i
- ret i32 0
-
-bb111.i: ; preds = %bb103.i
- %6 = icmp sgt i32 %n.0.reg2mem.1.i, 7 ; <i1> [#uses=1]
- br i1 %6, label %bb16, label %bb112.i
-
-bb112.i: ; preds = %bb111.i
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-21-ExtWeakInitializer.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-02-21-ExtWeakInitializer.ll
deleted file mode 100644
index b3dd13c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-21-ExtWeakInitializer.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s | grep weak | count 3
-; PR3629
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "x86_64-unknown-freebsd7.1"
-module asm ".ident\09\22$FreeBSD$\22"
- %struct.anon = type <{ %struct.uart_devinfo* }>
- %struct.lock_object = type <{ i8*, i32, i32, %struct.witness* }>
- %struct.mtx = type <{ %struct.lock_object, i64 }>
- %struct.uart_bas = type <{ i64, i64, i32, i32, i32, i8, i8, i8, i8 }>
- %struct.uart_class = type opaque
- %struct.uart_devinfo = type <{ %struct.anon, %struct.uart_ops*, %struct.uart_bas, i32, i32, i32, i32, i32, i8, i8, i8, i8, i32 (%struct.uart_softc*)*, i32 (%struct.uart_softc*)*, i8*, %struct.mtx* }>
- %struct.uart_ops = type <{ i32 (%struct.uart_bas*)*, void (%struct.uart_bas*, i32, i32, i32, i32)*, void (%struct.uart_bas*)*, void (%struct.uart_bas*, i32)*, i32 (%struct.uart_bas*)*, i32 (%struct.uart_bas*, %struct.mtx*)* }>
- %struct.uart_softc = type opaque
- %struct.witness = type opaque
-
-@uart_classes = internal global [3 x %struct.uart_class*] [%struct.uart_class* @uart_ns8250_class, %struct.uart_class* @uart_sab82532_class, %struct.uart_class* @uart_z8530_class], align 8 ; <[3 x %struct.uart_class*]*> [#uses=1]
-@uart_ns8250_class = extern_weak global %struct.uart_class ; <%struct.uart_class*> [#uses=1]
-@uart_sab82532_class = extern_weak global %struct.uart_class ; <%struct.uart_class*> [#uses=1]
-@uart_z8530_class = extern_weak global %struct.uart_class ; <%struct.uart_class*> [#uses=1]
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-25-CommuteBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-02-25-CommuteBug.ll
deleted file mode 100644
index 7ea6998..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-25-CommuteBug.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -stats |& not grep commuted
-; rdar://6608609
-
-define <2 x double> @t(<2 x double> %A, <2 x double> %B, <2 x double> %C) nounwind readnone {
-entry:
- %tmp.i2 = bitcast <2 x double> %B to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp2.i = or <2 x i64> %tmp.i2, <i64 4607632778762754458, i64 4607632778762754458> ; <<2 x i64>> [#uses=1]
- %tmp3.i = bitcast <2 x i64> %tmp2.i to <2 x double> ; <<2 x double>> [#uses=1]
- %0 = tail call <2 x double> @llvm.x86.sse2.add.sd(<2 x double> %A, <2 x double> %tmp3.i) nounwind readnone ; <<2 x double>> [#uses=1]
- %tmp.i = fadd <2 x double> %0, %C ; <<2 x double>> [#uses=1]
- ret <2 x double> %tmp.i
-}
-
-declare <2 x double> @llvm.x86.sse2.add.sd(<2 x double>, <2 x double>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
deleted file mode 100644
index a4d642b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse3 -stats |& not grep {machine-licm}
-; rdar://6627786
-
-target triple = "x86_64-apple-darwin10.0"
- %struct.Key = type { i64 }
- %struct.__Rec = type opaque
- %struct.__vv = type { }
-
-define %struct.__vv* @t(%struct.Key* %desc, i64 %p) nounwind ssp {
-entry:
- br label %bb4
-
-bb4: ; preds = %bb.i, %bb26, %bb4, %entry
- %0 = call i32 (...)* @xxGetOffsetForCode(i32 undef) nounwind ; <i32> [#uses=0]
- %ins = or i64 %p, 2097152 ; <i64> [#uses=1]
- %1 = call i32 (...)* @xxCalculateMidType(%struct.Key* %desc, i32 0) nounwind ; <i32> [#uses=1]
- %cond = icmp eq i32 %1, 1 ; <i1> [#uses=1]
- br i1 %cond, label %bb26, label %bb4
-
-bb26: ; preds = %bb4
- %2 = and i64 %ins, 15728640 ; <i64> [#uses=1]
- %cond.i = icmp eq i64 %2, 1048576 ; <i1> [#uses=1]
- br i1 %cond.i, label %bb.i, label %bb4
-
-bb.i: ; preds = %bb26
- %3 = load i32* null, align 4 ; <i32> [#uses=1]
- %4 = uitofp i32 %3 to float ; <float> [#uses=1]
- %.sum13.i = add i64 0, 4 ; <i64> [#uses=1]
- %5 = getelementptr i8* null, i64 %.sum13.i ; <i8*> [#uses=1]
- %6 = bitcast i8* %5 to i32* ; <i32*> [#uses=1]
- %7 = load i32* %6, align 4 ; <i32> [#uses=1]
- %8 = uitofp i32 %7 to float ; <float> [#uses=1]
- %.sum.i = add i64 0, 8 ; <i64> [#uses=1]
- %9 = getelementptr i8* null, i64 %.sum.i ; <i8*> [#uses=1]
- %10 = bitcast i8* %9 to i32* ; <i32*> [#uses=1]
- %11 = load i32* %10, align 4 ; <i32> [#uses=1]
- %12 = uitofp i32 %11 to float ; <float> [#uses=1]
- %13 = insertelement <4 x float> undef, float %4, i32 0 ; <<4 x float>> [#uses=1]
- %14 = insertelement <4 x float> %13, float %8, i32 1 ; <<4 x float>> [#uses=1]
- %15 = insertelement <4 x float> %14, float %12, i32 2 ; <<4 x float>> [#uses=1]
- store <4 x float> %15, <4 x float>* null, align 16
- br label %bb4
-}
-
-declare i32 @xxGetOffsetForCode(...)
-
-declare i32 @xxCalculateMidType(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-03-BTHang.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-03-BTHang.ll
deleted file mode 100644
index bb95925..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-03-BTHang.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc < %s -march=x86
-; rdar://6642541
-
- %struct.HandleBlock = type { [30 x i32], [990 x i8*], %struct.HandleBlockTrailer }
- %struct.HandleBlockTrailer = type { %struct.HandleBlock* }
-
-define hidden zeroext i8 @IsHandleAllocatedFromPool(i8** %h) nounwind optsize {
-entry:
- %0 = ptrtoint i8** %h to i32 ; <i32> [#uses=2]
- %1 = and i32 %0, -4096 ; <i32> [#uses=1]
- %2 = inttoptr i32 %1 to %struct.HandleBlock* ; <%struct.HandleBlock*> [#uses=3]
- %3 = getelementptr %struct.HandleBlock* %2, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
- %4 = load i32* %3, align 4096 ; <i32> [#uses=1]
- %5 = icmp eq i32 %4, 1751280747 ; <i1> [#uses=1]
- br i1 %5, label %bb, label %bb1
-
-bb: ; preds = %entry
- %6 = getelementptr %struct.HandleBlock* %2, i32 0, i32 1 ; <[990 x i8*]*> [#uses=1]
- %7 = ptrtoint [990 x i8*]* %6 to i32 ; <i32> [#uses=1]
- %8 = sub i32 %0, %7 ; <i32> [#uses=2]
- %9 = lshr i32 %8, 2 ; <i32> [#uses=1]
- %10 = ashr i32 %8, 7 ; <i32> [#uses=1]
- %11 = and i32 %10, 134217727 ; <i32> [#uses=1]
- %12 = getelementptr %struct.HandleBlock* %2, i32 0, i32 0, i32 %11 ; <i32*> [#uses=1]
- %not.i = and i32 %9, 31 ; <i32> [#uses=1]
- %13 = xor i32 %not.i, 31 ; <i32> [#uses=1]
- %14 = shl i32 1, %13 ; <i32> [#uses=1]
- %15 = load i32* %12, align 4 ; <i32> [#uses=1]
- %16 = and i32 %15, %14 ; <i32> [#uses=1]
- %17 = icmp eq i32 %16, 0 ; <i1> [#uses=1]
- %tmp = zext i1 %17 to i8 ; <i8> [#uses=1]
- ret i8 %tmp
-
-bb1: ; preds = %entry
- ret i8 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-03-BitcastLongDouble.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-03-BitcastLongDouble.ll
deleted file mode 100644
index 9deeceb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-03-BitcastLongDouble.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR3686
-; rdar://6661799
-
-define i32 @x(i32 %y) nounwind readnone {
-entry:
- %tmp14 = zext i32 %y to i80 ; <i80> [#uses=1]
- %tmp15 = bitcast i80 %tmp14 to x86_fp80 ; <x86_fp80> [#uses=1]
- %add = fadd x86_fp80 %tmp15, 0xK3FFF8000000000000000 ; <x86_fp80> [#uses=1]
- %tmp11 = bitcast x86_fp80 %add to i80 ; <i80> [#uses=1]
- %tmp10 = trunc i80 %tmp11 to i32 ; <i32> [#uses=1]
- ret i32 %tmp10
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-05-burr-list-crash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-05-burr-list-crash.ll
deleted file mode 100644
index 411a0c9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-05-burr-list-crash.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-external global i32 ; <i32*>:0 [#uses=1]
-
-declare i64 @strlen(i8* nocapture) nounwind readonly
-
-define fastcc i8* @1(i8*) nounwind {
- br i1 false, label %3, label %2
-
-; <label>:2 ; preds = %1
- ret i8* %0
-
-; <label>:3 ; preds = %1
- %4 = call i64 @strlen(i8* %0) nounwind readonly ; <i64> [#uses=1]
- %5 = trunc i64 %4 to i32 ; <i32> [#uses=2]
- %6 = load i32* @0, align 4 ; <i32> [#uses=1]
- %7 = sub i32 %5, %6 ; <i32> [#uses=2]
- %8 = sext i32 %5 to i64 ; <i64> [#uses=1]
- %9 = sext i32 %7 to i64 ; <i64> [#uses=1]
- %10 = sub i64 %8, %9 ; <i64> [#uses=1]
- %11 = getelementptr i8* %0, i64 %10 ; <i8*> [#uses=1]
- %12 = icmp sgt i32 %7, 0 ; <i1> [#uses=1]
- br i1 %12, label %13, label %14
-
-; <label>:13 ; preds = %13, %3
- br label %13
-
-; <label>:14 ; preds = %3
- %15 = call noalias i8* @make_temp_file(i8* %11) nounwind ; <i8*> [#uses=0]
- unreachable
-}
-
-declare noalias i8* @make_temp_file(i8*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-07-FPConstSelect.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-07-FPConstSelect.ll
deleted file mode 100644
index 39caddc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-07-FPConstSelect.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah | not grep xmm
-; This should do a single load into the fp stack for the return, not diddle with xmm registers.
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin7"
-
-define float @f(i32 %x) nounwind readnone {
-entry:
- %0 = icmp eq i32 %x, 0 ; <i1> [#uses=1]
- %iftmp.0.0 = select i1 %0, float 4.200000e+01, float 2.300000e+01
- ret float %iftmp.0.0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-09-APIntCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-09-APIntCrash.ll
deleted file mode 100644
index 896c968..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-09-APIntCrash.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86-64
-; PR3763
- %struct.__block_descriptor = type { i64, i64 }
-
-define %struct.__block_descriptor @evUTCTime() nounwind {
-entry:
- br i1 false, label %if.then, label %return
-
-if.then: ; preds = %entry
- %srcval18 = load i128* null, align 8 ; <i128> [#uses=1]
- %tmp15 = lshr i128 %srcval18, 64 ; <i128> [#uses=1]
- %tmp9 = mul i128 %tmp15, 18446744073709551616000 ; <i128> [#uses=1]
- br label %return
-
-return: ; preds = %if.then, %entry
- %retval.0 = phi i128 [ %tmp9, %if.then ], [ undef, %entry ] ; <i128> [#uses=0]
- ret %struct.__block_descriptor undef
-}
-
-define i128 @test(i128 %arg) nounwind {
- %A = shl i128 1, 92
- %B = sub i128 0, %A
- %C = mul i128 %arg, %B
- ret i128 %C ;; should codegen to neg(shift)
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-09-SpillerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-09-SpillerBug.ll
deleted file mode 100644
index 4224210..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-09-SpillerBug.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -mtriple=i386-pc-linux-gnu
-; PR3706
-
-define void @__mulxc3(x86_fp80 %b) nounwind {
-entry:
- %call = call x86_fp80 @y(x86_fp80* null, x86_fp80* null) ; <x86_fp80> [#uses=0]
- %cmp = fcmp ord x86_fp80 %b, 0xK00000000000000000000 ; <i1> [#uses=1]
- %sub = fsub x86_fp80 %b, %b ; <x86_fp80> [#uses=1]
- %cmp7 = fcmp uno x86_fp80 %sub, 0xK00000000000000000000 ; <i1> [#uses=1]
- %and12 = and i1 %cmp7, %cmp ; <i1> [#uses=1]
- %and = zext i1 %and12 to i32 ; <i32> [#uses=1]
- %conv9 = sitofp i32 %and to x86_fp80 ; <x86_fp80> [#uses=1]
- store x86_fp80 %conv9, x86_fp80* null
- store x86_fp80 %b, x86_fp80* null
- ret void
-}
-
-declare x86_fp80 @y(x86_fp80*, x86_fp80*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-10-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-10-CoalescerBug.ll
deleted file mode 100644
index 90dff88..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-10-CoalescerBug.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin
-; rdar://r6661945
-
- %struct.WINDOW = type { i16, i16, i16, i16, i16, i16, i16, i32, i32, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, %struct.ldat*, i16, i16, i32, i32, %struct.WINDOW*, %struct.pdat, i16, %struct.cchar_t }
- %struct.cchar_t = type { i32, [5 x i32] }
- %struct.ldat = type { %struct.cchar_t*, i16, i16, i16 }
- %struct.pdat = type { i16, i16, i16, i16, i16, i16 }
-
-define i32 @pnoutrefresh(%struct.WINDOW* %win, i32 %pminrow, i32 %pmincol, i32 %sminrow, i32 %smincol, i32 %smaxrow, i32 %smaxcol) nounwind optsize ssp {
-entry:
- %0 = load i16* null, align 4 ; <i16> [#uses=2]
- %1 = icmp sgt i16 0, %0 ; <i1> [#uses=1]
- br i1 %1, label %bb12, label %bb13
-
-bb12: ; preds = %entry
- %2 = sext i16 %0 to i32 ; <i32> [#uses=1]
- %3 = sub i32 %2, 0 ; <i32> [#uses=1]
- %4 = add i32 %3, %smaxrow ; <i32> [#uses=2]
- %5 = trunc i32 %4 to i16 ; <i16> [#uses=1]
- %6 = add i16 0, %5 ; <i16> [#uses=1]
- br label %bb13
-
-bb13: ; preds = %bb12, %entry
- %pmaxrow.0 = phi i16 [ %6, %bb12 ], [ 0, %entry ] ; <i16> [#uses=0]
- %smaxrow_addr.0 = phi i32 [ %4, %bb12 ], [ %smaxrow, %entry ] ; <i32> [#uses=1]
- %7 = trunc i32 %smaxrow_addr.0 to i16 ; <i16> [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-11-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-11-CoalescerBug.ll
deleted file mode 100644
index d5ba93e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-11-CoalescerBug.ll
+++ /dev/null
@@ -1,85 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9 -stats |& grep regcoalescing | grep commuting
-
-@lookupTable5B = external global [64 x i32], align 32 ; <[64 x i32]*> [#uses=1]
-@lookupTable3B = external global [16 x i32], align 32 ; <[16 x i32]*> [#uses=1]
-@disparity0 = external global i32 ; <i32*> [#uses=5]
-@disparity1 = external global i32 ; <i32*> [#uses=3]
-
-define i32 @calc(i32 %theWord, i32 %k) nounwind {
-entry:
- %0 = lshr i32 %theWord, 3 ; <i32> [#uses=1]
- %1 = and i32 %0, 31 ; <i32> [#uses=1]
- %2 = shl i32 %k, 5 ; <i32> [#uses=1]
- %3 = or i32 %1, %2 ; <i32> [#uses=1]
- %4 = and i32 %theWord, 7 ; <i32> [#uses=1]
- %5 = shl i32 %k, 3 ; <i32> [#uses=1]
- %6 = or i32 %5, %4 ; <i32> [#uses=1]
- %7 = getelementptr [64 x i32]* @lookupTable5B, i32 0, i32 %3 ; <i32*> [#uses=1]
- %8 = load i32* %7, align 4 ; <i32> [#uses=5]
- %9 = getelementptr [16 x i32]* @lookupTable3B, i32 0, i32 %6 ; <i32*> [#uses=1]
- %10 = load i32* %9, align 4 ; <i32> [#uses=5]
- %11 = and i32 %8, 65536 ; <i32> [#uses=1]
- %12 = icmp eq i32 %11, 0 ; <i1> [#uses=1]
- br i1 %12, label %bb1, label %bb
-
-bb: ; preds = %entry
- %13 = and i32 %8, 994 ; <i32> [#uses=1]
- %14 = load i32* @disparity0, align 4 ; <i32> [#uses=2]
- store i32 %14, i32* @disparity1, align 4
- br label %bb8
-
-bb1: ; preds = %entry
- %15 = lshr i32 %8, 18 ; <i32> [#uses=1]
- %16 = and i32 %15, 1 ; <i32> [#uses=1]
- %17 = load i32* @disparity0, align 4 ; <i32> [#uses=4]
- %18 = icmp eq i32 %16, %17 ; <i1> [#uses=1]
- %not = select i1 %18, i32 0, i32 994 ; <i32> [#uses=1]
- %.masked = and i32 %8, 994 ; <i32> [#uses=1]
- %result.1 = xor i32 %not, %.masked ; <i32> [#uses=2]
- %19 = and i32 %8, 524288 ; <i32> [#uses=1]
- %20 = icmp eq i32 %19, 0 ; <i1> [#uses=1]
- br i1 %20, label %bb7, label %bb6
-
-bb6: ; preds = %bb1
- %21 = xor i32 %17, 1 ; <i32> [#uses=2]
- store i32 %21, i32* @disparity1, align 4
- br label %bb8
-
-bb7: ; preds = %bb1
- store i32 %17, i32* @disparity1, align 4
- br label %bb8
-
-bb8: ; preds = %bb7, %bb6, %bb
- %22 = phi i32 [ %17, %bb7 ], [ %21, %bb6 ], [ %14, %bb ] ; <i32> [#uses=4]
- %result.0 = phi i32 [ %result.1, %bb7 ], [ %result.1, %bb6 ], [ %13, %bb ] ; <i32> [#uses=2]
- %23 = and i32 %10, 65536 ; <i32> [#uses=1]
- %24 = icmp eq i32 %23, 0 ; <i1> [#uses=1]
- br i1 %24, label %bb10, label %bb9
-
-bb9: ; preds = %bb8
- %25 = and i32 %10, 29 ; <i32> [#uses=1]
- %26 = or i32 %result.0, %25 ; <i32> [#uses=1]
- store i32 %22, i32* @disparity0, align 4
- ret i32 %26
-
-bb10: ; preds = %bb8
- %27 = lshr i32 %10, 18 ; <i32> [#uses=1]
- %28 = and i32 %27, 1 ; <i32> [#uses=1]
- %29 = icmp eq i32 %28, %22 ; <i1> [#uses=1]
- %not13 = select i1 %29, i32 0, i32 29 ; <i32> [#uses=1]
- %.masked20 = and i32 %10, 29 ; <i32> [#uses=1]
- %.pn = xor i32 %not13, %.masked20 ; <i32> [#uses=1]
- %result.3 = or i32 %.pn, %result.0 ; <i32> [#uses=2]
- %30 = and i32 %10, 524288 ; <i32> [#uses=1]
- %31 = icmp eq i32 %30, 0 ; <i1> [#uses=1]
- br i1 %31, label %bb17, label %bb16
-
-bb16: ; preds = %bb10
- %32 = xor i32 %22, 1 ; <i32> [#uses=1]
- store i32 %32, i32* @disparity0, align 4
- ret i32 %result.3
-
-bb17: ; preds = %bb10
- store i32 %22, i32* @disparity0, align 4
- ret i32 %result.3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-12-CPAlignBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-12-CPAlignBug.ll
deleted file mode 100644
index 3564f01..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-12-CPAlignBug.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+sse2 | not grep {.space}
-; rdar://6668548
-
-declare double @llvm.sqrt.f64(double) nounwind readonly
-
-declare double @fabs(double)
-
-declare double @llvm.pow.f64(double, double) nounwind readonly
-
-define void @SolveCubic_bb1(i32* %solutions, double* %x, x86_fp80 %.reload, x86_fp80 %.reload5, x86_fp80 %.reload6, double %.reload8) nounwind {
-newFuncRoot:
- br label %bb1
-
-bb1.ret.exitStub: ; preds = %bb1
- ret void
-
-bb1: ; preds = %newFuncRoot
- store i32 1, i32* %solutions, align 4
- %0 = tail call double @llvm.sqrt.f64(double %.reload8) ; <double> [#uses=1]
- %1 = fptrunc x86_fp80 %.reload6 to double ; <double> [#uses=1]
- %2 = tail call double @fabs(double %1) nounwind readnone ; <double> [#uses=1]
- %3 = fadd double %0, %2 ; <double> [#uses=1]
- %4 = tail call double @llvm.pow.f64(double %3, double 0x3FD5555555555555) ; <double> [#uses=1]
- %5 = fpext double %4 to x86_fp80 ; <x86_fp80> [#uses=2]
- %6 = fdiv x86_fp80 %.reload5, %5 ; <x86_fp80> [#uses=1]
- %7 = fadd x86_fp80 %5, %6 ; <x86_fp80> [#uses=1]
- %8 = fptrunc x86_fp80 %7 to double ; <double> [#uses=1]
- %9 = fcmp olt x86_fp80 %.reload6, 0xK00000000000000000000 ; <i1> [#uses=1]
- %iftmp.6.0 = select i1 %9, double 1.000000e+00, double -1.000000e+00 ; <double> [#uses=1]
- %10 = fmul double %8, %iftmp.6.0 ; <double> [#uses=1]
- %11 = fpext double %10 to x86_fp80 ; <x86_fp80> [#uses=1]
- %12 = fdiv x86_fp80 %.reload, 0xKC000C000000000000000 ; <x86_fp80> [#uses=1]
- %13 = fadd x86_fp80 %11, %12 ; <x86_fp80> [#uses=1]
- %14 = fptrunc x86_fp80 %13 to double ; <double> [#uses=1]
- store double %14, double* %x, align 1
- br label %bb1.ret.exitStub
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-13-PHIElimBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-13-PHIElimBug.ll
deleted file mode 100644
index ad7f9f7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-13-PHIElimBug.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc < %s -march=x86 | FileCheck %s
-; Check the register copy comes after the call to f and before the call to g
-; PR3784
-
-declare i32 @f()
-
-declare i32 @g()
-
-define i32 @phi() {
-entry:
- %a = call i32 @f() ; <i32> [#uses=1]
- %b = invoke i32 @g()
- to label %cont unwind label %lpad ; <i32> [#uses=1]
-
-cont: ; preds = %entry
- %x = phi i32 [ %b, %entry ] ; <i32> [#uses=0]
- %aa = call i32 @g() ; <i32> [#uses=1]
- %bb = invoke i32 @g()
- to label %cont2 unwind label %lpad ; <i32> [#uses=1]
-
-cont2: ; preds = %cont
- %xx = phi i32 [ %bb, %cont ] ; <i32> [#uses=1]
- ret i32 %xx
-
-lpad: ; preds = %cont, %entry
- %y = phi i32 [ %a, %entry ], [ %aa, %cont ] ; <i32> [#uses=1]
- ret i32 %y
-}
-
-; CHECK: call{{.*}}f
-; CHECK-NEXT: Llabel1:
-; CHECK-NEXT: movl %eax, %esi
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll
deleted file mode 100644
index 11c4101..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 -asm-verbose | FileCheck %s
-; Check that register copies in the landing pad come after the EH_LABEL
-
-declare i32 @f()
-
-define i32 @phi(i32 %x) {
-entry:
- %a = invoke i32 @f()
- to label %cont unwind label %lpad ; <i32> [#uses=1]
-
-cont: ; preds = %entry
- %b = invoke i32 @f()
- to label %cont2 unwind label %lpad ; <i32> [#uses=1]
-
-cont2: ; preds = %cont
- ret i32 %b
-
-lpad: ; preds = %cont, %entry
- %v = phi i32 [ %x, %entry ], [ %a, %cont ] ; <i32> [#uses=1]
- ret i32 %v
-}
-
-; CHECK: lpad
-; CHECK-NEXT: Llabel
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-16-SpillerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-16-SpillerBug.ll
deleted file mode 100644
index 80e7639..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-16-SpillerBug.ll
+++ /dev/null
@@ -1,167 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -stats |& grep virtregrewriter | not grep {stores unfolded}
-; rdar://6682365
-
-; Do not clobber a register if another spill slot is available in it and it's marked "do not clobber".
-
- %struct.CAST_KEY = type { [32 x i32], i32 }
-@CAST_S_table0 = constant [2 x i32] [i32 821772500, i32 -1616838901], align 32 ; <[2 x i32]*> [#uses=0]
-@CAST_S_table4 = constant [2 x i32] [i32 2127105028, i32 745436345], align 32 ; <[2 x i32]*> [#uses=6]
-@CAST_S_table5 = constant [2 x i32] [i32 -151351395, i32 749497569], align 32 ; <[2 x i32]*> [#uses=5]
-@CAST_S_table6 = constant [2 x i32] [i32 -2048901095, i32 858518887], align 32 ; <[2 x i32]*> [#uses=4]
-@CAST_S_table7 = constant [2 x i32] [i32 -501862387, i32 -1143078916], align 32 ; <[2 x i32]*> [#uses=5]
-@CAST_S_table1 = constant [2 x i32] [i32 522195092, i32 -284448933], align 32 ; <[2 x i32]*> [#uses=0]
-@CAST_S_table2 = constant [2 x i32] [i32 -1913667008, i32 637164959], align 32 ; <[2 x i32]*> [#uses=0]
-@CAST_S_table3 = constant [2 x i32] [i32 -1649212384, i32 532081118], align 32 ; <[2 x i32]*> [#uses=0]
-
-define void @CAST_set_key(%struct.CAST_KEY* nocapture %key, i32 %len, i8* nocapture %data) nounwind ssp {
-bb1.thread:
- %0 = getelementptr [16 x i32]* null, i32 0, i32 5 ; <i32*> [#uses=1]
- %1 = getelementptr [16 x i32]* null, i32 0, i32 8 ; <i32*> [#uses=1]
- %2 = load i32* null, align 4 ; <i32> [#uses=1]
- %3 = shl i32 %2, 24 ; <i32> [#uses=1]
- %4 = load i32* null, align 4 ; <i32> [#uses=1]
- %5 = shl i32 %4, 16 ; <i32> [#uses=1]
- %6 = load i32* null, align 4 ; <i32> [#uses=1]
- %7 = or i32 %5, %3 ; <i32> [#uses=1]
- %8 = or i32 %7, %6 ; <i32> [#uses=1]
- %9 = or i32 %8, 0 ; <i32> [#uses=1]
- %10 = load i32* null, align 4 ; <i32> [#uses=1]
- %11 = shl i32 %10, 24 ; <i32> [#uses=1]
- %12 = load i32* %0, align 4 ; <i32> [#uses=1]
- %13 = shl i32 %12, 16 ; <i32> [#uses=1]
- %14 = load i32* null, align 4 ; <i32> [#uses=1]
- %15 = or i32 %13, %11 ; <i32> [#uses=1]
- %16 = or i32 %15, %14 ; <i32> [#uses=1]
- %17 = or i32 %16, 0 ; <i32> [#uses=1]
- br label %bb11
-
-bb11: ; preds = %bb11, %bb1.thread
- %18 = phi i32 [ %110, %bb11 ], [ 0, %bb1.thread ] ; <i32> [#uses=1]
- %19 = phi i32 [ %112, %bb11 ], [ 0, %bb1.thread ] ; <i32> [#uses=0]
- %20 = phi i32 [ 0, %bb11 ], [ 0, %bb1.thread ] ; <i32> [#uses=0]
- %21 = phi i32 [ %113, %bb11 ], [ 0, %bb1.thread ] ; <i32> [#uses=1]
- %X.0.0 = phi i32 [ %9, %bb1.thread ], [ %92, %bb11 ] ; <i32> [#uses=0]
- %X.1.0 = phi i32 [ %17, %bb1.thread ], [ 0, %bb11 ] ; <i32> [#uses=0]
- %22 = getelementptr [2 x i32]* @CAST_S_table6, i32 0, i32 %21 ; <i32*> [#uses=0]
- %23 = getelementptr [2 x i32]* @CAST_S_table5, i32 0, i32 %18 ; <i32*> [#uses=0]
- %24 = load i32* null, align 4 ; <i32> [#uses=1]
- %25 = xor i32 0, %24 ; <i32> [#uses=1]
- %26 = xor i32 %25, 0 ; <i32> [#uses=1]
- %27 = xor i32 %26, 0 ; <i32> [#uses=4]
- %28 = and i32 %27, 255 ; <i32> [#uses=2]
- %29 = lshr i32 %27, 8 ; <i32> [#uses=1]
- %30 = and i32 %29, 255 ; <i32> [#uses=2]
- %31 = lshr i32 %27, 16 ; <i32> [#uses=1]
- %32 = and i32 %31, 255 ; <i32> [#uses=1]
- %33 = getelementptr [2 x i32]* @CAST_S_table4, i32 0, i32 %28 ; <i32*> [#uses=1]
- %34 = load i32* %33, align 4 ; <i32> [#uses=2]
- %35 = getelementptr [2 x i32]* @CAST_S_table5, i32 0, i32 %30 ; <i32*> [#uses=1]
- %36 = load i32* %35, align 4 ; <i32> [#uses=2]
- %37 = xor i32 %34, 0 ; <i32> [#uses=1]
- %38 = xor i32 %37, %36 ; <i32> [#uses=1]
- %39 = xor i32 %38, 0 ; <i32> [#uses=1]
- %40 = xor i32 %39, 0 ; <i32> [#uses=1]
- %41 = xor i32 %40, 0 ; <i32> [#uses=3]
- %42 = lshr i32 %41, 8 ; <i32> [#uses=1]
- %43 = and i32 %42, 255 ; <i32> [#uses=2]
- %44 = lshr i32 %41, 16 ; <i32> [#uses=1]
- %45 = and i32 %44, 255 ; <i32> [#uses=1]
- %46 = getelementptr [2 x i32]* @CAST_S_table4, i32 0, i32 %43 ; <i32*> [#uses=1]
- %47 = load i32* %46, align 4 ; <i32> [#uses=1]
- %48 = load i32* null, align 4 ; <i32> [#uses=1]
- %49 = xor i32 %47, 0 ; <i32> [#uses=1]
- %50 = xor i32 %49, %48 ; <i32> [#uses=1]
- %51 = xor i32 %50, 0 ; <i32> [#uses=1]
- %52 = xor i32 %51, 0 ; <i32> [#uses=1]
- %53 = xor i32 %52, 0 ; <i32> [#uses=2]
- %54 = and i32 %53, 255 ; <i32> [#uses=1]
- %55 = lshr i32 %53, 24 ; <i32> [#uses=1]
- %56 = getelementptr [2 x i32]* @CAST_S_table6, i32 0, i32 %55 ; <i32*> [#uses=1]
- %57 = load i32* %56, align 4 ; <i32> [#uses=1]
- %58 = xor i32 0, %57 ; <i32> [#uses=1]
- %59 = xor i32 %58, 0 ; <i32> [#uses=1]
- %60 = xor i32 %59, 0 ; <i32> [#uses=1]
- store i32 %60, i32* null, align 4
- %61 = getelementptr [2 x i32]* @CAST_S_table4, i32 0, i32 0 ; <i32*> [#uses=1]
- %62 = load i32* %61, align 4 ; <i32> [#uses=1]
- %63 = getelementptr [2 x i32]* @CAST_S_table7, i32 0, i32 %54 ; <i32*> [#uses=1]
- %64 = load i32* %63, align 4 ; <i32> [#uses=1]
- %65 = xor i32 0, %64 ; <i32> [#uses=1]
- %66 = xor i32 %65, 0 ; <i32> [#uses=1]
- store i32 %66, i32* null, align 4
- %67 = getelementptr [2 x i32]* @CAST_S_table7, i32 0, i32 %45 ; <i32*> [#uses=1]
- %68 = load i32* %67, align 4 ; <i32> [#uses=1]
- %69 = xor i32 %36, %34 ; <i32> [#uses=1]
- %70 = xor i32 %69, 0 ; <i32> [#uses=1]
- %71 = xor i32 %70, %68 ; <i32> [#uses=1]
- %72 = xor i32 %71, 0 ; <i32> [#uses=1]
- store i32 %72, i32* null, align 4
- %73 = getelementptr [2 x i32]* @CAST_S_table4, i32 0, i32 %32 ; <i32*> [#uses=1]
- %74 = load i32* %73, align 4 ; <i32> [#uses=2]
- %75 = load i32* null, align 4 ; <i32> [#uses=1]
- %76 = getelementptr [2 x i32]* @CAST_S_table6, i32 0, i32 %43 ; <i32*> [#uses=1]
- %77 = load i32* %76, align 4 ; <i32> [#uses=1]
- %78 = getelementptr [2 x i32]* @CAST_S_table7, i32 0, i32 0 ; <i32*> [#uses=1]
- %79 = load i32* %78, align 4 ; <i32> [#uses=1]
- %80 = getelementptr [2 x i32]* @CAST_S_table7, i32 0, i32 %30 ; <i32*> [#uses=1]
- %81 = load i32* %80, align 4 ; <i32> [#uses=2]
- %82 = xor i32 %75, %74 ; <i32> [#uses=1]
- %83 = xor i32 %82, %77 ; <i32> [#uses=1]
- %84 = xor i32 %83, %79 ; <i32> [#uses=1]
- %85 = xor i32 %84, %81 ; <i32> [#uses=1]
- store i32 %85, i32* null, align 4
- %86 = getelementptr [2 x i32]* @CAST_S_table5, i32 0, i32 %28 ; <i32*> [#uses=1]
- %87 = load i32* %86, align 4 ; <i32> [#uses=1]
- %88 = xor i32 %74, %41 ; <i32> [#uses=1]
- %89 = xor i32 %88, %87 ; <i32> [#uses=1]
- %90 = xor i32 %89, 0 ; <i32> [#uses=1]
- %91 = xor i32 %90, %81 ; <i32> [#uses=1]
- %92 = xor i32 %91, 0 ; <i32> [#uses=3]
- %93 = lshr i32 %92, 16 ; <i32> [#uses=1]
- %94 = and i32 %93, 255 ; <i32> [#uses=1]
- store i32 %94, i32* null, align 4
- %95 = lshr i32 %92, 24 ; <i32> [#uses=2]
- %96 = getelementptr [2 x i32]* @CAST_S_table4, i32 0, i32 %95 ; <i32*> [#uses=1]
- %97 = load i32* %96, align 4 ; <i32> [#uses=1]
- %98 = getelementptr [2 x i32]* @CAST_S_table5, i32 0, i32 0 ; <i32*> [#uses=1]
- %99 = load i32* %98, align 4 ; <i32> [#uses=1]
- %100 = load i32* null, align 4 ; <i32> [#uses=0]
- %101 = xor i32 %97, 0 ; <i32> [#uses=1]
- %102 = xor i32 %101, %99 ; <i32> [#uses=1]
- %103 = xor i32 %102, 0 ; <i32> [#uses=1]
- %104 = xor i32 %103, 0 ; <i32> [#uses=0]
- store i32 0, i32* null, align 4
- %105 = xor i32 0, %27 ; <i32> [#uses=1]
- %106 = xor i32 %105, 0 ; <i32> [#uses=1]
- %107 = xor i32 %106, 0 ; <i32> [#uses=1]
- %108 = xor i32 %107, 0 ; <i32> [#uses=1]
- %109 = xor i32 %108, %62 ; <i32> [#uses=3]
- %110 = and i32 %109, 255 ; <i32> [#uses=1]
- %111 = lshr i32 %109, 16 ; <i32> [#uses=1]
- %112 = and i32 %111, 255 ; <i32> [#uses=1]
- %113 = lshr i32 %109, 24 ; <i32> [#uses=3]
- store i32 %113, i32* %1, align 4
- %114 = load i32* null, align 4 ; <i32> [#uses=1]
- %115 = xor i32 0, %114 ; <i32> [#uses=1]
- %116 = xor i32 %115, 0 ; <i32> [#uses=1]
- %117 = xor i32 %116, 0 ; <i32> [#uses=1]
- %K.0.sum42 = or i32 0, 12 ; <i32> [#uses=1]
- %118 = getelementptr [32 x i32]* null, i32 0, i32 %K.0.sum42 ; <i32*> [#uses=1]
- store i32 %117, i32* %118, align 4
- %119 = getelementptr [2 x i32]* @CAST_S_table5, i32 0, i32 0 ; <i32*> [#uses=0]
- store i32 0, i32* null, align 4
- %120 = getelementptr [2 x i32]* @CAST_S_table6, i32 0, i32 %113 ; <i32*> [#uses=1]
- %121 = load i32* %120, align 4 ; <i32> [#uses=1]
- %122 = xor i32 0, %121 ; <i32> [#uses=1]
- store i32 %122, i32* null, align 4
- %123 = getelementptr [2 x i32]* @CAST_S_table4, i32 0, i32 0 ; <i32*> [#uses=1]
- %124 = load i32* %123, align 4 ; <i32> [#uses=1]
- %125 = getelementptr [2 x i32]* @CAST_S_table7, i32 0, i32 %95 ; <i32*> [#uses=1]
- %126 = load i32* %125, align 4 ; <i32> [#uses=1]
- %127 = xor i32 0, %124 ; <i32> [#uses=1]
- %128 = xor i32 %127, 0 ; <i32> [#uses=1]
- %129 = xor i32 %128, %126 ; <i32> [#uses=1]
- %130 = xor i32 %129, 0 ; <i32> [#uses=1]
- store i32 %130, i32* null, align 4
- br label %bb11
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-23-LinearScanBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-23-LinearScanBug.ll
deleted file mode 100644
index 06dfdc0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-23-LinearScanBug.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -O0
-
-define fastcc void @optimize_bit_field() nounwind {
-bb4:
- %a = load i32* null ; <i32> [#uses=1]
- %s = load i32* getelementptr (i32* null, i32 1) ; <i32> [#uses=1]
- %z = load i32* getelementptr (i32* null, i32 2) ; <i32> [#uses=1]
- %r = bitcast i32 0 to i32 ; <i32> [#uses=1]
- %q = trunc i32 %z to i8 ; <i8> [#uses=1]
- %b = icmp eq i8 0, %q ; <i1> [#uses=1]
- br i1 %b, label %bb73, label %bb72
-
-bb72: ; preds = %bb4
- %f = tail call fastcc i32 @gen_lowpart(i32 %r, i32 %a) nounwind ; <i32> [#uses=1]
- br label %bb73
-
-bb73: ; preds = %bb72, %bb4
- %y = phi i32 [ %f, %bb72 ], [ %s, %bb4 ] ; <i32> [#uses=1]
- store i32 %y, i32* getelementptr (i32* null, i32 3)
- unreachable
-}
-
-declare fastcc i32 @gen_lowpart(i32, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-23-MultiUseSched.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-23-MultiUseSched.ll
deleted file mode 100644
index b5873ba..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-23-MultiUseSched.ll
+++ /dev/null
@@ -1,242 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-linux -relocation-model=static -stats -info-output-file - > %t
-; RUN: not grep spill %t
-; RUN: not grep {%rsp} %t
-; RUN: not grep {%rbp} %t
-
-; The register-pressure scheduler should be able to schedule this in a
-; way that does not require spills.
-
-@X = external global i64 ; <i64*> [#uses=25]
-
-define fastcc i64 @foo() nounwind {
- %tmp = volatile load i64* @X ; <i64> [#uses=7]
- %tmp1 = volatile load i64* @X ; <i64> [#uses=5]
- %tmp2 = volatile load i64* @X ; <i64> [#uses=3]
- %tmp3 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp4 = volatile load i64* @X ; <i64> [#uses=5]
- %tmp5 = volatile load i64* @X ; <i64> [#uses=3]
- %tmp6 = volatile load i64* @X ; <i64> [#uses=2]
- %tmp7 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp8 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp9 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp10 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp11 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp12 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp13 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp14 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp15 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp16 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp17 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp18 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp19 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp20 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp21 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp22 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp23 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp24 = call i64 @llvm.bswap.i64(i64 %tmp8) ; <i64> [#uses=1]
- %tmp25 = add i64 %tmp6, %tmp5 ; <i64> [#uses=1]
- %tmp26 = add i64 %tmp25, %tmp4 ; <i64> [#uses=1]
- %tmp27 = add i64 %tmp7, %tmp4 ; <i64> [#uses=1]
- %tmp28 = add i64 %tmp27, %tmp26 ; <i64> [#uses=1]
- %tmp29 = add i64 %tmp28, %tmp24 ; <i64> [#uses=2]
- %tmp30 = add i64 %tmp2, %tmp1 ; <i64> [#uses=1]
- %tmp31 = add i64 %tmp30, %tmp ; <i64> [#uses=1]
- %tmp32 = add i64 %tmp2, %tmp1 ; <i64> [#uses=1]
- %tmp33 = add i64 %tmp31, %tmp32 ; <i64> [#uses=1]
- %tmp34 = add i64 %tmp29, %tmp3 ; <i64> [#uses=5]
- %tmp35 = add i64 %tmp33, %tmp ; <i64> [#uses=1]
- %tmp36 = add i64 %tmp35, %tmp29 ; <i64> [#uses=7]
- %tmp37 = call i64 @llvm.bswap.i64(i64 %tmp9) ; <i64> [#uses=1]
- %tmp38 = add i64 %tmp4, %tmp5 ; <i64> [#uses=1]
- %tmp39 = add i64 %tmp38, %tmp34 ; <i64> [#uses=1]
- %tmp40 = add i64 %tmp6, %tmp37 ; <i64> [#uses=1]
- %tmp41 = add i64 %tmp40, %tmp39 ; <i64> [#uses=1]
- %tmp42 = add i64 %tmp41, %tmp34 ; <i64> [#uses=2]
- %tmp43 = add i64 %tmp1, %tmp ; <i64> [#uses=1]
- %tmp44 = add i64 %tmp36, %tmp43 ; <i64> [#uses=1]
- %tmp45 = add i64 %tmp1, %tmp ; <i64> [#uses=1]
- %tmp46 = add i64 %tmp44, %tmp45 ; <i64> [#uses=1]
- %tmp47 = add i64 %tmp42, %tmp2 ; <i64> [#uses=5]
- %tmp48 = add i64 %tmp36, %tmp46 ; <i64> [#uses=1]
- %tmp49 = add i64 %tmp48, %tmp42 ; <i64> [#uses=7]
- %tmp50 = call i64 @llvm.bswap.i64(i64 %tmp10) ; <i64> [#uses=1]
- %tmp51 = add i64 %tmp34, %tmp4 ; <i64> [#uses=1]
- %tmp52 = add i64 %tmp51, %tmp47 ; <i64> [#uses=1]
- %tmp53 = add i64 %tmp5, %tmp50 ; <i64> [#uses=1]
- %tmp54 = add i64 %tmp53, %tmp52 ; <i64> [#uses=1]
- %tmp55 = add i64 %tmp54, %tmp47 ; <i64> [#uses=2]
- %tmp56 = add i64 %tmp36, %tmp ; <i64> [#uses=1]
- %tmp57 = add i64 %tmp49, %tmp56 ; <i64> [#uses=1]
- %tmp58 = add i64 %tmp36, %tmp ; <i64> [#uses=1]
- %tmp59 = add i64 %tmp57, %tmp58 ; <i64> [#uses=1]
- %tmp60 = add i64 %tmp55, %tmp1 ; <i64> [#uses=5]
- %tmp61 = add i64 %tmp49, %tmp59 ; <i64> [#uses=1]
- %tmp62 = add i64 %tmp61, %tmp55 ; <i64> [#uses=7]
- %tmp63 = call i64 @llvm.bswap.i64(i64 %tmp11) ; <i64> [#uses=1]
- %tmp64 = add i64 %tmp47, %tmp34 ; <i64> [#uses=1]
- %tmp65 = add i64 %tmp64, %tmp60 ; <i64> [#uses=1]
- %tmp66 = add i64 %tmp4, %tmp63 ; <i64> [#uses=1]
- %tmp67 = add i64 %tmp66, %tmp65 ; <i64> [#uses=1]
- %tmp68 = add i64 %tmp67, %tmp60 ; <i64> [#uses=2]
- %tmp69 = add i64 %tmp49, %tmp36 ; <i64> [#uses=1]
- %tmp70 = add i64 %tmp62, %tmp69 ; <i64> [#uses=1]
- %tmp71 = add i64 %tmp49, %tmp36 ; <i64> [#uses=1]
- %tmp72 = add i64 %tmp70, %tmp71 ; <i64> [#uses=1]
- %tmp73 = add i64 %tmp68, %tmp ; <i64> [#uses=5]
- %tmp74 = add i64 %tmp62, %tmp72 ; <i64> [#uses=1]
- %tmp75 = add i64 %tmp74, %tmp68 ; <i64> [#uses=7]
- %tmp76 = call i64 @llvm.bswap.i64(i64 %tmp12) ; <i64> [#uses=1]
- %tmp77 = add i64 %tmp60, %tmp47 ; <i64> [#uses=1]
- %tmp78 = add i64 %tmp77, %tmp73 ; <i64> [#uses=1]
- %tmp79 = add i64 %tmp34, %tmp76 ; <i64> [#uses=1]
- %tmp80 = add i64 %tmp79, %tmp78 ; <i64> [#uses=1]
- %tmp81 = add i64 %tmp80, %tmp73 ; <i64> [#uses=2]
- %tmp82 = add i64 %tmp62, %tmp49 ; <i64> [#uses=1]
- %tmp83 = add i64 %tmp75, %tmp82 ; <i64> [#uses=1]
- %tmp84 = add i64 %tmp62, %tmp49 ; <i64> [#uses=1]
- %tmp85 = add i64 %tmp83, %tmp84 ; <i64> [#uses=1]
- %tmp86 = add i64 %tmp81, %tmp36 ; <i64> [#uses=5]
- %tmp87 = add i64 %tmp75, %tmp85 ; <i64> [#uses=1]
- %tmp88 = add i64 %tmp87, %tmp81 ; <i64> [#uses=7]
- %tmp89 = call i64 @llvm.bswap.i64(i64 %tmp13) ; <i64> [#uses=1]
- %tmp90 = add i64 %tmp73, %tmp60 ; <i64> [#uses=1]
- %tmp91 = add i64 %tmp90, %tmp86 ; <i64> [#uses=1]
- %tmp92 = add i64 %tmp47, %tmp89 ; <i64> [#uses=1]
- %tmp93 = add i64 %tmp92, %tmp91 ; <i64> [#uses=1]
- %tmp94 = add i64 %tmp93, %tmp86 ; <i64> [#uses=2]
- %tmp95 = add i64 %tmp75, %tmp62 ; <i64> [#uses=1]
- %tmp96 = add i64 %tmp88, %tmp95 ; <i64> [#uses=1]
- %tmp97 = add i64 %tmp75, %tmp62 ; <i64> [#uses=1]
- %tmp98 = add i64 %tmp96, %tmp97 ; <i64> [#uses=1]
- %tmp99 = add i64 %tmp94, %tmp49 ; <i64> [#uses=5]
- %tmp100 = add i64 %tmp88, %tmp98 ; <i64> [#uses=1]
- %tmp101 = add i64 %tmp100, %tmp94 ; <i64> [#uses=7]
- %tmp102 = call i64 @llvm.bswap.i64(i64 %tmp14) ; <i64> [#uses=1]
- %tmp103 = add i64 %tmp86, %tmp73 ; <i64> [#uses=1]
- %tmp104 = add i64 %tmp103, %tmp99 ; <i64> [#uses=1]
- %tmp105 = add i64 %tmp102, %tmp60 ; <i64> [#uses=1]
- %tmp106 = add i64 %tmp105, %tmp104 ; <i64> [#uses=1]
- %tmp107 = add i64 %tmp106, %tmp99 ; <i64> [#uses=2]
- %tmp108 = add i64 %tmp88, %tmp75 ; <i64> [#uses=1]
- %tmp109 = add i64 %tmp101, %tmp108 ; <i64> [#uses=1]
- %tmp110 = add i64 %tmp88, %tmp75 ; <i64> [#uses=1]
- %tmp111 = add i64 %tmp109, %tmp110 ; <i64> [#uses=1]
- %tmp112 = add i64 %tmp107, %tmp62 ; <i64> [#uses=5]
- %tmp113 = add i64 %tmp101, %tmp111 ; <i64> [#uses=1]
- %tmp114 = add i64 %tmp113, %tmp107 ; <i64> [#uses=7]
- %tmp115 = call i64 @llvm.bswap.i64(i64 %tmp15) ; <i64> [#uses=1]
- %tmp116 = add i64 %tmp99, %tmp86 ; <i64> [#uses=1]
- %tmp117 = add i64 %tmp116, %tmp112 ; <i64> [#uses=1]
- %tmp118 = add i64 %tmp115, %tmp73 ; <i64> [#uses=1]
- %tmp119 = add i64 %tmp118, %tmp117 ; <i64> [#uses=1]
- %tmp120 = add i64 %tmp119, %tmp112 ; <i64> [#uses=2]
- %tmp121 = add i64 %tmp101, %tmp88 ; <i64> [#uses=1]
- %tmp122 = add i64 %tmp114, %tmp121 ; <i64> [#uses=1]
- %tmp123 = add i64 %tmp101, %tmp88 ; <i64> [#uses=1]
- %tmp124 = add i64 %tmp122, %tmp123 ; <i64> [#uses=1]
- %tmp125 = add i64 %tmp120, %tmp75 ; <i64> [#uses=5]
- %tmp126 = add i64 %tmp114, %tmp124 ; <i64> [#uses=1]
- %tmp127 = add i64 %tmp126, %tmp120 ; <i64> [#uses=7]
- %tmp128 = call i64 @llvm.bswap.i64(i64 %tmp16) ; <i64> [#uses=1]
- %tmp129 = add i64 %tmp112, %tmp99 ; <i64> [#uses=1]
- %tmp130 = add i64 %tmp129, %tmp125 ; <i64> [#uses=1]
- %tmp131 = add i64 %tmp128, %tmp86 ; <i64> [#uses=1]
- %tmp132 = add i64 %tmp131, %tmp130 ; <i64> [#uses=1]
- %tmp133 = add i64 %tmp132, %tmp125 ; <i64> [#uses=2]
- %tmp134 = add i64 %tmp114, %tmp101 ; <i64> [#uses=1]
- %tmp135 = add i64 %tmp127, %tmp134 ; <i64> [#uses=1]
- %tmp136 = add i64 %tmp114, %tmp101 ; <i64> [#uses=1]
- %tmp137 = add i64 %tmp135, %tmp136 ; <i64> [#uses=1]
- %tmp138 = add i64 %tmp133, %tmp88 ; <i64> [#uses=5]
- %tmp139 = add i64 %tmp127, %tmp137 ; <i64> [#uses=1]
- %tmp140 = add i64 %tmp139, %tmp133 ; <i64> [#uses=7]
- %tmp141 = call i64 @llvm.bswap.i64(i64 %tmp17) ; <i64> [#uses=1]
- %tmp142 = add i64 %tmp125, %tmp112 ; <i64> [#uses=1]
- %tmp143 = add i64 %tmp142, %tmp138 ; <i64> [#uses=1]
- %tmp144 = add i64 %tmp141, %tmp99 ; <i64> [#uses=1]
- %tmp145 = add i64 %tmp144, %tmp143 ; <i64> [#uses=1]
- %tmp146 = add i64 %tmp145, %tmp138 ; <i64> [#uses=2]
- %tmp147 = add i64 %tmp127, %tmp114 ; <i64> [#uses=1]
- %tmp148 = add i64 %tmp140, %tmp147 ; <i64> [#uses=1]
- %tmp149 = add i64 %tmp127, %tmp114 ; <i64> [#uses=1]
- %tmp150 = add i64 %tmp148, %tmp149 ; <i64> [#uses=1]
- %tmp151 = add i64 %tmp146, %tmp101 ; <i64> [#uses=5]
- %tmp152 = add i64 %tmp140, %tmp150 ; <i64> [#uses=1]
- %tmp153 = add i64 %tmp152, %tmp146 ; <i64> [#uses=7]
- %tmp154 = call i64 @llvm.bswap.i64(i64 %tmp18) ; <i64> [#uses=1]
- %tmp155 = add i64 %tmp138, %tmp125 ; <i64> [#uses=1]
- %tmp156 = add i64 %tmp155, %tmp151 ; <i64> [#uses=1]
- %tmp157 = add i64 %tmp154, %tmp112 ; <i64> [#uses=1]
- %tmp158 = add i64 %tmp157, %tmp156 ; <i64> [#uses=1]
- %tmp159 = add i64 %tmp158, %tmp151 ; <i64> [#uses=2]
- %tmp160 = add i64 %tmp140, %tmp127 ; <i64> [#uses=1]
- %tmp161 = add i64 %tmp153, %tmp160 ; <i64> [#uses=1]
- %tmp162 = add i64 %tmp140, %tmp127 ; <i64> [#uses=1]
- %tmp163 = add i64 %tmp161, %tmp162 ; <i64> [#uses=1]
- %tmp164 = add i64 %tmp159, %tmp114 ; <i64> [#uses=5]
- %tmp165 = add i64 %tmp153, %tmp163 ; <i64> [#uses=1]
- %tmp166 = add i64 %tmp165, %tmp159 ; <i64> [#uses=7]
- %tmp167 = call i64 @llvm.bswap.i64(i64 %tmp19) ; <i64> [#uses=1]
- %tmp168 = add i64 %tmp151, %tmp138 ; <i64> [#uses=1]
- %tmp169 = add i64 %tmp168, %tmp164 ; <i64> [#uses=1]
- %tmp170 = add i64 %tmp167, %tmp125 ; <i64> [#uses=1]
- %tmp171 = add i64 %tmp170, %tmp169 ; <i64> [#uses=1]
- %tmp172 = add i64 %tmp171, %tmp164 ; <i64> [#uses=2]
- %tmp173 = add i64 %tmp153, %tmp140 ; <i64> [#uses=1]
- %tmp174 = add i64 %tmp166, %tmp173 ; <i64> [#uses=1]
- %tmp175 = add i64 %tmp153, %tmp140 ; <i64> [#uses=1]
- %tmp176 = add i64 %tmp174, %tmp175 ; <i64> [#uses=1]
- %tmp177 = add i64 %tmp172, %tmp127 ; <i64> [#uses=5]
- %tmp178 = add i64 %tmp166, %tmp176 ; <i64> [#uses=1]
- %tmp179 = add i64 %tmp178, %tmp172 ; <i64> [#uses=6]
- %tmp180 = call i64 @llvm.bswap.i64(i64 %tmp20) ; <i64> [#uses=1]
- %tmp181 = add i64 %tmp164, %tmp151 ; <i64> [#uses=1]
- %tmp182 = add i64 %tmp181, %tmp177 ; <i64> [#uses=1]
- %tmp183 = add i64 %tmp180, %tmp138 ; <i64> [#uses=1]
- %tmp184 = add i64 %tmp183, %tmp182 ; <i64> [#uses=1]
- %tmp185 = add i64 %tmp184, %tmp177 ; <i64> [#uses=2]
- %tmp186 = add i64 %tmp166, %tmp153 ; <i64> [#uses=1]
- %tmp187 = add i64 %tmp179, %tmp186 ; <i64> [#uses=1]
- %tmp188 = add i64 %tmp166, %tmp153 ; <i64> [#uses=1]
- %tmp189 = add i64 %tmp187, %tmp188 ; <i64> [#uses=1]
- %tmp190 = add i64 %tmp185, %tmp140 ; <i64> [#uses=4]
- %tmp191 = add i64 %tmp179, %tmp189 ; <i64> [#uses=1]
- %tmp192 = add i64 %tmp191, %tmp185 ; <i64> [#uses=4]
- %tmp193 = call i64 @llvm.bswap.i64(i64 %tmp21) ; <i64> [#uses=1]
- %tmp194 = add i64 %tmp177, %tmp164 ; <i64> [#uses=1]
- %tmp195 = add i64 %tmp194, %tmp190 ; <i64> [#uses=1]
- %tmp196 = add i64 %tmp193, %tmp151 ; <i64> [#uses=1]
- %tmp197 = add i64 %tmp196, %tmp195 ; <i64> [#uses=1]
- %tmp198 = add i64 %tmp197, %tmp190 ; <i64> [#uses=2]
- %tmp199 = add i64 %tmp179, %tmp166 ; <i64> [#uses=1]
- %tmp200 = add i64 %tmp192, %tmp199 ; <i64> [#uses=1]
- %tmp201 = add i64 %tmp179, %tmp166 ; <i64> [#uses=1]
- %tmp202 = add i64 %tmp200, %tmp201 ; <i64> [#uses=1]
- %tmp203 = add i64 %tmp198, %tmp153 ; <i64> [#uses=3]
- %tmp204 = add i64 %tmp192, %tmp202 ; <i64> [#uses=1]
- %tmp205 = add i64 %tmp204, %tmp198 ; <i64> [#uses=2]
- %tmp206 = call i64 @llvm.bswap.i64(i64 %tmp22) ; <i64> [#uses=1]
- %tmp207 = add i64 %tmp190, %tmp177 ; <i64> [#uses=1]
- %tmp208 = add i64 %tmp207, %tmp203 ; <i64> [#uses=1]
- %tmp209 = add i64 %tmp206, %tmp164 ; <i64> [#uses=1]
- %tmp210 = add i64 %tmp209, %tmp208 ; <i64> [#uses=1]
- %tmp211 = add i64 %tmp210, %tmp203 ; <i64> [#uses=2]
- %tmp212 = add i64 %tmp192, %tmp179 ; <i64> [#uses=1]
- %tmp213 = add i64 %tmp205, %tmp212 ; <i64> [#uses=1]
- %tmp214 = add i64 %tmp192, %tmp179 ; <i64> [#uses=1]
- %tmp215 = add i64 %tmp213, %tmp214 ; <i64> [#uses=1]
- %tmp216 = add i64 %tmp211, %tmp166 ; <i64> [#uses=2]
- %tmp217 = add i64 %tmp205, %tmp215 ; <i64> [#uses=1]
- %tmp218 = add i64 %tmp217, %tmp211 ; <i64> [#uses=1]
- %tmp219 = call i64 @llvm.bswap.i64(i64 %tmp23) ; <i64> [#uses=2]
- volatile store i64 %tmp219, i64* @X, align 8
- %tmp220 = add i64 %tmp203, %tmp190 ; <i64> [#uses=1]
- %tmp221 = add i64 %tmp220, %tmp216 ; <i64> [#uses=1]
- %tmp222 = add i64 %tmp219, %tmp177 ; <i64> [#uses=1]
- %tmp223 = add i64 %tmp222, %tmp221 ; <i64> [#uses=1]
- %tmp224 = add i64 %tmp223, %tmp216 ; <i64> [#uses=1]
- %tmp225 = add i64 %tmp224, %tmp218 ; <i64> [#uses=1]
- ret i64 %tmp225
-}
-
-declare i64 @llvm.bswap.i64(i64) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-23-i80-fp80.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-23-i80-fp80.ll
deleted file mode 100644
index a938440..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-23-i80-fp80.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUNX: opt < %s -instcombine -S | grep 302245289961712575840256
-; RUNX: opt < %s -instcombine -S | grep K40018000000000000000
-; RUN: true
-; ClamAV local: no opt
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin9"
-
-define i80 @from() {
- %tmp = bitcast x86_fp80 0xK4000C000000000000000 to i80
- ret i80 %tmp
-}
-
-define x86_fp80 @to() {
- %tmp = bitcast i80 302259125019767858003968 to x86_fp80
- ret x86_fp80 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-25-TestBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-25-TestBug.ll
deleted file mode 100644
index f40fddc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-25-TestBug.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86 -o %t
-; RUN: not grep and %t
-; RUN: not grep shr %t
-; rdar://6661955
-
-@hello = internal constant [7 x i8] c"hello\0A\00"
-@world = internal constant [7 x i8] c"world\0A\00"
-
-define void @func(i32* %b) nounwind {
-bb1579.i.i: ; preds = %bb1514.i.i, %bb191.i.i
- %tmp176 = load i32* %b, align 4
- %tmp177 = and i32 %tmp176, 2
- %tmp178 = icmp eq i32 %tmp177, 0
- br i1 %tmp178, label %hello, label %world
-
-hello:
- %h = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([7 x i8]* @hello, i32 0, i32 0))
- ret void
-
-world:
- %w = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([7 x i8]* @world, i32 0, i32 0))
- ret void
-}
-
-declare i32 @printf(i8*, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-26-NoImplicitFPBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-03-26-NoImplicitFPBug.ll
deleted file mode 100644
index f486479..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-03-26-NoImplicitFPBug.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-
-define double @t(double %x) nounwind ssp noimplicitfloat {
-entry:
- br i1 false, label %return, label %bb3
-
-bb3: ; preds = %entry
- ret double 0.000000e+00
-
-return: ; preds = %entry
- ret double undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-09-InlineAsmCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-09-InlineAsmCrash.ll
deleted file mode 100644
index 97bbd93..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-09-InlineAsmCrash.ll
+++ /dev/null
@@ -1,165 +0,0 @@
-; RUN: llc < %s
-; rdar://6774324
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin10.0"
- type <{ i32, %1 }> ; type %0
- type <{ [216 x i8] }> ; type %1
- type <{ %3, %4*, %28*, i64, i32, %6, %6, i32, i32, i32, i32, void (i8*, i32)*, i8*, %29*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x i8*], i32, %30, i32, %24, %4*, %4*, i64, i64, i32, i32, void (i32, %2*)*, i32, i32, i32, i32, i32, i32, i32, i32, %24, i64, i64, i64, i64, i64, %21, i32, i32, %21, i32, %31*, %3, %33, %34, %9*, i32, i32, %3, %3, %35, %41*, %42*, %11, i32, i32, i32, i8, i8, i8, i8, %69*, %69, %9*, %9*, [11 x %61], %3, i8*, i32, i64, i64, i32, i32, i32, i64 }> ; type %2
- type <{ %3*, %3* }> ; type %3
- type <{ %3, i32, %2*, %2*, %2*, %5*, i32, i32, %21, i64, i64, i64, i32, %22, %9*, %6, %4*, %23 }> ; type %4
- type <{ %3, %3, %4*, %4*, i32, %6, %9*, %9*, %5*, %20* }> ; type %5
- type <{ %7, i16, i8, i8, %8 }> ; type %6
- type <{ i32 }> ; type %7
- type <{ i8*, i8*, [2 x i32], i16, i8, i8, i8*, i8, i8, i8, i8, i8* }> ; type %8
- type <{ %10, %13, %15, i32, i32, i32, i32, %9*, %9*, %16*, i32, %17*, i64, i32 }> ; type %9
- type <{ i32, i32, %11 }> ; type %10
- type <{ %12 }> ; type %11
- type <{ [12 x i8] }> ; type %12
- type <{ %14 }> ; type %13
- type <{ [40 x i8] }> ; type %14
- type <{ [4 x i8] }> ; type %15
- type <{ %15, %15 }> ; type %16
- type <{ %17*, %17*, %9*, i32, %18*, %19* }> ; type %17
- type opaque ; type %18
- type <{ i32, i32, %9*, %9*, i32, i32 }> ; type %19
- type <{ %5*, %20*, %20*, %20* }> ; type %20
- type <{ %3, %3*, void (i8*, i8*)*, i8*, i8*, i64 }> ; type %21
- type <{ i32, [4 x i32], i32, i32, [128 x %3] }> ; type %22
- type <{ %24, %24, %24, %24*, %24*, %24*, %25, %26, %27, i32, i32, i8* }> ; type %23
- type <{ i64, i32, i32, i32 }> ; type %24
- type <{ i32, i32 }> ; type %25
- type <{ i32, i32, i32, i32, i64, i64, i64, i64, i64, i64, i64, i64, i64, i32, i32 }> ; type %26
- type <{ [16 x %17*], i32 }> ; type %27
- type <{ i8, i8, i8, i8, %7, %3 }> ; type %28
- type <{ i32, %11*, i8*, i8*, %11* }> ; type %29
- type <{ i32, i32, i32, i32, i64 }> ; type %30
- type <{ %32*, %3, %3, i32, i32, i32, %5* }> ; type %31
- type opaque ; type %32
- type <{ [44 x i8] }> ; type %33
- type <{ %17* }> ; type %34
- type <{ %36, %36*, i32, [4 x %40], i32, i32, i64, i32 }> ; type %35
- type <{ i8*, %0*, %37*, i64, %39, i32, %39, %6, i64, i64, i8*, i32 }> ; type %36
- type <{ i32, i32, i8, i8, i8, i8, i8, i8, i8, i8, %38 }> ; type %37
- type <{ i16, i16, i8, i8, i16, i32, i16, i16, i32, i16, i16, i32, i32, [8 x [8 x i16]], [8 x [16 x i16]], [96 x i8] }> ; type %38
- type <{ i8, i8, i8, i8, i8, i8, i8, i8 }> ; type %39
- type <{ i64 }> ; type %40
- type <{ %11, i32, i32, i32, %42*, %3, i8*, %3, %5*, %32*, i32, i32, i32, i32, i32, i32, i32, %59, %60, i64, i64, i32, %11, %9*, %9*, %9*, [11 x %61], %9*, %9*, %9*, %9*, %9*, [3 x %9*], %62*, %3, %3, i32, i32, %9*, %9*, i32, %67*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, %68*, [2 x i32], i64, i64, i32 }> ; type %41
- type <{ %43, %44, %47*, i64, i64, i64, i32, %11, %54, %46*, %46*, i32, i32, i32, i32, i32, i32, i32 }> ; type %42
- type <{ i16, i8, i8, i32, i32 }> ; type %43
- type <{ %45, i32, i32 }> ; type %44
- type <{ %46*, %46*, i64, i64 }> ; type %45
- type <{ %45, %15, i64, i8, i8, i8, i8, i16, i16 }> ; type %46
- type <{ i64*, i64, %48*, i32, i32, i32, %6, %53, i32, i64, i64*, i64*, %48*, %48*, %48*, i32 }> ; type %47
- type <{ %3, %43, i64, %49*, i32, i32, i32, i32, %48*, %48*, i64, %50*, i64, %52*, i32, i16, i16, i8, i8, i8, i8, %3, %3, i64, i32, i32, i32, i8*, i32, i8, i8, i8, i8, %3 }> ; type %48
- type <{ %3, %3, %49*, %48*, i64, i8, i8, i8, i8, i32, i8, i8, i8, i8 }> ; type %49
- type <{ i32, %51* }> ; type %50
- type <{ void (%50*)*, void (%50*)*, i32 (%50*, %52*, i32)*, i32 (%50*)*, i32 (%50*, i64, i32, i32, i32*)*, i32 (%50*, i64, i32, i64*, i32*, i32, i32, i32)*, i32 (%50*, i64, i32)*, i32 (%50*, i64, i64, i32)*, i32 (%50*, i64, i64, i32)*, i32 (%50*, i32)*, i32 (%50*)*, i8* }> ; type %51
- type <{ i32, %48* }> ; type %52
- type <{ i32, i32, i32 }> ; type %53
- type <{ %11, %55*, i32, %53, i64 }> ; type %54
- type <{ %3, i32, i32, i32, i32, i32, [64 x i8], %56 }> ; type %55
- type <{ %57, %58, %58 }> ; type %56
- type <{ i64, i64, i64, i64, i64 }> ; type %57
- type <{ i64, i64, i64, i64, i64, i64, i64, i64 }> ; type %58
- type <{ [2 x i32] }> ; type %59
- type <{ [8 x i32] }> ; type %60
- type <{ %9*, i32, i32, i32 }> ; type %61
- type <{ %11, i32, %11, i32, i32, %63*, i32, %64*, %65, i32, i32, i32, i32, %41* }> ; type %62
- type <{ %10*, i32, %15, %15 }> ; type %63
- type opaque ; type %64
- type <{ i32, %66*, %66*, %66**, %66*, %66** }> ; type %65
- type <{ %63, i32, %62*, %66*, %66* }> ; type %66
- type <{ i32, i32, [0 x %39] }> ; type %67
- type opaque ; type %68
- type <{ %69*, void (%69*, %2*)* }> ; type %69
- type <{ %70*, %2*, i8*, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i32, i32, i32, i32, i32, i32, i32, %71, i32, i32, i64, i64, i64, %72, i8*, i8*, %73, %4*, %79*, %81*, %39*, %84, i32, i32, i32, i8*, i32, i32, i32, i32, i32, i32, i32, i64*, i32, i64*, i8*, i32, [256 x i32], i64, i64, %86, %77*, i64, i64, %88*, %2*, %2* }> ; type %70
- type <{ %3, i64, i32, i32 }> ; type %71
- type <{ i64, i64, i64 }> ; type %72
- type <{ %73*, %73*, %73*, %73*, %74*, %75*, %76*, %70*, i32, i32, i32, i32, i32, i32, i32, i32, i64, i64, [3 x %78*], i8*, i8* }> ; type %73
- type <{ %74*, %74*, %75*, %76*, %73*, i32, i32, i32, i32, i32, i8*, i8* }> ; type %74
- type <{ %75*, %73*, %74*, %76*, i32, i32, i32, i32, %78*, i8*, i8* }> ; type %75
- type <{ %76*, %73*, %74*, %75*, i32, i32, i32, i32, i8*, i8*, %77* }> ; type %76
- type opaque ; type %77
- type <{ %78*, %75*, i8, i8, i8, i8, i16, i16, i16, i8, i8, i32, [0 x %73*] }> ; type %78
- type <{ i32, i32, i32, [20 x %80] }> ; type %79
- type <{ i64*, i8* }> ; type %80
- type <{ [256 x %39], [19 x %39], i8, i8, i8, i8, i8, i8, i8, i8, %82, i8, i8, i8, i8, i8, i8, i8, i8, %82, %83 }> ; type %81
- type <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i16 }> ; type %82
- type <{ [16 x i64], i64 }> ; type %83
- type <{ %82*, %85, %85, %39*, i32 }> ; type %84
- type <{ i16, %39* }> ; type %85
- type <{ %87, i8* }> ; type %86
- type <{ i32, i32, i32, i8, i8, i16, i32, i32, i32, i32, i8, i8, i8, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }> ; type %87
- type <{ i64, i64, i32, i32, i32, i32 }> ; type %88
- type <{ i32, i32, i32, i32, i32, i32, i32 }> ; type %89
-@kernel_stack_size = external global i32 ; <i32*> [#uses=1]
-
-define void @test(%0*) nounwind {
- %2 = tail call %2* asm sideeffect "mov %gs:${1:P},$0", "=r,i,~{dirflag},~{fpsr},~{flags}"(i32 ptrtoint (%2** getelementptr (%70* null, i32 0, i32 1) to i32)) nounwind ; <%2*> [#uses=1]
- %3 = getelementptr %2* %2, i32 0, i32 15 ; <i32*> [#uses=1]
- %4 = load i32* %3 ; <i32> [#uses=2]
- %5 = icmp eq i32 %4, 0 ; <i1> [#uses=1]
- br i1 %5, label %47, label %6
-
-; <label>:6 ; preds = %1
- %7 = load i32* @kernel_stack_size ; <i32> [#uses=1]
- %8 = add i32 %7, %4 ; <i32> [#uses=1]
- %9 = inttoptr i32 %8 to %89* ; <%89*> [#uses=12]
- %10 = tail call %2* asm sideeffect "mov %gs:${1:P},$0", "=r,i,~{dirflag},~{fpsr},~{flags}"(i32 ptrtoint (%2** getelementptr (%70* null, i32 0, i32 1) to i32)) nounwind ; <%2*> [#uses=1]
- %11 = getelementptr %2* %10, i32 0, i32 65, i32 1 ; <%36**> [#uses=1]
- %12 = load %36** %11 ; <%36*> [#uses=1]
- %13 = getelementptr %36* %12, i32 0, i32 1 ; <%0**> [#uses=1]
- %14 = load %0** %13 ; <%0*> [#uses=1]
- %15 = icmp eq %0* %14, %0 ; <i1> [#uses=1]
- br i1 %15, label %40, label %16
-
-; <label>:16 ; preds = %6
- %17 = getelementptr %0* %0, i32 0, i32 1 ; <%1*> [#uses=1]
- %18 = getelementptr %89* %9, i32 -1, i32 0 ; <i32*> [#uses=1]
- %19 = getelementptr %0* %0, i32 0, i32 1, i32 0, i32 32 ; <i8*> [#uses=1]
- %20 = bitcast i8* %19 to i32* ; <i32*> [#uses=1]
- %21 = load i32* %20 ; <i32> [#uses=1]
- store i32 %21, i32* %18
- %22 = getelementptr %89* %9, i32 -1, i32 1 ; <i32*> [#uses=1]
- %23 = ptrtoint %1* %17 to i32 ; <i32> [#uses=1]
- store i32 %23, i32* %22
- %24 = getelementptr %89* %9, i32 -1, i32 2 ; <i32*> [#uses=1]
- %25 = getelementptr %0* %0, i32 0, i32 1, i32 0, i32 24 ; <i8*> [#uses=1]
- %26 = bitcast i8* %25 to i32* ; <i32*> [#uses=1]
- %27 = load i32* %26 ; <i32> [#uses=1]
- store i32 %27, i32* %24
- %28 = getelementptr %89* %9, i32 -1, i32 3 ; <i32*> [#uses=1]
- %29 = getelementptr %0* %0, i32 0, i32 1, i32 0, i32 16 ; <i8*> [#uses=1]
- %30 = bitcast i8* %29 to i32* ; <i32*> [#uses=1]
- %31 = load i32* %30 ; <i32> [#uses=1]
- store i32 %31, i32* %28
- %32 = getelementptr %89* %9, i32 -1, i32 4 ; <i32*> [#uses=1]
- %33 = getelementptr %0* %0, i32 0, i32 1, i32 0, i32 20 ; <i8*> [#uses=1]
- %34 = bitcast i8* %33 to i32* ; <i32*> [#uses=1]
- %35 = load i32* %34 ; <i32> [#uses=1]
- store i32 %35, i32* %32
- %36 = getelementptr %89* %9, i32 -1, i32 5 ; <i32*> [#uses=1]
- %37 = getelementptr %0* %0, i32 0, i32 1, i32 0, i32 56 ; <i8*> [#uses=1]
- %38 = bitcast i8* %37 to i32* ; <i32*> [#uses=1]
- %39 = load i32* %38 ; <i32> [#uses=1]
- store i32 %39, i32* %36
- ret void
-
-; <label>:40 ; preds = %6
- %41 = getelementptr %89* %9, i32 -1, i32 0 ; <i32*> [#uses=1]
- tail call void asm sideeffect "movl %ebx, $0", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* %41) nounwind
- %42 = getelementptr %89* %9, i32 -1, i32 1 ; <i32*> [#uses=1]
- tail call void asm sideeffect "movl %esp, $0", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* %42) nounwind
- %43 = getelementptr %89* %9, i32 -1, i32 2 ; <i32*> [#uses=1]
- tail call void asm sideeffect "movl %ebp, $0", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* %43) nounwind
- %44 = getelementptr %89* %9, i32 -1, i32 3 ; <i32*> [#uses=1]
- tail call void asm sideeffect "movl %edi, $0", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* %44) nounwind
- %45 = getelementptr %89* %9, i32 -1, i32 4 ; <i32*> [#uses=1]
- tail call void asm sideeffect "movl %esi, $0", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* %45) nounwind
- %46 = getelementptr %89* %9, i32 -1, i32 5 ; <i32*> [#uses=1]
- tail call void asm sideeffect "movl $$1f, $0\0A1:", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* %46) nounwind
- ret void
-
-; <label>:47 ; preds = %1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-12-FastIselOverflowCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-12-FastIselOverflowCrash.ll
deleted file mode 100644
index 27f11cf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-12-FastIselOverflowCrash.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -fast-isel
-; radr://6772169
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin10"
- type { i32, i1 } ; type %0
-
-declare %0 @llvm.sadd.with.overflow.i32(i32, i32) nounwind
-
-define fastcc i32 @test() nounwind {
-entry:
- %tmp1 = call %0 @llvm.sadd.with.overflow.i32(i32 1, i32 0)
- %tmp2 = extractvalue %0 %tmp1, 1
- br i1 %tmp2, label %.backedge, label %BB3
-
-BB3:
- %tmp4 = extractvalue %0 %tmp1, 0
- br label %.backedge
-
-.backedge:
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-12-picrel.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-12-picrel.ll
deleted file mode 100644
index f194280..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-12-picrel.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -march=x86-64 -relocation-model=static -code-model=small > %t
-; RUN: grep leaq %t | count 1
-
-@dst = external global [131072 x i32]
-@ptr = external global i32*
-
-define void @off01(i64 %i) nounwind {
-entry:
- %.sum = add i64 %i, 16
- %0 = getelementptr [131072 x i32]* @dst, i64 0, i64 %.sum
- store i32* %0, i32** @ptr, align 8
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-13-2AddrAssert-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-13-2AddrAssert-2.ll
deleted file mode 100644
index ff8cf0a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-13-2AddrAssert-2.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin
-; rdar://6781755
-; PR3934
-
- type { i32, i32 } ; type %0
-
-define void @bn_sqr_comba8(i32* nocapture %r, i32* %a) nounwind {
-entry:
- %asmtmp23 = tail call %0 asm "mulq $3", "={ax},={dx},{ax},*m,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 0, i32* %a) nounwind ; <%0> [#uses=1]
- %asmresult25 = extractvalue %0 %asmtmp23, 1 ; <i32> [#uses=1]
- %asmtmp26 = tail call %0 asm "addq $0,$0; adcq $2,$1", "={dx},=r,imr,0,1,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 0, i32 %asmresult25, i32 0) nounwind ; <%0> [#uses=1]
- %asmresult27 = extractvalue %0 %asmtmp26, 0 ; <i32> [#uses=1]
- %asmtmp29 = tail call %0 asm "addq $0,$0; adcq $2,$1", "={ax},={dx},imr,0,1,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 0, i32 0, i32 %asmresult27) nounwind ; <%0> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-13-2AddrAssert.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-13-2AddrAssert.ll
deleted file mode 100644
index 4362ba4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-13-2AddrAssert.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s
-; rdar://6781755
-; PR3934
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "x86_64-undermydesk-freebsd8.0"
-
-define i32 @main(i32 %argc, i8** nocapture %argv) nounwind {
-entry:
- %call = tail call i32 (...)* @getpid() ; <i32> [#uses=1]
- %conv = trunc i32 %call to i16 ; <i16> [#uses=1]
- %0 = tail call i16 asm "xchgb ${0:h}, ${0:b}","=Q,0,~{dirflag},~{fpsr},~{flags}"(i16 %conv) nounwind ; <i16> [#uses=0]
- ret i32 undef
-}
-
-declare i32 @getpid(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-14-IllegalRegs.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-14-IllegalRegs.ll
deleted file mode 100644
index bfa3eaa..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-14-IllegalRegs.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -O0 -regalloc=local | not grep sil
-; rdar://6787136
-
- %struct.X = type { i8, [32 x i8] }
-@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 ()* @z to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define i32 @z() nounwind ssp {
-entry:
- %retval = alloca i32 ; <i32*> [#uses=2]
- %xxx = alloca %struct.X ; <%struct.X*> [#uses=6]
- %0 = alloca i32 ; <i32*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %1 = getelementptr %struct.X* %xxx, i32 0, i32 1 ; <[32 x i8]*> [#uses=1]
- %2 = getelementptr [32 x i8]* %1, i32 0, i32 31 ; <i8*> [#uses=1]
- store i8 48, i8* %2, align 1
- %3 = getelementptr %struct.X* %xxx, i32 0, i32 1 ; <[32 x i8]*> [#uses=1]
- %4 = getelementptr [32 x i8]* %3, i32 0, i32 31 ; <i8*> [#uses=1]
- %5 = load i8* %4, align 1 ; <i8> [#uses=1]
- %6 = getelementptr %struct.X* %xxx, i32 0, i32 1 ; <[32 x i8]*> [#uses=1]
- %7 = getelementptr [32 x i8]* %6, i32 0, i32 0 ; <i8*> [#uses=1]
- store i8 %5, i8* %7, align 1
- %8 = getelementptr %struct.X* %xxx, i32 0, i32 0 ; <i8*> [#uses=1]
- store i8 15, i8* %8, align 1
- %9 = call i32 (...)* bitcast (i32 (%struct.X*, %struct.X*)* @f to i32 (...)*)(%struct.X* byval align 4 %xxx, %struct.X* byval align 4 %xxx) nounwind ; <i32> [#uses=1]
- store i32 %9, i32* %0, align 4
- %10 = load i32* %0, align 4 ; <i32> [#uses=1]
- store i32 %10, i32* %retval, align 4
- br label %return
-
-return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval1
-}
-
-declare i32 @f(%struct.X* byval align 4, %struct.X* byval align 4) nounwind ssp
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll
deleted file mode 100644
index f46eed4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll
+++ /dev/null
@@ -1,141 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -relocation-model=pic -disable-fp-elim -stats |& grep {Number of modref unfolded}
-; XFAIL: *
-; 69408 removed the opportunity for this optimization to work
-
- %struct.SHA512_CTX = type { [8 x i64], i64, i64, %struct.anon, i32, i32 }
- %struct.anon = type { [16 x i64] }
-@K512 = external constant [80 x i64], align 32 ; <[80 x i64]*> [#uses=2]
-
-define fastcc void @sha512_block_data_order(%struct.SHA512_CTX* nocapture %ctx, i8* nocapture %in, i64 %num) nounwind ssp {
-entry:
- br label %bb349
-
-bb349: ; preds = %bb349, %entry
- %e.0489 = phi i64 [ 0, %entry ], [ %e.0, %bb349 ] ; <i64> [#uses=3]
- %b.0472 = phi i64 [ 0, %entry ], [ %87, %bb349 ] ; <i64> [#uses=2]
- %asmtmp356 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 41, i64 %e.0489) nounwind ; <i64> [#uses=1]
- %0 = xor i64 0, %asmtmp356 ; <i64> [#uses=1]
- %1 = add i64 0, %0 ; <i64> [#uses=1]
- %2 = add i64 %1, 0 ; <i64> [#uses=1]
- %3 = add i64 %2, 0 ; <i64> [#uses=1]
- %4 = add i64 %3, 0 ; <i64> [#uses=5]
- %asmtmp372 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 34, i64 %4) nounwind ; <i64> [#uses=1]
- %asmtmp373 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 39, i64 %4) nounwind ; <i64> [#uses=0]
- %5 = xor i64 %asmtmp372, 0 ; <i64> [#uses=0]
- %6 = xor i64 0, %b.0472 ; <i64> [#uses=1]
- %7 = and i64 %4, %6 ; <i64> [#uses=1]
- %8 = xor i64 %7, 0 ; <i64> [#uses=1]
- %9 = add i64 0, %8 ; <i64> [#uses=1]
- %10 = add i64 %9, 0 ; <i64> [#uses=2]
- %asmtmp377 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 61, i64 0) nounwind ; <i64> [#uses=1]
- %11 = xor i64 0, %asmtmp377 ; <i64> [#uses=1]
- %12 = add i64 0, %11 ; <i64> [#uses=1]
- %13 = add i64 %12, 0 ; <i64> [#uses=1]
- %not381 = xor i64 0, -1 ; <i64> [#uses=1]
- %14 = and i64 %e.0489, %not381 ; <i64> [#uses=1]
- %15 = xor i64 0, %14 ; <i64> [#uses=1]
- %16 = add i64 %15, 0 ; <i64> [#uses=1]
- %17 = add i64 %16, %13 ; <i64> [#uses=1]
- %18 = add i64 %17, 0 ; <i64> [#uses=1]
- %19 = add i64 %18, 0 ; <i64> [#uses=2]
- %20 = add i64 %19, %b.0472 ; <i64> [#uses=3]
- %21 = add i64 %19, 0 ; <i64> [#uses=1]
- %22 = add i64 %21, 0 ; <i64> [#uses=1]
- %23 = add i32 0, 12 ; <i32> [#uses=1]
- %24 = and i32 %23, 12 ; <i32> [#uses=1]
- %25 = zext i32 %24 to i64 ; <i64> [#uses=1]
- %26 = getelementptr [16 x i64]* null, i64 0, i64 %25 ; <i64*> [#uses=0]
- %27 = add i64 0, %e.0489 ; <i64> [#uses=1]
- %28 = add i64 %27, 0 ; <i64> [#uses=1]
- %29 = add i64 %28, 0 ; <i64> [#uses=1]
- %30 = add i64 %29, 0 ; <i64> [#uses=2]
- %31 = and i64 %10, %4 ; <i64> [#uses=1]
- %32 = xor i64 0, %31 ; <i64> [#uses=1]
- %33 = add i64 %30, 0 ; <i64> [#uses=3]
- %34 = add i64 %30, %32 ; <i64> [#uses=1]
- %35 = add i64 %34, 0 ; <i64> [#uses=1]
- %36 = and i64 %33, %20 ; <i64> [#uses=1]
- %37 = xor i64 %36, 0 ; <i64> [#uses=1]
- %38 = add i64 %37, 0 ; <i64> [#uses=1]
- %39 = add i64 %38, 0 ; <i64> [#uses=1]
- %40 = add i64 %39, 0 ; <i64> [#uses=1]
- %41 = add i64 %40, 0 ; <i64> [#uses=1]
- %42 = add i64 %41, %4 ; <i64> [#uses=3]
- %43 = or i32 0, 6 ; <i32> [#uses=1]
- %44 = and i32 %43, 14 ; <i32> [#uses=1]
- %45 = zext i32 %44 to i64 ; <i64> [#uses=1]
- %46 = getelementptr [16 x i64]* null, i64 0, i64 %45 ; <i64*> [#uses=1]
- %not417 = xor i64 %42, -1 ; <i64> [#uses=1]
- %47 = and i64 %20, %not417 ; <i64> [#uses=1]
- %48 = xor i64 0, %47 ; <i64> [#uses=1]
- %49 = getelementptr [80 x i64]* @K512, i64 0, i64 0 ; <i64*> [#uses=1]
- %50 = load i64* %49, align 8 ; <i64> [#uses=1]
- %51 = add i64 %48, 0 ; <i64> [#uses=1]
- %52 = add i64 %51, 0 ; <i64> [#uses=1]
- %53 = add i64 %52, 0 ; <i64> [#uses=1]
- %54 = add i64 %53, %50 ; <i64> [#uses=2]
- %asmtmp420 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 34, i64 0) nounwind ; <i64> [#uses=1]
- %asmtmp421 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 39, i64 0) nounwind ; <i64> [#uses=1]
- %55 = xor i64 %asmtmp420, 0 ; <i64> [#uses=1]
- %56 = xor i64 %55, %asmtmp421 ; <i64> [#uses=1]
- %57 = add i64 %54, %10 ; <i64> [#uses=5]
- %58 = add i64 %54, 0 ; <i64> [#uses=1]
- %59 = add i64 %58, %56 ; <i64> [#uses=2]
- %60 = or i32 0, 7 ; <i32> [#uses=1]
- %61 = and i32 %60, 15 ; <i32> [#uses=1]
- %62 = zext i32 %61 to i64 ; <i64> [#uses=1]
- %63 = getelementptr [16 x i64]* null, i64 0, i64 %62 ; <i64*> [#uses=2]
- %64 = load i64* null, align 8 ; <i64> [#uses=1]
- %65 = lshr i64 %64, 6 ; <i64> [#uses=1]
- %66 = xor i64 0, %65 ; <i64> [#uses=1]
- %67 = xor i64 %66, 0 ; <i64> [#uses=1]
- %68 = load i64* %46, align 8 ; <i64> [#uses=1]
- %69 = load i64* null, align 8 ; <i64> [#uses=1]
- %70 = add i64 %68, 0 ; <i64> [#uses=1]
- %71 = add i64 %70, %67 ; <i64> [#uses=1]
- %72 = add i64 %71, %69 ; <i64> [#uses=1]
- %asmtmp427 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 18, i64 %57) nounwind ; <i64> [#uses=1]
- %asmtmp428 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 41, i64 %57) nounwind ; <i64> [#uses=1]
- %73 = xor i64 %asmtmp427, 0 ; <i64> [#uses=1]
- %74 = xor i64 %73, %asmtmp428 ; <i64> [#uses=1]
- %75 = and i64 %57, %42 ; <i64> [#uses=1]
- %not429 = xor i64 %57, -1 ; <i64> [#uses=1]
- %76 = and i64 %33, %not429 ; <i64> [#uses=1]
- %77 = xor i64 %75, %76 ; <i64> [#uses=1]
- %78 = getelementptr [80 x i64]* @K512, i64 0, i64 0 ; <i64*> [#uses=1]
- %79 = load i64* %78, align 16 ; <i64> [#uses=1]
- %80 = add i64 %77, %20 ; <i64> [#uses=1]
- %81 = add i64 %80, %72 ; <i64> [#uses=1]
- %82 = add i64 %81, %74 ; <i64> [#uses=1]
- %83 = add i64 %82, %79 ; <i64> [#uses=1]
- %asmtmp432 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 34, i64 %59) nounwind ; <i64> [#uses=1]
- %asmtmp433 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 39, i64 %59) nounwind ; <i64> [#uses=1]
- %84 = xor i64 %asmtmp432, 0 ; <i64> [#uses=1]
- %85 = xor i64 %84, %asmtmp433 ; <i64> [#uses=1]
- %86 = add i64 %83, %22 ; <i64> [#uses=2]
- %87 = add i64 0, %85 ; <i64> [#uses=1]
- %asmtmp435 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 8, i64 0) nounwind ; <i64> [#uses=1]
- %88 = xor i64 0, %asmtmp435 ; <i64> [#uses=1]
- %89 = load i64* null, align 8 ; <i64> [#uses=3]
- %asmtmp436 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 19, i64 %89) nounwind ; <i64> [#uses=1]
- %asmtmp437 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 61, i64 %89) nounwind ; <i64> [#uses=1]
- %90 = lshr i64 %89, 6 ; <i64> [#uses=1]
- %91 = xor i64 %asmtmp436, %90 ; <i64> [#uses=1]
- %92 = xor i64 %91, %asmtmp437 ; <i64> [#uses=1]
- %93 = load i64* %63, align 8 ; <i64> [#uses=1]
- %94 = load i64* null, align 8 ; <i64> [#uses=1]
- %95 = add i64 %93, %88 ; <i64> [#uses=1]
- %96 = add i64 %95, %92 ; <i64> [#uses=1]
- %97 = add i64 %96, %94 ; <i64> [#uses=2]
- store i64 %97, i64* %63, align 8
- %98 = and i64 %86, %57 ; <i64> [#uses=1]
- %not441 = xor i64 %86, -1 ; <i64> [#uses=1]
- %99 = and i64 %42, %not441 ; <i64> [#uses=1]
- %100 = xor i64 %98, %99 ; <i64> [#uses=1]
- %101 = add i64 %100, %33 ; <i64> [#uses=1]
- %102 = add i64 %101, %97 ; <i64> [#uses=1]
- %103 = add i64 %102, 0 ; <i64> [#uses=1]
- %104 = add i64 %103, 0 ; <i64> [#uses=1]
- %e.0 = add i64 %104, %35 ; <i64> [#uses=1]
- br label %bb349
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-20-LinearScanOpt.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-20-LinearScanOpt.ll
deleted file mode 100644
index d7b9463..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-20-LinearScanOpt.ll
+++ /dev/null
@@ -1,121 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -relocation-model=pic -disable-fp-elim -stats |& grep asm-printer | grep 83
-; rdar://6802189
-
-; Test if linearscan is unfavoring registers for allocation to allow more reuse
-; of reloads from stack slots.
-
- %struct.SHA_CTX = type { i32, i32, i32, i32, i32, i32, i32, [16 x i32], i32 }
-
-define fastcc void @sha1_block_data_order(%struct.SHA_CTX* nocapture %c, i8* %p, i64 %num) nounwind {
-entry:
- br label %bb
-
-bb: ; preds = %bb, %entry
- %asmtmp511 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 1, i32 0) nounwind ; <i32> [#uses=3]
- %asmtmp513 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 30, i32 0) nounwind ; <i32> [#uses=2]
- %asmtmp516 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 30, i32 0) nounwind ; <i32> [#uses=1]
- %asmtmp517 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 1, i32 0) nounwind ; <i32> [#uses=2]
- %0 = xor i32 0, %asmtmp513 ; <i32> [#uses=0]
- %1 = add i32 0, %asmtmp517 ; <i32> [#uses=1]
- %2 = add i32 %1, 0 ; <i32> [#uses=1]
- %3 = add i32 %2, 0 ; <i32> [#uses=1]
- %asmtmp519 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 30, i32 0) nounwind ; <i32> [#uses=1]
- %4 = xor i32 0, %asmtmp511 ; <i32> [#uses=1]
- %asmtmp520 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 1, i32 %4) nounwind ; <i32> [#uses=2]
- %5 = xor i32 0, %asmtmp516 ; <i32> [#uses=1]
- %6 = xor i32 %5, %asmtmp519 ; <i32> [#uses=1]
- %7 = add i32 %asmtmp513, -899497514 ; <i32> [#uses=1]
- %8 = add i32 %7, %asmtmp520 ; <i32> [#uses=1]
- %9 = add i32 %8, %6 ; <i32> [#uses=1]
- %10 = add i32 %9, 0 ; <i32> [#uses=1]
- %asmtmp523 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 1, i32 0) nounwind ; <i32> [#uses=1]
- %asmtmp525 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 30, i32 %3) nounwind ; <i32> [#uses=2]
- %11 = xor i32 0, %asmtmp525 ; <i32> [#uses=1]
- %12 = add i32 0, %11 ; <i32> [#uses=1]
- %13 = add i32 %12, 0 ; <i32> [#uses=2]
- %asmtmp528 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 30, i32 %10) nounwind ; <i32> [#uses=1]
- %14 = xor i32 0, %asmtmp520 ; <i32> [#uses=1]
- %asmtmp529 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 1, i32 %14) nounwind ; <i32> [#uses=1]
- %asmtmp530 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 5, i32 %13) nounwind ; <i32> [#uses=1]
- %15 = add i32 0, %asmtmp530 ; <i32> [#uses=1]
- %16 = xor i32 0, %asmtmp523 ; <i32> [#uses=1]
- %asmtmp532 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 1, i32 %16) nounwind ; <i32> [#uses=2]
- %asmtmp533 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 5, i32 %15) nounwind ; <i32> [#uses=1]
- %17 = xor i32 %13, %asmtmp528 ; <i32> [#uses=1]
- %18 = xor i32 %17, 0 ; <i32> [#uses=1]
- %19 = add i32 %asmtmp525, -899497514 ; <i32> [#uses=1]
- %20 = add i32 %19, %asmtmp532 ; <i32> [#uses=1]
- %21 = add i32 %20, %18 ; <i32> [#uses=1]
- %22 = add i32 %21, %asmtmp533 ; <i32> [#uses=1]
- %23 = xor i32 0, %asmtmp511 ; <i32> [#uses=1]
- %24 = xor i32 %23, 0 ; <i32> [#uses=1]
- %asmtmp535 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 1, i32 %24) nounwind ; <i32> [#uses=3]
- %25 = add i32 0, %asmtmp535 ; <i32> [#uses=1]
- %26 = add i32 %25, 0 ; <i32> [#uses=1]
- %27 = add i32 %26, 0 ; <i32> [#uses=1]
- %28 = xor i32 0, %asmtmp529 ; <i32> [#uses=0]
- %29 = xor i32 %22, 0 ; <i32> [#uses=1]
- %30 = xor i32 %29, 0 ; <i32> [#uses=1]
- %31 = add i32 0, %30 ; <i32> [#uses=1]
- %32 = add i32 %31, 0 ; <i32> [#uses=3]
- %asmtmp541 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 1, i32 0) nounwind ; <i32> [#uses=2]
- %asmtmp542 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 5, i32 %32) nounwind ; <i32> [#uses=1]
- %33 = add i32 0, %asmtmp541 ; <i32> [#uses=1]
- %34 = add i32 %33, 0 ; <i32> [#uses=1]
- %35 = add i32 %34, %asmtmp542 ; <i32> [#uses=1]
- %asmtmp543 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 30, i32 %27) nounwind ; <i32> [#uses=2]
- %36 = xor i32 0, %asmtmp535 ; <i32> [#uses=0]
- %37 = xor i32 %32, 0 ; <i32> [#uses=1]
- %38 = xor i32 %37, %asmtmp543 ; <i32> [#uses=1]
- %39 = add i32 0, %38 ; <i32> [#uses=1]
- %40 = add i32 %39, 0 ; <i32> [#uses=2]
- %asmtmp546 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 30, i32 %32) nounwind ; <i32> [#uses=1]
- %asmtmp547 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 1, i32 0) nounwind ; <i32> [#uses=2]
- %41 = add i32 0, -899497514 ; <i32> [#uses=1]
- %42 = add i32 %41, %asmtmp547 ; <i32> [#uses=1]
- %43 = add i32 %42, 0 ; <i32> [#uses=1]
- %44 = add i32 %43, 0 ; <i32> [#uses=3]
- %asmtmp549 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 30, i32 %35) nounwind ; <i32> [#uses=2]
- %45 = xor i32 0, %asmtmp541 ; <i32> [#uses=1]
- %asmtmp550 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 1, i32 %45) nounwind ; <i32> [#uses=2]
- %asmtmp551 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 5, i32 %44) nounwind ; <i32> [#uses=1]
- %46 = xor i32 %40, %asmtmp546 ; <i32> [#uses=1]
- %47 = xor i32 %46, %asmtmp549 ; <i32> [#uses=1]
- %48 = add i32 %asmtmp543, -899497514 ; <i32> [#uses=1]
- %49 = add i32 %48, %asmtmp550 ; <i32> [#uses=1]
- %50 = add i32 %49, %47 ; <i32> [#uses=1]
- %51 = add i32 %50, %asmtmp551 ; <i32> [#uses=1]
- %asmtmp552 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 30, i32 %40) nounwind ; <i32> [#uses=2]
- %52 = xor i32 %44, %asmtmp549 ; <i32> [#uses=1]
- %53 = xor i32 %52, %asmtmp552 ; <i32> [#uses=1]
- %54 = add i32 0, %53 ; <i32> [#uses=1]
- %55 = add i32 %54, 0 ; <i32> [#uses=2]
- %asmtmp555 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 30, i32 %44) nounwind ; <i32> [#uses=2]
- %56 = xor i32 0, %asmtmp532 ; <i32> [#uses=1]
- %57 = xor i32 %56, %asmtmp547 ; <i32> [#uses=1]
- %asmtmp556 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 1, i32 %57) nounwind ; <i32> [#uses=1]
- %58 = add i32 0, %asmtmp556 ; <i32> [#uses=1]
- %59 = add i32 %58, 0 ; <i32> [#uses=1]
- %60 = add i32 %59, 0 ; <i32> [#uses=1]
- %asmtmp558 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 30, i32 %51) nounwind ; <i32> [#uses=1]
- %61 = xor i32 %asmtmp517, %asmtmp511 ; <i32> [#uses=1]
- %62 = xor i32 %61, %asmtmp535 ; <i32> [#uses=1]
- %63 = xor i32 %62, %asmtmp550 ; <i32> [#uses=1]
- %asmtmp559 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 1, i32 %63) nounwind ; <i32> [#uses=1]
- %64 = xor i32 %55, %asmtmp555 ; <i32> [#uses=1]
- %65 = xor i32 %64, %asmtmp558 ; <i32> [#uses=1]
- %asmtmp561 = tail call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 30, i32 %55) nounwind ; <i32> [#uses=1]
- %66 = add i32 %asmtmp552, -899497514 ; <i32> [#uses=1]
- %67 = add i32 %66, %65 ; <i32> [#uses=1]
- %68 = add i32 %67, %asmtmp559 ; <i32> [#uses=1]
- %69 = add i32 %68, 0 ; <i32> [#uses=1]
- %70 = add i32 %69, 0 ; <i32> [#uses=1]
- store i32 %70, i32* null, align 4
- %71 = add i32 0, %60 ; <i32> [#uses=1]
- store i32 %71, i32* null, align 4
- %72 = add i32 0, %asmtmp561 ; <i32> [#uses=1]
- store i32 %72, i32* null, align 4
- %73 = add i32 0, %asmtmp555 ; <i32> [#uses=1]
- store i32 %73, i32* null, align 4
- br label %bb
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-21-NoReloadImpDef.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-21-NoReloadImpDef.ll
deleted file mode 100644
index abbe97a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-21-NoReloadImpDef.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc -mtriple=i386-apple-darwin10.0 -relocation-model=pic -asm-verbose=false \
-; RUN: -disable-fp-elim -mattr=-sse41,-sse3,+sse2 -post-RA-scheduler=false < %s | \
-; RUN: FileCheck %s
-; rdar://6808032
-
-; CHECK: pextrw $14
-; CHECK-NEXT: movzbl
-; CHECK-NEXT: (%ebp)
-; CHECK-NEXT: pinsrw
-
-define void @update(i8** %args_list) nounwind {
-entry:
- %cmp.i = icmp eq i32 0, 0 ; <i1> [#uses=1]
- br i1 %cmp.i, label %if.then.i, label %test_cl.exit
-
-if.then.i: ; preds = %entry
- %val = load <16 x i8> addrspace(1)* null ; <<16 x i8>> [#uses=8]
- %tmp10.i = shufflevector <16 x i8> <i8 0, i8 0, i8 0, i8 undef, i8 0, i8 undef, i8 0, i8 undef, i8 undef, i8 undef, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef>, <16 x i8> %val, <16 x i32> <i32 0, i32 1, i32 2, i32 undef, i32 4, i32 undef, i32 6, i32 undef, i32 29, i32 undef, i32 10, i32 11, i32 12, i32 undef, i32 undef, i32 undef> ; <<16 x i8>> [#uses=1]
- %tmp17.i = shufflevector <16 x i8> %tmp10.i, <16 x i8> %val, <16 x i32> <i32 0, i32 1, i32 2, i32 18, i32 4, i32 undef, i32 6, i32 undef, i32 8, i32 undef, i32 10, i32 11, i32 12, i32 undef, i32 undef, i32 undef> ; <<16 x i8>> [#uses=1]
- %tmp24.i = shufflevector <16 x i8> %tmp17.i, <16 x i8> %val, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 24, i32 6, i32 undef, i32 8, i32 undef, i32 10, i32 11, i32 12, i32 undef, i32 undef, i32 undef> ; <<16 x i8>> [#uses=1]
- %tmp31.i = shufflevector <16 x i8> %tmp24.i, <16 x i8> %val, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 undef, i32 8, i32 undef, i32 10, i32 11, i32 12, i32 21, i32 undef, i32 undef> ; <<16 x i8>> [#uses=1]
- %tmp38.i = shufflevector <16 x i8> %tmp31.i, <16 x i8> %val, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 27, i32 8, i32 undef, i32 10, i32 11, i32 12, i32 13, i32 undef, i32 undef> ; <<16 x i8>> [#uses=1]
- %tmp45.i = shufflevector <16 x i8> %tmp38.i, <16 x i8> %val, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 undef, i32 10, i32 11, i32 12, i32 13, i32 29, i32 undef> ; <<16 x i8>> [#uses=1]
- %tmp52.i = shufflevector <16 x i8> %tmp45.i, <16 x i8> %val, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 21, i32 10, i32 11, i32 12, i32 13, i32 14, i32 undef> ; <<16 x i8>> [#uses=1]
- %tmp59.i = shufflevector <16 x i8> %tmp52.i, <16 x i8> %val, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 20> ; <<16 x i8>> [#uses=1]
- store <16 x i8> %tmp59.i, <16 x i8> addrspace(1)* null
- ret void
-
-test_cl.exit: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-24.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-24.ll
deleted file mode 100644
index c1ec45f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-24.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -regalloc=local -relocation-model=pic > %t
-; RUN: grep {leal.*TLSGD.*___tls_get_addr} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu -regalloc=local -relocation-model=pic > %t2
-; RUN: grep {leaq.*TLSGD.*__tls_get_addr} %t2
-; PR4004
-
-@i = thread_local global i32 15
-
-define i32 @f() {
-entry:
- %tmp1 = load i32* @i
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-25-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-25-CoalescerBug.ll
deleted file mode 100644
index 94d3eb2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-25-CoalescerBug.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep mov | count 2
-; rdar://6806252
-
-define i64 @test(i32* %tmp13) nounwind {
-entry:
- br label %while.cond
-
-while.cond: ; preds = %while.cond, %entry
- %tmp15 = load i32* %tmp13 ; <i32> [#uses=2]
- %bf.lo = lshr i32 %tmp15, 1 ; <i32> [#uses=1]
- %bf.lo.cleared = and i32 %bf.lo, 2147483647 ; <i32> [#uses=1]
- %conv = zext i32 %bf.lo.cleared to i64 ; <i64> [#uses=1]
- %bf.lo.cleared25 = and i32 %tmp15, 1 ; <i32> [#uses=1]
- %tobool = icmp ne i32 %bf.lo.cleared25, 0 ; <i1> [#uses=1]
- br i1 %tobool, label %while.cond, label %while.end
-
-while.end: ; preds = %while.cond
- ret i64 %conv
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-27-CoalescerAssert.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-27-CoalescerAssert.ll
deleted file mode 100644
index 7981a52..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-27-CoalescerAssert.ll
+++ /dev/null
@@ -1,1457 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu
-; PR4034
-
- %struct.BiContextType = type { i16, i8 }
- %struct.Bitstream = type { i32, i32, i32, i32, i8*, i32 }
- %struct.DataPartition = type { %struct.Bitstream*, %struct.DecodingEnvironment, i32 (%struct.SyntaxElement*, %struct.ImageParameters*, %struct.DataPartition*)* }
- %struct.DecRefPicMarking_t = type { i32, i32, i32, i32, i32, %struct.DecRefPicMarking_t* }
- %struct.DecodingEnvironment = type { i32, i32, i32, i32, i32, i8*, i32* }
- %struct.ImageParameters = type { i32, i32, i32, i32, i32*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [16 x [16 x i16]], [6 x [32 x i32]], [16 x [16 x i32]], [4 x [12 x [4 x [4 x i32]]]], [16 x i32], i8**, i32*, i32***, i32**, i32, i32, i32, i32, %struct.Slice*, %struct.Macroblock*, i32, i32, i32, i32, i32, i32, %struct.DecRefPicMarking_t*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [3 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32***, i32***, i32****, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [3 x [2 x i32]], [3 x [2 x i32]], i32, i32, i64, i64, %struct.timeb, %struct.timeb, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.Macroblock = type { i32, [2 x i32], i32, i32, %struct.Macroblock*, %struct.Macroblock*, i32, [2 x [4 x [4 x [2 x i32]]]], i32, i64, i64, i32, i32, [4 x i8], [4 x i8], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.MotionInfoContexts = type { [4 x [11 x %struct.BiContextType]], [2 x [9 x %struct.BiContextType]], [2 x [10 x %struct.BiContextType]], [2 x [6 x %struct.BiContextType]], [4 x %struct.BiContextType], [4 x %struct.BiContextType], [3 x %struct.BiContextType] }
- %struct.PixelPos = type { i32, i32, i32, i32, i32, i32 }
- %struct.Slice = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.DataPartition*, %struct.MotionInfoContexts*, %struct.TextureInfoContexts*, i32, i32*, i32*, i32*, i32, i32*, i32*, i32*, i32 (%struct.ImageParameters*, %struct.inp_par*)*, i32, i32, i32, i32 }
- %struct.SyntaxElement = type { i32, i32, i32, i32, i32, i32, i32, i32, void (i32, i32, i32*, i32*)*, void (%struct.SyntaxElement*, %struct.ImageParameters*, %struct.DecodingEnvironment*)* }
- %struct.TextureInfoContexts = type { [2 x %struct.BiContextType], [4 x %struct.BiContextType], [3 x [4 x %struct.BiContextType]], [10 x [4 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [5 x %struct.BiContextType]], [10 x [5 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]], [10 x [15 x %struct.BiContextType]] }
- %struct.inp_par = type { [1000 x i8], [1000 x i8], [1000 x i8], i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.timeb = type { i64, i16, i16, i16 }
-@get_mb_block_pos = external global void (i32, i32*, i32*)* ; <void (i32, i32*, i32*)**> [#uses=1]
-@img = external global %struct.ImageParameters* ; <%struct.ImageParameters**> [#uses=14]
-@llvm.used = appending global [1 x i8*] [i8* bitcast (void (i32, i32, i32, i32, %struct.PixelPos*)* @getAffNeighbour to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define void @getAffNeighbour(i32 %curr_mb_nr, i32 %xN, i32 %yN, i32 %is_chroma, %struct.PixelPos* %pix) nounwind {
-entry:
- %Opq.sa.calc = add i32 0, 2 ; <i32> [#uses=2]
- %0 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=3]
- %1 = getelementptr %struct.ImageParameters* %0, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %2 = load %struct.Macroblock** %1, align 8 ; <%struct.Macroblock*> [#uses=24]
- %3 = zext i32 %curr_mb_nr to i64 ; <i64> [#uses=24]
- %4 = sext i32 %is_chroma to i64 ; <i64> [#uses=8]
- br label %meshBB392
-
-entry.fragment: ; preds = %meshBB392
- %Opq.sa.calc747 = add i32 %Opq.sa.calc921, 70 ; <i32> [#uses=0]
- %5 = getelementptr %struct.ImageParameters* %0, i64 0, i32 119, i64 %4, i64 0 ; <i32*> [#uses=1]
- %6 = load i32* %5, align 4 ; <i32> [#uses=2]
- %7 = getelementptr %struct.ImageParameters* %0, i64 0, i32 119, i64 %4, i64 1 ; <i32*> [#uses=1]
- %8 = load i32* %7, align 4 ; <i32> [#uses=5]
- br label %entry.fragment181
-
-entry.fragment181: ; preds = %entry.fragment
- %Opq.sa.calc863 = add i32 %Opq.sa.calc921, -50 ; <i32> [#uses=4]
- %9 = getelementptr %struct.PixelPos* %pix, i64 0, i32 0 ; <i32*> [#uses=4]
- store i32 0, i32* %9, align 4
- %10 = add i32 %8, -1 ; <i32> [#uses=6]
- %11 = icmp slt i32 %10, %yN ; <i1> [#uses=1]
- br i1 %11, label %meshBB448, label %bb
-
-bb: ; preds = %entry.fragment181
- %Opq.sa.calc460 = add i32 %Opq.sa.calc863, 50 ; <i32> [#uses=0]
- %12 = add i32 %6, -1 ; <i32> [#uses=5]
- %13 = icmp slt i32 %12, %xN ; <i1> [#uses=1]
- br label %bb.fragment
-
-bb.fragment: ; preds = %bb
- %Opq.sa.calc976 = add i32 %Opq.sa.calc863, 13 ; <i32> [#uses=3]
- %.not8 = icmp sgt i32 %yN, -1 ; <i1> [#uses=1]
- %14 = icmp sgt i32 %8, %yN ; <i1> [#uses=1]
- %or.cond.not = and i1 %14, %.not8 ; <i1> [#uses=3]
- %or.cond1 = and i1 %or.cond.not, %13 ; <i1> [#uses=1]
- br i1 %or.cond1, label %meshBB396, label %bb3
-
-bb3: ; preds = %bb.fragment
- %Opq.sa.calc462 = sub i32 %Opq.sa.calc976, -152 ; <i32> [#uses=5]
- %Opq.sa.calc461 = sub i32 %Opq.sa.calc462, 168 ; <i32> [#uses=2]
- %15 = icmp slt i32 %xN, 0 ; <i1> [#uses=1]
- br i1 %15, label %bb4, label %meshBB404
-
-bb4: ; preds = %bb3
- %Opq.sa.calc467 = xor i32 %Opq.sa.calc462, 171 ; <i32> [#uses=2]
- %Opq.sa.calc465 = sub i32 %Opq.sa.calc467, %Opq.sa.calc462 ; <i32> [#uses=1]
- %Opq.sa.calc466 = xor i32 %Opq.sa.calc465, -164 ; <i32> [#uses=1]
- %16 = icmp slt i32 %yN, 0 ; <i1> [#uses=1]
- br i1 %16, label %meshBB428, label %meshBB392
-
-bb5: ; preds = %meshBB428
- %Opq.sa.calc470 = sub i32 %Opq.sa.calc897, -49 ; <i32> [#uses=1]
- %17 = getelementptr %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
- %18 = load i32* %17, align 4 ; <i32> [#uses=1]
- br label %bb5.fragment
-
-bb5.fragment: ; preds = %bb5
- %Opq.sa.calc873 = sub i32 %Opq.sa.calc470, 169 ; <i32> [#uses=7]
- %19 = icmp eq i32 %18, 0 ; <i1> [#uses=1]
- %20 = and i32 %curr_mb_nr, 1 ; <i32> [#uses=1]
- %21 = icmp eq i32 %20, 0 ; <i1> [#uses=2]
- br i1 %19, label %bb6, label %bb13
-
-bb6: ; preds = %bb5.fragment
- %Opq.sa.calc473 = xor i32 %Opq.sa.calc873, 81 ; <i32> [#uses=1]
- br i1 %21, label %bb7, label %meshBB348
-
-bb7: ; preds = %bb6
- %Opq.sa.calc476 = add i32 %Opq.sa.calc873, -58 ; <i32> [#uses=1]
- %22 = getelementptr %struct.Macroblock* %2, i64 %3, i32 25 ; <i32*> [#uses=1]
- %23 = load i32* %22, align 8 ; <i32> [#uses=1]
- %24 = add i32 %23, 1 ; <i32> [#uses=1]
- %25 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
- br label %meshBB388
-
-bb7.fragment: ; preds = %meshBB388
- %Opq.sa.calc709 = sub i32 %Opq.sa.calc886, 143 ; <i32> [#uses=1]
- %Opq.sa.calc707 = add i32 %Opq.sa.calc709, %Opq.sa.calc886 ; <i32> [#uses=1]
- %Opq.sa.calc708 = xor i32 %Opq.sa.calc707, 474 ; <i32> [#uses=0]
- store i32 %.SV194.phi, i32* %.SV196.phi, align 4
- %26 = getelementptr %struct.Macroblock* %.load17.SV.phi, i64 %.load36.SV.phi, i32 29 ; <i32*> [#uses=1]
- %27 = load i32* %26, align 8 ; <i32> [#uses=2]
- store i32 %27, i32* %.load67.SV.phi, align 4
- br label %bb96
-
-bb8: ; preds = %meshBB348
- %Opq.sa.calc479 = sub i32 %Opq.sa.calc805, 141 ; <i32> [#uses=1]
- %28 = getelementptr %struct.Macroblock* %2, i64 %3, i32 22 ; <i32*> [#uses=2]
- %29 = load i32* %28, align 4 ; <i32> [#uses=2]
- %30 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=2]
- br label %meshBB368
-
-bb8.fragment: ; preds = %meshBB368
- %Opq.sa.calc765 = sub i32 %Opq.sa.calc768, -115 ; <i32> [#uses=2]
- store i32 %.SV198.phi, i32* %.SV200.phi, align 4
- %31 = getelementptr %struct.Macroblock* %.load16.SV.phi, i64 %.load35.SV.phi, i32 26 ; <i32*> [#uses=2]
- %32 = load i32* %31, align 4 ; <i32> [#uses=4]
- store i32 %32, i32* %.load66.SV.phi, align 4
- %33 = load i32* %31, align 4 ; <i32> [#uses=1]
- %34 = icmp eq i32 %33, 0 ; <i1> [#uses=1]
- br i1 %34, label %bb96, label %bb9
-
-bb9: ; preds = %bb8.fragment
- %Opq.sa.calc482 = xor i32 %Opq.sa.calc765, 163 ; <i32> [#uses=0]
- %35 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %36 = getelementptr %struct.ImageParameters* %35, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %37 = load %struct.Macroblock** %36, align 8 ; <%struct.Macroblock*> [#uses=1]
- %38 = load i32* %.SV76.phi, align 4 ; <i32> [#uses=1]
- br label %bb9.fragment
-
-bb9.fragment: ; preds = %bb9
- %Opq.sa.calc999 = add i32 %Opq.sa.calc765, -44 ; <i32> [#uses=1]
- %39 = sext i32 %38 to i64 ; <i64> [#uses=1]
- %40 = getelementptr %struct.Macroblock* %37, i64 %39, i32 20 ; <i32*> [#uses=1]
- %41 = load i32* %40, align 4 ; <i32> [#uses=1]
- %42 = icmp eq i32 %41, 0 ; <i1> [#uses=1]
- br i1 %42, label %bb96, label %bb11
-
-bb11: ; preds = %bb9.fragment
- %Opq.sa.calc485 = sub i32 %Opq.sa.calc999, 200 ; <i32> [#uses=2]
- %43 = add i32 %.SV78.phi, 1 ; <i32> [#uses=1]
- br label %meshBB332
-
-bb11.fragment: ; preds = %meshBB332
- %Opq.sa.calc954 = xor i32 %Opq.link.mask859, 233 ; <i32> [#uses=0]
- store i32 %.SV206.phi, i32* %.load81.SV.phi, align 4
- %44 = add i32 %.load50.SV.phi, %yN ; <i32> [#uses=1]
- %45 = ashr i32 %44, 1 ; <i32> [#uses=1]
- br label %bb96
-
-bb13: ; preds = %bb5.fragment
- %Opq.sa.calc490 = xor i32 %Opq.sa.calc873, 175 ; <i32> [#uses=1]
- %Opq.sa.calc488 = sub i32 %Opq.sa.calc490, %Opq.sa.calc873 ; <i32> [#uses=1]
- %Opq.sa.calc489 = sub i32 %Opq.sa.calc488, 133 ; <i32> [#uses=1]
- %46 = getelementptr %struct.Macroblock* %2, i64 %3, i32 25 ; <i32*> [#uses=1]
- br label %meshBB360
-
-bb13.fragment: ; preds = %meshBB360
- %Opq.sa.calc870 = add i32 %Opq.sa.calc866, -129 ; <i32> [#uses=3]
- %47 = load i32* %.SV208.phi, align 8 ; <i32> [#uses=3]
- br i1 %.load74.SV.phi, label %bb14, label %meshBB412
-
-bb14: ; preds = %bb13.fragment
- %Opq.sa.calc493 = add i32 %Opq.sa.calc870, 103 ; <i32> [#uses=1]
- %48 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=2]
- store i32 %47, i32* %48, align 4
- %49 = getelementptr %struct.Macroblock* %2, i64 %3, i32 29 ; <i32*> [#uses=2]
- br label %bb14.fragment
-
-bb14.fragment: ; preds = %bb14
- %Opq.sa.calc723 = sub i32 %Opq.sa.calc493, 117 ; <i32> [#uses=4]
- %50 = load i32* %49, align 8 ; <i32> [#uses=4]
- store i32 %50, i32* %.SV52.phi1113, align 4
- %51 = load i32* %49, align 8 ; <i32> [#uses=1]
- %52 = icmp eq i32 %51, 0 ; <i1> [#uses=1]
- br i1 %52, label %meshBB, label %bb15
-
-bb15: ; preds = %bb14.fragment
- %Opq.sa.calc496 = sub i32 %Opq.sa.calc723, -8 ; <i32> [#uses=1]
- %53 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %54 = getelementptr %struct.ImageParameters* %53, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %55 = load %struct.Macroblock** %54, align 8 ; <%struct.Macroblock*> [#uses=1]
- %56 = load i32* %.SV208.phi, align 8 ; <i32> [#uses=1]
- br label %meshBB324
-
-bb15.fragment: ; preds = %meshBB324
- %Opq.sa.calc925 = xor i32 %Opq.sa.calc750, 215 ; <i32> [#uses=2]
- %57 = sext i32 %.SV214.phi to i64 ; <i64> [#uses=1]
- %58 = getelementptr %struct.Macroblock* %.SV212.phi, i64 %57, i32 20 ; <i32*> [#uses=1]
- %59 = load i32* %58, align 4 ; <i32> [#uses=1]
- %60 = icmp eq i32 %59, 0 ; <i1> [#uses=1]
- br i1 %60, label %bb16, label %bb96
-
-bb16: ; preds = %bb15.fragment
- %Opq.sa.calc499 = sub i32 %Opq.sa.calc925, -140 ; <i32> [#uses=0]
- %61 = add i32 %.SV87.phi, 1 ; <i32> [#uses=1]
- br label %bb16.fragment
-
-bb16.fragment: ; preds = %bb16
- %Opq.sa.calc968 = add i32 %Opq.sa.calc925, 129 ; <i32> [#uses=0]
- store i32 %61, i32* %.SV91.phi, align 4
- %62 = shl i32 %yN, 1 ; <i32> [#uses=1]
- br label %bb96
-
-bb19: ; preds = %meshBB412
- %Opq.sa.calc502 = sub i32 %Opq.sa.calc932, -94 ; <i32> [#uses=0]
- %63 = add i32 %.SV87.phi1030, 1 ; <i32> [#uses=1]
- %64 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
- br label %bb19.fragment
-
-bb19.fragment: ; preds = %bb19
- %Opq.sa.calc880 = xor i32 %Opq.sa.calc932, 246 ; <i32> [#uses=0]
- store i32 %63, i32* %64, align 4
- %65 = getelementptr %struct.Macroblock* %2, i64 %3, i32 29 ; <i32*> [#uses=1]
- %66 = load i32* %65, align 8 ; <i32> [#uses=2]
- store i32 %66, i32* %.SV52.phi1186, align 4
- br label %bb96
-
-bb21: ; preds = %meshBB392
- %Opq.sa.calc505 = add i32 %Opq.sa.calc921, -40 ; <i32> [#uses=2]
- br i1 %or.cond.not.SV.phi, label %meshBB360, label %bb97
-
-bb23: ; preds = %meshBB360
- %Opq.sa.calc509 = xor i32 %Opq.sa.calc866, 70 ; <i32> [#uses=1]
- %Opq.sa.calc508 = sub i32 %Opq.sa.calc509, -19 ; <i32> [#uses=0]
- %67 = getelementptr %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
- %68 = load i32* %67, align 4 ; <i32> [#uses=1]
- %69 = icmp eq i32 %68, 0 ; <i1> [#uses=1]
- %70 = and i32 %curr_mb_nr, 1 ; <i32> [#uses=1]
- %71 = icmp eq i32 %70, 0 ; <i1> [#uses=2]
- br label %bb23.fragment
-
-bb23.fragment: ; preds = %bb23
- %Opq.sa.calc847 = sub i32 %Opq.sa.calc866, -9 ; <i32> [#uses=2]
- %72 = getelementptr %struct.Macroblock* %2, i64 %3, i32 22 ; <i32*> [#uses=3]
- %73 = load i32* %72, align 4 ; <i32> [#uses=3]
- %74 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=3]
- store i32 %73, i32* %74, align 4
- br label %bb23.fragment182
-
-bb23.fragment182: ; preds = %bb23.fragment
- %Opq.sa.calc744 = xor i32 %Opq.sa.calc847, 152 ; <i32> [#uses=4]
- %Opq.sa.calc742 = add i32 %Opq.sa.calc744, %Opq.sa.calc847 ; <i32> [#uses=1]
- %Opq.sa.calc743 = add i32 %Opq.sa.calc742, -149 ; <i32> [#uses=2]
- %75 = getelementptr %struct.Macroblock* %2, i64 %3, i32 26 ; <i32*> [#uses=2]
- %76 = load i32* %75, align 4 ; <i32> [#uses=3]
- store i32 %76, i32* %.SV52.phi1113, align 4
- %77 = load i32* %75, align 4 ; <i32> [#uses=1]
- %78 = icmp ne i32 %77, 0 ; <i1> [#uses=2]
- br i1 %69, label %meshBB344, label %meshBB432
-
-bb24: ; preds = %meshBB344
- %Opq.sa.calc512 = add i32 %Opq.sa.calc716, -55 ; <i32> [#uses=3]
- br i1 %.SV96.phi, label %bb25, label %bb32
-
-bb25: ; preds = %bb24
- %Opq.sa.calc515 = sub i32 %Opq.sa.calc716, 18 ; <i32> [#uses=1]
- br i1 %.SV135.phi, label %bb26, label %bb96
-
-bb26: ; preds = %bb25
- %Opq.sa.calc519 = xor i32 %Opq.sa.calc515, 23 ; <i32> [#uses=2]
- %Opq.sa.calc518 = xor i32 %Opq.sa.calc519, 84 ; <i32> [#uses=1]
- %79 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %80 = getelementptr %struct.ImageParameters* %79, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %81 = load %struct.Macroblock** %80, align 8 ; <%struct.Macroblock*> [#uses=1]
- %82 = load i32* %.SV99.phi, align 4 ; <i32> [#uses=1]
- br label %meshBB340
-
-bb26.fragment: ; preds = %meshBB340
- %Opq.sa.calc918 = xor i32 %Opq.sa.calc754, 228 ; <i32> [#uses=4]
- %Opq.sa.calc916 = add i32 %Opq.sa.calc918, %Opq.sa.calc754 ; <i32> [#uses=1]
- %Opq.sa.calc917 = add i32 %Opq.sa.calc916, -237 ; <i32> [#uses=1]
- %83 = sext i32 %.SV230.phi to i64 ; <i64> [#uses=1]
- %84 = getelementptr %struct.Macroblock* %.SV228.phi, i64 %83, i32 20 ; <i32*> [#uses=1]
- %85 = load i32* %84, align 4 ; <i32> [#uses=1]
- %86 = icmp eq i32 %85, 0 ; <i1> [#uses=1]
- br i1 %86, label %meshBB420, label %meshBB356
-
-bb28: ; preds = %meshBB356
- %Opq.sa.calc522 = xor i32 %Opq.sa.calc983, 107 ; <i32> [#uses=2]
- %87 = and i32 %yN, 1 ; <i32> [#uses=1]
- %88 = icmp eq i32 %87, 0 ; <i1> [#uses=1]
- br i1 %88, label %bb29, label %bb30
-
-bb29: ; preds = %bb28
- %Opq.sa.calc525 = xor i32 %Opq.sa.calc522, 151 ; <i32> [#uses=2]
- %89 = ashr i32 %yN, 1 ; <i32> [#uses=1]
- br label %meshBB340
-
-bb30: ; preds = %bb28
- %Opq.sa.calc528 = sub i32 %Opq.sa.calc522, -64 ; <i32> [#uses=1]
- %90 = add i32 %.SV104.phi1160, 1 ; <i32> [#uses=1]
- br label %bb30.fragment
-
-bb30.fragment: ; preds = %bb30
- %Opq.sa.calc791 = add i32 %Opq.sa.calc528, -14 ; <i32> [#uses=0]
- store i32 %90, i32* %.SV111.phi1159, align 4
- %91 = ashr i32 %yN, 1 ; <i32> [#uses=1]
- br label %bb96
-
-bb32: ; preds = %bb24
- %Opq.sa.calc531 = xor i32 %Opq.sa.calc512, 50 ; <i32> [#uses=1]
- br i1 %.SV135.phi, label %bb33, label %meshBB324
-
-bb33: ; preds = %bb32
- %Opq.sa.calc534 = sub i32 %Opq.sa.calc512, -75 ; <i32> [#uses=2]
- %92 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %93 = getelementptr %struct.ImageParameters* %92, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %94 = load %struct.Macroblock** %93, align 8 ; <%struct.Macroblock*> [#uses=1]
- %95 = load i32* %.SV99.phi, align 4 ; <i32> [#uses=1]
- br label %bb33.fragment
-
-bb33.fragment: ; preds = %bb33
- %Opq.sa.calc712 = add i32 %Opq.sa.calc534, -109 ; <i32> [#uses=3]
- %96 = sext i32 %95 to i64 ; <i64> [#uses=1]
- %97 = getelementptr %struct.Macroblock* %94, i64 %96, i32 20 ; <i32*> [#uses=1]
- %98 = load i32* %97, align 4 ; <i32> [#uses=1]
- %99 = icmp eq i32 %98, 0 ; <i1> [#uses=1]
- br i1 %99, label %bb34, label %meshBB
-
-bb34: ; preds = %bb33.fragment
- %Opq.sa.calc537 = add i32 %Opq.sa.calc712, 8 ; <i32> [#uses=1]
- %100 = add i32 %.SV104.phi, 1 ; <i32> [#uses=1]
- br label %meshBB328
-
-bb34.fragment: ; preds = %meshBB328
- %Opq.sa.calc965 = xor i32 %Opq.sa.calc787, 251 ; <i32> [#uses=0]
- store i32 %.SV238.phi, i32* %.load116.SV.phi, align 4
- br label %bb96
-
-bb35: ; preds = %meshBB
- %Opq.sa.calc541 = add i32 %Opq.sa.calc828, -112 ; <i32> [#uses=3]
- %Opq.sa.calc540 = xor i32 %Opq.sa.calc541, 3 ; <i32> [#uses=1]
- %101 = and i32 %yN, 1 ; <i32> [#uses=1]
- %102 = icmp eq i32 %101, 0 ; <i1> [#uses=1]
- br i1 %102, label %meshBB372, label %meshBB448
-
-bb36: ; preds = %meshBB372
- %Opq.sa.calc544 = sub i32 %Opq.sa.calc812, -10 ; <i32> [#uses=0]
- %103 = add i32 %.SV43.phi1015, %yN ; <i32> [#uses=1]
- br label %bb36.fragment
-
-bb36.fragment: ; preds = %bb36
- %Opq.sa.calc762 = add i32 %Opq.sa.calc812, -69 ; <i32> [#uses=0]
- %104 = ashr i32 %103, 1 ; <i32> [#uses=1]
- br label %bb96
-
-bb37: ; preds = %meshBB448
- %Opq.sa.calc547 = add i32 %Opq.sa.calc958, -49 ; <i32> [#uses=1]
- %105 = add i32 %.SV104.phi1157, 1 ; <i32> [#uses=1]
- br label %meshBB348
-
-bb37.fragment: ; preds = %meshBB348
- %Opq.sa.calc728 = add i32 %Opq.sa.calc805, -5 ; <i32> [#uses=0]
- store i32 %.SV242.phi, i32* %.load115.SV.phi, align 4
- %106 = add i32 %.load48.SV.phi, %yN ; <i32> [#uses=1]
- %107 = ashr i32 %106, 1 ; <i32> [#uses=1]
- br label %bb96
-
-bb39: ; preds = %meshBB432
- %Opq.sa.calc550 = sub i32 %Opq.sa.calc798, -214 ; <i32> [#uses=0]
- br i1 %.SV96.phi1038, label %bb40, label %bb48
-
-bb40: ; preds = %bb39
- %Opq.sa.calc554 = xor i32 %Opq.sa.calc798, 14 ; <i32> [#uses=4]
- %Opq.sa.calc553 = sub i32 %Opq.sa.calc554, 7 ; <i32> [#uses=1]
- br i1 %.SV135.phi1039, label %meshBB336, label %meshBB444
-
-bb41: ; preds = %meshBB336
- %Opq.sa.calc557 = sub i32 %Opq.sa.calc979, 143 ; <i32> [#uses=1]
- %108 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %109 = getelementptr %struct.ImageParameters* %108, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %110 = load %struct.Macroblock** %109, align 8 ; <%struct.Macroblock*> [#uses=1]
- %111 = load i32* %.SV99.phi1128, align 4 ; <i32> [#uses=1]
- br label %bb41.fragment
-
-bb41.fragment: ; preds = %bb41
- %Opq.sa.calc987 = xor i32 %Opq.sa.calc557, 213 ; <i32> [#uses=4]
- %112 = sext i32 %111 to i64 ; <i64> [#uses=1]
- %113 = getelementptr %struct.Macroblock* %110, i64 %112, i32 20 ; <i32*> [#uses=1]
- %114 = load i32* %113, align 4 ; <i32> [#uses=1]
- %115 = icmp eq i32 %114, 0 ; <i1> [#uses=1]
- br i1 %115, label %bb42, label %bb96
-
-bb42: ; preds = %bb41.fragment
- %Opq.sa.calc560 = add i32 %Opq.sa.calc987, -221 ; <i32> [#uses=1]
- %116 = ashr i32 %.SV43.phi1230, 1 ; <i32> [#uses=1]
- %117 = icmp sgt i32 %116, %yN ; <i1> [#uses=1]
- br i1 %117, label %meshBB432, label %bb44
-
-bb43: ; preds = %meshBB432
- %Opq.sa.calc563 = xor i32 %Opq.sa.calc798, 31 ; <i32> [#uses=0]
- %118 = shl i32 %yN, 1 ; <i32> [#uses=1]
- br label %bb96
-
-bb44: ; preds = %bb42
- %Opq.sa.calc566 = sub i32 %Opq.sa.calc987, 217 ; <i32> [#uses=1]
- %119 = add i32 %.SV104.phi1127, 1 ; <i32> [#uses=1]
- br label %meshBB332
-
-bb44.fragment: ; preds = %meshBB332
- %Opq.sa.calc894 = add i32 %Opq.sa.calc856, -200 ; <i32> [#uses=1]
- store i32 %.SV248.phi, i32* %.load114.SV.phi, align 4
- %120 = shl i32 %yN, 1 ; <i32> [#uses=1]
- %121 = sub i32 %120, %.load46.SV.phi ; <i32> [#uses=1]
- br label %meshBB376
-
-bb48: ; preds = %bb39
- %Opq.sa.calc569 = sub i32 %Opq.sa.calc798, -110 ; <i32> [#uses=1]
- br i1 %.SV135.phi1039, label %bb49, label %bb96
-
-bb49: ; preds = %bb48
- %Opq.sa.calc572 = add i32 %Opq.sa.calc798, 84 ; <i32> [#uses=0]
- %122 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %123 = getelementptr %struct.ImageParameters* %122, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %124 = load %struct.Macroblock** %123, align 8 ; <%struct.Macroblock*> [#uses=1]
- %125 = load i32* %.SV99.phi1037, align 4 ; <i32> [#uses=1]
- br label %bb49.fragment
-
-bb49.fragment: ; preds = %bb49
- %Opq.sa.calc860 = sub i32 %Opq.sa.calc569, 114 ; <i32> [#uses=5]
- %126 = sext i32 %125 to i64 ; <i64> [#uses=1]
- %127 = getelementptr %struct.Macroblock* %124, i64 %126, i32 20 ; <i32*> [#uses=1]
- %128 = load i32* %127, align 4 ; <i32> [#uses=1]
- %129 = icmp eq i32 %128, 0 ; <i1> [#uses=1]
- br i1 %129, label %bb50, label %meshBB380
-
-bb50: ; preds = %bb49.fragment
- %Opq.sa.calc577 = add i32 %Opq.sa.calc860, 12 ; <i32> [#uses=2]
- %130 = ashr i32 %.SV43.phi1178, 1 ; <i32> [#uses=1]
- %131 = icmp sgt i32 %130, %yN ; <i1> [#uses=1]
- br i1 %131, label %meshBB328, label %bb52
-
-bb51: ; preds = %meshBB328
- %Opq.sa.calc580 = xor i32 %Opq.sa.calc787, 194 ; <i32> [#uses=0]
- %132 = shl i32 %yN, 1 ; <i32> [#uses=1]
- %133 = or i32 %132, 1 ; <i32> [#uses=1]
- br label %bb96
-
-bb52: ; preds = %bb50
- %Opq.sa.calc584 = sub i32 %Opq.sa.calc860, -65 ; <i32> [#uses=2]
- %Opq.sa.calc583 = sub i32 %Opq.sa.calc584, 50 ; <i32> [#uses=1]
- %134 = add i32 %.SV104.phi1036, 1 ; <i32> [#uses=1]
- store i32 %134, i32* %.SV111.phi1035, align 4
- br label %meshBB384
-
-bb52.fragment: ; preds = %meshBB384
- %Opq.sa.calc844 = add i32 %Opq.sa.calc901, -214 ; <i32> [#uses=1]
- %135 = shl i32 %yN, 1 ; <i32> [#uses=1]
- %136 = or i32 %135, 1 ; <i32> [#uses=1]
- %137 = sub i32 %136, %.load44.SV.phi ; <i32> [#uses=1]
- br label %meshBB388
-
-bb54: ; preds = %meshBB380
- %Opq.sa.calc589 = add i32 %Opq.sa.calc946, 108 ; <i32> [#uses=1]
- %138 = add i32 %.SV104.phi1124, 1 ; <i32> [#uses=1]
- br label %bb54.fragment
-
-bb54.fragment: ; preds = %bb54
- %Opq.sa.calc883 = xor i32 %Opq.sa.calc589, 119 ; <i32> [#uses=2]
- store i32 %138, i32* %.SV111.phi1123, align 4
- br label %meshBB440
-
-bb56: ; preds = %meshBB404
- %Opq.sa.calc592 = sub i32 %Opq.sa.calc939, 87 ; <i32> [#uses=2]
- %.not4 = icmp sgt i32 %xN, -1 ; <i1> [#uses=1]
- %139 = icmp sgt i32 %.SV40.phi, %xN ; <i1> [#uses=1]
- br label %meshBB364
-
-bb56.fragment: ; preds = %meshBB364
- %Opq.sa.calc1002 = xor i32 %Opq.link.mask737, 77 ; <i32> [#uses=6]
- %or.cond5 = and i1 %.SV256.phi, %.not4.SV.phi ; <i1> [#uses=1]
- %140 = icmp slt i32 %yN, 0 ; <i1> [#uses=2]
- br i1 %or.cond5, label %bb58, label %bb83
-
-bb58: ; preds = %bb56.fragment
- %Opq.sa.calc596 = xor i32 %Opq.sa.calc1002, 73 ; <i32> [#uses=1]
- %Opq.sa.calc595 = add i32 %Opq.sa.calc596, 147 ; <i32> [#uses=0]
- br i1 %140, label %bb59, label %bb76
-
-bb59: ; preds = %bb58
- %Opq.sa.calc599 = add i32 %Opq.sa.calc1002, 151 ; <i32> [#uses=0]
- %141 = getelementptr %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
- %142 = load i32* %141, align 4 ; <i32> [#uses=1]
- br label %bb59.fragment
-
-bb59.fragment: ; preds = %bb59
- %Opq.sa.calc731 = sub i32 %Opq.sa.calc1002, -161 ; <i32> [#uses=3]
- %143 = icmp eq i32 %142, 0 ; <i1> [#uses=1]
- %144 = and i32 %curr_mb_nr, 1 ; <i32> [#uses=1]
- %145 = icmp eq i32 %144, 0 ; <i1> [#uses=2]
- br i1 %143, label %bb60, label %bb68
-
-bb60: ; preds = %bb59.fragment
- %Opq.sa.calc602 = xor i32 %Opq.sa.calc731, 1 ; <i32> [#uses=2]
- br i1 %145, label %bb61, label %bb66
-
-bb61: ; preds = %bb60
- %Opq.sa.calc605 = xor i32 %Opq.sa.calc731, 57 ; <i32> [#uses=1]
- %146 = getelementptr %struct.Macroblock* %2, i64 %3, i32 23 ; <i32*> [#uses=2]
- %147 = load i32* %146, align 8 ; <i32> [#uses=3]
- %148 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=3]
- br label %bb61.fragment
-
-bb61.fragment: ; preds = %bb61
- %Opq.sa.calc700 = sub i32 %Opq.sa.calc605, 108 ; <i32> [#uses=3]
- store i32 %147, i32* %148, align 4
- %149 = getelementptr %struct.Macroblock* %2, i64 %3, i32 27 ; <i32*> [#uses=4]
- %150 = load i32* %149, align 8 ; <i32> [#uses=1]
- %151 = icmp eq i32 %150, 0 ; <i1> [#uses=1]
- br i1 %151, label %bb65, label %bb62
-
-bb62: ; preds = %bb61.fragment
- %Opq.sa.calc608 = add i32 %Opq.sa.calc700, -94 ; <i32> [#uses=1]
- %152 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=2]
- %153 = getelementptr %struct.ImageParameters* %152, i64 0, i32 45 ; <i32*> [#uses=1]
- %154 = load i32* %153, align 4 ; <i32> [#uses=1]
- %155 = icmp eq i32 %154, 1 ; <i1> [#uses=1]
- br i1 %155, label %bb63, label %bb64
-
-bb63: ; preds = %bb62
- %Opq.sa.calc611 = add i32 %Opq.sa.calc700, -101 ; <i32> [#uses=2]
- %156 = getelementptr %struct.ImageParameters* %152, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %157 = load %struct.Macroblock** %156, align 8 ; <%struct.Macroblock*> [#uses=1]
- %158 = load i32* %146, align 8 ; <i32> [#uses=1]
- br label %meshBB452
-
-bb63.fragment: ; preds = %meshBB452
- %Opq.sa.calc891 = add i32 %Opq.link.mask823, 18 ; <i32> [#uses=2]
- %Opq.sa.calc890 = add i32 %Opq.sa.calc891, -3 ; <i32> [#uses=2]
- %159 = sext i32 %.SV266.phi to i64 ; <i64> [#uses=1]
- %160 = getelementptr %struct.Macroblock* %.SV264.phi, i64 %159, i32 20 ; <i32*> [#uses=1]
- %161 = load i32* %160, align 4 ; <i32> [#uses=1]
- %162 = icmp eq i32 %161, 0 ; <i1> [#uses=1]
- br i1 %162, label %bb64, label %meshBB456
-
-bb64: ; preds = %bb63.fragment, %bb62
- %.SV38.phi1132 = phi i64 [ %.SV38.phi1110, %bb63.fragment ], [ %.SV38.phi1098, %bb62 ] ; <i64> [#uses=1]
- %.SV52.phi1131 = phi i32* [ %.SV52.phi1109, %bb63.fragment ], [ %.SV52.phi1097, %bb62 ] ; <i32*> [#uses=1]
- %.SV68.phi1130 = phi i32 [ %.SV68.phi1108, %bb63.fragment ], [ %.SV68.phi1096, %bb62 ] ; <i32> [#uses=1]
- %.SV70.phi1129 = phi i32 [ %.SV70.phi1107, %bb63.fragment ], [ %.SV70.phi1095, %bb62 ] ; <i32> [#uses=1]
- %Opq.link.SV615.phi = phi i32 [ %Opq.sa.calc890, %bb63.fragment ], [ %Opq.sa.calc608, %bb62 ] ; <i32> [#uses=1]
- %.SV150.phi = phi i32* [ %.SV150.phi1060, %bb63.fragment ], [ %148, %bb62 ] ; <i32*> [#uses=1]
- %.SV152.phi = phi i32* [ %.SV152.phi1059, %bb63.fragment ], [ %149, %bb62 ] ; <i32*> [#uses=1]
- %.SV148.phi = phi i32 [ %.SV148.phi1057, %bb63.fragment ], [ %147, %bb62 ] ; <i32> [#uses=1]
- %Opq.link.mask = and i32 %Opq.link.SV615.phi, 1 ; <i32> [#uses=1]
- %Opq.sa.calc614 = add i32 %Opq.link.mask, 189 ; <i32> [#uses=1]
- %163 = add i32 %.SV148.phi, 1 ; <i32> [#uses=1]
- store i32 %163, i32* %.SV150.phi, align 4
- br label %bb65
-
-bb65: ; preds = %meshBB456, %bb64, %bb61.fragment
- %.SV38.phi1144 = phi i64 [ %.SV38.phi1137, %meshBB456 ], [ %.SV38.phi1098, %bb61.fragment ], [ %.SV38.phi1132, %bb64 ] ; <i64> [#uses=1]
- %.SV52.phi1143 = phi i32* [ %.SV52.phi1136, %meshBB456 ], [ %.SV52.phi1097, %bb61.fragment ], [ %.SV52.phi1131, %bb64 ] ; <i32*> [#uses=1]
- %.SV68.phi1142 = phi i32 [ %.SV68.phi1135, %meshBB456 ], [ %.SV68.phi1096, %bb61.fragment ], [ %.SV68.phi1130, %bb64 ] ; <i32> [#uses=1]
- %.SV70.phi1141 = phi i32 [ %.SV70.phi1134, %meshBB456 ], [ %.SV70.phi1095, %bb61.fragment ], [ %.SV70.phi1129, %bb64 ] ; <i32> [#uses=1]
- %.SV152.phi1058 = phi i32* [ %.SV152.phi1133, %meshBB456 ], [ %149, %bb61.fragment ], [ %.SV152.phi, %bb64 ] ; <i32*> [#uses=1]
- %Opq.link.SV618.phi = phi i32 [ %Opq.sa.calc816, %meshBB456 ], [ %Opq.sa.calc700, %bb61.fragment ], [ %Opq.sa.calc614, %bb64 ] ; <i32> [#uses=1]
- %Opq.link.mask620 = and i32 %Opq.link.SV618.phi, 40 ; <i32> [#uses=1]
- %Opq.sa.calc617 = add i32 %Opq.link.mask620, -35 ; <i32> [#uses=2]
- %164 = load i32* %.SV152.phi1058, align 8 ; <i32> [#uses=1]
- br label %meshBB436
-
-bb65.fragment: ; preds = %meshBB436
- %Opq.sa.calc832 = add i32 %Opq.link.mask706, 1 ; <i32> [#uses=2]
- store i32 %.SV268.phi, i32* %.load62.SV.phi, align 4
- br label %meshBB364
-
-bb66: ; preds = %bb60
- %Opq.sa.calc621 = add i32 %Opq.sa.calc602, -217 ; <i32> [#uses=1]
- %165 = add i32 %curr_mb_nr, -1 ; <i32> [#uses=1]
- %166 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
- br label %meshBB420
-
-bb66.fragment: ; preds = %meshBB420
- %Opq.sa.calc795 = xor i32 %Opq.sa.calc837, 105 ; <i32> [#uses=2]
- %Opq.sa.calc794 = sub i32 %Opq.sa.calc795, 167 ; <i32> [#uses=1]
- store i32 %.SV270.phi, i32* %.SV272.phi, align 4
- store i32 1, i32* %.load61.SV.phi, align 4
- br label %meshBB444
-
-bb68: ; preds = %bb59.fragment
- %Opq.sa.calc624 = sub i32 %Opq.sa.calc731, 229 ; <i32> [#uses=3]
- %167 = getelementptr %struct.Macroblock* %2, i64 %3, i32 23 ; <i32*> [#uses=1]
- br label %meshBB344
-
-bb68.fragment: ; preds = %meshBB344
- %Opq.sa.calc784 = sub i32 %Opq.link.mask722, 3 ; <i32> [#uses=5]
- %168 = load i32* %.SV274.phi, align 8 ; <i32> [#uses=3]
- br i1 %.load144.SV.phi, label %bb69, label %meshBB412
-
-bb69: ; preds = %bb68.fragment
- %Opq.sa.calc627 = add i32 %Opq.sa.calc784, 163 ; <i32> [#uses=0]
- %169 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=2]
- store i32 %168, i32* %169, align 4
- %170 = getelementptr %struct.Macroblock* %2, i64 %3, i32 27 ; <i32*> [#uses=2]
- br label %bb69.fragment
-
-bb69.fragment: ; preds = %bb69
- %Opq.sa.calc996 = sub i32 %Opq.sa.calc784, -9 ; <i32> [#uses=3]
- %Opq.sa.calc994 = sub i32 %Opq.sa.calc996, %Opq.sa.calc784 ; <i32> [#uses=1]
- %Opq.sa.calc995 = sub i32 %Opq.sa.calc994, 3 ; <i32> [#uses=2]
- %171 = load i32* %170, align 8 ; <i32> [#uses=3]
- store i32 %171, i32* %.SV52.phi1170, align 4
- %172 = load i32* %170, align 8 ; <i32> [#uses=1]
- %173 = icmp eq i32 %172, 0 ; <i1> [#uses=1]
- br i1 %173, label %meshBB396, label %meshBB400
-
-bb70: ; preds = %meshBB400
- %Opq.sa.calc630 = add i32 %Opq.sa.calc824, -203 ; <i32> [#uses=2]
- %174 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %175 = getelementptr %struct.ImageParameters* %174, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %176 = load %struct.Macroblock** %175, align 8 ; <%struct.Macroblock*> [#uses=1]
- %177 = load i32* %.SV156.phi, align 8 ; <i32> [#uses=1]
- br label %meshBB428
-
-bb70.fragment: ; preds = %meshBB428
- %Opq.sa.calc739 = xor i32 %Opq.sa.calc897, 213 ; <i32> [#uses=2]
- %Opq.sa.calc738 = sub i32 %Opq.sa.calc739, 1 ; <i32> [#uses=2]
- %178 = sext i32 %.SV280.phi to i64 ; <i64> [#uses=1]
- %179 = getelementptr %struct.Macroblock* %.SV278.phi, i64 %178, i32 20 ; <i32*> [#uses=1]
- %180 = load i32* %179, align 4 ; <i32> [#uses=1]
- %181 = icmp eq i32 %180, 0 ; <i1> [#uses=1]
- br i1 %181, label %meshBB452, label %meshBB356
-
-bb71: ; preds = %meshBB452
- %Opq.sa.calc633 = xor i32 %Opq.sa.calc820, 118 ; <i32> [#uses=1]
- %182 = add i32 %.SV158.phi1106, 1 ; <i32> [#uses=1]
- br label %meshBB352
-
-bb71.fragment: ; preds = %meshBB352
- %Opq.sa.calc809 = sub i32 %Opq.sa.calc876, 17 ; <i32> [#uses=2]
- store i32 %.SV282.phi, i32* %.load163.SV.phi, align 4
- %183 = shl i32 %yN, 1 ; <i32> [#uses=1]
- br label %meshBB436
-
-bb74: ; preds = %meshBB412
- %Opq.sa.calc636 = xor i32 %Opq.sa.calc932, 233 ; <i32> [#uses=1]
- %184 = add i32 %.SV158.phi1063, 1 ; <i32> [#uses=1]
- %185 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
- br label %bb74.fragment
-
-bb74.fragment: ; preds = %bb74
- %Opq.sa.calc1011 = sub i32 %Opq.sa.calc636, -19 ; <i32> [#uses=0]
- store i32 %184, i32* %185, align 4
- %186 = getelementptr %struct.Macroblock* %2, i64 %3, i32 27 ; <i32*> [#uses=1]
- %187 = load i32* %186, align 8 ; <i32> [#uses=2]
- store i32 %187, i32* %.SV52.phi1186, align 4
- br label %bb96
-
-bb76: ; preds = %bb58
- %Opq.sa.calc640 = xor i32 %Opq.sa.calc1002, 71 ; <i32> [#uses=4]
- %Opq.sa.calc639 = xor i32 %Opq.sa.calc640, 219 ; <i32> [#uses=0]
- %188 = icmp eq i32 %yN, 0 ; <i1> [#uses=1]
- br i1 %188, label %bb77, label %bb79
-
-bb77: ; preds = %bb76
- %Opq.sa.calc643 = add i32 %Opq.sa.calc640, 2 ; <i32> [#uses=2]
- %189 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %190 = getelementptr %struct.ImageParameters* %189, i64 0, i32 45 ; <i32*> [#uses=1]
- %191 = load i32* %190, align 4 ; <i32> [#uses=1]
- %192 = icmp eq i32 %191, 2 ; <i1> [#uses=1]
- br i1 %192, label %meshBB416, label %bb79
-
-bb78: ; preds = %meshBB416
- %Opq.sa.calc647 = xor i32 %Opq.sa.calc971, 25 ; <i32> [#uses=2]
- %Opq.sa.calc646 = sub i32 %Opq.sa.calc647, 29 ; <i32> [#uses=0]
- %193 = getelementptr %struct.Macroblock* %2, i64 %3, i32 23 ; <i32*> [#uses=1]
- %194 = load i32* %193, align 8 ; <i32> [#uses=1]
- %195 = add i32 %194, 1 ; <i32> [#uses=1]
- br label %bb78.fragment
-
-bb78.fragment: ; preds = %bb78
- %Opq.sa.calc850 = sub i32 %Opq.sa.calc647, -93 ; <i32> [#uses=0]
- %196 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
- store i32 %195, i32* %196, align 4
- store i32 1, i32* %.SV52.phi1200, align 4
- %197 = add i32 %yN, -1 ; <i32> [#uses=1]
- br label %bb98
-
-bb79: ; preds = %bb77, %bb76
- %Opq.link.SV652.phi = phi i32 [ %Opq.sa.calc643, %bb77 ], [ %Opq.sa.calc640, %bb76 ] ; <i32> [#uses=1]
- %Opq.link.mask654 = and i32 %Opq.link.SV652.phi, 8 ; <i32> [#uses=1]
- %Opq.sa.calc651 = sub i32 %Opq.link.mask654, -2 ; <i32> [#uses=3]
- %Opq.sa.calc650 = xor i32 %Opq.sa.calc651, 1 ; <i32> [#uses=2]
- br i1 %or.cond.not.SV.phi1094, label %meshBB456, label %meshBB352
-
-bb81: ; preds = %meshBB456
- %Opq.sa.calc655 = add i32 %Opq.sa.calc816, 56 ; <i32> [#uses=0]
- %198 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
- store i32 %curr_mb_nr, i32* %198, align 4
- store i32 1, i32* %.SV52.phi1136, align 4
- br label %bb98
-
-bb83: ; preds = %bb56.fragment
- %Opq.sa.calc658 = sub i32 %Opq.sa.calc1002, 73 ; <i32> [#uses=3]
- br i1 %140, label %bb84, label %meshBB424
-
-bb84: ; preds = %bb83
- %Opq.sa.calc661 = xor i32 %Opq.sa.calc658, 22 ; <i32> [#uses=1]
- %199 = getelementptr %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
- %200 = load i32* %199, align 4 ; <i32> [#uses=1]
- br label %meshBB400
-
-bb84.fragment: ; preds = %meshBB400
- %Opq.sa.calc802 = xor i32 %Opq.sa.calc824, 240 ; <i32> [#uses=3]
- %201 = icmp eq i32 %.SV290.phi, 0 ; <i1> [#uses=1]
- %202 = and i32 %curr_mb_nr, 1 ; <i32> [#uses=1]
- %203 = icmp eq i32 %202, 0 ; <i1> [#uses=2]
- br i1 %201, label %meshBB372, label %bb89
-
-bb85: ; preds = %meshBB372
- %Opq.sa.calc667 = sub i32 %Opq.sa.calc812, 20 ; <i32> [#uses=3]
- %Opq.sa.calc666 = sub i32 %Opq.sa.calc667, 84 ; <i32> [#uses=2]
- %Opq.sa.calc664 = add i32 %Opq.sa.calc666, %Opq.sa.calc667 ; <i32> [#uses=1]
- %Opq.sa.calc665 = add i32 %Opq.sa.calc664, -112 ; <i32> [#uses=2]
- br i1 %.SV167.phi, label %meshBB336, label %meshBB440
-
-bb86: ; preds = %meshBB336
- %Opq.sa.calc670 = sub i32 %Opq.sa.calc979, 35 ; <i32> [#uses=1]
- %204 = getelementptr %struct.Macroblock* %2, i64 %3, i32 24 ; <i32*> [#uses=1]
- %205 = load i32* %204, align 4 ; <i32> [#uses=1]
- %206 = add i32 %205, 1 ; <i32> [#uses=1]
- %207 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
- br label %bb86.fragment
-
-bb86.fragment: ; preds = %bb86
- %Opq.sa.calc943 = xor i32 %Opq.sa.calc670, 123 ; <i32> [#uses=2]
- store i32 %206, i32* %207, align 4
- %208 = getelementptr %struct.Macroblock* %2, i64 %3, i32 28 ; <i32*> [#uses=1]
- %209 = load i32* %208, align 4 ; <i32> [#uses=2]
- store i32 %209, i32* %.SV52.phi1234, align 4
- br label %meshBB424
-
-bb87: ; preds = %meshBB440
- %Opq.sa.calc674 = xor i32 %Opq.sa.calc990, 44 ; <i32> [#uses=1]
- %Opq.sa.calc673 = xor i32 %Opq.sa.calc674, 160 ; <i32> [#uses=1]
- store i32 0, i32* %.SV52.phi1235, align 4
- br label %meshBB408
-
-bb89: ; preds = %bb84.fragment
- %Opq.sa.calc677 = sub i32 %Opq.sa.calc802, -183 ; <i32> [#uses=1]
- %210 = getelementptr %struct.Macroblock* %2, i64 %3, i32 24 ; <i32*> [#uses=2]
- br label %bb89.fragment
-
-bb89.fragment: ; preds = %bb89
- %Opq.sa.calc962 = add i32 %Opq.sa.calc677, -188 ; <i32> [#uses=3]
- %211 = load i32* %210, align 4 ; <i32> [#uses=3]
- br i1 %203, label %bb90, label %meshBB408
-
-bb90: ; preds = %bb89.fragment
- %Opq.sa.calc680 = xor i32 %Opq.sa.calc962, 92 ; <i32> [#uses=1]
- %212 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=2]
- store i32 %211, i32* %212, align 4
- %213 = getelementptr %struct.Macroblock* %2, i64 %3, i32 28 ; <i32*> [#uses=2]
- br label %bb90.fragment
-
-bb90.fragment: ; preds = %bb90
- %Opq.sa.calc773 = sub i32 %Opq.sa.calc680, 60 ; <i32> [#uses=3]
- %Opq.sa.calc772 = add i32 %Opq.sa.calc773, -25 ; <i32> [#uses=2]
- %214 = load i32* %213, align 4 ; <i32> [#uses=3]
- store i32 %214, i32* %.SV52.phi1190, align 4
- %215 = load i32* %213, align 4 ; <i32> [#uses=1]
- %216 = icmp eq i32 %215, 0 ; <i1> [#uses=1]
- br i1 %216, label %meshBB416, label %meshBB368
-
-bb91: ; preds = %meshBB368
- %Opq.sa.calc683 = sub i32 %Opq.sa.calc768, -7 ; <i32> [#uses=0]
- %217 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %218 = getelementptr %struct.ImageParameters* %217, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %219 = load %struct.Macroblock** %218, align 8 ; <%struct.Macroblock*> [#uses=1]
- %220 = load i32* %.SV170.phi, align 4 ; <i32> [#uses=1]
- br label %bb91.fragment
-
-bb91.fragment: ; preds = %bb91
- %Opq.sa.calc853 = xor i32 %Opq.sa.calc768, 8 ; <i32> [#uses=1]
- %221 = sext i32 %220 to i64 ; <i64> [#uses=1]
- %222 = getelementptr %struct.Macroblock* %219, i64 %221, i32 20 ; <i32*> [#uses=1]
- %223 = load i32* %222, align 4 ; <i32> [#uses=1]
- %224 = icmp eq i32 %223, 0 ; <i1> [#uses=1]
- br i1 %224, label %bb92, label %bb96
-
-bb92: ; preds = %bb91.fragment
- %Opq.sa.calc686 = xor i32 %Opq.sa.calc853, 2 ; <i32> [#uses=1]
- %225 = add i32 %.SV172.phi, 1 ; <i32> [#uses=1]
- br label %bb92.fragment
-
-bb92.fragment: ; preds = %bb92
- %Opq.sa.calc1005 = xor i32 %Opq.sa.calc686, 130 ; <i32> [#uses=2]
- store i32 %225, i32* %.SV176.phi, align 4
- %226 = shl i32 %yN, 1 ; <i32> [#uses=1]
- br label %meshBB380
-
-bb95: ; preds = %meshBB408
- %Opq.sa.calc689 = xor i32 %Opq.sa.calc912, 207 ; <i32> [#uses=3]
- %227 = add i32 %.SV172.phi1074, 1 ; <i32> [#uses=1]
- %228 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
- br label %meshBB384
-
-bb95.fragment: ; preds = %meshBB384
- %Opq.sa.calc841 = sub i32 %Opq.sa.calc901, 76 ; <i32> [#uses=0]
- store i32 %.SV306.phi, i32* %.SV308.phi, align 4
- %229 = getelementptr %struct.Macroblock* %.load.SV.phi, i64 %.load20.SV.phi, i32 28 ; <i32*> [#uses=1]
- %230 = load i32* %229, align 4 ; <i32> [#uses=2]
- store i32 %230, i32* %.load53.SV.phi, align 4
- br label %bb96
-
-bb96: ; preds = %meshBB444, %meshBB440, %meshBB436, %meshBB424, %meshBB420, %meshBB416, %meshBB396, %meshBB388, %meshBB380, %meshBB376, %meshBB364, %meshBB356, %meshBB340, %meshBB324, %meshBB, %bb95.fragment, %bb91.fragment, %bb74.fragment, %bb51, %bb48, %bb43, %bb41.fragment, %bb37.fragment, %bb36.fragment, %bb34.fragment, %bb30.fragment, %bb25, %bb19.fragment, %bb16.fragment, %bb15.fragment, %bb11.fragment, %bb9.fragment, %bb8.fragment, %bb7.fragment
- %.SV38.phi1087 = phi i64 [ %.SV38.phi1224, %meshBB444 ], [ %.SV38.phi1210, %meshBB440 ], [ %.SV38.phi1147, %meshBB436 ], [ %.SV38.phi1197, %meshBB424 ], [ %.SV38.phi1194, %meshBB420 ], [ %.SV38.phi1201, %meshBB416 ], [ %.SV38.phi, %meshBB396 ], [ %.SV38.phi1118, %meshBB388 ], [ %.SV38.phi1207, %meshBB380 ], [ %.SV38.phi1153, %meshBB376 ], [ %.SV38.phi1098, %meshBB364 ], [ %.SV38.phi1121, %meshBB356 ], [ %.SV38.phi1167, %meshBB340 ], [ %.SV38.phi1175, %meshBB324 ], [ %.SV38.phi1183, %meshBB ], [ %.SV38.phi1164, %bb91.fragment ], [ %.SV38.phi1179, %bb48 ], [ %.SV38.phi1231, %bb41.fragment ], [ %.SV38.phi1172, %bb25 ], [ %.SV38.phi1175, %bb15.fragment ], [ %.SV38.phi1164, %bb9.fragment ], [ %.SV38.phi1164, %bb8.fragment ], [ %.SV38.phi1221, %bb95.fragment ], [ %.SV38.phi1187, %bb74.fragment ], [ %.SV38.phi1227, %bb51 ], [ %.SV38.phi1179, %bb43 ], [ %.SV38.phi1103, %bb37.fragment ], [ %.SV38.phi1214, %bb36.fragment ], [ %.SV38.phi1227, %bb34.fragment ], [ %.SV38.phi1121, %bb30.fragment ], [ %.SV38.phi1187, %bb19.fragment ], [ %.SV38.phi1175, %bb16.fragment ], [ %.SV38.phi1204, %bb11.fragment ], [ %.SV38.phi1118, %bb7.fragment ] ; <i64> [#uses=2]
- %.SV68.phi1086 = phi i32 [ %.SV68.phi1223, %meshBB444 ], [ %.SV68.phi1209, %meshBB440 ], [ %.SV68.phi1146, %meshBB436 ], [ %.SV68.phi1196, %meshBB424 ], [ %.SV68.phi1193, %meshBB420 ], [ %.SV68.phi1199, %meshBB416 ], [ %.SV68.phi, %meshBB396 ], [ %.SV68.phi1117, %meshBB388 ], [ %.SV68.phi1206, %meshBB380 ], [ %.SV68.phi1152, %meshBB376 ], [ %.SV68.phi1096, %meshBB364 ], [ %.SV68.phi1120, %meshBB356 ], [ %.SV68.phi1166, %meshBB340 ], [ %.SV68.phi1174, %meshBB324 ], [ %.SV68.phi1181, %meshBB ], [ %.SV68.phi1162, %bb91.fragment ], [ %.SV68.phi1177, %bb48 ], [ %.SV68.phi1229, %bb41.fragment ], [ %.SV68.phi1169, %bb25 ], [ %.SV68.phi1174, %bb15.fragment ], [ %.SV68.phi1162, %bb9.fragment ], [ %.SV68.phi1162, %bb8.fragment ], [ %.SV68.phi1220, %bb95.fragment ], [ %.SV68.phi1185, %bb74.fragment ], [ %.SV68.phi1226, %bb51 ], [ %.SV68.phi1177, %bb43 ], [ %.SV68.phi1100, %bb37.fragment ], [ %.SV68.phi1212, %bb36.fragment ], [ %.SV68.phi1226, %bb34.fragment ], [ %.SV68.phi1120, %bb30.fragment ], [ %.SV68.phi1185, %bb19.fragment ], [ %.SV68.phi1174, %bb16.fragment ], [ %.SV68.phi1203, %bb11.fragment ], [ %.SV68.phi1117, %bb7.fragment ] ; <i32> [#uses=2]
- %.SV70.phi1085 = phi i32 [ %.SV70.phi1222, %meshBB444 ], [ %.SV70.phi1208, %meshBB440 ], [ %.SV70.phi1145, %meshBB436 ], [ %.SV70.phi1195, %meshBB424 ], [ %.SV70.phi1192, %meshBB420 ], [ %.SV70.phi1198, %meshBB416 ], [ %.SV70.phi, %meshBB396 ], [ %.SV70.phi1116, %meshBB388 ], [ %.SV70.phi1205, %meshBB380 ], [ %.SV70.phi1151, %meshBB376 ], [ %.SV70.phi1095, %meshBB364 ], [ %.SV70.phi1119, %meshBB356 ], [ %.SV70.phi1165, %meshBB340 ], [ %.SV70.phi1173, %meshBB324 ], [ %.SV70.phi1180, %meshBB ], [ %.SV70.phi1161, %bb91.fragment ], [ %.SV70.phi1176, %bb48 ], [ %.SV70.phi1228, %bb41.fragment ], [ %.SV70.phi1168, %bb25 ], [ %.SV70.phi1173, %bb15.fragment ], [ %.SV70.phi1161, %bb9.fragment ], [ %.SV70.phi1161, %bb8.fragment ], [ %.SV70.phi1219, %bb95.fragment ], [ %.SV70.phi1184, %bb74.fragment ], [ %.SV70.phi1225, %bb51 ], [ %.SV70.phi1176, %bb43 ], [ %.SV70.phi1099, %bb37.fragment ], [ %.SV70.phi1211, %bb36.fragment ], [ %.SV70.phi1225, %bb34.fragment ], [ %.SV70.phi1119, %bb30.fragment ], [ %.SV70.phi1184, %bb19.fragment ], [ %.SV70.phi1173, %bb16.fragment ], [ %.SV70.phi1202, %bb11.fragment ], [ %.SV70.phi1116, %bb7.fragment ] ; <i32> [#uses=2]
- %.SV.phi = phi i32 [ %.SV.phi1048, %meshBB444 ], [ %.SV.phi1056, %meshBB440 ], [ %.SV.phi1067, %meshBB436 ], [ %.SV.phi1072, %meshBB424 ], [ %.SV.phi1044, %meshBB420 ], [ %.SV.phi1076, %meshBB416 ], [ %.SV.phi1065, %meshBB396 ], [ %.SV.phi1054, %meshBB388 ], [ %.SV.phi1052, %meshBB380 ], [ %.SV.phi1050, %meshBB376 ], [ %.SV.phi1062, %meshBB364 ], [ %.SV.phi1046, %meshBB356 ], [ %.SV.phi1042, %meshBB340 ], [ %.SV.phi1032, %meshBB324 ], [ %.SV.phi1034, %meshBB ], [ %.SV178.phi, %bb91.fragment ], [ %.SV118.phi1040, %bb48 ], [ %.SV118.phi1125, %bb41.fragment ], [ %.SV118.phi, %bb25 ], [ %.load94.SV.phi, %bb15.fragment ], [ %32, %bb9.fragment ], [ %32, %bb8.fragment ], [ %230, %bb95.fragment ], [ %187, %bb74.fragment ], [ %.SV118.phi1081, %bb51 ], [ %.SV118.phi1040, %bb43 ], [ %.load131.SV.phi, %bb37.fragment ], [ %.SV118.phi1154, %bb36.fragment ], [ %.load129.SV.phi, %bb34.fragment ], [ %.SV118.phi1158, %bb30.fragment ], [ %66, %bb19.fragment ], [ %.SV93.phi, %bb16.fragment ], [ %.load84.SV.phi, %bb11.fragment ], [ %27, %bb7.fragment ] ; <i32> [#uses=1]
- %yM.0.SV.phi = phi i32 [ -1, %meshBB444 ], [ %yN, %meshBB440 ], [ %yM.0.SV.phi1066, %meshBB436 ], [ %yN, %meshBB424 ], [ %yN, %meshBB420 ], [ -1, %meshBB416 ], [ -1, %meshBB396 ], [ %yM.0.SV.phi1053, %meshBB388 ], [ %yM.0.SV.phi1051, %meshBB380 ], [ %yM.0.SV.phi1049, %meshBB376 ], [ %yN, %meshBB364 ], [ %yN, %meshBB356 ], [ %yM.0.SV.phi1041, %meshBB340 ], [ -1, %meshBB324 ], [ -1, %meshBB ], [ %yN, %bb91.fragment ], [ -1, %bb48 ], [ %yN, %bb41.fragment ], [ -1, %bb25 ], [ %yN, %bb15.fragment ], [ %yN, %bb9.fragment ], [ -1, %bb8.fragment ], [ %yN, %bb95.fragment ], [ %yN, %bb74.fragment ], [ %133, %bb51 ], [ %118, %bb43 ], [ %107, %bb37.fragment ], [ %104, %bb36.fragment ], [ %yN, %bb34.fragment ], [ %91, %bb30.fragment ], [ %yN, %bb19.fragment ], [ %62, %bb16.fragment ], [ %45, %bb11.fragment ], [ %yN, %bb7.fragment ] ; <i32> [#uses=2]
- %Opq.sa.calc693 = add i32 0, 15 ; <i32> [#uses=2]
- %Opq.sa.calc692 = xor i32 %Opq.sa.calc693, 8 ; <i32> [#uses=1]
- %231 = icmp eq i32 %.SV.phi, 0 ; <i1> [#uses=1]
- br i1 %231, label %bb97, label %meshBB404
-
-bb97: ; preds = %meshBB424, %meshBB408, %meshBB352, %bb96, %bb21
- %.SV38.phi1150 = phi i64 [ %.SV38.phi1197, %meshBB424 ], [ %.SV38.phi1218, %meshBB408 ], [ %.SV38.phi1140, %meshBB352 ], [ %.SV38.phi1087, %bb96 ], [ %4, %bb21 ] ; <i64> [#uses=1]
- %.SV68.phi1149 = phi i32 [ %.SV68.phi1196, %meshBB424 ], [ %.SV68.phi1216, %meshBB408 ], [ %.SV68.phi1139, %meshBB352 ], [ %.SV68.phi1086, %bb96 ], [ %.SV68.phi1021, %bb21 ] ; <i32> [#uses=1]
- %.SV70.phi1148 = phi i32 [ %.SV70.phi1195, %meshBB424 ], [ %.SV70.phi1215, %meshBB408 ], [ %.SV70.phi1138, %meshBB352 ], [ %.SV70.phi1085, %bb96 ], [ %.SV70.phi1027, %bb21 ] ; <i32> [#uses=1]
- %yM.0.reg2mem.0.SV.phi = phi i32 [ -1, %meshBB424 ], [ -1, %meshBB408 ], [ -1, %meshBB352 ], [ %yM.0.SV.phi, %bb96 ], [ -1, %bb21 ] ; <i32> [#uses=1]
- %Opq.sa.calc694 = xor i32 0, 243 ; <i32> [#uses=1]
- %232 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %233 = getelementptr %struct.ImageParameters* %232, i64 0, i32 45 ; <i32*> [#uses=1]
- br label %bb97.fragment
-
-bb97.fragment: ; preds = %bb97
- %Opq.sa.calc928 = xor i32 %Opq.sa.calc694, 128 ; <i32> [#uses=1]
- %234 = load i32* %233, align 4 ; <i32> [#uses=1]
- %235 = icmp eq i32 %234, 0 ; <i1> [#uses=1]
- br i1 %235, label %return, label %bb98
-
-bb98: ; preds = %meshBB444, %meshBB404, %bb97.fragment, %bb81, %bb78.fragment
- %.SV38.phi1093 = phi i64 [ %.SV38.phi1224, %meshBB444 ], [ %.SV38.phi1017, %meshBB404 ], [ %.SV38.phi1150, %bb97.fragment ], [ %.SV38.phi1137, %bb81 ], [ %.SV38.phi1201, %bb78.fragment ] ; <i64> [#uses=2]
- %.SV68.phi1092 = phi i32 [ %.SV68.phi1223, %meshBB444 ], [ %.SV68.phi1023, %meshBB404 ], [ %.SV68.phi1149, %bb97.fragment ], [ %.SV68.phi1135, %bb81 ], [ %.SV68.phi1199, %bb78.fragment ] ; <i32> [#uses=2]
- %.SV70.phi1091 = phi i32 [ %.SV70.phi1222, %meshBB444 ], [ %.SV70.phi1028, %meshBB404 ], [ %.SV70.phi1148, %bb97.fragment ], [ %.SV70.phi1134, %bb81 ], [ %.SV70.phi1198, %bb78.fragment ] ; <i32> [#uses=2]
- %yM.0.reg2mem.1.SV.phi1068 = phi i32 [ %yN, %meshBB444 ], [ %yM.0.reg2mem.1.SV.phi1077, %meshBB404 ], [ %yM.0.reg2mem.0.SV.phi, %bb97.fragment ], [ %yN, %bb81 ], [ %197, %bb78.fragment ] ; <i32> [#uses=1]
- %Opq.sa.calc695 = xor i32 0, 23 ; <i32> [#uses=2]
- %236 = and i32 %.SV70.phi1091, %xN ; <i32> [#uses=1]
- %237 = getelementptr %struct.PixelPos* %pix, i64 0, i32 2 ; <i32*> [#uses=2]
- store i32 %236, i32* %237, align 4
- %238 = and i32 %yM.0.reg2mem.1.SV.phi1068, %.SV68.phi1092 ; <i32> [#uses=1]
- %239 = getelementptr %struct.PixelPos* %pix, i64 0, i32 3 ; <i32*> [#uses=2]
- store i32 %238, i32* %239, align 4
- %240 = getelementptr %struct.PixelPos* %pix, i64 0, i32 5 ; <i32*> [#uses=1]
- br label %meshBB376
-
-bb98.fragment: ; preds = %meshBB376
- %Opq.sa.calc1008 = sub i32 %Opq.link.mask911, 13 ; <i32> [#uses=1]
- %241 = getelementptr %struct.PixelPos* %pix, i64 0, i32 4 ; <i32*> [#uses=4]
- %242 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
- %243 = load i32* %242, align 4 ; <i32> [#uses=1]
- %244 = load void (i32, i32*, i32*)** @get_mb_block_pos, align 8 ; <void (i32, i32*, i32*)*> [#uses=1]
- tail call void %244(i32 %243, i32* %241, i32* %.SV317.phi) nounwind
- %245 = load i32* %241, align 4 ; <i32> [#uses=1]
- %246 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %247 = getelementptr %struct.ImageParameters* %246, i64 0, i32 119, i64 %.load39.SV.phi, i64 0 ; <i32*> [#uses=1]
- %248 = load i32* %247, align 4 ; <i32> [#uses=1]
- %249 = mul i32 %248, %245 ; <i32> [#uses=2]
- store i32 %249, i32* %241, align 4
- br label %bb98.fragment183
-
-bb98.fragment183: ; preds = %bb98.fragment
- %Opq.sa.calc777 = sub i32 %Opq.sa.calc1008, -158 ; <i32> [#uses=1]
- %Opq.sa.calc776 = sub i32 %Opq.sa.calc777, 46 ; <i32> [#uses=0]
- %250 = load i32* %.SV317.phi, align 4 ; <i32> [#uses=1]
- %251 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %252 = getelementptr %struct.ImageParameters* %251, i64 0, i32 119, i64 %.load39.SV.phi, i64 1 ; <i32*> [#uses=1]
- %253 = load i32* %252, align 4 ; <i32> [#uses=1]
- %254 = mul i32 %253, %250 ; <i32> [#uses=1]
- %255 = load i32* %.SV313.phi, align 4 ; <i32> [#uses=1]
- %256 = add i32 %255, %249 ; <i32> [#uses=1]
- store i32 %256, i32* %241, align 4
- %257 = load i32* %.SV315.phi, align 4 ; <i32> [#uses=1]
- %258 = add i32 %257, %254 ; <i32> [#uses=1]
- store i32 %258, i32* %.SV317.phi, align 4
- ret void
-
-return: ; preds = %meshBB448, %meshBB396, %bb97.fragment
- %Opq.link.SV697.phi = phi i32 [ %Opq.sa.calc957, %meshBB448 ], [ %Opq.sa.calc758, %meshBB396 ], [ %Opq.sa.calc928, %bb97.fragment ] ; <i32> [#uses=1]
- %Opq.link.mask699 = and i32 %Opq.link.SV697.phi, 0 ; <i32> [#uses=1]
- %Opq.sa.calc696 = add i32 %Opq.link.mask699, 238 ; <i32> [#uses=0]
- ret void
-
-meshBB: ; preds = %bb33.fragment, %bb14.fragment
- %.SV38.phi1183 = phi i64 [ %.SV38.phi1115, %bb14.fragment ], [ %.SV38.phi1172, %bb33.fragment ] ; <i64> [#uses=3]
- %.SV68.phi1181 = phi i32 [ %.SV68.phi1112, %bb14.fragment ], [ %.SV68.phi1169, %bb33.fragment ] ; <i32> [#uses=3]
- %.SV70.phi1180 = phi i32 [ %.SV70.phi1111, %bb14.fragment ], [ %.SV70.phi1168, %bb33.fragment ] ; <i32> [#uses=3]
- %.SV104.phi1084 = phi i32 [ undef, %bb14.fragment ], [ %.SV104.phi, %bb33.fragment ] ; <i32> [#uses=1]
- %.SV111.phi1083 = phi i32* [ undef, %bb14.fragment ], [ %.SV111.phi, %bb33.fragment ] ; <i32*> [#uses=1]
- %.SV118.phi1082 = phi i32 [ undef, %bb14.fragment ], [ %.SV118.phi, %bb33.fragment ] ; <i32> [#uses=2]
- %.SV.phi1034 = phi i32 [ %50, %bb14.fragment ], [ undef, %bb33.fragment ] ; <i32> [#uses=1]
- %meshStackVariable.phi = phi i32 [ %Opq.sa.calc723, %bb14.fragment ], [ %Opq.sa.calc712, %bb33.fragment ] ; <i32> [#uses=1]
- %Opq.link.SV829.phi = phi i32 [ %Opq.sa.calc723, %bb14.fragment ], [ %Opq.sa.calc534, %bb33.fragment ] ; <i32> [#uses=1]
- %Opq.link.mask831 = and i32 %Opq.link.SV829.phi, 0 ; <i32> [#uses=1]
- %Opq.sa.calc828 = sub i32 %Opq.link.mask831, -117 ; <i32> [#uses=2]
- %meshCmp = icmp eq i32 %meshStackVariable.phi, 3 ; <i1> [#uses=1]
- br i1 %meshCmp, label %bb35, label %bb96
-
-meshBB324: ; preds = %bb32, %bb15
- %.SV38.phi1175 = phi i64 [ %.SV38.phi1172, %bb32 ], [ %.SV38.phi1115, %bb15 ] ; <i64> [#uses=3]
- %.SV68.phi1174 = phi i32 [ %.SV68.phi1169, %bb32 ], [ %.SV68.phi1112, %bb15 ] ; <i32> [#uses=3]
- %.SV70.phi1173 = phi i32 [ %.SV70.phi1168, %bb32 ], [ %.SV70.phi1111, %bb15 ] ; <i32> [#uses=3]
- %.load94.SV.phi = phi i32 [ undef, %bb32 ], [ %50, %bb15 ] ; <i32> [#uses=1]
- %.SV212.phi = phi %struct.Macroblock* [ undef, %bb32 ], [ %55, %bb15 ] ; <%struct.Macroblock*> [#uses=1]
- %.SV214.phi = phi i32 [ undef, %bb32 ], [ %56, %bb15 ] ; <i32> [#uses=1]
- %meshStackVariable325.phi = phi i32 [ %Opq.sa.calc531, %bb32 ], [ %Opq.sa.calc496, %bb15 ] ; <i32> [#uses=1]
- %Opq.link.SV751.phi = phi i32 [ %Opq.sa.calc512, %bb32 ], [ %Opq.sa.calc723, %bb15 ] ; <i32> [#uses=1]
- %.SV.phi1032 = phi i32 [ %.SV118.phi, %bb32 ], [ undef, %bb15 ] ; <i32> [#uses=1]
- %.SV93.phi = phi i32 [ undef, %bb32 ], [ %50, %bb15 ] ; <i32> [#uses=1]
- %.SV91.phi = phi i32* [ undef, %bb32 ], [ %48, %bb15 ] ; <i32*> [#uses=1]
- %.SV87.phi = phi i32 [ undef, %bb32 ], [ %47, %bb15 ] ; <i32> [#uses=1]
- %Opq.link.mask753 = and i32 %Opq.link.SV751.phi, 4 ; <i32> [#uses=1]
- %Opq.sa.calc750 = add i32 %Opq.link.mask753, 203 ; <i32> [#uses=1]
- %meshCmp327 = icmp eq i32 %meshStackVariable325.phi, 14 ; <i1> [#uses=1]
- br i1 %meshCmp327, label %bb15.fragment, label %bb96
-
-meshBB328: ; preds = %bb50, %bb34
- %.SV38.phi1227 = phi i64 [ %.SV38.phi1179, %bb50 ], [ %.SV38.phi1172, %bb34 ] ; <i64> [#uses=2]
- %.SV68.phi1226 = phi i32 [ %.SV68.phi1177, %bb50 ], [ %.SV68.phi1169, %bb34 ] ; <i32> [#uses=2]
- %.SV70.phi1225 = phi i32 [ %.SV70.phi1176, %bb50 ], [ %.SV70.phi1168, %bb34 ] ; <i32> [#uses=2]
- %.SV118.phi1081 = phi i32 [ %.SV118.phi1040, %bb50 ], [ %.SV118.phi, %bb34 ] ; <i32> [#uses=1]
- %.load129.SV.phi = phi i32 [ undef, %bb50 ], [ %.SV118.phi, %bb34 ] ; <i32> [#uses=1]
- %.load116.SV.phi = phi i32* [ undef, %bb50 ], [ %.SV111.phi, %bb34 ] ; <i32*> [#uses=1]
- %.SV238.phi = phi i32 [ undef, %bb50 ], [ %100, %bb34 ] ; <i32> [#uses=1]
- %meshStackVariable329.phi = phi i32 [ %Opq.sa.calc577, %bb50 ], [ %Opq.sa.calc537, %bb34 ] ; <i32> [#uses=1]
- %Opq.link.SV788.phi = phi i32 [ %Opq.sa.calc577, %bb50 ], [ %Opq.sa.calc712, %bb34 ] ; <i32> [#uses=1]
- %Opq.link.mask790 = and i32 %Opq.link.SV788.phi, 1 ; <i32> [#uses=1]
- %Opq.sa.calc787 = sub i32 %Opq.link.mask790, -227 ; <i32> [#uses=2]
- %meshCmp331 = icmp eq i32 %meshStackVariable329.phi, 11 ; <i1> [#uses=1]
- br i1 %meshCmp331, label %bb34.fragment, label %bb51
-
-meshBB332: ; preds = %bb44, %bb11
- %.SV38.phi1204 = phi i64 [ %.SV38.phi1231, %bb44 ], [ %.SV38.phi1164, %bb11 ] ; <i64> [#uses=2]
- %.SV68.phi1203 = phi i32 [ %.SV68.phi1229, %bb44 ], [ %.SV68.phi1162, %bb11 ] ; <i32> [#uses=2]
- %.SV70.phi1202 = phi i32 [ %.SV70.phi1228, %bb44 ], [ %.SV70.phi1161, %bb11 ] ; <i32> [#uses=2]
- %.load127.SV.phi = phi i32 [ %.SV118.phi1125, %bb44 ], [ undef, %bb11 ] ; <i32> [#uses=1]
- %.load114.SV.phi = phi i32* [ %.SV111.phi1126, %bb44 ], [ undef, %bb11 ] ; <i32*> [#uses=1]
- %.load46.SV.phi = phi i32 [ %.SV43.phi1230, %bb44 ], [ undef, %bb11 ] ; <i32> [#uses=1]
- %.SV248.phi = phi i32 [ %119, %bb44 ], [ undef, %bb11 ] ; <i32> [#uses=1]
- %.load84.SV.phi = phi i32 [ undef, %bb44 ], [ %32, %bb11 ] ; <i32> [#uses=1]
- %.load81.SV.phi = phi i32* [ undef, %bb44 ], [ %.SV80.phi, %bb11 ] ; <i32*> [#uses=1]
- %.load50.SV.phi = phi i32 [ undef, %bb44 ], [ %.SV43.phi1163, %bb11 ] ; <i32> [#uses=1]
- %.SV206.phi = phi i32 [ undef, %bb44 ], [ %43, %bb11 ] ; <i32> [#uses=1]
- %meshStackVariable333.phi = phi i32 [ %Opq.sa.calc566, %bb44 ], [ %Opq.sa.calc485, %bb11 ] ; <i32> [#uses=1]
- %Opq.link.SV857.phi = phi i32 [ %Opq.sa.calc987, %bb44 ], [ %Opq.sa.calc485, %bb11 ] ; <i32> [#uses=1]
- %Opq.link.mask859 = and i32 %Opq.link.SV857.phi, 4 ; <i32> [#uses=2]
- %Opq.sa.calc856 = add i32 %Opq.link.mask859, 204 ; <i32> [#uses=2]
- %meshCmp335 = icmp eq i32 %meshStackVariable333.phi, 4 ; <i1> [#uses=1]
- br i1 %meshCmp335, label %bb11.fragment, label %bb44.fragment
-
-meshBB336: ; preds = %bb85, %bb40
- %.SV52.phi1234 = phi i32* [ %.SV52.phi1213, %bb85 ], [ undef, %bb40 ] ; <i32*> [#uses=1]
- %.SV38.phi1231 = phi i64 [ %.SV38.phi1214, %bb85 ], [ %.SV38.phi1179, %bb40 ] ; <i64> [#uses=4]
- %.SV43.phi1230 = phi i32 [ undef, %bb85 ], [ %.SV43.phi1178, %bb40 ] ; <i32> [#uses=3]
- %.SV68.phi1229 = phi i32 [ %.SV68.phi1212, %bb85 ], [ %.SV68.phi1177, %bb40 ] ; <i32> [#uses=4]
- %.SV70.phi1228 = phi i32 [ %.SV70.phi1211, %bb85 ], [ %.SV70.phi1176, %bb40 ] ; <i32> [#uses=4]
- %.SV99.phi1128 = phi i32* [ undef, %bb85 ], [ %.SV99.phi1037, %bb40 ] ; <i32*> [#uses=1]
- %.SV104.phi1127 = phi i32 [ undef, %bb85 ], [ %.SV104.phi1036, %bb40 ] ; <i32> [#uses=2]
- %.SV111.phi1126 = phi i32* [ undef, %bb85 ], [ %.SV111.phi1035, %bb40 ] ; <i32*> [#uses=2]
- %.SV118.phi1125 = phi i32 [ undef, %bb85 ], [ %.SV118.phi1040, %bb40 ] ; <i32> [#uses=3]
- %meshStackVariable337.phi = phi i32 [ %Opq.sa.calc665, %bb85 ], [ %Opq.sa.calc553, %bb40 ] ; <i32> [#uses=1]
- %Opq.link.SV980.phi = phi i32 [ %Opq.sa.calc667, %bb85 ], [ %Opq.sa.calc554, %bb40 ] ; <i32> [#uses=1]
- %Opq.link.mask982 = and i32 %Opq.link.SV980.phi, 1 ; <i32> [#uses=1]
- %Opq.sa.calc979 = sub i32 %Opq.link.mask982, -153 ; <i32> [#uses=2]
- %meshCmp339 = icmp eq i32 %meshStackVariable337.phi, 4 ; <i1> [#uses=1]
- br i1 %meshCmp339, label %bb41, label %bb86
-
-meshBB340: ; preds = %bb29, %bb26
- %.SV38.phi1167 = phi i64 [ %.SV38.phi1121, %bb29 ], [ %.SV38.phi1172, %bb26 ] ; <i64> [#uses=3]
- %.SV68.phi1166 = phi i32 [ %.SV68.phi1120, %bb29 ], [ %.SV68.phi1169, %bb26 ] ; <i32> [#uses=3]
- %.SV70.phi1165 = phi i32 [ %.SV70.phi1119, %bb29 ], [ %.SV70.phi1168, %bb26 ] ; <i32> [#uses=3]
- %.SV104.phi1080 = phi i32 [ undef, %bb29 ], [ %.SV104.phi, %bb26 ] ; <i32> [#uses=1]
- %.SV111.phi1079 = phi i32* [ undef, %bb29 ], [ %.SV111.phi, %bb26 ] ; <i32*> [#uses=1]
- %.SV118.phi1078 = phi i32 [ %.SV118.phi1158, %bb29 ], [ %.SV118.phi, %bb26 ] ; <i32> [#uses=1]
- %.load123.SV.phi = phi i32 [ undef, %bb29 ], [ %.SV118.phi, %bb26 ] ; <i32> [#uses=2]
- %.SV228.phi = phi %struct.Macroblock* [ undef, %bb29 ], [ %81, %bb26 ] ; <%struct.Macroblock*> [#uses=1]
- %.SV230.phi = phi i32 [ undef, %bb29 ], [ %82, %bb26 ] ; <i32> [#uses=1]
- %meshStackVariable341.phi = phi i32 [ %Opq.sa.calc525, %bb29 ], [ %Opq.sa.calc518, %bb26 ] ; <i32> [#uses=1]
- %Opq.link.SV755.phi = phi i32 [ %Opq.sa.calc525, %bb29 ], [ %Opq.sa.calc519, %bb26 ] ; <i32> [#uses=1]
- %.SV.phi1042 = phi i32 [ %.SV118.phi1158, %bb29 ], [ undef, %bb26 ] ; <i32> [#uses=1]
- %yM.0.SV.phi1041 = phi i32 [ %89, %bb29 ], [ undef, %bb26 ] ; <i32> [#uses=1]
- %Opq.link.mask757 = and i32 %Opq.link.SV755.phi, 12 ; <i32> [#uses=1]
- %Opq.sa.calc754 = add i32 %Opq.link.mask757, 225 ; <i32> [#uses=2]
- %meshCmp343 = icmp eq i32 %meshStackVariable341.phi, 9 ; <i1> [#uses=1]
- br i1 %meshCmp343, label %bb26.fragment, label %bb96
-
-meshBB344: ; preds = %bb68, %bb23.fragment182
- %.SV38.phi1172 = phi i64 [ %.SV38.phi1115, %bb23.fragment182 ], [ %.SV38.phi1098, %bb68 ] ; <i64> [#uses=8]
- %.SV52.phi1170 = phi i32* [ undef, %bb23.fragment182 ], [ %.SV52.phi1097, %bb68 ] ; <i32*> [#uses=2]
- %.SV68.phi1169 = phi i32 [ %.SV68.phi1112, %bb23.fragment182 ], [ %.SV68.phi1096, %bb68 ] ; <i32> [#uses=8]
- %.SV70.phi1168 = phi i32 [ %.SV70.phi1111, %bb23.fragment182 ], [ %.SV70.phi1095, %bb68 ] ; <i32> [#uses=8]
- %.load144.SV.phi = phi i1 [ undef, %bb23.fragment182 ], [ %145, %bb68 ] ; <i1> [#uses=1]
- %.SV274.phi = phi i32* [ undef, %bb23.fragment182 ], [ %167, %bb68 ] ; <i32*> [#uses=2]
- %.SV118.phi = phi i32 [ %76, %bb23.fragment182 ], [ undef, %bb68 ] ; <i32> [#uses=7]
- %.SV135.phi = phi i1 [ %78, %bb23.fragment182 ], [ undef, %bb68 ] ; <i1> [#uses=2]
- %meshStackVariable345.phi = phi i32 [ %Opq.sa.calc743, %bb23.fragment182 ], [ %Opq.sa.calc624, %bb68 ] ; <i32> [#uses=1]
- %Opq.link.SV717.phi = phi i32 [ %Opq.sa.calc744, %bb23.fragment182 ], [ %Opq.sa.calc624, %bb68 ] ; <i32> [#uses=1]
- %Opq.link.SV720.phi = phi i32 [ %Opq.sa.calc743, %bb23.fragment182 ], [ %Opq.sa.calc624, %bb68 ] ; <i32> [#uses=1]
- %.SV96.phi = phi i1 [ %71, %bb23.fragment182 ], [ undef, %bb68 ] ; <i1> [#uses=1]
- %.SV99.phi = phi i32* [ %72, %bb23.fragment182 ], [ undef, %bb68 ] ; <i32*> [#uses=2]
- %.SV104.phi = phi i32 [ %73, %bb23.fragment182 ], [ undef, %bb68 ] ; <i32> [#uses=3]
- %.SV111.phi = phi i32* [ %74, %bb23.fragment182 ], [ undef, %bb68 ] ; <i32*> [#uses=3]
- %Opq.link.mask722 = and i32 %Opq.link.SV720.phi, 9 ; <i32> [#uses=3]
- %Opq.link.mask719 = and i32 %Opq.link.SV717.phi, 0 ; <i32> [#uses=1]
- %Opq.sa.calc715 = sub i32 %Opq.link.mask719, %Opq.link.mask722 ; <i32> [#uses=1]
- %Opq.sa.calc716 = sub i32 %Opq.sa.calc715, -101 ; <i32> [#uses=2]
- %meshCmp347 = icmp eq i32 %meshStackVariable345.phi, 9 ; <i1> [#uses=1]
- br i1 %meshCmp347, label %bb68.fragment, label %bb24
-
-meshBB348: ; preds = %bb37, %bb6
- %.SV38.phi1103 = phi i64 [ %.SV38.phi1014, %bb6 ], [ %.SV38.phi1019, %bb37 ] ; <i64> [#uses=2]
- %.SV43.phi1102 = phi i32 [ %.SV43.phi, %bb6 ], [ %.SV43.phi1018, %bb37 ] ; <i32> [#uses=1]
- %.SV52.phi1101 = phi i32* [ %.SV52.phi, %bb6 ], [ undef, %bb37 ] ; <i32*> [#uses=1]
- %.SV68.phi1100 = phi i32 [ %.SV68.phi1020, %bb6 ], [ %.SV68.phi1025, %bb37 ] ; <i32> [#uses=2]
- %.SV70.phi1099 = phi i32 [ %.SV70.phi1026, %bb6 ], [ %.SV70.phi1233, %bb37 ] ; <i32> [#uses=2]
- %.load131.SV.phi = phi i32 [ undef, %bb6 ], [ %.SV118.phi1155, %bb37 ] ; <i32> [#uses=1]
- %.load115.SV.phi = phi i32* [ undef, %bb6 ], [ %.SV111.phi1156, %bb37 ] ; <i32*> [#uses=1]
- %.load48.SV.phi = phi i32 [ undef, %bb6 ], [ %.SV43.phi1018, %bb37 ] ; <i32> [#uses=1]
- %.SV242.phi = phi i32 [ undef, %bb6 ], [ %105, %bb37 ] ; <i32> [#uses=1]
- %meshStackVariable349.phi = phi i32 [ %Opq.sa.calc473, %bb6 ], [ %Opq.sa.calc547, %bb37 ] ; <i32> [#uses=1]
- %Opq.link.SV806.phi = phi i32 [ %Opq.sa.calc873, %bb6 ], [ %Opq.sa.calc958, %bb37 ] ; <i32> [#uses=1]
- %Opq.link.mask808 = and i32 %Opq.link.SV806.phi, 12 ; <i32> [#uses=1]
- %Opq.sa.calc805 = sub i32 %Opq.link.mask808, -147 ; <i32> [#uses=3]
- %meshCmp351 = icmp eq i32 %meshStackVariable349.phi, 13 ; <i1> [#uses=1]
- br i1 %meshCmp351, label %bb37.fragment, label %bb8
-
-meshBB352: ; preds = %bb79, %bb71
- %.SV38.phi1140 = phi i64 [ %.SV38.phi1110, %bb71 ], [ %.SV38.phi1098, %bb79 ] ; <i64> [#uses=2]
- %.SV68.phi1139 = phi i32 [ %.SV68.phi1108, %bb71 ], [ %.SV68.phi1096, %bb79 ] ; <i32> [#uses=2]
- %.SV70.phi1138 = phi i32 [ %.SV70.phi1107, %bb71 ], [ %.SV70.phi1095, %bb79 ] ; <i32> [#uses=2]
- %.load166.SV.phi = phi i32 [ %.SV164.phi1104, %bb71 ], [ undef, %bb79 ] ; <i32> [#uses=1]
- %.load163.SV.phi = phi i32* [ %.SV162.phi1105, %bb71 ], [ undef, %bb79 ] ; <i32*> [#uses=1]
- %.SV282.phi = phi i32 [ %182, %bb71 ], [ undef, %bb79 ] ; <i32> [#uses=1]
- %meshStackVariable353.phi = phi i32 [ %Opq.sa.calc633, %bb71 ], [ %Opq.sa.calc650, %bb79 ] ; <i32> [#uses=1]
- %Opq.link.SV877.phi = phi i32 [ %Opq.sa.calc820, %bb71 ], [ %Opq.sa.calc650, %bb79 ] ; <i32> [#uses=1]
- %Opq.link.mask879 = and i32 %Opq.link.SV877.phi, 1 ; <i32> [#uses=1]
- %Opq.sa.calc876 = add i32 %Opq.link.mask879, 18 ; <i32> [#uses=1]
- %meshCmp355 = icmp eq i32 %meshStackVariable353.phi, 11 ; <i1> [#uses=1]
- br i1 %meshCmp355, label %bb97, label %bb71.fragment
-
-meshBB356: ; preds = %bb70.fragment, %bb26.fragment
- %.SV104.phi1160 = phi i32 [ undef, %bb70.fragment ], [ %.SV104.phi1080, %bb26.fragment ] ; <i32> [#uses=1]
- %.SV111.phi1159 = phi i32* [ undef, %bb70.fragment ], [ %.SV111.phi1079, %bb26.fragment ] ; <i32*> [#uses=1]
- %.SV118.phi1158 = phi i32 [ undef, %bb70.fragment ], [ %.SV118.phi1078, %bb26.fragment ] ; <i32> [#uses=3]
- %.SV38.phi1121 = phi i64 [ %.SV38.phi1014, %bb70.fragment ], [ %.SV38.phi1167, %bb26.fragment ] ; <i64> [#uses=3]
- %.SV68.phi1120 = phi i32 [ %.SV68.phi1020, %bb70.fragment ], [ %.SV68.phi1166, %bb26.fragment ] ; <i32> [#uses=3]
- %.SV70.phi1119 = phi i32 [ %.SV70.phi1026, %bb70.fragment ], [ %.SV70.phi1165, %bb26.fragment ] ; <i32> [#uses=3]
- %.SV.phi1046 = phi i32 [ %.load165.SV.phi, %bb70.fragment ], [ %.load123.SV.phi, %bb26.fragment ] ; <i32> [#uses=1]
- %meshStackVariable357.phi = phi i32 [ %Opq.sa.calc738, %bb70.fragment ], [ %Opq.sa.calc917, %bb26.fragment ] ; <i32> [#uses=1]
- %Opq.link.SV984.phi = phi i32 [ %Opq.sa.calc738, %bb70.fragment ], [ %Opq.sa.calc918, %bb26.fragment ] ; <i32> [#uses=1]
- %Opq.link.mask986 = and i32 %Opq.link.SV984.phi, 9 ; <i32> [#uses=1]
- %Opq.sa.calc983 = xor i32 %Opq.link.mask986, 251 ; <i32> [#uses=1]
- %meshCmp359 = icmp eq i32 %meshStackVariable357.phi, 9 ; <i1> [#uses=1]
- br i1 %meshCmp359, label %bb28, label %bb96
-
-meshBB360: ; preds = %bb21, %bb13
- %.SV38.phi1115 = phi i64 [ %4, %bb21 ], [ %.SV38.phi1014, %bb13 ] ; <i64> [#uses=5]
- %.SV52.phi1113 = phi i32* [ %.SV52.phi1022, %bb21 ], [ %.SV52.phi, %bb13 ] ; <i32*> [#uses=3]
- %.SV68.phi1112 = phi i32 [ %.SV68.phi1021, %bb21 ], [ %.SV68.phi1020, %bb13 ] ; <i32> [#uses=5]
- %.SV70.phi1111 = phi i32 [ %.SV70.phi1027, %bb21 ], [ %.SV70.phi1026, %bb13 ] ; <i32> [#uses=5]
- %.load74.SV.phi = phi i1 [ undef, %bb21 ], [ %21, %bb13 ] ; <i1> [#uses=1]
- %.SV208.phi = phi i32* [ undef, %bb21 ], [ %46, %bb13 ] ; <i32*> [#uses=2]
- %meshStackVariable361.phi = phi i32 [ %Opq.sa.calc505, %bb21 ], [ %Opq.sa.calc489, %bb13 ] ; <i32> [#uses=1]
- %Opq.link.SV867.phi = phi i32 [ %Opq.sa.calc505, %bb21 ], [ %Opq.sa.calc873, %bb13 ] ; <i32> [#uses=1]
- %Opq.link.mask869 = and i32 %Opq.link.SV867.phi, 1 ; <i32> [#uses=1]
- %Opq.sa.calc866 = add i32 %Opq.link.mask869, 148 ; <i32> [#uses=4]
- %meshCmp363 = icmp eq i32 %meshStackVariable361.phi, 16 ; <i1> [#uses=1]
- br i1 %meshCmp363, label %bb13.fragment, label %bb23
-
-meshBB364: ; preds = %bb65.fragment, %bb56
- %.SV38.phi1098 = phi i64 [ %.SV38.phi1017, %bb56 ], [ %.SV38.phi1147, %bb65.fragment ] ; <i64> [#uses=11]
- %.SV52.phi1097 = phi i32* [ %.SV52.phi1024, %bb56 ], [ undef, %bb65.fragment ] ; <i32*> [#uses=8]
- %.SV68.phi1096 = phi i32 [ %.SV68.phi1023, %bb56 ], [ %.SV68.phi1146, %bb65.fragment ] ; <i32> [#uses=11]
- %.SV70.phi1095 = phi i32 [ %.SV70.phi1028, %bb56 ], [ %.SV70.phi1145, %bb65.fragment ] ; <i32> [#uses=11]
- %or.cond.not.SV.phi1094 = phi i1 [ %or.cond.not.SV.phi1029, %bb56 ], [ undef, %bb65.fragment ] ; <i1> [#uses=1]
- %.SV.phi1062 = phi i32 [ undef, %bb56 ], [ %.SV268.phi, %bb65.fragment ] ; <i32> [#uses=1]
- %.not4.SV.phi = phi i1 [ %.not4, %bb56 ], [ undef, %bb65.fragment ] ; <i1> [#uses=1]
- %.SV256.phi = phi i1 [ %139, %bb56 ], [ undef, %bb65.fragment ] ; <i1> [#uses=1]
- %meshStackVariable365.phi = phi i32 [ %Opq.sa.calc592, %bb56 ], [ %Opq.sa.calc832, %bb65.fragment ] ; <i32> [#uses=1]
- %Opq.link.SV735.phi = phi i32 [ %Opq.sa.calc592, %bb56 ], [ %Opq.sa.calc832, %bb65.fragment ] ; <i32> [#uses=1]
- %Opq.link.mask737 = and i32 %Opq.link.SV735.phi, 0 ; <i32> [#uses=2]
- %Opq.sa.calc734 = sub i32 %Opq.link.mask737, -242 ; <i32> [#uses=0]
- %meshCmp367 = icmp eq i32 %meshStackVariable365.phi, 1 ; <i1> [#uses=1]
- br i1 %meshCmp367, label %bb96, label %bb56.fragment
-
-meshBB368: ; preds = %bb90.fragment, %bb8
- %.SV38.phi1164 = phi i64 [ %.SV38.phi1103, %bb8 ], [ %.SV38.phi1191, %bb90.fragment ] ; <i64> [#uses=5]
- %.SV43.phi1163 = phi i32 [ %.SV43.phi1102, %bb8 ], [ undef, %bb90.fragment ] ; <i32> [#uses=1]
- %.SV68.phi1162 = phi i32 [ %.SV68.phi1100, %bb8 ], [ %.SV68.phi1189, %bb90.fragment ] ; <i32> [#uses=5]
- %.SV70.phi1161 = phi i32 [ %.SV70.phi1099, %bb8 ], [ %.SV70.phi1188, %bb90.fragment ] ; <i32> [#uses=5]
- %.SV178.phi = phi i32 [ undef, %bb8 ], [ %214, %bb90.fragment ] ; <i32> [#uses=2]
- %.SV176.phi = phi i32* [ undef, %bb8 ], [ %212, %bb90.fragment ] ; <i32*> [#uses=1]
- %.SV170.phi = phi i32* [ undef, %bb8 ], [ %210, %bb90.fragment ] ; <i32*> [#uses=1]
- %.SV172.phi = phi i32 [ undef, %bb8 ], [ %211, %bb90.fragment ] ; <i32> [#uses=1]
- %.SV76.phi = phi i32* [ %28, %bb8 ], [ undef, %bb90.fragment ] ; <i32*> [#uses=1]
- %.SV78.phi = phi i32 [ %29, %bb8 ], [ undef, %bb90.fragment ] ; <i32> [#uses=1]
- %.SV80.phi = phi i32* [ %30, %bb8 ], [ undef, %bb90.fragment ] ; <i32*> [#uses=1]
- %.load66.SV.phi = phi i32* [ %.SV52.phi1101, %bb8 ], [ undef, %bb90.fragment ] ; <i32*> [#uses=1]
- %.load35.SV.phi = phi i64 [ %3, %bb8 ], [ undef, %bb90.fragment ] ; <i64> [#uses=1]
- %.load16.SV.phi = phi %struct.Macroblock* [ %2, %bb8 ], [ undef, %bb90.fragment ] ; <%struct.Macroblock*> [#uses=1]
- %.SV198.phi = phi i32 [ %29, %bb8 ], [ undef, %bb90.fragment ] ; <i32> [#uses=1]
- %.SV200.phi = phi i32* [ %30, %bb8 ], [ undef, %bb90.fragment ] ; <i32*> [#uses=1]
- %meshStackVariable369.phi = phi i32 [ %Opq.sa.calc479, %bb8 ], [ %Opq.sa.calc772, %bb90.fragment ] ; <i32> [#uses=1]
- %Opq.link.SV769.phi = phi i32 [ %Opq.sa.calc805, %bb8 ], [ %Opq.sa.calc772, %bb90.fragment ] ; <i32> [#uses=1]
- %Opq.link.mask771 = and i32 %Opq.link.SV769.phi, 2 ; <i32> [#uses=1]
- %Opq.sa.calc768 = xor i32 %Opq.link.mask771, 135 ; <i32> [#uses=3]
- %meshCmp371 = icmp eq i32 %meshStackVariable369.phi, 2 ; <i1> [#uses=1]
- br i1 %meshCmp371, label %bb91, label %bb8.fragment
-
-meshBB372: ; preds = %bb84.fragment, %bb35
- %.SV38.phi1214 = phi i64 [ %.SV38.phi1191, %bb84.fragment ], [ %.SV38.phi1183, %bb35 ] ; <i64> [#uses=3]
- %.SV52.phi1213 = phi i32* [ %.SV52.phi1190, %bb84.fragment ], [ undef, %bb35 ] ; <i32*> [#uses=2]
- %.SV68.phi1212 = phi i32 [ %.SV68.phi1189, %bb84.fragment ], [ %.SV68.phi1181, %bb35 ] ; <i32> [#uses=3]
- %.SV70.phi1211 = phi i32 [ %.SV70.phi1188, %bb84.fragment ], [ %.SV70.phi1180, %bb35 ] ; <i32> [#uses=3]
- %.SV118.phi1154 = phi i32 [ undef, %bb84.fragment ], [ %.SV118.phi1082, %bb35 ] ; <i32> [#uses=1]
- %.SV167.phi = phi i1 [ %203, %bb84.fragment ], [ undef, %bb35 ] ; <i1> [#uses=1]
- %meshStackVariable373.phi = phi i32 [ %Opq.sa.calc802, %bb84.fragment ], [ %Opq.sa.calc540, %bb35 ] ; <i32> [#uses=1]
- %Opq.link.SV813.phi = phi i32 [ %Opq.sa.calc802, %bb84.fragment ], [ %Opq.sa.calc541, %bb35 ] ; <i32> [#uses=1]
- %Opq.link.mask815 = and i32 %Opq.link.SV813.phi, 0 ; <i32> [#uses=1]
- %Opq.sa.calc812 = sub i32 %Opq.link.mask815, -121 ; <i32> [#uses=3]
- %meshCmp375 = icmp eq i32 %meshStackVariable373.phi, 6 ; <i1> [#uses=1]
- br i1 %meshCmp375, label %bb36, label %bb85
-
-meshBB376: ; preds = %bb98, %bb44.fragment
- %.SV38.phi1153 = phi i64 [ %.SV38.phi1093, %bb98 ], [ %.SV38.phi1204, %bb44.fragment ] ; <i64> [#uses=1]
- %.SV68.phi1152 = phi i32 [ %.SV68.phi1092, %bb98 ], [ %.SV68.phi1203, %bb44.fragment ] ; <i32> [#uses=1]
- %.SV70.phi1151 = phi i32 [ %.SV70.phi1091, %bb98 ], [ %.SV70.phi1202, %bb44.fragment ] ; <i32> [#uses=1]
- %.load39.SV.phi = phi i64 [ %.SV38.phi1093, %bb98 ], [ undef, %bb44.fragment ] ; <i64> [#uses=2]
- %.SV313.phi = phi i32* [ %237, %bb98 ], [ undef, %bb44.fragment ] ; <i32*> [#uses=1]
- %.SV315.phi = phi i32* [ %239, %bb98 ], [ undef, %bb44.fragment ] ; <i32*> [#uses=1]
- %.SV317.phi = phi i32* [ %240, %bb98 ], [ undef, %bb44.fragment ] ; <i32*> [#uses=3]
- %.SV.phi1050 = phi i32 [ undef, %bb98 ], [ %.load127.SV.phi, %bb44.fragment ] ; <i32> [#uses=1]
- %yM.0.SV.phi1049 = phi i32 [ undef, %bb98 ], [ %121, %bb44.fragment ] ; <i32> [#uses=1]
- %meshStackVariable377.phi = phi i32 [ %Opq.sa.calc695, %bb98 ], [ %Opq.sa.calc894, %bb44.fragment ] ; <i32> [#uses=1]
- %Opq.link.SV909.phi = phi i32 [ %Opq.sa.calc695, %bb98 ], [ %Opq.sa.calc856, %bb44.fragment ] ; <i32> [#uses=1]
- %Opq.link.mask911 = and i32 %Opq.link.SV909.phi, 16 ; <i32> [#uses=2]
- %Opq.sa.calc908 = add i32 %Opq.link.mask911, -11 ; <i32> [#uses=0]
- %meshCmp379 = icmp eq i32 %meshStackVariable377.phi, 8 ; <i1> [#uses=1]
- br i1 %meshCmp379, label %bb96, label %bb98.fragment
-
-meshBB380: ; preds = %bb92.fragment, %bb49.fragment
- %.SV38.phi1207 = phi i64 [ %.SV38.phi1164, %bb92.fragment ], [ %.SV38.phi1179, %bb49.fragment ] ; <i64> [#uses=2]
- %.SV68.phi1206 = phi i32 [ %.SV68.phi1162, %bb92.fragment ], [ %.SV68.phi1177, %bb49.fragment ] ; <i32> [#uses=2]
- %.SV70.phi1205 = phi i32 [ %.SV70.phi1161, %bb92.fragment ], [ %.SV70.phi1176, %bb49.fragment ] ; <i32> [#uses=2]
- %.SV104.phi1124 = phi i32 [ undef, %bb92.fragment ], [ %.SV104.phi1036, %bb49.fragment ] ; <i32> [#uses=1]
- %.SV111.phi1123 = phi i32* [ undef, %bb92.fragment ], [ %.SV111.phi1035, %bb49.fragment ] ; <i32*> [#uses=1]
- %.SV118.phi1122 = phi i32 [ undef, %bb92.fragment ], [ %.SV118.phi1040, %bb49.fragment ] ; <i32> [#uses=1]
- %meshStackVariable381.phi = phi i32 [ %Opq.sa.calc1005, %bb92.fragment ], [ %Opq.sa.calc860, %bb49.fragment ] ; <i32> [#uses=1]
- %Opq.link.SV947.phi = phi i32 [ %Opq.sa.calc1005, %bb92.fragment ], [ %Opq.sa.calc860, %bb49.fragment ] ; <i32> [#uses=1]
- %.SV.phi1052 = phi i32 [ %.SV178.phi, %bb92.fragment ], [ undef, %bb49.fragment ] ; <i32> [#uses=1]
- %yM.0.SV.phi1051 = phi i32 [ %226, %bb92.fragment ], [ undef, %bb49.fragment ] ; <i32> [#uses=1]
- %Opq.link.mask949 = and i32 %Opq.link.SV947.phi, 1 ; <i32> [#uses=1]
- %Opq.sa.calc946 = sub i32 %Opq.link.mask949, -4 ; <i32> [#uses=1]
- %meshCmp383 = icmp eq i32 %meshStackVariable381.phi, 1 ; <i1> [#uses=1]
- br i1 %meshCmp383, label %bb54, label %bb96
-
-meshBB384: ; preds = %bb95, %bb52
- %.SV38.phi1221 = phi i64 [ %.SV38.phi1179, %bb52 ], [ %.SV38.phi1218, %bb95 ] ; <i64> [#uses=2]
- %.SV68.phi1220 = phi i32 [ %.SV68.phi1177, %bb52 ], [ %.SV68.phi1216, %bb95 ] ; <i32> [#uses=2]
- %.SV70.phi1219 = phi i32 [ %.SV70.phi1176, %bb52 ], [ %.SV70.phi1215, %bb95 ] ; <i32> [#uses=2]
- %.load53.SV.phi = phi i32* [ undef, %bb52 ], [ %.SV52.phi1217, %bb95 ] ; <i32*> [#uses=1]
- %.load20.SV.phi = phi i64 [ undef, %bb52 ], [ %3, %bb95 ] ; <i64> [#uses=1]
- %.load.SV.phi = phi %struct.Macroblock* [ undef, %bb52 ], [ %2, %bb95 ] ; <%struct.Macroblock*> [#uses=1]
- %.SV306.phi = phi i32 [ undef, %bb52 ], [ %227, %bb95 ] ; <i32> [#uses=1]
- %.SV308.phi = phi i32* [ undef, %bb52 ], [ %228, %bb95 ] ; <i32*> [#uses=1]
- %.load126.SV.phi = phi i32 [ %.SV118.phi1040, %bb52 ], [ undef, %bb95 ] ; <i32> [#uses=1]
- %.load44.SV.phi = phi i32 [ %.SV43.phi1178, %bb52 ], [ undef, %bb95 ] ; <i32> [#uses=1]
- %meshStackVariable385.phi = phi i32 [ %Opq.sa.calc583, %bb52 ], [ %Opq.sa.calc689, %bb95 ] ; <i32> [#uses=1]
- %Opq.link.SV902.phi = phi i32 [ %Opq.sa.calc860, %bb52 ], [ %Opq.sa.calc689, %bb95 ] ; <i32> [#uses=1]
- %Opq.link.SV905.phi = phi i32 [ %Opq.sa.calc584, %bb52 ], [ %Opq.sa.calc689, %bb95 ] ; <i32> [#uses=1]
- %Opq.link.mask907 = and i32 %Opq.link.SV905.phi, 0 ; <i32> [#uses=0]
- %Opq.link.mask904 = and i32 %Opq.link.SV902.phi, 1 ; <i32> [#uses=1]
- %Opq.sa.calc901 = xor i32 %Opq.link.mask904, 227 ; <i32> [#uses=3]
- %meshCmp387 = icmp eq i32 %meshStackVariable385.phi, 5 ; <i1> [#uses=1]
- br i1 %meshCmp387, label %bb95.fragment, label %bb52.fragment
-
-meshBB388: ; preds = %bb52.fragment, %bb7
- %.SV38.phi1118 = phi i64 [ %.SV38.phi1014, %bb7 ], [ %.SV38.phi1221, %bb52.fragment ] ; <i64> [#uses=2]
- %.SV68.phi1117 = phi i32 [ %.SV68.phi1020, %bb7 ], [ %.SV68.phi1220, %bb52.fragment ] ; <i32> [#uses=2]
- %.SV70.phi1116 = phi i32 [ %.SV70.phi1026, %bb7 ], [ %.SV70.phi1219, %bb52.fragment ] ; <i32> [#uses=2]
- %.SV.phi1054 = phi i32 [ undef, %bb7 ], [ %.load126.SV.phi, %bb52.fragment ] ; <i32> [#uses=1]
- %yM.0.SV.phi1053 = phi i32 [ undef, %bb7 ], [ %137, %bb52.fragment ] ; <i32> [#uses=1]
- %.load67.SV.phi = phi i32* [ %.SV52.phi, %bb7 ], [ undef, %bb52.fragment ] ; <i32*> [#uses=1]
- %.load36.SV.phi = phi i64 [ %3, %bb7 ], [ undef, %bb52.fragment ] ; <i64> [#uses=1]
- %.load17.SV.phi = phi %struct.Macroblock* [ %2, %bb7 ], [ undef, %bb52.fragment ] ; <%struct.Macroblock*> [#uses=1]
- %.SV194.phi = phi i32 [ %24, %bb7 ], [ undef, %bb52.fragment ] ; <i32> [#uses=1]
- %.SV196.phi = phi i32* [ %25, %bb7 ], [ undef, %bb52.fragment ] ; <i32*> [#uses=1]
- %meshStackVariable389.phi = phi i32 [ %Opq.sa.calc476, %bb7 ], [ %Opq.sa.calc844, %bb52.fragment ] ; <i32> [#uses=1]
- %Opq.link.SV887.phi = phi i32 [ %Opq.sa.calc873, %bb7 ], [ %Opq.sa.calc901, %bb52.fragment ] ; <i32> [#uses=1]
- %Opq.link.mask889 = and i32 %Opq.link.SV887.phi, 64 ; <i32> [#uses=1]
- %Opq.sa.calc886 = sub i32 %Opq.link.mask889, -170 ; <i32> [#uses=2]
- %meshCmp391 = icmp eq i32 %meshStackVariable389.phi, 12 ; <i1> [#uses=1]
- br i1 %meshCmp391, label %bb96, label %bb7.fragment
-
-meshBB392: ; preds = %bb4, %entry
- %meshStackVariable393.phi = phi i32 [ %Opq.sa.calc466, %bb4 ], [ %Opq.sa.calc, %entry ] ; <i32> [#uses=1]
- %Opq.link.SV922.phi = phi i32 [ %Opq.sa.calc462, %bb4 ], [ %Opq.sa.calc, %entry ] ; <i32> [#uses=1]
- %or.cond.not.SV.phi = phi i1 [ %or.cond.not, %bb4 ], [ undef, %entry ] ; <i1> [#uses=1]
- %.SV70.phi1027 = phi i32 [ %12, %bb4 ], [ undef, %entry ] ; <i32> [#uses=2]
- %.SV52.phi1022 = phi i32* [ %9, %bb4 ], [ undef, %entry ] ; <i32*> [#uses=1]
- %.SV68.phi1021 = phi i32 [ %10, %bb4 ], [ undef, %entry ] ; <i32> [#uses=2]
- %.SV43.phi1015 = phi i32 [ %8, %bb4 ], [ undef, %entry ] ; <i32> [#uses=3]
- %Opq.link.mask924 = and i32 %Opq.link.SV922.phi, 2 ; <i32> [#uses=1]
- %Opq.sa.calc921 = add i32 %Opq.link.mask924, 57 ; <i32> [#uses=3]
- %meshCmp395 = icmp eq i32 %meshStackVariable393.phi, 2 ; <i1> [#uses=1]
- br i1 %meshCmp395, label %entry.fragment, label %bb21
-
-meshBB396: ; preds = %bb69.fragment, %bb.fragment
- %.SV.phi1065 = phi i32 [ undef, %bb.fragment ], [ %171, %bb69.fragment ] ; <i32> [#uses=1]
- %meshStackVariable397.phi = phi i32 [ %Opq.sa.calc976, %bb.fragment ], [ %Opq.sa.calc995, %bb69.fragment ] ; <i32> [#uses=1]
- %Opq.link.SV759.phi = phi i32 [ %Opq.sa.calc976, %bb.fragment ], [ %Opq.sa.calc995, %bb69.fragment ] ; <i32> [#uses=1]
- %.SV70.phi = phi i32 [ %12, %bb.fragment ], [ %.SV70.phi1168, %bb69.fragment ] ; <i32> [#uses=1]
- %.SV68.phi = phi i32 [ %10, %bb.fragment ], [ %.SV68.phi1169, %bb69.fragment ] ; <i32> [#uses=1]
- %.SV38.phi = phi i64 [ %4, %bb.fragment ], [ %.SV38.phi1172, %bb69.fragment ] ; <i64> [#uses=1]
- %Opq.link.mask761 = and i32 %Opq.link.SV759.phi, 6 ; <i32> [#uses=1]
- %Opq.sa.calc758 = add i32 %Opq.link.mask761, 53 ; <i32> [#uses=1]
- %meshCmp399 = icmp eq i32 %meshStackVariable397.phi, 6 ; <i1> [#uses=1]
- br i1 %meshCmp399, label %bb96, label %return
-
-meshBB400: ; preds = %bb84, %bb69.fragment
- %.SV38.phi1191 = phi i64 [ %.SV38.phi1098, %bb84 ], [ %.SV38.phi1172, %bb69.fragment ] ; <i64> [#uses=5]
- %.SV52.phi1190 = phi i32* [ %.SV52.phi1097, %bb84 ], [ undef, %bb69.fragment ] ; <i32*> [#uses=3]
- %.SV68.phi1189 = phi i32 [ %.SV68.phi1096, %bb84 ], [ %.SV68.phi1169, %bb69.fragment ] ; <i32> [#uses=5]
- %.SV70.phi1188 = phi i32 [ %.SV70.phi1095, %bb84 ], [ %.SV70.phi1168, %bb69.fragment ] ; <i32> [#uses=5]
- %.SV290.phi = phi i32 [ %200, %bb84 ], [ undef, %bb69.fragment ] ; <i32> [#uses=1]
- %.SV164.phi = phi i32 [ undef, %bb84 ], [ %171, %bb69.fragment ] ; <i32> [#uses=2]
- %meshStackVariable401.phi = phi i32 [ %Opq.sa.calc661, %bb84 ], [ %Opq.sa.calc996, %bb69.fragment ] ; <i32> [#uses=1]
- %Opq.link.SV825.phi = phi i32 [ %Opq.sa.calc658, %bb84 ], [ %Opq.sa.calc996, %bb69.fragment ] ; <i32> [#uses=1]
- %.SV162.phi = phi i32* [ undef, %bb84 ], [ %169, %bb69.fragment ] ; <i32*> [#uses=1]
- %.SV156.phi = phi i32* [ undef, %bb84 ], [ %.SV274.phi, %bb69.fragment ] ; <i32*> [#uses=1]
- %.SV158.phi = phi i32 [ undef, %bb84 ], [ %168, %bb69.fragment ] ; <i32> [#uses=1]
- %Opq.link.mask827 = and i32 %Opq.link.SV825.phi, 4 ; <i32> [#uses=1]
- %Opq.sa.calc824 = xor i32 %Opq.link.mask827, 228 ; <i32> [#uses=2]
- %meshCmp403 = icmp eq i32 %meshStackVariable401.phi, 15 ; <i1> [#uses=1]
- br i1 %meshCmp403, label %bb70, label %bb84.fragment
-
-meshBB404: ; preds = %bb96, %bb3
- %yM.0.reg2mem.1.SV.phi1077 = phi i32 [ %yM.0.SV.phi, %bb96 ], [ undef, %bb3 ] ; <i32> [#uses=1]
- %meshStackVariable405.phi = phi i32 [ %Opq.sa.calc692, %bb96 ], [ %Opq.sa.calc461, %bb3 ] ; <i32> [#uses=1]
- %Opq.link.SV940.phi = phi i32 [ %Opq.sa.calc693, %bb96 ], [ %Opq.sa.calc461, %bb3 ] ; <i32> [#uses=1]
- %or.cond.not.SV.phi1029 = phi i1 [ undef, %bb96 ], [ %or.cond.not, %bb3 ] ; <i1> [#uses=1]
- %.SV70.phi1028 = phi i32 [ %.SV70.phi1085, %bb96 ], [ %12, %bb3 ] ; <i32> [#uses=2]
- %.SV52.phi1024 = phi i32* [ undef, %bb96 ], [ %9, %bb3 ] ; <i32*> [#uses=1]
- %.SV68.phi1023 = phi i32 [ %.SV68.phi1086, %bb96 ], [ %10, %bb3 ] ; <i32> [#uses=2]
- %.SV38.phi1017 = phi i64 [ %.SV38.phi1087, %bb96 ], [ %4, %bb3 ] ; <i64> [#uses=2]
- %.SV40.phi = phi i32 [ undef, %bb96 ], [ %6, %bb3 ] ; <i32> [#uses=1]
- %Opq.link.mask942 = and i32 %Opq.link.SV940.phi, 6 ; <i32> [#uses=1]
- %Opq.sa.calc939 = sub i32 %Opq.link.mask942, -87 ; <i32> [#uses=1]
- %meshCmp407 = icmp eq i32 %meshStackVariable405.phi, 6 ; <i1> [#uses=1]
- br i1 %meshCmp407, label %bb56, label %bb98
-
-meshBB408: ; preds = %bb89.fragment, %bb87
- %.SV38.phi1218 = phi i64 [ %.SV38.phi1191, %bb89.fragment ], [ %.SV38.phi1210, %bb87 ] ; <i64> [#uses=2]
- %.SV52.phi1217 = phi i32* [ %.SV52.phi1190, %bb89.fragment ], [ %.SV52.phi1235, %bb87 ] ; <i32*> [#uses=1]
- %.SV68.phi1216 = phi i32 [ %.SV68.phi1189, %bb89.fragment ], [ %.SV68.phi1209, %bb87 ] ; <i32> [#uses=2]
- %.SV70.phi1215 = phi i32 [ %.SV70.phi1188, %bb89.fragment ], [ %.SV70.phi1208, %bb87 ] ; <i32> [#uses=2]
- %.SV172.phi1074 = phi i32 [ %211, %bb89.fragment ], [ undef, %bb87 ] ; <i32> [#uses=1]
- %meshStackVariable409.phi = phi i32 [ %Opq.sa.calc962, %bb89.fragment ], [ %Opq.sa.calc673, %bb87 ] ; <i32> [#uses=1]
- %Opq.link.SV913.phi = phi i32 [ %Opq.sa.calc962, %bb89.fragment ], [ %Opq.sa.calc990, %bb87 ] ; <i32> [#uses=1]
- %Opq.link.mask915 = and i32 %Opq.link.SV913.phi, 9 ; <i32> [#uses=1]
- %Opq.sa.calc912 = xor i32 %Opq.link.mask915, 195 ; <i32> [#uses=1]
- %meshCmp411 = icmp eq i32 %meshStackVariable409.phi, 1 ; <i1> [#uses=1]
- br i1 %meshCmp411, label %bb97, label %bb95
-
-meshBB412: ; preds = %bb68.fragment, %bb13.fragment
- %.SV38.phi1187 = phi i64 [ %.SV38.phi1115, %bb13.fragment ], [ %.SV38.phi1172, %bb68.fragment ] ; <i64> [#uses=2]
- %.SV52.phi1186 = phi i32* [ %.SV52.phi1113, %bb13.fragment ], [ %.SV52.phi1170, %bb68.fragment ] ; <i32*> [#uses=2]
- %.SV68.phi1185 = phi i32 [ %.SV68.phi1112, %bb13.fragment ], [ %.SV68.phi1169, %bb68.fragment ] ; <i32> [#uses=2]
- %.SV70.phi1184 = phi i32 [ %.SV70.phi1111, %bb13.fragment ], [ %.SV70.phi1168, %bb68.fragment ] ; <i32> [#uses=2]
- %.SV158.phi1063 = phi i32 [ undef, %bb13.fragment ], [ %168, %bb68.fragment ] ; <i32> [#uses=1]
- %.SV87.phi1030 = phi i32 [ %47, %bb13.fragment ], [ undef, %bb68.fragment ] ; <i32> [#uses=1]
- %meshStackVariable413.phi = phi i32 [ %Opq.sa.calc870, %bb13.fragment ], [ %Opq.sa.calc784, %bb68.fragment ] ; <i32> [#uses=1]
- %Opq.link.SV933.phi = phi i32 [ %Opq.sa.calc870, %bb13.fragment ], [ %Opq.link.mask722, %bb68.fragment ] ; <i32> [#uses=1]
- %Opq.link.SV936.phi = phi i32 [ %Opq.sa.calc866, %bb13.fragment ], [ %Opq.sa.calc784, %bb68.fragment ] ; <i32> [#uses=1]
- %Opq.link.mask938 = and i32 %Opq.link.SV936.phi, 4 ; <i32> [#uses=1]
- %Opq.link.mask935 = and i32 %Opq.link.SV933.phi, 0 ; <i32> [#uses=1]
- %Opq.sa.calc931 = sub i32 %Opq.link.mask935, %Opq.link.mask938 ; <i32> [#uses=1]
- %Opq.sa.calc932 = xor i32 %Opq.sa.calc931, -51 ; <i32> [#uses=3]
- %meshCmp415 = icmp eq i32 %meshStackVariable413.phi, 6 ; <i1> [#uses=1]
- br i1 %meshCmp415, label %bb74, label %bb19
-
-meshBB416: ; preds = %bb90.fragment, %bb77
- %.SV38.phi1201 = phi i64 [ %.SV38.phi1191, %bb90.fragment ], [ %.SV38.phi1098, %bb77 ] ; <i64> [#uses=2]
- %.SV52.phi1200 = phi i32* [ undef, %bb90.fragment ], [ %.SV52.phi1097, %bb77 ] ; <i32*> [#uses=1]
- %.SV68.phi1199 = phi i32 [ %.SV68.phi1189, %bb90.fragment ], [ %.SV68.phi1096, %bb77 ] ; <i32> [#uses=2]
- %.SV70.phi1198 = phi i32 [ %.SV70.phi1188, %bb90.fragment ], [ %.SV70.phi1095, %bb77 ] ; <i32> [#uses=2]
- %.SV.phi1076 = phi i32 [ %214, %bb90.fragment ], [ undef, %bb77 ] ; <i32> [#uses=1]
- %meshStackVariable417.phi = phi i32 [ %Opq.sa.calc773, %bb90.fragment ], [ %Opq.sa.calc643, %bb77 ] ; <i32> [#uses=1]
- %Opq.link.SV973.phi = phi i32 [ %Opq.sa.calc773, %bb90.fragment ], [ %Opq.sa.calc640, %bb77 ] ; <i32> [#uses=1]
- %Opq.link.mask975 = and i32 %Opq.link.SV973.phi, 10 ; <i32> [#uses=1]
- %Opq.sa.calc972 = xor i32 %Opq.link.mask975, 110 ; <i32> [#uses=1]
- %Opq.sa.calc971 = add i32 %Opq.sa.calc972, -19 ; <i32> [#uses=1]
- %meshCmp419 = icmp eq i32 %meshStackVariable417.phi, 12 ; <i1> [#uses=1]
- br i1 %meshCmp419, label %bb78, label %bb96
-
-meshBB420: ; preds = %bb66, %bb26.fragment
- %.SV38.phi1194 = phi i64 [ %.SV38.phi1098, %bb66 ], [ %.SV38.phi1167, %bb26.fragment ] ; <i64> [#uses=2]
- %.SV68.phi1193 = phi i32 [ %.SV68.phi1096, %bb66 ], [ %.SV68.phi1166, %bb26.fragment ] ; <i32> [#uses=2]
- %.SV70.phi1192 = phi i32 [ %.SV70.phi1095, %bb66 ], [ %.SV70.phi1165, %bb26.fragment ] ; <i32> [#uses=2]
- %.load61.SV.phi = phi i32* [ %.SV52.phi1097, %bb66 ], [ undef, %bb26.fragment ] ; <i32*> [#uses=1]
- %.SV270.phi = phi i32 [ %165, %bb66 ], [ undef, %bb26.fragment ] ; <i32> [#uses=1]
- %.SV272.phi = phi i32* [ %166, %bb66 ], [ undef, %bb26.fragment ] ; <i32*> [#uses=1]
- %.SV.phi1044 = phi i32 [ undef, %bb66 ], [ %.load123.SV.phi, %bb26.fragment ] ; <i32> [#uses=1]
- %meshStackVariable421.phi = phi i32 [ %Opq.sa.calc621, %bb66 ], [ %Opq.sa.calc918, %bb26.fragment ] ; <i32> [#uses=1]
- %Opq.link.SV838.phi = phi i32 [ %Opq.sa.calc602, %bb66 ], [ %Opq.sa.calc918, %bb26.fragment ] ; <i32> [#uses=1]
- %Opq.link.mask840 = and i32 %Opq.link.SV838.phi, 9 ; <i32> [#uses=2]
- %Opq.sa.calc837 = sub i32 %Opq.link.mask840, -202 ; <i32> [#uses=2]
- %Opq.sa.calc835 = sub i32 %Opq.sa.calc837, %Opq.link.mask840 ; <i32> [#uses=1]
- %Opq.sa.calc836 = xor i32 %Opq.sa.calc835, 176 ; <i32> [#uses=0]
- %meshCmp423 = icmp eq i32 %meshStackVariable421.phi, 9 ; <i1> [#uses=1]
- br i1 %meshCmp423, label %bb96, label %bb66.fragment
-
-meshBB424: ; preds = %bb86.fragment, %bb83
- %.SV38.phi1197 = phi i64 [ %.SV38.phi1231, %bb86.fragment ], [ %.SV38.phi1098, %bb83 ] ; <i64> [#uses=2]
- %.SV68.phi1196 = phi i32 [ %.SV68.phi1229, %bb86.fragment ], [ %.SV68.phi1096, %bb83 ] ; <i32> [#uses=2]
- %.SV70.phi1195 = phi i32 [ %.SV70.phi1228, %bb86.fragment ], [ %.SV70.phi1095, %bb83 ] ; <i32> [#uses=2]
- %.SV.phi1072 = phi i32 [ %209, %bb86.fragment ], [ undef, %bb83 ] ; <i32> [#uses=1]
- %meshStackVariable425.phi = phi i32 [ %Opq.sa.calc943, %bb86.fragment ], [ %Opq.sa.calc658, %bb83 ] ; <i32> [#uses=1]
- %Opq.link.SV951.phi = phi i32 [ %Opq.sa.calc943, %bb86.fragment ], [ %Opq.sa.calc1002, %bb83 ] ; <i32> [#uses=1]
- %Opq.link.mask953 = and i32 %Opq.link.SV951.phi, 12 ; <i32> [#uses=1]
- %Opq.sa.calc950 = sub i32 %Opq.link.mask953, -208 ; <i32> [#uses=0]
- %meshCmp427 = icmp eq i32 %meshStackVariable425.phi, 4 ; <i1> [#uses=1]
- br i1 %meshCmp427, label %bb97, label %bb96
-
-meshBB428: ; preds = %bb70, %bb4
- %.SV158.phi1090 = phi i32 [ %.SV158.phi, %bb70 ], [ undef, %bb4 ] ; <i32> [#uses=1]
- %.SV162.phi1089 = phi i32* [ %.SV162.phi, %bb70 ], [ undef, %bb4 ] ; <i32*> [#uses=1]
- %.SV164.phi1088 = phi i32 [ %.SV164.phi, %bb70 ], [ undef, %bb4 ] ; <i32> [#uses=1]
- %.load165.SV.phi = phi i32 [ %.SV164.phi, %bb70 ], [ undef, %bb4 ] ; <i32> [#uses=1]
- %.SV278.phi = phi %struct.Macroblock* [ %176, %bb70 ], [ undef, %bb4 ] ; <%struct.Macroblock*> [#uses=1]
- %.SV280.phi = phi i32 [ %177, %bb70 ], [ undef, %bb4 ] ; <i32> [#uses=1]
- %meshStackVariable429.phi = phi i32 [ %Opq.sa.calc630, %bb70 ], [ %Opq.sa.calc467, %bb4 ] ; <i32> [#uses=1]
- %Opq.link.SV898.phi = phi i32 [ %Opq.sa.calc630, %bb70 ], [ %Opq.sa.calc462, %bb4 ] ; <i32> [#uses=1]
- %.SV70.phi1026 = phi i32 [ %.SV70.phi1188, %bb70 ], [ %12, %bb4 ] ; <i32> [#uses=5]
- %.SV52.phi = phi i32* [ undef, %bb70 ], [ %9, %bb4 ] ; <i32*> [#uses=3]
- %.SV68.phi1020 = phi i32 [ %.SV68.phi1189, %bb70 ], [ %10, %bb4 ] ; <i32> [#uses=5]
- %.SV38.phi1014 = phi i64 [ %.SV38.phi1191, %bb70 ], [ %4, %bb4 ] ; <i64> [#uses=5]
- %.SV43.phi = phi i32 [ undef, %bb70 ], [ %8, %bb4 ] ; <i32> [#uses=1]
- %Opq.link.mask900 = and i32 %Opq.link.SV898.phi, 4 ; <i32> [#uses=1]
- %Opq.sa.calc897 = xor i32 %Opq.link.mask900, 193 ; <i32> [#uses=3]
- %meshCmp431 = icmp eq i32 %meshStackVariable429.phi, 5 ; <i1> [#uses=1]
- br i1 %meshCmp431, label %bb5, label %bb70.fragment
-
-meshBB432: ; preds = %bb42, %bb23.fragment182
- %.SV38.phi1179 = phi i64 [ %.SV38.phi1115, %bb23.fragment182 ], [ %.SV38.phi1231, %bb42 ] ; <i64> [#uses=7]
- %.SV43.phi1178 = phi i32 [ %.SV43.phi1015, %bb23.fragment182 ], [ %.SV43.phi1230, %bb42 ] ; <i32> [#uses=3]
- %.SV68.phi1177 = phi i32 [ %.SV68.phi1112, %bb23.fragment182 ], [ %.SV68.phi1229, %bb42 ] ; <i32> [#uses=7]
- %.SV70.phi1176 = phi i32 [ %.SV70.phi1111, %bb23.fragment182 ], [ %.SV70.phi1228, %bb42 ] ; <i32> [#uses=7]
- %.SV118.phi1040 = phi i32 [ %76, %bb23.fragment182 ], [ %.SV118.phi1125, %bb42 ] ; <i32> [#uses=7]
- %.SV135.phi1039 = phi i1 [ %78, %bb23.fragment182 ], [ undef, %bb42 ] ; <i1> [#uses=2]
- %meshStackVariable433.phi = phi i32 [ %Opq.sa.calc744, %bb23.fragment182 ], [ %Opq.sa.calc560, %bb42 ] ; <i32> [#uses=1]
- %Opq.link.SV799.phi = phi i32 [ %Opq.sa.calc744, %bb23.fragment182 ], [ %Opq.sa.calc987, %bb42 ] ; <i32> [#uses=1]
- %.SV96.phi1038 = phi i1 [ %71, %bb23.fragment182 ], [ undef, %bb42 ] ; <i1> [#uses=1]
- %.SV99.phi1037 = phi i32* [ %72, %bb23.fragment182 ], [ undef, %bb42 ] ; <i32*> [#uses=2]
- %.SV104.phi1036 = phi i32 [ %73, %bb23.fragment182 ], [ %.SV104.phi1127, %bb42 ] ; <i32> [#uses=3]
- %.SV111.phi1035 = phi i32* [ %74, %bb23.fragment182 ], [ %.SV111.phi1126, %bb42 ] ; <i32*> [#uses=3]
- %Opq.link.mask801 = and i32 %Opq.link.SV799.phi, 6 ; <i32> [#uses=1]
- %Opq.sa.calc798 = xor i32 %Opq.link.mask801, 3 ; <i32> [#uses=5]
- %meshCmp435 = icmp eq i32 %meshStackVariable433.phi, 1 ; <i1> [#uses=1]
- br i1 %meshCmp435, label %bb43, label %bb39
-
-meshBB436: ; preds = %bb71.fragment, %bb65
- %.SV38.phi1147 = phi i64 [ %.SV38.phi1144, %bb65 ], [ %.SV38.phi1140, %bb71.fragment ] ; <i64> [#uses=2]
- %.SV68.phi1146 = phi i32 [ %.SV68.phi1142, %bb65 ], [ %.SV68.phi1139, %bb71.fragment ] ; <i32> [#uses=2]
- %.SV70.phi1145 = phi i32 [ %.SV70.phi1141, %bb65 ], [ %.SV70.phi1138, %bb71.fragment ] ; <i32> [#uses=2]
- %.SV.phi1067 = phi i32 [ undef, %bb65 ], [ %.load166.SV.phi, %bb71.fragment ] ; <i32> [#uses=1]
- %yM.0.SV.phi1066 = phi i32 [ undef, %bb65 ], [ %183, %bb71.fragment ] ; <i32> [#uses=1]
- %.load62.SV.phi = phi i32* [ %.SV52.phi1143, %bb65 ], [ undef, %bb71.fragment ] ; <i32*> [#uses=1]
- %.SV268.phi = phi i32 [ %164, %bb65 ], [ undef, %bb71.fragment ] ; <i32> [#uses=2]
- %meshStackVariable437.phi = phi i32 [ %Opq.sa.calc617, %bb65 ], [ %Opq.sa.calc809, %bb71.fragment ] ; <i32> [#uses=1]
- %Opq.link.SV704.phi = phi i32 [ %Opq.sa.calc617, %bb65 ], [ %Opq.sa.calc809, %bb71.fragment ] ; <i32> [#uses=1]
- %Opq.link.mask706 = and i32 %Opq.link.SV704.phi, 0 ; <i32> [#uses=2]
- %Opq.sa.calc703 = add i32 %Opq.link.mask706, 216 ; <i32> [#uses=0]
- %meshCmp439 = icmp eq i32 %meshStackVariable437.phi, 2 ; <i1> [#uses=1]
- br i1 %meshCmp439, label %bb96, label %bb65.fragment
-
-meshBB440: ; preds = %bb85, %bb54.fragment
- %.SV52.phi1235 = phi i32* [ %.SV52.phi1213, %bb85 ], [ undef, %bb54.fragment ] ; <i32*> [#uses=2]
- %.SV38.phi1210 = phi i64 [ %.SV38.phi1214, %bb85 ], [ %.SV38.phi1207, %bb54.fragment ] ; <i64> [#uses=2]
- %.SV68.phi1209 = phi i32 [ %.SV68.phi1212, %bb85 ], [ %.SV68.phi1206, %bb54.fragment ] ; <i32> [#uses=2]
- %.SV70.phi1208 = phi i32 [ %.SV70.phi1211, %bb85 ], [ %.SV70.phi1205, %bb54.fragment ] ; <i32> [#uses=2]
- %.SV.phi1056 = phi i32 [ undef, %bb85 ], [ %.SV118.phi1122, %bb54.fragment ] ; <i32> [#uses=1]
- %meshStackVariable441.phi = phi i32 [ %Opq.sa.calc666, %bb85 ], [ %Opq.sa.calc883, %bb54.fragment ] ; <i32> [#uses=1]
- %Opq.link.SV991.phi = phi i32 [ %Opq.sa.calc665, %bb85 ], [ %Opq.sa.calc883, %bb54.fragment ] ; <i32> [#uses=1]
- %Opq.link.mask993 = and i32 %Opq.link.SV991.phi, 6 ; <i32> [#uses=1]
- %Opq.sa.calc990 = xor i32 %Opq.link.mask993, 139 ; <i32> [#uses=2]
- %meshCmp443 = icmp eq i32 %meshStackVariable441.phi, 6 ; <i1> [#uses=1]
- br i1 %meshCmp443, label %bb96, label %bb87
-
-meshBB444: ; preds = %bb66.fragment, %bb40
- %.SV38.phi1224 = phi i64 [ %.SV38.phi1194, %bb66.fragment ], [ %.SV38.phi1179, %bb40 ] ; <i64> [#uses=2]
- %.SV68.phi1223 = phi i32 [ %.SV68.phi1193, %bb66.fragment ], [ %.SV68.phi1177, %bb40 ] ; <i32> [#uses=2]
- %.SV70.phi1222 = phi i32 [ %.SV70.phi1192, %bb66.fragment ], [ %.SV70.phi1176, %bb40 ] ; <i32> [#uses=2]
- %.SV.phi1048 = phi i32 [ undef, %bb66.fragment ], [ %.SV118.phi1040, %bb40 ] ; <i32> [#uses=1]
- %meshStackVariable445.phi = phi i32 [ %Opq.sa.calc794, %bb66.fragment ], [ %Opq.sa.calc554, %bb40 ] ; <i32> [#uses=1]
- %Opq.link.SV781.phi = phi i32 [ %Opq.sa.calc795, %bb66.fragment ], [ %Opq.sa.calc554, %bb40 ] ; <i32> [#uses=1]
- %Opq.link.mask783 = and i32 %Opq.link.SV781.phi, 10 ; <i32> [#uses=1]
- %Opq.sa.calc780 = add i32 %Opq.link.mask783, 1 ; <i32> [#uses=0]
- %meshCmp447 = icmp eq i32 %meshStackVariable445.phi, 11 ; <i1> [#uses=1]
- br i1 %meshCmp447, label %bb96, label %bb98
-
-meshBB448: ; preds = %bb35, %entry.fragment181
- %.SV70.phi1233 = phi i32 [ undef, %entry.fragment181 ], [ %.SV70.phi1180, %bb35 ] ; <i32> [#uses=1]
- %.SV104.phi1157 = phi i32 [ undef, %entry.fragment181 ], [ %.SV104.phi1084, %bb35 ] ; <i32> [#uses=1]
- %.SV111.phi1156 = phi i32* [ undef, %entry.fragment181 ], [ %.SV111.phi1083, %bb35 ] ; <i32*> [#uses=1]
- %.SV118.phi1155 = phi i32 [ undef, %entry.fragment181 ], [ %.SV118.phi1082, %bb35 ] ; <i32> [#uses=1]
- %.SV68.phi1025 = phi i32 [ %10, %entry.fragment181 ], [ %.SV68.phi1181, %bb35 ] ; <i32> [#uses=1]
- %meshStackVariable449.phi = phi i32 [ %Opq.sa.calc863, %entry.fragment181 ], [ %Opq.sa.calc541, %bb35 ] ; <i32> [#uses=1]
- %Opq.link.SV959.phi = phi i32 [ %Opq.sa.calc863, %entry.fragment181 ], [ %Opq.sa.calc828, %bb35 ] ; <i32> [#uses=1]
- %.SV38.phi1019 = phi i64 [ %4, %entry.fragment181 ], [ %.SV38.phi1183, %bb35 ] ; <i64> [#uses=1]
- %.SV43.phi1018 = phi i32 [ %8, %entry.fragment181 ], [ %.SV43.phi1015, %bb35 ] ; <i32> [#uses=2]
- %Opq.link.mask961 = and i32 %Opq.link.SV959.phi, 1 ; <i32> [#uses=1]
- %Opq.sa.calc958 = xor i32 %Opq.link.mask961, 63 ; <i32> [#uses=3]
- %Opq.sa.calc957 = xor i32 %Opq.sa.calc958, 126 ; <i32> [#uses=1]
- %meshCmp451 = icmp eq i32 %meshStackVariable449.phi, 5 ; <i1> [#uses=1]
- br i1 %meshCmp451, label %bb37, label %return
-
-meshBB452: ; preds = %bb70.fragment, %bb63
- %.SV38.phi1110 = phi i64 [ %.SV38.phi1014, %bb70.fragment ], [ %.SV38.phi1098, %bb63 ] ; <i64> [#uses=3]
- %.SV52.phi1109 = phi i32* [ undef, %bb70.fragment ], [ %.SV52.phi1097, %bb63 ] ; <i32*> [#uses=2]
- %.SV68.phi1108 = phi i32 [ %.SV68.phi1020, %bb70.fragment ], [ %.SV68.phi1096, %bb63 ] ; <i32> [#uses=3]
- %.SV70.phi1107 = phi i32 [ %.SV70.phi1026, %bb70.fragment ], [ %.SV70.phi1095, %bb63 ] ; <i32> [#uses=3]
- %.SV158.phi1106 = phi i32 [ %.SV158.phi1090, %bb70.fragment ], [ undef, %bb63 ] ; <i32> [#uses=1]
- %.SV162.phi1105 = phi i32* [ %.SV162.phi1089, %bb70.fragment ], [ undef, %bb63 ] ; <i32*> [#uses=1]
- %.SV164.phi1104 = phi i32 [ %.SV164.phi1088, %bb70.fragment ], [ undef, %bb63 ] ; <i32> [#uses=1]
- %.SV264.phi = phi %struct.Macroblock* [ undef, %bb70.fragment ], [ %157, %bb63 ] ; <%struct.Macroblock*> [#uses=1]
- %.SV266.phi = phi i32 [ undef, %bb70.fragment ], [ %158, %bb63 ] ; <i32> [#uses=1]
- %meshStackVariable453.phi = phi i32 [ %Opq.sa.calc739, %bb70.fragment ], [ %Opq.sa.calc611, %bb63 ] ; <i32> [#uses=1]
- %Opq.link.SV821.phi = phi i32 [ %Opq.sa.calc897, %bb70.fragment ], [ %Opq.sa.calc611, %bb63 ] ; <i32> [#uses=1]
- %.SV150.phi1060 = phi i32* [ undef, %bb70.fragment ], [ %148, %bb63 ] ; <i32*> [#uses=1]
- %.SV152.phi1059 = phi i32* [ undef, %bb70.fragment ], [ %149, %bb63 ] ; <i32*> [#uses=2]
- %.SV148.phi1057 = phi i32 [ undef, %bb70.fragment ], [ %147, %bb63 ] ; <i32> [#uses=1]
- %Opq.link.mask823 = and i32 %Opq.link.SV821.phi, 4 ; <i32> [#uses=2]
- %Opq.sa.calc820 = sub i32 %Opq.link.mask823, -97 ; <i32> [#uses=2]
- %meshCmp455 = icmp eq i32 %meshStackVariable453.phi, 6 ; <i1> [#uses=1]
- br i1 %meshCmp455, label %bb63.fragment, label %bb71
-
-meshBB456: ; preds = %bb79, %bb63.fragment
- %.SV38.phi1137 = phi i64 [ %.SV38.phi1110, %bb63.fragment ], [ %.SV38.phi1098, %bb79 ] ; <i64> [#uses=2]
- %.SV52.phi1136 = phi i32* [ %.SV52.phi1109, %bb63.fragment ], [ %.SV52.phi1097, %bb79 ] ; <i32*> [#uses=2]
- %.SV68.phi1135 = phi i32 [ %.SV68.phi1108, %bb63.fragment ], [ %.SV68.phi1096, %bb79 ] ; <i32> [#uses=2]
- %.SV70.phi1134 = phi i32 [ %.SV70.phi1107, %bb63.fragment ], [ %.SV70.phi1095, %bb79 ] ; <i32> [#uses=2]
- %.SV152.phi1133 = phi i32* [ %.SV152.phi1059, %bb63.fragment ], [ undef, %bb79 ] ; <i32*> [#uses=1]
- %meshStackVariable457.phi = phi i32 [ %Opq.sa.calc890, %bb63.fragment ], [ %Opq.sa.calc651, %bb79 ] ; <i32> [#uses=1]
- %Opq.link.SV817.phi = phi i32 [ %Opq.sa.calc891, %bb63.fragment ], [ %Opq.sa.calc651, %bb79 ] ; <i32> [#uses=1]
- %Opq.link.mask819 = and i32 %Opq.link.SV817.phi, 2 ; <i32> [#uses=1]
- %Opq.sa.calc816 = add i32 %Opq.link.mask819, 186 ; <i32> [#uses=2]
- %meshCmp459 = icmp eq i32 %meshStackVariable457.phi, 10 ; <i1> [#uses=1]
- br i1 %meshCmp459, label %bb81, label %bb65
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-27-LiveIntervalsAssert.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-27-LiveIntervalsAssert.ll
deleted file mode 100644
index d77e528..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-27-LiveIntervalsAssert.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9
-; PR4056
-
-define void @int163(i32 %p_4, i32 %p_5) nounwind {
-entry:
- %0 = tail call i32 @bar(i32 1) nounwind ; <i32> [#uses=2]
- %1 = icmp sgt i32 %0, 7 ; <i1> [#uses=1]
- br i1 %1, label %foo.exit, label %bb.i
-
-bb.i: ; preds = %entry
- %2 = lshr i32 1, %0 ; <i32> [#uses=1]
- %3 = icmp eq i32 %2, 0 ; <i1> [#uses=1]
- %4 = zext i1 %3 to i32 ; <i32> [#uses=1]
- %.p_5 = shl i32 %p_5, %4 ; <i32> [#uses=1]
- br label %foo.exit
-
-foo.exit: ; preds = %bb.i, %entry
- %5 = phi i32 [ %.p_5, %bb.i ], [ %p_5, %entry ] ; <i32> [#uses=1]
- %6 = icmp eq i32 %5, 0 ; <i1> [#uses=0]
- %7 = tail call i32 @bar(i32 %p_5) nounwind ; <i32> [#uses=0]
- ret void
-}
-
-declare i32 @bar(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-27-LiveIntervalsAssert2.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-27-LiveIntervalsAssert2.ll
deleted file mode 100644
index f025654..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-27-LiveIntervalsAssert2.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9
-; PR4051
-
-define void @int163(i32 %p_4, i32 %p_5) nounwind {
-entry:
- %0 = tail call i32 @foo(i32 1) nounwind ; <i32> [#uses=2]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %bb.i, label %bar.exit
-
-bb.i: ; preds = %entry
- %2 = lshr i32 1, %0 ; <i32> [#uses=1]
- %3 = icmp eq i32 %2, 0 ; <i1> [#uses=1]
- %retval.i = select i1 %3, i32 1, i32 %p_5 ; <i32> [#uses=1]
- br label %bar.exit
-
-bar.exit: ; preds = %bb.i, %entry
- %4 = phi i32 [ %retval.i, %bb.i ], [ %p_5, %entry ] ; <i32> [#uses=1]
- %5 = icmp eq i32 %4, 0 ; <i1> [#uses=0]
- %6 = tail call i32 @foo(i32 %p_5) nounwind ; <i32> [#uses=0]
- ret void
-}
-
-declare i32 @foo(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-27-LiveIntervalsBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-27-LiveIntervalsBug.ll
deleted file mode 100644
index 0a2fcdb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-27-LiveIntervalsBug.ll
+++ /dev/null
@@ -1,165 +0,0 @@
-; RUN: llc < %s -mtriple=i386-unknown-linux-gnu | grep cmpxchgl | not grep eax
-; PR4076
-
- type { i8, i8, i8 } ; type %0
- type { i32, i8** } ; type %1
- type { %3* } ; type %2
- type { %4 } ; type %3
- type { %5 } ; type %4
- type { %6, i32, %7 } ; type %5
- type { i8* } ; type %6
- type { i32, [12 x i8] } ; type %7
- type { %9 } ; type %8
- type { %10, %11*, i8 } ; type %9
- type { %11* } ; type %10
- type { i32, %6, i8*, %12, %13*, i8, i32, %28, %29, i32, %30, i32, i32, i32, i8*, i8*, i8, i8 } ; type %11
- type { %13* } ; type %12
- type { %14, i32, %13*, %21 } ; type %13
- type { %15, %16 } ; type %14
- type { i32 (...)** } ; type %15
- type { %17, i8* (i32)*, void (i8*)*, i8 } ; type %16
- type { i32 (...)**, i8*, i8*, i8*, i8*, i8*, i8*, %18 } ; type %17
- type { %19* } ; type %18
- type { i32, %20**, i32, %20**, i8** } ; type %19
- type { i32 (...)**, i32 } ; type %20
- type { %22, %25*, i8, i8, %17*, %26*, %27*, %27* } ; type %21
- type { i32 (...)**, i32, i32, i32, i32, i32, %23*, %24, [8 x %24], i32, %24*, %18 } ; type %22
- type { %23*, void (i32, %22*, i32)*, i32, i32 } ; type %23
- type { i8*, i32 } ; type %24
- type { i32 (...)**, %21 } ; type %25
- type { %20, i32*, i8, i32*, i32*, i16*, i8, [256 x i8], [256 x i8], i8 } ; type %26
- type { %20 } ; type %27
- type { void (%9*)*, i32 } ; type %28
- type { %15* } ; type %29
- type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8* } ; type %30
-@AtomicOps_Internalx86CPUFeatures = external global %0 ; <%0*> [#uses=1]
-internal constant [19 x i8] c"xxxxxxxxxxxxxxxxxx\00" ; <[19 x i8]*>:0 [#uses=1]
-internal constant [47 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\00" ; <[47 x i8]*>:1 [#uses=1]
-
-define i8** @func6(i8 zeroext, i32, i32, %1*) nounwind {
-; <label>:4
- %5 = alloca i32, align 4 ; <i32*> [#uses=2]
- %6 = alloca i32, align 4 ; <i32*> [#uses=2]
- %7 = alloca %2, align 8 ; <%2*> [#uses=3]
- %8 = alloca %8, align 8 ; <%8*> [#uses=2]
- br label %17
-
-; <label>:9 ; preds = %17
- %10 = getelementptr %1* %3, i32 %19, i32 0 ; <i32*> [#uses=1]
- %11 = load i32* %10, align 4 ; <i32> [#uses=1]
- %12 = icmp eq i32 %11, %2 ; <i1> [#uses=1]
- br i1 %12, label %13, label %16
-
-; <label>:13 ; preds = %9
- %14 = getelementptr %1* %3, i32 %19, i32 1 ; <i8***> [#uses=1]
- %15 = load i8*** %14, align 4 ; <i8**> [#uses=1]
- ret i8** %15
-
-; <label>:16 ; preds = %9
- %indvar.next13 = add i32 %18, 1 ; <i32> [#uses=1]
- br label %17
-
-; <label>:17 ; preds = %16, %4
- %18 = phi i32 [ 0, %4 ], [ %indvar.next13, %16 ] ; <i32> [#uses=2]
- %19 = add i32 %18, %1 ; <i32> [#uses=3]
- %20 = icmp sgt i32 %19, 3 ; <i1> [#uses=1]
- br i1 %20, label %21, label %9
-
-; <label>:21 ; preds = %17
- call void @func5()
- %22 = getelementptr %1* %3, i32 0, i32 0 ; <i32*> [#uses=1]
- %23 = load i32* %22, align 4 ; <i32> [#uses=1]
- %24 = icmp eq i32 %23, 0 ; <i1> [#uses=1]
- br i1 %24, label %._crit_edge, label %._crit_edge1
-
-._crit_edge1: ; preds = %._crit_edge1, %21
- %25 = phi i32 [ 0, %21 ], [ %26, %._crit_edge1 ] ; <i32> [#uses=1]
- %26 = add i32 %25, 1 ; <i32> [#uses=4]
- %27 = getelementptr %1* %3, i32 %26, i32 0 ; <i32*> [#uses=1]
- %28 = load i32* %27, align 4 ; <i32> [#uses=1]
- %29 = icmp ne i32 %28, 0 ; <i1> [#uses=1]
- %30 = icmp ne i32 %26, 4 ; <i1> [#uses=1]
- %31 = and i1 %29, %30 ; <i1> [#uses=1]
- br i1 %31, label %._crit_edge1, label %._crit_edge
-
-._crit_edge: ; preds = %._crit_edge1, %21
- %32 = phi i32 [ 0, %21 ], [ %26, %._crit_edge1 ] ; <i32> [#uses=3]
- %33 = call i8* @pthread_getspecific(i32 0) nounwind ; <i8*> [#uses=2]
- %34 = icmp ne i8* %33, null ; <i1> [#uses=1]
- %35 = icmp eq i8 %0, 0 ; <i1> [#uses=1]
- %36 = or i1 %34, %35 ; <i1> [#uses=1]
- br i1 %36, label %._crit_edge4, label %37
-
-; <label>:37 ; preds = %._crit_edge
- %38 = call i8* @func2(i32 2048) ; <i8*> [#uses=4]
- call void @llvm.memset.i32(i8* %38, i8 0, i32 2048, i32 4)
- %39 = call i32 @pthread_setspecific(i32 0, i8* %38) nounwind ; <i32> [#uses=2]
- store i32 %39, i32* %5
- store i32 0, i32* %6
- %40 = icmp eq i32 %39, 0 ; <i1> [#uses=1]
- br i1 %40, label %41, label %43
-
-; <label>:41 ; preds = %37
- %42 = getelementptr %2* %7, i32 0, i32 0 ; <%3**> [#uses=1]
- store %3* null, %3** %42, align 8
- br label %._crit_edge4
-
-; <label>:43 ; preds = %37
- %44 = call %3* @func1(i32* %5, i32* %6, i8* getelementptr ([47 x i8]* @1, i32 0, i32 0)) ; <%3*> [#uses=2]
- %45 = getelementptr %2* %7, i32 0, i32 0 ; <%3**> [#uses=1]
- store %3* %44, %3** %45, align 8
- %46 = icmp eq %3* %44, null ; <i1> [#uses=1]
- br i1 %46, label %._crit_edge4, label %47
-
-; <label>:47 ; preds = %43
- call void @func4(%8* %8, i8* getelementptr ([19 x i8]* @0, i32 0, i32 0), i32 165, %2* %7)
- call void @func3(%8* %8) noreturn
- unreachable
-
-._crit_edge4: ; preds = %43, %41, %._crit_edge
- %48 = phi i8* [ %38, %41 ], [ %33, %._crit_edge ], [ %38, %43 ] ; <i8*> [#uses=2]
- %49 = bitcast i8* %48 to i8** ; <i8**> [#uses=3]
- %50 = icmp ne i8* %48, null ; <i1> [#uses=1]
- %51 = icmp slt i32 %32, 4 ; <i1> [#uses=1]
- %52 = and i1 %50, %51 ; <i1> [#uses=1]
- br i1 %52, label %53, label %._crit_edge6
-
-; <label>:53 ; preds = %._crit_edge4
- %54 = getelementptr %1* %3, i32 %32, i32 0 ; <i32*> [#uses=1]
- %55 = call i32 asm sideeffect "lock; cmpxchgl $1,$2", "={ax},q,*m,0,~{dirflag},~{fpsr},~{flags},~{memory}"(i32 %2, i32* %54, i32 0) nounwind ; <i32> [#uses=1]
- %56 = load i8* getelementptr (%0* @AtomicOps_Internalx86CPUFeatures, i32 0, i32 0), align 8 ; <i8> [#uses=1]
- %57 = icmp eq i8 %56, 0 ; <i1> [#uses=1]
- br i1 %57, label %._crit_edge7, label %58
-
-; <label>:58 ; preds = %53
- call void asm sideeffect "lfence", "~{dirflag},~{fpsr},~{flags},~{memory}"() nounwind
- br label %._crit_edge7
-
-._crit_edge7: ; preds = %58, %53
- %59 = icmp eq i32 %55, 0 ; <i1> [#uses=1]
- br i1 %59, label %60, label %._crit_edge6
-
-._crit_edge6: ; preds = %._crit_edge7, %._crit_edge4
- ret i8** %49
-
-; <label>:60 ; preds = %._crit_edge7
- %61 = getelementptr %1* %3, i32 %32, i32 1 ; <i8***> [#uses=1]
- store i8** %49, i8*** %61, align 4
- ret i8** %49
-}
-
-declare %3* @func1(i32* nocapture, i32* nocapture, i8*)
-
-declare void @func5()
-
-declare void @func4(%8*, i8*, i32, %2*)
-
-declare void @func3(%8*) noreturn
-
-declare i8* @pthread_getspecific(i32) nounwind
-
-declare i8* @func2(i32)
-
-declare void @llvm.memset.i32(i8* nocapture, i8, i32, i32) nounwind
-
-declare i32 @pthread_setspecific(i32, i8*) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll
deleted file mode 100644
index a2fd2e4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s | grep {movl.*%ebx, 8(%esi)}
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.0"
-
-define void @cpuid(i32* %data) nounwind {
-entry:
- %arrayidx = getelementptr i32* %data, i32 1 ; <i32*> [#uses=1]
- %arrayidx2 = getelementptr i32* %data, i32 2 ; <i32*> [#uses=1]
- %arrayidx4 = getelementptr i32* %data, i32 3 ; <i32*> [#uses=1]
- %arrayidx6 = getelementptr i32* %data, i32 4 ; <i32*> [#uses=1]
- %arrayidx8 = getelementptr i32* %data, i32 5 ; <i32*> [#uses=1]
- %tmp9 = load i32* %arrayidx8 ; <i32> [#uses=1]
- %arrayidx11 = getelementptr i32* %data, i32 6 ; <i32*> [#uses=1]
- %tmp12 = load i32* %arrayidx11 ; <i32> [#uses=1]
- %arrayidx14 = getelementptr i32* %data, i32 7 ; <i32*> [#uses=1]
- %tmp15 = load i32* %arrayidx14 ; <i32> [#uses=1]
- %arrayidx17 = getelementptr i32* %data, i32 8 ; <i32*> [#uses=1]
- %tmp18 = load i32* %arrayidx17 ; <i32> [#uses=1]
- %0 = call i32 asm "cpuid", "={ax},=*{bx},=*{cx},=*{dx},{ax},{bx},{cx},{dx},~{dirflag},~{fpsr},~{flags}"(i32* %arrayidx2, i32* %arrayidx4, i32* %arrayidx6, i32 %tmp9, i32 %tmp12, i32 %tmp15, i32 %tmp18) nounwind ; <i32> [#uses=1]
- store i32 %0, i32* %arrayidx
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-29-LinearScanBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-29-LinearScanBug.ll
deleted file mode 100644
index 6843723..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-29-LinearScanBug.ll
+++ /dev/null
@@ -1,215 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin10
-; rdar://6837009
-
- type { %struct.pf_state*, %struct.pf_state*, %struct.pf_state*, i32 } ; type %0
- type { %2 } ; type %1
- type { %struct.pf_addr, %struct.pf_addr } ; type %2
- type { %struct.in6_addr } ; type %3
- type { [4 x i32] } ; type %4
- type { %struct.pfi_dynaddr*, [4 x i8] } ; type %5
- type { %struct.pfi_dynaddr*, %struct.pfi_dynaddr** } ; type %6
- type { %struct.pfr_ktable*, %struct.pfr_ktable*, %struct.pfr_ktable*, i32 } ; type %7
- type { %struct.pfr_ktable* } ; type %8
- type { i8* } ; type %9
- type { %11 } ; type %10
- type { i8*, i8*, %struct.radix_node* } ; type %11
- type { [2 x %struct.pf_rulequeue], %13, %13 } ; type %12
- type { %struct.pf_rulequeue*, %struct.pf_rule**, i32, i32, i32 } ; type %13
- type { %struct.pf_anchor*, %struct.pf_anchor*, %struct.pf_anchor*, i32 } ; type %14
- type { %struct.pfi_kif*, %struct.pfi_kif*, %struct.pfi_kif*, i32 } ; type %15
- type { %struct.ifnet*, %struct.ifnet** } ; type %16
- type { %18 } ; type %17
- type { %struct.pkthdr, %19 } ; type %18
- type { %struct.m_ext, [176 x i8] } ; type %19
- type { %struct.ifmultiaddr*, %struct.ifmultiaddr** } ; type %20
- type { i32, %22 } ; type %21
- type { i8*, [4 x i8] } ; type %22
- type { %struct.tcphdr* } ; type %23
- type { %struct.pf_ike_state } ; type %24
- type { %struct.pf_state_key*, %struct.pf_state_key*, %struct.pf_state_key*, i32 } ; type %25
- type { %struct.pf_src_node*, %struct.pf_src_node*, %struct.pf_src_node*, i32 } ; type %26
- %struct.anon = type { %struct.pf_state*, %struct.pf_state** }
- %struct.au_mask_t = type { i32, i32 }
- %struct.bpf_if = type opaque
- %struct.dlil_threading_info = type opaque
- %struct.ether_header = type { [6 x i8], [6 x i8], i16 }
- %struct.ext_refsq = type { %struct.ext_refsq*, %struct.ext_refsq* }
- %struct.hook_desc = type { %struct.hook_desc_head, void (i8*)*, i8* }
- %struct.hook_desc_head = type { %struct.hook_desc*, %struct.hook_desc** }
- %struct.if_data_internal = type { i8, i8, i8, i8, i8, i8, i8, i8, i32, i32, i32, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i32, i32, %struct.au_mask_t, i32, i32, i32 }
- %struct.ifaddr = type { %struct.sockaddr*, %struct.sockaddr*, %struct.sockaddr*, %struct.ifnet*, %struct.ifaddrhead, void (i32, %struct.rtentry*, %struct.sockaddr*)*, i32, i32, i32, void (%struct.ifaddr*)*, void (%struct.ifaddr*, i32)*, i32 }
- %struct.ifaddrhead = type { %struct.ifaddr*, %struct.ifaddr** }
- %struct.ifmultiaddr = type { %20, %struct.sockaddr*, %struct.ifmultiaddr*, %struct.ifnet*, i32, i8*, i32, void (i8*)* }
- %struct.ifmultihead = type { %struct.ifmultiaddr* }
- %struct.ifnet = type { i8*, i8*, %16, %struct.ifaddrhead, i32, i32 (%struct.ifnet*, %struct.sockaddr*)*, i32, %struct.bpf_if*, i16, i16, i16, i16, i32, i8*, i32, %struct.if_data_internal, i32, i32 (%struct.ifnet*, %struct.mbuf*)*, i32 (%struct.ifnet*, i32, i8*)*, i32 (%struct.ifnet*, i32, i32 (%struct.ifnet*, %struct.mbuf*)*)*, void (%struct.ifnet*)*, i32 (%struct.ifnet*, %struct.mbuf*, i8*, i32*)*, void (%struct.ifnet*, %struct.kev_msg*)*, i32 (%struct.ifnet*, %struct.mbuf**, %struct.sockaddr*, i8*, i8*)*, i32, %struct.ifnet_filter_head, i32, i8*, i32, %struct.ifmultihead, i32, i32 (%struct.ifnet*, i32, %struct.ifnet_demux_desc*, i32)*, i32 (%struct.ifnet*, i32)*, %struct.proto_hash_entry*, i8*, %struct.dlil_threading_info*, i8*, %struct.ifqueue, [1 x i32], i32, %struct.ifprefixhead, %struct.lck_rw_t*, %21, i32, %struct.thread*, %struct.pfi_kif*, %struct.lck_mtx_t*, %struct.route }
- %struct.ifnet_demux_desc = type { i32, i8*, i32 }
- %struct.ifnet_filter = type opaque
- %struct.ifnet_filter_head = type { %struct.ifnet_filter*, %struct.ifnet_filter** }
- %struct.ifprefix = type { %struct.sockaddr*, %struct.ifnet*, %struct.ifprefixhead, i8, i8 }
- %struct.ifprefixhead = type { %struct.ifprefix*, %struct.ifprefix** }
- %struct.ifqueue = type { i8*, i8*, i32, i32, i32 }
- %struct.in6_addr = type { %4 }
- %struct.in_addr = type { i32 }
- %struct.kev_d_vectors = type { i32, i8* }
- %struct.kev_msg = type { i32, i32, i32, i32, [5 x %struct.kev_d_vectors] }
- %struct.lck_mtx_t = type { [3 x i32] }
- %struct.lck_rw_t = type <{ [3 x i32] }>
- %struct.m_ext = type { i8*, void (i8*, i32, i8*)*, i32, i8*, %struct.ext_refsq, %struct.au_mask_t* }
- %struct.m_hdr = type { %struct.mbuf*, %struct.mbuf*, i32, i8*, i16, i16 }
- %struct.m_tag = type { %struct.packet_tags, i16, i16, i32 }
- %struct.mbuf = type { %struct.m_hdr, %17 }
- %struct.packet_tags = type { %struct.m_tag* }
- %struct.pf_addr = type { %3 }
- %struct.pf_addr_wrap = type <{ %1, %5, i8, i8, [6 x i8] }>
- %struct.pf_anchor = type { %14, %14, %struct.pf_anchor*, %struct.pf_anchor_node, [64 x i8], [1024 x i8], %struct.pf_ruleset, i32, i32 }
- %struct.pf_anchor_node = type { %struct.pf_anchor* }
- %struct.pf_app_state = type { void (%struct.pf_state*, i32, i32, %struct.pf_pdesc*, %struct.pfi_kif*)*, i32 (%struct.pf_app_state*, %struct.pf_app_state*)*, i32 (%struct.pf_app_state*, %struct.pf_app_state*)*, %24 }
- %struct.pf_ike_state = type { i64 }
- %struct.pf_mtag = type { i8*, i32, i32, i16, i8, i8 }
- %struct.pf_palist = type { %struct.pf_pooladdr*, %struct.pf_pooladdr** }
- %struct.pf_pdesc = type { %struct.pf_threshold, i64, %23, %struct.pf_addr, %struct.pf_addr, %struct.pf_rule*, %struct.pf_addr*, %struct.pf_addr*, %struct.ether_header*, %struct.mbuf*, i32, %struct.pf_mtag*, i16*, i32, i16, i8, i8, i8, i8 }
- %struct.pf_pool = type { %struct.pf_palist, [2 x i32], %struct.pf_pooladdr*, [4 x i8], %struct.in6_addr, %struct.pf_addr, i32, [2 x i16], i8, i8, [1 x i32] }
- %struct.pf_pooladdr = type <{ %struct.pf_addr_wrap, %struct.pf_palist, [2 x i32], [16 x i8], %struct.pfi_kif*, [1 x i32] }>
- %struct.pf_rule = type <{ %struct.pf_rule_addr, %struct.pf_rule_addr, [8 x %struct.pf_rule_ptr], [64 x i8], [16 x i8], [64 x i8], [64 x i8], [64 x i8], [64 x i8], [32 x i8], %struct.pf_rulequeue, [2 x i32], %struct.pf_pool, i64, [2 x i64], [2 x i64], %struct.pfi_kif*, [4 x i8], %struct.pf_anchor*, [4 x i8], %struct.pfr_ktable*, [4 x i8], i32, i32, [26 x i32], i32, i32, i32, i32, i32, i32, %struct.au_mask_t, i32, i32, i32, i32, i32, i32, i32, i16, i16, i16, i16, i16, [2 x i8], %struct.pf_rule_gid, %struct.pf_rule_gid, i32, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, [2 x i8] }>
- %struct.pf_rule_addr = type <{ %struct.pf_addr_wrap, %struct.pf_rule_xport, i8, [7 x i8] }>
- %struct.pf_rule_gid = type { [2 x i32], i8, [3 x i8] }
- %struct.pf_rule_ptr = type { %struct.pf_rule*, [4 x i8] }
- %struct.pf_rule_xport = type { i32, [4 x i8] }
- %struct.pf_rulequeue = type { %struct.pf_rule*, %struct.pf_rule** }
- %struct.pf_ruleset = type { [5 x %12], %struct.pf_anchor*, i32, i32, i32 }
- %struct.pf_src_node = type <{ %26, %struct.pf_addr, %struct.pf_addr, %struct.pf_rule_ptr, %struct.pfi_kif*, [2 x i64], [2 x i64], i32, i32, %struct.pf_threshold, i64, i64, i8, i8, [2 x i8] }>
- %struct.pf_state = type <{ i64, i32, i32, %struct.anon, %struct.anon, %0, %struct.pf_state_peer, %struct.pf_state_peer, %struct.pf_rule_ptr, %struct.pf_rule_ptr, %struct.pf_rule_ptr, %struct.pf_addr, %struct.hook_desc_head, %struct.pf_state_key*, %struct.pfi_kif*, %struct.pfi_kif*, %struct.pf_src_node*, %struct.pf_src_node*, [2 x i64], [2 x i64], i64, i64, i64, i16, i8, i8, i8, i8, [6 x i8] }>
- %struct.pf_state_host = type { %struct.pf_addr, %struct.in_addr }
- %struct.pf_state_key = type { %struct.pf_state_host, %struct.pf_state_host, %struct.pf_state_host, i8, i8, i8, i8, %struct.pf_app_state*, %25, %25, %struct.anon, i16 }
- %struct.pf_state_peer = type { i32, i32, i32, i16, i8, i8, i16, i8, %struct.pf_state_scrub*, [3 x i8] }
- %struct.pf_state_scrub = type { %struct.au_mask_t, i32, i32, i32, i16, i8, i8, i32 }
- %struct.pf_threshold = type { i32, i32, i32, i32 }
- %struct.pfi_dynaddr = type { %6, %struct.pf_addr, %struct.pf_addr, %struct.pf_addr, %struct.pf_addr, %struct.pfr_ktable*, %struct.pfi_kif*, i8*, i32, i32, i32, i8, i8 }
- %struct.pfi_kif = type { [16 x i8], %15, [2 x [2 x [2 x i64]]], [2 x [2 x [2 x i64]]], i64, i32, i8*, %struct.ifnet*, i32, i32, %6 }
- %struct.pfr_ktable = type { %struct.pfr_tstats, %7, %8, %struct.radix_node_head*, %struct.radix_node_head*, %struct.pfr_ktable*, %struct.pfr_ktable*, %struct.pf_ruleset*, i64, i32 }
- %struct.pfr_table = type { [1024 x i8], [32 x i8], i32, i8 }
- %struct.pfr_tstats = type { %struct.pfr_table, [2 x [3 x i64]], [2 x [3 x i64]], i64, i64, i64, i32, [2 x i32] }
- %struct.pkthdr = type { i32, %struct.ifnet*, i8*, i32, i32, i32, i16, i16, %struct.packet_tags }
- %struct.proto_hash_entry = type opaque
- %struct.radix_mask = type { i16, i8, i8, %struct.radix_mask*, %9, i32 }
- %struct.radix_node = type { %struct.radix_mask*, %struct.radix_node*, i16, i8, i8, %10 }
- %struct.radix_node_head = type { %struct.radix_node*, i32, i32, %struct.radix_node* (i8*, i8*, %struct.radix_node_head*, %struct.radix_node*)*, %struct.radix_node* (i8*, i8*, %struct.radix_node_head*, %struct.radix_node*)*, %struct.radix_node* (i8*, i8*, %struct.radix_node_head*)*, %struct.radix_node* (i8*, i8*, %struct.radix_node_head*)*, %struct.radix_node* (i8*, %struct.radix_node_head*)*, %struct.radix_node* (i8*, %struct.radix_node_head*, i32 (%struct.radix_node*, i8*)*, i8*)*, %struct.radix_node* (i8*, i8*, %struct.radix_node_head*)*, %struct.radix_node* (i8*, i8*, %struct.radix_node_head*, i32 (%struct.radix_node*, i8*)*, i8*)*, %struct.radix_node* (i8*, %struct.radix_node_head*)*, i32 (%struct.radix_node_head*, i32 (%struct.radix_node*, i8*)*, i8*)*, i32 (%struct.radix_node_head*, i8*, i8*, i32 (%struct.radix_node*, i8*)*, i8*)*, void (%struct.radix_node*, %struct.radix_node_head*)*, [3 x %struct.radix_node], i32 }
- %struct.route = type { %struct.rtentry*, i32, %struct.sockaddr }
- %struct.rt_metrics = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [4 x i32] }
- %struct.rtentry = type { [2 x %struct.radix_node], %struct.sockaddr*, i32, i32, %struct.ifnet*, %struct.ifaddr*, %struct.sockaddr*, i8*, void (i8*)*, %struct.rt_metrics, %struct.rtentry*, %struct.rtentry*, i32, %struct.lck_mtx_t }
- %struct.sockaddr = type { i8, i8, [14 x i8] }
- %struct.tcphdr = type { i16, i16, i32, i32, i8, i8, i16, i16, i16 }
- %struct.thread = type opaque
-@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 (%struct.pf_state_key*, %struct.pf_state_key*)* @pf_state_compare_ext_gwy to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define fastcc i32 @pf_state_compare_ext_gwy(%struct.pf_state_key* nocapture %a, %struct.pf_state_key* nocapture %b) nounwind optsize ssp {
-entry:
- %0 = zext i8 0 to i32 ; <i32> [#uses=2]
- %1 = load i8* null, align 1 ; <i8> [#uses=2]
- %2 = zext i8 %1 to i32 ; <i32> [#uses=1]
- %3 = sub i32 %0, %2 ; <i32> [#uses=1]
- %4 = icmp eq i8 0, %1 ; <i1> [#uses=1]
- br i1 %4, label %bb1, label %bb79
-
-bb1: ; preds = %entry
- %5 = load i8* null, align 4 ; <i8> [#uses=2]
- %6 = zext i8 %5 to i32 ; <i32> [#uses=2]
- %7 = getelementptr %struct.pf_state_key* %b, i32 0, i32 3 ; <i8*> [#uses=1]
- %8 = load i8* %7, align 4 ; <i8> [#uses=2]
- %9 = zext i8 %8 to i32 ; <i32> [#uses=1]
- %10 = sub i32 %6, %9 ; <i32> [#uses=1]
- %11 = icmp eq i8 %5, %8 ; <i1> [#uses=1]
- br i1 %11, label %bb3, label %bb79
-
-bb3: ; preds = %bb1
- switch i32 %0, label %bb23 [
- i32 1, label %bb4
- i32 6, label %bb6
- i32 17, label %bb10
- i32 47, label %bb17
- i32 50, label %bb21
- i32 58, label %bb4
- ]
-
-bb4: ; preds = %bb3, %bb3
- %12 = load i16* null, align 4 ; <i16> [#uses=1]
- %13 = zext i16 %12 to i32 ; <i32> [#uses=1]
- %14 = sub i32 0, %13 ; <i32> [#uses=1]
- br i1 false, label %bb23, label %bb79
-
-bb6: ; preds = %bb3
- %15 = load i16* null, align 4 ; <i16> [#uses=1]
- %16 = zext i16 %15 to i32 ; <i32> [#uses=1]
- %17 = sub i32 0, %16 ; <i32> [#uses=1]
- ret i32 %17
-
-bb10: ; preds = %bb3
- %18 = load i8* null, align 1 ; <i8> [#uses=2]
- %19 = zext i8 %18 to i32 ; <i32> [#uses=1]
- %20 = sub i32 0, %19 ; <i32> [#uses=1]
- %21 = icmp eq i8 0, %18 ; <i1> [#uses=1]
- br i1 %21, label %bb12, label %bb79
-
-bb12: ; preds = %bb10
- %22 = load i16* null, align 4 ; <i16> [#uses=1]
- %23 = zext i16 %22 to i32 ; <i32> [#uses=1]
- %24 = sub i32 0, %23 ; <i32> [#uses=1]
- ret i32 %24
-
-bb17: ; preds = %bb3
- %25 = load i8* null, align 1 ; <i8> [#uses=2]
- %26 = icmp eq i8 %25, 1 ; <i1> [#uses=1]
- br i1 %26, label %bb18, label %bb23
-
-bb18: ; preds = %bb17
- %27 = icmp eq i8 %25, 0 ; <i1> [#uses=1]
- br i1 %27, label %bb19, label %bb23
-
-bb19: ; preds = %bb18
- %28 = load i16* null, align 4 ; <i16> [#uses=1]
- %29 = zext i16 %28 to i32 ; <i32> [#uses=1]
- %30 = sub i32 0, %29 ; <i32> [#uses=1]
- br i1 false, label %bb23, label %bb79
-
-bb21: ; preds = %bb3
- %31 = getelementptr %struct.pf_state_key* %a, i32 0, i32 1, i32 1, i32 0 ; <i32*> [#uses=1]
- %32 = load i32* %31, align 4 ; <i32> [#uses=2]
- %33 = getelementptr %struct.pf_state_key* %b, i32 0, i32 1, i32 1, i32 0 ; <i32*> [#uses=1]
- %34 = load i32* %33, align 4 ; <i32> [#uses=2]
- %35 = sub i32 %32, %34 ; <i32> [#uses=1]
- %36 = icmp eq i32 %32, %34 ; <i1> [#uses=1]
- br i1 %36, label %bb23, label %bb79
-
-bb23: ; preds = %bb21, %bb19, %bb18, %bb17, %bb4, %bb3
- %cond = icmp eq i32 %6, 2 ; <i1> [#uses=1]
- br i1 %cond, label %bb24, label %bb70
-
-bb24: ; preds = %bb23
- ret i32 1
-
-bb70: ; preds = %bb23
- %37 = load i32 (%struct.pf_app_state*, %struct.pf_app_state*)** null, align 4 ; <i32 (%struct.pf_app_state*, %struct.pf_app_state*)*> [#uses=3]
- br i1 false, label %bb78, label %bb73
-
-bb73: ; preds = %bb70
- %38 = load i32 (%struct.pf_app_state*, %struct.pf_app_state*)** null, align 4 ; <i32 (%struct.pf_app_state*, %struct.pf_app_state*)*> [#uses=2]
- %39 = icmp eq i32 (%struct.pf_app_state*, %struct.pf_app_state*)* %38, null ; <i1> [#uses=1]
- br i1 %39, label %bb78, label %bb74
-
-bb74: ; preds = %bb73
- %40 = ptrtoint i32 (%struct.pf_app_state*, %struct.pf_app_state*)* %37 to i32 ; <i32> [#uses=1]
- %41 = sub i32 0, %40 ; <i32> [#uses=1]
- %42 = icmp eq i32 (%struct.pf_app_state*, %struct.pf_app_state*)* %38, %37 ; <i1> [#uses=1]
- br i1 %42, label %bb76, label %bb79
-
-bb76: ; preds = %bb74
- %43 = tail call i32 %37(%struct.pf_app_state* null, %struct.pf_app_state* null) nounwind ; <i32> [#uses=1]
- ret i32 %43
-
-bb78: ; preds = %bb73, %bb70
- ret i32 0
-
-bb79: ; preds = %bb74, %bb21, %bb19, %bb10, %bb4, %bb1, %entry
- %.0 = phi i32 [ %3, %entry ], [ %10, %bb1 ], [ %14, %bb4 ], [ %20, %bb10 ], [ %30, %bb19 ], [ %35, %bb21 ], [ %41, %bb74 ] ; <i32> [#uses=1]
- ret i32 %.0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll
deleted file mode 100644
index d1f9cf8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll
+++ /dev/null
@@ -1,117 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -disable-fp-elim -relocation-model=pic
-; PR4099
-
- type { [62 x %struct.Bitvec*] } ; type %0
- type { i8* } ; type %1
- type { double } ; type %2
- %struct..5sPragmaType = type { i8*, i32 }
- %struct.AggInfo = type { i8, i8, i32, %struct.ExprList*, i32, %struct.AggInfo_col*, i32, i32, i32, %struct.AggInfo_func*, i32, i32 }
- %struct.AggInfo_col = type { %struct.Table*, i32, i32, i32, i32, %struct.Expr* }
- %struct.AggInfo_func = type { %struct.Expr*, %struct.FuncDef*, i32, i32 }
- %struct.AuxData = type { i8*, void (i8*)* }
- %struct.Bitvec = type { i32, i32, i32, %0 }
- %struct.BtCursor = type { %struct.Btree*, %struct.BtShared*, %struct.BtCursor*, %struct.BtCursor*, i32 (i8*, i32, i8*, i32, i8*)*, i8*, i32, %struct.MemPage*, i32, %struct.CellInfo, i8, i8, i8*, i64, i32, i8, i32* }
- %struct.BtLock = type { %struct.Btree*, i32, i8, %struct.BtLock* }
- %struct.BtShared = type { %struct.Pager*, %struct.sqlite3*, %struct.BtCursor*, %struct.MemPage*, i8, i8, i8, i8, i8, i8, i8, i8, i32, i16, i16, i32, i32, i32, i32, i8, i32, i8*, void (i8*)*, %struct.sqlite3_mutex*, %struct.BusyHandler, i32, %struct.BtShared*, %struct.BtLock*, %struct.Btree* }
- %struct.Btree = type { %struct.sqlite3*, %struct.BtShared*, i8, i8, i8, i32, %struct.Btree*, %struct.Btree* }
- %struct.BtreeMutexArray = type { i32, [11 x %struct.Btree*] }
- %struct.BusyHandler = type { i32 (i8*, i32)*, i8*, i32 }
- %struct.CellInfo = type { i8*, i64, i32, i32, i16, i16, i16, i16 }
- %struct.CollSeq = type { i8*, i8, i8, i8*, i32 (i8*, i32, i8*, i32, i8*)*, void (i8*)* }
- %struct.Column = type { i8*, %struct.Expr*, i8*, i8*, i8, i8, i8, i8 }
- %struct.Context = type { i64, i32, %struct.Fifo }
- %struct.CountCtx = type { i64 }
- %struct.Cursor = type { %struct.BtCursor*, i32, i64, i64, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i64, %struct.Btree*, i32, i8*, i64, i8*, %struct.KeyInfo*, i32, i64, %struct.sqlite3_vtab_cursor*, %struct.sqlite3_module*, i32, i32, i32*, i32*, i8* }
- %struct.Db = type { i8*, %struct.Btree*, i8, i8, i8*, void (i8*)*, %struct.Schema* }
- %struct.DbPage = type { %struct.Pager*, i32, %struct.DbPage*, %struct.DbPage*, %struct.PagerLruLink, %struct.DbPage*, i8, i8, i8, i8, i8, i16, %struct.DbPage*, %struct.DbPage*, i8* }
- %struct.Expr = type { i8, i8, i16, %struct.CollSeq*, %struct.Expr*, %struct.Expr*, %struct.ExprList*, %struct..5sPragmaType, %struct..5sPragmaType, i32, i32, %struct.AggInfo*, i32, i32, %struct.Select*, %struct.Table*, i32 }
- %struct.ExprList = type { i32, i32, i32, %struct.ExprList_item* }
- %struct.ExprList_item = type { %struct.Expr*, i8*, i8, i8, i8 }
- %struct.FKey = type { %struct.Table*, %struct.FKey*, i8*, %struct.FKey*, i32, %struct.sColMap*, i8, i8, i8, i8 }
- %struct.Fifo = type { i32, %struct.FifoPage*, %struct.FifoPage* }
- %struct.FifoPage = type { i32, i32, i32, %struct.FifoPage*, [1 x i64] }
- %struct.FuncDef = type { i16, i8, i8, i8, i8*, %struct.FuncDef*, void (%struct.sqlite3_context*, i32, %struct.Mem**)*, void (%struct.sqlite3_context*, i32, %struct.Mem**)*, void (%struct.sqlite3_context*)*, [1 x i8] }
- %struct.Hash = type { i8, i8, i32, i32, %struct.HashElem*, %struct._ht* }
- %struct.HashElem = type { %struct.HashElem*, %struct.HashElem*, i8*, i8*, i32 }
- %struct.IdList = type { %struct..5sPragmaType*, i32, i32 }
- %struct.Index = type { i8*, i32, i32*, i32*, %struct.Table*, i32, i8, i8, i8*, %struct.Index*, %struct.Schema*, i8*, i8** }
- %struct.KeyInfo = type { %struct.sqlite3*, i8, i8, i8, i32, i8*, [1 x %struct.CollSeq*] }
- %struct.Mem = type { %struct.CountCtx, double, %struct.sqlite3*, i8*, i32, i16, i8, i8, void (i8*)* }
- %struct.MemPage = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i16, i16, i16, i16, i16, i16, [5 x %struct._OvflCell], %struct.BtShared*, i8*, %struct.DbPage*, i32, %struct.MemPage* }
- %struct.Module = type { %struct.sqlite3_module*, i8*, i8*, void (i8*)* }
- %struct.Op = type { i8, i8, i8, i8, i32, i32, i32, %1 }
- %struct.Pager = type { %struct.sqlite3_vfs*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.Bitvec*, %struct.Bitvec*, i8*, i8*, i8*, i8*, %struct.sqlite3_file*, %struct.sqlite3_file*, %struct.sqlite3_file*, %struct.BusyHandler*, %struct.PagerLruList, %struct.DbPage*, %struct.DbPage*, %struct.DbPage*, i64, i64, i64, i64, i64, i32, void (%struct.DbPage*, i32)*, void (%struct.DbPage*, i32)*, i32, %struct.DbPage**, i8*, [16 x i8] }
- %struct.PagerLruLink = type { %struct.DbPage*, %struct.DbPage* }
- %struct.PagerLruList = type { %struct.DbPage*, %struct.DbPage*, %struct.DbPage* }
- %struct.Schema = type { i32, %struct.Hash, %struct.Hash, %struct.Hash, %struct.Hash, %struct.Table*, i8, i8, i16, i32, %struct.sqlite3* }
- %struct.Select = type { %struct.ExprList*, i8, i8, i8, i8, i8, i8, i8, %struct.SrcList*, %struct.Expr*, %struct.ExprList*, %struct.Expr*, %struct.ExprList*, %struct.Select*, %struct.Select*, %struct.Select*, %struct.Expr*, %struct.Expr*, i32, i32, [3 x i32] }
- %struct.SrcList = type { i16, i16, [1 x %struct.SrcList_item] }
- %struct.SrcList_item = type { i8*, i8*, i8*, %struct.Table*, %struct.Select*, i8, i8, i32, %struct.Expr*, %struct.IdList*, i64 }
- %struct.Table = type { i8*, i32, %struct.Column*, i32, %struct.Index*, i32, %struct.Select*, i32, %struct.Trigger*, %struct.FKey*, i8*, %struct.Expr*, i32, i8, i8, i8, i8, i8, i8, i8, %struct.Module*, %struct.sqlite3_vtab*, i32, i8**, %struct.Schema* }
- %struct.Trigger = type { i8*, i8*, i8, i8, %struct.Expr*, %struct.IdList*, %struct..5sPragmaType, %struct.Schema*, %struct.Schema*, %struct.TriggerStep*, %struct.Trigger* }
- %struct.TriggerStep = type { i32, i32, %struct.Trigger*, %struct.Select*, %struct..5sPragmaType, %struct.Expr*, %struct.ExprList*, %struct.IdList*, %struct.TriggerStep*, %struct.TriggerStep* }
- %struct.Vdbe = type { %struct.sqlite3*, %struct.Vdbe*, %struct.Vdbe*, i32, i32, %struct.Op*, i32, i32, i32*, %struct.Mem**, %struct.Mem*, i32, %struct.Cursor**, i32, %struct.Mem*, i8**, i32, i32, i32, %struct.Mem*, i32, i32, %struct.Fifo, i32, i32, %struct.Context*, i32, i32, i32, i32, i32, [25 x i32], i32, i32, i8**, i8*, %struct.Mem*, i8, i8, i8, i8, i8, i8, i32, i64, i32, %struct.BtreeMutexArray, i32, i8*, i32 }
- %struct.VdbeFunc = type { %struct.FuncDef*, i32, [1 x %struct.AuxData] }
- %struct._OvflCell = type { i8*, i16 }
- %struct._ht = type { i32, %struct.HashElem* }
- %struct.sColMap = type { i32, i8* }
- %struct.sqlite3 = type { %struct.sqlite3_vfs*, i32, %struct.Db*, i32, i32, i32, i32, i8, i8, i8, i8, i32, %struct.CollSeq*, i64, i64, i32, i32, i32, %struct.sqlite3_mutex*, %struct.sqlite3InitInfo, i32, i8**, %struct.Vdbe*, i32, void (i8*, i8*)*, i8*, void (i8*, i8*, i64)*, i8*, i8*, i32 (i8*)*, i8*, void (i8*)*, i8*, void (i8*, i32, i8*, i8*, i64)*, void (i8*, %struct.sqlite3*, i32, i8*)*, void (i8*, %struct.sqlite3*, i32, i8*)*, i8*, %struct.Mem*, i8*, i8*, %2, i32 (i8*, i32, i8*, i8*, i8*, i8*)*, i8*, i32 (i8*)*, i8*, i32, %struct.Hash, %struct.Table*, %struct.sqlite3_vtab**, i32, %struct.Hash, %struct.Hash, %struct.BusyHandler, i32, [2 x %struct.Db], i8 }
- %struct.sqlite3InitInfo = type { i32, i32, i8 }
- %struct.sqlite3_context = type { %struct.FuncDef*, %struct.VdbeFunc*, %struct.Mem, %struct.Mem*, i32, %struct.CollSeq* }
- %struct.sqlite3_file = type { %struct.sqlite3_io_methods* }
- %struct.sqlite3_index_constraint = type { i32, i8, i8, i32 }
- %struct.sqlite3_index_constraint_usage = type { i32, i8 }
- %struct.sqlite3_index_info = type { i32, %struct.sqlite3_index_constraint*, i32, %struct.sqlite3_index_constraint_usage*, %struct.sqlite3_index_constraint_usage*, i32, i8*, i32, i32, double }
- %struct.sqlite3_io_methods = type { i32, i32 (%struct.sqlite3_file*)*, i32 (%struct.sqlite3_file*, i8*, i32, i64)*, i32 (%struct.sqlite3_file*, i8*, i32, i64)*, i32 (%struct.sqlite3_file*, i64)*, i32 (%struct.sqlite3_file*, i32)*, i32 (%struct.sqlite3_file*, i64*)*, i32 (%struct.sqlite3_file*, i32)*, i32 (%struct.sqlite3_file*, i32)*, i32 (%struct.sqlite3_file*)*, i32 (%struct.sqlite3_file*, i32, i8*)*, i32 (%struct.sqlite3_file*)*, i32 (%struct.sqlite3_file*)* }
- %struct.sqlite3_module = type { i32, i32 (%struct.sqlite3*, i8*, i32, i8**, %struct.sqlite3_vtab**, i8**)*, i32 (%struct.sqlite3*, i8*, i32, i8**, %struct.sqlite3_vtab**, i8**)*, i32 (%struct.sqlite3_vtab*, %struct.sqlite3_index_info*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*, %struct.sqlite3_vtab_cursor**)*, i32 (%struct.sqlite3_vtab_cursor*)*, i32 (%struct.sqlite3_vtab_cursor*, i32, i8*, i32, %struct.Mem**)*, i32 (%struct.sqlite3_vtab_cursor*)*, i32 (%struct.sqlite3_vtab_cursor*)*, i32 (%struct.sqlite3_vtab_cursor*, %struct.sqlite3_context*, i32)*, i32 (%struct.sqlite3_vtab_cursor*, i64*)*, i32 (%struct.sqlite3_vtab*, i32, %struct.Mem**, i64*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*, i32, i8*, void (%struct.sqlite3_context*, i32, %struct.Mem**)**, i8**)*, i32 (%struct.sqlite3_vtab*, i8*)* }
- %struct.sqlite3_mutex = type opaque
- %struct.sqlite3_vfs = type { i32, i32, i32, %struct.sqlite3_vfs*, i8*, i8*, i32 (%struct.sqlite3_vfs*, i8*, %struct.sqlite3_file*, i32, i32*)*, i32 (%struct.sqlite3_vfs*, i8*, i32)*, i32 (%struct.sqlite3_vfs*, i8*, i32)*, i32 (%struct.sqlite3_vfs*, i32, i8*)*, i32 (%struct.sqlite3_vfs*, i8*, i32, i8*)*, i8* (%struct.sqlite3_vfs*, i8*)*, void (%struct.sqlite3_vfs*, i32, i8*)*, i8* (%struct.sqlite3_vfs*, i8*, i8*)*, void (%struct.sqlite3_vfs*, i8*)*, i32 (%struct.sqlite3_vfs*, i32, i8*)*, i32 (%struct.sqlite3_vfs*, i32)*, i32 (%struct.sqlite3_vfs*, double*)* }
- %struct.sqlite3_vtab = type { %struct.sqlite3_module*, i32, i8* }
- %struct.sqlite3_vtab_cursor = type { %struct.sqlite3_vtab* }
-
-define fastcc void @dropCell(%struct.MemPage* nocapture %pPage, i32 %idx, i32 %sz) nounwind ssp {
-entry:
- %0 = load i8** null, align 8 ; <i8*> [#uses=4]
- %1 = or i32 0, 0 ; <i32> [#uses=1]
- %2 = icmp slt i32 %sz, 4 ; <i1> [#uses=1]
- %size_addr.0.i = select i1 %2, i32 4, i32 %sz ; <i32> [#uses=1]
- br label %bb3.i
-
-bb3.i: ; preds = %bb3.i, %entry
- %3 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- %or.cond.i = or i1 %3, false ; <i1> [#uses=1]
- br i1 %or.cond.i, label %bb5.i, label %bb3.i
-
-bb5.i: ; preds = %bb3.i
- %4 = getelementptr i8* %0, i64 0 ; <i8*> [#uses=1]
- store i8 0, i8* %4, align 1
- %5 = getelementptr i8* %0, i64 0 ; <i8*> [#uses=1]
- store i8 0, i8* %5, align 1
- %6 = add i32 %1, 2 ; <i32> [#uses=1]
- %7 = zext i32 %6 to i64 ; <i64> [#uses=2]
- %8 = getelementptr i8* %0, i64 %7 ; <i8*> [#uses=1]
- %9 = lshr i32 %size_addr.0.i, 8 ; <i32> [#uses=1]
- %10 = trunc i32 %9 to i8 ; <i8> [#uses=1]
- store i8 %10, i8* %8, align 1
- %.sum31.i = add i64 %7, 1 ; <i64> [#uses=1]
- %11 = getelementptr i8* %0, i64 %.sum31.i ; <i8*> [#uses=1]
- store i8 0, i8* %11, align 1
- br label %bb11.outer.i
-
-bb11.outer.i: ; preds = %bb11.outer.i, %bb5.i
- %12 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- br i1 %12, label %bb12.i, label %bb11.outer.i
-
-bb12.i: ; preds = %bb11.outer.i
- %i.08 = add i32 %idx, 1 ; <i32> [#uses=1]
- %13 = icmp sgt i32 0, %i.08 ; <i1> [#uses=1]
- br i1 %13, label %bb, label %bb2
-
-bb: ; preds = %bb12.i
- br label %bb2
-
-bb2: ; preds = %bb, %bb12.i
- %14 = getelementptr %struct.MemPage* %pPage, i64 0, i32 1 ; <i8*> [#uses=1]
- store i8 1, i8* %14, align 1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-scale.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-04-scale.ll
deleted file mode 100644
index e4c756c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-04-scale.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-unknown-linux-gnu
-; PR3995
-
- %struct.vtable = type { i32 (...)** }
- %struct.array = type { %struct.impl, [256 x %struct.pair], [256 x %struct.pair], [256 x %struct.pair], [256 x %struct.pair], [256 x %struct.pair], [256 x %struct.pair] }
- %struct.impl = type { %struct.vtable, i8, %struct.impl*, i32, i32, i64, i64 }
- %struct.pair = type { i64, i64 }
-
-define void @test() {
-entry:
- %0 = load i32* null, align 4 ; <i32> [#uses=1]
- %1 = lshr i32 %0, 8 ; <i32> [#uses=1]
- %2 = and i32 %1, 255 ; <i32> [#uses=1]
- %3 = getelementptr %struct.array* null, i32 0, i32 3 ; <[256 x %struct.pair]*> [#uses=1]
- %4 = getelementptr [256 x %struct.pair]* %3, i32 0, i32 %2 ; <%struct.pair*> [#uses=1]
- %5 = getelementptr %struct.pair* %4, i32 0, i32 1 ; <i64*> [#uses=1]
- %6 = load i64* %5, align 4 ; <i64> [#uses=1]
- %7 = xor i64 0, %6 ; <i64> [#uses=1]
- %8 = xor i64 %7, 0 ; <i64> [#uses=1]
- %9 = xor i64 %8, 0 ; <i64> [#uses=1]
- store i64 %9, i64* null, align 8
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-05-08-InlineAsmIOffset.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-05-08-InlineAsmIOffset.ll
deleted file mode 100644
index 738b5fb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-05-08-InlineAsmIOffset.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -relocation-model=static > %t
-; RUN: grep "1: ._pv_cpu_ops+8" %t
-; RUN: grep "2: ._G" %t
-; PR4152
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.6"
- %struct.pv_cpu_ops = type { i32, [2 x i32] }
-@pv_cpu_ops = external global %struct.pv_cpu_ops ; <%struct.pv_cpu_ops*> [#uses=1]
-@G = external global i32 ; <i32*> [#uses=1]
-
-define void @x() nounwind {
-entry:
- tail call void asm sideeffect "1: $0", "i,~{dirflag},~{fpsr},~{flags}"(i32* getelementptr (%struct.pv_cpu_ops* @pv_cpu_ops, i32 0, i32 1, i32 1)) nounwind
- tail call void asm sideeffect "2: $0", "i,~{dirflag},~{fpsr},~{flags}"(i32* @G) nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll
deleted file mode 100644
index a5e28c0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR4188
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.6"
-@g_9 = external global i32 ; <i32*> [#uses=1]
-
-define i32 @int86(i32 %p_87) nounwind {
-entry:
- %0 = trunc i32 %p_87 to i8 ; <i8> [#uses=1]
- %1 = icmp ne i8 %0, 0 ; <i1> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb.i, %bb, %entry
- %2 = volatile load i32* @g_9, align 4 ; <i32> [#uses=2]
- %3 = icmp sgt i32 %2, 1 ; <i1> [#uses=1]
- %4 = and i1 %3, %1 ; <i1> [#uses=1]
- br i1 %4, label %bb.i, label %bb
-
-bb.i: ; preds = %bb
- %5 = icmp sgt i32 0, %2 ; <i1> [#uses=0]
- br label %bb
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-05-19-SingleElementExtractElement.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-05-19-SingleElementExtractElement.ll
deleted file mode 100644
index 6e062fb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-05-19-SingleElementExtractElement.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=x86-64
-; PR3886
-
-define i32 @main(i32 %argc, i8** nocapture %argv) nounwind {
-entry:
- %a = call <1 x i64> @bar()
- %tmp5.i = extractelement <1 x i64> %a, i32 0
- %tmp11 = bitcast i64 %tmp5.i to <1 x i64>
- %tmp8 = extractelement <1 x i64> %tmp11, i32 0
- %call6 = call i32 (i64)* @foo(i64 %tmp8)
- ret i32 undef
-}
-
-declare i32 @foo(i64)
-
-declare <1 x i64> @bar()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-05-23-available_externally.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-05-23-available_externally.ll
deleted file mode 100644
index 94773d9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-05-23-available_externally.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -relocation-model=pic | grep atoi | grep PLT
-; PR4253
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define i32 @foo(i8* %x) nounwind readonly {
-entry:
- %call = tail call fastcc i32 @atoi(i8* %x) nounwind readonly ; <i32> [#uses=1]
- ret i32 %call
-}
-
-define available_externally fastcc i32 @atoi(i8* %__nptr) nounwind readonly {
-entry:
- %call = tail call i64 @strtol(i8* nocapture %__nptr, i8** null, i32 10) nounwind readonly ; <i64> [#uses=1]
- %conv = trunc i64 %call to i32 ; <i32> [#uses=1]
- ret i32 %conv
-}
-
-declare i64 @strtol(i8*, i8** nocapture, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-05-23-dagcombine-shifts.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-05-23-dagcombine-shifts.ll
deleted file mode 100644
index 3cd5416..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-05-23-dagcombine-shifts.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s | FileCheck %s
-
-; Check that the shr(shl X, 56), 48) is not mistakenly turned into
-; a shr (X, -8) that gets subsequently "optimized away" as undef
-; PR4254
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define i64 @foo(i64 %b) nounwind readnone {
-entry:
-; CHECK: foo:
-; CHECK: shlq $56, %rdi
-; CHECK: sarq $48, %rdi
-; CHECK: leaq 1(%rdi), %rax
- %shl = shl i64 %b, 56 ; <i64> [#uses=1]
- %shr = ashr i64 %shl, 48 ; <i64> [#uses=1]
- %add5 = or i64 %shr, 1 ; <i64> [#uses=1]
- ret i64 %add5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-05-28-DAGCombineCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-05-28-DAGCombineCrash.ll
deleted file mode 100644
index 2fd42f4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-05-28-DAGCombineCrash.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llc < %s -march=x86-64
-
- %struct.tempsym_t = type { i8*, i8*, i8*, i8*, i32, i32, i32, i32, i32 }
-
-define fastcc signext i8 @S_next_symbol(%struct.tempsym_t* %symptr) nounwind ssp {
-entry:
- br label %bb116
-
-bb: ; preds = %bb116
- switch i8 undef, label %bb14 [
- i8 9, label %bb116
- i8 32, label %bb116
- i8 10, label %bb116
- i8 13, label %bb116
- i8 12, label %bb116
- ]
-
-bb14: ; preds = %bb
- br i1 undef, label %bb75, label %bb115
-
-bb75: ; preds = %bb14
- %srcval16 = load i448* null, align 8 ; <i448> [#uses=1]
- %tmp = zext i32 undef to i448 ; <i448> [#uses=1]
- %tmp15 = shl i448 %tmp, 288 ; <i448> [#uses=1]
- %mask = and i448 %srcval16, -2135987035423586845985235064014169866455883682256196619149693890381755748887481053010428711403521 ; <i448> [#uses=1]
- %ins = or i448 %tmp15, %mask ; <i448> [#uses=1]
- store i448 %ins, i448* null, align 8
- ret i8 1
-
-bb115: ; preds = %bb14
- ret i8 1
-
-bb116: ; preds = %bb, %bb, %bb, %bb, %bb, %entry
- br i1 undef, label %bb, label %bb117
-
-bb117: ; preds = %bb116
- ret i8 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-05-30-ISelBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-05-30-ISelBug.ll
deleted file mode 100644
index af552d4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-05-30-ISelBug.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -march=x86-64 | not grep {movzbl %\[abcd\]h,}
-
-define void @BZ2_bzDecompress_bb5_2E_outer_bb35_2E_i_bb54_2E_i(i32*, i32 %c_nblock_used.2.i, i32 %.reload51, i32* %.out, i32* %.out1, i32* %.out2, i32* %.out3) nounwind {
-newFuncRoot:
- br label %bb54.i
-
-bb35.i.backedge.exitStub: ; preds = %bb54.i
- store i32 %6, i32* %.out
- store i32 %10, i32* %.out1
- store i32 %11, i32* %.out2
- store i32 %12, i32* %.out3
- ret void
-
-bb54.i: ; preds = %newFuncRoot
- %1 = zext i32 %.reload51 to i64 ; <i64> [#uses=1]
- %2 = getelementptr i32* %0, i64 %1 ; <i32*> [#uses=1]
- %3 = load i32* %2, align 4 ; <i32> [#uses=2]
- %4 = lshr i32 %3, 8 ; <i32> [#uses=1]
- %5 = and i32 %3, 255 ; <i32> [#uses=1]
- %6 = add i32 %5, 4 ; <i32> [#uses=1]
- %7 = zext i32 %4 to i64 ; <i64> [#uses=1]
- %8 = getelementptr i32* %0, i64 %7 ; <i32*> [#uses=1]
- %9 = load i32* %8, align 4 ; <i32> [#uses=2]
- %10 = and i32 %9, 255 ; <i32> [#uses=1]
- %11 = lshr i32 %9, 8 ; <i32> [#uses=1]
- %12 = add i32 %c_nblock_used.2.i, 5 ; <i32> [#uses=1]
- br label %bb35.i.backedge.exitStub
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-02-RewriterBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-06-02-RewriterBug.ll
deleted file mode 100644
index 779f985..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-02-RewriterBug.ll
+++ /dev/null
@@ -1,362 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-undermydesk-freebsd8.0 -relocation-model=pic -disable-fp-elim
-; PR4225
-
-define void @sha256_block1(i32* nocapture %arr, i8* nocapture %in, i64 %num) nounwind {
-entry:
- br i1 undef, label %while.end, label %bb.nph
-
-bb.nph: ; preds = %entry
- br label %while.body
-
-while.body: ; preds = %for.end, %bb.nph
- %indvar2787 = phi i64 [ 0, %bb.nph ], [ %indvar.next2788, %for.end ] ; <i64> [#uses=2]
- %tmp2791 = mul i64 %indvar2787, 44 ; <i64> [#uses=0]
- %ctg22996 = getelementptr i8* %in, i64 0 ; <i8*> [#uses=1]
- %conv = zext i32 undef to i64 ; <i64> [#uses=1]
- %conv11 = zext i32 undef to i64 ; <i64> [#uses=1]
- %tmp18 = load i32* undef ; <i32> [#uses=1]
- %conv19 = zext i32 %tmp18 to i64 ; <i64> [#uses=1]
- %tmp30 = load i32* undef ; <i32> [#uses=1]
- %conv31 = zext i32 %tmp30 to i64 ; <i64> [#uses=4]
- %ptrincdec3065 = load i8* null ; <i8> [#uses=1]
- %conv442709 = zext i8 %ptrincdec3065 to i64 ; <i64> [#uses=1]
- %shl45 = shl i64 %conv442709, 16 ; <i64> [#uses=1]
- %conv632707 = zext i8 undef to i64 ; <i64> [#uses=1]
- %or = or i64 %shl45, 0 ; <i64> [#uses=1]
- %or55 = or i64 %or, %conv632707 ; <i64> [#uses=1]
- %or64 = or i64 %or55, 0 ; <i64> [#uses=1]
- %shr85 = lshr i64 %conv31, 25 ; <i64> [#uses=0]
- %add = add i64 %conv11, 1508970993 ; <i64> [#uses=1]
- %add95 = add i64 %add, 0 ; <i64> [#uses=1]
- %add98 = add i64 %add95, 0 ; <i64> [#uses=1]
- %add99 = add i64 %add98, %or64 ; <i64> [#uses=1]
- %add134 = add i64 %add99, 0 ; <i64> [#uses=4]
- store i32 undef, i32* undef
- %shl187 = shl i64 %add134, 21 ; <i64> [#uses=0]
- %and203 = and i64 %add134, %conv31 ; <i64> [#uses=1]
- %xor208 = xor i64 0, %and203 ; <i64> [#uses=1]
- %add212 = add i64 0, %xor208 ; <i64> [#uses=1]
- %add213 = add i64 %add212, 0 ; <i64> [#uses=1]
- %add248 = add i64 %add213, 0 ; <i64> [#uses=3]
- %conv2852690 = zext i8 undef to i64 ; <i64> [#uses=1]
- %or277 = or i64 0, %conv2852690 ; <i64> [#uses=1]
- %or286 = or i64 %or277, 0 ; <i64> [#uses=1]
- %neg319 = xor i64 %add248, 4294967295 ; <i64> [#uses=1]
- %and321 = and i64 %neg319, %conv31 ; <i64> [#uses=1]
- %xor322 = xor i64 %and321, 0 ; <i64> [#uses=1]
- %add314 = add i64 %conv, 2870763221 ; <i64> [#uses=1]
- %add323 = add i64 %add314, %or286 ; <i64> [#uses=1]
- %add326 = add i64 %add323, %xor322 ; <i64> [#uses=1]
- %add327 = add i64 %add326, 0 ; <i64> [#uses=2]
- %add362 = add i64 %add327, %conv19 ; <i64> [#uses=4]
- %add365 = add i64 0, %add327 ; <i64> [#uses=3]
- %shl409 = shl i64 %add362, 26 ; <i64> [#uses=0]
- %and431 = and i64 %add362, %add248 ; <i64> [#uses=1]
- %neg433 = xor i64 %add362, -1 ; <i64> [#uses=1]
- %and435 = and i64 %add134, %neg433 ; <i64> [#uses=1]
- %xor436 = xor i64 %and431, %and435 ; <i64> [#uses=1]
- %add428 = add i64 %conv31, 3624381080 ; <i64> [#uses=1]
- %add437 = add i64 %add428, 0 ; <i64> [#uses=1]
- %add440 = add i64 %add437, %xor436 ; <i64> [#uses=1]
- %add441 = add i64 %add440, 0 ; <i64> [#uses=1]
- %shl443 = shl i64 %add365, 30 ; <i64> [#uses=1]
- %and445 = lshr i64 %add365, 2 ; <i64> [#uses=1]
- %shr446 = and i64 %and445, 1073741823 ; <i64> [#uses=1]
- %or447 = or i64 %shr446, %shl443 ; <i64> [#uses=1]
- %xor461 = xor i64 0, %or447 ; <i64> [#uses=1]
- %add473 = add i64 %xor461, 0 ; <i64> [#uses=1]
- %add479 = add i64 %add473, %add441 ; <i64> [#uses=3]
- %conv4932682 = zext i8 undef to i64 ; <i64> [#uses=1]
- %shl494 = shl i64 %conv4932682, 16 ; <i64> [#uses=1]
- %ptrincdec4903012 = load i8* null ; <i8> [#uses=1]
- %conv5032681 = zext i8 %ptrincdec4903012 to i64 ; <i64> [#uses=1]
- %shl504 = shl i64 %conv5032681, 8 ; <i64> [#uses=1]
- %ptrincdec5003009 = load i8* null ; <i8> [#uses=1]
- %conv5132680 = zext i8 %ptrincdec5003009 to i64 ; <i64> [#uses=1]
- %or495 = or i64 %shl494, 0 ; <i64> [#uses=1]
- %or505 = or i64 %or495, %conv5132680 ; <i64> [#uses=1]
- %or514 = or i64 %or505, %shl504 ; <i64> [#uses=1]
- store i32 undef, i32* undef
- %or540 = or i64 undef, 0 ; <i64> [#uses=0]
- %add542 = add i64 %add134, 310598401 ; <i64> [#uses=1]
- %add551 = add i64 %add542, %or514 ; <i64> [#uses=1]
- %add554 = add i64 %add551, 0 ; <i64> [#uses=1]
- %add555 = add i64 %add554, 0 ; <i64> [#uses=1]
- %or561 = or i64 undef, undef ; <i64> [#uses=1]
- %or567 = or i64 undef, undef ; <i64> [#uses=1]
- %and572 = lshr i64 %add479, 22 ; <i64> [#uses=1]
- %shr573 = and i64 %and572, 1023 ; <i64> [#uses=1]
- %or574 = or i64 %shr573, 0 ; <i64> [#uses=1]
- %xor568 = xor i64 %or567, %or574 ; <i64> [#uses=1]
- %xor575 = xor i64 %xor568, %or561 ; <i64> [#uses=1]
- %add587 = add i64 %xor575, 0 ; <i64> [#uses=1]
- %add593 = add i64 %add587, %add555 ; <i64> [#uses=1]
- %ptrincdec6043000 = load i8* null ; <i8> [#uses=1]
- %conv6172676 = zext i8 %ptrincdec6043000 to i64 ; <i64> [#uses=1]
- %shl618 = shl i64 %conv6172676, 8 ; <i64> [#uses=1]
- %ptrincdec6142997 = load i8* %ctg22996 ; <i8> [#uses=1]
- %conv6272675 = zext i8 %ptrincdec6142997 to i64 ; <i64> [#uses=1]
- %or619 = or i64 0, %conv6272675 ; <i64> [#uses=1]
- %or628 = or i64 %or619, %shl618 ; <i64> [#uses=1]
- %add656 = add i64 %add248, 607225278 ; <i64> [#uses=1]
- %add665 = add i64 %add656, %or628 ; <i64> [#uses=1]
- %add668 = add i64 %add665, 0 ; <i64> [#uses=1]
- %add669 = add i64 %add668, 0 ; <i64> [#uses=1]
- %and699 = and i64 %add479, %add365 ; <i64> [#uses=1]
- %xor700 = xor i64 0, %and699 ; <i64> [#uses=1]
- %add701 = add i64 0, %xor700 ; <i64> [#uses=1]
- %add707 = add i64 %add701, %add669 ; <i64> [#uses=4]
- %ptrincdec6242994 = load i8* null ; <i8> [#uses=1]
- %conv7122673 = zext i8 %ptrincdec6242994 to i64 ; <i64> [#uses=1]
- %shl713 = shl i64 %conv7122673, 24 ; <i64> [#uses=1]
- %conv7412670 = zext i8 undef to i64 ; <i64> [#uses=1]
- %or723 = or i64 0, %shl713 ; <i64> [#uses=1]
- %or733 = or i64 %or723, %conv7412670 ; <i64> [#uses=1]
- %or742 = or i64 %or733, 0 ; <i64> [#uses=2]
- %conv743 = trunc i64 %or742 to i32 ; <i32> [#uses=1]
- store i32 %conv743, i32* undef
- %xor762 = xor i64 undef, 0 ; <i64> [#uses=0]
- %add770 = add i64 %add362, 1426881987 ; <i64> [#uses=1]
- %add779 = add i64 %add770, %or742 ; <i64> [#uses=1]
- %add782 = add i64 %add779, 0 ; <i64> [#uses=1]
- %add783 = add i64 %add782, 0 ; <i64> [#uses=1]
- %shl785 = shl i64 %add707, 30 ; <i64> [#uses=1]
- %and787 = lshr i64 %add707, 2 ; <i64> [#uses=1]
- %shr788 = and i64 %and787, 1073741823 ; <i64> [#uses=1]
- %or789 = or i64 %shr788, %shl785 ; <i64> [#uses=1]
- %shl791 = shl i64 %add707, 19 ; <i64> [#uses=0]
- %xor803 = xor i64 0, %or789 ; <i64> [#uses=1]
- %and813 = and i64 %add593, %add479 ; <i64> [#uses=1]
- %xor814 = xor i64 0, %and813 ; <i64> [#uses=1]
- %add815 = add i64 %xor803, %xor814 ; <i64> [#uses=1]
- %add821 = add i64 %add815, %add783 ; <i64> [#uses=1]
- %add1160 = add i64 0, %add707 ; <i64> [#uses=0]
- %add1157 = add i64 undef, undef ; <i64> [#uses=0]
- %ptrincdec11742940 = load i8* null ; <i8> [#uses=1]
- %conv11872651 = zext i8 %ptrincdec11742940 to i64 ; <i64> [#uses=1]
- %shl1188 = shl i64 %conv11872651, 8 ; <i64> [#uses=1]
- %or1198 = or i64 0, %shl1188 ; <i64> [#uses=1]
- store i32 undef, i32* undef
- %add1226 = add i64 %or1198, 3248222580 ; <i64> [#uses=1]
- %add1235 = add i64 %add1226, 0 ; <i64> [#uses=1]
- %add1238 = add i64 %add1235, 0 ; <i64> [#uses=1]
- %add1239 = add i64 %add1238, 0 ; <i64> [#uses=1]
- br label %for.cond
-
-for.cond: ; preds = %for.body, %while.body
- %add821.pn = phi i64 [ %add821, %while.body ], [ undef, %for.body ] ; <i64> [#uses=0]
- %add1239.pn = phi i64 [ %add1239, %while.body ], [ 0, %for.body ] ; <i64> [#uses=0]
- br i1 undef, label %for.end, label %for.body
-
-for.body: ; preds = %for.cond
- br label %for.cond
-
-for.end: ; preds = %for.cond
- %indvar.next2788 = add i64 %indvar2787, 1 ; <i64> [#uses=1]
- br i1 undef, label %while.end, label %while.body
-
-while.end: ; preds = %for.end, %entry
- ret void
-}
-
-define void @sha256_block2(i32* nocapture %arr, i8* nocapture %in, i64 %num) nounwind {
-entry:
- br i1 undef, label %while.end, label %bb.nph
-
-bb.nph: ; preds = %entry
- %arrayidx5 = getelementptr i32* %arr, i64 1 ; <i32*> [#uses=1]
- %arrayidx9 = getelementptr i32* %arr, i64 2 ; <i32*> [#uses=2]
- %arrayidx13 = getelementptr i32* %arr, i64 3 ; <i32*> [#uses=2]
- %arrayidx25 = getelementptr i32* %arr, i64 6 ; <i32*> [#uses=1]
- %arrayidx29 = getelementptr i32* %arr, i64 7 ; <i32*> [#uses=1]
- br label %while.body
-
-while.body: ; preds = %for.end, %bb.nph
- %tmp3 = load i32* %arr ; <i32> [#uses=2]
- %conv = zext i32 %tmp3 to i64 ; <i64> [#uses=1]
- %tmp10 = load i32* %arrayidx9 ; <i32> [#uses=1]
- %conv11 = zext i32 %tmp10 to i64 ; <i64> [#uses=1]
- %tmp14 = load i32* %arrayidx13 ; <i32> [#uses=3]
- %conv15 = zext i32 %tmp14 to i64 ; <i64> [#uses=2]
- %tmp18 = load i32* undef ; <i32> [#uses=2]
- %conv19 = zext i32 %tmp18 to i64 ; <i64> [#uses=1]
- %conv23 = zext i32 undef to i64 ; <i64> [#uses=1]
- %tmp26 = load i32* %arrayidx25 ; <i32> [#uses=1]
- %conv27 = zext i32 %tmp26 to i64 ; <i64> [#uses=1]
- %tmp30 = load i32* %arrayidx29 ; <i32> [#uses=2]
- %conv31 = zext i32 %tmp30 to i64 ; <i64> [#uses=5]
- %shl72 = shl i64 %conv31, 26 ; <i64> [#uses=1]
- %shr = lshr i64 %conv31, 6 ; <i64> [#uses=1]
- %or74 = or i64 %shl72, %shr ; <i64> [#uses=1]
- %shr85 = lshr i64 %conv31, 25 ; <i64> [#uses=0]
- %xor87 = xor i64 0, %or74 ; <i64> [#uses=1]
- %and902706 = and i32 %tmp30, %tmp3 ; <i32> [#uses=1]
- %and90 = zext i32 %and902706 to i64 ; <i64> [#uses=1]
- %xor94 = xor i64 0, %and90 ; <i64> [#uses=1]
- %add = add i64 %conv11, 1508970993 ; <i64> [#uses=1]
- %add95 = add i64 %add, %xor94 ; <i64> [#uses=1]
- %add98 = add i64 %add95, %xor87 ; <i64> [#uses=1]
- %add99 = add i64 %add98, 0 ; <i64> [#uses=2]
- %xor130 = zext i32 undef to i64 ; <i64> [#uses=1]
- %add134 = add i64 %add99, %conv27 ; <i64> [#uses=2]
- %add131 = add i64 %xor130, 0 ; <i64> [#uses=1]
- %add137 = add i64 %add131, %add99 ; <i64> [#uses=5]
- %conv1422700 = zext i8 undef to i64 ; <i64> [#uses=1]
- %shl143 = shl i64 %conv1422700, 24 ; <i64> [#uses=1]
- %ptrincdec1393051 = load i8* undef ; <i8> [#uses=1]
- %conv1512699 = zext i8 %ptrincdec1393051 to i64 ; <i64> [#uses=1]
- %shl152 = shl i64 %conv1512699, 16 ; <i64> [#uses=1]
- %conv1712697 = zext i8 undef to i64 ; <i64> [#uses=1]
- %or153 = or i64 %shl152, %shl143 ; <i64> [#uses=1]
- %or163 = or i64 %or153, %conv1712697 ; <i64> [#uses=1]
- %or172 = or i64 %or163, 0 ; <i64> [#uses=1]
- %and203 = and i64 %add134, %conv31 ; <i64> [#uses=1]
- %xor208 = xor i64 0, %and203 ; <i64> [#uses=1]
- %add200 = add i64 0, 2453635748 ; <i64> [#uses=1]
- %add209 = add i64 %add200, %or172 ; <i64> [#uses=1]
- %add212 = add i64 %add209, %xor208 ; <i64> [#uses=1]
- %add213 = add i64 %add212, 0 ; <i64> [#uses=2]
- %shl228 = shl i64 %add137, 10 ; <i64> [#uses=1]
- %and230 = lshr i64 %add137, 22 ; <i64> [#uses=1]
- %shr231 = and i64 %and230, 1023 ; <i64> [#uses=1]
- %or232 = or i64 %shr231, %shl228 ; <i64> [#uses=1]
- %xor226 = xor i64 0, %or232 ; <i64> [#uses=1]
- %xor233 = xor i64 %xor226, 0 ; <i64> [#uses=1]
- %and2362695 = zext i32 undef to i64 ; <i64> [#uses=1]
- %xor240 = and i64 %add137, %and2362695 ; <i64> [#uses=1]
- %and2432694 = and i32 %tmp18, %tmp14 ; <i32> [#uses=1]
- %and243 = zext i32 %and2432694 to i64 ; <i64> [#uses=1]
- %xor244 = xor i64 %xor240, %and243 ; <i64> [#uses=1]
- %add248 = add i64 %add213, %conv23 ; <i64> [#uses=2]
- %add245 = add i64 %xor233, %xor244 ; <i64> [#uses=1]
- %add251 = add i64 %add245, %add213 ; <i64> [#uses=1]
- %conv2752691 = zext i8 undef to i64 ; <i64> [#uses=1]
- %shl276 = shl i64 %conv2752691, 8 ; <i64> [#uses=0]
- %and317 = and i64 %add248, %add134 ; <i64> [#uses=1]
- %neg319 = xor i64 %add248, 4294967295 ; <i64> [#uses=1]
- %and321 = and i64 %neg319, %conv31 ; <i64> [#uses=1]
- %xor322 = xor i64 %and321, %and317 ; <i64> [#uses=1]
- %add314 = add i64 %conv, 2870763221 ; <i64> [#uses=1]
- %add323 = add i64 %add314, 0 ; <i64> [#uses=1]
- %add326 = add i64 %add323, %xor322 ; <i64> [#uses=1]
- %add327 = add i64 %add326, 0 ; <i64> [#uses=2]
- %and3502689 = xor i64 %add137, %conv15 ; <i64> [#uses=1]
- %xor354 = and i64 %add251, %and3502689 ; <i64> [#uses=1]
- %and357 = and i64 %add137, %conv15 ; <i64> [#uses=1]
- %xor358 = xor i64 %xor354, %and357 ; <i64> [#uses=1]
- %add362 = add i64 %add327, %conv19 ; <i64> [#uses=1]
- %add359 = add i64 0, %xor358 ; <i64> [#uses=1]
- %add365 = add i64 %add359, %add327 ; <i64> [#uses=1]
- %add770 = add i64 %add362, 1426881987 ; <i64> [#uses=1]
- %add779 = add i64 %add770, 0 ; <i64> [#uses=1]
- %add782 = add i64 %add779, 0 ; <i64> [#uses=1]
- %add783 = add i64 %add782, 0 ; <i64> [#uses=2]
- %add818 = add i64 %add783, %add365 ; <i64> [#uses=1]
- %add821 = add i64 0, %add783 ; <i64> [#uses=1]
- store i32 undef, i32* undef
- %add1046 = add i64 undef, undef ; <i64> [#uses=1]
- %add1160 = add i64 undef, undef ; <i64> [#uses=1]
- store i32 0, i32* undef
- %add1235 = add i64 0, %add818 ; <i64> [#uses=1]
- %add1238 = add i64 %add1235, 0 ; <i64> [#uses=1]
- %add1239 = add i64 %add1238, 0 ; <i64> [#uses=1]
- br label %for.cond
-
-for.cond: ; preds = %for.body, %while.body
- %h.0 = phi i64 [ undef, %while.body ], [ %add2035, %for.body ] ; <i64> [#uses=1]
- %g.0 = phi i64 [ %add1046, %while.body ], [ undef, %for.body ] ; <i64> [#uses=1]
- %f.0 = phi i64 [ %add1160, %while.body ], [ undef, %for.body ] ; <i64> [#uses=1]
- %add821.pn = phi i64 [ %add821, %while.body ], [ undef, %for.body ] ; <i64> [#uses=0]
- %add1239.pn2648 = phi i64 [ %add1239, %while.body ], [ undef, %for.body ] ; <i64> [#uses=0]
- %d.0 = phi i64 [ undef, %while.body ], [ %add2038, %for.body ] ; <i64> [#uses=2]
- br i1 undef, label %for.end, label %for.body
-
-for.body: ; preds = %for.cond
- %conv1390 = zext i32 undef to i64 ; <i64> [#uses=1]
- %add1375 = add i64 0, %h.0 ; <i64> [#uses=1]
- %add1384 = add i64 %add1375, 0 ; <i64> [#uses=1]
- %add1391 = add i64 %add1384, %conv1390 ; <i64> [#uses=1]
- %add1392 = add i64 %add1391, 0 ; <i64> [#uses=2]
- %or1411 = or i64 0, undef ; <i64> [#uses=1]
- %xor1405 = xor i64 0, %or1411 ; <i64> [#uses=1]
- %xor1412 = xor i64 %xor1405, 0 ; <i64> [#uses=1]
- %add1427 = add i64 %add1392, %d.0 ; <i64> [#uses=1]
- %add1424 = add i64 %xor1412, 0 ; <i64> [#uses=1]
- %add1430 = add i64 %add1424, %add1392 ; <i64> [#uses=5]
- %tmp1438 = load i32* undef ; <i32> [#uses=1]
- %conv1439 = zext i32 %tmp1438 to i64 ; <i64> [#uses=4]
- %shl1441 = shl i64 %conv1439, 25 ; <i64> [#uses=1]
- %shr1444 = lshr i64 %conv1439, 7 ; <i64> [#uses=1]
- %or1445 = or i64 %shl1441, %shr1444 ; <i64> [#uses=1]
- %shr1450 = lshr i64 %conv1439, 18 ; <i64> [#uses=1]
- %or1451 = or i64 0, %shr1450 ; <i64> [#uses=1]
- %shr1454 = lshr i64 %conv1439, 3 ; <i64> [#uses=1]
- %xor1452 = xor i64 %or1451, %shr1454 ; <i64> [#uses=1]
- %xor1455 = xor i64 %xor1452, %or1445 ; <i64> [#uses=1]
- %conv1464 = zext i32 undef to i64 ; <i64> [#uses=4]
- %shl1466 = shl i64 %conv1464, 15 ; <i64> [#uses=1]
- %shr1469 = lshr i64 %conv1464, 17 ; <i64> [#uses=1]
- %or1470 = or i64 %shl1466, %shr1469 ; <i64> [#uses=1]
- %shr1475 = lshr i64 %conv1464, 19 ; <i64> [#uses=1]
- %or1476 = or i64 0, %shr1475 ; <i64> [#uses=1]
- %shr1479 = lshr i64 %conv1464, 10 ; <i64> [#uses=1]
- %xor1477 = xor i64 %or1476, %shr1479 ; <i64> [#uses=1]
- %xor1480 = xor i64 %xor1477, %or1470 ; <i64> [#uses=1]
- %tmp1499 = load i32* null ; <i32> [#uses=1]
- %conv1500 = zext i32 %tmp1499 to i64 ; <i64> [#uses=1]
- %add1491 = add i64 %conv1500, 0 ; <i64> [#uses=1]
- %add1501 = add i64 %add1491, %xor1455 ; <i64> [#uses=1]
- %add1502 = add i64 %add1501, %xor1480 ; <i64> [#uses=1]
- %conv1504 = and i64 %add1502, 4294967295 ; <i64> [#uses=1]
- %tmp1541 = load i32* undef ; <i32> [#uses=1]
- %conv1542 = zext i32 %tmp1541 to i64 ; <i64> [#uses=1]
- %add1527 = add i64 %conv1542, %g.0 ; <i64> [#uses=1]
- %add1536 = add i64 %add1527, 0 ; <i64> [#uses=1]
- %add1543 = add i64 %add1536, %conv1504 ; <i64> [#uses=1]
- %add1544 = add i64 %add1543, 0 ; <i64> [#uses=1]
- %shl1546 = shl i64 %add1430, 30 ; <i64> [#uses=1]
- %and1548 = lshr i64 %add1430, 2 ; <i64> [#uses=1]
- %shr1549 = and i64 %and1548, 1073741823 ; <i64> [#uses=1]
- %or1550 = or i64 %shr1549, %shl1546 ; <i64> [#uses=1]
- %shl1552 = shl i64 %add1430, 19 ; <i64> [#uses=1]
- %or1556 = or i64 0, %shl1552 ; <i64> [#uses=1]
- %shl1559 = shl i64 %add1430, 10 ; <i64> [#uses=1]
- %or1563 = or i64 0, %shl1559 ; <i64> [#uses=1]
- %xor1557 = xor i64 %or1556, %or1563 ; <i64> [#uses=1]
- %xor1564 = xor i64 %xor1557, %or1550 ; <i64> [#uses=1]
- %add1576 = add i64 %xor1564, 0 ; <i64> [#uses=1]
- %add1582 = add i64 %add1576, %add1544 ; <i64> [#uses=3]
- store i32 undef, i32* undef
- %tmp1693 = load i32* undef ; <i32> [#uses=1]
- %conv1694 = zext i32 %tmp1693 to i64 ; <i64> [#uses=1]
- %add1679 = add i64 %conv1694, %f.0 ; <i64> [#uses=1]
- %add1688 = add i64 %add1679, 0 ; <i64> [#uses=1]
- %add1695 = add i64 %add1688, 0 ; <i64> [#uses=1]
- %add1696 = add i64 %add1695, 0 ; <i64> [#uses=1]
- %shl1698 = shl i64 %add1582, 30 ; <i64> [#uses=0]
- %shl1704 = shl i64 %add1582, 19 ; <i64> [#uses=0]
- %add1734 = add i64 0, %add1696 ; <i64> [#uses=1]
- %add1983 = add i64 0, %add1427 ; <i64> [#uses=1]
- %add1992 = add i64 %add1983, 0 ; <i64> [#uses=1]
- %add1999 = add i64 %add1992, 0 ; <i64> [#uses=1]
- %add2000 = add i64 %add1999, 0 ; <i64> [#uses=2]
- %and2030 = and i64 %add1734, %add1582 ; <i64> [#uses=1]
- %xor2031 = xor i64 0, %and2030 ; <i64> [#uses=1]
- %add2035 = add i64 %add2000, %add1430 ; <i64> [#uses=1]
- %add2032 = add i64 0, %xor2031 ; <i64> [#uses=1]
- %add2038 = add i64 %add2032, %add2000 ; <i64> [#uses=1]
- store i32 0, i32* undef
- br label %for.cond
-
-for.end: ; preds = %for.cond
- store i32 undef, i32* %arrayidx5
- store i32 undef, i32* %arrayidx9
- %d.02641 = trunc i64 %d.0 to i32 ; <i32> [#uses=1]
- %conv2524 = add i32 %tmp14, %d.02641 ; <i32> [#uses=1]
- store i32 %conv2524, i32* %arrayidx13
- %exitcond2789 = icmp eq i64 undef, %num ; <i1> [#uses=1]
- br i1 %exitcond2789, label %while.end, label %while.body
-
-while.end: ; preds = %for.end, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-03-Win64DisableRedZone.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-06-03-Win64DisableRedZone.ll
deleted file mode 100644
index e6f3008..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-03-Win64DisableRedZone.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s | grep "subq.*\\\$40, \\\%rsp"
-target triple = "x86_64-mingw64"
-
-define x86_fp80 @a(i64 %x) nounwind readnone {
-entry:
- %conv = sitofp i64 %x to x86_fp80 ; <x86_fp80> [#uses=1]
- ret x86_fp80 %conv
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll
deleted file mode 100644
index cb64bf2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-03-Win64SpillXMM.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -o %t1
-; RUN: grep "subq.*\\\$72, \\\%rsp" %t1
-; RUN: grep "movaps \\\%xmm8, 32\\\(\\\%rsp\\\)" %t1
-; RUN: grep "movaps \\\%xmm7, 48\\\(\\\%rsp\\\)" %t1
-target triple = "x86_64-mingw64"
-
-define i32 @a() nounwind {
-entry:
- tail call void asm sideeffect "", "~{xmm7},~{xmm8},~{dirflag},~{fpsr},~{flags}"() nounwind
- ret i32 undef
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll
deleted file mode 100644
index 9415732..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll
+++ /dev/null
@@ -1,48 +0,0 @@
-; RUN: llc < %s -march=x86
-
- type { %struct.GAP } ; type %0
- type { i16, i8, i8 } ; type %1
- type { [2 x i32], [2 x i32] } ; type %2
- type { %struct.rec* } ; type %3
- %struct.FILE_POS = type { i8, i8, i16, i32 }
- %struct.FIRST_UNION = type { %struct.FILE_POS }
- %struct.FOURTH_UNION = type { %struct.STYLE }
- %struct.GAP = type { i8, i8, i16 }
- %struct.LIST = type { %struct.rec*, %struct.rec* }
- %struct.SECOND_UNION = type { %1 }
- %struct.STYLE = type { %0, %0, i16, i16, i32 }
- %struct.THIRD_UNION = type { %2 }
- %struct.head_type = type { [2 x %struct.LIST], %struct.FIRST_UNION, %struct.SECOND_UNION, %struct.THIRD_UNION, %struct.FOURTH_UNION, %struct.rec*, %3, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, i32 }
- %struct.rec = type { %struct.head_type }
-
-define fastcc void @MinSize(%struct.rec* %x) nounwind {
-entry:
- %tmp13 = load i8* undef, align 4 ; <i8> [#uses=3]
- %tmp14 = zext i8 %tmp13 to i32 ; <i32> [#uses=2]
- switch i32 %tmp14, label %bb1109 [
- i32 42, label %bb246
- ]
-
-bb246: ; preds = %entry, %entry
- switch i8 %tmp13, label %bb249 [
- i8 42, label %bb269
- i8 44, label %bb269
- ]
-
-bb249: ; preds = %bb246
- %tmp3240 = icmp eq i8 %tmp13, 0 ; <i1> [#uses=1]
- br i1 %tmp3240, label %bb974, label %bb269
-
-bb269:
- %tmp3424 = getelementptr %struct.rec* %x, i32 0, i32 0, i32 0, i32 0, i32 1 ; <%struct.rec**> [#uses=0]
- unreachable
-
-bb974:
- unreachable
-
-bb1109: ; preds = %entry
- call fastcc void @Image(i32 %tmp14) nounwind ; <i8*> [#uses=0]
- unreachable
-}
-
-declare fastcc void @Image(i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll
deleted file mode 100644
index 336f17e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-05-ScalarToVectorByteMMX.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx | not grep movl
-
-define <8 x i8> @a(i8 zeroext %x) nounwind {
- %r = insertelement <8 x i8> undef, i8 %x, i32 0
- ret <8 x i8> %r
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-05-VZextByteShort.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-06-05-VZextByteShort.ll
deleted file mode 100644
index 5c51480..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-05-VZextByteShort.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 > %t1
-; RUN: grep movzwl %t1 | count 2
-; RUN: grep movzbl %t1 | count 2
-; RUN: grep movd %t1 | count 4
-
-define <4 x i16> @a(i32* %x1) nounwind {
- %x2 = load i32* %x1
- %x3 = lshr i32 %x2, 1
- %x = trunc i32 %x3 to i16
- %r = insertelement <4 x i16> zeroinitializer, i16 %x, i32 0
- ret <4 x i16> %r
-}
-
-define <8 x i16> @b(i32* %x1) nounwind {
- %x2 = load i32* %x1
- %x3 = lshr i32 %x2, 1
- %x = trunc i32 %x3 to i16
- %r = insertelement <8 x i16> zeroinitializer, i16 %x, i32 0
- ret <8 x i16> %r
-}
-
-define <8 x i8> @c(i32* %x1) nounwind {
- %x2 = load i32* %x1
- %x3 = lshr i32 %x2, 1
- %x = trunc i32 %x3 to i8
- %r = insertelement <8 x i8> zeroinitializer, i8 %x, i32 0
- ret <8 x i8> %r
-}
-
-define <16 x i8> @d(i32* %x1) nounwind {
- %x2 = load i32* %x1
- %x3 = lshr i32 %x2, 1
- %x = trunc i32 %x3 to i8
- %r = insertelement <16 x i8> zeroinitializer, i8 %x, i32 0
- ret <16 x i8> %r
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll
deleted file mode 100644
index 8bb3dc6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-05-VariableIndexInsert.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s
-
-define <2 x i64> @_mm_insert_epi16(<2 x i64> %a, i32 %b, i32 %imm) nounwind readnone {
-entry:
- %conv = bitcast <2 x i64> %a to <8 x i16> ; <<8 x i16>> [#uses=1]
- %conv2 = trunc i32 %b to i16 ; <i16> [#uses=1]
- %and = and i32 %imm, 7 ; <i32> [#uses=1]
- %vecins = insertelement <8 x i16> %conv, i16 %conv2, i32 %and ; <<8 x i16>> [#uses=1]
- %conv6 = bitcast <8 x i16> %vecins to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %conv6
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-05-sitofpCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-06-05-sitofpCrash.ll
deleted file mode 100644
index e361804..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-05-sitofpCrash.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse
-; PR2598
-
-define <2 x float> @a(<2 x i32> %i) nounwind {
- %r = sitofp <2 x i32> %i to <2 x float>
- ret <2 x float> %r
-}
-
-define <2 x i32> @b(<2 x float> %i) nounwind {
- %r = fptosi <2 x float> %i to <2 x i32>
- ret <2 x i32> %r
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-06-ConcatVectors.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-06-06-ConcatVectors.ll
deleted file mode 100644
index 92419fc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-06-ConcatVectors.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s
-
-define <2 x i64> @_mm_movpi64_pi64(<1 x i64> %a, <1 x i64> %b) nounwind readnone {
-entry:
- %0 = shufflevector <1 x i64> %a, <1 x i64> %b, <2 x i32> <i32 0, i32 1>
- ret <2 x i64> %0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-07-ExpandMMXBitcast.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-06-07-ExpandMMXBitcast.ll
deleted file mode 100644
index 07ef53e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-07-ExpandMMXBitcast.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx | grep movl | count 2
-
-define i64 @a(i32 %a, i32 %b) nounwind readnone {
-entry:
- %0 = insertelement <2 x i32> undef, i32 %a, i32 0 ; <<2 x i32>> [#uses=1]
- %1 = insertelement <2 x i32> %0, i32 %b, i32 1 ; <<2 x i32>> [#uses=1]
- %conv = bitcast <2 x i32> %1 to i64 ; <i64> [#uses=1]
- ret i64 %conv
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-12-x86_64-tail-call-conv-out-of-sync-bug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-06-12-x86_64-tail-call-conv-out-of-sync-bug.ll
deleted file mode 100644
index 673e936..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-12-x86_64-tail-call-conv-out-of-sync-bug.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -tailcallopt -march=x86-64 -mattr=+sse2 -mtriple=x86_64-apple-darwin | grep fstpt
-; RUN: llc < %s -tailcallopt -march=x86-64 -mattr=+sse2 -mtriple=x86_64-apple-darwin | grep xmm
-
-; Check that x86-64 tail calls support x86_fp80 and v2f32 types. (Tail call
-; calling convention out of sync with standard c calling convention on x86_64)
-; Bug 4278.
-
-declare fastcc double @tailcallee(x86_fp80, <2 x float>)
-
-define fastcc double @tailcall() {
-entry:
- %tmp = fpext float 1.000000e+00 to x86_fp80
- %tmp2 = tail call fastcc double @tailcallee( x86_fp80 %tmp, <2 x float> <float 1.000000e+00, float 1.000000e+00>)
- ret double %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-15-not-a-tail-call.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-06-15-not-a-tail-call.ll
deleted file mode 100644
index feb5780..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-15-not-a-tail-call.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86 -tailcallopt | not grep TAILCALL
-
-; Bug 4396. This tail call can NOT be optimized.
-
-declare fastcc i8* @_D3gcx2GC12mallocNoSyncMFmkZPv() nounwind
-
-define fastcc i8* @_D3gcx2GC12callocNoSyncMFmkZPv() nounwind {
-entry:
- %tmp6 = tail call fastcc i8* @_D3gcx2GC12mallocNoSyncMFmkZPv() ; <i8*> [#uses=2]
- %tmp9 = tail call i8* @memset(i8* %tmp6, i32 0, i64 2) ; <i8*> [#uses=0]
- ret i8* %tmp6
-}
-
-declare i8* @memset(i8*, i32, i64)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll
deleted file mode 100644
index 228cd48..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-06-18-movlp-shuffle-register.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse,-sse2
-; PR2484
-
-define <4 x float> @f4523(<4 x float> %a,<4 x float> %b) nounwind {
-entry:
-%shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4,i32
-5,i32 2,i32 3>
-ret <4 x float> %shuffle
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-06-TwoAddrAssert.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-07-06-TwoAddrAssert.ll
deleted file mode 100644
index fcc71ae..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-06-TwoAddrAssert.ll
+++ /dev/null
@@ -1,137 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=x86_64-unknown-freebsd7.2
-; PR4478
-
- %struct.sockaddr = type <{ i8, i8, [14 x i8] }>
-
-define i32 @main(i32 %argc, i8** %argv) nounwind {
-entry:
- br label %while.cond
-
-while.cond: ; preds = %sw.bb6, %entry
- switch i32 undef, label %sw.default [
- i32 -1, label %while.end
- i32 119, label %sw.bb6
- ]
-
-sw.bb6: ; preds = %while.cond
- br i1 undef, label %if.then, label %while.cond
-
-if.then: ; preds = %sw.bb6
- ret i32 1
-
-sw.default: ; preds = %while.cond
- ret i32 1
-
-while.end: ; preds = %while.cond
- br i1 undef, label %if.then15, label %if.end16
-
-if.then15: ; preds = %while.end
- ret i32 1
-
-if.end16: ; preds = %while.end
- br i1 undef, label %lor.lhs.false, label %if.then21
-
-lor.lhs.false: ; preds = %if.end16
- br i1 undef, label %if.end22, label %if.then21
-
-if.then21: ; preds = %lor.lhs.false, %if.end16
- ret i32 1
-
-if.end22: ; preds = %lor.lhs.false
- br i1 undef, label %lor.lhs.false27, label %if.then51
-
-lor.lhs.false27: ; preds = %if.end22
- br i1 undef, label %lor.lhs.false39, label %if.then51
-
-lor.lhs.false39: ; preds = %lor.lhs.false27
- br i1 undef, label %if.end52, label %if.then51
-
-if.then51: ; preds = %lor.lhs.false39, %lor.lhs.false27, %if.end22
- ret i32 1
-
-if.end52: ; preds = %lor.lhs.false39
- br i1 undef, label %if.then57, label %if.end58
-
-if.then57: ; preds = %if.end52
- ret i32 1
-
-if.end58: ; preds = %if.end52
- br i1 undef, label %if.then64, label %if.end65
-
-if.then64: ; preds = %if.end58
- ret i32 1
-
-if.end65: ; preds = %if.end58
- br i1 undef, label %if.then71, label %if.end72
-
-if.then71: ; preds = %if.end65
- ret i32 1
-
-if.end72: ; preds = %if.end65
- br i1 undef, label %if.then83, label %if.end84
-
-if.then83: ; preds = %if.end72
- ret i32 1
-
-if.end84: ; preds = %if.end72
- br i1 undef, label %if.then101, label %if.end102
-
-if.then101: ; preds = %if.end84
- ret i32 1
-
-if.end102: ; preds = %if.end84
- br i1 undef, label %if.then113, label %if.end114
-
-if.then113: ; preds = %if.end102
- ret i32 1
-
-if.end114: ; preds = %if.end102
- br i1 undef, label %if.then209, label %if.end210
-
-if.then209: ; preds = %if.end114
- ret i32 1
-
-if.end210: ; preds = %if.end114
- br i1 undef, label %if.then219, label %if.end220
-
-if.then219: ; preds = %if.end210
- ret i32 1
-
-if.end220: ; preds = %if.end210
- br i1 undef, label %if.end243, label %lor.lhs.false230
-
-lor.lhs.false230: ; preds = %if.end220
- unreachable
-
-if.end243: ; preds = %if.end220
- br i1 undef, label %if.then249, label %if.end250
-
-if.then249: ; preds = %if.end243
- ret i32 1
-
-if.end250: ; preds = %if.end243
- br i1 undef, label %if.end261, label %if.then260
-
-if.then260: ; preds = %if.end250
- ret i32 1
-
-if.end261: ; preds = %if.end250
- br i1 undef, label %if.then270, label %if.end271
-
-if.then270: ; preds = %if.end261
- ret i32 1
-
-if.end271: ; preds = %if.end261
- %call.i = call i32 @arc4random() nounwind ; <i32> [#uses=1]
- %rem.i = urem i32 %call.i, 16383 ; <i32> [#uses=1]
- %rem1.i = trunc i32 %rem.i to i16 ; <i16> [#uses=1]
- %conv2.i = or i16 %rem1.i, -16384 ; <i16> [#uses=1]
- %0 = call i16 asm "xchgb ${0:h}, ${0:b}", "=Q,0,~{dirflag},~{fpsr},~{flags}"(i16 %conv2.i) nounwind ; <i16> [#uses=1]
- store i16 %0, i16* undef
- %call281 = call i32 @bind(i32 undef, %struct.sockaddr* undef, i32 16) nounwind ; <i32> [#uses=0]
- unreachable
-}
-
-declare i32 @bind(i32, %struct.sockaddr*, i32)
-
-declare i32 @arc4random()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-07-SplitICmp.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-07-07-SplitICmp.ll
deleted file mode 100644
index eb9378b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-07-SplitICmp.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -disable-mmx
-
-define void @test2(<2 x i32> %A, <2 x i32> %B, <2 x i32>* %C) nounwind {
- %D = icmp sgt <2 x i32> %A, %B
- %E = zext <2 x i1> %D to <2 x i32>
- store <2 x i32> %E, <2 x i32>* %C
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-09-ExtractBoolFromVector.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-07-09-ExtractBoolFromVector.ll
deleted file mode 100644
index 0fdfdcb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-09-ExtractBoolFromVector.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR3037
-
-define void @entry(<4 x i8>* %dest) {
- %1 = xor <4 x i1> zeroinitializer, < i1 true, i1 true, i1 true, i1 true >
- %2 = extractelement <4 x i1> %1, i32 3
- %3 = zext i1 %2 to i8
- %4 = insertelement <4 x i8> zeroinitializer, i8 %3, i32 3
- store <4 x i8> %4, <4 x i8>* %dest, align 1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-15-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-07-15-CoalescerBug.ll
deleted file mode 100644
index eabaf77..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-15-CoalescerBug.ll
+++ /dev/null
@@ -1,958 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10
-
- %struct.ANY = type { i8* }
- %struct.AV = type { %struct.XPVAV*, i32, i32 }
- %struct.CLONE_PARAMS = type { %struct.AV*, i64, %struct.PerlInterpreter* }
- %struct.CV = type { %struct.XPVCV*, i32, i32 }
- %struct.DIR = type { i32, i64, i64, i8*, i32, i64, i64, i32, %struct.__darwin_pthread_mutex_t, %struct._telldir* }
- %struct.GP = type { %struct.SV*, i32, %struct.io*, %struct.CV*, %struct.AV*, %struct.HV*, %struct.GV*, %struct.CV*, i32, i32, i32, i8* }
- %struct.GV = type { %struct.XPVGV*, i32, i32 }
- %struct.HE = type { %struct.HE*, %struct.HEK*, %struct.SV* }
- %struct.HEK = type { i32, i32, [1 x i8] }
- %struct.HV = type { %struct.XPVHV*, i32, i32 }
- %struct.MAGIC = type { %struct.MAGIC*, %struct.MGVTBL*, i16, i8, i8, %struct.SV*, i8*, i32 }
- %struct.MGVTBL = type { i32 (%struct.SV*, %struct.MAGIC*)*, i32 (%struct.SV*, %struct.MAGIC*)*, i32 (%struct.SV*, %struct.MAGIC*)*, i32 (%struct.SV*, %struct.MAGIC*)*, i32 (%struct.SV*, %struct.MAGIC*)*, i32 (%struct.SV*, %struct.MAGIC*, %struct.SV*, i8*, i32)*, i32 (%struct.MAGIC*, %struct.CLONE_PARAMS*)* }
- %struct.OP = type { %struct.OP*, %struct.OP*, %struct.OP* ()*, i64, i16, i16, i8, i8 }
- %struct.PMOP = type { %struct.OP*, %struct.OP*, %struct.OP* ()*, i64, i16, i16, i8, i8, %struct.OP*, %struct.OP*, %struct.OP*, %struct.OP*, %struct.PMOP*, %struct.REGEXP*, i32, i32, i8, %struct.HV* }
- %struct.PerlIO_funcs = type { i64, i8*, i64, i32, i64 (%struct.PerlIOl**, i8*, %struct.SV*, %struct.PerlIO_funcs*)*, i64 (%struct.PerlIOl**)*, %struct.PerlIOl** (%struct.PerlIO_funcs*, %struct.PerlIO_list_t*, i64, i8*, i32, i32, i32, %struct.PerlIOl**, i32, %struct.SV**)*, i64 (%struct.PerlIOl**)*, %struct.SV* (%struct.PerlIOl**, %struct.CLONE_PARAMS*, i32)*, i64 (%struct.PerlIOl**)*, %struct.PerlIOl** (%struct.PerlIOl**, %struct.PerlIOl**, %struct.CLONE_PARAMS*, i32)*, i64 (%struct.PerlIOl**, i8*, i64)*, i64 (%struct.PerlIOl**, i8*, i64)*, i64 (%struct.PerlIOl**, i8*, i64)*, i64 (%struct.PerlIOl**, i64, i32)*, i64 (%struct.PerlIOl**)*, i64 (%struct.PerlIOl**)*, i64 (%struct.PerlIOl**)*, i64 (%struct.PerlIOl**)*, i64 (%struct.PerlIOl**)*, i64 (%struct.PerlIOl**)*, void (%struct.PerlIOl**)*, void (%struct.PerlIOl**)*, i8* (%struct.PerlIOl**)*, i64 (%struct.PerlIOl**)*, i8* (%struct.PerlIOl**)*, i64 (%struct.PerlIOl**)*, void (%struct.PerlIOl**, i8*, i64)* }
- %struct.PerlIO_list_t = type { i64, i64, i64, %struct.PerlIO_pair_t* }
- %struct.PerlIO_pair_t = type { %struct.PerlIO_funcs*, %struct.SV* }
- %struct.PerlIOl = type { %struct.PerlIOl*, %struct.PerlIO_funcs*, i32 }
- %struct.PerlInterpreter = type { i8 }
- %struct.REGEXP = type { i32*, i32*, %struct.regnode*, %struct.reg_substr_data*, i8*, %struct.reg_data*, i8*, i32*, i32, i32, i32, i32, i32, i32, i32, i32, [1 x %struct.regnode] }
- %struct.SV = type { i8*, i32, i32 }
- %struct.XPVAV = type { i8*, i64, i64, i64, double, %struct.MAGIC*, %struct.HV*, %struct.SV**, %struct.SV*, i8 }
- %struct.XPVCV = type { i8*, i64, i64, i64, double, %struct.MAGIC*, %struct.HV*, %struct.HV*, %struct.OP*, %struct.OP*, void (%struct.CV*)*, %struct.ANY, %struct.GV*, i8*, i64, %struct.AV*, %struct.CV*, i16, i32 }
- %struct.XPVGV = type { i8*, i64, i64, i64, double, %struct.MAGIC*, %struct.HV*, %struct.GP*, i8*, i64, %struct.HV*, i8 }
- %struct.XPVHV = type { i8*, i64, i64, i64, double, %struct.MAGIC*, %struct.HV*, i32, %struct.HE*, %struct.PMOP*, i8* }
- %struct.XPVIO = type { i8*, i64, i64, i64, double, %struct.MAGIC*, %struct.HV*, %struct.PerlIOl**, %struct.PerlIOl**, %struct.anon, i64, i64, i64, i64, i8*, %struct.GV*, i8*, %struct.GV*, i8*, %struct.GV*, i16, i8, i8 }
- %struct.__darwin_pthread_mutex_t = type { i64, [56 x i8] }
- %struct._telldir = type opaque
- %struct.anon = type { %struct.DIR* }
- %struct.io = type { %struct.XPVIO*, i32, i32 }
- %struct.reg_data = type { i32, i8*, [1 x i8*] }
- %struct.reg_substr_data = type { [3 x %struct.reg_substr_datum] }
- %struct.reg_substr_datum = type { i32, i32, %struct.SV*, %struct.SV* }
- %struct.regnode = type { i8, i8, i16 }
-
-define i32 @Perl_yylex() nounwind ssp {
-entry:
- br i1 undef, label %bb21, label %bb
-
-bb: ; preds = %entry
- unreachable
-
-bb21: ; preds = %entry
- switch i32 undef, label %bb103 [
- i32 1, label %bb101
- i32 4, label %bb75
- i32 6, label %bb68
- i32 7, label %bb67
- i32 8, label %bb25
- ]
-
-bb25: ; preds = %bb21
- ret i32 41
-
-bb67: ; preds = %bb21
- ret i32 40
-
-bb68: ; preds = %bb21
- br i1 undef, label %bb69, label %bb70
-
-bb69: ; preds = %bb68
- ret i32 undef
-
-bb70: ; preds = %bb68
- unreachable
-
-bb75: ; preds = %bb21
- unreachable
-
-bb101: ; preds = %bb21
- unreachable
-
-bb103: ; preds = %bb21
- switch i32 undef, label %bb104 [
- i32 0, label %bb126
- i32 4, label %fake_eof
- i32 26, label %fake_eof
- i32 34, label %bb1423
- i32 36, label %bb1050
- i32 37, label %bb534
- i32 39, label %bb1412
- i32 41, label %bb643
- i32 44, label %bb544
- i32 48, label %bb1406
- i32 49, label %bb1406
- i32 50, label %bb1406
- i32 51, label %bb1406
- i32 52, label %bb1406
- i32 53, label %bb1406
- i32 54, label %bb1406
- i32 55, label %bb1406
- i32 56, label %bb1406
- i32 57, label %bb1406
- i32 59, label %bb639
- i32 65, label %keylookup
- i32 66, label %keylookup
- i32 67, label %keylookup
- i32 68, label %keylookup
- i32 69, label %keylookup
- i32 70, label %keylookup
- i32 71, label %keylookup
- i32 72, label %keylookup
- i32 73, label %keylookup
- i32 74, label %keylookup
- i32 75, label %keylookup
- i32 76, label %keylookup
- i32 77, label %keylookup
- i32 78, label %keylookup
- i32 79, label %keylookup
- i32 80, label %keylookup
- i32 81, label %keylookup
- i32 82, label %keylookup
- i32 83, label %keylookup
- i32 84, label %keylookup
- i32 85, label %keylookup
- i32 86, label %keylookup
- i32 87, label %keylookup
- i32 88, label %keylookup
- i32 89, label %keylookup
- i32 90, label %keylookup
- i32 92, label %bb1455
- i32 95, label %keylookup
- i32 96, label %bb1447
- i32 97, label %keylookup
- i32 98, label %keylookup
- i32 99, label %keylookup
- i32 100, label %keylookup
- i32 101, label %keylookup
- i32 102, label %keylookup
- i32 103, label %keylookup
- i32 104, label %keylookup
- i32 105, label %keylookup
- i32 106, label %keylookup
- i32 107, label %keylookup
- i32 108, label %keylookup
- i32 109, label %keylookup
- i32 110, label %keylookup
- i32 111, label %keylookup
- i32 112, label %keylookup
- i32 113, label %keylookup
- i32 114, label %keylookup
- i32 115, label %keylookup
- i32 116, label %keylookup
- i32 117, label %keylookup
- i32 118, label %keylookup
- i32 119, label %keylookup
- i32 120, label %keylookup
- i32 121, label %keylookup
- i32 122, label %keylookup
- i32 126, label %bb544
- ]
-
-bb104: ; preds = %bb103
- unreachable
-
-bb126: ; preds = %bb103
- ret i32 0
-
-fake_eof: ; preds = %bb1841, %bb103, %bb103
- unreachable
-
-bb534: ; preds = %bb103
- unreachable
-
-bb544: ; preds = %bb103, %bb103
- ret i32 undef
-
-bb639: ; preds = %bb103
- unreachable
-
-bb643: ; preds = %bb103
- unreachable
-
-bb1050: ; preds = %bb103
- unreachable
-
-bb1406: ; preds = %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103
- unreachable
-
-bb1412: ; preds = %bb103
- unreachable
-
-bb1423: ; preds = %bb103
- unreachable
-
-bb1447: ; preds = %bb103
- unreachable
-
-bb1455: ; preds = %bb103
- unreachable
-
-keylookup: ; preds = %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103, %bb103
- br i1 undef, label %bb1498, label %bb1496
-
-bb1496: ; preds = %keylookup
- br i1 undef, label %bb1498, label %bb1510.preheader
-
-bb1498: ; preds = %bb1496, %keylookup
- unreachable
-
-bb1510.preheader: ; preds = %bb1496
- br i1 undef, label %bb1511, label %bb1518
-
-bb1511: ; preds = %bb1510.preheader
- br label %bb1518
-
-bb1518: ; preds = %bb1511, %bb1510.preheader
- switch i32 undef, label %bb741.i4285 [
- i32 95, label %bb744.i4287
- i32 115, label %bb852.i4394
- ]
-
-bb741.i4285: ; preds = %bb1518
- br label %Perl_keyword.exit4735
-
-bb744.i4287: ; preds = %bb1518
- br label %Perl_keyword.exit4735
-
-bb852.i4394: ; preds = %bb1518
- br i1 undef, label %bb861.i4404, label %bb856.i4399
-
-bb856.i4399: ; preds = %bb852.i4394
- br label %Perl_keyword.exit4735
-
-bb861.i4404: ; preds = %bb852.i4394
- br label %Perl_keyword.exit4735
-
-Perl_keyword.exit4735: ; preds = %bb861.i4404, %bb856.i4399, %bb744.i4287, %bb741.i4285
- br i1 undef, label %bb1544, label %reserved_word
-
-bb1544: ; preds = %Perl_keyword.exit4735
- br i1 undef, label %bb1565, label %bb1545
-
-bb1545: ; preds = %bb1544
- br i1 undef, label %bb1563, label %bb1558
-
-bb1558: ; preds = %bb1545
- %0 = load %struct.SV** undef ; <%struct.SV*> [#uses=1]
- %1 = bitcast %struct.SV* %0 to %struct.GV* ; <%struct.GV*> [#uses=5]
- br i1 undef, label %bb1563, label %bb1559
-
-bb1559: ; preds = %bb1558
- br i1 undef, label %bb1560, label %bb1563
-
-bb1560: ; preds = %bb1559
- br i1 undef, label %bb1563, label %bb1561
-
-bb1561: ; preds = %bb1560
- br i1 undef, label %bb1562, label %bb1563
-
-bb1562: ; preds = %bb1561
- br label %bb1563
-
-bb1563: ; preds = %bb1562, %bb1561, %bb1560, %bb1559, %bb1558, %bb1545
- %gv19.3 = phi %struct.GV* [ %1, %bb1562 ], [ undef, %bb1545 ], [ %1, %bb1558 ], [ %1, %bb1559 ], [ %1, %bb1560 ], [ %1, %bb1561 ] ; <%struct.GV*> [#uses=0]
- br i1 undef, label %bb1565, label %reserved_word
-
-bb1565: ; preds = %bb1563, %bb1544
- br i1 undef, label %bb1573, label %bb1580
-
-bb1573: ; preds = %bb1565
- br label %bb1580
-
-bb1580: ; preds = %bb1573, %bb1565
- br i1 undef, label %bb1595, label %reserved_word
-
-bb1595: ; preds = %bb1580
- br i1 undef, label %reserved_word, label %bb1597
-
-bb1597: ; preds = %bb1595
- br i1 undef, label %reserved_word, label %bb1602
-
-bb1602: ; preds = %bb1597
- br label %reserved_word
-
-reserved_word: ; preds = %bb1602, %bb1597, %bb1595, %bb1580, %bb1563, %Perl_keyword.exit4735
- switch i32 undef, label %bb2012 [
- i32 1, label %bb1819
- i32 2, label %bb1830
- i32 4, label %bb1841
- i32 5, label %bb1841
- i32 8, label %bb1880
- i32 14, label %bb1894
- i32 16, label %bb1895
- i32 17, label %bb1896
- i32 18, label %bb1897
- i32 19, label %bb1898
- i32 20, label %bb1899
- i32 22, label %bb1906
- i32 23, label %bb1928
- i32 24, label %bb2555
- i32 26, label %bb1929
- i32 31, label %bb1921
- i32 32, label %bb1930
- i32 33, label %bb1905
- i32 34, label %bb1936
- i32 35, label %bb1927
- i32 37, label %bb1962
- i32 40, label %bb1951
- i32 41, label %bb1946
- i32 42, label %bb1968
- i32 44, label %bb1969
- i32 45, label %bb1970
- i32 46, label %bb2011
- i32 47, label %bb2006
- i32 48, label %bb2007
- i32 49, label %bb2009
- i32 50, label %bb2010
- i32 51, label %bb2008
- i32 53, label %bb1971
- i32 54, label %bb1982
- i32 55, label %bb2005
- i32 59, label %bb2081
- i32 61, label %bb2087
- i32 64, label %bb2080
- i32 65, label %really_sub
- i32 66, label %bb2079
- i32 67, label %bb2089
- i32 69, label %bb2155
- i32 72, label %bb2137
- i32 74, label %bb2138
- i32 75, label %bb2166
- i32 76, label %bb2144
- i32 78, label %bb2145
- i32 81, label %bb2102
- i32 82, label %bb2108
- i32 84, label %bb2114
- i32 85, label %bb2115
- i32 86, label %bb2116
- i32 89, label %bb2146
- i32 90, label %bb2147
- i32 91, label %bb2148
- i32 93, label %bb2154
- i32 94, label %bb2167
- i32 96, label %bb2091
- i32 97, label %bb2090
- i32 98, label %bb2088
- i32 100, label %bb2173
- i32 101, label %bb2174
- i32 102, label %bb2175
- i32 103, label %bb2180
- i32 104, label %bb2181
- i32 106, label %bb2187
- i32 107, label %bb2188
- i32 110, label %bb2206
- i32 112, label %bb2217
- i32 113, label %bb2218
- i32 114, label %bb2199
- i32 119, label %bb2205
- i32 120, label %bb2229
- i32 121, label %bb2233
- i32 122, label %bb2234
- i32 123, label %bb2235
- i32 124, label %bb2236
- i32 125, label %bb2237
- i32 126, label %bb2238
- i32 127, label %bb2239
- i32 128, label %bb2268
- i32 129, label %bb2267
- i32 133, label %bb2276
- i32 134, label %bb2348
- i32 135, label %bb2337
- i32 137, label %bb2239
- i32 138, label %bb2367
- i32 139, label %bb2368
- i32 140, label %bb2369
- i32 141, label %bb2357
- i32 143, label %bb2349
- i32 144, label %bb2350
- i32 146, label %bb2356
- i32 147, label %bb2370
- i32 148, label %bb2445
- i32 149, label %bb2453
- i32 151, label %bb2381
- i32 152, label %bb2457
- i32 154, label %bb2516
- i32 156, label %bb2522
- i32 158, label %bb2527
- i32 159, label %bb2537
- i32 160, label %bb2503
- i32 162, label %bb2504
- i32 163, label %bb2464
- i32 165, label %bb2463
- i32 166, label %bb2538
- i32 168, label %bb2515
- i32 170, label %bb2549
- i32 172, label %bb2566
- i32 173, label %bb2595
- i32 174, label %bb2565
- i32 175, label %bb2567
- i32 176, label %bb2568
- i32 177, label %bb2569
- i32 178, label %bb2570
- i32 179, label %bb2594
- i32 182, label %bb2571
- i32 183, label %bb2572
- i32 185, label %bb2593
- i32 186, label %bb2583
- i32 187, label %bb2596
- i32 189, label %bb2602
- i32 190, label %bb2603
- i32 191, label %bb2604
- i32 192, label %bb2605
- i32 193, label %bb2606
- i32 196, label %bb2617
- i32 197, label %bb2618
- i32 198, label %bb2619
- i32 199, label %bb2627
- i32 200, label %bb2625
- i32 201, label %bb2626
- i32 206, label %really_sub
- i32 207, label %bb2648
- i32 208, label %bb2738
- i32 209, label %bb2739
- i32 210, label %bb2740
- i32 211, label %bb2742
- i32 212, label %bb2741
- i32 213, label %bb2737
- i32 214, label %bb2743
- i32 217, label %bb2758
- i32 219, label %bb2764
- i32 220, label %bb2765
- i32 221, label %bb2744
- i32 222, label %bb2766
- i32 226, label %bb2785
- i32 227, label %bb2783
- i32 228, label %bb2784
- i32 229, label %bb2790
- i32 230, label %bb2797
- i32 232, label %bb2782
- i32 234, label %bb2791
- i32 236, label %bb2815
- i32 237, label %bb2818
- i32 238, label %bb2819
- i32 239, label %bb2820
- i32 240, label %bb2817
- i32 241, label %bb2816
- i32 242, label %bb2821
- i32 243, label %bb2826
- i32 244, label %bb2829
- i32 245, label %bb2830
- ]
-
-bb1819: ; preds = %reserved_word
- unreachable
-
-bb1830: ; preds = %reserved_word
- unreachable
-
-bb1841: ; preds = %reserved_word, %reserved_word
- br i1 undef, label %fake_eof, label %bb1842
-
-bb1842: ; preds = %bb1841
- unreachable
-
-bb1880: ; preds = %reserved_word
- unreachable
-
-bb1894: ; preds = %reserved_word
- ret i32 undef
-
-bb1895: ; preds = %reserved_word
- ret i32 301
-
-bb1896: ; preds = %reserved_word
- ret i32 undef
-
-bb1897: ; preds = %reserved_word
- ret i32 undef
-
-bb1898: ; preds = %reserved_word
- ret i32 undef
-
-bb1899: ; preds = %reserved_word
- ret i32 undef
-
-bb1905: ; preds = %reserved_word
- ret i32 278
-
-bb1906: ; preds = %reserved_word
- unreachable
-
-bb1921: ; preds = %reserved_word
- ret i32 288
-
-bb1927: ; preds = %reserved_word
- ret i32 undef
-
-bb1928: ; preds = %reserved_word
- ret i32 undef
-
-bb1929: ; preds = %reserved_word
- ret i32 undef
-
-bb1930: ; preds = %reserved_word
- ret i32 undef
-
-bb1936: ; preds = %reserved_word
- br i1 undef, label %bb2834, label %bb1937
-
-bb1937: ; preds = %bb1936
- ret i32 undef
-
-bb1946: ; preds = %reserved_word
- unreachable
-
-bb1951: ; preds = %reserved_word
- ret i32 undef
-
-bb1962: ; preds = %reserved_word
- ret i32 undef
-
-bb1968: ; preds = %reserved_word
- ret i32 280
-
-bb1969: ; preds = %reserved_word
- ret i32 276
-
-bb1970: ; preds = %reserved_word
- ret i32 277
-
-bb1971: ; preds = %reserved_word
- ret i32 288
-
-bb1982: ; preds = %reserved_word
- br i1 undef, label %bb2834, label %bb1986
-
-bb1986: ; preds = %bb1982
- ret i32 undef
-
-bb2005: ; preds = %reserved_word
- ret i32 undef
-
-bb2006: ; preds = %reserved_word
- ret i32 282
-
-bb2007: ; preds = %reserved_word
- ret i32 282
-
-bb2008: ; preds = %reserved_word
- ret i32 282
-
-bb2009: ; preds = %reserved_word
- ret i32 282
-
-bb2010: ; preds = %reserved_word
- ret i32 282
-
-bb2011: ; preds = %reserved_word
- ret i32 282
-
-bb2012: ; preds = %reserved_word
- unreachable
-
-bb2079: ; preds = %reserved_word
- ret i32 undef
-
-bb2080: ; preds = %reserved_word
- ret i32 282
-
-bb2081: ; preds = %reserved_word
- ret i32 undef
-
-bb2087: ; preds = %reserved_word
- ret i32 undef
-
-bb2088: ; preds = %reserved_word
- ret i32 287
-
-bb2089: ; preds = %reserved_word
- ret i32 287
-
-bb2090: ; preds = %reserved_word
- ret i32 undef
-
-bb2091: ; preds = %reserved_word
- ret i32 280
-
-bb2102: ; preds = %reserved_word
- ret i32 282
-
-bb2108: ; preds = %reserved_word
- ret i32 undef
-
-bb2114: ; preds = %reserved_word
- ret i32 undef
-
-bb2115: ; preds = %reserved_word
- ret i32 282
-
-bb2116: ; preds = %reserved_word
- ret i32 282
-
-bb2137: ; preds = %reserved_word
- ret i32 undef
-
-bb2138: ; preds = %reserved_word
- ret i32 282
-
-bb2144: ; preds = %reserved_word
- ret i32 undef
-
-bb2145: ; preds = %reserved_word
- ret i32 282
-
-bb2146: ; preds = %reserved_word
- ret i32 undef
-
-bb2147: ; preds = %reserved_word
- ret i32 undef
-
-bb2148: ; preds = %reserved_word
- ret i32 282
-
-bb2154: ; preds = %reserved_word
- ret i32 undef
-
-bb2155: ; preds = %reserved_word
- ret i32 282
-
-bb2166: ; preds = %reserved_word
- ret i32 282
-
-bb2167: ; preds = %reserved_word
- ret i32 undef
-
-bb2173: ; preds = %reserved_word
- ret i32 274
-
-bb2174: ; preds = %reserved_word
- ret i32 undef
-
-bb2175: ; preds = %reserved_word
- br i1 undef, label %bb2834, label %bb2176
-
-bb2176: ; preds = %bb2175
- ret i32 undef
-
-bb2180: ; preds = %reserved_word
- ret i32 undef
-
-bb2181: ; preds = %reserved_word
- ret i32 undef
-
-bb2187: ; preds = %reserved_word
- ret i32 undef
-
-bb2188: ; preds = %reserved_word
- ret i32 280
-
-bb2199: ; preds = %reserved_word
- ret i32 295
-
-bb2205: ; preds = %reserved_word
- ret i32 287
-
-bb2206: ; preds = %reserved_word
- ret i32 287
-
-bb2217: ; preds = %reserved_word
- ret i32 undef
-
-bb2218: ; preds = %reserved_word
- ret i32 undef
-
-bb2229: ; preds = %reserved_word
- unreachable
-
-bb2233: ; preds = %reserved_word
- ret i32 undef
-
-bb2234: ; preds = %reserved_word
- ret i32 undef
-
-bb2235: ; preds = %reserved_word
- ret i32 undef
-
-bb2236: ; preds = %reserved_word
- ret i32 undef
-
-bb2237: ; preds = %reserved_word
- ret i32 undef
-
-bb2238: ; preds = %reserved_word
- ret i32 undef
-
-bb2239: ; preds = %reserved_word, %reserved_word
- unreachable
-
-bb2267: ; preds = %reserved_word
- ret i32 280
-
-bb2268: ; preds = %reserved_word
- ret i32 288
-
-bb2276: ; preds = %reserved_word
- unreachable
-
-bb2337: ; preds = %reserved_word
- ret i32 300
-
-bb2348: ; preds = %reserved_word
- ret i32 undef
-
-bb2349: ; preds = %reserved_word
- ret i32 undef
-
-bb2350: ; preds = %reserved_word
- ret i32 undef
-
-bb2356: ; preds = %reserved_word
- ret i32 undef
-
-bb2357: ; preds = %reserved_word
- br i1 undef, label %bb2834, label %bb2358
-
-bb2358: ; preds = %bb2357
- ret i32 undef
-
-bb2367: ; preds = %reserved_word
- ret i32 undef
-
-bb2368: ; preds = %reserved_word
- ret i32 270
-
-bb2369: ; preds = %reserved_word
- ret i32 undef
-
-bb2370: ; preds = %reserved_word
- unreachable
-
-bb2381: ; preds = %reserved_word
- unreachable
-
-bb2445: ; preds = %reserved_word
- unreachable
-
-bb2453: ; preds = %reserved_word
- unreachable
-
-bb2457: ; preds = %reserved_word
- unreachable
-
-bb2463: ; preds = %reserved_word
- ret i32 286
-
-bb2464: ; preds = %reserved_word
- unreachable
-
-bb2503: ; preds = %reserved_word
- ret i32 280
-
-bb2504: ; preds = %reserved_word
- ret i32 undef
-
-bb2515: ; preds = %reserved_word
- ret i32 undef
-
-bb2516: ; preds = %reserved_word
- ret i32 undef
-
-bb2522: ; preds = %reserved_word
- unreachable
-
-bb2527: ; preds = %reserved_word
- unreachable
-
-bb2537: ; preds = %reserved_word
- ret i32 undef
-
-bb2538: ; preds = %reserved_word
- ret i32 undef
-
-bb2549: ; preds = %reserved_word
- unreachable
-
-bb2555: ; preds = %reserved_word
- br i1 undef, label %bb2834, label %bb2556
-
-bb2556: ; preds = %bb2555
- ret i32 undef
-
-bb2565: ; preds = %reserved_word
- ret i32 undef
-
-bb2566: ; preds = %reserved_word
- ret i32 undef
-
-bb2567: ; preds = %reserved_word
- ret i32 undef
-
-bb2568: ; preds = %reserved_word
- ret i32 undef
-
-bb2569: ; preds = %reserved_word
- ret i32 undef
-
-bb2570: ; preds = %reserved_word
- ret i32 undef
-
-bb2571: ; preds = %reserved_word
- ret i32 undef
-
-bb2572: ; preds = %reserved_word
- ret i32 undef
-
-bb2583: ; preds = %reserved_word
- br i1 undef, label %bb2834, label %bb2584
-
-bb2584: ; preds = %bb2583
- ret i32 undef
-
-bb2593: ; preds = %reserved_word
- ret i32 282
-
-bb2594: ; preds = %reserved_word
- ret i32 282
-
-bb2595: ; preds = %reserved_word
- ret i32 undef
-
-bb2596: ; preds = %reserved_word
- ret i32 undef
-
-bb2602: ; preds = %reserved_word
- ret i32 undef
-
-bb2603: ; preds = %reserved_word
- ret i32 undef
-
-bb2604: ; preds = %reserved_word
- ret i32 undef
-
-bb2605: ; preds = %reserved_word
- ret i32 undef
-
-bb2606: ; preds = %reserved_word
- ret i32 undef
-
-bb2617: ; preds = %reserved_word
- ret i32 undef
-
-bb2618: ; preds = %reserved_word
- ret i32 undef
-
-bb2619: ; preds = %reserved_word
- unreachable
-
-bb2625: ; preds = %reserved_word
- ret i32 undef
-
-bb2626: ; preds = %reserved_word
- ret i32 undef
-
-bb2627: ; preds = %reserved_word
- ret i32 undef
-
-bb2648: ; preds = %reserved_word
- ret i32 undef
-
-really_sub: ; preds = %reserved_word, %reserved_word
- unreachable
-
-bb2737: ; preds = %reserved_word
- ret i32 undef
-
-bb2738: ; preds = %reserved_word
- ret i32 undef
-
-bb2739: ; preds = %reserved_word
- ret i32 undef
-
-bb2740: ; preds = %reserved_word
- ret i32 undef
-
-bb2741: ; preds = %reserved_word
- ret i32 undef
-
-bb2742: ; preds = %reserved_word
- ret i32 undef
-
-bb2743: ; preds = %reserved_word
- ret i32 undef
-
-bb2744: ; preds = %reserved_word
- unreachable
-
-bb2758: ; preds = %reserved_word
- ret i32 undef
-
-bb2764: ; preds = %reserved_word
- ret i32 282
-
-bb2765: ; preds = %reserved_word
- ret i32 282
-
-bb2766: ; preds = %reserved_word
- ret i32 undef
-
-bb2782: ; preds = %reserved_word
- ret i32 273
-
-bb2783: ; preds = %reserved_word
- ret i32 275
-
-bb2784: ; preds = %reserved_word
- ret i32 undef
-
-bb2785: ; preds = %reserved_word
- br i1 undef, label %bb2834, label %bb2786
-
-bb2786: ; preds = %bb2785
- ret i32 undef
-
-bb2790: ; preds = %reserved_word
- ret i32 undef
-
-bb2791: ; preds = %reserved_word
- ret i32 undef
-
-bb2797: ; preds = %reserved_word
- ret i32 undef
-
-bb2815: ; preds = %reserved_word
- ret i32 undef
-
-bb2816: ; preds = %reserved_word
- ret i32 272
-
-bb2817: ; preds = %reserved_word
- ret i32 undef
-
-bb2818: ; preds = %reserved_word
- ret i32 282
-
-bb2819: ; preds = %reserved_word
- ret i32 undef
-
-bb2820: ; preds = %reserved_word
- ret i32 282
-
-bb2821: ; preds = %reserved_word
- unreachable
-
-bb2826: ; preds = %reserved_word
- unreachable
-
-bb2829: ; preds = %reserved_word
- ret i32 300
-
-bb2830: ; preds = %reserved_word
- unreachable
-
-bb2834: ; preds = %bb2785, %bb2583, %bb2555, %bb2357, %bb2175, %bb1982, %bb1936
- ret i32 283
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-16-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-07-16-CoalescerBug.ll
deleted file mode 100644
index 48af440..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-16-CoalescerBug.ll
+++ /dev/null
@@ -1,210 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10
-; rdar://7059496
-
- %struct.brinfo = type <{ %struct.brinfo*, %struct.brinfo*, i8*, i32, i32, i32, i8, i8, i8, i8 }>
- %struct.cadata = type <{ i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i32, i32, %struct.cmatcher*, i8*, i8*, i8*, i8*, i8*, i8*, i32, i8, i8, i8, i8 }>
- %struct.cline = type <{ %struct.cline*, i32, i8, i8, i8, i8, i8*, i32, i8, i8, i8, i8, i8*, i32, i8, i8, i8, i8, i8*, i32, i32, %struct.cline*, %struct.cline*, i32, i32 }>
- %struct.cmatch = type <{ i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i32, i8, i8, i8, i8, i32*, i32*, i8*, i8*, i32, i32, i32, i32, i16, i8, i8, i16, i8, i8 }>
- %struct.cmatcher = type <{ i32, i8, i8, i8, i8, %struct.cmatcher*, i32, i8, i8, i8, i8, %struct.cpattern*, i32, i8, i8, i8, i8, %struct.cpattern*, i32, i8, i8, i8, i8, %struct.cpattern*, i32, i8, i8, i8, i8, %struct.cpattern*, i32, i8, i8, i8, i8 }>
- %struct.cpattern = type <{ %struct.cpattern*, i32, i8, i8, i8, i8, %union.anon }>
- %struct.patprog = type <{ i64, i64, i64, i64, i32, i32, i32, i32, i8, i8, i8, i8, i8, i8, i8, i8 }>
- %union.anon = type <{ [8 x i8] }>
-
-define i32 @addmatches(%struct.cadata* %dat, i8** nocapture %argv) nounwind ssp {
-entry:
- br i1 undef, label %if.else, label %if.then91
-
-if.then91: ; preds = %entry
- br label %if.end96
-
-if.else: ; preds = %entry
- br label %if.end96
-
-if.end96: ; preds = %if.else, %if.then91
- br i1 undef, label %lor.lhs.false, label %if.then105
-
-lor.lhs.false: ; preds = %if.end96
- br i1 undef, label %if.else139, label %if.then105
-
-if.then105: ; preds = %lor.lhs.false, %if.end96
- unreachable
-
-if.else139: ; preds = %lor.lhs.false
- br i1 undef, label %land.end, label %land.rhs
-
-land.rhs: ; preds = %if.else139
- unreachable
-
-land.end: ; preds = %if.else139
- br i1 undef, label %land.lhs.true285, label %if.then315
-
-land.lhs.true285: ; preds = %land.end
- br i1 undef, label %if.end324, label %if.then322
-
-if.then315: ; preds = %land.end
- unreachable
-
-if.then322: ; preds = %land.lhs.true285
- unreachable
-
-if.end324: ; preds = %land.lhs.true285
- br i1 undef, label %if.end384, label %if.then358
-
-if.then358: ; preds = %if.end324
- unreachable
-
-if.end384: ; preds = %if.end324
- br i1 undef, label %if.end394, label %land.lhs.true387
-
-land.lhs.true387: ; preds = %if.end384
- unreachable
-
-if.end394: ; preds = %if.end384
- br i1 undef, label %if.end498, label %land.lhs.true399
-
-land.lhs.true399: ; preds = %if.end394
- br i1 undef, label %if.end498, label %if.then406
-
-if.then406: ; preds = %land.lhs.true399
- unreachable
-
-if.end498: ; preds = %land.lhs.true399, %if.end394
- br i1 undef, label %if.end514, label %if.then503
-
-if.then503: ; preds = %if.end498
- unreachable
-
-if.end514: ; preds = %if.end498
- br i1 undef, label %if.end585, label %if.then520
-
-if.then520: ; preds = %if.end514
- br i1 undef, label %lor.lhs.false547, label %if.then560
-
-lor.lhs.false547: ; preds = %if.then520
- unreachable
-
-if.then560: ; preds = %if.then520
- br i1 undef, label %if.end585, label %land.lhs.true566
-
-land.lhs.true566: ; preds = %if.then560
- br i1 undef, label %if.end585, label %if.then573
-
-if.then573: ; preds = %land.lhs.true566
- unreachable
-
-if.end585: ; preds = %land.lhs.true566, %if.then560, %if.end514
- br i1 undef, label %cond.true593, label %cond.false599
-
-cond.true593: ; preds = %if.end585
- unreachable
-
-cond.false599: ; preds = %if.end585
- br i1 undef, label %if.end647, label %if.then621
-
-if.then621: ; preds = %cond.false599
- br i1 undef, label %cond.true624, label %cond.false630
-
-cond.true624: ; preds = %if.then621
- br label %if.end647
-
-cond.false630: ; preds = %if.then621
- unreachable
-
-if.end647: ; preds = %cond.true624, %cond.false599
- br i1 undef, label %if.end723, label %if.then701
-
-if.then701: ; preds = %if.end647
- br label %if.end723
-
-if.end723: ; preds = %if.then701, %if.end647
- br i1 undef, label %if.else1090, label %if.then729
-
-if.then729: ; preds = %if.end723
- br i1 undef, label %if.end887, label %if.then812
-
-if.then812: ; preds = %if.then729
- unreachable
-
-if.end887: ; preds = %if.then729
- br i1 undef, label %if.end972, label %if.then893
-
-if.then893: ; preds = %if.end887
- br i1 undef, label %if.end919, label %if.then903
-
-if.then903: ; preds = %if.then893
- unreachable
-
-if.end919: ; preds = %if.then893
- br label %if.end972
-
-if.end972: ; preds = %if.end919, %if.end887
- %sline.0 = phi %struct.cline* [ undef, %if.end919 ], [ null, %if.end887 ] ; <%struct.cline*> [#uses=5]
- %bcs.0 = phi i32 [ undef, %if.end919 ], [ 0, %if.end887 ] ; <i32> [#uses=5]
- br i1 undef, label %if.end1146, label %land.lhs.true975
-
-land.lhs.true975: ; preds = %if.end972
- br i1 undef, label %if.end1146, label %if.then980
-
-if.then980: ; preds = %land.lhs.true975
- br i1 undef, label %cond.false1025, label %cond.false1004
-
-cond.false1004: ; preds = %if.then980
- unreachable
-
-cond.false1025: ; preds = %if.then980
- br i1 undef, label %if.end1146, label %if.then1071
-
-if.then1071: ; preds = %cond.false1025
- br i1 undef, label %if.then1074, label %if.end1081
-
-if.then1074: ; preds = %if.then1071
- br label %if.end1081
-
-if.end1081: ; preds = %if.then1074, %if.then1071
- %call1083 = call %struct.patprog* @patcompile(i8* undef, i32 0, i8** null) nounwind ssp ; <%struct.patprog*> [#uses=2]
- br i1 undef, label %if.end1146, label %if.then1086
-
-if.then1086: ; preds = %if.end1081
- br label %if.end1146
-
-if.else1090: ; preds = %if.end723
- br i1 undef, label %if.end1146, label %land.lhs.true1093
-
-land.lhs.true1093: ; preds = %if.else1090
- br i1 undef, label %if.end1146, label %if.then1098
-
-if.then1098: ; preds = %land.lhs.true1093
- unreachable
-
-if.end1146: ; preds = %land.lhs.true1093, %if.else1090, %if.then1086, %if.end1081, %cond.false1025, %land.lhs.true975, %if.end972
- %cp.0 = phi %struct.patprog* [ %call1083, %if.then1086 ], [ null, %if.end972 ], [ null, %land.lhs.true975 ], [ null, %cond.false1025 ], [ %call1083, %if.end1081 ], [ null, %if.else1090 ], [ null, %land.lhs.true1093 ] ; <%struct.patprog*> [#uses=1]
- %sline.1 = phi %struct.cline* [ %sline.0, %if.then1086 ], [ %sline.0, %if.end972 ], [ %sline.0, %land.lhs.true975 ], [ %sline.0, %cond.false1025 ], [ %sline.0, %if.end1081 ], [ null, %if.else1090 ], [ null, %land.lhs.true1093 ] ; <%struct.cline*> [#uses=1]
- %bcs.1 = phi i32 [ %bcs.0, %if.then1086 ], [ %bcs.0, %if.end972 ], [ %bcs.0, %land.lhs.true975 ], [ %bcs.0, %cond.false1025 ], [ %bcs.0, %if.end1081 ], [ 0, %if.else1090 ], [ 0, %land.lhs.true1093 ] ; <i32> [#uses=1]
- br i1 undef, label %if.end1307, label %do.body1270
-
-do.body1270: ; preds = %if.end1146
- unreachable
-
-if.end1307: ; preds = %if.end1146
- br i1 undef, label %if.end1318, label %if.then1312
-
-if.then1312: ; preds = %if.end1307
- unreachable
-
-if.end1318: ; preds = %if.end1307
- br i1 undef, label %for.cond1330.preheader, label %if.then1323
-
-if.then1323: ; preds = %if.end1318
- unreachable
-
-for.cond1330.preheader: ; preds = %if.end1318
- %call1587 = call i8* @comp_match(i8* undef, i8* undef, i8* undef, %struct.patprog* %cp.0, %struct.cline** undef, i32 0, %struct.brinfo** undef, i32 0, %struct.brinfo** undef, i32 %bcs.1, i32* undef) nounwind ssp ; <i8*> [#uses=0]
- %call1667 = call %struct.cmatch* @add_match_data(i32 0, i8* undef, i8* undef, %struct.cline* undef, i8* undef, i8* null, i8* undef, i8* undef, i8* undef, i8* undef, %struct.cline* null, i8* undef, %struct.cline* %sline.1, i8* undef, i32 undef, i32 undef) ssp ; <%struct.cmatch*> [#uses=0]
- unreachable
-}
-
-declare %struct.patprog* @patcompile(i8*, i32, i8**) ssp
-
-declare i8* @comp_match(i8*, i8*, i8*, %struct.patprog*, %struct.cline**, i32, %struct.brinfo**, i32, %struct.brinfo**, i32, i32*) ssp
-
-declare %struct.cmatch* @add_match_data(i32, i8*, i8*, %struct.cline*, i8*, i8*, i8*, i8*, i8*, i8*, %struct.cline*, i8*, %struct.cline*, i8*, i32, i32) nounwind ssp
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-17-StackColoringBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-07-17-StackColoringBug.ll
deleted file mode 100644
index 3e5bd34..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-17-StackColoringBug.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; RUN: llc < %s -mtriple=i386-pc-linux-gnu -disable-fp-elim -color-ss-with-regs | not grep dil
-; PR4552
-
-target triple = "i386-pc-linux-gnu"
-@g_8 = internal global i32 0 ; <i32*> [#uses=1]
-@g_72 = internal global i32 0 ; <i32*> [#uses=1]
-@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 (i32, i8, i8)* @uint84 to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define i32 @uint84(i32 %p_15, i8 signext %p_17, i8 signext %p_19) nounwind {
-entry:
- %g_72.promoted = load i32* @g_72 ; <i32> [#uses=1]
- %g_8.promoted = load i32* @g_8 ; <i32> [#uses=1]
- br label %bb
-
-bb: ; preds = %func_40.exit, %entry
- %g_8.tmp.1 = phi i32 [ %g_8.promoted, %entry ], [ %g_8.tmp.0, %func_40.exit ] ; <i32> [#uses=3]
- %g_72.tmp.1 = phi i32 [ %g_72.promoted, %entry ], [ %g_72.tmp.0, %func_40.exit ] ; <i32> [#uses=3]
- %retval12.i4.i.i = trunc i32 %g_8.tmp.1 to i8 ; <i8> [#uses=2]
- %0 = trunc i32 %g_72.tmp.1 to i8 ; <i8> [#uses=2]
- %1 = mul i8 %retval12.i4.i.i, %0 ; <i8> [#uses=1]
- %2 = icmp eq i8 %1, 0 ; <i1> [#uses=1]
- br i1 %2, label %bb2.i.i, label %bb.i.i
-
-bb.i.i: ; preds = %bb
- %3 = sext i8 %0 to i32 ; <i32> [#uses=1]
- %4 = and i32 %3, 50295 ; <i32> [#uses=1]
- %5 = icmp eq i32 %4, 0 ; <i1> [#uses=1]
- br i1 %5, label %bb2.i.i, label %func_55.exit.i
-
-bb2.i.i: ; preds = %bb.i.i, %bb
- br label %func_55.exit.i
-
-func_55.exit.i: ; preds = %bb2.i.i, %bb.i.i
- %g_72.tmp.2 = phi i32 [ 1, %bb2.i.i ], [ %g_72.tmp.1, %bb.i.i ] ; <i32> [#uses=1]
- %6 = phi i32 [ 1, %bb2.i.i ], [ %g_72.tmp.1, %bb.i.i ] ; <i32> [#uses=1]
- %7 = trunc i32 %6 to i8 ; <i8> [#uses=2]
- %8 = mul i8 %7, %retval12.i4.i.i ; <i8> [#uses=1]
- %9 = icmp eq i8 %8, 0 ; <i1> [#uses=1]
- br i1 %9, label %bb2.i4.i, label %bb.i3.i
-
-bb.i3.i: ; preds = %func_55.exit.i
- %10 = sext i8 %7 to i32 ; <i32> [#uses=1]
- %11 = and i32 %10, 50295 ; <i32> [#uses=1]
- %12 = icmp eq i32 %11, 0 ; <i1> [#uses=1]
- br i1 %12, label %bb2.i4.i, label %func_40.exit
-
-bb2.i4.i: ; preds = %bb.i3.i, %func_55.exit.i
- br label %func_40.exit
-
-func_40.exit: ; preds = %bb2.i4.i, %bb.i3.i
- %g_72.tmp.0 = phi i32 [ 1, %bb2.i4.i ], [ %g_72.tmp.2, %bb.i3.i ] ; <i32> [#uses=1]
- %phitmp = icmp sgt i32 %g_8.tmp.1, 0 ; <i1> [#uses=1]
- %g_8.tmp.0 = select i1 %phitmp, i32 %g_8.tmp.1, i32 1 ; <i32> [#uses=1]
- br label %bb
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-19-AsmExtraOperands.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-07-19-AsmExtraOperands.ll
deleted file mode 100644
index a0095ab..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-19-AsmExtraOperands.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86-64
-; PR4583
-
-define i32 @atomic_cmpset_long(i64* %dst, i64 %exp, i64 %src) nounwind ssp noredzone noimplicitfloat {
-entry:
- %0 = call i8 asm sideeffect "\09lock ; \09\09\09cmpxchgq $2,$1 ;\09 sete\09$0 ;\09\091:\09\09\09\09# atomic_cmpset_long", "={ax},=*m,r,{ax},*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* undef, i64 undef, i64 undef, i64* undef) nounwind ; <i8> [#uses=0]
- br label %1
-
-; <label>:1 ; preds = %entry
- ret i32 undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-20-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-07-20-CoalescerBug.ll
deleted file mode 100644
index e99edd6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-20-CoalescerBug.ll
+++ /dev/null
@@ -1,165 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10
-; PR4587
-; rdar://7072590
-
- %struct.re_pattern_buffer = type <{ i8*, i64, i64, i64, i8*, i64, i64, i8, i8, i8, i8, i8, i8, i8, i8 }>
-
-define fastcc i32 @regex_compile(i8* %pattern, i64 %size, i64 %syntax, %struct.re_pattern_buffer* nocapture %bufp) nounwind ssp {
-entry:
- br i1 undef, label %return, label %if.end
-
-if.end: ; preds = %entry
- %tmp35 = getelementptr %struct.re_pattern_buffer* %bufp, i64 0, i32 3 ; <i64*> [#uses=1]
- store i64 %syntax, i64* %tmp35
- store i32 undef, i32* undef
- br i1 undef, label %if.then66, label %if.end102
-
-if.then66: ; preds = %if.end
- br i1 false, label %if.else, label %if.then70
-
-if.then70: ; preds = %if.then66
- %call74 = call i8* @xrealloc(i8* undef, i64 32) nounwind ssp ; <i8*> [#uses=0]
- unreachable
-
-if.else: ; preds = %if.then66
- br i1 false, label %do.body86, label %if.end99
-
-do.body86: ; preds = %if.else
- br i1 false, label %do.end, label %if.then90
-
-if.then90: ; preds = %do.body86
- unreachable
-
-do.end: ; preds = %do.body86
- ret i32 12
-
-if.end99: ; preds = %if.else
- br label %if.end102
-
-if.end102: ; preds = %if.end99, %if.end
- br label %while.body
-
-while.body: ; preds = %if.end1126, %sw.bb532, %while.body, %if.end102
- %laststart.2 = phi i8* [ null, %if.end102 ], [ %laststart.7.ph, %if.end1126 ], [ %laststart.2, %sw.bb532 ], [ %laststart.2, %while.body ] ; <i8*> [#uses=6]
- %b.1 = phi i8* [ undef, %if.end102 ], [ %ctg29688, %if.end1126 ], [ %b.1, %sw.bb532 ], [ %b.1, %while.body ] ; <i8*> [#uses=5]
- br i1 undef, label %while.body, label %if.end127
-
-if.end127: ; preds = %while.body
- switch i32 undef, label %sw.bb532 [
- i32 123, label %handle_interval
- i32 92, label %do.body3527
- ]
-
-sw.bb532: ; preds = %if.end127
- br i1 undef, label %while.body, label %if.end808
-
-if.end808: ; preds = %sw.bb532
- br i1 undef, label %while.cond1267.preheader, label %if.then811
-
-while.cond1267.preheader: ; preds = %if.end808
- br i1 false, label %return, label %if.end1294
-
-if.then811: ; preds = %if.end808
- %call817 = call fastcc i8* @skip_one_char(i8* %laststart.2) ssp ; <i8*> [#uses=0]
- br i1 undef, label %cond.end834, label %lor.lhs.false827
-
-lor.lhs.false827: ; preds = %if.then811
- br label %cond.end834
-
-cond.end834: ; preds = %lor.lhs.false827, %if.then811
- br i1 undef, label %land.lhs.true838, label %while.cond979.preheader
-
-land.lhs.true838: ; preds = %cond.end834
- br i1 undef, label %if.then842, label %while.cond979.preheader
-
-if.then842: ; preds = %land.lhs.true838
- %conv851 = trunc i64 undef to i32 ; <i32> [#uses=1]
- br label %while.cond979.preheader
-
-while.cond979.preheader: ; preds = %if.then842, %land.lhs.true838, %cond.end834
- %startoffset.0.ph = phi i32 [ 0, %cond.end834 ], [ 0, %land.lhs.true838 ], [ %conv851, %if.then842 ] ; <i32> [#uses=2]
- %laststart.7.ph = phi i8* [ %laststart.2, %cond.end834 ], [ %laststart.2, %land.lhs.true838 ], [ %laststart.2, %if.then842 ] ; <i8*> [#uses=3]
- %b.4.ph = phi i8* [ %b.1, %cond.end834 ], [ %b.1, %land.lhs.true838 ], [ %b.1, %if.then842 ] ; <i8*> [#uses=3]
- %ctg29688 = getelementptr i8* %b.4.ph, i64 6 ; <i8*> [#uses=1]
- br label %while.cond979
-
-while.cond979: ; preds = %if.end1006, %while.cond979.preheader
- %cmp991 = icmp ugt i64 undef, 0 ; <i1> [#uses=1]
- br i1 %cmp991, label %do.body994, label %while.end1088
-
-do.body994: ; preds = %while.cond979
- br i1 undef, label %return, label %if.end1006
-
-if.end1006: ; preds = %do.body994
- %cmp1014 = icmp ugt i64 undef, 32768 ; <i1> [#uses=1]
- %storemerge10953 = select i1 %cmp1014, i64 32768, i64 undef ; <i64> [#uses=1]
- store i64 %storemerge10953, i64* undef
- br i1 false, label %return, label %while.cond979
-
-while.end1088: ; preds = %while.cond979
- br i1 undef, label %if.then1091, label %if.else1101
-
-if.then1091: ; preds = %while.end1088
- store i8 undef, i8* undef
- %idx.ext1132.pre = zext i32 %startoffset.0.ph to i64 ; <i64> [#uses=1]
- %add.ptr1133.pre = getelementptr i8* %laststart.7.ph, i64 %idx.ext1132.pre ; <i8*> [#uses=1]
- %sub.ptr.lhs.cast1135.pre = ptrtoint i8* %add.ptr1133.pre to i64 ; <i64> [#uses=1]
- br label %if.end1126
-
-if.else1101: ; preds = %while.end1088
- %cond1109 = select i1 undef, i32 18, i32 14 ; <i32> [#uses=1]
- %idx.ext1112 = zext i32 %startoffset.0.ph to i64 ; <i64> [#uses=1]
- %add.ptr1113 = getelementptr i8* %laststart.7.ph, i64 %idx.ext1112 ; <i8*> [#uses=2]
- %sub.ptr.rhs.cast1121 = ptrtoint i8* %add.ptr1113 to i64 ; <i64> [#uses=1]
- call fastcc void @insert_op1(i32 %cond1109, i8* %add.ptr1113, i32 undef, i8* %b.4.ph) ssp
- br label %if.end1126
-
-if.end1126: ; preds = %if.else1101, %if.then1091
- %sub.ptr.lhs.cast1135.pre-phi = phi i64 [ %sub.ptr.rhs.cast1121, %if.else1101 ], [ %sub.ptr.lhs.cast1135.pre, %if.then1091 ] ; <i64> [#uses=1]
- %add.ptr1128 = getelementptr i8* %b.4.ph, i64 3 ; <i8*> [#uses=1]
- %sub.ptr.rhs.cast1136 = ptrtoint i8* %add.ptr1128 to i64 ; <i64> [#uses=1]
- %sub.ptr.sub1137 = sub i64 %sub.ptr.lhs.cast1135.pre-phi, %sub.ptr.rhs.cast1136 ; <i64> [#uses=1]
- %sub.ptr.sub11378527 = trunc i64 %sub.ptr.sub1137 to i32 ; <i32> [#uses=1]
- %conv1139 = add i32 %sub.ptr.sub11378527, -3 ; <i32> [#uses=1]
- store i8 undef, i8* undef
- %shr10.i8599 = lshr i32 %conv1139, 8 ; <i32> [#uses=1]
- %conv6.i8600 = trunc i32 %shr10.i8599 to i8 ; <i8> [#uses=1]
- store i8 %conv6.i8600, i8* undef
- br label %while.body
-
-if.end1294: ; preds = %while.cond1267.preheader
- ret i32 12
-
-do.body3527: ; preds = %if.end127
- br i1 undef, label %do.end3536, label %if.then3531
-
-if.then3531: ; preds = %do.body3527
- unreachable
-
-do.end3536: ; preds = %do.body3527
- ret i32 5
-
-handle_interval: ; preds = %if.end127
- br i1 undef, label %do.body4547, label %cond.false4583
-
-do.body4547: ; preds = %handle_interval
- br i1 undef, label %do.end4556, label %if.then4551
-
-if.then4551: ; preds = %do.body4547
- unreachable
-
-do.end4556: ; preds = %do.body4547
- ret i32 9
-
-cond.false4583: ; preds = %handle_interval
- unreachable
-
-return: ; preds = %if.end1006, %do.body994, %while.cond1267.preheader, %entry
- ret i32 undef
-}
-
-declare i8* @xrealloc(i8*, i64) ssp
-
-declare fastcc i8* @skip_one_char(i8*) nounwind readonly ssp
-
-declare fastcc void @insert_op1(i32, i8*, i32, i8*) nounwind ssp
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-20-DAGCombineBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-07-20-DAGCombineBug.ll
deleted file mode 100644
index e83b3a7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-07-20-DAGCombineBug.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=x86
-
-@bsBuff = internal global i32 0 ; <i32*> [#uses=1]
-@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 ()* @bsGetUInt32 to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define fastcc i32 @bsGetUInt32() nounwind ssp {
-entry:
- %bsBuff.promoted44 = load i32* @bsBuff ; <i32> [#uses=1]
- %0 = add i32 0, -8 ; <i32> [#uses=1]
- %1 = lshr i32 %bsBuff.promoted44, %0 ; <i32> [#uses=1]
- %2 = shl i32 %1, 8 ; <i32> [#uses=1]
- br label %bb3.i17
-
-bb3.i9: ; preds = %bb3.i17
- br i1 false, label %bb2.i16, label %bb1.i15
-
-bb1.i15: ; preds = %bb3.i9
- unreachable
-
-bb2.i16: ; preds = %bb3.i9
- br label %bb3.i17
-
-bb3.i17: ; preds = %bb2.i16, %entry
- br i1 false, label %bb3.i9, label %bsR.exit18
-
-bsR.exit18: ; preds = %bb3.i17
- %3 = or i32 0, %2 ; <i32> [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-02-mmx-scalar-to-vector.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-08-02-mmx-scalar-to-vector.ll
deleted file mode 100644
index b9b09a3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-02-mmx-scalar-to-vector.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86-64
-; PR4669
-declare <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64>, i32)
-
-define <1 x i64> @test(i64 %t) {
-entry:
- %t1 = insertelement <1 x i64> undef, i64 %t, i32 0
- %t2 = tail call <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64> %t1, i32 48)
- ret <1 x i64> %t2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-06-branchfolder-crash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-08-06-branchfolder-crash.ll
deleted file mode 100644
index b329c91..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-06-branchfolder-crash.ll
+++ /dev/null
@@ -1,142 +0,0 @@
-; RUN: llc < %s -O3
-; PR4626
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-linux-gnu"
- at g_3 = common global i8 0, align 1 ; <i8*> [#uses=2]
-
-define signext i8 @safe_mul_func_int16_t_s_s(i32 %_si1, i8 signext %_si2) nounwind readnone {
-entry:
- %tobool = icmp eq i32 %_si1, 0 ; <i1> [#uses=1]
- %cmp = icmp sgt i8 %_si2, 0 ; <i1> [#uses=2]
- %or.cond = or i1 %cmp, %tobool ; <i1> [#uses=1]
- br i1 %or.cond, label %lor.rhs, label %land.lhs.true3
-
-land.lhs.true3: ; preds = %entry
- %conv5 = sext i8 %_si2 to i32 ; <i32> [#uses=1]
- %cmp7 = icmp slt i32 %conv5, %_si1 ; <i1> [#uses=1]
- br i1 %cmp7, label %cond.end, label %lor.rhs
-
-lor.rhs: ; preds = %land.lhs.true3, %entry
- %cmp10.not = icmp slt i32 %_si1, 1 ; <i1> [#uses=1]
- %or.cond23 = and i1 %cmp, %cmp10.not ; <i1> [#uses=1]
- br i1 %or.cond23, label %lor.end, label %cond.false
-
-lor.end: ; preds = %lor.rhs
- %tobool19 = icmp ne i8 %_si2, 0 ; <i1> [#uses=2]
- %lor.ext = zext i1 %tobool19 to i32 ; <i32> [#uses=1]
- br i1 %tobool19, label %cond.end, label %cond.false
-
-cond.false: ; preds = %lor.end, %lor.rhs
- %conv21 = sext i8 %_si2 to i32 ; <i32> [#uses=1]
- br label %cond.end
-
-cond.end: ; preds = %cond.false, %lor.end, %land.lhs.true3
- %cond = phi i32 [ %conv21, %cond.false ], [ 1, %land.lhs.true3 ], [ %lor.ext, %lor.end ] ; <i32> [#uses=1]
- %conv22 = trunc i32 %cond to i8 ; <i8> [#uses=1]
- ret i8 %conv22
-}
-
-define i32 @func_34(i8 signext %p_35) nounwind readonly {
-entry:
- %tobool = icmp eq i8 %p_35, 0 ; <i1> [#uses=1]
- br i1 %tobool, label %lor.lhs.false, label %if.then
-
-lor.lhs.false: ; preds = %entry
- %tmp1 = load i8* @g_3 ; <i8> [#uses=1]
- %tobool3 = icmp eq i8 %tmp1, 0 ; <i1> [#uses=1]
- br i1 %tobool3, label %return, label %if.then
-
-if.then: ; preds = %lor.lhs.false, %entry
- %tmp4 = load i8* @g_3 ; <i8> [#uses=1]
- %conv5 = sext i8 %tmp4 to i32 ; <i32> [#uses=1]
- ret i32 %conv5
-
-return: ; preds = %lor.lhs.false
- ret i32 0
-}
-
-define void @foo(i32 %p_5) noreturn nounwind {
-entry:
- %cmp = icmp sgt i32 %p_5, 0 ; <i1> [#uses=2]
- %call = tail call i32 @safe() nounwind ; <i32> [#uses=1]
- %conv1 = trunc i32 %call to i8 ; <i8> [#uses=3]
- %tobool.i = xor i1 %cmp, true ; <i1> [#uses=3]
- %cmp.i = icmp sgt i8 %conv1, 0 ; <i1> [#uses=3]
- %or.cond.i = or i1 %cmp.i, %tobool.i ; <i1> [#uses=1]
- br i1 %or.cond.i, label %lor.rhs.i, label %land.lhs.true3.i
-
-land.lhs.true3.i: ; preds = %entry
- %xor = zext i1 %cmp to i32 ; <i32> [#uses=1]
- %conv5.i = sext i8 %conv1 to i32 ; <i32> [#uses=1]
- %cmp7.i = icmp slt i32 %conv5.i, %xor ; <i1> [#uses=1]
- %cmp7.i.not = xor i1 %cmp7.i, true ; <i1> [#uses=1]
- %or.cond23.i = and i1 %cmp.i, %tobool.i ; <i1> [#uses=1]
- %or.cond = and i1 %cmp7.i.not, %or.cond23.i ; <i1> [#uses=1]
- br i1 %or.cond, label %lor.end.i, label %for.inc
-
-lor.rhs.i: ; preds = %entry
- %or.cond23.i.old = and i1 %cmp.i, %tobool.i ; <i1> [#uses=1]
- br i1 %or.cond23.i.old, label %lor.end.i, label %for.inc
-
-lor.end.i: ; preds = %lor.rhs.i, %land.lhs.true3.i
- %tobool19.i = icmp eq i8 %conv1, 0 ; <i1> [#uses=0]
- br label %for.inc
-
-for.inc: ; preds = %for.inc, %lor.end.i, %lor.rhs.i, %land.lhs.true3.i
- br label %for.inc
-}
-
-declare i32 @safe()
-
-define i32 @func_35(i8 signext %p_35) nounwind readonly {
-entry:
- %tobool = icmp eq i8 %p_35, 0 ; <i1> [#uses=1]
- br i1 %tobool, label %lor.lhs.false, label %if.then
-
-lor.lhs.false: ; preds = %entry
- %tmp1 = load i8* @g_3 ; <i8> [#uses=1]
- %tobool3 = icmp eq i8 %tmp1, 0 ; <i1> [#uses=1]
- br i1 %tobool3, label %return, label %if.then
-
-if.then: ; preds = %lor.lhs.false, %entry
- %tmp4 = load i8* @g_3 ; <i8> [#uses=1]
- %conv5 = sext i8 %tmp4 to i32 ; <i32> [#uses=1]
- ret i32 %conv5
-
-return: ; preds = %lor.lhs.false
- ret i32 0
-}
-
-define void @bar(i32 %p_5) noreturn nounwind {
-entry:
- %cmp = icmp sgt i32 %p_5, 0 ; <i1> [#uses=2]
- %call = tail call i32 @safe() nounwind ; <i32> [#uses=1]
- %conv1 = trunc i32 %call to i8 ; <i8> [#uses=3]
- %tobool.i = xor i1 %cmp, true ; <i1> [#uses=3]
- %cmp.i = icmp sgt i8 %conv1, 0 ; <i1> [#uses=3]
- %or.cond.i = or i1 %cmp.i, %tobool.i ; <i1> [#uses=1]
- br i1 %or.cond.i, label %lor.rhs.i, label %land.lhs.true3.i
-
-land.lhs.true3.i: ; preds = %entry
- %xor = zext i1 %cmp to i32 ; <i32> [#uses=1]
- %conv5.i = sext i8 %conv1 to i32 ; <i32> [#uses=1]
- %cmp7.i = icmp slt i32 %conv5.i, %xor ; <i1> [#uses=1]
- %cmp7.i.not = xor i1 %cmp7.i, true ; <i1> [#uses=1]
- %or.cond23.i = and i1 %cmp.i, %tobool.i ; <i1> [#uses=1]
- %or.cond = and i1 %cmp7.i.not, %or.cond23.i ; <i1> [#uses=1]
- br i1 %or.cond, label %lor.end.i, label %for.inc
-
-lor.rhs.i: ; preds = %entry
- %or.cond23.i.old = and i1 %cmp.i, %tobool.i ; <i1> [#uses=1]
- br i1 %or.cond23.i.old, label %lor.end.i, label %for.inc
-
-lor.end.i: ; preds = %lor.rhs.i, %land.lhs.true3.i
- %tobool19.i = icmp eq i8 %conv1, 0 ; <i1> [#uses=0]
- br label %for.inc
-
-for.inc: ; preds = %for.inc, %lor.end.i, %lor.rhs.i, %land.lhs.true3.i
- br label %for.inc
-}
-
-declare i32 @safe()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-06-inlineasm.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-08-06-inlineasm.ll
deleted file mode 100644
index cc2f3d8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-06-inlineasm.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s
-; PR4668
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-linux-gnu"
-
-define i32 @x(i32 %qscale) nounwind {
-entry:
- %temp_block = alloca [64 x i16], align 16 ; <[64 x i16]*> [#uses=0]
- %tmp = call i32 asm sideeffect "xor %edx, %edx", "={dx},~{dirflag},~{fpsr},~{flags}"() nounwind ; <i32> [#uses=1]
- br i1 undef, label %if.end78, label %if.then28
-
-if.then28: ; preds = %entry
- br label %if.end78
-
-if.end78: ; preds = %if.then28, %entry
- %level.1 = phi i32 [ %tmp, %if.then28 ], [ 0, %entry ] ; <i32> [#uses=1]
- %add.ptr1 = getelementptr [64 x i16]* null, i32 0, i32 %qscale ; <i16*> [#uses=1]
- %add.ptr2 = getelementptr [64 x i16]* null, i32 1, i32 %qscale ; <i16*> [#uses=1]
- %add.ptr3 = getelementptr [64 x i16]* null, i32 2, i32 %qscale ; <i16*> [#uses=1]
- %add.ptr4 = getelementptr [64 x i16]* null, i32 3, i32 %qscale ; <i16*> [#uses=1]
- %add.ptr5 = getelementptr [64 x i16]* null, i32 4, i32 %qscale ; <i16*> [#uses=1]
- %add.ptr6 = getelementptr [64 x i16]* null, i32 5, i32 %qscale ; <i16*> [#uses=1]
- %tmp1 = call i32 asm sideeffect "nop", "={ax},r,r,r,r,r,0,~{dirflag},~{fpsr},~{flags}"(i16* %add.ptr6, i16* %add.ptr5, i16* %add.ptr4, i16* %add.ptr3, i16* %add.ptr2, i16* %add.ptr1) nounwind ; <i32> [#uses=0]
- ret i32 %level.1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-08-CastError.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-08-08-CastError.ll
deleted file mode 100644
index 9456d91..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-08-CastError.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-mingw64 | grep movabsq
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-
-define <4 x float> @RecursiveTestFunc1(i8*) {
-EntryBlock:
- %1 = call <4 x float> inttoptr (i64 5367207198 to <4 x float> (i8*, float, float, float, float)*)(i8* %0, float 8.000000e+00, float 5.000000e+00, float 3.000000e+00, float 4.000000e+00) ; <<4 x float>> [#uses=1]
- ret <4 x float> %1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-12-badswitch.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-08-12-badswitch.ll
deleted file mode 100644
index a94fce0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-12-badswitch.ll
+++ /dev/null
@@ -1,176 +0,0 @@
-; RUN: llc < %s | grep LJT
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin10"
-
-declare void @f1() nounwind readnone
-declare void @f2() nounwind readnone
-declare void @f3() nounwind readnone
-declare void @f4() nounwind readnone
-declare void @f5() nounwind readnone
-declare void @f6() nounwind readnone
-declare void @f7() nounwind readnone
-declare void @f8() nounwind readnone
-declare void @f9() nounwind readnone
-declare void @f10() nounwind readnone
-declare void @f11() nounwind readnone
-declare void @f12() nounwind readnone
-declare void @f13() nounwind readnone
-declare void @f14() nounwind readnone
-declare void @f15() nounwind readnone
-declare void @f16() nounwind readnone
-declare void @f17() nounwind readnone
-declare void @f18() nounwind readnone
-declare void @f19() nounwind readnone
-declare void @f20() nounwind readnone
-declare void @f21() nounwind readnone
-declare void @f22() nounwind readnone
-declare void @f23() nounwind readnone
-declare void @f24() nounwind readnone
-declare void @f25() nounwind readnone
-declare void @f26() nounwind readnone
-
-define internal fastcc i32 @foo(i64 %bar) nounwind ssp {
-entry:
- br label %bb49
-
-bb49:
- switch i64 %bar, label %RETURN [
- i64 2, label %RRETURN_2
- i64 3, label %RRETURN_6
- i64 4, label %RRETURN_7
- i64 5, label %RRETURN_14
- i64 6, label %RRETURN_15
- i64 7, label %RRETURN_16
- i64 8, label %RRETURN_17
- i64 9, label %RRETURN_18
- i64 10, label %RRETURN_19
- i64 11, label %RRETURN_20
- i64 12, label %RRETURN_21
- i64 13, label %RRETURN_22
- i64 14, label %RRETURN_24
- i64 15, label %RRETURN_26
- i64 16, label %RRETURN_27
- i64 17, label %RRETURN_28
- i64 18, label %RRETURN_29
- i64 19, label %RRETURN_30
- i64 20, label %RRETURN_31
- i64 21, label %RRETURN_38
- i64 22, label %RRETURN_40
- i64 23, label %RRETURN_42
- i64 24, label %RRETURN_44
- i64 25, label %RRETURN_48
- i64 26, label %RRETURN_52
- i64 27, label %RRETURN_1
- ]
-
-RETURN:
- call void @f1()
- br label %EXIT
-
-RRETURN_2: ; preds = %bb49
- call void @f2()
- br label %EXIT
-
-RRETURN_6: ; preds = %bb49
- call void @f2()
- br label %EXIT
-
-RRETURN_7: ; preds = %bb49
- call void @f3()
- br label %EXIT
-
-RRETURN_14: ; preds = %bb49
- call void @f4()
- br label %EXIT
-
-RRETURN_15: ; preds = %bb49
- call void @f5()
- br label %EXIT
-
-RRETURN_16: ; preds = %bb49
- call void @f6()
- br label %EXIT
-
-RRETURN_17: ; preds = %bb49
- call void @f7()
- br label %EXIT
-
-RRETURN_18: ; preds = %bb49
- call void @f8()
- br label %EXIT
-
-RRETURN_19: ; preds = %bb49
- call void @f9()
- br label %EXIT
-
-RRETURN_20: ; preds = %bb49
- call void @f10()
- br label %EXIT
-
-RRETURN_21: ; preds = %bb49
- call void @f11()
- br label %EXIT
-
-RRETURN_22: ; preds = %bb49
- call void @f12()
- br label %EXIT
-
-RRETURN_24: ; preds = %bb49
- call void @f13()
- br label %EXIT
-
-RRETURN_26: ; preds = %bb49
- call void @f14()
- br label %EXIT
-
-RRETURN_27: ; preds = %bb49
- call void @f15()
- br label %EXIT
-
-RRETURN_28: ; preds = %bb49
- call void @f16()
- br label %EXIT
-
-RRETURN_29: ; preds = %bb49
- call void @f17()
- br label %EXIT
-
-RRETURN_30: ; preds = %bb49
- call void @f18()
- br label %EXIT
-
-RRETURN_31: ; preds = %bb49
- call void @f19()
- br label %EXIT
-
-RRETURN_38: ; preds = %bb49
- call void @f20()
- br label %EXIT
-
-RRETURN_40: ; preds = %bb49
- call void @f21()
- br label %EXIT
-
-RRETURN_42: ; preds = %bb49
- call void @f22()
- br label %EXIT
-
-RRETURN_44: ; preds = %bb49
- call void @f23()
- br label %EXIT
-
-RRETURN_48: ; preds = %bb49
- call void @f24()
- br label %EXIT
-
-RRETURN_52: ; preds = %bb49
- call void @f25()
- br label %EXIT
-
-RRETURN_1: ; preds = %bb49
- call void @f26()
- br label %EXIT
-
-EXIT:
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-14-Win64MemoryIndirectArg.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-08-14-Win64MemoryIndirectArg.ll
deleted file mode 100644
index 6b0d6d9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-14-Win64MemoryIndirectArg.ll
+++ /dev/null
@@ -1,57 +0,0 @@
-; RUN: llc < %s
-target triple = "x86_64-mingw"
-
-; ModuleID = 'mm.bc'
- type opaque ; type %0
- type opaque ; type %1
-
-define internal fastcc float @computeMipmappingRho(%0* %shaderExecutionStatePtr, i32 %index, <4 x float> %texCoord, <4 x float> %texCoordDX, <4 x float> %texCoordDY) readonly {
-indexCheckBlock:
- %indexCmp = icmp ugt i32 %index, 16 ; <i1> [#uses=1]
- br i1 %indexCmp, label %zeroReturnBlock, label %primitiveTextureFetchBlock
-
-primitiveTextureFetchBlock: ; preds = %indexCheckBlock
- %pointerArithmeticTmp = bitcast %0* %shaderExecutionStatePtr to i8* ; <i8*> [#uses=1]
- %pointerArithmeticTmp1 = getelementptr i8* %pointerArithmeticTmp, i64 1808 ; <i8*> [#uses=1]
- %pointerArithmeticTmp2 = bitcast i8* %pointerArithmeticTmp1 to %1** ; <%1**> [#uses=1]
- %primitivePtr = load %1** %pointerArithmeticTmp2 ; <%1*> [#uses=1]
- %pointerArithmeticTmp3 = bitcast %1* %primitivePtr to i8* ; <i8*> [#uses=1]
- %pointerArithmeticTmp4 = getelementptr i8* %pointerArithmeticTmp3, i64 19408 ; <i8*> [#uses=1]
- %pointerArithmeticTmp5 = bitcast i8* %pointerArithmeticTmp4 to %1** ; <%1**> [#uses=1]
- %primitiveTexturePtr = getelementptr %1** %pointerArithmeticTmp5, i32 %index ; <%1**> [#uses=1]
- %primitiveTexturePtr6 = load %1** %primitiveTexturePtr ; <%1*> [#uses=2]
- br label %textureCheckBlock
-
-textureCheckBlock: ; preds = %primitiveTextureFetchBlock
- %texturePtrInt = ptrtoint %1* %primitiveTexturePtr6 to i64 ; <i64> [#uses=1]
- %testTextureNULL = icmp eq i64 %texturePtrInt, 0 ; <i1> [#uses=1]
- br i1 %testTextureNULL, label %zeroReturnBlock, label %rhoCalculateBlock
-
-rhoCalculateBlock: ; preds = %textureCheckBlock
- %pointerArithmeticTmp7 = bitcast %1* %primitiveTexturePtr6 to i8* ; <i8*> [#uses=1]
- %pointerArithmeticTmp8 = getelementptr i8* %pointerArithmeticTmp7, i64 640 ; <i8*> [#uses=1]
- %pointerArithmeticTmp9 = bitcast i8* %pointerArithmeticTmp8 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %dimensionsPtr = load <4 x float>* %pointerArithmeticTmp9, align 1 ; <<4 x float>> [#uses=2]
- %texDiffDX = fsub <4 x float> %texCoordDX, %texCoord ; <<4 x float>> [#uses=1]
- %texDiffDY = fsub <4 x float> %texCoordDY, %texCoord ; <<4 x float>> [#uses=1]
- %ddx = fmul <4 x float> %texDiffDX, %dimensionsPtr ; <<4 x float>> [#uses=2]
- %ddx10 = fmul <4 x float> %texDiffDY, %dimensionsPtr ; <<4 x float>> [#uses=2]
- %ddxSquared = fmul <4 x float> %ddx, %ddx ; <<4 x float>> [#uses=3]
- %0 = shufflevector <4 x float> %ddxSquared, <4 x float> %ddxSquared, <4 x i32> <i32 1, i32 0, i32 0, i32 0> ; <<4 x float>> [#uses=1]
- %dxSquared = fadd <4 x float> %ddxSquared, %0 ; <<4 x float>> [#uses=1]
- %1 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %dxSquared) ; <<4 x float>> [#uses=1]
- %ddySquared = fmul <4 x float> %ddx10, %ddx10 ; <<4 x float>> [#uses=3]
- %2 = shufflevector <4 x float> %ddySquared, <4 x float> %ddySquared, <4 x i32> <i32 1, i32 0, i32 0, i32 0> ; <<4 x float>> [#uses=1]
- %dySquared = fadd <4 x float> %ddySquared, %2 ; <<4 x float>> [#uses=1]
- %3 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %dySquared) ; <<4 x float>> [#uses=1]
- %4 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %1, <4 x float> %3) ; <<4 x float>> [#uses=1]
- %rho = extractelement <4 x float> %4, i32 0 ; <float> [#uses=1]
- ret float %rho
-
-zeroReturnBlock: ; preds = %textureCheckBlock, %indexCheckBlock
- ret float 0.000000e+00
-}
-
-declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
-
-declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-19-LoadNarrowingMiscompile.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-08-19-LoadNarrowingMiscompile.ll
deleted file mode 100644
index 5f6cf3b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-19-LoadNarrowingMiscompile.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-pc-linux | FileCheck %s
-
- at a = external global i96, align 4
- at b = external global i64, align 8
-
-define void @c() nounwind {
-; CHECK: movl a+8, %eax
- %srcval1 = load i96* @a, align 4
- %sroa.store.elt2 = lshr i96 %srcval1, 64
- %tmp = trunc i96 %sroa.store.elt2 to i64
-; CHECK: movl %eax, b
-; CHECK: movl $0, b+4
- store i64 %tmp, i64* @b, align 8
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll
deleted file mode 100644
index 790fd88..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll
+++ /dev/null
@@ -1,69 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR4753
-
-; This function has a sub-register reuse undone.
-
- at uint8 = external global i32 ; <i32*> [#uses=3]
-
-declare signext i8 @foo(i32, i8 signext) nounwind readnone
-
-declare signext i8 @bar(i32, i8 signext) nounwind readnone
-
-define i32 @uint80(i8 signext %p_52) nounwind {
-entry:
- %0 = sext i8 %p_52 to i16 ; <i16> [#uses=1]
- %1 = tail call i32 @func_24(i16 zeroext %0, i8 signext ptrtoint (i8 (i32, i8)* @foo to i8)) nounwind; <i32> [#uses=1]
- %2 = trunc i32 %1 to i8 ; <i8> [#uses=1]
- %3 = or i8 %2, 1 ; <i8> [#uses=1]
- %4 = tail call i32 @safe(i32 1) nounwind ; <i32> [#uses=0]
- %5 = tail call i32 @func_24(i16 zeroext 0, i8 signext undef) nounwind; <i32> [#uses=1]
- %6 = trunc i32 %5 to i8 ; <i8> [#uses=1]
- %7 = xor i8 %3, %p_52 ; <i8> [#uses=1]
- %8 = xor i8 %7, %6 ; <i8> [#uses=1]
- %9 = icmp ne i8 %p_52, 0 ; <i1> [#uses=1]
- %10 = zext i1 %9 to i8 ; <i8> [#uses=1]
- %11 = tail call i32 @func_24(i16 zeroext ptrtoint (i8 (i32, i8)* @bar to i16), i8 signext %10) nounwind; <i32> [#uses=1]
- %12 = tail call i32 @func_24(i16 zeroext 0, i8 signext 1) nounwind; <i32> [#uses=0]
- br i1 undef, label %bb2, label %bb
-
-bb: ; preds = %entry
- br i1 undef, label %bb2, label %bb3
-
-bb2: ; preds = %bb, %entry
- br label %bb3
-
-bb3: ; preds = %bb2, %bb
- %iftmp.2.0 = phi i32 [ 0, %bb2 ], [ 1, %bb ] ; <i32> [#uses=1]
- %13 = icmp ne i32 %11, %iftmp.2.0 ; <i1> [#uses=1]
- %14 = tail call i32 @safe(i32 -2) nounwind ; <i32> [#uses=0]
- %15 = zext i1 %13 to i8 ; <i8> [#uses=1]
- %16 = tail call signext i8 @func_53(i8 signext undef, i8 signext 1, i8 signext %15, i8 signext %8) nounwind; <i8> [#uses=0]
- br i1 undef, label %bb5, label %bb4
-
-bb4: ; preds = %bb3
- %17 = volatile load i32* @uint8, align 4 ; <i32> [#uses=0]
- br label %bb5
-
-bb5: ; preds = %bb4, %bb3
- %18 = volatile load i32* @uint8, align 4 ; <i32> [#uses=0]
- %19 = sext i8 undef to i16 ; <i16> [#uses=1]
- %20 = tail call i32 @func_24(i16 zeroext %19, i8 signext 1) nounwind; <i32> [#uses=0]
- br i1 undef, label %return, label %bb6.preheader
-
-bb6.preheader: ; preds = %bb5
- %21 = sext i8 %p_52 to i32 ; <i32> [#uses=1]
- %22 = volatile load i32* @uint8, align 4 ; <i32> [#uses=0]
- %23 = tail call i32 (...)* @safefuncts(i32 %21, i32 1) nounwind; <i32> [#uses=0]
- unreachable
-
-return: ; preds = %bb5
- ret i32 undef
-}
-
-declare i32 @func_24(i16 zeroext, i8 signext)
-
-declare i32 @safe(i32)
-
-declare signext i8 @func_53(i8 signext, i8 signext, i8 signext, i8 signext)
-
-declare i32 @safefuncts(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-23-linkerprivate.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-08-23-linkerprivate.ll
deleted file mode 100644
index 3da8f00..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-08-23-linkerprivate.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-apple-darwin | FileCheck %s
-
-; ModuleID = '/Volumes/MacOS9/tests/WebKit/JavaScriptCore/profiler/ProfilerServer.mm'
-
-@"\01l_objc_msgSend_fixup_alloc" = linker_private hidden global i32 0, section "__DATA, __objc_msgrefs, coalesced", align 16 ; <i32*> [#uses=0]
-
-; CHECK: .globl l_objc_msgSend_fixup_alloc
-; CHECK: .weak_definition l_objc_msgSend_fixup_alloc
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-07-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-09-07-CoalescerBug.ll
deleted file mode 100644
index 41b4bc0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-07-CoalescerBug.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-freebsd7.2 -code-model=kernel | FileCheck %s
-; PR4689
-
-%struct.__s = type { [8 x i8] }
-%struct.pcb = type { i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i16, i8* }
-%struct.pcpu = type { i32*, i32*, i32*, i32*, %struct.pcb*, i64, i32, i32, i32, i32 }
-
-define i64 @hammer_time(i64 %modulep, i64 %physfree) nounwind ssp noredzone noimplicitfloat {
-; CHECK: hammer_time:
-; CHECK: movq $Xrsvd, %rax
-; CHECK: movq $Xrsvd, %rcx
-entry:
- br i1 undef, label %if.then, label %if.end
-
-if.then: ; preds = %entry
- br label %if.end
-
-if.end: ; preds = %if.then, %entry
- br label %for.body
-
-for.body: ; preds = %for.inc, %if.end
- switch i32 undef, label %if.then76 [
- i32 9, label %for.inc
- i32 10, label %for.inc
- i32 11, label %for.inc
- i32 12, label %for.inc
- ]
-
-if.then76: ; preds = %for.body
- unreachable
-
-for.inc: ; preds = %for.body, %for.body, %for.body, %for.body
- br i1 undef, label %for.end, label %for.body
-
-for.end: ; preds = %for.inc
- call void asm sideeffect "mov $1,%gs:$0", "=*m,r,~{dirflag},~{fpsr},~{flags}"(%struct.__s* bitcast (%struct.pcb** getelementptr (%struct.pcpu* null, i32 0, i32 4) to %struct.__s*), i64 undef) nounwind
- br label %for.body170
-
-for.body170: ; preds = %for.body170, %for.end
- store i64 or (i64 and (i64 or (i64 ptrtoint (void (i32, i32, i32, i32)* @Xrsvd to i64), i64 2097152), i64 2162687), i64 or (i64 or (i64 and (i64 shl (i64 ptrtoint (void (i32, i32, i32, i32)* @Xrsvd to i64), i64 32), i64 -281474976710656), i64 140737488355328), i64 15393162788864)), i64* undef
- br i1 undef, label %for.end175, label %for.body170
-
-for.end175: ; preds = %for.body170
- unreachable
-}
-
-declare void @Xrsvd(i32, i32, i32, i32) ssp noredzone noimplicitfloat
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll
deleted file mode 100644
index 7b5e871..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -relocation-model=pic -disable-fp-elim | FileCheck %s
-
-; It's not legal to fold a load from 32-bit stack slot into a 64-bit
-; instruction. If done, the instruction does a 64-bit load and that's not
-; safe. This can happen we a subreg_to_reg 0 has been coalesced. One
-; exception is when the instruction that folds the load is a move, then we
-; can simply turn it into a 32-bit load from the stack slot.
-; rdar://7170444
-
-%struct.ComplexType = type { i32 }
-
-define i32 @t(i32 %clientPort, i32 %pluginID, i32 %requestID, i32 %objectID, i64 %serverIdentifier, i64 %argumentsData, i32 %argumentsLength) ssp {
-entry:
-; CHECK: _t:
-; CHECK: movl 16(%rbp),
- %0 = zext i32 %argumentsLength to i64 ; <i64> [#uses=1]
- %1 = zext i32 %clientPort to i64 ; <i64> [#uses=1]
- %2 = inttoptr i64 %1 to %struct.ComplexType* ; <%struct.ComplexType*> [#uses=1]
- %3 = invoke i8* @pluginInstance(i8* undef, i32 %pluginID)
- to label %invcont unwind label %lpad ; <i8*> [#uses=1]
-
-invcont: ; preds = %entry
- %4 = add i32 %requestID, %pluginID ; <i32> [#uses=0]
- %5 = invoke zeroext i8 @invoke(i8* %3, i32 %objectID, i8* undef, i64 %argumentsData, i32 %argumentsLength, i64* undef, i32* undef)
- to label %invcont1 unwind label %lpad ; <i8> [#uses=0]
-
-invcont1: ; preds = %invcont
- %6 = getelementptr inbounds %struct.ComplexType* %2, i64 0, i32 0 ; <i32*> [#uses=1]
- %7 = load i32* %6, align 4 ; <i32> [#uses=1]
- invoke void @booleanAndDataReply(i32 %7, i32 undef, i32 %requestID, i32 undef, i64 undef, i32 undef)
- to label %invcont2 unwind label %lpad
-
-invcont2: ; preds = %invcont1
- ret i32 0
-
-lpad: ; preds = %invcont1, %invcont, %entry
- %8 = call i32 @vm_deallocate(i32 undef, i64 0, i64 %0) ; <i32> [#uses=0]
- unreachable
-}
-
-declare i32 @vm_deallocate(i32, i64, i64)
-
-declare i8* @pluginInstance(i8*, i32)
-
-declare zeroext i8 @invoke(i8*, i32, i8*, i64, i32, i64*, i32*)
-
-declare void @booleanAndDataReply(i32, i32, i32, i32, i64, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-10-SpillComments.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-09-10-SpillComments.ll
deleted file mode 100644
index f9ca861..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-10-SpillComments.ll
+++ /dev/null
@@ -1,108 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux | FileCheck %s
-
-; This test shouldn't require spills.
-
-; CHECK: subq $8, %rsp
-; CHECK-NOT: $rsp
-; CHECK: addq $8, %rsp
-
- %struct..0anon = type { i32 }
- %struct.rtvec_def = type { i32, [1 x %struct..0anon] }
- %struct.rtx_def = type { i16, i8, i8, [1 x %struct..0anon] }
- at rtx_format = external global [116 x i8*] ; <[116 x i8*]*> [#uses=1]
- at rtx_length = external global [117 x i32] ; <[117 x i32]*> [#uses=1]
-
-declare %struct.rtx_def* @fixup_memory_subreg(%struct.rtx_def*, %struct.rtx_def*, i32)
-
-define %struct.rtx_def* @walk_fixup_memory_subreg(%struct.rtx_def* %x, %struct.rtx_def* %insn) {
-entry:
- %tmp2 = icmp eq %struct.rtx_def* %x, null ; <i1> [#uses=1]
- br i1 %tmp2, label %UnifiedReturnBlock, label %cond_next
-
-cond_next: ; preds = %entry
- %tmp6 = getelementptr %struct.rtx_def* %x, i32 0, i32 0 ; <i16*> [#uses=1]
- %tmp7 = load i16* %tmp6 ; <i16> [#uses=2]
- %tmp78 = zext i16 %tmp7 to i32 ; <i32> [#uses=2]
- %tmp10 = icmp eq i16 %tmp7, 54 ; <i1> [#uses=1]
- br i1 %tmp10, label %cond_true13, label %cond_next32
-
-cond_true13: ; preds = %cond_next
- %tmp15 = getelementptr %struct.rtx_def* %x, i32 0, i32 3 ; <[1 x %struct..0anon]*> [#uses=1]
- %tmp1718 = bitcast [1 x %struct..0anon]* %tmp15 to %struct.rtx_def** ; <%struct.rtx_def**> [#uses=1]
- %tmp19 = load %struct.rtx_def** %tmp1718 ; <%struct.rtx_def*> [#uses=1]
- %tmp20 = getelementptr %struct.rtx_def* %tmp19, i32 0, i32 0 ; <i16*> [#uses=1]
- %tmp21 = load i16* %tmp20 ; <i16> [#uses=1]
- %tmp22 = icmp eq i16 %tmp21, 57 ; <i1> [#uses=1]
- br i1 %tmp22, label %cond_true25, label %cond_next32
-
-cond_true25: ; preds = %cond_true13
- %tmp29 = tail call %struct.rtx_def* @fixup_memory_subreg( %struct.rtx_def* %x, %struct.rtx_def* %insn, i32 1 ) nounwind ; <%struct.rtx_def*> [#uses=1]
- ret %struct.rtx_def* %tmp29
-
-cond_next32: ; preds = %cond_true13, %cond_next
- %tmp34 = getelementptr [116 x i8*]* @rtx_format, i32 0, i32 %tmp78 ; <i8**> [#uses=1]
- %tmp35 = load i8** %tmp34, align 4 ; <i8*> [#uses=1]
- %tmp37 = getelementptr [117 x i32]* @rtx_length, i32 0, i32 %tmp78 ; <i32*> [#uses=1]
- %tmp38 = load i32* %tmp37, align 4 ; <i32> [#uses=1]
- %i.011 = add i32 %tmp38, -1 ; <i32> [#uses=2]
- %tmp12513 = icmp sgt i32 %i.011, -1 ; <i1> [#uses=1]
- br i1 %tmp12513, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %bb123, %cond_next32
- %indvar = phi i32 [ %indvar.next26, %bb123 ], [ 0, %cond_next32 ] ; <i32> [#uses=2]
- %i.01.0 = sub i32 %i.011, %indvar ; <i32> [#uses=5]
- %tmp42 = getelementptr i8* %tmp35, i32 %i.01.0 ; <i8*> [#uses=2]
- %tmp43 = load i8* %tmp42 ; <i8> [#uses=1]
- switch i8 %tmp43, label %bb123 [
- i8 101, label %cond_true47
- i8 69, label %bb105.preheader
- ]
-
-cond_true47: ; preds = %bb
- %tmp52 = getelementptr %struct.rtx_def* %x, i32 0, i32 3, i32 %i.01.0 ; <%struct..0anon*> [#uses=1]
- %tmp5354 = bitcast %struct..0anon* %tmp52 to %struct.rtx_def** ; <%struct.rtx_def**> [#uses=1]
- %tmp55 = load %struct.rtx_def** %tmp5354 ; <%struct.rtx_def*> [#uses=1]
- %tmp58 = tail call %struct.rtx_def* @walk_fixup_memory_subreg( %struct.rtx_def* %tmp55, %struct.rtx_def* %insn ) nounwind ; <%struct.rtx_def*> [#uses=1]
- %tmp62 = getelementptr %struct.rtx_def* %x, i32 0, i32 3, i32 %i.01.0, i32 0 ; <i32*> [#uses=1]
- %tmp58.c = ptrtoint %struct.rtx_def* %tmp58 to i32 ; <i32> [#uses=1]
- store i32 %tmp58.c, i32* %tmp62
- %tmp6816 = load i8* %tmp42 ; <i8> [#uses=1]
- %tmp6917 = icmp eq i8 %tmp6816, 69 ; <i1> [#uses=1]
- br i1 %tmp6917, label %bb105.preheader, label %bb123
-
-bb105.preheader: ; preds = %cond_true47, %bb
- %tmp11020 = getelementptr %struct.rtx_def* %x, i32 0, i32 3, i32 %i.01.0 ; <%struct..0anon*> [#uses=1]
- %tmp11111221 = bitcast %struct..0anon* %tmp11020 to %struct.rtvec_def** ; <%struct.rtvec_def**> [#uses=3]
- %tmp11322 = load %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=1]
- %tmp11423 = getelementptr %struct.rtvec_def* %tmp11322, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp11524 = load i32* %tmp11423 ; <i32> [#uses=1]
- %tmp11625 = icmp eq i32 %tmp11524, 0 ; <i1> [#uses=1]
- br i1 %tmp11625, label %bb123, label %bb73
-
-bb73: ; preds = %bb73, %bb105.preheader
- %j.019 = phi i32 [ %tmp104, %bb73 ], [ 0, %bb105.preheader ] ; <i32> [#uses=3]
- %tmp81 = load %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=2]
- %tmp92 = getelementptr %struct.rtvec_def* %tmp81, i32 0, i32 1, i32 %j.019 ; <%struct..0anon*> [#uses=1]
- %tmp9394 = bitcast %struct..0anon* %tmp92 to %struct.rtx_def** ; <%struct.rtx_def**> [#uses=1]
- %tmp95 = load %struct.rtx_def** %tmp9394 ; <%struct.rtx_def*> [#uses=1]
- %tmp98 = tail call %struct.rtx_def* @walk_fixup_memory_subreg( %struct.rtx_def* %tmp95, %struct.rtx_def* %insn ) nounwind ; <%struct.rtx_def*> [#uses=1]
- %tmp101 = getelementptr %struct.rtvec_def* %tmp81, i32 0, i32 1, i32 %j.019, i32 0 ; <i32*> [#uses=1]
- %tmp98.c = ptrtoint %struct.rtx_def* %tmp98 to i32 ; <i32> [#uses=1]
- store i32 %tmp98.c, i32* %tmp101
- %tmp104 = add i32 %j.019, 1 ; <i32> [#uses=2]
- %tmp113 = load %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=1]
- %tmp114 = getelementptr %struct.rtvec_def* %tmp113, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp115 = load i32* %tmp114 ; <i32> [#uses=1]
- %tmp116 = icmp ult i32 %tmp104, %tmp115 ; <i1> [#uses=1]
- br i1 %tmp116, label %bb73, label %bb123
-
-bb123: ; preds = %bb73, %bb105.preheader, %cond_true47, %bb
- %i.0 = add i32 %i.01.0, -1 ; <i32> [#uses=1]
- %tmp125 = icmp sgt i32 %i.0, -1 ; <i1> [#uses=1]
- %indvar.next26 = add i32 %indvar, 1 ; <i32> [#uses=1]
- br i1 %tmp125, label %bb, label %UnifiedReturnBlock
-
-UnifiedReturnBlock: ; preds = %bb123, %cond_next32, %entry
- %UnifiedRetVal = phi %struct.rtx_def* [ null, %entry ], [ %x, %cond_next32 ], [ %x, %bb123 ] ; <%struct.rtx_def*> [#uses=1]
- ret %struct.rtx_def* %UnifiedRetVal
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-16-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-09-16-CoalescerBug.ll
deleted file mode 100644
index 18b5a17..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-16-CoalescerBug.ll
+++ /dev/null
@@ -1,64 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin10
-; PR4910
-
-%0 = type { i32, i32, i32, i32 }
-
- at boot_cpu_id = external global i32 ; <i32*> [#uses=1]
- at cpu_logical = common global i32 0, align 4 ; <i32*> [#uses=1]
-
-define void @topo_probe_0xb() nounwind ssp {
-entry:
- br label %for.cond
-
-for.cond: ; preds = %for.inc38, %entry
- %0 = phi i32 [ 0, %entry ], [ %inc40, %for.inc38 ] ; <i32> [#uses=3]
- %cmp = icmp slt i32 %0, 3 ; <i1> [#uses=1]
- br i1 %cmp, label %for.body, label %for.end41
-
-for.body: ; preds = %for.cond
- %1 = tail call %0 asm sideeffect "cpuid", "={ax},={bx},={cx},={dx},0,{cx},~{dirflag},~{fpsr},~{flags}"(i32 11, i32 %0) nounwind ; <%0> [#uses=3]
- %asmresult.i = extractvalue %0 %1, 0 ; <i32> [#uses=1]
- %asmresult10.i = extractvalue %0 %1, 2 ; <i32> [#uses=1]
- %and = and i32 %asmresult.i, 31 ; <i32> [#uses=2]
- %shr42 = lshr i32 %asmresult10.i, 8 ; <i32> [#uses=1]
- %and12 = and i32 %shr42, 255 ; <i32> [#uses=2]
- %cmp14 = icmp eq i32 %and12, 0 ; <i1> [#uses=1]
- br i1 %cmp14, label %for.end41, label %lor.lhs.false
-
-lor.lhs.false: ; preds = %for.body
- %asmresult9.i = extractvalue %0 %1, 1 ; <i32> [#uses=1]
- %and7 = and i32 %asmresult9.i, 65535 ; <i32> [#uses=1]
- %cmp16 = icmp eq i32 %and7, 0 ; <i1> [#uses=1]
- br i1 %cmp16, label %for.end41, label %for.cond17.preheader
-
-for.cond17.preheader: ; preds = %lor.lhs.false
- %tmp24 = load i32* @boot_cpu_id ; <i32> [#uses=1]
- %shr26 = ashr i32 %tmp24, %and ; <i32> [#uses=1]
- br label %for.body20
-
-for.body20: ; preds = %for.body20, %for.cond17.preheader
- %2 = phi i32 [ 0, %for.cond17.preheader ], [ %inc32, %for.body20 ] ; <i32> [#uses=2]
- %cnt.143 = phi i32 [ 0, %for.cond17.preheader ], [ %inc.cnt.1, %for.body20 ] ; <i32> [#uses=1]
- %shr23 = ashr i32 %2, %and ; <i32> [#uses=1]
- %cmp27 = icmp eq i32 %shr23, %shr26 ; <i1> [#uses=1]
- %inc = zext i1 %cmp27 to i32 ; <i32> [#uses=1]
- %inc.cnt.1 = add i32 %inc, %cnt.143 ; <i32> [#uses=2]
- %inc32 = add nsw i32 %2, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %inc32, 255 ; <i1> [#uses=1]
- br i1 %exitcond, label %for.end, label %for.body20
-
-for.end: ; preds = %for.body20
- %cmp34 = icmp eq i32 %and12, 1 ; <i1> [#uses=1]
- br i1 %cmp34, label %if.then35, label %for.inc38
-
-if.then35: ; preds = %for.end
- store i32 %inc.cnt.1, i32* @cpu_logical
- br label %for.inc38
-
-for.inc38: ; preds = %for.end, %if.then35
- %inc40 = add nsw i32 %0, 1 ; <i32> [#uses=1]
- br label %for.cond
-
-for.end41: ; preds = %lor.lhs.false, %for.body, %for.cond
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-19-SchedCustomLoweringBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-09-19-SchedCustomLoweringBug.ll
deleted file mode 100644
index 8cb538b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-19-SchedCustomLoweringBug.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin10 -post-RA-scheduler=true | FileCheck %s
-
-; PR4958
-
-define i32 @main() nounwind ssp {
-entry:
-; CHECK: main:
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- br label %bb
-
-bb: ; preds = %bb1, %entry
-; CHECK: addl $1
-; CHECK-NEXT: movl %e
-; CHECK-NEXT: adcl $0
- %i.0 = phi i64 [ 0, %entry ], [ %0, %bb1 ] ; <i64> [#uses=1]
- %0 = add nsw i64 %i.0, 1 ; <i64> [#uses=2]
- %1 = icmp sgt i32 0, 0 ; <i1> [#uses=1]
- br i1 %1, label %bb2, label %bb1
-
-bb1: ; preds = %bb
- %2 = icmp sle i64 %0, 1 ; <i1> [#uses=1]
- br i1 %2, label %bb, label %bb2
-
-bb2: ; preds = %bb1, %bb
- br label %return
-
-return: ; preds = %bb2
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-19-earlyclobber.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-09-19-earlyclobber.ll
deleted file mode 100644
index 4f44cae..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-19-earlyclobber.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s | FileCheck %s
-; ModuleID = '4964.c'
-; PR 4964
-; Registers other than RAX, RCX are OK, but they must be different.
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin10.0"
- type { i64, i64 } ; type %0
-
-define i64 @flsst(i64 %find) nounwind ssp {
-entry:
-; CHECK: FOO %rax %rcx
- %asmtmp = tail call %0 asm sideeffect "FOO $0 $1 $2", "=r,=&r,rm,~{dirflag},~{fpsr},~{flags},~{cc}"(i64 %find) nounwind ; <%0> [#uses=1]
- %asmresult = extractvalue %0 %asmtmp, 0 ; <i64> [#uses=1]
- ret i64 %asmresult
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-21-NoSpillLoopCount.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-09-21-NoSpillLoopCount.ll
deleted file mode 100644
index 80b8835..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-21-NoSpillLoopCount.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin10.0 -relocation-model=pic | FileCheck %s
-
-define void @dot(i16* nocapture %A, i32 %As, i16* nocapture %B, i32 %Bs, i16* nocapture %C, i32 %N) nounwind ssp {
-; CHECK: dot:
-; CHECK: decl %
-; CHECK-NEXT: jne
-entry:
- %0 = icmp sgt i32 %N, 0 ; <i1> [#uses=1]
- br i1 %0, label %bb, label %bb2
-
-bb: ; preds = %bb, %entry
- %i.03 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
- %sum.04 = phi i32 [ 0, %entry ], [ %10, %bb ] ; <i32> [#uses=1]
- %1 = mul i32 %i.03, %As ; <i32> [#uses=1]
- %2 = getelementptr i16* %A, i32 %1 ; <i16*> [#uses=1]
- %3 = load i16* %2, align 2 ; <i16> [#uses=1]
- %4 = sext i16 %3 to i32 ; <i32> [#uses=1]
- %5 = mul i32 %i.03, %Bs ; <i32> [#uses=1]
- %6 = getelementptr i16* %B, i32 %5 ; <i16*> [#uses=1]
- %7 = load i16* %6, align 2 ; <i16> [#uses=1]
- %8 = sext i16 %7 to i32 ; <i32> [#uses=1]
- %9 = mul i32 %8, %4 ; <i32> [#uses=1]
- %10 = add i32 %9, %sum.04 ; <i32> [#uses=2]
- %indvar.next = add i32 %i.03, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
- br i1 %exitcond, label %bb1.bb2_crit_edge, label %bb
-
-bb1.bb2_crit_edge: ; preds = %bb
- %phitmp = trunc i32 %10 to i16 ; <i16> [#uses=1]
- br label %bb2
-
-bb2: ; preds = %entry, %bb1.bb2_crit_edge
- %sum.0.lcssa = phi i16 [ %phitmp, %bb1.bb2_crit_edge ], [ 0, %entry ] ; <i16> [#uses=1]
- store i16 %sum.0.lcssa, i16* %C, align 2
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-22-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-09-22-CoalescerBug.ll
deleted file mode 100644
index 33f35f8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-22-CoalescerBug.ll
+++ /dev/null
@@ -1,124 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10
-
-define i32 @main(i32 %argc, i8** nocapture %argv) nounwind ssp {
-entry:
- br i1 undef, label %bb, label %bb1
-
-bb: ; preds = %entry
- ret i32 3
-
-bb1: ; preds = %entry
- br i1 undef, label %bb3, label %bb2
-
-bb2: ; preds = %bb1
- ret i32 3
-
-bb3: ; preds = %bb1
- br i1 undef, label %bb.i18, label %quantum_getwidth.exit
-
-bb.i18: ; preds = %bb.i18, %bb3
- br i1 undef, label %bb.i18, label %quantum_getwidth.exit
-
-quantum_getwidth.exit: ; preds = %bb.i18, %bb3
- br i1 undef, label %bb4, label %bb6.preheader
-
-bb4: ; preds = %quantum_getwidth.exit
- unreachable
-
-bb6.preheader: ; preds = %quantum_getwidth.exit
- br i1 undef, label %bb.i1, label %bb1.i2
-
-bb.i1: ; preds = %bb6.preheader
- unreachable
-
-bb1.i2: ; preds = %bb6.preheader
- br i1 undef, label %bb2.i, label %bb3.i4
-
-bb2.i: ; preds = %bb1.i2
- unreachable
-
-bb3.i4: ; preds = %bb1.i2
- br i1 undef, label %quantum_new_qureg.exit, label %bb4.i
-
-bb4.i: ; preds = %bb3.i4
- unreachable
-
-quantum_new_qureg.exit: ; preds = %bb3.i4
- br i1 undef, label %bb9, label %bb11.thread
-
-bb11.thread: ; preds = %quantum_new_qureg.exit
- %.cast.i = zext i32 undef to i64 ; <i64> [#uses=1]
- br label %bb.i37
-
-bb9: ; preds = %quantum_new_qureg.exit
- unreachable
-
-bb.i37: ; preds = %bb.i37, %bb11.thread
- %0 = load i64* undef, align 8 ; <i64> [#uses=1]
- %1 = shl i64 %0, %.cast.i ; <i64> [#uses=1]
- store i64 %1, i64* undef, align 8
- br i1 undef, label %bb.i37, label %quantum_addscratch.exit
-
-quantum_addscratch.exit: ; preds = %bb.i37
- br i1 undef, label %bb12.preheader, label %bb14
-
-bb12.preheader: ; preds = %quantum_addscratch.exit
- unreachable
-
-bb14: ; preds = %quantum_addscratch.exit
- br i1 undef, label %bb17, label %bb.nph
-
-bb.nph: ; preds = %bb14
- unreachable
-
-bb17: ; preds = %bb14
- br i1 undef, label %bb1.i7, label %quantum_measure.exit
-
-bb1.i7: ; preds = %bb17
- br label %quantum_measure.exit
-
-quantum_measure.exit: ; preds = %bb1.i7, %bb17
- switch i32 undef, label %bb21 [
- i32 -1, label %bb18
- i32 0, label %bb20
- ]
-
-bb18: ; preds = %quantum_measure.exit
- unreachable
-
-bb20: ; preds = %quantum_measure.exit
- unreachable
-
-bb21: ; preds = %quantum_measure.exit
- br i1 undef, label %quantum_frac_approx.exit, label %bb1.i
-
-bb1.i: ; preds = %bb21
- unreachable
-
-quantum_frac_approx.exit: ; preds = %bb21
- br i1 undef, label %bb25, label %bb26
-
-bb25: ; preds = %quantum_frac_approx.exit
- unreachable
-
-bb26: ; preds = %quantum_frac_approx.exit
- br i1 undef, label %quantum_gcd.exit, label %bb.i
-
-bb.i: ; preds = %bb.i, %bb26
- br i1 undef, label %quantum_gcd.exit, label %bb.i
-
-quantum_gcd.exit: ; preds = %bb.i, %bb26
- br i1 undef, label %bb32, label %bb33
-
-bb32: ; preds = %quantum_gcd.exit
- br i1 undef, label %bb.i.i, label %quantum_delete_qureg.exit
-
-bb.i.i: ; preds = %bb32
- ret i32 0
-
-quantum_delete_qureg.exit: ; preds = %bb32
- ret i32 0
-
-bb33: ; preds = %quantum_gcd.exit
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-23-LiveVariablesBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-09-23-LiveVariablesBug.ll
deleted file mode 100644
index d37d4b8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-09-23-LiveVariablesBug.ll
+++ /dev/null
@@ -1,91 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10
-
-; rdar://7247745
-
-%struct._lck_mtx_ = type { %union.anon }
-%struct._lck_rw_t_internal_ = type <{ i16, i8, i8, i32, i32, i32 }>
-%struct.anon = type { i64, i64, [2 x i8], i8, i8, i32 }
-%struct.memory_object = type { i32, i32, %struct.memory_object_pager_ops* }
-%struct.memory_object_control = type { i32, i32, %struct.vm_object* }
-%struct.memory_object_pager_ops = type { void (%struct.memory_object*)*, void (%struct.memory_object*)*, i32 (%struct.memory_object*, %struct.memory_object_control*, i32)*, i32 (%struct.memory_object*)*, i32 (%struct.memory_object*, i64, i32, i32, i32*)*, i32 (%struct.memory_object*, i64, i32, i64*, i32*, i32, i32, i32)*, i32 (%struct.memory_object*, i64, i32)*, i32 (%struct.memory_object*, i64, i64, i32)*, i32 (%struct.memory_object*, i64, i64, i32)*, i32 (%struct.memory_object*, i32)*, i32 (%struct.memory_object*)*, i8* }
-%struct.queue_entry = type { %struct.queue_entry*, %struct.queue_entry* }
-%struct.upl = type { %struct._lck_mtx_, i32, i32, %struct.vm_object*, i64, i32, i64, %struct.vm_object*, i32, i8* }
-%struct.upl_page_info = type <{ i32, i8, [3 x i8] }>
-%struct.vm_object = type { %struct.queue_entry, %struct._lck_rw_t_internal_, i64, %struct.vm_page*, i32, i32, i32, i32, %struct.vm_object*, %struct.vm_object*, i64, %struct.memory_object*, i64, %struct.memory_object_control*, i32, i16, i16, [2 x i8], i8, i8, %struct.queue_entry, %struct.queue_entry, i64, i32, i32, i32, i8*, i64, i8, i8, [2 x i8], %struct.queue_entry }
-%struct.vm_page = type { %struct.queue_entry, %struct.queue_entry, %struct.vm_page*, %struct.vm_object*, i64, [2 x i8], i8, i8, i32, i8, i8, i8, i8, i32 }
-%union.anon = type { %struct.anon }
-
-declare i64 @OSAddAtomic64(i64, i64*) noredzone noimplicitfloat
-
-define i32 @upl_commit_range(%struct.upl* %upl, i32 %offset, i32 %size, i32 %flags, %struct.upl_page_info* %page_list, i32 %count, i32* nocapture %empty) nounwind noredzone noimplicitfloat {
-entry:
- br i1 undef, label %if.then, label %if.end
-
-if.end: ; preds = %entry
- br i1 undef, label %if.end143, label %if.then136
-
-if.then136: ; preds = %if.end
- unreachable
-
-if.end143: ; preds = %if.end
- br i1 undef, label %if.else155, label %if.then153
-
-if.then153: ; preds = %if.end143
- br label %while.cond
-
-if.else155: ; preds = %if.end143
- unreachable
-
-while.cond: ; preds = %if.end1039, %if.then153
- br i1 undef, label %if.then1138, label %while.body
-
-while.body: ; preds = %while.cond
- br i1 undef, label %if.end260, label %if.then217
-
-if.then217: ; preds = %while.body
- br i1 undef, label %if.end260, label %if.then230
-
-if.then230: ; preds = %if.then217
- br i1 undef, label %if.then246, label %if.end260
-
-if.then246: ; preds = %if.then230
- br label %if.end260
-
-if.end260: ; preds = %if.then246, %if.then230, %if.then217, %while.body
- br i1 undef, label %if.end296, label %if.then266
-
-if.then266: ; preds = %if.end260
- unreachable
-
-if.end296: ; preds = %if.end260
- br i1 undef, label %if.end1039, label %if.end306
-
-if.end306: ; preds = %if.end296
- br i1 undef, label %if.end796, label %if.then616
-
-if.then616: ; preds = %if.end306
- br i1 undef, label %commit_next_page, label %do.body716
-
-do.body716: ; preds = %if.then616
- %call721 = call i64 @OSAddAtomic64(i64 1, i64* undef) nounwind noredzone noimplicitfloat ; <i64> [#uses=0]
- call void asm sideeffect "movq\090x0($0),%rdi\0A\09movq\090x8($0),%rsi\0A\09.section __DATA, __data\0A\09.globl __dtrace_probeDOLLAR${:uid}4794___vminfo____pgrec\0A\09__dtrace_probeDOLLAR${:uid}4794___vminfo____pgrec:.quad 1f\0A\09.text\0A\091:nop\0A\09nop\0A\09nop\0A\09", "r,~{memory},~{di},~{si},~{dirflag},~{fpsr},~{flags}"(i64* undef) nounwind
- br label %commit_next_page
-
-if.end796: ; preds = %if.end306
- unreachable
-
-commit_next_page: ; preds = %do.body716, %if.then616
- br i1 undef, label %if.end1039, label %if.then1034
-
-if.then1034: ; preds = %commit_next_page
- br label %if.end1039
-
-if.end1039: ; preds = %if.then1034, %commit_next_page, %if.end296
- br label %while.cond
-
-if.then1138: ; preds = %while.cond
- unreachable
-
-if.then: ; preds = %entry
- ret i32 4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-10-08-MachineLICMBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-10-08-MachineLICMBug.ll
deleted file mode 100644
index 91c5440..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-10-08-MachineLICMBug.ll
+++ /dev/null
@@ -1,264 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic -stats |& grep {machine-licm} | grep 2
-; rdar://7274692
-
-%0 = type { [125 x i32] }
-%1 = type { i32 }
-%struct..5sPragmaType = type { i8*, i32 }
-%struct.AggInfo = type { i8, i8, i32, %struct.ExprList*, i32, %struct.AggInfo_col*, i32, i32, i32, %struct.AggInfo_func*, i32, i32 }
-%struct.AggInfo_col = type { %struct.Table*, i32, i32, i32, i32, %struct.Expr* }
-%struct.AggInfo_func = type { %struct.Expr*, %struct.FuncDef*, i32, i32 }
-%struct.AuxData = type { i8*, void (i8*)* }
-%struct.Bitvec = type { i32, i32, i32, %0 }
-%struct.BtCursor = type { %struct.Btree*, %struct.BtShared*, %struct.BtCursor*, %struct.BtCursor*, i32 (i8*, i32, i8*, i32, i8*)*, i8*, i32, %struct.MemPage*, i32, %struct.CellInfo, i8, i8, i8*, i64, i32, i8, i32* }
-%struct.BtLock = type { %struct.Btree*, i32, i8, %struct.BtLock* }
-%struct.BtShared = type { %struct.Pager*, %struct.sqlite3*, %struct.BtCursor*, %struct.MemPage*, i8, i8, i8, i8, i8, i8, i8, i8, i32, i16, i16, i32, i32, i32, i32, i8, i32, i8*, void (i8*)*, %struct.sqlite3_mutex*, %struct.BusyHandler, i32, %struct.BtShared*, %struct.BtLock*, %struct.Btree* }
-%struct.Btree = type { %struct.sqlite3*, %struct.BtShared*, i8, i8, i8, i32, %struct.Btree*, %struct.Btree* }
-%struct.BtreeMutexArray = type { i32, [11 x %struct.Btree*] }
-%struct.BusyHandler = type { i32 (i8*, i32)*, i8*, i32 }
-%struct.CellInfo = type { i8*, i64, i32, i32, i16, i16, i16, i16 }
-%struct.CollSeq = type { i8*, i8, i8, i8*, i32 (i8*, i32, i8*, i32, i8*)*, void (i8*)* }
-%struct.Column = type { i8*, %struct.Expr*, i8*, i8*, i8, i8, i8, i8 }
-%struct.Context = type { i64, i32, %struct.Fifo }
-%struct.CountCtx = type { i64 }
-%struct.Cursor = type { %struct.BtCursor*, i32, i64, i64, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i64, %struct.Btree*, i32, i8*, i64, i8*, %struct.KeyInfo*, i32, i64, %struct.sqlite3_vtab_cursor*, %struct.sqlite3_module*, i32, i32, i32*, i32*, i8* }
-%struct.Db = type { i8*, %struct.Btree*, i8, i8, i8*, void (i8*)*, %struct.Schema* }
-%struct.DbPage = type { %struct.Pager*, i32, %struct.DbPage*, %struct.DbPage*, %struct.PagerLruLink, %struct.DbPage*, i8, i8, i8, i8, i8, i16, %struct.DbPage*, %struct.DbPage*, i8* }
-%struct.Expr = type { i8, i8, i16, %struct.CollSeq*, %struct.Expr*, %struct.Expr*, %struct.ExprList*, %struct..5sPragmaType, %struct..5sPragmaType, i32, i32, %struct.AggInfo*, i32, i32, %struct.Select*, %struct.Table*, i32 }
-%struct.ExprList = type { i32, i32, i32, %struct.ExprList_item* }
-%struct.ExprList_item = type { %struct.Expr*, i8*, i8, i8, i8 }
-%struct.FILE = type { i8*, i32, i32, i16, i16, %struct..5sPragmaType, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct..5sPragmaType, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct..5sPragmaType, i32, i64 }
-%struct.FKey = type { %struct.Table*, %struct.FKey*, i8*, %struct.FKey*, i32, %struct.sColMap*, i8, i8, i8, i8 }
-%struct.Fifo = type { i32, %struct.FifoPage*, %struct.FifoPage* }
-%struct.FifoPage = type { i32, i32, i32, %struct.FifoPage*, [1 x i64] }
-%struct.FuncDef = type { i16, i8, i8, i8, i8*, %struct.FuncDef*, void (%struct.sqlite3_context*, i32, %struct.Mem**)*, void (%struct.sqlite3_context*, i32, %struct.Mem**)*, void (%struct.sqlite3_context*)*, [1 x i8] }
-%struct.Hash = type { i8, i8, i32, i32, %struct.HashElem*, %struct._ht* }
-%struct.HashElem = type { %struct.HashElem*, %struct.HashElem*, i8*, i8*, i32 }
-%struct.IdList = type { %struct..5sPragmaType*, i32, i32 }
-%struct.Index = type { i8*, i32, i32*, i32*, %struct.Table*, i32, i8, i8, i8*, %struct.Index*, %struct.Schema*, i8*, i8** }
-%struct.KeyInfo = type { %struct.sqlite3*, i8, i8, i8, i32, i8*, [1 x %struct.CollSeq*] }
-%struct.Mem = type { %struct.CountCtx, double, %struct.sqlite3*, i8*, i32, i16, i8, i8, void (i8*)* }
-%struct.MemPage = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i16, i16, i16, i16, i16, i16, [5 x %struct._OvflCell], %struct.BtShared*, i8*, %struct.DbPage*, i32, %struct.MemPage* }
-%struct.Module = type { %struct.sqlite3_module*, i8*, i8*, void (i8*)* }
-%struct.Op = type { i8, i8, i8, i8, i32, i32, i32, %1 }
-%struct.Pager = type { %struct.sqlite3_vfs*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.Bitvec*, %struct.Bitvec*, i8*, i8*, i8*, i8*, %struct.sqlite3_file*, %struct.sqlite3_file*, %struct.sqlite3_file*, %struct.BusyHandler*, %struct.PagerLruList, %struct.DbPage*, %struct.DbPage*, %struct.DbPage*, i64, i64, i64, i64, i64, i32, void (%struct.DbPage*, i32)*, void (%struct.DbPage*, i32)*, i32, %struct.DbPage**, i8*, [16 x i8] }
-%struct.PagerLruLink = type { %struct.DbPage*, %struct.DbPage* }
-%struct.PagerLruList = type { %struct.DbPage*, %struct.DbPage*, %struct.DbPage* }
-%struct.Schema = type { i32, %struct.Hash, %struct.Hash, %struct.Hash, %struct.Hash, %struct.Table*, i8, i8, i16, i32, %struct.sqlite3* }
-%struct.Select = type { %struct.ExprList*, i8, i8, i8, i8, i8, i8, i8, %struct.SrcList*, %struct.Expr*, %struct.ExprList*, %struct.Expr*, %struct.ExprList*, %struct.Select*, %struct.Select*, %struct.Select*, %struct.Expr*, %struct.Expr*, i32, i32, [3 x i32] }
-%struct.SrcList = type { i16, i16, [1 x %struct.SrcList_item] }
-%struct.SrcList_item = type { i8*, i8*, i8*, %struct.Table*, %struct.Select*, i8, i8, i32, %struct.Expr*, %struct.IdList*, i64 }
-%struct.Table = type { i8*, i32, %struct.Column*, i32, %struct.Index*, i32, %struct.Select*, i32, %struct.Trigger*, %struct.FKey*, i8*, %struct.Expr*, i32, i8, i8, i8, i8, i8, i8, i8, %struct.Module*, %struct.sqlite3_vtab*, i32, i8**, %struct.Schema* }
-%struct.Trigger = type { i8*, i8*, i8, i8, %struct.Expr*, %struct.IdList*, %struct..5sPragmaType, %struct.Schema*, %struct.Schema*, %struct.TriggerStep*, %struct.Trigger* }
-%struct.TriggerStep = type { i32, i32, %struct.Trigger*, %struct.Select*, %struct..5sPragmaType, %struct.Expr*, %struct.ExprList*, %struct.IdList*, %struct.TriggerStep*, %struct.TriggerStep* }
-%struct.Vdbe = type { %struct.sqlite3*, %struct.Vdbe*, %struct.Vdbe*, i32, i32, %struct.Op*, i32, i32, i32*, %struct.Mem**, %struct.Mem*, i32, %struct.Cursor**, i32, %struct.Mem*, i8**, i32, i32, i32, %struct.Mem*, i32, i32, %struct.Fifo, i32, i32, %struct.Context*, i32, i32, i32, i32, i32, [25 x i32], i32, i32, i8**, i8*, %struct.Mem*, i8, i8, i8, i8, i8, i8, i32, i64, i32, %struct.BtreeMutexArray, i32, i8*, i32 }
-%struct.VdbeFunc = type { %struct.FuncDef*, i32, [1 x %struct.AuxData] }
-%struct._OvflCell = type { i8*, i16 }
-%struct._RuneCharClass = type { [14 x i8], i32 }
-%struct._RuneEntry = type { i32, i32, i32, i32* }
-%struct._RuneLocale = type { [8 x i8], [32 x i8], i32 (i8*, i32, i8**)*, i32 (i32, i8*, i32, i8**)*, i32, [256 x i32], [256 x i32], [256 x i32], %struct._RuneRange, %struct._RuneRange, %struct._RuneRange, i8*, i32, i32, %struct._RuneCharClass* }
-%struct._RuneRange = type { i32, %struct._RuneEntry* }
-%struct.__sFILEX = type opaque
-%struct._ht = type { i32, %struct.HashElem* }
-%struct.callback_data = type { %struct.sqlite3*, i32, i32, %struct.FILE*, i32, i32, i32, i8*, [20 x i8], [100 x i32], [100 x i32], [20 x i8], %struct.previous_mode_data, [1024 x i8], i8* }
-%struct.previous_mode_data = type { i32, i32, i32, [100 x i32] }
-%struct.sColMap = type { i32, i8* }
-%struct.sqlite3 = type { %struct.sqlite3_vfs*, i32, %struct.Db*, i32, i32, i32, i32, i8, i8, i8, i8, i32, %struct.CollSeq*, i64, i64, i32, i32, i32, %struct.sqlite3_mutex*, %struct.sqlite3InitInfo, i32, i8**, %struct.Vdbe*, i32, void (i8*, i8*)*, i8*, void (i8*, i8*, i64)*, i8*, i8*, i32 (i8*)*, i8*, void (i8*)*, i8*, void (i8*, i32, i8*, i8*, i64)*, void (i8*, %struct.sqlite3*, i32, i8*)*, void (i8*, %struct.sqlite3*, i32, i8*)*, i8*, %struct.Mem*, i8*, i8*, %union.anon, i32 (i8*, i32, i8*, i8*, i8*, i8*)*, i8*, i32 (i8*)*, i8*, i32, %struct.Hash, %struct.Table*, %struct.sqlite3_vtab**, i32, %struct.Hash, %struct.Hash, %struct.BusyHandler, i32, [2 x %struct.Db], i8 }
-%struct.sqlite3InitInfo = type { i32, i32, i8 }
-%struct.sqlite3_context = type { %struct.FuncDef*, %struct.VdbeFunc*, %struct.Mem, %struct.Mem*, i32, %struct.CollSeq* }
-%struct.sqlite3_file = type { %struct.sqlite3_io_methods* }
-%struct.sqlite3_index_constraint = type { i32, i8, i8, i32 }
-%struct.sqlite3_index_constraint_usage = type { i32, i8 }
-%struct.sqlite3_index_info = type { i32, %struct.sqlite3_index_constraint*, i32, %struct.sqlite3_index_constraint_usage*, %struct.sqlite3_index_constraint_usage*, i32, i8*, i32, i32, double }
-%struct.sqlite3_io_methods = type { i32, i32 (%struct.sqlite3_file*)*, i32 (%struct.sqlite3_file*, i8*, i32, i64)*, i32 (%struct.sqlite3_file*, i8*, i32, i64)*, i32 (%struct.sqlite3_file*, i64)*, i32 (%struct.sqlite3_file*, i32)*, i32 (%struct.sqlite3_file*, i64*)*, i32 (%struct.sqlite3_file*, i32)*, i32 (%struct.sqlite3_file*, i32)*, i32 (%struct.sqlite3_file*)*, i32 (%struct.sqlite3_file*, i32, i8*)*, i32 (%struct.sqlite3_file*)*, i32 (%struct.sqlite3_file*)* }
-%struct.sqlite3_module = type { i32, i32 (%struct.sqlite3*, i8*, i32, i8**, %struct.sqlite3_vtab**, i8**)*, i32 (%struct.sqlite3*, i8*, i32, i8**, %struct.sqlite3_vtab**, i8**)*, i32 (%struct.sqlite3_vtab*, %struct.sqlite3_index_info*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*, %struct.sqlite3_vtab_cursor**)*, i32 (%struct.sqlite3_vtab_cursor*)*, i32 (%struct.sqlite3_vtab_cursor*, i32, i8*, i32, %struct.Mem**)*, i32 (%struct.sqlite3_vtab_cursor*)*, i32 (%struct.sqlite3_vtab_cursor*)*, i32 (%struct.sqlite3_vtab_cursor*, %struct.sqlite3_context*, i32)*, i32 (%struct.sqlite3_vtab_cursor*, i64*)*, i32 (%struct.sqlite3_vtab*, i32, %struct.Mem**, i64*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*, i32, i8*, void (%struct.sqlite3_context*, i32, %struct.Mem**)**, i8**)*, i32 (%struct.sqlite3_vtab*, i8*)* }
-%struct.sqlite3_mutex = type opaque
-%struct.sqlite3_vfs = type { i32, i32, i32, %struct.sqlite3_vfs*, i8*, i8*, i32 (%struct.sqlite3_vfs*, i8*, %struct.sqlite3_file*, i32, i32*)*, i32 (%struct.sqlite3_vfs*, i8*, i32)*, i32 (%struct.sqlite3_vfs*, i8*, i32)*, i32 (%struct.sqlite3_vfs*, i32, i8*)*, i32 (%struct.sqlite3_vfs*, i8*, i32, i8*)*, i8* (%struct.sqlite3_vfs*, i8*)*, void (%struct.sqlite3_vfs*, i32, i8*)*, i8* (%struct.sqlite3_vfs*, i8*, i8*)*, void (%struct.sqlite3_vfs*, i8*)*, i32 (%struct.sqlite3_vfs*, i32, i8*)*, i32 (%struct.sqlite3_vfs*, i32)*, i32 (%struct.sqlite3_vfs*, double*)* }
-%struct.sqlite3_vtab = type { %struct.sqlite3_module*, i32, i8* }
-%struct.sqlite3_vtab_cursor = type { %struct.sqlite3_vtab* }
-%union.anon = type { double }
-
- at _DefaultRuneLocale = external global %struct._RuneLocale ; <%struct._RuneLocale*> [#uses=2]
- at __stderrp = external global %struct.FILE* ; <%struct.FILE**> [#uses=1]
- at .str10 = internal constant [16 x i8] c"Out of memory!\0A\00", align 1 ; <[16 x i8]*> [#uses=1]
- at llvm.used = appending global [1 x i8*] [i8* bitcast (void (%struct.callback_data*, i8*)* @set_table_name to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define fastcc void @set_table_name(%struct.callback_data* nocapture %p, i8* %zName) nounwind ssp {
-entry:
- %0 = getelementptr inbounds %struct.callback_data* %p, i32 0, i32 7 ; <i8**> [#uses=3]
- %1 = load i8** %0, align 4 ; <i8*> [#uses=2]
- %2 = icmp eq i8* %1, null ; <i1> [#uses=1]
- br i1 %2, label %bb1, label %bb
-
-bb: ; preds = %entry
- free i8* %1
- store i8* null, i8** %0, align 4
- br label %bb1
-
-bb1: ; preds = %bb, %entry
- %3 = icmp eq i8* %zName, null ; <i1> [#uses=1]
- br i1 %3, label %return, label %bb2
-
-bb2: ; preds = %bb1
- %4 = load i8* %zName, align 1 ; <i8> [#uses=2]
- %5 = zext i8 %4 to i32 ; <i32> [#uses=2]
- %6 = icmp sgt i8 %4, -1 ; <i1> [#uses=1]
- br i1 %6, label %bb.i.i, label %bb1.i.i
-
-bb.i.i: ; preds = %bb2
- %7 = getelementptr inbounds %struct._RuneLocale* @_DefaultRuneLocale, i32 0, i32 5, i32 %5 ; <i32*> [#uses=1]
- %8 = load i32* %7, align 4 ; <i32> [#uses=1]
- %9 = and i32 %8, 256 ; <i32> [#uses=1]
- br label %isalpha.exit
-
-bb1.i.i: ; preds = %bb2
- %10 = tail call i32 @__maskrune(i32 %5, i32 256) nounwind ; <i32> [#uses=1]
- br label %isalpha.exit
-
-isalpha.exit: ; preds = %bb1.i.i, %bb.i.i
- %storemerge.in.in.i.i = phi i32 [ %9, %bb.i.i ], [ %10, %bb1.i.i ] ; <i32> [#uses=1]
- %storemerge.in.i.i = icmp eq i32 %storemerge.in.in.i.i, 0 ; <i1> [#uses=1]
- br i1 %storemerge.in.i.i, label %bb3, label %bb5
-
-bb3: ; preds = %isalpha.exit
- %11 = load i8* %zName, align 1 ; <i8> [#uses=2]
- %12 = icmp eq i8 %11, 95 ; <i1> [#uses=1]
- br i1 %12, label %bb5, label %bb12.preheader
-
-bb5: ; preds = %bb3, %isalpha.exit
- %.pre = load i8* %zName, align 1 ; <i8> [#uses=1]
- br label %bb12.preheader
-
-bb12.preheader: ; preds = %bb5, %bb3
- %13 = phi i8 [ %.pre, %bb5 ], [ %11, %bb3 ] ; <i8> [#uses=1]
- %needQuote.1.ph = phi i32 [ 0, %bb5 ], [ 1, %bb3 ] ; <i32> [#uses=2]
- %14 = icmp eq i8 %13, 0 ; <i1> [#uses=1]
- br i1 %14, label %bb13, label %bb7
-
-bb7: ; preds = %bb11, %bb12.preheader
- %i.011 = phi i32 [ %tmp17, %bb11 ], [ 0, %bb12.preheader ] ; <i32> [#uses=2]
- %n.110 = phi i32 [ %26, %bb11 ], [ 0, %bb12.preheader ] ; <i32> [#uses=3]
- %needQuote.19 = phi i32 [ %needQuote.0, %bb11 ], [ %needQuote.1.ph, %bb12.preheader ] ; <i32> [#uses=2]
- %scevgep16 = getelementptr i8* %zName, i32 %i.011 ; <i8*> [#uses=2]
- %tmp17 = add i32 %i.011, 1 ; <i32> [#uses=2]
- %scevgep18 = getelementptr i8* %zName, i32 %tmp17 ; <i8*> [#uses=1]
- %15 = load i8* %scevgep16, align 1 ; <i8> [#uses=2]
- %16 = zext i8 %15 to i32 ; <i32> [#uses=2]
- %17 = icmp sgt i8 %15, -1 ; <i1> [#uses=1]
- br i1 %17, label %bb.i.i2, label %bb1.i.i3
-
-bb.i.i2: ; preds = %bb7
- %18 = getelementptr inbounds %struct._RuneLocale* @_DefaultRuneLocale, i32 0, i32 5, i32 %16 ; <i32*> [#uses=1]
- %19 = load i32* %18, align 4 ; <i32> [#uses=1]
- %20 = and i32 %19, 1280 ; <i32> [#uses=1]
- br label %isalnum.exit
-
-bb1.i.i3: ; preds = %bb7
- %21 = tail call i32 @__maskrune(i32 %16, i32 1280) nounwind ; <i32> [#uses=1]
- br label %isalnum.exit
-
-isalnum.exit: ; preds = %bb1.i.i3, %bb.i.i2
- %storemerge.in.in.i.i4 = phi i32 [ %20, %bb.i.i2 ], [ %21, %bb1.i.i3 ] ; <i32> [#uses=1]
- %storemerge.in.i.i5 = icmp eq i32 %storemerge.in.in.i.i4, 0 ; <i1> [#uses=1]
- br i1 %storemerge.in.i.i5, label %bb8, label %bb11
-
-bb8: ; preds = %isalnum.exit
- %22 = load i8* %scevgep16, align 1 ; <i8> [#uses=2]
- %23 = icmp eq i8 %22, 95 ; <i1> [#uses=1]
- br i1 %23, label %bb11, label %bb9
-
-bb9: ; preds = %bb8
- %24 = icmp eq i8 %22, 39 ; <i1> [#uses=1]
- %25 = zext i1 %24 to i32 ; <i32> [#uses=1]
- %.n.1 = add i32 %n.110, %25 ; <i32> [#uses=1]
- br label %bb11
-
-bb11: ; preds = %bb9, %bb8, %isalnum.exit
- %needQuote.0 = phi i32 [ 1, %bb9 ], [ %needQuote.19, %isalnum.exit ], [ %needQuote.19, %bb8 ] ; <i32> [#uses=2]
- %n.0 = phi i32 [ %.n.1, %bb9 ], [ %n.110, %isalnum.exit ], [ %n.110, %bb8 ] ; <i32> [#uses=1]
- %26 = add nsw i32 %n.0, 1 ; <i32> [#uses=2]
- %27 = load i8* %scevgep18, align 1 ; <i8> [#uses=1]
- %28 = icmp eq i8 %27, 0 ; <i1> [#uses=1]
- br i1 %28, label %bb13, label %bb7
-
-bb13: ; preds = %bb11, %bb12.preheader
- %n.1.lcssa = phi i32 [ 0, %bb12.preheader ], [ %26, %bb11 ] ; <i32> [#uses=2]
- %needQuote.1.lcssa = phi i32 [ %needQuote.1.ph, %bb12.preheader ], [ %needQuote.0, %bb11 ] ; <i32> [#uses=1]
- %29 = add nsw i32 %n.1.lcssa, 2 ; <i32> [#uses=1]
- %30 = icmp eq i32 %needQuote.1.lcssa, 0 ; <i1> [#uses=3]
- %n.1. = select i1 %30, i32 %n.1.lcssa, i32 %29 ; <i32> [#uses=1]
- %31 = add nsw i32 %n.1., 1 ; <i32> [#uses=1]
- %32 = malloc i8, i32 %31 ; <i8*> [#uses=7]
- store i8* %32, i8** %0, align 4
- %33 = icmp eq i8* %32, null ; <i1> [#uses=1]
- br i1 %33, label %bb16, label %bb17
-
-bb16: ; preds = %bb13
- %34 = load %struct.FILE** @__stderrp, align 4 ; <%struct.FILE*> [#uses=1]
- %35 = bitcast %struct.FILE* %34 to i8* ; <i8*> [#uses=1]
- %36 = tail call i32 @"\01_fwrite$UNIX2003"(i8* getelementptr inbounds ([16 x i8]* @.str10, i32 0, i32 0), i32 1, i32 15, i8* %35) nounwind ; <i32> [#uses=0]
- tail call void @exit(i32 1) noreturn nounwind
- unreachable
-
-bb17: ; preds = %bb13
- br i1 %30, label %bb23.preheader, label %bb18
-
-bb18: ; preds = %bb17
- store i8 39, i8* %32, align 4
- br label %bb23.preheader
-
-bb23.preheader: ; preds = %bb18, %bb17
- %n.3.ph = phi i32 [ 1, %bb18 ], [ 0, %bb17 ] ; <i32> [#uses=2]
- %37 = load i8* %zName, align 1 ; <i8> [#uses=1]
- %38 = icmp eq i8 %37, 0 ; <i1> [#uses=1]
- br i1 %38, label %bb24, label %bb20
-
-bb20: ; preds = %bb22, %bb23.preheader
- %storemerge18 = phi i32 [ %tmp, %bb22 ], [ 0, %bb23.preheader ] ; <i32> [#uses=2]
- %n.37 = phi i32 [ %n.4, %bb22 ], [ %n.3.ph, %bb23.preheader ] ; <i32> [#uses=3]
- %scevgep = getelementptr i8* %zName, i32 %storemerge18 ; <i8*> [#uses=1]
- %tmp = add i32 %storemerge18, 1 ; <i32> [#uses=2]
- %scevgep15 = getelementptr i8* %zName, i32 %tmp ; <i8*> [#uses=1]
- %39 = load i8* %scevgep, align 1 ; <i8> [#uses=2]
- %40 = getelementptr inbounds i8* %32, i32 %n.37 ; <i8*> [#uses=1]
- store i8 %39, i8* %40, align 1
- %41 = add nsw i32 %n.37, 1 ; <i32> [#uses=2]
- %42 = icmp eq i8 %39, 39 ; <i1> [#uses=1]
- br i1 %42, label %bb21, label %bb22
-
-bb21: ; preds = %bb20
- %43 = getelementptr inbounds i8* %32, i32 %41 ; <i8*> [#uses=1]
- store i8 39, i8* %43, align 1
- %44 = add nsw i32 %n.37, 2 ; <i32> [#uses=1]
- br label %bb22
-
-bb22: ; preds = %bb21, %bb20
- %n.4 = phi i32 [ %44, %bb21 ], [ %41, %bb20 ] ; <i32> [#uses=2]
- %45 = load i8* %scevgep15, align 1 ; <i8> [#uses=1]
- %46 = icmp eq i8 %45, 0 ; <i1> [#uses=1]
- br i1 %46, label %bb24, label %bb20
-
-bb24: ; preds = %bb22, %bb23.preheader
- %n.3.lcssa = phi i32 [ %n.3.ph, %bb23.preheader ], [ %n.4, %bb22 ] ; <i32> [#uses=3]
- br i1 %30, label %bb26, label %bb25
-
-bb25: ; preds = %bb24
- %47 = getelementptr inbounds i8* %32, i32 %n.3.lcssa ; <i8*> [#uses=1]
- store i8 39, i8* %47, align 1
- %48 = add nsw i32 %n.3.lcssa, 1 ; <i32> [#uses=1]
- br label %bb26
-
-bb26: ; preds = %bb25, %bb24
- %n.5 = phi i32 [ %48, %bb25 ], [ %n.3.lcssa, %bb24 ] ; <i32> [#uses=1]
- %49 = getelementptr inbounds i8* %32, i32 %n.5 ; <i8*> [#uses=1]
- store i8 0, i8* %49, align 1
- ret void
-
-return: ; preds = %bb1
- ret void
-}
-
-declare i32 @"\01_fwrite$UNIX2003"(i8*, i32, i32, i8*)
-
-declare void @exit(i32) noreturn nounwind
-
-declare i32 @__maskrune(i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-10-14-LiveVariablesBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-10-14-LiveVariablesBug.ll
deleted file mode 100644
index c1aa17c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-10-14-LiveVariablesBug.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin
-; rdar://7299435
-
- at i = internal global i32 0 ; <i32*> [#uses=1]
- at llvm.used = appending global [1 x i8*] [i8* bitcast (void (i16)* @foo to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define void @foo(i16 signext %source) nounwind ssp {
-entry:
- %source_addr = alloca i16, align 2 ; <i16*> [#uses=2]
- store i16 %source, i16* %source_addr
- store i32 4, i32* @i, align 4
- call void asm sideeffect "# top of block", "~{dirflag},~{fpsr},~{flags},~{edi},~{esi},~{edx},~{ecx},~{eax}"() nounwind
- %asmtmp = call i16 asm sideeffect "movw $1, $0", "=={ax},*m,~{dirflag},~{fpsr},~{flags},~{memory}"(i16* %source_addr) nounwind ; <i16> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-10-19-EmergencySpill.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-10-19-EmergencySpill.ll
deleted file mode 100644
index ba44a2e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-10-19-EmergencySpill.ll
+++ /dev/null
@@ -1,54 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -disable-fp-elim
-; rdar://7291624
-
-%union.RtreeCoord = type { float }
-%struct.RtreeCell = type { i64, [10 x %union.RtreeCoord] }
-%struct.Rtree = type { i32, i32*, i32, i32, i32, i32, i8*, i8* }
-%struct.RtreeNode = type { i32*, i64, i32, i32, i8*, i32* }
-
-define fastcc void @nodeOverwriteCell(%struct.Rtree* nocapture %pRtree, %struct.RtreeNode* nocapture %pNode, %struct.RtreeCell* nocapture %pCell, i32 %iCell) nounwind ssp {
-entry:
- %0 = load i8** undef, align 8 ; <i8*> [#uses=2]
- %1 = load i32* undef, align 8 ; <i32> [#uses=1]
- %2 = mul i32 %1, %iCell ; <i32> [#uses=1]
- %3 = add nsw i32 %2, 4 ; <i32> [#uses=1]
- %4 = sext i32 %3 to i64 ; <i64> [#uses=2]
- %5 = load i64* null, align 8 ; <i64> [#uses=2]
- %6 = lshr i64 %5, 48 ; <i64> [#uses=1]
- %7 = trunc i64 %6 to i8 ; <i8> [#uses=1]
- store i8 %7, i8* undef, align 1
- %8 = lshr i64 %5, 8 ; <i64> [#uses=1]
- %9 = trunc i64 %8 to i8 ; <i8> [#uses=1]
- %.sum4 = add i64 %4, 6 ; <i64> [#uses=1]
- %10 = getelementptr inbounds i8* %0, i64 %.sum4 ; <i8*> [#uses=1]
- store i8 %9, i8* %10, align 1
- %11 = getelementptr inbounds %struct.Rtree* %pRtree, i64 0, i32 3 ; <i32*> [#uses=1]
- br i1 undef, label %bb.nph, label %bb2
-
-bb.nph: ; preds = %entry
- %tmp25 = add i64 %4, 11 ; <i64> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb, %bb.nph
- %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %bb ] ; <i64> [#uses=3]
- %scevgep = getelementptr %struct.RtreeCell* %pCell, i64 0, i32 1, i64 %indvar ; <%union.RtreeCoord*> [#uses=1]
- %scevgep12 = bitcast %union.RtreeCoord* %scevgep to i32* ; <i32*> [#uses=1]
- %tmp = shl i64 %indvar, 2 ; <i64> [#uses=1]
- %tmp26 = add i64 %tmp, %tmp25 ; <i64> [#uses=1]
- %scevgep27 = getelementptr i8* %0, i64 %tmp26 ; <i8*> [#uses=1]
- %12 = load i32* %scevgep12, align 4 ; <i32> [#uses=1]
- %13 = lshr i32 %12, 24 ; <i32> [#uses=1]
- %14 = trunc i32 %13 to i8 ; <i8> [#uses=1]
- store i8 %14, i8* undef, align 1
- store i8 undef, i8* %scevgep27, align 1
- %15 = load i32* %11, align 4 ; <i32> [#uses=1]
- %16 = shl i32 %15, 1 ; <i32> [#uses=1]
- %17 = icmp sgt i32 %16, undef ; <i1> [#uses=1]
- %indvar.next = add i64 %indvar, 1 ; <i64> [#uses=1]
- br i1 %17, label %bb, label %bb2
-
-bb2: ; preds = %bb, %entry
- %18 = getelementptr inbounds %struct.RtreeNode* %pNode, i64 0, i32 3 ; <i32*> [#uses=1]
- store i32 1, i32* %18, align 4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll
deleted file mode 100644
index d7f0c1a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll
+++ /dev/null
@@ -1,69 +0,0 @@
-; RUN: llvm-as <%s | llc | FileCheck %s
-; PR 5247
-; check that cmp is not scheduled before the add
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-
- at .str76843 = external constant [45 x i8] ; <[45 x i8]*> [#uses=1]
- at __profiling_callsite_timestamps_live = external global [1216 x i64] ; <[1216 x i64]*> [#uses=2]
-
-define i32 @cl_init(i32 %initoptions) nounwind {
-entry:
- %retval.i = alloca i32 ; <i32*> [#uses=3]
- %retval = alloca i32 ; <i32*> [#uses=2]
- %initoptions.addr = alloca i32 ; <i32*> [#uses=2]
- tail call void asm sideeffect "cpuid", "~{ax},~{bx},~{cx},~{dx},~{memory},~{dirflag},~{fpsr},~{flags}"() nounwind
- %0 = tail call i64 @llvm.readcyclecounter() nounwind ; <i64> [#uses=1]
- store i32 %initoptions, i32* %initoptions.addr
- %1 = bitcast i32* %initoptions.addr to { }* ; <{ }*> [#uses=0]
- call void asm sideeffect "cpuid", "~{ax},~{bx},~{cx},~{dx},~{memory},~{dirflag},~{fpsr},~{flags}"() nounwind
- %2 = call i64 @llvm.readcyclecounter() nounwind ; <i64> [#uses=1]
- %call.i = call i32 @lt_dlinit() nounwind ; <i32> [#uses=1]
- %tobool.i = icmp ne i32 %call.i, 0 ; <i1> [#uses=1]
- br i1 %tobool.i, label %if.then.i, label %if.end.i
-
-if.then.i: ; preds = %entry
- %call1.i = call i32 @warn_dlerror(i8* getelementptr inbounds ([45 x i8]* @.str76843, i32 0, i32 0)) nounwind ; <i32> [#uses=0]
- store i32 -1, i32* %retval.i
- br label %lt_init.exit
-
-if.end.i: ; preds = %entry
- store i32 0, i32* %retval.i
- br label %lt_init.exit
-
-lt_init.exit: ; preds = %if.end.i, %if.then.i
- %3 = load i32* %retval.i ; <i32> [#uses=1]
- call void asm sideeffect "cpuid", "~{ax},~{bx},~{cx},~{dx},~{memory},~{dirflag},~{fpsr},~{flags}"() nounwind
- %4 = call i64 @llvm.readcyclecounter() nounwind ; <i64> [#uses=1]
- %5 = sub i64 %4, %2 ; <i64> [#uses=1]
- %6 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* getelementptr inbounds ([1216 x i64]* @__profiling_callsite_timestamps_live, i32 0, i32 51), i64 %5) nounwind ; <i64> [#uses=0]
-;CHECK: lock
-;CHECK-NEXT: {{xadd|addq}} %rdx, __profiling_callsite_timestamps_live
-;CHECK-NEXT: cmpl $0,
-;CHECK-NEXT: jne
- %cmp = icmp eq i32 %3, 0 ; <i1> [#uses=1]
- br i1 %cmp, label %if.then, label %if.end
-
-if.then: ; preds = %lt_init.exit
- call void @cli_rarload()
- br label %if.end
-
-if.end: ; preds = %if.then, %lt_init.exit
- store i32 0, i32* %retval
- %7 = load i32* %retval ; <i32> [#uses=1]
- tail call void asm sideeffect "cpuid", "~{ax},~{bx},~{cx},~{dx},~{memory},~{dirflag},~{fpsr},~{flags}"() nounwind
- %8 = tail call i64 @llvm.readcyclecounter() nounwind ; <i64> [#uses=1]
- %9 = sub i64 %8, %0 ; <i64> [#uses=1]
- %10 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* getelementptr inbounds ([1216 x i64]* @__profiling_callsite_timestamps_live, i32 0, i32 50), i64 %9) ; <i64> [#uses=0]
- ret i32 %7
-}
-
-declare void @cli_rarload() nounwind
-
-declare i32 @lt_dlinit()
-
-declare i32 @warn_dlerror(i8*) nounwind
-
-declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind
-
-declare i64 @llvm.readcyclecounter() nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-10-25-RewriterBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-10-25-RewriterBug.ll
deleted file mode 100644
index 5b4e818..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-10-25-RewriterBug.ll
+++ /dev/null
@@ -1,171 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -relocation-model=pic -disable-fp-elim
-
-%struct.DecRefPicMarking_t = type { i32, i32, i32, i32, i32, %struct.DecRefPicMarking_t* }
-%struct.FrameStore = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.StorablePicture*, %struct.StorablePicture*, %struct.StorablePicture* }
-%struct.StorablePicture = type { i32, i32, i32, i32, i32, [50 x [6 x [33 x i64]]], [50 x [6 x [33 x i64]]], [50 x [6 x [33 x i64]]], [50 x [6 x [33 x i64]]], i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16**, i16***, i8*, i16**, i8***, i64***, i64***, i16****, i8**, i8**, %struct.StorablePicture*, %struct.StorablePicture*, %struct.StorablePicture*, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x i32], i32, %struct.DecRefPicMarking_t*, i32 }
-
-define fastcc void @insert_picture_in_dpb(%struct.FrameStore* nocapture %fs, %struct.StorablePicture* %p) nounwind ssp {
-entry:
- %0 = getelementptr inbounds %struct.FrameStore* %fs, i64 0, i32 12 ; <%struct.StorablePicture**> [#uses=1]
- %1 = icmp eq i32 undef, 0 ; <i1> [#uses=1]
- br i1 %1, label %bb.i, label %bb36.i
-
-bb.i: ; preds = %entry
- br i1 undef, label %bb3.i, label %bb14.preheader.i
-
-bb3.i: ; preds = %bb.i
- unreachable
-
-bb14.preheader.i: ; preds = %bb.i
- br i1 undef, label %bb9.i, label %bb20.preheader.i
-
-bb9.i: ; preds = %bb9.i, %bb14.preheader.i
- br i1 undef, label %bb9.i, label %bb20.preheader.i
-
-bb20.preheader.i: ; preds = %bb9.i, %bb14.preheader.i
- br i1 undef, label %bb18.i, label %bb29.preheader.i
-
-bb18.i: ; preds = %bb20.preheader.i
- unreachable
-
-bb29.preheader.i: ; preds = %bb20.preheader.i
- br i1 undef, label %bb24.i, label %bb30.i
-
-bb24.i: ; preds = %bb29.preheader.i
- unreachable
-
-bb30.i: ; preds = %bb29.preheader.i
- store i32 undef, i32* undef, align 8
- br label %bb67.preheader.i
-
-bb36.i: ; preds = %entry
- br label %bb67.preheader.i
-
-bb67.preheader.i: ; preds = %bb36.i, %bb30.i
- %2 = phi %struct.StorablePicture* [ null, %bb36.i ], [ undef, %bb30.i ] ; <%struct.StorablePicture*> [#uses=2]
- %3 = phi %struct.StorablePicture* [ null, %bb36.i ], [ undef, %bb30.i ] ; <%struct.StorablePicture*> [#uses=2]
- %4 = phi %struct.StorablePicture* [ null, %bb36.i ], [ undef, %bb30.i ] ; <%struct.StorablePicture*> [#uses=2]
- %5 = phi %struct.StorablePicture* [ null, %bb36.i ], [ undef, %bb30.i ] ; <%struct.StorablePicture*> [#uses=1]
- %6 = phi %struct.StorablePicture* [ null, %bb36.i ], [ undef, %bb30.i ] ; <%struct.StorablePicture*> [#uses=1]
- %7 = phi %struct.StorablePicture* [ null, %bb36.i ], [ undef, %bb30.i ] ; <%struct.StorablePicture*> [#uses=1]
- %8 = phi %struct.StorablePicture* [ null, %bb36.i ], [ undef, %bb30.i ] ; <%struct.StorablePicture*> [#uses=1]
- %9 = phi %struct.StorablePicture* [ null, %bb36.i ], [ undef, %bb30.i ] ; <%struct.StorablePicture*> [#uses=1]
- %10 = phi %struct.StorablePicture* [ null, %bb36.i ], [ undef, %bb30.i ] ; <%struct.StorablePicture*> [#uses=1]
- %11 = phi %struct.StorablePicture* [ null, %bb36.i ], [ undef, %bb30.i ] ; <%struct.StorablePicture*> [#uses=1]
- %12 = phi %struct.StorablePicture* [ null, %bb36.i ], [ undef, %bb30.i ] ; <%struct.StorablePicture*> [#uses=1]
- br i1 undef, label %bb38.i, label %bb68.i
-
-bb38.i: ; preds = %bb66.i, %bb67.preheader.i
- %13 = phi %struct.StorablePicture* [ %37, %bb66.i ], [ %2, %bb67.preheader.i ] ; <%struct.StorablePicture*> [#uses=1]
- %14 = phi %struct.StorablePicture* [ %38, %bb66.i ], [ %3, %bb67.preheader.i ] ; <%struct.StorablePicture*> [#uses=1]
- %15 = phi %struct.StorablePicture* [ %39, %bb66.i ], [ %4, %bb67.preheader.i ] ; <%struct.StorablePicture*> [#uses=1]
- %16 = phi %struct.StorablePicture* [ %40, %bb66.i ], [ %5, %bb67.preheader.i ] ; <%struct.StorablePicture*> [#uses=1]
- %17 = phi %struct.StorablePicture* [ %40, %bb66.i ], [ %6, %bb67.preheader.i ] ; <%struct.StorablePicture*> [#uses=1]
- %18 = phi %struct.StorablePicture* [ %40, %bb66.i ], [ %7, %bb67.preheader.i ] ; <%struct.StorablePicture*> [#uses=1]
- %19 = phi %struct.StorablePicture* [ %40, %bb66.i ], [ %8, %bb67.preheader.i ] ; <%struct.StorablePicture*> [#uses=1]
- %20 = phi %struct.StorablePicture* [ %40, %bb66.i ], [ %9, %bb67.preheader.i ] ; <%struct.StorablePicture*> [#uses=1]
- %21 = phi %struct.StorablePicture* [ %40, %bb66.i ], [ %10, %bb67.preheader.i ] ; <%struct.StorablePicture*> [#uses=1]
- %22 = phi %struct.StorablePicture* [ %40, %bb66.i ], [ %11, %bb67.preheader.i ] ; <%struct.StorablePicture*> [#uses=1]
- %23 = phi %struct.StorablePicture* [ %40, %bb66.i ], [ %12, %bb67.preheader.i ] ; <%struct.StorablePicture*> [#uses=1]
- %indvar248.i = phi i64 [ %indvar.next249.i, %bb66.i ], [ 0, %bb67.preheader.i ] ; <i64> [#uses=3]
- %storemerge52.i = trunc i64 %indvar248.i to i32 ; <i32> [#uses=1]
- %24 = getelementptr inbounds %struct.StorablePicture* %23, i64 0, i32 19 ; <i32*> [#uses=0]
- br i1 undef, label %bb.nph51.i, label %bb66.i
-
-bb.nph51.i: ; preds = %bb38.i
- %25 = sdiv i32 %storemerge52.i, 8 ; <i32> [#uses=0]
- br label %bb39.i
-
-bb39.i: ; preds = %bb64.i, %bb.nph51.i
- %26 = phi %struct.StorablePicture* [ %17, %bb.nph51.i ], [ null, %bb64.i ] ; <%struct.StorablePicture*> [#uses=1]
- %27 = phi %struct.StorablePicture* [ %18, %bb.nph51.i ], [ null, %bb64.i ] ; <%struct.StorablePicture*> [#uses=0]
- %28 = phi %struct.StorablePicture* [ %19, %bb.nph51.i ], [ null, %bb64.i ] ; <%struct.StorablePicture*> [#uses=0]
- %29 = phi %struct.StorablePicture* [ %20, %bb.nph51.i ], [ null, %bb64.i ] ; <%struct.StorablePicture*> [#uses=0]
- %30 = phi %struct.StorablePicture* [ %21, %bb.nph51.i ], [ null, %bb64.i ] ; <%struct.StorablePicture*> [#uses=0]
- %31 = phi %struct.StorablePicture* [ %22, %bb.nph51.i ], [ null, %bb64.i ] ; <%struct.StorablePicture*> [#uses=0]
- br i1 undef, label %bb57.i, label %bb40.i
-
-bb40.i: ; preds = %bb39.i
- br i1 undef, label %bb57.i, label %bb41.i
-
-bb41.i: ; preds = %bb40.i
- %storemerge10.i = select i1 undef, i32 2, i32 4 ; <i32> [#uses=1]
- %32 = zext i32 %storemerge10.i to i64 ; <i64> [#uses=1]
- br i1 undef, label %bb45.i, label %bb47.i
-
-bb45.i: ; preds = %bb41.i
- %33 = getelementptr inbounds %struct.StorablePicture* %26, i64 0, i32 5, i64 undef, i64 %32, i64 undef ; <i64*> [#uses=1]
- %34 = load i64* %33, align 8 ; <i64> [#uses=1]
- br label %bb47.i
-
-bb47.i: ; preds = %bb45.i, %bb41.i
- %storemerge11.i = phi i64 [ %34, %bb45.i ], [ 0, %bb41.i ] ; <i64> [#uses=0]
- %scevgep246.i = getelementptr i64* undef, i64 undef ; <i64*> [#uses=0]
- br label %bb64.i
-
-bb57.i: ; preds = %bb40.i, %bb39.i
- br i1 undef, label %bb58.i, label %bb60.i
-
-bb58.i: ; preds = %bb57.i
- br label %bb60.i
-
-bb60.i: ; preds = %bb58.i, %bb57.i
- %35 = load i64*** undef, align 8 ; <i64**> [#uses=1]
- %scevgep256.i = getelementptr i64** %35, i64 %indvar248.i ; <i64**> [#uses=1]
- %36 = load i64** %scevgep256.i, align 8 ; <i64*> [#uses=1]
- %scevgep243.i = getelementptr i64* %36, i64 undef ; <i64*> [#uses=1]
- store i64 -1, i64* %scevgep243.i, align 8
- br label %bb64.i
-
-bb64.i: ; preds = %bb60.i, %bb47.i
- br i1 undef, label %bb39.i, label %bb66.i
-
-bb66.i: ; preds = %bb64.i, %bb38.i
- %37 = phi %struct.StorablePicture* [ %13, %bb38.i ], [ null, %bb64.i ] ; <%struct.StorablePicture*> [#uses=2]
- %38 = phi %struct.StorablePicture* [ %14, %bb38.i ], [ null, %bb64.i ] ; <%struct.StorablePicture*> [#uses=2]
- %39 = phi %struct.StorablePicture* [ %15, %bb38.i ], [ null, %bb64.i ] ; <%struct.StorablePicture*> [#uses=2]
- %40 = phi %struct.StorablePicture* [ %16, %bb38.i ], [ null, %bb64.i ] ; <%struct.StorablePicture*> [#uses=8]
- %indvar.next249.i = add i64 %indvar248.i, 1 ; <i64> [#uses=1]
- br i1 undef, label %bb38.i, label %bb68.i
-
-bb68.i: ; preds = %bb66.i, %bb67.preheader.i
- %41 = phi %struct.StorablePicture* [ %2, %bb67.preheader.i ], [ %37, %bb66.i ] ; <%struct.StorablePicture*> [#uses=0]
- %42 = phi %struct.StorablePicture* [ %3, %bb67.preheader.i ], [ %38, %bb66.i ] ; <%struct.StorablePicture*> [#uses=1]
- %43 = phi %struct.StorablePicture* [ %4, %bb67.preheader.i ], [ %39, %bb66.i ] ; <%struct.StorablePicture*> [#uses=1]
- br i1 undef, label %bb.nph48.i, label %bb108.i
-
-bb.nph48.i: ; preds = %bb68.i
- br label %bb80.i
-
-bb80.i: ; preds = %bb104.i, %bb.nph48.i
- %44 = phi %struct.StorablePicture* [ %42, %bb.nph48.i ], [ null, %bb104.i ] ; <%struct.StorablePicture*> [#uses=1]
- %45 = phi %struct.StorablePicture* [ %43, %bb.nph48.i ], [ null, %bb104.i ] ; <%struct.StorablePicture*> [#uses=1]
- br i1 undef, label %bb.nph39.i, label %bb104.i
-
-bb.nph39.i: ; preds = %bb80.i
- br label %bb81.i
-
-bb81.i: ; preds = %bb102.i, %bb.nph39.i
- %46 = phi %struct.StorablePicture* [ %44, %bb.nph39.i ], [ %48, %bb102.i ] ; <%struct.StorablePicture*> [#uses=0]
- %47 = phi %struct.StorablePicture* [ %45, %bb.nph39.i ], [ %48, %bb102.i ] ; <%struct.StorablePicture*> [#uses=0]
- br i1 undef, label %bb83.i, label %bb82.i
-
-bb82.i: ; preds = %bb81.i
- br i1 undef, label %bb83.i, label %bb101.i
-
-bb83.i: ; preds = %bb82.i, %bb81.i
- br label %bb102.i
-
-bb101.i: ; preds = %bb82.i
- br label %bb102.i
-
-bb102.i: ; preds = %bb101.i, %bb83.i
- %48 = load %struct.StorablePicture** %0, align 8 ; <%struct.StorablePicture*> [#uses=2]
- br i1 undef, label %bb81.i, label %bb104.i
-
-bb104.i: ; preds = %bb102.i, %bb80.i
- br label %bb80.i
-
-bb108.i: ; preds = %bb68.i
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-11-04-SubregCoalescingBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-11-04-SubregCoalescingBug.ll
deleted file mode 100644
index b5be65f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-11-04-SubregCoalescingBug.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin11 | FileCheck %s
-; rdar://7362871
-
-define void @bar(i32 %b, i32 %a) nounwind optsize ssp {
-entry:
-; CHECK: leal 15(%rsi), %edi
-; CHECK-NOT: movl
-; CHECK: _foo
- %0 = add i32 %a, 15 ; <i32> [#uses=1]
- %1 = zext i32 %0 to i64 ; <i64> [#uses=1]
- tail call void @foo(i64 %1) nounwind
- ret void
-}
-
-declare void @foo(i64)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-11-13-VirtRegRewriterBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-11-13-VirtRegRewriterBug.ll
deleted file mode 100644
index 5398eef..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-11-13-VirtRegRewriterBug.ll
+++ /dev/null
@@ -1,133 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic -disable-fp-elim
-; rdar://7394770
-
-%struct.JVTLib_100487 = type <{ i8 }>
-
-define i32 @_Z13JVTLib_10335613JVTLib_10266513JVTLib_100579S_S_S_jPhj(i16* nocapture %ResidualX_Array.0, %struct.JVTLib_100487* nocapture byval align 4 %xqp, i16* nocapture %ResidualL_Array.0, i16* %ResidualDCZ_Array.0, i16* nocapture %ResidualACZ_FOArray.0, i32 %useFRextDequant, i8* nocapture %JVTLib_103357, i32 %use_field_scan) ssp {
-bb.nph:
- %0 = shl i32 undef, 1 ; <i32> [#uses=2]
- %mask133.masked.masked.masked.masked.masked.masked = or i640 undef, undef ; <i640> [#uses=1]
- br label %bb
-
-bb: ; preds = %_ZL13JVTLib_105204PKsPK13JVTLib_105184PsPhjS5_j.exit, %bb.nph
- br i1 undef, label %bb2, label %bb1
-
-bb1: ; preds = %bb
- br i1 undef, label %bb.i, label %bb1.i
-
-bb2: ; preds = %bb
- unreachable
-
-bb.i: ; preds = %bb1
- br label %_ZL13JVTLib_105204PKsPK13JVTLib_105184PsPhjS5_j.exit
-
-bb1.i: ; preds = %bb1
- br label %_ZL13JVTLib_105204PKsPK13JVTLib_105184PsPhjS5_j.exit
-
-_ZL13JVTLib_105204PKsPK13JVTLib_105184PsPhjS5_j.exit: ; preds = %bb1.i, %bb.i
- br i1 undef, label %bb5, label %bb
-
-bb5: ; preds = %_ZL13JVTLib_105204PKsPK13JVTLib_105184PsPhjS5_j.exit
- %mask271.masked.masked.masked.masked.masked.masked.masked = or i256 0, undef ; <i256> [#uses=2]
- %mask266.masked.masked.masked.masked.masked.masked = or i256 %mask271.masked.masked.masked.masked.masked.masked.masked, undef ; <i256> [#uses=1]
- %mask241.masked = or i256 undef, undef ; <i256> [#uses=1]
- %ins237 = or i256 undef, 0 ; <i256> [#uses=1]
- br i1 undef, label %bb9, label %bb10
-
-bb9: ; preds = %bb5
- br i1 undef, label %bb12.i, label %_ZL13JVTLib_105255PKsPK13JVTLib_105184Psj.exit
-
-bb12.i: ; preds = %bb9
- br label %_ZL13JVTLib_105255PKsPK13JVTLib_105184Psj.exit
-
-_ZL13JVTLib_105255PKsPK13JVTLib_105184Psj.exit: ; preds = %bb12.i, %bb9
- ret i32 undef
-
-bb10: ; preds = %bb5
- %1 = sext i16 undef to i32 ; <i32> [#uses=1]
- %2 = sext i16 undef to i32 ; <i32> [#uses=1]
- %3 = sext i16 undef to i32 ; <i32> [#uses=1]
- %4 = sext i16 undef to i32 ; <i32> [#uses=1]
- %5 = sext i16 undef to i32 ; <i32> [#uses=1]
- %6 = sext i16 undef to i32 ; <i32> [#uses=1]
- %tmp211 = lshr i256 %mask271.masked.masked.masked.masked.masked.masked.masked, 112 ; <i256> [#uses=0]
- %7 = sext i16 undef to i32 ; <i32> [#uses=1]
- %tmp208 = lshr i256 %mask266.masked.masked.masked.masked.masked.masked, 128 ; <i256> [#uses=1]
- %tmp209 = trunc i256 %tmp208 to i16 ; <i16> [#uses=1]
- %8 = sext i16 %tmp209 to i32 ; <i32> [#uses=1]
- %9 = sext i16 undef to i32 ; <i32> [#uses=1]
- %10 = sext i16 undef to i32 ; <i32> [#uses=1]
- %tmp193 = lshr i256 %mask241.masked, 208 ; <i256> [#uses=1]
- %tmp194 = trunc i256 %tmp193 to i16 ; <i16> [#uses=1]
- %11 = sext i16 %tmp194 to i32 ; <i32> [#uses=1]
- %tmp187 = lshr i256 %ins237, 240 ; <i256> [#uses=1]
- %tmp188 = trunc i256 %tmp187 to i16 ; <i16> [#uses=1]
- %12 = sext i16 %tmp188 to i32 ; <i32> [#uses=1]
- %13 = add nsw i32 %4, %1 ; <i32> [#uses=1]
- %14 = add nsw i32 %5, 0 ; <i32> [#uses=1]
- %15 = add nsw i32 %6, %2 ; <i32> [#uses=1]
- %16 = add nsw i32 %7, %3 ; <i32> [#uses=1]
- %17 = add nsw i32 0, %8 ; <i32> [#uses=1]
- %18 = add nsw i32 %11, %9 ; <i32> [#uses=1]
- %19 = add nsw i32 0, %10 ; <i32> [#uses=1]
- %20 = add nsw i32 %12, 0 ; <i32> [#uses=1]
- %21 = add nsw i32 %17, %13 ; <i32> [#uses=2]
- %22 = add nsw i32 %18, %14 ; <i32> [#uses=2]
- %23 = add nsw i32 %19, %15 ; <i32> [#uses=2]
- %24 = add nsw i32 %20, %16 ; <i32> [#uses=2]
- %25 = add nsw i32 %22, %21 ; <i32> [#uses=2]
- %26 = add nsw i32 %24, %23 ; <i32> [#uses=2]
- %27 = sub i32 %21, %22 ; <i32> [#uses=1]
- %28 = sub i32 %23, %24 ; <i32> [#uses=1]
- %29 = add nsw i32 %26, %25 ; <i32> [#uses=1]
- %30 = sub i32 %25, %26 ; <i32> [#uses=1]
- %31 = sub i32 %27, %28 ; <i32> [#uses=1]
- %32 = ashr i32 %29, 1 ; <i32> [#uses=2]
- %33 = ashr i32 %30, 1 ; <i32> [#uses=2]
- %34 = ashr i32 %31, 1 ; <i32> [#uses=2]
- %35 = icmp sgt i32 %32, 32767 ; <i1> [#uses=1]
- %o0_0.0.i = select i1 %35, i32 32767, i32 %32 ; <i32> [#uses=2]
- %36 = icmp slt i32 %o0_0.0.i, -32768 ; <i1> [#uses=1]
- %37 = icmp sgt i32 %33, 32767 ; <i1> [#uses=1]
- %o1_0.0.i = select i1 %37, i32 32767, i32 %33 ; <i32> [#uses=2]
- %38 = icmp slt i32 %o1_0.0.i, -32768 ; <i1> [#uses=1]
- %39 = icmp sgt i32 %34, 32767 ; <i1> [#uses=1]
- %o2_0.0.i = select i1 %39, i32 32767, i32 %34 ; <i32> [#uses=2]
- %40 = icmp slt i32 %o2_0.0.i, -32768 ; <i1> [#uses=1]
- %tmp101 = lshr i640 %mask133.masked.masked.masked.masked.masked.masked, 256 ; <i640> [#uses=1]
- %41 = trunc i32 %o0_0.0.i to i16 ; <i16> [#uses=1]
- %tmp358 = select i1 %36, i16 -32768, i16 %41 ; <i16> [#uses=2]
- %42 = trunc i32 %o1_0.0.i to i16 ; <i16> [#uses=1]
- %tmp347 = select i1 %38, i16 -32768, i16 %42 ; <i16> [#uses=1]
- %43 = trunc i32 %o2_0.0.i to i16 ; <i16> [#uses=1]
- %tmp335 = select i1 %40, i16 -32768, i16 %43 ; <i16> [#uses=1]
- %44 = icmp sgt i16 %tmp358, -1 ; <i1> [#uses=2]
- %..i24 = select i1 %44, i16 %tmp358, i16 undef ; <i16> [#uses=1]
- %45 = icmp sgt i16 %tmp347, -1 ; <i1> [#uses=1]
- %46 = icmp sgt i16 %tmp335, -1 ; <i1> [#uses=1]
- %47 = zext i16 %..i24 to i32 ; <i32> [#uses=1]
- %tmp = trunc i640 %tmp101 to i32 ; <i32> [#uses=1]
- %48 = and i32 %tmp, 65535 ; <i32> [#uses=2]
- %49 = mul i32 %47, %48 ; <i32> [#uses=1]
- %50 = zext i16 undef to i32 ; <i32> [#uses=1]
- %51 = mul i32 %50, %48 ; <i32> [#uses=1]
- %52 = add i32 %49, %0 ; <i32> [#uses=1]
- %53 = add i32 %51, %0 ; <i32> [#uses=1]
- %54 = lshr i32 %52, undef ; <i32> [#uses=1]
- %55 = lshr i32 %53, undef ; <i32> [#uses=1]
- %56 = trunc i32 %54 to i16 ; <i16> [#uses=1]
- %57 = trunc i32 %55 to i16 ; <i16> [#uses=1]
- %vs16Out0_0.0.i = select i1 %44, i16 %56, i16 undef ; <i16> [#uses=1]
- %vs16Out0_4.0.i = select i1 %45, i16 0, i16 undef ; <i16> [#uses=1]
- %vs16Out1_0.0.i = select i1 %46, i16 %57, i16 undef ; <i16> [#uses=1]
- br i1 undef, label %bb129.i, label %_ZL13JVTLib_105207PKsPK13JVTLib_105184Psj.exit
-
-bb129.i: ; preds = %bb10
- br label %_ZL13JVTLib_105207PKsPK13JVTLib_105184Psj.exit
-
-_ZL13JVTLib_105207PKsPK13JVTLib_105184Psj.exit: ; preds = %bb129.i, %bb10
- %58 = phi i16 [ %vs16Out0_4.0.i, %bb129.i ], [ undef, %bb10 ] ; <i16> [#uses=0]
- %59 = phi i16 [ undef, %bb129.i ], [ %vs16Out1_0.0.i, %bb10 ] ; <i16> [#uses=0]
- store i16 %vs16Out0_0.0.i, i16* %ResidualDCZ_Array.0, align 2
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-11-16-BadKillsCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-11-16-BadKillsCrash.ll
deleted file mode 100644
index a51c75d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-11-16-BadKillsCrash.ll
+++ /dev/null
@@ -1,75 +0,0 @@
-; RUN: llc < %s
-; PR5495
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
-target triple = "i386-pc-linux-gnu"
-
-%"struct.std::__ctype_abstract_base<wchar_t>" = type { %"struct.std::locale::facet" }
-%"struct.std::basic_ios<char,std::char_traits<char> >" = type { %"struct.std::ios_base", %"struct.std::basic_ostream<char,std::char_traits<char> >"*, i8, i8, %"struct.std::basic_streambuf<char,std::char_traits<char> >"*, %"struct.std::ctype<char>"*, %"struct.std::__ctype_abstract_base<wchar_t>"*, %"struct.std::__ctype_abstract_base<wchar_t>"* }
-%"struct.std::basic_istream<char,std::char_traits<char> >" = type { i32 (...)**, i32, %"struct.std::basic_ios<char,std::char_traits<char> >" }
-%"struct.std::basic_ostream<char,std::char_traits<char> >" = type { i32 (...)**, %"struct.std::basic_ios<char,std::char_traits<char> >" }
-%"struct.std::basic_streambuf<char,std::char_traits<char> >" = type { i32 (...)**, i8*, i8*, i8*, i8*, i8*, i8*, %"struct.std::locale" }
-%"struct.std::ctype<char>" = type { %"struct.std::locale::facet", i32*, i8, i32*, i32*, i16*, i8, [256 x i8], [256 x i8], i8 }
-%"struct.std::ios_base" = type { i32 (...)**, i32, i32, i32, i32, i32, %"struct.std::ios_base::_Callback_list"*, %"struct.std::ios_base::_Words", [8 x %"struct.std::ios_base::_Words"], i32, %"struct.std::ios_base::_Words"*, %"struct.std::locale" }
-%"struct.std::ios_base::_Callback_list" = type { %"struct.std::ios_base::_Callback_list"*, void (i32, %"struct.std::ios_base"*, i32)*, i32, i32 }
-%"struct.std::ios_base::_Words" = type { i8*, i32 }
-%"struct.std::locale" = type { %"struct.std::locale::_Impl"* }
-%"struct.std::locale::_Impl" = type { i32, %"struct.std::locale::facet"**, i32, %"struct.std::locale::facet"**, i8** }
-%"struct.std::locale::facet" = type { i32 (...)**, i32 }
-%union..0._15 = type { i32 }
-
-declare i8* @llvm.eh.exception() nounwind readonly
-
-declare i8* @__cxa_begin_catch(i8*) nounwind
-
-declare %"struct.std::ctype<char>"* @_ZSt9use_facetISt5ctypeIcEERKT_RKSt6locale(%"struct.std::locale"*)
-
-define %"struct.std::basic_istream<char,std::char_traits<char> >"* @_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_(%"struct.std::basic_istream<char,std::char_traits<char> >"* %__in, i8* nocapture %__s) {
-entry:
- %0 = invoke %"struct.std::ctype<char>"* @_ZSt9use_facetISt5ctypeIcEERKT_RKSt6locale(%"struct.std::locale"* undef)
- to label %invcont8 unwind label %lpad74 ; <%"struct.std::ctype<char>"*> [#uses=0]
-
-invcont8: ; preds = %entry
- %1 = invoke i32 undef(%"struct.std::basic_streambuf<char,std::char_traits<char> >"* undef)
- to label %bb26.preheader unwind label %lpad ; <i32> [#uses=0]
-
-bb26.preheader: ; preds = %invcont8
- br label %invcont38
-
-bb1.i100: ; preds = %invcont38
- %2 = add nsw i32 1, %__extracted.0 ; <i32> [#uses=3]
- br i1 undef, label %bb.i97, label %bb1.i
-
-bb.i97: ; preds = %bb1.i100
- br label %invcont38
-
-bb1.i: ; preds = %bb1.i100
- %3 = invoke i32 undef(%"struct.std::basic_streambuf<char,std::char_traits<char> >"* undef)
- to label %invcont38 unwind label %lpad ; <i32> [#uses=0]
-
-invcont24: ; preds = %invcont38
- %4 = invoke i32 undef(%"struct.std::basic_streambuf<char,std::char_traits<char> >"* undef)
- to label %_ZNSt15basic_streambufIcSt11char_traitsIcEE6sbumpcEv.exit.i unwind label %lpad ; <i32> [#uses=0]
-
-_ZNSt15basic_streambufIcSt11char_traitsIcEE6sbumpcEv.exit.i: ; preds = %invcont24
- br i1 undef, label %invcont25, label %bb.i93
-
-bb.i93: ; preds = %_ZNSt15basic_streambufIcSt11char_traitsIcEE6sbumpcEv.exit.i
- %5 = invoke i32 undef(%"struct.std::basic_streambuf<char,std::char_traits<char> >"* undef)
- to label %invcont25 unwind label %lpad ; <i32> [#uses=0]
-
-invcont25: ; preds = %bb.i93, %_ZNSt15basic_streambufIcSt11char_traitsIcEE6sbumpcEv.exit.i
- br label %invcont38
-
-invcont38: ; preds = %invcont25, %bb1.i, %bb.i97, %bb26.preheader
- %__extracted.0 = phi i32 [ 0, %bb26.preheader ], [ undef, %invcont25 ], [ %2, %bb.i97 ], [ %2, %bb1.i ] ; <i32> [#uses=1]
- br i1 false, label %bb1.i100, label %invcont24
-
-lpad: ; preds = %bb.i93, %invcont24, %bb1.i, %invcont8
- %__extracted.1 = phi i32 [ 0, %invcont8 ], [ %2, %bb1.i ], [ undef, %bb.i93 ], [ undef, %invcont24 ] ; <i32> [#uses=0]
- %eh_ptr = call i8* @llvm.eh.exception() ; <i8*> [#uses=1]
- %6 = call i8* @__cxa_begin_catch(i8* %eh_ptr) nounwind ; <i8*> [#uses=0]
- unreachable
-
-lpad74: ; preds = %entry
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-11-16-MachineLICM.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-11-16-MachineLICM.ll
deleted file mode 100644
index 8f274df..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-11-16-MachineLICM.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
-; rdar://7395200
-
- at g = common global [4 x float] zeroinitializer, align 16 ; <[4 x float]*> [#uses=4]
-
-define void @foo(i32 %n, float* nocapture %x) nounwind ssp {
-entry:
-; CHECK: foo:
- %0 = icmp sgt i32 %n, 0 ; <i1> [#uses=1]
- br i1 %0, label %bb.nph, label %return
-
-bb.nph: ; preds = %entry
-; CHECK: movq _g at GOTPCREL(%rip), [[REG:%[a-z]+]]
- %tmp = zext i32 %n to i64 ; <i64> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb, %bb.nph
-; CHECK: LBB1_2:
- %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %bb ] ; <i64> [#uses=2]
- %tmp9 = shl i64 %indvar, 2 ; <i64> [#uses=4]
- %tmp1016 = or i64 %tmp9, 1 ; <i64> [#uses=1]
- %scevgep = getelementptr float* %x, i64 %tmp1016 ; <float*> [#uses=1]
- %tmp1117 = or i64 %tmp9, 2 ; <i64> [#uses=1]
- %scevgep12 = getelementptr float* %x, i64 %tmp1117 ; <float*> [#uses=1]
- %tmp1318 = or i64 %tmp9, 3 ; <i64> [#uses=1]
- %scevgep14 = getelementptr float* %x, i64 %tmp1318 ; <float*> [#uses=1]
- %x_addr.03 = getelementptr float* %x, i64 %tmp9 ; <float*> [#uses=1]
- %1 = load float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 0), align 16 ; <float> [#uses=1]
- store float %1, float* %x_addr.03, align 4
- %2 = load float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 1), align 4 ; <float> [#uses=1]
- store float %2, float* %scevgep, align 4
- %3 = load float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 2), align 8 ; <float> [#uses=1]
- store float %3, float* %scevgep12, align 4
- %4 = load float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 3), align 4 ; <float> [#uses=1]
- store float %4, float* %scevgep14, align 4
- %indvar.next = add i64 %indvar, 1 ; <i64> [#uses=2]
- %exitcond = icmp eq i64 %indvar.next, %tmp ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb
-
-return: ; preds = %bb, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll
deleted file mode 100644
index 3ce9edb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-11-16-UnfoldMemOpBug.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
-; rdar://7396984
-
- at str = private constant [28 x i8] c"xxxxxxxxxxxxxxxxxxxxxxxxxxx\00", align 1
-
-define void @t(i32 %count) ssp nounwind {
-entry:
-; CHECK: t:
-; CHECK: movq ___stack_chk_guard at GOTPCREL(%rip)
-; CHECK: movups L_str(%rip), %xmm0
- %tmp0 = alloca [60 x i8], align 1
- %tmp1 = getelementptr inbounds [60 x i8]* %tmp0, i64 0, i64 0
- br label %bb1
-
-bb1:
-; CHECK: LBB1_1:
-; CHECK: movaps %xmm0, (%rsp)
- %tmp2 = phi i32 [ %tmp3, %bb1 ], [ 0, %entry ]
- call void @llvm.memcpy.i64(i8* %tmp1, i8* getelementptr inbounds ([28 x i8]* @str, i64 0, i64 0), i64 28, i32 1)
- %tmp3 = add i32 %tmp2, 1
- %tmp4 = icmp eq i32 %tmp3, %count
- br i1 %tmp4, label %bb2, label %bb1
-
-bb2:
- ret void
-}
-
-declare void @llvm.memcpy.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-11-17-UpdateTerminator.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-11-17-UpdateTerminator.ll
deleted file mode 100644
index 5c1a2bc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-11-17-UpdateTerminator.ll
+++ /dev/null
@@ -1,52 +0,0 @@
-; RUN: llc -O3 < %s
-; This test fails with:
-; Assertion failed: (!B && "UpdateTerminators requires analyzable predecessors!"), function updateTerminator, MachineBasicBlock.cpp, line 255.
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-apple-darwin10.2"
-
-%"struct.llvm::InlineAsm::ConstraintInfo" = type { i32, i8, i8, i8, i8, %"struct.std::vector<std::basic_string<char, std::char_traits<char>, std::allocator<char> >,std::allocator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > >" }
-%"struct.std::_Vector_base<llvm::InlineAsm::ConstraintInfo,std::allocator<llvm::InlineAsm::ConstraintInfo> >" = type { %"struct.std::_Vector_base<llvm::InlineAsm::ConstraintInfo,std::allocator<llvm::InlineAsm::ConstraintInfo> >::_Vector_impl" }
-%"struct.std::_Vector_base<llvm::InlineAsm::ConstraintInfo,std::allocator<llvm::InlineAsm::ConstraintInfo> >::_Vector_impl" = type { %"struct.llvm::InlineAsm::ConstraintInfo"*, %"struct.llvm::InlineAsm::ConstraintInfo"*, %"struct.llvm::InlineAsm::ConstraintInfo"* }
-%"struct.std::_Vector_base<std::basic_string<char, std::char_traits<char>, std::allocator<char> >,std::allocator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > >" = type { %"struct.std::_Vector_base<std::basic_string<char, std::char_traits<char>, std::allocator<char> >,std::allocator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > >::_Vector_impl" }
-%"struct.std::_Vector_base<std::basic_string<char, std::char_traits<char>, std::allocator<char> >,std::allocator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > >::_Vector_impl" = type { %"struct.std::string"*, %"struct.std::string"*, %"struct.std::string"* }
-%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >::_Alloc_hider" = type { i8* }
-%"struct.std::string" = type { %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >::_Alloc_hider" }
-%"struct.std::vector<llvm::InlineAsm::ConstraintInfo,std::allocator<llvm::InlineAsm::ConstraintInfo> >" = type { %"struct.std::_Vector_base<llvm::InlineAsm::ConstraintInfo,std::allocator<llvm::InlineAsm::ConstraintInfo> >" }
-%"struct.std::vector<std::basic_string<char, std::char_traits<char>, std::allocator<char> >,std::allocator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > >" = type { %"struct.std::_Vector_base<std::basic_string<char, std::char_traits<char>, std::allocator<char> >,std::allocator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > >" }
-
-define zeroext i8 @_ZN4llvm9InlineAsm14ConstraintInfo5ParseENS_9StringRefERSt6vectorIS1_SaIS1_EE(%"struct.llvm::InlineAsm::ConstraintInfo"* nocapture %this, i64 %Str.0, i64 %Str.1, %"struct.std::vector<llvm::InlineAsm::ConstraintInfo,std::allocator<llvm::InlineAsm::ConstraintInfo> >"* nocapture %ConstraintsSoFar) nounwind ssp align 2 {
-entry:
- br i1 undef, label %bb56, label %bb27.outer
-
-bb8: ; preds = %bb27.outer108, %bb13
- switch i8 undef, label %bb27.outer [
- i8 35, label %bb56
- i8 37, label %bb14
- i8 38, label %bb10
- i8 42, label %bb56
- ]
-
-bb27.outer: ; preds = %bb8, %entry
- %I.2.ph = phi i8* [ undef, %entry ], [ %I.2.ph109, %bb8 ] ; <i8*> [#uses=2]
- br label %bb27.outer108
-
-bb10: ; preds = %bb8
- %toBool = icmp eq i8 0, 0 ; <i1> [#uses=1]
- %or.cond = and i1 undef, %toBool ; <i1> [#uses=1]
- br i1 %or.cond, label %bb13, label %bb56
-
-bb13: ; preds = %bb10
- br i1 undef, label %bb27.outer108, label %bb8
-
-bb14: ; preds = %bb8
- ret i8 1
-
-bb27.outer108: ; preds = %bb13, %bb27.outer
- %I.2.ph109 = getelementptr i8* %I.2.ph, i64 undef ; <i8*> [#uses=1]
- %scevgep = getelementptr i8* %I.2.ph, i64 undef ; <i8*> [#uses=0]
- br label %bb8
-
-bb56: ; preds = %bb10, %bb8, %bb8, %entry
- ret i8 1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-11-18-TwoAddrKill.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-11-18-TwoAddrKill.ll
deleted file mode 100644
index 0edaa70..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-11-18-TwoAddrKill.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s
-; PR 5300
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
-target triple = "i386-pc-linux-gnu"
-
- at g_296 = external global i8, align 1 ; <i8*> [#uses=1]
-
-define noalias i8** @func_31(i32** nocapture %int8p_33, i8** nocapture %p_34, i8* nocapture %p_35) nounwind {
-entry:
- %cmp.i = icmp sgt i16 undef, 234 ; <i1> [#uses=1]
- %tmp17 = select i1 %cmp.i, i16 undef, i16 0 ; <i16> [#uses=2]
- %conv8 = trunc i16 %tmp17 to i8 ; <i8> [#uses=3]
- br i1 undef, label %cond.false.i29, label %land.lhs.true.i
-
-land.lhs.true.i: ; preds = %entry
- %tobool5.i = icmp eq i32 undef, undef ; <i1> [#uses=1]
- br i1 %tobool5.i, label %cond.false.i29, label %bar.exit
-
-cond.false.i29: ; preds = %land.lhs.true.i, %entry
- %tmp = sub i8 0, %conv8 ; <i8> [#uses=1]
- %mul.i = and i8 %conv8, %tmp ; <i8> [#uses=1]
- br label %bar.exit
-
-bar.exit: ; preds = %cond.false.i29, %land.lhs.true.i
- %call1231 = phi i8 [ %mul.i, %cond.false.i29 ], [ %conv8, %land.lhs.true.i ] ; <i8> [#uses=0]
- %conv21 = trunc i16 %tmp17 to i8 ; <i8> [#uses=1]
- store i8 %conv21, i8* @g_296
- ret i8** undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-11-25-ImpDefBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-11-25-ImpDefBug.ll
deleted file mode 100644
index 7606c0e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-11-25-ImpDefBug.ll
+++ /dev/null
@@ -1,116 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu
-; pr5600
-
-%struct..0__pthread_mutex_s = type { i32, i32, i32, i32, i32, i32, %struct.__pthread_list_t }
-%struct.ASN1ObjHeader = type { i8, %"struct.__gmp_expr<__mpz_struct [1],__mpz_struct [1]>", i64, i32, i32, i32 }
-%struct.ASN1Object = type { i32 (...)**, i32, i32, i64 }
-%struct.ASN1Unit = type { [4 x i32 (%struct.ASN1ObjHeader*, %struct.ASN1Object**)*], %"struct.std::ASN1ObjList" }
-%"struct.__gmp_expr<__mpz_struct [1],__mpz_struct [1]>" = type { [1 x %struct.__mpz_struct] }
-%struct.__mpz_struct = type { i32, i32, i64* }
-%struct.__pthread_list_t = type { %struct.__pthread_list_t*, %struct.__pthread_list_t* }
-%struct.pthread_attr_t = type { i64, [48 x i8] }
-%struct.pthread_mutex_t = type { %struct..0__pthread_mutex_s }
-%struct.pthread_mutexattr_t = type { i32 }
-%"struct.std::ASN1ObjList" = type { %"struct.std::_Vector_base<ASN1Object*,std::allocator<ASN1Object*> >" }
-%"struct.std::_Vector_base<ASN1Object*,std::allocator<ASN1Object*> >" = type { %"struct.std::_Vector_base<ASN1Object*,std::allocator<ASN1Object*> >::_Vector_impl" }
-%"struct.std::_Vector_base<ASN1Object*,std::allocator<ASN1Object*> >::_Vector_impl" = type { %struct.ASN1Object**, %struct.ASN1Object**, %struct.ASN1Object** }
-%struct.xmstream = type { i8*, i64, i64, i64, i8 }
-
-declare void @_ZNSt6vectorIP10ASN1ObjectSaIS1_EE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPS1_S3_EERKS1_(%"struct.std::ASN1ObjList"* nocapture, i64, %struct.ASN1Object** nocapture)
-
-declare i32 @_Z17LoadObjectFromBERR8xmstreamPP10ASN1ObjectPPF10ASN1StatusP13ASN1ObjHeaderS3_E(%struct.xmstream*, %struct.ASN1Object**, i32 (%struct.ASN1ObjHeader*, %struct.ASN1Object**)**)
-
-define i32 @_ZN8ASN1Unit4loadER8xmstreamjm18ASN1LengthEncoding(%struct.ASN1Unit* %this, %struct.xmstream* nocapture %stream, i32 %numObjects, i64 %size, i32 %lEncoding) {
-entry:
- br label %meshBB85
-
-bb5: ; preds = %bb13.fragment.cl135, %bb13.fragment.cl, %bb.i.i.bbcl.disp, %bb13.fragment
- %0 = invoke i32 @_Z17LoadObjectFromBERR8xmstreamPP10ASN1ObjectPPF10ASN1StatusP13ASN1ObjHeaderS3_E(%struct.xmstream* undef, %struct.ASN1Object** undef, i32 (%struct.ASN1ObjHeader*, %struct.ASN1Object**)** undef)
- to label %meshBB81.bbcl.disp unwind label %lpad ; <i32> [#uses=0]
-
-bb10.fragment: ; preds = %bb13.fragment.bbcl.disp
- br i1 undef, label %bb1.i.fragment.bbcl.disp, label %bb.i.i.bbcl.disp
-
-bb1.i.fragment: ; preds = %bb1.i.fragment.bbcl.disp
- invoke void @_ZNSt6vectorIP10ASN1ObjectSaIS1_EE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPS1_S3_EERKS1_(%"struct.std::ASN1ObjList"* undef, i64 undef, %struct.ASN1Object** undef)
- to label %meshBB81.bbcl.disp unwind label %lpad
-
-bb13.fragment: ; preds = %bb13.fragment.bbcl.disp
- br i1 undef, label %meshBB81.bbcl.disp, label %bb5
-
-bb.i4: ; preds = %bb.i4.bbcl.disp, %bb1.i.fragment.bbcl.disp
- ret i32 undef
-
-bb1.i5: ; preds = %bb.i1
- ret i32 undef
-
-lpad: ; preds = %bb1.i.fragment.cl, %bb1.i.fragment, %bb5
- %.SV10.phi807 = phi i8* [ undef, %bb1.i.fragment.cl ], [ undef, %bb1.i.fragment ], [ undef, %bb5 ] ; <i8*> [#uses=1]
- %1 = load i8* %.SV10.phi807, align 8 ; <i8> [#uses=0]
- br i1 undef, label %meshBB81.bbcl.disp, label %bb13.fragment.bbcl.disp
-
-bb.i1: ; preds = %bb.i.i.bbcl.disp
- br i1 undef, label %meshBB81.bbcl.disp, label %bb1.i5
-
-meshBB81: ; preds = %meshBB81.bbcl.disp, %bb.i.i.bbcl.disp
- br i1 undef, label %meshBB81.bbcl.disp, label %bb.i4.bbcl.disp
-
-meshBB85: ; preds = %meshBB81.bbcl.disp, %bb.i4.bbcl.disp, %bb1.i.fragment.bbcl.disp, %bb.i.i.bbcl.disp, %entry
- br i1 undef, label %meshBB81.bbcl.disp, label %bb13.fragment.bbcl.disp
-
-bb.i.i.bbcl.disp: ; preds = %bb10.fragment
- switch i8 undef, label %meshBB85 [
- i8 123, label %bb.i1
- i8 97, label %bb5
- i8 44, label %meshBB81
- i8 1, label %meshBB81.cl
- i8 51, label %meshBB81.cl141
- ]
-
-bb1.i.fragment.cl: ; preds = %bb1.i.fragment.bbcl.disp
- invoke void @_ZNSt6vectorIP10ASN1ObjectSaIS1_EE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPS1_S3_EERKS1_(%"struct.std::ASN1ObjList"* undef, i64 undef, %struct.ASN1Object** undef)
- to label %meshBB81.bbcl.disp unwind label %lpad
-
-bb1.i.fragment.bbcl.disp: ; preds = %bb10.fragment
- switch i8 undef, label %bb.i4 [
- i8 97, label %bb1.i.fragment
- i8 7, label %bb1.i.fragment.cl
- i8 35, label %bb.i4.cl
- i8 77, label %meshBB85
- ]
-
-bb13.fragment.cl: ; preds = %bb13.fragment.bbcl.disp
- br i1 undef, label %meshBB81.bbcl.disp, label %bb5
-
-bb13.fragment.cl135: ; preds = %bb13.fragment.bbcl.disp
- br i1 undef, label %meshBB81.bbcl.disp, label %bb5
-
-bb13.fragment.bbcl.disp: ; preds = %meshBB85, %lpad
- switch i8 undef, label %bb10.fragment [
- i8 67, label %bb13.fragment.cl
- i8 108, label %bb13.fragment
- i8 58, label %bb13.fragment.cl135
- ]
-
-bb.i4.cl: ; preds = %bb.i4.bbcl.disp, %bb1.i.fragment.bbcl.disp
- ret i32 undef
-
-bb.i4.bbcl.disp: ; preds = %meshBB81.cl141, %meshBB81.cl, %meshBB81
- switch i8 undef, label %bb.i4 [
- i8 35, label %bb.i4.cl
- i8 77, label %meshBB85
- ]
-
-meshBB81.cl: ; preds = %meshBB81.bbcl.disp, %bb.i.i.bbcl.disp
- br i1 undef, label %meshBB81.bbcl.disp, label %bb.i4.bbcl.disp
-
-meshBB81.cl141: ; preds = %meshBB81.bbcl.disp, %bb.i.i.bbcl.disp
- br i1 undef, label %meshBB81.bbcl.disp, label %bb.i4.bbcl.disp
-
-meshBB81.bbcl.disp: ; preds = %meshBB81.cl141, %meshBB81.cl, %bb13.fragment.cl135, %bb13.fragment.cl, %bb1.i.fragment.cl, %meshBB85, %meshBB81, %bb.i1, %lpad, %bb13.fragment, %bb1.i.fragment, %bb5
- switch i8 undef, label %meshBB85 [
- i8 44, label %meshBB81
- i8 1, label %meshBB81.cl
- i8 51, label %meshBB81.cl141
- ]
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll
deleted file mode 100644
index 1e7a418..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
-; pr5391
-
-define void @t() nounwind ssp {
-entry:
-; CHECK: t:
-; CHECK: movl %ecx, %eax
-; CHECK: %eax = foo (%eax, %ecx)
- %b = alloca i32 ; <i32*> [#uses=2]
- %a = alloca i32 ; <i32*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = load i32* %b, align 4 ; <i32> [#uses=1]
- %1 = load i32* %b, align 4 ; <i32> [#uses=1]
- %asmtmp = call i32 asm "$0 = foo ($1, $2)", "=&{ax},%0,r,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1) nounwind ; <i32> [#uses=1]
- store i32 %asmtmp, i32* %a
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define void @t2() nounwind ssp {
-entry:
-; CHECK: t2:
-; CHECK: movl %eax, %ecx
-; CHECK: %ecx = foo (%ecx, %eax)
- %b = alloca i32 ; <i32*> [#uses=2]
- %a = alloca i32 ; <i32*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = load i32* %b, align 4 ; <i32> [#uses=1]
- %1 = load i32* %b, align 4 ; <i32> [#uses=1]
- %asmtmp = call i32 asm "$0 = foo ($1, $2)", "=&r,%0,r,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1) nounwind ; <i32> [#uses=1]
- store i32 %asmtmp, i32* %a
- br label %return
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-12-11-TLSNoRedZone.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-12-11-TLSNoRedZone.ll
deleted file mode 100644
index f7ba661..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-12-11-TLSNoRedZone.ll
+++ /dev/null
@@ -1,63 +0,0 @@
-; RUN: llc -relocation-model=pic < %s | FileCheck %s
-; PR5723
-target datalayout = "e-p:64:64"
-target triple = "x86_64-unknown-linux-gnu"
-
-%0 = type { [1 x i64] }
-%link = type { %0* }
-%test = type { i32, %link }
-
- at data = global [2 x i64] zeroinitializer, align 64 ; <[2 x i64]*> [#uses=1]
- at ptr = linkonce thread_local global [1 x i64] [i64 ptrtoint ([2 x i64]* @data to i64)], align 64 ; <[1 x i64]*> [#uses=1]
- at link_ptr = linkonce thread_local global [1 x i64] zeroinitializer, align 64 ; <[1 x i64]*> [#uses=1]
- at _dm_my_pe = external global [1 x i64], align 64 ; <[1 x i64]*> [#uses=0]
- at _dm_pes_in_prog = external global [1 x i64], align 64 ; <[1 x i64]*> [#uses=0]
- at _dm_npes_div_mult = external global [1 x i64], align 64 ; <[1 x i64]*> [#uses=0]
- at _dm_npes_div_shift = external global [1 x i64], align 64 ; <[1 x i64]*> [#uses=0]
- at _dm_pe_addr_loc = external global [1 x i64], align 64 ; <[1 x i64]*> [#uses=0]
- at _dm_offset_addr_mask = external global [1 x i64], align 64 ; <[1 x i64]*> [#uses=0]
-
-define void @leaf() nounwind {
-; CHECK: leaf:
-; CHECK-NOT: -8(%rsp)
-; CHECK: leaq link_ptr at TLSGD
-; CHECK: call __tls_get_addr at PLT
-"file foo2.c, line 14, bb1":
- %p = alloca %test*, align 8 ; <%test**> [#uses=4]
- br label %"file foo2.c, line 14, bb2"
-
-"file foo2.c, line 14, bb2": ; preds = %"file foo2.c, line 14, bb1"
- br label %"@CFE_debug_label_0"
-
-"@CFE_debug_label_0": ; preds = %"file foo2.c, line 14, bb2"
- %r = load %test** bitcast ([1 x i64]* @ptr to %test**), align 8 ; <%test*> [#uses=1]
- store %test* %r, %test** %p, align 8
- br label %"@CFE_debug_label_2"
-
-"@CFE_debug_label_2": ; preds = %"@CFE_debug_label_0"
- %r1 = load %link** bitcast ([1 x i64]* @link_ptr to %link**), align 8 ; <%link*> [#uses=1]
- %r2 = load %test** %p, align 8 ; <%test*> [#uses=1]
- %r3 = ptrtoint %test* %r2 to i64 ; <i64> [#uses=1]
- %r4 = inttoptr i64 %r3 to %link** ; <%link**> [#uses=1]
- %r5 = getelementptr %link** %r4, i64 1 ; <%link**> [#uses=1]
- store %link* %r1, %link** %r5, align 8
- br label %"@CFE_debug_label_3"
-
-"@CFE_debug_label_3": ; preds = %"@CFE_debug_label_2"
- %r6 = load %test** %p, align 8 ; <%test*> [#uses=1]
- %r7 = ptrtoint %test* %r6 to i64 ; <i64> [#uses=1]
- %r8 = inttoptr i64 %r7 to %link* ; <%link*> [#uses=1]
- %r9 = getelementptr %link* %r8, i64 1 ; <%link*> [#uses=1]
- store %link* %r9, %link** bitcast ([1 x i64]* @link_ptr to %link**), align 8
- br label %"@CFE_debug_label_4"
-
-"@CFE_debug_label_4": ; preds = %"@CFE_debug_label_3"
- %r10 = load %test** %p, align 8 ; <%test*> [#uses=1]
- %r11 = ptrtoint %test* %r10 to i64 ; <i64> [#uses=1]
- %r12 = inttoptr i64 %r11 to i32* ; <i32*> [#uses=1]
- store i32 1, i32* %r12, align 4
- br label %"@CFE_debug_label_5"
-
-"@CFE_debug_label_5": ; preds = %"@CFE_debug_label_4"
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2009-12-12-CoalescerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2009-12-12-CoalescerBug.ll
deleted file mode 100644
index 4e8f5fd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2009-12-12-CoalescerBug.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin | FileCheck %s
-
-define i32 @do_loop(i32* nocapture %sdp, i32* nocapture %ddp, i8* %mdp, i8* nocapture %cdp, i32 %w) nounwind readonly optsize ssp {
-entry:
- br label %bb
-
-bb: ; preds = %bb5, %entry
- %mask.1.in = load i8* undef, align 1 ; <i8> [#uses=3]
- %0 = icmp eq i8 %mask.1.in, 0 ; <i1> [#uses=1]
- br i1 %0, label %bb5, label %bb1
-
-bb1: ; preds = %bb
- br i1 undef, label %bb2, label %bb3
-
-bb2: ; preds = %bb1
-; CHECK: %bb2
-; CHECK: movb %ch, %al
- %1 = zext i8 %mask.1.in to i32 ; <i32> [#uses=1]
- %2 = zext i8 undef to i32 ; <i32> [#uses=1]
- %3 = mul i32 %2, %1 ; <i32> [#uses=1]
- %4 = add i32 %3, 1 ; <i32> [#uses=1]
- %5 = add i32 %4, 0 ; <i32> [#uses=1]
- %6 = lshr i32 %5, 8 ; <i32> [#uses=1]
- %retval12.i = trunc i32 %6 to i8 ; <i8> [#uses=1]
- br label %bb3
-
-bb3: ; preds = %bb2, %bb1
- %mask.0.in = phi i8 [ %retval12.i, %bb2 ], [ %mask.1.in, %bb1 ] ; <i8> [#uses=1]
- %7 = icmp eq i8 %mask.0.in, 0 ; <i1> [#uses=1]
- br i1 %7, label %bb5, label %bb4
-
-bb4: ; preds = %bb3
- br label %bb5
-
-bb5: ; preds = %bb4, %bb3, %bb
- br i1 undef, label %bb6, label %bb
-
-bb6: ; preds = %bb5
- ret i32 undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/20090313-signext.ll b/libclamav/c++/llvm/test/CodeGen/X86/20090313-signext.ll
deleted file mode 100644
index de930d5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/20090313-signext.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86-64 -relocation-model=pic > %t
-; RUN: grep {movswl %ax, %edi} %t
-; RUN: grep {movw (%rax), %ax} %t
-; XFAIL: *
-
-@x = common global i16 0
-
-define signext i16 @f() nounwind {
-entry:
- %0 = tail call signext i16 @h() nounwind
- %1 = sext i16 %0 to i32
- tail call void @g(i32 %1) nounwind
- %2 = load i16* @x, align 2
- ret i16 %2
-}
-
-declare signext i16 @h()
-
-declare void @g(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-01-05-ZExt-Shl.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-01-05-ZExt-Shl.ll
deleted file mode 100644
index e7004e2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-01-05-ZExt-Shl.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86-64
-; <rdar://problem/7499313>
-target triple = "i686-apple-darwin8"
-
-declare void @func2(i16 zeroext)
-
-define void @func1() nounwind {
-entry:
- %t1 = icmp ne i8 undef, 0
- %t2 = icmp eq i8 undef, 14
- %t3 = and i1 %t1, %t2
- %t4 = select i1 %t3, i16 0, i16 128
- call void @func2(i16 zeroext %t4) nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-01-07-ISelBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-01-07-ISelBug.ll
deleted file mode 100644
index 081fab7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-01-07-ISelBug.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10
-; rdar://r7519827
-
-define i32 @t() nounwind ssp {
-entry:
- br label %if.end.i11
-
-if.end.i11: ; preds = %lor.lhs.false.i10, %lor.lhs.false.i10, %lor.lhs.false.i10
- br i1 undef, label %for.body161, label %for.end197
-
-for.body161: ; preds = %if.end.i11
- br label %for.end197
-
-for.end197: ; preds = %for.body161, %if.end.i11
- %mlucEntry.4 = phi i96 [ undef, %for.body161 ], [ undef, %if.end.i11 ] ; <i96> [#uses=2]
- store i96 %mlucEntry.4, i96* undef, align 8
- %tmp172 = lshr i96 %mlucEntry.4, 64 ; <i96> [#uses=1]
- %tmp173 = trunc i96 %tmp172 to i32 ; <i32> [#uses=1]
- %tmp1.i1.i = call i32 @llvm.bswap.i32(i32 %tmp173) nounwind ; <i32> [#uses=1]
- store i32 %tmp1.i1.i, i32* undef, align 8
- unreachable
-
-if.then283: ; preds = %lor.lhs.false.i10, %do.end105, %for.end
- ret i32 undef
-}
-
-declare i32 @llvm.bswap.i32(i32) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-01-07-UAMemFeature.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-01-07-UAMemFeature.ll
deleted file mode 100644
index 3728f15..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-01-07-UAMemFeature.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc -mcpu=yonah -mattr=vector-unaligned-mem -march=x86 < %s | FileCheck %s
-; CHECK: addps (
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define <4 x float> @foo(<4 x float>* %P, <4 x float> %In) nounwind {
- %A = load <4 x float>* %P, align 4
- %B = add <4 x float> %A, %In
- ret <4 x float> %B
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
deleted file mode 100644
index 172e1c7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-01-08-Atomic64Bug.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin | FileCheck %s
-; rdar://r7512579
-
-; PHI defs in the atomic loop should be used by the add / adc
-; instructions. They should not be dead.
-
-define void @t(i64* nocapture %p) nounwind ssp {
-entry:
-; CHECK: t:
-; CHECK: movl $1
-; CHECK: movl (%ebp), %eax
-; CHECK: movl 4(%ebp), %edx
-; CHECK: LBB1_1:
-; CHECK-NOT: movl $1
-; CHECK-NOT: movl $0
-; CHECK: addl
-; CHECK: adcl
-; CHECK: lock
-; CHECK: cmpxchg8b
-; CHECK: jne
- tail call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0]
- tail call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
- ret void
-}
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
-
-declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-01-11-ExtraPHIArg.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-01-11-ExtraPHIArg.ll
deleted file mode 100644
index db98eef..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-01-11-ExtraPHIArg.ll
+++ /dev/null
@@ -1,97 +0,0 @@
-; RUN: llc -verify-machineinstrs < %s
-;
-; The lowering of a switch combined with constant folding would leave spurious extra arguments on a PHI instruction.
-;
-target triple = "x86_64-apple-darwin10"
-
-define void @foo() {
- br label %cond_true813.i
-
-cond_true813.i: ; preds = %0
- br i1 false, label %cond_true818.i, label %cond_next1146.i
-
-cond_true818.i: ; preds = %cond_true813.i
- br i1 false, label %recog_memoized.exit52, label %cond_next1146.i
-
-recog_memoized.exit52: ; preds = %cond_true818.i
- switch i32 0, label %bb886.i.preheader [
- i32 0, label %bb907.i
- i32 44, label %bb866.i
- i32 103, label %bb874.i
- i32 114, label %bb874.i
- ]
-
-bb857.i: ; preds = %bb886.i, %bb866.i
- %tmp862.i494.24 = phi i8* [ null, %bb866.i ], [ %tmp862.i494.26, %bb886.i ] ; <i8*> [#uses=1]
- switch i32 0, label %bb886.i.preheader [
- i32 0, label %bb907.i
- i32 44, label %bb866.i
- i32 103, label %bb874.i
- i32 114, label %bb874.i
- ]
-
-bb866.i.loopexit: ; preds = %bb874.i
- br label %bb866.i
-
-bb866.i.loopexit31: ; preds = %cond_true903.i
- br label %bb866.i
-
-bb866.i: ; preds = %bb866.i.loopexit31, %bb866.i.loopexit, %bb857.i, %recog_memoized.exit52
- br i1 false, label %bb907.i, label %bb857.i
-
-bb874.i.preheader.loopexit: ; preds = %cond_true903.i, %cond_true903.i
- ret void
-
-bb874.i: ; preds = %bb857.i, %bb857.i, %recog_memoized.exit52, %recog_memoized.exit52
- switch i32 0, label %bb886.i.preheader.loopexit [
- i32 0, label %bb907.i
- i32 44, label %bb866.i.loopexit
- i32 103, label %bb874.i.backedge
- i32 114, label %bb874.i.backedge
- ]
-
-bb874.i.backedge: ; preds = %bb874.i, %bb874.i
- ret void
-
-bb886.i.preheader.loopexit: ; preds = %bb874.i
- ret void
-
-bb886.i.preheader: ; preds = %bb857.i, %recog_memoized.exit52
- %tmp862.i494.26 = phi i8* [ undef, %recog_memoized.exit52 ], [ %tmp862.i494.24, %bb857.i ] ; <i8*> [#uses=1]
- br label %bb886.i
-
-bb886.i: ; preds = %cond_true903.i, %bb886.i.preheader
- br i1 false, label %bb857.i, label %cond_true903.i
-
-cond_true903.i: ; preds = %bb886.i
- switch i32 0, label %bb886.i [
- i32 0, label %bb907.i
- i32 44, label %bb866.i.loopexit31
- i32 103, label %bb874.i.preheader.loopexit
- i32 114, label %bb874.i.preheader.loopexit
- ]
-
-bb907.i: ; preds = %cond_true903.i, %bb874.i, %bb866.i, %bb857.i, %recog_memoized.exit52
- br i1 false, label %cond_next1146.i, label %cond_true910.i
-
-cond_true910.i: ; preds = %bb907.i
- ret void
-
-cond_next1146.i: ; preds = %bb907.i, %cond_true818.i, %cond_true813.i
- ret void
-
-bb2060.i: ; No predecessors!
- br i1 false, label %cond_true2064.i, label %bb2067.i
-
-cond_true2064.i: ; preds = %bb2060.i
- unreachable
-
-bb2067.i: ; preds = %bb2060.i
- ret void
-
-cond_next3473: ; No predecessors!
- ret void
-
-cond_next3521: ; No predecessors!
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-01-13-OptExtBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-01-13-OptExtBug.ll
deleted file mode 100644
index d49e2a8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-01-13-OptExtBug.ll
+++ /dev/null
@@ -1,46 +0,0 @@
-; RUN: llc < %s -mtriple=i386-pc-linux-gnu
-; PR6027
-
-%class.OlsonTimeZone = type { i16, i32*, i8*, i16 }
-
-define void @XX(%class.OlsonTimeZone* %this) align 2 {
-entry:
- %call = tail call i8* @_Z15uprv_malloc_4_2v()
- %0 = bitcast i8* %call to double*
- %tmp = getelementptr inbounds %class.OlsonTimeZone* %this, i32 0, i32 3
- %tmp2 = load i16* %tmp
- %tmp525 = getelementptr inbounds %class.OlsonTimeZone* %this, i32 0, i32 0
- %tmp626 = load i16* %tmp525
- %cmp27 = icmp slt i16 %tmp2, %tmp626
- br i1 %cmp27, label %bb.nph, label %for.end
-
-for.cond:
- %tmp6 = load i16* %tmp5
- %cmp = icmp slt i16 %inc, %tmp6
- %indvar.next = add i32 %indvar, 1
- br i1 %cmp, label %for.body, label %for.end
-
-bb.nph:
- %tmp10 = getelementptr inbounds %class.OlsonTimeZone* %this, i32 0, i32 2
- %tmp17 = getelementptr inbounds %class.OlsonTimeZone* %this, i32 0, i32 1
- %tmp5 = getelementptr inbounds %class.OlsonTimeZone* %this, i32 0, i32 0
- %tmp29 = sext i16 %tmp2 to i32
- %tmp31 = add i16 %tmp2, 1
- %tmp32 = zext i16 %tmp31 to i32
- br label %for.body
-
-for.body:
- %indvar = phi i32 [ 0, %bb.nph ], [ %indvar.next, %for.cond ]
- %tmp30 = add i32 %indvar, %tmp29
- %tmp33 = add i32 %indvar, %tmp32
- %inc = trunc i32 %tmp33 to i16
- %tmp11 = load i8** %tmp10
- %arrayidx = getelementptr i8* %tmp11, i32 %tmp30
- %tmp12 = load i8* %arrayidx
- br label %for.cond
-
-for.end:
- ret void
-}
-
-declare i8* @_Z15uprv_malloc_4_2v()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-01-15-SelectionDAGCycle.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-01-15-SelectionDAGCycle.ll
deleted file mode 100644
index 5d96e4a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-01-15-SelectionDAGCycle.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -march=x86-64
-; ModuleID = 'bugpoint-reduced-simplified.bc'
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define void @numvec_(i32* noalias %ncelet, i32* noalias %ncel, i32* noalias %nfac, i32* noalias %nfabor, i32* noalias %lregis, i32* noalias %irveci, i32* noalias %irvecb, [0 x [2 x i32]]* noalias %ifacel, [0 x i32]* noalias %ifabor, [0 x i32]* noalias %inumfi, [0 x i32]* noalias %inumfb, [1 x i32]* noalias %iworkf, [0 x i32]* noalias %ismbs) {
-"file bug754399.f90, line 1, bb1":
- %r1037 = bitcast <2 x double> zeroinitializer to <4 x i32> ; <<4 x i32>> [#uses=1]
- br label %"file bug754399.f90, line 184, in inner vector loop at depth 0, bb164"
-
-"file bug754399.f90, line 184, in inner vector loop at depth 0, bb164": ; preds = %"file bug754399.f90, line 184, in inner vector loop at depth 0, bb164", %"file bug754399.f90, line 1, bb1"
- %tmp641 = add i64 0, 48 ; <i64> [#uses=1]
- %tmp641642 = inttoptr i64 %tmp641 to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %r1258 = load <4 x i32>* %tmp641642, align 4 ; <<4 x i32>> [#uses=2]
- %r1295 = extractelement <4 x i32> %r1258, i32 3 ; <i32> [#uses=1]
- %r1296 = sext i32 %r1295 to i64 ; <i64> [#uses=1]
- %r1297 = add i64 %r1296, -1 ; <i64> [#uses=1]
- %r1298183 = getelementptr [0 x i32]* %ismbs, i64 0, i64 %r1297 ; <i32*> [#uses=1]
- %r1298184 = load i32* %r1298183, align 4 ; <i32> [#uses=1]
- %r1301 = extractelement <4 x i32> %r1037, i32 3 ; <i32> [#uses=1]
- %r1302 = mul i32 %r1298184, %r1301 ; <i32> [#uses=1]
- %r1306 = insertelement <4 x i32> zeroinitializer, i32 %r1302, i32 3 ; <<4 x i32>> [#uses=1]
- %r1321 = add <4 x i32> %r1306, %r1258 ; <<4 x i32>> [#uses=1]
- %tmp643 = add i64 0, 48 ; <i64> [#uses=1]
- %tmp643644 = inttoptr i64 %tmp643 to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- store <4 x i32> %r1321, <4 x i32>* %tmp643644, align 4
- br label %"file bug754399.f90, line 184, in inner vector loop at depth 0, bb164"
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-01-19-OptExtBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-01-19-OptExtBug.ll
deleted file mode 100644
index cd8960b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-01-19-OptExtBug.ll
+++ /dev/null
@@ -1,57 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin11 -relocation-model=pic -disable-fp-elim -stats |& not grep ext-opt
-
-define fastcc i8* @S_scan_str(i8* %start, i32 %keep_quoted, i32 %keep_delims) nounwind ssp {
-entry:
- switch i8 undef, label %bb6 [
- i8 9, label %bb5
- i8 32, label %bb5
- i8 10, label %bb5
- i8 13, label %bb5
- i8 12, label %bb5
- ]
-
-bb5: ; preds = %entry, %entry, %entry, %entry, %entry
- br label %bb6
-
-bb6: ; preds = %bb5, %entry
- br i1 undef, label %bb7, label %bb9
-
-bb7: ; preds = %bb6
- unreachable
-
-bb9: ; preds = %bb6
- %0 = load i8* undef, align 1 ; <i8> [#uses=3]
- br i1 undef, label %bb12, label %bb10
-
-bb10: ; preds = %bb9
- br i1 undef, label %bb12, label %bb11
-
-bb11: ; preds = %bb10
- unreachable
-
-bb12: ; preds = %bb10, %bb9
- br i1 undef, label %bb13, label %bb14
-
-bb13: ; preds = %bb12
- store i8 %0, i8* undef, align 1
- %1 = zext i8 %0 to i32 ; <i32> [#uses=1]
- br label %bb18
-
-bb14: ; preds = %bb12
- br label %bb18
-
-bb18: ; preds = %bb14, %bb13
- %termcode.0 = phi i32 [ %1, %bb13 ], [ undef, %bb14 ] ; <i32> [#uses=2]
- %2 = icmp eq i8 %0, 0 ; <i1> [#uses=1]
- br i1 %2, label %bb21, label %bb19
-
-bb19: ; preds = %bb18
- br i1 undef, label %bb21, label %bb20
-
-bb20: ; preds = %bb19
- br label %bb21
-
-bb21: ; preds = %bb20, %bb19, %bb18
- %termcode.1 = phi i32 [ %termcode.0, %bb18 ], [ %termcode.0, %bb19 ], [ undef, %bb20 ] ; <i32> [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-01-TaillCallCrash.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-02-01-TaillCallCrash.ll
deleted file mode 100644
index 2751174..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-01-TaillCallCrash.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu
-; PR6196
-
-%"char[]" = type [1 x i8]
-
-@.str = external constant %"char[]", align 1 ; <%"char[]"*> [#uses=1]
-
-define i32 @regex_subst() nounwind {
-entry:
- %0 = tail call i32 bitcast (%"char[]"* @.str to i32 (i32)*)(i32 0) nounwind ; <i32> [#uses=1]
- ret i32 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-03-DualUndef.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-02-03-DualUndef.ll
deleted file mode 100644
index d116ecc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-03-DualUndef.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=x86-64
-; PR6086
-define fastcc void @prepOutput() nounwind {
-bb: ; preds = %output.exit
- br label %bb.i1
-
-bb.i1: ; preds = %bb7.i, %bb
- br i1 undef, label %bb7.i, label %bb.nph.i
-
-bb.nph.i: ; preds = %bb.i1
- br label %bb3.i
-
-bb3.i: ; preds = %bb5.i6, %bb.nph.i
- %tmp10.i = trunc i64 undef to i32 ; <i32> [#uses=1]
- br i1 undef, label %bb4.i, label %bb5.i6
-
-bb4.i: ; preds = %bb3.i
- br label %bb5.i6
-
-bb5.i6: ; preds = %bb4.i, %bb3.i
- %0 = phi i32 [ undef, %bb4.i ], [ undef, %bb3.i ] ; <i32> [#uses=1]
- %1 = icmp slt i32 %0, %tmp10.i ; <i1> [#uses=1]
- br i1 %1, label %bb7.i, label %bb3.i
-
-bb7.i: ; preds = %bb5.i6, %bb.i1
- br label %bb.i1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-04-SchedulerBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-02-04-SchedulerBug.ll
deleted file mode 100644
index c966e21..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-04-SchedulerBug.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin11
-; rdar://7604000
-
-%struct.a_t = type { i8*, i64*, i8*, i32, i32, i64*, i64*, i64* }
-%struct.b_t = type { i32, i32, i32, i32, i64, i64, i64, i64 }
-
-define void @t(i32 %cNum, i64 %max) nounwind optsize ssp noimplicitfloat {
-entry:
- %0 = load %struct.b_t** null, align 4 ; <%struct.b_t*> [#uses=1]
- %1 = getelementptr inbounds %struct.b_t* %0, i32 %cNum, i32 5 ; <i64*> [#uses=1]
- %2 = load i64* %1, align 4 ; <i64> [#uses=1]
- %3 = icmp ult i64 %2, %max ; <i1> [#uses=1]
- %4 = getelementptr inbounds %struct.a_t* null, i32 0, i32 7 ; <i64**> [#uses=1]
- %5 = load i64** %4, align 4 ; <i64*> [#uses=0]
- %6 = load i64* null, align 4 ; <i64> [#uses=1]
- br i1 %3, label %bb2, label %bb
-
-bb: ; preds = %entry
- br label %bb3
-
-bb2: ; preds = %entry
- %7 = or i64 %6, undef ; <i64> [#uses=1]
- br label %bb3
-
-bb3: ; preds = %bb2, %bb
- %misc_enables.0 = phi i64 [ undef, %bb ], [ %7, %bb2 ] ; <i64> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-11-NonTemporal.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-02-11-NonTemporal.ll
deleted file mode 100644
index 5789a0b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-11-NonTemporal.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-; CHECK: movnt
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-
-!0 = metadata !{ i32 1 }
-
-define void @sub_(i32* noalias %n) {
-"file movnt.f90, line 2, bb1":
- %n1 = alloca i32*, align 8
- %i = alloca i32, align 4
- %"$LCS_0" = alloca i64, align 8
- %"$LCS_S2" = alloca <2 x double>, align 16
- %r9 = load <2 x double>* %"$LCS_S2", align 8
- %r10 = load i64* %"$LCS_0", align 8
- %r11 = inttoptr i64 %r10 to <2 x double>*
- store <2 x double> %r9, <2 x double>* %r11, align 16, !nontemporal !0
- br label %"file movnt.f90, line 18, bb5"
-
-"file movnt.f90, line 18, bb5":
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll
deleted file mode 100644
index c5d3d16..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll
+++ /dev/null
@@ -1,260 +0,0 @@
-; RUN: llc < %s > %t
-; PR6283
-
-; Tricky coalescer bug:
-; After coalescing %RAX with a virtual register, this instruction was rematted:
-;
-; %EAX<def> = MOV32rr %reg1070<kill>
-;
-; This instruction silently defined %RAX, and when rematting removed the
-; instruction, the live interval for %RAX was not properly updated. The valno
-; referred to a deleted instruction and bad things happened.
-;
-; The fix is to implicitly define %RAX when coalescing:
-;
-; %EAX<def> = MOV32rr %reg1070<kill>, %RAX<imp-def>
-;
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-unknown-linux-gnu"
-
-module asm "\09.ident\09\22GCC: (GNU) 4.5.0 20100212 (experimental) LLVM: 95975\22"
-
-%0 = type { %"union gimple_statement_d"* }
-%"BITMAP_WORD[]" = type [2 x i64]
-%"char[]" = type [4 x i8]
-%"enum dom_state[]" = type [2 x i32]
-%"int[]" = type [4 x i32]
-%"struct VEC_basic_block_base" = type { i32, i32, [1 x %"struct basic_block_def"*] }
-%"struct VEC_basic_block_gc" = type { %"struct VEC_basic_block_base" }
-%"struct VEC_edge_base" = type { i32, i32, [1 x %"struct edge_def"*] }
-%"struct VEC_edge_gc" = type { %"struct VEC_edge_base" }
-%"struct VEC_gimple_base" = type { i32, i32, [1 x %"union gimple_statement_d"*] }
-%"struct VEC_gimple_gc" = type { %"struct VEC_gimple_base" }
-%"struct VEC_iv_cand_p_base" = type { i32, i32, [1 x %"struct iv_cand"*] }
-%"struct VEC_iv_cand_p_heap" = type { %"struct VEC_iv_cand_p_base" }
-%"struct VEC_iv_use_p_base" = type { i32, i32, [1 x %"struct iv_use"*] }
-%"struct VEC_iv_use_p_heap" = type { %"struct VEC_iv_use_p_base" }
-%"struct VEC_loop_p_base" = type { i32, i32, [1 x %"struct loop"*] }
-%"struct VEC_loop_p_gc" = type { %"struct VEC_loop_p_base" }
-%"struct VEC_rtx_base" = type { i32, i32, [1 x %"struct rtx_def"*] }
-%"struct VEC_rtx_gc" = type { %"struct VEC_rtx_base" }
-%"struct VEC_tree_base" = type { i32, i32, [1 x %"union tree_node"*] }
-%"struct VEC_tree_gc" = type { %"struct VEC_tree_base" }
-%"struct _obstack_chunk" = type { i8*, %"struct _obstack_chunk"*, %"char[]" }
-%"struct basic_block_def" = type { %"struct VEC_edge_gc"*, %"struct VEC_edge_gc"*, i8*, %"struct loop"*, [2 x %"struct et_node"*], %"struct basic_block_def"*, %"struct basic_block_def"*, %"union basic_block_il_dependent", i64, i32, i32, i32, i32, i32 }
-%"struct bitmap_element" = type { %"struct bitmap_element"*, %"struct bitmap_element"*, i32, %"BITMAP_WORD[]" }
-%"struct bitmap_head_def" = type { %"struct bitmap_element"*, %"struct bitmap_element"*, i32, %"struct bitmap_obstack"* }
-%"struct bitmap_obstack" = type { %"struct bitmap_element"*, %"struct bitmap_head_def"*, %"struct obstack" }
-%"struct block_symbol" = type { [3 x %"union rtunion"], %"struct object_block"*, i64 }
-%"struct comp_cost" = type { i32, i32 }
-%"struct control_flow_graph" = type { %"struct basic_block_def"*, %"struct basic_block_def"*, %"struct VEC_basic_block_gc"*, i32, i32, i32, %"struct VEC_basic_block_gc"*, i32, %"enum dom_state[]", %"enum dom_state[]", i32, i32 }
-%"struct cost_pair" = type { %"struct iv_cand"*, %"struct comp_cost", %"struct bitmap_head_def"*, %"union tree_node"* }
-%"struct def_optype_d" = type { %"struct def_optype_d"*, %"union tree_node"** }
-%"struct double_int" = type { i64, i64 }
-%"struct edge_def" = type { %"struct basic_block_def"*, %"struct basic_block_def"*, %"union edge_def_insns", i8*, %"union tree_node"*, i32, i32, i32, i32, i64 }
-%"struct eh_status" = type opaque
-%"struct et_node" = type opaque
-%"struct function" = type { %"struct eh_status"*, %"struct control_flow_graph"*, %"struct gimple_seq_d"*, %"struct gimple_df"*, %"struct loops"*, %"struct htab"*, %"union tree_node"*, %"union tree_node"*, %"union tree_node"*, %"union tree_node"*, %"struct machine_function"*, %"struct language_function"*, %"struct htab"*, i32, i32, i32, i32, i32, i32, i8*, i8, i8, i8, i8 }
-%"struct gimple_bb_info" = type { %"struct gimple_seq_d"*, %"struct gimple_seq_d"* }
-%"struct gimple_df" = type { %"struct htab"*, %"struct VEC_gimple_gc"*, %"struct VEC_tree_gc"*, %"union tree_node"*, %"struct pt_solution", %"struct pt_solution", %"struct pointer_map_t"*, %"union tree_node"*, %"struct htab"*, %"struct bitmap_head_def"*, i8, %"struct ssa_operands" }
-%"struct gimple_seq_d" = type { %"struct gimple_seq_node_d"*, %"struct gimple_seq_node_d"*, %"struct gimple_seq_d"* }
-%"struct gimple_seq_node_d" = type { %"union gimple_statement_d"*, %"struct gimple_seq_node_d"*, %"struct gimple_seq_node_d"* }
-%"struct gimple_statement_base" = type { i8, i8, i16, i32, i32, i32, %"struct basic_block_def"*, %"union tree_node"* }
-%"struct gimple_statement_phi" = type { %"struct gimple_statement_base", i32, i32, %"union tree_node"*, %"struct phi_arg_d[]" }
-%"struct htab" = type { i32 (i8*)*, i32 (i8*, i8*)*, void (i8*)*, i8**, i64, i64, i64, i32, i32, i8* (i64, i64)*, void (i8*)*, i8*, i8* (i8*, i64, i64)*, void (i8*, i8*)*, i32 }
-%"struct iv" = type { %"union tree_node"*, %"union tree_node"*, %"union tree_node"*, %"union tree_node"*, i8, i8, i32 }
-%"struct iv_cand" = type { i32, i8, i32, %"union gimple_statement_d"*, %"union tree_node"*, %"union tree_node"*, %"struct iv"*, i32, i32, %"struct iv_use"*, %"struct bitmap_head_def"* }
-%"struct iv_use" = type { i32, i32, %"struct iv"*, %"union gimple_statement_d"*, %"union tree_node"**, %"struct bitmap_head_def"*, i32, %"struct cost_pair"*, %"struct iv_cand"* }
-%"struct ivopts_data" = type { %"struct loop"*, %"struct pointer_map_t"*, i32, i32, %"struct version_info"*, %"struct bitmap_head_def"*, %"struct VEC_iv_use_p_heap"*, %"struct VEC_iv_cand_p_heap"*, %"struct bitmap_head_def"*, i32, i8, i8 }
-%"struct lang_decl" = type opaque
-%"struct language_function" = type opaque
-%"struct loop" = type { i32, i32, %"struct basic_block_def"*, %"struct basic_block_def"*, %"struct comp_cost", i32, i32, %"struct VEC_loop_p_gc"*, %"struct loop"*, %"struct loop"*, i8*, %"union tree_node"*, %"struct double_int", %"struct double_int", i8, i8, i32, %"struct nb_iter_bound"*, %"struct loop_exit"*, i8, %"union tree_node"* }
-%"struct loop_exit" = type { %"struct edge_def"*, %"struct loop_exit"*, %"struct loop_exit"*, %"struct loop_exit"* }
-%"struct loops" = type { i32, %"struct VEC_loop_p_gc"*, %"struct htab"*, %"struct loop"* }
-%"struct machine_cfa_state" = type { %"struct rtx_def"*, i64 }
-%"struct machine_function" = type { %"struct stack_local_entry"*, i8*, i32, i32, %"int[]", i32, %"struct machine_cfa_state", i32, i8 }
-%"struct nb_iter_bound" = type { %"union gimple_statement_d"*, %"struct double_int", i8, %"struct nb_iter_bound"* }
-%"struct object_block" = type { %"union section"*, i32, i64, %"struct VEC_rtx_gc"*, %"struct VEC_rtx_gc"* }
-%"struct obstack" = type { i64, %"struct _obstack_chunk"*, i8*, i8*, i8*, i64, i32, %"struct _obstack_chunk"* (i8*, i64)*, void (i8*, %"struct _obstack_chunk"*)*, i8*, i8 }
-%"struct phi_arg_d" = type { %"struct ssa_use_operand_d", %"union tree_node"*, i32 }
-%"struct phi_arg_d[]" = type [1 x %"struct phi_arg_d"]
-%"struct pointer_map_t" = type opaque
-%"struct pt_solution" = type { i8, %"struct bitmap_head_def"* }
-%"struct rtx_def" = type { i16, i8, i8, %"union u" }
-%"struct section_common" = type { i32 }
-%"struct ssa_operand_memory_d" = type { %"struct ssa_operand_memory_d"*, %"uchar[]" }
-%"struct ssa_operands" = type { %"struct ssa_operand_memory_d"*, i32, i32, i8, %"struct def_optype_d"*, %"struct use_optype_d"* }
-%"struct ssa_use_operand_d" = type { %"struct ssa_use_operand_d"*, %"struct ssa_use_operand_d"*, %0, %"union tree_node"** }
-%"struct stack_local_entry" = type opaque
-%"struct tree_base" = type <{ i16, i8, i8, i8, [2 x i8], i8 }>
-%"struct tree_common" = type { %"struct tree_base", %"union tree_node"*, %"union tree_node"* }
-%"struct tree_decl_common" = type { %"struct tree_decl_minimal", %"union tree_node"*, i8, i8, i8, i8, i8, i32, %"union tree_node"*, %"union tree_node"*, %"union tree_node"*, %"union tree_node"*, %"struct lang_decl"* }
-%"struct tree_decl_minimal" = type { %"struct tree_common", i32, i32, %"union tree_node"*, %"union tree_node"* }
-%"struct tree_decl_non_common" = type { %"struct tree_decl_with_vis", %"union tree_node"*, %"union tree_node"*, %"union tree_node"*, %"union tree_node"* }
-%"struct tree_decl_with_rtl" = type { %"struct tree_decl_common", %"struct rtx_def"* }
-%"struct tree_decl_with_vis" = type { %"struct tree_decl_with_rtl", %"union tree_node"*, %"union tree_node"*, %"union tree_node"*, i8, i8, i8 }
-%"struct tree_function_decl" = type { %"struct tree_decl_non_common", %"struct function"*, %"union tree_node"*, %"union tree_node"*, %"union tree_node"*, i16, i8, i8 }
-%"struct unnamed_section" = type { %"struct section_common", void (i8*)*, i8*, %"union section"* }
-%"struct use_optype_d" = type { %"struct use_optype_d"*, %"struct ssa_use_operand_d" }
-%"struct version_info" = type { %"union tree_node"*, %"struct iv"*, i8, i32, i8 }
-%"uchar[]" = type [1 x i8]
-%"union basic_block_il_dependent" = type { %"struct gimple_bb_info"* }
-%"union edge_def_insns" = type { %"struct gimple_seq_d"* }
-%"union gimple_statement_d" = type { %"struct gimple_statement_phi" }
-%"union rtunion" = type { i8* }
-%"union section" = type { %"struct unnamed_section" }
-%"union tree_node" = type { %"struct tree_function_decl" }
-%"union u" = type { %"struct block_symbol" }
-
-declare fastcc %"union tree_node"* @get_computation_at(%"struct loop"*, %"struct iv_use"* nocapture, %"struct iv_cand"* nocapture, %"union gimple_statement_d"*) nounwind
-
-declare fastcc i32 @computation_cost(%"union tree_node"*, i8 zeroext) nounwind
-
-define fastcc i64 @get_computation_cost_at(%"struct ivopts_data"* %data, %"struct iv_use"* nocapture %use, %"struct iv_cand"* nocapture %cand, i8 zeroext %address_p, %"struct bitmap_head_def"** %depends_on, %"union gimple_statement_d"* %at, i8* %can_autoinc) nounwind {
-entry:
- br i1 undef, label %"100", label %"4"
-
-"4": ; preds = %entry
- br i1 undef, label %"6", label %"5"
-
-"5": ; preds = %"4"
- unreachable
-
-"6": ; preds = %"4"
- br i1 undef, label %"8", label %"7"
-
-"7": ; preds = %"6"
- unreachable
-
-"8": ; preds = %"6"
- br i1 undef, label %"100", label %"10"
-
-"10": ; preds = %"8"
- br i1 undef, label %"17", label %"16"
-
-"16": ; preds = %"10"
- unreachable
-
-"17": ; preds = %"10"
- br i1 undef, label %"19", label %"18"
-
-"18": ; preds = %"17"
- unreachable
-
-"19": ; preds = %"17"
- br i1 undef, label %"93", label %"20"
-
-"20": ; preds = %"19"
- br i1 undef, label %"23", label %"21"
-
-"21": ; preds = %"20"
- unreachable
-
-"23": ; preds = %"20"
- br i1 undef, label %"100", label %"25"
-
-"25": ; preds = %"23"
- br i1 undef, label %"100", label %"26"
-
-"26": ; preds = %"25"
- br i1 undef, label %"30", label %"28"
-
-"28": ; preds = %"26"
- unreachable
-
-"30": ; preds = %"26"
- br i1 undef, label %"59", label %"51"
-
-"51": ; preds = %"30"
- br i1 undef, label %"55", label %"52"
-
-"52": ; preds = %"51"
- unreachable
-
-"55": ; preds = %"51"
- %0 = icmp ugt i32 0, undef ; <i1> [#uses=1]
- br i1 %0, label %"50.i", label %"9.i"
-
-"9.i": ; preds = %"55"
- unreachable
-
-"50.i": ; preds = %"55"
- br i1 undef, label %"55.i", label %"54.i"
-
-"54.i": ; preds = %"50.i"
- br i1 undef, label %"57.i", label %"55.i"
-
-"55.i": ; preds = %"54.i", %"50.i"
- unreachable
-
-"57.i": ; preds = %"54.i"
- br label %"63.i"
-
-"61.i": ; preds = %"63.i"
- br i1 undef, label %"64.i", label %"62.i"
-
-"62.i": ; preds = %"61.i"
- br label %"63.i"
-
-"63.i": ; preds = %"62.i", %"57.i"
- br i1 undef, label %"61.i", label %"64.i"
-
-"64.i": ; preds = %"63.i", %"61.i"
- unreachable
-
-"59": ; preds = %"30"
- br i1 undef, label %"60", label %"82"
-
-"60": ; preds = %"59"
- br i1 undef, label %"61", label %"82"
-
-"61": ; preds = %"60"
- br i1 undef, label %"62", label %"82"
-
-"62": ; preds = %"61"
- br i1 undef, label %"100", label %"63"
-
-"63": ; preds = %"62"
- br i1 undef, label %"65", label %"64"
-
-"64": ; preds = %"63"
- unreachable
-
-"65": ; preds = %"63"
- br i1 undef, label %"66", label %"67"
-
-"66": ; preds = %"65"
- unreachable
-
-"67": ; preds = %"65"
- %1 = load i32* undef, align 4 ; <i32> [#uses=0]
- br label %"100"
-
-"82": ; preds = %"61", %"60", %"59"
- unreachable
-
-"93": ; preds = %"19"
- %2 = call fastcc %"union tree_node"* @get_computation_at(%"struct loop"* undef, %"struct iv_use"* %use, %"struct iv_cand"* %cand, %"union gimple_statement_d"* %at) nounwind ; <%"union tree_node"*> [#uses=1]
- br i1 undef, label %"100", label %"97"
-
-"97": ; preds = %"93"
- br i1 undef, label %"99", label %"98"
-
-"98": ; preds = %"97"
- br label %"99"
-
-"99": ; preds = %"98", %"97"
- %3 = phi %"union tree_node"* [ undef, %"98" ], [ %2, %"97" ] ; <%"union tree_node"*> [#uses=1]
- %4 = call fastcc i32 @computation_cost(%"union tree_node"* %3, i8 zeroext undef) nounwind ; <i32> [#uses=1]
- br label %"100"
-
-"100": ; preds = %"99", %"93", %"67", %"62", %"25", %"23", %"8", %entry
- %memtmp1.1.0 = phi i32 [ 0, %"99" ], [ 10000000, %entry ], [ 10000000, %"8" ], [ 10000000, %"23" ], [ 10000000, %"25" ], [ undef, %"62" ], [ undef, %"67" ], [ 10000000, %"93" ] ; <i32> [#uses=1]
- %memtmp1.0.0 = phi i32 [ %4, %"99" ], [ 10000000, %entry ], [ 10000000, %"8" ], [ 10000000, %"23" ], [ 10000000, %"25" ], [ undef, %"62" ], [ undef, %"67" ], [ 10000000, %"93" ] ; <i32> [#uses=1]
- %5 = zext i32 %memtmp1.0.0 to i64 ; <i64> [#uses=1]
- %6 = zext i32 %memtmp1.1.0 to i64 ; <i64> [#uses=1]
- %7 = shl i64 %6, 32 ; <i64> [#uses=1]
- %8 = or i64 %7, %5 ; <i64> [#uses=1]
- ret i64 %8
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-15-ImplicitDefBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-02-15-ImplicitDefBug.ll
deleted file mode 100644
index c429172..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-15-ImplicitDefBug.ll
+++ /dev/null
@@ -1,80 +0,0 @@
-; RUN: llc < %s > %t
-; PR6300
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
-target triple = "i386-pc-linux-gnu"
-
-; When the "154" loops back onto itself, it defines a register after using it.
-; The first value of the register is implicit-def.
-
-%"struct location_chain_def" = type { %"struct location_chain_def"*, %"struct rtx_def"*, %"struct rtx_def"*, i32 }
-%"struct real_value" = type { i32, [5 x i32] }
-%"struct rtx_def" = type { i16, i8, i8, %"union u" }
-%"union u" = type { %"struct real_value" }
-
-define i32 @variable_union(i8** nocapture %slot, i8* nocapture %data) nounwind {
-entry:
- br i1 undef, label %"4.thread", label %"3"
-
-"4.thread": ; preds = %entry
- unreachable
-
-"3": ; preds = %entry
- br i1 undef, label %"19", label %"20"
-
-"19": ; preds = %"3"
- unreachable
-
-"20": ; preds = %"3"
- br i1 undef, label %"56.preheader", label %dv_onepart_p.exit
-
-dv_onepart_p.exit: ; preds = %"20"
- unreachable
-
-"56.preheader": ; preds = %"20"
- br label %"56"
-
-"50": ; preds = %"57"
- br label %"56"
-
-"56": ; preds = %"50", %"56.preheader"
- br i1 undef, label %"57", label %"58"
-
-"57": ; preds = %"56"
- br i1 undef, label %"50", label %"58"
-
-"58": ; preds = %"57", %"56"
- br i1 undef, label %"62", label %"63"
-
-"62": ; preds = %"58"
- unreachable
-
-"63": ; preds = %"58"
- br i1 undef, label %"67", label %"66"
-
-"66": ; preds = %"63"
- br label %"67"
-
-"67": ; preds = %"66", %"63"
- br label %"68"
-
-"68": ; preds = %"161", %"67"
- br i1 undef, label %"153", label %"161"
-
-"153": ; preds = %"68"
- br i1 undef, label %"160", label %bb.nph46
-
-bb.nph46: ; preds = %"153"
- br label %"154"
-
-"154": ; preds = %"154", %bb.nph46
- %0 = phi %"struct location_chain_def"** [ undef, %bb.nph46 ], [ %1, %"154" ] ; <%"struct location_chain_def"**> [#uses=1]
- %1 = bitcast i8* undef to %"struct location_chain_def"** ; <%"struct location_chain_def"**> [#uses=1]
- store %"struct location_chain_def"* undef, %"struct location_chain_def"** %0, align 4
- br i1 undef, label %"160", label %"154"
-
-"160": ; preds = %"154", %"153"
- br label %"161"
-
-"161": ; preds = %"160", %"68"
- br label %"68"
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll
deleted file mode 100644
index eb21dc2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; RUN: llc -mtriple=i386-apple-darwin -tailcallopt < %s | FileCheck %s
-; Check that lowered arguments do not overwrite the return address before it is moved.
-; Bug 6225
-;
-; If a call is a fastcc tail call and tail call optimization is enabled, the
-; caller frame is replaced by the callee frame. This can require that arguments are
-; placed on the former return address stack slot. Special care needs to be taken
-; that the return address is moved or stored in a register before
-; lowering of arguments potentially overwrites the value.
-;
-; Move return address (76(%esp)) to a temporary register (%ebp)
-; CHECK: movl 76(%esp), %ebp
-; Overwrite return address
-; CHECK: movl %ecx, 76(%esp)
-; Move return address from temporary register (%ebp) to new stack location (60(%esp))
-; CHECK: movl %ebp, 60(%esp)
-
-%tupl_p = type [9 x i32]*
-
-declare fastcc void @l297(i32 %r10, i32 %r9, i32 %r8, i32 %r7, i32 %r6, i32 %r5, i32 %r3, i32 %r2) noreturn nounwind
-declare fastcc void @l298(i32 %r10, i32 %r9, i32 %r4) noreturn nounwind
-
-define fastcc void @l186(%tupl_p %r1) noreturn nounwind {
-entry:
- %ptr1 = getelementptr %tupl_p %r1, i32 0, i32 0
- %r2 = load i32* %ptr1
- %ptr3 = getelementptr %tupl_p %r1, i32 0, i32 1
- %r3 = load i32* %ptr3
- %ptr5 = getelementptr %tupl_p %r1, i32 0, i32 2
- %r4 = load i32* %ptr5
- %ptr7 = getelementptr %tupl_p %r1, i32 0, i32 3
- %r5 = load i32* %ptr7
- %ptr9 = getelementptr %tupl_p %r1, i32 0, i32 4
- %r6 = load i32* %ptr9
- %ptr11 = getelementptr %tupl_p %r1, i32 0, i32 5
- %r7 = load i32* %ptr11
- %ptr13 = getelementptr %tupl_p %r1, i32 0, i32 6
- %r8 = load i32* %ptr13
- %ptr15 = getelementptr %tupl_p %r1, i32 0, i32 7
- %r9 = load i32* %ptr15
- %ptr17 = getelementptr %tupl_p %r1, i32 0, i32 8
- %r10 = load i32* %ptr17
- %cond = icmp eq i32 %r10, 3
- br i1 %cond, label %true, label %false
-
-true:
- tail call fastcc void @l297(i32 %r10, i32 %r9, i32 %r8, i32 %r7, i32 %r6, i32 %r5, i32 %r3, i32 %r2) noreturn nounwind
- ret void
-
-false:
- tail call fastcc void @l298(i32 %r10, i32 %r9, i32 %r4) noreturn nounwind
- ret void
-}
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-23-DAGCombineBug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-02-23-DAGCombineBug.ll
deleted file mode 100644
index 6a58e9e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-23-DAGCombineBug.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 | FileCheck %s
-
-define i32* @t() nounwind optsize ssp {
-entry:
-; CHECK: t:
-; CHECK: testl %eax, %eax
-; CHECK: js
- %cmp = icmp slt i32 undef, 0 ; <i1> [#uses=1]
- %outsearch.0 = select i1 %cmp, i1 false, i1 true ; <i1> [#uses=1]
- br i1 %outsearch.0, label %if.then27, label %if.else29
-
-if.then27: ; preds = %entry
- ret i32* undef
-
-if.else29: ; preds = %entry
- unreachable
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-23-DIV8rDefinesAX.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-02-23-DIV8rDefinesAX.ll
deleted file mode 100644
index 8543c80..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-23-DIV8rDefinesAX.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s
-; PR6374
-;
-; This test produces a DIV8r instruction and uses %AX instead of %AH and %AL.
-; The DIV8r must have the right imp-defs for that to work.
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-apple-darwin10.0.0"
-
-%struct._i386_state = type { %union.anon }
-%union.anon = type { [0 x i8] }
-
-define void @i386_aam(%struct._i386_state* nocapture %cpustate) nounwind ssp {
-entry:
- %call = tail call fastcc signext i8 @FETCH() ; <i8> [#uses=1]
- %rem = urem i8 0, %call ; <i8> [#uses=1]
- store i8 %rem, i8* undef
- ret void
-}
-
-declare fastcc signext i8 @FETCH() nounwind readnone ssp
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll
deleted file mode 100644
index 4a26ba0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s
-; PR6372
-;
-; This test produces a move instruction with an implicitly defined super-register:
-;
-; %DL<def> = MOV8rr %reg1038<kill>, %RDX<imp-def>
-;
-; When %DL is rematerialized, we must remember to update live intervals for
-; sub-registers %DX and %EDX.
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-apple-darwin10.0.0"
-
-define noalias i8* @foo() nounwind ssp {
-entry:
- br i1 undef, label %for.end, label %for.body
-
-for.body: ; preds = %if.end40, %entry
- %tmp6 = load i8* undef, align 2 ; <i8> [#uses=3]
- %conv11 = sext i8 %tmp6 to i64 ; <i64> [#uses=1]
- %cmp15 = icmp slt i64 %conv11, undef ; <i1> [#uses=1]
- br i1 %cmp15, label %if.end, label %if.then
-
-if.then: ; preds = %for.body
- %conv18 = sext i8 %tmp6 to i32 ; <i32> [#uses=1]
- %call = tail call i32 (...)* @invalid(i32 0, i32 0, i32 %conv18) nounwind ; <i32> [#uses=0]
- br label %if.end
-
-if.end: ; preds = %if.then, %for.body
- %index.0 = phi i8 [ 0, %if.then ], [ %tmp6, %for.body ] ; <i8> [#uses=1]
- store i8 %index.0, i8* undef
- %tmp24 = load i8* undef ; <i8> [#uses=2]
- br i1 undef, label %if.end40, label %if.then36
-
-if.then36: ; preds = %if.end
- %conv38 = sext i8 %tmp24 to i32 ; <i32> [#uses=1]
- %call39 = tail call i32 (...)* @invalid(i32 0, i32 0, i32 %conv38) nounwind ; <i32> [#uses=0]
- br label %if.end40
-
-if.end40: ; preds = %if.then36, %if.end
- %index.1 = phi i8 [ 0, %if.then36 ], [ %tmp24, %if.end ] ; <i8> [#uses=1]
- store i8 %index.1, i8* undef
- br i1 false, label %for.body, label %for.end
-
-for.end: ; preds = %if.end40, %entry
- ret i8* undef
-}
-
-declare i32 @invalid(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-23-SingleDefPhiJoin.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-02-23-SingleDefPhiJoin.ll
deleted file mode 100644
index aeed401..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-02-23-SingleDefPhiJoin.ll
+++ /dev/null
@@ -1,146 +0,0 @@
-; RUN: llc < %s
-; PR6363
-;
-; This test case creates a phi join register with a single definition. The other
-; predecessor blocks are implicit-def.
-;
-; If LiveIntervalAnalysis fails to recognize this as a phi join, the coalescer
-; will detect an infinity valno loop.
-;
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define i32 @decode(i8* nocapture %input, i32 %offset, i8* nocapture %output) nounwind {
-entry:
- br i1 undef, label %meshBB86, label %meshBB102
-
-bb: ; preds = %meshBB106, %meshBB102
- br i1 false, label %bb9, label %meshBB90
-
-bb.nph: ; preds = %meshBB90
- br label %meshBB114
-
-bb.nph.fragment: ; preds = %meshBB114
- br label %meshBB118
-
-bb1.fragment: ; preds = %meshBB118
- br i1 false, label %bb2, label %bb3
-
-bb2: ; preds = %bb1.fragment
- br label %meshBB74
-
-bb2.fragment15: ; preds = %meshBB74
- br label %meshBB98
-
-bb3: ; preds = %bb1.fragment
- br i1 undef, label %meshBB, label %meshBB102
-
-bb4: ; preds = %meshBB
- br label %meshBB118
-
-bb4.fragment: ; preds = %meshBB118
- br label %meshBB82
-
-bb5: ; preds = %meshBB102, %meshBB82
- br i1 false, label %bb6, label %bb7
-
-bb6: ; preds = %bb5
- br label %bb7
-
-bb7: ; preds = %meshBB98, %bb6, %bb5
- br label %meshBB114
-
-bb7.fragment: ; preds = %meshBB114
- br i1 undef, label %meshBB74, label %bb9
-
-bb9: ; preds = %bb7.fragment, %bb
- br label %bb1.i23
-
-bb1.i23: ; preds = %meshBB110, %bb9
- br i1 undef, label %meshBB106, label %meshBB110
-
-skip_to_newline.exit26: ; preds = %meshBB106
- br label %meshBB86
-
-skip_to_newline.exit26.fragment: ; preds = %meshBB86
- br i1 false, label %meshBB90, label %meshBB106
-
-bb11.fragment: ; preds = %meshBB90, %meshBB86
- br label %meshBB122
-
-bb1.i: ; preds = %meshBB122, %meshBB
- %ooffset.2.lcssa.phi.SV.phi203 = phi i32 [ 0, %meshBB122 ], [ %ooffset.2.lcssa.phi.SV.phi233, %meshBB ] ; <i32> [#uses=1]
- br label %meshBB98
-
-bb1.i.fragment: ; preds = %meshBB98
- br i1 undef, label %meshBB78, label %meshBB
-
-skip_to_newline.exit: ; preds = %meshBB78
- br i1 undef, label %bb12, label %meshBB110
-
-bb12: ; preds = %skip_to_newline.exit
- br label %meshBB94
-
-bb12.fragment: ; preds = %meshBB94
- br i1 false, label %bb13, label %meshBB78
-
-bb13: ; preds = %bb12.fragment
- br label %meshBB82
-
-bb13.fragment: ; preds = %meshBB82
- br i1 undef, label %meshBB94, label %meshBB122
-
-bb14: ; preds = %meshBB94
- ret i32 %ooffset.2.lcssa.phi.SV.phi250
-
-bb15: ; preds = %meshBB122, %meshBB110, %meshBB78
- unreachable
-
-meshBB: ; preds = %bb1.i.fragment, %bb3
- %ooffset.2.lcssa.phi.SV.phi233 = phi i32 [ undef, %bb3 ], [ %ooffset.2.lcssa.phi.SV.phi209, %bb1.i.fragment ] ; <i32> [#uses=1]
- br i1 undef, label %bb1.i, label %bb4
-
-meshBB74: ; preds = %bb7.fragment, %bb2
- br i1 false, label %meshBB118, label %bb2.fragment15
-
-meshBB78: ; preds = %bb12.fragment, %bb1.i.fragment
- %ooffset.2.lcssa.phi.SV.phi239 = phi i32 [ %ooffset.2.lcssa.phi.SV.phi209, %bb1.i.fragment ], [ %ooffset.2.lcssa.phi.SV.phi250, %bb12.fragment ] ; <i32> [#uses=1]
- br i1 false, label %bb15, label %skip_to_newline.exit
-
-meshBB82: ; preds = %bb13, %bb4.fragment
- br i1 false, label %bb5, label %bb13.fragment
-
-meshBB86: ; preds = %skip_to_newline.exit26, %entry
- br i1 undef, label %skip_to_newline.exit26.fragment, label %bb11.fragment
-
-meshBB90: ; preds = %skip_to_newline.exit26.fragment, %bb
- br i1 false, label %bb11.fragment, label %bb.nph
-
-meshBB94: ; preds = %bb13.fragment, %bb12
- %ooffset.2.lcssa.phi.SV.phi250 = phi i32 [ 0, %bb13.fragment ], [ %ooffset.2.lcssa.phi.SV.phi239, %bb12 ] ; <i32> [#uses=2]
- br i1 false, label %bb12.fragment, label %bb14
-
-meshBB98: ; preds = %bb1.i, %bb2.fragment15
- %ooffset.2.lcssa.phi.SV.phi209 = phi i32 [ undef, %bb2.fragment15 ], [ %ooffset.2.lcssa.phi.SV.phi203, %bb1.i ] ; <i32> [#uses=2]
- br i1 undef, label %bb1.i.fragment, label %bb7
-
-meshBB102: ; preds = %bb3, %entry
- br i1 undef, label %bb5, label %bb
-
-meshBB106: ; preds = %skip_to_newline.exit26.fragment, %bb1.i23
- br i1 undef, label %bb, label %skip_to_newline.exit26
-
-meshBB110: ; preds = %skip_to_newline.exit, %bb1.i23
- br i1 false, label %bb15, label %bb1.i23
-
-meshBB114: ; preds = %bb7, %bb.nph
- %meshStackVariable115.phi = phi i32 [ 19, %bb7 ], [ 8, %bb.nph ] ; <i32> [#uses=0]
- br i1 undef, label %bb.nph.fragment, label %bb7.fragment
-
-meshBB118: ; preds = %meshBB74, %bb4, %bb.nph.fragment
- %meshCmp121 = icmp eq i32 undef, 10 ; <i1> [#uses=1]
- br i1 %meshCmp121, label %bb4.fragment, label %bb1.fragment
-
-meshBB122: ; preds = %bb13.fragment, %bb11.fragment
- br i1 false, label %bb1.i, label %bb15
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-03-04-Mul8Bug.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-03-04-Mul8Bug.ll
deleted file mode 100644
index 48e75e9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-03-04-Mul8Bug.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s
-; PR6489
-;
-; This test case produces a MUL8 instruction and then tries to read the result
-; from the AX register instead of AH/AL. That confuses live interval analysis.
-;
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-apple-darwin10.0.0"
-
-define void @func_56(i64 %p_57, i32*** %p_58) nounwind ssp {
-for.end:
- %conv49 = trunc i32 undef to i8 ; <i8> [#uses=1]
- %div.i = udiv i8 %conv49, 5 ; <i8> [#uses=1]
- %conv51 = zext i8 %div.i to i32 ; <i32> [#uses=1]
- %call55 = call i32 @qux(i32 undef, i32 -2) nounwind ; <i32> [#uses=1]
- %rem.i = urem i32 %call55, -1 ; <i32> [#uses=1]
- %cmp57 = icmp uge i32 %conv51, %rem.i ; <i1> [#uses=1]
- %conv58 = zext i1 %cmp57 to i32 ; <i32> [#uses=1]
- %call85 = call i32 @func_35(i32*** undef, i32 undef, i32 %conv58, i32 1247, i32 0) nounwind ; <i32> [#uses=0]
- ret void
-}
-
-declare i32 @func_35(i32***, i32, i32, i32, i32)
-
-declare i32 @qux(i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-03-05-ConstantFoldCFG.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-03-05-ConstantFoldCFG.ll
deleted file mode 100644
index 5de1966..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-03-05-ConstantFoldCFG.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs
-;
-; When BRCOND is constant-folded to BR, make sure that PHI nodes don't get
-; spurious operands when the CFG is trimmed.
-;
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-apple-darwin10.2"
-
-define fastcc void @_ZSt16__introsort_loopIPdl17less_than_functorEvT_S2_T0_T1_(double* %__first, double* %__last, i64 %__depth_limit) nounwind ssp {
-entry:
- br i1 undef, label %bb1, label %bb2
-
-bb1: ; preds = %entry
- ret void
-
-bb2: ; preds = %entry
- br label %bb2.outer.i
-
-bb2.outer.i: ; preds = %bb9.i, %bb2
- br i1 undef, label %bb1.i, label %bb5.preheader.i
-
-bb1.i: ; preds = %bb1.i, %bb2.outer.i
- %indvar5.i = phi i64 [ %tmp, %bb1.i ], [ 0, %bb2.outer.i ] ; <i64> [#uses=1]
- %tmp = add i64 %indvar5.i, 1 ; <i64> [#uses=2]
- %scevgep.i = getelementptr double* undef, i64 %tmp ; <double*> [#uses=0]
- br i1 undef, label %bb1.i, label %bb5.preheader.i
-
-bb5.preheader.i: ; preds = %bb1.i, %bb2.outer.i
- br label %bb5.i
-
-bb5.i: ; preds = %bb5.i, %bb5.preheader.i
- br i1 undef, label %bb5.i, label %bb7.i6
-
-bb7.i6: ; preds = %bb5.i
- br i1 undef, label %bb9.i, label %_ZSt21__unguarded_partitionIPdd17less_than_functorET_S2_S2_T0_T1_.exit
-
-bb9.i: ; preds = %bb7.i6
- br label %bb2.outer.i
-
-_ZSt21__unguarded_partitionIPdd17less_than_functorET_S2_S2_T0_T1_.exit: ; preds = %bb7.i6
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/2010-03-05-EFLAGS-Redef.ll b/libclamav/c++/llvm/test/CodeGen/X86/2010-03-05-EFLAGS-Redef.ll
deleted file mode 100644
index 3cca10e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/2010-03-05-EFLAGS-Redef.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs
-;
-; This test case is transformed into a single basic block by the machine
-; branch folding pass. That makes a complete mess of the %EFLAGS liveness, but
-; we don't care about liveness this late anyway.
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-apple-darwin10.2"
-
-define i32 @main(i32 %argc, i8** nocapture %argv) ssp {
-entry:
- br i1 undef, label %bb, label %bb2
-
-bb: ; preds = %entry
- br label %bb2
-
-bb2: ; preds = %bb, %entry
- br i1 undef, label %bb3, label %bb5
-
-bb3: ; preds = %bb2
- br label %bb5
-
-bb5: ; preds = %bb3, %bb2
- br i1 undef, label %bb.nph239, label %bb8
-
-bb.nph239: ; preds = %bb5
- unreachable
-
-bb8: ; preds = %bb5
- br i1 undef, label %bb.nph237, label %bb47
-
-bb.nph237: ; preds = %bb8
- unreachable
-
-bb47: ; preds = %bb8
- br i1 undef, label %bb49, label %bb48
-
-bb48: ; preds = %bb47
- unreachable
-
-bb49: ; preds = %bb47
- br i1 undef, label %bb51, label %bb50
-
-bb50: ; preds = %bb49
- ret i32 0
-
-bb51: ; preds = %bb49
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/3addr-16bit.ll b/libclamav/c++/llvm/test/CodeGen/X86/3addr-16bit.ll
deleted file mode 100644
index c51247a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/3addr-16bit.ll
+++ /dev/null
@@ -1,95 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -asm-verbose=false | FileCheck %s -check-prefix=64BIT
-; rdar://7329206
-
-; In 32-bit the partial register stall would degrade performance.
-
-define zeroext i16 @t1(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
-entry:
-; 32BIT: t1:
-; 32BIT: movw 20(%esp), %ax
-; 32BIT-NOT: movw %ax, %cx
-; 32BIT: leal 1(%eax), %ecx
-
-; 64BIT: t1:
-; 64BIT-NOT: movw %si, %ax
-; 64BIT: leal 1(%rsi), %eax
- %0 = icmp eq i16 %k, %c ; <i1> [#uses=1]
- %1 = add i16 %k, 1 ; <i16> [#uses=3]
- br i1 %0, label %bb, label %bb1
-
-bb: ; preds = %entry
- tail call void @foo(i16 zeroext %1) nounwind
- ret i16 %1
-
-bb1: ; preds = %entry
- ret i16 %1
-}
-
-define zeroext i16 @t2(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
-entry:
-; 32BIT: t2:
-; 32BIT: movw 20(%esp), %ax
-; 32BIT-NOT: movw %ax, %cx
-; 32BIT: leal -1(%eax), %ecx
-
-; 64BIT: t2:
-; 64BIT-NOT: movw %si, %ax
-; 64BIT: leal -1(%rsi), %eax
- %0 = icmp eq i16 %k, %c ; <i1> [#uses=1]
- %1 = add i16 %k, -1 ; <i16> [#uses=3]
- br i1 %0, label %bb, label %bb1
-
-bb: ; preds = %entry
- tail call void @foo(i16 zeroext %1) nounwind
- ret i16 %1
-
-bb1: ; preds = %entry
- ret i16 %1
-}
-
-declare void @foo(i16 zeroext)
-
-define zeroext i16 @t3(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
-entry:
-; 32BIT: t3:
-; 32BIT: movw 20(%esp), %ax
-; 32BIT-NOT: movw %ax, %cx
-; 32BIT: leal 2(%eax), %ecx
-
-; 64BIT: t3:
-; 64BIT-NOT: movw %si, %ax
-; 64BIT: leal 2(%rsi), %eax
- %0 = add i16 %k, 2 ; <i16> [#uses=3]
- %1 = icmp eq i16 %k, %c ; <i1> [#uses=1]
- br i1 %1, label %bb, label %bb1
-
-bb: ; preds = %entry
- tail call void @foo(i16 zeroext %0) nounwind
- ret i16 %0
-
-bb1: ; preds = %entry
- ret i16 %0
-}
-
-define zeroext i16 @t4(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
-entry:
-; 32BIT: t4:
-; 32BIT: movw 16(%esp), %ax
-; 32BIT: movw 20(%esp), %cx
-; 32BIT-NOT: movw %cx, %dx
-; 32BIT: leal (%ecx,%eax), %edx
-
-; 64BIT: t4:
-; 64BIT-NOT: movw %si, %ax
-; 64BIT: leal (%rsi,%rdi), %eax
- %0 = add i16 %k, %c ; <i16> [#uses=3]
- %1 = icmp eq i16 %k, %c ; <i1> [#uses=1]
- br i1 %1, label %bb, label %bb1
-
-bb: ; preds = %entry
- tail call void @foo(i16 zeroext %0) nounwind
- ret i16 %0
-
-bb1: ; preds = %entry
- ret i16 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/3addr-or.ll b/libclamav/c++/llvm/test/CodeGen/X86/3addr-or.ll
deleted file mode 100644
index 30a1f36..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/3addr-or.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
-; rdar://7527734
-
-define i32 @test(i32 %x) nounwind readnone ssp {
-entry:
-; CHECK: test:
-; CHECK: leal 3(%rdi), %eax
- %0 = shl i32 %x, 5 ; <i32> [#uses=1]
- %1 = or i32 %0, 3 ; <i32> [#uses=1]
- ret i32 %1
-}
-
-define i64 @test2(i8 %A, i8 %B) nounwind {
-; CHECK: test2:
-; CHECK: shrq $4
-; CHECK-NOT: movq
-; CHECK-NOT: orq
-; CHECK: leaq
-; CHECK: ret
- %C = zext i8 %A to i64 ; <i64> [#uses=1]
- %D = shl i64 %C, 4 ; <i64> [#uses=1]
- %E = and i64 %D, 48 ; <i64> [#uses=1]
- %F = zext i8 %B to i64 ; <i64> [#uses=1]
- %G = lshr i64 %F, 4 ; <i64> [#uses=1]
- %H = or i64 %G, %E ; <i64> [#uses=1]
- ret i64 %H
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/Atomics-32.ll b/libclamav/c++/llvm/test/CodeGen/X86/Atomics-32.ll
deleted file mode 100644
index 0e9b73e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/Atomics-32.ll
+++ /dev/null
@@ -1,818 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-;; Note the 64-bit variants are not supported yet (in 32-bit mode).
-; ModuleID = 'Atomics.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
-@sc = common global i8 0 ; <i8*> [#uses=52]
-@uc = common global i8 0 ; <i8*> [#uses=100]
-@ss = common global i16 0 ; <i16*> [#uses=15]
-@us = common global i16 0 ; <i16*> [#uses=15]
-@si = common global i32 0 ; <i32*> [#uses=15]
-@ui = common global i32 0 ; <i32*> [#uses=23]
-@sl = common global i32 0 ; <i32*> [#uses=15]
-@ul = common global i32 0 ; <i32*> [#uses=15]
-
-define void @test_op_ignore() nounwind {
-entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=0]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %10, i32 1 ) ; <i32>:11 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %12, i32 1 ) ; <i32>:13 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:14 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:15 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:16 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 1 ) ; <i16>:17 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 1 ) ; <i16>:19 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 1 ) ; <i32>:21 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:22 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 1 ) ; <i32>:23 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 1 ) ; <i32>:25 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:26 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 1 ) ; <i32>:27 [#uses=0]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:28 [#uses=0]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:29 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:30 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 1 ) ; <i16>:31 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:32 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 1 ) ; <i16>:33 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:34 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 1 ) ; <i32>:35 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:36 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 1 ) ; <i32>:37 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:38 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %38, i32 1 ) ; <i32>:39 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:40 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %40, i32 1 ) ; <i32>:41 [#uses=0]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:42 [#uses=0]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:43 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 1 ) ; <i16>:45 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:46 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 1 ) ; <i16>:47 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:48 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 1 ) ; <i32>:49 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:50 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 1 ) ; <i32>:51 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:52 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %52, i32 1 ) ; <i32>:53 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:54 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %54, i32 1 ) ; <i32>:55 [#uses=0]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:56 [#uses=0]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:57 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 1 ) ; <i16>:59 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 1 ) ; <i16>:61 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:62 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 1 ) ; <i32>:63 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:64 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 1 ) ; <i32>:65 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:66 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %66, i32 1 ) ; <i32>:67 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:68 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %68, i32 1 ) ; <i32>:69 [#uses=0]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:70 [#uses=0]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:71 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:72 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 1 ) ; <i16>:73 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:74 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 1 ) ; <i16>:75 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 1 ) ; <i32>:77 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:78 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 1 ) ; <i32>:79 [#uses=0]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:80 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %80, i32 1 ) ; <i32>:81 [#uses=0]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:82 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %82, i32 1 ) ; <i32>:83 [#uses=0]
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind
-
-define void @test_fetch_and_op() nounwind {
-entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %10, i32 11 ) ; <i32>:11 [#uses=1]
- store i32 %11, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %12, i32 11 ) ; <i32>:13 [#uses=1]
- store i32 %13, i32* @ul, align 4
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:14 [#uses=1]
- store i8 %14, i8* @sc, align 1
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:15 [#uses=1]
- store i8 %15, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:16 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 11 ) ; <i16>:17 [#uses=1]
- store i16 %17, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 11 ) ; <i16>:19 [#uses=1]
- store i16 %19, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 11 ) ; <i32>:21 [#uses=1]
- store i32 %21, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:22 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 11 ) ; <i32>:23 [#uses=1]
- store i32 %23, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 11 ) ; <i32>:25 [#uses=1]
- store i32 %25, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:26 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 11 ) ; <i32>:27 [#uses=1]
- store i32 %27, i32* @ul, align 4
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:28 [#uses=1]
- store i8 %28, i8* @sc, align 1
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:29 [#uses=1]
- store i8 %29, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:30 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 11 ) ; <i16>:31 [#uses=1]
- store i16 %31, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:32 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 11 ) ; <i16>:33 [#uses=1]
- store i16 %33, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:34 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 11 ) ; <i32>:35 [#uses=1]
- store i32 %35, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:36 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 11 ) ; <i32>:37 [#uses=1]
- store i32 %37, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:38 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %38, i32 11 ) ; <i32>:39 [#uses=1]
- store i32 %39, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:40 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %40, i32 11 ) ; <i32>:41 [#uses=1]
- store i32 %41, i32* @ul, align 4
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:42 [#uses=1]
- store i8 %42, i8* @sc, align 1
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:43 [#uses=1]
- store i8 %43, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:44 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 11 ) ; <i16>:45 [#uses=1]
- store i16 %45, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:46 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 11 ) ; <i16>:47 [#uses=1]
- store i16 %47, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:48 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 11 ) ; <i32>:49 [#uses=1]
- store i32 %49, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:50 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 11 ) ; <i32>:51 [#uses=1]
- store i32 %51, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:52 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %52, i32 11 ) ; <i32>:53 [#uses=1]
- store i32 %53, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:54 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %54, i32 11 ) ; <i32>:55 [#uses=1]
- store i32 %55, i32* @ul, align 4
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:56 [#uses=1]
- store i8 %56, i8* @sc, align 1
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:57 [#uses=1]
- store i8 %57, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 11 ) ; <i16>:59 [#uses=1]
- store i16 %59, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 11 ) ; <i16>:61 [#uses=1]
- store i16 %61, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:62 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 11 ) ; <i32>:63 [#uses=1]
- store i32 %63, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:64 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 11 ) ; <i32>:65 [#uses=1]
- store i32 %65, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:66 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %66, i32 11 ) ; <i32>:67 [#uses=1]
- store i32 %67, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:68 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %68, i32 11 ) ; <i32>:69 [#uses=1]
- store i32 %69, i32* @ul, align 4
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:70 [#uses=1]
- store i8 %70, i8* @sc, align 1
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:71 [#uses=1]
- store i8 %71, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:72 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 11 ) ; <i16>:73 [#uses=1]
- store i16 %73, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:74 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 11 ) ; <i16>:75 [#uses=1]
- store i16 %75, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 11 ) ; <i32>:77 [#uses=1]
- store i32 %77, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:78 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 11 ) ; <i32>:79 [#uses=1]
- store i32 %79, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:80 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %80, i32 11 ) ; <i32>:81 [#uses=1]
- store i32 %81, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:82 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %82, i32 11 ) ; <i32>:83 [#uses=1]
- store i32 %83, i32* @ul, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define void @test_op_and_fetch() nounwind {
-entry:
- load i8* @uc, align 1 ; <i8>:0 [#uses=1]
- zext i8 %0 to i32 ; <i32>:1 [#uses=1]
- trunc i32 %1 to i8 ; <i8>:2 [#uses=2]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %2 ) ; <i8>:3 [#uses=1]
- add i8 %3, %2 ; <i8>:4 [#uses=1]
- store i8 %4, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:5 [#uses=1]
- zext i8 %5 to i32 ; <i32>:6 [#uses=1]
- trunc i32 %6 to i8 ; <i8>:7 [#uses=2]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %7 ) ; <i8>:8 [#uses=1]
- add i8 %8, %7 ; <i8>:9 [#uses=1]
- store i8 %9, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:10 [#uses=1]
- zext i8 %10 to i32 ; <i32>:11 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:12 [#uses=1]
- trunc i32 %11 to i16 ; <i16>:13 [#uses=2]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %12, i16 %13 ) ; <i16>:14 [#uses=1]
- add i16 %14, %13 ; <i16>:15 [#uses=1]
- store i16 %15, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:16 [#uses=1]
- zext i8 %16 to i32 ; <i32>:17 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
- trunc i32 %17 to i16 ; <i16>:19 [#uses=2]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %18, i16 %19 ) ; <i16>:20 [#uses=1]
- add i16 %20, %19 ; <i16>:21 [#uses=1]
- store i16 %21, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:22 [#uses=1]
- zext i8 %22 to i32 ; <i32>:23 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %24, i32 %23 ) ; <i32>:25 [#uses=1]
- add i32 %25, %23 ; <i32>:26 [#uses=1]
- store i32 %26, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:27 [#uses=1]
- zext i8 %27 to i32 ; <i32>:28 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:29 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %29, i32 %28 ) ; <i32>:30 [#uses=1]
- add i32 %30, %28 ; <i32>:31 [#uses=1]
- store i32 %31, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:32 [#uses=1]
- zext i8 %32 to i32 ; <i32>:33 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:34 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %34, i32 %33 ) ; <i32>:35 [#uses=1]
- add i32 %35, %33 ; <i32>:36 [#uses=1]
- store i32 %36, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:37 [#uses=1]
- zext i8 %37 to i32 ; <i32>:38 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:39 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %39, i32 %38 ) ; <i32>:40 [#uses=1]
- add i32 %40, %38 ; <i32>:41 [#uses=1]
- store i32 %41, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:42 [#uses=1]
- zext i8 %42 to i32 ; <i32>:43 [#uses=1]
- trunc i32 %43 to i8 ; <i8>:44 [#uses=2]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %44 ) ; <i8>:45 [#uses=1]
- sub i8 %45, %44 ; <i8>:46 [#uses=1]
- store i8 %46, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:47 [#uses=1]
- zext i8 %47 to i32 ; <i32>:48 [#uses=1]
- trunc i32 %48 to i8 ; <i8>:49 [#uses=2]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %49 ) ; <i8>:50 [#uses=1]
- sub i8 %50, %49 ; <i8>:51 [#uses=1]
- store i8 %51, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:52 [#uses=1]
- zext i8 %52 to i32 ; <i32>:53 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:54 [#uses=1]
- trunc i32 %53 to i16 ; <i16>:55 [#uses=2]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %54, i16 %55 ) ; <i16>:56 [#uses=1]
- sub i16 %56, %55 ; <i16>:57 [#uses=1]
- store i16 %57, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:58 [#uses=1]
- zext i8 %58 to i32 ; <i32>:59 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:60 [#uses=1]
- trunc i32 %59 to i16 ; <i16>:61 [#uses=2]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %60, i16 %61 ) ; <i16>:62 [#uses=1]
- sub i16 %62, %61 ; <i16>:63 [#uses=1]
- store i16 %63, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:64 [#uses=1]
- zext i8 %64 to i32 ; <i32>:65 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:66 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %66, i32 %65 ) ; <i32>:67 [#uses=1]
- sub i32 %67, %65 ; <i32>:68 [#uses=1]
- store i32 %68, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:69 [#uses=1]
- zext i8 %69 to i32 ; <i32>:70 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:71 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %71, i32 %70 ) ; <i32>:72 [#uses=1]
- sub i32 %72, %70 ; <i32>:73 [#uses=1]
- store i32 %73, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:74 [#uses=1]
- zext i8 %74 to i32 ; <i32>:75 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %76, i32 %75 ) ; <i32>:77 [#uses=1]
- sub i32 %77, %75 ; <i32>:78 [#uses=1]
- store i32 %78, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:79 [#uses=1]
- zext i8 %79 to i32 ; <i32>:80 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:81 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %81, i32 %80 ) ; <i32>:82 [#uses=1]
- sub i32 %82, %80 ; <i32>:83 [#uses=1]
- store i32 %83, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:84 [#uses=1]
- zext i8 %84 to i32 ; <i32>:85 [#uses=1]
- trunc i32 %85 to i8 ; <i8>:86 [#uses=2]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %86 ) ; <i8>:87 [#uses=1]
- or i8 %87, %86 ; <i8>:88 [#uses=1]
- store i8 %88, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:89 [#uses=1]
- zext i8 %89 to i32 ; <i32>:90 [#uses=1]
- trunc i32 %90 to i8 ; <i8>:91 [#uses=2]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %91 ) ; <i8>:92 [#uses=1]
- or i8 %92, %91 ; <i8>:93 [#uses=1]
- store i8 %93, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:94 [#uses=1]
- zext i8 %94 to i32 ; <i32>:95 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:96 [#uses=1]
- trunc i32 %95 to i16 ; <i16>:97 [#uses=2]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %96, i16 %97 ) ; <i16>:98 [#uses=1]
- or i16 %98, %97 ; <i16>:99 [#uses=1]
- store i16 %99, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:100 [#uses=1]
- zext i8 %100 to i32 ; <i32>:101 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:102 [#uses=1]
- trunc i32 %101 to i16 ; <i16>:103 [#uses=2]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %102, i16 %103 ) ; <i16>:104 [#uses=1]
- or i16 %104, %103 ; <i16>:105 [#uses=1]
- store i16 %105, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:106 [#uses=1]
- zext i8 %106 to i32 ; <i32>:107 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:108 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %108, i32 %107 ) ; <i32>:109 [#uses=1]
- or i32 %109, %107 ; <i32>:110 [#uses=1]
- store i32 %110, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:111 [#uses=1]
- zext i8 %111 to i32 ; <i32>:112 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:113 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %113, i32 %112 ) ; <i32>:114 [#uses=1]
- or i32 %114, %112 ; <i32>:115 [#uses=1]
- store i32 %115, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:116 [#uses=1]
- zext i8 %116 to i32 ; <i32>:117 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:118 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %118, i32 %117 ) ; <i32>:119 [#uses=1]
- or i32 %119, %117 ; <i32>:120 [#uses=1]
- store i32 %120, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:121 [#uses=1]
- zext i8 %121 to i32 ; <i32>:122 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:123 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %123, i32 %122 ) ; <i32>:124 [#uses=1]
- or i32 %124, %122 ; <i32>:125 [#uses=1]
- store i32 %125, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:126 [#uses=1]
- zext i8 %126 to i32 ; <i32>:127 [#uses=1]
- trunc i32 %127 to i8 ; <i8>:128 [#uses=2]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %128 ) ; <i8>:129 [#uses=1]
- xor i8 %129, %128 ; <i8>:130 [#uses=1]
- store i8 %130, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:131 [#uses=1]
- zext i8 %131 to i32 ; <i32>:132 [#uses=1]
- trunc i32 %132 to i8 ; <i8>:133 [#uses=2]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %133 ) ; <i8>:134 [#uses=1]
- xor i8 %134, %133 ; <i8>:135 [#uses=1]
- store i8 %135, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:136 [#uses=1]
- zext i8 %136 to i32 ; <i32>:137 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:138 [#uses=1]
- trunc i32 %137 to i16 ; <i16>:139 [#uses=2]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %138, i16 %139 ) ; <i16>:140 [#uses=1]
- xor i16 %140, %139 ; <i16>:141 [#uses=1]
- store i16 %141, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:142 [#uses=1]
- zext i8 %142 to i32 ; <i32>:143 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:144 [#uses=1]
- trunc i32 %143 to i16 ; <i16>:145 [#uses=2]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %144, i16 %145 ) ; <i16>:146 [#uses=1]
- xor i16 %146, %145 ; <i16>:147 [#uses=1]
- store i16 %147, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:148 [#uses=1]
- zext i8 %148 to i32 ; <i32>:149 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:150 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %150, i32 %149 ) ; <i32>:151 [#uses=1]
- xor i32 %151, %149 ; <i32>:152 [#uses=1]
- store i32 %152, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:153 [#uses=1]
- zext i8 %153 to i32 ; <i32>:154 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:155 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %155, i32 %154 ) ; <i32>:156 [#uses=1]
- xor i32 %156, %154 ; <i32>:157 [#uses=1]
- store i32 %157, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:158 [#uses=1]
- zext i8 %158 to i32 ; <i32>:159 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:160 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %160, i32 %159 ) ; <i32>:161 [#uses=1]
- xor i32 %161, %159 ; <i32>:162 [#uses=1]
- store i32 %162, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:163 [#uses=1]
- zext i8 %163 to i32 ; <i32>:164 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:165 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %165, i32 %164 ) ; <i32>:166 [#uses=1]
- xor i32 %166, %164 ; <i32>:167 [#uses=1]
- store i32 %167, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:168 [#uses=1]
- zext i8 %168 to i32 ; <i32>:169 [#uses=1]
- trunc i32 %169 to i8 ; <i8>:170 [#uses=2]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %170 ) ; <i8>:171 [#uses=1]
- and i8 %171, %170 ; <i8>:172 [#uses=1]
- store i8 %172, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:173 [#uses=1]
- zext i8 %173 to i32 ; <i32>:174 [#uses=1]
- trunc i32 %174 to i8 ; <i8>:175 [#uses=2]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %175 ) ; <i8>:176 [#uses=1]
- and i8 %176, %175 ; <i8>:177 [#uses=1]
- store i8 %177, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:178 [#uses=1]
- zext i8 %178 to i32 ; <i32>:179 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:180 [#uses=1]
- trunc i32 %179 to i16 ; <i16>:181 [#uses=2]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %180, i16 %181 ) ; <i16>:182 [#uses=1]
- and i16 %182, %181 ; <i16>:183 [#uses=1]
- store i16 %183, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:184 [#uses=1]
- zext i8 %184 to i32 ; <i32>:185 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:186 [#uses=1]
- trunc i32 %185 to i16 ; <i16>:187 [#uses=2]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %186, i16 %187 ) ; <i16>:188 [#uses=1]
- and i16 %188, %187 ; <i16>:189 [#uses=1]
- store i16 %189, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:190 [#uses=1]
- zext i8 %190 to i32 ; <i32>:191 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:192 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %192, i32 %191 ) ; <i32>:193 [#uses=1]
- and i32 %193, %191 ; <i32>:194 [#uses=1]
- store i32 %194, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:195 [#uses=1]
- zext i8 %195 to i32 ; <i32>:196 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:197 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %197, i32 %196 ) ; <i32>:198 [#uses=1]
- and i32 %198, %196 ; <i32>:199 [#uses=1]
- store i32 %199, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:200 [#uses=1]
- zext i8 %200 to i32 ; <i32>:201 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:202 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %202, i32 %201 ) ; <i32>:203 [#uses=1]
- and i32 %203, %201 ; <i32>:204 [#uses=1]
- store i32 %204, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:205 [#uses=1]
- zext i8 %205 to i32 ; <i32>:206 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:207 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %207, i32 %206 ) ; <i32>:208 [#uses=1]
- and i32 %208, %206 ; <i32>:209 [#uses=1]
- store i32 %209, i32* @ul, align 4
- load i8* @uc, align 1 ; <i8>:210 [#uses=1]
- zext i8 %210 to i32 ; <i32>:211 [#uses=1]
- trunc i32 %211 to i8 ; <i8>:212 [#uses=2]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %212 ) ; <i8>:213 [#uses=1]
- xor i8 %213, -1 ; <i8>:214 [#uses=1]
- and i8 %214, %212 ; <i8>:215 [#uses=1]
- store i8 %215, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:216 [#uses=1]
- zext i8 %216 to i32 ; <i32>:217 [#uses=1]
- trunc i32 %217 to i8 ; <i8>:218 [#uses=2]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %218 ) ; <i8>:219 [#uses=1]
- xor i8 %219, -1 ; <i8>:220 [#uses=1]
- and i8 %220, %218 ; <i8>:221 [#uses=1]
- store i8 %221, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:222 [#uses=1]
- zext i8 %222 to i32 ; <i32>:223 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:224 [#uses=1]
- trunc i32 %223 to i16 ; <i16>:225 [#uses=2]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %224, i16 %225 ) ; <i16>:226 [#uses=1]
- xor i16 %226, -1 ; <i16>:227 [#uses=1]
- and i16 %227, %225 ; <i16>:228 [#uses=1]
- store i16 %228, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:229 [#uses=1]
- zext i8 %229 to i32 ; <i32>:230 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:231 [#uses=1]
- trunc i32 %230 to i16 ; <i16>:232 [#uses=2]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %231, i16 %232 ) ; <i16>:233 [#uses=1]
- xor i16 %233, -1 ; <i16>:234 [#uses=1]
- and i16 %234, %232 ; <i16>:235 [#uses=1]
- store i16 %235, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:236 [#uses=1]
- zext i8 %236 to i32 ; <i32>:237 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:238 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %238, i32 %237 ) ; <i32>:239 [#uses=1]
- xor i32 %239, -1 ; <i32>:240 [#uses=1]
- and i32 %240, %237 ; <i32>:241 [#uses=1]
- store i32 %241, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:242 [#uses=1]
- zext i8 %242 to i32 ; <i32>:243 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:244 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %244, i32 %243 ) ; <i32>:245 [#uses=1]
- xor i32 %245, -1 ; <i32>:246 [#uses=1]
- and i32 %246, %243 ; <i32>:247 [#uses=1]
- store i32 %247, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:248 [#uses=1]
- zext i8 %248 to i32 ; <i32>:249 [#uses=2]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:250 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %250, i32 %249 ) ; <i32>:251 [#uses=1]
- xor i32 %251, -1 ; <i32>:252 [#uses=1]
- and i32 %252, %249 ; <i32>:253 [#uses=1]
- store i32 %253, i32* @sl, align 4
- load i8* @uc, align 1 ; <i8>:254 [#uses=1]
- zext i8 %254 to i32 ; <i32>:255 [#uses=2]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:256 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %256, i32 %255 ) ; <i32>:257 [#uses=1]
- xor i32 %257, -1 ; <i32>:258 [#uses=1]
- and i32 %258, %255 ; <i32>:259 [#uses=1]
- store i32 %259, i32* @ul, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define void @test_compare_and_swap() nounwind {
-entry:
- load i8* @sc, align 1 ; <i8>:0 [#uses=1]
- zext i8 %0 to i32 ; <i32>:1 [#uses=1]
- load i8* @uc, align 1 ; <i8>:2 [#uses=1]
- zext i8 %2 to i32 ; <i32>:3 [#uses=1]
- trunc i32 %3 to i8 ; <i8>:4 [#uses=1]
- trunc i32 %1 to i8 ; <i8>:5 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %4, i8 %5 ) ; <i8>:6 [#uses=1]
- store i8 %6, i8* @sc, align 1
- load i8* @sc, align 1 ; <i8>:7 [#uses=1]
- zext i8 %7 to i32 ; <i32>:8 [#uses=1]
- load i8* @uc, align 1 ; <i8>:9 [#uses=1]
- zext i8 %9 to i32 ; <i32>:10 [#uses=1]
- trunc i32 %10 to i8 ; <i8>:11 [#uses=1]
- trunc i32 %8 to i8 ; <i8>:12 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %11, i8 %12 ) ; <i8>:13 [#uses=1]
- store i8 %13, i8* @uc, align 1
- load i8* @sc, align 1 ; <i8>:14 [#uses=1]
- sext i8 %14 to i16 ; <i16>:15 [#uses=1]
- zext i16 %15 to i32 ; <i32>:16 [#uses=1]
- load i8* @uc, align 1 ; <i8>:17 [#uses=1]
- zext i8 %17 to i32 ; <i32>:18 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:19 [#uses=1]
- trunc i32 %18 to i16 ; <i16>:20 [#uses=1]
- trunc i32 %16 to i16 ; <i16>:21 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %19, i16 %20, i16 %21 ) ; <i16>:22 [#uses=1]
- store i16 %22, i16* @ss, align 2
- load i8* @sc, align 1 ; <i8>:23 [#uses=1]
- sext i8 %23 to i16 ; <i16>:24 [#uses=1]
- zext i16 %24 to i32 ; <i32>:25 [#uses=1]
- load i8* @uc, align 1 ; <i8>:26 [#uses=1]
- zext i8 %26 to i32 ; <i32>:27 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:28 [#uses=1]
- trunc i32 %27 to i16 ; <i16>:29 [#uses=1]
- trunc i32 %25 to i16 ; <i16>:30 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %28, i16 %29, i16 %30 ) ; <i16>:31 [#uses=1]
- store i16 %31, i16* @us, align 2
- load i8* @sc, align 1 ; <i8>:32 [#uses=1]
- sext i8 %32 to i32 ; <i32>:33 [#uses=1]
- load i8* @uc, align 1 ; <i8>:34 [#uses=1]
- zext i8 %34 to i32 ; <i32>:35 [#uses=1]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:36 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %36, i32 %35, i32 %33 ) ; <i32>:37 [#uses=1]
- store i32 %37, i32* @si, align 4
- load i8* @sc, align 1 ; <i8>:38 [#uses=1]
- sext i8 %38 to i32 ; <i32>:39 [#uses=1]
- load i8* @uc, align 1 ; <i8>:40 [#uses=1]
- zext i8 %40 to i32 ; <i32>:41 [#uses=1]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:42 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %42, i32 %41, i32 %39 ) ; <i32>:43 [#uses=1]
- store i32 %43, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:44 [#uses=1]
- sext i8 %44 to i32 ; <i32>:45 [#uses=1]
- load i8* @uc, align 1 ; <i8>:46 [#uses=1]
- zext i8 %46 to i32 ; <i32>:47 [#uses=1]
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:48 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %48, i32 %47, i32 %45 ) ; <i32>:49 [#uses=1]
- store i32 %49, i32* @sl, align 4
- load i8* @sc, align 1 ; <i8>:50 [#uses=1]
- sext i8 %50 to i32 ; <i32>:51 [#uses=1]
- load i8* @uc, align 1 ; <i8>:52 [#uses=1]
- zext i8 %52 to i32 ; <i32>:53 [#uses=1]
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:54 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %54, i32 %53, i32 %51 ) ; <i32>:55 [#uses=1]
- store i32 %55, i32* @ul, align 4
- load i8* @sc, align 1 ; <i8>:56 [#uses=1]
- zext i8 %56 to i32 ; <i32>:57 [#uses=1]
- load i8* @uc, align 1 ; <i8>:58 [#uses=1]
- zext i8 %58 to i32 ; <i32>:59 [#uses=1]
- trunc i32 %59 to i8 ; <i8>:60 [#uses=2]
- trunc i32 %57 to i8 ; <i8>:61 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %60, i8 %61 ) ; <i8>:62 [#uses=1]
- icmp eq i8 %62, %60 ; <i1>:63 [#uses=1]
- zext i1 %63 to i8 ; <i8>:64 [#uses=1]
- zext i8 %64 to i32 ; <i32>:65 [#uses=1]
- store i32 %65, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:66 [#uses=1]
- zext i8 %66 to i32 ; <i32>:67 [#uses=1]
- load i8* @uc, align 1 ; <i8>:68 [#uses=1]
- zext i8 %68 to i32 ; <i32>:69 [#uses=1]
- trunc i32 %69 to i8 ; <i8>:70 [#uses=2]
- trunc i32 %67 to i8 ; <i8>:71 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %70, i8 %71 ) ; <i8>:72 [#uses=1]
- icmp eq i8 %72, %70 ; <i1>:73 [#uses=1]
- zext i1 %73 to i8 ; <i8>:74 [#uses=1]
- zext i8 %74 to i32 ; <i32>:75 [#uses=1]
- store i32 %75, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:76 [#uses=1]
- sext i8 %76 to i16 ; <i16>:77 [#uses=1]
- zext i16 %77 to i32 ; <i32>:78 [#uses=1]
- load i8* @uc, align 1 ; <i8>:79 [#uses=1]
- zext i8 %79 to i32 ; <i32>:80 [#uses=1]
- trunc i32 %80 to i8 ; <i8>:81 [#uses=2]
- trunc i32 %78 to i8 ; <i8>:82 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @ss to i8*), i8 %81, i8 %82 ) ; <i8>:83 [#uses=1]
- icmp eq i8 %83, %81 ; <i1>:84 [#uses=1]
- zext i1 %84 to i8 ; <i8>:85 [#uses=1]
- zext i8 %85 to i32 ; <i32>:86 [#uses=1]
- store i32 %86, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:87 [#uses=1]
- sext i8 %87 to i16 ; <i16>:88 [#uses=1]
- zext i16 %88 to i32 ; <i32>:89 [#uses=1]
- load i8* @uc, align 1 ; <i8>:90 [#uses=1]
- zext i8 %90 to i32 ; <i32>:91 [#uses=1]
- trunc i32 %91 to i8 ; <i8>:92 [#uses=2]
- trunc i32 %89 to i8 ; <i8>:93 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @us to i8*), i8 %92, i8 %93 ) ; <i8>:94 [#uses=1]
- icmp eq i8 %94, %92 ; <i1>:95 [#uses=1]
- zext i1 %95 to i8 ; <i8>:96 [#uses=1]
- zext i8 %96 to i32 ; <i32>:97 [#uses=1]
- store i32 %97, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:98 [#uses=1]
- sext i8 %98 to i32 ; <i32>:99 [#uses=1]
- load i8* @uc, align 1 ; <i8>:100 [#uses=1]
- zext i8 %100 to i32 ; <i32>:101 [#uses=1]
- trunc i32 %101 to i8 ; <i8>:102 [#uses=2]
- trunc i32 %99 to i8 ; <i8>:103 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @si to i8*), i8 %102, i8 %103 ) ; <i8>:104 [#uses=1]
- icmp eq i8 %104, %102 ; <i1>:105 [#uses=1]
- zext i1 %105 to i8 ; <i8>:106 [#uses=1]
- zext i8 %106 to i32 ; <i32>:107 [#uses=1]
- store i32 %107, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:108 [#uses=1]
- sext i8 %108 to i32 ; <i32>:109 [#uses=1]
- load i8* @uc, align 1 ; <i8>:110 [#uses=1]
- zext i8 %110 to i32 ; <i32>:111 [#uses=1]
- trunc i32 %111 to i8 ; <i8>:112 [#uses=2]
- trunc i32 %109 to i8 ; <i8>:113 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @ui to i8*), i8 %112, i8 %113 ) ; <i8>:114 [#uses=1]
- icmp eq i8 %114, %112 ; <i1>:115 [#uses=1]
- zext i1 %115 to i8 ; <i8>:116 [#uses=1]
- zext i8 %116 to i32 ; <i32>:117 [#uses=1]
- store i32 %117, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:118 [#uses=1]
- sext i8 %118 to i32 ; <i32>:119 [#uses=1]
- load i8* @uc, align 1 ; <i8>:120 [#uses=1]
- zext i8 %120 to i32 ; <i32>:121 [#uses=1]
- trunc i32 %121 to i8 ; <i8>:122 [#uses=2]
- trunc i32 %119 to i8 ; <i8>:123 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @sl to i8*), i8 %122, i8 %123 ) ; <i8>:124 [#uses=1]
- icmp eq i8 %124, %122 ; <i1>:125 [#uses=1]
- zext i1 %125 to i8 ; <i8>:126 [#uses=1]
- zext i8 %126 to i32 ; <i32>:127 [#uses=1]
- store i32 %127, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:128 [#uses=1]
- sext i8 %128 to i32 ; <i32>:129 [#uses=1]
- load i8* @uc, align 1 ; <i8>:130 [#uses=1]
- zext i8 %130 to i32 ; <i32>:131 [#uses=1]
- trunc i32 %131 to i8 ; <i8>:132 [#uses=2]
- trunc i32 %129 to i8 ; <i8>:133 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @ul to i8*), i8 %132, i8 %133 ) ; <i8>:134 [#uses=1]
- icmp eq i8 %134, %132 ; <i1>:135 [#uses=1]
- zext i1 %135 to i8 ; <i8>:136 [#uses=1]
- zext i8 %136 to i32 ; <i32>:137 [#uses=1]
- store i32 %137, i32* @ui, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind
-
-declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind
-
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
-
-define void @test_lock() nounwind {
-entry:
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:10 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %10, i32 1 ) ; <i32>:11 [#uses=1]
- store i32 %11, i32* @sl, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:12 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %12, i32 1 ) ; <i32>:13 [#uses=1]
- store i32 %13, i32* @ul, align 4
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false )
- volatile store i8 0, i8* @sc, align 1
- volatile store i8 0, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:14 [#uses=1]
- volatile store i16 0, i16* %14, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:15 [#uses=1]
- volatile store i16 0, i16* %15, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:16 [#uses=1]
- volatile store i32 0, i32* %16, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:17 [#uses=1]
- volatile store i32 0, i32* %17, align 4
- bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*>:18 [#uses=1]
- volatile store i32 0, i32* %18, align 4
- bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*>:19 [#uses=1]
- volatile store i32 0, i32* %19, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/Atomics-64.ll b/libclamav/c++/llvm/test/CodeGen/X86/Atomics-64.ll
deleted file mode 100644
index ac174b9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/Atomics-64.ll
+++ /dev/null
@@ -1,1015 +0,0 @@
-; RUN: llc < %s -march=x86-64 > %t
-; ModuleID = 'Atomics.c'
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin8"
-@sc = common global i8 0 ; <i8*> [#uses=56]
-@uc = common global i8 0 ; <i8*> [#uses=116]
-@ss = common global i16 0 ; <i16*> [#uses=15]
-@us = common global i16 0 ; <i16*> [#uses=15]
-@si = common global i32 0 ; <i32*> [#uses=15]
-@ui = common global i32 0 ; <i32*> [#uses=25]
-@sl = common global i64 0 ; <i64*> [#uses=15]
-@ul = common global i64 0 ; <i64*> [#uses=15]
-@sll = common global i64 0 ; <i64*> [#uses=15]
-@ull = common global i64 0 ; <i64*> [#uses=15]
-
-define void @test_op_ignore() nounwind {
-entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=0]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:10 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 1 ) ; <i64>:11 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:12 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 1 ) ; <i64>:13 [#uses=0]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:14 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %14, i64 1 ) ; <i64>:15 [#uses=0]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:16 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %16, i64 1 ) ; <i64>:17 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:18 [#uses=0]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:19 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:20 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %20, i16 1 ) ; <i16>:21 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:22 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %22, i16 1 ) ; <i16>:23 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 1 ) ; <i32>:25 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:26 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 1 ) ; <i32>:27 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:28 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %28, i64 1 ) ; <i64>:29 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:30 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %30, i64 1 ) ; <i64>:31 [#uses=0]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:32 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %32, i64 1 ) ; <i64>:33 [#uses=0]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:34 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %34, i64 1 ) ; <i64>:35 [#uses=0]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:36 [#uses=0]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:37 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:38 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %38, i16 1 ) ; <i16>:39 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:40 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %40, i16 1 ) ; <i16>:41 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:42 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %42, i32 1 ) ; <i32>:43 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:44 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %44, i32 1 ) ; <i32>:45 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:46 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %46, i64 1 ) ; <i64>:47 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:48 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %48, i64 1 ) ; <i64>:49 [#uses=0]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:50 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %50, i64 1 ) ; <i64>:51 [#uses=0]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:52 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %52, i64 1 ) ; <i64>:53 [#uses=0]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:54 [#uses=0]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:55 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:56 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %56, i16 1 ) ; <i16>:57 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %58, i16 1 ) ; <i16>:59 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:60 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %60, i32 1 ) ; <i32>:61 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:62 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %62, i32 1 ) ; <i32>:63 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:64 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %64, i64 1 ) ; <i64>:65 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:66 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %66, i64 1 ) ; <i64>:67 [#uses=0]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:68 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %68, i64 1 ) ; <i64>:69 [#uses=0]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:70 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %70, i64 1 ) ; <i64>:71 [#uses=0]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:72 [#uses=0]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:73 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:74 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %74, i16 1 ) ; <i16>:75 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:76 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %76, i16 1 ) ; <i16>:77 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:78 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %78, i32 1 ) ; <i32>:79 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:80 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %80, i32 1 ) ; <i32>:81 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:82 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %82, i64 1 ) ; <i64>:83 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:84 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %84, i64 1 ) ; <i64>:85 [#uses=0]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:86 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %86, i64 1 ) ; <i64>:87 [#uses=0]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:88 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %88, i64 1 ) ; <i64>:89 [#uses=0]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:90 [#uses=0]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:91 [#uses=0]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:92 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %92, i16 1 ) ; <i16>:93 [#uses=0]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:94 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %94, i16 1 ) ; <i16>:95 [#uses=0]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:96 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %96, i32 1 ) ; <i32>:97 [#uses=0]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:98 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %98, i32 1 ) ; <i32>:99 [#uses=0]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:100 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %100, i64 1 ) ; <i64>:101 [#uses=0]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:102 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %102, i64 1 ) ; <i64>:103 [#uses=0]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:104 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %104, i64 1 ) ; <i64>:105 [#uses=0]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:106 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %106, i64 1 ) ; <i64>:107 [#uses=0]
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.add.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.or.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.xor.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.and.i64.p0i64(i64*, i64) nounwind
-
-declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.load.nand.i64.p0i64(i64*, i64) nounwind
-
-define void @test_fetch_and_op() nounwind {
-entry:
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:10 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 11 ) ; <i64>:11 [#uses=1]
- store i64 %11, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:12 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 11 ) ; <i64>:13 [#uses=1]
- store i64 %13, i64* @ul, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:14 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %14, i64 11 ) ; <i64>:15 [#uses=1]
- store i64 %15, i64* @sll, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:16 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %16, i64 11 ) ; <i64>:17 [#uses=1]
- store i64 %17, i64* @ull, align 8
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:18 [#uses=1]
- store i8 %18, i8* @sc, align 1
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:19 [#uses=1]
- store i8 %19, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:20 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %20, i16 11 ) ; <i16>:21 [#uses=1]
- store i16 %21, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:22 [#uses=1]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %22, i16 11 ) ; <i16>:23 [#uses=1]
- store i16 %23, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 11 ) ; <i32>:25 [#uses=1]
- store i32 %25, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:26 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 11 ) ; <i32>:27 [#uses=1]
- store i32 %27, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:28 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %28, i64 11 ) ; <i64>:29 [#uses=1]
- store i64 %29, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:30 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %30, i64 11 ) ; <i64>:31 [#uses=1]
- store i64 %31, i64* @ul, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:32 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %32, i64 11 ) ; <i64>:33 [#uses=1]
- store i64 %33, i64* @sll, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:34 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %34, i64 11 ) ; <i64>:35 [#uses=1]
- store i64 %35, i64* @ull, align 8
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:36 [#uses=1]
- store i8 %36, i8* @sc, align 1
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:37 [#uses=1]
- store i8 %37, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:38 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %38, i16 11 ) ; <i16>:39 [#uses=1]
- store i16 %39, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:40 [#uses=1]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %40, i16 11 ) ; <i16>:41 [#uses=1]
- store i16 %41, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:42 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %42, i32 11 ) ; <i32>:43 [#uses=1]
- store i32 %43, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:44 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %44, i32 11 ) ; <i32>:45 [#uses=1]
- store i32 %45, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:46 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %46, i64 11 ) ; <i64>:47 [#uses=1]
- store i64 %47, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:48 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %48, i64 11 ) ; <i64>:49 [#uses=1]
- store i64 %49, i64* @ul, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:50 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %50, i64 11 ) ; <i64>:51 [#uses=1]
- store i64 %51, i64* @sll, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:52 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %52, i64 11 ) ; <i64>:53 [#uses=1]
- store i64 %53, i64* @ull, align 8
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:54 [#uses=1]
- store i8 %54, i8* @sc, align 1
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:55 [#uses=1]
- store i8 %55, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:56 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %56, i16 11 ) ; <i16>:57 [#uses=1]
- store i16 %57, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:58 [#uses=1]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %58, i16 11 ) ; <i16>:59 [#uses=1]
- store i16 %59, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:60 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %60, i32 11 ) ; <i32>:61 [#uses=1]
- store i32 %61, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:62 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %62, i32 11 ) ; <i32>:63 [#uses=1]
- store i32 %63, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:64 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %64, i64 11 ) ; <i64>:65 [#uses=1]
- store i64 %65, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:66 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %66, i64 11 ) ; <i64>:67 [#uses=1]
- store i64 %67, i64* @ul, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:68 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %68, i64 11 ) ; <i64>:69 [#uses=1]
- store i64 %69, i64* @sll, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:70 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %70, i64 11 ) ; <i64>:71 [#uses=1]
- store i64 %71, i64* @ull, align 8
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:72 [#uses=1]
- store i8 %72, i8* @sc, align 1
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:73 [#uses=1]
- store i8 %73, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:74 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %74, i16 11 ) ; <i16>:75 [#uses=1]
- store i16 %75, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:76 [#uses=1]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %76, i16 11 ) ; <i16>:77 [#uses=1]
- store i16 %77, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:78 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %78, i32 11 ) ; <i32>:79 [#uses=1]
- store i32 %79, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:80 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %80, i32 11 ) ; <i32>:81 [#uses=1]
- store i32 %81, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:82 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %82, i64 11 ) ; <i64>:83 [#uses=1]
- store i64 %83, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:84 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %84, i64 11 ) ; <i64>:85 [#uses=1]
- store i64 %85, i64* @ul, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:86 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %86, i64 11 ) ; <i64>:87 [#uses=1]
- store i64 %87, i64* @sll, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:88 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %88, i64 11 ) ; <i64>:89 [#uses=1]
- store i64 %89, i64* @ull, align 8
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 ) ; <i8>:90 [#uses=1]
- store i8 %90, i8* @sc, align 1
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 ) ; <i8>:91 [#uses=1]
- store i8 %91, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:92 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %92, i16 11 ) ; <i16>:93 [#uses=1]
- store i16 %93, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:94 [#uses=1]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %94, i16 11 ) ; <i16>:95 [#uses=1]
- store i16 %95, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:96 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %96, i32 11 ) ; <i32>:97 [#uses=1]
- store i32 %97, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:98 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %98, i32 11 ) ; <i32>:99 [#uses=1]
- store i32 %99, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:100 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %100, i64 11 ) ; <i64>:101 [#uses=1]
- store i64 %101, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:102 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %102, i64 11 ) ; <i64>:103 [#uses=1]
- store i64 %103, i64* @ul, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:104 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %104, i64 11 ) ; <i64>:105 [#uses=1]
- store i64 %105, i64* @sll, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:106 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %106, i64 11 ) ; <i64>:107 [#uses=1]
- store i64 %107, i64* @ull, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define void @test_op_and_fetch() nounwind {
-entry:
- load i8* @uc, align 1 ; <i8>:0 [#uses=1]
- zext i8 %0 to i32 ; <i32>:1 [#uses=1]
- trunc i32 %1 to i8 ; <i8>:2 [#uses=2]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %2 ) ; <i8>:3 [#uses=1]
- add i8 %3, %2 ; <i8>:4 [#uses=1]
- store i8 %4, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:5 [#uses=1]
- zext i8 %5 to i32 ; <i32>:6 [#uses=1]
- trunc i32 %6 to i8 ; <i8>:7 [#uses=2]
- call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %7 ) ; <i8>:8 [#uses=1]
- add i8 %8, %7 ; <i8>:9 [#uses=1]
- store i8 %9, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:10 [#uses=1]
- zext i8 %10 to i32 ; <i32>:11 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:12 [#uses=1]
- trunc i32 %11 to i16 ; <i16>:13 [#uses=2]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %12, i16 %13 ) ; <i16>:14 [#uses=1]
- add i16 %14, %13 ; <i16>:15 [#uses=1]
- store i16 %15, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:16 [#uses=1]
- zext i8 %16 to i32 ; <i32>:17 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:18 [#uses=1]
- trunc i32 %17 to i16 ; <i16>:19 [#uses=2]
- call i16 @llvm.atomic.load.add.i16.p0i16( i16* %18, i16 %19 ) ; <i16>:20 [#uses=1]
- add i16 %20, %19 ; <i16>:21 [#uses=1]
- store i16 %21, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:22 [#uses=1]
- zext i8 %22 to i32 ; <i32>:23 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:24 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %24, i32 %23 ) ; <i32>:25 [#uses=1]
- add i32 %25, %23 ; <i32>:26 [#uses=1]
- store i32 %26, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:27 [#uses=1]
- zext i8 %27 to i32 ; <i32>:28 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:29 [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %29, i32 %28 ) ; <i32>:30 [#uses=1]
- add i32 %30, %28 ; <i32>:31 [#uses=1]
- store i32 %31, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:32 [#uses=1]
- zext i8 %32 to i64 ; <i64>:33 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:34 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %34, i64 %33 ) ; <i64>:35 [#uses=1]
- add i64 %35, %33 ; <i64>:36 [#uses=1]
- store i64 %36, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:37 [#uses=1]
- zext i8 %37 to i64 ; <i64>:38 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:39 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %39, i64 %38 ) ; <i64>:40 [#uses=1]
- add i64 %40, %38 ; <i64>:41 [#uses=1]
- store i64 %41, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:42 [#uses=1]
- zext i8 %42 to i64 ; <i64>:43 [#uses=2]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:44 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %44, i64 %43 ) ; <i64>:45 [#uses=1]
- add i64 %45, %43 ; <i64>:46 [#uses=1]
- store i64 %46, i64* @sll, align 8
- load i8* @uc, align 1 ; <i8>:47 [#uses=1]
- zext i8 %47 to i64 ; <i64>:48 [#uses=2]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:49 [#uses=1]
- call i64 @llvm.atomic.load.add.i64.p0i64( i64* %49, i64 %48 ) ; <i64>:50 [#uses=1]
- add i64 %50, %48 ; <i64>:51 [#uses=1]
- store i64 %51, i64* @ull, align 8
- load i8* @uc, align 1 ; <i8>:52 [#uses=1]
- zext i8 %52 to i32 ; <i32>:53 [#uses=1]
- trunc i32 %53 to i8 ; <i8>:54 [#uses=2]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %54 ) ; <i8>:55 [#uses=1]
- sub i8 %55, %54 ; <i8>:56 [#uses=1]
- store i8 %56, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:57 [#uses=1]
- zext i8 %57 to i32 ; <i32>:58 [#uses=1]
- trunc i32 %58 to i8 ; <i8>:59 [#uses=2]
- call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %59 ) ; <i8>:60 [#uses=1]
- sub i8 %60, %59 ; <i8>:61 [#uses=1]
- store i8 %61, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:62 [#uses=1]
- zext i8 %62 to i32 ; <i32>:63 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:64 [#uses=1]
- trunc i32 %63 to i16 ; <i16>:65 [#uses=2]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %64, i16 %65 ) ; <i16>:66 [#uses=1]
- sub i16 %66, %65 ; <i16>:67 [#uses=1]
- store i16 %67, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:68 [#uses=1]
- zext i8 %68 to i32 ; <i32>:69 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:70 [#uses=1]
- trunc i32 %69 to i16 ; <i16>:71 [#uses=2]
- call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %70, i16 %71 ) ; <i16>:72 [#uses=1]
- sub i16 %72, %71 ; <i16>:73 [#uses=1]
- store i16 %73, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:74 [#uses=1]
- zext i8 %74 to i32 ; <i32>:75 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:76 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %76, i32 %75 ) ; <i32>:77 [#uses=1]
- sub i32 %77, %75 ; <i32>:78 [#uses=1]
- store i32 %78, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:79 [#uses=1]
- zext i8 %79 to i32 ; <i32>:80 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:81 [#uses=1]
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %81, i32 %80 ) ; <i32>:82 [#uses=1]
- sub i32 %82, %80 ; <i32>:83 [#uses=1]
- store i32 %83, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:84 [#uses=1]
- zext i8 %84 to i64 ; <i64>:85 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:86 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %86, i64 %85 ) ; <i64>:87 [#uses=1]
- sub i64 %87, %85 ; <i64>:88 [#uses=1]
- store i64 %88, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:89 [#uses=1]
- zext i8 %89 to i64 ; <i64>:90 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:91 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %91, i64 %90 ) ; <i64>:92 [#uses=1]
- sub i64 %92, %90 ; <i64>:93 [#uses=1]
- store i64 %93, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:94 [#uses=1]
- zext i8 %94 to i64 ; <i64>:95 [#uses=2]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:96 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %96, i64 %95 ) ; <i64>:97 [#uses=1]
- sub i64 %97, %95 ; <i64>:98 [#uses=1]
- store i64 %98, i64* @sll, align 8
- load i8* @uc, align 1 ; <i8>:99 [#uses=1]
- zext i8 %99 to i64 ; <i64>:100 [#uses=2]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:101 [#uses=1]
- call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %101, i64 %100 ) ; <i64>:102 [#uses=1]
- sub i64 %102, %100 ; <i64>:103 [#uses=1]
- store i64 %103, i64* @ull, align 8
- load i8* @uc, align 1 ; <i8>:104 [#uses=1]
- zext i8 %104 to i32 ; <i32>:105 [#uses=1]
- trunc i32 %105 to i8 ; <i8>:106 [#uses=2]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %106 ) ; <i8>:107 [#uses=1]
- or i8 %107, %106 ; <i8>:108 [#uses=1]
- store i8 %108, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:109 [#uses=1]
- zext i8 %109 to i32 ; <i32>:110 [#uses=1]
- trunc i32 %110 to i8 ; <i8>:111 [#uses=2]
- call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %111 ) ; <i8>:112 [#uses=1]
- or i8 %112, %111 ; <i8>:113 [#uses=1]
- store i8 %113, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:114 [#uses=1]
- zext i8 %114 to i32 ; <i32>:115 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:116 [#uses=1]
- trunc i32 %115 to i16 ; <i16>:117 [#uses=2]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %116, i16 %117 ) ; <i16>:118 [#uses=1]
- or i16 %118, %117 ; <i16>:119 [#uses=1]
- store i16 %119, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:120 [#uses=1]
- zext i8 %120 to i32 ; <i32>:121 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:122 [#uses=1]
- trunc i32 %121 to i16 ; <i16>:123 [#uses=2]
- call i16 @llvm.atomic.load.or.i16.p0i16( i16* %122, i16 %123 ) ; <i16>:124 [#uses=1]
- or i16 %124, %123 ; <i16>:125 [#uses=1]
- store i16 %125, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:126 [#uses=1]
- zext i8 %126 to i32 ; <i32>:127 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:128 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %128, i32 %127 ) ; <i32>:129 [#uses=1]
- or i32 %129, %127 ; <i32>:130 [#uses=1]
- store i32 %130, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:131 [#uses=1]
- zext i8 %131 to i32 ; <i32>:132 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:133 [#uses=1]
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %133, i32 %132 ) ; <i32>:134 [#uses=1]
- or i32 %134, %132 ; <i32>:135 [#uses=1]
- store i32 %135, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:136 [#uses=1]
- zext i8 %136 to i64 ; <i64>:137 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:138 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %138, i64 %137 ) ; <i64>:139 [#uses=1]
- or i64 %139, %137 ; <i64>:140 [#uses=1]
- store i64 %140, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:141 [#uses=1]
- zext i8 %141 to i64 ; <i64>:142 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:143 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %143, i64 %142 ) ; <i64>:144 [#uses=1]
- or i64 %144, %142 ; <i64>:145 [#uses=1]
- store i64 %145, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:146 [#uses=1]
- zext i8 %146 to i64 ; <i64>:147 [#uses=2]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:148 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %148, i64 %147 ) ; <i64>:149 [#uses=1]
- or i64 %149, %147 ; <i64>:150 [#uses=1]
- store i64 %150, i64* @sll, align 8
- load i8* @uc, align 1 ; <i8>:151 [#uses=1]
- zext i8 %151 to i64 ; <i64>:152 [#uses=2]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:153 [#uses=1]
- call i64 @llvm.atomic.load.or.i64.p0i64( i64* %153, i64 %152 ) ; <i64>:154 [#uses=1]
- or i64 %154, %152 ; <i64>:155 [#uses=1]
- store i64 %155, i64* @ull, align 8
- load i8* @uc, align 1 ; <i8>:156 [#uses=1]
- zext i8 %156 to i32 ; <i32>:157 [#uses=1]
- trunc i32 %157 to i8 ; <i8>:158 [#uses=2]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %158 ) ; <i8>:159 [#uses=1]
- xor i8 %159, %158 ; <i8>:160 [#uses=1]
- store i8 %160, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:161 [#uses=1]
- zext i8 %161 to i32 ; <i32>:162 [#uses=1]
- trunc i32 %162 to i8 ; <i8>:163 [#uses=2]
- call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %163 ) ; <i8>:164 [#uses=1]
- xor i8 %164, %163 ; <i8>:165 [#uses=1]
- store i8 %165, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:166 [#uses=1]
- zext i8 %166 to i32 ; <i32>:167 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:168 [#uses=1]
- trunc i32 %167 to i16 ; <i16>:169 [#uses=2]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %168, i16 %169 ) ; <i16>:170 [#uses=1]
- xor i16 %170, %169 ; <i16>:171 [#uses=1]
- store i16 %171, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:172 [#uses=1]
- zext i8 %172 to i32 ; <i32>:173 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:174 [#uses=1]
- trunc i32 %173 to i16 ; <i16>:175 [#uses=2]
- call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %174, i16 %175 ) ; <i16>:176 [#uses=1]
- xor i16 %176, %175 ; <i16>:177 [#uses=1]
- store i16 %177, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:178 [#uses=1]
- zext i8 %178 to i32 ; <i32>:179 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:180 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %180, i32 %179 ) ; <i32>:181 [#uses=1]
- xor i32 %181, %179 ; <i32>:182 [#uses=1]
- store i32 %182, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:183 [#uses=1]
- zext i8 %183 to i32 ; <i32>:184 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:185 [#uses=1]
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %185, i32 %184 ) ; <i32>:186 [#uses=1]
- xor i32 %186, %184 ; <i32>:187 [#uses=1]
- store i32 %187, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:188 [#uses=1]
- zext i8 %188 to i64 ; <i64>:189 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:190 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %190, i64 %189 ) ; <i64>:191 [#uses=1]
- xor i64 %191, %189 ; <i64>:192 [#uses=1]
- store i64 %192, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:193 [#uses=1]
- zext i8 %193 to i64 ; <i64>:194 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:195 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %195, i64 %194 ) ; <i64>:196 [#uses=1]
- xor i64 %196, %194 ; <i64>:197 [#uses=1]
- store i64 %197, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:198 [#uses=1]
- zext i8 %198 to i64 ; <i64>:199 [#uses=2]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:200 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %200, i64 %199 ) ; <i64>:201 [#uses=1]
- xor i64 %201, %199 ; <i64>:202 [#uses=1]
- store i64 %202, i64* @sll, align 8
- load i8* @uc, align 1 ; <i8>:203 [#uses=1]
- zext i8 %203 to i64 ; <i64>:204 [#uses=2]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:205 [#uses=1]
- call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %205, i64 %204 ) ; <i64>:206 [#uses=1]
- xor i64 %206, %204 ; <i64>:207 [#uses=1]
- store i64 %207, i64* @ull, align 8
- load i8* @uc, align 1 ; <i8>:208 [#uses=1]
- zext i8 %208 to i32 ; <i32>:209 [#uses=1]
- trunc i32 %209 to i8 ; <i8>:210 [#uses=2]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %210 ) ; <i8>:211 [#uses=1]
- and i8 %211, %210 ; <i8>:212 [#uses=1]
- store i8 %212, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:213 [#uses=1]
- zext i8 %213 to i32 ; <i32>:214 [#uses=1]
- trunc i32 %214 to i8 ; <i8>:215 [#uses=2]
- call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %215 ) ; <i8>:216 [#uses=1]
- and i8 %216, %215 ; <i8>:217 [#uses=1]
- store i8 %217, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:218 [#uses=1]
- zext i8 %218 to i32 ; <i32>:219 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:220 [#uses=1]
- trunc i32 %219 to i16 ; <i16>:221 [#uses=2]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %220, i16 %221 ) ; <i16>:222 [#uses=1]
- and i16 %222, %221 ; <i16>:223 [#uses=1]
- store i16 %223, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:224 [#uses=1]
- zext i8 %224 to i32 ; <i32>:225 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:226 [#uses=1]
- trunc i32 %225 to i16 ; <i16>:227 [#uses=2]
- call i16 @llvm.atomic.load.and.i16.p0i16( i16* %226, i16 %227 ) ; <i16>:228 [#uses=1]
- and i16 %228, %227 ; <i16>:229 [#uses=1]
- store i16 %229, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:230 [#uses=1]
- zext i8 %230 to i32 ; <i32>:231 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:232 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %232, i32 %231 ) ; <i32>:233 [#uses=1]
- and i32 %233, %231 ; <i32>:234 [#uses=1]
- store i32 %234, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:235 [#uses=1]
- zext i8 %235 to i32 ; <i32>:236 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:237 [#uses=1]
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %237, i32 %236 ) ; <i32>:238 [#uses=1]
- and i32 %238, %236 ; <i32>:239 [#uses=1]
- store i32 %239, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:240 [#uses=1]
- zext i8 %240 to i64 ; <i64>:241 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:242 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %242, i64 %241 ) ; <i64>:243 [#uses=1]
- and i64 %243, %241 ; <i64>:244 [#uses=1]
- store i64 %244, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:245 [#uses=1]
- zext i8 %245 to i64 ; <i64>:246 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:247 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %247, i64 %246 ) ; <i64>:248 [#uses=1]
- and i64 %248, %246 ; <i64>:249 [#uses=1]
- store i64 %249, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:250 [#uses=1]
- zext i8 %250 to i64 ; <i64>:251 [#uses=2]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:252 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %252, i64 %251 ) ; <i64>:253 [#uses=1]
- and i64 %253, %251 ; <i64>:254 [#uses=1]
- store i64 %254, i64* @sll, align 8
- load i8* @uc, align 1 ; <i8>:255 [#uses=1]
- zext i8 %255 to i64 ; <i64>:256 [#uses=2]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:257 [#uses=1]
- call i64 @llvm.atomic.load.and.i64.p0i64( i64* %257, i64 %256 ) ; <i64>:258 [#uses=1]
- and i64 %258, %256 ; <i64>:259 [#uses=1]
- store i64 %259, i64* @ull, align 8
- load i8* @uc, align 1 ; <i8>:260 [#uses=1]
- zext i8 %260 to i32 ; <i32>:261 [#uses=1]
- trunc i32 %261 to i8 ; <i8>:262 [#uses=2]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %262 ) ; <i8>:263 [#uses=1]
- xor i8 %263, -1 ; <i8>:264 [#uses=1]
- and i8 %264, %262 ; <i8>:265 [#uses=1]
- store i8 %265, i8* @sc, align 1
- load i8* @uc, align 1 ; <i8>:266 [#uses=1]
- zext i8 %266 to i32 ; <i32>:267 [#uses=1]
- trunc i32 %267 to i8 ; <i8>:268 [#uses=2]
- call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %268 ) ; <i8>:269 [#uses=1]
- xor i8 %269, -1 ; <i8>:270 [#uses=1]
- and i8 %270, %268 ; <i8>:271 [#uses=1]
- store i8 %271, i8* @uc, align 1
- load i8* @uc, align 1 ; <i8>:272 [#uses=1]
- zext i8 %272 to i32 ; <i32>:273 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:274 [#uses=1]
- trunc i32 %273 to i16 ; <i16>:275 [#uses=2]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %274, i16 %275 ) ; <i16>:276 [#uses=1]
- xor i16 %276, -1 ; <i16>:277 [#uses=1]
- and i16 %277, %275 ; <i16>:278 [#uses=1]
- store i16 %278, i16* @ss, align 2
- load i8* @uc, align 1 ; <i8>:279 [#uses=1]
- zext i8 %279 to i32 ; <i32>:280 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:281 [#uses=1]
- trunc i32 %280 to i16 ; <i16>:282 [#uses=2]
- call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %281, i16 %282 ) ; <i16>:283 [#uses=1]
- xor i16 %283, -1 ; <i16>:284 [#uses=1]
- and i16 %284, %282 ; <i16>:285 [#uses=1]
- store i16 %285, i16* @us, align 2
- load i8* @uc, align 1 ; <i8>:286 [#uses=1]
- zext i8 %286 to i32 ; <i32>:287 [#uses=2]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:288 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %288, i32 %287 ) ; <i32>:289 [#uses=1]
- xor i32 %289, -1 ; <i32>:290 [#uses=1]
- and i32 %290, %287 ; <i32>:291 [#uses=1]
- store i32 %291, i32* @si, align 4
- load i8* @uc, align 1 ; <i8>:292 [#uses=1]
- zext i8 %292 to i32 ; <i32>:293 [#uses=2]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:294 [#uses=1]
- call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %294, i32 %293 ) ; <i32>:295 [#uses=1]
- xor i32 %295, -1 ; <i32>:296 [#uses=1]
- and i32 %296, %293 ; <i32>:297 [#uses=1]
- store i32 %297, i32* @ui, align 4
- load i8* @uc, align 1 ; <i8>:298 [#uses=1]
- zext i8 %298 to i64 ; <i64>:299 [#uses=2]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:300 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %300, i64 %299 ) ; <i64>:301 [#uses=1]
- xor i64 %301, -1 ; <i64>:302 [#uses=1]
- and i64 %302, %299 ; <i64>:303 [#uses=1]
- store i64 %303, i64* @sl, align 8
- load i8* @uc, align 1 ; <i8>:304 [#uses=1]
- zext i8 %304 to i64 ; <i64>:305 [#uses=2]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:306 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %306, i64 %305 ) ; <i64>:307 [#uses=1]
- xor i64 %307, -1 ; <i64>:308 [#uses=1]
- and i64 %308, %305 ; <i64>:309 [#uses=1]
- store i64 %309, i64* @ul, align 8
- load i8* @uc, align 1 ; <i8>:310 [#uses=1]
- zext i8 %310 to i64 ; <i64>:311 [#uses=2]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:312 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %312, i64 %311 ) ; <i64>:313 [#uses=1]
- xor i64 %313, -1 ; <i64>:314 [#uses=1]
- and i64 %314, %311 ; <i64>:315 [#uses=1]
- store i64 %315, i64* @sll, align 8
- load i8* @uc, align 1 ; <i8>:316 [#uses=1]
- zext i8 %316 to i64 ; <i64>:317 [#uses=2]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:318 [#uses=1]
- call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %318, i64 %317 ) ; <i64>:319 [#uses=1]
- xor i64 %319, -1 ; <i64>:320 [#uses=1]
- and i64 %320, %317 ; <i64>:321 [#uses=1]
- store i64 %321, i64* @ull, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-define void @test_compare_and_swap() nounwind {
-entry:
- load i8* @sc, align 1 ; <i8>:0 [#uses=1]
- zext i8 %0 to i32 ; <i32>:1 [#uses=1]
- load i8* @uc, align 1 ; <i8>:2 [#uses=1]
- zext i8 %2 to i32 ; <i32>:3 [#uses=1]
- trunc i32 %3 to i8 ; <i8>:4 [#uses=1]
- trunc i32 %1 to i8 ; <i8>:5 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %4, i8 %5 ) ; <i8>:6 [#uses=1]
- store i8 %6, i8* @sc, align 1
- load i8* @sc, align 1 ; <i8>:7 [#uses=1]
- zext i8 %7 to i32 ; <i32>:8 [#uses=1]
- load i8* @uc, align 1 ; <i8>:9 [#uses=1]
- zext i8 %9 to i32 ; <i32>:10 [#uses=1]
- trunc i32 %10 to i8 ; <i8>:11 [#uses=1]
- trunc i32 %8 to i8 ; <i8>:12 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %11, i8 %12 ) ; <i8>:13 [#uses=1]
- store i8 %13, i8* @uc, align 1
- load i8* @sc, align 1 ; <i8>:14 [#uses=1]
- sext i8 %14 to i16 ; <i16>:15 [#uses=1]
- zext i16 %15 to i32 ; <i32>:16 [#uses=1]
- load i8* @uc, align 1 ; <i8>:17 [#uses=1]
- zext i8 %17 to i32 ; <i32>:18 [#uses=1]
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:19 [#uses=1]
- trunc i32 %18 to i16 ; <i16>:20 [#uses=1]
- trunc i32 %16 to i16 ; <i16>:21 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %19, i16 %20, i16 %21 ) ; <i16>:22 [#uses=1]
- store i16 %22, i16* @ss, align 2
- load i8* @sc, align 1 ; <i8>:23 [#uses=1]
- sext i8 %23 to i16 ; <i16>:24 [#uses=1]
- zext i16 %24 to i32 ; <i32>:25 [#uses=1]
- load i8* @uc, align 1 ; <i8>:26 [#uses=1]
- zext i8 %26 to i32 ; <i32>:27 [#uses=1]
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:28 [#uses=1]
- trunc i32 %27 to i16 ; <i16>:29 [#uses=1]
- trunc i32 %25 to i16 ; <i16>:30 [#uses=1]
- call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %28, i16 %29, i16 %30 ) ; <i16>:31 [#uses=1]
- store i16 %31, i16* @us, align 2
- load i8* @sc, align 1 ; <i8>:32 [#uses=1]
- sext i8 %32 to i32 ; <i32>:33 [#uses=1]
- load i8* @uc, align 1 ; <i8>:34 [#uses=1]
- zext i8 %34 to i32 ; <i32>:35 [#uses=1]
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:36 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %36, i32 %35, i32 %33 ) ; <i32>:37 [#uses=1]
- store i32 %37, i32* @si, align 4
- load i8* @sc, align 1 ; <i8>:38 [#uses=1]
- sext i8 %38 to i32 ; <i32>:39 [#uses=1]
- load i8* @uc, align 1 ; <i8>:40 [#uses=1]
- zext i8 %40 to i32 ; <i32>:41 [#uses=1]
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:42 [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %42, i32 %41, i32 %39 ) ; <i32>:43 [#uses=1]
- store i32 %43, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:44 [#uses=1]
- sext i8 %44 to i64 ; <i64>:45 [#uses=1]
- load i8* @uc, align 1 ; <i8>:46 [#uses=1]
- zext i8 %46 to i64 ; <i64>:47 [#uses=1]
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:48 [#uses=1]
- call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %48, i64 %47, i64 %45 ) ; <i64>:49 [#uses=1]
- store i64 %49, i64* @sl, align 8
- load i8* @sc, align 1 ; <i8>:50 [#uses=1]
- sext i8 %50 to i64 ; <i64>:51 [#uses=1]
- load i8* @uc, align 1 ; <i8>:52 [#uses=1]
- zext i8 %52 to i64 ; <i64>:53 [#uses=1]
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:54 [#uses=1]
- call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %54, i64 %53, i64 %51 ) ; <i64>:55 [#uses=1]
- store i64 %55, i64* @ul, align 8
- load i8* @sc, align 1 ; <i8>:56 [#uses=1]
- sext i8 %56 to i64 ; <i64>:57 [#uses=1]
- load i8* @uc, align 1 ; <i8>:58 [#uses=1]
- zext i8 %58 to i64 ; <i64>:59 [#uses=1]
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:60 [#uses=1]
- call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %60, i64 %59, i64 %57 ) ; <i64>:61 [#uses=1]
- store i64 %61, i64* @sll, align 8
- load i8* @sc, align 1 ; <i8>:62 [#uses=1]
- sext i8 %62 to i64 ; <i64>:63 [#uses=1]
- load i8* @uc, align 1 ; <i8>:64 [#uses=1]
- zext i8 %64 to i64 ; <i64>:65 [#uses=1]
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:66 [#uses=1]
- call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %66, i64 %65, i64 %63 ) ; <i64>:67 [#uses=1]
- store i64 %67, i64* @ull, align 8
- load i8* @sc, align 1 ; <i8>:68 [#uses=1]
- zext i8 %68 to i32 ; <i32>:69 [#uses=1]
- load i8* @uc, align 1 ; <i8>:70 [#uses=1]
- zext i8 %70 to i32 ; <i32>:71 [#uses=1]
- trunc i32 %71 to i8 ; <i8>:72 [#uses=2]
- trunc i32 %69 to i8 ; <i8>:73 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %72, i8 %73 ) ; <i8>:74 [#uses=1]
- icmp eq i8 %74, %72 ; <i1>:75 [#uses=1]
- zext i1 %75 to i8 ; <i8>:76 [#uses=1]
- zext i8 %76 to i32 ; <i32>:77 [#uses=1]
- store i32 %77, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:78 [#uses=1]
- zext i8 %78 to i32 ; <i32>:79 [#uses=1]
- load i8* @uc, align 1 ; <i8>:80 [#uses=1]
- zext i8 %80 to i32 ; <i32>:81 [#uses=1]
- trunc i32 %81 to i8 ; <i8>:82 [#uses=2]
- trunc i32 %79 to i8 ; <i8>:83 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %82, i8 %83 ) ; <i8>:84 [#uses=1]
- icmp eq i8 %84, %82 ; <i1>:85 [#uses=1]
- zext i1 %85 to i8 ; <i8>:86 [#uses=1]
- zext i8 %86 to i32 ; <i32>:87 [#uses=1]
- store i32 %87, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:88 [#uses=1]
- sext i8 %88 to i16 ; <i16>:89 [#uses=1]
- zext i16 %89 to i32 ; <i32>:90 [#uses=1]
- load i8* @uc, align 1 ; <i8>:91 [#uses=1]
- zext i8 %91 to i32 ; <i32>:92 [#uses=1]
- trunc i32 %92 to i8 ; <i8>:93 [#uses=2]
- trunc i32 %90 to i8 ; <i8>:94 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 ) ; <i8>:95 [#uses=1]
- icmp eq i8 %95, %93 ; <i1>:96 [#uses=1]
- zext i1 %96 to i8 ; <i8>:97 [#uses=1]
- zext i8 %97 to i32 ; <i32>:98 [#uses=1]
- store i32 %98, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:99 [#uses=1]
- sext i8 %99 to i16 ; <i16>:100 [#uses=1]
- zext i16 %100 to i32 ; <i32>:101 [#uses=1]
- load i8* @uc, align 1 ; <i8>:102 [#uses=1]
- zext i8 %102 to i32 ; <i32>:103 [#uses=1]
- trunc i32 %103 to i8 ; <i8>:104 [#uses=2]
- trunc i32 %101 to i8 ; <i8>:105 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 ) ; <i8>:106 [#uses=1]
- icmp eq i8 %106, %104 ; <i1>:107 [#uses=1]
- zext i1 %107 to i8 ; <i8>:108 [#uses=1]
- zext i8 %108 to i32 ; <i32>:109 [#uses=1]
- store i32 %109, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:110 [#uses=1]
- sext i8 %110 to i32 ; <i32>:111 [#uses=1]
- load i8* @uc, align 1 ; <i8>:112 [#uses=1]
- zext i8 %112 to i32 ; <i32>:113 [#uses=1]
- trunc i32 %113 to i8 ; <i8>:114 [#uses=2]
- trunc i32 %111 to i8 ; <i8>:115 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 ) ; <i8>:116 [#uses=1]
- icmp eq i8 %116, %114 ; <i1>:117 [#uses=1]
- zext i1 %117 to i8 ; <i8>:118 [#uses=1]
- zext i8 %118 to i32 ; <i32>:119 [#uses=1]
- store i32 %119, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:120 [#uses=1]
- sext i8 %120 to i32 ; <i32>:121 [#uses=1]
- load i8* @uc, align 1 ; <i8>:122 [#uses=1]
- zext i8 %122 to i32 ; <i32>:123 [#uses=1]
- trunc i32 %123 to i8 ; <i8>:124 [#uses=2]
- trunc i32 %121 to i8 ; <i8>:125 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 ) ; <i8>:126 [#uses=1]
- icmp eq i8 %126, %124 ; <i1>:127 [#uses=1]
- zext i1 %127 to i8 ; <i8>:128 [#uses=1]
- zext i8 %128 to i32 ; <i32>:129 [#uses=1]
- store i32 %129, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:130 [#uses=1]
- sext i8 %130 to i64 ; <i64>:131 [#uses=1]
- load i8* @uc, align 1 ; <i8>:132 [#uses=1]
- zext i8 %132 to i64 ; <i64>:133 [#uses=1]
- trunc i64 %133 to i8 ; <i8>:134 [#uses=2]
- trunc i64 %131 to i8 ; <i8>:135 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 ) ; <i8>:136 [#uses=1]
- icmp eq i8 %136, %134 ; <i1>:137 [#uses=1]
- zext i1 %137 to i8 ; <i8>:138 [#uses=1]
- zext i8 %138 to i32 ; <i32>:139 [#uses=1]
- store i32 %139, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:140 [#uses=1]
- sext i8 %140 to i64 ; <i64>:141 [#uses=1]
- load i8* @uc, align 1 ; <i8>:142 [#uses=1]
- zext i8 %142 to i64 ; <i64>:143 [#uses=1]
- trunc i64 %143 to i8 ; <i8>:144 [#uses=2]
- trunc i64 %141 to i8 ; <i8>:145 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 ) ; <i8>:146 [#uses=1]
- icmp eq i8 %146, %144 ; <i1>:147 [#uses=1]
- zext i1 %147 to i8 ; <i8>:148 [#uses=1]
- zext i8 %148 to i32 ; <i32>:149 [#uses=1]
- store i32 %149, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:150 [#uses=1]
- sext i8 %150 to i64 ; <i64>:151 [#uses=1]
- load i8* @uc, align 1 ; <i8>:152 [#uses=1]
- zext i8 %152 to i64 ; <i64>:153 [#uses=1]
- trunc i64 %153 to i8 ; <i8>:154 [#uses=2]
- trunc i64 %151 to i8 ; <i8>:155 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 ) ; <i8>:156 [#uses=1]
- icmp eq i8 %156, %154 ; <i1>:157 [#uses=1]
- zext i1 %157 to i8 ; <i8>:158 [#uses=1]
- zext i8 %158 to i32 ; <i32>:159 [#uses=1]
- store i32 %159, i32* @ui, align 4
- load i8* @sc, align 1 ; <i8>:160 [#uses=1]
- sext i8 %160 to i64 ; <i64>:161 [#uses=1]
- load i8* @uc, align 1 ; <i8>:162 [#uses=1]
- zext i8 %162 to i64 ; <i64>:163 [#uses=1]
- trunc i64 %163 to i8 ; <i8>:164 [#uses=2]
- trunc i64 %161 to i8 ; <i8>:165 [#uses=1]
- call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 ) ; <i8>:166 [#uses=1]
- icmp eq i8 %166, %164 ; <i1>:167 [#uses=1]
- zext i1 %167 to i8 ; <i8>:168 [#uses=1]
- zext i8 %168 to i32 ; <i32>:169 [#uses=1]
- store i32 %169, i32* @ui, align 4
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind
-
-declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind
-
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
-
-declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64*, i64, i64) nounwind
-
-define void @test_lock() nounwind {
-entry:
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 ) ; <i8>:0 [#uses=1]
- store i8 %0, i8* @sc, align 1
- call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 ) ; <i8>:1 [#uses=1]
- store i8 %1, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:2 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 ) ; <i16>:3 [#uses=1]
- store i16 %3, i16* @ss, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:4 [#uses=1]
- call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 ) ; <i16>:5 [#uses=1]
- store i16 %5, i16* @us, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:6 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* @si, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:8 [#uses=1]
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* @ui, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:10 [#uses=1]
- call i64 @llvm.atomic.swap.i64.p0i64( i64* %10, i64 1 ) ; <i64>:11 [#uses=1]
- store i64 %11, i64* @sl, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:12 [#uses=1]
- call i64 @llvm.atomic.swap.i64.p0i64( i64* %12, i64 1 ) ; <i64>:13 [#uses=1]
- store i64 %13, i64* @ul, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:14 [#uses=1]
- call i64 @llvm.atomic.swap.i64.p0i64( i64* %14, i64 1 ) ; <i64>:15 [#uses=1]
- store i64 %15, i64* @sll, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:16 [#uses=1]
- call i64 @llvm.atomic.swap.i64.p0i64( i64* %16, i64 1 ) ; <i64>:17 [#uses=1]
- store i64 %17, i64* @ull, align 8
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false )
- volatile store i8 0, i8* @sc, align 1
- volatile store i8 0, i8* @uc, align 1
- bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*>:18 [#uses=1]
- volatile store i16 0, i16* %18, align 2
- bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*>:19 [#uses=1]
- volatile store i16 0, i16* %19, align 2
- bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*>:20 [#uses=1]
- volatile store i32 0, i32* %20, align 4
- bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*>:21 [#uses=1]
- volatile store i32 0, i32* %21, align 4
- bitcast i8* bitcast (i64* @sl to i8*) to i64* ; <i64*>:22 [#uses=1]
- volatile store i64 0, i64* %22, align 8
- bitcast i8* bitcast (i64* @ul to i8*) to i64* ; <i64*>:23 [#uses=1]
- volatile store i64 0, i64* %23, align 8
- bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*>:24 [#uses=1]
- volatile store i64 0, i64* %24, align 8
- bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*>:25 [#uses=1]
- volatile store i64 0, i64* %25, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-
-declare i64 @llvm.atomic.swap.i64.p0i64(i64*, i64) nounwind
-
-declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/GC/alloc_loop.ll b/libclamav/c++/llvm/test/CodeGen/X86/GC/alloc_loop.ll
deleted file mode 100644
index fb78ba2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/GC/alloc_loop.ll
+++ /dev/null
@@ -1,53 +0,0 @@
-; RUN: llc < %s
-
-
-declare i8* @llvm_gc_allocate(i32)
-declare void @llvm_gc_initialize(i32)
-
-declare void @llvm.gcroot(i8**, i8*)
-declare void @llvm.gcwrite(i8*, i8*, i8**)
-
-define i32 @main() gc "shadow-stack" {
-entry:
- %A = alloca i8*
- %B = alloca i8**
-
- call void @llvm_gc_initialize(i32 1048576) ; Start with 1MB heap
-
- ;; void *A;
- call void @llvm.gcroot(i8** %A, i8* null)
-
- ;; A = gcalloc(10);
- %Aptr = call i8* @llvm_gc_allocate(i32 10)
- store i8* %Aptr, i8** %A
-
- ;; void **B;
- %tmp.1 = bitcast i8*** %B to i8**
- call void @llvm.gcroot(i8** %tmp.1, i8* null)
-
- ;; B = gcalloc(4);
- %B.upgrd.1 = call i8* @llvm_gc_allocate(i32 8)
- %tmp.2 = bitcast i8* %B.upgrd.1 to i8**
- store i8** %tmp.2, i8*** %B
-
- ;; *B = A;
- %B.1 = load i8*** %B
- %A.1 = load i8** %A
- call void @llvm.gcwrite(i8* %A.1, i8* %B.upgrd.1, i8** %B.1)
-
- br label %AllocLoop
-
-AllocLoop:
- %i = phi i32 [ 0, %entry ], [ %indvar.next, %AllocLoop ]
- ;; Allocated mem: allocated memory is immediately dead.
- call i8* @llvm_gc_allocate(i32 100)
-
- %indvar.next = add i32 %i, 1
- %exitcond = icmp eq i32 %indvar.next, 10000000
- br i1 %exitcond, label %Exit, label %AllocLoop
-
-Exit:
- ret i32 0
-}
-
-declare void @__main()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/GC/argpromotion.ll b/libclamav/c++/llvm/test/CodeGen/X86/GC/argpromotion.ll
deleted file mode 100644
index 18f6155..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/GC/argpromotion.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: opt < %s -argpromotion
-; XFAIL: *
-
-declare void @llvm.gcroot(i8**, i8*)
-
-define i32 @g() {
-entry:
- %var = alloca i32
- store i32 1, i32* %var
- %x = call i32 @f(i32* %var)
- ret i32 %x
-}
-
-define internal i32 @f(i32* %xp) gc "example" {
-entry:
- %var = alloca i8*
- call void @llvm.gcroot(i8** %var, i8* null)
- %x = load i32* %xp
- ret i32 %x
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/GC/badreadproto.ll b/libclamav/c++/llvm/test/CodeGen/X86/GC/badreadproto.ll
deleted file mode 100644
index 4fe90b9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/GC/badreadproto.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-
- %list = type { i32, %list* }
-
-; This usage is invalid now; instead, objects must be bitcast to i8* for input
-; to the gc intrinsics.
-declare %list* @llvm.gcread(%list*, %list**)
-
-define %list* @tl(%list* %l) gc "example" {
- %hd.ptr = getelementptr %list* %l, i32 0, i32 0
- %hd = call %list* @llvm.gcread(%list* %l, %list** %hd.ptr)
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/GC/badrootproto.ll b/libclamav/c++/llvm/test/CodeGen/X86/GC/badrootproto.ll
deleted file mode 100644
index ff86d03..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/GC/badrootproto.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-
- %list = type { i32, %list* }
- %meta = type opaque
-
-; This usage is invalid now; instead, objects must be bitcast to i8* for input
-; to the gc intrinsics.
-declare void @llvm.gcroot(%list*, %meta*)
-
-define void @root() gc "example" {
- %x.var = alloca i8*
- call void @llvm.gcroot(i8** %x.var, %meta* null)
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/GC/badwriteproto.ll b/libclamav/c++/llvm/test/CodeGen/X86/GC/badwriteproto.ll
deleted file mode 100644
index be81f84..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/GC/badwriteproto.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-
- %list = type { i32, %list* }
-
-; This usage is invalid now; instead, objects must be bitcast to i8* for input
-; to the gc intrinsics.
-declare void @llvm.gcwrite(%list*, %list*, %list**)
-
-define %list* @cons(i32 %hd, %list* %tl) gc "example" {
- %tmp = call i8* @gcalloc(i32 bitcast(%list* getelementptr(%list* null, i32 1) to i32))
- %cell = bitcast i8* %tmp to %list*
-
- %hd.ptr = getelementptr %list* %cell, i32 0, i32 0
- store i32 %hd, i32* %hd.ptr
-
- %tl.ptr = getelementptr %list* %cell, i32 0, i32 0
- call void @llvm.gcwrite(%list* %tl, %list* %cell, %list** %tl.ptr)
-
- ret %cell.2
-}
-
-declare i8* @gcalloc(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/GC/deadargelim.ll b/libclamav/c++/llvm/test/CodeGen/X86/GC/deadargelim.ll
deleted file mode 100644
index b057030..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/GC/deadargelim.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: opt < %s -deadargelim
-; XFAIL: *
-
-declare void @llvm.gcroot(i8**, i8*)
-
-define void @g() {
-entry:
- call void @f(i32 0)
- ret void
-}
-
-define internal void @f(i32 %unused) gc "example" {
-entry:
- %var = alloca i8*
- call void @llvm.gcroot(i8** %var, i8* null)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/GC/dg.exp b/libclamav/c++/llvm/test/CodeGen/X86/GC/dg.exp
deleted file mode 100644
index 1f48ada..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/GC/dg.exp
+++ /dev/null
@@ -1,5 +0,0 @@
-load_lib llvm.exp
-
-if { [llvm_supports_target X86] } {
- RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/GC/fat.ll b/libclamav/c++/llvm/test/CodeGen/X86/GC/fat.ll
deleted file mode 100644
index d05ca3d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/GC/fat.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-
-declare void @llvm.gcroot(i8**, i8*) nounwind
-
-define void @f() gc "x" {
- %st = alloca { i8*, i1 } ; <{ i8*, i1 }*> [#uses=1]
- %st_ptr = bitcast { i8*, i1 }* %st to i8** ; <i8**> [#uses=1]
- call void @llvm.gcroot(i8** %st_ptr, i8* null)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/GC/inline.ll b/libclamav/c++/llvm/test/CodeGen/X86/GC/inline.ll
deleted file mode 100644
index bd6d378..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/GC/inline.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: opt < %s -inline -S | grep example
-; XFAIL: *
-
- %IntArray = type { i32, [0 x i32*] }
-
-declare void @llvm.gcroot(i8**, i8*) nounwind
-
-define i32 @f() {
- %x = call i32 @g( ) ; <i32> [#uses=1]
- ret i32 %x
-}
-
-define internal i32 @g() gc "example" {
- %root = alloca i8* ; <i8**> [#uses=2]
- call void @llvm.gcroot( i8** %root, i8* null )
- %obj = call %IntArray* @h( ) ; <%IntArray*> [#uses=2]
- %obj.2 = bitcast %IntArray* %obj to i8* ; <i8*> [#uses=1]
- store i8* %obj.2, i8** %root
- %Length.ptr = getelementptr %IntArray* %obj, i32 0, i32 0 ; <i32*> [#uses=1]
- %Length = load i32* %Length.ptr ; <i32> [#uses=1]
- ret i32 %Length
-}
-
-declare %IntArray* @h()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/GC/inline2.ll b/libclamav/c++/llvm/test/CodeGen/X86/GC/inline2.ll
deleted file mode 100644
index 127bbdf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/GC/inline2.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: opt < %s -inline -S | grep sample
-; RUN: opt < %s -inline -S | grep example
-; XFAIL: *
-
- %IntArray = type { i32, [0 x i32*] }
-
-declare void @llvm.gcroot(i8**, i8*) nounwind
-
-define i32 @f() gc "sample" {
- %x = call i32 @g( ) ; <i32> [#uses=1]
- ret i32 %x
-}
-
-define internal i32 @g() gc "example" {
- %root = alloca i8* ; <i8**> [#uses=2]
- call void @llvm.gcroot( i8** %root, i8* null )
- %obj = call %IntArray* @h( ) ; <%IntArray*> [#uses=2]
- %obj.2 = bitcast %IntArray* %obj to i8* ; <i8*> [#uses=1]
- store i8* %obj.2, i8** %root
- %Length.ptr = getelementptr %IntArray* %obj, i32 0, i32 0 ; <i32*> [#uses=1]
- %Length = load i32* %Length.ptr ; <i32> [#uses=1]
- ret i32 %Length
-}
-
-declare %IntArray* @h()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/GC/lower_gcroot.ll b/libclamav/c++/llvm/test/CodeGen/X86/GC/lower_gcroot.ll
deleted file mode 100644
index c2d418a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/GC/lower_gcroot.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s
-
- %Env = type i8*
-
-define void @.main(%Env) gc "shadow-stack" {
- %Root = alloca %Env
- call void @llvm.gcroot( %Env* %Root, %Env null )
- unreachable
-}
-
-declare void @llvm.gcroot(%Env*, %Env)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/GC/outside.ll b/libclamav/c++/llvm/test/CodeGen/X86/GC/outside.ll
deleted file mode 100644
index 2968c69..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/GC/outside.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-
-declare void @llvm.gcroot(i8**, i8*)
-
-define void @f(i8* %x) {
- %root = alloca i8*
- call void @llvm.gcroot(i8** %root, i8* null)
- store i8* %x, i8** %root
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/GC/simple_ocaml.ll b/libclamav/c++/llvm/test/CodeGen/X86/GC/simple_ocaml.ll
deleted file mode 100644
index f765dc0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/GC/simple_ocaml.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s | grep caml.*__frametable
-; RUN: llc < %s -march=x86 | grep {movl .0}
-
-%struct.obj = type { i8*, %struct.obj* }
-
-define %struct.obj* @fun(%struct.obj* %head) gc "ocaml" {
-entry:
- %gcroot.0 = alloca i8*
- %gcroot.1 = alloca i8*
-
- call void @llvm.gcroot(i8** %gcroot.0, i8* null)
- call void @llvm.gcroot(i8** %gcroot.1, i8* null)
-
- %local.0 = bitcast i8** %gcroot.0 to %struct.obj**
- %local.1 = bitcast i8** %gcroot.1 to %struct.obj**
-
- store %struct.obj* %head, %struct.obj** %local.0
- br label %bb.loop
-bb.loop:
- %t0 = load %struct.obj** %local.0
- %t1 = getelementptr %struct.obj* %t0, i32 0, i32 1
- %t2 = bitcast %struct.obj* %t0 to i8*
- %t3 = bitcast %struct.obj** %t1 to i8**
- %t4 = call i8* @llvm.gcread(i8* %t2, i8** %t3)
- %t5 = bitcast i8* %t4 to %struct.obj*
- %t6 = icmp eq %struct.obj* %t5, null
- br i1 %t6, label %bb.loop, label %bb.end
-bb.end:
- %t7 = malloc %struct.obj
- store %struct.obj* %t7, %struct.obj** %local.1
- %t8 = bitcast %struct.obj* %t7 to i8*
- %t9 = load %struct.obj** %local.0
- %t10 = getelementptr %struct.obj* %t9, i32 0, i32 1
- %t11 = bitcast %struct.obj* %t9 to i8*
- %t12 = bitcast %struct.obj** %t10 to i8**
- call void @llvm.gcwrite(i8* %t8, i8* %t11, i8** %t12)
- ret %struct.obj* %t7
-}
-
-declare void @llvm.gcroot(i8** %value, i8* %tag)
-declare void @llvm.gcwrite(i8* %value, i8* %obj, i8** %field)
-declare i8* @llvm.gcread(i8* %obj, i8** %field)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/SwitchLowering.ll b/libclamav/c++/llvm/test/CodeGen/X86/SwitchLowering.ll
deleted file mode 100644
index 29a0e82..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/SwitchLowering.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -march=x86 | grep cmp | count 1
-; PR964
-
-define i8* @FindChar(i8* %CurPtr) {
-entry:
- br label %bb
-
-bb: ; preds = %bb, %entry
- %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
- %CurPtr_addr.0.rec = bitcast i32 %indvar to i32 ; <i32> [#uses=1]
- %gep.upgrd.1 = zext i32 %indvar to i64 ; <i64> [#uses=1]
- %CurPtr_addr.0 = getelementptr i8* %CurPtr, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
- %tmp = load i8* %CurPtr_addr.0 ; <i8> [#uses=3]
- %tmp2.rec = add i32 %CurPtr_addr.0.rec, 1 ; <i32> [#uses=1]
- %tmp2 = getelementptr i8* %CurPtr, i32 %tmp2.rec ; <i8*> [#uses=1]
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
- switch i8 %tmp, label %bb [
- i8 0, label %bb7
- i8 120, label %bb7
- ]
-
-bb7: ; preds = %bb, %bb
- tail call void @foo( i8 %tmp )
- ret i8* %tmp2
-}
-
-declare void @foo(i8)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/abi-isel.ll b/libclamav/c++/llvm/test/CodeGen/X86/abi-isel.ll
deleted file mode 100644
index 9208738..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/abi-isel.ll
+++ /dev/null
@@ -1,9646 +0,0 @@
-; RUN: llc < %s -asm-verbose=0 -mtriple=i686-unknown-linux-gnu -march=x86 -relocation-model=static -code-model=small -post-RA-scheduler=false | FileCheck %s -check-prefix=LINUX-32-STATIC
-; RUN: llc < %s -asm-verbose=0 -mtriple=i686-unknown-linux-gnu -march=x86 -relocation-model=static -code-model=small -post-RA-scheduler=false | FileCheck %s -check-prefix=LINUX-32-PIC
-
-; RUN: llc < %s -asm-verbose=0 -mtriple=x86_64-unknown-linux-gnu -march=x86-64 -relocation-model=static -code-model=small -post-RA-scheduler=false | FileCheck %s -check-prefix=LINUX-64-STATIC
-; RUN: llc < %s -asm-verbose=0 -mtriple=x86_64-unknown-linux-gnu -march=x86-64 -relocation-model=pic -code-model=small -post-RA-scheduler=false | FileCheck %s -check-prefix=LINUX-64-PIC
-
-; RUN: llc < %s -asm-verbose=0 -mtriple=i686-apple-darwin -march=x86 -relocation-model=static -code-model=small -post-RA-scheduler=false | FileCheck %s -check-prefix=DARWIN-32-STATIC
-; RUN: llc < %s -asm-verbose=0 -mtriple=i686-apple-darwin -march=x86 -relocation-model=dynamic-no-pic -code-model=small -post-RA-scheduler=false | FileCheck %s -check-prefix=DARWIN-32-DYNAMIC
-; RUN: llc < %s -asm-verbose=0 -mtriple=i686-apple-darwin -march=x86 -relocation-model=pic -code-model=small -post-RA-scheduler=false | FileCheck %s -check-prefix=DARWIN-32-PIC
-
-; RUN: llc < %s -asm-verbose=0 -mtriple=x86_64-apple-darwin -march=x86-64 -relocation-model=static -code-model=small -post-RA-scheduler=false | FileCheck %s -check-prefix=DARWIN-64-STATIC
-; RUN: llc < %s -asm-verbose=0 -mtriple=x86_64-apple-darwin -march=x86-64 -relocation-model=dynamic-no-pic -code-model=small -post-RA-scheduler=false | FileCheck %s -check-prefix=DARWIN-64-DYNAMIC
-; RUN: llc < %s -asm-verbose=0 -mtriple=x86_64-apple-darwin -march=x86-64 -relocation-model=pic -code-model=small -post-RA-scheduler=false | FileCheck %s -check-prefix=DARWIN-64-PIC
-
-@src = external global [131072 x i32]
-@dst = external global [131072 x i32]
-@xsrc = external global [32 x i32]
-@xdst = external global [32 x i32]
-@ptr = external global i32*
-@dsrc = global [131072 x i32] zeroinitializer, align 32
-@ddst = global [131072 x i32] zeroinitializer, align 32
-@dptr = global i32* null
-@lsrc = internal global [131072 x i32] zeroinitializer
-@ldst = internal global [131072 x i32] zeroinitializer
-@lptr = internal global i32* null
-@ifunc = external global void ()*
-@difunc = global void ()* null
-@lifunc = internal global void ()* null
-@lxsrc = internal global [32 x i32] zeroinitializer, align 32
-@lxdst = internal global [32 x i32] zeroinitializer, align 32
-@dxsrc = global [32 x i32] zeroinitializer, align 32
-@dxdst = global [32 x i32] zeroinitializer, align 32
-
-define void @foo00() nounwind {
-entry:
- %0 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 0), align 4
- store i32 %0, i32* getelementptr ([131072 x i32]* @dst, i32 0, i64 0), align 4
- ret void
-
-; LINUX-64-STATIC: foo00:
-; LINUX-64-STATIC: movl src(%rip), %eax
-; LINUX-64-STATIC: movl %eax, dst
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: foo00:
-; LINUX-32-STATIC: movl src, %eax
-; LINUX-32-STATIC-NEXT: movl %eax, dst
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: foo00:
-; LINUX-32-PIC: movl src, %eax
-; LINUX-32-PIC-NEXT: movl %eax, dst
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: foo00:
-; LINUX-64-PIC: movq src@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl (%rax), %eax
-; LINUX-64-PIC-NEXT: movq dst@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _foo00:
-; DARWIN-32-STATIC: movl _src, %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _dst
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _foo00:
-; DARWIN-32-DYNAMIC: movl L_src$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl (%eax), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_dst$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _foo00:
-; DARWIN-32-PIC: call L1$pb
-; DARWIN-32-PIC-NEXT: L1$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L1$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl (%ecx), %ecx
-; DARWIN-32-PIC-NEXT: movl L_dst$non_lazy_ptr-L1$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, (%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _foo00:
-; DARWIN-64-STATIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl (%rax), %eax
-; DARWIN-64-STATIC-NEXT: movq _dst@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _foo00:
-; DARWIN-64-DYNAMIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl (%rax), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _dst@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _foo00:
-; DARWIN-64-PIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl (%rax), %eax
-; DARWIN-64-PIC-NEXT: movq _dst@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @fxo00() nounwind {
-entry:
- %0 = load i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 0), align 4
- store i32 %0, i32* getelementptr ([32 x i32]* @xdst, i32 0, i64 0), align 4
- ret void
-
-; LINUX-64-STATIC: fxo00:
-; LINUX-64-STATIC: movl xsrc(%rip), %eax
-; LINUX-64-STATIC: movl %eax, xdst
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: fxo00:
-; LINUX-32-STATIC: movl xsrc, %eax
-; LINUX-32-STATIC-NEXT: movl %eax, xdst
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: fxo00:
-; LINUX-32-PIC: movl xsrc, %eax
-; LINUX-32-PIC-NEXT: movl %eax, xdst
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: fxo00:
-; LINUX-64-PIC: movq xsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl (%rax), %eax
-; LINUX-64-PIC-NEXT: movq xdst@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _fxo00:
-; DARWIN-32-STATIC: movl _xsrc, %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _xdst
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _fxo00:
-; DARWIN-32-DYNAMIC: movl L_xsrc$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl (%eax), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_xdst$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _fxo00:
-; DARWIN-32-PIC: call L2$pb
-; DARWIN-32-PIC-NEXT: L2$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_xsrc$non_lazy_ptr-L2$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl (%ecx), %ecx
-; DARWIN-32-PIC-NEXT: movl L_xdst$non_lazy_ptr-L2$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, (%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _fxo00:
-; DARWIN-64-STATIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl (%rax), %eax
-; DARWIN-64-STATIC-NEXT: movq _xdst@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _fxo00:
-; DARWIN-64-DYNAMIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl (%rax), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _xdst@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _fxo00:
-; DARWIN-64-PIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl (%rax), %eax
-; DARWIN-64-PIC-NEXT: movq _xdst@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @foo01() nounwind {
-entry:
- store i32* getelementptr ([131072 x i32]* @dst, i32 0, i32 0), i32** @ptr, align 8
- ret void
-; LINUX-64-STATIC: foo01:
-; LINUX-64-STATIC: movq $dst, ptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: foo01:
-; LINUX-32-STATIC: movl $dst, ptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: foo01:
-; LINUX-32-PIC: movl $dst, ptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: foo01:
-; LINUX-64-PIC: movq dst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movq ptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq %rax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _foo01:
-; DARWIN-32-STATIC: movl $_dst, _ptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _foo01:
-; DARWIN-32-DYNAMIC: movl L_dst$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _foo01:
-; DARWIN-32-PIC: call L3$pb
-; DARWIN-32-PIC-NEXT: L3$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_dst$non_lazy_ptr-L3$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L3$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, (%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _foo01:
-; DARWIN-64-STATIC: movq _dst at GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _foo01:
-; DARWIN-64-DYNAMIC: movq _dst at GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _foo01:
-; DARWIN-64-PIC: movq _dst at GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @fxo01() nounwind {
-entry:
- store i32* getelementptr ([32 x i32]* @xdst, i32 0, i32 0), i32** @ptr, align 8
- ret void
-; LINUX-64-STATIC: fxo01:
-; LINUX-64-STATIC: movq $xdst, ptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: fxo01:
-; LINUX-32-STATIC: movl $xdst, ptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: fxo01:
-; LINUX-32-PIC: movl $xdst, ptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: fxo01:
-; LINUX-64-PIC: movq xdst at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movq ptr at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq %rax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _fxo01:
-; DARWIN-32-STATIC: movl $_xdst, _ptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _fxo01:
-; DARWIN-32-DYNAMIC: movl L_xdst$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _fxo01:
-; DARWIN-32-PIC: call L4$pb
-; DARWIN-32-PIC-NEXT: L4$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_xdst$non_lazy_ptr-L4$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L4$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, (%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _fxo01:
-; DARWIN-64-STATIC: movq _xdst at GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _fxo01:
-; DARWIN-64-DYNAMIC: movq _xdst at GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _fxo01:
-; DARWIN-64-PIC: movq _xdst at GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @foo02() nounwind {
-entry:
- %0 = load i32** @ptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 0), align 4
- store i32 %1, i32* %0, align 4
- ret void
-; LINUX-64-STATIC: foo02:
-; LINUX-64-STATIC: movl src(%rip), %
-; LINUX-64-STATIC: movq ptr(%rip), %
-; LINUX-64-STATIC: movl
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: foo02:
-; LINUX-32-STATIC: movl src, %eax
-; LINUX-32-STATIC-NEXT: movl ptr, %ecx
-; LINUX-32-STATIC-NEXT: movl %eax, (%ecx)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: foo02:
-; LINUX-32-PIC: movl src, %eax
-; LINUX-32-PIC-NEXT: movl ptr, %ecx
-; LINUX-32-PIC-NEXT: movl %eax, (%ecx)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: foo02:
-; LINUX-64-PIC: movq src at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl (%rax), %eax
-; LINUX-64-PIC-NEXT: movq ptr at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq (%rcx), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _foo02:
-; DARWIN-32-STATIC: movl _src, %eax
-; DARWIN-32-STATIC-NEXT: movl _ptr, %ecx
-; DARWIN-32-STATIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _foo02:
-; DARWIN-32-DYNAMIC: movl L_src$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl (%eax), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl (%ecx), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _foo02:
-; DARWIN-32-PIC: call L5$pb
-; DARWIN-32-PIC-NEXT: L5$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L5$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl (%ecx), %ecx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L5$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl (%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, (%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _foo02:
-; DARWIN-64-STATIC: movq _src at GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl (%rax), %eax
-; DARWIN-64-STATIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _foo02:
-; DARWIN-64-DYNAMIC: movq _src at GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl (%rax), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _foo02:
-; DARWIN-64-PIC: movq _src at GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl (%rax), %eax
-; DARWIN-64-PIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @fxo02() nounwind {
-entry:
- %0 = load i32** @ptr, align 8
- %1 = load i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 0), align 4
- store i32 %1, i32* %0, align 4
- ret void
-; LINUX-64-STATIC: fxo02:
-; LINUX-64-STATIC: movl xsrc(%rip), %
-; LINUX-64-STATIC: movq ptr(%rip), %
-; LINUX-64-STATIC: movl
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: fxo02:
-; LINUX-32-STATIC: movl xsrc, %eax
-; LINUX-32-STATIC-NEXT: movl ptr, %ecx
-; LINUX-32-STATIC-NEXT: movl %eax, (%ecx)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: fxo02:
-; LINUX-32-PIC: movl xsrc, %eax
-; LINUX-32-PIC-NEXT: movl ptr, %ecx
-; LINUX-32-PIC-NEXT: movl %eax, (%ecx)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: fxo02:
-; LINUX-64-PIC: movq xsrc at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl (%rax), %eax
-; LINUX-64-PIC-NEXT: movq ptr at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq (%rcx), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _fxo02:
-; DARWIN-32-STATIC: movl _xsrc, %eax
-; DARWIN-32-STATIC-NEXT: movl _ptr, %ecx
-; DARWIN-32-STATIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _fxo02:
-; DARWIN-32-DYNAMIC: movl L_xsrc$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl (%eax), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl (%ecx), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _fxo02:
-; DARWIN-32-PIC: call L6$pb
-; DARWIN-32-PIC-NEXT: L6$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_xsrc$non_lazy_ptr-L6$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl (%ecx), %ecx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L6$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl (%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, (%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _fxo02:
-; DARWIN-64-STATIC: movq _xsrc at GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl (%rax), %eax
-; DARWIN-64-STATIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _fxo02:
-; DARWIN-64-DYNAMIC: movq _xsrc at GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl (%rax), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _fxo02:
-; DARWIN-64-PIC: movq _xsrc at GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl (%rax), %eax
-; DARWIN-64-PIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @foo03() nounwind {
-entry:
- %0 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 0), align 32
- store i32 %0, i32* getelementptr ([131072 x i32]* @ddst, i32 0, i64 0), align 32
- ret void
-; LINUX-64-STATIC: foo03:
-; LINUX-64-STATIC: movl dsrc(%rip), %eax
-; LINUX-64-STATIC: movl %eax, ddst
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: foo03:
-; LINUX-32-STATIC: movl dsrc, %eax
-; LINUX-32-STATIC-NEXT: movl %eax, ddst
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: foo03:
-; LINUX-32-PIC: movl dsrc, %eax
-; LINUX-32-PIC-NEXT: movl %eax, ddst
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: foo03:
-; LINUX-64-PIC: movq dsrc at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl (%rax), %eax
-; LINUX-64-PIC-NEXT: movq ddst at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _foo03:
-; DARWIN-32-STATIC: movl _dsrc, %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _ddst
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _foo03:
-; DARWIN-32-DYNAMIC: movl _dsrc, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, _ddst
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _foo03:
-; DARWIN-32-PIC: call L7$pb
-; DARWIN-32-PIC-NEXT: L7$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl _dsrc-L7$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, _ddst-L7$pb(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _foo03:
-; DARWIN-64-STATIC: movl _dsrc(%rip), %eax
-; DARWIN-64-STATIC-NEXT: movl %eax, _ddst(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _foo03:
-; DARWIN-64-DYNAMIC: movl _dsrc(%rip), %eax
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, _ddst(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _foo03:
-; DARWIN-64-PIC: movl _dsrc(%rip), %eax
-; DARWIN-64-PIC-NEXT: movl %eax, _ddst(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @foo04() nounwind {
-entry:
- store i32* getelementptr ([131072 x i32]* @ddst, i32 0, i32 0), i32** @dptr, align 8
- ret void
-; LINUX-64-STATIC: foo04:
-; LINUX-64-STATIC: movq $ddst, dptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: foo04:
-; LINUX-32-STATIC: movl $ddst, dptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: foo04:
-; LINUX-32-PIC: movl $ddst, dptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: foo04:
-; LINUX-64-PIC: movq ddst at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movq dptr at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq %rax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _foo04:
-; DARWIN-32-STATIC: movl $_ddst, _dptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _foo04:
-; DARWIN-32-DYNAMIC: movl $_ddst, _dptr
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _foo04:
-; DARWIN-32-PIC: call L8$pb
-; DARWIN-32-PIC-NEXT: L8$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal _ddst-L8$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, _dptr-L8$pb(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _foo04:
-; DARWIN-64-STATIC: leaq _ddst(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _foo04:
-; DARWIN-64-DYNAMIC: leaq _ddst(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _foo04:
-; DARWIN-64-PIC: leaq _ddst(%rip), %rax
-; DARWIN-64-PIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @foo05() nounwind {
-entry:
- %0 = load i32** @dptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 0), align 32
- store i32 %1, i32* %0, align 4
- ret void
-; LINUX-64-STATIC: foo05:
-; LINUX-64-STATIC: movl dsrc(%rip), %
-; LINUX-64-STATIC: movq dptr(%rip), %
-; LINUX-64-STATIC: movl
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: foo05:
-; LINUX-32-STATIC: movl dsrc, %eax
-; LINUX-32-STATIC-NEXT: movl dptr, %ecx
-; LINUX-32-STATIC-NEXT: movl %eax, (%ecx)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: foo05:
-; LINUX-32-PIC: movl dsrc, %eax
-; LINUX-32-PIC-NEXT: movl dptr, %ecx
-; LINUX-32-PIC-NEXT: movl %eax, (%ecx)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: foo05:
-; LINUX-64-PIC: movq dsrc at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl (%rax), %eax
-; LINUX-64-PIC-NEXT: movq dptr at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq (%rcx), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _foo05:
-; DARWIN-32-STATIC: movl _dsrc, %eax
-; DARWIN-32-STATIC-NEXT: movl _dptr, %ecx
-; DARWIN-32-STATIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _foo05:
-; DARWIN-32-DYNAMIC: movl _dsrc, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _dptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _foo05:
-; DARWIN-32-PIC: call L9$pb
-; DARWIN-32-PIC-NEXT: L9$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl _dsrc-L9$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl _dptr-L9$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, (%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _foo05:
-; DARWIN-64-STATIC: movl _dsrc(%rip), %eax
-; DARWIN-64-STATIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _foo05:
-; DARWIN-64-DYNAMIC: movl _dsrc(%rip), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _foo05:
-; DARWIN-64-PIC: movl _dsrc(%rip), %eax
-; DARWIN-64-PIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @foo06() nounwind {
-entry:
- %0 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 0), align 4
- store i32 %0, i32* getelementptr ([131072 x i32]* @ldst, i32 0, i64 0), align 4
- ret void
-; LINUX-64-STATIC: foo06:
-; LINUX-64-STATIC: movl lsrc(%rip), %eax
-; LINUX-64-STATIC: movl %eax, ldst(%rip)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: foo06:
-; LINUX-32-STATIC: movl lsrc, %eax
-; LINUX-32-STATIC-NEXT: movl %eax, ldst
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: foo06:
-; LINUX-32-PIC: movl lsrc, %eax
-; LINUX-32-PIC-NEXT: movl %eax, ldst
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: foo06:
-; LINUX-64-PIC: movl lsrc(%rip), %eax
-; LINUX-64-PIC-NEXT: movl %eax, ldst(%rip)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _foo06:
-; DARWIN-32-STATIC: movl _lsrc, %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _ldst
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _foo06:
-; DARWIN-32-DYNAMIC: movl _lsrc, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, _ldst
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _foo06:
-; DARWIN-32-PIC: call L10$pb
-; DARWIN-32-PIC-NEXT: L10$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl _lsrc-L10$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, _ldst-L10$pb(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _foo06:
-; DARWIN-64-STATIC: movl _lsrc(%rip), %eax
-; DARWIN-64-STATIC-NEXT: movl %eax, _ldst(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _foo06:
-; DARWIN-64-DYNAMIC: movl _lsrc(%rip), %eax
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, _ldst(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _foo06:
-; DARWIN-64-PIC: movl _lsrc(%rip), %eax
-; DARWIN-64-PIC-NEXT: movl %eax, _ldst(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @foo07() nounwind {
-entry:
- store i32* getelementptr ([131072 x i32]* @ldst, i32 0, i32 0), i32** @lptr, align 8
- ret void
-; LINUX-64-STATIC: foo07:
-; LINUX-64-STATIC: movq $ldst, lptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: foo07:
-; LINUX-32-STATIC: movl $ldst, lptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: foo07:
-; LINUX-32-PIC: movl $ldst, lptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: foo07:
-; LINUX-64-PIC: leaq ldst(%rip), %rax
-; LINUX-64-PIC-NEXT: movq %rax, lptr(%rip)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _foo07:
-; DARWIN-32-STATIC: movl $_ldst, _lptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _foo07:
-; DARWIN-32-DYNAMIC: movl $_ldst, _lptr
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _foo07:
-; DARWIN-32-PIC: call L11$pb
-; DARWIN-32-PIC-NEXT: L11$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal _ldst-L11$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, _lptr-L11$pb(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _foo07:
-; DARWIN-64-STATIC: leaq _ldst(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _foo07:
-; DARWIN-64-DYNAMIC: leaq _ldst(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _foo07:
-; DARWIN-64-PIC: leaq _ldst(%rip), %rax
-; DARWIN-64-PIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @foo08() nounwind {
-entry:
- %0 = load i32** @lptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 0), align 4
- store i32 %1, i32* %0, align 4
- ret void
-; LINUX-64-STATIC: foo08:
-; LINUX-64-STATIC: movl lsrc(%rip), %
-; LINUX-64-STATIC: movq lptr(%rip), %
-; LINUX-64-STATIC: movl
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: foo08:
-; LINUX-32-STATIC: movl lsrc, %eax
-; LINUX-32-STATIC-NEXT: movl lptr, %ecx
-; LINUX-32-STATIC-NEXT: movl %eax, (%ecx)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: foo08:
-; LINUX-32-PIC: movl lsrc, %eax
-; LINUX-32-PIC-NEXT: movl lptr, %ecx
-; LINUX-32-PIC-NEXT: movl %eax, (%ecx)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: foo08:
-; LINUX-64-PIC: movl lsrc(%rip), %eax
-; LINUX-64-PIC-NEXT: movq lptr(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _foo08:
-; DARWIN-32-STATIC: movl _lsrc, %eax
-; DARWIN-32-STATIC-NEXT: movl _lptr, %ecx
-; DARWIN-32-STATIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _foo08:
-; DARWIN-32-DYNAMIC: movl _lsrc, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _lptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _foo08:
-; DARWIN-32-PIC: call L12$pb
-; DARWIN-32-PIC-NEXT: L12$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl _lsrc-L12$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl _lptr-L12$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, (%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _foo08:
-; DARWIN-64-STATIC: movl _lsrc(%rip), %eax
-; DARWIN-64-STATIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _foo08:
-; DARWIN-64-DYNAMIC: movl _lsrc(%rip), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _foo08:
-; DARWIN-64-PIC: movl _lsrc(%rip), %eax
-; DARWIN-64-PIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, (%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @qux00() nounwind {
-entry:
- %0 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 16), align 4
- store i32 %0, i32* getelementptr ([131072 x i32]* @dst, i32 0, i64 16), align 4
- ret void
-; LINUX-64-STATIC: qux00:
-; LINUX-64-STATIC: movl src+64(%rip), %eax
-; LINUX-64-STATIC: movl %eax, dst+64(%rip)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: qux00:
-; LINUX-32-STATIC: movl src+64, %eax
-; LINUX-32-STATIC-NEXT: movl %eax, dst+64
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: qux00:
-; LINUX-32-PIC: movl src+64, %eax
-; LINUX-32-PIC-NEXT: movl %eax, dst+64
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: qux00:
-; LINUX-64-PIC: movq src at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 64(%rax), %eax
-; LINUX-64-PIC-NEXT: movq dst at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 64(%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _qux00:
-; DARWIN-32-STATIC: movl _src+64, %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _dst+64
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _qux00:
-; DARWIN-32-DYNAMIC: movl L_src$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl 64(%eax), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_dst$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, 64(%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _qux00:
-; DARWIN-32-PIC: call L13$pb
-; DARWIN-32-PIC-NEXT: L13$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L13$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl 64(%ecx), %ecx
-; DARWIN-32-PIC-NEXT: movl L_dst$non_lazy_ptr-L13$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, 64(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _qux00:
-; DARWIN-64-STATIC: movq _src at GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 64(%rax), %eax
-; DARWIN-64-STATIC-NEXT: movq _dst at GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _qux00:
-; DARWIN-64-DYNAMIC: movq _src at GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 64(%rax), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _dst at GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _qux00:
-; DARWIN-64-PIC: movq _src at GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 64(%rax), %eax
-; DARWIN-64-PIC-NEXT: movq _dst at GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @qxx00() nounwind {
-entry:
- %0 = load i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 16), align 4
- store i32 %0, i32* getelementptr ([32 x i32]* @xdst, i32 0, i64 16), align 4
- ret void
-; LINUX-64-STATIC: qxx00:
-; LINUX-64-STATIC: movl xsrc+64(%rip), %eax
-; LINUX-64-STATIC: movl %eax, xdst+64(%rip)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: qxx00:
-; LINUX-32-STATIC: movl xsrc+64, %eax
-; LINUX-32-STATIC-NEXT: movl %eax, xdst+64
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: qxx00:
-; LINUX-32-PIC: movl xsrc+64, %eax
-; LINUX-32-PIC-NEXT: movl %eax, xdst+64
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: qxx00:
-; LINUX-64-PIC: movq xsrc at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 64(%rax), %eax
-; LINUX-64-PIC-NEXT: movq xdst at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 64(%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _qxx00:
-; DARWIN-32-STATIC: movl _xsrc+64, %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _xdst+64
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _qxx00:
-; DARWIN-32-DYNAMIC: movl L_xsrc$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl 64(%eax), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_xdst$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, 64(%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _qxx00:
-; DARWIN-32-PIC: call L14$pb
-; DARWIN-32-PIC-NEXT: L14$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_xsrc$non_lazy_ptr-L14$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl 64(%ecx), %ecx
-; DARWIN-32-PIC-NEXT: movl L_xdst$non_lazy_ptr-L14$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, 64(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _qxx00:
-; DARWIN-64-STATIC: movq _xsrc at GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 64(%rax), %eax
-; DARWIN-64-STATIC-NEXT: movq _xdst at GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _qxx00:
-; DARWIN-64-DYNAMIC: movq _xsrc at GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 64(%rax), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _xdst at GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _qxx00:
-; DARWIN-64-PIC: movq _xsrc at GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 64(%rax), %eax
-; DARWIN-64-PIC-NEXT: movq _xdst at GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @qux01() nounwind {
-entry:
- store i32* getelementptr ([131072 x i32]* @dst, i32 0, i64 16), i32** @ptr, align 8
- ret void
-; LINUX-64-STATIC: qux01:
-; LINUX-64-STATIC: movq $dst+64, ptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: qux01:
-; LINUX-32-STATIC: movl $dst+64, ptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: qux01:
-; LINUX-32-PIC: movl $dst+64, ptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: qux01:
-; LINUX-64-PIC: movq dst at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: addq $64, %rax
-; LINUX-64-PIC-NEXT: movq ptr at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq %rax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _qux01:
-; DARWIN-32-STATIC: movl $_dst+64, _ptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _qux01:
-; DARWIN-32-DYNAMIC: movl L_dst$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl $64, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _qux01:
-; DARWIN-32-PIC: call L15$pb
-; DARWIN-32-PIC-NEXT: L15$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_dst$non_lazy_ptr-L15$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: addl $64, %ecx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L15$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, (%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _qux01:
-; DARWIN-64-STATIC: movq _dst at GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: addq $64, %rax
-; DARWIN-64-STATIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _qux01:
-; DARWIN-64-DYNAMIC: movq _dst at GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: addq $64, %rax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _qux01:
-; DARWIN-64-PIC: movq _dst at GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: addq $64, %rax
-; DARWIN-64-PIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @qxx01() nounwind {
-entry:
- store i32* getelementptr ([32 x i32]* @xdst, i32 0, i64 16), i32** @ptr, align 8
- ret void
-; LINUX-64-STATIC: qxx01:
-; LINUX-64-STATIC: movq $xdst+64, ptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: qxx01:
-; LINUX-32-STATIC: movl $xdst+64, ptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: qxx01:
-; LINUX-32-PIC: movl $xdst+64, ptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: qxx01:
-; LINUX-64-PIC: movq xdst at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: addq $64, %rax
-; LINUX-64-PIC-NEXT: movq ptr at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq %rax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _qxx01:
-; DARWIN-32-STATIC: movl $_xdst+64, _ptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _qxx01:
-; DARWIN-32-DYNAMIC: movl L_xdst$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl $64, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _qxx01:
-; DARWIN-32-PIC: call L16$pb
-; DARWIN-32-PIC-NEXT: L16$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_xdst$non_lazy_ptr-L16$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: addl $64, %ecx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L16$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, (%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _qxx01:
-; DARWIN-64-STATIC: movq _xdst at GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: addq $64, %rax
-; DARWIN-64-STATIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _qxx01:
-; DARWIN-64-DYNAMIC: movq _xdst at GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: addq $64, %rax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _qxx01:
-; DARWIN-64-PIC: movq _xdst at GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: addq $64, %rax
-; DARWIN-64-PIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @qux02() nounwind {
-entry:
- %0 = load i32** @ptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 16), align 4
- %2 = getelementptr i32* %0, i64 16
- store i32 %1, i32* %2, align 4
- ret void
-; LINUX-64-STATIC: qux02:
-; LINUX-64-STATIC: movl src+64(%rip), %eax
-; LINUX-64-STATIC: movq ptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, 64(%rcx)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: qux02:
-; LINUX-32-STATIC: movl src+64, %eax
-; LINUX-32-STATIC-NEXT: movl ptr, %ecx
-; LINUX-32-STATIC-NEXT: movl %eax, 64(%ecx)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: qux02:
-; LINUX-32-PIC: movl src+64, %eax
-; LINUX-32-PIC-NEXT: movl ptr, %ecx
-; LINUX-32-PIC-NEXT: movl %eax, 64(%ecx)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: qux02:
-; LINUX-64-PIC: movq src at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 64(%rax), %eax
-; LINUX-64-PIC-NEXT: movq ptr at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq (%rcx), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 64(%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _qux02:
-; DARWIN-32-STATIC: movl _src+64, %eax
-; DARWIN-32-STATIC-NEXT: movl _ptr, %ecx
-; DARWIN-32-STATIC-NEXT: movl %eax, 64(%ecx)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _qux02:
-; DARWIN-32-DYNAMIC: movl L_src$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl 64(%eax), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl (%ecx), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, 64(%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _qux02:
-; DARWIN-32-PIC: call L17$pb
-; DARWIN-32-PIC-NEXT: L17$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L17$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl 64(%ecx), %ecx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L17$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl (%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, 64(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _qux02:
-; DARWIN-64-STATIC: movq _src at GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 64(%rax), %eax
-; DARWIN-64-STATIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _qux02:
-; DARWIN-64-DYNAMIC: movq _src at GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 64(%rax), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _qux02:
-; DARWIN-64-PIC: movq _src at GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 64(%rax), %eax
-; DARWIN-64-PIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @qxx02() nounwind {
-entry:
- %0 = load i32** @ptr, align 8
- %1 = load i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 16), align 4
- %2 = getelementptr i32* %0, i64 16
- store i32 %1, i32* %2, align 4
- ret void
-; LINUX-64-STATIC: qxx02:
-; LINUX-64-STATIC: movl xsrc+64(%rip), %eax
-; LINUX-64-STATIC: movq ptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, 64(%rcx)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: qxx02:
-; LINUX-32-STATIC: movl xsrc+64, %eax
-; LINUX-32-STATIC-NEXT: movl ptr, %ecx
-; LINUX-32-STATIC-NEXT: movl %eax, 64(%ecx)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: qxx02:
-; LINUX-32-PIC: movl xsrc+64, %eax
-; LINUX-32-PIC-NEXT: movl ptr, %ecx
-; LINUX-32-PIC-NEXT: movl %eax, 64(%ecx)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: qxx02:
-; LINUX-64-PIC: movq xsrc at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 64(%rax), %eax
-; LINUX-64-PIC-NEXT: movq ptr at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq (%rcx), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 64(%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _qxx02:
-; DARWIN-32-STATIC: movl _xsrc+64, %eax
-; DARWIN-32-STATIC-NEXT: movl _ptr, %ecx
-; DARWIN-32-STATIC-NEXT: movl %eax, 64(%ecx)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _qxx02:
-; DARWIN-32-DYNAMIC: movl L_xsrc$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl 64(%eax), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl (%ecx), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, 64(%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _qxx02:
-; DARWIN-32-PIC: call L18$pb
-; DARWIN-32-PIC-NEXT: L18$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_xsrc$non_lazy_ptr-L18$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl 64(%ecx), %ecx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L18$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl (%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, 64(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _qxx02:
-; DARWIN-64-STATIC: movq _xsrc at GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 64(%rax), %eax
-; DARWIN-64-STATIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _qxx02:
-; DARWIN-64-DYNAMIC: movq _xsrc at GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 64(%rax), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _qxx02:
-; DARWIN-64-PIC: movq _xsrc at GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 64(%rax), %eax
-; DARWIN-64-PIC-NEXT: movq _ptr at GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @qux03() nounwind {
-entry:
- %0 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 16), align 32
- store i32 %0, i32* getelementptr ([131072 x i32]* @ddst, i32 0, i64 16), align 32
- ret void
-; LINUX-64-STATIC: qux03:
-; LINUX-64-STATIC: movl dsrc+64(%rip), %eax
-; LINUX-64-STATIC: movl %eax, ddst+64(%rip)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: qux03:
-; LINUX-32-STATIC: movl dsrc+64, %eax
-; LINUX-32-STATIC-NEXT: movl %eax, ddst+64
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: qux03:
-; LINUX-32-PIC: movl dsrc+64, %eax
-; LINUX-32-PIC-NEXT: movl %eax, ddst+64
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: qux03:
-; LINUX-64-PIC: movq dsrc at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 64(%rax), %eax
-; LINUX-64-PIC-NEXT: movq ddst at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 64(%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _qux03:
-; DARWIN-32-STATIC: movl _dsrc+64, %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _ddst+64
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _qux03:
-; DARWIN-32-DYNAMIC: movl _dsrc+64, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, _ddst+64
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _qux03:
-; DARWIN-32-PIC: call L19$pb
-; DARWIN-32-PIC-NEXT: L19$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl (_dsrc-L19$pb)+64(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, (_ddst-L19$pb)+64(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _qux03:
-; DARWIN-64-STATIC: movl _dsrc+64(%rip), %eax
-; DARWIN-64-STATIC-NEXT: movl %eax, _ddst+64(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _qux03:
-; DARWIN-64-DYNAMIC: movl _dsrc+64(%rip), %eax
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, _ddst+64(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _qux03:
-; DARWIN-64-PIC: movl _dsrc+64(%rip), %eax
-; DARWIN-64-PIC-NEXT: movl %eax, _ddst+64(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @qux04() nounwind {
-entry:
- store i32* getelementptr ([131072 x i32]* @ddst, i32 0, i64 16), i32** @dptr, align 8
- ret void
-; LINUX-64-STATIC: qux04:
-; LINUX-64-STATIC: movq $ddst+64, dptr(%rip)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: qux04:
-; LINUX-32-STATIC: movl $ddst+64, dptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: qux04:
-; LINUX-32-PIC: movl $ddst+64, dptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: qux04:
-; LINUX-64-PIC: movq ddst at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: addq $64, %rax
-; LINUX-64-PIC-NEXT: movq dptr at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq %rax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _qux04:
-; DARWIN-32-STATIC: movl $_ddst+64, _dptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _qux04:
-; DARWIN-32-DYNAMIC: movl $_ddst+64, _dptr
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _qux04:
-; DARWIN-32-PIC: call L20$pb
-; DARWIN-32-PIC-NEXT: L20$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal (_ddst-L20$pb)+64(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, _dptr-L20$pb(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _qux04:
-; DARWIN-64-STATIC: leaq _ddst+64(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _qux04:
-; DARWIN-64-DYNAMIC: leaq _ddst+64(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _qux04:
-; DARWIN-64-PIC: leaq _ddst+64(%rip), %rax
-; DARWIN-64-PIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @qux05() nounwind {
-entry:
- %0 = load i32** @dptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 16), align 32
- %2 = getelementptr i32* %0, i64 16
- store i32 %1, i32* %2, align 4
- ret void
-; LINUX-64-STATIC: qux05:
-; LINUX-64-STATIC: movl dsrc+64(%rip), %eax
-; LINUX-64-STATIC: movq dptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, 64(%rcx)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: qux05:
-; LINUX-32-STATIC: movl dsrc+64, %eax
-; LINUX-32-STATIC-NEXT: movl dptr, %ecx
-; LINUX-32-STATIC-NEXT: movl %eax, 64(%ecx)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: qux05:
-; LINUX-32-PIC: movl dsrc+64, %eax
-; LINUX-32-PIC-NEXT: movl dptr, %ecx
-; LINUX-32-PIC-NEXT: movl %eax, 64(%ecx)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: qux05:
-; LINUX-64-PIC: movq dsrc at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 64(%rax), %eax
-; LINUX-64-PIC-NEXT: movq dptr at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq (%rcx), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 64(%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _qux05:
-; DARWIN-32-STATIC: movl _dsrc+64, %eax
-; DARWIN-32-STATIC-NEXT: movl _dptr, %ecx
-; DARWIN-32-STATIC-NEXT: movl %eax, 64(%ecx)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _qux05:
-; DARWIN-32-DYNAMIC: movl _dsrc+64, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _dptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, 64(%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _qux05:
-; DARWIN-32-PIC: call L21$pb
-; DARWIN-32-PIC-NEXT: L21$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl (_dsrc-L21$pb)+64(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl _dptr-L21$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, 64(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _qux05:
-; DARWIN-64-STATIC: movl _dsrc+64(%rip), %eax
-; DARWIN-64-STATIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _qux05:
-; DARWIN-64-DYNAMIC: movl _dsrc+64(%rip), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _qux05:
-; DARWIN-64-PIC: movl _dsrc+64(%rip), %eax
-; DARWIN-64-PIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @qux06() nounwind {
-entry:
- %0 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 16), align 4
- store i32 %0, i32* getelementptr ([131072 x i32]* @ldst, i32 0, i64 16), align 4
- ret void
-; LINUX-64-STATIC: qux06:
-; LINUX-64-STATIC: movl lsrc+64(%rip), %eax
-; LINUX-64-STATIC: movl %eax, ldst+64
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: qux06:
-; LINUX-32-STATIC: movl lsrc+64, %eax
-; LINUX-32-STATIC-NEXT: movl %eax, ldst+64
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: qux06:
-; LINUX-32-PIC: movl lsrc+64, %eax
-; LINUX-32-PIC-NEXT: movl %eax, ldst+64
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: qux06:
-; LINUX-64-PIC: movl lsrc+64(%rip), %eax
-; LINUX-64-PIC-NEXT: movl %eax, ldst+64(%rip)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _qux06:
-; DARWIN-32-STATIC: movl _lsrc+64, %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _ldst+64
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _qux06:
-; DARWIN-32-DYNAMIC: movl _lsrc+64, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, _ldst+64
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _qux06:
-; DARWIN-32-PIC: call L22$pb
-; DARWIN-32-PIC-NEXT: L22$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl (_lsrc-L22$pb)+64(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, (_ldst-L22$pb)+64(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _qux06:
-; DARWIN-64-STATIC: movl _lsrc+64(%rip), %eax
-; DARWIN-64-STATIC-NEXT: movl %eax, _ldst+64(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _qux06:
-; DARWIN-64-DYNAMIC: movl _lsrc+64(%rip), %eax
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, _ldst+64(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _qux06:
-; DARWIN-64-PIC: movl _lsrc+64(%rip), %eax
-; DARWIN-64-PIC-NEXT: movl %eax, _ldst+64(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @qux07() nounwind {
-entry:
- store i32* getelementptr ([131072 x i32]* @ldst, i32 0, i64 16), i32** @lptr, align 8
- ret void
-; LINUX-64-STATIC: qux07:
-; LINUX-64-STATIC: movq $ldst+64, lptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: qux07:
-; LINUX-32-STATIC: movl $ldst+64, lptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: qux07:
-; LINUX-32-PIC: movl $ldst+64, lptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: qux07:
-; LINUX-64-PIC: leaq ldst+64(%rip), %rax
-; LINUX-64-PIC-NEXT: movq %rax, lptr(%rip)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _qux07:
-; DARWIN-32-STATIC: movl $_ldst+64, _lptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _qux07:
-; DARWIN-32-DYNAMIC: movl $_ldst+64, _lptr
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _qux07:
-; DARWIN-32-PIC: call L23$pb
-; DARWIN-32-PIC-NEXT: L23$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal (_ldst-L23$pb)+64(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, _lptr-L23$pb(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _qux07:
-; DARWIN-64-STATIC: leaq _ldst+64(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _qux07:
-; DARWIN-64-DYNAMIC: leaq _ldst+64(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _qux07:
-; DARWIN-64-PIC: leaq _ldst+64(%rip), %rax
-; DARWIN-64-PIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @qux08() nounwind {
-entry:
- %0 = load i32** @lptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 16), align 4
- %2 = getelementptr i32* %0, i64 16
- store i32 %1, i32* %2, align 4
- ret void
-; LINUX-64-STATIC: qux08:
-; LINUX-64-STATIC: movl lsrc+64(%rip), %eax
-; LINUX-64-STATIC: movq lptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, 64(%rcx)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: qux08:
-; LINUX-32-STATIC: movl lsrc+64, %eax
-; LINUX-32-STATIC-NEXT: movl lptr, %ecx
-; LINUX-32-STATIC-NEXT: movl %eax, 64(%ecx)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: qux08:
-; LINUX-32-PIC: movl lsrc+64, %eax
-; LINUX-32-PIC-NEXT: movl lptr, %ecx
-; LINUX-32-PIC-NEXT: movl %eax, 64(%ecx)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: qux08:
-; LINUX-64-PIC: movl lsrc+64(%rip), %eax
-; LINUX-64-PIC-NEXT: movq lptr(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 64(%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _qux08:
-; DARWIN-32-STATIC: movl _lsrc+64, %eax
-; DARWIN-32-STATIC-NEXT: movl _lptr, %ecx
-; DARWIN-32-STATIC-NEXT: movl %eax, 64(%ecx)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _qux08:
-; DARWIN-32-DYNAMIC: movl _lsrc+64, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _lptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, 64(%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _qux08:
-; DARWIN-32-PIC: call L24$pb
-; DARWIN-32-PIC-NEXT: L24$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl (_lsrc-L24$pb)+64(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl _lptr-L24$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, 64(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _qux08:
-; DARWIN-64-STATIC: movl _lsrc+64(%rip), %eax
-; DARWIN-64-STATIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _qux08:
-; DARWIN-64-DYNAMIC: movl _lsrc+64(%rip), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _qux08:
-; DARWIN-64-PIC: movl _lsrc+64(%rip), %eax
-; DARWIN-64-PIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 64(%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @ind00(i64 %i) nounwind {
-entry:
- %0 = getelementptr [131072 x i32]* @src, i64 0, i64 %i
- %1 = load i32* %0, align 4
- %2 = getelementptr [131072 x i32]* @dst, i64 0, i64 %i
- store i32 %1, i32* %2, align 4
- ret void
-; LINUX-64-STATIC: ind00:
-; LINUX-64-STATIC: movl src(,%rdi,4), %eax
-; LINUX-64-STATIC: movl %eax, dst(,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: ind00:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl src(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl %ecx, dst(,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: ind00:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl src(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl %ecx, dst(,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: ind00:
-; LINUX-64-PIC: movq src at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl (%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq dst at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _ind00:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _src(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl %ecx, _dst(,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _ind00:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_src$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl (%ecx,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl L_dst$non_lazy_ptr, %edx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, (%edx,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _ind00:
-; DARWIN-32-PIC: call L25$pb
-; DARWIN-32-PIC-NEXT: L25$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L25$pb(%eax), %edx
-; DARWIN-32-PIC-NEXT: movl (%edx,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl L_dst$non_lazy_ptr-L25$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %edx, (%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _ind00:
-; DARWIN-64-STATIC: movq _src at GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: movq _dst at GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _ind00:
-; DARWIN-64-DYNAMIC: movq _src at GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _dst at GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _ind00:
-; DARWIN-64-PIC: movq _src at GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: movq _dst at GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @ixd00(i64 %i) nounwind {
-entry:
- %0 = getelementptr [32 x i32]* @xsrc, i64 0, i64 %i
- %1 = load i32* %0, align 4
- %2 = getelementptr [32 x i32]* @xdst, i64 0, i64 %i
- store i32 %1, i32* %2, align 4
- ret void
-; LINUX-64-STATIC: ixd00:
-; LINUX-64-STATIC: movl xsrc(,%rdi,4), %eax
-; LINUX-64-STATIC: movl %eax, xdst(,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: ixd00:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl xsrc(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl %ecx, xdst(,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: ixd00:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl xsrc(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl %ecx, xdst(,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: ixd00:
-; LINUX-64-PIC: movq xsrc at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl (%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq xdst at GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _ixd00:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _xsrc(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl %ecx, _xdst(,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _ixd00:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_xsrc$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl (%ecx,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl L_xdst$non_lazy_ptr, %edx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, (%edx,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _ixd00:
-; DARWIN-32-PIC: call L26$pb
-; DARWIN-32-PIC-NEXT: L26$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_xsrc$non_lazy_ptr-L26$pb(%eax), %edx
-; DARWIN-32-PIC-NEXT: movl (%edx,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl L_xdst$non_lazy_ptr-L26$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %edx, (%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _ixd00:
-; DARWIN-64-STATIC: movq _xsrc at GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: movq _xdst at GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _ixd00:
-; DARWIN-64-DYNAMIC: movq _xsrc at GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _xdst at GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _ixd00:
-; DARWIN-64-PIC: movq _xsrc at GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: movq _xdst at GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @ind01(i64 %i) nounwind {
-entry:
- %0 = getelementptr [131072 x i32]* @dst, i64 0, i64 %i
- store i32* %0, i32** @ptr, align 8
- ret void
-; LINUX-64-STATIC: ind01:
-; LINUX-64-STATIC: leaq dst(,%rdi,4), %rax
-; LINUX-64-STATIC: movq %rax, ptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: ind01:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal dst(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: movl %eax, ptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: ind01:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal dst(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: movl %eax, ptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: ind01:
-; LINUX-64-PIC: shlq $2, %rdi
-; LINUX-64-PIC-NEXT: addq dst at GOTPCREL(%rip), %rdi
-; LINUX-64-PIC-NEXT: movq ptr at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movq %rdi, (%rax)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _ind01:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _dst(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _ptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _ind01:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: shll $2, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl L_dst$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _ind01:
-; DARWIN-32-PIC: call L27$pb
-; DARWIN-32-PIC-NEXT: L27$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: shll $2, %ecx
-; DARWIN-32-PIC-NEXT: addl L_dst$non_lazy_ptr-L27$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L27$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, (%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _ind01:
-; DARWIN-64-STATIC: shlq $2, %rdi
-; DARWIN-64-STATIC-NEXT: addq _dst at GOTPCREL(%rip), %rdi
-; DARWIN-64-STATIC-NEXT: movq _ptr at GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movq %rdi, (%rax)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _ind01:
-; DARWIN-64-DYNAMIC: shlq $2, %rdi
-; DARWIN-64-DYNAMIC-NEXT: addq _dst at GOTPCREL(%rip), %rdi
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr at GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq %rdi, (%rax)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _ind01:
-; DARWIN-64-PIC: shlq $2, %rdi
-; DARWIN-64-PIC-NEXT: addq _dst at GOTPCREL(%rip), %rdi
-; DARWIN-64-PIC-NEXT: movq _ptr at GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movq %rdi, (%rax)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @ixd01(i64 %i) nounwind {
-entry:
- %0 = getelementptr [32 x i32]* @xdst, i64 0, i64 %i
- store i32* %0, i32** @ptr, align 8
- ret void
-; LINUX-64-STATIC: ixd01:
-; LINUX-64-STATIC: leaq xdst(,%rdi,4), %rax
-; LINUX-64-STATIC: movq %rax, ptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: ixd01:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal xdst(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: movl %eax, ptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: ixd01:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal xdst(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: movl %eax, ptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: ixd01:
-; LINUX-64-PIC: shlq $2, %rdi
-; LINUX-64-PIC-NEXT: addq xdst at GOTPCREL(%rip), %rdi
-; LINUX-64-PIC-NEXT: movq ptr at GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movq %rdi, (%rax)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _ixd01:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _xdst(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _ptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _ixd01:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: shll $2, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl L_xdst$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _ixd01:
-; DARWIN-32-PIC: call L28$pb
-; DARWIN-32-PIC-NEXT: L28$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: shll $2, %ecx
-; DARWIN-32-PIC-NEXT: addl L_xdst$non_lazy_ptr-L28$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L28$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, (%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _ixd01:
-; DARWIN-64-STATIC: shlq $2, %rdi
-; DARWIN-64-STATIC-NEXT: addq _xdst at GOTPCREL(%rip), %rdi
-; DARWIN-64-STATIC-NEXT: movq _ptr at GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movq %rdi, (%rax)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _ixd01:
-; DARWIN-64-DYNAMIC: shlq $2, %rdi
-; DARWIN-64-DYNAMIC-NEXT: addq _xdst at GOTPCREL(%rip), %rdi
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr at GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq %rdi, (%rax)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _ixd01:
-; DARWIN-64-PIC: shlq $2, %rdi
-; DARWIN-64-PIC-NEXT: addq _xdst at GOTPCREL(%rip), %rdi
-; DARWIN-64-PIC-NEXT: movq _ptr at GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movq %rdi, (%rax)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @ind02(i64 %i) nounwind {
-entry:
- %0 = load i32** @ptr, align 8
- %1 = getelementptr [131072 x i32]* @src, i64 0, i64 %i
- %2 = load i32* %1, align 4
- %3 = getelementptr i32* %0, i64 %i
- store i32 %2, i32* %3, align 4
- ret void
-; LINUX-64-STATIC: ind02:
-; LINUX-64-STATIC: movl src(,%rdi,4), %eax
-; LINUX-64-STATIC: movq ptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, (%rcx,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: ind02:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl src(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl ptr, %edx
-; LINUX-32-STATIC-NEXT: movl %ecx, (%edx,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: ind02:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl src(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl ptr, %edx
-; LINUX-32-PIC-NEXT: movl %ecx, (%edx,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: ind02:
-; LINUX-64-PIC: movq src@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl (%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq ptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq (%rcx), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _ind02:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _src(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl _ptr, %edx
-; DARWIN-32-STATIC-NEXT: movl %ecx, (%edx,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _ind02:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_src$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl (%ecx,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %edx
-; DARWIN-32-DYNAMIC-NEXT: movl (%edx), %edx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, (%edx,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _ind02:
-; DARWIN-32-PIC: call L29$pb
-; DARWIN-32-PIC-NEXT: L29$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L29$pb(%eax), %edx
-; DARWIN-32-PIC-NEXT: movl (%edx,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L29$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl (%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %edx, (%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _ind02:
-; DARWIN-64-STATIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _ind02:
-; DARWIN-64-DYNAMIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _ind02:
-; DARWIN-64-PIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @ixd02(i64 %i) nounwind {
-entry:
- %0 = load i32** @ptr, align 8
- %1 = getelementptr [32 x i32]* @xsrc, i64 0, i64 %i
- %2 = load i32* %1, align 4
- %3 = getelementptr i32* %0, i64 %i
- store i32 %2, i32* %3, align 4
- ret void
-; LINUX-64-STATIC: ixd02:
-; LINUX-64-STATIC: movl xsrc(,%rdi,4), %eax
-; LINUX-64-STATIC: movq ptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, (%rcx,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: ixd02:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl xsrc(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl ptr, %edx
-; LINUX-32-STATIC-NEXT: movl %ecx, (%edx,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: ixd02:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl xsrc(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl ptr, %edx
-; LINUX-32-PIC-NEXT: movl %ecx, (%edx,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: ixd02:
-; LINUX-64-PIC: movq xsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl (%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq ptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq (%rcx), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _ixd02:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _xsrc(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl _ptr, %edx
-; DARWIN-32-STATIC-NEXT: movl %ecx, (%edx,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _ixd02:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_xsrc$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl (%ecx,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %edx
-; DARWIN-32-DYNAMIC-NEXT: movl (%edx), %edx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, (%edx,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _ixd02:
-; DARWIN-32-PIC: call L30$pb
-; DARWIN-32-PIC-NEXT: L30$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_xsrc$non_lazy_ptr-L30$pb(%eax), %edx
-; DARWIN-32-PIC-NEXT: movl (%edx,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L30$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl (%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %edx, (%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _ixd02:
-; DARWIN-64-STATIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _ixd02:
-; DARWIN-64-DYNAMIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _ixd02:
-; DARWIN-64-PIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @ind03(i64 %i) nounwind {
-entry:
- %0 = getelementptr [131072 x i32]* @dsrc, i64 0, i64 %i
- %1 = load i32* %0, align 4
- %2 = getelementptr [131072 x i32]* @ddst, i64 0, i64 %i
- store i32 %1, i32* %2, align 4
- ret void
-; LINUX-64-STATIC: ind03:
-; LINUX-64-STATIC: movl dsrc(,%rdi,4), %eax
-; LINUX-64-STATIC: movl %eax, ddst(,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: ind03:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl dsrc(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl %ecx, ddst(,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: ind03:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl dsrc(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl %ecx, ddst(,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: ind03:
-; LINUX-64-PIC: movq dsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl (%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq ddst@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _ind03:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _dsrc(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl %ecx, _ddst(,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _ind03:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _dsrc(,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, _ddst(,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _ind03:
-; DARWIN-32-PIC: call L31$pb
-; DARWIN-32-PIC-NEXT: L31$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl _dsrc-L31$pb(%eax,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl %edx, _ddst-L31$pb(%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _ind03:
-; DARWIN-64-STATIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: leaq _ddst(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _ind03:
-; DARWIN-64-DYNAMIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: leaq _ddst(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _ind03:
-; DARWIN-64-PIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: leaq _ddst(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @ind04(i64 %i) nounwind {
-entry:
- %0 = getelementptr [131072 x i32]* @ddst, i64 0, i64 %i
- store i32* %0, i32** @dptr, align 8
- ret void
-; LINUX-64-STATIC: ind04:
-; LINUX-64-STATIC: leaq ddst(,%rdi,4), %rax
-; LINUX-64-STATIC: movq %rax, dptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: ind04:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal ddst(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: movl %eax, dptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: ind04:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal ddst(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: movl %eax, dptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: ind04:
-; LINUX-64-PIC: shlq $2, %rdi
-; LINUX-64-PIC-NEXT: addq ddst@GOTPCREL(%rip), %rdi
-; LINUX-64-PIC-NEXT: movq dptr@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movq %rdi, (%rax)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _ind04:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _ddst(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _dptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _ind04:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: leal _ddst(,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, _dptr
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _ind04:
-; DARWIN-32-PIC: call L32$pb
-; DARWIN-32-PIC-NEXT: L32$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: leal _ddst-L32$pb(%eax,%ecx,4), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, _dptr-L32$pb(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _ind04:
-; DARWIN-64-STATIC: leaq _ddst(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq (%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _ind04:
-; DARWIN-64-DYNAMIC: leaq _ddst(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq (%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _ind04:
-; DARWIN-64-PIC: leaq _ddst(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq (%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @ind05(i64 %i) nounwind {
-entry:
- %0 = load i32** @dptr, align 8
- %1 = getelementptr [131072 x i32]* @dsrc, i64 0, i64 %i
- %2 = load i32* %1, align 4
- %3 = getelementptr i32* %0, i64 %i
- store i32 %2, i32* %3, align 4
- ret void
-; LINUX-64-STATIC: ind05:
-; LINUX-64-STATIC: movl dsrc(,%rdi,4), %eax
-; LINUX-64-STATIC: movq dptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, (%rcx,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: ind05:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl dsrc(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl dptr, %edx
-; LINUX-32-STATIC-NEXT: movl %ecx, (%edx,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: ind05:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl dsrc(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl dptr, %edx
-; LINUX-32-PIC-NEXT: movl %ecx, (%edx,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: ind05:
-; LINUX-64-PIC: movq dsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl (%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq dptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq (%rcx), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _ind05:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _dsrc(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl _dptr, %edx
-; DARWIN-32-STATIC-NEXT: movl %ecx, (%edx,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _ind05:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _dsrc(,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl _dptr, %edx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, (%edx,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _ind05:
-; DARWIN-32-PIC: call L33$pb
-; DARWIN-32-PIC-NEXT: L33$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl _dsrc-L33$pb(%eax,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl _dptr-L33$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %edx, (%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _ind05:
-; DARWIN-64-STATIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _ind05:
-; DARWIN-64-DYNAMIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _ind05:
-; DARWIN-64-PIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @ind06(i64 %i) nounwind {
-entry:
- %0 = getelementptr [131072 x i32]* @lsrc, i64 0, i64 %i
- %1 = load i32* %0, align 4
- %2 = getelementptr [131072 x i32]* @ldst, i64 0, i64 %i
- store i32 %1, i32* %2, align 4
- ret void
-; LINUX-64-STATIC: ind06:
-; LINUX-64-STATIC: movl lsrc(,%rdi,4), %eax
-; LINUX-64-STATIC: movl %eax, ldst(,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: ind06:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl lsrc(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl %ecx, ldst(,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: ind06:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl lsrc(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl %ecx, ldst(,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: ind06:
-; LINUX-64-PIC: leaq lsrc(%rip), %rax
-; LINUX-64-PIC-NEXT: movl (%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: leaq ldst(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _ind06:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _lsrc(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl %ecx, _ldst(,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _ind06:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _lsrc(,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, _ldst(,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _ind06:
-; DARWIN-32-PIC: call L34$pb
-; DARWIN-32-PIC-NEXT: L34$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl _lsrc-L34$pb(%eax,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl %edx, _ldst-L34$pb(%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _ind06:
-; DARWIN-64-STATIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: leaq _ldst(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _ind06:
-; DARWIN-64-DYNAMIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: leaq _ldst(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _ind06:
-; DARWIN-64-PIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: leaq _ldst(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @ind07(i64 %i) nounwind {
-entry:
- %0 = getelementptr [131072 x i32]* @ldst, i64 0, i64 %i
- store i32* %0, i32** @lptr, align 8
- ret void
-; LINUX-64-STATIC: ind07:
-; LINUX-64-STATIC: leaq ldst(,%rdi,4), %rax
-; LINUX-64-STATIC: movq %rax, lptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: ind07:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal ldst(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: movl %eax, lptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: ind07:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal ldst(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: movl %eax, lptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: ind07:
-; LINUX-64-PIC: leaq ldst(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq (%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: movq %rax, lptr(%rip)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _ind07:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _ldst(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _lptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _ind07:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: leal _ldst(,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, _lptr
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _ind07:
-; DARWIN-32-PIC: call L35$pb
-; DARWIN-32-PIC-NEXT: L35$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: leal _ldst-L35$pb(%eax,%ecx,4), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, _lptr-L35$pb(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _ind07:
-; DARWIN-64-STATIC: leaq _ldst(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq (%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _ind07:
-; DARWIN-64-DYNAMIC: leaq _ldst(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq (%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _ind07:
-; DARWIN-64-PIC: leaq _ldst(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq (%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @ind08(i64 %i) nounwind {
-entry:
- %0 = load i32** @lptr, align 8
- %1 = getelementptr [131072 x i32]* @lsrc, i64 0, i64 %i
- %2 = load i32* %1, align 4
- %3 = getelementptr i32* %0, i64 %i
- store i32 %2, i32* %3, align 4
- ret void
-; LINUX-64-STATIC: ind08:
-; LINUX-64-STATIC: movl lsrc(,%rdi,4), %eax
-; LINUX-64-STATIC: movq lptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, (%rcx,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: ind08:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl lsrc(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl lptr, %edx
-; LINUX-32-STATIC-NEXT: movl %ecx, (%edx,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: ind08:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl lsrc(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl lptr, %edx
-; LINUX-32-PIC-NEXT: movl %ecx, (%edx,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: ind08:
-; LINUX-64-PIC: leaq lsrc(%rip), %rax
-; LINUX-64-PIC-NEXT: movl (%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq lptr(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _ind08:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _lsrc(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl _lptr, %edx
-; DARWIN-32-STATIC-NEXT: movl %ecx, (%edx,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _ind08:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _lsrc(,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl _lptr, %edx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, (%edx,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _ind08:
-; DARWIN-32-PIC: call L36$pb
-; DARWIN-32-PIC-NEXT: L36$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl _lsrc-L36$pb(%eax,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl _lptr-L36$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %edx, (%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _ind08:
-; DARWIN-64-STATIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _ind08:
-; DARWIN-64-DYNAMIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _ind08:
-; DARWIN-64-PIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl (%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, (%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @off00(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @src, i64 0, i64 %0
- %2 = load i32* %1, align 4
- %3 = getelementptr [131072 x i32]* @dst, i64 0, i64 %0
- store i32 %2, i32* %3, align 4
- ret void
-; LINUX-64-STATIC: off00:
-; LINUX-64-STATIC: movl src+64(,%rdi,4), %eax
-; LINUX-64-STATIC: movl %eax, dst+64(,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: off00:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl src+64(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl %ecx, dst+64(,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: off00:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl src+64(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl %ecx, dst+64(,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: off00:
-; LINUX-64-PIC: movq src@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq dst@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _off00:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _src+64(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl %ecx, _dst+64(,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _off00:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_src$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl 64(%ecx,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl L_dst$non_lazy_ptr, %edx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _off00:
-; DARWIN-32-PIC: call L37$pb
-; DARWIN-32-PIC-NEXT: L37$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L37$pb(%eax), %edx
-; DARWIN-32-PIC-NEXT: movl 64(%edx,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl L_dst$non_lazy_ptr-L37$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %edx, 64(%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _off00:
-; DARWIN-64-STATIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: movq _dst@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _off00:
-; DARWIN-64-DYNAMIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _dst@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _off00:
-; DARWIN-64-PIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: movq _dst@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @oxf00(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 16
- %1 = getelementptr [32 x i32]* @xsrc, i64 0, i64 %0
- %2 = load i32* %1, align 4
- %3 = getelementptr [32 x i32]* @xdst, i64 0, i64 %0
- store i32 %2, i32* %3, align 4
- ret void
-; LINUX-64-STATIC: oxf00:
-; LINUX-64-STATIC: movl xsrc+64(,%rdi,4), %eax
-; LINUX-64-STATIC: movl %eax, xdst+64(,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: oxf00:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl xsrc+64(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl %ecx, xdst+64(,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: oxf00:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl xsrc+64(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl %ecx, xdst+64(,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: oxf00:
-; LINUX-64-PIC: movq xsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq xdst@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _oxf00:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _xsrc+64(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl %ecx, _xdst+64(,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _oxf00:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_xsrc$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl 64(%ecx,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl L_xdst$non_lazy_ptr, %edx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _oxf00:
-; DARWIN-32-PIC: call L38$pb
-; DARWIN-32-PIC-NEXT: L38$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_xsrc$non_lazy_ptr-L38$pb(%eax), %edx
-; DARWIN-32-PIC-NEXT: movl 64(%edx,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl L_xdst$non_lazy_ptr-L38$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %edx, 64(%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _oxf00:
-; DARWIN-64-STATIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: movq _xdst@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _oxf00:
-; DARWIN-64-DYNAMIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _xdst@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _oxf00:
-; DARWIN-64-PIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: movq _xdst@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @off01(i64 %i) nounwind {
-entry:
- %.sum = add i64 %i, 16
- %0 = getelementptr [131072 x i32]* @dst, i64 0, i64 %.sum
- store i32* %0, i32** @ptr, align 8
- ret void
-; LINUX-64-STATIC: off01:
-; LINUX-64-STATIC: leaq dst+64(,%rdi,4), %rax
-; LINUX-64-STATIC: movq %rax, ptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: off01:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal dst+64(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: movl %eax, ptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: off01:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal dst+64(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: movl %eax, ptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: off01:
-; LINUX-64-PIC: movq dst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: movq ptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq %rax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _off01:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _dst+64(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _ptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _off01:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_dst$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _off01:
-; DARWIN-32-PIC: call L39$pb
-; DARWIN-32-PIC-NEXT: L39$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_dst$non_lazy_ptr-L39$pb(%eax), %edx
-; DARWIN-32-PIC-NEXT: leal 64(%edx,%ecx,4), %ecx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L39$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, (%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _off01:
-; DARWIN-64-STATIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _off01:
-; DARWIN-64-DYNAMIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _off01:
-; DARWIN-64-PIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @oxf01(i64 %i) nounwind {
-entry:
- %.sum = add i64 %i, 16
- %0 = getelementptr [32 x i32]* @xdst, i64 0, i64 %.sum
- store i32* %0, i32** @ptr, align 8
- ret void
-; LINUX-64-STATIC: oxf01:
-; LINUX-64-STATIC: leaq xdst+64(,%rdi,4), %rax
-; LINUX-64-STATIC: movq %rax, ptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: oxf01:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal xdst+64(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: movl %eax, ptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: oxf01:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal xdst+64(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: movl %eax, ptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: oxf01:
-; LINUX-64-PIC: movq xdst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: movq ptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq %rax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _oxf01:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _xdst+64(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _ptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _oxf01:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_xdst$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _oxf01:
-; DARWIN-32-PIC: call L40$pb
-; DARWIN-32-PIC-NEXT: L40$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_xdst$non_lazy_ptr-L40$pb(%eax), %edx
-; DARWIN-32-PIC-NEXT: leal 64(%edx,%ecx,4), %ecx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L40$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, (%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _oxf01:
-; DARWIN-64-STATIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _oxf01:
-; DARWIN-64-DYNAMIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _oxf01:
-; DARWIN-64-PIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @off02(i64 %i) nounwind {
-entry:
- %0 = load i32** @ptr, align 8
- %1 = add i64 %i, 16
- %2 = getelementptr [131072 x i32]* @src, i64 0, i64 %1
- %3 = load i32* %2, align 4
- %4 = getelementptr i32* %0, i64 %1
- store i32 %3, i32* %4, align 4
- ret void
-; LINUX-64-STATIC: off02:
-; LINUX-64-STATIC: movl src+64(,%rdi,4), %eax
-; LINUX-64-STATIC: movq ptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, 64(%rcx,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: off02:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl src+64(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl ptr, %edx
-; LINUX-32-STATIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: off02:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl src+64(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl ptr, %edx
-; LINUX-32-PIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: off02:
-; LINUX-64-PIC: movq src@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq ptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq (%rcx), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _off02:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _src+64(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl _ptr, %edx
-; DARWIN-32-STATIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _off02:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_src$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl 64(%ecx,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %edx
-; DARWIN-32-DYNAMIC-NEXT: movl (%edx), %edx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _off02:
-; DARWIN-32-PIC: call L41$pb
-; DARWIN-32-PIC-NEXT: L41$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L41$pb(%eax), %edx
-; DARWIN-32-PIC-NEXT: movl 64(%edx,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L41$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl (%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %edx, 64(%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _off02:
-; DARWIN-64-STATIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _off02:
-; DARWIN-64-DYNAMIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _off02:
-; DARWIN-64-PIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @oxf02(i64 %i) nounwind {
-entry:
- %0 = load i32** @ptr, align 8
- %1 = add i64 %i, 16
- %2 = getelementptr [32 x i32]* @xsrc, i64 0, i64 %1
- %3 = load i32* %2, align 4
- %4 = getelementptr i32* %0, i64 %1
- store i32 %3, i32* %4, align 4
- ret void
-; LINUX-64-STATIC: oxf02:
-; LINUX-64-STATIC: movl xsrc+64(,%rdi,4), %eax
-; LINUX-64-STATIC: movq ptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, 64(%rcx,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: oxf02:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl xsrc+64(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl ptr, %edx
-; LINUX-32-STATIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: oxf02:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl xsrc+64(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl ptr, %edx
-; LINUX-32-PIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: oxf02:
-; LINUX-64-PIC: movq xsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq ptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq (%rcx), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _oxf02:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _xsrc+64(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl _ptr, %edx
-; DARWIN-32-STATIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _oxf02:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_xsrc$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl 64(%ecx,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %edx
-; DARWIN-32-DYNAMIC-NEXT: movl (%edx), %edx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _oxf02:
-; DARWIN-32-PIC: call L42$pb
-; DARWIN-32-PIC-NEXT: L42$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_xsrc$non_lazy_ptr-L42$pb(%eax), %edx
-; DARWIN-32-PIC-NEXT: movl 64(%edx,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L42$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl (%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %edx, 64(%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _oxf02:
-; DARWIN-64-STATIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _oxf02:
-; DARWIN-64-DYNAMIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _oxf02:
-; DARWIN-64-PIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @off03(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @dsrc, i64 0, i64 %0
- %2 = load i32* %1, align 4
- %3 = getelementptr [131072 x i32]* @ddst, i64 0, i64 %0
- store i32 %2, i32* %3, align 4
- ret void
-; LINUX-64-STATIC: off03:
-; LINUX-64-STATIC: movl dsrc+64(,%rdi,4), %eax
-; LINUX-64-STATIC: movl %eax, ddst+64(,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: off03:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl dsrc+64(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl %ecx, ddst+64(,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: off03:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl dsrc+64(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl %ecx, ddst+64(,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: off03:
-; LINUX-64-PIC: movq dsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq ddst@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _off03:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _dsrc+64(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl %ecx, _ddst+64(,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _off03:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _dsrc+64(,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, _ddst+64(,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _off03:
-; DARWIN-32-PIC: call L43$pb
-; DARWIN-32-PIC-NEXT: L43$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl (_dsrc-L43$pb)+64(%eax,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl %edx, (_ddst-L43$pb)+64(%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _off03:
-; DARWIN-64-STATIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: leaq _ddst(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _off03:
-; DARWIN-64-DYNAMIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: leaq _ddst(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _off03:
-; DARWIN-64-PIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: leaq _ddst(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @off04(i64 %i) nounwind {
-entry:
- %.sum = add i64 %i, 16
- %0 = getelementptr [131072 x i32]* @ddst, i64 0, i64 %.sum
- store i32* %0, i32** @dptr, align 8
- ret void
-; LINUX-64-STATIC: off04:
-; LINUX-64-STATIC: leaq ddst+64(,%rdi,4), %rax
-; LINUX-64-STATIC: movq %rax, dptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: off04:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal ddst+64(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: movl %eax, dptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: off04:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal ddst+64(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: movl %eax, dptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: off04:
-; LINUX-64-PIC: movq ddst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: movq dptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq %rax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _off04:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _ddst+64(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _dptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _off04:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: leal _ddst+64(,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, _dptr
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _off04:
-; DARWIN-32-PIC: call L44$pb
-; DARWIN-32-PIC-NEXT: L44$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: leal (_ddst-L44$pb)+64(%eax,%ecx,4), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, _dptr-L44$pb(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _off04:
-; DARWIN-64-STATIC: leaq _ddst(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _off04:
-; DARWIN-64-DYNAMIC: leaq _ddst(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _off04:
-; DARWIN-64-PIC: leaq _ddst(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @off05(i64 %i) nounwind {
-entry:
- %0 = load i32** @dptr, align 8
- %1 = add i64 %i, 16
- %2 = getelementptr [131072 x i32]* @dsrc, i64 0, i64 %1
- %3 = load i32* %2, align 4
- %4 = getelementptr i32* %0, i64 %1
- store i32 %3, i32* %4, align 4
- ret void
-; LINUX-64-STATIC: off05:
-; LINUX-64-STATIC: movl dsrc+64(,%rdi,4), %eax
-; LINUX-64-STATIC: movq dptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, 64(%rcx,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: off05:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl dsrc+64(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl dptr, %edx
-; LINUX-32-STATIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: off05:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl dsrc+64(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl dptr, %edx
-; LINUX-32-PIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: off05:
-; LINUX-64-PIC: movq dsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq dptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq (%rcx), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _off05:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _dsrc+64(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl _dptr, %edx
-; DARWIN-32-STATIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _off05:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _dsrc+64(,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl _dptr, %edx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _off05:
-; DARWIN-32-PIC: call L45$pb
-; DARWIN-32-PIC-NEXT: L45$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl (_dsrc-L45$pb)+64(%eax,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl _dptr-L45$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %edx, 64(%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _off05:
-; DARWIN-64-STATIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _off05:
-; DARWIN-64-DYNAMIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _off05:
-; DARWIN-64-PIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @off06(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @lsrc, i64 0, i64 %0
- %2 = load i32* %1, align 4
- %3 = getelementptr [131072 x i32]* @ldst, i64 0, i64 %0
- store i32 %2, i32* %3, align 4
- ret void
-; LINUX-64-STATIC: off06:
-; LINUX-64-STATIC: movl lsrc+64(,%rdi,4), %eax
-; LINUX-64-STATIC: movl %eax, ldst+64(,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: off06:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl lsrc+64(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl %ecx, ldst+64(,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: off06:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl lsrc+64(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl %ecx, ldst+64(,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: off06:
-; LINUX-64-PIC: leaq lsrc(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: leaq ldst(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _off06:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _lsrc+64(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl %ecx, _ldst+64(,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _off06:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _lsrc+64(,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, _ldst+64(,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _off06:
-; DARWIN-32-PIC: call L46$pb
-; DARWIN-32-PIC-NEXT: L46$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl (_lsrc-L46$pb)+64(%eax,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl %edx, (_ldst-L46$pb)+64(%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _off06:
-; DARWIN-64-STATIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: leaq _ldst(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _off06:
-; DARWIN-64-DYNAMIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: leaq _ldst(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _off06:
-; DARWIN-64-PIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: leaq _ldst(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @off07(i64 %i) nounwind {
-entry:
- %.sum = add i64 %i, 16
- %0 = getelementptr [131072 x i32]* @ldst, i64 0, i64 %.sum
- store i32* %0, i32** @lptr, align 8
- ret void
-; LINUX-64-STATIC: off07:
-; LINUX-64-STATIC: leaq ldst+64(,%rdi,4), %rax
-; LINUX-64-STATIC: movq %rax, lptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: off07:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal ldst+64(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: movl %eax, lptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: off07:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal ldst+64(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: movl %eax, lptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: off07:
-; LINUX-64-PIC: leaq ldst(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: movq %rax, lptr(%rip)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _off07:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _ldst+64(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _lptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _off07:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: leal _ldst+64(,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, _lptr
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _off07:
-; DARWIN-32-PIC: call L47$pb
-; DARWIN-32-PIC-NEXT: L47$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: leal (_ldst-L47$pb)+64(%eax,%ecx,4), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, _lptr-L47$pb(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _off07:
-; DARWIN-64-STATIC: leaq _ldst(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _off07:
-; DARWIN-64-DYNAMIC: leaq _ldst(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _off07:
-; DARWIN-64-PIC: leaq _ldst(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @off08(i64 %i) nounwind {
-entry:
- %0 = load i32** @lptr, align 8
- %1 = add i64 %i, 16
- %2 = getelementptr [131072 x i32]* @lsrc, i64 0, i64 %1
- %3 = load i32* %2, align 4
- %4 = getelementptr i32* %0, i64 %1
- store i32 %3, i32* %4, align 4
- ret void
-; LINUX-64-STATIC: off08:
-; LINUX-64-STATIC: movl lsrc+64(,%rdi,4), %eax
-; LINUX-64-STATIC: movq lptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, 64(%rcx,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: off08:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl lsrc+64(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl lptr, %edx
-; LINUX-32-STATIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: off08:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl lsrc+64(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl lptr, %edx
-; LINUX-32-PIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: off08:
-; LINUX-64-PIC: leaq lsrc(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq lptr(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _off08:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _lsrc+64(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl _lptr, %edx
-; DARWIN-32-STATIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _off08:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _lsrc+64(,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl _lptr, %edx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, 64(%edx,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _off08:
-; DARWIN-32-PIC: call L48$pb
-; DARWIN-32-PIC-NEXT: L48$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl (_lsrc-L48$pb)+64(%eax,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl _lptr-L48$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %edx, 64(%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _off08:
-; DARWIN-64-STATIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _off08:
-; DARWIN-64-DYNAMIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _off08:
-; DARWIN-64-PIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 64(%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 64(%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @moo00(i64 %i) nounwind {
-entry:
- %0 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 65536), align 4
- store i32 %0, i32* getelementptr ([131072 x i32]* @dst, i32 0, i64 65536), align 4
- ret void
-; LINUX-64-STATIC: moo00:
-; LINUX-64-STATIC: movl src+262144(%rip), %eax
-; LINUX-64-STATIC: movl %eax, dst+262144(%rip)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: moo00:
-; LINUX-32-STATIC: movl src+262144, %eax
-; LINUX-32-STATIC-NEXT: movl %eax, dst+262144
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: moo00:
-; LINUX-32-PIC: movl src+262144, %eax
-; LINUX-32-PIC-NEXT: movl %eax, dst+262144
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: moo00:
-; LINUX-64-PIC: movq src@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 262144(%rax), %eax
-; LINUX-64-PIC-NEXT: movq dst@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 262144(%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _moo00:
-; DARWIN-32-STATIC: movl _src+262144, %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _dst+262144
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _moo00:
-; DARWIN-32-DYNAMIC: movl L_src$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl 262144(%eax), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_dst$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, 262144(%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _moo00:
-; DARWIN-32-PIC: call L49$pb
-; DARWIN-32-PIC-NEXT: L49$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L49$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl 262144(%ecx), %ecx
-; DARWIN-32-PIC-NEXT: movl L_dst$non_lazy_ptr-L49$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, 262144(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _moo00:
-; DARWIN-64-STATIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 262144(%rax), %eax
-; DARWIN-64-STATIC-NEXT: movq _dst@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 262144(%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _moo00:
-; DARWIN-64-DYNAMIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 262144(%rax), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _dst@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 262144(%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _moo00:
-; DARWIN-64-PIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 262144(%rax), %eax
-; DARWIN-64-PIC-NEXT: movq _dst@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 262144(%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @moo01(i64 %i) nounwind {
-entry:
- store i32* getelementptr ([131072 x i32]* @dst, i32 0, i64 65536), i32** @ptr, align 8
- ret void
-; LINUX-64-STATIC: moo01:
-; LINUX-64-STATIC: movq $dst+262144, ptr(%rip)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: moo01:
-; LINUX-32-STATIC: movl $dst+262144, ptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: moo01:
-; LINUX-32-PIC: movl $dst+262144, ptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: moo01:
-; LINUX-64-PIC: movl $262144, %eax
-; LINUX-64-PIC-NEXT: addq dst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movq ptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq %rax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _moo01:
-; DARWIN-32-STATIC: movl $_dst+262144, _ptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _moo01:
-; DARWIN-32-DYNAMIC: movl $262144, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl L_dst$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _moo01:
-; DARWIN-32-PIC: call L50$pb
-; DARWIN-32-PIC-NEXT: L50$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl $262144, %ecx
-; DARWIN-32-PIC-NEXT: addl L_dst$non_lazy_ptr-L50$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L50$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, (%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _moo01:
-; DARWIN-64-STATIC: movl $262144, %eax
-; DARWIN-64-STATIC-NEXT: addq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _moo01:
-; DARWIN-64-DYNAMIC: movl $262144, %eax
-; DARWIN-64-DYNAMIC-NEXT: addq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _moo01:
-; DARWIN-64-PIC: movl $262144, %eax
-; DARWIN-64-PIC-NEXT: addq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @moo02(i64 %i) nounwind {
-entry:
- %0 = load i32** @ptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 65536), align 4
- %2 = getelementptr i32* %0, i64 65536
- store i32 %1, i32* %2, align 4
- ret void
-; LINUX-64-STATIC: moo02:
-; LINUX-64-STATIC: movl src+262144(%rip), %eax
-; LINUX-64-STATIC: movq ptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, 262144(%rcx)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: moo02:
-; LINUX-32-STATIC: movl src+262144, %eax
-; LINUX-32-STATIC-NEXT: movl ptr, %ecx
-; LINUX-32-STATIC-NEXT: movl %eax, 262144(%ecx)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: moo02:
-; LINUX-32-PIC: movl src+262144, %eax
-; LINUX-32-PIC-NEXT: movl ptr, %ecx
-; LINUX-32-PIC-NEXT: movl %eax, 262144(%ecx)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: moo02:
-; LINUX-64-PIC: movq src@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 262144(%rax), %eax
-; LINUX-64-PIC-NEXT: movq ptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq (%rcx), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 262144(%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _moo02:
-; DARWIN-32-STATIC: movl _src+262144, %eax
-; DARWIN-32-STATIC-NEXT: movl _ptr, %ecx
-; DARWIN-32-STATIC-NEXT: movl %eax, 262144(%ecx)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _moo02:
-; DARWIN-32-DYNAMIC: movl L_src$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl 262144(%eax), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl (%ecx), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, 262144(%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _moo02:
-; DARWIN-32-PIC: call L51$pb
-; DARWIN-32-PIC-NEXT: L51$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L51$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl 262144(%ecx), %ecx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L51$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl (%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, 262144(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _moo02:
-; DARWIN-64-STATIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 262144(%rax), %eax
-; DARWIN-64-STATIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 262144(%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _moo02:
-; DARWIN-64-DYNAMIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 262144(%rax), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 262144(%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _moo02:
-; DARWIN-64-PIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 262144(%rax), %eax
-; DARWIN-64-PIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 262144(%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @moo03(i64 %i) nounwind {
-entry:
- %0 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 65536), align 32
- store i32 %0, i32* getelementptr ([131072 x i32]* @ddst, i32 0, i64 65536), align 32
- ret void
-; LINUX-64-STATIC: moo03:
-; LINUX-64-STATIC: movl dsrc+262144(%rip), %eax
-; LINUX-64-STATIC: movl %eax, ddst+262144(%rip)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: moo03:
-; LINUX-32-STATIC: movl dsrc+262144, %eax
-; LINUX-32-STATIC-NEXT: movl %eax, ddst+262144
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: moo03:
-; LINUX-32-PIC: movl dsrc+262144, %eax
-; LINUX-32-PIC-NEXT: movl %eax, ddst+262144
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: moo03:
-; LINUX-64-PIC: movq dsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 262144(%rax), %eax
-; LINUX-64-PIC-NEXT: movq ddst@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 262144(%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _moo03:
-; DARWIN-32-STATIC: movl _dsrc+262144, %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _ddst+262144
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _moo03:
-; DARWIN-32-DYNAMIC: movl _dsrc+262144, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, _ddst+262144
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _moo03:
-; DARWIN-32-PIC: call L52$pb
-; DARWIN-32-PIC-NEXT: L52$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl (_dsrc-L52$pb)+262144(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, (_ddst-L52$pb)+262144(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _moo03:
-; DARWIN-64-STATIC: movl _dsrc+262144(%rip), %eax
-; DARWIN-64-STATIC-NEXT: movl %eax, _ddst+262144(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _moo03:
-; DARWIN-64-DYNAMIC: movl _dsrc+262144(%rip), %eax
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, _ddst+262144(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _moo03:
-; DARWIN-64-PIC: movl _dsrc+262144(%rip), %eax
-; DARWIN-64-PIC-NEXT: movl %eax, _ddst+262144(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @moo04(i64 %i) nounwind {
-entry:
- store i32* getelementptr ([131072 x i32]* @ddst, i32 0, i64 65536), i32** @dptr, align 8
- ret void
-; LINUX-64-STATIC: moo04:
-; LINUX-64-STATIC: movq $ddst+262144, dptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: moo04:
-; LINUX-32-STATIC: movl $ddst+262144, dptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: moo04:
-; LINUX-32-PIC: movl $ddst+262144, dptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: moo04:
-; LINUX-64-PIC: movl $262144, %eax
-; LINUX-64-PIC-NEXT: addq ddst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movq dptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq %rax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _moo04:
-; DARWIN-32-STATIC: movl $_ddst+262144, _dptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _moo04:
-; DARWIN-32-DYNAMIC: movl $_ddst+262144, _dptr
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _moo04:
-; DARWIN-32-PIC: call L53$pb
-; DARWIN-32-PIC-NEXT: L53$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal (_ddst-L53$pb)+262144(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, _dptr-L53$pb(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _moo04:
-; DARWIN-64-STATIC: leaq _ddst+262144(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _moo04:
-; DARWIN-64-DYNAMIC: leaq _ddst+262144(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _moo04:
-; DARWIN-64-PIC: leaq _ddst+262144(%rip), %rax
-; DARWIN-64-PIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @moo05(i64 %i) nounwind {
-entry:
- %0 = load i32** @dptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 65536), align 32
- %2 = getelementptr i32* %0, i64 65536
- store i32 %1, i32* %2, align 4
- ret void
-; LINUX-64-STATIC: moo05:
-; LINUX-64-STATIC: movl dsrc+262144(%rip), %eax
-; LINUX-64-STATIC: movq dptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, 262144(%rcx)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: moo05:
-; LINUX-32-STATIC: movl dsrc+262144, %eax
-; LINUX-32-STATIC-NEXT: movl dptr, %ecx
-; LINUX-32-STATIC-NEXT: movl %eax, 262144(%ecx)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: moo05:
-; LINUX-32-PIC: movl dsrc+262144, %eax
-; LINUX-32-PIC-NEXT: movl dptr, %ecx
-; LINUX-32-PIC-NEXT: movl %eax, 262144(%ecx)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: moo05:
-; LINUX-64-PIC: movq dsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 262144(%rax), %eax
-; LINUX-64-PIC-NEXT: movq dptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq (%rcx), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 262144(%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _moo05:
-; DARWIN-32-STATIC: movl _dsrc+262144, %eax
-; DARWIN-32-STATIC-NEXT: movl _dptr, %ecx
-; DARWIN-32-STATIC-NEXT: movl %eax, 262144(%ecx)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _moo05:
-; DARWIN-32-DYNAMIC: movl _dsrc+262144, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _dptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, 262144(%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _moo05:
-; DARWIN-32-PIC: call L54$pb
-; DARWIN-32-PIC-NEXT: L54$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl (_dsrc-L54$pb)+262144(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl _dptr-L54$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, 262144(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _moo05:
-; DARWIN-64-STATIC: movl _dsrc+262144(%rip), %eax
-; DARWIN-64-STATIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 262144(%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _moo05:
-; DARWIN-64-DYNAMIC: movl _dsrc+262144(%rip), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 262144(%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _moo05:
-; DARWIN-64-PIC: movl _dsrc+262144(%rip), %eax
-; DARWIN-64-PIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 262144(%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @moo06(i64 %i) nounwind {
-entry:
- %0 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 65536), align 4
- store i32 %0, i32* getelementptr ([131072 x i32]* @ldst, i32 0, i64 65536), align 4
- ret void
-; LINUX-64-STATIC: moo06:
-; LINUX-64-STATIC: movl lsrc+262144(%rip), %eax
-; LINUX-64-STATIC: movl %eax, ldst+262144(%rip)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: moo06:
-; LINUX-32-STATIC: movl lsrc+262144, %eax
-; LINUX-32-STATIC-NEXT: movl %eax, ldst+262144
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: moo06:
-; LINUX-32-PIC: movl lsrc+262144, %eax
-; LINUX-32-PIC-NEXT: movl %eax, ldst+262144
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: moo06:
-; LINUX-64-PIC: movl lsrc+262144(%rip), %eax
-; LINUX-64-PIC-NEXT: movl %eax, ldst+262144(%rip)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _moo06:
-; DARWIN-32-STATIC: movl _lsrc+262144, %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _ldst+262144
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _moo06:
-; DARWIN-32-DYNAMIC: movl _lsrc+262144, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, _ldst+262144
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _moo06:
-; DARWIN-32-PIC: call L55$pb
-; DARWIN-32-PIC-NEXT: L55$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl (_lsrc-L55$pb)+262144(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, (_ldst-L55$pb)+262144(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _moo06:
-; DARWIN-64-STATIC: movl _lsrc+262144(%rip), %eax
-; DARWIN-64-STATIC-NEXT: movl %eax, _ldst+262144(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _moo06:
-; DARWIN-64-DYNAMIC: movl _lsrc+262144(%rip), %eax
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, _ldst+262144(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _moo06:
-; DARWIN-64-PIC: movl _lsrc+262144(%rip), %eax
-; DARWIN-64-PIC-NEXT: movl %eax, _ldst+262144(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @moo07(i64 %i) nounwind {
-entry:
- store i32* getelementptr ([131072 x i32]* @ldst, i32 0, i64 65536), i32** @lptr, align 8
- ret void
-; LINUX-64-STATIC: moo07:
-; LINUX-64-STATIC: movq $ldst+262144, lptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: moo07:
-; LINUX-32-STATIC: movl $ldst+262144, lptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: moo07:
-; LINUX-32-PIC: movl $ldst+262144, lptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: moo07:
-; LINUX-64-PIC: leaq ldst+262144(%rip), %rax
-; LINUX-64-PIC-NEXT: movq %rax, lptr(%rip)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _moo07:
-; DARWIN-32-STATIC: movl $_ldst+262144, _lptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _moo07:
-; DARWIN-32-DYNAMIC: movl $_ldst+262144, _lptr
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _moo07:
-; DARWIN-32-PIC: call L56$pb
-; DARWIN-32-PIC-NEXT: L56$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal (_ldst-L56$pb)+262144(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, _lptr-L56$pb(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _moo07:
-; DARWIN-64-STATIC: leaq _ldst+262144(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _moo07:
-; DARWIN-64-DYNAMIC: leaq _ldst+262144(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _moo07:
-; DARWIN-64-PIC: leaq _ldst+262144(%rip), %rax
-; DARWIN-64-PIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @moo08(i64 %i) nounwind {
-entry:
- %0 = load i32** @lptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 65536), align 4
- %2 = getelementptr i32* %0, i64 65536
- store i32 %1, i32* %2, align 4
- ret void
-; LINUX-64-STATIC: moo08:
-; LINUX-64-STATIC: movl lsrc+262144(%rip), %eax
-; LINUX-64-STATIC: movq lptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, 262144(%rcx)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: moo08:
-; LINUX-32-STATIC: movl lsrc+262144, %eax
-; LINUX-32-STATIC-NEXT: movl lptr, %ecx
-; LINUX-32-STATIC-NEXT: movl %eax, 262144(%ecx)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: moo08:
-; LINUX-32-PIC: movl lsrc+262144, %eax
-; LINUX-32-PIC-NEXT: movl lptr, %ecx
-; LINUX-32-PIC-NEXT: movl %eax, 262144(%ecx)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: moo08:
-; LINUX-64-PIC: movl lsrc+262144(%rip), %eax
-; LINUX-64-PIC-NEXT: movq lptr(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 262144(%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _moo08:
-; DARWIN-32-STATIC: movl _lsrc+262144, %eax
-; DARWIN-32-STATIC-NEXT: movl _lptr, %ecx
-; DARWIN-32-STATIC-NEXT: movl %eax, 262144(%ecx)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _moo08:
-; DARWIN-32-DYNAMIC: movl _lsrc+262144, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _lptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, 262144(%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _moo08:
-; DARWIN-32-PIC: call L57$pb
-; DARWIN-32-PIC-NEXT: L57$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl (_lsrc-L57$pb)+262144(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl _lptr-L57$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, 262144(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _moo08:
-; DARWIN-64-STATIC: movl _lsrc+262144(%rip), %eax
-; DARWIN-64-STATIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 262144(%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _moo08:
-; DARWIN-64-DYNAMIC: movl _lsrc+262144(%rip), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 262144(%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _moo08:
-; DARWIN-64-PIC: movl _lsrc+262144(%rip), %eax
-; DARWIN-64-PIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 262144(%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @big00(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @src, i64 0, i64 %0
- %2 = load i32* %1, align 4
- %3 = getelementptr [131072 x i32]* @dst, i64 0, i64 %0
- store i32 %2, i32* %3, align 4
- ret void
-; LINUX-64-STATIC: big00:
-; LINUX-64-STATIC: movl src+262144(,%rdi,4), %eax
-; LINUX-64-STATIC: movl %eax, dst+262144(,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: big00:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl src+262144(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl %ecx, dst+262144(,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: big00:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl src+262144(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl %ecx, dst+262144(,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: big00:
-; LINUX-64-PIC: movq src@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq dst@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _big00:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _src+262144(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl %ecx, _dst+262144(,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _big00:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_src$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl 262144(%ecx,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl L_dst$non_lazy_ptr, %edx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, 262144(%edx,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _big00:
-; DARWIN-32-PIC: call L58$pb
-; DARWIN-32-PIC-NEXT: L58$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L58$pb(%eax), %edx
-; DARWIN-32-PIC-NEXT: movl 262144(%edx,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl L_dst$non_lazy_ptr-L58$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %edx, 262144(%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _big00:
-; DARWIN-64-STATIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: movq _dst@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _big00:
-; DARWIN-64-DYNAMIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _dst@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _big00:
-; DARWIN-64-PIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: movq _dst@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @big01(i64 %i) nounwind {
-entry:
- %.sum = add i64 %i, 65536
- %0 = getelementptr [131072 x i32]* @dst, i64 0, i64 %.sum
- store i32* %0, i32** @ptr, align 8
- ret void
-; LINUX-64-STATIC: big01:
-; LINUX-64-STATIC: leaq dst+262144(,%rdi,4), %rax
-; LINUX-64-STATIC: movq %rax, ptr(%rip)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: big01:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal dst+262144(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: movl %eax, ptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: big01:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal dst+262144(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: movl %eax, ptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: big01:
-; LINUX-64-PIC: movq dst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: movq ptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq %rax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _big01:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _dst+262144(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _ptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _big01:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_dst$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 262144(%ecx,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, (%ecx)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _big01:
-; DARWIN-32-PIC: call L59$pb
-; DARWIN-32-PIC-NEXT: L59$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_dst$non_lazy_ptr-L59$pb(%eax), %edx
-; DARWIN-32-PIC-NEXT: leal 262144(%edx,%ecx,4), %ecx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L59$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %ecx, (%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _big01:
-; DARWIN-64-STATIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _big01:
-; DARWIN-64-DYNAMIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _big01:
-; DARWIN-64-PIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq %rax, (%rcx)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @big02(i64 %i) nounwind {
-entry:
- %0 = load i32** @ptr, align 8
- %1 = add i64 %i, 65536
- %2 = getelementptr [131072 x i32]* @src, i64 0, i64 %1
- %3 = load i32* %2, align 4
- %4 = getelementptr i32* %0, i64 %1
- store i32 %3, i32* %4, align 4
- ret void
-; LINUX-64-STATIC: big02:
-; LINUX-64-STATIC: movl src+262144(,%rdi,4), %eax
-; LINUX-64-STATIC: movq ptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, 262144(%rcx,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: big02:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl src+262144(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl ptr, %edx
-; LINUX-32-STATIC-NEXT: movl %ecx, 262144(%edx,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: big02:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl src+262144(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl ptr, %edx
-; LINUX-32-PIC-NEXT: movl %ecx, 262144(%edx,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: big02:
-; LINUX-64-PIC: movq src@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq ptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq (%rcx), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _big02:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _src+262144(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl _ptr, %edx
-; DARWIN-32-STATIC-NEXT: movl %ecx, 262144(%edx,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _big02:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_src$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl 262144(%ecx,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl L_ptr$non_lazy_ptr, %edx
-; DARWIN-32-DYNAMIC-NEXT: movl (%edx), %edx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, 262144(%edx,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _big02:
-; DARWIN-32-PIC: call L60$pb
-; DARWIN-32-PIC-NEXT: L60$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L60$pb(%eax), %edx
-; DARWIN-32-PIC-NEXT: movl 262144(%edx,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L60$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl (%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %edx, 262144(%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _big02:
-; DARWIN-64-STATIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _big02:
-; DARWIN-64-DYNAMIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _big02:
-; DARWIN-64-PIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movq (%rcx), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @big03(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @dsrc, i64 0, i64 %0
- %2 = load i32* %1, align 4
- %3 = getelementptr [131072 x i32]* @ddst, i64 0, i64 %0
- store i32 %2, i32* %3, align 4
- ret void
-; LINUX-64-STATIC: big03:
-; LINUX-64-STATIC: movl dsrc+262144(,%rdi,4), %eax
-; LINUX-64-STATIC: movl %eax, ddst+262144(,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: big03:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl dsrc+262144(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl %ecx, ddst+262144(,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: big03:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl dsrc+262144(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl %ecx, ddst+262144(,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: big03:
-; LINUX-64-PIC: movq dsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq ddst@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _big03:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _dsrc+262144(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl %ecx, _ddst+262144(,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _big03:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _dsrc+262144(,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, _ddst+262144(,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _big03:
-; DARWIN-32-PIC: call L61$pb
-; DARWIN-32-PIC-NEXT: L61$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl (_dsrc-L61$pb)+262144(%eax,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl %edx, (_ddst-L61$pb)+262144(%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _big03:
-; DARWIN-64-STATIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: leaq _ddst(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _big03:
-; DARWIN-64-DYNAMIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: leaq _ddst(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _big03:
-; DARWIN-64-PIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: leaq _ddst(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @big04(i64 %i) nounwind {
-entry:
- %.sum = add i64 %i, 65536
- %0 = getelementptr [131072 x i32]* @ddst, i64 0, i64 %.sum
- store i32* %0, i32** @dptr, align 8
- ret void
-; LINUX-64-STATIC: big04:
-; LINUX-64-STATIC: leaq ddst+262144(,%rdi,4), %rax
-; LINUX-64-STATIC: movq %rax, dptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: big04:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal ddst+262144(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: movl %eax, dptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: big04:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal ddst+262144(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: movl %eax, dptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: big04:
-; LINUX-64-PIC: movq ddst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: movq dptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq %rax, (%rcx)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _big04:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _ddst+262144(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _dptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _big04:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: leal _ddst+262144(,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, _dptr
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _big04:
-; DARWIN-32-PIC: call L62$pb
-; DARWIN-32-PIC-NEXT: L62$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: leal (_ddst-L62$pb)+262144(%eax,%ecx,4), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, _dptr-L62$pb(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _big04:
-; DARWIN-64-STATIC: leaq _ddst(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _big04:
-; DARWIN-64-DYNAMIC: leaq _ddst(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _big04:
-; DARWIN-64-PIC: leaq _ddst(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: movq %rax, _dptr(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @big05(i64 %i) nounwind {
-entry:
- %0 = load i32** @dptr, align 8
- %1 = add i64 %i, 65536
- %2 = getelementptr [131072 x i32]* @dsrc, i64 0, i64 %1
- %3 = load i32* %2, align 4
- %4 = getelementptr i32* %0, i64 %1
- store i32 %3, i32* %4, align 4
- ret void
-; LINUX-64-STATIC: big05:
-; LINUX-64-STATIC: movl dsrc+262144(,%rdi,4), %eax
-; LINUX-64-STATIC: movq dptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, 262144(%rcx,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: big05:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl dsrc+262144(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl dptr, %edx
-; LINUX-32-STATIC-NEXT: movl %ecx, 262144(%edx,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: big05:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl dsrc+262144(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl dptr, %edx
-; LINUX-32-PIC-NEXT: movl %ecx, 262144(%edx,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: big05:
-; LINUX-64-PIC: movq dsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq dptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movq (%rcx), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _big05:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _dsrc+262144(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl _dptr, %edx
-; DARWIN-32-STATIC-NEXT: movl %ecx, 262144(%edx,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _big05:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _dsrc+262144(,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl _dptr, %edx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, 262144(%edx,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _big05:
-; DARWIN-32-PIC: call L63$pb
-; DARWIN-32-PIC-NEXT: L63$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl (_dsrc-L63$pb)+262144(%eax,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl _dptr-L63$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %edx, 262144(%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _big05:
-; DARWIN-64-STATIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _big05:
-; DARWIN-64-DYNAMIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _big05:
-; DARWIN-64-PIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: movq _dptr(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @big06(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @lsrc, i64 0, i64 %0
- %2 = load i32* %1, align 4
- %3 = getelementptr [131072 x i32]* @ldst, i64 0, i64 %0
- store i32 %2, i32* %3, align 4
- ret void
-; LINUX-64-STATIC: big06:
-; LINUX-64-STATIC: movl lsrc+262144(,%rdi,4), %eax
-; LINUX-64-STATIC: movl %eax, ldst+262144(,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: big06:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl lsrc+262144(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl %ecx, ldst+262144(,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: big06:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl lsrc+262144(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl %ecx, ldst+262144(,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: big06:
-; LINUX-64-PIC: leaq lsrc(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: leaq ldst(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _big06:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _lsrc+262144(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl %ecx, _ldst+262144(,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _big06:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _lsrc+262144(,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, _ldst+262144(,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _big06:
-; DARWIN-32-PIC: call L64$pb
-; DARWIN-32-PIC-NEXT: L64$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl (_lsrc-L64$pb)+262144(%eax,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl %edx, (_ldst-L64$pb)+262144(%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _big06:
-; DARWIN-64-STATIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: leaq _ldst(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _big06:
-; DARWIN-64-DYNAMIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: leaq _ldst(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _big06:
-; DARWIN-64-PIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: leaq _ldst(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @big07(i64 %i) nounwind {
-entry:
- %.sum = add i64 %i, 65536
- %0 = getelementptr [131072 x i32]* @ldst, i64 0, i64 %.sum
- store i32* %0, i32** @lptr, align 8
- ret void
-; LINUX-64-STATIC: big07:
-; LINUX-64-STATIC: leaq ldst+262144(,%rdi,4), %rax
-; LINUX-64-STATIC: movq %rax, lptr
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: big07:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal ldst+262144(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: movl %eax, lptr
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: big07:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal ldst+262144(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: movl %eax, lptr
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: big07:
-; LINUX-64-PIC: leaq ldst(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: movq %rax, lptr(%rip)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _big07:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _ldst+262144(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: movl %eax, _lptr
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _big07:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: leal _ldst+262144(,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl %eax, _lptr
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _big07:
-; DARWIN-32-PIC: call L65$pb
-; DARWIN-32-PIC-NEXT: L65$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: leal (_ldst-L65$pb)+262144(%eax,%ecx,4), %ecx
-; DARWIN-32-PIC-NEXT: movl %ecx, _lptr-L65$pb(%eax)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _big07:
-; DARWIN-64-STATIC: leaq _ldst(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _big07:
-; DARWIN-64-DYNAMIC: leaq _ldst(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _big07:
-; DARWIN-64-PIC: leaq _ldst(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: movq %rax, _lptr(%rip)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @big08(i64 %i) nounwind {
-entry:
- %0 = load i32** @lptr, align 8
- %1 = add i64 %i, 65536
- %2 = getelementptr [131072 x i32]* @lsrc, i64 0, i64 %1
- %3 = load i32* %2, align 4
- %4 = getelementptr i32* %0, i64 %1
- store i32 %3, i32* %4, align 4
- ret void
-; LINUX-64-STATIC: big08:
-; LINUX-64-STATIC: movl lsrc+262144(,%rdi,4), %eax
-; LINUX-64-STATIC: movq lptr(%rip), %rcx
-; LINUX-64-STATIC: movl %eax, 262144(%rcx,%rdi,4)
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: big08:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl lsrc+262144(,%eax,4), %ecx
-; LINUX-32-STATIC-NEXT: movl lptr, %edx
-; LINUX-32-STATIC-NEXT: movl %ecx, 262144(%edx,%eax,4)
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: big08:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl lsrc+262144(,%eax,4), %ecx
-; LINUX-32-PIC-NEXT: movl lptr, %edx
-; LINUX-32-PIC-NEXT: movl %ecx, 262144(%edx,%eax,4)
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: big08:
-; LINUX-64-PIC: leaq lsrc(%rip), %rax
-; LINUX-64-PIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; LINUX-64-PIC-NEXT: movq lptr(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _big08:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _lsrc+262144(,%eax,4), %ecx
-; DARWIN-32-STATIC-NEXT: movl _lptr, %edx
-; DARWIN-32-STATIC-NEXT: movl %ecx, 262144(%edx,%eax,4)
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _big08:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _lsrc+262144(,%eax,4), %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl _lptr, %edx
-; DARWIN-32-DYNAMIC-NEXT: movl %ecx, 262144(%edx,%eax,4)
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _big08:
-; DARWIN-32-PIC: call L66$pb
-; DARWIN-32-PIC-NEXT: L66$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl (_lsrc-L66$pb)+262144(%eax,%ecx,4), %edx
-; DARWIN-32-PIC-NEXT: movl _lptr-L66$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl %edx, 262144(%eax,%ecx,4)
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _big08:
-; DARWIN-64-STATIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-STATIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _big08:
-; DARWIN-64-DYNAMIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-DYNAMIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _big08:
-; DARWIN-64-PIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: movl 262144(%rax,%rdi,4), %eax
-; DARWIN-64-PIC-NEXT: movq _lptr(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl %eax, 262144(%rcx,%rdi,4)
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bar00() nounwind {
-entry:
- ret i8* bitcast ([131072 x i32]* @src to i8*)
-; LINUX-64-STATIC: bar00:
-; LINUX-64-STATIC: movl $src, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bar00:
-; LINUX-32-STATIC: movl $src, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bar00:
-; LINUX-32-PIC: movl $src, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bar00:
-; LINUX-64-PIC: movq src@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bar00:
-; DARWIN-32-STATIC: movl $_src, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bar00:
-; DARWIN-32-DYNAMIC: movl L_src$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bar00:
-; DARWIN-32-PIC: call L67$pb
-; DARWIN-32-PIC-NEXT: L67$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L67$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bar00:
-; DARWIN-64-STATIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bar00:
-; DARWIN-64-DYNAMIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bar00:
-; DARWIN-64-PIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bxr00() nounwind {
-entry:
- ret i8* bitcast ([32 x i32]* @xsrc to i8*)
-; LINUX-64-STATIC: bxr00:
-; LINUX-64-STATIC: movl $xsrc, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bxr00:
-; LINUX-32-STATIC: movl $xsrc, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bxr00:
-; LINUX-32-PIC: movl $xsrc, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bxr00:
-; LINUX-64-PIC: movq xsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bxr00:
-; DARWIN-32-STATIC: movl $_xsrc, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bxr00:
-; DARWIN-32-DYNAMIC: movl L_xsrc$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bxr00:
-; DARWIN-32-PIC: call L68$pb
-; DARWIN-32-PIC-NEXT: L68$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_xsrc$non_lazy_ptr-L68$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bxr00:
-; DARWIN-64-STATIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bxr00:
-; DARWIN-64-DYNAMIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bxr00:
-; DARWIN-64-PIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bar01() nounwind {
-entry:
- ret i8* bitcast ([131072 x i32]* @dst to i8*)
-; LINUX-64-STATIC: bar01:
-; LINUX-64-STATIC: movl $dst, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bar01:
-; LINUX-32-STATIC: movl $dst, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bar01:
-; LINUX-32-PIC: movl $dst, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bar01:
-; LINUX-64-PIC: movq dst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bar01:
-; DARWIN-32-STATIC: movl $_dst, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bar01:
-; DARWIN-32-DYNAMIC: movl L_dst$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bar01:
-; DARWIN-32-PIC: call L69$pb
-; DARWIN-32-PIC-NEXT: L69$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_dst$non_lazy_ptr-L69$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bar01:
-; DARWIN-64-STATIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bar01:
-; DARWIN-64-DYNAMIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bar01:
-; DARWIN-64-PIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bxr01() nounwind {
-entry:
- ret i8* bitcast ([32 x i32]* @xdst to i8*)
-; LINUX-64-STATIC: bxr01:
-; LINUX-64-STATIC: movl $xdst, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bxr01:
-; LINUX-32-STATIC: movl $xdst, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bxr01:
-; LINUX-32-PIC: movl $xdst, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bxr01:
-; LINUX-64-PIC: movq xdst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bxr01:
-; DARWIN-32-STATIC: movl $_xdst, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bxr01:
-; DARWIN-32-DYNAMIC: movl L_xdst$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bxr01:
-; DARWIN-32-PIC: call L70$pb
-; DARWIN-32-PIC-NEXT: L70$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_xdst$non_lazy_ptr-L70$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bxr01:
-; DARWIN-64-STATIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bxr01:
-; DARWIN-64-DYNAMIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bxr01:
-; DARWIN-64-PIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bar02() nounwind {
-entry:
- ret i8* bitcast (i32** @ptr to i8*)
-; LINUX-64-STATIC: bar02:
-; LINUX-64-STATIC: movl $ptr, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bar02:
-; LINUX-32-STATIC: movl $ptr, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bar02:
-; LINUX-32-PIC: movl $ptr, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bar02:
-; LINUX-64-PIC: movq ptr@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bar02:
-; DARWIN-32-STATIC: movl $_ptr, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bar02:
-; DARWIN-32-DYNAMIC: movl L_ptr$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bar02:
-; DARWIN-32-PIC: call L71$pb
-; DARWIN-32-PIC-NEXT: L71$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L71$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bar02:
-; DARWIN-64-STATIC: movq _ptr@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bar02:
-; DARWIN-64-DYNAMIC: movq _ptr@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bar02:
-; DARWIN-64-PIC: movq _ptr@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bar03() nounwind {
-entry:
- ret i8* bitcast ([131072 x i32]* @dsrc to i8*)
-; LINUX-64-STATIC: bar03:
-; LINUX-64-STATIC: movl $dsrc, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bar03:
-; LINUX-32-STATIC: movl $dsrc, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bar03:
-; LINUX-32-PIC: movl $dsrc, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bar03:
-; LINUX-64-PIC: movq dsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bar03:
-; DARWIN-32-STATIC: movl $_dsrc, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bar03:
-; DARWIN-32-DYNAMIC: movl $_dsrc, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bar03:
-; DARWIN-32-PIC: call L72$pb
-; DARWIN-32-PIC-NEXT: L72$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal _dsrc-L72$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bar03:
-; DARWIN-64-STATIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bar03:
-; DARWIN-64-DYNAMIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bar03:
-; DARWIN-64-PIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bar04() nounwind {
-entry:
- ret i8* bitcast ([131072 x i32]* @ddst to i8*)
-; LINUX-64-STATIC: bar04:
-; LINUX-64-STATIC: movl $ddst, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bar04:
-; LINUX-32-STATIC: movl $ddst, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bar04:
-; LINUX-32-PIC: movl $ddst, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bar04:
-; LINUX-64-PIC: movq ddst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bar04:
-; DARWIN-32-STATIC: movl $_ddst, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bar04:
-; DARWIN-32-DYNAMIC: movl $_ddst, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bar04:
-; DARWIN-32-PIC: call L73$pb
-; DARWIN-32-PIC-NEXT: L73$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal _ddst-L73$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bar04:
-; DARWIN-64-STATIC: leaq _ddst(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bar04:
-; DARWIN-64-DYNAMIC: leaq _ddst(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bar04:
-; DARWIN-64-PIC: leaq _ddst(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bar05() nounwind {
-entry:
- ret i8* bitcast (i32** @dptr to i8*)
-; LINUX-64-STATIC: bar05:
-; LINUX-64-STATIC: movl $dptr, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bar05:
-; LINUX-32-STATIC: movl $dptr, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bar05:
-; LINUX-32-PIC: movl $dptr, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bar05:
-; LINUX-64-PIC: movq dptr@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bar05:
-; DARWIN-32-STATIC: movl $_dptr, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bar05:
-; DARWIN-32-DYNAMIC: movl $_dptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bar05:
-; DARWIN-32-PIC: call L74$pb
-; DARWIN-32-PIC-NEXT: L74$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal _dptr-L74$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bar05:
-; DARWIN-64-STATIC: leaq _dptr(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bar05:
-; DARWIN-64-DYNAMIC: leaq _dptr(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bar05:
-; DARWIN-64-PIC: leaq _dptr(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bar06() nounwind {
-entry:
- ret i8* bitcast ([131072 x i32]* @lsrc to i8*)
-; LINUX-64-STATIC: bar06:
-; LINUX-64-STATIC: movl $lsrc, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bar06:
-; LINUX-32-STATIC: movl $lsrc, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bar06:
-; LINUX-32-PIC: movl $lsrc, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bar06:
-; LINUX-64-PIC: leaq lsrc(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bar06:
-; DARWIN-32-STATIC: movl $_lsrc, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bar06:
-; DARWIN-32-DYNAMIC: movl $_lsrc, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bar06:
-; DARWIN-32-PIC: call L75$pb
-; DARWIN-32-PIC-NEXT: L75$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal _lsrc-L75$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bar06:
-; DARWIN-64-STATIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bar06:
-; DARWIN-64-DYNAMIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bar06:
-; DARWIN-64-PIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bar07() nounwind {
-entry:
- ret i8* bitcast ([131072 x i32]* @ldst to i8*)
-; LINUX-64-STATIC: bar07:
-; LINUX-64-STATIC: movl $ldst, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bar07:
-; LINUX-32-STATIC: movl $ldst, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bar07:
-; LINUX-32-PIC: movl $ldst, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bar07:
-; LINUX-64-PIC: leaq ldst(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bar07:
-; DARWIN-32-STATIC: movl $_ldst, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bar07:
-; DARWIN-32-DYNAMIC: movl $_ldst, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bar07:
-; DARWIN-32-PIC: call L76$pb
-; DARWIN-32-PIC-NEXT: L76$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal _ldst-L76$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bar07:
-; DARWIN-64-STATIC: leaq _ldst(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bar07:
-; DARWIN-64-DYNAMIC: leaq _ldst(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bar07:
-; DARWIN-64-PIC: leaq _ldst(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bar08() nounwind {
-entry:
- ret i8* bitcast (i32** @lptr to i8*)
-; LINUX-64-STATIC: bar08:
-; LINUX-64-STATIC: movl $lptr, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bar08:
-; LINUX-32-STATIC: movl $lptr, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bar08:
-; LINUX-32-PIC: movl $lptr, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bar08:
-; LINUX-64-PIC: leaq lptr(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bar08:
-; DARWIN-32-STATIC: movl $_lptr, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bar08:
-; DARWIN-32-DYNAMIC: movl $_lptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bar08:
-; DARWIN-32-PIC: call L77$pb
-; DARWIN-32-PIC-NEXT: L77$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal _lptr-L77$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bar08:
-; DARWIN-64-STATIC: leaq _lptr(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bar08:
-; DARWIN-64-DYNAMIC: leaq _lptr(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bar08:
-; DARWIN-64-PIC: leaq _lptr(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @har00() nounwind {
-entry:
- ret i8* bitcast ([131072 x i32]* @src to i8*)
-; LINUX-64-STATIC: har00:
-; LINUX-64-STATIC: movl $src, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: har00:
-; LINUX-32-STATIC: movl $src, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: har00:
-; LINUX-32-PIC: movl $src, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: har00:
-; LINUX-64-PIC: movq src@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _har00:
-; DARWIN-32-STATIC: movl $_src, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _har00:
-; DARWIN-32-DYNAMIC: movl L_src$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _har00:
-; DARWIN-32-PIC: call L78$pb
-; DARWIN-32-PIC-NEXT: L78$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L78$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _har00:
-; DARWIN-64-STATIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _har00:
-; DARWIN-64-DYNAMIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _har00:
-; DARWIN-64-PIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @hxr00() nounwind {
-entry:
- ret i8* bitcast ([32 x i32]* @xsrc to i8*)
-; LINUX-64-STATIC: hxr00:
-; LINUX-64-STATIC: movl $xsrc, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: hxr00:
-; LINUX-32-STATIC: movl $xsrc, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: hxr00:
-; LINUX-32-PIC: movl $xsrc, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: hxr00:
-; LINUX-64-PIC: movq xsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _hxr00:
-; DARWIN-32-STATIC: movl $_xsrc, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _hxr00:
-; DARWIN-32-DYNAMIC: movl L_xsrc$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _hxr00:
-; DARWIN-32-PIC: call L79$pb
-; DARWIN-32-PIC-NEXT: L79$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_xsrc$non_lazy_ptr-L79$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _hxr00:
-; DARWIN-64-STATIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _hxr00:
-; DARWIN-64-DYNAMIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _hxr00:
-; DARWIN-64-PIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @har01() nounwind {
-entry:
- ret i8* bitcast ([131072 x i32]* @dst to i8*)
-; LINUX-64-STATIC: har01:
-; LINUX-64-STATIC: movl $dst, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: har01:
-; LINUX-32-STATIC: movl $dst, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: har01:
-; LINUX-32-PIC: movl $dst, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: har01:
-; LINUX-64-PIC: movq dst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _har01:
-; DARWIN-32-STATIC: movl $_dst, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _har01:
-; DARWIN-32-DYNAMIC: movl L_dst$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _har01:
-; DARWIN-32-PIC: call L80$pb
-; DARWIN-32-PIC-NEXT: L80$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_dst$non_lazy_ptr-L80$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _har01:
-; DARWIN-64-STATIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _har01:
-; DARWIN-64-DYNAMIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _har01:
-; DARWIN-64-PIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @hxr01() nounwind {
-entry:
- ret i8* bitcast ([32 x i32]* @xdst to i8*)
-; LINUX-64-STATIC: hxr01:
-; LINUX-64-STATIC: movl $xdst, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: hxr01:
-; LINUX-32-STATIC: movl $xdst, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: hxr01:
-; LINUX-32-PIC: movl $xdst, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: hxr01:
-; LINUX-64-PIC: movq xdst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _hxr01:
-; DARWIN-32-STATIC: movl $_xdst, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _hxr01:
-; DARWIN-32-DYNAMIC: movl L_xdst$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _hxr01:
-; DARWIN-32-PIC: call L81$pb
-; DARWIN-32-PIC-NEXT: L81$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_xdst$non_lazy_ptr-L81$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _hxr01:
-; DARWIN-64-STATIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _hxr01:
-; DARWIN-64-DYNAMIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _hxr01:
-; DARWIN-64-PIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @har02() nounwind {
-entry:
- %0 = load i32** @ptr, align 8
- %1 = bitcast i32* %0 to i8*
- ret i8* %1
-; LINUX-64-STATIC: har02:
-; LINUX-64-STATIC: movq ptr(%rip), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: har02:
-; LINUX-32-STATIC: movl ptr, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: har02:
-; LINUX-32-PIC: movl ptr, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: har02:
-; LINUX-64-PIC: movq ptr@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movq (%rax), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _har02:
-; DARWIN-32-STATIC: movl _ptr, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _har02:
-; DARWIN-32-DYNAMIC: movl L_ptr$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl (%eax), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _har02:
-; DARWIN-32-PIC: call L82$pb
-; DARWIN-32-PIC-NEXT: L82$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L82$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl (%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _har02:
-; DARWIN-64-STATIC: movq _ptr@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movq (%rax), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _har02:
-; DARWIN-64-DYNAMIC: movq _ptr@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq (%rax), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _har02:
-; DARWIN-64-PIC: movq _ptr@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movq (%rax), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @har03() nounwind {
-entry:
- ret i8* bitcast ([131072 x i32]* @dsrc to i8*)
-; LINUX-64-STATIC: har03:
-; LINUX-64-STATIC: movl $dsrc, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: har03:
-; LINUX-32-STATIC: movl $dsrc, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: har03:
-; LINUX-32-PIC: movl $dsrc, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: har03:
-; LINUX-64-PIC: movq dsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _har03:
-; DARWIN-32-STATIC: movl $_dsrc, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _har03:
-; DARWIN-32-DYNAMIC: movl $_dsrc, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _har03:
-; DARWIN-32-PIC: call L83$pb
-; DARWIN-32-PIC-NEXT: L83$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal _dsrc-L83$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _har03:
-; DARWIN-64-STATIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _har03:
-; DARWIN-64-DYNAMIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _har03:
-; DARWIN-64-PIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @har04() nounwind {
-entry:
- ret i8* bitcast ([131072 x i32]* @ddst to i8*)
-; LINUX-64-STATIC: har04:
-; LINUX-64-STATIC: movl $ddst, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: har04:
-; LINUX-32-STATIC: movl $ddst, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: har04:
-; LINUX-32-PIC: movl $ddst, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: har04:
-; LINUX-64-PIC: movq ddst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _har04:
-; DARWIN-32-STATIC: movl $_ddst, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _har04:
-; DARWIN-32-DYNAMIC: movl $_ddst, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _har04:
-; DARWIN-32-PIC: call L84$pb
-; DARWIN-32-PIC-NEXT: L84$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal _ddst-L84$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _har04:
-; DARWIN-64-STATIC: leaq _ddst(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _har04:
-; DARWIN-64-DYNAMIC: leaq _ddst(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _har04:
-; DARWIN-64-PIC: leaq _ddst(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @har05() nounwind {
-entry:
- %0 = load i32** @dptr, align 8
- %1 = bitcast i32* %0 to i8*
- ret i8* %1
-; LINUX-64-STATIC: har05:
-; LINUX-64-STATIC: movq dptr(%rip), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: har05:
-; LINUX-32-STATIC: movl dptr, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: har05:
-; LINUX-32-PIC: movl dptr, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: har05:
-; LINUX-64-PIC: movq dptr@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movq (%rax), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _har05:
-; DARWIN-32-STATIC: movl _dptr, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _har05:
-; DARWIN-32-DYNAMIC: movl _dptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _har05:
-; DARWIN-32-PIC: call L85$pb
-; DARWIN-32-PIC-NEXT: L85$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl _dptr-L85$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _har05:
-; DARWIN-64-STATIC: movq _dptr(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _har05:
-; DARWIN-64-DYNAMIC: movq _dptr(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _har05:
-; DARWIN-64-PIC: movq _dptr(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @har06() nounwind {
-entry:
- ret i8* bitcast ([131072 x i32]* @lsrc to i8*)
-; LINUX-64-STATIC: har06:
-; LINUX-64-STATIC: movl $lsrc, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: har06:
-; LINUX-32-STATIC: movl $lsrc, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: har06:
-; LINUX-32-PIC: movl $lsrc, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: har06:
-; LINUX-64-PIC: leaq lsrc(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _har06:
-; DARWIN-32-STATIC: movl $_lsrc, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _har06:
-; DARWIN-32-DYNAMIC: movl $_lsrc, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _har06:
-; DARWIN-32-PIC: call L86$pb
-; DARWIN-32-PIC-NEXT: L86$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal _lsrc-L86$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _har06:
-; DARWIN-64-STATIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _har06:
-; DARWIN-64-DYNAMIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _har06:
-; DARWIN-64-PIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @har07() nounwind {
-entry:
- ret i8* bitcast ([131072 x i32]* @ldst to i8*)
-; LINUX-64-STATIC: har07:
-; LINUX-64-STATIC: movl $ldst, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: har07:
-; LINUX-32-STATIC: movl $ldst, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: har07:
-; LINUX-32-PIC: movl $ldst, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: har07:
-; LINUX-64-PIC: leaq ldst(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _har07:
-; DARWIN-32-STATIC: movl $_ldst, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _har07:
-; DARWIN-32-DYNAMIC: movl $_ldst, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _har07:
-; DARWIN-32-PIC: call L87$pb
-; DARWIN-32-PIC-NEXT: L87$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal _ldst-L87$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _har07:
-; DARWIN-64-STATIC: leaq _ldst(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _har07:
-; DARWIN-64-DYNAMIC: leaq _ldst(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _har07:
-; DARWIN-64-PIC: leaq _ldst(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @har08() nounwind {
-entry:
- %0 = load i32** @lptr, align 8
- %1 = bitcast i32* %0 to i8*
- ret i8* %1
-; LINUX-64-STATIC: har08:
-; LINUX-64-STATIC: movq lptr(%rip), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: har08:
-; LINUX-32-STATIC: movl lptr, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: har08:
-; LINUX-32-PIC: movl lptr, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: har08:
-; LINUX-64-PIC: movq lptr(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _har08:
-; DARWIN-32-STATIC: movl _lptr, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _har08:
-; DARWIN-32-DYNAMIC: movl _lptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _har08:
-; DARWIN-32-PIC: call L88$pb
-; DARWIN-32-PIC-NEXT: L88$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl _lptr-L88$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _har08:
-; DARWIN-64-STATIC: movq _lptr(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _har08:
-; DARWIN-64-DYNAMIC: movq _lptr(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _har08:
-; DARWIN-64-PIC: movq _lptr(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bat00() nounwind {
-entry:
- ret i8* bitcast (i32* getelementptr ([131072 x i32]* @src, i32 0, i64 16) to i8*)
-; LINUX-64-STATIC: bat00:
-; LINUX-64-STATIC: movl $src+64, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bat00:
-; LINUX-32-STATIC: movl $src+64, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bat00:
-; LINUX-32-PIC: movl $src+64, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bat00:
-; LINUX-64-PIC: movq src@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: addq $64, %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bat00:
-; DARWIN-32-STATIC: movl $_src+64, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bat00:
-; DARWIN-32-DYNAMIC: movl L_src$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl $64, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bat00:
-; DARWIN-32-PIC: call L89$pb
-; DARWIN-32-PIC-NEXT: L89$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L89$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: addl $64, %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bat00:
-; DARWIN-64-STATIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: addq $64, %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bat00:
-; DARWIN-64-DYNAMIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: addq $64, %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bat00:
-; DARWIN-64-PIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: addq $64, %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bxt00() nounwind {
-entry:
- ret i8* bitcast (i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 16) to i8*)
-; LINUX-64-STATIC: bxt00:
-; LINUX-64-STATIC: movl $xsrc+64, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bxt00:
-; LINUX-32-STATIC: movl $xsrc+64, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bxt00:
-; LINUX-32-PIC: movl $xsrc+64, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bxt00:
-; LINUX-64-PIC: movq xsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: addq $64, %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bxt00:
-; DARWIN-32-STATIC: movl $_xsrc+64, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bxt00:
-; DARWIN-32-DYNAMIC: movl L_xsrc$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl $64, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bxt00:
-; DARWIN-32-PIC: call L90$pb
-; DARWIN-32-PIC-NEXT: L90$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_xsrc$non_lazy_ptr-L90$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: addl $64, %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bxt00:
-; DARWIN-64-STATIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: addq $64, %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bxt00:
-; DARWIN-64-DYNAMIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: addq $64, %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bxt00:
-; DARWIN-64-PIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: addq $64, %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bat01() nounwind {
-entry:
- ret i8* bitcast (i32* getelementptr ([131072 x i32]* @dst, i32 0, i64 16) to i8*)
-; LINUX-64-STATIC: bat01:
-; LINUX-64-STATIC: movl $dst+64, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bat01:
-; LINUX-32-STATIC: movl $dst+64, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bat01:
-; LINUX-32-PIC: movl $dst+64, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bat01:
-; LINUX-64-PIC: movq dst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: addq $64, %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bat01:
-; DARWIN-32-STATIC: movl $_dst+64, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bat01:
-; DARWIN-32-DYNAMIC: movl L_dst$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl $64, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bat01:
-; DARWIN-32-PIC: call L91$pb
-; DARWIN-32-PIC-NEXT: L91$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_dst$non_lazy_ptr-L91$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: addl $64, %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bat01:
-; DARWIN-64-STATIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: addq $64, %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bat01:
-; DARWIN-64-DYNAMIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: addq $64, %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bat01:
-; DARWIN-64-PIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: addq $64, %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bxt01() nounwind {
-entry:
- ret i8* bitcast (i32* getelementptr ([32 x i32]* @xdst, i32 0, i64 16) to i8*)
-; LINUX-64-STATIC: bxt01:
-; LINUX-64-STATIC: movl $xdst+64, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bxt01:
-; LINUX-32-STATIC: movl $xdst+64, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bxt01:
-; LINUX-32-PIC: movl $xdst+64, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bxt01:
-; LINUX-64-PIC: movq xdst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: addq $64, %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bxt01:
-; DARWIN-32-STATIC: movl $_xdst+64, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bxt01:
-; DARWIN-32-DYNAMIC: movl L_xdst$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl $64, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bxt01:
-; DARWIN-32-PIC: call L92$pb
-; DARWIN-32-PIC-NEXT: L92$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_xdst$non_lazy_ptr-L92$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: addl $64, %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bxt01:
-; DARWIN-64-STATIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: addq $64, %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bxt01:
-; DARWIN-64-DYNAMIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: addq $64, %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bxt01:
-; DARWIN-64-PIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: addq $64, %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bat02() nounwind {
-entry:
- %0 = load i32** @ptr, align 8
- %1 = getelementptr i32* %0, i64 16
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: bat02:
-; LINUX-64-STATIC: movq ptr(%rip), %rax
-; LINUX-64-STATIC: addq $64, %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bat02:
-; LINUX-32-STATIC: movl ptr, %eax
-; LINUX-32-STATIC-NEXT: addl $64, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bat02:
-; LINUX-32-PIC: movl ptr, %eax
-; LINUX-32-PIC-NEXT: addl $64, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bat02:
-; LINUX-64-PIC: movq ptr@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movq (%rax), %rax
-; LINUX-64-PIC-NEXT: addq $64, %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bat02:
-; DARWIN-32-STATIC: movl _ptr, %eax
-; DARWIN-32-STATIC-NEXT: addl $64, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bat02:
-; DARWIN-32-DYNAMIC: movl L_ptr$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl (%eax), %eax
-; DARWIN-32-DYNAMIC-NEXT: addl $64, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bat02:
-; DARWIN-32-PIC: call L93$pb
-; DARWIN-32-PIC-NEXT: L93$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L93$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl (%eax), %eax
-; DARWIN-32-PIC-NEXT: addl $64, %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bat02:
-; DARWIN-64-STATIC: movq _ptr@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movq (%rax), %rax
-; DARWIN-64-STATIC-NEXT: addq $64, %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bat02:
-; DARWIN-64-DYNAMIC: movq _ptr@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq (%rax), %rax
-; DARWIN-64-DYNAMIC-NEXT: addq $64, %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bat02:
-; DARWIN-64-PIC: movq _ptr@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movq (%rax), %rax
-; DARWIN-64-PIC-NEXT: addq $64, %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bat03() nounwind {
-entry:
- ret i8* bitcast (i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 16) to i8*)
-; LINUX-64-STATIC: bat03:
-; LINUX-64-STATIC: movl $dsrc+64, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bat03:
-; LINUX-32-STATIC: movl $dsrc+64, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bat03:
-; LINUX-32-PIC: movl $dsrc+64, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bat03:
-; LINUX-64-PIC: movq dsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: addq $64, %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bat03:
-; DARWIN-32-STATIC: movl $_dsrc+64, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bat03:
-; DARWIN-32-DYNAMIC: movl $_dsrc+64, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bat03:
-; DARWIN-32-PIC: call L94$pb
-; DARWIN-32-PIC-NEXT: L94$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal (_dsrc-L94$pb)+64(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bat03:
-; DARWIN-64-STATIC: leaq _dsrc+64(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bat03:
-; DARWIN-64-DYNAMIC: leaq _dsrc+64(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bat03:
-; DARWIN-64-PIC: leaq _dsrc+64(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bat04() nounwind {
-entry:
- ret i8* bitcast (i32* getelementptr ([131072 x i32]* @ddst, i32 0, i64 16) to i8*)
-; LINUX-64-STATIC: bat04:
-; LINUX-64-STATIC: movl $ddst+64, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bat04:
-; LINUX-32-STATIC: movl $ddst+64, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bat04:
-; LINUX-32-PIC: movl $ddst+64, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bat04:
-; LINUX-64-PIC: movq ddst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: addq $64, %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bat04:
-; DARWIN-32-STATIC: movl $_ddst+64, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bat04:
-; DARWIN-32-DYNAMIC: movl $_ddst+64, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bat04:
-; DARWIN-32-PIC: call L95$pb
-; DARWIN-32-PIC-NEXT: L95$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal (_ddst-L95$pb)+64(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bat04:
-; DARWIN-64-STATIC: leaq _ddst+64(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bat04:
-; DARWIN-64-DYNAMIC: leaq _ddst+64(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bat04:
-; DARWIN-64-PIC: leaq _ddst+64(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bat05() nounwind {
-entry:
- %0 = load i32** @dptr, align 8
- %1 = getelementptr i32* %0, i64 16
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: bat05:
-; LINUX-64-STATIC: movq dptr(%rip), %rax
-; LINUX-64-STATIC: addq $64, %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bat05:
-; LINUX-32-STATIC: movl dptr, %eax
-; LINUX-32-STATIC-NEXT: addl $64, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bat05:
-; LINUX-32-PIC: movl dptr, %eax
-; LINUX-32-PIC-NEXT: addl $64, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bat05:
-; LINUX-64-PIC: movq dptr@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movq (%rax), %rax
-; LINUX-64-PIC-NEXT: addq $64, %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bat05:
-; DARWIN-32-STATIC: movl _dptr, %eax
-; DARWIN-32-STATIC-NEXT: addl $64, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bat05:
-; DARWIN-32-DYNAMIC: movl _dptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl $64, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bat05:
-; DARWIN-32-PIC: call L96$pb
-; DARWIN-32-PIC-NEXT: L96$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl _dptr-L96$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: addl $64, %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bat05:
-; DARWIN-64-STATIC: movq _dptr(%rip), %rax
-; DARWIN-64-STATIC-NEXT: addq $64, %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bat05:
-; DARWIN-64-DYNAMIC: movq _dptr(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: addq $64, %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bat05:
-; DARWIN-64-PIC: movq _dptr(%rip), %rax
-; DARWIN-64-PIC-NEXT: addq $64, %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bat06() nounwind {
-entry:
- ret i8* bitcast (i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 16) to i8*)
-; LINUX-64-STATIC: bat06:
-; LINUX-64-STATIC: movl $lsrc+64, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bat06:
-; LINUX-32-STATIC: movl $lsrc+64, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bat06:
-; LINUX-32-PIC: movl $lsrc+64, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bat06:
-; LINUX-64-PIC: leaq lsrc+64(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bat06:
-; DARWIN-32-STATIC: movl $_lsrc+64, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bat06:
-; DARWIN-32-DYNAMIC: movl $_lsrc+64, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bat06:
-; DARWIN-32-PIC: call L97$pb
-; DARWIN-32-PIC-NEXT: L97$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal (_lsrc-L97$pb)+64(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bat06:
-; DARWIN-64-STATIC: leaq _lsrc+64(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bat06:
-; DARWIN-64-DYNAMIC: leaq _lsrc+64(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bat06:
-; DARWIN-64-PIC: leaq _lsrc+64(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bat07() nounwind {
-entry:
- ret i8* bitcast (i32* getelementptr ([131072 x i32]* @ldst, i32 0, i64 16) to i8*)
-; LINUX-64-STATIC: bat07:
-; LINUX-64-STATIC: movl $ldst+64, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bat07:
-; LINUX-32-STATIC: movl $ldst+64, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bat07:
-; LINUX-32-PIC: movl $ldst+64, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bat07:
-; LINUX-64-PIC: leaq ldst+64(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bat07:
-; DARWIN-32-STATIC: movl $_ldst+64, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bat07:
-; DARWIN-32-DYNAMIC: movl $_ldst+64, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bat07:
-; DARWIN-32-PIC: call L98$pb
-; DARWIN-32-PIC-NEXT: L98$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal (_ldst-L98$pb)+64(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bat07:
-; DARWIN-64-STATIC: leaq _ldst+64(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bat07:
-; DARWIN-64-DYNAMIC: leaq _ldst+64(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bat07:
-; DARWIN-64-PIC: leaq _ldst+64(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bat08() nounwind {
-entry:
- %0 = load i32** @lptr, align 8
- %1 = getelementptr i32* %0, i64 16
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: bat08:
-; LINUX-64-STATIC: movq lptr(%rip), %rax
-; LINUX-64-STATIC: addq $64, %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bat08:
-; LINUX-32-STATIC: movl lptr, %eax
-; LINUX-32-STATIC-NEXT: addl $64, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bat08:
-; LINUX-32-PIC: movl lptr, %eax
-; LINUX-32-PIC-NEXT: addl $64, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bat08:
-; LINUX-64-PIC: movq lptr(%rip), %rax
-; LINUX-64-PIC-NEXT: addq $64, %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bat08:
-; DARWIN-32-STATIC: movl _lptr, %eax
-; DARWIN-32-STATIC-NEXT: addl $64, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bat08:
-; DARWIN-32-DYNAMIC: movl _lptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl $64, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bat08:
-; DARWIN-32-PIC: call L99$pb
-; DARWIN-32-PIC-NEXT: L99$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl _lptr-L99$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: addl $64, %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bat08:
-; DARWIN-64-STATIC: movq _lptr(%rip), %rax
-; DARWIN-64-STATIC-NEXT: addq $64, %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bat08:
-; DARWIN-64-DYNAMIC: movq _lptr(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: addq $64, %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bat08:
-; DARWIN-64-PIC: movq _lptr(%rip), %rax
-; DARWIN-64-PIC-NEXT: addq $64, %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bam00() nounwind {
-entry:
- ret i8* bitcast (i32* getelementptr ([131072 x i32]* @src, i32 0, i64 65536) to i8*)
-; LINUX-64-STATIC: bam00:
-; LINUX-64-STATIC: movl $src+262144, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bam00:
-; LINUX-32-STATIC: movl $src+262144, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bam00:
-; LINUX-32-PIC: movl $src+262144, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bam00:
-; LINUX-64-PIC: movl $262144, %eax
-; LINUX-64-PIC-NEXT: addq src@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bam00:
-; DARWIN-32-STATIC: movl $_src+262144, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bam00:
-; DARWIN-32-DYNAMIC: movl $262144, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl L_src$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bam00:
-; DARWIN-32-PIC: call L100$pb
-; DARWIN-32-PIC-NEXT: L100$pb:
-; DARWIN-32-PIC-NEXT: popl %ecx
-; DARWIN-32-PIC-NEXT: movl $262144, %eax
-; DARWIN-32-PIC-NEXT: addl L_src$non_lazy_ptr-L100$pb(%ecx), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bam00:
-; DARWIN-64-STATIC: movl $262144, %eax
-; DARWIN-64-STATIC-NEXT: addq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bam00:
-; DARWIN-64-DYNAMIC: movl $262144, %eax
-; DARWIN-64-DYNAMIC-NEXT: addq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bam00:
-; DARWIN-64-PIC: movl $262144, %eax
-; DARWIN-64-PIC-NEXT: addq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bam01() nounwind {
-entry:
- ret i8* bitcast (i32* getelementptr ([131072 x i32]* @dst, i32 0, i64 65536) to i8*)
-; LINUX-64-STATIC: bam01:
-; LINUX-64-STATIC: movl $dst+262144, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bam01:
-; LINUX-32-STATIC: movl $dst+262144, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bam01:
-; LINUX-32-PIC: movl $dst+262144, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bam01:
-; LINUX-64-PIC: movl $262144, %eax
-; LINUX-64-PIC-NEXT: addq dst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bam01:
-; DARWIN-32-STATIC: movl $_dst+262144, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bam01:
-; DARWIN-32-DYNAMIC: movl $262144, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl L_dst$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bam01:
-; DARWIN-32-PIC: call L101$pb
-; DARWIN-32-PIC-NEXT: L101$pb:
-; DARWIN-32-PIC-NEXT: popl %ecx
-; DARWIN-32-PIC-NEXT: movl $262144, %eax
-; DARWIN-32-PIC-NEXT: addl L_dst$non_lazy_ptr-L101$pb(%ecx), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bam01:
-; DARWIN-64-STATIC: movl $262144, %eax
-; DARWIN-64-STATIC-NEXT: addq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bam01:
-; DARWIN-64-DYNAMIC: movl $262144, %eax
-; DARWIN-64-DYNAMIC-NEXT: addq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bam01:
-; DARWIN-64-PIC: movl $262144, %eax
-; DARWIN-64-PIC-NEXT: addq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bxm01() nounwind {
-entry:
- ret i8* bitcast (i32* getelementptr ([32 x i32]* @xdst, i32 0, i64 65536) to i8*)
-; LINUX-64-STATIC: bxm01:
-; LINUX-64-STATIC: movl $xdst+262144, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bxm01:
-; LINUX-32-STATIC: movl $xdst+262144, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bxm01:
-; LINUX-32-PIC: movl $xdst+262144, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bxm01:
-; LINUX-64-PIC: movl $262144, %eax
-; LINUX-64-PIC-NEXT: addq xdst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bxm01:
-; DARWIN-32-STATIC: movl $_xdst+262144, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bxm01:
-; DARWIN-32-DYNAMIC: movl $262144, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl L_xdst$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bxm01:
-; DARWIN-32-PIC: call L102$pb
-; DARWIN-32-PIC-NEXT: L102$pb:
-; DARWIN-32-PIC-NEXT: popl %ecx
-; DARWIN-32-PIC-NEXT: movl $262144, %eax
-; DARWIN-32-PIC-NEXT: addl L_xdst$non_lazy_ptr-L102$pb(%ecx), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bxm01:
-; DARWIN-64-STATIC: movl $262144, %eax
-; DARWIN-64-STATIC-NEXT: addq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bxm01:
-; DARWIN-64-DYNAMIC: movl $262144, %eax
-; DARWIN-64-DYNAMIC-NEXT: addq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bxm01:
-; DARWIN-64-PIC: movl $262144, %eax
-; DARWIN-64-PIC-NEXT: addq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bam02() nounwind {
-entry:
- %0 = load i32** @ptr, align 8
- %1 = getelementptr i32* %0, i64 65536
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: bam02:
-; LINUX-64-STATIC: movl $262144, %eax
-; LINUX-64-STATIC: addq ptr(%rip), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bam02:
-; LINUX-32-STATIC: movl $262144, %eax
-; LINUX-32-STATIC-NEXT: addl ptr, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bam02:
-; LINUX-32-PIC: movl $262144, %eax
-; LINUX-32-PIC-NEXT: addl ptr, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bam02:
-; LINUX-64-PIC: movq ptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl $262144, %eax
-; LINUX-64-PIC-NEXT: addq (%rcx), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bam02:
-; DARWIN-32-STATIC: movl $262144, %eax
-; DARWIN-32-STATIC-NEXT: addl _ptr, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bam02:
-; DARWIN-32-DYNAMIC: movl L_ptr$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: movl $262144, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl (%ecx), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bam02:
-; DARWIN-32-PIC: call L103$pb
-; DARWIN-32-PIC-NEXT: L103$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L103$pb(%eax), %ecx
-; DARWIN-32-PIC-NEXT: movl $262144, %eax
-; DARWIN-32-PIC-NEXT: addl (%ecx), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bam02:
-; DARWIN-64-STATIC: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-STATIC-NEXT: movl $262144, %eax
-; DARWIN-64-STATIC-NEXT: addq (%rcx), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bam02:
-; DARWIN-64-DYNAMIC: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-DYNAMIC-NEXT: movl $262144, %eax
-; DARWIN-64-DYNAMIC-NEXT: addq (%rcx), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bam02:
-; DARWIN-64-PIC: movq _ptr@GOTPCREL(%rip), %rcx
-; DARWIN-64-PIC-NEXT: movl $262144, %eax
-; DARWIN-64-PIC-NEXT: addq (%rcx), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bam03() nounwind {
-entry:
- ret i8* bitcast (i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 65536) to i8*)
-; LINUX-64-STATIC: bam03:
-; LINUX-64-STATIC: movl $dsrc+262144, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bam03:
-; LINUX-32-STATIC: movl $dsrc+262144, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bam03:
-; LINUX-32-PIC: movl $dsrc+262144, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bam03:
-; LINUX-64-PIC: movl $262144, %eax
-; LINUX-64-PIC-NEXT: addq dsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bam03:
-; DARWIN-32-STATIC: movl $_dsrc+262144, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bam03:
-; DARWIN-32-DYNAMIC: movl $_dsrc+262144, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bam03:
-; DARWIN-32-PIC: call L104$pb
-; DARWIN-32-PIC-NEXT: L104$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal (_dsrc-L104$pb)+262144(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bam03:
-; DARWIN-64-STATIC: leaq _dsrc+262144(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bam03:
-; DARWIN-64-DYNAMIC: leaq _dsrc+262144(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bam03:
-; DARWIN-64-PIC: leaq _dsrc+262144(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bam04() nounwind {
-entry:
- ret i8* bitcast (i32* getelementptr ([131072 x i32]* @ddst, i32 0, i64 65536) to i8*)
-; LINUX-64-STATIC: bam04:
-; LINUX-64-STATIC: movl $ddst+262144, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bam04:
-; LINUX-32-STATIC: movl $ddst+262144, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bam04:
-; LINUX-32-PIC: movl $ddst+262144, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bam04:
-; LINUX-64-PIC: movl $262144, %eax
-; LINUX-64-PIC-NEXT: addq ddst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bam04:
-; DARWIN-32-STATIC: movl $_ddst+262144, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bam04:
-; DARWIN-32-DYNAMIC: movl $_ddst+262144, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bam04:
-; DARWIN-32-PIC: call L105$pb
-; DARWIN-32-PIC-NEXT: L105$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal (_ddst-L105$pb)+262144(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bam04:
-; DARWIN-64-STATIC: leaq _ddst+262144(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bam04:
-; DARWIN-64-DYNAMIC: leaq _ddst+262144(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bam04:
-; DARWIN-64-PIC: leaq _ddst+262144(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bam05() nounwind {
-entry:
- %0 = load i32** @dptr, align 8
- %1 = getelementptr i32* %0, i64 65536
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: bam05:
-; LINUX-64-STATIC: movl $262144, %eax
-; LINUX-64-STATIC: addq dptr(%rip), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bam05:
-; LINUX-32-STATIC: movl $262144, %eax
-; LINUX-32-STATIC-NEXT: addl dptr, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bam05:
-; LINUX-32-PIC: movl $262144, %eax
-; LINUX-32-PIC-NEXT: addl dptr, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bam05:
-; LINUX-64-PIC: movq dptr@GOTPCREL(%rip), %rcx
-; LINUX-64-PIC-NEXT: movl $262144, %eax
-; LINUX-64-PIC-NEXT: addq (%rcx), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bam05:
-; DARWIN-32-STATIC: movl $262144, %eax
-; DARWIN-32-STATIC-NEXT: addl _dptr, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bam05:
-; DARWIN-32-DYNAMIC: movl $262144, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl _dptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bam05:
-; DARWIN-32-PIC: call L106$pb
-; DARWIN-32-PIC-NEXT: L106$pb:
-; DARWIN-32-PIC-NEXT: popl %ecx
-; DARWIN-32-PIC-NEXT: movl $262144, %eax
-; DARWIN-32-PIC-NEXT: addl _dptr-L106$pb(%ecx), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bam05:
-; DARWIN-64-STATIC: movl $262144, %eax
-; DARWIN-64-STATIC-NEXT: addq _dptr(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bam05:
-; DARWIN-64-DYNAMIC: movl $262144, %eax
-; DARWIN-64-DYNAMIC-NEXT: addq _dptr(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bam05:
-; DARWIN-64-PIC: movl $262144, %eax
-; DARWIN-64-PIC-NEXT: addq _dptr(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bam06() nounwind {
-entry:
- ret i8* bitcast (i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 65536) to i8*)
-; LINUX-64-STATIC: bam06:
-; LINUX-64-STATIC: movl $lsrc+262144, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bam06:
-; LINUX-32-STATIC: movl $lsrc+262144, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bam06:
-; LINUX-32-PIC: movl $lsrc+262144, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bam06:
-; LINUX-64-PIC: leaq lsrc+262144(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bam06:
-; DARWIN-32-STATIC: movl $_lsrc+262144, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bam06:
-; DARWIN-32-DYNAMIC: movl $_lsrc+262144, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bam06:
-; DARWIN-32-PIC: call L107$pb
-; DARWIN-32-PIC-NEXT: L107$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal (_lsrc-L107$pb)+262144(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bam06:
-; DARWIN-64-STATIC: leaq _lsrc+262144(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bam06:
-; DARWIN-64-DYNAMIC: leaq _lsrc+262144(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bam06:
-; DARWIN-64-PIC: leaq _lsrc+262144(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bam07() nounwind {
-entry:
- ret i8* bitcast (i32* getelementptr ([131072 x i32]* @ldst, i32 0, i64 65536) to i8*)
-; LINUX-64-STATIC: bam07:
-; LINUX-64-STATIC: movl $ldst+262144, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bam07:
-; LINUX-32-STATIC: movl $ldst+262144, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bam07:
-; LINUX-32-PIC: movl $ldst+262144, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bam07:
-; LINUX-64-PIC: leaq ldst+262144(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bam07:
-; DARWIN-32-STATIC: movl $_ldst+262144, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bam07:
-; DARWIN-32-DYNAMIC: movl $_ldst+262144, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bam07:
-; DARWIN-32-PIC: call L108$pb
-; DARWIN-32-PIC-NEXT: L108$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal (_ldst-L108$pb)+262144(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bam07:
-; DARWIN-64-STATIC: leaq _ldst+262144(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bam07:
-; DARWIN-64-DYNAMIC: leaq _ldst+262144(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bam07:
-; DARWIN-64-PIC: leaq _ldst+262144(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @bam08() nounwind {
-entry:
- %0 = load i32** @lptr, align 8
- %1 = getelementptr i32* %0, i64 65536
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: bam08:
-; LINUX-64-STATIC: movl $262144, %eax
-; LINUX-64-STATIC: addq lptr(%rip), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: bam08:
-; LINUX-32-STATIC: movl $262144, %eax
-; LINUX-32-STATIC-NEXT: addl lptr, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: bam08:
-; LINUX-32-PIC: movl $262144, %eax
-; LINUX-32-PIC-NEXT: addl lptr, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: bam08:
-; LINUX-64-PIC: movl $262144, %eax
-; LINUX-64-PIC-NEXT: addq lptr(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _bam08:
-; DARWIN-32-STATIC: movl $262144, %eax
-; DARWIN-32-STATIC-NEXT: addl _lptr, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _bam08:
-; DARWIN-32-DYNAMIC: movl $262144, %eax
-; DARWIN-32-DYNAMIC-NEXT: addl _lptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _bam08:
-; DARWIN-32-PIC: call L109$pb
-; DARWIN-32-PIC-NEXT: L109$pb:
-; DARWIN-32-PIC-NEXT: popl %ecx
-; DARWIN-32-PIC-NEXT: movl $262144, %eax
-; DARWIN-32-PIC-NEXT: addl _lptr-L109$pb(%ecx), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _bam08:
-; DARWIN-64-STATIC: movl $262144, %eax
-; DARWIN-64-STATIC-NEXT: addq _lptr(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _bam08:
-; DARWIN-64-DYNAMIC: movl $262144, %eax
-; DARWIN-64-DYNAMIC-NEXT: addq _lptr(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _bam08:
-; DARWIN-64-PIC: movl $262144, %eax
-; DARWIN-64-PIC-NEXT: addq _lptr(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cat00(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @src, i64 0, i64 %0
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: cat00:
-; LINUX-64-STATIC: leaq src+64(,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cat00:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal src+64(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cat00:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal src+64(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cat00:
-; LINUX-64-PIC: movq src@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cat00:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _src+64(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cat00:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_src$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cat00:
-; DARWIN-32-PIC: call L110$pb
-; DARWIN-32-PIC-NEXT: L110$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L110$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: leal 64(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cat00:
-; DARWIN-64-STATIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cat00:
-; DARWIN-64-DYNAMIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cat00:
-; DARWIN-64-PIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cxt00(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 16
- %1 = getelementptr [32 x i32]* @xsrc, i64 0, i64 %0
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: cxt00:
-; LINUX-64-STATIC: leaq xsrc+64(,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cxt00:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal xsrc+64(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cxt00:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal xsrc+64(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cxt00:
-; LINUX-64-PIC: movq xsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cxt00:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _xsrc+64(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cxt00:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_xsrc$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cxt00:
-; DARWIN-32-PIC: call L111$pb
-; DARWIN-32-PIC-NEXT: L111$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_xsrc$non_lazy_ptr-L111$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: leal 64(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cxt00:
-; DARWIN-64-STATIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cxt00:
-; DARWIN-64-DYNAMIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cxt00:
-; DARWIN-64-PIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cat01(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @dst, i64 0, i64 %0
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: cat01:
-; LINUX-64-STATIC: leaq dst+64(,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cat01:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal dst+64(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cat01:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal dst+64(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cat01:
-; LINUX-64-PIC: movq dst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cat01:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _dst+64(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cat01:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_dst$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cat01:
-; DARWIN-32-PIC: call L112$pb
-; DARWIN-32-PIC-NEXT: L112$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_dst$non_lazy_ptr-L112$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: leal 64(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cat01:
-; DARWIN-64-STATIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cat01:
-; DARWIN-64-DYNAMIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cat01:
-; DARWIN-64-PIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cxt01(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 16
- %1 = getelementptr [32 x i32]* @xdst, i64 0, i64 %0
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: cxt01:
-; LINUX-64-STATIC: leaq xdst+64(,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cxt01:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal xdst+64(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cxt01:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal xdst+64(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cxt01:
-; LINUX-64-PIC: movq xdst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cxt01:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _xdst+64(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cxt01:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_xdst$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cxt01:
-; DARWIN-32-PIC: call L113$pb
-; DARWIN-32-PIC-NEXT: L113$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_xdst$non_lazy_ptr-L113$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: leal 64(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cxt01:
-; DARWIN-64-STATIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cxt01:
-; DARWIN-64-DYNAMIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cxt01:
-; DARWIN-64-PIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cat02(i64 %i) nounwind {
-entry:
- %0 = load i32** @ptr, align 8
- %1 = add i64 %i, 16
- %2 = getelementptr i32* %0, i64 %1
- %3 = bitcast i32* %2 to i8*
- ret i8* %3
-; LINUX-64-STATIC: cat02:
-; LINUX-64-STATIC: movq ptr(%rip), %rax
-; LINUX-64-STATIC: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cat02:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl ptr, %ecx
-; LINUX-32-STATIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cat02:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl ptr, %ecx
-; LINUX-32-PIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cat02:
-; LINUX-64-PIC: movq ptr@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movq (%rax), %rax
-; LINUX-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cat02:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _ptr, %ecx
-; DARWIN-32-STATIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cat02:
-; DARWIN-32-DYNAMIC: movl L_ptr$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl (%eax), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 64(%eax,%ecx,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cat02:
-; DARWIN-32-PIC: call L114$pb
-; DARWIN-32-PIC-NEXT: L114$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L114$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl (%eax), %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: leal 64(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cat02:
-; DARWIN-64-STATIC: movq _ptr@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movq (%rax), %rax
-; DARWIN-64-STATIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cat02:
-; DARWIN-64-DYNAMIC: movq _ptr@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq (%rax), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cat02:
-; DARWIN-64-PIC: movq _ptr@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movq (%rax), %rax
-; DARWIN-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cat03(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @dsrc, i64 0, i64 %0
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: cat03:
-; LINUX-64-STATIC: leaq dsrc+64(,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cat03:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal dsrc+64(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cat03:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal dsrc+64(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cat03:
-; LINUX-64-PIC: movq dsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cat03:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _dsrc+64(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cat03:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: leal _dsrc+64(,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cat03:
-; DARWIN-32-PIC: call L115$pb
-; DARWIN-32-PIC-NEXT: L115$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: leal (_dsrc-L115$pb)+64(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cat03:
-; DARWIN-64-STATIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cat03:
-; DARWIN-64-DYNAMIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cat03:
-; DARWIN-64-PIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cat04(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @ddst, i64 0, i64 %0
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: cat04:
-; LINUX-64-STATIC: leaq ddst+64(,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cat04:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal ddst+64(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cat04:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal ddst+64(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cat04:
-; LINUX-64-PIC: movq ddst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cat04:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _ddst+64(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cat04:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: leal _ddst+64(,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cat04:
-; DARWIN-32-PIC: call L116$pb
-; DARWIN-32-PIC-NEXT: L116$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: leal (_ddst-L116$pb)+64(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cat04:
-; DARWIN-64-STATIC: leaq _ddst(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cat04:
-; DARWIN-64-DYNAMIC: leaq _ddst(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cat04:
-; DARWIN-64-PIC: leaq _ddst(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cat05(i64 %i) nounwind {
-entry:
- %0 = load i32** @dptr, align 8
- %1 = add i64 %i, 16
- %2 = getelementptr i32* %0, i64 %1
- %3 = bitcast i32* %2 to i8*
- ret i8* %3
-; LINUX-64-STATIC: cat05:
-; LINUX-64-STATIC: movq dptr(%rip), %rax
-; LINUX-64-STATIC: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cat05:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl dptr, %ecx
-; LINUX-32-STATIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cat05:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl dptr, %ecx
-; LINUX-32-PIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cat05:
-; LINUX-64-PIC: movq dptr@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movq (%rax), %rax
-; LINUX-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cat05:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _dptr, %ecx
-; DARWIN-32-STATIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cat05:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _dptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cat05:
-; DARWIN-32-PIC: call L117$pb
-; DARWIN-32-PIC-NEXT: L117$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl _dptr-L117$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: leal 64(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cat05:
-; DARWIN-64-STATIC: movq _dptr(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cat05:
-; DARWIN-64-DYNAMIC: movq _dptr(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cat05:
-; DARWIN-64-PIC: movq _dptr(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cat06(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @lsrc, i64 0, i64 %0
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: cat06:
-; LINUX-64-STATIC: leaq lsrc+64(,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cat06:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal lsrc+64(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cat06:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal lsrc+64(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cat06:
-; LINUX-64-PIC: leaq lsrc(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cat06:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _lsrc+64(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cat06:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: leal _lsrc+64(,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cat06:
-; DARWIN-32-PIC: call L118$pb
-; DARWIN-32-PIC-NEXT: L118$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: leal (_lsrc-L118$pb)+64(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cat06:
-; DARWIN-64-STATIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cat06:
-; DARWIN-64-DYNAMIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cat06:
-; DARWIN-64-PIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cat07(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @ldst, i64 0, i64 %0
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: cat07:
-; LINUX-64-STATIC: leaq ldst+64(,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cat07:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal ldst+64(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cat07:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal ldst+64(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cat07:
-; LINUX-64-PIC: leaq ldst(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cat07:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _ldst+64(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cat07:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: leal _ldst+64(,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cat07:
-; DARWIN-32-PIC: call L119$pb
-; DARWIN-32-PIC-NEXT: L119$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: leal (_ldst-L119$pb)+64(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cat07:
-; DARWIN-64-STATIC: leaq _ldst(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cat07:
-; DARWIN-64-DYNAMIC: leaq _ldst(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cat07:
-; DARWIN-64-PIC: leaq _ldst(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cat08(i64 %i) nounwind {
-entry:
- %0 = load i32** @lptr, align 8
- %1 = add i64 %i, 16
- %2 = getelementptr i32* %0, i64 %1
- %3 = bitcast i32* %2 to i8*
- ret i8* %3
-; LINUX-64-STATIC: cat08:
-; LINUX-64-STATIC: movq lptr(%rip), %rax
-; LINUX-64-STATIC: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cat08:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl lptr, %ecx
-; LINUX-32-STATIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cat08:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl lptr, %ecx
-; LINUX-32-PIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cat08:
-; LINUX-64-PIC: movq lptr(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cat08:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _lptr, %ecx
-; DARWIN-32-STATIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cat08:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _lptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 64(%ecx,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cat08:
-; DARWIN-32-PIC: call L120$pb
-; DARWIN-32-PIC-NEXT: L120$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl _lptr-L120$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: leal 64(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cat08:
-; DARWIN-64-STATIC: movq _lptr(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cat08:
-; DARWIN-64-DYNAMIC: movq _lptr(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cat08:
-; DARWIN-64-PIC: movq _lptr(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 64(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cam00(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @src, i64 0, i64 %0
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: cam00:
-; LINUX-64-STATIC: leaq src+262144(,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cam00:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal src+262144(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cam00:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal src+262144(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cam00:
-; LINUX-64-PIC: movq src@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cam00:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _src+262144(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cam00:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_src$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 262144(%ecx,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cam00:
-; DARWIN-32-PIC: call L121$pb
-; DARWIN-32-PIC-NEXT: L121$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_src$non_lazy_ptr-L121$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: leal 262144(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cam00:
-; DARWIN-64-STATIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cam00:
-; DARWIN-64-DYNAMIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cam00:
-; DARWIN-64-PIC: movq _src@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cxm00(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 65536
- %1 = getelementptr [32 x i32]* @xsrc, i64 0, i64 %0
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: cxm00:
-; LINUX-64-STATIC: leaq xsrc+262144(,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cxm00:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal xsrc+262144(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cxm00:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal xsrc+262144(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cxm00:
-; LINUX-64-PIC: movq xsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cxm00:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _xsrc+262144(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cxm00:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_xsrc$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 262144(%ecx,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cxm00:
-; DARWIN-32-PIC: call L122$pb
-; DARWIN-32-PIC-NEXT: L122$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_xsrc$non_lazy_ptr-L122$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: leal 262144(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cxm00:
-; DARWIN-64-STATIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cxm00:
-; DARWIN-64-DYNAMIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cxm00:
-; DARWIN-64-PIC: movq _xsrc@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cam01(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @dst, i64 0, i64 %0
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: cam01:
-; LINUX-64-STATIC: leaq dst+262144(,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cam01:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal dst+262144(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cam01:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal dst+262144(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cam01:
-; LINUX-64-PIC: movq dst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cam01:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _dst+262144(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cam01:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_dst$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 262144(%ecx,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cam01:
-; DARWIN-32-PIC: call L123$pb
-; DARWIN-32-PIC-NEXT: L123$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_dst$non_lazy_ptr-L123$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: leal 262144(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cam01:
-; DARWIN-64-STATIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cam01:
-; DARWIN-64-DYNAMIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cam01:
-; DARWIN-64-PIC: movq _dst@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cxm01(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 65536
- %1 = getelementptr [32 x i32]* @xdst, i64 0, i64 %0
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: cxm01:
-; LINUX-64-STATIC: leaq xdst+262144(,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cxm01:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal xdst+262144(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cxm01:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal xdst+262144(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cxm01:
-; LINUX-64-PIC: movq xdst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cxm01:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _xdst+262144(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cxm01:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl L_xdst$non_lazy_ptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 262144(%ecx,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cxm01:
-; DARWIN-32-PIC: call L124$pb
-; DARWIN-32-PIC-NEXT: L124$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl L_xdst$non_lazy_ptr-L124$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: leal 262144(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cxm01:
-; DARWIN-64-STATIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cxm01:
-; DARWIN-64-DYNAMIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cxm01:
-; DARWIN-64-PIC: movq _xdst@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cam02(i64 %i) nounwind {
-entry:
- %0 = load i32** @ptr, align 8
- %1 = add i64 %i, 65536
- %2 = getelementptr i32* %0, i64 %1
- %3 = bitcast i32* %2 to i8*
- ret i8* %3
-; LINUX-64-STATIC: cam02:
-; LINUX-64-STATIC: movq ptr(%rip), %rax
-; LINUX-64-STATIC: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cam02:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl ptr, %ecx
-; LINUX-32-STATIC-NEXT: leal 262144(%ecx,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cam02:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl ptr, %ecx
-; LINUX-32-PIC-NEXT: leal 262144(%ecx,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cam02:
-; LINUX-64-PIC: movq ptr@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movq (%rax), %rax
-; LINUX-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cam02:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _ptr, %ecx
-; DARWIN-32-STATIC-NEXT: leal 262144(%ecx,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cam02:
-; DARWIN-32-DYNAMIC: movl L_ptr$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: movl (%eax), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 262144(%eax,%ecx,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cam02:
-; DARWIN-32-PIC: call L125$pb
-; DARWIN-32-PIC-NEXT: L125$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_ptr$non_lazy_ptr-L125$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: movl (%eax), %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: leal 262144(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cam02:
-; DARWIN-64-STATIC: movq _ptr@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: movq (%rax), %rax
-; DARWIN-64-STATIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cam02:
-; DARWIN-64-DYNAMIC: movq _ptr@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: movq (%rax), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cam02:
-; DARWIN-64-PIC: movq _ptr@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: movq (%rax), %rax
-; DARWIN-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cam03(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @dsrc, i64 0, i64 %0
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: cam03:
-; LINUX-64-STATIC: leaq dsrc+262144(,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cam03:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal dsrc+262144(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cam03:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal dsrc+262144(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cam03:
-; LINUX-64-PIC: movq dsrc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cam03:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _dsrc+262144(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cam03:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: leal _dsrc+262144(,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cam03:
-; DARWIN-32-PIC: call L126$pb
-; DARWIN-32-PIC-NEXT: L126$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: leal (_dsrc-L126$pb)+262144(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cam03:
-; DARWIN-64-STATIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cam03:
-; DARWIN-64-DYNAMIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cam03:
-; DARWIN-64-PIC: leaq _dsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cam04(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @ddst, i64 0, i64 %0
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: cam04:
-; LINUX-64-STATIC: leaq ddst+262144(,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cam04:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal ddst+262144(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cam04:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal ddst+262144(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cam04:
-; LINUX-64-PIC: movq ddst@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cam04:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _ddst+262144(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cam04:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: leal _ddst+262144(,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cam04:
-; DARWIN-32-PIC: call L127$pb
-; DARWIN-32-PIC-NEXT: L127$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: leal (_ddst-L127$pb)+262144(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cam04:
-; DARWIN-64-STATIC: leaq _ddst(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cam04:
-; DARWIN-64-DYNAMIC: leaq _ddst(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cam04:
-; DARWIN-64-PIC: leaq _ddst(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cam05(i64 %i) nounwind {
-entry:
- %0 = load i32** @dptr, align 8
- %1 = add i64 %i, 65536
- %2 = getelementptr i32* %0, i64 %1
- %3 = bitcast i32* %2 to i8*
- ret i8* %3
-; LINUX-64-STATIC: cam05:
-; LINUX-64-STATIC: movq dptr(%rip), %rax
-; LINUX-64-STATIC: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cam05:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl dptr, %ecx
-; LINUX-32-STATIC-NEXT: leal 262144(%ecx,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cam05:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl dptr, %ecx
-; LINUX-32-PIC-NEXT: leal 262144(%ecx,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cam05:
-; LINUX-64-PIC: movq dptr@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: movq (%rax), %rax
-; LINUX-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cam05:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _dptr, %ecx
-; DARWIN-32-STATIC-NEXT: leal 262144(%ecx,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cam05:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _dptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 262144(%ecx,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cam05:
-; DARWIN-32-PIC: call L128$pb
-; DARWIN-32-PIC-NEXT: L128$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl _dptr-L128$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: leal 262144(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cam05:
-; DARWIN-64-STATIC: movq _dptr(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cam05:
-; DARWIN-64-DYNAMIC: movq _dptr(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cam05:
-; DARWIN-64-PIC: movq _dptr(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cam06(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @lsrc, i64 0, i64 %0
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: cam06:
-; LINUX-64-STATIC: leaq lsrc+262144(,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cam06:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal lsrc+262144(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cam06:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal lsrc+262144(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cam06:
-; LINUX-64-PIC: leaq lsrc(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cam06:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _lsrc+262144(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cam06:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: leal _lsrc+262144(,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cam06:
-; DARWIN-32-PIC: call L129$pb
-; DARWIN-32-PIC-NEXT: L129$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: leal (_lsrc-L129$pb)+262144(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cam06:
-; DARWIN-64-STATIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cam06:
-; DARWIN-64-DYNAMIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cam06:
-; DARWIN-64-PIC: leaq _lsrc(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cam07(i64 %i) nounwind {
-entry:
- %0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @ldst, i64 0, i64 %0
- %2 = bitcast i32* %1 to i8*
- ret i8* %2
-; LINUX-64-STATIC: cam07:
-; LINUX-64-STATIC: leaq ldst+262144(,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cam07:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: leal ldst+262144(,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cam07:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: leal ldst+262144(,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cam07:
-; LINUX-64-PIC: leaq ldst(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cam07:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: leal _ldst+262144(,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cam07:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: leal _ldst+262144(,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cam07:
-; DARWIN-32-PIC: call L130$pb
-; DARWIN-32-PIC-NEXT: L130$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: leal (_ldst-L130$pb)+262144(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cam07:
-; DARWIN-64-STATIC: leaq _ldst(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cam07:
-; DARWIN-64-DYNAMIC: leaq _ldst(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cam07:
-; DARWIN-64-PIC: leaq _ldst(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define i8* @cam08(i64 %i) nounwind {
-entry:
- %0 = load i32** @lptr, align 8
- %1 = add i64 %i, 65536
- %2 = getelementptr i32* %0, i64 %1
- %3 = bitcast i32* %2 to i8*
- ret i8* %3
-; LINUX-64-STATIC: cam08:
-; LINUX-64-STATIC: movq lptr(%rip), %rax
-; LINUX-64-STATIC: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: cam08:
-; LINUX-32-STATIC: movl 4(%esp), %eax
-; LINUX-32-STATIC-NEXT: movl lptr, %ecx
-; LINUX-32-STATIC-NEXT: leal 262144(%ecx,%eax,4), %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: cam08:
-; LINUX-32-PIC: movl 4(%esp), %eax
-; LINUX-32-PIC-NEXT: movl lptr, %ecx
-; LINUX-32-PIC-NEXT: leal 262144(%ecx,%eax,4), %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: cam08:
-; LINUX-64-PIC: movq lptr(%rip), %rax
-; LINUX-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _cam08:
-; DARWIN-32-STATIC: movl 4(%esp), %eax
-; DARWIN-32-STATIC-NEXT: movl _lptr, %ecx
-; DARWIN-32-STATIC-NEXT: leal 262144(%ecx,%eax,4), %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _cam08:
-; DARWIN-32-DYNAMIC: movl 4(%esp), %eax
-; DARWIN-32-DYNAMIC-NEXT: movl _lptr, %ecx
-; DARWIN-32-DYNAMIC-NEXT: leal 262144(%ecx,%eax,4), %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _cam08:
-; DARWIN-32-PIC: call L131$pb
-; DARWIN-32-PIC-NEXT: L131$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl 4(%esp), %ecx
-; DARWIN-32-PIC-NEXT: movl _lptr-L131$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: leal 262144(%eax,%ecx,4), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _cam08:
-; DARWIN-64-STATIC: movq _lptr(%rip), %rax
-; DARWIN-64-STATIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _cam08:
-; DARWIN-64-DYNAMIC: movq _lptr(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _cam08:
-; DARWIN-64-PIC: movq _lptr(%rip), %rax
-; DARWIN-64-PIC-NEXT: leaq 262144(%rax,%rdi,4), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @lcallee() nounwind {
-entry:
- call void @x() nounwind
- call void @x() nounwind
- call void @x() nounwind
- call void @x() nounwind
- call void @x() nounwind
- call void @x() nounwind
- call void @x() nounwind
- ret void
-; LINUX-64-STATIC: lcallee:
-; LINUX-64-STATIC: callq x
-; LINUX-64-STATIC: callq x
-; LINUX-64-STATIC: callq x
-; LINUX-64-STATIC: callq x
-; LINUX-64-STATIC: callq x
-; LINUX-64-STATIC: callq x
-; LINUX-64-STATIC: callq x
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: lcallee:
-; LINUX-32-STATIC: subl $4, %esp
-; LINUX-32-STATIC-NEXT: call x
-; LINUX-32-STATIC-NEXT: call x
-; LINUX-32-STATIC-NEXT: call x
-; LINUX-32-STATIC-NEXT: call x
-; LINUX-32-STATIC-NEXT: call x
-; LINUX-32-STATIC-NEXT: call x
-; LINUX-32-STATIC-NEXT: call x
-; LINUX-32-STATIC-NEXT: addl $4, %esp
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: lcallee:
-; LINUX-32-PIC: subl $4, %esp
-; LINUX-32-PIC-NEXT: call x
-; LINUX-32-PIC-NEXT: call x
-; LINUX-32-PIC-NEXT: call x
-; LINUX-32-PIC-NEXT: call x
-; LINUX-32-PIC-NEXT: call x
-; LINUX-32-PIC-NEXT: call x
-; LINUX-32-PIC-NEXT: call x
-; LINUX-32-PIC-NEXT: addl $4, %esp
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: lcallee:
-; LINUX-64-PIC: subq $8, %rsp
-; LINUX-64-PIC-NEXT: callq x@PLT
-; LINUX-64-PIC-NEXT: callq x@PLT
-; LINUX-64-PIC-NEXT: callq x@PLT
-; LINUX-64-PIC-NEXT: callq x@PLT
-; LINUX-64-PIC-NEXT: callq x@PLT
-; LINUX-64-PIC-NEXT: callq x@PLT
-; LINUX-64-PIC-NEXT: callq x@PLT
-; LINUX-64-PIC-NEXT: addq $8, %rsp
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _lcallee:
-; DARWIN-32-STATIC: subl $12, %esp
-; DARWIN-32-STATIC-NEXT: call _x
-; DARWIN-32-STATIC-NEXT: call _x
-; DARWIN-32-STATIC-NEXT: call _x
-; DARWIN-32-STATIC-NEXT: call _x
-; DARWIN-32-STATIC-NEXT: call _x
-; DARWIN-32-STATIC-NEXT: call _x
-; DARWIN-32-STATIC-NEXT: call _x
-; DARWIN-32-STATIC-NEXT: addl $12, %esp
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _lcallee:
-; DARWIN-32-DYNAMIC: subl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: call L_x$stub
-; DARWIN-32-DYNAMIC-NEXT: call L_x$stub
-; DARWIN-32-DYNAMIC-NEXT: call L_x$stub
-; DARWIN-32-DYNAMIC-NEXT: call L_x$stub
-; DARWIN-32-DYNAMIC-NEXT: call L_x$stub
-; DARWIN-32-DYNAMIC-NEXT: call L_x$stub
-; DARWIN-32-DYNAMIC-NEXT: call L_x$stub
-; DARWIN-32-DYNAMIC-NEXT: addl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _lcallee:
-; DARWIN-32-PIC: subl $12, %esp
-; DARWIN-32-PIC-NEXT: call L_x$stub
-; DARWIN-32-PIC-NEXT: call L_x$stub
-; DARWIN-32-PIC-NEXT: call L_x$stub
-; DARWIN-32-PIC-NEXT: call L_x$stub
-; DARWIN-32-PIC-NEXT: call L_x$stub
-; DARWIN-32-PIC-NEXT: call L_x$stub
-; DARWIN-32-PIC-NEXT: call L_x$stub
-; DARWIN-32-PIC-NEXT: addl $12, %esp
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _lcallee:
-; DARWIN-64-STATIC: subq $8, %rsp
-; DARWIN-64-STATIC-NEXT: callq _x
-; DARWIN-64-STATIC-NEXT: callq _x
-; DARWIN-64-STATIC-NEXT: callq _x
-; DARWIN-64-STATIC-NEXT: callq _x
-; DARWIN-64-STATIC-NEXT: callq _x
-; DARWIN-64-STATIC-NEXT: callq _x
-; DARWIN-64-STATIC-NEXT: callq _x
-; DARWIN-64-STATIC-NEXT: addq $8, %rsp
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _lcallee:
-; DARWIN-64-DYNAMIC: subq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: callq _x
-; DARWIN-64-DYNAMIC-NEXT: callq _x
-; DARWIN-64-DYNAMIC-NEXT: callq _x
-; DARWIN-64-DYNAMIC-NEXT: callq _x
-; DARWIN-64-DYNAMIC-NEXT: callq _x
-; DARWIN-64-DYNAMIC-NEXT: callq _x
-; DARWIN-64-DYNAMIC-NEXT: callq _x
-; DARWIN-64-DYNAMIC-NEXT: addq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _lcallee:
-; DARWIN-64-PIC: subq $8, %rsp
-; DARWIN-64-PIC-NEXT: callq _x
-; DARWIN-64-PIC-NEXT: callq _x
-; DARWIN-64-PIC-NEXT: callq _x
-; DARWIN-64-PIC-NEXT: callq _x
-; DARWIN-64-PIC-NEXT: callq _x
-; DARWIN-64-PIC-NEXT: callq _x
-; DARWIN-64-PIC-NEXT: callq _x
-; DARWIN-64-PIC-NEXT: addq $8, %rsp
-; DARWIN-64-PIC-NEXT: ret
-}
-
-declare void @x()
-
-define internal void @dcallee() nounwind {
-entry:
- call void @y() nounwind
- call void @y() nounwind
- call void @y() nounwind
- call void @y() nounwind
- call void @y() nounwind
- call void @y() nounwind
- call void @y() nounwind
- ret void
-; LINUX-64-STATIC: dcallee:
-; LINUX-64-STATIC: callq y
-; LINUX-64-STATIC: callq y
-; LINUX-64-STATIC: callq y
-; LINUX-64-STATIC: callq y
-; LINUX-64-STATIC: callq y
-; LINUX-64-STATIC: callq y
-; LINUX-64-STATIC: callq y
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: dcallee:
-; LINUX-32-STATIC: subl $4, %esp
-; LINUX-32-STATIC-NEXT: call y
-; LINUX-32-STATIC-NEXT: call y
-; LINUX-32-STATIC-NEXT: call y
-; LINUX-32-STATIC-NEXT: call y
-; LINUX-32-STATIC-NEXT: call y
-; LINUX-32-STATIC-NEXT: call y
-; LINUX-32-STATIC-NEXT: call y
-; LINUX-32-STATIC-NEXT: addl $4, %esp
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: dcallee:
-; LINUX-32-PIC: subl $4, %esp
-; LINUX-32-PIC-NEXT: call y
-; LINUX-32-PIC-NEXT: call y
-; LINUX-32-PIC-NEXT: call y
-; LINUX-32-PIC-NEXT: call y
-; LINUX-32-PIC-NEXT: call y
-; LINUX-32-PIC-NEXT: call y
-; LINUX-32-PIC-NEXT: call y
-; LINUX-32-PIC-NEXT: addl $4, %esp
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: dcallee:
-; LINUX-64-PIC: subq $8, %rsp
-; LINUX-64-PIC-NEXT: callq y@PLT
-; LINUX-64-PIC-NEXT: callq y@PLT
-; LINUX-64-PIC-NEXT: callq y@PLT
-; LINUX-64-PIC-NEXT: callq y@PLT
-; LINUX-64-PIC-NEXT: callq y@PLT
-; LINUX-64-PIC-NEXT: callq y@PLT
-; LINUX-64-PIC-NEXT: callq y@PLT
-; LINUX-64-PIC-NEXT: addq $8, %rsp
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _dcallee:
-; DARWIN-32-STATIC: subl $12, %esp
-; DARWIN-32-STATIC-NEXT: call _y
-; DARWIN-32-STATIC-NEXT: call _y
-; DARWIN-32-STATIC-NEXT: call _y
-; DARWIN-32-STATIC-NEXT: call _y
-; DARWIN-32-STATIC-NEXT: call _y
-; DARWIN-32-STATIC-NEXT: call _y
-; DARWIN-32-STATIC-NEXT: call _y
-; DARWIN-32-STATIC-NEXT: addl $12, %esp
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _dcallee:
-; DARWIN-32-DYNAMIC: subl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: call L_y$stub
-; DARWIN-32-DYNAMIC-NEXT: call L_y$stub
-; DARWIN-32-DYNAMIC-NEXT: call L_y$stub
-; DARWIN-32-DYNAMIC-NEXT: call L_y$stub
-; DARWIN-32-DYNAMIC-NEXT: call L_y$stub
-; DARWIN-32-DYNAMIC-NEXT: call L_y$stub
-; DARWIN-32-DYNAMIC-NEXT: call L_y$stub
-; DARWIN-32-DYNAMIC-NEXT: addl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _dcallee:
-; DARWIN-32-PIC: subl $12, %esp
-; DARWIN-32-PIC-NEXT: call L_y$stub
-; DARWIN-32-PIC-NEXT: call L_y$stub
-; DARWIN-32-PIC-NEXT: call L_y$stub
-; DARWIN-32-PIC-NEXT: call L_y$stub
-; DARWIN-32-PIC-NEXT: call L_y$stub
-; DARWIN-32-PIC-NEXT: call L_y$stub
-; DARWIN-32-PIC-NEXT: call L_y$stub
-; DARWIN-32-PIC-NEXT: addl $12, %esp
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _dcallee:
-; DARWIN-64-STATIC: subq $8, %rsp
-; DARWIN-64-STATIC-NEXT: callq _y
-; DARWIN-64-STATIC-NEXT: callq _y
-; DARWIN-64-STATIC-NEXT: callq _y
-; DARWIN-64-STATIC-NEXT: callq _y
-; DARWIN-64-STATIC-NEXT: callq _y
-; DARWIN-64-STATIC-NEXT: callq _y
-; DARWIN-64-STATIC-NEXT: callq _y
-; DARWIN-64-STATIC-NEXT: addq $8, %rsp
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _dcallee:
-; DARWIN-64-DYNAMIC: subq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: callq _y
-; DARWIN-64-DYNAMIC-NEXT: callq _y
-; DARWIN-64-DYNAMIC-NEXT: callq _y
-; DARWIN-64-DYNAMIC-NEXT: callq _y
-; DARWIN-64-DYNAMIC-NEXT: callq _y
-; DARWIN-64-DYNAMIC-NEXT: callq _y
-; DARWIN-64-DYNAMIC-NEXT: callq _y
-; DARWIN-64-DYNAMIC-NEXT: addq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _dcallee:
-; DARWIN-64-PIC: subq $8, %rsp
-; DARWIN-64-PIC-NEXT: callq _y
-; DARWIN-64-PIC-NEXT: callq _y
-; DARWIN-64-PIC-NEXT: callq _y
-; DARWIN-64-PIC-NEXT: callq _y
-; DARWIN-64-PIC-NEXT: callq _y
-; DARWIN-64-PIC-NEXT: callq _y
-; DARWIN-64-PIC-NEXT: callq _y
-; DARWIN-64-PIC-NEXT: addq $8, %rsp
-; DARWIN-64-PIC-NEXT: ret
-}
-
-declare void @y()
-
-define void ()* @address() nounwind {
-entry:
- ret void ()* @callee
-; LINUX-64-STATIC: address:
-; LINUX-64-STATIC: movl $callee, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: address:
-; LINUX-32-STATIC: movl $callee, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: address:
-; LINUX-32-PIC: movl $callee, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: address:
-; LINUX-64-PIC: movq callee@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _address:
-; DARWIN-32-STATIC: movl $_callee, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _address:
-; DARWIN-32-DYNAMIC: movl L_callee$non_lazy_ptr, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _address:
-; DARWIN-32-PIC: call L134$pb
-; DARWIN-32-PIC-NEXT: L134$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_callee$non_lazy_ptr-L134$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _address:
-; DARWIN-64-STATIC: movq _callee@GOTPCREL(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _address:
-; DARWIN-64-DYNAMIC: movq _callee@GOTPCREL(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _address:
-; DARWIN-64-PIC: movq _callee@GOTPCREL(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-declare void @callee()
-
-define void ()* @laddress() nounwind {
-entry:
- ret void ()* @lcallee
-; LINUX-64-STATIC: laddress:
-; LINUX-64-STATIC: movl $lcallee, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: laddress:
-; LINUX-32-STATIC: movl $lcallee, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: laddress:
-; LINUX-32-PIC: movl $lcallee, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: laddress:
-; LINUX-64-PIC: movq lcallee@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _laddress:
-; DARWIN-32-STATIC: movl $_lcallee, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _laddress:
-; DARWIN-32-DYNAMIC: movl $_lcallee, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _laddress:
-; DARWIN-32-PIC: call L135$pb
-; DARWIN-32-PIC-NEXT: L135$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal _lcallee-L135$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _laddress:
-; DARWIN-64-STATIC: leaq _lcallee(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _laddress:
-; DARWIN-64-DYNAMIC: leaq _lcallee(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _laddress:
-; DARWIN-64-PIC: leaq _lcallee(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void ()* @daddress() nounwind {
-entry:
- ret void ()* @dcallee
-; LINUX-64-STATIC: daddress:
-; LINUX-64-STATIC: movl $dcallee, %eax
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: daddress:
-; LINUX-32-STATIC: movl $dcallee, %eax
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: daddress:
-; LINUX-32-PIC: movl $dcallee, %eax
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: daddress:
-; LINUX-64-PIC: leaq dcallee(%rip), %rax
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _daddress:
-; DARWIN-32-STATIC: movl $_dcallee, %eax
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _daddress:
-; DARWIN-32-DYNAMIC: movl $_dcallee, %eax
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _daddress:
-; DARWIN-32-PIC: call L136$pb
-; DARWIN-32-PIC-NEXT: L136$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: leal _dcallee-L136$pb(%eax), %eax
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _daddress:
-; DARWIN-64-STATIC: leaq _dcallee(%rip), %rax
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _daddress:
-; DARWIN-64-DYNAMIC: leaq _dcallee(%rip), %rax
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _daddress:
-; DARWIN-64-PIC: leaq _dcallee(%rip), %rax
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @caller() nounwind {
-entry:
- call void @callee() nounwind
- call void @callee() nounwind
- ret void
-; LINUX-64-STATIC: caller:
-; LINUX-64-STATIC: callq callee
-; LINUX-64-STATIC: callq callee
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: caller:
-; LINUX-32-STATIC: subl $4, %esp
-; LINUX-32-STATIC-NEXT: call callee
-; LINUX-32-STATIC-NEXT: call callee
-; LINUX-32-STATIC-NEXT: addl $4, %esp
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: caller:
-; LINUX-32-PIC: subl $4, %esp
-; LINUX-32-PIC-NEXT: call callee
-; LINUX-32-PIC-NEXT: call callee
-; LINUX-32-PIC-NEXT: addl $4, %esp
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: caller:
-; LINUX-64-PIC: subq $8, %rsp
-; LINUX-64-PIC-NEXT: callq callee@PLT
-; LINUX-64-PIC-NEXT: callq callee@PLT
-; LINUX-64-PIC-NEXT: addq $8, %rsp
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _caller:
-; DARWIN-32-STATIC: subl $12, %esp
-; DARWIN-32-STATIC-NEXT: call _callee
-; DARWIN-32-STATIC-NEXT: call _callee
-; DARWIN-32-STATIC-NEXT: addl $12, %esp
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _caller:
-; DARWIN-32-DYNAMIC: subl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: call L_callee$stub
-; DARWIN-32-DYNAMIC-NEXT: call L_callee$stub
-; DARWIN-32-DYNAMIC-NEXT: addl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _caller:
-; DARWIN-32-PIC: subl $12, %esp
-; DARWIN-32-PIC-NEXT: call L_callee$stub
-; DARWIN-32-PIC-NEXT: call L_callee$stub
-; DARWIN-32-PIC-NEXT: addl $12, %esp
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _caller:
-; DARWIN-64-STATIC: subq $8, %rsp
-; DARWIN-64-STATIC-NEXT: callq _callee
-; DARWIN-64-STATIC-NEXT: callq _callee
-; DARWIN-64-STATIC-NEXT: addq $8, %rsp
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _caller:
-; DARWIN-64-DYNAMIC: subq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: callq _callee
-; DARWIN-64-DYNAMIC-NEXT: callq _callee
-; DARWIN-64-DYNAMIC-NEXT: addq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _caller:
-; DARWIN-64-PIC: subq $8, %rsp
-; DARWIN-64-PIC-NEXT: callq _callee
-; DARWIN-64-PIC-NEXT: callq _callee
-; DARWIN-64-PIC-NEXT: addq $8, %rsp
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @dcaller() nounwind {
-entry:
- call void @dcallee() nounwind
- call void @dcallee() nounwind
- ret void
-; LINUX-64-STATIC: dcaller:
-; LINUX-64-STATIC: callq dcallee
-; LINUX-64-STATIC: callq dcallee
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: dcaller:
-; LINUX-32-STATIC: subl $4, %esp
-; LINUX-32-STATIC-NEXT: call dcallee
-; LINUX-32-STATIC-NEXT: call dcallee
-; LINUX-32-STATIC-NEXT: addl $4, %esp
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: dcaller:
-; LINUX-32-PIC: subl $4, %esp
-; LINUX-32-PIC-NEXT: call dcallee
-; LINUX-32-PIC-NEXT: call dcallee
-; LINUX-32-PIC-NEXT: addl $4, %esp
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: dcaller:
-; LINUX-64-PIC: subq $8, %rsp
-; LINUX-64-PIC-NEXT: callq dcallee
-; LINUX-64-PIC-NEXT: callq dcallee
-; LINUX-64-PIC-NEXT: addq $8, %rsp
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _dcaller:
-; DARWIN-32-STATIC: subl $12, %esp
-; DARWIN-32-STATIC-NEXT: call _dcallee
-; DARWIN-32-STATIC-NEXT: call _dcallee
-; DARWIN-32-STATIC-NEXT: addl $12, %esp
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _dcaller:
-; DARWIN-32-DYNAMIC: subl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: call _dcallee
-; DARWIN-32-DYNAMIC-NEXT: call _dcallee
-; DARWIN-32-DYNAMIC-NEXT: addl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _dcaller:
-; DARWIN-32-PIC: subl $12, %esp
-; DARWIN-32-PIC-NEXT: call _dcallee
-; DARWIN-32-PIC-NEXT: call _dcallee
-; DARWIN-32-PIC-NEXT: addl $12, %esp
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _dcaller:
-; DARWIN-64-STATIC: subq $8, %rsp
-; DARWIN-64-STATIC-NEXT: callq _dcallee
-; DARWIN-64-STATIC-NEXT: callq _dcallee
-; DARWIN-64-STATIC-NEXT: addq $8, %rsp
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _dcaller:
-; DARWIN-64-DYNAMIC: subq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: callq _dcallee
-; DARWIN-64-DYNAMIC-NEXT: callq _dcallee
-; DARWIN-64-DYNAMIC-NEXT: addq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _dcaller:
-; DARWIN-64-PIC: subq $8, %rsp
-; DARWIN-64-PIC-NEXT: callq _dcallee
-; DARWIN-64-PIC-NEXT: callq _dcallee
-; DARWIN-64-PIC-NEXT: addq $8, %rsp
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @lcaller() nounwind {
-entry:
- call void @lcallee() nounwind
- call void @lcallee() nounwind
- ret void
-; LINUX-64-STATIC: lcaller:
-; LINUX-64-STATIC: callq lcallee
-; LINUX-64-STATIC: callq lcallee
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: lcaller:
-; LINUX-32-STATIC: subl $4, %esp
-; LINUX-32-STATIC-NEXT: call lcallee
-; LINUX-32-STATIC-NEXT: call lcallee
-; LINUX-32-STATIC-NEXT: addl $4, %esp
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: lcaller:
-; LINUX-32-PIC: subl $4, %esp
-; LINUX-32-PIC-NEXT: call lcallee
-; LINUX-32-PIC-NEXT: call lcallee
-; LINUX-32-PIC-NEXT: addl $4, %esp
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: lcaller:
-; LINUX-64-PIC: subq $8, %rsp
-; LINUX-64-PIC-NEXT: callq lcallee@PLT
-; LINUX-64-PIC-NEXT: callq lcallee@PLT
-; LINUX-64-PIC-NEXT: addq $8, %rsp
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _lcaller:
-; DARWIN-32-STATIC: subl $12, %esp
-; DARWIN-32-STATIC-NEXT: call _lcallee
-; DARWIN-32-STATIC-NEXT: call _lcallee
-; DARWIN-32-STATIC-NEXT: addl $12, %esp
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _lcaller:
-; DARWIN-32-DYNAMIC: subl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: call _lcallee
-; DARWIN-32-DYNAMIC-NEXT: call _lcallee
-; DARWIN-32-DYNAMIC-NEXT: addl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _lcaller:
-; DARWIN-32-PIC: subl $12, %esp
-; DARWIN-32-PIC-NEXT: call _lcallee
-; DARWIN-32-PIC-NEXT: call _lcallee
-; DARWIN-32-PIC-NEXT: addl $12, %esp
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _lcaller:
-; DARWIN-64-STATIC: subq $8, %rsp
-; DARWIN-64-STATIC-NEXT: callq _lcallee
-; DARWIN-64-STATIC-NEXT: callq _lcallee
-; DARWIN-64-STATIC-NEXT: addq $8, %rsp
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _lcaller:
-; DARWIN-64-DYNAMIC: subq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: callq _lcallee
-; DARWIN-64-DYNAMIC-NEXT: callq _lcallee
-; DARWIN-64-DYNAMIC-NEXT: addq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _lcaller:
-; DARWIN-64-PIC: subq $8, %rsp
-; DARWIN-64-PIC-NEXT: callq _lcallee
-; DARWIN-64-PIC-NEXT: callq _lcallee
-; DARWIN-64-PIC-NEXT: addq $8, %rsp
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @tailcaller() nounwind {
-entry:
- call void @callee() nounwind
- ret void
-; LINUX-64-STATIC: tailcaller:
-; LINUX-64-STATIC: callq callee
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: tailcaller:
-; LINUX-32-STATIC: subl $4, %esp
-; LINUX-32-STATIC-NEXT: call callee
-; LINUX-32-STATIC-NEXT: addl $4, %esp
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: tailcaller:
-; LINUX-32-PIC: subl $4, %esp
-; LINUX-32-PIC-NEXT: call callee
-; LINUX-32-PIC-NEXT: addl $4, %esp
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: tailcaller:
-; LINUX-64-PIC: subq $8, %rsp
-; LINUX-64-PIC-NEXT: callq callee@PLT
-; LINUX-64-PIC-NEXT: addq $8, %rsp
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _tailcaller:
-; DARWIN-32-STATIC: subl $12, %esp
-; DARWIN-32-STATIC-NEXT: call _callee
-; DARWIN-32-STATIC-NEXT: addl $12, %esp
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _tailcaller:
-; DARWIN-32-DYNAMIC: subl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: call L_callee$stub
-; DARWIN-32-DYNAMIC-NEXT: addl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _tailcaller:
-; DARWIN-32-PIC: subl $12, %esp
-; DARWIN-32-PIC-NEXT: call L_callee$stub
-; DARWIN-32-PIC-NEXT: addl $12, %esp
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _tailcaller:
-; DARWIN-64-STATIC: subq $8, %rsp
-; DARWIN-64-STATIC-NEXT: callq _callee
-; DARWIN-64-STATIC-NEXT: addq $8, %rsp
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _tailcaller:
-; DARWIN-64-DYNAMIC: subq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: callq _callee
-; DARWIN-64-DYNAMIC-NEXT: addq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _tailcaller:
-; DARWIN-64-PIC: subq $8, %rsp
-; DARWIN-64-PIC-NEXT: callq _callee
-; DARWIN-64-PIC-NEXT: addq $8, %rsp
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @dtailcaller() nounwind {
-entry:
- call void @dcallee() nounwind
- ret void
-; LINUX-64-STATIC: dtailcaller:
-; LINUX-64-STATIC: callq dcallee
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: dtailcaller:
-; LINUX-32-STATIC: subl $4, %esp
-; LINUX-32-STATIC-NEXT: call dcallee
-; LINUX-32-STATIC-NEXT: addl $4, %esp
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: dtailcaller:
-; LINUX-32-PIC: subl $4, %esp
-; LINUX-32-PIC-NEXT: call dcallee
-; LINUX-32-PIC-NEXT: addl $4, %esp
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: dtailcaller:
-; LINUX-64-PIC: subq $8, %rsp
-; LINUX-64-PIC-NEXT: callq dcallee
-; LINUX-64-PIC-NEXT: addq $8, %rsp
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _dtailcaller:
-; DARWIN-32-STATIC: subl $12, %esp
-; DARWIN-32-STATIC-NEXT: call _dcallee
-; DARWIN-32-STATIC-NEXT: addl $12, %esp
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _dtailcaller:
-; DARWIN-32-DYNAMIC: subl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: call _dcallee
-; DARWIN-32-DYNAMIC-NEXT: addl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _dtailcaller:
-; DARWIN-32-PIC: subl $12, %esp
-; DARWIN-32-PIC-NEXT: call _dcallee
-; DARWIN-32-PIC-NEXT: addl $12, %esp
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _dtailcaller:
-; DARWIN-64-STATIC: subq $8, %rsp
-; DARWIN-64-STATIC-NEXT: callq _dcallee
-; DARWIN-64-STATIC-NEXT: addq $8, %rsp
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _dtailcaller:
-; DARWIN-64-DYNAMIC: subq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: callq _dcallee
-; DARWIN-64-DYNAMIC-NEXT: addq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _dtailcaller:
-; DARWIN-64-PIC: subq $8, %rsp
-; DARWIN-64-PIC-NEXT: callq _dcallee
-; DARWIN-64-PIC-NEXT: addq $8, %rsp
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @ltailcaller() nounwind {
-entry:
- call void @lcallee() nounwind
- ret void
-; LINUX-64-STATIC: ltailcaller:
-; LINUX-64-STATIC: callq lcallee
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: ltailcaller:
-; LINUX-32-STATIC: subl $4, %esp
-; LINUX-32-STATIC-NEXT: call lcallee
-; LINUX-32-STATIC-NEXT: addl $4, %esp
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: ltailcaller:
-; LINUX-32-PIC: subl $4, %esp
-; LINUX-32-PIC-NEXT: call lcallee
-; LINUX-32-PIC-NEXT: addl $4, %esp
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: ltailcaller:
-; LINUX-64-PIC: subq $8, %rsp
-; LINUX-64-PIC-NEXT: callq lcallee@PLT
-; LINUX-64-PIC-NEXT: addq $8, %rsp
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _ltailcaller:
-; DARWIN-32-STATIC: subl $12, %esp
-; DARWIN-32-STATIC-NEXT: call _lcallee
-; DARWIN-32-STATIC-NEXT: addl $12, %esp
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _ltailcaller:
-; DARWIN-32-DYNAMIC: subl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: call _lcallee
-; DARWIN-32-DYNAMIC-NEXT: addl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _ltailcaller:
-; DARWIN-32-PIC: subl $12, %esp
-; DARWIN-32-PIC-NEXT: call _lcallee
-; DARWIN-32-PIC-NEXT: addl $12, %esp
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _ltailcaller:
-; DARWIN-64-STATIC: subq $8, %rsp
-; DARWIN-64-STATIC-NEXT: callq _lcallee
-; DARWIN-64-STATIC-NEXT: addq $8, %rsp
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _ltailcaller:
-; DARWIN-64-DYNAMIC: subq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: callq _lcallee
-; DARWIN-64-DYNAMIC-NEXT: addq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _ltailcaller:
-; DARWIN-64-PIC: subq $8, %rsp
-; DARWIN-64-PIC-NEXT: callq _lcallee
-; DARWIN-64-PIC-NEXT: addq $8, %rsp
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @icaller() nounwind {
-entry:
- %0 = load void ()** @ifunc, align 8
- call void %0() nounwind
- %1 = load void ()** @ifunc, align 8
- call void %1() nounwind
- ret void
-; LINUX-64-STATIC: icaller:
-; LINUX-64-STATIC: callq *ifunc
-; LINUX-64-STATIC: callq *ifunc
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: icaller:
-; LINUX-32-STATIC: subl $4, %esp
-; LINUX-32-STATIC-NEXT: call *ifunc
-; LINUX-32-STATIC-NEXT: call *ifunc
-; LINUX-32-STATIC-NEXT: addl $4, %esp
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: icaller:
-; LINUX-32-PIC: subl $4, %esp
-; LINUX-32-PIC-NEXT: call *ifunc
-; LINUX-32-PIC-NEXT: call *ifunc
-; LINUX-32-PIC-NEXT: addl $4, %esp
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: icaller:
-; LINUX-64-PIC: pushq %rbx
-; LINUX-64-PIC-NEXT: movq ifunc@GOTPCREL(%rip), %rbx
-; LINUX-64-PIC-NEXT: callq *(%rbx)
-; LINUX-64-PIC-NEXT: callq *(%rbx)
-; LINUX-64-PIC-NEXT: popq %rbx
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _icaller:
-; DARWIN-32-STATIC: subl $12, %esp
-; DARWIN-32-STATIC-NEXT: call *_ifunc
-; DARWIN-32-STATIC-NEXT: call *_ifunc
-; DARWIN-32-STATIC-NEXT: addl $12, %esp
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _icaller:
-; DARWIN-32-DYNAMIC: pushl %esi
-; DARWIN-32-DYNAMIC-NEXT: subl $8, %esp
-; DARWIN-32-DYNAMIC-NEXT: movl L_ifunc$non_lazy_ptr, %esi
-; DARWIN-32-DYNAMIC-NEXT: call *(%esi)
-; DARWIN-32-DYNAMIC-NEXT: call *(%esi)
-; DARWIN-32-DYNAMIC-NEXT: addl $8, %esp
-; DARWIN-32-DYNAMIC-NEXT: popl %esi
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _icaller:
-; DARWIN-32-PIC: pushl %esi
-; DARWIN-32-PIC-NEXT: subl $8, %esp
-; DARWIN-32-PIC-NEXT: call L143$pb
-; DARWIN-32-PIC-NEXT: L143$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_ifunc$non_lazy_ptr-L143$pb(%eax), %esi
-; DARWIN-32-PIC-NEXT: call *(%esi)
-; DARWIN-32-PIC-NEXT: call *(%esi)
-; DARWIN-32-PIC-NEXT: addl $8, %esp
-; DARWIN-32-PIC-NEXT: popl %esi
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _icaller:
-; DARWIN-64-STATIC: pushq %rbx
-; DARWIN-64-STATIC-NEXT: movq _ifunc@GOTPCREL(%rip), %rbx
-; DARWIN-64-STATIC-NEXT: callq *(%rbx)
-; DARWIN-64-STATIC-NEXT: callq *(%rbx)
-; DARWIN-64-STATIC-NEXT: popq %rbx
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _icaller:
-; DARWIN-64-DYNAMIC: pushq %rbx
-; DARWIN-64-DYNAMIC-NEXT: movq _ifunc@GOTPCREL(%rip), %rbx
-; DARWIN-64-DYNAMIC-NEXT: callq *(%rbx)
-; DARWIN-64-DYNAMIC-NEXT: callq *(%rbx)
-; DARWIN-64-DYNAMIC-NEXT: popq %rbx
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _icaller:
-; DARWIN-64-PIC: pushq %rbx
-; DARWIN-64-PIC-NEXT: movq _ifunc@GOTPCREL(%rip), %rbx
-; DARWIN-64-PIC-NEXT: callq *(%rbx)
-; DARWIN-64-PIC-NEXT: callq *(%rbx)
-; DARWIN-64-PIC-NEXT: popq %rbx
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @dicaller() nounwind {
-entry:
- %0 = load void ()** @difunc, align 8
- call void %0() nounwind
- %1 = load void ()** @difunc, align 8
- call void %1() nounwind
- ret void
-; LINUX-64-STATIC: dicaller:
-; LINUX-64-STATIC: callq *difunc
-; LINUX-64-STATIC: callq *difunc
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: dicaller:
-; LINUX-32-STATIC: subl $4, %esp
-; LINUX-32-STATIC-NEXT: call *difunc
-; LINUX-32-STATIC-NEXT: call *difunc
-; LINUX-32-STATIC-NEXT: addl $4, %esp
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: dicaller:
-; LINUX-32-PIC: subl $4, %esp
-; LINUX-32-PIC-NEXT: call *difunc
-; LINUX-32-PIC-NEXT: call *difunc
-; LINUX-32-PIC-NEXT: addl $4, %esp
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: dicaller:
-; LINUX-64-PIC: pushq %rbx
-; LINUX-64-PIC-NEXT: movq difunc@GOTPCREL(%rip), %rbx
-; LINUX-64-PIC-NEXT: callq *(%rbx)
-; LINUX-64-PIC-NEXT: callq *(%rbx)
-; LINUX-64-PIC-NEXT: popq %rbx
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _dicaller:
-; DARWIN-32-STATIC: subl $12, %esp
-; DARWIN-32-STATIC-NEXT: call *_difunc
-; DARWIN-32-STATIC-NEXT: call *_difunc
-; DARWIN-32-STATIC-NEXT: addl $12, %esp
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _dicaller:
-; DARWIN-32-DYNAMIC: subl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: call *_difunc
-; DARWIN-32-DYNAMIC-NEXT: call *_difunc
-; DARWIN-32-DYNAMIC-NEXT: addl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _dicaller:
-; DARWIN-32-PIC: pushl %esi
-; DARWIN-32-PIC-NEXT: subl $8, %esp
-; DARWIN-32-PIC-NEXT: call L144$pb
-; DARWIN-32-PIC-NEXT: L144$pb:
-; DARWIN-32-PIC-NEXT: popl %esi
-; DARWIN-32-PIC-NEXT: call *_difunc-L144$pb(%esi)
-; DARWIN-32-PIC-NEXT: call *_difunc-L144$pb(%esi)
-; DARWIN-32-PIC-NEXT: addl $8, %esp
-; DARWIN-32-PIC-NEXT: popl %esi
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _dicaller:
-; DARWIN-64-STATIC: subq $8, %rsp
-; DARWIN-64-STATIC-NEXT: callq *_difunc(%rip)
-; DARWIN-64-STATIC-NEXT: callq *_difunc(%rip)
-; DARWIN-64-STATIC-NEXT: addq $8, %rsp
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _dicaller:
-; DARWIN-64-DYNAMIC: subq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: callq *_difunc(%rip)
-; DARWIN-64-DYNAMIC-NEXT: callq *_difunc(%rip)
-; DARWIN-64-DYNAMIC-NEXT: addq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _dicaller:
-; DARWIN-64-PIC: subq $8, %rsp
-; DARWIN-64-PIC-NEXT: callq *_difunc(%rip)
-; DARWIN-64-PIC-NEXT: callq *_difunc(%rip)
-; DARWIN-64-PIC-NEXT: addq $8, %rsp
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @licaller() nounwind {
-entry:
- %0 = load void ()** @lifunc, align 8
- call void %0() nounwind
- %1 = load void ()** @lifunc, align 8
- call void %1() nounwind
- ret void
-; LINUX-64-STATIC: licaller:
-; LINUX-64-STATIC: callq *lifunc
-; LINUX-64-STATIC: callq *lifunc
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: licaller:
-; LINUX-32-STATIC: subl $4, %esp
-; LINUX-32-STATIC-NEXT: call *lifunc
-; LINUX-32-STATIC-NEXT: call *lifunc
-; LINUX-32-STATIC-NEXT: addl $4, %esp
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: licaller:
-; LINUX-32-PIC: subl $4, %esp
-; LINUX-32-PIC-NEXT: call *lifunc
-; LINUX-32-PIC-NEXT: call *lifunc
-; LINUX-32-PIC-NEXT: addl $4, %esp
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: licaller:
-; LINUX-64-PIC: subq $8, %rsp
-; LINUX-64-PIC-NEXT: callq *lifunc(%rip)
-; LINUX-64-PIC-NEXT: callq *lifunc(%rip)
-; LINUX-64-PIC-NEXT: addq $8, %rsp
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _licaller:
-; DARWIN-32-STATIC: subl $12, %esp
-; DARWIN-32-STATIC-NEXT: call *_lifunc
-; DARWIN-32-STATIC-NEXT: call *_lifunc
-; DARWIN-32-STATIC-NEXT: addl $12, %esp
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _licaller:
-; DARWIN-32-DYNAMIC: subl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: call *_lifunc
-; DARWIN-32-DYNAMIC-NEXT: call *_lifunc
-; DARWIN-32-DYNAMIC-NEXT: addl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _licaller:
-; DARWIN-32-PIC: pushl %esi
-; DARWIN-32-PIC-NEXT: subl $8, %esp
-; DARWIN-32-PIC-NEXT: call L145$pb
-; DARWIN-32-PIC-NEXT: L145$pb:
-; DARWIN-32-PIC-NEXT: popl %esi
-; DARWIN-32-PIC-NEXT: call *_lifunc-L145$pb(%esi)
-; DARWIN-32-PIC-NEXT: call *_lifunc-L145$pb(%esi)
-; DARWIN-32-PIC-NEXT: addl $8, %esp
-; DARWIN-32-PIC-NEXT: popl %esi
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _licaller:
-; DARWIN-64-STATIC: subq $8, %rsp
-; DARWIN-64-STATIC-NEXT: callq *_lifunc(%rip)
-; DARWIN-64-STATIC-NEXT: callq *_lifunc(%rip)
-; DARWIN-64-STATIC-NEXT: addq $8, %rsp
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _licaller:
-; DARWIN-64-DYNAMIC: subq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: callq *_lifunc(%rip)
-; DARWIN-64-DYNAMIC-NEXT: callq *_lifunc(%rip)
-; DARWIN-64-DYNAMIC-NEXT: addq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _licaller:
-; DARWIN-64-PIC: subq $8, %rsp
-; DARWIN-64-PIC-NEXT: callq *_lifunc(%rip)
-; DARWIN-64-PIC-NEXT: callq *_lifunc(%rip)
-; DARWIN-64-PIC-NEXT: addq $8, %rsp
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @itailcaller() nounwind {
-entry:
- %0 = load void ()** @ifunc, align 8
- call void %0() nounwind
- %1 = load void ()** @ifunc, align 8
- call void %1() nounwind
- ret void
-; LINUX-64-STATIC: itailcaller:
-; LINUX-64-STATIC: callq *ifunc
-; LINUX-64-STATIC: callq *ifunc
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: itailcaller:
-; LINUX-32-STATIC: subl $4, %esp
-; LINUX-32-STATIC-NEXT: call *ifunc
-; LINUX-32-STATIC-NEXT: call *ifunc
-; LINUX-32-STATIC-NEXT: addl $4, %esp
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: itailcaller:
-; LINUX-32-PIC: subl $4, %esp
-; LINUX-32-PIC-NEXT: call *ifunc
-; LINUX-32-PIC-NEXT: call *ifunc
-; LINUX-32-PIC-NEXT: addl $4, %esp
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: itailcaller:
-; LINUX-64-PIC: pushq %rbx
-; LINUX-64-PIC-NEXT: movq ifunc@GOTPCREL(%rip), %rbx
-; LINUX-64-PIC-NEXT: callq *(%rbx)
-; LINUX-64-PIC-NEXT: callq *(%rbx)
-; LINUX-64-PIC-NEXT: popq %rbx
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _itailcaller:
-; DARWIN-32-STATIC: subl $12, %esp
-; DARWIN-32-STATIC-NEXT: call *_ifunc
-; DARWIN-32-STATIC-NEXT: call *_ifunc
-; DARWIN-32-STATIC-NEXT: addl $12, %esp
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _itailcaller:
-; DARWIN-32-DYNAMIC: pushl %esi
-; DARWIN-32-DYNAMIC-NEXT: subl $8, %esp
-; DARWIN-32-DYNAMIC-NEXT: movl L_ifunc$non_lazy_ptr, %esi
-; DARWIN-32-DYNAMIC-NEXT: call *(%esi)
-; DARWIN-32-DYNAMIC-NEXT: call *(%esi)
-; DARWIN-32-DYNAMIC-NEXT: addl $8, %esp
-; DARWIN-32-DYNAMIC-NEXT: popl %esi
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _itailcaller:
-; DARWIN-32-PIC: pushl %esi
-; DARWIN-32-PIC-NEXT: subl $8, %esp
-; DARWIN-32-PIC-NEXT: call L146$pb
-; DARWIN-32-PIC-NEXT: L146$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: movl L_ifunc$non_lazy_ptr-L146$pb(%eax), %esi
-; DARWIN-32-PIC-NEXT: call *(%esi)
-; DARWIN-32-PIC-NEXT: call *(%esi)
-; DARWIN-32-PIC-NEXT: addl $8, %esp
-; DARWIN-32-PIC-NEXT: popl %esi
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _itailcaller:
-; DARWIN-64-STATIC: pushq %rbx
-; DARWIN-64-STATIC-NEXT: movq _ifunc@GOTPCREL(%rip), %rbx
-; DARWIN-64-STATIC-NEXT: callq *(%rbx)
-; DARWIN-64-STATIC-NEXT: callq *(%rbx)
-; DARWIN-64-STATIC-NEXT: popq %rbx
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _itailcaller:
-; DARWIN-64-DYNAMIC: pushq %rbx
-; DARWIN-64-DYNAMIC-NEXT: movq _ifunc@GOTPCREL(%rip), %rbx
-; DARWIN-64-DYNAMIC-NEXT: callq *(%rbx)
-; DARWIN-64-DYNAMIC-NEXT: callq *(%rbx)
-; DARWIN-64-DYNAMIC-NEXT: popq %rbx
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _itailcaller:
-; DARWIN-64-PIC: pushq %rbx
-; DARWIN-64-PIC-NEXT: movq _ifunc@GOTPCREL(%rip), %rbx
-; DARWIN-64-PIC-NEXT: callq *(%rbx)
-; DARWIN-64-PIC-NEXT: callq *(%rbx)
-; DARWIN-64-PIC-NEXT: popq %rbx
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @ditailcaller() nounwind {
-entry:
- %0 = load void ()** @difunc, align 8
- call void %0() nounwind
- ret void
-; LINUX-64-STATIC: ditailcaller:
-; LINUX-64-STATIC: callq *difunc
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: ditailcaller:
-; LINUX-32-STATIC: subl $4, %esp
-; LINUX-32-STATIC-NEXT: call *difunc
-; LINUX-32-STATIC-NEXT: addl $4, %esp
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: ditailcaller:
-; LINUX-32-PIC: subl $4, %esp
-; LINUX-32-PIC-NEXT: call *difunc
-; LINUX-32-PIC-NEXT: addl $4, %esp
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: ditailcaller:
-; LINUX-64-PIC: subq $8, %rsp
-; LINUX-64-PIC-NEXT: movq difunc@GOTPCREL(%rip), %rax
-; LINUX-64-PIC-NEXT: callq *(%rax)
-; LINUX-64-PIC-NEXT: addq $8, %rsp
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _ditailcaller:
-; DARWIN-32-STATIC: subl $12, %esp
-; DARWIN-32-STATIC-NEXT: call *_difunc
-; DARWIN-32-STATIC-NEXT: addl $12, %esp
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _ditailcaller:
-; DARWIN-32-DYNAMIC: subl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: call *_difunc
-; DARWIN-32-DYNAMIC-NEXT: addl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _ditailcaller:
-; DARWIN-32-PIC: subl $12, %esp
-; DARWIN-32-PIC-NEXT: call L147$pb
-; DARWIN-32-PIC-NEXT: L147$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: call *_difunc-L147$pb(%eax)
-; DARWIN-32-PIC-NEXT: addl $12, %esp
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _ditailcaller:
-; DARWIN-64-STATIC: subq $8, %rsp
-; DARWIN-64-STATIC-NEXT: callq *_difunc(%rip)
-; DARWIN-64-STATIC-NEXT: addq $8, %rsp
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _ditailcaller:
-; DARWIN-64-DYNAMIC: subq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: callq *_difunc(%rip)
-; DARWIN-64-DYNAMIC-NEXT: addq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _ditailcaller:
-; DARWIN-64-PIC: callq *_difunc(%rip)
-; DARWIN-64-PIC-NEXT: addq $8, %rsp
-; DARWIN-64-PIC-NEXT: ret
-}
-
-define void @litailcaller() nounwind {
-entry:
- %0 = load void ()** @lifunc, align 8
- call void %0() nounwind
- ret void
-; LINUX-64-STATIC: litailcaller:
-; LINUX-64-STATIC: callq *lifunc
-; LINUX-64-STATIC: ret
-
-; LINUX-32-STATIC: litailcaller:
-; LINUX-32-STATIC: subl $4, %esp
-; LINUX-32-STATIC-NEXT: call *lifunc
-; LINUX-32-STATIC-NEXT: addl $4, %esp
-; LINUX-32-STATIC-NEXT: ret
-
-; LINUX-32-PIC: litailcaller:
-; LINUX-32-PIC: subl $4, %esp
-; LINUX-32-PIC-NEXT: call *lifunc
-; LINUX-32-PIC-NEXT: addl $4, %esp
-; LINUX-32-PIC-NEXT: ret
-
-; LINUX-64-PIC: litailcaller:
-; LINUX-64-PIC: subq $8, %rsp
-; LINUX-64-PIC-NEXT: callq *lifunc(%rip)
-; LINUX-64-PIC-NEXT: addq $8, %rsp
-; LINUX-64-PIC-NEXT: ret
-
-; DARWIN-32-STATIC: _litailcaller:
-; DARWIN-32-STATIC: subl $12, %esp
-; DARWIN-32-STATIC-NEXT: call *_lifunc
-; DARWIN-32-STATIC-NEXT: addl $12, %esp
-; DARWIN-32-STATIC-NEXT: ret
-
-; DARWIN-32-DYNAMIC: _litailcaller:
-; DARWIN-32-DYNAMIC: subl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: call *_lifunc
-; DARWIN-32-DYNAMIC-NEXT: addl $12, %esp
-; DARWIN-32-DYNAMIC-NEXT: ret
-
-; DARWIN-32-PIC: _litailcaller:
-; DARWIN-32-PIC: subl $12, %esp
-; DARWIN-32-PIC-NEXT: call L148$pb
-; DARWIN-32-PIC-NEXT: L148$pb:
-; DARWIN-32-PIC-NEXT: popl %eax
-; DARWIN-32-PIC-NEXT: call *_lifunc-L148$pb(%eax)
-; DARWIN-32-PIC-NEXT: addl $12, %esp
-; DARWIN-32-PIC-NEXT: ret
-
-; DARWIN-64-STATIC: _litailcaller:
-; DARWIN-64-STATIC: subq $8, %rsp
-; DARWIN-64-STATIC-NEXT: callq *_lifunc(%rip)
-; DARWIN-64-STATIC-NEXT: addq $8, %rsp
-; DARWIN-64-STATIC-NEXT: ret
-
-; DARWIN-64-DYNAMIC: _litailcaller:
-; DARWIN-64-DYNAMIC: subq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: callq *_lifunc(%rip)
-; DARWIN-64-DYNAMIC-NEXT: addq $8, %rsp
-; DARWIN-64-DYNAMIC-NEXT: ret
-
-; DARWIN-64-PIC: _litailcaller:
-; DARWIN-64-PIC: subq $8, %rsp
-; DARWIN-64-PIC-NEXT: callq *_lifunc(%rip)
-; DARWIN-64-PIC-NEXT: addq $8, %rsp
-; DARWIN-64-PIC-NEXT: ret
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/add.ll b/libclamav/c++/llvm/test/CodeGen/X86/add.ll
deleted file mode 100644
index 3991a68..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/add.ll
+++ /dev/null
@@ -1,94 +0,0 @@
-; RUN: llc < %s -march=x86 | FileCheck %s -check-prefix=X32
-; RUN: llc < %s -march=x86-64 | FileCheck %s -check-prefix=X64
-
-; The immediate can be encoded in a smaller way if the
-; instruction is a sub instead of an add.
-
-define i32 @test1(i32 inreg %a) nounwind {
- %b = add i32 %a, 128
- ret i32 %b
-; X32: subl $-128, %eax
-; X64: subl $-128,
-}
-define i64 @test2(i64 inreg %a) nounwind {
- %b = add i64 %a, 2147483648
- ret i64 %b
-; X32: addl $-2147483648, %eax
-; X64: subq $-2147483648,
-}
-define i64 @test3(i64 inreg %a) nounwind {
- %b = add i64 %a, 128
- ret i64 %b
-
-; X32: addl $128, %eax
-; X64: subq $-128,
-}
-
-define i1 @test4(i32 %v1, i32 %v2, i32* %X) nounwind {
-entry:
- %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
- %sum = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %overflow, label %normal
-
-normal:
- store i32 0, i32* %X
- br label %overflow
-
-overflow:
- ret i1 false
-
-; X32: test4:
-; X32: addl
-; X32-NEXT: jo
-
-; X64: test4:
-; X64: addl %esi, %edi
-; X64-NEXT: jo
-}
-
-define i1 @test5(i32 %v1, i32 %v2, i32* %X) nounwind {
-entry:
- %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
- %sum = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %carry, label %normal
-
-normal:
- store i32 0, i32* %X
- br label %carry
-
-carry:
- ret i1 false
-
-; X32: test5:
-; X32: addl
-; X32-NEXT: jb
-
-; X64: test5:
-; X64: addl %esi, %edi
-; X64-NEXT: jb
-}
-
-declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32)
-declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32)
-
-
-define i64 @test6(i64 %A, i32 %B) nounwind {
- %tmp12 = zext i32 %B to i64 ; <i64> [#uses=1]
- %tmp3 = shl i64 %tmp12, 32 ; <i64> [#uses=1]
- %tmp5 = add i64 %tmp3, %A ; <i64> [#uses=1]
- ret i64 %tmp5
-
-; X32: test6:
-; X32: movl 12(%esp), %edx
-; X32-NEXT: addl 8(%esp), %edx
-; X32-NEXT: movl 4(%esp), %eax
-; X32-NEXT: ret
-
-; X64: test6:
-; X64: shlq $32, %rsi
-; X64: leaq (%rsi,%rdi), %rax
-; X64: ret
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/addr-label-difference.ll b/libclamav/c++/llvm/test/CodeGen/X86/addr-label-difference.ll
deleted file mode 100644
index be0908a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/addr-label-difference.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc %s -o - | grep {__TEXT,__const}
-; PR5929
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
-target triple = "i386-apple-darwin10.0"
-
-; This array should go into the __TEXT,__const section, not into the
-; __DATA,__const section, because the elements don't need relocations.
-@test.array = internal constant [3 x i32] [i32 sub (i32 ptrtoint (i8* blockaddress(@test, %foo) to i32), i32 ptrtoint (i8* blockaddress(@test, %foo) to i32)), i32 sub (i32 ptrtoint (i8* blockaddress(@test, %bar) to i32), i32 ptrtoint (i8* blockaddress(@test, %foo) to i32)), i32 sub (i32 ptrtoint (i8* blockaddress(@test, %hack) to i32), i32 ptrtoint (i8* blockaddress(@test, %foo) to i32))] ; <[3 x i32]*> [#uses=1]
-
-define void @test(i32 %i) nounwind ssp {
-entry:
- call void @test(i32 1)
- br label %foo
-
-foo:
- call void @test(i32 1)
- br label %bar
-
-bar:
- call void @test(i32 1)
- br label %hack
-
-hack:
- call void @test(i32 1)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/aliases.ll b/libclamav/c++/llvm/test/CodeGen/X86/aliases.ll
deleted file mode 100644
index 3020eb3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/aliases.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llc < %s -mtriple=i686-pc-linux-gnu -asm-verbose=false -o %t
-; RUN: grep { = } %t | count 7
-; RUN: grep set %t | count 16
-; RUN: grep globl %t | count 6
-; RUN: grep weak %t | count 1
-; RUN: grep hidden %t | count 1
-; RUN: grep protected %t | count 1
-
-@bar = external global i32
-@foo1 = alias i32* @bar
-@foo2 = alias i32* @bar
-
-%FunTy = type i32()
-
-declare i32 @foo_f()
-@bar_f = alias weak %FunTy* @foo_f
-
-@bar_i = alias internal i32* @bar
-
-@A = alias bitcast (i32* @bar to i64*)
-
-@bar_h = hidden alias i32* @bar
-
-@bar_p = protected alias i32* @bar
-
-define i32 @test() {
-entry:
- %tmp = load i32* @foo1
- %tmp1 = load i32* @foo2
- %tmp0 = load i32* @bar_i
- %tmp2 = call i32 @foo_f()
- %tmp3 = add i32 %tmp, %tmp2
- %tmp4 = call %FunTy* @bar_f()
- %tmp5 = add i32 %tmp3, %tmp4
- %tmp6 = add i32 %tmp1, %tmp5
- %tmp7 = add i32 %tmp6, %tmp0
- ret i32 %tmp7
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/aligned-comm.ll b/libclamav/c++/llvm/test/CodeGen/X86/aligned-comm.ll
deleted file mode 100644
index 7715869..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/aligned-comm.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86
-; RUN: llc < %s -mtriple=i386-apple-darwin10 | grep {array,16512,7}
-; RUN: llc < %s -mtriple=i386-apple-darwin9 | grep {array,16512,7}
-
-; Darwin 9+ should get alignment on common symbols.
-@array = common global [4128 x i32] zeroinitializer, align 128
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/all-ones-vector.ll b/libclamav/c++/llvm/test/CodeGen/X86/all-ones-vector.ll
deleted file mode 100644
index 10fecad..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/all-ones-vector.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=sse2 | grep pcmpeqd | count 4
-
-define <4 x i32> @ioo() nounwind {
- ret <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
-}
-define <2 x i64> @loo() nounwind {
- ret <2 x i64> <i64 -1, i64 -1>
-}
-define <2 x double> @doo() nounwind {
- ret <2 x double> <double 0xffffffffffffffff, double 0xffffffffffffffff>
-}
-define <4 x float> @foo() nounwind {
- ret <4 x float> <float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000, float 0xffffffffe0000000>
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/alloca-align-rounding.ll b/libclamav/c++/llvm/test/CodeGen/X86/alloca-align-rounding.ll
deleted file mode 100644
index f45e9b8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/alloca-align-rounding.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-apple-darwin | grep and | count 1
-; RUN: llc < %s -march=x86-64 -mtriple=i686-pc-linux | grep and | count 1
-
-declare void @bar(<2 x i64>* %n)
-
-define void @foo(i32 %h) {
- %p = alloca <2 x i64>, i32 %h
- call void @bar(<2 x i64>* %p)
- ret void
-}
-
-define void @foo2(i32 %h) {
- %p = alloca <2 x i64>, i32 %h, align 32
- call void @bar(<2 x i64>* %p)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/and-or-fold.ll b/libclamav/c++/llvm/test/CodeGen/X86/and-or-fold.ll
deleted file mode 100644
index 8d04396..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/and-or-fold.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin | FileCheck -check-prefix=DARWIN %s
-; ClamAV local: no opt
-; RUNX: opt < %s -O2 | llc -mtriple=x86_64-apple-darwin | FileCheck -check-prefix=DARWIN-OPT %s
-
-; The dag combiner should fold together (x&127)|(y&16711680) -> (x|y)&c1
-; in this case.
-
-define i32 @test1(i32 %x, i16 %y) {
- %tmp1 = zext i16 %y to i32
- %tmp2 = and i32 %tmp1, 127
- %tmp4 = shl i32 %x, 16
- %tmp5 = and i32 %tmp4, 16711680
- %tmp6 = or i32 %tmp2, %tmp5
- ret i32 %tmp6
-; DARWIN: andl $16711807, %eax
-}
-
-; <rdar://problem/7529774> The optimizer shouldn't fold this into (and (or, C), D)
-; if (C & D) == 0
-define i64 @test2(i64 %x) nounwind readnone ssp {
-entry:
- %tmp1 = and i64 %x, 123127
- %tmp2 = or i64 %tmp1, 3
- ret i64 %tmp2
-; DARWIN-OPT: andq $123124
-; DARWIN-OPT-NEXT: leaq 3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/and-su.ll b/libclamav/c++/llvm/test/CodeGen/X86/and-su.ll
deleted file mode 100644
index 38db88a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/and-su.ll
+++ /dev/null
@@ -1,53 +0,0 @@
-; RUN: llc < %s -march=x86 | FileCheck %s
-
-; Don't duplicate the load.
-
-define fastcc i32 @foo(i32* %p) nounwind {
-; CHECK: foo:
-; CHECK: andl $10, %eax
-; CHECK: je
- %t0 = load i32* %p
- %t2 = and i32 %t0, 10
- %t3 = icmp ne i32 %t2, 0
- br i1 %t3, label %bb63, label %bb76
-bb63:
- ret i32 %t2
-bb76:
- ret i32 0
-}
-
-define fastcc double @bar(i32 %hash, double %x, double %y) nounwind {
-entry:
-; CHECK: bar:
- %0 = and i32 %hash, 15
- %1 = icmp ult i32 %0, 8
- br i1 %1, label %bb11, label %bb10
-
-bb10:
-; CHECK: bb10
-; CHECK: testb $1
- %2 = and i32 %hash, 1
- %3 = icmp eq i32 %2, 0
- br i1 %3, label %bb13, label %bb11
-
-bb11:
- %4 = fsub double -0.000000e+00, %x
- br label %bb13
-
-bb13:
-; CHECK: bb13
-; CHECK: testb $2
- %iftmp.9.0 = phi double [ %4, %bb11 ], [ %x, %bb10 ]
- %5 = and i32 %hash, 2
- %6 = icmp eq i32 %5, 0
- br i1 %6, label %bb16, label %bb14
-
-bb14:
- %7 = fsub double -0.000000e+00, %y
- br label %bb16
-
-bb16:
- %iftmp.10.0 = phi double [ %7, %bb14 ], [ %y, %bb13 ]
- %8 = fadd double %iftmp.9.0, %iftmp.10.0
- ret double %8
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/anyext.ll b/libclamav/c++/llvm/test/CodeGen/X86/anyext.ll
deleted file mode 100644
index 106fe83..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/anyext.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movzbl | count 2
-
-; Use movzbl to avoid partial-register updates.
-
-define i32 @foo(i32 %p, i8 zeroext %x) nounwind {
- %q = trunc i32 %p to i8
- %r = udiv i8 %q, %x
- %s = zext i8 %r to i32
- %t = and i32 %s, 1
- ret i32 %t
-}
-define i32 @bar(i32 %p, i16 zeroext %x) nounwind {
- %q = trunc i32 %p to i16
- %r = udiv i16 %q, %x
- %s = zext i16 %r to i32
- %t = and i32 %s, 1
- ret i32 %t
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/arg-cast.ll b/libclamav/c++/llvm/test/CodeGen/X86/arg-cast.ll
deleted file mode 100644
index c111514..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/arg-cast.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; This should compile to movl $2147483647, %eax + andl only.
-; RUN: llc < %s | grep andl
-; RUN: llc < %s | not grep movsd
-; RUN: llc < %s | grep esp | not grep add
-; rdar://5736574
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin8"
-
-define i32 @foo(double %x) nounwind {
-entry:
- %x15 = bitcast double %x to i64 ; <i64> [#uses=1]
- %tmp713 = lshr i64 %x15, 32 ; <i64> [#uses=1]
- %tmp714 = trunc i64 %tmp713 to i32 ; <i32> [#uses=1]
- %tmp8 = and i32 %tmp714, 2147483647 ; <i32> [#uses=1]
- ret i32 %tmp8
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/asm-block-labels.ll b/libclamav/c++/llvm/test/CodeGen/X86/asm-block-labels.ll
deleted file mode 100644
index 9c96517..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/asm-block-labels.ll
+++ /dev/null
@@ -1,43 +0,0 @@
-; ClamAV local: no opt
-; RUNX: opt < %s -std-compile-opts | llc
-; RUN: true
-; ModuleID = 'block12.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin8"
-
-define void @bar() {
-entry:
- br label %"LASM$foo"
-
-"LASM$foo": ; preds = %entry
- call void asm sideeffect ".file \22block12.c\22", "~{dirflag},~{fpsr},~{flags}"( )
- call void asm sideeffect ".line 1", "~{dirflag},~{fpsr},~{flags}"( )
- call void asm sideeffect "int $$1", "~{dirflag},~{fpsr},~{flags},~{memory}"( )
- call void asm sideeffect ".file \22block12.c\22", "~{dirflag},~{fpsr},~{flags}"( )
- call void asm sideeffect ".line 2", "~{dirflag},~{fpsr},~{flags}"( )
- call void asm sideeffect "brl ${0:l}", "X,~{dirflag},~{fpsr},~{flags},~{memory}"( label %"LASM$foo" )
- br label %return
-
-return: ; preds = %"LASM$foo"
- ret void
-}
-
-define void @baz() {
-entry:
- call void asm sideeffect ".file \22block12.c\22", "~{dirflag},~{fpsr},~{flags}"( )
- call void asm sideeffect ".line 3", "~{dirflag},~{fpsr},~{flags}"( )
- call void asm sideeffect "brl ${0:l}", "X,~{dirflag},~{fpsr},~{flags},~{memory}"( label %"LASM$foo" )
- call void asm sideeffect ".file \22block12.c\22", "~{dirflag},~{fpsr},~{flags}"( )
- call void asm sideeffect ".line 4", "~{dirflag},~{fpsr},~{flags}"( )
- call void asm sideeffect "int $$1", "~{dirflag},~{fpsr},~{flags},~{memory}"( )
- br label %"LASM$foo"
-
-"LASM$foo": ; preds = %entry
- call void asm sideeffect ".file \22block12.c\22", "~{dirflag},~{fpsr},~{flags}"( )
- call void asm sideeffect ".line 5", "~{dirflag},~{fpsr},~{flags}"( )
- call void asm sideeffect "int $$1", "~{dirflag},~{fpsr},~{flags},~{memory}"( )
- br label %return
-
-return: ; preds = %"LASM$foo"
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/asm-global-imm.ll b/libclamav/c++/llvm/test/CodeGen/X86/asm-global-imm.ll
deleted file mode 100644
index 96da224..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/asm-global-imm.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86 -relocation-model=static | \
-; RUN: grep {test1 \$_GV}
-; RUN: llc < %s -march=x86 -relocation-model=static | \
-; RUN: grep {test2 _GV}
-; PR882
-
-target datalayout = "e-p:32:32"
-target triple = "i686-apple-darwin9.0.0d2"
-@GV = weak global i32 0		; <i32*> [#uses=2]
-@str = external global [12 x i8]		; <[12 x i8]*> [#uses=1]
-
-define void @foo() {
-entry:
- tail call void asm sideeffect "test1 $0", "i,~{dirflag},~{fpsr},~{flags}"( i32* @GV )
- tail call void asm sideeffect "test2 ${0:c}", "i,~{dirflag},~{fpsr},~{flags}"( i32* @GV )
- ret void
-}
-
-define void @unknown_bootoption() {
-entry:
- call void asm sideeffect "ud2\0A\09.word ${0:c}\0A\09.long ${1:c}\0A", "i,i,~{dirflag},~{fpsr},~{flags}"( i32 235, i8* getelementptr ([12 x i8]* @str, i32 0, i64 0) )
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/asm-indirect-mem.ll b/libclamav/c++/llvm/test/CodeGen/X86/asm-indirect-mem.ll
deleted file mode 100644
index c57aa99..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/asm-indirect-mem.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s
-; PR2267
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
-
-define void @atomic_store_rel_int(i32* %p, i32 %v) nounwind {
-entry:
- %asmtmp = tail call i32 asm sideeffect "xchgl $1,$0", "=*m,=r,*m,1,~{dirflag},~{fpsr},~{flags}"( i32* %p, i32* %p, i32 %v ) nounwind ; <i32> [#uses=0]
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/asm-modifier-P.ll b/libclamav/c++/llvm/test/CodeGen/X86/asm-modifier-P.ll
deleted file mode 100644
index 6139da8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/asm-modifier-P.ll
+++ /dev/null
@@ -1,79 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-unknown-linux-gnu -relocation-model=pic | FileCheck %s -check-prefix=CHECK-PIC-32
-; RUN: llc < %s -march=x86 -mtriple=i686-unknown-linux-gnu -relocation-model=static | FileCheck %s -check-prefix=CHECK-STATIC-32
-; RUN: llc < %s -march=x86-64 -relocation-model=static | FileCheck %s -check-prefix=CHECK-STATIC-64
-; RUN: llc < %s -march=x86-64 -relocation-model=pic | FileCheck %s -check-prefix=CHECK-PIC-64
-; PR3379
-; XFAIL: *
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-@G = external global i32		; <i32*> [#uses=1]
-
-declare void @bar(...)
-
-; extern int G;
-; void test1() {
-; asm("frob %0 x" : : "m"(G));
-; asm("frob %P0 x" : : "m"(G));
-;}
-
-define void @test1() nounwind {
-entry:
-; P suffix removes (rip) in -static 64-bit mode.
-
-; CHECK-PIC-64: test1:
-; CHECK-PIC-64: movq G@GOTPCREL(%rip), %rax
-; CHECK-PIC-64: frob (%rax) x
-; CHECK-PIC-64: frob (%rax) x
-
-; CHECK-STATIC-64: test1:
-; CHECK-STATIC-64: frob G(%rip) x
-; CHECK-STATIC-64: frob G x
-
-; CHECK-PIC-32: test1:
-; CHECK-PIC-32: frob G x
-; CHECK-PIC-32: frob G x
-
-; CHECK-STATIC-32: test1:
-; CHECK-STATIC-32: frob G x
-; CHECK-STATIC-32: frob G x
-
- call void asm "frob $0 x", "*m"(i32* @G) nounwind
- call void asm "frob ${0:P} x", "*m"(i32* @G) nounwind
- ret void
-}
-
-define void @test3() nounwind {
-entry:
-; CHECK-STATIC-64: test3:
-; CHECK-STATIC-64: call bar
-; CHECK-STATIC-64: call test3
-; CHECK-STATIC-64: call $bar
-; CHECK-STATIC-64: call $test3
-
-; CHECK-STATIC-32: test3:
-; CHECK-STATIC-32: call bar
-; CHECK-STATIC-32: call test3
-; CHECK-STATIC-32: call $bar
-; CHECK-STATIC-32: call $test3
-
-; CHECK-PIC-64: test3:
-; CHECK-PIC-64: call bar@PLT
-; CHECK-PIC-64: call test3@PLT
-; CHECK-PIC-64: call $bar
-; CHECK-PIC-64: call $test3
-
-; CHECK-PIC-32: test3:
-; CHECK-PIC-32: call bar@PLT
-; CHECK-PIC-32: call test3@PLT
-; CHECK-PIC-32: call $bar
-; CHECK-PIC-32: call $test3
-
-
-; asm(" blah %P0" : : "X"(bar));
- tail call void asm sideeffect "call ${0:P}", "X"(void (...)* @bar) nounwind
- tail call void asm sideeffect "call ${0:P}", "X"(void (...)* bitcast (void ()* @test3 to void (...)*)) nounwind
- tail call void asm sideeffect "call $0", "X"(void (...)* @bar) nounwind
- tail call void asm sideeffect "call $0", "X"(void (...)* bitcast (void ()* @test3 to void (...)*)) nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/asm-modifier.ll b/libclamav/c++/llvm/test/CodeGen/X86/asm-modifier.ll
deleted file mode 100644
index 44f972e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/asm-modifier.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s | FileCheck %s
-; ModuleID = 'asm.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.6"
-
-define i32 @test1() nounwind {
-entry:
-; CHECK: test1:
-; CHECK: movw %gs:6, %ax
- %asmtmp.i = tail call i16 asm "movw\09%gs:${1:a}, ${0:w}", "=r,ir,~{dirflag},~{fpsr},~{flags}"(i32 6) nounwind ; <i16> [#uses=1]
- %0 = zext i16 %asmtmp.i to i32 ; <i32> [#uses=1]
- ret i32 %0
-}
-
-define zeroext i16 @test2(i32 %address) nounwind {
-entry:
-; CHECK: test2:
-; CHECK: movw %gs:(%eax), %ax
- %asmtmp = tail call i16 asm "movw\09%gs:${1:a}, ${0:w}", "=r,ir,~{dirflag},~{fpsr},~{flags}"(i32 %address) nounwind ; <i16> [#uses=1]
- ret i16 %asmtmp
-}
-
-@n = global i32 42		; <i32*> [#uses=3]
-@y = common global i32 0		; <i32*> [#uses=3]
-
-define void @test3() nounwind {
-entry:
-; CHECK: test3:
-; CHECK: movl _n, %eax
- call void asm sideeffect "movl ${0:a}, %eax", "ir,~{dirflag},~{fpsr},~{flags},~{eax}"(i32* @n) nounwind
- ret void
-}
-
-define void @test4() nounwind {
-entry:
-; CHECK: test4:
-; CHECK: movl L_y$non_lazy_ptr, %ecx
-; CHECK: movl (%ecx), %eax
- call void asm sideeffect "movl ${0:a}, %eax", "ir,~{dirflag},~{fpsr},~{flags},~{eax}"(i32* @y) nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/atomic_add.ll b/libclamav/c++/llvm/test/CodeGen/X86/atomic_add.ll
deleted file mode 100644
index d00f8e8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/atomic_add.ll
+++ /dev/null
@@ -1,217 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
-; rdar://7103704
-
-define void @sub1(i32* nocapture %p, i32 %v) nounwind ssp {
-entry:
-; CHECK: sub1:
-; CHECK: subl
- %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 %v) ; <i32> [#uses=0]
- ret void
-}
-
-define void @inc4(i64* nocapture %p) nounwind ssp {
-entry:
-; CHECK: inc4:
-; CHECK: incq
- %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0]
- ret void
-}
-
-declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind
-
-define void @add8(i64* nocapture %p) nounwind ssp {
-entry:
-; CHECK: add8:
-; CHECK: addq $2
- %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 2) ; <i64> [#uses=0]
- ret void
-}
-
-define void @add4(i64* nocapture %p, i32 %v) nounwind ssp {
-entry:
-; CHECK: add4:
-; CHECK: addq
- %0 = sext i32 %v to i64 ; <i64> [#uses=1]
- %1 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 %0) ; <i64> [#uses=0]
- ret void
-}
-
-define void @inc3(i8* nocapture %p) nounwind ssp {
-entry:
-; CHECK: inc3:
-; CHECK: incb
- %0 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 1) ; <i8> [#uses=0]
- ret void
-}
-
-declare i8 @llvm.atomic.load.add.i8.p0i8(i8* nocapture, i8) nounwind
-
-define void @add7(i8* nocapture %p) nounwind ssp {
-entry:
-; CHECK: add7:
-; CHECK: addb $2
- %0 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 2) ; <i8> [#uses=0]
- ret void
-}
-
-define void @add3(i8* nocapture %p, i32 %v) nounwind ssp {
-entry:
-; CHECK: add3:
-; CHECK: addb
- %0 = trunc i32 %v to i8 ; <i8> [#uses=1]
- %1 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 %0) ; <i8> [#uses=0]
- ret void
-}
-
-define void @inc2(i16* nocapture %p) nounwind ssp {
-entry:
-; CHECK: inc2:
-; CHECK: incw
- %0 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 1) ; <i16> [#uses=0]
- ret void
-}
-
-declare i16 @llvm.atomic.load.add.i16.p0i16(i16* nocapture, i16) nounwind
-
-define void @add6(i16* nocapture %p) nounwind ssp {
-entry:
-; CHECK: add6:
-; CHECK: addw $2
- %0 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 2) ; <i16> [#uses=0]
- ret void
-}
-
-define void @add2(i16* nocapture %p, i32 %v) nounwind ssp {
-entry:
-; CHECK: add2:
-; CHECK: addw
- %0 = trunc i32 %v to i16 ; <i16> [#uses=1]
- %1 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 %0) ; <i16> [#uses=0]
- ret void
-}
-
-define void @inc1(i32* nocapture %p) nounwind ssp {
-entry:
-; CHECK: inc1:
-; CHECK: incl
- %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 1) ; <i32> [#uses=0]
- ret void
-}
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32* nocapture, i32) nounwind
-
-define void @add5(i32* nocapture %p) nounwind ssp {
-entry:
-; CHECK: add5:
-; CHECK: addl $2
- %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 2) ; <i32> [#uses=0]
- ret void
-}
-
-define void @add1(i32* nocapture %p, i32 %v) nounwind ssp {
-entry:
-; CHECK: add1:
-; CHECK: addl
- %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 %v) ; <i32> [#uses=0]
- ret void
-}
-
-define void @dec4(i64* nocapture %p) nounwind ssp {
-entry:
-; CHECK: dec4:
-; CHECK: decq
- %0 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0]
- ret void
-}
-
-declare i64 @llvm.atomic.load.sub.i64.p0i64(i64* nocapture, i64) nounwind
-
-define void @sub8(i64* nocapture %p) nounwind ssp {
-entry:
-; CHECK: sub8:
-; CHECK: subq $2
- %0 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 2) ; <i64> [#uses=0]
- ret void
-}
-
-define void @sub4(i64* nocapture %p, i32 %v) nounwind ssp {
-entry:
-; CHECK: sub4:
-; CHECK: subq
- %0 = sext i32 %v to i64 ; <i64> [#uses=1]
- %1 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 %0) ; <i64> [#uses=0]
- ret void
-}
-
-define void @dec3(i8* nocapture %p) nounwind ssp {
-entry:
-; CHECK: dec3:
-; CHECK: decb
- %0 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 1) ; <i8> [#uses=0]
- ret void
-}
-
-declare i8 @llvm.atomic.load.sub.i8.p0i8(i8* nocapture, i8) nounwind
-
-define void @sub7(i8* nocapture %p) nounwind ssp {
-entry:
-; CHECK: sub7:
-; CHECK: subb $2
- %0 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 2) ; <i8> [#uses=0]
- ret void
-}
-
-define void @sub3(i8* nocapture %p, i32 %v) nounwind ssp {
-entry:
-; CHECK: sub3:
-; CHECK: subb
- %0 = trunc i32 %v to i8 ; <i8> [#uses=1]
- %1 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 %0) ; <i8> [#uses=0]
- ret void
-}
-
-define void @dec2(i16* nocapture %p) nounwind ssp {
-entry:
-; CHECK: dec2:
-; CHECK: decw
- %0 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 1) ; <i16> [#uses=0]
- ret void
-}
-
-declare i16 @llvm.atomic.load.sub.i16.p0i16(i16* nocapture, i16) nounwind
-
-define void @sub6(i16* nocapture %p) nounwind ssp {
-entry:
-; CHECK: sub6:
-; CHECK: subw $2
- %0 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 2) ; <i16> [#uses=0]
- ret void
-}
-
-define void @sub2(i16* nocapture %p, i32 %v) nounwind ssp {
-entry:
-; CHECK: sub2:
-; CHECK: subw
- %0 = trunc i32 %v to i16 ; <i16> [#uses=1]
- %1 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 %0) ; <i16> [#uses=0]
- ret void
-}
-
-define void @dec1(i32* nocapture %p) nounwind ssp {
-entry:
-; CHECK: dec1:
-; CHECK: decl
- %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 1) ; <i32> [#uses=0]
- ret void
-}
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32* nocapture, i32) nounwind
-
-define void @sub5(i32* nocapture %p) nounwind ssp {
-entry:
-; CHECK: sub5:
-; CHECK: subl $2
- %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 2) ; <i32> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/atomic_op.ll b/libclamav/c++/llvm/test/CodeGen/X86/atomic_op.ll
deleted file mode 100644
index 3ef1887..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/atomic_op.ll
+++ /dev/null
@@ -1,94 +0,0 @@
-; RUN: llc < %s -march=x86 -o %t1
-; RUN: grep "lock" %t1 | count 17
-; RUN: grep "xaddl" %t1 | count 4
-; RUN: grep "cmpxchgl" %t1 | count 13
-; RUN: grep "xchgl" %t1 | count 14
-; RUN: grep "cmova" %t1 | count 2
-; RUN: grep "cmovb" %t1 | count 2
-; RUN: grep "cmovg" %t1 | count 2
-; RUN: grep "cmovl" %t1 | count 2
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-
-define void @main(i32 %argc, i8** %argv) {
-entry:
- %argc.addr = alloca i32 ; <i32*> [#uses=1]
- %argv.addr = alloca i8** ; <i8***> [#uses=1]
- %val1 = alloca i32 ; <i32*> [#uses=2]
- %val2 = alloca i32 ; <i32*> [#uses=15]
- %andt = alloca i32 ; <i32*> [#uses=2]
- %ort = alloca i32 ; <i32*> [#uses=2]
- %xort = alloca i32 ; <i32*> [#uses=2]
- %old = alloca i32 ; <i32*> [#uses=18]
- %temp = alloca i32 ; <i32*> [#uses=2]
- store i32 %argc, i32* %argc.addr
- store i8** %argv, i8*** %argv.addr
- store i32 0, i32* %val1
- store i32 31, i32* %val2
- store i32 3855, i32* %andt
- store i32 3855, i32* %ort
- store i32 3855, i32* %xort
- store i32 4, i32* %temp
- %tmp = load i32* %temp ; <i32> [#uses=1]
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val1, i32 %tmp ) ; <i32>:0 [#uses=1]
- store i32 %0, i32* %old
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 30 ) ; <i32>:1 [#uses=1]
- store i32 %1, i32* %old
- call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:2 [#uses=1]
- store i32 %2, i32* %old
- call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:3 [#uses=1]
- store i32 %3, i32* %old
- call i32 @llvm.atomic.load.and.i32.p0i32( i32* %andt, i32 4080 ) ; <i32>:4 [#uses=1]
- store i32 %4, i32* %old
- call i32 @llvm.atomic.load.or.i32.p0i32( i32* %ort, i32 4080 ) ; <i32>:5 [#uses=1]
- store i32 %5, i32* %old
- call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %xort, i32 4080 ) ; <i32>:6 [#uses=1]
- store i32 %6, i32* %old
- call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 16 ) ; <i32>:7 [#uses=1]
- store i32 %7, i32* %old
- %neg = sub i32 0, 1 ; <i32> [#uses=1]
- call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 %neg ) ; <i32>:8 [#uses=1]
- store i32 %8, i32* %old
- call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:9 [#uses=1]
- store i32 %9, i32* %old
- call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 0 ) ; <i32>:10 [#uses=1]
- store i32 %10, i32* %old
- call i32 @llvm.atomic.load.umax.i32.p0i32( i32* %val2, i32 65535 ) ; <i32>:11 [#uses=1]
- store i32 %11, i32* %old
- call i32 @llvm.atomic.load.umax.i32.p0i32( i32* %val2, i32 10 ) ; <i32>:12 [#uses=1]
- store i32 %12, i32* %old
- call i32 @llvm.atomic.load.umin.i32.p0i32( i32* %val2, i32 1 ) ; <i32>:13 [#uses=1]
- store i32 %13, i32* %old
- call i32 @llvm.atomic.load.umin.i32.p0i32( i32* %val2, i32 10 ) ; <i32>:14 [#uses=1]
- store i32 %14, i32* %old
- call i32 @llvm.atomic.swap.i32.p0i32( i32* %val2, i32 1976 ) ; <i32>:15 [#uses=1]
- store i32 %15, i32* %old
- %neg1 = sub i32 0, 10 ; <i32> [#uses=1]
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %val2, i32 %neg1, i32 1 ) ; <i32>:16 [#uses=1]
- store i32 %16, i32* %old
- call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %val2, i32 1976, i32 1 ) ; <i32>:17 [#uses=1]
- store i32 %17, i32* %old
- ret void
-}
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.min.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.max.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.umax.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.load.umin.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
-
-declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/attribute-sections.ll b/libclamav/c++/llvm/test/CodeGen/X86/attribute-sections.ll
deleted file mode 100644
index 3035334..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/attribute-sections.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -mtriple=i386-unknown-linux-gnu | FileCheck %s -check-prefix=LINUX
-
-declare i32 @foo()
-@G0 = global i32 ()* @foo, section ".init_array"
-
-; LINUX: .section .init_array,"aw"
-; LINUX: .globl G0
-
-@G1 = global i32 ()* @foo, section ".fini_array"
-
-; LINUX: .section .fini_array,"aw"
-; LINUX: .globl G1
-
-@G2 = global i32 ()* @foo, section ".preinit_array"
-
-; LINUX: .section .preinit_array,"aw"
-; LINUX: .globl G2
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/avoid-lea-scale2.ll b/libclamav/c++/llvm/test/CodeGen/X86/avoid-lea-scale2.ll
deleted file mode 100644
index 8003de2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/avoid-lea-scale2.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep {leal.*-2(\[%\]rdi,\[%\]rdi)}
-
-define i32 @foo(i32 %x) nounwind readnone {
- %t0 = shl i32 %x, 1
- %t1 = add i32 %t0, -2
- ret i32 %t1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/avoid-loop-align-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/avoid-loop-align-2.ll
deleted file mode 100644
index fc9d1f0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/avoid-loop-align-2.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -march=x86 | grep align | count 4
-
-; TODO: Is it a good idea to align inner loops? It's hard to know without
-; knowing what their trip counts are, or other dynamic information. For
-; now, CodeGen aligns all loops.
-
-@x = external global i32*		; <i32**> [#uses=1]
-
-define i32 @t(i32 %a, i32 %b) nounwind readonly ssp {
-entry:
- %0 = icmp eq i32 %a, 0 ; <i1> [#uses=1]
- br i1 %0, label %bb5, label %bb.nph12
-
-bb.nph12: ; preds = %entry
- %1 = icmp eq i32 %b, 0 ; <i1> [#uses=1]
- %2 = load i32** @x, align 8 ; <i32*> [#uses=1]
- br i1 %1, label %bb2.preheader, label %bb2.preheader.us
-
-bb2.preheader.us: ; preds = %bb2.bb3_crit_edge.us, %bb.nph12
- %indvar18 = phi i32 [ 0, %bb.nph12 ], [ %indvar.next19, %bb2.bb3_crit_edge.us ] ; <i32> [#uses=2]
- %sum.111.us = phi i32 [ 0, %bb.nph12 ], [ %4, %bb2.bb3_crit_edge.us ] ; <i32> [#uses=0]
- %tmp16 = mul i32 %indvar18, %a ; <i32> [#uses=1]
- br label %bb1.us
-
-bb1.us: ; preds = %bb1.us, %bb2.preheader.us
- %indvar = phi i32 [ 0, %bb2.preheader.us ], [ %indvar.next, %bb1.us ] ; <i32> [#uses=2]
- %tmp17 = add i32 %indvar, %tmp16 ; <i32> [#uses=1]
- %tmp. = zext i32 %tmp17 to i64 ; <i64> [#uses=1]
- %3 = getelementptr i32* %2, i64 %tmp. ; <i32*> [#uses=1]
- %4 = load i32* %3, align 4 ; <i32> [#uses=2]
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %b ; <i1> [#uses=1]
- br i1 %exitcond, label %bb2.bb3_crit_edge.us, label %bb1.us
-
-bb2.bb3_crit_edge.us: ; preds = %bb1.us
- %indvar.next19 = add i32 %indvar18, 1 ; <i32> [#uses=2]
- %exitcond22 = icmp eq i32 %indvar.next19, %a ; <i1> [#uses=1]
- br i1 %exitcond22, label %bb5, label %bb2.preheader.us
-
-bb2.preheader: ; preds = %bb2.preheader, %bb.nph12
- %indvar24 = phi i32 [ %indvar.next25, %bb2.preheader ], [ 0, %bb.nph12 ] ; <i32> [#uses=1]
- %indvar.next25 = add i32 %indvar24, 1 ; <i32> [#uses=2]
- %exitcond28 = icmp eq i32 %indvar.next25, %a ; <i1> [#uses=1]
- br i1 %exitcond28, label %bb5, label %bb2.preheader
-
-bb5: ; preds = %bb2.preheader, %bb2.bb3_crit_edge.us, %entry
- %sum.1.lcssa = phi i32 [ 0, %entry ], [ 0, %bb2.preheader ], [ %4, %bb2.bb3_crit_edge.us ] ; <i32> [#uses=1]
- ret i32 %sum.1.lcssa
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/avoid-loop-align.ll b/libclamav/c++/llvm/test/CodeGen/X86/avoid-loop-align.ll
deleted file mode 100644
index d4c5c67..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/avoid-loop-align.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin | FileCheck %s
-
-; CodeGen should align the top of the loop, which differs from the loop
-; header in this case.
-
-; CHECK: jmp LBB1_2
-; CHECK: .align
-; CHECK: LBB1_1:
-
-@A = common global [100 x i32] zeroinitializer, align 32		; <[100 x i32]*> [#uses=1]
-
-define i8* @test(i8* %Q, i32* %L) nounwind {
-entry:
- %tmp = tail call i32 (...)* @foo() nounwind ; <i32> [#uses=2]
- %tmp1 = inttoptr i32 %tmp to i8* ; <i8*> [#uses=1]
- br label %bb1
-
-bb: ; preds = %bb1, %bb1
- %indvar.next = add i32 %P.0.rec, 1 ; <i32> [#uses=1]
- br label %bb1
-
-bb1: ; preds = %bb, %entry
- %P.0.rec = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
- %P.0 = getelementptr i8* %tmp1, i32 %P.0.rec ; <i8*> [#uses=3]
- %tmp2 = load i8* %P.0, align 1 ; <i8> [#uses=1]
- switch i8 %tmp2, label %bb4 [
- i8 12, label %bb
- i8 42, label %bb
- ]
-
-bb4: ; preds = %bb1
- %tmp3 = ptrtoint i8* %P.0 to i32 ; <i32> [#uses=1]
- %tmp4 = sub i32 %tmp3, %tmp ; <i32> [#uses=1]
- %tmp5 = getelementptr [100 x i32]* @A, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
- store i32 4, i32* %tmp5, align 4
- ret i8* %P.0
-}
-
-declare i32 @foo(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/bigstructret.ll b/libclamav/c++/llvm/test/CodeGen/X86/bigstructret.ll
deleted file mode 100644
index 633995d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/bigstructret.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86 -o %t
-; RUN: grep "movl .24601, 12(%ecx)" %t
-; RUN: grep "movl .48, 8(%ecx)" %t
-; RUN: grep "movl .24, 4(%ecx)" %t
-; RUN: grep "movl .12, (%ecx)" %t
-
-%0 = type { i32, i32, i32, i32 }
-
-define internal fastcc %0 @ReturnBigStruct() nounwind readnone {
-entry:
- %0 = insertvalue %0 zeroinitializer, i32 12, 0
- %1 = insertvalue %0 %0, i32 24, 1
- %2 = insertvalue %0 %1, i32 48, 2
- %3 = insertvalue %0 %2, i32 24601, 3
- ret %0 %3
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/bigstructret2.ll b/libclamav/c++/llvm/test/CodeGen/X86/bigstructret2.ll
deleted file mode 100644
index 46e0fd2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/bigstructret2.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -o %t
-
-%0 = type { i64, i64 }
-
-declare fastcc %0 @ReturnBigStruct() nounwind readnone
-
-define void @test(%0* %p) {
- %1 = call fastcc %0 @ReturnBigStruct()
- store %0 %1, %0* %p
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/bitcast-int-to-vector.ll b/libclamav/c++/llvm/test/CodeGen/X86/bitcast-int-to-vector.ll
deleted file mode 100644
index 4c25979..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/bitcast-int-to-vector.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define i1 @foo(i64 %a)
-{
- %t = bitcast i64 %a to <2 x float>
- %r = extractelement <2 x float> %t, i32 0
- %s = extractelement <2 x float> %t, i32 1
- %b = fcmp uno float %r, %s
- ret i1 %b
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/bitcast.ll b/libclamav/c++/llvm/test/CodeGen/X86/bitcast.ll
deleted file mode 100644
index c34c675..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/bitcast.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86
-; RUN: llc < %s -march=x86-64
-; PR1033
-
-define i64 @test1(double %t) {
- %u = bitcast double %t to i64 ; <i64> [#uses=1]
- ret i64 %u
-}
-
-define double @test2(i64 %t) {
- %u = bitcast i64 %t to double ; <double> [#uses=1]
- ret double %u
-}
-
-define i32 @test3(float %t) {
- %u = bitcast float %t to i32 ; <i32> [#uses=1]
- ret i32 %u
-}
-
-define float @test4(i32 %t) {
- %u = bitcast i32 %t to float ; <float> [#uses=1]
- ret float %u
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/bitcast2.ll b/libclamav/c++/llvm/test/CodeGen/X86/bitcast2.ll
deleted file mode 100644
index 48922b5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/bitcast2.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movd | count 2
-; RUN: llc < %s -march=x86-64 | not grep rsp
-
-define i64 @test1(double %A) {
- %B = bitcast double %A to i64
- ret i64 %B
-}
-
-define double @test2(i64 %A) {
- %B = bitcast i64 %A to double
- ret double %B
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/br-fold.ll b/libclamav/c++/llvm/test/CodeGen/X86/br-fold.ll
deleted file mode 100644
index 8af3bd1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/br-fold.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc -march=x86-64 < %s | FileCheck %s
-
-; CHECK: orq
-; CHECK-NEXT: jne
-
-@_ZN11xercesc_2_513SchemaSymbols21fgURI_SCHEMAFORSCHEMAE = external constant [33 x i16], align 32 ; <[33 x i16]*> [#uses=1]
-@_ZN11xercesc_2_56XMLUni16fgNotationStringE = external constant [9 x i16], align 16 ; <[9 x i16]*> [#uses=1]
-
-define fastcc void @foo() {
-entry:
- br i1 icmp eq (i64 or (i64 ptrtoint ([33 x i16]* @_ZN11xercesc_2_513SchemaSymbols21fgURI_SCHEMAFORSCHEMAE to i64),
- i64 ptrtoint ([9 x i16]* @_ZN11xercesc_2_56XMLUni16fgNotationStringE to i64)), i64 0),
- label %bb8.i329, label %bb4.i.i318.preheader
-
-bb4.i.i318.preheader: ; preds = %bb6
- unreachable
-
-bb8.i329: ; preds = %bb6
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/brcond.ll b/libclamav/c++/llvm/test/CodeGen/X86/brcond.ll
deleted file mode 100644
index 130483a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/brcond.ll
+++ /dev/null
@@ -1,69 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin10 | FileCheck %s
-; rdar://7475489
-
-define i32 @test1(i32 %a, i32 %b) nounwind ssp {
-entry:
-; CHECK: test1:
-; CHECK: xorb
-; CHECK-NOT: andb
-; CHECK-NOT: shrb
-; CHECK: testb $64
- %0 = and i32 %a, 16384
- %1 = icmp ne i32 %0, 0
- %2 = and i32 %b, 16384
- %3 = icmp ne i32 %2, 0
- %4 = xor i1 %1, %3
- br i1 %4, label %bb1, label %bb
-
-bb: ; preds = %entry
- %5 = tail call i32 (...)* @foo() nounwind ; <i32> [#uses=1]
- ret i32 %5
-
-bb1: ; preds = %entry
- %6 = tail call i32 (...)* @bar() nounwind ; <i32> [#uses=1]
- ret i32 %6
-}
-
-declare i32 @foo(...)
-
-declare i32 @bar(...)
-
-
-
-; PR3351 - (P == 0) & (Q == 0) -> (P|Q) == 0
-define i32 @test2(i32* %P, i32* %Q) nounwind ssp {
-entry:
- %a = icmp eq i32* %P, null ; <i1> [#uses=1]
- %b = icmp eq i32* %Q, null ; <i1> [#uses=1]
- %c = and i1 %a, %b
- br i1 %c, label %bb1, label %return
-
-bb1: ; preds = %entry
- ret i32 4
-
-return: ; preds = %entry
- ret i32 192
-; CHECK: test2:
-; CHECK: movl 4(%esp), %eax
-; CHECK-NEXT: orl 8(%esp), %eax
-; CHECK-NEXT: jne LBB2_2
-}
-
-; PR3351 - (P != 0) | (Q != 0) -> (P|Q) != 0
-define i32 @test3(i32* %P, i32* %Q) nounwind ssp {
-entry:
- %a = icmp ne i32* %P, null ; <i1> [#uses=1]
- %b = icmp ne i32* %Q, null ; <i1> [#uses=1]
- %c = or i1 %a, %b
- br i1 %c, label %bb1, label %return
-
-bb1: ; preds = %entry
- ret i32 4
-
-return: ; preds = %entry
- ret i32 192
-; CHECK: test3:
-; CHECK: movl 4(%esp), %eax
-; CHECK-NEXT: orl 8(%esp), %eax
-; CHECK-NEXT: je LBB3_2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/break-anti-dependencies.ll b/libclamav/c++/llvm/test/CodeGen/X86/break-anti-dependencies.ll
deleted file mode 100644
index 972b3cd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/break-anti-dependencies.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -march=x86-64 -post-RA-scheduler -break-anti-dependencies=none > %t
-; RUN: grep {%xmm0} %t | count 14
-; RUN: not grep {%xmm1} %t
-; RUN: llc < %s -march=x86-64 -post-RA-scheduler -break-anti-dependencies=critical > %t
-; RUN: grep {%xmm0} %t | count 7
-; RUN: grep {%xmm1} %t | count 7
-
-define void @goo(double* %r, double* %p, double* %q) nounwind {
-entry:
- %0 = load double* %p, align 8
- %1 = fadd double %0, 1.100000e+00
- %2 = fmul double %1, 1.200000e+00
- %3 = fadd double %2, 1.300000e+00
- %4 = fmul double %3, 1.400000e+00
- %5 = fadd double %4, 1.500000e+00
- %6 = fptosi double %5 to i32
- %7 = load double* %r, align 8
- %8 = fadd double %7, 7.100000e+00
- %9 = fmul double %8, 7.200000e+00
- %10 = fadd double %9, 7.300000e+00
- %11 = fmul double %10, 7.400000e+00
- %12 = fadd double %11, 7.500000e+00
- %13 = fptosi double %12 to i32
- %14 = icmp slt i32 %6, %13
- br i1 %14, label %bb, label %return
-
-bb:
- store double 9.300000e+00, double* %q, align 8
- ret void
-
-return:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/break-sse-dep.ll b/libclamav/c++/llvm/test/CodeGen/X86/break-sse-dep.ll
deleted file mode 100644
index acc0647..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/break-sse-dep.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse2 | FileCheck %s
-
-define double @t1(float* nocapture %x) nounwind readonly ssp {
-entry:
-; CHECK: t1:
-; CHECK: movss (%rdi), %xmm0
-; CHECK; cvtss2sd %xmm0, %xmm0
-
- %0 = load float* %x, align 4
- %1 = fpext float %0 to double
- ret double %1
-}
-
-define float @t2(double* nocapture %x) nounwind readonly ssp optsize {
-entry:
-; CHECK: t2:
-; CHECK; cvtsd2ss (%rdi), %xmm0
- %0 = load double* %x, align 8
- %1 = fptrunc double %0 to float
- ret float %1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/bss_pagealigned.ll b/libclamav/c++/llvm/test/CodeGen/X86/bss_pagealigned.ll
deleted file mode 100644
index da95aca..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/bss_pagealigned.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc --code-model=kernel -march=x86-64 <%s -asm-verbose=0 | FileCheck %s
-; PR4933
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-%struct.kmem_cache_order_objects = type { i64 }
-declare i8* @memset(i8*, i32, i64)
-define void @unxlate_dev_mem_ptr(i64 %phis, i8* %addr) nounwind {
- %pte.addr.i = alloca %struct.kmem_cache_order_objects*
- %call8 = call i8* @memset(i8* bitcast ([512 x %struct.kmem_cache_order_objects]* @bm_pte to i8*), i32 0, i64 4096)
-; CHECK: movq $bm_pte, %rdi
-; CHECK-NEXT: xorl %esi, %esi
-; CHECK-NEXT: movl $4096, %edx
-; CHECK-NEXT: callq memset
- ret void
-}
-@bm_pte = internal global [512 x %struct.kmem_cache_order_objects] zeroinitializer, section ".bss.page_aligned", align 4096
-; CHECK: .section .bss.page_aligned,"aw",@nobits
-; CHECK-NEXT: .align 4096
-; CHECK-NEXT: bm_pte:
-; CHECK-NEXT: .zero 4096
-; CHECK-NEXT: .size bm_pte, 4096
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/bswap-inline-asm.ll b/libclamav/c++/llvm/test/CodeGen/X86/bswap-inline-asm.ll
deleted file mode 100644
index 2b70193..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/bswap-inline-asm.ll
+++ /dev/null
@@ -1,80 +0,0 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: not grep APP %t
-; RUN: FileCheck %s < %t
-
-; CHECK: foo:
-; CHECK: bswapq
-define i64 @foo(i64 %x) nounwind {
- %asmtmp = tail call i64 asm "bswap $0", "=r,0,~{dirflag},~{fpsr},~{flags}"(i64 %x) nounwind
- ret i64 %asmtmp
-}
-
-; CHECK: bar:
-; CHECK: bswapq
-define i64 @bar(i64 %x) nounwind {
- %asmtmp = tail call i64 asm "bswapq ${0:q}", "=r,0,~{dirflag},~{fpsr},~{flags}"(i64 %x) nounwind
- ret i64 %asmtmp
-}
-
-; CHECK: pen:
-; CHECK: bswapl
-define i32 @pen(i32 %x) nounwind {
- %asmtmp = tail call i32 asm "bswapl ${0:q}", "=r,0,~{dirflag},~{fpsr},~{flags}"(i32 %x) nounwind
- ret i32 %asmtmp
-}
-
-; CHECK: s16:
-; CHECK: rolw $8,
-define zeroext i16 @s16(i16 zeroext %x) nounwind {
- %asmtmp = tail call i16 asm "rorw $$8, ${0:w}", "=r,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i16 %x) nounwind
- ret i16 %asmtmp
-}
-
-; CHECK: t16:
-; CHECK: rolw $8,
-define zeroext i16 @t16(i16 zeroext %x) nounwind {
- %asmtmp = tail call i16 asm "rorw $$8, ${0:w}", "=r,0,~{cc},~{dirflag},~{fpsr},~{flags}"(i16 %x) nounwind
- ret i16 %asmtmp
-}
-
-; CHECK: u16:
-; CHECK: rolw $8,
-define zeroext i16 @u16(i16 zeroext %x) nounwind {
- %asmtmp = tail call i16 asm "rolw $$8, ${0:w}", "=r,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i16 %x) nounwind
- ret i16 %asmtmp
-}
-
-; CHECK: v16:
-; CHECK: rolw $8,
-define zeroext i16 @v16(i16 zeroext %x) nounwind {
- %asmtmp = tail call i16 asm "rolw $$8, ${0:w}", "=r,0,~{cc},~{dirflag},~{fpsr},~{flags}"(i16 %x) nounwind
- ret i16 %asmtmp
-}
-
-; CHECK: s32:
-; CHECK: bswapl
-define i32 @s32(i32 %x) nounwind {
- %asmtmp = tail call i32 asm "bswap $0", "=r,0,~{dirflag},~{fpsr},~{flags}"(i32 %x) nounwind
- ret i32 %asmtmp
-}
-
-; CHECK: t32:
-; CHECK: bswapl
-define i32 @t32(i32 %x) nounwind {
- %asmtmp = tail call i32 asm "bswap $0", "=r,0,~{dirflag},~{flags},~{fpsr}"(i32 %x) nounwind
- ret i32 %asmtmp
-}
-
-; CHECK: s64:
-; CHECK: bswapq
-define i64 @s64(i64 %x) nounwind {
- %asmtmp = tail call i64 asm "bswap ${0:q}", "=r,0,~{dirflag},~{fpsr},~{flags}"(i64 %x) nounwind
- ret i64 %asmtmp
-}
-
-; CHECK: t64:
-; CHECK: bswapq
-define i64 @t64(i64 %x) nounwind {
- %asmtmp = tail call i64 asm "bswap ${0:q}", "=r,0,~{fpsr},~{dirflag},~{flags}"(i64 %x) nounwind
- ret i64 %asmtmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/bswap.ll b/libclamav/c++/llvm/test/CodeGen/X86/bswap.ll
deleted file mode 100644
index 0a72c1c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/bswap.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; bswap should be constant folded when it is passed a constant argument
-
-; RUN: llc < %s -march=x86 | \
-; RUN: grep bswapl | count 3
-; RUN: llc < %s -march=x86 | grep rolw | count 1
-
-declare i16 @llvm.bswap.i16(i16)
-
-declare i32 @llvm.bswap.i32(i32)
-
-declare i64 @llvm.bswap.i64(i64)
-
-define i16 @W(i16 %A) {
- %Z = call i16 @llvm.bswap.i16( i16 %A ) ; <i16> [#uses=1]
- ret i16 %Z
-}
-
-define i32 @X(i32 %A) {
- %Z = call i32 @llvm.bswap.i32( i32 %A ) ; <i32> [#uses=1]
- ret i32 %Z
-}
-
-define i64 @Y(i64 %A) {
- %Z = call i64 @llvm.bswap.i64( i64 %A ) ; <i64> [#uses=1]
- ret i64 %Z
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/bt.ll b/libclamav/c++/llvm/test/CodeGen/X86/bt.ll
deleted file mode 100644
index ec447e5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/bt.ll
+++ /dev/null
@@ -1,442 +0,0 @@
-; RUN: llc < %s -march=x86 | grep btl | count 28
-; RUN: llc < %s -march=x86 -mcpu=pentium4 | grep btl | not grep esp
-; RUN: llc < %s -march=x86 -mcpu=penryn | grep btl | not grep esp
-; PR3253
-
-; The register+memory form of the BT instruction should be usable on
-; pentium4, however it is currently disabled due to the register+memory
-; form having different semantics than the register+register form.
-
-; Test these patterns:
-; (X & (1 << N)) != 0 --> BT(X, N).
-; ((X >>u N) & 1) != 0 --> BT(X, N).
-; as well as several variations:
-; - The second form can use an arithmetic shift.
-; - Either form can use == instead of !=.
-; - Either form can compare with an operand of the &
-; instead of with 0.
-; - The comparison can be commuted (only cases where neither
-; operand is constant are included).
-; - The and can be commuted.
-
-define void @test2(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = lshr i32 %x, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp29, 1 ; <i32> [#uses=1]
- %tmp4 = icmp eq i32 %tmp3, 0 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @test2b(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = lshr i32 %x, %n ; <i32> [#uses=1]
- %tmp3 = and i32 1, %tmp29
- %tmp4 = icmp eq i32 %tmp3, 0 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @atest2(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = ashr i32 %x, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp29, 1 ; <i32> [#uses=1]
- %tmp4 = icmp eq i32 %tmp3, 0 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @atest2b(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = ashr i32 %x, %n ; <i32> [#uses=1]
- %tmp3 = and i32 1, %tmp29
- %tmp4 = icmp eq i32 %tmp3, 0 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @test3(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = shl i32 1, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp29, %x ; <i32> [#uses=1]
- %tmp4 = icmp eq i32 %tmp3, 0 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @test3b(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = shl i32 1, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %x, %tmp29
- %tmp4 = icmp eq i32 %tmp3, 0 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @testne2(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = lshr i32 %x, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp29, 1 ; <i32> [#uses=1]
- %tmp4 = icmp ne i32 %tmp3, 0 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @testne2b(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = lshr i32 %x, %n ; <i32> [#uses=1]
- %tmp3 = and i32 1, %tmp29
- %tmp4 = icmp ne i32 %tmp3, 0 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @atestne2(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = ashr i32 %x, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp29, 1 ; <i32> [#uses=1]
- %tmp4 = icmp ne i32 %tmp3, 0 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @atestne2b(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = ashr i32 %x, %n ; <i32> [#uses=1]
- %tmp3 = and i32 1, %tmp29
- %tmp4 = icmp ne i32 %tmp3, 0 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @testne3(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = shl i32 1, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp29, %x ; <i32> [#uses=1]
- %tmp4 = icmp ne i32 %tmp3, 0 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @testne3b(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = shl i32 1, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %x, %tmp29
- %tmp4 = icmp ne i32 %tmp3, 0 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @query2(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = lshr i32 %x, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp29, 1 ; <i32> [#uses=1]
- %tmp4 = icmp eq i32 %tmp3, 1 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @query2b(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = lshr i32 %x, %n ; <i32> [#uses=1]
- %tmp3 = and i32 1, %tmp29
- %tmp4 = icmp eq i32 %tmp3, 1 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @aquery2(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = ashr i32 %x, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp29, 1 ; <i32> [#uses=1]
- %tmp4 = icmp eq i32 %tmp3, 1 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @aquery2b(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = ashr i32 %x, %n ; <i32> [#uses=1]
- %tmp3 = and i32 1, %tmp29
- %tmp4 = icmp eq i32 %tmp3, 1 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @query3(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = shl i32 1, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp29, %x ; <i32> [#uses=1]
- %tmp4 = icmp eq i32 %tmp3, %tmp29 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @query3b(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = shl i32 1, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %x, %tmp29
- %tmp4 = icmp eq i32 %tmp3, %tmp29 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @query3x(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = shl i32 1, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp29, %x ; <i32> [#uses=1]
- %tmp4 = icmp eq i32 %tmp29, %tmp3 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @query3bx(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = shl i32 1, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %x, %tmp29
- %tmp4 = icmp eq i32 %tmp29, %tmp3 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @queryne2(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = lshr i32 %x, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp29, 1 ; <i32> [#uses=1]
- %tmp4 = icmp ne i32 %tmp3, 1 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @queryne2b(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = lshr i32 %x, %n ; <i32> [#uses=1]
- %tmp3 = and i32 1, %tmp29
- %tmp4 = icmp ne i32 %tmp3, 1 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @aqueryne2(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = ashr i32 %x, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp29, 1 ; <i32> [#uses=1]
- %tmp4 = icmp ne i32 %tmp3, 1 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @aqueryne2b(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = ashr i32 %x, %n ; <i32> [#uses=1]
- %tmp3 = and i32 1, %tmp29
- %tmp4 = icmp ne i32 %tmp3, 1 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @queryne3(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = shl i32 1, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp29, %x ; <i32> [#uses=1]
- %tmp4 = icmp ne i32 %tmp3, %tmp29 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @queryne3b(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = shl i32 1, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %x, %tmp29
- %tmp4 = icmp ne i32 %tmp3, %tmp29 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @queryne3x(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = shl i32 1, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp29, %x ; <i32> [#uses=1]
- %tmp4 = icmp ne i32 %tmp29, %tmp3 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @queryne3bx(i32 %x, i32 %n) nounwind {
-entry:
- %tmp29 = shl i32 1, %n ; <i32> [#uses=1]
- %tmp3 = and i32 %x, %tmp29
- %tmp4 = icmp ne i32 %tmp29, %tmp3 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-declare void @foo()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/byval.ll b/libclamav/c++/llvm/test/CodeGen/X86/byval.ll
deleted file mode 100644
index af36e1b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/byval.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep {movq 8(%rsp), %rax}
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep {movl 8(%esp), %edx} %t
-; RUN: grep {movl 4(%esp), %eax} %t
-
-%struct.s = type { i64, i64, i64 }
-
-define i64 @f(%struct.s* byval %a) {
-entry:
- %tmp2 = getelementptr %struct.s* %a, i32 0, i32 0
- %tmp3 = load i64* %tmp2, align 8
- ret i64 %tmp3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/byval2.ll b/libclamav/c++/llvm/test/CodeGen/X86/byval2.ll
deleted file mode 100644
index 71129f5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/byval2.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep rep.movsq | count 2
-; RUN: llc < %s -march=x86 | grep rep.movsl | count 2
-
-%struct.s = type { i64, i64, i64, i64, i64, i64, i64, i64,
- i64, i64, i64, i64, i64, i64, i64, i64,
- i64 }
-
-define void @g(i64 %a, i64 %b, i64 %c) {
-entry:
- %d = alloca %struct.s, align 16
- %tmp = getelementptr %struct.s* %d, i32 0, i32 0
- store i64 %a, i64* %tmp, align 16
- %tmp2 = getelementptr %struct.s* %d, i32 0, i32 1
- store i64 %b, i64* %tmp2, align 16
- %tmp4 = getelementptr %struct.s* %d, i32 0, i32 2
- store i64 %c, i64* %tmp4, align 16
- call void @f( %struct.s* %d byval)
- call void @f( %struct.s* %d byval)
- ret void
-}
-
-declare void @f(%struct.s* byval)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/byval3.ll b/libclamav/c++/llvm/test/CodeGen/X86/byval3.ll
deleted file mode 100644
index 504e0be..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/byval3.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep rep.movsq | count 2
-; RUN: llc < %s -march=x86 | grep rep.movsl | count 2
-
-%struct.s = type { i32, i32, i32, i32, i32, i32, i32, i32,
- i32, i32, i32, i32, i32, i32, i32, i32,
- i32, i32, i32, i32, i32, i32, i32, i32,
- i32, i32, i32, i32, i32, i32, i32, i32,
- i32 }
-
-define void @g(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6) nounwind {
-entry:
- %d = alloca %struct.s, align 16
- %tmp = getelementptr %struct.s* %d, i32 0, i32 0
- store i32 %a1, i32* %tmp, align 16
- %tmp2 = getelementptr %struct.s* %d, i32 0, i32 1
- store i32 %a2, i32* %tmp2, align 16
- %tmp4 = getelementptr %struct.s* %d, i32 0, i32 2
- store i32 %a3, i32* %tmp4, align 16
- %tmp6 = getelementptr %struct.s* %d, i32 0, i32 3
- store i32 %a4, i32* %tmp6, align 16
- %tmp8 = getelementptr %struct.s* %d, i32 0, i32 4
- store i32 %a5, i32* %tmp8, align 16
- %tmp10 = getelementptr %struct.s* %d, i32 0, i32 5
- store i32 %a6, i32* %tmp10, align 16
- call void @f( %struct.s* %d byval)
- call void @f( %struct.s* %d byval)
- ret void
-}
-
-declare void @f(%struct.s* byval)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/byval4.ll b/libclamav/c++/llvm/test/CodeGen/X86/byval4.ll
deleted file mode 100644
index 4db9d65..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/byval4.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep rep.movsq | count 2
-; RUN: llc < %s -march=x86 | grep rep.movsl | count 2
-
-%struct.s = type { i16, i16, i16, i16, i16, i16, i16, i16,
- i16, i16, i16, i16, i16, i16, i16, i16,
- i16, i16, i16, i16, i16, i16, i16, i16,
- i16, i16, i16, i16, i16, i16, i16, i16,
- i16, i16, i16, i16, i16, i16, i16, i16,
- i16, i16, i16, i16, i16, i16, i16, i16,
- i16, i16, i16, i16, i16, i16, i16, i16,
- i16, i16, i16, i16, i16, i16, i16, i16,
- i16 }
-
-
-define void @g(i16 signext %a1, i16 signext %a2, i16 signext %a3,
- i16 signext %a4, i16 signext %a5, i16 signext %a6) nounwind {
-entry:
- %a = alloca %struct.s, align 16
- %tmp = getelementptr %struct.s* %a, i32 0, i32 0
- store i16 %a1, i16* %tmp, align 16
- %tmp2 = getelementptr %struct.s* %a, i32 0, i32 1
- store i16 %a2, i16* %tmp2, align 16
- %tmp4 = getelementptr %struct.s* %a, i32 0, i32 2
- store i16 %a3, i16* %tmp4, align 16
- %tmp6 = getelementptr %struct.s* %a, i32 0, i32 3
- store i16 %a4, i16* %tmp6, align 16
- %tmp8 = getelementptr %struct.s* %a, i32 0, i32 4
- store i16 %a5, i16* %tmp8, align 16
- %tmp10 = getelementptr %struct.s* %a, i32 0, i32 5
- store i16 %a6, i16* %tmp10, align 16
- call void @f( %struct.s* %a byval )
- call void @f( %struct.s* %a byval )
- ret void
-}
-
-declare void @f(%struct.s* byval)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/byval5.ll b/libclamav/c++/llvm/test/CodeGen/X86/byval5.ll
deleted file mode 100644
index 69c115b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/byval5.ll
+++ /dev/null
@@ -1,44 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep rep.movsq | count 2
-; RUN: llc < %s -march=x86 | grep rep.movsl | count 2
-
-%struct.s = type { i8, i8, i8, i8, i8, i8, i8, i8,
- i8, i8, i8, i8, i8, i8, i8, i8,
- i8, i8, i8, i8, i8, i8, i8, i8,
- i8, i8, i8, i8, i8, i8, i8, i8,
- i8, i8, i8, i8, i8, i8, i8, i8,
- i8, i8, i8, i8, i8, i8, i8, i8,
- i8, i8, i8, i8, i8, i8, i8, i8,
- i8, i8, i8, i8, i8, i8, i8, i8,
- i8, i8, i8, i8, i8, i8, i8, i8,
- i8, i8, i8, i8, i8, i8, i8, i8,
- i8, i8, i8, i8, i8, i8, i8, i8,
- i8, i8, i8, i8, i8, i8, i8, i8,
- i8, i8, i8, i8, i8, i8, i8, i8,
- i8, i8, i8, i8, i8, i8, i8, i8,
- i8, i8, i8, i8, i8, i8, i8, i8,
- i8, i8, i8, i8, i8, i8, i8, i8,
- i8 }
-
-
-define void @g(i8 signext %a1, i8 signext %a2, i8 signext %a3,
- i8 signext %a4, i8 signext %a5, i8 signext %a6) {
-entry:
- %a = alloca %struct.s
- %tmp = getelementptr %struct.s* %a, i32 0, i32 0
- store i8 %a1, i8* %tmp, align 8
- %tmp2 = getelementptr %struct.s* %a, i32 0, i32 1
- store i8 %a2, i8* %tmp2, align 8
- %tmp4 = getelementptr %struct.s* %a, i32 0, i32 2
- store i8 %a3, i8* %tmp4, align 8
- %tmp6 = getelementptr %struct.s* %a, i32 0, i32 3
- store i8 %a4, i8* %tmp6, align 8
- %tmp8 = getelementptr %struct.s* %a, i32 0, i32 4
- store i8 %a5, i8* %tmp8, align 8
- %tmp10 = getelementptr %struct.s* %a, i32 0, i32 5
- store i8 %a6, i8* %tmp10, align 8
- call void @f( %struct.s* %a byval )
- call void @f( %struct.s* %a byval )
- ret void
-}
-
-declare void @f(%struct.s* byval)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/byval6.ll b/libclamav/c++/llvm/test/CodeGen/X86/byval6.ll
deleted file mode 100644
index b060369..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/byval6.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=x86 | grep add | not grep 16
-
- %struct.W = type { x86_fp80, x86_fp80 }
-@B = global %struct.W { x86_fp80 0xK4001A000000000000000, x86_fp80 0xK4001C000000000000000 }, align 32
-@.cpx = internal constant %struct.W { x86_fp80 0xK4001E000000000000000, x86_fp80 0xK40028000000000000000 }
-
-define i32 @main() nounwind {
-entry:
- tail call void (i32, ...)* @bar( i32 3, %struct.W* byval @.cpx ) nounwind
- tail call void (i32, ...)* @baz( i32 3, %struct.W* byval @B ) nounwind
- ret i32 undef
-}
-
-declare void @bar(i32, ...)
-
-declare void @baz(i32, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/byval7.ll b/libclamav/c++/llvm/test/CodeGen/X86/byval7.ll
deleted file mode 100644
index 7284955..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/byval7.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah | grep -E {add|lea} | grep 16
-
- %struct.S = type { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>,
- <2 x i64> }
-
-define i32 @main() nounwind {
-entry:
- %s = alloca %struct.S ; <%struct.S*> [#uses=2]
- %tmp15 = getelementptr %struct.S* %s, i32 0, i32 0 ; <<2 x i64>*> [#uses=1]
- store <2 x i64> < i64 8589934595, i64 1 >, <2 x i64>* %tmp15, align 16
- call void @t( i32 1, %struct.S* byval %s ) nounwind
- ret i32 0
-}
-
-declare void @t(i32, %struct.S* byval )
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/call-imm.ll b/libclamav/c++/llvm/test/CodeGen/X86/call-imm.ll
deleted file mode 100644
index 87785bc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/call-imm.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -mtriple=i386-darwin-apple -relocation-model=static | grep {call.*12345678}
-; RUN: llc < %s -mtriple=i386-darwin-apple -relocation-model=pic | not grep {call.*12345678}
-; RUN: llc < %s -mtriple=i386-pc-linux -relocation-model=dynamic-no-pic | grep {call.*12345678}
-
-; Call to immediate is not safe on x86-64 unless we *know* that the
-; call will be within 32-bits pcrel from the dest immediate.
-
-; RUN: llc < %s -march=x86-64 | grep {call.*\*%rax}
-
-; PR3666
-; PR3773
-; rdar://6904453
-
-define i32 @main() nounwind {
-entry:
- %0 = call i32 inttoptr (i32 12345678 to i32 (i32)*)(i32 0) nounwind ; <i32> [#uses=1]
- ret i32 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/call-push.ll b/libclamav/c++/llvm/test/CodeGen/X86/call-push.ll
deleted file mode 100644
index 02cbccc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/call-push.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -disable-fp-elim | FileCheck %s
-
- %struct.decode_t = type { i8, i8, i8, i8, i16, i8, i8, %struct.range_t** }
- %struct.range_t = type { float, float, i32, i32, i32, [0 x i8] }
-
-define i32 @decode_byte(%struct.decode_t* %decode) nounwind {
-; CHECK: decode_byte:
-; CHECK: pushl
-; CHECK: popl
-; CHECK: popl
-; CHECK: jmp
-entry:
- %tmp2 = getelementptr %struct.decode_t* %decode, i32 0, i32 4 ; <i16*> [#uses=1]
- %tmp23 = bitcast i16* %tmp2 to i32* ; <i32*> [#uses=1]
- %tmp4 = load i32* %tmp23 ; <i32> [#uses=1]
- %tmp514 = lshr i32 %tmp4, 24 ; <i32> [#uses=1]
- %tmp56 = trunc i32 %tmp514 to i8 ; <i8> [#uses=1]
- %tmp7 = icmp eq i8 %tmp56, 0 ; <i1> [#uses=1]
- br i1 %tmp7, label %UnifiedReturnBlock, label %cond_true
-
-cond_true: ; preds = %entry
- %tmp10 = tail call i32 @f( %struct.decode_t* %decode ) ; <i32> [#uses=1]
- ret i32 %tmp10
-
-UnifiedReturnBlock: ; preds = %entry
- ret i32 0
-}
-
-declare i32 @f(%struct.decode_t*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/change-compare-stride-0.ll b/libclamav/c++/llvm/test/CodeGen/X86/change-compare-stride-0.ll
deleted file mode 100644
index d520a6f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/change-compare-stride-0.ll
+++ /dev/null
@@ -1,77 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep {cmpl \$-478,} %t
-; RUN: not grep inc %t
-; RUN: not grep {leal 1(} %t
-; RUN: not grep {leal -1(} %t
-; RUN: grep dec %t | count 1
-
-define void @borf(i8* nocapture %in, i8* nocapture %out) nounwind {
-bb4.thread:
- br label %bb2.outer
-
-bb2.outer: ; preds = %bb4, %bb4.thread
- %indvar18 = phi i32 [ 0, %bb4.thread ], [ %indvar.next28, %bb4 ] ; <i32> [#uses=3]
- %tmp34 = mul i32 %indvar18, 65535 ; <i32> [#uses=1]
- %i.0.reg2mem.0.ph = add i32 %tmp34, 639 ; <i32> [#uses=1]
- %0 = and i32 %i.0.reg2mem.0.ph, 65535 ; <i32> [#uses=1]
- %1 = mul i32 %0, 480 ; <i32> [#uses=1]
- %tmp20 = mul i32 %indvar18, -478 ; <i32> [#uses=1]
- br label %bb2
-
-bb2: ; preds = %bb2, %bb2.outer
- %indvar = phi i32 [ 0, %bb2.outer ], [ %indvar.next, %bb2 ] ; <i32> [#uses=3]
- %ctg2 = getelementptr i8* %out, i32 %tmp20 ; <i8*> [#uses=1]
- %tmp21 = ptrtoint i8* %ctg2 to i32 ; <i32> [#uses=1]
- %tmp23 = sub i32 %tmp21, %indvar ; <i32> [#uses=1]
- %out_addr.0.reg2mem.0 = inttoptr i32 %tmp23 to i8* ; <i8*> [#uses=1]
- %tmp25 = mul i32 %indvar, 65535 ; <i32> [#uses=1]
- %j.0.reg2mem.0 = add i32 %tmp25, 479 ; <i32> [#uses=1]
- %2 = and i32 %j.0.reg2mem.0, 65535 ; <i32> [#uses=1]
- %3 = add i32 %1, %2 ; <i32> [#uses=9]
- %4 = add i32 %3, -481 ; <i32> [#uses=1]
- %5 = getelementptr i8* %in, i32 %4 ; <i8*> [#uses=1]
- %6 = load i8* %5, align 1 ; <i8> [#uses=1]
- %7 = add i32 %3, -480 ; <i32> [#uses=1]
- %8 = getelementptr i8* %in, i32 %7 ; <i8*> [#uses=1]
- %9 = load i8* %8, align 1 ; <i8> [#uses=1]
- %10 = add i32 %3, -479 ; <i32> [#uses=1]
- %11 = getelementptr i8* %in, i32 %10 ; <i8*> [#uses=1]
- %12 = load i8* %11, align 1 ; <i8> [#uses=1]
- %13 = add i32 %3, -1 ; <i32> [#uses=1]
- %14 = getelementptr i8* %in, i32 %13 ; <i8*> [#uses=1]
- %15 = load i8* %14, align 1 ; <i8> [#uses=1]
- %16 = getelementptr i8* %in, i32 %3 ; <i8*> [#uses=1]
- %17 = load i8* %16, align 1 ; <i8> [#uses=1]
- %18 = add i32 %3, 1 ; <i32> [#uses=1]
- %19 = getelementptr i8* %in, i32 %18 ; <i8*> [#uses=1]
- %20 = load i8* %19, align 1 ; <i8> [#uses=1]
- %21 = add i32 %3, 481 ; <i32> [#uses=1]
- %22 = getelementptr i8* %in, i32 %21 ; <i8*> [#uses=1]
- %23 = load i8* %22, align 1 ; <i8> [#uses=1]
- %24 = add i32 %3, 480 ; <i32> [#uses=1]
- %25 = getelementptr i8* %in, i32 %24 ; <i8*> [#uses=1]
- %26 = load i8* %25, align 1 ; <i8> [#uses=1]
- %27 = add i32 %3, 479 ; <i32> [#uses=1]
- %28 = getelementptr i8* %in, i32 %27 ; <i8*> [#uses=1]
- %29 = load i8* %28, align 1 ; <i8> [#uses=1]
- %30 = add i8 %9, %6 ; <i8> [#uses=1]
- %31 = add i8 %30, %12 ; <i8> [#uses=1]
- %32 = add i8 %31, %15 ; <i8> [#uses=1]
- %33 = add i8 %32, %17 ; <i8> [#uses=1]
- %34 = add i8 %33, %20 ; <i8> [#uses=1]
- %35 = add i8 %34, %23 ; <i8> [#uses=1]
- %36 = add i8 %35, %26 ; <i8> [#uses=1]
- %37 = add i8 %36, %29 ; <i8> [#uses=1]
- store i8 %37, i8* %out_addr.0.reg2mem.0, align 1
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, 478 ; <i1> [#uses=1]
- br i1 %exitcond, label %bb4, label %bb2
-
-bb4: ; preds = %bb2
- %indvar.next28 = add i32 %indvar18, 1 ; <i32> [#uses=2]
- %exitcond29 = icmp eq i32 %indvar.next28, 638 ; <i1> [#uses=1]
- br i1 %exitcond29, label %return, label %bb2.outer
-
-return: ; preds = %bb4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/change-compare-stride-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/change-compare-stride-1.ll
deleted file mode 100644
index a9ddbdb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/change-compare-stride-1.ll
+++ /dev/null
@@ -1,86 +0,0 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: grep {cmpq \$-478,} %t
-; RUN: not grep inc %t
-; RUN: not grep {leal 1(} %t
-; RUN: not grep {leal -1(} %t
-; RUN: grep dec %t | count 1
-
-define void @borf(i8* nocapture %in, i8* nocapture %out) nounwind {
-bb4.thread:
- br label %bb2.outer
-
-bb2.outer: ; preds = %bb4, %bb4.thread
- %indvar19 = phi i64 [ 0, %bb4.thread ], [ %indvar.next29, %bb4 ] ; <i64> [#uses=3]
- %indvar31 = trunc i64 %indvar19 to i16 ; <i16> [#uses=1]
- %i.0.reg2mem.0.ph = sub i16 639, %indvar31 ; <i16> [#uses=1]
- %0 = zext i16 %i.0.reg2mem.0.ph to i32 ; <i32> [#uses=1]
- %1 = mul i32 %0, 480 ; <i32> [#uses=1]
- %tmp21 = mul i64 %indvar19, -478 ; <i64> [#uses=1]
- br label %bb2
-
-bb2: ; preds = %bb2, %bb2.outer
- %indvar = phi i64 [ 0, %bb2.outer ], [ %indvar.next, %bb2 ] ; <i64> [#uses=3]
- %indvar16 = trunc i64 %indvar to i16 ; <i16> [#uses=1]
- %ctg2 = getelementptr i8* %out, i64 %tmp21 ; <i8*> [#uses=1]
- %tmp22 = ptrtoint i8* %ctg2 to i64 ; <i64> [#uses=1]
- %tmp24 = sub i64 %tmp22, %indvar ; <i64> [#uses=1]
- %out_addr.0.reg2mem.0 = inttoptr i64 %tmp24 to i8* ; <i8*> [#uses=1]
- %j.0.reg2mem.0 = sub i16 479, %indvar16 ; <i16> [#uses=1]
- %2 = zext i16 %j.0.reg2mem.0 to i32 ; <i32> [#uses=1]
- %3 = add i32 %1, %2 ; <i32> [#uses=9]
- %4 = add i32 %3, -481 ; <i32> [#uses=1]
- %5 = zext i32 %4 to i64 ; <i64> [#uses=1]
- %6 = getelementptr i8* %in, i64 %5 ; <i8*> [#uses=1]
- %7 = load i8* %6, align 1 ; <i8> [#uses=1]
- %8 = add i32 %3, -480 ; <i32> [#uses=1]
- %9 = zext i32 %8 to i64 ; <i64> [#uses=1]
- %10 = getelementptr i8* %in, i64 %9 ; <i8*> [#uses=1]
- %11 = load i8* %10, align 1 ; <i8> [#uses=1]
- %12 = add i32 %3, -479 ; <i32> [#uses=1]
- %13 = zext i32 %12 to i64 ; <i64> [#uses=1]
- %14 = getelementptr i8* %in, i64 %13 ; <i8*> [#uses=1]
- %15 = load i8* %14, align 1 ; <i8> [#uses=1]
- %16 = add i32 %3, -1 ; <i32> [#uses=1]
- %17 = zext i32 %16 to i64 ; <i64> [#uses=1]
- %18 = getelementptr i8* %in, i64 %17 ; <i8*> [#uses=1]
- %19 = load i8* %18, align 1 ; <i8> [#uses=1]
- %20 = zext i32 %3 to i64 ; <i64> [#uses=1]
- %21 = getelementptr i8* %in, i64 %20 ; <i8*> [#uses=1]
- %22 = load i8* %21, align 1 ; <i8> [#uses=1]
- %23 = add i32 %3, 1 ; <i32> [#uses=1]
- %24 = zext i32 %23 to i64 ; <i64> [#uses=1]
- %25 = getelementptr i8* %in, i64 %24 ; <i8*> [#uses=1]
- %26 = load i8* %25, align 1 ; <i8> [#uses=1]
- %27 = add i32 %3, 481 ; <i32> [#uses=1]
- %28 = zext i32 %27 to i64 ; <i64> [#uses=1]
- %29 = getelementptr i8* %in, i64 %28 ; <i8*> [#uses=1]
- %30 = load i8* %29, align 1 ; <i8> [#uses=1]
- %31 = add i32 %3, 480 ; <i32> [#uses=1]
- %32 = zext i32 %31 to i64 ; <i64> [#uses=1]
- %33 = getelementptr i8* %in, i64 %32 ; <i8*> [#uses=1]
- %34 = load i8* %33, align 1 ; <i8> [#uses=1]
- %35 = add i32 %3, 479 ; <i32> [#uses=1]
- %36 = zext i32 %35 to i64 ; <i64> [#uses=1]
- %37 = getelementptr i8* %in, i64 %36 ; <i8*> [#uses=1]
- %38 = load i8* %37, align 1 ; <i8> [#uses=1]
- %39 = add i8 %11, %7 ; <i8> [#uses=1]
- %40 = add i8 %39, %15 ; <i8> [#uses=1]
- %41 = add i8 %40, %19 ; <i8> [#uses=1]
- %42 = add i8 %41, %22 ; <i8> [#uses=1]
- %43 = add i8 %42, %26 ; <i8> [#uses=1]
- %44 = add i8 %43, %30 ; <i8> [#uses=1]
- %45 = add i8 %44, %34 ; <i8> [#uses=1]
- %46 = add i8 %45, %38 ; <i8> [#uses=1]
- store i8 %46, i8* %out_addr.0.reg2mem.0, align 1
- %indvar.next = add i64 %indvar, 1 ; <i64> [#uses=2]
- %exitcond = icmp eq i64 %indvar.next, 478 ; <i1> [#uses=1]
- br i1 %exitcond, label %bb4, label %bb2
-
-bb4: ; preds = %bb2
- %indvar.next29 = add i64 %indvar19, 1 ; <i64> [#uses=2]
- %exitcond30 = icmp eq i64 %indvar.next29, 638 ; <i1> [#uses=1]
- br i1 %exitcond30, label %return, label %bb2.outer
-
-return: ; preds = %bb4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/clz.ll b/libclamav/c++/llvm/test/CodeGen/X86/clz.ll
deleted file mode 100644
index 3f27187..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/clz.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -march=x86 | grep bsr | count 2
-; RUN: llc < %s -march=x86 | grep bsf
-; RUN: llc < %s -march=x86 | grep cmov | count 3
-
-define i32 @t1(i32 %x) nounwind {
- %tmp = tail call i32 @llvm.ctlz.i32( i32 %x )
- ret i32 %tmp
-}
-
-declare i32 @llvm.ctlz.i32(i32) nounwind readnone
-
-define i32 @t2(i32 %x) nounwind {
- %tmp = tail call i32 @llvm.cttz.i32( i32 %x )
- ret i32 %tmp
-}
-
-declare i32 @llvm.cttz.i32(i32) nounwind readnone
-
-define i16 @t3(i16 %x, i16 %y) nounwind {
-entry:
- %tmp1 = add i16 %x, %y
- %tmp2 = tail call i16 @llvm.ctlz.i16( i16 %tmp1 ) ; <i16> [#uses=1]
- ret i16 %tmp2
-}
-
-declare i16 @llvm.ctlz.i16(i16) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/cmov.ll b/libclamav/c++/llvm/test/CodeGen/X86/cmov.ll
deleted file mode 100644
index 39d9d1e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/cmov.ll
+++ /dev/null
@@ -1,157 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-
-define i32 @test1(i32 %x, i32 %n, i32 %w, i32* %vp) nounwind readnone {
-entry:
-; CHECK: test1:
-; CHECK: btl
-; CHECK-NEXT: movl $12, %eax
-; CHECK-NEXT: cmovael (%rcx), %eax
-; CHECK-NEXT: ret
-
- %0 = lshr i32 %x, %n ; <i32> [#uses=1]
- %1 = and i32 %0, 1 ; <i32> [#uses=1]
- %toBool = icmp eq i32 %1, 0 ; <i1> [#uses=1]
- %v = load i32* %vp
- %.0 = select i1 %toBool, i32 %v, i32 12 ; <i32> [#uses=1]
- ret i32 %.0
-}
-define i32 @test2(i32 %x, i32 %n, i32 %w, i32* %vp) nounwind readnone {
-entry:
-; CHECK: test2:
-; CHECK: btl
-; CHECK-NEXT: movl $12, %eax
-; CHECK-NEXT: cmovbl (%rcx), %eax
-; CHECK-NEXT: ret
-
- %0 = lshr i32 %x, %n ; <i32> [#uses=1]
- %1 = and i32 %0, 1 ; <i32> [#uses=1]
- %toBool = icmp eq i32 %1, 0 ; <i1> [#uses=1]
- %v = load i32* %vp
- %.0 = select i1 %toBool, i32 12, i32 %v ; <i32> [#uses=1]
- ret i32 %.0
-}
-
-
-; x86's 32-bit cmov doesn't clobber the high 32 bits of the destination
-; if the condition is false. An explicit zero-extend (movl) is needed
-; after the cmov.
-
-declare void @bar(i64) nounwind
-
-define void @test3(i64 %a, i64 %b, i1 %p) nounwind {
-; CHECK: test3:
-; CHECK: cmovnel %edi, %esi
-; CHECK-NEXT: movl %esi, %edi
-
- %c = trunc i64 %a to i32
- %d = trunc i64 %b to i32
- %e = select i1 %p, i32 %c, i32 %d
- %f = zext i32 %e to i64
- call void @bar(i64 %f)
- ret void
-}
-
-
-
-; CodeGen shouldn't try to do a setne after an expanded 8-bit conditional
-; move without recomputing EFLAGS, because the expansion of the conditional
-; move with control flow may clobber EFLAGS (e.g., with xor, to set the
-; register to zero).
-
-; The test is a little awkward; the important part is that there's a test before the
-; setne.
-; PR4814
-
-
-@g_3 = external global i8 ; <i8*> [#uses=1]
-@g_96 = external global i8 ; <i8*> [#uses=2]
-@g_100 = external global i8 ; <i8*> [#uses=2]
-@_2E_str = external constant [15 x i8], align 1 ; <[15 x i8]*> [#uses=1]
-
-define i32 @test4() nounwind {
-entry:
- %0 = load i8* @g_3, align 1 ; <i8> [#uses=2]
- %1 = sext i8 %0 to i32 ; <i32> [#uses=1]
- %.lobit.i = lshr i8 %0, 7 ; <i8> [#uses=1]
- %tmp.i = zext i8 %.lobit.i to i32 ; <i32> [#uses=1]
- %tmp.not.i = xor i32 %tmp.i, 1 ; <i32> [#uses=1]
- %iftmp.17.0.i.i = ashr i32 %1, %tmp.not.i ; <i32> [#uses=1]
- %retval56.i.i = trunc i32 %iftmp.17.0.i.i to i8 ; <i8> [#uses=1]
- %2 = icmp eq i8 %retval56.i.i, 0 ; <i1> [#uses=2]
- %g_96.promoted.i = load i8* @g_96 ; <i8> [#uses=3]
- %3 = icmp eq i8 %g_96.promoted.i, 0 ; <i1> [#uses=2]
- br i1 %3, label %func_4.exit.i, label %bb.i.i.i
-
-bb.i.i.i: ; preds = %entry
- %4 = volatile load i8* @g_100, align 1 ; <i8> [#uses=0]
- br label %func_4.exit.i
-
-; CHECK: test4:
-; CHECK: g_100
-; CHECK: testb
-; CHECK: testb %al, %al
-; CHECK-NEXT: setne %al
-; CHECK-NEXT: testb
-
-func_4.exit.i: ; preds = %bb.i.i.i, %entry
- %.not.i = xor i1 %2, true ; <i1> [#uses=1]
- %brmerge.i = or i1 %3, %.not.i ; <i1> [#uses=1]
- %.mux.i = select i1 %2, i8 %g_96.promoted.i, i8 0 ; <i8> [#uses=1]
- br i1 %brmerge.i, label %func_1.exit, label %bb.i.i
-
-bb.i.i: ; preds = %func_4.exit.i
- %5 = volatile load i8* @g_100, align 1 ; <i8> [#uses=0]
- br label %func_1.exit
-
-func_1.exit: ; preds = %bb.i.i, %func_4.exit.i
- %g_96.tmp.0.i = phi i8 [ %g_96.promoted.i, %bb.i.i ], [ %.mux.i, %func_4.exit.i ] ; <i8> [#uses=2]
- store i8 %g_96.tmp.0.i, i8* @g_96
- %6 = zext i8 %g_96.tmp.0.i to i32 ; <i32> [#uses=1]
- %7 = tail call i32 (i8*, ...)* @printf(i8* noalias getelementptr ([15 x i8]* @_2E_str, i64 0, i64 0), i32 %6) nounwind ; <i32> [#uses=0]
- ret i32 0
-}
-
-declare i32 @printf(i8* nocapture, ...) nounwind
-
-
-; Should compile to setcc | -2.
-; rdar://6668608
-define i32 @test5(i32* nocapture %P) nounwind readonly {
-entry:
-; CHECK: test5:
-; CHECK: setg %al
-; CHECK: movzbl %al, %eax
-; CHECK: orl $-2, %eax
-; CHECK: ret
-
- %0 = load i32* %P, align 4 ; <i32> [#uses=1]
- %1 = icmp sgt i32 %0, 41 ; <i1> [#uses=1]
- %iftmp.0.0 = select i1 %1, i32 -1, i32 -2 ; <i32> [#uses=1]
- ret i32 %iftmp.0.0
-}
-
-define i32 @test6(i32* nocapture %P) nounwind readonly {
-entry:
-; CHECK: test6:
-; CHECK: setl %al
-; CHECK: movzbl %al, %eax
-; CHECK: leal 4(%rax,%rax,8), %eax
-; CHECK: ret
- %0 = load i32* %P, align 4 ; <i32> [#uses=1]
- %1 = icmp sgt i32 %0, 41 ; <i1> [#uses=1]
- %iftmp.0.0 = select i1 %1, i32 4, i32 13 ; <i32> [#uses=1]
- ret i32 %iftmp.0.0
-}
-
-
-; Don't try to use a 16-bit conditional move to do an 8-bit select,
-; because it isn't worth it. Just use a branch instead.
-define i8 @test7(i1 inreg %c, i8 inreg %a, i8 inreg %b) nounwind {
-; CHECK: test7:
-; CHECK: testb $1, %dil
-; CHECK-NEXT: jne LBB
-
- %d = select i1 %c, i8 %a, i8 %b
- ret i8 %d
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/cmp-test.ll b/libclamav/c++/llvm/test/CodeGen/X86/cmp-test.ll
deleted file mode 100644
index 898c09b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/cmp-test.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=x86 | grep cmp | count 1
-; RUN: llc < %s -march=x86 | grep test | count 1
-
-define i32 @f1(i32 %X, i32* %y) {
- %tmp = load i32* %y ; <i32> [#uses=1]
- %tmp.upgrd.1 = icmp eq i32 %tmp, 0 ; <i1> [#uses=1]
- br i1 %tmp.upgrd.1, label %ReturnBlock, label %cond_true
-
-cond_true: ; preds = %0
- ret i32 1
-
-ReturnBlock: ; preds = %0
- ret i32 0
-}
-
-define i32 @f2(i32 %X, i32* %y) {
- %tmp = load i32* %y ; <i32> [#uses=1]
- %tmp1 = shl i32 %tmp, 3 ; <i32> [#uses=1]
- %tmp1.upgrd.2 = icmp eq i32 %tmp1, 0 ; <i1> [#uses=1]
- br i1 %tmp1.upgrd.2, label %ReturnBlock, label %cond_true
-
-cond_true: ; preds = %0
- ret i32 1
-
-ReturnBlock: ; preds = %0
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/cmp0.ll b/libclamav/c++/llvm/test/CodeGen/X86/cmp0.ll
deleted file mode 100644
index 4878448..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/cmp0.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
-define i64 @test0(i64 %x) nounwind {
- %t = icmp eq i64 %x, 0
- %r = zext i1 %t to i64
- ret i64 %r
-; CHECK: test0:
-; CHECK: testq %rdi, %rdi
-; CHECK: sete %al
-; CHECK: movzbl %al, %eax
-; CHECK: ret
-}
-
-define i64 @test1(i64 %x) nounwind {
- %t = icmp slt i64 %x, 1
- %r = zext i1 %t to i64
- ret i64 %r
-; CHECK: test1:
-; CHECK: testq %rdi, %rdi
-; CHECK: setle %al
-; CHECK: movzbl %al, %eax
-; CHECK: ret
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/cmp2.ll b/libclamav/c++/llvm/test/CodeGen/X86/cmp2.ll
deleted file mode 100644
index 9a8e00c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/cmp2.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep ucomisd | grep CPI | count 2
-
-define i32 @test(double %A) nounwind {
- entry:
- %tmp2 = fcmp ogt double %A, 1.500000e+02; <i1> [#uses=1]
- %tmp5 = fcmp ult double %A, 7.500000e+01; <i1> [#uses=1]
- %bothcond = or i1 %tmp2, %tmp5; <i1> [#uses=1]
- br i1 %bothcond, label %bb8, label %bb12
-
- bb8:; preds = %entry
- %tmp9 = tail call i32 (...)* @foo( ) nounwind ; <i32> [#uses=1]
- ret i32 %tmp9
-
- bb12:; preds = %entry
- ret i32 32
-}
-
-declare i32 @foo(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/coalesce-esp.ll b/libclamav/c++/llvm/test/CodeGen/X86/coalesce-esp.ll
deleted file mode 100644
index 0fe4e56..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/coalesce-esp.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s | grep {movl %esp, %eax}
-; PR4572
-
-; Don't coalesce with %esp if it would end up putting %esp in
-; the index position of an address, because that can't be
-; encoded on x86. It would actually be slightly better to
-; swap the address operands though, since there's no scale.
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-mingw32"
- %"struct.std::valarray<unsigned int>" = type { i32, i32* }
-
-define void @_ZSt17__gslice_to_indexjRKSt8valarrayIjES2_RS0_(i32 %__o, %"struct.std::valarray<unsigned int>"* nocapture %__l, %"struct.std::valarray<unsigned int>"* nocapture %__s, %"struct.std::valarray<unsigned int>"* nocapture %__i) nounwind {
-entry:
- %0 = alloca i32, i32 undef, align 4 ; <i32*> [#uses=1]
- br i1 undef, label %return, label %bb4
-
-bb4: ; preds = %bb7.backedge, %entry
- %indvar = phi i32 [ %indvar.next, %bb7.backedge ], [ 0, %entry ] ; <i32> [#uses=2]
- %scevgep24.sum = sub i32 undef, %indvar ; <i32> [#uses=2]
- %scevgep25 = getelementptr i32* %0, i32 %scevgep24.sum ; <i32*> [#uses=1]
- %scevgep27 = getelementptr i32* undef, i32 %scevgep24.sum ; <i32*> [#uses=1]
- %1 = load i32* %scevgep27, align 4 ; <i32> [#uses=0]
- br i1 undef, label %bb7.backedge, label %bb5
-
-bb5: ; preds = %bb4
- store i32 0, i32* %scevgep25, align 4
- br label %bb7.backedge
-
-bb7.backedge: ; preds = %bb5, %bb4
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
- br label %bb4
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/coalescer-commute1.ll b/libclamav/c++/llvm/test/CodeGen/X86/coalescer-commute1.ll
deleted file mode 100644
index 8aa0bfd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/coalescer-commute1.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse2 | not grep movaps
-; PR1877
-
-@NNTOT = weak global i32 0 ; <i32*> [#uses=1]
-@G = weak global float 0.000000e+00 ; <float*> [#uses=1]
-
-define void @runcont(i32* %source) nounwind {
-entry:
- %tmp10 = load i32* @NNTOT, align 4 ; <i32> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb, %entry
- %neuron.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
- %thesum.0 = phi float [ 0.000000e+00, %entry ], [ %tmp6, %bb ] ; <float> [#uses=1]
- %tmp2 = getelementptr i32* %source, i32 %neuron.0 ; <i32*> [#uses=1]
- %tmp3 = load i32* %tmp2, align 4 ; <i32> [#uses=1]
- %tmp34 = sitofp i32 %tmp3 to float ; <float> [#uses=1]
- %tmp6 = fadd float %tmp34, %thesum.0 ; <float> [#uses=2]
- %indvar.next = add i32 %neuron.0, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %tmp10 ; <i1> [#uses=1]
- br i1 %exitcond, label %bb13, label %bb
-
-bb13: ; preds = %bb
- volatile store float %tmp6, float* @G, align 4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/coalescer-commute2.ll b/libclamav/c++/llvm/test/CodeGen/X86/coalescer-commute2.ll
deleted file mode 100644
index 5d10bba..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/coalescer-commute2.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep paddw | count 2
-; RUN: llc < %s -march=x86-64 | not grep mov
-
-; The 2-addr pass should ensure that identical code is produced for these functions
-; no extra copy should be generated.
-
-define <2 x i64> @test1(<2 x i64> %x, <2 x i64> %y) nounwind {
-entry:
- %tmp6 = bitcast <2 x i64> %y to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp8 = bitcast <2 x i64> %x to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp9 = add <8 x i16> %tmp8, %tmp6 ; <<8 x i16>> [#uses=1]
- %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp10
-}
-
-define <2 x i64> @test2(<2 x i64> %x, <2 x i64> %y) nounwind {
-entry:
- %tmp6 = bitcast <2 x i64> %x to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp8 = bitcast <2 x i64> %y to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp9 = add <8 x i16> %tmp8, %tmp6 ; <<8 x i16>> [#uses=1]
- %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp10
-}
-
-
-; The coalescer should commute the add to avoid a copy.
-define <4 x float> @test3(<4 x float> %V) {
-entry:
- %tmp8 = shufflevector <4 x float> %V, <4 x float> undef,
- <4 x i32> < i32 3, i32 2, i32 1, i32 0 >
- %add = fadd <4 x float> %tmp8, %V
- ret <4 x float> %add
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/coalescer-commute3.ll b/libclamav/c++/llvm/test/CodeGen/X86/coalescer-commute3.ll
deleted file mode 100644
index e5bd448..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/coalescer-commute3.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse2 | grep mov | count 6
-
- %struct.quad_struct = type { i32, i32, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct*, %struct.quad_struct* }
-
-define i32 @perimeter(%struct.quad_struct* %tree, i32 %size) nounwind {
-entry:
- switch i32 %size, label %UnifiedReturnBlock [
- i32 2, label %bb
- i32 0, label %bb50
- ]
-
-bb: ; preds = %entry
- %tmp31 = tail call i32 @perimeter( %struct.quad_struct* null, i32 0 ) nounwind ; <i32> [#uses=1]
- %tmp40 = tail call i32 @perimeter( %struct.quad_struct* null, i32 0 ) nounwind ; <i32> [#uses=1]
- %tmp33 = add i32 0, %tmp31 ; <i32> [#uses=1]
- %tmp42 = add i32 %tmp33, %tmp40 ; <i32> [#uses=1]
- ret i32 %tmp42
-
-bb50: ; preds = %entry
- ret i32 0
-
-UnifiedReturnBlock: ; preds = %entry
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/coalescer-commute4.ll b/libclamav/c++/llvm/test/CodeGen/X86/coalescer-commute4.ll
deleted file mode 100644
index 02a9781..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/coalescer-commute4.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse2 | not grep movaps
-; PR1501
-
-define float @foo(i32* %x, float* %y, i32 %c) nounwind {
-entry:
- %tmp2132 = icmp eq i32 %c, 0 ; <i1> [#uses=2]
- br i1 %tmp2132, label %bb23, label %bb.preheader
-
-bb.preheader: ; preds = %entry
- %umax = select i1 %tmp2132, i32 1, i32 %c ; <i32> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb, %bb.preheader
- %i.0.reg2mem.0 = phi i32 [ 0, %bb.preheader ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
- %res.0.reg2mem.0 = phi float [ 0.000000e+00, %bb.preheader ], [ %tmp14, %bb ] ; <float> [#uses=1]
- %tmp3 = getelementptr i32* %x, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
- %tmp4 = load i32* %tmp3, align 4 ; <i32> [#uses=1]
- %tmp45 = sitofp i32 %tmp4 to float ; <float> [#uses=1]
- %tmp8 = getelementptr float* %y, i32 %i.0.reg2mem.0 ; <float*> [#uses=1]
- %tmp9 = load float* %tmp8, align 4 ; <float> [#uses=1]
- %tmp11 = fmul float %tmp9, %tmp45 ; <float> [#uses=1]
- %tmp14 = fadd float %tmp11, %res.0.reg2mem.0 ; <float> [#uses=2]
- %indvar.next = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %umax ; <i1> [#uses=1]
- br i1 %exitcond, label %bb23, label %bb
-
-bb23: ; preds = %bb, %entry
- %res.0.reg2mem.1 = phi float [ 0.000000e+00, %entry ], [ %tmp14, %bb ] ; <float> [#uses=1]
- ret float %res.0.reg2mem.1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/coalescer-commute5.ll b/libclamav/c++/llvm/test/CodeGen/X86/coalescer-commute5.ll
deleted file mode 100644
index 510d115..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/coalescer-commute5.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse2 | not grep movaps
-
-define i32 @t() {
-entry:
- br i1 true, label %bb1664, label %bb1656
-bb1656: ; preds = %entry
- ret i32 0
-bb1664: ; preds = %entry
- %tmp4297 = bitcast <16 x i8> zeroinitializer to <2 x i64> ; <<2 x i64>> [#uses=2]
- %tmp4351 = call <16 x i8> @llvm.x86.sse2.pcmpeq.b( <16 x i8> zeroinitializer, <16 x i8> zeroinitializer ) nounwind readnone ; <<16 x i8>> [#uses=0]
- br i1 false, label %bb5310, label %bb4743
-bb4743: ; preds = %bb1664
- %tmp4360.not28 = or <2 x i64> zeroinitializer, %tmp4297 ; <<2 x i64>> [#uses=1]
- br label %bb5310
-bb5310: ; preds = %bb4743, %bb1664
- %tmp4360.not28.pn = phi <2 x i64> [ %tmp4360.not28, %bb4743 ], [ %tmp4297, %bb1664 ] ; <<2 x i64>> [#uses=1]
- %tmp4415.not.pn = or <2 x i64> zeroinitializer, %tmp4360.not28.pn ; <<2 x i64>> [#uses=0]
- ret i32 0
-}
-
-declare <16 x i8> @llvm.x86.sse2.pcmpeq.b(<16 x i8>, <16 x i8>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/coalescer-cross.ll b/libclamav/c++/llvm/test/CodeGen/X86/coalescer-cross.ll
deleted file mode 100644
index 7d6f399..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/coalescer-cross.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin10 | not grep movaps
-; rdar://6509240
-
- type { %struct.TValue } ; type %0
- type { %struct.L_Umaxalign, i32, %struct.Node* } ; type %1
- %struct.CallInfo = type { %struct.TValue*, %struct.TValue*, %struct.TValue*, i32*, i32, i32 }
- %struct.GCObject = type { %struct.lua_State }
- %struct.L_Umaxalign = type { double }
- %struct.Mbuffer = type { i8*, i32, i32 }
- %struct.Node = type { %struct.TValue, %struct.TKey }
- %struct.TKey = type { %1 }
- %struct.TString = type { %struct.anon }
- %struct.TValue = type { %struct.L_Umaxalign, i32 }
- %struct.Table = type { %struct.GCObject*, i8, i8, i8, i8, %struct.Table*, %struct.TValue*, %struct.Node*, %struct.Node*, %struct.GCObject*, i32 }
- %struct.UpVal = type { %struct.GCObject*, i8, i8, %struct.TValue*, %0 }
- %struct.anon = type { %struct.GCObject*, i8, i8, i8, i32, i32 }
- %struct.global_State = type { %struct.stringtable, i8* (i8*, i8*, i32, i32)*, i8*, i8, i8, i32, %struct.GCObject*, %struct.GCObject**, %struct.GCObject*, %struct.GCObject*, %struct.GCObject*, %struct.GCObject*, %struct.Mbuffer, i32, i32, i32, i32, i32, i32, i32 (%struct.lua_State*)*, %struct.TValue, %struct.lua_State*, %struct.UpVal, [9 x %struct.Table*], [17 x %struct.TString*] }
- %struct.lua_Debug = type { i32, i8*, i8*, i8*, i8*, i32, i32, i32, i32, [60 x i8], i32 }
- %struct.lua_State = type { %struct.GCObject*, i8, i8, i8, %struct.TValue*, %struct.TValue*, %struct.global_State*, %struct.CallInfo*, i32*, %struct.TValue*, %struct.TValue*, %struct.CallInfo*, %struct.CallInfo*, i32, i32, i16, i16, i8, i8, i32, i32, void (%struct.lua_State*, %struct.lua_Debug*)*, %struct.TValue, %struct.TValue, %struct.GCObject*, %struct.GCObject*, %struct.lua_longjmp*, i32 }
- %struct.lua_longjmp = type { %struct.lua_longjmp*, [18 x i32], i32 }
- %struct.stringtable = type { %struct.GCObject**, i32, i32 }
-@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 (%struct.lua_State*)* @os_clock to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define i32 @os_clock(%struct.lua_State* nocapture %L) nounwind ssp {
-entry:
- %0 = tail call i32 @"\01_clock$UNIX2003"() nounwind ; <i32> [#uses=1]
- %1 = uitofp i32 %0 to double ; <double> [#uses=1]
- %2 = fdiv double %1, 1.000000e+06 ; <double> [#uses=1]
- %3 = getelementptr %struct.lua_State* %L, i32 0, i32 4 ; <%struct.TValue**> [#uses=3]
- %4 = load %struct.TValue** %3, align 4 ; <%struct.TValue*> [#uses=2]
- %5 = getelementptr %struct.TValue* %4, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- store double %2, double* %5, align 4
- %6 = getelementptr %struct.TValue* %4, i32 0, i32 1 ; <i32*> [#uses=1]
- store i32 3, i32* %6, align 4
- %7 = load %struct.TValue** %3, align 4 ; <%struct.TValue*> [#uses=1]
- %8 = getelementptr %struct.TValue* %7, i32 1 ; <%struct.TValue*> [#uses=1]
- store %struct.TValue* %8, %struct.TValue** %3, align 4
- ret i32 1
-}
-
-declare i32 @"\01_clock$UNIX2003"()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/coalescer-remat.ll b/libclamav/c++/llvm/test/CodeGen/X86/coalescer-remat.ll
deleted file mode 100644
index 4db520f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/coalescer-remat.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | grep xor | count 3
-
-@val = internal global i64 0 ; <i64*> [#uses=1]
-@"\01LC" = internal constant [7 x i8] c"0x%lx\0A\00" ; <[7 x i8]*> [#uses=1]
-
-define i32 @main() nounwind {
-entry:
- %0 = tail call i64 @llvm.atomic.cmp.swap.i64.p0i64(i64* @val, i64 0, i64 1) ; <i64> [#uses=1]
- %1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([7 x i8]* @"\01LC", i32 0, i64 0), i64 %0) nounwind ; <i32> [#uses=0]
- ret i32 0
-}
-
-declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64*, i64, i64) nounwind
-
-declare i32 @printf(i8*, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/code_placement.ll b/libclamav/c++/llvm/test/CodeGen/X86/code_placement.ll
deleted file mode 100644
index 9747183..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/code_placement.ll
+++ /dev/null
@@ -1,136 +0,0 @@
-; RUN: llc -march=x86 < %s | FileCheck %s
-
-@Te0 = external global [256 x i32] ; <[256 x i32]*> [#uses=5]
-@Te1 = external global [256 x i32] ; <[256 x i32]*> [#uses=4]
-@Te3 = external global [256 x i32] ; <[256 x i32]*> [#uses=2]
-
-define void @t(i8* nocapture %in, i8* nocapture %out, i32* nocapture %rk, i32 %r) nounwind ssp {
-entry:
- %0 = load i32* %rk, align 4 ; <i32> [#uses=1]
- %1 = getelementptr i32* %rk, i64 1 ; <i32*> [#uses=1]
- %2 = load i32* %1, align 4 ; <i32> [#uses=1]
- %tmp15 = add i32 %r, -1 ; <i32> [#uses=1]
- %tmp.16 = zext i32 %tmp15 to i64 ; <i64> [#uses=2]
- br label %bb
-; CHECK: jmp
-; CHECK-NEXT: align
-
-bb: ; preds = %bb1, %entry
- %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %bb1 ] ; <i64> [#uses=3]
- %s1.0 = phi i32 [ %2, %entry ], [ %56, %bb1 ] ; <i32> [#uses=2]
- %s0.0 = phi i32 [ %0, %entry ], [ %43, %bb1 ] ; <i32> [#uses=2]
- %tmp18 = shl i64 %indvar, 4 ; <i64> [#uses=4]
- %rk26 = bitcast i32* %rk to i8* ; <i8*> [#uses=6]
- %3 = lshr i32 %s0.0, 24 ; <i32> [#uses=1]
- %4 = zext i32 %3 to i64 ; <i64> [#uses=1]
- %5 = getelementptr [256 x i32]* @Te0, i64 0, i64 %4 ; <i32*> [#uses=1]
- %6 = load i32* %5, align 4 ; <i32> [#uses=1]
- %7 = lshr i32 %s1.0, 16 ; <i32> [#uses=1]
- %8 = and i32 %7, 255 ; <i32> [#uses=1]
- %9 = zext i32 %8 to i64 ; <i64> [#uses=1]
- %10 = getelementptr [256 x i32]* @Te1, i64 0, i64 %9 ; <i32*> [#uses=1]
- %11 = load i32* %10, align 4 ; <i32> [#uses=1]
- %ctg2.sum2728 = or i64 %tmp18, 8 ; <i64> [#uses=1]
- %12 = getelementptr i8* %rk26, i64 %ctg2.sum2728 ; <i8*> [#uses=1]
- %13 = bitcast i8* %12 to i32* ; <i32*> [#uses=1]
- %14 = load i32* %13, align 4 ; <i32> [#uses=1]
- %15 = xor i32 %11, %6 ; <i32> [#uses=1]
- %16 = xor i32 %15, %14 ; <i32> [#uses=3]
- %17 = lshr i32 %s1.0, 24 ; <i32> [#uses=1]
- %18 = zext i32 %17 to i64 ; <i64> [#uses=1]
- %19 = getelementptr [256 x i32]* @Te0, i64 0, i64 %18 ; <i32*> [#uses=1]
- %20 = load i32* %19, align 4 ; <i32> [#uses=1]
- %21 = and i32 %s0.0, 255 ; <i32> [#uses=1]
- %22 = zext i32 %21 to i64 ; <i64> [#uses=1]
- %23 = getelementptr [256 x i32]* @Te3, i64 0, i64 %22 ; <i32*> [#uses=1]
- %24 = load i32* %23, align 4 ; <i32> [#uses=1]
- %ctg2.sum2930 = or i64 %tmp18, 12 ; <i64> [#uses=1]
- %25 = getelementptr i8* %rk26, i64 %ctg2.sum2930 ; <i8*> [#uses=1]
- %26 = bitcast i8* %25 to i32* ; <i32*> [#uses=1]
- %27 = load i32* %26, align 4 ; <i32> [#uses=1]
- %28 = xor i32 %24, %20 ; <i32> [#uses=1]
- %29 = xor i32 %28, %27 ; <i32> [#uses=4]
- %30 = lshr i32 %16, 24 ; <i32> [#uses=1]
- %31 = zext i32 %30 to i64 ; <i64> [#uses=1]
- %32 = getelementptr [256 x i32]* @Te0, i64 0, i64 %31 ; <i32*> [#uses=1]
- %33 = load i32* %32, align 4 ; <i32> [#uses=2]
- %exitcond = icmp eq i64 %indvar, %tmp.16 ; <i1> [#uses=1]
- br i1 %exitcond, label %bb2, label %bb1
-
-bb1: ; preds = %bb
- %ctg2.sum31 = add i64 %tmp18, 16 ; <i64> [#uses=1]
- %34 = getelementptr i8* %rk26, i64 %ctg2.sum31 ; <i8*> [#uses=1]
- %35 = bitcast i8* %34 to i32* ; <i32*> [#uses=1]
- %36 = lshr i32 %29, 16 ; <i32> [#uses=1]
- %37 = and i32 %36, 255 ; <i32> [#uses=1]
- %38 = zext i32 %37 to i64 ; <i64> [#uses=1]
- %39 = getelementptr [256 x i32]* @Te1, i64 0, i64 %38 ; <i32*> [#uses=1]
- %40 = load i32* %39, align 4 ; <i32> [#uses=1]
- %41 = load i32* %35, align 4 ; <i32> [#uses=1]
- %42 = xor i32 %40, %33 ; <i32> [#uses=1]
- %43 = xor i32 %42, %41 ; <i32> [#uses=1]
- %44 = lshr i32 %29, 24 ; <i32> [#uses=1]
- %45 = zext i32 %44 to i64 ; <i64> [#uses=1]
- %46 = getelementptr [256 x i32]* @Te0, i64 0, i64 %45 ; <i32*> [#uses=1]
- %47 = load i32* %46, align 4 ; <i32> [#uses=1]
- %48 = and i32 %16, 255 ; <i32> [#uses=1]
- %49 = zext i32 %48 to i64 ; <i64> [#uses=1]
- %50 = getelementptr [256 x i32]* @Te3, i64 0, i64 %49 ; <i32*> [#uses=1]
- %51 = load i32* %50, align 4 ; <i32> [#uses=1]
- %ctg2.sum32 = add i64 %tmp18, 20 ; <i64> [#uses=1]
- %52 = getelementptr i8* %rk26, i64 %ctg2.sum32 ; <i8*> [#uses=1]
- %53 = bitcast i8* %52 to i32* ; <i32*> [#uses=1]
- %54 = load i32* %53, align 4 ; <i32> [#uses=1]
- %55 = xor i32 %51, %47 ; <i32> [#uses=1]
- %56 = xor i32 %55, %54 ; <i32> [#uses=1]
- %indvar.next = add i64 %indvar, 1 ; <i64> [#uses=1]
- br label %bb
-
-bb2: ; preds = %bb
- %tmp10 = shl i64 %tmp.16, 4 ; <i64> [#uses=2]
- %ctg2.sum = add i64 %tmp10, 16 ; <i64> [#uses=1]
- %tmp1213 = getelementptr i8* %rk26, i64 %ctg2.sum ; <i8*> [#uses=1]
- %57 = bitcast i8* %tmp1213 to i32* ; <i32*> [#uses=1]
- %58 = and i32 %33, -16777216 ; <i32> [#uses=1]
- %59 = lshr i32 %29, 16 ; <i32> [#uses=1]
- %60 = and i32 %59, 255 ; <i32> [#uses=1]
- %61 = zext i32 %60 to i64 ; <i64> [#uses=1]
- %62 = getelementptr [256 x i32]* @Te1, i64 0, i64 %61 ; <i32*> [#uses=1]
- %63 = load i32* %62, align 4 ; <i32> [#uses=1]
- %64 = and i32 %63, 16711680 ; <i32> [#uses=1]
- %65 = or i32 %64, %58 ; <i32> [#uses=1]
- %66 = load i32* %57, align 4 ; <i32> [#uses=1]
- %67 = xor i32 %65, %66 ; <i32> [#uses=2]
- %68 = lshr i32 %29, 8 ; <i32> [#uses=1]
- %69 = zext i32 %68 to i64 ; <i64> [#uses=1]
- %70 = getelementptr [256 x i32]* @Te0, i64 0, i64 %69 ; <i32*> [#uses=1]
- %71 = load i32* %70, align 4 ; <i32> [#uses=1]
- %72 = and i32 %71, -16777216 ; <i32> [#uses=1]
- %73 = and i32 %16, 255 ; <i32> [#uses=1]
- %74 = zext i32 %73 to i64 ; <i64> [#uses=1]
- %75 = getelementptr [256 x i32]* @Te1, i64 0, i64 %74 ; <i32*> [#uses=1]
- %76 = load i32* %75, align 4 ; <i32> [#uses=1]
- %77 = and i32 %76, 16711680 ; <i32> [#uses=1]
- %78 = or i32 %77, %72 ; <i32> [#uses=1]
- %ctg2.sum25 = add i64 %tmp10, 20 ; <i64> [#uses=1]
- %79 = getelementptr i8* %rk26, i64 %ctg2.sum25 ; <i8*> [#uses=1]
- %80 = bitcast i8* %79 to i32* ; <i32*> [#uses=1]
- %81 = load i32* %80, align 4 ; <i32> [#uses=1]
- %82 = xor i32 %78, %81 ; <i32> [#uses=2]
- %83 = lshr i32 %67, 24 ; <i32> [#uses=1]
- %84 = trunc i32 %83 to i8 ; <i8> [#uses=1]
- store i8 %84, i8* %out, align 1
- %85 = lshr i32 %67, 16 ; <i32> [#uses=1]
- %86 = trunc i32 %85 to i8 ; <i8> [#uses=1]
- %87 = getelementptr i8* %out, i64 1 ; <i8*> [#uses=1]
- store i8 %86, i8* %87, align 1
- %88 = getelementptr i8* %out, i64 4 ; <i8*> [#uses=1]
- %89 = lshr i32 %82, 24 ; <i32> [#uses=1]
- %90 = trunc i32 %89 to i8 ; <i8> [#uses=1]
- store i8 %90, i8* %88, align 1
- %91 = lshr i32 %82, 16 ; <i32> [#uses=1]
- %92 = trunc i32 %91 to i8 ; <i8> [#uses=1]
- %93 = getelementptr i8* %out, i64 5 ; <i8*> [#uses=1]
- store i8 %92, i8* %93, align 1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/code_placement_eh.ll b/libclamav/c++/llvm/test/CodeGen/X86/code_placement_eh.ll
deleted file mode 100644
index 172d591..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/code_placement_eh.ll
+++ /dev/null
@@ -1,45 +0,0 @@
-; RUN: llc < %s
-
-; CodePlacementOpt shouldn't try to modify this loop because
-; it involves EH edges.
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
-target triple = "i386-apple-darwin10.0"
-
-define void @foo() {
-invcont5:
- br label %bb15
-
-.noexc3: ; preds = %bb15
- br i1 undef, label %bb18.i5.i, label %bb15
-
-.noexc6.i.i: ; preds = %bb18.i5.i
- %tmp2021 = invoke float @cosf(float 0.000000e+00) readonly
- to label %bb18.i5.i unwind label %lpad.i.i ; <float> [#uses=0]
-
-bb18.i5.i: ; preds = %.noexc6.i.i, %bb51.i
- %tmp2019 = invoke float @sinf(float 0.000000e+00) readonly
- to label %.noexc6.i.i unwind label %lpad.i.i ; <float> [#uses=0]
-
-lpad.i.i: ; preds = %bb18.i5.i, %.noexc6.i.i
- %eh_ptr.i.i = call i8* @llvm.eh.exception() ; <i8*> [#uses=1]
- unreachable
-
-lpad59.i: ; preds = %bb15
- %eh_ptr60.i = call i8* @llvm.eh.exception() ; <i8*> [#uses=1]
- unreachable
-
-bb15: ; preds = %.noexc3, %invcont5
- invoke fastcc void @_ZN28btHashedOverlappingPairCacheC2Ev()
- to label %.noexc3 unwind label %lpad59.i
-}
-
-declare i8* @llvm.eh.exception() nounwind readonly
-
-declare i32 @llvm.eh.selector(i8*, i8*, ...) nounwind
-
-declare float @sinf(float) readonly
-
-declare float @cosf(float) readonly
-
-declare fastcc void @_ZN28btHashedOverlappingPairCacheC2Ev() align 2
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/codegen-dce.ll b/libclamav/c++/llvm/test/CodeGen/X86/codegen-dce.ll
deleted file mode 100644
index d83efaf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/codegen-dce.ll
+++ /dev/null
@@ -1,43 +0,0 @@
-; RUN: llc < %s -march=x86 -stats |& grep {codegen-dce} | grep {Number of dead instructions deleted}
-
- %struct.anon = type { [3 x double], double, %struct.node*, [64 x %struct.bnode*], [64 x %struct.bnode*] }
- %struct.bnode = type { i16, double, [3 x double], i32, i32, [3 x double], [3 x double], [3 x double], double, %struct.bnode*, %struct.bnode* }
- %struct.node = type { i16, double, [3 x double], i32, i32 }
-
-define i32 @main(i32 %argc, i8** nocapture %argv) nounwind {
-entry:
- %0 = malloc %struct.anon ; <%struct.anon*> [#uses=2]
- %1 = getelementptr %struct.anon* %0, i32 0, i32 2 ; <%struct.node**> [#uses=1]
- br label %bb14.i
-
-bb14.i: ; preds = %bb14.i, %entry
- %i8.0.reg2mem.0.i = phi i32 [ 0, %entry ], [ %2, %bb14.i ] ; <i32> [#uses=1]
- %2 = add i32 %i8.0.reg2mem.0.i, 1 ; <i32> [#uses=2]
- %exitcond74.i = icmp eq i32 %2, 32 ; <i1> [#uses=1]
- br i1 %exitcond74.i, label %bb32.i, label %bb14.i
-
-bb32.i: ; preds = %bb32.i, %bb14.i
- %tmp.0.reg2mem.0.i = phi i32 [ %indvar.next63.i, %bb32.i ], [ 0, %bb14.i ] ; <i32> [#uses=1]
- %indvar.next63.i = add i32 %tmp.0.reg2mem.0.i, 1 ; <i32> [#uses=2]
- %exitcond64.i = icmp eq i32 %indvar.next63.i, 64 ; <i1> [#uses=1]
- br i1 %exitcond64.i, label %bb47.loopexit.i, label %bb32.i
-
-bb.i.i: ; preds = %bb47.loopexit.i
- unreachable
-
-stepsystem.exit.i: ; preds = %bb47.loopexit.i
- store %struct.node* null, %struct.node** %1, align 4
- br label %bb.i6.i
-
-bb.i6.i: ; preds = %bb.i6.i, %stepsystem.exit.i
- br i1 false, label %bb107.i.i, label %bb.i6.i
-
-bb107.i.i: ; preds = %bb107.i.i, %bb.i6.i
- %q_addr.0.i.i.in = phi %struct.bnode** [ null, %bb107.i.i ], [ %3, %bb.i6.i ] ; <%struct.bnode**> [#uses=0]
- br label %bb107.i.i
-
-bb47.loopexit.i: ; preds = %bb32.i
- %3 = getelementptr %struct.anon* %0, i32 0, i32 4, i32 0 ; <%struct.bnode**> [#uses=1]
- %4 = icmp eq %struct.node* null, null ; <i1> [#uses=1]
- br i1 %4, label %stepsystem.exit.i, label %bb.i.i
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/codegen-prepare-cast.ll b/libclamav/c++/llvm/test/CodeGen/X86/codegen-prepare-cast.ll
deleted file mode 100644
index 2a8ead8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/codegen-prepare-cast.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86-64
-; PR4297
-
-target datalayout =
-"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
- %"byte[]" = type { i64, i8* }
- %"char[][]" = type { i64, %"byte[]"* }
-@.str = external constant [7 x i8] ; <[7 x i8]*> [#uses=1]
-
-define fastcc i32 @_Dmain(%"char[][]" %unnamed) {
-entry:
- %tmp = getelementptr [7 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
- br i1 undef, label %foreachbody, label %foreachend
-
-foreachbody: ; preds = %entry
- %tmp4 = getelementptr i8* %tmp, i32 undef ; <i8*> [#uses=1]
- %tmp5 = load i8* %tmp4 ; <i8> [#uses=0]
- unreachable
-
-foreachend: ; preds = %entry
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/codegen-prepare-extload.ll b/libclamav/c++/llvm/test/CodeGen/X86/codegen-prepare-extload.ll
deleted file mode 100644
index 9f57d53..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/codegen-prepare-extload.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-; rdar://7304838
-
-; CodeGenPrepare should move the zext into the block with the load
-; so that SelectionDAG can select it with the load.
-
-; CHECK: movzbl (%rdi), %eax
-
-define void @foo(i8* %p, i32* %q) {
-entry:
- %t = load i8* %p
- %a = icmp slt i8 %t, 20
- br i1 %a, label %true, label %false
-true:
- %s = zext i8 %t to i32
- store i32 %s, i32* %q
- ret void
-false:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/codemodel.ll b/libclamav/c++/llvm/test/CodeGen/X86/codemodel.ll
deleted file mode 100644
index b6ca1ce..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/codemodel.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; RUN: llc < %s -code-model=small | FileCheck -check-prefix CHECK-SMALL %s
-; RUN: llc < %s -code-model=kernel | FileCheck -check-prefix CHECK-KERNEL %s
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-@data = external global [0 x i32] ; <[0 x i32]*> [#uses=5]
-
-define i32 @foo() nounwind readonly {
-entry:
-; CHECK-SMALL: foo:
-; CHECK-SMALL: movl data(%rip), %eax
-; CHECK-KERNEL: foo:
-; CHECK-KERNEL: movl data, %eax
- %0 = load i32* getelementptr ([0 x i32]* @data, i64 0, i64 0), align 4 ; <i32> [#uses=1]
- ret i32 %0
-}
-
-define i32 @foo2() nounwind readonly {
-entry:
-; CHECK-SMALL: foo2:
-; CHECK-SMALL: movl data+40(%rip), %eax
-; CHECK-KERNEL: foo2:
-; CHECK-KERNEL: movl data+40, %eax
- %0 = load i32* getelementptr ([0 x i32]* @data, i32 0, i64 10), align 4 ; <i32> [#uses=1]
- ret i32 %0
-}
-
-define i32 @foo3() nounwind readonly {
-entry:
-; CHECK-SMALL: foo3:
-; CHECK-SMALL: movl data-40(%rip), %eax
-; CHECK-KERNEL: foo3:
-; CHECK-KERNEL: movq $-40, %rax
- %0 = load i32* getelementptr ([0 x i32]* @data, i32 0, i64 -10), align 4 ; <i32> [#uses=1]
- ret i32 %0
-}
-
-define i32 @foo4() nounwind readonly {
-entry:
-; FIXME: We really can use movabsl here!
-; CHECK-SMALL: foo4:
-; CHECK-SMALL: movl $16777216, %eax
-; CHECK-SMALL: movl data(%rax), %eax
-; CHECK-KERNEL: foo4:
-; CHECK-KERNEL: movl data+16777216, %eax
- %0 = load i32* getelementptr ([0 x i32]* @data, i32 0, i64 4194304), align 4 ; <i32> [#uses=1]
- ret i32 %0
-}
-
-define i32 @foo1() nounwind readonly {
-entry:
-; CHECK-SMALL: foo1:
-; CHECK-SMALL: movl data+16777212(%rip), %eax
-; CHECK-KERNEL: foo1:
-; CHECK-KERNEL: movl data+16777212, %eax
- %0 = load i32* getelementptr ([0 x i32]* @data, i32 0, i64 4194303), align 4 ; <i32> [#uses=1]
- ret i32 %0
-}
-define i32 @foo5() nounwind readonly {
-entry:
-; CHECK-SMALL: foo5:
-; CHECK-SMALL: movl data-16777216(%rip), %eax
-; CHECK-KERNEL: foo5:
-; CHECK-KERNEL: movq $-16777216, %rax
- %0 = load i32* getelementptr ([0 x i32]* @data, i32 0, i64 -4194304), align 4 ; <i32> [#uses=1]
- ret i32 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/combine-lds.ll b/libclamav/c++/llvm/test/CodeGen/X86/combine-lds.ll
deleted file mode 100644
index b49d081..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/combine-lds.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep fldl | count 1
-
-define double @doload64(i64 %x) nounwind {
- %tmp717 = bitcast i64 %x to double
- ret double %tmp717
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/combiner-aa-0.ll b/libclamav/c++/llvm/test/CodeGen/X86/combiner-aa-0.ll
deleted file mode 100644
index a61ef7a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/combiner-aa-0.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86-64 -combiner-global-alias-analysis -combiner-alias-analysis
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
- %struct.Hash_Key = type { [4 x i32], i32 }
-@g_flipV_hashkey = external global %struct.Hash_Key, align 16 ; <%struct.Hash_Key*> [#uses=1]
-
-define void @foo() nounwind {
- %t0 = load i32* undef, align 16 ; <i32> [#uses=1]
- %t1 = load i32* null, align 4 ; <i32> [#uses=1]
- %t2 = srem i32 %t0, 32 ; <i32> [#uses=1]
- %t3 = shl i32 1, %t2 ; <i32> [#uses=1]
- %t4 = xor i32 %t3, %t1 ; <i32> [#uses=1]
- store i32 %t4, i32* null, align 4
- %t5 = getelementptr %struct.Hash_Key* @g_flipV_hashkey, i64 0, i32 0, i64 0 ; <i32*> [#uses=2]
- %t6 = load i32* %t5, align 4 ; <i32> [#uses=1]
- %t7 = shl i32 1, undef ; <i32> [#uses=1]
- %t8 = xor i32 %t7, %t6 ; <i32> [#uses=1]
- store i32 %t8, i32* %t5, align 4
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/combiner-aa-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/combiner-aa-1.ll
deleted file mode 100644
index 58a7129..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/combiner-aa-1.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s --combiner-alias-analysis --combiner-global-alias-analysis
-; PR4880
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-linux-gnu"
-
-%struct.alst_node = type { %struct.node }
-%struct.arg_node = type { %struct.node, i8*, %struct.alst_node* }
-%struct.arglst_node = type { %struct.alst_node, %struct.arg_node*, %struct.arglst_node* }
-%struct.lam_node = type { %struct.alst_node, %struct.arg_node*, %struct.alst_node* }
-%struct.node = type { i32 (...)**, %struct.node* }
-
-define i32 @._ZN8lam_node18resolve_name_clashEP8arg_nodeP9alst_node._ZNK8lam_nodeeqERK8exp_node._ZN11arglst_nodeD0Ev(%struct.lam_node* %this.this, %struct.arg_node* %outer_arg, %struct.alst_node* %env.cmp, %struct.arglst_node* %this, i32 %functionID) {
-comb_entry:
- %.SV59 = alloca %struct.node* ; <%struct.node**> [#uses=1]
- %0 = load i32 (...)*** null, align 4 ; <i32 (...)**> [#uses=1]
- %1 = getelementptr inbounds i32 (...)** %0, i32 3 ; <i32 (...)**> [#uses=1]
- %2 = load i32 (...)** %1, align 4 ; <i32 (...)*> [#uses=1]
- store %struct.node* undef, %struct.node** %.SV59
- %3 = bitcast i32 (...)* %2 to i32 (%struct.node*)* ; <i32 (%struct.node*)*> [#uses=1]
- %4 = tail call i32 %3(%struct.node* undef) ; <i32> [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/commute-intrinsic.ll b/libclamav/c++/llvm/test/CodeGen/X86/commute-intrinsic.ll
deleted file mode 100644
index d810cb1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/commute-intrinsic.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+sse2 -relocation-model=static | not grep movaps
-
-@a = external global <2 x i64> ; <<2 x i64>*> [#uses=1]
-
-define <2 x i64> @madd(<2 x i64> %b) nounwind {
-entry:
- %tmp2 = load <2 x i64>* @a, align 16 ; <<2 x i64>> [#uses=1]
- %tmp6 = bitcast <2 x i64> %b to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp9 = bitcast <2 x i64> %tmp2 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp11 = tail call <4 x i32> @llvm.x86.sse2.pmadd.wd( <8 x i16> %tmp9, <8 x i16> %tmp6 ) nounwind readnone ; <<4 x i32>> [#uses=1]
- %tmp14 = bitcast <4 x i32> %tmp11 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp14
-}
-
-declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/commute-two-addr.ll b/libclamav/c++/llvm/test/CodeGen/X86/commute-two-addr.ll
deleted file mode 100644
index 56ea26b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/commute-two-addr.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; The register allocator can commute two-address instructions to avoid
-; insertion of register-register copies.
-
-; Make sure there are only 3 mov's for each testcase
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
-; RUN: grep {\\\<mov\\\>} | count 6
-
-
-target triple = "i686-pc-linux-gnu"
-@G = external global i32 ; <i32*> [#uses=2]
-
-declare void @ext(i32)
-
-define i32 @add_test(i32 %X, i32 %Y) {
- %Z = add i32 %X, %Y ; <i32> [#uses=1]
- store i32 %Z, i32* @G
- ret i32 %X
-}
-
-define i32 @xor_test(i32 %X, i32 %Y) {
- %Z = xor i32 %X, %Y ; <i32> [#uses=1]
- store i32 %Z, i32* @G
- ret i32 %X
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/compare-add.ll b/libclamav/c++/llvm/test/CodeGen/X86/compare-add.ll
deleted file mode 100644
index 358ee59..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/compare-add.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep add
-
-define i1 @X(i32 %X) {
- %Y = add i32 %X, 14 ; <i32> [#uses=1]
- %Z = icmp ne i32 %Y, 12345 ; <i1> [#uses=1]
- ret i1 %Z
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/compare-inf.ll b/libclamav/c++/llvm/test/CodeGen/X86/compare-inf.ll
deleted file mode 100644
index 2be90c9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/compare-inf.ll
+++ /dev/null
@@ -1,76 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
-; Convert oeq and une to ole/oge/ule/uge when comparing with infinity
-; and negative infinity, because those are more efficient on x86.
-
-; CHECK: oeq_inff:
-; CHECK: ucomiss
-; CHECK: jae
-define float @oeq_inff(float %x, float %y) nounwind readonly {
- %t0 = fcmp oeq float %x, 0x7FF0000000000000
- %t1 = select i1 %t0, float 1.0, float %y
- ret float %t1
-}
-
-; CHECK: oeq_inf:
-; CHECK: ucomisd
-; CHECK: jae
-define double @oeq_inf(double %x, double %y) nounwind readonly {
- %t0 = fcmp oeq double %x, 0x7FF0000000000000
- %t1 = select i1 %t0, double 1.0, double %y
- ret double %t1
-}
-
-; CHECK: une_inff:
-; CHECK: ucomiss
-; CHECK: jb
-define float @une_inff(float %x, float %y) nounwind readonly {
- %t0 = fcmp une float %x, 0x7FF0000000000000
- %t1 = select i1 %t0, float 1.0, float %y
- ret float %t1
-}
-
-; CHECK: une_inf:
-; CHECK: ucomisd
-; CHECK: jb
-define double @une_inf(double %x, double %y) nounwind readonly {
- %t0 = fcmp une double %x, 0x7FF0000000000000
- %t1 = select i1 %t0, double 1.0, double %y
- ret double %t1
-}
-
-; CHECK: oeq_neg_inff:
-; CHECK: ucomiss
-; CHECK: jae
-define float @oeq_neg_inff(float %x, float %y) nounwind readonly {
- %t0 = fcmp oeq float %x, 0xFFF0000000000000
- %t1 = select i1 %t0, float 1.0, float %y
- ret float %t1
-}
-
-; CHECK: oeq_neg_inf:
-; CHECK: ucomisd
-; CHECK: jae
-define double @oeq_neg_inf(double %x, double %y) nounwind readonly {
- %t0 = fcmp oeq double %x, 0xFFF0000000000000
- %t1 = select i1 %t0, double 1.0, double %y
- ret double %t1
-}
-
-; CHECK: une_neg_inff:
-; CHECK: ucomiss
-; CHECK: jb
-define float @une_neg_inff(float %x, float %y) nounwind readonly {
- %t0 = fcmp une float %x, 0xFFF0000000000000
- %t1 = select i1 %t0, float 1.0, float %y
- ret float %t1
-}
-
-; CHECK: une_neg_inf:
-; CHECK: ucomisd
-; CHECK: jb
-define double @une_neg_inf(double %x, double %y) nounwind readonly {
- %t0 = fcmp une double %x, 0xFFF0000000000000
- %t1 = select i1 %t0, double 1.0, double %y
- ret double %t1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/compare_folding.ll b/libclamav/c++/llvm/test/CodeGen/X86/compare_folding.ll
deleted file mode 100644
index 84c152d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/compare_folding.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah | \
-; RUN: grep movsd | count 1
-; RUN: llc < %s -march=x86 -mcpu=yonah | \
-; RUN: grep ucomisd
-declare i1 @llvm.isunordered.f64(double, double)
-
-define i1 @test1(double %X, double %Y) {
- %COM = fcmp uno double %X, %Y ; <i1> [#uses=1]
- ret i1 %COM
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/compiler_used.ll b/libclamav/c++/llvm/test/CodeGen/X86/compiler_used.ll
deleted file mode 100644
index be8de5e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/compiler_used.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9 | grep no_dead_strip | count 1
-; We should have a .no_dead_strip directive for Z but not for X/Y.
-
-@X = internal global i8 4
-@Y = internal global i32 123
-@Z = internal global i8 4
-
-@llvm.used = appending global [1 x i8*] [ i8* @Z ], section "llvm.metadata"
-@llvm.compiler_used = appending global [2 x i8*] [ i8* @X, i8* bitcast (i32* @Y to i8*)], section "llvm.metadata"
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/complex-fca.ll b/libclamav/c++/llvm/test/CodeGen/X86/complex-fca.ll
deleted file mode 100644
index 7e7acaa..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/complex-fca.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | count 2
-
-define void @ccosl({ x86_fp80, x86_fp80 }* noalias sret %agg.result, { x86_fp80, x86_fp80 } %z) nounwind {
-entry:
- %z8 = extractvalue { x86_fp80, x86_fp80 } %z, 0
- %z9 = extractvalue { x86_fp80, x86_fp80 } %z, 1
- %0 = fsub x86_fp80 0xK80000000000000000000, %z9
- %insert = insertvalue { x86_fp80, x86_fp80 } undef, x86_fp80 %0, 0
- %insert7 = insertvalue { x86_fp80, x86_fp80 } %insert, x86_fp80 %z8, 1
- call void @ccoshl({ x86_fp80, x86_fp80 }* noalias sret %agg.result, { x86_fp80, x86_fp80 } %insert7) nounwind
- ret void
-}
-
-declare void @ccoshl({ x86_fp80, x86_fp80 }* noalias sret, { x86_fp80, x86_fp80 }) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/const-select.ll b/libclamav/c++/llvm/test/CodeGen/X86/const-select.ll
deleted file mode 100644
index ca8cc14..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/const-select.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin7"
-
-; RUN: llc < %s | grep {LCPI1_0(,%eax,4)}
-define float @f(i32 %x) nounwind readnone {
-entry:
- %0 = icmp eq i32 %x, 0 ; <i1> [#uses=1]
- %iftmp.0.0 = select i1 %0, float 4.200000e+01, float 2.300000e+01 ; <float> [#uses=1]
- ret float %iftmp.0.0
-}
-
-; RUN: llc < %s | grep {movsbl.*(%e.x,%e.x,4), %eax}
-define signext i8 @test(i8* nocapture %P, double %F) nounwind readonly {
-entry:
- %0 = fcmp olt double %F, 4.200000e+01 ; <i1> [#uses=1]
- %iftmp.0.0 = select i1 %0, i32 4, i32 0 ; <i32> [#uses=1]
- %1 = getelementptr i8* %P, i32 %iftmp.0.0 ; <i8*> [#uses=1]
- %2 = load i8* %1, align 1 ; <i8> [#uses=1]
- ret i8 %2
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/constant-pool-remat-0.ll b/libclamav/c++/llvm/test/CodeGen/X86/constant-pool-remat-0.ll
deleted file mode 100644
index 05388f9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/constant-pool-remat-0.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep LCPI | count 3
-; RUN: llc < %s -march=x86-64 -stats -info-output-file - | grep asm-printer | grep 6
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep LCPI | count 3
-; RUN: llc < %s -march=x86 -mattr=+sse2 -stats -info-output-file - | grep asm-printer | grep 12
-
-declare float @qux(float %y)
-
-define float @array(float %a) nounwind {
- %n = fmul float %a, 9.0
- %m = call float @qux(float %n)
- %o = fmul float %m, 9.0
- ret float %o
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/constant-pool-sharing.ll b/libclamav/c++/llvm/test/CodeGen/X86/constant-pool-sharing.ll
deleted file mode 100644
index c3e97ad..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/constant-pool-sharing.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
-; llc should share constant pool entries between this integer vector
-; and this floating-point vector since they have the same encoding.
-
-; CHECK: LCPI1_0(%rip), %xmm0
-; CHECK: movaps %xmm0, (%rdi)
-; CHECK: movaps %xmm0, (%rsi)
-
-define void @foo(<4 x i32>* %p, <4 x float>* %q, i1 %t) nounwind {
-entry:
- br label %loop
-loop:
- store <4 x i32><i32 1073741824, i32 1073741824, i32 1073741824, i32 1073741824>, <4 x i32>* %p
- store <4 x float><float 2.0, float 2.0, float 2.0, float 2.0>, <4 x float>* %q
- br i1 %t, label %loop, label %ret
-ret:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/constpool.ll b/libclamav/c++/llvm/test/CodeGen/X86/constpool.ll
deleted file mode 100644
index 2aac486..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/constpool.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s
-; RUN: llc < %s -fast-isel
-; RUN: llc < %s -march=x86-64
-; RUN: llc < %s -fast-isel -march=x86-64
-; PR4466
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.7"
-
-define i32 @main() nounwind {
-entry:
- %0 = fcmp oeq float undef, 0x7FF0000000000000 ; <i1> [#uses=1]
- %1 = zext i1 %0 to i32 ; <i32> [#uses=1]
- store i32 %1, i32* undef, align 4
- ret i32 undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll b/libclamav/c++/llvm/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll
deleted file mode 100644
index 8e38fe3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=x86-64 -o %t -stats -info-output-file - | \
-; RUN: grep {asm-printer} | grep {Number of machine instrs printed} | grep 10
-; RUN: grep {leal 1(\%rsi),} %t
-
-define fastcc zeroext i8 @fullGtU(i32 %i1, i32 %i2, i8* %ptr) nounwind optsize {
-entry:
- %0 = add i32 %i2, 1 ; <i32> [#uses=1]
- %1 = sext i32 %0 to i64 ; <i64> [#uses=1]
- %2 = getelementptr i8* %ptr, i64 %1 ; <i8*> [#uses=1]
- %3 = load i8* %2, align 1 ; <i8> [#uses=1]
- %4 = icmp eq i8 0, %3 ; <i1> [#uses=1]
- br i1 %4, label %bb3, label %bb34
-
-bb3: ; preds = %entry
- %5 = add i32 %i2, 4 ; <i32> [#uses=0]
- %6 = trunc i32 %5 to i8
- ret i8 %6
-
-bb34: ; preds = %entry
- ret i8 0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/copysign-zero.ll b/libclamav/c++/llvm/test/CodeGen/X86/copysign-zero.ll
deleted file mode 100644
index 47522d8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/copysign-zero.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s | not grep orpd
-; RUN: llc < %s | grep andpd | count 1
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin8"
-
-define double @test(double %X) nounwind {
-entry:
- %tmp2 = tail call double @copysign( double 0.000000e+00, double %X ) nounwind readnone ; <double> [#uses=1]
- ret double %tmp2
-}
-
-declare double @copysign(double, double) nounwind readnone
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/crash.ll b/libclamav/c++/llvm/test/CodeGen/X86/crash.ll
deleted file mode 100644
index b9037f3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/crash.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc -march=x86 %s -o -
-; RUN: llc -march=x86-64 %s -o -
-
-; PR6497
-
-; Chain and flag folding issues.
-define i32 @test1() nounwind ssp {
-entry:
- %tmp5.i = volatile load i32* undef ; <i32> [#uses=1]
- %conv.i = zext i32 %tmp5.i to i64 ; <i64> [#uses=1]
- %tmp12.i = volatile load i32* undef ; <i32> [#uses=1]
- %conv13.i = zext i32 %tmp12.i to i64 ; <i64> [#uses=1]
- %shl.i = shl i64 %conv13.i, 32 ; <i64> [#uses=1]
- %or.i = or i64 %shl.i, %conv.i ; <i64> [#uses=1]
- %add16.i = add i64 %or.i, 256 ; <i64> [#uses=1]
- %shr.i = lshr i64 %add16.i, 8 ; <i64> [#uses=1]
- %conv19.i = trunc i64 %shr.i to i32 ; <i32> [#uses=1]
- volatile store i32 %conv19.i, i32* undef
- ret i32 undef
-}
-
-; PR6533
-define void @test2(i1 %x, i32 %y) nounwind {
- %land.ext = zext i1 %x to i32 ; <i32> [#uses=1]
- %and = and i32 %y, 1 ; <i32> [#uses=1]
- %xor = xor i32 %and, %land.ext ; <i32> [#uses=1]
- %cmp = icmp eq i32 %xor, 1 ; <i1> [#uses=1]
- br i1 %cmp, label %if.end, label %if.then
-
-if.then: ; preds = %land.end
- ret void
-
-if.end: ; preds = %land.end
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/critical-edge-split.ll b/libclamav/c++/llvm/test/CodeGen/X86/critical-edge-split.ll
deleted file mode 100644
index f29cbf3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/critical-edge-split.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -stats -info-output-file - | grep asm-printer | grep 29
-
- %CC = type { %Register }
- %II = type { %"struct.XX::II::$_74" }
- %JITFunction = type %YYValue* (%CC*, %YYValue**)
- %YYValue = type { i32 (...)** }
- %Register = type { %"struct.XX::ByteCodeFeatures" }
- %"struct.XX::ByteCodeFeatures" = type { i32 }
- %"struct.XX::II::$_74" = type { i8* }
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (%JITFunction* @loop to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define %YYValue* @loop(%CC*, %YYValue**) nounwind {
-; <label>:2
- %3 = getelementptr %CC* %0, i32 -9 ; <%CC*> [#uses=1]
- %4 = bitcast %CC* %3 to %YYValue** ; <%YYValue**> [#uses=2]
- %5 = load %YYValue** %4 ; <%YYValue*> [#uses=3]
- %unique_1.i = ptrtoint %YYValue* %5 to i1 ; <i1> [#uses=1]
- br i1 %unique_1.i, label %loop, label %11
-
-loop: ; preds = %6, %2
- %.1 = phi %YYValue* [ inttoptr (i32 1 to %YYValue*), %2 ], [ %intAddValue, %6 ] ; <%YYValue*> [#uses=3]
- %immediateCmp = icmp slt %YYValue* %.1, %5 ; <i1> [#uses=1]
- br i1 %immediateCmp, label %6, label %8
-
-; <label>:6 ; preds = %loop
- %lhsInt = ptrtoint %YYValue* %.1 to i32 ; <i32> [#uses=1]
- %7 = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %lhsInt, i32 2) ; <{ i32, i1 }> [#uses=2]
- %intAdd = extractvalue { i32, i1 } %7, 0 ; <i32> [#uses=1]
- %intAddValue = inttoptr i32 %intAdd to %YYValue* ; <%YYValue*> [#uses=1]
- %intAddOverflow = extractvalue { i32, i1 } %7, 1 ; <i1> [#uses=1]
- br i1 %intAddOverflow, label %.loopexit, label %loop
-
-; <label>:8 ; preds = %loop
- ret %YYValue* inttoptr (i32 10 to %YYValue*)
-
-.loopexit: ; preds = %6
- %9 = bitcast %CC* %0 to %YYValue** ; <%YYValue**> [#uses=1]
- store %YYValue* %.1, %YYValue** %9
- store %YYValue* %5, %YYValue** %4
- %10 = call fastcc %YYValue* @foobar(%II* inttoptr (i32 3431104 to %II*), %CC* %0, %YYValue** %1) ; <%YYValue*> [#uses=1]
- ret %YYValue* %10
-
-; <label>:11 ; preds = %2
- %12 = call fastcc %YYValue* @foobar(%II* inttoptr (i32 3431080 to %II*), %CC* %0, %YYValue** %1) ; <%YYValue*> [#uses=1]
- ret %YYValue* %12
-}
-
-declare fastcc %YYValue* @foobar(%II*, %CC*, %YYValue**) nounwind
-
-declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/cstring.ll b/libclamav/c++/llvm/test/CodeGen/X86/cstring.ll
deleted file mode 100644
index 5b5a766..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/cstring.ll
+++ /dev/null
@@ -1,4 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin | not grep comm
-; rdar://6479858
-
-@str1 = internal constant [1 x i8] zeroinitializer
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/dag-rauw-cse.ll b/libclamav/c++/llvm/test/CodeGen/X86/dag-rauw-cse.ll
deleted file mode 100644
index edcfeb7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/dag-rauw-cse.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {orl \$1}
-; PR3018
-
-define i32 @test(i32 %A) nounwind {
- %B = or i32 %A, 1
- %C = or i32 %B, 1
- %D = and i32 %C, 7057
- ret i32 %D
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/dagcombine-buildvector.ll b/libclamav/c++/llvm/test/CodeGen/X86/dagcombine-buildvector.ll
deleted file mode 100644
index c0ee2ac..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/dagcombine-buildvector.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=penryn -disable-mmx -o %t
-; RUN: grep unpcklpd %t | count 1
-; RUN: grep movapd %t | count 1
-; RUN: grep movaps %t | count 1
-
-; Shows a dag combine bug that will generate an illegal build vector
-; with v2i64 build_vector i32, i32.
-
-define void @test(<2 x double>* %dst, <4 x double> %src) nounwind {
-entry:
- %tmp7.i = shufflevector <4 x double> %src, <4 x double> undef, <2 x i32> < i32 0, i32 2 >
- store <2 x double> %tmp7.i, <2 x double>* %dst
- ret void
-}
-
-define void @test2(<4 x i16>* %src, <4 x i32>* %dest) nounwind {
-entry:
- %tmp1 = load <4 x i16>* %src
- %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
- %0 = tail call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %tmp3)
- store <4 x i32> %0, <4 x i32>* %dest
- ret void
-}
-
-declare <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/dagcombine-cse.ll b/libclamav/c++/llvm/test/CodeGen/X86/dagcombine-cse.ll
deleted file mode 100644
index c3c7990..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/dagcombine-cse.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mtriple=i386-apple-darwin -stats |& grep asm-printer | grep 14
-
-define i32 @t(i8* %ref_frame_ptr, i32 %ref_frame_stride, i32 %idxX, i32 %idxY) nounwind {
-entry:
- %tmp7 = mul i32 %idxY, %ref_frame_stride ; <i32> [#uses=2]
- %tmp9 = add i32 %tmp7, %idxX ; <i32> [#uses=1]
- %tmp11 = getelementptr i8* %ref_frame_ptr, i32 %tmp9 ; <i8*> [#uses=1]
- %tmp1112 = bitcast i8* %tmp11 to i32* ; <i32*> [#uses=1]
- %tmp13 = load i32* %tmp1112, align 4 ; <i32> [#uses=1]
- %tmp18 = add i32 %idxX, 4 ; <i32> [#uses=1]
- %tmp20.sum = add i32 %tmp18, %tmp7 ; <i32> [#uses=1]
- %tmp21 = getelementptr i8* %ref_frame_ptr, i32 %tmp20.sum ; <i8*> [#uses=1]
- %tmp2122 = bitcast i8* %tmp21 to i16* ; <i16*> [#uses=1]
- %tmp23 = load i16* %tmp2122, align 2 ; <i16> [#uses=1]
- %tmp2425 = zext i16 %tmp23 to i64 ; <i64> [#uses=1]
- %tmp26 = shl i64 %tmp2425, 32 ; <i64> [#uses=1]
- %tmp2728 = zext i32 %tmp13 to i64 ; <i64> [#uses=1]
- %tmp29 = or i64 %tmp26, %tmp2728 ; <i64> [#uses=1]
- %tmp3454 = bitcast i64 %tmp29 to double ; <double> [#uses=1]
- %tmp35 = insertelement <2 x double> undef, double %tmp3454, i32 0 ; <<2 x double>> [#uses=1]
- %tmp36 = insertelement <2 x double> %tmp35, double 0.000000e+00, i32 1 ; <<2 x double>> [#uses=1]
- %tmp42 = bitcast <2 x double> %tmp36 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp43 = shufflevector <8 x i16> %tmp42, <8 x i16> undef, <8 x i32> < i32 0, i32 1, i32 1, i32 2, i32 4, i32 5, i32 6, i32 7 > ; <<8 x i16>> [#uses=1]
- %tmp47 = bitcast <8 x i16> %tmp43 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp48 = extractelement <4 x i32> %tmp47, i32 0 ; <i32> [#uses=1]
- ret i32 %tmp48
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/darwin-bzero.ll b/libclamav/c++/llvm/test/CodeGen/X86/darwin-bzero.ll
deleted file mode 100644
index a9573cf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/darwin-bzero.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin10 | grep __bzero
-
-declare void @llvm.memset.i32(i8*, i8, i32, i32)
-
-define void @foo(i8* %p, i32 %len) {
- call void @llvm.memset.i32(i8* %p, i8 0, i32 %len, i32 1)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/darwin-no-dead-strip.ll b/libclamav/c++/llvm/test/CodeGen/X86/darwin-no-dead-strip.ll
deleted file mode 100644
index 452d1f8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/darwin-no-dead-strip.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s | grep no_dead_strip
-
-target datalayout = "e-p:32:32"
-target triple = "i686-apple-darwin8.7.2"
-@x = weak global i32 0 ; <i32*> [#uses=1]
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (i32* @x to i8*) ] ; <[1 x i8*]*> [#uses=0]
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/darwin-quote.ll b/libclamav/c++/llvm/test/CodeGen/X86/darwin-quote.ll
deleted file mode 100644
index 8fddc11..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/darwin-quote.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin | FileCheck %s
-
-
-define internal i64 @baz() nounwind {
- %tmp = load i64* @"+x"
- ret i64 %tmp
-; CHECK: _baz:
-; CHECK: movl "L_+x$non_lazy_ptr", %ecx
-}
-
-
-@"+x" = external global i64
-
-; CHECK: "L_+x$non_lazy_ptr":
-; CHECK: .indirect_symbol "_+x"
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/darwin-stub.ll b/libclamav/c++/llvm/test/CodeGen/X86/darwin-stub.ll
deleted file mode 100644
index b4d2e1a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/darwin-stub.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin | grep stub
-; RUN: llc < %s -mtriple=i386-apple-darwin9 | not grep stub
-
-@"\01LC" = internal constant [13 x i8] c"Hello World!\00" ; <[13 x i8]*> [#uses=1]
-
-define i32 @main() nounwind {
-entry:
- %0 = tail call i32 @puts(i8* getelementptr ([13 x i8]* @"\01LC", i32 0, i32 0)) nounwind ; <i32> [#uses=0]
- ret i32 0
-}
-
-declare i32 @puts(i8*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/dg.exp b/libclamav/c++/llvm/test/CodeGen/X86/dg.exp
deleted file mode 100644
index 629a147..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/dg.exp
+++ /dev/null
@@ -1,5 +0,0 @@
-load_lib llvm.exp
-
-if { [llvm_supports_target X86] } {
- RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/discontiguous-loops.ll b/libclamav/c++/llvm/test/CodeGen/X86/discontiguous-loops.ll
deleted file mode 100644
index 479c450..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/discontiguous-loops.ll
+++ /dev/null
@@ -1,72 +0,0 @@
-; RUN: llc -verify-loop-info -verify-dom-info -march=x86-64 < %s
-; PR5243
-
-@.str96 = external constant [37 x i8], align 8 ; <[37 x i8]*> [#uses=1]
-
-define void @foo() nounwind {
-bb:
- br label %ybb1
-
-ybb1: ; preds = %yybb13, %xbb6, %bb
- switch i32 undef, label %bb18 [
- i32 150, label %ybb2
- i32 151, label %bb17
- i32 152, label %bb19
- i32 157, label %ybb8
- ]
-
-ybb2: ; preds = %ybb1
- %tmp = icmp eq i8** undef, null ; <i1> [#uses=1]
- br i1 %tmp, label %bb3, label %xbb6
-
-bb3: ; preds = %ybb2
- unreachable
-
-xbb4: ; preds = %xbb6
- store i32 0, i32* undef, align 8
- br i1 undef, label %xbb6, label %bb5
-
-bb5: ; preds = %xbb4
- call fastcc void @decl_mode_check_failed() nounwind
- unreachable
-
-xbb6: ; preds = %xbb4, %ybb2
- %tmp7 = icmp slt i32 undef, 0 ; <i1> [#uses=1]
- br i1 %tmp7, label %xbb4, label %ybb1
-
-ybb8: ; preds = %ybb1
- %tmp9 = icmp eq i8** undef, null ; <i1> [#uses=1]
- br i1 %tmp9, label %bb10, label %ybb12
-
-bb10: ; preds = %ybb8
- %tmp11 = load i8** undef, align 8 ; <i8*> [#uses=1]
- call void (i8*, ...)* @fatal(i8* getelementptr inbounds ([37 x i8]* @.str96, i64 0, i64 0), i8* %tmp11) nounwind
- unreachable
-
-ybb12: ; preds = %ybb8
- br i1 undef, label %bb15, label %ybb13
-
-ybb13: ; preds = %ybb12
- %tmp14 = icmp sgt i32 undef, 0 ; <i1> [#uses=1]
- br i1 %tmp14, label %bb16, label %ybb1
-
-bb15: ; preds = %ybb12
- call void (i8*, ...)* @fatal(i8* getelementptr inbounds ([37 x i8]* @.str96, i64 0, i64 0), i8* undef) nounwind
- unreachable
-
-bb16: ; preds = %ybb13
- unreachable
-
-bb17: ; preds = %ybb1
- unreachable
-
-bb18: ; preds = %ybb1
- unreachable
-
-bb19: ; preds = %ybb1
- unreachable
-}
-
-declare void @fatal(i8*, ...)
-
-declare fastcc void @decl_mode_check_failed() nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/div_const.ll b/libclamav/c++/llvm/test/CodeGen/X86/div_const.ll
deleted file mode 100644
index f0ada41..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/div_const.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86 | grep 365384439
-
-define i32 @f9188_mul365384439_shift27(i32 %A) {
- %tmp1 = udiv i32 %A, 1577682821 ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/divrem.ll b/libclamav/c++/llvm/test/CodeGen/X86/divrem.ll
deleted file mode 100644
index e86b52f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/divrem.ll
+++ /dev/null
@@ -1,58 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep div | count 8
-
-define void @si64(i64 %x, i64 %y, i64* %p, i64* %q) {
- %r = sdiv i64 %x, %y
- %t = srem i64 %x, %y
- store i64 %r, i64* %p
- store i64 %t, i64* %q
- ret void
-}
-define void @si32(i32 %x, i32 %y, i32* %p, i32* %q) {
- %r = sdiv i32 %x, %y
- %t = srem i32 %x, %y
- store i32 %r, i32* %p
- store i32 %t, i32* %q
- ret void
-}
-define void @si16(i16 %x, i16 %y, i16* %p, i16* %q) {
- %r = sdiv i16 %x, %y
- %t = srem i16 %x, %y
- store i16 %r, i16* %p
- store i16 %t, i16* %q
- ret void
-}
-define void @si8(i8 %x, i8 %y, i8* %p, i8* %q) {
- %r = sdiv i8 %x, %y
- %t = srem i8 %x, %y
- store i8 %r, i8* %p
- store i8 %t, i8* %q
- ret void
-}
-define void @ui64(i64 %x, i64 %y, i64* %p, i64* %q) {
- %r = udiv i64 %x, %y
- %t = urem i64 %x, %y
- store i64 %r, i64* %p
- store i64 %t, i64* %q
- ret void
-}
-define void @ui32(i32 %x, i32 %y, i32* %p, i32* %q) {
- %r = udiv i32 %x, %y
- %t = urem i32 %x, %y
- store i32 %r, i32* %p
- store i32 %t, i32* %q
- ret void
-}
-define void @ui16(i16 %x, i16 %y, i16* %p, i16* %q) {
- %r = udiv i16 %x, %y
- %t = urem i16 %x, %y
- store i16 %r, i16* %p
- store i16 %t, i16* %q
- ret void
-}
-define void @ui8(i8 %x, i8 %y, i8* %p, i8* %q) {
- %r = udiv i8 %x, %y
- %t = urem i8 %x, %y
- store i8 %r, i8* %p
- store i8 %t, i8* %q
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/dll-linkage.ll b/libclamav/c++/llvm/test/CodeGen/X86/dll-linkage.ll
deleted file mode 100644
index c634c7e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/dll-linkage.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=i386-pc-mingw32 | FileCheck %s
-
-declare dllimport void @foo()
-
-define void @bar() nounwind {
-; CHECK: call *__imp__foo
- call void @foo()
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/dllexport.ll b/libclamav/c++/llvm/test/CodeGen/X86/dllexport.ll
deleted file mode 100644
index 2c699bf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/dllexport.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s | FileCheck %s
-; PR2936
-
-target triple = "i386-mingw32"
-
-define dllexport x86_fastcallcc i32 @foo() nounwind {
-entry:
- ret i32 0
-}
-
-; CHECK: .section .drectve
-; CHECK: -export:@foo@0
\ No newline at end of file
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/dollar-name.ll b/libclamav/c++/llvm/test/CodeGen/X86/dollar-name.ll
deleted file mode 100644
index 3b26319..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/dollar-name.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux | FileCheck %s
-; PR1339
-
-@"$bar" = global i32 zeroinitializer
-@"$qux" = external global i32
-
-define i32 @"$foo"() nounwind {
-; CHECK: movl ($bar),
-; CHECK: addl ($qux),
-; CHECK: call ($hen)
- %m = load i32* @"$bar"
- %n = load i32* @"$qux"
- %t = add i32 %m, %n
- %u = call i32 @"$hen"(i32 %t)
- ret i32 %u
-}
-
-declare i32 @"$hen"(i32 %a)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/dyn-stackalloc.ll b/libclamav/c++/llvm/test/CodeGen/X86/dyn-stackalloc.ll
deleted file mode 100644
index 7f0181f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/dyn-stackalloc.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep -E {\\\$4294967289|-7}
-; RUN: llc < %s -march=x86 | grep -E {\\\$4294967280|-16}
-; RUN: llc < %s -march=x86-64 | grep {\\-16}
-
-define void @t() nounwind {
-A:
- br label %entry
-
-entry:
- %m1 = alloca i32, align 4
- %m2 = alloca [7 x i8], align 16
- call void @s( i32* %m1, [7 x i8]* %m2 )
- ret void
-}
-
-declare void @s(i32*, [7 x i8]*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/empty-struct-return-type.ll b/libclamav/c++/llvm/test/CodeGen/X86/empty-struct-return-type.ll
deleted file mode 100644
index 34cd5d9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/empty-struct-return-type.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep call
-; PR4688
-
-; Return types can be empty structs, which can be awkward.
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define void @_ZN15QtSharedPointer22internalSafetyCheckAddEPVKv(i8* %ptr) {
-entry:
- %0 = call { } @_ZNK5QHashIPv15QHashDummyValueE5valueERKS0_(i8** undef) ; <{ }> [#uses=0]
- ret void
-}
-
-declare hidden { } @_ZNK5QHashIPv15QHashDummyValueE5valueERKS0_(i8** nocapture) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/epilogue.ll b/libclamav/c++/llvm/test/CodeGen/X86/epilogue.ll
deleted file mode 100644
index 52dcb61..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/epilogue.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep lea
-; RUN: llc < %s -march=x86 | grep {movl %ebp}
-
-declare void @bar(<2 x i64>* %n)
-
-define void @foo(i64 %h) {
- %k = trunc i64 %h to i32
- %p = alloca <2 x i64>, i32 %k
- call void @bar(<2 x i64>* %p)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/extend.ll b/libclamav/c++/llvm/test/CodeGen/X86/extend.ll
deleted file mode 100644
index 9553b1b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/extend.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | grep movzx | count 1
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | grep movsx | count 1
-
-@G1 = internal global i8 0 ; <i8*> [#uses=1]
-@G2 = internal global i8 0 ; <i8*> [#uses=1]
-
-define i16 @test1() {
- %tmp.0 = load i8* @G1 ; <i8> [#uses=1]
- %tmp.3 = zext i8 %tmp.0 to i16 ; <i16> [#uses=1]
- ret i16 %tmp.3
-}
-
-define i16 @test2() {
- %tmp.0 = load i8* @G2 ; <i8> [#uses=1]
- %tmp.3 = sext i8 %tmp.0 to i16 ; <i16> [#uses=1]
- ret i16 %tmp.3
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/extern_weak.ll b/libclamav/c++/llvm/test/CodeGen/X86/extern_weak.ll
deleted file mode 100644
index 01e32aa..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/extern_weak.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin | grep weak_reference | count 2
-
-@Y = global i32 (i8*)* @X ; <i32 (i8*)**> [#uses=0]
-
-declare extern_weak i32 @X(i8*)
-
-define void @bar() {
- tail call void (...)* @foo( )
- ret void
-}
-
-declare extern_weak void @foo(...)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/extmul128.ll b/libclamav/c++/llvm/test/CodeGen/X86/extmul128.ll
deleted file mode 100644
index 9b59829..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/extmul128.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep mul | count 2
-
-define i128 @i64_sext_i128(i64 %a, i64 %b) {
- %aa = sext i64 %a to i128
- %bb = sext i64 %b to i128
- %cc = mul i128 %aa, %bb
- ret i128 %cc
-}
-define i128 @i64_zext_i128(i64 %a, i64 %b) {
- %aa = zext i64 %a to i128
- %bb = zext i64 %b to i128
- %cc = mul i128 %aa, %bb
- ret i128 %cc
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/extmul64.ll b/libclamav/c++/llvm/test/CodeGen/X86/extmul64.ll
deleted file mode 100644
index 9e20ded..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/extmul64.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mul | count 2
-
-define i64 @i32_sext_i64(i32 %a, i32 %b) {
- %aa = sext i32 %a to i64
- %bb = sext i32 %b to i64
- %cc = mul i64 %aa, %bb
- ret i64 %cc
-}
-define i64 @i32_zext_i64(i32 %a, i32 %b) {
- %aa = zext i32 %a to i64
- %bb = zext i32 %b to i64
- %cc = mul i64 %aa, %bb
- ret i64 %cc
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/extract-combine.ll b/libclamav/c++/llvm/test/CodeGen/X86/extract-combine.ll
deleted file mode 100644
index 2040e87..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/extract-combine.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mcpu=core2 -o %t
-; RUN: not grep unpcklps %t
-
-define i32 @foo() nounwind {
-entry:
- %tmp74.i25762 = shufflevector <16 x float> zeroinitializer, <16 x float> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19> ; <<16 x float>> [#uses=1]
- %tmp518 = shufflevector <16 x float> %tmp74.i25762, <16 x float> undef, <4 x i32> <i32 12, i32 13, i32 14, i32 15> ; <<4 x float>> [#uses=1]
- %movss.i25611 = shufflevector <4 x float> zeroinitializer, <4 x float> %tmp518, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
- %conv3.i25615 = shufflevector <4 x float> %movss.i25611, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0> ; <<4 x float>> [#uses=1]
- %sub.i25620 = fsub <4 x float> %conv3.i25615, zeroinitializer ; <<4 x float>> [#uses=1]
- %mul.i25621 = fmul <4 x float> zeroinitializer, %sub.i25620 ; <<4 x float>> [#uses=1]
- %add.i25622 = fadd <4 x float> zeroinitializer, %mul.i25621 ; <<4 x float>> [#uses=1]
- store <4 x float> %add.i25622, <4 x float>* null
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/extract-extract.ll b/libclamav/c++/llvm/test/CodeGen/X86/extract-extract.ll
deleted file mode 100644
index ad79ab9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/extract-extract.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 >/dev/null
-; PR4699
-
-; Handle this extractvalue-of-extractvalue case without getting in
-; trouble with CSE in DAGCombine.
-
- %cc = type { %crd }
- %cr = type { i32 }
- %crd = type { i64, %cr* }
- %pp = type { %cc }
-
-define fastcc void @foo(%pp* nocapture byval %p_arg) {
-entry:
- %tmp2 = getelementptr %pp* %p_arg, i64 0, i32 0 ; <%cc*> [#uses=
- %tmp3 = load %cc* %tmp2 ; <%cc> [#uses=1]
- %tmp34 = extractvalue %cc %tmp3, 0 ; <%crd> [#uses=1]
- %tmp345 = extractvalue %crd %tmp34, 0 ; <i64> [#uses=1]
- %.ptr.i = load %cr** undef ; <%cr*> [#uses=0]
- %tmp15.i = shl i64 %tmp345, 3 ; <i64> [#uses=0]
- store %cr* undef, %cr** undef
- ret void
-}
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/extractelement-from-arg.ll b/libclamav/c++/llvm/test/CodeGen/X86/extractelement-from-arg.ll
deleted file mode 100644
index 4ea37f0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/extractelement-from-arg.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse2
-
-define void @test(float* %R, <4 x float> %X) nounwind {
- %tmp = extractelement <4 x float> %X, i32 3
- store float %tmp, float* %R
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/extractelement-load.ll b/libclamav/c++/llvm/test/CodeGen/X86/extractelement-load.ll
deleted file mode 100644
index ee57d9b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/extractelement-load.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mcpu=yonah | not grep movd
-; RUN: llc < %s -march=x86-64 -mattr=+sse2 -mcpu=core2 | not grep movd
-
-define i32 @t(<2 x i64>* %val) nounwind {
- %tmp2 = load <2 x i64>* %val, align 16 ; <<2 x i64>> [#uses=1]
- %tmp3 = bitcast <2 x i64> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp4 = extractelement <4 x i32> %tmp3, i32 2 ; <i32> [#uses=1]
- ret i32 %tmp4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/extractelement-shuffle.ll b/libclamav/c++/llvm/test/CodeGen/X86/extractelement-shuffle.ll
deleted file mode 100644
index d1ba9a8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/extractelement-shuffle.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s
-
-; Examples that exhibits a bug in DAGCombine. The case is triggered by the
-; following program. The bug is DAGCombine assumes that the bit convert
-; preserves the number of elements so the optimization code tries to read
-; through the 3rd mask element, which doesn't exist.
-define i32 @update(<2 x i64> %val1, <2 x i64> %val2) nounwind readnone {
-entry:
- %shuf = shufflevector <2 x i64> %val1, <2 x i64> %val2, <2 x i32> <i32 0, i32 3>
- %bit = bitcast <2 x i64> %shuf to <4 x i32>
- %res = extractelement <4 x i32> %bit, i32 3
- ret i32 %res
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/extractps.ll b/libclamav/c++/llvm/test/CodeGen/X86/extractps.ll
deleted file mode 100644
index 14778f0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/extractps.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=penryn > %t
-; RUN: not grep movd %t
-; RUN: grep {movss %xmm} %t | count 1
-; RUN: grep {extractps \\\$1, %xmm0, } %t | count 1
-; PR2647
-
-external global float, align 16 ; <float*>:0 [#uses=2]
-
-define internal void @""() nounwind {
- load float* @0, align 16 ; <float>:1 [#uses=1]
- insertelement <4 x float> undef, float %1, i32 0 ; <<4 x float>>:2 [#uses=1]
- call <4 x float> @llvm.x86.sse.rsqrt.ss( <4 x float> %2 ) ; <<4 x float>>:3 [#uses=1]
- extractelement <4 x float> %3, i32 0 ; <float>:4 [#uses=1]
- store float %4, float* @0, align 16
- ret void
-}
-define internal void @""() nounwind {
- load float* @0, align 16 ; <float>:1 [#uses=1]
- insertelement <4 x float> undef, float %1, i32 1 ; <<4 x float>>:2 [#uses=1]
- call <4 x float> @llvm.x86.sse.rsqrt.ss( <4 x float> %2 ) ; <<4 x float>>:3 [#uses=1]
- extractelement <4 x float> %3, i32 1 ; <float>:4 [#uses=1]
- store float %4, float* @0, align 16
- ret void
-}
-
-declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fabs.ll b/libclamav/c++/llvm/test/CodeGen/X86/fabs.ll
deleted file mode 100644
index 54947c3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fabs.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; Make sure this testcase codegens to the fabs instruction, not a call to fabsf
-; RUN: llc < %s -march=x86 -mattr=-sse2,-sse3,-sse | grep fabs\$ | \
-; RUN: count 2
-; RUN: llc < %s -march=x86 -mattr=-sse,-sse2,-sse3 -enable-unsafe-fp-math | \
-; RUN: grep fabs\$ | count 3
-
-declare float @fabsf(float)
-
-declare x86_fp80 @fabsl(x86_fp80)
-
-define float @test1(float %X) {
- %Y = call float @fabsf(float %X)
- ret float %Y
-}
-
-define double @test2(double %X) {
- %Y = fcmp oge double %X, -0.0
- %Z = fsub double -0.0, %X
- %Q = select i1 %Y, double %X, double %Z
- ret double %Q
-}
-
-define x86_fp80 @test3(x86_fp80 %X) {
- %Y = call x86_fp80 @fabsl(x86_fp80 %X)
- ret x86_fp80 %Y
-}
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-cc-callee-pops.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-cc-callee-pops.ll
deleted file mode 100644
index 5e88ed7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-cc-callee-pops.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel -mcpu=yonah | grep {ret 20}
-
-; Check that a fastcc function pops its stack variables before returning.
-
-define x86_fastcallcc void @func(i64 %X, i64 %Y, float %G, double %Z) nounwind {
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-cc-merge-stack-adj.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-cc-merge-stack-adj.ll
deleted file mode 100644
index e151821..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-cc-merge-stack-adj.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
-; RUN: grep {add ESP, 8}
-
-target triple = "i686-pc-linux-gnu"
-
-declare x86_fastcallcc void @func(i32*, i64)
-
-define x86_fastcallcc void @caller(i32, i64) {
- %X = alloca i32 ; <i32*> [#uses=1]
- call x86_fastcallcc void @func( i32* %X, i64 0 )
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-cc-pass-in-regs.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-cc-pass-in-regs.ll
deleted file mode 100644
index fe96c0c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-cc-pass-in-regs.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
-; RUN: grep {mov EDX, 1}
-; check that fastcc is passing stuff in regs.
-
-declare x86_fastcallcc i64 @callee(i64)
-
-define i64 @caller() {
- %X = call x86_fastcallcc i64 @callee( i64 4294967299 ) ; <i64> [#uses=1]
- ret i64 %X
-}
-
-define x86_fastcallcc i64 @caller2(i64 %X) {
- ret i64 %X
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-bail.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-bail.ll
deleted file mode 100644
index 9072c5c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-bail.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86 -O0
-
-; This file is for regression tests for cases where FastISel needs
-; to gracefully bail out and let SelectionDAGISel take over.
-
- type { i64, i8* } ; type %0
-
-declare void @bar(%0)
-
-define fastcc void @foo() nounwind {
-entry:
- call void @bar(%0 zeroinitializer)
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-bc.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-bc.ll
deleted file mode 100644
index f2696ce..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-bc.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -O0 -march=x86-64 -mattr=+mmx | FileCheck %s
-; PR4684
-
-target datalayout =
-"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin9.8"
-
-declare void @func2(<1 x i64>)
-
-define void @func1() nounwind {
-
-; This isn't spectacular, but it's MMX code at -O0...
-; CHECK: movl $2, %eax
-; CHECK: movd %rax, %mm0
-; CHECK: movd %mm0, %rdi
-
- call void @func2(<1 x i64> <i64 2>)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-call.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-call.ll
deleted file mode 100644
index 5fcdbbb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-call.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -fast-isel -march=x86 | grep and
-
-define i32 @t() nounwind {
-tak:
- %tmp = call i1 @foo()
- br i1 %tmp, label %BB1, label %BB2
-BB1:
- ret i32 1
-BB2:
- ret i32 0
-}
-
-declare i1 @foo() zeroext nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-constpool.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-constpool.ll
deleted file mode 100644
index 84d10f3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-constpool.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -fast-isel | grep {LCPI1_0(%rip)}
-; Make sure fast isel uses rip-relative addressing when required.
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin9.0"
-
-define i32 @f0(double %x) nounwind {
-entry:
- %retval = alloca i32 ; <i32*> [#uses=2]
- %x.addr = alloca double ; <double*> [#uses=2]
- store double %x, double* %x.addr
- %tmp = load double* %x.addr ; <double> [#uses=1]
- %cmp = fcmp olt double %tmp, 8.500000e-01 ; <i1> [#uses=1]
- %conv = zext i1 %cmp to i32 ; <i32> [#uses=1]
- store i32 %conv, i32* %retval
- %0 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-fneg.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-fneg.ll
deleted file mode 100644
index 5ffd48b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-fneg.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -fast-isel -fast-isel-abort -march=x86-64 | FileCheck %s
-; RUN: llc < %s -fast-isel -march=x86 -mattr=+sse2 | grep xor | count 2
-
-; CHECK: doo:
-; CHECK: xor
-define double @doo(double %x) nounwind {
- %y = fsub double -0.0, %x
- ret double %y
-}
-
-; CHECK: foo:
-; CHECK: xor
-define float @foo(float %x) nounwind {
- %y = fsub float -0.0, %x
- ret float %y
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-gep.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-gep.ll
deleted file mode 100644
index 5b8acec..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-gep.ll
+++ /dev/null
@@ -1,53 +0,0 @@
-; RUN: llc < %s -march=x86-64 -O0 | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -march=x86 -O0 | FileCheck %s --check-prefix=X32
-
-; GEP indices are interpreted as signed integers, so they
-; should be sign-extended to 64 bits on 64-bit targets.
-; PR3181
-define i32 @test1(i32 %t3, i32* %t1) nounwind {
- %t9 = getelementptr i32* %t1, i32 %t3 ; <i32*> [#uses=1]
- %t15 = load i32* %t9 ; <i32> [#uses=1]
- ret i32 %t15
-; X32: test1:
-; X32: movl (%ecx,%eax,4), %eax
-; X32: ret
-
-; X64: test1:
-; X64: movslq %edi, %rax
-; X64: movl (%rsi,%rax,4), %eax
-; X64: ret
-
-}
-define i32 @test2(i64 %t3, i32* %t1) nounwind {
- %t9 = getelementptr i32* %t1, i64 %t3 ; <i32*> [#uses=1]
- %t15 = load i32* %t9 ; <i32> [#uses=1]
- ret i32 %t15
-; X32: test2:
-; X32: movl (%eax,%ecx,4), %eax
-; X32: ret
-
-; X64: test2:
-; X64: movl (%rsi,%rdi,4), %eax
-; X64: ret
-}
-
-
-
-; PR4984
-define i8 @test3(i8* %start) nounwind {
-entry:
- %A = getelementptr i8* %start, i64 -2 ; <i8*> [#uses=1]
- %B = load i8* %A, align 1 ; <i8> [#uses=1]
- ret i8 %B
-
-
-; X32: test3:
-; X32: movl 4(%esp), %eax
-; X32: movb -2(%eax), %al
-; X32: ret
-
-; X64: test3:
-; X64: movb -2(%rdi), %al
-; X64: ret
-
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-gv.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-gv.ll
deleted file mode 100644
index 34f8b38..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-gv.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -fast-isel | grep {_kill@GOTPCREL(%rip)}
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin10.0"
-@f = global i8 (...)* @kill ; <i8 (...)**> [#uses=1]
-
-declare signext i8 @kill(...)
-
-define i32 @main() nounwind ssp {
-entry:
- %retval = alloca i32 ; <i32*> [#uses=2]
- %0 = alloca i32 ; <i32*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %1 = load i8 (...)** @f, align 8 ; <i8 (...)*> [#uses=1]
- %2 = icmp ne i8 (...)* %1, @kill ; <i1> [#uses=1]
- %3 = zext i1 %2 to i32 ; <i32> [#uses=1]
- store i32 %3, i32* %0, align 4
- %4 = load i32* %0, align 4 ; <i32> [#uses=1]
- store i32 %4, i32* %retval, align 4
- br label %return
-
-return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-i1.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-i1.ll
deleted file mode 100644
index d066578..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-i1.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86 -fast-isel | grep {andb \$1, %}
-
-declare i64 @bar(i64)
-
-define i32 @foo(i64 %x) nounwind {
- %y = add i64 %x, -3 ; <i64> [#uses=1]
- %t = call i64 @bar(i64 %y) ; <i64> [#uses=1]
- %s = mul i64 %t, 77 ; <i64> [#uses=1]
- %z = trunc i64 %s to i1 ; <i1> [#uses=1]
- br label %next
-
-next: ; preds = %0
- %u = zext i1 %z to i32 ; <i32> [#uses=1]
- %v = add i32 %u, 1999 ; <i32> [#uses=1]
- br label %exit
-
-exit: ; preds = %next
- ret i32 %v
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-mem.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-mem.ll
deleted file mode 100644
index 35ec1e7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-mem.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -fast-isel -mtriple=i386-apple-darwin | \
-; RUN: grep lazy_ptr, | count 2
-; RUN: llc < %s -fast-isel -march=x86 -relocation-model=static | \
-; RUN: grep lea
-
-@src = external global i32
-
-define i32 @loadgv() nounwind {
-entry:
- %0 = load i32* @src, align 4
- %1 = load i32* @src, align 4
- %2 = add i32 %0, %1
- store i32 %2, i32* @src
- ret i32 %2
-}
-
-%stuff = type { i32 (...)** }
-@LotsStuff = external constant [4 x i32 (...)*]
-
-define void @t(%stuff* %this) nounwind {
-entry:
- store i32 (...)** getelementptr ([4 x i32 (...)*]* @LotsStuff, i32 0, i32 2), i32 (...)*** null, align 4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-phys.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-phys.ll
deleted file mode 100644
index 158ef55..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-phys.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -fast-isel -fast-isel-abort -march=x86
-
-define i8 @t2(i8 %a, i8 %c) nounwind {
- %tmp = shl i8 %a, %c
- ret i8 %tmp
-}
-
-define i8 @t1(i8 %a) nounwind {
- %tmp = mul i8 %a, 17
- ret i8 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-shift-imm.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-shift-imm.ll
deleted file mode 100644
index 35f7a72..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-shift-imm.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86 -O0 | grep {sarl \$80, %eax}
-; PR3242
-
-define i32 @foo(i32 %x) nounwind {
- %y = ashr i32 %x, 50000
- ret i32 %y
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-tailcall.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-tailcall.ll
deleted file mode 100644
index c3e527c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-tailcall.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -fast-isel -tailcallopt -march=x86 | not grep add
-; PR4154
-
-; On x86, -tailcallopt changes the ABI so the caller shouldn't readjust
-; the stack pointer after the call in this code.
-
-define i32 @stub(i8* %t0) nounwind {
-entry:
- %t1 = load i32* inttoptr (i32 139708680 to i32*) ; <i32> [#uses=1]
- %t2 = bitcast i8* %t0 to i32 (i32)* ; <i32 (i32)*> [#uses=1]
- %t3 = call fastcc i32 %t2(i32 %t1) ; <i32> [#uses=1]
- ret i32 %t3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-tls.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-tls.ll
deleted file mode 100644
index a5e6642..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-tls.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 -relocation-model=pic -mtriple=i686-unknown-linux-gnu -fast-isel | grep __tls_get_addr
-; PR3654
-
-@v = thread_local global i32 0
-define i32 @f() nounwind {
-entry:
- %t = load i32* @v
- %s = add i32 %t, 1
- ret i32 %s
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-trunc.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-trunc.ll
deleted file mode 100644
index 69b26c5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel-trunc.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -fast-isel -fast-isel-abort
-; RUN: llc < %s -march=x86-64 -fast-isel -fast-isel-abort
-
-define i8 @t1(i32 %x) signext nounwind {
- %tmp1 = trunc i32 %x to i8
- ret i8 %tmp1
-}
-
-define i8 @t2(i16 signext %x) signext nounwind {
- %tmp1 = trunc i16 %x to i8
- ret i8 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel.ll b/libclamav/c++/llvm/test/CodeGen/X86/fast-isel.ll
deleted file mode 100644
index 84b3fd7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fast-isel.ll
+++ /dev/null
@@ -1,75 +0,0 @@
-; RUN: llc < %s -fast-isel -fast-isel-abort -march=x86 -mattr=sse2
-
-; This tests very minimal fast-isel functionality.
-
-define i32* @foo(i32* %p, i32* %q, i32** %z) nounwind {
-entry:
- %r = load i32* %p
- %s = load i32* %q
- %y = load i32** %z
- br label %fast
-
-fast:
- %t0 = add i32 %r, %s
- %t1 = mul i32 %t0, %s
- %t2 = sub i32 %t1, %s
- %t3 = and i32 %t2, %s
- %t4 = xor i32 %t3, 3
- %t5 = xor i32 %t4, %s
- %t6 = add i32 %t5, 2
- %t7 = getelementptr i32* %y, i32 1
- %t8 = getelementptr i32* %t7, i32 %t6
- br label %exit
-
-exit:
- ret i32* %t8
-}
-
-define double @bar(double* %p, double* %q) nounwind {
-entry:
- %r = load double* %p
- %s = load double* %q
- br label %fast
-
-fast:
- %t0 = fadd double %r, %s
- %t1 = fmul double %t0, %s
- %t2 = fsub double %t1, %s
- %t3 = fadd double %t2, 707.0
- br label %exit
-
-exit:
- ret double %t3
-}
-
-define i32 @cast() nounwind {
-entry:
- %tmp2 = bitcast i32 0 to i32
- ret i32 %tmp2
-}
-
-define i1 @ptrtoint_i1(i8* %p) nounwind {
- %t = ptrtoint i8* %p to i1
- ret i1 %t
-}
-define i8* @inttoptr_i1(i1 %p) nounwind {
- %t = inttoptr i1 %p to i8*
- ret i8* %t
-}
-define i32 @ptrtoint_i32(i8* %p) nounwind {
- %t = ptrtoint i8* %p to i32
- ret i32 %t
-}
-define i8* @inttoptr_i32(i32 %p) nounwind {
- %t = inttoptr i32 %p to i8*
- ret i8* %t
-}
-
-define void @store_i1(i1* %p, i1 %t) nounwind {
- store i1 %t, i1* %p
- ret void
-}
-define i1 @load_i1(i1* %p) nounwind {
- %t = load i1* %p
- ret i1 %t
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fastcall-correct-mangling.ll b/libclamav/c++/llvm/test/CodeGen/X86/fastcall-correct-mangling.ll
deleted file mode 100644
index 33b18bb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fastcall-correct-mangling.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=i386-unknown-mingw32 | FileCheck %s
-
-; Check that a fastcall function gets correct mangling
-
-define x86_fastcallcc void @func(i64 %X, i8 %Y, i8 %G, i16 %Z) {
-; CHECK: @func@20:
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fastcc-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/fastcc-2.ll
deleted file mode 100644
index d044a2a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fastcc-2.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse2 | grep movsd
-; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse2 | grep mov | count 1
-
-define i32 @foo() nounwind {
-entry:
- tail call fastcc void @bar( double 1.000000e+00 ) nounwind
- ret i32 0
-}
-
-declare fastcc void @bar(double)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fastcc-byval.ll b/libclamav/c++/llvm/test/CodeGen/X86/fastcc-byval.ll
deleted file mode 100644
index 52b3e57..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fastcc-byval.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -tailcallopt=false | grep {movl\[\[:space:\]\]*8(%esp), %eax} | count 2
-; PR3122
-; rdar://6400815
-
-; byval requires a copy, even with fastcc.
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.5"
- %struct.MVT = type { i32 }
-
-define fastcc i32 @bar() nounwind {
- %V = alloca %struct.MVT
- %a = getelementptr %struct.MVT* %V, i32 0, i32 0
- store i32 1, i32* %a
- call fastcc void @foo(%struct.MVT* byval %V) nounwind
- %t = load i32* %a
- ret i32 %t
-}
-
-declare fastcc void @foo(%struct.MVT* byval)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fastcc-sret.ll b/libclamav/c++/llvm/test/CodeGen/X86/fastcc-sret.ll
deleted file mode 100644
index d457418..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fastcc-sret.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86 -tailcallopt=false | grep ret | not grep 4
-
- %struct.foo = type { [4 x i32] }
-
-define fastcc void @bar(%struct.foo* noalias sret %agg.result) nounwind {
-entry:
- %tmp1 = getelementptr %struct.foo* %agg.result, i32 0, i32 0
- %tmp3 = getelementptr [4 x i32]* %tmp1, i32 0, i32 0
- store i32 1, i32* %tmp3, align 8
- ret void
-}
-
-@dst = external global i32
-
-define void @foo() nounwind {
- %memtmp = alloca %struct.foo, align 4
- call fastcc void @bar( %struct.foo* sret %memtmp ) nounwind
- %tmp4 = getelementptr %struct.foo* %memtmp, i32 0, i32 0
- %tmp5 = getelementptr [4 x i32]* %tmp4, i32 0, i32 0
- %tmp6 = load i32* %tmp5
- store i32 %tmp6, i32* @dst
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fastcc.ll b/libclamav/c++/llvm/test/CodeGen/X86/fastcc.ll
deleted file mode 100644
index 705ab7b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fastcc.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse2 -post-RA-scheduler=false | FileCheck %s
-; CHECK: movsd %xmm0, 8(%esp)
-; CHECK: xorl %ecx, %ecx
-
-@d = external global double ; <double*> [#uses=1]
-@c = external global double ; <double*> [#uses=1]
-@b = external global double ; <double*> [#uses=1]
-@a = external global double ; <double*> [#uses=1]
-
-define i32 @foo() nounwind {
-entry:
- %0 = load double* @d, align 8 ; <double> [#uses=1]
- %1 = load double* @c, align 8 ; <double> [#uses=1]
- %2 = load double* @b, align 8 ; <double> [#uses=1]
- %3 = load double* @a, align 8 ; <double> [#uses=1]
- tail call fastcc void @bar( i32 0, i32 1, i32 2, double 1.000000e+00, double %3, double %2, double %1, double %0 ) nounwind
- ret i32 0
-}
-
-declare fastcc void @bar(i32, i32, i32, double, double, double, double, double)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fastcc3struct.ll b/libclamav/c++/llvm/test/CodeGen/X86/fastcc3struct.ll
deleted file mode 100644
index 84f8ef6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fastcc3struct.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -o %t
-; RUN: grep "movl .48, %ecx" %t
-; RUN: grep "movl .24, %edx" %t
-; RUN: grep "movl .12, %eax" %t
-
-%0 = type { i32, i32, i32 }
-
-define internal fastcc %0 @ReturnBigStruct() nounwind readnone {
-entry:
- %0 = insertvalue %0 zeroinitializer, i32 12, 0
- %1 = insertvalue %0 %0, i32 24, 1
- %2 = insertvalue %0 %1, i32 48, 2
- ret %0 %2
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/field-extract-use-trunc.ll b/libclamav/c++/llvm/test/CodeGen/X86/field-extract-use-trunc.ll
deleted file mode 100644
index 6020530..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/field-extract-use-trunc.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; RUN: llc < %s -march=x86 | grep sar | count 1
-; RUN: llc < %s -march=x86-64 | not grep sar
-
-define i32 @test(i32 %f12) {
- %tmp7.25 = lshr i32 %f12, 16
- %tmp7.26 = trunc i32 %tmp7.25 to i8
- %tmp78.2 = sext i8 %tmp7.26 to i32
- ret i32 %tmp78.2
-}
-
-define i32 @test2(i32 %f12) {
- %f11 = shl i32 %f12, 8
- %tmp7.25 = ashr i32 %f11, 24
- ret i32 %tmp7.25
-}
-
-define i32 @test3(i32 %f12) {
- %f11 = shl i32 %f12, 13
- %tmp7.25 = ashr i32 %f11, 24
- ret i32 %tmp7.25
-}
-
-define i64 @test4(i64 %f12) {
- %f11 = shl i64 %f12, 32
- %tmp7.25 = ashr i64 %f11, 32
- ret i64 %tmp7.25
-}
-
-define i16 @test5(i16 %f12) {
- %f11 = shl i16 %f12, 2
- %tmp7.25 = ashr i16 %f11, 8
- ret i16 %tmp7.25
-}
-
-define i16 @test6(i16 %f12) {
- %f11 = shl i16 %f12, 8
- %tmp7.25 = ashr i16 %f11, 8
- ret i16 %tmp7.25
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fildll.ll b/libclamav/c++/llvm/test/CodeGen/X86/fildll.ll
deleted file mode 100644
index c5a3765..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fildll.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=att -mattr=-sse2 | grep fildll | count 2
-
-define fastcc double @sint64_to_fp(i64 %X) {
- %R = sitofp i64 %X to double ; <double> [#uses=1]
- ret double %R
-}
-
-define fastcc double @uint64_to_fp(i64 %X) {
- %R = uitofp i64 %X to double ; <double> [#uses=1]
- ret double %R
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fmul-zero.ll b/libclamav/c++/llvm/test/CodeGen/X86/fmul-zero.ll
deleted file mode 100644
index 03bad65..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fmul-zero.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86-64 -enable-unsafe-fp-math | not grep mulps
-; RUN: llc < %s -march=x86-64 | grep mulps
-
-define void @test14(<4 x float>*) nounwind {
- load <4 x float>* %0, align 1
- fmul <4 x float> %2, zeroinitializer
- store <4 x float> %3, <4 x float>* %0, align 1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fold-add.ll b/libclamav/c++/llvm/test/CodeGen/X86/fold-add.ll
deleted file mode 100644
index 5e80ea5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fold-add.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep {cmpb \$0, (%r.\*,%r.\*)}
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin9.6"
-@prev_length = internal global i32 0 ; <i32*> [#uses=1]
-@window = internal global [65536 x i8] zeroinitializer, align 32 ; <[65536 x i8]*> [#uses=1]
-@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 (i32)* @longest_match to i8*)] ; <[1 x i8*]*> [#uses=0]
-
-define fastcc i32 @longest_match(i32 %cur_match) nounwind {
-entry:
- %0 = load i32* @prev_length, align 4 ; <i32> [#uses=3]
- %1 = zext i32 %cur_match to i64 ; <i64> [#uses=1]
- %2 = sext i32 %0 to i64 ; <i64> [#uses=1]
- %.sum3 = add i64 %1, %2 ; <i64> [#uses=1]
- %3 = getelementptr [65536 x i8]* @window, i64 0, i64 %.sum3 ; <i8*> [#uses=1]
- %4 = load i8* %3, align 1 ; <i8> [#uses=1]
- %5 = icmp eq i8 %4, 0 ; <i1> [#uses=1]
- br i1 %5, label %bb5, label %bb23
-
-bb5: ; preds = %entry
- ret i32 %0
-
-bb23: ; preds = %entry
- ret i32 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fold-and-shift.ll b/libclamav/c++/llvm/test/CodeGen/X86/fold-and-shift.ll
deleted file mode 100644
index 9f79f77..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fold-and-shift.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep and
-
-define i32 @t1(i8* %X, i32 %i) {
-entry:
- %tmp2 = shl i32 %i, 2 ; <i32> [#uses=1]
- %tmp4 = and i32 %tmp2, 1020 ; <i32> [#uses=1]
- %tmp7 = getelementptr i8* %X, i32 %tmp4 ; <i8*> [#uses=1]
- %tmp78 = bitcast i8* %tmp7 to i32* ; <i32*> [#uses=1]
- %tmp9 = load i32* %tmp78, align 4 ; <i32> [#uses=1]
- ret i32 %tmp9
-}
-
-define i32 @t2(i16* %X, i32 %i) {
-entry:
- %tmp2 = shl i32 %i, 1 ; <i32> [#uses=1]
- %tmp4 = and i32 %tmp2, 131070 ; <i32> [#uses=1]
- %tmp7 = getelementptr i16* %X, i32 %tmp4 ; <i16*> [#uses=1]
- %tmp78 = bitcast i16* %tmp7 to i32* ; <i32*> [#uses=1]
- %tmp9 = load i32* %tmp78, align 4 ; <i32> [#uses=1]
- ret i32 %tmp9
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fold-call-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/fold-call-2.ll
deleted file mode 100644
index 7a2b038..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fold-call-2.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin | grep mov | count 1
-
-@f = external global void ()* ; <void ()**> [#uses=1]
-
-define i32 @main() nounwind {
-entry:
- load void ()** @f, align 8 ; <void ()*>:0 [#uses=1]
- tail call void %0( ) nounwind
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fold-call-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/fold-call-3.ll
deleted file mode 100644
index 337a7ed..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fold-call-3.ll
+++ /dev/null
@@ -1,45 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | grep call | grep 560
-; rdar://6522427
-
- %"struct.clang::Action" = type { %"struct.clang::ActionBase" }
- %"struct.clang::ActionBase" = type { i32 (...)** }
- %"struct.clang::ActionBase::ActionResult<0u>" = type { i8*, i8 }
-@NumTrials = internal global i32 10000000 ; <i32*> [#uses=2]
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (void (i8*, %"struct.clang::Action"*)* @_Z25RawPointerPerformanceTestPvRN5clang6ActionE to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define void @_Z25RawPointerPerformanceTestPvRN5clang6ActionE(i8* %Val, %"struct.clang::Action"* %Actions) nounwind {
-entry:
- %0 = alloca %"struct.clang::ActionBase::ActionResult<0u>", align 8 ; <%"struct.clang::ActionBase::ActionResult<0u>"*> [#uses=3]
- %1 = load i32* @NumTrials, align 4 ; <i32> [#uses=1]
- %2 = icmp eq i32 %1, 0 ; <i1> [#uses=1]
- br i1 %2, label %return, label %bb.nph
-
-bb.nph: ; preds = %entry
- %3 = getelementptr %"struct.clang::Action"* %Actions, i64 0, i32 0, i32 0 ; <i32 (...)***> [#uses=1]
- %mrv_gep = bitcast %"struct.clang::ActionBase::ActionResult<0u>"* %0 to i64* ; <i64*> [#uses=1]
- %mrv_gep1 = getelementptr %"struct.clang::ActionBase::ActionResult<0u>"* %0, i64 0, i32 1 ; <i8*> [#uses=1]
- %4 = bitcast i8* %mrv_gep1 to i64* ; <i64*> [#uses=1]
- %5 = getelementptr %"struct.clang::ActionBase::ActionResult<0u>"* %0, i64 0, i32 0 ; <i8**> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb, %bb.nph
- %Trial.01 = phi i32 [ 0, %bb.nph ], [ %12, %bb ] ; <i32> [#uses=1]
- %Val_addr.02 = phi i8* [ %Val, %bb.nph ], [ %11, %bb ] ; <i8*> [#uses=1]
- %6 = load i32 (...)*** %3, align 8 ; <i32 (...)**> [#uses=1]
- %7 = getelementptr i32 (...)** %6, i64 70 ; <i32 (...)**> [#uses=1]
- %8 = load i32 (...)** %7, align 8 ; <i32 (...)*> [#uses=1]
- %9 = bitcast i32 (...)* %8 to { i64, i64 } (%"struct.clang::Action"*, i8*)* ; <{ i64, i64 } (%"struct.clang::Action"*, i8*)*> [#uses=1]
- %10 = call { i64, i64 } %9(%"struct.clang::Action"* %Actions, i8* %Val_addr.02) nounwind ; <{ i64, i64 }> [#uses=2]
- %mrv_gr = extractvalue { i64, i64 } %10, 0 ; <i64> [#uses=1]
- store i64 %mrv_gr, i64* %mrv_gep
- %mrv_gr2 = extractvalue { i64, i64 } %10, 1 ; <i64> [#uses=1]
- store i64 %mrv_gr2, i64* %4
- %11 = load i8** %5, align 8 ; <i8*> [#uses=1]
- %12 = add i32 %Trial.01, 1 ; <i32> [#uses=2]
- %13 = load i32* @NumTrials, align 4 ; <i32> [#uses=1]
- %14 = icmp ult i32 %12, %13 ; <i1> [#uses=1]
- br i1 %14, label %bb, label %return
-
-return: ; preds = %bb, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fold-call.ll b/libclamav/c++/llvm/test/CodeGen/X86/fold-call.ll
deleted file mode 100644
index 603e9ad..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fold-call.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep mov
-; RUN: llc < %s -march=x86-64 | not grep mov
-
-declare void @bar()
-
-define void @foo(i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, void()* %arg) nounwind {
- call void @bar()
- call void %arg()
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fold-imm.ll b/libclamav/c++/llvm/test/CodeGen/X86/fold-imm.ll
deleted file mode 100644
index f1fcbcf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fold-imm.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86 | grep inc
-; RUN: llc < %s -march=x86 | grep add | grep 4
-
-define i32 @test(i32 %X) nounwind {
-entry:
- %0 = add i32 %X, 1
- ret i32 %0
-}
-
-define i32 @test2(i32 %X) nounwind {
-entry:
- %0 = add i32 %X, 4
- ret i32 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fold-load.ll b/libclamav/c++/llvm/test/CodeGen/X86/fold-load.ll
deleted file mode 100644
index 5525af2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fold-load.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; RUN: llc < %s -march=x86 | FileCheck %s
- %struct._obstack_chunk = type { i8*, %struct._obstack_chunk*, [4 x i8] }
- %struct.obstack = type { i32, %struct._obstack_chunk*, i8*, i8*, i8*, i32, i32, %struct._obstack_chunk* (...)*, void (...)*, i8*, i8 }
-@stmt_obstack = external global %struct.obstack ; <%struct.obstack*> [#uses=1]
-
-; This should just not crash.
-define void @test1() nounwind {
-entry:
- br i1 true, label %cond_true, label %cond_next
-
-cond_true: ; preds = %entry
- %new_size.0.i = select i1 false, i32 0, i32 0 ; <i32> [#uses=1]
- %tmp.i = load i32* bitcast (i8* getelementptr (%struct.obstack* @stmt_obstack, i32 0, i32 10) to i32*) ; <i32> [#uses=1]
- %tmp.i.upgrd.1 = trunc i32 %tmp.i to i8 ; <i8> [#uses=1]
- %tmp21.i = and i8 %tmp.i.upgrd.1, 1 ; <i8> [#uses=1]
- %tmp22.i = icmp eq i8 %tmp21.i, 0 ; <i1> [#uses=1]
- br i1 %tmp22.i, label %cond_false30.i, label %cond_true23.i
-
-cond_true23.i: ; preds = %cond_true
- ret void
-
-cond_false30.i: ; preds = %cond_true
- %tmp35.i = tail call %struct._obstack_chunk* null( i32 %new_size.0.i ) ; <%struct._obstack_chunk*> [#uses=0]
- ret void
-
-cond_next: ; preds = %entry
- ret void
-}
-
-
-
-define i32 @test2(i16* %P, i16* %Q) nounwind {
- %A = load i16* %P, align 4 ; <i16> [#uses=11]
- %C = zext i16 %A to i32 ; <i32> [#uses=1]
- %D = and i32 %C, 255 ; <i32> [#uses=1]
- br label %L
-L:
-
- store i16 %A, i16* %Q
- ret i32 %D
-
-; CHECK: test2:
-; CHECK: movl 4(%esp), %eax
-; CHECK-NEXT: movzwl (%eax), %ecx
-
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fold-mul-lohi.ll b/libclamav/c++/llvm/test/CodeGen/X86/fold-mul-lohi.ll
deleted file mode 100644
index 0351eca..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fold-mul-lohi.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep lea
-; RUN: llc < %s -march=x86-64 | not grep lea
-
-@B = external global [1000 x i8], align 32
-@A = external global [1000 x i8], align 32
-@P = external global [1000 x i8], align 32
-
-define void @foo(i32 %m) nounwind {
-entry:
- %tmp1 = icmp sgt i32 %m, 0
- br i1 %tmp1, label %bb, label %return
-
-bb:
- %i.019.0 = phi i32 [ %indvar.next, %bb ], [ 0, %entry ]
- %tmp2 = getelementptr [1000 x i8]* @B, i32 0, i32 %i.019.0
- %tmp3 = load i8* %tmp2, align 4
- %tmp4 = mul i8 %tmp3, 2
- %tmp5 = getelementptr [1000 x i8]* @A, i32 0, i32 %i.019.0
- store i8 %tmp4, i8* %tmp5, align 4
- %tmp8 = mul i32 %i.019.0, 9
- %tmp10 = getelementptr [1000 x i8]* @P, i32 0, i32 %tmp8
- store i8 17, i8* %tmp10, align 4
- %indvar.next = add i32 %i.019.0, 1
- %exitcond = icmp eq i32 %indvar.next, %m
- br i1 %exitcond, label %return, label %bb
-
-return:
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fold-pcmpeqd-0.ll b/libclamav/c++/llvm/test/CodeGen/X86/fold-pcmpeqd-0.ll
deleted file mode 100644
index ef5202f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fold-pcmpeqd-0.ll
+++ /dev/null
@@ -1,105 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=yonah | not grep pcmpeqd
-; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=yonah | grep orps | grep CPI1_2 | count 2
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | grep pcmpeqd | count 1
-
-; This testcase shouldn't need to spill the -1 value,
-; so it should just use pcmpeqd to materialize an all-ones vector.
-; For i386, cp load of -1 are folded.
-
- %struct.__ImageExecInfo = type <{ <4 x i32>, <4 x float>, <2 x i64>, i8*, i8*, i8*, i32, i32, i32, i32, i32 }>
- %struct._cl_image_format_t = type <{ i32, i32, i32 }>
- %struct._image2d_t = type <{ i8*, %struct._cl_image_format_t, i32, i32, i32, i32, i32, i32 }>
-
-define void @program_1(%struct._image2d_t* %dest, %struct._image2d_t* %t0, <4 x float> %p0, <4 x float> %p1, <4 x float> %p4, <4 x float> %p5, <4 x float> %p6) nounwind {
-entry:
- %tmp3.i = load i32* null ; <i32> [#uses=1]
- %cmp = icmp sgt i32 %tmp3.i, 200 ; <i1> [#uses=1]
- br i1 %cmp, label %forcond, label %ifthen
-
-ifthen: ; preds = %entry
- ret void
-
-forcond: ; preds = %entry
- %tmp3.i536 = load i32* null ; <i32> [#uses=1]
- %cmp12 = icmp slt i32 0, %tmp3.i536 ; <i1> [#uses=1]
- br i1 %cmp12, label %forbody, label %afterfor
-
-forbody: ; preds = %forcond
- %bitcast204.i313 = bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>> [#uses=1]
- %mul233 = fmul <4 x float> %bitcast204.i313, zeroinitializer ; <<4 x float>> [#uses=1]
- %mul257 = fmul <4 x float> %mul233, zeroinitializer ; <<4 x float>> [#uses=1]
- %mul275 = fmul <4 x float> %mul257, zeroinitializer ; <<4 x float>> [#uses=1]
- %tmp51 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %mul275, <4 x float> zeroinitializer) nounwind ; <<4 x float>> [#uses=1]
- %bitcast198.i182 = bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>> [#uses=0]
- %bitcast204.i185 = bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp69 = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> zeroinitializer) nounwind ; <<4 x i32>> [#uses=1]
- %tmp70 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %tmp69) nounwind ; <<4 x float>> [#uses=1]
- %sub140.i78 = fsub <4 x float> zeroinitializer, %tmp70 ; <<4 x float>> [#uses=2]
- %mul166.i86 = fmul <4 x float> zeroinitializer, %sub140.i78 ; <<4 x float>> [#uses=1]
- %add167.i87 = fadd <4 x float> %mul166.i86, < float 0x3FE62ACB60000000, float 0x3FE62ACB60000000, float 0x3FE62ACB60000000, float 0x3FE62ACB60000000 > ; <<4 x float>> [#uses=1]
- %mul171.i88 = fmul <4 x float> %add167.i87, %sub140.i78 ; <<4 x float>> [#uses=1]
- %add172.i89 = fadd <4 x float> %mul171.i88, < float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000 > ; <<4 x float>> [#uses=1]
- %bitcast176.i90 = bitcast <4 x float> %add172.i89 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %andnps178.i92 = and <4 x i32> %bitcast176.i90, zeroinitializer ; <<4 x i32>> [#uses=1]
- %bitcast179.i93 = bitcast <4 x i32> %andnps178.i92 to <4 x float> ; <<4 x float>> [#uses=1]
- %mul186.i96 = fmul <4 x float> %bitcast179.i93, zeroinitializer ; <<4 x float>> [#uses=1]
- %bitcast190.i98 = bitcast <4 x float> %mul186.i96 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %andnps192.i100 = and <4 x i32> %bitcast190.i98, zeroinitializer ; <<4 x i32>> [#uses=1]
- %xorps.i102 = xor <4 x i32> zeroinitializer, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
- %orps203.i103 = or <4 x i32> %andnps192.i100, %xorps.i102 ; <<4 x i32>> [#uses=1]
- %bitcast204.i104 = bitcast <4 x i32> %orps203.i103 to <4 x float> ; <<4 x float>> [#uses=1]
- %cmple.i = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> zeroinitializer, <4 x float> %tmp51, i8 2) nounwind ; <<4 x float>> [#uses=1]
- %tmp80 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> zeroinitializer) nounwind ; <<4 x float>> [#uses=1]
- %sub140.i = fsub <4 x float> zeroinitializer, %tmp80 ; <<4 x float>> [#uses=1]
- %bitcast148.i = bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>> [#uses=1]
- %andnps150.i = and <4 x i32> %bitcast148.i, < i32 -2139095041, i32 -2139095041, i32 -2139095041, i32 -2139095041 > ; <<4 x i32>> [#uses=0]
- %mul171.i = fmul <4 x float> zeroinitializer, %sub140.i ; <<4 x float>> [#uses=1]
- %add172.i = fadd <4 x float> %mul171.i, < float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000 > ; <<4 x float>> [#uses=1]
- %bitcast176.i = bitcast <4 x float> %add172.i to <4 x i32> ; <<4 x i32>> [#uses=1]
- %andnps178.i = and <4 x i32> %bitcast176.i, zeroinitializer ; <<4 x i32>> [#uses=1]
- %bitcast179.i = bitcast <4 x i32> %andnps178.i to <4 x float> ; <<4 x float>> [#uses=1]
- %mul186.i = fmul <4 x float> %bitcast179.i, zeroinitializer ; <<4 x float>> [#uses=1]
- %bitcast189.i = bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>> [#uses=0]
- %bitcast190.i = bitcast <4 x float> %mul186.i to <4 x i32> ; <<4 x i32>> [#uses=1]
- %andnps192.i = and <4 x i32> %bitcast190.i, zeroinitializer ; <<4 x i32>> [#uses=1]
- %bitcast198.i = bitcast <4 x float> %cmple.i to <4 x i32> ; <<4 x i32>> [#uses=1]
- %xorps.i = xor <4 x i32> %bitcast198.i, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
- %orps203.i = or <4 x i32> %andnps192.i, %xorps.i ; <<4 x i32>> [#uses=1]
- %bitcast204.i = bitcast <4 x i32> %orps203.i to <4 x float> ; <<4 x float>> [#uses=1]
- %mul307 = fmul <4 x float> %bitcast204.i185, zeroinitializer ; <<4 x float>> [#uses=1]
- %mul310 = fmul <4 x float> %bitcast204.i104, zeroinitializer ; <<4 x float>> [#uses=2]
- %mul313 = fmul <4 x float> %bitcast204.i, zeroinitializer ; <<4 x float>> [#uses=1]
- %tmp82 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %mul307, <4 x float> zeroinitializer) nounwind ; <<4 x float>> [#uses=1]
- %bitcast11.i15 = bitcast <4 x float> %tmp82 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %andnps.i17 = and <4 x i32> %bitcast11.i15, zeroinitializer ; <<4 x i32>> [#uses=1]
- %orps.i18 = or <4 x i32> %andnps.i17, zeroinitializer ; <<4 x i32>> [#uses=1]
- %bitcast17.i19 = bitcast <4 x i32> %orps.i18 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp83 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %mul310, <4 x float> zeroinitializer) nounwind ; <<4 x float>> [#uses=1]
- %bitcast.i3 = bitcast <4 x float> %mul310 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %bitcast6.i4 = bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>> [#uses=2]
- %andps.i5 = and <4 x i32> %bitcast.i3, %bitcast6.i4 ; <<4 x i32>> [#uses=1]
- %bitcast11.i6 = bitcast <4 x float> %tmp83 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %not.i7 = xor <4 x i32> %bitcast6.i4, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
- %andnps.i8 = and <4 x i32> %bitcast11.i6, %not.i7 ; <<4 x i32>> [#uses=1]
- %orps.i9 = or <4 x i32> %andnps.i8, %andps.i5 ; <<4 x i32>> [#uses=1]
- %bitcast17.i10 = bitcast <4 x i32> %orps.i9 to <4 x float> ; <<4 x float>> [#uses=1]
- %bitcast.i = bitcast <4 x float> %mul313 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %andps.i = and <4 x i32> %bitcast.i, zeroinitializer ; <<4 x i32>> [#uses=1]
- %orps.i = or <4 x i32> zeroinitializer, %andps.i ; <<4 x i32>> [#uses=1]
- %bitcast17.i = bitcast <4 x i32> %orps.i to <4 x float> ; <<4 x float>> [#uses=1]
- call void null(<4 x float> %bitcast17.i19, <4 x float> %bitcast17.i10, <4 x float> %bitcast17.i, <4 x float> zeroinitializer, %struct.__ImageExecInfo* null, <4 x i32> zeroinitializer) nounwind
- unreachable
-
-afterfor: ; preds = %forcond
- ret void
-}
-
-declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
-
-declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone
-
-declare <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float>) nounwind readnone
-
-declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind readnone
-
-declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fold-pcmpeqd-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/fold-pcmpeqd-1.ll
deleted file mode 100644
index cc4198d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fold-pcmpeqd-1.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 > %t
-; RUN: grep pcmpeqd %t | count 1
-; RUN: grep xor %t | count 1
-; RUN: not grep LCP %t
-
-define <2 x double> @foo() nounwind {
- ret <2 x double> bitcast (<2 x i64><i64 -1, i64 -1> to <2 x double>)
-}
-define <2 x double> @bar() nounwind {
- ret <2 x double> bitcast (<2 x i64><i64 0, i64 0> to <2 x double>)
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fold-pcmpeqd-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/fold-pcmpeqd-2.ll
deleted file mode 100644
index 49f8795..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fold-pcmpeqd-2.ll
+++ /dev/null
@@ -1,83 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=yonah | not grep pcmpeqd
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | grep pcmpeqd | count 1
-
-; This testcase should need to spill the -1 value on x86-32,
-; so it shouldn't use pcmpeqd to materialize an all-ones vector; it
-; should use a constant-pool load instead.
-
- %struct.__ImageExecInfo = type <{ <4 x i32>, <4 x float>, <2 x i64>, i8*, i8*, i8*, i32, i32, i32, i32, i32 }>
- %struct._cl_image_format_t = type <{ i32, i32, i32 }>
- %struct._image2d_t = type <{ i8*, %struct._cl_image_format_t, i32, i32, i32, i32, i32, i32 }>
-
-define void @program_1(%struct._image2d_t* %dest, %struct._image2d_t* %t0, <4 x float> %p0, <4 x float> %p1, <4 x float> %p4, <4 x float> %p5, <4 x float> %p6) nounwind {
-entry:
- %tmp3.i = load i32* null ; <i32> [#uses=1]
- %cmp = icmp slt i32 0, %tmp3.i ; <i1> [#uses=1]
- br i1 %cmp, label %forcond, label %ifthen
-
-ifthen: ; preds = %entry
- ret void
-
-forcond: ; preds = %entry
- %tmp3.i536 = load i32* null ; <i32> [#uses=1]
- %cmp12 = icmp slt i32 0, %tmp3.i536 ; <i1> [#uses=1]
- br i1 %cmp12, label %forbody, label %afterfor
-
-forbody: ; preds = %forcond
- %bitcast204.i104 = bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp78 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> < float 1.280000e+02, float 1.280000e+02, float 1.280000e+02, float 1.280000e+02 >, <4 x float> zeroinitializer) nounwind ; <<4 x float>> [#uses=2]
- %tmp79 = call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %tmp78) nounwind ; <<4 x i32>> [#uses=1]
- %tmp80 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %tmp79) nounwind ; <<4 x float>> [#uses=1]
- %sub140.i = fsub <4 x float> %tmp78, %tmp80 ; <<4 x float>> [#uses=2]
- %mul166.i = fmul <4 x float> zeroinitializer, %sub140.i ; <<4 x float>> [#uses=1]
- %add167.i = fadd <4 x float> %mul166.i, < float 0x3FE62ACB60000000, float 0x3FE62ACB60000000, float 0x3FE62ACB60000000, float 0x3FE62ACB60000000 > ; <<4 x float>> [#uses=1]
- %mul171.i = fmul <4 x float> %add167.i, %sub140.i ; <<4 x float>> [#uses=1]
- %add172.i = fadd <4 x float> %mul171.i, < float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000, float 0x3FF0000A40000000 > ; <<4 x float>> [#uses=1]
- %bitcast176.i = bitcast <4 x float> %add172.i to <4 x i32> ; <<4 x i32>> [#uses=1]
- %andnps178.i = and <4 x i32> %bitcast176.i, zeroinitializer ; <<4 x i32>> [#uses=1]
- %bitcast179.i = bitcast <4 x i32> %andnps178.i to <4 x float> ; <<4 x float>> [#uses=1]
- %mul186.i = fmul <4 x float> %bitcast179.i, zeroinitializer ; <<4 x float>> [#uses=1]
- %bitcast190.i = bitcast <4 x float> %mul186.i to <4 x i32> ; <<4 x i32>> [#uses=1]
- %andnps192.i = and <4 x i32> %bitcast190.i, zeroinitializer ; <<4 x i32>> [#uses=1]
- %xorps.i = xor <4 x i32> zeroinitializer, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
- %orps203.i = or <4 x i32> %andnps192.i, %xorps.i ; <<4 x i32>> [#uses=1]
- %bitcast204.i = bitcast <4 x i32> %orps203.i to <4 x float> ; <<4 x float>> [#uses=1]
- %mul310 = fmul <4 x float> %bitcast204.i104, zeroinitializer ; <<4 x float>> [#uses=2]
- %mul313 = fmul <4 x float> %bitcast204.i, zeroinitializer ; <<4 x float>> [#uses=1]
- %cmpunord.i11 = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> zeroinitializer, <4 x float> zeroinitializer, i8 3) nounwind ; <<4 x float>> [#uses=1]
- %bitcast6.i13 = bitcast <4 x float> %cmpunord.i11 to <4 x i32> ; <<4 x i32>> [#uses=2]
- %andps.i14 = and <4 x i32> zeroinitializer, %bitcast6.i13 ; <<4 x i32>> [#uses=1]
- %not.i16 = xor <4 x i32> %bitcast6.i13, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
- %andnps.i17 = and <4 x i32> zeroinitializer, %not.i16 ; <<4 x i32>> [#uses=1]
- %orps.i18 = or <4 x i32> %andnps.i17, %andps.i14 ; <<4 x i32>> [#uses=1]
- %bitcast17.i19 = bitcast <4 x i32> %orps.i18 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp83 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %mul310, <4 x float> zeroinitializer) nounwind ; <<4 x float>> [#uses=1]
- %bitcast.i3 = bitcast <4 x float> %mul310 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %andps.i5 = and <4 x i32> %bitcast.i3, zeroinitializer ; <<4 x i32>> [#uses=1]
- %bitcast11.i6 = bitcast <4 x float> %tmp83 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %not.i7 = xor <4 x i32> zeroinitializer, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
- %andnps.i8 = and <4 x i32> %bitcast11.i6, %not.i7 ; <<4 x i32>> [#uses=1]
- %orps.i9 = or <4 x i32> %andnps.i8, %andps.i5 ; <<4 x i32>> [#uses=1]
- %bitcast17.i10 = bitcast <4 x i32> %orps.i9 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp84 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %mul313, <4 x float> zeroinitializer) nounwind ; <<4 x float>> [#uses=1]
- %bitcast6.i = bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>> [#uses=2]
- %andps.i = and <4 x i32> zeroinitializer, %bitcast6.i ; <<4 x i32>> [#uses=1]
- %bitcast11.i = bitcast <4 x float> %tmp84 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %not.i = xor <4 x i32> %bitcast6.i, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
- %andnps.i = and <4 x i32> %bitcast11.i, %not.i ; <<4 x i32>> [#uses=1]
- %orps.i = or <4 x i32> %andnps.i, %andps.i ; <<4 x i32>> [#uses=1]
- %bitcast17.i = bitcast <4 x i32> %orps.i to <4 x float> ; <<4 x float>> [#uses=1]
- call void null(<4 x float> %bitcast17.i19, <4 x float> %bitcast17.i10, <4 x float> %bitcast17.i, <4 x float> zeroinitializer, %struct.__ImageExecInfo* null, <4 x i32> zeroinitializer) nounwind
- unreachable
-
-afterfor: ; preds = %forcond
- ret void
-}
-
-declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
-
-declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone
-
-declare <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float>) nounwind readnone
-
-declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fold-sext-trunc.ll b/libclamav/c++/llvm/test/CodeGen/X86/fold-sext-trunc.ll
deleted file mode 100644
index 2605123..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fold-sext-trunc.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movslq | count 1
-; PR4050
-
- type { i64 } ; type %0
- %struct.S1 = type { i16, i32 }
-@g_10 = external global %struct.S1 ; <%struct.S1*> [#uses=2]
-
-declare void @func_28(i64, i64)
-
-define void @int322(i32 %foo) nounwind {
-entry:
- %val = load i64* getelementptr (%0* bitcast (%struct.S1* @g_10 to %0*), i32 0, i32 0) ; <i64> [#uses=1]
- %0 = load i32* getelementptr (%struct.S1* @g_10, i32 0, i32 1), align 4 ; <i32> [#uses=1]
- %1 = sext i32 %0 to i64 ; <i64> [#uses=1]
- %tmp4.i = lshr i64 %val, 32 ; <i64> [#uses=1]
- %tmp5.i = trunc i64 %tmp4.i to i32 ; <i32> [#uses=1]
- %2 = sext i32 %tmp5.i to i64 ; <i64> [#uses=1]
- tail call void @func_28(i64 %2, i64 %1) nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fp-immediate-shorten.ll b/libclamav/c++/llvm/test/CodeGen/X86/fp-immediate-shorten.ll
deleted file mode 100644
index cafc61a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fp-immediate-shorten.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-;; Test that this FP immediate is stored in the constant pool as a float.
-
-; RUN: llc < %s -march=x86 -mattr=-sse2,-sse3 | \
-; RUN: grep {.long.1123418112}
-
-define double @D() {
- ret double 1.230000e+02
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fp-in-intregs.ll b/libclamav/c++/llvm/test/CodeGen/X86/fp-in-intregs.ll
deleted file mode 100644
index 5d895a4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fp-in-intregs.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah | not grep -E {\(\(xor\|and\)ps\|movd\)}
-
-; These operations should be done in integer registers, eliminating constant
-; pool loads, movd's etc.
-
-define i32 @test1(float %x) nounwind {
-entry:
- %tmp2 = fsub float -0.000000e+00, %x ; <float> [#uses=1]
- %tmp210 = bitcast float %tmp2 to i32 ; <i32> [#uses=1]
- ret i32 %tmp210
-}
-
-define i32 @test2(float %x) nounwind {
-entry:
- %tmp2 = tail call float @copysignf( float 1.000000e+00, float %x ) nounwind readnone ; <float> [#uses=1]
- %tmp210 = bitcast float %tmp2 to i32 ; <i32> [#uses=1]
- ret i32 %tmp210
-}
-
-declare float @copysignf(float, float) nounwind readnone
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-2results.ll b/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-2results.ll
deleted file mode 100644
index 321e267..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-2results.ll
+++ /dev/null
@@ -1,60 +0,0 @@
-; RUN: llc < %s -march=x86 | grep fldz
-; RUN: llc < %s -march=x86-64 | grep fld1
-
-; This is basically this code on x86-64:
-; _Complex long double test() { return 1.0; }
-define {x86_fp80, x86_fp80} @test() {
- %A = fpext double 1.0 to x86_fp80
- %B = fpext double 0.0 to x86_fp80
- ret x86_fp80 %A, x86_fp80 %B
-}
-
-
-;_test2:
-; fld1
-; fld %st(0)
-; ret
-define {x86_fp80, x86_fp80} @test2() {
- %A = fpext double 1.0 to x86_fp80
- ret x86_fp80 %A, x86_fp80 %A
-}
-
-; Uses both values.
-define void @call1(x86_fp80 *%P1, x86_fp80 *%P2) {
- %a = call {x86_fp80,x86_fp80} @test()
- %b = getresult {x86_fp80,x86_fp80} %a, 0
- store x86_fp80 %b, x86_fp80* %P1
-
- %c = getresult {x86_fp80,x86_fp80} %a, 1
- store x86_fp80 %c, x86_fp80* %P2
- ret void
-}
-
-; Uses both values, requires fxch
-define void @call2(x86_fp80 *%P1, x86_fp80 *%P2) {
- %a = call {x86_fp80,x86_fp80} @test()
- %b = getresult {x86_fp80,x86_fp80} %a, 1
- store x86_fp80 %b, x86_fp80* %P1
-
- %c = getresult {x86_fp80,x86_fp80} %a, 0
- store x86_fp80 %c, x86_fp80* %P2
- ret void
-}
-
-; Uses ST(0), ST(1) is dead but must be popped.
-define void @call3(x86_fp80 *%P1, x86_fp80 *%P2) {
- %a = call {x86_fp80,x86_fp80} @test()
- %b = getresult {x86_fp80,x86_fp80} %a, 0
- store x86_fp80 %b, x86_fp80* %P1
- ret void
-}
-
-; Uses ST(1), ST(0) is dead and must be popped.
-define void @call4(x86_fp80 *%P1, x86_fp80 *%P2) {
- %a = call {x86_fp80,x86_fp80} @test()
-
- %c = getresult {x86_fp80,x86_fp80} %a, 1
- store x86_fp80 %c, x86_fp80* %P2
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-O0-crash.ll b/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-O0-crash.ll
deleted file mode 100644
index 4768ea2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-O0-crash.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc %s -O0 -fast-isel -regalloc=local -o -
-; PR4767
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin10"
-
-define void @fn(x86_fp80 %x) nounwind ssp {
-entry:
- %x.addr = alloca x86_fp80 ; <x86_fp80*> [#uses=5]
- store x86_fp80 %x, x86_fp80* %x.addr
- br i1 false, label %cond.true, label %cond.false
-
-cond.true: ; preds = %entry
- %tmp = load x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
- %tmp1 = load x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
- %cmp = fcmp oeq x86_fp80 %tmp, %tmp1 ; <i1> [#uses=1]
- br i1 %cmp, label %if.then, label %if.end
-
-cond.false: ; preds = %entry
- %tmp2 = load x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
- %tmp3 = load x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
- %cmp4 = fcmp une x86_fp80 %tmp2, %tmp3 ; <i1> [#uses=1]
- br i1 %cmp4, label %if.then, label %if.end
-
-if.then: ; preds = %cond.false, %cond.true
- br label %if.end
-
-if.end: ; preds = %if.then, %cond.false, %cond.true
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-compare.ll b/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-compare.ll
deleted file mode 100644
index 4bdf459..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-compare.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=i386 | \
-; RUN: grep {fucomi.*st.\[12\]}
-; PR1012
-
-define float @foo(float* %col.2.0) {
- %tmp = load float* %col.2.0 ; <float> [#uses=3]
- %tmp16 = fcmp olt float %tmp, 0.000000e+00 ; <i1> [#uses=1]
- %tmp20 = fsub float -0.000000e+00, %tmp ; <float> [#uses=1]
- %iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp ; <float> [#uses=1]
- ret float %iftmp.2.0
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-direct-ret.ll b/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-direct-ret.ll
deleted file mode 100644
index 5a28bb5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-direct-ret.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep fstp
-; RUN: llc < %s -march=x86 -mcpu=yonah | not grep movsd
-
-declare double @foo()
-
-define double @bar() {
-entry:
- %tmp5 = tail call double @foo()
- ret double %tmp5
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-ret-conv.ll b/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-ret-conv.ll
deleted file mode 100644
index f220b24..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-ret-conv.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -mcpu=yonah | grep cvtss2sd
-; RUN: llc < %s -mcpu=yonah | grep fstps
-; RUN: llc < %s -mcpu=yonah | not grep cvtsd2ss
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
-
-define void @test(double *%b) {
-entry:
- %tmp13 = tail call double @foo()
- %tmp1314 = fptrunc double %tmp13 to float ; <float> [#uses=1]
- %tmp3940 = fpext float %tmp1314 to double ; <double> [#uses=1]
- volatile store double %tmp3940, double* %b
- ret void
-}
-
-declare double @foo()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-ret-store.ll b/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-ret-store.ll
deleted file mode 100644
index 05dfc54..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-ret-store.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -mcpu=yonah | not grep movss
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin8"
-
-; This should store directly into P from the FP stack. It should not
-; go through a stack slot to get there.
-
-define void @bar(double* %P) {
-entry:
- %tmp = tail call double (...)* @foo( ) ; <double> [#uses=1]
- store double %tmp, double* %P, align 8
- ret void
-}
-
-declare double @foo(...)
-
-define void @bar2(float* %P) {
-entry:
- %tmp = tail call double (...)* @foo2( ) ; <double> [#uses=1]
- %tmp1 = fptrunc double %tmp to float ; <float> [#uses=1]
- store float %tmp1, float* %P, align 4
- ret void
-}
-
-declare double @foo2(...)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-ret.ll b/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-ret.ll
deleted file mode 100644
index c83a0cb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-ret.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin8 -mcpu=yonah -march=x86 > %t
-; RUN: grep fldl %t | count 1
-; RUN: not grep xmm %t
-; RUN: grep {sub.*esp} %t | count 1
-
-; These testcases shouldn't require loading into an XMM register then storing
-; to memory, then reloading into an FPStack reg.
-
-define double @test1(double *%P) {
- %A = load double* %P
- ret double %A
-}
-
-; fastcc should return a value
-define fastcc double @test2(<2 x double> %A) {
- %B = extractelement <2 x double> %A, i32 0
- ret double %B
-}
-
-define fastcc double @test3(<4 x float> %A) {
- %B = bitcast <4 x float> %A to <2 x double>
- %C = call fastcc double @test2(<2 x double> %B)
- ret double %C
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-retcopy.ll b/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-retcopy.ll
deleted file mode 100644
index 67dcb18..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-retcopy.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; This should not copy the result of foo into an xmm register.
-; RUN: llc < %s -march=x86 -mcpu=yonah -mtriple=i686-apple-darwin9 | not grep xmm
-; rdar://5689903
-
-declare double @foo()
-
-define double @carg({ double, double }* byval %z) nounwind {
-entry:
- %tmp5 = tail call double @foo() nounwind ; <double> [#uses=1]
- ret double %tmp5
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-set-st1.ll b/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-set-st1.ll
deleted file mode 100644
index 894897a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fp-stack-set-st1.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86 | grep fxch | count 2
-
-define i32 @main() nounwind {
-entry:
- %asmtmp = tail call { double, double } asm sideeffect "fmul\09%st(1),%st\0A\09fst\09%st(1)\0A\09frndint\0A\09fxch %st(1)\0A\09fsub\09%st(1),%st\0A\09f2xm1\0A\09", "={st},={st(1)},0,1,~{dirflag},~{fpsr},~{flags}"(double 0x4030FEFBD582097D, double 4.620000e+01) nounwind ; <{ double, double }> [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fp2sint.ll b/libclamav/c++/llvm/test/CodeGen/X86/fp2sint.ll
deleted file mode 100644
index 1675444..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fp2sint.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-;; LowerFP_TO_SINT should not create a stack object if it's not needed.
-
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep add
-
-define i32 @main(i32 %argc, i8** %argv) {
-cond_false.i.i.i: ; preds = %bb.i5
- %tmp35.i = load double* null, align 8 ; <double> [#uses=1]
- %tmp3536.i = fptosi double %tmp35.i to i32 ; <i32> [#uses=1]
- %tmp3536140.i = zext i32 %tmp3536.i to i64 ; <i64> [#uses=1]
- %tmp39.i = load double* null, align 4 ; <double> [#uses=1]
- %tmp3940.i = fptosi double %tmp39.i to i32 ; <i32> [#uses=1]
- %tmp3940137.i = zext i32 %tmp3940.i to i64 ; <i64> [#uses=1]
- %tmp3940137138.i = shl i64 %tmp3940137.i, 32 ; <i64> [#uses=1]
- %tmp3940137138.ins.i = or i64 %tmp3940137138.i, %tmp3536140.i ; <i64> [#uses=1]
- %tmp95.i.i = trunc i64 %tmp3940137138.ins.i to i32 ; <i32> [#uses=1]
- store i32 %tmp95.i.i, i32* null, align 4
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fp_constant_op.ll b/libclamav/c++/llvm/test/CodeGen/X86/fp_constant_op.ll
deleted file mode 100644
index b3ec538..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fp_constant_op.ll
+++ /dev/null
@@ -1,46 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel -mcpu=i486 | FileCheck %s
-; Test that the load of the constant is folded into the operation.
-
-
-define double @foo_add(double %P) {
- %tmp.1 = fadd double %P, 1.230000e+02 ; <double> [#uses=1]
- ret double %tmp.1
-}
-; CHECK: foo_add:
-; CHECK: fadd DWORD PTR
-
-define double @foo_mul(double %P) {
- %tmp.1 = fmul double %P, 1.230000e+02 ; <double> [#uses=1]
- ret double %tmp.1
-}
-; CHECK: foo_mul:
-; CHECK: fmul DWORD PTR
-
-define double @foo_sub(double %P) {
- %tmp.1 = fsub double %P, 1.230000e+02 ; <double> [#uses=1]
- ret double %tmp.1
-}
-; CHECK: foo_sub:
-; CHECK: fadd DWORD PTR
-
-define double @foo_subr(double %P) {
- %tmp.1 = fsub double 1.230000e+02, %P ; <double> [#uses=1]
- ret double %tmp.1
-}
-; CHECK: foo_subr:
-; CHECK: fsub QWORD PTR
-
-define double @foo_div(double %P) {
- %tmp.1 = fdiv double %P, 1.230000e+02 ; <double> [#uses=1]
- ret double %tmp.1
-}
-; CHECK: foo_div:
-; CHECK: fdiv DWORD PTR
-
-define double @foo_divr(double %P) {
- %tmp.1 = fdiv double 1.230000e+02, %P ; <double> [#uses=1]
- ret double %tmp.1
-}
-; CHECK: foo_divr:
-; CHECK: fdiv QWORD PTR
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fp_load_cast_fold.ll b/libclamav/c++/llvm/test/CodeGen/X86/fp_load_cast_fold.ll
deleted file mode 100644
index a160ac6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fp_load_cast_fold.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86 | grep fild | not grep ESP
-
-define double @short(i16* %P) {
- %V = load i16* %P ; <i16> [#uses=1]
- %V2 = sitofp i16 %V to double ; <double> [#uses=1]
- ret double %V2
-}
-
-define double @int(i32* %P) {
- %V = load i32* %P ; <i32> [#uses=1]
- %V2 = sitofp i32 %V to double ; <double> [#uses=1]
- ret double %V2
-}
-
-define double @long(i64* %P) {
- %V = load i64* %P ; <i64> [#uses=1]
- %V2 = sitofp i64 %V to double ; <double> [#uses=1]
- ret double %V2
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fp_load_fold.ll b/libclamav/c++/llvm/test/CodeGen/X86/fp_load_fold.ll
deleted file mode 100644
index 0145069..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fp_load_fold.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
-; RUN: grep -i ST | not grep {fadd\\|fsub\\|fdiv\\|fmul}
-
-; Test that the load of the memory location is folded into the operation.
-
-define double @test_add(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
- %R = fadd double %X, %Y ; <double> [#uses=1]
- ret double %R
-}
-
-define double @test_mul(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
- %R = fmul double %X, %Y ; <double> [#uses=1]
- ret double %R
-}
-
-define double @test_sub(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
- %R = fsub double %X, %Y ; <double> [#uses=1]
- ret double %R
-}
-
-define double @test_subr(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
- %R = fsub double %Y, %X ; <double> [#uses=1]
- ret double %R
-}
-
-define double @test_div(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
- %R = fdiv double %X, %Y ; <double> [#uses=1]
- ret double %R
-}
-
-define double @test_divr(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
- %R = fdiv double %Y, %X ; <double> [#uses=1]
- ret double %R
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/fsxor-alignment.ll b/libclamav/c++/llvm/test/CodeGen/X86/fsxor-alignment.ll
deleted file mode 100644
index 6a8dbcf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/fsxor-alignment.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -enable-unsafe-fp-math | \
-; RUN: grep -v sp | grep xorps | count 2
-
-; Don't fold the incoming stack arguments into the xorps instructions used
-; to do floating-point negations, because the arguments aren't vectors
-; and aren't vector-aligned.
-
-define void @foo(float* %p, float* %q, float %s, float %y) {
- %ss = fsub float -0.0, %s
- %yy = fsub float -0.0, %y
- store float %ss, float* %p
- store float %yy, float* %q
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/full-lsr.ll b/libclamav/c++/llvm/test/CodeGen/X86/full-lsr.ll
deleted file mode 100644
index ff9b1b0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/full-lsr.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -march=x86 >%t
-
-; RUN: grep {addl \\\$4,} %t | count 3
-; RUN: not grep {,%} %t
-
-define void @foo(float* nocapture %A, float* nocapture %B, float* nocapture %C, i32 %N) nounwind {
-entry:
- %0 = icmp sgt i32 %N, 0 ; <i1> [#uses=1]
- br i1 %0, label %bb, label %return
-
-bb: ; preds = %bb, %entry
- %i.03 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=5]
- %1 = getelementptr float* %A, i32 %i.03 ; <float*> [#uses=1]
- %2 = load float* %1, align 4 ; <float> [#uses=1]
- %3 = getelementptr float* %B, i32 %i.03 ; <float*> [#uses=1]
- %4 = load float* %3, align 4 ; <float> [#uses=1]
- %5 = fadd float %2, %4 ; <float> [#uses=1]
- %6 = getelementptr float* %C, i32 %i.03 ; <float*> [#uses=1]
- store float %5, float* %6, align 4
- %7 = add i32 %i.03, 10 ; <i32> [#uses=3]
- %8 = getelementptr float* %A, i32 %7 ; <float*> [#uses=1]
- %9 = load float* %8, align 4 ; <float> [#uses=1]
- %10 = getelementptr float* %B, i32 %7 ; <float*> [#uses=1]
- %11 = load float* %10, align 4 ; <float> [#uses=1]
- %12 = fadd float %9, %11 ; <float> [#uses=1]
- %13 = getelementptr float* %C, i32 %7 ; <float*> [#uses=1]
- store float %12, float* %13, align 4
- %indvar.next = add i32 %i.03, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb
-
-return: ; preds = %bb, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/ga-offset.ll b/libclamav/c++/llvm/test/CodeGen/X86/ga-offset.ll
deleted file mode 100644
index 9f6d3f7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/ga-offset.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-; RUN: not grep lea %t
-; RUN: not grep add %t
-; RUN: grep mov %t | count 1
-; RUN: llc < %s -mtriple=x86_64-linux -relocation-model=static > %t
-; RUN: not grep lea %t
-; RUN: not grep add %t
-; RUN: grep mov %t | count 1
-
-; This store should fold to a single mov instruction.
-
-@ptr = global i32* null
-@dst = global [131072 x i32] zeroinitializer
-
-define void @foo() nounwind {
- store i32* getelementptr ([131072 x i32]* @dst, i32 0, i32 16), i32** @ptr
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/ghc-cc.ll b/libclamav/c++/llvm/test/CodeGen/X86/ghc-cc.ll
deleted file mode 100644
index 9393cf5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/ghc-cc.ll
+++ /dev/null
@@ -1,45 +0,0 @@
-; RUN: llc < %s -tailcallopt -mtriple=i686-linux-gnu | FileCheck %s
-
-; Test the GHC call convention works (x86-32)
-
-@base = external global i32 ; assigned to register: EBX
-@sp = external global i32 ; assigned to register: EBP
-@hp = external global i32 ; assigned to register: EDI
-@r1 = external global i32 ; assigned to register: ESI
-
-define void @zap(i32 %a, i32 %b) nounwind {
-entry:
- ; CHECK: movl {{[0-9]*}}(%esp), %ebx
- ; CHECK-NEXT: movl {{[0-9]*}}(%esp), %ebp
- ; CHECK-NEXT: call addtwo
- %0 = call cc 10 i32 @addtwo(i32 %a, i32 %b)
- ; CHECK: call foo
- call void @foo() nounwind
- ret void
-}
-
-define cc 10 i32 @addtwo(i32 %x, i32 %y) nounwind {
-entry:
- ; CHECK: leal (%ebx,%ebp), %eax
- %0 = add i32 %x, %y
- ; CHECK-NEXT: ret
- ret i32 %0
-}
-
-define cc 10 void @foo() nounwind {
-entry:
- ; CHECK: movl base, %ebx
- ; CHECK-NEXT: movl sp, %ebp
- ; CHECK-NEXT: movl hp, %edi
- ; CHECK-NEXT: movl r1, %esi
- %0 = load i32* @r1
- %1 = load i32* @hp
- %2 = load i32* @sp
- %3 = load i32* @base
- ; CHECK: jmp bar
- tail call cc 10 void @bar( i32 %3, i32 %2, i32 %1, i32 %0 ) nounwind
- ret void
-}
-
-declare cc 10 void @bar(i32, i32, i32, i32)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/ghc-cc64.ll b/libclamav/c++/llvm/test/CodeGen/X86/ghc-cc64.ll
deleted file mode 100644
index fcf7e17..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/ghc-cc64.ll
+++ /dev/null
@@ -1,86 +0,0 @@
-; RUN: llc < %s -tailcallopt -mtriple=x86_64-linux-gnu | FileCheck %s
-
-; Check the GHC call convention works (x86-64)
-
-@base = external global i64 ; assigned to register: R13
-@sp = external global i64 ; assigned to register: RBP
-@hp = external global i64 ; assigned to register: R12
-@r1 = external global i64 ; assigned to register: RBX
-@r2 = external global i64 ; assigned to register: R14
-@r3 = external global i64 ; assigned to register: RSI
-@r4 = external global i64 ; assigned to register: RDI
-@r5 = external global i64 ; assigned to register: R8
-@r6 = external global i64 ; assigned to register: R9
-@splim = external global i64 ; assigned to register: R15
-
-@f1 = external global float ; assigned to register: XMM1
-@f2 = external global float ; assigned to register: XMM2
-@f3 = external global float ; assigned to register: XMM3
-@f4 = external global float ; assigned to register: XMM4
-@d1 = external global double ; assigned to register: XMM5
-@d2 = external global double ; assigned to register: XMM6
-
-define void @zap(i64 %a, i64 %b) nounwind {
-entry:
- ; CHECK: movq %rdi, %r13
- ; CHECK-NEXT: movq %rsi, %rbp
- ; CHECK-NEXT: callq addtwo
- %0 = call cc 10 i64 @addtwo(i64 %a, i64 %b)
- ; CHECK: callq foo
- call void @foo() nounwind
- ret void
-}
-
-define cc 10 i64 @addtwo(i64 %x, i64 %y) nounwind {
-entry:
- ; CHECK: leaq (%r13,%rbp), %rax
- %0 = add i64 %x, %y
- ; CHECK-NEXT: ret
- ret i64 %0
-}
-
-define cc 10 void @foo() nounwind {
-entry:
- ; CHECK: movq base(%rip), %r13
- ; CHECK-NEXT: movq sp(%rip), %rbp
- ; CHECK-NEXT: movq hp(%rip), %r12
- ; CHECK-NEXT: movq r1(%rip), %rbx
- ; CHECK-NEXT: movq r2(%rip), %r14
- ; CHECK-NEXT: movq r3(%rip), %rsi
- ; CHECK-NEXT: movq r4(%rip), %rdi
- ; CHECK-NEXT: movq r5(%rip), %r8
- ; CHECK-NEXT: movq r6(%rip), %r9
- ; CHECK-NEXT: movq splim(%rip), %r15
- ; CHECK-NEXT: movss f1(%rip), %xmm1
- ; CHECK-NEXT: movss f2(%rip), %xmm2
- ; CHECK-NEXT: movss f3(%rip), %xmm3
- ; CHECK-NEXT: movss f4(%rip), %xmm4
- ; CHECK-NEXT: movsd d1(%rip), %xmm5
- ; CHECK-NEXT: movsd d2(%rip), %xmm6
- %0 = load double* @d2
- %1 = load double* @d1
- %2 = load float* @f4
- %3 = load float* @f3
- %4 = load float* @f2
- %5 = load float* @f1
- %6 = load i64* @splim
- %7 = load i64* @r6
- %8 = load i64* @r5
- %9 = load i64* @r4
- %10 = load i64* @r3
- %11 = load i64* @r2
- %12 = load i64* @r1
- %13 = load i64* @hp
- %14 = load i64* @sp
- %15 = load i64* @base
- ; CHECK: jmp bar
- tail call cc 10 void @bar( i64 %15, i64 %14, i64 %13, i64 %12, i64 %11,
- i64 %10, i64 %9, i64 %8, i64 %7, i64 %6,
- float %5, float %4, float %3, float %2, double %1,
- double %0 ) nounwind
- ret void
-}
-
-declare cc 10 void @bar(i64, i64, i64, i64, i64, i64, i64, i64, i64, i64,
- float, float, float, float, double, double)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/global-sections-tls.ll b/libclamav/c++/llvm/test/CodeGen/X86/global-sections-tls.ll
deleted file mode 100644
index 2c23030..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/global-sections-tls.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -mtriple=i386-unknown-linux-gnu | FileCheck %s -check-prefix=LINUX
-
-; PR4639
-@G1 = internal thread_local global i32 0 ; <i32*> [#uses=1]
-; LINUX: .section .tbss,"awT",@nobits
-; LINUX: G1:
-
-
-define i32* @foo() nounwind readnone {
-entry:
- ret i32* @G1
-}
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/global-sections.ll b/libclamav/c++/llvm/test/CodeGen/X86/global-sections.ll
deleted file mode 100644
index d79c56b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/global-sections.ll
+++ /dev/null
@@ -1,136 +0,0 @@
-; RUN: llc < %s -mtriple=i386-unknown-linux-gnu | FileCheck %s -check-prefix=LINUX
-; RUN: llc < %s -mtriple=i386-apple-darwin9.7 | FileCheck %s -check-prefix=DARWIN
-
-
-; int G1;
-@G1 = common global i32 0
-
-; LINUX: .type G1,@object
-; LINUX: .comm G1,4,4
-
-; DARWIN: .comm _G1,4,2
-
-
-
-
-; const int G2 __attribute__((weak)) = 42;
-@G2 = weak_odr constant i32 42
-
-
-; TODO: linux drops this into .rodata, we drop it into ".gnu.linkonce.r.G2"
-
-; DARWIN: .section __TEXT,__const_coal,coalesced
-; DARWIN: _G2:
-; DARWIN: .long 42
-
-
-; int * const G3 = &G1;
-@G3 = constant i32* @G1
-
-; DARWIN: .section __DATA,__const
-; DARWIN: .globl _G3
-; DARWIN: _G3:
-; DARWIN: .long _G1
-
-
-; _Complex long long const G4 = 34;
-@G4 = constant {i64,i64} { i64 34, i64 0 }
-
-; DARWIN: .section __TEXT,__const
-; DARWIN: _G4:
-; DARWIN: .long 34
-
-
-; int G5 = 47;
-@G5 = global i32 47
-
-; LINUX: .data
-; LINUX: .globl G5
-; LINUX: G5:
-; LINUX: .long 47
-
-; DARWIN: .section __DATA,__data
-; DARWIN: .globl _G5
-; DARWIN: _G5:
-; DARWIN: .long 47
-
-
-; PR4584
-@"foo bar" = linkonce global i32 42
-
-; LINUX: .type foo_20_bar,@object
-; LINUX:.section .gnu.linkonce.d.foo_20_bar,"aw",@progbits
-; LINUX: .weak foo_20_bar
-; LINUX: foo_20_bar:
-
-; DARWIN: .section __DATA,__datacoal_nt,coalesced
-; DARWIN: .globl "_foo bar"
-; DARWIN: .weak_definition "_foo bar"
-; DARWIN: "_foo bar":
-
-; PR4650
-@G6 = weak_odr constant [1 x i8] c"\01"
-
-; LINUX: .type G6,@object
-; LINUX: .section .gnu.linkonce.r.G6,"a",@progbits
-; LINUX: .weak G6
-; LINUX: G6:
-; LINUX: .byte 1
-; LINUX: .size G6, 1
-
-; DARWIN: .section __TEXT,__const_coal,coalesced
-; DARWIN: .globl _G6
-; DARWIN: .weak_definition _G6
-; DARWIN:_G6:
-; DARWIN: .byte 1
-
-
-@G7 = constant [10 x i8] c"abcdefghi\00"
-
-; DARWIN: __TEXT,__cstring,cstring_literals
-; DARWIN: .globl _G7
-; DARWIN: _G7:
-; DARWIN: .asciz "abcdefghi"
-
-; LINUX: .section .rodata.str1.1,"aMS",@progbits,1
-; LINUX: .globl G7
-; LINUX: G7:
-; LINUX: .asciz "abcdefghi"
-
-
-@G8 = constant [4 x i16] [ i16 1, i16 2, i16 3, i16 0 ]
-
-; DARWIN: .section __TEXT,__const
-; DARWIN: .globl _G8
-; DARWIN: _G8:
-
-; LINUX: .section .rodata.str2.2,"aMS",@progbits,2
-; LINUX: .globl G8
-; LINUX:G8:
-
-@G9 = constant [4 x i32] [ i32 1, i32 2, i32 3, i32 0 ]
-
-; DARWIN: .globl _G9
-; DARWIN: _G9:
-
-; LINUX: .section .rodata.str4.4,"aMS",@progbits,4
-; LINUX: .globl G9
-; LINUX:G9
-
-
-@G10 = weak global [100 x i32] zeroinitializer, align 32 ; <[100 x i32]*> [#uses=0]
-
-
-; DARWIN: .section __DATA,__datacoal_nt,coalesced
-; DARWIN: .globl _G10
-; DARWIN: .weak_definition _G10
-; DARWIN: .align 5
-; DARWIN: _G10:
-; DARWIN: .space 400
-
-; LINUX: .bss
-; LINUX: .weak G10
-; LINUX: .align 32
-; LINUX: G10:
-; LINUX: .zero 400
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/h-register-addressing-32.ll b/libclamav/c++/llvm/test/CodeGen/X86/h-register-addressing-32.ll
deleted file mode 100644
index 76ffd66..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/h-register-addressing-32.ll
+++ /dev/null
@@ -1,53 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {movzbl %\[abcd\]h,} | count 7
-
-; Use h-register extract and zero-extend.
-
-define double @foo8(double* nocapture inreg %p, i32 inreg %x) nounwind readonly {
- %t0 = lshr i32 %x, 8
- %t1 = and i32 %t0, 255
- %t2 = getelementptr double* %p, i32 %t1
- %t3 = load double* %t2, align 8
- ret double %t3
-}
-define float @foo4(float* nocapture inreg %p, i32 inreg %x) nounwind readonly {
- %t0 = lshr i32 %x, 8
- %t1 = and i32 %t0, 255
- %t2 = getelementptr float* %p, i32 %t1
- %t3 = load float* %t2, align 8
- ret float %t3
-}
-define i16 @foo2(i16* nocapture inreg %p, i32 inreg %x) nounwind readonly {
- %t0 = lshr i32 %x, 8
- %t1 = and i32 %t0, 255
- %t2 = getelementptr i16* %p, i32 %t1
- %t3 = load i16* %t2, align 8
- ret i16 %t3
-}
-define i8 @foo1(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
- %t0 = lshr i32 %x, 8
- %t1 = and i32 %t0, 255
- %t2 = getelementptr i8* %p, i32 %t1
- %t3 = load i8* %t2, align 8
- ret i8 %t3
-}
-define i8 @bar8(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
- %t0 = lshr i32 %x, 5
- %t1 = and i32 %t0, 2040
- %t2 = getelementptr i8* %p, i32 %t1
- %t3 = load i8* %t2, align 8
- ret i8 %t3
-}
-define i8 @bar4(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
- %t0 = lshr i32 %x, 6
- %t1 = and i32 %t0, 1020
- %t2 = getelementptr i8* %p, i32 %t1
- %t3 = load i8* %t2, align 8
- ret i8 %t3
-}
-define i8 @bar2(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
- %t0 = lshr i32 %x, 7
- %t1 = and i32 %t0, 510
- %t2 = getelementptr i8* %p, i32 %t1
- %t3 = load i8* %t2, align 8
- ret i8 %t3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/h-register-addressing-64.ll b/libclamav/c++/llvm/test/CodeGen/X86/h-register-addressing-64.ll
deleted file mode 100644
index 98817f3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/h-register-addressing-64.ll
+++ /dev/null
@@ -1,53 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep {movzbl %\[abcd\]h,} | count 7
-
-; Use h-register extract and zero-extend.
-
-define double @foo8(double* nocapture inreg %p, i64 inreg %x) nounwind readonly {
- %t0 = lshr i64 %x, 8
- %t1 = and i64 %t0, 255
- %t2 = getelementptr double* %p, i64 %t1
- %t3 = load double* %t2, align 8
- ret double %t3
-}
-define float @foo4(float* nocapture inreg %p, i64 inreg %x) nounwind readonly {
- %t0 = lshr i64 %x, 8
- %t1 = and i64 %t0, 255
- %t2 = getelementptr float* %p, i64 %t1
- %t3 = load float* %t2, align 8
- ret float %t3
-}
-define i16 @foo2(i16* nocapture inreg %p, i64 inreg %x) nounwind readonly {
- %t0 = lshr i64 %x, 8
- %t1 = and i64 %t0, 255
- %t2 = getelementptr i16* %p, i64 %t1
- %t3 = load i16* %t2, align 8
- ret i16 %t3
-}
-define i8 @foo1(i8* nocapture inreg %p, i64 inreg %x) nounwind readonly {
- %t0 = lshr i64 %x, 8
- %t1 = and i64 %t0, 255
- %t2 = getelementptr i8* %p, i64 %t1
- %t3 = load i8* %t2, align 8
- ret i8 %t3
-}
-define i8 @bar8(i8* nocapture inreg %p, i64 inreg %x) nounwind readonly {
- %t0 = lshr i64 %x, 5
- %t1 = and i64 %t0, 2040
- %t2 = getelementptr i8* %p, i64 %t1
- %t3 = load i8* %t2, align 8
- ret i8 %t3
-}
-define i8 @bar4(i8* nocapture inreg %p, i64 inreg %x) nounwind readonly {
- %t0 = lshr i64 %x, 6
- %t1 = and i64 %t0, 1020
- %t2 = getelementptr i8* %p, i64 %t1
- %t3 = load i8* %t2, align 8
- ret i8 %t3
-}
-define i8 @bar2(i8* nocapture inreg %p, i64 inreg %x) nounwind readonly {
- %t0 = lshr i64 %x, 7
- %t1 = and i64 %t0, 510
- %t2 = getelementptr i8* %p, i64 %t1
- %t3 = load i8* %t2, align 8
- ret i8 %t3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/h-register-store.ll b/libclamav/c++/llvm/test/CodeGen/X86/h-register-store.ll
deleted file mode 100644
index d30e6b3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/h-register-store.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: grep mov %t | count 6
-; RUN: grep {movb %ah, (%rsi)} %t | count 3
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep mov %t | count 3
-; RUN: grep {movb %ah, (%e} %t | count 3
-
-; Use h-register extract and store.
-
-define void @foo16(i16 inreg %p, i8* inreg %z) nounwind {
- %q = lshr i16 %p, 8
- %t = trunc i16 %q to i8
- store i8 %t, i8* %z
- ret void
-}
-define void @foo32(i32 inreg %p, i8* inreg %z) nounwind {
- %q = lshr i32 %p, 8
- %t = trunc i32 %q to i8
- store i8 %t, i8* %z
- ret void
-}
-define void @foo64(i64 inreg %p, i8* inreg %z) nounwind {
- %q = lshr i64 %p, 8
- %t = trunc i64 %q to i8
- store i8 %t, i8* %z
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/h-registers-0.ll b/libclamav/c++/llvm/test/CodeGen/X86/h-registers-0.ll
deleted file mode 100644
index 878fd93..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/h-registers-0.ll
+++ /dev/null
@@ -1,48 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep {movzbl %\[abcd\]h,} | count 4
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep {incb %ah} %t | count 3
-; RUN: grep {movzbl %ah,} %t | count 3
-
-; Use h registers. On x86-64, codegen doesn't support general allocation
-; of h registers yet, due to x86 encoding complications.
-
-define void @bar64(i64 inreg %x, i8* inreg %p) nounwind {
- %t0 = lshr i64 %x, 8
- %t1 = trunc i64 %t0 to i8
- %t2 = add i8 %t1, 1
- store i8 %t2, i8* %p
- ret void
-}
-
-define void @bar32(i32 inreg %x, i8* inreg %p) nounwind {
- %t0 = lshr i32 %x, 8
- %t1 = trunc i32 %t0 to i8
- %t2 = add i8 %t1, 1
- store i8 %t2, i8* %p
- ret void
-}
-
-define void @bar16(i16 inreg %x, i8* inreg %p) nounwind {
- %t0 = lshr i16 %x, 8
- %t1 = trunc i16 %t0 to i8
- %t2 = add i8 %t1, 1
- store i8 %t2, i8* %p
- ret void
-}
-
-define i64 @qux64(i64 inreg %x) nounwind {
- %t0 = lshr i64 %x, 8
- %t1 = and i64 %t0, 255
- ret i64 %t1
-}
-
-define i32 @qux32(i32 inreg %x) nounwind {
- %t0 = lshr i32 %x, 8
- %t1 = and i32 %t0, 255
- ret i32 %t1
-}
-
-define i16 @qux16(i16 inreg %x) nounwind {
- %t0 = lshr i16 %x, 8
- ret i16 %t0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/h-registers-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/h-registers-1.ll
deleted file mode 100644
index e97ebab..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/h-registers-1.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: grep {movzbl %\[abcd\]h,} %t | count 8
-; RUN: grep {%\[abcd\]h} %t | not grep {%r\[\[:digit:\]\]*d}
-
-; LLVM creates virtual registers for values live across blocks
-; based on the type of the value. Make sure that the extracts
-; here use the GR64_NOREX register class for their result,
-; instead of plain GR64.
-
-define i64 @foo(i64 %a, i64 %b, i64 %c, i64 %d,
- i64 %e, i64 %f, i64 %g, i64 %h) {
- %sa = lshr i64 %a, 8
- %A = and i64 %sa, 255
- %sb = lshr i64 %b, 8
- %B = and i64 %sb, 255
- %sc = lshr i64 %c, 8
- %C = and i64 %sc, 255
- %sd = lshr i64 %d, 8
- %D = and i64 %sd, 255
- %se = lshr i64 %e, 8
- %E = and i64 %se, 255
- %sf = lshr i64 %f, 8
- %F = and i64 %sf, 255
- %sg = lshr i64 %g, 8
- %G = and i64 %sg, 255
- %sh = lshr i64 %h, 8
- %H = and i64 %sh, 255
- br label %next
-
-next:
- %u = add i64 %A, %B
- %v = add i64 %C, %D
- %w = add i64 %E, %F
- %x = add i64 %G, %H
- %y = add i64 %u, %v
- %z = add i64 %w, %x
- %t = add i64 %y, %z
- ret i64 %t
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/h-registers-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/h-registers-2.ll
deleted file mode 100644
index 16e13f8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/h-registers-2.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep {movzbl %\[abcd\]h,} %t | count 1
-; RUN: grep {shll \$3,} %t | count 1
-
-; Use an h register, but don't omit the explicit shift for
-; non-address use(s).
-
-define i32 @foo(i8* %x, i32 %y) nounwind {
- %t0 = lshr i32 %y, 8 ; <i32> [#uses=1]
- %t1 = and i32 %t0, 255 ; <i32> [#uses=2]
- %t2 = shl i32 %t1, 3
- %t3 = getelementptr i8* %x, i32 %t2 ; <i8*> [#uses=1]
- store i8 77, i8* %t3, align 4
- ret i32 %t2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/h-registers-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/h-registers-3.ll
deleted file mode 100644
index 8a0b07b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/h-registers-3.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | count 1
-; RUN: llc < %s -march=x86-64 | grep mov | count 1
-
-define zeroext i8 @foo() nounwind ssp {
-entry:
- %0 = tail call zeroext i16 (...)* @bar() nounwind
- %1 = lshr i16 %0, 8
- %2 = trunc i16 %1 to i8
- ret i8 %2
-}
-
-declare zeroext i16 @bar(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/hidden-vis-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/hidden-vis-2.ll
deleted file mode 100644
index 74554d1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/hidden-vis-2.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9 | grep mov | count 1
-; RUN: llc < %s -mtriple=x86_64-apple-darwin9 | not grep GOT
-
-@x = weak hidden global i32 0 ; <i32*> [#uses=1]
-
-define i32 @t() nounwind readonly {
-entry:
- %0 = load i32* @x, align 4 ; <i32> [#uses=1]
- ret i32 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/hidden-vis-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/hidden-vis-3.ll
deleted file mode 100644
index 4be881e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/hidden-vis-3.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9 | FileCheck %s -check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin9 | FileCheck %s -check-prefix=X64
-
-@x = external hidden global i32 ; <i32*> [#uses=1]
-@y = extern_weak hidden global i32 ; <i32*> [#uses=1]
-
-define i32 @t() nounwind readonly {
-entry:
-; X32: _t:
-; X32: movl _y, %eax
-
-; X64: _t:
-; X64: movl _y(%rip), %eax
-
- %0 = load i32* @x, align 4 ; <i32> [#uses=1]
- %1 = load i32* @y, align 4 ; <i32> [#uses=1]
- %2 = add i32 %1, %0 ; <i32> [#uses=1]
- ret i32 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/hidden-vis-4.ll b/libclamav/c++/llvm/test/CodeGen/X86/hidden-vis-4.ll
deleted file mode 100644
index a8aede5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/hidden-vis-4.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9 | FileCheck %s
-
-@x = common hidden global i32 0 ; <i32*> [#uses=1]
-
-define i32 @t() nounwind readonly {
-entry:
-; CHECK: t:
-; CHECK: movl _x, %eax
-; CHECK: .comm _x,4
- %0 = load i32* @x, align 4 ; <i32> [#uses=1]
- ret i32 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/hidden-vis-5.ll b/libclamav/c++/llvm/test/CodeGen/X86/hidden-vis-5.ll
deleted file mode 100644
index 88fae37..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/hidden-vis-5.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9 -relocation-model=pic -disable-fp-elim -unwind-tables | FileCheck %s
-; <rdar://problem/7383328>
-
-@.str = private constant [12 x i8] c"hello world\00", align 1 ; <[12 x i8]*> [#uses=1]
-
-define hidden void @func() nounwind ssp {
-entry:
- %0 = call i32 @puts(i8* getelementptr inbounds ([12 x i8]* @.str, i64 0, i64 0)) nounwind ; <i32> [#uses=0]
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i32 @puts(i8*)
-
-define hidden i32 @main() nounwind ssp {
-entry:
- %retval = alloca i32 ; <i32*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- call void @func() nounwind
- br label %return
-
-return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval1
-}
-
-; CHECK: .private_extern _func.eh
-; CHECK: .private_extern _main.eh
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/hidden-vis.ll b/libclamav/c++/llvm/test/CodeGen/X86/hidden-vis.ll
deleted file mode 100644
index a948bdf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/hidden-vis.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -mtriple=i686-pc-linux-gnu | FileCheck %s -check-prefix=LINUX
-; RUN: llc < %s -mtriple=i686-apple-darwin8 | FileCheck %s -check-prefix=DARWIN
-
-@a = hidden global i32 0
-@b = external global i32
-
-define weak hidden void @t1() nounwind {
-; LINUX: .hidden t1
-; LINUX: t1:
-
-; DARWIN: .private_extern _t1
-; DARWIN: t1:
- ret void
-}
-
-define weak void @t2() nounwind {
-; LINUX: t2:
-; LINUX: .hidden a
-
-; DARWIN: t2:
-; DARWIN: .private_extern _a
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/i128-and-beyond.ll b/libclamav/c++/llvm/test/CodeGen/X86/i128-and-beyond.ll
deleted file mode 100644
index b741681..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/i128-and-beyond.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-pc-linux-gnu | grep -- -1 | count 14
-
-; These static initializers are too big to hand off to assemblers
-; as monolithic blobs.
-
-@x = global i128 -1
-@y = global i256 -1
-@z = global i512 -1
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/i128-immediate.ll b/libclamav/c++/llvm/test/CodeGen/X86/i128-immediate.ll
deleted file mode 100644
index c47569e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/i128-immediate.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movq | count 2
-
-define i128 @__addvti3() {
- ret i128 -1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/i128-mul.ll b/libclamav/c++/llvm/test/CodeGen/X86/i128-mul.ll
deleted file mode 100644
index e9d30d6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/i128-mul.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86-64
-; PR1198
-
-define i64 @foo(i64 %x, i64 %y) {
- %tmp0 = zext i64 %x to i128
- %tmp1 = zext i64 %y to i128
- %tmp2 = mul i128 %tmp0, %tmp1
- %tmp7 = zext i32 64 to i128
- %tmp3 = lshr i128 %tmp2, %tmp7
- %tmp4 = trunc i128 %tmp3 to i64
- ret i64 %tmp4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/i128-ret.ll b/libclamav/c++/llvm/test/CodeGen/X86/i128-ret.ll
deleted file mode 100644
index 277f428..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/i128-ret.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep {movq 8(%rdi), %rdx}
-; RUN: llc < %s -march=x86-64 | grep {movq (%rdi), %rax}
-
-define i128 @test(i128 *%P) {
- %A = load i128* %P
- ret i128 %A
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/i256-add.ll b/libclamav/c++/llvm/test/CodeGen/X86/i256-add.ll
deleted file mode 100644
index 5a7a7a7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/i256-add.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep adcl %t | count 7
-; RUN: grep sbbl %t | count 7
-
-define void @add(i256* %p, i256* %q) nounwind {
- %a = load i256* %p
- %b = load i256* %q
- %c = add i256 %a, %b
- store i256 %c, i256* %p
- ret void
-}
-define void @sub(i256* %p, i256* %q) nounwind {
- %a = load i256* %p
- %b = load i256* %q
- %c = sub i256 %a, %b
- store i256 %c, i256* %p
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/i2k.ll b/libclamav/c++/llvm/test/CodeGen/X86/i2k.ll
deleted file mode 100644
index 6116c2e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/i2k.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define void @foo(i2011* %x, i2011* %y, i2011* %p) nounwind {
- %a = load i2011* %x
- %b = load i2011* %y
- %c = add i2011 %a, %b
- store i2011 %c, i2011* %p
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/i64-mem-copy.ll b/libclamav/c++/llvm/test/CodeGen/X86/i64-mem-copy.ll
deleted file mode 100644
index 847e209..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/i64-mem-copy.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep {movq.*(%rsi), %rax}
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep {movsd.*(%eax),}
-
-; Uses movsd to load / store i64 values if sse2 is available.
-
-; rdar://6659858
-
-define void @foo(i64* %x, i64* %y) nounwind {
-entry:
- %tmp1 = load i64* %y, align 8 ; <i64> [#uses=1]
- store i64 %tmp1, i64* %x, align 8
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/iabs.ll b/libclamav/c++/llvm/test/CodeGen/X86/iabs.ll
deleted file mode 100644
index 6a79ee8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/iabs.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86-64 -stats |& \
-; RUN: grep {6 .*Number of machine instrs printed}
-
-;; Integer absolute value, should produce something at least as good as:
-;; movl %edi, %eax
-;; sarl $31, %eax
-;; addl %eax, %edi
-;; xorl %eax, %edi
-;; movl %edi, %eax
-;; ret
-define i32 @test(i32 %a) nounwind {
- %tmp1neg = sub i32 0, %a
- %b = icmp sgt i32 %a, -1
- %abs = select i1 %b, i32 %a, i32 %tmp1neg
- ret i32 %abs
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/illegal-insert.ll b/libclamav/c++/llvm/test/CodeGen/X86/illegal-insert.ll
deleted file mode 100644
index dbf1b14..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/illegal-insert.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86-64
-
-define <4 x double> @foo0(<4 x double> %t) {
- %r = insertelement <4 x double> %t, double 2.3, i32 0
- ret <4 x double> %r
-}
-define <4 x double> @foo1(<4 x double> %t) {
- %r = insertelement <4 x double> %t, double 2.3, i32 1
- ret <4 x double> %r
-}
-define <4 x double> @foo2(<4 x double> %t) {
- %r = insertelement <4 x double> %t, double 2.3, i32 2
- ret <4 x double> %r
-}
-define <4 x double> @foo3(<4 x double> %t) {
- %r = insertelement <4 x double> %t, double 2.3, i32 3
- ret <4 x double> %r
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/illegal-vector-args-return.ll b/libclamav/c++/llvm/test/CodeGen/X86/illegal-vector-args-return.ll
deleted file mode 100644
index cecf77a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/illegal-vector-args-return.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep {mulpd %xmm3, %xmm1}
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep {mulpd %xmm2, %xmm0}
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep {addps %xmm3, %xmm1}
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep {addps %xmm2, %xmm0}
-
-define <4 x double> @foo(<4 x double> %x, <4 x double> %z) {
- %y = fmul <4 x double> %x, %z
- ret <4 x double> %y
-}
-
-define <8 x float> @bar(<8 x float> %x, <8 x float> %z) {
- %y = fadd <8 x float> %x, %z
- ret <8 x float> %y
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/imp-def-copies.ll b/libclamav/c++/llvm/test/CodeGen/X86/imp-def-copies.ll
deleted file mode 100644
index 9117840..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/imp-def-copies.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep mov
-
- %struct.active_line = type { %struct.gs_fixed_point, %struct.gs_fixed_point, i32, i32, i32, %struct.line_segment*, i32, i16, i16, %struct.active_line*, %struct.active_line* }
- %struct.gs_fixed_point = type { i32, i32 }
- %struct.line_list = type { %struct.active_line*, i32, i16, %struct.active_line*, %struct.active_line*, %struct.active_line*, %struct.active_line, i32 }
- %struct.line_segment = type { %struct.line_segment*, %struct.line_segment*, i32, %struct.gs_fixed_point }
- %struct.subpath = type { %struct.line_segment*, %struct.line_segment*, i32, %struct.gs_fixed_point, %struct.line_segment*, i32, i32, i8 }
-
-define fastcc void @add_y_list(%struct.subpath* %ppath.0.4.val, i16 signext %tag, %struct.line_list* %ll, i32 %pbox.0.0.1.val, i32 %pbox.0.1.0.val, i32 %pbox.0.1.1.val) nounwind {
-entry:
- br i1 false, label %return, label %bb
-bb: ; preds = %bb280, %entry
- %psub.1.reg2mem.0 = phi %struct.subpath* [ %psub.0.reg2mem.0, %bb280 ], [ undef, %entry ] ; <%struct.subpath*> [#uses=1]
- %plast.1.reg2mem.0 = phi %struct.line_segment* [ %plast.0.reg2mem.0, %bb280 ], [ undef, %entry ] ; <%struct.line_segment*> [#uses=1]
- %prev_dir.0.reg2mem.0 = phi i32 [ %dir.0.reg2mem.0, %bb280 ], [ undef, %entry ] ; <i32> [#uses=1]
- br i1 false, label %bb280, label %bb109
-bb109: ; preds = %bb
- %tmp113 = icmp sgt i32 0, %prev_dir.0.reg2mem.0 ; <i1> [#uses=1]
- br i1 %tmp113, label %bb116, label %bb280
-bb116: ; preds = %bb109
- ret void
-bb280: ; preds = %bb109, %bb
- %psub.0.reg2mem.0 = phi %struct.subpath* [ null, %bb ], [ %psub.1.reg2mem.0, %bb109 ] ; <%struct.subpath*> [#uses=1]
- %plast.0.reg2mem.0 = phi %struct.line_segment* [ null, %bb ], [ %plast.1.reg2mem.0, %bb109 ] ; <%struct.line_segment*> [#uses=1]
- %dir.0.reg2mem.0 = phi i32 [ 0, %bb ], [ 0, %bb109 ] ; <i32> [#uses=1]
- br i1 false, label %return, label %bb
-return: ; preds = %bb280, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/imul-lea-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/imul-lea-2.ll
deleted file mode 100644
index 1cb54b3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/imul-lea-2.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep lea | count 3
-; RUN: llc < %s -march=x86-64 | grep shl | count 1
-; RUN: llc < %s -march=x86-64 | not grep imul
-
-define i64 @t1(i64 %a) nounwind readnone {
-entry:
- %0 = mul i64 %a, 81 ; <i64> [#uses=1]
- ret i64 %0
-}
-
-define i64 @t2(i64 %a) nounwind readnone {
-entry:
- %0 = mul i64 %a, 40 ; <i64> [#uses=1]
- ret i64 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/imul-lea.ll b/libclamav/c++/llvm/test/CodeGen/X86/imul-lea.ll
deleted file mode 100644
index 4e8e2af..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/imul-lea.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 | grep lea
-
-declare i32 @foo()
-
-define i32 @test() {
- %tmp.0 = tail call i32 @foo( ) ; <i32> [#uses=1]
- %tmp.1 = mul i32 %tmp.0, 9 ; <i32> [#uses=1]
- ret i32 %tmp.1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-2addr.ll b/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-2addr.ll
deleted file mode 100644
index 4a2c7fc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-2addr.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86-64 | not grep movq
-
-define i64 @t(i64 %a, i64 %b) nounwind ssp {
-entry:
- %asmtmp = tail call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 1, i64 %a) nounwind ; <i64> [#uses=1]
- %asmtmp1 = tail call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 1, i64 %b) nounwind ; <i64> [#uses=1]
- %0 = add i64 %asmtmp1, %asmtmp ; <i64> [#uses=1]
- ret i64 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-R-constraint.ll b/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-R-constraint.ll
deleted file mode 100644
index 66c27ac..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-R-constraint.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc -march=x86-64 < %s | FileCheck %s
-; 7282062
-; ModuleID = '<stdin>'
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin10.0"
-
-define void @udiv8(i8* %quotient, i16 zeroext %a, i8 zeroext %b, i8 zeroext %c, i8* %remainder) nounwind ssp {
-entry:
-; CHECK: udiv8:
-; CHECK-NOT: movb %ah, (%r8)
- %a_addr = alloca i16, align 2 ; <i16*> [#uses=2]
- %b_addr = alloca i8, align 1 ; <i8*> [#uses=2]
- store i16 %a, i16* %a_addr
- store i8 %b, i8* %b_addr
- call void asm "\09\09movw\09$2, %ax\09\09\0A\09\09divb\09$3\09\09\09\0A\09\09movb\09%al, $0\09\0A\09\09movb %ah, ($4)", "=*m,=*m,*m,*m,R,~{dirflag},~{fpsr},~{flags},~{ax}"(i8* %quotient, i8* %remainder, i16* %a_addr, i8* %b_addr, i8* %remainder) nounwind
- ret void
-; CHECK: ret
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-flag-clobber.ll b/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-flag-clobber.ll
deleted file mode 100644
index 51ea843..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-flag-clobber.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc -march=x86-64 < %s | FileCheck %s
-; PR3701
-
-define i64 @t(i64* %arg) nounwind {
- br i1 true, label %1, label %5
-
-; <label>:1 ; preds = %0
- %2 = icmp eq i64* null, %arg ; <i1> [#uses=1]
- %3 = tail call i64* asm sideeffect "movl %fs:0,$0", "=r,~{dirflag},~{fpsr},~{flags}"() nounwind ; <%struct.thread*> [#uses=0]
-; CHECK: test
-; CHECK-NEXT: j
- br i1 %2, label %4, label %5
-
-; <label>:4 ; preds = %1
- ret i64 1
-
-; <label>:5 ; preds = %1
- ret i64 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-fpstack.ll b/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-fpstack.ll
deleted file mode 100644
index 09b0929..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-fpstack.ll
+++ /dev/null
@@ -1,43 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define x86_fp80 @test1() {
- %tmp85 = call x86_fp80 asm sideeffect "fld0", "={st(0)}"()
- ret x86_fp80 %tmp85
-}
-
-define double @test2() {
- %tmp85 = call double asm sideeffect "fld0", "={st(0)}"()
- ret double %tmp85
-}
-
-define void @test3(x86_fp80 %X) {
- call void asm sideeffect "frob ", "{st(0)},~{dirflag},~{fpsr},~{flags}"( x86_fp80 %X)
- ret void
-}
-
-define void @test4(double %X) {
- call void asm sideeffect "frob ", "{st(0)},~{dirflag},~{fpsr},~{flags}"( double %X)
- ret void
-}
-
-define void @test5(double %X) {
- %Y = fadd double %X, 123.0
- call void asm sideeffect "frob ", "{st(0)},~{dirflag},~{fpsr},~{flags}"( double %Y)
- ret void
-}
-
-define void @test6(double %A, double %B, double %C,
- double %D, double %E) nounwind {
-entry:
- ; Uses the same value twice, should have one fstp after the asm.
- tail call void asm sideeffect "foo $0 $1", "f,f,~{dirflag},~{fpsr},~{flags}"( double %A, double %A ) nounwind
- ; Uses two different values, should be in st(0)/st(1) and both be popped.
- tail call void asm sideeffect "bar $0 $1", "f,f,~{dirflag},~{fpsr},~{flags}"( double %B, double %C ) nounwind
- ; Uses two different values, one of which isn't killed in this asm, it
- ; should not be popped after the asm.
- tail call void asm sideeffect "baz $0 $1", "f,f,~{dirflag},~{fpsr},~{flags}"( double %D, double %E ) nounwind
- ; This is the last use of %D, so it should be popped after.
- tail call void asm sideeffect "baz $0", "f,~{dirflag},~{fpsr},~{flags}"( double %D ) nounwind
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-fpstack2.ll b/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-fpstack2.ll
deleted file mode 100644
index ffa6ee6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-fpstack2.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep {fld %%st(0)} %t
-; PR4185
-
-define void @test() {
-return:
- call void asm sideeffect "fistpl $0", "{st}"(double 1.000000e+06)
- call void asm sideeffect "fistpl $0", "{st}"(double 1.000000e+06)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-fpstack3.ll b/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-fpstack3.ll
deleted file mode 100644
index 17945fe..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-fpstack3.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep {fld %%st(0)} %t
-; PR4459
-
-declare x86_fp80 @ceil(x86_fp80)
-
-declare void @test(x86_fp80)
-
-define void @test2(x86_fp80 %a) {
-entry:
- %0 = call x86_fp80 @ceil(x86_fp80 %a)
- call void asm sideeffect "fistpl $0", "{st}"( x86_fp80 %0)
- call void @test(x86_fp80 %0 )
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-fpstack4.ll b/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-fpstack4.ll
deleted file mode 100644
index bae2970..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-fpstack4.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR4484
-
-declare x86_fp80 @ceil()
-
-declare void @test(x86_fp80)
-
-define void @test2(x86_fp80 %a) {
-entry:
- %0 = call x86_fp80 @ceil()
- call void asm sideeffect "fistpl $0", "{st},~{st}"(x86_fp80 %a)
- call void @test(x86_fp80 %0)
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-fpstack5.ll b/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-fpstack5.ll
deleted file mode 100644
index 8b219cf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-fpstack5.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR4485
-
-define void @test(x86_fp80* %a) {
-entry:
- %0 = load x86_fp80* %a, align 16
- %1 = fmul x86_fp80 %0, 0xK4006B400000000000000
- %2 = fmul x86_fp80 %1, 0xK4012F424000000000000
- tail call void asm sideeffect "fistpl $0", "{st},~{st}"(x86_fp80 %2)
- %3 = load x86_fp80* %a, align 16
- %4 = fmul x86_fp80 %3, 0xK4006B400000000000000
- %5 = fmul x86_fp80 %4, 0xK4012F424000000000000
- tail call void asm sideeffect "fistpl $0", "{st},~{st}"(x86_fp80 %5)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-modifier-n.ll b/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-modifier-n.ll
deleted file mode 100644
index 5e76b6c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-modifier-n.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 | grep { 37}
-; rdar://7008959
-
-define void @bork() nounwind {
-entry:
- tail call void asm sideeffect "BORK ${0:n}", "i,~{dirflag},~{fpsr},~{flags}"(i32 -37) nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-mrv.ll b/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-mrv.ll
deleted file mode 100644
index 78d7e77..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-mrv.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; PR2094
-; RUN: llc < %s -march=x86-64 | grep movslq
-; RUN: llc < %s -march=x86-64 | grep addps
-; RUN: llc < %s -march=x86-64 | grep paddd
-; RUN: llc < %s -march=x86-64 | not grep movq
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin8"
-
-define i32 @test1(i8* %v, i8* %blk2, i8* %blk1, i32 %stride, i32 %h) nounwind {
- %tmp12 = sext i32 %stride to i64 ; <i64> [#uses=1]
- %mrv = call {i32, i8*, i8*} asm sideeffect "$0 $1 $2 $3 $4 $5 $6",
- "=r,=r,=r,r,r,r,r"( i64 %tmp12, i32 %h, i8* %blk1, i8* %blk2 ) nounwind
- %tmp6 = getresult {i32, i8*, i8*} %mrv, 0
- %tmp7 = call i32 asm sideeffect "set $0",
- "=r,~{dirflag},~{fpsr},~{flags}"( ) nounwind
- ret i32 %tmp7
-}
-
-define <4 x float> @test2() nounwind {
- %mrv = call {<4 x float>, <4 x float>} asm "set $0, $1", "=x,=x"()
- %a = getresult {<4 x float>, <4 x float>} %mrv, 0
- %b = getresult {<4 x float>, <4 x float>} %mrv, 1
- %c = fadd <4 x float> %a, %b
- ret <4 x float> %c
-}
-
-define <4 x i32> @test3() nounwind {
- %mrv = call {<4 x i32>, <4 x i32>} asm "set $0, $1", "=x,=x"()
- %a = getresult {<4 x i32>, <4 x i32>} %mrv, 0
- %b = getresult {<4 x i32>, <4 x i32>} %mrv, 1
- %c = add <4 x i32> %a, %b
- ret <4 x i32> %c
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-out-regs.ll b/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-out-regs.ll
deleted file mode 100644
index 46966f5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-out-regs.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc < %s -mtriple=i386-unknown-linux-gnu
-; PR3391
-
-@pci_indirect = external global { } ; <{ }*> [#uses=1]
-@pcibios_last_bus = external global i32 ; <i32*> [#uses=2]
-
-define void @pci_pcbios_init() nounwind section ".init.text" {
-entry:
- br label %bb1.i
-
-bb1.i: ; preds = %bb6.i.i, %bb1.i, %entry
- %0 = load i32* null, align 8 ; <i32> [#uses=1]
- %1 = icmp ugt i32 %0, 1048575 ; <i1> [#uses=1]
- br i1 %1, label %bb2.i, label %bb1.i
-
-bb2.i: ; preds = %bb1.i
- %asmtmp.i.i = tail call { i32, i32, i32, i32 } asm "lcall *(%edi); cld\0A\09jc 1f\0A\09xor %ah, %ah\0A1:", "={dx},={ax},={bx},={cx},1,{di},~{dirflag},~{fpsr},~{flags},~{memory}"(i32 45313, { }* @pci_indirect) nounwind ; <{ i32, i32, i32, i32 }> [#uses=2]
- %asmresult2.i.i = extractvalue { i32, i32, i32, i32 } %asmtmp.i.i, 1
- ; <i32> [#uses=1]
- %2 = lshr i32 %asmresult2.i.i, 8 ; <i32> [#uses=1]
- %3 = trunc i32 %2 to i8 ; <i8> [#uses=1]
- %4 = load i32* @pcibios_last_bus, align 4 ; <i32> [#uses=1]
- %5 = icmp slt i32 %4, 0 ; <i1> [#uses=1]
- br i1 %5, label %bb5.i.i, label %bb6.i.i
-
-bb5.i.i: ; preds = %bb2.i
- %asmresult4.i.i = extractvalue { i32, i32, i32, i32 } %asmtmp.i.i, 3
- ; <i32> [#uses=1]
- %6 = and i32 %asmresult4.i.i, 255 ; <i32> [#uses=1]
- store i32 %6, i32* @pcibios_last_bus, align 4
- br label %bb6.i.i
-
-bb6.i.i: ; preds = %bb5.i.i, %bb2.i
- %7 = icmp eq i8 %3, 0 ; <i1> [#uses=1]
- %or.cond.i.i = and i1 %7, false ; <i1> [#uses=1]
- br i1 %or.cond.i.i, label %bb1.i, label %bb8.i.i
-
-bb8.i.i: ; preds = %bb6.i.i
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-pic.ll b/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-pic.ll
deleted file mode 100644
index 0b5ff08..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-pic.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic | grep lea
-; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=pic | grep call
-
-@main_q = internal global i8* null ; <i8**> [#uses=1]
-
-define void @func2() nounwind {
-entry:
- tail call void asm "mov $1,%gs:$0", "=*m,ri,~{dirflag},~{fpsr},~{flags}"(i8** inttoptr (i32 152 to i8**), i8* bitcast (i8** @main_q to i8*)) nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-q-regs.ll b/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-q-regs.ll
deleted file mode 100644
index ab44206..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-q-regs.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86-64
-; rdar://7066579
-
- type { i64, i64, i64, i64, i64 } ; type %0
-
-define void @t() nounwind {
-entry:
- %asmtmp = call %0 asm sideeffect "mov %cr0, $0 \0Amov %cr2, $1 \0Amov %cr3, $2 \0Amov %cr4, $3 \0Amov %cr8, $0 \0A", "=q,=q,=q,=q,=q,~{dirflag},~{fpsr},~{flags}"() nounwind ; <%0> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-tied.ll b/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-tied.ll
deleted file mode 100644
index 1f4a13f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-tied.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9 -O0 | grep {movl %edx, 12(%esp)} | count 2
-; rdar://6992609
-
-target triple = "i386-apple-darwin9.0"
-@llvm.used = appending global [1 x i8*] [i8* bitcast (i64 (i64)* @_OSSwapInt64 to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define i64 @_OSSwapInt64(i64 %_data) nounwind {
-entry:
- %retval = alloca i64 ; <i64*> [#uses=2]
- %_data.addr = alloca i64 ; <i64*> [#uses=4]
- store i64 %_data, i64* %_data.addr
- %tmp = load i64* %_data.addr ; <i64> [#uses=1]
- %0 = call i64 asm "bswap %eax\0A\09bswap %edx\0A\09xchgl %eax, %edx", "=A,0,~{dirflag},~{fpsr},~{flags}"(i64 %tmp) nounwind ; <i64> [#uses=1]
- store i64 %0, i64* %_data.addr
- %tmp1 = load i64* %_data.addr ; <i64> [#uses=1]
- store i64 %tmp1, i64* %retval
- %1 = load i64* %retval ; <i64> [#uses=1]
- ret i64 %1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-x-scalar.ll b/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-x-scalar.ll
deleted file mode 100644
index 5a9628b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm-x-scalar.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah
-
-define void @test1() {
- tail call void asm sideeffect "ucomiss $0", "x"( float 0x41E0000000000000)
- ret void
-}
-
-define void @test2() {
- %tmp53 = tail call i32 asm "ucomiss $1, $3\0Acmovae $2, $0 ", "=r,mx,mr,x,0,~{dirflag},~{fpsr},~{flags},~{cc}"( float 0x41E0000000000000, i32 2147483647, float 0.000000e+00, i32 0 ) ; <i32> [#uses
- unreachable
-}
-
-define void @test3() {
- tail call void asm sideeffect "ucomiss $0, $1", "mx,x,~{dirflag},~{fpsr},~{flags},~{cc}"( float 0x41E0000000000000, i32 65536 )
- ret void
-}
-
-define void @test4() {
- %tmp1 = tail call float asm "", "=x,0,~{dirflag},~{fpsr},~{flags}"( float 0x47EFFFFFE0000000 ); <float> [#uses=1]
- %tmp4 = fsub float %tmp1, 0x3810000000000000 ; <float> [#uses=1]
- tail call void asm sideeffect "", "x,~{dirflag},~{fpsr},~{flags}"( float %tmp4 )
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm.ll b/libclamav/c++/llvm/test/CodeGen/X86/inline-asm.ll
deleted file mode 100644
index c66d7a8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/inline-asm.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define i32 @test1() nounwind {
- ; Dest is AX, dest type = i32.
- %tmp4 = call i32 asm sideeffect "FROB $0", "={ax}"()
- ret i32 %tmp4
-}
-
-define void @test2(i32 %V) nounwind {
- ; input is AX, in type = i32.
- call void asm sideeffect "FROB $0", "{ax}"(i32 %V)
- ret void
-}
-
-define void @test3() nounwind {
- ; FP constant as a memory operand.
- tail call void asm sideeffect "frob $0", "m"( float 0x41E0000000000000)
- ret void
-}
-
-define void @test4() nounwind {
- ; J means a constant in range 0 to 63.
- tail call void asm sideeffect "bork $0", "J"(i32 37) nounwind
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/ins_subreg_coalesce-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/ins_subreg_coalesce-1.ll
deleted file mode 100644
index 2243f93..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/ins_subreg_coalesce-1.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | count 3
-
-define fastcc i32 @sqlite3ExprResolveNames() nounwind {
-entry:
- br i1 false, label %UnifiedReturnBlock, label %bb4
-bb4: ; preds = %entry
- br i1 false, label %bb17, label %bb22
-bb17: ; preds = %bb4
- ret i32 1
-bb22: ; preds = %bb4
- br i1 true, label %walkExprTree.exit, label %bb4.i
-bb4.i: ; preds = %bb22
- ret i32 0
-walkExprTree.exit: ; preds = %bb22
- %tmp83 = load i16* null, align 4 ; <i16> [#uses=1]
- %tmp84 = or i16 %tmp83, 2 ; <i16> [#uses=2]
- store i16 %tmp84, i16* null, align 4
- %tmp98993 = zext i16 %tmp84 to i32 ; <i32> [#uses=1]
- %tmp1004 = lshr i32 %tmp98993, 3 ; <i32> [#uses=1]
- %tmp100.lobit5 = and i32 %tmp1004, 1 ; <i32> [#uses=1]
- ret i32 %tmp100.lobit5
-UnifiedReturnBlock: ; preds = %entry
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/ins_subreg_coalesce-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/ins_subreg_coalesce-2.ll
deleted file mode 100644
index f2c9cc7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/ins_subreg_coalesce-2.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86-64 | not grep movw
-
-define i16 @test5(i16 %f12) nounwind {
- %f11 = shl i16 %f12, 2 ; <i16> [#uses=1]
- %tmp7.25 = ashr i16 %f11, 8 ; <i16> [#uses=1]
- ret i16 %tmp7.25
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/ins_subreg_coalesce-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/ins_subreg_coalesce-3.ll
deleted file mode 100644
index 8c1c409..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/ins_subreg_coalesce-3.ll
+++ /dev/null
@@ -1,93 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep mov | count 3
-
- %struct.COMPOSITE = type { i8, i16, i16 }
- %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
- %struct.FILE_POS = type { i8, i8, i16, i32 }
- %struct.FIRST_UNION = type { %struct.FILE_POS }
- %struct.FONT_INFO = type { %struct.metrics*, i8*, i16*, %struct.COMPOSITE*, i32, %struct.rec*, %struct.rec*, i16, i16, i16*, i8*, i8*, i16* }
- %struct.FOURTH_UNION = type { %struct.STYLE }
- %struct.GAP = type { i8, i8, i16 }
- %struct.LIST = type { %struct.rec*, %struct.rec* }
- %struct.SECOND_UNION = type { { i16, i8, i8 } }
- %struct.STYLE = type { { %struct.GAP }, { %struct.GAP }, i16, i16, i32 }
- %struct.THIRD_UNION = type { %struct.FILE*, [8 x i8] }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
- %struct.head_type = type { [2 x %struct.LIST], %struct.FIRST_UNION, %struct.SECOND_UNION, %struct.THIRD_UNION, %struct.FOURTH_UNION, %struct.rec*, { %struct.rec* }, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, %struct.rec*, i32 }
- %struct.metrics = type { i16, i16, i16, i16, i16 }
- %struct.rec = type { %struct.head_type }
-
-define void @FontChange(i1 %foo) nounwind {
-entry:
- br i1 %foo, label %bb298, label %bb49
-bb49: ; preds = %entry
- ret void
-bb298: ; preds = %entry
- br i1 %foo, label %bb304, label %bb366
-bb304: ; preds = %bb298
- br i1 %foo, label %bb330, label %bb428
-bb330: ; preds = %bb366, %bb304
- br label %bb366
-bb366: ; preds = %bb330, %bb298
- br i1 %foo, label %bb330, label %bb428
-bb428: ; preds = %bb366, %bb304
- br i1 %foo, label %bb650, label %bb433
-bb433: ; preds = %bb428
- ret void
-bb650: ; preds = %bb650, %bb428
- %tmp658 = load i8* null, align 8 ; <i8> [#uses=1]
- %tmp659 = icmp eq i8 %tmp658, 0 ; <i1> [#uses=1]
- br i1 %tmp659, label %bb650, label %bb662
-bb662: ; preds = %bb650
- %tmp685 = icmp eq %struct.rec* null, null ; <i1> [#uses=1]
- br i1 %tmp685, label %bb761, label %bb688
-bb688: ; preds = %bb662
- ret void
-bb761: ; preds = %bb662
- %tmp487248736542 = load i32* null, align 4 ; <i32> [#uses=2]
- %tmp487648776541 = and i32 %tmp487248736542, 57344 ; <i32> [#uses=1]
- %tmp4881 = icmp eq i32 %tmp487648776541, 8192 ; <i1> [#uses=1]
- br i1 %tmp4881, label %bb4884, label %bb4897
-bb4884: ; preds = %bb761
- %tmp488948906540 = and i32 %tmp487248736542, 7168 ; <i32> [#uses=1]
- %tmp4894 = icmp eq i32 %tmp488948906540, 1024 ; <i1> [#uses=1]
- br i1 %tmp4894, label %bb4932, label %bb4897
-bb4897: ; preds = %bb4884, %bb761
- ret void
-bb4932: ; preds = %bb4884
- %tmp4933 = load i32* null, align 4 ; <i32> [#uses=1]
- br i1 %foo, label %bb5054, label %bb4940
-bb4940: ; preds = %bb4932
- %tmp4943 = load i32* null, align 4 ; <i32> [#uses=2]
- switch i32 %tmp4933, label %bb5054 [
- i32 159, label %bb4970
- i32 160, label %bb5002
- ]
-bb4970: ; preds = %bb4940
- %tmp49746536 = trunc i32 %tmp4943 to i16 ; <i16> [#uses=1]
- %tmp49764977 = and i16 %tmp49746536, 4095 ; <i16> [#uses=1]
- %mask498049814982 = zext i16 %tmp49764977 to i64 ; <i64> [#uses=1]
- %tmp4984 = getelementptr %struct.FONT_INFO* null, i64 %mask498049814982, i32 5 ; <%struct.rec**> [#uses=1]
- %tmp4985 = load %struct.rec** %tmp4984, align 8 ; <%struct.rec*> [#uses=1]
- %tmp4988 = getelementptr %struct.rec* %tmp4985, i64 0, i32 0, i32 3 ; <%struct.THIRD_UNION*> [#uses=1]
- %tmp4991 = bitcast %struct.THIRD_UNION* %tmp4988 to i32* ; <i32*> [#uses=1]
- %tmp4992 = load i32* %tmp4991, align 8 ; <i32> [#uses=1]
- %tmp49924993 = trunc i32 %tmp4992 to i16 ; <i16> [#uses=1]
- %tmp4996 = add i16 %tmp49924993, 0 ; <i16> [#uses=1]
- br label %bb5054
-bb5002: ; preds = %bb4940
- %tmp50066537 = trunc i32 %tmp4943 to i16 ; <i16> [#uses=1]
- %tmp50085009 = and i16 %tmp50066537, 4095 ; <i16> [#uses=1]
- %mask501250135014 = zext i16 %tmp50085009 to i64 ; <i64> [#uses=1]
- %tmp5016 = getelementptr %struct.FONT_INFO* null, i64 %mask501250135014, i32 5 ; <%struct.rec**> [#uses=1]
- %tmp5017 = load %struct.rec** %tmp5016, align 8 ; <%struct.rec*> [#uses=1]
- %tmp5020 = getelementptr %struct.rec* %tmp5017, i64 0, i32 0, i32 3 ; <%struct.THIRD_UNION*> [#uses=1]
- %tmp5023 = bitcast %struct.THIRD_UNION* %tmp5020 to i32* ; <i32*> [#uses=1]
- %tmp5024 = load i32* %tmp5023, align 8 ; <i32> [#uses=1]
- %tmp50245025 = trunc i32 %tmp5024 to i16 ; <i16> [#uses=1]
- %tmp5028 = sub i16 %tmp50245025, 0 ; <i16> [#uses=1]
- br label %bb5054
-bb5054: ; preds = %bb5002, %bb4970, %bb4940, %bb4932
- %flen.0.reg2mem.0 = phi i16 [ %tmp4996, %bb4970 ], [ %tmp5028, %bb5002 ], [ 0, %bb4932 ], [ undef, %bb4940 ] ; <i16> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/insertelement-copytoregs.ll b/libclamav/c++/llvm/test/CodeGen/X86/insertelement-copytoregs.ll
deleted file mode 100644
index 34a29ca..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/insertelement-copytoregs.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep -v IMPLICIT_DEF
-
-define void @foo(<2 x float>* %p) {
- %t = insertelement <2 x float> undef, float 0.0, i32 0
- %v = insertelement <2 x float> %t, float 0.0, i32 1
- br label %bb8
-
-bb8:
- store <2 x float> %v, <2 x float>* %p
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/insertelement-legalize.ll b/libclamav/c++/llvm/test/CodeGen/X86/insertelement-legalize.ll
deleted file mode 100644
index 18aade2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/insertelement-legalize.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 -disable-mmx
-
-; Test to check that we properly legalize an insert vector element
-define void @test(<2 x i64> %val, <2 x i64>* %dst, i64 %x) nounwind {
-entry:
- %tmp4 = insertelement <2 x i64> %val, i64 %x, i32 0 ; <<2 x i64>> [#uses=1]
- %add = add <2 x i64> %tmp4, %val ; <<2 x i64>> [#uses=1]
- store <2 x i64> %add, <2 x i64>* %dst
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/invalid-shift-immediate.ll b/libclamav/c++/llvm/test/CodeGen/X86/invalid-shift-immediate.ll
deleted file mode 100644
index 77a9f7e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/invalid-shift-immediate.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR2098
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
-
-define void @foo(i32 %x) {
-entry:
- %x_addr = alloca i32 ; <i32*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %x, i32* %x_addr
- %tmp = load i32* %x_addr, align 4 ; <i32> [#uses=1]
- %tmp1 = ashr i32 %tmp, -2 ; <i32> [#uses=1]
- %tmp2 = and i32 %tmp1, 1 ; <i32> [#uses=1]
- %tmp23 = trunc i32 %tmp2 to i8 ; <i8> [#uses=1]
- %toBool = icmp ne i8 %tmp23, 0 ; <i1> [#uses=1]
- br i1 %toBool, label %bb, label %bb5
-
-bb: ; preds = %entry
- %tmp4 = call i32 (...)* @bar( ) nounwind ; <i32> [#uses=0]
- br label %bb5
-
-bb5: ; preds = %bb, %entry
- br label %return
-
-return: ; preds = %bb5
- ret void
-}
-
-declare i32 @bar(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/isel-sink.ll b/libclamav/c++/llvm/test/CodeGen/X86/isel-sink.ll
deleted file mode 100644
index 0f94b23..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/isel-sink.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep lea
-; RUN: llc < %s -march=x86 -mtriple=i686-apple-darwin8 | \
-; RUN: grep {movl \$4, (.*,.*,4)}
-
-define i32 @test(i32* %X, i32 %B) {
- ; This gep should be sunk out of this block into the load/store users.
- %P = getelementptr i32* %X, i32 %B
- %G = icmp ult i32 %B, 1234
- br i1 %G, label %T, label %F
-T:
- store i32 4, i32* %P
- ret i32 141
-F:
- %V = load i32* %P
- ret i32 %V
-}
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/isel-sink2.ll b/libclamav/c++/llvm/test/CodeGen/X86/isel-sink2.ll
deleted file mode 100644
index 5ed0e00..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/isel-sink2.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep {movb.7(%...)} %t
-; RUN: not grep leal %t
-
-define i8 @test(i32 *%P) nounwind {
- %Q = getelementptr i32* %P, i32 1
- %R = bitcast i32* %Q to i8*
- %S = load i8* %R
- %T = icmp eq i8 %S, 0
- br i1 %T, label %TB, label %F
-TB:
- ret i8 4
-F:
- %U = getelementptr i8* %R, i32 3
- %V = load i8* %U
- ret i8 %V
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/isel-sink3.ll b/libclamav/c++/llvm/test/CodeGen/X86/isel-sink3.ll
deleted file mode 100644
index 8d3d97a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/isel-sink3.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s | grep {addl.\$4, %ecx}
-; RUN: llc < %s | not grep leal
-; this should not sink %1 into bb1, that would increase reg pressure.
-
-; rdar://6399178
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin7"
-
-define i32 @bar(i32** %P) nounwind {
-entry:
- %0 = load i32** %P, align 4 ; <i32*> [#uses=2]
- %1 = getelementptr i32* %0, i32 1 ; <i32*> [#uses=1]
- %2 = icmp ugt i32* %1, inttoptr (i64 1233 to i32*) ; <i1> [#uses=1]
- br i1 %2, label %bb1, label %bb
-
-bb: ; preds = %entry
- store i32* inttoptr (i64 123 to i32*), i32** %P, align 4
- br label %bb1
-
-bb1: ; preds = %entry, %bb
- %3 = getelementptr i32* %1, i32 1 ; <i32*> [#uses=1]
- %4 = load i32* %3, align 4 ; <i32> [#uses=1]
- ret i32 %4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/isint.ll b/libclamav/c++/llvm/test/CodeGen/X86/isint.ll
deleted file mode 100644
index 507a328..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/isint.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 > %t
-; RUN: not grep cmp %t
-; RUN: not grep xor %t
-; RUN: grep jne %t | count 1
-; RUN: grep jp %t | count 1
-; RUN: grep setnp %t | count 1
-; RUN: grep sete %t | count 1
-; RUN: grep and %t | count 1
-; RUN: grep cvt %t | count 4
-
-define i32 @isint_return(double %d) nounwind {
- %i = fptosi double %d to i32
- %e = sitofp i32 %i to double
- %c = fcmp oeq double %d, %e
- %z = zext i1 %c to i32
- ret i32 %z
-}
-
-declare void @foo()
-
-define void @isint_branch(double %d) nounwind {
- %i = fptosi double %d to i32
- %e = sitofp i32 %i to double
- %c = fcmp oeq double %d, %e
- br i1 %c, label %true, label %false
-true:
- call void @foo()
- ret void
-false:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/isnan.ll b/libclamav/c++/llvm/test/CodeGen/X86/isnan.ll
deleted file mode 100644
index 4d465c0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/isnan.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep call
-
-declare i1 @llvm.isunordered.f64(double)
-
-define i1 @test_isnan(double %X) {
- %R = fcmp uno double %X, %X ; <i1> [#uses=1]
- ret i1 %R
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/isnan2.ll b/libclamav/c++/llvm/test/CodeGen/X86/isnan2.ll
deleted file mode 100644
index 7753346..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/isnan2.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah | not grep pxor
-
-; This should not need to materialize 0.0 to evaluate the condition.
-
-define i32 @test(double %X) nounwind {
-entry:
- %tmp6 = fcmp uno double %X, 0.000000e+00 ; <i1> [#uses=1]
- %tmp67 = zext i1 %tmp6 to i32 ; <i32> [#uses=1]
- ret i32 %tmp67
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/ispositive.ll b/libclamav/c++/llvm/test/CodeGen/X86/ispositive.ll
deleted file mode 100644
index 8adf723..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/ispositive.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {shrl.*31}
-
-define i32 @test1(i32 %X) {
-entry:
- icmp slt i32 %X, 0 ; <i1>:0 [#uses=1]
- zext i1 %0 to i32 ; <i32>:1 [#uses=1]
- ret i32 %1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/iv-users-in-other-loops.ll b/libclamav/c++/llvm/test/CodeGen/X86/iv-users-in-other-loops.ll
deleted file mode 100644
index 408fb20..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/iv-users-in-other-loops.ll
+++ /dev/null
@@ -1,296 +0,0 @@
-; RUN: llc < %s -march=x86-64 -o %t
-; RUN: not grep inc %t
-; RUN: grep dec %t | count 2
-; RUN: grep addq %t | count 13
-; RUN: not grep addb %t
-; RUN: not grep leaq %t
-; RUN: not grep leal %t
-; RUN: not grep movq %t
-
-; IV users in each of the loops from other loops shouldn't cause LSR
-; to insert new induction variables. Previously it would create a
-; flood of new induction variables.
-; Also, the loop reversal should kick in once.
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define void @foo(float* %A, i32 %IA, float* %B, i32 %IB, float* nocapture %C, i32 %N) nounwind {
-entry:
- %0 = xor i32 %IA, 1 ; <i32> [#uses=1]
- %1 = xor i32 %IB, 1 ; <i32> [#uses=1]
- %2 = or i32 %1, %0 ; <i32> [#uses=1]
- %3 = icmp eq i32 %2, 0 ; <i1> [#uses=1]
- br i1 %3, label %bb2, label %bb13
-
-bb: ; preds = %bb3
- %4 = load float* %A_addr.0, align 4 ; <float> [#uses=1]
- %5 = load float* %B_addr.0, align 4 ; <float> [#uses=1]
- %6 = fmul float %4, %5 ; <float> [#uses=1]
- %7 = fadd float %6, %Sum0.0 ; <float> [#uses=1]
- %indvar.next154 = add i64 %B_addr.0.rec, 1 ; <i64> [#uses=1]
- br label %bb2
-
-bb2: ; preds = %entry, %bb
- %B_addr.0.rec = phi i64 [ %indvar.next154, %bb ], [ 0, %entry ] ; <i64> [#uses=14]
- %Sum0.0 = phi float [ %7, %bb ], [ 0.000000e+00, %entry ] ; <float> [#uses=5]
- %indvar146 = trunc i64 %B_addr.0.rec to i32 ; <i32> [#uses=1]
- %N_addr.0 = sub i32 %N, %indvar146 ; <i32> [#uses=6]
- %A_addr.0 = getelementptr float* %A, i64 %B_addr.0.rec ; <float*> [#uses=4]
- %B_addr.0 = getelementptr float* %B, i64 %B_addr.0.rec ; <float*> [#uses=4]
- %8 = icmp sgt i32 %N_addr.0, 0 ; <i1> [#uses=1]
- br i1 %8, label %bb3, label %bb4
-
-bb3: ; preds = %bb2
- %9 = ptrtoint float* %A_addr.0 to i64 ; <i64> [#uses=1]
- %10 = and i64 %9, 15 ; <i64> [#uses=1]
- %11 = icmp eq i64 %10, 0 ; <i1> [#uses=1]
- br i1 %11, label %bb4, label %bb
-
-bb4: ; preds = %bb3, %bb2
- %12 = ptrtoint float* %B_addr.0 to i64 ; <i64> [#uses=1]
- %13 = and i64 %12, 15 ; <i64> [#uses=1]
- %14 = icmp eq i64 %13, 0 ; <i1> [#uses=1]
- %15 = icmp sgt i32 %N_addr.0, 15 ; <i1> [#uses=2]
- br i1 %14, label %bb6.preheader, label %bb10.preheader
-
-bb10.preheader: ; preds = %bb4
- br i1 %15, label %bb9, label %bb12.loopexit
-
-bb6.preheader: ; preds = %bb4
- br i1 %15, label %bb5, label %bb8.loopexit
-
-bb5: ; preds = %bb5, %bb6.preheader
- %indvar143 = phi i64 [ 0, %bb6.preheader ], [ %indvar.next144, %bb5 ] ; <i64> [#uses=3]
- %vSum0.072 = phi <4 x float> [ zeroinitializer, %bb6.preheader ], [ %21, %bb5 ] ; <<4 x float>> [#uses=1]
- %vSum1.070 = phi <4 x float> [ zeroinitializer, %bb6.preheader ], [ %29, %bb5 ] ; <<4 x float>> [#uses=1]
- %vSum2.069 = phi <4 x float> [ zeroinitializer, %bb6.preheader ], [ %37, %bb5 ] ; <<4 x float>> [#uses=1]
- %vSum3.067 = phi <4 x float> [ zeroinitializer, %bb6.preheader ], [ %45, %bb5 ] ; <<4 x float>> [#uses=1]
- %indvar145 = trunc i64 %indvar143 to i32 ; <i32> [#uses=1]
- %tmp150 = mul i32 %indvar145, -16 ; <i32> [#uses=1]
- %N_addr.268 = add i32 %tmp150, %N_addr.0 ; <i32> [#uses=1]
- %A_addr.273.rec = shl i64 %indvar143, 4 ; <i64> [#uses=5]
- %B_addr.0.sum180 = add i64 %B_addr.0.rec, %A_addr.273.rec ; <i64> [#uses=2]
- %B_addr.271 = getelementptr float* %B, i64 %B_addr.0.sum180 ; <float*> [#uses=1]
- %A_addr.273 = getelementptr float* %A, i64 %B_addr.0.sum180 ; <float*> [#uses=1]
- tail call void asm sideeffect ";# foo", "~{dirflag},~{fpsr},~{flags}"() nounwind
- %16 = bitcast float* %A_addr.273 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %17 = load <4 x float>* %16, align 16 ; <<4 x float>> [#uses=1]
- %18 = bitcast float* %B_addr.271 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %19 = load <4 x float>* %18, align 16 ; <<4 x float>> [#uses=1]
- %20 = fmul <4 x float> %17, %19 ; <<4 x float>> [#uses=1]
- %21 = fadd <4 x float> %20, %vSum0.072 ; <<4 x float>> [#uses=2]
- %A_addr.273.sum163 = or i64 %A_addr.273.rec, 4 ; <i64> [#uses=1]
- %A_addr.0.sum175 = add i64 %B_addr.0.rec, %A_addr.273.sum163 ; <i64> [#uses=2]
- %22 = getelementptr float* %A, i64 %A_addr.0.sum175 ; <float*> [#uses=1]
- %23 = bitcast float* %22 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %24 = load <4 x float>* %23, align 16 ; <<4 x float>> [#uses=1]
- %25 = getelementptr float* %B, i64 %A_addr.0.sum175 ; <float*> [#uses=1]
- %26 = bitcast float* %25 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %27 = load <4 x float>* %26, align 16 ; <<4 x float>> [#uses=1]
- %28 = fmul <4 x float> %24, %27 ; <<4 x float>> [#uses=1]
- %29 = fadd <4 x float> %28, %vSum1.070 ; <<4 x float>> [#uses=2]
- %A_addr.273.sum161 = or i64 %A_addr.273.rec, 8 ; <i64> [#uses=1]
- %A_addr.0.sum174 = add i64 %B_addr.0.rec, %A_addr.273.sum161 ; <i64> [#uses=2]
- %30 = getelementptr float* %A, i64 %A_addr.0.sum174 ; <float*> [#uses=1]
- %31 = bitcast float* %30 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %32 = load <4 x float>* %31, align 16 ; <<4 x float>> [#uses=1]
- %33 = getelementptr float* %B, i64 %A_addr.0.sum174 ; <float*> [#uses=1]
- %34 = bitcast float* %33 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %35 = load <4 x float>* %34, align 16 ; <<4 x float>> [#uses=1]
- %36 = fmul <4 x float> %32, %35 ; <<4 x float>> [#uses=1]
- %37 = fadd <4 x float> %36, %vSum2.069 ; <<4 x float>> [#uses=2]
- %A_addr.273.sum159 = or i64 %A_addr.273.rec, 12 ; <i64> [#uses=1]
- %A_addr.0.sum173 = add i64 %B_addr.0.rec, %A_addr.273.sum159 ; <i64> [#uses=2]
- %38 = getelementptr float* %A, i64 %A_addr.0.sum173 ; <float*> [#uses=1]
- %39 = bitcast float* %38 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %40 = load <4 x float>* %39, align 16 ; <<4 x float>> [#uses=1]
- %41 = getelementptr float* %B, i64 %A_addr.0.sum173 ; <float*> [#uses=1]
- %42 = bitcast float* %41 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %43 = load <4 x float>* %42, align 16 ; <<4 x float>> [#uses=1]
- %44 = fmul <4 x float> %40, %43 ; <<4 x float>> [#uses=1]
- %45 = fadd <4 x float> %44, %vSum3.067 ; <<4 x float>> [#uses=2]
- %.rec83 = add i64 %A_addr.273.rec, 16 ; <i64> [#uses=1]
- %A_addr.0.sum172 = add i64 %B_addr.0.rec, %.rec83 ; <i64> [#uses=2]
- %46 = getelementptr float* %A, i64 %A_addr.0.sum172 ; <float*> [#uses=1]
- %47 = getelementptr float* %B, i64 %A_addr.0.sum172 ; <float*> [#uses=1]
- %48 = add i32 %N_addr.268, -16 ; <i32> [#uses=2]
- %49 = icmp sgt i32 %48, 15 ; <i1> [#uses=1]
- %indvar.next144 = add i64 %indvar143, 1 ; <i64> [#uses=1]
- br i1 %49, label %bb5, label %bb8.loopexit
-
-bb7: ; preds = %bb7, %bb8.loopexit
- %indvar130 = phi i64 [ 0, %bb8.loopexit ], [ %indvar.next131, %bb7 ] ; <i64> [#uses=3]
- %vSum0.260 = phi <4 x float> [ %vSum0.0.lcssa, %bb8.loopexit ], [ %55, %bb7 ] ; <<4 x float>> [#uses=1]
- %indvar132 = trunc i64 %indvar130 to i32 ; <i32> [#uses=1]
- %tmp133 = mul i32 %indvar132, -4 ; <i32> [#uses=1]
- %N_addr.358 = add i32 %tmp133, %N_addr.2.lcssa ; <i32> [#uses=1]
- %A_addr.361.rec = shl i64 %indvar130, 2 ; <i64> [#uses=3]
- %B_addr.359 = getelementptr float* %B_addr.2.lcssa, i64 %A_addr.361.rec ; <float*> [#uses=1]
- %A_addr.361 = getelementptr float* %A_addr.2.lcssa, i64 %A_addr.361.rec ; <float*> [#uses=1]
- %50 = bitcast float* %A_addr.361 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %51 = load <4 x float>* %50, align 16 ; <<4 x float>> [#uses=1]
- %52 = bitcast float* %B_addr.359 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %53 = load <4 x float>* %52, align 16 ; <<4 x float>> [#uses=1]
- %54 = fmul <4 x float> %51, %53 ; <<4 x float>> [#uses=1]
- %55 = fadd <4 x float> %54, %vSum0.260 ; <<4 x float>> [#uses=2]
- %.rec85 = add i64 %A_addr.361.rec, 4 ; <i64> [#uses=2]
- %56 = getelementptr float* %A_addr.2.lcssa, i64 %.rec85 ; <float*> [#uses=1]
- %57 = getelementptr float* %B_addr.2.lcssa, i64 %.rec85 ; <float*> [#uses=1]
- %58 = add i32 %N_addr.358, -4 ; <i32> [#uses=2]
- %59 = icmp sgt i32 %58, 3 ; <i1> [#uses=1]
- %indvar.next131 = add i64 %indvar130, 1 ; <i64> [#uses=1]
- br i1 %59, label %bb7, label %bb13
-
-bb8.loopexit: ; preds = %bb5, %bb6.preheader
- %A_addr.2.lcssa = phi float* [ %A_addr.0, %bb6.preheader ], [ %46, %bb5 ] ; <float*> [#uses=3]
- %vSum0.0.lcssa = phi <4 x float> [ zeroinitializer, %bb6.preheader ], [ %21, %bb5 ] ; <<4 x float>> [#uses=2]
- %B_addr.2.lcssa = phi float* [ %B_addr.0, %bb6.preheader ], [ %47, %bb5 ] ; <float*> [#uses=3]
- %vSum1.0.lcssa = phi <4 x float> [ zeroinitializer, %bb6.preheader ], [ %29, %bb5 ] ; <<4 x float>> [#uses=2]
- %vSum2.0.lcssa = phi <4 x float> [ zeroinitializer, %bb6.preheader ], [ %37, %bb5 ] ; <<4 x float>> [#uses=2]
- %N_addr.2.lcssa = phi i32 [ %N_addr.0, %bb6.preheader ], [ %48, %bb5 ] ; <i32> [#uses=3]
- %vSum3.0.lcssa = phi <4 x float> [ zeroinitializer, %bb6.preheader ], [ %45, %bb5 ] ; <<4 x float>> [#uses=2]
- %60 = icmp sgt i32 %N_addr.2.lcssa, 3 ; <i1> [#uses=1]
- br i1 %60, label %bb7, label %bb13
-
-bb9: ; preds = %bb9, %bb10.preheader
- %indvar106 = phi i64 [ 0, %bb10.preheader ], [ %indvar.next107, %bb9 ] ; <i64> [#uses=3]
- %vSum0.339 = phi <4 x float> [ zeroinitializer, %bb10.preheader ], [ %75, %bb9 ] ; <<4 x float>> [#uses=1]
- %vSum1.237 = phi <4 x float> [ zeroinitializer, %bb10.preheader ], [ %80, %bb9 ] ; <<4 x float>> [#uses=1]
- %vSum2.236 = phi <4 x float> [ zeroinitializer, %bb10.preheader ], [ %85, %bb9 ] ; <<4 x float>> [#uses=1]
- %vSum3.234 = phi <4 x float> [ zeroinitializer, %bb10.preheader ], [ %90, %bb9 ] ; <<4 x float>> [#uses=1]
- %indvar108 = trunc i64 %indvar106 to i32 ; <i32> [#uses=1]
- %tmp113 = mul i32 %indvar108, -16 ; <i32> [#uses=1]
- %N_addr.435 = add i32 %tmp113, %N_addr.0 ; <i32> [#uses=1]
- %A_addr.440.rec = shl i64 %indvar106, 4 ; <i64> [#uses=5]
- %B_addr.0.sum = add i64 %B_addr.0.rec, %A_addr.440.rec ; <i64> [#uses=2]
- %B_addr.438 = getelementptr float* %B, i64 %B_addr.0.sum ; <float*> [#uses=1]
- %A_addr.440 = getelementptr float* %A, i64 %B_addr.0.sum ; <float*> [#uses=1]
- %61 = bitcast float* %B_addr.438 to <4 x float>* ; <i8*> [#uses=1]
- %62 = load <4 x float>* %61, align 1
- %B_addr.438.sum169 = or i64 %A_addr.440.rec, 4 ; <i64> [#uses=1]
- %B_addr.0.sum187 = add i64 %B_addr.0.rec, %B_addr.438.sum169 ; <i64> [#uses=2]
- %63 = getelementptr float* %B, i64 %B_addr.0.sum187 ; <float*> [#uses=1]
- %64 = bitcast float* %63 to <4 x float>* ; <i8*> [#uses=1]
- %65 = load <4 x float>* %64, align 1
- %B_addr.438.sum168 = or i64 %A_addr.440.rec, 8 ; <i64> [#uses=1]
- %B_addr.0.sum186 = add i64 %B_addr.0.rec, %B_addr.438.sum168 ; <i64> [#uses=2]
- %66 = getelementptr float* %B, i64 %B_addr.0.sum186 ; <float*> [#uses=1]
- %67 = bitcast float* %66 to <4 x float>* ; <i8*> [#uses=1]
- %68 = load <4 x float>* %67, align 1
- %B_addr.438.sum167 = or i64 %A_addr.440.rec, 12 ; <i64> [#uses=1]
- %B_addr.0.sum185 = add i64 %B_addr.0.rec, %B_addr.438.sum167 ; <i64> [#uses=2]
- %69 = getelementptr float* %B, i64 %B_addr.0.sum185 ; <float*> [#uses=1]
- %70 = bitcast float* %69 to <4 x float>* ; <i8*> [#uses=1]
- %71 = load <4 x float>* %70, align 1
- %72 = bitcast float* %A_addr.440 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %73 = load <4 x float>* %72, align 16 ; <<4 x float>> [#uses=1]
- %74 = fmul <4 x float> %73, %62 ; <<4 x float>> [#uses=1]
- %75 = fadd <4 x float> %74, %vSum0.339 ; <<4 x float>> [#uses=2]
- %76 = getelementptr float* %A, i64 %B_addr.0.sum187 ; <float*> [#uses=1]
- %77 = bitcast float* %76 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %78 = load <4 x float>* %77, align 16 ; <<4 x float>> [#uses=1]
- %79 = fmul <4 x float> %78, %65 ; <<4 x float>> [#uses=1]
- %80 = fadd <4 x float> %79, %vSum1.237 ; <<4 x float>> [#uses=2]
- %81 = getelementptr float* %A, i64 %B_addr.0.sum186 ; <float*> [#uses=1]
- %82 = bitcast float* %81 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %83 = load <4 x float>* %82, align 16 ; <<4 x float>> [#uses=1]
- %84 = fmul <4 x float> %83, %68 ; <<4 x float>> [#uses=1]
- %85 = fadd <4 x float> %84, %vSum2.236 ; <<4 x float>> [#uses=2]
- %86 = getelementptr float* %A, i64 %B_addr.0.sum185 ; <float*> [#uses=1]
- %87 = bitcast float* %86 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %88 = load <4 x float>* %87, align 16 ; <<4 x float>> [#uses=1]
- %89 = fmul <4 x float> %88, %71 ; <<4 x float>> [#uses=1]
- %90 = fadd <4 x float> %89, %vSum3.234 ; <<4 x float>> [#uses=2]
- %.rec89 = add i64 %A_addr.440.rec, 16 ; <i64> [#uses=1]
- %A_addr.0.sum170 = add i64 %B_addr.0.rec, %.rec89 ; <i64> [#uses=2]
- %91 = getelementptr float* %A, i64 %A_addr.0.sum170 ; <float*> [#uses=1]
- %92 = getelementptr float* %B, i64 %A_addr.0.sum170 ; <float*> [#uses=1]
- %93 = add i32 %N_addr.435, -16 ; <i32> [#uses=2]
- %94 = icmp sgt i32 %93, 15 ; <i1> [#uses=1]
- %indvar.next107 = add i64 %indvar106, 1 ; <i64> [#uses=1]
- br i1 %94, label %bb9, label %bb12.loopexit
-
-bb11: ; preds = %bb11, %bb12.loopexit
- %indvar = phi i64 [ 0, %bb12.loopexit ], [ %indvar.next, %bb11 ] ; <i64> [#uses=3]
- %vSum0.428 = phi <4 x float> [ %vSum0.3.lcssa, %bb12.loopexit ], [ %100, %bb11 ] ; <<4 x float>> [#uses=1]
- %indvar96 = trunc i64 %indvar to i32 ; <i32> [#uses=1]
- %tmp = mul i32 %indvar96, -4 ; <i32> [#uses=1]
- %N_addr.526 = add i32 %tmp, %N_addr.4.lcssa ; <i32> [#uses=1]
- %A_addr.529.rec = shl i64 %indvar, 2 ; <i64> [#uses=3]
- %B_addr.527 = getelementptr float* %B_addr.4.lcssa, i64 %A_addr.529.rec ; <float*> [#uses=1]
- %A_addr.529 = getelementptr float* %A_addr.4.lcssa, i64 %A_addr.529.rec ; <float*> [#uses=1]
- %95 = bitcast float* %B_addr.527 to <4 x float>* ; <i8*> [#uses=1]
- %96 = load <4 x float>* %95, align 1
- %97 = bitcast float* %A_addr.529 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %98 = load <4 x float>* %97, align 16 ; <<4 x float>> [#uses=1]
- %99 = fmul <4 x float> %98, %96 ; <<4 x float>> [#uses=1]
- %100 = fadd <4 x float> %99, %vSum0.428 ; <<4 x float>> [#uses=2]
- %.rec91 = add i64 %A_addr.529.rec, 4 ; <i64> [#uses=2]
- %101 = getelementptr float* %A_addr.4.lcssa, i64 %.rec91 ; <float*> [#uses=1]
- %102 = getelementptr float* %B_addr.4.lcssa, i64 %.rec91 ; <float*> [#uses=1]
- %103 = add i32 %N_addr.526, -4 ; <i32> [#uses=2]
- %104 = icmp sgt i32 %103, 3 ; <i1> [#uses=1]
- %indvar.next = add i64 %indvar, 1 ; <i64> [#uses=1]
- br i1 %104, label %bb11, label %bb13
-
-bb12.loopexit: ; preds = %bb9, %bb10.preheader
- %A_addr.4.lcssa = phi float* [ %A_addr.0, %bb10.preheader ], [ %91, %bb9 ] ; <float*> [#uses=3]
- %vSum0.3.lcssa = phi <4 x float> [ zeroinitializer, %bb10.preheader ], [ %75, %bb9 ] ; <<4 x float>> [#uses=2]
- %B_addr.4.lcssa = phi float* [ %B_addr.0, %bb10.preheader ], [ %92, %bb9 ] ; <float*> [#uses=3]
- %vSum1.2.lcssa = phi <4 x float> [ zeroinitializer, %bb10.preheader ], [ %80, %bb9 ] ; <<4 x float>> [#uses=2]
- %vSum2.2.lcssa = phi <4 x float> [ zeroinitializer, %bb10.preheader ], [ %85, %bb9 ] ; <<4 x float>> [#uses=2]
- %N_addr.4.lcssa = phi i32 [ %N_addr.0, %bb10.preheader ], [ %93, %bb9 ] ; <i32> [#uses=3]
- %vSum3.2.lcssa = phi <4 x float> [ zeroinitializer, %bb10.preheader ], [ %90, %bb9 ] ; <<4 x float>> [#uses=2]
- %105 = icmp sgt i32 %N_addr.4.lcssa, 3 ; <i1> [#uses=1]
- br i1 %105, label %bb11, label %bb13
-
-bb13: ; preds = %bb12.loopexit, %bb11, %bb8.loopexit, %bb7, %entry
- %Sum0.1 = phi float [ 0.000000e+00, %entry ], [ %Sum0.0, %bb7 ], [ %Sum0.0, %bb8.loopexit ], [ %Sum0.0, %bb11 ], [ %Sum0.0, %bb12.loopexit ] ; <float> [#uses=1]
- %vSum3.1 = phi <4 x float> [ zeroinitializer, %entry ], [ %vSum3.0.lcssa, %bb7 ], [ %vSum3.0.lcssa, %bb8.loopexit ], [ %vSum3.2.lcssa, %bb11 ], [ %vSum3.2.lcssa, %bb12.loopexit ] ; <<4 x float>> [#uses=1]
- %N_addr.1 = phi i32 [ %N, %entry ], [ %N_addr.2.lcssa, %bb8.loopexit ], [ %58, %bb7 ], [ %N_addr.4.lcssa, %bb12.loopexit ], [ %103, %bb11 ] ; <i32> [#uses=2]
- %vSum2.1 = phi <4 x float> [ zeroinitializer, %entry ], [ %vSum2.0.lcssa, %bb7 ], [ %vSum2.0.lcssa, %bb8.loopexit ], [ %vSum2.2.lcssa, %bb11 ], [ %vSum2.2.lcssa, %bb12.loopexit ] ; <<4 x float>> [#uses=1]
- %vSum1.1 = phi <4 x float> [ zeroinitializer, %entry ], [ %vSum1.0.lcssa, %bb7 ], [ %vSum1.0.lcssa, %bb8.loopexit ], [ %vSum1.2.lcssa, %bb11 ], [ %vSum1.2.lcssa, %bb12.loopexit ] ; <<4 x float>> [#uses=1]
- %B_addr.1 = phi float* [ %B, %entry ], [ %B_addr.2.lcssa, %bb8.loopexit ], [ %57, %bb7 ], [ %B_addr.4.lcssa, %bb12.loopexit ], [ %102, %bb11 ] ; <float*> [#uses=1]
- %vSum0.1 = phi <4 x float> [ zeroinitializer, %entry ], [ %vSum0.0.lcssa, %bb8.loopexit ], [ %55, %bb7 ], [ %vSum0.3.lcssa, %bb12.loopexit ], [ %100, %bb11 ] ; <<4 x float>> [#uses=1]
- %A_addr.1 = phi float* [ %A, %entry ], [ %A_addr.2.lcssa, %bb8.loopexit ], [ %56, %bb7 ], [ %A_addr.4.lcssa, %bb12.loopexit ], [ %101, %bb11 ] ; <float*> [#uses=1]
- %106 = fadd <4 x float> %vSum0.1, %vSum2.1 ; <<4 x float>> [#uses=1]
- %107 = fadd <4 x float> %vSum1.1, %vSum3.1 ; <<4 x float>> [#uses=1]
- %108 = fadd <4 x float> %106, %107 ; <<4 x float>> [#uses=4]
- %tmp23 = extractelement <4 x float> %108, i32 0 ; <float> [#uses=1]
- %tmp21 = extractelement <4 x float> %108, i32 1 ; <float> [#uses=1]
- %109 = fadd float %tmp23, %tmp21 ; <float> [#uses=1]
- %tmp19 = extractelement <4 x float> %108, i32 2 ; <float> [#uses=1]
- %tmp17 = extractelement <4 x float> %108, i32 3 ; <float> [#uses=1]
- %110 = fadd float %tmp19, %tmp17 ; <float> [#uses=1]
- %111 = fadd float %109, %110 ; <float> [#uses=1]
- %Sum0.254 = fadd float %111, %Sum0.1 ; <float> [#uses=2]
- %112 = icmp sgt i32 %N_addr.1, 0 ; <i1> [#uses=1]
- br i1 %112, label %bb.nph56, label %bb16
-
-bb.nph56: ; preds = %bb13
- %tmp. = zext i32 %N_addr.1 to i64 ; <i64> [#uses=1]
- br label %bb14
-
-bb14: ; preds = %bb14, %bb.nph56
- %indvar117 = phi i64 [ 0, %bb.nph56 ], [ %indvar.next118, %bb14 ] ; <i64> [#uses=3]
- %Sum0.255 = phi float [ %Sum0.254, %bb.nph56 ], [ %Sum0.2, %bb14 ] ; <float> [#uses=1]
- %tmp.122 = sext i32 %IB to i64 ; <i64> [#uses=1]
- %B_addr.652.rec = mul i64 %indvar117, %tmp.122 ; <i64> [#uses=1]
- %tmp.124 = sext i32 %IA to i64 ; <i64> [#uses=1]
- %A_addr.653.rec = mul i64 %indvar117, %tmp.124 ; <i64> [#uses=1]
- %B_addr.652 = getelementptr float* %B_addr.1, i64 %B_addr.652.rec ; <float*> [#uses=1]
- %A_addr.653 = getelementptr float* %A_addr.1, i64 %A_addr.653.rec ; <float*> [#uses=1]
- %113 = load float* %A_addr.653, align 4 ; <float> [#uses=1]
- %114 = load float* %B_addr.652, align 4 ; <float> [#uses=1]
- %115 = fmul float %113, %114 ; <float> [#uses=1]
- %Sum0.2 = fadd float %115, %Sum0.255 ; <float> [#uses=2]
- %indvar.next118 = add i64 %indvar117, 1 ; <i64> [#uses=2]
- %exitcond = icmp eq i64 %indvar.next118, %tmp. ; <i1> [#uses=1]
- br i1 %exitcond, label %bb16, label %bb14
-
-bb16: ; preds = %bb14, %bb13
- %Sum0.2.lcssa = phi float [ %Sum0.254, %bb13 ], [ %Sum0.2, %bb14 ] ; <float> [#uses=1]
- store float %Sum0.2.lcssa, float* %C, align 4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/jump_sign.ll b/libclamav/c++/llvm/test/CodeGen/X86/jump_sign.ll
deleted file mode 100644
index 5e8e162..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/jump_sign.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86 | grep jns
-
-define i32 @f(i32 %X) {
-entry:
- %tmp1 = add i32 %X, 1 ; <i32> [#uses=1]
- %tmp = icmp slt i32 %tmp1, 0 ; <i1> [#uses=1]
- br i1 %tmp, label %cond_true, label %cond_next
-
-cond_true: ; preds = %entry
- %tmp2 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
- br label %cond_next
-
-cond_next: ; preds = %cond_true, %entry
- %tmp3 = tail call i32 (...)* @baz( ) ; <i32> [#uses=0]
- ret i32 undef
-}
-
-declare i32 @bar(...)
-
-declare i32 @baz(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/large-gep-scale.ll b/libclamav/c++/llvm/test/CodeGen/X86/large-gep-scale.ll
deleted file mode 100644
index 143294e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/large-gep-scale.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 | FileCheck %s
-; PR5281
-
-; After scaling, this type doesn't fit in memory. Codegen should generate
-; correct addressing still.
-
-; CHECK: shll $2, %edx
-
-define fastcc i32* @_ada_smkr([2147483647 x i32]* %u, i32 %t) nounwind {
- %x = getelementptr [2147483647 x i32]* %u, i32 %t, i32 0
- ret i32* %x
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/ldzero.ll b/libclamav/c++/llvm/test/CodeGen/X86/ldzero.ll
deleted file mode 100644
index dab04bc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/ldzero.ll
+++ /dev/null
@@ -1,43 +0,0 @@
-; RUN: llc < %s
-; verify PR 1700 is still fixed
-; ModuleID = 'hh.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin8"
-
-define x86_fp80 @x() {
-entry:
- %retval = alloca x86_fp80, align 16 ; <x86_fp80*> [#uses=2]
- %tmp = alloca x86_fp80, align 16 ; <x86_fp80*> [#uses=2]
- %d = alloca double, align 8 ; <double*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store double 0.000000e+00, double* %d, align 8
- %tmp1 = load double* %d, align 8 ; <double> [#uses=1]
- %tmp12 = fpext double %tmp1 to x86_fp80 ; <x86_fp80> [#uses=1]
- store x86_fp80 %tmp12, x86_fp80* %tmp, align 16
- %tmp3 = load x86_fp80* %tmp, align 16 ; <x86_fp80> [#uses=1]
- store x86_fp80 %tmp3, x86_fp80* %retval, align 16
- br label %return
-
-return: ; preds = %entry
- %retval4 = load x86_fp80* %retval ; <x86_fp80> [#uses=1]
- ret x86_fp80 %retval4
-}
-
-define double @y() {
-entry:
- %retval = alloca double, align 8 ; <double*> [#uses=2]
- %tmp = alloca double, align 8 ; <double*> [#uses=2]
- %ld = alloca x86_fp80, align 16 ; <x86_fp80*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store x86_fp80 0xK00000000000000000000, x86_fp80* %ld, align 16
- %tmp1 = load x86_fp80* %ld, align 16 ; <x86_fp80> [#uses=1]
- %tmp12 = fptrunc x86_fp80 %tmp1 to double ; <double> [#uses=1]
- store double %tmp12, double* %tmp, align 8
- %tmp3 = load double* %tmp, align 8 ; <double> [#uses=1]
- store double %tmp3, double* %retval, align 8
- br label %return
-
-return: ; preds = %entry
- %retval4 = load double* %retval ; <double> [#uses=1]
- ret double %retval4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/lea-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/lea-2.ll
deleted file mode 100644
index 6930350..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/lea-2.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
-; RUN: grep {lea EAX, DWORD PTR \\\[... + 4\\*... - 5\\\]}
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
-; RUN: not grep add
-
-define i32 @test1(i32 %A, i32 %B) {
- %tmp1 = shl i32 %A, 2 ; <i32> [#uses=1]
- %tmp3 = add i32 %B, -5 ; <i32> [#uses=1]
- %tmp4 = add i32 %tmp3, %tmp1 ; <i32> [#uses=1]
- ret i32 %tmp4
-}
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/lea-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/lea-3.ll
deleted file mode 100644
index 44413d6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/lea-3.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep {leal (%rdi,%rdi,2), %eax}
-define i32 @test(i32 %a) {
- %tmp2 = mul i32 %a, 3 ; <i32> [#uses=1]
- ret i32 %tmp2
-}
-
-; RUN: llc < %s -march=x86-64 | grep {leaq (,%rdi,4), %rax}
-define i64 @test2(i64 %a) {
- %tmp2 = shl i64 %a, 2
- %tmp3 = or i64 %tmp2, %a
- ret i64 %tmp3
-}
-
-;; TODO! LEA instead of shift + copy.
-define i64 @test3(i64 %a) {
- %tmp2 = shl i64 %a, 3
- ret i64 %tmp2
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/lea-4.ll b/libclamav/c++/llvm/test/CodeGen/X86/lea-4.ll
deleted file mode 100644
index 2171204..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/lea-4.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep lea | count 2
-
-define zeroext i16 @t1(i32 %on_off) nounwind {
-entry:
- %0 = sub i32 %on_off, 1
- %1 = mul i32 %0, 2
- %2 = trunc i32 %1 to i16
- %3 = zext i16 %2 to i32
- %4 = trunc i32 %3 to i16
- ret i16 %4
-}
-
-define i32 @t2(i32 %on_off) nounwind {
-entry:
- %0 = sub i32 %on_off, 1
- %1 = mul i32 %0, 2
- %2 = and i32 %1, 65535
- ret i32 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/lea-recursion.ll b/libclamav/c++/llvm/test/CodeGen/X86/lea-recursion.ll
deleted file mode 100644
index 3f32fd2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/lea-recursion.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep lea | count 12
-
-; This testcase was written to demonstrate an instruction-selection problem,
-; however it also happens to expose a limitation in the DAGCombiner's
-; expression reassociation which causes it to miss opportunities for
-; constant folding due to the intermediate adds having multiple uses.
-; The Reassociate pass has similar limitations. If these limitations are
-; fixed, the test commands above will need to be updated to expect fewer
-; lea instructions.
-
-@g0 = weak global [1000 x i32] zeroinitializer, align 32 ; <[1000 x i32]*> [#uses=8]
-@g1 = weak global [1000 x i32] zeroinitializer, align 32 ; <[1000 x i32]*> [#uses=7]
-
-define void @foo() {
-entry:
- %tmp4 = load i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 0) ; <i32> [#uses=1]
- %tmp8 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 0) ; <i32> [#uses=1]
- %tmp9 = add i32 %tmp4, 1 ; <i32> [#uses=1]
- %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=2]
- store i32 %tmp10, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 1)
- %tmp8.1 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 1) ; <i32> [#uses=1]
- %tmp9.1 = add i32 %tmp10, 1 ; <i32> [#uses=1]
- %tmp10.1 = add i32 %tmp9.1, %tmp8.1 ; <i32> [#uses=2]
- store i32 %tmp10.1, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 2)
- %tmp8.2 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 2) ; <i32> [#uses=1]
- %tmp9.2 = add i32 %tmp10.1, 1 ; <i32> [#uses=1]
- %tmp10.2 = add i32 %tmp9.2, %tmp8.2 ; <i32> [#uses=2]
- store i32 %tmp10.2, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 3)
- %tmp8.3 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 3) ; <i32> [#uses=1]
- %tmp9.3 = add i32 %tmp10.2, 1 ; <i32> [#uses=1]
- %tmp10.3 = add i32 %tmp9.3, %tmp8.3 ; <i32> [#uses=2]
- store i32 %tmp10.3, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 4)
- %tmp8.4 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 4) ; <i32> [#uses=1]
- %tmp9.4 = add i32 %tmp10.3, 1 ; <i32> [#uses=1]
- %tmp10.4 = add i32 %tmp9.4, %tmp8.4 ; <i32> [#uses=2]
- store i32 %tmp10.4, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 5)
- %tmp8.5 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 5) ; <i32> [#uses=1]
- %tmp9.5 = add i32 %tmp10.4, 1 ; <i32> [#uses=1]
- %tmp10.5 = add i32 %tmp9.5, %tmp8.5 ; <i32> [#uses=2]
- store i32 %tmp10.5, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 6)
- %tmp8.6 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 6) ; <i32> [#uses=1]
- %tmp9.6 = add i32 %tmp10.5, 1 ; <i32> [#uses=1]
- %tmp10.6 = add i32 %tmp9.6, %tmp8.6 ; <i32> [#uses=1]
- store i32 %tmp10.6, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 7)
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/lea.ll b/libclamav/c++/llvm/test/CodeGen/X86/lea.ll
deleted file mode 100644
index 22a9644..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/lea.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
-define i32 @test1(i32 %x) nounwind {
- %tmp1 = shl i32 %x, 3
- %tmp2 = add i32 %tmp1, 7
- ret i32 %tmp2
-; CHECK: test1:
-; CHECK: leal 7(,%rdi,8), %eax
-}
-
-
-; ISel the add of -4 with a neg and use an lea for the rest of the
-; arithmetic.
-define i32 @test2(i32 %x_offs) nounwind readnone {
-entry:
- %t0 = icmp sgt i32 %x_offs, 4
- br i1 %t0, label %bb.nph, label %bb2
-
-bb.nph:
- %tmp = add i32 %x_offs, -5
- %tmp6 = lshr i32 %tmp, 2
- %tmp7 = mul i32 %tmp6, -4
- %tmp8 = add i32 %tmp7, %x_offs
- %tmp9 = add i32 %tmp8, -4
- ret i32 %tmp9
-
-bb2:
- ret i32 %x_offs
-; CHECK: test2:
-; CHECK: leal -5(%rdi), %eax
-; CHECK: andl $-4, %eax
-; CHECK: negl %eax
-; CHECK: leal -4(%rdi,%rax), %eax
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/legalize-fmp-oeq-vector-select.ll b/libclamav/c++/llvm/test/CodeGen/X86/legalize-fmp-oeq-vector-select.ll
deleted file mode 100644
index 6a8c154..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/legalize-fmp-oeq-vector-select.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc -march=x86-64 -enable-legalize-types-checking < %s
-; PR5092
-
-define <4 x float> @bug(float %a) nounwind {
-entry:
- %cmp = fcmp oeq float %a, 0.000000e+00 ; <i1> [#uses=1]
- %temp = select i1 %cmp, <4 x float> <float 1.000000e+00, float 0.000000e+00,
-float 0.000000e+00, float 0.000000e+00>, <4 x float> zeroinitializer
- ret <4 x float> %temp
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/legalizedag_vec.ll b/libclamav/c++/llvm/test/CodeGen/X86/legalizedag_vec.ll
deleted file mode 100644
index 574b46a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/legalizedag_vec.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=sse2 -disable-mmx -o %t
-; RUN: grep {call.*divdi3} %t | count 2
-
-
-; Test case for r63760 where we generate a legalization assert that an illegal
-; type has been inserted by LegalizeDAG after LegalizeType has run. With sse2,
-; v2i64 is a legal type but with mmx disabled, i64 is an illegal type. When
-; legalizing the divide in LegalizeDAG, we scalarize the vector divide and make
-; two 64 bit divide library calls which introduces i64 nodes that needs to be
-; promoted.
-
-define <2 x i64> @test_long_div(<2 x i64> %num, <2 x i64> %div) {
- %div.r = sdiv <2 x i64> %num, %div
- ret <2 x i64> %div.r
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/lfence.ll b/libclamav/c++/llvm/test/CodeGen/X86/lfence.ll
deleted file mode 100644
index 7a96ca3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/lfence.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep lfence
-
-declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1)
-
-define void @test() {
- call void @llvm.memory.barrier( i1 true, i1 false, i1 false, i1 false, i1 true)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/licm-symbol.ll b/libclamav/c++/llvm/test/CodeGen/X86/licm-symbol.ll
deleted file mode 100644
index d61bbfc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/licm-symbol.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; RUN: llc < %s | FileCheck %s
-
-; MachineLICM should be able to hoist the sF reference out of the loop.
-
-; CHECK: pushl %esi
-; CHECK: subl $8, %esp
-; CHECK: movl $176, %esi
-; CHECK: addl L___sF$non_lazy_ptr, %esi
-; CHECK: .align 4, 0x90
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
-target triple = "i386-apple-darwin8"
-
-%struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
-%struct.__sFILEX = type opaque
-%struct.__sbuf = type { i8*, i32 }
-%struct.gcov_ctr_summary = type { i32, i32, i64, i64, i64 }
-%struct.gcov_summary = type { i32, [1 x %struct.gcov_ctr_summary] }
-
-@__sF = external global [0 x %struct.FILE] ; <[0 x %struct.FILE]*> [#uses=1]
-
-declare i32 @fprintf(%struct.FILE* nocapture) nounwind
-
-define void @gcov_exit() nounwind {
-entry:
- br label %bb151
-
-bb151: ; preds = %bb59, %bb56, %bb14
- br i1 undef, label %bb56, label %bb59
-
-bb56: ; preds = %bb151
- %t0 = call i32 (%struct.FILE*)* @fprintf(%struct.FILE* getelementptr inbounds ([0 x %struct.FILE]* @__sF, i32 0, i32 2)) nounwind
- br label %bb151
-
-bb59: ; preds = %bb151
- %t1 = call i32 (%struct.FILE*)* @fprintf(%struct.FILE* getelementptr inbounds ([0 x %struct.FILE]* @__sF, i32 0, i32 2)) nounwind
- br label %bb151
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/limited-prec.ll b/libclamav/c++/llvm/test/CodeGen/X86/limited-prec.ll
deleted file mode 100644
index 7bf4ac2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/limited-prec.ll
+++ /dev/null
@@ -1,60 +0,0 @@
-; RUN: llc < %s -limit-float-precision=6 -march=x86 | \
-; RUN: not grep exp | not grep log | not grep pow
-; RUN: llc < %s -limit-float-precision=12 -march=x86 | \
-; RUN: not grep exp | not grep log | not grep pow
-; RUN: llc < %s -limit-float-precision=18 -march=x86 | \
-; RUN: not grep exp | not grep log | not grep pow
-
-define float @f1(float %x) nounwind noinline {
-entry:
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = call float @llvm.exp.f32(float %x) ; <float> [#uses=1]
- ret float %0
-}
-
-declare float @llvm.exp.f32(float) nounwind readonly
-
-define float @f2(float %x) nounwind noinline {
-entry:
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = call float @llvm.exp2.f32(float %x) ; <float> [#uses=1]
- ret float %0
-}
-
-declare float @llvm.exp2.f32(float) nounwind readonly
-
-define float @f3(float %x) nounwind noinline {
-entry:
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = call float @llvm.pow.f32(float 1.000000e+01, float %x) ; <float> [#uses=1]
- ret float %0
-}
-
-declare float @llvm.pow.f32(float, float) nounwind readonly
-
-define float @f4(float %x) nounwind noinline {
-entry:
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = call float @llvm.log.f32(float %x) ; <float> [#uses=1]
- ret float %0
-}
-
-declare float @llvm.log.f32(float) nounwind readonly
-
-define float @f5(float %x) nounwind noinline {
-entry:
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = call float @llvm.log2.f32(float %x) ; <float> [#uses=1]
- ret float %0
-}
-
-declare float @llvm.log2.f32(float) nounwind readonly
-
-define float @f6(float %x) nounwind noinline {
-entry:
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = call float @llvm.log10.f32(float %x) ; <float> [#uses=1]
- ret float %0
-}
-
-declare float @llvm.log10.f32(float) nounwind readonly
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/live-out-reg-info.ll b/libclamav/c++/llvm/test/CodeGen/X86/live-out-reg-info.ll
deleted file mode 100644
index 8cd9774..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/live-out-reg-info.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep testb
-
-; Make sure dagcombine doesn't eliminate the comparison due
-; to an off-by-one bug with ComputeMaskedBits information.
-
-declare void @qux()
-
-define void @foo(i32 %a) {
- %t0 = lshr i32 %a, 23
- br label %next
-next:
- %t1 = and i32 %t0, 256
- %t2 = icmp eq i32 %t1, 0
- br i1 %t2, label %true, label %false
-true:
- call void @qux()
- ret void
-false:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/local-liveness.ll b/libclamav/c++/llvm/test/CodeGen/X86/local-liveness.ll
deleted file mode 100644
index 321f208..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/local-liveness.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -march=x86 -regalloc=local | grep {subl %eax, %edx}
-
-; Local regalloc shouldn't assume that both the uses of the
-; sub instruction are kills, because one of them is tied
-; to an output. Previously, it was allocating both inputs
-; in the same register.
-
-define i32 @func_3() nounwind {
-entry:
- %retval = alloca i32 ; <i32*> [#uses=2]
- %g_323 = alloca i8 ; <i8*> [#uses=2]
- %p_5 = alloca i64, align 8 ; <i64*> [#uses=2]
- %0 = alloca i32 ; <i32*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i64 0, i64* %p_5, align 8
- store i8 1, i8* %g_323, align 1
- %1 = load i8* %g_323, align 1 ; <i8> [#uses=1]
- %2 = sext i8 %1 to i64 ; <i64> [#uses=1]
- %3 = load i64* %p_5, align 8 ; <i64> [#uses=1]
- %4 = sub i64 %3, %2 ; <i64> [#uses=1]
- %5 = icmp sge i64 %4, 0 ; <i1> [#uses=1]
- %6 = zext i1 %5 to i32 ; <i32> [#uses=1]
- store i32 %6, i32* %0, align 4
- %7 = load i32* %0, align 4 ; <i32> [#uses=1]
- store i32 %7, i32* %retval, align 4
- br label %return
-
-return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/long-setcc.ll b/libclamav/c++/llvm/test/CodeGen/X86/long-setcc.ll
deleted file mode 100644
index e0165fb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/long-setcc.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 | grep cmp | count 1
-; RUN: llc < %s -march=x86 | grep shr | count 1
-; RUN: llc < %s -march=x86 | grep xor | count 1
-
-define i1 @t1(i64 %x) nounwind {
- %B = icmp slt i64 %x, 0
- ret i1 %B
-}
-
-define i1 @t2(i64 %x) nounwind {
- %tmp = icmp ult i64 %x, 4294967296
- ret i1 %tmp
-}
-
-define i1 @t3(i32 %x) nounwind {
- %tmp = icmp ugt i32 %x, -1
- ret i1 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/longlong-deadload.ll b/libclamav/c++/llvm/test/CodeGen/X86/longlong-deadload.ll
deleted file mode 100644
index 9a4c8f2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/longlong-deadload.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep '4{(%...)}
-; This should not load or store the top part of *P.
-
-define void @test(i64* %P) nounwind {
-entry:
- %tmp1 = load i64* %P, align 8 ; <i64> [#uses=1]
- %tmp2 = xor i64 %tmp1, 1 ; <i64> [#uses=1]
- store i64 %tmp2, i64* %P, align 8
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/loop-blocks.ll b/libclamav/c++/llvm/test/CodeGen/X86/loop-blocks.ll
deleted file mode 100644
index a125e54..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/loop-blocks.ll
+++ /dev/null
@@ -1,207 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -asm-verbose=false | FileCheck %s
-
-; These tests check for loop branching structure, and that the loop align
-; directive is placed in the expected place.
-
-; CodeGen should insert a branch into the middle of the loop in
-; order to avoid a branch within the loop.
-
-; CHECK: simple:
-; CHECK: jmp .LBB1_1
-; CHECK-NEXT: align
-; CHECK-NEXT: .LBB1_2:
-; CHECK-NEXT: callq loop_latch
-; CHECK-NEXT: .LBB1_1:
-; CHECK-NEXT: callq loop_header
-
-define void @simple() nounwind {
-entry:
- br label %loop
-
-loop:
- call void @loop_header()
- %t0 = tail call i32 @get()
- %t1 = icmp slt i32 %t0, 0
- br i1 %t1, label %done, label %bb
-
-bb:
- call void @loop_latch()
- br label %loop
-
-done:
- call void @exit()
- ret void
-}
-
-; CodeGen should move block_a to the top of the loop so that it
-; falls through into the loop, avoiding a branch within the loop.
-
-; CHECK: slightly_more_involved:
-; CHECK: jmp .LBB2_1
-; CHECK-NEXT: align
-; CHECK-NEXT: .LBB2_4:
-; CHECK-NEXT: callq bar99
-; CHECK-NEXT: .LBB2_1:
-; CHECK-NEXT: callq body
-
-define void @slightly_more_involved() nounwind {
-entry:
- br label %loop
-
-loop:
- call void @body()
- %t0 = call i32 @get()
- %t1 = icmp slt i32 %t0, 2
- br i1 %t1, label %block_a, label %bb
-
-bb:
- %t2 = call i32 @get()
- %t3 = icmp slt i32 %t2, 99
- br i1 %t3, label %exit, label %loop
-
-block_a:
- call void @bar99()
- br label %loop
-
-exit:
- call void @exit()
- ret void
-}
-
-; Same as slightly_more_involved, but block_a is now a CFG diamond with
-; fallthrough edges which should be preserved.
-
-; CHECK: yet_more_involved:
-; CHECK: jmp .LBB3_1
-; CHECK-NEXT: align
-; CHECK-NEXT: .LBB3_4:
-; CHECK-NEXT: callq bar99
-; CHECK-NEXT: callq get
-; CHECK-NEXT: cmpl $2999, %eax
-; CHECK-NEXT: jg .LBB3_6
-; CHECK-NEXT: callq block_a_true_func
-; CHECK-NEXT: jmp .LBB3_7
-; CHECK-NEXT: .LBB3_6:
-; CHECK-NEXT: callq block_a_false_func
-; CHECK-NEXT: .LBB3_7:
-; CHECK-NEXT: callq block_a_merge_func
-; CHECK-NEXT: .LBB3_1:
-; CHECK-NEXT: callq body
-
-define void @yet_more_involved() nounwind {
-entry:
- br label %loop
-
-loop:
- call void @body()
- %t0 = call i32 @get()
- %t1 = icmp slt i32 %t0, 2
- br i1 %t1, label %block_a, label %bb
-
-bb:
- %t2 = call i32 @get()
- %t3 = icmp slt i32 %t2, 99
- br i1 %t3, label %exit, label %loop
-
-block_a:
- call void @bar99()
- %z0 = call i32 @get()
- %z1 = icmp slt i32 %z0, 3000
- br i1 %z1, label %block_a_true, label %block_a_false
-
-block_a_true:
- call void @block_a_true_func()
- br label %block_a_merge
-
-block_a_false:
- call void @block_a_false_func()
- br label %block_a_merge
-
-block_a_merge:
- call void @block_a_merge_func()
- br label %loop
-
-exit:
- call void @exit()
- ret void
-}
-
-; CodeGen should move the CFG islands that are part of the loop but don't
-; conveniently fit anywhere so that they are at least contiguous with the
-; loop.
-
-; CHECK: cfg_islands:
-; CHECK: jmp .LBB4_1
-; CHECK-NEXT: align
-; CHECK-NEXT: .LBB4_7:
-; CHECK-NEXT: callq bar100
-; CHECK-NEXT: jmp .LBB4_1
-; CHECK-NEXT: .LBB4_8:
-; CHECK-NEXT: callq bar101
-; CHECK-NEXT: jmp .LBB4_1
-; CHECK-NEXT: .LBB4_9:
-; CHECK-NEXT: callq bar102
-; CHECK-NEXT: jmp .LBB4_1
-; CHECK-NEXT: .LBB4_5:
-; CHECK-NEXT: callq loop_latch
-; CHECK-NEXT: .LBB4_1:
-; CHECK-NEXT: callq loop_header
-
-define void @cfg_islands() nounwind {
-entry:
- br label %loop
-
-loop:
- call void @loop_header()
- %t0 = call i32 @get()
- %t1 = icmp slt i32 %t0, 100
- br i1 %t1, label %block100, label %bb
-
-bb:
- %t2 = call i32 @get()
- %t3 = icmp slt i32 %t2, 101
- br i1 %t3, label %block101, label %bb1
-
-bb1:
- %t4 = call i32 @get()
- %t5 = icmp slt i32 %t4, 102
- br i1 %t5, label %block102, label %bb2
-
-bb2:
- %t6 = call i32 @get()
- %t7 = icmp slt i32 %t6, 103
- br i1 %t7, label %exit, label %bb3
-
-bb3:
- call void @loop_latch()
- br label %loop
-
-exit:
- call void @exit()
- ret void
-
-block100:
- call void @bar100()
- br label %loop
-
-block101:
- call void @bar101()
- br label %loop
-
-block102:
- call void @bar102()
- br label %loop
-}
-
-declare void @bar99() nounwind
-declare void @bar100() nounwind
-declare void @bar101() nounwind
-declare void @bar102() nounwind
-declare void @body() nounwind
-declare void @exit() nounwind
-declare void @loop_header() nounwind
-declare void @loop_latch() nounwind
-declare i32 @get() nounwind
-declare void @block_a_true_func() nounwind
-declare void @block_a_false_func() nounwind
-declare void @block_a_merge_func() nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/loop-hoist.ll b/libclamav/c++/llvm/test/CodeGen/X86/loop-hoist.ll
deleted file mode 100644
index b9008e5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/loop-hoist.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; LSR should hoist the load from the "Arr" stub out of the loop.
-
-; RUN: llc < %s -relocation-model=dynamic-no-pic -mtriple=i686-apple-darwin8.7.2 | FileCheck %s
-
-; CHECK: _foo:
-; CHECK: L_Arr$non_lazy_ptr
-; CHECK: LBB1_1:
-
-@Arr = external global [0 x i32] ; <[0 x i32]*> [#uses=1]
-
-define void @foo(i32 %N.in, i32 %x) nounwind {
-entry:
- %N = bitcast i32 %N.in to i32 ; <i32> [#uses=1]
- br label %cond_true
-
-cond_true: ; preds = %cond_true, %entry
- %indvar = phi i32 [ %x, %entry ], [ %indvar.next, %cond_true ] ; <i32> [#uses=2]
- %i.0.0 = bitcast i32 %indvar to i32 ; <i32> [#uses=2]
- %tmp = getelementptr [0 x i32]* @Arr, i32 0, i32 %i.0.0 ; <i32*> [#uses=1]
- store i32 %i.0.0, i32* %tmp
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %cond_true
-
-return: ; preds = %cond_true
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce-2.ll
deleted file mode 100644
index b546462..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce-2.ll
+++ /dev/null
@@ -1,45 +0,0 @@
-; RUN: llc < %s -march=x86 -relocation-model=pic | FileCheck %s -check-prefix=PIC
-; RUN: llc < %s -march=x86 -relocation-model=static | FileCheck %s -check-prefix=STATIC
-;
-; Make sure the common loop invariant A is hoisted up to preheader,
-; since too many registers are needed to subsume it into the addressing modes.
-; It's safe to sink A in when it's not pic.
-
-; PIC: align
-; PIC: movl $4, -4([[REG:%e[a-z]+]])
-; PIC: movl $5, ([[REG]])
-; PIC: addl $4, [[REG]]
-; PIC: decl {{%e[[a-z]+}}
-; PIC: jne
-
-; STATIC: align
-; STATIC: movl $4, -4(%ecx)
-; STATIC: movl $5, (%ecx)
-; STATIC: addl $4, %ecx
-; STATIC: decl %eax
-; STATIC: jne
-
-@A = global [16 x [16 x i32]] zeroinitializer, align 32 ; <[16 x [16 x i32]]*> [#uses=2]
-
-define void @test(i32 %row, i32 %N.in) nounwind {
-entry:
- %N = bitcast i32 %N.in to i32 ; <i32> [#uses=1]
- %tmp5 = icmp sgt i32 %N.in, 0 ; <i1> [#uses=1]
- br i1 %tmp5, label %cond_true, label %return
-
-cond_true: ; preds = %cond_true, %entry
- %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %cond_true ] ; <i32> [#uses=2]
- %i.0.0 = bitcast i32 %indvar to i32 ; <i32> [#uses=2]
- %tmp2 = add i32 %i.0.0, 1 ; <i32> [#uses=1]
- %tmp = getelementptr [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp2 ; <i32*> [#uses=1]
- store i32 4, i32* %tmp
- %tmp5.upgrd.1 = add i32 %i.0.0, 2 ; <i32> [#uses=1]
- %tmp7 = getelementptr [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp5.upgrd.1 ; <i32*> [#uses=1]
- store i32 5, i32* %tmp7
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %cond_true
-
-return: ; preds = %cond_true, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce-3.ll
deleted file mode 100644
index b1c9fb9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce-3.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -relocation-model=dynamic-no-pic | FileCheck %s
-
-; CHECK: align
-; CHECK: movl $4, -4(%ecx)
-; CHECK: movl $5, (%ecx)
-; CHECK: addl $4, %ecx
-; CHECK: decl %eax
-; CHECK: jne
-
-@A = global [16 x [16 x i32]] zeroinitializer, align 32 ; <[16 x [16 x i32]]*> [#uses=2]
-
-define void @test(i32 %row, i32 %N.in) nounwind {
-entry:
- %N = bitcast i32 %N.in to i32 ; <i32> [#uses=1]
- %tmp5 = icmp sgt i32 %N.in, 0 ; <i1> [#uses=1]
- br i1 %tmp5, label %cond_true, label %return
-
-cond_true: ; preds = %cond_true, %entry
- %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %cond_true ] ; <i32> [#uses=2]
- %i.0.0 = bitcast i32 %indvar to i32 ; <i32> [#uses=2]
- %tmp2 = add i32 %i.0.0, 1 ; <i32> [#uses=1]
- %tmp = getelementptr [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp2 ; <i32*> [#uses=1]
- store i32 4, i32* %tmp
- %tmp5.upgrd.1 = add i32 %i.0.0, 2 ; <i32> [#uses=1]
- %tmp7 = getelementptr [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp5.upgrd.1 ; <i32*> [#uses=1]
- store i32 5, i32* %tmp7
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %cond_true
-
-return: ; preds = %cond_true, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce.ll b/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce.ll
deleted file mode 100644
index 42c6ac4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -march=x86 -relocation-model=static | FileCheck %s
-
-; CHECK: align
-; CHECK: movl $4, -4(%ecx)
-; CHECK: movl $5, (%ecx)
-; CHECK: addl $4, %ecx
-; CHECK: decl %eax
-; CHECK: jne
-
-@A = internal global [16 x [16 x i32]] zeroinitializer, align 32 ; <[16 x [16 x i32]]*> [#uses=2]
-
-define void @test(i32 %row, i32 %N.in) nounwind {
-entry:
- %N = bitcast i32 %N.in to i32 ; <i32> [#uses=1]
- %tmp5 = icmp sgt i32 %N.in, 0 ; <i1> [#uses=1]
- br i1 %tmp5, label %cond_true, label %return
-
-cond_true: ; preds = %cond_true, %entry
- %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %cond_true ] ; <i32> [#uses=2]
- %i.0.0 = bitcast i32 %indvar to i32 ; <i32> [#uses=2]
- %tmp2 = add i32 %i.0.0, 1 ; <i32> [#uses=1]
- %tmp = getelementptr [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp2 ; <i32*> [#uses=1]
- store i32 4, i32* %tmp
- %tmp5.upgrd.1 = add i32 %i.0.0, 2 ; <i32> [#uses=1]
- %tmp7 = getelementptr [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp5.upgrd.1 ; <i32*> [#uses=1]
- store i32 5, i32* %tmp7
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %cond_true
-
-return: ; preds = %cond_true, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce2.ll b/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce2.ll
deleted file mode 100644
index 9b53adb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce2.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin -relocation-model=pic | grep {\$pb} | grep mov
-;
-; Make sure the PIC label flags2-"L1$pb" is not moved up to the preheader.
-
-@flags2 = internal global [8193 x i8] zeroinitializer, align 32 ; <[8193 x i8]*> [#uses=1]
-
-define void @test(i32 %k, i32 %i) nounwind {
-entry:
- %k_addr.012 = shl i32 %i, 1 ; <i32> [#uses=1]
- %tmp14 = icmp sgt i32 %k_addr.012, 8192 ; <i1> [#uses=1]
- br i1 %tmp14, label %return, label %bb
-
-bb: ; preds = %bb, %entry
- %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
- %tmp. = shl i32 %i, 1 ; <i32> [#uses=1]
- %tmp.15 = mul i32 %indvar, %i ; <i32> [#uses=1]
- %tmp.16 = add i32 %tmp.15, %tmp. ; <i32> [#uses=2]
- %k_addr.0.0 = bitcast i32 %tmp.16 to i32 ; <i32> [#uses=1]
- %gep.upgrd.1 = zext i32 %tmp.16 to i64 ; <i64> [#uses=1]
- %tmp = getelementptr [8193 x i8]* @flags2, i32 0, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
- store i8 0, i8* %tmp
- %k_addr.0 = add i32 %k_addr.0.0, %i ; <i32> [#uses=1]
- %tmp.upgrd.2 = icmp sgt i32 %k_addr.0, 8192 ; <i1> [#uses=1]
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
- br i1 %tmp.upgrd.2, label %return, label %bb
-
-return: ; preds = %bb, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce3.ll b/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce3.ll
deleted file mode 100644
index c45a374..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce3.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc < %s -march=x86 | grep cmp | grep 240
-; RUN: llc < %s -march=x86 | grep inc | count 1
-
-define i32 @foo(i32 %A, i32 %B, i32 %C, i32 %D) nounwind {
-entry:
- %tmp2955 = icmp sgt i32 %C, 0 ; <i1> [#uses=1]
- br i1 %tmp2955, label %bb26.outer.us, label %bb40.split
-
-bb26.outer.us: ; preds = %bb26.bb32_crit_edge.us, %entry
- %i.044.0.ph.us = phi i32 [ 0, %entry ], [ %indvar.next57, %bb26.bb32_crit_edge.us ] ; <i32> [#uses=2]
- %k.1.ph.us = phi i32 [ 0, %entry ], [ %k.0.us, %bb26.bb32_crit_edge.us ] ; <i32> [#uses=1]
- %tmp3.us = mul i32 %i.044.0.ph.us, 6 ; <i32> [#uses=1]
- br label %bb1.us
-
-bb1.us: ; preds = %bb1.us, %bb26.outer.us
- %j.053.us = phi i32 [ 0, %bb26.outer.us ], [ %tmp25.us, %bb1.us ] ; <i32> [#uses=2]
- %k.154.us = phi i32 [ %k.1.ph.us, %bb26.outer.us ], [ %k.0.us, %bb1.us ] ; <i32> [#uses=1]
- %tmp5.us = add i32 %tmp3.us, %j.053.us ; <i32> [#uses=1]
- %tmp7.us = shl i32 %D, %tmp5.us ; <i32> [#uses=2]
- %tmp9.us = icmp eq i32 %tmp7.us, %B ; <i1> [#uses=1]
- %tmp910.us = zext i1 %tmp9.us to i32 ; <i32> [#uses=1]
- %tmp12.us = and i32 %tmp7.us, %A ; <i32> [#uses=1]
- %tmp19.us = and i32 %tmp12.us, %tmp910.us ; <i32> [#uses=1]
- %k.0.us = add i32 %tmp19.us, %k.154.us ; <i32> [#uses=3]
- %tmp25.us = add i32 %j.053.us, 1 ; <i32> [#uses=2]
- %tmp29.us = icmp slt i32 %tmp25.us, %C ; <i1> [#uses=1]
- br i1 %tmp29.us, label %bb1.us, label %bb26.bb32_crit_edge.us
-
-bb26.bb32_crit_edge.us: ; preds = %bb1.us
- %indvar.next57 = add i32 %i.044.0.ph.us, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next57, 40 ; <i1> [#uses=1]
- br i1 %exitcond, label %bb40.split, label %bb26.outer.us
-
-bb40.split: ; preds = %bb26.bb32_crit_edge.us, %entry
- %k.1.lcssa.lcssa.us-lcssa = phi i32 [ %k.0.us, %bb26.bb32_crit_edge.us ], [ 0, %entry ] ; <i32> [#uses=1]
- ret i32 %k.1.lcssa.lcssa.us-lcssa
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce4.ll b/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce4.ll
deleted file mode 100644
index 6c0eb8c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce4.ll
+++ /dev/null
@@ -1,63 +0,0 @@
-; RUN: llc < %s -march=x86 -relocation-model=static -mtriple=i686-apple-darwin | FileCheck %s -check-prefix=STATIC
-; RUN: llc < %s -march=x86 -relocation-model=pic | FileCheck %s -check-prefix=PIC
-
-; By starting the IV at -64 instead of 0, a cmp is eliminated,
-; as the flags from the add can be used directly.
-
-; STATIC: movl $-64, %ecx
-
-; STATIC: movl %eax, _state+76(%ecx)
-; STATIC: addl $16, %ecx
-; STATIC: jne
-
-; In PIC mode the symbol can't be folded, so the change-compare-stride
-; trick applies.
-
-; PIC: cmpl $64
-
-@state = external global [0 x i32] ; <[0 x i32]*> [#uses=4]
-@S = external global [0 x i32] ; <[0 x i32]*> [#uses=4]
-
-define i32 @foo() nounwind {
-entry:
- br label %bb
-
-bb: ; preds = %bb, %entry
- %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
- %t.063.0 = phi i32 [ 0, %entry ], [ %tmp47, %bb ] ; <i32> [#uses=1]
- %j.065.0 = shl i32 %indvar, 2 ; <i32> [#uses=4]
- %tmp3 = getelementptr [0 x i32]* @state, i32 0, i32 %j.065.0 ; <i32*> [#uses=2]
- %tmp4 = load i32* %tmp3, align 4 ; <i32> [#uses=1]
- %tmp6 = getelementptr [0 x i32]* @S, i32 0, i32 %t.063.0 ; <i32*> [#uses=1]
- %tmp7 = load i32* %tmp6, align 4 ; <i32> [#uses=1]
- %tmp8 = xor i32 %tmp7, %tmp4 ; <i32> [#uses=2]
- store i32 %tmp8, i32* %tmp3, align 4
- %tmp1378 = or i32 %j.065.0, 1 ; <i32> [#uses=1]
- %tmp16 = getelementptr [0 x i32]* @state, i32 0, i32 %tmp1378 ; <i32*> [#uses=2]
- %tmp17 = load i32* %tmp16, align 4 ; <i32> [#uses=1]
- %tmp19 = getelementptr [0 x i32]* @S, i32 0, i32 %tmp8 ; <i32*> [#uses=1]
- %tmp20 = load i32* %tmp19, align 4 ; <i32> [#uses=1]
- %tmp21 = xor i32 %tmp20, %tmp17 ; <i32> [#uses=2]
- store i32 %tmp21, i32* %tmp16, align 4
- %tmp2680 = or i32 %j.065.0, 2 ; <i32> [#uses=1]
- %tmp29 = getelementptr [0 x i32]* @state, i32 0, i32 %tmp2680 ; <i32*> [#uses=2]
- %tmp30 = load i32* %tmp29, align 4 ; <i32> [#uses=1]
- %tmp32 = getelementptr [0 x i32]* @S, i32 0, i32 %tmp21 ; <i32*> [#uses=1]
- %tmp33 = load i32* %tmp32, align 4 ; <i32> [#uses=1]
- %tmp34 = xor i32 %tmp33, %tmp30 ; <i32> [#uses=2]
- store i32 %tmp34, i32* %tmp29, align 4
- %tmp3982 = or i32 %j.065.0, 3 ; <i32> [#uses=1]
- %tmp42 = getelementptr [0 x i32]* @state, i32 0, i32 %tmp3982 ; <i32*> [#uses=2]
- %tmp43 = load i32* %tmp42, align 4 ; <i32> [#uses=1]
- %tmp45 = getelementptr [0 x i32]* @S, i32 0, i32 %tmp34 ; <i32*> [#uses=1]
- %tmp46 = load i32* %tmp45, align 4 ; <i32> [#uses=1]
- %tmp47 = xor i32 %tmp46, %tmp43 ; <i32> [#uses=3]
- store i32 %tmp47, i32* %tmp42, align 4
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, 4 ; <i1> [#uses=1]
- br i1 %exitcond, label %bb57, label %bb
-
-bb57: ; preds = %bb
- %tmp59 = and i32 %tmp47, 255 ; <i32> [#uses=1]
- ret i32 %tmp59
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce5.ll b/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce5.ll
deleted file mode 100644
index b07eeb6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce5.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86 | grep inc | count 1
-
-@X = weak global i16 0 ; <i16*> [#uses=1]
-@Y = weak global i16 0 ; <i16*> [#uses=1]
-
-define void @foo(i32 %N) nounwind {
-entry:
- %tmp1019 = icmp sgt i32 %N, 0 ; <i1> [#uses=1]
- br i1 %tmp1019, label %bb, label %return
-
-bb: ; preds = %bb, %entry
- %i.014.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
- %tmp1 = trunc i32 %i.014.0 to i16 ; <i16> [#uses=2]
- volatile store i16 %tmp1, i16* @X, align 2
- %tmp34 = shl i16 %tmp1, 2 ; <i16> [#uses=1]
- volatile store i16 %tmp34, i16* @Y, align 2
- %indvar.next = add i32 %i.014.0, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb
-
-return: ; preds = %bb, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce6.ll b/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce6.ll
deleted file mode 100644
index bbafcf7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce6.ll
+++ /dev/null
@@ -1,66 +0,0 @@
-; RUN: llc < %s -march=x86-64 | not grep inc
-
-define fastcc i32 @decodeMP3(i32 %isize, i32* %done) nounwind {
-entry:
- br i1 false, label %cond_next191, label %cond_true189
-
-cond_true189: ; preds = %entry
- ret i32 0
-
-cond_next191: ; preds = %entry
- br i1 false, label %cond_next37.i, label %cond_false.i9
-
-cond_false.i9: ; preds = %cond_next191
- ret i32 0
-
-cond_next37.i: ; preds = %cond_next191
- br i1 false, label %cond_false50.i, label %cond_true44.i
-
-cond_true44.i: ; preds = %cond_next37.i
- br i1 false, label %cond_true11.i.i, label %bb414.preheader.i
-
-cond_true11.i.i: ; preds = %cond_true44.i
- ret i32 0
-
-cond_false50.i: ; preds = %cond_next37.i
- ret i32 0
-
-bb414.preheader.i: ; preds = %cond_true44.i
- br i1 false, label %bb.i18, label %do_layer3.exit
-
-bb.i18: ; preds = %bb414.preheader.i
- br i1 false, label %bb358.i, label %cond_true79.i
-
-cond_true79.i: ; preds = %bb.i18
- ret i32 0
-
-bb331.i: ; preds = %bb358.i, %cond_true.i149.i
- br i1 false, label %cond_true.i149.i, label %cond_false.i151.i
-
-cond_true.i149.i: ; preds = %bb331.i
- br i1 false, label %bb178.preheader.i.i, label %bb331.i
-
-cond_false.i151.i: ; preds = %bb331.i
- ret i32 0
-
-bb163.i.i: ; preds = %bb178.preheader.i.i, %bb163.i.i
- %rawout2.451.rec.i.i = phi i64 [ 0, %bb178.preheader.i.i ], [ %indvar.next260.i, %bb163.i.i ] ; <i64> [#uses=2]
- %i.052.i.i = trunc i64 %rawout2.451.rec.i.i to i32 ; <i32> [#uses=1]
- %tmp165.i144.i = shl i32 %i.052.i.i, 5 ; <i32> [#uses=1]
- %tmp165169.i.i = sext i32 %tmp165.i144.i to i64 ; <i64> [#uses=0]
- %indvar.next260.i = add i64 %rawout2.451.rec.i.i, 1 ; <i64> [#uses=2]
- %exitcond261.i = icmp eq i64 %indvar.next260.i, 18 ; <i1> [#uses=1]
- br i1 %exitcond261.i, label %bb178.preheader.i.i, label %bb163.i.i
-
-bb178.preheader.i.i: ; preds = %bb163.i.i, %cond_true.i149.i
- br label %bb163.i.i
-
-bb358.i: ; preds = %bb.i18
- br i1 false, label %bb331.i, label %bb406.i
-
-bb406.i: ; preds = %bb358.i
- ret i32 0
-
-do_layer3.exit: ; preds = %bb414.preheader.i
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce7.ll b/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce7.ll
deleted file mode 100644
index 4b565a6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce7.ll
+++ /dev/null
@@ -1,44 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep imul
-
-target triple = "i386-apple-darwin9.6"
- %struct.III_psy_xmin = type { [22 x double], [13 x [3 x double]] }
- %struct.III_scalefac_t = type { [22 x i32], [13 x [3 x i32]] }
- %struct.gr_info = type { i32, i32, i32, i32, i32, i32, i32, i32, [3 x i32], [3 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i32, i32*, [4 x i32] }
- %struct.lame_global_flags = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i8*, i8*, i32, i32, float, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, float, i32, i32, i32, float, float, float, float, i32, i32, i32, i32, i32, i32, i32, i32 }
-
-define fastcc void @outer_loop(%struct.lame_global_flags* nocapture %gfp, double* nocapture %xr, i32 %targ_bits, double* nocapture %best_noise, %struct.III_psy_xmin* nocapture %l3_xmin, i32* nocapture %l3_enc, %struct.III_scalefac_t* nocapture %scalefac, %struct.gr_info* nocapture %cod_info, i32 %ch) nounwind {
-entry:
- br label %bb4
-
-bb4: ; preds = %bb4, %entry
- br i1 true, label %bb5, label %bb4
-
-bb5: ; preds = %bb4
- br i1 true, label %bb28.i37, label %bb.i4
-
-bb.i4: ; preds = %bb.i4, %bb5
- br label %bb.i4
-
-bb28.i37: ; preds = %bb33.i47, %bb5
- %i.1.reg2mem.0.i = phi i32 [ %0, %bb33.i47 ], [ 0, %bb5 ] ; <i32> [#uses=2]
- %0 = add i32 %i.1.reg2mem.0.i, 1 ; <i32> [#uses=2]
- br label %bb29.i38
-
-bb29.i38: ; preds = %bb33.i47, %bb28.i37
- %indvar32.i = phi i32 [ %indvar.next33.i, %bb33.i47 ], [ 0, %bb28.i37 ] ; <i32> [#uses=2]
- %sfb.314.i = add i32 %indvar32.i, 0 ; <i32> [#uses=3]
- %1 = getelementptr [4 x [21 x double]]* null, i32 0, i32 %0, i32 %sfb.314.i ; <double*> [#uses=1]
- %2 = load double* %1, align 8 ; <double> [#uses=0]
- br i1 false, label %bb30.i41, label %bb33.i47
-
-bb30.i41: ; preds = %bb29.i38
- %3 = getelementptr %struct.III_scalefac_t* null, i32 0, i32 1, i32 %sfb.314.i, i32 %i.1.reg2mem.0.i ; <i32*> [#uses=1]
- store i32 0, i32* %3, align 4
- br label %bb33.i47
-
-bb33.i47: ; preds = %bb30.i41, %bb29.i38
- %4 = add i32 %sfb.314.i, 1 ; <i32> [#uses=1]
- %phitmp.i46 = icmp ugt i32 %4, 11 ; <i1> [#uses=1]
- %indvar.next33.i = add i32 %indvar32.i, 1 ; <i32> [#uses=1]
- br i1 %phitmp.i46, label %bb28.i37, label %bb29.i38
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce8.ll b/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce8.ll
deleted file mode 100644
index 6b2247d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/loop-strength-reduce8.ll
+++ /dev/null
@@ -1,84 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin | FileCheck %s
-
-; CHECK: leal 16(%eax), %edx
-; CHECK: align
-; CHECK: addl $4, %edx
-; CHECK: decl %ecx
-; CHECK: jne LBB1_2
-
- %struct.CUMULATIVE_ARGS = type { i32, i32, i32, i32, i32, i32, i32 }
- %struct.bitmap_element = type { %struct.bitmap_element*, %struct.bitmap_element*, i32, [2 x i64] }
- %struct.bitmap_head_def = type { %struct.bitmap_element*, %struct.bitmap_element*, i32 }
- %struct.branch_path = type { %struct.rtx_def*, i32 }
- %struct.c_lang_decl = type <{ i8, [3 x i8] }>
- %struct.constant_descriptor = type { %struct.constant_descriptor*, i8*, %struct.rtx_def*, { x86_fp80 } }
- %struct.eh_region = type { %struct.eh_region*, %struct.eh_region*, %struct.eh_region*, i32, %struct.bitmap_head_def*, i32, { { %struct.eh_region*, %struct.eh_region*, %struct.eh_region*, %struct.rtx_def* } }, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
- %struct.eh_status = type { %struct.eh_region*, %struct.eh_region**, %struct.eh_region*, %struct.eh_region*, %struct.tree_node*, %struct.rtx_def*, %struct.rtx_def*, i32, i32, %struct.varray_head_tag*, %struct.varray_head_tag*, %struct.varray_head_tag*, %struct.branch_path*, i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
- %struct.emit_status = type { i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.tree_node*, %struct.sequence_stack*, i32, i32, i8*, i32, i8*, %struct.tree_node**, %struct.rtx_def** }
- %struct.equiv_table = type { %struct.rtx_def*, %struct.rtx_def* }
- %struct.expr_status = type { i32, i32, i32, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def* }
- %struct.function = type { %struct.eh_status*, %struct.stmt_status*, %struct.expr_status*, %struct.emit_status*, %struct.varasm_status*, i8*, %struct.tree_node*, %struct.function*, i32, i32, i32, i32, %struct.rtx_def*, %struct.CUMULATIVE_ARGS, %struct.rtx_def*, %struct.rtx_def*, i8*, %struct.initial_value_struct*, i32, %struct.tree_node*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.tree_node*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, %struct.rtx_def*, i64, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct.rtx_def*, i32, %struct.rtx_def**, %struct.temp_slot*, i32, i32, i32, %struct.var_refs_queue*, i32, i32, i8*, %struct.tree_node*, %struct.rtx_def*, i32, i32, %struct.machine_function*, i32, i32, %struct.language_function*, %struct.rtx_def*, i8, i8, i8 }
- %struct.goto_fixup = type { %struct.goto_fixup*, %struct.rtx_def*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, i32, %struct.rtx_def*, %struct.tree_node* }
- %struct.initial_value_struct = type { i32, i32, %struct.equiv_table* }
- %struct.label_chain = type { %struct.label_chain*, %struct.tree_node* }
- %struct.lang_decl = type { %struct.c_lang_decl, %struct.tree_node* }
- %struct.language_function = type { %struct.stmt_tree_s, %struct.tree_node* }
- %struct.machine_function = type { [59 x [3 x %struct.rtx_def*]], i32, i32 }
- %struct.nesting = type { %struct.nesting*, %struct.nesting*, i32, %struct.rtx_def*, { { i32, %struct.rtx_def*, %struct.rtx_def*, %struct.nesting*, %struct.tree_node*, %struct.tree_node*, %struct.label_chain*, i32, i32, i32, i32, %struct.rtx_def*, %struct.tree_node** } } }
- %struct.pool_constant = type { %struct.constant_descriptor*, %struct.pool_constant*, %struct.pool_constant*, %struct.rtx_def*, i32, i32, i32, i64, i32 }
- %struct.rtunion = type { i64 }
- %struct.rtx_def = type { i16, i8, i8, [1 x %struct.rtunion] }
- %struct.sequence_stack = type { %struct.rtx_def*, %struct.rtx_def*, %struct.tree_node*, %struct.sequence_stack* }
- %struct.stmt_status = type { %struct.nesting*, %struct.nesting*, %struct.nesting*, %struct.nesting*, %struct.nesting*, %struct.nesting*, i32, i32, %struct.tree_node*, %struct.rtx_def*, i32, i8*, i32, %struct.goto_fixup* }
- %struct.stmt_tree_s = type { %struct.tree_node*, %struct.tree_node*, i8*, i32 }
- %struct.temp_slot = type { %struct.temp_slot*, %struct.rtx_def*, %struct.rtx_def*, i32, i64, %struct.tree_node*, %struct.tree_node*, i8, i8, i32, i32, i64, i64 }
- %struct.tree_common = type { %struct.tree_node*, %struct.tree_node*, i8, i8, i8, i8 }
- %struct.tree_decl = type { %struct.tree_common, i8*, i32, i32, %struct.tree_node*, i8, i8, i8, i8, i8, i8, %struct.rtunion, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, %struct.rtx_def*, %struct.rtx_def*, { %struct.function* }, %struct.tree_node*, %struct.tree_node*, %struct.tree_node*, i64, %struct.lang_decl* }
- %struct.tree_exp = type { %struct.tree_common, i32, [1 x %struct.tree_node*] }
- %struct.tree_node = type { %struct.tree_decl }
- %struct.var_refs_queue = type { %struct.rtx_def*, i32, i32, %struct.var_refs_queue* }
- %struct.varasm_status = type { %struct.constant_descriptor**, %struct.pool_constant**, %struct.pool_constant*, %struct.pool_constant*, i64, %struct.rtx_def* }
- %struct.varray_data = type { [1 x i64] }
- %struct.varray_head_tag = type { i32, i32, i32, i8*, %struct.varray_data }
-@lineno = internal global i32 0 ; <i32*> [#uses=1]
-@tree_code_length = internal global [256 x i32] zeroinitializer
-@llvm.used = appending global [1 x i8*] [ i8* bitcast (%struct.tree_node* (i32, ...)* @build_stmt to i8*) ], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define %struct.tree_node* @build_stmt(i32 %code, ...) nounwind {
-entry:
- %p = alloca i8* ; <i8**> [#uses=3]
- %p1 = bitcast i8** %p to i8* ; <i8*> [#uses=2]
- call void @llvm.va_start(i8* %p1)
- %0 = call fastcc %struct.tree_node* @make_node(i32 %code) nounwind ; <%struct.tree_node*> [#uses=2]
- %1 = getelementptr [256 x i32]* @tree_code_length, i32 0, i32 %code ; <i32*> [#uses=1]
- %2 = load i32* %1, align 4 ; <i32> [#uses=2]
- %3 = load i32* @lineno, align 4 ; <i32> [#uses=1]
- %4 = bitcast %struct.tree_node* %0 to %struct.tree_exp* ; <%struct.tree_exp*> [#uses=2]
- %5 = getelementptr %struct.tree_exp* %4, i32 0, i32 1 ; <i32*> [#uses=1]
- store i32 %3, i32* %5, align 4
- %6 = icmp sgt i32 %2, 0 ; <i1> [#uses=1]
- br i1 %6, label %bb, label %bb3
-
-bb: ; preds = %bb, %entry
- %i.01 = phi i32 [ %indvar.next, %bb ], [ 0, %entry ] ; <i32> [#uses=2]
- %7 = load i8** %p, align 4 ; <i8*> [#uses=2]
- %8 = getelementptr i8* %7, i32 4 ; <i8*> [#uses=1]
- store i8* %8, i8** %p, align 4
- %9 = bitcast i8* %7 to %struct.tree_node** ; <%struct.tree_node**> [#uses=1]
- %10 = load %struct.tree_node** %9, align 4 ; <%struct.tree_node*> [#uses=1]
- %11 = getelementptr %struct.tree_exp* %4, i32 0, i32 2, i32 %i.01 ; <%struct.tree_node**> [#uses=1]
- store %struct.tree_node* %10, %struct.tree_node** %11, align 4
- %indvar.next = add i32 %i.01, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %2 ; <i1> [#uses=1]
- br i1 %exitcond, label %bb3, label %bb
-
-bb3: ; preds = %bb, %entry
- call void @llvm.va_end(i8* %p1)
- ret %struct.tree_node* %0
-}
-
-declare void @llvm.va_start(i8*) nounwind
-
-declare void @llvm.va_end(i8*) nounwind
-
-declare fastcc %struct.tree_node* @make_node(i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll b/libclamav/c++/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
deleted file mode 100644
index 474450a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
+++ /dev/null
@@ -1,137 +0,0 @@
-; RUN: llc -march=x86-64 < %s | FileCheck %s
-
-; CHECK: decq
-; CHECK-NEXT: jne
-
-@Te0 = external global [256 x i32] ; <[256 x i32]*> [#uses=5]
-@Te1 = external global [256 x i32] ; <[256 x i32]*> [#uses=4]
-@Te3 = external global [256 x i32] ; <[256 x i32]*> [#uses=2]
-
-define void @t(i8* nocapture %in, i8* nocapture %out, i32* nocapture %rk, i32 %r) nounwind ssp {
-entry:
- %0 = load i32* %rk, align 4 ; <i32> [#uses=1]
- %1 = getelementptr i32* %rk, i64 1 ; <i32*> [#uses=1]
- %2 = load i32* %1, align 4 ; <i32> [#uses=1]
- %tmp15 = add i32 %r, -1 ; <i32> [#uses=1]
- %tmp.16 = zext i32 %tmp15 to i64 ; <i64> [#uses=2]
- br label %bb
-
-bb: ; preds = %bb1, %entry
- %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %bb1 ] ; <i64> [#uses=3]
- %s1.0 = phi i32 [ %2, %entry ], [ %56, %bb1 ] ; <i32> [#uses=2]
- %s0.0 = phi i32 [ %0, %entry ], [ %43, %bb1 ] ; <i32> [#uses=2]
- %tmp18 = shl i64 %indvar, 4 ; <i64> [#uses=4]
- %rk26 = bitcast i32* %rk to i8* ; <i8*> [#uses=6]
- %3 = lshr i32 %s0.0, 24 ; <i32> [#uses=1]
- %4 = zext i32 %3 to i64 ; <i64> [#uses=1]
- %5 = getelementptr [256 x i32]* @Te0, i64 0, i64 %4 ; <i32*> [#uses=1]
- %6 = load i32* %5, align 4 ; <i32> [#uses=1]
- %7 = lshr i32 %s1.0, 16 ; <i32> [#uses=1]
- %8 = and i32 %7, 255 ; <i32> [#uses=1]
- %9 = zext i32 %8 to i64 ; <i64> [#uses=1]
- %10 = getelementptr [256 x i32]* @Te1, i64 0, i64 %9 ; <i32*> [#uses=1]
- %11 = load i32* %10, align 4 ; <i32> [#uses=1]
- %ctg2.sum2728 = or i64 %tmp18, 8 ; <i64> [#uses=1]
- %12 = getelementptr i8* %rk26, i64 %ctg2.sum2728 ; <i8*> [#uses=1]
- %13 = bitcast i8* %12 to i32* ; <i32*> [#uses=1]
- %14 = load i32* %13, align 4 ; <i32> [#uses=1]
- %15 = xor i32 %11, %6 ; <i32> [#uses=1]
- %16 = xor i32 %15, %14 ; <i32> [#uses=3]
- %17 = lshr i32 %s1.0, 24 ; <i32> [#uses=1]
- %18 = zext i32 %17 to i64 ; <i64> [#uses=1]
- %19 = getelementptr [256 x i32]* @Te0, i64 0, i64 %18 ; <i32*> [#uses=1]
- %20 = load i32* %19, align 4 ; <i32> [#uses=1]
- %21 = and i32 %s0.0, 255 ; <i32> [#uses=1]
- %22 = zext i32 %21 to i64 ; <i64> [#uses=1]
- %23 = getelementptr [256 x i32]* @Te3, i64 0, i64 %22 ; <i32*> [#uses=1]
- %24 = load i32* %23, align 4 ; <i32> [#uses=1]
- %ctg2.sum2930 = or i64 %tmp18, 12 ; <i64> [#uses=1]
- %25 = getelementptr i8* %rk26, i64 %ctg2.sum2930 ; <i8*> [#uses=1]
- %26 = bitcast i8* %25 to i32* ; <i32*> [#uses=1]
- %27 = load i32* %26, align 4 ; <i32> [#uses=1]
- %28 = xor i32 %24, %20 ; <i32> [#uses=1]
- %29 = xor i32 %28, %27 ; <i32> [#uses=4]
- %30 = lshr i32 %16, 24 ; <i32> [#uses=1]
- %31 = zext i32 %30 to i64 ; <i64> [#uses=1]
- %32 = getelementptr [256 x i32]* @Te0, i64 0, i64 %31 ; <i32*> [#uses=1]
- %33 = load i32* %32, align 4 ; <i32> [#uses=2]
- %exitcond = icmp eq i64 %indvar, %tmp.16 ; <i1> [#uses=1]
- br i1 %exitcond, label %bb2, label %bb1
-
-bb1: ; preds = %bb
- %ctg2.sum31 = add i64 %tmp18, 16 ; <i64> [#uses=1]
- %34 = getelementptr i8* %rk26, i64 %ctg2.sum31 ; <i8*> [#uses=1]
- %35 = bitcast i8* %34 to i32* ; <i32*> [#uses=1]
- %36 = lshr i32 %29, 16 ; <i32> [#uses=1]
- %37 = and i32 %36, 255 ; <i32> [#uses=1]
- %38 = zext i32 %37 to i64 ; <i64> [#uses=1]
- %39 = getelementptr [256 x i32]* @Te1, i64 0, i64 %38 ; <i32*> [#uses=1]
- %40 = load i32* %39, align 4 ; <i32> [#uses=1]
- %41 = load i32* %35, align 4 ; <i32> [#uses=1]
- %42 = xor i32 %40, %33 ; <i32> [#uses=1]
- %43 = xor i32 %42, %41 ; <i32> [#uses=1]
- %44 = lshr i32 %29, 24 ; <i32> [#uses=1]
- %45 = zext i32 %44 to i64 ; <i64> [#uses=1]
- %46 = getelementptr [256 x i32]* @Te0, i64 0, i64 %45 ; <i32*> [#uses=1]
- %47 = load i32* %46, align 4 ; <i32> [#uses=1]
- %48 = and i32 %16, 255 ; <i32> [#uses=1]
- %49 = zext i32 %48 to i64 ; <i64> [#uses=1]
- %50 = getelementptr [256 x i32]* @Te3, i64 0, i64 %49 ; <i32*> [#uses=1]
- %51 = load i32* %50, align 4 ; <i32> [#uses=1]
- %ctg2.sum32 = add i64 %tmp18, 20 ; <i64> [#uses=1]
- %52 = getelementptr i8* %rk26, i64 %ctg2.sum32 ; <i8*> [#uses=1]
- %53 = bitcast i8* %52 to i32* ; <i32*> [#uses=1]
- %54 = load i32* %53, align 4 ; <i32> [#uses=1]
- %55 = xor i32 %51, %47 ; <i32> [#uses=1]
- %56 = xor i32 %55, %54 ; <i32> [#uses=1]
- %indvar.next = add i64 %indvar, 1 ; <i64> [#uses=1]
- br label %bb
-
-bb2: ; preds = %bb
- %tmp10 = shl i64 %tmp.16, 4 ; <i64> [#uses=2]
- %ctg2.sum = add i64 %tmp10, 16 ; <i64> [#uses=1]
- %tmp1213 = getelementptr i8* %rk26, i64 %ctg2.sum ; <i8*> [#uses=1]
- %57 = bitcast i8* %tmp1213 to i32* ; <i32*> [#uses=1]
- %58 = and i32 %33, -16777216 ; <i32> [#uses=1]
- %59 = lshr i32 %29, 16 ; <i32> [#uses=1]
- %60 = and i32 %59, 255 ; <i32> [#uses=1]
- %61 = zext i32 %60 to i64 ; <i64> [#uses=1]
- %62 = getelementptr [256 x i32]* @Te1, i64 0, i64 %61 ; <i32*> [#uses=1]
- %63 = load i32* %62, align 4 ; <i32> [#uses=1]
- %64 = and i32 %63, 16711680 ; <i32> [#uses=1]
- %65 = or i32 %64, %58 ; <i32> [#uses=1]
- %66 = load i32* %57, align 4 ; <i32> [#uses=1]
- %67 = xor i32 %65, %66 ; <i32> [#uses=2]
- %68 = lshr i32 %29, 8 ; <i32> [#uses=1]
- %69 = zext i32 %68 to i64 ; <i64> [#uses=1]
- %70 = getelementptr [256 x i32]* @Te0, i64 0, i64 %69 ; <i32*> [#uses=1]
- %71 = load i32* %70, align 4 ; <i32> [#uses=1]
- %72 = and i32 %71, -16777216 ; <i32> [#uses=1]
- %73 = and i32 %16, 255 ; <i32> [#uses=1]
- %74 = zext i32 %73 to i64 ; <i64> [#uses=1]
- %75 = getelementptr [256 x i32]* @Te1, i64 0, i64 %74 ; <i32*> [#uses=1]
- %76 = load i32* %75, align 4 ; <i32> [#uses=1]
- %77 = and i32 %76, 16711680 ; <i32> [#uses=1]
- %78 = or i32 %77, %72 ; <i32> [#uses=1]
- %ctg2.sum25 = add i64 %tmp10, 20 ; <i64> [#uses=1]
- %79 = getelementptr i8* %rk26, i64 %ctg2.sum25 ; <i8*> [#uses=1]
- %80 = bitcast i8* %79 to i32* ; <i32*> [#uses=1]
- %81 = load i32* %80, align 4 ; <i32> [#uses=1]
- %82 = xor i32 %78, %81 ; <i32> [#uses=2]
- %83 = lshr i32 %67, 24 ; <i32> [#uses=1]
- %84 = trunc i32 %83 to i8 ; <i8> [#uses=1]
- store i8 %84, i8* %out, align 1
- %85 = lshr i32 %67, 16 ; <i32> [#uses=1]
- %86 = trunc i32 %85 to i8 ; <i8> [#uses=1]
- %87 = getelementptr i8* %out, i64 1 ; <i8*> [#uses=1]
- store i8 %86, i8* %87, align 1
- %88 = getelementptr i8* %out, i64 4 ; <i8*> [#uses=1]
- %89 = lshr i32 %82, 24 ; <i32> [#uses=1]
- %90 = trunc i32 %89 to i8 ; <i8> [#uses=1]
- store i8 %90, i8* %88, align 1
- %91 = lshr i32 %82, 16 ; <i32> [#uses=1]
- %92 = trunc i32 %91 to i8 ; <i8> [#uses=1]
- %93 = getelementptr i8* %out, i64 5 ; <i8*> [#uses=1]
- store i8 %92, i8* %93, align 1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/lsr-negative-stride.ll b/libclamav/c++/llvm/test/CodeGen/X86/lsr-negative-stride.ll
deleted file mode 100644
index b08356c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/lsr-negative-stride.ll
+++ /dev/null
@@ -1,51 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-; RUN: not grep neg %t
-; RUN: not grep sub.*esp %t
-; RUN: not grep esi %t
-; RUN: not grep push %t
-
-; This corresponds to:
-;int t(int a, int b) {
-; while (a != b) {
-; if (a > b)
-; a -= b;
-; else
-; b -= a;
-; }
-; return a;
-;}
-
-
-define i32 @t(i32 %a, i32 %b) nounwind {
-entry:
- %tmp1434 = icmp eq i32 %a, %b ; <i1> [#uses=1]
- br i1 %tmp1434, label %bb17, label %bb.outer
-
-bb.outer: ; preds = %cond_false, %entry
- %b_addr.021.0.ph = phi i32 [ %b, %entry ], [ %tmp10, %cond_false ] ; <i32> [#uses=5]
- %a_addr.026.0.ph = phi i32 [ %a, %entry ], [ %a_addr.026.0, %cond_false ] ; <i32> [#uses=1]
- br label %bb
-
-bb: ; preds = %cond_true, %bb.outer
- %indvar = phi i32 [ 0, %bb.outer ], [ %indvar.next, %cond_true ] ; <i32> [#uses=2]
- %tmp. = sub i32 0, %b_addr.021.0.ph ; <i32> [#uses=1]
- %tmp.40 = mul i32 %indvar, %tmp. ; <i32> [#uses=1]
- %a_addr.026.0 = add i32 %tmp.40, %a_addr.026.0.ph ; <i32> [#uses=6]
- %tmp3 = icmp sgt i32 %a_addr.026.0, %b_addr.021.0.ph ; <i1> [#uses=1]
- br i1 %tmp3, label %cond_true, label %cond_false
-
-cond_true: ; preds = %bb
- %tmp7 = sub i32 %a_addr.026.0, %b_addr.021.0.ph ; <i32> [#uses=2]
- %tmp1437 = icmp eq i32 %tmp7, %b_addr.021.0.ph ; <i1> [#uses=1]
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
- br i1 %tmp1437, label %bb17, label %bb
-
-cond_false: ; preds = %bb
- %tmp10 = sub i32 %b_addr.021.0.ph, %a_addr.026.0 ; <i32> [#uses=2]
- %tmp14 = icmp eq i32 %a_addr.026.0, %tmp10 ; <i1> [#uses=1]
- br i1 %tmp14, label %bb17, label %bb.outer
-
-bb17: ; preds = %cond_false, %cond_true, %entry
- %a_addr.026.1 = phi i32 [ %a, %entry ], [ %tmp7, %cond_true ], [ %a_addr.026.0, %cond_false ] ; <i32> [#uses=1]
- ret i32 %a_addr.026.1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/lsr-overflow.ll b/libclamav/c++/llvm/test/CodeGen/X86/lsr-overflow.ll
deleted file mode 100644
index 0b0214c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/lsr-overflow.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
-; The comparison uses the pre-inc value, which could lead LSR to
-; try to compute -INT64_MIN.
-
-; CHECK: movabsq $-9223372036854775808, %rax
-; CHECK: cmpq %rax, %rbx
-; CHECK: sete %al
-
-declare i64 @bar()
-
-define i1 @foo() nounwind {
-entry:
- br label %for.cond.i
-
-for.cond.i:
- %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.cond.i ]
- %t = call i64 @bar()
- %indvar.next = add i64 %indvar, 1
- %s = icmp ne i64 %indvar.next, %t
- br i1 %s, label %for.cond.i, label %__ABContainsLabel.exit
-
-__ABContainsLabel.exit:
- %cmp = icmp eq i64 %indvar, 9223372036854775807
- ret i1 %cmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/lsr-reuse-trunc.ll b/libclamav/c++/llvm/test/CodeGen/X86/lsr-reuse-trunc.ll
deleted file mode 100644
index d1d7144..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/lsr-reuse-trunc.ll
+++ /dev/null
@@ -1,59 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
-; Full strength reduction wouldn't reduce register pressure, so LSR should
-; stick with indexing here.
-
-; CHECK: movaps (%rsi,%rax,4), %xmm3
-; CHECK: movaps %xmm3, (%rdi,%rax,4)
-; CHECK: addq $4, %rax
-; CHECK: cmpl %eax, (%rdx)
-; CHECK-NEXT: jg
-
-define void @vvfloorf(float* nocapture %y, float* nocapture %x, i32* nocapture %n) nounwind {
-entry:
- %0 = load i32* %n, align 4
- %1 = icmp sgt i32 %0, 0
- br i1 %1, label %bb, label %return
-
-bb:
- %indvar = phi i64 [ %indvar.next, %bb ], [ 0, %entry ]
- %tmp = shl i64 %indvar, 2
- %scevgep = getelementptr float* %y, i64 %tmp
- %scevgep9 = bitcast float* %scevgep to <4 x float>*
- %scevgep10 = getelementptr float* %x, i64 %tmp
- %scevgep1011 = bitcast float* %scevgep10 to <4 x float>*
- %2 = load <4 x float>* %scevgep1011, align 16
- %3 = bitcast <4 x float> %2 to <4 x i32>
- %4 = and <4 x i32> %3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
- %5 = bitcast <4 x i32> %4 to <4 x float>
- %6 = and <4 x i32> %3, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
- %7 = tail call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %5, <4 x float> <float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06>, i8 5) nounwind
- %tmp.i4 = bitcast <4 x float> %7 to <4 x i32>
- %8 = xor <4 x i32> %tmp.i4, <i32 -1, i32 -1, i32 -1, i32 -1>
- %9 = and <4 x i32> %8, <i32 1258291200, i32 1258291200, i32 1258291200, i32 1258291200>
- %10 = or <4 x i32> %9, %6
- %11 = bitcast <4 x i32> %10 to <4 x float>
- %12 = fadd <4 x float> %2, %11
- %13 = fsub <4 x float> %12, %11
- %14 = tail call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %2, <4 x float> %13, i8 1) nounwind
- %15 = bitcast <4 x float> %14 to <4 x i32>
- %16 = tail call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %15) nounwind readnone
- %17 = fadd <4 x float> %13, %16
- %tmp.i = bitcast <4 x float> %17 to <4 x i32>
- %18 = or <4 x i32> %tmp.i, %6
- %19 = bitcast <4 x i32> %18 to <4 x float>
- store <4 x float> %19, <4 x float>* %scevgep9, align 16
- %tmp12 = add i64 %tmp, 4
- %tmp13 = trunc i64 %tmp12 to i32
- %20 = load i32* %n, align 4
- %21 = icmp sgt i32 %20, %tmp13
- %indvar.next = add i64 %indvar, 1
- br i1 %21, label %bb, label %return
-
-return:
- ret void
-}
-
-declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
-
-declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/lsr-reuse.ll b/libclamav/c++/llvm/test/CodeGen/X86/lsr-reuse.ll
deleted file mode 100644
index 2f6fb3f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/lsr-reuse.ll
+++ /dev/null
@@ -1,442 +0,0 @@
-; RUN: llc < %s -march=x86-64 -O3 -asm-verbose=false | FileCheck %s
-target datalayout = "e-p:64:64:64"
-target triple = "x86_64-unknown-unknown"
-
-; Full strength reduction reduces register pressure from 5 to 4 here.
-; Instruction selection should use the FLAGS value from the dec for
-; the branch. Scheduling should push the adds upwards.
-
-; CHECK: full_me_0:
-; CHECK: movsd (%rsi), %xmm0
-; CHECK: addq $8, %rsi
-; CHECK: mulsd (%rdx), %xmm0
-; CHECK: addq $8, %rdx
-; CHECK: movsd %xmm0, (%rdi)
-; CHECK: addq $8, %rdi
-; CHECK: decq %rcx
-; CHECK: jne
-
-define void @full_me_0(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
-entry:
- %t0 = icmp sgt i64 %n, 0
- br i1 %t0, label %loop, label %return
-
-loop:
- %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
- %Ai = getelementptr inbounds double* %A, i64 %i
- %Bi = getelementptr inbounds double* %B, i64 %i
- %Ci = getelementptr inbounds double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
- %m = fmul double %t1, %t2
- store double %m, double* %Ai
- %i.next = add nsw i64 %i, 1
- %exitcond = icmp eq i64 %i.next, %n
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-; Mostly-full strength reduction means we do full strength reduction on all
-; except for the offsets.
-;
-; Given a choice between constant offsets -2048 and 2048, choose the negative
-; value, because at boundary conditions it has a smaller encoding.
-; TODO: That's an over-general heuristic. It would be better for the target
-; to indicate what the encoding cost would be. Then using a 2048 offset
-; would be better on x86-64, since the start value would be 0 instead of
-; 2048.
-
-; CHECK: mostly_full_me_0:
-; CHECK: movsd -2048(%rsi), %xmm0
-; CHECK: mulsd -2048(%rdx), %xmm0
-; CHECK: movsd %xmm0, -2048(%rdi)
-; CHECK: movsd (%rsi), %xmm0
-; CHECK: addq $8, %rsi
-; CHECK: divsd (%rdx), %xmm0
-; CHECK: addq $8, %rdx
-; CHECK: movsd %xmm0, (%rdi)
-; CHECK: addq $8, %rdi
-; CHECK: decq %rcx
-; CHECK: jne
-
-define void @mostly_full_me_0(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
-entry:
- %t0 = icmp sgt i64 %n, 0
- br i1 %t0, label %loop, label %return
-
-loop:
- %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
- %Ai = getelementptr inbounds double* %A, i64 %i
- %Bi = getelementptr inbounds double* %B, i64 %i
- %Ci = getelementptr inbounds double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
- %m = fmul double %t1, %t2
- store double %m, double* %Ai
- %j = add i64 %i, 256
- %Aj = getelementptr inbounds double* %A, i64 %j
- %Bj = getelementptr inbounds double* %B, i64 %j
- %Cj = getelementptr inbounds double* %C, i64 %j
- %t3 = load double* %Bj
- %t4 = load double* %Cj
- %o = fdiv double %t3, %t4
- store double %o, double* %Aj
- %i.next = add nsw i64 %i, 1
- %exitcond = icmp eq i64 %i.next, %n
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-; A minor variation on mostly_full_me_0.
-; Prefer to start the indvar at 0.
-
-; CHECK: mostly_full_me_1:
-; CHECK: movsd (%rsi), %xmm0
-; CHECK: mulsd (%rdx), %xmm0
-; CHECK: movsd %xmm0, (%rdi)
-; CHECK: movsd -2048(%rsi), %xmm0
-; CHECK: addq $8, %rsi
-; CHECK: divsd -2048(%rdx), %xmm0
-; CHECK: addq $8, %rdx
-; CHECK: movsd %xmm0, -2048(%rdi)
-; CHECK: addq $8, %rdi
-; CHECK: decq %rcx
-; CHECK: jne
-
-define void @mostly_full_me_1(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
-entry:
- %t0 = icmp sgt i64 %n, 0
- br i1 %t0, label %loop, label %return
-
-loop:
- %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
- %Ai = getelementptr inbounds double* %A, i64 %i
- %Bi = getelementptr inbounds double* %B, i64 %i
- %Ci = getelementptr inbounds double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
- %m = fmul double %t1, %t2
- store double %m, double* %Ai
- %j = sub i64 %i, 256
- %Aj = getelementptr inbounds double* %A, i64 %j
- %Bj = getelementptr inbounds double* %B, i64 %j
- %Cj = getelementptr inbounds double* %C, i64 %j
- %t3 = load double* %Bj
- %t4 = load double* %Cj
- %o = fdiv double %t3, %t4
- store double %o, double* %Aj
- %i.next = add nsw i64 %i, 1
- %exitcond = icmp eq i64 %i.next, %n
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-; A slightly less minor variation on mostly_full_me_0.
-
-; CHECK: mostly_full_me_2:
-; CHECK: movsd (%rsi), %xmm0
-; CHECK: mulsd (%rdx), %xmm0
-; CHECK: movsd %xmm0, (%rdi)
-; CHECK: movsd -4096(%rsi), %xmm0
-; CHECK: addq $8, %rsi
-; CHECK: divsd -4096(%rdx), %xmm0
-; CHECK: addq $8, %rdx
-; CHECK: movsd %xmm0, -4096(%rdi)
-; CHECK: addq $8, %rdi
-; CHECK: decq %rcx
-; CHECK: jne
-
-define void @mostly_full_me_2(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
-entry:
- %t0 = icmp sgt i64 %n, 0
- br i1 %t0, label %loop, label %return
-
-loop:
- %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
- %k = add i64 %i, 256
- %Ak = getelementptr inbounds double* %A, i64 %k
- %Bk = getelementptr inbounds double* %B, i64 %k
- %Ck = getelementptr inbounds double* %C, i64 %k
- %t1 = load double* %Bk
- %t2 = load double* %Ck
- %m = fmul double %t1, %t2
- store double %m, double* %Ak
- %j = sub i64 %i, 256
- %Aj = getelementptr inbounds double* %A, i64 %j
- %Bj = getelementptr inbounds double* %B, i64 %j
- %Cj = getelementptr inbounds double* %C, i64 %j
- %t3 = load double* %Bj
- %t4 = load double* %Cj
- %o = fdiv double %t3, %t4
- store double %o, double* %Aj
- %i.next = add nsw i64 %i, 1
- %exitcond = icmp eq i64 %i.next, %n
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-; In this test, the counting IV exit value is used, so full strength reduction
-; would not reduce register pressure. IndVarSimplify ought to simplify such
-; cases away, but it's useful here to verify that LSR's register pressure
-; heuristics are working as expected.
-
-; CHECK: count_me_0:
-; CHECK: movsd (%rsi,%rax,8), %xmm0
-; CHECK: mulsd (%rdx,%rax,8), %xmm0
-; CHECK: movsd %xmm0, (%rdi,%rax,8)
-; CHECK: incq %rax
-; CHECK: cmpq %rax, %rcx
-; CHECK: jne
-
-define i64 @count_me_0(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
-entry:
- %t0 = icmp sgt i64 %n, 0
- br i1 %t0, label %loop, label %return
-
-loop:
- %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
- %Ai = getelementptr inbounds double* %A, i64 %i
- %Bi = getelementptr inbounds double* %B, i64 %i
- %Ci = getelementptr inbounds double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
- %m = fmul double %t1, %t2
- store double %m, double* %Ai
- %i.next = add nsw i64 %i, 1
- %exitcond = icmp eq i64 %i.next, %n
- br i1 %exitcond, label %return, label %loop
-
-return:
- %q = phi i64 [ 0, %entry ], [ %i.next, %loop ]
- ret i64 %q
-}
-
-; In this test, the trip count value is used, so full strength reduction
-; would not reduce register pressure.
-; (though it would reduce register pressure inside the loop...)
-
-; CHECK: count_me_1:
-; CHECK: movsd (%rsi,%rax,8), %xmm0
-; CHECK: mulsd (%rdx,%rax,8), %xmm0
-; CHECK: movsd %xmm0, (%rdi,%rax,8)
-; CHECK: incq %rax
-; CHECK: cmpq %rax, %rcx
-; CHECK: jne
-
-define i64 @count_me_1(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
-entry:
- %t0 = icmp sgt i64 %n, 0
- br i1 %t0, label %loop, label %return
-
-loop:
- %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
- %Ai = getelementptr inbounds double* %A, i64 %i
- %Bi = getelementptr inbounds double* %B, i64 %i
- %Ci = getelementptr inbounds double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
- %m = fmul double %t1, %t2
- store double %m, double* %Ai
- %i.next = add nsw i64 %i, 1
- %exitcond = icmp eq i64 %i.next, %n
- br i1 %exitcond, label %return, label %loop
-
-return:
- %q = phi i64 [ 0, %entry ], [ %n, %loop ]
- ret i64 %q
-}
-
-; Full strength reduction doesn't save any registers here because the
-; loop tripcount is a constant.
-
-; CHECK: count_me_2:
-; CHECK: movl $10, %eax
-; CHECK: align
-; CHECK: BB7_1:
-; CHECK: movsd -40(%rdi,%rax,8), %xmm0
-; CHECK: addsd -40(%rsi,%rax,8), %xmm0
-; CHECK: movsd %xmm0, -40(%rdx,%rax,8)
-; CHECK: movsd (%rdi,%rax,8), %xmm0
-; CHECK: subsd (%rsi,%rax,8), %xmm0
-; CHECK: movsd %xmm0, (%rdx,%rax,8)
-; CHECK: incq %rax
-; CHECK: cmpq $5010, %rax
-; CHECK: jne
-
-define void @count_me_2(double* nocapture %A, double* nocapture %B, double* nocapture %C) nounwind {
-entry:
- br label %loop
-
-loop:
- %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
- %i5 = add i64 %i, 5
- %Ai = getelementptr double* %A, i64 %i5
- %t2 = load double* %Ai
- %Bi = getelementptr double* %B, i64 %i5
- %t4 = load double* %Bi
- %t5 = fadd double %t2, %t4
- %Ci = getelementptr double* %C, i64 %i5
- store double %t5, double* %Ci
- %i10 = add i64 %i, 10
- %Ai10 = getelementptr double* %A, i64 %i10
- %t9 = load double* %Ai10
- %Bi10 = getelementptr double* %B, i64 %i10
- %t11 = load double* %Bi10
- %t12 = fsub double %t9, %t11
- %Ci10 = getelementptr double* %C, i64 %i10
- store double %t12, double* %Ci10
- %i.next = add i64 %i, 1
- %exitcond = icmp eq i64 %i.next, 5000
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-; This should be fully strength-reduced to reduce register pressure.
-
-; CHECK: full_me_1:
-; CHECK: align
-; CHECK: BB8_1:
-; CHECK: movsd (%rdi), %xmm0
-; CHECK: addsd (%rsi), %xmm0
-; CHECK: movsd %xmm0, (%rdx)
-; CHECK: movsd 40(%rdi), %xmm0
-; CHECK: addq $8, %rdi
-; CHECK: subsd 40(%rsi), %xmm0
-; CHECK: addq $8, %rsi
-; CHECK: movsd %xmm0, 40(%rdx)
-; CHECK: addq $8, %rdx
-; CHECK: decq %rcx
-; CHECK: jne
-
-define void @full_me_1(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
- %i5 = add i64 %i, 5
- %Ai = getelementptr double* %A, i64 %i5
- %t2 = load double* %Ai
- %Bi = getelementptr double* %B, i64 %i5
- %t4 = load double* %Bi
- %t5 = fadd double %t2, %t4
- %Ci = getelementptr double* %C, i64 %i5
- store double %t5, double* %Ci
- %i10 = add i64 %i, 10
- %Ai10 = getelementptr double* %A, i64 %i10
- %t9 = load double* %Ai10
- %Bi10 = getelementptr double* %B, i64 %i10
- %t11 = load double* %Bi10
- %t12 = fsub double %t9, %t11
- %Ci10 = getelementptr double* %C, i64 %i10
- store double %t12, double* %Ci10
- %i.next = add i64 %i, 1
- %exitcond = icmp eq i64 %i.next, %n
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-; This is a variation on full_me_0 in which the 0,+,1 induction variable
-; has a non-address use, pinning that value in a register.
-
-; CHECK: count_me_3:
-; CHECK: call
-; CHECK: movsd (%r15,%r13,8), %xmm0
-; CHECK: mulsd (%r14,%r13,8), %xmm0
-; CHECK: movsd %xmm0, (%r12,%r13,8)
-; CHECK: incq %r13
-; CHECK: cmpq %r13, %rbx
-; CHECK: jne
-
-declare void @use(i64)
-
-define void @count_me_3(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
-entry:
- %t0 = icmp sgt i64 %n, 0
- br i1 %t0, label %loop, label %return
-
-loop:
- %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
- call void @use(i64 %i)
- %Ai = getelementptr inbounds double* %A, i64 %i
- %Bi = getelementptr inbounds double* %B, i64 %i
- %Ci = getelementptr inbounds double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
- %m = fmul double %t1, %t2
- store double %m, double* %Ai
- %i.next = add nsw i64 %i, 1
- %exitcond = icmp eq i64 %i.next, %n
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-; LSR should use only one indvar for the inner loop.
-; rdar://7657764
-
-; CHECK: asd:
-; CHECK: BB10_5:
-; CHECK-NEXT: addl (%r{{[^,]*}},%rdi,4), %e
-; CHECK-NEXT: incq %rdi
-; CHECK-NEXT: cmpq %rdi, %r{{[^,]*}}
-; CHECK-NEXT: jg
-
-%struct.anon = type { i32, [4200 x i32] }
-
-@bars = common global [123123 x %struct.anon] zeroinitializer, align 32 ; <[123123 x %struct.anon]*> [#uses=2]
-
-define i32 @asd(i32 %n) nounwind readonly {
-entry:
- %0 = icmp sgt i32 %n, 0 ; <i1> [#uses=1]
- br i1 %0, label %bb.nph14, label %bb5
-
-bb.nph14: ; preds = %entry
- %tmp18 = zext i32 %n to i64 ; <i64> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb3, %bb.nph14
- %indvar16 = phi i64 [ 0, %bb.nph14 ], [ %indvar.next17, %bb3 ] ; <i64> [#uses=3]
- %s.113 = phi i32 [ 0, %bb.nph14 ], [ %s.0.lcssa, %bb3 ] ; <i32> [#uses=2]
- %scevgep2526 = getelementptr [123123 x %struct.anon]* @bars, i64 0, i64 %indvar16, i32 0 ; <i32*> [#uses=1]
- %1 = load i32* %scevgep2526, align 4 ; <i32> [#uses=2]
- %2 = icmp sgt i32 %1, 0 ; <i1> [#uses=1]
- br i1 %2, label %bb.nph, label %bb3
-
-bb.nph: ; preds = %bb
- %tmp23 = sext i32 %1 to i64 ; <i64> [#uses=1]
- br label %bb1
-
-bb1: ; preds = %bb.nph, %bb1
- %indvar = phi i64 [ 0, %bb.nph ], [ %tmp19, %bb1 ] ; <i64> [#uses=2]
- %s.07 = phi i32 [ %s.113, %bb.nph ], [ %4, %bb1 ] ; <i32> [#uses=1]
- %c.08 = getelementptr [123123 x %struct.anon]* @bars, i64 0, i64 %indvar16, i32 1, i64 %indvar ; <i32*> [#uses=1]
- %3 = load i32* %c.08, align 4 ; <i32> [#uses=1]
- %4 = add nsw i32 %3, %s.07 ; <i32> [#uses=2]
- %tmp19 = add i64 %indvar, 1 ; <i64> [#uses=2]
- %5 = icmp sgt i64 %tmp23, %tmp19 ; <i1> [#uses=1]
- br i1 %5, label %bb1, label %bb3
-
-bb3: ; preds = %bb1, %bb
- %s.0.lcssa = phi i32 [ %s.113, %bb ], [ %4, %bb1 ] ; <i32> [#uses=2]
- %indvar.next17 = add i64 %indvar16, 1 ; <i64> [#uses=2]
- %exitcond = icmp eq i64 %indvar.next17, %tmp18 ; <i1> [#uses=1]
- br i1 %exitcond, label %bb5, label %bb
-
-bb5: ; preds = %bb3, %entry
- %s.1.lcssa = phi i32 [ 0, %entry ], [ %s.0.lcssa, %bb3 ] ; <i32> [#uses=1]
- ret i32 %s.1.lcssa
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/lsr-sort.ll b/libclamav/c++/llvm/test/CodeGen/X86/lsr-sort.ll
deleted file mode 100644
index 1f3b59a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/lsr-sort.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: grep inc %t | count 1
-; RUN: not grep incw %t
-
-@X = common global i16 0 ; <i16*> [#uses=1]
-
-define i32 @foo(i32 %N) nounwind {
-entry:
- %0 = icmp sgt i32 %N, 0 ; <i1> [#uses=1]
- br i1 %0, label %bb, label %return
-
-bb: ; preds = %bb, %entry
- %i.03 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
- %1 = trunc i32 %i.03 to i16 ; <i16> [#uses=1]
- volatile store i16 %1, i16* @X, align 2
- %indvar.next = add i32 %i.03, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb
-
-return: ; preds = %bb, %entry
- %h = phi i32 [ 0, %entry ], [ %indvar.next, %bb ]
- ret i32 %h
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/lsr-wrap.ll b/libclamav/c++/llvm/test/CodeGen/X86/lsr-wrap.ll
deleted file mode 100644
index ec8db50..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/lsr-wrap.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc -march=x86-64 < %s | FileCheck %s
-
-; LSR would like to use a single IV for both of these, however it's
-; not safe due to wraparound.
-
-; CHECK: addb $-4, %r
-; CHECK: decw %
-
-@g_19 = common global i32 0 ; <i32*> [#uses=2]
-
-declare i32 @func_8(i8 zeroext) nounwind
-
-declare i32 @func_3(i8 signext) nounwind
-
-define void @func_1() nounwind {
-entry:
- br label %bb
-
-bb: ; preds = %bb, %entry
- %indvar = phi i16 [ 0, %entry ], [ %indvar.next, %bb ] ; <i16> [#uses=2]
- %tmp = sub i16 0, %indvar ; <i16> [#uses=1]
- %tmp27 = trunc i16 %tmp to i8 ; <i8> [#uses=1]
- %tmp1 = load i32* @g_19, align 4 ; <i32> [#uses=2]
- %tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=1]
- store i32 %tmp2, i32* @g_19, align 4
- %tmp3 = trunc i32 %tmp1 to i8 ; <i8> [#uses=1]
- %tmp4 = tail call i32 @func_8(i8 zeroext %tmp3) nounwind ; <i32> [#uses=0]
- %tmp5 = shl i8 %tmp27, 2 ; <i8> [#uses=1]
- %tmp6 = add i8 %tmp5, -112 ; <i8> [#uses=1]
- %tmp7 = tail call i32 @func_3(i8 signext %tmp6) nounwind ; <i32> [#uses=0]
- %indvar.next = add i16 %indvar, 1 ; <i16> [#uses=2]
- %exitcond = icmp eq i16 %indvar.next, -28 ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb
-
-return: ; preds = %bb
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/masked-iv-safe.ll b/libclamav/c++/llvm/test/CodeGen/X86/masked-iv-safe.ll
deleted file mode 100644
index 0b4d73a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/masked-iv-safe.ll
+++ /dev/null
@@ -1,244 +0,0 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: not grep and %t
-; RUN: not grep movz %t
-; RUN: not grep sar %t
-; RUN: not grep shl %t
-; RUN: grep add %t | count 2
-; RUN: grep inc %t | count 4
-; RUN: grep dec %t | count 2
-; RUN: grep lea %t | count 2
-
-; Optimize away zext-inreg and sext-inreg on the loop induction
-; variable using trip-count information.
-
-define void @count_up(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
- %indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = add i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, 10
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @count_down(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
- %indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = sub i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, 0
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @count_up_signed(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
- %s0 = shl i64 %indvar, 8
- %indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %s1 = shl i64 %indvar, 24
- %indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = add i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, 10
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @count_down_signed(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
- %s0 = shl i64 %indvar, 8
- %indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %s1 = shl i64 %indvar, 24
- %indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = sub i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, 0
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @another_count_up(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 18446744073709551615, %entry ], [ %indvar.next, %loop ]
- %indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = add i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, 0
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @another_count_down(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
- %indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fdiv double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = sub i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, 18446744073709551615
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @another_count_up_signed(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 18446744073709551615, %entry ], [ %indvar.next, %loop ]
- %s0 = shl i64 %indvar, 8
- %indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %s1 = shl i64 %indvar, 24
- %indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fdiv double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = add i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, 0
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @another_count_down_signed(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
- %s0 = shl i64 %indvar, 8
- %indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %s1 = shl i64 %indvar, 24
- %indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fdiv double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = sub i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, 18446744073709551615
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/masked-iv-unsafe.ll b/libclamav/c++/llvm/test/CodeGen/X86/masked-iv-unsafe.ll
deleted file mode 100644
index f23c020..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/masked-iv-unsafe.ll
+++ /dev/null
@@ -1,386 +0,0 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: grep and %t | count 6
-; RUN: grep movzb %t | count 6
-; RUN: grep sar %t | count 12
-
-; Don't optimize away zext-inreg and sext-inreg on the loop induction
-; variable, because it isn't safe to do so in these cases.
-
-define void @count_up(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
- %indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = add i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, 0
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @count_down(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
- %indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = sub i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, 20
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @count_up_signed(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
- %s0 = shl i64 %indvar, 8
- %indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %s1 = shl i64 %indvar, 24
- %indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = add i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, 0
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @count_down_signed(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
- %s0 = shl i64 %indvar, 8
- %indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %s1 = shl i64 %indvar, 24
- %indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = sub i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, 20
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @another_count_up(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
- %indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = add i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, %n
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @another_count_down(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ %n, %entry ], [ %indvar.next, %loop ]
- %indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = sub i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, 10
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @another_count_up_signed(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
- %s0 = shl i64 %indvar, 8
- %indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %s1 = shl i64 %indvar, 24
- %indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = add i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, %n
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @another_count_down_signed(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ %n, %entry ], [ %indvar.next, %loop ]
- %s0 = shl i64 %indvar, 8
- %indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %s1 = shl i64 %indvar, 24
- %indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = sub i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, 10
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @yet_another_count_down(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
- %indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = sub i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, 18446744073709551615
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @yet_another_count_up(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
- %indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = add i64 %indvar, 3
- %exitcond = icmp eq i64 %indvar.next, 10
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @still_another_count_down(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
- %indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = sub i64 %indvar, 3
- %exitcond = icmp eq i64 %indvar.next, 0
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @yet_another_count_up_signed(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
- %s0 = shl i64 %indvar, 8
- %indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %s1 = shl i64 %indvar, 24
- %indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = add i64 %indvar, 3
- %exitcond = icmp eq i64 %indvar.next, 10
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-define void @yet_another_count_down_signed(double* %d, i64 %n) nounwind {
-entry:
- br label %loop
-
-loop:
- %indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
- %s0 = shl i64 %indvar, 8
- %indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
- %t1 = load double* %t0
- %t2 = fmul double %t1, 0.1
- store double %t2, double* %t0
- %s1 = shl i64 %indvar, 24
- %indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
- %t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
- store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
- %t7 = load double* %t6
- %t8 = fmul double %t7, 4.5
- store double %t8, double* %t6
- %indvar.next = sub i64 %indvar, 3
- %exitcond = icmp eq i64 %indvar.next, 0
- br i1 %exitcond, label %return, label %loop
-
-return:
- ret void
-}
-
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/maskmovdqu.ll b/libclamav/c++/llvm/test/CodeGen/X86/maskmovdqu.ll
deleted file mode 100644
index 7796f0e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/maskmovdqu.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep -i EDI
-; RUN: llc < %s -march=x86-64 -mattr=+sse2 | grep -i RDI
-; rdar://6573467
-
-define void @test(<16 x i8> %a, <16 x i8> %b, i32 %dummy, i8* %c) nounwind {
-entry:
- tail call void @llvm.x86.sse2.maskmov.dqu( <16 x i8> %a, <16 x i8> %b, i8* %c )
- ret void
-}
-
-declare void @llvm.x86.sse2.maskmov.dqu(<16 x i8>, <16 x i8>, i8*) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/memcmp.ll b/libclamav/c++/llvm/test/CodeGen/X86/memcmp.ll
deleted file mode 100644
index b90d2e2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/memcmp.ll
+++ /dev/null
@@ -1,110 +0,0 @@
-; RUN: llc %s -o - -march=x86-64 | FileCheck %s
-
-; This tests codegen time inlining/optimization of memcmp
-; rdar://6480398
-
-@.str = private constant [23 x i8] c"fooooooooooooooooooooo\00", align 1 ; <[23 x i8]*> [#uses=1]
-
-declare i32 @memcmp(...)
-
-define void @memcmp2(i8* %X, i8* %Y, i32* nocapture %P) nounwind {
-entry:
- %0 = tail call i32 (...)* @memcmp(i8* %X, i8* %Y, i32 2) nounwind ; <i32> [#uses=1]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %return, label %bb
-
-bb: ; preds = %entry
- store i32 4, i32* %P, align 4
- ret void
-
-return: ; preds = %entry
- ret void
-; CHECK: memcmp2:
-; CHECK: movw (%rsi), %ax
-; CHECK: cmpw %ax, (%rdi)
-}
-
-define void @memcmp2a(i8* %X, i32* nocapture %P) nounwind {
-entry:
- %0 = tail call i32 (...)* @memcmp(i8* %X, i8* getelementptr inbounds ([23 x i8]* @.str, i32 0, i32 1), i32 2) nounwind ; <i32> [#uses=1]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %return, label %bb
-
-bb: ; preds = %entry
- store i32 4, i32* %P, align 4
- ret void
-
-return: ; preds = %entry
- ret void
-; CHECK: memcmp2a:
-; CHECK: cmpw $28527, (%rdi)
-}
-
-
-define void @memcmp4(i8* %X, i8* %Y, i32* nocapture %P) nounwind {
-entry:
- %0 = tail call i32 (...)* @memcmp(i8* %X, i8* %Y, i32 4) nounwind ; <i32> [#uses=1]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %return, label %bb
-
-bb: ; preds = %entry
- store i32 4, i32* %P, align 4
- ret void
-
-return: ; preds = %entry
- ret void
-; CHECK: memcmp4:
-; CHECK: movl (%rsi), %eax
-; CHECK: cmpl %eax, (%rdi)
-}
-
-define void @memcmp4a(i8* %X, i32* nocapture %P) nounwind {
-entry:
- %0 = tail call i32 (...)* @memcmp(i8* %X, i8* getelementptr inbounds ([23 x i8]* @.str, i32 0, i32 1), i32 4) nounwind ; <i32> [#uses=1]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %return, label %bb
-
-bb: ; preds = %entry
- store i32 4, i32* %P, align 4
- ret void
-
-return: ; preds = %entry
- ret void
-; CHECK: memcmp4a:
-; CHECK: cmpl $1869573999, (%rdi)
-}
-
-define void @memcmp8(i8* %X, i8* %Y, i32* nocapture %P) nounwind {
-entry:
- %0 = tail call i32 (...)* @memcmp(i8* %X, i8* %Y, i32 8) nounwind ; <i32> [#uses=1]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %return, label %bb
-
-bb: ; preds = %entry
- store i32 4, i32* %P, align 4
- ret void
-
-return: ; preds = %entry
- ret void
-; CHECK: memcmp8:
-; CHECK: movq (%rsi), %rax
-; CHECK: cmpq %rax, (%rdi)
-}
-
-define void @memcmp8a(i8* %X, i32* nocapture %P) nounwind {
-entry:
- %0 = tail call i32 (...)* @memcmp(i8* %X, i8* getelementptr inbounds ([23 x i8]* @.str, i32 0, i32 0), i32 8) nounwind ; <i32> [#uses=1]
- %1 = icmp eq i32 %0, 0 ; <i1> [#uses=1]
- br i1 %1, label %return, label %bb
-
-bb: ; preds = %entry
- store i32 4, i32* %P, align 4
- ret void
-
-return: ; preds = %entry
- ret void
-; CHECK: memcmp8a:
-; CHECK: movabsq $8029759185026510694, %rax
-; CHECK: cmpq %rax, (%rdi)
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/memcpy-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/memcpy-2.ll
deleted file mode 100644
index 2dc939e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/memcpy-2.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=-sse -mtriple=i686-apple-darwin8.8.0 | grep mov | count 7
-; RUN: llc < %s -march=x86 -mattr=+sse -mtriple=i686-apple-darwin8.8.0 | grep mov | count 5
-
- %struct.ParmT = type { [25 x i8], i8, i8* }
-@.str12 = internal constant [25 x i8] c"image\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00" ; <[25 x i8]*> [#uses=1]
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32) nounwind
-
-define void @t(i32 %argc, i8** %argv) nounwind {
-entry:
- %parms.i = alloca [13 x %struct.ParmT] ; <[13 x %struct.ParmT]*> [#uses=1]
- %parms1.i = getelementptr [13 x %struct.ParmT]* %parms.i, i32 0, i32 0, i32 0, i32 0 ; <i8*> [#uses=1]
- call void @llvm.memcpy.i32( i8* %parms1.i, i8* getelementptr ([25 x i8]* @.str12, i32 0, i32 0), i32 25, i32 1 ) nounwind
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/memcpy.ll b/libclamav/c++/llvm/test/CodeGen/X86/memcpy.ll
deleted file mode 100644
index 24530cd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/memcpy.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep call.*memcpy | count 2
-
-declare void @llvm.memcpy.i64(i8*, i8*, i64, i32)
-
-define i8* @my_memcpy(i8* %a, i8* %b, i64 %n) {
-entry:
- tail call void @llvm.memcpy.i64( i8* %a, i8* %b, i64 %n, i32 1 )
- ret i8* %a
-}
-
-define i8* @my_memcpy2(i64* %a, i64* %b, i64 %n) {
-entry:
- %tmp14 = bitcast i64* %a to i8*
- %tmp25 = bitcast i64* %b to i8*
- tail call void @llvm.memcpy.i64(i8* %tmp14, i8* %tmp25, i64 %n, i32 8 )
- ret i8* %tmp14
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/memmove-0.ll b/libclamav/c++/llvm/test/CodeGen/X86/memmove-0.ll
deleted file mode 100644
index d405068..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/memmove-0.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-pc-linux-gnu | grep {call memcpy}
-
-declare void @llvm.memmove.i64(i8* %d, i8* %s, i64 %l, i32 %a)
-
-define void @foo(i8* noalias %d, i8* noalias %s, i64 %l)
-{
- call void @llvm.memmove.i64(i8* %d, i8* %s, i64 %l, i32 1)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/memmove-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/memmove-1.ll
deleted file mode 100644
index 2057be8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/memmove-1.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-pc-linux-gnu | grep {call memmove}
-
-declare void @llvm.memmove.i64(i8* %d, i8* %s, i64 %l, i32 %a)
-
-define void @foo(i8* %d, i8* %s, i64 %l)
-{
- call void @llvm.memmove.i64(i8* %d, i8* %s, i64 %l, i32 1)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/memmove-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/memmove-2.ll
deleted file mode 100644
index 68a9f4d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/memmove-2.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-pc-linux-gnu | not grep call
-
-declare void @llvm.memmove.i64(i8* %d, i8* %s, i64 %l, i32 %a)
-
-define void @foo(i8* noalias %d, i8* noalias %s)
-{
- call void @llvm.memmove.i64(i8* %d, i8* %s, i64 32, i32 1)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/memmove-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/memmove-3.ll
deleted file mode 100644
index d8a419c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/memmove-3.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-pc-linux-gnu | grep {call memmove}
-
-declare void @llvm.memmove.i64(i8* %d, i8* %s, i64 %l, i32 %a)
-
-define void @foo(i8* %d, i8* %s)
-{
- call void @llvm.memmove.i64(i8* %d, i8* %s, i64 32, i32 1)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/memmove-4.ll b/libclamav/c++/llvm/test/CodeGen/X86/memmove-4.ll
deleted file mode 100644
index 027db1f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/memmove-4.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s | not grep call
-
-target triple = "i686-pc-linux-gnu"
-
-define void @a(i8* %a, i8* %b) nounwind {
- %tmp2 = bitcast i8* %a to i8*
- %tmp3 = bitcast i8* %b to i8*
- tail call void @llvm.memmove.i32( i8* %tmp2, i8* %tmp3, i32 12, i32 4 )
- ret void
-}
-
-declare void @llvm.memmove.i32(i8*, i8*, i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/memset-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/memset-2.ll
deleted file mode 100644
index 7deb52f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/memset-2.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; RUN: llc < %s | not grep rep
-; RUN: llc < %s | grep memset
-
-target triple = "i386"
-
-declare void @llvm.memset.i32(i8*, i8, i32, i32) nounwind
-
-define fastcc i32 @cli_scanzip(i32 %desc) nounwind {
-entry:
- br label %bb8.i.i.i.i
-
-bb8.i.i.i.i: ; preds = %bb8.i.i.i.i, %entry
- icmp eq i32 0, 0 ; <i1>:0 [#uses=1]
- br i1 %0, label %bb61.i.i.i, label %bb8.i.i.i.i
-
-bb32.i.i.i: ; preds = %bb61.i.i.i
- ptrtoint i8* %tail.0.i.i.i to i32 ; <i32>:1 [#uses=1]
- sub i32 0, %1 ; <i32>:2 [#uses=1]
- icmp sgt i32 %2, 19 ; <i1>:3 [#uses=1]
- br i1 %3, label %bb34.i.i.i, label %bb61.i.i.i
-
-bb34.i.i.i: ; preds = %bb32.i.i.i
- load i32* null, align 4 ; <i32>:4 [#uses=1]
- icmp eq i32 %4, 101010256 ; <i1>:5 [#uses=1]
- br i1 %5, label %bb8.i11.i.i.i, label %bb61.i.i.i
-
-bb8.i11.i.i.i: ; preds = %bb8.i11.i.i.i, %bb34.i.i.i
- icmp eq i32 0, 0 ; <i1>:6 [#uses=1]
- br i1 %6, label %cli_dbgmsg.exit49.i, label %bb8.i11.i.i.i
-
-cli_dbgmsg.exit49.i: ; preds = %bb8.i11.i.i.i
- icmp eq [32768 x i8]* null, null ; <i1>:7 [#uses=1]
- br i1 %7, label %bb1.i28.i, label %bb8.i.i
-
-bb61.i.i.i: ; preds = %bb61.i.i.i, %bb34.i.i.i, %bb32.i.i.i, %bb8.i.i.i.i
- %tail.0.i.i.i = getelementptr [1024 x i8]* null, i32 0, i32 0 ; <i8*> [#uses=2]
- load i8* %tail.0.i.i.i, align 1 ; <i8>:8 [#uses=1]
- icmp eq i8 %8, 80 ; <i1>:9 [#uses=1]
- br i1 %9, label %bb32.i.i.i, label %bb61.i.i.i
-
-bb1.i28.i: ; preds = %cli_dbgmsg.exit49.i
- call void @llvm.memset.i32( i8* null, i8 0, i32 88, i32 1 ) nounwind
- unreachable
-
-bb8.i.i: ; preds = %bb8.i.i, %cli_dbgmsg.exit49.i
- br label %bb8.i.i
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/memset.ll b/libclamav/c++/llvm/test/CodeGen/X86/memset.ll
deleted file mode 100644
index cf7464d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/memset.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=-sse -mtriple=i686-apple-darwin8.8.0 | grep mov | count 9
-; RUN: llc < %s -march=x86 -mattr=+sse -mtriple=i686-apple-darwin8.8.0 | grep mov | count 3
-
- %struct.x = type { i16, i16 }
-
-define void @t() nounwind {
-entry:
- %up_mvd = alloca [8 x %struct.x] ; <[8 x %struct.x]*> [#uses=2]
- %up_mvd116 = getelementptr [8 x %struct.x]* %up_mvd, i32 0, i32 0 ; <%struct.x*> [#uses=1]
- %tmp110117 = bitcast [8 x %struct.x]* %up_mvd to i8* ; <i8*> [#uses=1]
- call void @llvm.memset.i64( i8* %tmp110117, i8 0, i64 32, i32 8 )
- call void @foo( %struct.x* %up_mvd116 ) nounwind
- ret void
-}
-
-declare void @foo(%struct.x*)
-
-declare void @llvm.memset.i64(i8*, i8, i64, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/memset64-on-x86-32.ll b/libclamav/c++/llvm/test/CodeGen/X86/memset64-on-x86-32.ll
deleted file mode 100644
index da8fc51..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/memset64-on-x86-32.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin | grep stosl
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | grep movq | count 10
-
-define void @bork() nounwind {
-entry:
- call void @llvm.memset.i64( i8* null, i8 0, i64 80, i32 4 )
- ret void
-}
-
-declare void @llvm.memset.i64(i8*, i8, i64, i32) nounwind
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mfence.ll b/libclamav/c++/llvm/test/CodeGen/X86/mfence.ll
deleted file mode 100644
index a1b2283..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mfence.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep sfence
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep lfence
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep mfence
-
-
-declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1)
-
-define void @test() {
- call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 false, i1 true)
- call void @llvm.memory.barrier( i1 true, i1 false, i1 true, i1 false, i1 true)
- call void @llvm.memory.barrier( i1 true, i1 false, i1 false, i1 true, i1 true)
-
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 false, i1 true)
- call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 true, i1 true)
- call void @llvm.memory.barrier( i1 true, i1 false, i1 true, i1 true, i1 true)
-
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true , i1 true)
- call void @llvm.memory.barrier( i1 false, i1 false, i1 false, i1 false , i1 true)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mingw-alloca.ll b/libclamav/c++/llvm/test/CodeGen/X86/mingw-alloca.ll
deleted file mode 100644
index 7dcd84d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mingw-alloca.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s | FileCheck %s
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i386-pc-mingw32"
-
-define void @foo1(i32 %N) nounwind {
-entry:
-; CHECK: _foo1:
-; CHECK: call __alloca
- %tmp14 = alloca i32, i32 %N ; <i32*> [#uses=1]
- call void @bar1( i32* %tmp14 )
- ret void
-}
-
-declare void @bar1(i32*)
-
-define void @foo2(i32 inreg %N) nounwind {
-entry:
-; CHECK: _foo2:
-; CHECK: andl $-16, %esp
-; CHECK: pushl %eax
-; CHECK: call __alloca
-; CHECK: movl 8028(%esp), %eax
- %A2 = alloca [2000 x i32], align 16 ; <[2000 x i32]*> [#uses=1]
- %A2.sub = getelementptr [2000 x i32]* %A2, i32 0, i32 0 ; <i32*> [#uses=1]
- call void @bar2( i32* %A2.sub, i32 %N )
- ret void
-}
-
-declare void @bar2(i32*, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mmx-arg-passing.ll b/libclamav/c++/llvm/test/CodeGen/X86/mmx-arg-passing.ll
deleted file mode 100644
index 426e98e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mmx-arg-passing.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+mmx | grep mm0 | count 3
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+mmx | grep esp | count 1
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep xmm0
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep rdi
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | not grep movups
-;
-; On Darwin x86-32, v8i8, v4i16, v2i32 values are passed in MM[0-2].
-; On Darwin x86-32, v1i64 values are passed in memory.
-; On Darwin x86-64, v8i8, v4i16, v2i32 values are passed in XMM[0-7].
-; On Darwin x86-64, v1i64 values are passed in 64-bit GPRs.
-
-@u1 = external global <8 x i8>
-
-define void @t1(<8 x i8> %v1) nounwind {
- store <8 x i8> %v1, <8 x i8>* @u1, align 8
- ret void
-}
-
-@u2 = external global <1 x i64>
-
-define void @t2(<1 x i64> %v1) nounwind {
- store <1 x i64> %v1, <1 x i64>* @u2, align 8
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mmx-arg-passing2.ll b/libclamav/c++/llvm/test/CodeGen/X86/mmx-arg-passing2.ll
deleted file mode 100644
index c42af08..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mmx-arg-passing2.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep movq2dq | count 1
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep movdq2q | count 2
-
-@g_v8qi = external global <8 x i8>
-
-define void @t1() nounwind {
- %tmp3 = load <8 x i8>* @g_v8qi, align 8
- %tmp4 = tail call i32 (...)* @pass_v8qi( <8 x i8> %tmp3 ) nounwind
- ret void
-}
-
-define void @t2(<8 x i8> %v1, <8 x i8> %v2) nounwind {
- %tmp3 = add <8 x i8> %v1, %v2
- %tmp4 = tail call i32 (...)* @pass_v8qi( <8 x i8> %tmp3 ) nounwind
- ret void
-}
-
-define void @t3() nounwind {
- call void @pass_v1di( <1 x i64> zeroinitializer )
- ret void
-}
-
-declare i32 @pass_v8qi(...)
-declare void @pass_v1di(<1 x i64>)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mmx-arith.ll b/libclamav/c++/llvm/test/CodeGen/X86/mmx-arith.ll
deleted file mode 100644
index e4dfdbf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mmx-arith.ll
+++ /dev/null
@@ -1,131 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx
-
-;; A basic sanity check to make sure that MMX arithmetic actually compiles.
-
-define void @foo(<8 x i8>* %A, <8 x i8>* %B) {
-entry:
- %tmp1 = load <8 x i8>* %A ; <<8 x i8>> [#uses=1]
- %tmp3 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp4 = add <8 x i8> %tmp1, %tmp3 ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp4, <8 x i8>* %A
- %tmp7 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp12 = tail call <8 x i8> @llvm.x86.mmx.padds.b( <8 x i8> %tmp4, <8 x i8> %tmp7 ) ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp12, <8 x i8>* %A
- %tmp16 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp21 = tail call <8 x i8> @llvm.x86.mmx.paddus.b( <8 x i8> %tmp12, <8 x i8> %tmp16 ) ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp21, <8 x i8>* %A
- %tmp27 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp28 = sub <8 x i8> %tmp21, %tmp27 ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp28, <8 x i8>* %A
- %tmp31 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp36 = tail call <8 x i8> @llvm.x86.mmx.psubs.b( <8 x i8> %tmp28, <8 x i8> %tmp31 ) ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp36, <8 x i8>* %A
- %tmp40 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp45 = tail call <8 x i8> @llvm.x86.mmx.psubus.b( <8 x i8> %tmp36, <8 x i8> %tmp40 ) ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp45, <8 x i8>* %A
- %tmp51 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp52 = mul <8 x i8> %tmp45, %tmp51 ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp52, <8 x i8>* %A
- %tmp57 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp58 = and <8 x i8> %tmp52, %tmp57 ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp58, <8 x i8>* %A
- %tmp63 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp64 = or <8 x i8> %tmp58, %tmp63 ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp64, <8 x i8>* %A
- %tmp69 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp70 = xor <8 x i8> %tmp64, %tmp69 ; <<8 x i8>> [#uses=1]
- store <8 x i8> %tmp70, <8 x i8>* %A
- tail call void @llvm.x86.mmx.emms( )
- ret void
-}
-
-define void @baz(<2 x i32>* %A, <2 x i32>* %B) {
-entry:
- %tmp1 = load <2 x i32>* %A ; <<2 x i32>> [#uses=1]
- %tmp3 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
- %tmp4 = add <2 x i32> %tmp1, %tmp3 ; <<2 x i32>> [#uses=2]
- store <2 x i32> %tmp4, <2 x i32>* %A
- %tmp9 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
- %tmp10 = sub <2 x i32> %tmp4, %tmp9 ; <<2 x i32>> [#uses=2]
- store <2 x i32> %tmp10, <2 x i32>* %A
- %tmp15 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
- %tmp16 = mul <2 x i32> %tmp10, %tmp15 ; <<2 x i32>> [#uses=2]
- store <2 x i32> %tmp16, <2 x i32>* %A
- %tmp21 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
- %tmp22 = and <2 x i32> %tmp16, %tmp21 ; <<2 x i32>> [#uses=2]
- store <2 x i32> %tmp22, <2 x i32>* %A
- %tmp27 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
- %tmp28 = or <2 x i32> %tmp22, %tmp27 ; <<2 x i32>> [#uses=2]
- store <2 x i32> %tmp28, <2 x i32>* %A
- %tmp33 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
- %tmp34 = xor <2 x i32> %tmp28, %tmp33 ; <<2 x i32>> [#uses=1]
- store <2 x i32> %tmp34, <2 x i32>* %A
- tail call void @llvm.x86.mmx.emms( )
- ret void
-}
-
-define void @bar(<4 x i16>* %A, <4 x i16>* %B) {
-entry:
- %tmp1 = load <4 x i16>* %A ; <<4 x i16>> [#uses=1]
- %tmp3 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp4 = add <4 x i16> %tmp1, %tmp3 ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp4, <4 x i16>* %A
- %tmp7 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp12 = tail call <4 x i16> @llvm.x86.mmx.padds.w( <4 x i16> %tmp4, <4 x i16> %tmp7 ) ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp12, <4 x i16>* %A
- %tmp16 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp21 = tail call <4 x i16> @llvm.x86.mmx.paddus.w( <4 x i16> %tmp12, <4 x i16> %tmp16 ) ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp21, <4 x i16>* %A
- %tmp27 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp28 = sub <4 x i16> %tmp21, %tmp27 ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp28, <4 x i16>* %A
- %tmp31 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp36 = tail call <4 x i16> @llvm.x86.mmx.psubs.w( <4 x i16> %tmp28, <4 x i16> %tmp31 ) ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp36, <4 x i16>* %A
- %tmp40 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp45 = tail call <4 x i16> @llvm.x86.mmx.psubus.w( <4 x i16> %tmp36, <4 x i16> %tmp40 ) ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp45, <4 x i16>* %A
- %tmp51 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp52 = mul <4 x i16> %tmp45, %tmp51 ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp52, <4 x i16>* %A
- %tmp55 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp60 = tail call <4 x i16> @llvm.x86.mmx.pmulh.w( <4 x i16> %tmp52, <4 x i16> %tmp55 ) ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp60, <4 x i16>* %A
- %tmp64 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp69 = tail call <2 x i32> @llvm.x86.mmx.pmadd.wd( <4 x i16> %tmp60, <4 x i16> %tmp64 ) ; <<2 x i32>> [#uses=1]
- %tmp70 = bitcast <2 x i32> %tmp69 to <4 x i16> ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp70, <4 x i16>* %A
- %tmp75 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp76 = and <4 x i16> %tmp70, %tmp75 ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp76, <4 x i16>* %A
- %tmp81 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp82 = or <4 x i16> %tmp76, %tmp81 ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp82, <4 x i16>* %A
- %tmp87 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp88 = xor <4 x i16> %tmp82, %tmp87 ; <<4 x i16>> [#uses=1]
- store <4 x i16> %tmp88, <4 x i16>* %A
- tail call void @llvm.x86.mmx.emms( )
- ret void
-}
-
-declare <8 x i8> @llvm.x86.mmx.padds.b(<8 x i8>, <8 x i8>)
-
-declare <8 x i8> @llvm.x86.mmx.paddus.b(<8 x i8>, <8 x i8>)
-
-declare <8 x i8> @llvm.x86.mmx.psubs.b(<8 x i8>, <8 x i8>)
-
-declare <8 x i8> @llvm.x86.mmx.psubus.b(<8 x i8>, <8 x i8>)
-
-declare <4 x i16> @llvm.x86.mmx.padds.w(<4 x i16>, <4 x i16>)
-
-declare <4 x i16> @llvm.x86.mmx.paddus.w(<4 x i16>, <4 x i16>)
-
-declare <4 x i16> @llvm.x86.mmx.psubs.w(<4 x i16>, <4 x i16>)
-
-declare <4 x i16> @llvm.x86.mmx.psubus.w(<4 x i16>, <4 x i16>)
-
-declare <4 x i16> @llvm.x86.mmx.pmulh.w(<4 x i16>, <4 x i16>)
-
-declare <2 x i32> @llvm.x86.mmx.pmadd.wd(<4 x i16>, <4 x i16>)
-
-declare void @llvm.x86.mmx.emms()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mmx-bitcast-to-i64.ll b/libclamav/c++/llvm/test/CodeGen/X86/mmx-bitcast-to-i64.ll
deleted file mode 100644
index 1fd8f67..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mmx-bitcast-to-i64.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movd | count 4
-
-define i64 @foo(<1 x i64>* %p) {
- %t = load <1 x i64>* %p
- %u = add <1 x i64> %t, %t
- %s = bitcast <1 x i64> %u to i64
- ret i64 %s
-}
-define i64 @goo(<2 x i32>* %p) {
- %t = load <2 x i32>* %p
- %u = add <2 x i32> %t, %t
- %s = bitcast <2 x i32> %u to i64
- ret i64 %s
-}
-define i64 @hoo(<4 x i16>* %p) {
- %t = load <4 x i16>* %p
- %u = add <4 x i16> %t, %t
- %s = bitcast <4 x i16> %u to i64
- ret i64 %s
-}
-define i64 @ioo(<8 x i8>* %p) {
- %t = load <8 x i8>* %p
- %u = add <8 x i8> %t, %t
- %s = bitcast <8 x i8> %u to i64
- ret i64 %s
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mmx-copy-gprs.ll b/libclamav/c++/llvm/test/CodeGen/X86/mmx-copy-gprs.ll
deleted file mode 100644
index 3607043..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mmx-copy-gprs.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep {movq.*(%rsi), %rax}
-; RUN: llc < %s -march=x86 -mattr=-sse2 | grep {movl.*4(%eax),}
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep {movsd.(%eax),}
-
-; This test should use GPRs to copy the mmx value, not MMX regs. Using mmx regs,
-; increases the places that need to use emms.
-
-; rdar://5741668
-
-define void @foo(<1 x i64>* %x, <1 x i64>* %y) nounwind {
-entry:
- %tmp1 = load <1 x i64>* %y, align 8 ; <<1 x i64>> [#uses=1]
- store <1 x i64> %tmp1, <1 x i64>* %x, align 8
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mmx-emms.ll b/libclamav/c++/llvm/test/CodeGen/X86/mmx-emms.ll
deleted file mode 100644
index 5ff2588..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mmx-emms.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx | grep emms
-define void @foo() {
-entry:
- call void @llvm.x86.mmx.emms( )
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare void @llvm.x86.mmx.emms()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mmx-insert-element.ll b/libclamav/c++/llvm/test/CodeGen/X86/mmx-insert-element.ll
deleted file mode 100644
index a063ee1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mmx-insert-element.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx | not grep movq
-; RUN: llc < %s -march=x86 -mattr=+mmx | grep psllq
-
-define <2 x i32> @qux(i32 %A) nounwind {
- %tmp3 = insertelement <2 x i32> < i32 0, i32 undef >, i32 %A, i32 1 ; <<2 x i32>> [#uses=1]
- ret <2 x i32> %tmp3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mmx-pinsrw.ll b/libclamav/c++/llvm/test/CodeGen/X86/mmx-pinsrw.ll
deleted file mode 100644
index 3af09f4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mmx-pinsrw.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx | grep pinsrw | count 1
-; PR2562
-
-external global i16 ; <i16*>:0 [#uses=1]
-external global <4 x i16> ; <<4 x i16>*>:1 [#uses=2]
-
-declare void @abort()
-
-define void @""() {
- load i16* @0 ; <i16>:1 [#uses=1]
- load <4 x i16>* @1 ; <<4 x i16>>:2 [#uses=1]
- insertelement <4 x i16> %2, i16 %1, i32 0 ; <<4 x i16>>:3 [#uses=1]
- store <4 x i16> %3, <4 x i16>* @1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mmx-punpckhdq.ll b/libclamav/c++/llvm/test/CodeGen/X86/mmx-punpckhdq.ll
deleted file mode 100644
index 0af7e01..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mmx-punpckhdq.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx | grep punpckhdq | count 1
-
-define void @bork(<1 x i64>* %x) {
-entry:
- %tmp2 = load <1 x i64>* %x ; <<1 x i64>> [#uses=1]
- %tmp6 = bitcast <1 x i64> %tmp2 to <2 x i32> ; <<2 x i32>> [#uses=1]
- %tmp9 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> < i32 1, i32 1 > ; <<2 x i32>> [#uses=1]
- %tmp10 = bitcast <2 x i32> %tmp9 to <1 x i64> ; <<1 x i64>> [#uses=1]
- store <1 x i64> %tmp10, <1 x i64>* %x
- tail call void @llvm.x86.mmx.emms( )
- ret void
-}
-
-declare void @llvm.x86.mmx.emms()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mmx-s2v.ll b/libclamav/c++/llvm/test/CodeGen/X86/mmx-s2v.ll
deleted file mode 100644
index c98023c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mmx-s2v.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx
-; PR2574
-
-define void @entry(i32 %m_task_id, i32 %start_x, i32 %end_x) {; <label>:0
- br i1 true, label %bb.nph, label %._crit_edge
-
-bb.nph: ; preds = %bb.nph, %0
- %t2206f2.0 = phi <2 x float> [ %2, %bb.nph ], [ undef, %0 ] ; <<2 x float>> [#uses=1]
- insertelement <2 x float> %t2206f2.0, float 0.000000e+00, i32 0 ; <<2 x float>>:1 [#uses=1]
- insertelement <2 x float> %1, float 0.000000e+00, i32 1 ; <<2 x float>>:2 [#uses=1]
- br label %bb.nph
-
-._crit_edge: ; preds = %0
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mmx-shift.ll b/libclamav/c++/llvm/test/CodeGen/X86/mmx-shift.ll
deleted file mode 100644
index dd0aa2c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mmx-shift.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx | grep psllq | grep 32
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep psllq | grep 32
-; RUN: llc < %s -march=x86 -mattr=+mmx | grep psrad
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep psrlw
-
-define i64 @t1(<1 x i64> %mm1) nounwind {
-entry:
- %tmp6 = tail call <1 x i64> @llvm.x86.mmx.pslli.q( <1 x i64> %mm1, i32 32 ) ; <<1 x i64>> [#uses=1]
- %retval1112 = bitcast <1 x i64> %tmp6 to i64 ; <i64> [#uses=1]
- ret i64 %retval1112
-}
-
-declare <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64>, i32) nounwind readnone
-
-define i64 @t2(<2 x i32> %mm1, <2 x i32> %mm2) nounwind {
-entry:
- %tmp7 = tail call <2 x i32> @llvm.x86.mmx.psra.d( <2 x i32> %mm1, <2 x i32> %mm2 ) nounwind readnone ; <<2 x i32>> [#uses=1]
- %retval1112 = bitcast <2 x i32> %tmp7 to i64 ; <i64> [#uses=1]
- ret i64 %retval1112
-}
-
-declare <2 x i32> @llvm.x86.mmx.psra.d(<2 x i32>, <2 x i32>) nounwind readnone
-
-define i64 @t3(<1 x i64> %mm1, i32 %bits) nounwind {
-entry:
- %tmp6 = bitcast <1 x i64> %mm1 to <4 x i16> ; <<4 x i16>> [#uses=1]
- %tmp8 = tail call <4 x i16> @llvm.x86.mmx.psrli.w( <4 x i16> %tmp6, i32 %bits ) nounwind readnone ; <<4 x i16>> [#uses=1]
- %retval1314 = bitcast <4 x i16> %tmp8 to i64 ; <i64> [#uses=1]
- ret i64 %retval1314
-}
-
-declare <4 x i16> @llvm.x86.mmx.psrli.w(<4 x i16>, i32) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mmx-shuffle.ll b/libclamav/c++/llvm/test/CodeGen/X86/mmx-shuffle.ll
deleted file mode 100644
index e3125c7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mmx-shuffle.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -mcpu=yonah
-; PR1427
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-pc-linux-gnu"
- %struct.DrawHelper = type { void (i32, %struct.QT_FT_Span*, i8*)*, void (i32, %struct.QT_FT_Span*, i8*)*, void (%struct.QRasterBuffer*, i32, i32, i32, i8*, i32, i32, i32)*, void (%struct.QRasterBuffer*, i32, i32, i32, i8*, i32, i32, i32)*, void (%struct.QRasterBuffer*, i32, i32, i32, i32, i32)* }
- %struct.QBasicAtomic = type { i32 }
- %struct.QClipData = type { i32, "struct.QClipData::ClipLine"*, i32, i32, %struct.QT_FT_Span*, i32, i32, i32, i32 }
- "struct.QClipData::ClipLine" = type { i32, %struct.QT_FT_Span* }
- %struct.QRasterBuffer = type { %struct.QRect, %struct.QRegion, %struct.QClipData*, %struct.QClipData*, i8, i32, i32, %struct.DrawHelper*, i32, i32, i32, i8* }
- %struct.QRect = type { i32, i32, i32, i32 }
- %struct.QRegion = type { "struct.QRegion::QRegionData"* }
- "struct.QRegion::QRegionData" = type { %struct.QBasicAtomic, %struct._XRegion*, i8*, %struct.QRegionPrivate* }
- %struct.QRegionPrivate = type opaque
- %struct.QT_FT_Span = type { i16, i16, i16, i8 }
- %struct._XRegion = type opaque
-
-define void @_Z19qt_bitmapblit16_sseP13QRasterBufferiijPKhiii(%struct.QRasterBuffer* %rasterBuffer, i32 %x, i32 %y, i32 %color, i8* %src, i32 %width, i32 %height, i32 %stride) {
-entry:
- %tmp528 = bitcast <8 x i8> zeroinitializer to <2 x i32> ; <<2 x i32>> [#uses=1]
- %tmp529 = and <2 x i32> %tmp528, bitcast (<4 x i16> < i16 -32640, i16 16448, i16 8224, i16 4112 > to <2 x i32>) ; <<2 x i32>> [#uses=1]
- %tmp542 = bitcast <2 x i32> %tmp529 to <4 x i16> ; <<4 x i16>> [#uses=1]
- %tmp543 = add <4 x i16> %tmp542, < i16 0, i16 16448, i16 24672, i16 28784 > ; <<4 x i16>> [#uses=1]
- %tmp555 = bitcast <4 x i16> %tmp543 to <8 x i8> ; <<8 x i8>> [#uses=1]
- tail call void @llvm.x86.mmx.maskmovq( <8 x i8> zeroinitializer, <8 x i8> %tmp555, i8* null )
- ret void
-}
-
-declare void @llvm.x86.mmx.maskmovq(<8 x i8>, <8 x i8>, i8*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mmx-vzmovl-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/mmx-vzmovl-2.ll
deleted file mode 100644
index 8253c20..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mmx-vzmovl-2.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep pxor
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep punpckldq
-
- %struct.vS1024 = type { [8 x <4 x i32>] }
- %struct.vS512 = type { [4 x <4 x i32>] }
-
-declare <1 x i64> @llvm.x86.mmx.psrli.q(<1 x i64>, i32) nounwind readnone
-
-define void @t() nounwind {
-entry:
- br label %bb554
-
-bb554: ; preds = %bb554, %entry
- %sum.0.reg2mem.0 = phi <1 x i64> [ %tmp562, %bb554 ], [ zeroinitializer, %entry ] ; <<1 x i64>> [#uses=1]
- %0 = load <1 x i64>* null, align 8 ; <<1 x i64>> [#uses=2]
- %1 = bitcast <1 x i64> %0 to <2 x i32> ; <<2 x i32>> [#uses=1]
- %tmp555 = and <2 x i32> %1, < i32 -1, i32 0 > ; <<2 x i32>> [#uses=1]
- %2 = bitcast <2 x i32> %tmp555 to <1 x i64> ; <<1 x i64>> [#uses=1]
- %3 = call <1 x i64> @llvm.x86.mmx.psrli.q(<1 x i64> %0, i32 32) nounwind readnone ; <<1 x i64>> [#uses=1]
- store <1 x i64> %sum.0.reg2mem.0, <1 x i64>* null
- %tmp558 = add <1 x i64> %sum.0.reg2mem.0, %2 ; <<1 x i64>> [#uses=1]
- %4 = call <1 x i64> @llvm.x86.mmx.psrli.q(<1 x i64> %tmp558, i32 32) nounwind readnone ; <<1 x i64>> [#uses=1]
- %tmp562 = add <1 x i64> %4, %3 ; <<1 x i64>> [#uses=1]
- br label %bb554
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mmx-vzmovl.ll b/libclamav/c++/llvm/test/CodeGen/X86/mmx-vzmovl.ll
deleted file mode 100644
index d21e240..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mmx-vzmovl.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep movd
-; RUN: llc < %s -march=x86-64 -mattr=+mmx | grep movq
-
-define void @foo(<1 x i64>* %a, <1 x i64>* %b) nounwind {
-entry:
- %0 = load <1 x i64>* %a, align 8 ; <<1 x i64>> [#uses=1]
- %1 = bitcast <1 x i64> %0 to <2 x i32> ; <<2 x i32>> [#uses=1]
- %2 = and <2 x i32> %1, < i32 -1, i32 0 > ; <<2 x i32>> [#uses=1]
- %3 = bitcast <2 x i32> %2 to <1 x i64> ; <<1 x i64>> [#uses=1]
- store <1 x i64> %3, <1 x i64>* %b, align 8
- br label %bb2
-
-bb2: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/movfs.ll b/libclamav/c++/llvm/test/CodeGen/X86/movfs.ll
deleted file mode 100644
index 823e986..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/movfs.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 | grep fs
-
-define i32 @foo() nounwind readonly {
-entry:
- %tmp = load i32* addrspace(257)* getelementptr (i32* addrspace(257)* inttoptr (i32 72 to i32* addrspace(257)*), i32 31) ; <i32*> [#uses=1]
- %tmp1 = load i32* %tmp ; <i32> [#uses=1]
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/movgs.ll b/libclamav/c++/llvm/test/CodeGen/X86/movgs.ll
deleted file mode 100644
index b04048b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/movgs.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 | grep gs
-
-define i32 @foo() nounwind readonly {
-entry:
- %tmp = load i32* addrspace(256)* getelementptr (i32* addrspace(256)* inttoptr (i32 72 to i32* addrspace(256)*), i32 31) ; <i32*> [#uses=1]
- %tmp1 = load i32* %tmp ; <i32> [#uses=1]
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mul-legalize.ll b/libclamav/c++/llvm/test/CodeGen/X86/mul-legalize.ll
deleted file mode 100644
index 069737d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mul-legalize.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 | grep 24576
-; PR2135
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-pc-linux-gnu"
-@.str = constant [13 x i8] c"c45531m.adb\00\00"
-
-define void @main() nounwind {
-entry:
- %tmp1 = call i1 @report__equal( i32 3, i32 3 )
- %b.0 = select i1 %tmp1, i64 35184372088832, i64 0
- %tmp7 = mul i64 3, %b.0
- %tmp32 = icmp eq i64 %tmp7, 105553116266496
- br i1 %tmp32, label %return, label %bb35
-bb35:
- call void @abort( )
- unreachable
-return:
- ret void
-}
-
-declare i1 @report__equal(i32 %x, i32 %y) nounwind
-
-declare void @abort()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mul-remat.ll b/libclamav/c++/llvm/test/CodeGen/X86/mul-remat.ll
deleted file mode 100644
index 3fa0050..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mul-remat.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | count 1
-; PR1874
-
-define i32 @test(i32 %a, i32 %b) {
-entry:
- %tmp3 = mul i32 %b, %a
- ret i32 %tmp3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mul-shift-reassoc.ll b/libclamav/c++/llvm/test/CodeGen/X86/mul-shift-reassoc.ll
deleted file mode 100644
index 3777d8b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mul-shift-reassoc.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 | grep lea
-; RUN: llc < %s -march=x86 | not grep add
-
-define i32 @test(i32 %X, i32 %Y) {
- ; Push the shl through the mul to allow an LEA to be formed, instead
- ; of using a shift and add separately.
- %tmp.2 = shl i32 %X, 1 ; <i32> [#uses=1]
- %tmp.3 = mul i32 %tmp.2, %Y ; <i32> [#uses=1]
- %tmp.5 = add i32 %tmp.3, %Y ; <i32> [#uses=1]
- ret i32 %tmp.5
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mul128.ll b/libclamav/c++/llvm/test/CodeGen/X86/mul128.ll
deleted file mode 100644
index 6825b99..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mul128.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep mul | count 3
-
-define i128 @foo(i128 %t, i128 %u) {
- %k = mul i128 %t, %u
- ret i128 %k
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/mul64.ll b/libclamav/c++/llvm/test/CodeGen/X86/mul64.ll
deleted file mode 100644
index 5a25c5d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/mul64.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mul | count 3
-
-define i64 @foo(i64 %t, i64 %u) {
- %k = mul i64 %t, %u
- ret i64 %k
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/multiple-return-values-cross-block.ll b/libclamav/c++/llvm/test/CodeGen/X86/multiple-return-values-cross-block.ll
deleted file mode 100644
index e9837d0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/multiple-return-values-cross-block.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86
-
-declare {x86_fp80, x86_fp80} @test()
-
-define void @call2(x86_fp80 *%P1, x86_fp80 *%P2) {
- %a = call {x86_fp80,x86_fp80} @test()
- %b = getresult {x86_fp80,x86_fp80} %a, 1
- store x86_fp80 %b, x86_fp80* %P1
-br label %L
-
-L:
- %c = getresult {x86_fp80,x86_fp80} %a, 0
- store x86_fp80 %c, x86_fp80* %P2
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/multiple-return-values.ll b/libclamav/c++/llvm/test/CodeGen/X86/multiple-return-values.ll
deleted file mode 100644
index 018d997..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/multiple-return-values.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=x86
-
-define {i64, float} @bar(i64 %a, float %b) {
- %y = add i64 %a, 7
- %z = fadd float %b, 7.0
- ret i64 %y, float %z
-}
-
-define i64 @foo() {
- %M = call {i64, float} @bar(i64 21, float 21.0)
- %N = getresult {i64, float} %M, 0
- %O = getresult {i64, float} %M, 1
- %P = fptosi float %O to i64
- %Q = add i64 %P, %N
- ret i64 %Q
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/nancvt.ll b/libclamav/c++/llvm/test/CodeGen/X86/nancvt.ll
deleted file mode 100644
index 8b85b20..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/nancvt.ll
+++ /dev/null
@@ -1,185 +0,0 @@
-; CLAMAV local: no opt
-; RUNX: opt < %s -std-compile-opts | llc > %t
-; RUNX: grep 2147027116 %t | count 3
-; RUNX: grep 2147228864 %t | count 3
-; RUNX: grep 2146502828 %t | count 3
-; RUNX: grep 2143034560 %t | count 3
-; RUN: true
-; Compile time conversions of NaNs.
-; ModuleID = 'nan2.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin8"
- %struct..0anon = type { float }
- %struct..1anon = type { double }
-@fnan = constant [3 x i32] [ i32 2143831397, i32 2143831396, i32 2143831398 ] ; <[3 x i32]*> [#uses=1]
-@dnan = constant [3 x i64] [ i64 9223235251041752696, i64 9223235251041752697, i64 9223235250773317239 ], align 8 ; <[3 x i64]*> [#uses=1]
-@fsnan = constant [3 x i32] [ i32 2139637093, i32 2139637092, i32 2139637094 ] ; <[3 x i32]*> [#uses=1]
-@dsnan = constant [3 x i64] [ i64 9220983451228067448, i64 9220983451228067449, i64 9220983450959631991 ], align 8 ; <[3 x i64]*> [#uses=1]
-@.str = internal constant [10 x i8] c"%08x%08x\0A\00" ; <[10 x i8]*> [#uses=2]
-@.str1 = internal constant [6 x i8] c"%08x\0A\00" ; <[6 x i8]*> [#uses=2]
-
-@var = external global i32
-
-define i32 @main() {
-entry:
- %retval = alloca i32, align 4 ; <i32*> [#uses=1]
- %i = alloca i32, align 4 ; <i32*> [#uses=20]
- %uf = alloca %struct..0anon, align 4 ; <%struct..0anon*> [#uses=8]
- %ud = alloca %struct..1anon, align 8 ; <%struct..1anon*> [#uses=10]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 0, i32* %i, align 4
- br label %bb23
-
-bb: ; preds = %bb23
- %tmp = load i32* %i, align 4 ; <i32> [#uses=1]
- %tmp1 = getelementptr [3 x i32]* @fnan, i32 0, i32 %tmp ; <i32*> [#uses=1]
- %tmp2 = load i32* %tmp1, align 4 ; <i32> [#uses=1]
- %tmp3 = getelementptr %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
- %tmp34 = bitcast float* %tmp3 to i32* ; <i32*> [#uses=1]
- store i32 %tmp2, i32* %tmp34, align 4
- %tmp5 = getelementptr %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
- %tmp6 = load float* %tmp5, align 4 ; <float> [#uses=1]
- %tmp67 = fpext float %tmp6 to double ; <double> [#uses=1]
- %tmp8 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
- store double %tmp67, double* %tmp8, align 8
- %tmp9 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp910 = bitcast double* %tmp9 to i64* ; <i64*> [#uses=1]
- %tmp11 = load i64* %tmp910, align 8 ; <i64> [#uses=1]
- %tmp1112 = trunc i64 %tmp11 to i32 ; <i32> [#uses=1]
- %tmp13 = and i32 %tmp1112, -1 ; <i32> [#uses=1]
- %tmp14 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp1415 = bitcast double* %tmp14 to i64* ; <i64*> [#uses=1]
- %tmp16 = load i64* %tmp1415, align 8 ; <i64> [#uses=1]
- %.cast = zext i32 32 to i64 ; <i64> [#uses=1]
- %tmp17 = ashr i64 %tmp16, %.cast ; <i64> [#uses=1]
- %tmp1718 = trunc i64 %tmp17 to i32 ; <i32> [#uses=1]
- %tmp19 = getelementptr [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
- volatile store i32 %tmp1718, i32* @var
- volatile store i32 %tmp13, i32* @var
- %tmp21 = load i32* %i, align 4 ; <i32> [#uses=1]
- %tmp22 = add i32 %tmp21, 1 ; <i32> [#uses=1]
- store i32 %tmp22, i32* %i, align 4
- br label %bb23
-
-bb23: ; preds = %bb, %entry
- %tmp24 = load i32* %i, align 4 ; <i32> [#uses=1]
- %tmp25 = icmp sle i32 %tmp24, 2 ; <i1> [#uses=1]
- %tmp2526 = zext i1 %tmp25 to i8 ; <i8> [#uses=1]
- %toBool = icmp ne i8 %tmp2526, 0 ; <i1> [#uses=1]
- br i1 %toBool, label %bb, label %bb27
-
-bb27: ; preds = %bb23
- store i32 0, i32* %i, align 4
- br label %bb46
-
-bb28: ; preds = %bb46
- %tmp29 = load i32* %i, align 4 ; <i32> [#uses=1]
- %tmp30 = getelementptr [3 x i64]* @dnan, i32 0, i32 %tmp29 ; <i64*> [#uses=1]
- %tmp31 = load i64* %tmp30, align 8 ; <i64> [#uses=1]
- %tmp32 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp3233 = bitcast double* %tmp32 to i64* ; <i64*> [#uses=1]
- store i64 %tmp31, i64* %tmp3233, align 8
- %tmp35 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp36 = load double* %tmp35, align 8 ; <double> [#uses=1]
- %tmp3637 = fptrunc double %tmp36 to float ; <float> [#uses=1]
- %tmp38 = getelementptr %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
- store float %tmp3637, float* %tmp38, align 4
- %tmp39 = getelementptr %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
- %tmp3940 = bitcast float* %tmp39 to i32* ; <i32*> [#uses=1]
- %tmp41 = load i32* %tmp3940, align 4 ; <i32> [#uses=1]
- %tmp42 = getelementptr [6 x i8]* @.str1, i32 0, i32 0 ; <i8*> [#uses=1]
- volatile store i32 %tmp41, i32* @var
- %tmp44 = load i32* %i, align 4 ; <i32> [#uses=1]
- %tmp45 = add i32 %tmp44, 1 ; <i32> [#uses=1]
- store i32 %tmp45, i32* %i, align 4
- br label %bb46
-
-bb46: ; preds = %bb28, %bb27
- %tmp47 = load i32* %i, align 4 ; <i32> [#uses=1]
- %tmp48 = icmp sle i32 %tmp47, 2 ; <i1> [#uses=1]
- %tmp4849 = zext i1 %tmp48 to i8 ; <i8> [#uses=1]
- %toBool50 = icmp ne i8 %tmp4849, 0 ; <i1> [#uses=1]
- br i1 %toBool50, label %bb28, label %bb51
-
-bb51: ; preds = %bb46
- store i32 0, i32* %i, align 4
- br label %bb78
-
-bb52: ; preds = %bb78
- %tmp53 = load i32* %i, align 4 ; <i32> [#uses=1]
- %tmp54 = getelementptr [3 x i32]* @fsnan, i32 0, i32 %tmp53 ; <i32*> [#uses=1]
- %tmp55 = load i32* %tmp54, align 4 ; <i32> [#uses=1]
- %tmp56 = getelementptr %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
- %tmp5657 = bitcast float* %tmp56 to i32* ; <i32*> [#uses=1]
- store i32 %tmp55, i32* %tmp5657, align 4
- %tmp58 = getelementptr %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
- %tmp59 = load float* %tmp58, align 4 ; <float> [#uses=1]
- %tmp5960 = fpext float %tmp59 to double ; <double> [#uses=1]
- %tmp61 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
- store double %tmp5960, double* %tmp61, align 8
- %tmp62 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp6263 = bitcast double* %tmp62 to i64* ; <i64*> [#uses=1]
- %tmp64 = load i64* %tmp6263, align 8 ; <i64> [#uses=1]
- %tmp6465 = trunc i64 %tmp64 to i32 ; <i32> [#uses=1]
- %tmp66 = and i32 %tmp6465, -1 ; <i32> [#uses=1]
- %tmp68 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp6869 = bitcast double* %tmp68 to i64* ; <i64*> [#uses=1]
- %tmp70 = load i64* %tmp6869, align 8 ; <i64> [#uses=1]
- %.cast71 = zext i32 32 to i64 ; <i64> [#uses=1]
- %tmp72 = ashr i64 %tmp70, %.cast71 ; <i64> [#uses=1]
- %tmp7273 = trunc i64 %tmp72 to i32 ; <i32> [#uses=1]
- %tmp74 = getelementptr [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
- volatile store i32 %tmp7273, i32* @var
- volatile store i32 %tmp66, i32* @var
- %tmp76 = load i32* %i, align 4 ; <i32> [#uses=1]
- %tmp77 = add i32 %tmp76, 1 ; <i32> [#uses=1]
- store i32 %tmp77, i32* %i, align 4
- br label %bb78
-
-bb78: ; preds = %bb52, %bb51
- %tmp79 = load i32* %i, align 4 ; <i32> [#uses=1]
- %tmp80 = icmp sle i32 %tmp79, 2 ; <i1> [#uses=1]
- %tmp8081 = zext i1 %tmp80 to i8 ; <i8> [#uses=1]
- %toBool82 = icmp ne i8 %tmp8081, 0 ; <i1> [#uses=1]
- br i1 %toBool82, label %bb52, label %bb83
-
-bb83: ; preds = %bb78
- store i32 0, i32* %i, align 4
- br label %bb101
-
-bb84: ; preds = %bb101
- %tmp85 = load i32* %i, align 4 ; <i32> [#uses=1]
- %tmp86 = getelementptr [3 x i64]* @dsnan, i32 0, i32 %tmp85 ; <i64*> [#uses=1]
- %tmp87 = load i64* %tmp86, align 8 ; <i64> [#uses=1]
- %tmp88 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp8889 = bitcast double* %tmp88 to i64* ; <i64*> [#uses=1]
- store i64 %tmp87, i64* %tmp8889, align 8
- %tmp90 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp91 = load double* %tmp90, align 8 ; <double> [#uses=1]
- %tmp9192 = fptrunc double %tmp91 to float ; <float> [#uses=1]
- %tmp93 = getelementptr %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
- store float %tmp9192, float* %tmp93, align 4
- %tmp94 = getelementptr %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
- %tmp9495 = bitcast float* %tmp94 to i32* ; <i32*> [#uses=1]
- %tmp96 = load i32* %tmp9495, align 4 ; <i32> [#uses=1]
- %tmp97 = getelementptr [6 x i8]* @.str1, i32 0, i32 0 ; <i8*> [#uses=1]
- volatile store i32 %tmp96, i32* @var
- %tmp99 = load i32* %i, align 4 ; <i32> [#uses=1]
- %tmp100 = add i32 %tmp99, 1 ; <i32> [#uses=1]
- store i32 %tmp100, i32* %i, align 4
- br label %bb101
-
-bb101: ; preds = %bb84, %bb83
- %tmp102 = load i32* %i, align 4 ; <i32> [#uses=1]
- %tmp103 = icmp sle i32 %tmp102, 2 ; <i1> [#uses=1]
- %tmp103104 = zext i1 %tmp103 to i8 ; <i8> [#uses=1]
- %toBool105 = icmp ne i8 %tmp103104, 0 ; <i1> [#uses=1]
- br i1 %toBool105, label %bb84, label %bb106
-
-bb106: ; preds = %bb101
- br label %return
-
-return: ; preds = %bb106
- %retval107 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval107
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/narrow_op-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/narrow_op-1.ll
deleted file mode 100644
index 18f1108..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/narrow_op-1.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep orb | count 1
-; RUN: llc < %s -march=x86-64 | grep orb | grep 1
-; RUN: llc < %s -march=x86-64 | grep orl | count 1
-; RUN: llc < %s -march=x86-64 | grep orl | grep 16842752
-
- %struct.bf = type { i64, i16, i16, i32 }
-@bfi = common global %struct.bf zeroinitializer, align 16
-
-define void @t1() nounwind optsize ssp {
-entry:
- %0 = load i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
- %1 = or i32 %0, 65536
- store i32 %1, i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
- ret void
-}
-
-define void @t2() nounwind optsize ssp {
-entry:
- %0 = load i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
- %1 = or i32 %0, 16842752
- store i32 %1, i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/narrow_op-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/narrow_op-2.ll
deleted file mode 100644
index 796ef7a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/narrow_op-2.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
- %struct.bf = type { i64, i16, i16, i32 }
-@bfi = external global %struct.bf*
-
-define void @t1() nounwind ssp {
-entry:
-
-; CHECK: andb $-2, 10(
-; CHECK: andb $-3, 10(
-
- %0 = load %struct.bf** @bfi, align 8
- %1 = getelementptr %struct.bf* %0, i64 0, i32 1
- %2 = bitcast i16* %1 to i32*
- %3 = load i32* %2, align 1
- %4 = and i32 %3, -65537
- store i32 %4, i32* %2, align 1
- %5 = load %struct.bf** @bfi, align 8
- %6 = getelementptr %struct.bf* %5, i64 0, i32 1
- %7 = bitcast i16* %6 to i32*
- %8 = load i32* %7, align 1
- %9 = and i32 %8, -131073
- store i32 %9, i32* %7, align 1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/neg-shl-add.ll b/libclamav/c++/llvm/test/CodeGen/X86/neg-shl-add.ll
deleted file mode 100644
index 7aebc38..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/neg-shl-add.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc -march=x86-64 < %s | not grep negq
-
-; These sequences don't need neg instructions; they can be done with
-; a single shift and sub each.
-
-define i64 @foo(i64 %x, i64 %y, i64 %n) nounwind {
- %a = sub i64 0, %y
- %b = shl i64 %a, %n
- %c = add i64 %b, %x
- ret i64 %c
-}
-define i64 @boo(i64 %x, i64 %y, i64 %n) nounwind {
- %a = sub i64 0, %y
- %b = shl i64 %a, %n
- %c = add i64 %x, %b
- ret i64 %c
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/neg_fp.ll b/libclamav/c++/llvm/test/CodeGen/X86/neg_fp.ll
deleted file mode 100644
index 57164f2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/neg_fp.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse41 -o %t
-; RUN: grep xorps %t | count 1
-
-; Test that when we don't -enable-unsafe-fp-math, we don't do the optimization
-; -0 - (A - B) to (B - A) because A==B, -0 != 0
-
-define float @negfp(float %a, float %b) {
-entry:
- %sub = fsub float %a, %b ; <float> [#uses=1]
- %neg = fsub float -0.000000e+00, %sub ; <float> [#uses=1]
- ret float %neg
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/negate-add-zero.ll b/libclamav/c++/llvm/test/CodeGen/X86/negate-add-zero.ll
deleted file mode 100644
index c3f412e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/negate-add-zero.ll
+++ /dev/null
@@ -1,1145 +0,0 @@
-; RUN: llc < %s -enable-unsafe-fp-math -march=x86 | not grep xor
-; PR3374
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin7"
- %struct.AtomList = type { %"struct.CDSListRep<IVMAtom*>"* }
- %struct.AtomTree = type { %struct.IVM*, %"struct.CDSList<CDSList<HingeNode*> >" }
- %"struct.CDS::DefaultAlloc" = type <{ i8 }>
- %"struct.CDS::SingularError" = type { %"struct.CDS::exception" }
- %"struct.CDS::auto_ptr<IVMAtom>" = type { %struct.IVMAtom* }
- %"struct.CDS::exception" = type { [300 x i8] }
- %"struct.CDSList<CDSList<HingeNode*> >" = type { %"struct.CDSListRep<CDSList<HingeNode*> >"* }
- %"struct.CDSList<CDSList<int> >" = type { %"struct.CDSListRep<CDSList<int> >"* }
- %"struct.CDSList<HingeNode*>" = type { %"struct.CDSListRep<HingeNode*>"* }
- %"struct.CDSList<InternalDynamics::HingeSpec>" = type { %"struct.CDSListRep<InternalDynamics::HingeSpec>"* }
- %"struct.CDSList<Loop>" = type { %"struct.CDSListRep<Loop>"* }
- %"struct.CDSList<Pair<int, int> >" = type { %"struct.CDSListRep<Pair<int, int> >"* }
- %"struct.CDSList<int>" = type { %"struct.CDSListRep<int>"* }
- %"struct.CDSListRep<CDSList<HingeNode*> >" = type opaque
- %"struct.CDSListRep<CDSList<int> >" = type opaque
- %"struct.CDSListRep<HingeNode*>" = type { i32, i32, %struct.HingeNode**, i32 }
- %"struct.CDSListRep<IVMAtom*>" = type { i32, i32, %struct.IVMAtom**, i32 }
- %"struct.CDSListRep<InternalDynamics::HingeSpec>" = type opaque
- %"struct.CDSListRep<Loop>" = type opaque
- %"struct.CDSListRep<Pair<int, int> >" = type opaque
- %"struct.CDSListRep<int>" = type { i32, i32, i32*, i32 }
- %"struct.CDSMatrixBase<double>" = type { %"struct.CDSMatrixRep<double>"* }
- %"struct.CDSMatrixRep<double>" = type opaque
- %"struct.CDSStringRep<char>" = type { i8*, i32, i32, i32, i32 }
- %"struct.CDSVector<Vec3,0,CDS::DefaultAlloc>" = type { %"struct.CDSVectorBase<Vec3,CDS::DefaultAlloc>" }
- %"struct.CDSVector<double,0,CDS::DefaultAlloc>" = type { %"struct.CDSVectorBase<double,CDS::DefaultAlloc>" }
- %"struct.CDSVectorBase<Vec3,CDS::DefaultAlloc>" = type { %"struct.CDSVectorRep<Vec3,CDS::DefaultAlloc>"* }
- %"struct.CDSVectorBase<double,CDS::DefaultAlloc>" = type { %"struct.CDSVectorRep<double,CDS::DefaultAlloc>"* }
- %"struct.CDSVectorRep<Vec3,CDS::DefaultAlloc>" = type { i32, %"struct.CDS::DefaultAlloc", %struct.Vec3*, i32 }
- %"struct.CDSVectorRep<double,CDS::DefaultAlloc>" = type { i32, %"struct.CDS::DefaultAlloc", double*, i32 }
- %"struct.FixedMatrix<double,1,1,0,0>" = type { %"struct.FixedMatrixBase<double,1,1>" }
- %"struct.FixedMatrix<double,1,3,0,0>" = type { %"struct.FixedMatrixBase<double,1,3>" }
- %"struct.FixedMatrix<double,1,6,0,0>" = type { %"struct.FixedMatrixBase<double,1,6>" }
- %"struct.FixedMatrix<double,2,2,0,0>" = type { %"struct.FixedMatrixBase<double,2,2>" }
- %"struct.FixedMatrix<double,2,6,0,0>" = type { %"struct.FixedMatrixBase<double,2,6>" }
- %"struct.FixedMatrix<double,3,3,0,0>" = type { %"struct.FixedMatrixBase<double,3,3>" }
- %"struct.FixedMatrix<double,3,6,0,0>" = type { %"struct.FixedMatrixBase<double,3,6>" }
- %"struct.FixedMatrix<double,5,5,0,0>" = type { %"struct.FixedMatrixBase<double,5,5>" }
- %"struct.FixedMatrix<double,5,6,0,0>" = type { %"struct.FixedMatrixBase<double,5,6>" }
- %"struct.FixedMatrixBase<double,1,1>" = type { [1 x double] }
- %"struct.FixedMatrixBase<double,1,3>" = type { [3 x double] }
- %"struct.FixedMatrixBase<double,1,6>" = type { [6 x double] }
- %"struct.FixedMatrixBase<double,2,2>" = type { [4 x double] }
- %"struct.FixedMatrixBase<double,2,6>" = type { [12 x double] }
- %"struct.FixedMatrixBase<double,3,3>" = type { [9 x double] }
- %"struct.FixedMatrixBase<double,3,6>" = type { [18 x double] }
- %"struct.FixedMatrixBase<double,5,5>" = type { [25 x double] }
- %"struct.FixedMatrixBase<double,5,6>" = type { [30 x double] }
- %"struct.FixedMatrixBase<double,6,6>" = type { [36 x double] }
- %"struct.FixedVector<double,2,0>" = type { %"struct.FixedVectorBase<double,2>" }
- %"struct.FixedVector<double,5,0>" = type { %"struct.FixedVectorBase<double,5>" }
- %"struct.FixedVectorBase<double,2>" = type { [2 x double] }
- %"struct.FixedVectorBase<double,5>" = type { [5 x double] }
- %struct.HNodeOrigin = type { %struct.HingeNode }
- %struct.HNodeRotate2 = type { %"struct.HingeNodeSpec<2>", %struct.Vec3, %struct.Vec3, %struct.Vec3, %struct.Vec3, %struct.Vec3, %struct.Mat3, %struct.Mat3, %struct.Vec3, %"struct.CDS::auto_ptr<IVMAtom>", %"struct.CDSVector<Vec3,0,CDS::DefaultAlloc>" }
- %struct.HNodeRotate3 = type { %"struct.HingeNodeSpec<3>", %struct.Vec4, %struct.Vec4, %struct.Vec4, %struct.Vec3, %"struct.CDS::auto_ptr<IVMAtom>", %"struct.CDSVector<Vec3,0,CDS::DefaultAlloc>", double, double, double, double, double, double, i8 }
- %struct.HNodeTorsion = type { %"struct.HingeNodeSpec<1>", %struct.Vec3, %"struct.CDSVector<Vec3,0,CDS::DefaultAlloc>", %struct.Vec3, %struct.Mat3 }
- %struct.HNodeTranslate = type { %"struct.HingeNodeSpec<3>", %struct.IVMAtom*, %struct.Vec3, %"struct.CDSVector<Vec3,0,CDS::DefaultAlloc>" }
- %struct.HNodeTranslateRotate2 = type { %"struct.HingeNodeSpec<5>", %struct.Vec3, %struct.Vec3, %struct.Vec3, %struct.Vec3, %struct.Vec3, %struct.Mat3, %struct.Mat3, %struct.Vec3, %"struct.CDS::auto_ptr<IVMAtom>", %"struct.CDSVector<Vec3,0,CDS::DefaultAlloc>" }
- %struct.HNodeTranslateRotate3 = type { %"struct.HingeNodeSpec<6>", %struct.Vec4, %struct.Vec4, %struct.Vec4, %struct.Vec3, %"struct.CDS::auto_ptr<IVMAtom>", %"struct.CDSVector<Vec3,0,CDS::DefaultAlloc>", double, double, double, double, double, double, i8 }
- %struct.HingeNode = type { i32 (...)**, %struct.HingeNode*, %"struct.CDSList<HingeNode*>", i32, %struct.AtomList, %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %struct.PhiMatrix, %struct.Mat6, %struct.Mat6, %"struct.FixedMatrix<double,1,6,0,0>", %struct.Mat6, %"struct.FixedMatrix<double,1,6,0,0>", %struct.Mat3, %struct.Mat6, %struct.IVM*, %struct.IVMAtom* }
- %"struct.HingeNodeSpec<1>" = type { %struct.HingeNode, i32, double, %struct.InertiaTensor, %struct.Mat6, %struct.Vec3, %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,1,0,0>", %"struct.FixedMatrix<double,1,1,0,0>", %"struct.FixedMatrix<double,1,1,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,1,0,0>", %"struct.FixedMatrix<double,1,1,0,0>", %"struct.FixedMatrix<double,1,1,0,0>", %"struct.FixedMatrix<double,1,1,0,0>", %"struct.FixedMatrix<double,1,6,0,0>" }
- %"struct.HingeNodeSpec<2>" = type { %struct.HingeNode, i32, double, %struct.InertiaTensor, %struct.Mat6, %struct.Vec3, %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedVector<double,2,0>", %"struct.FixedVector<double,2,0>", %"struct.FixedVector<double,2,0>", %"struct.FixedMatrix<double,2,6,0,0>", %"struct.FixedVector<double,2,0>", %"struct.FixedVector<double,2,0>", %"struct.FixedVector<double,2,0>", %"struct.FixedMatrix<double,2,2,0,0>", %"struct.FixedMatrix<double,2,6,0,0>" }
- %"struct.HingeNodeSpec<3>" = type { %struct.HingeNode, i32, double, %struct.InertiaTensor, %struct.Mat6, %struct.Vec3, %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,3,0,0>", %"struct.FixedMatrix<double,1,3,0,0>", %"struct.FixedMatrix<double,1,3,0,0>", %"struct.FixedMatrix<double,3,6,0,0>", %"struct.FixedMatrix<double,1,3,0,0>", %"struct.FixedMatrix<double,1,3,0,0>", %"struct.FixedMatrix<double,1,3,0,0>", %"struct.FixedMatrix<double,3,3,0,0>", %"struct.FixedMatrix<double,3,6,0,0>" }
- %"struct.HingeNodeSpec<5>" = type { %struct.HingeNode, i32, double, %struct.InertiaTensor, %struct.Mat6, %struct.Vec3, %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedVector<double,5,0>", %"struct.FixedVector<double,5,0>", %"struct.FixedVector<double,5,0>", %"struct.FixedMatrix<double,5,6,0,0>", %"struct.FixedVector<double,5,0>", %"struct.FixedVector<double,5,0>", %"struct.FixedVector<double,5,0>", %"struct.FixedMatrix<double,5,5,0,0>", %"struct.FixedMatrix<double,5,6,0,0>" }
- %"struct.HingeNodeSpec<6>" = type { %struct.HingeNode, i32, double, %struct.InertiaTensor, %struct.Mat6, %struct.Vec3, %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %struct.Mat6, %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %"struct.FixedMatrix<double,1,6,0,0>", %struct.Mat6, %struct.Mat6 }
- %struct.IVM = type { i32 (...)**, %struct.AtomTree*, %struct.Integrator*, %struct.LengthConstraints*, i32, i32, i32, i8, i8, i8, i8, double, double, double, double, double, double, double, double, double, i32, double, double, double, double, double, double, %"struct.CDSList<Loop>", %"struct.CDSList<Pair<int, int> >", %struct.AtomList, %"struct.CDSList<CDSList<int> >", %"struct.CDSList<InternalDynamics::HingeSpec>", %struct.String, %"struct.CDSList<int>", i32 (%"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)*, double (%"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)*, i32 (%"struct.CDSVector<Vec3,0,CDS::DefaultAlloc>"*)*, double (%"struct.CDSVector<Vec3,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<Vec3,0,CDS::DefaultAlloc>"*)* }
- %struct.IVMAtom = type { i32, %struct.HingeNode*, %struct.AtomList, %struct.Vec3, %struct.Vec3, %struct.Vec3, double, double }
- %struct.InertiaTensor = type { %struct.Mat3 }
- %struct.Integrator = type { i32 (...)**, %"struct.CDSVector<double,0,CDS::DefaultAlloc>", %"struct.CDSVector<double,0,CDS::DefaultAlloc>", %struct.IVM* }
- %"struct.InternalDynamics::HingeSpec" = type { %struct.String, i32, i32, %"struct.CDSList<int>" }
- %struct.LengthConstraints = type { double, i32, i32, %struct.IVM*, %struct.LengthConstraintsPrivates* }
- %struct.LengthConstraintsPrivates = type opaque
- %struct.Mat3 = type { %"struct.FixedMatrix<double,3,3,0,0>" }
- %struct.Mat6 = type { %"struct.FixedMatrixBase<double,6,6>" }
- %"struct.MatrixTools::InverseResults<FullMatrix<double> >" = type { %"struct.CDSVector<double,0,CDS::DefaultAlloc>", i32 }
- %struct.PhiMatrix = type { %struct.Vec3 }
- %struct.PhiMatrixTranspose = type { %struct.PhiMatrix* }
- %struct.RMat = type { %"struct.CDSMatrixBase<double>" }
- %struct.String = type { %"struct.CDSStringRep<char>"* }
- %"struct.SubMatrix<FixedMatrix<double, 6, 6, 0, 0> >" = type { %struct.Mat6*, i32, i32, i32, i32 }
- %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >" = type { %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, i32, i32 }
- %"struct.SubVector<FixedVector<double, 6, 0> >" = type { %"struct.FixedMatrix<double,1,6,0,0>"*, i32, i32 }
- %struct.Vec3 = type { %"struct.FixedMatrix<double,1,3,0,0>" }
- %struct.Vec4 = type { %"struct.FixedMatrix<double,2,2,0,0>" }
- %struct.__class_type_info_pseudo = type { %struct.__type_info_pseudo }
- %struct.__si_class_type_info_pseudo = type { %struct.__type_info_pseudo, %"struct.std::type_info"* }
- %struct.__type_info_pseudo = type { i8*, i8* }
- %"struct.std::basic_ios<char,std::char_traits<char> >" = type { %"struct.std::ios_base", %"struct.std::basic_ostream<char,std::char_traits<char> >"*, i8, i8, %"struct.std::basic_streambuf<char,std::char_traits<char> >"*, %"struct.std::ctype<char>"*, %"struct.std::num_get<char,std::istreambuf_iterator<char, std::char_traits<char> > >"*, %"struct.std::num_get<char,std::istreambuf_iterator<char, std::char_traits<char> > >"* }
- %"struct.std::basic_ostream<char,std::char_traits<char> >" = type { i32 (...)**, %"struct.std::basic_ios<char,std::char_traits<char> >" }
- %"struct.std::basic_streambuf<char,std::char_traits<char> >" = type { i32 (...)**, i8*, i8*, i8*, i8*, i8*, i8*, %"struct.std::locale" }
- %"struct.std::ctype<char>" = type { %"struct.std::locale::facet", i32*, i8, i32*, i32*, i32*, i8, [256 x i8], [256 x i8], i8 }
- %"struct.std::ios_base" = type { i32 (...)**, i32, i32, i32, i32, i32, %"struct.std::ios_base::_Callback_list"*, %"struct.std::ios_base::_Words", [8 x %"struct.std::ios_base::_Words"], i32, %"struct.std::ios_base::_Words"*, %"struct.std::locale" }
- %"struct.std::ios_base::_Callback_list" = type { %"struct.std::ios_base::_Callback_list"*, void (i32, %"struct.std::ios_base"*, i32)*, i32, i32 }
- %"struct.std::ios_base::_Words" = type { i8*, i32 }
- %"struct.std::locale" = type { %"struct.std::locale::_Impl"* }
- %"struct.std::locale::_Impl" = type { i32, %"struct.std::locale::facet"**, i32, %"struct.std::locale::facet"**, i8** }
- %"struct.std::locale::facet" = type { i32 (...)**, i32 }
- %"struct.std::num_get<char,std::istreambuf_iterator<char, std::char_traits<char> > >" = type { %"struct.std::locale::facet" }
- %"struct.std::type_info" = type { i32 (...)**, i8* }
-@_ZN9HingeNode7DEG2RADE = external constant double, align 8 ; <double*> [#uses=0]
-@"\01LC" = external constant [8 x i8] ; <[8 x i8]*> [#uses=0]
-@"\01LC1" = external constant [7 x i8] ; <[7 x i8]*> [#uses=0]
-@"\01LC2" = external constant [10 x i8] ; <[10 x i8]*> [#uses=0]
-@"\01LC3" = external constant [5 x i8] ; <[5 x i8]*> [#uses=0]
-@"\01LC4" = external constant [8 x i8] ; <[8 x i8]*> [#uses=0]
-@"\01LC5" = external constant [8 x i8] ; <[8 x i8]*> [#uses=0]
-@"\01LC6" = external constant [7 x i8] ; <[7 x i8]*> [#uses=0]
-@"\01LC7" = external constant [8 x i8] ; <[8 x i8]*> [#uses=0]
-@"\01LC8" = external constant [3 x i8] ; <[3 x i8]*> [#uses=0]
-@"\01LC9" = external constant [3 x i8] ; <[3 x i8]*> [#uses=0]
-@"\01LC10" = external constant [3 x i8] ; <[3 x i8]*> [#uses=0]
-@_ZStL8__ioinit = external global %"struct.CDS::DefaultAlloc" ; <%"struct.CDS::DefaultAlloc"*> [#uses=0]
-@__dso_handle = external global i8* ; <i8**> [#uses=0]
-@_ZTIN9HingeNode17VirtualBaseMethodE = external constant %struct.__class_type_info_pseudo ; <%struct.__class_type_info_pseudo*> [#uses=0]
-@_ZTVN10__cxxabiv117__class_type_infoE = external constant [0 x i32 (...)*] ; <[0 x i32 (...)*]*> [#uses=0]
-@_ZTSN9HingeNode17VirtualBaseMethodE = external constant [32 x i8], align 4 ; <[32 x i8]*> [#uses=0]
-@_ZTV9HingeNode = external constant [31 x i32 (...)*], align 32 ; <[31 x i32 (...)*]*> [#uses=0]
-@_ZTI9HingeNode = external constant %struct.__class_type_info_pseudo ; <%struct.__class_type_info_pseudo*> [#uses=0]
-@_ZTS9HingeNode = external constant [11 x i8] ; <[11 x i8]*> [#uses=0]
-@_ZTV11HNodeOrigin = external constant [31 x i32 (...)*], align 32 ; <[31 x i32 (...)*]*> [#uses=0]
-@_ZTI11HNodeOrigin = external constant %struct.__si_class_type_info_pseudo ; <%struct.__si_class_type_info_pseudo*> [#uses=0]
-@_ZTVN10__cxxabiv120__si_class_type_infoE = external constant [0 x i32 (...)*] ; <[0 x i32 (...)*]*> [#uses=0]
-@_ZTS11HNodeOrigin = external constant [14 x i8] ; <[14 x i8]*> [#uses=0]
-@_ZTV13HingeNodeSpecILi1EE = external constant [33 x i32 (...)*], align 32 ; <[33 x i32 (...)*]*> [#uses=0]
-@_ZTI13HingeNodeSpecILi1EE = external constant %struct.__si_class_type_info_pseudo ; <%struct.__si_class_type_info_pseudo*> [#uses=0]
-@_ZTS13HingeNodeSpecILi1EE = external constant [22 x i8] ; <[22 x i8]*> [#uses=0]
-@_ZTV13HingeNodeSpecILi3EE = external constant [33 x i32 (...)*], align 32 ; <[33 x i32 (...)*]*> [#uses=0]
-@_ZTI13HingeNodeSpecILi3EE = external constant %struct.__si_class_type_info_pseudo ; <%struct.__si_class_type_info_pseudo*> [#uses=0]
-@_ZTS13HingeNodeSpecILi3EE = external constant [22 x i8] ; <[22 x i8]*> [#uses=0]
-@_ZTV13HingeNodeSpecILi2EE = external constant [33 x i32 (...)*], align 32 ; <[33 x i32 (...)*]*> [#uses=0]
-@_ZTI13HingeNodeSpecILi2EE = external constant %struct.__si_class_type_info_pseudo ; <%struct.__si_class_type_info_pseudo*> [#uses=0]
-@_ZTS13HingeNodeSpecILi2EE = external constant [22 x i8] ; <[22 x i8]*> [#uses=0]
-@_ZTV13HingeNodeSpecILi6EE = external constant [33 x i32 (...)*], align 32 ; <[33 x i32 (...)*]*> [#uses=0]
-@_ZTI13HingeNodeSpecILi6EE = external constant %struct.__si_class_type_info_pseudo ; <%struct.__si_class_type_info_pseudo*> [#uses=0]
-@_ZTS13HingeNodeSpecILi6EE = external constant [22 x i8] ; <[22 x i8]*> [#uses=0]
-@_ZTV13HingeNodeSpecILi5EE = external constant [33 x i32 (...)*], align 32 ; <[33 x i32 (...)*]*> [#uses=0]
-@_ZTI13HingeNodeSpecILi5EE = external constant %struct.__si_class_type_info_pseudo ; <%struct.__si_class_type_info_pseudo*> [#uses=0]
-@_ZTS13HingeNodeSpecILi5EE = external constant [22 x i8] ; <[22 x i8]*> [#uses=0]
-@_ZSt4cout = external global %"struct.std::basic_ostream<char,std::char_traits<char> >" ; <%"struct.std::basic_ostream<char,std::char_traits<char> >"*> [#uses=0]
-@"\01LC11" = external constant [10 x i8] ; <[10 x i8]*> [#uses=0]
-@"\01LC12" = external constant [8 x i8] ; <[8 x i8]*> [#uses=0]
-@"\01LC13" = external constant [10 x i8] ; <[10 x i8]*> [#uses=0]
-@_ZSt4cerr = external global %"struct.std::basic_ostream<char,std::char_traits<char> >" ; <%"struct.std::basic_ostream<char,std::char_traits<char> >"*> [#uses=0]
-@"\01LC14" = external constant [29 x i8] ; <[29 x i8]*> [#uses=0]
-@"\01LC15" = external constant [11 x i8] ; <[11 x i8]*> [#uses=0]
-@"\01LC16" = external constant [13 x i8] ; <[13 x i8]*> [#uses=0]
-@"\01LC17" = external constant [21 x i8] ; <[21 x i8]*> [#uses=0]
-@"\01LC18" = external constant [8 x i8] ; <[8 x i8]*> [#uses=0]
-@"\01LC19" = external constant [4 x i8] ; <[4 x i8]*> [#uses=0]
-@"\01LC20" = external constant [42 x i8] ; <[42 x i8]*> [#uses=0]
-@_ZTIN16InternalDynamics9ExceptionE = external constant %struct.__class_type_info_pseudo ; <%struct.__class_type_info_pseudo*> [#uses=0]
-@_ZTSN16InternalDynamics9ExceptionE = external constant [31 x i8], align 4 ; <[31 x i8]*> [#uses=0]
-@_ZTIN3CDS13SingularErrorE = external constant %struct.__si_class_type_info_pseudo ; <%struct.__si_class_type_info_pseudo*> [#uses=0]
-@_ZTSN3CDS13SingularErrorE = external constant [22 x i8] ; <[22 x i8]*> [#uses=0]
-@_ZTIN3CDS9exceptionE = external constant %struct.__class_type_info_pseudo ; <%struct.__class_type_info_pseudo*> [#uses=0]
-@_ZTSN3CDS9exceptionE = external constant [17 x i8] ; <[17 x i8]*> [#uses=0]
-@_ZTV12HNodeTorsion = external constant [33 x i32 (...)*], align 32 ; <[33 x i32 (...)*]*> [#uses=0]
-@_ZTI12HNodeTorsion = external constant %struct.__si_class_type_info_pseudo ; <%struct.__si_class_type_info_pseudo*> [#uses=0]
-@_ZTS12HNodeTorsion = external constant [15 x i8] ; <[15 x i8]*> [#uses=0]
-@_ZTV12HNodeRotate3 = external constant [33 x i32 (...)*], align 32 ; <[33 x i32 (...)*]*> [#uses=0]
-@_ZTI12HNodeRotate3 = external constant %struct.__si_class_type_info_pseudo ; <%struct.__si_class_type_info_pseudo*> [#uses=0]
-@_ZTS12HNodeRotate3 = external constant [15 x i8] ; <[15 x i8]*> [#uses=0]
-@_ZTV12HNodeRotate2 = external constant [33 x i32 (...)*], align 32 ; <[33 x i32 (...)*]*> [#uses=0]
-@_ZTI12HNodeRotate2 = external constant %struct.__si_class_type_info_pseudo ; <%struct.__si_class_type_info_pseudo*> [#uses=0]
-@_ZTS12HNodeRotate2 = external constant [15 x i8] ; <[15 x i8]*> [#uses=0]
-@_ZTV21HNodeTranslateRotate3 = external constant [33 x i32 (...)*], align 32 ; <[33 x i32 (...)*]*> [#uses=0]
-@_ZTI21HNodeTranslateRotate3 = external constant %struct.__si_class_type_info_pseudo ; <%struct.__si_class_type_info_pseudo*> [#uses=0]
-@_ZTS21HNodeTranslateRotate3 = external constant [24 x i8] ; <[24 x i8]*> [#uses=0]
-@_ZTV21HNodeTranslateRotate2 = external constant [33 x i32 (...)*], align 32 ; <[33 x i32 (...)*]*> [#uses=0]
-@_ZTI21HNodeTranslateRotate2 = external constant %struct.__si_class_type_info_pseudo ; <%struct.__si_class_type_info_pseudo*> [#uses=0]
-@_ZTS21HNodeTranslateRotate2 = external constant [24 x i8] ; <[24 x i8]*> [#uses=0]
-@_ZTV14HNodeTranslate = external constant [33 x i32 (...)*], align 32 ; <[33 x i32 (...)*]*> [#uses=0]
-@_ZTI14HNodeTranslate = external constant %struct.__si_class_type_info_pseudo ; <%struct.__si_class_type_info_pseudo*> [#uses=0]
-@_ZTS14HNodeTranslate = external constant [17 x i8] ; <[17 x i8]*> [#uses=0]
-@"\01LC21" = external constant [31 x i8] ; <[31 x i8]*> [#uses=0]
-@"\01LC22" = external constant [6 x i8] ; <[6 x i8]*> [#uses=0]
-@"\01LC23" = external constant [12 x i8] ; <[12 x i8]*> [#uses=0]
-@"\01LC24" = external constant [5 x i8] ; <[5 x i8]*> [#uses=0]
-@"\01LC25" = external constant [7 x i8] ; <[7 x i8]*> [#uses=0]
-@"\01LC26" = external constant [7 x i8] ; <[7 x i8]*> [#uses=0]
-@"\01LC27" = external constant [43 x i8] ; <[43 x i8]*> [#uses=0]
-@"\01LC28" = external constant [15 x i8] ; <[15 x i8]*> [#uses=0]
-@"\01LC29" = external constant [20 x i8] ; <[20 x i8]*> [#uses=0]
-@"\01LC30" = external constant [41 x i8] ; <[41 x i8]*> [#uses=0]
-@llvm.global_ctors = external global [1 x { i32, void ()* }] ; <[1 x { i32, void ()* }]*> [#uses=0]
-
-declare void @_GLOBAL__I__ZN9HingeNode7DEG2RADE() section "__TEXT,__StaticInit,regular,pure_instructions"
-
-declare void @_ZN9HingeNode16velFromCartesianEv(%struct.HingeNode*) nounwind
-
-declare i32 @_ZNK9HingeNode6offsetEv(%struct.HingeNode*) nounwind
-
-declare i32 @_ZNK9HingeNode6getDOFEv(%struct.HingeNode*) nounwind
-
-declare i32 @_ZNK9HingeNode6getDimEv(%struct.HingeNode*) nounwind
-
-declare double @_ZN9HingeNode8kineticEEv(%struct.HingeNode*) nounwind
-
-declare double @_ZN9HingeNode8approxKEEv(%struct.HingeNode*) nounwind
-
-declare i8* @_ZN9HingeNode4typeEv(%struct.HingeNode*) nounwind
-
-declare i8* @_ZN11HNodeOrigin4typeEv(%struct.HNodeOrigin*) nounwind
-
-declare void @_ZN11HNodeOrigin5calcPEv(%struct.HNodeOrigin*) nounwind
-
-declare void @_ZN11HNodeOrigin5calcZEv(%struct.HNodeOrigin*) nounwind
-
-declare void @_ZN11HNodeOrigin9calcPandZEv(%struct.HNodeOrigin*) nounwind
-
-declare void @_ZN11HNodeOrigin9calcAccelEv(%struct.HNodeOrigin*) nounwind
-
-declare void @_ZN11HNodeOrigin17calcInternalForceEv(%struct.HNodeOrigin*) nounwind
-
-declare void @_ZN11HNodeOrigin18prepareVelInternalEv(%struct.HNodeOrigin*) nounwind
-
-declare void @_ZN11HNodeOrigin13propagateSVelERK11FixedVectorIdLi6ELi0EE(%struct.HNodeOrigin*, %"struct.FixedMatrix<double,1,6,0,0>"*) nounwind
-
-declare void @_ZN11HNodeOrigin9setPosVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEES5_(%struct.HNodeOrigin*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*) nounwind
-
-declare void @_ZN11HNodeOrigin6setVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeOrigin*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*) nounwind
-
-declare void @_ZN11HNodeOrigin14setVelFromSVelERK11FixedVectorIdLi6ELi0EE(%struct.HNodeOrigin*, %"struct.FixedMatrix<double,1,6,0,0>"*) nounwind
-
-declare void @_ZN11HNodeOrigin18enforceConstraintsER9CDSVectorIdLi1EN3CDS12DefaultAllocEES4_(%struct.HNodeOrigin*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*) nounwind
-
-declare void @_ZN11HNodeOrigin5printEi(%struct.HNodeOrigin*, i32) nounwind
-
-declare void @_ZN11HNodeOrigin6getPosER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeOrigin*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*) nounwind
-
-declare void @_ZN11HNodeOrigin6getVelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeOrigin*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*) nounwind
-
-declare void @_ZN11HNodeOrigin8getAccelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeOrigin*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*) nounwind
-
-declare void @_ZN11HNodeOrigin16getInternalForceER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeOrigin*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*) nounwind
-
-declare void @_ZN11HNodeOrigin5calcYEv(%struct.HNodeOrigin*) nounwind
-
-declare i8* @_ZN14HNodeTranslate4typeEv(%struct.HNodeTranslate*) nounwind
-
-declare i8* @_ZN21HNodeTranslateRotate34typeEv(%struct.HNodeTranslateRotate3*) nounwind
-
-declare i32 @_ZNK21HNodeTranslateRotate36getDimEv(%struct.HNodeTranslateRotate3*) nounwind
-
-declare i8* @_ZN12HNodeRotate34typeEv(%struct.HNodeRotate3*) nounwind
-
-declare i32 @_ZNK12HNodeRotate36getDimEv(%struct.HNodeRotate3*) nounwind
-
-declare i8* @_ZN12HNodeRotate24typeEv(%struct.HNodeRotate2*) nounwind
-
-declare i32 @_ZNK12HNodeRotate26getDimEv(%struct.HNodeRotate2*) nounwind
-
-declare i8* @_ZN21HNodeTranslateRotate24typeEv(%struct.HNodeTranslateRotate2*) nounwind
-
-declare i32 @_ZNK21HNodeTranslateRotate26getDimEv(%struct.HNodeTranslateRotate2*) nounwind
-
-declare i8* @_ZN12HNodeTorsion4typeEv(%struct.HNodeTorsion*) nounwind
-
-declare fastcc double @_ZL12sumMassToTipPK9HingeNode(%struct.HingeNode*)
-
-declare void @_ZN13InertiaTensor4calcERK4Vec3RK7CDSListIP7IVMAtomE(%struct.InertiaTensor*, %struct.Vec3*, %struct.AtomList*) nounwind
-
-declare fastcc double @_ZL15sumInertiaToTipPK9HingeNodeRK4Vec3S4_(%struct.HingeNode*, %struct.Vec3*, %struct.Vec3*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsI11FixedVectorIdLi6ELi0EEERSoS2_RK9SubVectorIT_E(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.SubVector<FixedVector<double, 6, 0> >"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZStlsIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_St5_Setw(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, i32)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, i8*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZNSolsEd(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, double)
-
-declare void @_Z14orthoTransformIdLi3ELi3EE11FixedMatrixIT_XT1_EXT1_ELi0ELi0EERKS0_IS1_XT0_EXT0_ELi0ELi0EERKS0_IS1_XT1_EXT0_ELi0ELi0EE(%"struct.FixedMatrix<double,3,3,0,0>"* noalias sret, %"struct.FixedMatrix<double,3,3,0,0>"*, %"struct.FixedMatrix<double,3,3,0,0>"*)
-
-declare void @_ZN12HNodeRotate27calcRotEv(%struct.HNodeRotate2*)
-
-declare void @_ZN21HNodeTranslateRotate27calcRotEv(%struct.HNodeTranslateRotate2*)
-
-declare void @_ZmlIdLi6ELi6EE11FixedVectorIT_XT0_ELi0EERK11FixedMatrixIS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT1_ELi0EE(%"struct.FixedMatrix<double,1,6,0,0>"* noalias sret, %struct.Mat6*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZmlIdLi6ELi6ELi6EE11FixedMatrixIT_XT0_EXT2_ELi0ELi0EERKS0_IS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT1_EXT2_ELi0ELi0EE(%struct.Mat6* noalias sret, %struct.Mat6*, %struct.Mat6*)
-
-declare void @_ZmlIdLi6ELi6ELi3EE11FixedMatrixIT_XT0_EXT2_ELi0ELi0EERKS0_IS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT1_EXT2_ELi0ELi0EE(%"struct.FixedMatrix<double,3,6,0,0>"* noalias sret, %struct.Mat6*, %"struct.FixedMatrix<double,3,6,0,0>"*)
-
-declare void @_ZmlIdLi6ELi6ELi2EE11FixedMatrixIT_XT0_EXT2_ELi0ELi0EERKS0_IS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT1_EXT2_ELi0ELi0EE(%"struct.FixedMatrix<double,2,6,0,0>"* noalias sret, %struct.Mat6*, %"struct.FixedMatrix<double,2,6,0,0>"*)
-
-declare void @_ZmlIdLi5ELi6EE11FixedVectorIT_XT0_ELi0EERK11FixedMatrixIS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT1_ELi0EE(%"struct.FixedVector<double,5,0>"* noalias sret, %"struct.FixedMatrix<double,5,6,0,0>"*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZmlIdLi6ELi6ELi5EE11FixedMatrixIT_XT0_EXT2_ELi0ELi0EERKS0_IS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT1_EXT2_ELi0ELi0EE(%"struct.FixedMatrix<double,5,6,0,0>"* noalias sret, %struct.Mat6*, %"struct.FixedMatrix<double,5,6,0,0>"*)
-
-declare void @_ZN12HNodeRotate39setPosVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEES5_(%struct.HNodeRotate3*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN12HNodeRotate29setPosVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEES5_(%struct.HNodeRotate2*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN21HNodeTranslateRotate39setPosVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEES5_(%struct.HNodeTranslateRotate3*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN21HNodeTranslateRotate29setPosVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEES5_(%struct.HNodeTranslateRotate2*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare i32 @_ZNK13HingeNodeSpecILi1EE6offsetEv(%"struct.HingeNodeSpec<1>"*) nounwind
-
-declare %struct.Vec3* @_ZNK13HingeNodeSpecILi1EE5posCMEv(%"struct.HingeNodeSpec<1>"*) nounwind
-
-declare double* @_ZNK13HingeNodeSpecILi1EE4massEv(%"struct.HingeNodeSpec<1>"*) nounwind
-
-declare void @_ZN13HingeNodeSpecILi1EE9calcPandZEv(%"struct.HingeNodeSpec<1>"*)
-
-declare i32 @_ZNK13HingeNodeSpecILi1EE6getDOFEv(%"struct.HingeNodeSpec<1>"*) nounwind
-
-declare i32 @_ZNK13HingeNodeSpecILi1EE6getDimEv(%"struct.HingeNodeSpec<1>"*) nounwind
-
-declare void @_ZN13HingeNodeSpecILi1EE18enforceConstraintsER9CDSVectorIdLi1EN3CDS12DefaultAllocEES5_(%"struct.HingeNodeSpec<1>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*) nounwind
-
-declare i32 @_ZNK13HingeNodeSpecILi5EE6offsetEv(%"struct.HingeNodeSpec<5>"*) nounwind
-
-declare %struct.Vec3* @_ZNK13HingeNodeSpecILi5EE5posCMEv(%"struct.HingeNodeSpec<5>"*) nounwind
-
-declare double* @_ZNK13HingeNodeSpecILi5EE4massEv(%"struct.HingeNodeSpec<5>"*) nounwind
-
-declare void @_ZN13HingeNodeSpecILi5EE9calcPandZEv(%"struct.HingeNodeSpec<5>"*)
-
-declare i32 @_ZNK13HingeNodeSpecILi5EE6getDOFEv(%"struct.HingeNodeSpec<5>"*) nounwind
-
-declare i32 @_ZNK13HingeNodeSpecILi5EE6getDimEv(%"struct.HingeNodeSpec<5>"*) nounwind
-
-declare void @_ZN13HingeNodeSpecILi5EE18enforceConstraintsER9CDSVectorIdLi1EN3CDS12DefaultAllocEES5_(%"struct.HingeNodeSpec<5>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*) nounwind
-
-declare i32 @_ZNK13HingeNodeSpecILi2EE6offsetEv(%"struct.HingeNodeSpec<2>"*) nounwind
-
-declare %struct.Vec3* @_ZNK13HingeNodeSpecILi2EE5posCMEv(%"struct.HingeNodeSpec<2>"*) nounwind
-
-declare double* @_ZNK13HingeNodeSpecILi2EE4massEv(%"struct.HingeNodeSpec<2>"*) nounwind
-
-declare void @_ZN13HingeNodeSpecILi2EE9calcPandZEv(%"struct.HingeNodeSpec<2>"*)
-
-declare i32 @_ZNK13HingeNodeSpecILi2EE6getDOFEv(%"struct.HingeNodeSpec<2>"*) nounwind
-
-declare i32 @_ZNK13HingeNodeSpecILi2EE6getDimEv(%"struct.HingeNodeSpec<2>"*) nounwind
-
-declare void @_ZN13HingeNodeSpecILi2EE18enforceConstraintsER9CDSVectorIdLi1EN3CDS12DefaultAllocEES5_(%"struct.HingeNodeSpec<2>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*) nounwind
-
-declare i32 @_ZNK13HingeNodeSpecILi3EE6offsetEv(%"struct.HingeNodeSpec<3>"*) nounwind
-
-declare %struct.Vec3* @_ZNK13HingeNodeSpecILi3EE5posCMEv(%"struct.HingeNodeSpec<3>"*) nounwind
-
-declare double* @_ZNK13HingeNodeSpecILi3EE4massEv(%"struct.HingeNodeSpec<3>"*) nounwind
-
-declare void @_ZN13HingeNodeSpecILi3EE9calcPandZEv(%"struct.HingeNodeSpec<3>"*)
-
-declare i32 @_ZNK13HingeNodeSpecILi3EE6getDOFEv(%"struct.HingeNodeSpec<3>"*) nounwind
-
-declare i32 @_ZNK13HingeNodeSpecILi6EE6offsetEv(%"struct.HingeNodeSpec<6>"*) nounwind
-
-declare %struct.Vec3* @_ZNK13HingeNodeSpecILi6EE5posCMEv(%"struct.HingeNodeSpec<6>"*) nounwind
-
-declare double* @_ZNK13HingeNodeSpecILi6EE4massEv(%"struct.HingeNodeSpec<6>"*) nounwind
-
-declare void @_ZN13HingeNodeSpecILi6EE9calcPandZEv(%"struct.HingeNodeSpec<6>"*)
-
-declare i32 @_ZNK13HingeNodeSpecILi6EE6getDOFEv(%"struct.HingeNodeSpec<6>"*) nounwind
-
-declare i32 @_ZNK13HingeNodeSpecILi6EE6getDimEv(%"struct.HingeNodeSpec<6>"*) nounwind
-
-declare void @_ZN13HingeNodeSpecILi6EE9setPosVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEES6_(%"struct.HingeNodeSpec<6>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE18enforceConstraintsER9CDSVectorIdLi1EN3CDS12DefaultAllocEES5_(%"struct.HingeNodeSpec<6>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*) nounwind
-
-declare i32 @_ZNK13HingeNodeSpecILi3EE6getDimEv(%"struct.HingeNodeSpec<3>"*) nounwind
-
-declare void @_ZN13HingeNodeSpecILi3EE9setPosVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEES6_(%"struct.HingeNodeSpec<3>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EE18enforceConstraintsER9CDSVectorIdLi1EN3CDS12DefaultAllocEES5_(%"struct.HingeNodeSpec<3>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*) nounwind
-
-declare void @_Z14orthoTransformIdLi6ELi6EE11FixedMatrixIT_XT1_EXT1_ELi0ELi0EERKS0_IS1_XT0_EXT0_ELi0ELi0EERKS0_IS1_XT1_EXT0_ELi0ELi0EE(%struct.Mat6* noalias sret, %struct.Mat6*, %struct.Mat6*)
-
-declare double @_ZN13HingeNodeSpecILi1EE8kineticEEv(%"struct.HingeNodeSpec<1>"*)
-
-declare double @_ZN13HingeNodeSpecILi3EE8kineticEEv(%"struct.HingeNodeSpec<3>"*)
-
-declare double @_ZN13HingeNodeSpecILi2EE8kineticEEv(%"struct.HingeNodeSpec<2>"*)
-
-declare double @_ZN13HingeNodeSpecILi6EE8kineticEEv(%"struct.HingeNodeSpec<6>"*)
-
-declare double @_ZN13HingeNodeSpecILi5EE8kineticEEv(%"struct.HingeNodeSpec<5>"*)
-
-declare void @_ZmlIdLi6ELi5ELi6EE11FixedMatrixIT_XT0_EXT2_ELi0ELi0EERKS0_IS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT1_EXT2_ELi0ELi0EE(%struct.Mat6* noalias sret, %"struct.FixedMatrix<double,5,6,0,0>"*, %"struct.FixedMatrix<double,5,6,0,0>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE9setPosVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEES6_(%"struct.HingeNodeSpec<1>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE9setPosVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEES6_(%"struct.HingeNodeSpec<5>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EE9setPosVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEES6_(%"struct.HingeNodeSpec<2>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_Z14orthoTransformIdLi3ELi6EE11FixedMatrixIT_XT1_EXT1_ELi0ELi0EERKS0_IS1_XT0_EXT0_ELi0ELi0EERKS0_IS1_XT1_EXT0_ELi0ELi0EE(%struct.Mat6* noalias sret, %"struct.FixedMatrix<double,3,3,0,0>"*, %"struct.FixedMatrix<double,3,6,0,0>"*)
-
-declare void @_ZmlIdLi6ELi1ELi6EE11FixedMatrixIT_XT0_EXT2_ELi0ELi0EERKS0_IS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT1_EXT2_ELi0ELi0EE(%struct.Mat6* noalias sret, %"struct.FixedMatrix<double,1,6,0,0>"*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZmlIdLi6ELi5ELi5EE11FixedMatrixIT_XT0_EXT2_ELi0ELi0EERKS0_IS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT1_EXT2_ELi0ELi0EE(%"struct.FixedMatrix<double,5,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,5,6,0,0>"*, %"struct.FixedMatrix<double,5,5,0,0>"*)
-
-declare void @_Z14orthoTransformIdLi5ELi6EE11FixedMatrixIT_XT1_EXT1_ELi0ELi0EERKS0_IS1_XT0_EXT0_ELi0ELi0EERKS0_IS1_XT1_EXT0_ELi0ELi0EE(%struct.Mat6* noalias sret, %"struct.FixedMatrix<double,5,5,0,0>"*, %"struct.FixedMatrix<double,5,6,0,0>"*)
-
-declare void @_Z14orthoTransformIdLi2ELi6EE11FixedMatrixIT_XT1_EXT1_ELi0ELi0EERKS0_IS1_XT0_EXT0_ELi0ELi0EERKS0_IS1_XT1_EXT0_ELi0ELi0EE(%struct.Mat6* noalias sret, %"struct.FixedMatrix<double,2,2,0,0>"*, %"struct.FixedMatrix<double,2,6,0,0>"*)
-
-declare void @_ZmlIdLi1ELi6ELi6EE11FixedMatrixIT_XT0_EXT2_ELi0ELi0EERKS0_IS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT1_EXT2_ELi0ELi0EE(%"struct.FixedMatrix<double,1,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,1,6,0,0>"*, %struct.Mat6*)
-
-declare void @_ZmlIdLi5ELi6ELi6EE11FixedMatrixIT_XT0_EXT2_ELi0ELi0EERKS0_IS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT1_EXT2_ELi0ELi0EE(%"struct.FixedMatrix<double,5,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,5,6,0,0>"*, %struct.Mat6*)
-
-declare void @_Z14orthoTransformIdLi6ELi5EE11FixedMatrixIT_XT1_EXT1_ELi0ELi0EERKS0_IS1_XT0_EXT0_ELi0ELi0EERKS0_IS1_XT1_EXT0_ELi0ELi0EE(%"struct.FixedMatrix<double,5,5,0,0>"* noalias sret, %struct.Mat6*, %"struct.FixedMatrix<double,5,6,0,0>"*)
-
-declare void @_ZmlIdLi2ELi6ELi6EE11FixedMatrixIT_XT0_EXT2_ELi0ELi0EERKS0_IS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT1_EXT2_ELi0ELi0EE(%"struct.FixedMatrix<double,2,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,2,6,0,0>"*, %struct.Mat6*)
-
-declare void @_Z14orthoTransformIdLi6ELi2EE11FixedMatrixIT_XT1_EXT1_ELi0ELi0EERKS0_IS1_XT0_EXT0_ELi0ELi0EERKS0_IS1_XT1_EXT0_ELi0ELi0EE(%"struct.FixedMatrix<double,2,2,0,0>"* noalias sret, %struct.Mat6*, %"struct.FixedMatrix<double,2,6,0,0>"*)
-
-declare void @_ZmlIdLi3ELi6ELi6EE11FixedMatrixIT_XT0_EXT2_ELi0ELi0EERKS0_IS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT1_EXT2_ELi0ELi0EE(%"struct.FixedMatrix<double,3,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,3,6,0,0>"*, %struct.Mat6*)
-
-declare void @_Z14orthoTransformIdLi6ELi3EE11FixedMatrixIT_XT1_EXT1_ELi0ELi0EERKS0_IS1_XT0_EXT0_ELi0ELi0EERKS0_IS1_XT1_EXT0_ELi0ELi0EE(%"struct.FixedMatrix<double,3,3,0,0>"* noalias sret, %struct.Mat6*, %"struct.FixedMatrix<double,3,6,0,0>"*)
-
-declare void @_ZNSt8ios_base4InitC1Ev(%"struct.CDS::DefaultAlloc"*)
-
-declare i32 @__cxa_atexit(void (i8*)*, i8*, i8*) nounwind
-
-declare void @__tcf_0(i8* nocapture)
-
-declare void @_ZNSt8ios_base4InitD1Ev(%"struct.CDS::DefaultAlloc"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsRSoRK9HingeNode(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %struct.HingeNode*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsRSoPK7IVMAtom(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %struct.IVMAtom*)
-
-declare void @_ZN9HingeNode8addChildEPS_(%struct.HingeNode*, %struct.HingeNode*)
-
-declare void @_ZN7CDSListIP9HingeNodeE6appendES1_(%"struct.CDSList<HingeNode*>"*, %struct.HingeNode*)
-
-declare void @_ZN9HingeNode4getHEv(%struct.RMat* noalias sret, %struct.HingeNode*)
-
-declare i8* @__cxa_allocate_exception(i32) nounwind
-
-declare void @__cxa_throw(i8*, i8*, void (i8*)*) noreturn
-
-declare void @_ZN9HingeNode16getInternalForceER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HingeNode*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN9HingeNode9calcAccelEv(%struct.HingeNode*)
-
-declare void @_ZN9HingeNode8getAccelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HingeNode*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN9HingeNode6getVelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HingeNode*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN9HingeNode6getPosER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HingeNode*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN9HingeNode5printEi(%struct.HingeNode*, i32)
-
-declare void @_ZN9HingeNode18enforceConstraintsER9CDSVectorIdLi1EN3CDS12DefaultAllocEES4_(%struct.HingeNode*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN9HingeNode14setVelFromSVelERK11FixedVectorIdLi6ELi0EE(%struct.HingeNode*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZN9HingeNode6setVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HingeNode*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN9HingeNode9setPosVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEES5_(%struct.HingeNode*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN9HingeNode13propagateSVelERK11FixedVectorIdLi6ELi0EE(%struct.HingeNode*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZN9HingeNode18prepareVelInternalEv(%struct.HingeNode*)
-
-declare void @_ZN9HingeNode17calcInternalForceEv(%struct.HingeNode*)
-
-declare void @_ZN9HingeNode5calcYEv(%struct.HingeNode*)
-
-declare void @_ZN9HingeNode9calcPandZEv(%struct.HingeNode*)
-
-declare void @_ZN9HingeNode5calcZEv(%struct.HingeNode*)
-
-declare void @_ZN9HingeNode5calcPEv(%struct.HingeNode*)
-
-declare double* @_ZNK9HingeNode4massEv(%struct.HingeNode*)
-
-declare %struct.Vec3* @_ZNK9HingeNode5posCMEv(%struct.HingeNode*)
-
-declare i8* @_Znam(i32)
-
-declare void @_ZN7CDSListIP9HingeNodeEC1Eii(%"struct.CDSList<HingeNode*>"*, i32, i32)
-
-declare i8* @_Znwm(i32)
-
-declare i8* @llvm.eh.exception() nounwind
-
-declare i32 @llvm.eh.selector.i32(i8*, i8*, ...) nounwind
-
-declare i32 @llvm.eh.typeid.for.i32(i8*) nounwind
-
-declare void @_ZdlPv(i8*) nounwind
-
-declare i32 @__gxx_personality_v0(...)
-
-declare void @_Unwind_Resume_or_Rethrow(i8*)
-
-declare void @_ZN7CDSListIP7IVMAtomEC1Eii(%struct.AtomList*, i32, i32)
-
-declare void @_ZN13CDSVectorBaseIdN3CDS12DefaultAllocEE8splitRepEv(%"struct.CDSVectorBase<double,CDS::DefaultAlloc>"*)
-
-declare void @_ZN12HNodeTorsion16getInternalForceER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeTorsion*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE8getAccelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<1>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE6getVelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<1>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE6getPosER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<1>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE16getInternalForceER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<1>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN12HNodeRotate316getInternalForceER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeRotate3*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EE16getInternalForceER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<3>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EE8getAccelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<3>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EE6getVelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<3>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EE6getPosER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<3>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN12HNodeRotate216getInternalForceER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeRotate2*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN12HNodeRotate28getAccelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeRotate2*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN12HNodeRotate26getVelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeRotate2*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN12HNodeRotate26getPosER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeRotate2*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN12HNodeRotate38getAccelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeRotate3*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN12HNodeRotate36getVelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeRotate3*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN12HNodeRotate36getPosER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeRotate3*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EE16getInternalForceER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<2>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EE8getAccelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<2>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EE6getVelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<2>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EE6getPosER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<2>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN21HNodeTranslateRotate316getInternalForceER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeTranslateRotate3*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN21HNodeTranslateRotate38getAccelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeTranslateRotate3*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN21HNodeTranslateRotate36getVelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeTranslateRotate3*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN21HNodeTranslateRotate36getPosER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeTranslateRotate3*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE16getInternalForceER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<6>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE8getAccelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<6>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE6getVelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<6>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE6getPosER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<6>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN21HNodeTranslateRotate216getInternalForceER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeTranslateRotate2*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN21HNodeTranslateRotate28getAccelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeTranslateRotate2*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN21HNodeTranslateRotate26getVelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeTranslateRotate2*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN21HNodeTranslateRotate26getPosER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeTranslateRotate2*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE16getInternalForceER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<5>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE8getAccelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<5>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE6getVelER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<5>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE6getPosER9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<5>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13CDSVectorBaseI4Vec3N3CDS12DefaultAllocEE8splitRepEv(%"struct.CDSVectorBase<Vec3,CDS::DefaultAlloc>"*)
-
-declare void @_ZN7CDSListIP7IVMAtomE8splitRepEv(%struct.AtomList*)
-
-declare void @_ZN7CDSListIP9HingeNodeE8splitRepEv(%"struct.CDSList<HingeNode*>"*)
-
-declare void @_ZdaPv(i8*) nounwind
-
-declare void @_ZSt9terminatev() noreturn nounwind
-
-declare void @_ZN9HingeNodeC2EPK3IVMP7IVMAtomPKS3_PS_(%struct.HingeNode*, %struct.IVM*, %struct.IVMAtom*, %struct.IVMAtom*, %struct.HingeNode*)
-
-declare void @_ZN9HingeNodeD1Ev(%struct.HingeNode*)
-
-declare void @_ZN9HingeNodeD0Ev(%struct.HingeNode*)
-
-declare void @_ZN7CDSListIP7IVMAtomE6appendES1_(%struct.AtomList*, %struct.IVMAtom*)
-
-declare void @_ZN9HingeNodeC1EPK3IVMP7IVMAtomPKS3_PS_(%struct.HingeNode*, %struct.IVM*, %struct.IVMAtom*, %struct.IVMAtom*, %struct.HingeNode*)
-
-declare void @_ZN9HingeNodeD2Ev(%struct.HingeNode*)
-
-declare void @_ZN11HNodeOriginD0Ev(%struct.HNodeOrigin*)
-
-declare void @_ZN11HNodeOriginD1Ev(%struct.HNodeOrigin*)
-
-declare void @_ZN13HingeNodeSpecILi1EED0Ev(%"struct.HingeNodeSpec<1>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EED1Ev(%"struct.HingeNodeSpec<1>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE5calcPEv(%"struct.HingeNodeSpec<1>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE5calcZEv(%"struct.HingeNodeSpec<1>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE5calcYEv(%"struct.HingeNodeSpec<1>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE17calcInternalForceEv(%"struct.HingeNodeSpec<1>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE18prepareVelInternalEv(%"struct.HingeNodeSpec<1>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE13propagateSVelERK11FixedVectorIdLi6ELi0EE(%"struct.HingeNodeSpec<1>"*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare double @_ZN13HingeNodeSpecILi1EE8approxKEEv(%"struct.HingeNodeSpec<1>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE6setVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<1>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE14setVelFromSVelERK11FixedVectorIdLi6ELi0EE(%"struct.HingeNodeSpec<1>"*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE5printEi(%"struct.HingeNodeSpec<1>"*, i32)
-
-declare void @_ZN13HingeNodeSpecILi1EE9calcAccelEv(%"struct.HingeNodeSpec<1>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE4getHEv(%struct.RMat* noalias sret, %"struct.HingeNodeSpec<1>"*)
-
-declare void @__cxa_pure_virtual() nounwind
-
-declare void @_ZN13HingeNodeSpecILi3EED0Ev(%"struct.HingeNodeSpec<3>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EED1Ev(%"struct.HingeNodeSpec<3>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EE5calcPEv(%"struct.HingeNodeSpec<3>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EE5calcZEv(%"struct.HingeNodeSpec<3>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EE5calcYEv(%"struct.HingeNodeSpec<3>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EE17calcInternalForceEv(%"struct.HingeNodeSpec<3>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EE18prepareVelInternalEv(%"struct.HingeNodeSpec<3>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EE13propagateSVelERK11FixedVectorIdLi6ELi0EE(%"struct.HingeNodeSpec<3>"*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare double @_ZN13HingeNodeSpecILi3EE8approxKEEv(%"struct.HingeNodeSpec<3>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EE6setVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<3>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EE14setVelFromSVelERK11FixedVectorIdLi6ELi0EE(%"struct.HingeNodeSpec<3>"*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EE5printEi(%"struct.HingeNodeSpec<3>"*, i32)
-
-declare void @_ZN13HingeNodeSpecILi3EE9calcAccelEv(%"struct.HingeNodeSpec<3>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EE4getHEv(%struct.RMat* noalias sret, %"struct.HingeNodeSpec<3>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EED0Ev(%"struct.HingeNodeSpec<2>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EED1Ev(%"struct.HingeNodeSpec<2>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EE5calcPEv(%"struct.HingeNodeSpec<2>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EE5calcZEv(%"struct.HingeNodeSpec<2>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EE5calcYEv(%"struct.HingeNodeSpec<2>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EE17calcInternalForceEv(%"struct.HingeNodeSpec<2>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EE18prepareVelInternalEv(%"struct.HingeNodeSpec<2>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EE13propagateSVelERK11FixedVectorIdLi6ELi0EE(%"struct.HingeNodeSpec<2>"*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare double @_ZN13HingeNodeSpecILi2EE8approxKEEv(%"struct.HingeNodeSpec<2>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EE6setVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<2>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EE14setVelFromSVelERK11FixedVectorIdLi6ELi0EE(%"struct.HingeNodeSpec<2>"*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EE5printEi(%"struct.HingeNodeSpec<2>"*, i32)
-
-declare void @_ZN13HingeNodeSpecILi2EE9calcAccelEv(%"struct.HingeNodeSpec<2>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EE4getHEv(%struct.RMat* noalias sret, %"struct.HingeNodeSpec<2>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EED0Ev(%"struct.HingeNodeSpec<6>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EED1Ev(%"struct.HingeNodeSpec<6>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE5calcPEv(%"struct.HingeNodeSpec<6>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE5calcZEv(%"struct.HingeNodeSpec<6>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE5calcYEv(%"struct.HingeNodeSpec<6>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE17calcInternalForceEv(%"struct.HingeNodeSpec<6>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE18prepareVelInternalEv(%"struct.HingeNodeSpec<6>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE13propagateSVelERK11FixedVectorIdLi6ELi0EE(%"struct.HingeNodeSpec<6>"*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare double @_ZN13HingeNodeSpecILi6EE8approxKEEv(%"struct.HingeNodeSpec<6>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE6setVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<6>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE14setVelFromSVelERK11FixedVectorIdLi6ELi0EE(%"struct.HingeNodeSpec<6>"*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE5printEi(%"struct.HingeNodeSpec<6>"*, i32)
-
-declare void @_ZN13HingeNodeSpecILi6EE9calcAccelEv(%"struct.HingeNodeSpec<6>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE4getHEv(%struct.RMat* noalias sret, %"struct.HingeNodeSpec<6>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EED0Ev(%"struct.HingeNodeSpec<5>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EED1Ev(%"struct.HingeNodeSpec<5>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE5calcPEv(%"struct.HingeNodeSpec<5>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE5calcZEv(%"struct.HingeNodeSpec<5>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE5calcYEv(%"struct.HingeNodeSpec<5>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE17calcInternalForceEv(%"struct.HingeNodeSpec<5>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE18prepareVelInternalEv(%"struct.HingeNodeSpec<5>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE13propagateSVelERK11FixedVectorIdLi6ELi0EE(%"struct.HingeNodeSpec<5>"*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare double @_ZN13HingeNodeSpecILi5EE8approxKEEv(%"struct.HingeNodeSpec<5>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE6setVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%"struct.HingeNodeSpec<5>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE14setVelFromSVelERK11FixedVectorIdLi6ELi0EE(%"struct.HingeNodeSpec<5>"*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE5printEi(%"struct.HingeNodeSpec<5>"*, i32)
-
-declare void @_ZN13HingeNodeSpecILi5EE9calcAccelEv(%"struct.HingeNodeSpec<5>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE4getHEv(%struct.RMat* noalias sret, %"struct.HingeNodeSpec<5>"*)
-
-declare void @_ZN12HNodeTorsion7calcRotEv(%struct.HNodeTorsion*)
-
-declare double @sin(double) nounwind readnone
-
-declare double @cos(double) nounwind readnone
-
-declare void @_ZN12HNodeRotate37calcRotEv(%struct.HNodeRotate3*)
-
-declare void @_ZN21HNodeTranslateRotate37calcRotEv(%struct.HNodeTranslateRotate3*)
-
-declare void @_ZN9HingeNodeC2ERKS_(%struct.HingeNode*, %struct.HingeNode*)
-
-declare void @_ZN7CDSListIP9HingeNodeEC1ERKS2_(%"struct.CDSList<HingeNode*>"*, %"struct.CDSList<HingeNode*>"*)
-
-declare void @_ZN7CDSListIP7IVMAtomEC1ERKS2_(%struct.AtomList*, %struct.AtomList*)
-
-declare void @_ZN11HNodeOriginC2EPK9HingeNode(%struct.HNodeOrigin*, %struct.HingeNode*)
-
-declare void @_ZN13HingeNodeSpecILi1EEC2EPK9HingeNodeRi(%"struct.HingeNodeSpec<1>"*, %struct.HingeNode*, i32*)
-
-declare void @_ZN13HingeNodeSpecILi3EEC2EPK9HingeNodeRi(%"struct.HingeNodeSpec<3>"*, %struct.HingeNode*, i32*)
-
-declare void @_ZN13HingeNodeSpecILi2EEC2EPK9HingeNodeRi(%"struct.HingeNodeSpec<2>"*, %struct.HingeNode*, i32*)
-
-declare void @_ZN13HingeNodeSpecILi6EEC2EPK9HingeNodeRi(%"struct.HingeNodeSpec<6>"*, %struct.HingeNode*, i32*)
-
-declare void @_ZN13HingeNodeSpecILi5EEC2EPK9HingeNodeRi(%"struct.HingeNodeSpec<5>"*, %struct.HingeNode*, i32*)
-
-declare void @_ZplI4Vec3K11FixedVectorIdLi6ELi0EEET_RK9SubVectorIT0_ERKS4_(%struct.Vec3* noalias sret, %"struct.SubVector<FixedVector<double, 6, 0> >"*, %struct.Vec3*)
-
-declare void @_ZN11MatrixTools9transposeI11FixedMatrixIdLi1ELi6ELi0ELi0EEEENT_13TransposeTypeERKS3_(%"struct.FixedMatrix<double,1,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZN12HNodeRotate314setVelFromSVelERK11FixedVectorIdLi6ELi0EE(%struct.HNodeRotate3*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZN12HNodeRotate214setVelFromSVelERK11FixedVectorIdLi6ELi0EE(%struct.HNodeRotate2*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZN21HNodeTranslateRotate314setVelFromSVelERK11FixedVectorIdLi6ELi0EE(%struct.HNodeTranslateRotate3*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZN21HNodeTranslateRotate214setVelFromSVelERK11FixedVectorIdLi6ELi0EE(%struct.HNodeTranslateRotate2*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE9calcPropsEv(%"struct.HingeNodeSpec<1>"*)
-
-declare zeroext i8 @_ZNK3IVM12minimizationEv(%struct.IVM*)
-
-declare void @_Z8blockVecIdLi3ELi3EE11FixedVectorIT_XplT0_T1_ELi0EERKS0_IS1_XT0_ELi0EERKS0_IS1_XT1_ELi0EE(%"struct.FixedMatrix<double,1,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,1,3,0,0>"*, %"struct.FixedMatrix<double,1,3,0,0>"*)
-
-declare void @_ZN12HNodeTorsion11toCartesianEv(%struct.HNodeTorsion*)
-
-declare void @_ZN13HingeNodeSpecILi1EE18calcCartesianForceEv(%"struct.HingeNodeSpec<1>"*)
-
-declare void @_ZN13HingeNodeSpecILi3EE18calcCartesianForceEv(%"struct.HingeNodeSpec<3>"*)
-
-declare void @_ZN13HingeNodeSpecILi2EE18calcCartesianForceEv(%"struct.HingeNodeSpec<2>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE18calcCartesianForceEv(%"struct.HingeNodeSpec<6>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE18calcCartesianForceEv(%"struct.HingeNodeSpec<5>"*)
-
-declare void @_ZN12HNodeTorsion5calcHEv(%struct.HNodeTorsion*)
-
-declare void @_Z10blockMat12IdLi1ELi3ELi3EE11FixedMatrixIT_XT0_EXplT1_T2_ELi0ELi0EERKS0_IS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT0_EXT2_ELi0ELi0EE(%"struct.FixedMatrix<double,1,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,1,3,0,0>"*, %"struct.FixedMatrix<double,1,3,0,0>"*)
-
-declare void @_ZN13CDSMatrixBaseIdEC2I11FixedMatrixIdLi1ELi6ELi0ELi0EEEERKT_(%"struct.CDSMatrixBase<double>"*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZN11MatrixTools9transposeI11FixedMatrixIdLi6ELi1ELi0ELi0EEEENT_13TransposeTypeERKS3_(%"struct.FixedMatrix<double,1,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZStlsIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_St13_Setprecision(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, i32)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi6EERSoS0_RK15FixedVectorBaseIT_XT0_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedMatrixBase<double,1,6>"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_c(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, i8 signext)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi3EERSoS0_RK15FixedVectorBaseIT_XT0_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedMatrixBase<double,1,3>"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi1EERSoS0_RK15FixedVectorBaseIT_XT0_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedMatrixBase<double,1,1>"*)
-
-declare void @_ZN11FixedVectorIdLi3ELi0EE6subColILi6ELi1ELi0ELi0EEES0_RK11FixedMatrixIdXT_EXT0_EXT1_EXT2_EEiii(%"struct.FixedMatrix<double,1,3,0,0>"* noalias sret, %"struct.FixedMatrix<double,1,6,0,0>"*, i32, i32, i32)
-
-declare %"struct.FixedMatrixBase<double,6,6>"* @_ZN15FixedMatrixBaseIdLi6ELi6EEpLERKS0_(%"struct.FixedMatrixBase<double,6,6>"*, %"struct.FixedMatrixBase<double,6,6>"*)
-
-declare void @_ZN13HingeNodeSpecILi6EE9calcPropsEv(%"struct.HingeNodeSpec<6>"*)
-
-declare void @_ZN11MatrixTools9transposeI11FixedMatrixIdLi6ELi6ELi0ELi0EEEENT_13TransposeTypeERKS3_(%struct.Mat6* noalias sret, %struct.Mat6*)
-
-declare void @_ZN21HNodeTranslateRotate311toCartesianEv(%struct.HNodeTranslateRotate3*)
-
-define linkonce void @_ZN21HNodeTranslateRotate36setVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeTranslateRotate3* %this, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"* %velv) {
-entry:
- %0 = add i32 0, -1 ; <i32> [#uses=1]
- %1 = getelementptr double* null, i32 %0 ; <double*> [#uses=1]
- %2 = load double* %1, align 8 ; <double> [#uses=1]
- %3 = load double* null, align 8 ; <double> [#uses=2]
- %4 = load double* null, align 8 ; <double> [#uses=2]
- %5 = load double* null, align 8 ; <double> [#uses=3]
- %6 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 0 ; <double*> [#uses=0]
- %7 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 1 ; <double*> [#uses=0]
- %8 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=0]
- %9 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 3 ; <double*> [#uses=0]
- %10 = load double* null, align 8 ; <double> [#uses=2]
- %11 = fsub double -0.000000e+00, %10 ; <double> [#uses=1]
- %12 = load double* null, align 8 ; <double> [#uses=2]
- %13 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 1, i32 0, i32 0, i32 0, i32 3 ; <double*> [#uses=1]
- %14 = load double* %13, align 8 ; <double> [#uses=2]
- %15 = fsub double -0.000000e+00, %14 ; <double> [#uses=1]
- %16 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 1, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- %17 = load double* %16, align 8 ; <double> [#uses=2]
- %18 = fsub double -0.000000e+00, %17 ; <double> [#uses=1]
- %19 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 0 ; <double*> [#uses=0]
- %20 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 3 ; <double*> [#uses=0]
- %21 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 6 ; <double*> [#uses=0]
- %22 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 9 ; <double*> [#uses=0]
- %23 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 1 ; <double*> [#uses=0]
- %24 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 4 ; <double*> [#uses=0]
- %25 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 7 ; <double*> [#uses=0]
- %26 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 10 ; <double*> [#uses=0]
- %27 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=0]
- %28 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 5 ; <double*> [#uses=0]
- %29 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 8 ; <double*> [#uses=0]
- %30 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 11 ; <double*> [#uses=0]
- %31 = getelementptr %"struct.FixedMatrix<double,1,3,0,0>"* null, i32 0, i32 0, i32 0, i32 0 ; <double*> [#uses=0]
- %32 = getelementptr %"struct.FixedMatrix<double,1,3,0,0>"* null, i32 0, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %33 = getelementptr %"struct.FixedMatrix<double,1,3,0,0>"* null, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- %34 = fmul double %17, %5 ; <double> [#uses=1]
- %35 = fadd double 0.000000e+00, %34 ; <double> [#uses=1]
- %36 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %37 = fmul double %14, %3 ; <double> [#uses=1]
- %38 = fadd double %36, %37 ; <double> [#uses=1]
- %39 = fmul double %12, %4 ; <double> [#uses=1]
- %40 = fadd double %38, %39 ; <double> [#uses=1]
- %41 = fmul double %5, %11 ; <double> [#uses=1]
- %42 = fadd double %40, %41 ; <double> [#uses=2]
- store double %42, double* %32, align 8
- %43 = fmul double %2, %15 ; <double> [#uses=1]
- %44 = fadd double %43, 0.000000e+00 ; <double> [#uses=1]
- %45 = fmul double %3, %18 ; <double> [#uses=1]
- %46 = fadd double %44, %45 ; <double> [#uses=1]
- %47 = fmul double %10, %4 ; <double> [#uses=1]
- %48 = fadd double %46, %47 ; <double> [#uses=1]
- %49 = fmul double %12, %5 ; <double> [#uses=1]
- %50 = fadd double %48, %49 ; <double> [#uses=2]
- store double %50, double* %33, align 8
- %51 = fmul double %35, 2.000000e+00 ; <double> [#uses=1]
- %52 = fmul double %42, 2.000000e+00 ; <double> [#uses=1]
- %53 = fmul double %50, 2.000000e+00 ; <double> [#uses=1]
- %54 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 0, i32 10, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- store double %51, double* %54, align 8
- %55 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 0, i32 10, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- store double %52, double* %55, align 8
- %56 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 0, i32 10, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- store double %53, double* %56, align 8
- %57 = add i32 0, 4 ; <i32> [#uses=1]
- %58 = getelementptr %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >"* null, i32 0, i32 0 ; <%"struct.CDSVector<double,0,CDS::DefaultAlloc>"**> [#uses=1]
- store %"struct.CDSVector<double,0,CDS::DefaultAlloc>"* %velv, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"** %58, align 8
- %59 = getelementptr %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >"* null, i32 0, i32 1 ; <i32*> [#uses=1]
- store i32 %57, i32* %59, align 4
- %60 = getelementptr %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >"* null, i32 0, i32 2 ; <i32*> [#uses=1]
- store i32 3, i32* %60, align 8
- unreachable
-}
-
-declare void @_ZmlRK11FixedMatrixIdLi6ELi6ELi0ELi0EERK18PhiMatrixTranspose(%struct.Mat6* noalias sret, %struct.Mat6*, %struct.PhiMatrixTranspose*)
-
-declare void @_ZmlI4Mat3K11FixedMatrixIdLi6ELi6ELi0ELi0EEET_RK9SubMatrixIT0_ERKS4_(%struct.Mat3* noalias sret, %"struct.SubMatrix<FixedMatrix<double, 6, 6, 0, 0> >"*, %struct.Mat3*)
-
-declare void @_ZmiI4Mat3K11FixedMatrixIdLi6ELi6ELi0ELi0EEET_RK9SubMatrixIT0_ERKS4_(%struct.Mat3* noalias sret, %"struct.SubMatrix<FixedMatrix<double, 6, 6, 0, 0> >"*, %struct.Mat3*)
-
-declare %"struct.FixedMatrixBase<double,3,3>"* @_ZN15FixedMatrixBaseIdLi3ELi3EEmIERKS0_(%"struct.FixedMatrixBase<double,3,3>"*, %"struct.FixedMatrixBase<double,3,3>"*)
-
-declare void @_ZplI4Mat311FixedMatrixIdLi6ELi6ELi0ELi0EEET_RKS3_RK9SubMatrixIT0_E(%struct.Mat3* noalias sret, %struct.Mat3*, %"struct.SubMatrix<FixedMatrix<double, 6, 6, 0, 0> >"*)
-
-declare void @_ZN13CDSVectorBaseIdN3CDS12DefaultAllocEED2Ev(%"struct.CDSVectorBase<double,CDS::DefaultAlloc>"*)
-
-declare void @_ZN13HingeNodeSpecILi1EE7calcD_GERK11FixedMatrixIdLi6ELi6ELi0ELi0EE(%"struct.HingeNodeSpec<1>"*, %struct.Mat6*)
-
-declare void @_ZN11MatrixTools7inverseI11FixedMatrixIdLi1ELi1ELi0ELi0EEEET_RKS3_NS_14InverseResultsINS3_10MatrixTypeEEE(%"struct.FixedMatrix<double,1,1,0,0>"* noalias sret, %"struct.FixedMatrix<double,1,1,0,0>"*, %"struct.MatrixTools::InverseResults<FullMatrix<double> >"*)
-
-declare i8* @__cxa_get_exception_ptr(i8*) nounwind
-
-declare i8* @__cxa_begin_catch(i8*) nounwind
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi1ELi1EERSoS0_RK15FixedMatrixBaseIT_XT0_EXT1_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedMatrixBase<double,1,1>"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi1ELi6EERSoS0_RK15FixedMatrixBaseIT_XT0_EXT1_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedMatrixBase<double,1,6>"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZNSolsEi(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, i32)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIcERSoS0_RK9CDSStringIT_E(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %struct.String*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZNSolsEPFRSoS_E(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.std::basic_ostream<char,std::char_traits<char> >"* (%"struct.std::basic_ostream<char,std::char_traits<char> >"*)*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_(%"struct.std::basic_ostream<char,std::char_traits<char> >"*)
-
-declare void @__cxa_end_catch()
-
-declare void @_ZmlI4Mat311FixedMatrixIdLi6ELi6ELi0ELi0EEET_RKS3_RK9SubMatrixIT0_E(%struct.Mat3* noalias sret, %struct.Mat3*, %"struct.SubMatrix<FixedMatrix<double, 6, 6, 0, 0> >"*)
-
-declare void @_ZmlI4Mat311FixedMatrixIdLi6ELi6ELi0ELi0EEET_RK9SubMatrixIT0_ERKS3_(%struct.Mat3* noalias sret, %"struct.SubMatrix<FixedMatrix<double, 6, 6, 0, 0> >"*, %struct.Mat3*)
-
-declare void @_ZmiI4Mat311FixedMatrixIdLi6ELi6ELi0ELi0EEET_RK9SubMatrixIT0_ERKS3_(%struct.Mat3* noalias sret, %"struct.SubMatrix<FixedMatrix<double, 6, 6, 0, 0> >"*, %struct.Mat3*)
-
-declare %"struct.FixedMatrixBase<double,6,6>"* @_ZN15FixedMatrixBaseIdLi6ELi6EEmIERKS0_(%"struct.FixedMatrixBase<double,6,6>"*, %"struct.FixedMatrixBase<double,6,6>"*)
-
-declare void @_ZN13CDSVectorBaseI4Vec3N3CDS12DefaultAllocEEC2EiS2_(%"struct.CDSVectorBase<Vec3,CDS::DefaultAlloc>"*, i32, %"struct.CDS::DefaultAlloc"* byval align 4)
-
-declare void @_ZN13CDSVectorBaseI4Vec3N3CDS12DefaultAllocEED2Ev(%"struct.CDSVectorBase<Vec3,CDS::DefaultAlloc>"*)
-
-declare void @_ZN12HNodeTorsionD0Ev(%struct.HNodeTorsion*)
-
-declare void @_ZN12HNodeTorsionD1Ev(%struct.HNodeTorsion*)
-
-declare void @_ZN12HNodeRotate3D0Ev(%struct.HNodeRotate3*)
-
-declare void @_ZN12HNodeRotate3D1Ev(%struct.HNodeRotate3*)
-
-declare void @_ZN12HNodeRotate36setVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeRotate3*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN12HNodeRotate318enforceConstraintsER9CDSVectorIdLi1EN3CDS12DefaultAllocEES4_(%struct.HNodeRotate3*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN12HNodeRotate35printEi(%struct.HNodeRotate3*, i32)
-
-declare void @_ZN12HNodeRotate35calcHEv(%struct.HNodeRotate3*)
-
-declare void @_ZN12HNodeRotate311toCartesianEv(%struct.HNodeRotate3*)
-
-declare void @_ZN12HNodeRotate2D0Ev(%struct.HNodeRotate2*)
-
-declare void @_ZN12HNodeRotate2D1Ev(%struct.HNodeRotate2*)
-
-declare void @_ZN12HNodeRotate26setVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeRotate2*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN12HNodeRotate218enforceConstraintsER9CDSVectorIdLi1EN3CDS12DefaultAllocEES4_(%struct.HNodeRotate2*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN12HNodeRotate25printEi(%struct.HNodeRotate2*, i32)
-
-declare void @_ZN12HNodeRotate25calcHEv(%struct.HNodeRotate2*)
-
-declare void @_ZN12HNodeRotate211toCartesianEv(%struct.HNodeRotate2*)
-
-declare void @_ZN21HNodeTranslateRotate3D0Ev(%struct.HNodeTranslateRotate3*)
-
-declare void @_ZN21HNodeTranslateRotate3D1Ev(%struct.HNodeTranslateRotate3*)
-
-declare void @_ZN21HNodeTranslateRotate318enforceConstraintsER9CDSVectorIdLi1EN3CDS12DefaultAllocEES4_(%struct.HNodeTranslateRotate3*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN21HNodeTranslateRotate35printEi(%struct.HNodeTranslateRotate3*, i32)
-
-declare void @_ZN21HNodeTranslateRotate35calcHEv(%struct.HNodeTranslateRotate3*)
-
-declare void @_ZN21HNodeTranslateRotate2D0Ev(%struct.HNodeTranslateRotate2*)
-
-declare void @_ZN21HNodeTranslateRotate2D1Ev(%struct.HNodeTranslateRotate2*)
-
-declare void @_ZN21HNodeTranslateRotate26setVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeTranslateRotate2*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN21HNodeTranslateRotate218enforceConstraintsER9CDSVectorIdLi1EN3CDS12DefaultAllocEES4_(%struct.HNodeTranslateRotate2*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"*)
-
-declare void @_ZN21HNodeTranslateRotate25printEi(%struct.HNodeTranslateRotate2*, i32)
-
-declare void @_ZN21HNodeTranslateRotate25calcHEv(%struct.HNodeTranslateRotate2*)
-
-declare void @_ZN21HNodeTranslateRotate211toCartesianEv(%struct.HNodeTranslateRotate2*)
-
-declare void @_ZN14HNodeTranslateC2EPK9HingeNodeP7IVMAtomRi(%struct.HNodeTranslate*, %struct.HingeNode*, %struct.IVMAtom*, i32*)
-
-declare void @_ZN14HNodeTranslateD1Ev(%struct.HNodeTranslate*)
-
-declare void @_ZN14HNodeTranslateD0Ev(%struct.HNodeTranslate*)
-
-declare void @_ZN14HNodeTranslate5calcHEv(%struct.HNodeTranslate*)
-
-declare void @_ZN14HNodeTranslate11toCartesianEv(%struct.HNodeTranslate*)
-
-declare void @_ZN12HNodeRotate3C2EPK9HingeNodeP7IVMAtomRib(%struct.HNodeRotate3*, %struct.HingeNode*, %struct.IVMAtom*, i32*, i8 zeroext)
-
-declare void @_ZN8AtomTree6findCMEPK9HingeNode(%struct.Vec3* noalias sret, %struct.HingeNode*)
-
-declare %struct.IVMAtom** @_ZN7CDSListIP7IVMAtomE7prependERKS1_(%struct.AtomList*, %struct.IVMAtom**)
-
-declare %"struct.CDSVectorBase<Vec3,CDS::DefaultAlloc>"* @_ZN13CDSVectorBaseI4Vec3N3CDS12DefaultAllocEE6resizeEi(%"struct.CDSVectorBase<Vec3,CDS::DefaultAlloc>"*, i32)
-
-declare void @_ZN12HNodeRotate2C2EPK9HingeNodeRK4Vec3Ri(%struct.HNodeRotate2*, %struct.HingeNode*, %struct.Vec3*, i32*)
-
-declare void @_ZN21HNodeTranslateRotate3C2EPK9HingeNodeP7IVMAtomRib(%struct.HNodeTranslateRotate3*, %struct.HingeNode*, %struct.IVMAtom*, i32*, i8 zeroext)
-
-declare void @_ZN13HingeNodeSpecILi3EE9calcPropsEv(%"struct.HingeNodeSpec<3>"*)
-
-declare void @_ZN11MatrixTools9transposeI11FixedMatrixIdLi3ELi6ELi0ELi0EEEENT_13TransposeTypeERKS3_(%"struct.FixedMatrix<double,3,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,3,6,0,0>"*)
-
-declare void @_ZN11MatrixTools9transposeI4Mat3EENT_13TransposeTypeERKS2_(%struct.Mat3* noalias sret, %struct.Mat3*)
-
-declare void @_Z10blockMat12IdLi3ELi3ELi3EE11FixedMatrixIT_XT0_EXplT1_T2_ELi0ELi0EERKS0_IS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT0_EXT2_ELi0ELi0EE(%"struct.FixedMatrix<double,3,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,3,3,0,0>"*, %"struct.FixedMatrix<double,3,3,0,0>"*)
-
-declare void @_ZN13CDSMatrixBaseIdEC2I11FixedMatrixIdLi3ELi6ELi0ELi0EEEERKT_(%"struct.CDSMatrixBase<double>"*, %"struct.FixedMatrix<double,3,6,0,0>"*)
-
-declare void @_ZN11MatrixTools9transposeI11FixedMatrixIdLi6ELi3ELi0ELi0EEEENT_13TransposeTypeERKS3_(%"struct.FixedMatrix<double,3,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,3,6,0,0>"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi4EERSoS0_RK15FixedVectorBaseIT_XT0_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedMatrixBase<double,2,2>"*)
-
-declare double @_Z4normIdLi4EET_RK11FixedVectorIS0_XT0_ELi0EE(%"struct.FixedMatrix<double,2,2,0,0>"*)
-
-declare %"struct.FixedMatrixBase<double,2,2>"* @_ZN15FixedVectorBaseIdLi4EEdVERKd(%"struct.FixedMatrixBase<double,2,2>"*, double*)
-
-declare %"struct.FixedMatrixBase<double,2,2>"* @_ZN15FixedVectorBaseIdLi4EEmIERKS0_(%"struct.FixedMatrixBase<double,2,2>"*, %"struct.FixedMatrixBase<double,2,2>"*)
-
-declare void @_ZN11FixedVectorIdLi3ELi0EE6subColILi6ELi3ELi0ELi0EEES0_RK11FixedMatrixIdXT_EXT0_EXT1_EXT2_EEiii(%"struct.FixedMatrix<double,1,3,0,0>"* noalias sret, %"struct.FixedMatrix<double,3,6,0,0>"*, i32, i32, i32)
-
-declare void @_ZN13HingeNodeSpecILi3EE7calcD_GERK11FixedMatrixIdLi6ELi6ELi0ELi0EE(%"struct.HingeNodeSpec<3>"*, %struct.Mat6*)
-
-declare void @_ZN11MatrixTools7inverseI11FixedMatrixIdLi3ELi3ELi0ELi0EEEET_RKS3_NS_14InverseResultsINS3_10MatrixTypeEEE(%"struct.FixedMatrix<double,3,3,0,0>"* noalias sret, %"struct.FixedMatrix<double,3,3,0,0>"*, %"struct.MatrixTools::InverseResults<FullMatrix<double> >"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi3ELi3EERSoS0_RK15FixedMatrixBaseIT_XT0_EXT1_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedMatrixBase<double,3,3>"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi3ELi6EERSoS0_RK15FixedMatrixBaseIT_XT0_EXT1_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedMatrixBase<double,3,6>"*)
-
-declare void @_Z7unitVecRK4Vec3(%struct.Vec3* noalias sret, %struct.Vec3*)
-
-declare double @_Z4normIdLi3EET_RK11FixedVectorIS0_XT0_ELi0EE(%"struct.FixedMatrix<double,1,3,0,0>"*)
-
-declare void @_ZN12HNodeTorsionC2EPK9HingeNodeRK4Vec3Ri(%struct.HNodeTorsion*, %struct.HingeNode*, %struct.Vec3*, i32*)
-
-declare double @acos(double) nounwind readnone
-
-declare double @atan2(double, double) nounwind readnone
-
-declare void @_ZN21HNodeTranslateRotate2C2EPK9HingeNodeRi(%struct.HNodeTranslateRotate2*, %struct.HingeNode*, i32*)
-
-declare void @_ZN13HingeNodeSpecILi2EE9calcPropsEv(%"struct.HingeNodeSpec<2>"*)
-
-declare void @_ZN11MatrixTools9transposeI11FixedMatrixIdLi2ELi6ELi0ELi0EEEENT_13TransposeTypeERKS3_(%"struct.FixedMatrix<double,2,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,2,6,0,0>"*)
-
-declare void @_Z10blockMat21IdLi1ELi3ELi1EE11FixedMatrixIT_XplT0_T2_EXT1_ELi0ELi0EERKS0_IS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT2_EXT1_ELi0ELi0EE(%"struct.FixedMatrix<double,1,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,1,3,0,0>"*, %"struct.FixedMatrix<double,1,3,0,0>"*)
-
-declare void @_Z10blockMat12IdLi2ELi3ELi3EE11FixedMatrixIT_XT0_EXplT1_T2_ELi0ELi0EERKS0_IS1_XT0_EXT1_ELi0ELi0EERKS0_IS1_XT0_EXT2_ELi0ELi0EE(%"struct.FixedMatrix<double,2,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,1,6,0,0>"*, %"struct.FixedMatrix<double,1,6,0,0>"*)
-
-declare void @_ZN13CDSMatrixBaseIdEC2I11FixedMatrixIdLi2ELi6ELi0ELi0EEEERKT_(%"struct.CDSMatrixBase<double>"*, %"struct.FixedMatrix<double,2,6,0,0>"*)
-
-declare void @_ZN11MatrixTools9transposeI11FixedMatrixIdLi6ELi2ELi0ELi0EEEENT_13TransposeTypeERKS3_(%"struct.FixedMatrix<double,2,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,2,6,0,0>"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi2EERSoS0_RK15FixedVectorBaseIT_XT0_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedVectorBase<double,2>"*)
-
-declare %"struct.FixedMatrixBase<double,1,3>"* @_ZN15FixedVectorBaseIdLi3EEdVERKd(%"struct.FixedMatrixBase<double,1,3>"*, double*)
-
-declare %"struct.FixedMatrixBase<double,1,3>"* @_ZN15FixedVectorBaseIdLi3EEmIERKS0_(%"struct.FixedMatrixBase<double,1,3>"*, %"struct.FixedMatrixBase<double,1,3>"*)
-
-declare void @_ZN11FixedVectorIdLi3ELi0EE6subColILi6ELi2ELi0ELi0EEES0_RK11FixedMatrixIdXT_EXT0_EXT1_EXT2_EEiii(%"struct.FixedMatrix<double,1,3,0,0>"* noalias sret, %"struct.FixedMatrix<double,2,6,0,0>"*, i32, i32, i32)
-
-declare void @_ZN13HingeNodeSpecILi2EE7calcD_GERK11FixedMatrixIdLi6ELi6ELi0ELi0EE(%"struct.HingeNodeSpec<2>"*, %struct.Mat6*)
-
-declare void @_ZN11MatrixTools7inverseI11FixedMatrixIdLi2ELi2ELi0ELi0EEEET_RKS3_NS_14InverseResultsINS3_10MatrixTypeEEE(%"struct.FixedMatrix<double,2,2,0,0>"* noalias sret, %"struct.FixedMatrix<double,2,2,0,0>"*, %"struct.MatrixTools::InverseResults<FullMatrix<double> >"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi2ELi2EERSoS0_RK15FixedMatrixBaseIT_XT0_EXT1_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedMatrixBase<double,2,2>"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi2ELi6EERSoS0_RK15FixedMatrixBaseIT_XT0_EXT1_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedMatrixBase<double,2,6>"*)
-
-declare zeroext i8 @_ZNK9CDSStringIcE7matchesEPKcb(%struct.String*, i8*, i8 zeroext)
-
-declare %struct.HingeNode* @_Z9constructP9HingeNodeRKN16InternalDynamics9HingeSpecERi(%struct.HingeNode*, %"struct.InternalDynamics::HingeSpec"*, i32*)
-
-declare void @_ZN9CDSStringIcEC1ERKS0_(%struct.String*, %struct.String*)
-
-declare void @_ZN9CDSStringIcE8downcaseEv(%struct.String*)
-
-declare %struct.String* @_ZN9CDSStringIcEaSEPKc(%struct.String*, i8*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIP7IVMAtomERSoS2_RK7CDSListIT_E(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %struct.AtomList*)
-
-declare i32 @_ZNK7CDSListIP9HingeNodeE8getIndexERKS1_(%"struct.CDSList<HingeNode*>"*, %struct.HingeNode**)
-
-declare void @_ZN13CDSMatrixBaseIdEC2I11FixedMatrixIdLi6ELi6ELi0ELi0EEEERKT_(%"struct.CDSMatrixBase<double>"*, %struct.Mat6*)
-
-declare void @_ZN11FixedVectorIdLi3ELi0EE6subColILi6ELi6ELi0ELi0EEES0_RK11FixedMatrixIdXT_EXT0_EXT1_EXT2_EEiii(%"struct.FixedMatrix<double,1,3,0,0>"* noalias sret, %struct.Mat6*, i32, i32, i32)
-
-declare void @_ZN13HingeNodeSpecILi6EE7calcD_GERK11FixedMatrixIdLi6ELi6ELi0ELi0EE(%"struct.HingeNodeSpec<6>"*, %struct.Mat6*)
-
-declare void @_ZN11MatrixTools7inverseI11FixedMatrixIdLi6ELi6ELi0ELi0EEEET_RKS3_NS_14InverseResultsINS3_10MatrixTypeEEE(%struct.Mat6* noalias sret, %struct.Mat6*, %"struct.MatrixTools::InverseResults<FullMatrix<double> >"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi6ELi6EERSoS0_RK15FixedMatrixBaseIT_XT0_EXT1_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedMatrixBase<double,6,6>"*)
-
-declare void @_ZN13HingeNodeSpecILi5EE9calcPropsEv(%"struct.HingeNodeSpec<5>"*)
-
-declare void @_ZN11MatrixTools9transposeI11FixedMatrixIdLi5ELi6ELi0ELi0EEEENT_13TransposeTypeERKS3_(%"struct.FixedMatrix<double,5,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,5,6,0,0>"*)
-
-declare void @_ZN13CDSMatrixBaseIdEC2I11FixedMatrixIdLi5ELi6ELi0ELi0EEEERKT_(%"struct.CDSMatrixBase<double>"*, %"struct.FixedMatrix<double,5,6,0,0>"*)
-
-declare void @_ZN11MatrixTools9transposeI11FixedMatrixIdLi6ELi5ELi0ELi0EEEENT_13TransposeTypeERKS3_(%"struct.FixedMatrix<double,5,6,0,0>"* noalias sret, %"struct.FixedMatrix<double,5,6,0,0>"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi5EERSoS0_RK15FixedVectorBaseIT_XT0_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedVectorBase<double,5>"*)
-
-declare void @_ZN11FixedVectorIdLi3ELi0EE6subColILi6ELi5ELi0ELi0EEES0_RK11FixedMatrixIdXT_EXT0_EXT1_EXT2_EEiii(%"struct.FixedMatrix<double,1,3,0,0>"* noalias sret, %"struct.FixedMatrix<double,5,6,0,0>"*, i32, i32, i32)
-
-declare void @_ZN13HingeNodeSpecILi5EE7calcD_GERK11FixedMatrixIdLi6ELi6ELi0ELi0EE(%"struct.HingeNodeSpec<5>"*, %struct.Mat6*)
-
-declare void @_ZN11MatrixTools7inverseI11FixedMatrixIdLi5ELi5ELi0ELi0EEEET_RKS3_NS_14InverseResultsINS3_10MatrixTypeEEE(%"struct.FixedMatrix<double,5,5,0,0>"* noalias sret, %"struct.FixedMatrix<double,5,5,0,0>"*, %"struct.MatrixTools::InverseResults<FullMatrix<double> >"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi5ELi5EERSoS0_RK15FixedMatrixBaseIT_XT0_EXT1_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedMatrixBase<double,5,5>"*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZlsIdLi5ELi6EERSoS0_RK15FixedMatrixBaseIT_XT0_EXT1_EE(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, %"struct.FixedMatrixBase<double,5,6>"*)
-
-declare void @llvm.memset.i64(i8* nocapture, i8, i64, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/negative-sin.ll b/libclamav/c++/llvm/test/CodeGen/X86/negative-sin.ll
deleted file mode 100644
index 3bc7908..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/negative-sin.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -enable-unsafe-fp-math -march=x86-64 | \
-; RUN: not grep -E {addsd|subsd|xor}
-
-declare double @sin(double %f)
-
-define double @foo(double %e)
-{
- %f = fsub double 0.0, %e
- %g = call double @sin(double %f) readonly
- %h = fsub double 0.0, %g
- ret double %h
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/negative-stride-fptosi-user.ll b/libclamav/c++/llvm/test/CodeGen/X86/negative-stride-fptosi-user.ll
deleted file mode 100644
index 332e0b9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/negative-stride-fptosi-user.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep cvtsi2sd
-
-; LSR previously eliminated the sitofp by introducing an induction
-; variable which stepped by a bogus ((double)UINT32_C(-1)). It's theoretically
-; possible to eliminate the sitofp using a proper -1.0 step though; this
-; test should be changed if that is done.
-
-define void @foo(i32 %N) nounwind {
-entry:
- %0 = icmp slt i32 %N, 0 ; <i1> [#uses=1]
- br i1 %0, label %bb, label %return
-
-bb: ; preds = %bb, %entry
- %i.03 = phi i32 [ 0, %entry ], [ %2, %bb ] ; <i32> [#uses=2]
- %1 = sitofp i32 %i.03 to double ; <double> [#uses=1]
- tail call void @bar(double %1) nounwind
- %2 = add nsw i32 %i.03, -1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %2, %N ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb
-
-return: ; preds = %bb, %entry
- ret void
-}
-
-declare void @bar(double)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/negative-subscript.ll b/libclamav/c++/llvm/test/CodeGen/X86/negative-subscript.ll
deleted file mode 100644
index 28f7d6b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/negative-subscript.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86
-; rdar://6559995
-
-@a = external global [255 x i8*], align 32
-
-define i32 @main() nounwind {
-entry:
- store i8* bitcast (i8** getelementptr ([255 x i8*]* @a, i32 0, i32 -2147483624) to i8*), i8** getelementptr ([255 x i8*]* @a, i32 0, i32 16), align 32
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/negative_zero.ll b/libclamav/c++/llvm/test/CodeGen/X86/negative_zero.ll
deleted file mode 100644
index 29474c2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/negative_zero.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=-sse2,-sse3 | grep fchs
-
-
-define double @T() {
- ret double -1.0 ;; codegen as fld1/fchs, not as a load from cst pool
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/nobt.ll b/libclamav/c++/llvm/test/CodeGen/X86/nobt.ll
deleted file mode 100644
index 35090e3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/nobt.ll
+++ /dev/null
@@ -1,70 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep btl
-
-; This tests some cases where BT must not be generated. See also bt.ll.
-; Fixes 20040709-[12].c in gcc testsuite.
-
-define void @test2(i32 %x, i32 %n) nounwind {
-entry:
- %tmp1 = and i32 %x, 1
- %tmp2 = urem i32 %tmp1, 15
- %tmp3 = and i32 %tmp2, 1 ; <i32> [#uses=1]
- %tmp4 = icmp eq i32 %tmp3, %tmp2 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @test3(i32 %x, i32 %n) nounwind {
-entry:
- %tmp1 = and i32 %x, 1
- %tmp2 = urem i32 %tmp1, 15
- %tmp3 = and i32 %tmp2, 1 ; <i32> [#uses=1]
- %tmp4 = icmp eq i32 %tmp2, %tmp3 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @test4(i32 %x, i32 %n) nounwind {
-entry:
- %tmp1 = and i32 %x, 1
- %tmp2 = urem i32 %tmp1, 15
- %tmp3 = and i32 %tmp2, 1 ; <i32> [#uses=1]
- %tmp4 = icmp ne i32 %tmp2, %tmp3 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-define void @test5(i32 %x, i32 %n) nounwind {
-entry:
- %tmp1 = and i32 %x, 1
- %tmp2 = urem i32 %tmp1, 15
- %tmp3 = and i32 %tmp2, 1 ; <i32> [#uses=1]
- %tmp4 = icmp ne i32 %tmp2, %tmp3 ; <i1> [#uses=1]
- br i1 %tmp4, label %bb, label %UnifiedReturnBlock
-
-bb: ; preds = %entry
- call void @foo()
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-declare void @foo()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/nofence.ll b/libclamav/c++/llvm/test/CodeGen/X86/nofence.ll
deleted file mode 100644
index 244d2e9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/nofence.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep fence
-
-declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1)
-
-define void @test() {
- call void @llvm.memory.barrier( i1 true, i1 false, i1 false, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 false, i1 true, i1 false, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 false, i1 false, i1 true, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 false, i1 false, i1 false, i1 true, i1 false)
-
- call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 true, i1 false, i1 true, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 true, i1 false, i1 false, i1 true, i1 false)
- call void @llvm.memory.barrier( i1 false, i1 true, i1 true, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 false, i1 true, i1 false, i1 true, i1 false)
- call void @llvm.memory.barrier( i1 false, i1 false, i1 true, i1 true, i1 false)
-
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 false, i1 false)
- call void @llvm.memory.barrier( i1 true, i1 true, i1 false, i1 true, i1 false)
- call void @llvm.memory.barrier( i1 true, i1 false, i1 true, i1 true, i1 false)
- call void @llvm.memory.barrier( i1 false, i1 true, i1 true, i1 true, i1 false)
-
-
- call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true , i1 false)
- call void @llvm.memory.barrier( i1 false, i1 false, i1 false, i1 false , i1 false)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/nosse-error1.ll b/libclamav/c++/llvm/test/CodeGen/X86/nosse-error1.ll
deleted file mode 100644
index 16cbb73..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/nosse-error1.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llvm-as < %s > %t1
-; RUN: not llc -march=x86-64 -mattr=-sse < %t1 2> %t2
-; RUN: grep "SSE register return with SSE disabled" %t2
-; RUN: llc -march=x86-64 < %t1 | grep xmm
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-@f = external global float ; <float*> [#uses=4]
-@d = external global double ; <double*> [#uses=4]
-
-define void @test() nounwind {
-entry:
- %0 = load float* @f, align 4 ; <float> [#uses=1]
- %1 = tail call float @foo1(float %0) nounwind ; <float> [#uses=1]
- store float %1, float* @f, align 4
- %2 = load double* @d, align 8 ; <double> [#uses=1]
- %3 = tail call double @foo2(double %2) nounwind ; <double> [#uses=1]
- store double %3, double* @d, align 8
- %4 = load float* @f, align 4 ; <float> [#uses=1]
- %5 = tail call float @foo3(float %4) nounwind ; <float> [#uses=1]
- store float %5, float* @f, align 4
- %6 = load double* @d, align 8 ; <double> [#uses=1]
- %7 = tail call double @foo4(double %6) nounwind ; <double> [#uses=1]
- store double %7, double* @d, align 8
- ret void
-}
-
-declare float @foo1(float)
-
-declare double @foo2(double)
-
-declare float @foo3(float)
-
-declare double @foo4(double)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/nosse-error2.ll b/libclamav/c++/llvm/test/CodeGen/X86/nosse-error2.ll
deleted file mode 100644
index 45a5eaf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/nosse-error2.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llvm-as < %s > %t1
-; RUN: not llc -march=x86 -mcpu=i686 -mattr=-sse < %t1 2> %t2
-; RUN: grep "SSE register return with SSE disabled" %t2
-; RUN: llc -march=x86 -mcpu=i686 -mattr=+sse < %t1 | grep xmm
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-unknown-linux-gnu"
-@f = external global float ; <float*> [#uses=4]
-@d = external global double ; <double*> [#uses=4]
-
-define void @test() nounwind {
-entry:
- %0 = load float* @f, align 4 ; <float> [#uses=1]
- %1 = tail call inreg float @foo1(float inreg %0) nounwind ; <float> [#uses=1]
- store float %1, float* @f, align 4
- %2 = load double* @d, align 8 ; <double> [#uses=1]
- %3 = tail call inreg double @foo2(double inreg %2) nounwind ; <double> [#uses=1]
- store double %3, double* @d, align 8
- %4 = load float* @f, align 4 ; <float> [#uses=1]
- %5 = tail call inreg float @foo3(float inreg %4) nounwind ; <float> [#uses=1]
- store float %5, float* @f, align 4
- %6 = load double* @d, align 8 ; <double> [#uses=1]
- %7 = tail call inreg double @foo4(double inreg %6) nounwind ; <double> [#uses=1]
- store double %7, double* @d, align 8
- ret void
-}
-
-declare inreg float @foo1(float inreg)
-
-declare inreg double @foo2(double inreg)
-
-declare inreg float @foo3(float inreg)
-
-declare inreg double @foo4(double inreg)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/nosse-varargs.ll b/libclamav/c++/llvm/test/CodeGen/X86/nosse-varargs.ll
deleted file mode 100644
index e6da0ab..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/nosse-varargs.ll
+++ /dev/null
@@ -1,46 +0,0 @@
-; RUN: llvm-as < %s > %t
-; RUN: llc -march=x86-64 -mattr=-sse < %t | not grep xmm
-; RUN: llc -march=x86-64 < %t | grep xmm
-; PR3403
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
- %struct.__va_list_tag = type { i32, i32, i8*, i8* }
-
-define i32 @foo(float %a, i8* nocapture %fmt, ...) nounwind {
-entry:
- %ap = alloca [1 x %struct.__va_list_tag], align 8 ; <[1 x %struct.__va_list_tag]*> [#uses=4]
- %ap12 = bitcast [1 x %struct.__va_list_tag]* %ap to i8* ; <i8*> [#uses=2]
- call void @llvm.va_start(i8* %ap12)
- %0 = getelementptr [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 0 ; <i32*> [#uses=2]
- %1 = load i32* %0, align 8 ; <i32> [#uses=3]
- %2 = icmp ult i32 %1, 48 ; <i1> [#uses=1]
- br i1 %2, label %bb, label %bb3
-
-bb: ; preds = %entry
- %3 = getelementptr [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 3 ; <i8**> [#uses=1]
- %4 = load i8** %3, align 8 ; <i8*> [#uses=1]
- %5 = inttoptr i32 %1 to i8* ; <i8*> [#uses=1]
- %6 = ptrtoint i8* %5 to i64 ; <i64> [#uses=1]
- %ctg2 = getelementptr i8* %4, i64 %6 ; <i8*> [#uses=1]
- %7 = add i32 %1, 8 ; <i32> [#uses=1]
- store i32 %7, i32* %0, align 8
- br label %bb4
-
-bb3: ; preds = %entry
- %8 = getelementptr [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 2 ; <i8**> [#uses=2]
- %9 = load i8** %8, align 8 ; <i8*> [#uses=2]
- %10 = getelementptr i8* %9, i64 8 ; <i8*> [#uses=1]
- store i8* %10, i8** %8, align 8
- br label %bb4
-
-bb4: ; preds = %bb3, %bb
- %addr.0.0 = phi i8* [ %ctg2, %bb ], [ %9, %bb3 ] ; <i8*> [#uses=1]
- %11 = bitcast i8* %addr.0.0 to i32* ; <i32*> [#uses=1]
- %12 = load i32* %11, align 4 ; <i32> [#uses=1]
- call void @llvm.va_end(i8* %ap12)
- ret i32 %12
-}
-
-declare void @llvm.va_start(i8*) nounwind
-
-declare void @llvm.va_end(i8*) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/object-size.ll b/libclamav/c++/llvm/test/CodeGen/X86/object-size.ll
deleted file mode 100644
index eed3cfc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/object-size.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; RUN: llc -O0 < %s -march=x86-64 | FileCheck %s -check-prefix=X64
-
-; ModuleID = 'ts.c'
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-apple-darwin10.0"
-
-@p = common global i8* null, align 8 ; <i8**> [#uses=4]
-@.str = private constant [3 x i8] c"Hi\00" ; <[3 x i8]*> [#uses=1]
-
-define void @bar() nounwind ssp {
-entry:
- %tmp = load i8** @p ; <i8*> [#uses=1]
- %0 = call i64 @llvm.objectsize.i64(i8* %tmp, i1 0) ; <i64> [#uses=1]
- %cmp = icmp ne i64 %0, -1 ; <i1> [#uses=1]
-; X64: movq $-1, %rax
-; X64: cmpq $-1, %rax
- br i1 %cmp, label %cond.true, label %cond.false
-
-cond.true: ; preds = %entry
- %tmp1 = load i8** @p ; <i8*> [#uses=1]
- %tmp2 = load i8** @p ; <i8*> [#uses=1]
- %1 = call i64 @llvm.objectsize.i64(i8* %tmp2, i1 1) ; <i64> [#uses=1]
- %call = call i8* @__strcpy_chk(i8* %tmp1, i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 %1) ssp ; <i8*> [#uses=1]
- br label %cond.end
-
-cond.false: ; preds = %entry
- %tmp3 = load i8** @p ; <i8*> [#uses=1]
- %call4 = call i8* @__inline_strcpy_chk(i8* %tmp3, i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0)) ssp ; <i8*> [#uses=1]
- br label %cond.end
-
-cond.end: ; preds = %cond.false, %cond.true
- %cond = phi i8* [ %call, %cond.true ], [ %call4, %cond.false ] ; <i8*> [#uses=0]
- ret void
-}
-
-declare i64 @llvm.objectsize.i64(i8*, i1) nounwind readonly
-
-declare i8* @__strcpy_chk(i8*, i8*, i64) ssp
-
-define internal i8* @__inline_strcpy_chk(i8* %__dest, i8* %__src) nounwind ssp {
-entry:
- %retval = alloca i8* ; <i8**> [#uses=2]
- %__dest.addr = alloca i8* ; <i8**> [#uses=3]
- %__src.addr = alloca i8* ; <i8**> [#uses=2]
- store i8* %__dest, i8** %__dest.addr
- store i8* %__src, i8** %__src.addr
- %tmp = load i8** %__dest.addr ; <i8*> [#uses=1]
- %tmp1 = load i8** %__src.addr ; <i8*> [#uses=1]
- %tmp2 = load i8** %__dest.addr ; <i8*> [#uses=1]
- %0 = call i64 @llvm.objectsize.i64(i8* %tmp2, i1 1) ; <i64> [#uses=1]
- %call = call i8* @__strcpy_chk(i8* %tmp, i8* %tmp1, i64 %0) ssp ; <i8*> [#uses=1]
- store i8* %call, i8** %retval
- %1 = load i8** %retval ; <i8*> [#uses=1]
- ret i8* %1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/opt-ext-uses.ll b/libclamav/c++/llvm/test/CodeGen/X86/opt-ext-uses.ll
deleted file mode 100644
index fa2aef5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/opt-ext-uses.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86 | grep movw | count 1
-
-define i16 @t() signext {
-entry:
- %tmp180 = load i16* null, align 2 ; <i16> [#uses=3]
- %tmp180181 = sext i16 %tmp180 to i32 ; <i32> [#uses=1]
- %tmp182 = add i16 %tmp180, 10
- %tmp185 = icmp slt i16 %tmp182, 0 ; <i1> [#uses=1]
- br i1 %tmp185, label %cond_true188, label %cond_next245
-
-cond_true188: ; preds = %entry
- %tmp195196 = trunc i16 %tmp180 to i8 ; <i8> [#uses=0]
- ret i16 %tmp180
-
-cond_next245: ; preds = %entry
- %tmp256 = and i32 %tmp180181, 15 ; <i32> [#uses=0]
- %tmp3 = trunc i32 %tmp256 to i16
- ret i16 %tmp3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/optimize-max-0.ll b/libclamav/c++/llvm/test/CodeGen/X86/optimize-max-0.ll
deleted file mode 100644
index 162c7a5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/optimize-max-0.ll
+++ /dev/null
@@ -1,461 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep cmov
-
-; LSR should be able to eliminate the max computations by
-; making the loops use slt/ult comparisons instead of ne comparisons.
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9"
-
-define void @foo(i8* %r, i32 %s, i32 %w, i32 %x, i8* %j, i32 %d) nounwind {
-entry:
- %0 = mul i32 %x, %w ; <i32> [#uses=2]
- %1 = mul i32 %x, %w ; <i32> [#uses=1]
- %2 = sdiv i32 %1, 4 ; <i32> [#uses=1]
- %.sum2 = add i32 %2, %0 ; <i32> [#uses=2]
- %cond = icmp eq i32 %d, 1 ; <i1> [#uses=1]
- br i1 %cond, label %bb29, label %bb10.preheader
-
-bb10.preheader: ; preds = %entry
- %3 = icmp sgt i32 %x, 0 ; <i1> [#uses=1]
- br i1 %3, label %bb.nph9, label %bb18.loopexit
-
-bb.nph7: ; preds = %bb7.preheader
- %4 = mul i32 %y.08, %w ; <i32> [#uses=1]
- %5 = mul i32 %y.08, %s ; <i32> [#uses=1]
- %6 = add i32 %5, 1 ; <i32> [#uses=1]
- %tmp8 = icmp sgt i32 1, %w ; <i1> [#uses=1]
- %smax9 = select i1 %tmp8, i32 1, i32 %w ; <i32> [#uses=1]
- br label %bb6
-
-bb6: ; preds = %bb7, %bb.nph7
- %x.06 = phi i32 [ 0, %bb.nph7 ], [ %indvar.next7, %bb7 ] ; <i32> [#uses=3]
- %7 = add i32 %x.06, %4 ; <i32> [#uses=1]
- %8 = shl i32 %x.06, 1 ; <i32> [#uses=1]
- %9 = add i32 %6, %8 ; <i32> [#uses=1]
- %10 = getelementptr i8* %r, i32 %9 ; <i8*> [#uses=1]
- %11 = load i8* %10, align 1 ; <i8> [#uses=1]
- %12 = getelementptr i8* %j, i32 %7 ; <i8*> [#uses=1]
- store i8 %11, i8* %12, align 1
- br label %bb7
-
-bb7: ; preds = %bb6
- %indvar.next7 = add i32 %x.06, 1 ; <i32> [#uses=2]
- %exitcond10 = icmp ne i32 %indvar.next7, %smax9 ; <i1> [#uses=1]
- br i1 %exitcond10, label %bb6, label %bb7.bb9_crit_edge
-
-bb7.bb9_crit_edge: ; preds = %bb7
- br label %bb9
-
-bb9: ; preds = %bb7.preheader, %bb7.bb9_crit_edge
- br label %bb10
-
-bb10: ; preds = %bb9
- %indvar.next11 = add i32 %y.08, 1 ; <i32> [#uses=2]
- %exitcond12 = icmp ne i32 %indvar.next11, %x ; <i1> [#uses=1]
- br i1 %exitcond12, label %bb7.preheader, label %bb10.bb18.loopexit_crit_edge
-
-bb10.bb18.loopexit_crit_edge: ; preds = %bb10
- br label %bb10.bb18.loopexit_crit_edge.split
-
-bb10.bb18.loopexit_crit_edge.split: ; preds = %bb.nph9, %bb10.bb18.loopexit_crit_edge
- br label %bb18.loopexit
-
-bb.nph9: ; preds = %bb10.preheader
- %13 = icmp sgt i32 %w, 0 ; <i1> [#uses=1]
- br i1 %13, label %bb.nph9.split, label %bb10.bb18.loopexit_crit_edge.split
-
-bb.nph9.split: ; preds = %bb.nph9
- br label %bb7.preheader
-
-bb7.preheader: ; preds = %bb.nph9.split, %bb10
- %y.08 = phi i32 [ 0, %bb.nph9.split ], [ %indvar.next11, %bb10 ] ; <i32> [#uses=3]
- br i1 true, label %bb.nph7, label %bb9
-
-bb.nph5: ; preds = %bb18.loopexit
- %14 = sdiv i32 %w, 2 ; <i32> [#uses=1]
- %15 = icmp slt i32 %w, 2 ; <i1> [#uses=1]
- %16 = sdiv i32 %x, 2 ; <i32> [#uses=2]
- br i1 %15, label %bb18.bb20_crit_edge.split, label %bb.nph5.split
-
-bb.nph5.split: ; preds = %bb.nph5
- %tmp2 = icmp sgt i32 1, %16 ; <i1> [#uses=1]
- %smax3 = select i1 %tmp2, i32 1, i32 %16 ; <i32> [#uses=1]
- br label %bb13
-
-bb13: ; preds = %bb18, %bb.nph5.split
- %y.14 = phi i32 [ 0, %bb.nph5.split ], [ %indvar.next1, %bb18 ] ; <i32> [#uses=4]
- %17 = mul i32 %14, %y.14 ; <i32> [#uses=2]
- %18 = shl i32 %y.14, 1 ; <i32> [#uses=1]
- %19 = srem i32 %y.14, 2 ; <i32> [#uses=1]
- %20 = add i32 %19, %18 ; <i32> [#uses=1]
- %21 = mul i32 %20, %s ; <i32> [#uses=2]
- br i1 true, label %bb.nph3, label %bb17
-
-bb.nph3: ; preds = %bb13
- %22 = add i32 %17, %0 ; <i32> [#uses=1]
- %23 = add i32 %17, %.sum2 ; <i32> [#uses=1]
- %24 = sdiv i32 %w, 2 ; <i32> [#uses=2]
- %tmp = icmp sgt i32 1, %24 ; <i1> [#uses=1]
- %smax = select i1 %tmp, i32 1, i32 %24 ; <i32> [#uses=1]
- br label %bb14
-
-bb14: ; preds = %bb15, %bb.nph3
- %x.12 = phi i32 [ 0, %bb.nph3 ], [ %indvar.next, %bb15 ] ; <i32> [#uses=5]
- %25 = shl i32 %x.12, 2 ; <i32> [#uses=1]
- %26 = add i32 %25, %21 ; <i32> [#uses=1]
- %27 = getelementptr i8* %r, i32 %26 ; <i8*> [#uses=1]
- %28 = load i8* %27, align 1 ; <i8> [#uses=1]
- %.sum = add i32 %22, %x.12 ; <i32> [#uses=1]
- %29 = getelementptr i8* %j, i32 %.sum ; <i8*> [#uses=1]
- store i8 %28, i8* %29, align 1
- %30 = shl i32 %x.12, 2 ; <i32> [#uses=1]
- %31 = or i32 %30, 2 ; <i32> [#uses=1]
- %32 = add i32 %31, %21 ; <i32> [#uses=1]
- %33 = getelementptr i8* %r, i32 %32 ; <i8*> [#uses=1]
- %34 = load i8* %33, align 1 ; <i8> [#uses=1]
- %.sum6 = add i32 %23, %x.12 ; <i32> [#uses=1]
- %35 = getelementptr i8* %j, i32 %.sum6 ; <i8*> [#uses=1]
- store i8 %34, i8* %35, align 1
- br label %bb15
-
-bb15: ; preds = %bb14
- %indvar.next = add i32 %x.12, 1 ; <i32> [#uses=2]
- %exitcond = icmp ne i32 %indvar.next, %smax ; <i1> [#uses=1]
- br i1 %exitcond, label %bb14, label %bb15.bb17_crit_edge
-
-bb15.bb17_crit_edge: ; preds = %bb15
- br label %bb17
-
-bb17: ; preds = %bb15.bb17_crit_edge, %bb13
- br label %bb18
-
-bb18.loopexit: ; preds = %bb10.bb18.loopexit_crit_edge.split, %bb10.preheader
- %36 = icmp slt i32 %x, 2 ; <i1> [#uses=1]
- br i1 %36, label %bb20, label %bb.nph5
-
-bb18: ; preds = %bb17
- %indvar.next1 = add i32 %y.14, 1 ; <i32> [#uses=2]
- %exitcond4 = icmp ne i32 %indvar.next1, %smax3 ; <i1> [#uses=1]
- br i1 %exitcond4, label %bb13, label %bb18.bb20_crit_edge
-
-bb18.bb20_crit_edge: ; preds = %bb18
- br label %bb18.bb20_crit_edge.split
-
-bb18.bb20_crit_edge.split: ; preds = %bb18.bb20_crit_edge, %bb.nph5
- br label %bb20
-
-bb20: ; preds = %bb18.bb20_crit_edge.split, %bb18.loopexit
- switch i32 %d, label %return [
- i32 3, label %bb22
- i32 1, label %bb29
- ]
-
-bb22: ; preds = %bb20
- %37 = mul i32 %x, %w ; <i32> [#uses=1]
- %38 = sdiv i32 %37, 4 ; <i32> [#uses=1]
- %.sum3 = add i32 %38, %.sum2 ; <i32> [#uses=2]
- %39 = add i32 %x, 15 ; <i32> [#uses=1]
- %40 = and i32 %39, -16 ; <i32> [#uses=1]
- %41 = add i32 %w, 15 ; <i32> [#uses=1]
- %42 = and i32 %41, -16 ; <i32> [#uses=1]
- %43 = mul i32 %40, %s ; <i32> [#uses=1]
- %44 = icmp sgt i32 %x, 0 ; <i1> [#uses=1]
- br i1 %44, label %bb.nph, label %bb26
-
-bb.nph: ; preds = %bb22
- br label %bb23
-
-bb23: ; preds = %bb24, %bb.nph
- %y.21 = phi i32 [ 0, %bb.nph ], [ %indvar.next5, %bb24 ] ; <i32> [#uses=3]
- %45 = mul i32 %y.21, %42 ; <i32> [#uses=1]
- %.sum1 = add i32 %45, %43 ; <i32> [#uses=1]
- %46 = getelementptr i8* %r, i32 %.sum1 ; <i8*> [#uses=1]
- %47 = mul i32 %y.21, %w ; <i32> [#uses=1]
- %.sum5 = add i32 %47, %.sum3 ; <i32> [#uses=1]
- %48 = getelementptr i8* %j, i32 %.sum5 ; <i8*> [#uses=1]
- tail call void @llvm.memcpy.i32(i8* %48, i8* %46, i32 %w, i32 1)
- br label %bb24
-
-bb24: ; preds = %bb23
- %indvar.next5 = add i32 %y.21, 1 ; <i32> [#uses=2]
- %exitcond6 = icmp ne i32 %indvar.next5, %x ; <i1> [#uses=1]
- br i1 %exitcond6, label %bb23, label %bb24.bb26_crit_edge
-
-bb24.bb26_crit_edge: ; preds = %bb24
- br label %bb26
-
-bb26: ; preds = %bb24.bb26_crit_edge, %bb22
- %49 = mul i32 %x, %w ; <i32> [#uses=1]
- %.sum4 = add i32 %.sum3, %49 ; <i32> [#uses=1]
- %50 = getelementptr i8* %j, i32 %.sum4 ; <i8*> [#uses=1]
- %51 = mul i32 %x, %w ; <i32> [#uses=1]
- %52 = sdiv i32 %51, 2 ; <i32> [#uses=1]
- tail call void @llvm.memset.i32(i8* %50, i8 -128, i32 %52, i32 1)
- ret void
-
-bb29: ; preds = %bb20, %entry
- %53 = add i32 %w, 15 ; <i32> [#uses=1]
- %54 = and i32 %53, -16 ; <i32> [#uses=1]
- %55 = icmp sgt i32 %x, 0 ; <i1> [#uses=1]
- br i1 %55, label %bb.nph11, label %bb33
-
-bb.nph11: ; preds = %bb29
- br label %bb30
-
-bb30: ; preds = %bb31, %bb.nph11
- %y.310 = phi i32 [ 0, %bb.nph11 ], [ %indvar.next13, %bb31 ] ; <i32> [#uses=3]
- %56 = mul i32 %y.310, %54 ; <i32> [#uses=1]
- %57 = getelementptr i8* %r, i32 %56 ; <i8*> [#uses=1]
- %58 = mul i32 %y.310, %w ; <i32> [#uses=1]
- %59 = getelementptr i8* %j, i32 %58 ; <i8*> [#uses=1]
- tail call void @llvm.memcpy.i32(i8* %59, i8* %57, i32 %w, i32 1)
- br label %bb31
-
-bb31: ; preds = %bb30
- %indvar.next13 = add i32 %y.310, 1 ; <i32> [#uses=2]
- %exitcond14 = icmp ne i32 %indvar.next13, %x ; <i1> [#uses=1]
- br i1 %exitcond14, label %bb30, label %bb31.bb33_crit_edge
-
-bb31.bb33_crit_edge: ; preds = %bb31
- br label %bb33
-
-bb33: ; preds = %bb31.bb33_crit_edge, %bb29
- %60 = mul i32 %x, %w ; <i32> [#uses=1]
- %61 = getelementptr i8* %j, i32 %60 ; <i8*> [#uses=1]
- %62 = mul i32 %x, %w ; <i32> [#uses=1]
- %63 = sdiv i32 %62, 2 ; <i32> [#uses=1]
- tail call void @llvm.memset.i32(i8* %61, i8 -128, i32 %63, i32 1)
- ret void
-
-return: ; preds = %bb20
- ret void
-}
-
-define void @bar(i8* %r, i32 %s, i32 %w, i32 %x, i8* %j, i32 %d) nounwind {
-entry:
- %0 = mul i32 %x, %w ; <i32> [#uses=2]
- %1 = mul i32 %x, %w ; <i32> [#uses=1]
- %2 = udiv i32 %1, 4 ; <i32> [#uses=1]
- %.sum2 = add i32 %2, %0 ; <i32> [#uses=2]
- %cond = icmp eq i32 %d, 1 ; <i1> [#uses=1]
- br i1 %cond, label %bb29, label %bb10.preheader
-
-bb10.preheader: ; preds = %entry
- %3 = icmp ne i32 %x, 0 ; <i1> [#uses=1]
- br i1 %3, label %bb.nph9, label %bb18.loopexit
-
-bb.nph7: ; preds = %bb7.preheader
- %4 = mul i32 %y.08, %w ; <i32> [#uses=1]
- %5 = mul i32 %y.08, %s ; <i32> [#uses=1]
- %6 = add i32 %5, 1 ; <i32> [#uses=1]
- %tmp8 = icmp ugt i32 1, %w ; <i1> [#uses=1]
- %smax9 = select i1 %tmp8, i32 1, i32 %w ; <i32> [#uses=1]
- br label %bb6
-
-bb6: ; preds = %bb7, %bb.nph7
- %x.06 = phi i32 [ 0, %bb.nph7 ], [ %indvar.next7, %bb7 ] ; <i32> [#uses=3]
- %7 = add i32 %x.06, %4 ; <i32> [#uses=1]
- %8 = shl i32 %x.06, 1 ; <i32> [#uses=1]
- %9 = add i32 %6, %8 ; <i32> [#uses=1]
- %10 = getelementptr i8* %r, i32 %9 ; <i8*> [#uses=1]
- %11 = load i8* %10, align 1 ; <i8> [#uses=1]
- %12 = getelementptr i8* %j, i32 %7 ; <i8*> [#uses=1]
- store i8 %11, i8* %12, align 1
- br label %bb7
-
-bb7: ; preds = %bb6
- %indvar.next7 = add i32 %x.06, 1 ; <i32> [#uses=2]
- %exitcond10 = icmp ne i32 %indvar.next7, %smax9 ; <i1> [#uses=1]
- br i1 %exitcond10, label %bb6, label %bb7.bb9_crit_edge
-
-bb7.bb9_crit_edge: ; preds = %bb7
- br label %bb9
-
-bb9: ; preds = %bb7.preheader, %bb7.bb9_crit_edge
- br label %bb10
-
-bb10: ; preds = %bb9
- %indvar.next11 = add i32 %y.08, 1 ; <i32> [#uses=2]
- %exitcond12 = icmp ne i32 %indvar.next11, %x ; <i1> [#uses=1]
- br i1 %exitcond12, label %bb7.preheader, label %bb10.bb18.loopexit_crit_edge
-
-bb10.bb18.loopexit_crit_edge: ; preds = %bb10
- br label %bb10.bb18.loopexit_crit_edge.split
-
-bb10.bb18.loopexit_crit_edge.split: ; preds = %bb.nph9, %bb10.bb18.loopexit_crit_edge
- br label %bb18.loopexit
-
-bb.nph9: ; preds = %bb10.preheader
- %13 = icmp ugt i32 %w, 0 ; <i1> [#uses=1]
- br i1 %13, label %bb.nph9.split, label %bb10.bb18.loopexit_crit_edge.split
-
-bb.nph9.split: ; preds = %bb.nph9
- br label %bb7.preheader
-
-bb7.preheader: ; preds = %bb.nph9.split, %bb10
- %y.08 = phi i32 [ 0, %bb.nph9.split ], [ %indvar.next11, %bb10 ] ; <i32> [#uses=3]
- br i1 true, label %bb.nph7, label %bb9
-
-bb.nph5: ; preds = %bb18.loopexit
- %14 = udiv i32 %w, 2 ; <i32> [#uses=1]
- %15 = icmp ult i32 %w, 2 ; <i1> [#uses=1]
- %16 = udiv i32 %x, 2 ; <i32> [#uses=2]
- br i1 %15, label %bb18.bb20_crit_edge.split, label %bb.nph5.split
-
-bb.nph5.split: ; preds = %bb.nph5
- %tmp2 = icmp ugt i32 1, %16 ; <i1> [#uses=1]
- %smax3 = select i1 %tmp2, i32 1, i32 %16 ; <i32> [#uses=1]
- br label %bb13
-
-bb13: ; preds = %bb18, %bb.nph5.split
- %y.14 = phi i32 [ 0, %bb.nph5.split ], [ %indvar.next1, %bb18 ] ; <i32> [#uses=4]
- %17 = mul i32 %14, %y.14 ; <i32> [#uses=2]
- %18 = shl i32 %y.14, 1 ; <i32> [#uses=1]
- %19 = urem i32 %y.14, 2 ; <i32> [#uses=1]
- %20 = add i32 %19, %18 ; <i32> [#uses=1]
- %21 = mul i32 %20, %s ; <i32> [#uses=2]
- br i1 true, label %bb.nph3, label %bb17
-
-bb.nph3: ; preds = %bb13
- %22 = add i32 %17, %0 ; <i32> [#uses=1]
- %23 = add i32 %17, %.sum2 ; <i32> [#uses=1]
- %24 = udiv i32 %w, 2 ; <i32> [#uses=2]
- %tmp = icmp ugt i32 1, %24 ; <i1> [#uses=1]
- %smax = select i1 %tmp, i32 1, i32 %24 ; <i32> [#uses=1]
- br label %bb14
-
-bb14: ; preds = %bb15, %bb.nph3
- %x.12 = phi i32 [ 0, %bb.nph3 ], [ %indvar.next, %bb15 ] ; <i32> [#uses=5]
- %25 = shl i32 %x.12, 2 ; <i32> [#uses=1]
- %26 = add i32 %25, %21 ; <i32> [#uses=1]
- %27 = getelementptr i8* %r, i32 %26 ; <i8*> [#uses=1]
- %28 = load i8* %27, align 1 ; <i8> [#uses=1]
- %.sum = add i32 %22, %x.12 ; <i32> [#uses=1]
- %29 = getelementptr i8* %j, i32 %.sum ; <i8*> [#uses=1]
- store i8 %28, i8* %29, align 1
- %30 = shl i32 %x.12, 2 ; <i32> [#uses=1]
- %31 = or i32 %30, 2 ; <i32> [#uses=1]
- %32 = add i32 %31, %21 ; <i32> [#uses=1]
- %33 = getelementptr i8* %r, i32 %32 ; <i8*> [#uses=1]
- %34 = load i8* %33, align 1 ; <i8> [#uses=1]
- %.sum6 = add i32 %23, %x.12 ; <i32> [#uses=1]
- %35 = getelementptr i8* %j, i32 %.sum6 ; <i8*> [#uses=1]
- store i8 %34, i8* %35, align 1
- br label %bb15
-
-bb15: ; preds = %bb14
- %indvar.next = add i32 %x.12, 1 ; <i32> [#uses=2]
- %exitcond = icmp ne i32 %indvar.next, %smax ; <i1> [#uses=1]
- br i1 %exitcond, label %bb14, label %bb15.bb17_crit_edge
-
-bb15.bb17_crit_edge: ; preds = %bb15
- br label %bb17
-
-bb17: ; preds = %bb15.bb17_crit_edge, %bb13
- br label %bb18
-
-bb18.loopexit: ; preds = %bb10.bb18.loopexit_crit_edge.split, %bb10.preheader
- %36 = icmp ult i32 %x, 2 ; <i1> [#uses=1]
- br i1 %36, label %bb20, label %bb.nph5
-
-bb18: ; preds = %bb17
- %indvar.next1 = add i32 %y.14, 1 ; <i32> [#uses=2]
- %exitcond4 = icmp ne i32 %indvar.next1, %smax3 ; <i1> [#uses=1]
- br i1 %exitcond4, label %bb13, label %bb18.bb20_crit_edge
-
-bb18.bb20_crit_edge: ; preds = %bb18
- br label %bb18.bb20_crit_edge.split
-
-bb18.bb20_crit_edge.split: ; preds = %bb18.bb20_crit_edge, %bb.nph5
- br label %bb20
-
-bb20: ; preds = %bb18.bb20_crit_edge.split, %bb18.loopexit
- switch i32 %d, label %return [
- i32 3, label %bb22
- i32 1, label %bb29
- ]
-
-bb22: ; preds = %bb20
- %37 = mul i32 %x, %w ; <i32> [#uses=1]
- %38 = udiv i32 %37, 4 ; <i32> [#uses=1]
- %.sum3 = add i32 %38, %.sum2 ; <i32> [#uses=2]
- %39 = add i32 %x, 15 ; <i32> [#uses=1]
- %40 = and i32 %39, -16 ; <i32> [#uses=1]
- %41 = add i32 %w, 15 ; <i32> [#uses=1]
- %42 = and i32 %41, -16 ; <i32> [#uses=1]
- %43 = mul i32 %40, %s ; <i32> [#uses=1]
- %44 = icmp ugt i32 %x, 0 ; <i1> [#uses=1]
- br i1 %44, label %bb.nph, label %bb26
-
-bb.nph: ; preds = %bb22
- br label %bb23
-
-bb23: ; preds = %bb24, %bb.nph
- %y.21 = phi i32 [ 0, %bb.nph ], [ %indvar.next5, %bb24 ] ; <i32> [#uses=3]
- %45 = mul i32 %y.21, %42 ; <i32> [#uses=1]
- %.sum1 = add i32 %45, %43 ; <i32> [#uses=1]
- %46 = getelementptr i8* %r, i32 %.sum1 ; <i8*> [#uses=1]
- %47 = mul i32 %y.21, %w ; <i32> [#uses=1]
- %.sum5 = add i32 %47, %.sum3 ; <i32> [#uses=1]
- %48 = getelementptr i8* %j, i32 %.sum5 ; <i8*> [#uses=1]
- tail call void @llvm.memcpy.i32(i8* %48, i8* %46, i32 %w, i32 1)
- br label %bb24
-
-bb24: ; preds = %bb23
- %indvar.next5 = add i32 %y.21, 1 ; <i32> [#uses=2]
- %exitcond6 = icmp ne i32 %indvar.next5, %x ; <i1> [#uses=1]
- br i1 %exitcond6, label %bb23, label %bb24.bb26_crit_edge
-
-bb24.bb26_crit_edge: ; preds = %bb24
- br label %bb26
-
-bb26: ; preds = %bb24.bb26_crit_edge, %bb22
- %49 = mul i32 %x, %w ; <i32> [#uses=1]
- %.sum4 = add i32 %.sum3, %49 ; <i32> [#uses=1]
- %50 = getelementptr i8* %j, i32 %.sum4 ; <i8*> [#uses=1]
- %51 = mul i32 %x, %w ; <i32> [#uses=1]
- %52 = udiv i32 %51, 2 ; <i32> [#uses=1]
- tail call void @llvm.memset.i32(i8* %50, i8 -128, i32 %52, i32 1)
- ret void
-
-bb29: ; preds = %bb20, %entry
- %53 = add i32 %w, 15 ; <i32> [#uses=1]
- %54 = and i32 %53, -16 ; <i32> [#uses=1]
- %55 = icmp ugt i32 %x, 0 ; <i1> [#uses=1]
- br i1 %55, label %bb.nph11, label %bb33
-
-bb.nph11: ; preds = %bb29
- br label %bb30
-
-bb30: ; preds = %bb31, %bb.nph11
- %y.310 = phi i32 [ 0, %bb.nph11 ], [ %indvar.next13, %bb31 ] ; <i32> [#uses=3]
- %56 = mul i32 %y.310, %54 ; <i32> [#uses=1]
- %57 = getelementptr i8* %r, i32 %56 ; <i8*> [#uses=1]
- %58 = mul i32 %y.310, %w ; <i32> [#uses=1]
- %59 = getelementptr i8* %j, i32 %58 ; <i8*> [#uses=1]
- tail call void @llvm.memcpy.i32(i8* %59, i8* %57, i32 %w, i32 1)
- br label %bb31
-
-bb31: ; preds = %bb30
- %indvar.next13 = add i32 %y.310, 1 ; <i32> [#uses=2]
- %exitcond14 = icmp ne i32 %indvar.next13, %x ; <i1> [#uses=1]
- br i1 %exitcond14, label %bb30, label %bb31.bb33_crit_edge
-
-bb31.bb33_crit_edge: ; preds = %bb31
- br label %bb33
-
-bb33: ; preds = %bb31.bb33_crit_edge, %bb29
- %60 = mul i32 %x, %w ; <i32> [#uses=1]
- %61 = getelementptr i8* %j, i32 %60 ; <i8*> [#uses=1]
- %62 = mul i32 %x, %w ; <i32> [#uses=1]
- %63 = udiv i32 %62, 2 ; <i32> [#uses=1]
- tail call void @llvm.memset.i32(i8* %61, i8 -128, i32 %63, i32 1)
- ret void
-
-return: ; preds = %bb20
- ret void
-}
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32) nounwind
-
-declare void @llvm.memset.i32(i8*, i8, i32, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/optimize-max-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/optimize-max-1.ll
deleted file mode 100644
index ad6c24d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/optimize-max-1.ll
+++ /dev/null
@@ -1,78 +0,0 @@
-; RUN: llc < %s -march=x86-64 | not grep cmov
-
-; LSR should be able to eliminate both smax and umax expressions
-; in loop trip counts.
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-
-define void @fs(double* nocapture %p, i64 %n) nounwind {
-entry:
- %tmp = icmp slt i64 %n, 1 ; <i1> [#uses=1]
- %smax = select i1 %tmp, i64 1, i64 %n ; <i64> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb, %entry
- %i.0 = phi i64 [ 0, %entry ], [ %0, %bb ] ; <i64> [#uses=2]
- %scevgep = getelementptr double* %p, i64 %i.0 ; <double*> [#uses=1]
- store double 0.000000e+00, double* %scevgep, align 8
- %0 = add i64 %i.0, 1 ; <i64> [#uses=2]
- %exitcond = icmp eq i64 %0, %smax ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb
-
-return: ; preds = %bb
- ret void
-}
-
-define void @bs(double* nocapture %p, i64 %n) nounwind {
-entry:
- %tmp = icmp sge i64 %n, 1 ; <i1> [#uses=1]
- %smax = select i1 %tmp, i64 %n, i64 1 ; <i64> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb, %entry
- %i.0 = phi i64 [ 0, %entry ], [ %0, %bb ] ; <i64> [#uses=2]
- %scevgep = getelementptr double* %p, i64 %i.0 ; <double*> [#uses=1]
- store double 0.000000e+00, double* %scevgep, align 8
- %0 = add i64 %i.0, 1 ; <i64> [#uses=2]
- %exitcond = icmp eq i64 %0, %smax ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb
-
-return: ; preds = %bb
- ret void
-}
-
-define void @fu(double* nocapture %p, i64 %n) nounwind {
-entry:
- %tmp = icmp eq i64 %n, 0 ; <i1> [#uses=1]
- %umax = select i1 %tmp, i64 1, i64 %n ; <i64> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb, %entry
- %i.0 = phi i64 [ 0, %entry ], [ %0, %bb ] ; <i64> [#uses=2]
- %scevgep = getelementptr double* %p, i64 %i.0 ; <double*> [#uses=1]
- store double 0.000000e+00, double* %scevgep, align 8
- %0 = add i64 %i.0, 1 ; <i64> [#uses=2]
- %exitcond = icmp eq i64 %0, %umax ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb
-
-return: ; preds = %bb
- ret void
-}
-
-define void @bu(double* nocapture %p, i64 %n) nounwind {
-entry:
- %tmp = icmp ne i64 %n, 0 ; <i1> [#uses=1]
- %umax = select i1 %tmp, i64 %n, i64 1 ; <i64> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb, %entry
- %i.0 = phi i64 [ 0, %entry ], [ %0, %bb ] ; <i64> [#uses=2]
- %scevgep = getelementptr double* %p, i64 %i.0 ; <double*> [#uses=1]
- store double 0.000000e+00, double* %scevgep, align 8
- %0 = add i64 %i.0, 1 ; <i64> [#uses=2]
- %exitcond = icmp eq i64 %0, %umax ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb
-
-return: ; preds = %bb
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/optimize-max-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/optimize-max-2.ll
deleted file mode 100644
index 8851c5b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/optimize-max-2.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: grep cmov %t | count 2
-; RUN: grep jne %t | count 1
-
-; LSR's OptimizeMax function shouldn't try to eliminate this max, because
-; it has three operands.
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-
-define void @foo(double* nocapture %p, i64 %x, i64 %y) nounwind {
-entry:
- %tmp = icmp eq i64 %y, 0 ; <i1> [#uses=1]
- %umax = select i1 %tmp, i64 1, i64 %y ; <i64> [#uses=2]
- %tmp8 = icmp ugt i64 %umax, %x ; <i1> [#uses=1]
- %umax9 = select i1 %tmp8, i64 %umax, i64 %x ; <i64> [#uses=1]
- br label %bb4
-
-bb4: ; preds = %bb4, %entry
- %i.07 = phi i64 [ 0, %entry ], [ %2, %bb4 ] ; <i64> [#uses=2]
- %scevgep = getelementptr double* %p, i64 %i.07 ; <double*> [#uses=2]
- %0 = load double* %scevgep, align 8 ; <double> [#uses=1]
- %1 = fmul double %0, 2.000000e+00 ; <double> [#uses=1]
- store double %1, double* %scevgep, align 8
- %2 = add i64 %i.07, 1 ; <i64> [#uses=2]
- %exitcond = icmp eq i64 %2, %umax9 ; <i1> [#uses=1]
- br i1 %exitcond, label %return, label %bb4
-
-return: ; preds = %bb4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/or-branch.ll b/libclamav/c++/llvm/test/CodeGen/X86/or-branch.ll
deleted file mode 100644
index 9ebf890..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/or-branch.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep set
-
-define void @foo(i32 %X, i32 %Y, i32 %Z) nounwind {
-entry:
- %tmp = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
- %tmp.upgrd.1 = icmp eq i32 %X, 0 ; <i1> [#uses=1]
- %tmp3 = icmp slt i32 %Y, 5 ; <i1> [#uses=1]
- %tmp4 = or i1 %tmp3, %tmp.upgrd.1 ; <i1> [#uses=1]
- br i1 %tmp4, label %cond_true, label %UnifiedReturnBlock
-
-cond_true: ; preds = %entry
- %tmp5 = tail call i32 (...)* @bar( ) ; <i32> [#uses=0]
- ret void
-
-UnifiedReturnBlock: ; preds = %entry
- ret void
-}
-
-declare i32 @bar(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/overlap-shift.ll b/libclamav/c++/llvm/test/CodeGen/X86/overlap-shift.ll
deleted file mode 100644
index c1fc041..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/overlap-shift.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-;; X's live range extends beyond the shift, so the register allocator
-;; cannot coalesce it with Y. Because of this, a copy needs to be
-;; emitted before the shift to save the register value before it is
-;; clobbered. However, this copy is not needed if the register
-;; allocator turns the shift into an LEA. This also occurs for ADD.
-
-; Check that the shift gets turned into an LEA.
-
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
-; RUN: not grep {mov E.X, E.X}
-
-@G = external global i32 ; <i32*> [#uses=1]
-
-define i32 @test1(i32 %X) {
- %Z = shl i32 %X, 2 ; <i32> [#uses=1]
- volatile store i32 %Z, i32* @G
- ret i32 %X
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/packed_struct.ll b/libclamav/c++/llvm/test/CodeGen/X86/packed_struct.ll
deleted file mode 100644
index da6e8f8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/packed_struct.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep foos+5 %t
-; RUN: grep foos+1 %t
-; RUN: grep foos+9 %t
-; RUN: grep bara+19 %t
-; RUN: grep bara+4 %t
-
-; make sure we compute the correct offset for a packed structure
-
-;Note: codegen for this could change rendering the above checks wrong
-
-target datalayout = "e-p:32:32"
-target triple = "i686-pc-linux-gnu"
- %struct.anon = type <{ i8, i32, i32, i32 }>
-@foos = external global %struct.anon ; <%struct.anon*> [#uses=3]
-@bara = weak global [4 x <{ i32, i8 }>] zeroinitializer ; <[4 x <{ i32, i8 }>]*> [#uses=2]
-
-define i32 @foo() nounwind {
-entry:
- %tmp = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 1) ; <i32> [#uses=1]
- %tmp3 = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 2) ; <i32> [#uses=1]
- %tmp6 = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 3) ; <i32> [#uses=1]
- %tmp4 = add i32 %tmp3, %tmp ; <i32> [#uses=1]
- %tmp7 = add i32 %tmp4, %tmp6 ; <i32> [#uses=1]
- ret i32 %tmp7
-}
-
-define i8 @bar() nounwind {
-entry:
- %tmp = load i8* getelementptr ([4 x <{ i32, i8 }>]* @bara, i32 0, i32 0, i32 1) ; <i8> [#uses=1]
- %tmp4 = load i8* getelementptr ([4 x <{ i32, i8 }>]* @bara, i32 0, i32 3, i32 1) ; <i8> [#uses=1]
- %tmp5 = add i8 %tmp4, %tmp ; <i8> [#uses=1]
- ret i8 %tmp5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/palignr-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/palignr-2.ll
deleted file mode 100644
index 116d4c7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/palignr-2.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+ssse3 | FileCheck %s
-; rdar://7341330
-
-@a = global [4 x i32] [i32 4, i32 5, i32 6, i32 7], align 16 ; <[4 x i32]*> [#uses=1]
-@c = common global [4 x i32] zeroinitializer, align 16 ; <[4 x i32]*> [#uses=1]
-@b = global [4 x i32] [i32 0, i32 1, i32 2, i32 3], align 16 ; <[4 x i32]*> [#uses=1]
-
-define void @t1(<2 x i64> %a, <2 x i64> %b) nounwind ssp {
-entry:
-; CHECK: t1:
-; palignr $3, %xmm1, %xmm0
- %0 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %a, <2 x i64> %b, i8 24) nounwind readnone
- store <2 x i64> %0, <2 x i64>* bitcast ([4 x i32]* @c to <2 x i64>*), align 16
- ret void
-}
-
-declare <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64>, <2 x i64>, i8) nounwind readnone
-
-define void @t2() nounwind ssp {
-entry:
-; CHECK: t2:
-; palignr $4, _b, %xmm0
- %0 = load <2 x i64>* bitcast ([4 x i32]* @b to <2 x i64>*), align 16 ; <<2 x i64>> [#uses=1]
- %1 = load <2 x i64>* bitcast ([4 x i32]* @a to <2 x i64>*), align 16 ; <<2 x i64>> [#uses=1]
- %2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i8 32) nounwind readnone
- store <2 x i64> %2, <2 x i64>* bitcast ([4 x i32]* @c to <2 x i64>*), align 16
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/palignr.ll b/libclamav/c++/llvm/test/CodeGen/X86/palignr.ll
deleted file mode 100644
index 3812c72..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/palignr.ll
+++ /dev/null
@@ -1,58 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=core2 | FileCheck %s
-; RUN: llc < %s -march=x86 -mcpu=yonah | FileCheck --check-prefix=YONAH %s
-
-define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK: pshufd
-; CHECK-YONAH: pshufd
- %C = shufflevector <4 x i32> %A, <4 x i32> undef, <4 x i32> < i32 1, i32 2, i32 3, i32 0 >
- ret <4 x i32> %C
-}
-
-define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK: palignr
-; CHECK-YONAH: shufps
- %C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 1, i32 2, i32 3, i32 4 >
- ret <4 x i32> %C
-}
-
-define <4 x i32> @test3(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK: palignr
- %C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 1, i32 2, i32 undef, i32 4 >
- ret <4 x i32> %C
-}
-
-define <4 x i32> @test4(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK: palignr
- %C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 6, i32 7, i32 undef, i32 1 >
- ret <4 x i32> %C
-}
-
-define <4 x float> @test5(<4 x float> %A, <4 x float> %B) nounwind {
-; CHECK: palignr
- %C = shufflevector <4 x float> %A, <4 x float> %B, <4 x i32> < i32 6, i32 7, i32 undef, i32 1 >
- ret <4 x float> %C
-}
-
-define <8 x i16> @test6(<8 x i16> %A, <8 x i16> %B) nounwind {
-; CHECK: palignr
- %C = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 3, i32 4, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10 >
- ret <8 x i16> %C
-}
-
-define <8 x i16> @test7(<8 x i16> %A, <8 x i16> %B) nounwind {
-; CHECK: palignr
- %C = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 undef, i32 6, i32 undef, i32 8, i32 9, i32 10, i32 11, i32 12 >
- ret <8 x i16> %C
-}
-
-define <8 x i16> @test8(<8 x i16> %A, <8 x i16> %B) nounwind {
-; CHECK: palignr
- %C = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 undef, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0 >
- ret <8 x i16> %C
-}
-
-define <16 x i8> @test9(<16 x i8> %A, <16 x i8> %B) nounwind {
-; CHECK: palignr
- %C = shufflevector <16 x i8> %A, <16 x i8> %B, <16 x i32> < i32 5, i32 6, i32 7, i32 undef, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20 >
- ret <16 x i8> %C
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/peep-test-0.ll b/libclamav/c++/llvm/test/CodeGen/X86/peep-test-0.ll
deleted file mode 100644
index e521d8e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/peep-test-0.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: not grep cmp %t
-; RUN: not grep test %t
-
-define void @loop(i64 %n, double* nocapture %d) nounwind {
-entry:
- br label %bb
-
-bb:
- %indvar = phi i64 [ %n, %entry ], [ %indvar.next, %bb ]
- %i.03 = add i64 %indvar, %n
- %0 = getelementptr double* %d, i64 %i.03
- %1 = load double* %0, align 8
- %2 = fmul double %1, 3.000000e+00
- store double %2, double* %0, align 8
- %indvar.next = add i64 %indvar, 1
- %exitcond = icmp eq i64 %indvar.next, 0
- br i1 %exitcond, label %return, label %bb
-
-return:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/peep-test-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/peep-test-1.ll
deleted file mode 100644
index f83f0f6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/peep-test-1.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep dec %t | count 1
-; RUN: not grep test %t
-; RUN: not grep cmp %t
-
-define void @foo(i32 %n, double* nocapture %p) nounwind {
- br label %bb
-
-bb:
- %indvar = phi i32 [ 0, %0 ], [ %indvar.next, %bb ]
- %i.03 = sub i32 %n, %indvar
- %1 = getelementptr double* %p, i32 %i.03
- %2 = load double* %1, align 4
- %3 = fmul double %2, 2.930000e+00
- store double %3, double* %1, align 4
- %4 = add i32 %i.03, -1
- %phitmp = icmp slt i32 %4, 0
- %indvar.next = add i32 %indvar, 1
- br i1 %phitmp, label %bb, label %return
-
-return:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/peep-test-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/peep-test-2.ll
deleted file mode 100644
index 2745172..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/peep-test-2.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86 | grep testl
-
-; It's tempting to eliminate the testl instruction here and just use the
-; EFLAGS value from the incl, however it can't be known whether the add
-; will overflow, and if it does the incl would set OF, and the
-; subsequent setg would return true.
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.6"
-
-define i32 @f(i32 %j) nounwind readnone {
-entry:
- %0 = add i32 %j, 1 ; <i32> [#uses=1]
- %1 = icmp sgt i32 %0, 0 ; <i1> [#uses=1]
- %2 = zext i1 %1 to i32 ; <i32> [#uses=1]
- ret i32 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/peep-test-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/peep-test-3.ll
deleted file mode 100644
index a34a978..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/peep-test-3.ll
+++ /dev/null
@@ -1,89 +0,0 @@
-; RUN: llc < %s -march=x86 -post-RA-scheduler=false | FileCheck %s
-; rdar://7226797
-
-; LLVM should omit the testl and use the flags result from the orl.
-
-; CHECK: or:
-define void @or(float* %A, i32 %IA, i32 %N) nounwind {
-entry:
- %0 = ptrtoint float* %A to i32 ; <i32> [#uses=1]
- %1 = and i32 %0, 3 ; <i32> [#uses=1]
- %2 = xor i32 %IA, 1 ; <i32> [#uses=1]
-; CHECK: orl %ecx, %edx
-; CHECK-NEXT: je
- %3 = or i32 %2, %1 ; <i32> [#uses=1]
- %4 = icmp eq i32 %3, 0 ; <i1> [#uses=1]
- br i1 %4, label %return, label %bb
-
-bb: ; preds = %entry
- store float 0.000000e+00, float* %A, align 4
- ret void
-
-return: ; preds = %entry
- ret void
-}
-; CHECK: xor:
-define void @xor(float* %A, i32 %IA, i32 %N) nounwind {
-entry:
- %0 = ptrtoint float* %A to i32 ; <i32> [#uses=1]
- %1 = and i32 %0, 3 ; <i32> [#uses=1]
-; CHECK: xorl $1, %e
-; CHECK-NEXT: je
- %2 = xor i32 %IA, 1 ; <i32> [#uses=1]
- %3 = xor i32 %2, %1 ; <i32> [#uses=1]
- %4 = icmp eq i32 %3, 0 ; <i1> [#uses=1]
- br i1 %4, label %return, label %bb
-
-bb: ; preds = %entry
- store float 0.000000e+00, float* %A, align 4
- ret void
-
-return: ; preds = %entry
- ret void
-}
-; CHECK: and:
-define void @and(float* %A, i32 %IA, i32 %N, i8* %p) nounwind {
-entry:
- store i8 0, i8* %p
- %0 = ptrtoint float* %A to i32 ; <i32> [#uses=1]
- %1 = and i32 %0, 3 ; <i32> [#uses=1]
- %2 = xor i32 %IA, 1 ; <i32> [#uses=1]
-; CHECK: andl $3, %
-; CHECK-NEXT: movb %
-; CHECK-NEXT: je
- %3 = and i32 %2, %1 ; <i32> [#uses=1]
- %t = trunc i32 %3 to i8
- store i8 %t, i8* %p
- %4 = icmp eq i32 %3, 0 ; <i1> [#uses=1]
- br i1 %4, label %return, label %bb
-
-bb: ; preds = %entry
- store float 0.000000e+00, float* null, align 4
- ret void
-
-return: ; preds = %entry
- ret void
-}
-
-; Just like @and, but without the trunc+store. This should use a testb
-; instead of an andl.
-; CHECK: test:
-define void @test(float* %A, i32 %IA, i32 %N, i8* %p) nounwind {
-entry:
- store i8 0, i8* %p
- %0 = ptrtoint float* %A to i32 ; <i32> [#uses=1]
- %1 = and i32 %0, 3 ; <i32> [#uses=1]
- %2 = xor i32 %IA, 1 ; <i32> [#uses=1]
-; CHECK: testb $3, %
-; CHECK-NEXT: je
- %3 = and i32 %2, %1 ; <i32> [#uses=1]
- %4 = icmp eq i32 %3, 0 ; <i1> [#uses=1]
- br i1 %4, label %return, label %bb
-
-bb: ; preds = %entry
- store float 0.000000e+00, float* null, align 4
- ret void
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/peep-vector-extract-concat.ll b/libclamav/c++/llvm/test/CodeGen/X86/peep-vector-extract-concat.ll
deleted file mode 100644
index e4ab2b5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/peep-vector-extract-concat.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse2,-sse41 | grep {pshufd \$3, %xmm0, %xmm0}
-
-define float @foo(<8 x float> %a) nounwind {
- %c = extractelement <8 x float> %a, i32 3
- ret float %c
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/peep-vector-extract-insert.ll b/libclamav/c++/llvm/test/CodeGen/X86/peep-vector-extract-insert.ll
deleted file mode 100644
index 5e18044..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/peep-vector-extract-insert.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep {pxor %xmm0, %xmm0} | count 2
-
-define float @foo(<4 x float> %a) {
- %b = insertelement <4 x float> %a, float 0.0, i32 3
- %c = extractelement <4 x float> %b, i32 3
- ret float %c
-}
-define float @bar(float %a) {
- %b = insertelement <4 x float> <float 0x400B333340000000, float 4.5, float 0.0, float 0x4022666660000000>, float %a, i32 3
- %c = extractelement <4 x float> %b, i32 2
- ret float %c
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/personality.ll b/libclamav/c++/llvm/test/CodeGen/X86/personality.ll
deleted file mode 100644
index ce57e8f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/personality.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin9 | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -mtriple=i386-apple-darwin9 | FileCheck %s -check-prefix=X32
-; PR1632
-
-define void @_Z1fv() {
-entry:
- invoke void @_Z1gv( )
- to label %return unwind label %unwind
-
-unwind: ; preds = %entry
- br i1 false, label %eh_then, label %cleanup20
-
-eh_then: ; preds = %unwind
- invoke void @__cxa_end_catch( )
- to label %return unwind label %unwind10
-
-unwind10: ; preds = %eh_then
- %eh_select13 = tail call i64 (i8*, i8*, ...)* @llvm.eh.selector.i64( i8* null, i8* bitcast (void ()* @__gxx_personality_v0 to i8*), i32 1 ) ; <i32> [#uses=2]
- %tmp18 = icmp slt i64 %eh_select13, 0 ; <i1> [#uses=1]
- br i1 %tmp18, label %filter, label %cleanup20
-
-filter: ; preds = %unwind10
- unreachable
-
-cleanup20: ; preds = %unwind10, %unwind
- %eh_selector.0 = phi i64 [ 0, %unwind ], [ %eh_select13, %unwind10 ] ; <i32> [#uses=0]
- ret void
-
-return: ; preds = %eh_then, %entry
- ret void
-}
-
-declare void @_Z1gv()
-
-declare i64 @llvm.eh.selector.i64(i8*, i8*, ...)
-
-declare void @__gxx_personality_v0()
-
-declare void @__cxa_end_catch()
-
-; X64: Leh_frame_common_begin:
-; X64: .long (___gxx_personality_v0@GOTPCREL)+4
-
-; X32: Leh_frame_common_begin:
-; X32: .long L___gxx_personality_v0$non_lazy_ptr-
-; ....
-
-; X32: .section __IMPORT,__pointers,non_lazy_symbol_pointers
-; X32: L___gxx_personality_v0$non_lazy_ptr:
-; X32: .indirect_symbol ___gxx_personality_v0
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/phi-immediate-factoring.ll b/libclamav/c++/llvm/test/CodeGen/X86/phi-immediate-factoring.ll
deleted file mode 100644
index 9f9f921..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/phi-immediate-factoring.ll
+++ /dev/null
@@ -1,54 +0,0 @@
-; PR1296
-; RUN: llc < %s -march=x86 | grep {movl \$1} | count 1
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
-
-define i32 @foo(i32 %A, i32 %B, i32 %C) {
-entry:
- switch i32 %A, label %out [
- i32 1, label %bb
- i32 0, label %bb13
- i32 2, label %bb35
- ]
-
-bb: ; preds = %cond_next, %entry
- %i.144.1 = phi i32 [ 0, %entry ], [ %tmp7, %cond_next ] ; <i32> [#uses=2]
- %tmp4 = and i32 %i.144.1, %B ; <i32> [#uses=1]
- icmp eq i32 %tmp4, 0 ; <i1>:0 [#uses=1]
- br i1 %0, label %cond_next, label %out
-
-cond_next: ; preds = %bb
- %tmp7 = add i32 %i.144.1, 1 ; <i32> [#uses=2]
- icmp slt i32 %tmp7, 1000 ; <i1>:1 [#uses=1]
- br i1 %1, label %bb, label %out
-
-bb13: ; preds = %cond_next18, %entry
- %i.248.1 = phi i32 [ 0, %entry ], [ %tmp20, %cond_next18 ] ; <i32> [#uses=2]
- %tmp16 = and i32 %i.248.1, %C ; <i32> [#uses=1]
- icmp eq i32 %tmp16, 0 ; <i1>:2 [#uses=1]
- br i1 %2, label %cond_next18, label %out
-
-cond_next18: ; preds = %bb13
- %tmp20 = add i32 %i.248.1, 1 ; <i32> [#uses=2]
- icmp slt i32 %tmp20, 1000 ; <i1>:3 [#uses=1]
- br i1 %3, label %bb13, label %out
-
-bb27: ; preds = %bb35
- %tmp30 = and i32 %i.3, %C ; <i32> [#uses=1]
- icmp eq i32 %tmp30, 0 ; <i1>:4 [#uses=1]
- br i1 %4, label %cond_next32, label %out
-
-cond_next32: ; preds = %bb27
- %indvar.next = add i32 %i.3, 1 ; <i32> [#uses=1]
- br label %bb35
-
-bb35: ; preds = %entry, %cond_next32
- %i.3 = phi i32 [ %indvar.next, %cond_next32 ], [ 0, %entry ] ; <i32> [#uses=3]
- icmp slt i32 %i.3, 1000 ; <i1>:5 [#uses=1]
- br i1 %5, label %bb27, label %out
-
-out: ; preds = %bb27, %bb35, %bb13, %cond_next18, %bb, %cond_next, %entry
- %result.0 = phi i32 [ 0, %entry ], [ 1, %bb ], [ 0, %cond_next ], [ 1, %bb13 ], [ 0, %cond_next18 ], [ 1, %bb27 ], [ 0, %bb35 ] ; <i32> [#uses=1]
- ret i32 %result.0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/phys-reg-local-regalloc.ll b/libclamav/c++/llvm/test/CodeGen/X86/phys-reg-local-regalloc.ll
deleted file mode 100644
index 045841e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/phys-reg-local-regalloc.ll
+++ /dev/null
@@ -1,51 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-apple-darwin9 -regalloc=local | FileCheck %s
-; RUN: llc -O0 < %s -march=x86 -mtriple=i386-apple-darwin9 -regalloc=local | FileCheck %s
-; CHECKed instructions should be the same with or without -O0.
-
-@.str = private constant [12 x i8] c"x + y = %i\0A\00", align 1 ; <[12 x i8]*> [#uses=1]
-
-define i32 @main() nounwind {
-entry:
-; CHECK: movl 24(%esp), %eax
-; CHECK-NOT: movl
-; CHECK: movl %eax, 36(%esp)
-; CHECK-NOT: movl
-; CHECK: movl 28(%esp), %ebx
-; CHECK-NOT: movl
-; CHECK: movl %ebx, 40(%esp)
-; CHECK-NOT: movl
-; CHECK: addl %ebx, %eax
- %retval = alloca i32 ; <i32*> [#uses=2]
- %"%ebx" = alloca i32 ; <i32*> [#uses=1]
- %"%eax" = alloca i32 ; <i32*> [#uses=2]
- %result = alloca i32 ; <i32*> [#uses=2]
- %y = alloca i32 ; <i32*> [#uses=2]
- %x = alloca i32 ; <i32*> [#uses=2]
- %0 = alloca i32 ; <i32*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 1, i32* %x, align 4
- store i32 2, i32* %y, align 4
- call void asm sideeffect alignstack "# top of block", "~{dirflag},~{fpsr},~{flags},~{edi},~{esi},~{edx},~{ecx},~{eax}"() nounwind
- %asmtmp = call i32 asm sideeffect alignstack "movl $1, $0", "=={eax},*m,~{dirflag},~{fpsr},~{flags},~{memory}"(i32* %x) nounwind ; <i32> [#uses=1]
- store i32 %asmtmp, i32* %"%eax"
- %asmtmp1 = call i32 asm sideeffect alignstack "movl $1, $0", "=={ebx},*m,~{dirflag},~{fpsr},~{flags},~{memory}"(i32* %y) nounwind ; <i32> [#uses=1]
- store i32 %asmtmp1, i32* %"%ebx"
- %1 = call i32 asm "", "={bx}"() nounwind ; <i32> [#uses=1]
- %2 = call i32 asm "", "={ax}"() nounwind ; <i32> [#uses=1]
- %asmtmp2 = call i32 asm sideeffect alignstack "addl $1, $0", "=={eax},{ebx},{eax},~{dirflag},~{fpsr},~{flags},~{memory}"(i32 %1, i32 %2) nounwind ; <i32> [#uses=1]
- store i32 %asmtmp2, i32* %"%eax"
- %3 = call i32 asm "", "={ax}"() nounwind ; <i32> [#uses=1]
- call void asm sideeffect alignstack "movl $0, $1", "{eax},*m,~{dirflag},~{fpsr},~{flags},~{memory}"(i32 %3, i32* %result) nounwind
- %4 = load i32* %result, align 4 ; <i32> [#uses=1]
- %5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), i32 %4) nounwind ; <i32> [#uses=0]
- store i32 0, i32* %0, align 4
- %6 = load i32* %0, align 4 ; <i32> [#uses=1]
- store i32 %6, i32* %retval, align 4
- br label %return
-
-return: ; preds = %entry
- %retval3 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval3
-}
-
-declare i32 @printf(i8*, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/phys_subreg_coalesce-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/phys_subreg_coalesce-2.ll
deleted file mode 100644
index 23c509c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/phys_subreg_coalesce-2.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | count 5
-; PR2659
-
-define i32 @binomial(i32 %n, i32 %k) nounwind {
-entry:
- %cmp = icmp ugt i32 %k, %n ; <i1> [#uses=1]
- br i1 %cmp, label %ifthen, label %forcond.preheader
-
-forcond.preheader: ; preds = %entry
- %cmp44 = icmp eq i32 %k, 0 ; <i1> [#uses=1]
- br i1 %cmp44, label %afterfor, label %forbody
-
-ifthen: ; preds = %entry
- ret i32 0
-
-forbody: ; preds = %forbody, %forcond.preheader
- %indvar = phi i32 [ 0, %forcond.preheader ], [ %divisor.02, %forbody ] ; <i32> [#uses=3]
- %accumulator.01 = phi i32 [ 1, %forcond.preheader ], [ %div, %forbody ] ; <i32> [#uses=1]
- %divisor.02 = add i32 %indvar, 1 ; <i32> [#uses=2]
- %n.addr.03 = sub i32 %n, %indvar ; <i32> [#uses=1]
- %mul = mul i32 %n.addr.03, %accumulator.01 ; <i32> [#uses=1]
- %div = udiv i32 %mul, %divisor.02 ; <i32> [#uses=2]
- %inc = add i32 %indvar, 2 ; <i32> [#uses=1]
- %cmp4 = icmp ugt i32 %inc, %k ; <i1> [#uses=1]
- br i1 %cmp4, label %afterfor, label %forbody
-
-afterfor: ; preds = %forbody, %forcond.preheader
- %accumulator.0.lcssa = phi i32 [ 1, %forcond.preheader ], [ %div, %forbody ] ; <i32> [#uses=1]
- ret i32 %accumulator.0.lcssa
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/phys_subreg_coalesce.ll b/libclamav/c++/llvm/test/CodeGen/X86/phys_subreg_coalesce.ll
deleted file mode 100644
index 2c855ce..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/phys_subreg_coalesce.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=+sse2 | not grep movl
-
- %struct.dpoint = type { double, double }
-
-define %struct.dpoint @midpoint(i64 %p1.0, i64 %p2.0) nounwind readnone {
-entry:
- %0 = trunc i64 %p1.0 to i32 ; <i32> [#uses=1]
- %1 = sitofp i32 %0 to double ; <double> [#uses=1]
- %2 = trunc i64 %p2.0 to i32 ; <i32> [#uses=1]
- %3 = sitofp i32 %2 to double ; <double> [#uses=1]
- %4 = fadd double %1, %3 ; <double> [#uses=1]
- %5 = fmul double %4, 5.000000e-01 ; <double> [#uses=1]
- %6 = lshr i64 %p1.0, 32 ; <i64> [#uses=1]
- %7 = trunc i64 %6 to i32 ; <i32> [#uses=1]
- %8 = sitofp i32 %7 to double ; <double> [#uses=1]
- %9 = lshr i64 %p2.0, 32 ; <i64> [#uses=1]
- %10 = trunc i64 %9 to i32 ; <i32> [#uses=1]
- %11 = sitofp i32 %10 to double ; <double> [#uses=1]
- %12 = fadd double %8, %11 ; <double> [#uses=1]
- %13 = fmul double %12, 5.000000e-01 ; <double> [#uses=1]
- %mrv3 = insertvalue %struct.dpoint undef, double %5, 0 ; <%struct.dpoint> [#uses=1]
- %mrv4 = insertvalue %struct.dpoint %mrv3, double %13, 1 ; <%struct.dpoint> [#uses=1]
- ret %struct.dpoint %mrv4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pic-load-remat.ll b/libclamav/c++/llvm/test/CodeGen/X86/pic-load-remat.ll
deleted file mode 100644
index 7729752..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pic-load-remat.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse2 -relocation-model=pic | grep psllw | grep pb
-
-define void @f() nounwind {
-entry:
- br label %bb
-
-bb: ; preds = %bb, %entry
- %tmp4403 = tail call <8 x i16> @llvm.x86.sse2.psubs.w( <8 x i16> zeroinitializer, <8 x i16> zeroinitializer ) nounwind readnone ; <<8 x i16>> [#uses=2]
- %tmp4443 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> zeroinitializer, <8 x i16> zeroinitializer ) nounwind readnone ; <<8 x i16>> [#uses=1]
- %tmp4609 = tail call <8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> zeroinitializer, <8 x i16> bitcast (<4 x i32> < i32 3, i32 5, i32 6, i32 9 > to <8 x i16>) ) ; <<8 x i16>> [#uses=1]
- %tmp4651 = add <8 x i16> %tmp4609, < i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1 > ; <<8 x i16>> [#uses=1]
- %tmp4658 = tail call <8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> %tmp4651, <8 x i16> bitcast (<4 x i32> < i32 4, i32 1, i32 2, i32 3 > to <8 x i16>) ) ; <<8 x i16>> [#uses=1]
- %tmp4669 = tail call <8 x i16> @llvm.x86.sse2.pavg.w( <8 x i16> < i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170 >, <8 x i16> %tmp4443 ) nounwind readnone ; <<8 x i16>> [#uses=2]
- %tmp4679 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> %tmp4669, <8 x i16> %tmp4669 ) nounwind readnone ; <<8 x i16>> [#uses=1]
- %tmp4689 = add <8 x i16> %tmp4679, %tmp4658 ; <<8 x i16>> [#uses=1]
- %tmp4700 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> %tmp4689, <8 x i16> zeroinitializer ) nounwind readnone ; <<8 x i16>> [#uses=1]
- %tmp4708 = bitcast <8 x i16> %tmp4700 to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp4772 = add <8 x i16> zeroinitializer, < i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1 > ; <<8 x i16>> [#uses=1]
- %tmp4779 = tail call <8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> %tmp4772, <8 x i16> bitcast (<4 x i32> < i32 3, i32 5, i32 undef, i32 7 > to <8 x i16>) ) ; <<8 x i16>> [#uses=1]
- %tmp4810 = add <8 x i16> zeroinitializer, %tmp4779 ; <<8 x i16>> [#uses=1]
- %tmp4821 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> %tmp4810, <8 x i16> zeroinitializer ) nounwind readnone ; <<8 x i16>> [#uses=1]
- %tmp4829 = bitcast <8 x i16> %tmp4821 to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp4900 = tail call <8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> zeroinitializer, <8 x i16> bitcast (<4 x i32> < i32 1, i32 1, i32 2, i32 2 > to <8 x i16>) ) ; <<8 x i16>> [#uses=1]
- %tmp4911 = tail call <8 x i16> @llvm.x86.sse2.pavg.w( <8 x i16> < i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170, i16 -23170 >, <8 x i16> zeroinitializer ) nounwind readnone ; <<8 x i16>> [#uses=2]
- %tmp4921 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> %tmp4911, <8 x i16> %tmp4911 ) nounwind readnone ; <<8 x i16>> [#uses=1]
- %tmp4931 = add <8 x i16> %tmp4921, %tmp4900 ; <<8 x i16>> [#uses=1]
- %tmp4942 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> %tmp4931, <8 x i16> zeroinitializer ) nounwind readnone ; <<8 x i16>> [#uses=1]
- %tmp4950 = bitcast <8 x i16> %tmp4942 to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp4957 = tail call <8 x i16> @llvm.x86.sse2.padds.w( <8 x i16> %tmp4403, <8 x i16> zeroinitializer ) nounwind readnone ; <<8 x i16>> [#uses=1]
- %tmp4958 = bitcast <8 x i16> %tmp4957 to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp4967 = tail call <8 x i16> @llvm.x86.sse2.psubs.w( <8 x i16> %tmp4403, <8 x i16> zeroinitializer ) nounwind readnone ; <<8 x i16>> [#uses=1]
- %tmp4968 = bitcast <8 x i16> %tmp4967 to <2 x i64> ; <<2 x i64>> [#uses=1]
- store <2 x i64> %tmp4829, <2 x i64>* null, align 16
- store <2 x i64> %tmp4958, <2 x i64>* null, align 16
- store <2 x i64> %tmp4968, <2 x i64>* null, align 16
- store <2 x i64> %tmp4950, <2 x i64>* null, align 16
- store <2 x i64> %tmp4708, <2 x i64>* null, align 16
- br label %bb
-}
-
-declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pic.ll b/libclamav/c++/llvm/test/CodeGen/X86/pic.ll
deleted file mode 100644
index d3c28a0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pic.ll
+++ /dev/null
@@ -1,208 +0,0 @@
-; RUN: llc < %s -mtriple=i686-pc-linux-gnu -relocation-model=pic -asm-verbose=false -post-RA-scheduler=false | FileCheck %s -check-prefix=LINUX
-
-@ptr = external global i32*
-@dst = external global i32
-@src = external global i32
-
-define void @test1() nounwind {
-entry:
- store i32* @dst, i32** @ptr
- %tmp.s = load i32* @src
- store i32 %tmp.s, i32* @dst
- ret void
-
-; LINUX: test1:
-; LINUX: call .L1$pb
-; LINUX-NEXT: .L1$pb:
-; LINUX-NEXT: popl
-; LINUX: addl $_GLOBAL_OFFSET_TABLE_+(.Lpicbaseref1-.L1$pb),
-; LINUX: movl dst@GOT(%eax),
-; LINUX: movl ptr@GOT(%eax),
-; LINUX: movl src@GOT(%eax),
-; LINUX: ret
-}
-
-@ptr2 = global i32* null
-@dst2 = global i32 0
-@src2 = global i32 0
-
-define void @test2() nounwind {
-entry:
- store i32* @dst2, i32** @ptr2
- %tmp.s = load i32* @src2
- store i32 %tmp.s, i32* @dst2
- ret void
-
-; LINUX: test2:
-; LINUX: call .L2$pb
-; LINUX-NEXT: .L2$pb:
-; LINUX-NEXT: popl
-; LINUX: addl $_GLOBAL_OFFSET_TABLE_+(.Lpicbaseref2-.L2$pb), %eax
-; LINUX: movl dst2@GOT(%eax),
-; LINUX: movl ptr2@GOT(%eax),
-; LINUX: movl src2@GOT(%eax),
-; LINUX: ret
-
-}
-
-declare i8* @malloc(i32)
-
-define void @test3() nounwind {
-entry:
- %ptr = call i8* @malloc(i32 40)
- ret void
-; LINUX: test3:
-; LINUX: pushl %ebx
-; LINUX-NEXT: subl $8, %esp
-; LINUX-NEXT: call .L3$pb
-; LINUX-NEXT: .L3$pb:
-; LINUX-NEXT: popl %ebx
-; LINUX: addl $_GLOBAL_OFFSET_TABLE_+(.Lpicbaseref3-.L3$pb), %ebx
-; LINUX: movl $40, (%esp)
-; LINUX: call malloc@PLT
-; LINUX: addl $8, %esp
-; LINUX: popl %ebx
-; LINUX: ret
-}
-
-@pfoo = external global void(...)*
-
-define void @test4() nounwind {
-entry:
- %tmp = call void(...)*(...)* @afoo()
- store void(...)* %tmp, void(...)** @pfoo
- %tmp1 = load void(...)** @pfoo
- call void(...)* %tmp1()
- ret void
-; LINUX: test4:
-; LINUX: call .L4$pb
-; LINUX-NEXT: .L4$pb:
-; LINUX: popl
-; LINUX: addl $_GLOBAL_OFFSET_TABLE_+(.Lpicbaseref4-.L4$pb),
-; LINUX: movl pfoo@GOT(%esi),
-; LINUX: call afoo@PLT
-; LINUX: call *
-}
-
-declare void(...)* @afoo(...)
-
-define void @test5() nounwind {
-entry:
- call void(...)* @foo()
- ret void
-; LINUX: test5:
-; LINUX: call .L5$pb
-; LINUX: popl %ebx
-; LINUX: addl $_GLOBAL_OFFSET_TABLE_+(.Lpicbaseref5-.L5$pb), %ebx
-; LINUX: call foo@PLT
-}
-
-declare void @foo(...)
-
-
-@ptr6 = internal global i32* null
-@dst6 = internal global i32 0
-@src6 = internal global i32 0
-
-define void @test6() nounwind {
-entry:
- store i32* @dst6, i32** @ptr6
- %tmp.s = load i32* @src6
- store i32 %tmp.s, i32* @dst6
- ret void
-
-; LINUX: test6:
-; LINUX: call .L6$pb
-; LINUX-NEXT: .L6$pb:
-; LINUX-NEXT: popl %eax
-; LINUX: addl $_GLOBAL_OFFSET_TABLE_+(.Lpicbaseref6-.L6$pb), %eax
-; LINUX: leal dst6@GOTOFF(%eax), %ecx
-; LINUX: movl %ecx, ptr6@GOTOFF(%eax)
-; LINUX: movl src6@GOTOFF(%eax), %ecx
-; LINUX: movl %ecx, dst6@GOTOFF(%eax)
-; LINUX: ret
-}
-
-
-;; Test constant pool references.
-define double @test7(i32 %a.u) nounwind {
-entry:
- %tmp = icmp eq i32 %a.u,0
- %retval = select i1 %tmp, double 4.561230e+02, double 1.234560e+02
- ret double %retval
-
-; LINUX: .LCPI7_0:
-
-; LINUX: test7:
-; LINUX: call .L7$pb
-; LINUX: .L7$pb:
-; LINUX: addl $_GLOBAL_OFFSET_TABLE_+(.Lpicbaseref7-.L7$pb),
-; LINUX: fldl .LCPI7_0@GOTOFF(
-}
-
-
-;; Test jump table references.
-define void @test8(i32 %n.u) nounwind {
-entry:
- switch i32 %n.u, label %bb12 [i32 1, label %bb i32 2, label %bb6 i32 4, label %bb7 i32 5, label %bb8 i32 6, label %bb10 i32 7, label %bb1 i32 8, label %bb3 i32 9, label %bb4 i32 10, label %bb9 i32 11, label %bb2 i32 12, label %bb5 i32 13, label %bb11 ]
-bb:
- tail call void(...)* @foo1()
- ret void
-bb1:
- tail call void(...)* @foo2()
- ret void
-bb2:
- tail call void(...)* @foo6()
- ret void
-bb3:
- tail call void(...)* @foo3()
- ret void
-bb4:
- tail call void(...)* @foo4()
- ret void
-bb5:
- tail call void(...)* @foo5()
- ret void
-bb6:
- tail call void(...)* @foo1()
- ret void
-bb7:
- tail call void(...)* @foo2()
- ret void
-bb8:
- tail call void(...)* @foo6()
- ret void
-bb9:
- tail call void(...)* @foo3()
- ret void
-bb10:
- tail call void(...)* @foo4()
- ret void
-bb11:
- tail call void(...)* @foo5()
- ret void
-bb12:
- tail call void(...)* @foo6()
- ret void
-
-; LINUX: test8:
-; LINUX: call .L8$pb
-; LINUX: .L8$pb:
-; LINUX: addl $_GLOBAL_OFFSET_TABLE_+(.Lpicbaseref8-.L8$pb),
-; LINUX: addl .LJTI8_0@GOTOFF(
-; LINUX: jmpl *
-
-; LINUX: .LJTI8_0:
-; LINUX: .long .LBB8_2@GOTOFF
-; LINUX: .long .LBB8_2@GOTOFF
-; LINUX: .long .LBB8_7@GOTOFF
-; LINUX: .long .LBB8_3@GOTOFF
-; LINUX: .long .LBB8_7@GOTOFF
-}
-
-declare void @foo1(...)
-declare void @foo2(...)
-declare void @foo6(...)
-declare void @foo3(...)
-declare void @foo4(...)
-declare void @foo5(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pic_jumptable.ll b/libclamav/c++/llvm/test/CodeGen/X86/pic_jumptable.ll
deleted file mode 100644
index b3750c1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pic_jumptable.ll
+++ /dev/null
@@ -1,78 +0,0 @@
-; RUN: llc < %s -relocation-model=pic -mtriple=i386-linux-gnu -asm-verbose=false | not grep -F .text
-; RUN: llc < %s -relocation-model=pic -mtriple=i686-apple-darwin -asm-verbose=false | not grep lea
-; RUN: llc < %s -relocation-model=pic -mtriple=i686-apple-darwin -asm-verbose=false | grep add | count 2
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | not grep 'lJTI'
-; rdar://6971437
-
-declare void @_Z3bari(i32)
-
-define linkonce void @_Z3fooILi1EEvi(i32 %Y) nounwind {
-entry:
- %Y_addr = alloca i32 ; <i32*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i32 %Y, i32* %Y_addr
- %tmp = load i32* %Y_addr ; <i32> [#uses=1]
- switch i32 %tmp, label %bb10 [
- i32 0, label %bb3
- i32 1, label %bb
- i32 2, label %bb
- i32 3, label %bb
- i32 4, label %bb
- i32 5, label %bb
- i32 6, label %bb
- i32 7, label %bb
- i32 8, label %bb
- i32 9, label %bb
- i32 10, label %bb
- i32 12, label %bb1
- i32 13, label %bb5
- i32 14, label %bb6
- i32 16, label %bb2
- i32 17, label %bb4
- i32 23, label %bb8
- i32 27, label %bb7
- i32 34, label %bb9
- ]
-
-bb: ; preds = %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry
- br label %bb1
-
-bb1: ; preds = %bb, %entry
- br label %bb2
-
-bb2: ; preds = %bb1, %entry
- call void @_Z3bari( i32 1 )
- br label %bb11
-
-bb3: ; preds = %entry
- br label %bb4
-
-bb4: ; preds = %bb3, %entry
- br label %bb5
-
-bb5: ; preds = %bb4, %entry
- br label %bb6
-
-bb6: ; preds = %bb5, %entry
- call void @_Z3bari( i32 2 )
- br label %bb11
-
-bb7: ; preds = %entry
- br label %bb8
-
-bb8: ; preds = %bb7, %entry
- br label %bb9
-
-bb9: ; preds = %bb8, %entry
- call void @_Z3bari( i32 3 )
- br label %bb11
-
-bb10: ; preds = %entry
- br label %bb11
-
-bb11: ; preds = %bb10, %bb9, %bb6, %bb2
- br label %return
-
-return: ; preds = %bb11
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pmul.ll b/libclamav/c++/llvm/test/CodeGen/X86/pmul.ll
deleted file mode 100644
index e2746a8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pmul.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=sse41 -stack-alignment=16 > %t
-; RUN: grep pmul %t | count 12
-; RUN: grep mov %t | count 12
-
-define <4 x i32> @a(<4 x i32> %i) nounwind {
- %A = mul <4 x i32> %i, < i32 117, i32 117, i32 117, i32 117 >
- ret <4 x i32> %A
-}
-define <2 x i64> @b(<2 x i64> %i) nounwind {
- %A = mul <2 x i64> %i, < i64 117, i64 117 >
- ret <2 x i64> %A
-}
-define <4 x i32> @c(<4 x i32> %i, <4 x i32> %j) nounwind {
- %A = mul <4 x i32> %i, %j
- ret <4 x i32> %A
-}
-define <2 x i64> @d(<2 x i64> %i, <2 x i64> %j) nounwind {
- %A = mul <2 x i64> %i, %j
- ret <2 x i64> %A
-}
-; Use a call to force spills.
-declare void @foo()
-define <4 x i32> @e(<4 x i32> %i, <4 x i32> %j) nounwind {
- call void @foo()
- %A = mul <4 x i32> %i, %j
- ret <4 x i32> %A
-}
-define <2 x i64> @f(<2 x i64> %i, <2 x i64> %j) nounwind {
- call void @foo()
- %A = mul <2 x i64> %i, %j
- ret <2 x i64> %A
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/postalloc-coalescing.ll b/libclamav/c++/llvm/test/CodeGen/X86/postalloc-coalescing.ll
deleted file mode 100644
index a171436..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/postalloc-coalescing.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | count 3
-
-define fastcc i32 @_Z18yy_get_next_bufferv() {
-entry:
- br label %bb131
-
-bb116: ; preds = %bb131
- %tmp125126 = trunc i32 %c.1 to i8 ; <i8> [#uses=1]
- store i8 %tmp125126, i8* null, align 1
- br label %bb131
-
-bb131: ; preds = %bb116, %entry
- %c.2 = phi i32 [ %c.1, %bb116 ], [ 42, %entry ] ; <i32> [#uses=1]
- %c.1 = select i1 false, i32 0, i32 %c.2 ; <i32> [#uses=4]
- %tmp181 = icmp eq i32 %c.1, -1 ; <i1> [#uses=1]
- br i1 %tmp181, label %bb158, label %bb116
-
-bb158: ; preds = %bb131
- br i1 true, label %cond_true163, label %cond_next178
-
-cond_true163: ; preds = %bb158
- %tmp172173 = trunc i32 %c.1 to i8 ; <i8> [#uses=1]
- store i8 %tmp172173, i8* null, align 1
- br label %cond_next178
-
-cond_next178: ; preds = %cond_true163, %bb158
- %tmp180 = icmp eq i32 %c.1, -1 ; <i1> [#uses=1]
- br i1 %tmp180, label %cond_next184, label %cond_next199
-
-cond_next184: ; preds = %cond_next178
- ret i32 0
-
-cond_next199: ; preds = %cond_next178
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/powi.ll b/libclamav/c++/llvm/test/CodeGen/X86/powi.ll
deleted file mode 100644
index c3d6831..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/powi.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc %s -march=x86 -mcpu=yonah -o - | grep mulsd | count 6
-; Ideally this would compile to 5 multiplies.
-
-define double @_Z3f10d(double %a) nounwind readonly ssp noredzone {
-entry:
- %0 = tail call double @llvm.powi.f64(double %a, i32 15) nounwind ; <double> [#uses=1]
- ret double %0
-}
-
-declare double @llvm.powi.f64(double, i32) nounwind readonly
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr1462.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr1462.ll
deleted file mode 100644
index 62549a5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr1462.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s
-; PR1462
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-
-v64:64:64-v128:128:128-a0:0:64"
-target triple = "x86_64-unknown-linux-gnu"
-
-define hidden i128 @__addvti3(i128 %a1, i128 %b2) {
-entry:
- %tmp8 = add i128 %b2, %a1 ; <i128> [#uses=3]
- %tmp10 = icmp sgt i128 %b2, -1 ; <i1> [#uses=1]
- %tmp18 = icmp sgt i128 %tmp8, %a1 ; <i1> [#uses=1]
- %tmp14 = icmp slt i128 %tmp8, %a1 ; <i1> [#uses=1]
- %iftmp.0.0.in = select i1 %tmp10, i1 %tmp14, i1 %tmp18 ; <i1> [#uses=1]
- br i1 %iftmp.0.0.in, label %cond_true22, label %cond_next23
-
-cond_true22: ; preds = %entry
- tail call void @abort( )
- unreachable
-
-cond_next23: ; preds = %entry
- ret i128 %tmp8
-}
-
-declare void @abort()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr1489.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr1489.ll
deleted file mode 100644
index c9e24bf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr1489.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; RUN: llc < %s -disable-fp-elim -O0 -mcpu=i486 | grep 1082126238 | count 3
-; RUN: llc < %s -disable-fp-elim -O0 -mcpu=i486 | grep -- -1236950581 | count 1
-;; magic constants are 3.999f and half of 3.999
-; ModuleID = '1489.c'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
-@.str = internal constant [13 x i8] c"%d %d %d %d\0A\00" ; <[13 x i8]*> [#uses=1]
-
-define i32 @quux() nounwind {
-entry:
- %tmp1 = tail call i32 @lrintf( float 0x400FFDF3C0000000 ) ; <i32> [#uses=1]
- %tmp2 = icmp slt i32 %tmp1, 1 ; <i1> [#uses=1]
- %tmp23 = zext i1 %tmp2 to i32 ; <i32> [#uses=1]
- ret i32 %tmp23
-}
-
-declare i32 @lrintf(float)
-
-define i32 @foo() nounwind {
-entry:
- %tmp1 = tail call i32 @lrint( double 3.999000e+00 ) ; <i32> [#uses=1]
- %tmp2 = icmp slt i32 %tmp1, 1 ; <i1> [#uses=1]
- %tmp23 = zext i1 %tmp2 to i32 ; <i32> [#uses=1]
- ret i32 %tmp23
-}
-
-declare i32 @lrint(double)
-
-define i32 @bar() nounwind {
-entry:
- %tmp1 = tail call i32 @lrintf( float 0x400FFDF3C0000000 ) ; <i32> [#uses=1]
- %tmp2 = icmp slt i32 %tmp1, 1 ; <i1> [#uses=1]
- %tmp23 = zext i1 %tmp2 to i32 ; <i32> [#uses=1]
- ret i32 %tmp23
-}
-
-define i32 @baz() nounwind {
-entry:
- %tmp1 = tail call i32 @lrintf( float 0x400FFDF3C0000000 ) ; <i32> [#uses=1]
- %tmp2 = icmp slt i32 %tmp1, 1 ; <i1> [#uses=1]
- %tmp23 = zext i1 %tmp2 to i32 ; <i32> [#uses=1]
- ret i32 %tmp23
-}
-
-define i32 @main() nounwind {
-entry:
- %tmp = tail call i32 @baz( ) ; <i32> [#uses=1]
- %tmp1 = tail call i32 @bar( ) ; <i32> [#uses=1]
- %tmp2 = tail call i32 @foo( ) ; <i32> [#uses=1]
- %tmp3 = tail call i32 @quux( ) ; <i32> [#uses=1]
- %tmp5 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([13 x i8]* @.str, i32 0, i32 0), i32 %tmp3, i32 %tmp2, i32 %tmp1, i32 %tmp ) ; <i32> [#uses=0]
- ret i32 undef
-}
-
-declare i32 @printf(i8*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr1505.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr1505.ll
deleted file mode 100644
index 883a806..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr1505.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -mcpu=i486 | not grep fldl
-; PR1505
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
-@G = weak global float 0.000000e+00 ; <float*> [#uses=1]
-
-define void @t1(float %F) {
-entry:
- store float %F, float* @G
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr1505b.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr1505b.ll
deleted file mode 100644
index 6a08dae..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr1505b.ll
+++ /dev/null
@@ -1,59 +0,0 @@
-; RUN: llc < %s -mcpu=i486 | grep fstpl | count 5
-; RUN: llc < %s -mcpu=i486 | grep fstps | count 2
-; PR1505
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
- %"struct.std::basic_ios<char,std::char_traits<char> >" = type { %"struct.std::ios_base", %"struct.std::basic_ostream<char,std::char_traits<char> >"*, i8, i8, %"struct.std::basic_streambuf<char,std::char_traits<char> >"*, %"struct.std::ctype<char>"*, %"struct.std::num_get<char,std::istreambuf_iterator<char, std::char_traits<char> > >"*, %"struct.std::num_get<char,std::istreambuf_iterator<char, std::char_traits<char> > >"* }
- %"struct.std::basic_ostream<char,std::char_traits<char> >" = type { i32 (...)**, %"struct.std::basic_ios<char,std::char_traits<char> >" }
- %"struct.std::basic_streambuf<char,std::char_traits<char> >" = type { i32 (...)**, i8*, i8*, i8*, i8*, i8*, i8*, %"struct.std::locale" }
- %"struct.std::ctype<char>" = type { %"struct.std::locale::facet", i32*, i8, i32*, i32*, i32*, i8, [256 x i8], [256 x i8], i8 }
- %"struct.std::ctype_base" = type <{ i8 }>
- %"struct.std::ios_base" = type { i32 (...)**, i32, i32, i32, i32, i32, %"struct.std::ios_base::_Callback_list"*, %"struct.std::ios_base::_Words", [8 x %"struct.std::ios_base::_Words"], i32, %"struct.std::ios_base::_Words"*, %"struct.std::locale" }
- %"struct.std::ios_base::_Callback_list" = type { %"struct.std::ios_base::_Callback_list"*, void (i32, %"struct.std::ios_base"*, i32)*, i32, i32 }
- %"struct.std::ios_base::_Words" = type { i8*, i32 }
- %"struct.std::locale" = type { %"struct.std::locale::_Impl"* }
- %"struct.std::locale::_Impl" = type { i32, %"struct.std::locale::facet"**, i32, %"struct.std::locale::facet"**, i8** }
- %"struct.std::locale::facet" = type { i32 (...)**, i32 }
- %"struct.std::num_get<char,std::istreambuf_iterator<char, std::char_traits<char> > >" = type { %"struct.std::locale::facet" }
-@a = global float 0x3FD3333340000000 ; <float*> [#uses=1]
-@b = global double 6.000000e-01, align 8 ; <double*> [#uses=1]
-@_ZSt8__ioinit = internal global %"struct.std::ctype_base" zeroinitializer ; <%"struct.std::ctype_base"*> [#uses=2]
-@__dso_handle = external global i8* ; <i8**> [#uses=1]
-@_ZSt4cout = external global %"struct.std::basic_ostream<char,std::char_traits<char> >" ; <%"struct.std::basic_ostream<char,std::char_traits<char> >"*> [#uses=2]
-@.str = internal constant [12 x i8] c"tan float: \00" ; <[12 x i8]*> [#uses=1]
-@.str1 = internal constant [13 x i8] c"tan double: \00" ; <[13 x i8]*> [#uses=1]
-
-declare void @_ZNSt8ios_base4InitD1Ev(%"struct.std::ctype_base"*)
-
-declare void @_ZNSt8ios_base4InitC1Ev(%"struct.std::ctype_base"*)
-
-declare i32 @__cxa_atexit(void (i8*)*, i8*, i8*)
-
-define i32 @main() {
-entry:
- %tmp6 = volatile load float* @a ; <float> [#uses=1]
- %tmp9 = tail call float @tanf( float %tmp6 ) ; <float> [#uses=1]
- %tmp12 = volatile load double* @b ; <double> [#uses=1]
- %tmp13 = tail call double @tan( double %tmp12 ) ; <double> [#uses=1]
- %tmp1314 = fptrunc double %tmp13 to float ; <float> [#uses=1]
- %tmp16 = tail call %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc( %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZSt4cout, i8* getelementptr ([12 x i8]* @.str, i32 0, i32 0) ) ; <%"struct.std::basic_ostream<char,std::char_traits<char> >"*> [#uses=1]
- %tmp1920 = fpext float %tmp9 to double ; <double> [#uses=1]
- %tmp22 = tail call %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZNSolsEd( %"struct.std::basic_ostream<char,std::char_traits<char> >"* %tmp16, double %tmp1920 ) ; <%"struct.std::basic_ostream<char,std::char_traits<char> >"*> [#uses=1]
- %tmp30 = tail call %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_( %"struct.std::basic_ostream<char,std::char_traits<char> >"* %tmp22 ) ; <%"struct.std::basic_ostream<char,std::char_traits<char> >"*> [#uses=0]
- %tmp34 = tail call %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc( %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZSt4cout, i8* getelementptr ([13 x i8]* @.str1, i32 0, i32 0) ) ; <%"struct.std::basic_ostream<char,std::char_traits<char> >"*> [#uses=1]
- %tmp3940 = fpext float %tmp1314 to double ; <double> [#uses=1]
- %tmp42 = tail call %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZNSolsEd( %"struct.std::basic_ostream<char,std::char_traits<char> >"* %tmp34, double %tmp3940 ) ; <%"struct.std::basic_ostream<char,std::char_traits<char> >"*> [#uses=1]
- %tmp51 = tail call %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_( %"struct.std::basic_ostream<char,std::char_traits<char> >"* %tmp42 ) ; <%"struct.std::basic_ostream<char,std::char_traits<char> >"*> [#uses=0]
- ret i32 0
-}
-
-declare float @tanf(float)
-
-declare double @tan(double)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, i8*)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZNSolsEd(%"struct.std::basic_ostream<char,std::char_traits<char> >"*, double)
-
-declare %"struct.std::basic_ostream<char,std::char_traits<char> >"* @_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_(%"struct.std::basic_ostream<char,std::char_traits<char> >"*)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr2177.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr2177.ll
deleted file mode 100644
index e941bf7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr2177.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s
-; PR2177
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin9.1.0"
- %struct.S2259 = type { <4 x i16>, i8, i64 }
-
-define void @check2259va(i32 %z, ...) {
-entry:
- br i1 false, label %bb5, label %return
-bb5: ; preds = %entry
- switch i32 0, label %bb155 [
- i32 16, label %bb10
- i32 17, label %bb118
- i32 18, label %bb54
- i32 32, label %bb118
- i32 33, label %bb118
- i32 36, label %bb118
- ]
-bb10: ; preds = %bb5
- ret void
-bb54: ; preds = %bb5
- ret void
-bb118: ; preds = %bb5, %bb5, %bb5, %bb5
- %tmp125 = load i8** null, align 8 ; <i8*> [#uses=1]
- %tmp125126 = bitcast i8* %tmp125 to %struct.S2259* ; <%struct.S2259*> [#uses=1]
- %tmp128 = getelementptr %struct.S2259* %tmp125126, i32 0, i32 0 ; <<4 x i16>*> [#uses=1]
- %tmp129 = load <4 x i16>* %tmp128, align 8 ; <<4 x i16>> [#uses=1]
- store <4 x i16> %tmp129, <4 x i16>* null, align 8
- ret void
-bb155: ; preds = %bb5
- ret void
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr2182.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr2182.ll
deleted file mode 100644
index f97663c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr2182.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s | grep {addl \$3, (%eax)} | count 4
-; PR2182
-
-target datalayout =
-"e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
-@x = weak global i32 0 ; <i32*> [#uses=8]
-
-define void @loop_2() nounwind {
-entry:
- %tmp = volatile load i32* @x, align 4 ; <i32> [#uses=1]
- %tmp1 = add i32 %tmp, 3 ; <i32> [#uses=1]
- volatile store i32 %tmp1, i32* @x, align 4
- %tmp.1 = volatile load i32* @x, align 4 ; <i32> [#uses=1]
- %tmp1.1 = add i32 %tmp.1, 3 ; <i32> [#uses=1]
- volatile store i32 %tmp1.1, i32* @x, align 4
- %tmp.2 = volatile load i32* @x, align 4 ; <i32> [#uses=1]
- %tmp1.2 = add i32 %tmp.2, 3 ; <i32> [#uses=1]
- volatile store i32 %tmp1.2, i32* @x, align 4
- %tmp.3 = volatile load i32* @x, align 4 ; <i32> [#uses=1]
- %tmp1.3 = add i32 %tmp.3, 3 ; <i32> [#uses=1]
- volatile store i32 %tmp1.3, i32* @x, align 4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr2326.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr2326.ll
deleted file mode 100644
index f82dcb5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr2326.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 | grep sete
-; PR2326
-
-define i32 @func_59(i32 %p_60) nounwind {
-entry:
- %l_108 = alloca i32 ; <i32*> [#uses=2]
- %tmp15 = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp16 = load i32* %l_108, align 4 ; <i32> [#uses=1]
- %tmp17 = icmp eq i32 %tmp15, %tmp16 ; <i1> [#uses=1]
- %tmp1718 = zext i1 %tmp17 to i8 ; <i8> [#uses=1]
- %tmp19 = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp20 = load i32* %l_108, align 4 ; <i32> [#uses=1]
- %tmp21 = icmp ule i32 %tmp19, %tmp20 ; <i1> [#uses=1]
- %tmp2122 = zext i1 %tmp21 to i8 ; <i8> [#uses=1]
- %toBool23 = icmp ne i8 %tmp1718, 0 ; <i1> [#uses=1]
- %toBool24 = icmp ne i8 %tmp2122, 0 ; <i1> [#uses=1]
- %tmp25 = and i1 %toBool23, %toBool24 ; <i1> [#uses=1]
- %tmp2526 = zext i1 %tmp25 to i8 ; <i8> [#uses=1]
- %tmp252627 = zext i8 %tmp2526 to i32 ; <i32> [#uses=1]
- %tmp29 = call i32 (...)* @func_15( i32 %tmp252627, i32 0 ) nounwind ; <i32> [#uses=0]
- unreachable
-}
-
-declare i32 @func_15(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr2623.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr2623.ll
deleted file mode 100644
index 5d0eb5d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr2623.ll
+++ /dev/null
@@ -1,44 +0,0 @@
-; RUN: llc < %s
-; PR2623
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-unknown-freebsd7.0"
- %.objc_id = type { %.objc_id }*
- %.objc_selector = type { i8*, i8* }*
-@.objc_sel_ptr = external constant %.objc_selector ; <%.objc_selector*> [#uses=1]
-@.objc_sel_ptr13 = external constant %.objc_selector ; <%.objc_selector*> [#uses=1]
-@.objc_sel_ptr14 = external constant %.objc_selector ; <%.objc_selector*> [#uses=1]
-@.objc_sel_ptr15 = external constant %.objc_selector ; <%.objc_selector*> [#uses=1]
-@.objc_sel_ptr16 = external constant %.objc_selector ; <%.objc_selector*> [#uses=1]
-@.objc_sel_ptr17 = external constant %.objc_selector ; <%.objc_selector*> [#uses=1]
-@.objc_sel_ptr18 = external constant %.objc_selector ; <%.objc_selector*> [#uses=1]
-@.objc_sel_ptr19 = external constant %.objc_selector ; <%.objc_selector*> [#uses=1]
-@.objc_sel_ptr20 = external constant %.objc_selector ; <%.objc_selector*> [#uses=1]
-@.objc_sel_ptr21 = external constant %.objc_selector ; <%.objc_selector*> [#uses=1]
-
-@.objc_untyped_selector_alias = alias internal %.objc_selector* @.objc_sel_ptr15 ; <%.objc_selector*> [#uses=0]
-@.objc_untyped_selector_alias1 = alias internal %.objc_selector* @.objc_sel_ptr ; <%.objc_selector*> [#uses=0]
-@.objc_untyped_selector_alias2 = alias internal %.objc_selector* @.objc_sel_ptr17 ; <%.objc_selector*> [#uses=0]
-@.objc_untyped_selector_alias3 = alias internal %.objc_selector* @.objc_sel_ptr16 ; <%.objc_selector*> [#uses=0]
-@.objc_untyped_selector_alias4 = alias internal %.objc_selector* @.objc_sel_ptr13 ; <%.objc_selector*> [#uses=0]
-@.objc_untyped_selector_alias7 = alias internal %.objc_selector* @.objc_sel_ptr14 ; <%.objc_selector*> [#uses=0]
-@getRange = alias internal %.objc_selector* @.objc_sel_ptr18 ; <%.objc_selector*> [#uses=0]
-@"valueWithRange:" = alias internal %.objc_selector* @.objc_sel_ptr21 ; <%.objc_selector*> [#uses=0]
-@rangeValue = alias internal %.objc_selector* @.objc_sel_ptr20 ; <%.objc_selector*> [#uses=0]
-@"printRange:" = alias internal %.objc_selector* @.objc_sel_ptr19 ; <%.objc_selector*> [#uses=0]
-
-define void @"._objc_method_SmalltalkTool()-run"(i8* %self, %.objc_selector %_cmd) {
-entry:
- br i1 false, label %small_int_messagerangeValue, label %real_object_messagerangeValue
-
-small_int_messagerangeValue: ; preds = %entry
- br label %Continue
-
-real_object_messagerangeValue: ; preds = %entry
- br label %Continue
-
-Continue: ; preds = %real_object_messagerangeValue, %small_int_messagerangeValue
- %rangeValue = phi { i32, i32 } [ undef, %small_int_messagerangeValue ], [ undef, %real_object_messagerangeValue ] ; <{ i32, i32 }> [#uses=1]
- call void (%.objc_id, %.objc_selector, ...)* null( %.objc_id null, %.objc_selector null, { i32, i32 } %rangeValue )
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr2656.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr2656.ll
deleted file mode 100644
index afd7114..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr2656.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep {xorps.\*sp} | count 1
-; PR2656
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin9.4.0"
- %struct.anon = type <{ float, float }>
-@.str = internal constant [17 x i8] c"pt: %.0f, %.0f\0A\00\00" ; <[17 x i8]*> [#uses=1]
-
-define void @foo(%struct.anon* byval %p) nounwind {
-entry:
- %tmp = getelementptr %struct.anon* %p, i32 0, i32 0 ; <float*> [#uses=1]
- %tmp1 = load float* %tmp ; <float> [#uses=1]
- %tmp2 = getelementptr %struct.anon* %p, i32 0, i32 1 ; <float*> [#uses=1]
- %tmp3 = load float* %tmp2 ; <float> [#uses=1]
- %neg = fsub float -0.000000e+00, %tmp1 ; <float> [#uses=1]
- %conv = fpext float %neg to double ; <double> [#uses=1]
- %neg4 = fsub float -0.000000e+00, %tmp3 ; <float> [#uses=1]
- %conv5 = fpext float %neg4 to double ; <double> [#uses=1]
- %call = call i32 (...)* @printf( i8* getelementptr ([17 x i8]* @.str, i32 0, i32 0), double %conv, double %conv5 ) ; <i32> [#uses=0]
- ret void
-}
-
-declare i32 @printf(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr2659.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr2659.ll
deleted file mode 100644
index 0760e4c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr2659.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-apple-darwin9.4.0 | grep movl | count 5
-; PR2659
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin9.4.0"
-
-define i32 @binomial(i32 %n, i32 %k) nounwind {
-entry:
- %cmp = icmp ugt i32 %k, %n ; <i1> [#uses=1]
- br i1 %cmp, label %ifthen, label %forcond.preheader
-
-forcond.preheader: ; preds = %entry
- %cmp44 = icmp eq i32 %k, 0 ; <i1> [#uses=1]
- br i1 %cmp44, label %afterfor, label %forbody
-
-ifthen: ; preds = %entry
- ret i32 0
-
-forbody: ; preds = %forbody, %forcond.preheader
- %indvar = phi i32 [ 0, %forcond.preheader ], [ %divisor.02, %forbody ] ; <i32> [#uses=3]
- %accumulator.01 = phi i32 [ 1, %forcond.preheader ], [ %div, %forbody ] ; <i32> [#uses=1]
- %divisor.02 = add i32 %indvar, 1 ; <i32> [#uses=2]
- %n.addr.03 = sub i32 %n, %indvar ; <i32> [#uses=1]
- %mul = mul i32 %n.addr.03, %accumulator.01 ; <i32> [#uses=1]
- %div = udiv i32 %mul, %divisor.02 ; <i32> [#uses=2]
- %inc = add i32 %indvar, 2 ; <i32> [#uses=1]
- %cmp4 = icmp ugt i32 %inc, %k ; <i1> [#uses=1]
- br i1 %cmp4, label %afterfor, label %forbody
-
-afterfor: ; preds = %forbody, %forcond.preheader
- %accumulator.0.lcssa = phi i32 [ 1, %forcond.preheader ], [ %div, %forbody ] ; <i32> [#uses=1]
- ret i32 %accumulator.0.lcssa
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr2849.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr2849.ll
deleted file mode 100644
index 0fec481..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr2849.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llc < %s
-; PR2849
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
- %struct.BaseBoundPtrs = type { i8*, i8* }
- %struct.HashEntry = type { %struct.BaseBoundPtrs }
- %struct.NODE = type { i8, i8, %struct.anon }
- %struct.anon = type { %struct.xlist }
- %struct.xlist = type { %struct.NODE*, %struct.NODE* }
- %struct.xvect = type { %struct.NODE** }
-@hash_table_begin = external global %struct.HashEntry*
-
-define void @obshow() {
-entry:
- %tmp = load %struct.HashEntry** @hash_table_begin, align 8
- br i1 false, label %xlygetvalue.exit, label %xlygetvalue.exit
-
-xlygetvalue.exit:
- %storemerge.in.i = phi %struct.NODE** [ null, %entry ], [ null, %entry ]
- %storemerge.i = load %struct.NODE** %storemerge.in.i
- %tmp1 = ptrtoint %struct.NODE** %storemerge.in.i to i64
- %tmp2 = lshr i64 %tmp1, 3
- %tmp3 = and i64 %tmp2, 2147483647
- %tmp4 = getelementptr %struct.HashEntry* %tmp, i64 %tmp3, i32 0, i32 1
- %tmp7 = load i8** %tmp4, align 8
- %tmp8 = getelementptr %struct.NODE* %storemerge.i, i64 0, i32 2
- %tmp9 = bitcast %struct.anon* %tmp8 to %struct.NODE***
- %tmp11 = load %struct.NODE*** %tmp9, align 8
- %tmp12 = ptrtoint %struct.NODE** %tmp11 to i64
- %tmp13 = lshr i64 %tmp12, 3
- %tmp14 = and i64 %tmp13, 2147483647
- %tmp15 = getelementptr %struct.HashEntry* %tmp, i64 %tmp14, i32 0, i32 1
- call fastcc void @xlprint(i8** %tmp4, i8* %tmp7, i8** %tmp15)
- ret void
-}
-
-declare fastcc void @xlprint(i8**, i8*, i8**)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr2924.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr2924.ll
deleted file mode 100644
index b9e8dc1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr2924.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s
-; PR2924
-
-target datalayout =
-"e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i686-pc-linux-gnu"
-
-define x86_stdcallcc { i32, i8* } @_D3std6string7toupperFAaZAa({ i32, i8* } %s) {
-entry_std.string.toupper:
- %tmp58 = load i32* null
- %tmp59 = icmp eq i32 %tmp58, 0
- %r.val = load { i32, i8* }* null, align 8
- %condtmp.0 = select i1 %tmp59, { i32, i8* } undef, { i32, i8* } %r.val
-
- ret { i32, i8* } %condtmp.0
-}
-define { } @empty({ } %s) {
-entry_std.string.toupper:
- %tmp58 = load i32* null
- %tmp59 = icmp eq i32 %tmp58, 0
- %r.val = load { }* null, align 8
- %condtmp.0 = select i1 %tmp59, { } undef, { } %r.val
- ret { } %condtmp.0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr2982.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr2982.ll
deleted file mode 100644
index 3f9a595..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr2982.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR2982
-
-target datalayout =
-"e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.5"
-@g_279 = external global i32 ; <i32*> [#uses=1]
-@g_265 = external global i32 ; <i32*> [#uses=1]
-@g_3 = external global i8 ; <i8*> [#uses=1]
-
-declare i32 @rshift_u_u(...)
-
-define void @bar() nounwind {
-entry:
- %0 = load i32* @g_279, align 4 ; <i32> [#uses=1]
- %1 = shl i32 %0, 1 ; <i32> [#uses=1]
- %2 = and i32 %1, 2 ; <i32> [#uses=1]
- %3 = load i32* @g_265, align 4 ; <i32> [#uses=1]
- %4 = load i8* @g_3, align 1 ; <i8> [#uses=1]
- %5 = sext i8 %4 to i32 ; <i32> [#uses=1]
- %6 = add i32 %2, %3 ; <i32> [#uses=1]
- %7 = add i32 %6, %5 ; <i32> [#uses=1]
- %8 = tail call i32 (...)* @rshift_u_u(i32 %7, i32 0) nounwind
-; <i32> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr3154.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr3154.ll
deleted file mode 100644
index 18df97c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr3154.ll
+++ /dev/null
@@ -1,104 +0,0 @@
-; RUN: llc < %s -mtriple=i386-pc-linux-gnu -mattr=+sse2
-; RUN: llc < %s -mtriple=i386-pc-linux-gnu -mattr=+sse2 -relocation-model=pic -disable-fp-elim
-; PR3154
-
-define void @ff_flac_compute_autocorr_sse2(i32* %data, i32 %len, i32 %lag, double* %autoc) nounwind {
-entry:
- %c = alloca double, align 8 ; <double*> [#uses=2]
- %0 = add i32 %len, 2 ; <i32> [#uses=1]
- %1 = add i32 %0, %lag ; <i32> [#uses=1]
- %2 = alloca double, i32 %1 ; <double*> [#uses=2]
- %3 = getelementptr double* %2, i32 %lag ; <double*> [#uses=2]
- %4 = ptrtoint double* %3 to i32 ; <i32> [#uses=1]
- %5 = and i32 %4, 8 ; <i32> [#uses=1]
- %6 = icmp eq i32 %5, 0 ; <i1> [#uses=1]
- br i1 %6, label %bb19, label %bb
-
-bb: ; preds = %entry
- %.sum = add i32 %lag, 1 ; <i32> [#uses=1]
- %7 = getelementptr double* %2, i32 %.sum ; <double*> [#uses=1]
- br label %bb19
-
-bb19: ; preds = %bb, %entry
- %data15.0 = phi double* [ %7, %bb ], [ %3, %entry ] ; <double*> [#uses=5]
- %8 = sitofp i32 %len to double ; <double> [#uses=1]
- %9 = fsub double %8, 1.000000e+00 ; <double> [#uses=1]
- %10 = fdiv double 2.000000e+00, %9 ; <double> [#uses=1]
- store double %10, double* %c, align 8
- %11 = ashr i32 %len, 1 ; <i32> [#uses=3]
- %12 = mul i32 %11, -4 ; <i32> [#uses=2]
- %13 = shl i32 %len, 1 ; <i32> [#uses=1]
- %14 = and i32 %13, -4 ; <i32> [#uses=2]
- call void asm sideeffect "movsd $0, %xmm7 \0A\09movapd ff_pd_1, %xmm6 \0A\09movapd ff_pd_2, %xmm5 \0A\09movlhps %xmm7, %xmm7 \0A\09subpd %xmm5, %xmm7 \0A\09addsd %xmm6, %xmm7 \0A\09", "*m,~{dirflag},~{fpsr},~{flags}"(double* %c) nounwind
- %15 = and i32 %len, 1 ; <i32> [#uses=1]
- %toBool = icmp eq i32 %15, 0 ; <i1> [#uses=1]
- %16 = getelementptr double* %data15.0, i32 %11 ; <double*> [#uses=2]
- %17 = getelementptr i32* %data, i32 %11 ; <i32*> [#uses=2]
- br i1 %toBool, label %bb22, label %bb20
-
-bb20: ; preds = %bb19
- %asmtmp = call { i32, i32 } asm sideeffect "1: \0A\09movapd %xmm7, %xmm1 \0A\09mulpd %xmm1, %xmm1 \0A\09movapd %xmm6, %xmm0 \0A\09subpd %xmm1, %xmm0 \0A\09pshufd $$0x4e, %xmm0, %xmm1 \0A\09cvtpi2pd ($3,$0), %xmm2 \0A\09cvtpi2pd -1*4($3,$1), %xmm3 \0A\09mulpd %xmm0, %xmm2 \0A\09mulpd %xmm1, %xmm3 \0A\09movapd %xmm2, ($2,$0,2) \0A\09movupd %xmm3, -1*8($2,$1,2) \0A\09subpd %xmm5, %xmm7 \0A\09sub $$8, $1 \0A\09add $$8, $0 \0A\09jl 1b \0A\09", "=&r,=&r,r,r,0,1,~{dirflag},~{fpsr},~{flags}"(double* %16, i32* %17, i32 %12, i32 %14) nounwind ; <{ i32, i32 }> [#uses=0]
- br label %bb28.preheader
-
-bb22: ; preds = %bb19
- %asmtmp23 = call { i32, i32 } asm sideeffect "1: \0A\09movapd %xmm7, %xmm1 \0A\09mulpd %xmm1, %xmm1 \0A\09movapd %xmm6, %xmm0 \0A\09subpd %xmm1, %xmm0 \0A\09pshufd $$0x4e, %xmm0, %xmm1 \0A\09cvtpi2pd ($3,$0), %xmm2 \0A\09cvtpi2pd -2*4($3,$1), %xmm3 \0A\09mulpd %xmm0, %xmm2 \0A\09mulpd %xmm1, %xmm3 \0A\09movapd %xmm2, ($2,$0,2) \0A\09movapd %xmm3, -2*8($2,$1,2) \0A\09subpd %xmm5, %xmm7 \0A\09sub $$8, $1 \0A\09add $$8, $0 \0A\09jl 1b \0A\09", "=&r,=&r,r,r,0,1,~{dirflag},~{fpsr},~{flags}"(double* %16, i32* %17, i32 %12, i32 %14) nounwind ; <{ i32, i32 }> [#uses=0]
- br label %bb28.preheader
-
-bb28.preheader: ; preds = %bb22, %bb20
- %18 = icmp sgt i32 %lag, 0 ; <i1> [#uses=2]
- br i1 %18, label %bb27, label %bb29
-
-bb27: ; preds = %bb27, %bb28.preheader
- %j4.042 = phi i32 [ 0, %bb28.preheader ], [ %indvar.next45, %bb27 ] ; <i32> [#uses=2]
- %19 = sub i32 %j4.042, %lag ; <i32> [#uses=1]
- %20 = getelementptr double* %data15.0, i32 %19 ; <double*> [#uses=1]
- store double 0.000000e+00, double* %20, align 8
- %indvar.next45 = add i32 %j4.042, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next45, %lag ; <i1> [#uses=1]
- br i1 %exitcond, label %bb29, label %bb27
-
-bb29: ; preds = %bb27, %bb28.preheader
- %21 = getelementptr double* %data15.0, i32 %len ; <double*> [#uses=3]
- store double 0.000000e+00, double* %21, align 8
- br i1 %18, label %bb.nph, label %bb37
-
-bb.nph: ; preds = %bb29
- %22 = mul i32 %len, -8 ; <i32> [#uses=2]
- %23 = add i32 %lag, -2 ; <i32> [#uses=1]
- br label %bb30
-
-bb30: ; preds = %bb35, %bb.nph
- %indvar = phi i32 [ 0, %bb.nph ], [ %indvar.next, %bb35 ] ; <i32> [#uses=2]
- %j4.141 = shl i32 %indvar, 1 ; <i32> [#uses=8]
- %24 = icmp eq i32 %23, %j4.141 ; <i1> [#uses=1]
- %25 = or i32 %j4.141, 1 ; <i32> [#uses=2]
- br i1 %24, label %bb31, label %bb33
-
-bb31: ; preds = %bb30
- %26 = add i32 %j4.141, 2 ; <i32> [#uses=2]
- %.sum38 = sub i32 %len, %j4.141 ; <i32> [#uses=1]
- %27 = getelementptr double* %data15.0, i32 %.sum38 ; <double*> [#uses=1]
- %28 = getelementptr double* %autoc, i32 %j4.141 ; <double*> [#uses=1]
- %29 = getelementptr double* %autoc, i32 %25 ; <double*> [#uses=1]
- %30 = getelementptr double* %autoc, i32 %26 ; <double*> [#uses=1]
- %asmtmp32 = call i32 asm sideeffect "movsd ff_pd_1, %xmm0 \0A\09movsd ff_pd_1, %xmm1 \0A\09movsd ff_pd_1, %xmm2 \0A\091: \0A\09movapd ($4,$0), %xmm3 \0A\09movupd -8($5,$0), %xmm4 \0A\09movapd ($5,$0), %xmm5 \0A\09mulpd %xmm3, %xmm4 \0A\09mulpd %xmm3, %xmm5 \0A\09mulpd -16($5,$0), %xmm3 \0A\09addpd %xmm4, %xmm1 \0A\09addpd %xmm5, %xmm0 \0A\09addpd %xmm3, %xmm2 \0A\09add $$16, $0 \0A\09jl 1b \0A\09movhlps %xmm0, %xmm3 \0A\09movhlps %xmm1, %xmm4 \0A\09movhlps %xmm2, %xmm5 \0A\09addsd %xmm3, %xmm0 \0A\09addsd %xmm4, %xmm1 \0A\09addsd %xmm5, %xmm2 \0A\09movsd %xmm0, $1 \0A\09movsd %xmm1, $2 \0A\09movsd %xmm2, $3 \0A\09", "=&r,=*m,=*m,=*m,r,r,0,~{dirflag},~{fpsr},~{flags}"(double* %28, double* %29, double* %30, double* %21, double* %27, i32 %22) nounwind ; <i32> [#uses=0]
- br label %bb35
-
-bb33: ; preds = %bb30
- %.sum39 = sub i32 %len, %j4.141 ; <i32> [#uses=1]
- %31 = getelementptr double* %data15.0, i32 %.sum39 ; <double*> [#uses=1]
- %32 = getelementptr double* %autoc, i32 %j4.141 ; <double*> [#uses=1]
- %33 = getelementptr double* %autoc, i32 %25 ; <double*> [#uses=1]
- %asmtmp34 = call i32 asm sideeffect "movsd ff_pd_1, %xmm0 \0A\09movsd ff_pd_1, %xmm1 \0A\091: \0A\09movapd ($3,$0), %xmm3 \0A\09movupd -8($4,$0), %xmm4 \0A\09mulpd %xmm3, %xmm4 \0A\09mulpd ($4,$0), %xmm3 \0A\09addpd %xmm4, %xmm1 \0A\09addpd %xmm3, %xmm0 \0A\09add $$16, $0 \0A\09jl 1b \0A\09movhlps %xmm0, %xmm3 \0A\09movhlps %xmm1, %xmm4 \0A\09addsd %xmm3, %xmm0 \0A\09addsd %xmm4, %xmm1 \0A\09movsd %xmm0, $1 \0A\09movsd %xmm1, $2 \0A\09", "=&r,=*m,=*m,r,r,0,~{dirflag},~{fpsr},~{flags}"(double* %32, double* %33, double* %21, double* %31, i32 %22) nounwind ; <i32> [#uses=0]
- %.pre = add i32 %j4.141, 2 ; <i32> [#uses=1]
- br label %bb35
-
-bb35: ; preds = %bb33, %bb31
- %.pre-phi = phi i32 [ %.pre, %bb33 ], [ %26, %bb31 ] ; <i32> [#uses=1]
- %34 = icmp slt i32 %.pre-phi, %lag ; <i1> [#uses=1]
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
- br i1 %34, label %bb30, label %bb37
-
-bb37: ; preds = %bb35, %bb29
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr3216.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr3216.ll
deleted file mode 100644
index 38c9f32..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr3216.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {sar. \$5}
-
-@foo = global i8 127
-
-define i32 @main() nounwind {
-entry:
- %tmp = load i8* @foo
- %bf.lo = lshr i8 %tmp, 5
- %bf.lo.cleared = and i8 %bf.lo, 7
- %0 = shl i8 %bf.lo.cleared, 5
- %bf.val.sext = ashr i8 %0, 5
- %conv = sext i8 %bf.val.sext to i32
- ret i32 %conv
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr3241.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr3241.ll
deleted file mode 100644
index 2f7917b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr3241.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR3241
-
-@g_620 = external global i32
-
-define void @func_18(i32 %p_21) nounwind {
-entry:
- %t0 = call i32 @func_31(i32 %p_21) nounwind
- %t1 = call i32 @safe_add_macro_uint32_t_u_u() nounwind
- %t2 = icmp sgt i32 %t1, 0
- %t3 = zext i1 %t2 to i32
- %t4 = load i32* @g_620, align 4
- %t5 = icmp eq i32 %t3, %t4
- %t6 = xor i32 %p_21, 1
- %t7 = call i32 @func_55(i32 %t6) nounwind
- br i1 %t5, label %return, label %bb
-
-bb:
- unreachable
-
-return:
- unreachable
-}
-
-declare i32 @func_31(i32)
-
-declare i32 @safe_add_macro_uint32_t_u_u()
-
-declare i32 @func_55(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr3243.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr3243.ll
deleted file mode 100644
index 483b5bf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr3243.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR3243
-
-declare signext i16 @safe_mul_func_int16_t_s_s(i16 signext, i32) nounwind readnone optsize
-
-define i32 @func_120(i32 %p_121) nounwind optsize {
-entry:
- %0 = trunc i32 %p_121 to i16 ; <i16> [#uses=1]
- %1 = urem i16 %0, -15461 ; <i16> [#uses=1]
- %phitmp1 = trunc i16 %1 to i8 ; <i8> [#uses=1]
- %phitmp2 = urem i8 %phitmp1, -1 ; <i8> [#uses=1]
- %phitmp3 = zext i8 %phitmp2 to i16 ; <i16> [#uses=1]
- %2 = tail call signext i16 @safe_mul_func_int16_t_s_s(i16 signext %phitmp3, i32 1) nounwind ; <i16> [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr3244.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr3244.ll
deleted file mode 100644
index 2598c2f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr3244.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR3244
-
-@g_62 = external global i16 ; <i16*> [#uses=1]
-@g_487 = external global i32 ; <i32*> [#uses=1]
-
-define i32 @func_42(i32 %p_43, i32 %p_44, i32 %p_45, i32 %p_46) nounwind {
-entry:
- %0 = load i16* @g_62, align 2 ; <i16> [#uses=1]
- %1 = load i32* @g_487, align 4 ; <i32> [#uses=1]
- %2 = trunc i16 %0 to i8 ; <i8> [#uses=1]
- %3 = trunc i32 %1 to i8 ; <i8> [#uses=1]
- %4 = tail call i32 (...)* @func_7(i64 -4455561449541442965, i32 1)
-nounwind ; <i32> [#uses=1]
- %5 = trunc i32 %4 to i8 ; <i8> [#uses=1]
- %6 = mul i8 %3, %2 ; <i8> [#uses=1]
- %7 = mul i8 %6, %5 ; <i8> [#uses=1]
- %8 = sext i8 %7 to i16 ; <i16> [#uses=1]
- %9 = tail call i32 @func_85(i16 signext %8, i32 1, i32 1) nounwind
- ; <i32> [#uses=0]
- ret i32 undef
-}
-
-declare i32 @func_7(...)
-
-declare i32 @func_85(i16 signext, i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr3250.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr3250.ll
deleted file mode 100644
index cccbf54..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr3250.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR3250
-
-declare i32 @safe_sub_func_short_u_u(i16 signext, i16 signext) nounwind
-
-define i32 @func_106(i32 %p_107) nounwind {
-entry:
- %0 = tail call i32 (...)* @safe_div_(i32 %p_107, i32 1) nounwind
- ; <i32> [#uses=1]
- %1 = lshr i32 %0, -9 ; <i32> [#uses=1]
- %2 = trunc i32 %1 to i16 ; <i16> [#uses=1]
- %3 = tail call i32 @safe_sub_func_short_u_u(i16 signext 1, i16 signext
-%2) nounwind ; <i32> [#uses=0]
- ret i32 undef
-}
-
-declare i32 @safe_div_(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr3317.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr3317.ll
deleted file mode 100644
index 9d6626b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr3317.ll
+++ /dev/null
@@ -1,46 +0,0 @@
-; RUN: llc < %s -march=x86
-; PR3317
-
- %ArraySInt16 = type { %JavaObject, i8*, [0 x i16] }
- %ArraySInt8 = type { %JavaObject, i8*, [0 x i8] }
- %Attribut = type { %ArraySInt16*, i32, i32 }
- %CacheNode = type { i8*, %JavaCommonClass*, %CacheNode*, %Enveloppe* }
- %Enveloppe = type { %CacheNode*, %ArraySInt16*, %ArraySInt16*, i8, %JavaClass*, %CacheNode }
- %JavaArray = type { %JavaObject, i8* }
- %JavaClass = type { %JavaCommonClass, i32, %VT*, [1 x %TaskClassMirror], i8*, %JavaField*, i16, %JavaField*, i16, %JavaMethod*, i16, %JavaMethod*, i16, i8*, %ArraySInt8*, i8*, %Attribut*, i16, %JavaClass**, i16, %JavaClass*, i16, i8, i32, i32, i8*, void (i8*)* }
- %JavaCommonClass = type { %JavaCommonClass**, i32, [1 x %JavaObject*], i16, %JavaClass**, i16, %ArraySInt16*, %JavaClass*, i8* }
- %JavaField = type { i8*, i16, %ArraySInt16*, %ArraySInt16*, %Attribut*, i16, %JavaClass*, i32, i16, i8* }
- %JavaMethod = type { i8*, i16, %Attribut*, i16, %Enveloppe*, i16, %JavaClass*, %ArraySInt16*, %ArraySInt16*, i8, i8*, i32, i8* }
- %JavaObject = type { %VT*, %JavaCommonClass*, i8* }
- %TaskClassMirror = type { i32, i8* }
- %UTF8 = type { %JavaObject, i8*, [0 x i16] }
- %VT = type [0 x i32 (...)*]
-
-declare void @jnjvmNullPointerException()
-
-define i32 @JnJVM_java_rmi_activation_ActivationGroupID_hashCode__(%JavaObject* nocapture) nounwind {
-start:
- %1 = getelementptr %JavaObject* %0, i64 1, i32 1 ; <%JavaCommonClass**> [#uses=1]
- %2 = load %JavaCommonClass** %1 ; <%JavaCommonClass*> [#uses=4]
- %3 = icmp eq %JavaCommonClass* %2, null ; <i1> [#uses=1]
- br i1 %3, label %verifyNullExit1, label %verifyNullCont2
-
-verifyNullExit1: ; preds = %start
- tail call void @jnjvmNullPointerException()
- unreachable
-
-verifyNullCont2: ; preds = %start
- %4 = bitcast %JavaCommonClass* %2 to { %JavaObject, i16, i32, i64 }* ; <{ %JavaObject, i16, i32, i64 }*> [#uses=1]
- %5 = getelementptr { %JavaObject, i16, i32, i64 }* %4, i64 0, i32 2 ; <i32*> [#uses=1]
- %6 = load i32* %5 ; <i32> [#uses=1]
- %7 = getelementptr %JavaCommonClass* %2, i64 0, i32 4 ; <%JavaClass***> [#uses=1]
- %8 = bitcast %JavaClass*** %7 to i64* ; <i64*> [#uses=1]
- %9 = load i64* %8 ; <i64> [#uses=1]
- %10 = trunc i64 %9 to i32 ; <i32> [#uses=1]
- %11 = getelementptr %JavaCommonClass* %2, i64 0, i32 3 ; <i16*> [#uses=1]
- %12 = load i16* %11 ; <i16> [#uses=1]
- %13 = sext i16 %12 to i32 ; <i32> [#uses=1]
- %14 = xor i32 %10, %6 ; <i32> [#uses=1]
- %15 = xor i32 %14, %13 ; <i32> [#uses=1]
- ret i32 %15
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr3366.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr3366.ll
deleted file mode 100644
index f813e2e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr3366.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=x86 | grep movzbl
-; PR3366
-
-define void @_ada_c34002a() nounwind {
-entry:
- %0 = load i8* null, align 1
- %1 = sdiv i8 90, %0
- %2 = icmp ne i8 %1, 3
- %3 = zext i1 %2 to i8
- %toBool449 = icmp ne i8 %3, 0
- %4 = or i1 false, %toBool449
- %5 = zext i1 %4 to i8
- %toBool450 = icmp ne i8 %5, 0
- br i1 %toBool450, label %bb451, label %bb457
-
-bb451:
- br label %bb457
-
-bb457:
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr3457.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr3457.ll
deleted file mode 100644
index f7af927..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr3457.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin | not grep fstpt
-; PR3457
-; rdar://6548010
-
-define void @foo(double* nocapture %P) nounwind {
-entry:
- %0 = tail call double (...)* @test() nounwind ; <double> [#uses=2]
- %1 = tail call double (...)* @test() nounwind ; <double> [#uses=2]
- %2 = fmul double %0, %0 ; <double> [#uses=1]
- %3 = fmul double %1, %1 ; <double> [#uses=1]
- %4 = fadd double %2, %3 ; <double> [#uses=1]
- store double %4, double* %P, align 8
- ret void
-}
-
-declare double @test(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr3495-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr3495-2.ll
deleted file mode 100644
index 98c064a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr3495-2.ll
+++ /dev/null
@@ -1,54 +0,0 @@
-; RUN: llc < %s -march=x86 -relocation-model=pic -disable-fp-elim -stats |& grep {Number of loads added} | grep 1
-; PR3495
-;
-; This test may not be testing what it was supposed to test.
-; It used to have two spills and four reloads, but now it only has one spill and one reload.
-
-target datalayout = "e-p:32:32:32"
-target triple = "i386-apple-darwin9.6"
- %struct.constraintVCGType = type { i32, i32, i32, i32 }
- %struct.nodeVCGType = type { %struct.constraintVCGType*, i32, i32, i32, %struct.constraintVCGType*, i32, i32, i32 }
-
-define fastcc void @SCC_DFSBelowVCG(%struct.nodeVCGType* %VCG, i32 %net, i32 %label) nounwind {
-entry:
- %0 = getelementptr %struct.nodeVCGType* %VCG, i32 %net, i32 5 ; <i32*> [#uses=2]
- %1 = load i32* %0, align 4 ; <i32> [#uses=1]
- %2 = icmp eq i32 %1, 0 ; <i1> [#uses=1]
- br i1 %2, label %bb5, label %bb.nph3
-
-bb.nph3: ; preds = %entry
- %3 = getelementptr %struct.nodeVCGType* %VCG, i32 %net, i32 4 ; <%struct.constraintVCGType**> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb3, %bb.nph3
- %s.02 = phi i32 [ 0, %bb.nph3 ], [ %12, %bb3 ] ; <i32> [#uses=2]
- %4 = load %struct.constraintVCGType** %3, align 4 ; <%struct.constraintVCGType*> [#uses=1]
- %5 = icmp eq i32 0, 0 ; <i1> [#uses=1]
- br i1 %5, label %bb1, label %bb3
-
-bb1: ; preds = %bb
- %6 = getelementptr %struct.constraintVCGType* %4, i32 %s.02, i32 0 ; <i32*> [#uses=1]
- %7 = load i32* %6, align 4 ; <i32> [#uses=2]
- %8 = getelementptr %struct.nodeVCGType* %VCG, i32 %7, i32 7 ; <i32*> [#uses=1]
- %9 = load i32* %8, align 4 ; <i32> [#uses=1]
- %10 = icmp eq i32 %9, 0 ; <i1> [#uses=1]
- br i1 %10, label %bb2, label %bb3
-
-bb2: ; preds = %bb1
- %11 = getelementptr %struct.nodeVCGType* %VCG, i32 %7, i32 4 ; <%struct.constraintVCGType**> [#uses=0]
- br label %bb.i
-
-bb.i: ; preds = %bb.i, %bb2
- br label %bb.i
-
-bb3: ; preds = %bb1, %bb
- %12 = add i32 %s.02, 1 ; <i32> [#uses=2]
- %13 = load i32* %0, align 4 ; <i32> [#uses=1]
- %14 = icmp ugt i32 %13, %12 ; <i1> [#uses=1]
- br i1 %14, label %bb, label %bb5
-
-bb5: ; preds = %bb3, %entry
- %15 = getelementptr %struct.nodeVCGType* %VCG, i32 %net, i32 6 ; <i32*> [#uses=1]
- store i32 %label, i32* %15, align 4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr3495.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr3495.ll
deleted file mode 100644
index e84a84f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr3495.ll
+++ /dev/null
@@ -1,79 +0,0 @@
-; RUN: llc < %s -march=x86 -stats |& grep {Number of loads added} | grep 2
-; RUN: llc < %s -march=x86 -stats |& grep {Number of register spills} | grep 1
-; RUN: llc < %s -march=x86 -stats |& grep {Number of machine instrs printed} | grep 34
-; PR3495
-
-target triple = "i386-pc-linux-gnu"
-@x = external global [8 x i32], align 32 ; <[8 x i32]*> [#uses=1]
-@rows = external global [8 x i32], align 32 ; <[8 x i32]*> [#uses=2]
-@up = external global [15 x i32], align 32 ; <[15 x i32]*> [#uses=2]
-@down = external global [15 x i32], align 32 ; <[15 x i32]*> [#uses=1]
-
-define i32 @queens(i32 %c) nounwind {
-entry:
- %tmp91 = add i32 %c, 1 ; <i32> [#uses=3]
- %tmp135 = getelementptr [8 x i32]* @x, i32 0, i32 %tmp91 ; <i32*> [#uses=1]
- br label %bb
-
-bb: ; preds = %bb569, %entry
- %r25.0.reg2mem.0 = phi i32 [ 0, %entry ], [ %indvar.next715, %bb569 ] ; <i32> [#uses=4]
- %tmp27 = getelementptr [8 x i32]* @rows, i32 0, i32 %r25.0.reg2mem.0 ; <i32*> [#uses=1]
- %tmp28 = load i32* %tmp27, align 4 ; <i32> [#uses=1]
- %tmp29 = icmp eq i32 %tmp28, 0 ; <i1> [#uses=1]
- br i1 %tmp29, label %bb569, label %bb31
-
-bb31: ; preds = %bb
- %tmp35 = sub i32 %r25.0.reg2mem.0, 0 ; <i32> [#uses=1]
- %tmp36 = getelementptr [15 x i32]* @up, i32 0, i32 %tmp35 ; <i32*> [#uses=1]
- %tmp37 = load i32* %tmp36, align 4 ; <i32> [#uses=1]
- %tmp38 = icmp eq i32 %tmp37, 0 ; <i1> [#uses=1]
- br i1 %tmp38, label %bb569, label %bb41
-
-bb41: ; preds = %bb31
- %tmp54 = sub i32 %r25.0.reg2mem.0, %c ; <i32> [#uses=1]
- %tmp55 = add i32 %tmp54, 7 ; <i32> [#uses=1]
- %tmp62 = getelementptr [15 x i32]* @up, i32 0, i32 %tmp55 ; <i32*> [#uses=2]
- store i32 0, i32* %tmp62, align 4
- br label %bb92
-
-bb92: ; preds = %bb545, %bb41
- %r20.0.reg2mem.0 = phi i32 [ 0, %bb41 ], [ %indvar.next711, %bb545 ] ; <i32> [#uses=5]
- %tmp94 = getelementptr [8 x i32]* @rows, i32 0, i32 %r20.0.reg2mem.0 ; <i32*> [#uses=1]
- %tmp95 = load i32* %tmp94, align 4 ; <i32> [#uses=0]
- %tmp112 = add i32 %r20.0.reg2mem.0, %tmp91 ; <i32> [#uses=1]
- %tmp113 = getelementptr [15 x i32]* @down, i32 0, i32 %tmp112 ; <i32*> [#uses=2]
- %tmp114 = load i32* %tmp113, align 4 ; <i32> [#uses=1]
- %tmp115 = icmp eq i32 %tmp114, 0 ; <i1> [#uses=1]
- br i1 %tmp115, label %bb545, label %bb118
-
-bb118: ; preds = %bb92
- %tmp122 = sub i32 %r20.0.reg2mem.0, %tmp91 ; <i32> [#uses=0]
- store i32 0, i32* %tmp113, align 4
- store i32 %r20.0.reg2mem.0, i32* %tmp135, align 4
- br label %bb142
-
-bb142: ; preds = %bb142, %bb118
- %k18.0.reg2mem.0 = phi i32 [ 0, %bb118 ], [ %indvar.next709, %bb142 ] ; <i32> [#uses=1]
- %indvar.next709 = add i32 %k18.0.reg2mem.0, 1 ; <i32> [#uses=2]
- %exitcond710 = icmp eq i32 %indvar.next709, 8 ; <i1> [#uses=1]
- br i1 %exitcond710, label %bb155, label %bb142
-
-bb155: ; preds = %bb142
- %tmp156 = tail call i32 @putchar(i32 10) nounwind ; <i32> [#uses=0]
- br label %bb545
-
-bb545: ; preds = %bb155, %bb92
- %indvar.next711 = add i32 %r20.0.reg2mem.0, 1 ; <i32> [#uses=2]
- %exitcond712 = icmp eq i32 %indvar.next711, 8 ; <i1> [#uses=1]
- br i1 %exitcond712, label %bb553, label %bb92
-
-bb553: ; preds = %bb545
- store i32 1, i32* %tmp62, align 4
- br label %bb569
-
-bb569: ; preds = %bb553, %bb31, %bb
- %indvar.next715 = add i32 %r25.0.reg2mem.0, 1 ; <i32> [#uses=1]
- br label %bb
-}
-
-declare i32 @putchar(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pr3522.ll b/libclamav/c++/llvm/test/CodeGen/X86/pr3522.ll
deleted file mode 100644
index 7cdeaa0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pr3522.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=x86 -stats |& not grep machine-sink
-; PR3522
-
-target triple = "i386-pc-linux-gnu"
-@.str = external constant [13 x i8] ; <[13 x i8]*> [#uses=1]
-
-define void @_ada_c34018a() {
-entry:
- %0 = tail call i32 @report__ident_int(i32 90) ; <i32> [#uses=1]
- %1 = trunc i32 %0 to i8 ; <i8> [#uses=1]
- invoke void @__gnat_rcheck_12(i8* getelementptr ([13 x i8]* @.str, i32 0, i32 0), i32 32) noreturn
- to label %invcont unwind label %lpad
-
-invcont: ; preds = %entry
- unreachable
-
-bb22: ; preds = %lpad
- ret void
-
-return: ; preds = %lpad
- ret void
-
-lpad: ; preds = %entry
- %2 = icmp eq i8 %1, 90 ; <i1> [#uses=1]
- br i1 %2, label %return, label %bb22
-}
-
-declare void @__gnat_rcheck_12(i8*, i32) noreturn
-
-declare i32 @report__ident_int(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pre-split1.ll b/libclamav/c++/llvm/test/CodeGen/X86/pre-split1.ll
deleted file mode 100644
index e89b507..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pre-split1.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -pre-alloc-split -stats |& \
-; RUN: grep {pre-alloc-split} | grep {Number of intervals split} | grep 1
-; XFAIL: *
-
-define void @test(double* %P, i32 %cond) nounwind {
-entry:
- %0 = load double* %P, align 8 ; <double> [#uses=1]
- %1 = fadd double %0, 4.000000e+00 ; <double> [#uses=2]
- %2 = icmp eq i32 %cond, 0 ; <i1> [#uses=1]
- br i1 %2, label %bb1, label %bb
-
-bb: ; preds = %entry
- %3 = fadd double %1, 4.000000e+00 ; <double> [#uses=1]
- br label %bb1
-
-bb1: ; preds = %bb, %entry
- %A.0 = phi double [ %3, %bb ], [ %1, %entry ] ; <double> [#uses=1]
- %4 = fmul double %A.0, 4.000000e+00 ; <double> [#uses=1]
- %5 = tail call i32 (...)* @bar() nounwind ; <i32> [#uses=0]
- store double %4, double* %P, align 8
- ret void
-}
-
-declare i32 @bar(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pre-split10.ll b/libclamav/c++/llvm/test/CodeGen/X86/pre-split10.ll
deleted file mode 100644
index db039bd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pre-split10.ll
+++ /dev/null
@@ -1,51 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -pre-alloc-split
-
-define i32 @main(i32 %argc, i8** %argv) nounwind {
-entry:
- br label %bb14.i
-
-bb14.i: ; preds = %bb14.i, %entry
- %i8.0.reg2mem.0.i = phi i32 [ 0, %entry ], [ %0, %bb14.i ] ; <i32> [#uses=1]
- %0 = add i32 %i8.0.reg2mem.0.i, 1 ; <i32> [#uses=2]
- %1 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %2 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %3 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %exitcond75.i = icmp eq i32 %0, 32 ; <i1> [#uses=1]
- br i1 %exitcond75.i, label %bb24.i, label %bb14.i
-
-bb24.i: ; preds = %bb14.i
- %4 = fdiv double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
- %5 = fdiv double %1, 0.000000e+00 ; <double> [#uses=1]
- %6 = fdiv double %2, 0.000000e+00 ; <double> [#uses=1]
- %7 = fdiv double %3, 0.000000e+00 ; <double> [#uses=1]
- br label %bb31.i
-
-bb31.i: ; preds = %bb31.i, %bb24.i
- %tmp.0.reg2mem.0.i = phi i32 [ 0, %bb24.i ], [ %indvar.next64.i, %bb31.i ] ; <i32> [#uses=1]
- %indvar.next64.i = add i32 %tmp.0.reg2mem.0.i, 1 ; <i32> [#uses=2]
- %exitcond65.i = icmp eq i32 %indvar.next64.i, 64 ; <i1> [#uses=1]
- br i1 %exitcond65.i, label %bb33.i, label %bb31.i
-
-bb33.i: ; preds = %bb31.i
- br label %bb35.preheader.i
-
-bb5.i.i: ; preds = %bb35.preheader.i
- %8 = call double @floor(double 0.000000e+00) nounwind readnone ; <double> [#uses=0]
- br label %bb7.i.i
-
-bb7.i.i: ; preds = %bb35.preheader.i, %bb5.i.i
- br label %bb35.preheader.i
-
-bb35.preheader.i: ; preds = %bb7.i.i, %bb33.i
- %9 = fsub double 0.000000e+00, %4 ; <double> [#uses=1]
- store double %9, double* null, align 8
- %10 = fsub double 0.000000e+00, %5 ; <double> [#uses=1]
- store double %10, double* null, align 8
- %11 = fsub double 0.000000e+00, %6 ; <double> [#uses=1]
- store double %11, double* null, align 8
- %12 = fsub double 0.000000e+00, %7 ; <double> [#uses=1]
- store double %12, double* null, align 8
- br i1 false, label %bb7.i.i, label %bb5.i.i
-}
-
-declare double @floor(double) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pre-split11.ll b/libclamav/c++/llvm/test/CodeGen/X86/pre-split11.ll
deleted file mode 100644
index 0a9f4e3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pre-split11.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse2 -pre-alloc-split | FileCheck %s
-
-@.str = private constant [28 x i8] c"\0A\0ADOUBLE D = %f\0A\00", align 1 ; <[28 x i8]*> [#uses=1]
-@.str1 = private constant [37 x i8] c"double to long l1 = %ld\09\09(0x%lx)\0A\00", align 8 ; <[37 x i8]*> [#uses=1]
-@.str2 = private constant [35 x i8] c"double to uint ui1 = %u\09\09(0x%x)\0A\00", align 8 ; <[35 x i8]*> [#uses=1]
-@.str3 = private constant [37 x i8] c"double to ulong ul1 = %lu\09\09(0x%lx)\0A\00", align 8 ; <[37 x i8]*> [#uses=1]
-
-define i32 @main(i32 %argc, i8** nocapture %argv) nounwind ssp {
-; CHECK: movsd %xmm0, (%rsp)
-entry:
- %0 = icmp sgt i32 %argc, 4 ; <i1> [#uses=1]
- br i1 %0, label %bb, label %bb2
-
-bb: ; preds = %entry
- %1 = getelementptr inbounds i8** %argv, i64 4 ; <i8**> [#uses=1]
- %2 = load i8** %1, align 8 ; <i8*> [#uses=1]
- %3 = tail call double @atof(i8* %2) nounwind ; <double> [#uses=1]
- br label %bb2
-
-bb2: ; preds = %bb, %entry
- %storemerge = phi double [ %3, %bb ], [ 2.000000e+00, %entry ] ; <double> [#uses=4]
- %4 = fptoui double %storemerge to i32 ; <i32> [#uses=2]
- %5 = fptoui double %storemerge to i64 ; <i64> [#uses=2]
- %6 = fptosi double %storemerge to i64 ; <i64> [#uses=2]
- %7 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([28 x i8]* @.str, i64 0, i64 0), double %storemerge) nounwind ; <i32> [#uses=0]
- %8 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([37 x i8]* @.str1, i64 0, i64 0), i64 %6, i64 %6) nounwind ; <i32> [#uses=0]
- %9 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([35 x i8]* @.str2, i64 0, i64 0), i32 %4, i32 %4) nounwind ; <i32> [#uses=0]
- %10 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([37 x i8]* @.str3, i64 0, i64 0), i64 %5, i64 %5) nounwind ; <i32> [#uses=0]
- ret i32 0
-}
-
-declare double @atof(i8* nocapture) nounwind readonly
-
-declare i32 @printf(i8* nocapture, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pre-split2.ll b/libclamav/c++/llvm/test/CodeGen/X86/pre-split2.ll
deleted file mode 100644
index ba902f9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pre-split2.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -pre-alloc-split -stats |& \
-; RUN: grep {pre-alloc-split} | count 2
-
-define i32 @t(i32 %arg) {
-entry:
- br label %bb6
-
-.noexc6: ; preds = %bb6
- %0 = and i32 %2, -8 ; <i32> [#uses=1]
- tail call void @llvm.memmove.i32(i8* %3, i8* null, i32 %0, i32 1) nounwind
- store double %1, double* null, align 8
- br label %bb6
-
-bb6: ; preds = %.noexc6, %entry
- %1 = uitofp i32 %arg to double ; <double> [#uses=1]
- %2 = sub i32 0, 0 ; <i32> [#uses=1]
- %3 = invoke i8* @_Znwm(i32 0)
- to label %.noexc6 unwind label %lpad32 ; <i8*> [#uses=1]
-
-lpad32: ; preds = %bb6
- unreachable
-}
-
-declare void @llvm.memmove.i32(i8*, i8*, i32, i32) nounwind
-
-declare i8* @_Znwm(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pre-split3.ll b/libclamav/c++/llvm/test/CodeGen/X86/pre-split3.ll
deleted file mode 100644
index 2e31420..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pre-split3.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -pre-alloc-split -stats |& \
-; RUN: grep {pre-alloc-split} | grep {Number of intervals split} | grep 1
-
-define i32 @t(i32 %arg) {
-entry:
- br label %bb6
-
-.noexc6: ; preds = %bb6
- %0 = and i32 %2, -8 ; <i32> [#uses=1]
- tail call void @llvm.memmove.i32(i8* %3, i8* null, i32 %0, i32 1) nounwind
- store double %1, double* null, align 8
- br label %bb6
-
-bb6: ; preds = %.noexc6, %entry
- %1 = uitofp i32 %arg to double ; <double> [#uses=1]
- %2 = sub i32 0, 0 ; <i32> [#uses=1]
- %3 = invoke i8* @_Znwm(i32 0)
- to label %.noexc6 unwind label %lpad32 ; <i8*> [#uses=1]
-
-lpad32: ; preds = %bb6
- unreachable
-}
-
-declare void @llvm.memmove.i32(i8*, i8*, i32, i32) nounwind
-
-declare i8* @_Znwm(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pre-split4.ll b/libclamav/c++/llvm/test/CodeGen/X86/pre-split4.ll
deleted file mode 100644
index 10cef27..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pre-split4.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -pre-alloc-split -stats |& \
-; RUN: grep {pre-alloc-split} | grep {Number of intervals split} | grep 2
-
-define i32 @main(i32 %argc, i8** %argv) nounwind {
-entry:
- br label %bb
-
-bb: ; preds = %bb, %entry
- %k.0.reg2mem.0 = phi double [ 1.000000e+00, %entry ], [ %6, %bb ] ; <double> [#uses=2]
- %Flint.0.reg2mem.0 = phi double [ 0.000000e+00, %entry ], [ %5, %bb ] ; <double> [#uses=1]
- %twoThrd.0.reg2mem.0 = phi double [ 0.000000e+00, %entry ], [ %1, %bb ] ; <double> [#uses=1]
- %0 = tail call double @llvm.pow.f64(double 0x3FE5555555555555, double 0.000000e+00) ; <double> [#uses=1]
- %1 = fadd double %0, %twoThrd.0.reg2mem.0 ; <double> [#uses=1]
- %2 = tail call double @sin(double %k.0.reg2mem.0) nounwind readonly ; <double> [#uses=1]
- %3 = fmul double 0.000000e+00, %2 ; <double> [#uses=1]
- %4 = fdiv double 1.000000e+00, %3 ; <double> [#uses=1]
- store double %Flint.0.reg2mem.0, double* null
- store double %twoThrd.0.reg2mem.0, double* null
- %5 = fadd double %4, %Flint.0.reg2mem.0 ; <double> [#uses=1]
- %6 = fadd double %k.0.reg2mem.0, 1.000000e+00 ; <double> [#uses=1]
- br label %bb
-}
-
-declare double @llvm.pow.f64(double, double) nounwind readonly
-
-declare double @sin(double) nounwind readonly
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pre-split5.ll b/libclamav/c++/llvm/test/CodeGen/X86/pre-split5.ll
deleted file mode 100644
index 8def460..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pre-split5.ll
+++ /dev/null
@@ -1,56 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -pre-alloc-split
-
-target triple = "i386-apple-darwin9.5"
- %struct.FILE = type { i8*, i32, i32, i16, i16, %struct.__sbuf, i32, i8*, i32 (i8*)*, i32 (i8*, i8*, i32)*, i64 (i8*, i64, i32)*, i32 (i8*, i8*, i32)*, %struct.__sbuf, %struct.__sFILEX*, i32, [3 x i8], [1 x i8], %struct.__sbuf, i32, i64 }
- %struct.__sFILEX = type opaque
- %struct.__sbuf = type { i8*, i32 }
-@"\01LC1" = external constant [48 x i8] ; <[48 x i8]*> [#uses=1]
-
-define i32 @main() nounwind {
-entry:
- br label %bb5.us
-
-bb5.us: ; preds = %bb8.split, %bb5.us, %entry
- %i.0.reg2mem.0.ph = phi i32 [ 0, %entry ], [ %indvar.next53, %bb8.split ], [ %i.0.reg2mem.0.ph, %bb5.us ] ; <i32> [#uses=2]
- %j.0.reg2mem.0.us = phi i32 [ %indvar.next47, %bb5.us ], [ 0, %bb8.split ], [ 0, %entry ] ; <i32> [#uses=1]
- %indvar.next47 = add i32 %j.0.reg2mem.0.us, 1 ; <i32> [#uses=2]
- %exitcond48 = icmp eq i32 %indvar.next47, 256 ; <i1> [#uses=1]
- br i1 %exitcond48, label %bb8.split, label %bb5.us
-
-bb8.split: ; preds = %bb5.us
- %indvar.next53 = add i32 %i.0.reg2mem.0.ph, 1 ; <i32> [#uses=2]
- %exitcond54 = icmp eq i32 %indvar.next53, 256 ; <i1> [#uses=1]
- br i1 %exitcond54, label %bb11, label %bb5.us
-
-bb11: ; preds = %bb11, %bb8.split
- %i.1.reg2mem.0 = phi i32 [ %indvar.next44, %bb11 ], [ 0, %bb8.split ] ; <i32> [#uses=1]
- %indvar.next44 = add i32 %i.1.reg2mem.0, 1 ; <i32> [#uses=2]
- %exitcond45 = icmp eq i32 %indvar.next44, 63 ; <i1> [#uses=1]
- br i1 %exitcond45, label %bb14, label %bb11
-
-bb14: ; preds = %bb14, %bb11
- %indvar = phi i32 [ %indvar.next40, %bb14 ], [ 0, %bb11 ] ; <i32> [#uses=1]
- %indvar.next40 = add i32 %indvar, 1 ; <i32> [#uses=2]
- %exitcond41 = icmp eq i32 %indvar.next40, 32768 ; <i1> [#uses=1]
- br i1 %exitcond41, label %bb28, label %bb14
-
-bb28: ; preds = %bb14
- %0 = fdiv double 2.550000e+02, 0.000000e+00 ; <double> [#uses=1]
- br label %bb30
-
-bb30: ; preds = %bb36, %bb28
- %m.1.reg2mem.0 = phi i32 [ %m.0, %bb36 ], [ 0, %bb28 ] ; <i32> [#uses=1]
- %1 = fmul double 0.000000e+00, %0 ; <double> [#uses=1]
- %2 = fptosi double %1 to i32 ; <i32> [#uses=1]
- br i1 false, label %bb36, label %bb35
-
-bb35: ; preds = %bb30
- %3 = tail call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* null, i8* getelementptr ([48 x i8]* @"\01LC1", i32 0, i32 0), i32 0, i32 0, i32 0, i32 %2) nounwind ; <i32> [#uses=0]
- br label %bb36
-
-bb36: ; preds = %bb35, %bb30
- %m.0 = phi i32 [ 0, %bb35 ], [ %m.1.reg2mem.0, %bb30 ] ; <i32> [#uses=1]
- br label %bb30
-}
-
-declare i32 @fprintf(%struct.FILE*, i8*, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pre-split6.ll b/libclamav/c++/llvm/test/CodeGen/X86/pre-split6.ll
deleted file mode 100644
index d38e630..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pre-split6.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -pre-alloc-split | grep {divsd 8} | count 1
-
-@current_surfaces.b = external global i1 ; <i1*> [#uses=1]
-
-declare double @sin(double) nounwind readonly
-
-declare double @asin(double) nounwind readonly
-
-define fastcc void @trace_line(i32 %line) nounwind {
-entry:
- %.b3 = load i1* @current_surfaces.b ; <i1> [#uses=1]
- br i1 %.b3, label %bb.nph, label %return
-
-bb.nph: ; preds = %entry
- %0 = load double* null, align 8 ; <double> [#uses=1]
- %1 = load double* null, align 8 ; <double> [#uses=2]
- %2 = fcmp une double %0, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %2, label %bb9.i, label %bb13.i
-
-bb9.i: ; preds = %bb.nph
- %3 = tail call double @asin(double 0.000000e+00) nounwind readonly ; <double> [#uses=0]
- %4 = fdiv double 1.000000e+00, %1 ; <double> [#uses=1]
- %5 = fmul double %4, 0.000000e+00 ; <double> [#uses=1]
- %6 = tail call double @asin(double %5) nounwind readonly ; <double> [#uses=0]
- unreachable
-
-bb13.i: ; preds = %bb.nph
- %7 = fdiv double 1.000000e+00, %1 ; <double> [#uses=1]
- %8 = tail call double @sin(double 0.000000e+00) nounwind readonly ; <double> [#uses=1]
- %9 = fmul double %7, %8 ; <double> [#uses=1]
- %10 = tail call double @asin(double %9) nounwind readonly ; <double> [#uses=0]
- unreachable
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pre-split7.ll b/libclamav/c++/llvm/test/CodeGen/X86/pre-split7.ll
deleted file mode 100644
index 0b81c0b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pre-split7.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -pre-alloc-split
-
-@object_distance = external global double, align 8 ; <double*> [#uses=1]
-@axis_slope_angle = external global double, align 8 ; <double*> [#uses=1]
-@current_surfaces.b = external global i1 ; <i1*> [#uses=1]
-
-declare double @sin(double) nounwind readonly
-
-declare double @asin(double) nounwind readonly
-
-declare double @tan(double) nounwind readonly
-
-define fastcc void @trace_line(i32 %line) nounwind {
-entry:
- %.b3 = load i1* @current_surfaces.b ; <i1> [#uses=1]
- br i1 %.b3, label %bb, label %return
-
-bb: ; preds = %bb, %entry
- %0 = tail call double @asin(double 0.000000e+00) nounwind readonly ; <double> [#uses=1]
- %1 = fadd double 0.000000e+00, %0 ; <double> [#uses=2]
- %2 = tail call double @asin(double 0.000000e+00) nounwind readonly ; <double> [#uses=1]
- %3 = fsub double %1, %2 ; <double> [#uses=2]
- store double %3, double* @axis_slope_angle, align 8
- %4 = fdiv double %1, 2.000000e+00 ; <double> [#uses=1]
- %5 = tail call double @sin(double %4) nounwind readonly ; <double> [#uses=1]
- %6 = fmul double 0.000000e+00, %5 ; <double> [#uses=1]
- %7 = tail call double @tan(double %3) nounwind readonly ; <double> [#uses=0]
- %8 = fadd double 0.000000e+00, %6 ; <double> [#uses=1]
- store double %8, double* @object_distance, align 8
- br label %bb
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pre-split8.ll b/libclamav/c++/llvm/test/CodeGen/X86/pre-split8.ll
deleted file mode 100644
index 0684bd0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pre-split8.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -pre-alloc-split -stats |& \
-; RUN: grep {pre-alloc-split} | grep {Number of intervals split} | grep 1
-
-@current_surfaces.b = external global i1 ; <i1*> [#uses=1]
-
-declare double @asin(double) nounwind readonly
-
-declare double @tan(double) nounwind readonly
-
-define fastcc void @trace_line(i32 %line) nounwind {
-entry:
- %.b3 = load i1* @current_surfaces.b ; <i1> [#uses=1]
- br i1 %.b3, label %bb, label %return
-
-bb: ; preds = %bb9.i, %entry
- %.rle4 = phi double [ %7, %bb9.i ], [ 0.000000e+00, %entry ] ; <double> [#uses=1]
- %0 = load double* null, align 8 ; <double> [#uses=3]
- %1 = fcmp une double %0, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %1, label %bb9.i, label %bb13.i
-
-bb9.i: ; preds = %bb
- %2 = fsub double %.rle4, %0 ; <double> [#uses=0]
- %3 = tail call double @asin(double %.rle4) nounwind readonly ; <double> [#uses=0]
- %4 = fmul double 0.000000e+00, %0 ; <double> [#uses=1]
- %5 = tail call double @tan(double 0.000000e+00) nounwind readonly ; <double> [#uses=0]
- %6 = fmul double %4, 0.000000e+00 ; <double> [#uses=1]
- %7 = fadd double %6, 0.000000e+00 ; <double> [#uses=1]
- br i1 false, label %return, label %bb
-
-bb13.i: ; preds = %bb
- unreachable
-
-return: ; preds = %bb9.i, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/pre-split9.ll b/libclamav/c++/llvm/test/CodeGen/X86/pre-split9.ll
deleted file mode 100644
index 86dda33..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/pre-split9.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -pre-alloc-split -stats |& \
-; RUN: grep {pre-alloc-split} | grep {Number of intervals split} | grep 1
-
-@current_surfaces.b = external global i1 ; <i1*> [#uses=1]
-
-declare double @sin(double) nounwind readonly
-
-declare double @asin(double) nounwind readonly
-
-declare double @tan(double) nounwind readonly
-
-define fastcc void @trace_line(i32 %line) nounwind {
-entry:
- %.b3 = load i1* @current_surfaces.b ; <i1> [#uses=1]
- br i1 %.b3, label %bb, label %return
-
-bb: ; preds = %bb9.i, %entry
- %.rle4 = phi double [ %8, %bb9.i ], [ 0.000000e+00, %entry ] ; <double> [#uses=1]
- %0 = load double* null, align 8 ; <double> [#uses=3]
- %1 = fcmp une double %0, 0.000000e+00 ; <i1> [#uses=1]
- br i1 %1, label %bb9.i, label %bb13.i
-
-bb9.i: ; preds = %bb
- %2 = fsub double %.rle4, %0 ; <double> [#uses=0]
- %3 = tail call double @asin(double %.rle4) nounwind readonly ; <double> [#uses=0]
- %4 = tail call double @sin(double 0.000000e+00) nounwind readonly ; <double> [#uses=1]
- %5 = fmul double %4, %0 ; <double> [#uses=1]
- %6 = tail call double @tan(double 0.000000e+00) nounwind readonly ; <double> [#uses=0]
- %7 = fmul double %5, 0.000000e+00 ; <double> [#uses=1]
- %8 = fadd double %7, 0.000000e+00 ; <double> [#uses=1]
- br i1 false, label %return, label %bb
-
-bb13.i: ; preds = %bb
- unreachable
-
-return: ; preds = %bb9.i, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/prefetch.ll b/libclamav/c++/llvm/test/CodeGen/X86/prefetch.ll
deleted file mode 100644
index fac5915..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/prefetch.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse > %t
-; RUN: grep prefetchnta %t
-; RUN: grep prefetcht0 %t
-; RUN: grep prefetcht1 %t
-; RUN: grep prefetcht2 %t
-
-define void @t(i8* %ptr) nounwind {
-entry:
- tail call void @llvm.prefetch( i8* %ptr, i32 0, i32 1 )
- tail call void @llvm.prefetch( i8* %ptr, i32 0, i32 2 )
- tail call void @llvm.prefetch( i8* %ptr, i32 0, i32 3 )
- tail call void @llvm.prefetch( i8* %ptr, i32 0, i32 0 )
- ret void
-}
-
-declare void @llvm.prefetch(i8*, i32, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/private-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/private-2.ll
deleted file mode 100644
index 8aa744e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/private-2.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | grep L__ZZ20
-; Quote should be outside of private prefix.
-; rdar://6855766x
-
- %struct.A = type { i32*, i32 }
-@"_ZZ20-[Example1 whatever]E4C.91" = private constant %struct.A { i32* null, i32 1 } ; <%struct.A*> [#uses=1]
-
-define internal i32* @"\01-[Example1 whatever]"() nounwind optsize ssp {
-entry:
- %0 = getelementptr %struct.A* @"_ZZ20-[Example1 whatever]E4C.91", i64 0, i32 0 ; <i32**> [#uses=1]
- %1 = load i32** %0, align 8 ; <i32*> [#uses=1]
- ret i32* %1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/private.ll b/libclamav/c++/llvm/test/CodeGen/X86/private.ll
deleted file mode 100644
index f52f8c7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/private.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; Test to make sure that the 'private' is used correctly.
-;
-; RUN: llc < %s -mtriple=x86_64-pc-linux | grep .Lfoo:
-; RUN: llc < %s -mtriple=x86_64-pc-linux | grep call.*\.Lfoo
-; RUN: llc < %s -mtriple=x86_64-pc-linux | grep .Lbaz:
-; RUN: llc < %s -mtriple=x86_64-pc-linux | grep movl.*\.Lbaz
-
-declare void @foo()
-
-define private void @foo() {
- ret void
-}
-
-@baz = private global i32 4
-
-define i32 @bar() {
- call void @foo()
- %1 = load i32* @baz, align 4
- ret i32 %1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/ptrtoint-constexpr.ll b/libclamav/c++/llvm/test/CodeGen/X86/ptrtoint-constexpr.ll
deleted file mode 100644
index d1cb34b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/ptrtoint-constexpr.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -mtriple=i386-linux | FileCheck %s
- %union.x = type { i64 }
-
-; CHECK: .globl r
-; CHECK: r:
-; CHECK: .quad r&4294967295
-
-@r = global %union.x { i64 ptrtoint (%union.x* @r to i64) }, align 4
-
-; CHECK: .globl x
-; CHECK: x:
-; CHECK: .quad ((0+1)&4294967295)*3
-
-@x = global i64 mul (i64 3, i64 ptrtoint (i2* getelementptr (i2* null, i64 1) to i64))
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/rdtsc.ll b/libclamav/c++/llvm/test/CodeGen/X86/rdtsc.ll
deleted file mode 100644
index f21a44c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/rdtsc.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 | grep rdtsc
-; RUN: llc < %s -march=x86-64 | grep rdtsc
-declare i64 @llvm.readcyclecounter()
-
-define i64 @foo() {
- %tmp.1 = call i64 @llvm.readcyclecounter( ) ; <i64> [#uses=1]
- ret i64 %tmp.1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/red-zone.ll b/libclamav/c++/llvm/test/CodeGen/X86/red-zone.ll
deleted file mode 100644
index 1ffb4e3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/red-zone.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
-; First without noredzone.
-; CHECK: f0:
-; CHECK: -4(%rsp)
-; CHECK: -4(%rsp)
-; CHECK: ret
-define x86_fp80 @f0(float %f) nounwind readnone {
-entry:
- %0 = fpext float %f to x86_fp80 ; <x86_fp80> [#uses=1]
- ret x86_fp80 %0
-}
-
-; Then with noredzone.
-; CHECK: f1:
-; CHECK: subq $4, %rsp
-; CHECK: (%rsp)
-; CHECK: (%rsp)
-; CHECK: addq $4, %rsp
-; CHECK: ret
-define x86_fp80 @f1(float %f) nounwind readnone noredzone {
-entry:
- %0 = fpext float %f to x86_fp80 ; <x86_fp80> [#uses=1]
- ret x86_fp80 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/red-zone2.ll b/libclamav/c++/llvm/test/CodeGen/X86/red-zone2.ll
deleted file mode 100644
index 9557d17..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/red-zone2.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: grep subq %t | count 1
-; RUN: grep addq %t | count 1
-
-define x86_fp80 @f0(float %f) nounwind readnone noredzone {
-entry:
- %0 = fpext float %f to x86_fp80 ; <x86_fp80> [#uses=1]
- ret x86_fp80 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/regpressure.ll b/libclamav/c++/llvm/test/CodeGen/X86/regpressure.ll
deleted file mode 100644
index e0b5f7a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/regpressure.ll
+++ /dev/null
@@ -1,114 +0,0 @@
-;; Both functions in this testcase should codegen to the same function, and
-;; neither of them should require spilling anything to the stack.
-
-; RUN: llc < %s -march=x86 -stats |& \
-; RUN: not grep {Number of register spills}
-
-;; This can be compiled to use three registers if the loads are not
-;; folded into the multiplies, 2 registers otherwise.
-
-define i32 @regpressure1(i32* %P) {
- %A = load i32* %P ; <i32> [#uses=1]
- %Bp = getelementptr i32* %P, i32 1 ; <i32*> [#uses=1]
- %B = load i32* %Bp ; <i32> [#uses=1]
- %s1 = mul i32 %A, %B ; <i32> [#uses=1]
- %Cp = getelementptr i32* %P, i32 2 ; <i32*> [#uses=1]
- %C = load i32* %Cp ; <i32> [#uses=1]
- %s2 = mul i32 %s1, %C ; <i32> [#uses=1]
- %Dp = getelementptr i32* %P, i32 3 ; <i32*> [#uses=1]
- %D = load i32* %Dp ; <i32> [#uses=1]
- %s3 = mul i32 %s2, %D ; <i32> [#uses=1]
- %Ep = getelementptr i32* %P, i32 4 ; <i32*> [#uses=1]
- %E = load i32* %Ep ; <i32> [#uses=1]
- %s4 = mul i32 %s3, %E ; <i32> [#uses=1]
- %Fp = getelementptr i32* %P, i32 5 ; <i32*> [#uses=1]
- %F = load i32* %Fp ; <i32> [#uses=1]
- %s5 = mul i32 %s4, %F ; <i32> [#uses=1]
- %Gp = getelementptr i32* %P, i32 6 ; <i32*> [#uses=1]
- %G = load i32* %Gp ; <i32> [#uses=1]
- %s6 = mul i32 %s5, %G ; <i32> [#uses=1]
- %Hp = getelementptr i32* %P, i32 7 ; <i32*> [#uses=1]
- %H = load i32* %Hp ; <i32> [#uses=1]
- %s7 = mul i32 %s6, %H ; <i32> [#uses=1]
- %Ip = getelementptr i32* %P, i32 8 ; <i32*> [#uses=1]
- %I = load i32* %Ip ; <i32> [#uses=1]
- %s8 = mul i32 %s7, %I ; <i32> [#uses=1]
- %Jp = getelementptr i32* %P, i32 9 ; <i32*> [#uses=1]
- %J = load i32* %Jp ; <i32> [#uses=1]
- %s9 = mul i32 %s8, %J ; <i32> [#uses=1]
- ret i32 %s9
-}
-
-define i32 @regpressure2(i32* %P) {
- %A = load i32* %P ; <i32> [#uses=1]
- %Bp = getelementptr i32* %P, i32 1 ; <i32*> [#uses=1]
- %B = load i32* %Bp ; <i32> [#uses=1]
- %Cp = getelementptr i32* %P, i32 2 ; <i32*> [#uses=1]
- %C = load i32* %Cp ; <i32> [#uses=1]
- %Dp = getelementptr i32* %P, i32 3 ; <i32*> [#uses=1]
- %D = load i32* %Dp ; <i32> [#uses=1]
- %Ep = getelementptr i32* %P, i32 4 ; <i32*> [#uses=1]
- %E = load i32* %Ep ; <i32> [#uses=1]
- %Fp = getelementptr i32* %P, i32 5 ; <i32*> [#uses=1]
- %F = load i32* %Fp ; <i32> [#uses=1]
- %Gp = getelementptr i32* %P, i32 6 ; <i32*> [#uses=1]
- %G = load i32* %Gp ; <i32> [#uses=1]
- %Hp = getelementptr i32* %P, i32 7 ; <i32*> [#uses=1]
- %H = load i32* %Hp ; <i32> [#uses=1]
- %Ip = getelementptr i32* %P, i32 8 ; <i32*> [#uses=1]
- %I = load i32* %Ip ; <i32> [#uses=1]
- %Jp = getelementptr i32* %P, i32 9 ; <i32*> [#uses=1]
- %J = load i32* %Jp ; <i32> [#uses=1]
- %s1 = mul i32 %A, %B ; <i32> [#uses=1]
- %s2 = mul i32 %s1, %C ; <i32> [#uses=1]
- %s3 = mul i32 %s2, %D ; <i32> [#uses=1]
- %s4 = mul i32 %s3, %E ; <i32> [#uses=1]
- %s5 = mul i32 %s4, %F ; <i32> [#uses=1]
- %s6 = mul i32 %s5, %G ; <i32> [#uses=1]
- %s7 = mul i32 %s6, %H ; <i32> [#uses=1]
- %s8 = mul i32 %s7, %I ; <i32> [#uses=1]
- %s9 = mul i32 %s8, %J ; <i32> [#uses=1]
- ret i32 %s9
-}
-
-define i32 @regpressure3(i16* %P, i1 %Cond, i32* %Other) {
- %A = load i16* %P ; <i16> [#uses=1]
- %Bp = getelementptr i16* %P, i32 1 ; <i16*> [#uses=1]
- %B = load i16* %Bp ; <i16> [#uses=1]
- %Cp = getelementptr i16* %P, i32 2 ; <i16*> [#uses=1]
- %C = load i16* %Cp ; <i16> [#uses=1]
- %Dp = getelementptr i16* %P, i32 3 ; <i16*> [#uses=1]
- %D = load i16* %Dp ; <i16> [#uses=1]
- %Ep = getelementptr i16* %P, i32 4 ; <i16*> [#uses=1]
- %E = load i16* %Ep ; <i16> [#uses=1]
- %Fp = getelementptr i16* %P, i32 5 ; <i16*> [#uses=1]
- %F = load i16* %Fp ; <i16> [#uses=1]
- %Gp = getelementptr i16* %P, i32 6 ; <i16*> [#uses=1]
- %G = load i16* %Gp ; <i16> [#uses=1]
- %Hp = getelementptr i16* %P, i32 7 ; <i16*> [#uses=1]
- %H = load i16* %Hp ; <i16> [#uses=1]
- %Ip = getelementptr i16* %P, i32 8 ; <i16*> [#uses=1]
- %I = load i16* %Ip ; <i16> [#uses=1]
- %Jp = getelementptr i16* %P, i32 9 ; <i16*> [#uses=1]
- %J = load i16* %Jp ; <i16> [#uses=1]
- %A.upgrd.1 = sext i16 %A to i32 ; <i32> [#uses=1]
- %B.upgrd.2 = sext i16 %B to i32 ; <i32> [#uses=1]
- %D.upgrd.3 = sext i16 %D to i32 ; <i32> [#uses=1]
- %C.upgrd.4 = sext i16 %C to i32 ; <i32> [#uses=1]
- %E.upgrd.5 = sext i16 %E to i32 ; <i32> [#uses=1]
- %F.upgrd.6 = sext i16 %F to i32 ; <i32> [#uses=1]
- %G.upgrd.7 = sext i16 %G to i32 ; <i32> [#uses=1]
- %H.upgrd.8 = sext i16 %H to i32 ; <i32> [#uses=1]
- %I.upgrd.9 = sext i16 %I to i32 ; <i32> [#uses=1]
- %J.upgrd.10 = sext i16 %J to i32 ; <i32> [#uses=1]
- %s1 = add i32 %A.upgrd.1, %B.upgrd.2 ; <i32> [#uses=1]
- %s2 = add i32 %C.upgrd.4, %s1 ; <i32> [#uses=1]
- %s3 = add i32 %D.upgrd.3, %s2 ; <i32> [#uses=1]
- %s4 = add i32 %E.upgrd.5, %s3 ; <i32> [#uses=1]
- %s5 = add i32 %F.upgrd.6, %s4 ; <i32> [#uses=1]
- %s6 = add i32 %G.upgrd.7, %s5 ; <i32> [#uses=1]
- %s7 = add i32 %H.upgrd.8, %s6 ; <i32> [#uses=1]
- %s8 = add i32 %I.upgrd.9, %s7 ; <i32> [#uses=1]
- %s9 = add i32 %J.upgrd.10, %s8 ; <i32> [#uses=1]
- ret i32 %s9
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/rem-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/rem-2.ll
deleted file mode 100644
index 1b2af4b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/rem-2.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep cltd
-
-define i32 @test(i32 %X) nounwind readnone {
-entry:
- %0 = srem i32 41, %X
- ret i32 %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/rem.ll b/libclamav/c++/llvm/test/CodeGen/X86/rem.ll
deleted file mode 100644
index 394070e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/rem.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep div
-
-define i32 @test1(i32 %X) {
- %tmp1 = srem i32 %X, 255 ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
-define i32 @test2(i32 %X) {
- %tmp1 = srem i32 %X, 256 ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
-define i32 @test3(i32 %X) {
- %tmp1 = urem i32 %X, 255 ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
-define i32 @test4(i32 %X) {
- %tmp1 = urem i32 %X, 256 ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/remat-constant.ll b/libclamav/c++/llvm/test/CodeGen/X86/remat-constant.ll
deleted file mode 100644
index 3e81320..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/remat-constant.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-linux -relocation-model=static | grep xmm | count 2
-
-declare void @bar() nounwind
-
-@a = external constant float
-
-declare void @qux(float %f) nounwind
-
-define void @foo() nounwind {
- %f = load float* @a
- call void @bar()
- call void @qux(float %f)
- call void @qux(float %f)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/remat-mov-0.ll b/libclamav/c++/llvm/test/CodeGen/X86/remat-mov-0.ll
deleted file mode 100644
index 5fb445c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/remat-mov-0.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-
-; CodeGen should remat the zero instead of spilling it.
-
-declare void @foo(i64 %p)
-
-; CHECK: bar:
-; CHECK: xorl %edi, %edi
-; CHECK: xorl %edi, %edi
-define void @bar() nounwind {
- call void @foo(i64 0)
- call void @foo(i64 0)
- ret void
-}
-
-; CHECK: bat:
-; CHECK: movq $-1, %rdi
-; CHECK: movq $-1, %rdi
-define void @bat() nounwind {
- call void @foo(i64 -1)
- call void @foo(i64 -1)
- ret void
-}
-
-; CHECK: bau:
-; CHECK: movl $1, %edi
-; CHECK: movl $1, %edi
-define void @bau() nounwind {
- call void @foo(i64 1)
- call void @foo(i64 1)
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/remat-scalar-zero.ll b/libclamav/c++/llvm/test/CodeGen/X86/remat-scalar-zero.ll
deleted file mode 100644
index 2da96ab..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/remat-scalar-zero.ll
+++ /dev/null
@@ -1,95 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu > %t
-; RUN: not grep xor %t
-; RUN: not grep movap %t
-; RUN: grep {\\.quad.*0} %t
-
-; Remat should be able to fold the zero constant into the div instructions
-; as a constant-pool load.
-
-define void @foo(double* nocapture %x, double* nocapture %y) nounwind {
-entry:
- %tmp1 = load double* %x ; <double> [#uses=1]
- %arrayidx4 = getelementptr inbounds double* %x, i64 1 ; <double*> [#uses=1]
- %tmp5 = load double* %arrayidx4 ; <double> [#uses=1]
- %arrayidx8 = getelementptr inbounds double* %x, i64 2 ; <double*> [#uses=1]
- %tmp9 = load double* %arrayidx8 ; <double> [#uses=1]
- %arrayidx12 = getelementptr inbounds double* %x, i64 3 ; <double*> [#uses=1]
- %tmp13 = load double* %arrayidx12 ; <double> [#uses=1]
- %arrayidx16 = getelementptr inbounds double* %x, i64 4 ; <double*> [#uses=1]
- %tmp17 = load double* %arrayidx16 ; <double> [#uses=1]
- %arrayidx20 = getelementptr inbounds double* %x, i64 5 ; <double*> [#uses=1]
- %tmp21 = load double* %arrayidx20 ; <double> [#uses=1]
- %arrayidx24 = getelementptr inbounds double* %x, i64 6 ; <double*> [#uses=1]
- %tmp25 = load double* %arrayidx24 ; <double> [#uses=1]
- %arrayidx28 = getelementptr inbounds double* %x, i64 7 ; <double*> [#uses=1]
- %tmp29 = load double* %arrayidx28 ; <double> [#uses=1]
- %arrayidx32 = getelementptr inbounds double* %x, i64 8 ; <double*> [#uses=1]
- %tmp33 = load double* %arrayidx32 ; <double> [#uses=1]
- %arrayidx36 = getelementptr inbounds double* %x, i64 9 ; <double*> [#uses=1]
- %tmp37 = load double* %arrayidx36 ; <double> [#uses=1]
- %arrayidx40 = getelementptr inbounds double* %x, i64 10 ; <double*> [#uses=1]
- %tmp41 = load double* %arrayidx40 ; <double> [#uses=1]
- %arrayidx44 = getelementptr inbounds double* %x, i64 11 ; <double*> [#uses=1]
- %tmp45 = load double* %arrayidx44 ; <double> [#uses=1]
- %arrayidx48 = getelementptr inbounds double* %x, i64 12 ; <double*> [#uses=1]
- %tmp49 = load double* %arrayidx48 ; <double> [#uses=1]
- %arrayidx52 = getelementptr inbounds double* %x, i64 13 ; <double*> [#uses=1]
- %tmp53 = load double* %arrayidx52 ; <double> [#uses=1]
- %arrayidx56 = getelementptr inbounds double* %x, i64 14 ; <double*> [#uses=1]
- %tmp57 = load double* %arrayidx56 ; <double> [#uses=1]
- %arrayidx60 = getelementptr inbounds double* %x, i64 15 ; <double*> [#uses=1]
- %tmp61 = load double* %arrayidx60 ; <double> [#uses=1]
- %arrayidx64 = getelementptr inbounds double* %x, i64 16 ; <double*> [#uses=1]
- %tmp65 = load double* %arrayidx64 ; <double> [#uses=1]
- %div = fdiv double %tmp1, 0.000000e+00 ; <double> [#uses=1]
- store double %div, double* %y
- %div70 = fdiv double %tmp5, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx72 = getelementptr inbounds double* %y, i64 1 ; <double*> [#uses=1]
- store double %div70, double* %arrayidx72
- %div74 = fdiv double %tmp9, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx76 = getelementptr inbounds double* %y, i64 2 ; <double*> [#uses=1]
- store double %div74, double* %arrayidx76
- %div78 = fdiv double %tmp13, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx80 = getelementptr inbounds double* %y, i64 3 ; <double*> [#uses=1]
- store double %div78, double* %arrayidx80
- %div82 = fdiv double %tmp17, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx84 = getelementptr inbounds double* %y, i64 4 ; <double*> [#uses=1]
- store double %div82, double* %arrayidx84
- %div86 = fdiv double %tmp21, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx88 = getelementptr inbounds double* %y, i64 5 ; <double*> [#uses=1]
- store double %div86, double* %arrayidx88
- %div90 = fdiv double %tmp25, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx92 = getelementptr inbounds double* %y, i64 6 ; <double*> [#uses=1]
- store double %div90, double* %arrayidx92
- %div94 = fdiv double %tmp29, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx96 = getelementptr inbounds double* %y, i64 7 ; <double*> [#uses=1]
- store double %div94, double* %arrayidx96
- %div98 = fdiv double %tmp33, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx100 = getelementptr inbounds double* %y, i64 8 ; <double*> [#uses=1]
- store double %div98, double* %arrayidx100
- %div102 = fdiv double %tmp37, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx104 = getelementptr inbounds double* %y, i64 9 ; <double*> [#uses=1]
- store double %div102, double* %arrayidx104
- %div106 = fdiv double %tmp41, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx108 = getelementptr inbounds double* %y, i64 10 ; <double*> [#uses=1]
- store double %div106, double* %arrayidx108
- %div110 = fdiv double %tmp45, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx112 = getelementptr inbounds double* %y, i64 11 ; <double*> [#uses=1]
- store double %div110, double* %arrayidx112
- %div114 = fdiv double %tmp49, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx116 = getelementptr inbounds double* %y, i64 12 ; <double*> [#uses=1]
- store double %div114, double* %arrayidx116
- %div118 = fdiv double %tmp53, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx120 = getelementptr inbounds double* %y, i64 13 ; <double*> [#uses=1]
- store double %div118, double* %arrayidx120
- %div122 = fdiv double %tmp57, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx124 = getelementptr inbounds double* %y, i64 14 ; <double*> [#uses=1]
- store double %div122, double* %arrayidx124
- %div126 = fdiv double %tmp61, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx128 = getelementptr inbounds double* %y, i64 15 ; <double*> [#uses=1]
- store double %div126, double* %arrayidx128
- %div130 = fdiv double %tmp65, 0.000000e+00 ; <double> [#uses=1]
- %arrayidx132 = getelementptr inbounds double* %y, i64 16 ; <double*> [#uses=1]
- store double %div130, double* %arrayidx132
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/ret-addr.ll b/libclamav/c++/llvm/test/CodeGen/X86/ret-addr.ll
deleted file mode 100644
index b7b57ab..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/ret-addr.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -disable-fp-elim -march=x86 | not grep xor
-; RUN: llc < %s -disable-fp-elim -march=x86-64 | not grep xor
-
-define i8* @h() nounwind readnone optsize {
-entry:
- %0 = tail call i8* @llvm.returnaddress(i32 2) ; <i8*> [#uses=1]
- ret i8* %0
-}
-
-declare i8* @llvm.returnaddress(i32) nounwind readnone
-
-define i8* @g() nounwind readnone optsize {
-entry:
- %0 = tail call i8* @llvm.returnaddress(i32 1) ; <i8*> [#uses=1]
- ret i8* %0
-}
-
-define i8* @f() nounwind readnone optsize {
-entry:
- %0 = tail call i8* @llvm.returnaddress(i32 0) ; <i8*> [#uses=1]
- ret i8* %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/ret-i64-0.ll b/libclamav/c++/llvm/test/CodeGen/X86/ret-i64-0.ll
deleted file mode 100644
index bca0f05..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/ret-i64-0.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s -march=x86 | grep xor | count 2
-
-define i64 @foo() nounwind {
- ret i64 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/ret-mmx.ll b/libclamav/c++/llvm/test/CodeGen/X86/ret-mmx.ll
deleted file mode 100644
index 04b57dd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/ret-mmx.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2
-; rdar://6602459
-
-@g_v1di = external global <1 x i64>
-
-define void @t1() nounwind {
-entry:
- %call = call <1 x i64> @return_v1di() ; <<1 x i64>> [#uses=0]
- store <1 x i64> %call, <1 x i64>* @g_v1di
- ret void
-}
-
-declare <1 x i64> @return_v1di()
-
-define <1 x i64> @t2() nounwind {
- ret <1 x i64> <i64 1>
-}
-
-define <2 x i32> @t3() nounwind {
- ret <2 x i32> <i32 1, i32 0>
-}
-
-define double @t4() nounwind {
- ret double bitcast (<2 x i32> <i32 1, i32 0> to double)
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/rip-rel-address.ll b/libclamav/c++/llvm/test/CodeGen/X86/rip-rel-address.ll
deleted file mode 100644
index 24ff07b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/rip-rel-address.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86-64 -relocation-model=pic -mtriple=x86_64-apple-darwin10 | FileCheck %s -check-prefix=PIC64
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -relocation-model=static | FileCheck %s -check-prefix=STATIC64
-
-; Use %rip-relative addressing even in static mode on x86-64, because
-; it has a smaller encoding.
-
-@a = internal global double 3.4
-define double @foo() nounwind {
- %a = load double* @a
- ret double %a
-
-; PIC64: movsd _a(%rip), %xmm0
-; STATIC64: movsd a(%rip), %xmm0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/rodata-relocs.ll b/libclamav/c++/llvm/test/CodeGen/X86/rodata-relocs.ll
deleted file mode 100644
index 276f8bb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/rodata-relocs.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -relocation-model=static | grep rodata | count 3
-; RUN: llc < %s -relocation-model=static | grep -F "rodata.cst" | count 2
-; RUN: llc < %s -relocation-model=pic | grep rodata | count 2
-; RUN: llc < %s -relocation-model=pic | grep -F ".data.rel.ro" | count 2
-; RUN: llc < %s -relocation-model=pic | grep -F ".data.rel.ro.local" | count 1
-; RUN: llc < %s -relocation-model=pic | grep -F ".data.rel" | count 4
-; RUN: llc < %s -relocation-model=pic | grep -F ".data.rel.local" | count 1
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-@a = internal constant [2 x i32] [i32 1, i32 2]
-@a1 = constant [2 x i32] [i32 1, i32 2]
-@e = internal constant [2 x [2 x i32]] [[2 x i32] [i32 1, i32 2], [2 x i32] [i32 3, i32 4]], align 16
-@e1 = constant [2 x [2 x i32]] [[2 x i32] [i32 1, i32 2], [2 x i32] [i32 3, i32 4]], align 16
-@p = constant i8* bitcast ([2 x i32]* @a to i8*)
-@t = constant i8* bitcast ([2 x [2 x i32]]* @e to i8*)
-@p1 = constant i8* bitcast ([2 x i32]* @a1 to i8*)
-@t1 = constant i8* bitcast ([2 x [2 x i32]]* @e1 to i8*)
-@p2 = internal global i8* bitcast([2 x i32]* @a1 to i8*)
-@t2 = internal global i8* bitcast([2 x [2 x i32]]* @e1 to i8*)
-@p3 = internal global i8* bitcast([2 x i32]* @a to i8*)
-@t3 = internal global i8* bitcast([2 x [2 x i32]]* @e to i8*)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/rot16.ll b/libclamav/c++/llvm/test/CodeGen/X86/rot16.ll
deleted file mode 100644
index 42ece47..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/rot16.ll
+++ /dev/null
@@ -1,73 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep rol %t | count 3
-; RUN: grep ror %t | count 1
-; RUN: grep shld %t | count 2
-; RUN: grep shrd %t | count 2
-
-define i16 @foo(i16 %x, i16 %y, i16 %z) nounwind readnone {
-entry:
- %0 = shl i16 %x, %z
- %1 = sub i16 16, %z
- %2 = lshr i16 %x, %1
- %3 = or i16 %2, %0
- ret i16 %3
-}
-
-define i16 @bar(i16 %x, i16 %y, i16 %z) nounwind readnone {
-entry:
- %0 = shl i16 %y, %z
- %1 = sub i16 16, %z
- %2 = lshr i16 %x, %1
- %3 = or i16 %2, %0
- ret i16 %3
-}
-
-define i16 @un(i16 %x, i16 %y, i16 %z) nounwind readnone {
-entry:
- %0 = lshr i16 %x, %z
- %1 = sub i16 16, %z
- %2 = shl i16 %x, %1
- %3 = or i16 %2, %0
- ret i16 %3
-}
-
-define i16 @bu(i16 %x, i16 %y, i16 %z) nounwind readnone {
-entry:
- %0 = lshr i16 %y, %z
- %1 = sub i16 16, %z
- %2 = shl i16 %x, %1
- %3 = or i16 %2, %0
- ret i16 %3
-}
-
-define i16 @xfoo(i16 %x, i16 %y, i16 %z) nounwind readnone {
-entry:
- %0 = lshr i16 %x, 11
- %1 = shl i16 %x, 5
- %2 = or i16 %0, %1
- ret i16 %2
-}
-
-define i16 @xbar(i16 %x, i16 %y, i16 %z) nounwind readnone {
-entry:
- %0 = shl i16 %y, 5
- %1 = lshr i16 %x, 11
- %2 = or i16 %0, %1
- ret i16 %2
-}
-
-define i16 @xun(i16 %x, i16 %y, i16 %z) nounwind readnone {
-entry:
- %0 = lshr i16 %x, 5
- %1 = shl i16 %x, 11
- %2 = or i16 %0, %1
- ret i16 %2
-}
-
-define i16 @xbu(i16 %x, i16 %y, i16 %z) nounwind readnone {
-entry:
- %0 = lshr i16 %y, 5
- %1 = shl i16 %x, 11
- %2 = or i16 %0, %1
- ret i16 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/rot32.ll b/libclamav/c++/llvm/test/CodeGen/X86/rot32.ll
deleted file mode 100644
index 655ed27..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/rot32.ll
+++ /dev/null
@@ -1,73 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep rol %t | count 3
-; RUN: grep ror %t | count 1
-; RUN: grep shld %t | count 2
-; RUN: grep shrd %t | count 2
-
-define i32 @foo(i32 %x, i32 %y, i32 %z) nounwind readnone {
-entry:
- %0 = shl i32 %x, %z
- %1 = sub i32 32, %z
- %2 = lshr i32 %x, %1
- %3 = or i32 %2, %0
- ret i32 %3
-}
-
-define i32 @bar(i32 %x, i32 %y, i32 %z) nounwind readnone {
-entry:
- %0 = shl i32 %y, %z
- %1 = sub i32 32, %z
- %2 = lshr i32 %x, %1
- %3 = or i32 %2, %0
- ret i32 %3
-}
-
-define i32 @un(i32 %x, i32 %y, i32 %z) nounwind readnone {
-entry:
- %0 = lshr i32 %x, %z
- %1 = sub i32 32, %z
- %2 = shl i32 %x, %1
- %3 = or i32 %2, %0
- ret i32 %3
-}
-
-define i32 @bu(i32 %x, i32 %y, i32 %z) nounwind readnone {
-entry:
- %0 = lshr i32 %y, %z
- %1 = sub i32 32, %z
- %2 = shl i32 %x, %1
- %3 = or i32 %2, %0
- ret i32 %3
-}
-
-define i32 @xfoo(i32 %x, i32 %y, i32 %z) nounwind readnone {
-entry:
- %0 = lshr i32 %x, 25
- %1 = shl i32 %x, 7
- %2 = or i32 %0, %1
- ret i32 %2
-}
-
-define i32 @xbar(i32 %x, i32 %y, i32 %z) nounwind readnone {
-entry:
- %0 = shl i32 %y, 7
- %1 = lshr i32 %x, 25
- %2 = or i32 %0, %1
- ret i32 %2
-}
-
-define i32 @xun(i32 %x, i32 %y, i32 %z) nounwind readnone {
-entry:
- %0 = lshr i32 %x, 7
- %1 = shl i32 %x, 25
- %2 = or i32 %0, %1
- ret i32 %2
-}
-
-define i32 @xbu(i32 %x, i32 %y, i32 %z) nounwind readnone {
-entry:
- %0 = lshr i32 %y, 7
- %1 = shl i32 %x, 25
- %2 = or i32 %0, %1
- ret i32 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/rot64.ll b/libclamav/c++/llvm/test/CodeGen/X86/rot64.ll
deleted file mode 100644
index 4e082bb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/rot64.ll
+++ /dev/null
@@ -1,73 +0,0 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: grep rol %t | count 3
-; RUN: grep ror %t | count 1
-; RUN: grep shld %t | count 2
-; RUN: grep shrd %t | count 2
-
-define i64 @foo(i64 %x, i64 %y, i64 %z) nounwind readnone {
-entry:
- %0 = shl i64 %x, %z
- %1 = sub i64 64, %z
- %2 = lshr i64 %x, %1
- %3 = or i64 %2, %0
- ret i64 %3
-}
-
-define i64 @bar(i64 %x, i64 %y, i64 %z) nounwind readnone {
-entry:
- %0 = shl i64 %y, %z
- %1 = sub i64 64, %z
- %2 = lshr i64 %x, %1
- %3 = or i64 %2, %0
- ret i64 %3
-}
-
-define i64 @un(i64 %x, i64 %y, i64 %z) nounwind readnone {
-entry:
- %0 = lshr i64 %x, %z
- %1 = sub i64 64, %z
- %2 = shl i64 %x, %1
- %3 = or i64 %2, %0
- ret i64 %3
-}
-
-define i64 @bu(i64 %x, i64 %y, i64 %z) nounwind readnone {
-entry:
- %0 = lshr i64 %y, %z
- %1 = sub i64 64, %z
- %2 = shl i64 %x, %1
- %3 = or i64 %2, %0
- ret i64 %3
-}
-
-define i64 @xfoo(i64 %x, i64 %y, i64 %z) nounwind readnone {
-entry:
- %0 = lshr i64 %x, 57
- %1 = shl i64 %x, 7
- %2 = or i64 %0, %1
- ret i64 %2
-}
-
-define i64 @xbar(i64 %x, i64 %y, i64 %z) nounwind readnone {
-entry:
- %0 = shl i64 %y, 7
- %1 = lshr i64 %x, 57
- %2 = or i64 %0, %1
- ret i64 %2
-}
-
-define i64 @xun(i64 %x, i64 %y, i64 %z) nounwind readnone {
-entry:
- %0 = lshr i64 %x, 7
- %1 = shl i64 %x, 57
- %2 = or i64 %0, %1
- ret i64 %2
-}
-
-define i64 @xbu(i64 %x, i64 %y, i64 %z) nounwind readnone {
-entry:
- %0 = lshr i64 %y, 7
- %1 = shl i64 %x, 57
- %2 = or i64 %0, %1
- ret i64 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/rotate.ll b/libclamav/c++/llvm/test/CodeGen/X86/rotate.ll
deleted file mode 100644
index 1e20273..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/rotate.ll
+++ /dev/null
@@ -1,100 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
-; RUN: grep {ro\[rl\]} | count 12
-
-define i32 @rotl32(i32 %A, i8 %Amt) {
- %shift.upgrd.1 = zext i8 %Amt to i32 ; <i32> [#uses=1]
- %B = shl i32 %A, %shift.upgrd.1 ; <i32> [#uses=1]
- %Amt2 = sub i8 32, %Amt ; <i8> [#uses=1]
- %shift.upgrd.2 = zext i8 %Amt2 to i32 ; <i32> [#uses=1]
- %C = lshr i32 %A, %shift.upgrd.2 ; <i32> [#uses=1]
- %D = or i32 %B, %C ; <i32> [#uses=1]
- ret i32 %D
-}
-
-define i32 @rotr32(i32 %A, i8 %Amt) {
- %shift.upgrd.3 = zext i8 %Amt to i32 ; <i32> [#uses=1]
- %B = lshr i32 %A, %shift.upgrd.3 ; <i32> [#uses=1]
- %Amt2 = sub i8 32, %Amt ; <i8> [#uses=1]
- %shift.upgrd.4 = zext i8 %Amt2 to i32 ; <i32> [#uses=1]
- %C = shl i32 %A, %shift.upgrd.4 ; <i32> [#uses=1]
- %D = or i32 %B, %C ; <i32> [#uses=1]
- ret i32 %D
-}
-
-define i32 @rotli32(i32 %A) {
- %B = shl i32 %A, 5 ; <i32> [#uses=1]
- %C = lshr i32 %A, 27 ; <i32> [#uses=1]
- %D = or i32 %B, %C ; <i32> [#uses=1]
- ret i32 %D
-}
-
-define i32 @rotri32(i32 %A) {
- %B = lshr i32 %A, 5 ; <i32> [#uses=1]
- %C = shl i32 %A, 27 ; <i32> [#uses=1]
- %D = or i32 %B, %C ; <i32> [#uses=1]
- ret i32 %D
-}
-
-define i16 @rotl16(i16 %A, i8 %Amt) {
- %shift.upgrd.5 = zext i8 %Amt to i16 ; <i16> [#uses=1]
- %B = shl i16 %A, %shift.upgrd.5 ; <i16> [#uses=1]
- %Amt2 = sub i8 16, %Amt ; <i8> [#uses=1]
- %shift.upgrd.6 = zext i8 %Amt2 to i16 ; <i16> [#uses=1]
- %C = lshr i16 %A, %shift.upgrd.6 ; <i16> [#uses=1]
- %D = or i16 %B, %C ; <i16> [#uses=1]
- ret i16 %D
-}
-
-define i16 @rotr16(i16 %A, i8 %Amt) {
- %shift.upgrd.7 = zext i8 %Amt to i16 ; <i16> [#uses=1]
- %B = lshr i16 %A, %shift.upgrd.7 ; <i16> [#uses=1]
- %Amt2 = sub i8 16, %Amt ; <i8> [#uses=1]
- %shift.upgrd.8 = zext i8 %Amt2 to i16 ; <i16> [#uses=1]
- %C = shl i16 %A, %shift.upgrd.8 ; <i16> [#uses=1]
- %D = or i16 %B, %C ; <i16> [#uses=1]
- ret i16 %D
-}
-
-define i16 @rotli16(i16 %A) {
- %B = shl i16 %A, 5 ; <i16> [#uses=1]
- %C = lshr i16 %A, 11 ; <i16> [#uses=1]
- %D = or i16 %B, %C ; <i16> [#uses=1]
- ret i16 %D
-}
-
-define i16 @rotri16(i16 %A) {
- %B = lshr i16 %A, 5 ; <i16> [#uses=1]
- %C = shl i16 %A, 11 ; <i16> [#uses=1]
- %D = or i16 %B, %C ; <i16> [#uses=1]
- ret i16 %D
-}
-
-define i8 @rotl8(i8 %A, i8 %Amt) {
- %B = shl i8 %A, %Amt ; <i8> [#uses=1]
- %Amt2 = sub i8 8, %Amt ; <i8> [#uses=1]
- %C = lshr i8 %A, %Amt2 ; <i8> [#uses=1]
- %D = or i8 %B, %C ; <i8> [#uses=1]
- ret i8 %D
-}
-
-define i8 @rotr8(i8 %A, i8 %Amt) {
- %B = lshr i8 %A, %Amt ; <i8> [#uses=1]
- %Amt2 = sub i8 8, %Amt ; <i8> [#uses=1]
- %C = shl i8 %A, %Amt2 ; <i8> [#uses=1]
- %D = or i8 %B, %C ; <i8> [#uses=1]
- ret i8 %D
-}
-
-define i8 @rotli8(i8 %A) {
- %B = shl i8 %A, 5 ; <i8> [#uses=1]
- %C = lshr i8 %A, 3 ; <i8> [#uses=1]
- %D = or i8 %B, %C ; <i8> [#uses=1]
- ret i8 %D
-}
-
-define i8 @rotri8(i8 %A) {
- %B = lshr i8 %A, 5 ; <i8> [#uses=1]
- %C = shl i8 %A, 3 ; <i8> [#uses=1]
- %D = or i8 %B, %C ; <i8> [#uses=1]
- ret i8 %D
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/rotate2.ll b/libclamav/c++/llvm/test/CodeGen/X86/rotate2.ll
deleted file mode 100644
index 2eea399..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/rotate2.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep rol | count 2
-
-define i64 @test1(i64 %x) nounwind {
-entry:
- %tmp2 = lshr i64 %x, 55 ; <i64> [#uses=1]
- %tmp4 = shl i64 %x, 9 ; <i64> [#uses=1]
- %tmp5 = or i64 %tmp2, %tmp4 ; <i64> [#uses=1]
- ret i64 %tmp5
-}
-
-define i64 @test2(i32 %x) nounwind {
-entry:
- %tmp2 = lshr i32 %x, 22 ; <i32> [#uses=1]
- %tmp4 = shl i32 %x, 10 ; <i32> [#uses=1]
- %tmp5 = or i32 %tmp2, %tmp4 ; <i32> [#uses=1]
- %tmp56 = zext i32 %tmp5 to i64 ; <i64> [#uses=1]
- ret i64 %tmp56
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/scalar-extract.ll b/libclamav/c++/llvm/test/CodeGen/X86/scalar-extract.ll
deleted file mode 100644
index 2845838..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/scalar-extract.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx -o %t
-; RUN: not grep movq %t
-
-; Check that widening doesn't introduce a mmx register in this case when
-; a simple load/store would suffice.
-
-define void @foo(<2 x i16>* %A, <2 x i16>* %B) {
-entry:
- %tmp1 = load <2 x i16>* %A ; <<2 x i16>> [#uses=1]
- store <2 x i16> %tmp1, <2 x i16>* %B
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/scalar-min-max-fill-operand.ll b/libclamav/c++/llvm/test/CodeGen/X86/scalar-min-max-fill-operand.ll
deleted file mode 100644
index fe40758..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/scalar-min-max-fill-operand.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep min | count 1
-; RUN: llc < %s -march=x86-64 | grep max | count 1
-; RUN: llc < %s -march=x86-64 | grep mov | count 2
-
-declare float @bar()
-
-define float @foo(float %a) nounwind
-{
- %s = call float @bar()
- %t = fcmp olt float %s, %a
- %u = select i1 %t, float %s, float %a
- ret float %u
-}
-define float @hem(float %a) nounwind
-{
- %s = call float @bar()
- %t = fcmp ogt float %s, %a
- %u = select i1 %t, float %s, float %a
- ret float %u
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/scalar_sse_minmax.ll b/libclamav/c++/llvm/test/CodeGen/X86/scalar_sse_minmax.ll
deleted file mode 100644
index bc4ab5d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/scalar_sse_minmax.ll
+++ /dev/null
@@ -1,44 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse,+sse2 | \
-; RUN: grep mins | count 3
-; RUN: llc < %s -march=x86 -mattr=+sse,+sse2 | \
-; RUN: grep maxs | count 2
-
-declare i1 @llvm.isunordered.f64(double, double)
-
-declare i1 @llvm.isunordered.f32(float, float)
-
-define float @min1(float %x, float %y) {
- %tmp = fcmp olt float %x, %y ; <i1> [#uses=1]
- %retval = select i1 %tmp, float %x, float %y ; <float> [#uses=1]
- ret float %retval
-}
-
-define double @min2(double %x, double %y) {
- %tmp = fcmp olt double %x, %y ; <i1> [#uses=1]
- %retval = select i1 %tmp, double %x, double %y ; <double> [#uses=1]
- ret double %retval
-}
-
-define float @max1(float %x, float %y) {
- %tmp = fcmp oge float %x, %y ; <i1> [#uses=1]
- %tmp2 = fcmp uno float %x, %y ; <i1> [#uses=1]
- %tmp3 = or i1 %tmp2, %tmp ; <i1> [#uses=1]
- %retval = select i1 %tmp3, float %x, float %y ; <float> [#uses=1]
- ret float %retval
-}
-
-define double @max2(double %x, double %y) {
- %tmp = fcmp oge double %x, %y ; <i1> [#uses=1]
- %tmp2 = fcmp uno double %x, %y ; <i1> [#uses=1]
- %tmp3 = or i1 %tmp2, %tmp ; <i1> [#uses=1]
- %retval = select i1 %tmp3, double %x, double %y ; <double> [#uses=1]
- ret double %retval
-}
-
-define <4 x float> @min3(float %tmp37) {
- %tmp375 = insertelement <4 x float> undef, float %tmp37, i32 0 ; <<4 x float>> [#uses=1]
- %tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp375, <4 x float> < float 6.553500e+04, float undef, float undef, float undef > ) ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp48
-}
-
-declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/scalar_widen_div.ll b/libclamav/c++/llvm/test/CodeGen/X86/scalar_widen_div.ll
deleted file mode 100644
index 77f320f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/scalar_widen_div.ll
+++ /dev/null
@@ -1,183 +0,0 @@
-; RUN: llc < %s -disable-mmx -march=x86-64 -mattr=+sse42 | FileCheck %s
-
-; Verify when widening a divide/remainder operation, we only generate a
-; divide/rem per element since divide/remainder can trap.
-
-define void @vectorDiv (<2 x i32> addrspace(1)* %nsource, <2 x i32> addrspace(1)* %dsource, <2 x i32> addrspace(1)* %qdest) nounwind {
-; CHECK: idivl
-; CHECK: idivl
-; CHECK-NOT: idivl
-; CHECK: ret
-entry:
- %nsource.addr = alloca <2 x i32> addrspace(1)*, align 4
- %dsource.addr = alloca <2 x i32> addrspace(1)*, align 4
- %qdest.addr = alloca <2 x i32> addrspace(1)*, align 4
- %index = alloca i32, align 4
- store <2 x i32> addrspace(1)* %nsource, <2 x i32> addrspace(1)** %nsource.addr
- store <2 x i32> addrspace(1)* %dsource, <2 x i32> addrspace(1)** %dsource.addr
- store <2 x i32> addrspace(1)* %qdest, <2 x i32> addrspace(1)** %qdest.addr
- %tmp = load <2 x i32> addrspace(1)** %qdest.addr
- %tmp1 = load i32* %index
- %arrayidx = getelementptr <2 x i32> addrspace(1)* %tmp, i32 %tmp1
- %tmp2 = load <2 x i32> addrspace(1)** %nsource.addr
- %tmp3 = load i32* %index
- %arrayidx4 = getelementptr <2 x i32> addrspace(1)* %tmp2, i32 %tmp3
- %tmp5 = load <2 x i32> addrspace(1)* %arrayidx4
- %tmp6 = load <2 x i32> addrspace(1)** %dsource.addr
- %tmp7 = load i32* %index
- %arrayidx8 = getelementptr <2 x i32> addrspace(1)* %tmp6, i32 %tmp7
- %tmp9 = load <2 x i32> addrspace(1)* %arrayidx8
- %tmp10 = sdiv <2 x i32> %tmp5, %tmp9
- store <2 x i32> %tmp10, <2 x i32> addrspace(1)* %arrayidx
- ret void
-}
-
-define <3 x i8> @test_char_div(<3 x i8> %num, <3 x i8> %div) {
-; CHECK: idivb
-; CHECK: idivb
-; CHECK: idivb
-; CHECK-NOT: idivb
-; CHECK: ret
- %div.r = sdiv <3 x i8> %num, %div
- ret <3 x i8> %div.r
-}
-
-define <3 x i8> @test_uchar_div(<3 x i8> %num, <3 x i8> %div) {
-; CHECK: divb
-; CHECK: divb
-; CHECK: divb
-; CHECK-NOT: divb
-; CHECK: ret
- %div.r = udiv <3 x i8> %num, %div
- ret <3 x i8> %div.r
-}
-
-define <5 x i16> @test_short_div(<5 x i16> %num, <5 x i16> %div) {
-; CHECK: idivw
-; CHECK: idivw
-; CHECK: idivw
-; CHECK: idivw
-; CHECK: idivw
-; CHECK-NOT: idivw
-; CHECK: ret
- %div.r = sdiv <5 x i16> %num, %div
- ret <5 x i16> %div.r
-}
-
-define <4 x i16> @test_ushort_div(<4 x i16> %num, <4 x i16> %div) {
-; CHECK: divw
-; CHECK: divw
-; CHECK: divw
-; CHECK: divw
-; CHECK-NOT: divw
-; CHECK: ret
- %div.r = udiv <4 x i16> %num, %div
- ret <4 x i16> %div.r
-}
-
-define <3 x i32> @test_uint_div(<3 x i32> %num, <3 x i32> %div) {
-; CHECK: divl
-; CHECK: divl
-; CHECK: divl
-; CHECK-NOT: divl
-; CHECK: ret
- %div.r = udiv <3 x i32> %num, %div
- ret <3 x i32> %div.r
-}
-
-define <3 x i64> @test_long_div(<3 x i64> %num, <3 x i64> %div) {
-; CHECK: idivq
-; CHECK: idivq
-; CHECK: idivq
-; CHECK-NOT: idivq
-; CHECK: ret
- %div.r = sdiv <3 x i64> %num, %div
- ret <3 x i64> %div.r
-}
-
-define <3 x i64> @test_ulong_div(<3 x i64> %num, <3 x i64> %div) {
-; CHECK: divq
-; CHECK: divq
-; CHECK: divq
-; CHECK-NOT: divq
-; CHECK: ret
- %div.r = udiv <3 x i64> %num, %div
- ret <3 x i64> %div.r
-}
-
-
-define <4 x i8> @test_char_rem(<4 x i8> %num, <4 x i8> %rem) {
-; CHECK: idivb
-; CHECK: idivb
-; CHECK: idivb
-; CHECK: idivb
-; CHECK-NOT: idivb
-; CHECK: ret
- %rem.r = srem <4 x i8> %num, %rem
- ret <4 x i8> %rem.r
-}
-
-define <5 x i16> @test_short_rem(<5 x i16> %num, <5 x i16> %rem) {
-; CHECK: idivw
-; CHECK: idivw
-; CHECK: idivw
-; CHECK: idivw
-; CHECK: idivw
-; CHECK-NOT: idivw
-; CHECK: ret
- %rem.r = srem <5 x i16> %num, %rem
- ret <5 x i16> %rem.r
-}
-
-define <4 x i32> @test_uint_rem(<4 x i32> %num, <4 x i32> %rem) {
-; CHECK: idivl
-; CHECK: idivl
-; CHECK: idivl
-; CHECK: idivl
-; CHECK-NOT: idivl
-; CHECK: ret
- %rem.r = srem <4 x i32> %num, %rem
- ret <4 x i32> %rem.r
-}
-
-
-define <5 x i64> @test_ulong_rem(<5 x i64> %num, <5 x i64> %rem) {
-; CHECK: divq
-; CHECK: divq
-; CHECK: divq
-; CHECK: divq
-; CHECK: divq
-; CHECK-NOT: divq
-; CHECK: ret
- %rem.r = urem <5 x i64> %num, %rem
- ret <5 x i64> %rem.r
-}
-
-define void @test_int_div(<3 x i32>* %dest, <3 x i32>* %old, i32 %n) {
-; CHECK: idivl
-; CHECK: idivl
-; CHECK: idivl
-; CHECK-NOT: idivl
-; CHECK: ret
-entry:
- %cmp13 = icmp sgt i32 %n, 0
- br i1 %cmp13, label %bb.nph, label %for.end
-
-bb.nph:
- br label %for.body
-
-for.body:
- %i.014 = phi i32 [ 0, %bb.nph ], [ %inc, %for.body ]
- %arrayidx11 = getelementptr <3 x i32>* %dest, i32 %i.014
- %tmp4 = load <3 x i32>* %arrayidx11 ; <<3 x i32>> [#uses=1]
- %arrayidx7 = getelementptr inbounds <3 x i32>* %old, i32 %i.014
- %tmp8 = load <3 x i32>* %arrayidx7 ; <<3 x i32>> [#uses=1]
- %div = sdiv <3 x i32> %tmp4, %tmp8
- store <3 x i32> %div, <3 x i32>* %arrayidx11
- %inc = add nsw i32 %i.014, 1
- %exitcond = icmp eq i32 %inc, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/scalarize-bitcast.ll b/libclamav/c++/llvm/test/CodeGen/X86/scalarize-bitcast.ll
deleted file mode 100644
index f6b29ec..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/scalarize-bitcast.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=x86-64
-; PR3886
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "x86_64-pc-linux-gnu"
-
-define void @mmxCombineMaskU(i32* nocapture %src, i32* nocapture %mask) nounwind {
-entry:
- %tmp1 = load i32* %src ; <i32> [#uses=1]
- %0 = insertelement <2 x i32> undef, i32 %tmp1, i32 0 ; <<2 x i32>> [#uses=1]
- %1 = insertelement <2 x i32> %0, i32 0, i32 1 ; <<2 x i32>> [#uses=1]
- %conv.i.i = bitcast <2 x i32> %1 to <1 x i64> ; <<1 x i64>> [#uses=1]
- %tmp2.i.i = extractelement <1 x i64> %conv.i.i, i32 0 ; <i64> [#uses=1]
- %tmp22.i = bitcast i64 %tmp2.i.i to <1 x i64> ; <<1 x i64>> [#uses=1]
- %tmp15.i = extractelement <1 x i64> %tmp22.i, i32 0 ; <i64> [#uses=1]
- %conv.i26.i = bitcast i64 %tmp15.i to <8 x i8> ; <<8 x i8>> [#uses=1]
- %shuffle.i.i = shufflevector <8 x i8> %conv.i26.i, <8 x i8> <i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef>, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11> ; <<8 x i8>> [#uses=1]
- %conv6.i.i = bitcast <8 x i8> %shuffle.i.i to <1 x i64> ; <<1 x i64>> [#uses=1]
- %tmp12.i.i = extractelement <1 x i64> %conv6.i.i, i32 0 ; <i64> [#uses=1]
- %tmp10.i = bitcast i64 %tmp12.i.i to <1 x i64> ; <<1 x i64>> [#uses=1]
- %tmp24.i = extractelement <1 x i64> %tmp10.i, i32 0 ; <i64> [#uses=1]
- %tmp10 = bitcast i64 %tmp24.i to <1 x i64> ; <<1 x i64>> [#uses=1]
- %tmp7 = extractelement <1 x i64> %tmp10, i32 0 ; <i64> [#uses=1]
- %call6 = tail call i32 (...)* @store8888(i64 %tmp7) ; <i32> [#uses=1]
- store i32 %call6, i32* %src
- ret void
-}
-
-declare i32 @store8888(...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/scev-interchange.ll b/libclamav/c++/llvm/test/CodeGen/X86/scev-interchange.ll
deleted file mode 100644
index 81c919f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/scev-interchange.ll
+++ /dev/null
@@ -1,338 +0,0 @@
-; RUN: llc < %s -march=x86-64
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
- %"struct.DataOutBase::GmvFlags" = type { i32 }
- %"struct.FE_DGPNonparametric<3>" = type { [1156 x i8], i32, %"struct.PolynomialSpace<1>" }
- %"struct.FiniteElementData<1>" = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
- %struct.Line = type { [2 x i32] }
- %"struct.PolynomialSpace<1>" = type { %"struct.std::vector<Polynomials::Polynomial<double>,std::allocator<Polynomials::Polynomial<double> > >", i32, %"struct.std::vector<int,std::allocator<int> >", %"struct.std::vector<int,std::allocator<int> >" }
- %"struct.Polynomials::Polynomial<double>" = type { %struct.Subscriptor, %"struct.std::vector<double,std::allocator<double> >" }
- %struct.Subscriptor = type { i32 (...)**, i32, %"struct.std::type_info"* }
- %"struct.TableBase<2,double>" = type { %struct.Subscriptor, double*, i32, %"struct.TableIndices<2>" }
- %"struct.TableIndices<2>" = type { %struct.Line }
- %"struct.std::_Bit_const_iterator" = type { %"struct.std::_Bit_iterator_base" }
- %"struct.std::_Bit_iterator_base" = type { i64*, i32 }
- %"struct.std::_Bvector_base<std::allocator<bool> >" = type { %"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl" }
- %"struct.std::_Bvector_base<std::allocator<bool> >::_Bvector_impl" = type { %"struct.std::_Bit_const_iterator", %"struct.std::_Bit_const_iterator", i64* }
- %"struct.std::_Vector_base<Polynomials::Polynomial<double>,std::allocator<Polynomials::Polynomial<double> > >" = type { %"struct.std::_Vector_base<Polynomials::Polynomial<double>,std::allocator<Polynomials::Polynomial<double> > >::_Vector_impl" }
- %"struct.std::_Vector_base<Polynomials::Polynomial<double>,std::allocator<Polynomials::Polynomial<double> > >::_Vector_impl" = type { %"struct.Polynomials::Polynomial<double>"*, %"struct.Polynomials::Polynomial<double>"*, %"struct.Polynomials::Polynomial<double>"* }
- %"struct.std::_Vector_base<double,std::allocator<double> >" = type { %"struct.std::_Vector_base<double,std::allocator<double> >::_Vector_impl" }
- %"struct.std::_Vector_base<double,std::allocator<double> >::_Vector_impl" = type { double*, double*, double* }
- %"struct.std::_Vector_base<int,std::allocator<int> >" = type { %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl" }
- %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl" = type { i32*, i32*, i32* }
- %"struct.std::_Vector_base<std::vector<bool, std::allocator<bool> >,std::allocator<std::vector<bool, std::allocator<bool> > > >" = type { %"struct.std::_Vector_base<std::vector<bool, std::allocator<bool> >,std::allocator<std::vector<bool, std::allocator<bool> > > >::_Vector_impl" }
- %"struct.std::_Vector_base<std::vector<bool, std::allocator<bool> >,std::allocator<std::vector<bool, std::allocator<bool> > > >::_Vector_impl" = type { %"struct.std::vector<bool,std::allocator<bool> >"*, %"struct.std::vector<bool,std::allocator<bool> >"*, %"struct.std::vector<bool,std::allocator<bool> >"* }
- %"struct.std::type_info" = type { i32 (...)**, i8* }
- %"struct.std::vector<Polynomials::Polynomial<double>,std::allocator<Polynomials::Polynomial<double> > >" = type { %"struct.std::_Vector_base<Polynomials::Polynomial<double>,std::allocator<Polynomials::Polynomial<double> > >" }
- %"struct.std::vector<bool,std::allocator<bool> >" = type { %"struct.std::_Bvector_base<std::allocator<bool> >" }
- %"struct.std::vector<double,std::allocator<double> >" = type { %"struct.std::_Vector_base<double,std::allocator<double> >" }
- %"struct.std::vector<int,std::allocator<int> >" = type { %"struct.std::_Vector_base<int,std::allocator<int> >" }
- %"struct.std::vector<std::vector<bool, std::allocator<bool> >,std::allocator<std::vector<bool, std::allocator<bool> > > >" = type { %"struct.std::_Vector_base<std::vector<bool, std::allocator<bool> >,std::allocator<std::vector<bool, std::allocator<bool> > > >" }
-
-declare void @_Unwind_Resume(i8*)
-
-declare i8* @_Znwm(i64)
-
-declare fastcc void @_ZNSt6vectorIjSaIjEEaSERKS1_(%"struct.std::vector<int,std::allocator<int> >"*, %"struct.std::vector<int,std::allocator<int> >"*)
-
-declare fastcc void @_ZN9TableBaseILi2EdE6reinitERK12TableIndicesILi2EE(%"struct.TableBase<2,double>"* nocapture, i32, i32)
-
-declare fastcc void @_ZNSt6vectorIbSaIbEEC1EmRKbRKS0_(%"struct.std::vector<bool,std::allocator<bool> >"* nocapture, i64, i8* nocapture)
-
-declare fastcc void @_ZNSt6vectorIS_IbSaIbEESaIS1_EEC2EmRKS1_RKS2_(%"struct.std::vector<std::vector<bool, std::allocator<bool> >,std::allocator<std::vector<bool, std::allocator<bool> > > >"* nocapture, i64, %"struct.std::vector<bool,std::allocator<bool> >"* nocapture)
-
-declare fastcc void @_ZNSt6vectorIN11Polynomials10PolynomialIdEESaIS2_EED1Ev(%"struct.std::vector<Polynomials::Polynomial<double>,std::allocator<Polynomials::Polynomial<double> > >"* nocapture)
-
-declare fastcc void @_ZN24TensorProductPolynomialsILi3EEC2IN11Polynomials10PolynomialIdEEEERKSt6vectorIT_SaIS6_EE(%"struct.PolynomialSpace<1>"* nocapture, %"struct.std::vector<Polynomials::Polynomial<double>,std::allocator<Polynomials::Polynomial<double> > >"* nocapture)
-
-declare fastcc void @_ZN7FE_PolyI24TensorProductPolynomialsILi3EELi3EEC2EjRKS1_RK17FiniteElementDataILi3EERKSt6vectorIbSaIbEERKS9_ISB_SaISB_EE(%"struct.FE_DGPNonparametric<3>"*, i32, %"struct.PolynomialSpace<1>"* nocapture, %"struct.FiniteElementData<1>"* nocapture, %"struct.std::vector<bool,std::allocator<bool> >"* nocapture, %"struct.std::vector<std::vector<bool, std::allocator<bool> >,std::allocator<std::vector<bool, std::allocator<bool> > > >"* nocapture)
-
-declare fastcc void @_ZN11FE_Q_Helper12_GLOBAL__N_116invert_numberingERKSt6vectorIjSaIjEE(%"struct.std::vector<int,std::allocator<int> >"* noalias nocapture sret, %"struct.std::vector<int,std::allocator<int> >"* nocapture)
-
-declare fastcc void @_ZN4FE_QILi3EE14get_dpo_vectorEj(%"struct.std::vector<int,std::allocator<int> >"* noalias nocapture sret, i32)
-
-define fastcc void @_ZN4FE_QILi3EEC1Ej(i32 %degree) {
-entry:
- invoke fastcc void @_ZNSt6vectorIbSaIbEEC1EmRKbRKS0_(%"struct.std::vector<bool,std::allocator<bool> >"* undef, i64 1, i8* undef)
- to label %invcont.i unwind label %lpad.i
-
-invcont.i: ; preds = %entry
- invoke fastcc void @_ZN4FE_QILi3EE14get_dpo_vectorEj(%"struct.std::vector<int,std::allocator<int> >"* noalias sret undef, i32 %degree)
- to label %invcont1.i unwind label %lpad120.i
-
-invcont1.i: ; preds = %invcont.i
- invoke fastcc void @_ZNSt6vectorIS_IbSaIbEESaIS1_EEC2EmRKS1_RKS2_(%"struct.std::vector<std::vector<bool, std::allocator<bool> >,std::allocator<std::vector<bool, std::allocator<bool> > > >"* undef, i64 undef, %"struct.std::vector<bool,std::allocator<bool> >"* undef)
- to label %invcont3.i unwind label %lpad124.i
-
-invcont3.i: ; preds = %invcont1.i
- invoke fastcc void @_ZN4FE_QILi3EE14get_dpo_vectorEj(%"struct.std::vector<int,std::allocator<int> >"* noalias sret undef, i32 %degree)
- to label %invcont4.i unwind label %lpad128.i
-
-invcont4.i: ; preds = %invcont3.i
- invoke fastcc void @_ZNSt6vectorIbSaIbEEC1EmRKbRKS0_(%"struct.std::vector<bool,std::allocator<bool> >"* undef, i64 undef, i8* undef)
- to label %invcont6.i unwind label %lpad132.i
-
-invcont6.i: ; preds = %invcont4.i
- invoke fastcc void @_ZN4FE_QILi3EE14get_dpo_vectorEj(%"struct.std::vector<int,std::allocator<int> >"* noalias sret undef, i32 %degree)
- to label %invcont7.i unwind label %lpad136.i
-
-invcont7.i: ; preds = %invcont6.i
- invoke fastcc void @_ZN11Polynomials19LagrangeEquidistant23generate_complete_basisEj(%"struct.std::vector<Polynomials::Polynomial<double>,std::allocator<Polynomials::Polynomial<double> > >"* noalias sret undef, i32 %degree)
- to label %invcont9.i unwind label %lpad140.i
-
-invcont9.i: ; preds = %invcont7.i
- invoke fastcc void @_ZN24TensorProductPolynomialsILi3EEC2IN11Polynomials10PolynomialIdEEEERKSt6vectorIT_SaIS6_EE(%"struct.PolynomialSpace<1>"* undef, %"struct.std::vector<Polynomials::Polynomial<double>,std::allocator<Polynomials::Polynomial<double> > >"* undef)
- to label %invcont10.i unwind label %lpad144.i
-
-invcont10.i: ; preds = %invcont9.i
- invoke fastcc void @_ZN7FE_PolyI24TensorProductPolynomialsILi3EELi3EEC2EjRKS1_RK17FiniteElementDataILi3EERKSt6vectorIbSaIbEERKS9_ISB_SaISB_EE(%"struct.FE_DGPNonparametric<3>"* undef, i32 %degree, %"struct.PolynomialSpace<1>"* undef, %"struct.FiniteElementData<1>"* undef, %"struct.std::vector<bool,std::allocator<bool> >"* undef, %"struct.std::vector<std::vector<bool, std::allocator<bool> >,std::allocator<std::vector<bool, std::allocator<bool> > > >"* undef)
- to label %bb14.i unwind label %lpad148.i
-
-bb14.i: ; preds = %invcont10.i
- br i1 false, label %bb3.i164.i, label %bb.i.i.i.i160.i
-
-bb.i.i.i.i160.i: ; preds = %bb14.i
- unreachable
-
-bb3.i164.i: ; preds = %bb14.i
- br i1 undef, label %bb10.i168.i, label %bb.i.i.i20.i166.i
-
-bb.i.i.i20.i166.i: ; preds = %bb3.i164.i
- unreachable
-
-bb10.i168.i: ; preds = %bb3.i164.i
- invoke fastcc void @_ZNSt6vectorIN11Polynomials10PolynomialIdEESaIS2_EED1Ev(%"struct.std::vector<Polynomials::Polynomial<double>,std::allocator<Polynomials::Polynomial<double> > >"* undef)
- to label %bb21.i unwind label %lpad144.i
-
-bb21.i: ; preds = %bb10.i168.i
- invoke fastcc void @_ZNSt6vectorIN11Polynomials10PolynomialIdEESaIS2_EED1Ev(%"struct.std::vector<Polynomials::Polynomial<double>,std::allocator<Polynomials::Polynomial<double> > >"* undef)
- to label %bb28.i unwind label %lpad140.i
-
-bb28.i: ; preds = %bb21.i
- br i1 undef, label %bb35.i, label %bb.i.i.i175.i
-
-bb.i.i.i175.i: ; preds = %bb28.i
- br label %bb35.i
-
-bb35.i: ; preds = %bb.i.i.i175.i, %bb28.i
- br i1 undef, label %bb42.i, label %bb.i.i.i205.i
-
-bb.i.i.i205.i: ; preds = %bb35.i
- unreachable
-
-bb42.i: ; preds = %bb35.i
- br i1 undef, label %bb47.i, label %bb.i.i.i213.i
-
-bb.i.i.i213.i: ; preds = %bb42.i
- unreachable
-
-bb47.i: ; preds = %bb42.i
- br i1 undef, label %bb59.i, label %bb.i.i.i247.i
-
-bb.i.i.i247.i: ; preds = %bb47.i
- unreachable
-
-bb59.i: ; preds = %bb47.i
- br i1 undef, label %bb66.i, label %bb.i.i.i255.i
-
-bb.i.i.i255.i: ; preds = %bb59.i
- unreachable
-
-bb66.i: ; preds = %bb59.i
- br i1 undef, label %bb71.i, label %bb.i.i.i262.i
-
-bb.i.i.i262.i: ; preds = %bb66.i
- br label %bb71.i
-
-bb71.i: ; preds = %bb.i.i.i262.i, %bb66.i
- %tmp11.i.i29.i.i.i.i.i.i = invoke i8* @_Znwm(i64 12)
- to label %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i.i.i unwind label %lpad.i.i.i.i.i.i ; <i8*> [#uses=0]
-
-lpad.i.i.i.i.i.i: ; preds = %bb71.i
- unreachable
-
-_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i.i.i: ; preds = %bb71.i
- br i1 undef, label %_ZNSt6vectorIjSaIjEED1Ev.exit.i.i, label %bb.i.i.i.i94.i
-
-bb.i.i.i.i94.i: ; preds = %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i.i.i
- unreachable
-
-_ZNSt6vectorIjSaIjEED1Ev.exit.i.i: ; preds = %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i.i.i
- %tmp11.i.i29.i.i.i.i5.i.i = invoke i8* @_Znwm(i64 undef)
- to label %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i12.i.i unwind label %lpad.i.i.i.i8.i.i ; <i8*> [#uses=0]
-
-lpad.i.i.i.i8.i.i: ; preds = %_ZNSt6vectorIjSaIjEED1Ev.exit.i.i
- invoke void @_Unwind_Resume(i8* undef)
- to label %.noexc.i9.i.i unwind label %lpad.i19.i.i
-
-.noexc.i9.i.i: ; preds = %lpad.i.i.i.i8.i.i
- unreachable
-
-_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i12.i.i: ; preds = %_ZNSt6vectorIjSaIjEED1Ev.exit.i.i
- br i1 undef, label %bb50.i.i.i, label %bb.i.i.i.i.i.i.i.i.i.i
-
-bb.i.i.i.i.i.i.i.i.i.i: ; preds = %bb.i.i.i.i.i.i.i.i.i.i, %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i12.i.i
- br i1 undef, label %bb50.i.i.i, label %bb.i.i.i.i.i.i.i.i.i.i
-
-bb50.i.i.i: ; preds = %bb.i.i.i.i.i.i.i.i.i.i, %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i12.i.i
- invoke fastcc void @_ZN11FE_Q_Helper12_GLOBAL__N_116invert_numberingERKSt6vectorIjSaIjEE(%"struct.std::vector<int,std::allocator<int> >"* noalias sret undef, %"struct.std::vector<int,std::allocator<int> >"* undef)
- to label %bb83.i unwind label %lpad188.i
-
-lpad.i19.i.i: ; preds = %lpad.i.i.i.i8.i.i
- unreachable
-
-bb83.i: ; preds = %bb50.i.i.i
- br i1 undef, label %invcont84.i, label %bb.i.i.i221.i
-
-bb.i.i.i221.i: ; preds = %bb83.i
- unreachable
-
-invcont84.i: ; preds = %bb83.i
- %tmp11.i.i29.i.i.i.i.i = invoke i8* @_Znwm(i64 undef)
- to label %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i.i unwind label %lpad.i.i.i.i315.i ; <i8*> [#uses=0]
-
-lpad.i.i.i.i315.i: ; preds = %invcont84.i
- invoke void @_Unwind_Resume(i8* undef)
- to label %.noexc.i316.i unwind label %lpad.i352.i
-
-.noexc.i316.i: ; preds = %lpad.i.i.i.i315.i
- unreachable
-
-_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i.i: ; preds = %invcont84.i
- br i1 undef, label %bb50.i.i, label %bb.i.i.i.i.i.i.i.i320.i
-
-bb.i.i.i.i.i.i.i.i320.i: ; preds = %bb.i.i.i.i.i.i.i.i320.i, %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i.i
- br i1 undef, label %bb50.i.i, label %bb.i.i.i.i.i.i.i.i320.i
-
-bb50.i.i: ; preds = %bb.i.i.i.i.i.i.i.i320.i, %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i.i
- invoke fastcc void @_ZN11FE_Q_Helper12_GLOBAL__N_116invert_numberingERKSt6vectorIjSaIjEE(%"struct.std::vector<int,std::allocator<int> >"* noalias sret undef, %"struct.std::vector<int,std::allocator<int> >"* undef)
- to label %invcont86.i unwind label %lpad200.i
-
-lpad.i352.i: ; preds = %lpad.i.i.i.i315.i
- unreachable
-
-invcont86.i: ; preds = %bb50.i.i
- invoke fastcc void @_ZNSt6vectorIjSaIjEEaSERKS1_(%"struct.std::vector<int,std::allocator<int> >"* undef, %"struct.std::vector<int,std::allocator<int> >"* undef)
- to label %.noexc380.i unwind label %lpad204.i
-
-.noexc380.i: ; preds = %invcont86.i
- br i1 undef, label %bb100.i, label %bb.i.i.i198.i
-
-bb.i.i.i198.i: ; preds = %.noexc380.i
- unreachable
-
-bb100.i: ; preds = %.noexc380.i
- br i1 undef, label %invcont101.i, label %bb.i.i.i190.i
-
-bb.i.i.i190.i: ; preds = %bb100.i
- unreachable
-
-invcont101.i: ; preds = %bb100.i
- invoke fastcc void @_ZN9TableBaseILi2EdE6reinitERK12TableIndicesILi2EE(%"struct.TableBase<2,double>"* undef, i32 undef, i32 undef)
- to label %_ZN10FullMatrixIdEC1Ejj.exit.i.i unwind label %lpad.i.i.i.i.i
-
-lpad.i.i.i.i.i: ; preds = %invcont101.i
- unreachable
-
-_ZN10FullMatrixIdEC1Ejj.exit.i.i: ; preds = %invcont101.i
- invoke fastcc void @_ZN9TableBaseILi2EdE6reinitERK12TableIndicesILi2EE(%"struct.TableBase<2,double>"* undef, i32 undef, i32 undef)
- to label %_ZN10FullMatrixIdEC1Ejj.exit28.i.i unwind label %lpad.i.i.i27.i.i
-
-lpad.i.i.i27.i.i: ; preds = %_ZN10FullMatrixIdEC1Ejj.exit.i.i
- invoke void @_Unwind_Resume(i8* undef)
- to label %.noexc.i.i unwind label %lpad.i.i
-
-.noexc.i.i: ; preds = %lpad.i.i.i27.i.i
- unreachable
-
-_ZN10FullMatrixIdEC1Ejj.exit28.i.i: ; preds = %_ZN10FullMatrixIdEC1Ejj.exit.i.i
- br i1 undef, label %bb58.i.i, label %bb.i.i.i304.i.i
-
-bb.i.i.i304.i.i: ; preds = %_ZN10FullMatrixIdEC1Ejj.exit28.i.i
- unreachable
-
-bb58.i.i: ; preds = %_ZN10FullMatrixIdEC1Ejj.exit28.i.i
- br i1 false, label %bb.i191.i, label %bb.i.i.i297.i.i
-
-bb.i.i.i297.i.i: ; preds = %bb58.i.i
- unreachable
-
-lpad.i.i: ; preds = %lpad.i.i.i27.i.i
- unreachable
-
-bb.i191.i: ; preds = %.noexc232.i, %bb58.i.i
- invoke fastcc void @_ZN9TableBaseILi2EdE6reinitERK12TableIndicesILi2EE(%"struct.TableBase<2,double>"* undef, i32 undef, i32 undef)
- to label %.noexc232.i unwind label %lpad196.i
-
-.noexc232.i: ; preds = %bb.i191.i
- br i1 undef, label %bb29.loopexit.i.i, label %bb.i191.i
-
-bb7.i215.i: ; preds = %bb9.i216.i
- br i1 undef, label %bb16.preheader.i.i, label %bb8.i.i
-
-bb8.i.i: ; preds = %bb7.i215.i
- %tmp60.i.i = add i32 %0, 1 ; <i32> [#uses=1]
- br label %bb9.i216.i
-
-bb9.i216.i: ; preds = %bb29.loopexit.i.i, %bb8.i.i
- %0 = phi i32 [ 0, %bb29.loopexit.i.i ], [ %tmp60.i.i, %bb8.i.i ] ; <i32> [#uses=2]
- br i1 undef, label %bb7.i215.i, label %bb16.preheader.i.i
-
-bb15.i.i: ; preds = %bb16.preheader.i.i, %bb15.i.i
- %j1.0212.i.i = phi i32 [ %1, %bb15.i.i ], [ 0, %bb16.preheader.i.i ] ; <i32> [#uses=2]
- %tmp6.i.i195.i.i = load i32* undef, align 4 ; <i32> [#uses=1]
- %tmp231.i.i = mul i32 %0, %tmp6.i.i195.i.i ; <i32> [#uses=1]
- %tmp13.i197.i.i = add i32 %j1.0212.i.i, %tmp231.i.i ; <i32> [#uses=0]
- %1 = add i32 %j1.0212.i.i, 1 ; <i32> [#uses=1]
- br i1 undef, label %bb15.i.i, label %bb17.i.i
-
-bb17.i.i: ; preds = %bb16.preheader.i.i, %bb15.i.i
- br label %bb16.preheader.i.i
-
-bb16.preheader.i.i: ; preds = %bb17.i.i, %bb9.i216.i, %bb7.i215.i
- br i1 undef, label %bb17.i.i, label %bb15.i.i
-
-bb29.loopexit.i.i: ; preds = %.noexc232.i
- br label %bb9.i216.i
-
-lpad.i: ; preds = %entry
- unreachable
-
-lpad120.i: ; preds = %invcont.i
- unreachable
-
-lpad124.i: ; preds = %invcont1.i
- unreachable
-
-lpad128.i: ; preds = %invcont3.i
- unreachable
-
-lpad132.i: ; preds = %invcont4.i
- unreachable
-
-lpad136.i: ; preds = %invcont6.i
- unreachable
-
-lpad140.i: ; preds = %bb21.i, %invcont7.i
- unreachable
-
-lpad144.i: ; preds = %bb10.i168.i, %invcont9.i
- unreachable
-
-lpad148.i: ; preds = %invcont10.i
- unreachable
-
-lpad188.i: ; preds = %bb50.i.i.i
- unreachable
-
-lpad196.i: ; preds = %bb.i191.i
- unreachable
-
-lpad200.i: ; preds = %bb50.i.i
- unreachable
-
-lpad204.i: ; preds = %invcont86.i
- unreachable
-}
-
-declare fastcc void @_ZN11Polynomials19LagrangeEquidistant23generate_complete_basisEj(%"struct.std::vector<Polynomials::Polynomial<double>,std::allocator<Polynomials::Polynomial<double> > >"* noalias nocapture sret, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/select-aggregate.ll b/libclamav/c++/llvm/test/CodeGen/X86/select-aggregate.ll
deleted file mode 100644
index 44cafe2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/select-aggregate.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-; PR5757
-
-; CHECK: cmovneq %rdi, %rsi
-; CHECK: movl (%rsi), %eax
-
-%0 = type { i64, i32 }
-
-define i32 @foo(%0* %p, %0* %q, i1 %r) nounwind {
- %t0 = load %0* %p
- %t1 = load %0* %q
- %t4 = select i1 %r, %0 %t0, %0 %t1
- %t5 = extractvalue %0 %t4, 1
- ret i32 %t5
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/select-zero-one.ll b/libclamav/c++/llvm/test/CodeGen/X86/select-zero-one.ll
deleted file mode 100644
index c38a020..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/select-zero-one.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep cmov
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep xor
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movzbl | count 1
-
-@r1 = weak global i32 0
-
-define void @t1(i32 %a, double %b) {
- %tmp114 = fcmp ugt double %b, 1.000000e-09
- %tmp120 = icmp eq i32 %a, 0 ; <i1> [#uses=1]
- %bothcond = or i1 %tmp114, %tmp120 ; <i1> [#uses=1]
- %storemerge = select i1 %bothcond, i32 0, i32 1 ; <i32> [#uses=2]
- store i32 %storemerge, i32* @r1, align 4
- ret void
-}
-
-@r2 = weak global i8 0
-
-define void @t2(i32 %a, double %b) {
- %tmp114 = fcmp ugt double %b, 1.000000e-09
- %tmp120 = icmp eq i32 %a, 0 ; <i1> [#uses=1]
- %bothcond = or i1 %tmp114, %tmp120 ; <i1> [#uses=1]
- %storemerge = select i1 %bothcond, i8 0, i8 1 ; <i32> [#uses=2]
- store i8 %storemerge, i8* @r2, align 4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/select.ll b/libclamav/c++/llvm/test/CodeGen/X86/select.ll
deleted file mode 100644
index 95ed9e9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/select.ll
+++ /dev/null
@@ -1,63 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=pentium
-; RUN: llc < %s -march=x86 -mcpu=yonah
-; RUN: llc < %s -march=x86 -mcpu=yonah | not grep set
-
-define i1 @boolSel(i1 %A, i1 %B, i1 %C) nounwind {
- %X = select i1 %A, i1 %B, i1 %C ; <i1> [#uses=1]
- ret i1 %X
-}
-
-define i8 @byteSel(i1 %A, i8 %B, i8 %C) nounwind {
- %X = select i1 %A, i8 %B, i8 %C ; <i8> [#uses=1]
- ret i8 %X
-}
-
-define i16 @shortSel(i1 %A, i16 %B, i16 %C) nounwind {
- %X = select i1 %A, i16 %B, i16 %C ; <i16> [#uses=1]
- ret i16 %X
-}
-
-define i32 @intSel(i1 %A, i32 %B, i32 %C) nounwind {
- %X = select i1 %A, i32 %B, i32 %C ; <i32> [#uses=1]
- ret i32 %X
-}
-
-define i64 @longSel(i1 %A, i64 %B, i64 %C) nounwind {
- %X = select i1 %A, i64 %B, i64 %C ; <i64> [#uses=1]
- ret i64 %X
-}
-
-define double @doubleSel(i1 %A, double %B, double %C) nounwind {
- %X = select i1 %A, double %B, double %C ; <double> [#uses=1]
- ret double %X
-}
-
-define i8 @foldSel(i1 %A, i8 %B, i8 %C) nounwind {
- %Cond = icmp slt i8 %B, %C ; <i1> [#uses=1]
- %X = select i1 %Cond, i8 %B, i8 %C ; <i8> [#uses=1]
- ret i8 %X
-}
-
-define i32 @foldSel2(i1 %A, i32 %B, i32 %C) nounwind {
- %Cond = icmp eq i32 %B, %C ; <i1> [#uses=1]
- %X = select i1 %Cond, i32 %B, i32 %C ; <i32> [#uses=1]
- ret i32 %X
-}
-
-define i32 @foldSel2a(i1 %A, i32 %B, i32 %C, double %X, double %Y) nounwind {
- %Cond = fcmp olt double %X, %Y ; <i1> [#uses=1]
- %X.upgrd.1 = select i1 %Cond, i32 %B, i32 %C ; <i32> [#uses=1]
- ret i32 %X.upgrd.1
-}
-
-define float @foldSel3(i1 %A, float %B, float %C, i32 %X, i32 %Y) nounwind {
- %Cond = icmp ult i32 %X, %Y ; <i1> [#uses=1]
- %X.upgrd.2 = select i1 %Cond, float %B, float %C ; <float> [#uses=1]
- ret float %X.upgrd.2
-}
-
-define float @nofoldSel4(i1 %A, float %B, float %C, i32 %X, i32 %Y) nounwind {
- %Cond = icmp slt i32 %X, %Y ; <i1> [#uses=1]
- %X.upgrd.3 = select i1 %Cond, float %B, float %C ; <float> [#uses=1]
- ret float %X.upgrd.3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/setcc.ll b/libclamav/c++/llvm/test/CodeGen/X86/setcc.ll
deleted file mode 100644
index c37e15d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/setcc.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
-; rdar://7329206
-
-; Use sbb x, x to materialize carry bit in a GPR. The value is either
-; all 1's or all 0's.
-
-define zeroext i16 @t1(i16 zeroext %x) nounwind readnone ssp {
-entry:
-; CHECK: t1:
-; CHECK: seta %al
-; CHECK: movzbl %al, %eax
-; CHECK: shll $5, %eax
- %0 = icmp ugt i16 %x, 26 ; <i1> [#uses=1]
- %iftmp.1.0 = select i1 %0, i16 32, i16 0 ; <i16> [#uses=1]
- ret i16 %iftmp.1.0
-}
-
-define zeroext i16 @t2(i16 zeroext %x) nounwind readnone ssp {
-entry:
-; CHECK: t2:
-; CHECK: sbbl %eax, %eax
-; CHECK: andl $32, %eax
- %0 = icmp ult i16 %x, 26 ; <i1> [#uses=1]
- %iftmp.0.0 = select i1 %0, i16 32, i16 0 ; <i16> [#uses=1]
- ret i16 %iftmp.0.0
-}
-
-define i64 @t3(i64 %x) nounwind readnone ssp {
-entry:
-; CHECK: t3:
-; CHECK: sbbq %rax, %rax
-; CHECK: andq $64, %rax
- %0 = icmp ult i64 %x, 18 ; <i1> [#uses=1]
- %iftmp.2.0 = select i1 %0, i64 64, i64 0 ; <i64> [#uses=1]
- ret i64 %iftmp.2.0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/setoeq.ll b/libclamav/c++/llvm/test/CodeGen/X86/setoeq.ll
deleted file mode 100644
index 4a9c1ba..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/setoeq.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 | grep set | count 2
-; RUN: llc < %s -march=x86 | grep and
-
-define zeroext i8 @t(double %x) nounwind readnone {
-entry:
- %0 = fptosi double %x to i32 ; <i32> [#uses=1]
- %1 = sitofp i32 %0 to double ; <double> [#uses=1]
- %2 = fcmp oeq double %1, %x ; <i1> [#uses=1]
- %retval12 = zext i1 %2 to i8 ; <i8> [#uses=1]
- ret i8 %retval12
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/setuge.ll b/libclamav/c++/llvm/test/CodeGen/X86/setuge.ll
deleted file mode 100644
index 4ca2f18..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/setuge.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep set
-
-declare i1 @llvm.isunordered.f32(float, float)
-
-define float @cmp(float %A, float %B, float %C, float %D) nounwind {
-entry:
- %tmp.1 = fcmp uno float %A, %B ; <i1> [#uses=1]
- %tmp.2 = fcmp oge float %A, %B ; <i1> [#uses=1]
- %tmp.3 = or i1 %tmp.1, %tmp.2 ; <i1> [#uses=1]
- %tmp.4 = select i1 %tmp.3, float %C, float %D ; <float> [#uses=1]
- ret float %tmp.4
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sext-i1.ll b/libclamav/c++/llvm/test/CodeGen/X86/sext-i1.ll
deleted file mode 100644
index 21c418d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sext-i1.ll
+++ /dev/null
@@ -1,63 +0,0 @@
-; RUN: llc < %s -march=x86 | FileCheck %s -check-prefix=32
-; RUN: llc < %s -march=x86-64 | FileCheck %s -check-prefix=64
-; rdar://7573216
-; PR6146
-
-define i32 @t1(i32 %x) nounwind readnone ssp {
-entry:
-; 32: t1:
-; 32: cmpl $1
-; 32: sbbl
-
-; 64: t1:
-; 64: cmpl $1
-; 64: sbbl
- %0 = icmp eq i32 %x, 0
- %iftmp.0.0 = select i1 %0, i32 -1, i32 0
- ret i32 %iftmp.0.0
-}
-
-define i32 @t2(i32 %x) nounwind readnone ssp {
-entry:
-; 32: t2:
-; 32: cmpl $1
-; 32: sbbl
-
-; 64: t2:
-; 64: cmpl $1
-; 64: sbbl
- %0 = icmp eq i32 %x, 0
- %iftmp.0.0 = sext i1 %0 to i32
- ret i32 %iftmp.0.0
-}
-
-%struct.zbookmark = type { i64, i64 }
-%struct.zstream = type { }
-
-define i32 @t3() nounwind readonly {
-entry:
-; 32: t3:
-; 32: cmpl $1
-; 32: sbbl
-; 32: cmpl
-; 32: xorl
-
-; 64: t3:
-; 64: cmpl $1
-; 64: sbbq
-; 64: cmpq
-; 64: xorl
- %not.tobool = icmp eq i32 undef, 0 ; <i1> [#uses=2]
- %cond = sext i1 %not.tobool to i32 ; <i32> [#uses=1]
- %conv = sext i1 %not.tobool to i64 ; <i64> [#uses=1]
- %add13 = add i64 0, %conv ; <i64> [#uses=1]
- %cmp = icmp ult i64 undef, %add13 ; <i1> [#uses=1]
- br i1 %cmp, label %if.then, label %if.end
-
-if.then: ; preds = %entry
- br label %if.end
-
-if.end: ; preds = %if.then, %entry
- %xor27 = xor i32 undef, %cond ; <i32> [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sext-load.ll b/libclamav/c++/llvm/test/CodeGen/X86/sext-load.ll
deleted file mode 100644
index c9b39d3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sext-load.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 | grep movsbl
-
-define i32 @foo(i32 %X) nounwind {
-entry:
- %tmp12 = trunc i32 %X to i8 ; <i8> [#uses=1]
- %tmp123 = sext i8 %tmp12 to i32 ; <i32> [#uses=1]
- ret i32 %tmp123
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sext-ret-val.ll b/libclamav/c++/llvm/test/CodeGen/X86/sext-ret-val.ll
deleted file mode 100644
index da1a187..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sext-ret-val.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=x86 | grep movzbl | count 1
-; rdar://6699246
-
-define signext i8 @t1(i8* %A) nounwind readnone ssp {
-entry:
- %0 = icmp ne i8* %A, null
- %1 = zext i1 %0 to i8
- ret i8 %1
-}
-
-define i8 @t2(i8* %A) nounwind readnone ssp {
-entry:
- %0 = icmp ne i8* %A, null
- %1 = zext i1 %0 to i8
- ret i8 %1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sext-select.ll b/libclamav/c++/llvm/test/CodeGen/X86/sext-select.ll
deleted file mode 100644
index 4aca040..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sext-select.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86 | grep movsw
-; PR2139
-
-declare void @abort()
-
-define i32 @main() {
-entry:
- %tmp73 = tail call i1 @return_false() ; <i8> [#uses=1]
- %g.0 = select i1 %tmp73, i16 0, i16 -480 ; <i16> [#uses=2]
- %tmp7778 = sext i16 %g.0 to i32 ; <i32> [#uses=1]
- %tmp80 = shl i32 %tmp7778, 3 ; <i32> [#uses=2]
- %tmp87 = icmp sgt i32 %tmp80, 32767 ; <i1> [#uses=1]
- br i1 %tmp87, label %bb90, label %bb91
-bb90: ; preds = %bb84, %bb72
- tail call void @abort()
- unreachable
-bb91: ; preds = %bb84
- ret i32 0
-}
-
-define i1 @return_false() {
- ret i1 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sext-subreg.ll b/libclamav/c++/llvm/test/CodeGen/X86/sext-subreg.ll
deleted file mode 100644
index b2b9f81..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sext-subreg.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-; rdar://7529457
-
-define i64 @t(i64 %A, i64 %B, i32* %P, i64 *%P2) nounwind {
-; CHECK: t:
-; CHECK: movslq %e{{.*}}, %rax
-; CHECK: movq %rax
-; CHECK: movl %eax
- %C = add i64 %A, %B
- %D = trunc i64 %C to i32
- volatile store i32 %D, i32* %P
- %E = shl i64 %C, 32
- %F = ashr i64 %E, 32
- volatile store i64 %F, i64 *%P2
- volatile store i32 %D, i32* %P
- ret i64 undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sext-trunc.ll b/libclamav/c++/llvm/test/CodeGen/X86/sext-trunc.ll
deleted file mode 100644
index 2eaf425..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sext-trunc.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 > %t
-; RUN: grep movsbl %t
-; RUN: not grep movz %t
-; RUN: not grep and %t
-
-define i8 @foo(i16 signext %x) signext nounwind {
- %retval56 = trunc i16 %x to i8
- ret i8 %retval56
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sfence.ll b/libclamav/c++/llvm/test/CodeGen/X86/sfence.ll
deleted file mode 100644
index 4782879..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sfence.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep sfence
-
-declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1)
-
-define void @test() {
- call void @llvm.memory.barrier( i1 false, i1 false, i1 false, i1 true, i1 true)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/shift-and.ll b/libclamav/c++/llvm/test/CodeGen/X86/shift-and.ll
deleted file mode 100644
index fd278c2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/shift-and.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 | grep and | count 1
-; RUN: llc < %s -march=x86-64 | not grep and
-
-define i32 @t1(i32 %t, i32 %val) nounwind {
- %shamt = and i32 %t, 31
- %res = shl i32 %val, %shamt
- ret i32 %res
-}
-
-@X = internal global i16 0
-
-define void @t2(i16 %t) nounwind {
- %shamt = and i16 %t, 31
- %tmp = load i16* @X
- %tmp1 = ashr i16 %tmp, %shamt
- store i16 %tmp1, i16* @X
- ret void
-}
-
-define i64 @t3(i64 %t, i64 %val) nounwind {
- %shamt = and i64 %t, 63
- %res = lshr i64 %val, %shamt
- ret i64 %res
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/shift-coalesce.ll b/libclamav/c++/llvm/test/CodeGen/X86/shift-coalesce.ll
deleted file mode 100644
index d38f9a8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/shift-coalesce.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
-; RUN: grep {shld.*CL}
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
-; RUN: not grep {mov CL, BL}
-
-; PR687
-
-define i64 @foo(i64 %x, i64* %X) {
- %tmp.1 = load i64* %X ; <i64> [#uses=1]
- %tmp.3 = trunc i64 %tmp.1 to i8 ; <i8> [#uses=1]
- %shift.upgrd.1 = zext i8 %tmp.3 to i64 ; <i64> [#uses=1]
- %tmp.4 = shl i64 %x, %shift.upgrd.1 ; <i64> [#uses=1]
- ret i64 %tmp.4
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/shift-codegen.ll b/libclamav/c++/llvm/test/CodeGen/X86/shift-codegen.ll
deleted file mode 100644
index 4cba183..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/shift-codegen.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -relocation-model=static -march=x86 | \
-; RUN: grep {shll \$3} | count 2
-
-; This should produce two shll instructions, not any lea's.
-
-target triple = "i686-apple-darwin8"
-@Y = weak global i32 0		; <i32*> [#uses=1]
-@X = weak global i32 0		; <i32*> [#uses=2]
-
-
-define void @fn1() {
-entry:
- %tmp = load i32* @Y ; <i32> [#uses=1]
- %tmp1 = shl i32 %tmp, 3 ; <i32> [#uses=1]
- %tmp2 = load i32* @X ; <i32> [#uses=1]
- %tmp3 = or i32 %tmp1, %tmp2 ; <i32> [#uses=1]
- store i32 %tmp3, i32* @X
- ret void
-}
-
-define i32 @fn2(i32 %X, i32 %Y) {
-entry:
- %tmp2 = shl i32 %Y, 3 ; <i32> [#uses=1]
- %tmp4 = or i32 %tmp2, %X ; <i32> [#uses=1]
- ret i32 %tmp4
-}
-
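A minimal C sketch of what the deleted shift-codegen.ll checked (the X/Y/fn1/fn2 names mirror the IR above; the C code is only an illustration, not part of the commit): a multiply-by-8 written as a shift-and-or should come out as two shll instructions rather than lea:

static int Y, X;

static void fn1(void) {
    X |= Y << 3;             /* load Y, shift left by 3, or into X */
}

static int fn2(int x, int y) {
    return (y << 3) | x;     /* same pattern on function arguments */
}

int main(void) {
    Y = 2;
    fn1();                            /* X becomes 16 */
    return fn2(X, 1) == 24 ? 0 : 1;   /* (1 << 3) | 16 == 24 */
}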
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/shift-combine.ll b/libclamav/c++/llvm/test/CodeGen/X86/shift-combine.ll
deleted file mode 100644
index e443ac1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/shift-combine.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s | not grep shrl
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin8"
-@array = weak global [4 x i32] zeroinitializer		; <[4 x i32]*> [#uses=1]
-
-define i32 @foo(i32 %x) {
-entry:
- %tmp2 = lshr i32 %x, 2 ; <i32> [#uses=1]
- %tmp3 = and i32 %tmp2, 3 ; <i32> [#uses=1]
- %tmp4 = getelementptr [4 x i32]* @array, i32 0, i32 %tmp3 ; <i32*> [#uses=1]
- %tmp5 = load i32* %tmp4, align 4 ; <i32> [#uses=1]
- ret i32 %tmp5
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/shift-double.ll b/libclamav/c++/llvm/test/CodeGen/X86/shift-double.ll
deleted file mode 100644
index 5adee7c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/shift-double.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
-; RUN: grep {sh\[lr\]d} | count 5
-
-define i64 @test1(i64 %X, i8 %C) {
- %shift.upgrd.1 = zext i8 %C to i64 ; <i64> [#uses=1]
- %Y = shl i64 %X, %shift.upgrd.1 ; <i64> [#uses=1]
- ret i64 %Y
-}
-
-define i64 @test2(i64 %X, i8 %C) {
- %shift.upgrd.2 = zext i8 %C to i64 ; <i64> [#uses=1]
- %Y = ashr i64 %X, %shift.upgrd.2 ; <i64> [#uses=1]
- ret i64 %Y
-}
-
-define i64 @test3(i64 %X, i8 %C) {
- %shift.upgrd.3 = zext i8 %C to i64 ; <i64> [#uses=1]
- %Y = lshr i64 %X, %shift.upgrd.3 ; <i64> [#uses=1]
- ret i64 %Y
-}
-
-define i32 @test4(i32 %A, i32 %B, i8 %C) {
- %shift.upgrd.4 = zext i8 %C to i32 ; <i32> [#uses=1]
- %X = shl i32 %A, %shift.upgrd.4 ; <i32> [#uses=1]
- %Cv = sub i8 32, %C ; <i8> [#uses=1]
- %shift.upgrd.5 = zext i8 %Cv to i32 ; <i32> [#uses=1]
- %Y = lshr i32 %B, %shift.upgrd.5 ; <i32> [#uses=1]
- %Z = or i32 %Y, %X ; <i32> [#uses=1]
- ret i32 %Z
-}
-
-define i16 @test5(i16 %A, i16 %B, i8 %C) {
- %shift.upgrd.6 = zext i8 %C to i16 ; <i16> [#uses=1]
- %X = shl i16 %A, %shift.upgrd.6 ; <i16> [#uses=1]
- %Cv = sub i8 16, %C ; <i8> [#uses=1]
- %shift.upgrd.7 = zext i8 %Cv to i16 ; <i16> [#uses=1]
- %Y = lshr i16 %B, %shift.upgrd.7 ; <i16> [#uses=1]
- %Z = or i16 %Y, %X ; <i16> [#uses=1]
- ret i16 %Z
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/shift-folding.ll b/libclamav/c++/llvm/test/CodeGen/X86/shift-folding.ll
deleted file mode 100644
index 872817f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/shift-folding.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86 | \
-; RUN: grep {s\[ah\]\[rl\]l} | count 1
-
-define i32* @test1(i32* %P, i32 %X) {
- %Y = lshr i32 %X, 2 ; <i32> [#uses=1]
- %gep.upgrd.1 = zext i32 %Y to i64 ; <i64> [#uses=1]
- %P2 = getelementptr i32* %P, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
- ret i32* %P2
-}
-
-define i32* @test2(i32* %P, i32 %X) {
- %Y = shl i32 %X, 2 ; <i32> [#uses=1]
- %gep.upgrd.2 = zext i32 %Y to i64 ; <i64> [#uses=1]
- %P2 = getelementptr i32* %P, i64 %gep.upgrd.2 ; <i32*> [#uses=1]
- ret i32* %P2
-}
-
-define i32* @test3(i32* %P, i32 %X) {
- %Y = ashr i32 %X, 2 ; <i32> [#uses=1]
- %P2 = getelementptr i32* %P, i32 %Y ; <i32*> [#uses=1]
- ret i32* %P2
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/shift-i128.ll b/libclamav/c++/llvm/test/CodeGen/X86/shift-i128.ll
deleted file mode 100644
index c4d15ae..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/shift-i128.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86
-; RUN: llc < %s -march=x86-64
-
-define void @t(i128 %x, i128 %a, i128* nocapture %r) nounwind {
-entry:
- %0 = lshr i128 %x, %a
- store i128 %0, i128* %r, align 16
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/shift-i256.ll b/libclamav/c++/llvm/test/CodeGen/X86/shift-i256.ll
deleted file mode 100644
index d5f65a6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/shift-i256.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86
-; RUN: llc < %s -march=x86-64
-
-define void @t(i256 %x, i256 %a, i256* nocapture %r) nounwind readnone {
-entry:
- %0 = ashr i256 %x, %a
- store i256 %0, i256* %r
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/shift-one.ll b/libclamav/c++/llvm/test/CodeGen/X86/shift-one.ll
deleted file mode 100644
index 0f80f90..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/shift-one.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep leal
-
-@x = external global i32		; <i32*> [#uses=1]
-
-define i32 @test() {
- %tmp.0 = load i32* @x ; <i32> [#uses=1]
- %tmp.1 = shl i32 %tmp.0, 1 ; <i32> [#uses=1]
- ret i32 %tmp.1
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/shift-parts.ll b/libclamav/c++/llvm/test/CodeGen/X86/shift-parts.ll
deleted file mode 100644
index ce4f538..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/shift-parts.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep shrdq
-; PR4736
-
-%0 = type { i32, i8, [35 x i8] }
-
-@g_144 = external global %0, align 8		; <%0*> [#uses=1]
-
-define i32 @int87(i32 %uint64p_8) nounwind {
-entry:
- %srcval4 = load i320* bitcast (%0* @g_144 to i320*), align 8 ; <i320> [#uses=1]
- br label %for.cond
-
-for.cond: ; preds = %for.cond, %entry
- %call3.in.in.in.v = select i1 undef, i320 192, i320 128 ; <i320> [#uses=1]
- %call3.in.in.in = lshr i320 %srcval4, %call3.in.in.in.v ; <i320> [#uses=1]
- %call3.in = trunc i320 %call3.in.in.in to i32 ; <i32> [#uses=1]
- %tobool = icmp eq i32 %call3.in, 0 ; <i1> [#uses=1]
- br i1 %tobool, label %for.cond, label %if.then
-
-if.then: ; preds = %for.cond
- ret i32 1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/shl_elim.ll b/libclamav/c++/llvm/test/CodeGen/X86/shl_elim.ll
deleted file mode 100644
index 4458891..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/shl_elim.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {movl 8(.esp), %eax}
-; RUN: llc < %s -march=x86 | grep {shrl .eax}
-; RUN: llc < %s -march=x86 | grep {movswl .ax, .eax}
-
-define i32 @test1(i64 %a) {
- %tmp29 = lshr i64 %a, 24 ; <i64> [#uses=1]
- %tmp23 = trunc i64 %tmp29 to i32 ; <i32> [#uses=1]
- %tmp410 = lshr i32 %tmp23, 9 ; <i32> [#uses=1]
- %tmp45 = trunc i32 %tmp410 to i16 ; <i16> [#uses=1]
- %tmp456 = sext i16 %tmp45 to i32 ; <i32> [#uses=1]
- ret i32 %tmp456
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/shrink-fp-const1.ll b/libclamav/c++/llvm/test/CodeGen/X86/shrink-fp-const1.ll
deleted file mode 100644
index 49b9fa3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/shrink-fp-const1.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse2 | not grep cvtss2sd
-; PR1264
-
-define double @foo(double %x) {
- %y = fmul double %x, 5.000000e-01
- ret double %y
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/shrink-fp-const2.ll b/libclamav/c++/llvm/test/CodeGen/X86/shrink-fp-const2.ll
deleted file mode 100644
index 3d5203b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/shrink-fp-const2.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86 | grep flds
-; This should be a flds, not fldt.
-define x86_fp80 @test2() nounwind {
-entry:
- ret x86_fp80 0xK3FFFC000000000000000
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sibcall.ll b/libclamav/c++/llvm/test/CodeGen/X86/sibcall.ll
deleted file mode 100644
index 7278e3d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sibcall.ll
+++ /dev/null
@@ -1,232 +0,0 @@
-; RUN: llc < %s -march=x86 -asm-verbose=false | FileCheck %s -check-prefix=32
-; RUN: llc < %s -march=x86-64 -asm-verbose=false | FileCheck %s -check-prefix=64
-; XFAIL: i686-apple-darwin8
-
-define void @t1(i32 %x) nounwind ssp {
-entry:
-; 32: t1:
-; 32: jmp {{_?}}foo
-
-; 64: t1:
-; 64: jmp {{_?}}foo
- tail call void @foo() nounwind
- ret void
-}
-
-declare void @foo()
-
-define void @t2() nounwind ssp {
-entry:
-; 32: t2:
-; 32: jmp {{_?}}foo2
-
-; 64: t2:
-; 64: jmp {{_?}}foo2
- %0 = tail call i32 @foo2() nounwind
- ret void
-}
-
-declare i32 @foo2()
-
-define void @t3() nounwind ssp {
-entry:
-; 32: t3:
-; 32: jmp {{_?}}foo3
-
-; 64: t3:
-; 64: jmp {{_?}}foo3
- %0 = tail call i32 @foo3() nounwind
- ret void
-}
-
-declare i32 @foo3()
-
-define void @t4(void (i32)* nocapture %x) nounwind ssp {
-entry:
-; 32: t4:
-; 32: call *
-; FIXME: gcc can generate a tailcall for this. But it's tricky.
-
-; 64: t4:
-; 64-NOT: call
-; 64: jmpq *
- tail call void %x(i32 0) nounwind
- ret void
-}
-
-define void @t5(void ()* nocapture %x) nounwind ssp {
-entry:
-; 32: t5:
-; 32-NOT: call
-; 32: jmpl *
-
-; 64: t5:
-; 64-NOT: call
-; 64: jmpq *
- tail call void %x() nounwind
- ret void
-}
-
-define i32 @t6(i32 %x) nounwind ssp {
-entry:
-; 32: t6:
-; 32: call {{_?}}t6
-; 32: jmp {{_?}}bar
-
-; 64: t6:
-; 64: jmp {{_?}}t6
-; 64: jmp {{_?}}bar
- %0 = icmp slt i32 %x, 10
- br i1 %0, label %bb, label %bb1
-
-bb:
- %1 = add nsw i32 %x, -1
- %2 = tail call i32 @t6(i32 %1) nounwind ssp
- ret i32 %2
-
-bb1:
- %3 = tail call i32 @bar(i32 %x) nounwind
- ret i32 %3
-}
-
-declare i32 @bar(i32)
-
-define i32 @t7(i32 %a, i32 %b, i32 %c) nounwind ssp {
-entry:
-; 32: t7:
-; 32: jmp {{_?}}bar2
-
-; 64: t7:
-; 64: jmp {{_?}}bar2
- %0 = tail call i32 @bar2(i32 %a, i32 %b, i32 %c) nounwind
- ret i32 %0
-}
-
-declare i32 @bar2(i32, i32, i32)
-
-define signext i16 @t8() nounwind ssp {
-entry:
-; 32: t8:
-; 32: call {{_?}}bar3
-
-; 64: t8:
-; 64: callq {{_?}}bar3
- %0 = tail call signext i16 @bar3() nounwind ; <i16> [#uses=1]
- ret i16 %0
-}
-
-declare signext i16 @bar3()
-
-define signext i16 @t9(i32 (i32)* nocapture %x) nounwind ssp {
-entry:
-; 32: t9:
-; 32: call *
-
-; 64: t9:
-; 64: callq *
- %0 = bitcast i32 (i32)* %x to i16 (i32)*
- %1 = tail call signext i16 %0(i32 0) nounwind
- ret i16 %1
-}
-
-define void @t10() nounwind ssp {
-entry:
-; 32: t10:
-; 32: call
-
-; 64: t10:
-; 64: callq
- %0 = tail call i32 @foo4() noreturn nounwind
- unreachable
-}
-
-declare i32 @foo4()
-
-define i32 @t11(i32 %x, i32 %y, i32 %z.0, i32 %z.1, i32 %z.2) nounwind ssp {
-; In 32-bit mode, it's emitting a bunch of dead loads that are not being
-; eliminated currently.
-
-; 32: t11:
-; 32-NOT: subl ${{[0-9]+}}, %esp
-; 32: jne
-; 32-NOT: movl
-; 32-NOT: addl ${{[0-9]+}}, %esp
-; 32: jmp {{_?}}foo5
-
-; 64: t11:
-; 64-NOT: subq ${{[0-9]+}}, %esp
-; 64-NOT: addq ${{[0-9]+}}, %esp
-; 64: jmp {{_?}}foo5
-entry:
- %0 = icmp eq i32 %x, 0
- br i1 %0, label %bb6, label %bb
-
-bb:
- %1 = tail call i32 @foo5(i32 %x, i32 %y, i32 %z.0, i32 %z.1, i32 %z.2) nounwind
- ret i32 %1
-
-bb6:
- ret i32 0
-}
-
-declare i32 @foo5(i32, i32, i32, i32, i32)
-
-%struct.t = type { i32, i32, i32, i32, i32 }
-
-define i32 @t12(i32 %x, i32 %y, %struct.t* byval align 4 %z) nounwind ssp {
-; 32: t12:
-; 32-NOT: subl ${{[0-9]+}}, %esp
-; 32-NOT: addl ${{[0-9]+}}, %esp
-; 32: jmp {{_?}}foo6
-
-; 64: t12:
-; 64-NOT: subq ${{[0-9]+}}, %esp
-; 64-NOT: addq ${{[0-9]+}}, %esp
-; 64: jmp {{_?}}foo6
-entry:
- %0 = icmp eq i32 %x, 0
- br i1 %0, label %bb2, label %bb
-
-bb:
- %1 = tail call i32 @foo6(i32 %x, i32 %y, %struct.t* byval align 4 %z) nounwind
- ret i32 %1
-
-bb2:
- ret i32 0
-}
-
-declare i32 @foo6(i32, i32, %struct.t* byval align 4)
-
-; rdar://r7717598
-%struct.ns = type { i32, i32 }
-%struct.cp = type { float, float }
-
-define %struct.ns* @t13(%struct.cp* %yy) nounwind ssp {
-; 32: t13:
-; 32-NOT: jmp
-; 32: call
-; 32: ret
-
-; 64: t13:
-; 64-NOT: jmp
-; 64: call
-; 64: ret
-entry:
- %0 = tail call fastcc %struct.ns* @foo7(%struct.cp* byval align 4 %yy, i8 signext 0) nounwind
- ret %struct.ns* %0
-}
-
-declare fastcc %struct.ns* @foo7(%struct.cp* byval align 4, i8 signext) nounwind ssp
-
-
-
-define void @t19() alignstack(32) nounwind {
-entry:
-; CHECK: t19:
-; CHECK: andl $-32
-; CHECK: call {{_?}}foo
- tail call void @foo() nounwind
- ret void
-}
-
-declare void @foo()
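As a side note on the sibling-call tests deleted above, here is a small, self-contained C illustration of the pattern they cover (the names are made up for the example; it is not part of the commit): when a function ends by returning the result of another call with a compatible signature, the backend can replace call+ret with a plain jmp to the callee.

#include <stdio.h>

static int bar2(int a, int b, int c) {
    return a + b + c;
}

/* analogous to @t7 above: the call is in tail position, so it is a
   candidate for a sibling call (jmp bar2) when the calling convention
   and stack usage allow it */
static int t7(int a, int b, int c) {
    return bar2(a, b, c);
}

int main(void) {
    printf("%d\n", t7(1, 2, 3));   /* 6 */
    return 0;
}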
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sincos.ll b/libclamav/c++/llvm/test/CodeGen/X86/sincos.ll
deleted file mode 100644
index 13f9329..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sincos.ll
+++ /dev/null
@@ -1,48 +0,0 @@
-; Make sure this testcase codegens to the sin and cos instructions, not calls
-; RUN: llc < %s -march=x86 -mattr=-sse,-sse2,-sse3 -enable-unsafe-fp-math | \
-; RUN: grep sin\$ | count 3
-; RUN: llc < %s -march=x86 -mattr=-sse,-sse2,-sse3 -enable-unsafe-fp-math | \
-; RUN: grep cos\$ | count 3
-
-declare float @sinf(float) readonly
-
-declare double @sin(double) readonly
-
-declare x86_fp80 @sinl(x86_fp80) readonly
-
-define float @test1(float %X) {
- %Y = call float @sinf(float %X) readonly
- ret float %Y
-}
-
-define double @test2(double %X) {
- %Y = call double @sin(double %X) readonly
- ret double %Y
-}
-
-define x86_fp80 @test3(x86_fp80 %X) {
- %Y = call x86_fp80 @sinl(x86_fp80 %X) readonly
- ret x86_fp80 %Y
-}
-
-declare float @cosf(float) readonly
-
-declare double @cos(double) readonly
-
-declare x86_fp80 @cosl(x86_fp80) readonly
-
-define float @test4(float %X) {
- %Y = call float @cosf(float %X) readonly
- ret float %Y
-}
-
-define double @test5(double %X) {
- %Y = call double @cos(double %X) readonly
- ret double %Y
-}
-
-define x86_fp80 @test6(x86_fp80 %X) {
- %Y = call x86_fp80 @cosl(x86_fp80 %X) readonly
- ret x86_fp80 %Y
-}
-
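A short C analogue of the deleted sincos.ll (illustrative only; test2/test5 mirror the IR above): with unsafe FP math and x87 codegen, the sin/cos calls may be lowered to the fsin/fcos instructions instead of libm calls, which is what the greps above counted.

#include <math.h>
#include <stdio.h>

static double test2(double x) { return sin(x); }   /* like @test2 above */
static double test5(double x) { return cos(x); }   /* like @test5 above */

int main(void) {
    /* link with -lm; expected output: 0.000000 1.000000 */
    printf("%f %f\n", test2(0.0), test5(0.0));
    return 0;
}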
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sink-hoist.ll b/libclamav/c++/llvm/test/CodeGen/X86/sink-hoist.ll
deleted file mode 100644
index 01d7373..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sink-hoist.ll
+++ /dev/null
@@ -1,148 +0,0 @@
-; RUN: llc < %s -march=x86-64 -asm-verbose=false -mtriple=x86_64-unknown-linux-gnu -post-RA-scheduler=true | FileCheck %s
-
-; Currently, floating-point selects are lowered to CFG triangles.
-; This means that one side of the select is always unconditionally
-; evaluated; however, with MachineSink we can sink the other side so
-; that it's conditionally evaluated.
-
-; CHECK: foo:
-; CHECK: divsd
-; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: jne
-; CHECK-NEXT: divsd
-
-define double @foo(double %x, double %y, i1 %c) nounwind {
- %a = fdiv double %x, 3.2
- %b = fdiv double %y, 3.3
- %z = select i1 %c, double %a, double %b
- ret double %z
-}
-
-; Hoist floating-point constant-pool loads out of loops.
-
-; CHECK: bar:
-; CHECK: movsd
-; CHECK: align
-define void @bar(double* nocapture %p, i64 %n) nounwind {
-entry:
- %0 = icmp sgt i64 %n, 0
- br i1 %0, label %bb, label %return
-
-bb:
- %i.03 = phi i64 [ 0, %entry ], [ %3, %bb ]
- %scevgep = getelementptr double* %p, i64 %i.03
- %1 = load double* %scevgep, align 8
- %2 = fdiv double 3.200000e+00, %1
- store double %2, double* %scevgep, align 8
- %3 = add nsw i64 %i.03, 1
- %exitcond = icmp eq i64 %3, %n
- br i1 %exitcond, label %return, label %bb
-
-return:
- ret void
-}
-
-; Sink instructions with dead EFLAGS defs.
-
-; CHECK: zzz:
-; CHECK: je
-; CHECK-NEXT: orb
-
-define zeroext i8 @zzz(i8 zeroext %a, i8 zeroext %b) nounwind readnone {
-entry:
- %tmp = zext i8 %a to i32 ; <i32> [#uses=1]
- %tmp2 = icmp eq i8 %a, 0 ; <i1> [#uses=1]
- %tmp3 = or i8 %b, -128 ; <i8> [#uses=1]
- %tmp4 = and i8 %b, 127 ; <i8> [#uses=1]
- %b_addr.0 = select i1 %tmp2, i8 %tmp4, i8 %tmp3 ; <i8> [#uses=1]
- ret i8 %b_addr.0
-}
-
-; Codegen should hoist and CSE these constants.
-
-; CHECK: vv:
-; CHECK: LCPI4_0(%rip), %xmm0
-; CHECK: LCPI4_1(%rip), %xmm1
-; CHECK: LCPI4_2(%rip), %xmm2
-; CHECK: align
-; CHECK-NOT: LCPI
-; CHECK: ret
-
-@_minusZero.6007 = internal constant <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00> ; <<4 x float>*> [#uses=0]
-@twoTo23.6008 = internal constant <4 x float> <float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06> ; <<4 x float>*> [#uses=0]
-
-define void @vv(float* %y, float* %x, i32* %n) nounwind ssp {
-entry:
- br label %bb60
-
-bb: ; preds = %bb60
- %0 = bitcast float* %x_addr.0 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %1 = load <4 x float>* %0, align 16 ; <<4 x float>> [#uses=4]
- %tmp20 = bitcast <4 x float> %1 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp22 = and <4 x i32> %tmp20, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647> ; <<4 x i32>> [#uses=1]
- %tmp23 = bitcast <4 x i32> %tmp22 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp25 = bitcast <4 x float> %1 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp27 = and <4 x i32> %tmp25, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648> ; <<4 x i32>> [#uses=2]
- %tmp30 = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %tmp23, <4 x float> <float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06>, i8 5) ; <<4 x float>> [#uses=1]
- %tmp34 = bitcast <4 x float> %tmp30 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp36 = xor <4 x i32> %tmp34, <i32 -1, i32 -1, i32 -1, i32 -1> ; <<4 x i32>> [#uses=1]
- %tmp37 = and <4 x i32> %tmp36, <i32 1258291200, i32 1258291200, i32 1258291200, i32 1258291200> ; <<4 x i32>> [#uses=1]
- %tmp42 = or <4 x i32> %tmp37, %tmp27 ; <<4 x i32>> [#uses=1]
- %tmp43 = bitcast <4 x i32> %tmp42 to <4 x float> ; <<4 x float>> [#uses=2]
- %tmp45 = fadd <4 x float> %1, %tmp43 ; <<4 x float>> [#uses=1]
- %tmp47 = fsub <4 x float> %tmp45, %tmp43 ; <<4 x float>> [#uses=2]
- %tmp49 = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %1, <4 x float> %tmp47, i8 1) ; <<4 x float>> [#uses=1]
- %2 = bitcast <4 x float> %tmp49 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %3 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %2) nounwind readnone ; <<4 x float>> [#uses=1]
- %tmp53 = fadd <4 x float> %tmp47, %3 ; <<4 x float>> [#uses=1]
- %tmp55 = bitcast <4 x float> %tmp53 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp57 = or <4 x i32> %tmp55, %tmp27 ; <<4 x i32>> [#uses=1]
- %tmp58 = bitcast <4 x i32> %tmp57 to <4 x float> ; <<4 x float>> [#uses=1]
- %4 = bitcast float* %y_addr.0 to <4 x float>* ; <<4 x float>*> [#uses=1]
- store <4 x float> %tmp58, <4 x float>* %4, align 16
- %5 = getelementptr float* %x_addr.0, i64 4 ; <float*> [#uses=1]
- %6 = getelementptr float* %y_addr.0, i64 4 ; <float*> [#uses=1]
- %7 = add i32 %i.0, 4 ; <i32> [#uses=1]
- br label %bb60
-
-bb60: ; preds = %bb, %entry
- %i.0 = phi i32 [ 0, %entry ], [ %7, %bb ] ; <i32> [#uses=2]
- %x_addr.0 = phi float* [ %x, %entry ], [ %5, %bb ] ; <float*> [#uses=2]
- %y_addr.0 = phi float* [ %y, %entry ], [ %6, %bb ] ; <float*> [#uses=2]
- %8 = load i32* %n, align 4 ; <i32> [#uses=1]
- %9 = icmp sgt i32 %8, %i.0 ; <i1> [#uses=1]
- br i1 %9, label %bb, label %return
-
-return: ; preds = %bb60
- ret void
-}
-
-declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
-
-declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone
-
-; CodeGen should use the correct register class when extracting
-; a load from a zero-extending load for hoisting.
-
-; CHECK: default_get_pch_validity:
-; CHECK: movl cl_options_count(%rip), %ecx
-
-@cl_options_count = external constant i32		; <i32*> [#uses=2]
-
-define void @default_get_pch_validity() nounwind {
-entry:
- %tmp4 = load i32* @cl_options_count, align 4 ; <i32> [#uses=1]
- %tmp5 = icmp eq i32 %tmp4, 0 ; <i1> [#uses=1]
- br i1 %tmp5, label %bb6, label %bb2
-
-bb2: ; preds = %bb2, %entry
- %i.019 = phi i64 [ 0, %entry ], [ %tmp25, %bb2 ] ; <i64> [#uses=1]
- %tmp25 = add i64 %i.019, 1 ; <i64> [#uses=2]
- %tmp11 = load i32* @cl_options_count, align 4 ; <i32> [#uses=1]
- %tmp12 = zext i32 %tmp11 to i64 ; <i64> [#uses=1]
- %tmp13 = icmp ugt i64 %tmp12, %tmp25 ; <i1> [#uses=1]
- br i1 %tmp13, label %bb2, label %bb6
-
-bb6: ; preds = %bb2, %entry
- ret void
-}
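The deleted sink-hoist.ll also covered sinking instructions with dead EFLAGS definitions and CSE of constant-pool loads; a hand-written C version of just its first two functions looks roughly like this (foo/bar mirror the IR above; the example is illustrative, not part of the diff). In foo() only one of the two divisions is ultimately used, so machine sinking can move the unused one onto the path that needs it; in bar() the 3.2 constant is loop-invariant and can be hoisted out of the loop.

#include <stddef.h>

static double foo(double x, double y, int c) {
    double a = x / 3.2;
    double b = y / 3.3;
    return c ? a : b;          /* select between the two divisions */
}

static void bar(double *p, size_t n) {
    for (size_t i = 0; i < n; i++)
        p[i] = 3.2 / p[i];     /* 3.2 is a loop-invariant constant load */
}

int main(void) {
    double v[2] = { 1.6, 0.8 };
    bar(v, 2);                            /* v becomes { 2.0, 4.0 } */
    return foo(v[0], v[1], 1) > 0.0 ? 0 : 1;
}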
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/small-byval-memcpy.ll b/libclamav/c++/llvm/test/CodeGen/X86/small-byval-memcpy.ll
deleted file mode 100644
index 9ec9182..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/small-byval-memcpy.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s | not grep movs
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
-
-define void @ccosl({ x86_fp80, x86_fp80 }* noalias sret %agg.result, { x86_fp80, x86_fp80 }* byval align 4 %z) nounwind {
-entry:
- %iz = alloca { x86_fp80, x86_fp80 } ; <{ x86_fp80, x86_fp80 }*> [#uses=3]
- %tmp1 = getelementptr { x86_fp80, x86_fp80 }* %z, i32 0, i32 1 ; <x86_fp80*> [#uses=1]
- %tmp2 = load x86_fp80* %tmp1, align 16 ; <x86_fp80> [#uses=1]
- %tmp3 = fsub x86_fp80 0xK80000000000000000000, %tmp2 ; <x86_fp80> [#uses=1]
- %tmp4 = getelementptr { x86_fp80, x86_fp80 }* %iz, i32 0, i32 1 ; <x86_fp80*> [#uses=1]
- %real = getelementptr { x86_fp80, x86_fp80 }* %iz, i32 0, i32 0 ; <x86_fp80*> [#uses=1]
- %tmp6 = getelementptr { x86_fp80, x86_fp80 }* %z, i32 0, i32 0 ; <x86_fp80*> [#uses=1]
- %tmp7 = load x86_fp80* %tmp6, align 16 ; <x86_fp80> [#uses=1]
- store x86_fp80 %tmp3, x86_fp80* %real, align 16
- store x86_fp80 %tmp7, x86_fp80* %tmp4, align 16
- call void @ccoshl( { x86_fp80, x86_fp80 }* noalias sret %agg.result, { x86_fp80, x86_fp80 }* byval align 4 %iz ) nounwind
- ret void
-}
-
-declare void @ccoshl({ x86_fp80, x86_fp80 }* noalias sret , { x86_fp80, x86_fp80 }* byval align 4 ) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/smul-with-overflow-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/smul-with-overflow-2.ll
deleted file mode 100644
index 7c23adb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/smul-with-overflow-2.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mul | count 1
-; RUN: llc < %s -march=x86 | grep add | count 3
-
-define i32 @t1(i32 %a, i32 %b) nounwind readnone {
-entry:
- %tmp0 = add i32 %b, %a
- %tmp1 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %tmp0, i32 2)
- %tmp2 = extractvalue { i32, i1 } %tmp1, 0
- ret i32 %tmp2
-}
-
-define i32 @t2(i32 %a, i32 %b) nounwind readnone {
-entry:
- %tmp0 = add i32 %b, %a
- %tmp1 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %tmp0, i32 4)
- %tmp2 = extractvalue { i32, i1 } %tmp1, 0
- ret i32 %tmp2
-}
-
-declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/smul-with-overflow-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/smul-with-overflow-3.ll
deleted file mode 100644
index 49c31f5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/smul-with-overflow-3.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {jno} | count 1
-
-@ok = internal constant [4 x i8] c"%d\0A\00"
-@no = internal constant [4 x i8] c"no\0A\00"
-
-define i1 @func1(i32 %v1, i32 %v2) nounwind {
-entry:
- %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
- %sum = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %overflow, label %normal
-
-overflow:
- %t2 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @no, i32 0, i32 0) ) nounwind
- ret i1 false
-
-normal:
- %t1 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @ok, i32 0, i32 0), i32 %sum ) nounwind
- ret i1 true
-}
-
-declare i32 @printf(i8*, ...) nounwind
-declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/smul-with-overflow.ll b/libclamav/c++/llvm/test/CodeGen/X86/smul-with-overflow.ll
deleted file mode 100644
index 6d125e4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/smul-with-overflow.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {jo} | count 1
-
-@ok = internal constant [4 x i8] c"%d\0A\00"
-@no = internal constant [4 x i8] c"no\0A\00"
-
-define i1 @func1(i32 %v1, i32 %v2) nounwind {
-entry:
- %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
- %sum = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %overflow, label %normal
-
-normal:
- %t1 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @ok, i32 0, i32 0), i32 %sum ) nounwind
- ret i1 true
-
-overflow:
- %t2 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @no, i32 0, i32 0) ) nounwind
- ret i1 false
-}
-
-declare i32 @printf(i8*, ...) nounwind
-declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/soft-fp.ll b/libclamav/c++/llvm/test/CodeGen/X86/soft-fp.ll
deleted file mode 100644
index a52135d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/soft-fp.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -soft-float | not grep xmm
-; RUN: llc < %s -march=x86-64 -mattr=+sse2 -soft-float | not grep xmm
-
- %struct.__va_list_tag = type { i32, i32, i8*, i8* }
-
-define i32 @t1(i32 %a, ...) nounwind {
-entry:
- %va = alloca [1 x %struct.__va_list_tag], align 8 ; <[1 x %struct.__va_list_tag]*> [#uses=2]
- %va12 = bitcast [1 x %struct.__va_list_tag]* %va to i8* ; <i8*> [#uses=2]
- call void @llvm.va_start(i8* %va12)
- %va3 = getelementptr [1 x %struct.__va_list_tag]* %va, i64 0, i64 0 ; <%struct.__va_list_tag*> [#uses=1]
- call void @bar(%struct.__va_list_tag* %va3) nounwind
- call void @llvm.va_end(i8* %va12)
- ret i32 undef
-}
-
-declare void @llvm.va_start(i8*) nounwind
-
-declare void @bar(%struct.__va_list_tag*)
-
-declare void @llvm.va_end(i8*) nounwind
-
-define float @t2(float %a, float %b) nounwind readnone {
-entry:
- %0 = fadd float %a, %b ; <float> [#uses=1]
- ret float %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/splat-scalar-load.ll b/libclamav/c++/llvm/test/CodeGen/X86/splat-scalar-load.ll
deleted file mode 100644
index 2b13029..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/splat-scalar-load.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+sse2 | FileCheck %s
-; rdar://7434544
-
-define <2 x i64> @t2() nounwind ssp {
-entry:
-; CHECK: t2:
-; CHECK: pshufd $85, (%esp), %xmm0
- %array = alloca [8 x float], align 4
- %arrayidx = getelementptr inbounds [8 x float]* %array, i32 0, i32 1
- %tmp2 = load float* %arrayidx
- %vecinit = insertelement <4 x float> undef, float %tmp2, i32 0
- %vecinit5 = insertelement <4 x float> %vecinit, float %tmp2, i32 1
- %vecinit7 = insertelement <4 x float> %vecinit5, float %tmp2, i32 2
- %vecinit9 = insertelement <4 x float> %vecinit7, float %tmp2, i32 3
- %0 = bitcast <4 x float> %vecinit9 to <2 x i64>
- ret <2 x i64> %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/split-eh-lpad-edges.ll b/libclamav/c++/llvm/test/CodeGen/X86/split-eh-lpad-edges.ll
deleted file mode 100644
index fd40a7f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/split-eh-lpad-edges.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | not grep jmp
-; rdar://6647639
-
- %struct.FetchPlanHeader = type { i8*, i8*, i32, i8*, i8*, i8*, i8*, i8*, %struct.NSObject* (%struct.NSObject*, %struct.objc_selector*, ...)*, %struct.__attributeDescriptionFlags }
- %struct.NSArray = type { %struct.NSObject }
- %struct.NSAutoreleasePool = type { %struct.NSObject, i8*, i8*, i8*, i8* }
- %struct.NSObject = type { %struct.NSObject* }
- %struct.__attributeDescriptionFlags = type <{ i32 }>
- %struct._message_ref_t = type { %struct.NSObject* (%struct.NSObject*, %struct._message_ref_t*, ...)*, %struct.objc_selector* }
- %struct.objc_selector = type opaque
-@"\01l_objc_msgSend_fixup_alloc" = external global %struct._message_ref_t, align 16 ; <%struct._message_ref_t*> [#uses=2]
-
-define %struct.NSArray* @newFetchedRowsForFetchPlan_MT(%struct.FetchPlanHeader* %fetchPlan, %struct.objc_selector* %selectionMethod, %struct.NSObject* %selectionParameter) ssp {
-entry:
- %0 = invoke %struct.NSObject* null(%struct.NSObject* null, %struct._message_ref_t* @"\01l_objc_msgSend_fixup_alloc")
- to label %invcont unwind label %lpad ; <%struct.NSObject*> [#uses=1]
-
-invcont: ; preds = %entry
- %1 = invoke %struct.NSObject* (%struct.NSObject*, %struct.objc_selector*, ...)* @objc_msgSend(%struct.NSObject* %0, %struct.objc_selector* null)
- to label %invcont26 unwind label %lpad ; <%struct.NSObject*> [#uses=0]
-
-invcont26: ; preds = %invcont
- %2 = invoke %struct.NSObject* null(%struct.NSObject* null, %struct._message_ref_t* @"\01l_objc_msgSend_fixup_alloc")
- to label %invcont27 unwind label %lpad ; <%struct.NSObject*> [#uses=0]
-
-invcont27: ; preds = %invcont26
- unreachable
-
-lpad: ; preds = %invcont26, %invcont, %entry
- %pool.1 = phi %struct.NSAutoreleasePool* [ null, %entry ], [ null, %invcont ], [ null, %invcont26 ] ; <%struct.NSAutoreleasePool*> [#uses=0]
- unreachable
-}
-
-declare %struct.NSObject* @objc_msgSend(%struct.NSObject*, %struct.objc_selector*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/split-select.ll b/libclamav/c++/llvm/test/CodeGen/X86/split-select.ll
deleted file mode 100644
index 07d4d52..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/split-select.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep test | count 1
-
-define void @foo(i1 %c, <2 x i16> %a, <2 x i16> %b, <2 x i16>* %p) {
- %x = select i1 %c, <2 x i16> %a, <2 x i16> %b
- store <2 x i16> %x, <2 x i16>* %p
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/split-vector-rem.ll b/libclamav/c++/llvm/test/CodeGen/X86/split-vector-rem.ll
deleted file mode 100644
index 681c6b0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/split-vector-rem.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep div | count 16
-; RUN: llc < %s -march=x86-64 | grep fmodf | count 8
-
-define <8 x i32> @foo(<8 x i32> %t, <8 x i32> %u) {
- %m = srem <8 x i32> %t, %u
- ret <8 x i32> %m
-}
-define <8 x i32> @bar(<8 x i32> %t, <8 x i32> %u) {
- %m = urem <8 x i32> %t, %u
- ret <8 x i32> %m
-}
-define <8 x float> @qux(<8 x float> %t, <8 x float> %u) {
- %m = frem <8 x float> %t, %u
- ret <8 x float> %m
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sret.ll b/libclamav/c++/llvm/test/CodeGen/X86/sret.ll
deleted file mode 100644
index b945530..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sret.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -march=x86 | grep ret | grep 4
-
- %struct.foo = type { [4 x i32] }
-
-define void @bar(%struct.foo* noalias sret %agg.result) nounwind {
-entry:
- %tmp1 = getelementptr %struct.foo* %agg.result, i32 0, i32 0
- %tmp3 = getelementptr [4 x i32]* %tmp1, i32 0, i32 0
- store i32 1, i32* %tmp3, align 8
- ret void
-}
-
-@dst = external global i32
-
-define void @foo() nounwind {
- %memtmp = alloca %struct.foo, align 4
- call void @bar( %struct.foo* sret %memtmp ) nounwind
- %tmp4 = getelementptr %struct.foo* %memtmp, i32 0, i32 0
- %tmp5 = getelementptr [4 x i32]* %tmp4, i32 0, i32 0
- %tmp6 = load i32* %tmp5
- store i32 %tmp6, i32* @dst
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-0.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-align-0.ll
deleted file mode 100644
index b12a87d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-0.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86-64 | not grep mov
-
-define <4 x float> @foo(<4 x float>* %p, <4 x float> %x) nounwind {
- %t = load <4 x float>* %p
- %z = fmul <4 x float> %t, %x
- ret <4 x float> %z
-}
-define <2 x double> @bar(<2 x double>* %p, <2 x double> %x) nounwind {
- %t = load <2 x double>* %p
- %z = fmul <2 x double> %t, %x
- ret <2 x double> %z
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-align-1.ll
deleted file mode 100644
index c7a5cd5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-1.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movap | count 2
-
-define <4 x float> @foo(<4 x float>* %p) nounwind {
- %t = load <4 x float>* %p
- ret <4 x float> %t
-}
-define <2 x double> @bar(<2 x double>* %p) nounwind {
- %t = load <2 x double>* %p
- ret <2 x double> %t
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-10.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-align-10.ll
deleted file mode 100644
index 0f91697..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-10.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movups | count 1
-
-define <2 x i64> @bar(<2 x i64>* %p) nounwind {
- %t = load <2 x i64>* %p, align 8
- ret <2 x i64> %t
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-11.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-align-11.ll
deleted file mode 100644
index aa1b437..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-11.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah -mtriple=i686-apple-darwin8 | grep movaps
-; RUN: llc < %s -march=x86 -mcpu=yonah -mtriple=linux | grep movups
-
-define <4 x float> @foo(float %a, float %b, float %c, float %d) nounwind {
-entry:
- %tmp6 = insertelement <4 x float> undef, float %a, i32 0
- %tmp7 = insertelement <4 x float> %tmp6, float %b, i32 1
- %tmp8 = insertelement <4 x float> %tmp7, float %c, i32 2
- %tmp9 = insertelement <4 x float> %tmp8, float %d, i32 3
- ret <4 x float> %tmp9
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-12.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-align-12.ll
deleted file mode 100644
index 4f025b9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-12.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: grep unpck %t | count 2
-; RUN: grep shuf %t | count 2
-; RUN: grep ps %t | count 4
-; RUN: grep pd %t | count 4
-; RUN: grep movup %t | count 4
-
-define <4 x float> @a(<4 x float>* %y) nounwind {
- %x = load <4 x float>* %y, align 4
- %a = extractelement <4 x float> %x, i32 0
- %b = extractelement <4 x float> %x, i32 1
- %c = extractelement <4 x float> %x, i32 2
- %d = extractelement <4 x float> %x, i32 3
- %p = insertelement <4 x float> undef, float %d, i32 0
- %q = insertelement <4 x float> %p, float %c, i32 1
- %r = insertelement <4 x float> %q, float %b, i32 2
- %s = insertelement <4 x float> %r, float %a, i32 3
- ret <4 x float> %s
-}
-define <4 x float> @b(<4 x float>* %y, <4 x float> %z) nounwind {
- %x = load <4 x float>* %y, align 4
- %a = extractelement <4 x float> %x, i32 2
- %b = extractelement <4 x float> %x, i32 3
- %c = extractelement <4 x float> %z, i32 2
- %d = extractelement <4 x float> %z, i32 3
- %p = insertelement <4 x float> undef, float %c, i32 0
- %q = insertelement <4 x float> %p, float %a, i32 1
- %r = insertelement <4 x float> %q, float %d, i32 2
- %s = insertelement <4 x float> %r, float %b, i32 3
- ret <4 x float> %s
-}
-define <2 x double> @c(<2 x double>* %y) nounwind {
- %x = load <2 x double>* %y, align 8
- %a = extractelement <2 x double> %x, i32 0
- %c = extractelement <2 x double> %x, i32 1
- %p = insertelement <2 x double> undef, double %c, i32 0
- %r = insertelement <2 x double> %p, double %a, i32 1
- ret <2 x double> %r
-}
-define <2 x double> @d(<2 x double>* %y, <2 x double> %z) nounwind {
- %x = load <2 x double>* %y, align 8
- %a = extractelement <2 x double> %x, i32 1
- %c = extractelement <2 x double> %z, i32 1
- %p = insertelement <2 x double> undef, double %c, i32 0
- %r = insertelement <2 x double> %p, double %a, i32 1
- ret <2 x double> %r
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-align-2.ll
deleted file mode 100644
index 102c3fb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-2.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movup | count 2
-
-define <4 x float> @foo(<4 x float>* %p, <4 x float> %x) nounwind {
- %t = load <4 x float>* %p, align 4
- %z = fmul <4 x float> %t, %x
- ret <4 x float> %z
-}
-define <2 x double> @bar(<2 x double>* %p, <2 x double> %x) nounwind {
- %t = load <2 x double>* %p, align 8
- %z = fmul <2 x double> %t, %x
- ret <2 x double> %z
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-align-3.ll
deleted file mode 100644
index c42f7f0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-3.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movap | count 2
-
-define void @foo(<4 x float>* %p, <4 x float> %x) nounwind {
- store <4 x float> %x, <4 x float>* %p
- ret void
-}
-define void @bar(<2 x double>* %p, <2 x double> %x) nounwind {
- store <2 x double> %x, <2 x double>* %p
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-4.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-align-4.ll
deleted file mode 100644
index 4c59934..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-4.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movup | count 2
-
-define void @foo(<4 x float>* %p, <4 x float> %x) nounwind {
- store <4 x float> %x, <4 x float>* %p, align 4
- ret void
-}
-define void @bar(<2 x double>* %p, <2 x double> %x) nounwind {
- store <2 x double> %x, <2 x double>* %p, align 8
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-5.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-align-5.ll
deleted file mode 100644
index 21cd231..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-5.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movaps | count 1
-
-define <2 x i64> @bar(<2 x i64>* %p) nounwind {
- %t = load <2 x i64>* %p
- ret <2 x i64> %t
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-6.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-align-6.ll
deleted file mode 100644
index 0bbf422..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-6.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movups | count 1
-
-define <2 x i64> @bar(<2 x i64>* %p, <2 x i64> %x) nounwind {
- %t = load <2 x i64>* %p, align 8
- %z = mul <2 x i64> %t, %x
- ret <2 x i64> %z
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-7.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-align-7.ll
deleted file mode 100644
index 5784481..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-7.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movaps | count 1
-
-define void @bar(<2 x i64>* %p, <2 x i64> %x) nounwind {
- store <2 x i64> %x, <2 x i64>* %p
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-8.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-align-8.ll
deleted file mode 100644
index cfeff81..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-8.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movups | count 1
-
-define void @bar(<2 x i64>* %p, <2 x i64> %x) nounwind {
- store <2 x i64> %x, <2 x i64>* %p, align 8
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-9.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-align-9.ll
deleted file mode 100644
index cb26b95..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-align-9.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movup | count 2
-
-define <4 x float> @foo(<4 x float>* %p) nounwind {
- %t = load <4 x float>* %p, align 4
- ret <4 x float> %t
-}
-define <2 x double> @bar(<2 x double>* %p) nounwind {
- %t = load <2 x double>* %p, align 8
- ret <2 x double> %t
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-fcopysign.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-fcopysign.ll
deleted file mode 100644
index 0e0e4a9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-fcopysign.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep test
-
-define float @tst1(float %a, float %b) {
- %tmp = tail call float @copysignf( float %b, float %a )
- ret float %tmp
-}
-
-define double @tst2(double %a, float %b, float %c) {
- %tmp1 = fadd float %b, %c
- %tmp2 = fpext float %tmp1 to double
- %tmp = tail call double @copysign( double %a, double %tmp2 )
- ret double %tmp
-}
-
-declare float @copysignf(float, float)
-declare double @copysign(double, double)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-load-ret.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-load-ret.ll
deleted file mode 100644
index 1ebcb1a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-load-ret.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah | not grep movss
-; RUN: llc < %s -march=x86 -mcpu=yonah | not grep xmm
-
-define double @test1(double* %P) {
- %X = load double* %P ; <double> [#uses=1]
- ret double %X
-}
-
-define double @test2() {
- ret double 1.234560e+03
-}
-
-
-; FIXME: Todo
-;double %test3(bool %B) {
-; %C = select bool %B, double 123.412, double 523.01123123
-; ret double %C
-;}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-minmax.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-minmax.ll
deleted file mode 100644
index 19fbed0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-minmax.ll
+++ /dev/null
@@ -1,932 +0,0 @@
-; RUN: llc < %s -march=x86-64 -asm-verbose=false | FileCheck %s
-; RUN: llc < %s -march=x86-64 -asm-verbose=false -enable-unsafe-fp-math | FileCheck -check-prefix=UNSAFE %s
-; RUN: llc < %s -march=x86-64 -asm-verbose=false -enable-finite-only-fp-math | FileCheck -check-prefix=FINITE %s
-
-; Some of these patterns can be matched as SSE min or max. Some of
-; them can be matched provided that the operands are swapped.
-; Some of them can't be matched at all and require a comparison
-; and a conditional branch.
-
-; The naming convention is {,x_,y_}{o,u}{gt,lt,ge,le}{,_inverse}
-; x_ : use 0.0 instead of %y
-; y_ : use -0.0 instead of %y
-; _inverse : swap the arms of the select.
-
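A minimal C illustration of the patterns described in the comment above (function names follow the test's naming convention; the C code itself is only a sketch, not part of the commit): a floating-point compare feeding a select on the same two operands. Per the CHECK lines below, the ordered strict forms map directly to maxsd/minsd, while forms like oge need a ucomisd compare and branch unless unsafe or finite-only FP math is enabled.

#include <stdio.h>

static double ogt(double x, double y) { return x > y  ? x : y; }  /* maxsd candidate */
static double olt(double x, double y) { return x < y  ? x : y; }  /* minsd candidate */
static double oge(double x, double y) { return x >= y ? x : y; }  /* ucomisd + branch in the strict case */

int main(void) {
    printf("%g %g %g\n", ogt(1.0, 2.0), olt(1.0, 2.0), oge(1.0, 2.0));  /* 2 1 2 */
    return 0;
}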
-; CHECK: ogt:
-; CHECK-NEXT: maxsd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: ogt:
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: ogt:
-; FINITE-NEXT: maxsd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @ogt(double %x, double %y) nounwind {
- %c = fcmp ogt double %x, %y
- %d = select i1 %c, double %x, double %y
- ret double %d
-}
-
-; CHECK: olt:
-; CHECK-NEXT: minsd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: olt:
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: olt:
-; FINITE-NEXT: minsd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @olt(double %x, double %y) nounwind {
- %c = fcmp olt double %x, %y
- %d = select i1 %c, double %x, double %y
- ret double %d
-}
-
-; CHECK: ogt_inverse:
-; CHECK-NEXT: minsd %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: ogt_inverse:
-; UNSAFE-NEXT: minsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: ogt_inverse:
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @ogt_inverse(double %x, double %y) nounwind {
- %c = fcmp ogt double %x, %y
- %d = select i1 %c, double %y, double %x
- ret double %d
-}
-
-; CHECK: olt_inverse:
-; CHECK-NEXT: maxsd %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: olt_inverse:
-; UNSAFE-NEXT: maxsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: olt_inverse:
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @olt_inverse(double %x, double %y) nounwind {
- %c = fcmp olt double %x, %y
- %d = select i1 %c, double %y, double %x
- ret double %d
-}
-
-; CHECK: oge:
-; CHECK-NEXT: ucomisd %xmm1, %xmm0
-; UNSAFE: oge:
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: oge:
-; FINITE-NEXT: maxsd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @oge(double %x, double %y) nounwind {
- %c = fcmp oge double %x, %y
- %d = select i1 %c, double %x, double %y
- ret double %d
-}
-
-; CHECK: ole:
-; CHECK-NEXT: ucomisd %xmm0, %xmm1
-; UNSAFE: ole:
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; FINITE: ole:
-; FINITE-NEXT: minsd %xmm1, %xmm0
-define double @ole(double %x, double %y) nounwind {
- %c = fcmp ole double %x, %y
- %d = select i1 %c, double %x, double %y
- ret double %d
-}
-
-; CHECK: oge_inverse:
-; CHECK-NEXT: ucomisd %xmm1, %xmm0
-; UNSAFE: oge_inverse:
-; UNSAFE-NEXT: minsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: oge_inverse:
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @oge_inverse(double %x, double %y) nounwind {
- %c = fcmp oge double %x, %y
- %d = select i1 %c, double %y, double %x
- ret double %d
-}
-
-; CHECK: ole_inverse:
-; CHECK-NEXT: ucomisd %xmm0, %xmm1
-; UNSAFE: ole_inverse:
-; UNSAFE-NEXT: maxsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: ole_inverse:
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @ole_inverse(double %x, double %y) nounwind {
- %c = fcmp ole double %x, %y
- %d = select i1 %c, double %y, double %x
- ret double %d
-}
-
-; CHECK: x_ogt:
-; CHECK-NEXT: pxor %xmm1, %xmm1
-; CHECK-NEXT: maxsd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: x_ogt:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: x_ogt:
-; FINITE-NEXT: pxor %xmm1, %xmm1
-; FINITE-NEXT: maxsd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @x_ogt(double %x) nounwind {
- %c = fcmp ogt double %x, 0.000000e+00
- %d = select i1 %c, double %x, double 0.000000e+00
- ret double %d
-}
-
-; CHECK: x_olt:
-; CHECK-NEXT: pxor %xmm1, %xmm1
-; CHECK-NEXT: minsd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: x_olt:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: x_olt:
-; FINITE-NEXT: pxor %xmm1, %xmm1
-; FINITE-NEXT: minsd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @x_olt(double %x) nounwind {
- %c = fcmp olt double %x, 0.000000e+00
- %d = select i1 %c, double %x, double 0.000000e+00
- ret double %d
-}
-
-; CHECK: x_ogt_inverse:
-; CHECK-NEXT: pxor %xmm1, %xmm1
-; CHECK-NEXT: minsd %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: x_ogt_inverse:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
-; UNSAFE-NEXT: minsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: x_ogt_inverse:
-; FINITE-NEXT: pxor %xmm1, %xmm1
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @x_ogt_inverse(double %x) nounwind {
- %c = fcmp ogt double %x, 0.000000e+00
- %d = select i1 %c, double 0.000000e+00, double %x
- ret double %d
-}
-
-; CHECK: x_olt_inverse:
-; CHECK-NEXT: pxor %xmm1, %xmm1
-; CHECK-NEXT: maxsd %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: x_olt_inverse:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
-; UNSAFE-NEXT: maxsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: x_olt_inverse:
-; FINITE-NEXT: pxor %xmm1, %xmm1
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @x_olt_inverse(double %x) nounwind {
- %c = fcmp olt double %x, 0.000000e+00
- %d = select i1 %c, double 0.000000e+00, double %x
- ret double %d
-}
-
-; CHECK: x_oge:
-; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: x_oge:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: x_oge:
-; FINITE-NEXT: pxor %xmm1, %xmm1
-; FINITE-NEXT: maxsd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @x_oge(double %x) nounwind {
- %c = fcmp oge double %x, 0.000000e+00
- %d = select i1 %c, double %x, double 0.000000e+00
- ret double %d
-}
-
-; CHECK: x_ole:
-; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: x_ole:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: x_ole:
-; FINITE-NEXT: pxor %xmm1, %xmm1
-; FINITE-NEXT: minsd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @x_ole(double %x) nounwind {
- %c = fcmp ole double %x, 0.000000e+00
- %d = select i1 %c, double %x, double 0.000000e+00
- ret double %d
-}
-
-; CHECK: x_oge_inverse:
-; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: x_oge_inverse:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
-; UNSAFE-NEXT: minsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: x_oge_inverse:
-; FINITE-NEXT: pxor %xmm1, %xmm1
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @x_oge_inverse(double %x) nounwind {
- %c = fcmp oge double %x, 0.000000e+00
- %d = select i1 %c, double 0.000000e+00, double %x
- ret double %d
-}
-
-; CHECK: x_ole_inverse:
-; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: x_ole_inverse:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
-; UNSAFE-NEXT: maxsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: x_ole_inverse:
-; FINITE-NEXT: pxor %xmm1, %xmm1
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @x_ole_inverse(double %x) nounwind {
- %c = fcmp ole double %x, 0.000000e+00
- %d = select i1 %c, double 0.000000e+00, double %x
- ret double %d
-}
-
-; CHECK: ugt:
-; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: ugt:
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: ugt:
-; FINITE-NEXT: maxsd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @ugt(double %x, double %y) nounwind {
- %c = fcmp ugt double %x, %y
- %d = select i1 %c, double %x, double %y
- ret double %d
-}
-
-; CHECK: ult:
-; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: ult:
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: ult:
-; FINITE-NEXT: minsd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @ult(double %x, double %y) nounwind {
- %c = fcmp ult double %x, %y
- %d = select i1 %c, double %x, double %y
- ret double %d
-}
-
-; CHECK: ugt_inverse:
-; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: ugt_inverse:
-; UNSAFE-NEXT: minsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: ugt_inverse:
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @ugt_inverse(double %x, double %y) nounwind {
- %c = fcmp ugt double %x, %y
- %d = select i1 %c, double %y, double %x
- ret double %d
-}
-
-; CHECK: ult_inverse:
-; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: ult_inverse:
-; UNSAFE-NEXT: maxsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: ult_inverse:
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @ult_inverse(double %x, double %y) nounwind {
- %c = fcmp ult double %x, %y
- %d = select i1 %c, double %y, double %x
- ret double %d
-}
-
-; CHECK: uge:
-; CHECK-NEXT: maxsd %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: uge:
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: uge:
-; FINITE-NEXT: maxsd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @uge(double %x, double %y) nounwind {
- %c = fcmp uge double %x, %y
- %d = select i1 %c, double %x, double %y
- ret double %d
-}
-
-; CHECK: ule:
-; CHECK-NEXT: minsd %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: ule:
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: ule:
-; FINITE-NEXT: minsd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @ule(double %x, double %y) nounwind {
- %c = fcmp ule double %x, %y
- %d = select i1 %c, double %x, double %y
- ret double %d
-}
-
-; CHECK: uge_inverse:
-; CHECK-NEXT: minsd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: uge_inverse:
-; UNSAFE-NEXT: minsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: uge_inverse:
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @uge_inverse(double %x, double %y) nounwind {
- %c = fcmp uge double %x, %y
- %d = select i1 %c, double %y, double %x
- ret double %d
-}
-
-; CHECK: ule_inverse:
-; CHECK-NEXT: maxsd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: ule_inverse:
-; UNSAFE-NEXT: maxsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: ule_inverse:
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @ule_inverse(double %x, double %y) nounwind {
- %c = fcmp ule double %x, %y
- %d = select i1 %c, double %y, double %x
- ret double %d
-}
-
-; CHECK: x_ugt:
-; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: x_ugt:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: x_ugt:
-; FINITE-NEXT: pxor %xmm1, %xmm1
-; FINITE-NEXT: maxsd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @x_ugt(double %x) nounwind {
- %c = fcmp ugt double %x, 0.000000e+00
- %d = select i1 %c, double %x, double 0.000000e+00
- ret double %d
-}
-
-; CHECK: x_ult:
-; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: x_ult:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: x_ult:
-; FINITE-NEXT: pxor %xmm1, %xmm1
-; FINITE-NEXT: minsd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @x_ult(double %x) nounwind {
- %c = fcmp ult double %x, 0.000000e+00
- %d = select i1 %c, double %x, double 0.000000e+00
- ret double %d
-}
-
-; CHECK: x_ugt_inverse:
-; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: x_ugt_inverse:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
-; UNSAFE-NEXT: minsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: x_ugt_inverse:
-; FINITE-NEXT: pxor %xmm1, %xmm1
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @x_ugt_inverse(double %x) nounwind {
- %c = fcmp ugt double %x, 0.000000e+00
- %d = select i1 %c, double 0.000000e+00, double %x
- ret double %d
-}
-
-; CHECK: x_ult_inverse:
-; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: x_ult_inverse:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
-; UNSAFE-NEXT: maxsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: x_ult_inverse:
-; FINITE-NEXT: pxor %xmm1, %xmm1
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @x_ult_inverse(double %x) nounwind {
- %c = fcmp ult double %x, 0.000000e+00
- %d = select i1 %c, double 0.000000e+00, double %x
- ret double %d
-}
-
-; CHECK: x_uge:
-; CHECK-NEXT: pxor %xmm1, %xmm1
-; CHECK-NEXT: maxsd %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: x_uge:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
-; UNSAFE-NEXT: maxsd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: x_uge:
-; FINITE-NEXT: pxor %xmm1, %xmm1
-; FINITE-NEXT: maxsd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @x_uge(double %x) nounwind {
- %c = fcmp uge double %x, 0.000000e+00
- %d = select i1 %c, double %x, double 0.000000e+00
- ret double %d
-}
-
-; CHECK: x_ule:
-; CHECK-NEXT: pxor %xmm1, %xmm1
-; CHECK-NEXT: minsd %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: x_ule:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
-; UNSAFE-NEXT: minsd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: x_ule:
-; FINITE-NEXT: pxor %xmm1, %xmm1
-; FINITE-NEXT: minsd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @x_ule(double %x) nounwind {
- %c = fcmp ule double %x, 0.000000e+00
- %d = select i1 %c, double %x, double 0.000000e+00
- ret double %d
-}
-
-; CHECK: x_uge_inverse:
-; CHECK-NEXT: pxor %xmm1, %xmm1
-; CHECK-NEXT: minsd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: x_uge_inverse:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
-; UNSAFE-NEXT: minsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: x_uge_inverse:
-; FINITE-NEXT: pxor %xmm1, %xmm1
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @x_uge_inverse(double %x) nounwind {
- %c = fcmp uge double %x, 0.000000e+00
- %d = select i1 %c, double 0.000000e+00, double %x
- ret double %d
-}
-
-; CHECK: x_ule_inverse:
-; CHECK-NEXT: pxor %xmm1, %xmm1
-; CHECK-NEXT: maxsd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: x_ule_inverse:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
-; UNSAFE-NEXT: maxsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: x_ule_inverse:
-; FINITE-NEXT: pxor %xmm1, %xmm1
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @x_ule_inverse(double %x) nounwind {
- %c = fcmp ule double %x, 0.000000e+00
- %d = select i1 %c, double 0.000000e+00, double %x
- ret double %d
-}
-
-; CHECK: y_ogt:
-; CHECK-NEXT: maxsd {{[^,]*}}, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: y_ogt:
-; UNSAFE-NEXT: maxsd {{[^,]*}}, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: y_ogt:
-; FINITE-NEXT: maxsd {{[^,]*}}, %xmm0
-; FINITE-NEXT: ret
-define double @y_ogt(double %x) nounwind {
- %c = fcmp ogt double %x, -0.000000e+00
- %d = select i1 %c, double %x, double -0.000000e+00
- ret double %d
-}
-
-; CHECK: y_olt:
-; CHECK-NEXT: minsd {{[^,]*}}, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: y_olt:
-; UNSAFE-NEXT: minsd {{[^,]*}}, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: y_olt:
-; FINITE-NEXT: minsd {{[^,]*}}, %xmm0
-; FINITE-NEXT: ret
-define double @y_olt(double %x) nounwind {
- %c = fcmp olt double %x, -0.000000e+00
- %d = select i1 %c, double %x, double -0.000000e+00
- ret double %d
-}
-
-; CHECK: y_ogt_inverse:
-; CHECK-NEXT: movsd {{[^,]*}}, %xmm1
-; CHECK-NEXT: minsd %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: y_ogt_inverse:
-; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
-; UNSAFE-NEXT: minsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: y_ogt_inverse:
-; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @y_ogt_inverse(double %x) nounwind {
- %c = fcmp ogt double %x, -0.000000e+00
- %d = select i1 %c, double -0.000000e+00, double %x
- ret double %d
-}
-
-; CHECK: y_olt_inverse:
-; CHECK-NEXT: movsd {{[^,]*}}, %xmm1
-; CHECK-NEXT: maxsd %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: y_olt_inverse:
-; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
-; UNSAFE-NEXT: maxsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: y_olt_inverse:
-; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @y_olt_inverse(double %x) nounwind {
- %c = fcmp olt double %x, -0.000000e+00
- %d = select i1 %c, double -0.000000e+00, double %x
- ret double %d
-}
-
-; CHECK: y_oge:
-; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: y_oge:
-; UNSAFE-NEXT: maxsd {{[^,]*}}, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: y_oge:
-; FINITE-NEXT: maxsd {{[^,]*}}, %xmm0
-; FINITE-NEXT: ret
-define double @y_oge(double %x) nounwind {
- %c = fcmp oge double %x, -0.000000e+00
- %d = select i1 %c, double %x, double -0.000000e+00
- ret double %d
-}
-
-; CHECK: y_ole:
-; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: y_ole:
-; UNSAFE-NEXT: minsd {{[^,]*}}, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: y_ole:
-; FINITE-NEXT: minsd {{[^,]*}}, %xmm0
-; FINITE-NEXT: ret
-define double @y_ole(double %x) nounwind {
- %c = fcmp ole double %x, -0.000000e+00
- %d = select i1 %c, double %x, double -0.000000e+00
- ret double %d
-}
-
-; CHECK: y_oge_inverse:
-; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: y_oge_inverse:
-; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
-; UNSAFE-NEXT: minsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: y_oge_inverse:
-; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @y_oge_inverse(double %x) nounwind {
- %c = fcmp oge double %x, -0.000000e+00
- %d = select i1 %c, double -0.000000e+00, double %x
- ret double %d
-}
-
-; CHECK: y_ole_inverse:
-; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: y_ole_inverse:
-; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
-; UNSAFE-NEXT: maxsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: y_ole_inverse:
-; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @y_ole_inverse(double %x) nounwind {
- %c = fcmp ole double %x, -0.000000e+00
- %d = select i1 %c, double -0.000000e+00, double %x
- ret double %d
-}
-
-; CHECK: y_ugt:
-; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: y_ugt:
-; UNSAFE-NEXT: maxsd {{[^,]*}}, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: y_ugt:
-; FINITE-NEXT: maxsd {{[^,]*}}, %xmm0
-; FINITE-NEXT: ret
-define double @y_ugt(double %x) nounwind {
- %c = fcmp ugt double %x, -0.000000e+00
- %d = select i1 %c, double %x, double -0.000000e+00
- ret double %d
-}
-
-; CHECK: y_ult:
-; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: y_ult:
-; UNSAFE-NEXT: minsd {{[^,]*}}, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: y_ult:
-; FINITE-NEXT: minsd {{[^,]*}}, %xmm0
-; FINITE-NEXT: ret
-define double @y_ult(double %x) nounwind {
- %c = fcmp ult double %x, -0.000000e+00
- %d = select i1 %c, double %x, double -0.000000e+00
- ret double %d
-}
-
-; CHECK: y_ugt_inverse:
-; CHECK: ucomisd %xmm0, %xmm1
-; UNSAFE: y_ugt_inverse:
-; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
-; UNSAFE-NEXT: minsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: y_ugt_inverse:
-; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @y_ugt_inverse(double %x) nounwind {
- %c = fcmp ugt double %x, -0.000000e+00
- %d = select i1 %c, double -0.000000e+00, double %x
- ret double %d
-}
-
-; CHECK: y_ult_inverse:
-; CHECK: ucomisd %xmm1, %xmm0
-; UNSAFE: y_ult_inverse:
-; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
-; UNSAFE-NEXT: maxsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: y_ult_inverse:
-; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @y_ult_inverse(double %x) nounwind {
- %c = fcmp ult double %x, -0.000000e+00
- %d = select i1 %c, double -0.000000e+00, double %x
- ret double %d
-}
-
-; CHECK: y_uge:
-; CHECK-NEXT: movsd {{[^,]*}}, %xmm1
-; CHECK-NEXT: maxsd %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: y_uge:
-; UNSAFE-NEXT: maxsd {{[^,]*}}, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: y_uge:
-; FINITE-NEXT: maxsd {{[^,]*}}, %xmm0
-; FINITE-NEXT: ret
-define double @y_uge(double %x) nounwind {
- %c = fcmp uge double %x, -0.000000e+00
- %d = select i1 %c, double %x, double -0.000000e+00
- ret double %d
-}
-
-; CHECK: y_ule:
-; CHECK-NEXT: movsd {{[^,]*}}, %xmm1
-; CHECK-NEXT: minsd %xmm0, %xmm1
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: y_ule:
-; UNSAFE-NEXT: minsd {{[^,]*}}, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: y_ule:
-; FINITE-NEXT: minsd {{[^,]*}}, %xmm0
-; FINITE-NEXT: ret
-define double @y_ule(double %x) nounwind {
- %c = fcmp ule double %x, -0.000000e+00
- %d = select i1 %c, double %x, double -0.000000e+00
- ret double %d
-}
-
-; CHECK: y_uge_inverse:
-; CHECK-NEXT: minsd {{[^,]*}}, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: y_uge_inverse:
-; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
-; UNSAFE-NEXT: minsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: y_uge_inverse:
-; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
-; FINITE-NEXT: minsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @y_uge_inverse(double %x) nounwind {
- %c = fcmp uge double %x, -0.000000e+00
- %d = select i1 %c, double -0.000000e+00, double %x
- ret double %d
-}
-
-; CHECK: y_ule_inverse:
-; CHECK-NEXT: maxsd {{[^,]*}}, %xmm0
-; CHECK-NEXT: ret
-; UNSAFE: y_ule_inverse:
-; UNSAFE-NEXT: movsd {{[^,]*}}, %xmm1
-; UNSAFE-NEXT: maxsd %xmm0, %xmm1
-; UNSAFE-NEXT: movapd %xmm1, %xmm0
-; UNSAFE-NEXT: ret
-; FINITE: y_ule_inverse:
-; FINITE-NEXT: movsd {{[^,]*}}, %xmm1
-; FINITE-NEXT: maxsd %xmm0, %xmm1
-; FINITE-NEXT: movapd %xmm1, %xmm0
-; FINITE-NEXT: ret
-define double @y_ule_inverse(double %x) nounwind {
- %c = fcmp ule double %x, -0.000000e+00
- %d = select i1 %c, double -0.000000e+00, double %x
- ret double %d
-}
-; Test a few more misc. cases.
-
-; CHECK: clampTo3k_a:
-; CHECK: minsd
-; UNSAFE: clampTo3k_a:
-; UNSAFE: minsd
-; FINITE: clampTo3k_a:
-; FINITE: minsd
-define double @clampTo3k_a(double %x) nounwind readnone {
-entry:
- %0 = fcmp ogt double %x, 3.000000e+03 ; <i1> [#uses=1]
- %x_addr.0 = select i1 %0, double 3.000000e+03, double %x ; <double> [#uses=1]
- ret double %x_addr.0
-}
-
-; CHECK: clampTo3k_b:
-; CHECK: minsd
-; UNSAFE: clampTo3k_b:
-; UNSAFE: minsd
-; FINITE: clampTo3k_b:
-; FINITE: minsd
-define double @clampTo3k_b(double %x) nounwind readnone {
-entry:
- %0 = fcmp uge double %x, 3.000000e+03 ; <i1> [#uses=1]
- %x_addr.0 = select i1 %0, double 3.000000e+03, double %x ; <double> [#uses=1]
- ret double %x_addr.0
-}
-
-; CHECK: clampTo3k_c:
-; CHECK: maxsd
-; UNSAFE: clampTo3k_c:
-; UNSAFE: maxsd
-; FINITE: clampTo3k_c:
-; FINITE: maxsd
-define double @clampTo3k_c(double %x) nounwind readnone {
-entry:
- %0 = fcmp olt double %x, 3.000000e+03 ; <i1> [#uses=1]
- %x_addr.0 = select i1 %0, double 3.000000e+03, double %x ; <double> [#uses=1]
- ret double %x_addr.0
-}
-
-; CHECK: clampTo3k_d:
-; CHECK: maxsd
-; UNSAFE: clampTo3k_d:
-; UNSAFE: maxsd
-; FINITE: clampTo3k_d:
-; FINITE: maxsd
-define double @clampTo3k_d(double %x) nounwind readnone {
-entry:
- %0 = fcmp ule double %x, 3.000000e+03 ; <i1> [#uses=1]
- %x_addr.0 = select i1 %0, double 3.000000e+03, double %x ; <double> [#uses=1]
- ret double %x_addr.0
-}
-
-; CHECK: clampTo3k_e:
-; CHECK: maxsd
-; UNSAFE: clampTo3k_e:
-; UNSAFE: maxsd
-; FINITE: clampTo3k_e:
-; FINITE: maxsd
-define double @clampTo3k_e(double %x) nounwind readnone {
-entry:
- %0 = fcmp olt double %x, 3.000000e+03 ; <i1> [#uses=1]
- %x_addr.0 = select i1 %0, double 3.000000e+03, double %x ; <double> [#uses=1]
- ret double %x_addr.0
-}
-
-; CHECK: clampTo3k_f:
-; CHECK: maxsd
-; UNSAFE: clampTo3k_f:
-; UNSAFE: maxsd
-; FINITE: clampTo3k_f:
-; FINITE: maxsd
-define double @clampTo3k_f(double %x) nounwind readnone {
-entry:
- %0 = fcmp ule double %x, 3.000000e+03 ; <i1> [#uses=1]
- %x_addr.0 = select i1 %0, double 3.000000e+03, double %x ; <double> [#uses=1]
- ret double %x_addr.0
-}
-
-; CHECK: clampTo3k_g:
-; CHECK: minsd
-; UNSAFE: clampTo3k_g:
-; UNSAFE: minsd
-; FINITE: clampTo3k_g:
-; FINITE: minsd
-define double @clampTo3k_g(double %x) nounwind readnone {
-entry:
- %0 = fcmp ogt double %x, 3.000000e+03 ; <i1> [#uses=1]
- %x_addr.0 = select i1 %0, double 3.000000e+03, double %x ; <double> [#uses=1]
- ret double %x_addr.0
-}
-
-; CHECK: clampTo3k_h:
-; CHECK: minsd
-; UNSAFE: clampTo3k_h:
-; UNSAFE: minsd
-; FINITE: clampTo3k_h:
-; FINITE: minsd
-define double @clampTo3k_h(double %x) nounwind readnone {
-entry:
- %0 = fcmp uge double %x, 3.000000e+03 ; <i1> [#uses=1]
- %x_addr.0 = select i1 %0, double 3.000000e+03, double %x ; <double> [#uses=1]
- ret double %x_addr.0
-}
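A rough C-level sketch of the clamp idiom the clampTo3k_* cases above exercise (hypothetical function names, illustration only, not part of the deleted file): each test is a compare-and-select against 3000.0 that the CHECK lines expect to lower to a single minsd or maxsd.

/* upper clamp: when x > 3000 the constant is chosen; per the CHECK
   lines above this is expected to become a single minsd */
double clamp_upper(double x) {
    return (x > 3000.0) ? 3000.0 : x;
}

/* lower clamp: when x < 3000 the constant is chosen; per the CHECK
   lines above this is expected to become a single maxsd */
double clamp_lower(double x) {
    return (x < 3000.0) ? 3000.0 : x;
}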
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse-varargs.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse-varargs.ll
deleted file mode 100644
index da38f0e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse-varargs.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep xmm | grep esp
-
-define i32 @t() nounwind {
-entry:
- tail call void (i32, ...)* @foo( i32 1, <4 x i32> < i32 10, i32 11, i32 12, i32 13 > ) nounwind
- ret i32 0
-}
-
-declare void @foo(i32, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse2.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse2.ll
deleted file mode 100644
index f2b8010..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse2.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; Tests for SSE2 and below, without SSE3+.
-; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=pentium4 -O3 | FileCheck %s
-
-define void @t1(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
- %tmp3 = load <2 x double>* %A, align 16
- %tmp7 = insertelement <2 x double> undef, double %B, i32 0
- %tmp9 = shufflevector <2 x double> %tmp3, <2 x double> %tmp7, <2 x i32> < i32 2, i32 1 >
- store <2 x double> %tmp9, <2 x double>* %r, align 16
- ret void
-
-; CHECK: t1:
-; CHECK: movl 8(%esp), %eax
-; CHECK-NEXT: movl 4(%esp), %ecx
-; CHECK-NEXT: movapd (%eax), %xmm0
-; CHECK-NEXT: movlpd 12(%esp), %xmm0
-; CHECK-NEXT: movapd %xmm0, (%ecx)
-; CHECK-NEXT: ret
-}
-
-define void @t2(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
- %tmp3 = load <2 x double>* %A, align 16
- %tmp7 = insertelement <2 x double> undef, double %B, i32 0
- %tmp9 = shufflevector <2 x double> %tmp3, <2 x double> %tmp7, <2 x i32> < i32 0, i32 2 >
- store <2 x double> %tmp9, <2 x double>* %r, align 16
- ret void
-
-; CHECK: t2:
-; CHECK: movl 8(%esp), %eax
-; CHECK-NEXT: movl 4(%esp), %ecx
-; CHECK-NEXT: movapd (%eax), %xmm0
-; CHECK-NEXT: movhpd 12(%esp), %xmm0
-; CHECK-NEXT: movapd %xmm0, (%ecx)
-; CHECK-NEXT: ret
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse3.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse3.ll
deleted file mode 100644
index 921161e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse3.ll
+++ /dev/null
@@ -1,262 +0,0 @@
-; These are tests for SSE3 codegen. Yonah has SSE3 and earlier but not SSSE3+.
-
-; RUN: llc < %s -march=x86-64 -mcpu=yonah -mtriple=i686-apple-darwin9 -O3 \
-; RUN: | FileCheck %s --check-prefix=X64
-
-; Test for v8xi16 lowering where we extract the first element of the vector and
-; place it in the second element of the result.
-
-define void @t0(<8 x i16>* %dest, <8 x i16>* %old) nounwind {
-entry:
- %tmp3 = load <8 x i16>* %old
- %tmp6 = shufflevector <8 x i16> %tmp3,
- <8 x i16> < i16 0, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef >,
- <8 x i32> < i32 8, i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef >
- store <8 x i16> %tmp6, <8 x i16>* %dest
- ret void
-
-; X64: t0:
-; X64: movddup (%rsi), %xmm0
-; X64: xorl %eax, %eax
-; X64: pshuflw $0, %xmm0, %xmm0
-; X64: pinsrw $0, %eax, %xmm0
-; X64: movaps %xmm0, (%rdi)
-; X64: ret
-}
-
-define <8 x i16> @t1(<8 x i16>* %A, <8 x i16>* %B) nounwind {
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
- %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> < i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7 >
- ret <8 x i16> %tmp3
-
-; X64: t1:
-; X64: movl (%rsi), %eax
-; X64: movaps (%rdi), %xmm0
-; X64: pinsrw $0, %eax, %xmm0
-; X64: ret
-}
-
-define <8 x i16> @t2(<8 x i16> %A, <8 x i16> %B) nounwind {
- %tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 9, i32 1, i32 2, i32 9, i32 4, i32 5, i32 6, i32 7 >
- ret <8 x i16> %tmp
-; X64: t2:
-; X64: pextrw $1, %xmm1, %eax
-; X64: pinsrw $0, %eax, %xmm0
-; X64: pinsrw $3, %eax, %xmm0
-; X64: ret
-}
-
-define <8 x i16> @t3(<8 x i16> %A, <8 x i16> %B) nounwind {
- %tmp = shufflevector <8 x i16> %A, <8 x i16> %A, <8 x i32> < i32 8, i32 3, i32 2, i32 13, i32 7, i32 6, i32 5, i32 4 >
- ret <8 x i16> %tmp
-; X64: t3:
-; X64: pextrw $5, %xmm0, %eax
-; X64: pshuflw $44, %xmm0, %xmm0
-; X64: pshufhw $27, %xmm0, %xmm0
-; X64: pinsrw $3, %eax, %xmm0
-; X64: ret
-}
-
-define <8 x i16> @t4(<8 x i16> %A, <8 x i16> %B) nounwind {
- %tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 0, i32 7, i32 2, i32 3, i32 1, i32 5, i32 6, i32 5 >
- ret <8 x i16> %tmp
-; X64: t4:
-; X64: pextrw $7, %xmm0, %eax
-; X64: pshufhw $100, %xmm0, %xmm2
-; X64: pinsrw $1, %eax, %xmm2
-; X64: pextrw $1, %xmm0, %eax
-; X64: movaps %xmm2, %xmm0
-; X64: pinsrw $4, %eax, %xmm0
-; X64: ret
-}
-
-define <8 x i16> @t5(<8 x i16> %A, <8 x i16> %B) nounwind {
- %tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 8, i32 9, i32 0, i32 1, i32 10, i32 11, i32 2, i32 3 >
- ret <8 x i16> %tmp
-; X64: t5:
-; X64: movlhps %xmm1, %xmm0
-; X64: pshufd $114, %xmm0, %xmm0
-; X64: ret
-}
-
-define <8 x i16> @t6(<8 x i16> %A, <8 x i16> %B) nounwind {
- %tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 8, i32 9, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7 >
- ret <8 x i16> %tmp
-; X64: t6:
-; X64: movss %xmm1, %xmm0
-; X64: ret
-}
-
-define <8 x i16> @t7(<8 x i16> %A, <8 x i16> %B) nounwind {
- %tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 0, i32 0, i32 3, i32 2, i32 4, i32 6, i32 4, i32 7 >
- ret <8 x i16> %tmp
-; X64: t7:
-; X64: pshuflw $-80, %xmm0, %xmm0
-; X64: pshufhw $-56, %xmm0, %xmm0
-; X64: ret
-}
-
-define void @t8(<2 x i64>* %res, <2 x i64>* %A) nounwind {
- %tmp = load <2 x i64>* %A
- %tmp.upgrd.1 = bitcast <2 x i64> %tmp to <8 x i16>
- %tmp0 = extractelement <8 x i16> %tmp.upgrd.1, i32 0
- %tmp1 = extractelement <8 x i16> %tmp.upgrd.1, i32 1
- %tmp2 = extractelement <8 x i16> %tmp.upgrd.1, i32 2
- %tmp3 = extractelement <8 x i16> %tmp.upgrd.1, i32 3
- %tmp4 = extractelement <8 x i16> %tmp.upgrd.1, i32 4
- %tmp5 = extractelement <8 x i16> %tmp.upgrd.1, i32 5
- %tmp6 = extractelement <8 x i16> %tmp.upgrd.1, i32 6
- %tmp7 = extractelement <8 x i16> %tmp.upgrd.1, i32 7
- %tmp8 = insertelement <8 x i16> undef, i16 %tmp2, i32 0
- %tmp9 = insertelement <8 x i16> %tmp8, i16 %tmp1, i32 1
- %tmp10 = insertelement <8 x i16> %tmp9, i16 %tmp0, i32 2
- %tmp11 = insertelement <8 x i16> %tmp10, i16 %tmp3, i32 3
- %tmp12 = insertelement <8 x i16> %tmp11, i16 %tmp6, i32 4
- %tmp13 = insertelement <8 x i16> %tmp12, i16 %tmp5, i32 5
- %tmp14 = insertelement <8 x i16> %tmp13, i16 %tmp4, i32 6
- %tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 7
- %tmp15.upgrd.2 = bitcast <8 x i16> %tmp15 to <2 x i64>
- store <2 x i64> %tmp15.upgrd.2, <2 x i64>* %res
- ret void
-; X64: t8:
-; X64: pshuflw $-58, (%rsi), %xmm0
-; X64: pshufhw $-58, %xmm0, %xmm0
-; X64: movaps %xmm0, (%rdi)
-; X64: ret
-}
-
-define void @t9(<4 x float>* %r, <2 x i32>* %A) nounwind {
- %tmp = load <4 x float>* %r
- %tmp.upgrd.3 = bitcast <2 x i32>* %A to double*
- %tmp.upgrd.4 = load double* %tmp.upgrd.3
- %tmp.upgrd.5 = insertelement <2 x double> undef, double %tmp.upgrd.4, i32 0
- %tmp5 = insertelement <2 x double> %tmp.upgrd.5, double undef, i32 1
- %tmp6 = bitcast <2 x double> %tmp5 to <4 x float>
- %tmp.upgrd.6 = extractelement <4 x float> %tmp, i32 0
- %tmp7 = extractelement <4 x float> %tmp, i32 1
- %tmp8 = extractelement <4 x float> %tmp6, i32 0
- %tmp9 = extractelement <4 x float> %tmp6, i32 1
- %tmp10 = insertelement <4 x float> undef, float %tmp.upgrd.6, i32 0
- %tmp11 = insertelement <4 x float> %tmp10, float %tmp7, i32 1
- %tmp12 = insertelement <4 x float> %tmp11, float %tmp8, i32 2
- %tmp13 = insertelement <4 x float> %tmp12, float %tmp9, i32 3
- store <4 x float> %tmp13, <4 x float>* %r
- ret void
-; X64: t9:
-; X64: movaps (%rdi), %xmm0
-; X64: movhps (%rsi), %xmm0
-; X64: movaps %xmm0, (%rdi)
-; X64: ret
-}
-
-
-
-; FIXME: This testcase produces icky code. It can be made much better!
-; PR2585
-
-@g1 = external constant <4 x i32>
-@g2 = external constant <4 x i16>
-
-define internal void @t10() nounwind {
- load <4 x i32>* @g1, align 16
- bitcast <4 x i32> %1 to <8 x i16>
- shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> < i32 0, i32 2, i32 4, i32 6, i32 undef, i32 undef, i32 undef, i32 undef >
- bitcast <8 x i16> %3 to <2 x i64>
- extractelement <2 x i64> %4, i32 0
- bitcast i64 %5 to <4 x i16>
- store <4 x i16> %6, <4 x i16>* @g2, align 8
- ret void
-; X64: t10:
-; X64: pextrw $4, %xmm0, %eax
-; X64: pextrw $6, %xmm0, %edx
-; X64: movlhps %xmm1, %xmm1
-; X64: pshuflw $8, %xmm1, %xmm1
-; X64: pinsrw $2, %eax, %xmm1
-; X64: pinsrw $3, %edx, %xmm1
-}
-
-
-; Pack various elements via shuffles.
-define <8 x i16> @t11(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-entry:
- %tmp7 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 1, i32 8, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
- ret <8 x i16> %tmp7
-
-; X64: t11:
-; X64: movlhps %xmm0, %xmm0
-; X64: movd %xmm1, %eax
-; X64: pshuflw $1, %xmm0, %xmm0
-; X64: pinsrw $1, %eax, %xmm0
-; X64: ret
-}
-
-
-define <8 x i16> @t12(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-entry:
- %tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 0, i32 1, i32 undef, i32 undef, i32 3, i32 11, i32 undef , i32 undef >
- ret <8 x i16> %tmp9
-
-; X64: t12:
-; X64: movlhps %xmm0, %xmm0
-; X64: pextrw $3, %xmm1, %eax
-; X64: pshufhw $3, %xmm0, %xmm0
-; X64: pinsrw $5, %eax, %xmm0
-; X64: ret
-}
-
-
-define <8 x i16> @t13(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-entry:
- %tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 8, i32 9, i32 undef, i32 undef, i32 11, i32 3, i32 undef , i32 undef >
- ret <8 x i16> %tmp9
-; X64: t13:
-; X64: punpcklqdq %xmm0, %xmm1
-; X64: pextrw $3, %xmm1, %eax
-; X64: pshufd $52, %xmm1, %xmm0
-; X64: pinsrw $4, %eax, %xmm0
-; X64: ret
-}
-
-
-define <8 x i16> @t14(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-entry:
- %tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 8, i32 9, i32 undef, i32 undef, i32 undef, i32 2, i32 undef , i32 undef >
- ret <8 x i16> %tmp9
-; X64: t14:
-; X64: punpcklqdq %xmm0, %xmm1
-; X64: pshufhw $8, %xmm1, %xmm0
-; X64: ret
-}
-
-
-
-define <8 x i16> @t15(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-entry:
- %tmp8 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 undef, i32 undef, i32 7, i32 2, i32 8, i32 undef, i32 undef , i32 undef >
- ret <8 x i16> %tmp8
-; X64: t15:
-; X64: pextrw $7, %xmm0, %eax
-; X64: punpcklqdq %xmm1, %xmm0
-; X64: pshuflw $-128, %xmm0, %xmm0
-; X64: pinsrw $2, %eax, %xmm0
-; X64: ret
-}
-
-
-; Test yonah where we convert a shuffle to pextrw and pinsrw
-define <16 x i8> @t16(<16 x i8> %T0) nounwind readnone {
-entry:
- %tmp8 = shufflevector <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 1, i8 1, i8 1, i8 1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8> %T0, <16 x i32> < i32 0, i32 1, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
- %tmp9 = shufflevector <16 x i8> %tmp8, <16 x i8> %T0, <16 x i32> < i32 0, i32 1, i32 2, i32 17, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
- ret <16 x i8> %tmp9
-; X64: t16:
-; X64: pinsrw $0, %eax, %xmm1
-; X64: pextrw $8, %xmm0, %eax
-; X64: pinsrw $1, %eax, %xmm1
-; X64: pextrw $1, %xmm1, %ecx
-; X64: movd %xmm1, %edx
-; X64: pinsrw $0, %edx, %xmm1
-; X64: pinsrw $1, %eax, %xmm0
-; X64: ret
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse41.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse41.ll
deleted file mode 100644
index a734c05..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse41.ll
+++ /dev/null
@@ -1,226 +0,0 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=sse41 | FileCheck %s -check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=sse41 | FileCheck %s -check-prefix=X64
-
-@g16 = external global i16
-
-define <4 x i32> @pinsrd_1(i32 %s, <4 x i32> %tmp) nounwind {
- %tmp1 = insertelement <4 x i32> %tmp, i32 %s, i32 1
- ret <4 x i32> %tmp1
-; X32: pinsrd_1:
-; X32: pinsrd $1, 4(%esp), %xmm0
-
-; X64: pinsrd_1:
-; X64: pinsrd $1, %edi, %xmm0
-}
-
-define <16 x i8> @pinsrb_1(i8 %s, <16 x i8> %tmp) nounwind {
- %tmp1 = insertelement <16 x i8> %tmp, i8 %s, i32 1
- ret <16 x i8> %tmp1
-; X32: pinsrb_1:
-; X32: pinsrb $1, 4(%esp), %xmm0
-
-; X64: pinsrb_1:
-; X64: pinsrb $1, %edi, %xmm0
-}
-
-
-define <2 x i64> @pmovsxbd_1(i32* %p) nounwind {
-entry:
- %0 = load i32* %p, align 4
- %1 = insertelement <4 x i32> undef, i32 %0, i32 0
- %2 = insertelement <4 x i32> %1, i32 0, i32 1
- %3 = insertelement <4 x i32> %2, i32 0, i32 2
- %4 = insertelement <4 x i32> %3, i32 0, i32 3
- %5 = bitcast <4 x i32> %4 to <16 x i8>
- %6 = tail call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %5) nounwind readnone
- %7 = bitcast <4 x i32> %6 to <2 x i64>
- ret <2 x i64> %7
-
-; X32: _pmovsxbd_1:
-; X32: movl 4(%esp), %eax
-; X32: pmovsxbd (%eax), %xmm0
-
-; X64: _pmovsxbd_1:
-; X64: pmovsxbd (%rdi), %xmm0
-}
-
-define <2 x i64> @pmovsxwd_1(i64* %p) nounwind readonly {
-entry:
- %0 = load i64* %p ; <i64> [#uses=1]
- %tmp2 = insertelement <2 x i64> zeroinitializer, i64 %0, i32 0 ; <<2 x i64>> [#uses=1]
- %1 = bitcast <2 x i64> %tmp2 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %2 = tail call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %1) nounwind readnone ; <<4 x i32>> [#uses=1]
- %3 = bitcast <4 x i32> %2 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %3
-
-; X32: _pmovsxwd_1:
-; X32: movl 4(%esp), %eax
-; X32: pmovsxwd (%eax), %xmm0
-
-; X64: _pmovsxwd_1:
-; X64: pmovsxwd (%rdi), %xmm0
-}
-
-
-
-
-define <2 x i64> @pmovzxbq_1() nounwind {
-entry:
- %0 = load i16* @g16, align 2 ; <i16> [#uses=1]
- %1 = insertelement <8 x i16> undef, i16 %0, i32 0 ; <<8 x i16>> [#uses=1]
- %2 = bitcast <8 x i16> %1 to <16 x i8> ; <<16 x i8>> [#uses=1]
- %3 = tail call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %2) nounwind readnone ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %3
-
-; X32: _pmovzxbq_1:
-; X32: movl L_g16$non_lazy_ptr, %eax
-; X32: pmovzxbq (%eax), %xmm0
-
-; X64: _pmovzxbq_1:
-; X64: movq _g16@GOTPCREL(%rip), %rax
-; X64: pmovzxbq (%rax), %xmm0
-}
-
-declare <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8>) nounwind readnone
-declare <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16>) nounwind readnone
-declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone
-
-
-
-
-define i32 @extractps_1(<4 x float> %v) nounwind {
- %s = extractelement <4 x float> %v, i32 3
- %i = bitcast float %s to i32
- ret i32 %i
-
-; X32: _extractps_1:
-; X32: extractps $3, %xmm0, %eax
-
-; X64: _extractps_1:
-; X64: extractps $3, %xmm0, %eax
-}
-define i32 @extractps_2(<4 x float> %v) nounwind {
- %t = bitcast <4 x float> %v to <4 x i32>
- %s = extractelement <4 x i32> %t, i32 3
- ret i32 %s
-
-; X32: _extractps_2:
-; X32: extractps $3, %xmm0, %eax
-
-; X64: _extractps_2:
-; X64: extractps $3, %xmm0, %eax
-}
-
-
-; The non-store form of extractps puts its result into a GPR.
-; This makes it suitable for an extract from a <4 x float> that
-; is bitcasted to i32, but unsuitable for much of anything else.
-
-define float @ext_1(<4 x float> %v) nounwind {
- %s = extractelement <4 x float> %v, i32 3
- %t = fadd float %s, 1.0
- ret float %t
-
-; X32: _ext_1:
-; X32: pshufd $3, %xmm0, %xmm0
-; X32: addss LCPI8_0, %xmm0
-
-; X64: _ext_1:
-; X64: pshufd $3, %xmm0, %xmm0
-; X64: addss LCPI8_0(%rip), %xmm0
-}
-define float @ext_2(<4 x float> %v) nounwind {
- %s = extractelement <4 x float> %v, i32 3
- ret float %s
-
-; X32: _ext_2:
-; X32: pshufd $3, %xmm0, %xmm0
-
-; X64: _ext_2:
-; X64: pshufd $3, %xmm0, %xmm0
-}
-define i32 @ext_3(<4 x i32> %v) nounwind {
- %i = extractelement <4 x i32> %v, i32 3
- ret i32 %i
-
-; X32: _ext_3:
-; X32: pextrd $3, %xmm0, %eax
-
-; X64: _ext_3:
-; X64: pextrd $3, %xmm0, %eax
-}
-
-define <4 x float> @insertps_1(<4 x float> %t1, <4 x float> %t2) nounwind {
- %tmp1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %t1, <4 x float> %t2, i32 1) nounwind readnone
- ret <4 x float> %tmp1
-; X32: _insertps_1:
-; X32: insertps $1, %xmm1, %xmm0
-
-; X64: _insertps_1:
-; X64: insertps $1, %xmm1, %xmm0
-}
-
-declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) nounwind readnone
-
-define <4 x float> @insertps_2(<4 x float> %t1, float %t2) nounwind {
- %tmp1 = insertelement <4 x float> %t1, float %t2, i32 0
- ret <4 x float> %tmp1
-; X32: _insertps_2:
-; X32: insertps $0, 4(%esp), %xmm0
-
-; X64: _insertps_2:
-; X64: insertps $0, %xmm1, %xmm0
-}
-
-define <4 x float> @insertps_3(<4 x float> %t1, <4 x float> %t2) nounwind {
- %tmp2 = extractelement <4 x float> %t2, i32 0
- %tmp1 = insertelement <4 x float> %t1, float %tmp2, i32 0
- ret <4 x float> %tmp1
-; X32: _insertps_3:
-; X32: insertps $0, %xmm1, %xmm0
-
-; X64: _insertps_3:
-; X64: insertps $0, %xmm1, %xmm0
-}
-
-define i32 @ptestz_1(<4 x float> %t1, <4 x float> %t2) nounwind {
- %tmp1 = call i32 @llvm.x86.sse41.ptestz(<4 x float> %t1, <4 x float> %t2) nounwind readnone
- ret i32 %tmp1
-; X32: _ptestz_1:
-; X32: ptest %xmm1, %xmm0
-; X32: sete %al
-
-; X64: _ptestz_1:
-; X64: ptest %xmm1, %xmm0
-; X64: sete %al
-}
-
-define i32 @ptestz_2(<4 x float> %t1, <4 x float> %t2) nounwind {
- %tmp1 = call i32 @llvm.x86.sse41.ptestc(<4 x float> %t1, <4 x float> %t2) nounwind readnone
- ret i32 %tmp1
-; X32: _ptestz_2:
-; X32: ptest %xmm1, %xmm0
-; X32: setb %al
-
-; X64: _ptestz_2:
-; X64: ptest %xmm1, %xmm0
-; X64: setb %al
-}
-
-define i32 @ptestz_3(<4 x float> %t1, <4 x float> %t2) nounwind {
- %tmp1 = call i32 @llvm.x86.sse41.ptestnzc(<4 x float> %t1, <4 x float> %t2) nounwind readnone
- ret i32 %tmp1
-; X32: _ptestz_3:
-; X32: ptest %xmm1, %xmm0
-; X32: seta %al
-
-; X64: _ptestz_3:
-; X64: ptest %xmm1, %xmm0
-; X64: seta %al
-}
-
-
-declare i32 @llvm.x86.sse41.ptestz(<4 x float>, <4 x float>) nounwind readnone
-declare i32 @llvm.x86.sse41.ptestc(<4 x float>, <4 x float>) nounwind readnone
-declare i32 @llvm.x86.sse41.ptestnzc(<4 x float>, <4 x float>) nounwind readnone
-
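The comment block in the deleted sse41.ll above notes that the non-store form of extractps writes to a GPR, so it suits the "bitcast lane to i32" extracts but not plain float extracts. A small C intrinsics sketch of that distinction (hypothetical function names, illustration only; assumes an SSE4.1 target):

#include <stdint.h>
#include <smmintrin.h>  /* SSE4.1 */

/* raw bit pattern of lane 3 into an integer register: the
   extractps-friendly case, as in extractps_1/extractps_2 above */
uint32_t lane3_bits(__m128 v) {
    return (uint32_t)_mm_extract_ps(v, 3);
}

/* lane 3 as a float: goes through a shuffle instead, as in the
   ext_1/ext_2 cases above */
float lane3_value(__m128 v) {
    return _mm_cvtss_f32(_mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 3)));
}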
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse42.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse42.ll
deleted file mode 100644
index c9c4d01..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse42.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=sse42 | FileCheck %s -check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=sse42 | FileCheck %s -check-prefix=X64
-
-declare i32 @llvm.x86.sse42.crc32.8(i32, i8) nounwind
-declare i32 @llvm.x86.sse42.crc32.16(i32, i16) nounwind
-declare i32 @llvm.x86.sse42.crc32.32(i32, i32) nounwind
-
-define i32 @crc32_8(i32 %a, i8 %b) nounwind {
- %tmp = call i32 @llvm.x86.sse42.crc32.8(i32 %a, i8 %b)
- ret i32 %tmp
-; X32: _crc32_8:
-; X32: crc32 8(%esp), %eax
-
-; X64: _crc32_8:
-; X64: crc32 %sil, %eax
-}
-
-
-define i32 @crc32_16(i32 %a, i16 %b) nounwind {
- %tmp = call i32 @llvm.x86.sse42.crc32.16(i32 %a, i16 %b)
- ret i32 %tmp
-; X32: _crc32_16:
-; X32: crc32 8(%esp), %eax
-
-; X64: _crc32_16:
-; X64: crc32 %si, %eax
-}
-
-
-define i32 @crc32_32(i32 %a, i32 %b) nounwind {
- %tmp = call i32 @llvm.x86.sse42.crc32.32(i32 %a, i32 %b)
- ret i32 %tmp
-; X32: _crc32_32:
-; X32: crc32 8(%esp), %eax
-
-; X64: _crc32_32:
-; X64: crc32 %esi, %eax
-}
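The deleted sse42.ll calls the llvm.x86.sse42.crc32.* intrinsics directly; from C the same crc32 instructions are reachable through the SSE4.2 intrinsics, roughly as below (hypothetical function name, illustration only; assumes an SSE4.2 target):

#include <stdint.h>
#include <nmmintrin.h>  /* SSE4.2 */

uint32_t crc32_step(uint32_t crc, uint8_t b, uint16_t w, uint32_t d) {
    crc = _mm_crc32_u8(crc, b);   /* crc32 on an 8-bit value  */
    crc = _mm_crc32_u16(crc, w);  /* crc32 on a 16-bit value  */
    crc = _mm_crc32_u32(crc, d);  /* crc32 on a 32-bit value  */
    return crc;
}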
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sse_reload_fold.ll b/libclamav/c++/llvm/test/CodeGen/X86/sse_reload_fold.ll
deleted file mode 100644
index dc3d6fe..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sse_reload_fold.ll
+++ /dev/null
@@ -1,124 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+64bit,+sse3 -print-failed-fuse-candidates |& \
-; RUN: grep fail | count 1
-
-declare float @test_f(float %f)
-declare double @test_d(double %f)
-declare <4 x float> @test_vf(<4 x float> %f)
-declare <2 x double> @test_vd(<2 x double> %f)
-declare float @llvm.sqrt.f32(float)
-declare double @llvm.sqrt.f64(double)
-
-declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>)
-declare <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float>)
-declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>)
-declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>)
-declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>)
-declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8)
-declare <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float>, <4 x float>)
-declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>)
-declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>)
-declare <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double>)
-declare <2 x double> @llvm.x86.sse2.min.pd(<2 x double>, <2 x double>)
-declare <2 x double> @llvm.x86.sse2.max.pd(<2 x double>, <2 x double>)
-declare <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double>, <2 x double>, i8)
-declare <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double>, <2 x double>)
-declare <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double>, <2 x double>)
-declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>)
-
-define float @foo(float %f) {
- %a = call float @test_f(float %f)
- %t = call float @llvm.sqrt.f32(float %f)
- ret float %t
-}
-define double @doo(double %f) {
- %a = call double @test_d(double %f)
- %t = call double @llvm.sqrt.f64(double %f)
- ret double %t
-}
-define <4 x float> @a0(<4 x float> %f) {
- %a = call <4 x float> @test_vf(<4 x float> %f)
- %t = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %f)
- ret <4 x float> %t
-}
-define <4 x float> @a1(<4 x float> %f) {
- %a = call <4 x float> @test_vf(<4 x float> %f)
- %t = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %f)
- ret <4 x float> %t
-}
-define <4 x float> @a2(<4 x float> %f) {
- %a = call <4 x float> @test_vf(<4 x float> %f)
- %t = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %f)
- ret <4 x float> %t
-}
-define <4 x float> @b3(<4 x float> %f) {
- %y = call <4 x float> @test_vf(<4 x float> %f)
- %t = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %y, <4 x float> %f)
- ret <4 x float> %t
-}
-define <4 x float> @b4(<4 x float> %f) {
- %y = call <4 x float> @test_vf(<4 x float> %f)
- %t = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %y, <4 x float> %f)
- ret <4 x float> %t
-}
-define <4 x float> @b5(<4 x float> %f) {
- %y = call <4 x float> @test_vf(<4 x float> %f)
- %t = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %y, <4 x float> %f, i8 7)
- ret <4 x float> %t
-}
-define <4 x float> @b6(<4 x float> %f) {
- %y = call <4 x float> @test_vf(<4 x float> %f)
- %t = call <4 x float> @llvm.x86.sse3.addsub.ps(<4 x float> %y, <4 x float> %f)
- ret <4 x float> %t
-}
-define <4 x float> @b7(<4 x float> %f) {
- %y = call <4 x float> @test_vf(<4 x float> %f)
- %t = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %y, <4 x float> %f)
- ret <4 x float> %t
-}
-define <4 x float> @b8(<4 x float> %f) {
- %y = call <4 x float> @test_vf(<4 x float> %f)
- %t = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %y, <4 x float> %f)
- ret <4 x float> %t
-}
-define <2 x double> @c1(<2 x double> %f) {
- %a = call <2 x double> @test_vd(<2 x double> %f)
- %t = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %f)
- ret <2 x double> %t
-}
-define <2 x double> @d3(<2 x double> %f) {
- %y = call <2 x double> @test_vd(<2 x double> %f)
- %t = call <2 x double> @llvm.x86.sse2.min.pd(<2 x double> %y, <2 x double> %f)
- ret <2 x double> %t
-}
-define <2 x double> @d4(<2 x double> %f) {
- %y = call <2 x double> @test_vd(<2 x double> %f)
- %t = call <2 x double> @llvm.x86.sse2.max.pd(<2 x double> %y, <2 x double> %f)
- ret <2 x double> %t
-}
-define <2 x double> @d5(<2 x double> %f) {
- %y = call <2 x double> @test_vd(<2 x double> %f)
- %t = call <2 x double> @llvm.x86.sse2.cmp.pd(<2 x double> %y, <2 x double> %f, i8 7)
- ret <2 x double> %t
-}
-define <2 x double> @d6(<2 x double> %f) {
- %y = call <2 x double> @test_vd(<2 x double> %f)
- %t = call <2 x double> @llvm.x86.sse3.addsub.pd(<2 x double> %y, <2 x double> %f)
- ret <2 x double> %t
-}
-define <2 x double> @d7(<2 x double> %f) {
- %y = call <2 x double> @test_vd(<2 x double> %f)
- %t = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %y, <2 x double> %f)
- ret <2 x double> %t
-}
-define <2 x double> @d8(<2 x double> %f) {
- %y = call <2 x double> @test_vd(<2 x double> %f)
- %t = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %y, <2 x double> %f)
- ret <2 x double> %t
-}
-
-; This one should fail to fuse.
-define <2 x double> @z0(<2 x double> %f) {
- %y = call <2 x double> @test_vd(<2 x double> %f)
- %t = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %f, <2 x double> %y)
- ret <2 x double> %t
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/stack-align.ll b/libclamav/c++/llvm/test/CodeGen/X86/stack-align.ll
deleted file mode 100644
index e971ef7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/stack-align.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; RUN: llc < %s -relocation-model=static -realign-stack=1 -mcpu=yonah | FileCheck %s
-
-; The double argument is at 4(esp) which is 16-byte aligned, allowing us to
-; fold the load into the andpd.
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin8"
-@G = external global double
-
-define void @test({ double, double }* byval %z, double* %P) {
-entry:
- %tmp = getelementptr { double, double }* %z, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp1 = load double* %tmp, align 8 ; <double> [#uses=1]
- %tmp2 = tail call double @fabs( double %tmp1 ) ; <double> [#uses=1]
- ; CHECK: andpd{{.*}}4(%esp), %xmm
- %tmp3 = load double* @G, align 16 ; <double> [#uses=1]
- %tmp4 = tail call double @fabs( double %tmp3 ) ; <double> [#uses=1]
- %tmp6 = fadd double %tmp4, %tmp2 ; <double> [#uses=1]
- store double %tmp6, double* %P, align 8
- ret void
-}
-
-define void @test2() alignstack(16) {
-entry:
- ; CHECK: andl{{.*}}$-16, %esp
- ret void
-}
-
-; Use a call to force a spill.
-define <2 x double> @test3(<2 x double> %x, <2 x double> %y) alignstack(32) {
-entry:
- ; CHECK: andl{{.*}}$-32, %esp
- call void @test2()
- %A = mul <2 x double> %x, %y
- ret <2 x double> %A
-}
-
-declare double @fabs(double)
-
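The test2/test3 cases in the deleted stack-align.ll above check that the prologue realigns %esp (andl $-16 / andl $-32) when a frame needs more alignment than the i386 ABI guarantees. A rough C-level analogue that triggers the same realignment (hypothetical names, GCC/Clang attribute syntax, illustration only):

/* assumed external sink so the buffer is not optimized away */
extern void consume(double *p);

void needs_aligned_frame(void) {
    /* an over-aligned local forces the prologue to realign the
       stack, matching the andl $-32, %esp check above */
    double buf[4] __attribute__((aligned(32)));
    consume(buf);
}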
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/stack-color-with-reg-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/stack-color-with-reg-2.ll
deleted file mode 100644
index c1f2672..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/stack-color-with-reg-2.ll
+++ /dev/null
@@ -1,230 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin10 -relocation-model=pic -disable-fp-elim -color-ss-with-regs | grep {movl\[\[:space:\]\]%eax, %ebx}
-
- %"struct..0$_67" = type { i32, %"struct.llvm::MachineOperand"**, %"struct.llvm::MachineOperand"* }
- %"struct..1$_69" = type { i32 }
- %"struct.llvm::AbstractTypeUser" = type { i32 (...)** }
- %"struct.llvm::AliasAnalysis" = type opaque
- %"struct.llvm::AnalysisResolver" = type { %"struct.std::vector<std::pair<const llvm::PassInfo*, llvm::Pass*>,std::allocator<std::pair<const llvm::PassInfo*, llvm::Pass*> > >", %"struct.llvm::PMDataManager"* }
- %"struct.llvm::Annotable" = type { %"struct.llvm::Annotation"* }
- %"struct.llvm::Annotation" = type { i32 (...)**, %"struct..1$_69", %"struct.llvm::Annotation"* }
- %"struct.llvm::Argument" = type { %"struct.llvm::Value", %"struct.llvm::ilist_node<llvm::Argument>", %"struct.llvm::Function"* }
- %"struct.llvm::AttrListPtr" = type { %"struct.llvm::AttributeListImpl"* }
- %"struct.llvm::AttributeListImpl" = type opaque
- %"struct.llvm::BasicBlock" = type { %"struct.llvm::Value", %"struct.llvm::ilist_node<llvm::BasicBlock>", %"struct.llvm::iplist<llvm::Instruction,llvm::ilist_traits<llvm::Instruction> >", %"struct.llvm::Function"* }
- %"struct.llvm::BitVector" = type { i32*, i32, i32 }
- %"struct.llvm::BumpPtrAllocator" = type { i8* }
- %"struct.llvm::CalleeSavedInfo" = type { i32, %"struct.llvm::TargetRegisterClass"*, i32 }
- %"struct.llvm::Constant" = type { %"struct.llvm::User" }
- %"struct.llvm::DebugLocTracker" = type { %"struct.std::vector<llvm::DebugLocTuple,std::allocator<llvm::DebugLocTuple> >", %"struct.llvm::DenseMap<llvm::DebugLocTuple,unsigned int,llvm::DenseMapInfo<llvm::DebugLocTuple>,llvm::DenseMapInfo<unsigned int> >" }
- %"struct.llvm::DebugLocTuple" = type { %"struct.llvm::GlobalVariable"*, i32, i32 }
- %"struct.llvm::DenseMap<llvm::DebugLocTuple,unsigned int,llvm::DenseMapInfo<llvm::DebugLocTuple>,llvm::DenseMapInfo<unsigned int> >" = type { i32, %"struct.std::pair<llvm::DebugLocTuple,unsigned int>"*, i32, i32 }
- %"struct.llvm::DenseMap<llvm::MachineInstr*,unsigned int,llvm::DenseMapInfo<llvm::MachineInstr*>,llvm::DenseMapInfo<unsigned int> >" = type { i32, %"struct.std::pair<llvm::MachineInstr*,unsigned int>"*, i32, i32 }
- %"struct.llvm::DenseMap<unsigned int,char,llvm::DenseMapInfo<unsigned int>,llvm::DenseMapInfo<char> >" = type { i32, %"struct.std::pair<unsigned int,char>"*, i32, i32 }
- %"struct.llvm::DenseMap<unsigned int,llvm::LiveInterval*,llvm::DenseMapInfo<unsigned int>,llvm::DenseMapInfo<llvm::LiveInterval*> >" = type { i32, %"struct.std::pair<unsigned int,llvm::LiveInterval*>"*, i32, i32 }
- %"struct.llvm::DenseSet<unsigned int,llvm::DenseMapInfo<unsigned int> >" = type { %"struct.llvm::DenseMap<unsigned int,char,llvm::DenseMapInfo<unsigned int>,llvm::DenseMapInfo<char> >" }
- %"struct.llvm::Function" = type { %"struct.llvm::GlobalValue", %"struct.llvm::Annotable", %"struct.llvm::ilist_node<llvm::Function>", %"struct.llvm::iplist<llvm::BasicBlock,llvm::ilist_traits<llvm::BasicBlock> >", %"struct.llvm::iplist<llvm::Argument,llvm::ilist_traits<llvm::Argument> >", %"struct.llvm::ValueSymbolTable"*, %"struct.llvm::AttrListPtr" }
- %"struct.llvm::FunctionPass" = type { %"struct.llvm::Pass" }
- %"struct.llvm::GlobalValue" = type { %"struct.llvm::Constant", %"struct.llvm::Module"*, i32, %"struct.std::string" }
- %"struct.llvm::GlobalVariable" = type opaque
- %"struct.llvm::Instruction" = type { %"struct.llvm::User", %"struct.llvm::ilist_node<llvm::Instruction>", %"struct.llvm::BasicBlock"* }
- %"struct.llvm::LiveInterval" = type <{ i32, float, i16, [6 x i8], %"struct.llvm::SmallVector<llvm::LiveRange,4u>", %"struct.llvm::SmallVector<llvm::MachineBasicBlock*,4u>" }>
- %"struct.llvm::LiveIntervals" = type { %"struct.llvm::MachineFunctionPass", %"struct.llvm::MachineFunction"*, %"struct.llvm::MachineRegisterInfo"*, %"struct.llvm::TargetMachine"*, %"struct.llvm::TargetRegisterInfo"*, %"struct.llvm::TargetInstrInfo"*, %"struct.llvm::AliasAnalysis"*, %"struct.llvm::LiveVariables"*, %"struct.llvm::BumpPtrAllocator", %"struct.std::vector<std::pair<unsigned int, unsigned int>,std::allocator<std::pair<unsigned int, unsigned int> > >", %"struct.std::vector<std::pair<unsigned int, llvm::MachineBasicBlock*>,std::allocator<std::pair<unsigned int, llvm::MachineBasicBlock*> > >", i64, %"struct.llvm::DenseMap<llvm::MachineInstr*,unsigned int,llvm::DenseMapInfo<llvm::MachineInstr*>,llvm::DenseMapInfo<unsigned int> >", %"struct.std::vector<llvm::MachineInstr*,std::allocator<llvm::MachineInstr*> >", %"struct.llvm::DenseMap<unsigned int,llvm::LiveInterval*,llvm::DenseMapInfo<unsigned int>,llvm::DenseMapInfo<llvm::LiveInterval*> >", %"struct.llvm::BitVector", %"struct.std::vector<llvm::MachineInstr*,std::allocator<llvm::MachineInstr*> >" }
- %"struct.llvm::LiveVariables" = type opaque
- %"struct.llvm::MVT" = type { %"struct..1$_69" }
- %"struct.llvm::MachineBasicBlock" = type { %"struct.llvm::ilist_node<llvm::MachineBasicBlock>", %"struct.llvm::ilist<llvm::MachineInstr>", %"struct.llvm::BasicBlock"*, i32, %"struct.llvm::MachineFunction"*, %"struct.std::vector<llvm::MachineBasicBlock*,std::allocator<llvm::MachineBasicBlock*> >", %"struct.std::vector<llvm::MachineBasicBlock*,std::allocator<llvm::MachineBasicBlock*> >", %"struct.std::vector<int,std::allocator<int> >", i32, i8 }
- %"struct.llvm::MachineConstantPool" = type opaque
- %"struct.llvm::MachineFrameInfo" = type { %"struct.std::vector<llvm::MachineFrameInfo::StackObject,std::allocator<llvm::MachineFrameInfo::StackObject> >", i32, i8, i8, i64, i32, i32, i8, i32, i32, %"struct.std::vector<llvm::CalleeSavedInfo,std::allocator<llvm::CalleeSavedInfo> >", %"struct.llvm::MachineModuleInfo"*, %"struct.llvm::TargetFrameInfo"* }
- %"struct.llvm::MachineFrameInfo::StackObject" = type { i64, i32, i8, i64 }
- %"struct.llvm::MachineFunction" = type { %"struct.llvm::Annotation", %"struct.llvm::Function"*, %"struct.llvm::TargetMachine"*, %"struct.llvm::MachineRegisterInfo"*, %"struct.llvm::AbstractTypeUser"*, %"struct.llvm::MachineFrameInfo"*, %"struct.llvm::MachineConstantPool"*, %"struct.llvm::MachineJumpTableInfo"*, %"struct.std::vector<llvm::MachineBasicBlock*,std::allocator<llvm::MachineBasicBlock*> >", %"struct.llvm::BumpPtrAllocator", %"struct.llvm::Recycler<llvm::MachineBasicBlock,80ul,4ul>", %"struct.llvm::Recycler<llvm::MachineBasicBlock,80ul,4ul>", %"struct.llvm::ilist<llvm::MachineBasicBlock>", %"struct..1$_69", %"struct.llvm::DebugLocTracker" }
- %"struct.llvm::MachineFunctionPass" = type { %"struct.llvm::FunctionPass" }
- %"struct.llvm::MachineInstr" = type { %"struct.llvm::ilist_node<llvm::MachineInstr>", %"struct.llvm::TargetInstrDesc"*, i16, %"struct.std::vector<llvm::MachineOperand,std::allocator<llvm::MachineOperand> >", %"struct.std::list<llvm::MachineMemOperand,std::allocator<llvm::MachineMemOperand> >", %"struct.llvm::MachineBasicBlock"*, %"struct..1$_69" }
- %"struct.llvm::MachineJumpTableInfo" = type opaque
- %"struct.llvm::MachineModuleInfo" = type opaque
- %"struct.llvm::MachineOperand" = type { i8, i8, i8, %"struct.llvm::MachineInstr"*, %"struct.llvm::MachineOperand::$_66" }
- %"struct.llvm::MachineOperand::$_66" = type { %"struct..0$_67" }
- %"struct.llvm::MachineRegisterInfo" = type { %"struct.std::vector<std::pair<const llvm::TargetRegisterClass*, llvm::MachineOperand*>,std::allocator<std::pair<const llvm::TargetRegisterClass*, llvm::MachineOperand*> > >", %"struct.std::vector<std::vector<unsigned int, std::allocator<unsigned int> >,std::allocator<std::vector<unsigned int, std::allocator<unsigned int> > > >", %"struct.llvm::MachineOperand"**, %"struct.llvm::BitVector", %"struct.std::vector<std::pair<unsigned int, unsigned int>,std::allocator<std::pair<unsigned int, unsigned int> > >", %"struct.std::vector<int,std::allocator<int> >" }
- %"struct.llvm::Module" = type opaque
- %"struct.llvm::PATypeHandle" = type { %"struct.llvm::Type"*, %"struct.llvm::AbstractTypeUser"* }
- %"struct.llvm::PATypeHolder" = type { %"struct.llvm::Type"* }
- %"struct.llvm::PMDataManager" = type opaque
- %"struct.llvm::Pass" = type { i32 (...)**, %"struct.llvm::AnalysisResolver"*, i32 }
- %"struct.llvm::PassInfo" = type { i8*, i8*, i32, i8, i8, i8, %"struct.std::vector<const llvm::PassInfo*,std::allocator<const llvm::PassInfo*> >", %"struct.llvm::Pass"* ()* }
- %"struct.llvm::Recycler<llvm::MachineBasicBlock,80ul,4ul>" = type { %"struct.llvm::iplist<llvm::RecyclerStruct,llvm::ilist_traits<llvm::RecyclerStruct> >" }
- %"struct.llvm::RecyclerStruct" = type { %"struct.llvm::RecyclerStruct"*, %"struct.llvm::RecyclerStruct"* }
- %"struct.llvm::SmallVector<llvm::LiveRange,4u>" = type <{ [17 x i8], [47 x i8] }>
- %"struct.llvm::SmallVector<llvm::MachineBasicBlock*,4u>" = type <{ [17 x i8], [15 x i8] }>
- %"struct.llvm::TargetAsmInfo" = type opaque
- %"struct.llvm::TargetFrameInfo" = type opaque
- %"struct.llvm::TargetInstrDesc" = type { i16, i16, i16, i16, i8*, i32, i32, i32*, i32*, %"struct.llvm::TargetRegisterClass"**, %"struct.llvm::TargetOperandInfo"* }
- %"struct.llvm::TargetInstrInfo" = type { i32 (...)**, %"struct.llvm::TargetInstrDesc"*, i32 }
- %"struct.llvm::TargetMachine" = type { i32 (...)**, %"struct.llvm::TargetAsmInfo"* }
- %"struct.llvm::TargetOperandInfo" = type { i16, i16, i32 }
- %"struct.llvm::TargetRegisterClass" = type { i32 (...)**, i32, i8*, %"struct.llvm::MVT"*, %"struct.llvm::TargetRegisterClass"**, %"struct.llvm::TargetRegisterClass"**, %"struct.llvm::TargetRegisterClass"**, %"struct.llvm::TargetRegisterClass"**, i32, i32, i32, i32*, i32*, %"struct.llvm::DenseSet<unsigned int,llvm::DenseMapInfo<unsigned int> >" }
- %"struct.llvm::TargetRegisterDesc" = type { i8*, i8*, i32*, i32*, i32* }
- %"struct.llvm::TargetRegisterInfo" = type { i32 (...)**, i32*, i32, i32*, i32, i32*, i32, %"struct.llvm::TargetRegisterDesc"*, i32, %"struct.llvm::TargetRegisterClass"**, %"struct.llvm::TargetRegisterClass"**, i32, i32 }
- %"struct.llvm::Type" = type { %"struct.llvm::AbstractTypeUser", i8, [3 x i8], i32, %"struct.llvm::Type"*, %"struct.std::vector<llvm::AbstractTypeUser*,std::allocator<llvm::AbstractTypeUser*> >", i32, %"struct.llvm::PATypeHandle"* }
- %"struct.llvm::Use" = type { %"struct.llvm::Value"*, %"struct.llvm::Use"*, %"struct..1$_69" }
- %"struct.llvm::User" = type { %"struct.llvm::Value", %"struct.llvm::Use"*, i32 }
- %"struct.llvm::Value" = type { i32 (...)**, i8, i8, i16, %"struct.llvm::PATypeHolder", %"struct.llvm::Use"*, %"struct.llvm::ValueName"* }
- %"struct.llvm::ValueName" = type opaque
- %"struct.llvm::ValueSymbolTable" = type opaque
- %"struct.llvm::ilist<llvm::MachineBasicBlock>" = type { %"struct.llvm::iplist<llvm::MachineBasicBlock,llvm::ilist_traits<llvm::MachineBasicBlock> >" }
- %"struct.llvm::ilist<llvm::MachineInstr>" = type { %"struct.llvm::iplist<llvm::MachineInstr,llvm::ilist_traits<llvm::MachineInstr> >" }
- %"struct.llvm::ilist_node<llvm::Argument>" = type { %"struct.llvm::Argument"*, %"struct.llvm::Argument"* }
- %"struct.llvm::ilist_node<llvm::BasicBlock>" = type { %"struct.llvm::BasicBlock"*, %"struct.llvm::BasicBlock"* }
- %"struct.llvm::ilist_node<llvm::Function>" = type { %"struct.llvm::Function"*, %"struct.llvm::Function"* }
- %"struct.llvm::ilist_node<llvm::Instruction>" = type { %"struct.llvm::Instruction"*, %"struct.llvm::Instruction"* }
- %"struct.llvm::ilist_node<llvm::MachineBasicBlock>" = type { %"struct.llvm::MachineBasicBlock"*, %"struct.llvm::MachineBasicBlock"* }
- %"struct.llvm::ilist_node<llvm::MachineInstr>" = type { %"struct.llvm::MachineInstr"*, %"struct.llvm::MachineInstr"* }
- %"struct.llvm::ilist_traits<llvm::Argument>" = type { %"struct.llvm::ilist_node<llvm::Argument>" }
- %"struct.llvm::ilist_traits<llvm::BasicBlock>" = type { %"struct.llvm::ilist_node<llvm::BasicBlock>" }
- %"struct.llvm::ilist_traits<llvm::Instruction>" = type { %"struct.llvm::ilist_node<llvm::Instruction>" }
- %"struct.llvm::ilist_traits<llvm::MachineBasicBlock>" = type { %"struct.llvm::ilist_node<llvm::MachineBasicBlock>" }
- %"struct.llvm::ilist_traits<llvm::MachineInstr>" = type { %"struct.llvm::ilist_node<llvm::MachineInstr>", %"struct.llvm::MachineBasicBlock"* }
- %"struct.llvm::ilist_traits<llvm::RecyclerStruct>" = type { %"struct.llvm::RecyclerStruct" }
- %"struct.llvm::iplist<llvm::Argument,llvm::ilist_traits<llvm::Argument> >" = type { %"struct.llvm::ilist_traits<llvm::Argument>", %"struct.llvm::Argument"* }
- %"struct.llvm::iplist<llvm::BasicBlock,llvm::ilist_traits<llvm::BasicBlock> >" = type { %"struct.llvm::ilist_traits<llvm::BasicBlock>", %"struct.llvm::BasicBlock"* }
- %"struct.llvm::iplist<llvm::Instruction,llvm::ilist_traits<llvm::Instruction> >" = type { %"struct.llvm::ilist_traits<llvm::Instruction>", %"struct.llvm::Instruction"* }
- %"struct.llvm::iplist<llvm::MachineBasicBlock,llvm::ilist_traits<llvm::MachineBasicBlock> >" = type { %"struct.llvm::ilist_traits<llvm::MachineBasicBlock>", %"struct.llvm::MachineBasicBlock"* }
- %"struct.llvm::iplist<llvm::MachineInstr,llvm::ilist_traits<llvm::MachineInstr> >" = type { %"struct.llvm::ilist_traits<llvm::MachineInstr>", %"struct.llvm::MachineInstr"* }
- %"struct.llvm::iplist<llvm::RecyclerStruct,llvm::ilist_traits<llvm::RecyclerStruct> >" = type { %"struct.llvm::ilist_traits<llvm::RecyclerStruct>", %"struct.llvm::RecyclerStruct"* }
- %"struct.std::IdxMBBPair" = type { i32, %"struct.llvm::MachineBasicBlock"* }
- %"struct.std::_List_base<llvm::MachineMemOperand,std::allocator<llvm::MachineMemOperand> >" = type { %"struct.llvm::ilist_traits<llvm::RecyclerStruct>" }
- %"struct.std::_Vector_base<const llvm::PassInfo*,std::allocator<const llvm::PassInfo*> >" = type { %"struct.std::_Vector_base<const llvm::PassInfo*,std::allocator<const llvm::PassInfo*> >::_Vector_impl" }
- %"struct.std::_Vector_base<const llvm::PassInfo*,std::allocator<const llvm::PassInfo*> >::_Vector_impl" = type { %"struct.llvm::PassInfo"**, %"struct.llvm::PassInfo"**, %"struct.llvm::PassInfo"** }
- %"struct.std::_Vector_base<int,std::allocator<int> >" = type { %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl" }
- %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl" = type { i32*, i32*, i32* }
- %"struct.std::_Vector_base<llvm::AbstractTypeUser*,std::allocator<llvm::AbstractTypeUser*> >" = type { %"struct.std::_Vector_base<llvm::AbstractTypeUser*,std::allocator<llvm::AbstractTypeUser*> >::_Vector_impl" }
- %"struct.std::_Vector_base<llvm::AbstractTypeUser*,std::allocator<llvm::AbstractTypeUser*> >::_Vector_impl" = type { %"struct.llvm::AbstractTypeUser"**, %"struct.llvm::AbstractTypeUser"**, %"struct.llvm::AbstractTypeUser"** }
- %"struct.std::_Vector_base<llvm::CalleeSavedInfo,std::allocator<llvm::CalleeSavedInfo> >" = type { %"struct.std::_Vector_base<llvm::CalleeSavedInfo,std::allocator<llvm::CalleeSavedInfo> >::_Vector_impl" }
- %"struct.std::_Vector_base<llvm::CalleeSavedInfo,std::allocator<llvm::CalleeSavedInfo> >::_Vector_impl" = type { %"struct.llvm::CalleeSavedInfo"*, %"struct.llvm::CalleeSavedInfo"*, %"struct.llvm::CalleeSavedInfo"* }
- %"struct.std::_Vector_base<llvm::DebugLocTuple,std::allocator<llvm::DebugLocTuple> >" = type { %"struct.std::_Vector_base<llvm::DebugLocTuple,std::allocator<llvm::DebugLocTuple> >::_Vector_impl" }
- %"struct.std::_Vector_base<llvm::DebugLocTuple,std::allocator<llvm::DebugLocTuple> >::_Vector_impl" = type { %"struct.llvm::DebugLocTuple"*, %"struct.llvm::DebugLocTuple"*, %"struct.llvm::DebugLocTuple"* }
- %"struct.std::_Vector_base<llvm::MachineBasicBlock*,std::allocator<llvm::MachineBasicBlock*> >" = type { %"struct.std::_Vector_base<llvm::MachineBasicBlock*,std::allocator<llvm::MachineBasicBlock*> >::_Vector_impl" }
- %"struct.std::_Vector_base<llvm::MachineBasicBlock*,std::allocator<llvm::MachineBasicBlock*> >::_Vector_impl" = type { %"struct.llvm::MachineBasicBlock"**, %"struct.llvm::MachineBasicBlock"**, %"struct.llvm::MachineBasicBlock"** }
- %"struct.std::_Vector_base<llvm::MachineFrameInfo::StackObject,std::allocator<llvm::MachineFrameInfo::StackObject> >" = type { %"struct.std::_Vector_base<llvm::MachineFrameInfo::StackObject,std::allocator<llvm::MachineFrameInfo::StackObject> >::_Vector_impl" }
- %"struct.std::_Vector_base<llvm::MachineFrameInfo::StackObject,std::allocator<llvm::MachineFrameInfo::StackObject> >::_Vector_impl" = type { %"struct.llvm::MachineFrameInfo::StackObject"*, %"struct.llvm::MachineFrameInfo::StackObject"*, %"struct.llvm::MachineFrameInfo::StackObject"* }
- %"struct.std::_Vector_base<llvm::MachineInstr*,std::allocator<llvm::MachineInstr*> >" = type { %"struct.std::_Vector_base<llvm::MachineInstr*,std::allocator<llvm::MachineInstr*> >::_Vector_impl" }
- %"struct.std::_Vector_base<llvm::MachineInstr*,std::allocator<llvm::MachineInstr*> >::_Vector_impl" = type { %"struct.llvm::MachineInstr"**, %"struct.llvm::MachineInstr"**, %"struct.llvm::MachineInstr"** }
- %"struct.std::_Vector_base<llvm::MachineOperand,std::allocator<llvm::MachineOperand> >" = type { %"struct.std::_Vector_base<llvm::MachineOperand,std::allocator<llvm::MachineOperand> >::_Vector_impl" }
- %"struct.std::_Vector_base<llvm::MachineOperand,std::allocator<llvm::MachineOperand> >::_Vector_impl" = type { %"struct.llvm::MachineOperand"*, %"struct.llvm::MachineOperand"*, %"struct.llvm::MachineOperand"* }
- %"struct.std::_Vector_base<std::pair<const llvm::PassInfo*, llvm::Pass*>,std::allocator<std::pair<const llvm::PassInfo*, llvm::Pass*> > >" = type { %"struct.std::_Vector_base<std::pair<const llvm::PassInfo*, llvm::Pass*>,std::allocator<std::pair<const llvm::PassInfo*, llvm::Pass*> > >::_Vector_impl" }
- %"struct.std::_Vector_base<std::pair<const llvm::PassInfo*, llvm::Pass*>,std::allocator<std::pair<const llvm::PassInfo*, llvm::Pass*> > >::_Vector_impl" = type { %"struct.std::pair<const llvm::PassInfo*,llvm::Pass*>"*, %"struct.std::pair<const llvm::PassInfo*,llvm::Pass*>"*, %"struct.std::pair<const llvm::PassInfo*,llvm::Pass*>"* }
- %"struct.std::_Vector_base<std::pair<const llvm::TargetRegisterClass*, llvm::MachineOperand*>,std::allocator<std::pair<const llvm::TargetRegisterClass*, llvm::MachineOperand*> > >" = type { %"struct.std::_Vector_base<std::pair<const llvm::TargetRegisterClass*, llvm::MachineOperand*>,std::allocator<std::pair<const llvm::TargetRegisterClass*, llvm::MachineOperand*> > >::_Vector_impl" }
- %"struct.std::_Vector_base<std::pair<const llvm::TargetRegisterClass*, llvm::MachineOperand*>,std::allocator<std::pair<const llvm::TargetRegisterClass*, llvm::MachineOperand*> > >::_Vector_impl" = type { %"struct.std::pair<const llvm::TargetRegisterClass*,llvm::MachineOperand*>"*, %"struct.std::pair<const llvm::TargetRegisterClass*,llvm::MachineOperand*>"*, %"struct.std::pair<const llvm::TargetRegisterClass*,llvm::MachineOperand*>"* }
- %"struct.std::_Vector_base<std::pair<unsigned int, llvm::MachineBasicBlock*>,std::allocator<std::pair<unsigned int, llvm::MachineBasicBlock*> > >" = type { %"struct.std::_Vector_base<std::pair<unsigned int, llvm::MachineBasicBlock*>,std::allocator<std::pair<unsigned int, llvm::MachineBasicBlock*> > >::_Vector_impl" }
- %"struct.std::_Vector_base<std::pair<unsigned int, llvm::MachineBasicBlock*>,std::allocator<std::pair<unsigned int, llvm::MachineBasicBlock*> > >::_Vector_impl" = type { %"struct.std::IdxMBBPair"*, %"struct.std::IdxMBBPair"*, %"struct.std::IdxMBBPair"* }
- %"struct.std::_Vector_base<std::pair<unsigned int, unsigned int>,std::allocator<std::pair<unsigned int, unsigned int> > >" = type { %"struct.std::_Vector_base<std::pair<unsigned int, unsigned int>,std::allocator<std::pair<unsigned int, unsigned int> > >::_Vector_impl" }
- %"struct.std::_Vector_base<std::pair<unsigned int, unsigned int>,std::allocator<std::pair<unsigned int, unsigned int> > >::_Vector_impl" = type { %"struct.std::pair<unsigned int,int>"*, %"struct.std::pair<unsigned int,int>"*, %"struct.std::pair<unsigned int,int>"* }
- %"struct.std::_Vector_base<std::vector<unsigned int, std::allocator<unsigned int> >,std::allocator<std::vector<unsigned int, std::allocator<unsigned int> > > >" = type { %"struct.std::_Vector_base<std::vector<unsigned int, std::allocator<unsigned int> >,std::allocator<std::vector<unsigned int, std::allocator<unsigned int> > > >::_Vector_impl" }
- %"struct.std::_Vector_base<std::vector<unsigned int, std::allocator<unsigned int> >,std::allocator<std::vector<unsigned int, std::allocator<unsigned int> > > >::_Vector_impl" = type { %"struct.std::vector<int,std::allocator<int> >"*, %"struct.std::vector<int,std::allocator<int> >"*, %"struct.std::vector<int,std::allocator<int> >"* }
- %"struct.std::list<llvm::MachineMemOperand,std::allocator<llvm::MachineMemOperand> >" = type { %"struct.std::_List_base<llvm::MachineMemOperand,std::allocator<llvm::MachineMemOperand> >" }
- %"struct.std::pair<const llvm::PassInfo*,llvm::Pass*>" = type { %"struct.llvm::PassInfo"*, %"struct.llvm::Pass"* }
- %"struct.std::pair<const llvm::TargetRegisterClass*,llvm::MachineOperand*>" = type { %"struct.llvm::TargetRegisterClass"*, %"struct.llvm::MachineOperand"* }
- %"struct.std::pair<llvm::DebugLocTuple,unsigned int>" = type { %"struct.llvm::DebugLocTuple", i32 }
- %"struct.std::pair<llvm::MachineInstr*,unsigned int>" = type { %"struct.llvm::MachineInstr"*, i32 }
- %"struct.std::pair<unsigned int,char>" = type { i32, i8 }
- %"struct.std::pair<unsigned int,int>" = type { i32, i32 }
- %"struct.std::pair<unsigned int,llvm::LiveInterval*>" = type { i32, %"struct.llvm::LiveInterval"* }
- %"struct.std::string" = type { %"struct.llvm::BumpPtrAllocator" }
- %"struct.std::vector<const llvm::PassInfo*,std::allocator<const llvm::PassInfo*> >" = type { %"struct.std::_Vector_base<const llvm::PassInfo*,std::allocator<const llvm::PassInfo*> >" }
- %"struct.std::vector<int,std::allocator<int> >" = type { %"struct.std::_Vector_base<int,std::allocator<int> >" }
- %"struct.std::vector<llvm::AbstractTypeUser*,std::allocator<llvm::AbstractTypeUser*> >" = type { %"struct.std::_Vector_base<llvm::AbstractTypeUser*,std::allocator<llvm::AbstractTypeUser*> >" }
- %"struct.std::vector<llvm::CalleeSavedInfo,std::allocator<llvm::CalleeSavedInfo> >" = type { %"struct.std::_Vector_base<llvm::CalleeSavedInfo,std::allocator<llvm::CalleeSavedInfo> >" }
- %"struct.std::vector<llvm::DebugLocTuple,std::allocator<llvm::DebugLocTuple> >" = type { %"struct.std::_Vector_base<llvm::DebugLocTuple,std::allocator<llvm::DebugLocTuple> >" }
- %"struct.std::vector<llvm::MachineBasicBlock*,std::allocator<llvm::MachineBasicBlock*> >" = type { %"struct.std::_Vector_base<llvm::MachineBasicBlock*,std::allocator<llvm::MachineBasicBlock*> >" }
- %"struct.std::vector<llvm::MachineFrameInfo::StackObject,std::allocator<llvm::MachineFrameInfo::StackObject> >" = type { %"struct.std::_Vector_base<llvm::MachineFrameInfo::StackObject,std::allocator<llvm::MachineFrameInfo::StackObject> >" }
- %"struct.std::vector<llvm::MachineInstr*,std::allocator<llvm::MachineInstr*> >" = type { %"struct.std::_Vector_base<llvm::MachineInstr*,std::allocator<llvm::MachineInstr*> >" }
- %"struct.std::vector<llvm::MachineOperand,std::allocator<llvm::MachineOperand> >" = type { %"struct.std::_Vector_base<llvm::MachineOperand,std::allocator<llvm::MachineOperand> >" }
- %"struct.std::vector<std::pair<const llvm::PassInfo*, llvm::Pass*>,std::allocator<std::pair<const llvm::PassInfo*, llvm::Pass*> > >" = type { %"struct.std::_Vector_base<std::pair<const llvm::PassInfo*, llvm::Pass*>,std::allocator<std::pair<const llvm::PassInfo*, llvm::Pass*> > >" }
- %"struct.std::vector<std::pair<const llvm::TargetRegisterClass*, llvm::MachineOperand*>,std::allocator<std::pair<const llvm::TargetRegisterClass*, llvm::MachineOperand*> > >" = type { %"struct.std::_Vector_base<std::pair<const llvm::TargetRegisterClass*, llvm::MachineOperand*>,std::allocator<std::pair<const llvm::TargetRegisterClass*, llvm::MachineOperand*> > >" }
- %"struct.std::vector<std::pair<unsigned int, llvm::MachineBasicBlock*>,std::allocator<std::pair<unsigned int, llvm::MachineBasicBlock*> > >" = type { %"struct.std::_Vector_base<std::pair<unsigned int, llvm::MachineBasicBlock*>,std::allocator<std::pair<unsigned int, llvm::MachineBasicBlock*> > >" }
- %"struct.std::vector<std::pair<unsigned int, unsigned int>,std::allocator<std::pair<unsigned int, unsigned int> > >" = type { %"struct.std::_Vector_base<std::pair<unsigned int, unsigned int>,std::allocator<std::pair<unsigned int, unsigned int> > >" }
- %"struct.std::vector<std::vector<unsigned int, std::allocator<unsigned int> >,std::allocator<std::vector<unsigned int, std::allocator<unsigned int> > > >" = type { %"struct.std::_Vector_base<std::vector<unsigned int, std::allocator<unsigned int> >,std::allocator<std::vector<unsigned int, std::allocator<unsigned int> > > >" }
-@_ZZNK4llvm8DenseMapIPNS_12MachineInstrEjNS_12DenseMapInfoIS2_EENS3_IjEEE15LookupBucketForERKS2_RPSt4pairIS2_jEE8__func__ = external constant [16 x i8] ; <[16 x i8]*> [#uses=1]
-@"\01LC6" = external constant [56 x i8] ; <[56 x i8]*> [#uses=1]
-@"\01LC7" = external constant [134 x i8] ; <[134 x i8]*> [#uses=1]
-@"\01LC8" = external constant [72 x i8] ; <[72 x i8]*> [#uses=1]
-@_ZZN4llvm13LiveIntervals24InsertMachineInstrInMapsEPNS_12MachineInstrEjE8__func__ = external constant [25 x i8] ; <[25 x i8]*> [#uses=1]
-@"\01LC51" = external constant [42 x i8] ; <[42 x i8]*> [#uses=1]
-
-define void @_ZN4llvm13LiveIntervals24InsertMachineInstrInMapsEPNS_12MachineInstrEj(%"struct.llvm::LiveIntervals"* nocapture %this, %"struct.llvm::MachineInstr"* %MI, i32 %Index) nounwind ssp {
-entry:
- %0 = call i64 @_ZN4llvm8DenseMapIPNS_12MachineInstrEjNS_12DenseMapInfoIS2_EENS3_IjEEE4findERKS2_(%"struct.llvm::DenseMap<llvm::MachineInstr*,unsigned int,llvm::DenseMapInfo<llvm::MachineInstr*>,llvm::DenseMapInfo<unsigned int> >"* null, %"struct.llvm::MachineInstr"** null) nounwind ssp ; <i64> [#uses=1]
- %1 = trunc i64 %0 to i32 ; <i32> [#uses=1]
- %tmp11 = inttoptr i32 %1 to %"struct.std::pair<llvm::MachineInstr*,unsigned int>"* ; <%"struct.std::pair<llvm::MachineInstr*,unsigned int>"*> [#uses=1]
- %2 = load %"struct.std::pair<llvm::MachineInstr*,unsigned int>"** null, align 4 ; <%"struct.std::pair<llvm::MachineInstr*,unsigned int>"*> [#uses=3]
- %3 = getelementptr %"struct.llvm::LiveIntervals"* %this, i32 0, i32 12, i32 0 ; <i32*> [#uses=1]
- %4 = load i32* %3, align 4 ; <i32> [#uses=2]
- %5 = getelementptr %"struct.std::pair<llvm::MachineInstr*,unsigned int>"* %2, i32 %4 ; <%"struct.std::pair<llvm::MachineInstr*,unsigned int>"*> [#uses=1]
- br label %bb1.i.i.i
-
-bb.i.i.i: ; preds = %bb2.i.i.i
- %indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
- br label %bb1.i.i.i
-
-bb1.i.i.i: ; preds = %bb.i.i.i, %entry
- %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb.i.i.i ] ; <i32> [#uses=2]
- %tmp32 = shl i32 %indvar, 3 ; <i32> [#uses=1]
- %ctg2.sum = add i32 0, %tmp32 ; <i32> [#uses=1]
- %ctg237 = getelementptr i8* null, i32 %ctg2.sum ; <i8*> [#uses=1]
- %.0.0.i = bitcast i8* %ctg237 to %"struct.std::pair<llvm::MachineInstr*,unsigned int>"* ; <%"struct.std::pair<llvm::MachineInstr*,unsigned int>"*> [#uses=2]
- %6 = icmp eq %"struct.std::pair<llvm::MachineInstr*,unsigned int>"* %.0.0.i, %5 ; <i1> [#uses=1]
- br i1 %6, label %_ZN4llvm8DenseMapIPNS_12MachineInstrEjNS_12DenseMapInfoIS2_EENS3_IjEEE3endEv.exit, label %bb2.i.i.i
-
-bb2.i.i.i: ; preds = %bb1.i.i.i
- %7 = load %"struct.llvm::MachineInstr"** null, align 4 ; <%"struct.llvm::MachineInstr"*> [#uses=1]
- %8 = icmp eq %"struct.llvm::MachineInstr"* %7, inttoptr (i32 -8 to %"struct.llvm::MachineInstr"*) ; <i1> [#uses=1]
- %or.cond.i.i.i21 = or i1 false, %8 ; <i1> [#uses=1]
- br i1 %or.cond.i.i.i21, label %bb.i.i.i, label %_ZN4llvm8DenseMapIPNS_12MachineInstrEjNS_12DenseMapInfoIS2_EENS3_IjEEE3endEv.exit
-
-_ZN4llvm8DenseMapIPNS_12MachineInstrEjNS_12DenseMapInfoIS2_EENS3_IjEEE3endEv.exit: ; preds = %bb2.i.i.i, %bb1.i.i.i
- %9 = icmp eq %"struct.std::pair<llvm::MachineInstr*,unsigned int>"* %tmp11, %.0.0.i ; <i1> [#uses=1]
- br i1 %9, label %bb7, label %bb6
-
-bb6: ; preds = %_ZN4llvm8DenseMapIPNS_12MachineInstrEjNS_12DenseMapInfoIS2_EENS3_IjEEE3endEv.exit
- call void @__assert_rtn(i8* getelementptr ([25 x i8]* @_ZZN4llvm13LiveIntervals24InsertMachineInstrInMapsEPNS_12MachineInstrEjE8__func__, i32 0, i32 0), i8* getelementptr ([72 x i8]* @"\01LC8", i32 0, i32 0), i32 251, i8* getelementptr ([42 x i8]* @"\01LC51", i32 0, i32 0)) noreturn nounwind
- unreachable
-
-bb7: ; preds = %_ZN4llvm8DenseMapIPNS_12MachineInstrEjNS_12DenseMapInfoIS2_EENS3_IjEEE3endEv.exit
- %10 = load %"struct.llvm::MachineInstr"** null, align 4 ; <%"struct.llvm::MachineInstr"*> [#uses=2]
- %11 = icmp eq %"struct.llvm::MachineInstr"* %10, inttoptr (i32 -8 to %"struct.llvm::MachineInstr"*) ; <i1> [#uses=1]
- %or.cond40.i.i.i = or i1 false, %11 ; <i1> [#uses=1]
- br i1 %or.cond40.i.i.i, label %bb5.i.i.i, label %bb6.preheader.i.i.i
-
-bb6.preheader.i.i.i: ; preds = %bb7
- %12 = add i32 %4, -1 ; <i32> [#uses=1]
- br label %bb6.i.i.i
-
-bb5.i.i.i: ; preds = %bb7
- call void @__assert_rtn(i8* getelementptr ([16 x i8]* @_ZZNK4llvm8DenseMapIPNS_12MachineInstrEjNS_12DenseMapInfoIS2_EENS3_IjEEE15LookupBucketForERKS2_RPSt4pairIS2_jEE8__func__, i32 0, i32 0), i8* getelementptr ([56 x i8]* @"\01LC6", i32 0, i32 0), i32 390, i8* getelementptr ([134 x i8]* @"\01LC7", i32 0, i32 0)) noreturn nounwind
- unreachable
-
-bb6.i.i.i: ; preds = %bb17.i.i.i, %bb6.preheader.i.i.i
- %FoundTombstone.1.i.i.i = phi %"struct.std::pair<llvm::MachineInstr*,unsigned int>"* [ %FoundTombstone.0.i.i.i, %bb17.i.i.i ], [ null, %bb6.preheader.i.i.i ] ; <%"struct.std::pair<llvm::MachineInstr*,unsigned int>"*> [#uses=2]
- %ProbeAmt.0.i.i.i = phi i32 [ 0, %bb17.i.i.i ], [ 1, %bb6.preheader.i.i.i ] ; <i32> [#uses=1]
- %BucketNo.0.i.i.i = phi i32 [ %20, %bb17.i.i.i ], [ 0, %bb6.preheader.i.i.i ] ; <i32> [#uses=2]
- %13 = and i32 %BucketNo.0.i.i.i, %12 ; <i32> [#uses=2]
- %14 = getelementptr %"struct.std::pair<llvm::MachineInstr*,unsigned int>"* %2, i32 %13 ; <%"struct.std::pair<llvm::MachineInstr*,unsigned int>"*> [#uses=2]
- %15 = getelementptr %"struct.std::pair<llvm::MachineInstr*,unsigned int>"* %2, i32 %13, i32 0 ; <%"struct.llvm::MachineInstr"**> [#uses=1]
- %16 = load %"struct.llvm::MachineInstr"** %15, align 4 ; <%"struct.llvm::MachineInstr"*> [#uses=2]
- %17 = icmp eq %"struct.llvm::MachineInstr"* %16, %10 ; <i1> [#uses=1]
- br i1 %17, label %_ZN4llvm8DenseMapIPNS_12MachineInstrEjNS_12DenseMapInfoIS2_EENS3_IjEEEixERKS2_.exit, label %bb17.i.i.i
-
-bb17.i.i.i: ; preds = %bb6.i.i.i
- %18 = icmp eq %"struct.llvm::MachineInstr"* %16, inttoptr (i32 -8 to %"struct.llvm::MachineInstr"*) ; <i1> [#uses=1]
- %19 = icmp eq %"struct.std::pair<llvm::MachineInstr*,unsigned int>"* %FoundTombstone.1.i.i.i, null ; <i1> [#uses=1]
- %or.cond.i.i.i = and i1 %18, %19 ; <i1> [#uses=1]
- %FoundTombstone.0.i.i.i = select i1 %or.cond.i.i.i, %"struct.std::pair<llvm::MachineInstr*,unsigned int>"* %14, %"struct.std::pair<llvm::MachineInstr*,unsigned int>"* %FoundTombstone.1.i.i.i ; <%"struct.std::pair<llvm::MachineInstr*,unsigned int>"*> [#uses=1]
- %20 = add i32 %BucketNo.0.i.i.i, %ProbeAmt.0.i.i.i ; <i32> [#uses=1]
- br label %bb6.i.i.i
-
-_ZN4llvm8DenseMapIPNS_12MachineInstrEjNS_12DenseMapInfoIS2_EENS3_IjEEEixERKS2_.exit: ; preds = %bb6.i.i.i
- %21 = getelementptr %"struct.std::pair<llvm::MachineInstr*,unsigned int>"* %14, i32 0, i32 1 ; <i32*> [#uses=1]
- store i32 %Index, i32* %21, align 4
- ret void
-}
-
-declare void @__assert_rtn(i8*, i8*, i32, i8*) noreturn
-
-declare i64 @_ZN4llvm8DenseMapIPNS_12MachineInstrEjNS_12DenseMapInfoIS2_EENS3_IjEEE4findERKS2_(%"struct.llvm::DenseMap<llvm::MachineInstr*,unsigned int,llvm::DenseMapInfo<llvm::MachineInstr*>,llvm::DenseMapInfo<unsigned int> >"* nocapture, %"struct.llvm::MachineInstr"** nocapture) nounwind ssp
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/stack-color-with-reg.ll b/libclamav/c++/llvm/test/CodeGen/X86/stack-color-with-reg.ll
deleted file mode 100644
index 42e7a39..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/stack-color-with-reg.ll
+++ /dev/null
@@ -1,360 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -relocation-model=pic -disable-fp-elim -color-ss-with-regs -stats -info-output-file - > %t
-; RUN: grep stackcoloring %t | grep "stack slot refs replaced with reg refs" | grep 8
-
- type { [62 x %struct.Bitvec*] } ; type %0
- type { i8* } ; type %1
- type { double } ; type %2
- %struct..5sPragmaType = type { i8*, i32 }
- %struct.AggInfo = type { i8, i8, i32, %struct.ExprList*, i32, %struct.AggInfo_col*, i32, i32, i32, %struct.AggInfo_func*, i32, i32 }
- %struct.AggInfo_col = type { %struct.Table*, i32, i32, i32, i32, %struct.Expr* }
- %struct.AggInfo_func = type { %struct.Expr*, %struct.FuncDef*, i32, i32 }
- %struct.AuxData = type { i8*, void (i8*)* }
- %struct.Bitvec = type { i32, i32, i32, %0 }
- %struct.BtCursor = type { %struct.Btree*, %struct.BtShared*, %struct.BtCursor*, %struct.BtCursor*, i32 (i8*, i32, i8*, i32, i8*)*, i8*, i32, %struct.MemPage*, i32, %struct.CellInfo, i8, i8, i8*, i64, i32, i8, i32* }
- %struct.BtLock = type { %struct.Btree*, i32, i8, %struct.BtLock* }
- %struct.BtShared = type { %struct.Pager*, %struct.sqlite3*, %struct.BtCursor*, %struct.MemPage*, i8, i8, i8, i8, i8, i8, i8, i8, i32, i16, i16, i32, i32, i32, i32, i8, i32, i8*, void (i8*)*, %struct.sqlite3_mutex*, %struct.BusyHandler, i32, %struct.BtShared*, %struct.BtLock*, %struct.Btree* }
- %struct.Btree = type { %struct.sqlite3*, %struct.BtShared*, i8, i8, i8, i32, %struct.Btree*, %struct.Btree* }
- %struct.BtreeMutexArray = type { i32, [11 x %struct.Btree*] }
- %struct.BusyHandler = type { i32 (i8*, i32)*, i8*, i32 }
- %struct.CellInfo = type { i8*, i64, i32, i32, i16, i16, i16, i16 }
- %struct.CollSeq = type { i8*, i8, i8, i8*, i32 (i8*, i32, i8*, i32, i8*)*, void (i8*)* }
- %struct.Column = type { i8*, %struct.Expr*, i8*, i8*, i8, i8, i8, i8 }
- %struct.Context = type { i64, i32, %struct.Fifo }
- %struct.CountCtx = type { i64 }
- %struct.Cursor = type { %struct.BtCursor*, i32, i64, i64, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i64, %struct.Btree*, i32, i8*, i64, i8*, %struct.KeyInfo*, i32, i64, %struct.sqlite3_vtab_cursor*, %struct.sqlite3_module*, i32, i32, i32*, i32*, i8* }
- %struct.Db = type { i8*, %struct.Btree*, i8, i8, i8*, void (i8*)*, %struct.Schema* }
- %struct.DbPage = type { %struct.Pager*, i32, %struct.DbPage*, %struct.DbPage*, %struct.PagerLruLink, %struct.DbPage*, i8, i8, i8, i8, i8, i16, %struct.DbPage*, %struct.DbPage*, i8* }
- %struct.Expr = type { i8, i8, i16, %struct.CollSeq*, %struct.Expr*, %struct.Expr*, %struct.ExprList*, %struct..5sPragmaType, %struct..5sPragmaType, i32, i32, %struct.AggInfo*, i32, i32, %struct.Select*, %struct.Table*, i32 }
- %struct.ExprList = type { i32, i32, i32, %struct.ExprList_item* }
- %struct.ExprList_item = type { %struct.Expr*, i8*, i8, i8, i8 }
- %struct.FKey = type { %struct.Table*, %struct.FKey*, i8*, %struct.FKey*, i32, %struct.sColMap*, i8, i8, i8, i8 }
- %struct.Fifo = type { i32, %struct.FifoPage*, %struct.FifoPage* }
- %struct.FifoPage = type { i32, i32, i32, %struct.FifoPage*, [1 x i64] }
- %struct.FuncDef = type { i16, i8, i8, i8, i8*, %struct.FuncDef*, void (%struct.sqlite3_context*, i32, %struct.Mem**)*, void (%struct.sqlite3_context*, i32, %struct.Mem**)*, void (%struct.sqlite3_context*)*, [1 x i8] }
- %struct.Hash = type { i8, i8, i32, i32, %struct.HashElem*, %struct._ht* }
- %struct.HashElem = type { %struct.HashElem*, %struct.HashElem*, i8*, i8*, i32 }
- %struct.IdList = type { %struct..5sPragmaType*, i32, i32 }
- %struct.Index = type { i8*, i32, i32*, i32*, %struct.Table*, i32, i8, i8, i8*, %struct.Index*, %struct.Schema*, i8*, i8** }
- %struct.KeyInfo = type { %struct.sqlite3*, i8, i8, i8, i32, i8*, [1 x %struct.CollSeq*] }
- %struct.Mem = type { %struct.CountCtx, double, %struct.sqlite3*, i8*, i32, i16, i8, i8, void (i8*)* }
- %struct.MemPage = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i16, i16, i16, i16, i16, i16, [5 x %struct._OvflCell], %struct.BtShared*, i8*, %struct.DbPage*, i32, %struct.MemPage* }
- %struct.Module = type { %struct.sqlite3_module*, i8*, i8*, void (i8*)* }
- %struct.Op = type { i8, i8, i8, i8, i32, i32, i32, %1 }
- %struct.Pager = type { %struct.sqlite3_vfs*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.Bitvec*, %struct.Bitvec*, i8*, i8*, i8*, i8*, %struct.sqlite3_file*, %struct.sqlite3_file*, %struct.sqlite3_file*, %struct.BusyHandler*, %struct.PagerLruList, %struct.DbPage*, %struct.DbPage*, %struct.DbPage*, i64, i64, i64, i64, i64, i32, void (%struct.DbPage*, i32)*, void (%struct.DbPage*, i32)*, i32, %struct.DbPage**, i8*, [16 x i8] }
- %struct.PagerLruLink = type { %struct.DbPage*, %struct.DbPage* }
- %struct.PagerLruList = type { %struct.DbPage*, %struct.DbPage*, %struct.DbPage* }
- %struct.Schema = type { i32, %struct.Hash, %struct.Hash, %struct.Hash, %struct.Hash, %struct.Table*, i8, i8, i16, i32, %struct.sqlite3* }
- %struct.Select = type { %struct.ExprList*, i8, i8, i8, i8, i8, i8, i8, %struct.SrcList*, %struct.Expr*, %struct.ExprList*, %struct.Expr*, %struct.ExprList*, %struct.Select*, %struct.Select*, %struct.Select*, %struct.Expr*, %struct.Expr*, i32, i32, [3 x i32] }
- %struct.SrcList = type { i16, i16, [1 x %struct.SrcList_item] }
- %struct.SrcList_item = type { i8*, i8*, i8*, %struct.Table*, %struct.Select*, i8, i8, i32, %struct.Expr*, %struct.IdList*, i64 }
- %struct.Table = type { i8*, i32, %struct.Column*, i32, %struct.Index*, i32, %struct.Select*, i32, %struct.Trigger*, %struct.FKey*, i8*, %struct.Expr*, i32, i8, i8, i8, i8, i8, i8, i8, %struct.Module*, %struct.sqlite3_vtab*, i32, i8**, %struct.Schema* }
- %struct.Trigger = type { i8*, i8*, i8, i8, %struct.Expr*, %struct.IdList*, %struct..5sPragmaType, %struct.Schema*, %struct.Schema*, %struct.TriggerStep*, %struct.Trigger* }
- %struct.TriggerStep = type { i32, i32, %struct.Trigger*, %struct.Select*, %struct..5sPragmaType, %struct.Expr*, %struct.ExprList*, %struct.IdList*, %struct.TriggerStep*, %struct.TriggerStep* }
- %struct.Vdbe = type { %struct.sqlite3*, %struct.Vdbe*, %struct.Vdbe*, i32, i32, %struct.Op*, i32, i32, i32*, %struct.Mem**, %struct.Mem*, i32, %struct.Cursor**, i32, %struct.Mem*, i8**, i32, i32, i32, %struct.Mem*, i32, i32, %struct.Fifo, i32, i32, %struct.Context*, i32, i32, i32, i32, i32, [25 x i32], i32, i32, i8**, i8*, %struct.Mem*, i8, i8, i8, i8, i8, i8, i32, i64, i32, %struct.BtreeMutexArray, i32, i8*, i32 }
- %struct.VdbeFunc = type { %struct.FuncDef*, i32, [1 x %struct.AuxData] }
- %struct._OvflCell = type { i8*, i16 }
- %struct._ht = type { i32, %struct.HashElem* }
- %struct.sColMap = type { i32, i8* }
- %struct.sqlite3 = type { %struct.sqlite3_vfs*, i32, %struct.Db*, i32, i32, i32, i32, i8, i8, i8, i8, i32, %struct.CollSeq*, i64, i64, i32, i32, i32, %struct.sqlite3_mutex*, %struct.sqlite3InitInfo, i32, i8**, %struct.Vdbe*, i32, void (i8*, i8*)*, i8*, void (i8*, i8*, i64)*, i8*, i8*, i32 (i8*)*, i8*, void (i8*)*, i8*, void (i8*, i32, i8*, i8*, i64)*, void (i8*, %struct.sqlite3*, i32, i8*)*, void (i8*, %struct.sqlite3*, i32, i8*)*, i8*, %struct.Mem*, i8*, i8*, %2, i32 (i8*, i32, i8*, i8*, i8*, i8*)*, i8*, i32 (i8*)*, i8*, i32, %struct.Hash, %struct.Table*, %struct.sqlite3_vtab**, i32, %struct.Hash, %struct.Hash, %struct.BusyHandler, i32, [2 x %struct.Db], i8 }
- %struct.sqlite3InitInfo = type { i32, i32, i8 }
- %struct.sqlite3_context = type { %struct.FuncDef*, %struct.VdbeFunc*, %struct.Mem, %struct.Mem*, i32, %struct.CollSeq* }
- %struct.sqlite3_file = type { %struct.sqlite3_io_methods* }
- %struct.sqlite3_index_constraint = type { i32, i8, i8, i32 }
- %struct.sqlite3_index_constraint_usage = type { i32, i8 }
- %struct.sqlite3_index_info = type { i32, %struct.sqlite3_index_constraint*, i32, %struct.sqlite3_index_constraint_usage*, %struct.sqlite3_index_constraint_usage*, i32, i8*, i32, i32, double }
- %struct.sqlite3_io_methods = type { i32, i32 (%struct.sqlite3_file*)*, i32 (%struct.sqlite3_file*, i8*, i32, i64)*, i32 (%struct.sqlite3_file*, i8*, i32, i64)*, i32 (%struct.sqlite3_file*, i64)*, i32 (%struct.sqlite3_file*, i32)*, i32 (%struct.sqlite3_file*, i64*)*, i32 (%struct.sqlite3_file*, i32)*, i32 (%struct.sqlite3_file*, i32)*, i32 (%struct.sqlite3_file*)*, i32 (%struct.sqlite3_file*, i32, i8*)*, i32 (%struct.sqlite3_file*)*, i32 (%struct.sqlite3_file*)* }
- %struct.sqlite3_module = type { i32, i32 (%struct.sqlite3*, i8*, i32, i8**, %struct.sqlite3_vtab**, i8**)*, i32 (%struct.sqlite3*, i8*, i32, i8**, %struct.sqlite3_vtab**, i8**)*, i32 (%struct.sqlite3_vtab*, %struct.sqlite3_index_info*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*, %struct.sqlite3_vtab_cursor**)*, i32 (%struct.sqlite3_vtab_cursor*)*, i32 (%struct.sqlite3_vtab_cursor*, i32, i8*, i32, %struct.Mem**)*, i32 (%struct.sqlite3_vtab_cursor*)*, i32 (%struct.sqlite3_vtab_cursor*)*, i32 (%struct.sqlite3_vtab_cursor*, %struct.sqlite3_context*, i32)*, i32 (%struct.sqlite3_vtab_cursor*, i64*)*, i32 (%struct.sqlite3_vtab*, i32, %struct.Mem**, i64*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*)*, i32 (%struct.sqlite3_vtab*, i32, i8*, void (%struct.sqlite3_context*, i32, %struct.Mem**)**, i8**)*, i32 (%struct.sqlite3_vtab*, i8*)* }
- %struct.sqlite3_mutex = type opaque
- %struct.sqlite3_vfs = type { i32, i32, i32, %struct.sqlite3_vfs*, i8*, i8*, i32 (%struct.sqlite3_vfs*, i8*, %struct.sqlite3_file*, i32, i32*)*, i32 (%struct.sqlite3_vfs*, i8*, i32)*, i32 (%struct.sqlite3_vfs*, i8*, i32)*, i32 (%struct.sqlite3_vfs*, i32, i8*)*, i32 (%struct.sqlite3_vfs*, i8*, i32, i8*)*, i8* (%struct.sqlite3_vfs*, i8*)*, void (%struct.sqlite3_vfs*, i32, i8*)*, i8* (%struct.sqlite3_vfs*, i8*, i8*)*, void (%struct.sqlite3_vfs*, i8*)*, i32 (%struct.sqlite3_vfs*, i32, i8*)*, i32 (%struct.sqlite3_vfs*, i32)*, i32 (%struct.sqlite3_vfs*, double*)* }
- %struct.sqlite3_vtab = type { %struct.sqlite3_module*, i32, i8* }
- %struct.sqlite3_vtab_cursor = type { %struct.sqlite3_vtab* }
-@llvm.used = appending global [1 x i8*] [i8* bitcast (void (%struct.MemPage*, i32, i32)* @dropCell to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-
-define fastcc void @dropCell(%struct.MemPage* nocapture %pPage, i32 %idx, i32 %sz) nounwind ssp {
-entry:
- %0 = getelementptr %struct.MemPage* %pPage, i64 0, i32 18 ; <i8**> [#uses=1]
- %1 = load i8** %0, align 8 ; <i8*> [#uses=34]
- %2 = getelementptr %struct.MemPage* %pPage, i64 0, i32 12 ; <i16*> [#uses=1]
- %3 = load i16* %2, align 2 ; <i16> [#uses=1]
- %4 = zext i16 %3 to i32 ; <i32> [#uses=2]
- %5 = shl i32 %idx, 1 ; <i32> [#uses=2]
- %6 = add i32 %4, %5 ; <i32> [#uses=1]
- %7 = sext i32 %6 to i64 ; <i64> [#uses=2]
- %8 = getelementptr i8* %1, i64 %7 ; <i8*> [#uses=1]
- %9 = load i8* %8, align 1 ; <i8> [#uses=2]
- %10 = zext i8 %9 to i32 ; <i32> [#uses=1]
- %11 = shl i32 %10, 8 ; <i32> [#uses=1]
- %.sum3 = add i64 %7, 1 ; <i64> [#uses=1]
- %12 = getelementptr i8* %1, i64 %.sum3 ; <i8*> [#uses=1]
- %13 = load i8* %12, align 1 ; <i8> [#uses=2]
- %14 = zext i8 %13 to i32 ; <i32> [#uses=1]
- %15 = or i32 %11, %14 ; <i32> [#uses=3]
- %16 = icmp slt i32 %sz, 4 ; <i1> [#uses=1]
- %size_addr.0.i = select i1 %16, i32 4, i32 %sz ; <i32> [#uses=3]
- %17 = getelementptr %struct.MemPage* %pPage, i64 0, i32 8 ; <i8*> [#uses=5]
- %18 = load i8* %17, align 8 ; <i8> [#uses=1]
- %19 = zext i8 %18 to i32 ; <i32> [#uses=4]
- %20 = add i32 %19, 1 ; <i32> [#uses=2]
- br label %bb3.i
-
-bb3.i: ; preds = %bb3.i, %entry
- %addr.0.i = phi i32 [ %20, %entry ], [ %29, %bb3.i ] ; <i32> [#uses=1]
- %21 = sext i32 %addr.0.i to i64 ; <i64> [#uses=2]
- %22 = getelementptr i8* %1, i64 %21 ; <i8*> [#uses=2]
- %23 = load i8* %22, align 1 ; <i8> [#uses=2]
- %24 = zext i8 %23 to i32 ; <i32> [#uses=1]
- %25 = shl i32 %24, 8 ; <i32> [#uses=1]
- %.sum34.i = add i64 %21, 1 ; <i64> [#uses=1]
- %26 = getelementptr i8* %1, i64 %.sum34.i ; <i8*> [#uses=2]
- %27 = load i8* %26, align 1 ; <i8> [#uses=2]
- %28 = zext i8 %27 to i32 ; <i32> [#uses=1]
- %29 = or i32 %25, %28 ; <i32> [#uses=3]
- %.not.i = icmp uge i32 %29, %15 ; <i1> [#uses=1]
- %30 = icmp eq i32 %29, 0 ; <i1> [#uses=1]
- %or.cond.i = or i1 %30, %.not.i ; <i1> [#uses=1]
- br i1 %or.cond.i, label %bb5.i, label %bb3.i
-
-bb5.i: ; preds = %bb3.i
- store i8 %9, i8* %22, align 1
- store i8 %13, i8* %26, align 1
- %31 = zext i32 %15 to i64 ; <i64> [#uses=2]
- %32 = getelementptr i8* %1, i64 %31 ; <i8*> [#uses=1]
- store i8 %23, i8* %32, align 1
- %.sum32.i = add i64 %31, 1 ; <i64> [#uses=1]
- %33 = getelementptr i8* %1, i64 %.sum32.i ; <i8*> [#uses=1]
- store i8 %27, i8* %33, align 1
- %34 = add i32 %15, 2 ; <i32> [#uses=1]
- %35 = zext i32 %34 to i64 ; <i64> [#uses=2]
- %36 = getelementptr i8* %1, i64 %35 ; <i8*> [#uses=1]
- %37 = lshr i32 %size_addr.0.i, 8 ; <i32> [#uses=1]
- %38 = trunc i32 %37 to i8 ; <i8> [#uses=1]
- store i8 %38, i8* %36, align 1
- %39 = trunc i32 %size_addr.0.i to i8 ; <i8> [#uses=1]
- %.sum31.i = add i64 %35, 1 ; <i64> [#uses=1]
- %40 = getelementptr i8* %1, i64 %.sum31.i ; <i8*> [#uses=1]
- store i8 %39, i8* %40, align 1
- %41 = getelementptr %struct.MemPage* %pPage, i64 0, i32 14 ; <i16*> [#uses=4]
- %42 = load i16* %41, align 2 ; <i16> [#uses=1]
- %43 = trunc i32 %size_addr.0.i to i16 ; <i16> [#uses=1]
- %44 = add i16 %42, %43 ; <i16> [#uses=1]
- store i16 %44, i16* %41, align 2
- %45 = load i8* %17, align 8 ; <i8> [#uses=1]
- %46 = zext i8 %45 to i32 ; <i32> [#uses=1]
- %47 = add i32 %46, 1 ; <i32> [#uses=1]
- br label %bb11.outer.i
-
-bb11.outer.i: ; preds = %bb6.i, %bb5.i
- %addr.1.ph.i = phi i32 [ %47, %bb5.i ], [ %111, %bb6.i ] ; <i32> [#uses=1]
- %48 = sext i32 %addr.1.ph.i to i64 ; <i64> [#uses=2]
- %49 = getelementptr i8* %1, i64 %48 ; <i8*> [#uses=1]
- %.sum30.i = add i64 %48, 1 ; <i64> [#uses=1]
- %50 = getelementptr i8* %1, i64 %.sum30.i ; <i8*> [#uses=1]
- br label %bb11.i
-
-bb6.i: ; preds = %bb11.i
- %51 = zext i32 %111 to i64 ; <i64> [#uses=2]
- %52 = getelementptr i8* %1, i64 %51 ; <i8*> [#uses=2]
- %53 = load i8* %52, align 1 ; <i8> [#uses=1]
- %54 = zext i8 %53 to i32 ; <i32> [#uses=1]
- %55 = shl i32 %54, 8 ; <i32> [#uses=1]
- %.sum24.i = add i64 %51, 1 ; <i64> [#uses=1]
- %56 = getelementptr i8* %1, i64 %.sum24.i ; <i8*> [#uses=2]
- %57 = load i8* %56, align 1 ; <i8> [#uses=3]
- %58 = zext i8 %57 to i32 ; <i32> [#uses=1]
- %59 = or i32 %55, %58 ; <i32> [#uses=5]
- %60 = add i32 %111, 2 ; <i32> [#uses=1]
- %61 = zext i32 %60 to i64 ; <i64> [#uses=2]
- %62 = getelementptr i8* %1, i64 %61 ; <i8*> [#uses=2]
- %63 = load i8* %62, align 1 ; <i8> [#uses=1]
- %64 = zext i8 %63 to i32 ; <i32> [#uses=1]
- %65 = shl i32 %64, 8 ; <i32> [#uses=1]
- %.sum23.i = add i64 %61, 1 ; <i64> [#uses=1]
- %66 = getelementptr i8* %1, i64 %.sum23.i ; <i8*> [#uses=2]
- %67 = load i8* %66, align 1 ; <i8> [#uses=2]
- %68 = zext i8 %67 to i32 ; <i32> [#uses=1]
- %69 = or i32 %65, %68 ; <i32> [#uses=1]
- %70 = add i32 %111, 3 ; <i32> [#uses=1]
- %71 = add i32 %70, %69 ; <i32> [#uses=1]
- %72 = icmp sge i32 %71, %59 ; <i1> [#uses=1]
- %73 = icmp ne i32 %59, 0 ; <i1> [#uses=1]
- %74 = and i1 %72, %73 ; <i1> [#uses=1]
- br i1 %74, label %bb9.i, label %bb11.outer.i
-
-bb9.i: ; preds = %bb6.i
- %75 = load i8* %17, align 8 ; <i8> [#uses=1]
- %76 = zext i8 %75 to i32 ; <i32> [#uses=1]
- %77 = add i32 %76, 7 ; <i32> [#uses=1]
- %78 = zext i32 %77 to i64 ; <i64> [#uses=1]
- %79 = getelementptr i8* %1, i64 %78 ; <i8*> [#uses=2]
- %80 = load i8* %79, align 1 ; <i8> [#uses=1]
- %81 = sub i8 %109, %57 ; <i8> [#uses=1]
- %82 = add i8 %81, %67 ; <i8> [#uses=1]
- %83 = add i8 %82, %80 ; <i8> [#uses=1]
- store i8 %83, i8* %79, align 1
- %84 = zext i32 %59 to i64 ; <i64> [#uses=2]
- %85 = getelementptr i8* %1, i64 %84 ; <i8*> [#uses=1]
- %86 = load i8* %85, align 1 ; <i8> [#uses=1]
- store i8 %86, i8* %52, align 1
- %.sum22.i = add i64 %84, 1 ; <i64> [#uses=1]
- %87 = getelementptr i8* %1, i64 %.sum22.i ; <i8*> [#uses=1]
- %88 = load i8* %87, align 1 ; <i8> [#uses=1]
- store i8 %88, i8* %56, align 1
- %89 = add i32 %59, 2 ; <i32> [#uses=1]
- %90 = zext i32 %89 to i64 ; <i64> [#uses=2]
- %91 = getelementptr i8* %1, i64 %90 ; <i8*> [#uses=1]
- %92 = load i8* %91, align 1 ; <i8> [#uses=1]
- %93 = zext i8 %92 to i32 ; <i32> [#uses=1]
- %94 = shl i32 %93, 8 ; <i32> [#uses=1]
- %.sum20.i = add i64 %90, 1 ; <i64> [#uses=1]
- %95 = getelementptr i8* %1, i64 %.sum20.i ; <i8*> [#uses=2]
- %96 = load i8* %95, align 1 ; <i8> [#uses=1]
- %97 = zext i8 %96 to i32 ; <i32> [#uses=1]
- %98 = or i32 %94, %97 ; <i32> [#uses=1]
- %99 = sub i32 %59, %111 ; <i32> [#uses=1]
- %100 = add i32 %99, %98 ; <i32> [#uses=1]
- %101 = lshr i32 %100, 8 ; <i32> [#uses=1]
- %102 = trunc i32 %101 to i8 ; <i8> [#uses=1]
- store i8 %102, i8* %62, align 1
- %103 = load i8* %95, align 1 ; <i8> [#uses=1]
- %104 = sub i8 %57, %109 ; <i8> [#uses=1]
- %105 = add i8 %104, %103 ; <i8> [#uses=1]
- store i8 %105, i8* %66, align 1
- br label %bb11.i
-
-bb11.i: ; preds = %bb9.i, %bb11.outer.i
- %106 = load i8* %49, align 1 ; <i8> [#uses=1]
- %107 = zext i8 %106 to i32 ; <i32> [#uses=1]
- %108 = shl i32 %107, 8 ; <i32> [#uses=1]
- %109 = load i8* %50, align 1 ; <i8> [#uses=3]
- %110 = zext i8 %109 to i32 ; <i32> [#uses=1]
- %111 = or i32 %108, %110 ; <i32> [#uses=6]
- %112 = icmp eq i32 %111, 0 ; <i1> [#uses=1]
- br i1 %112, label %bb12.i, label %bb6.i
-
-bb12.i: ; preds = %bb11.i
- %113 = zext i32 %20 to i64 ; <i64> [#uses=2]
- %114 = getelementptr i8* %1, i64 %113 ; <i8*> [#uses=2]
- %115 = load i8* %114, align 1 ; <i8> [#uses=2]
- %116 = add i32 %19, 5 ; <i32> [#uses=1]
- %117 = zext i32 %116 to i64 ; <i64> [#uses=2]
- %118 = getelementptr i8* %1, i64 %117 ; <i8*> [#uses=3]
- %119 = load i8* %118, align 1 ; <i8> [#uses=1]
- %120 = icmp eq i8 %115, %119 ; <i1> [#uses=1]
- br i1 %120, label %bb13.i, label %bb1.preheader
-
-bb13.i: ; preds = %bb12.i
- %121 = add i32 %19, 2 ; <i32> [#uses=1]
- %122 = zext i32 %121 to i64 ; <i64> [#uses=1]
- %123 = getelementptr i8* %1, i64 %122 ; <i8*> [#uses=1]
- %124 = load i8* %123, align 1 ; <i8> [#uses=1]
- %125 = add i32 %19, 6 ; <i32> [#uses=1]
- %126 = zext i32 %125 to i64 ; <i64> [#uses=1]
- %127 = getelementptr i8* %1, i64 %126 ; <i8*> [#uses=1]
- %128 = load i8* %127, align 1 ; <i8> [#uses=1]
- %129 = icmp eq i8 %124, %128 ; <i1> [#uses=1]
- br i1 %129, label %bb14.i, label %bb1.preheader
-
-bb14.i: ; preds = %bb13.i
- %130 = zext i8 %115 to i32 ; <i32> [#uses=1]
- %131 = shl i32 %130, 8 ; <i32> [#uses=1]
- %.sum29.i = add i64 %113, 1 ; <i64> [#uses=1]
- %132 = getelementptr i8* %1, i64 %.sum29.i ; <i8*> [#uses=1]
- %133 = load i8* %132, align 1 ; <i8> [#uses=1]
- %134 = zext i8 %133 to i32 ; <i32> [#uses=1]
- %135 = or i32 %134, %131 ; <i32> [#uses=2]
- %136 = zext i32 %135 to i64 ; <i64> [#uses=1]
- %137 = getelementptr i8* %1, i64 %136 ; <i8*> [#uses=1]
- %138 = bitcast i8* %137 to i16* ; <i16*> [#uses=1]
- %139 = bitcast i8* %114 to i16* ; <i16*> [#uses=1]
- %tmp.i = load i16* %138, align 1 ; <i16> [#uses=1]
- store i16 %tmp.i, i16* %139, align 1
- %140 = load i8* %118, align 1 ; <i8> [#uses=1]
- %141 = zext i8 %140 to i32 ; <i32> [#uses=1]
- %142 = shl i32 %141, 8 ; <i32> [#uses=1]
- %.sum28.i = add i64 %117, 1 ; <i64> [#uses=1]
- %143 = getelementptr i8* %1, i64 %.sum28.i ; <i8*> [#uses=2]
- %144 = load i8* %143, align 1 ; <i8> [#uses=2]
- %145 = zext i8 %144 to i32 ; <i32> [#uses=1]
- %146 = or i32 %142, %145 ; <i32> [#uses=1]
- %147 = add i32 %135, 2 ; <i32> [#uses=1]
- %148 = zext i32 %147 to i64 ; <i64> [#uses=2]
- %149 = getelementptr i8* %1, i64 %148 ; <i8*> [#uses=1]
- %150 = load i8* %149, align 1 ; <i8> [#uses=1]
- %151 = zext i8 %150 to i32 ; <i32> [#uses=1]
- %152 = shl i32 %151, 8 ; <i32> [#uses=1]
- %.sum27.i = add i64 %148, 1 ; <i64> [#uses=1]
- %153 = getelementptr i8* %1, i64 %.sum27.i ; <i8*> [#uses=2]
- %154 = load i8* %153, align 1 ; <i8> [#uses=1]
- %155 = zext i8 %154 to i32 ; <i32> [#uses=1]
- %156 = or i32 %152, %155 ; <i32> [#uses=1]
- %157 = add i32 %156, %146 ; <i32> [#uses=1]
- %158 = lshr i32 %157, 8 ; <i32> [#uses=1]
- %159 = trunc i32 %158 to i8 ; <i8> [#uses=1]
- store i8 %159, i8* %118, align 1
- %160 = load i8* %153, align 1 ; <i8> [#uses=1]
- %161 = add i8 %160, %144 ; <i8> [#uses=1]
- store i8 %161, i8* %143, align 1
- br label %bb1.preheader
-
-bb1.preheader: ; preds = %bb14.i, %bb13.i, %bb12.i
- %i.08 = add i32 %idx, 1 ; <i32> [#uses=2]
- %162 = getelementptr %struct.MemPage* %pPage, i64 0, i32 15 ; <i16*> [#uses=4]
- %163 = load i16* %162, align 4 ; <i16> [#uses=2]
- %164 = zext i16 %163 to i32 ; <i32> [#uses=1]
- %165 = icmp sgt i32 %164, %i.08 ; <i1> [#uses=1]
- br i1 %165, label %bb, label %bb2
-
-bb: ; preds = %bb, %bb1.preheader
- %indvar = phi i64 [ 0, %bb1.preheader ], [ %indvar.next, %bb ] ; <i64> [#uses=3]
- %tmp16 = add i32 %5, %4 ; <i32> [#uses=1]
- %tmp.17 = sext i32 %tmp16 to i64 ; <i64> [#uses=1]
- %tmp19 = shl i64 %indvar, 1 ; <i64> [#uses=1]
- %ctg2.sum = add i64 %tmp.17, %tmp19 ; <i64> [#uses=4]
- %ctg229 = getelementptr i8* %1, i64 %ctg2.sum ; <i8*> [#uses=1]
- %ctg229.sum31 = add i64 %ctg2.sum, 2 ; <i64> [#uses=1]
- %166 = getelementptr i8* %1, i64 %ctg229.sum31 ; <i8*> [#uses=1]
- %167 = load i8* %166, align 1 ; <i8> [#uses=1]
- store i8 %167, i8* %ctg229
- %ctg229.sum30 = add i64 %ctg2.sum, 3 ; <i64> [#uses=1]
- %168 = getelementptr i8* %1, i64 %ctg229.sum30 ; <i8*> [#uses=1]
- %169 = load i8* %168, align 1 ; <i8> [#uses=1]
- %ctg229.sum = add i64 %ctg2.sum, 1 ; <i64> [#uses=1]
- %170 = getelementptr i8* %1, i64 %ctg229.sum ; <i8*> [#uses=1]
- store i8 %169, i8* %170, align 1
- %indvar15 = trunc i64 %indvar to i32 ; <i32> [#uses=1]
- %i.09 = add i32 %indvar15, %i.08 ; <i32> [#uses=1]
- %i.0 = add i32 %i.09, 1 ; <i32> [#uses=1]
- %171 = load i16* %162, align 4 ; <i16> [#uses=2]
- %172 = zext i16 %171 to i32 ; <i32> [#uses=1]
- %173 = icmp sgt i32 %172, %i.0 ; <i1> [#uses=1]
- %indvar.next = add i64 %indvar, 1 ; <i64> [#uses=1]
- br i1 %173, label %bb, label %bb2
-
-bb2: ; preds = %bb, %bb1.preheader
- %174 = phi i16 [ %163, %bb1.preheader ], [ %171, %bb ] ; <i16> [#uses=1]
- %175 = add i16 %174, -1 ; <i16> [#uses=2]
- store i16 %175, i16* %162, align 4
- %176 = load i8* %17, align 8 ; <i8> [#uses=1]
- %177 = zext i8 %176 to i32 ; <i32> [#uses=1]
- %178 = add i32 %177, 3 ; <i32> [#uses=1]
- %179 = zext i32 %178 to i64 ; <i64> [#uses=1]
- %180 = getelementptr i8* %1, i64 %179 ; <i8*> [#uses=1]
- %181 = lshr i16 %175, 8 ; <i16> [#uses=1]
- %182 = trunc i16 %181 to i8 ; <i8> [#uses=1]
- store i8 %182, i8* %180, align 1
- %183 = load i8* %17, align 8 ; <i8> [#uses=1]
- %184 = zext i8 %183 to i32 ; <i32> [#uses=1]
- %185 = add i32 %184, 3 ; <i32> [#uses=1]
- %186 = zext i32 %185 to i64 ; <i64> [#uses=1]
- %187 = load i16* %162, align 4 ; <i16> [#uses=1]
- %188 = trunc i16 %187 to i8 ; <i8> [#uses=1]
- %.sum = add i64 %186, 1 ; <i64> [#uses=1]
- %189 = getelementptr i8* %1, i64 %.sum ; <i8*> [#uses=1]
- store i8 %188, i8* %189, align 1
- %190 = load i16* %41, align 2 ; <i16> [#uses=1]
- %191 = add i16 %190, 2 ; <i16> [#uses=1]
- store i16 %191, i16* %41, align 2
- %192 = getelementptr %struct.MemPage* %pPage, i64 0, i32 1 ; <i8*> [#uses=1]
- store i8 1, i8* %192, align 1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/stdarg.ll b/libclamav/c++/llvm/test/CodeGen/X86/stdarg.ll
deleted file mode 100644
index 9778fa1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/stdarg.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep {testb \[%\]al, \[%\]al}
-
-%struct.__va_list_tag = type { i32, i32, i8*, i8* }
-
-define void @foo(i32 %x, ...) nounwind {
-entry:
- %ap = alloca [1 x %struct.__va_list_tag], align 8; <[1 x %struct.__va_list_tag]*> [#uses=2]
- %ap12 = bitcast [1 x %struct.__va_list_tag]* %ap to i8*; <i8*> [#uses=2]
- call void @llvm.va_start(i8* %ap12)
- %ap3 = getelementptr inbounds [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0; <%struct.__va_list_tag*> [#uses=1]
- call void @bar(%struct.__va_list_tag* %ap3) nounwind
- call void @llvm.va_end(i8* %ap12)
- ret void
-}
-
-declare void @llvm.va_start(i8*) nounwind
-
-declare void @bar(%struct.__va_list_tag*)
-
-declare void @llvm.va_end(i8*) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/stdcall.ll b/libclamav/c++/llvm/test/CodeGen/X86/stdcall.ll
deleted file mode 100644
index 70204bc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/stdcall.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s | FileCheck %s
-; PR5851
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f80:128:128-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
-target triple = "i386-mingw32"
-
-%0 = type { void (...)* }
-
-@B = global %0 { void (...)* bitcast (void ()* @MyFunc to void (...)*) }, align 4
-; CHECK: _B:
-; CHECK: .long _MyFunc@0
-
-define internal x86_stdcallcc void @MyFunc() nounwind {
-entry:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/store-empty-member.ll b/libclamav/c++/llvm/test/CodeGen/X86/store-empty-member.ll
deleted file mode 100644
index 37f86c6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/store-empty-member.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86 | FileCheck %s
-
-; Don't crash on an empty struct member.
-
-; CHECK: movl $2, 4(%esp)
-; CHECK: movl $1, (%esp)
-
-%testType = type {i32, [0 x i32], i32}
-
-define void @foo() nounwind {
- %1 = alloca %testType
- volatile store %testType {i32 1, [0 x i32] zeroinitializer, i32 2}, %testType* %1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/store-fp-constant.ll b/libclamav/c++/llvm/test/CodeGen/X86/store-fp-constant.ll
deleted file mode 100644
index 206886b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/store-fp-constant.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep rodata
-; RUN: llc < %s -march=x86 | not grep literal
-;
-; Check that no FP constants in this testcase ends up in the
-; constant pool.
-
-@G = external global float ; <float*> [#uses=1]
-
-declare void @extfloat(float)
-
-declare void @extdouble(double)
-
-define void @testfloatstore() {
- call void @extfloat( float 0x40934999A0000000 )
- call void @extdouble( double 0x409349A631F8A090 )
- store float 0x402A064C20000000, float* @G
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/store-global-address.ll b/libclamav/c++/llvm/test/CodeGen/X86/store-global-address.ll
deleted file mode 100644
index c8d4cbc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/store-global-address.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 | grep movl | count 1
-
-@dst = global i32 0 ; <i32*> [#uses=1]
-@ptr = global i32* null ; <i32**> [#uses=1]
-
-define void @test() {
- store i32* @dst, i32** @ptr
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/store_op_load_fold.ll b/libclamav/c++/llvm/test/CodeGen/X86/store_op_load_fold.ll
deleted file mode 100644
index 6e47eb3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/store_op_load_fold.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep mov
-;
-; Test the add and load are folded into the store instruction.
-
-@X = internal global i16 0 ; <i16*> [#uses=2]
-
-define void @foo() nounwind {
- %tmp.0 = load i16* @X ; <i16> [#uses=1]
- %tmp.3 = add i16 %tmp.0, 329 ; <i16> [#uses=1]
- store i16 %tmp.3, i16* @X
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/store_op_load_fold2.ll b/libclamav/c++/llvm/test/CodeGen/X86/store_op_load_fold2.ll
deleted file mode 100644
index 46e59e9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/store_op_load_fold2.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | FileCheck %s
-
-target datalayout = "e-p:32:32"
- %struct.Macroblock = type { i32, i32, i32, i32, i32, [8 x i32], %struct.Macroblock*, %struct.Macroblock*, i32, [2 x [4 x [4 x [2 x i32]]]], [16 x i8], [16 x i8], i32, i64, [4 x i32], [4 x i32], i64, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, double, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
-
-define internal fastcc i32 @dct_chroma(i32 %uv, i32 %cr_cbp) nounwind {
-cond_true2732.preheader: ; preds = %entry
- %tmp2666 = getelementptr %struct.Macroblock* null, i32 0, i32 13 ; <i64*> [#uses=2]
- %tmp2674 = trunc i32 0 to i8 ; <i8> [#uses=1]
- %tmp2667.us.us = load i64* %tmp2666 ; <i64> [#uses=1]
- %tmp2670.us.us = load i64* null ; <i64> [#uses=1]
- %shift.upgrd.1 = zext i8 %tmp2674 to i64 ; <i64> [#uses=1]
- %tmp2675.us.us = shl i64 %tmp2670.us.us, %shift.upgrd.1 ; <i64> [#uses=1]
- %tmp2675not.us.us = xor i64 %tmp2675.us.us, -1 ; <i64> [#uses=1]
- %tmp2676.us.us = and i64 %tmp2667.us.us, %tmp2675not.us.us ; <i64> [#uses=1]
- store i64 %tmp2676.us.us, i64* %tmp2666
- ret i32 0
-
-; CHECK: and {{E..}}, DWORD PTR [360]
-; CHECK: and DWORD PTR [356], {{E..}}
-; CHECK: mov DWORD PTR [360], {{E..}}
-
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/storetrunc-fp.ll b/libclamav/c++/llvm/test/CodeGen/X86/storetrunc-fp.ll
deleted file mode 100644
index 03ad093..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/storetrunc-fp.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep flds
-
-define void @foo(x86_fp80 %a, x86_fp80 %b, float* %fp) {
- %c = fadd x86_fp80 %a, %b
- %d = fptrunc x86_fp80 %c to float
- store float %d, float* %fp
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/stride-nine-with-base-reg.ll b/libclamav/c++/llvm/test/CodeGen/X86/stride-nine-with-base-reg.ll
deleted file mode 100644
index f4847a3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/stride-nine-with-base-reg.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc < %s -march=x86 -relocation-model=static | not grep lea
-; RUN: llc < %s -march=x86-64 | not grep lea
-
-; P should be sunk into the loop and folded into the address mode. There
-; shouldn't be any lea instructions inside the loop.
-
-@B = external global [1000 x i8], align 32
-@A = external global [1000 x i8], align 32
-@P = external global [1000 x i8], align 32
-@Q = external global [1000 x i8], align 32
-
-define void @foo(i32 %m, i32 %p) nounwind {
-entry:
- %tmp1 = icmp sgt i32 %m, 0
- br i1 %tmp1, label %bb, label %return
-
-bb:
- %i.019.0 = phi i32 [ %indvar.next, %bb ], [ 0, %entry ]
- %tmp2 = getelementptr [1000 x i8]* @B, i32 0, i32 %i.019.0
- %tmp3 = load i8* %tmp2, align 4
- %tmp4 = mul i8 %tmp3, 2
- %tmp5 = getelementptr [1000 x i8]* @A, i32 0, i32 %i.019.0
- store i8 %tmp4, i8* %tmp5, align 4
- %tmp8 = mul i32 %i.019.0, 9
- %tmp0 = add i32 %tmp8, %p
- %tmp10 = getelementptr [1000 x i8]* @P, i32 0, i32 %tmp0
- store i8 17, i8* %tmp10, align 4
- %tmp11 = getelementptr [1000 x i8]* @Q, i32 0, i32 %tmp0
- store i8 19, i8* %tmp11, align 4
- %indvar.next = add i32 %i.019.0, 1
- %exitcond = icmp eq i32 %indvar.next, %m
- br i1 %exitcond, label %return, label %bb
-
-return:
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/stride-reuse.ll b/libclamav/c++/llvm/test/CodeGen/X86/stride-reuse.ll
deleted file mode 100644
index 5cbd895..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/stride-reuse.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep lea
-; RUN: llc < %s -march=x86-64 | not grep lea
-
-@B = external global [1000 x float], align 32
-@A = external global [1000 x float], align 32
-@P = external global [1000 x i32], align 32
-
-define void @foo(i32 %m) nounwind {
-entry:
- %tmp1 = icmp sgt i32 %m, 0
- br i1 %tmp1, label %bb, label %return
-
-bb:
- %i.019.0 = phi i32 [ %indvar.next, %bb ], [ 0, %entry ]
- %tmp2 = getelementptr [1000 x float]* @B, i32 0, i32 %i.019.0
- %tmp3 = load float* %tmp2, align 4
- %tmp4 = fmul float %tmp3, 2.000000e+00
- %tmp5 = getelementptr [1000 x float]* @A, i32 0, i32 %i.019.0
- store float %tmp4, float* %tmp5, align 4
- %tmp8 = shl i32 %i.019.0, 1
- %tmp9 = add i32 %tmp8, 64
- %tmp10 = getelementptr [1000 x i32]* @P, i32 0, i32 %i.019.0
- store i32 %tmp9, i32* %tmp10, align 4
- %indvar.next = add i32 %i.019.0, 1
- %exitcond = icmp eq i32 %indvar.next, %m
- br i1 %exitcond, label %return, label %bb
-
-return:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/sub-with-overflow.ll b/libclamav/c++/llvm/test/CodeGen/X86/sub-with-overflow.ll
deleted file mode 100644
index 19f4079..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/sub-with-overflow.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {jo} | count 1
-; RUN: llc < %s -march=x86 | grep {jb} | count 1
-
-@ok = internal constant [4 x i8] c"%d\0A\00"
-@no = internal constant [4 x i8] c"no\0A\00"
-
-define i1 @func1(i32 %v1, i32 %v2) nounwind {
-entry:
- %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
- %sum = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %overflow, label %normal
-
-normal:
- %t1 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @ok, i32 0, i32 0), i32 %sum ) nounwind
- ret i1 true
-
-overflow:
- %t2 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @no, i32 0, i32 0) ) nounwind
- ret i1 false
-}
-
-define i1 @func2(i32 %v1, i32 %v2) nounwind {
-entry:
- %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
- %sum = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %carry, label %normal
-
-normal:
- %t1 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @ok, i32 0, i32 0), i32 %sum ) nounwind
- ret i1 true
-
-carry:
- %t2 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @no, i32 0, i32 0) ) nounwind
- ret i1 false
-}
-
-declare i32 @printf(i8*, ...) nounwind
-declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32)
-declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-0.ll b/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-0.ll
deleted file mode 100644
index d718c85..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-0.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep mov | count 1
-
-; Do eliminate the zero-extension instruction and rely on
-; x86-64's implicit zero-extension!
-
-define i64 @foo(i32* %p) nounwind {
- %t = load i32* %p
- %n = add i32 %t, 1
- %z = zext i32 %n to i64
- ret i64 %z
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-1.ll
deleted file mode 100644
index a297728..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-1.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep {leal .*), %e.\*} | count 1
-
-; Don't eliminate or coalesce away the explicit zero-extension!
-; This is currently using an leal because of a 3-addressification detail,
-; though this isn't necessary; The point of this test is to make sure
-; a 32-bit add is used.
-
-define i64 @foo(i64 %a) nounwind {
- %b = add i64 %a, 4294967295
- %c = and i64 %b, 4294967295
- %d = add i64 %c, 1
- ret i64 %d
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-2.ll
deleted file mode 100644
index 49d2e88..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-2.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | grep movl
-; rdar://6707985
-
- %XXOO = type { %"struct.XXC::XXCC", i8*, %"struct.XXC::XXOO::$_71" }
- %XXValue = type opaque
- %"struct.XXC::ArrayStorage" = type { i32, i32, i32, i8*, i8*, [1 x %XXValue*] }
- %"struct.XXC::XXArray" = type { %XXOO, i32, %"struct.XXC::ArrayStorage"* }
- %"struct.XXC::XXCC" = type { i32 (...)**, i8* }
- %"struct.XXC::XXOO::$_71" = type { [2 x %XXValue*] }
-
-define internal fastcc %XXValue* @t(i64* %out, %"struct.XXC::ArrayStorage"* %tmp9) nounwind {
-prologue:
- %array = load %XXValue** inttoptr (i64 11111111 to %XXValue**) ; <%XXValue*> [#uses=0]
- %index = load %XXValue** inttoptr (i64 22222222 to %XXValue**) ; <%XXValue*> [#uses=1]
- %tmp = ptrtoint %XXValue* %index to i64 ; <i64> [#uses=2]
- store i64 %tmp, i64* %out
- %tmp6 = trunc i64 %tmp to i32 ; <i32> [#uses=1]
- br label %bb5
-
-bb5: ; preds = %prologue
- %tmp10 = zext i32 %tmp6 to i64 ; <i64> [#uses=1]
- %tmp11 = getelementptr %"struct.XXC::ArrayStorage"* %tmp9, i64 0, i32 5, i64 %tmp10 ; <%XXValue**> [#uses=1]
- %tmp12 = load %XXValue** %tmp11, align 8 ; <%XXValue*> [#uses=1]
- ret %XXValue* %tmp12
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-3.ll
deleted file mode 100644
index 931ae75..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-3.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep imull
-
-; Don't eliminate or coalesce away the explicit zero-extension!
-
-define i64 @foo(i64 %a) {
- %b = mul i64 %a, 7823
- %c = and i64 %b, 4294967295
- %d = add i64 %c, 1
- ret i64 %d
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-4.ll b/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-4.ll
deleted file mode 100644
index 0ea5541..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-4.ll
+++ /dev/null
@@ -1,135 +0,0 @@
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: not grep leaq %t
-; RUN: not grep incq %t
-; RUN: not grep decq %t
-; RUN: not grep negq %t
-; RUN: not grep addq %t
-; RUN: not grep subq %t
-; RUN: not grep {movl %} %t
-
-; Utilize implicit zero-extension on x86-64 to eliminate explicit
-; zero-extensions. Shrink 64-bit adds to 32-bit when the high
-; 32-bits will be zeroed.
-
-define void @bar(i64 %x, i64 %y, i64* %z) nounwind readnone {
-entry:
- %t0 = add i64 %x, %y
- %t1 = and i64 %t0, 4294967295
- store i64 %t1, i64* %z
- ret void
-}
-define void @easy(i32 %x, i32 %y, i64* %z) nounwind readnone {
-entry:
- %t0 = add i32 %x, %y
- %tn = zext i32 %t0 to i64
- %t1 = and i64 %tn, 4294967295
- store i64 %t1, i64* %z
- ret void
-}
-define void @cola(i64 *%x, i64 %y, i64* %z, i64 %u) nounwind readnone {
-entry:
- %p = load i64* %x
- %t0 = add i64 %p, %y
- %t1 = and i64 %t0, 4294967295
- %t2 = xor i64 %t1, %u
- store i64 %t2, i64* %z
- ret void
-}
-define void @yaks(i64 *%x, i64 %y, i64* %z, i64 %u) nounwind readnone {
-entry:
- %p = load i64* %x
- %t0 = add i64 %p, %y
- %t1 = xor i64 %t0, %u
- %t2 = and i64 %t1, 4294967295
- store i64 %t2, i64* %z
- ret void
-}
-define void @foo(i64 *%x, i64 *%y, i64* %z) nounwind readnone {
-entry:
- %a = load i64* %x
- %b = load i64* %y
- %t0 = add i64 %a, %b
- %t1 = and i64 %t0, 4294967295
- store i64 %t1, i64* %z
- ret void
-}
-define void @avo(i64 %x, i64* %z, i64 %u) nounwind readnone {
-entry:
- %t0 = add i64 %x, 734847
- %t1 = and i64 %t0, 4294967295
- %t2 = xor i64 %t1, %u
- store i64 %t2, i64* %z
- ret void
-}
-define void @phe(i64 %x, i64* %z, i64 %u) nounwind readnone {
-entry:
- %t0 = add i64 %x, 734847
- %t1 = xor i64 %t0, %u
- %t2 = and i64 %t1, 4294967295
- store i64 %t2, i64* %z
- ret void
-}
-define void @oze(i64 %y, i64* %z) nounwind readnone {
-entry:
- %t0 = add i64 %y, 1
- %t1 = and i64 %t0, 4294967295
- store i64 %t1, i64* %z
- ret void
-}
-
-define void @sbar(i64 %x, i64 %y, i64* %z) nounwind readnone {
-entry:
- %t0 = sub i64 %x, %y
- %t1 = and i64 %t0, 4294967295
- store i64 %t1, i64* %z
- ret void
-}
-define void @seasy(i32 %x, i32 %y, i64* %z) nounwind readnone {
-entry:
- %t0 = sub i32 %x, %y
- %tn = zext i32 %t0 to i64
- %t1 = and i64 %tn, 4294967295
- store i64 %t1, i64* %z
- ret void
-}
-define void @scola(i64 *%x, i64 %y, i64* %z, i64 %u) nounwind readnone {
-entry:
- %p = load i64* %x
- %t0 = sub i64 %p, %y
- %t1 = and i64 %t0, 4294967295
- %t2 = xor i64 %t1, %u
- store i64 %t2, i64* %z
- ret void
-}
-define void @syaks(i64 *%x, i64 %y, i64* %z, i64 %u) nounwind readnone {
-entry:
- %p = load i64* %x
- %t0 = sub i64 %p, %y
- %t1 = xor i64 %t0, %u
- %t2 = and i64 %t1, 4294967295
- store i64 %t2, i64* %z
- ret void
-}
-define void @sfoo(i64 *%x, i64 *%y, i64* %z) nounwind readnone {
-entry:
- %a = load i64* %x
- %b = load i64* %y
- %t0 = sub i64 %a, %b
- %t1 = and i64 %t0, 4294967295
- store i64 %t1, i64* %z
- ret void
-}
-define void @swya(i64 %y, i64* %z) nounwind readnone {
-entry:
- %t0 = sub i64 0, %y
- %t1 = and i64 %t0, 4294967295
- store i64 %t1, i64* %z
- ret void
-}
-define void @soze(i64 %y, i64* %z) nounwind readnone {
-entry:
- %t0 = sub i64 %y, 1
- %t1 = and i64 %t0, 4294967295
- store i64 %t1, i64* %z
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-6.ll b/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-6.ll
deleted file mode 100644
index 76430cd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/subreg-to-reg-6.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=x86-64
-
-define i64 @foo() nounwind {
-entry:
- %t0 = load i32* null, align 8
- switch i32 %t0, label %bb65 [
- i32 16, label %bb
- i32 12, label %bb56
- ]
-
-bb:
- br label %bb65
-
-bb56:
- unreachable
-
-bb65:
- %a = phi i64 [ 0, %bb ], [ 0, %entry ]
- tail call void asm "", "{cx}"(i64 %a) nounwind
- %t15 = and i64 %a, 4294967295
- ret i64 %t15
-}
-
-define i64 @bar(i64 %t0) nounwind {
- call void asm "", "{cx}"(i64 0) nounwind
- %t1 = sub i64 0, %t0
- %t2 = and i64 %t1, 4294967295
- ret i64 %t2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/switch-crit-edge-constant.ll b/libclamav/c++/llvm/test/CodeGen/X86/switch-crit-edge-constant.ll
deleted file mode 100644
index 1f2ab0d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/switch-crit-edge-constant.ll
+++ /dev/null
@@ -1,52 +0,0 @@
-; PR925
-; RUN: llc < %s -march=x86 | \
-; RUN: grep mov.*str1 | count 1
-
-target datalayout = "e-p:32:32"
-target triple = "i686-apple-darwin8.7.2"
-@str1 = internal constant [5 x i8] c"bonk\00" ; <[5 x i8]*> [#uses=1]
-@str2 = internal constant [5 x i8] c"bork\00" ; <[5 x i8]*> [#uses=1]
-@str = internal constant [8 x i8] c"perfwap\00" ; <[8 x i8]*> [#uses=1]
-
-define void @foo(i32 %C) {
-entry:
- switch i32 %C, label %bb2 [
- i32 1, label %blahaha
- i32 2, label %blahaha
- i32 3, label %blahaha
- i32 4, label %blahaha
- i32 5, label %blahaha
- i32 6, label %blahaha
- i32 7, label %blahaha
- i32 8, label %blahaha
- i32 9, label %blahaha
- i32 10, label %blahaha
- ]
-
-bb2: ; preds = %entry
- %tmp5 = and i32 %C, 123 ; <i32> [#uses=1]
- %tmp = icmp eq i32 %tmp5, 0 ; <i1> [#uses=1]
- br i1 %tmp, label %blahaha, label %cond_true
-
-cond_true: ; preds = %bb2
- br label %blahaha
-
-blahaha: ; preds = %cond_true, %bb2, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry
- %s.0 = phi i8* [ getelementptr ([8 x i8]* @str, i32 0, i64 0), %cond_true ], [ getelementptr ([5 x i8]* @str1, i32 0, i64 0), %entry ], [ getelementptr ([5 x i8]* @str1, i32 0, i64 0), %entry ], [ getelementptr ([5 x i8]* @str1, i32 0, i64 0), %entry ], [ getelementptr ([5 x i8]* @str1, i32 0, i64 0), %entry ], [ getelementptr ([5 x i8]* @str1, i32 0, i64 0), %entry ], [ getelementptr ([5 x i8]* @str1, i32 0, i64 0), %entry ], [ getelementptr ([5 x i8]* @str1, i32 0, i64 0), %entry ], [ getelementptr ([5 x i8]* @str1, i32 0, i64 0), %entry ], [ getelementptr ([5 x i8]* @str1, i32 0, i64 0), %entry ], [ getelementptr ([5 x i8]* @str1, i32 0, i64 0), %entry ], [ getelementptr ([5 x i8]* @str2, i32 0, i64 0), %bb2 ] ; <i8*> [#uses=13]
- %tmp8 = tail call i32 (i8*, ...)* @printf( i8* %s.0 ) ; <i32> [#uses=0]
- %tmp10 = tail call i32 (i8*, ...)* @printf( i8* %s.0 ) ; <i32> [#uses=0]
- %tmp12 = tail call i32 (i8*, ...)* @printf( i8* %s.0 ) ; <i32> [#uses=0]
- %tmp14 = tail call i32 (i8*, ...)* @printf( i8* %s.0 ) ; <i32> [#uses=0]
- %tmp16 = tail call i32 (i8*, ...)* @printf( i8* %s.0 ) ; <i32> [#uses=0]
- %tmp18 = tail call i32 (i8*, ...)* @printf( i8* %s.0 ) ; <i32> [#uses=0]
- %tmp20 = tail call i32 (i8*, ...)* @printf( i8* %s.0 ) ; <i32> [#uses=0]
- %tmp22 = tail call i32 (i8*, ...)* @printf( i8* %s.0 ) ; <i32> [#uses=0]
- %tmp24 = tail call i32 (i8*, ...)* @printf( i8* %s.0 ) ; <i32> [#uses=0]
- %tmp26 = tail call i32 (i8*, ...)* @printf( i8* %s.0 ) ; <i32> [#uses=0]
- %tmp28 = tail call i32 (i8*, ...)* @printf( i8* %s.0 ) ; <i32> [#uses=0]
- %tmp30 = tail call i32 (i8*, ...)* @printf( i8* %s.0 ) ; <i32> [#uses=0]
- %tmp32 = tail call i32 (i8*, ...)* @printf( i8* %s.0 ) ; <i32> [#uses=0]
- ret void
-}
-
-declare i32 @printf(i8*, ...)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/switch-zextload.ll b/libclamav/c++/llvm/test/CodeGen/X86/switch-zextload.ll
deleted file mode 100644
index 55425bc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/switch-zextload.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | count 1
-
-; Do zextload, instead of a load and a separate zext.
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin9.6"
- %struct.move_s = type { i32, i32, i32, i32, i32, i32 }
- %struct.node_t = type { i8, i8, i8, i8, i32, i32, %struct.node_t**, %struct.node_t*, %struct.move_s }
-
-define fastcc void @set_proof_and_disproof_numbers(%struct.node_t* nocapture %node) nounwind {
-entry:
- %0 = load i8* null, align 1 ; <i8> [#uses=1]
- switch i8 %0, label %return [
- i8 2, label %bb31
- i8 0, label %bb80
- i8 1, label %bb82
- i8 3, label %bb84
- ]
-
-bb31: ; preds = %entry
- unreachable
-
-bb80: ; preds = %entry
- ret void
-
-bb82: ; preds = %entry
- ret void
-
-bb84: ; preds = %entry
- ret void
-
-return: ; preds = %entry
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/swizzle.ll b/libclamav/c++/llvm/test/CodeGen/X86/swizzle.ll
deleted file mode 100644
index 23e0c24..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/swizzle.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movlps
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movsd
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep movups
-; rdar://6523650
-
- %struct.vector4_t = type { <4 x float> }
-
-define void @swizzle(i8* nocapture %a, %struct.vector4_t* nocapture %b, %struct.vector4_t* nocapture %c) nounwind {
-entry:
- %0 = getelementptr %struct.vector4_t* %b, i32 0, i32 0 ; <<4 x float>*> [#uses=2]
- %1 = load <4 x float>* %0, align 4 ; <<4 x float>> [#uses=1]
- %tmp.i = bitcast i8* %a to double* ; <double*> [#uses=1]
- %tmp1.i = load double* %tmp.i ; <double> [#uses=1]
- %2 = insertelement <2 x double> undef, double %tmp1.i, i32 0 ; <<2 x double>> [#uses=1]
- %tmp2.i = bitcast <2 x double> %2 to <4 x float> ; <<4 x float>> [#uses=1]
- %3 = shufflevector <4 x float> %1, <4 x float> %tmp2.i, <4 x i32> < i32 4, i32 5, i32 2, i32 3 > ; <<4 x float>> [#uses=1]
- store <4 x float> %3, <4 x float>* %0, align 4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tail-opts.ll b/libclamav/c++/llvm/test/CodeGen/X86/tail-opts.ll
deleted file mode 100644
index 7b21e1b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tail-opts.ll
+++ /dev/null
@@ -1,408 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -asm-verbose=false -post-RA-scheduler=true | FileCheck %s
-
-declare void @bar(i32)
-declare void @car(i32)
-declare void @dar(i32)
-declare void @ear(i32)
-declare void @far(i32)
-declare i1 @qux()
-
-@GHJK = global i32 0
-@HABC = global i32 0
-
-; BranchFolding should tail-merge the stores since they all precede
-; direct branches to the same place.
-
-; CHECK: tail_merge_me:
-; CHECK-NOT: GHJK
-; CHECK: movl $0, GHJK(%rip)
-; CHECK-NEXT: movl $1, HABC(%rip)
-; CHECK-NOT: GHJK
-
-define void @tail_merge_me() nounwind {
-entry:
- %a = call i1 @qux()
- br i1 %a, label %A, label %next
-next:
- %b = call i1 @qux()
- br i1 %b, label %B, label %C
-
-A:
- call void @bar(i32 0)
- store i32 0, i32* @GHJK
- br label %M
-
-B:
- call void @car(i32 1)
- store i32 0, i32* @GHJK
- br label %M
-
-C:
- call void @dar(i32 2)
- store i32 0, i32* @GHJK
- br label %M
-
-M:
- store i32 1, i32* @HABC
- %c = call i1 @qux()
- br i1 %c, label %return, label %altret
-
-return:
- call void @ear(i32 1000)
- ret void
-altret:
- call void @far(i32 1001)
- ret void
-}
-
-declare i8* @choose(i8*, i8*)
-
-; BranchFolding should tail-duplicate the indirect jump to avoid
-; redundant branching.
-
-; CHECK: tail_duplicate_me:
-; CHECK: movl $0, GHJK(%rip)
-; CHECK-NEXT: jmpq *%rbx
-; CHECK: movl $0, GHJK(%rip)
-; CHECK-NEXT: jmpq *%rbx
-; CHECK: movl $0, GHJK(%rip)
-; CHECK-NEXT: jmpq *%rbx
-
-define void @tail_duplicate_me() nounwind {
-entry:
- %a = call i1 @qux()
- %c = call i8* @choose(i8* blockaddress(@tail_duplicate_me, %return),
- i8* blockaddress(@tail_duplicate_me, %altret))
- br i1 %a, label %A, label %next
-next:
- %b = call i1 @qux()
- br i1 %b, label %B, label %C
-
-A:
- call void @bar(i32 0)
- store i32 0, i32* @GHJK
- br label %M
-
-B:
- call void @car(i32 1)
- store i32 0, i32* @GHJK
- br label %M
-
-C:
- call void @dar(i32 2)
- store i32 0, i32* @GHJK
- br label %M
-
-M:
- indirectbr i8* %c, [label %return, label %altret]
-
-return:
- call void @ear(i32 1000)
- ret void
-altret:
- call void @far(i32 1001)
- ret void
-}
-
-; BranchFolding shouldn't try to merge the tails of two blocks
-; with only a branch in common, regardless of the fallthrough situation.
-
-; CHECK: dont_merge_oddly:
-; CHECK-NOT: ret
-; CHECK: ucomiss %xmm1, %xmm2
-; CHECK-NEXT: jbe .LBB3_3
-; CHECK-NEXT: ucomiss %xmm0, %xmm1
-; CHECK-NEXT: ja .LBB3_4
-; CHECK-NEXT: .LBB3_2:
-; CHECK-NEXT: movb $1, %al
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB3_3:
-; CHECK-NEXT: ucomiss %xmm0, %xmm2
-; CHECK-NEXT: jbe .LBB3_2
-; CHECK-NEXT: .LBB3_4:
-; CHECK-NEXT: xorb %al, %al
-; CHECK-NEXT: ret
-
-define i1 @dont_merge_oddly(float* %result) nounwind {
-entry:
- %tmp4 = getelementptr float* %result, i32 2
- %tmp5 = load float* %tmp4, align 4
- %tmp7 = getelementptr float* %result, i32 4
- %tmp8 = load float* %tmp7, align 4
- %tmp10 = getelementptr float* %result, i32 6
- %tmp11 = load float* %tmp10, align 4
- %tmp12 = fcmp olt float %tmp8, %tmp11
- br i1 %tmp12, label %bb, label %bb21
-
-bb:
- %tmp23469 = fcmp olt float %tmp5, %tmp8
- br i1 %tmp23469, label %bb26, label %bb30
-
-bb21:
- %tmp23 = fcmp olt float %tmp5, %tmp11
- br i1 %tmp23, label %bb26, label %bb30
-
-bb26:
- ret i1 0
-
-bb30:
- ret i1 1
-}
-
-; Do any-size tail-merging when two candidate blocks will both require
-; an unconditional jump to complete a two-way conditional branch.
-
-; CHECK: c_expand_expr_stmt:
-; CHECK: jmp .LBB4_7
-; CHECK-NEXT: .LBB4_12:
-; CHECK-NEXT: movq 8(%rax), %rax
-; CHECK-NEXT: movb 16(%rax), %al
-; CHECK-NEXT: cmpb $16, %al
-; CHECK-NEXT: je .LBB4_6
-; CHECK-NEXT: cmpb $23, %al
-; CHECK-NEXT: je .LBB4_6
-; CHECK-NEXT: jmp .LBB4_15
-; CHECK-NEXT: .LBB4_14:
-; CHECK-NEXT: cmpb $23, %bl
-; CHECK-NEXT: jne .LBB4_15
-; CHECK-NEXT: .LBB4_15:
-
-%0 = type { %struct.rtx_def* }
-%struct.lang_decl = type opaque
-%struct.rtx_def = type { i16, i8, i8, [1 x %union.rtunion] }
-%struct.tree_decl = type { [24 x i8], i8*, i32, %union.tree_node*, i32, i8, i8, i8, i8, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %struct.rtx_def*, %union..2anon, %0, %union.tree_node*, %struct.lang_decl* }
-%union..2anon = type { i32 }
-%union.rtunion = type { i8* }
-%union.tree_node = type { %struct.tree_decl }
-
-define fastcc void @c_expand_expr_stmt(%union.tree_node* %expr) nounwind {
-entry:
- %tmp4 = load i8* null, align 8 ; <i8> [#uses=3]
- switch i8 %tmp4, label %bb3 [
- i8 18, label %bb
- ]
-
-bb: ; preds = %entry
- switch i32 undef, label %bb1 [
- i32 0, label %bb2.i
- i32 37, label %bb.i
- ]
-
-bb.i: ; preds = %bb
- switch i32 undef, label %bb1 [
- i32 0, label %lvalue_p.exit
- ]
-
-bb2.i: ; preds = %bb
- br label %bb3
-
-lvalue_p.exit: ; preds = %bb.i
- %tmp21 = load %union.tree_node** null, align 8 ; <%union.tree_node*> [#uses=3]
- %tmp22 = getelementptr inbounds %union.tree_node* %tmp21, i64 0, i32 0, i32 0, i64 0 ; <i8*> [#uses=1]
- %tmp23 = load i8* %tmp22, align 8 ; <i8> [#uses=1]
- %tmp24 = zext i8 %tmp23 to i32 ; <i32> [#uses=1]
- switch i32 %tmp24, label %lvalue_p.exit4 [
- i32 0, label %bb2.i3
- i32 2, label %bb.i1
- ]
-
-bb.i1: ; preds = %lvalue_p.exit
- %tmp25 = getelementptr inbounds %union.tree_node* %tmp21, i64 0, i32 0, i32 2 ; <i32*> [#uses=1]
- %tmp26 = bitcast i32* %tmp25 to %union.tree_node** ; <%union.tree_node**> [#uses=1]
- %tmp27 = load %union.tree_node** %tmp26, align 8 ; <%union.tree_node*> [#uses=2]
- %tmp28 = getelementptr inbounds %union.tree_node* %tmp27, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
- %tmp29 = load i8* %tmp28, align 8 ; <i8> [#uses=1]
- %tmp30 = zext i8 %tmp29 to i32 ; <i32> [#uses=1]
- switch i32 %tmp30, label %lvalue_p.exit4 [
- i32 0, label %bb2.i.i2
- i32 2, label %bb.i.i
- ]
-
-bb.i.i: ; preds = %bb.i1
- %tmp34 = tail call fastcc i32 @lvalue_p(%union.tree_node* null) nounwind ; <i32> [#uses=1]
- %phitmp = icmp ne i32 %tmp34, 0 ; <i1> [#uses=1]
- br label %lvalue_p.exit4
-
-bb2.i.i2: ; preds = %bb.i1
- %tmp35 = getelementptr inbounds %union.tree_node* %tmp27, i64 0, i32 0, i32 0, i64 8 ; <i8*> [#uses=1]
- %tmp36 = bitcast i8* %tmp35 to %union.tree_node** ; <%union.tree_node**> [#uses=1]
- %tmp37 = load %union.tree_node** %tmp36, align 8 ; <%union.tree_node*> [#uses=1]
- %tmp38 = getelementptr inbounds %union.tree_node* %tmp37, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
- %tmp39 = load i8* %tmp38, align 8 ; <i8> [#uses=1]
- switch i8 %tmp39, label %bb2 [
- i8 16, label %lvalue_p.exit4
- i8 23, label %lvalue_p.exit4
- ]
-
-bb2.i3: ; preds = %lvalue_p.exit
- %tmp40 = getelementptr inbounds %union.tree_node* %tmp21, i64 0, i32 0, i32 0, i64 8 ; <i8*> [#uses=1]
- %tmp41 = bitcast i8* %tmp40 to %union.tree_node** ; <%union.tree_node**> [#uses=1]
- %tmp42 = load %union.tree_node** %tmp41, align 8 ; <%union.tree_node*> [#uses=1]
- %tmp43 = getelementptr inbounds %union.tree_node* %tmp42, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
- %tmp44 = load i8* %tmp43, align 8 ; <i8> [#uses=1]
- switch i8 %tmp44, label %bb2 [
- i8 16, label %lvalue_p.exit4
- i8 23, label %lvalue_p.exit4
- ]
-
-lvalue_p.exit4: ; preds = %bb2.i3, %bb2.i3, %bb2.i.i2, %bb2.i.i2, %bb.i.i, %bb.i1, %lvalue_p.exit
- %tmp45 = phi i1 [ %phitmp, %bb.i.i ], [ false, %bb2.i.i2 ], [ false, %bb2.i.i2 ], [ false, %bb.i1 ], [ false, %bb2.i3 ], [ false, %bb2.i3 ], [ false, %lvalue_p.exit ] ; <i1> [#uses=1]
- %tmp46 = icmp eq i8 %tmp4, 0 ; <i1> [#uses=1]
- %or.cond = or i1 %tmp45, %tmp46 ; <i1> [#uses=1]
- br i1 %or.cond, label %bb2, label %bb3
-
-bb1: ; preds = %bb2.i.i, %bb.i, %bb
- %.old = icmp eq i8 %tmp4, 23 ; <i1> [#uses=1]
- br i1 %.old, label %bb2, label %bb3
-
-bb2: ; preds = %bb1, %lvalue_p.exit4, %bb2.i3, %bb2.i.i2
- br label %bb3
-
-bb3: ; preds = %bb2, %bb1, %lvalue_p.exit4, %bb2.i, %entry
- %expr_addr.0 = phi %union.tree_node* [ null, %bb2 ], [ %expr, %bb2.i ], [ %expr, %entry ], [ %expr, %bb1 ], [ %expr, %lvalue_p.exit4 ] ; <%union.tree_node*> [#uses=0]
- unreachable
-}
-
-declare fastcc i32 @lvalue_p(%union.tree_node* nocapture) nounwind readonly
-
-declare fastcc %union.tree_node* @default_conversion(%union.tree_node*) nounwind
-
-
-; If one tail merging candidate falls through into the other,
-; tail merging is likely profitable regardless of how few
-; instructions are involved. This function should have only
-; one ret instruction.
-
-; CHECK: foo:
-; CHECK: callq func
-; CHECK-NEXT: .LBB5_2:
-; CHECK-NEXT: addq $8, %rsp
-; CHECK-NEXT: ret
-
-define void @foo(i1* %V) nounwind {
-entry:
- %t0 = icmp eq i1* %V, null
- br i1 %t0, label %return, label %bb
-
-bb:
- call void @func()
- ret void
-
-return:
- ret void
-}
-
-declare void @func()
-
-; one - One instruction may be tail-duplicated even with optsize.
-
-; CHECK: one:
-; CHECK: movl $0, XYZ(%rip)
-; CHECK: movl $0, XYZ(%rip)
-
-@XYZ = external global i32
-
-define void @one() nounwind optsize {
-entry:
- %0 = icmp eq i32 undef, 0
- br i1 %0, label %bbx, label %bby
-
-bby:
- switch i32 undef, label %bb7 [
- i32 16, label %return
- ]
-
-bb7:
- volatile store i32 0, i32* @XYZ
- unreachable
-
-bbx:
- switch i32 undef, label %bb12 [
- i32 128, label %return
- ]
-
-bb12:
- volatile store i32 0, i32* @XYZ
- unreachable
-
-return:
- ret void
-}
-
-; two - Same as one, but with two instructions in the common
-; tail instead of one. This is too much to be merged, given
-; the optsize attribute.
-
-; CHECK: two:
-; CHECK-NOT: XYZ
-; CHECK: movl $0, XYZ(%rip)
-; CHECK: movl $1, XYZ(%rip)
-; CHECK-NOT: XYZ
-; CHECK: ret
-
-define void @two() nounwind optsize {
-entry:
- %0 = icmp eq i32 undef, 0
- br i1 %0, label %bbx, label %bby
-
-bby:
- switch i32 undef, label %bb7 [
- i32 16, label %return
- ]
-
-bb7:
- volatile store i32 0, i32* @XYZ
- volatile store i32 1, i32* @XYZ
- unreachable
-
-bbx:
- switch i32 undef, label %bb12 [
- i32 128, label %return
- ]
-
-bb12:
- volatile store i32 0, i32* @XYZ
- volatile store i32 1, i32* @XYZ
- unreachable
-
-return:
- ret void
-}
-
-; two_nosize - Same as two, but without the optsize attribute.
-; Now two instructions are enough to be tail-duplicated.
-
-; CHECK: two_nosize:
-; CHECK: movl $0, XYZ(%rip)
-; CHECK: movl $1, XYZ(%rip)
-; CHECK: movl $0, XYZ(%rip)
-; CHECK: movl $1, XYZ(%rip)
-
-define void @two_nosize() nounwind {
-entry:
- %0 = icmp eq i32 undef, 0
- br i1 %0, label %bbx, label %bby
-
-bby:
- switch i32 undef, label %bb7 [
- i32 16, label %return
- ]
-
-bb7:
- volatile store i32 0, i32* @XYZ
- volatile store i32 1, i32* @XYZ
- unreachable
-
-bbx:
- switch i32 undef, label %bb12 [
- i32 128, label %return
- ]
-
-bb12:
- volatile store i32 0, i32* @XYZ
- volatile store i32 1, i32* @XYZ
- unreachable
-
-return:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tailcall-fastisel.ll b/libclamav/c++/llvm/test/CodeGen/X86/tailcall-fastisel.ll
deleted file mode 100644
index d54fb41..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tailcall-fastisel.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86-64 -tailcallopt -fast-isel | grep TAILCALL
-
-; Fast-isel shouldn't attempt to handle this tail call, and it should
-; cleanly terminate instruction selection in the block after it's
-; done to avoid emitting invalid MachineInstrs.
-
-%0 = type { i64, i32, i8* }
-
-define fastcc i8* @"visit_array_aux<`Reference>"(%0 %arg, i32 %arg1) nounwind {
-fail: ; preds = %entry
- %tmp20 = tail call fastcc i8* @"visit_array_aux<`Reference>"(%0 %arg, i32 undef) ; <i8*> [#uses=1]
- ret i8* %tmp20
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tailcall-i1.ll b/libclamav/c++/llvm/test/CodeGen/X86/tailcall-i1.ll
deleted file mode 100644
index 8ef1f11..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tailcall-i1.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86 -tailcallopt | grep TAILCALL
-define fastcc i1 @i1test(i32, i32, i32, i32) {
- entry:
- %4 = tail call fastcc i1 @i1test( i32 %0, i32 %1, i32 %2, i32 %3)
- ret i1 %4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tailcall-largecode.ll b/libclamav/c++/llvm/test/CodeGen/X86/tailcall-largecode.ll
deleted file mode 100644
index 8ddc405..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tailcall-largecode.ll
+++ /dev/null
@@ -1,71 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-linux-gnu -tailcallopt -code-model=large | FileCheck %s
-
-declare fastcc i32 @callee(i32 %arg)
-define fastcc i32 @directcall(i32 %arg) {
-entry:
-; This is the large code model, so &callee may not fit into the jmp
-; instruction. Instead, stick it into a register.
-; CHECK: movabsq $callee, [[REGISTER:%r[a-z0-9]+]]
-; CHECK: jmpq *[[REGISTER]] # TAILCALL
- %res = tail call fastcc i32 @callee(i32 %arg)
- ret i32 %res
-}
-
-; Check that the register used for an indirect tail call doesn't
-; clobber any of the arguments.
-define fastcc i32 @indirect_manyargs(i32(i32,i32,i32,i32,i32,i32,i32)* %target) {
-; Adjust the stack to enter the function. (The amount of the
-; adjustment may change in the future, in which case the location of
-; the stack argument and the return adjustment will change too.)
-; CHECK: subq $8, %rsp
-; Put the call target into R11, which won't be clobbered while restoring
-; callee-saved registers and won't be used for passing arguments.
-; CHECK: movq %rdi, %r11
-; Pass the stack argument.
-; CHECK: movl $7, 16(%rsp)
-; Pass the register arguments, in the right registers.
-; CHECK: movl $1, %edi
-; CHECK: movl $2, %esi
-; CHECK: movl $3, %edx
-; CHECK: movl $4, %ecx
-; CHECK: movl $5, %r8d
-; CHECK: movl $6, %r9d
-; Adjust the stack to "return".
-; CHECK: addq $8, %rsp
-; And tail-call to the target.
-; CHECK: jmpq *%r11 # TAILCALL
- %res = tail call fastcc i32 %target(i32 1, i32 2, i32 3, i32 4, i32 5,
- i32 6, i32 7)
- ret i32 %res
-}
-
-; Check that the register used for a direct tail call doesn't clobber
-; any of the arguments.
-declare fastcc i32 @manyargs_callee(i32,i32,i32,i32,i32,i32,i32)
-define fastcc i32 @direct_manyargs() {
-; Adjust the stack to enter the function. (The amount of the
-; adjustment may change in the future, in which case the location of
-; the stack argument and the return adjustment will change too.)
-; CHECK: subq $8, %rsp
-; Pass the stack argument.
-; CHECK: movl $7, 16(%rsp)
-; Pass the register arguments, in the right registers.
-; CHECK: movl $1, %edi
-; CHECK: movl $2, %esi
-; CHECK: movl $3, %edx
-; CHECK: movl $4, %ecx
-; CHECK: movl $5, %r8d
-; CHECK: movl $6, %r9d
-; This is the large code model, so &manyargs_callee may not fit into
-; the jmp instruction. Put it into R11, which won't be clobbered
-; while restoring callee-saved registers and won't be used for passing
-; arguments.
-; CHECK: movabsq $manyargs_callee, %r11
-; Adjust the stack to "return".
-; CHECK: addq $8, %rsp
-; And tail-call to the target.
-; CHECK: jmpq *%r11 # TAILCALL
- %res = tail call fastcc i32 @manyargs_callee(i32 1, i32 2, i32 3, i32 4,
- i32 5, i32 6, i32 7)
- ret i32 %res
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tailcall-stackalign.ll b/libclamav/c++/llvm/test/CodeGen/X86/tailcall-stackalign.ll
deleted file mode 100644
index 0233139..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tailcall-stackalign.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -mtriple=i686-unknown-linux -tailcallopt | FileCheck %s
-; Linux has 8 byte alignment so the params cause stack size 20 when tailcallopt
-; is enabled, ensure that a normal fastcc call has matching stack size
-
-
-define fastcc i32 @tailcallee(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
- ret i32 %a3
-}
-
-define fastcc i32 @tailcaller(i32 %in1, i32 %in2, i32 %in3, i32 %in4) {
- %tmp11 = tail call fastcc i32 @tailcallee(i32 %in1, i32 %in2,
- i32 %in1, i32 %in2)
- ret i32 %tmp11
-}
-
-define i32 @main(i32 %argc, i8** %argv) {
- %tmp1 = call fastcc i32 @tailcaller( i32 1, i32 2, i32 3, i32 4 )
- ; expect match subl [stacksize] here
- ret i32 0
-}
-
-; CHECK: call tailcaller
-; CHECK-NEXT: subl $12
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tailcall-structret.ll b/libclamav/c++/llvm/test/CodeGen/X86/tailcall-structret.ll
deleted file mode 100644
index d8be4b2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tailcall-structret.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86 -tailcallopt | grep TAILCALL
-define fastcc { { i8*, i8* }*, i8*} @init({ { i8*, i8* }*, i8*}, i32) {
-entry:
- %2 = tail call fastcc { { i8*, i8* }*, i8* } @init({ { i8*, i8*}*, i8*} %0, i32 %1)
- ret { { i8*, i8* }*, i8*} %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tailcall-void.ll b/libclamav/c++/llvm/test/CodeGen/X86/tailcall-void.ll
deleted file mode 100644
index 4e578d1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tailcall-void.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86 -tailcallopt | grep TAILCALL
-define fastcc void @i1test(i32, i32, i32, i32) {
- entry:
- tail call fastcc void @i1test( i32 %0, i32 %1, i32 %2, i32 %3)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tailcall1.ll b/libclamav/c++/llvm/test/CodeGen/X86/tailcall1.ll
deleted file mode 100644
index f7ff5d5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tailcall1.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc < %s -march=x86 -tailcallopt | grep TAILCALL | count 5
-
-; With -tailcallopt, CodeGen guarantees a tail call optimization
-; for all of these.
-
-declare fastcc i32 @tailcallee(i32 %a1, i32 %a2, i32 %a3, i32 %a4)
-
-define fastcc i32 @tailcaller(i32 %in1, i32 %in2) nounwind {
-entry:
- %tmp11 = tail call fastcc i32 @tailcallee(i32 %in1, i32 %in2, i32 %in1, i32 %in2)
- ret i32 %tmp11
-}
-
-declare fastcc i8* @alias_callee()
-
-define fastcc noalias i8* @noalias_caller() nounwind {
- %p = tail call fastcc i8* @alias_callee()
- ret i8* %p
-}
-
-declare fastcc noalias i8* @noalias_callee()
-
-define fastcc i8* @alias_caller() nounwind {
- %p = tail call fastcc noalias i8* @noalias_callee()
- ret i8* %p
-}
-
-declare fastcc i32 @i32_callee()
-
-define fastcc i32 @ret_undef() nounwind {
- %p = tail call fastcc i32 @i32_callee()
- ret i32 undef
-}
-
-declare fastcc void @does_not_return()
-
-define fastcc i32 @noret() nounwind {
- tail call fastcc void @does_not_return()
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tailcallbyval.ll b/libclamav/c++/llvm/test/CodeGen/X86/tailcallbyval.ll
deleted file mode 100644
index 7002560..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tailcallbyval.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 -tailcallopt | grep TAILCALL
-; RUN: llc < %s -march=x86 -tailcallopt | grep {movl\[\[:space:\]\]*4(%esp), %eax} | count 1
-%struct.s = type {i32, i32, i32, i32, i32, i32, i32, i32,
- i32, i32, i32, i32, i32, i32, i32, i32,
- i32, i32, i32, i32, i32, i32, i32, i32 }
-
-define fastcc i32 @tailcallee(%struct.s* byval %a) nounwind {
-entry:
- %tmp2 = getelementptr %struct.s* %a, i32 0, i32 0
- %tmp3 = load i32* %tmp2
- ret i32 %tmp3
-}
-
-define fastcc i32 @tailcaller(%struct.s* byval %a) nounwind {
-entry:
- %tmp4 = tail call fastcc i32 @tailcallee(%struct.s* %a byval)
- ret i32 %tmp4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tailcallbyval64.ll b/libclamav/c++/llvm/test/CodeGen/X86/tailcallbyval64.ll
deleted file mode 100644
index 7c685b8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tailcallbyval64.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=x86-64 -tailcallopt | grep TAILCALL
-; Expect 2 rep;movs because of tail call byval lowering.
-; RUN: llc < %s -march=x86-64 -tailcallopt | grep rep | wc -l | grep 2
-; A sequence of copyto/copyfrom virtual registers is used to deal with byval
-; lowering appearing after moving arguments to registers. The following two
-; checks verify that the register allocator changes those sequences to direct
-; moves to argument register where it can (for registers that are not used in
-; byval lowering - not rsi, not rdi, not rcx).
-; Expect argument 4 to be moved directly to register edx.
-; RUN: llc < %s -march=x86-64 -tailcallopt | grep movl | grep {7} | grep edx
-; Expect argument 6 to be moved directly to register r8.
-; RUN: llc < %s -march=x86-64 -tailcallopt | grep movl | grep {17} | grep r8
-
-%struct.s = type { i64, i64, i64, i64, i64, i64, i64, i64,
- i64, i64, i64, i64, i64, i64, i64, i64,
- i64, i64, i64, i64, i64, i64, i64, i64 }
-
-declare fastcc i64 @tailcallee(%struct.s* byval %a, i64 %val, i64 %val2, i64 %val3, i64 %val4, i64 %val5)
-
-
-define fastcc i64 @tailcaller(i64 %b, %struct.s* byval %a) {
-entry:
- %tmp2 = getelementptr %struct.s* %a, i32 0, i32 1
- %tmp3 = load i64* %tmp2, align 8
- %tmp4 = tail call fastcc i64 @tailcallee(%struct.s* %a byval, i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
- ret i64 %tmp4
-}
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tailcallfp.ll b/libclamav/c++/llvm/test/CodeGen/X86/tailcallfp.ll
deleted file mode 100644
index c0b609a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tailcallfp.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel -tailcallopt | not grep call
-define fastcc i32 @bar(i32 %X, i32(double, i32) *%FP) {
- %Y = tail call fastcc i32 %FP(double 0.0, i32 %X)
- ret i32 %Y
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tailcallfp2.ll b/libclamav/c++/llvm/test/CodeGen/X86/tailcallfp2.ll
deleted file mode 100644
index 3841f51..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tailcallfp2.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86 -tailcallopt | grep {jmp} | grep {\\*%eax}
-
-declare i32 @putchar(i32)
-
-define fastcc i32 @checktail(i32 %x, i32* %f, i32 %g) nounwind {
- %tmp1 = icmp sgt i32 %x, 0
- br i1 %tmp1, label %if-then, label %if-else
-
-if-then:
- %fun_ptr = bitcast i32* %f to i32(i32, i32*, i32)*
- %arg1 = add i32 %x, -1
- call i32 @putchar(i32 90)
- %res = tail call fastcc i32 %fun_ptr( i32 %arg1, i32 * %f, i32 %g)
- ret i32 %res
-
-if-else:
- ret i32 %x
-}
-
-
-define i32 @main() nounwind {
- %f = bitcast i32 (i32, i32*, i32)* @checktail to i32*
- %res = tail call fastcc i32 @checktail( i32 10, i32* %f,i32 10)
- ret i32 %res
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tailcallpic1.ll b/libclamav/c++/llvm/test/CodeGen/X86/tailcallpic1.ll
deleted file mode 100644
index 60e3be5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tailcallpic1.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -tailcallopt -mtriple=i686-pc-linux-gnu -relocation-model=pic | grep TAILCALL
-
-define protected fastcc i32 @tailcallee(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
-entry:
- ret i32 %a3
-}
-
-define fastcc i32 @tailcaller(i32 %in1, i32 %in2) {
-entry:
- %tmp11 = tail call fastcc i32 @tailcallee( i32 %in1, i32 %in2, i32 %in1, i32 %in2 ) ; <i32> [#uses=1]
- ret i32 %tmp11
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tailcallpic2.ll b/libclamav/c++/llvm/test/CodeGen/X86/tailcallpic2.ll
deleted file mode 100644
index eaa7631..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tailcallpic2.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -tailcallopt -mtriple=i686-pc-linux-gnu -relocation-model=pic | grep -v TAILCALL
-
-define fastcc i32 @tailcallee(i32 %a1, i32 %a2, i32 %a3, i32 %a4) {
-entry:
- ret i32 %a3
-}
-
-define fastcc i32 @tailcaller(i32 %in1, i32 %in2) {
-entry:
- %tmp11 = tail call fastcc i32 @tailcallee( i32 %in1, i32 %in2, i32 %in1, i32 %in2 ) ; <i32> [#uses=1]
- ret i32 %tmp11
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tailcallstack64.ll b/libclamav/c++/llvm/test/CodeGen/X86/tailcallstack64.ll
deleted file mode 100644
index d05dff8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tailcallstack64.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s -tailcallopt -march=x86-64 -post-RA-scheduler=true | FileCheck %s
-
-; Check that lowered arguments on the stack do not overwrite each other.
-; Add %in1 %p1 to a different temporary register (%eax).
-; CHECK: movl %edi, %eax
-; Move param %in1 to temp register (%r10d).
-; CHECK: movl 40(%rsp), %r10d
-; Move param %in2 to stack.
-; CHECK: movl %r10d, 32(%rsp)
-; Move result of addition to stack.
-; CHECK: movl %eax, 40(%rsp)
-; Eventually, do a TAILCALL
-; CHECK: TAILCALL
-
-declare fastcc i32 @tailcallee(i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5, i32 %p6, i32 %a, i32 %b) nounwind
-
-define fastcc i32 @tailcaller(i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5, i32 %p6, i32 %in1, i32 %in2) nounwind {
-entry:
- %tmp = add i32 %in1, %p1
- %retval = tail call fastcc i32 @tailcallee(i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5, i32 %p6, i32 %in2,i32 %tmp)
- ret i32 %retval
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/test-nofold.ll b/libclamav/c++/llvm/test/CodeGen/X86/test-nofold.ll
deleted file mode 100644
index f1063dc..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/test-nofold.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah | FileCheck %s
-; rdar://5752025
-
-; We want:
-; CHECK: movl 4(%esp), %ecx
-; CHECK-NEXT: andl $15, %ecx
-; CHECK-NEXT: movl $42, %eax
-; CHECK-NEXT: cmovel %ecx, %eax
-; CHECK-NEXT: ret
-;
-; We don't want:
-; movl 4(%esp), %eax
-; movl %eax, %ecx # bad: extra copy
-; andl $15, %ecx
-; testl $15, %eax # bad: peep obstructed
-; movl $42, %eax
-; cmovel %ecx, %eax
-; ret
-;
-; We also don't want:
-; movl $15, %ecx # bad: larger encoding
-; andl 4(%esp), %ecx
-; movl $42, %eax
-; cmovel %ecx, %eax
-; ret
-;
-; We also don't want:
-; movl 4(%esp), %ecx
-; andl $15, %ecx
-; testl %ecx, %ecx # bad: unnecessary test
-; movl $42, %eax
-; cmovel %ecx, %eax
-; ret
-
-define i32 @t1(i32 %X) nounwind {
-entry:
- %tmp2 = and i32 %X, 15 ; <i32> [#uses=2]
- %tmp4 = icmp eq i32 %tmp2, 0 ; <i1> [#uses=1]
- %retval = select i1 %tmp4, i32 %tmp2, i32 42 ; <i32> [#uses=1]
- ret i32 %retval
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/test-shrink-bug.ll b/libclamav/c++/llvm/test/CodeGen/X86/test-shrink-bug.ll
deleted file mode 100644
index 64631ea..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/test-shrink-bug.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc < %s | FileCheck %s
-
-; Codegen shouldn't reduce the comparison down to testb $-1, %al
-; because that changes the result of the signed test.
-; PR5132
-; CHECK: testw $255, %ax
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin10.0"
-
-@g_14 = global i8 -6, align 1 ; <i8*> [#uses=1]
-
-declare i32 @func_16(i8 signext %p_19, i32 %p_20) nounwind
-
-define i32 @func_35(i64 %p_38) nounwind ssp {
-entry:
- %tmp = load i8* @g_14 ; <i8> [#uses=2]
- %conv = zext i8 %tmp to i32 ; <i32> [#uses=1]
- %cmp = icmp sle i32 1, %conv ; <i1> [#uses=1]
- %conv2 = zext i1 %cmp to i32 ; <i32> [#uses=1]
- %call = call i32 @func_16(i8 signext %tmp, i32 %conv2) ssp ; <i32> [#uses=1]
- ret i32 1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/test-shrink.ll b/libclamav/c++/llvm/test/CodeGen/X86/test-shrink.ll
deleted file mode 100644
index 1d63693..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/test-shrink.ll
+++ /dev/null
@@ -1,158 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s --check-prefix=CHECK-64
-; RUN: llc < %s -march=x86 | FileCheck %s --check-prefix=CHECK-32
-
-; CHECK-64: g64xh:
-; CHECK-64: testb $8, %ah
-; CHECK-64: ret
-; CHECK-32: g64xh:
-; CHECK-32: testb $8, %ah
-; CHECK-32: ret
-define void @g64xh(i64 inreg %x) nounwind {
- %t = and i64 %x, 2048
- %s = icmp eq i64 %t, 0
- br i1 %s, label %yes, label %no
-
-yes:
- call void @bar()
- ret void
-no:
- ret void
-}
-; CHECK-64: g64xl:
-; CHECK-64: testb $8, %dil
-; CHECK-64: ret
-; CHECK-32: g64xl:
-; CHECK-32: testb $8, %al
-; CHECK-32: ret
-define void @g64xl(i64 inreg %x) nounwind {
- %t = and i64 %x, 8
- %s = icmp eq i64 %t, 0
- br i1 %s, label %yes, label %no
-
-yes:
- call void @bar()
- ret void
-no:
- ret void
-}
-; CHECK-64: g32xh:
-; CHECK-64: testb $8, %ah
-; CHECK-64: ret
-; CHECK-32: g32xh:
-; CHECK-32: testb $8, %ah
-; CHECK-32: ret
-define void @g32xh(i32 inreg %x) nounwind {
- %t = and i32 %x, 2048
- %s = icmp eq i32 %t, 0
- br i1 %s, label %yes, label %no
-
-yes:
- call void @bar()
- ret void
-no:
- ret void
-}
-; CHECK-64: g32xl:
-; CHECK-64: testb $8, %dil
-; CHECK-64: ret
-; CHECK-32: g32xl:
-; CHECK-32: testb $8, %al
-; CHECK-32: ret
-define void @g32xl(i32 inreg %x) nounwind {
- %t = and i32 %x, 8
- %s = icmp eq i32 %t, 0
- br i1 %s, label %yes, label %no
-
-yes:
- call void @bar()
- ret void
-no:
- ret void
-}
-; CHECK-64: g16xh:
-; CHECK-64: testb $8, %ah
-; CHECK-64: ret
-; CHECK-32: g16xh:
-; CHECK-32: testb $8, %ah
-; CHECK-32: ret
-define void @g16xh(i16 inreg %x) nounwind {
- %t = and i16 %x, 2048
- %s = icmp eq i16 %t, 0
- br i1 %s, label %yes, label %no
-
-yes:
- call void @bar()
- ret void
-no:
- ret void
-}
-; CHECK-64: g16xl:
-; CHECK-64: testb $8, %dil
-; CHECK-64: ret
-; CHECK-32: g16xl:
-; CHECK-32: testb $8, %al
-; CHECK-32: ret
-define void @g16xl(i16 inreg %x) nounwind {
- %t = and i16 %x, 8
- %s = icmp eq i16 %t, 0
- br i1 %s, label %yes, label %no
-
-yes:
- call void @bar()
- ret void
-no:
- ret void
-}
-; CHECK-64: g64x16:
-; CHECK-64: testw $-32640, %di
-; CHECK-64: ret
-; CHECK-32: g64x16:
-; CHECK-32: testw $-32640, %ax
-; CHECK-32: ret
-define void @g64x16(i64 inreg %x) nounwind {
- %t = and i64 %x, 32896
- %s = icmp eq i64 %t, 0
- br i1 %s, label %yes, label %no
-
-yes:
- call void @bar()
- ret void
-no:
- ret void
-}
-; CHECK-64: g32x16:
-; CHECK-64: testw $-32640, %di
-; CHECK-64: ret
-; CHECK-32: g32x16:
-; CHECK-32: testw $-32640, %ax
-; CHECK-32: ret
-define void @g32x16(i32 inreg %x) nounwind {
- %t = and i32 %x, 32896
- %s = icmp eq i32 %t, 0
- br i1 %s, label %yes, label %no
-
-yes:
- call void @bar()
- ret void
-no:
- ret void
-}
-; CHECK-64: g64x32:
-; CHECK-64: testl $268468352, %edi
-; CHECK-64: ret
-; CHECK-32: g64x32:
-; CHECK-32: testl $268468352, %eax
-; CHECK-32: ret
-define void @g64x32(i64 inreg %x) nounwind {
- %t = and i64 %x, 268468352
- %s = icmp eq i64 %t, 0
- br i1 %s, label %yes, label %no
-
-yes:
- call void @bar()
- ret void
-no:
- ret void
-}
-
-declare void @bar()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/testl-commute.ll b/libclamav/c++/llvm/test/CodeGen/X86/testl-commute.ll
deleted file mode 100644
index 3d5f672..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/testl-commute.ll
+++ /dev/null
@@ -1,56 +0,0 @@
-; RUN: llc < %s | grep {testl.*\(%r.i\), %} | count 3
-; rdar://5671654
-; The loads should fold into the testl instructions, no matter how
-; the inputs are commuted.
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin7"
-
-define i32 @test(i32* %P, i32* %G) nounwind {
-entry:
- %0 = load i32* %P, align 4 ; <i32> [#uses=3]
- %1 = load i32* %G, align 4 ; <i32> [#uses=1]
- %2 = and i32 %1, %0 ; <i32> [#uses=1]
- %3 = icmp eq i32 %2, 0 ; <i1> [#uses=1]
- br i1 %3, label %bb1, label %bb
-
-bb: ; preds = %entry
- %4 = tail call i32 @bar() nounwind ; <i32> [#uses=0]
- ret i32 %0
-
-bb1: ; preds = %entry
- ret i32 %0
-}
-
-define i32 @test2(i32* %P, i32* %G) nounwind {
-entry:
- %0 = load i32* %P, align 4 ; <i32> [#uses=3]
- %1 = load i32* %G, align 4 ; <i32> [#uses=1]
- %2 = and i32 %0, %1 ; <i32> [#uses=1]
- %3 = icmp eq i32 %2, 0 ; <i1> [#uses=1]
- br i1 %3, label %bb1, label %bb
-
-bb: ; preds = %entry
- %4 = tail call i32 @bar() nounwind ; <i32> [#uses=0]
- ret i32 %0
-
-bb1: ; preds = %entry
- ret i32 %0
-}
-define i32 @test3(i32* %P, i32* %G) nounwind {
-entry:
- %0 = load i32* %P, align 4 ; <i32> [#uses=3]
- %1 = load i32* %G, align 4 ; <i32> [#uses=1]
- %2 = and i32 %0, %1 ; <i32> [#uses=1]
- %3 = icmp eq i32 %2, 0 ; <i1> [#uses=1]
- br i1 %3, label %bb1, label %bb
-
-bb: ; preds = %entry
- %4 = tail call i32 @bar() nounwind ; <i32> [#uses=0]
- ret i32 %1
-
-bb1: ; preds = %entry
- ret i32 %1
-}
-
-declare i32 @bar()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tls-pic.ll b/libclamav/c++/llvm/test/CodeGen/X86/tls-pic.ll
deleted file mode 100644
index 4cad837..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tls-pic.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -relocation-model=pic | FileCheck -check-prefix=X32 %s
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu -relocation-model=pic | FileCheck -check-prefix=X64 %s
-
-@i = thread_local global i32 15
-
-define i32 @f1() {
-entry:
- %tmp1 = load i32* @i
- ret i32 %tmp1
-}
-
-; X32: f1:
-; X32: leal i@TLSGD(,%ebx), %eax
-; X32: call ___tls_get_addr@PLT
-
-; X64: f1:
-; X64: leaq i@TLSGD(%rip), %rdi
-; X64: call __tls_get_addr@PLT
-
-
-@i2 = external thread_local global i32
-
-define i32* @f2() {
-entry:
- ret i32* @i
-}
-
-; X32: f2:
-; X32: leal i@TLSGD(,%ebx), %eax
-; X32: call ___tls_get_addr@PLT
-
-; X64: f2:
-; X64: leaq i@TLSGD(%rip), %rdi
-; X64: call __tls_get_addr@PLT
-
-
-
-define i32 @f3() {
-entry:
- %tmp1 = load i32* @i ; <i32> [#uses=1]
- ret i32 %tmp1
-}
-
-; X32: f3:
-; X32: leal i@TLSGD(,%ebx), %eax
-; X32: call ___tls_get_addr@PLT
-
-; X64: f3:
-; X64: leaq i@TLSGD(%rip), %rdi
-; X64: call __tls_get_addr@PLT
-
-
-define i32* @f4() nounwind {
-entry:
- ret i32* @i
-}
-
-; X32: f4:
-; X32: leal i@TLSGD(,%ebx), %eax
-; X32: call ___tls_get_addr@PLT
-
-; X64: f4:
-; X64: leaq i@TLSGD(%rip), %rdi
-; X64: call __tls_get_addr@PLT
-
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tls1.ll b/libclamav/c++/llvm/test/CodeGen/X86/tls1.ll
deleted file mode 100644
index 0cae5c4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tls1.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu > %t
-; RUN: grep {movl %gs:i@NTPOFF, %eax} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu > %t2
-; RUN: grep {movl %fs:i@TPOFF, %eax} %t2
-
-@i = thread_local global i32 15
-
-define i32 @f() nounwind {
-entry:
- %tmp1 = load i32* @i
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tls10.ll b/libclamav/c++/llvm/test/CodeGen/X86/tls10.ll
deleted file mode 100644
index fb61596..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tls10.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu > %t
-; RUN: grep {movl %gs:0, %eax} %t
-; RUN: grep {leal i@NTPOFF(%eax), %eax} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu > %t2
-; RUN: grep {movq %fs:0, %rax} %t2
-; RUN: grep {leaq i@TPOFF(%rax), %rax} %t2
-
-@i = external hidden thread_local global i32
-
-define i32* @f() {
-entry:
- ret i32* @i
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tls11.ll b/libclamav/c++/llvm/test/CodeGen/X86/tls11.ll
deleted file mode 100644
index a2c1a1f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tls11.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu > %t
-; RUN: grep {movw %gs:i@NTPOFF, %ax} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu > %t2
-; RUN: grep {movw %fs:i@TPOFF, %ax} %t2
-
-@i = thread_local global i16 15
-
-define i16 @f() {
-entry:
- %tmp1 = load i16* @i
- ret i16 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tls12.ll b/libclamav/c++/llvm/test/CodeGen/X86/tls12.ll
deleted file mode 100644
index c29f6ad..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tls12.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu > %t
-; RUN: grep {movb %gs:i@NTPOFF, %al} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu > %t2
-; RUN: grep {movb %fs:i@TPOFF, %al} %t2
-
-@i = thread_local global i8 15
-
-define i8 @f() {
-entry:
- %tmp1 = load i8* @i
- ret i8 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tls13.ll b/libclamav/c++/llvm/test/CodeGen/X86/tls13.ll
deleted file mode 100644
index 08778ec..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tls13.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu > %t
-; RUN: grep {movswl %gs:i@NTPOFF, %eax} %t
-; RUN: grep {movzwl %gs:j@NTPOFF, %eax} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu > %t2
-; RUN: grep {movswl %fs:i@TPOFF, %edi} %t2
-; RUN: grep {movzwl %fs:j@TPOFF, %edi} %t2
-
-@i = thread_local global i16 0
-@j = thread_local global i16 0
-
-define void @f() nounwind optsize {
-entry:
- %0 = load i16* @i, align 2
- %1 = sext i16 %0 to i32
- tail call void @g(i32 %1) nounwind
- %2 = load i16* @j, align 2
- %3 = zext i16 %2 to i32
- tail call void @h(i32 %3) nounwind
- ret void
-}
-
-declare void @g(i32)
-
-declare void @h(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tls14.ll b/libclamav/c++/llvm/test/CodeGen/X86/tls14.ll
deleted file mode 100644
index 88426dd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tls14.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu > %t
-; RUN: grep {movsbl %gs:i@NTPOFF, %eax} %t
-; RUN: grep {movzbl %gs:j@NTPOFF, %eax} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu > %t2
-; RUN: grep {movsbl %fs:i@TPOFF, %edi} %t2
-; RUN: grep {movzbl %fs:j@TPOFF, %edi} %t2
-
-@i = thread_local global i8 0
-@j = thread_local global i8 0
-
-define void @f() nounwind optsize {
-entry:
- %0 = load i8* @i, align 2
- %1 = sext i8 %0 to i32
- tail call void @g(i32 %1) nounwind
- %2 = load i8* @j, align 2
- %3 = zext i8 %2 to i32
- tail call void @h(i32 %3) nounwind
- ret void
-}
-
-declare void @g(i32)
-
-declare void @h(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tls15.ll b/libclamav/c++/llvm/test/CodeGen/X86/tls15.ll
deleted file mode 100644
index 7abf070..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tls15.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu > %t
-; RUN: grep {movl %gs:0, %eax} %t | count 1
-; RUN: grep {leal i@NTPOFF(%eax), %ecx} %t
-; RUN: grep {leal j@NTPOFF(%eax), %eax} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu > %t2
-; RUN: grep {movq %fs:0, %rax} %t2 | count 1
-; RUN: grep {leaq i@TPOFF(%rax), %rcx} %t2
-; RUN: grep {leaq j@TPOFF(%rax), %rax} %t2
-
-@i = thread_local global i32 0
-@j = thread_local global i32 0
-
-define void @f(i32** %a, i32** %b) {
-entry:
- store i32* @i, i32** %a, align 8
- store i32* @j, i32** %b, align 8
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tls2.ll b/libclamav/c++/llvm/test/CodeGen/X86/tls2.ll
deleted file mode 100644
index 5a94296..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tls2.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu > %t
-; RUN: grep {movl %gs:0, %eax} %t
-; RUN: grep {leal i@NTPOFF(%eax), %eax} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu > %t2
-; RUN: grep {movq %fs:0, %rax} %t2
-; RUN: grep {leaq i@TPOFF(%rax), %rax} %t2
-
-@i = thread_local global i32 15
-
-define i32* @f() {
-entry:
- ret i32* @i
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tls3.ll b/libclamav/c++/llvm/test/CodeGen/X86/tls3.ll
deleted file mode 100644
index 7327cc4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tls3.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu > %t
-; RUN: grep {movl i@INDNTPOFF, %eax} %t
-; RUN: grep {movl %gs:(%eax), %eax} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu > %t2
-; RUN: grep {movq i@GOTTPOFF(%rip), %rax} %t2
-; RUN: grep {movl %fs:(%rax), %eax} %t2
-
-@i = external thread_local global i32 ; <i32*> [#uses=2]
-
-define i32 @f() nounwind {
-entry:
- %tmp1 = load i32* @i ; <i32> [#uses=1]
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tls4.ll b/libclamav/c++/llvm/test/CodeGen/X86/tls4.ll
deleted file mode 100644
index d2e40e3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tls4.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu > %t
-; RUN: grep {movl %gs:0, %eax} %t
-; RUN: grep {addl i@INDNTPOFF, %eax} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu > %t2
-; RUN: grep {movq %fs:0, %rax} %t2
-; RUN: grep {addq i@GOTTPOFF(%rip), %rax} %t2
-
-@i = external thread_local global i32 ; <i32*> [#uses=2]
-
-define i32* @f() {
-entry:
- ret i32* @i
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tls5.ll b/libclamav/c++/llvm/test/CodeGen/X86/tls5.ll
deleted file mode 100644
index 4d2cc02..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tls5.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu > %t
-; RUN: grep {movl %gs:i@NTPOFF, %eax} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu > %t2
-; RUN: grep {movl %fs:i@TPOFF, %eax} %t2
-
-@i = internal thread_local global i32 15
-
-define i32 @f() {
-entry:
- %tmp1 = load i32* @i
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tls6.ll b/libclamav/c++/llvm/test/CodeGen/X86/tls6.ll
deleted file mode 100644
index 505106e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tls6.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu > %t
-; RUN: grep {movl %gs:0, %eax} %t
-; RUN: grep {leal i@NTPOFF(%eax), %eax} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu > %t2
-; RUN: grep {movq %fs:0, %rax} %t2
-; RUN: grep {leaq i@TPOFF(%rax), %rax} %t2
-
-@i = internal thread_local global i32 15
-
-define i32* @f() {
-entry:
- ret i32* @i
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tls7.ll b/libclamav/c++/llvm/test/CodeGen/X86/tls7.ll
deleted file mode 100644
index e9116e7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tls7.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu > %t
-; RUN: grep {movl %gs:i@NTPOFF, %eax} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu > %t2
-; RUN: grep {movl %fs:i@TPOFF, %eax} %t2
-
-@i = hidden thread_local global i32 15
-
-define i32 @f() {
-entry:
- %tmp1 = load i32* @i
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tls8.ll b/libclamav/c++/llvm/test/CodeGen/X86/tls8.ll
deleted file mode 100644
index 375af94..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tls8.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu > %t
-; RUN: grep {movl %gs:0, %eax} %t
-; RUN: grep {leal i@NTPOFF(%eax), %eax} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu > %t2
-; RUN: grep {movq %fs:0, %rax} %t2
-; RUN: grep {leaq i@TPOFF(%rax), %rax} %t2
-
-@i = hidden thread_local global i32 15
-
-define i32* @f() {
-entry:
- ret i32* @i
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/tls9.ll b/libclamav/c++/llvm/test/CodeGen/X86/tls9.ll
deleted file mode 100644
index 214146f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/tls9.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu > %t
-; RUN: grep {movl %gs:i@NTPOFF, %eax} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu > %t2
-; RUN: grep {movl %fs:i@TPOFF, %eax} %t2
-
-@i = external hidden thread_local global i32
-
-define i32 @f() {
-entry:
- %tmp1 = load i32* @i
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/trap.ll b/libclamav/c++/llvm/test/CodeGen/X86/trap.ll
deleted file mode 100644
index 03ae6bf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/trap.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah | grep ud2
-define i32 @test() noreturn nounwind {
-entry:
- tail call void @llvm.trap( )
- unreachable
-}
-
-declare void @llvm.trap() nounwind
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/trunc-to-bool.ll b/libclamav/c++/llvm/test/CodeGen/X86/trunc-to-bool.ll
deleted file mode 100644
index 6062084..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/trunc-to-bool.ll
+++ /dev/null
@@ -1,59 +0,0 @@
-; An integer truncation to i1 should be done with an and instruction to make
-; sure only the LSBit survives. Test that this is the case both for a returned
-; value and as the operand of a branch.
-; RUN: llc < %s -march=x86 | FileCheck %s
-
-define i1 @test1(i32 %X) zeroext nounwind {
- %Y = trunc i32 %X to i1
- ret i1 %Y
-}
-; CHECK: test1:
-; CHECK: andl $1, %eax
-
-define i1 @test2(i32 %val, i32 %mask) nounwind {
-entry:
- %shifted = ashr i32 %val, %mask
- %anded = and i32 %shifted, 1
- %trunced = trunc i32 %anded to i1
- br i1 %trunced, label %ret_true, label %ret_false
-ret_true:
- ret i1 true
-ret_false:
- ret i1 false
-}
-; CHECK: test2:
-; CHECK: btl %eax
-
-define i32 @test3(i8* %ptr) nounwind {
- %val = load i8* %ptr
- %tmp = trunc i8 %val to i1
- br i1 %tmp, label %cond_true, label %cond_false
-cond_true:
- ret i32 21
-cond_false:
- ret i32 42
-}
-; CHECK: test3:
-; CHECK: testb $1, (%eax)
-
-define i32 @test4(i8* %ptr) nounwind {
- %tmp = ptrtoint i8* %ptr to i1
- br i1 %tmp, label %cond_true, label %cond_false
-cond_true:
- ret i32 21
-cond_false:
- ret i32 42
-}
-; CHECK: test4:
-; CHECK: testb $1, 4(%esp)
-
-define i32 @test5(double %d) nounwind {
- %tmp = fptosi double %d to i1
- br i1 %tmp, label %cond_true, label %cond_false
-cond_true:
- ret i32 21
-cond_false:
- ret i32 42
-}
-; CHECK: test5:
-; CHECK: testb $1
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/twoaddr-coalesce-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/twoaddr-coalesce-2.ll
deleted file mode 100644
index 6f16a25..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/twoaddr-coalesce-2.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -stats |& \
-; RUN: grep {twoaddrinstr} | grep {Number of instructions aggressively commuted}
-; rdar://6480363
-
-target triple = "i386-apple-darwin9.6"
-
-define <2 x double> @t(<2 x double> %A, <2 x double> %B, <2 x double> %C) nounwind readnone {
-entry:
- %tmp.i3 = bitcast <2 x double> %B to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp2.i = or <2 x i64> %tmp.i3, <i64 4607632778762754458, i64 4607632778762754458> ; <<2 x i64>> [#uses=1]
- %tmp3.i = bitcast <2 x i64> %tmp2.i to <2 x double> ; <<2 x double>> [#uses=1]
- %tmp.i2 = fadd <2 x double> %tmp3.i, %A ; <<2 x double>> [#uses=1]
- %tmp.i = fadd <2 x double> %tmp.i2, %C ; <<2 x double>> [#uses=1]
- ret <2 x double> %tmp.i
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/twoaddr-coalesce.ll b/libclamav/c++/llvm/test/CodeGen/X86/twoaddr-coalesce.ll
deleted file mode 100644
index 4c37225..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/twoaddr-coalesce.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | count 4
-; rdar://6523745
-
-@"\01LC" = internal constant [4 x i8] c"%d\0A\00" ; <[4 x i8]*> [#uses=1]
-
-define i32 @main() nounwind {
-bb1.thread:
- br label %bb1
-
-bb1: ; preds = %bb1, %bb1.thread
- %i.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ] ; <i32> [#uses=2]
- %0 = trunc i32 %i.0.reg2mem.0 to i8 ; <i8> [#uses=1]
- %1 = sdiv i8 %0, 2 ; <i8> [#uses=1]
- %2 = sext i8 %1 to i32 ; <i32> [#uses=1]
- %3 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), i32 %2) nounwind ; <i32> [#uses=0]
- %indvar.next = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2]
- %exitcond = icmp eq i32 %indvar.next, 258 ; <i1> [#uses=1]
- br i1 %exitcond, label %bb2, label %bb1
-
-bb2: ; preds = %bb1
- ret i32 0
-}
-
-declare i32 @printf(i8*, ...) nounwind
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/twoaddr-lea.ll b/libclamav/c++/llvm/test/CodeGen/X86/twoaddr-lea.ll
deleted file mode 100644
index a245ed7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/twoaddr-lea.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-;; X's live range extends beyond the shift, so the register allocator
-;; cannot coalesce it with Y. Because of this, a copy needs to be
-;; emitted before the shift to save the register value before it is
-;; clobbered. However, this copy is not needed if the register
-;; allocator turns the shift into an LEA. This also occurs for ADD.
-
-; Check that the shift gets turned into an LEA.
-; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
-; RUN: not grep {mov E.X, E.X}
-
-@G = external global i32 ; <i32*> [#uses=3]
-
-define i32 @test1(i32 %X, i32 %Y) {
- %Z = add i32 %X, %Y ; <i32> [#uses=1]
- volatile store i32 %Y, i32* @G
- volatile store i32 %Z, i32* @G
- ret i32 %X
-}
-
-define i32 @test2(i32 %X) {
- %Z = add i32 %X, 1 ; <i32> [#uses=1]
- volatile store i32 %Z, i32* @G
- ret i32 %X
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/twoaddr-pass-sink.ll b/libclamav/c++/llvm/test/CodeGen/X86/twoaddr-pass-sink.ll
deleted file mode 100644
index 077fee0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/twoaddr-pass-sink.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -stats |& grep {Number of 3-address instructions sunk}
-
-define void @t2(<2 x i64>* %vDct, <2 x i64>* %vYp, i8* %skiplist, <2 x i64> %a1) nounwind {
-entry:
- %tmp25 = bitcast <2 x i64> %a1 to <8 x i16> ; <<8 x i16>> [#uses=1]
- br label %bb
-bb: ; preds = %bb, %entry
- %skiplist_addr.0.rec = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
- %vYp_addr.0.rec = shl i32 %skiplist_addr.0.rec, 3 ; <i32> [#uses=3]
- %vDct_addr.0 = getelementptr <2 x i64>* %vDct, i32 %vYp_addr.0.rec ; <<2 x i64>*> [#uses=1]
- %vYp_addr.0 = getelementptr <2 x i64>* %vYp, i32 %vYp_addr.0.rec ; <<2 x i64>*> [#uses=1]
- %skiplist_addr.0 = getelementptr i8* %skiplist, i32 %skiplist_addr.0.rec ; <i8*> [#uses=1]
- %vDct_addr.0.sum43 = or i32 %vYp_addr.0.rec, 1 ; <i32> [#uses=1]
- %tmp7 = getelementptr <2 x i64>* %vDct, i32 %vDct_addr.0.sum43 ; <<2 x i64>*> [#uses=1]
- %tmp8 = load <2 x i64>* %tmp7, align 16 ; <<2 x i64>> [#uses=1]
- %tmp11 = load <2 x i64>* %vDct_addr.0, align 16 ; <<2 x i64>> [#uses=1]
- %tmp13 = bitcast <2 x i64> %tmp8 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp15 = bitcast <2 x i64> %tmp11 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp16 = shufflevector <8 x i16> %tmp15, <8 x i16> %tmp13, <8 x i32> < i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11 > ; <<8 x i16>> [#uses=1]
- %tmp26 = mul <8 x i16> %tmp25, %tmp16 ; <<8 x i16>> [#uses=1]
- %tmp27 = bitcast <8 x i16> %tmp26 to <2 x i64> ; <<2 x i64>> [#uses=1]
- store <2 x i64> %tmp27, <2 x i64>* %vYp_addr.0, align 16
- %tmp37 = load i8* %skiplist_addr.0, align 1 ; <i8> [#uses=1]
- %tmp38 = icmp eq i8 %tmp37, 0 ; <i1> [#uses=1]
- %indvar.next = add i32 %skiplist_addr.0.rec, 1 ; <i32> [#uses=1]
- br i1 %tmp38, label %return, label %bb
-return: ; preds = %bb
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/twoaddr-remat.ll b/libclamav/c++/llvm/test/CodeGen/X86/twoaddr-remat.ll
deleted file mode 100644
index 4940c78..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/twoaddr-remat.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; RUN: llc < %s -march=x86 | grep 59796 | count 3
-
- %Args = type %Value*
- %Exec = type opaque*
- %Identifier = type opaque*
- %JSFunction = type %Value (%Exec, %Scope, %Value, %Args)
- %PropertyNameArray = type opaque*
- %Scope = type opaque*
- %Value = type opaque*
-
-declare i1 @X1(%Exec) readonly
-
-declare %Value @X2(%Exec)
-
-declare i32 @X3(%Exec, %Value)
-
-declare %Value @X4(i32) readnone
-
-define internal %Value @fast3bitlookup(%Exec %exec, %Scope %scope, %Value %this, %Args %args) nounwind {
-prologue:
- %eh_check = tail call i1 @X1( %Exec %exec ) readonly ; <i1> [#uses=1]
- br i1 %eh_check, label %exception, label %no_exception
-
-exception: ; preds = %no_exception, %prologue
- %rethrow_result = tail call %Value @X2( %Exec %exec ) ; <%Value> [#uses=1]
- ret %Value %rethrow_result
-
-no_exception: ; preds = %prologue
- %args_intptr = bitcast %Args %args to i32* ; <i32*> [#uses=1]
- %argc_val = load i32* %args_intptr ; <i32> [#uses=1]
- %cmpParamArgc = icmp sgt i32 %argc_val, 0 ; <i1> [#uses=1]
- %arg_ptr = getelementptr %Args %args, i32 1 ; <%Args> [#uses=1]
- %arg_val = load %Args %arg_ptr ; <%Value> [#uses=1]
- %ext_arg_val = select i1 %cmpParamArgc, %Value %arg_val, %Value inttoptr (i32 5 to %Value) ; <%Value> [#uses=1]
- %toInt325 = tail call i32 @X3( %Exec %exec, %Value %ext_arg_val ) ; <i32> [#uses=3]
- %eh_check6 = tail call i1 @X1( %Exec %exec ) readonly ; <i1> [#uses=1]
- br i1 %eh_check6, label %exception, label %no_exception7
-
-no_exception7: ; preds = %no_exception
- %shl_tmp_result = shl i32 %toInt325, 1 ; <i32> [#uses=1]
- %rhs_masked13 = and i32 %shl_tmp_result, 14 ; <i32> [#uses=1]
- %ashr_tmp_result = lshr i32 59796, %rhs_masked13 ; <i32> [#uses=1]
- %and_tmp_result15 = and i32 %ashr_tmp_result, 3 ; <i32> [#uses=1]
- %ashr_tmp_result3283 = lshr i32 %toInt325, 2 ; <i32> [#uses=1]
- %rhs_masked38 = and i32 %ashr_tmp_result3283, 14 ; <i32> [#uses=1]
- %ashr_tmp_result39 = lshr i32 59796, %rhs_masked38 ; <i32> [#uses=1]
- %and_tmp_result41 = and i32 %ashr_tmp_result39, 3 ; <i32> [#uses=1]
- %addconv = add i32 %and_tmp_result15, %and_tmp_result41 ; <i32> [#uses=1]
- %ashr_tmp_result6181 = lshr i32 %toInt325, 5 ; <i32> [#uses=1]
- %rhs_masked67 = and i32 %ashr_tmp_result6181, 6 ; <i32> [#uses=1]
- %ashr_tmp_result68 = lshr i32 59796, %rhs_masked67 ; <i32> [#uses=1]
- %and_tmp_result70 = and i32 %ashr_tmp_result68, 3 ; <i32> [#uses=1]
- %addconv82 = add i32 %addconv, %and_tmp_result70 ; <i32> [#uses=3]
- %rangetmp = add i32 %addconv82, 536870912 ; <i32> [#uses=1]
- %rangecmp = icmp ult i32 %rangetmp, 1073741824 ; <i1> [#uses=1]
- br i1 %rangecmp, label %NumberLiteralIntFast, label %NumberLiteralIntSlow
-
-NumberLiteralIntFast: ; preds = %no_exception7
- %imm_shift = shl i32 %addconv82, 2 ; <i32> [#uses=1]
- %imm_or = or i32 %imm_shift, 3 ; <i32> [#uses=1]
- %imm_val = inttoptr i32 %imm_or to %Value ; <%Value> [#uses=1]
- ret %Value %imm_val
-
-NumberLiteralIntSlow: ; preds = %no_exception7
- %toVal = call %Value @X4( i32 %addconv82 ) ; <%Value> [#uses=1]
- ret %Value %toVal
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/uint_to_fp-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/uint_to_fp-2.ll
deleted file mode 100644
index da5105d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/uint_to_fp-2.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movsd | count 1
-; rdar://6504833
-
-define float @f(i32 %x) nounwind readnone {
-entry:
- %0 = uitofp i32 %x to float
- ret float %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/uint_to_fp.ll b/libclamav/c++/llvm/test/CodeGen/X86/uint_to_fp.ll
deleted file mode 100644
index 41ee194..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/uint_to_fp.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah | not grep {sub.*esp}
-; RUN: llc < %s -march=x86 -mcpu=yonah | grep cvtsi2ss
-; rdar://6034396
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i386-apple-darwin8"
-
-define void @test(i32 %x, float* %y) nounwind {
-entry:
- lshr i32 %x, 23 ; <i32>:0 [#uses=1]
- uitofp i32 %0 to float ; <float>:1 [#uses=1]
- store float %1, float* %y
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/umul-with-carry.ll b/libclamav/c++/llvm/test/CodeGen/X86/umul-with-carry.ll
deleted file mode 100644
index 7416051..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/umul-with-carry.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -march=x86 | grep {jc} | count 1
-; XFAIL: *
-
-; FIXME: umul-with-overflow not supported yet.
-
-@ok = internal constant [4 x i8] c"%d\0A\00"
-@no = internal constant [4 x i8] c"no\0A\00"
-
-define i1 @func(i32 %v1, i32 %v2) nounwind {
-entry:
- %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
- %sum = extractvalue {i32, i1} %t, 0
- %obit = extractvalue {i32, i1} %t, 1
- br i1 %obit, label %carry, label %normal
-
-normal:
- %t1 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @ok, i32 0, i32 0), i32 %sum ) nounwind
- ret i1 true
-
-carry:
- %t2 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @no, i32 0, i32 0) ) nounwind
- ret i1 false
-}
-
-declare i32 @printf(i8*, ...) nounwind
-declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/umul-with-overflow.ll b/libclamav/c++/llvm/test/CodeGen/X86/umul-with-overflow.ll
deleted file mode 100644
index d522bd8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/umul-with-overflow.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 | grep "\\\\\\\<mul"
-
-declare {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
-define i1 @a(i32 %x) zeroext nounwind {
- %res = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 3)
- %obil = extractvalue {i32, i1} %res, 1
- ret i1 %obil
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/unaligned-load.ll b/libclamav/c++/llvm/test/CodeGen/X86/unaligned-load.ll
deleted file mode 100644
index b61803d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/unaligned-load.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10.0 -relocation-model=dynamic-no-pic --asm-verbose=0 | FileCheck %s
-
-@.str1 = internal constant [31 x i8] c"DHRYSTONE PROGRAM, SOME STRING\00", align 8
-@.str3 = internal constant [31 x i8] c"DHRYSTONE PROGRAM, 2'ND STRING\00", align 8
-
-define void @func() nounwind ssp {
-entry:
- %String2Loc = alloca [31 x i8], align 1
- br label %bb
-
-bb:
- %String2Loc9 = getelementptr inbounds [31 x i8]* %String2Loc, i64 0, i64 0
- call void @llvm.memcpy.i64(i8* %String2Loc9, i8* getelementptr inbounds ([31 x i8]* @.str3, i64 0, i64 0), i64 31, i32 1)
-; CHECK: movups _.str3
- br label %bb
-
-return:
- ret void
-}
-
-declare void @llvm.memcpy.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind
-
-; CHECK: .align 3
-; CHECK-NEXT: _.str1:
-; CHECK-NEXT: .asciz "DHRYSTONE PROGRAM, SOME STRING"
-; CHECK: .align 3
-; CHECK-NEXT: _.str3:
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/urem-i8-constant.ll b/libclamav/c++/llvm/test/CodeGen/X86/urem-i8-constant.ll
deleted file mode 100644
index e3cb69c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/urem-i8-constant.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86 | grep 111
-
-define i8 @foo(i8 %tmp325) {
- %t546 = urem i8 %tmp325, 37
- ret i8 %t546
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/use-add-flags.ll b/libclamav/c++/llvm/test/CodeGen/X86/use-add-flags.ll
deleted file mode 100644
index c2f0c23..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/use-add-flags.ll
+++ /dev/null
@@ -1,56 +0,0 @@
-; RUN: llc < %s -march=x86-64 -o - | FileCheck %s
-
-; Reuse the flags value from the add instructions instead of emitting separate
-; testl instructions.
-
-; Use the flags on the add.
-
-; CHECK: test1:
-; CHECK: addl (%rdi), %esi
-; CHECK-NEXT: movl %edx, %eax
-; CHECK-NEXT: cmovnsl %ecx, %eax
-; CHECK-NEXT: ret
-
-define i32 @test1(i32* %x, i32 %y, i32 %a, i32 %b) nounwind {
- %tmp2 = load i32* %x, align 4 ; <i32> [#uses=1]
- %tmp4 = add i32 %tmp2, %y ; <i32> [#uses=1]
- %tmp5 = icmp slt i32 %tmp4, 0 ; <i1> [#uses=1]
- %tmp.0 = select i1 %tmp5, i32 %a, i32 %b ; <i32> [#uses=1]
- ret i32 %tmp.0
-}
-
-declare void @foo(i32)
-
-; Don't use the flags result of the and here, since the and has no
-; other use. A simple test is better.
-
-; CHECK: test2:
-; CHECK: testb $16, %dil
-
-define void @test2(i32 %x) nounwind {
- %y = and i32 %x, 16
- %t = icmp eq i32 %y, 0
- br i1 %t, label %true, label %false
-true:
- call void @foo(i32 %x)
- ret void
-false:
- ret void
-}
-
-; Do use the flags result of the and here, since the and has another use.
-
-; CHECK: test3:
-; CHECK: andl $16, %edi
-; CHECK-NEXT: jne
-
-define void @test3(i32 %x) nounwind {
- %y = and i32 %x, 16
- %t = icmp eq i32 %y, 0
- br i1 %t, label %true, label %false
-true:
- call void @foo(i32 %y)
- ret void
-false:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/v4f32-immediate.ll b/libclamav/c++/llvm/test/CodeGen/X86/v4f32-immediate.ll
deleted file mode 100644
index b5ebaa7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/v4f32-immediate.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse | grep movaps
-
-define <4 x float> @foo() {
- ret <4 x float> <float 0x4009C9D0A0000000, float 0x4002666660000000, float 0x3FF3333340000000, float 0x3FB99999A0000000>
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/variable-sized-darwin-bzero.ll b/libclamav/c++/llvm/test/CodeGen/X86/variable-sized-darwin-bzero.ll
deleted file mode 100644
index 4817db2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/variable-sized-darwin-bzero.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -mtriple=i686-apple-darwin10 | grep __bzero
-
-declare void @llvm.memset.i64(i8*, i8, i64, i32)
-
-define void @foo(i8* %p, i64 %n) {
- call void @llvm.memset.i64(i8* %p, i8 0, i64 %n, i32 4)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/variadic-node-pic.ll b/libclamav/c++/llvm/test/CodeGen/X86/variadic-node-pic.ll
deleted file mode 100644
index 1182a30..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/variadic-node-pic.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -relocation-model=pic -code-model=large
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin8"
-
-declare void @xscanf(i64) nounwind
-
-define void @foo() nounwind {
- call void (i64)* @xscanf( i64 0 ) nounwind
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec-trunc-store.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec-trunc-store.ll
deleted file mode 100644
index ea1a151..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec-trunc-store.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86-64 -disable-mmx | grep punpcklwd | count 2
-
-define void @foo() nounwind {
- %cti69 = trunc <8 x i32> undef to <8 x i16> ; <<8 x i16>> [#uses=1]
- store <8 x i16> %cti69, <8 x i16>* undef
- ret void
-}
-
-define void @bar() nounwind {
- %cti44 = trunc <4 x i32> undef to <4 x i16> ; <<4 x i16>> [#uses=1]
- store <4 x i16> %cti44, <4 x i16>* undef
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_add.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_add.ll
deleted file mode 100644
index 7c77d11..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_add.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-
-define <2 x i64> @test(<2 x i64> %a, <2 x i64> %b) {
-entry:
- %tmp9 = add <2 x i64> %b, %a ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp9
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_align.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_align.ll
deleted file mode 100644
index e273115..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_align.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s -mcpu=yonah -relocation-model=static | grep movaps | count 2
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin8"
-
-%f4 = type <4 x float>
-
-@G = external global { float,float,float,float}, align 16
-
-define %f4 @test1(float %W, float %X, float %Y, float %Z) nounwind {
- %tmp = insertelement %f4 undef, float %W, i32 0
- %tmp2 = insertelement %f4 %tmp, float %X, i32 1
- %tmp4 = insertelement %f4 %tmp2, float %Y, i32 2
- %tmp6 = insertelement %f4 %tmp4, float %Z, i32 3
- ret %f4 %tmp6
-}
-
-define %f4 @test2() nounwind {
- %Wp = getelementptr { float,float,float,float}* @G, i32 0, i32 0
- %Xp = getelementptr { float,float,float,float}* @G, i32 0, i32 1
- %Yp = getelementptr { float,float,float,float}* @G, i32 0, i32 2
- %Zp = getelementptr { float,float,float,float}* @G, i32 0, i32 3
-
- %W = load float* %Wp
- %X = load float* %Xp
- %Y = load float* %Yp
- %Z = load float* %Zp
-
- %tmp = insertelement %f4 undef, float %W, i32 0
- %tmp2 = insertelement %f4 %tmp, float %X, i32 1
- %tmp4 = insertelement %f4 %tmp2, float %Y, i32 2
- %tmp6 = insertelement %f4 %tmp4, float %Z, i32 3
- ret %f4 %tmp6
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_call.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_call.ll
deleted file mode 100644
index b3efc7b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_call.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mtriple=i686-apple-darwin8 | \
-; RUN: grep {subl.*60}
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mtriple=i686-apple-darwin8 | \
-; RUN: grep {movaps.*32}
-
-
-define void @test() {
- tail call void @xx( i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, <2 x i64> bitcast (<4 x i32> < i32 4, i32 3, i32 2, i32 1 > to <2 x i64>), <2 x i64> bitcast (<4 x i32> < i32 8, i32 7, i32 6, i32 5 > to <2 x i64>), <2 x i64> bitcast (<4 x i32> < i32 6, i32 4, i32 2, i32 0 > to <2 x i64>), <2 x i64> bitcast (<4 x i32> < i32 8, i32 4, i32 2, i32 1 > to <2 x i64>), <2 x i64> bitcast (<4 x i32> < i32 0, i32 1, i32 3, i32 9 > to <2 x i64>) )
- ret void
-}
-
-declare void @xx(i32, i32, i32, i32, i32, i32, i32, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_cast.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_cast.ll
deleted file mode 100644
index 6f18d13..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_cast.ll
+++ /dev/null
@@ -1,56 +0,0 @@
-; RUN: llc < %s -march=x86-64
-; RUN: llc < %s -march=x86-64 -disable-mmx
-
-define <8 x i32> @a(<8 x i16> %a) nounwind {
- %c = sext <8 x i16> %a to <8 x i32>
- ret <8 x i32> %c
-}
-
-define <3 x i32> @b(<3 x i16> %a) nounwind {
- %c = sext <3 x i16> %a to <3 x i32>
- ret <3 x i32> %c
-}
-
-define <1 x i32> @c(<1 x i16> %a) nounwind {
- %c = sext <1 x i16> %a to <1 x i32>
- ret <1 x i32> %c
-}
-
-define <8 x i32> @d(<8 x i16> %a) nounwind {
- %c = zext <8 x i16> %a to <8 x i32>
- ret <8 x i32> %c
-}
-
-define <3 x i32> @e(<3 x i16> %a) nounwind {
- %c = zext <3 x i16> %a to <3 x i32>
- ret <3 x i32> %c
-}
-
-define <1 x i32> @f(<1 x i16> %a) nounwind {
- %c = zext <1 x i16> %a to <1 x i32>
- ret <1 x i32> %c
-}
-
-define <8 x i16> @g(<8 x i32> %a) nounwind {
- %c = trunc <8 x i32> %a to <8 x i16>
- ret <8 x i16> %c
-}
-
-define <3 x i16> @h(<3 x i32> %a) nounwind {
- %c = trunc <3 x i32> %a to <3 x i16>
- ret <3 x i16> %c
-}
-
-define <1 x i16> @i(<1 x i32> %a) nounwind {
- %c = trunc <1 x i32> %a to <1 x i16>
- ret <1 x i16> %c
-}
-
-; PR6438
-define void @__OpenCL_math_kernel4_kernel() nounwind {
- %tmp12.i = and <4 x i32> zeroinitializer, <i32 2139095040, i32 2139095040, i32 2139095040, i32 2139095040> ; <<4 x i32>> [#uses=1]
- %cmp13.i = icmp eq <4 x i32> %tmp12.i, <i32 2139095040, i32 2139095040, i32 2139095040, i32 2139095040> ; <<4 x i1>> [#uses=2]
- %cmp.ext14.i = sext <4 x i1> %cmp13.i to <4 x i32> ; <<4 x i32>> [#uses=0]
- %tmp2110.i = and <4 x i1> %cmp13.i, zeroinitializer ; <<4 x i1>> [#uses=0]
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_clear.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_clear.ll
deleted file mode 100644
index 166d436..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_clear.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mtriple=i386-apple-darwin -o %t
-; RUN: not grep and %t
-; RUN: not grep psrldq %t
-; RUN: grep xorps %t
-
-define <4 x float> @test(<4 x float>* %v1) nounwind {
- %tmp = load <4 x float>* %v1 ; <<4 x float>> [#uses=1]
- %tmp15 = bitcast <4 x float> %tmp to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp24 = and <2 x i64> %tmp15, bitcast (<4 x i32> < i32 0, i32 0, i32 -1, i32 -1 > to <2 x i64>) ; <<2 x i64>> [#uses=1]
- %tmp31 = bitcast <2 x i64> %tmp24 to <4 x float> ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp31
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_compare-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_compare-2.ll
deleted file mode 100644
index 091641b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_compare-2.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=penryn -disable-mmx | FileCheck %s
-
-declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>) nounwind readnone
-
-declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>) nounwind readnone
-
-declare <4 x i32> @llvm.x86.sse41.pmaxsd(<4 x i32>, <4 x i32>) nounwind readnone
-
-define void @blackDespeckle_wrapper(i8** %args_list, i64* %gtid, i64 %xend) {
-entry:
-; CHECK-NOT: set
-; CHECK: pcmpgt
-; CHECK: blendvps
- %shr.i = ashr <4 x i32> zeroinitializer, <i32 3, i32 3, i32 3, i32 3> ; <<4 x i32>> [#uses=1]
- %cmp318.i = sext <4 x i1> zeroinitializer to <4 x i32> ; <<4 x i32>> [#uses=1]
- %sub322.i = sub <4 x i32> %shr.i, zeroinitializer ; <<4 x i32>> [#uses=1]
- %cmp323.x = icmp slt <4 x i32> zeroinitializer, %sub322.i ; <<4 x i1>> [#uses=1]
- %cmp323.i = sext <4 x i1> %cmp323.x to <4 x i32> ; <<4 x i32>> [#uses=1]
- %or.i = or <4 x i32> %cmp318.i, %cmp323.i ; <<4 x i32>> [#uses=1]
- %tmp10.i83.i = bitcast <4 x i32> %or.i to <4 x float> ; <<4 x float>> [#uses=1]
- %0 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> undef, <4 x float> undef, <4 x float> %tmp10.i83.i) nounwind ; <<4 x float>> [#uses=1]
- %conv.i.i15.i = bitcast <4 x float> %0 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %swz.i.i28.i = shufflevector <4 x i32> %conv.i.i15.i, <4 x i32> undef, <2 x i32> <i32 0, i32 1> ; <<2 x i32>> [#uses=1]
- %tmp6.i29.i = bitcast <2 x i32> %swz.i.i28.i to <4 x i16> ; <<4 x i16>> [#uses=1]
- %swz.i30.i = shufflevector <4 x i16> %tmp6.i29.i, <4 x i16> undef, <2 x i32> <i32 0, i32 1> ; <<2 x i16>> [#uses=1]
- store <2 x i16> %swz.i30.i, <2 x i16>* undef
- unreachable
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_compare.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_compare.ll
deleted file mode 100644
index c8c7257..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_compare.ll
+++ /dev/null
@@ -1,43 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah | FileCheck %s
-
-
-define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK: test1:
-; CHECK: pcmpgtd
-; CHECK: ret
-
- %C = icmp sgt <4 x i32> %A, %B
- %D = sext <4 x i1> %C to <4 x i32>
- ret <4 x i32> %D
-}
-
-define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK: test2:
-; CHECK: pcmp
-; CHECK: pcmp
-; CHECK: xorps
-; CHECK: ret
- %C = icmp sge <4 x i32> %A, %B
- %D = sext <4 x i1> %C to <4 x i32>
- ret <4 x i32> %D
-}
-
-define <4 x i32> @test3(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK: test3:
-; CHECK: pcmpgtd
-; CHECK: movaps
-; CHECK: ret
- %C = icmp slt <4 x i32> %A, %B
- %D = sext <4 x i1> %C to <4 x i32>
- ret <4 x i32> %D
-}
-
-define <4 x i32> @test4(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK: test4:
-; CHECK: movaps
-; CHECK: pcmpgtd
-; CHECK: ret
- %C = icmp ugt <4 x i32> %A, %B
- %D = sext <4 x i1> %C to <4 x i32>
- ret <4 x i32> %D
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_ctbits.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_ctbits.ll
deleted file mode 100644
index f0158d6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_ctbits.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86-64
-
-declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>)
-declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>)
-declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
-
-define <2 x i64> @footz(<2 x i64> %a) nounwind {
- %c = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a)
- ret <2 x i64> %c
-}
-define <2 x i64> @foolz(<2 x i64> %a) nounwind {
- %c = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a)
- ret <2 x i64> %c
-}
-define <2 x i64> @foopop(<2 x i64> %a) nounwind {
- %c = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
- ret <2 x i64> %c
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_ext_inreg.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_ext_inreg.ll
deleted file mode 100644
index 8d2a3c3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_ext_inreg.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llc < %s -march=x86-64
-; RUN: llc < %s -march=x86-64 -disable-mmx
-
-define <8 x i32> @a(<8 x i32> %a) nounwind {
- %b = trunc <8 x i32> %a to <8 x i16>
- %c = sext <8 x i16> %b to <8 x i32>
- ret <8 x i32> %c
-}
-
-define <3 x i32> @b(<3 x i32> %a) nounwind {
- %b = trunc <3 x i32> %a to <3 x i16>
- %c = sext <3 x i16> %b to <3 x i32>
- ret <3 x i32> %c
-}
-
-define <1 x i32> @c(<1 x i32> %a) nounwind {
- %b = trunc <1 x i32> %a to <1 x i16>
- %c = sext <1 x i16> %b to <1 x i32>
- ret <1 x i32> %c
-}
-
-define <8 x i32> @d(<8 x i32> %a) nounwind {
- %b = trunc <8 x i32> %a to <8 x i16>
- %c = zext <8 x i16> %b to <8 x i32>
- ret <8 x i32> %c
-}
-
-define <3 x i32> @e(<3 x i32> %a) nounwind {
- %b = trunc <3 x i32> %a to <3 x i16>
- %c = zext <3 x i16> %b to <3 x i32>
- ret <3 x i32> %c
-}
-
-define <1 x i32> @f(<1 x i32> %a) nounwind {
- %b = trunc <1 x i32> %a to <1 x i16>
- %c = zext <1 x i16> %b to <1 x i32>
- ret <1 x i32> %c
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_extract-sse4.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_extract-sse4.ll
deleted file mode 100644
index dab5dd1..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_extract-sse4.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse41 -o %t
-; RUN: grep extractps %t | count 1
-; RUN: grep pextrd %t | count 1
-; RUN: not grep pshufd %t
-; RUN: not grep movss %t
-
-define void @t1(float* %R, <4 x float>* %P1) nounwind {
- %X = load <4 x float>* %P1
- %tmp = extractelement <4 x float> %X, i32 3
- store float %tmp, float* %R
- ret void
-}
-
-define float @t2(<4 x float>* %P1) nounwind {
- %X = load <4 x float>* %P1
- %tmp = extractelement <4 x float> %X, i32 2
- ret float %tmp
-}
-
-define void @t3(i32* %R, <4 x i32>* %P1) nounwind {
- %X = load <4 x i32>* %P1
- %tmp = extractelement <4 x i32> %X, i32 3
- store i32 %tmp, i32* %R
- ret void
-}
-
-define i32 @t4(<4 x i32>* %P1) nounwind {
- %X = load <4 x i32>* %P1
- %tmp = extractelement <4 x i32> %X, i32 3
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_extract.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_extract.ll
deleted file mode 100644
index b013730..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_extract.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2,-sse41 -o %t
-; RUN: grep movss %t | count 3
-; RUN: grep movhlps %t | count 1
-; RUN: grep pshufd %t | count 1
-; RUN: grep unpckhpd %t | count 1
-
-define void @test1(<4 x float>* %F, float* %f) nounwind {
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp7 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
- %tmp2 = extractelement <4 x float> %tmp7, i32 0 ; <float> [#uses=1]
- store float %tmp2, float* %f
- ret void
-}
-
-define float @test2(<4 x float>* %F, float* %f) nounwind {
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp7 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
- %tmp2 = extractelement <4 x float> %tmp7, i32 2 ; <float> [#uses=1]
- ret float %tmp2
-}
-
-define void @test3(float* %R, <4 x float>* %P1) nounwind {
- %X = load <4 x float>* %P1 ; <<4 x float>> [#uses=1]
- %tmp = extractelement <4 x float> %X, i32 3 ; <float> [#uses=1]
- store float %tmp, float* %R
- ret void
-}
-
-define double @test4(double %A) nounwind {
- %tmp1 = call <2 x double> @foo( ) ; <<2 x double>> [#uses=1]
- %tmp2 = extractelement <2 x double> %tmp1, i32 1 ; <double> [#uses=1]
- %tmp3 = fadd double %tmp2, %A ; <double> [#uses=1]
- ret double %tmp3
-}
-
-declare <2 x double> @foo()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_fneg.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_fneg.ll
deleted file mode 100644
index d49c70e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_fneg.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-
-define <4 x float> @t1(<4 x float> %Q) {
- %tmp15 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %Q
- ret <4 x float> %tmp15
-}
-
-define <4 x float> @t2(<4 x float> %Q) {
- %tmp15 = fsub <4 x float> zeroinitializer, %Q
- ret <4 x float> %tmp15
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_i64.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_i64.ll
deleted file mode 100644
index 462e16e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_i64.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -o %t
-; RUN: grep movq %t | count 2
-
-; Used movq to load i64 into a v2i64 when the top i64 is 0.
-
-define <2 x i64> @foo1(i64* %y) nounwind {
-entry:
- %tmp1 = load i64* %y, align 8 ; <i64> [#uses=1]
- %s2v = insertelement <2 x i64> undef, i64 %tmp1, i32 0
- %loadl = shufflevector <2 x i64> zeroinitializer, <2 x i64> %s2v, <2 x i32> <i32 2, i32 1>
- ret <2 x i64> %loadl
-}
-
-
-define <4 x float> @foo2(i64* %p) nounwind {
-entry:
- %load = load i64* %p
- %s2v = insertelement <2 x i64> undef, i64 %load, i32 0
- %loadl = shufflevector <2 x i64> zeroinitializer, <2 x i64> %s2v, <2 x i32> <i32 2, i32 1>
- %0 = bitcast <2 x i64> %loadl to <4 x float>
- ret <4 x float> %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_ins_extract-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_ins_extract-1.ll
deleted file mode 100644
index 2951193..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_ins_extract-1.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah | grep {(%esp,%eax,4)} | count 4
-
-; Inserts and extracts with variable indices must be lowered
-; to memory accesses.
-
-define i32 @t0(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
- %t13 = insertelement <4 x i32> %t8, i32 76, i32 %t7
- %t9 = extractelement <4 x i32> %t13, i32 0
- ret i32 %t9
-}
-define i32 @t1(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
- %t13 = insertelement <4 x i32> %t8, i32 76, i32 0
- %t9 = extractelement <4 x i32> %t13, i32 %t7
- ret i32 %t9
-}
-define <4 x i32> @t2(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
- %t9 = extractelement <4 x i32> %t8, i32 %t7
- %t13 = insertelement <4 x i32> %t8, i32 %t9, i32 0
- ret <4 x i32> %t13
-}
-define <4 x i32> @t3(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
- %t9 = extractelement <4 x i32> %t8, i32 0
- %t13 = insertelement <4 x i32> %t8, i32 %t9, i32 %t7
- ret <4 x i32> %t13
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_ins_extract.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_ins_extract.ll
deleted file mode 100644
index 3776aea..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_ins_extract.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; CLAMAV-local: no opt
-; RUNX: opt < %s -scalarrepl -instcombine | \
-; RUNX: llc -march=x86 -mcpu=yonah | not grep sub.*esp
-; RUN: true
-;
-
-; This checks that various insert/extract idiom work without going to the
-; stack.
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-
-define void @test(<4 x float>* %F, float %f) {
-entry:
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
- %tmp10 = insertelement <4 x float> %tmp3, float %f, i32 0 ; <<4 x float>> [#uses=2]
- %tmp6 = fadd <4 x float> %tmp10, %tmp10 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp6, <4 x float>* %F
- ret void
-}
-
-define void @test2(<4 x float>* %F, float %f) {
-entry:
- %G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=3]
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp3, <4 x float>* %G
- %tmp.upgrd.1 = getelementptr <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
- store float %f, float* %tmp.upgrd.1
- %tmp4 = load <4 x float>* %G ; <<4 x float>> [#uses=2]
- %tmp6 = fadd <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp6, <4 x float>* %F
- ret void
-}
-
-define void @test3(<4 x float>* %F, float* %f) {
-entry:
- %G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=2]
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp3, <4 x float>* %G
- %tmp.upgrd.2 = getelementptr <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
- %tmp.upgrd.3 = load float* %tmp.upgrd.2 ; <float> [#uses=1]
- store float %tmp.upgrd.3, float* %f
- ret void
-}
-
-define void @test4(<4 x float>* %F, float* %f) {
-entry:
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
- %tmp5.lhs = extractelement <4 x float> %tmp, i32 0 ; <float> [#uses=1]
- %tmp5.rhs = extractelement <4 x float> %tmp, i32 0 ; <float> [#uses=1]
- %tmp5 = fadd float %tmp5.lhs, %tmp5.rhs ; <float> [#uses=1]
- store float %tmp5, float* %f
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-2.ll
deleted file mode 100644
index b08044b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-2.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2,-sse41 | grep {\$36,} | count 2
-; RUN: llc < %s -march=x86 -mattr=+sse2,-sse41 | grep shufps | count 2
-; RUN: llc < %s -march=x86 -mattr=+sse2,-sse41 | grep pinsrw | count 1
-; RUN: llc < %s -march=x86 -mattr=+sse2,-sse41 | grep movhpd | count 1
-; RUN: llc < %s -march=x86-64 -mattr=+sse2,-sse41 | grep unpcklpd | count 1
-
-define <4 x float> @t1(float %s, <4 x float> %tmp) nounwind {
- %tmp1 = insertelement <4 x float> %tmp, float %s, i32 3
- ret <4 x float> %tmp1
-}
-
-define <4 x i32> @t2(i32 %s, <4 x i32> %tmp) nounwind {
- %tmp1 = insertelement <4 x i32> %tmp, i32 %s, i32 3
- ret <4 x i32> %tmp1
-}
-
-define <2 x double> @t3(double %s, <2 x double> %tmp) nounwind {
- %tmp1 = insertelement <2 x double> %tmp, double %s, i32 1
- ret <2 x double> %tmp1
-}
-
-define <8 x i16> @t4(i16 %s, <8 x i16> %tmp) nounwind {
- %tmp1 = insertelement <8 x i16> %tmp, i16 %s, i32 5
- ret <8 x i16> %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-3.ll
deleted file mode 100644
index a18cd86..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-3.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse2,-sse41 | grep punpcklqdq | count 1
-
-define <2 x i64> @t1(i64 %s, <2 x i64> %tmp) nounwind {
- %tmp1 = insertelement <2 x i64> %tmp, i64 %s, i32 1
- ret <2 x i64> %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-5.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-5.ll
deleted file mode 100644
index 291fc04..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-5.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 > %t
-; RUN: grep psllq %t | grep 32
-; RUN: grep pslldq %t | grep 12
-; RUN: grep psrldq %t | grep 8
-; RUN: grep psrldq %t | grep 12
-
-define void @t1(i32 %a, <1 x i64>* %P) nounwind {
- %tmp12 = shl i32 %a, 12
- %tmp21 = insertelement <2 x i32> undef, i32 %tmp12, i32 1
- %tmp22 = insertelement <2 x i32> %tmp21, i32 0, i32 0
- %tmp23 = bitcast <2 x i32> %tmp22 to <1 x i64>
- store <1 x i64> %tmp23, <1 x i64>* %P
- ret void
-}
-
-define <4 x float> @t2(<4 x float>* %P) nounwind {
- %tmp1 = load <4 x float>* %P
- %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x i32> < i32 4, i32 4, i32 4, i32 0 >
- ret <4 x float> %tmp2
-}
-
-define <4 x float> @t3(<4 x float>* %P) nounwind {
- %tmp1 = load <4 x float>* %P
- %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x i32> < i32 2, i32 3, i32 4, i32 4 >
- ret <4 x float> %tmp2
-}
-
-define <4 x float> @t4(<4 x float>* %P) nounwind {
- %tmp1 = load <4 x float>* %P
- %tmp2 = shufflevector <4 x float> zeroinitializer, <4 x float> %tmp1, <4 x i32> < i32 7, i32 0, i32 0, i32 0 >
- ret <4 x float> %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-6.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-6.ll
deleted file mode 100644
index 54aa43f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-6.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep pslldq
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mtriple=i686-apple-darwin9 -stats -info-output-file - | grep asm-printer | grep 6
-
-define <4 x float> @t3(<4 x float>* %P) nounwind {
- %tmp1 = load <4 x float>* %P
- %tmp2 = shufflevector <4 x float> zeroinitializer, <4 x float> %tmp1, <4 x i32> < i32 4, i32 4, i32 4, i32 0 >
- ret <4 x float> %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-7.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-7.ll
deleted file mode 100644
index 9ede10f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-7.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+mmx -mtriple=i686-apple-darwin9 -o - | grep punpckldq
-
-define <2 x i32> @mmx_movzl(<2 x i32> %x) nounwind {
-entry:
- %tmp3 = insertelement <2 x i32> %x, i32 32, i32 0 ; <<2 x i32>> [#uses=1]
- %tmp8 = insertelement <2 x i32> %tmp3, i32 0, i32 1 ; <<2 x i32>> [#uses=1]
- ret <2 x i32> %tmp8
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-8.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-8.ll
deleted file mode 100644
index 650951c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_insert-8.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse41 -o %t
-
-; tests variable insert and extract of a 4 x i32
-
-define <4 x i32> @var_insert(<4 x i32> %x, i32 %val, i32 %idx) nounwind {
-entry:
- %tmp3 = insertelement <4 x i32> %x, i32 %val, i32 %idx ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %tmp3
-}
-
-define i32 @var_extract(<4 x i32> %x, i32 %idx) nounwind {
-entry:
- %tmp3 = extractelement <4 x i32> %x, i32 %idx ; <<i32>> [#uses=1]
- ret i32 %tmp3
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_insert.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_insert.ll
deleted file mode 100644
index 4e5d445..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_insert.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2,-sse41 | grep movss | count 1
-; RUN: llc < %s -march=x86 -mattr=+sse2,-sse41 | not grep pinsrw
-
-define void @test(<4 x float>* %F, i32 %I) nounwind {
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=1]
- %f = sitofp i32 %I to float ; <float> [#uses=1]
- %tmp1 = insertelement <4 x float> %tmp, float %f, i32 0 ; <<4 x float>> [#uses=2]
- %tmp18 = fadd <4 x float> %tmp1, %tmp1 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp18, <4 x float>* %F
- ret void
-}
-
-define void @test2(<4 x float>* %F, i32 %I, float %g) nounwind {
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=1]
- %f = sitofp i32 %I to float ; <float> [#uses=1]
- %tmp1 = insertelement <4 x float> %tmp, float %f, i32 2 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp1, <4 x float>* %F
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_insert_4.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_insert_4.ll
deleted file mode 100644
index 2c31e56..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_insert_4.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah | grep 1084227584 | count 1
-
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin9.2.2"
-
-define <8 x float> @f(<8 x float> %a, i32 %b) nounwind {
-entry:
- %vecins = insertelement <8 x float> %a, float 5.000000e+00, i32 %b ; <<4 x float>> [#uses=1]
- ret <8 x float> %vecins
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_loadsingles.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_loadsingles.ll
deleted file mode 100644
index 8812c4f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_loadsingles.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movq
-
-define <4 x float> @a(<4 x float> %a, float* nocapture %p) nounwind readonly {
-entry:
- %tmp1 = load float* %p
- %vecins = insertelement <4 x float> undef, float %tmp1, i32 0
- %add.ptr = getelementptr float* %p, i32 1
- %tmp5 = load float* %add.ptr
- %vecins7 = insertelement <4 x float> %vecins, float %tmp5, i32 1
- ret <4 x float> %vecins7
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_logical.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_logical.ll
deleted file mode 100644
index 1dc0b16..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_logical.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 > %t
-; RUN: grep xorps %t | count 2
-; RUN: grep andnps %t
-; RUN: grep movaps %t | count 2
-
-define void @t(<4 x float> %A) {
- %tmp1277 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %A
- store <4 x float> %tmp1277, <4 x float>* null
- ret void
-}
-
-define <4 x float> @t1(<4 x float> %a, <4 x float> %b) {
-entry:
- %tmp9 = bitcast <4 x float> %a to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp10 = bitcast <4 x float> %b to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp11 = xor <4 x i32> %tmp9, %tmp10 ; <<4 x i32>> [#uses=1]
- %tmp13 = bitcast <4 x i32> %tmp11 to <4 x float> ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp13
-}
-
-define <2 x double> @t2(<2 x double> %a, <2 x double> %b) {
-entry:
- %tmp9 = bitcast <2 x double> %a to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp10 = bitcast <2 x double> %b to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp11 = and <2 x i64> %tmp9, %tmp10 ; <<2 x i64>> [#uses=1]
- %tmp13 = bitcast <2 x i64> %tmp11 to <2 x double> ; <<2 x double>> [#uses=1]
- ret <2 x double> %tmp13
-}
-
-define void @t3(<4 x float> %a, <4 x float> %b, <4 x float>* %c, <4 x float>* %d) {
-entry:
- %tmp3 = load <4 x float>* %c ; <<4 x float>> [#uses=1]
- %tmp11 = bitcast <4 x float> %a to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp12 = bitcast <4 x float> %b to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp13 = xor <4 x i32> %tmp11, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
- %tmp14 = and <4 x i32> %tmp12, %tmp13 ; <<4 x i32>> [#uses=1]
- %tmp27 = bitcast <4 x float> %tmp3 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp28 = or <4 x i32> %tmp14, %tmp27 ; <<4 x i32>> [#uses=1]
- %tmp30 = bitcast <4 x i32> %tmp28 to <4 x float> ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp30, <4 x float>* %d
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_return.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_return.ll
deleted file mode 100644
index 66762b4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_return.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 > %t
-; RUN: grep xorps %t | count 1
-; RUN: grep movaps %t | count 1
-; RUN: not grep shuf %t
-
-define <2 x double> @test() {
- ret <2 x double> zeroinitializer
-}
-
-define <4 x i32> @test2() nounwind {
- ret <4 x i32> < i32 0, i32 0, i32 1, i32 0 >
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_select.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_select.ll
deleted file mode 100644
index 033e9f7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_select.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse
-
-define void @test(i32 %C, <4 x float>* %A, <4 x float>* %B) {
- %tmp = load <4 x float>* %A ; <<4 x float>> [#uses=1]
- %tmp3 = load <4 x float>* %B ; <<4 x float>> [#uses=2]
- %tmp9 = fmul <4 x float> %tmp3, %tmp3 ; <<4 x float>> [#uses=1]
- %tmp.upgrd.1 = icmp eq i32 %C, 0 ; <i1> [#uses=1]
- %iftmp.38.0 = select i1 %tmp.upgrd.1, <4 x float> %tmp9, <4 x float> %tmp ; <<4 x float>> [#uses=1]
- store <4 x float> %iftmp.38.0, <4 x float>* %A
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-2.ll
deleted file mode 100644
index a8f1187..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-2.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movss | count 1
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movd | count 1
-
-define <4 x float> @test1(float %a) nounwind {
- %tmp = insertelement <4 x float> zeroinitializer, float %a, i32 0 ; <<4 x float>> [#uses=1]
- %tmp5 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
- %tmp6 = insertelement <4 x float> %tmp5, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
- %tmp7 = insertelement <4 x float> %tmp6, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp7
-}
-
-define <2 x i64> @test(i32 %a) nounwind {
- %tmp = insertelement <4 x i32> zeroinitializer, i32 %a, i32 0 ; <<8 x i16>> [#uses=1]
- %tmp6 = insertelement <4 x i32> %tmp, i32 0, i32 1 ; <<8 x i32>> [#uses=1]
- %tmp8 = insertelement <4 x i32> %tmp6, i32 0, i32 2 ; <<8 x i32>> [#uses=1]
- %tmp10 = insertelement <4 x i32> %tmp8, i32 0, i32 3 ; <<8 x i32>> [#uses=1]
- %tmp19 = bitcast <4 x i32> %tmp10 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp19
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-3.ll
deleted file mode 100644
index ada17e0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-3.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -o %t
-; RUN: grep pshufd %t | count 2
-
-define <4 x float> @test(float %a) nounwind {
- %tmp = insertelement <4 x float> zeroinitializer, float %a, i32 1 ; <<4 x float>> [#uses=1]
- %tmp5 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
- %tmp6 = insertelement <4 x float> %tmp5, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp6
-}
-
-define <2 x i64> @test2(i32 %a) nounwind {
- %tmp7 = insertelement <4 x i32> zeroinitializer, i32 %a, i32 2 ; <<4 x i32>> [#uses=1]
- %tmp9 = insertelement <4 x i32> %tmp7, i32 0, i32 3 ; <<4 x i32>> [#uses=1]
- %tmp10 = bitcast <4 x i32> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp10
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-4.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-4.ll
deleted file mode 100644
index 332c8b7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-4.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep pinsrw | count 2
-
-define <2 x i64> @test(i16 %a) nounwind {
-entry:
- %tmp10 = insertelement <8 x i16> zeroinitializer, i16 %a, i32 3 ; <<8 x i16>> [#uses=1]
- %tmp12 = insertelement <8 x i16> %tmp10, i16 0, i32 4 ; <<8 x i16>> [#uses=1]
- %tmp14 = insertelement <8 x i16> %tmp12, i16 0, i32 5 ; <<8 x i16>> [#uses=1]
- %tmp16 = insertelement <8 x i16> %tmp14, i16 0, i32 6 ; <<8 x i16>> [#uses=1]
- %tmp18 = insertelement <8 x i16> %tmp16, i16 0, i32 7 ; <<8 x i16>> [#uses=1]
- %tmp19 = bitcast <8 x i16> %tmp18 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp19
-}
-
-define <2 x i64> @test2(i8 %a) nounwind {
-entry:
- %tmp24 = insertelement <16 x i8> zeroinitializer, i8 %a, i32 10 ; <<16 x i8>> [#uses=1]
- %tmp26 = insertelement <16 x i8> %tmp24, i8 0, i32 11 ; <<16 x i8>> [#uses=1]
- %tmp28 = insertelement <16 x i8> %tmp26, i8 0, i32 12 ; <<16 x i8>> [#uses=1]
- %tmp30 = insertelement <16 x i8> %tmp28, i8 0, i32 13 ; <<16 x i8>> [#uses=1]
- %tmp32 = insertelement <16 x i8> %tmp30, i8 0, i32 14 ; <<16 x i8>> [#uses=1]
- %tmp34 = insertelement <16 x i8> %tmp32, i8 0, i32 15 ; <<16 x i8>> [#uses=1]
- %tmp35 = bitcast <16 x i8> %tmp34 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp35
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-5.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-5.ll
deleted file mode 100644
index f811a74..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-5.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -o %t
-; RUN: grep movlhps %t | count 1
-; RUN: grep movq %t | count 2
-
-define <4 x float> @test1(float %a, float %b) nounwind {
- %tmp = insertelement <4 x float> zeroinitializer, float %a, i32 0 ; <<4 x float>> [#uses=1]
- %tmp6 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
- %tmp8 = insertelement <4 x float> %tmp6, float %b, i32 2 ; <<4 x float>> [#uses=1]
- %tmp9 = insertelement <4 x float> %tmp8, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp9
-}
-
-define <4 x float> @test2(float %a, float %b) nounwind {
- %tmp = insertelement <4 x float> zeroinitializer, float %a, i32 0 ; <<4 x float>> [#uses=1]
- %tmp7 = insertelement <4 x float> %tmp, float %b, i32 1 ; <<4 x float>> [#uses=1]
- %tmp8 = insertelement <4 x float> %tmp7, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
- %tmp9 = insertelement <4 x float> %tmp8, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp9
-}
-
-define <2 x i64> @test3(i32 %a, i32 %b) nounwind {
- %tmp = insertelement <4 x i32> zeroinitializer, i32 %a, i32 0 ; <<4 x i32>> [#uses=1]
- %tmp6 = insertelement <4 x i32> %tmp, i32 %b, i32 1 ; <<4 x i32>> [#uses=1]
- %tmp8 = insertelement <4 x i32> %tmp6, i32 0, i32 2 ; <<4 x i32>> [#uses=1]
- %tmp10 = insertelement <4 x i32> %tmp8, i32 0, i32 3 ; <<4 x i32>> [#uses=1]
- %tmp11 = bitcast <4 x i32> %tmp10 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp11
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-6.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-6.ll
deleted file mode 100644
index 0713d95..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-6.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -o %t
-; RUN: grep movss %t | count 1
-; RUN: grep movq %t | count 1
-; RUN: grep shufps %t | count 1
-
-define <4 x float> @test(float %a, float %b, float %c) nounwind {
- %tmp = insertelement <4 x float> zeroinitializer, float %a, i32 1 ; <<4 x float>> [#uses=1]
- %tmp8 = insertelement <4 x float> %tmp, float %b, i32 2 ; <<4 x float>> [#uses=1]
- %tmp10 = insertelement <4 x float> %tmp8, float %c, i32 3 ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp10
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-7.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-7.ll
deleted file mode 100644
index d993178..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-7.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movsd | count 1
-
-define <2 x i64> @test(<2 x i64>* %p) nounwind {
- %tmp = bitcast <2 x i64>* %p to double*
- %tmp.upgrd.1 = load double* %tmp
- %tmp.upgrd.2 = insertelement <2 x double> undef, double %tmp.upgrd.1, i32 0
- %tmp5 = insertelement <2 x double> %tmp.upgrd.2, double 0.0, i32 1
- %tmp.upgrd.3 = bitcast <2 x double> %tmp5 to <2 x i64>
- ret <2 x i64> %tmp.upgrd.3
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-8.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-8.ll
deleted file mode 100644
index 9697f11..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-8.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86-64 | not grep movsd
-; RUN: llc < %s -march=x86-64 | grep {movd.*%rdi,.*%xmm0}
-
-define <2 x i64> @test(i64 %i) nounwind {
-entry:
- %tmp10 = insertelement <2 x i64> undef, i64 %i, i32 0
- %tmp11 = insertelement <2 x i64> %tmp10, i64 0, i32 1
- ret <2 x i64> %tmp11
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-9.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-9.ll
deleted file mode 100644
index 3656e5f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-9.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movd | count 1
-; RUN: llc < %s -march=x86-64 | grep {movlhps.*%xmm0, %xmm0}
-
-define <2 x i64> @test3(i64 %A) nounwind {
-entry:
- %B = insertelement <2 x i64> undef, i64 %A, i32 1
- ret <2 x i64> %B
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-A.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-A.ll
deleted file mode 100644
index f05eecf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-A.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep {movl.*\$1, %}
-define <2 x i64> @test1() nounwind {
-entry:
- ret <2 x i64> < i64 1, i64 0 >
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-B.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-B.ll
deleted file mode 100644
index f5b3e8b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-B.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep movaps
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep esp | count 2
-
-; These should both generate something like this:
-;_test3:
-; movl $1234567, %eax
-; andl 4(%esp), %eax
-; movd %eax, %xmm0
-; ret
-
-define <2 x i64> @test3(i64 %arg) nounwind {
-entry:
- %A = and i64 %arg, 1234567
- %B = insertelement <2 x i64> zeroinitializer, i64 %A, i32 0
- ret <2 x i64> %B
-}
-
-define <2 x i64> @test2(i64 %arg) nounwind {
-entry:
- %A = and i64 %arg, 1234567
- %B = insertelement <2 x i64> undef, i64 %A, i32 0
- ret <2 x i64> %B
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-C.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-C.ll
deleted file mode 100644
index 7636ac3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-C.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movq
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep mov | count 1
-; RUN: llc < %s -march=x86-64 -mattr=+sse2 | grep movd
-
-define <2 x i64> @t1(i64 %x) nounwind {
- %tmp8 = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
- ret <2 x i64> %tmp8
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-D.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-D.ll
deleted file mode 100644
index 3d6369e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-D.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movq
-
-define <4 x i32> @t(i32 %x, i32 %y) nounwind {
- %tmp1 = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
- %tmp2 = insertelement <4 x i32> %tmp1, i32 %y, i32 1
- ret <4 x i32> %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-E.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-E.ll
deleted file mode 100644
index d78be66..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-E.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movq
-
-define <4 x float> @t(float %X) nounwind {
- %tmp11 = insertelement <4 x float> undef, float %X, i32 0
- %tmp12 = insertelement <4 x float> %tmp11, float %X, i32 1
- %tmp27 = insertelement <4 x float> %tmp12, float 0.000000e+00, i32 2
- %tmp28 = insertelement <4 x float> %tmp27, float 0.000000e+00, i32 3
- ret <4 x float> %tmp28
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-F.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-F.ll
deleted file mode 100644
index 4f0acb2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-F.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movq
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movsd
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep mov | count 3
-
-define <2 x i64> @t1(<2 x i64>* %ptr) nounwind {
- %tmp45 = bitcast <2 x i64>* %ptr to <2 x i32>*
- %tmp615 = load <2 x i32>* %tmp45
- %tmp7 = bitcast <2 x i32> %tmp615 to i64
- %tmp8 = insertelement <2 x i64> zeroinitializer, i64 %tmp7, i32 0
- ret <2 x i64> %tmp8
-}
-
-define <2 x i64> @t2(i64 %x) nounwind {
- %tmp717 = bitcast i64 %x to double
- %tmp8 = insertelement <2 x double> undef, double %tmp717, i32 0
- %tmp9 = insertelement <2 x double> %tmp8, double 0.000000e+00, i32 1
- %tmp11 = bitcast <2 x double> %tmp9 to <2 x i64>
- ret <2 x i64> %tmp11
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-G.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-G.ll
deleted file mode 100644
index 4a542fe..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-G.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movss
-
-define fastcc void @t(<4 x float> %A) nounwind {
- %tmp41896 = extractelement <4 x float> %A, i32 0 ; <float> [#uses=1]
- %tmp14082 = insertelement <4 x float> < float 0.000000e+00, float undef, float undef, float undef >, float %tmp41896, i32 1 ; <<4 x float>> [#uses=1]
- %tmp14083 = insertelement <4 x float> %tmp14082, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp14083, <4 x float>* null, align 16
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-H.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-H.ll
deleted file mode 100644
index 5037e36..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-H.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep movz
-
-define <2 x i64> @doload64(i16 signext %x) nounwind {
-entry:
- %tmp36 = insertelement <8 x i16> undef, i16 %x, i32 0 ; <<8 x i16>> [#uses=1]
- %tmp37 = insertelement <8 x i16> %tmp36, i16 %x, i32 1 ; <<8 x i16>> [#uses=1]
- %tmp38 = insertelement <8 x i16> %tmp37, i16 %x, i32 2 ; <<8 x i16>> [#uses=1]
- %tmp39 = insertelement <8 x i16> %tmp38, i16 %x, i32 3 ; <<8 x i16>> [#uses=1]
- %tmp40 = insertelement <8 x i16> %tmp39, i16 %x, i32 4 ; <<8 x i16>> [#uses=1]
- %tmp41 = insertelement <8 x i16> %tmp40, i16 %x, i32 5 ; <<8 x i16>> [#uses=1]
- %tmp42 = insertelement <8 x i16> %tmp41, i16 %x, i32 6 ; <<8 x i16>> [#uses=1]
- %tmp43 = insertelement <8 x i16> %tmp42, i16 %x, i32 7 ; <<8 x i16>> [#uses=1]
- %tmp46 = bitcast <8 x i16> %tmp43 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp46
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-I.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-I.ll
deleted file mode 100644
index 64f36f9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-I.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movd
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep xorp
-
-define void @t1() nounwind {
- %tmp298.i.i = load <4 x float>* null, align 16
- %tmp304.i.i = bitcast <4 x float> %tmp298.i.i to <4 x i32>
- %tmp305.i.i = and <4 x i32> %tmp304.i.i, < i32 -1, i32 0, i32 0, i32 0 >
- store <4 x i32> %tmp305.i.i, <4 x i32>* null, align 16
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-J.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set-J.ll
deleted file mode 100644
index d90ab85..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set-J.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movss
-; PR2472
-
-define <4 x i32> @a(<4 x i32> %a) nounwind {
-entry:
- %vecext = extractelement <4 x i32> %a, i32 0
- insertelement <4 x i32> zeroinitializer, i32 %vecext, i32 0
- %add = add <4 x i32> %a, %0
- ret <4 x i32> %add
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_set.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_set.ll
deleted file mode 100644
index c316df8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_set.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep punpckl | count 7
-
-define void @test(<8 x i16>* %b, i16 %a0, i16 %a1, i16 %a2, i16 %a3, i16 %a4, i16 %a5, i16 %a6, i16 %a7) nounwind {
- %tmp = insertelement <8 x i16> zeroinitializer, i16 %a0, i32 0 ; <<8 x i16>> [#uses=1]
- %tmp2 = insertelement <8 x i16> %tmp, i16 %a1, i32 1 ; <<8 x i16>> [#uses=1]
- %tmp4 = insertelement <8 x i16> %tmp2, i16 %a2, i32 2 ; <<8 x i16>> [#uses=1]
- %tmp6 = insertelement <8 x i16> %tmp4, i16 %a3, i32 3 ; <<8 x i16>> [#uses=1]
- %tmp8 = insertelement <8 x i16> %tmp6, i16 %a4, i32 4 ; <<8 x i16>> [#uses=1]
- %tmp10 = insertelement <8 x i16> %tmp8, i16 %a5, i32 5 ; <<8 x i16>> [#uses=1]
- %tmp12 = insertelement <8 x i16> %tmp10, i16 %a6, i32 6 ; <<8 x i16>> [#uses=1]
- %tmp14 = insertelement <8 x i16> %tmp12, i16 %a7, i32 7 ; <<8 x i16>> [#uses=1]
- store <8 x i16> %tmp14, <8 x i16>* %b
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shift.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shift.ll
deleted file mode 100644
index ddf0469..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shift.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep psllw
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep psrlq
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep psraw
-
-define <2 x i64> @t1(<2 x i64> %b1, <2 x i64> %c) nounwind {
-entry:
- %tmp6 = bitcast <2 x i64> %c to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp8 = bitcast <2 x i64> %b1 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp9 = tail call <8 x i16> @llvm.x86.sse2.psll.w( <8 x i16> %tmp8, <8 x i16> %tmp6 ) nounwind readnone ; <<8 x i16>> [#uses=1]
- %tmp10 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp10
-}
-
-define <2 x i64> @t3(<2 x i64> %b1, i32 %c) nounwind {
-entry:
- %tmp2 = bitcast <2 x i64> %b1 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp4 = insertelement <4 x i32> undef, i32 %c, i32 0 ; <<4 x i32>> [#uses=1]
- %tmp8 = bitcast <4 x i32> %tmp4 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp9 = tail call <8 x i16> @llvm.x86.sse2.psra.w( <8 x i16> %tmp2, <8 x i16> %tmp8 ) ; <<8 x i16>> [#uses=1]
- %tmp11 = bitcast <8 x i16> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp11
-}
-
-declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>) nounwind readnone
-
-define <2 x i64> @t2(<2 x i64> %b1, <2 x i64> %c) nounwind {
-entry:
- %tmp9 = tail call <2 x i64> @llvm.x86.sse2.psrl.q( <2 x i64> %b1, <2 x i64> %c ) nounwind readnone ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp9
-}
-
-declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) nounwind readnone
-
-declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shift2.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shift2.ll
deleted file mode 100644
index c5f9dc4..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shift2.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep CPI
-
-define <2 x i64> @t1(<2 x i64> %b1, <2 x i64> %c) nounwind {
- %tmp1 = bitcast <2 x i64> %b1 to <8 x i16>
- %tmp2 = tail call <8 x i16> @llvm.x86.sse2.psrl.w( <8 x i16> %tmp1, <8 x i16> bitcast (<4 x i32> < i32 14, i32 undef, i32 undef, i32 undef > to <8 x i16>) ) nounwind readnone
- %tmp3 = bitcast <8 x i16> %tmp2 to <2 x i64>
- ret <2 x i64> %tmp3
-}
-
-define <4 x i32> @t2(<2 x i64> %b1, <2 x i64> %c) nounwind {
- %tmp1 = bitcast <2 x i64> %b1 to <4 x i32>
- %tmp2 = tail call <4 x i32> @llvm.x86.sse2.psll.d( <4 x i32> %tmp1, <4 x i32> < i32 14, i32 undef, i32 undef, i32 undef > ) nounwind readnone
- ret <4 x i32> %tmp2
-}
-
-declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
-declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shift3.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shift3.ll
deleted file mode 100644
index 1ebf455..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shift3.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep psllq
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep psraw
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movd | count 2
-
-define <2 x i64> @t1(<2 x i64> %x1, i32 %bits) nounwind {
-entry:
- %tmp3 = tail call <2 x i64> @llvm.x86.sse2.pslli.q( <2 x i64> %x1, i32 %bits ) nounwind readnone ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp3
-}
-
-define <2 x i64> @t2(<2 x i64> %x1) nounwind {
-entry:
- %tmp3 = tail call <2 x i64> @llvm.x86.sse2.pslli.q( <2 x i64> %x1, i32 10 ) nounwind readnone ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp3
-}
-
-define <2 x i64> @t3(<2 x i64> %x1, i32 %bits) nounwind {
-entry:
- %tmp2 = bitcast <2 x i64> %x1 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp4 = tail call <8 x i16> @llvm.x86.sse2.psrai.w( <8 x i16> %tmp2, i32 %bits ) nounwind readnone ; <<8 x i16>> [#uses=1]
- %tmp5 = bitcast <8 x i16> %tmp4 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp5
-}
-
-declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32) nounwind readnone
-declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-10.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-10.ll
deleted file mode 100644
index a63e386..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-10.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -o %t
-; RUN: grep unpcklps %t | count 1
-; RUN: grep pshufd %t | count 1
-; RUN: not grep {sub.*esp} %t
-
-define void @test(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B) {
- %tmp = load <4 x float>* %B ; <<4 x float>> [#uses=2]
- %tmp3 = load <4 x float>* %A ; <<4 x float>> [#uses=2]
- %tmp.upgrd.1 = extractelement <4 x float> %tmp3, i32 0 ; <float> [#uses=1]
- %tmp7 = extractelement <4 x float> %tmp, i32 0 ; <float> [#uses=1]
- %tmp8 = extractelement <4 x float> %tmp3, i32 1 ; <float> [#uses=1]
- %tmp9 = extractelement <4 x float> %tmp, i32 1 ; <float> [#uses=1]
- %tmp10 = insertelement <4 x float> undef, float %tmp.upgrd.1, i32 0 ; <<4 x float>> [#uses=1]
- %tmp11 = insertelement <4 x float> %tmp10, float %tmp7, i32 1 ; <<4 x float>> [#uses=1]
- %tmp12 = insertelement <4 x float> %tmp11, float %tmp8, i32 2 ; <<4 x float>> [#uses=1]
- %tmp13 = insertelement <4 x float> %tmp12, float %tmp9, i32 3 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp13, <4 x float>* %res
- ret void
-}
-
-define void @test2(<4 x float> %X, <4 x float>* %res) {
- %tmp5 = shufflevector <4 x float> %X, <4 x float> undef, <4 x i32> < i32 2, i32 6, i32 3, i32 7 > ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp5, <4 x float>* %res
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-11.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-11.ll
deleted file mode 100644
index 640745a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-11.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mtriple=i386-apple-darwin | not grep mov
-
-define <4 x i32> @test() nounwind {
- %tmp131 = call <2 x i64> @llvm.x86.sse2.psrl.dq( <2 x i64> < i64 -1, i64 -1 >, i32 96 ) ; <<2 x i64>> [#uses=1]
- %tmp137 = bitcast <2 x i64> %tmp131 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp138 = and <4 x i32> %tmp137, bitcast (<2 x i64> < i64 -1, i64 -1 > to <4 x i32>) ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %tmp138
-}
-
-declare <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64>, i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-14.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-14.ll
deleted file mode 100644
index f0cfc44..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-14.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep movd | count 1
-; RUN: llc < %s -march=x86-64 -mattr=+sse2 | grep movd | count 2
-; RUN: llc < %s -march=x86-64 -mattr=+sse2 | grep movq | count 3
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep xor
-
-define <4 x i32> @t1(i32 %a) nounwind {
-entry:
- %tmp = insertelement <4 x i32> undef, i32 %a, i32 0
- %tmp6 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %tmp, <4 x i32> < i32 4, i32 1, i32 2, i32 3 > ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %tmp6
-}
-
-define <2 x i64> @t2(i64 %a) nounwind {
-entry:
- %tmp = insertelement <2 x i64> undef, i64 %a, i32 0
- %tmp6 = shufflevector <2 x i64> zeroinitializer, <2 x i64> %tmp, <2 x i32> < i32 2, i32 1 > ; <<4 x i32>> [#uses=1]
- ret <2 x i64> %tmp6
-}
-
-define <2 x i64> @t3(<2 x i64>* %a) nounwind {
-entry:
- %tmp4 = load <2 x i64>* %a, align 16 ; <<2 x i64>> [#uses=1]
- %tmp6 = bitcast <2 x i64> %tmp4 to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp7 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %tmp6, <4 x i32> < i32 4, i32 5, i32 2, i32 3 > ; <<4 x i32>> [#uses=1]
- %tmp8 = bitcast <4 x i32> %tmp7 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp8
-}
-
-define <2 x i64> @t4(<2 x i64> %a) nounwind {
-entry:
- %tmp5 = bitcast <2 x i64> %a to <4 x i32> ; <<4 x i32>> [#uses=1]
- %tmp6 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %tmp5, <4 x i32> < i32 4, i32 5, i32 2, i32 3 > ; <<4 x i32>> [#uses=1]
- %tmp7 = bitcast <4 x i32> %tmp6 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp7
-}
-
-define <2 x i64> @t5(<2 x i64> %a) nounwind {
-entry:
- %tmp6 = shufflevector <2 x i64> zeroinitializer, <2 x i64> %a, <2 x i32> < i32 2, i32 1 > ; <<4 x i32>> [#uses=1]
- ret <2 x i64> %tmp6
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-15.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-15.ll
deleted file mode 100644
index 5a9b8fd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-15.ll
+++ /dev/null
@@ -1,81 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-
-define <2 x i64> @t00(<2 x i64> %a, <2 x i64> %b) nounwind {
- %tmp = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> < i32 0, i32 0 >
- ret <2 x i64> %tmp
-}
-
-define <2 x i64> @t01(<2 x i64> %a, <2 x i64> %b) nounwind {
- %tmp = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> < i32 0, i32 1 >
- ret <2 x i64> %tmp
-}
-
-define <2 x i64> @t02(<2 x i64> %a, <2 x i64> %b) nounwind {
- %tmp = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> < i32 0, i32 2 >
- ret <2 x i64> %tmp
-}
-
-define <2 x i64> @t03(<2 x i64> %a, <2 x i64> %b) nounwind {
- %tmp = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> < i32 0, i32 3 >
- ret <2 x i64> %tmp
-}
-
-define <2 x i64> @t10(<2 x i64> %a, <2 x i64> %b) nounwind {
- %tmp = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> < i32 1, i32 0 >
- ret <2 x i64> %tmp
-}
-
-define <2 x i64> @t11(<2 x i64> %a, <2 x i64> %b) nounwind {
- %tmp = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> < i32 1, i32 1 >
- ret <2 x i64> %tmp
-}
-
-define <2 x i64> @t12(<2 x i64> %a, <2 x i64> %b) nounwind {
- %tmp = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> < i32 1, i32 2 >
- ret <2 x i64> %tmp
-}
-
-define <2 x i64> @t13(<2 x i64> %a, <2 x i64> %b) nounwind {
- %tmp = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> < i32 1, i32 3 >
- ret <2 x i64> %tmp
-}
-
-define <2 x i64> @t20(<2 x i64> %a, <2 x i64> %b) nounwind {
- %tmp = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> < i32 2, i32 0 >
- ret <2 x i64> %tmp
-}
-
-define <2 x i64> @t21(<2 x i64> %a, <2 x i64> %b) nounwind {
- %tmp = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> < i32 2, i32 1 >
- ret <2 x i64> %tmp
-}
-
-define <2 x i64> @t22(<2 x i64> %a, <2 x i64> %b) nounwind {
- %tmp = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> < i32 2, i32 2 >
- ret <2 x i64> %tmp
-}
-
-define <2 x i64> @t23(<2 x i64> %a, <2 x i64> %b) nounwind {
- %tmp = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> < i32 2, i32 3 >
- ret <2 x i64> %tmp
-}
-
-define <2 x i64> @t30(<2 x i64> %a, <2 x i64> %b) nounwind {
- %tmp = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> < i32 3, i32 0 >
- ret <2 x i64> %tmp
-}
-
-define <2 x i64> @t31(<2 x i64> %a, <2 x i64> %b) nounwind {
- %tmp = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> < i32 3, i32 1 >
- ret <2 x i64> %tmp
-}
-
-define <2 x i64> @t32(<2 x i64> %a, <2 x i64> %b) nounwind {
- %tmp = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> < i32 3, i32 2 >
- ret <2 x i64> %tmp
-}
-
-define <2 x i64> @t33(<2 x i64> %a, <2 x i64> %b) nounwind {
- %tmp = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> < i32 3, i32 3 >
- ret <2 x i64> %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-16.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-16.ll
deleted file mode 100644
index 470f676..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-16.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse,-sse2 -mtriple=i386-apple-darwin -o %t
-; RUN: grep shufps %t | count 4
-; RUN: grep movaps %t | count 2
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mtriple=i386-apple-darwin -o %t
-; RUN: grep pshufd %t | count 4
-; RUN: not grep shufps %t
-; RUN: not grep mov %t
-
-define <4 x float> @t1(<4 x float> %a, <4 x float> %b) nounwind {
- %tmp1 = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> zeroinitializer
- ret <4 x float> %tmp1
-}
-
-define <4 x float> @t2(<4 x float> %A, <4 x float> %B) nounwind {
- %tmp = shufflevector <4 x float> %A, <4 x float> %B, <4 x i32> < i32 3, i32 3, i32 3, i32 3 >
- ret <4 x float> %tmp
-}
-
-define <4 x float> @t3(<4 x float> %A, <4 x float> %B) nounwind {
- %tmp = shufflevector <4 x float> %A, <4 x float> %B, <4 x i32> < i32 4, i32 4, i32 4, i32 4 >
- ret <4 x float> %tmp
-}
-
-define <4 x float> @t4(<4 x float> %A, <4 x float> %B) nounwind {
- %tmp = shufflevector <4 x float> %A, <4 x float> %B, <4 x i32> < i32 1, i32 3, i32 2, i32 0 >
- ret <4 x float> %tmp
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-17.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-17.ll
deleted file mode 100644
index 9c33abb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-17.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep {movd.*%rdi, %xmm0}
-; RUN: llc < %s -march=x86-64 | not grep xor
-; PR2108
-
-define <2 x i64> @doload64(i64 %x) nounwind {
-entry:
- %tmp717 = bitcast i64 %x to double ; <double> [#uses=1]
- %tmp8 = insertelement <2 x double> undef, double %tmp717, i32 0 ; <<2 x double>> [#uses=1]
- %tmp9 = insertelement <2 x double> %tmp8, double 0.000000e+00, i32 1 ; <<2 x double>> [#uses=1]
- %tmp11 = bitcast <2 x double> %tmp9 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp11
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-18.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-18.ll
deleted file mode 100644
index 1104a4a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-18.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mtriple=i686-apple-darwin8.8.0 | grep mov | count 7
-
- %struct.vector4_t = type { <4 x float> }
-
-define void @swizzle(i8* %a, %struct.vector4_t* %b, %struct.vector4_t* %c) nounwind {
-entry:
- %tmp9 = getelementptr %struct.vector4_t* %b, i32 0, i32 0 ; <<4 x float>*> [#uses=2]
- %tmp10 = load <4 x float>* %tmp9, align 16 ; <<4 x float>> [#uses=1]
- %tmp14 = bitcast i8* %a to double* ; <double*> [#uses=1]
- %tmp15 = load double* %tmp14 ; <double> [#uses=1]
- %tmp16 = insertelement <2 x double> undef, double %tmp15, i32 0 ; <<2 x double>> [#uses=1]
- %tmp18 = bitcast <2 x double> %tmp16 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp19 = shufflevector <4 x float> %tmp10, <4 x float> %tmp18, <4 x i32> < i32 4, i32 5, i32 2, i32 3 > ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp19, <4 x float>* %tmp9, align 16
- %tmp28 = getelementptr %struct.vector4_t* %c, i32 0, i32 0 ; <<4 x float>*> [#uses=2]
- %tmp29 = load <4 x float>* %tmp28, align 16 ; <<4 x float>> [#uses=1]
- %tmp26 = getelementptr i8* %a, i32 8 ; <i8*> [#uses=1]
- %tmp33 = bitcast i8* %tmp26 to double* ; <double*> [#uses=1]
- %tmp34 = load double* %tmp33 ; <double> [#uses=1]
- %tmp35 = insertelement <2 x double> undef, double %tmp34, i32 0 ; <<2 x double>> [#uses=1]
- %tmp37 = bitcast <2 x double> %tmp35 to <4 x float> ; <<4 x float>> [#uses=1]
- %tmp38 = shufflevector <4 x float> %tmp29, <4 x float> %tmp37, <4 x i32> < i32 4, i32 5, i32 2, i32 3 > ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp38, <4 x float>* %tmp28, align 16
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-19.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-19.ll
deleted file mode 100644
index 9fc09df..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-19.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mtriple=i686-apple-darwin9 -stats -info-output-file - | grep asm-printer | grep 4
-; PR2485
-
-define <4 x i32> @t(<4 x i32> %a, <4 x i32> %b) nounwind {
-entry:
- %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> < i32 4, i32 0, i32 0, i32 0 > ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %shuffle
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-20.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-20.ll
deleted file mode 100644
index 6d1bac0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-20.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -mtriple=i686-apple-darwin9 -stats -info-output-file - | grep asm-printer | grep 3
-
-define <4 x float> @func(<4 x float> %fp0, <4 x float> %fp1) nounwind {
-entry:
- shufflevector <4 x float> %fp0, <4 x float> %fp1, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:0 [#uses=1]
- ret <4 x float> %0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-22.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-22.ll
deleted file mode 100644
index 6807e4d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-22.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=pentium-m | FileCheck %s
-
-define <4 x float> @t1(<4 x float> %a) nounwind {
-; CHECK: movlhps
- %tmp1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> < i32 0, i32 1, i32 0, i32 1 > ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp1
-}
-
-define <4 x i32> @t2(<4 x i32>* %a) nounwind {
-; CHECK: pshufd
-; CHECK: ret
- %tmp1 = load <4 x i32>* %a
- %tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> < i32 0, i32 1, i32 0, i32 1 > ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %tmp2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-23.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-23.ll
deleted file mode 100644
index 05a3a1e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-23.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | not grep punpck
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep pshufd
-
-define i32 @t() nounwind {
-entry:
- %a = alloca <4 x i32> ; <<4 x i32>*> [#uses=2]
- %b = alloca <4 x i32> ; <<4 x i32>*> [#uses=5]
- volatile store <4 x i32> < i32 0, i32 1, i32 2, i32 3 >, <4 x i32>* %a
- %tmp = load <4 x i32>* %a ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp, <4 x i32>* %b
- %tmp1 = load <4 x i32>* %b ; <<4 x i32>> [#uses=1]
- %tmp2 = load <4 x i32>* %b ; <<4 x i32>> [#uses=1]
- %punpckldq = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x i32>> [#uses=1]
- store <4 x i32> %punpckldq, <4 x i32>* %b
- %tmp3 = load <4 x i32>* %b ; <<4 x i32>> [#uses=1]
- %result = extractelement <4 x i32> %tmp3, i32 0 ; <i32> [#uses=1]
- ret i32 %result
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-24.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-24.ll
deleted file mode 100644
index 7562f1d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-24.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep punpck
-
-define i32 @t() nounwind optsize {
-entry:
- %a = alloca <4 x i32> ; <<4 x i32>*> [#uses=2]
- %b = alloca <4 x i32> ; <<4 x i32>*> [#uses=5]
- volatile store <4 x i32> < i32 0, i32 1, i32 2, i32 3 >, <4 x i32>* %a
- %tmp = load <4 x i32>* %a ; <<4 x i32>> [#uses=1]
- store <4 x i32> %tmp, <4 x i32>* %b
- %tmp1 = load <4 x i32>* %b ; <<4 x i32>> [#uses=1]
- %tmp2 = load <4 x i32>* %b ; <<4 x i32>> [#uses=1]
- %punpckldq = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x i32>> [#uses=1]
- store <4 x i32> %punpckldq, <4 x i32>* %b
- %tmp3 = load <4 x i32>* %b ; <<4 x i32>> [#uses=1]
- %result = extractelement <4 x i32> %tmp3, i32 0 ; <i32> [#uses=1]
- ret i32 %result
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-25.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-25.ll
deleted file mode 100644
index d9b2388..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-25.ll
+++ /dev/null
@@ -1,34 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=sse41 -o %t
-; RUN: grep unpcklps %t | count 3
-; RUN: grep unpckhps %t | count 1
-
-; Transpose example using the more generic vector shuffle. We return
-; float8 instead of float16 since x86 can return that in register.
-; ModuleID = 'transpose2_opt.bc'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-apple-cl.1.0"
- at r0 = common global <4 x float> zeroinitializer, align 16 ; <<4 x float>*> [#uses=1]
- at r1 = common global <4 x float> zeroinitializer, align 16 ; <<4 x float>*> [#uses=1]
- at r2 = common global <4 x float> zeroinitializer, align 16 ; <<4 x float>*> [#uses=1]
- at r3 = common global <4 x float> zeroinitializer, align 16 ; <<4 x float>*> [#uses=1]
-
-define <8 x float> @__transpose2(<4 x float> %p0, <4 x float> %p1, <4 x float> %p2, <4 x float> %p3) nounwind {
-entry:
- %unpcklps = shufflevector <4 x float> %p0, <4 x float> %p2, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>> [#uses=2]
- %unpckhps = shufflevector <4 x float> %p0, <4 x float> %p2, <4 x i32> < i32 2, i32 6, i32 3, i32 7 > ; <<4 x float>> [#uses=2]
- %unpcklps8 = shufflevector <4 x float> %p1, <4 x float> %p3, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>> [#uses=2]
- %unpckhps11 = shufflevector <4 x float> %p1, <4 x float> %p3, <4 x i32> < i32 2, i32 6, i32 3, i32 7 > ; <<4 x float>> [#uses=2]
- %unpcklps14 = shufflevector <4 x float> %unpcklps, <4 x float> %unpcklps8, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>> [#uses=1]
- %unpcklps14a = shufflevector <4 x float> %unpcklps14, <4 x float> undef, <16 x i32> < i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %unpckhps17 = shufflevector <4 x float> %unpcklps, <4 x float> %unpcklps8, <4 x i32> < i32 2, i32 6, i32 3, i32 7 > ; <<4 x float>> [#uses=1]
- %unpckhps17a = shufflevector <4 x float> %unpckhps17, <4 x float> undef, <16 x i32> < i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %r1 = shufflevector <16 x float> %unpcklps14a, <16 x float> %unpckhps17a, <16 x i32> < i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %unpcklps20 = shufflevector <4 x float> %unpckhps, <4 x float> %unpckhps11, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>> [#uses=1]
- %unpcklps20a = shufflevector <4 x float> %unpcklps20, <4 x float> undef, <16 x i32> < i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %r2 = shufflevector <16 x float> %r1, <16 x float> %unpcklps20a, <16 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 12, i32 13, i32 14, i32 15>
- %unpckhps23 = shufflevector <4 x float> %unpckhps, <4 x float> %unpckhps11, <4 x i32> < i32 2, i32 6, i32 3, i32 7 > ; <<4 x float>> [#uses=1]
- %unpckhps23a = shufflevector <4 x float> %unpckhps23, <4 x float> undef, <16 x i32> < i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %r3 = shufflevector <16 x float> %r2, <16 x float> %unpckhps23a, <16 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
- %r4 = shufflevector <16 x float> %r3, <16 x float> undef, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- ret <8 x float> %r4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-26.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-26.ll
deleted file mode 100644
index 086af6b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-26.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=sse41 -o %t
-; RUN: grep unpcklps %t | count 1
-; RUN: grep unpckhps %t | count 3
-
-; Transpose example using the more generic vector shuffle. Return float8
-; instead of float16
-; ModuleID = 'transpose2_opt.bc'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i386-apple-cl.1.0"
- at r0 = common global <4 x float> zeroinitializer, align 16 ; <<4 x float>*> [#uses=1]
- at r1 = common global <4 x float> zeroinitializer, align 16 ; <<4 x float>*> [#uses=1]
- at r2 = common global <4 x float> zeroinitializer, align 16 ; <<4 x float>*> [#uses=1]
- at r3 = common global <4 x float> zeroinitializer, align 16 ; <<4 x float>*> [#uses=1]
-
-define <8 x float> @__transpose2(<4 x float> %p0, <4 x float> %p1, <4 x float> %p2, <4 x float> %p3) nounwind {
-entry:
- %unpcklps = shufflevector <4 x float> %p0, <4 x float> %p2, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>> [#uses=2]
- %unpckhps = shufflevector <4 x float> %p0, <4 x float> %p2, <4 x i32> < i32 2, i32 6, i32 3, i32 7 > ; <<4 x float>> [#uses=2]
- %unpcklps8 = shufflevector <4 x float> %p1, <4 x float> %p3, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>> [#uses=2]
- %unpckhps11 = shufflevector <4 x float> %p1, <4 x float> %p3, <4 x i32> < i32 2, i32 6, i32 3, i32 7 > ; <<4 x float>> [#uses=2]
- %unpcklps14 = shufflevector <4 x float> %unpcklps, <4 x float> %unpcklps8, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>> [#uses=1]
- %unpckhps17 = shufflevector <4 x float> %unpcklps, <4 x float> %unpcklps8, <4 x i32> < i32 2, i32 6, i32 3, i32 7 > ; <<4 x float>> [#uses=1]
- %r1 = shufflevector <4 x float> %unpcklps14, <4 x float> %unpckhps17, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7 >
- %unpcklps20 = shufflevector <4 x float> %unpckhps, <4 x float> %unpckhps11, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>> [#uses=1]
- %unpckhps23 = shufflevector <4 x float> %unpckhps, <4 x float> %unpckhps11, <4 x i32> < i32 2, i32 6, i32 3, i32 7 > ; <<4 x float>> [#uses=1]
- %r2 = shufflevector <4 x float> %unpcklps20, <4 x float> %unpckhps23, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7 >
-; %r3 = shufflevector <8 x float> %r1, <8 x float> %r2, <16 x i32> < i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15 >;
- ret <8 x float> %r2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-27.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-27.ll
deleted file mode 100644
index d700ccb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-27.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=sse41 -o %t
-; RUN: grep addps %t | count 2
-; RUN: grep mulps %t | count 2
-; RUN: grep subps %t | count 2
-
-; ModuleID = 'vec_shuffle-27.bc'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i686-apple-cl.1.0"
-
-define <8 x float> @my2filter4_1d(<4 x float> %a, <8 x float> %T0, <8 x float> %T1) nounwind readnone {
-entry:
- %tmp7 = shufflevector <4 x float> %a, <4 x float> undef, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3 > ; <<8 x float>> [#uses=1]
- %sub = fsub <8 x float> %T1, %T0 ; <<8 x float>> [#uses=1]
- %mul = fmul <8 x float> %sub, %tmp7 ; <<8 x float>> [#uses=1]
- %add = fadd <8 x float> %mul, %T0 ; <<8 x float>> [#uses=1]
- ret <8 x float> %add
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-28.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-28.ll
deleted file mode 100644
index 343685b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-28.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=core2 -o %t
-; RUN: grep pshufb %t | count 1
-
-; FIXME: this test has a superfluous punpcklqdq pre-pshufb currently.
-; Don't XFAIL it because it's still better than the previous code.
-
-; Pack various elements via shuffles.
-define <8 x i16> @shuf1(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-entry:
- %tmp7 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 1, i32 8, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
- ret <8 x i16> %tmp7
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-3.ll
deleted file mode 100644
index f4930b0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-3.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -o %t
-; RUN: grep movlhps %t | count 1
-; RUN: grep movhlps %t | count 1
-
-define <4 x float> @test1(<4 x float>* %x, <4 x float>* %y) {
- %tmp = load <4 x float>* %y ; <<4 x float>> [#uses=2]
- %tmp5 = load <4 x float>* %x ; <<4 x float>> [#uses=2]
- %tmp9 = fadd <4 x float> %tmp5, %tmp ; <<4 x float>> [#uses=1]
- %tmp21 = fsub <4 x float> %tmp5, %tmp ; <<4 x float>> [#uses=1]
- %tmp27 = shufflevector <4 x float> %tmp9, <4 x float> %tmp21, <4 x i32> < i32 0, i32 1, i32 4, i32 5 > ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp27
-}
-
-define <4 x float> @movhl(<4 x float>* %x, <4 x float>* %y) {
-entry:
- %tmp = load <4 x float>* %y ; <<4 x float>> [#uses=1]
- %tmp3 = load <4 x float>* %x ; <<4 x float>> [#uses=1]
- %tmp4 = shufflevector <4 x float> %tmp3, <4 x float> %tmp, <4 x i32> < i32 2, i32 3, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp4
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-30.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-30.ll
deleted file mode 100644
index 3f69150..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-30.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=sse41 -disable-mmx -o %t
-; RUN: grep pshufhw %t | grep -- -95 | count 1
-; RUN: grep shufps %t | count 1
-; RUN: not grep pslldq %t
-
-; Test case when creating pshufhw, we incorrectly set the higher order bit
-; for an undef,
-define void @test(<8 x i16>* %dest, <8 x i16> %in) nounwind {
-entry:
- %0 = load <8 x i16>* %dest
- %1 = shufflevector <8 x i16> %0, <8 x i16> %in, <8 x i32> < i32 0, i32 1, i32 2, i32 3, i32 13, i32 undef, i32 14, i32 14>
- store <8 x i16> %1, <8 x i16>* %dest
- ret void
-}
-
-; A test case where we shouldn't generate a punpckldq but a pshufd and a pslldq
-define void @test2(<4 x i32>* %dest, <4 x i32> %in) nounwind {
-entry:
- %0 = shufflevector <4 x i32> %in, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> < i32 undef, i32 5, i32 undef, i32 2>
- store <4 x i32> %0, <4 x i32>* %dest
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-31.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-31.ll
deleted file mode 100644
index bb06e15..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-31.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=core2 -o %t
-; RUN: grep pshufb %t | count 1
-
-define <8 x i16> @shuf3(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-entry:
- %tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 0, i32 1, i32 undef, i32 undef, i32 3, i32 11, i32 undef , i32 undef >
- ret <8 x i16> %tmp9
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-34.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-34.ll
deleted file mode 100644
index d057b3f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-34.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=core2 | grep pshufb | count 2
-
-define <8 x i16> @shuf2(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-entry:
- %tmp8 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 undef, i32 undef, i32 7, i32 2, i32 8, i32 undef, i32 undef , i32 undef >
- ret <8 x i16> %tmp8
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-35.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-35.ll
deleted file mode 100644
index 7f0fcb5..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-35.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah -stack-alignment=16 -o %t
-; RUN: grep pextrw %t | count 13
-; RUN: grep pinsrw %t | count 14
-; RUN: grep rolw %t | count 13
-; RUN: not grep esp %t
-; RUN: not grep ebp %t
-; RUN: llc < %s -march=x86 -mcpu=core2 -stack-alignment=16 -o %t
-; RUN: grep pshufb %t | count 3
-
-define <16 x i8> @shuf1(<16 x i8> %T0) nounwind readnone {
-entry:
- %tmp8 = shufflevector <16 x i8> %T0, <16 x i8> undef, <16 x i32> < i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 12, i32 13, i32 15 , i32 14 >
- ret <16 x i8> %tmp8
-}
-
-define <16 x i8> @shuf2(<16 x i8> %T0, <16 x i8> %T1) nounwind readnone {
-entry:
- %tmp8 = shufflevector <16 x i8> %T0, <16 x i8> %T1, <16 x i32> < i32 undef, i32 undef, i32 3, i32 2, i32 17, i32 16, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 12, i32 13, i32 15 , i32 14 >
- ret <16 x i8> %tmp8
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-36.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-36.ll
deleted file mode 100644
index 1ea37c8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-36.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=sse41 | FileCheck %s
-
-define <8 x i16> @shuf6(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-; CHECK: pshufb
-; CHECK-NOT: pshufb
-; CHECK: ret
-entry:
- %tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 3, i32 2, i32 0, i32 2, i32 1, i32 5, i32 6 , i32 undef >
- ret <8 x i16> %tmp9
-}
-
-define <8 x i16> @shuf7(<8 x i16> %t0) {
-; CHECK: pshufd
- %tmp10 = shufflevector <8 x i16> %t0, <8 x i16> undef, <8 x i32> < i32 undef, i32 2, i32 2, i32 2, i32 2, i32 2, i32 undef, i32 undef >
- ret <8 x i16> %tmp10
-}
\ No newline at end of file
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-4.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-4.ll
deleted file mode 100644
index 829fedf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-4.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 > %t
-; RUN: grep shuf %t | count 2
-; RUN: not grep unpck %t
-
-define void @test(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B, <4 x float>* %C) {
- %tmp3 = load <4 x float>* %B ; <<4 x float>> [#uses=1]
- %tmp5 = load <4 x float>* %C ; <<4 x float>> [#uses=1]
- %tmp11 = shufflevector <4 x float> %tmp3, <4 x float> %tmp5, <4 x i32> < i32 1, i32 4, i32 1, i32 5 > ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp11, <4 x float>* %res
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-5.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-5.ll
deleted file mode 100644
index c24167a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-5.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -o %t
-; RUN: grep movhlps %t | count 1
-; RUN: grep shufps %t | count 1
-
-define void @test() nounwind {
- %tmp1 = load <4 x float>* null ; <<4 x float>> [#uses=2]
- %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >, <4 x i32> < i32 0, i32 1, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
- %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x i32> < i32 2, i32 3, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
- %tmp4 = fadd <4 x float> %tmp2, %tmp3 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp4, <4 x float>* null
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-6.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-6.ll
deleted file mode 100644
index f034b0a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-6.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -o %t
-; RUN: grep movapd %t | count 1
-; RUN: grep movaps %t | count 1
-; RUN: grep movups %t | count 2
-
-target triple = "i686-apple-darwin"
- at x = global [4 x i32] [ i32 1, i32 2, i32 3, i32 4 ] ; <[4 x i32]*> [#uses=4]
-
-define <2 x i64> @test1() {
- %tmp = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 0) ; <i32> [#uses=1]
- %tmp3 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 1) ; <i32> [#uses=1]
- %tmp5 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 2) ; <i32> [#uses=1]
- %tmp7 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 3) ; <i32> [#uses=1]
- %tmp.upgrd.1 = insertelement <4 x i32> undef, i32 %tmp, i32 0 ; <<4 x i32>> [#uses=1]
- %tmp13 = insertelement <4 x i32> %tmp.upgrd.1, i32 %tmp3, i32 1 ; <<4 x i32>> [#uses=1]
- %tmp14 = insertelement <4 x i32> %tmp13, i32 %tmp5, i32 2 ; <<4 x i32>> [#uses=1]
- %tmp15 = insertelement <4 x i32> %tmp14, i32 %tmp7, i32 3 ; <<4 x i32>> [#uses=1]
- %tmp16 = bitcast <4 x i32> %tmp15 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %tmp16
-}
-
-define <4 x float> @test2(i32 %dummy, float %a, float %b, float %c, float %d) {
- %tmp = insertelement <4 x float> undef, float %a, i32 0 ; <<4 x float>> [#uses=1]
- %tmp11 = insertelement <4 x float> %tmp, float %b, i32 1 ; <<4 x float>> [#uses=1]
- %tmp12 = insertelement <4 x float> %tmp11, float %c, i32 2 ; <<4 x float>> [#uses=1]
- %tmp13 = insertelement <4 x float> %tmp12, float %d, i32 3 ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp13
-}
-
-define <4 x float> @test3(float %a, float %b, float %c, float %d) {
- %tmp = insertelement <4 x float> undef, float %a, i32 0 ; <<4 x float>> [#uses=1]
- %tmp11 = insertelement <4 x float> %tmp, float %b, i32 1 ; <<4 x float>> [#uses=1]
- %tmp12 = insertelement <4 x float> %tmp11, float %c, i32 2 ; <<4 x float>> [#uses=1]
- %tmp13 = insertelement <4 x float> %tmp12, float %d, i32 3 ; <<4 x float>> [#uses=1]
- ret <4 x float> %tmp13
-}
-
-define <2 x double> @test4(double %a, double %b) {
- %tmp = insertelement <2 x double> undef, double %a, i32 0 ; <<2 x double>> [#uses=1]
- %tmp7 = insertelement <2 x double> %tmp, double %b, i32 1 ; <<2 x double>> [#uses=1]
- ret <2 x double> %tmp7
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-7.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-7.ll
deleted file mode 100644
index 4cdca09..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-7.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -o %t
-; RUN: grep xorps %t | count 1
-; RUN: not grep shufps %t
-
-define void @test() {
- bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>>:1 [#uses=1]
- shufflevector <4 x float> %1, <4 x float> zeroinitializer, <4 x i32> zeroinitializer ; <<4 x float>>:2 [#uses=1]
- store <4 x float> %2, <4 x float>* null
- unreachable
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-8.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-8.ll
deleted file mode 100644
index 964ce7b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-8.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | \
-; RUN: not grep shufps
-
-define void @test(<4 x float>* %res, <4 x float>* %A) {
- %tmp1 = load <4 x float>* %A ; <<4 x float>> [#uses=1]
- %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp2, <4 x float>* %res
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-9.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-9.ll
deleted file mode 100644
index fc16a26..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle-9.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
-
-define <4 x i32> @test(i8** %ptr) {
-; CHECK: xorps
-; CHECK: punpcklbw
-; CHECK: punpcklwd
-
- %tmp = load i8** %ptr ; <i8*> [#uses=1]
- %tmp.upgrd.1 = bitcast i8* %tmp to float* ; <float*> [#uses=1]
- %tmp.upgrd.2 = load float* %tmp.upgrd.1 ; <float> [#uses=1]
- %tmp.upgrd.3 = insertelement <4 x float> undef, float %tmp.upgrd.2, i32 0 ; <<4 x float>> [#uses=1]
- %tmp9 = insertelement <4 x float> %tmp.upgrd.3, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
- %tmp10 = insertelement <4 x float> %tmp9, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
- %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
- %tmp21 = bitcast <4 x float> %tmp11 to <16 x i8> ; <<16 x i8>> [#uses=1]
- %tmp22 = shufflevector <16 x i8> %tmp21, <16 x i8> zeroinitializer, <16 x i32> < i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23 > ; <<16 x i8>> [#uses=1]
- %tmp31 = bitcast <16 x i8> %tmp22 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp.upgrd.4 = shufflevector <8 x i16> zeroinitializer, <8 x i16> %tmp31, <8 x i32> < i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11 > ; <<8 x i16>> [#uses=1]
- %tmp36 = bitcast <8 x i16> %tmp.upgrd.4 to <4 x i32> ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %tmp36
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle.ll
deleted file mode 100644
index c05b79a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_shuffle.ll
+++ /dev/null
@@ -1,44 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=core2 -o %t
-; RUN: grep shufp %t | count 1
-; RUN: grep movupd %t | count 1
-; RUN: grep pshufhw %t | count 1
-
-define void @test_v4sf(<4 x float>* %P, float %X, float %Y) nounwind {
- %tmp = insertelement <4 x float> zeroinitializer, float %X, i32 0 ; <<4 x float>> [#uses=1]
- %tmp2 = insertelement <4 x float> %tmp, float %X, i32 1 ; <<4 x float>> [#uses=1]
- %tmp4 = insertelement <4 x float> %tmp2, float %Y, i32 2 ; <<4 x float>> [#uses=1]
- %tmp6 = insertelement <4 x float> %tmp4, float %Y, i32 3 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp6, <4 x float>* %P
- ret void
-}
-
-define void @test_v2sd(<2 x double>* %P, double %X, double %Y) nounwind {
- %tmp = insertelement <2 x double> zeroinitializer, double %X, i32 0 ; <<2 x double>> [#uses=1]
- %tmp2 = insertelement <2 x double> %tmp, double %Y, i32 1 ; <<2 x double>> [#uses=1]
- store <2 x double> %tmp2, <2 x double>* %P
- ret void
-}
-
-define void @test_v8i16(<2 x i64>* %res, <2 x i64>* %A) nounwind {
- %tmp = load <2 x i64>* %A ; <<2 x i64>> [#uses=1]
- %tmp.upgrd.1 = bitcast <2 x i64> %tmp to <8 x i16> ; <<8 x i16>> [#uses=8]
- %tmp.upgrd.2 = extractelement <8 x i16> %tmp.upgrd.1, i32 0 ; <i16> [#uses=1]
- %tmp1 = extractelement <8 x i16> %tmp.upgrd.1, i32 1 ; <i16> [#uses=1]
- %tmp2 = extractelement <8 x i16> %tmp.upgrd.1, i32 2 ; <i16> [#uses=1]
- %tmp3 = extractelement <8 x i16> %tmp.upgrd.1, i32 3 ; <i16> [#uses=1]
- %tmp4 = extractelement <8 x i16> %tmp.upgrd.1, i32 6 ; <i16> [#uses=1]
- %tmp5 = extractelement <8 x i16> %tmp.upgrd.1, i32 5 ; <i16> [#uses=1]
- %tmp6 = extractelement <8 x i16> %tmp.upgrd.1, i32 4 ; <i16> [#uses=1]
- %tmp7 = extractelement <8 x i16> %tmp.upgrd.1, i32 7 ; <i16> [#uses=1]
- %tmp8 = insertelement <8 x i16> undef, i16 %tmp.upgrd.2, i32 0 ; <<8 x i16>> [#uses=1]
- %tmp9 = insertelement <8 x i16> %tmp8, i16 %tmp1, i32 1 ; <<8 x i16>> [#uses=1]
- %tmp10 = insertelement <8 x i16> %tmp9, i16 %tmp2, i32 2 ; <<8 x i16>> [#uses=1]
- %tmp11 = insertelement <8 x i16> %tmp10, i16 %tmp3, i32 3 ; <<8 x i16>> [#uses=1]
- %tmp12 = insertelement <8 x i16> %tmp11, i16 %tmp4, i32 4 ; <<8 x i16>> [#uses=1]
- %tmp13 = insertelement <8 x i16> %tmp12, i16 %tmp5, i32 5 ; <<8 x i16>> [#uses=1]
- %tmp14 = insertelement <8 x i16> %tmp13, i16 %tmp6, i32 6 ; <<8 x i16>> [#uses=1]
- %tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 7 ; <<8 x i16>> [#uses=1]
- %tmp15.upgrd.3 = bitcast <8 x i16> %tmp15 to <2 x i64> ; <<2 x i64>> [#uses=1]
- store <2 x i64> %tmp15.upgrd.3, <2 x i64>* %res
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_splat-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_splat-2.ll
deleted file mode 100644
index cde5ae9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_splat-2.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep pshufd | count 1
-
-define void @test(<2 x i64>* %P, i8 %x) nounwind {
- %tmp = insertelement <16 x i8> zeroinitializer, i8 %x, i32 0 ; <<16 x i8>> [#uses=1]
- %tmp36 = insertelement <16 x i8> %tmp, i8 %x, i32 1 ; <<16 x i8>> [#uses=1]
- %tmp38 = insertelement <16 x i8> %tmp36, i8 %x, i32 2 ; <<16 x i8>> [#uses=1]
- %tmp40 = insertelement <16 x i8> %tmp38, i8 %x, i32 3 ; <<16 x i8>> [#uses=1]
- %tmp42 = insertelement <16 x i8> %tmp40, i8 %x, i32 4 ; <<16 x i8>> [#uses=1]
- %tmp44 = insertelement <16 x i8> %tmp42, i8 %x, i32 5 ; <<16 x i8>> [#uses=1]
- %tmp46 = insertelement <16 x i8> %tmp44, i8 %x, i32 6 ; <<16 x i8>> [#uses=1]
- %tmp48 = insertelement <16 x i8> %tmp46, i8 %x, i32 7 ; <<16 x i8>> [#uses=1]
- %tmp50 = insertelement <16 x i8> %tmp48, i8 %x, i32 8 ; <<16 x i8>> [#uses=1]
- %tmp52 = insertelement <16 x i8> %tmp50, i8 %x, i32 9 ; <<16 x i8>> [#uses=1]
- %tmp54 = insertelement <16 x i8> %tmp52, i8 %x, i32 10 ; <<16 x i8>> [#uses=1]
- %tmp56 = insertelement <16 x i8> %tmp54, i8 %x, i32 11 ; <<16 x i8>> [#uses=1]
- %tmp58 = insertelement <16 x i8> %tmp56, i8 %x, i32 12 ; <<16 x i8>> [#uses=1]
- %tmp60 = insertelement <16 x i8> %tmp58, i8 %x, i32 13 ; <<16 x i8>> [#uses=1]
- %tmp62 = insertelement <16 x i8> %tmp60, i8 %x, i32 14 ; <<16 x i8>> [#uses=1]
- %tmp64 = insertelement <16 x i8> %tmp62, i8 %x, i32 15 ; <<16 x i8>> [#uses=1]
- %tmp68 = load <2 x i64>* %P ; <<2 x i64>> [#uses=1]
- %tmp71 = bitcast <2 x i64> %tmp68 to <16 x i8> ; <<16 x i8>> [#uses=1]
- %tmp73 = add <16 x i8> %tmp71, %tmp64 ; <<16 x i8>> [#uses=1]
- %tmp73.upgrd.1 = bitcast <16 x i8> %tmp73 to <2 x i64> ; <<2 x i64>> [#uses=1]
- store <2 x i64> %tmp73.upgrd.1, <2 x i64>* %P
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_splat-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_splat-3.ll
deleted file mode 100644
index 649b85c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_splat-3.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=sse41 -o %t
-; RUN: grep punpcklwd %t | count 4
-; RUN: grep punpckhwd %t | count 4
-; RUN: grep "pshufd" %t | count 8
-
-; Splat test for v8i16
-; Should generate with pshufd with masks $0, $85, $170, $255 (each mask is used twice)
-define <8 x i16> @shuf_8i16_0(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 0, i32 undef, i32 undef, i32 0, i32 undef, i32 undef, i32 undef , i32 undef >
- ret <8 x i16> %tmp6
-}
-
-define <8 x i16> @shuf_8i16_1(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 1, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
- ret <8 x i16> %tmp6
-}
-
-define <8 x i16> @shuf_8i16_2(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 2, i32 undef, i32 undef, i32 2, i32 undef, i32 2, i32 undef , i32 undef >
- ret <8 x i16> %tmp6
-}
-
-define <8 x i16> @shuf_8i16_3(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 3, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
- ret <8 x i16> %tmp6
-}
-
-define <8 x i16> @shuf_8i16_4(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 4, i32 undef, i32 undef, i32 undef, i32 4, i32 undef, i32 undef , i32 undef >
- ret <8 x i16> %tmp6
-}
-
-define <8 x i16> @shuf_8i16_5(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 5, i32 undef, i32 undef, i32 5, i32 undef, i32 undef, i32 undef , i32 undef >
- ret <8 x i16> %tmp6
-}
-
-define <8 x i16> @shuf_8i16_6(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 6, i32 6, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
- ret <8 x i16> %tmp6
-}
-
-
-define <8 x i16> @shuf_8i16_7(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 7, i32 undef, i32 undef, i32 7, i32 undef, i32 undef, i32 undef , i32 undef >
- ret <8 x i16> %tmp6
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_splat-4.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_splat-4.ll
deleted file mode 100644
index d9941e6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_splat-4.ll
+++ /dev/null
@@ -1,104 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=sse41 -o %t
-; RUN: grep punpcklbw %t | count 16
-; RUN: grep punpckhbw %t | count 16
-; RUN: grep "pshufd" %t | count 16
-
-; Should generate with pshufd with masks $0, $85, $170, $255 (each mask is used 4 times)
-
-; Splat test for v16i8
-define <16 x i8 > @shuf_16i8_0(<16 x i8 > %T0, <16 x i8 > %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 0, i32 undef, i32 undef, i32 0, i32 undef, i32 0, i32 0 , i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 >
- ret <16 x i8 > %tmp6
-}
-
-define <16 x i8 > @shuf_16i8_1(<16 x i8 > %T0, <16 x i8 > %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 1, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef, i32 undef, i32 undef, i32 undef >
- ret <16 x i8 > %tmp6
-}
-
-define <16 x i8 > @shuf_16i8_2(<16 x i8 > %T0, <16 x i8 > %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 2, i32 undef, i32 undef, i32 2, i32 undef, i32 2, i32 2 , i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2 >
- ret <16 x i8 > %tmp6
-}
-
-define <16 x i8 > @shuf_16i8_3(<16 x i8 > %T0, <16 x i8 > %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 3, i32 undef, i32 undef, i32 3, i32 undef, i32 3, i32 3 , i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3 >
- ret <16 x i8 > %tmp6
-}
-
-
-define <16 x i8 > @shuf_16i8_4(<16 x i8 > %T0, <16 x i8 > %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 4, i32 undef, i32 undef, i32 undef, i32 4, i32 undef, i32 undef , i32 undef, i32 undef, i32 undef, i32 undef , i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
- ret <16 x i8 > %tmp6
-}
-
-define <16 x i8 > @shuf_16i8_5(<16 x i8 > %T0, <16 x i8 > %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 5, i32 undef, i32 undef, i32 5, i32 undef, i32 5, i32 5 , i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5 >
- ret <16 x i8 > %tmp6
-}
-
-define <16 x i8 > @shuf_16i8_6(<16 x i8 > %T0, <16 x i8 > %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 6, i32 undef, i32 undef, i32 6, i32 undef, i32 6, i32 6 , i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6 >
- ret <16 x i8 > %tmp6
-}
-
-define <16 x i8 > @shuf_16i8_7(<16 x i8 > %T0, <16 x i8 > %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 7, i32 undef, i32 undef, i32 7, i32 undef, i32 undef, i32 undef , i32 undef, i32 undef, i32 undef, i32 undef , i32 undef , i32 undef, i32 undef, i32 undef , i32 undef >
- ret <16 x i8 > %tmp6
-}
-
-define <16 x i8 > @shuf_16i8_8(<16 x i8 > %T0, <16 x i8 > %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 8, i32 undef, i32 undef, i32 8, i32 undef, i32 8, i32 8 , i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8 >
- ret <16 x i8 > %tmp6
-}
-
-define <16 x i8 > @shuf_16i8_9(<16 x i8 > %T0, <16 x i8 > %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 9, i32 undef, i32 undef, i32 9, i32 undef, i32 9, i32 9 , i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9 >
- ret <16 x i8 > %tmp6
-}
-
-define <16 x i8 > @shuf_16i8_10(<16 x i8 > %T0, <16 x i8 > %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 10, i32 undef, i32 undef, i32 10, i32 undef, i32 10, i32 10 , i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10, i32 10 >
- ret <16 x i8 > %tmp6
-}
-
-define <16 x i8 > @shuf_16i8_11(<16 x i8 > %T0, <16 x i8 > %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 11, i32 undef, i32 undef, i32 11, i32 undef, i32 11, i32 11 , i32 11, i32 11, i32 11, i32 11, i32 11, i32 11, i32 11, i32 11, i32 11 >
- ret <16 x i8 > %tmp6
-}
-
-define <16 x i8 > @shuf_16i8_12(<16 x i8 > %T0, <16 x i8 > %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 12, i32 undef, i32 undef, i32 12, i32 undef, i32 12, i32 12 , i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12, i32 12 >
- ret <16 x i8 > %tmp6
-}
-
-define <16 x i8 > @shuf_16i8_13(<16 x i8 > %T0, <16 x i8 > %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 13, i32 undef, i32 undef, i32 13, i32 undef, i32 13, i32 13 , i32 13, i32 13, i32 13, i32 13, i32 13, i32 13, i32 13, i32 13, i32 13 >
- ret <16 x i8 > %tmp6
-}
-
-define <16 x i8 > @shuf_16i8_14(<16 x i8 > %T0, <16 x i8 > %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 14, i32 undef, i32 undef, i32 14, i32 undef, i32 14, i32 14 , i32 14, i32 14, i32 14, i32 14, i32 14, i32 14, i32 14, i32 14, i32 14 >
- ret <16 x i8 > %tmp6
-}
-
-define <16 x i8 > @shuf_16i8_15(<16 x i8 > %T0, <16 x i8 > %T1) nounwind readnone {
-entry:
- %tmp6 = shufflevector <16 x i8 > %T0, <16 x i8 > %T1, <16 x i32> < i32 15, i32 undef, i32 undef, i32 15, i32 undef, i32 15, i32 15 , i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15 >
- ret <16 x i8 > %tmp6
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_splat.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_splat.ll
deleted file mode 100644
index a87fbd0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_splat.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep pshufd
-; RUN: llc < %s -march=x86 -mattr=+sse3 | grep movddup
-
-define void @test_v4sf(<4 x float>* %P, <4 x float>* %Q, float %X) nounwind {
- %tmp = insertelement <4 x float> zeroinitializer, float %X, i32 0 ; <<4 x float>> [#uses=1]
- %tmp2 = insertelement <4 x float> %tmp, float %X, i32 1 ; <<4 x float>> [#uses=1]
- %tmp4 = insertelement <4 x float> %tmp2, float %X, i32 2 ; <<4 x float>> [#uses=1]
- %tmp6 = insertelement <4 x float> %tmp4, float %X, i32 3 ; <<4 x float>> [#uses=1]
- %tmp8 = load <4 x float>* %Q ; <<4 x float>> [#uses=1]
- %tmp10 = fmul <4 x float> %tmp8, %tmp6 ; <<4 x float>> [#uses=1]
- store <4 x float> %tmp10, <4 x float>* %P
- ret void
-}
-
-define void @test_v2sd(<2 x double>* %P, <2 x double>* %Q, double %X) nounwind {
- %tmp = insertelement <2 x double> zeroinitializer, double %X, i32 0 ; <<2 x double>> [#uses=1]
- %tmp2 = insertelement <2 x double> %tmp, double %X, i32 1 ; <<2 x double>> [#uses=1]
- %tmp4 = load <2 x double>* %Q ; <<2 x double>> [#uses=1]
- %tmp6 = fmul <2 x double> %tmp4, %tmp2 ; <<2 x double>> [#uses=1]
- store <2 x double> %tmp6, <2 x double>* %P
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_ss_load_fold.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_ss_load_fold.ll
deleted file mode 100644
index c8b2927..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_ss_load_fold.ll
+++ /dev/null
@@ -1,72 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse,+sse2,+sse41 | FileCheck %s
-
-target datalayout = "e-p:32:32"
-target triple = "i686-apple-darwin8.7.2"
-
-define i16 @test1(float %f) nounwind {
- %tmp = insertelement <4 x float> undef, float %f, i32 0 ; <<4 x float>> [#uses=1]
- %tmp10 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
- %tmp11 = insertelement <4 x float> %tmp10, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
- %tmp12 = insertelement <4 x float> %tmp11, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
- %tmp28 = tail call <4 x float> @llvm.x86.sse.sub.ss( <4 x float> %tmp12, <4 x float> < float 1.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
- %tmp37 = tail call <4 x float> @llvm.x86.sse.mul.ss( <4 x float> %tmp28, <4 x float> < float 5.000000e-01, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
- %tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp37, <4 x float> < float 6.553500e+04, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00 > ) ; <<4 x float>> [#uses=1]
- %tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> zeroinitializer ) ; <<4 x float>> [#uses=1]
- %tmp.upgrd.1 = tail call i32 @llvm.x86.sse.cvttss2si( <4 x float> %tmp59 ) ; <i32> [#uses=1]
- %tmp69 = trunc i32 %tmp.upgrd.1 to i16 ; <i16> [#uses=1]
- ret i16 %tmp69
-; CHECK: test1:
-; CHECK: subss LCPI1_
-; CHECK: mulss LCPI1_
-; CHECK: minss LCPI1_
-}
-
-define i16 @test2(float %f) nounwind {
- %tmp28 = fsub float %f, 1.000000e+00 ; <float> [#uses=1]
- %tmp37 = fmul float %tmp28, 5.000000e-01 ; <float> [#uses=1]
- %tmp375 = insertelement <4 x float> undef, float %tmp37, i32 0 ; <<4 x float>> [#uses=1]
- %tmp48 = tail call <4 x float> @llvm.x86.sse.min.ss( <4 x float> %tmp375, <4 x float> < float 6.553500e+04, float undef, float undef, float undef > ) ; <<4 x float>> [#uses=1]
- %tmp59 = tail call <4 x float> @llvm.x86.sse.max.ss( <4 x float> %tmp48, <4 x float> < float 0.000000e+00, float undef, float undef, float undef > ) ; <<4 x float>> [#uses=1]
- %tmp = tail call i32 @llvm.x86.sse.cvttss2si( <4 x float> %tmp59 ) ; <i32> [#uses=1]
- %tmp69 = trunc i32 %tmp to i16 ; <i16> [#uses=1]
- ret i16 %tmp69
-; CHECK: test2:
-; CHECK: addss LCPI2_
-; CHECK: mulss LCPI2_
-; CHECK: minss LCPI2_
-}
-
-declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>)
-
-declare <4 x float> @llvm.x86.sse.mul.ss(<4 x float>, <4 x float>)
-
-declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>)
-
-declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>)
-
-declare i32 @llvm.x86.sse.cvttss2si(<4 x float>)
-
-
-declare <4 x float> @llvm.x86.sse41.round.ss(<4 x float>, <4 x float>, i32)
-declare <4 x float> @f()
-
-define <4 x float> @test3(<4 x float> %A, float *%b, i32 %C) nounwind {
- %a = load float *%b
- %B = insertelement <4 x float> undef, float %a, i32 0
- %X = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %A, <4 x float> %B, i32 4)
- ret <4 x float> %X
-; CHECK: test3:
-; CHECK: roundss $4, (%eax), %xmm0
-}
-
-define <4 x float> @test4(<4 x float> %A, float *%b, i32 %C) nounwind {
- %a = load float *%b
- %B = insertelement <4 x float> undef, float %a, i32 0
- %q = call <4 x float> @f()
- %X = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %q, <4 x float> %B, i32 4)
- ret <4 x float> %X
-; CHECK: test4:
-; CHECK: movss (%eax), %xmm
-; CHECK: call
-; CHECK: roundss $4, %xmm{{.*}}, %xmm0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_zero-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_zero-2.ll
deleted file mode 100644
index cdb030e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_zero-2.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-
-define i32 @t() {
-entry:
- br i1 true, label %bb4743, label %bb1656
-bb1656: ; preds = %entry
- ret i32 0
-bb1664: ; preds = %entry
- br i1 false, label %bb5310, label %bb4743
-bb4743: ; preds = %bb1664
- %tmp5256 = bitcast <2 x i64> zeroinitializer to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp5257 = sub <8 x i16> %tmp5256, zeroinitializer ; <<8 x i16>> [#uses=1]
- %tmp5258 = bitcast <8 x i16> %tmp5257 to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp5265 = bitcast <2 x i64> %tmp5258 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %tmp5266 = call <16 x i8> @llvm.x86.sse2.packuswb.128( <8 x i16> %tmp5265, <8 x i16> zeroinitializer ) nounwind readnone ; <<8 x i16>> [#uses=1]
- %tmp5267 = bitcast <16 x i8> %tmp5266 to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp5294 = and <2 x i64> zeroinitializer, %tmp5267 ; <<2 x i64>> [#uses=1]
- br label %bb5310
-bb5310: ; preds = %bb4743, %bb1664
- %tmp5294.pn = phi <2 x i64> [ %tmp5294, %bb4743 ], [ zeroinitializer, %bb1664 ] ; <<2 x i64>> [#uses=0]
- ret i32 0
-}
-
-declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_zero.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_zero.ll
deleted file mode 100644
index ae5af58..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_zero.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | grep xorps | count 2
-
-define void @foo(<4 x float>* %P) {
- %T = load <4 x float>* %P ; <<4 x float>> [#uses=1]
- %S = fadd <4 x float> zeroinitializer, %T ; <<4 x float>> [#uses=1]
- store <4 x float> %S, <4 x float>* %P
- ret void
-}
-
-define void @bar(<4 x i32>* %P) {
- %T = load <4 x i32>* %P ; <<4 x i32>> [#uses=1]
- %S = add <4 x i32> zeroinitializer, %T ; <<4 x i32>> [#uses=1]
- store <4 x i32> %S, <4 x i32>* %P
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vec_zero_cse.ll b/libclamav/c++/llvm/test/CodeGen/X86/vec_zero_cse.ll
deleted file mode 100644
index 296378c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vec_zero_cse.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | grep pxor | count 1
-; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | grep xorps | count 1
-; RUN: llc < %s -relocation-model=static -march=x86 -mcpu=yonah | grep pcmpeqd | count 2
-
-@M1 = external global <1 x i64>
-@M2 = external global <2 x i32>
-
-@S1 = external global <2 x i64>
-@S2 = external global <4 x i32>
-
-define void @test() {
- store <1 x i64> zeroinitializer, <1 x i64>* @M1
- store <2 x i32> zeroinitializer, <2 x i32>* @M2
- ret void
-}
-
-define void @test2() {
- store <1 x i64> < i64 -1 >, <1 x i64>* @M1
- store <2 x i32> < i32 -1, i32 -1 >, <2 x i32>* @M2
- ret void
-}
-
-define void @test3() {
- store <2 x i64> zeroinitializer, <2 x i64>* @S1
- store <4 x i32> zeroinitializer, <4 x i32>* @S2
- ret void
-}
-
-define void @test4() {
- store <2 x i64> < i64 -1, i64 -1>, <2 x i64>* @S1
- store <4 x i32> < i32 -1, i32 -1, i32 -1, i32 -1 >, <4 x i32>* @S2
- ret void
-}
-
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vector-intrinsics.ll b/libclamav/c++/llvm/test/CodeGen/X86/vector-intrinsics.ll
deleted file mode 100644
index edf58b9..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vector-intrinsics.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep call | count 16
-
-declare <4 x double> @llvm.sin.v4f64(<4 x double> %p)
-declare <4 x double> @llvm.cos.v4f64(<4 x double> %p)
-declare <4 x double> @llvm.pow.v4f64(<4 x double> %p, <4 x double> %q)
-declare <4 x double> @llvm.powi.v4f64(<4 x double> %p, i32)
-
-define <4 x double> @foo(<4 x double> %p)
-{
- %t = call <4 x double> @llvm.sin.v4f64(<4 x double> %p)
- ret <4 x double> %t
-}
-define <4 x double> @goo(<4 x double> %p)
-{
- %t = call <4 x double> @llvm.cos.v4f64(<4 x double> %p)
- ret <4 x double> %t
-}
-define <4 x double> @moo(<4 x double> %p, <4 x double> %q)
-{
- %t = call <4 x double> @llvm.pow.v4f64(<4 x double> %p, <4 x double> %q)
- ret <4 x double> %t
-}
-define <4 x double> @zoo(<4 x double> %p, i32 %q)
-{
- %t = call <4 x double> @llvm.powi.v4f64(<4 x double> %p, i32 %q)
- ret <4 x double> %t
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vector-rem.ll b/libclamav/c++/llvm/test/CodeGen/X86/vector-rem.ll
deleted file mode 100644
index 51cd872..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vector-rem.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep div | count 8
-; RUN: llc < %s -march=x86-64 | grep fmodf | count 4
-
-define <4 x i32> @foo(<4 x i32> %t, <4 x i32> %u) {
- %m = srem <4 x i32> %t, %u
- ret <4 x i32> %m
-}
-define <4 x i32> @bar(<4 x i32> %t, <4 x i32> %u) {
- %m = urem <4 x i32> %t, %u
- ret <4 x i32> %m
-}
-define <4 x float> @qux(<4 x float> %t, <4 x float> %u) {
- %m = frem <4 x float> %t, %u
- ret <4 x float> %m
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vector-variable-idx.ll b/libclamav/c++/llvm/test/CodeGen/X86/vector-variable-idx.ll
deleted file mode 100644
index 2a4d18c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vector-variable-idx.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movss | count 2
-; PR2676
-
-define float @foo(<4 x float> %p, i32 %t) {
- %z = extractelement <4 x float> %p, i32 %t
- ret float %z
-}
-define <4 x float> @bar(<4 x float> %p, float %f, i32 %t) {
- %z = insertelement <4 x float> %p, float %f, i32 %t
- ret <4 x float> %z
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vector.ll b/libclamav/c++/llvm/test/CodeGen/X86/vector.ll
deleted file mode 100644
index 3fff849..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vector.ll
+++ /dev/null
@@ -1,156 +0,0 @@
-; Test that vectors are scalarized/lowered correctly.
-; RUN: llc < %s -march=x86 -mcpu=i386 > %t
-; RUN: llc < %s -march=x86 -mcpu=yonah > %t
-
-%d8 = type <8 x double>
-%f1 = type <1 x float>
-%f2 = type <2 x float>
-%f4 = type <4 x float>
-%f8 = type <8 x float>
-%i4 = type <4 x i32>
-
-
-;;; TEST HANDLING OF VARIOUS VECTOR SIZES
-
-define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) {
- %p = load %f1* %P ; <%f1> [#uses=1]
- %q = load %f1* %Q ; <%f1> [#uses=1]
- %R = fadd %f1 %p, %q ; <%f1> [#uses=1]
- store %f1 %R, %f1* %S
- ret void
-}
-
-define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) {
- %p = load %f2* %P ; <%f2> [#uses=1]
- %q = load %f2* %Q ; <%f2> [#uses=1]
- %R = fadd %f2 %p, %q ; <%f2> [#uses=1]
- store %f2 %R, %f2* %S
- ret void
-}
-
-define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
- %q = load %f4* %Q ; <%f4> [#uses=1]
- %R = fadd %f4 %p, %q ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %q = load %f8* %Q ; <%f8> [#uses=1]
- %R = fadd %f8 %p, %q ; <%f8> [#uses=1]
- store %f8 %R, %f8* %S
- ret void
-}
-
-define void @test_fmul(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %q = load %f8* %Q ; <%f8> [#uses=1]
- %R = fmul %f8 %p, %q ; <%f8> [#uses=1]
- store %f8 %R, %f8* %S
- ret void
-}
-
-define void @test_div(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %q = load %f8* %Q ; <%f8> [#uses=1]
- %R = fdiv %f8 %p, %q ; <%f8> [#uses=1]
- store %f8 %R, %f8* %S
- ret void
-}
-
-;;; TEST VECTOR CONSTRUCTS
-
-define void @test_cst(%f4* %P, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
- %R = fadd %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float 2.000000e+00, float 4.500000e+00 > ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_zero(%f4* %P, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
- %R = fadd %f4 %p, zeroinitializer ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_undef(%f4* %P, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
- %R = fadd %f4 %p, undef ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_constant_insert(%f4* %S) {
- %R = insertelement %f4 zeroinitializer, float 1.000000e+01, i32 0 ; <%f4> [#uses
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_variable_buildvector(float %F, %f4* %S) {
- %R = insertelement %f4 zeroinitializer, float %F, i32 0 ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define void @test_scalar_to_vector(float %F, %f4* %S) {
- %R = insertelement %f4 undef, float %F, i32 0 ; <%f4> [#uses=1]
- store %f4 %R, %f4* %S
- ret void
-}
-
-define float @test_extract_elt(%f8* %P) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %R = extractelement %f8 %p, i32 3 ; <float> [#uses=1]
- ret float %R
-}
-
-define double @test_extract_elt2(%d8* %P) {
- %p = load %d8* %P ; <%d8> [#uses=1]
- %R = extractelement %d8 %p, i32 3 ; <double> [#uses=1]
- ret double %R
-}
-
-define void @test_cast_1(%f4* %b, %i4* %a) {
- %tmp = load %f4* %b ; <%f4> [#uses=1]
- %tmp2 = fadd %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 > ; <%f4> [#uses=1]
- %tmp3 = bitcast %f4 %tmp2 to %i4 ; <%i4> [#uses=1]
- %tmp4 = add %i4 %tmp3, < i32 1, i32 2, i32 3, i32 4 > ; <%i4> [#uses=1]
- store %i4 %tmp4, %i4* %a
- ret void
-}
-
-define void @test_cast_2(%f8* %a, <8 x i32>* %b) {
- %T = load %f8* %a ; <%f8> [#uses=1]
- %T2 = bitcast %f8 %T to <8 x i32> ; <<8 x i32>> [#uses=1]
- store <8 x i32> %T2, <8 x i32>* %b
- ret void
-}
-
-
-;;; TEST IMPORTANT IDIOMS
-
-define void @splat(%f4* %P, %f4* %Q, float %X) {
- %tmp = insertelement %f4 undef, float %X, i32 0 ; <%f4> [#uses=1]
- %tmp2 = insertelement %f4 %tmp, float %X, i32 1 ; <%f4> [#uses=1]
- %tmp4 = insertelement %f4 %tmp2, float %X, i32 2 ; <%f4> [#uses=1]
- %tmp6 = insertelement %f4 %tmp4, float %X, i32 3 ; <%f4> [#uses=1]
- %q = load %f4* %Q ; <%f4> [#uses=1]
- %R = fadd %f4 %q, %tmp6 ; <%f4> [#uses=1]
- store %f4 %R, %f4* %P
- ret void
-}
-
-define void @splat_i4(%i4* %P, %i4* %Q, i32 %X) {
- %tmp = insertelement %i4 undef, i32 %X, i32 0 ; <%i4> [#uses=1]
- %tmp2 = insertelement %i4 %tmp, i32 %X, i32 1 ; <%i4> [#uses=1]
- %tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2 ; <%i4> [#uses=1]
- %tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3 ; <%i4> [#uses=1]
- %q = load %i4* %Q ; <%i4> [#uses=1]
- %R = add %i4 %q, %tmp6 ; <%i4> [#uses=1]
- store %i4 %R, %i4* %P
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vfcmp.ll b/libclamav/c++/llvm/test/CodeGen/X86/vfcmp.ll
deleted file mode 100644
index f5f5293..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vfcmp.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-; PR2620
-
-
-define void @t2(i32 %m_task_id, i32 %start_x, i32 %end_x) nounwind {
- %A = fcmp olt <2 x double> zeroinitializer, zeroinitializer ; <<2 x i64>>:1 [#uses=1]
- sext <2 x i1> %A to <2 x i64>
- extractelement <2 x i64> %1, i32 1 ; <i64>:2 [#uses=1]
- lshr i64 %2, 63 ; <i64>:3 [#uses=1]
- trunc i64 %3 to i1 ; <i1>:4 [#uses=1]
- zext i1 %4 to i8 ; <i8>:5 [#uses=1]
- insertelement <2 x i8> zeroinitializer, i8 %5, i32 1 ; <<2 x i8>>:6 [#uses=1]
- store <2 x i8> %6, <2 x i8>* null
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/volatile.ll b/libclamav/c++/llvm/test/CodeGen/X86/volatile.ll
deleted file mode 100644
index 5e1e0c8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/volatile.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=sse2 | grep movsd | count 5
-; RUN: llc < %s -march=x86 -mattr=sse2 -O0 | grep movsd | count 5
-
-@x = external global double
-
-define void @foo() nounwind {
- %a = volatile load double* @x
- volatile store double 0.0, double* @x
- volatile store double 0.0, double* @x
- %b = volatile load double* @x
- ret void
-}
-
-define void @bar() nounwind {
- %c = volatile load double* @x
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vortex-bug.ll b/libclamav/c++/llvm/test/CodeGen/X86/vortex-bug.ll
deleted file mode 100644
index 40f1117..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vortex-bug.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -march=x86-64
-
- %struct.blktkntype = type { i32, i32 }
- %struct.fieldstruc = type { [128 x i8], %struct.blktkntype*, i32, i32 }
-
-define fastcc i32 @Env_GetFieldStruc(i8* %FieldName, i32* %Status, %struct.fieldstruc* %FieldStruc) nounwind {
-entry:
- br label %bb137.i
-
-bb137.i: ; preds = %bb137.i, %entry
- %FieldName_addr.0209.rec.i = phi i64 [ %tmp139.rec.i, %bb137.i ], [ 0, %entry ] ; <i64> [#uses=1]
- %tmp147213.i = phi i32 [ %tmp147.i, %bb137.i ], [ 1, %entry ] ; <i32> [#uses=2]
- %tmp139.rec.i = add i64 %FieldName_addr.0209.rec.i, 1 ; <i64> [#uses=2]
- %tmp141142.i = sext i32 %tmp147213.i to i64 ; <i64> [#uses=0]
- %tmp147.i = add i32 %tmp147213.i, 1 ; <i32> [#uses=1]
- br i1 false, label %bb137.i, label %bb149.i.loopexit
-
-bb149.i.loopexit: ; preds = %bb137.i
- %tmp139.i = getelementptr i8* %FieldName, i64 %tmp139.rec.i ; <i8*> [#uses=0]
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vshift-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/vshift-1.ll
deleted file mode 100644
index ae845e0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vshift-1.ll
+++ /dev/null
@@ -1,79 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -disable-mmx | FileCheck %s
-
-; test vector shifts converted to proper SSE2 vector shifts when the shift
-; amounts are the same.
-
-define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
-entry:
-; CHECK: shift1a:
-; CHECK: psllq
- %shl = shl <2 x i64> %val, < i64 32, i64 32 >
- store <2 x i64> %shl, <2 x i64>* %dst
- ret void
-}
-
-define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, i64 %amt) nounwind {
-entry:
-; CHECK: shift1b:
-; CHECK: movd
-; CHECK-NEXT: psllq
- %0 = insertelement <2 x i64> undef, i64 %amt, i32 0
- %1 = insertelement <2 x i64> %0, i64 %amt, i32 1
- %shl = shl <2 x i64> %val, %1
- store <2 x i64> %shl, <2 x i64>* %dst
- ret void
-}
-
-
-define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
-entry:
-; CHECK: shift2a:
-; CHECK: pslld
- %shl = shl <4 x i32> %val, < i32 5, i32 5, i32 5, i32 5 >
- store <4 x i32> %shl, <4 x i32>* %dst
- ret void
-}
-
-define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
-entry:
-; CHECK: shift2b:
-; CHECK: movd
-; CHECK-NEXT: pslld
- %0 = insertelement <4 x i32> undef, i32 %amt, i32 0
- %1 = insertelement <4 x i32> %0, i32 %amt, i32 1
- %2 = insertelement <4 x i32> %1, i32 %amt, i32 2
- %3 = insertelement <4 x i32> %2, i32 %amt, i32 3
- %shl = shl <4 x i32> %val, %3
- store <4 x i32> %shl, <4 x i32>* %dst
- ret void
-}
-
-define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
-entry:
-; CHECK: shift3a:
-; CHECK: psllw
- %shl = shl <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
- store <8 x i16> %shl, <8 x i16>* %dst
- ret void
-}
-
-; Make sure the shift amount is properly zero extended.
-define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
-entry:
-; CHECK: shift3b:
-; CHECK: movzwl
-; CHECK: movd
-; CHECK-NEXT: psllw
- %0 = insertelement <8 x i16> undef, i16 %amt, i32 0
- %1 = insertelement <8 x i16> %0, i16 %amt, i32 1
- %2 = insertelement <8 x i16> %0, i16 %amt, i32 2
- %3 = insertelement <8 x i16> %0, i16 %amt, i32 3
- %4 = insertelement <8 x i16> %0, i16 %amt, i32 4
- %5 = insertelement <8 x i16> %0, i16 %amt, i32 5
- %6 = insertelement <8 x i16> %0, i16 %amt, i32 6
- %7 = insertelement <8 x i16> %0, i16 %amt, i32 7
- %shl = shl <8 x i16> %val, %7
- store <8 x i16> %shl, <8 x i16>* %dst
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vshift-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/vshift-2.ll
deleted file mode 100644
index 36feb11..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vshift-2.ll
+++ /dev/null
@@ -1,78 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -disable-mmx | FileCheck %s
-
-; test vector shifts converted to proper SSE2 vector shifts when the shift
-; amounts are the same.
-
-define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
-entry:
-; CHECK: shift1a:
-; CHECK: psrlq
- %lshr = lshr <2 x i64> %val, < i64 32, i64 32 >
- store <2 x i64> %lshr, <2 x i64>* %dst
- ret void
-}
-
-define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, i64 %amt) nounwind {
-entry:
-; CHECK: shift1b:
-; CHECK: movd
-; CHECK-NEXT: psrlq
- %0 = insertelement <2 x i64> undef, i64 %amt, i32 0
- %1 = insertelement <2 x i64> %0, i64 %amt, i32 1
- %lshr = lshr <2 x i64> %val, %1
- store <2 x i64> %lshr, <2 x i64>* %dst
- ret void
-}
-
-define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
-entry:
-; CHECK: shift2a:
-; CHECK: psrld
- %lshr = lshr <4 x i32> %val, < i32 17, i32 17, i32 17, i32 17 >
- store <4 x i32> %lshr, <4 x i32>* %dst
- ret void
-}
-
-define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
-entry:
-; CHECK: shift2b:
-; CHECK: movd
-; CHECK-NEXT: psrld
- %0 = insertelement <4 x i32> undef, i32 %amt, i32 0
- %1 = insertelement <4 x i32> %0, i32 %amt, i32 1
- %2 = insertelement <4 x i32> %1, i32 %amt, i32 2
- %3 = insertelement <4 x i32> %2, i32 %amt, i32 3
- %lshr = lshr <4 x i32> %val, %3
- store <4 x i32> %lshr, <4 x i32>* %dst
- ret void
-}
-
-
-define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
-entry:
-; CHECK: shift3a:
-; CHECK: psrlw
- %lshr = lshr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
- store <8 x i16> %lshr, <8 x i16>* %dst
- ret void
-}
-
-; properly zero extend the shift amount
-define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
-entry:
-; CHECK: shift3b:
-; CHECK: movzwl
-; CHECK: movd
-; CHECK-NEXT: psrlw
- %0 = insertelement <8 x i16> undef, i16 %amt, i32 0
- %1 = insertelement <8 x i16> %0, i16 %amt, i32 1
- %2 = insertelement <8 x i16> %0, i16 %amt, i32 2
- %3 = insertelement <8 x i16> %0, i16 %amt, i32 3
- %4 = insertelement <8 x i16> %0, i16 %amt, i32 4
- %5 = insertelement <8 x i16> %0, i16 %amt, i32 5
- %6 = insertelement <8 x i16> %0, i16 %amt, i32 6
- %7 = insertelement <8 x i16> %0, i16 %amt, i32 7
- %lshr = lshr <8 x i16> %val, %7
- store <8 x i16> %lshr, <8 x i16>* %dst
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vshift-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/vshift-3.ll
deleted file mode 100644
index 20d3f48..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vshift-3.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -disable-mmx | FileCheck %s
-
-; test vector shifts converted to proper SSE2 vector shifts when the shift
-; amounts are the same.
-
-; Note that x86 does have ashr
-
-; shift1a can't use a packed shift
-define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
-entry:
-; CHECK: shift1a:
-; CHECK: sarl
- %ashr = ashr <2 x i64> %val, < i64 32, i64 32 >
- store <2 x i64> %ashr, <2 x i64>* %dst
- ret void
-}
-
-define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
-entry:
-; CHECK: shift2a:
-; CHECK: psrad $5
- %ashr = ashr <4 x i32> %val, < i32 5, i32 5, i32 5, i32 5 >
- store <4 x i32> %ashr, <4 x i32>* %dst
- ret void
-}
-
-define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
-entry:
-; CHECK: shift2b:
-; CHECK: movd
-; CHECK-NEXT: psrad
- %0 = insertelement <4 x i32> undef, i32 %amt, i32 0
- %1 = insertelement <4 x i32> %0, i32 %amt, i32 1
- %2 = insertelement <4 x i32> %1, i32 %amt, i32 2
- %3 = insertelement <4 x i32> %2, i32 %amt, i32 3
- %ashr = ashr <4 x i32> %val, %3
- store <4 x i32> %ashr, <4 x i32>* %dst
- ret void
-}
-
-define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
-entry:
-; CHECK: shift3a:
-; CHECK: psraw $5
- %ashr = ashr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
- store <8 x i16> %ashr, <8 x i16>* %dst
- ret void
-}
-
-define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
-entry:
-; CHECK: shift3b:
-; CHECK: movzwl
-; CHECK: movd
-; CHECK-NEXT: psraw
- %0 = insertelement <8 x i16> undef, i16 %amt, i32 0
- %1 = insertelement <8 x i16> %0, i16 %amt, i32 1
- %2 = insertelement <8 x i16> %0, i16 %amt, i32 2
- %3 = insertelement <8 x i16> %0, i16 %amt, i32 3
- %4 = insertelement <8 x i16> %0, i16 %amt, i32 4
- %5 = insertelement <8 x i16> %0, i16 %amt, i32 5
- %6 = insertelement <8 x i16> %0, i16 %amt, i32 6
- %7 = insertelement <8 x i16> %0, i16 %amt, i32 7
- %ashr = ashr <8 x i16> %val, %7
- store <8 x i16> %ashr, <8 x i16>* %dst
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vshift-4.ll b/libclamav/c++/llvm/test/CodeGen/X86/vshift-4.ll
deleted file mode 100644
index 9773cbe..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vshift-4.ll
+++ /dev/null
@@ -1,85 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -disable-mmx | FileCheck %s
-
-; test vector shifts converted to proper SSE2 vector shifts when the shift
-; amounts are the same when using a shuffle splat.
-
-define void @shift1a(<2 x i64> %val, <2 x i64>* %dst, <2 x i64> %sh) nounwind {
-entry:
-; CHECK: shift1a:
-; CHECK: psllq
- %shamt = shufflevector <2 x i64> %sh, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
- %shl = shl <2 x i64> %val, %shamt
- store <2 x i64> %shl, <2 x i64>* %dst
- ret void
-}
-
-; shift1b can't use a packed shift
-define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, <2 x i64> %sh) nounwind {
-entry:
-; CHECK: shift1b:
-; CHECK: shll
- %shamt = shufflevector <2 x i64> %sh, <2 x i64> undef, <2 x i32> <i32 0, i32 1>
- %shl = shl <2 x i64> %val, %shamt
- store <2 x i64> %shl, <2 x i64>* %dst
- ret void
-}
-
-define void @shift2a(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
-entry:
-; CHECK: shift2a:
-; CHECK: pslld
- %shamt = shufflevector <2 x i32> %amt, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
- %shl = shl <4 x i32> %val, %shamt
- store <4 x i32> %shl, <4 x i32>* %dst
- ret void
-}
-
-define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
-entry:
-; CHECK: shift2b:
-; CHECK: pslld
- %shamt = shufflevector <2 x i32> %amt, <2 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 1, i32 1>
- %shl = shl <4 x i32> %val, %shamt
- store <4 x i32> %shl, <4 x i32>* %dst
- ret void
-}
-
-define void @shift2c(<4 x i32> %val, <4 x i32>* %dst, <2 x i32> %amt) nounwind {
-entry:
-; CHECK: shift2c:
-; CHECK: pslld
- %shamt = shufflevector <2 x i32> %amt, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
- %shl = shl <4 x i32> %val, %shamt
- store <4 x i32> %shl, <4 x i32>* %dst
- ret void
-}
-
-define void @shift3a(<8 x i16> %val, <8 x i16>* %dst, <8 x i16> %amt) nounwind {
-entry:
-; CHECK: shift3a:
-; CHECK: movzwl
-; CHECK: psllw
- %shamt = shufflevector <8 x i16> %amt, <8 x i16> undef, <8 x i32> <i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6>
- %shl = shl <8 x i16> %val, %shamt
- store <8 x i16> %shl, <8 x i16>* %dst
- ret void
-}
-
-define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
-entry:
-; CHECK: shift3b:
-; CHECK: movzwl
-; CHECK: psllw
- %0 = insertelement <8 x i16> undef, i16 %amt, i32 0
- %1 = insertelement <8 x i16> %0, i16 %amt, i32 1
- %2 = insertelement <8 x i16> %0, i16 %amt, i32 2
- %3 = insertelement <8 x i16> %0, i16 %amt, i32 3
- %4 = insertelement <8 x i16> %0, i16 %amt, i32 4
- %5 = insertelement <8 x i16> %0, i16 %amt, i32 5
- %6 = insertelement <8 x i16> %0, i16 %amt, i32 6
- %7 = insertelement <8 x i16> %0, i16 %amt, i32 7
- %shl = shl <8 x i16> %val, %7
- store <8 x i16> %shl, <8 x i16>* %dst
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vshift-5.ll b/libclamav/c++/llvm/test/CodeGen/X86/vshift-5.ll
deleted file mode 100644
index a543f38..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vshift-5.ll
+++ /dev/null
@@ -1,56 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -disable-mmx | FileCheck %s
-
-; When loading the shift amount from memory, avoid generating the splat.
-
-define void @shift5a(<4 x i32> %val, <4 x i32>* %dst, i32* %pamt) nounwind {
-entry:
-; CHECK: shift5a:
-; CHECK: movd
-; CHECK-NEXT: pslld
- %amt = load i32* %pamt
- %tmp0 = insertelement <4 x i32> undef, i32 %amt, i32 0
- %shamt = shufflevector <4 x i32> %tmp0, <4 x i32> undef, <4 x i32> zeroinitializer
- %shl = shl <4 x i32> %val, %shamt
- store <4 x i32> %shl, <4 x i32>* %dst
- ret void
-}
-
-
-define void @shift5b(<4 x i32> %val, <4 x i32>* %dst, i32* %pamt) nounwind {
-entry:
-; CHECK: shift5b:
-; CHECK: movd
-; CHECK-NEXT: psrad
- %amt = load i32* %pamt
- %tmp0 = insertelement <4 x i32> undef, i32 %amt, i32 0
- %shamt = shufflevector <4 x i32> %tmp0, <4 x i32> undef, <4 x i32> zeroinitializer
- %shr = ashr <4 x i32> %val, %shamt
- store <4 x i32> %shr, <4 x i32>* %dst
- ret void
-}
-
-
-define void @shift5c(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
-entry:
-; CHECK: shift5c:
-; CHECK: movd
-; CHECK-NEXT: pslld
- %tmp0 = insertelement <4 x i32> undef, i32 %amt, i32 0
- %shamt = shufflevector <4 x i32> %tmp0, <4 x i32> undef, <4 x i32> zeroinitializer
- %shl = shl <4 x i32> %val, %shamt
- store <4 x i32> %shl, <4 x i32>* %dst
- ret void
-}
-
-
-define void @shift5d(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
-entry:
-; CHECK: shift5d:
-; CHECK: movd
-; CHECK-NEXT: psrad
- %tmp0 = insertelement <4 x i32> undef, i32 %amt, i32 0
- %shamt = shufflevector <4 x i32> %tmp0, <4 x i32> undef, <4 x i32> zeroinitializer
- %shr = ashr <4 x i32> %val, %shamt
- store <4 x i32> %shr, <4 x i32>* %dst
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vshift_scalar.ll b/libclamav/c++/llvm/test/CodeGen/X86/vshift_scalar.ll
deleted file mode 100644
index 9dd8478..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vshift_scalar.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s
-
-; Legalization test that requires scalarizing a vector.
-
-define void @update(<1 x i32> %val, <1 x i32>* %dst) nounwind {
-entry:
- %shl = shl <1 x i32> %val, < i32 2>
- %shr = ashr <1 x i32> %val, < i32 4>
- store <1 x i32> %shr, <1 x i32>* %dst
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vshift_split.ll b/libclamav/c++/llvm/test/CodeGen/X86/vshift_split.ll
deleted file mode 100644
index 359d36d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vshift_split.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2
-
-; Example that requires splitting and expanding a vector shift.
-define <2 x i64> @update(<2 x i64> %val) nounwind readnone {
-entry:
- %shr = lshr <2 x i64> %val, < i64 2, i64 3 >
- ret <2 x i64> %shr
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vshift_split2.ll b/libclamav/c++/llvm/test/CodeGen/X86/vshift_split2.ll
deleted file mode 100644
index 0f8c2b8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vshift_split2.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mcpu=yonah
-
-; Legalization example that requires splitting a large vector into smaller pieces.
-
-define void @update(<8 x i32> %val, <8 x i32>* %dst) nounwind {
-entry:
- %shl = shl <8 x i32> %val, < i32 2, i32 2, i32 2, i32 2, i32 4, i32 4, i32 4, i32 4 >
- %shr = ashr <8 x i32> %val, < i32 2, i32 2, i32 2, i32 2, i32 4, i32 4, i32 4, i32 4 >
- store <8 x i32> %shr, <8 x i32>* %dst
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/vsplit-and.ll b/libclamav/c++/llvm/test/CodeGen/X86/vsplit-and.ll
deleted file mode 100644
index a247c6e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/vsplit-and.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llc < %s -march=x86 -disable-mmx | FileCheck %s
-
-
-define void @t(<2 x i64>* %dst, <2 x i64> %src1, <2 x i64> %src2) nounwind readonly {
-; CHECK: andb
- %cmp1 = icmp ne <2 x i64> %src1, zeroinitializer
- %cmp2 = icmp ne <2 x i64> %src2, zeroinitializer
- %t1 = and <2 x i1> %cmp1, %cmp2
- %t2 = sext <2 x i1> %t1 to <2 x i64>
- store <2 x i64> %t2, <2 x i64>* %dst
- ret void
-}
-
-define void @t2(<3 x i64>* %dst, <3 x i64> %src1, <3 x i64> %src2) nounwind readonly {
-; CHECK: andb
- %cmp1 = icmp ne <3 x i64> %src1, zeroinitializer
- %cmp2 = icmp ne <3 x i64> %src2, zeroinitializer
- %t1 = and <3 x i1> %cmp1, %cmp2
- %t2 = sext <3 x i1> %t1 to <3 x i64>
- store <3 x i64> %t2, <3 x i64>* %dst
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/weak.ll b/libclamav/c++/llvm/test/CodeGen/X86/weak.ll
deleted file mode 100644
index 8590e8d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/weak.ll
+++ /dev/null
@@ -1,4 +0,0 @@
-; RUN: llc < %s -march=x86
-@a = extern_weak global i32		; <i32*> [#uses=1]
-@b = global i32* @a		; <i32**> [#uses=0]
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/wide-integer-fold.ll b/libclamav/c++/llvm/test/CodeGen/X86/wide-integer-fold.ll
deleted file mode 100644
index b3b4d24..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/wide-integer-fold.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-; CHECK: movq $-65535, %rax
-
-; DAGCombiner should fold this to a simple constant.
-
-define i64 @foo(i192 %a) nounwind {
- %t = or i192 %a, -22300404916163702203072254898040925442801665
- %s = and i192 %t, -22300404916163702203072254898040929737768960
- %u = lshr i192 %s, 128
- %v = trunc i192 %u to i64
- ret i64 %v
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-1.ll
deleted file mode 100644
index f8d0690..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-1.ll
+++ /dev/null
@@ -1,46 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse42 -disable-mmx | FileCheck %s
-
-; Widen a v3i8 to v16i8 to use a vector add
-
-define void @update(<3 x i8>* %dst, <3 x i8>* %src, i32 %n) nounwind {
-entry:
-; CHECK-NOT: pextrw
-; CHECK: paddb
-; CHECK: pextrb
- %dst.addr = alloca <3 x i8>* ; <<3 x i8>**> [#uses=2]
- %src.addr = alloca <3 x i8>* ; <<3 x i8>**> [#uses=2]
- %n.addr = alloca i32 ; <i32*> [#uses=2]
- %i = alloca i32, align 4 ; <i32*> [#uses=6]
- store <3 x i8>* %dst, <3 x i8>** %dst.addr
- store <3 x i8>* %src, <3 x i8>** %src.addr
- store i32 %n, i32* %n.addr
- store i32 0, i32* %i
- br label %forcond
-
-forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
- %tmp1 = load i32* %n.addr ; <i32> [#uses=1]
- %cmp = icmp slt i32 %tmp, %tmp1 ; <i1> [#uses=1]
- br i1 %cmp, label %forbody, label %afterfor
-
-forbody: ; preds = %forcond
- %tmp2 = load i32* %i ; <i32> [#uses=1]
- %tmp3 = load <3 x i8>** %dst.addr ; <<3 x i8>*> [#uses=1]
- %arrayidx = getelementptr <3 x i8>* %tmp3, i32 %tmp2 ; <<3 x i8>*> [#uses=1]
- %tmp4 = load i32* %i ; <i32> [#uses=1]
- %tmp5 = load <3 x i8>** %src.addr ; <<3 x i8>*> [#uses=1]
- %arrayidx6 = getelementptr <3 x i8>* %tmp5, i32 %tmp4 ; <<3 x i8>*> [#uses=1]
- %tmp7 = load <3 x i8>* %arrayidx6 ; <<3 x i8>> [#uses=1]
- %add = add <3 x i8> %tmp7, < i8 1, i8 1, i8 1 > ; <<3 x i8>> [#uses=1]
- store <3 x i8> %add, <3 x i8>* %arrayidx
- br label %forinc
-
-forinc: ; preds = %forbody
- %tmp8 = load i32* %i ; <i32> [#uses=1]
- %inc = add i32 %tmp8, 1 ; <i32> [#uses=1]
- store i32 %inc, i32* %i
- br label %forcond
-
-afterfor: ; preds = %forcond
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-2.ll
deleted file mode 100644
index fdecaa3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-2.ll
+++ /dev/null
@@ -1,59 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse42 -disable-mmx | FileCheck %s
-; CHECK: paddb
-; CHECK: pand
-
-; widen v8i8 to v16i8 (checks even power of 2 widening with add & and)
-
-define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
-entry:
- %dst_i.addr = alloca i64* ; <i64**> [#uses=2]
- %src_i.addr = alloca i64* ; <i64**> [#uses=2]
- %n.addr = alloca i32 ; <i32*> [#uses=2]
- %i = alloca i32, align 4 ; <i32*> [#uses=8]
- %dst = alloca <8 x i8>*, align 4 ; <<8 x i8>**> [#uses=2]
- %src = alloca <8 x i8>*, align 4 ; <<8 x i8>**> [#uses=2]
- store i64* %dst_i, i64** %dst_i.addr
- store i64* %src_i, i64** %src_i.addr
- store i32 %n, i32* %n.addr
- store i32 0, i32* %i
- br label %forcond
-
-forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
- %tmp1 = load i32* %n.addr ; <i32> [#uses=1]
- %cmp = icmp slt i32 %tmp, %tmp1 ; <i1> [#uses=1]
- br i1 %cmp, label %forbody, label %afterfor
-
-forbody: ; preds = %forcond
- %tmp2 = load i32* %i ; <i32> [#uses=1]
- %tmp3 = load i64** %dst_i.addr ; <i64*> [#uses=1]
- %arrayidx = getelementptr i64* %tmp3, i32 %tmp2 ; <i64*> [#uses=1]
- %conv = bitcast i64* %arrayidx to <8 x i8>* ; <<8 x i8>*> [#uses=1]
- store <8 x i8>* %conv, <8 x i8>** %dst
- %tmp4 = load i32* %i ; <i32> [#uses=1]
- %tmp5 = load i64** %src_i.addr ; <i64*> [#uses=1]
- %arrayidx6 = getelementptr i64* %tmp5, i32 %tmp4 ; <i64*> [#uses=1]
- %conv7 = bitcast i64* %arrayidx6 to <8 x i8>* ; <<8 x i8>*> [#uses=1]
- store <8 x i8>* %conv7, <8 x i8>** %src
- %tmp8 = load i32* %i ; <i32> [#uses=1]
- %tmp9 = load <8 x i8>** %dst ; <<8 x i8>*> [#uses=1]
- %arrayidx10 = getelementptr <8 x i8>* %tmp9, i32 %tmp8 ; <<8 x i8>*> [#uses=1]
- %tmp11 = load i32* %i ; <i32> [#uses=1]
- %tmp12 = load <8 x i8>** %src ; <<8 x i8>*> [#uses=1]
- %arrayidx13 = getelementptr <8 x i8>* %tmp12, i32 %tmp11 ; <<8 x i8>*> [#uses=1]
- %tmp14 = load <8 x i8>* %arrayidx13 ; <<8 x i8>> [#uses=1]
- %add = add <8 x i8> %tmp14, < i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1 > ; <<8 x i8>> [#uses=1]
- %and = and <8 x i8> %add, < i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4 > ; <<8 x i8>> [#uses=1]
- store <8 x i8> %and, <8 x i8>* %arrayidx10
- br label %forinc
-
-forinc: ; preds = %forbody
- %tmp15 = load i32* %i ; <i32> [#uses=1]
- %inc = add i32 %tmp15, 1 ; <i32> [#uses=1]
- store i32 %inc, i32* %i
- br label %forcond
-
-afterfor: ; preds = %forcond
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-3.ll
deleted file mode 100644
index 1f2c250..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-3.ll
+++ /dev/null
@@ -1,52 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse42 -disable-mmx -post-RA-scheduler=true | FileCheck %s
-; CHECK: paddw
-; CHECK: pextrw
-; CHECK: movd
-
-; Widen a v3i16 to v8i16 to do a vector add
-
-@.str = internal constant [4 x i8] c"%d \00"		; <[4 x i8]*> [#uses=1]
-@.str1 = internal constant [2 x i8] c"\0A\00"		; <[2 x i8]*> [#uses=1]
-
-define void @update(<3 x i16>* %dst, <3 x i16>* %src, i32 %n) nounwind {
-entry:
- %dst.addr = alloca <3 x i16>* ; <<3 x i16>**> [#uses=2]
- %src.addr = alloca <3 x i16>* ; <<3 x i16>**> [#uses=2]
- %n.addr = alloca i32 ; <i32*> [#uses=2]
- %v = alloca <3 x i16>, align 8 ; <<3 x i16>*> [#uses=1]
- %i = alloca i32, align 4 ; <i32*> [#uses=6]
- store <3 x i16>* %dst, <3 x i16>** %dst.addr
- store <3 x i16>* %src, <3 x i16>** %src.addr
- store i32 %n, i32* %n.addr
- store <3 x i16> < i16 1, i16 1, i16 1 >, <3 x i16>* %v
- store i32 0, i32* %i
- br label %forcond
-
-forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
- %tmp1 = load i32* %n.addr ; <i32> [#uses=1]
- %cmp = icmp slt i32 %tmp, %tmp1 ; <i1> [#uses=1]
- br i1 %cmp, label %forbody, label %afterfor
-
-forbody: ; preds = %forcond
- %tmp2 = load i32* %i ; <i32> [#uses=1]
- %tmp3 = load <3 x i16>** %dst.addr ; <<3 x i16>*> [#uses=1]
- %arrayidx = getelementptr <3 x i16>* %tmp3, i32 %tmp2 ; <<3 x i16>*> [#uses=1]
- %tmp4 = load i32* %i ; <i32> [#uses=1]
- %tmp5 = load <3 x i16>** %src.addr ; <<3 x i16>*> [#uses=1]
- %arrayidx6 = getelementptr <3 x i16>* %tmp5, i32 %tmp4 ; <<3 x i16>*> [#uses=1]
- %tmp7 = load <3 x i16>* %arrayidx6 ; <<3 x i16>> [#uses=1]
- %add = add <3 x i16> %tmp7, < i16 1, i16 1, i16 1 > ; <<3 x i16>> [#uses=1]
- store <3 x i16> %add, <3 x i16>* %arrayidx
- br label %forinc
-
-forinc: ; preds = %forbody
- %tmp8 = load i32* %i ; <i32> [#uses=1]
- %inc = add i32 %tmp8, 1 ; <i32> [#uses=1]
- store i32 %inc, i32* %i
- br label %forcond
-
-afterfor: ; preds = %forcond
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-4.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-4.ll
deleted file mode 100644
index f7506ae..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-4.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse42 -disable-mmx | FileCheck %s
-; CHECK: psubw
-; CHECK-NEXT: pmullw
-
-; Widen a v5i16 to v8i16 to do a vector sub and multiple
-
-define void @update(<5 x i16>* %dst, <5 x i16>* %src, i32 %n) nounwind {
-entry:
- %dst.addr = alloca <5 x i16>* ; <<5 x i16>**> [#uses=2]
- %src.addr = alloca <5 x i16>* ; <<5 x i16>**> [#uses=2]
- %n.addr = alloca i32 ; <i32*> [#uses=2]
- %v = alloca <5 x i16>, align 16 ; <<5 x i16>*> [#uses=1]
- %i = alloca i32, align 4 ; <i32*> [#uses=6]
- store <5 x i16>* %dst, <5 x i16>** %dst.addr
- store <5 x i16>* %src, <5 x i16>** %src.addr
- store i32 %n, i32* %n.addr
- store <5 x i16> < i16 1, i16 1, i16 1, i16 0, i16 0 >, <5 x i16>* %v
- store i32 0, i32* %i
- br label %forcond
-
-forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
- %tmp1 = load i32* %n.addr ; <i32> [#uses=1]
- %cmp = icmp slt i32 %tmp, %tmp1 ; <i1> [#uses=1]
- br i1 %cmp, label %forbody, label %afterfor
-
-forbody: ; preds = %forcond
- %tmp2 = load i32* %i ; <i32> [#uses=1]
- %tmp3 = load <5 x i16>** %dst.addr ; <<5 x i16>*> [#uses=1]
- %arrayidx = getelementptr <5 x i16>* %tmp3, i32 %tmp2 ; <<5 x i16>*> [#uses=1]
- %tmp4 = load i32* %i ; <i32> [#uses=1]
- %tmp5 = load <5 x i16>** %src.addr ; <<5 x i16>*> [#uses=1]
- %arrayidx6 = getelementptr <5 x i16>* %tmp5, i32 %tmp4 ; <<5 x i16>*> [#uses=1]
- %tmp7 = load <5 x i16>* %arrayidx6 ; <<5 x i16>> [#uses=1]
- %sub = sub <5 x i16> %tmp7, < i16 271, i16 271, i16 271, i16 271, i16 271 > ; <<5 x i16>> [#uses=1]
- %mul = mul <5 x i16> %sub, < i16 2, i16 2, i16 2, i16 2, i16 2 > ; <<5 x i16>> [#uses=1]
- store <5 x i16> %mul, <5 x i16>* %arrayidx
- br label %forinc
-
-forinc: ; preds = %forbody
- %tmp8 = load i32* %i ; <i32> [#uses=1]
- %inc = add i32 %tmp8, 1 ; <i32> [#uses=1]
- store i32 %inc, i32* %i
- br label %forcond
-
-afterfor: ; preds = %forcond
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-5.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-5.ll
deleted file mode 100644
index f7f3408..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-5.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse42 -disable-mmx | FileCheck %s
-; CHECK: movaps
-; CHECK: pmulld
-; CHECK: psubd
-
-; widen a v3i32 to v4i32 to do a vector multiple and a subtraction
-
-define void @update(<3 x i32>* %dst, <3 x i32>* %src, i32 %n) nounwind {
-entry:
- %dst.addr = alloca <3 x i32>* ; <<3 x i32>**> [#uses=2]
- %src.addr = alloca <3 x i32>* ; <<3 x i32>**> [#uses=2]
- %n.addr = alloca i32 ; <i32*> [#uses=2]
- %v = alloca <3 x i32>, align 16 ; <<3 x i32>*> [#uses=1]
- %i = alloca i32, align 4 ; <i32*> [#uses=6]
- store <3 x i32>* %dst, <3 x i32>** %dst.addr
- store <3 x i32>* %src, <3 x i32>** %src.addr
- store i32 %n, i32* %n.addr
- store <3 x i32> < i32 1, i32 1, i32 1 >, <3 x i32>* %v
- store i32 0, i32* %i
- br label %forcond
-
-forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
- %tmp1 = load i32* %n.addr ; <i32> [#uses=1]
- %cmp = icmp slt i32 %tmp, %tmp1 ; <i1> [#uses=1]
- br i1 %cmp, label %forbody, label %afterfor
-
-forbody: ; preds = %forcond
- %tmp2 = load i32* %i ; <i32> [#uses=1]
- %tmp3 = load <3 x i32>** %dst.addr ; <<3 x i32>*> [#uses=1]
- %arrayidx = getelementptr <3 x i32>* %tmp3, i32 %tmp2 ; <<3 x i32>*> [#uses=1]
- %tmp4 = load i32* %i ; <i32> [#uses=1]
- %tmp5 = load <3 x i32>** %src.addr ; <<3 x i32>*> [#uses=1]
- %arrayidx6 = getelementptr <3 x i32>* %tmp5, i32 %tmp4 ; <<3 x i32>*> [#uses=1]
- %tmp7 = load <3 x i32>* %arrayidx6 ; <<3 x i32>> [#uses=1]
- %mul = mul <3 x i32> %tmp7, < i32 4, i32 4, i32 4 > ; <<3 x i32>> [#uses=1]
- %sub = sub <3 x i32> %mul, < i32 3, i32 3, i32 3 > ; <<3 x i32>> [#uses=1]
- store <3 x i32> %sub, <3 x i32>* %arrayidx
- br label %forinc
-
-forinc: ; preds = %forbody
- %tmp8 = load i32* %i ; <i32> [#uses=1]
- %inc = add i32 %tmp8, 1 ; <i32> [#uses=1]
- store i32 %inc, i32* %i
- br label %forcond
-
-afterfor: ; preds = %forcond
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-6.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-6.ll
deleted file mode 100644
index 538123f..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_arith-6.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse42 -disable-mmx | FileCheck %s
-; CHECK: mulps
-; CHECK: addps
-
-; widen a v3f32 to vfi32 to do a vector multiple and an add
-
-define void @update(<3 x float>* %dst, <3 x float>* %src, i32 %n) nounwind {
-entry:
- %dst.addr = alloca <3 x float>* ; <<3 x float>**> [#uses=2]
- %src.addr = alloca <3 x float>* ; <<3 x float>**> [#uses=2]
- %n.addr = alloca i32 ; <i32*> [#uses=2]
- %v = alloca <3 x float>, align 16 ; <<3 x float>*> [#uses=2]
- %i = alloca i32, align 4 ; <i32*> [#uses=6]
- store <3 x float>* %dst, <3 x float>** %dst.addr
- store <3 x float>* %src, <3 x float>** %src.addr
- store i32 %n, i32* %n.addr
- store <3 x float> < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00 >, <3 x float>* %v
- store i32 0, i32* %i
- br label %forcond
-
-forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
- %tmp1 = load i32* %n.addr ; <i32> [#uses=1]
- %cmp = icmp slt i32 %tmp, %tmp1 ; <i1> [#uses=1]
- br i1 %cmp, label %forbody, label %afterfor
-
-forbody: ; preds = %forcond
- %tmp2 = load i32* %i ; <i32> [#uses=1]
- %tmp3 = load <3 x float>** %dst.addr ; <<3 x float>*> [#uses=1]
- %arrayidx = getelementptr <3 x float>* %tmp3, i32 %tmp2 ; <<3 x float>*> [#uses=1]
- %tmp4 = load i32* %i ; <i32> [#uses=1]
- %tmp5 = load <3 x float>** %src.addr ; <<3 x float>*> [#uses=1]
- %arrayidx6 = getelementptr <3 x float>* %tmp5, i32 %tmp4 ; <<3 x float>*> [#uses=1]
- %tmp7 = load <3 x float>* %arrayidx6 ; <<3 x float>> [#uses=1]
- %tmp8 = load <3 x float>* %v ; <<3 x float>> [#uses=1]
- %mul = fmul <3 x float> %tmp7, %tmp8 ; <<3 x float>> [#uses=1]
- %add = fadd <3 x float> %mul, < float 0x409EE02900000000, float 0x409EE02900000000, float 0x409EE02900000000 > ; <<3 x float>> [#uses=1]
- store <3 x float> %add, <3 x float>* %arrayidx
- br label %forinc
-
-forinc: ; preds = %forbody
- %tmp9 = load i32* %i ; <i32> [#uses=1]
- %inc = add i32 %tmp9, 1 ; <i32> [#uses=1]
- store i32 %inc, i32* %i
- br label %forcond
-
-afterfor: ; preds = %forcond
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-1.ll
deleted file mode 100644
index d4ab174..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-1.ll
+++ /dev/null
@@ -1,44 +0,0 @@
-; RUN: llc -march=x86 -mattr=+sse42 < %s -disable-mmx | FileCheck %s
-; CHECK: paddw
-; CHECK: pextrd
-; CHECK: movd
-
-; bitcast a v4i16 to v2i32
-
-define void @convert(<2 x i32>* %dst, <4 x i16>* %src) nounwind {
-entry:
- %dst.addr = alloca <2 x i32>* ; <<2 x i32>**> [#uses=2]
- %src.addr = alloca <4 x i16>* ; <<4 x i16>**> [#uses=2]
- %i = alloca i32, align 4 ; <i32*> [#uses=6]
- store <2 x i32>* %dst, <2 x i32>** %dst.addr
- store <4 x i16>* %src, <4 x i16>** %src.addr
- store i32 0, i32* %i
- br label %forcond
-
-forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
- %cmp = icmp slt i32 %tmp, 4 ; <i1> [#uses=1]
- br i1 %cmp, label %forbody, label %afterfor
-
-forbody: ; preds = %forcond
- %tmp1 = load i32* %i ; <i32> [#uses=1]
- %tmp2 = load <2 x i32>** %dst.addr ; <<2 x i32>*> [#uses=1]
- %arrayidx = getelementptr <2 x i32>* %tmp2, i32 %tmp1 ; <<2 x i32>*> [#uses=1]
- %tmp3 = load i32* %i ; <i32> [#uses=1]
- %tmp4 = load <4 x i16>** %src.addr ; <<4 x i16>*> [#uses=1]
- %arrayidx5 = getelementptr <4 x i16>* %tmp4, i32 %tmp3 ; <<4 x i16>*> [#uses=1]
- %tmp6 = load <4 x i16>* %arrayidx5 ; <<4 x i16>> [#uses=1]
- %add = add <4 x i16> %tmp6, < i16 1, i16 1, i16 1, i16 1 > ; <<4 x i16>> [#uses=1]
- %conv = bitcast <4 x i16> %add to <2 x i32> ; <<2 x i32>> [#uses=1]
- store <2 x i32> %conv, <2 x i32>* %arrayidx
- br label %forinc
-
-forinc: ; preds = %forbody
- %tmp7 = load i32* %i ; <i32> [#uses=1]
- %inc = add i32 %tmp7, 1 ; <i32> [#uses=1]
- store i32 %inc, i32* %i
- br label %forcond
-
-afterfor: ; preds = %forcond
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-2.ll
deleted file mode 100644
index 1e626a2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-2.ll
+++ /dev/null
@@ -1,46 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse42 -disable-mmx | FileCheck %s
-; CHECK: pextrd
-; CHECK: pextrd
-; CHECK: movd
-; CHECK: movaps
-
-
-; bitcast v14i16 to v7i32
-
-define void @convert(<7 x i32>* %dst, <14 x i16>* %src) nounwind {
-entry:
- %dst.addr = alloca <7 x i32>* ; <<7 x i32>**> [#uses=2]
- %src.addr = alloca <14 x i16>* ; <<14 x i16>**> [#uses=2]
- %i = alloca i32, align 4 ; <i32*> [#uses=6]
- store <7 x i32>* %dst, <7 x i32>** %dst.addr
- store <14 x i16>* %src, <14 x i16>** %src.addr
- store i32 0, i32* %i
- br label %forcond
-
-forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
- %cmp = icmp slt i32 %tmp, 4 ; <i1> [#uses=1]
- br i1 %cmp, label %forbody, label %afterfor
-
-forbody: ; preds = %forcond
- %tmp1 = load i32* %i ; <i32> [#uses=1]
- %tmp2 = load <7 x i32>** %dst.addr ; <<2 x i32>*> [#uses=1]
- %arrayidx = getelementptr <7 x i32>* %tmp2, i32 %tmp1 ; <<7 x i32>*> [#uses=1]
- %tmp3 = load i32* %i ; <i32> [#uses=1]
- %tmp4 = load <14 x i16>** %src.addr ; <<4 x i16>*> [#uses=1]
- %arrayidx5 = getelementptr <14 x i16>* %tmp4, i32 %tmp3 ; <<4 x i16>*> [#uses=1]
- %tmp6 = load <14 x i16>* %arrayidx5 ; <<4 x i16>> [#uses=1]
- %add = add <14 x i16> %tmp6, < i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1 > ; <<4 x i16>> [#uses=1]
- %conv = bitcast <14 x i16> %add to <7 x i32> ; <<7 x i32>> [#uses=1]
- store <7 x i32> %conv, <7 x i32>* %arrayidx
- br label %forinc
-
-forinc: ; preds = %forbody
- %tmp7 = load i32* %i ; <i32> [#uses=1]
- %inc = add i32 %tmp7, 1 ; <i32> [#uses=1]
- store i32 %inc, i32* %i
- br label %forcond
-
-afterfor: ; preds = %forcond
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-3.ll
deleted file mode 100644
index 02674dd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-3.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse42 -disable-mmx | FileCheck %s
-; CHECK: paddd
-; CHECK: pextrd
-; CHECK: pextrd
-
-; bitcast v12i8 to v3i32
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-target triple = "i686-apple-darwin10.0.0d2"
-
-define void @convert(<12 x i8>* %dst.addr, <3 x i32> %src) nounwind {
-entry:
- %add = add <3 x i32> %src, < i32 1, i32 1, i32 1 > ; <<3 x i32>> [#uses=1]
- %conv = bitcast <3 x i32> %add to <12 x i8> ; <<12 x i8>> [#uses=1]
- store <12 x i8> %conv, <12 x i8>* %dst.addr
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-4.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-4.ll
deleted file mode 100644
index 5f31e56..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-4.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse42 -disable-mmx | FileCheck %s
-; CHECK: sarb
-; CHECK: sarb
-; CHECK: sarb
-; CHECK: sarb
-; CHECK: sarb
-; CHECK: sarb
-; CHECK: sarb
-; CHECK: sarb
-
-; v8i8 that is widen to v16i8 then split
-; FIXME: This is widen to v16i8 and split to 16 and we then rebuild the vector.
-; Unfortunately, we don't split the store so we don't get the code we want.
-
-define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind {
-entry:
- %dst_i.addr = alloca i64* ; <i64**> [#uses=2]
- %src_i.addr = alloca i64* ; <i64**> [#uses=2]
- %n.addr = alloca i32 ; <i32*> [#uses=2]
- %i = alloca i32, align 4 ; <i32*> [#uses=8]
- %dst = alloca <8 x i8>*, align 4 ; <<8 x i8>**> [#uses=2]
- %src = alloca <8 x i8>*, align 4 ; <<8 x i8>**> [#uses=2]
- store i64* %dst_i, i64** %dst_i.addr
- store i64* %src_i, i64** %src_i.addr
- store i32 %n, i32* %n.addr
- store i32 0, i32* %i
- br label %forcond
-
-forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
- %tmp1 = load i32* %n.addr ; <i32> [#uses=1]
- %cmp = icmp slt i32 %tmp, %tmp1 ; <i1> [#uses=1]
- br i1 %cmp, label %forbody, label %afterfor
-
-forbody: ; preds = %forcond
- %tmp2 = load i32* %i ; <i32> [#uses=1]
- %tmp3 = load i64** %dst_i.addr ; <i64*> [#uses=1]
- %arrayidx = getelementptr i64* %tmp3, i32 %tmp2 ; <i64*> [#uses=1]
- %conv = bitcast i64* %arrayidx to <8 x i8>* ; <<8 x i8>*> [#uses=1]
- store <8 x i8>* %conv, <8 x i8>** %dst
- %tmp4 = load i32* %i ; <i32> [#uses=1]
- %tmp5 = load i64** %src_i.addr ; <i64*> [#uses=1]
- %arrayidx6 = getelementptr i64* %tmp5, i32 %tmp4 ; <i64*> [#uses=1]
- %conv7 = bitcast i64* %arrayidx6 to <8 x i8>* ; <<8 x i8>*> [#uses=1]
- store <8 x i8>* %conv7, <8 x i8>** %src
- %tmp8 = load i32* %i ; <i32> [#uses=1]
- %tmp9 = load <8 x i8>** %dst ; <<8 x i8>*> [#uses=1]
- %arrayidx10 = getelementptr <8 x i8>* %tmp9, i32 %tmp8 ; <<8 x i8>*> [#uses=1]
- %tmp11 = load i32* %i ; <i32> [#uses=1]
- %tmp12 = load <8 x i8>** %src ; <<8 x i8>*> [#uses=1]
- %arrayidx13 = getelementptr <8 x i8>* %tmp12, i32 %tmp11 ; <<8 x i8>*> [#uses=1]
- %tmp14 = load <8 x i8>* %arrayidx13 ; <<8 x i8>> [#uses=1]
- %add = add <8 x i8> %tmp14, < i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1 > ; <<8 x i8>> [#uses=1]
- %shr = ashr <8 x i8> %add, < i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2 > ; <<8 x i8>> [#uses=1]
- store <8 x i8> %shr, <8 x i8>* %arrayidx10
- br label %forinc
-
-forinc: ; preds = %forbody
- %tmp15 = load i32* %i ; <i32> [#uses=1]
- %inc = add i32 %tmp15, 1 ; <i32> [#uses=1]
- store i32 %inc, i32* %i
- br label %forcond
-
-afterfor: ; preds = %forcond
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-5.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-5.ll
deleted file mode 100644
index d1d7fec..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-5.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse42 -disable-mmx | FileCheck %s
-; CHECK: movl
-; CHECK: movd
-
-; bitcast a i64 to v2i32
-
-define void @convert(<2 x i32>* %dst.addr, i64 %src) nounwind {
-entry:
- %conv = bitcast i64 %src to <2 x i32>
- %xor = xor <2 x i32> %conv, < i32 255, i32 32767 >
- store <2 x i32> %xor, <2 x i32>* %dst.addr
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-6.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-6.ll
deleted file mode 100644
index 08759bf..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_cast-6.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse41 -disable-mmx | FileCheck %s
-; CHECK: movd
-
-; Test bit convert that requires widening in the operand.
-
-define i32 @return_v2hi() nounwind {
-entry:
- %retval12 = bitcast <2 x i16> zeroinitializer to i32 ; <i32> [#uses=1]
- ret i32 %retval12
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_conv-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_conv-1.ll
deleted file mode 100644
index a2029dd..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_conv-1.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse42 -disable-mmx | FileCheck %s
-; CHECK: pshufd
-; CHECK: paddd
-
-; truncate v2i64 to v2i32
-
-define void @convert(<2 x i32>* %dst.addr, <2 x i64> %src) nounwind {
-entry:
- %val = trunc <2 x i64> %src to <2 x i32>
- %add = add <2 x i32> %val, < i32 1, i32 1 >
- store <2 x i32> %add, <2 x i32>* %dst.addr
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_conv-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_conv-2.ll
deleted file mode 100644
index b24a9b3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_conv-2.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse42 -disable-mmx | FileCheck %s
-; CHECK: movswl
-; CHECK: movswl
-
-; sign extension v2i16 to v2i32
-
-define void @convert(<2 x i32>* %dst.addr, <2 x i16> %src) nounwind {
-entry:
- %signext = sext <2 x i16> %src to <2 x i32> ; <<12 x i8>> [#uses=1]
- store <2 x i32> %signext, <2 x i32>* %dst.addr
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_conv-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_conv-3.ll
deleted file mode 100644
index 1a40800..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_conv-3.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse42 -disable-mmx | FileCheck %s
-; CHECK: cvtsi2ss
-
-; sign to float v2i16 to v2f32
-
-define void @convert(<2 x float>* %dst.addr, <2 x i16> %src) nounwind {
-entry:
- %val = sitofp <2 x i16> %src to <2 x float>
- store <2 x float> %val, <2 x float>* %dst.addr
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_conv-4.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_conv-4.ll
deleted file mode 100644
index e505b62..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_conv-4.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse42 -disable-mmx | FileCheck %s
-; CHECK: cvtsi2ss
-
-; unsigned to float v7i16 to v7f32
-
-define void @convert(<7 x float>* %dst.addr, <7 x i16> %src) nounwind {
-entry:
- %val = sitofp <7 x i16> %src to <7 x float>
- store <7 x float> %val, <7 x float>* %dst.addr
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_extract-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_extract-1.ll
deleted file mode 100644
index 308e6b8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_extract-1.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86-64 -mattr=+sse42 -disable-mmx | FileCheck %s
-; widen extract subvector
-
-define void @convert(<2 x double>* %dst.addr, <3 x double> %src) {
-entry:
-; CHECK: convert:
-; CHECK: unpcklpd {{%xmm[0-7]}}, {{%xmm[0-7]}}
-; CHECK-NEXT: movapd
- %val = shufflevector <3 x double> %src, <3 x double> undef, <2 x i32> < i32 0, i32 1>
- store <2 x double> %val, <2 x double>* %dst.addr
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_load-0.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_load-0.ll
deleted file mode 100644
index f6c4af0..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_load-0.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -o - -march=x86-64 | FileCheck %s
-; PR4891
-
-; Both loads should happen before either store.
-
-; CHECK: movl (%rdi), %eax
-; CHECK: movl (%rsi), %ecx
-; CHECK: movl %ecx, (%rdi)
-; CHECK: movl %eax, (%rsi)
-
-define void @short2_int_swap(<2 x i16>* nocapture %b, i32* nocapture %c) nounwind {
-entry:
- %0 = load <2 x i16>* %b, align 2 ; <<2 x i16>> [#uses=1]
- %1 = load i32* %c, align 4 ; <i32> [#uses=1]
- %tmp1 = bitcast i32 %1 to <2 x i16> ; <<2 x i16>> [#uses=1]
- store <2 x i16> %tmp1, <2 x i16>* %b, align 2
- %tmp5 = bitcast <2 x i16> %0 to <1 x i32> ; <<1 x i32>> [#uses=1]
- %tmp3 = extractelement <1 x i32> %tmp5, i32 0 ; <i32> [#uses=1]
- store i32 %tmp3, i32* %c, align 4
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_load-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_load-1.ll
deleted file mode 100644
index d397645..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_load-1.ll
+++ /dev/null
@@ -1,45 +0,0 @@
-; RUN: llc %s -o - -march=x86-64 -mtriple=x86_64-unknown-linux-gnu -disable-mmx | FileCheck %s
-; PR4891
-
-; This load should be before the call, not after.
-
-; CHECK: movaps compl+128(%rip), %xmm0
-; CHECK: movaps %xmm0, (%rsp)
-; CHECK: callq killcommon
-
-@compl = linkonce global [20 x i64] zeroinitializer, align 64 ; <[20 x i64]*> [#uses=1]
-
-declare void @killcommon(i32* noalias)
-
-define void @reset(<2 x float>* noalias %garbage1) {
-"file complex.c, line 27, bb1":
- %changed = alloca i32, align 4 ; <i32*> [#uses=3]
- br label %"file complex.c, line 27, bb13"
-
-"file complex.c, line 27, bb13": ; preds = %"file complex.c, line 27, bb1"
- store i32 0, i32* %changed, align 4
- %r2 = getelementptr float* bitcast ([20 x i64]* @compl to float*), i64 32 ; <float*> [#uses=1]
- %r3 = bitcast float* %r2 to <2 x float>* ; <<2 x float>*> [#uses=1]
- %r4 = load <2 x float>* %r3, align 4 ; <<2 x float>> [#uses=1]
- call void @killcommon(i32* %changed)
- br label %"file complex.c, line 34, bb4"
-
-"file complex.c, line 34, bb4": ; preds = %"file complex.c, line 27, bb13"
- %r5 = load i32* %changed, align 4 ; <i32> [#uses=1]
- %r6 = icmp eq i32 %r5, 0 ; <i1> [#uses=1]
- %r7 = zext i1 %r6 to i32 ; <i32> [#uses=1]
- %r8 = icmp ne i32 %r7, 0 ; <i1> [#uses=1]
- br i1 %r8, label %"file complex.c, line 34, bb7", label %"file complex.c, line 27, bb5"
-
-"file complex.c, line 27, bb5": ; preds = %"file complex.c, line 34, bb4"
- br label %"file complex.c, line 35, bb6"
-
-"file complex.c, line 35, bb6": ; preds = %"file complex.c, line 27, bb5"
- %r11 = ptrtoint <2 x float>* %garbage1 to i64 ; <i64> [#uses=1]
- %r12 = inttoptr i64 %r11 to <2 x float>* ; <<2 x float>*> [#uses=1]
- store <2 x float> %r4, <2 x float>* %r12, align 4
- br label %"file complex.c, line 34, bb7"
-
-"file complex.c, line 34, bb7": ; preds = %"file complex.c, line 35, bb6", %"file complex.c, line 34, bb4"
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_load-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_load-2.ll
deleted file mode 100644
index 11383fa..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_load-2.ll
+++ /dev/null
@@ -1,155 +0,0 @@
-; RUN: llc < %s -o - -march=x86-64 -mattr=+sse42 -disable-mmx | FileCheck %s
-
-; Test based on pr5626 to load/store
-;
-
-%i32vec3 = type <3 x i32>
-define void @add3i32(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) {
-; CHECK: movaps
-; CHECK: paddd
-; CHECK: pextrd
-; CHECK: movq
- %a = load %i32vec3* %ap, align 16
- %b = load %i32vec3* %bp, align 16
- %x = add %i32vec3 %a, %b
- store %i32vec3 %x, %i32vec3* %ret, align 16
- ret void
-}
-
-define void @add3i32_2(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) {
-; CHECK: movq
-; CHECK: pinsrd
-; CHECK: movq
-; CHECK: pinsrd
-; CHECK: paddd
-; CHECK: pextrd
-; CHECK: movq
- %a = load %i32vec3* %ap
- %b = load %i32vec3* %bp
- %x = add %i32vec3 %a, %b
- store %i32vec3 %x, %i32vec3* %ret
- ret void
-}
-
-%i32vec7 = type <7 x i32>
-define void @add7i32(%i32vec7* sret %ret, %i32vec7* %ap, %i32vec7* %bp) {
-; CHECK: movaps
-; CHECK: movaps
-; CHECK: paddd
-; CHECK: paddd
-; CHECK: pextrd
-; CHECK: movq
-; CHECK: movaps
- %a = load %i32vec7* %ap, align 16
- %b = load %i32vec7* %bp, align 16
- %x = add %i32vec7 %a, %b
- store %i32vec7 %x, %i32vec7* %ret, align 16
- ret void
-}
-
-%i32vec12 = type <12 x i32>
-define void @add12i32(%i32vec12* sret %ret, %i32vec12* %ap, %i32vec12* %bp) {
-; CHECK: movaps
-; CHECK: movaps
-; CHECK: movaps
-; CHECK: paddd
-; CHECK: paddd
-; CHECK: paddd
-; CHECK: movaps
-; CHECK: movaps
-; CHECK: movaps
- %a = load %i32vec12* %ap, align 16
- %b = load %i32vec12* %bp, align 16
- %x = add %i32vec12 %a, %b
- store %i32vec12 %x, %i32vec12* %ret, align 16
- ret void
-}
-
-
-%i16vec3 = type <3 x i16>
-define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp) nounwind {
-; CHECK: movaps
-; CHECK: paddw
-; CHECK: movd
-; CHECK: pextrw
- %a = load %i16vec3* %ap, align 16
- %b = load %i16vec3* %bp, align 16
- %x = add %i16vec3 %a, %b
- store %i16vec3 %x, %i16vec3* %ret, align 16
- ret void
-}
-
-%i16vec4 = type <4 x i16>
-define void @add4i16(%i16vec4* nocapture sret %ret, %i16vec4* %ap, %i16vec4* %bp) nounwind {
-; CHECK: movaps
-; CHECK: paddw
-; CHECK: movq
- %a = load %i16vec4* %ap, align 16
- %b = load %i16vec4* %bp, align 16
- %x = add %i16vec4 %a, %b
- store %i16vec4 %x, %i16vec4* %ret, align 16
- ret void
-}
-
-%i16vec12 = type <12 x i16>
-define void @add12i16(%i16vec12* nocapture sret %ret, %i16vec12* %ap, %i16vec12* %bp) nounwind {
-; CHECK: movaps
-; CHECK: movaps
-; CHECK: paddw
-; CHECK: paddw
-; CHECK: movq
-; CHECK: movaps
- %a = load %i16vec12* %ap, align 16
- %b = load %i16vec12* %bp, align 16
- %x = add %i16vec12 %a, %b
- store %i16vec12 %x, %i16vec12* %ret, align 16
- ret void
-}
-
-%i16vec18 = type <18 x i16>
-define void @add18i16(%i16vec18* nocapture sret %ret, %i16vec18* %ap, %i16vec18* %bp) nounwind {
-; CHECK: movaps
-; CHECK: movaps
-; CHECK: movaps
-; CHECK: paddw
-; CHECK: paddw
-; CHECK: paddw
-; CHECK: movd
-; CHECK: movaps
-; CHECK: movaps
- %a = load %i16vec18* %ap, align 16
- %b = load %i16vec18* %bp, align 16
- %x = add %i16vec18 %a, %b
- store %i16vec18 %x, %i16vec18* %ret, align 16
- ret void
-}
-
-
-%i8vec3 = type <3 x i8>
-define void @add3i8(%i8vec3* nocapture sret %ret, %i8vec3* %ap, %i8vec3* %bp) nounwind {
-; CHECK: movaps
-; CHECK: paddb
-; CHECK: pextrb
-; CHECK: movb
- %a = load %i8vec3* %ap, align 16
- %b = load %i8vec3* %bp, align 16
- %x = add %i8vec3 %a, %b
- store %i8vec3 %x, %i8vec3* %ret, align 16
- ret void
-}
-
-%i8vec31 = type <31 x i8>
-define void @add31i8(%i8vec31* nocapture sret %ret, %i8vec31* %ap, %i8vec31* %bp) nounwind {
-; CHECK: movaps
-; CHECK: movaps
-; CHECK: paddb
-; CHECK: paddb
-; CHECK: movq
-; CHECK: pextrb
-; CHECK: pextrw
- %a = load %i8vec31* %ap, align 16
- %b = load %i8vec31* %bp, align 16
- %x = add %i8vec31 %a, %b
- store %i8vec31 %x, %i8vec31* %ret, align 16
- ret void
-}
\ No newline at end of file
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_select-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_select-1.ll
deleted file mode 100644
index d9de892..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_select-1.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse42 -disable-mmx | FileCheck %s
-; CHECK: jne
-
-; widening select v6i32 and then a sub
-
-define void @select(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1,<6 x i32> %src2) nounwind {
-entry:
- %x = select i1 %c, <6 x i32> %src1, <6 x i32> %src2
- %val = sub <6 x i32> %x, < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
- store <6 x i32> %val, <6 x i32>* %dst.addr
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_shuffle-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_shuffle-1.ll
deleted file mode 100644
index 47dba4b..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_shuffle-1.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse42 -disable-mmx | FileCheck %s
-; CHECK: insertps
-; CHECK: extractps
-
-; widening shuffle v3float and then an add
-
-define void @shuf(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) nounwind {
-entry:
- %x = shufflevector <3 x float> %src1, <3 x float> %src2, <3 x i32> < i32 0, i32 1, i32 2>
- %val = fadd <3 x float> %x, %src2
- store <3 x float> %val, <3 x float>* %dst.addr
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/widen_shuffle-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/widen_shuffle-2.ll
deleted file mode 100644
index 9374a02..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/widen_shuffle-2.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse42 -disable-mmx | FileCheck %s
-; CHECK: insertps
-; CHECK: extractps
-
-; widening shuffle v3float and then an add
-
-define void @shuf(<3 x float>* %dst.addr, <3 x float> %src1,<3 x float> %src2) nounwind {
-entry:
- %x = shufflevector <3 x float> %src1, <3 x float> %src2, <3 x i32> < i32 0, i32 4, i32 2>
- %val = fadd <3 x float> %x, %src2
- store <3 x float> %val, <3 x float>* %dst.addr
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-and-mask.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-and-mask.ll
deleted file mode 100644
index 2465f23..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-and-mask.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc < %s | FileCheck %s
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin8"
-
-; This should be a single mov, not a load of immediate + andq.
-; CHECK: test:
-; CHECK: movl %edi, %eax
-
-define i64 @test(i64 %x) nounwind {
-entry:
- %tmp123 = and i64 %x, 4294967295 ; <i64> [#uses=1]
- ret i64 %tmp123
-}
-
-; This copy can't be coalesced away because it needs the implicit zero-extend.
-; CHECK: bbb:
-; CHECK: movl %edi, %edi
-
-define void @bbb(i64 %x) nounwind {
- %t = and i64 %x, 4294967295
- call void @foo(i64 %t)
- ret void
-}
-
-; This should use a 32-bit and with implicit zero-extension, not a 64-bit and
-; with a separate mov to materialize the mask.
-; rdar://7527390
-; CHECK: ccc:
-; CHECK: andl $-1048593, %edi
-
-declare void @foo(i64 %x) nounwind
-
-define void @ccc(i64 %x) nounwind {
- %t = and i64 %x, 4293918703
- call void @foo(i64 %t)
- ret void
-}
-
-; This requires a mov and a 64-bit and.
-; CHECK: ddd:
-; CHECK: movabsq $4294967296, %rax
-; CHECK: andq %rax, %rdi
-
-define void @ddd(i64 %x) nounwind {
- %t = and i64 %x, 4294967296
- call void @foo(i64 %t)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-arg.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-arg.ll
deleted file mode 100644
index ec8dd8e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-arg.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llc < %s | grep {movl %edi, %eax}
-; The input value is already sign extended, don't re-extend it.
-; This testcase corresponds to:
-; int test(short X) { return (int)X; }
-
-target datalayout = "e-p:64:64"
-target triple = "x86_64-apple-darwin8"
-
-
-define i32 @test(i16 signext %X) {
-entry:
- %tmp12 = sext i16 %X to i32 ; <i32> [#uses=1]
- ret i32 %tmp12
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-asm.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-asm.ll
deleted file mode 100644
index 2640e59..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-asm.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s
-; PR1029
-
-target datalayout = "e-p:64:64"
-target triple = "x86_64-unknown-linux-gnu"
-
-define void @frame_dummy() {
-entry:
- %tmp1 = tail call void (i8*)* (void (i8*)*)* asm "", "=r,0,~{dirflag},~{fpsr},~{flags}"( void (i8*)* null ) ; <void (i8*)*> [#uses=0]
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-dead-stack-adjust.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-dead-stack-adjust.ll
deleted file mode 100644
index 79316f2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-dead-stack-adjust.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s | not grep rsp
-; RUN: llc < %s | grep cvttsd2siq
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin8"
-
-define double @a(double %b) nounwind {
-entry:
- %tmp12 = fptoui double %b to i32 ; <i32> [#uses=1]
- %tmp123 = uitofp i32 %tmp12 to double ; <double> [#uses=1]
- ret double %tmp123
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-disp.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-disp.ll
deleted file mode 100644
index d8059eb..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-disp.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep mov | count 2
-
-; Fold an offset into an address even if it's not a 32-bit
-; signed integer.
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-unknown-linux-gnu"
-@call_used_regs = external global [53 x i8], align 32
-
-define fastcc void @foo() nounwind {
- %t = getelementptr [53 x i8]* @call_used_regs, i64 0, i64 4294967295
- store i8 1, i8* %t, align 1
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-frameaddr.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-frameaddr.ll
deleted file mode 100644
index 57163d3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-frameaddr.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep movq | grep rbp
-
-define i64* @stack_end_address() nounwind {
-entry:
- tail call i8* @llvm.frameaddress( i32 0 )
- bitcast i8* %0 to i64*
- ret i64* %1
-}
-
-declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-gv-offset.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-gv-offset.ll
deleted file mode 100644
index 365e4af..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-gv-offset.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | not grep lea
-
- %struct.x = type { float, double }
-@X = global %struct.x { float 1.000000e+00, double 2.000000e+00 }, align 16 ; <%struct.x*> [#uses=2]
-
-define i32 @main() nounwind {
-entry:
- %tmp2 = load float* getelementptr (%struct.x* @X, i32 0, i32 0), align 16 ; <float> [#uses=1]
- %tmp4 = load double* getelementptr (%struct.x* @X, i32 0, i32 1), align 8 ; <double> [#uses=1]
- tail call void @t( float %tmp2, double %tmp4 ) nounwind
- ret i32 0
-}
-
-declare void @t(float, double)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-jumps.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-jumps.ll
deleted file mode 100644
index 11b40c8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-jumps.ll
+++ /dev/null
@@ -1,45 +0,0 @@
-; RUN: llc < %s
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin10.0"
-
-define i8 @test1() nounwind ssp {
-entry:
- %0 = select i1 undef, i8* blockaddress(@test1, %bb), i8* blockaddress(@test1, %bb6) ; <i8*> [#uses=1]
- indirectbr i8* %0, [label %bb, label %bb6]
-
-bb: ; preds = %entry
- ret i8 1
-
-bb6: ; preds = %entry
- ret i8 2
-}
-
-
-; PR5930 - Trunc of block address differences.
-@test.array = internal constant [3 x i32] [i32 trunc (i64 sub (i64 ptrtoint (i8* blockaddress(@test2, %foo) to i64), i64 ptrtoint (i8* blockaddress(@test2, %foo) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (i8* blockaddress(@test2, %bar) to i64), i64 ptrtoint (i8* blockaddress(@test2, %foo) to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (i8* blockaddress(@test2, %hack) to i64), i64 ptrtoint (i8* blockaddress(@test2, %foo) to i64)) to i32)] ; <[3 x i32]*> [#uses=1]
-
-define void @test2(i32 %i) nounwind ssp {
-entry:
- %i.addr = alloca i32 ; <i32*> [#uses=2]
- store i32 %i, i32* %i.addr
- %tmp = load i32* %i.addr ; <i32> [#uses=1]
- %idxprom = sext i32 %tmp to i64 ; <i64> [#uses=1]
- %arrayidx = getelementptr inbounds i32* getelementptr inbounds ([3 x i32]* @test.array, i32 0, i32 0), i64 %idxprom ; <i32*> [#uses=1]
- %tmp1 = load i32* %arrayidx ; <i32> [#uses=1]
- %idx.ext = sext i32 %tmp1 to i64 ; <i64> [#uses=1]
- %add.ptr = getelementptr i8* blockaddress(@test2, %foo), i64 %idx.ext ; <i8*> [#uses=1]
- br label %indirectgoto
-
-foo: ; preds = %indirectgoto, %indirectgoto, %indirectgoto, %indirectgoto, %indirectgoto
- br label %bar
-
-bar: ; preds = %foo, %indirectgoto
- br label %hack
-
-hack: ; preds = %bar, %indirectgoto
- ret void
-
-indirectgoto: ; preds = %entry
- %indirect.goto.dest = phi i8* [ %add.ptr, %entry ] ; <i8*> [#uses=1]
- indirectbr i8* %indirect.goto.dest, [label %foo, label %foo, label %bar, label %foo, label %hack, label %foo, label %foo]
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-malloc.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-malloc.ll
deleted file mode 100644
index b4f1fa6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-malloc.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep {shll.*3, %edi}
-; PR3829
-; The generated code should multiply by 3 (sizeof i8*) as an i32,
-; not as an i64!
-
-define i8** @test(i32 %sz) {
- %sub = add i32 %sz, 536870911 ; <i32> [#uses=1]
- %call = malloc i8*, i32 %sub ; <i8**> [#uses=1]
- ret i8** %call
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-mem.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-mem.ll
deleted file mode 100644
index d15f516..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-mem.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -o %t1
-; RUN: grep GOTPCREL %t1 | count 4
-; RUN: grep %%rip %t1 | count 6
-; RUN: grep movq %t1 | count 6
-; RUN: grep leaq %t1 | count 1
-; RUN: llc < %s -mtriple=x86_64-pc-linux -relocation-model=static -o %t2
-; RUN: grep movl %t2 | count 2
-; RUN: grep movq %t2 | count 2
-
-@ptr = external global i32* ; <i32**> [#uses=1]
-@src = external global [0 x i32] ; <[0 x i32]*> [#uses=1]
-@dst = external global [0 x i32] ; <[0 x i32]*> [#uses=1]
-@lptr = internal global i32* null ; <i32**> [#uses=1]
-@ldst = internal global [500 x i32] zeroinitializer, align 32 ; <[500 x i32]*> [#uses=1]
-@lsrc = internal global [500 x i32] zeroinitializer, align 32 ; <[500 x i32]*> [#uses=0]
-@bsrc = internal global [500000 x i32] zeroinitializer, align 32 ; <[500000 x i32]*> [#uses=0]
-@bdst = internal global [500000 x i32] zeroinitializer, align 32 ; <[500000 x i32]*> [#uses=0]
-
-define void @test1() nounwind {
- %tmp = load i32* getelementptr ([0 x i32]* @src, i32 0, i32 0) ; <i32> [#uses=1]
- store i32 %tmp, i32* getelementptr ([0 x i32]* @dst, i32 0, i32 0)
- ret void
-}
-
-define void @test2() nounwind {
- store i32* getelementptr ([0 x i32]* @dst, i32 0, i32 0), i32** @ptr
- ret void
-}
-
-define void @test3() nounwind {
- store i32* getelementptr ([500 x i32]* @ldst, i32 0, i32 0), i32** @lptr
- br label %return
-
-return: ; preds = %0
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-1.ll
deleted file mode 100644
index 46f6d33..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-1.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-pc-linux -relocation-model=pic -o %t1
-; RUN: grep {callq f@PLT} %t1
-
-define void @g() {
-entry:
- call void @f( )
- ret void
-}
-
-declare void @f()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-10.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-10.ll
deleted file mode 100644
index b6f82e2..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-10.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-pc-linux -relocation-model=pic -o %t1
-; RUN: grep {callq g@PLT} %t1
-
-@g = alias weak i32 ()* @f
-
-define void @h() {
-entry:
- %tmp31 = call i32 @g()
- ret void
-}
-
-declare extern_weak i32 @f()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-11.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-11.ll
deleted file mode 100644
index 4db331c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-11.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-pc-linux -relocation-model=pic -o %t1
-; RUN: grep {callq __fixunsxfti@PLT} %t1
-
-define i128 @f(x86_fp80 %a) nounwind {
-entry:
- %tmp78 = fptoui x86_fp80 %a to i128
- ret i128 %tmp78
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-2.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-2.ll
deleted file mode 100644
index 1ce2de7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-2.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-pc-linux -relocation-model=pic -o %t1
-; RUN: grep {callq f} %t1
-; RUN: not grep {callq f@PLT} %t1
-
-define void @g() {
-entry:
- call void @f( )
- ret void
-}
-
-declare hidden void @f()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-3.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-3.ll
deleted file mode 100644
index aa3c888..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-3.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-pc-linux -relocation-model=pic -o %t1
-; RUN: grep {callq f} %t1
-; RUN: not grep {callq f@PLT} %t1
-
-define void @g() {
-entry:
- call void @f( )
- ret void
-}
-
-define internal void @f() {
-entry:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-4.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-4.ll
deleted file mode 100644
index 90fc119..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-4.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-pc-linux -relocation-model=pic -o %t1
-; RUN: grep {movq a@GOTPCREL(%rip),} %t1
-
-@a = global i32 0
-
-define i32 @get_a() {
-entry:
- %tmp1 = load i32* @a, align 4
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-5.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-5.ll
deleted file mode 100644
index 6369bde..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-5.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-pc-linux -relocation-model=pic -o %t1
-; RUN: grep {movl a(%rip),} %t1
-; RUN: not grep GOTPCREL %t1
-
-@a = hidden global i32 0
-
-define i32 @get_a() {
-entry:
- %tmp1 = load i32* @a, align 4
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-6.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-6.ll
deleted file mode 100644
index 6e19ad3..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-6.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-pc-linux -relocation-model=pic -o %t1
-; RUN: grep {movl a(%rip),} %t1
-; RUN: not grep GOTPCREL %t1
-
-@a = internal global i32 0
-
-define i32 @get_a() nounwind {
-entry:
- %tmp1 = load i32* @a, align 4
- ret i32 %tmp1
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-7.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-7.ll
deleted file mode 100644
index 4d98ee6..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-7.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-pc-linux -relocation-model=pic -o %t1
-; RUN: grep {movq f@GOTPCREL(%rip),} %t1
-
-define void ()* @g() nounwind {
-entry:
- ret void ()* @f
-}
-
-declare void @f()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-8.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-8.ll
deleted file mode 100644
index d3b567c..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-8.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-pc-linux -relocation-model=pic -o %t1
-; RUN: grep {leaq f(%rip),} %t1
-; RUN: not grep GOTPCREL %t1
-
-define void ()* @g() {
-entry:
- ret void ()* @f
-}
-
-declare hidden void @f()
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-9.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-9.ll
deleted file mode 100644
index 0761031..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-pic-9.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-pc-linux -relocation-model=pic -o %t1
-; RUN: grep {leaq f(%rip),} %t1
-; RUN: not grep GOTPCREL %t1
-
-define void ()* @g() nounwind {
-entry:
- ret void ()* @f
-}
-
-define internal void @f() nounwind {
-entry:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-ret0.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-ret0.ll
deleted file mode 100644
index c74f6d8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-ret0.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: llc < %s -march=x86-64 | grep mov | count 1
-
-define i32 @f() nounwind {
- tail call void @t( i32 1 ) nounwind
- ret i32 0
-}
-
-declare void @t(i32)
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-shortint.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-shortint.ll
deleted file mode 100644
index 7f96543..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-shortint.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s | grep movswl
-
-target datalayout = "e-p:64:64"
-target triple = "x86_64-apple-darwin8"
-
-
-define void @bar(i16 zeroext %A) {
- tail call void @foo( i16 %A signext )
- ret void
-}
-declare void @foo(i16 signext )
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-sret-return.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-sret-return.ll
deleted file mode 100644
index 7b5f189..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-sret-return.ll
+++ /dev/null
@@ -1,63 +0,0 @@
-; RUN: llc < %s | FileCheck %s
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
-target triple = "x86_64-apple-darwin8"
- %struct.foo = type { [4 x i64] }
-
-; CHECK: bar:
-; CHECK: movq %rdi, %rax
-define void @bar(%struct.foo* noalias sret %agg.result, %struct.foo* %d) nounwind {
-entry:
- %d_addr = alloca %struct.foo* ; <%struct.foo**> [#uses=2]
- %memtmp = alloca %struct.foo, align 8 ; <%struct.foo*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store %struct.foo* %d, %struct.foo** %d_addr
- %tmp = load %struct.foo** %d_addr, align 8 ; <%struct.foo*> [#uses=1]
- %tmp1 = getelementptr %struct.foo* %agg.result, i32 0, i32 0 ; <[4 x i64]*> [#uses=4]
- %tmp2 = getelementptr %struct.foo* %tmp, i32 0, i32 0 ; <[4 x i64]*> [#uses=4]
- %tmp3 = getelementptr [4 x i64]* %tmp1, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp4 = getelementptr [4 x i64]* %tmp2, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp5 = load i64* %tmp4, align 8 ; <i64> [#uses=1]
- store i64 %tmp5, i64* %tmp3, align 8
- %tmp6 = getelementptr [4 x i64]* %tmp1, i32 0, i32 1 ; <i64*> [#uses=1]
- %tmp7 = getelementptr [4 x i64]* %tmp2, i32 0, i32 1 ; <i64*> [#uses=1]
- %tmp8 = load i64* %tmp7, align 8 ; <i64> [#uses=1]
- store i64 %tmp8, i64* %tmp6, align 8
- %tmp9 = getelementptr [4 x i64]* %tmp1, i32 0, i32 2 ; <i64*> [#uses=1]
- %tmp10 = getelementptr [4 x i64]* %tmp2, i32 0, i32 2 ; <i64*> [#uses=1]
- %tmp11 = load i64* %tmp10, align 8 ; <i64> [#uses=1]
- store i64 %tmp11, i64* %tmp9, align 8
- %tmp12 = getelementptr [4 x i64]* %tmp1, i32 0, i32 3 ; <i64*> [#uses=1]
- %tmp13 = getelementptr [4 x i64]* %tmp2, i32 0, i32 3 ; <i64*> [#uses=1]
- %tmp14 = load i64* %tmp13, align 8 ; <i64> [#uses=1]
- store i64 %tmp14, i64* %tmp12, align 8
- %tmp15 = getelementptr %struct.foo* %memtmp, i32 0, i32 0 ; <[4 x i64]*> [#uses=4]
- %tmp16 = getelementptr %struct.foo* %agg.result, i32 0, i32 0 ; <[4 x i64]*> [#uses=4]
- %tmp17 = getelementptr [4 x i64]* %tmp15, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp18 = getelementptr [4 x i64]* %tmp16, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp19 = load i64* %tmp18, align 8 ; <i64> [#uses=1]
- store i64 %tmp19, i64* %tmp17, align 8
- %tmp20 = getelementptr [4 x i64]* %tmp15, i32 0, i32 1 ; <i64*> [#uses=1]
- %tmp21 = getelementptr [4 x i64]* %tmp16, i32 0, i32 1 ; <i64*> [#uses=1]
- %tmp22 = load i64* %tmp21, align 8 ; <i64> [#uses=1]
- store i64 %tmp22, i64* %tmp20, align 8
- %tmp23 = getelementptr [4 x i64]* %tmp15, i32 0, i32 2 ; <i64*> [#uses=1]
- %tmp24 = getelementptr [4 x i64]* %tmp16, i32 0, i32 2 ; <i64*> [#uses=1]
- %tmp25 = load i64* %tmp24, align 8 ; <i64> [#uses=1]
- store i64 %tmp25, i64* %tmp23, align 8
- %tmp26 = getelementptr [4 x i64]* %tmp15, i32 0, i32 3 ; <i64*> [#uses=1]
- %tmp27 = getelementptr [4 x i64]* %tmp16, i32 0, i32 3 ; <i64*> [#uses=1]
- %tmp28 = load i64* %tmp27, align 8 ; <i64> [#uses=1]
- store i64 %tmp28, i64* %tmp26, align 8
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-; CHECK: foo:
-; CHECK: movq %rdi, %rax
-define void @foo({ i64 }* noalias nocapture sret %agg.result) nounwind {
- store { i64 } { i64 0 }, { i64 }* %agg.result
- ret void
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-varargs.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-64-varargs.ll
deleted file mode 100644
index 428f449..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-64-varargs.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -code-model=large -relocation-model=static | grep call | not grep rax
-
-@.str = internal constant [26 x i8] c"%d, %f, %d, %lld, %d, %f\0A\00" ; <[26 x i8]*> [#uses=1]
-
-declare i32 @printf(i8*, ...) nounwind
-
-define i32 @main() nounwind {
-entry:
- %tmp10.i = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([26 x i8]* @.str, i32 0, i64 0), i32 12, double 0x3FF3EB8520000000, i32 120, i64 123456677890, i32 -10, double 4.500000e+15 ) nounwind ; <i32> [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-frameaddr.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-frameaddr.ll
deleted file mode 100644
index d595874..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-frameaddr.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | grep ebp
-
-define i8* @t() nounwind {
-entry:
- %0 = tail call i8* @llvm.frameaddress(i32 0)
- ret i8* %0
-}
-
-declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-frameaddr2.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-frameaddr2.ll
deleted file mode 100644
index c509115..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-frameaddr2.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -march=x86 | grep mov | count 3
-
-define i8* @t() nounwind {
-entry:
- %0 = tail call i8* @llvm.frameaddress(i32 2)
- ret i8* %0
-}
-
-declare i8* @llvm.frameaddress(i32) nounwind readnone
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/x86-store-gv-addr.ll b/libclamav/c++/llvm/test/CodeGen/X86/x86-store-gv-addr.ll
deleted file mode 100644
index 089517a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/x86-store-gv-addr.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -relocation-model=static | not grep lea
-; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -relocation-model=static | not grep lea
-
-@v = external global i32, align 8
-@v_addr = external global i32*, align 8
-
-define void @t() nounwind optsize {
- store i32* @v, i32** @v_addr, align 8
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/xmm-r64.ll b/libclamav/c++/llvm/test/CodeGen/X86/xmm-r64.ll
deleted file mode 100644
index 2a6b5c7..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/xmm-r64.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llc < %s -march=x86-64
-
-define <4 x i32> @test() {
- %tmp1039 = call <4 x i32> @llvm.x86.sse2.psll.d( <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <<4 x i32>> [#uses=1]
- %tmp1040 = bitcast <4 x i32> %tmp1039 to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp1048 = add <2 x i64> %tmp1040, zeroinitializer ; <<2 x i64>> [#uses=1]
- %tmp1048.upgrd.1 = bitcast <2 x i64> %tmp1048 to <4 x i32> ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %tmp1048.upgrd.1
-}
-
-declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>)
-
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/xor-icmp.ll b/libclamav/c++/llvm/test/CodeGen/X86/xor-icmp.ll
deleted file mode 100644
index 2d75c5d..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/xor-icmp.ll
+++ /dev/null
@@ -1,67 +0,0 @@
-; RUN: llc < %s -march=x86 | FileCheck %s -check-prefix=X32
-; RUN: llc < %s -march=x86-64 | FileCheck %s -check-prefix=X64
-; rdar://7367229
-
-define i32 @t(i32 %a, i32 %b) nounwind ssp {
-entry:
-; X32: t:
-; X32: xorb
-; X32-NOT: andb
-; X32-NOT: shrb
-; X32: testb $64
-; X32: jne
-
-; X64: t:
-; X64-NOT: setne
-; X64: xorl
-; X64: testb $64
-; X64: jne
- %0 = and i32 %a, 16384
- %1 = icmp ne i32 %0, 0
- %2 = and i32 %b, 16384
- %3 = icmp ne i32 %2, 0
- %4 = xor i1 %1, %3
- br i1 %4, label %bb1, label %bb
-
-bb: ; preds = %entry
- %5 = tail call i32 (...)* @foo() nounwind ; <i32> [#uses=1]
- ret i32 %5
-
-bb1: ; preds = %entry
- %6 = tail call i32 (...)* @bar() nounwind ; <i32> [#uses=1]
- ret i32 %6
-}
-
-declare i32 @foo(...)
-
-declare i32 @bar(...)
-
-define i32 @t2(i32 %x, i32 %y) nounwind ssp {
-; X32: t2:
-; X32: cmpl
-; X32: sete
-; X32: cmpl
-; X32: sete
-; X32-NOT: xor
-; X32: je
-
-; X64: t2:
-; X64: testl
-; X64: sete
-; X64: testl
-; X64: sete
-; X64-NOT: xor
-; X64: je
-entry:
- %0 = icmp eq i32 %x, 0 ; <i1> [#uses=1]
- %1 = icmp eq i32 %y, 0 ; <i1> [#uses=1]
- %2 = xor i1 %1, %0 ; <i1> [#uses=1]
- br i1 %2, label %bb, label %return
-
-bb: ; preds = %entry
- %3 = tail call i32 (...)* @foo() nounwind ; <i32> [#uses=0]
- ret i32 undef
-
-return: ; preds = %entry
- ret i32 undef
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/xor.ll b/libclamav/c++/llvm/test/CodeGen/X86/xor.ll
deleted file mode 100644
index 9bfff8a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/xor.ll
+++ /dev/null
@@ -1,144 +0,0 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s -check-prefix=X32
-; RUN: llc < %s -march=x86-64 | FileCheck %s -check-prefix=X64
-
-; Though it is undefined, we want xor undef,undef to produce zero.
-define <4 x i32> @test1() nounwind {
- %tmp = xor <4 x i32> undef, undef
- ret <4 x i32> %tmp
-
-; X32: test1:
-; X32: xorps %xmm0, %xmm0
-; X32: ret
-}
-
-; Though it is undefined, we want xor undef,undef to produce zero.
-define i32 @test2() nounwind{
- %tmp = xor i32 undef, undef
- ret i32 %tmp
-; X32: test2:
-; X32: xorl %eax, %eax
-; X32: ret
-}
-
-define i32 @test3(i32 %a, i32 %b) nounwind {
-entry:
- %tmp1not = xor i32 %b, -2
- %tmp3 = and i32 %tmp1not, %a
- %tmp4 = lshr i32 %tmp3, 1
- ret i32 %tmp4
-
-; X64: test3:
-; X64: notl %esi
-; X64: andl %edi, %esi
-; X64: movl %esi, %eax
-; X64: shrl %eax
-; X64: ret
-
-; X32: test3:
-; X32: movl 8(%esp), %eax
-; X32: notl %eax
-; X32: andl 4(%esp), %eax
-; X32: shrl %eax
-; X32: ret
-}
-
-define i32 @test4(i32 %a, i32 %b) nounwind {
-entry:
- br label %bb
-bb:
- %b_addr.0 = phi i32 [ %b, %entry ], [ %tmp8, %bb ]
- %a_addr.0 = phi i32 [ %a, %entry ], [ %tmp3, %bb ]
- %tmp3 = xor i32 %a_addr.0, %b_addr.0
- %tmp4not = xor i32 %tmp3, 2147483647
- %tmp6 = and i32 %tmp4not, %b_addr.0
- %tmp8 = shl i32 %tmp6, 1
- %tmp10 = icmp eq i32 %tmp8, 0
- br i1 %tmp10, label %bb12, label %bb
-bb12:
- ret i32 %tmp3
-
-; X64: test4:
-; X64: notl [[REG:%[a-z]+]]
-; X64: andl {{.*}}[[REG]]
-; X32: test4:
-; X32: notl [[REG:%[a-z]+]]
-; X32: andl {{.*}}[[REG]]
-}
-
-define i16 @test5(i16 %a, i16 %b) nounwind {
-entry:
- br label %bb
-bb:
- %b_addr.0 = phi i16 [ %b, %entry ], [ %tmp8, %bb ]
- %a_addr.0 = phi i16 [ %a, %entry ], [ %tmp3, %bb ]
- %tmp3 = xor i16 %a_addr.0, %b_addr.0
- %tmp4not = xor i16 %tmp3, 32767
- %tmp6 = and i16 %tmp4not, %b_addr.0
- %tmp8 = shl i16 %tmp6, 1
- %tmp10 = icmp eq i16 %tmp8, 0
- br i1 %tmp10, label %bb12, label %bb
-bb12:
- ret i16 %tmp3
-; X64: test5:
-; X64: notw [[REG:%[a-z]+]]
-; X64: andw {{.*}}[[REG]]
-; X32: test5:
-; X32: notw [[REG:%[a-z]+]]
-; X32: andw {{.*}}[[REG]]
-}
-
-define i8 @test6(i8 %a, i8 %b) nounwind {
-entry:
- br label %bb
-bb:
- %b_addr.0 = phi i8 [ %b, %entry ], [ %tmp8, %bb ]
- %a_addr.0 = phi i8 [ %a, %entry ], [ %tmp3, %bb ]
- %tmp3 = xor i8 %a_addr.0, %b_addr.0
- %tmp4not = xor i8 %tmp3, 127
- %tmp6 = and i8 %tmp4not, %b_addr.0
- %tmp8 = shl i8 %tmp6, 1
- %tmp10 = icmp eq i8 %tmp8, 0
- br i1 %tmp10, label %bb12, label %bb
-bb12:
- ret i8 %tmp3
-; X64: test6:
-; X64: notb [[REG:%[a-z]+]]
-; X64: andb {{.*}}[[REG]]
-; X32: test6:
-; X32: notb [[REG:%[a-z]+]]
-; X32: andb {{.*}}[[REG]]
-}
-
-define i32 @test7(i32 %a, i32 %b) nounwind {
-entry:
- br label %bb
-bb:
- %b_addr.0 = phi i32 [ %b, %entry ], [ %tmp8, %bb ]
- %a_addr.0 = phi i32 [ %a, %entry ], [ %tmp3, %bb ]
- %tmp3 = xor i32 %a_addr.0, %b_addr.0
- %tmp4not = xor i32 %tmp3, 2147483646
- %tmp6 = and i32 %tmp4not, %b_addr.0
- %tmp8 = shl i32 %tmp6, 1
- %tmp10 = icmp eq i32 %tmp8, 0
- br i1 %tmp10, label %bb12, label %bb
-bb12:
- ret i32 %tmp3
-; X64: test7:
-; X64: xorl $2147483646, [[REG:%[a-z]+]]
-; X64: andl {{.*}}[[REG]]
-; X32: test7:
-; X32: xorl $2147483646, [[REG:%[a-z]+]]
-; X32: andl {{.*}}[[REG]]
-}
-
-define i32 @test8(i32 %a) nounwind {
-; rdar://7553032
-entry:
- %t1 = sub i32 0, %a
- %t2 = add i32 %t1, -1
- ret i32 %t2
-; X64: test8:
-; X64: notl %eax
-; X32: test8:
-; X32: notl %eax
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/zero-remat.ll b/libclamav/c++/llvm/test/CodeGen/X86/zero-remat.ll
deleted file mode 100644
index 3e3bb95..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/zero-remat.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s --check-prefix=CHECK-64
-; RUN: llc < %s -march=x86-64 -stats -info-output-file - | grep asm-printer | grep 12
-; RUN: llc < %s -march=x86 | FileCheck %s --check-prefix=CHECK-32
-
-declare void @bar(double %x)
-declare void @barf(float %x)
-
-define double @foo() nounwind {
-
- call void @bar(double 0.0)
- ret double 0.0
-
-;CHECK-32: foo:
-;CHECK-32: call
-;CHECK-32: fldz
-;CHECK-32: ret
-
-;CHECK-64: foo:
-;CHECK-64: pxor
-;CHECK-64: call
-;CHECK-64: pxor
-;CHECK-64: ret
-}
-
-
-define float @foof() nounwind {
- call void @barf(float 0.0)
- ret float 0.0
-
-;CHECK-32: foof:
-;CHECK-32: call
-;CHECK-32: fldz
-;CHECK-32: ret
-
-;CHECK-64: foof:
-;CHECK-64: pxor
-;CHECK-64: call
-;CHECK-64: pxor
-;CHECK-64: ret
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/zext-inreg-0.ll b/libclamav/c++/llvm/test/CodeGen/X86/zext-inreg-0.ll
deleted file mode 100644
index ae6221a..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/zext-inreg-0.ll
+++ /dev/null
@@ -1,66 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep and
-; RUN: llc < %s -march=x86-64 > %t
-; RUN: not grep and %t
-; RUN: not grep movzbq %t
-; RUN: not grep movzwq %t
-; RUN: not grep movzlq %t
-
-; These should use movzbl instead of 'and 255'.
-; This is related to not having a ZERO_EXTEND_REG opcode.
-
-define i32 @a(i32 %d) nounwind {
- %e = add i32 %d, 1
- %retval = and i32 %e, 255
- ret i32 %retval
-}
-define i32 @b(float %d) nounwind {
- %tmp12 = fptoui float %d to i8
- %retval = zext i8 %tmp12 to i32
- ret i32 %retval
-}
-define i32 @c(i32 %d) nounwind {
- %e = add i32 %d, 1
- %retval = and i32 %e, 65535
- ret i32 %retval
-}
-define i64 @d(i64 %d) nounwind {
- %e = add i64 %d, 1
- %retval = and i64 %e, 255
- ret i64 %retval
-}
-define i64 @e(i64 %d) nounwind {
- %e = add i64 %d, 1
- %retval = and i64 %e, 65535
- ret i64 %retval
-}
-define i64 @f(i64 %d) nounwind {
- %e = add i64 %d, 1
- %retval = and i64 %e, 4294967295
- ret i64 %retval
-}
-
-define i32 @g(i8 %d) nounwind {
- %e = add i8 %d, 1
- %retval = zext i8 %e to i32
- ret i32 %retval
-}
-define i32 @h(i16 %d) nounwind {
- %e = add i16 %d, 1
- %retval = zext i16 %e to i32
- ret i32 %retval
-}
-define i64 @i(i8 %d) nounwind {
- %e = add i8 %d, 1
- %retval = zext i8 %e to i64
- ret i64 %retval
-}
-define i64 @j(i16 %d) nounwind {
- %e = add i16 %d, 1
- %retval = zext i16 %e to i64
- ret i64 %retval
-}
-define i64 @k(i32 %d) nounwind {
- %e = add i32 %d, 1
- %retval = zext i32 %e to i64
- ret i64 %retval
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/zext-inreg-1.ll b/libclamav/c++/llvm/test/CodeGen/X86/zext-inreg-1.ll
deleted file mode 100644
index 17fe374..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/zext-inreg-1.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llc < %s -march=x86 | not grep and
-
-; These tests differ from the ones in zext-inreg-0.ll in that
-; on x86-64 they do require and instructions.
-
-; These should use movzbl instead of 'and 255'.
-; This is related to not having a ZERO_EXTEND_REG node.
-
-define i64 @l(i64 %d) nounwind {
- %e = add i64 %d, 1
- %retval = and i64 %e, 1099511627775
- ret i64 %retval
-}
-define i64 @m(i64 %d) nounwind {
- %e = add i64 %d, 1
- %retval = and i64 %e, 281474976710655
- ret i64 %retval
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/zext-shl.ll b/libclamav/c++/llvm/test/CodeGen/X86/zext-shl.ll
deleted file mode 100644
index 928848e..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/zext-shl.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llc < %s -march=x86 | FileCheck %s
-
-define i32 @t1(i8 zeroext %x) nounwind readnone ssp {
-entry:
-; CHECK: t1:
-; CHECK: shll
-; CHECK-NOT: movzwl
-; CHECK: ret
- %0 = zext i8 %x to i16
- %1 = shl i16 %0, 5
- %2 = zext i16 %1 to i32
- ret i32 %2
-}
-
-define i32 @t2(i8 zeroext %x) nounwind readnone ssp {
-entry:
-; CHECK: t2:
-; CHECK: shrl
-; CHECK-NOT: movzwl
-; CHECK: ret
- %0 = zext i8 %x to i16
- %1 = lshr i16 %0, 3
- %2 = zext i16 %1 to i32
- ret i32 %2
-}
diff --git a/libclamav/c++/llvm/test/CodeGen/X86/zext-trunc.ll b/libclamav/c++/llvm/test/CodeGen/X86/zext-trunc.ll
deleted file mode 100644
index b9ffbe8..0000000
--- a/libclamav/c++/llvm/test/CodeGen/X86/zext-trunc.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llc < %s -march=x86-64 | FileCheck %s
-; rdar://7570931
-
-define i64 @foo(i64 %a, i64 %b) nounwind {
-; CHECK: foo:
-; CHECK: leal
-; CHECK-NOT: movl
-; CHECK: ret
- %c = add i64 %a, %b
- %d = trunc i64 %c to i32
- %e = zext i32 %d to i64
- ret i64 %e
-}
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2002-12-16-ArgTest.ll b/libclamav/c++/llvm/test/ExecutionEngine/2002-12-16-ArgTest.ll
deleted file mode 100644
index 32be184..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2002-12-16-ArgTest.ll
+++ /dev/null
@@ -1,39 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-; XFAIL: *
-
-@.LC0 = internal global [10 x i8] c"argc: %d\0A\00" ; <[10 x i8]*> [#uses=1]
-
-declare i32 @puts(i8*)
-
-define void @getoptions(i32* %argc) {
-bb0:
- ret void
-}
-
-declare i32 @printf(i8*, ...)
-
-define i32 @main(i32 %argc, i8** %argv) {
-bb0:
- call i32 (i8*, ...)* @printf( i8* getelementptr ([10 x i8]* @.LC0, i64 0, i64 0), i32 %argc ) ; <i32>:0 [#uses=0]
- %cast224 = bitcast i8** %argv to i8* ; <i8*> [#uses=1]
- %local = alloca i8* ; <i8**> [#uses=3]
- store i8* %cast224, i8** %local
- %cond226 = icmp sle i32 %argc, 0 ; <i1> [#uses=1]
- br i1 %cond226, label %bb3, label %bb2
-bb2: ; preds = %bb2, %bb0
- %cann-indvar = phi i32 [ 0, %bb0 ], [ %add1-indvar, %bb2 ] ; <i32> [#uses=2]
- %add1-indvar = add i32 %cann-indvar, 1 ; <i32> [#uses=2]
- %cann-indvar-idxcast = sext i32 %cann-indvar to i64 ; <i64> [#uses=1]
- %CT = bitcast i8** %local to i8*** ; <i8***> [#uses=1]
- %reg115 = load i8*** %CT ; <i8**> [#uses=1]
- %cast235 = getelementptr i8** %reg115, i64 %cann-indvar-idxcast ; <i8**> [#uses=1]
- %reg117 = load i8** %cast235 ; <i8*> [#uses=1]
- %reg236 = call i32 @puts( i8* %reg117 ) ; <i32> [#uses=0]
- %cond239 = icmp slt i32 %add1-indvar, %argc ; <i1> [#uses=1]
- br i1 %cond239, label %bb2, label %bb3
-bb3: ; preds = %bb2, %bb0
- %cast243 = bitcast i8** %local to i32* ; <i32*> [#uses=1]
- call void @getoptions( i32* %cast243 )
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2003-01-04-LoopTest.ll b/libclamav/c++/llvm/test/ExecutionEngine/2003-01-04-LoopTest.ll
deleted file mode 100644
index 653cf79..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2003-01-04-LoopTest.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-define i32 @main() {
- call i32 @mylog( i32 4 ) ; <i32>:1 [#uses=0]
- ret i32 0
-}
-
-define internal i32 @mylog(i32 %num) {
-bb0:
- br label %bb2
-bb2: ; preds = %bb2, %bb0
- %reg112 = phi i32 [ 10, %bb2 ], [ 1, %bb0 ] ; <i32> [#uses=1]
- %cann-indvar = phi i32 [ %cann-indvar, %bb2 ], [ 0, %bb0 ] ; <i32> [#uses=1]
- %reg114 = add i32 %reg112, 1 ; <i32> [#uses=2]
- %cond222 = icmp slt i32 %reg114, %num ; <i1> [#uses=1]
- br i1 %cond222, label %bb2, label %bb3
-bb3: ; preds = %bb2
- ret i32 %reg114
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2003-01-04-PhiTest.ll b/libclamav/c++/llvm/test/ExecutionEngine/2003-01-04-PhiTest.ll
deleted file mode 100644
index b5c9d81..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2003-01-04-PhiTest.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-define i32 @main() {
-; <label>:0
- br label %Loop
-Loop: ; preds = %Loop, %0
- %X = phi i32 [ 0, %0 ], [ 1, %Loop ] ; <i32> [#uses=1]
- br i1 true, label %Out, label %Loop
-Out: ; preds = %Loop
- ret i32 %X
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2003-01-09-SARTest.ll b/libclamav/c++/llvm/test/ExecutionEngine/2003-01-09-SARTest.ll
deleted file mode 100644
index 8147897..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2003-01-09-SARTest.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-; We were accidentally inverting the signedness of right shifts. Whoops.
-
-define i32 @main() {
- %X = ashr i32 -1, 16 ; <i32> [#uses=1]
- %Y = ashr i32 %X, 16 ; <i32> [#uses=1]
- %Z = add i32 %Y, 1 ; <i32> [#uses=1]
- ret i32 %Z
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2003-01-10-FUCOM.ll b/libclamav/c++/llvm/test/ExecutionEngine/2003-01-10-FUCOM.ll
deleted file mode 100644
index d996fa5..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2003-01-10-FUCOM.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-define i32 @main() {
- %X = fadd double 0.000000e+00, 1.000000e+00 ; <double> [#uses=1]
- %Y = fsub double 0.000000e+00, 1.000000e+00 ; <double> [#uses=2]
- %Z = fcmp oeq double %X, %Y ; <i1> [#uses=0]
- fadd double %Y, 0.000000e+00 ; <double>:1 [#uses=0]
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2003-01-15-AlignmentTest.ll b/libclamav/c++/llvm/test/ExecutionEngine/2003-01-15-AlignmentTest.ll
deleted file mode 100644
index a55d74d..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2003-01-15-AlignmentTest.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-define i32 @bar(i8* %X) {
- ; pointer should be 4 byte aligned!
- %P = alloca double ; <double*> [#uses=1]
- %R = ptrtoint double* %P to i32 ; <i32> [#uses=1]
- %A = and i32 %R, 3 ; <i32> [#uses=1]
- ret i32 %A
-}
-
-define i32 @main() {
- %SP = alloca i8 ; <i8*> [#uses=1]
- %X = add i32 0, 0 ; <i32> [#uses=1]
- alloca i8, i32 %X ; <i8*>:1 [#uses=0]
- call i32 @bar( i8* %SP ) ; <i32>:2 [#uses=1]
- ret i32 %2
-}
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2003-05-06-LivenessClobber.ll b/libclamav/c++/llvm/test/ExecutionEngine/2003-05-06-LivenessClobber.ll
deleted file mode 100644
index 57fe95b..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2003-05-06-LivenessClobber.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; This testcase should return with an exit code of 1.
-;
-; RUN: llvm-as < %s | not lli
-
-@test = global i64 0 ; <i64*> [#uses=1]
-
-define internal i64 @test.upgrd.1() {
- %tmp.0 = load i64* @test ; <i64> [#uses=1]
- %tmp.1 = add i64 %tmp.0, 1 ; <i64> [#uses=1]
- ret i64 %tmp.1
-}
-
-define i32 @main() {
- %L = call i64 @test.upgrd.1( ) ; <i64> [#uses=1]
- %I = trunc i64 %L to i32 ; <i32> [#uses=1]
- ret i32 %I
-}
-
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2003-05-07-ArgumentTest.ll b/libclamav/c++/llvm/test/ExecutionEngine/2003-05-07-ArgumentTest.ll
deleted file mode 100644
index 1dad78e..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2003-05-07-ArgumentTest.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llvm-as < %s | lli - test
-; XFAIL: *
-
-declare i32 @puts(i8*)
-
-define i32 @main(i32 %argc.1, i8** %argv.1) {
- %tmp.5 = getelementptr i8** %argv.1, i64 1 ; <i8**> [#uses=1]
- %tmp.6 = load i8** %tmp.5 ; <i8*> [#uses=1]
- %tmp.0 = call i32 @puts( i8* %tmp.6 ) ; <i32> [#uses=0]
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2003-05-11-PHIRegAllocBug.ll b/libclamav/c++/llvm/test/ExecutionEngine/2003-05-11-PHIRegAllocBug.ll
deleted file mode 100644
index 5a13b21..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2003-05-11-PHIRegAllocBug.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-target datalayout = "e-p:32:32"
-
-define i32 @main() {
-entry:
- br label %endif
-then: ; No predecessors!
- br label %endif
-endif: ; preds = %then, %entry
- %x = phi i32 [ 4, %entry ], [ 27, %then ] ; <i32> [#uses=0]
- %result = phi i32 [ 32, %then ], [ 0, %entry ] ; <i32> [#uses=0]
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2003-06-04-bzip2-bug.ll b/libclamav/c++/llvm/test/ExecutionEngine/2003-06-04-bzip2-bug.ll
deleted file mode 100644
index 6e2da70..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2003-06-04-bzip2-bug.ll
+++ /dev/null
@@ -1,20 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-; Testcase distilled from 256.bzip2.
-
-target datalayout = "e-p:32:32"
-
-define i32 @main() {
-entry:
- br label %loopentry.0
-loopentry.0: ; preds = %loopentry.0, %entry
- %h.0 = phi i32 [ %tmp.2, %loopentry.0 ], [ -1, %entry ] ; <i32> [#uses=1]
- %tmp.2 = add i32 %h.0, 1 ; <i32> [#uses=3]
- %tmp.4 = icmp ne i32 %tmp.2, 0 ; <i1> [#uses=1]
- br i1 %tmp.4, label %loopentry.0, label %loopentry.1
-loopentry.1: ; preds = %loopentry.0
- %h.1 = phi i32 [ %tmp.2, %loopentry.0 ] ; <i32> [#uses=1]
- ret i32 %h.1
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2003-06-05-PHIBug.ll b/libclamav/c++/llvm/test/ExecutionEngine/2003-06-05-PHIBug.ll
deleted file mode 100644
index 50b48da..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2003-06-05-PHIBug.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-; Testcase distilled from 256.bzip2.
-
-target datalayout = "e-p:32:32"
-
-define i32 @main() {
-entry:
- %X = add i32 1, -1 ; <i32> [#uses=3]
- br label %Next
-Next: ; preds = %entry
- %A = phi i32 [ %X, %entry ] ; <i32> [#uses=0]
- %B = phi i32 [ %X, %entry ] ; <i32> [#uses=0]
- %C = phi i32 [ %X, %entry ] ; <i32> [#uses=1]
- ret i32 %C
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2003-08-15-AllocaAssertion.ll b/libclamav/c++/llvm/test/ExecutionEngine/2003-08-15-AllocaAssertion.ll
deleted file mode 100644
index 6c90b33..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2003-08-15-AllocaAssertion.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-; This testcase failed to work because two variable sized allocas confused the
-; local register allocator.
-
-define i32 @main(i32 %X) {
- %A = alloca i32, i32 %X ; <i32*> [#uses=0]
- %B = alloca float, i32 %X ; <float*> [#uses=0]
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2003-08-21-EnvironmentTest.ll b/libclamav/c++/llvm/test/ExecutionEngine/2003-08-21-EnvironmentTest.ll
deleted file mode 100644
index 35b5f88..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2003-08-21-EnvironmentTest.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-; XFAIL: *
-
-;
-; Regression Test: EnvironmentTest.ll
-;
-; Description:
-; This is a regression test that verifies that the JIT passes the
-; environment to the main() function.
-;
-
-
-declare i32 @strlen(i8*)
-
-define i32 @main(i32 %argc.1, i8** %argv.1, i8** %envp.1) {
- %tmp.2 = load i8** %envp.1 ; <i8*> [#uses=1]
- %tmp.3 = call i32 @strlen( i8* %tmp.2 ) ; <i32> [#uses=1]
- %T = icmp eq i32 %tmp.3, 0 ; <i1> [#uses=1]
- %R = zext i1 %T to i32 ; <i32> [#uses=1]
- ret i32 %R
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2003-08-23-RegisterAllocatePhysReg.ll b/libclamav/c++/llvm/test/ExecutionEngine/2003-08-23-RegisterAllocatePhysReg.ll
deleted file mode 100644
index b165a1c..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2003-08-23-RegisterAllocatePhysReg.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-; This testcase exposes a bug in the local register allocator where it runs out
-; of registers (due to too many overlapping live ranges), but then attempts to
-; use the ESP register (which is not allocatable) to hold a value.
-
-define i32 @main(i32 %A) {
- ; ESP gets used again...
- %Ap2 = alloca i32, i32 %A ; <i32*> [#uses=11]
- ; Produce lots of overlapping live ranges
- %B = add i32 %A, 1 ; <i32> [#uses=1]
- %C = add i32 %A, 2 ; <i32> [#uses=1]
- %D = add i32 %A, 3 ; <i32> [#uses=1]
- %E = add i32 %A, 4 ; <i32> [#uses=1]
- %F = add i32 %A, 5 ; <i32> [#uses=1]
- %G = add i32 %A, 6 ; <i32> [#uses=1]
- %H = add i32 %A, 7 ; <i32> [#uses=1]
- %I = add i32 %A, 8 ; <i32> [#uses=1]
- %J = add i32 %A, 9 ; <i32> [#uses=1]
- %K = add i32 %A, 10 ; <i32> [#uses=1]
- ; Uses of all of the values
- store i32 %A, i32* %Ap2
- store i32 %B, i32* %Ap2
- store i32 %C, i32* %Ap2
- store i32 %D, i32* %Ap2
- store i32 %E, i32* %Ap2
- store i32 %F, i32* %Ap2
- store i32 %G, i32* %Ap2
- store i32 %H, i32* %Ap2
- store i32 %I, i32* %Ap2
- store i32 %J, i32* %Ap2
- store i32 %K, i32* %Ap2
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2003-10-18-PHINode-ConstantExpr-CondCode-Failure.ll b/libclamav/c++/llvm/test/ExecutionEngine/2003-10-18-PHINode-ConstantExpr-CondCode-Failure.ll
deleted file mode 100644
index aa9d7e7..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2003-10-18-PHINode-ConstantExpr-CondCode-Failure.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-@A = global i32 0 ; <i32*> [#uses=1]
-
-define i32 @main() {
- %Ret = call i32 @test( i1 true, i32 0 ) ; <i32> [#uses=1]
- ret i32 %Ret
-}
-
-define i32 @test(i1 %c, i32 %A) {
- br i1 %c, label %Taken1, label %NotTaken
-Cont: ; preds = %Taken1, %NotTaken
- %V = phi i32 [ 0, %NotTaken ], [ sub (i32 ptrtoint (i32* @A to i32), i32 1234), %Taken1 ] ; <i32> [#uses=0]
- ret i32 0
-NotTaken: ; preds = %0
- br label %Cont
-Taken1: ; preds = %0
- %B = icmp eq i32 %A, 0 ; <i1> [#uses=1]
- br i1 %B, label %Cont, label %ExitError
-ExitError: ; preds = %Taken1
- ret i32 12
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2005-12-02-TailCallBug.ll b/libclamav/c++/llvm/test/ExecutionEngine/2005-12-02-TailCallBug.ll
deleted file mode 100644
index 59a40ae..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2005-12-02-TailCallBug.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; PR672
-; RUN: llvm-as < %s | lli
-
-define i32 @main() {
- %f = bitcast i32 (i32, i32*, i32)* @check_tail to i32* ; <i32*> [#uses=1]
- %res = tail call fastcc i32 @check_tail( i32 10, i32* %f, i32 10 ) ; <i32> [#uses=1]
- ret i32 %res
-}
-
-define fastcc i32 @check_tail(i32 %x, i32* %f, i32 %g) {
- %tmp1 = icmp sgt i32 %x, 0 ; <i1> [#uses=1]
- br i1 %tmp1, label %if-then, label %if-else
-if-then: ; preds = %0
- %fun_ptr = bitcast i32* %f to i32 (i32, i32*, i32)* ; <i32 (i32, i32*, i32)*> [#uses=1]
- %arg1 = add i32 %x, -1 ; <i32> [#uses=1]
- %res = tail call fastcc i32 %fun_ptr( i32 %arg1, i32* %f, i32 %g ) ; <i32> [#uses=1]
- ret i32 %res
-if-else: ; preds = %0
- ret i32 %x
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2007-12-10-APIntLoadStore.ll b/libclamav/c++/llvm/test/ExecutionEngine/2007-12-10-APIntLoadStore.ll
deleted file mode 100644
index f347f5d..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2007-12-10-APIntLoadStore.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llvm-as %s -o - | lli -force-interpreter
-; PR1836
-
-define i32 @main() {
-entry:
- %retval = alloca i32 ; <i32*> [#uses=2]
- %tmp = alloca i32 ; <i32*> [#uses=2]
- %x = alloca i75, align 16 ; <i75*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i75 999, i75* %x, align 16
- store i32 0, i32* %tmp, align 4
- %tmp1 = load i32* %tmp, align 4 ; <i32> [#uses=1]
- store i32 %tmp1, i32* %retval, align 4
- br label %return
-
-return: ; preds = %entry
- %retval2 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval2
-}
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2008-06-05-APInt-OverAShr.ll b/libclamav/c++/llvm/test/ExecutionEngine/2008-06-05-APInt-OverAShr.ll
deleted file mode 100644
index e7e434f..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2008-06-05-APInt-OverAShr.ll
+++ /dev/null
@@ -1,60 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli -force-interpreter=true %t.bc | grep 1
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
-target triple = "i686-pc-linux-gnu"
-@.str = internal constant [10 x i8] c"MSB = %d\0A\00" ; <[10 x i8]*> [#uses=1]
-
-define i65 @foo(i65 %x) {
-entry:
- %x_addr = alloca i65 ; <i65*> [#uses=2]
- %retval = alloca i65 ; <i65*> [#uses=2]
- %tmp = alloca i65 ; <i65*> [#uses=2]
- %"alloca point" = bitcast i65 0 to i65 ; <i65> [#uses=0]
- store i65 %x, i65* %x_addr
- %tmp1 = load i65* %x_addr, align 4 ; <i65> [#uses=1]
- %tmp2 = ashr i65 %tmp1, 65 ; <i65> [#uses=1]
- store i65 %tmp2, i65* %tmp, align 4
- %tmp3 = load i65* %tmp, align 4 ; <i65> [#uses=1]
- store i65 %tmp3, i65* %retval, align 4
- br label %return
-
-return: ; preds = %entry
- %retval4 = load i65* %retval ; <i65> [#uses=1]
- ret i65 %retval4
-}
-
-define i32 @main() {
-entry:
- %retval = alloca i32 ; <i32*> [#uses=1]
- %iftmp.0 = alloca i32 ; <i32*> [#uses=3]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = call i65 @foo( i65 -9 ) ; <i65> [#uses=1]
- %tmp1 = lshr i65 %tmp, 64 ; <i65> [#uses=1]
- %tmp2 = xor i65 %tmp1, 1 ; <i65> [#uses=1]
- %tmp3 = and i65 %tmp2, 1 ; <i65> [#uses=1]
- %tmp34 = trunc i65 %tmp3 to i8 ; <i8> [#uses=1]
- %toBool = icmp ne i8 %tmp34, 0 ; <i1> [#uses=1]
- br i1 %toBool, label %cond_true, label %cond_false
-
-cond_true: ; preds = %entry
- store i32 0, i32* %iftmp.0, align 4
- br label %cond_next
-
-cond_false: ; preds = %entry
- store i32 1, i32* %iftmp.0, align 4
- br label %cond_next
-
-cond_next: ; preds = %cond_false, %cond_true
- %tmp5 = getelementptr [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
- %tmp6 = load i32* %iftmp.0, align 4 ; <i32> [#uses=1]
- %tmp7 = call i32 (i8*, ...)* @printf( i8* noalias %tmp5, i32 %tmp6 ) nounwind ; <i32> [#uses=0]
- br label %return
-
-return: ; preds = %cond_next
- store i32 0, i32* %retval, align 4
- %retval8 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval8
-}
-
-declare i32 @printf(i8* noalias , ...) nounwind
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/2010-01-15-UndefValue.ll b/libclamav/c++/llvm/test/ExecutionEngine/2010-01-15-UndefValue.ll
deleted file mode 100644
index 33ca63a..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/2010-01-15-UndefValue.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli -force-interpreter=true %t.bc
-
-define i32 @main() {
- %a = add i32 0, undef
- %b = fadd float 0.0, undef
- %c = fadd double 0.0, undef
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/dg.exp b/libclamav/c++/llvm/test/ExecutionEngine/dg.exp
deleted file mode 100644
index f200589..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/dg.exp
+++ /dev/null
@@ -1,3 +0,0 @@
-load_lib llvm.exp
-
-RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/fpbitcast.ll b/libclamav/c++/llvm/test/ExecutionEngine/fpbitcast.ll
deleted file mode 100644
index 34ca129..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/fpbitcast.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llvm-as < %s > %t.bc
-; RUN: lli -force-interpreter=true %t.bc | grep 40091eb8
-;
-define i32 @test(double %x) {
-entry:
- %x46.i = bitcast double %x to i64
- %tmp343.i = lshr i64 %x46.i, 32
- %tmp344.i = trunc i64 %tmp343.i to i32
- ret i32 %tmp344.i
-}
-
-define i32 @main()
-{
- %res = call i32 @test(double 3.14)
- %ptr = getelementptr [4 x i8]* @format, i32 0, i32 0
- call i32 (i8*,...)* @printf(i8* %ptr, i32 %res)
- ret i32 0
-}
-
-declare i32 @printf(i8*, ...)
-@format = internal constant [4 x i8] c"%x\0A\00"
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/hello.ll b/libclamav/c++/llvm/test/ExecutionEngine/hello.ll
deleted file mode 100644
index 2fa168b..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/hello.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-; XFAIL: *
-
-@.LC0 = internal global [12 x i8] c"Hello World\00" ; <[12 x i8]*> [#uses=1]
-
-declare i32 @puts(i8*)
-
-define i32 @main() {
- %reg210 = call i32 @puts( i8* getelementptr ([12 x i8]* @.LC0, i64 0, i64 0) ) ; <i32> [#uses=0]
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/hello2.ll b/libclamav/c++/llvm/test/ExecutionEngine/hello2.ll
deleted file mode 100644
index ce099cf..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/hello2.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-; XFAIL: *
-
-@X = global i32 7 ; <i32*> [#uses=0]
-@msg = internal global [13 x i8] c"Hello World\0A\00" ; <[13 x i8]*> [#uses=1]
-
-declare void @printf([13 x i8]*, ...)
-
-define void @bar() {
- call void ([13 x i8]*, ...)* @printf( [13 x i8]* @msg )
- ret void
-}
-
-define i32 @main() {
- call void @bar( )
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/simplesttest.ll b/libclamav/c++/llvm/test/ExecutionEngine/simplesttest.ll
deleted file mode 100644
index 5d9cf76..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/simplesttest.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-define i32 @main() {
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/simpletest.ll b/libclamav/c++/llvm/test/ExecutionEngine/simpletest.ll
deleted file mode 100644
index 53fb79c..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/simpletest.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-define i32 @bar() {
- ret i32 0
-}
-
-define i32 @main() {
- %r = call i32 @bar( ) ; <i32> [#uses=1]
- ret i32 %r
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/stubs.ll b/libclamav/c++/llvm/test/ExecutionEngine/stubs.ll
deleted file mode 100644
index a8efeaf..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/stubs.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llvm-as < %s | lli -disable-lazy-compilation=false
-; XFAIL: *
-
-define i32 @main() nounwind {
-entry:
- call void @lazily_compiled_address_is_consistent()
- ret i32 0
-}
-
-; Test PR3043: @test should have the same address before and after
-; it's JIT-compiled.
-@funcPtr = common global i1 ()* null, align 4
-@lcaic_failure = internal constant [46 x i8] c"@lazily_compiled_address_is_consistent failed\00"
-
-define void @lazily_compiled_address_is_consistent() nounwind {
-entry:
- store i1 ()* @test, i1 ()** @funcPtr
- %pass = tail call i1 @test() ; <i32> [#uses=1]
- br i1 %pass, label %pass_block, label %fail_block
-pass_block:
- ret void
-fail_block:
- call i32 @puts(i8* getelementptr([46 x i8]* @lcaic_failure, i32 0, i32 0))
- call void @exit(i32 1)
- unreachable
-}
-
-define i1 @test() nounwind {
-entry:
- %tmp = load i1 ()** @funcPtr
- %eq = icmp eq i1 ()* %tmp, @test
- ret i1 %eq
-}
-
-declare i32 @puts(i8*) noreturn
-declare void @exit(i32) noreturn
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/test-arith.ll b/libclamav/c++/llvm/test/ExecutionEngine/test-arith.ll
deleted file mode 100644
index 8c51e6b..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/test-arith.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-define i32 @main() {
- %A = add i8 0, 12 ; <i8> [#uses=1]
- %B = sub i8 %A, 1 ; <i8> [#uses=2]
- %C = mul i8 %B, %B ; <i8> [#uses=2]
- %D = sdiv i8 %C, %C ; <i8> [#uses=2]
- %E = srem i8 %D, %D ; <i8> [#uses=0]
- %F = udiv i8 5, 6 ; <i8> [#uses=0]
- %G = urem i8 6, 5 ; <i8> [#uses=0]
- %A.upgrd.1 = add i16 0, 12 ; <i16> [#uses=1]
- %B.upgrd.2 = sub i16 %A.upgrd.1, 1 ; <i16> [#uses=2]
- %C.upgrd.3 = mul i16 %B.upgrd.2, %B.upgrd.2 ; <i16> [#uses=2]
- %D.upgrd.4 = sdiv i16 %C.upgrd.3, %C.upgrd.3 ; <i16> [#uses=2]
- %E.upgrd.5 = srem i16 %D.upgrd.4, %D.upgrd.4 ; <i16> [#uses=0]
- %F.upgrd.6 = udiv i16 5, 6 ; <i16> [#uses=0]
- %G.upgrd.7 = urem i32 6, 5 ; <i32> [#uses=0]
- %A.upgrd.8 = add i32 0, 12 ; <i32> [#uses=1]
- %B.upgrd.9 = sub i32 %A.upgrd.8, 1 ; <i32> [#uses=2]
- %C.upgrd.10 = mul i32 %B.upgrd.9, %B.upgrd.9 ; <i32> [#uses=2]
- %D.upgrd.11 = sdiv i32 %C.upgrd.10, %C.upgrd.10 ; <i32> [#uses=2]
- %E.upgrd.12 = srem i32 %D.upgrd.11, %D.upgrd.11 ; <i32> [#uses=0]
- %F.upgrd.13 = udiv i32 5, 6 ; <i32> [#uses=0]
- %G1 = urem i32 6, 5 ; <i32> [#uses=0]
- %A.upgrd.14 = add i64 0, 12 ; <i64> [#uses=1]
- %B.upgrd.15 = sub i64 %A.upgrd.14, 1 ; <i64> [#uses=2]
- %C.upgrd.16 = mul i64 %B.upgrd.15, %B.upgrd.15 ; <i64> [#uses=2]
- %D.upgrd.17 = sdiv i64 %C.upgrd.16, %C.upgrd.16 ; <i64> [#uses=2]
- %E.upgrd.18 = srem i64 %D.upgrd.17, %D.upgrd.17 ; <i64> [#uses=0]
- %F.upgrd.19 = udiv i64 5, 6 ; <i64> [#uses=0]
- %G.upgrd.20 = urem i64 6, 5 ; <i64> [#uses=0]
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/test-branch.ll b/libclamav/c++/llvm/test/ExecutionEngine/test-branch.ll
deleted file mode 100644
index dd8db54..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/test-branch.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-; test unconditional branch
-define i32 @main() {
- br label %Test
-Test: ; preds = %Test, %0
- %X = icmp eq i32 0, 4 ; <i1> [#uses=1]
- br i1 %X, label %Test, label %Label
-Label: ; preds = %Test
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/test-call.ll b/libclamav/c++/llvm/test/ExecutionEngine/test-call.ll
deleted file mode 100644
index 4464ebd..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/test-call.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-declare void @exit(i32)
-
-define i32 @test(i8 %C, i16 %S) {
- %X = trunc i16 %S to i8 ; <i8> [#uses=1]
- %Y = zext i8 %X to i32 ; <i32> [#uses=1]
- ret i32 %Y
-}
-
-define void @FP(void (i32)* %F) {
- %X = call i32 @test( i8 123, i16 1024 ) ; <i32> [#uses=1]
- call void %F( i32 %X )
- ret void
-}
-
-define i32 @main() {
- call void @FP( void (i32)* @exit )
- ret i32 1
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/test-cast.ll b/libclamav/c++/llvm/test/ExecutionEngine/test-cast.ll
deleted file mode 100644
index 82d4949..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/test-cast.ll
+++ /dev/null
@@ -1,110 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-define i32 @foo() {
- ret i32 0
-}
-
-define i32 @main() {
- icmp ne i1 true, false ; <i1>:1 [#uses=0]
- zext i1 true to i8 ; <i8>:2 [#uses=0]
- zext i1 true to i8 ; <i8>:3 [#uses=0]
- zext i1 true to i16 ; <i16>:4 [#uses=0]
- zext i1 true to i16 ; <i16>:5 [#uses=0]
- zext i1 true to i32 ; <i32>:6 [#uses=0]
- zext i1 true to i32 ; <i32>:7 [#uses=0]
- zext i1 true to i64 ; <i64>:8 [#uses=0]
- zext i1 true to i64 ; <i64>:9 [#uses=0]
- uitofp i1 true to float ; <float>:10 [#uses=0]
- uitofp i1 true to double ; <double>:11 [#uses=0]
- icmp ne i8 0, 0 ; <i1>:12 [#uses=0]
- icmp ne i8 1, 0 ; <i1>:13 [#uses=0]
- bitcast i8 0 to i8 ; <i8>:14 [#uses=0]
- bitcast i8 -1 to i8 ; <i8>:15 [#uses=0]
- sext i8 4 to i16 ; <i16>:16 [#uses=0]
- sext i8 4 to i16 ; <i16>:17 [#uses=0]
- sext i8 4 to i64 ; <i64>:18 [#uses=0]
- sext i8 4 to i64 ; <i64>:19 [#uses=0]
- sitofp i8 4 to float ; <float>:20 [#uses=0]
- sitofp i8 4 to double ; <double>:21 [#uses=0]
- icmp ne i8 0, 0 ; <i1>:22 [#uses=0]
- icmp ne i8 1, 0 ; <i1>:23 [#uses=0]
- bitcast i8 0 to i8 ; <i8>:24 [#uses=0]
- bitcast i8 1 to i8 ; <i8>:25 [#uses=0]
- zext i8 4 to i16 ; <i16>:26 [#uses=0]
- zext i8 4 to i16 ; <i16>:27 [#uses=0]
- zext i8 4 to i64 ; <i64>:28 [#uses=0]
- zext i8 4 to i64 ; <i64>:29 [#uses=0]
- uitofp i8 0 to float ; <float>:30 [#uses=0]
- uitofp i8 0 to double ; <double>:31 [#uses=0]
- icmp ne i16 1, 0 ; <i1>:32 [#uses=0]
- trunc i16 -1 to i8 ; <i8>:33 [#uses=0]
- trunc i16 255 to i8 ; <i8>:34 [#uses=0]
- bitcast i16 0 to i16 ; <i16>:35 [#uses=0]
- bitcast i16 0 to i16 ; <i16>:36 [#uses=0]
- sext i16 0 to i64 ; <i64>:37 [#uses=0]
- sext i16 0 to i64 ; <i64>:38 [#uses=0]
- sitofp i16 0 to float ; <float>:39 [#uses=0]
- sitofp i16 0 to double ; <double>:40 [#uses=0]
- icmp ne i16 1, 0 ; <i1>:41 [#uses=0]
- trunc i16 1 to i8 ; <i8>:42 [#uses=0]
- trunc i16 255 to i8 ; <i8>:43 [#uses=0]
- bitcast i16 0 to i16 ; <i16>:44 [#uses=0]
- bitcast i16 0 to i16 ; <i16>:45 [#uses=0]
- zext i16 0 to i64 ; <i64>:46 [#uses=0]
- zext i16 0 to i64 ; <i64>:47 [#uses=0]
- uitofp i16 0 to float ; <float>:48 [#uses=0]
- uitofp i16 0 to double ; <double>:49 [#uses=0]
- icmp ne i32 6, 0 ; <i1>:50 [#uses=0]
- trunc i32 -6 to i8 ; <i8>:51 [#uses=0]
- trunc i32 6 to i8 ; <i8>:52 [#uses=0]
- trunc i32 6 to i16 ; <i16>:53 [#uses=0]
- bitcast i32 0 to i32 ; <i32>:54 [#uses=0]
- sext i32 0 to i64 ; <i64>:55 [#uses=0]
- sext i32 0 to i64 ; <i64>:56 [#uses=0]
- sitofp i32 0 to float ; <float>:57 [#uses=0]
- sitofp i32 0 to double ; <double>:58 [#uses=0]
- icmp ne i32 6, 0 ; <i1>:59 [#uses=0]
- trunc i32 7 to i8 ; <i8>:60 [#uses=0]
- trunc i32 8 to i8 ; <i8>:61 [#uses=0]
- trunc i32 9 to i16 ; <i16>:62 [#uses=0]
- bitcast i32 10 to i32 ; <i32>:63 [#uses=0]
- zext i32 0 to i64 ; <i64>:64 [#uses=0]
- zext i32 0 to i64 ; <i64>:65 [#uses=0]
- uitofp i32 0 to float ; <float>:66 [#uses=0]
- uitofp i32 0 to double ; <double>:67 [#uses=0]
- icmp ne i64 0, 0 ; <i1>:68 [#uses=0]
- trunc i64 0 to i8 ; <i8>:69 [#uses=0]
- trunc i64 0 to i8 ; <i8>:70 [#uses=0]
- trunc i64 0 to i16 ; <i16>:71 [#uses=0]
- trunc i64 0 to i16 ; <i16>:72 [#uses=0]
- trunc i64 0 to i32 ; <i32>:73 [#uses=0]
- trunc i64 0 to i32 ; <i32>:74 [#uses=0]
- bitcast i64 0 to i64 ; <i64>:75 [#uses=0]
- bitcast i64 0 to i64 ; <i64>:76 [#uses=0]
- sitofp i64 0 to float ; <float>:77 [#uses=0]
- sitofp i64 0 to double ; <double>:78 [#uses=0]
- icmp ne i64 1, 0 ; <i1>:79 [#uses=0]
- trunc i64 1 to i8 ; <i8>:80 [#uses=0]
- trunc i64 1 to i8 ; <i8>:81 [#uses=0]
- trunc i64 1 to i16 ; <i16>:82 [#uses=0]
- trunc i64 1 to i16 ; <i16>:83 [#uses=0]
- trunc i64 1 to i32 ; <i32>:84 [#uses=0]
- trunc i64 1 to i32 ; <i32>:85 [#uses=0]
- bitcast i64 1 to i64 ; <i64>:86 [#uses=0]
- bitcast i64 1 to i64 ; <i64>:87 [#uses=0]
- uitofp i64 1 to float ; <float>:88 [#uses=0]
- uitofp i64 0 to double ; <double>:89 [#uses=0]
- bitcast float 0.000000e+00 to float ; <float>:90 [#uses=0]
- fpext float 0.000000e+00 to double ; <double>:91 [#uses=0]
- fptosi double 0.000000e+00 to i8 ; <i8>:92 [#uses=0]
- fptoui double 0.000000e+00 to i8 ; <i8>:93 [#uses=0]
- fptosi double 0.000000e+00 to i16 ; <i16>:94 [#uses=0]
- fptoui double 0.000000e+00 to i16 ; <i16>:95 [#uses=0]
- fptosi double 0.000000e+00 to i32 ; <i32>:96 [#uses=0]
- fptoui double 0.000000e+00 to i32 ; <i32>:97 [#uses=0]
- fptosi double 0.000000e+00 to i64 ; <i64>:98 [#uses=0]
- fptrunc double 0.000000e+00 to float ; <float>:99 [#uses=0]
- bitcast double 0.000000e+00 to double ; <double>:100 [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/test-constantexpr.ll b/libclamav/c++/llvm/test/ExecutionEngine/test-constantexpr.ll
deleted file mode 100644
index cd5c635..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/test-constantexpr.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-; This tests to make sure that we can evaluate weird constant expressions
-
-@A = global i32 5 ; <i32*> [#uses=1]
-@B = global i32 6 ; <i32*> [#uses=1]
-
-define i32 @main() {
- %A = or i1 false, icmp slt (i32* @A, i32* @B) ; <i1> [#uses=0]
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/test-fp.ll b/libclamav/c++/llvm/test/ExecutionEngine/test-fp.ll
deleted file mode 100644
index 76d00ad..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/test-fp.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RXUN: lli %t.bc > /dev/null
-; FP needs libcalls, which ClamAV has disabled, disable this test
-
-define double @test(double* %DP, double %Arg) {
- %D = load double* %DP ; <double> [#uses=1]
- %V = fadd double %D, 1.000000e+00 ; <double> [#uses=2]
- %W = fsub double %V, %V ; <double> [#uses=3]
- %X = fmul double %W, %W ; <double> [#uses=2]
- %Y = fdiv double %X, %X ; <double> [#uses=2]
- %Z = frem double %Y, %Y ; <double> [#uses=3]
- %Z1 = fdiv double %Z, %W ; <double> [#uses=0]
- %Q = fadd double %Z, %Arg ; <double> [#uses=1]
- %R = bitcast double %Q to double ; <double> [#uses=1]
- store double %R, double* %DP
- ret double %Z
-}
-
-define i32 @main() {
- %X = alloca double ; <double*> [#uses=2]
- store double 0.000000e+00, double* %X
- call double @test( double* %X, double 2.000000e+00 ) ; <double>:1 [#uses=0]
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/test-loadstore.ll b/libclamav/c++/llvm/test/ExecutionEngine/test-loadstore.ll
deleted file mode 100644
index ba0f0ba..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/test-loadstore.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-define void @test(i8* %P, i16* %P.upgrd.1, i32* %P.upgrd.2, i64* %P.upgrd.3) {
- %V = load i8* %P ; <i8> [#uses=1]
- store i8 %V, i8* %P
- %V.upgrd.4 = load i16* %P.upgrd.1 ; <i16> [#uses=1]
- store i16 %V.upgrd.4, i16* %P.upgrd.1
- %V.upgrd.5 = load i32* %P.upgrd.2 ; <i32> [#uses=1]
- store i32 %V.upgrd.5, i32* %P.upgrd.2
- %V.upgrd.6 = load i64* %P.upgrd.3 ; <i64> [#uses=1]
- store i64 %V.upgrd.6, i64* %P.upgrd.3
- ret void
-}
-
-define i32 @varalloca(i32 %Size) {
- ;; Variable sized alloca
- %X = alloca i32, i32 %Size ; <i32*> [#uses=2]
- store i32 %Size, i32* %X
- %Y = load i32* %X ; <i32> [#uses=1]
- ret i32 %Y
-}
-
-define i32 @main() {
- %A = alloca i8 ; <i8*> [#uses=1]
- %B = alloca i16 ; <i16*> [#uses=1]
- %C = alloca i32 ; <i32*> [#uses=1]
- %D = alloca i64 ; <i64*> [#uses=1]
- call void @test( i8* %A, i16* %B, i32* %C, i64* %D )
- call i32 @varalloca( i32 7 ) ; <i32>:1 [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/test-logical.ll b/libclamav/c++/llvm/test/ExecutionEngine/test-logical.ll
deleted file mode 100644
index e560e52..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/test-logical.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-define i32 @main() {
- %A = and i8 4, 8 ; <i8> [#uses=2]
- %B = or i8 %A, 7 ; <i8> [#uses=1]
- %C = xor i8 %B, %A ; <i8> [#uses=0]
- %A.upgrd.1 = and i16 4, 8 ; <i16> [#uses=2]
- %B.upgrd.2 = or i16 %A.upgrd.1, 7 ; <i16> [#uses=1]
- %C.upgrd.3 = xor i16 %B.upgrd.2, %A.upgrd.1 ; <i16> [#uses=0]
- %A.upgrd.4 = and i32 4, 8 ; <i32> [#uses=2]
- %B.upgrd.5 = or i32 %A.upgrd.4, 7 ; <i32> [#uses=1]
- %C.upgrd.6 = xor i32 %B.upgrd.5, %A.upgrd.4 ; <i32> [#uses=0]
- %A.upgrd.7 = and i64 4, 8 ; <i64> [#uses=2]
- %B.upgrd.8 = or i64 %A.upgrd.7, 7 ; <i64> [#uses=1]
- %C.upgrd.9 = xor i64 %B.upgrd.8, %A.upgrd.7 ; <i64> [#uses=0]
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/test-loop.ll b/libclamav/c++/llvm/test/ExecutionEngine/test-loop.ll
deleted file mode 100644
index 7cd69e2..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/test-loop.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-define i32 @main() {
-; <label>:0
- br label %Loop
-Loop: ; preds = %Loop, %0
- %I = phi i32 [ 0, %0 ], [ %i2, %Loop ] ; <i32> [#uses=1]
- %i2 = add i32 %I, 1 ; <i32> [#uses=2]
- %C = icmp eq i32 %i2, 10 ; <i1> [#uses=1]
- br i1 %C, label %Out, label %Loop
-Out: ; preds = %Loop
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/test-malloc.ll b/libclamav/c++/llvm/test/ExecutionEngine/test-malloc.ll
deleted file mode 100644
index 0097603..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/test-malloc.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-; CLAMAV-local: no external funcs
-; XFAIL: *
-
-define i32 @main() {
- %X = malloc i32 ; <i32*> [#uses=1]
- %Y = malloc i32, i32 100 ; <i32*> [#uses=1]
- %u = add i32 1, 2 ; <i32> [#uses=1]
- %Z = malloc i32, i32 %u ; <i32*> [#uses=1]
- free i32* %X
- free i32* %Y
- free i32* %Z
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/test-phi.ll b/libclamav/c++/llvm/test/ExecutionEngine/test-phi.ll
deleted file mode 100644
index f1aaefa..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/test-phi.ll
+++ /dev/null
@@ -1,35 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-; test phi node
-@Y = global i32 6 ; <i32*> [#uses=1]
-
-define void @blah(i32* %X) {
-; <label>:0
- br label %T
-T: ; preds = %Dead, %0
- phi i32* [ %X, %0 ], [ @Y, %Dead ] ; <i32*>:1 [#uses=0]
- ret void
-Dead: ; No predecessors!
- br label %T
-}
-
-define i32 @test(i1 %C) {
-; <label>:0
- br i1 %C, label %T, label %T
-T: ; preds = %0, %0
- %X = phi i32 [ 123, %0 ], [ 123, %0 ] ; <i32> [#uses=1]
- ret i32 %X
-}
-
-define i32 @main() {
-; <label>:0
- br label %Test
-Test: ; preds = %Dead, %0
- %X = phi i32 [ 0, %0 ], [ %Y, %Dead ] ; <i32> [#uses=1]
- ret i32 %X
-Dead: ; No predecessors!
- %Y = ashr i32 12, 4 ; <i32> [#uses=1]
- br label %Test
-}
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/test-ret.ll b/libclamav/c++/llvm/test/ExecutionEngine/test-ret.ll
deleted file mode 100644
index eae91f5..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/test-ret.ll
+++ /dev/null
@@ -1,47 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-; test return instructions
-define void @test1() {
- ret void
-}
-
-define i8 @test2() {
- ret i8 1
-}
-
-define i8 @test3() {
- ret i8 1
-}
-
-define i16 @test4() {
- ret i16 -1
-}
-
-define i16 @test5() {
- ret i16 -1
-}
-
-define i32 @main() {
- ret i32 0
-}
-
-define i32 @test6() {
- ret i32 4
-}
-
-define i64 @test7() {
- ret i64 0
-}
-
-define i64 @test8() {
- ret i64 0
-}
-
-define float @test9() {
- ret float 1.000000e+00
-}
-
-define double @test10() {
- ret double 2.000000e+00
-}
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/test-setcond-fp.ll b/libclamav/c++/llvm/test/ExecutionEngine/test-setcond-fp.ll
deleted file mode 100644
index 4264e2c..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/test-setcond-fp.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-
-define i32 @main() {
- %double1 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=6]
- %double2 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=6]
- %float1 = fadd float 0.000000e+00, 0.000000e+00 ; <float> [#uses=6]
- %float2 = fadd float 0.000000e+00, 0.000000e+00 ; <float> [#uses=6]
- %test49 = fcmp oeq float %float1, %float2 ; <i1> [#uses=0]
- %test50 = fcmp oge float %float1, %float2 ; <i1> [#uses=0]
- %test51 = fcmp ogt float %float1, %float2 ; <i1> [#uses=0]
- %test52 = fcmp ole float %float1, %float2 ; <i1> [#uses=0]
- %test53 = fcmp olt float %float1, %float2 ; <i1> [#uses=0]
- %test54 = fcmp une float %float1, %float2 ; <i1> [#uses=0]
- %test55 = fcmp oeq double %double1, %double2 ; <i1> [#uses=0]
- %test56 = fcmp oge double %double1, %double2 ; <i1> [#uses=0]
- %test57 = fcmp ogt double %double1, %double2 ; <i1> [#uses=0]
- %test58 = fcmp ole double %double1, %double2 ; <i1> [#uses=0]
- %test59 = fcmp olt double %double1, %double2 ; <i1> [#uses=0]
- %test60 = fcmp une double %double1, %double2 ; <i1> [#uses=0]
- ret i32 0
-}
-
-
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/test-setcond-int.ll b/libclamav/c++/llvm/test/ExecutionEngine/test-setcond-int.ll
deleted file mode 100644
index 772f4fa..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/test-setcond-int.ll
+++ /dev/null
@@ -1,70 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-define i32 @main() {
- %int1 = add i32 0, 0 ; <i32> [#uses=6]
- %int2 = add i32 0, 0 ; <i32> [#uses=6]
- %long1 = add i64 0, 0 ; <i64> [#uses=6]
- %long2 = add i64 0, 0 ; <i64> [#uses=6]
- %sbyte1 = add i8 0, 0 ; <i8> [#uses=6]
- %sbyte2 = add i8 0, 0 ; <i8> [#uses=6]
- %short1 = add i16 0, 0 ; <i16> [#uses=6]
- %short2 = add i16 0, 0 ; <i16> [#uses=6]
- %ubyte1 = add i8 0, 0 ; <i8> [#uses=6]
- %ubyte2 = add i8 0, 0 ; <i8> [#uses=6]
- %uint1 = add i32 0, 0 ; <i32> [#uses=6]
- %uint2 = add i32 0, 0 ; <i32> [#uses=6]
- %ulong1 = add i64 0, 0 ; <i64> [#uses=6]
- %ulong2 = add i64 0, 0 ; <i64> [#uses=6]
- %ushort1 = add i16 0, 0 ; <i16> [#uses=6]
- %ushort2 = add i16 0, 0 ; <i16> [#uses=6]
- %test1 = icmp eq i8 %ubyte1, %ubyte2 ; <i1> [#uses=0]
- %test2 = icmp uge i8 %ubyte1, %ubyte2 ; <i1> [#uses=0]
- %test3 = icmp ugt i8 %ubyte1, %ubyte2 ; <i1> [#uses=0]
- %test4 = icmp ule i8 %ubyte1, %ubyte2 ; <i1> [#uses=0]
- %test5 = icmp ult i8 %ubyte1, %ubyte2 ; <i1> [#uses=0]
- %test6 = icmp ne i8 %ubyte1, %ubyte2 ; <i1> [#uses=0]
- %test7 = icmp eq i16 %ushort1, %ushort2 ; <i1> [#uses=0]
- %test8 = icmp uge i16 %ushort1, %ushort2 ; <i1> [#uses=0]
- %test9 = icmp ugt i16 %ushort1, %ushort2 ; <i1> [#uses=0]
- %test10 = icmp ule i16 %ushort1, %ushort2 ; <i1> [#uses=0]
- %test11 = icmp ult i16 %ushort1, %ushort2 ; <i1> [#uses=0]
- %test12 = icmp ne i16 %ushort1, %ushort2 ; <i1> [#uses=0]
- %test13 = icmp eq i32 %uint1, %uint2 ; <i1> [#uses=0]
- %test14 = icmp uge i32 %uint1, %uint2 ; <i1> [#uses=0]
- %test15 = icmp ugt i32 %uint1, %uint2 ; <i1> [#uses=0]
- %test16 = icmp ule i32 %uint1, %uint2 ; <i1> [#uses=0]
- %test17 = icmp ult i32 %uint1, %uint2 ; <i1> [#uses=0]
- %test18 = icmp ne i32 %uint1, %uint2 ; <i1> [#uses=0]
- %test19 = icmp eq i64 %ulong1, %ulong2 ; <i1> [#uses=0]
- %test20 = icmp uge i64 %ulong1, %ulong2 ; <i1> [#uses=0]
- %test21 = icmp ugt i64 %ulong1, %ulong2 ; <i1> [#uses=0]
- %test22 = icmp ule i64 %ulong1, %ulong2 ; <i1> [#uses=0]
- %test23 = icmp ult i64 %ulong1, %ulong2 ; <i1> [#uses=0]
- %test24 = icmp ne i64 %ulong1, %ulong2 ; <i1> [#uses=0]
- %test25 = icmp eq i8 %sbyte1, %sbyte2 ; <i1> [#uses=0]
- %test26 = icmp sge i8 %sbyte1, %sbyte2 ; <i1> [#uses=0]
- %test27 = icmp sgt i8 %sbyte1, %sbyte2 ; <i1> [#uses=0]
- %test28 = icmp sle i8 %sbyte1, %sbyte2 ; <i1> [#uses=0]
- %test29 = icmp slt i8 %sbyte1, %sbyte2 ; <i1> [#uses=0]
- %test30 = icmp ne i8 %sbyte1, %sbyte2 ; <i1> [#uses=0]
- %test31 = icmp eq i16 %short1, %short2 ; <i1> [#uses=0]
- %test32 = icmp sge i16 %short1, %short2 ; <i1> [#uses=0]
- %test33 = icmp sgt i16 %short1, %short2 ; <i1> [#uses=0]
- %test34 = icmp sle i16 %short1, %short2 ; <i1> [#uses=0]
- %test35 = icmp slt i16 %short1, %short2 ; <i1> [#uses=0]
- %test36 = icmp ne i16 %short1, %short2 ; <i1> [#uses=0]
- %test37 = icmp eq i32 %int1, %int2 ; <i1> [#uses=0]
- %test38 = icmp sge i32 %int1, %int2 ; <i1> [#uses=0]
- %test39 = icmp sgt i32 %int1, %int2 ; <i1> [#uses=0]
- %test40 = icmp sle i32 %int1, %int2 ; <i1> [#uses=0]
- %test41 = icmp slt i32 %int1, %int2 ; <i1> [#uses=0]
- %test42 = icmp ne i32 %int1, %int2 ; <i1> [#uses=0]
- %test43 = icmp eq i64 %long1, %long2 ; <i1> [#uses=0]
- %test44 = icmp sge i64 %long1, %long2 ; <i1> [#uses=0]
- %test45 = icmp sgt i64 %long1, %long2 ; <i1> [#uses=0]
- %test46 = icmp sle i64 %long1, %long2 ; <i1> [#uses=0]
- %test47 = icmp slt i64 %long1, %long2 ; <i1> [#uses=0]
- %test48 = icmp ne i64 %long1, %long2 ; <i1> [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/ExecutionEngine/test-shift.ll b/libclamav/c++/llvm/test/ExecutionEngine/test-shift.ll
deleted file mode 100644
index 2791b85..0000000
--- a/libclamav/c++/llvm/test/ExecutionEngine/test-shift.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llvm-as %s -o %t.bc
-; RUN: lli %t.bc > /dev/null
-
-define i32 @main() {
- %shamt = add i8 0, 1 ; <i8> [#uses=8]
- %shift.upgrd.1 = zext i8 %shamt to i32 ; <i32> [#uses=1]
- %t1.s = shl i32 1, %shift.upgrd.1 ; <i32> [#uses=0]
- %t2.s = shl i32 1, 4 ; <i32> [#uses=0]
- %shift.upgrd.2 = zext i8 %shamt to i32 ; <i32> [#uses=1]
- %t1 = shl i32 1, %shift.upgrd.2 ; <i32> [#uses=0]
- %t2 = shl i32 1, 5 ; <i32> [#uses=0]
- %t2.s.upgrd.3 = shl i64 1, 4 ; <i64> [#uses=0]
- %t2.upgrd.4 = shl i64 1, 5 ; <i64> [#uses=0]
- %shift.upgrd.5 = zext i8 %shamt to i32 ; <i32> [#uses=1]
- %tr1.s = ashr i32 1, %shift.upgrd.5 ; <i32> [#uses=0]
- %tr2.s = ashr i32 1, 4 ; <i32> [#uses=0]
- %shift.upgrd.6 = zext i8 %shamt to i32 ; <i32> [#uses=1]
- %tr1 = lshr i32 1, %shift.upgrd.6 ; <i32> [#uses=0]
- %tr2 = lshr i32 1, 5 ; <i32> [#uses=0]
- %tr1.l = ashr i64 1, 4 ; <i64> [#uses=0]
- %shift.upgrd.7 = zext i8 %shamt to i64 ; <i64> [#uses=1]
- %tr2.l = ashr i64 1, %shift.upgrd.7 ; <i64> [#uses=0]
- %tr3.l = shl i64 1, 4 ; <i64> [#uses=0]
- %shift.upgrd.8 = zext i8 %shamt to i64 ; <i64> [#uses=1]
- %tr4.l = shl i64 1, %shift.upgrd.8 ; <i64> [#uses=0]
- %tr1.u = lshr i64 1, 5 ; <i64> [#uses=0]
- %shift.upgrd.9 = zext i8 %shamt to i64 ; <i64> [#uses=1]
- %tr2.u = lshr i64 1, %shift.upgrd.9 ; <i64> [#uses=0]
- %tr3.u = shl i64 1, 5 ; <i64> [#uses=0]
- %shift.upgrd.10 = zext i8 %shamt to i64 ; <i64> [#uses=1]
- %tr4.u = shl i64 1, %shift.upgrd.10 ; <i64> [#uses=0]
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/Feature/NamedMDNode.ll b/libclamav/c++/llvm/test/Feature/NamedMDNode.ll
deleted file mode 100644
index 02a79f8..0000000
--- a/libclamav/c++/llvm/test/Feature/NamedMDNode.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis | grep "llvm.stuff = "
-
-;; Simple NamedMDNode
-!0 = metadata !{i32 42}
-!1 = metadata !{metadata !"foo"}
-!llvm.stuff = !{!0, !1, null}
-
-!samename = !{!0, !1}
-declare void @samename()
diff --git a/libclamav/c++/llvm/test/Feature/NamedMDNode2.ll b/libclamav/c++/llvm/test/Feature/NamedMDNode2.ll
deleted file mode 100644
index 0524dd2..0000000
--- a/libclamav/c++/llvm/test/Feature/NamedMDNode2.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llvm-as < %s -o /dev/null
-; PR4654
-
-
-@foo = constant i1 false
-!0 = metadata !{i1 false}
-!a = !{!0}
diff --git a/libclamav/c++/llvm/test/Feature/README.txt b/libclamav/c++/llvm/test/Feature/README.txt
deleted file mode 100644
index 5947bb2..0000000
--- a/libclamav/c++/llvm/test/Feature/README.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-This directory contains test cases for individual source features of LLVM.
-It is designed to make sure that the major components of LLVM support all of the
-features of LLVM, for very small examples. Entire programs should not go here.
-
-Regression tests for individual bug fixes should go into the test/Regression dir.
-
diff --git a/libclamav/c++/llvm/test/Feature/aliases.ll b/libclamav/c++/llvm/test/Feature/aliases.ll
deleted file mode 100644
index d44dff4..0000000
--- a/libclamav/c++/llvm/test/Feature/aliases.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-@bar = external global i32
-@foo1 = alias i32* @bar
-@foo2 = alias i32* @bar
-@foo3 = alias i32* @foo2
-
-%FunTy = type i32()
-
-declare i32 @foo_f()
-@bar_f = alias weak %FunTy* @foo_f
-@bar_ff = alias i32()* @bar_f
-
-@bar_i = alias internal i32* @bar
-
-@A = alias bitcast (i32* @bar to i64*)
-
-define i32 @test() {
-entry:
- %tmp = load i32* @foo1
- %tmp1 = load i32* @foo2
- %tmp0 = load i32* @bar_i
- %tmp2 = call i32 @foo_f()
- %tmp3 = add i32 %tmp, %tmp2
- %tmp4 = call %FunTy* @bar_f()
- %tmp5 = add i32 %tmp3, %tmp4
- %tmp6 = add i32 %tmp1, %tmp5
- %tmp7 = add i32 %tmp6, %tmp0
- ret i32 %tmp7
-}
diff --git a/libclamav/c++/llvm/test/Feature/alignment.ll b/libclamav/c++/llvm/test/Feature/alignment.ll
deleted file mode 100644
index ef35a13..0000000
--- a/libclamav/c++/llvm/test/Feature/alignment.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-@X = global i32 4, align 16 ; <i32*> [#uses=0]
-
-define i32* @test() align 32 {
- %X = alloca i32, align 4 ; <i32*> [#uses=1]
- %Y = alloca i32, i32 42, align 16 ; <i32*> [#uses=0]
- %Z = alloca i32 ; <i32*> [#uses=0]
- ret i32* %X
-}
-
-define i32* @test2() {
- %X = malloc i32, align 4 ; <i32*> [#uses=1]
- %Y = malloc i32, i32 42, align 16 ; <i32*> [#uses=0]
- %Z = malloc i32 ; <i32*> [#uses=0]
- %T = malloc i32, align 256 ; <i32*> [#uses=0]
- ret i32* %X
-}
-
-define void @test3() alignstack(16) {
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/basictest.ll b/libclamav/c++/llvm/test/Feature/basictest.ll
deleted file mode 100644
index 2303b59..0000000
--- a/libclamav/c++/llvm/test/Feature/basictest.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-; Test "stripped" format where nothing is symbolic... this is how the bytecode
-; format looks anyways (except for negative vs positive offsets)...
-;
-
-define void @void(i32, i32) {
- add i32 0, 0 ; <i32>:3 [#uses=2]
- sub i32 0, 4 ; <i32>:4 [#uses=2]
- br label %5
-
-; <label>:5 ; preds = %5, %2
- add i32 %0, %1 ; <i32>:6 [#uses=2]
- sub i32 %6, %4 ; <i32>:7 [#uses=1]
- icmp sle i32 %7, %3 ; <i1>:8 [#uses=1]
- br i1 %8, label %9, label %5
-
-; <label>:9 ; preds = %5
- add i32 %0, %1 ; <i32>:10 [#uses=0]
- sub i32 %6, %4 ; <i32>:11 [#uses=1]
- icmp sle i32 %11, %3 ; <i1>:12 [#uses=0]
- ret void
-}
-
-; This function always returns zero
-define i32 @zarro() {
-Startup:
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/Feature/callingconventions.ll b/libclamav/c++/llvm/test/Feature/callingconventions.ll
deleted file mode 100644
index d2e9de4..0000000
--- a/libclamav/c++/llvm/test/Feature/callingconventions.ll
+++ /dev/null
@@ -1,50 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-define fastcc void @foo() {
- ret void
-}
-
-define coldcc void @bar() {
- call fastcc void @foo( )
- ret void
-}
-
-define void @structret({ i8 }* sret %P) {
- call void @structret( { i8 }* sret %P )
- ret void
-}
-
-define void @foo2() {
- ret void
-}
-
-define coldcc void @bar2() {
- call fastcc void @foo( )
- ret void
-}
-
-define cc42 void @bar3() {
- invoke fastcc void @foo( )
- to label %Ok unwind label %U
-
-Ok: ; preds = %0
- ret void
-
-U: ; preds = %0
- unwind
-}
-
-define void @bar4() {
- call cc42 void @bar( )
- invoke cc42 void @bar3( )
- to label %Ok unwind label %U
-
-Ok: ; preds = %0
- ret void
-
-U: ; preds = %0
- unwind
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/calltest.ll b/libclamav/c++/llvm/test/Feature/calltest.ll
deleted file mode 100644
index feafd3c..0000000
--- a/libclamav/c++/llvm/test/Feature/calltest.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-%FunTy = type i32 (i32)
-
-declare i32 @test(i32) ; Test forward declaration merging
-
-define void @invoke(%FunTy* %x) {
- %foo = call i32 %x( i32 123 ) ; <i32> [#uses=0]
- %foo2 = tail call i32 %x( i32 123 ) ; <i32> [#uses=0]
- ret void
-}
-
-define i32 @main(i32 %argc) {
- %retval = call i32 @test( i32 %argc ) ; <i32> [#uses=2]
- %two = add i32 %retval, %retval ; <i32> [#uses=1]
- %retval2 = invoke i32 @test( i32 %argc )
- to label %Next unwind label %Error ; <i32> [#uses=1]
-
-Next: ; preds = %0
- %two2 = add i32 %two, %retval2 ; <i32> [#uses=1]
- call void @invoke( %FunTy* @test )
- ret i32 %two2
-
-Error: ; preds = %0
- ret i32 -1
-}
-
-define i32 @test(i32 %i0) {
- ret i32 %i0
-}
diff --git a/libclamav/c++/llvm/test/Feature/casttest.ll b/libclamav/c++/llvm/test/Feature/casttest.ll
deleted file mode 100644
index d9c22ff..0000000
--- a/libclamav/c++/llvm/test/Feature/casttest.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-define i16 @FunFunc(i64 %x, i8 %z) {
-bb0:
- %cast110 = sext i8 %z to i16 ; <i16> [#uses=1]
- %cast10 = trunc i64 %x to i16 ; <i16> [#uses=1]
- %reg109 = add i16 %cast110, %cast10 ; <i16> [#uses=1]
- ret i16 %reg109
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/cfgstructures.ll b/libclamav/c++/llvm/test/Feature/cfgstructures.ll
deleted file mode 100644
index e667f6d..0000000
--- a/libclamav/c++/llvm/test/Feature/cfgstructures.ll
+++ /dev/null
@@ -1,53 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-;; This is an irreducible flow graph
-define void @irreducible(i1 %cond) {
- br i1 %cond, label %X, label %Y
-
-X: ; preds = %Y, %0
- br label %Y
-
-Y: ; preds = %X, %0
- br label %X
-}
-
-;; This is a pair of loops that share the same header
-define void @sharedheader(i1 %cond) {
- br label %A
-
-A: ; preds = %Y, %X, %0
- br i1 %cond, label %X, label %Y
-
-X: ; preds = %A
- br label %A
-
-Y: ; preds = %A
- br label %A
-}
-
-
-;; This is a simple nested loop
-define void @nested(i1 %cond1, i1 %cond2, i1 %cond3) {
- br label %Loop1
-
-Loop1: ; preds = %L2Exit, %0
- br label %Loop2
-
-Loop2: ; preds = %L3Exit, %Loop1
- br label %Loop3
-
-Loop3: ; preds = %Loop3, %Loop2
- br i1 %cond3, label %Loop3, label %L3Exit
-
-L3Exit: ; preds = %Loop3
- br i1 %cond2, label %Loop2, label %L2Exit
-
-L2Exit: ; preds = %L3Exit
- br i1 %cond1, label %Loop1, label %L1Exit
-
-L1Exit: ; preds = %L2Exit
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/constexpr.ll b/libclamav/c++/llvm/test/Feature/constexpr.ll
deleted file mode 100644
index 13e6f36..0000000
--- a/libclamav/c++/llvm/test/Feature/constexpr.ll
+++ /dev/null
@@ -1,80 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-; This testcase is for testing expressions constructed from
-; constant values, including constant pointers to globals.
-;
-
-;;-------------------------------
-;; Test constant cast expressions
-;;-------------------------------
-
-global i64 u0x00001 ; hexadecimal unsigned integer constants
-global i64 s0x0012312 ; hexadecimal signed integer constants
-
-@t2 = global i32* @t1 ;; Forward reference without cast
-@t3 = global i32* bitcast (i32* @t1 to i32*) ;; Forward reference with cast
-@t1 = global i32 4 ;; i32* @0
-@t4 = global i32** bitcast (i32** @t3 to i32**) ;; Cast of a previous cast
-@t5 = global i32** @t3 ;; Reference to a previous cast
-@t6 = global i32*** @t4 ;; Different ref. to a previous cast
-@t7 = global float* inttoptr (i32 12345678 to float*) ;; Cast ordinary value to ptr
-@t9 = global i32 bitcast (float bitcast (i32 8 to float) to i32) ;; Nested cast expression
-
-global i32* bitcast (float* @4 to i32*) ;; Forward numeric reference
-global float* @4 ;; Duplicate forward numeric reference
-global float 0.0
-
-
-;;---------------------------------------------------
-;; Test constant getelementpr expressions for arrays
-;;---------------------------------------------------
-
-@array = constant [2 x i32] [ i32 12, i32 52 ]
-@arrayPtr = global i32* getelementptr ([2 x i32]* @array, i64 0, i64 0) ;; i32* &@array[0][0]
-@arrayPtr5 = global i32** getelementptr (i32** @arrayPtr, i64 5) ;; i32* &@arrayPtr[5]
-
-@somestr = constant [11x i8] c"hello world"
-@char5 = global i8* getelementptr([11x i8]* @somestr, i64 0, i64 5)
-
-;; cast of getelementptr
-@char8a = global i32* bitcast (i8* getelementptr([11x i8]* @somestr, i64 0, i64 8) to i32*)
-
-;; getelementptr containing casts
-@char8b = global i8* getelementptr([11x i8]* @somestr, i64 sext (i8 0 to i64), i64 sext (i8 8 to i64))
-
-;;-------------------------------------------------------
-;; TODO: Test constant getelementpr expressions for structures
-;;-------------------------------------------------------
-
-%SType = type { i32 , {float, {i8} }, i64 } ;; struct containing struct
-%SAType = type { i32 , {[2x float], i64} } ;; struct containing array
-
-@S1 = global %SType* null ;; Global initialized to NULL
-@S2c = constant %SType { i32 1, {float,{i8}} {float 2.0, {i8} {i8 3}}, i64 4}
-
-@S3c = constant %SAType { i32 1, {[2x float], i64} {[2x float] [float 2.0, float 3.0], i64 4} }
-
-@S1ptr = global %SType** @S1 ;; Ref. to global S1
-@S2 = global %SType* @S2c ;; Ref. to constant S2
-@S3 = global %SAType* @S3c ;; Ref. to constant S3
-
- ;; Pointer to float (**@S1).1.0
-@S1fld1a = global float* getelementptr (%SType* @S2c, i64 0, i32 1, i32 0)
- ;; Another ptr to the same!
-@S1fld1b = global float* getelementptr (%SType* @S2c, i64 0, i32 1, i32 0)
-
-@S1fld1bptr = global float** @S1fld1b ;; Ref. to previous pointer
-
- ;; Pointer to i8 (**@S2).1.1.0
-@S2fld3 = global i8* getelementptr (%SType* @S2c, i64 0, i32 1, i32 1, i32 0)
-
- ;; Pointer to float (**@S2).1.0[0]
-;@S3fld3 = global float* getelementptr (%SAType** @S3, i64 0, i64 0, i32 1, i32 0, i64 0)
-
-;;---------------------------------------------------------
-;; TODO: Test constant expressions for unary and binary operators
-;;---------------------------------------------------------
-
-;;---------------------------------------------------
diff --git a/libclamav/c++/llvm/test/Feature/constpointer.ll b/libclamav/c++/llvm/test/Feature/constpointer.ll
deleted file mode 100644
index 5c1bed1..0000000
--- a/libclamav/c++/llvm/test/Feature/constpointer.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-; This testcase is primarily used for testing that global values can be used as
-; constant pointer initializers. This is tricky because they can be forward
-; declared and involves an icky bytecode encoding. There is no meaningful
-; optimization that can be performed on this file, it is just here to test
-; assembly and disassembly.
-;
-
-
-@t3 = global i32* @t1 ;; Forward reference
-@t1 = global i32 4
-@t4 = global i32** @t3 ;; reference to reference
-
-@t2 = global i32* @t1
-
-global float * @2 ;; Forward numeric reference
-global float * @2 ;; Duplicate forward numeric reference
-global float 0.0
-global float * @2 ;; Numeric reference
-
-
-@fptr = global void() * @f ;; Forward ref method defn
-declare void @f() ;; External method
-
-@sptr1 = global [11x i8]* @somestr ;; Forward ref to a constant
-@somestr = constant [11x i8] c"hello world"
-@sptr2 = global [11x i8]* @somestr
-
diff --git a/libclamav/c++/llvm/test/Feature/dg.exp b/libclamav/c++/llvm/test/Feature/dg.exp
deleted file mode 100644
index f200589..0000000
--- a/libclamav/c++/llvm/test/Feature/dg.exp
+++ /dev/null
@@ -1,3 +0,0 @@
-load_lib llvm.exp
-
-RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
diff --git a/libclamav/c++/llvm/test/Feature/escaped_label.ll b/libclamav/c++/llvm/test/Feature/escaped_label.ll
deleted file mode 100644
index 7f5f619..0000000
--- a/libclamav/c++/llvm/test/Feature/escaped_label.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-define i32 @foo() {
- br label %"foo`~!@#$%^&*()-_=+{}[]\\\\|;:',<.>/?"
-
-"foo`~!@#$%^&*()-_=+{}[]\\\\|;:',<.>/?": ; preds = %0
- ret i32 17
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/float.ll b/libclamav/c++/llvm/test/Feature/float.ll
deleted file mode 100644
index 6c6c5dd..0000000
--- a/libclamav/c++/llvm/test/Feature/float.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-@F1 = global float 0x4010000000000000
-@D1 = global double 0x4010000000000000
diff --git a/libclamav/c++/llvm/test/Feature/fold-fpcast.ll b/libclamav/c++/llvm/test/Feature/fold-fpcast.ll
deleted file mode 100644
index cdf8da6..0000000
--- a/libclamav/c++/llvm/test/Feature/fold-fpcast.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis | not grep bitcast
-
-define i32 @test1() {
- ret i32 bitcast(float 0x400D9999A0000000 to i32)
-}
-
-define float @test2() {
- ret float bitcast(i32 17 to float)
-}
-
-define i64 @test3() {
- ret i64 bitcast (double 0x400921FB4D12D84A to i64)
-}
-
-define double @test4() {
- ret double bitcast (i64 42 to double)
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/forwardreftest.ll b/libclamav/c++/llvm/test/Feature/forwardreftest.ll
deleted file mode 100644
index 26d214a..0000000
--- a/libclamav/c++/llvm/test/Feature/forwardreftest.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-%myty = type i32
-%myfn = type float (i32,double,i32,i16)
-type i32(%myfn*)
-type i32(i32)
-type i32(i32(i32)*)
-
- %thisfuncty = type i32 (i32) *
-
-declare void @F(%thisfuncty, %thisfuncty, %thisfuncty)
-
-define i32 @zarro(i32 %Func) {
-Startup:
- add i32 0, 10 ; <i32>:0 [#uses=0]
- ret i32 0
-}
-
-define i32 @test(i32) {
- call void @F( %thisfuncty @zarro, %thisfuncty @test, %thisfuncty @foozball )
- ret i32 0
-}
-
-define i32 @foozball(i32) {
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/global_section.ll b/libclamav/c++/llvm/test/Feature/global_section.ll
deleted file mode 100644
index b8f5eb1..0000000
--- a/libclamav/c++/llvm/test/Feature/global_section.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-@X = global i32 4, section "foo", align 16 ; <i32*> [#uses=0]
-
-define void @test() section "bar" {
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/globalredefinition.ll b/libclamav/c++/llvm/test/Feature/globalredefinition.ll
deleted file mode 100644
index 42e2d1a..0000000
--- a/libclamav/c++/llvm/test/Feature/globalredefinition.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-; Test forward references and redefinitions of globals
-
-@A = global i32* @B ; <i32**> [#uses=0]
-@B = global i32 7 ; <i32*> [#uses=1]
-
-declare void @X()
-
-declare void @X()
-
-define void @X() {
- ret void
-}
-
-declare void @X()
diff --git a/libclamav/c++/llvm/test/Feature/globalredefinition3.ll b/libclamav/c++/llvm/test/Feature/globalredefinition3.ll
deleted file mode 100644
index 5a5b3f1..0000000
--- a/libclamav/c++/llvm/test/Feature/globalredefinition3.ll
+++ /dev/null
@@ -1,4 +0,0 @@
-; RUN: not llvm-as %s -o /dev/null |& grep {redefinition of global '@B'}
-
-@B = global i32 7
-@B = global i32 7
diff --git a/libclamav/c++/llvm/test/Feature/globalvars.ll b/libclamav/c++/llvm/test/Feature/globalvars.ll
deleted file mode 100644
index 9a23775..0000000
--- a/libclamav/c++/llvm/test/Feature/globalvars.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-@MyVar = external global i32 ; <i32*> [#uses=1]
-@MyIntList = external global { \2*, i32 } ; <{ \2*, i32 }*> [#uses=1]
-external global i32 ; <i32*>:0 [#uses=0]
-@AConst = constant i32 123 ; <i32*> [#uses=0]
-@AString = constant [4 x i8] c"test" ; <[4 x i8]*> [#uses=0]
-@ZeroInit = global { [100 x i32], [40 x float] } zeroinitializer ; <{ [100 x i32], [40 x float] }*> [#uses=0]
-
-define i32 @foo(i32 %blah) {
- store i32 5, i32* @MyVar
- %idx = getelementptr { \2*, i32 }* @MyIntList, i64 0, i32 1 ; <i32*> [#uses=1]
- store i32 12, i32* %idx
- ret i32 %blah
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/indirectcall.ll b/libclamav/c++/llvm/test/Feature/indirectcall.ll
deleted file mode 100644
index c1cf39f..0000000
--- a/libclamav/c++/llvm/test/Feature/indirectcall.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-declare i32 @atoi(i8*)
-
-define i64 @fib(i64 %n) {
- icmp ult i64 %n, 2 ; <i1>:1 [#uses=1]
- br i1 %1, label %BaseCase, label %RecurseCase
-
-BaseCase: ; preds = %0
- ret i64 1
-
-RecurseCase: ; preds = %0
- %n2 = sub i64 %n, 2 ; <i64> [#uses=1]
- %n1 = sub i64 %n, 1 ; <i64> [#uses=1]
- %f2 = call i64 @fib( i64 %n2 ) ; <i64> [#uses=1]
- %f1 = call i64 @fib( i64 %n1 ) ; <i64> [#uses=1]
- %result = add i64 %f2, %f1 ; <i64> [#uses=1]
- ret i64 %result
-}
-
-define i64 @realmain(i32 %argc, i8** %argv) {
-; <label>:0
- icmp eq i32 %argc, 2 ; <i1>:1 [#uses=1]
- br i1 %1, label %HasArg, label %Continue
-
-HasArg: ; preds = %0
- %n1 = add i32 1, 1 ; <i32> [#uses=1]
- br label %Continue
-
-Continue: ; preds = %HasArg, %0
- %n = phi i32 [ %n1, %HasArg ], [ 1, %0 ] ; <i32> [#uses=1]
- %N = sext i32 %n to i64 ; <i64> [#uses=1]
- %F = call i64 @fib( i64 %N ) ; <i64> [#uses=1]
- ret i64 %F
-}
-
-define i64 @trampoline(i64 %n, i64 (i64)* %fibfunc) {
- %F = call i64 %fibfunc( i64 %n ) ; <i64> [#uses=1]
- ret i64 %F
-}
-
-define i32 @main() {
- %Result = call i64 @trampoline( i64 10, i64 (i64)* @fib ) ; <i64> [#uses=1]
- %Result.upgrd.1 = trunc i64 %Result to i32 ; <i32> [#uses=1]
- ret i32 %Result.upgrd.1
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/indirectcall2.ll b/libclamav/c++/llvm/test/Feature/indirectcall2.ll
deleted file mode 100644
index 1b949fc..0000000
--- a/libclamav/c++/llvm/test/Feature/indirectcall2.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-define i64 @test(i64 %X) {
- ret i64 %X
-}
-
-define i64 @fib(i64 %n) {
-; <label>:0
- %T = icmp ult i64 %n, 2 ; <i1> [#uses=1]
- br i1 %T, label %BaseCase, label %RecurseCase
-
-RecurseCase: ; preds = %0
- %result = call i64 @test( i64 %n ) ; <i64> [#uses=0]
- br label %BaseCase
-
-BaseCase: ; preds = %RecurseCase, %0
- %X = phi i64 [ 1, %0 ], [ 2, %RecurseCase ] ; <i64> [#uses=1]
- ret i64 %X
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/inlineasm.ll b/libclamav/c++/llvm/test/Feature/inlineasm.ll
deleted file mode 100644
index 6be5722..0000000
--- a/libclamav/c++/llvm/test/Feature/inlineasm.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-module asm "this is an inline asm block"
-module asm "this is another inline asm block"
-
-define i32 @test() {
- %X = call i32 asm "tricky here $0, $1", "=r,r"( i32 4 ) ; <i32> [#uses=1]
- call void asm sideeffect "eieio", ""( )
- ret i32 %X
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/instructions.ll b/libclamav/c++/llvm/test/Feature/instructions.ll
deleted file mode 100644
index d0c303d..0000000
--- a/libclamav/c++/llvm/test/Feature/instructions.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-define i32 @test_extractelement(<4 x i32> %V) {
- %R = extractelement <4 x i32> %V, i32 1 ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define <4 x i32> @test_insertelement(<4 x i32> %V) {
- %R = insertelement <4 x i32> %V, i32 0, i32 0 ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %R
-}
-
-define <4 x i32> @test_shufflevector_u(<4 x i32> %V) {
- %R = shufflevector <4 x i32> %V, <4 x i32> %V, <4 x i32> < i32 1, i32 undef, i32 7, i32 2 > ; <<4 x i32>> [#uses=1]
- ret <4 x i32> %R
-}
-
-define <4 x float> @test_shufflevector_f(<4 x float> %V) {
- %R = shufflevector <4 x float> %V, <4 x float> undef, <4 x i32> < i32 1, i32 undef, i32 7, i32 2 > ; <<4 x float>> [#uses=1]
- ret <4 x float> %R
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/intrinsics.ll b/libclamav/c++/llvm/test/Feature/intrinsics.ll
deleted file mode 100644
index 2dd6b53..0000000
--- a/libclamav/c++/llvm/test/Feature/intrinsics.ll
+++ /dev/null
@@ -1,62 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-declare i1 @llvm.isunordered.f32(float, float)
-
-declare i1 @llvm.isunordered.f64(double, double)
-
-declare void @llvm.prefetch(i8*, i32, i32)
-
-declare i8 @llvm.ctpop.i8(i8)
-
-declare i16 @llvm.ctpop.i16(i16)
-
-declare i32 @llvm.ctpop.i32(i32)
-
-declare i64 @llvm.ctpop.i64(i64)
-
-declare i8 @llvm.cttz.i8(i8)
-
-declare i16 @llvm.cttz.i16(i16)
-
-declare i32 @llvm.cttz.i32(i32)
-
-declare i64 @llvm.cttz.i64(i64)
-
-declare i8 @llvm.ctlz.i8(i8)
-
-declare i16 @llvm.ctlz.i16(i16)
-
-declare i32 @llvm.ctlz.i32(i32)
-
-declare i64 @llvm.ctlz.i64(i64)
-
-declare float @llvm.sqrt.f32(float)
-
-declare double @llvm.sqrt.f64(double)
-
-; Test llvm intrinsics
-;
-define void @libm() {
- fcmp uno float 1.000000e+00, 2.000000e+00 ; <i1>:1 [#uses=0]
- fcmp uno double 3.000000e+00, 4.000000e+00 ; <i1>:2 [#uses=0]
- call void @llvm.prefetch( i8* null, i32 1, i32 3 )
- call float @llvm.sqrt.f32( float 5.000000e+00 ) ; <float>:3 [#uses=0]
- call double @llvm.sqrt.f64( double 6.000000e+00 ) ; <double>:4 [#uses=0]
- call i8 @llvm.ctpop.i8( i8 10 ) ; <i32>:5 [#uses=0]
- call i16 @llvm.ctpop.i16( i16 11 ) ; <i32>:6 [#uses=0]
- call i32 @llvm.ctpop.i32( i32 12 ) ; <i32>:7 [#uses=0]
- call i64 @llvm.ctpop.i64( i64 13 ) ; <i32>:8 [#uses=0]
- call i8 @llvm.ctlz.i8( i8 14 ) ; <i32>:9 [#uses=0]
- call i16 @llvm.ctlz.i16( i16 15 ) ; <i32>:10 [#uses=0]
- call i32 @llvm.ctlz.i32( i32 16 ) ; <i32>:11 [#uses=0]
- call i64 @llvm.ctlz.i64( i64 17 ) ; <i32>:12 [#uses=0]
- call i8 @llvm.cttz.i8( i8 18 ) ; <i32>:13 [#uses=0]
- call i16 @llvm.cttz.i16( i16 19 ) ; <i32>:14 [#uses=0]
- call i32 @llvm.cttz.i32( i32 20 ) ; <i32>:15 [#uses=0]
- call i64 @llvm.cttz.i64( i64 21 ) ; <i32>:16 [#uses=0]
- ret void
-}
-
-; FIXME: test ALL the intrinsics in this file.
diff --git a/libclamav/c++/llvm/test/Feature/llvm2cpp.exp b/libclamav/c++/llvm/test/Feature/llvm2cpp.exp
deleted file mode 100644
index de0126c..0000000
--- a/libclamav/c++/llvm/test/Feature/llvm2cpp.exp
+++ /dev/null
@@ -1,3 +0,0 @@
-load_lib llvm2cpp.exp
-
-llvm2cpp-test [lsort [glob -nocomplain $srcdir/$subdir/*.ll]]
diff --git a/libclamav/c++/llvm/test/Feature/load_module.ll b/libclamav/c++/llvm/test/Feature/load_module.ll
deleted file mode 100644
index e2e222f..0000000
--- a/libclamav/c++/llvm/test/Feature/load_module.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; PR1318
-; RUN: opt < %s -load=%llvmlibsdir/LLVMHello%shlibext -hello \
-; RUN: -disable-output |& grep Hello
-
-@junk = global i32 0
-
-define i32* @somefunk() {
- ret i32* @junk
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/md_on_instruction.ll b/libclamav/c++/llvm/test/Feature/md_on_instruction.ll
deleted file mode 100644
index da9e49e..0000000
--- a/libclamav/c++/llvm/test/Feature/md_on_instruction.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis | grep " !dbg " | count 4
-define i32 @foo() nounwind ssp {
-entry:
- %retval = alloca i32 ; <i32*> [#uses=2]
- call void @llvm.dbg.func.start(metadata !0)
- store i32 42, i32* %retval, !dbg !3
- br label %0, !dbg !3
-
-; <label>:0 ; preds = %entry
- call void @llvm.dbg.region.end(metadata !0)
- %1 = load i32* %retval, !dbg !3 ; <i32> [#uses=1]
- ret i32 %1, !dbg !3
-}
-
-declare void @llvm.dbg.func.start(metadata) nounwind readnone
-
-declare void @llvm.dbg.region.end(metadata) nounwind readnone
-
-!0 = metadata !{i32 458798, i32 0, metadata !1, metadata !"foo", metadata !"foo", metadata !"foo", metadata !1, i32 1, metadata !2, i1 false, i1 true}
-!1 = metadata !{i32 458769, i32 0, i32 12, metadata !"foo.c", metadata !"/tmp", metadata !"clang 1.0", i1 true, i1 false, metadata !"", i32 0}
-!2 = metadata !{i32 458788, metadata !1, metadata !"int", metadata !1, i32 0, i64 32, i64 32, i64 0, i32 0, i32 5}
-!3 = metadata !{i32 1, i32 13, metadata !1, metadata !1}
diff --git a/libclamav/c++/llvm/test/Feature/memorymarkers.ll b/libclamav/c++/llvm/test/Feature/memorymarkers.ll
deleted file mode 100644
index 06b8376..0000000
--- a/libclamav/c++/llvm/test/Feature/memorymarkers.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llvm-as -disable-output < %s
-
-%"struct.std::pair<int,int>" = type { i32, i32 }
-
-declare void @_Z3barRKi(i32*)
-
-declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) nounwind
-declare {}* @llvm.invariant.start(i64, i8* nocapture) readonly nounwind
-declare void @llvm.invariant.end({}*, i64, i8* nocapture) nounwind
-
-define i32 @_Z4foo2v() nounwind {
-entry:
- %x = alloca %"struct.std::pair<int,int>"
- %y = bitcast %"struct.std::pair<int,int>"* %x to i8*
-
- ;; Constructor starts here (this isn't needed since it is immediately
- ;; preceded by an alloca, but shown for completeness).
- call void @llvm.lifetime.start(i64 8, i8* %y)
-
- %0 = getelementptr %"struct.std::pair<int,int>"* %x, i32 0, i32 0
- store i32 4, i32* %0, align 8
- %1 = getelementptr %"struct.std::pair<int,int>"* %x, i32 0, i32 1
- store i32 5, i32* %1, align 4
-
- ;; Constructor has finished here.
- %inv = call {}* @llvm.invariant.start(i64 8, i8* %y)
- call void @_Z3barRKi(i32* %0) nounwind
- %2 = load i32* %0, align 8
-
- ;; Destructor is run here.
- call void @llvm.invariant.end({}* %inv, i64 8, i8* %y)
- ;; Destructor is done here.
- call void @llvm.lifetime.end(i64 8, i8* %y)
- ret i32 %2
-}
diff --git a/libclamav/c++/llvm/test/Feature/newcasts.ll b/libclamav/c++/llvm/test/Feature/newcasts.ll
deleted file mode 100644
index 4cfc8bc..0000000
--- a/libclamav/c++/llvm/test/Feature/newcasts.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-define void @"NewCasts" (i16 %x) {
- %a = zext i16 %x to i32
- %b = sext i16 %x to i32
- %c = trunc i16 %x to i8
- %d = uitofp i16 %x to float
- %e = sitofp i16 %x to double
- %f = fptoui float %d to i16
- %g = fptosi double %e to i16
- %i = fpext float %d to double
- %j = fptrunc double %i to float
- %k = bitcast i32 %a to float
- %l = inttoptr i16 %x to i32*
- %m = ptrtoint i32* %l to i64
- %n = insertelement <4 x i32> undef, i32 %a, i32 0
- %o = sitofp <4 x i32> %n to <4 x float>
- %p = uitofp <4 x i32> %n to <4 x float>
- %q = fptosi <4 x float> %p to <4 x i32>
- %r = fptoui <4 x float> %p to <4 x i32>
- ret void
-}
-
-
-define i16 @"ZExtConst" () {
- ret i16 trunc ( i32 zext ( i16 42 to i32) to i16 )
-}
-
-define i16 @"SExtConst" () {
- ret i16 trunc (i32 sext (i16 42 to i32) to i16 )
-}
diff --git a/libclamav/c++/llvm/test/Feature/noalias-ret.ll b/libclamav/c++/llvm/test/Feature/noalias-ret.ll
deleted file mode 100644
index d88452b..0000000
--- a/libclamav/c++/llvm/test/Feature/noalias-ret.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: llvm-as < %s
-
-define noalias i8* @_Znwj(i32 %x) nounwind {
- %A = malloc i8, i32 %x
- ret i8* %A
-}
diff --git a/libclamav/c++/llvm/test/Feature/opaquetypes.ll b/libclamav/c++/llvm/test/Feature/opaquetypes.ll
deleted file mode 100644
index 6539c1a..0000000
--- a/libclamav/c++/llvm/test/Feature/opaquetypes.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-; This test case is used to test opaque type processing, forward references,
-; and recursive types. Oh my.
-;
-
-%SQ1 = type { i32 }
-%SQ2 = type { %ITy }
-%ITy = type i32
-
-
-%CCC = type { \2* }
-%BBB = type { \2*, \2 * }
-%AAA = type { \2*, {\2*}, [12x{\2*}], {[1x{\2*}]} }
-
-; Test numbered types
-type %CCC
-type %BBB
-%Composite = type { %0, %1 }
-
-; Test simple opaque type resolution...
-%intty = type i32
-
-; Perform a simple forward reference...
-%ty1 = type { %ty2, i32 }
-%ty2 = type float
-
-; Do a recursive type...
-%list = type { %list * }
-%listp = type { %listp } *
-
-; Do two mutually recursive types...
-%TyA = type { %ty2, %TyB * }
-%TyB = type { double, %TyA * }
-
-; A complex recursive type...
-%Y = type { {%Y*}, %Y* }
-%Z = type { { %Z * }, [12x%Z] *, {{{ %Z * }}} }
-
-; More ridiculous test cases...
-%A = type [ 123x %A*]
-%M = type %M (%M, %M) *
-%P = type %P*
-
-; Recursive ptrs
-%u = type %v*
-%v = type %u*
-
-; Test the parser for unnamed recursive types...
-%P1 = type \1 *
-%Y1 = type { { \3 * }, \2 * }
-%Z1 = type { { \3 * }, [12x\3] *, { { { \5 * } } } }
-
diff --git a/libclamav/c++/llvm/test/Feature/packed.ll b/libclamav/c++/llvm/test/Feature/packed.ll
deleted file mode 100644
index b86a227..0000000
--- a/libclamav/c++/llvm/test/Feature/packed.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-@foo1 = external global <4 x float> ; <<4 x float>*> [#uses=2]
-@foo2 = external global <2 x i32> ; <<2 x i32>*> [#uses=2]
-
-define void @main() {
- store <4 x float> < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 >, <4 x float>* @foo1
- store <2 x i32> < i32 4, i32 4 >, <2 x i32>* @foo2
- %l1 = load <4 x float>* @foo1 ; <<4 x float>> [#uses=0]
- %l2 = load <2 x i32>* @foo2 ; <<2 x i32>> [#uses=0]
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/packed_struct.ll b/libclamav/c++/llvm/test/Feature/packed_struct.ll
deleted file mode 100644
index 4d4ace9..0000000
--- a/libclamav/c++/llvm/test/Feature/packed_struct.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-; RUN: not grep cast %t2.ll
-; RUN: grep {\\}>} %t2.ll
-; END.
-
-%struct.anon = type <{ i8, i32, i32, i32 }>
-@foos = external global %struct.anon
-@bara = external global [2 x <{ i32, i8 }>]
-
-;initializers should work for packed and non-packed the same way
-@E1 = global <{i8, i32, i32}> <{i8 1, i32 2, i32 3}>
-@E2 = global {i8, i32, i32} {i8 4, i32 5, i32 6}
-
-
-define i32 @main()
-{
- %tmp = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 1) ; <i32> [#uses=1]
- %tmp3 = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 2) ; <i32> [#uses=1]
- %tmp6 = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 3) ; <i32> [#uses=1]
- %tmp4 = add i32 %tmp3, %tmp ; <i32> [#uses=1]
- %tmp7 = add i32 %tmp4, %tmp6 ; <i32> [#uses=1]
- ret i32 %tmp7
-}
-
-define i32 @bar() {
-entry:
- %tmp = load i32* getelementptr([2 x <{ i32, i8 }>]* @bara, i32 0, i32 0, i32 0 ) ; <i32> [#uses=1]
- %tmp4 = load i32* getelementptr ([2 x <{ i32, i8 }>]* @bara, i32 0, i32 1, i32 0) ; <i32> [#uses=1]
- %tmp5 = add i32 %tmp4, %tmp ; <i32> [#uses=1]
- ret i32 %tmp5
-}
diff --git a/libclamav/c++/llvm/test/Feature/paramattrs.ll b/libclamav/c++/llvm/test/Feature/paramattrs.ll
deleted file mode 100644
index 3bee617..0000000
--- a/libclamav/c++/llvm/test/Feature/paramattrs.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-%ZFunTy = type i32(i8 zeroext)
-%SFunTy = type i32(i8 signext)
-
-declare i16 @"test"(i16 signext %arg) signext
-declare i8 @"test2" (i16 zeroext %a2) zeroext
-
-declare i32 @"test3"(i32* noalias %p)
-
-declare void @exit(i32) noreturn nounwind
-
-define i32 @main(i32 inreg %argc, i8 ** inreg %argv) nounwind {
- %val = trunc i32 %argc to i16
- %res1 = call i16 (i16 signext) signext *@test(i16 signext %val) signext
- %two = add i16 %res1, %res1
- %res2 = call i8 @test2(i16 %two zeroext) zeroext
- %retVal = sext i16 %two to i32
- ret i32 %retVal
-}
diff --git a/libclamav/c++/llvm/test/Feature/ppcld.ll b/libclamav/c++/llvm/test/Feature/ppcld.ll
deleted file mode 100644
index 393a491..0000000
--- a/libclamav/c++/llvm/test/Feature/ppcld.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t
-; RUN: llvm-as < %t | llvm-dis > %t2
-; RUN: diff %t %t2
-; ModuleID = '<stdin>'
-target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "powerpc-apple-darwin8"
-@ld = external global ppc_fp128 ; <ppc_fp128*> [#uses=1]
-@d = global double 4.050000e+00, align 8 ; <double*> [#uses=1]
-@f = global float 0x4010333340000000 ; <float*> [#uses=1]
-
-define i32 @foo() {
-entry:
- %retval = alloca i32, align 4 ; <i32*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = load float* @f ; <float> [#uses=1]
- %tmp1 = fpext float %tmp to double ; <double> [#uses=1]
- %tmp2 = load double* @d ; <double> [#uses=1]
- %tmp3 = fmul double %tmp1, %tmp2 ; <double> [#uses=1]
- %tmp4 = fpext double %tmp3 to ppc_fp128 ; <ppc_fp128> [#uses=1]
- store ppc_fp128 %tmp4, ppc_fp128* @ld
- br label %return
-
-return: ; preds = %entry
- %retval4 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval4
-}
diff --git a/libclamav/c++/llvm/test/Feature/properties.ll b/libclamav/c++/llvm/test/Feature/properties.ll
deleted file mode 100644
index c688d68..0000000
--- a/libclamav/c++/llvm/test/Feature/properties.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-target datalayout = "e-p:32:32"
-target triple = "proc-vend-sys"
-deplibs = [ "m", "c" ]
diff --git a/libclamav/c++/llvm/test/Feature/prototype.ll b/libclamav/c++/llvm/test/Feature/prototype.ll
deleted file mode 100644
index 3754a1d..0000000
--- a/libclamav/c++/llvm/test/Feature/prototype.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-declare i32 @bar(i32)
-
-define i32 @foo(i32 %blah) {
- %xx = call i32 @bar( i32 %blah ) ; <i32> [#uses=1]
- ret i32 %xx
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/recursivetype.ll b/libclamav/c++/llvm/test/Feature/recursivetype.ll
deleted file mode 100644
index 43db5f0..0000000
--- a/libclamav/c++/llvm/test/Feature/recursivetype.ll
+++ /dev/null
@@ -1,103 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-; This file contains the output from the following compiled C code:
-; typedef struct list {
-; struct list *Next;
-; int Data;
-; } list;
-;
-; // Iterative insert fn
-; void InsertIntoListTail(list **L, int Data) {
-; while (*L)
-; L = &(*L)->Next;
-; *L = (list*)malloc(sizeof(list));
-; (*L)->Data = Data;
-; (*L)->Next = 0;
-; }
-;
-; // Recursive list search fn
-; list *FindData(list *L, int Data) {
-; if (L == 0) return 0;
-; if (L->Data == Data) return L;
-; return FindData(L->Next, Data);
-; }
-;
-; void DoListStuff() {
-; list *MyList = 0;
-; InsertIntoListTail(&MyList, 100);
-; InsertIntoListTail(&MyList, 12);
-; InsertIntoListTail(&MyList, 42);
-; InsertIntoListTail(&MyList, 1123);
-; InsertIntoListTail(&MyList, 1213);
-;
-; if (FindData(MyList, 75)) foundIt();
-; if (FindData(MyList, 42)) foundIt();
-; if (FindData(MyList, 700)) foundIt();
-; }
-
-%list = type { %list*, i32 }
-
-declare i8* @malloc(i32)
-
-define void @InsertIntoListTail(%list** %L, i32 %Data) {
-bb1:
- %reg116 = load %list** %L ; <%list*> [#uses=1]
- %cast1004 = inttoptr i64 0 to %list* ; <%list*> [#uses=1]
- %cond1000 = icmp eq %list* %reg116, %cast1004 ; <i1> [#uses=1]
- br i1 %cond1000, label %bb3, label %bb2
-
-bb2: ; preds = %bb2, %bb1
- %reg117 = phi %list** [ %reg118, %bb2 ], [ %L, %bb1 ] ; <%list**> [#uses=1]
- %cast1010 = bitcast %list** %reg117 to %list*** ; <%list***> [#uses=1]
- %reg118 = load %list*** %cast1010 ; <%list**> [#uses=3]
- %reg109 = load %list** %reg118 ; <%list*> [#uses=1]
- %cast1005 = inttoptr i64 0 to %list* ; <%list*> [#uses=1]
- %cond1001 = icmp ne %list* %reg109, %cast1005 ; <i1> [#uses=1]
- br i1 %cond1001, label %bb2, label %bb3
-
-bb3: ; preds = %bb2, %bb1
- %reg119 = phi %list** [ %reg118, %bb2 ], [ %L, %bb1 ] ; <%list**> [#uses=1]
- %cast1006 = bitcast %list** %reg119 to i8** ; <i8**> [#uses=1]
- %reg111 = call i8* @malloc( i32 16 ) ; <i8*> [#uses=3]
- store i8* %reg111, i8** %cast1006
- %reg111.upgrd.1 = ptrtoint i8* %reg111 to i64 ; <i64> [#uses=1]
- %reg1002 = add i64 %reg111.upgrd.1, 8 ; <i64> [#uses=1]
- %reg1002.upgrd.2 = inttoptr i64 %reg1002 to i8* ; <i8*> [#uses=1]
- %cast1008 = bitcast i8* %reg1002.upgrd.2 to i32* ; <i32*> [#uses=1]
- store i32 %Data, i32* %cast1008
- %cast1003 = inttoptr i64 0 to i64* ; <i64*> [#uses=1]
- %cast1009 = bitcast i8* %reg111 to i64** ; <i64**> [#uses=1]
- store i64* %cast1003, i64** %cast1009
- ret void
-}
-
-define %list* @FindData(%list* %L, i32 %Data) {
-bb1:
- br label %bb2
-
-bb2: ; preds = %bb6, %bb1
- %reg115 = phi %list* [ %reg116, %bb6 ], [ %L, %bb1 ] ; <%list*> [#uses=4]
- %cast1014 = inttoptr i64 0 to %list* ; <%list*> [#uses=1]
- %cond1011 = icmp ne %list* %reg115, %cast1014 ; <i1> [#uses=1]
- br i1 %cond1011, label %bb4, label %bb3
-
-bb3: ; preds = %bb2
- ret %list* null
-
-bb4: ; preds = %bb2
- %idx = getelementptr %list* %reg115, i64 0, i32 1 ; <i32*> [#uses=1]
- %reg111 = load i32* %idx ; <i32> [#uses=1]
- %cond1013 = icmp ne i32 %reg111, %Data ; <i1> [#uses=1]
- br i1 %cond1013, label %bb6, label %bb5
-
-bb5: ; preds = %bb4
- ret %list* %reg115
-
-bb6: ; preds = %bb4
- %idx2 = getelementptr %list* %reg115, i64 0, i32 0 ; <%list**> [#uses=1]
- %reg116 = load %list** %idx2 ; <%list*> [#uses=1]
- br label %bb2
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/simplecalltest.ll b/libclamav/c++/llvm/test/Feature/simplecalltest.ll
deleted file mode 100644
index 6452286..0000000
--- a/libclamav/c++/llvm/test/Feature/simplecalltest.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
- %FunTy = type i32 (i32)
-
-define void @invoke(%FunTy* %x) {
- %foo = call i32 %x( i32 123 ) ; <i32> [#uses=0]
- ret void
-}
-
-define i32 @main(i32 %argc, i8** %argv, i8** %envp) {
- %retval = call i32 @test( i32 %argc ) ; <i32> [#uses=2]
- %two = add i32 %retval, %retval ; <i32> [#uses=1]
- %retval2 = call i32 @test( i32 %argc ) ; <i32> [#uses=1]
- %two2 = add i32 %two, %retval2 ; <i32> [#uses=1]
- call void @invoke( %FunTy* @test )
- ret i32 %two2
-}
-
-define i32 @test(i32 %i0) {
- ret i32 %i0
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/small.ll b/libclamav/c++/llvm/test/Feature/small.ll
deleted file mode 100644
index 4644f64..0000000
--- a/libclamav/c++/llvm/test/Feature/small.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-%x = type i32
-
-define i32 @foo(i32 %in) {
-label:
- ret i32 2
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/smallest.ll b/libclamav/c++/llvm/test/Feature/smallest.ll
deleted file mode 100644
index 5dd023c..0000000
--- a/libclamav/c++/llvm/test/Feature/smallest.ll
+++ /dev/null
@@ -1,4 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
diff --git a/libclamav/c++/llvm/test/Feature/sparcld.ll b/libclamav/c++/llvm/test/Feature/sparcld.ll
deleted file mode 100644
index 095f6f6..0000000
--- a/libclamav/c++/llvm/test/Feature/sparcld.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t
-; RUN: llvm-as < %t | llvm-dis > %t2
-; RUN: diff %t %t2
-; ModuleID = '<stdin>'
-@ld = external global fp128 ; <fp128*> [#uses=1]
-@d = global double 4.050000e+00, align 8 ; <double*> [#uses=1]
-@f = global float 0x4010333340000000 ; <float*> [#uses=1]
-
-define i32 @foo() {
-entry:
- %retval = alloca i32, align 4 ; <i32*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = load float* @f ; <float> [#uses=1]
- %tmp1 = fpext float %tmp to double ; <double> [#uses=1]
- %tmp2 = load double* @d ; <double> [#uses=1]
- %tmp3 = fmul double %tmp1, %tmp2 ; <double> [#uses=1]
- %tmp4 = fpext double %tmp3 to fp128 ; <fp128> [#uses=1]
- store fp128 %tmp4, fp128* @ld
- br label %return
-
-return: ; preds = %entry
- %retval4 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval4
-}
diff --git a/libclamav/c++/llvm/test/Feature/terminators.ll b/libclamav/c++/llvm/test/Feature/terminators.ll
deleted file mode 100644
index 1bca2a8..0000000
--- a/libclamav/c++/llvm/test/Feature/terminators.ll
+++ /dev/null
@@ -1,43 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
- %int = type i32
-
-define i32 @squared(i32 %i0) {
- switch i32 %i0, label %Default [
- i32 1, label %Case1
- i32 2, label %Case2
- i32 4, label %Case4
- ]
-
-Default: ; preds = %0
- ret i32 -1
-
-Case1: ; preds = %0
- ret i32 1
-
-Case2: ; preds = %0
- ret i32 4
-
-Case4: ; preds = %0
- ret i32 16
-}
-
-
-@Addr = global i8* blockaddress(@indbrtest, %BB1)
-@Addr3 = global i8* blockaddress(@squared, %Case1)
-
-
-define i32 @indbrtest(i8* %P, i32* %Q) {
- indirectbr i8* %P, [label %BB1, label %BB2, label %BB3]
-BB1:
- indirectbr i32* %Q, []
-BB2:
- %R = bitcast i8* blockaddress(@indbrtest, %BB3) to i8*
- indirectbr i8* %R, [label %BB1, label %BB2, label %BB3]
-BB3:
- ret i32 2
-}
-
-
diff --git a/libclamav/c++/llvm/test/Feature/testalloca.ll b/libclamav/c++/llvm/test/Feature/testalloca.ll
deleted file mode 100644
index 230b5a9..0000000
--- a/libclamav/c++/llvm/test/Feature/testalloca.ll
+++ /dev/null
@@ -1,22 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
- %inners = type { float, { i8 } }
- %struct = type { i32, %inners, i64 }
-
-define i32 @testfunction(i32 %i0, i32 %j0) {
- alloca i8, i32 5 ; <i8*>:1 [#uses=0]
- %ptr = alloca i32 ; <i32*> [#uses=2]
- store i32 3, i32* %ptr
- %val = load i32* %ptr ; <i32> [#uses=0]
- %sptr = alloca %struct ; <%struct*> [#uses=2]
- %nsptr = getelementptr %struct* %sptr, i64 0, i32 1 ; <%inners*> [#uses=1]
- %ubsptr = getelementptr %inners* %nsptr, i64 0, i32 1 ; <{ i8 }*> [#uses=1]
- %idx = getelementptr { i8 }* %ubsptr, i64 0, i32 0 ; <i8*> [#uses=1]
- store i8 4, i8* %idx
- %fptr = getelementptr %struct* %sptr, i64 0, i32 1, i32 0 ; <float*> [#uses=1]
- store float 4.000000e+00, float* %fptr
- ret i32 3
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/testconstants.ll b/libclamav/c++/llvm/test/Feature/testconstants.ll
deleted file mode 100644
index 6810f3d..0000000
--- a/libclamav/c++/llvm/test/Feature/testconstants.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-@somestr = constant [11 x i8] c"hello world" ; <[11 x i8]*> [#uses=1]
-@array = constant [2 x i32] [ i32 12, i32 52 ] ; <[2 x i32]*> [#uses=1]
-constant { i32, i32 } { i32 4, i32 3 } ; <{ i32, i32 }*>:0 [#uses=0]
-
-define [2 x i32]* @testfunction(i32 %i0, i32 %j0) {
- ret [2 x i32]* @array
-}
-
-define i8* @otherfunc(i32, double) {
- %somestr = getelementptr [11 x i8]* @somestr, i64 0, i64 0 ; <i8*> [#uses=1]
- ret i8* %somestr
-}
-
-define i8* @yetanotherfunc(i32, double) {
- ret i8* null
-}
-
-define i32 @negativeUnsigned() {
- ret i32 -1
-}
-
-define i32 @largeSigned() {
- ret i32 -394967296
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/testlogical.ll b/libclamav/c++/llvm/test/Feature/testlogical.ll
deleted file mode 100644
index a064869..0000000
--- a/libclamav/c++/llvm/test/Feature/testlogical.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-define i32 @simpleAdd(i32 %i0, i32 %j0) {
- %t1 = xor i32 %i0, %j0 ; <i32> [#uses=1]
- %t2 = or i32 %i0, %j0 ; <i32> [#uses=1]
- %t3 = and i32 %t1, %t2 ; <i32> [#uses=1]
- ret i32 %t3
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/testmemory.ll b/libclamav/c++/llvm/test/Feature/testmemory.ll
deleted file mode 100644
index a9019f0..0000000
--- a/libclamav/c++/llvm/test/Feature/testmemory.ll
+++ /dev/null
@@ -1,36 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
- %complexty = type { i32, { [4 x i8*], float }, double }
- %struct = type { i32, { float, { i8 } }, i64 }
-
-define i32 @main() {
- call i32 @testfunction( i64 0, i64 1 ) ; <i32>:1 [#uses=0]
- ret i32 0
-}
-
-define i32 @testfunction(i64 %i0, i64 %j0) {
- %array0 = malloc [4 x i8] ; <[4 x i8]*> [#uses=2]
- %size = add i32 2, 2 ; <i32> [#uses=1]
- %array1 = malloc i8, i32 4 ; <i8*> [#uses=1]
- %array2 = malloc i8, i32 %size ; <i8*> [#uses=1]
- %idx = getelementptr [4 x i8]* %array0, i64 0, i64 2 ; <i8*> [#uses=1]
- store i8 123, i8* %idx
- free [4 x i8]* %array0
- free i8* %array1
- free i8* %array2
- %aa = alloca %complexty, i32 5 ; <%complexty*> [#uses=1]
- %idx2 = getelementptr %complexty* %aa, i64 %i0, i32 1, i32 0, i64 %j0 ; <i8**> [#uses=1]
- store i8* null, i8** %idx2
- %ptr = alloca i32 ; <i32*> [#uses=2]
- store i32 3, i32* %ptr
- %val = load i32* %ptr ; <i32> [#uses=0]
- %sptr = alloca %struct ; <%struct*> [#uses=1]
- %ubsptr = getelementptr %struct* %sptr, i64 0, i32 1, i32 1 ; <{ i8 }*> [#uses=1]
- %idx3 = getelementptr { i8 }* %ubsptr, i64 0, i32 0 ; <i8*> [#uses=1]
- store i8 4, i8* %idx3
- ret i32 3
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/testtype.ll b/libclamav/c++/llvm/test/Feature/testtype.ll
deleted file mode 100644
index 124aa09..0000000
--- a/libclamav/c++/llvm/test/Feature/testtype.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-%X = type i32* addrspace(4)*
-
- %inners = type { float, { i8 } }
- %struct = type { i32, %inners, i64 }
-
-%fwdref = type { %fwd* }
-%fwd = type %fwdref*
-
-; same as above with unnamed types
-type { %1* }
-type %0*
-%test = type %1
-
-%test2 = type [2 x i32]
-;%x = type %undefined*
-
-%test3 = type i32 (i32()*, float(...)*, ...)*
diff --git a/libclamav/c++/llvm/test/Feature/testvarargs.ll b/libclamav/c++/llvm/test/Feature/testvarargs.ll
deleted file mode 100644
index a73b7ec..0000000
--- a/libclamav/c++/llvm/test/Feature/testvarargs.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-declare i32 @printf(i8*, ...) ;; Prototype for: int __builtin_printf(const char*, ...)
-
-define i32 @testvarar() {
- call i32 (i8*, ...)* @printf( i8* null, i32 12, i8 42 ) ; <i32>:1 [#uses=1]
- ret i32 %1
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/undefined.ll b/libclamav/c++/llvm/test/Feature/undefined.ll
deleted file mode 100644
index e63ce41..0000000
--- a/libclamav/c++/llvm/test/Feature/undefined.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-@X = global i32 undef ; <i32*> [#uses=0]
-
-declare i32 @atoi(i8*)
-
-define i32 @test() {
- ret i32 undef
-}
-
-define i32 @test2() {
- %X = add i32 undef, 1 ; <i32> [#uses=1]
- ret i32 %X
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/unions.ll b/libclamav/c++/llvm/test/Feature/unions.ll
deleted file mode 100644
index 9d6c36b..0000000
--- a/libclamav/c++/llvm/test/Feature/unions.ll
+++ /dev/null
@@ -1,12 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-%union.anon = type union { i8, i32, float }
-
-@union1 = constant union { i32, i8 } { i32 4 }
-@union2 = constant union { i32, i8 } insertvalue(union { i32, i8 } undef, i32 4, 0)
-
-define void @"Unions" () {
- ret void
-}
diff --git a/libclamav/c++/llvm/test/Feature/unreachable.ll b/libclamav/c++/llvm/test/Feature/unreachable.ll
deleted file mode 100644
index 8bffb4c..0000000
--- a/libclamav/c++/llvm/test/Feature/unreachable.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-declare void @bar()
-
-define i32 @foo() {
- unreachable
-}
-
-define double @xyz() {
- call void @bar( )
- unreachable
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/varargs.ll b/libclamav/c++/llvm/test/Feature/varargs.ll
deleted file mode 100644
index b9317df..0000000
--- a/libclamav/c++/llvm/test/Feature/varargs.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-; Demonstrate all of the variable argument handling intrinsic functions plus
-; the va_arg instruction.
-
-declare void @llvm.va_start(i8*)
-
-declare void @llvm.va_copy(i8*, i8*)
-
-declare void @llvm.va_end(i8*)
-
-define i32 @test(i32 %X, ...) {
- %ap = alloca i8* ; <i8**> [#uses=4]
- %va.upgrd.1 = bitcast i8** %ap to i8* ; <i8*> [#uses=1]
- call void @llvm.va_start( i8* %va.upgrd.1 )
- %tmp = va_arg i8** %ap, i32 ; <i32> [#uses=1]
- %aq = alloca i8* ; <i8**> [#uses=2]
- %va0.upgrd.2 = bitcast i8** %aq to i8* ; <i8*> [#uses=1]
- %va1.upgrd.3 = bitcast i8** %ap to i8* ; <i8*> [#uses=1]
- call void @llvm.va_copy( i8* %va0.upgrd.2, i8* %va1.upgrd.3 )
- %va.upgrd.4 = bitcast i8** %aq to i8* ; <i8*> [#uses=1]
- call void @llvm.va_end( i8* %va.upgrd.4 )
- %va.upgrd.5 = bitcast i8** %ap to i8* ; <i8*> [#uses=1]
- call void @llvm.va_end( i8* %va.upgrd.5 )
- ret i32 %tmp
-}
-
diff --git a/libclamav/c++/llvm/test/Feature/varargs_new.ll b/libclamav/c++/llvm/test/Feature/varargs_new.ll
deleted file mode 100644
index a46f270..0000000
--- a/libclamav/c++/llvm/test/Feature/varargs_new.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-; Demonstrate all of the variable argument handling intrinsic functions plus
-; the va_arg instruction.
-
-declare void @llvm.va_start(i8*)
-
-declare void @llvm.va_copy(i8*, i8*)
-
-declare void @llvm.va_end(i8*)
-
-define i32 @test(i32 %X, ...) {
- ; Allocate two va_list items. On this target, va_list is of type sbyte*
- %ap = alloca i8* ; <i8**> [#uses=4]
- %aq = alloca i8* ; <i8**> [#uses=2]
-
- ; Initialize variable argument processing
- %va.upgrd.1 = bitcast i8** %ap to i8* ; <i8*> [#uses=1]
- call void @llvm.va_start( i8* %va.upgrd.1 )
-
- ; Read a single integer argument
- %tmp = va_arg i8** %ap, i32 ; <i32> [#uses=1]
-
- ; Demonstrate usage of llvm.va_copy and llvm_va_end
- %apv = load i8** %ap ; <i8*> [#uses=1]
- %va0.upgrd.2 = bitcast i8** %aq to i8* ; <i8*> [#uses=1]
- %va1.upgrd.3 = bitcast i8* %apv to i8* ; <i8*> [#uses=1]
- call void @llvm.va_copy( i8* %va0.upgrd.2, i8* %va1.upgrd.3 )
- %va.upgrd.4 = bitcast i8** %aq to i8* ; <i8*> [#uses=1]
- call void @llvm.va_end( i8* %va.upgrd.4 )
-
- ; Stop processing of arguments.
- %va.upgrd.5 = bitcast i8** %ap to i8* ; <i8*> [#uses=1]
- call void @llvm.va_end( i8* %va.upgrd.5 )
- ret i32 %tmp
-}
diff --git a/libclamav/c++/llvm/test/Feature/vector-cast-constant-exprs.ll b/libclamav/c++/llvm/test/Feature/vector-cast-constant-exprs.ll
deleted file mode 100644
index ffdc0f0..0000000
--- a/libclamav/c++/llvm/test/Feature/vector-cast-constant-exprs.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis | not grep {ret.*(}
-
-; All of these constant expressions should fold.
-
-define <2 x float> @ga() {
- ret <2 x float> fptrunc (<2 x double><double 4.3, double 3.2> to <2 x float>)
-}
-define <2 x double> @gb() {
- ret <2 x double> fpext (<2 x float><float 2.0, float 8.0> to <2 x double>)
-}
-define <2 x i64> @gd() {
- ret <2 x i64> zext (<2 x i32><i32 3, i32 4> to <2 x i64>)
-}
-define <2 x i64> @ge() {
- ret <2 x i64> sext (<2 x i32><i32 3, i32 4> to <2 x i64>)
-}
-define <2 x i32> @gf() {
- ret <2 x i32> trunc (<2 x i64><i64 3, i64 4> to <2 x i32>)
-}
-define <2 x i32> @gh() {
- ret <2 x i32> fptoui (<2 x float><float 8.0, float 7.0> to <2 x i32>)
-}
-define <2 x i32> @gi() {
- ret <2 x i32> fptosi (<2 x float><float 8.0, float 7.0> to <2 x i32>)
-}
-define <2 x float> @gj() {
- ret <2 x float> uitofp (<2 x i32><i32 8, i32 7> to <2 x float>)
-}
-define <2 x float> @gk() {
- ret <2 x float> sitofp (<2 x i32><i32 8, i32 7> to <2 x float>)
-}
-define <2 x double> @gl() {
- ret <2 x double> bitcast (<2 x double><double 4.0, double 3.0> to <2 x double>)
-}
-define <2 x double> @gm() {
- ret <2 x double> bitcast (<2 x i64><i64 4, i64 3> to <2 x double>)
-}
diff --git a/libclamav/c++/llvm/test/Feature/weak_constant.ll b/libclamav/c++/llvm/test/Feature/weak_constant.ll
deleted file mode 100644
index 9025aaa..0000000
--- a/libclamav/c++/llvm/test/Feature/weak_constant.ll
+++ /dev/null
@@ -1,38 +0,0 @@
-; RUN: opt < %s -std-compile-opts -S > %t
-; RUN: grep undef %t | count 1
-; RUN: grep 5 %t | count 1
-; RUN: grep 7 %t | count 1
-; RUN: grep 9 %t | count 1
-
- type { i32, i32 } ; type %0
-@a = weak constant i32 undef ; <i32*> [#uses=1]
-@b = weak constant i32 5 ; <i32*> [#uses=1]
-@c = weak constant %0 { i32 7, i32 9 } ; <%0*> [#uses=1]
-
-define i32 @la() {
- %v = load i32* @a ; <i32> [#uses=1]
- ret i32 %v
-}
-
-define i32 @lb() {
- %v = load i32* @b ; <i32> [#uses=1]
- ret i32 %v
-}
-
-define i32 @lc() {
- %g = getelementptr %0* @c, i32 0, i32 0 ; <i32*> [#uses=1]
- %u = load i32* %g ; <i32> [#uses=1]
- %h = getelementptr %0* @c, i32 0, i32 1 ; <i32*> [#uses=1]
- %v = load i32* %h ; <i32> [#uses=1]
- %r = add i32 %u, %v
- ret i32 %r
-}
-
-define i32 @f() {
- %u = call i32 @la() ; <i32> [#uses=1]
- %v = call i32 @lb() ; <i32> [#uses=1]
- %w = call i32 @lc() ; <i32> [#uses=1]
- %r = add i32 %u, %v ; <i32> [#uses=1]
- %s = add i32 %r, %w ; <i32> [#uses=1]
- ret i32 %s
-}
diff --git a/libclamav/c++/llvm/test/Feature/weirdnames.ll b/libclamav/c++/llvm/test/Feature/weirdnames.ll
deleted file mode 100644
index cc773cd..0000000
--- a/libclamav/c++/llvm/test/Feature/weirdnames.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-; Test using double quotes to form names that are not legal in the % form
-%"&^ " = type { i32 }
-@"%.*+ foo" = global %"&^ " { i32 5 }
-@"0" = global float 0.000000e+00 ; This CANNOT be %0
-@"\\03foo" = global float 0x3FB99999A0000000 ; Make sure funny char gets round trip
diff --git a/libclamav/c++/llvm/test/Feature/x86ld.ll b/libclamav/c++/llvm/test/Feature/x86ld.ll
deleted file mode 100644
index 32005ae..0000000
--- a/libclamav/c++/llvm/test/Feature/x86ld.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t
-; RUN: llvm-as < %t | llvm-dis > %t2
-; RUN: diff %t %t2
-; ModuleID = '<stdin>'
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "i686-apple-darwin8"
-@ld = external global x86_fp80 ; <x86_fp80*> [#uses=1]
-@d = global double 4.050000e+00, align 8 ; <double*> [#uses=1]
-@f = global float 0x4010333340000000 ; <float*> [#uses=1]
-
-define i32 @foo() {
-entry:
- %retval = alloca i32, align 4 ; <i32*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = load float* @f ; <float> [#uses=1]
- %tmp1 = fpext float %tmp to double ; <double> [#uses=1]
- %tmp2 = load double* @d ; <double> [#uses=1]
- %tmp3 = fmul double %tmp1, %tmp2 ; <double> [#uses=1]
- %tmp4 = fpext double %tmp3 to x86_fp80 ; <x86_fp80> [#uses=1]
- store x86_fp80 %tmp4, x86_fp80* @ld
- br label %return
-
-return: ; preds = %entry
- %retval4 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval4
-}
diff --git a/libclamav/c++/llvm/test/Integer/2007-01-19-TruncSext.ll b/libclamav/c++/llvm/test/Integer/2007-01-19-TruncSext.ll
deleted file mode 100644
index 3fee6bc..0000000
--- a/libclamav/c++/llvm/test/Integer/2007-01-19-TruncSext.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-; RUN: llvm-as < %s | lli --force-interpreter=true | grep -- -255
-
-@ARRAY = global [ 20 x i17 ] zeroinitializer
-@FORMAT = constant [ 4 x i8 ] c"%d\0A\00"
-
-declare i32 @printf(i8* %format, ...)
-
-define void @multiply(i32 %index, i32 %X, i32 %Y) {
- %Z = mul i32 %X, %Y
- %P = getelementptr [20 x i17]* @ARRAY, i32 0, i32 %index
- %Result = trunc i32 %Z to i17
- store i17 %Result, i17* %P
- ret void
-}
-
-define i32 @main(i32 %argc, i8** %argv) {
- %i = bitcast i32 0 to i32
- call void @multiply(i32 %i, i32 -1, i32 255)
- %P = getelementptr [20 x i17]* @ARRAY, i32 0, i32 0
- %X = load i17* %P
- %result = sext i17 %X to i32
- %fmt = getelementptr [4 x i8]* @FORMAT, i32 0, i32 0
- call i32 (i8*,...)* @printf(i8* %fmt, i32 %result)
- ret i32 0
-}
-
diff --git a/libclamav/c++/llvm/test/Integer/BitArith.ll b/libclamav/c++/llvm/test/Integer/BitArith.ll
deleted file mode 100644
index 350a984..0000000
--- a/libclamav/c++/llvm/test/Integer/BitArith.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-declare void @"foo"(i31 %i, i63 %j, i10 %k)
-
-
-; foo test basic arith operations
-define void @"foo"(i31 %i, i63 %j, i10 %k)
-begin
- %t1 = trunc i63 %j to i31
- %t2 = add i31 %t1, %i
- %t20 = add i31 3, %t1
- %t3 = zext i31 %i to i63
- %t4 = sub i63 %t3, %j
- %t40 = sub i63 %j, -100
- %t5 = mul i10 %k, 7
- %t6 = sdiv i63 %j, -2
- %t7 = udiv i63 %j, %t3
- %t8 = urem i10 %k, 10
- %t9 = srem i10 %k, -10
- ret void
-end
-
diff --git a/libclamav/c++/llvm/test/Integer/BitBit.ll b/libclamav/c++/llvm/test/Integer/BitBit.ll
deleted file mode 100644
index 420bbe5..0000000
--- a/libclamav/c++/llvm/test/Integer/BitBit.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-declare void @"foo"(i31 %i, i33 %j)
-
-
-; foo test basic bitwise operations
-define void @"foo"(i31 %i, i33 %j)
-begin
- %t1 = trunc i33 %j to i31
- %t2 = and i31 %t1, %i
- %t3 = sext i31 %i to i33
- %t4 = or i33 %t3, %j
- %t5 = xor i31 %t2, 7
- %t6 = shl i31 %i, 2
- %t7 = trunc i31 %i to i8
- %t8 = shl i8 %t7, 3
- %t9 = lshr i33 %j, 31
- %t7z = zext i8 %t7 to i33
- %t10 = ashr i33 %j, %t7z
- ret void
-end
-
diff --git a/libclamav/c++/llvm/test/Integer/BitCast.ll b/libclamav/c++/llvm/test/Integer/BitCast.ll
deleted file mode 100644
index 0bef023..0000000
--- a/libclamav/c++/llvm/test/Integer/BitCast.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-declare void @"foo"(i31 %i, i1280 %j, i1 %k, float %f)
-
-
-; foo test basic arith operations
-define void @"foo"(i31 %i, i1280 %j, i1 %k, float %f)
-begin
- %t1 = trunc i1280 %j to i31
- %t2 = trunc i31 %t1 to i1
-
- %t3 = zext i31 %i to i1280
- %t4 = sext i31 %i to i1280
-
- %t5 = fptoui float 0x400921FA00000000 to i31
- %t6 = uitofp i31 %t5 to double
-
- %t7 = fptosi double 0xC0934A456D5CFAAD to i28
- %t8 = sitofp i8 -1 to double
- %t9 = uitofp i8 255 to double
-
- ret void
-end
-
diff --git a/libclamav/c++/llvm/test/Integer/BitIcmp.ll b/libclamav/c++/llvm/test/Integer/BitIcmp.ll
deleted file mode 100644
index c224612..0000000
--- a/libclamav/c++/llvm/test/Integer/BitIcmp.ll
+++ /dev/null
@@ -1,43 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-define i55 @"simpleIcmp"(i55 %i0, i55 %j0)
-begin
- %t1 = icmp eq i55 %i0, %j0
- %t2 = icmp ne i55 %i0, %j0
- %t3 = icmp ult i55 %i0, %j0
- %t4 = icmp sgt i55 %i0, %j0
- %t5 = icmp ule i55 %i0, %j0
- %t6 = icmp sge i55 %i0, %j0
-
- %t7 = icmp eq i55 %i0, 1098765432
- %t8 = icmp ne i55 %i0, -31415926
-
- %t9 = icmp ult i55 10000, %j0
- %t10 = icmp sgt i55 -10000, %j0
-
- ret i55 %i0
-end
-
-define i31 @"phitest"(i12 %i)
-begin
-
-HasArg:
- %n1 = add i12 1, %i
- br label %Continue
-
-Continue:
- %n = phi i12 [%n1, %HasArg], [%next, %Continue]
- %next = add i12 1, %n
- br label %Continue
-end
-
-define i18 @"select"(i18 %i)
-begin
- %t = icmp sgt i18 %i, 100
- %k = select i1 %t, i18 %i, i18 999
- ret i18 %k
-end
-
diff --git a/libclamav/c++/llvm/test/Integer/BitMem.ll b/libclamav/c++/llvm/test/Integer/BitMem.ll
deleted file mode 100644
index 2c093bc..0000000
--- a/libclamav/c++/llvm/test/Integer/BitMem.ll
+++ /dev/null
@@ -1,29 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-declare void @"foo"()
-
-
-; foo test basic arith operations
-define void @"foo"() {
- %t1 = malloc i31, i32 4
- %t2 = malloc i31, i32 7, align 1024
- %t3 = malloc [4 x i15]
-
- %idx = getelementptr [4 x i15]* %t3, i64 0, i64 2
- store i15 -123, i15* %idx
-
- free [4 x i15]* %t3
- free i31* %t2
- free i31* %t1
-
- %t4 = alloca i12, i32 100
- free i12* %t4
-
- %t5 = alloca i31
- store i31 -123, i31* %t5
-
- free i31* %t5
- ret void
-}
diff --git a/libclamav/c++/llvm/test/Integer/BitMisc.ll b/libclamav/c++/llvm/test/Integer/BitMisc.ll
deleted file mode 100644
index 8ce4d4a..0000000
--- a/libclamav/c++/llvm/test/Integer/BitMisc.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-@MyVar = external global i19
-@MyIntList = external global { i39 *, i19 }
- external global i19 ; i19*:0
-
-@AConst = constant i19 -123
-
-@AString = constant [4 x i8] c"test"
-
-@ZeroInit = global { [100 x i19 ], [40 x float ] } { [100 x i19] zeroinitializer,
- [40 x float] zeroinitializer }
-
-
-define i19 @"foo"(i19 %blah)
-begin
- store i19 5, i19* @MyVar
- %idx = getelementptr { i39 *, i19 } * @MyIntList, i64 0, i32 1
- store i19 12, i19* %idx
- ret i19 %blah
-end
diff --git a/libclamav/c++/llvm/test/Integer/BitPacked.ll b/libclamav/c++/llvm/test/Integer/BitPacked.ll
deleted file mode 100644
index e6e453a..0000000
--- a/libclamav/c++/llvm/test/Integer/BitPacked.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-@foo1 = external global <4 x float>
-@foo2 = external global <2 x i10>
-
-
-define void @main()
-{
- store <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, <4 x float>* @foo1
- store <2 x i10> <i10 4, i10 4>, <2 x i10>* @foo2
- %l1 = load <4 x float>* @foo1
- %l2 = load <2 x i10>* @foo2
- %r1 = extractelement <2 x i10> %l2, i32 1
- %r2 = extractelement <2 x i10> %l2, i32 0
- %t = mul i10 %r1, %r2
- %r3 = insertelement <2 x i10> %l2, i10 %t, i32 0
- store <2 x i10> %r3, <2 x i10>* @foo2
- ret void
-}
diff --git a/libclamav/c++/llvm/test/Integer/a15.ll b/libclamav/c++/llvm/test/Integer/a15.ll
deleted file mode 100644
index 5c9dc3b..0000000
--- a/libclamav/c++/llvm/test/Integer/a15.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t.ll
-; RUN: diff %t.ll %s.out
-
-; test 15 bits
-;
-@b = constant i15 add(i15 32767, i15 1)
-@c = constant i15 add(i15 32767, i15 32767)
-@d = constant i15 add(i15 32760, i15 8)
-@e = constant i15 sub(i15 0 , i15 1)
-@f = constant i15 sub(i15 0 , i15 32767)
-@g = constant i15 sub(i15 2 , i15 32767)
-
-@h = constant i15 shl(i15 1 , i15 15)
-@i = constant i15 shl(i15 1 , i15 14)
-@j = constant i15 lshr(i15 32767 , i15 14)
-@l = constant i15 ashr(i15 32767 , i15 14)
-
-@n = constant i15 mul(i15 32767, i15 2)
-@q = constant i15 mul(i15 -16383,i15 -3)
-@r = constant i15 sdiv(i15 -1, i15 16383)
-@s = constant i15 udiv(i15 -1, i15 16383)
-@t = constant i15 srem(i15 1, i15 32766)
-@u = constant i15 urem(i15 32767,i15 -1)
-@o = constant i15 trunc( i16 32768 to i15 )
-@p = constant i15 trunc( i16 32767 to i15 )
-@v = constant i15 srem(i15 -1, i15 768)
-
diff --git a/libclamav/c++/llvm/test/Integer/a15.ll.out b/libclamav/c++/llvm/test/Integer/a15.ll.out
deleted file mode 100644
index 5195cdf..0000000
--- a/libclamav/c++/llvm/test/Integer/a15.ll.out
+++ /dev/null
@@ -1,21 +0,0 @@
-; ModuleID = '<stdin>'
-
-@b = constant i15 0 ; <i15*> [#uses=0]
-@c = constant i15 -2 ; <i15*> [#uses=0]
-@d = constant i15 0 ; <i15*> [#uses=0]
-@e = constant i15 -1 ; <i15*> [#uses=0]
-@f = constant i15 1 ; <i15*> [#uses=0]
-@g = constant i15 3 ; <i15*> [#uses=0]
-@h = constant i15 undef ; <i15*> [#uses=0]
-@i = constant i15 -16384 ; <i15*> [#uses=0]
-@j = constant i15 1 ; <i15*> [#uses=0]
-@l = constant i15 -1 ; <i15*> [#uses=0]
-@n = constant i15 -2 ; <i15*> [#uses=0]
-@q = constant i15 16381 ; <i15*> [#uses=0]
-@r = constant i15 0 ; <i15*> [#uses=0]
-@s = constant i15 2 ; <i15*> [#uses=0]
-@t = constant i15 1 ; <i15*> [#uses=0]
-@u = constant i15 0 ; <i15*> [#uses=0]
-@o = constant i15 0 ; <i15*> [#uses=0]
-@p = constant i15 -1 ; <i15*> [#uses=0]
-@v = constant i15 -1 ; <i15*> [#uses=0]
diff --git a/libclamav/c++/llvm/test/Integer/a17.ll b/libclamav/c++/llvm/test/Integer/a17.ll
deleted file mode 100644
index db03e7c..0000000
--- a/libclamav/c++/llvm/test/Integer/a17.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t.ll
-; RUN: diff %t.ll %s.out
-
-; test 17 bits
-;
-@b = constant i17 add(i17 131071, i17 1)
-@c = constant i17 add(i17 131071, i17 131071)
-@d = constant i17 add(i17 131064, i17 8)
-@e = constant i17 sub(i17 0 , i17 1)
-@f = constant i17 sub(i17 0 , i17 131071)
-@g = constant i17 sub(i17 2 , i17 131071)
-
-@h = constant i17 shl(i17 1 , i17 17)
-@i = constant i17 shl(i17 1 , i17 16)
-@j = constant i17 lshr(i17 131071 , i17 16)
-@l = constant i17 ashr(i17 131071 , i17 16)
-
-@n = constant i17 mul(i17 131071, i17 2)
-@q = constant i17 sdiv(i17 -1, i17 65535)
-@r = constant i17 udiv(i17 -1, i17 65535)
-@s = constant i17 srem(i17 1, i17 131070)
-@t = constant i17 urem(i17 131071,i17 -1)
-@o = constant i17 trunc( i18 131072 to i17 )
-@p = constant i17 trunc( i18 131071 to i17 )
-@v = constant i17 srem(i17 -1, i17 15)
diff --git a/libclamav/c++/llvm/test/Integer/a17.ll.out b/libclamav/c++/llvm/test/Integer/a17.ll.out
deleted file mode 100644
index ba66412..0000000
--- a/libclamav/c++/llvm/test/Integer/a17.ll.out
+++ /dev/null
@@ -1,20 +0,0 @@
-; ModuleID = '<stdin>'
-
-@b = constant i17 0 ; <i17*> [#uses=0]
-@c = constant i17 -2 ; <i17*> [#uses=0]
-@d = constant i17 0 ; <i17*> [#uses=0]
-@e = constant i17 -1 ; <i17*> [#uses=0]
-@f = constant i17 1 ; <i17*> [#uses=0]
-@g = constant i17 3 ; <i17*> [#uses=0]
-@h = constant i17 undef ; <i17*> [#uses=0]
-@i = constant i17 -65536 ; <i17*> [#uses=0]
-@j = constant i17 1 ; <i17*> [#uses=0]
-@l = constant i17 -1 ; <i17*> [#uses=0]
-@n = constant i17 -2 ; <i17*> [#uses=0]
-@q = constant i17 0 ; <i17*> [#uses=0]
-@r = constant i17 2 ; <i17*> [#uses=0]
-@s = constant i17 1 ; <i17*> [#uses=0]
-@t = constant i17 0 ; <i17*> [#uses=0]
-@o = constant i17 0 ; <i17*> [#uses=0]
-@p = constant i17 -1 ; <i17*> [#uses=0]
-@v = constant i17 -1 ; <i17*> [#uses=0]
diff --git a/libclamav/c++/llvm/test/Integer/a31.ll b/libclamav/c++/llvm/test/Integer/a31.ll
deleted file mode 100644
index c0c571f..0000000
--- a/libclamav/c++/llvm/test/Integer/a31.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t.ll
-; RUN: diff %t.ll %s.out
-
-; test 31 bits
-;
-@b = constant i31 add(i31 2147483647, i31 1)
-@c = constant i31 add(i31 2147483647, i31 2147483647)
-@d = constant i31 add(i31 2147483640, i31 8)
-@e = constant i31 sub(i31 0 , i31 1)
-@f = constant i31 sub(i31 0 , i31 2147483647)
-@g = constant i31 sub(i31 2 , i31 2147483647)
-
-@h = constant i31 shl(i31 1 , i31 31)
-@i = constant i31 shl(i31 1 , i31 30)
-@j = constant i31 lshr(i31 2147483647 , i31 30)
-@l = constant i31 ashr(i31 2147483647 , i31 30)
-
-@n = constant i31 mul(i31 2147483647, i31 2)
-@q = constant i31 sdiv(i31 -1, i31 1073741823)
-@r = constant i31 udiv(i31 -1, i31 1073741823)
-@s = constant i31 srem(i31 1, i31 2147483646)
-@t = constant i31 urem(i31 2147483647,i31 -1)
-@o = constant i31 trunc( i32 2147483648 to i31 )
-@p = constant i31 trunc( i32 2147483647 to i31 )
-@u = constant i31 srem(i31 -3, i31 17)
diff --git a/libclamav/c++/llvm/test/Integer/a31.ll.out b/libclamav/c++/llvm/test/Integer/a31.ll.out
deleted file mode 100644
index 7407a74..0000000
--- a/libclamav/c++/llvm/test/Integer/a31.ll.out
+++ /dev/null
@@ -1,20 +0,0 @@
-; ModuleID = '<stdin>'
-
-@b = constant i31 0 ; <i31*> [#uses=0]
-@c = constant i31 -2 ; <i31*> [#uses=0]
-@d = constant i31 0 ; <i31*> [#uses=0]
-@e = constant i31 -1 ; <i31*> [#uses=0]
-@f = constant i31 1 ; <i31*> [#uses=0]
-@g = constant i31 3 ; <i31*> [#uses=0]
-@h = constant i31 undef ; <i31*> [#uses=0]
-@i = constant i31 -1073741824 ; <i31*> [#uses=0]
-@j = constant i31 1 ; <i31*> [#uses=0]
-@l = constant i31 -1 ; <i31*> [#uses=0]
-@n = constant i31 -2 ; <i31*> [#uses=0]
-@q = constant i31 0 ; <i31*> [#uses=0]
-@r = constant i31 2 ; <i31*> [#uses=0]
-@s = constant i31 1 ; <i31*> [#uses=0]
-@t = constant i31 0 ; <i31*> [#uses=0]
-@o = constant i31 0 ; <i31*> [#uses=0]
-@p = constant i31 -1 ; <i31*> [#uses=0]
-@u = constant i31 -3 ; <i31*> [#uses=0]
diff --git a/libclamav/c++/llvm/test/Integer/a33.ll b/libclamav/c++/llvm/test/Integer/a33.ll
deleted file mode 100644
index f328907..0000000
--- a/libclamav/c++/llvm/test/Integer/a33.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t.ll
-; RUN: diff %t.ll %s.out
-
-; test 33 bits
-;
-@b = constant i33 add(i33 8589934591, i33 1)
-@c = constant i33 add(i33 8589934591, i33 8589934591)
-@d = constant i33 add(i33 8589934584, i33 8)
-@e = constant i33 sub(i33 0 , i33 1)
-@f = constant i33 sub(i33 0 , i33 8589934591)
-@g = constant i33 sub(i33 2 , i33 8589934591)
-
-@h = constant i33 shl(i33 1 , i33 33)
-@i = constant i33 shl(i33 1 , i33 32)
-@j = constant i33 lshr(i33 8589934591 , i33 32)
-@l = constant i33 ashr(i33 8589934591 , i33 32)
-
-@n = constant i33 mul(i33 8589934591, i33 2)
-@q = constant i33 sdiv(i33 -1, i33 4294967295)
-@r = constant i33 udiv(i33 -1, i33 4294967295)
-@s = constant i33 srem(i33 1, i33 8589934590)
-@t = constant i33 urem(i33 8589934591,i33 -1)
-@o = constant i33 trunc( i34 8589934592 to i33 )
-@p = constant i33 trunc( i34 8589934591 to i33 )
-@u = constant i33 srem(i33 -1, i33 17)
-
diff --git a/libclamav/c++/llvm/test/Integer/a33.ll.out b/libclamav/c++/llvm/test/Integer/a33.ll.out
deleted file mode 100644
index 6cd61ee..0000000
--- a/libclamav/c++/llvm/test/Integer/a33.ll.out
+++ /dev/null
@@ -1,20 +0,0 @@
-; ModuleID = '<stdin>'
-
-@b = constant i33 0 ; <i33*> [#uses=0]
-@c = constant i33 -2 ; <i33*> [#uses=0]
-@d = constant i33 0 ; <i33*> [#uses=0]
-@e = constant i33 -1 ; <i33*> [#uses=0]
-@f = constant i33 1 ; <i33*> [#uses=0]
-@g = constant i33 3 ; <i33*> [#uses=0]
-@h = constant i33 undef ; <i33*> [#uses=0]
-@i = constant i33 -4294967296 ; <i33*> [#uses=0]
-@j = constant i33 1 ; <i33*> [#uses=0]
-@l = constant i33 -1 ; <i33*> [#uses=0]
-@n = constant i33 -2 ; <i33*> [#uses=0]
-@q = constant i33 0 ; <i33*> [#uses=0]
-@r = constant i33 2 ; <i33*> [#uses=0]
-@s = constant i33 1 ; <i33*> [#uses=0]
-@t = constant i33 0 ; <i33*> [#uses=0]
-@o = constant i33 0 ; <i33*> [#uses=0]
-@p = constant i33 -1 ; <i33*> [#uses=0]
-@u = constant i33 -1 ; <i33*> [#uses=0]
diff --git a/libclamav/c++/llvm/test/Integer/a63.ll b/libclamav/c++/llvm/test/Integer/a63.ll
deleted file mode 100644
index 052ecd5..0000000
--- a/libclamav/c++/llvm/test/Integer/a63.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t.ll
-; RUN: diff %t.ll %s.out
-
-; test 63 bits
-;
-@b = constant i63 add(i63 9223372036854775807, i63 1)
-@c = constant i63 add(i63 9223372036854775807, i63 9223372036854775807)
-@d = constant i63 add(i63 9223372036854775800, i63 8)
-@e = constant i63 sub(i63 0 , i63 1)
-@f = constant i63 sub(i63 0 , i63 9223372036854775807)
-@g = constant i63 sub(i63 2 , i63 9223372036854775807)
-
-@h = constant i63 shl(i63 1 , i63 63)
-@i = constant i63 shl(i63 1 , i63 62)
-@j = constant i63 lshr(i63 9223372036854775807 , i63 62)
-@l = constant i63 ashr(i63 9223372036854775807 , i63 62)
-
-@n = constant i63 mul(i63 9223372036854775807, i63 2)
-@q = constant i63 sdiv(i63 -1, i63 4611686018427387903)
-@u = constant i63 sdiv(i63 -1, i63 1)
-@r = constant i63 udiv(i63 -1, i63 4611686018427387903)
-@s = constant i63 srem(i63 3, i63 9223372036854775806)
-@t = constant i63 urem(i63 9223372036854775807,i63 -1)
-@o = constant i63 trunc( i64 9223372036854775808 to i63 )
-@p = constant i63 trunc( i64 9223372036854775807 to i63 )
diff --git a/libclamav/c++/llvm/test/Integer/a63.ll.out b/libclamav/c++/llvm/test/Integer/a63.ll.out
deleted file mode 100644
index 18dff5a..0000000
--- a/libclamav/c++/llvm/test/Integer/a63.ll.out
+++ /dev/null
@@ -1,20 +0,0 @@
-; ModuleID = '<stdin>'
-
-@b = constant i63 0 ; <i63*> [#uses=0]
-@c = constant i63 -2 ; <i63*> [#uses=0]
-@d = constant i63 0 ; <i63*> [#uses=0]
-@e = constant i63 -1 ; <i63*> [#uses=0]
-@f = constant i63 1 ; <i63*> [#uses=0]
-@g = constant i63 3 ; <i63*> [#uses=0]
-@h = constant i63 undef ; <i63*> [#uses=0]
-@i = constant i63 -4611686018427387904 ; <i63*> [#uses=0]
-@j = constant i63 1 ; <i63*> [#uses=0]
-@l = constant i63 -1 ; <i63*> [#uses=0]
-@n = constant i63 -2 ; <i63*> [#uses=0]
-@q = constant i63 0 ; <i63*> [#uses=0]
-@u = constant i63 -1 ; <i63*> [#uses=0]
-@r = constant i63 2 ; <i63*> [#uses=0]
-@s = constant i63 1 ; <i63*> [#uses=0]
-@t = constant i63 0 ; <i63*> [#uses=0]
-@o = constant i63 0 ; <i63*> [#uses=0]
-@p = constant i63 -1 ; <i63*> [#uses=0]
diff --git a/libclamav/c++/llvm/test/Integer/a7.ll b/libclamav/c++/llvm/test/Integer/a7.ll
deleted file mode 100644
index 1edb35f..0000000
--- a/libclamav/c++/llvm/test/Integer/a7.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t.ll
-; RUN: diff %t.ll %s.out
-
-; test 7 bits
-;
-@b = constant i7 add(i7 127, i7 1)
-@q = constant i7 add(i7 -64, i7 -1)
-@c = constant i7 add(i7 127, i7 127)
-@d = constant i7 add(i7 120, i7 8)
-@e = constant i7 sub(i7 0 , i7 1)
-@f = constant i7 sub(i7 0 , i7 127)
-@g = constant i7 sub(i7 2 , i7 127)
-@r = constant i7 sub(i7 -3, i7 120)
-@s = constant i7 sub(i7 -3, i7 -8)
-
-@h = constant i7 shl(i7 1 , i7 7)
-@i = constant i7 shl(i7 1 , i7 6)
-@j = constant i7 lshr(i7 127 , i7 6)
-@l = constant i7 ashr(i7 127 , i7 6)
-@m2= constant i7 ashr(i7 -1 , i7 3)
-
-@n = constant i7 mul(i7 127, i7 2)
-@t = constant i7 mul(i7 -63, i7 -2)
-@u = constant i7 mul(i7 -32, i7 2)
-@v = constant i7 sdiv(i7 -1, i7 63)
-@w = constant i7 udiv(i7 -1, i7 63)
-@x = constant i7 srem(i7 1 , i7 126)
-@y = constant i7 urem(i7 127, i7 -1)
-@o = constant i7 trunc( i8 128 to i7 )
-@p = constant i7 trunc( i8 255 to i7 )
-
diff --git a/libclamav/c++/llvm/test/Integer/a7.ll.out b/libclamav/c++/llvm/test/Integer/a7.ll.out
deleted file mode 100644
index 250925d..0000000
--- a/libclamav/c++/llvm/test/Integer/a7.ll.out
+++ /dev/null
@@ -1,25 +0,0 @@
-; ModuleID = '<stdin>'
-
-@b = constant i7 0 ; <i7*> [#uses=0]
-@q = constant i7 63 ; <i7*> [#uses=0]
-@c = constant i7 -2 ; <i7*> [#uses=0]
-@d = constant i7 0 ; <i7*> [#uses=0]
-@e = constant i7 -1 ; <i7*> [#uses=0]
-@f = constant i7 1 ; <i7*> [#uses=0]
-@g = constant i7 3 ; <i7*> [#uses=0]
-@r = constant i7 5 ; <i7*> [#uses=0]
-@s = constant i7 5 ; <i7*> [#uses=0]
-@h = constant i7 undef ; <i7*> [#uses=0]
-@i = constant i7 -64 ; <i7*> [#uses=0]
-@j = constant i7 1 ; <i7*> [#uses=0]
-@l = constant i7 -1 ; <i7*> [#uses=0]
-@m2 = constant i7 -1 ; <i7*> [#uses=0]
-@n = constant i7 -2 ; <i7*> [#uses=0]
-@t = constant i7 -2 ; <i7*> [#uses=0]
-@u = constant i7 -64 ; <i7*> [#uses=0]
-@v = constant i7 0 ; <i7*> [#uses=0]
-@w = constant i7 2 ; <i7*> [#uses=0]
-@x = constant i7 1 ; <i7*> [#uses=0]
-@y = constant i7 0 ; <i7*> [#uses=0]
-@o = constant i7 0 ; <i7*> [#uses=0]
-@p = constant i7 -1 ; <i7*> [#uses=0]
diff --git a/libclamav/c++/llvm/test/Integer/a9.ll b/libclamav/c++/llvm/test/Integer/a9.ll
deleted file mode 100644
index 711ec82..0000000
--- a/libclamav/c++/llvm/test/Integer/a9.ll
+++ /dev/null
@@ -1,25 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t.ll
-; RUN: diff %t.ll %s.out
-
-; test 9 bits
-;
-@b = constant i9 add(i9 511, i9 1)
-@c = constant i9 add(i9 511, i9 511)
-@d = constant i9 add(i9 504, i9 8)
-@e = constant i9 sub(i9 0 , i9 1)
-@f = constant i9 sub(i9 0 , i9 511)
-@g = constant i9 sub(i9 2 , i9 511)
-
-@h = constant i9 shl(i9 1 , i9 9)
-@i = constant i9 shl(i9 1 , i9 8)
-@j = constant i9 lshr(i9 511 , i9 8)
-@l = constant i9 ashr(i9 511 , i9 8)
-
-@n = constant i9 mul(i9 511, i9 2)
-@q = constant i9 sdiv(i9 511, i9 2)
-@r = constant i9 udiv(i9 511, i9 2)
-@s = constant i9 urem(i9 511, i9 -1)
-@t = constant i9 srem(i9 1, i9 510)
-@o = constant i9 trunc( i10 512 to i9 )
-@p = constant i9 trunc( i10 511 to i9 )
-
diff --git a/libclamav/c++/llvm/test/Integer/a9.ll.out b/libclamav/c++/llvm/test/Integer/a9.ll.out
deleted file mode 100644
index 6e38062..0000000
--- a/libclamav/c++/llvm/test/Integer/a9.ll.out
+++ /dev/null
@@ -1,19 +0,0 @@
-; ModuleID = '<stdin>'
-
-@b = constant i9 0 ; <i9*> [#uses=0]
-@c = constant i9 -2 ; <i9*> [#uses=0]
-@d = constant i9 0 ; <i9*> [#uses=0]
-@e = constant i9 -1 ; <i9*> [#uses=0]
-@f = constant i9 1 ; <i9*> [#uses=0]
-@g = constant i9 3 ; <i9*> [#uses=0]
-@h = constant i9 undef ; <i9*> [#uses=0]
-@i = constant i9 -256 ; <i9*> [#uses=0]
-@j = constant i9 1 ; <i9*> [#uses=0]
-@l = constant i9 -1 ; <i9*> [#uses=0]
-@n = constant i9 -2 ; <i9*> [#uses=0]
-@q = constant i9 0 ; <i9*> [#uses=0]
-@r = constant i9 255 ; <i9*> [#uses=0]
-@s = constant i9 0 ; <i9*> [#uses=0]
-@t = constant i9 1 ; <i9*> [#uses=0]
-@o = constant i9 0 ; <i9*> [#uses=0]
-@p = constant i9 -1 ; <i9*> [#uses=0]
diff --git a/libclamav/c++/llvm/test/Integer/alignment_bt.ll b/libclamav/c++/llvm/test/Integer/alignment_bt.ll
deleted file mode 100644
index 3a9d051..0000000
--- a/libclamav/c++/llvm/test/Integer/alignment_bt.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-@X = global i19 4, align 16
-
-define i19 *@test() align 32 {
- %X = alloca i19, align 4
- %Y = alloca i51, i32 42, align 16
- %Z = alloca i32, align 1
- ret i19 *%X
-}
-
-define i19 *@test2() {
- %X = malloc i19, align 4
- %Y = malloc i51, i32 42, align 16
- %Z = malloc i32, align 1
- ret i19 *%X
-}
-
-
diff --git a/libclamav/c++/llvm/test/Integer/basictest_bt.ll b/libclamav/c++/llvm/test/Integer/basictest_bt.ll
deleted file mode 100644
index 5c98856..0000000
--- a/libclamav/c++/llvm/test/Integer/basictest_bt.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-; Test "stripped" format where nothing is symbolic... this is how the bytecode
-; format looks anyways (except for negative vs positive offsets)...
-;
-define void @void(i39, i39) {
- add i39 0, 0 ; <i39>:3 [#uses=2]
- sub i39 0, 4 ; <i39>:4 [#uses=2]
- br label %5
-
-; <label>:5 ; preds = %5, %2
- add i39 %0, %1 ; <i39>:6 [#uses=2]
- sub i39 %6, %4 ; <i39>:7 [#uses=1]
- icmp sle i39 %7, %3 ; <i1>:8 [#uses=1]
- br i1 %8, label %9, label %5
-
-; <label>:9 ; preds = %5
- add i39 %0, %1 ; <i39>:10 [#uses=0]
- sub i39 %6, %4 ; <i39>:11 [#uses=1]
- icmp sle i39 %11, %3 ; <i1>:12 [#uses=0]
- ret void
-}
-
-; This function always returns zero
-define i39 @zarro() {
-Startup:
- ret i39 0
-}
diff --git a/libclamav/c++/llvm/test/Integer/cfgstructures_bt.ll b/libclamav/c++/llvm/test/Integer/cfgstructures_bt.ll
deleted file mode 100644
index 09aec1f..0000000
--- a/libclamav/c++/llvm/test/Integer/cfgstructures_bt.ll
+++ /dev/null
@@ -1,56 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-;; This is an irreducible flow graph
-
-
-define void @"irreducible"(i1 %cond)
-begin
- br i1 %cond, label %X, label %Y
-
-X:
- br label %Y
-Y:
- br label %X
-end
-
-;; This is a pair of loops that share the same header
-
-define void @"sharedheader"(i1 %cond)
-begin
- br label %A
-A:
- br i1 %cond, label %X, label %Y
-
-X:
- br label %A
-Y:
- br label %A
-end
-
-;; This is a simple nested loop
-define void @"nested"(i1 %cond1, i1 %cond2, i1 %cond3)
-begin
- br label %Loop1
-
-Loop1:
- br label %Loop2
-
-Loop2:
- br label %Loop3
-
-Loop3:
- br i1 %cond3, label %Loop3, label %L3Exit
-
-L3Exit:
- br i1 %cond2, label %Loop2, label %L2Exit
-
-L2Exit:
- br i1 %cond1, label %Loop1, label %L1Exit
-
-L1Exit:
- ret void
-end
-
diff --git a/libclamav/c++/llvm/test/Integer/constexpr_bt.ll b/libclamav/c++/llvm/test/Integer/constexpr_bt.ll
deleted file mode 100644
index fc8b06d..0000000
--- a/libclamav/c++/llvm/test/Integer/constexpr_bt.ll
+++ /dev/null
@@ -1,84 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-; This testcase is for testing expressions constructed from
-; constant values, including constant pointers to globals.
-;
-
-;;-------------------------------
-;; Test constant cast expressions
-;;-------------------------------
-
-global i63 u0x00001 ; hexadecimal unsigned integer constants
-global i63 s0x012312 ; hexadecimal signed integer constants
-
-@t2 = global i33* @t1 ;; Forward reference without cast
-@t3 = global i33* bitcast (i33* @t1 to i33*) ;; Forward reference with cast
-@t1 = global i33 4 ;; i32* @0
-@t4 = global i33** bitcast (i33** @t3 to i33**) ;; Cast of a previous cast
-@t5 = global i33** @t3 ;; Reference to a previous cast
-@t6 = global i33*** @t4
-@t7 = global float* inttoptr (i32 12345678 to float*) ;; Cast ordinary value to ptr
-@t9 = global i33 fptosi (float sitofp (i33 8 to float) to i33) ;; Nested cast expression
-
-
-global i32* bitcast (float* @4 to i32*) ;; Forward numeric reference
-global float* @4 ;; Duplicate forward numeric reference
-global float 0.0
-
-
-;;---------------------------------------------------
-;; Test constant getelementpr expressions for arrays
-;;---------------------------------------------------
-
-@array = constant [2 x i33] [ i33 12, i33 52 ]
-@arrayPtr = global i33* getelementptr ([2 x i33]* @array, i64 0, i64 0) ;; i33* &@array[0][0]
-@arrayPtr5 = global i33** getelementptr (i33** @arrayPtr, i64 5) ;; i33* &@arrayPtr[5]
-
-@somestr = constant [11x i8] c"hello world"
-@char5 = global i8* getelementptr([11x i8]* @somestr, i64 0, i64 5)
-
-;; cast of getelementptr
-@char8a = global i33* bitcast (i8* getelementptr([11x i8]* @somestr, i64 0, i64 8) to i33*)
-
-;; getelementptr containing casts
-@char8b = global i8* getelementptr([11x i8]* @somestr, i64 sext (i8 0 to i64), i64 sext (i8 8 to i64))
-
-;;-------------------------------------------------------
-;; TODO: Test constant getelementpr expressions for structures
-;;-------------------------------------------------------
-
-%SType = type { i33 , {float, {i8} }, i64 } ;; struct containing struct
-%SAType = type { i33 , {[2x float], i64} } ;; struct containing array
-
-@S1 = global %SType* null ;; Global initialized to NULL
-@S2c = constant %SType { i33 1, {float,{i8}} {float 2.0, {i8} {i8 3}}, i64 4}
-
-@S3c = constant %SAType { i33 1, {[2x float], i64} {[2x float] [float 2.0, float 3.0], i64 4} }
-
-@S1ptr = global %SType** @S1 ;; Ref. to global S1
-@S2 = global %SType* @S2c ;; Ref. to constant S2
-@S3 = global %SAType* @S3c ;; Ref. to constant S3
-
- ;; Pointer to float (**@S1).1.0
-@S1fld1a = global float* getelementptr (%SType* @S2c, i64 0, i32 1, i32 0)
- ;; Another ptr to the same!
-@S1fld1b = global float* getelementptr (%SType* @S2c, i64 0, i32 1, i32 0)
-
-@S1fld1bptr = global float** @S1fld1b ;; Ref. to previous pointer
-
- ;; Pointer to i8 (**@S2).1.1.0
-@S2fld3 = global i8* getelementptr (%SType* @S2c, i64 0, i32 1, i32 1, i32 0)
-
- ;; Pointer to float (**@S2).1.0[0]
-;@S3fld3 = global float* getelementptr (%SAType** @S3, i64 0, i64 0, i32 1, i32 0, i64 0)
-
-;;---------------------------------------------------------
-;; TODO: Test constant expressions for unary and binary operators
-;;---------------------------------------------------------
-
-;;---------------------------------------------------
-
-
diff --git a/libclamav/c++/llvm/test/Integer/constpointer_bt.ll b/libclamav/c++/llvm/test/Integer/constpointer_bt.ll
deleted file mode 100644
index 6be9ec3..0000000
--- a/libclamav/c++/llvm/test/Integer/constpointer_bt.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-; This testcase is primarily used for testing that global values can be used as
-; constant pointer initializers. This is tricky because they can be forward
-; declared and involves an icky bytecode encoding. There is no meaningful
-; optimization that can be performed on this file, it is just here to test
-; assembly and disassembly.
-;
-
-
-@t3 = global i40 * @t1 ;; Forward reference
-@t1 = global i40 4
-@t4 = global i40 ** @t3 ;; reference to reference
-
-@t2 = global i40 * @t1
-
-global float * @2 ;; Forward numeric reference
-global float * @2 ;; Duplicate forward numeric reference
-global float 0.0
-global float * @2 ;; Numeric reference
-
-
-@fptr = global void() * @f ;; Forward ref method defn
-declare void @"f"() ;; External method
-
-@sptr1 = global [11x i8]* @somestr ;; Forward ref to a constant
-@somestr = constant [11x i8] c"hello world"
-@sptr2 = global [11x i8]* @somestr
-
-
diff --git a/libclamav/c++/llvm/test/Integer/dg.exp b/libclamav/c++/llvm/test/Integer/dg.exp
deleted file mode 100644
index f200589..0000000
--- a/libclamav/c++/llvm/test/Integer/dg.exp
+++ /dev/null
@@ -1,3 +0,0 @@
-load_lib llvm.exp
-
-RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
diff --git a/libclamav/c++/llvm/test/Integer/fold-fpcast_bt.ll b/libclamav/c++/llvm/test/Integer/fold-fpcast_bt.ll
deleted file mode 100644
index 8e5f838..0000000
--- a/libclamav/c++/llvm/test/Integer/fold-fpcast_bt.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis | not grep bitcast
-
-define i60 @test1() {
- ret i60 fptoui(float 0x400D9999A0000000 to i60)
-}
-
-define float @test2() {
- ret float uitofp(i60 17 to float)
-}
-
-define i64 @test3() {
- ret i64 bitcast (double 0x400921FB4D12D84A to i64)
-}
-
-define double @test4() {
- ret double bitcast (i64 42 to double)
-}
-
-define i30 @test5() {
- ret i30 fptoui(float 0x400D9999A0000000 to i30)
-}
-
-define float @test6() {
- ret float uitofp(i30 17 to float)
-}
-
-define i64 @test7() {
- ret i64 bitcast (double 0x400921FB4D12D84A to i64)
-}
-
-define double @test8() {
- ret double bitcast (i64 42 to double)
-}
diff --git a/libclamav/c++/llvm/test/Integer/forwardreftest_bt.ll b/libclamav/c++/llvm/test/Integer/forwardreftest_bt.ll
deleted file mode 100644
index 5d73eff..0000000
--- a/libclamav/c++/llvm/test/Integer/forwardreftest_bt.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
- %myty = type i55
- %myfn = type float (i55,double,i55,i16)
- type i55(%myfn*)
- type i55(i55)
- type i55(i55(i55)*)
-
- %thisfuncty = type i55 (i55) *
-
-declare void @F(%thisfuncty, %thisfuncty, %thisfuncty)
-
-; This function always returns zero
-define i55 @zarro(i55 %Func)
-begin
-Startup:
- add i55 0, 10
- ret i55 0
-end
-
-define i55 @test(i55)
-begin
- call void @F(%thisfuncty @zarro, %thisfuncty @test, %thisfuncty @foozball)
- ret i55 0
-end
-
-define i55 @foozball(i55)
-begin
- ret i55 0
-end
-
diff --git a/libclamav/c++/llvm/test/Integer/globalredefinition_bt.ll b/libclamav/c++/llvm/test/Integer/globalredefinition_bt.ll
deleted file mode 100644
index b369b2a..0000000
--- a/libclamav/c++/llvm/test/Integer/globalredefinition_bt.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-; Test forward references and redefinitions of globals
-
-@A = global i17* @B
-@B = global i17 7
-
-declare void @X()
-
-declare void @X()
-
-define void @X() {
- ret void
-}
-
-declare void @X()
diff --git a/libclamav/c++/llvm/test/Integer/globalvars_bt.ll b/libclamav/c++/llvm/test/Integer/globalvars_bt.ll
deleted file mode 100644
index 5c43185..0000000
--- a/libclamav/c++/llvm/test/Integer/globalvars_bt.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-
-@MyVar = external global i27
-@MyIntList = external global { \2 *, i27 }
- external global i27 ; i27*:0
-
-@AConst = constant i27 123
-
-@AString = constant [4 x i8] c"test"
-
-@ZeroInit = global { [100 x i27 ], [40 x float ] } { [100 x i27] zeroinitializer,
- [40 x float] zeroinitializer }
-
-
-define i27 @"foo"(i27 %blah)
-begin
- store i27 5, i27 *@MyVar
- %idx = getelementptr { \2 *, i27 } * @MyIntList, i64 0, i32 1
- store i27 12, i27* %idx
- ret i27 %blah
-end
-
diff --git a/libclamav/c++/llvm/test/Integer/indirectcall2_bt.ll b/libclamav/c++/llvm/test/Integer/indirectcall2_bt.ll
deleted file mode 100644
index 5b7c68d..0000000
--- a/libclamav/c++/llvm/test/Integer/indirectcall2_bt.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-define i63 @"test"(i63 %X)
-begin
- ret i63 %X
-end
-
-define i63 @"fib"(i63 %n)
-begin
- %T = icmp ult i63 %n, 2 ; {i1}:0
- br i1 %T, label %BaseCase, label %RecurseCase
-
-RecurseCase:
- %result = call i63 @test(i63 %n)
- br label %BaseCase
-
-BaseCase:
- %X = phi i63 [1, %0], [2, %RecurseCase]
- ret i63 %X
-end
-
diff --git a/libclamav/c++/llvm/test/Integer/indirectcall_bt.ll b/libclamav/c++/llvm/test/Integer/indirectcall_bt.ll
deleted file mode 100644
index d586fca..0000000
--- a/libclamav/c++/llvm/test/Integer/indirectcall_bt.ll
+++ /dev/null
@@ -1,52 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-declare i32 @"atoi"(i8 *)
-
-define i63 @"fib"(i63 %n)
-begin
- icmp ult i63 %n, 2 ; {i1}:1
- br i1 %1, label %BaseCase, label %RecurseCase
-
-BaseCase:
- ret i63 1
-
-RecurseCase:
- %n2 = sub i63 %n, 2
- %n1 = sub i63 %n, 1
- %f2 = call i63(i63) * @fib(i63 %n2)
- %f1 = call i63(i63) * @fib(i63 %n1)
- %result = add i63 %f2, %f1
- ret i63 %result
-end
-
-define i63 @"realmain"(i32 %argc, i8 ** %argv)
-begin
- icmp eq i32 %argc, 2 ; {i1}:1
- br i1 %1, label %HasArg, label %Continue
-HasArg:
- ; %n1 = atoi(argv[1])
- %n1 = add i32 1, 1
- br label %Continue
-
-Continue:
- %n = phi i32 [%n1, %HasArg], [1, %0]
- %N = sext i32 %n to i63
- %F = call i63(i63) *@fib(i63 %N)
- ret i63 %F
-end
-
-define i63 @"trampoline"(i63 %n, i63(i63)* %fibfunc)
-begin
- %F = call i63(i63) *%fibfunc(i63 %n)
- ret i63 %F
-end
-
-define i32 @"main"()
-begin
- %Result = call i63 @trampoline(i63 10, i63(i63) *@fib)
- %Result2 = trunc i63 %Result to i32
- ret i32 %Result2
-end
diff --git a/libclamav/c++/llvm/test/Integer/instructions_bt.ll b/libclamav/c++/llvm/test/Integer/instructions_bt.ll
deleted file mode 100644
index 7ca5890..0000000
--- a/libclamav/c++/llvm/test/Integer/instructions_bt.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-define i39 @test_extractelement(<4 x i39> %V) {
- %R = extractelement <4 x i39> %V, i32 1
- ret i39 %R
-}
-
-define <4 x i39> @test_insertelement(<4 x i39> %V) {
- %R = insertelement <4 x i39> %V, i39 0, i32 0
- ret <4 x i39> %R
-}
-
-define <4 x i39> @test_shufflevector_u(<4 x i39> %V) {
- %R = shufflevector <4 x i39> %V, <4 x i39> %V,
- <4 x i32> < i32 1, i32 undef, i32 7, i32 2>
- ret <4 x i39> %R
-}
-
-define <4 x float> @test_shufflevector_f(<4 x float> %V) {
- %R = shufflevector <4 x float> %V, <4 x float> undef,
- <4 x i32> < i32 1, i32 undef, i32 7, i32 2>
- ret <4 x float> %R
-}
diff --git a/libclamav/c++/llvm/test/Integer/newcasts_bt.ll b/libclamav/c++/llvm/test/Integer/newcasts_bt.ll
deleted file mode 100644
index e2eee4f..0000000
--- a/libclamav/c++/llvm/test/Integer/newcasts_bt.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-define void @"NewCasts" (i17 %x) {
- %a = zext i17 %x to i32
- %b = sext i17 %x to i32
- %c = trunc i17 %x to i8
- %d = uitofp i17 %x to float
- %e = sitofp i17 %x to double
- %f = fptoui float %d to i17
- %g = fptosi double %e to i17
- %i = fpext float %d to double
- %j = fptrunc double %i to float
- %k = bitcast i32 %a to float
- %l = inttoptr i17 %x to i32*
- %m = ptrtoint i32* %l to i64
- ret void
-}
-
-
-define i17 @"ZExtConst" () {
- ret i17 trunc ( i32 zext ( i17 42 to i32) to i17 )
-}
-
-define i17 @"SExtConst" () {
- ret i17 trunc (i32 sext (i17 42 to i32) to i17 )
-}
diff --git a/libclamav/c++/llvm/test/Integer/opaquetypes_bt.ll b/libclamav/c++/llvm/test/Integer/opaquetypes_bt.ll
deleted file mode 100644
index 5771342..0000000
--- a/libclamav/c++/llvm/test/Integer/opaquetypes_bt.ll
+++ /dev/null
@@ -1,58 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-; This test case is used to test opaque type processing, forward references,
-; and recursive types. Oh my.
-;
-
-%SQ1 = type { i31 }
-%SQ2 = type { %ITy }
-%ITy = type i31
-
-
-%CCC = type { \2* }
-%BBB = type { \2*, \2 * }
-%AAA = type { \2*, {\2*}, [12x{\2*}], {[1x{\2*}]} }
-
-; Test numbered types
-type %CCC
-type %BBB
-%Composite = type { %0, %1 }
-
-; Test simple opaque type resolution...
-%i31ty = type i31
-
-; Perform a simple forward reference...
-%ty1 = type { %ty2, i31 }
-%ty2 = type float
-
-; Do a recursive type...
-%list = type { %list * }
-%listp = type { %listp } *
-
-; Do two mutually recursive types...
-%TyA = type { %ty2, %TyB * }
-%TyB = type { double, %TyA * }
-
-; A complex recursive type...
-%Y = type { {%Y*}, %Y* }
-%Z = type { { %Z * }, [12x%Z] *, {{{ %Z * }}} }
-
-; More ridiculous test cases...
-%A = type [ 123x %A*]
-%M = type %M (%M, %M) *
-%P = type %P*
-
-; Recursive ptrs
-%u = type %v*
-%v = type %u*
-
-; Test the parser for unnamed recursive types...
-%P1 = type \1 *
-%Y1 = type { { \3 * }, \2 * }
-%Z1 = type { { \3 * }, [12x\3] *, { { { \5 * } } } }
-
-
-
-
diff --git a/libclamav/c++/llvm/test/Integer/packed_bt.ll b/libclamav/c++/llvm/test/Integer/packed_bt.ll
deleted file mode 100644
index f6ea87c..0000000
--- a/libclamav/c++/llvm/test/Integer/packed_bt.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-@foo1 = external global <4 x float>
-@foo2 = external global <2 x i10>
-
-
-define void @main()
-{
- store <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, <4 x float>* @foo1
- store <2 x i10> <i10 4, i10 4>, <2 x i10>* @foo2
- %l1 = load <4 x float>* @foo1
- %l2 = load <2 x i10>* @foo2
- ret void
-}
diff --git a/libclamav/c++/llvm/test/Integer/packed_struct_bt.ll b/libclamav/c++/llvm/test/Integer/packed_struct_bt.ll
deleted file mode 100644
index a4d01e7..0000000
--- a/libclamav/c++/llvm/test/Integer/packed_struct_bt.ll
+++ /dev/null
@@ -1,33 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-; RUN: not grep cast %t2.ll
-; RUN: grep {\\}>} %t2.ll
-; END.
-
-%struct.anon = type <{ i8, i35, i35, i35 }>
-@foos = external global %struct.anon
-@bara = external global [2 x <{ i35, i8 }>]
-
-;initializers should work for packed and non-packed the same way
-@E1 = global <{i8, i35, i35}> <{i8 1, i35 2, i35 3}>
-@E2 = global {i8, i35, i35} {i8 4, i35 5, i35 6}
-
-
-define i35 @main()
-{
- %tmp = load i35* getelementptr (%struct.anon* @foos, i32 0, i32 1) ; <i35> [#uses=1]
- %tmp3 = load i35* getelementptr (%struct.anon* @foos, i32 0, i32 2) ; <i35> [#uses=1]
- %tmp6 = load i35* getelementptr (%struct.anon* @foos, i32 0, i32 3) ; <i35> [#uses=1]
- %tmp4 = add i35 %tmp3, %tmp ; <i35> [#uses=1]
- %tmp7 = add i35 %tmp4, %tmp6 ; <i35> [#uses=1]
- ret i35 %tmp7
-}
-
-define i35 @bar() {
-entry:
- %tmp = load i35* getelementptr([2 x <{ i35, i8 }>]* @bara, i32 0, i32 0, i32 0 ) ; <i35> [#uses=1]
- %tmp4 = load i35* getelementptr ([2 x <{ i35, i8 }>]* @bara, i32 0, i32 1, i32 0) ; <i35> [#uses=1]
- %tmp5 = add i35 %tmp4, %tmp ; <i35> [#uses=1]
- ret i35 %tmp5
-}
diff --git a/libclamav/c++/llvm/test/Integer/paramattrs_bt.ll b/libclamav/c++/llvm/test/Integer/paramattrs_bt.ll
deleted file mode 100644
index 47ef753..0000000
--- a/libclamav/c++/llvm/test/Integer/paramattrs_bt.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llvm-as < %s | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-%ZFunTy = type i33(i8 zeroext)
-%SFunTy = type i33(i8 signext)
-
-declare i16 @"test"(i16 signext %arg) signext
-declare i8 @"test2" (i16 zeroext %a2) zeroext
-
-
-define i33 @main(i33 %argc, i8 **%argv) {
- %val = trunc i33 %argc to i16
- %res = call i16 (i16 signext) signext *@test(i16 signext %val) signext
- %two = add i16 %res, %res
- %res2 = call i8 @test2(i16 %two zeroext) zeroext
- %retVal = sext i16 %two to i33
- ret i33 %retVal
-}
diff --git a/libclamav/c++/llvm/test/Integer/properties_bt.ll b/libclamav/c++/llvm/test/Integer/properties_bt.ll
deleted file mode 100644
index f24ddc2..0000000
--- a/libclamav/c++/llvm/test/Integer/properties_bt.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-target datalayout = "e-p:32:32"
-target triple = "proc-vend-sys"
-deplibs = [ "m", "c" ]
-
diff --git a/libclamav/c++/llvm/test/Integer/prototype_bt.ll b/libclamav/c++/llvm/test/Integer/prototype_bt.ll
deleted file mode 100644
index 2236e8b..0000000
--- a/libclamav/c++/llvm/test/Integer/prototype_bt.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-declare i31 @"bar"(i31 %in)
-
-define i31 @"foo"(i31 %blah)
-begin
- %xx = call i31 @bar(i31 %blah)
- ret i31 %xx
-end
-
diff --git a/libclamav/c++/llvm/test/Integer/recursivetype_bt.ll b/libclamav/c++/llvm/test/Integer/recursivetype_bt.ll
deleted file mode 100644
index d5ce3f5..0000000
--- a/libclamav/c++/llvm/test/Integer/recursivetype_bt.ll
+++ /dev/null
@@ -1,108 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-; This file contains the output from the following compiled C code:
-; typedef struct list {
-; struct list *Next;
-; i32 Data;
-; } list;
-;
-; // Iterative insert fn
-; void InsertIntoListTail(list **L, i32 Data) {
-; while (*L)
-; L = &(*L)->Next;
-; *L = (list*)malloc(sizeof(list));
-; (*L)->Data = Data;
-; (*L)->Next = 0;
-; }
-;
-; // Recursive list search fn
-; list *FindData(list *L, i32 Data) {
-; if (L == 0) return 0;
-; if (L->Data == Data) return L;
-; return FindData(L->Next, Data);
-; }
-;
-; void DoListStuff() {
-; list *MyList = 0;
-; InsertIntoListTail(&MyList, 100);
-; InsertIntoListTail(&MyList, 12);
-; InsertIntoListTail(&MyList, 42);
-; InsertIntoListTail(&MyList, 1123);
-; InsertIntoListTail(&MyList, 1213);
-;
-; if (FindData(MyList, 75)) foundIt();
-; if (FindData(MyList, 42)) foundIt();
-; if (FindData(MyList, 700)) foundIt();
-; }
-
-%list = type { %list*, i36 }
-
-declare i8 *@"malloc"(i32)
-
-;;**********************
-;;**********************
-
-define void @"InsertIntoListTail"(%list** %L, i36 %Data)
-begin
-bb1:
- %reg116 = load %list** %L ;;<%list*>
- %cast1004 = inttoptr i64 0 to %list* ;;<%list*>
- %cond1000 = icmp eq %list* %reg116, %cast1004 ;;<i1>
- br i1 %cond1000, label %bb3, label %bb2
-
-bb2:
- %reg117 = phi %list** [ %reg118, %bb2 ], [ %L, %bb1 ] ;;<%list**>
- %cast1010 = bitcast %list** %reg117 to %list*** ;;<%list***>
- %reg118 = load %list*** %cast1010 ;;<%list**>
- %reg109 = load %list** %reg118 ;;<%list*>
- %cast1005 = inttoptr i64 0 to %list* ;;<%list*>
- %cond1001 = icmp ne %list* %reg109, %cast1005 ;;<i1>
- br i1 %cond1001, label %bb2, label %bb3
-
-bb3:
- %reg119 = phi %list** [ %reg118, %bb2 ], [ %L, %bb1 ] ;;<%list**>
- %cast1006 = bitcast %list** %reg119 to i8** ;;<i8**>
- %reg111 = call i8* @malloc(i32 16) ;;<i8*>
- store i8* %reg111, i8** %cast1006 ;;<void>
- %reg112 = ptrtoint i8* %reg111 to i64
- %reg1002 = add i64 %reg112, 8
- %reg1005 = inttoptr i64 %reg1002 to i8* ;;<i8*>
- %cast1008 = bitcast i8* %reg1005 to i36* ;;<i36*>
- store i36 %Data, i36* %cast1008 ;;<void>
- %cast1003 = inttoptr i64 0 to i64* ;;<i64*>
- %cast1009 = bitcast i8* %reg111 to i64** ;;<i64**>
- store i64* %cast1003, i64** %cast1009 ;;<void>
- ret void
-end
-
-define %list* @"FindData"(%list* %L, i36 %Data)
-begin
-bb1:
- br label %bb2
-
-bb2:
- %reg115 = phi %list* [ %reg116, %bb6 ], [ %L, %bb1 ] ;;<%list*>
- %cast1014 = inttoptr i64 0 to %list* ;;<%list*>
- %cond1011 = icmp ne %list* %reg115, %cast1014 ;;<i1>
- br i1 %cond1011, label %bb4, label %bb3
-
-bb3:
- ret %list* null
-
-bb4:
- %idx = getelementptr %list* %reg115, i64 0, i32 1 ;;<i36>
- %reg111 = load i36* %idx
- %cond1013 = icmp ne i36 %reg111, %Data ;;<i1>
- br i1 %cond1013, label %bb6, label %bb5
-
-bb5:
- ret %list* %reg115
-
-bb6:
- %idx2 = getelementptr %list* %reg115, i64 0, i32 0 ;;<%list*>
- %reg116 = load %list** %idx2
- br label %bb2
-end
diff --git a/libclamav/c++/llvm/test/Integer/simplecalltest_bt.ll b/libclamav/c++/llvm/test/Integer/simplecalltest_bt.ll
deleted file mode 100644
index 45dc0f1..0000000
--- a/libclamav/c++/llvm/test/Integer/simplecalltest_bt.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-%FunTy = type i31(i31)
-
-
-define void @"invoke"(%FunTy *%x)
-begin
- %foo = call %FunTy* %x(i31 123)
- ret void
-end
-
-define i31 @"main"(i31 %argc, i8 **%argv, i8 **%envp)
-begin
- %retval = call i31 (i31) *@test(i31 %argc)
- %two = add i31 %retval, %retval
- %retval2 = call i31 @test(i31 %argc)
-
- %two2 = add i31 %two, %retval2
- call void @invoke (%FunTy* @test)
- ret i31 %two2
-end
-
-define i31 @"test"(i31 %i0)
-begin
- ret i31 %i0
-end
diff --git a/libclamav/c++/llvm/test/Integer/small_bt.ll b/libclamav/c++/llvm/test/Integer/small_bt.ll
deleted file mode 100644
index 00fcace..0000000
--- a/libclamav/c++/llvm/test/Integer/small_bt.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-%x = type i19
-
-
-define i19 @"foo"(i19 %in)
-begin
-label:
- ret i19 2
-end
-
diff --git a/libclamav/c++/llvm/test/Integer/testalloca_bt.ll b/libclamav/c++/llvm/test/Integer/testalloca_bt.ll
deleted file mode 100644
index e8e73c5..0000000
--- a/libclamav/c++/llvm/test/Integer/testalloca_bt.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-%inners = type {float, {i8 } }
-%struct = type { i33 , {float, {i8 } } , i64 }
-
-
-define i33 @testfunction(i33 %i0, i33 %j0)
-begin
- alloca i8, i32 5
- %ptr = alloca i33 ; yields {i33*}:ptr
- store i33 3, i33* %ptr ; yields {void}
- %val = load i33* %ptr ; yields {i33}:val = i33 %3
-
- %sptr = alloca %struct ; yields {%struct*}:sptr
- %nsptr = getelementptr %struct * %sptr, i64 0, i32 1 ; yields {inners*}:nsptr
- %ubsptr = getelementptr %inners * %nsptr, i64 0, i32 1 ; yields {{i8}*}:ubsptr
- %idx = getelementptr {i8} * %ubsptr, i64 0, i32 0
- store i8 4, i8* %idx
-
- %fptr = getelementptr %struct * %sptr, i64 0, i32 1, i32 0 ; yields {float*}:fptr
- store float 4.0, float * %fptr
-
- ret i33 3
-end
-
diff --git a/libclamav/c++/llvm/test/Integer/testarith_bt.ll b/libclamav/c++/llvm/test/Integer/testarith_bt.ll
deleted file mode 100644
index 0820399..0000000
--- a/libclamav/c++/llvm/test/Integer/testarith_bt.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-define i31 @"simpleArith"(i31 %i0, i31 %j0)
-begin
- %t1 = add i31 %i0, %j0
- %t2 = sub i31 %i0, %j0
- %t3 = mul i31 %t1, %t2
- %t4 = udiv i31 %t1, %t2
- %t5 = sdiv i31 %t1, %t2
- %t6 = urem i31 %t1, %t2
- %t7 = srem i31 %t1, %t2
- %t8 = shl i31 %t1, 9
- %t9 = lshr i31 %t1, 9
- %t10= ashr i31 %t1, 9
- %f1 = sitofp i31 %t1 to float
- %f2 = fdiv float 4.0, %f1
- ret i31 %t3
-end
diff --git a/libclamav/c++/llvm/test/Integer/testconstants_bt.ll b/libclamav/c++/llvm/test/Integer/testconstants_bt.ll
deleted file mode 100644
index 8ca49cf..0000000
--- a/libclamav/c++/llvm/test/Integer/testconstants_bt.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-@somestr = constant [11x i8] c"hello world"
-@array = constant [2 x i55] [ i55 12, i55 52 ]
- constant { i55, i55 } { i55 4, i55 3 }
-
-
-define [2 x i55]* @testfunction(i55 %i0, i55 %j0)
-begin
- ret [2x i55]* @array
-end
-
-define i8* @otherfunc(i55, double)
-begin
- %somestr = getelementptr [11x i8]* @somestr, i64 0, i64 0
- ret i8* %somestr
-end
-
-define i8* @yetanotherfunc(i55, double)
-begin
- ret i8* null ; Test null
-end
-
-define i55 @negativeUnsigned() {
- ret i55 -1
-}
-
-define i55 @largeSigned() {
- ret i55 3900000000
-}
diff --git a/libclamav/c++/llvm/test/Integer/testicmp_bt.ll b/libclamav/c++/llvm/test/Integer/testicmp_bt.ll
deleted file mode 100644
index 40a2465..0000000
--- a/libclamav/c++/llvm/test/Integer/testicmp_bt.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-define i31 @"simpleIcmp"(i31 %i0, i31 %j0)
-begin
- %t1 = icmp eq i31 %i0, %j0
- %t2 = icmp ne i31 %i0, %j0
- %t3 = icmp ult i31 %i0, %j0
- %t4 = icmp sgt i31 %i0, %j0
- %t5 = icmp ule i31 %i0, %j0
- %t6 = icmp sge i31 %i0, %j0
-
- %t7 = icmp eq i31 %i0, 1098765432
- %t8 = icmp ne i31 %i0, -31415926
-
- %t9 = icmp ult i31 10000, %j0
- %t10 = icmp sgt i31 -10000, %j0
-
-
- ret i31 %i0
-end
diff --git a/libclamav/c++/llvm/test/Integer/testlogical_bt.ll b/libclamav/c++/llvm/test/Integer/testlogical_bt.ll
deleted file mode 100644
index a2c927d..0000000
--- a/libclamav/c++/llvm/test/Integer/testlogical_bt.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-define i31 @"simpleAdd"(i31 %i0, i31 %j0)
-begin
- %t1 = xor i31 %i0, %j0
- %t2 = or i31 %i0, %j0
- %t3 = and i31 %t1, %t2
- ret i31 %t3
-end
-
diff --git a/libclamav/c++/llvm/test/Integer/testlogical_new_bt.ll b/libclamav/c++/llvm/test/Integer/testlogical_new_bt.ll
deleted file mode 100644
index 49a26dc..0000000
--- a/libclamav/c++/llvm/test/Integer/testlogical_new_bt.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-define i31 @"simpleAdd"(i31 %i0, i31 %j0)
-begin
- %t1 = xor i31 %i0, %j0
- %t2 = or i31 %i0, %j0
- %t3 = and i31 %t1, %t2
- %t4 = shl i31 %i0, 2
- %t5 = ashr i31 %i0, 2
- %t6 = lshr i31 %j0, 22
- ret i31 %t3
-end
diff --git a/libclamav/c++/llvm/test/Integer/testmemory_bt.ll b/libclamav/c++/llvm/test/Integer/testmemory_bt.ll
deleted file mode 100644
index e503c56..0000000
--- a/libclamav/c++/llvm/test/Integer/testmemory_bt.ll
+++ /dev/null
@@ -1,45 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-%struct = type { i31 , {float, {i9 } } , i64 }
-%complexty = type {i31, {[4 x i9 *], float}, double}
-
-
-define i31 @"main"()
-begin
- call i31 @testfunction(i64 0, i64 1)
- ret i31 0
-end
-
-define i31 @"testfunction"(i64 %i0, i64 %j0)
-begin
- %array0 = malloc [4 x i9] ; yields {[4 x i9]*}:array0
- %size = add i32 2, 2 ; yields {i31}:size = i31 %4
- %array1 = malloc i9, i32 4 ; yields {i9*}:array1
- %array2 = malloc i9, i32 %size ; yields {i9*}:array2
-
- %idx = getelementptr [4 x i9]* %array0, i64 0, i64 2
- store i9 123, i9* %idx
- free [4x i9]* %array0
- free i9* %array1
- free i9* %array2
-
-
- %aa = alloca %complexty, i32 5
- %idx2 = getelementptr %complexty* %aa, i64 %i0, i32 1, i32 0, i64 %j0
- store i9 *null, i9** %idx2
-
- %ptr = alloca i31 ; yields {i31*}:ptr
- store i31 3, i31* %ptr ; yields {void}
- %val = load i31* %ptr ; yields {i31}:val = i31 %3
-
- %sptr = alloca %struct ; yields {%struct*}:sptr
- %ubsptr = getelementptr %struct * %sptr, i64 0, i32 1, i32 1 ; yields {{i9}*}:ubsptr
- %idx3 = getelementptr {i9} * %ubsptr, i64 0, i32 0
- store i9 4, i9* %idx3
-
- ret i31 3
-end
-
diff --git a/libclamav/c++/llvm/test/Integer/testswitch_bt.ll b/libclamav/c++/llvm/test/Integer/testswitch_bt.ll
deleted file mode 100644
index bf7cdc5..0000000
--- a/libclamav/c++/llvm/test/Integer/testswitch_bt.ll
+++ /dev/null
@@ -1,24 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
- %i35 = type i35
-
-
-define i35 @"squared"(%i35 %i0)
-begin
- switch i35 %i0, label %Default [
- i35 1, label %Case1
- i35 2, label %Case2
- i35 4, label %Case4 ]
-
-Default:
- ret i35 -1 ; Unrecognized input value
-
-Case1:
- ret i35 1
-Case2:
- ret i35 4
-Case4:
- ret i35 16
-end
diff --git a/libclamav/c++/llvm/test/Integer/testvarargs_bt.ll b/libclamav/c++/llvm/test/Integer/testvarargs_bt.ll
deleted file mode 100644
index 3227d14..0000000
--- a/libclamav/c++/llvm/test/Integer/testvarargs_bt.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-declare i31 @"printf"(i8*, ...) ;; Prototype for: i32 __builtin_printf(const char*, ...)
-
-define i31 @"testvarar"()
-begin
- call i31(i8*, ...) *@printf(i8 * null, i31 12, i8 42)
- ret i31 %1
-end
-
-
diff --git a/libclamav/c++/llvm/test/Integer/undefined_bt.ll b/libclamav/c++/llvm/test/Integer/undefined_bt.ll
deleted file mode 100644
index 7eba590..0000000
--- a/libclamav/c++/llvm/test/Integer/undefined_bt.ll
+++ /dev/null
@@ -1,18 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-@X = global i31 undef
-
-
-declare i32 @"atoi"(i8 *)
-
-define i63 @test() {
- ret i63 undef
-}
-
-define i31 @test2() {
- %X = add i31 undef, 1
- ret i31 %X
-}
diff --git a/libclamav/c++/llvm/test/Integer/unreachable_bt.ll b/libclamav/c++/llvm/test/Integer/unreachable_bt.ll
deleted file mode 100644
index cb65d4b..0000000
--- a/libclamav/c++/llvm/test/Integer/unreachable_bt.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-
-
-declare void @bar()
-
-define i9 @foo() { ;; Calling this function has undefined behavior
- unreachable
-}
-
-define double @xyz() {
- call void @bar()
- unreachable ;; Bar must not return.
-}
diff --git a/libclamav/c++/llvm/test/Integer/varargs_bt.ll b/libclamav/c++/llvm/test/Integer/varargs_bt.ll
deleted file mode 100644
index 25ad58a..0000000
--- a/libclamav/c++/llvm/test/Integer/varargs_bt.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-; Demonstrate all of the variable argument handling intrinsic functions plus
-; the va_arg instruction.
-
-declare void @llvm.va_start(i8** %ap)
-declare void @llvm.va_copy(i8** %aq, i8** %ap)
-declare void @llvm.va_end(i8** %ap)
-
-define i33 @test(i33 %X, ...) {
- %ap = alloca i8*
- call void @llvm.va_start(i8** %ap)
- %tmp = va_arg i8** %ap, i33
-
- %aq = alloca i8*
- call void @llvm.va_copy(i8** %aq, i8** %ap)
- call void @llvm.va_end(i8** %aq)
-
- call void @llvm.va_end(i8** %ap)
- ret i33 %tmp
-}
diff --git a/libclamav/c++/llvm/test/Integer/varargs_new_bt.ll b/libclamav/c++/llvm/test/Integer/varargs_new_bt.ll
deleted file mode 100644
index 59bb3f2..0000000
--- a/libclamav/c++/llvm/test/Integer/varargs_new_bt.ll
+++ /dev/null
@@ -1,32 +0,0 @@
-; RUN: llvm-as %s -o - | llvm-dis > %t1.ll
-; RUN: llvm-as %t1.ll -o - | llvm-dis > %t2.ll
-; RUN: diff %t1.ll %t2.ll
-
-; Demonstrate all of the variable argument handling intrinsic functions plus
-; the va_arg instruction.
-
-declare void @llvm.va_start(i8**)
-declare void @llvm.va_copy(i8**, i8*)
-declare void @llvm.va_end(i8**)
-
-define i31 @test(i31 %X, ...) {
- ; Allocate two va_list items. On this target, va_list is of type i8*
- %ap = alloca i8* ; <i8**> [#uses=4]
- %aq = alloca i8* ; <i8**> [#uses=2]
-
- ; Initialize variable argument processing
- call void @llvm.va_start(i8** %ap)
-
- ; Read a single integer argument
- %tmp = va_arg i8** %ap, i31 ; <i31> [#uses=1]
-
- ; Demonstrate usage of llvm.va_copy and llvm_va_end
- %apv = load i8** %ap ; <i8*> [#uses=1]
- call void @llvm.va_copy(i8** %aq, i8* %apv)
- call void @llvm.va_end(i8** %aq)
-
- ; Stop processing of arguments.
- call void @llvm.va_end(i8** %ap)
- ret i31 %tmp
-
-}
diff --git a/libclamav/c++/llvm/test/Makefile b/libclamav/c++/llvm/test/Makefile
deleted file mode 100644
index 3750fdb..0000000
--- a/libclamav/c++/llvm/test/Makefile
+++ /dev/null
@@ -1,203 +0,0 @@
-#===- test/Makefile ----------------------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-LEVEL = ..
-DIRS =
-
-#
-# Make Dejagnu the default for testing
-#
-all:: check-local
-
-# Include other test rules
-include Makefile.tests
-
-#===------------------------------------------------------------------------===#
-# DejaGNU testing support
-#===------------------------------------------------------------------------===#
-
-ifneq ($(GREP_OPTIONS),)
-$(warning GREP_OPTIONS environment variable may interfere with test results)
-endif
-
-ifdef VERBOSE
-RUNTESTFLAGS := $(VERBOSE)
-LIT_ARGS := -v
-else
-LIT_ARGS := -s -v
-endif
-
-ifdef TESTSUITE
-LIT_TESTSUITE := $(TESTSUITE)
-CLEANED_TESTSUITE := $(patsubst %/,%,$(TESTSUITE))
-CLEANED_TESTSUITE := $(patsubst test/%,%,$(CLEANED_TESTSUITE))
-RUNTESTFLAGS += --tool $(CLEANED_TESTSUITE)
-else
-LIT_TESTSUITE := .
-endif
-
-ifdef VG
-VALGRIND := valgrind --tool=memcheck --quiet --trace-children=yes --error-exitcode=3 --leak-check=full $(VALGRIND_EXTRA_ARGS)
-endif
-
-# Check what to run for -all.
-LIT_ALL_TESTSUITES := $(LIT_TESTSUITE)
-
-extra-lit-site-cfgs::
-.PHONY: extra-lit-site-cfgs
-
-ifneq ($(strip $(filter check-local-all,$(MAKECMDGOALS))),)
-ifndef TESTSUITE
-ifeq ($(shell test -d $(PROJ_SRC_DIR)/../tools/clang && echo OK), OK)
-LIT_ALL_TESTSUITES += $(PROJ_OBJ_DIR)/../tools/clang/test
-
-# Force creation of Clang's lit.site.cfg.
-clang-lit-site-cfg: FORCE
- $(MAKE) -C $(PROJ_OBJ_DIR)/../tools/clang/test lit.site.cfg
-extra-lit-site-cfgs:: clang-lit-site-cfg
-endif
-endif
-endif
-
-IGNORE_TESTS :=
-
-ifndef RUNLLVM2CPP
-IGNORE_TESTS += llvm2cpp.exp
-endif
-
-ifdef IGNORE_TESTS
-RUNTESTFLAGS += --ignore "$(strip $(IGNORE_TESTS))"
-endif
-
-# ulimits like these are redundantly enforced by the buildbots, so
-# just removing them here won't work.
-# Both AuroraUX & Solaris do not have the -m flag for ulimit
-ifeq ($(HOST_OS),SunOS)
-ULIMIT=ulimit -t 600 ; ulimit -d 512000 ; ulimit -v 512000 ;
-else # !SunOS
-ifeq ($(HOST_OS),AuroraUX)
-ULIMIT=ulimit -t 600 ; ulimit -d 512000 ; ulimit -v 512000 ;
-else # !AuroraUX
-ULIMIT=ulimit -t 600 ; ulimit -d 512000 ; ulimit -m 512000 ; ulimit -v 512000 ;
-endif # AuroraUX
-endif # SunOS
-
-ifneq ($(RUNTEST),)
-check-local:: site.exp
- ( $(ULIMIT) \
- PATH="$(LLVMToolDir):$(LLVM_SRC_ROOT)/test/Scripts:$(LLVMGCCDIR)/bin:$(PATH)" \
- $(RUNTEST) $(RUNTESTFLAGS) )
-else
-check-local:: site.exp
- @echo "*** dejagnu not found. Make sure 'runtest' is in your PATH, then reconfigure LLVM."
-endif
-
-check-local-lit:: lit.site.cfg Unit/lit.site.cfg
- ( $(ULIMIT) \
- $(LLVM_SRC_ROOT)/utils/lit/lit.py $(LIT_ARGS) $(LIT_TESTSUITE) )
-
-check-local-all:: lit.site.cfg Unit/lit.site.cfg extra-lit-site-cfgs
- ( $(ULIMIT) \
- $(LLVM_SRC_ROOT)/utils/lit/lit.py $(LIT_ARGS) $(LIT_ALL_TESTSUITES) )
-
-ifdef TESTONE
-CLEANED_TESTONE := $(patsubst %/,%,$(TESTONE))
-CLEANED_TESTONE := $(patsubst test/%,%,$(CLEANED_TESTONE))
-SUBDIR := $(shell dirname $(CLEANED_TESTONE))
-TESTPATH := $(LLVM_SRC_ROOT)/test/$(CLEANED_TESTONE)
-check-one: site.exp $(TCLSH)
- $(Verb)( echo "source $(LLVM_OBJ_ROOT)/test/site.exp" ; \
- echo "set subdir $(SUBDIR)" ; \
- echo "proc pass { msg } { puts \"PASS: \$$msg\" } "; \
- echo "proc fail { msg } { puts \"FAIL: \$$msg\" }" ; \
- echo "proc xfail { msg } { puts \"XFAIL: \$$msg\" }" ; \
- echo "proc xpass { msg } { puts \"XPASS: \$$msg\" }" ; \
- echo "proc verbose args { }" ; \
- echo "source $(LLVM_SRC_ROOT)/test/lib/llvm.exp" ; \
- echo "RunLLVMTests $(TESTPATH)" ) | \
- ( $(ULIMIT) \
- PATH="$(LLVMToolDir):$(LLVM_SRC_ROOT)/test/Scripts:$(PATH)" \
- $(TCLSH) )
-endif
-
-clean::
- $(RM) -rf `find $(LLVM_OBJ_ROOT)/test -name Output -type d -print`
-
-# dsymutil is used on the Darwin to manipulate DWARF debugging information.
-ifeq ($(TARGET_OS),Darwin)
-DSYMUTIL=dsymutil
-else
-DSYMUTIL=true
-endif
-ifdef TargetCommonOpts
-BUGPOINT_TOPTS="-gcc-tool-args $(TargetCommonOpts)"
-else
-BUGPOINT_TOPTS=""
-endif
-
-ifneq ($(OCAMLOPT),)
-CC_FOR_OCAMLOPT := $(shell $(OCAMLOPT) -config | grep native_c_compiler | sed -e 's/native_c_compiler: //')
-CXX_FOR_OCAMLOPT := $(subst gcc,g++,$(CC_FOR_OCAMLOPT))
-endif
-
-FORCE:
-
-site.exp: FORCE
- @echo 'Making a new site.exp file...'
- @echo '## Autogenerated by LLVM configuration.' > site.tmp
- @echo '# Do not edit!' >> site.tmp
- @echo 'set target_triplet "$(TARGET_TRIPLE)"' >> site.tmp
- @echo 'set TARGETS_TO_BUILD "$(TARGETS_TO_BUILD)"' >> site.tmp
- @echo 'set llvmgcc_langs "$(LLVMGCC_LANGS)"' >> site.tmp
- @echo 'set llvmtoolsdir "$(ToolDir)"' >>site.tmp
- @echo 'set llvmlibsdir "$(LibDir)"' >>site.tmp
- @echo 'set llvm_bindings "$(BINDINGS_TO_BUILD)"' >> site.tmp
- @echo 'set srcroot "$(LLVM_SRC_ROOT)"' >>site.tmp
- @echo 'set objroot "$(LLVM_OBJ_ROOT)"' >>site.tmp
- @echo 'set srcdir "$(LLVM_SRC_ROOT)/test"' >>site.tmp
- @echo 'set objdir "$(LLVM_OBJ_ROOT)/test"' >>site.tmp
- @echo 'set gccpath "$(CC)"' >>site.tmp
- @echo 'set gxxpath "$(CXX)"' >>site.tmp
- @echo 'set compile_c "' $(CC) $(CPP.Flags) $(TargetCommonOpts) $(CompileCommonOpts) -c '"' >>site.tmp
- @echo 'set compile_cxx "' $(CXX) $(CPP.Flags) $(CXX.Flags) $(TargetCommonOpts) $(CompileCommonOpts) -c '"' >> site.tmp
- @echo 'set link "' $(CXX) $(CPP.Flags) $(CXX.Flags) $(TargetCommonOpts) $(CompileCommonOpts) $(LD.Flags) '"' >>site.tmp
- @echo 'set llvmgcc "$(LLVMGCC) $(TargetCommonOpts) $(EXTRA_OPTIONS)"' >> site.tmp
- @echo 'set llvmgxx "$(LLVMGCC) $(TargetCommonOpts) $(EXTRA_OPTIONS)"' >> site.tmp
- @echo 'set bugpoint_topts $(BUGPOINT_TOPTS)' >> site.tmp
- @echo 'set shlibext "$(SHLIBEXT)"' >> site.tmp
- @echo 'set ocamlopt "$(OCAMLOPT) -cc \"$(CXX_FOR_OCAMLOPT)\" -I $(LibDir)/ocaml"' >> site.tmp
- @echo 'set valgrind "$(VALGRIND)"' >> site.tmp
- @echo 'set grep "$(GREP)"' >>site.tmp
- @echo 'set gas "$(GAS)"' >>site.tmp
- @echo 'set llvmdsymutil "$(DSYMUTIL)"' >>site.tmp
- @echo '## All variables above are generated by configure. Do Not Edit ## ' >>site.tmp
- @test ! -f site.exp || \
- sed '1,/^## All variables above are.*##/ d' site.exp >> site.tmp
- @-rm -f site.bak
- @test ! -f site.exp || mv site.exp site.bak
- @mv site.tmp site.exp
-
-lit.site.cfg: site.exp
- @echo "Making LLVM 'lit.site.cfg' file..."
- @sed -e "s#@LLVM_SOURCE_DIR@#$(LLVM_SRC_ROOT)#g" \
- -e "s#@LLVM_BINARY_DIR@#$(LLVM_OBJ_ROOT)#g" \
- -e "s#@LLVM_TOOLS_DIR@#$(ToolDir)#g" \
- -e "s#@LLVMGCCDIR@#$(LLVMGCCDIR)#g" \
- $(PROJ_SRC_DIR)/lit.site.cfg.in > $@
-
-Unit/lit.site.cfg: $(PROJ_OBJ_DIR)/Unit/.dir FORCE
- @echo "Making LLVM unittest 'lit.site.cfg' file..."
- @sed -e "s#@LLVM_SOURCE_DIR@#$(LLVM_SRC_ROOT)#g" \
- -e "s#@LLVM_BINARY_DIR@#$(LLVM_OBJ_ROOT)#g" \
- -e "s#@LLVM_TOOLS_DIR@#$(ToolDir)#g" \
- -e "s#@LLVMGCCDIR@#$(LLVMGCCDIR)#g" \
- -e "s#@LLVM_BUILD_MODE@#$(BuildMode)#g" \
- -e "s#@ENABLE_SHARED@#$(ENABLE_SHARED)#g" \
- -e "s#@SHLIBPATH_VAR@#$(SHLIBPATH_VAR)#g" \
- $(PROJ_SRC_DIR)/Unit/lit.site.cfg.in > $@
diff --git a/libclamav/c++/llvm/test/Makefile.tests b/libclamav/c++/llvm/test/Makefile.tests
deleted file mode 100644
index aeb5871..0000000
--- a/libclamav/c++/llvm/test/Makefile.tests
+++ /dev/null
@@ -1,80 +0,0 @@
-##----------------------------------------------------------*- Makefile -*-===##
-##
-## Common rules for generating, linking, and compiling via LLVM. This is
-## used to implement a robust testing framework for LLVM
-##
-##-------------------------------------------------------------------------===##
-
-# If the user specified a TEST= option on the command line, we do not want to do
-# the default testing type. Instead, we change the default target to be the
-# test:: target.
-#
-ifdef TEST
-test::
-endif
-
-# We do not want to make .d files for tests!
-DISABLE_AUTO_DEPENDENCIES=1
-
-include ${LEVEL}/Makefile.common
-
-# Specify ENABLE_STATS on the command line to enable -stats and -time-passes
-# output from gccas and gccld.
-ifdef ENABLE_STATS
-STATS = -stats -time-passes
-endif
-
-.PHONY: clean default
-
-# These files, which might be intermediate results, should not be deleted by
-# make
-.PRECIOUS: Output/%.bc Output/%.ll
-.PRECIOUS: Output/%.tbc Output/%.tll
-.PRECIOUS: Output/.dir
-.PRECIOUS: Output/%.llvm.bc
-.PRECIOUS: Output/%.llvm
-
-LCCFLAGS += -O2 -Wall
-LCXXFLAGS += -O2 -Wall
-LLCFLAGS =
-TESTRUNR = @echo Running test: $<; \
- PATH="$(LLVMTOOLCURRENT):$(LLVM_SRC_ROOT)/test/Scripts:$(PATH)" \
- $(LLVM_SRC_ROOT)/test/TestRunner.sh
-
-LLCLIBS := $(LLCLIBS) -lm
-
-clean::
- $(RM) -f a.out core
- $(RM) -rf Output/
-
-# Compile from X.c to Output/X.ll
-Output/%.ll: %.c $(LCC1) Output/.dir $(INCLUDES)
- -$(LLVMCC) $(CPPFLAGS) $(LCCFLAGS) -S $< -o $@
-
-# Compile from X.cpp to Output/X.ll
-Output/%.ll: %.cpp $(LCC1XX) Output/.dir $(INCLUDES)
- -$(LLVMCXX) $(CPPFLAGS) $(LCXXFLAGS) -S $< -o $@
-
-# Compile from X.cc to Output/X.ll
-Output/%.ll: %.cc $(LCC1XX) Output/.dir $(INCLUDES)
- -$(LLVMCXX) $(CPPFLAGS) $(LCXXFLAGS) -S $< -o $@
-
-# LLVM Assemble from Output/X.ll to Output/X.bc. Output/X.ll must have come
-# from GCC output, so use GCCAS.
-#
-Output/%.bc: Output/%.ll $(LGCCAS)
- -$(LGCCAS) $(STATS) $< -o $@
-
-# LLVM Assemble from X.ll to Output/X.bc. Because we are coming directly from
-# LLVM source, use the non-transforming assembler.
-#
-Output/%.bc: %.ll $(LLVMAS) Output/.dir
- -$(LLVMAS) $< -o $@
-
-## Cancel built-in implicit rules that override above rules
-%: %.s
-
-%: %.c
-
-%.o: %.c
-
diff --git a/libclamav/c++/llvm/test/Scripts/README.txt b/libclamav/c++/llvm/test/Scripts/README.txt
deleted file mode 100644
index b0b1105..0000000
--- a/libclamav/c++/llvm/test/Scripts/README.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-This directory contains scripts which are used by the TestRunner style
-tests, which allows them to be simpler and more direct.
diff --git a/libclamav/c++/llvm/test/Scripts/ignore b/libclamav/c++/llvm/test/Scripts/ignore
deleted file mode 100755
index 865ae4d..0000000
--- a/libclamav/c++/llvm/test/Scripts/ignore
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-#
-# Program: ignore
-#
-# Synopsis: Ignore the result code of the command and always return 0
-#
-# Syntax: ignore command <arguments>
-
-"$@" || exit 0 && exit 0
-exit 0
diff --git a/libclamav/c++/llvm/test/Scripts/macho-dump b/libclamav/c++/llvm/test/Scripts/macho-dump
deleted file mode 100755
index 5b9943a..0000000
--- a/libclamav/c++/llvm/test/Scripts/macho-dump
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/usr/bin/env python
-
-import struct
-import sys
-import StringIO
-
-class Reader:
- def __init__(self, path):
- if path == '-':
- # Snarf all the data so we can seek.
- self.file = StringIO.StringIO(sys.stdin.read())
- else:
- self.file = open(path,'rb')
- self.isLSB = None
-
- self.string_table = None
-
- def setLSB(self, isLSB):
- self.isLSB = bool(isLSB)
-
- def tell(self):
- return self.file.tell()
-
- def seek(self, pos):
- self.file.seek(pos)
-
- def read(self, N):
- data = self.file.read(N)
- if len(data) != N:
- raise ValueError,"Out of data!"
- return data
-
- def read8(self):
- return ord(self.read(1))
-
- def read16(self):
- return struct.unpack('><'[self.isLSB] + 'H', self.read(2))[0]
-
- def read32(self):
- # Force to 32-bit, if possible; otherwise these might be long ints on a
- # big-endian platform. FIXME: Why???
- Value = struct.unpack('><'[self.isLSB] + 'I', self.read(4))[0]
- return int(Value)
-
- def registerStringTable(self, strings):
- if self.string_table is not None:
- raise ValueError,"%s: warning: multiple string tables" % sys.argv[0]
-
- self.string_table = strings
-
- def getString(self, index):
- if self.string_table is None:
- raise ValueError,"%s: warning: no string table registered" % sys.argv[0]
-
- end = self.string_table.index('\x00', index)
- return self.string_table[index:end]
-
-def dumpmacho(path, opts):
- f = Reader(path)
-
- magic = f.read(4)
- if magic == '\xFE\xED\xFA\xCE':
- f.setLSB(False)
- elif magic == '\xCE\xFA\xED\xFE':
- f.setLSB(True)
- else:
- raise ValueError,"Not a Mach-O object file: %r (bad magic)" % path
-
- print "('cputype', %r)" % f.read32()
- print "('cpusubtype', %r)" % f.read32()
- filetype = f.read32()
- print "('filetype', %r)" % filetype
-
- numLoadCommands = f.read32()
- print "('num_load_commands', %r)" % filetype
-
- loadCommandsSize = f.read32()
- print "('load_commands_size', %r)" % loadCommandsSize
-
- print "('flag', %r)" % f.read32()
-
- start = f.tell()
-
- print "('load_commands', ["
- for i in range(numLoadCommands):
- dumpLoadCommand(f, i, opts)
- print "])"
-
- if f.tell() - start != loadCommandsSize:
- raise ValueError,"%s: warning: invalid load commands size: %r" % (sys.argv[0], loadCommandsSize)
-
-def dumpLoadCommand(f, i, opts):
- start = f.tell()
-
- print " # Load Command %r" % i
- cmd = f.read32()
- print " (('command', %r)" % cmd
- cmdSize = f.read32()
- print " ('size', %r)" % cmdSize
-
- if cmd == 1:
- dumpSegmentLoadCommand32(f, opts)
- elif cmd == 2:
- dumpSymtabCommand(f, opts)
- elif cmd == 11:
- dumpDysymtabCommand(f, opts)
- elif cmd == 27:
- import uuid
- print " ('uuid', %s)" % uuid.UUID(bytes=f.read(16))
- else:
- print >>sys.stderr,"%s: warning: unknown load command: %r" % (sys.argv[0], cmd)
- f.read(cmdSize - 8)
- print " ),"
-
- if f.tell() - start != cmdSize:
- raise ValueError,"%s: warning: invalid load command size: %r" % (sys.argv[0], cmdSize)
-
-def dumpSegmentLoadCommand32(f, opts):
- print " ('segment_name', %r)" % f.read(16)
- print " ('vm_addr', %r)" % f.read32()
- print " ('vm_size', %r)" % f.read32()
- print " ('file_offset', %r)" % f.read32()
- print " ('file_size', %r)" % f.read32()
- print " ('maxprot', %r)" % f.read32()
- print " ('initprot', %r)" % f.read32()
- numSections = f.read32()
- print " ('num_sections', %r)" % numSections
- print " ('flags', %r)" % f.read32()
-
- print " ('sections', ["
- for i in range(numSections):
- dumpSection32(f, i, opts)
- print " ])"
-
-def dumpSymtabCommand(f, opts):
- symoff = f.read32()
- print " ('symoff', %r)" % symoff
- nsyms = f.read32()
- print " ('nsyms', %r)" % nsyms
- stroff = f.read32()
- print " ('stroff', %r)" % stroff
- strsize = f.read32()
- print " ('strsize', %r)" % strsize
-
- prev_pos = f.tell()
-
- f.seek(stroff)
- string_data = f.read(strsize)
- print " ('_string_data', %r)" % string_data
-
- f.registerStringTable(string_data)
-
- f.seek(symoff)
- print " ('_symbols', ["
- for i in range(nsyms):
- dumpNlist32(f, i, opts)
- print " ])"
-
- f.seek(prev_pos)
-
-def dumpNlist32(f, i, opts):
- print " # Symbol %r" % i
- n_strx = f.read32()
- print " (('n_strx', %r)" % n_strx
- n_type = f.read8()
- print " ('n_type', %#x)" % n_type
- n_sect = f.read8()
- print " ('n_sect', %r)" % n_sect
- n_desc = f.read16()
- print " ('n_desc', %r)" % n_desc
- n_value = f.read32()
- print " ('n_value', %r)" % n_value
- print " ('_string', %r)" % f.getString(n_strx)
- print " ),"
-
-def dumpDysymtabCommand(f, opts):
- print " ('ilocalsym', %r)" % f.read32()
- print " ('nlocalsym', %r)" % f.read32()
- print " ('iextdefsym', %r)" % f.read32()
- print " ('nextdefsym', %r)" % f.read32()
- print " ('iundefsym', %r)" % f.read32()
- print " ('nundefsym', %r)" % f.read32()
- print " ('tocoff', %r)" % f.read32()
- print " ('ntoc', %r)" % f.read32()
- print " ('modtaboff', %r)" % f.read32()
- print " ('nmodtab', %r)" % f.read32()
- print " ('extrefsymoff', %r)" % f.read32()
- print " ('nextrefsyms', %r)" % f.read32()
- indirectsymoff = f.read32()
- print " ('indirectsymoff', %r)" % indirectsymoff
- nindirectsyms = f.read32()
- print " ('nindirectsyms', %r)" % nindirectsyms
- print " ('extreloff', %r)" % f.read32()
- print " ('nextrel', %r)" % f.read32()
- print " ('locreloff', %r)" % f.read32()
- print " ('nlocrel', %r)" % f.read32()
-
- prev_pos = f.tell()
-
- f.seek(indirectsymoff)
- print " ('_indirect_symbols', ["
- for i in range(nindirectsyms):
- print " # Indirect Symbol %r" % i
- print " (('symbol_index', %#x),)," % f.read32()
- print " ])"
-
- f.seek(prev_pos)
-
-def dumpSection32(f, i, opts):
- print " # Section %r" % i
- print " (('section_name', %r)" % f.read(16)
- print " ('segment_name', %r)" % f.read(16)
- print " ('address', %r)" % f.read32()
- size = f.read32()
- print " ('size', %r)" % size
- offset = f.read32()
- print " ('offset', %r)" % offset
- print " ('alignment', %r)" % f.read32()
- reloc_offset = f.read32()
- print " ('reloc_offset', %r)" % reloc_offset
- num_reloc = f.read32()
- print " ('num_reloc', %r)" % num_reloc
- print " ('flags', %#x)" % f.read32()
- print " ('reserved1', %r)" % f.read32()
- print " ('reserved2', %r)" % f.read32()
- print " ),"
-
- prev_pos = f.tell()
-
- f.seek(reloc_offset)
- print " ('_relocations', ["
- for i in range(num_reloc):
- print " # Relocation %r" % i
- print " (('word-0', %#x)," % f.read32()
- print " ('word-1', %#x))," % f.read32()
- print " ])"
-
- if opts.dumpSectionData:
- f.seek(offset)
- print " ('_section_data', %r)" % f.read(size)
-
- f.seek(prev_pos)
-
-def main():
- from optparse import OptionParser, OptionGroup
- parser = OptionParser("usage: %prog [options] {files}")
- parser.add_option("", "--dump-section-data", dest="dumpSectionData",
- help="Dump the contents of sections",
- action="store_true", default=False)
- (opts, args) = parser.parse_args()
-
- if not args:
- args.append('-')
-
- for arg in args:
- dumpmacho(arg, opts)
-
-if __name__ == '__main__':
- main()
diff --git a/libclamav/c++/llvm/test/TestRunner.sh b/libclamav/c++/llvm/test/TestRunner.sh
deleted file mode 100755
index 4f04d81..0000000
--- a/libclamav/c++/llvm/test/TestRunner.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-#
-# TestRunner.sh - This script is used to run the deja-gnu tests exactly like
-# deja-gnu does, by executing the Tcl script specified in the test case's
-# RUN: lines. This is made possible by a simple make target supported by the
-# test/Makefile. All this script does is invoke that make target.
-#
-# Usage:
-# TestRunner.sh {script_names}
-#
-# This script is typically used by cd'ing to a test directory and then
-# running TestRunner.sh with a list of test file names you want to run.
-#
-TESTPATH=`pwd`
-SUBDIR=""
-if test `dirname $1` = "." ; then
- while test `basename $TESTPATH` != "test" -a ! -z "$TESTPATH" ; do
- tmp=`basename $TESTPATH`
- SUBDIR="$tmp/$SUBDIR"
- TESTPATH=`dirname $TESTPATH`
- done
-fi
-
-for TESTFILE in "$@" ; do
- if test `dirname $TESTFILE` = . ; then
- if test -d "$TESTPATH" ; then
- cd $TESTPATH
- make check-one TESTONE="$SUBDIR$TESTFILE"
- cd $PWD
- else
- echo "Can't find llvm/test directory in " `pwd`
- fi
- else
- make check-one TESTONE=$TESTFILE
- fi
-done
diff --git a/libclamav/c++/llvm/test/Unit/lit.cfg b/libclamav/c++/llvm/test/Unit/lit.cfg
deleted file mode 100644
index 5fe0732..0000000
--- a/libclamav/c++/llvm/test/Unit/lit.cfg
+++ /dev/null
@@ -1,77 +0,0 @@
-# -*- Python -*-
-
-# Configuration file for the 'lit' test runner.
-
-import os
-
-# name: The name of this test suite.
-config.name = 'LLVM-Unit'
-
-# suffixes: A list of file extensions to treat as test files.
-config.suffixes = []
-
-# test_source_root: The root path where tests are located.
-# test_exec_root: The root path where tests should be run.
-llvm_obj_root = getattr(config, 'llvm_obj_root', None)
-if llvm_obj_root is not None:
- config.test_exec_root = os.path.join(llvm_obj_root, 'unittests')
- config.test_source_root = config.test_exec_root
-
-# testFormat: The test format to use to interpret tests.
-llvm_build_mode = getattr(config, 'llvm_build_mode', "Debug")
-config.test_format = lit.formats.GoogleTest(llvm_build_mode, 'Tests')
-
-###
-
-# If necessary, point the dynamic loader at libLLVM.so.
-if config.enable_shared:
- libdir = os.path.join(config.llvm_obj_root, config.llvm_build_mode, 'lib')
- shlibpath = config.environment.get(config.shlibpath_var,'')
- if shlibpath:
- shlibpath = ':' + shlibpath
- shlibpath = libdir + shlibpath
- config.environment[config.shlibpath_var] = shlibpath
-
-# Check that the object root is known.
-if config.test_exec_root is None:
- # Otherwise, we haven't loaded the site specific configuration (the user is
- # probably trying to run on a test file directly, and either the site
- # configuration hasn't been created by the build system, or we are in an
- # out-of-tree build situation).
-
- # Check for 'llvm_unit_site_config' user parameter, and use that if available.
- site_cfg = lit.params.get('llvm_unit_site_config', None)
- if site_cfg and os.path.exists(site_cfg):
- lit.load_config(config, site_cfg)
- raise SystemExit
-
- # Try to detect the situation where we are using an out-of-tree build by
- # looking for 'llvm-config'.
- #
- # FIXME: I debated (i.e., wrote and threw away) adding logic to
- # automagically generate the lit.site.cfg if we are in some kind of fresh
- # build situation. This means knowing how to invoke the build system
- # though, and I decided it was too much magic.
-
- llvm_config = lit.util.which('llvm-config', config.environment['PATH'])
- if not llvm_config:
- lit.fatal('No site specific configuration available!')
-
- # Get the source and object roots.
- llvm_src_root = lit.util.capture(['llvm-config', '--src-root']).strip()
- llvm_obj_root = lit.util.capture(['llvm-config', '--obj-root']).strip()
-
- # Validate that we got a tree which points to here.
- this_src_root = os.path.join(os.path.dirname(__file__),'..','..')
- if os.path.realpath(llvm_src_root) != os.path.realpath(this_src_root):
- lit.fatal('No site specific configuration available!')
-
- # Check that the site specific configuration exists.
- site_cfg = os.path.join(llvm_obj_root, 'test', 'Unit', 'lit.site.cfg')
- if not os.path.exists(site_cfg):
- lit.fatal('No site specific configuration available!')
-
- # Okay, that worked. Notify the user of the automagic, and reconfigure.
- lit.note('using out-of-tree build at %r' % llvm_obj_root)
- lit.load_config(config, site_cfg)
- raise SystemExit
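The fallback at the end of this removed config amounts to: prefer an explicit llvm_unit_site_config parameter, otherwise locate llvm-config on PATH and load test/Unit/lit.site.cfg from the object root it reports. A standalone Python sketch of that lookup (a hypothetical helper, shown only to make the flow explicit; lit normally drives this itself):

    import os, shutil, subprocess

    def find_unit_site_cfg(user_param=None):
        # 1) an explicit user parameter wins
        if user_param and os.path.exists(user_param):
            return user_param
        # 2) otherwise ask llvm-config for the object root
        llvm_config = shutil.which('llvm-config')
        if not llvm_config:
            raise SystemExit('No site specific configuration available!')
        obj_root = subprocess.check_output(
            [llvm_config, '--obj-root']).decode().strip()
        site_cfg = os.path.join(obj_root, 'test', 'Unit', 'lit.site.cfg')
        if not os.path.exists(site_cfg):
            raise SystemExit('No site specific configuration available!')
        return site_cfg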
diff --git a/libclamav/c++/llvm/test/Unit/lit.site.cfg.in b/libclamav/c++/llvm/test/Unit/lit.site.cfg.in
deleted file mode 100644
index 51b5bc4..0000000
--- a/libclamav/c++/llvm/test/Unit/lit.site.cfg.in
+++ /dev/null
@@ -1,12 +0,0 @@
-## Autogenerated by LLVM/Clang configuration.
-# Do not edit!
-config.llvm_src_root = "@LLVM_SOURCE_DIR@"
-config.llvm_obj_root = "@LLVM_BINARY_DIR@"
-config.llvm_tools_dir = "@LLVM_TOOLS_DIR@"
-config.llvmgcc_dir = "@LLVMGCCDIR@"
-config.llvm_build_mode = "@LLVM_BUILD_MODE@"
-config.enable_shared = @ENABLE_SHARED@
-config.shlibpath_var = "@SHLIBPATH_VAR@"
-
-# Let the main config do the real work.
-lit.load_config(config, "@LLVM_SOURCE_DIR@/test/Unit/lit.cfg")
diff --git a/libclamav/c++/llvm/test/Verifier/2002-04-13-RetTypes.ll b/libclamav/c++/llvm/test/Verifier/2002-04-13-RetTypes.ll
deleted file mode 100644
index 197f5c2..0000000
--- a/libclamav/c++/llvm/test/Verifier/2002-04-13-RetTypes.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: not llvm-as < %s |& grep {return type does not match operand type}
-
-; Verify that the operand type of the ret instructions in a function matches the
-; declared return type of the function they live in.
-;
-
-define i32 @testfunc()
-begin
- ret i32* null
-end
diff --git a/libclamav/c++/llvm/test/Verifier/2002-11-05-GetelementptrPointers.ll b/libclamav/c++/llvm/test/Verifier/2002-11-05-GetelementptrPointers.ll
deleted file mode 100644
index 1f71387..0000000
--- a/libclamav/c++/llvm/test/Verifier/2002-11-05-GetelementptrPointers.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: not llvm-as < %s |& grep {invalid getelementptr indices}
-
-; This testcase is invalid because we are indexing into a pointer that is
-; contained WITHIN a structure.
-
-define void @test({i32, i32*} * %X) {
- getelementptr {i32, i32*} * %X, i32 0, i32 1, i32 0
- ret void
-}
diff --git a/libclamav/c++/llvm/test/Verifier/2004-05-21-SwitchConstantMismatch.ll b/libclamav/c++/llvm/test/Verifier/2004-05-21-SwitchConstantMismatch.ll
deleted file mode 100644
index 339a21c..0000000
--- a/libclamav/c++/llvm/test/Verifier/2004-05-21-SwitchConstantMismatch.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-
-
-
-int %main() {
-start1:
- switch uint 0, label %brt0 [int 3, label %brt1 ]
-brt0:
- ret int 0
-brt1:
- ret int 0
-}
-
diff --git a/libclamav/c++/llvm/test/Verifier/2005-03-21-UndefinedTypeReference.ll b/libclamav/c++/llvm/test/Verifier/2005-03-21-UndefinedTypeReference.ll
deleted file mode 100644
index 5299397..0000000
--- a/libclamav/c++/llvm/test/Verifier/2005-03-21-UndefinedTypeReference.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: not llvm-as < %s |& grep {use of undefined type named 'InvalidType'}
-
-define void @test() {
- malloc %InvalidType
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/Verifier/2006-07-11-StoreStruct.ll b/libclamav/c++/llvm/test/Verifier/2006-07-11-StoreStruct.ll
deleted file mode 100644
index 80ab122..0000000
--- a/libclamav/c++/llvm/test/Verifier/2006-07-11-StoreStruct.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: llvm-as < %s |& not grep {Instruction operands must be first-class}
-
-; This previously was for PR826, but structs are now first-class so
-; the following is now valid.
-
- %struct_4 = type { i32 }
-
-define void @test() {
- store %struct_4 zeroinitializer, %struct_4* null
- unreachable
-}
diff --git a/libclamav/c++/llvm/test/Verifier/2006-10-15-AddrLabel.ll b/libclamav/c++/llvm/test/Verifier/2006-10-15-AddrLabel.ll
deleted file mode 100644
index 0b73b47..0000000
--- a/libclamav/c++/llvm/test/Verifier/2006-10-15-AddrLabel.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: not llvm-as < %s > /dev/null |& grep {basic block pointers are invalid}
-
-define i32 @main() {
- %foo = call i8* %llvm.stacksave()
- %foop = bitcast i8* %foo to label*
- %nret = load label* %foop
- br label %nret
-}
diff --git a/libclamav/c++/llvm/test/Verifier/2006-12-12-IntrinsicDefine.ll b/libclamav/c++/llvm/test/Verifier/2006-12-12-IntrinsicDefine.ll
deleted file mode 100644
index b63ae65..0000000
--- a/libclamav/c++/llvm/test/Verifier/2006-12-12-IntrinsicDefine.ll
+++ /dev/null
@@ -1,7 +0,0 @@
-; RUN: not llvm-as < %s |& grep {llvm intrinsics cannot be defined}
-; PR1047
-
-define void @llvm.memcpy.i32(i8*, i8*, i32, i32) {
-entry:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/Verifier/2007-12-21-InvokeParamAttrs.ll b/libclamav/c++/llvm/test/Verifier/2007-12-21-InvokeParamAttrs.ll
deleted file mode 100644
index 709b47b..0000000
--- a/libclamav/c++/llvm/test/Verifier/2007-12-21-InvokeParamAttrs.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-
-declare void @foo(i8*)
-
-define void @bar() {
- invoke void @foo(i8* signext null)
- to label %r unwind label %r
-r:
- ret void
-}
diff --git a/libclamav/c++/llvm/test/Verifier/2008-01-11-VarargAttrs.ll b/libclamav/c++/llvm/test/Verifier/2008-01-11-VarargAttrs.ll
deleted file mode 100644
index b6ce625..0000000
--- a/libclamav/c++/llvm/test/Verifier/2008-01-11-VarargAttrs.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-
- %struct = type { }
-
-declare void @foo(...)
-
-define void @bar() {
- call void (...)* @foo(%struct* sret null )
- ret void
-}
diff --git a/libclamav/c++/llvm/test/Verifier/2008-03-01-AllocaSized.ll b/libclamav/c++/llvm/test/Verifier/2008-03-01-AllocaSized.ll
deleted file mode 100644
index 079a75d..0000000
--- a/libclamav/c++/llvm/test/Verifier/2008-03-01-AllocaSized.ll
+++ /dev/null
@@ -1,8 +0,0 @@
-; RUN: not llvm-as %s -o /dev/null |& grep {Cannot allocate unsized type}
-; PR2113
-
-define void @test() {
- %A = alloca void()
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/Verifier/2008-08-22-MemCpyAlignment.ll b/libclamav/c++/llvm/test/Verifier/2008-08-22-MemCpyAlignment.ll
deleted file mode 100644
index aaf69ae..0000000
--- a/libclamav/c++/llvm/test/Verifier/2008-08-22-MemCpyAlignment.ll
+++ /dev/null
@@ -1,11 +0,0 @@
-; RUN: not llvm-as %s -o /dev/null |& grep {alignment argument of memory intrinsics must be a constant int}
-; PR2318
-
-define void @x(i8* %a, i8* %src, i64 %len, i32 %align) nounwind {
-entry:
- tail call void @llvm.memcpy.i64( i8* %a, i8* %src, i64 %len, i32 %align) nounwind
- ret void
-}
-
-declare void @llvm.memcpy.i64( i8* %a, i8* %src, i64 %len, i32)
-
diff --git a/libclamav/c++/llvm/test/Verifier/2008-11-15-RetVoid.ll b/libclamav/c++/llvm/test/Verifier/2008-11-15-RetVoid.ll
deleted file mode 100644
index dbdcae2..0000000
--- a/libclamav/c++/llvm/test/Verifier/2008-11-15-RetVoid.ll
+++ /dev/null
@@ -1,5 +0,0 @@
-; RUN: not llvm-as < %s |& grep {returns non-void in Function of void return}
-
-define void @foo() {
- ret i32 0
-}
diff --git a/libclamav/c++/llvm/test/Verifier/2009-05-29-InvokeResult1.ll b/libclamav/c++/llvm/test/Verifier/2009-05-29-InvokeResult1.ll
deleted file mode 100644
index bb815b3..0000000
--- a/libclamav/c++/llvm/test/Verifier/2009-05-29-InvokeResult1.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-
-declare i32 @v()
-
-define i32 @f() {
-e:
- %r = invoke i32 @v()
- to label %c unwind label %u ; <i32> [#uses=2]
-
-c: ; preds = %e
- ret i32 %r
-
-u: ; preds = %e
- ret i32 %r
-}
diff --git a/libclamav/c++/llvm/test/Verifier/2009-05-29-InvokeResult2.ll b/libclamav/c++/llvm/test/Verifier/2009-05-29-InvokeResult2.ll
deleted file mode 100644
index 900b1d8..0000000
--- a/libclamav/c++/llvm/test/Verifier/2009-05-29-InvokeResult2.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-
-declare i32 @v()
-
-define i32 @g() {
-e:
- %s = invoke i32 @v()
- to label %c unwind label %u ; <i32> [#uses=2]
-
-c: ; preds = %e
- ret i32 %s
-
-u: ; preds = %e
- %t = phi i32 [ %s, %e ] ; <i32> [#uses=1]
- ret i32 %t
-}
diff --git a/libclamav/c++/llvm/test/Verifier/2009-05-29-InvokeResult3.ll b/libclamav/c++/llvm/test/Verifier/2009-05-29-InvokeResult3.ll
deleted file mode 100644
index 050de46..0000000
--- a/libclamav/c++/llvm/test/Verifier/2009-05-29-InvokeResult3.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-
-declare i32 @v()
-
-define i32 @h() {
-e:
- %s = invoke i32 @v()
- to label %c unwind label %u ; <i32> [#uses=2]
-
-c: ; preds = %e
- br label %d
-
-d: ; preds = %u, %c
- %p = phi i32 [ %s, %c ], [ %s, %u ] ; <i32> [#uses=1]
- ret i32 %p
-
-u: ; preds = %e
- br label %d
-}
diff --git a/libclamav/c++/llvm/test/Verifier/AmbiguousPhi.ll b/libclamav/c++/llvm/test/Verifier/AmbiguousPhi.ll
deleted file mode 100644
index 9a72530..0000000
--- a/libclamav/c++/llvm/test/Verifier/AmbiguousPhi.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: not llvm-as < %s |& grep {multiple entries for the same basic block}
-
-
-
-define i32 @test(i32 %i, i32 %j, i1 %c) {
- br i1 %c, label %A, label %A
-A:
- %a = phi i32 [%i, %0], [%j, %0] ; Error, different values from same block!
- ret i32 %a
-}
diff --git a/libclamav/c++/llvm/test/Verifier/PhiGrouping.ll b/libclamav/c++/llvm/test/Verifier/PhiGrouping.ll
deleted file mode 100644
index dc529dc..0000000
--- a/libclamav/c++/llvm/test/Verifier/PhiGrouping.ll
+++ /dev/null
@@ -1,17 +0,0 @@
-; RUN: not llvm-as < %s |& grep {PHI nodes not grouped at top}
-
-
-
-define i32 @test(i32 %i, i32 %j, i1 %c) {
- br i1 %c, label %A, label %B
-A:
- br label %C
-B:
- br label %C
-
-C:
- %a = phi i32 [%i, %A], [%j, %B]
- %x = add i32 %a, 0 ; Error, PHI's should be grouped!
- %b = phi i32 [%i, %A], [%j, %B]
- ret i32 %x
-}
diff --git a/libclamav/c++/llvm/test/Verifier/README.txt b/libclamav/c++/llvm/test/Verifier/README.txt
deleted file mode 100644
index c041521..0000000
--- a/libclamav/c++/llvm/test/Verifier/README.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-This directory contains testcases that the verifier is supposed to detect as
-malformed LLVM code. Testcases for situations that the verifier incorrectly
-identifies as malformed should go in the test/Assembler directory.
diff --git a/libclamav/c++/llvm/test/Verifier/SelfReferential.ll b/libclamav/c++/llvm/test/Verifier/SelfReferential.ll
deleted file mode 100644
index 70154b7..0000000
--- a/libclamav/c++/llvm/test/Verifier/SelfReferential.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: not llvm-as %s -o /dev/null |& grep {Only PHI nodes may reference their own value}
-
-; Test that self-referential instructions are not allowed
-
-define void @test() {
- %A = add i32 %A, 0 ; <i32> [#uses=1]
- ret void
-}
-
diff --git a/libclamav/c++/llvm/test/Verifier/aliasing-chain.ll b/libclamav/c++/llvm/test/Verifier/aliasing-chain.ll
deleted file mode 100644
index fc5ef1c..0000000
--- a/libclamav/c++/llvm/test/Verifier/aliasing-chain.ll
+++ /dev/null
@@ -1,6 +0,0 @@
-; RUN: not llvm-as %s -o /dev/null |& grep {Aliasing chain should end with function or global variable}
-
-; Test that an aliasing chain does not create a cycle
-
-@b1 = alias i32* @c1
-@c1 = alias i32* @b1
diff --git a/libclamav/c++/llvm/test/Verifier/byval-1.ll b/libclamav/c++/llvm/test/Verifier/byval-1.ll
deleted file mode 100644
index 9bbead0..0000000
--- a/libclamav/c++/llvm/test/Verifier/byval-1.ll
+++ /dev/null
@@ -1,2 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-declare void @h(i32 byval %num)
diff --git a/libclamav/c++/llvm/test/Verifier/byval-2.ll b/libclamav/c++/llvm/test/Verifier/byval-2.ll
deleted file mode 100644
index 1d03715..0000000
--- a/libclamav/c++/llvm/test/Verifier/byval-2.ll
+++ /dev/null
@@ -1,4 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-; PR2711
- %s = type opaque
-declare void @h(%s* byval %num)
diff --git a/libclamav/c++/llvm/test/Verifier/byval-4.ll b/libclamav/c++/llvm/test/Verifier/byval-4.ll
deleted file mode 100644
index b6f9c67..0000000
--- a/libclamav/c++/llvm/test/Verifier/byval-4.ll
+++ /dev/null
@@ -1,4 +0,0 @@
-; RUN: llvm-as %s -o /dev/null
-%struct.foo = type { i64 }
-
-declare void @h(%struct.foo* byval %num)
diff --git a/libclamav/c++/llvm/test/Verifier/dg.exp b/libclamav/c++/llvm/test/Verifier/dg.exp
deleted file mode 100644
index f200589..0000000
--- a/libclamav/c++/llvm/test/Verifier/dg.exp
+++ /dev/null
@@ -1,3 +0,0 @@
-load_lib llvm.exp
-
-RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
diff --git a/libclamav/c++/llvm/test/Verifier/gcread-ptrptr.ll b/libclamav/c++/llvm/test/Verifier/gcread-ptrptr.ll
deleted file mode 100644
index 4ed22fa..0000000
--- a/libclamav/c++/llvm/test/Verifier/gcread-ptrptr.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-; PR1633
-
-%meta = type { i8* }
-%obj = type { %meta* }
-
-declare %obj* @llvm.gcread(%obj*, %obj*)
-
-define %obj* @f() {
-entry:
- %x = call %obj* @llvm.gcread(%obj* null, %obj* null)
- ret %obj* %x
-}
diff --git a/libclamav/c++/llvm/test/Verifier/gcroot-alloca.ll b/libclamav/c++/llvm/test/Verifier/gcroot-alloca.ll
deleted file mode 100644
index 8caa4b9..0000000
--- a/libclamav/c++/llvm/test/Verifier/gcroot-alloca.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-; PR1633
-
-%meta = type { i8* }
-%obj = type { %meta* }
-
-declare void @llvm.gcroot(%obj**, %meta*)
-
-define void @f() {
-entry:
- call void @llvm.gcroot(%obj** null, %meta* null)
-
- ret void
-}
diff --git a/libclamav/c++/llvm/test/Verifier/gcroot-meta.ll b/libclamav/c++/llvm/test/Verifier/gcroot-meta.ll
deleted file mode 100644
index 1836f61..0000000
--- a/libclamav/c++/llvm/test/Verifier/gcroot-meta.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-; PR1633
-
-%meta = type { i8* }
-%obj = type { %meta* }
-
-declare void @llvm.gcroot(%obj**, %meta*)
-
-define void @f() {
-entry:
- %local.obj = alloca %obj*
- %local.meta = alloca %meta
- call void @llvm.gcroot(%obj** %local.obj, %meta* %local.meta)
-
- ret void
-}
diff --git a/libclamav/c++/llvm/test/Verifier/gcroot-ptrptr.ll b/libclamav/c++/llvm/test/Verifier/gcroot-ptrptr.ll
deleted file mode 100644
index b573295..0000000
--- a/libclamav/c++/llvm/test/Verifier/gcroot-ptrptr.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-; PR1633
-
-%meta = type { i8* }
-%obj = type { %meta* }
-
-declare void @llvm.gcroot(%obj*, %meta*)
-
-define void @f() {
-entry:
- %local.obj = alloca %obj
- call void @llvm.gcroot(%obj* %local.obj, %meta* null)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/Verifier/gcwrite-ptrptr.ll b/libclamav/c++/llvm/test/Verifier/gcwrite-ptrptr.ll
deleted file mode 100644
index 1f60bec..0000000
--- a/libclamav/c++/llvm/test/Verifier/gcwrite-ptrptr.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: not llvm-as < %s >& /dev/null
-; PR1633
-
-%meta = type { i8* }
-%obj = type { %meta* }
-
-declare void @llvm.gcwrite(%obj*, %obj*, %obj*)
-
-define void @f() {
-entry:
- call void @llvm.gcwrite(%obj* null, %obj* null, %obj* null)
- ret void
-}
diff --git a/libclamav/c++/llvm/test/Verifier/invoke-1.ll b/libclamav/c++/llvm/test/Verifier/invoke-1.ll
deleted file mode 100644
index 427abe0..0000000
--- a/libclamav/c++/llvm/test/Verifier/invoke-1.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: not llvm-as < %s |& grep {not verify as correct}
-; PR1042
-
-define i32 @foo() {
- %A = invoke i32 @foo( )
- to label %L unwind label %L ; <i32> [#uses=1]
-L: ; preds = %0, %0
- ret i32 %A
-}
-
diff --git a/libclamav/c++/llvm/test/Verifier/invoke-2.ll b/libclamav/c++/llvm/test/Verifier/invoke-2.ll
deleted file mode 100644
index 0145935..0000000
--- a/libclamav/c++/llvm/test/Verifier/invoke-2.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: not llvm-as %s |& grep {not verify as correct}
-; PR1042
-
-define i32 @foo() {
- br i1 false, label %L1, label %L2
-L1: ; preds = %0
- %A = invoke i32 @foo( )
- to label %L unwind label %L ; <i32> [#uses=1]
-L2: ; preds = %0
- br label %L
-L: ; preds = %L2, %L1, %L1
- ret i32 %A
-}
-
diff --git a/libclamav/c++/llvm/test/lib/llvm.exp b/libclamav/c++/llvm/test/lib/llvm.exp
deleted file mode 100644
index 19a2729..0000000
--- a/libclamav/c++/llvm/test/lib/llvm.exp
+++ /dev/null
@@ -1,313 +0,0 @@
-# This procedure executes one line of a test case's execution script.
-proc execOneLine { test PRS outcome lineno line } {
- set status 0
- set resultmsg ""
- set retval [ catch { eval exec -keepnewline -- $line } errmsg ]
- if { $retval != 0 } {
- set code [lindex $::errorCode 0]
- set lineno [expr $lineno + 1]
- if { $PRS != ""} {
- set PRS " for $PRS"
- }
- set errmsg " at line $lineno\nwhile running: $line\n$errmsg"
- switch "$code" {
- CHILDSTATUS {
- set status [lindex $::errorCode 2]
- if { $status != 0 } {
- set resultmsg "$test$PRS\nFailed with exit($status)$errmsg"
- }
- }
- CHILDKILLED {
- set signal [lindex $::errorCode 2]
- set resultmsg "$test$PRS\nFailed with signal($signal)$errmsg"
- }
- CHILDSUSP {
- set signal [lindex $::errorCode 2]
- set resultmsg "$test$PRS\nFailed with suspend($signal)$errmsg"
- }
- POSIX {
- set posixNum [lindex $::errorCode 1]
- set posixMsg [lindex $::errorCode 2]
- set resultmsg "$test$PRS\nFailed with posix($posixNum,$posixMsg)$errmsg"
- }
- NONE {
- # Any other error such as stderr output of a program, or syntax error in
- # the RUN line.
- set resultmsg "$test$PRS\nFailed with unknown error (or has stderr output)$errmsg"
- }
- default {
- set resultmsg "$test$PRS\nFailed with unknown error$errmsg"
- }
- }
- }
- return $resultmsg
-}
-
-# This procedure performs variable substitutions on the RUN: lines of a test
-# case.
-proc substitute { line test tmpFile } {
- global srcroot objroot srcdir objdir subdir target_triplet
- global llvmgcc llvmgxx ocamlopt
- global gccpath gxxpath compile_c compile_cxx link shlibext llvmlibsdir
- global llvmdsymutil valgrind grep gas bugpoint_topts
- set path [file join $srcdir $subdir]
-
- # Substitute all Tcl variables.
- set new_line [subst $line ]
-
- #replace %% with _#MARKER#_ to make the replacement of %% more predictable
- regsub -all {%%} $new_line {_#MARKER#_} new_line
- #replace %llvmgcc_only with actual path to llvmgcc
- regsub -all {%llvmgcc_only} $new_line "$llvmgcc" new_line
- #replace %llvmgcc with actual path to llvmgcc
- regsub -all {%llvmgcc} $new_line "$llvmgcc -emit-llvm -w" new_line
- #replace %llvmgxx with actual path to llvmg++
- regsub -all {%llvmgxx} $new_line "$llvmgxx -emit-llvm -w" new_line
- #replace %compile_cxx with C++ compilation command
- regsub -all {%compile_cxx} $new_line "$compile_cxx" new_line
- #replace %compile_c with C compilation command
- regsub -all {%compile_c} $new_line "$compile_c" new_line
- #replace %link with C++ link command
- regsub -all {%link} $new_line "$link" new_line
- #replace %shlibext with shared library extension
- regsub -all {%shlibext} $new_line "$shlibext" new_line
- #replace %ocamlopt with ocaml compiler command
- regsub -all {%ocamlopt} $new_line "$ocamlopt" new_line
- #replace %llvmdsymutil with dsymutil command
- regsub -all {%llvmdsymutil} $new_line "$llvmdsymutil" new_line
- #replace %llvmlibsdir with configure library directory
- regsub -all {%llvmlibsdir} $new_line "$llvmlibsdir" new_line
- #replace %bugpoint_topts with actual bugpoint target options
- regsub -all {%bugpoint_topts} $new_line "$bugpoint_topts" new_line
- #replace %p with path to source,
- regsub -all {%p} $new_line [file join $srcdir $subdir] new_line
- #replace %s with filename
- regsub -all {%s} $new_line $test new_line
- #replace %t with temp filenames
- regsub -all {%t} $new_line $tmpFile new_line
- #replace %abs_tmp with absolute temp filenames
- regsub -all {%abs_tmp} $new_line [file join [pwd] $tmpFile] new_line
- #replace _#MARKER#_ with %
- regsub -all {_#MARKER#_} $new_line % new_line
-
- #replace grep with GNU grep
- regsub -all { grep } $new_line " $grep " new_line
- #replace as with GNU as
- regsub -all {\| as } $new_line "| $gas " new_line
-
- #valgind related stuff
-# regsub -all {bugpoint } $new_line "$valgrind bugpoint " new_line
- regsub -all {llc } $new_line "$valgrind llc " new_line
- regsub -all {lli } $new_line "$valgrind lli " new_line
- regsub -all {llvm-ar } $new_line "$valgrind llvm-ar " new_line
- regsub -all {llvm-as } $new_line "$valgrind llvm-as " new_line
- regsub -all {llvm-bcanalyzer } $new_line "$valgrind llvm-bcanalyzer " new_line
- regsub -all {llvm-dis } $new_line "$valgrind llvm-dis " new_line
- regsub -all {llvm-extract } $new_line "$valgrind llvm-extract " new_line
- regsub -all {llvm-ld } $new_line "$valgrind llvm-ld " new_line
- regsub -all {llvm-link } $new_line "$valgrind llvm-link " new_line
- regsub -all {llvm-nm } $new_line "$valgrind llvm-nm " new_line
- regsub -all {llvm-prof } $new_line "$valgrind llvm-prof " new_line
- regsub -all {llvm-ranlib } $new_line "$valgrind llvm-ranlib " new_line
- regsub -all {([^a-zA-Z_-])opt } $new_line "\\1$valgrind opt " new_line
- regsub -all {^opt } $new_line "$valgrind opt " new_line
- regsub -all {tblgen } $new_line "$valgrind tblgen " new_line
- regsub -all "not $valgrind " $new_line "$valgrind not " new_line
-
- return $new_line
-}
-
-# This procedure runs the set of tests for the test_source_files array.
-proc RunLLVMTests { test_source_files } {
- global srcroot objroot srcdir objdir subdir target_triplet
- set timeout 60
-
- set path [file join $objdir $subdir]
-
- #Make Output Directory if it does not exist already
- if { [file exists path] } {
- cd $path
- } else {
- file mkdir $path
- cd $path
- }
-
- file mkdir Output
- cd Output
-
- foreach test $test_source_files {
- #Should figure out best way to set the timeout
- #set timeout 40
-
- set filename [file tail $test]
- verbose "ABOUT TO RUN: $filename" 2
- set outcome PASS
- set tmpFile "$filename.tmp"
-
- # Mark that it should not be XFAIL for this target.
- set targetPASS 0
-
- #set hasRunline bool to check if testcase has a runline
- set numLines 0
-
- # Open the test file and start reading lines
- set testFileId [ open $test r]
- set runline ""
- set PRNUMS ""
- foreach line [split [read $testFileId] \n] {
-
- # if its the END. line then stop parsing (optimization for big files)
- if {[regexp {END.[[:space:]]*$} $line match endofscript]} {
- break
-
- # if the line is continued, concatenate and continue the loop
- } elseif {[regexp {RUN: *(.+)(\\)$} $line match oneline suffix]} {
- set runline "$runline$oneline "
-
- # if its a terminating RUN: line then do substitution on the whole line
- # and then save the line.
- } elseif {[regexp {RUN: *(.+)$} $line match oneline suffix]} {
- set runline "$runline$oneline"
- set runline [ substitute $runline $test $tmpFile ]
- set lines($numLines) $runline
- set numLines [expr $numLines + 1]
- set runline ""
-
- # if its an PR line, save the problem report number
- } elseif {[regexp {PR([0-9]+)} $line match prnum]} {
- if {$PRNUMS == ""} {
- set PRNUMS "PR$prnum"
- } else {
- set PRNUMS "$PRNUMS,$prnum"
- }
- # if its an XFAIL line, see if we should be XFAILing or not.
- } elseif {[regexp {XFAIL:[ *](.+)} $line match targets]} {
- set targets
-
-#split up target if more than 1 specified
- foreach target [split $targets ,] {
- if { $target == "*" } {
- if {$targetPASS != 1} {
- set outcome XFAIL
- }
- } elseif { [regexp $target $target_triplet match] } {
- if {$targetPASS != 1} {
- set outcome XFAIL
- }
- }
- }
- } elseif {[regexp {XTARGET:[ *](.+)} $line match targets]} {
- set targets
-
-#split up target if more than 1 specified
- foreach target [split $targets ,] {
- if { [regexp {\*} $target match] } {
- set targetPASS 1
- set outcome PASS
- } elseif { [regexp $target $target_triplet match] } {
- set targetPASS 1
- set outcome PASS
- }
- }
- }
- }
-
- # Done reading the script
- close $testFileId
-
-
- if { $numLines == 0 } {
- fail "$test: \nDoes not have a RUN line\n"
- } else {
- set failed 0
- for { set i 0 } { $i < $numLines } { set i [ expr $i + 1 ] } {
- regsub ^.*RUN:(.*) $lines($i) \1 theLine
- set resultmsg [execOneLine $test $PRNUMS $outcome $i $theLine ]
- if { $resultmsg != "" } {
- if { $outcome == "XFAIL" } {
- xfail "$resultmsg"
- } else {
- fail "$resultmsg"
- }
- set failed 1
- break
- }
- }
- if { $failed } {
- continue
- } else {
- if { $PRNUMS != "" } {
- set PRNUMS " for $PRNUMS"
- }
- if { $outcome == "XFAIL" } {
- xpass "$test$PRNUMS"
- } else {
- pass "$test$PRNUMS"
- }
- }
- }
- }
-}
-
-# This procedure provides an interface to check the LLVMGCC_LANGS makefile
-# variable to see if llvm-gcc supports compilation of a particular language.
-proc llvm_gcc_supports { lang } {
- global llvmgcc llvmgcc_langs
- # validate the language choices and determine the name of the compiler
- # component responsible for determining if the compiler has been built.
- switch "$lang" {
- ada { set file gnat1 }
- c { set file cc1 }
- c++ { set file cc1plus }
- objc { set file cc1obj }
- obj-c++ { set file cc1objplus }
- fortran { set file f951 }
- default { return 0 }
- }
- foreach supported_lang [split "$llvmgcc_langs" ,] {
- if { "$lang" == "$supported_lang" } {
- # FIXME: Knowing it is configured is not enough. We should do two more
- # checks here. First, we need to run llvm-gcc -print-prog-name=$file to
- # get the path to the compiler. If we don't get a path, the language isn't
- # properly configured or built. If we do get a path, we should check to
- # make sure that it is executable and perhaps even try executing it.
- return 1;
- }
- }
- return 0;
-}
-
-# This procedure provides an interface to check the TARGETS_TO_BUILD makefile
-# variable to see if a particular target has been configured to build. This
-# helps avoid running tests for targets that aren't available.
-proc llvm_supports_target { tgtName } {
- global TARGETS_TO_BUILD
- foreach target [split $TARGETS_TO_BUILD] {
- if { [regexp $tgtName $target match] } {
- return 1
- }
- }
- return 0
-}
-
-proc llvm_supports_darwin_and_target { tgtName } {
- global target_triplet
- if { [ llvm_supports_target $tgtName ] } {
- if { [regexp darwin $target_triplet match] } {
- return 1
- }
- }
- return 0
-}
-
-# This procedure provides an interface to check the BINDINGS_TO_BUILD makefile
-# variable to see if a particular binding has been configured to build.
-proc llvm_supports_binding { name } {
- global llvm_bindings
- foreach item [split $llvm_bindings] {
- if { [regexp $name $item match] } {
- return 1
- }
- }
- return 0
-}
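The heart of the removed llvm.exp is the substitute{} procedure: every '%'-token in a RUN: line is rewritten before the line is handed to exec, with '%%' protected via a marker so it survives as a literal '%'. A minimal Python sketch of the same idea (only the path-related tokens are shown; the valgrind and tool-name rewriting is omitted):

    def substitute(line, test, tmp_file, src_path):
        line = line.replace('%%', '_#MARKER#_')   # protect literal %%
        line = line.replace('%p', src_path)       # path to the test's source dir
        line = line.replace('%s', test)           # the test file itself
        line = line.replace('%t', tmp_file)       # per-test temporary file
        return line.replace('_#MARKER#_', '%')    # restore literal %

    print(substitute('llvm-as < %s > %t', 'byval-1.ll', 'byval-1.ll.tmp', '.'))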
diff --git a/libclamav/c++/llvm/test/lib/llvm2cpp.exp b/libclamav/c++/llvm/test/lib/llvm2cpp.exp
deleted file mode 100644
index f453033..0000000
--- a/libclamav/c++/llvm/test/lib/llvm2cpp.exp
+++ /dev/null
@@ -1,100 +0,0 @@
-# This file defines a Tcl proc to assist with testing llvm2cpp. There are
-# no llvm2cpp specific test cases. Instead, it utilizes all the existing test
-# cases and makes sure llvm2cpp can run them. The basic idea is that we find
-# all the LLVM Assembly (*.ll) files, run llvm2cpp on them to generate a C++
-# program, compile those programs, run them and see if what they produce matches
-# the original input to llvm2cpp.
-
-proc llvm2cpp-test { files } {
- global subdir llvmtoolsdir llvmlibsdir objdir srcdir objroot srcroot
- set timeout 30
- set path [file join $objdir $subdir]
- set llc [file join $llvmtoolsdir llc ]
- set llvmas [file join $llvmtoolsdir llvm-as ]
- set llvmdis [file join $llvmtoolsdir llvm-dis ]
-
- #Make Output Directory if it does not exist already
- if { [file exists path] } {
- cd $path
- } else {
- file mkdir $path
- cd $path
- }
-
- file mkdir Output
-
- foreach test $files {
-
- set filename [file tail $test]
- set generated [file join Output $filename.cpp]
- set executable [file join Output $filename.exe]
- set output [file join Output $filename.gen]
- set assembly [file join Output $filename.asm]
- set testname [file rootname $filename]
- set bytecode [file join Output $filename.bc]
-
- # Note that the stderr for llvm-as, etc. must be redirected to /dev/null
- # because otherwise exec will see the msgs and return 1 even though they
- # are only warnings. If real errors are generated on stderr then llvm-as
- # will return a non-zero retval anyway so we're good.
-
- # Scan the test file to see if there's an XFAIL line. If so, don't run it
- set retval [ catch {
- exec -keepnewline grep XFAIL $test 2>/dev/null } msg ]
- if { $retval == 0 } {
- continue;
- }
-
- # Run llvm-as/llvm-dis
- set pipeline llvm-as|llvm-dis
- set retval [ catch {
- exec -keepnewline $llvmas < $test -o - | $llvmdis -o $assembly 2>/dev/null } msg ]
-
- if { $retval != 0 } {
- fail "$test: $pipeline returned $retval\n$msg"
- continue
- }
-
- # Build bytecode for llvm2cpp input
- set retval [ catch {
- exec -keepnewline $llvmas < $assembly > $bytecode 2>/dev/null } msg ]
-
- if { $retval != 0 } {
- fail "$test: llvm-as returned $retval\n$msg"
- continue
- }
-
- set retval [ catch {
- exec -keepnewline $llc -march=cpp -o $generated < $bytecode 2>/dev/null } msg]
-
- if { $retval != 0 } {
- fail "$test: llvm2cpp returned $retval\n$msg"
- continue
- }
-
- set retval [ catch {
- exec -keepnewline gcc -g -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS -o $executable $generated -I$srcroot/include -I$objroot/include -L$llvmlibsdir -lLLVMCore -lLLVMSupport -lLLVMSystem -lstdc++ } msg ]
- if { $retval != 0 } {
- fail "$test: gcc returned $retval\n$msg"
- continue
- }
-
- set retval [ catch { exec -keepnewline $executable > $output } msg ]
- if { $retval != 0 } {
- set execname [file tail $executable]
- fail "$test: $execname returned $retval:\n$msg"
- continue
- }
-
- set retval [ catch {
- exec -keepnewline diff $assembly $output } msg ]
-
- if { $retval != 0 } {
- fail "$test: diff returned $retval:\n$msg"
- continue
- }
- pass "$test"
- }
-}
-
-
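The round trip described in the removed header comment — canonicalize with llvm-as|llvm-dis, build bytecode, run llc -march=cpp, then compile the generated C++ and diff its output against the canonical assembly — can be summarized roughly as follows in Python (tool names come from the proc above; the gcc link step and error handling are elided):

    import subprocess

    def roundtrip(test, llvm_as='llvm-as', llvm_dis='llvm-dis', llc='llc'):
        asm, bc, gen = test + '.asm', test + '.bc', test + '.cpp'
        # canonicalize the assembly: llvm-as | llvm-dis
        subprocess.run(f'{llvm_as} < {test} -o - | {llvm_dis} -o {asm}',
                       shell=True, check=True)
        # assemble the canonical form to bytecode
        subprocess.run(f'{llvm_as} < {asm} > {bc}', shell=True, check=True)
        # run the C++ backend over it
        subprocess.run(f'{llc} -march=cpp -o {gen} < {bc}', shell=True, check=True)
        # compiling and running 'gen' should reproduce 'asm' byte for byte
        return asm, gen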
diff --git a/libclamav/c++/llvm/test/lit.cfg b/libclamav/c++/llvm/test/lit.cfg
deleted file mode 100644
index 0f49f4b..0000000
--- a/libclamav/c++/llvm/test/lit.cfg
+++ /dev/null
@@ -1,200 +0,0 @@
-# -*- Python -*-
-
-# Configuration file for the 'lit' test runner.
-
-import os
-
-# name: The name of this test suite.
-config.name = 'LLVM'
-
-# testFormat: The test format to use to interpret tests.
-config.test_format = lit.formats.TclTest()
-
-# suffixes: A list of file extensions to treat as test files; this is actually
-# set by on_clone().
-config.suffixes = []
-
-# test_source_root: The root path where tests are located.
-config.test_source_root = os.path.dirname(__file__)
-
-# test_exec_root: The root path where tests should be run.
-llvm_obj_root = getattr(config, 'llvm_obj_root', None)
-if llvm_obj_root is not None:
- config.test_exec_root = os.path.join(llvm_obj_root, 'test')
-
-# Tweak the PATH to include the scripts dir, the tools dir, and the llvm-gcc bin
-# dir (if available).
-if llvm_obj_root is not None:
- llvm_src_root = getattr(config, 'llvm_src_root', None)
- if not llvm_src_root:
- lit.fatal('No LLVM source root set!')
- path = os.path.pathsep.join((os.path.join(llvm_src_root, 'test',
- 'Scripts'),
- config.environment['PATH']))
- config.environment['PATH'] = path
-
- llvm_tools_dir = getattr(config, 'llvm_tools_dir', None)
- if not llvm_tools_dir:
- lit.fatal('No LLVM tools dir set!')
- path = os.path.pathsep.join((llvm_tools_dir, config.environment['PATH']))
- config.environment['PATH'] = path
-
- llvmgcc_dir = getattr(config, 'llvmgcc_dir', None)
- if llvmgcc_dir:
- path = os.path.pathsep.join((os.path.join(llvmgcc_dir, 'bin'),
- config.environment['PATH']))
- config.environment['PATH'] = path
-
-# Propagate 'HOME' through the environment.
-config.environment['HOME'] = os.environ['HOME']
-
-###
-
-import os
-
-# Check that the object root is known.
-if config.test_exec_root is None:
- # Otherwise, we haven't loaded the site specific configuration (the user is
- # probably trying to run on a test file directly, and either the site
- # configuration hasn't been created by the build system, or we are in an
- # out-of-tree build situation).
-
- # Check for 'llvm_site_config' user parameter, and use that if available.
- site_cfg = lit.params.get('llvm_site_config', None)
- if site_cfg and os.path.exists(site_cfg):
- lit.load_config(config, site_cfg)
- raise SystemExit
-
- # Try to detect the situation where we are using an out-of-tree build by
- # looking for 'llvm-config'.
- #
- # FIXME: I debated (i.e., wrote and threw away) adding logic to
- # automagically generate the lit.site.cfg if we are in some kind of fresh
- # build situation. This means knowing how to invoke the build system
- # though, and I decided it was too much magic.
-
- llvm_config = lit.util.which('llvm-config', config.environment['PATH'])
- if not llvm_config:
- lit.fatal('No site specific configuration available!')
-
- # Get the source and object roots.
- llvm_src_root = lit.util.capture(['llvm-config', '--src-root']).strip()
- llvm_obj_root = lit.util.capture(['llvm-config', '--obj-root']).strip()
-
- # Validate that we got a tree which points to here.
- this_src_root = os.path.dirname(config.test_source_root)
- if os.path.realpath(llvm_src_root) != os.path.realpath(this_src_root):
- lit.fatal('No site specific configuration available!')
-
- # Check that the site specific configuration exists.
- site_cfg = os.path.join(llvm_obj_root, 'test', 'lit.site.cfg')
- if not os.path.exists(site_cfg):
- lit.fatal('No site specific configuration available!')
-
- # Okay, that worked. Notify the user of the automagic, and reconfigure.
- lit.note('using out-of-tree build at %r' % llvm_obj_root)
- lit.load_config(config, site_cfg)
- raise SystemExit
-
-###
-
-# Load site data from DejaGNU's site.exp.
-import re
-site_exp = {}
-# FIXME: Implement lit.site.cfg.
-for line in open(os.path.join(config.llvm_obj_root, 'test', 'site.exp')):
- m = re.match('set ([^ ]+) "([^"]*)"', line)
- if m:
- site_exp[m.group(1)] = m.group(2)
-
-# Add substitutions.
-config.substitutions.append(('%llvmgcc_only', site_exp['llvmgcc']))
-config.substitutions.append(('grep ', site_exp['grep']+" "))
-for sub in ['llvmgcc', 'llvmgxx', 'compile_cxx', 'compile_c',
- 'link', 'shlibext', 'ocamlopt', 'llvmdsymutil', 'llvmlibsdir',
- 'bugpoint_topts']:
- if sub in ('llvmgcc', 'llvmgxx'):
- config.substitutions.append(('%' + sub,
- site_exp[sub] + ' -emit-llvm -w'))
- # FIXME: This is a hack to avoid LLVMC tests failing due to a clang driver
- # warning when passing in "-fexceptions -fno-exceptions".
- elif sub == 'compile_cxx':
- config.substitutions.append(('%' + sub,
- site_exp[sub].replace('-fno-exceptions', '')))
- else:
- config.substitutions.append(('%' + sub, site_exp[sub]))
-
-excludes = []
-
-# Provide target_triple for use in XFAIL and XTARGET.
-config.target_triple = site_exp['target_triplet']
-
-# Provide llvm_supports_target for use in local configs.
-targets = set(site_exp["TARGETS_TO_BUILD"].split())
-def llvm_supports_target(name):
- return name in targets
-
-def llvm_supports_darwin_and_target(name):
- return 'darwin' in config.target_triple and llvm_supports_target(name)
-
-langs = set(site_exp['llvmgcc_langs'].split(','))
-def llvm_gcc_supports(name):
- return name in langs
-
-bindings = set(site_exp['llvm_bindings'].split(','))
-def llvm_supports_binding(name):
- return name in bindings
-
-config.conditions["TARGET"] = llvm_supports_target
-config.conditions["BINDING"] = llvm_supports_binding
-
-# Provide on_clone hook for reading 'dg.exp'.
-import os
-simpleLibData = re.compile(r"""load_lib llvm.exp
-
-RunLLVMTests \[lsort \[glob -nocomplain \$srcdir/\$subdir/\*\.(.*)\]\]""",
- re.MULTILINE)
-conditionalLibData = re.compile(r"""load_lib llvm.exp
-
-if.*\[ ?(llvm[^ ]*) ([^ ]*) ?\].*{
- *RunLLVMTests \[lsort \[glob -nocomplain \$srcdir/\$subdir/\*\.(.*)\]\]
-\}""", re.MULTILINE)
-def on_clone(parent, cfg, for_path):
- def addSuffixes(match):
- if match[0] == '{' and match[-1] == '}':
- cfg.suffixes = ['.' + s for s in match[1:-1].split(',')]
- else:
- cfg.suffixes = ['.' + match]
-
- libPath = os.path.join(os.path.dirname(for_path),
- 'dg.exp')
- if not os.path.exists(libPath):
- cfg.unsupported = True
- return
-
- # Reset unsupported, in case we inherited it.
- cfg.unsupported = False
- lib = open(libPath).read().strip()
-
- # Check for a simple library.
- m = simpleLibData.match(lib)
- if m:
- addSuffixes(m.group(1))
- return
-
- # Check for a conditional test set.
- m = conditionalLibData.match(lib)
- if m:
- funcname,arg,match = m.groups()
- addSuffixes(match)
-
- func = globals().get(funcname)
- if not func:
- lit.error('unsupported predicate %r' % funcname)
- elif not func(arg):
- cfg.unsupported = True
- return
- # Otherwise, give up.
- lit.error('unable to understand %r:\n%s' % (libPath, lib))
-
-config.on_clone = on_clone
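Until a real lit.site.cfg exists, this removed config recovers the DejaGNU settings by scanning site.exp with a single regular expression. The parse step in isolation, using the same regex on sample lines with placeholder values:

    import re

    sample = ['set target_triplet "x86_64-unknown-linux-gnu"',
              'set grep "/bin/grep"']
    site_exp = {}
    for line in sample:
        m = re.match('set ([^ ]+) "([^"]*)"', line)
        if m:
            site_exp[m.group(1)] = m.group(2)
    print(site_exp)   # {'target_triplet': '...', 'grep': '/bin/grep'}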
diff --git a/libclamav/c++/llvm/test/lit.site.cfg.in b/libclamav/c++/llvm/test/lit.site.cfg.in
deleted file mode 100644
index 88699e3..0000000
--- a/libclamav/c++/llvm/test/lit.site.cfg.in
+++ /dev/null
@@ -1,9 +0,0 @@
-## Autogenerated by LLVM/Clang configuration.
-# Do not edit!
-config.llvm_src_root = "@LLVM_SOURCE_DIR@"
-config.llvm_obj_root = "@LLVM_BINARY_DIR@"
-config.llvm_tools_dir = "@LLVM_TOOLS_DIR@"
-config.llvmgcc_dir = "@LLVMGCCDIR@"
-
-# Let the main config do the real work.
-lit.load_config(config, "@LLVM_SOURCE_DIR@/test/lit.cfg")
diff --git a/libclamav/c++/llvm/test/site.exp.in b/libclamav/c++/llvm/test/site.exp.in
deleted file mode 100644
index c760c2c..0000000
--- a/libclamav/c++/llvm/test/site.exp.in
+++ /dev/null
@@ -1,26 +0,0 @@
-## Autogenerated by LLVM configuration.
-# Do not edit!
-set target_triplet "@TARGET_TRIPLE@"
-set TARGETS_TO_BUILD "@TARGETS_TO_BUILD@"
-set llvmgcc_langs "@LLVMGCC_LANGS@"
-set llvmtoolsdir "@LLVM_TOOLS_DIR@"
-set llvmlibsdir "@LLVM_LIBS_DIR@"
-set llvm_bindings "@LLVM_BINDINGS@"
-set srcroot "@LLVM_SOURCE_DIR@"
-set objroot "@LLVM_BINARY_DIR@"
-set srcdir "@LLVM_SOURCE_DIR@"
-set objdir "@LLVM_BINARY_DIR@"
-set gccpath "@GCCPATH@"
-set gxxpath "@GXXPATH@"
-set compile_c "@TEST_COMPILE_C_CMD@"
-set compile_cxx "@TEST_COMPILE_CXX_CMD@"
-set link "@TEST_LINK_CMD@"
-set llvmgcc "@LLVMGCC@"
-set llvmgxx "@LLVMGXX@"
-set bugpoint_topts "@BUGPOINT_TOPTS@"
-set shlibext "@SHLIBEXT@"
-set ocamlopt "@OCAMLOPT@"
-set valgrind "@VALGRIND@"
-set grep "@GREP@"
-set gas "@AS@"
-set llvmdsymutil "@DSYMUTIL@"
diff --git a/libclamav/c++/llvm/tools/CMakeLists.txt b/libclamav/c++/llvm/tools/CMakeLists.txt
deleted file mode 100644
index 55a2441..0000000
--- a/libclamav/c++/llvm/tools/CMakeLists.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-# NOTE: The tools are organized into five groups of four consisting of one
-# large and three small executables. This is done to minimize memory load
-# in parallel builds. Please retain this ordering.
-
-if( NOT WIN32 OR MSYS OR CYGWIN )
- # It is useful to build llvm-config before the other tools, so we
- # have a fresh LibDeps.txt for regenerating the hard-coded library
- # dependencies. llvm-config/CMakeLists.txt takes care of this but we
- # must keep llvm-config as the first entry on the list of tools to
- # be built.
- add_subdirectory(llvm-config)
-endif()
-
-add_subdirectory(llvm-as)
-add_subdirectory(llvm-dis)
-
-add_subdirectory(llc)
-
-add_subdirectory(lli)
-
-
-add_subdirectory(llvmc)
-
-if( EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/clang/CMakeLists.txt )
- add_subdirectory( ${CMAKE_CURRENT_SOURCE_DIR}/clang )
-endif( EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/clang/CMakeLists.txt )
-
-set(LLVM_COMMON_DEPENDS ${LLVM_COMMON_DEPENDS} PARENT_SCOPE)
diff --git a/libclamav/c++/llvm/tools/Makefile b/libclamav/c++/llvm/tools/Makefile
deleted file mode 100644
index a108881..0000000
--- a/libclamav/c++/llvm/tools/Makefile
+++ /dev/null
@@ -1,59 +0,0 @@
-##===- tools/Makefile --------------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL := ..
-
-# Build clang if present.
-OPTIONAL_PARALLEL_DIRS := clang
-
-# NOTE: The tools are organized into five groups of four consisting of one
-# large and three small executables. This is done to minimize memory load
-# in parallel builds. Please retain this ordering.
-DIRS := llvm-config
-PARALLEL_DIRS := opt llvm-as llvm-dis \
- llc llvm-ranlib llvm-ar llvm-nm \
- llvm-ld llvm-prof llvm-link \
- lli llvm-extract \
- bugpoint llvm-bcanalyzer llvm-stub \
- llvm-mc llvmc \
- edis
-
-
-# Let users override the set of tools to build from the command line.
-ifdef ONLY_TOOLS
- OPTIONAL_PARALLEL_DIRS :=
- PARALLEL_DIRS := $(ONLY_TOOLS)
-endif
-
-include $(LEVEL)/Makefile.config
-
-# These libraries build as dynamic libraries (.dylib/.so); they can only be
-# built if ENABLE_PIC is set.
-ifeq ($(ENABLE_PIC),1)
- # No support for dynamic libraries on windows targets.
- ifneq ($(TARGET_OS), $(filter $(TARGET_OS), Cygwin MingW))
- PARALLEL_DIRS += edis
-
- # gold only builds if binutils is around. It requires "lto" to be built
- # before it, so "lto" is added to DIRS.
- ifdef BINUTILS_INCDIR
- PARALLEL_DIRS += gold
- DIRS += lto
- else
- PARALLEL_DIRS += lto
- endif
- endif
-endif
-
-# Only build edis if X86 target support is enabled.
-ifeq ($(filter $(TARGETS_TO_BUILD), X86),)
- PARALLEL_DIRS := $(filter-out edis, $(PARALLEL_DIRS))
-endif
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/llc/CMakeLists.txt b/libclamav/c++/llvm/tools/llc/CMakeLists.txt
deleted file mode 100644
index 683f298..0000000
--- a/libclamav/c++/llvm/tools/llc/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-set(LLVM_LINK_COMPONENTS ${LLVM_TARGETS_TO_BUILD} bitreader asmparser)
-
-add_llvm_tool(llc
- llc.cpp
- )
diff --git a/libclamav/c++/llvm/tools/llc/Makefile b/libclamav/c++/llvm/tools/llc/Makefile
deleted file mode 100644
index 7319aad..0000000
--- a/libclamav/c++/llvm/tools/llc/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-#===- tools/llc/Makefile -----------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TOOLNAME = llc
-
-# Include this here so we can get the configuration of the targets
-# that have been configured for construction. We have to do this
-# early so we can set up LINK_COMPONENTS before including Makefile.rules
-include $(LEVEL)/Makefile.config
-
-LINK_COMPONENTS := $(TARGETS_TO_BUILD) bitreader asmparser
-
-include $(LLVM_SRC_ROOT)/Makefile.rules
-
diff --git a/libclamav/c++/llvm/tools/llc/llc.cpp b/libclamav/c++/llvm/tools/llc/llc.cpp
deleted file mode 100644
index 810ba42..0000000
--- a/libclamav/c++/llvm/tools/llc/llc.cpp
+++ /dev/null
@@ -1,389 +0,0 @@
-//===-- llc.cpp - Implement the LLVM Native Code Generator ----------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This is the llc code generator driver. It provides a convenient
-// command-line interface for generating native assembly-language code
-// or C code, given LLVM bitcode.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
-#include "llvm/PassManager.h"
-#include "llvm/Pass.h"
-#include "llvm/ADT/Triple.h"
-#include "llvm/Analysis/Verifier.h"
-#include "llvm/Support/IRReader.h"
-#include "llvm/CodeGen/LinkAllAsmWriterComponents.h"
-#include "llvm/CodeGen/LinkAllCodegenComponents.h"
-#include "llvm/Config/config.h"
-#include "llvm/LinkAllVMCore.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/FileUtilities.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/PluginLoader.h"
-#include "llvm/Support/PrettyStackTrace.h"
-#include "llvm/System/Host.h"
-#include "llvm/System/Signals.h"
-#include "llvm/Target/SubtargetFeature.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetRegistry.h"
-#include "llvm/Target/TargetSelect.h"
-#include "llvm/Transforms/Scalar.h"
-#include <memory>
-using namespace llvm;
-
-// General options for llc. Other pass-specific options are specified
-// within the corresponding llc passes, and target-specific options
-// and back-end code generation options are specified with the target machine.
-//
-static cl::opt<std::string>
-InputFilename(cl::Positional, cl::desc("<input bitcode>"), cl::init("-"));
-
-static cl::opt<std::string>
-OutputFilename("o", cl::desc("Output filename"), cl::value_desc("filename"));
-
-static cl::opt<bool>
-Force("f", cl::desc("Enable binary output on terminals"));
-
-// Determine optimization level.
-static cl::opt<char>
-OptLevel("O",
- cl::desc("Optimization level. [-O0, -O1, -O2, or -O3] "
- "(default = '-O2')"),
- cl::Prefix,
- cl::ZeroOrMore,
- cl::init(' '));
-
-static cl::opt<std::string>
-TargetTriple("mtriple", cl::desc("Override target triple for module"));
-
-static cl::opt<std::string>
-MArch("march", cl::desc("Architecture to generate code for (see --version)"));
-
-static cl::opt<std::string>
-MCPU("mcpu",
- cl::desc("Target a specific cpu type (-mcpu=help for details)"),
- cl::value_desc("cpu-name"),
- cl::init(""));
-
-static cl::list<std::string>
-MAttrs("mattr",
- cl::CommaSeparated,
- cl::desc("Target specific attributes (-mattr=help for details)"),
- cl::value_desc("a1,+a2,-a3,..."));
-
-cl::opt<TargetMachine::CodeGenFileType>
-FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
- cl::desc("Choose a file type (not all types are supported by all targets):"),
- cl::values(
- clEnumValN(TargetMachine::CGFT_AssemblyFile, "asm",
- "Emit an assembly ('.s') file"),
- clEnumValN(TargetMachine::CGFT_ObjectFile, "obj",
- "Emit a native object ('.o') file [experimental]"),
- clEnumValN(TargetMachine::CGFT_Null, "null",
- "Emit nothing, for performance testing"),
- clEnumValEnd));
-
-cl::opt<bool> NoVerify("disable-verify", cl::Hidden,
- cl::desc("Do not verify input module"));
-
-
-static cl::opt<bool>
-DisableRedZone("disable-red-zone",
- cl::desc("Do not emit code that uses the red zone."),
- cl::init(false));
-
-static cl::opt<bool>
-NoImplicitFloats("no-implicit-float",
- cl::desc("Don't generate implicit floating point instructions (x86-only)"),
- cl::init(false));
-
-// GetFileNameRoot - Helper function to strip the .bc/.ll extension from a filename.
-static inline std::string
-GetFileNameRoot(const std::string &InputFilename) {
- std::string IFN = InputFilename;
- std::string outputFilename;
- int Len = IFN.length();
- if ((Len > 2) &&
- IFN[Len-3] == '.' &&
- ((IFN[Len-2] == 'b' && IFN[Len-1] == 'c') ||
- (IFN[Len-2] == 'l' && IFN[Len-1] == 'l'))) {
- outputFilename = std::string(IFN.begin(), IFN.end()-3); // s/.bc/.s/
- } else {
- outputFilename = IFN;
- }
- return outputFilename;
-}
-
-static formatted_raw_ostream *GetOutputStream(const char *TargetName,
- const char *ProgName) {
- if (OutputFilename != "") {
- if (OutputFilename == "-")
- return &fouts();
-
- // Make sure that the Out file gets unlinked from the disk if we get a
- // SIGINT
- sys::RemoveFileOnSignal(sys::Path(OutputFilename));
-
- std::string error;
- raw_fd_ostream *FDOut =
- new raw_fd_ostream(OutputFilename.c_str(), error,
- raw_fd_ostream::F_Binary);
- if (!error.empty()) {
- errs() << error << '\n';
- delete FDOut;
- return 0;
- }
- formatted_raw_ostream *Out =
- new formatted_raw_ostream(*FDOut, formatted_raw_ostream::DELETE_STREAM);
-
- return Out;
- }
-
- if (InputFilename == "-") {
- OutputFilename = "-";
- return &fouts();
- }
-
- OutputFilename = GetFileNameRoot(InputFilename);
-
- bool Binary = false;
- switch (FileType) {
- default: assert(0 && "Unknown file type");
- case TargetMachine::CGFT_AssemblyFile:
- if (TargetName[0] == 'c') {
- if (TargetName[1] == 0)
- OutputFilename += ".cbe.c";
- else if (TargetName[1] == 'p' && TargetName[2] == 'p')
- OutputFilename += ".cpp";
- else
- OutputFilename += ".s";
- } else
- OutputFilename += ".s";
- break;
- case TargetMachine::CGFT_ObjectFile:
- OutputFilename += ".o";
- Binary = true;
- break;
- case TargetMachine::CGFT_Null:
- OutputFilename += ".null";
- Binary = true;
- break;
- }
-
- // Make sure that the Out file gets unlinked from the disk if we get a
- // SIGINT
- sys::RemoveFileOnSignal(sys::Path(OutputFilename));
-
- std::string error;
- unsigned OpenFlags = 0;
- if (Binary) OpenFlags |= raw_fd_ostream::F_Binary;
- raw_fd_ostream *FDOut = new raw_fd_ostream(OutputFilename.c_str(), error,
- OpenFlags);
- if (!error.empty()) {
- errs() << error << '\n';
- delete FDOut;
- return 0;
- }
-
- formatted_raw_ostream *Out =
- new formatted_raw_ostream(*FDOut, formatted_raw_ostream::DELETE_STREAM);
-
- return Out;
-}
-
-// main - Entry point for the llc compiler.
-//
-int main(int argc, char **argv) {
- sys::PrintStackTraceOnErrorSignal();
- PrettyStackTraceProgram X(argc, argv);
-
- // Enable debug stream buffering.
- EnableDebugBuffering = true;
-
- LLVMContext &Context = getGlobalContext();
- llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
-
- // Initialize targets first, so that --version shows registered targets.
- InitializeAllTargets();
- InitializeAllAsmPrinters();
-
- cl::ParseCommandLineOptions(argc, argv, "llvm system compiler\n");
-
- // Load the module to be compiled...
- SMDiagnostic Err;
- std::auto_ptr<Module> M;
-
- M.reset(ParseIRFile(InputFilename, Err, Context));
- if (M.get() == 0) {
- Err.Print(argv[0], errs());
- return 1;
- }
- Module &mod = *M.get();
-
- // If we are supposed to override the target triple, do so now.
- if (!TargetTriple.empty())
- mod.setTargetTriple(TargetTriple);
-
- Triple TheTriple(mod.getTargetTriple());
- if (TheTriple.getTriple().empty())
- TheTriple.setTriple(sys::getHostTriple());
-
- // Allocate target machine. First, check whether the user has explicitly
- // specified an architecture to compile for. If so we have to look it up by
- // name, because it might be a backend that has no mapping to a target triple.
- const Target *TheTarget = 0;
- if (!MArch.empty()) {
- for (TargetRegistry::iterator it = TargetRegistry::begin(),
- ie = TargetRegistry::end(); it != ie; ++it) {
- if (MArch == it->getName()) {
- TheTarget = &*it;
- break;
- }
- }
-
- if (!TheTarget) {
- errs() << argv[0] << ": error: invalid target '" << MArch << "'.\n";
- return 1;
- }
-
- // Adjust the triple to match (if known), otherwise stick with the
- // module/host triple.
- Triple::ArchType Type = Triple::getArchTypeForLLVMName(MArch);
- if (Type != Triple::UnknownArch)
- TheTriple.setArch(Type);
- } else {
- std::string Err;
- TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), Err);
- if (TheTarget == 0) {
- errs() << argv[0] << ": error auto-selecting target for module '"
- << Err << "'. Please use the -march option to explicitly "
- << "pick a target.\n";
- return 1;
- }
- }
-
- // Package up features to be passed to target/subtarget
- std::string FeaturesStr;
- if (MCPU.size() || MAttrs.size()) {
- SubtargetFeatures Features;
- Features.setCPU(MCPU);
- for (unsigned i = 0; i != MAttrs.size(); ++i)
- Features.AddFeature(MAttrs[i]);
- FeaturesStr = Features.getString();
- }
-
- std::auto_ptr<TargetMachine>
- target(TheTarget->createTargetMachine(TheTriple.getTriple(), FeaturesStr));
- assert(target.get() && "Could not allocate target machine!");
- TargetMachine &Target = *target.get();
-
- // Figure out where we are going to send the output...
- formatted_raw_ostream *Out = GetOutputStream(TheTarget->getName(), argv[0]);
- if (Out == 0) return 1;
-
- CodeGenOpt::Level OLvl = CodeGenOpt::Default;
- switch (OptLevel) {
- default:
- errs() << argv[0] << ": invalid optimization level.\n";
- return 1;
- case ' ': break;
- case '0': OLvl = CodeGenOpt::None; break;
- case '1': OLvl = CodeGenOpt::Less; break;
- case '2': OLvl = CodeGenOpt::Default; break;
- case '3': OLvl = CodeGenOpt::Aggressive; break;
- }
-
- // Request that addPassesToEmitFile run the Verifier after running
- // passes which modify the IR.
-#ifndef NDEBUG
- bool DisableVerify = false;
-#else
- bool DisableVerify = true;
-#endif
-
- // If this target requires addPassesToEmitWholeFile, do it now. This is
- // used by strange things like the C backend.
- if (Target.WantsWholeFile()) {
- PassManager PM;
-
- // Add the target data from the target machine, if it exists, or the module.
- if (const TargetData *TD = Target.getTargetData())
- PM.add(new TargetData(*TD));
- else
- PM.add(new TargetData(&mod));
-
- if (!NoVerify)
- PM.add(createVerifierPass());
-
- // Ask the target to add backend passes as necessary.
- if (Target.addPassesToEmitWholeFile(PM, *Out, FileType, OLvl,
- DisableVerify)) {
- errs() << argv[0] << ": target does not support generation of this"
- << " file type!\n";
- if (Out != &fouts()) delete Out;
- // And the Out file is empty and useless, so remove it now.
- sys::Path(OutputFilename).eraseFromDisk();
- return 1;
- }
- PM.run(mod);
- } else {
- // Build up all of the passes that we want to do to the module.
- FunctionPassManager Passes(M.get());
-
- // Add the target data from the target machine, if it exists, or the module.
- if (const TargetData *TD = Target.getTargetData())
- Passes.add(new TargetData(*TD));
- else
- Passes.add(new TargetData(&mod));
-
-#ifndef NDEBUG
- if (!NoVerify)
- Passes.add(createVerifierPass());
-#endif
-
- // Override default to generate verbose assembly.
- Target.setAsmVerbosityDefault(true);
-
- if (Target.addPassesToEmitFile(Passes, *Out, FileType, OLvl,
- DisableVerify)) {
- errs() << argv[0] << ": target does not support generation of this"
- << " file type!\n";
- if (Out != &fouts()) delete Out;
- // And the Out file is empty and useless, so remove it now.
- sys::Path(OutputFilename).eraseFromDisk();
- return 1;
- }
-
- Passes.doInitialization();
-
- // Run our queue of passes all at once now, efficiently.
- // TODO: this could lazily stream functions out of the module.
- for (Module::iterator I = mod.begin(), E = mod.end(); I != E; ++I)
- if (!I->isDeclaration()) {
- if (DisableRedZone)
- I->addFnAttr(Attribute::NoRedZone);
- if (NoImplicitFloats)
- I->addFnAttr(Attribute::NoImplicitFloat);
- Passes.run(*I);
- }
-
- Passes.doFinalization();
- }
-
- // Delete the ostream if it's not a stdout stream
- if (Out != &fouts()) delete Out;
-
- return 0;
-}
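GetFileNameRoot plus the switch in GetOutputStream determine the default output name in the removed llc.cpp: strip a trailing .bc/.ll, then append .s, .o or .null depending on -filetype. The same mapping in a few lines of Python (common cases only; the C backend's .cbe.c/.cpp special case is left out):

    def default_output_name(input_filename, file_type='asm'):
        root = input_filename
        if root.endswith('.bc') or root.endswith('.ll'):
            root = root[:-3]                      # drop '.bc'/'.ll', as in GetFileNameRoot
        return root + {'asm': '.s', 'obj': '.o', 'null': '.null'}[file_type]

    print(default_output_name('foo.bc'))          # foo.s
    print(default_output_name('foo.ll', 'obj'))   # foo.o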
diff --git a/libclamav/c++/llvm/tools/lli/CMakeLists.txt b/libclamav/c++/llvm/tools/lli/CMakeLists.txt
deleted file mode 100644
index ce70d46..0000000
--- a/libclamav/c++/llvm/tools/lli/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-set(LLVM_LINK_COMPONENTS jit interpreter nativecodegen bitreader selectiondag)
-
-add_llvm_tool(lli
- lli.cpp
- )
diff --git a/libclamav/c++/llvm/tools/lli/Makefile b/libclamav/c++/llvm/tools/lli/Makefile
deleted file mode 100644
index 8f6eeed..0000000
--- a/libclamav/c++/llvm/tools/lli/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- tools/lli/Makefile ------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL := ../..
-TOOLNAME := lli
-LINK_COMPONENTS := jit interpreter nativecodegen bitreader selectiondag
-
-# Enable JIT support
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/lli/lli.cpp b/libclamav/c++/llvm/tools/lli/lli.cpp
deleted file mode 100644
index 81c17cd..0000000
--- a/libclamav/c++/llvm/tools/lli/lli.cpp
+++ /dev/null
@@ -1,253 +0,0 @@
-//===- lli.cpp - LLVM Interpreter / Dynamic compiler ----------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This utility provides a simple wrapper around the LLVM Execution Engines,
-// which allow the direct execution of LLVM programs through a Just-In-Time
-// compiler, or through an interpreter if no JIT is available for this platform.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
-#include "llvm/Type.h"
-#include "llvm/Bitcode/ReaderWriter.h"
-#include "llvm/CodeGen/LinkAllCodegenComponents.h"
-#include "llvm/ExecutionEngine/GenericValue.h"
-#include "llvm/ExecutionEngine/Interpreter.h"
-#include "llvm/ExecutionEngine/JIT.h"
-#include "llvm/ExecutionEngine/JITEventListener.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/PluginLoader.h"
-#include "llvm/Support/PrettyStackTrace.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Process.h"
-#include "llvm/System/Signals.h"
-#include "llvm/Target/TargetSelect.h"
-#include <cerrno>
-using namespace llvm;
-
-namespace {
- cl::opt<std::string>
- InputFile(cl::desc("<input bitcode>"), cl::Positional, cl::init("-"));
-
- cl::list<std::string>
- InputArgv(cl::ConsumeAfter, cl::desc("<program arguments>..."));
-
- cl::opt<bool> ForceInterpreter("force-interpreter",
- cl::desc("Force interpretation: disable JIT"),
- cl::init(false));
-
- // Determine optimization level.
- cl::opt<char>
- OptLevel("O",
- cl::desc("Optimization level. [-O0, -O1, -O2, or -O3] "
- "(default = '-O2')"),
- cl::Prefix,
- cl::ZeroOrMore,
- cl::init(' '));
-
- cl::opt<std::string>
- TargetTriple("mtriple", cl::desc("Override target triple for module"));
-
- cl::opt<std::string>
- MArch("march",
- cl::desc("Architecture to generate assembly for (see --version)"));
-
- cl::opt<std::string>
- MCPU("mcpu",
- cl::desc("Target a specific cpu type (-mcpu=help for details)"),
- cl::value_desc("cpu-name"),
- cl::init(""));
-
- cl::list<std::string>
- MAttrs("mattr",
- cl::CommaSeparated,
- cl::desc("Target specific attributes (-mattr=help for details)"),
- cl::value_desc("a1,+a2,-a3,..."));
-
- cl::opt<std::string>
- EntryFunc("entry-function",
- cl::desc("Specify the entry function (default = 'main') "
- "of the executable"),
- cl::value_desc("function"),
- cl::init("main"));
-
- cl::opt<std::string>
- FakeArgv0("fake-argv0",
- cl::desc("Override the 'argv[0]' value passed into the executing"
- " program"), cl::value_desc("executable"));
-
- cl::opt<bool>
- DisableCoreFiles("disable-core-files", cl::Hidden,
- cl::desc("Disable emission of core files if possible"));
-
- cl::opt<bool>
- NoLazyCompilation("disable-lazy-compilation",
- cl::desc("Disable JIT lazy compilation"),
- cl::init(false));
-}
-
-static ExecutionEngine *EE = 0;
-
-static void do_shutdown() {
- delete EE;
- llvm_shutdown();
-}
-
-//===----------------------------------------------------------------------===//
-// main Driver function
-//
-int main(int argc, char **argv, char * const *envp) {
- sys::PrintStackTraceOnErrorSignal();
- PrettyStackTraceProgram X(argc, argv);
-
- LLVMContext &Context = getGlobalContext();
- atexit(do_shutdown); // Call llvm_shutdown() on exit.
-
- // If we have a native target, initialize it to ensure it is linked in and
- // usable by the JIT.
- InitializeNativeTarget();
-
- cl::ParseCommandLineOptions(argc, argv,
- "llvm interpreter & dynamic compiler\n");
-
- // If the user doesn't want core files, disable them.
- if (DisableCoreFiles)
- sys::Process::PreventCoreFiles();
-
- // Load the bitcode...
- std::string ErrorMsg;
- Module *Mod = NULL;
- if (MemoryBuffer *Buffer = MemoryBuffer::getFileOrSTDIN(InputFile,&ErrorMsg)){
- Mod = getLazyBitcodeModule(Buffer, Context, &ErrorMsg);
- if (!Mod) delete Buffer;
- }
-
- if (!Mod) {
- errs() << argv[0] << ": error loading program '" << InputFile << "': "
- << ErrorMsg << "\n";
- exit(1);
- }
-
- // If not jitting lazily, load the whole bitcode file eagerly too.
- if (NoLazyCompilation) {
- if (Mod->MaterializeAllPermanently(&ErrorMsg)) {
- errs() << argv[0] << ": bitcode didn't read correctly.\n";
- errs() << "Reason: " << ErrorMsg << "\n";
- exit(1);
- }
- }
-
- EngineBuilder builder(Mod);
- builder.setMArch(MArch);
- builder.setMCPU(MCPU);
- builder.setMAttrs(MAttrs);
- builder.setErrorStr(&ErrorMsg);
- builder.setEngineKind(ForceInterpreter
- ? EngineKind::Interpreter
- : EngineKind::JIT);
-
- // If we are supposed to override the target triple, do so now.
- if (!TargetTriple.empty())
- Mod->setTargetTriple(TargetTriple);
-
- CodeGenOpt::Level OLvl = CodeGenOpt::Default;
- switch (OptLevel) {
- default:
- errs() << argv[0] << ": invalid optimization level.\n";
- return 1;
- case ' ': break;
- case '0': OLvl = CodeGenOpt::None; break;
- case '1': OLvl = CodeGenOpt::Less; break;
- case '2': OLvl = CodeGenOpt::Default; break;
- case '3': OLvl = CodeGenOpt::Aggressive; break;
- }
- builder.setOptLevel(OLvl);
-
- EE = builder.create();
- if (!EE) {
- if (!ErrorMsg.empty())
- errs() << argv[0] << ": error creating EE: " << ErrorMsg << "\n";
- else
- errs() << argv[0] << ": unknown error creating EE!\n";
- exit(1);
- }
-
- EE->RegisterJITEventListener(createOProfileJITEventListener());
-
- EE->DisableLazyCompilation(NoLazyCompilation);
-
- // If the user specifically requested an argv[0] to pass into the program,
- // do it now.
- if (!FakeArgv0.empty()) {
- InputFile = FakeArgv0;
- } else {
- // Otherwise, if there is a .bc suffix on the executable strip it off, it
- // might confuse the program.
- if (InputFile.rfind(".bc") == InputFile.length() - 3)
- InputFile.erase(InputFile.length() - 3);
- }
-
- // Add the module's name to the start of the vector of arguments to main().
- InputArgv.insert(InputArgv.begin(), InputFile);
-
- // Call the main function from M as if its signature were:
- // int main (int argc, char **argv, const char **envp)
- // using the contents of Args to determine argc & argv, and the contents of
- // EnvVars to determine envp.
- //
- Function *EntryFn = Mod->getFunction(EntryFunc);
- if (!EntryFn) {
- errs() << '\'' << EntryFunc << "\' function not found in module.\n";
- return -1;
- }
-
- // If the program doesn't explicitly call exit, we will need the Exit
- // function later on to make an explicit call, so get the function now.
- Constant *Exit = Mod->getOrInsertFunction("exit", Type::getVoidTy(Context),
- Type::getInt32Ty(Context),
- NULL);
-
- // Reset errno to zero on entry to main.
- errno = 0;
-
- // Run static constructors.
- EE->runStaticConstructorsDestructors(false);
-
- if (NoLazyCompilation) {
- for (Module::iterator I = Mod->begin(), E = Mod->end(); I != E; ++I) {
- Function *Fn = &*I;
- if (Fn != EntryFn && !Fn->isDeclaration())
- EE->getPointerToFunction(Fn);
- }
- }
-
- // Run main.
- int Result = EE->runFunctionAsMain(EntryFn, InputArgv, envp);
-
- // Run static destructors.
- EE->runStaticConstructorsDestructors(true);
-
- // If the program didn't call exit explicitly, we should call it now.
- // This ensures that any atexit handlers get called correctly.
- if (Function *ExitF = dyn_cast<Function>(Exit)) {
- std::vector<GenericValue> Args;
- GenericValue ResultGV;
- ResultGV.IntVal = APInt(32, Result);
- Args.push_back(ResultGV);
- EE->runFunction(ExitF, Args);
- errs() << "ERROR: exit(" << Result << ") returned!\n";
- abort();
- } else {
- errs() << "ERROR: exit defined with wrong prototype!\n";
- abort();
- }
-}
diff --git a/libclamav/c++/llvm/tools/llvm-as/CMakeLists.txt b/libclamav/c++/llvm/tools/llvm-as/CMakeLists.txt
deleted file mode 100644
index eef4a13..0000000
--- a/libclamav/c++/llvm/tools/llvm-as/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-set(LLVM_LINK_COMPONENTS asmparser bitwriter)
-set(LLVM_REQUIRES_EH 1)
-
-add_llvm_tool(llvm-as
- llvm-as.cpp
- )
diff --git a/libclamav/c++/llvm/tools/llvm-as/Makefile b/libclamav/c++/llvm/tools/llvm-as/Makefile
deleted file mode 100644
index e1e5853..0000000
--- a/libclamav/c++/llvm/tools/llvm-as/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-##===- tools/llvm-as/Makefile ------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TOOLNAME = llvm-as
-LINK_COMPONENTS := asmparser bitwriter
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/llvm-as/llvm-as.cpp b/libclamav/c++/llvm/tools/llvm-as/llvm-as.cpp
deleted file mode 100644
index d39d6c8..0000000
--- a/libclamav/c++/llvm/tools/llvm-as/llvm-as.cpp
+++ /dev/null
@@ -1,121 +0,0 @@
-//===--- llvm-as.cpp - The low-level LLVM assembler -----------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This utility may be invoked in the following manner:
-// llvm-as --help - Output information about command line switches
-// llvm-as [options] - Read LLVM asm from stdin, write bitcode to stdout
-// llvm-as [options] x.ll - Read LLVM asm from the x.ll file, write bitcode
-// to the x.bc file.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
-#include "llvm/Assembly/Parser.h"
-#include "llvm/Analysis/Verifier.h"
-#include "llvm/Bitcode/ReaderWriter.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/PrettyStackTrace.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/SystemUtils.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Signals.h"
-#include <memory>
-using namespace llvm;
-
-static cl::opt<std::string>
-InputFilename(cl::Positional, cl::desc("<input .llvm file>"), cl::init("-"));
-
-static cl::opt<std::string>
-OutputFilename("o", cl::desc("Override output filename"),
- cl::value_desc("filename"));
-
-static cl::opt<bool>
-Force("f", cl::desc("Enable binary output on terminals"));
-
-static cl::opt<bool>
-DisableOutput("disable-output", cl::desc("Disable output"), cl::init(false));
-
-static cl::opt<bool>
-DumpAsm("d", cl::desc("Print assembly as parsed"), cl::Hidden);
-
-static cl::opt<bool>
-DisableVerify("disable-verify", cl::Hidden,
- cl::desc("Do not run verifier on input LLVM (dangerous!)"));
-
-static void WriteOutputFile(const Module *M) {
- // Infer the output filename if needed.
- if (OutputFilename.empty()) {
- if (InputFilename == "-") {
- OutputFilename = "-";
- } else {
- std::string IFN = InputFilename;
- int Len = IFN.length();
- if (IFN[Len-3] == '.' && IFN[Len-2] == 'l' && IFN[Len-1] == 'l') {
- // Source ends in .ll
- OutputFilename = std::string(IFN.begin(), IFN.end()-3);
- } else {
- OutputFilename = IFN; // Append a .bc to it
- }
- OutputFilename += ".bc";
- }
- }
-
- // Make sure that the Out file gets unlinked from the disk if we get a
- // SIGINT.
- if (OutputFilename != "-")
- sys::RemoveFileOnSignal(sys::Path(OutputFilename));
-
- std::string ErrorInfo;
- std::auto_ptr<raw_ostream> Out
- (new raw_fd_ostream(OutputFilename.c_str(), ErrorInfo,
- raw_fd_ostream::F_Binary));
- if (!ErrorInfo.empty()) {
- errs() << ErrorInfo << '\n';
- exit(1);
- }
-
- if (Force || !CheckBitcodeOutputToConsole(*Out, true))
- WriteBitcodeToFile(M, *Out);
-}
-
-int main(int argc, char **argv) {
- // Print a stack trace if we signal out.
- sys::PrintStackTraceOnErrorSignal();
- PrettyStackTraceProgram X(argc, argv);
- LLVMContext &Context = getGlobalContext();
- llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
- cl::ParseCommandLineOptions(argc, argv, "llvm .ll -> .bc assembler\n");
-
- // Parse the file now...
- SMDiagnostic Err;
- std::auto_ptr<Module> M(ParseAssemblyFile(InputFilename, Err, Context));
- if (M.get() == 0) {
- Err.Print(argv[0], errs());
- return 1;
- }
-
- if (!DisableVerify) {
- std::string Err;
- if (verifyModule(*M.get(), ReturnStatusAction, &Err)) {
- errs() << argv[0]
- << ": assembly parsed, but does not verify as correct!\n";
- errs() << Err;
- return 1;
- }
- }
-
- if (DumpAsm) errs() << "Here's the assembly:\n" << *M.get();
-
- if (!DisableOutput)
- WriteOutputFile(M.get());
-
- return 0;
-}
diff --git a/libclamav/c++/llvm/tools/llvm-config/CMakeLists.txt b/libclamav/c++/llvm/tools/llvm-config/CMakeLists.txt
deleted file mode 100644
index 7a43dba..0000000
--- a/libclamav/c++/llvm/tools/llvm-config/CMakeLists.txt
+++ /dev/null
@@ -1,146 +0,0 @@
-include(TestBigEndian)
-
-include(FindPerl)
-if( NOT PERL_FOUND )
- message(FATAL_ERROR "Perl required but not found!")
-endif( NOT PERL_FOUND )
-
-set(PERL ${PERL_EXECUTABLE})
-set(VERSION PACKAGE_VERSION)
-set(PREFIX ${LLVM_BINARY_DIR}) # TODO: Root for `make install'.
-set(abs_top_srcdir ${LLVM_MAIN_SRC_DIR})
-set(abs_top_builddir ${LLVM_BINARY_DIR})
-execute_process(COMMAND date
- OUTPUT_VARIABLE LLVM_CONFIGTIME
- OUTPUT_STRIP_TRAILING_WHITESPACE)
-# LLVM_ON_UNIX and LLVM_ON_WIN32 already set.
-# those are set to blank by `autoconf' on MinGW, so it seems they are not required:
-#set(LLVMGCCDIR "")
-#set(LLVMGCC "")
-#set(LLVMGXX "")
-test_big_endian(IS_BIG_ENDIAN)
-if( IS_BIG_ENDIAN )
- set(ENDIAN "big")
-else( IS_BIG_ENDIAN )
- set(ENDIAN "little")
-endif( IS_BIG_ENDIAN )
-set(SHLIBEXT ${LTDL_SHLIB_EXT})
-#EXEEXT already set.
-set(OS "${CMAKE_SYSTEM}")
-set(ARCH "${LLVM_NATIVE_ARCH}")
-
-get_system_libs(LLVM_SYSTEM_LIBS_LIST)
-foreach(l ${LLVM_SYSTEM_LIBS_LIST})
- set(LLVM_SYSTEM_LIBS ${LLVM_SYSTEM_LIBS} "-l${l}")
-endforeach()
-
-foreach(c ${LLVM_TARGETS_TO_BUILD})
- set(TARGETS_BUILT "${TARGETS_BUILT} ${c}")
-endforeach(c)
-set(TARGETS_TO_BUILD ${TARGETS_BUILT})
-set(TARGET_HAS_JIT "1") # TODO
-
-# Avoids replacement at config-time:
-set(LLVM_CPPFLAGS "@LLVM_CPPFLAGS@")
-set(LLVM_CFLAGS "@LLVM_CFLAGS@")
-set(LLVM_CXXFLAGS "@LLVM_CXXFLAGS@")
-set(LLVM_LDFLAGS "@LLVM_LDFLAGS@")
-set(LIBS "@LIBS@")
-set(LLVM_BUILDMODE "@LLVM_BUILDMODE@")
-
-configure_file(
- ${CMAKE_CURRENT_SOURCE_DIR}/llvm-config.in.in
- ${CMAKE_CURRENT_BINARY_DIR}/llvm-config.in
- @ONLY
-)
-
-set(LIBDEPS ${CMAKE_CURRENT_BINARY_DIR}/LibDeps.txt)
-set(LIBDEPS_TMP ${CMAKE_CURRENT_BINARY_DIR}/LibDeps.txt.tmp)
-set(FINAL_LIBDEPS ${CMAKE_CURRENT_BINARY_DIR}/FinalLibDeps.txt)
-set(LLVM_CONFIG ${LLVM_TOOLS_BINARY_DIR}/llvm-config)
-set(LLVM_CONFIG_IN ${CMAKE_CURRENT_BINARY_DIR}/llvm-config.in)
-
-if( CMAKE_CROSSCOMPILING )
- set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY)
-endif()
-
-find_program(NM_PATH nm PATH_SUFFIXES /bin)
-
-if( NOT NM_PATH )
- message(FATAL_ERROR "`nm' not found")
-endif()
-
-add_custom_command(OUTPUT ${LIBDEPS_TMP}
- COMMAND ${PERL_EXECUTABLE} ${LLVM_MAIN_SRC_DIR}/utils/GenLibDeps.pl -flat ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/${CMAKE_CFG_INTDIR} ${NM_PATH} > ${LIBDEPS_TMP}
- DEPENDS ${llvm_libs}
- COMMENT "Regenerating ${LIBDEPS_TMP}")
-
-add_custom_command(OUTPUT ${LIBDEPS}
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${LIBDEPS_TMP} ${LIBDEPS}
- DEPENDS ${LIBDEPS_TMP}
- COMMENT "Updated ${LIBDEPS} because dependencies changed")
-
-add_custom_command(OUTPUT ${FINAL_LIBDEPS}
- COMMAND ${PERL_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/find-cycles.pl < ${LIBDEPS} > ${FINAL_LIBDEPS} || ${CMAKE_COMMAND} -E remove -f ${FINAL_LIBDEPS}
- DEPENDS ${LIBDEPS}
- COMMENT "Checking for cyclic dependencies between LLVM libraries.")
-
-set(C_FLGS "${CMAKE_C_FLAGS_${uppercase_CMAKE_BUILD_TYPE}} ${LLVM_DEFINITIONS}")
-set(CXX_FLGS "${CMAKE_CXX_FLAGS_${uppercase_CMAKE_BUILD_TYPE}} ${LLVM_DEFINITIONS}")
-set(CPP_FLGS "${CMAKE_CPP_FLAGS_${uppercase_CMAKE_BUILD_TYPE}} ${LLVM_DEFINITIONS}")
-
-add_custom_command(OUTPUT ${LLVM_CONFIG}
- COMMAND echo 's!@LLVM_CPPFLAGS@!${CPP_FLGS}!' > temp.sed
- COMMAND echo 's!@LLVM_CFLAGS@!${C_FLGS}!' >> temp.sed
- COMMAND echo 's!@LLVM_CXXFLAGS@!${CXX_FLGS}!' >> temp.sed
- # TODO: Use general flags for linking! not just for shared libs:
- COMMAND echo 's!@LLVM_LDFLAGS@!${CMAKE_SHARED_LINKER_FLAGS}!' >> temp.sed
- COMMAND echo 's!@LIBS@!${LLVM_SYSTEM_LIBS}!' >> temp.sed
- COMMAND echo 's!@LLVM_BUILDMODE@!${CMAKE_BUILD_TYPE}!' >> temp.sed
- COMMAND sed -f temp.sed < ${LLVM_CONFIG_IN} > ${LLVM_CONFIG}
- COMMAND ${CMAKE_COMMAND} -E remove -f temp.sed
- COMMAND cat ${FINAL_LIBDEPS} >> ${LLVM_CONFIG}
- COMMAND chmod +x ${LLVM_CONFIG}
- DEPENDS ${FINAL_LIBDEPS} ${LLVM_CONFIG_IN}
- COMMENT "Building llvm-config script."
- )
-
-add_custom_target(llvm-config.target ALL
- DEPENDS ${LLVM_CONFIG})
-
-add_dependencies(llvm-config.target ${llvm_lib_targets})
-
-# Make sure that llvm-config builds before the llvm tools, so we have
-# LibDeps.txt and can use it for updating the hard-coded library
-# dependencies on cmake/modules/LLVMLibDeps.cmake when the tools'
-# build fail due to outdated dependencies:
-set(LLVM_COMMON_DEPENDS ${LLVM_COMMON_DEPENDS} llvm-config.target)
-
-install(FILES ${LLVM_CONFIG}
- PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE
- WORLD_READ WORLD_EXECUTE
- DESTINATION bin)
-
-
-# Regeneration of library dependencies.
-
-# See the comments at the end of cmake/modules/LLVMConfig.cmake for
-# notes and guidelines.
-
-set(LLVMLibDeps ${LLVM_MAIN_SRC_DIR}/cmake/modules/LLVMLibDeps.cmake)
-set(LLVMLibDeps_TMP ${CMAKE_CURRENT_BINARY_DIR}/LLVMLibDeps.cmake.tmp)
-
-add_custom_command(OUTPUT ${LLVMLibDeps_TMP}
- COMMAND sed -e s'@\\.a@@g' -e s'@\\.so@@g' -e 's@libLLVM@LLVM@g' -e 's@: @ @' -e 's@\\\(.*\\\)@set\(MSVC_LIB_DEPS_\\1\)@' ${FINAL_LIBDEPS} > ${LLVMLibDeps_TMP}
- COMMAND ${CMAKE_COMMAND} -E copy_if_different ${LLVMLibDeps_TMP} ${LLVMLibDeps}
- DEPENDS ${FINAL_LIBDEPS}
- COMMENT "Updating cmake library dependencies file ${LLVMLibDeps}"
- )
-
-if( LLVM_TARGETS_TO_BUILD STREQUAL LLVM_ALL_TARGETS )
- add_custom_target(llvmlibdeps.target ALL DEPENDS ${LLVMLibDeps_TMP})
- add_dependencies(llvmlibdeps.target llvm-config.target)
- set(LLVM_COMMON_DEPENDS ${LLVM_COMMON_DEPENDS} llvmlibdeps.target)
-endif()
-
-set(LLVM_COMMON_DEPENDS ${LLVM_COMMON_DEPENDS} PARENT_SCOPE)
diff --git a/libclamav/c++/llvm/tools/llvm-config/Makefile b/libclamav/c++/llvm/tools/llvm-config/Makefile
deleted file mode 100644
index c7f7b32..0000000
--- a/libclamav/c++/llvm/tools/llvm-config/Makefile
+++ /dev/null
@@ -1,131 +0,0 @@
-##===- tools/llvm-config/Makefile --------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-
-EXTRA_DIST = LibDeps.txt FinalLibDeps.txt llvm-config.in.in find-cycles.pl
-
-include $(LEVEL)/Makefile.common
-
-# If we don't have Perl, we can't generate the library dependencies upon which
-# llvm-config depends. Therefore, only if we detect perl will we do anything
-# useful.
-ifeq ($(HAVE_PERL),1)
-
-# Combine preprocessor flags (except for -I) and CXX flags.
-SUB_CPPFLAGS = ${CPP.BaseFlags}
-SUB_CFLAGS = ${CPP.BaseFlags} ${C.Flags}
-SUB_CXXFLAGS = ${CPP.BaseFlags} ${CXX.Flags}
-
-# This is blank for now. We need to be careful about adding stuff here:
-# LDFLAGS tend not to be portable, and we don't currently require the
-# user to use libtool when linking against LLVM.
-SUB_LDFLAGS =
-
-FinalLibDeps = $(PROJ_OBJ_DIR)/FinalLibDeps.txt
-LibDeps = $(PROJ_OBJ_DIR)/LibDeps.txt
-LibDepsTemp = $(PROJ_OBJ_DIR)/LibDeps.txt.tmp
-GenLibDeps = $(PROJ_SRC_ROOT)/utils/GenLibDeps.pl
-
-$(LibDepsTemp): $(GenLibDeps) $(LibDir) $(wildcard $(LibDir)/*.a $(LibDir)/*.o)
- $(Echo) "Regenerating LibDeps.txt.tmp"
- $(Verb) $(PERL) $(GenLibDeps) -flat $(LibDir) "$(NM_PATH)" > $(LibDepsTemp)
-
-$(LibDeps): $(LibDepsTemp)
- $(Verb) $(CMP) -s $@ $< || ( $(CP) $< $@ && \
- $(EchoCmd) Updated LibDeps.txt because dependencies changed )
-
-# Find all the cyclic dependencies between various LLVM libraries, so we
-# don't have to process them at runtime.
-$(FinalLibDeps): find-cycles.pl $(LibDeps)
- $(Echo) "Checking for cyclic dependencies between LLVM libraries."
- $(Verb) $(PERL) $< < $(LibDeps) > $@ || rm -f $@
-
-# Rerun our configure substitutions as needed.
-ConfigInIn = $(PROJ_SRC_DIR)/llvm-config.in.in
-llvm-config.in: $(ConfigInIn) $(ConfigStatusScript)
- $(Verb) cd $(PROJ_OBJ_ROOT) ; \
- $(ConfigStatusScript) tools/llvm-config/llvm-config.in
-
-llvm-config-perobj: llvm-config.in $(GenLibDeps) $(LibDir) $(wildcard $(LibDir)/*.a)
- $(Echo) "Generating llvm-config-perobj"
- $(Verb) $(PERL) $(GenLibDeps) -perobj -flat $(LibDir) "$(NM_PATH)" >PerobjDeps.txt
- $(Echo) "Checking for cyclic dependencies between LLVM objects."
- $(Verb) $(PERL) $(PROJ_SRC_DIR)/find-cycles.pl < PerobjDepsIncl.txt > PerobjDepsInclFinal.txt || rm -f $@
- $(Verb) $(ECHO) 's/@LLVM_CPPFLAGS@/$(subst /,\/,$(SUB_CPPFLAGS))/' \
- > temp.sed
- $(Verb) $(ECHO) 's/@LLVM_CFLAGS@/$(subst /,\/,$(SUB_CFLAGS))/' \
- >> temp.sed
- $(Verb) $(ECHO) 's/@LLVM_CXXFLAGS@/$(subst /,\/,$(SUB_CXXFLAGS))/' \
- >> temp.sed
- $(Verb) $(ECHO) 's/@LLVM_LDFLAGS@/$(subst /,\/,$(SUB_LDFLAGS))/' \
- >> temp.sed
- $(Verb) $(ECHO) 's/@LLVM_BUILDMODE@/$(subst /,\/,$(BuildMode))/' \
- >> temp.sed
- $(Verb) $(SED) -f temp.sed < $< > $@
- $(Verb) $(RM) temp.sed
- $(Verb) cat PerobjDepsFinal.txt >> $@
- $(Verb) chmod +x $@
-
-llvm-config-perobjincl: llvm-config.in $(GenLibDeps) $(LibDir) $(wildcard $(LibDir)/*.a)
- $(Echo) "Generating llvm-config-perobjincl"
- $(Verb) $(PERL) $(GenLibDeps) -perobj -perobjincl -flat $(LibDir) "$(NM_PATH)" >PerobjDepsIncl.txt
- $(Echo) "Checking for cyclic dependencies between LLVM objects."
- $(Verb) $(PERL) $(PROJ_SRC_DIR)/find-cycles.pl < PerobjDepsIncl.txt > PerobjDepsInclFinal.txt
- $(Verb) $(ECHO) 's/@LLVM_CPPFLAGS@/$(subst /,\/,$(SUB_CPPFLAGS))/' \
- > temp.sed
- $(Verb) $(ECHO) 's/@LLVM_CFLAGS@/$(subst /,\/,$(SUB_CFLAGS))/' \
- >> temp.sed
- $(Verb) $(ECHO) 's/@LLVM_CXXFLAGS@/$(subst /,\/,$(SUB_CXXFLAGS))/' \
- >> temp.sed
- $(Verb) $(ECHO) 's/@LLVM_LDFLAGS@/$(subst /,\/,$(SUB_LDFLAGS))/' \
- >> temp.sed
- $(Verb) $(ECHO) 's/@LLVM_BUILDMODE@/$(subst /,\/,$(BuildMode))/' \
- >> temp.sed
- $(Verb) $(SED) -f temp.sed < $< > $@
- $(Verb) $(RM) temp.sed
- $(Verb) cat PerobjDepsInclFinal.txt >> $@
- $(Verb) chmod +x $@
-
-# Build our final script.
-$(ToolDir)/llvm-config: llvm-config.in $(FinalLibDeps)
- $(Echo) "Building llvm-config script."
- $(Verb) $(ECHO) 's/@LLVM_CPPFLAGS@/$(subst /,\/,$(SUB_CPPFLAGS))/' \
- > temp.sed
- $(Verb) $(ECHO) 's/@LLVM_CFLAGS@/$(subst /,\/,$(SUB_CFLAGS))/' \
- >> temp.sed
- $(Verb) $(ECHO) 's/@LLVM_CXXFLAGS@/$(subst /,\/,$(SUB_CXXFLAGS))/' \
- >> temp.sed
- $(Verb) $(ECHO) 's/@LLVM_LDFLAGS@/$(subst /,\/,$(SUB_LDFLAGS))/' \
- >> temp.sed
- $(Verb) $(ECHO) 's/@LLVM_BUILDMODE@/$(subst /,\/,$(BuildMode))/' \
- >> temp.sed
- $(Verb) $(SED) -f temp.sed < $< > $@
- $(Verb) $(RM) temp.sed
- $(Verb) cat $(FinalLibDeps) >> $@
- $(Verb) chmod +x $@
-
-else
-# We don't have perl, just generate a dummy llvm-config
-$(ToolDir)/llvm-config:
- $(Echo) "Building place holder llvm-config script."
- $(Verb) $(ECHO) 'echo llvm-config: Perl not found so llvm-config could not be generated' >> $@
- $(Verb) chmod +x $@
-
-endif
-# Hook into the standard Makefile rules.
-all-local:: $(ToolDir)/llvm-config
-clean-local::
- $(Verb) $(RM) -f $(ToolDir)/llvm-config llvm-config.in $(FinalLibDeps) \
- $(LibDeps) GenLibDeps.out
-install-local:: all-local
- $(Echo) Installing llvm-config
- $(Verb) $(MKDIR) $(DESTDIR)$(PROJ_bindir)
- $(Verb) $(ScriptInstall) $(ToolDir)/llvm-config $(DESTDIR)$(PROJ_bindir)
-
diff --git a/libclamav/c++/llvm/tools/llvm-config/find-cycles.pl b/libclamav/c++/llvm/tools/llvm-config/find-cycles.pl
deleted file mode 100755
index 5cbf5b4..0000000
--- a/libclamav/c++/llvm/tools/llvm-config/find-cycles.pl
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/usr/bin/perl
-#
-# Program: find-cycles.pl
-#
-# Synopsis: Given a list of possibly cyclic dependencies, merge all the
-# cycles. This makes it possible to topologically sort the
-# dependencies between different parts of LLVM.
-#
-# Syntax: find-cycles.pl < LibDeps.txt > FinalLibDeps.txt
-#
-# Input: cycmem1: cycmem2 dep1 dep2
-# cycmem2: cycmem1 dep3 dep4
-# boring: dep4
-#
-# Output: cycmem1 cycmem2: dep1 dep2 dep3 dep4
-# boring: dep4
-#
-# This file was written by Eric Kidd, and is placed into the public domain.
-#
-
-use 5.006;
-use strict;
-use warnings;
-
-my %DEPS;
-my @CYCLES;
-sub find_all_cycles;
-
-# Read our dependency information.
-while (<>) {
- chomp;
- my ($module, $dependency_str) = /^\s*([^:]+):\s*(.*)\s*$/;
- die "Malformed data: $_" unless defined $dependency_str;
- my @dependencies = split(/ /, $dependency_str);
- $DEPS{$module} = \@dependencies;
-}
-
-# Partition our raw dependencies into sets of cyclically-connected nodes.
-find_all_cycles();
-
-# Print out the finished cycles, with their dependencies.
-my @output;
-my $cycles_found = 0;
-foreach my $cycle (@CYCLES) {
- my @modules = sort keys %{$cycle};
-
- # Merge the dependencies of all modules in this cycle.
- my %dependencies;
- foreach my $module (@modules) {
- @dependencies{@{$DEPS{$module}}} = 1;
- }
-
- # Prune the known cyclic dependencies.
- foreach my $module (@modules) {
- delete $dependencies{$module};
- }
-
- # Warn about possible linker problems.
- my @archives = grep(/\.a$/, @modules);
- if (@archives > 1) {
- $cycles_found = $cycles_found + 1;
- print STDERR "find-cycles.pl: Circular dependency between *.a files:\n";
- print STDERR "find-cycles.pl: ", join(' ', @archives), "\n";
- push @modules, @archives; # WORKAROUND: Duplicate *.a files. Ick.
- } elsif (@modules > 1) {
- $cycles_found = $cycles_found + 1;
- print STDERR "find-cycles.pl: Circular dependency between *.o files:\n";
- print STDERR "find-cycles.pl: ", join(' ', @modules), "\n";
- push @modules, @modules; # WORKAROUND: Duplicate *.o files. Ick.
- }
-
- # Add to our output. (@modules is already as sorted as we need it to be.)
- push @output, (join(' ', @modules) . ': ' .
- join(' ', sort keys %dependencies) . "\n");
-}
-print sort @output;
-
-exit $cycles_found;
-
-#==========================================================================
-# Depedency Cycle Support
-#==========================================================================
-# For now, we have cycles in our dependency graph. Ideally, each cycle
-# would be collapsed down to a single *.a file, saving us all this work.
-#
-# To understand this code, you'll need a working knowledge of Perl 5,
-# and possibly some quality time with 'man perlref'.
-
-my %SEEN;
-my %CYCLES;
-sub find_cycles ($@);
-sub found_cycles ($@);
-
-sub find_all_cycles {
- # Find all multi-item cycles.
- my @modules = sort keys %DEPS;
- foreach my $module (@modules) { find_cycles($module); }
-
- # Build fake one-item "cycles" for the remaining modules, so we can
- # treat them uniformly.
- foreach my $module (@modules) {
- unless (defined $CYCLES{$module}) {
- my %cycle = ($module, 1);
- $CYCLES{$module} = \%cycle;
- }
- }
-
- # Find all our unique cycles. We have to do this the hard way because
- # we apparently can't store hash references as hash keys without making
- # 'strict refs' sad.
- my %seen;
- foreach my $cycle (values %CYCLES) {
- unless ($seen{$cycle}) {
- $seen{$cycle} = 1;
- push @CYCLES, $cycle;
- }
- }
-}
-
-# Walk through our graph depth-first (keeping a trail in @path), and report
-# any cycles we find.
-sub find_cycles ($@) {
- my ($module, @path) = @_;
- if (str_in_list($module, @path)) {
- found_cycle($module, @path);
- } else {
- return if defined $SEEN{$module};
- $SEEN{$module} = 1;
- foreach my $dep (@{$DEPS{$module}}) {
- find_cycles($dep, @path, $module);
- }
- }
-}
-
-# Give a cycle, attempt to merge it with pre-existing cycle data.
-sub found_cycle ($@) {
- my ($module, @path) = @_;
-
- # Pop any modules which aren't part of our cycle.
- while ($path[0] ne $module) { shift @path; }
- #print join("->", @path, $module) . "\n";
-
- # Collect the modules in our cycle into a hash.
- my %cycle;
- foreach my $item (@path) {
- $cycle{$item} = 1;
- if (defined $CYCLES{$item}) {
- # Looks like we intersect with an existing cycle, so merge
- # all those in, too.
- foreach my $old_item (keys %{$CYCLES{$item}}) {
- $cycle{$old_item} = 1;
- }
- }
- }
-
- # Update our global cycle table.
- my $cycle_ref = \%cycle;
- foreach my $item (keys %cycle) {
- $CYCLES{$item} = $cycle_ref;
- }
- #print join(":", sort keys %cycle) . "\n";
-}
-
-sub str_in_list ($@) {
- my ($str, @list) = @_;
- foreach my $item (@list) {
- return 1 if ($item eq $str);
- }
- return 0;
-}
diff --git a/libclamav/c++/llvm/tools/llvm-config/llvm-config.in.in b/libclamav/c++/llvm/tools/llvm-config/llvm-config.in.in
deleted file mode 100644
index d435d57..0000000
--- a/libclamav/c++/llvm/tools/llvm-config/llvm-config.in.in
+++ /dev/null
@@ -1,460 +0,0 @@
-#!@PERL@
-##===- tools/llvm-config ---------------------------------------*- perl -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-#
-# Synopsis: Prints out compiler options needed to build against an installed
-# copy of LLVM.
-#
-# Syntax: llvm-config OPTIONS... [COMPONENTS...]
-#
-##===----------------------------------------------------------------------===##
-
-use 5.006;
-use strict;
-use warnings;
-use Cwd 'abs_path';
-
-#---- begin autoconf values ----
-my $PACKAGE_NAME = q{@PACKAGE_NAME@};
-my $VERSION = q{@PACKAGE_VERSION@};
-my $PREFIX = q{@LLVM_PREFIX@};
-my $LLVM_CONFIGTIME = q{@LLVM_CONFIGTIME@};
-my $LLVM_SRC_ROOT = q{@abs_top_srcdir@};
-my $LLVM_OBJ_ROOT = q{@abs_top_builddir@};
-my $ARCH = lc(q{@ARCH@});
-my $TARGET_TRIPLE = q{@target@};
-my $TARGETS_TO_BUILD = q{@TARGETS_TO_BUILD@};
-my $TARGET_HAS_JIT = q{@TARGET_HAS_JIT@};
-my @TARGETS_BUILT = map { lc($_) } qw{@TARGETS_TO_BUILD@};
-#---- end autoconf values ----
-
-# Must pretend x86_64 architecture is really x86, otherwise the native backend
-# won't get linked in.
-$ARCH = "x86" if $ARCH eq "x86_64";
-
-#---- begin Makefile values ----
-my $CPPFLAGS = q{@LLVM_CPPFLAGS@};
-my $CFLAGS = q{@LLVM_CFLAGS@};
-my $CXXFLAGS = q{@LLVM_CXXFLAGS@};
-my $LDFLAGS = q{@LLVM_LDFLAGS@};
-my $SYSTEM_LIBS = q{@LIBS@};
-my $LLVM_BUILDMODE = q{@LLVM_BUILDMODE@};
-#---- end Makefile values ----
-
-# Figure out where llvm-config is being run from. Primarily, we care if it has
-# been installed, or is running from the build directory, which changes the
-# locations of some files.
-
-# Convert the current executable name into its directory (e.g. ".").
-my ($RUN_DIR) = ($0 =~ /^(.*)\/.*$/);
-
-# Turn the directory into an absolute directory on the file system, also pop up
-# from "bin" into the build or prefix dir.
-my $ABS_RUN_DIR = abs_path("$RUN_DIR/..");
-chomp($ABS_RUN_DIR);
-
-# Compute the absolute object directory build, e.g. "foo/llvm/Debug".
-my $ABS_OBJ_ROOT = "$LLVM_OBJ_ROOT/$LLVM_BUILDMODE";
-$ABS_OBJ_ROOT = abs_path("$ABS_OBJ_ROOT") if (-d $ABS_OBJ_ROOT);
-chomp($ABS_OBJ_ROOT);
-
-my $INCLUDEDIR = "$ABS_RUN_DIR/include";
-my $INCLUDEOPTION = "-I$INCLUDEDIR";
-my $LIBDIR = "$ABS_RUN_DIR/lib";
-my $BINDIR = "$ABS_RUN_DIR/bin";
-if ($ABS_RUN_DIR eq $ABS_OBJ_ROOT) {
- # If we are running out of the build directory, the include dir is in the
- # srcdir.
- $INCLUDEDIR = "$LLVM_SRC_ROOT/include";
- # We need include files from both the srcdir and objdir.
- $INCLUDEOPTION = "-I$INCLUDEDIR -I$LLVM_OBJ_ROOT/include"
-} else {
- # If installed, ignore the prefix the tree was configured with, use the
- # current prefix.
- $PREFIX = $ABS_RUN_DIR;
-}
-
-sub usage;
-sub fix_library_names (@);
-sub fix_library_files (@);
-sub expand_dependencies (@);
-sub name_map_entries;
-
-# Parse our command-line arguments.
-usage if @ARGV == 0;
-my @components;
-my $has_opt = 0;
-my $want_libs = 0;
-my $want_libnames = 0;
-my $want_libfiles = 0;
-my $want_components = 0;
-foreach my $arg (@ARGV) {
- if ($arg =~ /^-/) {
- if ($arg eq "--version") {
- $has_opt = 1; print "$VERSION\n";
- } elsif ($arg eq "--prefix") {
- $has_opt = 1; print "$PREFIX\n";
- } elsif ($arg eq "--bindir") {
- $has_opt = 1; print "$BINDIR\n";
- } elsif ($arg eq "--includedir") {
- $has_opt = 1; print "$INCLUDEDIR\n";
- } elsif ($arg eq "--libdir") {
- $has_opt = 1; print "$LIBDIR\n";
- } elsif ($arg eq "--cppflags") {
- $has_opt = 1; print "$INCLUDEOPTION $CPPFLAGS\n";
- } elsif ($arg eq "--cflags") {
- $has_opt = 1; print "$INCLUDEOPTION $CFLAGS\n";
- } elsif ($arg eq "--cxxflags") {
- $has_opt = 1; print "$INCLUDEOPTION $CXXFLAGS\n";
- } elsif ($arg eq "--ldflags") {
- $has_opt = 1; print "-L$LIBDIR $LDFLAGS $SYSTEM_LIBS\n";
- } elsif ($arg eq "--libs") {
- $has_opt = 1; $want_libs = 1;
- } elsif ($arg eq "--libnames") {
- $has_opt = 1; $want_libnames = 1;
- } elsif ($arg eq "--libfiles") {
- $has_opt = 1; $want_libfiles = 1;
- } elsif ($arg eq "--components") {
- $has_opt = 1; print join(' ', name_map_entries), "\n";
- } elsif ($arg eq "--targets-built") {
- $has_opt = 1; print join(' ', @TARGETS_BUILT), "\n";
- } elsif ($arg eq "--host-target") {
- $has_opt = 1; print "$TARGET_TRIPLE\n";
- } elsif ($arg eq "--build-mode") {
- $has_opt = 1; print "$LLVM_BUILDMODE\n";
- } elsif ($arg eq "--obj-root") {
- $has_opt = 1; print abs_path("$LLVM_OBJ_ROOT/");
- } elsif ($arg eq "--src-root") {
- $has_opt = 1; print abs_path("$LLVM_SRC_ROOT/");
- } else {
- usage();
- }
- } else {
- push @components, $arg;
- }
-}
-
-# If no options were specified, fail.
-usage unless $has_opt;
-
-# If no components were specified, default to 'all'.
-if (@components == 0) {
- push @components, 'all';
-}
-
-# Force component names to lower case.
-@components = map lc, @components;
-
-# Handle any arguments which require building our dependency graph.
-if ($want_libs || $want_libnames || $want_libfiles) {
- my @libs = expand_dependencies(@components);
- print join(' ', fix_library_names(@libs)), "\n" if ($want_libs);
- print join(' ', @libs), "\n" if ($want_libnames);
- print join(' ', fix_library_files(@libs)), "\n" if ($want_libfiles);
-}
-
-exit 0;
-
-#==========================================================================
-# Support Routines
-#==========================================================================
-
-sub usage {
- print STDERR <<__EOD__;
-Usage: llvm-config <OPTION>... [<COMPONENT>...]
-
-Get various configuration information needed to compile programs which use
-LLVM. Typically called from 'configure' scripts. Examples:
- llvm-config --cxxflags
- llvm-config --ldflags
- llvm-config --libs engine bcreader scalaropts
-
-Options:
- --version Print LLVM version.
- --prefix Print the installation prefix.
- --src-root Print the source root LLVM was built from.
- --obj-root Print the object root used to build LLVM.
- --bindir Directory containing LLVM executables.
- --includedir Directory containing LLVM headers.
- --libdir Directory containing LLVM libraries.
- --cppflags C preprocessor flags for files that include LLVM headers.
- --cflags C compiler flags for files that include LLVM headers.
- --cxxflags C++ compiler flags for files that include LLVM headers.
- --ldflags Print Linker flags.
- --libs Libraries needed to link against LLVM components.
- --libnames Bare library names for in-tree builds.
- --libfiles Fully qualified library filenames for makefile depends.
- --components List of all possible components.
- --targets-built List of all targets currently built.
- --host-target Target triple used to configure LLVM.
- --build-mode Print build mode of LLVM tree (e.g. Debug or Release).
-Typical components:
- all All LLVM libraries (default).
- backend Either a native backend or the C backend.
- engine Either a native JIT or a bytecode interpreter.
-__EOD__
- exit(1);
-}
-
-# Use -lfoo instead of libfoo.a whenever possible, and add directories to
-# files which can't be found using -L.
-sub fix_library_names (@) {
- my @libs = @_;
- my @result;
- foreach my $lib (@libs) {
- # Transform the bare library name appropriately.
- my ($basename) = ($lib =~ /^lib([^.]*)\.a/);
- if (defined $basename) {
- push @result, "-l$basename";
- } else {
- push @result, "$LIBDIR/$lib";
- }
- }
- return @result;
-}
-
-# Turn the list of libraries into a list of files.
-sub fix_library_files(@) {
- my @libs = @_;
- my @result;
- foreach my $lib (@libs) {
- # Transform the bare library name into a filename.
- push @result, "$LIBDIR/$lib";
- }
- return @result;
-}
-
-#==========================================================================
-# Library Dependency Analysis
-#==========================================================================
-# Given a few human-readable library names, find all their dependencies
-# and sort them into an order which the linker will like. If we packed
-# our libraries into fewer archives, we could make the linker do much
-# of this work for us.
-#
-# Libraries have two different types of names in this code: Human-friendly
-# "component" names entered on the command-line, and the raw file names
-# we use internally (and ultimately pass to the linker).
-#
-# To understand this code, you'll need a working knowledge of Perl 5,
-# and possibly some quality time with 'man perlref'.
-
-sub load_dependencies;
-sub build_name_map;
-sub have_native_backend;
-sub find_best_engine;
-sub expand_names (@);
-sub find_all_required_sets (@);
-sub find_all_required_sets_helper ($$@);
-
-# Each "set" contains one or more libraries which must be included as a
-# group (due to cyclic dependencies). Sets are represented as a Perl array
-# reference pointing to a list of internal library names.
-my @SETS;
-
-# Various mapping tables.
-my %LIB_TO_SET_MAP; # Maps internal library names to their sets.
-my %SET_DEPS; # Maps sets to a list of libraries they depend on.
-my %NAME_MAP; # Maps human-entered names to internal names.
-
-# Have our dependencies been loaded yet?
-my $DEPENDENCIES_LOADED = 0;
-
-# Given a list of human-friendly component names, translate them into a
-# complete set of linker arguments.
-sub expand_dependencies (@) {
- my @libs = @_;
- load_dependencies;
- my @required_sets = find_all_required_sets(expand_names(@libs));
- my @sorted_sets = topologically_sort_sets(@required_sets);
-
- # Expand the library sets into libraries.
- my @result;
- foreach my $set (@sorted_sets) { push @result, @{$set}; }
- return @result;
-}
-
-# Load in the raw dependency data stored at the end of this file.
-sub load_dependencies {
- return if $DEPENDENCIES_LOADED;
- $DEPENDENCIES_LOADED = 1;
- while (<DATA>) {
- # Parse our line.
- my ($libs, $deps) = /^\s*([^:]+):\s*(.*)\s*$/;
- die "Malformed dependency data" unless defined $deps;
- my @libs = split(' ', $libs);
- my @deps = split(' ', $deps);
-
- # Record our dependency data.
- my $set = \@libs;
- push @SETS, $set;
- foreach my $lib (@libs) { $LIB_TO_SET_MAP{$lib} = $set; }
- $SET_DEPS{$set} = \@deps;
- }
- build_name_map;
-}
-
-# Build a map converting human-friendly component names into internal
-# library names.
-sub build_name_map {
- # Add entries for all the actual libraries.
- foreach my $set (@SETS) {
- foreach my $lib (sort @$set) {
- my $short_name = $lib;
- $short_name =~ s/^(lib)?LLVM([^.]*)\..*$/$2/;
- $short_name =~ tr/A-Z/a-z/;
- $NAME_MAP{$short_name} = [$lib];
- }
- }
-
- # Add target-specific entries
- foreach my $target (@TARGETS_BUILT) {
- # FIXME: Temporary, until we don't switch all targets
- if (defined $NAME_MAP{$target.'asmprinter'}) {
- $NAME_MAP{$target} = [$target.'info',
- $target.'asmprinter',
- $target.'codegen']
- } else {
- $NAME_MAP{$target} = [$target.'info',
- $NAME_MAP{$target}[0]]
- }
-
- if (defined $NAME_MAP{$target.'asmparser'}) {
- push @{$NAME_MAP{$target}},$target.'asmparser'
- }
-
- if (defined $NAME_MAP{$target.'disassembler'}) {
- push @{$NAME_MAP{$target}},$target.'disassembler'
- }
- }
-
- # Add virtual entries.
- $NAME_MAP{'native'} = have_native_backend() ? [$ARCH] : [];
- $NAME_MAP{'nativecodegen'} = have_native_backend() ? [$ARCH.'codegen'] : [];
- $NAME_MAP{'backend'} = have_native_backend() ? ['native'] : ['cbackend'];
- $NAME_MAP{'engine'} = find_best_engine;
- $NAME_MAP{'all'} = [name_map_entries]; # Must be last.
-}
-
-# Return true if we have a native backend to use.
-sub have_native_backend {
- my %BUILT;
- foreach my $target (@TARGETS_BUILT) { $BUILT{$target} = 1; }
- return defined $NAME_MAP{$ARCH} && defined $BUILT{$ARCH};
-}
-
-# Find a working subclass of ExecutionEngine for this platform.
-sub find_best_engine {
- if (have_native_backend && $TARGET_HAS_JIT) {
- return ['jit', 'native'];
- } else {
- return ['interpreter'];
- }
-}
-
-# Get all the human-friendly component names.
-sub name_map_entries {
- load_dependencies;
- return sort keys %NAME_MAP;
-}
-
-# Map human-readable names to internal library names.
-sub expand_names (@) {
- my @names = @_;
- my @result;
- foreach my $name (@names) {
- if (defined $LIB_TO_SET_MAP{$name}) {
- # We've hit bottom: An actual library name.
- push @result, $name;
- } elsif (defined $NAME_MAP{$name}) {
- # We've found a short name to expand.
- push @result, expand_names(@{$NAME_MAP{$name}});
- } else {
- print STDERR "llvm-config: unknown component name: $name\n";
- exit(1);
- }
- }
- return @result;
-}
-
-# Given a list of internal library names, return all sets of libraries which
-# will need to be included by the linker (in no particular order).
-sub find_all_required_sets (@) {
- my @libs = @_;
- my %sets_added;
- my @result;
- find_all_required_sets_helper(\%sets_added, \@result, @libs);
- return @result;
-}
-
-# Recursive closures are pretty broken in Perl, so we're going to separate
-# this function from find_all_required_sets and pass in the state we need
-# manually, as references. Yes, this is fairly unpleasant.
-sub find_all_required_sets_helper ($$@) {
- my ($sets_added, $result, @libs) = @_;
- foreach my $lib (@libs) {
- my $set = $LIB_TO_SET_MAP{$lib};
- next if defined $$sets_added{$set};
- $$sets_added{$set} = 1;
- push @$result, $set;
- find_all_required_sets_helper($sets_added, $result, @{$SET_DEPS{$set}});
- }
-}
-
-# Print a list of sets, with a label. Used for debugging.
-sub print_sets ($@) {
- my ($label, @sets) = @_;
- my @output;
- foreach my $set (@sets) { push @output, join(',', @$set); }
- print "$label: ", join(';', @output), "\n";
-}
-
-# Returns true if $lib is a key in $added.
-sub has_lib_been_added ($$) {
- my ($added, $lib) = @_;
- return defined $$added{$LIB_TO_SET_MAP{$lib}};
-}
-
-# Returns true if all the dependencies of $set appear in $added.
-sub have_all_deps_been_added ($$) {
- my ($added, $set) = @_;
- #print_sets(" Checking", $set);
- #print_sets(" Wants", $SET_DEPS{$set});
- foreach my $lib (@{$SET_DEPS{$set}}) {
- return 0 unless has_lib_been_added($added, $lib);
- }
- return 1;
-}
-
-# Given a list of sets, topologically sort them using dependencies.
-sub topologically_sort_sets (@) {
- my @sets = @_;
- my %added;
- my @result;
- SCAN: while (@sets) { # We'll delete items from @sets as we go.
- #print_sets("So far", reverse(@result));
- #print_sets("Remaining", @sets);
- for (my $i = 0; $i < @sets; ++$i) {
- my $set = $sets[$i];
- if (have_all_deps_been_added(\%added, $set)) {
- push @result, $set;
- $added{$set} = 1;
- #print "Removing $i.\n";
- splice(@sets, $i, 1);
- next SCAN; # Restart our scan.
- }
- }
- die "Can't find a library with no dependencies";
- }
- return reverse(@result);
-}
-
-# Our library dependency data will be added after the '__END__' token, and will
-# be read through the magic <DATA> filehandle.
-__END__
diff --git a/libclamav/c++/llvm/tools/llvm-dis/CMakeLists.txt b/libclamav/c++/llvm/tools/llvm-dis/CMakeLists.txt
deleted file mode 100644
index d62a6b5..0000000
--- a/libclamav/c++/llvm/tools/llvm-dis/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-set(LLVM_LINK_COMPONENTS bitreader)
-set(LLVM_REQUIRES_EH 1)
-
-add_llvm_tool(llvm-dis
- llvm-dis.cpp
- )
diff --git a/libclamav/c++/llvm/tools/llvm-dis/llvm-dis.cpp b/libclamav/c++/llvm/tools/llvm-dis/llvm-dis.cpp
deleted file mode 100644
index b8b1a39..0000000
--- a/libclamav/c++/llvm/tools/llvm-dis/llvm-dis.cpp
+++ /dev/null
@@ -1,111 +0,0 @@
-//===-- llvm-dis.cpp - The low-level LLVM disassembler --------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This utility may be invoked in the following manner:
-// llvm-dis [options] - Read LLVM bitcode from stdin, write asm to stdout
-// llvm-dis [options] x.bc - Read LLVM bitcode from the x.bc file, write asm
-// to the x.ll file.
-// Options:
-// --help - Output information about command line switches
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
-#include "llvm/Bitcode/ReaderWriter.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/PrettyStackTrace.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Signals.h"
-#include <memory>
-using namespace llvm;
-
-static cl::opt<std::string>
-InputFilename(cl::Positional, cl::desc("<input bitcode>"), cl::init("-"));
-
-static cl::opt<std::string>
-OutputFilename("o", cl::desc("Override output filename"),
- cl::value_desc("filename"));
-
-static cl::opt<bool>
-Force("f", cl::desc("Enable binary output on terminals"));
-
-static cl::opt<bool>
-DontPrint("disable-output", cl::desc("Don't output the .ll file"), cl::Hidden);
-
-int main(int argc, char **argv) {
- // Print a stack trace if we signal out.
- sys::PrintStackTraceOnErrorSignal();
- PrettyStackTraceProgram X(argc, argv);
-
- LLVMContext &Context = getGlobalContext();
- llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
-
-
- cl::ParseCommandLineOptions(argc, argv, "llvm .bc -> .ll disassembler\n");
-
- std::string ErrorMessage;
- std::auto_ptr<Module> M;
-
- if (MemoryBuffer *Buffer
- = MemoryBuffer::getFileOrSTDIN(InputFilename, &ErrorMessage)) {
- M.reset(ParseBitcodeFile(Buffer, Context, &ErrorMessage));
- delete Buffer;
- }
-
- if (M.get() == 0) {
- errs() << argv[0] << ": ";
- if (ErrorMessage.size())
- errs() << ErrorMessage << "\n";
- else
- errs() << "bitcode didn't read correctly.\n";
- return 1;
- }
-
- // Just use stdout. We won't actually print anything on it.
- if (DontPrint)
- OutputFilename = "-";
-
- if (OutputFilename.empty()) { // Unspecified output, infer it.
- if (InputFilename == "-") {
- OutputFilename = "-";
- } else {
- const std::string &IFN = InputFilename;
- int Len = IFN.length();
- // If the source ends in .bc, strip it off.
- if (IFN[Len-3] == '.' && IFN[Len-2] == 'b' && IFN[Len-1] == 'c')
- OutputFilename = std::string(IFN.begin(), IFN.end()-3)+".ll";
- else
- OutputFilename = IFN+".ll";
- }
- }
-
- // Make sure that the Out file gets unlinked from the disk if we get a
- // SIGINT.
- if (OutputFilename != "-")
- sys::RemoveFileOnSignal(sys::Path(OutputFilename));
-
- std::string ErrorInfo;
- std::auto_ptr<raw_fd_ostream>
- Out(new raw_fd_ostream(OutputFilename.c_str(), ErrorInfo,
- raw_fd_ostream::F_Binary));
- if (!ErrorInfo.empty()) {
- errs() << ErrorInfo << '\n';
- return 1;
- }
-
- // All that llvm-dis does is write the assembly to a file.
- if (!DontPrint)
- *Out << *M;
-
- return 0;
-}
-
diff --git a/libclamav/c++/llvm/tools/llvmc/CMakeLists.txt b/libclamav/c++/llvm/tools/llvmc/CMakeLists.txt
deleted file mode 100644
index bebaaeb..0000000
--- a/libclamav/c++/llvm/tools/llvmc/CMakeLists.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-# add_subdirectory(driver)
-
-# TODO: support plugins and user-configured builds.
-# See ./doc/LLVMC-Reference.rst "Customizing LLVMC: the compilation graph"
diff --git a/libclamav/c++/llvm/tools/llvmc/Makefile b/libclamav/c++/llvm/tools/llvmc/Makefile
deleted file mode 100644
index 8f99526..0000000
--- a/libclamav/c++/llvm/tools/llvmc/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-##===- tools/llvmc/Makefile --------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open
-# Source License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-
-export LLVMC_BASED_DRIVER_NAME = llvmc
-export LLVMC_BUILTIN_PLUGINS = Base Clang
-REQUIRES_RTTI = 1
-
-DIRS = plugins driver
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/llvmc/doc/LLVMC-Reference.rst b/libclamav/c++/llvm/tools/llvmc/doc/LLVMC-Reference.rst
deleted file mode 100644
index ca8500d..0000000
--- a/libclamav/c++/llvm/tools/llvmc/doc/LLVMC-Reference.rst
+++ /dev/null
@@ -1,832 +0,0 @@
-===================================
-Customizing LLVMC: Reference Manual
-===================================
-..
- This file was automatically generated by rst2html.
- Please do not edit directly!
- The ReST source lives in the directory 'tools/llvmc/doc'.
-
-.. contents::
-
-.. raw:: html
-
- <div class="doc_author">
- <p>Written by <a href="mailto:foldr@codedgers.com">Mikhail Glushenkov</a></p>
- </div>
-
-Introduction
-============
-
-LLVMC is a generic compiler driver, designed to be customizable and
-extensible. It plays the same role for LLVM as the ``gcc`` program
-does for GCC - LLVMC's job is essentially to transform a set of input
-files into a set of targets depending on configuration rules and user
-options. What makes LLVMC different is that these transformation rules
-are completely customizable - in fact, LLVMC knows nothing about the
-specifics of transformation (even the command-line options are mostly
-not hard-coded) and regards the transformation structure as an
-abstract graph. The structure of this graph is completely determined
-by plugins, which can be either statically or dynamically linked. This
-makes it possible to easily adapt LLVMC for other purposes - for
-example, as a build tool for game resources.
-
-Because LLVMC employs TableGen_ as its configuration language, you
-need to be familiar with it to customize LLVMC.
-
-.. _TableGen: http://llvm.org/docs/TableGenFundamentals.html
-
-
-Compiling with LLVMC
-====================
-
-LLVMC tries hard to be as compatible with ``gcc`` as possible,
-although there are some small differences. Most of the time, however,
-you shouldn't be able to notice them::
-
- $ # This works as expected:
- $ llvmc -O3 -Wall hello.cpp
- $ ./a.out
- hello
-
-One nice feature of LLVMC is that one doesn't have to distinguish between
-different compilers for different languages (think ``g++`` vs. ``gcc``) - the
-right toolchain is chosen automatically based on input language names (which
-are, in turn, determined from file extensions). If you want to force files
-ending with ".c" to compile as C++, use the ``-x`` option, just like you would
-do it with ``gcc``::
-
- $ # hello.c is really a C++ file
- $ llvmc -x c++ hello.c
- $ ./a.out
- hello
-
-On the other hand, when using LLVMC as a linker to combine several C++
-object files you should provide the ``--linker`` option since it's
-impossible for LLVMC to choose the right linker in that case::
-
- $ llvmc -c hello.cpp
- $ llvmc hello.o
- [A lot of link-time errors skipped]
- $ llvmc --linker=c++ hello.o
- $ ./a.out
- hello
-
-By default, LLVMC uses ``llvm-gcc`` to compile the source code. It is also
-possible to choose the ``clang`` compiler with the ``-clang`` option.
-
-
-Predefined options
-==================
-
-LLVMC has some built-in options that can't be overridden in the
-configuration libraries:
-
-* ``-o FILE`` - Output file name.
-
-* ``-x LANGUAGE`` - Specify the language of the following input files
- until the next -x option.
-
-* ``-load PLUGIN_NAME`` - Load the specified plugin DLL. Example:
- ``-load $LLVM_DIR/Release/lib/LLVMCSimple.so``.
-
-* ``-v`` - Enable verbose mode, i.e. print out all executed commands.
-
-* ``--save-temps`` - Write temporary files to the current directory and do not
- delete them on exit. This option can also take an argument: the
- ``--save-temps=obj`` switch will write files into the directory specified with
- the ``-o`` option. The ``--save-temps=cwd`` and ``--save-temps`` switches are
- both synonyms for the default behaviour.
-
-* ``--temp-dir DIRECTORY`` - Store temporary files in the given directory. This
- directory is deleted on exit unless ``--save-temps`` is specified. If
- ``--save-temps=obj`` is also specified, ``--temp-dir`` is given the
- precedence.
-
-* ``--check-graph`` - Check the compilation for common errors like mismatched
- output/input language names, multiple default edges and cycles. Because of
- plugins, these checks can't be performed at compile-time. Exit with code zero
- if no errors were found, and return the number of found errors
- otherwise. Hidden option, useful for debugging LLVMC plugins.
-
-* ``--view-graph`` - Show a graphical representation of the compilation graph
- and exit. Requires that you have ``dot`` and ``gv`` programs installed. Hidden
- option, useful for debugging LLVMC plugins.
-
-* ``--write-graph`` - Write a ``compilation-graph.dot`` file in the current
- directory with the compilation graph description in Graphviz format (identical
- to the file used by the ``--view-graph`` option). The ``-o`` option can be
- used to set the output file name. Hidden option, useful for debugging LLVMC
- plugins.
-
-* ``--help``, ``--help-hidden``, ``--version`` - These options have
- their standard meaning.
-
-Compiling LLVMC plugins
-=======================
-
-It's easiest to start working on your own LLVMC plugin by copying the
-skeleton project which lives under ``$LLVMC_DIR/plugins/Simple``::
-
- $ cd $LLVMC_DIR/plugins
- $ cp -r Simple MyPlugin
- $ cd MyPlugin
- $ ls
- Makefile PluginMain.cpp Simple.td
-
-As you can see, our basic plugin consists of only two files (not
-counting the build script). ``Simple.td`` contains TableGen
-description of the compilation graph; its format is documented in the
-following sections. ``PluginMain.cpp`` is just a helper file used to
-compile the auto-generated C++ code produced from TableGen source. It
-can also contain hook definitions (see `below`__).
-
-__ hooks_
-
-The first thing that you should do is to change the ``LLVMC_PLUGIN``
-variable in the ``Makefile`` to avoid conflicts (since this variable
-is used to name the resulting library)::
-
- LLVMC_PLUGIN=MyPlugin
-
-It is also a good idea to rename ``Simple.td`` to something less
-generic::
-
- $ mv Simple.td MyPlugin.td
-
-To build your plugin as a dynamic library, just ``cd`` to its source
-directory and run ``make``. The resulting file will be called
-``plugin_llvmc_$(LLVMC_PLUGIN).$(DLL_EXTENSION)`` (in our case,
-``plugin_llvmc_MyPlugin.so``). This library can be then loaded in with the
-``-load`` option. Example::
-
- $ cd $LLVMC_DIR/plugins/Simple
- $ make
- $ llvmc -load $LLVM_DIR/Release/lib/plugin_llvmc_Simple.so
-
-Compiling standalone LLVMC-based drivers
-========================================
-
-By default, the ``llvmc`` executable consists of a driver core plus several
-statically linked plugins (``Base`` and ``Clang`` at the moment). You can
-produce a standalone LLVMC-based driver executable by linking the core with your
-own plugins. The recommended way to do this is by starting with the provided
-``Skeleton`` example (``$LLVMC_DIR/example/Skeleton``)::
-
- $ cd $LLVMC_DIR/example/
- $ cp -r Skeleton mydriver
- $ cd mydriver
- $ vim Makefile
- [...]
- $ make
-
-If you're compiling LLVM with different source and object directories, then you
-must perform the following additional steps before running ``make``::
-
- # LLVMC_SRC_DIR = $LLVM_SRC_DIR/tools/llvmc/
- # LLVMC_OBJ_DIR = $LLVM_OBJ_DIR/tools/llvmc/
- $ cp $LLVMC_SRC_DIR/example/mydriver/Makefile \
- $LLVMC_OBJ_DIR/example/mydriver/
- $ cd $LLVMC_OBJ_DIR/example/mydriver
- $ make
-
-Another way to do the same thing is by using the following command::
-
- $ cd $LLVMC_DIR
- $ make LLVMC_BUILTIN_PLUGINS=MyPlugin LLVMC_BASED_DRIVER_NAME=mydriver
-
-This works with both srcdir == objdir and srcdir != objdir, but assumes that the
-plugin source directory was placed under ``$LLVMC_DIR/plugins``.
-
-Sometimes, you will want a 'bare-bones' version of LLVMC that has no
-built-in plugins. It can be compiled with the following command::
-
- $ cd $LLVMC_DIR
- $ make LLVMC_BUILTIN_PLUGINS=""
-
-
-Customizing LLVMC: the compilation graph
-========================================
-
-Each TableGen configuration file should include the common
-definitions::
-
- include "llvm/CompilerDriver/Common.td"
-
-Internally, LLVMC stores information about possible source
-transformations in form of a graph. Nodes in this graph represent
-tools, and edges between two nodes represent a transformation path. A
-special "root" node is used to mark entry points for the
-transformations. LLVMC also assigns a weight to each edge (more on
-this later) to choose between several alternative edges.
-
-The definition of the compilation graph (see file
-``plugins/Base/Base.td`` for an example) is just a list of edges::
-
- def CompilationGraph : CompilationGraph<[
- Edge<"root", "llvm_gcc_c">,
- Edge<"root", "llvm_gcc_assembler">,
- ...
-
- Edge<"llvm_gcc_c", "llc">,
- Edge<"llvm_gcc_cpp", "llc">,
- ...
-
- OptionalEdge<"llvm_gcc_c", "opt", (case (switch_on "opt"),
- (inc_weight))>,
- OptionalEdge<"llvm_gcc_cpp", "opt", (case (switch_on "opt"),
- (inc_weight))>,
- ...
-
- OptionalEdge<"llvm_gcc_assembler", "llvm_gcc_cpp_linker",
- (case (input_languages_contain "c++"), (inc_weight),
- (or (parameter_equals "linker", "g++"),
- (parameter_equals "linker", "c++")), (inc_weight))>,
- ...
-
- ]>;
-
-As you can see, the edges can be either default or optional, where
-optional edges are differentiated by an additional ``case`` expression
-used to calculate the weight of this edge. Notice also that we refer
-to tools via their names (as strings). This makes it possible to add
-edges to an existing compilation graph in plugins without having to
-know about all tool definitions used in the graph.
-
-The default edges are assigned a weight of 1, and optional edges get a
-weight of 0 + 2*N where N is the number of tests that evaluated to
-true in the ``case`` expression. It is also possible to provide an
-integer parameter to ``inc_weight`` and ``dec_weight`` - in this case,
-the weight is increased (or decreased) by the provided value instead
-of the default 2. The default weight of an optional edge can also be
-changed by using the ``default`` clause of the ``case``
-construct.
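-
-For illustration, here is a minimal sketch of a weighted optional edge; the
-tool names and the ``-fast`` switch are made up for this example::
-
- OptionalEdge<"my_compiler", "my_optimizer",
- (case (switch_on "fast"), (inc_weight 3))>,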
-
-When passing an input file through the graph, LLVMC picks the edge
-with the maximum weight. To avoid ambiguity, there should be only one
-default edge between two nodes (with the exception of the root node,
-which gets special treatment - there you are allowed to specify one
-default edge *per language*).
-
-When multiple plugins are loaded, their compilation graphs are merged
-together. Since multiple edges that have the same end nodes are not
-allowed (i.e. the graph is not a multigraph), an edge defined in
-several plugins will be replaced by the definition from the plugin
-that was loaded last. Plugin load order can be controlled by using the
-plugin priority feature described above.
-
-To get a visual representation of the compilation graph (useful for
-debugging), run ``llvmc --view-graph``. You will need ``dot`` and
-``gsview`` installed for this to work properly.
-
-Describing options
-==================
-
-Command-line options that the plugin supports are defined by using an
-``OptionList``::
-
- def Options : OptionList<[
- (switch_option "E", (help "Help string")),
- (alias_option "quiet", "q")
- ...
- ]>;
-
-As you can see, the option list is just a list of DAGs, where each DAG
-is an option description consisting of the option name and some
-properties. A plugin can define more than one option list (they are
-all merged together in the end), which can be handy if one wants to
-separate option groups syntactically.
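-
-For instance, a plugin could keep compiler-related and linker-related
-options in two separate lists; all names below are made up for
-illustration::
-
- def CompilerOptions : OptionList<[
- (switch_option "fast", (help "Enable quick-and-dirty code generation"))
- ]>;
-
- def LinkerOptions : OptionList<[
- (parameter_option "linker", (help "Linker executable to use"))
- ]>;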
-
-* Possible option types:
-
- - ``switch_option`` - a simple boolean switch without arguments, for example
- ``-O2`` or ``-time``. At most one occurrence is allowed.
-
- - ``parameter_option`` - option that takes one argument, for example
- ``-std=c99``. It is also allowed to use spaces instead of the equality
- sign: ``-std c99``. At most one occurrence is allowed.
-
- - ``parameter_list_option`` - same as the above, but more than one option
- occurrence is allowed.
-
- - ``prefix_option`` - same as the parameter_option, but the option name and
- argument do not have to be separated. Example: ``-ofile``. This can be also
- specified as ``-o file``; however, ``-o=file`` will be parsed incorrectly
- (``=file`` will be interpreted as option value). At most one occurrence is
- allowed.
-
- - ``prefix_list_option`` - same as the above, but more than one occurrence of
- the option is allowed; example: ``-lm -lpthread`` (see also the sketch
- after this list).
-
- - ``alias_option`` - a special option type for creating aliases. Unlike other
- option types, aliases are not allowed to have any properties besides the
- aliased option name. Usage example: ``(alias_option "preprocess", "E")``
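-
-Taken together, options in the familiar ``-ofile`` and ``-lm -lpthread``
-styles could be declared with the following sketch (these declarations are
-illustrative and not part of any shipped plugin)::
-
- (prefix_option "o", (help "Output file name")),
- (prefix_list_option "l", (help "Libraries to link against"))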
-
-
-* Possible option properties:
-
- - ``help`` - help string associated with this option. Used for ``--help``
- output.
-
- - ``required`` - this option must be specified exactly once (or, in the case
- of list options without the ``multi_val`` property, at least
- once). Incompatible with ``optional`` and ``one_or_more``.
-
- - ``optional`` - the option can be specified either zero times or exactly
- once. The default for switch options. Useful only for list options in
- conjunction with ``multi_val``. Incompatible with ``required``,
- ``zero_or_more`` and ``one_or_more``.
-
- - ``one_or_more`` - the option must be specified at least once. Can be useful
- to allow switch options to be both obligatory and specified multiple
- times. For list options it is useful only in conjunction with ``multi_val``;
- for ordinary options it is synonymous with ``required``. Incompatible with
- ``required``, ``optional`` and ``zero_or_more``.
-
- - ``zero_or_more`` - the option can be specified zero or more times. Useful
- to allow a single switch option to be specified more than
- once. Incompatible with ``required``, ``optional`` and ``one_or_more``.
-
- - ``hidden`` - the description of this option will not appear in
- the ``--help`` output (but will appear in the ``--help-hidden``
- output).
-
- - ``really_hidden`` - the option will not be mentioned in any help
- output.
-
- - ``comma_separated`` - Indicates that any commas specified for an option's
- value should be used to split the value up into multiple values for the
- option. This property is valid only for list options. In conjunction with
- ``forward_value``, this can be used to implement option forwarding in the
- style of gcc's ``-Wa,``.
-
- - ``multi_val n`` - this option takes *n* arguments (can be useful in some
- special cases). Usage example: ``(parameter_list_option "foo", (multi_val
- 3))``; the command-line syntax is '-foo a b c'. Only list options can have
- this attribute; you can, however, use the ``one_or_more``, ``optional``
- and ``required`` properties.
-
- - ``init`` - this option has a default value, either a string (if it is a
- parameter), or a boolean (if it is a switch; as in C++, boolean constants
- are called ``true`` and ``false``). List options can't have the ``init``
- attribute.
- Usage examples: ``(switch_option "foo", (init true))``; ``(prefix_option
- "bar", (init "baz"))``.
-
- - ``extern`` - this option is defined in some other plugin, see `below`__.
-
- __ extern_
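-
-Several of these properties can be combined in a single option definition;
-a hypothetical sketch::
-
- (parameter_list_option "plugin-arg", (comma_separated), (hidden),
- (help "Comma-separated arguments passed through to the plugin")),
- (switch_option "verify", (init false),
- (help "Run extra consistency checks after each step"))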
-
-.. _extern:
-
-External options
-----------------
-
-Sometimes, when linking several plugins together, one plugin needs to
-access options defined in some other plugin. Because of the way
-options are implemented, such options must be marked as
-``extern``. This is what the ``extern`` option property is
-for. Example::
-
- ...
- (switch_option "E", (extern))
- ...
-
-If an external option has additional attributes besides 'extern', they are
-ignored. See also the section on plugin `priorities`__.
-
-__ priorities_
-
-.. _case:
-
-Conditional evaluation
-======================
-
-The 'case' construct is the main means by which programmability is
-achieved in LLVMC. It can be used to calculate edge weights, to program
-tool actions, and to modify the shell commands to be executed. The 'case'
-expression is designed after the similarly-named construct in
-functional languages and takes the form ``(case (test_1), statement_1,
-(test_2), statement_2, ... (test_N), statement_N)``. The statements
-are evaluated only if the corresponding tests evaluate to true.
-
-Examples::
-
- // Edge weight calculation
-
- // Increases edge weight by 5 if "-A" is provided on the
- // command-line, and by 5 more if "-B" is also provided.
- (case
- (switch_on "A"), (inc_weight 5),
- (switch_on "B"), (inc_weight 5))
-
-
- // Tool command line specification
-
- // Evaluates to "cmdline1" if the option "-A" is provided on the
- // command line; to "cmdline2" if "-B" is provided;
- // otherwise to "cmdline3".
-
- (case
- (switch_on "A"), "cmdline1",
- (switch_on "B"), "cmdline2",
- (default), "cmdline3")
-
-Note the slight difference in 'case' expression handling in the contexts
-of edge weights and command-line specification - in the second example
-the value of the ``"B"`` switch is never checked when switch ``"A"`` is
-enabled, and the whole expression always evaluates to ``"cmdline1"`` in
-that case.
-
-Case expressions can also be nested, i.e. the following is legal::
-
- (case (switch_on "E"), (case (switch_on "o"), ..., (default), ...),
- (default), ...)
-
-You should, however, try to avoid doing that because it hurts
-readability. It is usually better to split tool descriptions and/or
-use TableGen inheritance instead.
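-
-As a sketch of the inheritance approach (the tool names and command lines
-below are invented; tool descriptions themselves are covered in a later
-section), common properties can be factored into a TableGen class and
-reused by several tool definitions::
-
- class my_compiler_base<string language, string cmd> : Tool<
- [(in_language language),
- (out_language "llvm-bitcode"),
- (output_suffix "bc"),
- (cmd_line cmd),
- (sink)
- ]>;
-
- def my_cc : my_compiler_base<"c", "my-cc -c $INFILE -o $OUTFILE">;
- def my_cxx : my_compiler_base<"c++", "my-c++ -c $INFILE -o $OUTFILE">;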
-
-* Possible tests are:
-
- - ``switch_on`` - Returns true if a given command-line switch is provided by
- the user. Can be given a list as an argument, in which case ``(switch_on ["foo",
- "bar", "baz"])`` is equivalent to ``(and (switch_on "foo"), (switch_on
- "bar"), (switch_on "baz"))``.
- Example: ``(switch_on "opt")``.
-
- - ``any_switch_on`` - Given a list of switch options, returns true if any of
- the switches is turned on.
- Example: ``(any_switch_on ["foo", "bar", "baz"])`` is equivalent to ``(or
- (switch_on "foo"), (switch_on "bar"), (switch_on "baz"))``.
-
- - ``parameter_equals`` - Returns true if a command-line parameter equals
- a given value.
- Example: ``(parameter_equals "W", "all")``.
-
- - ``element_in_list`` - Returns true if a command-line parameter
- list contains a given value.
- Example: ``(element_in_list "l", "pthread")``.
-
- - ``input_languages_contain`` - Returns true if a given language
- belongs to the current input language set.
- Example: ``(input_languages_contain "c++")``.
-
- - ``in_language`` - Evaluates to true if the input file language is equal to
- the argument. At the moment it works only with ``cmd_line`` and ``actions`` (on
- non-join nodes).
- Example: ``(in_language "c++")``.
-
- - ``not_empty`` - Returns true if a given option (which should be either a
- parameter or a parameter list) is set by the user. Like ``switch_on``, it
- can also be given a list as an argument.
- Example: ``(not_empty "o")``.
-
- - ``any_not_empty`` - Returns true if ``not_empty`` returns true for any of
- the options in the list.
- Example: ``(any_not_empty ["foo", "bar", "baz"])`` is equivalent to ``(or
- (not_empty "foo"), (not_empty "bar"), (not_empty "baz"))``.
-
- - ``empty`` - The opposite of ``not_empty``. Equivalent to ``(not (not_empty
- X))``. Provided for convenience. Can be given a list as argument.
-
- - ``any_empty`` - Returns true if ``empty`` returns true for any of
- the options in the list.
- Example: ``(any_empty ["foo", "bar", "baz"])`` is equivalent to ``(not (and
- (not_empty "foo"), (not_empty "bar"), (not_empty "baz")))``.
-
- - ``single_input_file`` - Returns true if there was only one input file
- provided on the command-line. Used without arguments:
- ``(single_input_file)``.
-
- - ``multiple_input_files`` - Equivalent to ``(not (single_input_file))`` (the
- case of zero input files is considered an error).
-
- - ``default`` - Always evaluates to true. Should always be the last
- test in the ``case`` expression.
-
- - ``and`` - A standard logical combinator that returns true iff all of
- its arguments return true. Used like this: ``(and (test1), (test2),
- ... (testN))``. Nesting of ``and`` and ``or`` is allowed, but not
- encouraged.
-
- - ``or`` - A logical combinator that returns true iff any of its
- arguments returns true. Example: ``(or (test1), (test2), ... (testN))``.
-
- - ``not`` - Standard unary logical combinator that negates its
- argument. Example: ``(not (or (test1), (test2), ... (testN)))``.
-
-
-
-Writing a tool description
-==========================
-
-As was said earlier, nodes in the compilation graph represent tools,
-which are described separately. A tool definition looks like this
-(taken from the ``include/llvm/CompilerDriver/Tools.td`` file)::
-
- def llvm_gcc_cpp : Tool<[
- (in_language "c++"),
- (out_language "llvm-assembler"),
- (output_suffix "bc"),
- (cmd_line "llvm-g++ -c $INFILE -o $OUTFILE -emit-llvm"),
- (sink)
- ]>;
-
-This defines a new tool called ``llvm_gcc_cpp``, which is an alias for
-``llvm-g++``. As you can see, a tool definition is just a list of
-properties; most of them should be self-explanatory. The ``sink``
-property means that this tool should be passed all command-line
-options that aren't mentioned in the option list.
-
-The complete list of all currently implemented tool properties follows.
-
-* Possible tool properties:
-
- - ``in_language`` - input language name. Can be either a string or a
- list (if the tool supports multiple input languages).
-
- - ``out_language`` - output language name. Multiple output languages are not
- allowed.
-
- - ``output_suffix`` - output file suffix. Can also be changed
- dynamically, see documentation on actions.
-
- - ``cmd_line`` - the actual command used to run the tool. You can
- use ``$INFILE`` and ``$OUTFILE`` variables, output redirection
- with ``>``, hook invocations (``$CALL``), environment variables
- (via ``$ENV``) and the ``case`` construct.
-
- - ``join`` - this tool is a "join node" in the graph, i.e. it gets a
- list of input files and joins them together. Used for linkers.
-
- - ``sink`` - all command-line options that are not handled by other
- tools are passed to this tool.
-
- - ``actions`` - A single big ``case`` expression that specifies how
- this tool reacts to command-line options (described in more detail
- `below`__).
-
-__ actions_
-
-.. _actions:
-
-Actions
--------
-
-A tool often needs to react to command-line options, and this is
-precisely what the ``actions`` property is for. The next example
-illustrates this feature::
-
- def llvm_gcc_linker : Tool<[
- (in_language "object-code"),
- (out_language "executable"),
- (output_suffix "out"),
- (cmd_line "llvm-gcc $INFILE -o $OUTFILE"),
- (join),
- (actions (case (not_empty "L"), (forward "L"),
- (not_empty "l"), (forward "l"),
- (not_empty "dummy"),
- [(append_cmd "-dummy1"), (append_cmd "-dummy2")]))
- ]>;
-
-The ``actions`` tool property is implemented on top of the omnipresent
-``case`` expression. It associates one or more different *actions*
-with given conditions - in the example, the actions are ``forward``,
-which forwards a given option unchanged, and ``append_cmd``, which
-appends a given string to the tool execution command. Multiple actions
-can be associated with a single condition by using a list of actions
-(used in the example to append some dummy options). The same ``case``
-construct can also be used in the ``cmd_line`` property to modify the
-tool command line.
-
-The "join" property used in the example means that this tool behaves
-like a linker.
-
-The list of all possible actions follows.
-
-* Possible actions:
-
- - ``append_cmd`` - Append a string to the tool invocation command.
- Example: ``(case (switch_on "pthread"), (append_cmd "-lpthread"))``.
-
- - ``error`` - Exit with error.
- Example: ``(error "Mixing -c and -S is not allowed!")``.
-
- - ``warning`` - Print a warning.
- Example: ``(warning "Specifying both -O1 and -O2 is meaningless!")``.
-
- - ``forward`` - Forward the option unchanged.
- Example: ``(forward "Wall")``.
-
- - ``forward_as`` - Change the option's name, but forward the argument
- unchanged.
- Example: ``(forward_as "O0", "--disable-optimization")``.
-
- - ``forward_value`` - Forward only the option's value. Cannot be used with switch
- options (since they don't have values), but works fine with lists.
- Example: ``(forward_value "Wa,")``.
-
- - ``forward_transformed_value`` - As above, but applies a hook to the
- option's value before forwarding (see `below`__). When
- ``forward_transformed_value`` is applied to a list
- option, the hook must have signature
- ``std::string hooks::HookName (const std::vector<std::string>&)``.
- Example: ``(forward_transformed_value "m", "ConvertToMAttr")``.
-
- __ hooks_
-
- - ``output_suffix`` - Modify the output suffix of this tool.
- Example: ``(output_suffix "i")``.
-
- - ``stop_compilation`` - Stop compilation after this tool processes its
- input. Used without arguments.
- Example: ``(stop_compilation)``.
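-
-Several actions are often combined under one condition; for instance, a
-hypothetical preprocess-only mode could be expressed like this::
-
- (actions (case
- (switch_on "E"), [(forward "E"), (stop_compilation), (output_suffix "i")]))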
-
-
-Language map
-============
-
-If you are adding support for a new language to LLVMC, you'll need to
-modify the language map, which defines mappings from file extensions
-to language names. It is used to choose the proper toolchain(s) for a
-given input file set. A language map definition looks like this::
-
- def LanguageMap : LanguageMap<
- [LangToSuffixes<"c++", ["cc", "cp", "cxx", "cpp", "CPP", "c++", "C"]>,
- LangToSuffixes<"c", ["c"]>,
- ...
- ]>;
-
-For example, without those definitions the following command wouldn't work::
-
- $ llvmc hello.cpp
- llvmc: Unknown suffix: cpp
-
-The language map entries are needed only for the tools that are linked from the
-root node. Since a tool can't have multiple output languages, for inner nodes of
-the graph the input and output languages should match. This is enforced at
-compile-time.
-
-Option preprocessor
-===================
-
-It is sometimes useful to run error-checking code before processing the
-compilation graph. For example, if optimization options "-O1" and "-O2" are
-implemented as switches, we might want to output a warning if the user invokes
-the driver with both of these options enabled.
-
-The ``OptionPreprocessor`` feature is reserved specially for these
-occasions. Example (adapted from the built-in Base plugin)::
-
-
- def Preprocess : OptionPreprocessor<
- (case (not (any_switch_on ["O0", "O1", "O2", "O3"])),
- (set_option "O2"),
- (and (switch_on "O3"), (any_switch_on ["O0", "O1", "O2"])),
- (unset_option ["O0", "O1", "O2"]),
- (and (switch_on "O2"), (any_switch_on ["O0", "O1"])),
- (unset_option ["O0", "O1"]),
- (and (switch_on "O1"), (switch_on "O0")),
- (unset_option "O0"))
- >;
-
-Here, ``OptionPreprocessor`` is used to unset all spurious ``-O`` options so
-that they are not forwarded to the compiler. If no optimization options are
-specified, ``-O2`` is enabled.
-
-``OptionPreprocessor`` is basically a single big ``case`` expression, which is
-evaluated only once right after the plugin is loaded. The only allowed actions
-in ``OptionPreprocessor`` are ``error``, ``warning``, and two special actions:
-``unset_option`` and ``set_option``. As their names suggest, they can be used to
-set or unset a given option. To set an option with ``set_option``, use the
-two-argument form: ``(set_option "parameter", VALUE)``. Here, ``VALUE`` can be
-either a string, a string list, or a boolean constant.
-
-For convenience, ``set_option`` and ``unset_option`` also work on lists. That
-is, instead of ``[(unset_option "A"), (unset_option "B")]`` you can use
-``(unset_option ["A", "B"])``. Obviously, ``(set_option ["A", "B"])`` is valid
-only if both ``A`` and ``B`` are switches.
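-
-A short sketch of the two-argument form, with a made-up ``profile`` switch
-and a made-up ``linker`` parameter option::
-
- def PreprocessSketch : OptionPreprocessor<
- (case (and (switch_on "profile"), (empty "linker")),
- (set_option "linker", "g++"))
- >;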
-
-
-More advanced topics
-====================
-
-.. _hooks:
-
-Hooks and environment variables
--------------------------------
-
-Normally, LLVMC executes programs from the system ``PATH``. Sometimes,
-this is not sufficient: for example, we may want to specify tool paths
-or names in the configuration file. This can be easily achieved via
-the hooks mechanism. To write your own hooks, just add their
-definitions to ``PluginMain.cpp`` or drop a ``.cpp`` file into your
-plugin directory. Hooks should live in the ``hooks`` namespace
-and have the signature ``std::string hooks::MyHookName ([const char*
-Arg0 [, const char* Arg1 [, ...]]])``. They can be used from the
-``cmd_line`` tool property::
-
- (cmd_line "$CALL(MyHook)/path/to/file -o $CALL(AnotherHook)")
-
-To pass arguments to hooks, use the following syntax::
-
- (cmd_line "$CALL(MyHook, 'Arg1', 'Arg2', 'Arg # 3')/path/to/file -o1 -o2")
-
-It is also possible to use environment variables in the same manner::
-
- (cmd_line "$ENV(VAR1)/path/to/file -o $ENV(VAR2)")
-
-To change the command line string based on user-provided options use
-the ``case`` expression (documented `above`__)::
-
- (cmd_line
- (case
- (switch_on "E"),
- "llvm-g++ -E -x c $INFILE -o $OUTFILE",
- (default),
- "llvm-g++ -c -x c $INFILE -o $OUTFILE -emit-llvm"))
-
-__ case_
-
-.. _priorities:
-
-How plugins are loaded
-----------------------
-
-It is possible for LLVMC plugins to depend on each other. For example,
-one can create edges between nodes defined in some other plugin. To
-make this work, however, that plugin should be loaded first. To
-achieve this, the concept of plugin priority was introduced. By
-default, every plugin has priority zero; to specify the priority
-explicitly, put the following line in your plugin's TableGen file::
-
- def Priority : PluginPriority<$PRIORITY_VALUE>;
- // where PRIORITY_VALUE is some integer > 0
-
-Plugins are loaded in order of their (increasing) priority, starting
-with 0. Therefore, the plugin with the highest priority value will be
-loaded last.
-
-Debugging
----------
-
-When writing LLVMC plugins, it can be useful to get a visual
-representation of the resulting compilation graph. This can be achieved via
-the command line option ``--view-graph``. This command assumes that Graphviz_ and
-Ghostview_ are installed. There is also a ``--write-graph`` option that
-creates a Graphviz source file (``compilation-graph.dot``) in the
-current directory.
-
-Another useful ``llvmc`` option is ``--check-graph``. It checks the
-compilation graph for common errors like mismatched output/input
-language names, multiple default edges and cycles. These checks can't
-be performed at compile-time because the plugins can load code
-dynamically. When invoked with ``--check-graph``, ``llvmc`` doesn't
-perform any compilation tasks and returns the number of encountered
-errors as its status code.
-
-.. _Graphviz: http://www.graphviz.org/
-.. _Ghostview: http://pages.cs.wisc.edu/~ghost/
-
-Conditioning on the executable name
------------------------------------
-
-For now, the executable name (the value passed to the driver in ``argv[0]``) is
-accessible only in the C++ code (i.e. hooks). Use the following code::
-
- #include <cstring>
- #include <string>
-
- // ProgramName is provided by the LLVMC driver core.
- namespace llvmc {
- extern const char* ProgramName;
- }
-
- namespace hooks {
-
- std::string MyHook() {
- //...
- if (std::strcmp(llvmc::ProgramName, "mydriver") == 0) {
- //...
- }
- //...
- }
-
- } // end namespace hooks
-
-In general, you're encouraged not to make the behaviour dependent on the
-executable file name, and to use command-line switches instead. See, for example, how
-the ``Base`` plugin behaves when it needs to choose the correct linker options
-(think ``g++`` vs. ``gcc``).
-
-.. raw:: html
-
- <hr />
- <address>
- <a href="http://jigsaw.w3.org/css-validator/check/referer">
- <img src="http://jigsaw.w3.org/css-validator/images/vcss-blue"
- alt="Valid CSS" /></a>
- <a href="http://validator.w3.org/check?uri=referer">
- <img src="http://www.w3.org/Icons/valid-xhtml10-blue"
- alt="Valid XHTML 1.0 Transitional"/></a>
-
- <a href="mailto:foldr at codedgers.com">Mikhail Glushenkov</a><br />
- <a href="http://llvm.org">LLVM Compiler Infrastructure</a><br />
-
- Last modified: $Date: 2008-12-11 11:34:48 -0600 (Thu, 11 Dec 2008) $
- </address>
diff --git a/libclamav/c++/llvm/tools/llvmc/doc/LLVMC-Tutorial.rst b/libclamav/c++/llvm/tools/llvmc/doc/LLVMC-Tutorial.rst
deleted file mode 100644
index e7e8f08..0000000
--- a/libclamav/c++/llvm/tools/llvmc/doc/LLVMC-Tutorial.rst
+++ /dev/null
@@ -1,129 +0,0 @@
-======================
-Tutorial - Using LLVMC
-======================
-..
- This file was automatically generated by rst2html.
- Please do not edit directly!
- The ReST source lives in the directory 'tools/llvmc/doc'.
-
-.. contents::
-
-.. raw:: html
-
- <div class="doc_author">
- <p>Written by <a href="mailto:foldr at codedgers.com">Mikhail Glushenkov</a></p>
- </div>
-
-Introduction
-============
-
-LLVMC is a generic compiler driver, which plays the same role for LLVM
-as the ``gcc`` program does for GCC - the difference being that LLVMC
-is designed to be more adaptable and easier to customize. Most of
-LLVMC's functionality is implemented via plugins, which can be loaded
-dynamically or compiled in. This tutorial describes the basic usage
-and configuration of LLVMC.
-
-
-Compiling with LLVMC
-====================
-
-In general, LLVMC tries to be command-line compatible with ``gcc`` as
-much as possible, so most of the familiar options work::
-
- $ llvmc -O3 -Wall hello.cpp
- $ ./a.out
- hello
-
-This will invoke ``llvm-g++`` under the hood (you can see which
-commands are executed by using the ``-v`` option). For further help on
-command-line LLVMC usage, refer to the ``llvmc --help`` output.
-
-
-Using LLVMC to generate toolchain drivers
-=========================================
-
-LLVMC plugins are written mostly using TableGen_, so you need to
-be familiar with it to get anything done.
-
-.. _TableGen: http://llvm.org/docs/TableGenFundamentals.html
-
-Start by compiling ``example/Simple``, which is a primitive wrapper for
-``gcc``::
-
- $ cd $LLVM_DIR/tools/llvmc
- $ cp -r example/Simple plugins/Simple
-
- # NB: A less verbose way to compile standalone LLVMC-based drivers is
- # described in the reference manual.
-
- $ make LLVMC_BASED_DRIVER_NAME=mygcc LLVMC_BUILTIN_PLUGINS=Simple
- $ cat > hello.c
- [...]
- $ mygcc hello.c
- $ ./hello.out
- Hello
-
-Here we link our plugin with the LLVMC core statically to form an executable
-file called ``mygcc``. It is also possible to build our plugin as a dynamic
-library to be loaded by the ``llvmc`` executable (or any other LLVMC-based
-standalone driver); this is described in the reference manual.
-
-The contents of the file ``Simple.td`` look like this::
-
- // Include common definitions
- include "llvm/CompilerDriver/Common.td"
-
- // Tool descriptions
- def gcc : Tool<
- [(in_language "c"),
- (out_language "executable"),
- (output_suffix "out"),
- (cmd_line "gcc $INFILE -o $OUTFILE"),
- (sink)
- ]>;
-
- // Language map
- def LanguageMap : LanguageMap<[LangToSuffixes<"c", ["c"]>]>;
-
- // Compilation graph
- def CompilationGraph : CompilationGraph<[Edge<"root", "gcc">]>;
-
-As you can see, this file consists of three parts: tool descriptions,
-language map, and the compilation graph definition.
-
-At the heart of LLVMC is the idea of a compilation graph: vertices in
-this graph are tools, and edges represent a transformation path
-between two tools (for example, assembly source produced by the
-compiler can be transformed into executable code by an assembler). The
-compilation graph is basically a list of edges; a special node named
-``root`` is used to mark graph entry points.
-
-Tool descriptions are represented as property lists: most properties
-in the example above should be self-explanatory; the ``sink`` property
-means that all options lacking an explicit description should be
-forwarded to this tool.
-
-The ``LanguageMap`` associates a language name with a list of suffixes
-and is used for deciding which toolchain corresponds to a given input
-file.
-
-To learn more about LLVMC customization, refer to the reference
-manual and plugin source code in the ``plugins`` directory.
-
-.. raw:: html
-
- <hr />
- <address>
- <a href="http://jigsaw.w3.org/css-validator/check/referer">
- <img src="http://jigsaw.w3.org/css-validator/images/vcss-blue"
- alt="Valid CSS" /></a>
- <a href="http://validator.w3.org/check?uri=referer">
- <img src="http://www.w3.org/Icons/valid-xhtml10-blue"
- alt="Valid XHTML 1.0 Transitional"/></a>
-
- <a href="mailto:foldr at codedgers.com">Mikhail Glushenkov</a><br />
- <a href="http://llvm.org">LLVM Compiler Infrastructure</a><br />
-
- Last modified: $Date: 2008-12-11 11:34:48 -0600 (Thu, 11 Dec 2008) $
- </address>
diff --git a/libclamav/c++/llvm/tools/llvmc/doc/Makefile b/libclamav/c++/llvm/tools/llvmc/doc/Makefile
deleted file mode 100644
index ef98767..0000000
--- a/libclamav/c++/llvm/tools/llvmc/doc/Makefile
+++ /dev/null
@@ -1,33 +0,0 @@
-##===- tools/llvmc/doc/Makefile ----------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL=../../..
-
-ifneq (,$(strip $(wildcard $(LEVEL)/Makefile.config)))
-include $(LEVEL)/Makefile.config
-else
-CP=cp
-RM=rm
-endif
-
-DOC_DIR=../../../docs
-RST2HTML=rst2html --stylesheet=llvm.css --link-stylesheet
-
-all : LLVMC-Reference.html LLVMC-Tutorial.html
- $(CP) LLVMC-Reference.html $(DOC_DIR)/CompilerDriver.html
- $(CP) LLVMC-Tutorial.html $(DOC_DIR)/CompilerDriverTutorial.html
-
-LLVMC-Tutorial.html : LLVMC-Tutorial.rst
- $(RST2HTML) $< $@
-
-LLVMC-Reference.html : LLVMC-Reference.rst
- $(RST2HTML) $< $@
-
-clean :
- $(RM) LLVMC-Tutorial.html LLVMC-Reference.html
diff --git a/libclamav/c++/llvm/tools/llvmc/doc/img/lines.gif b/libclamav/c++/llvm/tools/llvmc/doc/img/lines.gif
deleted file mode 100644
index 88f491e..0000000
Binary files a/libclamav/c++/llvm/tools/llvmc/doc/img/lines.gif and /dev/null differ
diff --git a/libclamav/c++/llvm/tools/llvmc/driver/Main.cpp b/libclamav/c++/llvm/tools/llvmc/driver/Main.cpp
deleted file mode 100644
index b1f5b67..0000000
--- a/libclamav/c++/llvm/tools/llvmc/driver/Main.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-//===--- Main.cpp - The LLVM Compiler Driver -------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open
-// Source License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Just include CompilerDriver/Main.inc.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/CompilerDriver/Main.inc"
diff --git a/libclamav/c++/llvm/tools/llvmc/driver/Makefile b/libclamav/c++/llvm/tools/llvmc/driver/Makefile
deleted file mode 100644
index 2f3104b..0000000
--- a/libclamav/c++/llvm/tools/llvmc/driver/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-##===- tools/llvmc/driver/Makefile -------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open
-# Source License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LLVMC_BASED_DRIVER = $(LLVMC_BASED_DRIVER_NAME)
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/llvmc/example/Hello/Hello.cpp b/libclamav/c++/llvm/tools/llvmc/example/Hello/Hello.cpp
deleted file mode 100644
index a7179ea..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/Hello/Hello.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-//===- Hello.cpp - Example code from "Writing an LLVMC Plugin" ------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Test plugin for LLVMC. Shows how to write plugins without using TableGen.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/CompilerDriver/CompilationGraph.h"
-#include "llvm/CompilerDriver/Plugin.h"
-#include "llvm/Support/raw_ostream.h"
-
-namespace {
-struct MyPlugin : public llvmc::BasePlugin {
-
- void PreprocessOptions() const
- {}
-
- void PopulateLanguageMap(llvmc::LanguageMap&) const
- { outs() << "Hello!\n"; }
-
- void PopulateCompilationGraph(llvmc::CompilationGraph&) const
- {}
-};
-
-static llvmc::RegisterPlugin<MyPlugin> RP("Hello", "Hello World plugin");
-
-}
diff --git a/libclamav/c++/llvm/tools/llvmc/example/Hello/Makefile b/libclamav/c++/llvm/tools/llvmc/example/Hello/Makefile
deleted file mode 100644
index 10325e6..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/Hello/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-##===- tools/llvmc/plugins/Hello/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-
-LLVMC_PLUGIN = Hello
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/llvmc/example/Simple/Makefile b/libclamav/c++/llvm/tools/llvmc/example/Simple/Makefile
deleted file mode 100644
index d7adb5d..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/Simple/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- tools/llvmc/plugins/Simple/Makefile -----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-
-LLVMC_PLUGIN = Simple
-BUILT_SOURCES = AutoGenerated.inc
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/llvmc/example/Simple/PluginMain.cpp b/libclamav/c++/llvm/tools/llvmc/example/Simple/PluginMain.cpp
deleted file mode 100644
index add8acb..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/Simple/PluginMain.cpp
+++ /dev/null
@@ -1 +0,0 @@
-#include "AutoGenerated.inc"
diff --git a/libclamav/c++/llvm/tools/llvmc/example/Simple/Simple.td b/libclamav/c++/llvm/tools/llvmc/example/Simple/Simple.td
deleted file mode 100644
index 87bc385..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/Simple/Simple.td
+++ /dev/null
@@ -1,37 +0,0 @@
-//===- Simple.td - A simple plugin for LLVMC ------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// A simple LLVMC-based gcc wrapper that shows how to write LLVMC plugins.
-//
-// To compile, use this command:
-//
-// $ cd $LLVMC_DIR/example/Simple
-// $ make
-//
-// Run as:
-//
-// $ llvmc -load $LLVM_DIR/Release/lib/plugin_llvmc_Simple.so
-//
-// For instructions on how to build your own LLVMC-based driver, see
-// the 'example/Skeleton' directory.
-//===----------------------------------------------------------------------===//
-
-include "llvm/CompilerDriver/Common.td"
-
-def gcc : Tool<
-[(in_language "c"),
- (out_language "executable"),
- (output_suffix "out"),
- (cmd_line "gcc $INFILE -o $OUTFILE"),
- (sink)
-]>;
-
-def LanguageMap : LanguageMap<[LangToSuffixes<"c", ["c"]>]>;
-
-def CompilationGraph : CompilationGraph<[Edge<"root", "gcc">]>;
diff --git a/libclamav/c++/llvm/tools/llvmc/example/Skeleton/Makefile b/libclamav/c++/llvm/tools/llvmc/example/Skeleton/Makefile
deleted file mode 100644
index f489abf..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/Skeleton/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-##===- llvmc/example/Skeleton/Makefile ---------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open
-# Source License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-# Change this so that $(BASE_LEVEL)/Makefile.common refers to
-# $LLVM_DIR/Makefile.common or $YOUR_LLVM_BASED_PROJECT/Makefile.common.
-export LLVMC_BASE_LEVEL = ../../../..
-
-# Change this to the name of your LLVMC-based driver.
-export LLVMC_BASED_DRIVER_NAME = llvmc-skeleton
-
-# List your plugin names here
-export LLVMC_BUILTIN_PLUGINS = # Plugin
-
-LEVEL = $(LLVMC_BASE_LEVEL)
-
-DIRS = plugins driver
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/llvmc/example/Skeleton/README b/libclamav/c++/llvm/tools/llvmc/example/Skeleton/README
deleted file mode 100644
index 92216ae..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/Skeleton/README
+++ /dev/null
@@ -1,6 +0,0 @@
-
-This is a template that can be used to create your own LLVMC-based drivers. Just
-copy the `Skeleton` directory to the location of your preference and edit
-`Skeleton/Makefile` and `Skeleton/plugins/Plugin`.
-
-The build system assumes that your project is based on LLVM.
diff --git a/libclamav/c++/llvm/tools/llvmc/example/Skeleton/driver/Main.cpp b/libclamav/c++/llvm/tools/llvmc/example/Skeleton/driver/Main.cpp
deleted file mode 100644
index b1f5b67..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/Skeleton/driver/Main.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-//===--- Main.cpp - The LLVM Compiler Driver -------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open
-// Source License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Just include CompilerDriver/Main.inc.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/CompilerDriver/Main.inc"
diff --git a/libclamav/c++/llvm/tools/llvmc/example/Skeleton/driver/Makefile b/libclamav/c++/llvm/tools/llvmc/example/Skeleton/driver/Makefile
deleted file mode 100644
index 93e795b..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/Skeleton/driver/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-##===- llvmc/example/Skeleton/driver/Makefile --------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open
-# Source License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = $(LLVMC_BASE_LEVEL)/..
-LLVMC_BASED_DRIVER = $(LLVMC_BASED_DRIVER_NAME)
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/llvmc/example/Skeleton/plugins/Makefile b/libclamav/c++/llvm/tools/llvmc/example/Skeleton/plugins/Makefile
deleted file mode 100644
index fb07f23..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/Skeleton/plugins/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-##===- llvmc/example/Skeleton/plugins/Makefile -------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open
-# Source License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = $(LLVMC_BASE_LEVEL)/..
-
-ifneq ($(LLVMC_BUILTIN_PLUGINS),)
-DIRS = $(LLVMC_BUILTIN_PLUGINS)
-endif
-
-export LLVMC_BUILTIN_PLUGIN=1
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/llvmc/example/Skeleton/plugins/Plugin/Makefile b/libclamav/c++/llvm/tools/llvmc/example/Skeleton/plugins/Plugin/Makefile
deleted file mode 100644
index 54f7221..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/Skeleton/plugins/Plugin/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-##===- llvmc/example/Skeleton/plugins/Plugin/Makefile ------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = $(LLVMC_BASE_LEVEL)/../..
-
-# Change this to the name of your plugin.
-LLVMC_PLUGIN = Plugin
-
-BUILT_SOURCES = AutoGenerated.inc
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/llvmc/example/Skeleton/plugins/Plugin/Plugin.td b/libclamav/c++/llvm/tools/llvmc/example/Skeleton/plugins/Plugin/Plugin.td
deleted file mode 100644
index febb9ad..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/Skeleton/plugins/Plugin/Plugin.td
+++ /dev/null
@@ -1,7 +0,0 @@
-//===- Plugin.td - A skeleton plugin for LLVMC -------------*- tablegen -*-===//
-//
-// Write the code for your plugin here.
-//
-//===----------------------------------------------------------------------===//
-
-include "llvm/CompilerDriver/Common.td"
diff --git a/libclamav/c++/llvm/tools/llvmc/example/Skeleton/plugins/Plugin/PluginMain.cpp b/libclamav/c++/llvm/tools/llvmc/example/Skeleton/plugins/Plugin/PluginMain.cpp
deleted file mode 100644
index add8acb..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/Skeleton/plugins/Plugin/PluginMain.cpp
+++ /dev/null
@@ -1 +0,0 @@
-#include "AutoGenerated.inc"
diff --git a/libclamav/c++/llvm/tools/llvmc/example/mcc16/Makefile b/libclamav/c++/llvm/tools/llvmc/example/mcc16/Makefile
deleted file mode 100644
index e94bca2..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/mcc16/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-##===- llvmc/example/mcc16/Makefile ------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open
-# Source License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-export LLVMC_BASE_LEVEL = ../../../..
-export LLVMC_BASED_DRIVER_NAME = mcc16
-export LLVMC_BUILTIN_PLUGINS = PIC16Base
-
-LEVEL = $(LLVMC_BASE_LEVEL)
-
-DIRS = plugins driver
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/llvmc/example/mcc16/README b/libclamav/c++/llvm/tools/llvmc/example/mcc16/README
deleted file mode 100644
index eeef6a4..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/mcc16/README
+++ /dev/null
@@ -1,75 +0,0 @@
-This is a basic compiler driver for the PIC16 toolchain that shows how to create
-your own llvmc-based drivers. It is based on the example/Skeleton template.
-
-The PIC16 toolchain looks like this:
-
-clang-cc (FE) -> llvm-ld (optimizer) -> llc (codegen) -> native-as -> native-ld
-
-The following features were requested by Sanjiv:
-
-From: Sanjiv Gupta <sanjiv.gupta <at> microchip.com>
-Subject: Re: llvmc for PIC16
-Newsgroups: gmane.comp.compilers.llvm.devel
-Date: 2009-06-05 06:51:14 GMT
-
-The salient features that we want to have in the driver are:
-1. llvm-ld will be used as "The Optimizer".
-2. If the user has specified to generate the final executable, then
-llvm-ld should run on all the .bc files generated by clang and create a
-single optimized .bc file for further tools.
-3. -Wo <options> - pass optimizations to the llvm-ld
-4. mcc16 -Wl <options> - pass options to native linker.
-5. mcc16 -Wa <options> - pass options to native assembler.
-
-Here are some example command lines and sample command invocations as to
-what should be done.
-
-$ mcc16 -S foo.c
-// [clang-cc foo.c] -> foo.bc
-// [llvm-ld foo.bc] -> foo.opt.bc
-// [llc foo.opt.bc] -> foo.s
-
-$ mcc16 -S foo.c bar.c
-// [clang-cc foo.c] -> foo.bc
-// [llvm-ld foo.bc] -> foo.opt.bc
-// [llc foo.opt.bc] -> foo.s
-// [clang-cc bar.c] -> bar.bc
-// [llvm-ld bar.bc] -> bar.opt.bc
-// [llc bar.opt.bc] -> bar.s
-
-** Use of -g causes llvm-ld to run with -disable-opt
-$ mcc16 -S -g foo.c
-// [clang-cc foo.c] -> foo.bc
-// [llvm-ld -disable-opt foo.bc] -> foo.opt.bc
-// [llc foo.opt.bc] -> foo.s
-
-** -I is passed to clang-cc, -pre-RA-sched=list-burr to llc.
-$ mcc16 -S -g -I ../include -pre-RA-sched=list-burr foo.c
-// [clang-cc -I ../include foo.c] -> foo.bc
-// [llvm-ld -disable-opt foo.bc] -> foo.opt.bc
-// [llc -pre-RA-sched=list-burr foo.opt.bc] -> foo.s
-
-** -Wo passes options to llvm-ld
-$ mcc16 -Wo=opt1,opt2 -S -I ../include -pre-RA-sched=list-burr foo.c
-// [clang-cc -I ../include foo.c] -> foo.bc
-// [llvm-ld -opt1 -opt2 foo.bc] -> foo.opt.bc
-// [llc -pre-RA-sched=list-burr foo.opt.bc] -> foo.s
-
-** -Wa passes options to native as.
-$ mcc16 -c foo.c -Wa=opt1
-// [clang-cc foo.c] -> foo.bc
-// [llvm-ld foo.bc] -> foo.opt.bc
-// [llc foo.opt.bc] -> foo.s
-// [native-as -opt1 foo.s] -> foo.o
-
-$ mcc16 -Wo=opt1 -Wl=opt2 -Wa=opt3 foo.c bar.c
-// [clang-cc foo.c] -> foo.bc
-// [clang-cc bar.c] -> bar.bc
-// [llvm-ld -opt1 foo.bc bar.bc] -> a.out.bc
-// [llc a.out.bc] -> a.out.s
-// [native-as -opt3 a.out.s] -> a.out.o
-// [native-ld -opt2 a.out.o] -> a.out
-
-Is this achievable by a tablegen based driver ?
-
-- Sanjiv
diff --git a/libclamav/c++/llvm/tools/llvmc/example/mcc16/driver/Main.cpp b/libclamav/c++/llvm/tools/llvmc/example/mcc16/driver/Main.cpp
deleted file mode 100644
index e66e2f9..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/mcc16/driver/Main.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-//===--- Main.cpp - The LLVM Compiler Driver -------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open
-// Source License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Usually this file just includes CompilerDriver/Main.inc, but here we apply
-// some trickery to make the built-in '-save-temps' option hidden and enable
-// '--temp-dir' by default.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Config/config.h"
-#include "llvm/CompilerDriver/BuiltinOptions.h"
-#include "llvm/CompilerDriver/ForceLinkage.h"
-#include "llvm/System/Path.h"
-#include <iostream>
-
-namespace llvmc {
- int Main(int argc, char** argv);
-}
-
-// PACKAGE_VERSION should be modified in the top-level configure file to include the build number.
-void PIC16VersionPrinter(void) {
- std::cout << "MPLAB C16 1.0 " << PACKAGE_VERSION << "\n";
-}
-
-int main(int argc, char** argv) {
-
- // HACK
- SaveTemps.setHiddenFlag(llvm::cl::Hidden);
- TempDirname.setHiddenFlag(llvm::cl::Hidden);
- Languages.setHiddenFlag(llvm::cl::Hidden);
- DryRun.setHiddenFlag(llvm::cl::Hidden);
-
- llvm::cl::SetVersionPrinter(PIC16VersionPrinter);
-
- // Ask for a standard temp dir, but just cache its basename, and delete it.
- llvm::sys::Path tempDir;
- tempDir = llvm::sys::Path::GetTemporaryDirectory();
- TempDirname = tempDir.getBasename();
- tempDir.eraseFromDisk(true);
-
- // We are creating a temp dir in the current dir, with the cached name.
- // But before that, remove any dir that already exists with that name.
- tempDir = TempDirname;
- tempDir.eraseFromDisk(true);
-
- llvmc::ForceLinkage();
- return llvmc::Main(argc, argv);
-}
diff --git a/libclamav/c++/llvm/tools/llvmc/example/mcc16/driver/Makefile b/libclamav/c++/llvm/tools/llvmc/example/mcc16/driver/Makefile
deleted file mode 100644
index 670d8bd..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/mcc16/driver/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-##===- llvmc/example/mcc16/driver/Makefile -----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open
-# Source License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = $(LLVMC_BASE_LEVEL)/..
-LLVMC_BASED_DRIVER = $(LLVMC_BASED_DRIVER_NAME)
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/llvmc/example/mcc16/plugins/Makefile b/libclamav/c++/llvm/tools/llvmc/example/mcc16/plugins/Makefile
deleted file mode 100644
index fb07f23..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/mcc16/plugins/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-##===- llvmc/example/Skeleton/plugins/Makefile -------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open
-# Source License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = $(LLVMC_BASE_LEVEL)/..
-
-ifneq ($(LLVMC_BUILTIN_PLUGINS),)
-DIRS = $(LLVMC_BUILTIN_PLUGINS)
-endif
-
-export LLVMC_BUILTIN_PLUGIN=1
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/llvmc/example/mcc16/plugins/PIC16Base/Makefile b/libclamav/c++/llvm/tools/llvmc/example/mcc16/plugins/PIC16Base/Makefile
deleted file mode 100644
index 5d785fd..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/mcc16/plugins/PIC16Base/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-##===- llvmc/example/Skeleton/plugins/Plugin/Makefile ------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = $(LLVMC_BASE_LEVEL)/../..
-
-# Change this to the name of your plugin.
-LLVMC_PLUGIN = PIC16Base
-
-BUILT_SOURCES = AutoGenerated.inc
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/llvmc/example/mcc16/plugins/PIC16Base/PIC16Base.td b/libclamav/c++/llvm/tools/llvmc/example/mcc16/plugins/PIC16Base/PIC16Base.td
deleted file mode 100644
index 25149ad..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/mcc16/plugins/PIC16Base/PIC16Base.td
+++ /dev/null
@@ -1,234 +0,0 @@
-//===- PIC16Base.td - PIC16 toolchain driver ---------------*- tablegen -*-===//
-//
-// A basic driver for the PIC16 toolchain.
-//
-//===----------------------------------------------------------------------===//
-
-include "llvm/CompilerDriver/Common.td"
-
-// Options
-
-def OptionList : OptionList<[
- (switch_option "g",
- (help "Enable Debugging")),
- (switch_option "E",
- (help "Stop after preprocessing, do not compile")),
- (switch_option "S",
- (help "Stop after compilation, do not assemble")),
- (switch_option "bc",
- (help "Stop after b-code generation, do not compile")),
- (switch_option "c",
- (help "Stop after assembling, do not link")),
- (prefix_option "p",
- (help "Specify part name")),
- (prefix_list_option "I",
- (help "Add a directory to include path")),
- (prefix_list_option "L",
- (help "Add a directory to library path")),
- (prefix_list_option "K",
- (help "Add a directory to linker script search path")),
- (parameter_option "l",
- (help "Specify a library to link")),
- (parameter_option "k",
- (help "Specify a linker script")),
- (parameter_option "m",
- (help "Generate linker map file with the given name")),
- (prefix_list_option "D",
- (help "Define a macro")),
- (switch_option "X",
- (help "Do not invoke mp2hex to create an output hex file.")),
- (switch_option "O0",
- (help "Do not optimize")),
- (switch_option "O1",
- (help "Optimization Level 1.")),
- (switch_option "O2",
- (help "Optimization Level 2.")),
- (switch_option "O3",
- (help "Optimization Level 3.")),
- (switch_option "Od",
- (help "Perform Debug-safe Optimizations only.")),
- (switch_option "w",
- (help "Disable all warnings.")),
-// (switch_option "O1",
-// (help "Optimization level 1")),
-// (switch_option "O2",
-// (help "Optimization level 2. (Default)")),
-// (parameter_option "pre-RA-sched",
-// (help "Example of an option that is passed to llc")),
- (parameter_option "regalloc",
- (help "Register allocator to use (possible values: simple, linearscan, pbqp, local; default=linearscan)")),
- (prefix_list_option "Wa,", (comma_separated),
- (help "Pass options to assembler (Run 'gpasm -help' for assembler options)")),
- (prefix_list_option "Wl,", (comma_separated),
- (help "Pass options to linker (Run 'mplink -help' for linker options)"))
-// (prefix_list_option "Wllc,",
-// (help "Pass options to llc")),
-// (prefix_list_option "Wo,",
-// (help "Pass options to llvm-ld"))
-]>;
-
-// Tools
-class clang_based<string language, string cmd, string ext_E> : Tool<
-[(in_language language),
- (out_language "llvm-bitcode"),
- (output_suffix "bc"),
- (command cmd),
- (actions (case
- (and (multiple_input_files),
- (or (switch_on "S"), (switch_on "c"))),
- (error "cannot specify -o with -c or -S with multiple files"),
- (switch_on "E"), [(forward "E"),
- (stop_compilation), (output_suffix ext_E)],
- (and (switch_on "E"), (empty "o")), (no_out_file),
- (switch_on "bc"),[(stop_compilation), (output_suffix "bc")],
- (switch_on "g"), (append_cmd "-g"),
- (switch_on "w"), (append_cmd "-w"),
- (switch_on "O1"), (append_cmd ""),
- (switch_on "O2"), (append_cmd ""),
- (switch_on "O3"), (append_cmd ""),
- (switch_on "Od"), (append_cmd ""),
- (not_empty "D"), (forward "D"),
- (not_empty "I"), (forward "I"),
- (switch_on "O0"), (append_cmd "-O0"),
- (default), (append_cmd "-O1")))
-// (sink)
-]>;
-
-def clang_cc : clang_based<"c", "$CALL(GetBinDir)clang -cc1 -I $CALL(GetStdHeadersDir) -D $CALL(GetLowerCasePartDefine) -D $CALL(GetUpperCasePartDefine) -triple=pic16- -emit-llvm-bc ", "i">;
-
-//def clang_cc : Tool<[
-// (in_language "c"),
-// (out_language "llvm-bitcode"),
-// (output_suffix "bc"),
-// (cmd_line "$CALL(GetBinDir)clang-cc -I $CALL(GetStdHeadersDir) -triple=pic16- -emit-llvm-bc "),
-// (cmd_line kkkkk
-// (actions (case
-// (switch_on "g"), (append_cmd "g"),
-// (not_empty "I"), (forward "I"))),
-// (sink)
-//]>;
-
-
-// pre-link-and-lto step.
-def llvm_ld : Tool<[
- (in_language "llvm-bitcode"),
- (out_language "llvm-bitcode"),
- (output_suffix "bc"),
- (command "$CALL(GetBinDir)llvm-ld -L $CALL(GetStdLibsDir) -disable-licm-promotion -l std"),
- (out_file_option "-b"),
- (actions (case
- (switch_on "O0"), (append_cmd "-disable-opt"),
- (switch_on "O1"), (append_cmd "-disable-opt"),
-// Whenever O3 is not specified on the command line, the default, i.e. -disable-inlining, will always be added.
- (switch_on "O2"), (append_cmd ""),
- (switch_on "O3"), (append_cmd ""),
- (default), (append_cmd "-disable-inlining"))),
- (join)
-]>;
-
-// optimize single file
-def llvm_ld_optimizer : Tool<[
- (in_language "llvm-bitcode"),
- (out_language "llvm-bitcode"),
- (output_suffix "bc"),
-// FIXME: we are still not disabling licm-promotion.
-// -disable-licm-promotion and building stdn library causes c16-71 to fail.
- (command "$CALL(GetBinDir)llvm-ld "),
- (out_file_option "-b"),
- (actions (case
- (switch_on "O0"), (append_cmd "-disable-opt"),
- (switch_on "O1"), (append_cmd "-disable-opt"),
-// Whenever O3 is not specified on the command line, the default, i.e. -disable-inlining, will always be added.
- (switch_on "O2"), (append_cmd ""),
- (switch_on "O3"), (append_cmd ""),
- (default), (append_cmd "-disable-inlining")))
-]>;
-
-// optimizer step.
-def pic16passes : Tool<[
- (in_language "llvm-bitcode"),
- (out_language "llvm-bitcode"),
- (output_suffix "obc"),
- (command "$CALL(GetBinDir)opt -pic16cloner -pic16overlay -f"),
- (actions (case
- (switch_on "O0"), (append_cmd "-disable-opt")))
-]>;
-
-def llc : Tool<[
- (in_language "llvm-bitcode"),
- (out_language "assembler"),
- (output_suffix "s"),
- (command "$CALL(GetBinDir)llc -march=pic16 -disable-jump-tables -pre-RA-sched=list-burr -f"),
- (actions (case
- (switch_on "S"), (stop_compilation),
-// (not_empty "Wllc,"), (unpack_values "Wllc,"),
-// (not_empty "pre-RA-sched"), (forward "pre-RA-sched")))
- (not_empty "regalloc"), (forward "regalloc"),
- (empty "regalloc"), (append_cmd "-regalloc=linearscan")))
-]>;
-
-def gpasm : Tool<[
- (in_language "assembler"),
- (out_language "object-code"),
- (output_suffix "o"),
- (command "$CALL(GetBinDir)gpasm -z -r decimal -I $CALL(GetStdAsmHeadersDir) -C -c -w 2"),
- (actions (case
- (switch_on "c"), (stop_compilation),
- (switch_on "g"), (append_cmd "-g"),
- (not_empty "p"), (forward "p"),
- (empty "p"), (append_cmd "-p 16f1xxx"),
- (not_empty "Wa,"), (forward_value "Wa,")))
-]>;
-
-def mplink : Tool<[
- (in_language "object-code"),
- (out_language "executable"),
- (output_suffix "cof"),
- (command "$CALL(GetBinDir)mplink -e -k $CALL(GetStdLinkerScriptsDir) -l $CALL(GetStdLibsDir) intrinsics.lib stdn.lib"),
- (actions (case
- (not_empty "Wl,"), (forward_value "Wl,"),
- (switch_on "X"), (append_cmd "-x"),
- (not_empty "L"), (forward_as "L", "-l"),
- (not_empty "K"), (forward_as "K", "-k"),
- (not_empty "m"), (forward "m"),
- (not_empty "p"), [(forward "p"), (append_cmd "-c")],
- (empty "p"), (append_cmd "-p 16f1xxx -c"),
-// (not_empty "l"), [(unpack_values "l"),(append_cmd ".lib")])),
- (not_empty "k"), (forward "k"),
- (not_empty "l"), (forward "l"))),
- (join)
-]>;
-
-// Language map
-
-def LanguageMap : LanguageMap<[
- LangToSuffixes<"c", ["c"]>,
- LangToSuffixes<"c-cpp-output", ["i"]>,
- LangToSuffixes<"assembler", ["s"]>,
- LangToSuffixes<"assembler-with-cpp", ["S"]>,
- LangToSuffixes<"llvm-assembler", ["ll"]>,
- LangToSuffixes<"llvm-bitcode", ["bc"]>,
- LangToSuffixes<"object-code", ["o"]>,
- LangToSuffixes<"executable", ["cof"]>
-]>;
-
-// Compilation graph
-
-def CompilationGraph : CompilationGraph<[
- Edge<"root", "clang_cc">,
- Edge<"root", "llvm_ld">,
- OptionalEdge<"root", "llvm_ld_optimizer", (case
- (switch_on "S"), (inc_weight),
- (switch_on "c"), (inc_weight))>,
- Edge<"root", "gpasm">,
- Edge<"root", "mplink">,
- Edge<"clang_cc", "llvm_ld">,
- OptionalEdge<"clang_cc", "llvm_ld_optimizer", (case
- (switch_on "S"), (inc_weight),
- (switch_on "c"), (inc_weight))>,
- Edge<"llvm_ld", "pic16passes">,
- Edge<"llvm_ld_optimizer", "pic16passes">,
- Edge<"pic16passes", "llc">,
- Edge<"llc", "gpasm">,
- Edge<"gpasm", "mplink">
-]>;
diff --git a/libclamav/c++/llvm/tools/llvmc/example/mcc16/plugins/PIC16Base/PluginMain.cpp b/libclamav/c++/llvm/tools/llvmc/example/mcc16/plugins/PIC16Base/PluginMain.cpp
deleted file mode 100644
index 9b2f9fc..0000000
--- a/libclamav/c++/llvm/tools/llvmc/example/mcc16/plugins/PIC16Base/PluginMain.cpp
+++ /dev/null
@@ -1,106 +0,0 @@
-#include "AutoGenerated.inc"
-
-#include "llvm/System/Path.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-namespace llvmc {
- extern char *ProgramName;
-}
-
-
-
-// Returns the platform-specific directory separator via #ifdefs.
-// FIXME: This currently works on Linux and Windows only. It does not
-// work on other Unices.
-static std::string GetDirSeparator() {
-#if __linux__ || __APPLE__
- return "/";
-#else
- return "\\";
-#endif
-}
-
-namespace hooks {
-// Get the preprocessor define for the part.
-// It has the form __partname, in lower case.
-std::string
-GetLowerCasePartDefine(void) {
- std::string Partname;
- if (AutoGeneratedParameter_p.empty()) {
- Partname = "16f1xxx";
- } else {
- Partname = AutoGeneratedParameter_p;
- }
-
- std::string LowerCase;
- for (unsigned i = 0; i < Partname.size(); i++) {
- LowerCase.push_back(std::tolower(Partname[i]));
- }
-
- return "__" + LowerCase;
-}
-
-std::string
-GetUpperCasePartDefine(void) {
- std::string Partname;
- if (AutoGeneratedParameter_p.empty()) {
- Partname = "16f1xxx";
- } else {
- Partname = AutoGeneratedParameter_p;
- }
-
- std::string UpperCase;
- for (unsigned i = 0; i < Partname.size(); i++) {
- UpperCase.push_back(std::toupper(Partname[i]));
- }
-
- return "__" + UpperCase;
-}
-
-
-// Get the dir where c16 executables reside.
-std::string GetBinDir() {
- // Construct a Path object from the program name.
- void *P = (void*) (intptr_t) GetBinDir;
- sys::Path ProgramFullPath
- = sys::Path::GetMainExecutable(llvmc::ProgramName, P);
-
- // Get the dir name for the program. Its last component should be 'bin'.
- std::string BinDir = ProgramFullPath.getDirname();
-
- // llvm::errs() << "BinDir: " << BinDir << '\n';
- return BinDir + GetDirSeparator();
-}
-
-// Get the Top-level Installation dir for c16.
-std::string GetInstallDir() {
- sys::Path BinDirPath = sys::Path(GetBinDir());
-
- // Go one more level up to get the install dir.
- std::string InstallDir = BinDirPath.getDirname();
-
- return InstallDir + GetDirSeparator();
-}
-
-// Get the dir where the c16 header files reside.
-std::string GetStdHeadersDir() {
- return GetInstallDir() + "include";
-}
-
-// Get the dir where the assembler header files reside.
-std::string GetStdAsmHeadersDir() {
- return GetInstallDir() + "inc";
-}
-
-// Get the dir where the linker scripts reside.
-std::string GetStdLinkerScriptsDir() {
- return GetInstallDir() + "lkr";
-}
-
-// Get the dir where startup code, intrinsics and lib reside.
-std::string GetStdLibsDir() {
- return GetInstallDir() + "lib";
-}
-}
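For illustration, the part-define hooks removed above turn the -p value (defaulting to 16f1xxx) into a __partname preprocessor macro. A minimal standalone sketch of that mapping; the part name 16F690 is chosen only as an example input:

#include <cctype>
#include <iostream>
#include <string>

// Same idea as hooks::GetLowerCasePartDefine above: prefix the lower-cased
// part name with "__"; an empty -p value falls back to "16f1xxx".
static std::string LowerCasePartDefine(std::string Part) {
  if (Part.empty())
    Part = "16f1xxx";
  std::string Lower;
  for (unsigned i = 0; i < Part.size(); ++i)
    Lower.push_back(static_cast<char>(std::tolower(Part[i])));
  return "__" + Lower;
}

int main() {
  // Prints "__16f690" and "__16f1xxx".
  std::cout << LowerCasePartDefine("16F690") << "\n"
            << LowerCasePartDefine("") << std::endl;
  return 0;
}
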
diff --git a/libclamav/c++/llvm/tools/llvmc/plugins/Base/Base.td.in b/libclamav/c++/llvm/tools/llvmc/plugins/Base/Base.td.in
deleted file mode 100644
index ac0f665..0000000
--- a/libclamav/c++/llvm/tools/llvmc/plugins/Base/Base.td.in
+++ /dev/null
@@ -1,367 +0,0 @@
-//===- Base.td - LLVMC toolchain descriptions --------------*- tablegen -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains compilation graph description used by llvmc.
-//
-//===----------------------------------------------------------------------===//
-
-include "llvm/CompilerDriver/Common.td"
-
-// Options
-
-def OptList : OptionList<[
- (switch_option "emit-llvm",
- (help "Emit LLVM .ll files instead of native object files")),
- (switch_option "E",
- (help "Stop after the preprocessing stage, do not run the compiler")),
- (switch_option "fsyntax-only",
- (help "Stop after checking the input for syntax errors")),
- (switch_option "opt",
- (help "Enable opt")),
- (switch_option "O0",
- (help "Turn off optimization"), (zero_or_more)),
- (switch_option "O1",
- (help "Optimization level 1"), (zero_or_more)),
- (switch_option "O2",
- (help "Optimization level 2"), (zero_or_more)),
- (switch_option "O3",
- (help "Optimization level 3"), (zero_or_more)),
- (switch_option "S",
- (help "Stop after compilation, do not assemble")),
- (switch_option "c",
- (help "Compile and assemble, but do not link")),
- (switch_option "pthread",
- (help "Enable threads")),
- (switch_option "m32",
- (help "Generate code for a 32-bit environment"), (hidden)),
- (switch_option "m64",
- (help "Generate code for a 64-bit environment"), (hidden)),
- (switch_option "fPIC",
- (help "Relocation model: PIC"), (hidden)),
- (switch_option "mdynamic-no-pic",
- (help "Relocation model: dynamic-no-pic"), (hidden)),
- (parameter_option "linker",
- (help "Choose linker (possible values: gcc, g++)")),
- (parameter_option "mtune",
- (help "Target a specific CPU type"), (hidden), (forward_not_split)),
-
- // TODO: Add a conditional compilation mechanism to make Darwin-only options
- // like '-arch' really Darwin-only.
-
- (parameter_option "arch",
- (help "Compile for the specified target architecture"), (hidden)),
- (parameter_option "march",
- (help "A synonym for -mtune"), (hidden), (forward_not_split)),
- (parameter_option "mcpu",
- (help "A deprecated synonym for -mtune"), (hidden), (forward_not_split)),
- (switch_option "mfix-and-continue",
- (help "Needed by gdb to load .o files dynamically"), (hidden)),
- (parameter_option "MF",
- (help "Specify a file to write dependencies to"), (hidden)),
- (parameter_list_option "MT",
- (help "Change the name of the rule emitted by dependency generation"),
- (hidden)),
- (parameter_list_option "include",
- (help "Include the named file prior to preprocessing")),
- (parameter_list_option "iquote",
- (help "Search dir only for files requested with #include \"file\""),
- (hidden)),
- (parameter_list_option "framework",
- (help "Specifies a framework to link against")),
- (parameter_list_option "weak_framework",
- (help "Specifies a framework to weakly link against"), (hidden)),
- (parameter_option "filelist", (hidden),
- (help "Link the files listed in file")),
- (prefix_list_option "F",
- (help "Add a directory to framework search path")),
- (prefix_list_option "I",
- (help "Add a directory to include path")),
- (prefix_list_option "D",
- (help "Define a macro")),
- (parameter_list_option "Xpreprocessor", (hidden),
- (help "Pass options to preprocessor")),
- (prefix_list_option "Wa,", (comma_separated),
- (help "Pass options to assembler")),
- (parameter_list_option "Xassembler", (hidden),
- (help "Pass options to assembler")),
- (prefix_list_option "Wllc,", (comma_separated),
- (help "Pass options to llc")),
- (prefix_list_option "L",
- (help "Add a directory to link path")),
- (prefix_list_option "l",
- (help "Search a library when linking")),
- (prefix_list_option "Wl,",
- (help "Pass options to linker")),
- (parameter_list_option "Xlinker", (hidden),
- (help "Pass options to linker")),
- (prefix_list_option "Wo,", (comma_separated),
- (help "Pass options to opt")),
- (prefix_list_option "m",
- (help "Enable or disable various extensions (-mmmx, -msse, etc.)"),
- (hidden)),
- (switch_option "dynamiclib", (hidden),
- (help "Produce a dynamic library")),
- (switch_option "prebind", (hidden),
- (help "Prebind all undefined symbols")),
- (switch_option "dead_strip", (hidden),
- (help "Remove unreachable blocks of code")),
- (switch_option "single_module", (hidden),
- (help "Build the library so it contains only one module")),
- (parameter_option "install_name", (hidden),
- (help "File name the library will be installed in")),
- (parameter_option "compatibility_version", (hidden),
- (help "Compatibility version number")),
- (parameter_option "current_version", (hidden),
- (help "Current version number"))
-]>;
-
-// Option preprocessor.
-
-def Preprocess : OptionPreprocessor<
-(case (not (any_switch_on ["O0", "O1", "O2", "O3"])),
- (set_option "O2"),
- (and (switch_on "O3"), (any_switch_on ["O0", "O1", "O2"])),
- (unset_option ["O0", "O1", "O2"]),
- (and (switch_on "O2"), (any_switch_on ["O0", "O1"])),
- (unset_option ["O0", "O1"]),
- (switch_on ["O1", "O0"]),
- (unset_option "O0"))
->;
-
-// Tools
-
-class llvm_gcc_based <string cmd_prefix, string in_lang,
- string E_ext, string out_lang> : Tool<
-[(in_language in_lang),
- (out_language "llvm-bitcode"),
- (output_suffix out_lang),
- (command cmd_prefix),
- (actions
- (case
- (and (not_empty "o"),
- (multiple_input_files), (or (switch_on "S"), (switch_on "c"))),
- (error "cannot specify -o with -c or -S with multiple files"),
- (switch_on "E"),
- [(forward "E"), (stop_compilation), (output_suffix E_ext)],
- (and (switch_on "E"), (empty "o")), (no_out_file),
- (switch_on ["emit-llvm", "S"]),
- [(output_suffix "ll"), (stop_compilation)],
- (switch_on ["emit-llvm", "c"]), (stop_compilation),
- (switch_on "fsyntax-only"), [(forward "fsyntax-only"),
- (no_out_file), (stop_compilation)],
- (switch_on ["S", "emit-llvm"]), [(forward "S"), (forward "emit-llvm")],
- (not (or (switch_on ["S", "emit-llvm"]), (switch_on "fsyntax-only"))),
- [(append_cmd "-c"), (append_cmd "-emit-llvm")],
-
- // Forwards
- (not_empty "Xpreprocessor"), (forward "Xpreprocessor"),
- (not_empty "include"), (forward "include"),
- (not_empty "iquote"), (forward "iquote"),
- (not_empty "save-temps"), (append_cmd "-save-temps"),
- (not_empty "I"), (forward "I"),
- (not_empty "F"), (forward "F"),
- (not_empty "D"), (forward "D"),
- (not_empty "arch"), (forward "arch"),
- (not_empty "march"), (forward "march"),
- (not_empty "mtune"), (forward "mtune"),
- (not_empty "mcpu"), (forward "mcpu"),
- (not_empty "m"), (forward "m"),
- (switch_on "mfix-and-continue"), (forward "mfix-and-continue"),
- (switch_on "m32"), (forward "m32"),
- (switch_on "m64"), (forward "m64"),
- (switch_on "O0"), (forward "O0"),
- (switch_on "O1"), (forward "O1"),
- (switch_on "O2"), (forward "O2"),
- (switch_on "O3"), (forward "O3"),
- (switch_on "fPIC"), (forward "fPIC"),
- (switch_on "mdynamic-no-pic"), (forward "mdynamic-no-pic"),
- (not_empty "MF"), (forward "MF"),
- (not_empty "MT"), (forward "MT"))),
- (sink)
-]>;
-
-def llvm_gcc_c : llvm_gcc_based<"@LLVMGCCCOMMAND@ -x c", "c", "i", "bc">;
-def llvm_gcc_cpp : llvm_gcc_based<"@LLVMGXXCOMMAND@ -x c++", "c++", "i", "bc">;
-def llvm_gcc_m : llvm_gcc_based<"@LLVMGCCCOMMAND@ -x objective-c",
- "objective-c", "mi", "bc">;
-def llvm_gcc_mxx : llvm_gcc_based<"@LLVMGCCCOMMAND@ -x objective-c++",
- "objective-c++", "mi", "bc">;
-
-def llvm_gcc_c_pch : llvm_gcc_based<"@LLVMGCCCOMMAND@ -x c-header",
- "c-header", "i", "gch">;
-def llvm_gcc_cpp_pch : llvm_gcc_based<"@LLVMGXXCOMMAND@ -x c++-header",
- "c++-header",
- "i", "gch">;
-def llvm_gcc_m_pch : llvm_gcc_based<"@LLVMGCCCOMMAND@ -x objective-c-header",
- "objective-c-header",
- "mi", "gch">;
-def llvm_gcc_mxx_pch
- : llvm_gcc_based<"@LLVMGCCCOMMAND@ -x objective-c++-header",
- "objective-c++-header", "mi", "gch">;
-
-def opt : Tool<
-[(in_language "llvm-bitcode"),
- (out_language "llvm-bitcode"),
- (output_suffix "bc"),
- (actions (case (not_empty "Wo,"), (forward_value "Wo,"),
- (switch_on "O1"), (forward "O1"),
- (switch_on "O2"), (forward "O2"),
- (switch_on "O3"), (forward "O3"))),
- (command "opt -f")
-]>;
-
-def llvm_as : Tool<
-[(in_language "llvm-assembler"),
- (out_language "llvm-bitcode"),
- (output_suffix "bc"),
- (command "llvm-as"),
- (actions (case (switch_on "emit-llvm"), (stop_compilation)))
-]>;
-
-def llvm_gcc_assembler : Tool<
-[(in_language "assembler"),
- (out_language "object-code"),
- (output_suffix "o"),
- (command "@LLVMGCCCOMMAND@ -c -x assembler"),
- (actions (case
- (switch_on "c"), (stop_compilation),
- (not_empty "arch"), (forward "arch"),
- (not_empty "Xassembler"), (forward "Xassembler"),
- (not_empty "Wa,"), (forward "Wa,")))
-]>;
-
-def llc : Tool<
-[(in_language ["llvm-bitcode", "llvm-assembler"]),
- (out_language "assembler"),
- (output_suffix "s"),
- (command "llc -f"),
- (actions (case
- (switch_on "S"), (stop_compilation),
- (switch_on "O0"), (forward "O0"),
- (switch_on "O1"), (forward "O1"),
- (switch_on "O2"), (forward "O2"),
- (switch_on "O3"), (forward "O3"),
- (switch_on "fPIC"), (append_cmd "-relocation-model=pic"),
- (switch_on "mdynamic-no-pic"),
- (append_cmd "-relocation-model=dynamic-no-pic"),
- (not_empty "march"), (forward_as "mtune", "-mcpu"),
- (not_empty "mtune"), (forward_as "mtune", "-mcpu"),
- (not_empty "mcpu"), (forward "mcpu"),
- (not_empty "m"), (forward_transformed_value "m", "ConvertToMAttr"),
- (not_empty "Wllc,"), (forward_value "Wllc,")))
-]>;
-
-// Base class for linkers
-class llvm_gcc_based_linker <string cmd_prefix> : Tool<
-[(in_language "object-code"),
- (out_language "executable"),
- (output_suffix "out"),
- (command cmd_prefix),
- (works_on_empty (case (not_empty "filelist"), true,
- (default), false)),
- (join),
- (actions (case
- (switch_on "pthread"), (append_cmd "-lpthread"),
- (not_empty "L"), (forward "L"),
- (not_empty "F"), (forward "F"),
- (not_empty "arch"), (forward "arch"),
- (not_empty "framework"), (forward "framework"),
- (not_empty "weak_framework"), (forward "weak_framework"),
- (not_empty "filelist"), (forward "filelist"),
- (switch_on "m32"), (forward "m32"),
- (switch_on "m64"), (forward "m64"),
- (not_empty "l"), (forward "l"),
- (not_empty "Xlinker"), (forward "Xlinker"),
- (not_empty "Wl,"), (forward "Wl,"),
- (switch_on "dynamiclib"), (forward "dynamiclib"),
- (switch_on "prebind"), (forward "prebind"),
- (switch_on "dead_strip"), (forward "dead_strip"),
- (switch_on "single_module"), (forward "single_module"),
- (not_empty "compatibility_version"),
- (forward "compatibility_version"),
- (not_empty "current_version"), (forward "current_version"),
- (not_empty "install_name"), (forward "install_name")))
-]>;
-
-// Default linker
-def llvm_gcc_linker : llvm_gcc_based_linker<"@LLVMGCCCOMMAND@">;
-// Alternative linker for C++
-def llvm_gcc_cpp_linker : llvm_gcc_based_linker<"@LLVMGXXCOMMAND@">;
-
-// Language map
-
-def LanguageMap : LanguageMap<
- [LangToSuffixes<"c++", ["cc", "cp", "cxx", "cpp", "CPP", "c++", "C"]>,
- LangToSuffixes<"c++-header", ["hpp"]>,
- LangToSuffixes<"c", ["c"]>,
- LangToSuffixes<"c-header", ["h"]>,
- LangToSuffixes<"c-cpp-output", ["i"]>,
- LangToSuffixes<"objective-c-cpp-output", ["mi"]>,
- LangToSuffixes<"objective-c++", ["mm"]>,
- LangToSuffixes<"objective-c++-header", ["hmm"]>,
- LangToSuffixes<"objective-c", ["m"]>,
- LangToSuffixes<"objective-c-header", ["hm"]>,
- LangToSuffixes<"assembler", ["s"]>,
- LangToSuffixes<"assembler-with-cpp", ["S"]>,
- LangToSuffixes<"llvm-assembler", ["ll"]>,
- LangToSuffixes<"llvm-bitcode", ["bc"]>,
- LangToSuffixes<"object-code", ["o", "*empty*"]>,
- LangToSuffixes<"executable", ["out"]>
- ]>;
-
-// Compilation graph
-
-def CompilationGraph : CompilationGraph<[
- Edge<"root", "llvm_gcc_c">,
- Edge<"root", "llvm_gcc_assembler">,
- Edge<"root", "llvm_gcc_cpp">,
- Edge<"root", "llvm_gcc_m">,
- Edge<"root", "llvm_gcc_mxx">,
- Edge<"root", "llc">,
-
- Edge<"root", "llvm_gcc_c_pch">,
- Edge<"root", "llvm_gcc_cpp_pch">,
- Edge<"root", "llvm_gcc_m_pch">,
- Edge<"root", "llvm_gcc_mxx_pch">,
-
- Edge<"llvm_gcc_c", "llc">,
- Edge<"llvm_gcc_cpp", "llc">,
- Edge<"llvm_gcc_m", "llc">,
- Edge<"llvm_gcc_mxx", "llc">,
- Edge<"llvm_as", "llc">,
-
- OptionalEdge<"root", "llvm_as",
- (case (switch_on "emit-llvm"), (inc_weight))>,
- OptionalEdge<"llvm_gcc_c", "opt", (case (switch_on "opt"), (inc_weight))>,
- OptionalEdge<"llvm_gcc_cpp", "opt", (case (switch_on "opt"), (inc_weight))>,
- OptionalEdge<"llvm_gcc_m", "opt", (case (switch_on "opt"), (inc_weight))>,
- OptionalEdge<"llvm_gcc_mxx", "opt", (case (switch_on "opt"), (inc_weight))>,
- OptionalEdge<"llvm_as", "opt", (case (switch_on "opt"), (inc_weight))>,
- Edge<"opt", "llc">,
-
- Edge<"llc", "llvm_gcc_assembler">,
- Edge<"llvm_gcc_assembler", "llvm_gcc_linker">,
- OptionalEdge<"llvm_gcc_assembler", "llvm_gcc_cpp_linker",
- (case
- (or (input_languages_contain "c++"),
- (input_languages_contain "objective-c++")),
- (inc_weight),
- (or (parameter_equals "linker", "g++"),
- (parameter_equals "linker", "c++")), (inc_weight))>,
-
-
- Edge<"root", "llvm_gcc_linker">,
- OptionalEdge<"root", "llvm_gcc_cpp_linker",
- (case
- (or (input_languages_contain "c++"),
- (input_languages_contain "objective-c++")),
- (inc_weight),
- (or (parameter_equals "linker", "g++"),
- (parameter_equals "linker", "c++")), (inc_weight))>
- ]>;
diff --git a/libclamav/c++/llvm/tools/llvmc/plugins/Base/Hooks.cpp b/libclamav/c++/llvm/tools/llvmc/plugins/Base/Hooks.cpp
deleted file mode 100644
index 661a914..0000000
--- a/libclamav/c++/llvm/tools/llvmc/plugins/Base/Hooks.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-#include <string>
-#include <vector>
-
-namespace hooks {
-typedef std::vector<std::string> StrVec;
-
-/// ConvertToMAttr - Convert -m* and -mno-* to -mattr=+*,-*
-std::string ConvertToMAttr(const StrVec& Opts) {
- std::string out("-mattr=");
-
- bool firstIter = true;
- for (StrVec::const_iterator B = Opts.begin(), E = Opts.end(); B!=E; ++B) {
- const std::string& Arg = *B;
-
- if (firstIter)
- firstIter = false;
- else
- out += ",";
-
- if (Arg.find("no-") == 0 && Arg[3] != 0) {
- out += '-';
- out += Arg.c_str() + 3;
- }
- else {
- out += '+';
- out += Arg;
- }
- }
-
- return out;
-}
-
-}
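For illustration, ConvertToMAttr above folds a list of -m<feature> / -mno-<feature> values into a single -mattr= string for llc. A minimal standalone sketch with the same logic; the main() driver and the sse/no-mmx inputs are only example assumptions:

#include <iostream>
#include <string>
#include <vector>

// Same logic as hooks::ConvertToMAttr above: "sse" -> "+sse", "no-mmx" -> "-mmx".
std::string ConvertToMAttr(const std::vector<std::string> &Opts) {
  std::string Out("-mattr=");
  bool First = true;
  for (std::vector<std::string>::const_iterator I = Opts.begin(),
                                                E = Opts.end(); I != E; ++I) {
    if (!First)
      Out += ",";
    First = false;
    if (I->find("no-") == 0 && (*I)[3] != 0)
      Out += "-" + I->substr(3);
    else
      Out += "+" + *I;
  }
  return Out;
}

int main() {
  std::vector<std::string> Opts;
  Opts.push_back("sse");
  Opts.push_back("no-mmx");
  // Prints "-mattr=+sse,-mmx".
  std::cout << ConvertToMAttr(Opts) << std::endl;
  return 0;
}
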
diff --git a/libclamav/c++/llvm/tools/llvmc/plugins/Base/Makefile b/libclamav/c++/llvm/tools/llvmc/plugins/Base/Makefile
deleted file mode 100644
index ebc4335..0000000
--- a/libclamav/c++/llvm/tools/llvmc/plugins/Base/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- tools/llvmc/plugins/Base/Makefile -------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-
-LLVMC_PLUGIN = Base
-BUILT_SOURCES = AutoGenerated.inc
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/llvmc/plugins/Base/PluginMain.cpp b/libclamav/c++/llvm/tools/llvmc/plugins/Base/PluginMain.cpp
deleted file mode 100644
index add8acb..0000000
--- a/libclamav/c++/llvm/tools/llvmc/plugins/Base/PluginMain.cpp
+++ /dev/null
@@ -1 +0,0 @@
-#include "AutoGenerated.inc"
diff --git a/libclamav/c++/llvm/tools/llvmc/plugins/Clang/Clang.td b/libclamav/c++/llvm/tools/llvmc/plugins/Clang/Clang.td
deleted file mode 100644
index 988d9b1..0000000
--- a/libclamav/c++/llvm/tools/llvmc/plugins/Clang/Clang.td
+++ /dev/null
@@ -1,101 +0,0 @@
-include "llvm/CompilerDriver/Common.td"
-
-def Priority : PluginPriority<1>;
-
-def Options : OptionList<[
-// Extern options
-(switch_option "E", (extern)),
-(switch_option "S", (extern)),
-(switch_option "c", (extern)),
-(switch_option "fsyntax-only", (extern)),
-(switch_option "emit-llvm", (extern)),
-(switch_option "pthread", (extern)),
-(parameter_list_option "I", (extern)),
-(parameter_list_option "include", (extern)),
-(parameter_list_option "L", (extern)),
-(parameter_list_option "l", (extern)),
-(prefix_list_option "Wa,", (extern)),
-(prefix_list_option "Wl,", (extern)),
-
-(switch_option "clang", (help "Use Clang instead of llvm-gcc"))
-]>;
-
-class clang_based<string language, string cmd, string ext_E> : Tool<
-[(in_language language),
- (out_language "llvm-bitcode"),
- (output_suffix "bc"),
- (command cmd),
- (actions (case (switch_on "E"),
- [(forward "E"), (stop_compilation), (output_suffix ext_E)],
- (and (switch_on "E"), (empty "o")), (no_out_file),
- (switch_on "fsyntax-only"), (stop_compilation),
- (switch_on ["S", "emit-llvm"]),
- [(append_cmd "-emit-llvm"),
- (stop_compilation), (output_suffix "ll")],
- (not (switch_on ["S", "emit-llvm"])),
- (append_cmd "-emit-llvm-bc"),
- (switch_on ["c", "emit-llvm"]),
- (stop_compilation),
- (not_empty "include"), (forward "include"),
- (not_empty "I"), (forward "I"))),
- (sink)
-]>;
-
-def clang_c : clang_based<"c", "clang -x c", "i">;
-def clang_cpp : clang_based<"c++", "clang -x c++", "i">;
-def clang_objective_c : clang_based<"objective-c",
- "clang -x objective-c", "mi">;
-def clang_objective_cpp : clang_based<"objective-c++",
- "clang -x objective-c++", "mi">;
-
-def as : Tool<
-[(in_language "assembler"),
- (out_language "object-code"),
- (output_suffix "o"),
- (command "as"),
- (actions (case (not_empty "Wa,"), (forward_value "Wa,"),
- (switch_on "c"), (stop_compilation)))
-]>;
-
-// Default linker
-def llvm_ld : Tool<
-[(in_language "object-code"),
- (out_language "executable"),
- (output_suffix "out"),
- (command "llvm-ld -native -disable-internalize"),
- (actions (case
- (switch_on "pthread"), (append_cmd "-lpthread"),
- (not_empty "L"), (forward "L"),
- (not_empty "l"), (forward "l"),
- (not_empty "Wl,"), (forward_value "Wl,"))),
- (join)
-]>;
-
-// Language map
-
-def LanguageMap : LanguageMap<[
- LangToSuffixes<"c++", ["cc", "cp", "cxx", "cpp", "CPP", "c++", "C"]>,
- LangToSuffixes<"c", ["c"]>,
- LangToSuffixes<"objective-c", ["m"]>,
- LangToSuffixes<"c-cpp-output", ["i"]>,
- LangToSuffixes<"objective-c-cpp-output", ["mi"]>
-]>;
-
-// Compilation graph
-
-def CompilationGraph : CompilationGraph<[
- OptionalEdge<"root", "clang_c",
- (case (switch_on "clang"), (inc_weight))>,
- OptionalEdge<"root", "clang_cpp",
- (case (switch_on "clang"), (inc_weight))>,
- OptionalEdge<"root", "clang_objective_c",
- (case (switch_on "clang"), (inc_weight))>,
- OptionalEdge<"root", "clang_objective_cpp",
- (case (switch_on "clang"), (inc_weight))>,
- Edge<"clang_c", "llc">,
- Edge<"clang_cpp", "llc">,
- Edge<"clang_objective_c", "llc">,
- Edge<"clang_objective_cpp", "llc">,
- OptionalEdge<"llc", "as", (case (switch_on "clang"), (inc_weight))>,
- Edge<"as", "llvm_ld">
-]>;
diff --git a/libclamav/c++/llvm/tools/llvmc/plugins/Clang/Makefile b/libclamav/c++/llvm/tools/llvmc/plugins/Clang/Makefile
deleted file mode 100644
index 5e5b88a..0000000
--- a/libclamav/c++/llvm/tools/llvmc/plugins/Clang/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- tools/llvmc/plugins/Clang/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-
-LLVMC_PLUGIN = Clang
-BUILT_SOURCES = AutoGenerated.inc
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/tools/llvmc/plugins/Clang/PluginMain.cpp b/libclamav/c++/llvm/tools/llvmc/plugins/Clang/PluginMain.cpp
deleted file mode 100644
index add8acb..0000000
--- a/libclamav/c++/llvm/tools/llvmc/plugins/Clang/PluginMain.cpp
+++ /dev/null
@@ -1 +0,0 @@
-#include "AutoGenerated.inc"
diff --git a/libclamav/c++/llvm/tools/llvmc/plugins/Makefile b/libclamav/c++/llvm/tools/llvmc/plugins/Makefile
deleted file mode 100644
index 37dac6f..0000000
--- a/libclamav/c++/llvm/tools/llvmc/plugins/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-##===- tools/llvmc/plugins/Makefile ------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open
-# Source License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-
-ifneq ($(LLVMC_BUILTIN_PLUGINS),)
-DIRS = $(LLVMC_BUILTIN_PLUGINS)
-endif
-
-export LLVMC_BUILTIN_PLUGIN=1
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/unittests/ADT/APFloatTest.cpp b/libclamav/c++/llvm/unittests/ADT/APFloatTest.cpp
deleted file mode 100644
index 964b04d..0000000
--- a/libclamav/c++/llvm/unittests/ADT/APFloatTest.cpp
+++ /dev/null
@@ -1,579 +0,0 @@
-//===- llvm/unittest/ADT/APFloat.cpp - APFloat unit tests ---------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include <ostream>
-#include <string>
-#include "llvm/Support/raw_ostream.h"
-#include "gtest/gtest.h"
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/SmallVector.h"
-
-using namespace llvm;
-
-static double convertToDoubleFromString(const char *Str) {
- llvm::APFloat F(0.0);
- F.convertFromString(Str, llvm::APFloat::rmNearestTiesToEven);
- return F.convertToDouble();
-}
-
-static std::string convertToString(double d, unsigned Prec, unsigned Pad) {
- llvm::SmallVector<char, 100> Buffer;
- llvm::APFloat F(d);
- F.toString(Buffer, Prec, Pad);
- return std::string(Buffer.data(), Buffer.size());
-}
-
-namespace {
-
-TEST(APFloatTest, Zero) {
- EXPECT_EQ(0.0f, APFloat(APFloat::IEEEsingle, 0.0f).convertToFloat());
- EXPECT_EQ(-0.0f, APFloat(APFloat::IEEEsingle, -0.0f).convertToFloat());
-
- EXPECT_EQ(0.0, APFloat(APFloat::IEEEdouble, 0.0).convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, -0.0).convertToDouble());
-}
-
-TEST(APFloatTest, fromZeroDecimalString) {
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0.").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0.").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0.").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, ".0").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+.0").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-.0").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0.0").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0.0").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0.0").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "00000.").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+00000.").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-00000.").convertToDouble());
-
- EXPECT_EQ(0.0, APFloat(APFloat::IEEEdouble, ".00000").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+.00000").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-.00000").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0000.00000").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0000.00000").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0000.00000").convertToDouble());
-}
-
-TEST(APFloatTest, fromZeroDecimalSingleExponentString) {
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0e1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0e1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0e1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0e+1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0e+1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0e+1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0e-1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0e-1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0e-1").convertToDouble());
-
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0.e1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0.e1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0.e1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0.e+1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0.e+1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0.e+1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0.e-1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0.e-1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0.e-1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, ".0e1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+.0e1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-.0e1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, ".0e+1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+.0e+1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-.0e+1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, ".0e-1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+.0e-1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-.0e-1").convertToDouble());
-
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0.0e1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0.0e1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0.0e1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0.0e+1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0.0e+1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0.0e+1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0.0e-1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0.0e-1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0.0e-1").convertToDouble());
-
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "000.0000e1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+000.0000e+1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-000.0000e+1").convertToDouble());
-}
-
-TEST(APFloatTest, fromZeroDecimalLargeExponentString) {
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0e1234").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0e1234").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0e1234").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0e+1234").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0e+1234").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0e+1234").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0e-1234").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0e-1234").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0e-1234").convertToDouble());
-
- EXPECT_EQ(0.0, APFloat(APFloat::IEEEdouble, "000.0000e1234").convertToDouble());
- EXPECT_EQ(0.0, APFloat(APFloat::IEEEdouble, "000.0000e-1234").convertToDouble());
-
- EXPECT_EQ(0.0, APFloat(APFloat::IEEEdouble, StringRef("0e1234\02", 6)).convertToDouble());
-}
-
-TEST(APFloatTest, fromZeroHexadecimalString) {
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x0p1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0x0p1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0x0p1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x0p+1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0x0p+1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0x0p+1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x0p-1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0x0p-1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0x0p-1").convertToDouble());
-
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x0.p1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0x0.p1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0x0.p1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x0.p+1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0x0.p+1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0x0.p+1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x0.p-1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0x0.p-1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0x0.p-1").convertToDouble());
-
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x.0p1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0x.0p1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0x.0p1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x.0p+1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0x.0p+1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0x.0p+1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x.0p-1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0x.0p-1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0x.0p-1").convertToDouble());
-
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x0.0p1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0x0.0p1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0x0.0p1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x0.0p+1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0x0.0p+1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0x0.0p+1").convertToDouble());
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x0.0p-1").convertToDouble());
- EXPECT_EQ(+0.0, APFloat(APFloat::IEEEdouble, "+0x0.0p-1").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0x0.0p-1").convertToDouble());
-
-
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x00000.p1").convertToDouble());
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x0000.00000p1").convertToDouble());
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x.00000p1").convertToDouble());
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x0.p1").convertToDouble());
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x0p1234").convertToDouble());
- EXPECT_EQ(-0.0, APFloat(APFloat::IEEEdouble, "-0x0p1234").convertToDouble());
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x00000.p1234").convertToDouble());
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x0000.00000p1234").convertToDouble());
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x.00000p1234").convertToDouble());
- EXPECT_EQ( 0.0, APFloat(APFloat::IEEEdouble, "0x0.p1234").convertToDouble());
-}
-
-TEST(APFloatTest, fromDecimalString) {
- EXPECT_EQ(1.0, APFloat(APFloat::IEEEdouble, "1").convertToDouble());
- EXPECT_EQ(2.0, APFloat(APFloat::IEEEdouble, "2.").convertToDouble());
- EXPECT_EQ(0.5, APFloat(APFloat::IEEEdouble, ".5").convertToDouble());
- EXPECT_EQ(1.0, APFloat(APFloat::IEEEdouble, "1.0").convertToDouble());
- EXPECT_EQ(-2.0, APFloat(APFloat::IEEEdouble, "-2").convertToDouble());
- EXPECT_EQ(-4.0, APFloat(APFloat::IEEEdouble, "-4.").convertToDouble());
- EXPECT_EQ(-0.5, APFloat(APFloat::IEEEdouble, "-.5").convertToDouble());
- EXPECT_EQ(-1.5, APFloat(APFloat::IEEEdouble, "-1.5").convertToDouble());
- EXPECT_EQ(1.25e12, APFloat(APFloat::IEEEdouble, "1.25e12").convertToDouble());
- EXPECT_EQ(1.25e+12, APFloat(APFloat::IEEEdouble, "1.25e+12").convertToDouble());
- EXPECT_EQ(1.25e-12, APFloat(APFloat::IEEEdouble, "1.25e-12").convertToDouble());
- EXPECT_EQ(1024.0, APFloat(APFloat::IEEEdouble, "1024.").convertToDouble());
- EXPECT_EQ(1024.05, APFloat(APFloat::IEEEdouble, "1024.05000").convertToDouble());
- EXPECT_EQ(0.05, APFloat(APFloat::IEEEdouble, ".05000").convertToDouble());
- EXPECT_EQ(2.0, APFloat(APFloat::IEEEdouble, "2.").convertToDouble());
- EXPECT_EQ(2.0e2, APFloat(APFloat::IEEEdouble, "2.e2").convertToDouble());
- EXPECT_EQ(2.0e+2, APFloat(APFloat::IEEEdouble, "2.e+2").convertToDouble());
- EXPECT_EQ(2.0e-2, APFloat(APFloat::IEEEdouble, "2.e-2").convertToDouble());
- EXPECT_EQ(2.05e2, APFloat(APFloat::IEEEdouble, "002.05000e2").convertToDouble());
- EXPECT_EQ(2.05e+2, APFloat(APFloat::IEEEdouble, "002.05000e+2").convertToDouble());
- EXPECT_EQ(2.05e-2, APFloat(APFloat::IEEEdouble, "002.05000e-2").convertToDouble());
- EXPECT_EQ(2.05e12, APFloat(APFloat::IEEEdouble, "002.05000e12").convertToDouble());
- EXPECT_EQ(2.05e+12, APFloat(APFloat::IEEEdouble, "002.05000e+12").convertToDouble());
- EXPECT_EQ(2.05e-12, APFloat(APFloat::IEEEdouble, "002.05000e-12").convertToDouble());
-
- // These are "carefully selected" to overflow the fast log-base
- // calculations in APFloat.cpp
- EXPECT_TRUE(APFloat(APFloat::IEEEdouble, "99e99999").isInfinity());
- EXPECT_TRUE(APFloat(APFloat::IEEEdouble, "-99e99999").isInfinity());
- EXPECT_TRUE(APFloat(APFloat::IEEEdouble, "1e-99999").isPosZero());
- EXPECT_TRUE(APFloat(APFloat::IEEEdouble, "-1e-99999").isNegZero());
-}
-
-TEST(APFloatTest, fromHexadecimalString) {
- EXPECT_EQ( 1.0, APFloat(APFloat::IEEEdouble, "0x1p0").convertToDouble());
- EXPECT_EQ(+1.0, APFloat(APFloat::IEEEdouble, "+0x1p0").convertToDouble());
- EXPECT_EQ(-1.0, APFloat(APFloat::IEEEdouble, "-0x1p0").convertToDouble());
-
- EXPECT_EQ( 1.0, APFloat(APFloat::IEEEdouble, "0x1p+0").convertToDouble());
- EXPECT_EQ(+1.0, APFloat(APFloat::IEEEdouble, "+0x1p+0").convertToDouble());
- EXPECT_EQ(-1.0, APFloat(APFloat::IEEEdouble, "-0x1p+0").convertToDouble());
-
- EXPECT_EQ( 1.0, APFloat(APFloat::IEEEdouble, "0x1p-0").convertToDouble());
- EXPECT_EQ(+1.0, APFloat(APFloat::IEEEdouble, "+0x1p-0").convertToDouble());
- EXPECT_EQ(-1.0, APFloat(APFloat::IEEEdouble, "-0x1p-0").convertToDouble());
-
-
- EXPECT_EQ( 2.0, APFloat(APFloat::IEEEdouble, "0x1p1").convertToDouble());
- EXPECT_EQ(+2.0, APFloat(APFloat::IEEEdouble, "+0x1p1").convertToDouble());
- EXPECT_EQ(-2.0, APFloat(APFloat::IEEEdouble, "-0x1p1").convertToDouble());
-
- EXPECT_EQ( 2.0, APFloat(APFloat::IEEEdouble, "0x1p+1").convertToDouble());
- EXPECT_EQ(+2.0, APFloat(APFloat::IEEEdouble, "+0x1p+1").convertToDouble());
- EXPECT_EQ(-2.0, APFloat(APFloat::IEEEdouble, "-0x1p+1").convertToDouble());
-
- EXPECT_EQ( 0.5, APFloat(APFloat::IEEEdouble, "0x1p-1").convertToDouble());
- EXPECT_EQ(+0.5, APFloat(APFloat::IEEEdouble, "+0x1p-1").convertToDouble());
- EXPECT_EQ(-0.5, APFloat(APFloat::IEEEdouble, "-0x1p-1").convertToDouble());
-
-
- EXPECT_EQ( 3.0, APFloat(APFloat::IEEEdouble, "0x1.8p1").convertToDouble());
- EXPECT_EQ(+3.0, APFloat(APFloat::IEEEdouble, "+0x1.8p1").convertToDouble());
- EXPECT_EQ(-3.0, APFloat(APFloat::IEEEdouble, "-0x1.8p1").convertToDouble());
-
- EXPECT_EQ( 3.0, APFloat(APFloat::IEEEdouble, "0x1.8p+1").convertToDouble());
- EXPECT_EQ(+3.0, APFloat(APFloat::IEEEdouble, "+0x1.8p+1").convertToDouble());
- EXPECT_EQ(-3.0, APFloat(APFloat::IEEEdouble, "-0x1.8p+1").convertToDouble());
-
- EXPECT_EQ( 0.75, APFloat(APFloat::IEEEdouble, "0x1.8p-1").convertToDouble());
- EXPECT_EQ(+0.75, APFloat(APFloat::IEEEdouble, "+0x1.8p-1").convertToDouble());
- EXPECT_EQ(-0.75, APFloat(APFloat::IEEEdouble, "-0x1.8p-1").convertToDouble());
-
-
- EXPECT_EQ( 8192.0, APFloat(APFloat::IEEEdouble, "0x1000.000p1").convertToDouble());
- EXPECT_EQ(+8192.0, APFloat(APFloat::IEEEdouble, "+0x1000.000p1").convertToDouble());
- EXPECT_EQ(-8192.0, APFloat(APFloat::IEEEdouble, "-0x1000.000p1").convertToDouble());
-
- EXPECT_EQ( 8192.0, APFloat(APFloat::IEEEdouble, "0x1000.000p+1").convertToDouble());
- EXPECT_EQ(+8192.0, APFloat(APFloat::IEEEdouble, "+0x1000.000p+1").convertToDouble());
- EXPECT_EQ(-8192.0, APFloat(APFloat::IEEEdouble, "-0x1000.000p+1").convertToDouble());
-
- EXPECT_EQ( 2048.0, APFloat(APFloat::IEEEdouble, "0x1000.000p-1").convertToDouble());
- EXPECT_EQ(+2048.0, APFloat(APFloat::IEEEdouble, "+0x1000.000p-1").convertToDouble());
- EXPECT_EQ(-2048.0, APFloat(APFloat::IEEEdouble, "-0x1000.000p-1").convertToDouble());
-
-
- EXPECT_EQ( 8192.0, APFloat(APFloat::IEEEdouble, "0x1000p1").convertToDouble());
- EXPECT_EQ(+8192.0, APFloat(APFloat::IEEEdouble, "+0x1000p1").convertToDouble());
- EXPECT_EQ(-8192.0, APFloat(APFloat::IEEEdouble, "-0x1000p1").convertToDouble());
-
- EXPECT_EQ( 8192.0, APFloat(APFloat::IEEEdouble, "0x1000p+1").convertToDouble());
- EXPECT_EQ(+8192.0, APFloat(APFloat::IEEEdouble, "+0x1000p+1").convertToDouble());
- EXPECT_EQ(-8192.0, APFloat(APFloat::IEEEdouble, "-0x1000p+1").convertToDouble());
-
- EXPECT_EQ( 2048.0, APFloat(APFloat::IEEEdouble, "0x1000p-1").convertToDouble());
- EXPECT_EQ(+2048.0, APFloat(APFloat::IEEEdouble, "+0x1000p-1").convertToDouble());
- EXPECT_EQ(-2048.0, APFloat(APFloat::IEEEdouble, "-0x1000p-1").convertToDouble());
-
-
- EXPECT_EQ( 16384.0, APFloat(APFloat::IEEEdouble, "0x10p10").convertToDouble());
- EXPECT_EQ(+16384.0, APFloat(APFloat::IEEEdouble, "+0x10p10").convertToDouble());
- EXPECT_EQ(-16384.0, APFloat(APFloat::IEEEdouble, "-0x10p10").convertToDouble());
-
- EXPECT_EQ( 16384.0, APFloat(APFloat::IEEEdouble, "0x10p+10").convertToDouble());
- EXPECT_EQ(+16384.0, APFloat(APFloat::IEEEdouble, "+0x10p+10").convertToDouble());
- EXPECT_EQ(-16384.0, APFloat(APFloat::IEEEdouble, "-0x10p+10").convertToDouble());
-
- EXPECT_EQ( 0.015625, APFloat(APFloat::IEEEdouble, "0x10p-10").convertToDouble());
- EXPECT_EQ(+0.015625, APFloat(APFloat::IEEEdouble, "+0x10p-10").convertToDouble());
- EXPECT_EQ(-0.015625, APFloat(APFloat::IEEEdouble, "-0x10p-10").convertToDouble());
-
- EXPECT_EQ(1.0625, APFloat(APFloat::IEEEdouble, "0x1.1p0").convertToDouble());
- EXPECT_EQ(1.0, APFloat(APFloat::IEEEdouble, "0x1p0").convertToDouble());
-
- EXPECT_EQ(2.71828, convertToDoubleFromString("2.71828"));
-}
-
-TEST(APFloatTest, toString) {
- ASSERT_EQ("10", convertToString(10.0, 6, 3));
- ASSERT_EQ("1.0E+1", convertToString(10.0, 6, 0));
- ASSERT_EQ("10100", convertToString(1.01E+4, 5, 2));
- ASSERT_EQ("1.01E+4", convertToString(1.01E+4, 4, 2));
- ASSERT_EQ("1.01E+4", convertToString(1.01E+4, 5, 1));
- ASSERT_EQ("0.0101", convertToString(1.01E-2, 5, 2));
- ASSERT_EQ("0.0101", convertToString(1.01E-2, 4, 2));
- ASSERT_EQ("1.01E-2", convertToString(1.01E-2, 5, 1));
- ASSERT_EQ("0.7853981633974483", convertToString(0.78539816339744830961, 0, 3));
- ASSERT_EQ("4.940656458412465E-324", convertToString(4.9406564584124654e-324, 0, 3));
- ASSERT_EQ("873.1834", convertToString(873.1834, 0, 1));
- ASSERT_EQ("8.731834E+2", convertToString(873.1834, 0, 0));
-}
-
-static APInt nanbits(const fltSemantics &Sem,
- bool SNaN, bool Negative, uint64_t fill) {
- APInt apfill(64, fill);
- if (SNaN)
- return APFloat::getSNaN(Sem, Negative, &apfill).bitcastToAPInt();
- else
- return APFloat::getQNaN(Sem, Negative, &apfill).bitcastToAPInt();
-}
-
-TEST(APFloatTest, makeNaN) {
- ASSERT_EQ(0x7fc00000, nanbits(APFloat::IEEEsingle, false, false, 0));
- ASSERT_EQ(0xffc00000, nanbits(APFloat::IEEEsingle, false, true, 0));
- ASSERT_EQ(0x7fc0ae72, nanbits(APFloat::IEEEsingle, false, false, 0xae72));
- ASSERT_EQ(0x7fffae72, nanbits(APFloat::IEEEsingle, false, false, 0xffffae72));
- ASSERT_EQ(0x7fa00000, nanbits(APFloat::IEEEsingle, true, false, 0));
- ASSERT_EQ(0xffa00000, nanbits(APFloat::IEEEsingle, true, true, 0));
- ASSERT_EQ(0x7f80ae72, nanbits(APFloat::IEEEsingle, true, false, 0xae72));
- ASSERT_EQ(0x7fbfae72, nanbits(APFloat::IEEEsingle, true, false, 0xffffae72));
-
- ASSERT_EQ(0x7ff8000000000000ULL, nanbits(APFloat::IEEEdouble, false, false, 0));
- ASSERT_EQ(0xfff8000000000000ULL, nanbits(APFloat::IEEEdouble, false, true, 0));
- ASSERT_EQ(0x7ff800000000ae72ULL, nanbits(APFloat::IEEEdouble, false, false, 0xae72));
- ASSERT_EQ(0x7fffffffffffae72ULL, nanbits(APFloat::IEEEdouble, false, false, 0xffffffffffffae72ULL));
- ASSERT_EQ(0x7ff4000000000000ULL, nanbits(APFloat::IEEEdouble, true, false, 0));
- ASSERT_EQ(0xfff4000000000000ULL, nanbits(APFloat::IEEEdouble, true, true, 0));
- ASSERT_EQ(0x7ff000000000ae72ULL, nanbits(APFloat::IEEEdouble, true, false, 0xae72));
- ASSERT_EQ(0x7ff7ffffffffae72ULL, nanbits(APFloat::IEEEdouble, true, false, 0xffffffffffffae72ULL));
-}
-
-#ifdef GTEST_HAS_DEATH_TEST
-#ifndef NDEBUG
-TEST(APFloatTest, SemanticsDeath) {
- EXPECT_DEATH(APFloat(APFloat::IEEEsingle, 0.0f).convertToDouble(), "Float semantics are not IEEEdouble");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, 0.0 ).convertToFloat(), "Float semantics are not IEEEsingle");
-}
-
-TEST(APFloatTest, StringDecimalDeath) {
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, ""), "Invalid string length");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+"), "String has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-"), "String has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, StringRef("\0", 1)), "Invalid character in significand");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, StringRef("1\0", 2)), "Invalid character in significand");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, StringRef("1\02", 3)), "Invalid character in significand");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, StringRef("1\02e1", 5)), "Invalid character in significand");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, StringRef("1e\0", 3)), "Invalid character in exponent");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, StringRef("1e1\0", 4)), "Invalid character in exponent");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, StringRef("1e1\02", 5)), "Invalid character in exponent");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "1.0f"), "Invalid character in significand");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, ".."), "String contains multiple dots");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "..0"), "String contains multiple dots");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "1.0.0"), "String contains multiple dots");
-}
-
-TEST(APFloatTest, StringDecimalSignificandDeath) {
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "."), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+."), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-."), "Significand has no digits");
-
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "e"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+e"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-e"), "Significand has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "e1"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+e1"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-e1"), "Significand has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, ".e1"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+.e1"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-.e1"), "Significand has no digits");
-
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, ".e"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+.e"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-.e"), "Significand has no digits");
-}
-
-TEST(APFloatTest, StringDecimalExponentDeath) {
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "1e"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+1e"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-1e"), "Exponent has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "1.e"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+1.e"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-1.e"), "Exponent has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, ".1e"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+.1e"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-.1e"), "Exponent has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "1.1e"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+1.1e"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-1.1e"), "Exponent has no digits");
-
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "1e+"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "1e-"), "Exponent has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, ".1e"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, ".1e+"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, ".1e-"), "Exponent has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "1.0e"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "1.0e+"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "1.0e-"), "Exponent has no digits");
-}
-
-TEST(APFloatTest, StringHexadecimalDeath) {
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x"), "Invalid string");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x"), "Invalid string");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x"), "Invalid string");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x0"), "Hex strings require an exponent");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x0"), "Hex strings require an exponent");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x0"), "Hex strings require an exponent");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x0."), "Hex strings require an exponent");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x0."), "Hex strings require an exponent");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x0."), "Hex strings require an exponent");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x.0"), "Hex strings require an exponent");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x.0"), "Hex strings require an exponent");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x.0"), "Hex strings require an exponent");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x0.0"), "Hex strings require an exponent");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x0.0"), "Hex strings require an exponent");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x0.0"), "Hex strings require an exponent");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, StringRef("0x\0", 3)), "Invalid character in significand");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, StringRef("0x1\0", 4)), "Invalid character in significand");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, StringRef("0x1\02", 5)), "Invalid character in significand");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, StringRef("0x1\02p1", 7)), "Invalid character in significand");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, StringRef("0x1p\0", 5)), "Invalid character in exponent");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, StringRef("0x1p1\0", 6)), "Invalid character in exponent");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, StringRef("0x1p1\02", 7)), "Invalid character in exponent");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x1p0f"), "Invalid character in exponent");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x..p1"), "String contains multiple dots");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x..0p1"), "String contains multiple dots");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x1.0.0p1"), "String contains multiple dots");
-}
-
-TEST(APFloatTest, StringHexadecimalSignificandDeath) {
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x."), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x."), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x."), "Significand has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0xp"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0xp"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0xp"), "Significand has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0xp+"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0xp+"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0xp+"), "Significand has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0xp-"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0xp-"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0xp-"), "Significand has no digits");
-
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x.p"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x.p"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x.p"), "Significand has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x.p+"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x.p+"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x.p+"), "Significand has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x.p-"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x.p-"), "Significand has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x.p-"), "Significand has no digits");
-}
-
-TEST(APFloatTest, StringHexadecimalExponentDeath) {
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x1p"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x1p"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x1p"), "Exponent has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x1p+"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x1p+"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x1p+"), "Exponent has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x1p-"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x1p-"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x1p-"), "Exponent has no digits");
-
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x1.p"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x1.p"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x1.p"), "Exponent has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x1.p+"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x1.p+"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x1.p+"), "Exponent has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x1.p-"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x1.p-"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x1.p-"), "Exponent has no digits");
-
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x.1p"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x.1p"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x.1p"), "Exponent has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x.1p+"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x.1p+"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x.1p+"), "Exponent has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x.1p-"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x.1p-"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x.1p-"), "Exponent has no digits");
-
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x1.1p"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x1.1p"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x1.1p"), "Exponent has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x1.1p+"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x1.1p+"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x1.1p+"), "Exponent has no digits");
-
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "0x1.1p-"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "+0x1.1p-"), "Exponent has no digits");
- EXPECT_DEATH(APFloat(APFloat::IEEEdouble, "-0x1.1p-"), "Exponent has no digits");
-}
-#endif
-#endif
-
-}
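For illustration, the makeNaN expectations above encode the IEEE-754 layout of a default quiet NaN (sign 0, all-ones exponent, most significant mantissa bit set). A minimal standalone sketch, independent of the APFloat API, that checks that the 0x7ff8000000000000 pattern really behaves as a NaN:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // Bit pattern the makeNaN test expects for a default IEEE-754 quiet NaN.
  const std::uint64_t QuietNaNBits = 0x7ff8000000000000ULL;
  double D;
  std::memcpy(&D, &QuietNaNBits, sizeof D);
  // A NaN is the only value that compares unequal to itself; prints "is NaN: 1".
  std::printf("is NaN: %d\n", D != D);
  return 0;
}
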
diff --git a/libclamav/c++/llvm/unittests/ADT/APIntTest.cpp b/libclamav/c++/llvm/unittests/ADT/APIntTest.cpp
deleted file mode 100644
index d08e86a..0000000
--- a/libclamav/c++/llvm/unittests/ADT/APIntTest.cpp
+++ /dev/null
@@ -1,346 +0,0 @@
-//===- llvm/unittest/ADT/APInt.cpp - APInt unit tests ---------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include <ostream>
-#include "gtest/gtest.h"
-#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/SmallString.h"
-
-using namespace llvm;
-
-namespace {
-
-// Test that APInt shift left works when bitwidth > 64 and shiftamt == 0
-TEST(APIntTest, ShiftLeftByZero) {
- APInt One = APInt::getNullValue(65) + 1;
- APInt Shl = One.shl(0);
- EXPECT_EQ(true, Shl[0]);
- EXPECT_EQ(false, Shl[1]);
-}
-
-TEST(APIntTest, i128_NegativeCount) {
- APInt Minus3(128, static_cast<uint64_t>(-3), true);
- EXPECT_EQ(126u, Minus3.countLeadingOnes());
- EXPECT_EQ(-3, Minus3.getSExtValue());
-
- APInt Minus1(128, static_cast<uint64_t>(-1), true);
- EXPECT_EQ(0u, Minus1.countLeadingZeros());
- EXPECT_EQ(128u, Minus1.countLeadingOnes());
- EXPECT_EQ(128u, Minus1.getActiveBits());
- EXPECT_EQ(0u, Minus1.countTrailingZeros());
- EXPECT_EQ(128u, Minus1.countTrailingOnes());
- EXPECT_EQ(128u, Minus1.countPopulation());
- EXPECT_EQ(-1, Minus1.getSExtValue());
-}
-
-TEST(APIntTest, i33_Count) {
- APInt i33minus2(33, static_cast<uint64_t>(-2), true);
- EXPECT_EQ(0u, i33minus2.countLeadingZeros());
- EXPECT_EQ(32u, i33minus2.countLeadingOnes());
- EXPECT_EQ(33u, i33minus2.getActiveBits());
- EXPECT_EQ(1u, i33minus2.countTrailingZeros());
- EXPECT_EQ(32u, i33minus2.countPopulation());
- EXPECT_EQ(-2, i33minus2.getSExtValue());
- EXPECT_EQ(((uint64_t)-2)&((1ull<<33) -1), i33minus2.getZExtValue());
-}
-
-TEST(APIntTest, i65_Count) {
- APInt i65minus(65, 0, true);
- i65minus.set(64);
- EXPECT_EQ(0u, i65minus.countLeadingZeros());
- EXPECT_EQ(1u, i65minus.countLeadingOnes());
- EXPECT_EQ(65u, i65minus.getActiveBits());
- EXPECT_EQ(64u, i65minus.countTrailingZeros());
- EXPECT_EQ(1u, i65minus.countPopulation());
-}
-
-TEST(APIntTest, i128_PositiveCount) {
- APInt u128max = APInt::getAllOnesValue(128);
- EXPECT_EQ(128u, u128max.countLeadingOnes());
- EXPECT_EQ(0u, u128max.countLeadingZeros());
- EXPECT_EQ(128u, u128max.getActiveBits());
- EXPECT_EQ(0u, u128max.countTrailingZeros());
- EXPECT_EQ(128u, u128max.countTrailingOnes());
- EXPECT_EQ(128u, u128max.countPopulation());
-
- APInt u64max(128, static_cast<uint64_t>(-1), false);
- EXPECT_EQ(64u, u64max.countLeadingZeros());
- EXPECT_EQ(0u, u64max.countLeadingOnes());
- EXPECT_EQ(64u, u64max.getActiveBits());
- EXPECT_EQ(0u, u64max.countTrailingZeros());
- EXPECT_EQ(64u, u64max.countTrailingOnes());
- EXPECT_EQ(64u, u64max.countPopulation());
- EXPECT_EQ((uint64_t)~0ull, u64max.getZExtValue());
-
- APInt zero(128, 0, true);
- EXPECT_EQ(128u, zero.countLeadingZeros());
- EXPECT_EQ(0u, zero.countLeadingOnes());
- EXPECT_EQ(0u, zero.getActiveBits());
- EXPECT_EQ(128u, zero.countTrailingZeros());
- EXPECT_EQ(0u, zero.countTrailingOnes());
- EXPECT_EQ(0u, zero.countPopulation());
- EXPECT_EQ(0u, zero.getSExtValue());
- EXPECT_EQ(0u, zero.getZExtValue());
-
- APInt one(128, 1, true);
- EXPECT_EQ(127u, one.countLeadingZeros());
- EXPECT_EQ(0u, one.countLeadingOnes());
- EXPECT_EQ(1u, one.getActiveBits());
- EXPECT_EQ(0u, one.countTrailingZeros());
- EXPECT_EQ(1u, one.countTrailingOnes());
- EXPECT_EQ(1u, one.countPopulation());
- EXPECT_EQ(1, one.getSExtValue());
- EXPECT_EQ(1u, one.getZExtValue());
-}
-
-TEST(APIntTest, i1) {
- const APInt neg_two(1, static_cast<uint64_t>(-2), true);
- const APInt neg_one(1, static_cast<uint64_t>(-1), true);
- const APInt zero(1, 0);
- const APInt one(1, 1);
- const APInt two(1, 2);
-
- EXPECT_EQ(0, neg_two.getSExtValue());
- EXPECT_EQ(-1, neg_one.getSExtValue());
- EXPECT_EQ(1u, neg_one.getZExtValue());
- EXPECT_EQ(0u, zero.getZExtValue());
- EXPECT_EQ(-1, one.getSExtValue());
- EXPECT_EQ(1u, one.getZExtValue());
- EXPECT_EQ(0u, two.getZExtValue());
- EXPECT_EQ(0, two.getSExtValue());
-
- // Basic equalities for 1-bit values.
- EXPECT_EQ(zero, two);
- EXPECT_EQ(zero, neg_two);
- EXPECT_EQ(one, neg_one);
- EXPECT_EQ(two, neg_two);
-
- // Additions.
- EXPECT_EQ(two, one + one);
- EXPECT_EQ(zero, neg_one + one);
- EXPECT_EQ(neg_two, neg_one + neg_one);
-
- // Subtractions.
- EXPECT_EQ(neg_two, neg_one - one);
- EXPECT_EQ(two, one - neg_one);
- EXPECT_EQ(zero, one - one);
-
- // Shifts.
- EXPECT_EQ(zero, one << one);
- EXPECT_EQ(one, one << zero);
- EXPECT_EQ(zero, one.shl(1));
- EXPECT_EQ(one, one.shl(0));
- EXPECT_EQ(zero, one.lshr(1));
- EXPECT_EQ(zero, one.ashr(1));
-
- // Multiplies.
- EXPECT_EQ(neg_one, neg_one * one);
- EXPECT_EQ(neg_one, one * neg_one);
- EXPECT_EQ(one, neg_one * neg_one);
- EXPECT_EQ(one, one * one);
-
- // Divides.
- EXPECT_EQ(neg_one, one.sdiv(neg_one));
- EXPECT_EQ(neg_one, neg_one.sdiv(one));
- EXPECT_EQ(one, neg_one.sdiv(neg_one));
- EXPECT_EQ(one, one.sdiv(one));
-
- EXPECT_EQ(neg_one, one.udiv(neg_one));
- EXPECT_EQ(neg_one, neg_one.udiv(one));
- EXPECT_EQ(one, neg_one.udiv(neg_one));
- EXPECT_EQ(one, one.udiv(one));
-
- // Remainders.
- EXPECT_EQ(zero, neg_one.srem(one));
- EXPECT_EQ(zero, neg_one.urem(one));
- EXPECT_EQ(zero, one.srem(neg_one));
-}
-
-TEST(APIntTest, fromString) {
- EXPECT_EQ(APInt(32, 0), APInt(32, "0", 2));
- EXPECT_EQ(APInt(32, 1), APInt(32, "1", 2));
- EXPECT_EQ(APInt(32, 2), APInt(32, "10", 2));
- EXPECT_EQ(APInt(32, 3), APInt(32, "11", 2));
- EXPECT_EQ(APInt(32, 4), APInt(32, "100", 2));
-
- EXPECT_EQ(APInt(32, 0), APInt(32, "+0", 2));
- EXPECT_EQ(APInt(32, 1), APInt(32, "+1", 2));
- EXPECT_EQ(APInt(32, 2), APInt(32, "+10", 2));
- EXPECT_EQ(APInt(32, 3), APInt(32, "+11", 2));
- EXPECT_EQ(APInt(32, 4), APInt(32, "+100", 2));
-
- EXPECT_EQ(APInt(32, uint64_t(-0LL)), APInt(32, "-0", 2));
- EXPECT_EQ(APInt(32, uint64_t(-1LL)), APInt(32, "-1", 2));
- EXPECT_EQ(APInt(32, uint64_t(-2LL)), APInt(32, "-10", 2));
- EXPECT_EQ(APInt(32, uint64_t(-3LL)), APInt(32, "-11", 2));
- EXPECT_EQ(APInt(32, uint64_t(-4LL)), APInt(32, "-100", 2));
-
-
- EXPECT_EQ(APInt(32, 0), APInt(32, "0", 8));
- EXPECT_EQ(APInt(32, 1), APInt(32, "1", 8));
- EXPECT_EQ(APInt(32, 7), APInt(32, "7", 8));
- EXPECT_EQ(APInt(32, 8), APInt(32, "10", 8));
- EXPECT_EQ(APInt(32, 15), APInt(32, "17", 8));
- EXPECT_EQ(APInt(32, 16), APInt(32, "20", 8));
-
- EXPECT_EQ(APInt(32, +0), APInt(32, "+0", 8));
- EXPECT_EQ(APInt(32, +1), APInt(32, "+1", 8));
- EXPECT_EQ(APInt(32, +7), APInt(32, "+7", 8));
- EXPECT_EQ(APInt(32, +8), APInt(32, "+10", 8));
- EXPECT_EQ(APInt(32, +15), APInt(32, "+17", 8));
- EXPECT_EQ(APInt(32, +16), APInt(32, "+20", 8));
-
- EXPECT_EQ(APInt(32, uint64_t(-0LL)), APInt(32, "-0", 8));
- EXPECT_EQ(APInt(32, uint64_t(-1LL)), APInt(32, "-1", 8));
- EXPECT_EQ(APInt(32, uint64_t(-7LL)), APInt(32, "-7", 8));
- EXPECT_EQ(APInt(32, uint64_t(-8LL)), APInt(32, "-10", 8));
- EXPECT_EQ(APInt(32, uint64_t(-15LL)), APInt(32, "-17", 8));
- EXPECT_EQ(APInt(32, uint64_t(-16LL)), APInt(32, "-20", 8));
-
-
- EXPECT_EQ(APInt(32, 0), APInt(32, "0", 10));
- EXPECT_EQ(APInt(32, 1), APInt(32, "1", 10));
- EXPECT_EQ(APInt(32, 9), APInt(32, "9", 10));
- EXPECT_EQ(APInt(32, 10), APInt(32, "10", 10));
- EXPECT_EQ(APInt(32, 19), APInt(32, "19", 10));
- EXPECT_EQ(APInt(32, 20), APInt(32, "20", 10));
-
- EXPECT_EQ(APInt(32, uint64_t(-0LL)), APInt(32, "-0", 10));
- EXPECT_EQ(APInt(32, uint64_t(-1LL)), APInt(32, "-1", 10));
- EXPECT_EQ(APInt(32, uint64_t(-9LL)), APInt(32, "-9", 10));
- EXPECT_EQ(APInt(32, uint64_t(-10LL)), APInt(32, "-10", 10));
- EXPECT_EQ(APInt(32, uint64_t(-19LL)), APInt(32, "-19", 10));
- EXPECT_EQ(APInt(32, uint64_t(-20LL)), APInt(32, "-20", 10));
-
-
- EXPECT_EQ(APInt(32, 0), APInt(32, "0", 16));
- EXPECT_EQ(APInt(32, 1), APInt(32, "1", 16));
- EXPECT_EQ(APInt(32, 15), APInt(32, "F", 16));
- EXPECT_EQ(APInt(32, 16), APInt(32, "10", 16));
- EXPECT_EQ(APInt(32, 31), APInt(32, "1F", 16));
- EXPECT_EQ(APInt(32, 32), APInt(32, "20", 16));
-
- EXPECT_EQ(APInt(32, uint64_t(-0LL)), APInt(32, "-0", 16));
- EXPECT_EQ(APInt(32, uint64_t(-1LL)), APInt(32, "-1", 16));
- EXPECT_EQ(APInt(32, uint64_t(-15LL)), APInt(32, "-F", 16));
- EXPECT_EQ(APInt(32, uint64_t(-16LL)), APInt(32, "-10", 16));
- EXPECT_EQ(APInt(32, uint64_t(-31LL)), APInt(32, "-1F", 16));
- EXPECT_EQ(APInt(32, uint64_t(-32LL)), APInt(32, "-20", 16));
-}
-
-TEST(APIntTest, StringBitsNeeded2) {
- EXPECT_EQ(1U, APInt::getBitsNeeded( "0", 2));
- EXPECT_EQ(1U, APInt::getBitsNeeded( "1", 2));
- EXPECT_EQ(2U, APInt::getBitsNeeded( "10", 2));
- EXPECT_EQ(2U, APInt::getBitsNeeded( "11", 2));
- EXPECT_EQ(3U, APInt::getBitsNeeded("100", 2));
-
- EXPECT_EQ(1U, APInt::getBitsNeeded( "+0", 2));
- EXPECT_EQ(1U, APInt::getBitsNeeded( "+1", 2));
- EXPECT_EQ(2U, APInt::getBitsNeeded( "+10", 2));
- EXPECT_EQ(2U, APInt::getBitsNeeded( "+11", 2));
- EXPECT_EQ(3U, APInt::getBitsNeeded("+100", 2));
-
- EXPECT_EQ(2U, APInt::getBitsNeeded( "-0", 2));
- EXPECT_EQ(2U, APInt::getBitsNeeded( "-1", 2));
- EXPECT_EQ(3U, APInt::getBitsNeeded( "-10", 2));
- EXPECT_EQ(3U, APInt::getBitsNeeded( "-11", 2));
- EXPECT_EQ(4U, APInt::getBitsNeeded("-100", 2));
-}
-
-TEST(APIntTest, StringBitsNeeded8) {
- EXPECT_EQ(3U, APInt::getBitsNeeded( "0", 8));
- EXPECT_EQ(3U, APInt::getBitsNeeded( "7", 8));
- EXPECT_EQ(6U, APInt::getBitsNeeded("10", 8));
- EXPECT_EQ(6U, APInt::getBitsNeeded("17", 8));
- EXPECT_EQ(6U, APInt::getBitsNeeded("20", 8));
-
- EXPECT_EQ(3U, APInt::getBitsNeeded( "+0", 8));
- EXPECT_EQ(3U, APInt::getBitsNeeded( "+7", 8));
- EXPECT_EQ(6U, APInt::getBitsNeeded("+10", 8));
- EXPECT_EQ(6U, APInt::getBitsNeeded("+17", 8));
- EXPECT_EQ(6U, APInt::getBitsNeeded("+20", 8));
-
- EXPECT_EQ(4U, APInt::getBitsNeeded( "-0", 8));
- EXPECT_EQ(4U, APInt::getBitsNeeded( "-7", 8));
- EXPECT_EQ(7U, APInt::getBitsNeeded("-10", 8));
- EXPECT_EQ(7U, APInt::getBitsNeeded("-17", 8));
- EXPECT_EQ(7U, APInt::getBitsNeeded("-20", 8));
-}
-
-TEST(APIntTest, StringBitsNeeded10) {
- EXPECT_EQ(1U, APInt::getBitsNeeded( "0", 10));
- EXPECT_EQ(2U, APInt::getBitsNeeded( "3", 10));
- EXPECT_EQ(4U, APInt::getBitsNeeded( "9", 10));
- EXPECT_EQ(4U, APInt::getBitsNeeded("10", 10));
- EXPECT_EQ(5U, APInt::getBitsNeeded("19", 10));
- EXPECT_EQ(5U, APInt::getBitsNeeded("20", 10));
-
- EXPECT_EQ(1U, APInt::getBitsNeeded( "+0", 10));
- EXPECT_EQ(4U, APInt::getBitsNeeded( "+9", 10));
- EXPECT_EQ(4U, APInt::getBitsNeeded("+10", 10));
- EXPECT_EQ(5U, APInt::getBitsNeeded("+19", 10));
- EXPECT_EQ(5U, APInt::getBitsNeeded("+20", 10));
-
- EXPECT_EQ(2U, APInt::getBitsNeeded( "-0", 10));
- EXPECT_EQ(5U, APInt::getBitsNeeded( "-9", 10));
- EXPECT_EQ(5U, APInt::getBitsNeeded("-10", 10));
- EXPECT_EQ(6U, APInt::getBitsNeeded("-19", 10));
- EXPECT_EQ(6U, APInt::getBitsNeeded("-20", 10));
-}
-
-TEST(APIntTest, StringBitsNeeded16) {
- EXPECT_EQ(4U, APInt::getBitsNeeded( "0", 16));
- EXPECT_EQ(4U, APInt::getBitsNeeded( "F", 16));
- EXPECT_EQ(8U, APInt::getBitsNeeded("10", 16));
- EXPECT_EQ(8U, APInt::getBitsNeeded("1F", 16));
- EXPECT_EQ(8U, APInt::getBitsNeeded("20", 16));
-
- EXPECT_EQ(4U, APInt::getBitsNeeded( "+0", 16));
- EXPECT_EQ(4U, APInt::getBitsNeeded( "+F", 16));
- EXPECT_EQ(8U, APInt::getBitsNeeded("+10", 16));
- EXPECT_EQ(8U, APInt::getBitsNeeded("+1F", 16));
- EXPECT_EQ(8U, APInt::getBitsNeeded("+20", 16));
-
- EXPECT_EQ(5U, APInt::getBitsNeeded( "-0", 16));
- EXPECT_EQ(5U, APInt::getBitsNeeded( "-F", 16));
- EXPECT_EQ(9U, APInt::getBitsNeeded("-10", 16));
- EXPECT_EQ(9U, APInt::getBitsNeeded("-1F", 16));
- EXPECT_EQ(9U, APInt::getBitsNeeded("-20", 16));
-}
-
-TEST(APIntTest, Log2) {
- EXPECT_EQ(APInt(15, 7).logBase2(), 2U);
- EXPECT_EQ(APInt(15, 7).ceilLogBase2(), 3U);
- EXPECT_EQ(APInt(15, 7).exactLogBase2(), -1);
- EXPECT_EQ(APInt(15, 8).logBase2(), 3U);
- EXPECT_EQ(APInt(15, 8).ceilLogBase2(), 3U);
- EXPECT_EQ(APInt(15, 8).exactLogBase2(), 3);
- EXPECT_EQ(APInt(15, 9).logBase2(), 3U);
- EXPECT_EQ(APInt(15, 9).ceilLogBase2(), 4U);
- EXPECT_EQ(APInt(15, 9).exactLogBase2(), -1);
-}
-
-#ifdef GTEST_HAS_DEATH_TEST
-#ifndef NDEBUG
-TEST(APIntTest, StringDeath) {
- EXPECT_DEATH(APInt(0, "", 0), "Bitwidth too small");
- EXPECT_DEATH(APInt(32, "", 0), "Invalid string length");
- EXPECT_DEATH(APInt(32, "0", 0), "Radix should be 2, 8, 10, or 16!");
- EXPECT_DEATH(APInt(32, "", 10), "Invalid string length");
- EXPECT_DEATH(APInt(32, "-", 10), "String is only a sign, needs a value.");
- EXPECT_DEATH(APInt(1, "1234", 10), "Insufficient bit width");
- EXPECT_DEATH(APInt(32, "\0", 10), "Invalid string length");
- EXPECT_DEATH(APInt(32, StringRef("1\02", 3), 10), "Invalid character in digit string");
- EXPECT_DEATH(APInt(32, "1L", 10), "Invalid character in digit string");
-}
-#endif
-#endif
-
-}
diff --git a/libclamav/c++/llvm/unittests/ADT/BitVectorTest.cpp b/libclamav/c++/llvm/unittests/ADT/BitVectorTest.cpp
deleted file mode 100644
index 4fe11c1..0000000
--- a/libclamav/c++/llvm/unittests/ADT/BitVectorTest.cpp
+++ /dev/null
@@ -1,182 +0,0 @@
-//===- llvm/unittest/ADT/BitVectorTest.cpp - BitVector tests --------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef XFAIL
-#include "llvm/ADT/BitVector.h"
-#include "gtest/gtest.h"
-
-using namespace llvm;
-
-namespace {
-
-TEST(BitVectorTest, TrivialOperation) {
- BitVector Vec;
- EXPECT_EQ(0U, Vec.count());
- EXPECT_EQ(0U, Vec.size());
- EXPECT_FALSE(Vec.any());
- EXPECT_TRUE(Vec.none());
- EXPECT_TRUE(Vec.empty());
-
- Vec.resize(5, true);
- EXPECT_EQ(5U, Vec.count());
- EXPECT_EQ(5U, Vec.size());
- EXPECT_TRUE(Vec.any());
- EXPECT_FALSE(Vec.none());
- EXPECT_FALSE(Vec.empty());
-
- Vec.resize(11);
- EXPECT_EQ(5U, Vec.count());
- EXPECT_EQ(11U, Vec.size());
- EXPECT_TRUE(Vec.any());
- EXPECT_FALSE(Vec.none());
- EXPECT_FALSE(Vec.empty());
-
- BitVector Inv = ~Vec;
- EXPECT_EQ(6U, Inv.count());
- EXPECT_EQ(11U, Inv.size());
- EXPECT_TRUE(Inv.any());
- EXPECT_FALSE(Inv.none());
- EXPECT_FALSE(Inv.empty());
-
- EXPECT_FALSE(Inv == Vec);
- EXPECT_TRUE(Inv != Vec);
- Vec = ~Vec;
- EXPECT_TRUE(Inv == Vec);
- EXPECT_FALSE(Inv != Vec);
-
- // Add some "interesting" data to Vec.
- Vec.resize(23, true);
- Vec.resize(25, false);
- Vec.resize(26, true);
- Vec.resize(29, false);
- Vec.resize(33, true);
- Vec.resize(61, false);
- unsigned Count = 0;
- for (unsigned i = Vec.find_first(); i != -1u; i = Vec.find_next(i)) {
- ++Count;
- EXPECT_TRUE(Vec[i]);
- EXPECT_TRUE(Vec.test(i));
- }
- EXPECT_EQ(Count, Vec.count());
- EXPECT_EQ(Count, 23u);
- EXPECT_FALSE(Vec[0]);
- EXPECT_TRUE(Vec[32]);
- EXPECT_FALSE(Vec[60]);
-
- BitVector Copy = Vec;
- BitVector Alt(3, false);
- Alt.resize(6, true);
- std::swap(Alt, Vec);
- EXPECT_TRUE(Copy == Alt);
- EXPECT_TRUE(Vec.size() == 6);
- EXPECT_TRUE(Vec.count() == 3);
- EXPECT_TRUE(Vec.find_first() == 3);
- std::swap(Copy, Vec);
-
- // Add some more "interesting" data.
- Vec.resize(68, true);
- Vec.resize(78, false);
- Vec.resize(89, true);
- Vec.resize(90, false);
- Vec.resize(91, true);
- Vec.resize(130, false);
- Count = 0;
- for (unsigned i = Vec.find_first(); i != -1u; i = Vec.find_next(i)) {
- ++Count;
- EXPECT_TRUE(Vec[i]);
- EXPECT_TRUE(Vec.test(i));
- }
- EXPECT_EQ(Count, Vec.count());
- EXPECT_EQ(Count, 42u);
- EXPECT_FALSE(Vec[0]);
- EXPECT_TRUE(Vec[32]);
- EXPECT_FALSE(Vec[60]);
- EXPECT_FALSE(Vec[129]);
-
- Vec.flip(60);
- EXPECT_TRUE(Vec[60]);
- EXPECT_EQ(Count + 1, Vec.count());
- Vec.flip(60);
- EXPECT_FALSE(Vec[60]);
- EXPECT_EQ(Count, Vec.count());
-
- Vec.reset(32);
- EXPECT_FALSE(Vec[32]);
- EXPECT_EQ(Count - 1, Vec.count());
- Vec.set(32);
- EXPECT_TRUE(Vec[32]);
- EXPECT_EQ(Count, Vec.count());
-
- Vec.flip();
- EXPECT_EQ(Vec.size() - Count, Vec.count());
-
- Vec.reset();
- EXPECT_EQ(0U, Vec.count());
- EXPECT_EQ(130U, Vec.size());
- EXPECT_FALSE(Vec.any());
- EXPECT_TRUE(Vec.none());
- EXPECT_FALSE(Vec.empty());
-
- Inv = ~BitVector();
- EXPECT_EQ(0U, Inv.count());
- EXPECT_EQ(0U, Inv.size());
- EXPECT_FALSE(Inv.any());
- EXPECT_TRUE(Inv.none());
- EXPECT_TRUE(Inv.empty());
-
- Vec.clear();
- EXPECT_EQ(0U, Vec.count());
- EXPECT_EQ(0U, Vec.size());
- EXPECT_FALSE(Vec.any());
- EXPECT_TRUE(Vec.none());
- EXPECT_TRUE(Vec.empty());
-}
-
-TEST(BitVectorTest, CompoundAssignment) {
- BitVector A;
- A.resize(10);
- A.set(4);
- A.set(7);
-
- BitVector B;
- B.resize(50);
- B.set(5);
- B.set(18);
-
- A |= B;
- EXPECT_TRUE(A.test(4));
- EXPECT_TRUE(A.test(5));
- EXPECT_TRUE(A.test(7));
- EXPECT_TRUE(A.test(18));
- EXPECT_EQ(4U, A.count());
- EXPECT_EQ(50U, A.size());
-
- B.resize(10);
- B.set();
- B.reset(2);
- B.reset(7);
- A &= B;
- EXPECT_FALSE(A.test(2));
- EXPECT_FALSE(A.test(7));
- EXPECT_EQ(2U, A.count());
- EXPECT_EQ(50U, A.size());
-
- B.resize(100);
- B.set();
-
- A ^= B;
- EXPECT_TRUE(A.test(2));
- EXPECT_TRUE(A.test(7));
- EXPECT_EQ(98U, A.count());
- EXPECT_EQ(100U, A.size());
-}
-
-}
-
-#endif
diff --git a/libclamav/c++/llvm/unittests/ADT/DeltaAlgorithmTest.cpp b/libclamav/c++/llvm/unittests/ADT/DeltaAlgorithmTest.cpp
deleted file mode 100644
index a1884cd..0000000
--- a/libclamav/c++/llvm/unittests/ADT/DeltaAlgorithmTest.cpp
+++ /dev/null
@@ -1,100 +0,0 @@
-//===- llvm/unittest/ADT/DeltaAlgorithmTest.cpp ---------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/ADT/DeltaAlgorithm.h"
-#include <algorithm>
-#include <cstdarg>
-using namespace llvm;
-
-namespace std {
-
-std::ostream &operator<<(std::ostream &OS,
- const std::set<unsigned> &S) {
- OS << "{";
- for (std::set<unsigned>::const_iterator it = S.begin(),
- ie = S.end(); it != ie; ++it) {
- if (it != S.begin())
- OS << ",";
- OS << *it;
- }
- OS << "}";
- return OS;
-}
-
-}
-
-namespace {
-
-class FixedDeltaAlgorithm : public DeltaAlgorithm {
- changeset_ty FailingSet;
- unsigned NumTests;
-
-protected:
- virtual bool ExecuteOneTest(const changeset_ty &Changes) {
- ++NumTests;
- return std::includes(Changes.begin(), Changes.end(),
- FailingSet.begin(), FailingSet.end());
- }
-
-public:
- FixedDeltaAlgorithm(const changeset_ty &_FailingSet)
- : FailingSet(_FailingSet),
- NumTests(0) {}
-
- unsigned getNumTests() const { return NumTests; }
-};
-
-std::set<unsigned> fixed_set(unsigned N, ...) {
- std::set<unsigned> S;
- va_list ap;
- va_start(ap, N);
- for (unsigned i = 0; i != N; ++i)
- S.insert(va_arg(ap, unsigned));
- va_end(ap);
- return S;
-}
-
-std::set<unsigned> range(unsigned Start, unsigned End) {
- std::set<unsigned> S;
- while (Start != End)
- S.insert(Start++);
- return S;
-}
-
-std::set<unsigned> range(unsigned N) {
- return range(0, N);
-}
-
-TEST(DeltaAlgorithmTest, Basic) {
- // P = {3,5,7} \in S
- // [0, 20) should minimize to {3,5,7} in a reasonable number of tests.
- std::set<unsigned> Fails = fixed_set(3, 3, 5, 7);
- FixedDeltaAlgorithm FDA(Fails);
- EXPECT_EQ(fixed_set(3, 3, 5, 7), FDA.Run(range(20)));
- EXPECT_GE(33U, FDA.getNumTests());
-
- // P = {3,5,7} \in S
- // [10, 20) should minimize to [10,20)
- EXPECT_EQ(range(10,20), FDA.Run(range(10,20)));
-
- // P = [0,4) \in S
- // [0, 4) should minimize to [0,4) in 11 tests.
- //
- // 11 = |{ {},
- // {0}, {1}, {2}, {3},
- // {1, 2, 3}, {0, 2, 3}, {0, 1, 3}, {0, 1, 2},
- // {0, 1}, {2, 3} }|
- FDA = FixedDeltaAlgorithm(range(10));
- EXPECT_EQ(range(4), FDA.Run(range(4)));
- EXPECT_EQ(11U, FDA.getNumTests());
-}
-
-}
-
diff --git a/libclamav/c++/llvm/unittests/ADT/DenseMapTest.cpp b/libclamav/c++/llvm/unittests/ADT/DenseMapTest.cpp
deleted file mode 100644
index afac651..0000000
--- a/libclamav/c++/llvm/unittests/ADT/DenseMapTest.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-//===- llvm/unittest/ADT/DenseMapMap.cpp - DenseMap unit tests --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/ADT/DenseMap.h"
-
-using namespace llvm;
-
-namespace {
-
-// Test fixture
-class DenseMapTest : public testing::Test {
-protected:
- DenseMap<uint32_t, uint32_t> uintMap;
- DenseMap<uint32_t *, uint32_t *> uintPtrMap;
- uint32_t dummyInt;
-};
-
-// Empty map tests
-TEST_F(DenseMapTest, EmptyIntMapTest) {
- // Size tests
- EXPECT_EQ(0u, uintMap.size());
- EXPECT_TRUE(uintMap.empty());
-
- // Iterator tests
- EXPECT_TRUE(uintMap.begin() == uintMap.end());
-
- // Lookup tests
- EXPECT_FALSE(uintMap.count(0u));
- EXPECT_TRUE(uintMap.find(0u) == uintMap.end());
- EXPECT_EQ(0u, uintMap.lookup(0u));
-}
-
-// Empty map tests for pointer map
-TEST_F(DenseMapTest, EmptyPtrMapTest) {
- // Size tests
- EXPECT_EQ(0u, uintPtrMap.size());
- EXPECT_TRUE(uintPtrMap.empty());
-
- // Iterator tests
- EXPECT_TRUE(uintPtrMap.begin() == uintPtrMap.end());
-
- // Lookup tests
- EXPECT_FALSE(uintPtrMap.count(&dummyInt));
- EXPECT_TRUE(uintPtrMap.find(&dummyInt) == uintPtrMap.begin());
- EXPECT_EQ(0, uintPtrMap.lookup(&dummyInt));
-}
-
-// Constant map tests
-TEST_F(DenseMapTest, ConstEmptyMapTest) {
- const DenseMap<uint32_t, uint32_t> & constUintMap = uintMap;
- const DenseMap<uint32_t *, uint32_t *> & constUintPtrMap = uintPtrMap;
- EXPECT_EQ(0u, constUintMap.size());
- EXPECT_EQ(0u, constUintPtrMap.size());
- EXPECT_TRUE(constUintMap.empty());
- EXPECT_TRUE(constUintPtrMap.empty());
- EXPECT_TRUE(constUintMap.begin() == constUintMap.end());
- EXPECT_TRUE(constUintPtrMap.begin() == constUintPtrMap.end());
-}
-
-// A map with a single entry
-TEST_F(DenseMapTest, SingleEntryMapTest) {
- uintMap[0] = 1;
-
- // Size tests
- EXPECT_EQ(1u, uintMap.size());
- EXPECT_FALSE(uintMap.begin() == uintMap.end());
- EXPECT_FALSE(uintMap.empty());
-
- // Iterator tests
- DenseMap<uint32_t, uint32_t>::iterator it = uintMap.begin();
- EXPECT_EQ(0u, it->first);
- EXPECT_EQ(1u, it->second);
- ++it;
- EXPECT_TRUE(it == uintMap.end());
-
- // Lookup tests
- EXPECT_TRUE(uintMap.count(0u));
- EXPECT_TRUE(uintMap.find(0u) == uintMap.begin());
- EXPECT_EQ(1u, uintMap.lookup(0u));
- EXPECT_EQ(1u, uintMap[0]);
-}
-
-// Test clear() method
-TEST_F(DenseMapTest, ClearTest) {
- uintMap[0] = 1;
- uintMap.clear();
-
- EXPECT_EQ(0u, uintMap.size());
- EXPECT_TRUE(uintMap.empty());
- EXPECT_TRUE(uintMap.begin() == uintMap.end());
-}
-
-// Test erase(iterator) method
-TEST_F(DenseMapTest, EraseTest) {
- uintMap[0] = 1;
- uintMap.erase(uintMap.begin());
-
- EXPECT_EQ(0u, uintMap.size());
- EXPECT_TRUE(uintMap.empty());
- EXPECT_TRUE(uintMap.begin() == uintMap.end());
-}
-
-// Test erase(value) method
-TEST_F(DenseMapTest, EraseTest2) {
- uintMap[0] = 1;
- uintMap.erase(0);
-
- EXPECT_EQ(0u, uintMap.size());
- EXPECT_TRUE(uintMap.empty());
- EXPECT_TRUE(uintMap.begin() == uintMap.end());
-}
-
-// Test insert() method
-TEST_F(DenseMapTest, InsertTest) {
- uintMap.insert(std::make_pair(0u, 1u));
- EXPECT_EQ(1u, uintMap.size());
- EXPECT_EQ(1u, uintMap[0]);
-}
-
-// Test copy constructor method
-TEST_F(DenseMapTest, CopyConstructorTest) {
- uintMap[0] = 1;
- DenseMap<uint32_t, uint32_t> copyMap(uintMap);
-
- EXPECT_EQ(1u, copyMap.size());
- EXPECT_EQ(1u, copyMap[0]);
-}
-
-// Test assignment operator method
-TEST_F(DenseMapTest, AssignmentTest) {
- uintMap[0] = 1;
- DenseMap<uint32_t, uint32_t> copyMap = uintMap;
-
- EXPECT_EQ(1u, copyMap.size());
- EXPECT_EQ(1u, copyMap[0]);
-}
-
-// A more complex iteration test
-TEST_F(DenseMapTest, IterationTest) {
- bool visited[100];
-
- // Insert 100 numbers into the map
- for (int i = 0; i < 100; ++i) {
- visited[i] = false;
- uintMap[i] = 3;
- }
-
- // Iterate over all numbers and mark each one found.
- for (DenseMap<uint32_t, uint32_t>::iterator it = uintMap.begin();
- it != uintMap.end(); ++it) {
- visited[it->first] = true;
- }
-
- // Ensure every number was visited.
- for (int i = 0; i < 100; ++i) {
- ASSERT_TRUE(visited[i]) << "Entry #" << i << " was never visited";
- }
-}
-
-// const_iterator test
-TEST_F(DenseMapTest, ConstIteratorTest) {
- // Check conversion from iterator to const_iterator.
- DenseMap<uint32_t, uint32_t>::iterator it = uintMap.begin();
- DenseMap<uint32_t, uint32_t>::const_iterator cit(it);
- EXPECT_TRUE(it == cit);
-
- // Check copying of const_iterators.
- DenseMap<uint32_t, uint32_t>::const_iterator cit2(cit);
- EXPECT_TRUE(cit == cit2);
-}
-
-}
diff --git a/libclamav/c++/llvm/unittests/ADT/DenseSetTest.cpp b/libclamav/c++/llvm/unittests/ADT/DenseSetTest.cpp
deleted file mode 100644
index 7a35f52..0000000
--- a/libclamav/c++/llvm/unittests/ADT/DenseSetTest.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-//===- llvm/unittest/ADT/DenseSetTest.cpp - DenseSet unit tests --*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include <llvm/ADT/DenseSet.h>
-
-using namespace llvm;
-
-namespace {
-
-// Test fixture
-class DenseSetTest : public testing::Test {
-};
-
-// Test hashing with a set of only two entries.
-TEST_F(DenseSetTest, DoubleEntrySetTest) {
- llvm::DenseSet<unsigned> set(2);
- set.insert(0);
- set.insert(1);
- // Original failure was an infinite loop in this call:
- EXPECT_EQ(0, set.count(2));
-}
-
-}
diff --git a/libclamav/c++/llvm/unittests/ADT/ImmutableSetTest.cpp b/libclamav/c++/llvm/unittests/ADT/ImmutableSetTest.cpp
deleted file mode 100644
index 1be510d..0000000
--- a/libclamav/c++/llvm/unittests/ADT/ImmutableSetTest.cpp
+++ /dev/null
@@ -1,201 +0,0 @@
-//===----------- ImmutableSetTest.cpp - ImmutableSet unit tests ------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/ADT/ImmutableSet.h"
-
-using namespace llvm;
-
-namespace {
-class ImmutableSetTest : public testing::Test {
-protected:
- // for callback tests
- static char buffer[10];
-
- struct MyIter {
- int counter;
- char *ptr;
-
- MyIter() : counter(0), ptr(buffer) {
- for (unsigned i=0; i<sizeof(buffer);++i) buffer[i]='\0';
- }
- void operator()(char c) {
- *ptr++ = c;
- ++counter;
- }
- };
-};
-char ImmutableSetTest::buffer[10];
-
-
-TEST_F(ImmutableSetTest, EmptyIntSetTest) {
- ImmutableSet<int>::Factory f;
-
- EXPECT_TRUE(f.GetEmptySet() == f.GetEmptySet());
- EXPECT_FALSE(f.GetEmptySet() != f.GetEmptySet());
- EXPECT_TRUE(f.GetEmptySet().isEmpty());
-
- ImmutableSet<int> S = f.GetEmptySet();
- EXPECT_EQ(0u, S.getHeight());
- EXPECT_TRUE(S.begin() == S.end());
- EXPECT_FALSE(S.begin() != S.end());
-}
-
-
-TEST_F(ImmutableSetTest, OneElemIntSetTest) {
- ImmutableSet<int>::Factory f;
- ImmutableSet<int> S = f.GetEmptySet();
-
- ImmutableSet<int> S2 = f.Add(S, 3);
- EXPECT_TRUE(S.isEmpty());
- EXPECT_FALSE(S2.isEmpty());
- EXPECT_FALSE(S == S2);
- EXPECT_TRUE(S != S2);
- EXPECT_FALSE(S.contains(3));
- EXPECT_TRUE(S2.contains(3));
- EXPECT_FALSE(S2.begin() == S2.end());
- EXPECT_TRUE(S2.begin() != S2.end());
-
- ImmutableSet<int> S3 = f.Add(S, 2);
- EXPECT_TRUE(S.isEmpty());
- EXPECT_FALSE(S3.isEmpty());
- EXPECT_FALSE(S == S3);
- EXPECT_TRUE(S != S3);
- EXPECT_FALSE(S.contains(2));
- EXPECT_TRUE(S3.contains(2));
-
- EXPECT_FALSE(S2 == S3);
- EXPECT_TRUE(S2 != S3);
- EXPECT_FALSE(S2.contains(2));
- EXPECT_FALSE(S3.contains(3));
-}
-
-TEST_F(ImmutableSetTest, MultiElemIntSetTest) {
- ImmutableSet<int>::Factory f;
- ImmutableSet<int> S = f.GetEmptySet();
-
- ImmutableSet<int> S2 = f.Add(f.Add(f.Add(S, 3), 4), 5);
- ImmutableSet<int> S3 = f.Add(f.Add(f.Add(S2, 9), 20), 43);
- ImmutableSet<int> S4 = f.Add(S2, 9);
-
- EXPECT_TRUE(S.isEmpty());
- EXPECT_FALSE(S2.isEmpty());
- EXPECT_FALSE(S3.isEmpty());
- EXPECT_FALSE(S4.isEmpty());
-
- EXPECT_FALSE(S.contains(3));
- EXPECT_FALSE(S.contains(9));
-
- EXPECT_TRUE(S2.contains(3));
- EXPECT_TRUE(S2.contains(4));
- EXPECT_TRUE(S2.contains(5));
- EXPECT_FALSE(S2.contains(9));
- EXPECT_FALSE(S2.contains(0));
-
- EXPECT_TRUE(S3.contains(43));
- EXPECT_TRUE(S3.contains(20));
- EXPECT_TRUE(S3.contains(9));
- EXPECT_TRUE(S3.contains(3));
- EXPECT_TRUE(S3.contains(4));
- EXPECT_TRUE(S3.contains(5));
- EXPECT_FALSE(S3.contains(0));
-
- EXPECT_TRUE(S4.contains(9));
- EXPECT_TRUE(S4.contains(3));
- EXPECT_TRUE(S4.contains(4));
- EXPECT_TRUE(S4.contains(5));
- EXPECT_FALSE(S4.contains(20));
- EXPECT_FALSE(S4.contains(43));
-}
-
-TEST_F(ImmutableSetTest, RemoveIntSetTest) {
- ImmutableSet<int>::Factory f;
- ImmutableSet<int> S = f.GetEmptySet();
-
- ImmutableSet<int> S2 = f.Add(f.Add(S, 4), 5);
- ImmutableSet<int> S3 = f.Add(S2, 3);
- ImmutableSet<int> S4 = f.Remove(S3, 3);
-
- EXPECT_TRUE(S3.contains(3));
- EXPECT_FALSE(S2.contains(3));
- EXPECT_FALSE(S4.contains(3));
-
- EXPECT_TRUE(S2 == S4);
- EXPECT_TRUE(S3 != S2);
- EXPECT_TRUE(S3 != S4);
-
- EXPECT_TRUE(S3.contains(4));
- EXPECT_TRUE(S3.contains(5));
-
- EXPECT_TRUE(S4.contains(4));
- EXPECT_TRUE(S4.contains(5));
-}
-
-TEST_F(ImmutableSetTest, CallbackCharSetTest) {
- ImmutableSet<char>::Factory f;
- ImmutableSet<char> S = f.GetEmptySet();
-
- ImmutableSet<char> S2 = f.Add(f.Add(f.Add(S, 'a'), 'e'), 'i');
- ImmutableSet<char> S3 = f.Add(f.Add(S2, 'o'), 'u');
-
- S3.foreach<MyIter>();
-
- ASSERT_STREQ("aeiou", buffer);
-}
-
-TEST_F(ImmutableSetTest, Callback2CharSetTest) {
- ImmutableSet<char>::Factory f;
- ImmutableSet<char> S = f.GetEmptySet();
-
- ImmutableSet<char> S2 = f.Add(f.Add(f.Add(S, 'b'), 'c'), 'd');
- ImmutableSet<char> S3 = f.Add(f.Add(f.Add(S2, 'f'), 'g'), 'h');
-
- MyIter obj;
- S3.foreach<MyIter>(obj);
- ASSERT_STREQ("bcdfgh", buffer);
- ASSERT_EQ(6, obj.counter);
-
- MyIter obj2;
- S2.foreach<MyIter>(obj2);
- ASSERT_STREQ("bcd", buffer);
- ASSERT_EQ(3, obj2.counter);
-
- MyIter obj3;
- S.foreach<MyIter>(obj);
- ASSERT_STREQ("", buffer);
- ASSERT_EQ(0, obj3.counter);
-}
-
-TEST_F(ImmutableSetTest, IterLongSetTest) {
- ImmutableSet<long>::Factory f;
- ImmutableSet<long> S = f.GetEmptySet();
-
- ImmutableSet<long> S2 = f.Add(f.Add(f.Add(S, 0), 1), 2);
- ImmutableSet<long> S3 = f.Add(f.Add(f.Add(S2, 3), 4), 5);
-
- int i = 0;
- for (ImmutableSet<long>::iterator I = S.begin(), E = S.end(); I != E; ++I) {
- ASSERT_EQ(i++, *I);
- }
- ASSERT_EQ(0, i);
-
- i = 0;
- for (ImmutableSet<long>::iterator I = S2.begin(), E = S2.end(); I != E; ++I) {
- ASSERT_EQ(i++, *I);
- }
- ASSERT_EQ(3, i);
-
- i = 0;
- for (ImmutableSet<long>::iterator I = S3.begin(), E = S3.end(); I != E; I++) {
- ASSERT_EQ(i++, *I);
- }
- ASSERT_EQ(6, i);
-}
-
-}
diff --git a/libclamav/c++/llvm/unittests/ADT/Makefile b/libclamav/c++/llvm/unittests/ADT/Makefile
deleted file mode 100644
index fe08328..0000000
--- a/libclamav/c++/llvm/unittests/ADT/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-##===- unittests/ADT/Makefile ------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TESTNAME = ADT
-LINK_COMPONENTS := core support
-
-include $(LEVEL)/Makefile.config
-
-# Xfail BitVectorTest for now on PPC Darwin. 7598360.
-ifeq ($(ARCH),PowerPC)
-ifeq ($(TARGET_OS),Darwin)
-CPP.Flags += -DXFAIL
-endif
-endif
-
-include $(LLVM_SRC_ROOT)/unittests/Makefile.unittest
diff --git a/libclamav/c++/llvm/unittests/ADT/SmallBitVectorTest.cpp b/libclamav/c++/llvm/unittests/ADT/SmallBitVectorTest.cpp
deleted file mode 100644
index a2cc652..0000000
--- a/libclamav/c++/llvm/unittests/ADT/SmallBitVectorTest.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-//===- llvm/unittest/ADT/SmallBitVectorTest.cpp - SmallBitVector tests ----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/ADT/SmallBitVector.h"
-#include "gtest/gtest.h"
-
-using namespace llvm;
-
-namespace {
-
-TEST(SmallBitVectorTest, TrivialOperation) {
- SmallBitVector Vec;
- EXPECT_EQ(0U, Vec.count());
- EXPECT_EQ(0U, Vec.size());
- EXPECT_FALSE(Vec.any());
- EXPECT_TRUE(Vec.none());
- EXPECT_TRUE(Vec.empty());
-
- Vec.resize(5, true);
- EXPECT_EQ(5U, Vec.count());
- EXPECT_EQ(5U, Vec.size());
- EXPECT_TRUE(Vec.any());
- EXPECT_FALSE(Vec.none());
- EXPECT_FALSE(Vec.empty());
-
- Vec.resize(11);
- EXPECT_EQ(5U, Vec.count());
- EXPECT_EQ(11U, Vec.size());
- EXPECT_TRUE(Vec.any());
- EXPECT_FALSE(Vec.none());
- EXPECT_FALSE(Vec.empty());
-
- SmallBitVector Inv = ~Vec;
- EXPECT_EQ(6U, Inv.count());
- EXPECT_EQ(11U, Inv.size());
- EXPECT_TRUE(Inv.any());
- EXPECT_FALSE(Inv.none());
- EXPECT_FALSE(Inv.empty());
-
- EXPECT_FALSE(Inv == Vec);
- EXPECT_TRUE(Inv != Vec);
- Vec = ~Vec;
- EXPECT_TRUE(Inv == Vec);
- EXPECT_FALSE(Inv != Vec);
-
- // Add some "interesting" data to Vec.
- Vec.resize(23, true);
- Vec.resize(25, false);
- Vec.resize(26, true);
- Vec.resize(29, false);
- Vec.resize(33, true);
- Vec.resize(61, false);
- unsigned Count = 0;
- for (unsigned i = Vec.find_first(); i != -1u; i = Vec.find_next(i)) {
- ++Count;
- EXPECT_TRUE(Vec[i]);
- EXPECT_TRUE(Vec.test(i));
- }
- EXPECT_EQ(Count, Vec.count());
- EXPECT_EQ(Count, 23u);
- EXPECT_FALSE(Vec[0]);
- EXPECT_TRUE(Vec[32]);
- EXPECT_FALSE(Vec[60]);
-
- SmallBitVector Copy = Vec;
- SmallBitVector Alt(3, false);
- Alt.resize(6, true);
- std::swap(Alt, Vec);
- EXPECT_TRUE(Copy == Alt);
- EXPECT_TRUE(Vec.size() == 6);
- EXPECT_TRUE(Vec.count() == 3);
- EXPECT_TRUE(Vec.find_first() == 3);
- std::swap(Copy, Vec);
-
- // Add some more "interesting" data.
- Vec.resize(68, true);
- Vec.resize(78, false);
- Vec.resize(89, true);
- Vec.resize(90, false);
- Vec.resize(91, true);
- Vec.resize(130, false);
- Count = 0;
- for (unsigned i = Vec.find_first(); i != -1u; i = Vec.find_next(i)) {
- ++Count;
- EXPECT_TRUE(Vec[i]);
- EXPECT_TRUE(Vec.test(i));
- }
- EXPECT_EQ(Count, Vec.count());
- EXPECT_EQ(Count, 42u);
- EXPECT_FALSE(Vec[0]);
- EXPECT_TRUE(Vec[32]);
- EXPECT_FALSE(Vec[60]);
- EXPECT_FALSE(Vec[129]);
-
- Vec.flip(60);
- EXPECT_TRUE(Vec[60]);
- EXPECT_EQ(Count + 1, Vec.count());
- Vec.flip(60);
- EXPECT_FALSE(Vec[60]);
- EXPECT_EQ(Count, Vec.count());
-
- Vec.reset(32);
- EXPECT_FALSE(Vec[32]);
- EXPECT_EQ(Count - 1, Vec.count());
- Vec.set(32);
- EXPECT_TRUE(Vec[32]);
- EXPECT_EQ(Count, Vec.count());
-
- Vec.flip();
- EXPECT_EQ(Vec.size() - Count, Vec.count());
-
- Vec.reset();
- EXPECT_EQ(0U, Vec.count());
- EXPECT_EQ(130U, Vec.size());
- EXPECT_FALSE(Vec.any());
- EXPECT_TRUE(Vec.none());
- EXPECT_FALSE(Vec.empty());
-
- Inv = ~SmallBitVector();
- EXPECT_EQ(0U, Inv.count());
- EXPECT_EQ(0U, Inv.size());
- EXPECT_FALSE(Inv.any());
- EXPECT_TRUE(Inv.none());
- EXPECT_TRUE(Inv.empty());
-
- Vec.clear();
- EXPECT_EQ(0U, Vec.count());
- EXPECT_EQ(0U, Vec.size());
- EXPECT_FALSE(Vec.any());
- EXPECT_TRUE(Vec.none());
- EXPECT_TRUE(Vec.empty());
-}
-
-TEST(SmallBitVectorTest, CompoundAssignment) {
- SmallBitVector A;
- A.resize(10);
- A.set(4);
- A.set(7);
-
- SmallBitVector B;
- B.resize(50);
- B.set(5);
- B.set(18);
-
- A |= B;
- EXPECT_TRUE(A.test(4));
- EXPECT_TRUE(A.test(5));
- EXPECT_TRUE(A.test(7));
- EXPECT_TRUE(A.test(18));
- EXPECT_EQ(4U, A.count());
- EXPECT_EQ(50U, A.size());
-
- B.resize(10);
- B.set();
- B.reset(2);
- B.reset(7);
- A &= B;
- EXPECT_FALSE(A.test(2));
- EXPECT_FALSE(A.test(7));
- EXPECT_EQ(2U, A.count());
- EXPECT_EQ(50U, A.size());
-
- B.resize(100);
- B.set();
-
- A ^= B;
- EXPECT_TRUE(A.test(2));
- EXPECT_TRUE(A.test(7));
- EXPECT_EQ(98U, A.count());
- EXPECT_EQ(100U, A.size());
-}
-
-}
diff --git a/libclamav/c++/llvm/unittests/ADT/SmallStringTest.cpp b/libclamav/c++/llvm/unittests/ADT/SmallStringTest.cpp
deleted file mode 100644
index 099d815..0000000
--- a/libclamav/c++/llvm/unittests/ADT/SmallStringTest.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-//===- llvm/unittest/ADT/SmallStringTest.cpp ------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// SmallString unit tests.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/ADT/SmallString.h"
-#include <stdarg.h>
-#include <climits>
-#include <cstring>
-
-using namespace llvm;
-
-namespace {
-
-// Test fixture class
-class SmallStringTest : public testing::Test {
-protected:
- typedef SmallString<40> StringType;
-
- StringType theString;
-
- void assertEmpty(StringType & v) {
- // Size tests
- EXPECT_EQ(0u, v.size());
- EXPECT_TRUE(v.empty());
- // Iterator tests
- EXPECT_TRUE(v.begin() == v.end());
- }
-};
-
-// New string test.
-TEST_F(SmallStringTest, EmptyStringTest) {
- SCOPED_TRACE("EmptyStringTest");
- assertEmpty(theString);
- EXPECT_TRUE(theString.rbegin() == theString.rend());
-}
-
-}
-
diff --git a/libclamav/c++/llvm/unittests/ADT/SmallVectorTest.cpp b/libclamav/c++/llvm/unittests/ADT/SmallVectorTest.cpp
deleted file mode 100644
index 8a81796..0000000
--- a/libclamav/c++/llvm/unittests/ADT/SmallVectorTest.cpp
+++ /dev/null
@@ -1,402 +0,0 @@
-//===- llvm/unittest/ADT/SmallVectorTest.cpp ------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// SmallVector unit tests.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/ADT/SmallVector.h"
-#include <stdarg.h>
-
-using namespace llvm;
-
-namespace {
-
-/// A helper class that counts the total number of constructor and
-/// destructor calls.
-class Constructable {
-private:
- static int numConstructorCalls;
- static int numDestructorCalls;
- static int numAssignmentCalls;
-
- int value;
-
-public:
- Constructable() : value(0) {
- ++numConstructorCalls;
- }
-
- Constructable(int val) : value(val) {
- ++numConstructorCalls;
- }
-
- Constructable(const Constructable & src) {
- value = src.value;
- ++numConstructorCalls;
- }
-
- ~Constructable() {
- ++numDestructorCalls;
- }
-
- Constructable & operator=(const Constructable & src) {
- value = src.value;
- ++numAssignmentCalls;
- return *this;
- }
-
- int getValue() const {
- return abs(value);
- }
-
- static void reset() {
- numConstructorCalls = 0;
- numDestructorCalls = 0;
- numAssignmentCalls = 0;
- }
-
- static int getNumConstructorCalls() {
- return numConstructorCalls;
- }
-
- static int getNumDestructorCalls() {
- return numDestructorCalls;
- }
-
- friend bool operator==(const Constructable & c0, const Constructable & c1) {
- return c0.getValue() == c1.getValue();
- }
-
- friend bool operator!=(const Constructable & c0, const Constructable & c1) {
- return c0.getValue() != c1.getValue();
- }
-};
-
-int Constructable::numConstructorCalls;
-int Constructable::numDestructorCalls;
-int Constructable::numAssignmentCalls;
-
-// Test fixture class
-class SmallVectorTest : public testing::Test {
-protected:
- typedef SmallVector<Constructable, 4> VectorType;
-
- VectorType theVector;
- VectorType otherVector;
-
- void SetUp() {
- Constructable::reset();
- }
-
- void assertEmpty(VectorType & v) {
- // Size tests
- EXPECT_EQ(0u, v.size());
- EXPECT_TRUE(v.empty());
-
- // Iterator tests
- EXPECT_TRUE(v.begin() == v.end());
- }
-
- // Assert that theVector contains the specified values, in order.
- void assertValuesInOrder(VectorType & v, size_t size, ...) {
- EXPECT_EQ(size, v.size());
-
- va_list ap;
- va_start(ap, size);
- for (size_t i = 0; i < size; ++i) {
- int value = va_arg(ap, int);
- EXPECT_EQ(value, v[i].getValue());
- }
-
- va_end(ap);
- }
-
- // Generate a sequence of values to initialize the vector.
- void makeSequence(VectorType & v, int start, int end) {
- for (int i = start; i <= end; ++i) {
- v.push_back(Constructable(i));
- }
- }
-};
-
-// New vector test.
-TEST_F(SmallVectorTest, EmptyVectorTest) {
- SCOPED_TRACE("EmptyVectorTest");
- assertEmpty(theVector);
- EXPECT_TRUE(theVector.rbegin() == theVector.rend());
- EXPECT_EQ(0, Constructable::getNumConstructorCalls());
- EXPECT_EQ(0, Constructable::getNumDestructorCalls());
-}
-
-// Simple insertions and deletions.
-TEST_F(SmallVectorTest, PushPopTest) {
- SCOPED_TRACE("PushPopTest");
-
- // Push an element
- theVector.push_back(Constructable(1));
-
- // Size tests
- assertValuesInOrder(theVector, 1u, 1);
- EXPECT_FALSE(theVector.begin() == theVector.end());
- EXPECT_FALSE(theVector.empty());
-
- // Push another element
- theVector.push_back(Constructable(2));
- assertValuesInOrder(theVector, 2u, 1, 2);
-
- // Pop one element
- theVector.pop_back();
- assertValuesInOrder(theVector, 1u, 1);
-
- // Pop another element
- theVector.pop_back();
- assertEmpty(theVector);
-
- // Check number of constructor calls. Should be 2 for each list element,
- // one for the argument to push_back, and one for the list element itself.
- EXPECT_EQ(4, Constructable::getNumConstructorCalls());
- EXPECT_EQ(4, Constructable::getNumDestructorCalls());
-}
-
-// Clear test.
-TEST_F(SmallVectorTest, ClearTest) {
- SCOPED_TRACE("ClearTest");
-
- makeSequence(theVector, 1, 2);
- theVector.clear();
-
- assertEmpty(theVector);
- EXPECT_EQ(4, Constructable::getNumConstructorCalls());
- EXPECT_EQ(4, Constructable::getNumDestructorCalls());
-}
-
-// Resize smaller test.
-TEST_F(SmallVectorTest, ResizeShrinkTest) {
- SCOPED_TRACE("ResizeShrinkTest");
-
- makeSequence(theVector, 1, 3);
- theVector.resize(1);
-
- assertValuesInOrder(theVector, 1u, 1);
- EXPECT_EQ(6, Constructable::getNumConstructorCalls());
- EXPECT_EQ(5, Constructable::getNumDestructorCalls());
-}
-
-// Resize bigger test.
-TEST_F(SmallVectorTest, ResizeGrowTest) {
- SCOPED_TRACE("ResizeGrowTest");
-
- theVector.resize(2);
-
- // The extra constructor/destructor calls come from the temporary object used
- // to initialize the contents of the resized array (via copy construction).
- EXPECT_EQ(3, Constructable::getNumConstructorCalls());
- EXPECT_EQ(1, Constructable::getNumDestructorCalls());
- EXPECT_EQ(2u, theVector.size());
-}
-
-// Resize with fill value.
-TEST_F(SmallVectorTest, ResizeFillTest) {
- SCOPED_TRACE("ResizeFillTest");
-
- theVector.resize(3, Constructable(77));
- assertValuesInOrder(theVector, 3u, 77, 77, 77);
-}
-
-// Overflow past fixed size.
-TEST_F(SmallVectorTest, OverflowTest) {
- SCOPED_TRACE("OverflowTest");
-
- // Push more elements than the fixed size.
- makeSequence(theVector, 1, 10);
-
- // Test size and values.
- EXPECT_EQ(10u, theVector.size());
- for (int i = 0; i < 10; ++i) {
- EXPECT_EQ(i+1, theVector[i].getValue());
- }
-
- // Now resize back to fixed size.
- theVector.resize(1);
-
- assertValuesInOrder(theVector, 1u, 1);
-}
-
-// Iteration tests.
-TEST_F(SmallVectorTest, IterationTest) {
- makeSequence(theVector, 1, 2);
-
- // Forward Iteration
- VectorType::iterator it = theVector.begin();
- EXPECT_TRUE(*it == theVector.front());
- EXPECT_TRUE(*it == theVector[0]);
- EXPECT_EQ(1, it->getValue());
- ++it;
- EXPECT_TRUE(*it == theVector[1]);
- EXPECT_TRUE(*it == theVector.back());
- EXPECT_EQ(2, it->getValue());
- ++it;
- EXPECT_TRUE(it == theVector.end());
- --it;
- EXPECT_TRUE(*it == theVector[1]);
- EXPECT_EQ(2, it->getValue());
- --it;
- EXPECT_TRUE(*it == theVector[0]);
- EXPECT_EQ(1, it->getValue());
-
- // Reverse Iteration
- VectorType::reverse_iterator rit = theVector.rbegin();
- EXPECT_TRUE(*rit == theVector[1]);
- EXPECT_EQ(2, rit->getValue());
- ++rit;
- EXPECT_TRUE(*rit == theVector[0]);
- EXPECT_EQ(1, rit->getValue());
- ++rit;
- EXPECT_TRUE(rit == theVector.rend());
- --rit;
- EXPECT_TRUE(*rit == theVector[0]);
- EXPECT_EQ(1, rit->getValue());
- --rit;
- EXPECT_TRUE(*rit == theVector[1]);
- EXPECT_EQ(2, rit->getValue());
-}
-
-// Swap test.
-TEST_F(SmallVectorTest, SwapTest) {
- SCOPED_TRACE("SwapTest");
-
- makeSequence(theVector, 1, 2);
- std::swap(theVector, otherVector);
-
- assertEmpty(theVector);
- assertValuesInOrder(otherVector, 2u, 1, 2);
-}
-
-// Append test
-TEST_F(SmallVectorTest, AppendTest) {
- SCOPED_TRACE("AppendTest");
-
- makeSequence(otherVector, 2, 3);
-
- theVector.push_back(Constructable(1));
- theVector.append(otherVector.begin(), otherVector.end());
-
- assertValuesInOrder(theVector, 3u, 1, 2, 3);
-}
-
-// Append repeated test
-TEST_F(SmallVectorTest, AppendRepeatedTest) {
- SCOPED_TRACE("AppendRepeatedTest");
-
- theVector.push_back(Constructable(1));
- theVector.append(2, Constructable(77));
- assertValuesInOrder(theVector, 3u, 1, 77, 77);
-}
-
-// Assign test
-TEST_F(SmallVectorTest, AssignTest) {
- SCOPED_TRACE("AssignTest");
-
- theVector.push_back(Constructable(1));
- theVector.assign(2, Constructable(77));
- assertValuesInOrder(theVector, 2u, 77, 77);
-}
-
-// Erase a single element
-TEST_F(SmallVectorTest, EraseTest) {
- SCOPED_TRACE("EraseTest");
-
- makeSequence(theVector, 1, 3);
- theVector.erase(theVector.begin());
- assertValuesInOrder(theVector, 2u, 2, 3);
-}
-
-// Erase a range of elements
-TEST_F(SmallVectorTest, EraseRangeTest) {
- SCOPED_TRACE("EraseRangeTest");
-
- makeSequence(theVector, 1, 3);
- theVector.erase(theVector.begin(), theVector.begin() + 2);
- assertValuesInOrder(theVector, 1u, 3);
-}
-
-// Insert a single element.
-TEST_F(SmallVectorTest, InsertTest) {
- SCOPED_TRACE("InsertTest");
-
- makeSequence(theVector, 1, 3);
- theVector.insert(theVector.begin() + 1, Constructable(77));
- assertValuesInOrder(theVector, 4u, 1, 77, 2, 3);
-}
-
-// Insert repeated elements.
-TEST_F(SmallVectorTest, InsertRepeatedTest) {
- SCOPED_TRACE("InsertRepeatedTest");
-
- makeSequence(theVector, 10, 15);
- theVector.insert(theVector.begin() + 1, 2, Constructable(16));
- assertValuesInOrder(theVector, 8u, 10, 16, 16, 11, 12, 13, 14, 15);
-}
-
-// Insert range.
-TEST_F(SmallVectorTest, InsertRangeTest) {
- SCOPED_TRACE("InsertRepeatedTest");
-
- makeSequence(theVector, 1, 3);
- theVector.insert(theVector.begin() + 1, 3, Constructable(77));
- assertValuesInOrder(theVector, 6u, 1, 77, 77, 77, 2, 3);
-}
-
-// Comparison tests.
-TEST_F(SmallVectorTest, ComparisonTest) {
- SCOPED_TRACE("ComparisonTest");
-
- makeSequence(theVector, 1, 3);
- makeSequence(otherVector, 1, 3);
-
- EXPECT_TRUE(theVector == otherVector);
- EXPECT_FALSE(theVector != otherVector);
-
- otherVector.clear();
- makeSequence(otherVector, 2, 4);
-
- EXPECT_FALSE(theVector == otherVector);
- EXPECT_TRUE(theVector != otherVector);
-}
-
-// Constant vector tests.
-TEST_F(SmallVectorTest, ConstVectorTest) {
- const VectorType constVector;
-
- EXPECT_EQ(0u, constVector.size());
- EXPECT_TRUE(constVector.empty());
- EXPECT_TRUE(constVector.begin() == constVector.end());
-}
-
-// Direct array access.
-TEST_F(SmallVectorTest, DirectVectorTest) {
- EXPECT_EQ(0u, theVector.size());
- EXPECT_EQ(4u, theVector.capacity());
- EXPECT_EQ(0, Constructable::getNumConstructorCalls());
- theVector.end()[0] = 1;
- theVector.end()[1] = 2;
- theVector.end()[2] = 3;
- theVector.end()[3] = 4;
- theVector.set_size(4);
- EXPECT_EQ(4u, theVector.size());
- EXPECT_EQ(4, Constructable::getNumConstructorCalls());
- EXPECT_EQ(1, theVector[0].getValue());
- EXPECT_EQ(2, theVector[1].getValue());
- EXPECT_EQ(3, theVector[2].getValue());
- EXPECT_EQ(4, theVector[3].getValue());
-}
-
-}
diff --git a/libclamav/c++/llvm/unittests/ADT/SparseBitVectorTest.cpp b/libclamav/c++/llvm/unittests/ADT/SparseBitVectorTest.cpp
deleted file mode 100644
index d8fc5ce..0000000
--- a/libclamav/c++/llvm/unittests/ADT/SparseBitVectorTest.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-//===- llvm/unittest/ADT/SparseBitVectorTest.cpp - SparseBitVector tests --===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/ADT/SparseBitVector.h"
-#include "gtest/gtest.h"
-
-using namespace llvm;
-
-namespace {
-
-TEST(SparseBitVectorTest, TrivialOperation) {
- SparseBitVector<> Vec;
- EXPECT_EQ(0U, Vec.count());
- EXPECT_FALSE(Vec.test(17));
- Vec.set(5);
- EXPECT_TRUE(Vec.test(5));
- EXPECT_FALSE(Vec.test(17));
- Vec.reset(6);
- EXPECT_TRUE(Vec.test(5));
- EXPECT_FALSE(Vec.test(6));
- Vec.reset(5);
- EXPECT_FALSE(Vec.test(5));
- EXPECT_TRUE(Vec.test_and_set(17));
- EXPECT_FALSE(Vec.test_and_set(17));
- EXPECT_TRUE(Vec.test(17));
- Vec.clear();
- EXPECT_FALSE(Vec.test(17));
-}
-
-}
diff --git a/libclamav/c++/llvm/unittests/ADT/StringMapTest.cpp b/libclamav/c++/llvm/unittests/ADT/StringMapTest.cpp
deleted file mode 100644
index 413f068..0000000
--- a/libclamav/c++/llvm/unittests/ADT/StringMapTest.cpp
+++ /dev/null
@@ -1,207 +0,0 @@
-//===- llvm/unittest/ADT/StringMapMap.cpp - StringMap unit tests ----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/ADT/StringMap.h"
-#include "llvm/System/DataTypes.h"
-using namespace llvm;
-
-namespace {
-
-// Test fixture
-class StringMapTest : public testing::Test {
-protected:
- StringMap<uint32_t> testMap;
-
- static const char testKey[];
- static const uint32_t testValue;
- static const char* testKeyFirst;
- static size_t testKeyLength;
- static const std::string testKeyStr;
-
- void assertEmptyMap() {
- // Size tests
- EXPECT_EQ(0u, testMap.size());
- EXPECT_TRUE(testMap.empty());
-
- // Iterator tests
- EXPECT_TRUE(testMap.begin() == testMap.end());
-
- // Lookup tests
- EXPECT_EQ(0u, testMap.count(testKey));
- EXPECT_EQ(0u, testMap.count(StringRef(testKeyFirst, testKeyLength)));
- EXPECT_EQ(0u, testMap.count(testKeyStr));
- EXPECT_TRUE(testMap.find(testKey) == testMap.end());
- EXPECT_TRUE(testMap.find(StringRef(testKeyFirst, testKeyLength)) ==
- testMap.end());
- EXPECT_TRUE(testMap.find(testKeyStr) == testMap.end());
- }
-
- void assertSingleItemMap() {
- // Size tests
- EXPECT_EQ(1u, testMap.size());
- EXPECT_FALSE(testMap.begin() == testMap.end());
- EXPECT_FALSE(testMap.empty());
-
- // Iterator tests
- StringMap<uint32_t>::iterator it = testMap.begin();
- EXPECT_STREQ(testKey, it->first());
- EXPECT_EQ(testValue, it->second);
- ++it;
- EXPECT_TRUE(it == testMap.end());
-
- // Lookup tests
- EXPECT_EQ(1u, testMap.count(testKey));
- EXPECT_EQ(1u, testMap.count(StringRef(testKeyFirst, testKeyLength)));
- EXPECT_EQ(1u, testMap.count(testKeyStr));
- EXPECT_TRUE(testMap.find(testKey) == testMap.begin());
- EXPECT_TRUE(testMap.find(StringRef(testKeyFirst, testKeyLength)) ==
- testMap.begin());
- EXPECT_TRUE(testMap.find(testKeyStr) == testMap.begin());
- }
-};
-
-const char StringMapTest::testKey[] = "key";
-const uint32_t StringMapTest::testValue = 1u;
-const char* StringMapTest::testKeyFirst = testKey;
-size_t StringMapTest::testKeyLength = sizeof(testKey) - 1;
-const std::string StringMapTest::testKeyStr(testKey);
-
-// Empty map tests.
-TEST_F(StringMapTest, EmptyMapTest) {
- SCOPED_TRACE("EmptyMapTest");
- assertEmptyMap();
-}
-
-// Constant map tests.
-TEST_F(StringMapTest, ConstEmptyMapTest) {
- const StringMap<uint32_t>& constTestMap = testMap;
-
- // Size tests
- EXPECT_EQ(0u, constTestMap.size());
- EXPECT_TRUE(constTestMap.empty());
-
- // Iterator tests
- EXPECT_TRUE(constTestMap.begin() == constTestMap.end());
-
- // Lookup tests
- EXPECT_EQ(0u, constTestMap.count(testKey));
- EXPECT_EQ(0u, constTestMap.count(StringRef(testKeyFirst, testKeyLength)));
- EXPECT_EQ(0u, constTestMap.count(testKeyStr));
- EXPECT_TRUE(constTestMap.find(testKey) == constTestMap.end());
- EXPECT_TRUE(constTestMap.find(StringRef(testKeyFirst, testKeyLength)) ==
- constTestMap.end());
- EXPECT_TRUE(constTestMap.find(testKeyStr) == constTestMap.end());
-}
-
-// A map with a single entry.
-TEST_F(StringMapTest, SingleEntryMapTest) {
- SCOPED_TRACE("SingleEntryMapTest");
- testMap[testKey] = testValue;
- assertSingleItemMap();
-}
-
-// Test clear() method.
-TEST_F(StringMapTest, ClearTest) {
- SCOPED_TRACE("ClearTest");
- testMap[testKey] = testValue;
- testMap.clear();
- assertEmptyMap();
-}
-
-// Test erase(iterator) method.
-TEST_F(StringMapTest, EraseIteratorTest) {
- SCOPED_TRACE("EraseIteratorTest");
- testMap[testKey] = testValue;
- testMap.erase(testMap.begin());
- assertEmptyMap();
-}
-
-// Test erase(value) method.
-TEST_F(StringMapTest, EraseValueTest) {
- SCOPED_TRACE("EraseValueTest");
- testMap[testKey] = testValue;
- testMap.erase(testKey);
- assertEmptyMap();
-}
-
-// Test inserting two values and erasing one.
-TEST_F(StringMapTest, InsertAndEraseTest) {
- SCOPED_TRACE("InsertAndEraseTest");
- testMap[testKey] = testValue;
- testMap["otherKey"] = 2;
- testMap.erase("otherKey");
- assertSingleItemMap();
-}
-
-// A more complex iteration test.
-TEST_F(StringMapTest, IterationTest) {
- bool visited[100];
-
- // Insert 100 numbers into the map
- for (int i = 0; i < 100; ++i) {
- std::stringstream ss;
- ss << "key_" << i;
- testMap[ss.str()] = i;
- visited[i] = false;
- }
-
- // Iterate over all numbers and mark each one found.
- for (StringMap<uint32_t>::iterator it = testMap.begin();
- it != testMap.end(); ++it) {
- std::stringstream ss;
- ss << "key_" << it->second;
- ASSERT_STREQ(ss.str().c_str(), it->first());
- visited[it->second] = true;
- }
-
- // Ensure every number was visited.
- for (int i = 0; i < 100; ++i) {
- ASSERT_TRUE(visited[i]) << "Entry #" << i << " was never visited";
- }
-}
-
-} // end anonymous namespace
-
-namespace llvm {
-
-template <>
-class StringMapEntryInitializer<uint32_t> {
-public:
- template <typename InitTy>
- static void Initialize(StringMapEntry<uint32_t> &T, InitTy InitVal) {
- T.second = InitVal;
- }
-};
-
-} // end llvm namespace
-
-namespace {
-
-// Test StringMapEntry::Create() method.
-TEST_F(StringMapTest, StringMapEntryTest) {
- StringMap<uint32_t>::value_type* entry =
- StringMap<uint32_t>::value_type::Create(
- testKeyFirst, testKeyFirst + testKeyLength, 1u);
- EXPECT_STREQ(testKey, entry->first());
- EXPECT_EQ(1u, entry->second);
- free(entry);
-}
-
-// Test insert() method.
-TEST_F(StringMapTest, InsertTest) {
- SCOPED_TRACE("InsertTest");
- testMap.insert(
- StringMap<uint32_t>::value_type::Create(
- testKeyFirst, testKeyFirst + testKeyLength,
- testMap.getAllocator(), 1u));
- assertSingleItemMap();
-}
-
-} // end anonymous namespace
diff --git a/libclamav/c++/llvm/unittests/ADT/StringRefTest.cpp b/libclamav/c++/llvm/unittests/ADT/StringRefTest.cpp
deleted file mode 100644
index b0dcb0a..0000000
--- a/libclamav/c++/llvm/unittests/ADT/StringRefTest.cpp
+++ /dev/null
@@ -1,262 +0,0 @@
-//===- llvm/unittest/ADT/StringRefTest.cpp - StringRef unit tests ---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-namespace llvm {
-
-std::ostream &operator<<(std::ostream &OS, const StringRef &S) {
- OS << S;
- return OS;
-}
-
-std::ostream &operator<<(std::ostream &OS,
- const std::pair<StringRef, StringRef> &P) {
- OS << "(" << P.first << ", " << P.second << ")";
- return OS;
-}
-
-}
-
-namespace {
-TEST(StringRefTest, Construction) {
- EXPECT_EQ("", StringRef());
- EXPECT_EQ("hello", StringRef("hello"));
- EXPECT_EQ("hello", StringRef("hello world", 5));
- EXPECT_EQ("hello", StringRef(std::string("hello")));
-}
-
-TEST(StringRefTest, Iteration) {
- StringRef S("hello");
- const char *p = "hello";
- for (const char *it = S.begin(), *ie = S.end(); it != ie; ++it, ++p)
- EXPECT_EQ(*it, *p);
-}
-
-TEST(StringRefTest, StringOps) {
- const char *p = "hello";
- EXPECT_EQ(p, StringRef(p, 0).data());
- EXPECT_TRUE(StringRef().empty());
- EXPECT_EQ((size_t) 5, StringRef("hello").size());
- EXPECT_EQ(-1, StringRef("aab").compare("aad"));
- EXPECT_EQ( 0, StringRef("aab").compare("aab"));
- EXPECT_EQ( 1, StringRef("aab").compare("aaa"));
- EXPECT_EQ(-1, StringRef("aab").compare("aabb"));
- EXPECT_EQ( 1, StringRef("aab").compare("aa"));
-}
-
-TEST(StringRefTest, Operators) {
- EXPECT_EQ("", StringRef());
- EXPECT_TRUE(StringRef("aab") < StringRef("aad"));
- EXPECT_FALSE(StringRef("aab") < StringRef("aab"));
- EXPECT_TRUE(StringRef("aab") <= StringRef("aab"));
- EXPECT_FALSE(StringRef("aab") <= StringRef("aaa"));
- EXPECT_TRUE(StringRef("aad") > StringRef("aab"));
- EXPECT_FALSE(StringRef("aab") > StringRef("aab"));
- EXPECT_TRUE(StringRef("aab") >= StringRef("aab"));
- EXPECT_FALSE(StringRef("aaa") >= StringRef("aab"));
- EXPECT_EQ(StringRef("aab"), StringRef("aab"));
- EXPECT_FALSE(StringRef("aab") == StringRef("aac"));
- EXPECT_FALSE(StringRef("aab") != StringRef("aab"));
- EXPECT_TRUE(StringRef("aab") != StringRef("aac"));
- EXPECT_EQ('a', StringRef("aab")[1]);
-}
-
-TEST(StringRefTest, Substr) {
- StringRef Str("hello");
- EXPECT_EQ("lo", Str.substr(3));
- EXPECT_EQ("", Str.substr(100));
- EXPECT_EQ("hello", Str.substr(0, 100));
- EXPECT_EQ("o", Str.substr(4, 10));
-}
-
-TEST(StringRefTest, Slice) {
- StringRef Str("hello");
- EXPECT_EQ("l", Str.slice(2, 3));
- EXPECT_EQ("ell", Str.slice(1, 4));
- EXPECT_EQ("llo", Str.slice(2, 100));
- EXPECT_EQ("", Str.slice(2, 1));
- EXPECT_EQ("", Str.slice(10, 20));
-}
-
-TEST(StringRefTest, Split) {
- StringRef Str("hello");
- EXPECT_EQ(std::make_pair(StringRef("hello"), StringRef("")),
- Str.split('X'));
- EXPECT_EQ(std::make_pair(StringRef("h"), StringRef("llo")),
- Str.split('e'));
- EXPECT_EQ(std::make_pair(StringRef(""), StringRef("ello")),
- Str.split('h'));
- EXPECT_EQ(std::make_pair(StringRef("he"), StringRef("lo")),
- Str.split('l'));
- EXPECT_EQ(std::make_pair(StringRef("hell"), StringRef("")),
- Str.split('o'));
-
- EXPECT_EQ(std::make_pair(StringRef("hello"), StringRef("")),
- Str.rsplit('X'));
- EXPECT_EQ(std::make_pair(StringRef("h"), StringRef("llo")),
- Str.rsplit('e'));
- EXPECT_EQ(std::make_pair(StringRef(""), StringRef("ello")),
- Str.rsplit('h'));
- EXPECT_EQ(std::make_pair(StringRef("hel"), StringRef("o")),
- Str.rsplit('l'));
- EXPECT_EQ(std::make_pair(StringRef("hell"), StringRef("")),
- Str.rsplit('o'));
-}
-
-TEST(StringRefTest, Split2) {
- SmallVector<StringRef, 5> parts;
- SmallVector<StringRef, 5> expected;
-
- expected.push_back("ab"); expected.push_back("c");
- StringRef(",ab,,c,").split(parts, ",", -1, false);
- EXPECT_TRUE(parts == expected);
-
- expected.clear(); parts.clear();
- expected.push_back(""); expected.push_back("ab"); expected.push_back("");
- expected.push_back("c"); expected.push_back("");
- StringRef(",ab,,c,").split(parts, ",", -1, true);
- EXPECT_TRUE(parts == expected);
-
- expected.clear(); parts.clear();
- expected.push_back("");
- StringRef("").split(parts, ",", -1, true);
- EXPECT_TRUE(parts == expected);
-
- expected.clear(); parts.clear();
- StringRef("").split(parts, ",", -1, false);
- EXPECT_TRUE(parts == expected);
-
- expected.clear(); parts.clear();
- StringRef(",").split(parts, ",", -1, false);
- EXPECT_TRUE(parts == expected);
-
- expected.clear(); parts.clear();
- expected.push_back(""); expected.push_back("");
- StringRef(",").split(parts, ",", -1, true);
- EXPECT_TRUE(parts == expected);
-
- expected.clear(); parts.clear();
- expected.push_back("a"); expected.push_back("b");
- StringRef("a,b").split(parts, ",", -1, true);
- EXPECT_TRUE(parts == expected);
-
- // Test MaxSplit
- expected.clear(); parts.clear();
- expected.push_back("a,,b,c");
- StringRef("a,,b,c").split(parts, ",", 0, true);
- EXPECT_TRUE(parts == expected);
-
- expected.clear(); parts.clear();
- expected.push_back("a,,b,c");
- StringRef("a,,b,c").split(parts, ",", 0, false);
- EXPECT_TRUE(parts == expected);
-
- expected.clear(); parts.clear();
- expected.push_back("a"); expected.push_back(",b,c");
- StringRef("a,,b,c").split(parts, ",", 1, true);
- EXPECT_TRUE(parts == expected);
-
- expected.clear(); parts.clear();
- expected.push_back("a"); expected.push_back(",b,c");
- StringRef("a,,b,c").split(parts, ",", 1, false);
- EXPECT_TRUE(parts == expected);
-
- expected.clear(); parts.clear();
- expected.push_back("a"); expected.push_back(""); expected.push_back("b,c");
- StringRef("a,,b,c").split(parts, ",", 2, true);
- EXPECT_TRUE(parts == expected);
-
- expected.clear(); parts.clear();
- expected.push_back("a"); expected.push_back("b,c");
- StringRef("a,,b,c").split(parts, ",", 2, false);
- EXPECT_TRUE(parts == expected);
-
- expected.clear(); parts.clear();
- expected.push_back("a"); expected.push_back(""); expected.push_back("b");
- expected.push_back("c");
- StringRef("a,,b,c").split(parts, ",", 3, true);
- EXPECT_TRUE(parts == expected);
-
- expected.clear(); parts.clear();
- expected.push_back("a"); expected.push_back("b"); expected.push_back("c");
- StringRef("a,,b,c").split(parts, ",", 3, false);
- EXPECT_TRUE(parts == expected);
-}
-
-TEST(StringRefTest, StartsWith) {
- StringRef Str("hello");
- EXPECT_TRUE(Str.startswith("he"));
- EXPECT_FALSE(Str.startswith("helloworld"));
- EXPECT_FALSE(Str.startswith("hi"));
-}
-
-TEST(StringRefTest, EndsWith) {
- StringRef Str("hello");
- EXPECT_TRUE(Str.endswith("lo"));
- EXPECT_FALSE(Str.endswith("helloworld"));
- EXPECT_FALSE(Str.endswith("worldhello"));
- EXPECT_FALSE(Str.endswith("so"));
-}
-
-TEST(StringRefTest, Find) {
- StringRef Str("hello");
- EXPECT_EQ(2U, Str.find('l'));
- EXPECT_EQ(StringRef::npos, Str.find('z'));
- EXPECT_EQ(StringRef::npos, Str.find("helloworld"));
- EXPECT_EQ(0U, Str.find("hello"));
- EXPECT_EQ(1U, Str.find("ello"));
- EXPECT_EQ(StringRef::npos, Str.find("zz"));
- EXPECT_EQ(2U, Str.find("ll", 2));
- EXPECT_EQ(StringRef::npos, Str.find("ll", 3));
-
- EXPECT_EQ(3U, Str.rfind('l'));
- EXPECT_EQ(StringRef::npos, Str.rfind('z'));
- EXPECT_EQ(StringRef::npos, Str.rfind("helloworld"));
- EXPECT_EQ(0U, Str.rfind("hello"));
- EXPECT_EQ(1U, Str.rfind("ello"));
- EXPECT_EQ(StringRef::npos, Str.rfind("zz"));
-
- EXPECT_EQ(2U, Str.find_first_of('l'));
- EXPECT_EQ(1U, Str.find_first_of("el"));
- EXPECT_EQ(StringRef::npos, Str.find_first_of("xyz"));
-
- EXPECT_EQ(1U, Str.find_first_not_of('h'));
- EXPECT_EQ(4U, Str.find_first_not_of("hel"));
- EXPECT_EQ(StringRef::npos, Str.find_first_not_of("hello"));
-}
-
-TEST(StringRefTest, Count) {
- StringRef Str("hello");
- EXPECT_EQ(2U, Str.count('l'));
- EXPECT_EQ(1U, Str.count('o'));
- EXPECT_EQ(0U, Str.count('z'));
- EXPECT_EQ(0U, Str.count("helloworld"));
- EXPECT_EQ(1U, Str.count("hello"));
- EXPECT_EQ(1U, Str.count("ello"));
- EXPECT_EQ(0U, Str.count("zz"));
-}
-
-TEST(StringRefTest, EditDistance) {
- StringRef Str("hello");
- EXPECT_EQ(2U, Str.edit_distance("hill"));
-}
-
-TEST(StringRefTest, Misc) {
- std::string Storage;
- raw_string_ostream OS(Storage);
- OS << StringRef("hello");
- EXPECT_EQ("hello", OS.str());
-}
-
-} // end anonymous namespace
diff --git a/libclamav/c++/llvm/unittests/ADT/TripleTest.cpp b/libclamav/c++/llvm/unittests/ADT/TripleTest.cpp
deleted file mode 100644
index 1a9e81a..0000000
--- a/libclamav/c++/llvm/unittests/ADT/TripleTest.cpp
+++ /dev/null
@@ -1,149 +0,0 @@
-//===----------- Triple.cpp - Triple unit tests ---------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/ADT/Triple.h"
-
-using namespace llvm;
-
-namespace {
-
-TEST(TripleTest, BasicParsing) {
- Triple T;
-
- T = Triple("");
- EXPECT_EQ("", T.getArchName().str());
- EXPECT_EQ("", T.getVendorName().str());
- EXPECT_EQ("", T.getOSName().str());
- EXPECT_EQ("", T.getEnvironmentName().str());
-
- T = Triple("-");
- EXPECT_EQ("", T.getArchName().str());
- EXPECT_EQ("", T.getVendorName().str());
- EXPECT_EQ("", T.getOSName().str());
- EXPECT_EQ("", T.getEnvironmentName().str());
-
- T = Triple("--");
- EXPECT_EQ("", T.getArchName().str());
- EXPECT_EQ("", T.getVendorName().str());
- EXPECT_EQ("", T.getOSName().str());
- EXPECT_EQ("", T.getEnvironmentName().str());
-
- T = Triple("---");
- EXPECT_EQ("", T.getArchName().str());
- EXPECT_EQ("", T.getVendorName().str());
- EXPECT_EQ("", T.getOSName().str());
- EXPECT_EQ("", T.getEnvironmentName().str());
-
- T = Triple("----");
- EXPECT_EQ("", T.getArchName().str());
- EXPECT_EQ("", T.getVendorName().str());
- EXPECT_EQ("", T.getOSName().str());
- EXPECT_EQ("-", T.getEnvironmentName().str());
-
- T = Triple("a");
- EXPECT_EQ("a", T.getArchName().str());
- EXPECT_EQ("", T.getVendorName().str());
- EXPECT_EQ("", T.getOSName().str());
- EXPECT_EQ("", T.getEnvironmentName().str());
-
- T = Triple("a-b");
- EXPECT_EQ("a", T.getArchName().str());
- EXPECT_EQ("b", T.getVendorName().str());
- EXPECT_EQ("", T.getOSName().str());
- EXPECT_EQ("", T.getEnvironmentName().str());
-
- T = Triple("a-b-c");
- EXPECT_EQ("a", T.getArchName().str());
- EXPECT_EQ("b", T.getVendorName().str());
- EXPECT_EQ("c", T.getOSName().str());
- EXPECT_EQ("", T.getEnvironmentName().str());
-
- T = Triple("a-b-c-d");
- EXPECT_EQ("a", T.getArchName().str());
- EXPECT_EQ("b", T.getVendorName().str());
- EXPECT_EQ("c", T.getOSName().str());
- EXPECT_EQ("d", T.getEnvironmentName().str());
-}
-
-TEST(TripleTest, ParsedIDs) {
- Triple T;
-
- T = Triple("i386-apple-darwin");
- EXPECT_EQ(Triple::x86, T.getArch());
- EXPECT_EQ(Triple::Apple, T.getVendor());
- EXPECT_EQ(Triple::Darwin, T.getOS());
-
- T = Triple("x86_64-pc-linux-gnu");
- EXPECT_EQ(Triple::x86_64, T.getArch());
- EXPECT_EQ(Triple::PC, T.getVendor());
- EXPECT_EQ(Triple::Linux, T.getOS());
-
- T = Triple("powerpc-dunno-notsure");
- EXPECT_EQ(Triple::ppc, T.getArch());
- EXPECT_EQ(Triple::UnknownVendor, T.getVendor());
- EXPECT_EQ(Triple::UnknownOS, T.getOS());
-
- T = Triple("huh");
- EXPECT_EQ(Triple::UnknownArch, T.getArch());
-
- // Two exceptional cases.
-
- T = Triple("i386-mingw32");
- EXPECT_EQ(Triple::x86, T.getArch());
- EXPECT_EQ(Triple::PC, T.getVendor());
- EXPECT_EQ(Triple::MinGW32, T.getOS());
-
- T = Triple("arm-elf");
- EXPECT_EQ(Triple::arm, T.getArch());
- EXPECT_EQ(Triple::UnknownVendor, T.getVendor());
- EXPECT_EQ(Triple::UnknownOS, T.getOS());
-}
-
-TEST(TripleTest, MutateName) {
- Triple T;
- EXPECT_EQ(Triple::UnknownArch, T.getArch());
- EXPECT_EQ(Triple::UnknownVendor, T.getVendor());
- EXPECT_EQ(Triple::UnknownOS, T.getOS());
-
- T.setArchName("i386");
- EXPECT_EQ(Triple::x86, T.getArch());
- EXPECT_EQ("i386--", T.getTriple());
-
- T.setVendorName("pc");
- EXPECT_EQ(Triple::x86, T.getArch());
- EXPECT_EQ(Triple::PC, T.getVendor());
- EXPECT_EQ("i386-pc-", T.getTriple());
-
- T.setOSName("linux");
- EXPECT_EQ(Triple::x86, T.getArch());
- EXPECT_EQ(Triple::PC, T.getVendor());
- EXPECT_EQ(Triple::Linux, T.getOS());
- EXPECT_EQ("i386-pc-linux", T.getTriple());
-
- T.setEnvironmentName("gnu");
- EXPECT_EQ(Triple::x86, T.getArch());
- EXPECT_EQ(Triple::PC, T.getVendor());
- EXPECT_EQ(Triple::Linux, T.getOS());
- EXPECT_EQ("i386-pc-linux-gnu", T.getTriple());
-
- T.setOSName("freebsd");
- EXPECT_EQ(Triple::x86, T.getArch());
- EXPECT_EQ(Triple::PC, T.getVendor());
- EXPECT_EQ(Triple::FreeBSD, T.getOS());
- EXPECT_EQ("i386-pc-freebsd-gnu", T.getTriple());
-
- T.setOSAndEnvironmentName("darwin");
- EXPECT_EQ(Triple::x86, T.getArch());
- EXPECT_EQ(Triple::PC, T.getVendor());
- EXPECT_EQ(Triple::Darwin, T.getOS());
- EXPECT_EQ("i386-pc-darwin", T.getTriple());
-}
-
-}
diff --git a/libclamav/c++/llvm/unittests/ADT/TwineTest.cpp b/libclamav/c++/llvm/unittests/ADT/TwineTest.cpp
deleted file mode 100644
index 61e8a0a..0000000
--- a/libclamav/c++/llvm/unittests/ADT/TwineTest.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-//===- TwineTest.cpp - Twine unit tests -----------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/ADT/Twine.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-namespace {
-
-std::string repr(const Twine &Value) {
- std::string res;
- llvm::raw_string_ostream OS(res);
- Value.printRepr(OS);
- return OS.str();
-}
-
-TEST(TwineTest, Construction) {
- EXPECT_EQ("", Twine().str());
- EXPECT_EQ("hi", Twine("hi").str());
- EXPECT_EQ("hi", Twine(std::string("hi")).str());
- EXPECT_EQ("hi", Twine(StringRef("hi")).str());
- EXPECT_EQ("hi", Twine(StringRef(std::string("hi"))).str());
- EXPECT_EQ("hi", Twine(StringRef("hithere", 2)).str());
-}
-
-TEST(TwineTest, Numbers) {
- EXPECT_EQ("123", Twine(123U).str());
- EXPECT_EQ("123", Twine(123).str());
- EXPECT_EQ("-123", Twine(-123).str());
- EXPECT_EQ("123", Twine(123).str());
- EXPECT_EQ("-123", Twine(-123).str());
- EXPECT_EQ("123", Twine((char) 123).str());
- EXPECT_EQ("-123", Twine((signed char) -123).str());
-
- EXPECT_EQ("7b", Twine::utohexstr(123).str());
-}
-
-TEST(TwineTest, Concat) {
- // Check via repr(), since we care about the actual representation, not just
- // the result.
-
- // Concat with null.
- EXPECT_EQ("(Twine null empty)",
- repr(Twine("hi").concat(Twine::createNull())));
- EXPECT_EQ("(Twine null empty)",
- repr(Twine::createNull().concat(Twine("hi"))));
-
- // Concat with empty.
- EXPECT_EQ("(Twine cstring:\"hi\" empty)",
- repr(Twine("hi").concat(Twine())));
- EXPECT_EQ("(Twine cstring:\"hi\" empty)",
- repr(Twine().concat(Twine("hi"))));
-
- // Concatenation of unary ropes.
- EXPECT_EQ("(Twine cstring:\"a\" cstring:\"b\")",
- repr(Twine("a").concat(Twine("b"))));
-
- // Concatenation of other ropes.
- EXPECT_EQ("(Twine rope:(Twine cstring:\"a\" cstring:\"b\") cstring:\"c\")",
- repr(Twine("a").concat(Twine("b")).concat(Twine("c"))));
- EXPECT_EQ("(Twine cstring:\"a\" rope:(Twine cstring:\"b\" cstring:\"c\"))",
- repr(Twine("a").concat(Twine("b").concat(Twine("c")))));
-}
-
- // I suppose linking in the entire code generator to add a unit test to check
- // the code size of the concat operation is overkill... :)
-
-} // end anonymous namespace
diff --git a/libclamav/c++/llvm/unittests/ADT/ValueMapTest.cpp b/libclamav/c++/llvm/unittests/ADT/ValueMapTest.cpp
deleted file mode 100644
index 451e30a..0000000
--- a/libclamav/c++/llvm/unittests/ADT/ValueMapTest.cpp
+++ /dev/null
@@ -1,294 +0,0 @@
-//===- llvm/unittest/ADT/ValueMapTest.cpp - ValueMap unit tests -*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/ADT/ValueMap.h"
-#include "llvm/Instructions.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/Config/config.h"
-
-#include "gtest/gtest.h"
-
-using namespace llvm;
-
-namespace {
-
-// Test fixture
-template<typename T>
-class ValueMapTest : public testing::Test {
-protected:
- Constant *ConstantV;
- OwningPtr<BitCastInst> BitcastV;
- OwningPtr<BinaryOperator> AddV;
-
- ValueMapTest() :
- ConstantV(ConstantInt::get(Type::getInt32Ty(getGlobalContext()), 0)),
- BitcastV(new BitCastInst(ConstantV, Type::getInt32Ty(getGlobalContext()))),
- AddV(BinaryOperator::CreateAdd(ConstantV, ConstantV)) {
- }
-};
-
-// Run everything on Value*, a subtype to make sure that casting works as
-// expected, and a const subtype to make sure we cast const correctly.
-typedef ::testing::Types<Value, Instruction, const Instruction> KeyTypes;
-TYPED_TEST_CASE(ValueMapTest, KeyTypes);
-
-TYPED_TEST(ValueMapTest, Null) {
- ValueMap<TypeParam*, int> VM1;
- VM1[NULL] = 7;
- EXPECT_EQ(7, VM1.lookup(NULL));
-}
-
-TYPED_TEST(ValueMapTest, FollowsValue) {
- ValueMap<TypeParam*, int> VM;
- VM[this->BitcastV.get()] = 7;
- EXPECT_EQ(7, VM.lookup(this->BitcastV.get()));
- EXPECT_EQ(0, VM.count(this->AddV.get()));
- this->BitcastV->replaceAllUsesWith(this->AddV.get());
- EXPECT_EQ(7, VM.lookup(this->AddV.get()));
- EXPECT_EQ(0, VM.count(this->BitcastV.get()));
- this->AddV.reset();
- EXPECT_EQ(0, VM.count(this->AddV.get()));
- EXPECT_EQ(0, VM.count(this->BitcastV.get()));
- EXPECT_EQ(0U, VM.size());
-}
-
-TYPED_TEST(ValueMapTest, OperationsWork) {
- ValueMap<TypeParam*, int> VM;
- ValueMap<TypeParam*, int> VM2(16);
- typename ValueMapConfig<TypeParam*>::ExtraData Data;
- ValueMap<TypeParam*, int> VM3(Data, 16);
- EXPECT_TRUE(VM.empty());
-
- VM[this->BitcastV.get()] = 7;
-
- // Find:
- typename ValueMap<TypeParam*, int>::iterator I =
- VM.find(this->BitcastV.get());
- ASSERT_TRUE(I != VM.end());
- EXPECT_EQ(this->BitcastV.get(), I->first);
- EXPECT_EQ(7, I->second);
- EXPECT_TRUE(VM.find(this->AddV.get()) == VM.end());
-
- // Const find:
- const ValueMap<TypeParam*, int> &CVM = VM;
- typename ValueMap<TypeParam*, int>::const_iterator CI =
- CVM.find(this->BitcastV.get());
- ASSERT_TRUE(CI != CVM.end());
- EXPECT_EQ(this->BitcastV.get(), CI->first);
- EXPECT_EQ(7, CI->second);
- EXPECT_TRUE(CVM.find(this->AddV.get()) == CVM.end());
-
- // Insert:
- std::pair<typename ValueMap<TypeParam*, int>::iterator, bool> InsertResult1 =
- VM.insert(std::make_pair(this->AddV.get(), 3));
- EXPECT_EQ(this->AddV.get(), InsertResult1.first->first);
- EXPECT_EQ(3, InsertResult1.first->second);
- EXPECT_TRUE(InsertResult1.second);
- EXPECT_EQ(true, VM.count(this->AddV.get()));
- std::pair<typename ValueMap<TypeParam*, int>::iterator, bool> InsertResult2 =
- VM.insert(std::make_pair(this->AddV.get(), 5));
- EXPECT_EQ(this->AddV.get(), InsertResult2.first->first);
- EXPECT_EQ(3, InsertResult2.first->second);
- EXPECT_FALSE(InsertResult2.second);
-
- // Erase:
- VM.erase(InsertResult2.first);
- EXPECT_EQ(false, VM.count(this->AddV.get()));
- EXPECT_EQ(true, VM.count(this->BitcastV.get()));
- VM.erase(this->BitcastV.get());
- EXPECT_EQ(false, VM.count(this->BitcastV.get()));
- EXPECT_EQ(0U, VM.size());
-
- // Range insert:
- SmallVector<std::pair<Instruction*, int>, 2> Elems;
- Elems.push_back(std::make_pair(this->AddV.get(), 1));
- Elems.push_back(std::make_pair(this->BitcastV.get(), 2));
- VM.insert(Elems.begin(), Elems.end());
- EXPECT_EQ(1, VM.lookup(this->AddV.get()));
- EXPECT_EQ(2, VM.lookup(this->BitcastV.get()));
-}
-
-template<typename ExpectedType, typename VarType>
-void CompileAssertHasType(VarType) {
- typedef char assert[is_same<ExpectedType, VarType>::value ? 1 : -1];
-}
-
-TYPED_TEST(ValueMapTest, Iteration) {
- ValueMap<TypeParam*, int> VM;
- VM[this->BitcastV.get()] = 2;
- VM[this->AddV.get()] = 3;
- size_t size = 0;
- for (typename ValueMap<TypeParam*, int>::iterator I = VM.begin(), E = VM.end();
- I != E; ++I) {
- ++size;
- std::pair<TypeParam*, int> value = *I;
- CompileAssertHasType<TypeParam*>(I->first);
- if (I->second == 2) {
- EXPECT_EQ(this->BitcastV.get(), I->first);
- I->second = 5;
- } else if (I->second == 3) {
- EXPECT_EQ(this->AddV.get(), I->first);
- I->second = 6;
- } else {
- ADD_FAILURE() << "Iterated through an extra value.";
- }
- }
- EXPECT_EQ(2U, size);
- EXPECT_EQ(5, VM[this->BitcastV.get()]);
- EXPECT_EQ(6, VM[this->AddV.get()]);
-
- size = 0;
- // Cast to const ValueMap to avoid a bug in DenseMap's iterators.
- const ValueMap<TypeParam*, int>& CVM = VM;
- for (typename ValueMap<TypeParam*, int>::const_iterator I = CVM.begin(),
- E = CVM.end(); I != E; ++I) {
- ++size;
- std::pair<TypeParam*, int> value = *I;
- CompileAssertHasType<TypeParam*>(I->first);
- if (I->second == 5) {
- EXPECT_EQ(this->BitcastV.get(), I->first);
- } else if (I->second == 6) {
- EXPECT_EQ(this->AddV.get(), I->first);
- } else {
- ADD_FAILURE() << "Iterated through an extra value.";
- }
- }
- EXPECT_EQ(2U, size);
-}
-
-TYPED_TEST(ValueMapTest, DefaultCollisionBehavior) {
- // By default, we overwrite the old value with the replaced value.
- ValueMap<TypeParam*, int> VM;
- VM[this->BitcastV.get()] = 7;
- VM[this->AddV.get()] = 9;
- this->BitcastV->replaceAllUsesWith(this->AddV.get());
- EXPECT_EQ(0, VM.count(this->BitcastV.get()));
- EXPECT_EQ(9, VM.lookup(this->AddV.get()));
-}
-
-TYPED_TEST(ValueMapTest, ConfiguredCollisionBehavior) {
- // TODO: Implement this when someone needs it.
-}
-
-template<typename KeyT>
-struct LockMutex : ValueMapConfig<KeyT> {
- struct ExtraData {
- sys::Mutex *M;
- bool *CalledRAUW;
- bool *CalledDeleted;
- };
- static void onRAUW(const ExtraData &Data, KeyT Old, KeyT New) {
- *Data.CalledRAUW = true;
- EXPECT_FALSE(Data.M->tryacquire()) << "Mutex should already be locked.";
- }
- static void onDelete(const ExtraData &Data, KeyT Old) {
- *Data.CalledDeleted = true;
- EXPECT_FALSE(Data.M->tryacquire()) << "Mutex should already be locked.";
- }
- static sys::Mutex *getMutex(const ExtraData &Data) { return Data.M; }
-};
-#if ENABLE_THREADS
-TYPED_TEST(ValueMapTest, LocksMutex) {
- sys::Mutex M(false); // Not recursive.
- bool CalledRAUW = false, CalledDeleted = false;
- typename LockMutex<TypeParam*>::ExtraData Data =
- {&M, &CalledRAUW, &CalledDeleted};
- ValueMap<TypeParam*, int, LockMutex<TypeParam*> > VM(Data);
- VM[this->BitcastV.get()] = 7;
- this->BitcastV->replaceAllUsesWith(this->AddV.get());
- this->AddV.reset();
- EXPECT_TRUE(CalledRAUW);
- EXPECT_TRUE(CalledDeleted);
-}
-#endif
-
-template<typename KeyT>
-struct NoFollow : ValueMapConfig<KeyT> {
- enum { FollowRAUW = false };
-};
-
-TYPED_TEST(ValueMapTest, NoFollowRAUW) {
- ValueMap<TypeParam*, int, NoFollow<TypeParam*> > VM;
- VM[this->BitcastV.get()] = 7;
- EXPECT_EQ(7, VM.lookup(this->BitcastV.get()));
- EXPECT_EQ(0, VM.count(this->AddV.get()));
- this->BitcastV->replaceAllUsesWith(this->AddV.get());
- EXPECT_EQ(7, VM.lookup(this->BitcastV.get()));
- EXPECT_EQ(0, VM.lookup(this->AddV.get()));
- this->AddV.reset();
- EXPECT_EQ(7, VM.lookup(this->BitcastV.get()));
- EXPECT_EQ(0, VM.lookup(this->AddV.get()));
- this->BitcastV.reset();
- EXPECT_EQ(0, VM.lookup(this->BitcastV.get()));
- EXPECT_EQ(0, VM.lookup(this->AddV.get()));
- EXPECT_EQ(0U, VM.size());
-}
-
-template<typename KeyT>
-struct CountOps : ValueMapConfig<KeyT> {
- struct ExtraData {
- int *Deletions;
- int *RAUWs;
- };
-
- static void onRAUW(const ExtraData &Data, KeyT Old, KeyT New) {
- ++*Data.RAUWs;
- }
- static void onDelete(const ExtraData &Data, KeyT Old) {
- ++*Data.Deletions;
- }
-};
-
-TYPED_TEST(ValueMapTest, CallsConfig) {
- int Deletions = 0, RAUWs = 0;
- typename CountOps<TypeParam*>::ExtraData Data = {&Deletions, &RAUWs};
- ValueMap<TypeParam*, int, CountOps<TypeParam*> > VM(Data);
- VM[this->BitcastV.get()] = 7;
- this->BitcastV->replaceAllUsesWith(this->AddV.get());
- EXPECT_EQ(0, Deletions);
- EXPECT_EQ(1, RAUWs);
- this->AddV.reset();
- EXPECT_EQ(1, Deletions);
- EXPECT_EQ(1, RAUWs);
- this->BitcastV.reset();
- EXPECT_EQ(1, Deletions);
- EXPECT_EQ(1, RAUWs);
-}
-
-template<typename KeyT>
-struct ModifyingConfig : ValueMapConfig<KeyT> {
- // We'll put a pointer here back to the ValueMap this key is in, so
- // that we can modify it (and clobber *this) before the ValueMap
- // tries to do the same modification. In previous versions of
- // ValueMap, that exploded.
- typedef ValueMap<KeyT, int, ModifyingConfig<KeyT> > **ExtraData;
-
- static void onRAUW(ExtraData Map, KeyT Old, KeyT New) {
- (*Map)->erase(Old);
- }
- static void onDelete(ExtraData Map, KeyT Old) {
- (*Map)->erase(Old);
- }
-};
-TYPED_TEST(ValueMapTest, SurvivesModificationByConfig) {
- ValueMap<TypeParam*, int, ModifyingConfig<TypeParam*> > *MapAddress;
- ValueMap<TypeParam*, int, ModifyingConfig<TypeParam*> > VM(&MapAddress);
- MapAddress = &VM;
- // Now the ModifyingConfig can modify the Map inside a callback.
- VM[this->BitcastV.get()] = 7;
- this->BitcastV->replaceAllUsesWith(this->AddV.get());
- EXPECT_FALSE(VM.count(this->BitcastV.get()));
- EXPECT_FALSE(VM.count(this->AddV.get()));
- VM[this->AddV.get()] = 7;
- this->AddV.reset();
- EXPECT_FALSE(VM.count(this->AddV.get()));
-}
-
-}
diff --git a/libclamav/c++/llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp b/libclamav/c++/llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp
deleted file mode 100644
index 904ee2b..0000000
--- a/libclamav/c++/llvm/unittests/ExecutionEngine/ExecutionEngineTest.cpp
+++ /dev/null
@@ -1,129 +0,0 @@
-//===- ExecutionEngineTest.cpp - Unit tests for ExecutionEngine -----------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/DerivedTypes.h"
-#include "llvm/GlobalVariable.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ExecutionEngine/Interpreter.h"
-#include "gtest/gtest.h"
-
-using namespace llvm;
-
-namespace {
-
-class ExecutionEngineTest : public testing::Test {
-protected:
- ExecutionEngineTest()
- : M(new Module("<main>", getGlobalContext())),
- Engine(EngineBuilder(M).create()) {
- }
-
- virtual void SetUp() {
- ASSERT_TRUE(Engine.get() != NULL);
- }
-
- GlobalVariable *NewExtGlobal(const Type *T, const Twine &Name) {
- return new GlobalVariable(*M, T, false, // Not constant.
- GlobalValue::ExternalLinkage, NULL, Name);
- }
-
- Module *const M;
- const OwningPtr<ExecutionEngine> Engine;
-};
-
-TEST_F(ExecutionEngineTest, ForwardGlobalMapping) {
- GlobalVariable *G1 =
- NewExtGlobal(Type::getInt32Ty(getGlobalContext()), "Global1");
- int32_t Mem1 = 3;
- Engine->addGlobalMapping(G1, &Mem1);
- EXPECT_EQ(&Mem1, Engine->getPointerToGlobalIfAvailable(G1));
- int32_t Mem2 = 4;
- Engine->updateGlobalMapping(G1, &Mem2);
- EXPECT_EQ(&Mem2, Engine->getPointerToGlobalIfAvailable(G1));
- Engine->updateGlobalMapping(G1, NULL);
- EXPECT_EQ(NULL, Engine->getPointerToGlobalIfAvailable(G1));
- Engine->updateGlobalMapping(G1, &Mem2);
- EXPECT_EQ(&Mem2, Engine->getPointerToGlobalIfAvailable(G1));
-
- GlobalVariable *G2 =
- NewExtGlobal(Type::getInt32Ty(getGlobalContext()), "Global1");
- EXPECT_EQ(NULL, Engine->getPointerToGlobalIfAvailable(G2))
- << "The NULL return shouldn't depend on having called"
- << " updateGlobalMapping(..., NULL)";
- // Check that update...() can be called before add...().
- Engine->updateGlobalMapping(G2, &Mem1);
- EXPECT_EQ(&Mem1, Engine->getPointerToGlobalIfAvailable(G2));
- EXPECT_EQ(&Mem2, Engine->getPointerToGlobalIfAvailable(G1))
- << "A second mapping shouldn't affect the first.";
-}
-
-TEST_F(ExecutionEngineTest, ReverseGlobalMapping) {
- GlobalVariable *G1 =
- NewExtGlobal(Type::getInt32Ty(getGlobalContext()), "Global1");
-
- int32_t Mem1 = 3;
- Engine->addGlobalMapping(G1, &Mem1);
- EXPECT_EQ(G1, Engine->getGlobalValueAtAddress(&Mem1));
- int32_t Mem2 = 4;
- Engine->updateGlobalMapping(G1, &Mem2);
- EXPECT_EQ(NULL, Engine->getGlobalValueAtAddress(&Mem1));
- EXPECT_EQ(G1, Engine->getGlobalValueAtAddress(&Mem2));
-
- GlobalVariable *G2 =
- NewExtGlobal(Type::getInt32Ty(getGlobalContext()), "Global2");
- Engine->updateGlobalMapping(G2, &Mem1);
- EXPECT_EQ(G2, Engine->getGlobalValueAtAddress(&Mem1));
- EXPECT_EQ(G1, Engine->getGlobalValueAtAddress(&Mem2));
- Engine->updateGlobalMapping(G1, NULL);
- EXPECT_EQ(G2, Engine->getGlobalValueAtAddress(&Mem1))
- << "Removing one mapping doesn't affect a different one.";
- EXPECT_EQ(NULL, Engine->getGlobalValueAtAddress(&Mem2));
- Engine->updateGlobalMapping(G2, &Mem2);
- EXPECT_EQ(NULL, Engine->getGlobalValueAtAddress(&Mem1));
- EXPECT_EQ(G2, Engine->getGlobalValueAtAddress(&Mem2))
- << "Once a mapping is removed, we can point another GV at the"
- << " now-free address.";
-}
-
-TEST_F(ExecutionEngineTest, ClearModuleMappings) {
- GlobalVariable *G1 =
- NewExtGlobal(Type::getInt32Ty(getGlobalContext()), "Global1");
-
- int32_t Mem1 = 3;
- Engine->addGlobalMapping(G1, &Mem1);
- EXPECT_EQ(G1, Engine->getGlobalValueAtAddress(&Mem1));
-
- Engine->clearGlobalMappingsFromModule(M);
-
- EXPECT_EQ(NULL, Engine->getGlobalValueAtAddress(&Mem1));
-
- GlobalVariable *G2 =
- NewExtGlobal(Type::getInt32Ty(getGlobalContext()), "Global2");
- // After clearing the module mappings, we can assign a new GV to the
- // same address.
- Engine->addGlobalMapping(G2, &Mem1);
- EXPECT_EQ(G2, Engine->getGlobalValueAtAddress(&Mem1));
-}
-
-TEST_F(ExecutionEngineTest, DestructionRemovesGlobalMapping) {
- GlobalVariable *G1 =
- NewExtGlobal(Type::getInt32Ty(getGlobalContext()), "Global1");
- int32_t Mem1 = 3;
- Engine->addGlobalMapping(G1, &Mem1);
- // Make sure the reverse mapping is enabled.
- EXPECT_EQ(G1, Engine->getGlobalValueAtAddress(&Mem1));
- // When the GV goes away, the ExecutionEngine should remove any
- // mappings that refer to it.
- G1->eraseFromParent();
- EXPECT_EQ(NULL, Engine->getGlobalValueAtAddress(&Mem1));
-}
-
-}
diff --git a/libclamav/c++/llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp b/libclamav/c++/llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp
deleted file mode 100644
index a36ec3b..0000000
--- a/libclamav/c++/llvm/unittests/ExecutionEngine/JIT/JITEventListenerTest.cpp
+++ /dev/null
@@ -1,238 +0,0 @@
-//===- JITEventListenerTest.cpp - Unit tests for JITEventListeners --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/ExecutionEngine/JITEventListener.h"
-
-#include "llvm/LLVMContext.h"
-#include "llvm/Instructions.h"
-#include "llvm/Module.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/CodeGen/MachineCodeInfo.h"
-#include "llvm/ExecutionEngine/JIT.h"
-#include "llvm/Support/TypeBuilder.h"
-#include "llvm/Target/TargetSelect.h"
-#include "gtest/gtest.h"
-#include <vector>
-
-using namespace llvm;
-
-int dummy;
-
-namespace {
-
-struct FunctionEmittedEvent {
- // Indices are local to the RecordingJITEventListener, since the
- // JITEventListener interface makes no guarantees about the order of
- // calls between Listeners.
- unsigned Index;
- const Function *F;
- void *Code;
- size_t Size;
- JITEvent_EmittedFunctionDetails Details;
-};
-struct FunctionFreedEvent {
- unsigned Index;
- void *Code;
-};
-
-struct RecordingJITEventListener : public JITEventListener {
- std::vector<FunctionEmittedEvent> EmittedEvents;
- std::vector<FunctionFreedEvent> FreedEvents;
-
- int NextIndex;
-
- RecordingJITEventListener() : NextIndex(0) {}
-
- virtual void NotifyFunctionEmitted(const Function &F,
- void *Code, size_t Size,
- const EmittedFunctionDetails &Details) {
- FunctionEmittedEvent Event = {NextIndex++, &F, Code, Size, Details};
- EmittedEvents.push_back(Event);
- }
-
- virtual void NotifyFreeingMachineCode(void *OldPtr) {
- FunctionFreedEvent Event = {NextIndex++, OldPtr};
- FreedEvents.push_back(Event);
- }
-};
-
-class JITEventListenerTest : public testing::Test {
- protected:
- JITEventListenerTest()
- : M(new Module("module", getGlobalContext())),
- EE(EngineBuilder(M)
- .setEngineKind(EngineKind::JIT)
- .create()) {
- }
-
- Module *M;
- const OwningPtr<ExecutionEngine> EE;
-};
-
-Function *buildFunction(Module *M) {
- Function *Result = Function::Create(
- TypeBuilder<int32_t(int32_t), false>::get(getGlobalContext()),
- GlobalValue::ExternalLinkage, "id", M);
- Value *Arg = Result->arg_begin();
- BasicBlock *BB = BasicBlock::Create(M->getContext(), "entry", Result);
- ReturnInst::Create(M->getContext(), Arg, BB);
- return Result;
-}
-
-// Tests that a single JITEventListener follows JIT events accurately.
-TEST_F(JITEventListenerTest, Simple) {
- RecordingJITEventListener Listener;
- EE->RegisterJITEventListener(&Listener);
- Function *F1 = buildFunction(M);
- Function *F2 = buildFunction(M);
-
- void *F1_addr = EE->getPointerToFunction(F1);
- void *F2_addr = EE->getPointerToFunction(F2);
- EE->getPointerToFunction(F1); // Should do nothing.
- EE->freeMachineCodeForFunction(F1);
- EE->freeMachineCodeForFunction(F2);
-
- ASSERT_EQ(2U, Listener.EmittedEvents.size());
- ASSERT_EQ(2U, Listener.FreedEvents.size());
-
- EXPECT_EQ(0U, Listener.EmittedEvents[0].Index);
- EXPECT_EQ(F1, Listener.EmittedEvents[0].F);
- EXPECT_EQ(F1_addr, Listener.EmittedEvents[0].Code);
- EXPECT_LT(0U, Listener.EmittedEvents[0].Size)
- << "We don't know how big the function will be, but it had better"
- << " contain some bytes.";
-
- EXPECT_EQ(1U, Listener.EmittedEvents[1].Index);
- EXPECT_EQ(F2, Listener.EmittedEvents[1].F);
- EXPECT_EQ(F2_addr, Listener.EmittedEvents[1].Code);
- EXPECT_LT(0U, Listener.EmittedEvents[1].Size)
- << "We don't know how big the function will be, but it had better"
- << " contain some bytes.";
-
- EXPECT_EQ(2U, Listener.FreedEvents[0].Index);
- EXPECT_EQ(F1_addr, Listener.FreedEvents[0].Code);
-
- EXPECT_EQ(3U, Listener.FreedEvents[1].Index);
- EXPECT_EQ(F2_addr, Listener.FreedEvents[1].Code);
-
- F1->eraseFromParent();
- F2->eraseFromParent();
-}
-
- // Tests that multiple JITEventListeners don't interfere with one another.
-TEST_F(JITEventListenerTest, MultipleListenersDontInterfere) {
- RecordingJITEventListener Listener1;
- RecordingJITEventListener Listener2;
- RecordingJITEventListener Listener3;
- Function *F1 = buildFunction(M);
- Function *F2 = buildFunction(M);
-
- EE->RegisterJITEventListener(&Listener1);
- EE->RegisterJITEventListener(&Listener2);
- void *F1_addr = EE->getPointerToFunction(F1);
- EE->RegisterJITEventListener(&Listener3);
- EE->UnregisterJITEventListener(&Listener1);
- void *F2_addr = EE->getPointerToFunction(F2);
- EE->UnregisterJITEventListener(&Listener2);
- EE->UnregisterJITEventListener(&Listener3);
- EE->freeMachineCodeForFunction(F1);
- EE->RegisterJITEventListener(&Listener2);
- EE->RegisterJITEventListener(&Listener3);
- EE->RegisterJITEventListener(&Listener1);
- EE->freeMachineCodeForFunction(F2);
- EE->UnregisterJITEventListener(&Listener1);
- EE->UnregisterJITEventListener(&Listener2);
- EE->UnregisterJITEventListener(&Listener3);
-
- // Listener 1.
- ASSERT_EQ(1U, Listener1.EmittedEvents.size());
- ASSERT_EQ(1U, Listener1.FreedEvents.size());
-
- EXPECT_EQ(0U, Listener1.EmittedEvents[0].Index);
- EXPECT_EQ(F1, Listener1.EmittedEvents[0].F);
- EXPECT_EQ(F1_addr, Listener1.EmittedEvents[0].Code);
- EXPECT_LT(0U, Listener1.EmittedEvents[0].Size)
- << "We don't know how big the function will be, but it had better"
- << " contain some bytes.";
-
- EXPECT_EQ(1U, Listener1.FreedEvents[0].Index);
- EXPECT_EQ(F2_addr, Listener1.FreedEvents[0].Code);
-
- // Listener 2.
- ASSERT_EQ(2U, Listener2.EmittedEvents.size());
- ASSERT_EQ(1U, Listener2.FreedEvents.size());
-
- EXPECT_EQ(0U, Listener2.EmittedEvents[0].Index);
- EXPECT_EQ(F1, Listener2.EmittedEvents[0].F);
- EXPECT_EQ(F1_addr, Listener2.EmittedEvents[0].Code);
- EXPECT_LT(0U, Listener2.EmittedEvents[0].Size)
- << "We don't know how big the function will be, but it had better"
- << " contain some bytes.";
-
- EXPECT_EQ(1U, Listener2.EmittedEvents[1].Index);
- EXPECT_EQ(F2, Listener2.EmittedEvents[1].F);
- EXPECT_EQ(F2_addr, Listener2.EmittedEvents[1].Code);
- EXPECT_LT(0U, Listener2.EmittedEvents[1].Size)
- << "We don't know how big the function will be, but it had better"
- << " contain some bytes.";
-
- EXPECT_EQ(2U, Listener2.FreedEvents[0].Index);
- EXPECT_EQ(F2_addr, Listener2.FreedEvents[0].Code);
-
- // Listener 3.
- ASSERT_EQ(1U, Listener3.EmittedEvents.size());
- ASSERT_EQ(1U, Listener3.FreedEvents.size());
-
- EXPECT_EQ(0U, Listener3.EmittedEvents[0].Index);
- EXPECT_EQ(F2, Listener3.EmittedEvents[0].F);
- EXPECT_EQ(F2_addr, Listener3.EmittedEvents[0].Code);
- EXPECT_LT(0U, Listener3.EmittedEvents[0].Size)
- << "We don't know how big the function will be, but it had better"
- << " contain some bytes.";
-
- EXPECT_EQ(1U, Listener3.FreedEvents[0].Index);
- EXPECT_EQ(F2_addr, Listener3.FreedEvents[0].Code);
-
- F1->eraseFromParent();
- F2->eraseFromParent();
-}
-
-TEST_F(JITEventListenerTest, MatchesMachineCodeInfo) {
- RecordingJITEventListener Listener;
- MachineCodeInfo MCI;
- Function *F = buildFunction(M);
-
- EE->RegisterJITEventListener(&Listener);
- EE->runJITOnFunction(F, &MCI);
- void *F_addr = EE->getPointerToFunction(F);
- EE->freeMachineCodeForFunction(F);
-
- ASSERT_EQ(1U, Listener.EmittedEvents.size());
- ASSERT_EQ(1U, Listener.FreedEvents.size());
-
- EXPECT_EQ(0U, Listener.EmittedEvents[0].Index);
- EXPECT_EQ(F, Listener.EmittedEvents[0].F);
- EXPECT_EQ(F_addr, Listener.EmittedEvents[0].Code);
- EXPECT_EQ(MCI.address(), Listener.EmittedEvents[0].Code);
- EXPECT_EQ(MCI.size(), Listener.EmittedEvents[0].Size);
-
- EXPECT_EQ(1U, Listener.FreedEvents[0].Index);
- EXPECT_EQ(F_addr, Listener.FreedEvents[0].Code);
-}
-
-class JITEnvironment : public testing::Environment {
- virtual void SetUp() {
- // Required to create a JIT.
- InitializeNativeTarget();
- }
-};
-testing::Environment* const jit_env =
- testing::AddGlobalTestEnvironment(new JITEnvironment);
-
-} // anonymous namespace
diff --git a/libclamav/c++/llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp b/libclamav/c++/llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp
deleted file mode 100644
index aa0c41d..0000000
--- a/libclamav/c++/llvm/unittests/ExecutionEngine/JIT/JITMemoryManagerTest.cpp
+++ /dev/null
@@ -1,279 +0,0 @@
-//===- JITMemoryManagerTest.cpp - Unit tests for the JIT memory manager ---===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ExecutionEngine/JITMemoryManager.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/GlobalValue.h"
-#include "llvm/LLVMContext.h"
-
-using namespace llvm;
-
-namespace {
-
-Function *makeFakeFunction() {
- std::vector<const Type*> params;
- const FunctionType *FTy =
- FunctionType::get(Type::getVoidTy(getGlobalContext()), params, false);
- return Function::Create(FTy, GlobalValue::ExternalLinkage);
-}
-
-// Allocate three simple functions that fit in the initial slab. This exercises
-// the code in the case that we don't have to allocate more memory to store the
-// function bodies.
-TEST(JITMemoryManagerTest, NoAllocations) {
- OwningPtr<JITMemoryManager> MemMgr(
- JITMemoryManager::CreateDefaultMemManager());
- uintptr_t size;
- std::string Error;
-
- // Allocate the functions.
- OwningPtr<Function> F1(makeFakeFunction());
- size = 1024;
- uint8_t *FunctionBody1 = MemMgr->startFunctionBody(F1.get(), size);
- memset(FunctionBody1, 0xFF, 1024);
- MemMgr->endFunctionBody(F1.get(), FunctionBody1, FunctionBody1 + 1024);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
-
- OwningPtr<Function> F2(makeFakeFunction());
- size = 1024;
- uint8_t *FunctionBody2 = MemMgr->startFunctionBody(F2.get(), size);
- memset(FunctionBody2, 0xFF, 1024);
- MemMgr->endFunctionBody(F2.get(), FunctionBody2, FunctionBody2 + 1024);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
-
- OwningPtr<Function> F3(makeFakeFunction());
- size = 1024;
- uint8_t *FunctionBody3 = MemMgr->startFunctionBody(F3.get(), size);
- memset(FunctionBody3, 0xFF, 1024);
- MemMgr->endFunctionBody(F3.get(), FunctionBody3, FunctionBody3 + 1024);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
-
- // Deallocate them out of order, in case that matters.
- MemMgr->deallocateFunctionBody(FunctionBody2);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
- MemMgr->deallocateFunctionBody(FunctionBody1);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
- MemMgr->deallocateFunctionBody(FunctionBody3);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
-}
-
-// Make three large functions that take up most of the space in the slab. Then
-// try allocating three smaller functions that don't require additional slabs.
-TEST(JITMemoryManagerTest, TestCodeAllocation) {
- OwningPtr<JITMemoryManager> MemMgr(
- JITMemoryManager::CreateDefaultMemManager());
- uintptr_t size;
- std::string Error;
-
- // Big functions are a little less than the largest block size.
- const uintptr_t smallFuncSize = 1024;
- const uintptr_t bigFuncSize = (MemMgr->GetDefaultCodeSlabSize() -
- smallFuncSize * 2);
-
- // Allocate big functions
- OwningPtr<Function> F1(makeFakeFunction());
- size = bigFuncSize;
- uint8_t *FunctionBody1 = MemMgr->startFunctionBody(F1.get(), size);
- ASSERT_LE(bigFuncSize, size);
- memset(FunctionBody1, 0xFF, bigFuncSize);
- MemMgr->endFunctionBody(F1.get(), FunctionBody1, FunctionBody1 + bigFuncSize);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
-
- OwningPtr<Function> F2(makeFakeFunction());
- size = bigFuncSize;
- uint8_t *FunctionBody2 = MemMgr->startFunctionBody(F2.get(), size);
- ASSERT_LE(bigFuncSize, size);
- memset(FunctionBody2, 0xFF, bigFuncSize);
- MemMgr->endFunctionBody(F2.get(), FunctionBody2, FunctionBody2 + bigFuncSize);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
-
- OwningPtr<Function> F3(makeFakeFunction());
- size = bigFuncSize;
- uint8_t *FunctionBody3 = MemMgr->startFunctionBody(F3.get(), size);
- ASSERT_LE(bigFuncSize, size);
- memset(FunctionBody3, 0xFF, bigFuncSize);
- MemMgr->endFunctionBody(F3.get(), FunctionBody3, FunctionBody3 + bigFuncSize);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
-
- // Check that each large function took its own slab.
- EXPECT_EQ(3U, MemMgr->GetNumCodeSlabs());
-
- // Allocate small functions
- OwningPtr<Function> F4(makeFakeFunction());
- size = smallFuncSize;
- uint8_t *FunctionBody4 = MemMgr->startFunctionBody(F4.get(), size);
- ASSERT_LE(smallFuncSize, size);
- memset(FunctionBody4, 0xFF, smallFuncSize);
- MemMgr->endFunctionBody(F4.get(), FunctionBody4,
- FunctionBody4 + smallFuncSize);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
-
- OwningPtr<Function> F5(makeFakeFunction());
- size = smallFuncSize;
- uint8_t *FunctionBody5 = MemMgr->startFunctionBody(F5.get(), size);
- ASSERT_LE(smallFuncSize, size);
- memset(FunctionBody5, 0xFF, smallFuncSize);
- MemMgr->endFunctionBody(F5.get(), FunctionBody5,
- FunctionBody5 + smallFuncSize);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
-
- OwningPtr<Function> F6(makeFakeFunction());
- size = smallFuncSize;
- uint8_t *FunctionBody6 = MemMgr->startFunctionBody(F6.get(), size);
- ASSERT_LE(smallFuncSize, size);
- memset(FunctionBody6, 0xFF, smallFuncSize);
- MemMgr->endFunctionBody(F6.get(), FunctionBody6,
- FunctionBody6 + smallFuncSize);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
-
- // Check that the small functions didn't allocate any new slabs.
- EXPECT_EQ(3U, MemMgr->GetNumCodeSlabs());
-
- // Deallocate them out of order, in case that matters.
- MemMgr->deallocateFunctionBody(FunctionBody2);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
- MemMgr->deallocateFunctionBody(FunctionBody1);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
- MemMgr->deallocateFunctionBody(FunctionBody4);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
- MemMgr->deallocateFunctionBody(FunctionBody3);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
- MemMgr->deallocateFunctionBody(FunctionBody5);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
- MemMgr->deallocateFunctionBody(FunctionBody6);
- EXPECT_TRUE(MemMgr->CheckInvariants(Error)) << Error;
-}
-
- // Allocate four global ints of varying widths and alignment, and check their
-// alignment and overlap.
-TEST(JITMemoryManagerTest, TestSmallGlobalInts) {
- OwningPtr<JITMemoryManager> MemMgr(
- JITMemoryManager::CreateDefaultMemManager());
- uint8_t *a = (uint8_t *)MemMgr->allocateGlobal(8, 0);
- uint16_t *b = (uint16_t*)MemMgr->allocateGlobal(16, 2);
- uint32_t *c = (uint32_t*)MemMgr->allocateGlobal(32, 4);
- uint64_t *d = (uint64_t*)MemMgr->allocateGlobal(64, 8);
-
- // Check the alignment.
- EXPECT_EQ(0U, ((uintptr_t)b) & 0x1);
- EXPECT_EQ(0U, ((uintptr_t)c) & 0x3);
- EXPECT_EQ(0U, ((uintptr_t)d) & 0x7);
-
- // Initialize them each one at a time and make sure they don't overlap.
- *a = 0xff;
- *b = 0U;
- *c = 0U;
- *d = 0U;
- EXPECT_EQ(0xffU, *a);
- EXPECT_EQ(0U, *b);
- EXPECT_EQ(0U, *c);
- EXPECT_EQ(0U, *d);
- *a = 0U;
- *b = 0xffffU;
- EXPECT_EQ(0U, *a);
- EXPECT_EQ(0xffffU, *b);
- EXPECT_EQ(0U, *c);
- EXPECT_EQ(0U, *d);
- *b = 0U;
- *c = 0xffffffffU;
- EXPECT_EQ(0U, *a);
- EXPECT_EQ(0U, *b);
- EXPECT_EQ(0xffffffffU, *c);
- EXPECT_EQ(0U, *d);
- *c = 0U;
- *d = 0xffffffffffffffffULL;
- EXPECT_EQ(0U, *a);
- EXPECT_EQ(0U, *b);
- EXPECT_EQ(0U, *c);
- EXPECT_EQ(0xffffffffffffffffULL, *d);
-
- // Make sure we didn't allocate any extra slabs for this tiny amount of data.
- EXPECT_EQ(1U, MemMgr->GetNumDataSlabs());
-}
-
-// Allocate a small global, a big global, and a third global, and make sure we
-// only use two slabs for that.
-TEST(JITMemoryManagerTest, TestLargeGlobalArray) {
- OwningPtr<JITMemoryManager> MemMgr(
- JITMemoryManager::CreateDefaultMemManager());
- size_t Size = 4 * MemMgr->GetDefaultDataSlabSize();
- uint64_t *a = (uint64_t*)MemMgr->allocateGlobal(64, 8);
- uint8_t *g = MemMgr->allocateGlobal(Size, 8);
- uint64_t *b = (uint64_t*)MemMgr->allocateGlobal(64, 8);
-
- // Check the alignment.
- EXPECT_EQ(0U, ((uintptr_t)a) & 0x7);
- EXPECT_EQ(0U, ((uintptr_t)g) & 0x7);
- EXPECT_EQ(0U, ((uintptr_t)b) & 0x7);
-
- // Initialize them to make sure we don't segfault and make sure they don't
- // overlap.
- memset(a, 0x1, 8);
- memset(g, 0x2, Size);
- memset(b, 0x3, 8);
- EXPECT_EQ(0x0101010101010101ULL, *a);
- // Just check the edges.
- EXPECT_EQ(0x02U, g[0]);
- EXPECT_EQ(0x02U, g[Size - 1]);
- EXPECT_EQ(0x0303030303030303ULL, *b);
-
- // Check the number of slabs.
- EXPECT_EQ(2U, MemMgr->GetNumDataSlabs());
-}
-
-// Allocate lots of medium globals so that we can test moving the bump allocator
-// to a new slab.
-TEST(JITMemoryManagerTest, TestManyGlobals) {
- OwningPtr<JITMemoryManager> MemMgr(
- JITMemoryManager::CreateDefaultMemManager());
- size_t SlabSize = MemMgr->GetDefaultDataSlabSize();
- size_t Size = 128;
- int Iters = (SlabSize / Size) + 1;
-
- // We should start with one slab.
- EXPECT_EQ(1U, MemMgr->GetNumDataSlabs());
-
- // After allocating a bunch of globals, we should have two.
- for (int I = 0; I < Iters; ++I)
- MemMgr->allocateGlobal(Size, 8);
- EXPECT_EQ(2U, MemMgr->GetNumDataSlabs());
-
- // And after much more, we should have three.
- for (int I = 0; I < Iters; ++I)
- MemMgr->allocateGlobal(Size, 8);
- EXPECT_EQ(3U, MemMgr->GetNumDataSlabs());
-}
-
-// Allocate lots of function stubs so that we can test moving the stub bump
-// allocator to a new slab.
-TEST(JITMemoryManagerTest, TestManyStubs) {
- OwningPtr<JITMemoryManager> MemMgr(
- JITMemoryManager::CreateDefaultMemManager());
- size_t SlabSize = MemMgr->GetDefaultStubSlabSize();
- size_t Size = 128;
- int Iters = (SlabSize / Size) + 1;
-
- // We should start with one slab.
- EXPECT_EQ(1U, MemMgr->GetNumStubSlabs());
-
- // After allocating a bunch of stubs, we should have two.
- for (int I = 0; I < Iters; ++I)
- MemMgr->allocateStub(NULL, Size, 8);
- EXPECT_EQ(2U, MemMgr->GetNumStubSlabs());
-
- // And after much more, we should have three.
- for (int I = 0; I < Iters; ++I)
- MemMgr->allocateStub(NULL, Size, 8);
- EXPECT_EQ(3U, MemMgr->GetNumStubSlabs());
-}
-
-}
diff --git a/libclamav/c++/llvm/unittests/ExecutionEngine/JIT/JITTest.cpp b/libclamav/c++/llvm/unittests/ExecutionEngine/JIT/JITTest.cpp
deleted file mode 100644
index 1f90e8c..0000000
--- a/libclamav/c++/llvm/unittests/ExecutionEngine/JIT/JITTest.cpp
+++ /dev/null
@@ -1,810 +0,0 @@
-//===- JITTest.cpp - Unit tests for the JIT -------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/Assembly/Parser.h"
-#include "llvm/BasicBlock.h"
-#include "llvm/Bitcode/ReaderWriter.h"
-#include "llvm/Constant.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/ExecutionEngine/JIT.h"
-#include "llvm/ExecutionEngine/JITMemoryManager.h"
-#include "llvm/Function.h"
-#include "llvm/GlobalValue.h"
-#include "llvm/GlobalVariable.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
-#include "llvm/Support/IRBuilder.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/TypeBuilder.h"
-#include "llvm/Target/TargetSelect.h"
-#include "llvm/Type.h"
-
-#include <vector>
-
-using namespace llvm;
-
-namespace {
-
-Function *makeReturnGlobal(std::string Name, GlobalVariable *G, Module *M) {
- std::vector<const Type*> params;
- const FunctionType *FTy = FunctionType::get(G->getType()->getElementType(),
- params, false);
- Function *F = Function::Create(FTy, GlobalValue::ExternalLinkage, Name, M);
- BasicBlock *Entry = BasicBlock::Create(M->getContext(), "entry", F);
- IRBuilder<> builder(Entry);
- Value *Load = builder.CreateLoad(G);
- const Type *GTy = G->getType()->getElementType();
- Value *Add = builder.CreateAdd(Load, ConstantInt::get(GTy, 1LL));
- builder.CreateStore(Add, G);
- builder.CreateRet(Add);
- return F;
-}
-
-std::string DumpFunction(const Function *F) {
- std::string Result;
- raw_string_ostream(Result) << "" << *F;
- return Result;
-}
-
-class RecordingJITMemoryManager : public JITMemoryManager {
- const OwningPtr<JITMemoryManager> Base;
-public:
- RecordingJITMemoryManager()
- : Base(JITMemoryManager::CreateDefaultMemManager()) {
- stubsAllocated = 0;
- }
-
- void setSizeRequired(bool Required) { SizeRequired = Required; }
-
- virtual void setMemoryWritable() { Base->setMemoryWritable(); }
- virtual void setMemoryExecutable() { Base->setMemoryExecutable(); }
- virtual void setPoisonMemory(bool poison) { Base->setPoisonMemory(poison); }
- virtual void AllocateGOT() { Base->AllocateGOT(); }
- virtual uint8_t *getGOTBase() const { return Base->getGOTBase(); }
- struct StartFunctionBodyCall {
- StartFunctionBodyCall(uint8_t *Result, const Function *F,
- uintptr_t ActualSize, uintptr_t ActualSizeResult)
- : Result(Result), F(F), F_dump(DumpFunction(F)),
- ActualSize(ActualSize), ActualSizeResult(ActualSizeResult) {}
- uint8_t *Result;
- const Function *F;
- std::string F_dump;
- uintptr_t ActualSize;
- uintptr_t ActualSizeResult;
- };
- std::vector<StartFunctionBodyCall> startFunctionBodyCalls;
- virtual uint8_t *startFunctionBody(const Function *F,
- uintptr_t &ActualSize) {
- uintptr_t InitialActualSize = ActualSize;
- uint8_t *Result = Base->startFunctionBody(F, ActualSize);
- startFunctionBodyCalls.push_back(
- StartFunctionBodyCall(Result, F, InitialActualSize, ActualSize));
- return Result;
- }
- int stubsAllocated;
- virtual uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize,
- unsigned Alignment) {
- stubsAllocated++;
- return Base->allocateStub(F, StubSize, Alignment);
- }
- struct EndFunctionBodyCall {
- EndFunctionBodyCall(const Function *F, uint8_t *FunctionStart,
- uint8_t *FunctionEnd)
- : F(F), F_dump(DumpFunction(F)),
- FunctionStart(FunctionStart), FunctionEnd(FunctionEnd) {}
- const Function *F;
- std::string F_dump;
- uint8_t *FunctionStart;
- uint8_t *FunctionEnd;
- };
- std::vector<EndFunctionBodyCall> endFunctionBodyCalls;
- virtual void endFunctionBody(const Function *F, uint8_t *FunctionStart,
- uint8_t *FunctionEnd) {
- endFunctionBodyCalls.push_back(
- EndFunctionBodyCall(F, FunctionStart, FunctionEnd));
- Base->endFunctionBody(F, FunctionStart, FunctionEnd);
- }
- virtual uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) {
- return Base->allocateSpace(Size, Alignment);
- }
- virtual uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment) {
- return Base->allocateGlobal(Size, Alignment);
- }
- struct DeallocateFunctionBodyCall {
- DeallocateFunctionBodyCall(const void *Body) : Body(Body) {}
- const void *Body;
- };
- std::vector<DeallocateFunctionBodyCall> deallocateFunctionBodyCalls;
- virtual void deallocateFunctionBody(void *Body) {
- deallocateFunctionBodyCalls.push_back(DeallocateFunctionBodyCall(Body));
- Base->deallocateFunctionBody(Body);
- }
- struct DeallocateExceptionTableCall {
- DeallocateExceptionTableCall(const void *ET) : ET(ET) {}
- const void *ET;
- };
- std::vector<DeallocateExceptionTableCall> deallocateExceptionTableCalls;
- virtual void deallocateExceptionTable(void *ET) {
- deallocateExceptionTableCalls.push_back(DeallocateExceptionTableCall(ET));
- Base->deallocateExceptionTable(ET);
- }
- struct StartExceptionTableCall {
- StartExceptionTableCall(uint8_t *Result, const Function *F,
- uintptr_t ActualSize, uintptr_t ActualSizeResult)
- : Result(Result), F(F), F_dump(DumpFunction(F)),
- ActualSize(ActualSize), ActualSizeResult(ActualSizeResult) {}
- uint8_t *Result;
- const Function *F;
- std::string F_dump;
- uintptr_t ActualSize;
- uintptr_t ActualSizeResult;
- };
- std::vector<StartExceptionTableCall> startExceptionTableCalls;
- virtual uint8_t* startExceptionTable(const Function* F,
- uintptr_t &ActualSize) {
- uintptr_t InitialActualSize = ActualSize;
- uint8_t *Result = Base->startExceptionTable(F, ActualSize);
- startExceptionTableCalls.push_back(
- StartExceptionTableCall(Result, F, InitialActualSize, ActualSize));
- return Result;
- }
- struct EndExceptionTableCall {
- EndExceptionTableCall(const Function *F, uint8_t *TableStart,
- uint8_t *TableEnd, uint8_t* FrameRegister)
- : F(F), F_dump(DumpFunction(F)),
- TableStart(TableStart), TableEnd(TableEnd),
- FrameRegister(FrameRegister) {}
- const Function *F;
- std::string F_dump;
- uint8_t *TableStart;
- uint8_t *TableEnd;
- uint8_t *FrameRegister;
- };
- std::vector<EndExceptionTableCall> endExceptionTableCalls;
- virtual void endExceptionTable(const Function *F, uint8_t *TableStart,
- uint8_t *TableEnd, uint8_t* FrameRegister) {
- endExceptionTableCalls.push_back(
- EndExceptionTableCall(F, TableStart, TableEnd, FrameRegister));
- return Base->endExceptionTable(F, TableStart, TableEnd, FrameRegister);
- }
-};
-
-bool LoadAssemblyInto(Module *M, const char *assembly) {
- SMDiagnostic Error;
- bool success =
- NULL != ParseAssemblyString(assembly, M, Error, M->getContext());
- std::string errMsg;
- raw_string_ostream os(errMsg);
- Error.Print("", os);
- EXPECT_TRUE(success) << os.str();
- return success;
-}
-
-class JITTest : public testing::Test {
- protected:
- virtual void SetUp() {
- M = new Module("<main>", Context);
- RJMM = new RecordingJITMemoryManager;
- RJMM->setPoisonMemory(true);
- std::string Error;
- TheJIT.reset(EngineBuilder(M).setEngineKind(EngineKind::JIT)
- .setJITMemoryManager(RJMM)
- .setErrorStr(&Error).create());
- ASSERT_TRUE(TheJIT.get() != NULL) << Error;
- }
-
- void LoadAssembly(const char *assembly) {
- LoadAssemblyInto(M, assembly);
- }
-
- LLVMContext Context;
- Module *M; // Owned by ExecutionEngine.
- RecordingJITMemoryManager *RJMM;
- OwningPtr<ExecutionEngine> TheJIT;
-};
-
-// Regression test for a bug. The JIT used to allocate globals inside the same
-// memory block used for the function, and when the function code was freed,
-// the global was left in the same place. This test allocates a function
-// that uses and global, deallocates it, and then makes sure that the global
-// stays alive after that.
-TEST(JIT, GlobalInFunction) {
- LLVMContext context;
- Module *M = new Module("<main>", context);
-
- JITMemoryManager *MemMgr = JITMemoryManager::CreateDefaultMemManager();
- // Tell the memory manager to poison freed memory so that accessing freed
- // memory is more easily tested.
- MemMgr->setPoisonMemory(true);
- std::string Error;
- OwningPtr<ExecutionEngine> JIT(EngineBuilder(M)
- .setEngineKind(EngineKind::JIT)
- .setErrorStr(&Error)
- .setJITMemoryManager(MemMgr)
- // The next line enables the fix:
- .setAllocateGVsWithCode(false)
- .create());
- ASSERT_EQ(Error, "");
-
- // Create a global variable.
- const Type *GTy = Type::getInt32Ty(context);
- GlobalVariable *G = new GlobalVariable(
- *M,
- GTy,
- false, // Not constant.
- GlobalValue::InternalLinkage,
- Constant::getNullValue(GTy),
- "myglobal");
-
- // Make a function that points to a global.
- Function *F1 = makeReturnGlobal("F1", G, M);
-
- // Get the pointer to the native code to force it to JIT the function and
- // allocate space for the global.
- void (*F1Ptr)() =
- reinterpret_cast<void(*)()>((intptr_t)JIT->getPointerToFunction(F1));
-
- // Since F1 was codegen'd, a pointer to G should be available.
- int32_t *GPtr = (int32_t*)JIT->getPointerToGlobalIfAvailable(G);
- ASSERT_NE((int32_t*)NULL, GPtr);
- EXPECT_EQ(0, *GPtr);
-
- // F1() should increment G.
- F1Ptr();
- EXPECT_EQ(1, *GPtr);
-
- // Make a second function identical to the first, referring to the same
- // global.
- Function *F2 = makeReturnGlobal("F2", G, M);
- void (*F2Ptr)() =
- reinterpret_cast<void(*)()>((intptr_t)JIT->getPointerToFunction(F2));
-
- // F2() should increment G.
- F2Ptr();
- EXPECT_EQ(2, *GPtr);
-
- // Deallocate F1.
- JIT->freeMachineCodeForFunction(F1);
-
- // F2() should *still* increment G.
- F2Ptr();
- EXPECT_EQ(3, *GPtr);
-}
-
-int PlusOne(int arg) {
- return arg + 1;
-}
-
-TEST_F(JITTest, FarCallToKnownFunction) {
- // x86-64 can only make direct calls to functions within 32 bits of
- // the current PC. To call anything farther away, we have to load
- // the address into a register and call through the register. The
- // current JIT does this by allocating a stub for any far call.
- // There was a bug in which the JIT tried to emit a direct call when
- // the target was already in the JIT's global mappings and lazy
- // compilation was disabled.
-
- Function *KnownFunction = Function::Create(
- TypeBuilder<int(int), false>::get(Context),
- GlobalValue::ExternalLinkage, "known", M);
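- // Map the external declaration to the host PlusOne so the call target is
- // already present in the JIT's global mappings.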
- TheJIT->addGlobalMapping(KnownFunction, (void*)(intptr_t)PlusOne);
-
- // int test() { return known(7); }
- Function *TestFunction = Function::Create(
- TypeBuilder<int(), false>::get(Context),
- GlobalValue::ExternalLinkage, "test", M);
- BasicBlock *Entry = BasicBlock::Create(Context, "entry", TestFunction);
- IRBuilder<> Builder(Entry);
- Value *result = Builder.CreateCall(
- KnownFunction,
- ConstantInt::get(TypeBuilder<int, false>::get(Context), 7));
- Builder.CreateRet(result);
-
- TheJIT->DisableLazyCompilation(true);
- int (*TestFunctionPtr)() = reinterpret_cast<int(*)()>(
- (intptr_t)TheJIT->getPointerToFunction(TestFunction));
- // This used to crash in trying to call PlusOne().
- EXPECT_EQ(8, TestFunctionPtr());
-}
-
-// Test a function (Func1) that calls two functions (Func2, Func3) which call each other.
-TEST_F(JITTest, NonLazyCompilationStillNeedsStubs) {
- TheJIT->DisableLazyCompilation(true);
-
- const FunctionType *Func1Ty =
- cast<FunctionType>(TypeBuilder<void(void), false>::get(Context));
- std::vector<const Type*> arg_types;
- arg_types.push_back(Type::getInt1Ty(Context));
- const FunctionType *FuncTy = FunctionType::get(
- Type::getVoidTy(Context), arg_types, false);
- Function *Func1 = Function::Create(Func1Ty, Function::ExternalLinkage,
- "func1", M);
- Function *Func2 = Function::Create(FuncTy, Function::InternalLinkage,
- "func2", M);
- Function *Func3 = Function::Create(FuncTy, Function::InternalLinkage,
- "func3", M);
- BasicBlock *Block1 = BasicBlock::Create(Context, "block1", Func1);
- BasicBlock *Block2 = BasicBlock::Create(Context, "block2", Func2);
- BasicBlock *True2 = BasicBlock::Create(Context, "cond_true", Func2);
- BasicBlock *False2 = BasicBlock::Create(Context, "cond_false", Func2);
- BasicBlock *Block3 = BasicBlock::Create(Context, "block3", Func3);
- BasicBlock *True3 = BasicBlock::Create(Context, "cond_true", Func3);
- BasicBlock *False3 = BasicBlock::Create(Context, "cond_false", Func3);
-
- // Make Func1 call Func2(true) and Func3(true).
- IRBuilder<> Builder(Block1);
- Builder.CreateCall(Func2, ConstantInt::getTrue(Context));
- Builder.CreateCall(Func3, ConstantInt::getTrue(Context));
- Builder.CreateRetVoid();
-
- // void Func2(bool b) { if (b) { Func3(false); return; } return; }
- Builder.SetInsertPoint(Block2);
- Builder.CreateCondBr(Func2->arg_begin(), True2, False2);
- Builder.SetInsertPoint(True2);
- Builder.CreateCall(Func3, ConstantInt::getFalse(Context));
- Builder.CreateRetVoid();
- Builder.SetInsertPoint(False2);
- Builder.CreateRetVoid();
-
- // void Func3(bool b) { if (b) { Func2(false); return; } return; }
- Builder.SetInsertPoint(Block3);
- Builder.CreateCondBr(Func3->arg_begin(), True3, False3);
- Builder.SetInsertPoint(True3);
- Builder.CreateCall(Func2, ConstantInt::getFalse(Context));
- Builder.CreateRetVoid();
- Builder.SetInsertPoint(False3);
- Builder.CreateRetVoid();
-
- // Compile the function to native code
- void (*F1Ptr)() =
- reinterpret_cast<void(*)()>((intptr_t)TheJIT->getPointerToFunction(Func1));
-
- F1Ptr();
-}
-
-// Regression test for PR5162. This used to trigger an AssertingVH inside the
- // JIT's Function-to-stub mapping.
-TEST_F(JITTest, NonLazyLeaksNoStubs) {
- TheJIT->DisableLazyCompilation(true);
-
- // Create two functions with a single basic block each.
- const FunctionType *FuncTy =
- cast<FunctionType>(TypeBuilder<int(), false>::get(Context));
- Function *Func1 = Function::Create(FuncTy, Function::ExternalLinkage,
- "func1", M);
- Function *Func2 = Function::Create(FuncTy, Function::InternalLinkage,
- "func2", M);
- BasicBlock *Block1 = BasicBlock::Create(Context, "block1", Func1);
- BasicBlock *Block2 = BasicBlock::Create(Context, "block2", Func2);
-
- // The first function calls the second and returns the result
- IRBuilder<> Builder(Block1);
- Value *Result = Builder.CreateCall(Func2);
- Builder.CreateRet(Result);
-
- // The second function just returns a constant
- Builder.SetInsertPoint(Block2);
- Builder.CreateRet(ConstantInt::get(TypeBuilder<int, false>::get(Context),42));
-
- // Compile the function to native code
- (void)TheJIT->getPointerToFunction(Func1);
-
- // Free the JIT state for the functions
- TheJIT->freeMachineCodeForFunction(Func1);
- TheJIT->freeMachineCodeForFunction(Func2);
-
- // Delete the first function (and show that it has no users)
- EXPECT_EQ(Func1->getNumUses(), 0u);
- Func1->eraseFromParent();
-
- // Delete the second function (and show that it has no users - it had one,
- // func1, but that's gone now)
- EXPECT_EQ(Func2->getNumUses(), 0u);
- Func2->eraseFromParent();
-}
-
-TEST_F(JITTest, ModuleDeletion) {
- TheJIT->DisableLazyCompilation(false);
- LoadAssembly("define void @main() { "
- " call i32 @computeVal() "
- " ret void "
- "} "
- " "
- "define internal i32 @computeVal() { "
- " ret i32 0 "
- "} ");
- Function *func = M->getFunction("main");
- TheJIT->getPointerToFunction(func);
- TheJIT->removeModule(M);
- delete M;
-
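- // Every function body the memory manager started should also have been
- // deallocated when the module was removed.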
- SmallPtrSet<const void*, 2> FunctionsDeallocated;
- for (unsigned i = 0, e = RJMM->deallocateFunctionBodyCalls.size();
- i != e; ++i) {
- FunctionsDeallocated.insert(RJMM->deallocateFunctionBodyCalls[i].Body);
- }
- for (unsigned i = 0, e = RJMM->startFunctionBodyCalls.size(); i != e; ++i) {
- EXPECT_TRUE(FunctionsDeallocated.count(
- RJMM->startFunctionBodyCalls[i].Result))
- << "Function leaked: \n" << RJMM->startFunctionBodyCalls[i].F_dump;
- }
- EXPECT_EQ(RJMM->startFunctionBodyCalls.size(),
- RJMM->deallocateFunctionBodyCalls.size());
-
- SmallPtrSet<const void*, 2> ExceptionTablesDeallocated;
- unsigned NumTablesDeallocated = 0;
- for (unsigned i = 0, e = RJMM->deallocateExceptionTableCalls.size();
- i != e; ++i) {
- ExceptionTablesDeallocated.insert(
- RJMM->deallocateExceptionTableCalls[i].ET);
- if (RJMM->deallocateExceptionTableCalls[i].ET != NULL) {
- // If JITEmitDebugInfo is off, we'll "deallocate" NULL, which doesn't
- // appear in startExceptionTableCalls.
- NumTablesDeallocated++;
- }
- }
- for (unsigned i = 0, e = RJMM->startExceptionTableCalls.size(); i != e; ++i) {
- EXPECT_TRUE(ExceptionTablesDeallocated.count(
- RJMM->startExceptionTableCalls[i].Result))
- << "Function's exception table leaked: \n"
- << RJMM->startExceptionTableCalls[i].F_dump;
- }
- EXPECT_EQ(RJMM->startExceptionTableCalls.size(),
- NumTablesDeallocated);
-}
-
-// ARM and PPC still emit stubs for calls since the target may be too far away
-// to call directly. This #if can probably be removed when
-// http://llvm.org/PR5201 is fixed.
-#if !defined(__arm__) && !defined(__powerpc__) && !defined(__ppc__)
-typedef int (*FooPtr) ();
-
-TEST_F(JITTest, NoStubs) {
- LoadAssembly("define void @bar() {"
- "entry: "
- "ret void"
- "}"
- " "
- "define i32 @foo() {"
- "entry:"
- "call void @bar()"
- "ret i32 undef"
- "}"
- " "
- "define i32 @main() {"
- "entry:"
- "%0 = call i32 @foo()"
- "call void @bar()"
- "ret i32 undef"
- "}");
- Function *foo = M->getFunction("foo");
- uintptr_t tmp = (uintptr_t)(TheJIT->getPointerToFunction(foo));
- FooPtr ptr = (FooPtr)(tmp);
-
- (ptr)();
-
- // From here on no new stubs should be allocated: we have the code for foo
- // and the existing stub for bar.
- int stubsBefore = RJMM->stubsAllocated;
- Function *func = M->getFunction("main");
- TheJIT->getPointerToFunction(func);
-
- Function *bar = M->getFunction("bar");
- TheJIT->getPointerToFunction(bar);
-
- ASSERT_EQ(stubsBefore, RJMM->stubsAllocated);
-}
-#endif // !ARM && !PPC
-
-TEST_F(JITTest, FunctionPointersOutliveTheirCreator) {
- TheJIT->DisableLazyCompilation(true);
- LoadAssembly("define i8()* @get_foo_addr() { "
- " ret i8()* @foo "
- "} "
- " "
- "define i8 @foo() { "
- " ret i8 42 "
- "} ");
- Function *F_get_foo_addr = M->getFunction("get_foo_addr");
-
- typedef char(*fooT)();
- fooT (*get_foo_addr)() = reinterpret_cast<fooT(*)()>(
- (intptr_t)TheJIT->getPointerToFunction(F_get_foo_addr));
- fooT foo_addr = get_foo_addr();
-
- // Now free get_foo_addr. This should not free the machine code for foo or
- // any call stub returned as foo's canonical address.
- TheJIT->freeMachineCodeForFunction(F_get_foo_addr);
-
- // Check by calling the reported address of foo.
- EXPECT_EQ(42, foo_addr());
-
- // The reported address should also be the same as the result of a subsequent
- // getPointerToFunction(foo).
-#if 0
- // Fails until PR5126 is fixed:
- Function *F_foo = M->getFunction("foo");
- fooT foo = reinterpret_cast<fooT>(
- (intptr_t)TheJIT->getPointerToFunction(F_foo));
- EXPECT_EQ((intptr_t)foo, (intptr_t)foo_addr);
-#endif
-}
-
-// ARM doesn't have an implementation of replaceMachineCodeForFunction(), so
-// recompileAndRelinkFunction doesn't work.
-#if !defined(__arm__)
-TEST_F(JITTest, FunctionIsRecompiledAndRelinked) {
- Function *F = Function::Create(TypeBuilder<int(void), false>::get(Context),
- GlobalValue::ExternalLinkage, "test", M);
- BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
- IRBuilder<> Builder(Entry);
- Value *Val = ConstantInt::get(TypeBuilder<int, false>::get(Context), 1);
- Builder.CreateRet(Val);
-
- TheJIT->DisableLazyCompilation(true);
- // Compile the function once, and make sure it works.
- int (*OrigFPtr)() = reinterpret_cast<int(*)()>(
- (intptr_t)TheJIT->recompileAndRelinkFunction(F));
- EXPECT_EQ(1, OrigFPtr());
-
- // Now change the function to return a different value.
- Entry->eraseFromParent();
- BasicBlock *NewEntry = BasicBlock::Create(Context, "new_entry", F);
- Builder.SetInsertPoint(NewEntry);
- Val = ConstantInt::get(TypeBuilder<int, false>::get(Context), 2);
- Builder.CreateRet(Val);
- // Recompile it, which should produce a new function pointer _and_ update the
- // old one.
- int (*NewFPtr)() = reinterpret_cast<int(*)()>(
- (intptr_t)TheJIT->recompileAndRelinkFunction(F));
-
- EXPECT_EQ(2, NewFPtr())
- << "The new pointer should call the new version of the function";
- EXPECT_EQ(2, OrigFPtr())
- << "The old pointer's target should now jump to the new version";
-}
-#endif // !defined(__arm__)
-
-} // anonymous namespace
-// This variable is intentionally given a different value in the statically-
-// compiled program than in the IR fed to the JIT, to assert that the JIT does
-// not use the IR definition.
-extern "C" int32_t JITTest_AvailableExternallyGlobal;
-int32_t JITTest_AvailableExternallyGlobal = 42;
-namespace {
-
-#if 0
-// CLAMAV LOCAL: disabled because these tests need the $(RDYNAMIC) link flag
-TEST_F(JITTest, AvailableExternallyGlobalIsntEmitted) {
- TheJIT->DisableLazyCompilation(true);
- LoadAssembly("@JITTest_AvailableExternallyGlobal = "
- " available_externally global i32 7 "
- " "
- "define i32 @loader() { "
- " %result = load i32* @JITTest_AvailableExternallyGlobal "
- " ret i32 %result "
- "} ");
- Function *loaderIR = M->getFunction("loader");
-
- int32_t (*loader)() = reinterpret_cast<int32_t(*)()>(
- (intptr_t)TheJIT->getPointerToFunction(loaderIR));
- EXPECT_EQ(42, loader()) << "func should return 42 from the external global,"
- << " not 7 from the IR version.";
-}
-
-} // anonymous namespace
-// This function is intentionally given a different body in the statically-
-// compiled program than in the IR fed to the JIT, to assert that the JIT does
-// not use the IR definition.
-extern "C" int32_t JITTest_AvailableExternallyFunction() {
- return 42;
-}
-namespace {
-
-TEST_F(JITTest, AvailableExternallyFunctionIsntCompiled) {
- TheJIT->DisableLazyCompilation(true);
- LoadAssembly("define available_externally i32 "
- " @JITTest_AvailableExternallyFunction() { "
- " ret i32 7 "
- "} "
- " "
- "define i32 @func() { "
- " %result = tail call i32 "
- " @JITTest_AvailableExternallyFunction() "
- " ret i32 %result "
- "} ");
- Function *funcIR = M->getFunction("func");
-
- int32_t (*func)() = reinterpret_cast<int32_t(*)()>(
- (intptr_t)TheJIT->getPointerToFunction(funcIR));
- EXPECT_EQ(42, func()) << "func should return 42 from the static version,"
- << " not 7 from the IR version.";
-}
-
-TEST_F(JITTest, NeedsExactSizeWithManyGlobals) {
- // PR5291: When the JMM needed the exact size of function bodies before
- // starting to emit them, the JITEmitter would modify a set while iterating
- // over it.
- TheJIT->DisableLazyCompilation(true);
- RJMM->setSizeRequired(true);
-
- LoadAssembly("@A = global i32 42 "
- "@B = global i32* @A "
- "@C = global i32** @B "
- "@D = global i32*** @C "
- "@E = global i32**** @D "
- "@F = global i32***** @E "
- "@G = global i32****** @F "
- "@H = global i32******* @G "
- "@I = global i32******** @H "
- "define i32********* @test() { "
- " ret i32********* @I "
- "}");
- Function *testIR = M->getFunction("test");
- int32_t********* (*test)() = reinterpret_cast<int32_t*********(*)()>(
- (intptr_t)TheJIT->getPointerToFunction(testIR));
- EXPECT_EQ(42, *********test());
-}
-
-TEST_F(JITTest, EscapedLazyStubStillCallable) {
- TheJIT->DisableLazyCompilation(false);
- LoadAssembly("define internal i32 @stubbed() { "
- " ret i32 42 "
- "} "
- " "
- "define i32()* @get_stub() { "
- " ret i32()* @stubbed "
- "} ");
- typedef int32_t(*StubTy)();
-
- // Call get_stub() to get the address of @stubbed without actually JITting it.
- Function *get_stubIR = M->getFunction("get_stub");
- StubTy (*get_stub)() = reinterpret_cast<StubTy(*)()>(
- (intptr_t)TheJIT->getPointerToFunction(get_stubIR));
- StubTy stubbed = get_stub();
- // Now get_stubIR is the only reference to stubbed's stub.
- get_stubIR->eraseFromParent();
- // Now there are no references inside the JIT, but we've got a pointer outside
- // it. The stub should be callable and return the right value.
- EXPECT_EQ(42, stubbed());
-}
-
-// Converts the LLVM assembly to bitcode and returns it in a std::string. An
-// empty string indicates an error.
-std::string AssembleToBitcode(LLVMContext &Context, const char *Assembly) {
- Module TempModule("TempModule", Context);
- if (!LoadAssemblyInto(&TempModule, Assembly)) {
- return "";
- }
-
- std::string Result;
- raw_string_ostream OS(Result);
- WriteBitcodeToFile(&TempModule, OS);
- OS.flush();
- return Result;
-}
-
-// Returns a newly-created ExecutionEngine that reads the bitcode in 'Bitcode'
-// lazily. The associated Module (owned by the ExecutionEngine) is returned in
-// M. Both will be NULL on an error. Bitcode must live at least as long as the
-// ExecutionEngine.
-ExecutionEngine *getJITFromBitcode(
- LLVMContext &Context, const std::string &Bitcode, Module *&M) {
- // c_str() is null-terminated like MemoryBuffer::getMemBuffer requires.
- MemoryBuffer *BitcodeBuffer =
- MemoryBuffer::getMemBuffer(Bitcode.c_str(),
- Bitcode.c_str() + Bitcode.size(),
- "Bitcode for test");
- std::string errMsg;
- M = getLazyBitcodeModule(BitcodeBuffer, Context, &errMsg);
- if (M == NULL) {
- ADD_FAILURE() << errMsg;
- delete BitcodeBuffer;
- return NULL;
- }
- ExecutionEngine *TheJIT = EngineBuilder(M)
- .setEngineKind(EngineKind::JIT)
- .setErrorStr(&errMsg)
- .create();
- if (TheJIT == NULL) {
- ADD_FAILURE() << errMsg;
- delete M;
- M = NULL;
- return NULL;
- }
- return TheJIT;
-}
-
-TEST(LazyLoadedJITTest, MaterializableAvailableExternallyFunctionIsntCompiled) {
- LLVMContext Context;
- const std::string Bitcode =
- AssembleToBitcode(Context,
- "define available_externally i32 "
- " @JITTest_AvailableExternallyFunction() { "
- " ret i32 7 "
- "} "
- " "
- "define i32 @func() { "
- " %result = tail call i32 "
- " @JITTest_AvailableExternallyFunction() "
- " ret i32 %result "
- "} ");
- ASSERT_FALSE(Bitcode.empty()) << "Assembling failed";
- Module *M;
- OwningPtr<ExecutionEngine> TheJIT(getJITFromBitcode(Context, Bitcode, M));
- ASSERT_TRUE(TheJIT.get()) << "Failed to create JIT.";
- TheJIT->DisableLazyCompilation(true);
-
- Function *funcIR = M->getFunction("func");
- Function *availableFunctionIR =
- M->getFunction("JITTest_AvailableExternallyFunction");
-
- // Double-check that the available_externally function is still unmaterialized
- // when getPointerToFunction needs to find out if it's available_externally.
- EXPECT_TRUE(availableFunctionIR->isMaterializable());
-
- int32_t (*func)() = reinterpret_cast<int32_t(*)()>(
- (intptr_t)TheJIT->getPointerToFunction(funcIR));
- EXPECT_EQ(42, func()) << "func should return 42 from the static version,"
- << " not 7 from the IR version.";
-}
-
-TEST(LazyLoadedJITTest, EagerCompiledRecursionThroughGhost) {
- LLVMContext Context;
- const std::string Bitcode =
- AssembleToBitcode(Context,
- "define i32 @recur1(i32 %a) { "
- " %zero = icmp eq i32 %a, 0 "
- " br i1 %zero, label %done, label %notdone "
- "done: "
- " ret i32 3 "
- "notdone: "
- " %am1 = sub i32 %a, 1 "
- " %result = call i32 @recur2(i32 %am1) "
- " ret i32 %result "
- "} "
- " "
- "define i32 @recur2(i32 %b) { "
- " %result = call i32 @recur1(i32 %b) "
- " ret i32 %result "
- "} ");
- ASSERT_FALSE(Bitcode.empty()) << "Assembling failed";
- Module *M;
- OwningPtr<ExecutionEngine> TheJIT(getJITFromBitcode(Context, Bitcode, M));
- ASSERT_TRUE(TheJIT.get()) << "Failed to create JIT.";
- TheJIT->DisableLazyCompilation(true);
-
- Function *recur1IR = M->getFunction("recur1");
- Function *recur2IR = M->getFunction("recur2");
- EXPECT_TRUE(recur1IR->isMaterializable());
- EXPECT_TRUE(recur2IR->isMaterializable());
-
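- // Eagerly compiling recur1 must also materialize and compile recur2, which it
- // calls; both are still unmaterialized ("ghost") functions at this point.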
- int32_t (*recur1)(int32_t) = reinterpret_cast<int32_t(*)(int32_t)>(
- (intptr_t)TheJIT->getPointerToFunction(recur1IR));
- EXPECT_EQ(3, recur1(4));
-}
-#endif
-// This code is copied from JITEventListenerTest, but it only runs once for all
-// the tests in this directory. Everything seems fine, but that's strange
-// behavior.
-class JITEnvironment : public testing::Environment {
- virtual void SetUp() {
- // Required to create a JIT.
- InitializeNativeTarget();
- }
-};
-testing::Environment* const jit_env =
- testing::AddGlobalTestEnvironment(new JITEnvironment);
-
-}
diff --git a/libclamav/c++/llvm/unittests/ExecutionEngine/JIT/Makefile b/libclamav/c++/llvm/unittests/ExecutionEngine/JIT/Makefile
deleted file mode 100644
index f5abe75..0000000
--- a/libclamav/c++/llvm/unittests/ExecutionEngine/JIT/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-##===- unittests/ExecutionEngine/JIT/Makefile --------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-TESTNAME = JIT
-LINK_COMPONENTS := asmparser bitreader bitwriter core jit native support
-
-include $(LEVEL)/Makefile.config
-include $(LLVM_SRC_ROOT)/unittests/Makefile.unittest
-
-# Permit these tests to use the JIT's symbolic lookup.
-LD.Flags += $(RDYNAMIC)
diff --git a/libclamav/c++/llvm/unittests/ExecutionEngine/JIT/MultiJITTest.cpp b/libclamav/c++/llvm/unittests/ExecutionEngine/JIT/MultiJITTest.cpp
deleted file mode 100644
index 8997d39..0000000
--- a/libclamav/c++/llvm/unittests/ExecutionEngine/JIT/MultiJITTest.cpp
+++ /dev/null
@@ -1,164 +0,0 @@
-//===- MultiJITTest.cpp - Unit tests for instantiating multiple JITs ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
-#include "llvm/Assembly/Parser.h"
-#include "llvm/ExecutionEngine/GenericValue.h"
-#include "llvm/ExecutionEngine/JIT.h"
-#include "llvm/Support/SourceMgr.h"
-#include <vector>
-
-using namespace llvm;
-
-namespace {
-
-bool LoadAssemblyInto(Module *M, const char *assembly) {
- SMDiagnostic Error;
- bool success =
- NULL != ParseAssemblyString(assembly, M, Error, M->getContext());
- std::string errMsg;
- raw_string_ostream os(errMsg);
- Error.Print("", os);
- EXPECT_TRUE(success) << os.str();
- return success;
-}
-
-void createModule1(LLVMContext &Context1, Module *&M1, Function *&FooF1) {
- M1 = new Module("test1", Context1);
- LoadAssemblyInto(M1,
- "define i32 @add1(i32 %ArgX1) { "
- "entry: "
- " %addresult = add i32 1, %ArgX1 "
- " ret i32 %addresult "
- "} "
- " "
- "define i32 @foo1() { "
- "entry: "
- " %add1 = call i32 @add1(i32 10) "
- " ret i32 %add1 "
- "} ");
- FooF1 = M1->getFunction("foo1");
-}
-
-void createModule2(LLVMContext &Context2, Module *&M2, Function *&FooF2) {
- M2 = new Module("test2", Context2);
- LoadAssemblyInto(M2,
- "define i32 @add2(i32 %ArgX2) { "
- "entry: "
- " %addresult = add i32 2, %ArgX2 "
- " ret i32 %addresult "
- "} "
- " "
- "define i32 @foo2() { "
- "entry: "
- " %add2 = call i32 @add2(i32 10) "
- " ret i32 %add2 "
- "} ");
- FooF2 = M2->getFunction("foo2");
-}
-
-TEST(MultiJitTest, EagerMode) {
- LLVMContext Context1;
- Module *M1 = 0;
- Function *FooF1 = 0;
- createModule1(Context1, M1, FooF1);
-
- LLVMContext Context2;
- Module *M2 = 0;
- Function *FooF2 = 0;
- createModule2(Context2, M2, FooF2);
-
- // Now we create the JIT in eager mode
- OwningPtr<ExecutionEngine> EE1(EngineBuilder(M1).create());
- EE1->DisableLazyCompilation(true);
- OwningPtr<ExecutionEngine> EE2(EngineBuilder(M2).create());
- EE2->DisableLazyCompilation(true);
-
- // Call the `foo' function with no arguments:
- std::vector<GenericValue> noargs;
- GenericValue gv1 = EE1->runFunction(FooF1, noargs);
- GenericValue gv2 = EE2->runFunction(FooF2, noargs);
-
- // Import result of execution:
- EXPECT_EQ(gv1.IntVal, 11);
- EXPECT_EQ(gv2.IntVal, 12);
-
- EE1->freeMachineCodeForFunction(FooF1);
- EE2->freeMachineCodeForFunction(FooF2);
-}
-
-TEST(MultiJitTest, LazyMode) {
- LLVMContext Context1;
- Module *M1 = 0;
- Function *FooF1 = 0;
- createModule1(Context1, M1, FooF1);
-
- LLVMContext Context2;
- Module *M2 = 0;
- Function *FooF2 = 0;
- createModule2(Context2, M2, FooF2);
-
- // Now we create the JIT in lazy mode
- OwningPtr<ExecutionEngine> EE1(EngineBuilder(M1).create());
- EE1->DisableLazyCompilation(false);
- OwningPtr<ExecutionEngine> EE2(EngineBuilder(M2).create());
- EE2->DisableLazyCompilation(false);
-
- // Call the `foo' function with no arguments:
- std::vector<GenericValue> noargs;
- GenericValue gv1 = EE1->runFunction(FooF1, noargs);
- GenericValue gv2 = EE2->runFunction(FooF2, noargs);
-
- // Import result of execution:
- EXPECT_EQ(gv1.IntVal, 11);
- EXPECT_EQ(gv2.IntVal, 12);
-
- EE1->freeMachineCodeForFunction(FooF1);
- EE2->freeMachineCodeForFunction(FooF2);
-}
-
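-// getPointerToNamedFunction is provided by the JIT library and resolves
-// symbols across all live JIT instances; the JitPool test below relies on this.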
-extern "C" {
- extern void *getPointerToNamedFunction(const char *Name);
-}
-
-TEST(MultiJitTest, JitPool) {
- LLVMContext Context1;
- Module *M1 = 0;
- Function *FooF1 = 0;
- createModule1(Context1, M1, FooF1);
-
- LLVMContext Context2;
- Module *M2 = 0;
- Function *FooF2 = 0;
- createModule2(Context2, M2, FooF2);
-
- // Now we create two JITs
- OwningPtr<ExecutionEngine> EE1(EngineBuilder(M1).create());
- OwningPtr<ExecutionEngine> EE2(EngineBuilder(M2).create());
-
- Function *F1 = EE1->FindFunctionNamed("foo1");
- void *foo1 = EE1->getPointerToFunction(F1);
-
- Function *F2 = EE2->FindFunctionNamed("foo2");
- void *foo2 = EE2->getPointerToFunction(F2);
-
- // Function in M1
- EXPECT_EQ(getPointerToNamedFunction("foo1"), foo1);
-
- // Function in M2
- EXPECT_EQ(getPointerToNamedFunction("foo2"), foo2);
-
- // Symbol search
- EXPECT_EQ((intptr_t)getPointerToNamedFunction("getPointerToNamedFunction"),
- (intptr_t)&getPointerToNamedFunction);
-}
-
-} // anonymous namespace
diff --git a/libclamav/c++/llvm/unittests/ExecutionEngine/Makefile b/libclamav/c++/llvm/unittests/ExecutionEngine/Makefile
deleted file mode 100644
index d4ef92f..0000000
--- a/libclamav/c++/llvm/unittests/ExecutionEngine/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-##===- unittests/ExecutionEngine/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TESTNAME = ExecutionEngine
-LINK_COMPONENTS := engine interpreter
-
-include $(LEVEL)/Makefile.config
-
-PARALLEL_DIRS = JIT
-
-include $(LLVM_SRC_ROOT)/unittests/Makefile.unittest
diff --git a/libclamav/c++/llvm/unittests/Makefile b/libclamav/c++/llvm/unittests/Makefile
deleted file mode 100644
index 9f377cd..0000000
--- a/libclamav/c++/llvm/unittests/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-##===- unittests/Makefile ----------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ..
-
-PARALLEL_DIRS = ADT ExecutionEngine Support Transforms VMCore
-
-include $(LEVEL)/Makefile.common
-
-clean::
- $(Verb) $(RM) -f *Tests
diff --git a/libclamav/c++/llvm/unittests/Makefile.unittest b/libclamav/c++/llvm/unittests/Makefile.unittest
deleted file mode 100644
index 8fbcfd2..0000000
--- a/libclamav/c++/llvm/unittests/Makefile.unittest
+++ /dev/null
@@ -1,54 +0,0 @@
-##===- unittests/Makefile.unittest -------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-#
-# This file is included by all of the unit test makefiles.
-#
-##===----------------------------------------------------------------------===##
-
-# Set up variables for building a unit test.
-ifdef TESTNAME
-
-include $(LEVEL)/Makefile.common
-
-LLVMUnitTestExe = $(BuildMode)/$(TESTNAME)Tests$(EXEEXT)
-
-# Note that these flags are duplicated when building GoogleTest itself in
-# utils/unittest/googletest/Makefile; ensure that any changes are made to both.
-CPP.Flags += -I$(LLVM_SRC_ROOT)/utils/unittest/googletest/include
-CPP.Flags += $(NO_MISSING_FIELD_INITIALIZERS) $(NO_VARIADIC_MACROS)
-CPP.Flags += -DGTEST_HAS_RTTI=0
-# libstdc++'s TR1 <tuple> header depends on RTTI and uses C++0x features not
-# supported by Clang, so force googletest to use its own tuple implementation.
-# When we import googletest >=1.4.0, we can drop this line.
-CPP.Flags += -DGTEST_HAS_TR1_TUPLE=0
-
-TESTLIBS = -lGoogleTest -lUnitTestMain
-
-ifeq ($(ENABLE_SHARED), 1)
- # Add the absolute path to the dynamic library. This is ok because
- # we'll never install unittests.
- LD.Flags += $(RPATH) -Wl,$(LibDir)
- # Also set {DYLD,LD}_LIBRARY_PATH because OSX ignores the rpath most
- # of the time.
- Run.Shared := $(SHLIBPATH_VAR)="$(LibDir)$${$(SHLIBPATH_VAR):+:}$$$(SHLIBPATH_VAR)"
-endif
-
-$(LLVMUnitTestExe): $(ObjectsO) $(ProjLibsPaths) $(LLVMLibsPaths)
- $(Echo) Linking $(BuildMode) unit test $(TESTNAME) $(StripWarnMsg)
- $(Verb) $(Link) -o $@ $(TOOLLINKOPTS) $(ObjectsO) $(ProjLibsOptions) \
- $(TESTLIBS) $(LLVMLibsOptions) $(ExtraLibs) $(TOOLLINKOPTSB) $(LIBS)
- $(Echo) ======= Finished Linking $(BuildMode) Unit test $(TESTNAME) \
- $(StripWarnMsg)
-
-all:: $(LLVMUnitTestExe)
-
-unitcheck:: $(LLVMUnitTestExe)
- $(Run.Shared) $(LLVMUnitTestExe)
-
-endif
diff --git a/libclamav/c++/llvm/unittests/Support/AllocatorTest.cpp b/libclamav/c++/llvm/unittests/Support/AllocatorTest.cpp
deleted file mode 100644
index 6c0fca9..0000000
--- a/libclamav/c++/llvm/unittests/Support/AllocatorTest.cpp
+++ /dev/null
@@ -1,143 +0,0 @@
-//===- llvm/unittest/Support/AllocatorTest.cpp - BumpPtrAllocator tests ---===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/Allocator.h"
-
-#include "gtest/gtest.h"
-#include <cstdlib>
-
-using namespace llvm;
-
-namespace {
-
-TEST(AllocatorTest, Basics) {
- BumpPtrAllocator Alloc;
- int *a = (int*)Alloc.Allocate(sizeof(int), 0);
- int *b = (int*)Alloc.Allocate(sizeof(int) * 10, 0);
- int *c = (int*)Alloc.Allocate(sizeof(int), 0);
- *a = 1;
- b[0] = 2;
- b[9] = 2;
- *c = 3;
- EXPECT_EQ(1, *a);
- EXPECT_EQ(2, b[0]);
- EXPECT_EQ(2, b[9]);
- EXPECT_EQ(3, *c);
- EXPECT_EQ(1U, Alloc.GetNumSlabs());
-}
-
-// Allocate enough bytes to create three slabs.
-TEST(AllocatorTest, ThreeSlabs) {
- BumpPtrAllocator Alloc(4096, 4096);
- Alloc.Allocate(3000, 0);
- EXPECT_EQ(1U, Alloc.GetNumSlabs());
- Alloc.Allocate(3000, 0);
- EXPECT_EQ(2U, Alloc.GetNumSlabs());
- Alloc.Allocate(3000, 0);
- EXPECT_EQ(3U, Alloc.GetNumSlabs());
-}
-
-// Allocate enough bytes to create two slabs, reset the allocator, and do it
-// again.
-TEST(AllocatorTest, TestReset) {
- BumpPtrAllocator Alloc(4096, 4096);
- Alloc.Allocate(3000, 0);
- EXPECT_EQ(1U, Alloc.GetNumSlabs());
- Alloc.Allocate(3000, 0);
- EXPECT_EQ(2U, Alloc.GetNumSlabs());
- Alloc.Reset();
- EXPECT_EQ(1U, Alloc.GetNumSlabs());
- Alloc.Allocate(3000, 0);
- EXPECT_EQ(1U, Alloc.GetNumSlabs());
- Alloc.Allocate(3000, 0);
- EXPECT_EQ(2U, Alloc.GetNumSlabs());
-}
-
-// Test some allocations at varying alignments.
-TEST(AllocatorTest, TestAlignment) {
- BumpPtrAllocator Alloc;
- uintptr_t a;
- a = (uintptr_t)Alloc.Allocate(1, 2);
- EXPECT_EQ(0U, a & 1);
- a = (uintptr_t)Alloc.Allocate(1, 4);
- EXPECT_EQ(0U, a & 3);
- a = (uintptr_t)Alloc.Allocate(1, 8);
- EXPECT_EQ(0U, a & 7);
- a = (uintptr_t)Alloc.Allocate(1, 16);
- EXPECT_EQ(0U, a & 15);
- a = (uintptr_t)Alloc.Allocate(1, 32);
- EXPECT_EQ(0U, a & 31);
- a = (uintptr_t)Alloc.Allocate(1, 64);
- EXPECT_EQ(0U, a & 63);
- a = (uintptr_t)Alloc.Allocate(1, 128);
- EXPECT_EQ(0U, a & 127);
-}
-
-// Test allocating just over the slab size. This exercises a bug where the
-// allocator used to miscalculate the buffer end pointer.
-TEST(AllocatorTest, TestOverflow) {
- BumpPtrAllocator Alloc(4096, 4096);
-
- // Fill the slab right up until the end pointer.
- Alloc.Allocate(4096 - sizeof(MemSlab), 0);
- EXPECT_EQ(1U, Alloc.GetNumSlabs());
-
- // If we don't allocate a new slab, then we will have overflowed.
- Alloc.Allocate(1, 0);
- EXPECT_EQ(2U, Alloc.GetNumSlabs());
-}
-
-// Mock slab allocator that returns slabs aligned on 4096 bytes. There is no
-// easy portable way to do this, so this is kind of a hack.
-class MockSlabAllocator : public SlabAllocator {
- MemSlab *LastSlab;
-
-public:
- virtual ~MockSlabAllocator() { }
-
- virtual MemSlab *Allocate(size_t Size) {
- // Allocate space for the alignment, the slab, and a void* that goes right
- // before the slab.
- size_t Alignment = 4096;
- void *MemBase = malloc(Size + Alignment - 1 + sizeof(void*));
-
- // Make the slab.
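- // Round up past the stashed void* to the next 4096-byte boundary.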
- MemSlab *Slab = (MemSlab*)(((uintptr_t)MemBase+sizeof(void*)+Alignment-1) &
- ~(uintptr_t)(Alignment - 1));
- Slab->Size = Size;
- Slab->NextPtr = 0;
-
- // Hold a pointer to the base so we can free the whole malloced block.
- ((void**)Slab)[-1] = MemBase;
-
- LastSlab = Slab;
- return Slab;
- }
-
- virtual void Deallocate(MemSlab *Slab) {
- free(((void**)Slab)[-1]);
- }
-
- MemSlab *GetLastSlab() {
- return LastSlab;
- }
-};
-
-// Allocate a large-ish block with a very large alignment so that the
-// allocator thinks it has enough space, but after applying the alignment it
-// does not.
-TEST(AllocatorTest, TestBigAlignment) {
- MockSlabAllocator SlabAlloc;
- BumpPtrAllocator Alloc(4096, 4096, SlabAlloc);
- uintptr_t Ptr = (uintptr_t)Alloc.Allocate(3000, 2048);
- MemSlab *Slab = SlabAlloc.GetLastSlab();
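- // The aligned allocation must still fit entirely within the slab it came from.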
- EXPECT_LE(Ptr + 3000, ((uintptr_t)Slab) + Slab->Size);
-}
-
-} // anonymous namespace
diff --git a/libclamav/c++/llvm/unittests/Support/CommandLineTest.cpp b/libclamav/c++/llvm/unittests/Support/CommandLineTest.cpp
deleted file mode 100644
index 72fa24a..0000000
--- a/libclamav/c++/llvm/unittests/Support/CommandLineTest.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-//===- llvm/unittest/Support/CommandLineTest.cpp - CommandLine tests ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Config/config.h"
-
-#include "gtest/gtest.h"
-
-#include <string>
-#include <stdlib.h>
-
-using namespace llvm;
-
-namespace {
-
-class TempEnvVar {
- public:
- TempEnvVar(const char *name, const char *value)
- : name(name) {
- const char *old_value = getenv(name);
- EXPECT_EQ(NULL, old_value) << old_value;
-#if HAVE_SETENV
- setenv(name, value, true);
-#else
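-// Without setenv(3) we have no way to set the environment, so skip the
-// environment-dependent tests.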
-# define SKIP_ENVIRONMENT_TESTS
-#endif
- }
-
- ~TempEnvVar() {
-#if HAVE_SETENV
- // Assume setenv and unsetenv come together.
- unsetenv(name);
-#endif
- }
-
- private:
- const char *const name;
-};
-
-#ifndef SKIP_ENVIRONMENT_TESTS
-
-const char test_env_var[] = "LLVM_TEST_COMMAND_LINE_FLAGS";
-
-cl::opt<std::string> EnvironmentTestOption("env-test-opt");
-TEST(CommandLineTest, ParseEnvironment) {
- TempEnvVar TEV(test_env_var, "-env-test-opt=hello");
- EXPECT_EQ("", EnvironmentTestOption);
- cl::ParseEnvironmentOptions("CommandLineTest", test_env_var);
- EXPECT_EQ("hello", EnvironmentTestOption);
-}
-
-#endif // SKIP_ENVIRONMENT_TESTS
-
-} // anonymous namespace
diff --git a/libclamav/c++/llvm/unittests/Support/ConstantRangeTest.cpp b/libclamav/c++/llvm/unittests/Support/ConstantRangeTest.cpp
deleted file mode 100644
index 6b8d01d..0000000
--- a/libclamav/c++/llvm/unittests/Support/ConstantRangeTest.cpp
+++ /dev/null
@@ -1,351 +0,0 @@
-//===- llvm/unittest/Support/ConstantRangeTest.cpp - ConstantRange tests --===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/ConstantRange.h"
-
-#include "gtest/gtest.h"
-
-using namespace llvm;
-
-namespace {
-
-class ConstantRangeTest : public ::testing::Test {
-protected:
- static ConstantRange Full;
- static ConstantRange Empty;
- static ConstantRange One;
- static ConstantRange Some;
- static ConstantRange Wrap;
-};
-
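-// Fixture ranges: the full set, the empty set, a single element, an ordinary
-// interval, and an interval that wraps around the end of the 16-bit space.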
-ConstantRange ConstantRangeTest::Full(16);
-ConstantRange ConstantRangeTest::Empty(16, false);
-ConstantRange ConstantRangeTest::One(APInt(16, 0xa));
-ConstantRange ConstantRangeTest::Some(APInt(16, 0xa), APInt(16, 0xaaa));
-ConstantRange ConstantRangeTest::Wrap(APInt(16, 0xaaa), APInt(16, 0xa));
-
-TEST_F(ConstantRangeTest, Basics) {
- EXPECT_TRUE(Full.isFullSet());
- EXPECT_FALSE(Full.isEmptySet());
- EXPECT_FALSE(Full.isWrappedSet());
- EXPECT_TRUE(Full.contains(APInt(16, 0x0)));
- EXPECT_TRUE(Full.contains(APInt(16, 0x9)));
- EXPECT_TRUE(Full.contains(APInt(16, 0xa)));
- EXPECT_TRUE(Full.contains(APInt(16, 0xaa9)));
- EXPECT_TRUE(Full.contains(APInt(16, 0xaaa)));
-
- EXPECT_FALSE(Empty.isFullSet());
- EXPECT_TRUE(Empty.isEmptySet());
- EXPECT_FALSE(Empty.isWrappedSet());
- EXPECT_FALSE(Empty.contains(APInt(16, 0x0)));
- EXPECT_FALSE(Empty.contains(APInt(16, 0x9)));
- EXPECT_FALSE(Empty.contains(APInt(16, 0xa)));
- EXPECT_FALSE(Empty.contains(APInt(16, 0xaa9)));
- EXPECT_FALSE(Empty.contains(APInt(16, 0xaaa)));
-
- EXPECT_FALSE(One.isFullSet());
- EXPECT_FALSE(One.isEmptySet());
- EXPECT_FALSE(One.isWrappedSet());
- EXPECT_FALSE(One.contains(APInt(16, 0x0)));
- EXPECT_FALSE(One.contains(APInt(16, 0x9)));
- EXPECT_TRUE(One.contains(APInt(16, 0xa)));
- EXPECT_FALSE(One.contains(APInt(16, 0xaa9)));
- EXPECT_FALSE(One.contains(APInt(16, 0xaaa)));
-
- EXPECT_FALSE(Some.isFullSet());
- EXPECT_FALSE(Some.isEmptySet());
- EXPECT_FALSE(Some.isWrappedSet());
- EXPECT_FALSE(Some.contains(APInt(16, 0x0)));
- EXPECT_FALSE(Some.contains(APInt(16, 0x9)));
- EXPECT_TRUE(Some.contains(APInt(16, 0xa)));
- EXPECT_TRUE(Some.contains(APInt(16, 0xaa9)));
- EXPECT_FALSE(Some.contains(APInt(16, 0xaaa)));
-
- EXPECT_FALSE(Wrap.isFullSet());
- EXPECT_FALSE(Wrap.isEmptySet());
- EXPECT_TRUE(Wrap.isWrappedSet());
- EXPECT_TRUE(Wrap.contains(APInt(16, 0x0)));
- EXPECT_TRUE(Wrap.contains(APInt(16, 0x9)));
- EXPECT_FALSE(Wrap.contains(APInt(16, 0xa)));
- EXPECT_FALSE(Wrap.contains(APInt(16, 0xaa9)));
- EXPECT_TRUE(Wrap.contains(APInt(16, 0xaaa)));
-}
-
-TEST_F(ConstantRangeTest, Equality) {
- EXPECT_EQ(Full, Full);
- EXPECT_EQ(Empty, Empty);
- EXPECT_EQ(One, One);
- EXPECT_EQ(Some, Some);
- EXPECT_EQ(Wrap, Wrap);
- EXPECT_NE(Full, Empty);
- EXPECT_NE(Full, One);
- EXPECT_NE(Full, Some);
- EXPECT_NE(Full, Wrap);
- EXPECT_NE(Empty, One);
- EXPECT_NE(Empty, Some);
- EXPECT_NE(Empty, Wrap);
- EXPECT_NE(One, Some);
- EXPECT_NE(One, Wrap);
- EXPECT_NE(Some, Wrap);
-}
-
-TEST_F(ConstantRangeTest, SingleElement) {
- EXPECT_EQ(Full.getSingleElement(), static_cast<APInt *>(NULL));
- EXPECT_EQ(Empty.getSingleElement(), static_cast<APInt *>(NULL));
- EXPECT_EQ(*One.getSingleElement(), APInt(16, 0xa));
- EXPECT_EQ(Some.getSingleElement(), static_cast<APInt *>(NULL));
- EXPECT_EQ(Wrap.getSingleElement(), static_cast<APInt *>(NULL));
-
- EXPECT_FALSE(Full.isSingleElement());
- EXPECT_FALSE(Empty.isSingleElement());
- EXPECT_TRUE(One.isSingleElement());
- EXPECT_FALSE(Some.isSingleElement());
- EXPECT_FALSE(Wrap.isSingleElement());
-}
-
-TEST_F(ConstantRangeTest, GetSetSize) {
- EXPECT_EQ(Full.getSetSize(), APInt(16, 0));
- EXPECT_EQ(Empty.getSetSize(), APInt(16, 0));
- EXPECT_EQ(One.getSetSize(), APInt(16, 1));
- EXPECT_EQ(Some.getSetSize(), APInt(16, 0xaa0));
- EXPECT_EQ(Wrap.getSetSize(), APInt(16, 0x10000 - 0xaa0));
-}
-
-TEST_F(ConstantRangeTest, GetMinsAndMaxes) {
- EXPECT_EQ(Full.getUnsignedMax(), APInt(16, UINT16_MAX));
- EXPECT_EQ(One.getUnsignedMax(), APInt(16, 0xa));
- EXPECT_EQ(Some.getUnsignedMax(), APInt(16, 0xaa9));
- EXPECT_EQ(Wrap.getUnsignedMax(), APInt(16, UINT16_MAX));
-
- EXPECT_EQ(Full.getUnsignedMin(), APInt(16, 0));
- EXPECT_EQ(One.getUnsignedMin(), APInt(16, 0xa));
- EXPECT_EQ(Some.getUnsignedMin(), APInt(16, 0xa));
- EXPECT_EQ(Wrap.getUnsignedMin(), APInt(16, 0));
-
- EXPECT_EQ(Full.getSignedMax(), APInt(16, INT16_MAX));
- EXPECT_EQ(One.getSignedMax(), APInt(16, 0xa));
- EXPECT_EQ(Some.getSignedMax(), APInt(16, 0xaa9));
- EXPECT_EQ(Wrap.getSignedMax(), APInt(16, INT16_MAX));
-
- EXPECT_EQ(Full.getSignedMin(), APInt(16, (uint64_t)INT16_MIN));
- EXPECT_EQ(One.getSignedMin(), APInt(16, 0xa));
- EXPECT_EQ(Some.getSignedMin(), APInt(16, 0xa));
- EXPECT_EQ(Wrap.getSignedMin(), APInt(16, (uint64_t)INT16_MIN));
-
- // Found by Klee
- EXPECT_EQ(ConstantRange(APInt(4, 7), APInt(4, 0)).getSignedMax(),
- APInt(4, 7));
-}
-
-TEST_F(ConstantRangeTest, Trunc) {
- ConstantRange TFull = Full.truncate(10);
- ConstantRange TEmpty = Empty.truncate(10);
- ConstantRange TOne = One.truncate(10);
- ConstantRange TSome = Some.truncate(10);
- ConstantRange TWrap = Wrap.truncate(10);
- EXPECT_TRUE(TFull.isFullSet());
- EXPECT_TRUE(TEmpty.isEmptySet());
- EXPECT_EQ(TOne, ConstantRange(APInt(One.getLower()).trunc(10),
- APInt(One.getUpper()).trunc(10)));
- EXPECT_TRUE(TSome.isFullSet());
-}
-
-TEST_F(ConstantRangeTest, ZExt) {
- ConstantRange ZFull = Full.zeroExtend(20);
- ConstantRange ZEmpty = Empty.zeroExtend(20);
- ConstantRange ZOne = One.zeroExtend(20);
- ConstantRange ZSome = Some.zeroExtend(20);
- ConstantRange ZWrap = Wrap.zeroExtend(20);
- EXPECT_EQ(ZFull, ConstantRange(APInt(20, 0), APInt(20, 0x10000)));
- EXPECT_TRUE(ZEmpty.isEmptySet());
- EXPECT_EQ(ZOne, ConstantRange(APInt(One.getLower()).zext(20),
- APInt(One.getUpper()).zext(20)));
- EXPECT_EQ(ZSome, ConstantRange(APInt(Some.getLower()).zext(20),
- APInt(Some.getUpper()).zext(20)));
- EXPECT_EQ(ZWrap, ConstantRange(APInt(Wrap.getLower()).zext(20),
- APInt(Wrap.getUpper()).zext(20)));
-}
-
-TEST_F(ConstantRangeTest, SExt) {
- ConstantRange SFull = Full.signExtend(20);
- ConstantRange SEmpty = Empty.signExtend(20);
- ConstantRange SOne = One.signExtend(20);
- ConstantRange SSome = Some.signExtend(20);
- ConstantRange SWrap = Wrap.signExtend(20);
- EXPECT_EQ(SFull, ConstantRange(APInt(20, (uint64_t)INT16_MIN, true),
- APInt(20, INT16_MAX + 1, true)));
- EXPECT_TRUE(SEmpty.isEmptySet());
- EXPECT_EQ(SOne, ConstantRange(APInt(One.getLower()).sext(20),
- APInt(One.getUpper()).sext(20)));
- EXPECT_EQ(SSome, ConstantRange(APInt(Some.getLower()).sext(20),
- APInt(Some.getUpper()).sext(20)));
- EXPECT_EQ(SWrap, ConstantRange(APInt(Wrap.getLower()).sext(20),
- APInt(Wrap.getUpper()).sext(20)));
-}
-
-TEST_F(ConstantRangeTest, IntersectWith) {
- EXPECT_EQ(Empty.intersectWith(Full), Empty);
- EXPECT_EQ(Empty.intersectWith(Empty), Empty);
- EXPECT_EQ(Empty.intersectWith(One), Empty);
- EXPECT_EQ(Empty.intersectWith(Some), Empty);
- EXPECT_EQ(Empty.intersectWith(Wrap), Empty);
- EXPECT_EQ(Full.intersectWith(Full), Full);
- EXPECT_EQ(Some.intersectWith(Some), Some);
- EXPECT_EQ(Some.intersectWith(One), One);
- EXPECT_EQ(Full.intersectWith(One), One);
- EXPECT_EQ(Full.intersectWith(Some), Some);
- EXPECT_EQ(Some.intersectWith(Wrap), Empty);
- EXPECT_EQ(One.intersectWith(Wrap), Empty);
- EXPECT_EQ(One.intersectWith(Wrap), Wrap.intersectWith(One));
-
- // Klee generated testcase from PR4545.
- // The intersection of i16 [4, 2) and [6, 5) is disjoint, looking like
- // 01..4.6789ABCDEF where the dots represent values not in the intersection.
- ConstantRange LHS(APInt(16, 4), APInt(16, 2));
- ConstantRange RHS(APInt(16, 6), APInt(16, 5));
- EXPECT_TRUE(LHS.intersectWith(RHS) == LHS);
-}
-
-TEST_F(ConstantRangeTest, UnionWith) {
- EXPECT_EQ(Wrap.unionWith(One),
- ConstantRange(APInt(16, 0xaaa), APInt(16, 0xb)));
- EXPECT_EQ(One.unionWith(Wrap), Wrap.unionWith(One));
- EXPECT_EQ(Empty.unionWith(Empty), Empty);
- EXPECT_EQ(Full.unionWith(Full), Full);
- EXPECT_EQ(Some.unionWith(Wrap), Full);
-
- // PR4545
- EXPECT_EQ(ConstantRange(APInt(16, 14), APInt(16, 1)).unionWith(
- ConstantRange(APInt(16, 0), APInt(16, 8))),
- ConstantRange(APInt(16, 14), APInt(16, 8)));
- EXPECT_EQ(ConstantRange(APInt(16, 6), APInt(16, 4)).unionWith(
- ConstantRange(APInt(16, 4), APInt(16, 0))),
- ConstantRange(16));
- EXPECT_EQ(ConstantRange(APInt(16, 1), APInt(16, 0)).unionWith(
- ConstantRange(APInt(16, 2), APInt(16, 1))),
- ConstantRange(16));
-}
-
-TEST_F(ConstantRangeTest, SubtractAPInt) {
- EXPECT_EQ(Full.subtract(APInt(16, 4)), Full);
- EXPECT_EQ(Empty.subtract(APInt(16, 4)), Empty);
- EXPECT_EQ(Some.subtract(APInt(16, 4)),
- ConstantRange(APInt(16, 0x6), APInt(16, 0xaa6)));
- EXPECT_EQ(Wrap.subtract(APInt(16, 4)),
- ConstantRange(APInt(16, 0xaa6), APInt(16, 0x6)));
- EXPECT_EQ(One.subtract(APInt(16, 4)),
- ConstantRange(APInt(16, 0x6)));
-}
-
-TEST_F(ConstantRangeTest, Add) {
- EXPECT_EQ(Full.add(APInt(16, 4)), Full);
- EXPECT_EQ(Full.add(Full), Full);
- EXPECT_EQ(Full.add(Empty), Empty);
- EXPECT_EQ(Full.add(One), Full);
- EXPECT_EQ(Full.add(Some), Full);
- EXPECT_EQ(Full.add(Wrap), Full);
- EXPECT_EQ(Empty.add(Empty), Empty);
- EXPECT_EQ(Empty.add(One), Empty);
- EXPECT_EQ(Empty.add(Some), Empty);
- EXPECT_EQ(Empty.add(Wrap), Empty);
- EXPECT_EQ(Empty.add(APInt(16, 4)), Empty);
- EXPECT_EQ(Some.add(APInt(16, 4)),
- ConstantRange(APInt(16, 0xe), APInt(16, 0xaae)));
- EXPECT_EQ(Wrap.add(APInt(16, 4)),
- ConstantRange(APInt(16, 0xaae), APInt(16, 0xe)));
- EXPECT_EQ(One.add(APInt(16, 4)),
- ConstantRange(APInt(16, 0xe)));
-}
-
-TEST_F(ConstantRangeTest, Multiply) {
- EXPECT_EQ(Full.multiply(Full), Full);
- EXPECT_EQ(Full.multiply(Empty), Empty);
- EXPECT_EQ(Full.multiply(One), Full);
- EXPECT_EQ(Full.multiply(Some), Full);
- EXPECT_EQ(Full.multiply(Wrap), Full);
- EXPECT_EQ(Empty.multiply(Empty), Empty);
- EXPECT_EQ(Empty.multiply(One), Empty);
- EXPECT_EQ(Empty.multiply(Some), Empty);
- EXPECT_EQ(Empty.multiply(Wrap), Empty);
- EXPECT_EQ(One.multiply(One), ConstantRange(APInt(16, 0xa*0xa),
- APInt(16, 0xa*0xa + 1)));
- EXPECT_EQ(One.multiply(Some), ConstantRange(APInt(16, 0xa*0xa),
- APInt(16, 0xa*0xaa9 + 1)));
- EXPECT_EQ(One.multiply(Wrap), Full);
- EXPECT_EQ(Some.multiply(Some), Full);
- EXPECT_EQ(Some.multiply(Wrap), Full);
- EXPECT_EQ(Wrap.multiply(Wrap), Full);
-
- // http://llvm.org/PR4545
- EXPECT_EQ(ConstantRange(APInt(4, 1), APInt(4, 6)).multiply(
- ConstantRange(APInt(4, 6), APInt(4, 2))),
- ConstantRange(4, /*isFullSet=*/true));
-}
-
-TEST_F(ConstantRangeTest, UMax) {
- EXPECT_EQ(Full.umax(Full), Full);
- EXPECT_EQ(Full.umax(Empty), Empty);
- EXPECT_EQ(Full.umax(Some), ConstantRange(APInt(16, 0xa), APInt(16, 0)));
- EXPECT_EQ(Full.umax(Wrap), Full);
- EXPECT_EQ(Full.umax(Some), ConstantRange(APInt(16, 0xa), APInt(16, 0)));
- EXPECT_EQ(Empty.umax(Empty), Empty);
- EXPECT_EQ(Empty.umax(Some), Empty);
- EXPECT_EQ(Empty.umax(Wrap), Empty);
- EXPECT_EQ(Empty.umax(One), Empty);
- EXPECT_EQ(Some.umax(Some), Some);
- EXPECT_EQ(Some.umax(Wrap), ConstantRange(APInt(16, 0xa), APInt(16, 0)));
- EXPECT_EQ(Some.umax(One), Some);
- // TODO: ConstantRange is currently over-conservative here.
- EXPECT_EQ(Wrap.umax(Wrap), Full);
- EXPECT_EQ(Wrap.umax(One), ConstantRange(APInt(16, 0xa), APInt(16, 0)));
- EXPECT_EQ(One.umax(One), One);
-}
-
-TEST_F(ConstantRangeTest, SMax) {
- EXPECT_EQ(Full.smax(Full), Full);
- EXPECT_EQ(Full.smax(Empty), Empty);
- EXPECT_EQ(Full.smax(Some), ConstantRange(APInt(16, 0xa),
- APInt::getSignedMinValue(16)));
- EXPECT_EQ(Full.smax(Wrap), Full);
- EXPECT_EQ(Full.smax(One), ConstantRange(APInt(16, 0xa),
- APInt::getSignedMinValue(16)));
- EXPECT_EQ(Empty.smax(Empty), Empty);
- EXPECT_EQ(Empty.smax(Some), Empty);
- EXPECT_EQ(Empty.smax(Wrap), Empty);
- EXPECT_EQ(Empty.smax(One), Empty);
- EXPECT_EQ(Some.smax(Some), Some);
- EXPECT_EQ(Some.smax(Wrap), ConstantRange(APInt(16, 0xa),
- APInt(16, (uint64_t)INT16_MIN)));
- EXPECT_EQ(Some.smax(One), Some);
- EXPECT_EQ(Wrap.smax(One), ConstantRange(APInt(16, 0xa),
- APInt(16, (uint64_t)INT16_MIN)));
- EXPECT_EQ(One.smax(One), One);
-}
-
-TEST_F(ConstantRangeTest, UDiv) {
- EXPECT_EQ(Full.udiv(Full), Full);
- EXPECT_EQ(Full.udiv(Empty), Empty);
- EXPECT_EQ(Full.udiv(One), ConstantRange(APInt(16, 0),
- APInt(16, 0xffff / 0xa + 1)));
- EXPECT_EQ(Full.udiv(Some), ConstantRange(APInt(16, 0),
- APInt(16, 0xffff / 0xa + 1)));
- EXPECT_EQ(Full.udiv(Wrap), Full);
- EXPECT_EQ(Empty.udiv(Empty), Empty);
- EXPECT_EQ(Empty.udiv(One), Empty);
- EXPECT_EQ(Empty.udiv(Some), Empty);
- EXPECT_EQ(Empty.udiv(Wrap), Empty);
- EXPECT_EQ(One.udiv(One), ConstantRange(APInt(16, 1)));
- EXPECT_EQ(One.udiv(Some), ConstantRange(APInt(16, 0), APInt(16, 2)));
- EXPECT_EQ(One.udiv(Wrap), ConstantRange(APInt(16, 0), APInt(16, 0xb)));
- EXPECT_EQ(Some.udiv(Some), ConstantRange(APInt(16, 0), APInt(16, 0x111)));
- EXPECT_EQ(Some.udiv(Wrap), ConstantRange(APInt(16, 0), APInt(16, 0xaaa)));
- EXPECT_EQ(Wrap.udiv(Wrap), Full);
-}
-
-} // anonymous namespace
diff --git a/libclamav/c++/llvm/unittests/Support/LeakDetectorTest.cpp b/libclamav/c++/llvm/unittests/Support/LeakDetectorTest.cpp
deleted file mode 100644
index d198c7a..0000000
--- a/libclamav/c++/llvm/unittests/Support/LeakDetectorTest.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-//===- llvm/unittest/LeakDetector/LeakDetector.cpp - LeakDetector tests ---===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/Support/LeakDetector.h"
-
-using namespace llvm;
-
-namespace {
-
-#ifdef GTEST_HAS_DEATH_TEST
-#ifndef NDEBUG
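-// Death tests require assertions to be enabled, so they only run in
-// !NDEBUG builds with death-test support.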
-TEST(LeakDetector, Death1) {
- LeakDetector::addGarbageObject((void*) 1);
- LeakDetector::addGarbageObject((void*) 2);
-
- EXPECT_DEATH(LeakDetector::addGarbageObject((void*) 1),
- ".*Ts.count\\(o\\) == 0 && \"Object already in set!\"");
- EXPECT_DEATH(LeakDetector::addGarbageObject((void*) 2),
- "Cache != o && \"Object already in set!\"");
-}
-#endif
-#endif
-
-}
diff --git a/libclamav/c++/llvm/unittests/Support/Makefile b/libclamav/c++/llvm/unittests/Support/Makefile
deleted file mode 100644
index 815bdd2..0000000
--- a/libclamav/c++/llvm/unittests/Support/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- unittests/Support/Makefile --------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TESTNAME = Support
-LINK_COMPONENTS := core support
-
-include $(LEVEL)/Makefile.config
-include $(LLVM_SRC_ROOT)/unittests/Makefile.unittest
diff --git a/libclamav/c++/llvm/unittests/Support/MathExtrasTest.cpp b/libclamav/c++/llvm/unittests/Support/MathExtrasTest.cpp
deleted file mode 100644
index 3db1f77..0000000
--- a/libclamav/c++/llvm/unittests/Support/MathExtrasTest.cpp
+++ /dev/null
@@ -1,104 +0,0 @@
-//===- unittests/Support/MathExtrasTest.cpp - math utils tests ------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/Support/MathExtras.h"
-
-using namespace llvm;
-
-namespace {
-
-TEST(MathExtras, isPowerOf2_32) {
- EXPECT_TRUE(isPowerOf2_32(1 << 6));
- EXPECT_TRUE(isPowerOf2_32(1 << 12));
- EXPECT_FALSE(isPowerOf2_32((1 << 19) + 3));
- EXPECT_FALSE(isPowerOf2_32(0xABCDEF0));
-}
-
-TEST(MathExtras, isPowerOf2_64) {
- EXPECT_TRUE(isPowerOf2_64(1LL << 46));
- EXPECT_TRUE(isPowerOf2_64(1LL << 12));
- EXPECT_FALSE(isPowerOf2_64((1LL << 53) + 3));
- EXPECT_FALSE(isPowerOf2_64(0xABCDEF0ABCDEF0LL));
-}
-
-TEST(MathExtras, ByteSwap_32) {
- EXPECT_EQ(0x44332211u, ByteSwap_32(0x11223344));
- EXPECT_EQ(0xDDCCBBAAu, ByteSwap_32(0xAABBCCDD));
-}
-
-TEST(MathExtras, ByteSwap_64) {
- EXPECT_EQ(0x8877665544332211ULL, ByteSwap_64(0x1122334455667788LL));
- EXPECT_EQ(0x1100FFEEDDCCBBAAULL, ByteSwap_64(0xAABBCCDDEEFF0011LL));
-}
-
-TEST(MathExtras, CountLeadingZeros_32) {
- EXPECT_EQ(8u, CountLeadingZeros_32(0x00F000FF));
- EXPECT_EQ(8u, CountLeadingZeros_32(0x00F12345));
- for (unsigned i = 0; i <= 30; ++i) {
- EXPECT_EQ(31 - i, CountLeadingZeros_32(1 << i));
- }
-}
-
-TEST(MathExtras, CountLeadingZeros_64) {
- EXPECT_EQ(8u, CountLeadingZeros_64(0x00F1234500F12345LL));
- EXPECT_EQ(1u, CountLeadingZeros_64(1LL << 62));
- for (unsigned i = 0; i <= 62; ++i) {
- EXPECT_EQ(63 - i, CountLeadingZeros_64(1LL << i));
- }
-}
-
-TEST(MathExtras, CountLeadingOnes_32) {
- for (int i = 30; i >= 0; --i) {
- // Start with all ones and unset some bit.
- EXPECT_EQ(31u - i, CountLeadingOnes_32(0xFFFFFFFF ^ (1 << i)));
- }
-}
-
-TEST(MathExtras, CountLeadingOnes_64) {
- for (int i = 62; i >= 0; --i) {
- // Start with all ones and unset some bit.
- EXPECT_EQ(63u - i, CountLeadingOnes_64(0xFFFFFFFFFFFFFFFFLL ^ (1LL << i)));
- }
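- // Also re-exercise the 32-bit variant (duplicates the test above).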
- for (int i = 30; i >= 0; --i) {
- // Start with all ones and unset some bit.
- EXPECT_EQ(31u - i, CountLeadingOnes_32(0xFFFFFFFF ^ (1 << i)));
- }
-}
-
-TEST(MathExtras, FloatBits) {
- static const float kValue = 5632.34;
- EXPECT_FLOAT_EQ(kValue, BitsToFloat(FloatToBits(kValue)));
-}
-
-TEST(MathExtras, DoubleBits) {
- static const double kValue = 87987234.983498;
- EXPECT_FLOAT_EQ(kValue, BitsToDouble(DoubleToBits(kValue)));
-}
-
-TEST(MathExtras, MinAlign) {
- EXPECT_EQ(1u, MinAlign(2, 3));
- EXPECT_EQ(2u, MinAlign(2, 4));
- EXPECT_EQ(1u, MinAlign(17, 64));
- EXPECT_EQ(256u, MinAlign(256, 512));
-}
-
-TEST(MathExtras, NextPowerOf2) {
- EXPECT_EQ(4u, NextPowerOf2(3));
- EXPECT_EQ(16u, NextPowerOf2(15));
- EXPECT_EQ(256u, NextPowerOf2(128));
-}
-
-TEST(MathExtras, RoundUpToAlignment) {
- EXPECT_EQ(8u, RoundUpToAlignment(5, 8));
- EXPECT_EQ(24u, RoundUpToAlignment(17, 8));
- EXPECT_EQ(0u, RoundUpToAlignment(~0LL, 8));
-}
-
-}
diff --git a/libclamav/c++/llvm/unittests/Support/RegexTest.cpp b/libclamav/c++/llvm/unittests/Support/RegexTest.cpp
deleted file mode 100644
index 65b66c3..0000000
--- a/libclamav/c++/llvm/unittests/Support/RegexTest.cpp
+++ /dev/null
@@ -1,94 +0,0 @@
-//===- llvm/unittest/Support/RegexTest.cpp - Regex tests --===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/Support/Regex.h"
-#include "llvm/ADT/SmallVector.h"
-#include <cstring>
-
-using namespace llvm;
-namespace {
-
-class RegexTest : public ::testing::Test {
-};
-
-TEST_F(RegexTest, Basics) {
- Regex r1("^[0-9]+$");
- EXPECT_TRUE(r1.match("916"));
- EXPECT_TRUE(r1.match("9"));
- EXPECT_FALSE(r1.match("9a"));
-
- SmallVector<StringRef, 1> Matches;
- Regex r2("[0-9]+");
- EXPECT_TRUE(r2.match("aa216b", &Matches));
- EXPECT_EQ(1u, Matches.size());
- EXPECT_EQ("216", Matches[0].str());
-
- Regex r3("[0-9]+([a-f])?:([0-9]+)");
- EXPECT_TRUE(r3.match("9a:513b", &Matches));
- EXPECT_EQ(3u, Matches.size());
- EXPECT_EQ("9a:513", Matches[0].str());
- EXPECT_EQ("a", Matches[1].str());
- EXPECT_EQ("513", Matches[2].str());
-
- EXPECT_TRUE(r3.match("9:513b", &Matches));
- EXPECT_EQ(3u, Matches.size());
- EXPECT_EQ("9:513", Matches[0].str());
- EXPECT_EQ("", Matches[1].str());
- EXPECT_EQ("513", Matches[2].str());
-
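- // Embedded NUL bytes in both inputs and patterns are treated as ordinary
- // characters.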
- Regex r4("a[^b]+b");
- std::string String="axxb";
- String[2] = '\0';
- EXPECT_FALSE(r4.match("abb"));
- EXPECT_TRUE(r4.match(String, &Matches));
- EXPECT_EQ(1u, Matches.size());
- EXPECT_EQ(String, Matches[0].str());
-
-
- std::string NulPattern="X[0-9]+X([a-f])?:([0-9]+)";
- String="YX99a:513b";
- NulPattern[7] = '\0';
- Regex r5(NulPattern);
- EXPECT_FALSE(r5.match(String));
- EXPECT_FALSE(r5.match("X9"));
- String[3]='\0';
- EXPECT_TRUE(r5.match(String));
-}
-
-TEST_F(RegexTest, Substitution) {
- std::string Error;
-
- EXPECT_EQ("aNUMber", Regex("[0-9]+").sub("NUM", "a1234ber"));
-
- // Standard Escapes
- EXPECT_EQ("a\\ber", Regex("[0-9]+").sub("\\\\", "a1234ber", &Error));
- EXPECT_EQ(Error, "");
- EXPECT_EQ("a\nber", Regex("[0-9]+").sub("\\n", "a1234ber", &Error));
- EXPECT_EQ(Error, "");
- EXPECT_EQ("a\tber", Regex("[0-9]+").sub("\\t", "a1234ber", &Error));
- EXPECT_EQ(Error, "");
- EXPECT_EQ("ajber", Regex("[0-9]+").sub("\\j", "a1234ber", &Error));
- EXPECT_EQ(Error, "");
-
- EXPECT_EQ("aber", Regex("[0-9]+").sub("\\", "a1234ber", &Error));
- EXPECT_EQ(Error, "replacement string contained trailing backslash");
-
- // Backreferences
- EXPECT_EQ("aa1234bber", Regex("a[0-9]+b").sub("a\\0b", "a1234ber", &Error));
- EXPECT_EQ(Error, "");
-
- EXPECT_EQ("a1234ber", Regex("a([0-9]+)b").sub("a\\1b", "a1234ber", &Error));
- EXPECT_EQ(Error, "");
-
- EXPECT_EQ("aber", Regex("a[0-9]+b").sub("a\\100b", "a1234ber", &Error));
- EXPECT_EQ(Error, "invalid backreference string '100'");
-}
-
-}
diff --git a/libclamav/c++/llvm/unittests/Support/System.cpp b/libclamav/c++/llvm/unittests/Support/System.cpp
deleted file mode 100644
index b3dd17d..0000000
--- a/libclamav/c++/llvm/unittests/Support/System.cpp
+++ /dev/null
@@ -1,16 +0,0 @@
-//===- llvm/unittest/Support/System.cpp - System tests --===//
-#include "gtest/gtest.h"
-#include "llvm/System/TimeValue.h"
-#include <time.h>
-
-using namespace llvm;
-namespace {
-class SystemTest : public ::testing::Test {
-};
-
-TEST_F(SystemTest, TimeValue) {
- sys::TimeValue now = sys::TimeValue::now();
- time_t now_t = time(NULL);
- EXPECT_TRUE(abs(now_t - now.toEpochTime()) < 2);
-}
-}
diff --git a/libclamav/c++/llvm/unittests/Support/TypeBuilderTest.cpp b/libclamav/c++/llvm/unittests/Support/TypeBuilderTest.cpp
deleted file mode 100644
index e805827..0000000
--- a/libclamav/c++/llvm/unittests/Support/TypeBuilderTest.cpp
+++ /dev/null
@@ -1,253 +0,0 @@
-//===- llvm/unittest/Support/TypeBuilderTest.cpp - TypeBuilder tests -----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/TypeBuilder.h"
-#include "llvm/LLVMContext.h"
-
-#include "gtest/gtest.h"
-
-using namespace llvm;
-
-namespace {
-
-TEST(TypeBuilderTest, Void) {
- EXPECT_EQ(Type::getVoidTy(getGlobalContext()), (TypeBuilder<void, true>::get(getGlobalContext())));
- EXPECT_EQ(Type::getVoidTy(getGlobalContext()), (TypeBuilder<void, false>::get(getGlobalContext())));
- // Special cases for C compatibility:
- EXPECT_EQ(Type::getInt8PtrTy(getGlobalContext()),
- (TypeBuilder<void*, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getInt8PtrTy(getGlobalContext()),
- (TypeBuilder<const void*, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getInt8PtrTy(getGlobalContext()),
- (TypeBuilder<volatile void*, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getInt8PtrTy(getGlobalContext()),
- (TypeBuilder<const volatile void*, false>::get(
- getGlobalContext())));
-}
-
-TEST(TypeBuilderTest, HostIntegers) {
- EXPECT_EQ(Type::getInt8Ty(getGlobalContext()), (TypeBuilder<int8_t, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getInt8Ty(getGlobalContext()), (TypeBuilder<uint8_t, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getInt16Ty(getGlobalContext()), (TypeBuilder<int16_t, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getInt16Ty(getGlobalContext()), (TypeBuilder<uint16_t, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getInt32Ty(getGlobalContext()), (TypeBuilder<int32_t, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getInt32Ty(getGlobalContext()), (TypeBuilder<uint32_t, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getInt64Ty(getGlobalContext()), (TypeBuilder<int64_t, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getInt64Ty(getGlobalContext()), (TypeBuilder<uint64_t, false>::get(getGlobalContext())));
-
- EXPECT_EQ(IntegerType::get(getGlobalContext(), sizeof(size_t) * CHAR_BIT),
- (TypeBuilder<size_t, false>::get(getGlobalContext())));
- EXPECT_EQ(IntegerType::get(getGlobalContext(), sizeof(ptrdiff_t) * CHAR_BIT),
- (TypeBuilder<ptrdiff_t, false>::get(getGlobalContext())));
-}
-
-TEST(TypeBuilderTest, CrossCompilableIntegers) {
- EXPECT_EQ(IntegerType::get(getGlobalContext(), 1), (TypeBuilder<types::i<1>, true>::get(getGlobalContext())));
- EXPECT_EQ(IntegerType::get(getGlobalContext(), 1), (TypeBuilder<types::i<1>, false>::get(getGlobalContext())));
- EXPECT_EQ(IntegerType::get(getGlobalContext(), 72), (TypeBuilder<types::i<72>, true>::get(getGlobalContext())));
- EXPECT_EQ(IntegerType::get(getGlobalContext(), 72), (TypeBuilder<types::i<72>, false>::get(getGlobalContext())));
-}
-
-TEST(TypeBuilderTest, Float) {
- EXPECT_EQ(Type::getFloatTy(getGlobalContext()), (TypeBuilder<float, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getDoubleTy(getGlobalContext()), (TypeBuilder<double, false>::get(getGlobalContext())));
- // long double isn't supported yet.
- EXPECT_EQ(Type::getFloatTy(getGlobalContext()), (TypeBuilder<types::ieee_float, true>::get(getGlobalContext())));
- EXPECT_EQ(Type::getFloatTy(getGlobalContext()), (TypeBuilder<types::ieee_float, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getDoubleTy(getGlobalContext()), (TypeBuilder<types::ieee_double, true>::get(getGlobalContext())));
- EXPECT_EQ(Type::getDoubleTy(getGlobalContext()), (TypeBuilder<types::ieee_double, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getX86_FP80Ty(getGlobalContext()), (TypeBuilder<types::x86_fp80, true>::get(getGlobalContext())));
- EXPECT_EQ(Type::getX86_FP80Ty(getGlobalContext()), (TypeBuilder<types::x86_fp80, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getFP128Ty(getGlobalContext()), (TypeBuilder<types::fp128, true>::get(getGlobalContext())));
- EXPECT_EQ(Type::getFP128Ty(getGlobalContext()), (TypeBuilder<types::fp128, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getPPC_FP128Ty(getGlobalContext()), (TypeBuilder<types::ppc_fp128, true>::get(getGlobalContext())));
- EXPECT_EQ(Type::getPPC_FP128Ty(getGlobalContext()), (TypeBuilder<types::ppc_fp128, false>::get(getGlobalContext())));
-}
-
-TEST(TypeBuilderTest, Derived) {
- EXPECT_EQ(PointerType::getUnqual(Type::getInt8PtrTy(getGlobalContext())),
- (TypeBuilder<int8_t**, false>::get(getGlobalContext())));
- EXPECT_EQ(ArrayType::get(Type::getInt8Ty(getGlobalContext()), 7),
- (TypeBuilder<int8_t[7], false>::get(getGlobalContext())));
- EXPECT_EQ(ArrayType::get(Type::getInt8Ty(getGlobalContext()), 0),
- (TypeBuilder<int8_t[], false>::get(getGlobalContext())));
-
- EXPECT_EQ(PointerType::getUnqual(Type::getInt8PtrTy(getGlobalContext())),
- (TypeBuilder<types::i<8>**, false>::get(getGlobalContext())));
- EXPECT_EQ(ArrayType::get(Type::getInt8Ty(getGlobalContext()), 7),
- (TypeBuilder<types::i<8>[7], false>::get(getGlobalContext())));
- EXPECT_EQ(ArrayType::get(Type::getInt8Ty(getGlobalContext()), 0),
- (TypeBuilder<types::i<8>[], false>::get(getGlobalContext())));
-
- EXPECT_EQ(PointerType::getUnqual(Type::getInt8PtrTy(getGlobalContext())),
- (TypeBuilder<types::i<8>**, true>::get(getGlobalContext())));
- EXPECT_EQ(ArrayType::get(Type::getInt8Ty(getGlobalContext()), 7),
- (TypeBuilder<types::i<8>[7], true>::get(getGlobalContext())));
- EXPECT_EQ(ArrayType::get(Type::getInt8Ty(getGlobalContext()), 0),
- (TypeBuilder<types::i<8>[], true>::get(getGlobalContext())));
-
-
- EXPECT_EQ(Type::getInt8Ty(getGlobalContext()),
- (TypeBuilder<const int8_t, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getInt8Ty(getGlobalContext()),
- (TypeBuilder<volatile int8_t, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getInt8Ty(getGlobalContext()),
- (TypeBuilder<const volatile int8_t, false>::get(getGlobalContext())));
-
- EXPECT_EQ(Type::getInt8Ty(getGlobalContext()),
- (TypeBuilder<const types::i<8>, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getInt8Ty(getGlobalContext()),
- (TypeBuilder<volatile types::i<8>, false>::get(getGlobalContext())));
- EXPECT_EQ(Type::getInt8Ty(getGlobalContext()),
- (TypeBuilder<const volatile types::i<8>, false>::get(getGlobalContext())));
-
- EXPECT_EQ(Type::getInt8Ty(getGlobalContext()),
- (TypeBuilder<const types::i<8>, true>::get(getGlobalContext())));
- EXPECT_EQ(Type::getInt8Ty(getGlobalContext()),
- (TypeBuilder<volatile types::i<8>, true>::get(getGlobalContext())));
- EXPECT_EQ(Type::getInt8Ty(getGlobalContext()),
- (TypeBuilder<const volatile types::i<8>, true>::get(getGlobalContext())));
-
- EXPECT_EQ(Type::getInt8PtrTy(getGlobalContext()),
- (TypeBuilder<const volatile int8_t*const volatile, false>::get(getGlobalContext())));
-}
-
-TEST(TypeBuilderTest, Functions) {
- std::vector<const Type*> params;
- EXPECT_EQ(FunctionType::get(Type::getVoidTy(getGlobalContext()), params, false),
- (TypeBuilder<void(), true>::get(getGlobalContext())));
- EXPECT_EQ(FunctionType::get(Type::getInt8Ty(getGlobalContext()), params, true),
- (TypeBuilder<int8_t(...), false>::get(getGlobalContext())));
- params.push_back(TypeBuilder<int32_t*, false>::get(getGlobalContext()));
- EXPECT_EQ(FunctionType::get(Type::getInt8Ty(getGlobalContext()), params, false),
- (TypeBuilder<int8_t(const int32_t*), false>::get(getGlobalContext())));
- EXPECT_EQ(FunctionType::get(Type::getInt8Ty(getGlobalContext()), params, true),
- (TypeBuilder<int8_t(const int32_t*, ...), false>::get(getGlobalContext())));
- params.push_back(TypeBuilder<char*, false>::get(getGlobalContext()));
- EXPECT_EQ(FunctionType::get(Type::getInt8Ty(getGlobalContext()), params, false),
- (TypeBuilder<int8_t(int32_t*, void*), false>::get(getGlobalContext())));
- EXPECT_EQ(FunctionType::get(Type::getInt8Ty(getGlobalContext()), params, true),
- (TypeBuilder<int8_t(int32_t*, char*, ...), false>::get(getGlobalContext())));
- params.push_back(TypeBuilder<char, false>::get(getGlobalContext()));
- EXPECT_EQ(FunctionType::get(Type::getInt8Ty(getGlobalContext()), params, false),
- (TypeBuilder<int8_t(int32_t*, void*, char), false>::get(getGlobalContext())));
- EXPECT_EQ(FunctionType::get(Type::getInt8Ty(getGlobalContext()), params, true),
- (TypeBuilder<int8_t(int32_t*, char*, char, ...), false>::get(getGlobalContext())));
- params.push_back(TypeBuilder<char, false>::get(getGlobalContext()));
- EXPECT_EQ(FunctionType::get(Type::getInt8Ty(getGlobalContext()), params, false),
- (TypeBuilder<int8_t(int32_t*, void*, char, char), false>::get(getGlobalContext())));
- EXPECT_EQ(FunctionType::get(Type::getInt8Ty(getGlobalContext()), params, true),
- (TypeBuilder<int8_t(int32_t*, char*, char, char, ...),
- false>::get(getGlobalContext())));
- params.push_back(TypeBuilder<char, false>::get(getGlobalContext()));
- EXPECT_EQ(FunctionType::get(Type::getInt8Ty(getGlobalContext()), params, false),
- (TypeBuilder<int8_t(int32_t*, void*, char, char, char),
- false>::get(getGlobalContext())));
- EXPECT_EQ(FunctionType::get(Type::getInt8Ty(getGlobalContext()), params, true),
- (TypeBuilder<int8_t(int32_t*, char*, char, char, char, ...),
- false>::get(getGlobalContext())));
-}
-
-TEST(TypeBuilderTest, Context) {
- // We used to cache TypeBuilder results in static local variables. This
- // produced the same type for different contexts, which of course broke
- // things.
- LLVMContext context1;
- EXPECT_EQ(&context1,
- &(TypeBuilder<types::i<1>, true>::get(context1))->getContext());
- LLVMContext context2;
- EXPECT_EQ(&context2,
- &(TypeBuilder<types::i<1>, true>::get(context2))->getContext());
-}
-
-class MyType {
- int a;
- int *b;
- void *array[1];
-};
-
-class MyPortableType {
- int32_t a;
- int32_t *b;
- void *array[1];
-};
-
-} // anonymous namespace
-
-namespace llvm {
-template<bool cross> class TypeBuilder<MyType, cross> {
-public:
- static const StructType *get(LLVMContext &Context) {
- // Using the static result variable ensures that the type is
- // only looked up once.
- std::vector<const Type*> st;
- st.push_back(TypeBuilder<int, cross>::get(Context));
- st.push_back(TypeBuilder<int*, cross>::get(Context));
- st.push_back(TypeBuilder<void*[], cross>::get(Context));
- static const StructType *const result = StructType::get(Context, st);
- return result;
- }
-
- // You may find this a convenient place to put some constants
- // to help with getelementptr. They don't have any effect on
- // the operation of TypeBuilder.
- enum Fields {
- FIELD_A,
- FIELD_B,
- FIELD_ARRAY
- };
-};
-
-template<bool cross> class TypeBuilder<MyPortableType, cross> {
-public:
- static const StructType *get(LLVMContext &Context) {
- // Using the static result variable ensures that the type is
- // only looked up once.
- std::vector<const Type*> st;
- st.push_back(TypeBuilder<types::i<32>, cross>::get(Context));
- st.push_back(TypeBuilder<types::i<32>*, cross>::get(Context));
- st.push_back(TypeBuilder<types::i<8>*[], cross>::get(Context));
- static const StructType *const result = StructType::get(Context, st);
- return result;
- }
-
- // You may find this a convenient place to put some constants
- // to help with getelementptr. They don't have any effect on
- // the operation of TypeBuilder.
- enum Fields {
- FIELD_A,
- FIELD_B,
- FIELD_ARRAY
- };
-};
-} // namespace llvm
-namespace {
-
-TEST(TypeBuilderTest, Extensions) {
- EXPECT_EQ(PointerType::getUnqual(StructType::get(getGlobalContext(),
- TypeBuilder<int, false>::get(getGlobalContext()),
- TypeBuilder<int*, false>::get(getGlobalContext()),
- TypeBuilder<void*[], false>::get(getGlobalContext()),
- NULL)),
- (TypeBuilder<MyType*, false>::get(getGlobalContext())));
- EXPECT_EQ(PointerType::getUnqual(StructType::get(getGlobalContext(),
- TypeBuilder<types::i<32>, false>::get(getGlobalContext()),
- TypeBuilder<types::i<32>*, false>::get(getGlobalContext()),
- TypeBuilder<types::i<8>*[], false>::get(getGlobalContext()),
- NULL)),
- (TypeBuilder<MyPortableType*, false>::get(getGlobalContext())));
- EXPECT_EQ(PointerType::getUnqual(StructType::get(getGlobalContext(),
- TypeBuilder<types::i<32>, false>::get(getGlobalContext()),
- TypeBuilder<types::i<32>*, false>::get(getGlobalContext()),
- TypeBuilder<types::i<8>*[], false>::get(getGlobalContext()),
- NULL)),
- (TypeBuilder<MyPortableType*, true>::get(getGlobalContext())));
-}
-
-} // anonymous namespace
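
The deleted TypeBuilderTest documents the pre-3.0 TypeBuilder template, which maps C-like type expressions onto LLVM types. A short hedged sketch of that usage follows; the helper names makeCallbackType and makeI72 are illustrative only, and the code assumes the bundled LLVM 2.8 headers:

    #include "llvm/Support/TypeBuilder.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"
    #include <stdint.h>

    using namespace llvm;

    // int8_t f(const int32_t*, char, ...) expressed as an LLVM FunctionType.
    // The boolean template argument selects cross-target exact-width types
    // (true) versus host C types (false).
    const FunctionType *makeCallbackType(LLVMContext &Ctx) {
      return TypeBuilder<int8_t(const int32_t *, char, ...), false>::get(Ctx);
    }

    // types::i<N> requests an exact-width integer regardless of the host ABI.
    const IntegerType *makeI72(LLVMContext &Ctx) {
      return TypeBuilder<types::i<72>, true>::get(Ctx);
    }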
diff --git a/libclamav/c++/llvm/unittests/Support/ValueHandleTest.cpp b/libclamav/c++/llvm/unittests/Support/ValueHandleTest.cpp
deleted file mode 100644
index 6a6528f..0000000
--- a/libclamav/c++/llvm/unittests/Support/ValueHandleTest.cpp
+++ /dev/null
@@ -1,412 +0,0 @@
-//===- llvm/unittest/Support/ValueHandleTest.cpp - ValueHandle tests --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/ValueHandle.h"
-
-#include "llvm/Constants.h"
-#include "llvm/Instructions.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/ADT/OwningPtr.h"
-
-#include "gtest/gtest.h"
-
-#include <memory>
-
-using namespace llvm;
-
-namespace {
-
-class ValueHandle : public testing::Test {
-protected:
- Constant *ConstantV;
- std::auto_ptr<BitCastInst> BitcastV;
-
- ValueHandle() :
- ConstantV(ConstantInt::get(Type::getInt32Ty(getGlobalContext()), 0)),
- BitcastV(new BitCastInst(ConstantV, Type::getInt32Ty(getGlobalContext()))) {
- }
-};
-
-class ConcreteCallbackVH : public CallbackVH {
-public:
- ConcreteCallbackVH() : CallbackVH() {}
- ConcreteCallbackVH(Value *V) : CallbackVH(V) {}
-};
-
-TEST_F(ValueHandle, WeakVH_BasicOperation) {
- WeakVH WVH(BitcastV.get());
- EXPECT_EQ(BitcastV.get(), WVH);
- WVH = ConstantV;
- EXPECT_EQ(ConstantV, WVH);
-
- // Make sure I can call a method on the underlying Value. It
- // doesn't matter which method.
- EXPECT_EQ(Type::getInt32Ty(getGlobalContext()), WVH->getType());
- EXPECT_EQ(Type::getInt32Ty(getGlobalContext()), (*WVH).getType());
-}
-
-TEST_F(ValueHandle, WeakVH_Comparisons) {
- WeakVH BitcastWVH(BitcastV.get());
- WeakVH ConstantWVH(ConstantV);
-
- EXPECT_TRUE(BitcastWVH == BitcastWVH);
- EXPECT_TRUE(BitcastV.get() == BitcastWVH);
- EXPECT_TRUE(BitcastWVH == BitcastV.get());
- EXPECT_FALSE(BitcastWVH == ConstantWVH);
-
- EXPECT_TRUE(BitcastWVH != ConstantWVH);
- EXPECT_TRUE(BitcastV.get() != ConstantWVH);
- EXPECT_TRUE(BitcastWVH != ConstantV);
- EXPECT_FALSE(BitcastWVH != BitcastWVH);
-
- // Cast to Value* so comparisons work.
- Value *BV = BitcastV.get();
- Value *CV = ConstantV;
- EXPECT_EQ(BV < CV, BitcastWVH < ConstantWVH);
- EXPECT_EQ(BV <= CV, BitcastWVH <= ConstantWVH);
- EXPECT_EQ(BV > CV, BitcastWVH > ConstantWVH);
- EXPECT_EQ(BV >= CV, BitcastWVH >= ConstantWVH);
-
- EXPECT_EQ(BV < CV, BitcastV.get() < ConstantWVH);
- EXPECT_EQ(BV <= CV, BitcastV.get() <= ConstantWVH);
- EXPECT_EQ(BV > CV, BitcastV.get() > ConstantWVH);
- EXPECT_EQ(BV >= CV, BitcastV.get() >= ConstantWVH);
-
- EXPECT_EQ(BV < CV, BitcastWVH < ConstantV);
- EXPECT_EQ(BV <= CV, BitcastWVH <= ConstantV);
- EXPECT_EQ(BV > CV, BitcastWVH > ConstantV);
- EXPECT_EQ(BV >= CV, BitcastWVH >= ConstantV);
-}
-
-TEST_F(ValueHandle, WeakVH_FollowsRAUW) {
- WeakVH WVH(BitcastV.get());
- WeakVH WVH_Copy(WVH);
- WeakVH WVH_Recreated(BitcastV.get());
- BitcastV->replaceAllUsesWith(ConstantV);
- EXPECT_EQ(ConstantV, WVH);
- EXPECT_EQ(ConstantV, WVH_Copy);
- EXPECT_EQ(ConstantV, WVH_Recreated);
-}
-
-TEST_F(ValueHandle, WeakVH_NullOnDeletion) {
- WeakVH WVH(BitcastV.get());
- WeakVH WVH_Copy(WVH);
- WeakVH WVH_Recreated(BitcastV.get());
- BitcastV.reset();
- Value *null_value = NULL;
- EXPECT_EQ(null_value, WVH);
- EXPECT_EQ(null_value, WVH_Copy);
- EXPECT_EQ(null_value, WVH_Recreated);
-}
-
-
-TEST_F(ValueHandle, AssertingVH_BasicOperation) {
- AssertingVH<CastInst> AVH(BitcastV.get());
- CastInst *implicit_to_exact_type = AVH;
- implicit_to_exact_type = implicit_to_exact_type; // Avoid warning.
-
- AssertingVH<Value> GenericAVH(BitcastV.get());
- EXPECT_EQ(BitcastV.get(), GenericAVH);
- GenericAVH = ConstantV;
- EXPECT_EQ(ConstantV, GenericAVH);
-
- // Make sure I can call a method on the underlying CastInst. It
- // doesn't matter which method.
- EXPECT_FALSE(AVH->mayWriteToMemory());
- EXPECT_FALSE((*AVH).mayWriteToMemory());
-}
-
-TEST_F(ValueHandle, AssertingVH_Const) {
- const CastInst *ConstBitcast = BitcastV.get();
- AssertingVH<const CastInst> AVH(ConstBitcast);
- const CastInst *implicit_to_exact_type = AVH;
- implicit_to_exact_type = implicit_to_exact_type; // Avoid warning.
-}
-
-TEST_F(ValueHandle, AssertingVH_Comparisons) {
- AssertingVH<Value> BitcastAVH(BitcastV.get());
- AssertingVH<Value> ConstantAVH(ConstantV);
-
- EXPECT_TRUE(BitcastAVH == BitcastAVH);
- EXPECT_TRUE(BitcastV.get() == BitcastAVH);
- EXPECT_TRUE(BitcastAVH == BitcastV.get());
- EXPECT_FALSE(BitcastAVH == ConstantAVH);
-
- EXPECT_TRUE(BitcastAVH != ConstantAVH);
- EXPECT_TRUE(BitcastV.get() != ConstantAVH);
- EXPECT_TRUE(BitcastAVH != ConstantV);
- EXPECT_FALSE(BitcastAVH != BitcastAVH);
-
- // Cast to Value* so comparisons work.
- Value *BV = BitcastV.get();
- Value *CV = ConstantV;
- EXPECT_EQ(BV < CV, BitcastAVH < ConstantAVH);
- EXPECT_EQ(BV <= CV, BitcastAVH <= ConstantAVH);
- EXPECT_EQ(BV > CV, BitcastAVH > ConstantAVH);
- EXPECT_EQ(BV >= CV, BitcastAVH >= ConstantAVH);
-
- EXPECT_EQ(BV < CV, BitcastV.get() < ConstantAVH);
- EXPECT_EQ(BV <= CV, BitcastV.get() <= ConstantAVH);
- EXPECT_EQ(BV > CV, BitcastV.get() > ConstantAVH);
- EXPECT_EQ(BV >= CV, BitcastV.get() >= ConstantAVH);
-
- EXPECT_EQ(BV < CV, BitcastAVH < ConstantV);
- EXPECT_EQ(BV <= CV, BitcastAVH <= ConstantV);
- EXPECT_EQ(BV > CV, BitcastAVH > ConstantV);
- EXPECT_EQ(BV >= CV, BitcastAVH >= ConstantV);
-}
-
-TEST_F(ValueHandle, AssertingVH_DoesNotFollowRAUW) {
- AssertingVH<Value> AVH(BitcastV.get());
- BitcastV->replaceAllUsesWith(ConstantV);
- EXPECT_EQ(BitcastV.get(), AVH);
-}
-
-#ifdef NDEBUG
-
-TEST_F(ValueHandle, AssertingVH_ReducesToPointer) {
- EXPECT_EQ(sizeof(CastInst *), sizeof(AssertingVH<CastInst>));
-}
-
-#else // !NDEBUG
-
-#ifdef GTEST_HAS_DEATH_TEST
-
-TEST_F(ValueHandle, AssertingVH_Asserts) {
- AssertingVH<Value> AVH(BitcastV.get());
- EXPECT_DEATH({BitcastV.reset();},
- "An asserting value handle still pointed to this value!");
- AssertingVH<Value> Copy(AVH);
- AVH = NULL;
- EXPECT_DEATH({BitcastV.reset();},
- "An asserting value handle still pointed to this value!");
- Copy = NULL;
- BitcastV.reset();
-}
-
-#endif // GTEST_HAS_DEATH_TEST
-
-#endif // NDEBUG
-
-TEST_F(ValueHandle, CallbackVH_BasicOperation) {
- ConcreteCallbackVH CVH(BitcastV.get());
- EXPECT_EQ(BitcastV.get(), CVH);
- CVH = ConstantV;
- EXPECT_EQ(ConstantV, CVH);
-
- // Make sure I can call a method on the underlying Value. It
- // doesn't matter which method.
- EXPECT_EQ(Type::getInt32Ty(getGlobalContext()), CVH->getType());
- EXPECT_EQ(Type::getInt32Ty(getGlobalContext()), (*CVH).getType());
-}
-
-TEST_F(ValueHandle, CallbackVH_Comparisons) {
- ConcreteCallbackVH BitcastCVH(BitcastV.get());
- ConcreteCallbackVH ConstantCVH(ConstantV);
-
- EXPECT_TRUE(BitcastCVH == BitcastCVH);
- EXPECT_TRUE(BitcastV.get() == BitcastCVH);
- EXPECT_TRUE(BitcastCVH == BitcastV.get());
- EXPECT_FALSE(BitcastCVH == ConstantCVH);
-
- EXPECT_TRUE(BitcastCVH != ConstantCVH);
- EXPECT_TRUE(BitcastV.get() != ConstantCVH);
- EXPECT_TRUE(BitcastCVH != ConstantV);
- EXPECT_FALSE(BitcastCVH != BitcastCVH);
-
- // Cast to Value* so comparisons work.
- Value *BV = BitcastV.get();
- Value *CV = ConstantV;
- EXPECT_EQ(BV < CV, BitcastCVH < ConstantCVH);
- EXPECT_EQ(BV <= CV, BitcastCVH <= ConstantCVH);
- EXPECT_EQ(BV > CV, BitcastCVH > ConstantCVH);
- EXPECT_EQ(BV >= CV, BitcastCVH >= ConstantCVH);
-
- EXPECT_EQ(BV < CV, BitcastV.get() < ConstantCVH);
- EXPECT_EQ(BV <= CV, BitcastV.get() <= ConstantCVH);
- EXPECT_EQ(BV > CV, BitcastV.get() > ConstantCVH);
- EXPECT_EQ(BV >= CV, BitcastV.get() >= ConstantCVH);
-
- EXPECT_EQ(BV < CV, BitcastCVH < ConstantV);
- EXPECT_EQ(BV <= CV, BitcastCVH <= ConstantV);
- EXPECT_EQ(BV > CV, BitcastCVH > ConstantV);
- EXPECT_EQ(BV >= CV, BitcastCVH >= ConstantV);
-}
-
-TEST_F(ValueHandle, CallbackVH_CallbackOnDeletion) {
- class RecordingVH : public CallbackVH {
- public:
- int DeletedCalls;
- int AURWCalls;
-
- RecordingVH() : DeletedCalls(0), AURWCalls(0) {}
- RecordingVH(Value *V) : CallbackVH(V), DeletedCalls(0), AURWCalls(0) {}
-
- private:
- virtual void deleted() { DeletedCalls++; CallbackVH::deleted(); }
- virtual void allUsesReplacedWith(Value *) { AURWCalls++; }
- };
-
- RecordingVH RVH;
- RVH = BitcastV.get();
- EXPECT_EQ(0, RVH.DeletedCalls);
- EXPECT_EQ(0, RVH.AURWCalls);
- BitcastV.reset();
- EXPECT_EQ(1, RVH.DeletedCalls);
- EXPECT_EQ(0, RVH.AURWCalls);
-}
-
-TEST_F(ValueHandle, CallbackVH_CallbackOnRAUW) {
- class RecordingVH : public CallbackVH {
- public:
- int DeletedCalls;
- Value *AURWArgument;
-
- RecordingVH() : DeletedCalls(0), AURWArgument(NULL) {}
- RecordingVH(Value *V)
- : CallbackVH(V), DeletedCalls(0), AURWArgument(NULL) {}
-
- private:
- virtual void deleted() { DeletedCalls++; CallbackVH::deleted(); }
- virtual void allUsesReplacedWith(Value *new_value) {
- EXPECT_EQ(NULL, AURWArgument);
- AURWArgument = new_value;
- }
- };
-
- RecordingVH RVH;
- RVH = BitcastV.get();
- EXPECT_EQ(0, RVH.DeletedCalls);
- EXPECT_EQ(NULL, RVH.AURWArgument);
- BitcastV->replaceAllUsesWith(ConstantV);
- EXPECT_EQ(0, RVH.DeletedCalls);
- EXPECT_EQ(ConstantV, RVH.AURWArgument);
-}
-
-TEST_F(ValueHandle, CallbackVH_DeletionCanRAUW) {
- class RecoveringVH : public CallbackVH {
- public:
- int DeletedCalls;
- Value *AURWArgument;
- LLVMContext *Context;
-
- RecoveringVH() : DeletedCalls(0), AURWArgument(NULL),
- Context(&getGlobalContext()) {}
- RecoveringVH(Value *V)
- : CallbackVH(V), DeletedCalls(0), AURWArgument(NULL),
- Context(&getGlobalContext()) {}
-
- private:
- virtual void deleted() {
- getValPtr()->replaceAllUsesWith(Constant::getNullValue(Type::getInt32Ty(getGlobalContext())));
- setValPtr(NULL);
- }
- virtual void allUsesReplacedWith(Value *new_value) {
- ASSERT_TRUE(NULL != getValPtr());
- EXPECT_EQ(1U, getValPtr()->getNumUses());
- EXPECT_EQ(NULL, AURWArgument);
- AURWArgument = new_value;
- }
- };
-
- // Normally, if a value has uses, deleting it will crash. However, we can use
- // a CallbackVH to remove the uses before the check for no uses.
- RecoveringVH RVH;
- RVH = BitcastV.get();
- std::auto_ptr<BinaryOperator> BitcastUser(
- BinaryOperator::CreateAdd(RVH,
- Constant::getNullValue(Type::getInt32Ty(getGlobalContext()))));
- EXPECT_EQ(BitcastV.get(), BitcastUser->getOperand(0));
- BitcastV.reset(); // Would crash without the ValueHandler.
- EXPECT_EQ(Constant::getNullValue(Type::getInt32Ty(getGlobalContext())), RVH.AURWArgument);
- EXPECT_EQ(Constant::getNullValue(Type::getInt32Ty(getGlobalContext())),
- BitcastUser->getOperand(0));
-}
-
-TEST_F(ValueHandle, DestroyingOtherVHOnSameValueDoesntBreakIteration) {
- // When a CallbackVH modifies other ValueHandles in its callbacks,
- // that shouldn't interfere with non-modified ValueHandles receiving
- // their appropriate callbacks.
- //
- // We create the active CallbackVH in the middle of a palindromic
- // arrangement of other VHs so that the bad behavior would be
- // triggered in whichever order callbacks run.
-
- class DestroyingVH : public CallbackVH {
- public:
- OwningPtr<WeakVH> ToClear[2];
- DestroyingVH(Value *V) {
- ToClear[0].reset(new WeakVH(V));
- setValPtr(V);
- ToClear[1].reset(new WeakVH(V));
- }
- virtual void deleted() {
- ToClear[0].reset();
- ToClear[1].reset();
- CallbackVH::deleted();
- }
- virtual void allUsesReplacedWith(Value *) {
- ToClear[0].reset();
- ToClear[1].reset();
- }
- };
-
- {
- WeakVH ShouldBeVisited1(BitcastV.get());
- DestroyingVH C(BitcastV.get());
- WeakVH ShouldBeVisited2(BitcastV.get());
-
- BitcastV->replaceAllUsesWith(ConstantV);
- EXPECT_EQ(ConstantV, static_cast<Value*>(ShouldBeVisited1));
- EXPECT_EQ(ConstantV, static_cast<Value*>(ShouldBeVisited2));
- }
-
- {
- WeakVH ShouldBeVisited1(BitcastV.get());
- DestroyingVH C(BitcastV.get());
- WeakVH ShouldBeVisited2(BitcastV.get());
-
- BitcastV.reset();
- EXPECT_EQ(NULL, static_cast<Value*>(ShouldBeVisited1));
- EXPECT_EQ(NULL, static_cast<Value*>(ShouldBeVisited2));
- }
-}
-
-TEST_F(ValueHandle, AssertingVHCheckedLast) {
- // If a CallbackVH exists to clear out a group of AssertingVHs on
- // Value deletion, the CallbackVH should get a chance to do so
- // before the AssertingVHs assert.
-
- class ClearingVH : public CallbackVH {
- public:
- AssertingVH<Value> *ToClear[2];
- ClearingVH(Value *V,
- AssertingVH<Value> &A0, AssertingVH<Value> &A1)
- : CallbackVH(V) {
- ToClear[0] = &A0;
- ToClear[1] = &A1;
- }
-
- virtual void deleted() {
- *ToClear[0] = 0;
- *ToClear[1] = 0;
- CallbackVH::deleted();
- }
- };
-
- AssertingVH<Value> A1, A2;
- A1 = BitcastV.get();
- ClearingVH C(BitcastV.get(), A1, A2);
- A2 = BitcastV.get();
- // C.deleted() should run first, clearing the two AssertingVHs,
- // which should prevent them from asserting.
- BitcastV.reset();
-}
-
-}
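
The removed ValueHandleTest covers the three value-handle flavours in llvm/Support/ValueHandle.h: WeakVH follows replaceAllUsesWith and nulls out when its value is deleted, AssertingVH aborts (in +Asserts builds) if the value dies while still referenced, and CallbackVH delivers virtual deleted()/allUsesReplacedWith() notifications. A compressed sketch of the first two, assuming the same bundled 2.8-era headers:

    #include "llvm/Support/ValueHandle.h"
    #include "llvm/Constants.h"
    #include "llvm/Instructions.h"
    #include "llvm/LLVMContext.h"

    using namespace llvm;

    int main() {
      LLVMContext &Ctx = getGlobalContext();
      Constant *C = ConstantInt::get(Type::getInt32Ty(Ctx), 0);
      BitCastInst *BC = new BitCastInst(C, Type::getInt32Ty(Ctx));

      WeakVH Tracks(BC);            // follows RAUW; nulls when BC is deleted
      AssertingVH<Value> Guard(BC); // would abort if BC died while held

      Guard = NULL;                 // release the asserting handle first...
      delete BC;                    // ...then deletion clears the WeakVH

      Value *Left = Tracks;
      return Left == NULL ? 0 : 1;  // 0: Tracks was nulled on deletion
    }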
diff --git a/libclamav/c++/llvm/unittests/Support/raw_ostream_test.cpp b/libclamav/c++/llvm/unittests/Support/raw_ostream_test.cpp
deleted file mode 100644
index 2b797b4..0000000
--- a/libclamav/c++/llvm/unittests/Support/raw_ostream_test.cpp
+++ /dev/null
@@ -1,146 +0,0 @@
-//===- llvm/unittest/Support/raw_ostream_test.cpp - raw_ostream tests -----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/Support/Format.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-namespace {
-
-template<typename T> std::string printToString(const T &Value) {
- std::string res;
- llvm::raw_string_ostream(res) << Value;
- return res;
-}
-
-/// printToString - Print the given value to a stream which only has \arg
-/// BytesLeftInBuffer bytes left in the buffer. This is useful for testing edge
-/// cases in the buffer handling logic.
-template<typename T> std::string printToString(const T &Value,
- unsigned BytesLeftInBuffer) {
- // FIXME: This is relying on internal knowledge of how raw_ostream works to
- // get the buffer position right.
- SmallString<256> SVec;
- assert(BytesLeftInBuffer < 256 && "Invalid buffer count!");
- llvm::raw_svector_ostream OS(SVec);
- unsigned StartIndex = 256 - BytesLeftInBuffer;
- for (unsigned i = 0; i != StartIndex; ++i)
- OS << '?';
- OS << Value;
- return OS.str().substr(StartIndex);
-}
-
-template<typename T> std::string printToStringUnbuffered(const T &Value) {
- std::string res;
- llvm::raw_string_ostream OS(res);
- OS.SetUnbuffered();
- OS << Value;
- return res;
-}
-
-TEST(raw_ostreamTest, Types_Buffered) {
- // Char
- EXPECT_EQ("c", printToString('c'));
-
- // String
- EXPECT_EQ("hello", printToString("hello"));
- EXPECT_EQ("hello", printToString(std::string("hello")));
-
- // Int
- EXPECT_EQ("0", printToString(0));
- EXPECT_EQ("2425", printToString(2425));
- EXPECT_EQ("-2425", printToString(-2425));
-
- // Long long
- EXPECT_EQ("0", printToString(0LL));
- EXPECT_EQ("257257257235709", printToString(257257257235709LL));
- EXPECT_EQ("-257257257235709", printToString(-257257257235709LL));
-
- // Double
- EXPECT_EQ("1.100000e+00", printToString(1.1));
-
- // void*
- EXPECT_EQ("0x0", printToString((void*) 0));
- EXPECT_EQ("0xbeef", printToString((void*) 0xbeef));
- EXPECT_EQ("0xdeadbeef", printToString((void*) 0xdeadbeef));
-
- // Min and max.
- EXPECT_EQ("18446744073709551615", printToString(UINT64_MAX));
- EXPECT_EQ("-9223372036854775808", printToString(INT64_MIN));
-}
-
-TEST(raw_ostreamTest, Types_Unbuffered) {
- // Char
- EXPECT_EQ("c", printToStringUnbuffered('c'));
-
- // String
- EXPECT_EQ("hello", printToStringUnbuffered("hello"));
- EXPECT_EQ("hello", printToStringUnbuffered(std::string("hello")));
-
- // Int
- EXPECT_EQ("0", printToStringUnbuffered(0));
- EXPECT_EQ("2425", printToStringUnbuffered(2425));
- EXPECT_EQ("-2425", printToStringUnbuffered(-2425));
-
- // Long long
- EXPECT_EQ("0", printToStringUnbuffered(0LL));
- EXPECT_EQ("257257257235709", printToStringUnbuffered(257257257235709LL));
- EXPECT_EQ("-257257257235709", printToStringUnbuffered(-257257257235709LL));
-
- // Double
- EXPECT_EQ("1.100000e+00", printToStringUnbuffered(1.1));
-
- // void*
- EXPECT_EQ("0x0", printToStringUnbuffered((void*) 0));
- EXPECT_EQ("0xbeef", printToStringUnbuffered((void*) 0xbeef));
- EXPECT_EQ("0xdeadbeef", printToStringUnbuffered((void*) 0xdeadbeef));
-
- // Min and max.
- EXPECT_EQ("18446744073709551615", printToStringUnbuffered(UINT64_MAX));
- EXPECT_EQ("-9223372036854775808", printToStringUnbuffered(INT64_MIN));
-}
-
-TEST(raw_ostreamTest, BufferEdge) {
- EXPECT_EQ("1.20", printToString(format("%.2f", 1.2), 1));
- EXPECT_EQ("1.20", printToString(format("%.2f", 1.2), 2));
- EXPECT_EQ("1.20", printToString(format("%.2f", 1.2), 3));
- EXPECT_EQ("1.20", printToString(format("%.2f", 1.2), 4));
- EXPECT_EQ("1.20", printToString(format("%.2f", 1.2), 10));
-}
-
-TEST(raw_ostreamTest, TinyBuffer) {
- std::string Str;
- raw_string_ostream OS(Str);
- OS.SetBufferSize(1);
- OS << "hello";
- OS << 1;
- OS << 'w' << 'o' << 'r' << 'l' << 'd';
- EXPECT_EQ("hello1world", OS.str());
-}
-
-TEST(raw_ostreamTest, WriteEscaped) {
- std::string Str;
-
- Str = "";
- raw_string_ostream(Str).write_escaped("hi");
- EXPECT_EQ("hi", Str);
-
- Str = "";
- raw_string_ostream(Str).write_escaped("\\\t\n\"");
- EXPECT_EQ("\\\\\\t\\n\\\"", Str);
-
- Str = "";
- raw_string_ostream(Str).write_escaped("\1\10\200");
- EXPECT_EQ("\\001\\010\\200", Str);
-}
-
-}
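
For reference, a minimal sketch of the raw_string_ostream and format() usage that the deleted raw_ostream tests above cover; the expected output in the comments follows those tests, and the snippet assumes the bundled 2.8-era Support headers:

    #include "llvm/Support/Format.h"
    #include "llvm/Support/raw_ostream.h"
    #include <string>

    using namespace llvm;

    int main() {
      // raw_string_ostream buffers into a std::string; str() flushes and
      // returns the accumulated text.
      std::string S;
      raw_string_ostream OS(S);
      OS << "answer=" << 42 << " pi~" << format("%.2f", 3.14) << '\n';
      outs() << OS.str();           // prints: answer=42 pi~3.14

      // write_escaped() emits C-style escapes for quotes, backslashes and
      // non-printable bytes.
      std::string E;
      raw_string_ostream(E).write_escaped("a\tb\"c");
      outs() << E << '\n';          // prints: a\tb\"c
      return 0;
    }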
diff --git a/libclamav/c++/llvm/unittests/Transforms/Makefile b/libclamav/c++/llvm/unittests/Transforms/Makefile
deleted file mode 100644
index 599b18a..0000000
--- a/libclamav/c++/llvm/unittests/Transforms/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-##===- unittests/Transforms/Makefile -----------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-
-PARALLEL_DIRS = Utils
-
-include $(LEVEL)/Makefile.common
-
-clean::
- $(Verb) $(RM) -f *Tests
diff --git a/libclamav/c++/llvm/unittests/Transforms/Utils/Cloning.cpp b/libclamav/c++/llvm/unittests/Transforms/Utils/Cloning.cpp
deleted file mode 100644
index 17047e7..0000000
--- a/libclamav/c++/llvm/unittests/Transforms/Utils/Cloning.cpp
+++ /dev/null
@@ -1,88 +0,0 @@
-//===- Cloning.cpp - Unit tests for the Cloner ----------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/Argument.h"
-#include "llvm/Instructions.h"
-#include "llvm/LLVMContext.h"
-
-using namespace llvm;
-
-TEST(CloneInstruction, OverflowBits) {
- LLVMContext context;
- Value *V = new Argument(Type::getInt32Ty(context));
-
- BinaryOperator *Add = BinaryOperator::Create(Instruction::Add, V, V);
- BinaryOperator *Sub = BinaryOperator::Create(Instruction::Sub, V, V);
- BinaryOperator *Mul = BinaryOperator::Create(Instruction::Mul, V, V);
-
- EXPECT_FALSE(cast<BinaryOperator>(Add->clone())->hasNoUnsignedWrap());
- EXPECT_FALSE(cast<BinaryOperator>(Add->clone())->hasNoSignedWrap());
- EXPECT_FALSE(cast<BinaryOperator>(Sub->clone())->hasNoUnsignedWrap());
- EXPECT_FALSE(cast<BinaryOperator>(Sub->clone())->hasNoSignedWrap());
- EXPECT_FALSE(cast<BinaryOperator>(Mul->clone())->hasNoUnsignedWrap());
- EXPECT_FALSE(cast<BinaryOperator>(Mul->clone())->hasNoSignedWrap());
-
- Add->setHasNoUnsignedWrap();
- Sub->setHasNoUnsignedWrap();
- Mul->setHasNoUnsignedWrap();
-
- EXPECT_TRUE(cast<BinaryOperator>(Add->clone())->hasNoUnsignedWrap());
- EXPECT_FALSE(cast<BinaryOperator>(Add->clone())->hasNoSignedWrap());
- EXPECT_TRUE(cast<BinaryOperator>(Sub->clone())->hasNoUnsignedWrap());
- EXPECT_FALSE(cast<BinaryOperator>(Sub->clone())->hasNoSignedWrap());
- EXPECT_TRUE(cast<BinaryOperator>(Mul->clone())->hasNoUnsignedWrap());
- EXPECT_FALSE(cast<BinaryOperator>(Mul->clone())->hasNoSignedWrap());
-
- Add->setHasNoSignedWrap();
- Sub->setHasNoSignedWrap();
- Mul->setHasNoSignedWrap();
-
- EXPECT_TRUE(cast<BinaryOperator>(Add->clone())->hasNoUnsignedWrap());
- EXPECT_TRUE(cast<BinaryOperator>(Add->clone())->hasNoSignedWrap());
- EXPECT_TRUE(cast<BinaryOperator>(Sub->clone())->hasNoUnsignedWrap());
- EXPECT_TRUE(cast<BinaryOperator>(Sub->clone())->hasNoSignedWrap());
- EXPECT_TRUE(cast<BinaryOperator>(Mul->clone())->hasNoUnsignedWrap());
- EXPECT_TRUE(cast<BinaryOperator>(Mul->clone())->hasNoSignedWrap());
-
- Add->setHasNoUnsignedWrap(false);
- Sub->setHasNoUnsignedWrap(false);
- Mul->setHasNoUnsignedWrap(false);
-
- EXPECT_FALSE(cast<BinaryOperator>(Add->clone())->hasNoUnsignedWrap());
- EXPECT_TRUE(cast<BinaryOperator>(Add->clone())->hasNoSignedWrap());
- EXPECT_FALSE(cast<BinaryOperator>(Sub->clone())->hasNoUnsignedWrap());
- EXPECT_TRUE(cast<BinaryOperator>(Sub->clone())->hasNoSignedWrap());
- EXPECT_FALSE(cast<BinaryOperator>(Mul->clone())->hasNoUnsignedWrap());
- EXPECT_TRUE(cast<BinaryOperator>(Mul->clone())->hasNoSignedWrap());
-}
-
-TEST(CloneInstruction, Inbounds) {
- LLVMContext context;
- Value *V = new Argument(Type::getInt32PtrTy(context));
- Constant *Z = Constant::getNullValue(Type::getInt32Ty(context));
- std::vector<Value *> ops;
- ops.push_back(Z);
- GetElementPtrInst *GEP = GetElementPtrInst::Create(V, ops.begin(), ops.end());
- EXPECT_FALSE(cast<GetElementPtrInst>(GEP->clone())->isInBounds());
-
- GEP->setIsInBounds();
- EXPECT_TRUE(cast<GetElementPtrInst>(GEP->clone())->isInBounds());
-}
-
-TEST(CloneInstruction, Exact) {
- LLVMContext context;
- Value *V = new Argument(Type::getInt32Ty(context));
-
- BinaryOperator *SDiv = BinaryOperator::Create(Instruction::SDiv, V, V);
- EXPECT_FALSE(cast<BinaryOperator>(SDiv->clone())->isExact());
-
- SDiv->setIsExact(true);
- EXPECT_TRUE(cast<BinaryOperator>(SDiv->clone())->isExact());
-}
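
The Cloning tests removed above check that Instruction::clone() carries the optional arithmetic flags along. A tiny hedged sketch of the same behaviour with the bundled 2.8-era API; as in the original test, the values are deliberately leaked for brevity:

    #include "llvm/Argument.h"
    #include "llvm/Instructions.h"
    #include "llvm/LLVMContext.h"

    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Value *V = new Argument(Type::getInt32Ty(Ctx));

      // clone() copies the optional nsw/nuw/inbounds/exact bits along with
      // the opcode and operands.
      BinaryOperator *Add = BinaryOperator::Create(Instruction::Add, V, V);
      Add->setHasNoSignedWrap();
      BinaryOperator *Copy = cast<BinaryOperator>(Add->clone());

      return Copy->hasNoSignedWrap() ? 0 : 1;  // 0: flag survived the clone
    }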
diff --git a/libclamav/c++/llvm/unittests/Transforms/Utils/Makefile b/libclamav/c++/llvm/unittests/Transforms/Utils/Makefile
deleted file mode 100644
index fdf4be0..0000000
--- a/libclamav/c++/llvm/unittests/Transforms/Utils/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- unittests/Transforms/Utils/Makefile -----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-TESTNAME = Utils
-LINK_COMPONENTS := core support transformutils
-
-include $(LEVEL)/Makefile.config
-include $(LLVM_SRC_ROOT)/unittests/Makefile.unittest
diff --git a/libclamav/c++/llvm/unittests/VMCore/ConstantsTest.cpp b/libclamav/c++/llvm/unittests/VMCore/ConstantsTest.cpp
deleted file mode 100644
index 8f28407..0000000
--- a/libclamav/c++/llvm/unittests/VMCore/ConstantsTest.cpp
+++ /dev/null
@@ -1,113 +0,0 @@
-//===- llvm/unittest/VMCore/ConstantsTest.cpp - Constants unit tests ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/LLVMContext.h"
-#include "gtest/gtest.h"
-
-namespace llvm {
-namespace {
-
-TEST(ConstantsTest, Integer_i1) {
- const IntegerType* Int1 = IntegerType::get(getGlobalContext(), 1);
- Constant* One = ConstantInt::get(Int1, 1, true);
- Constant* Zero = ConstantInt::get(Int1, 0);
- Constant* NegOne = ConstantInt::get(Int1, static_cast<uint64_t>(-1), true);
- EXPECT_EQ(NegOne, ConstantInt::getSigned(Int1, -1));
- Constant* Undef = UndefValue::get(Int1);
-
- // Input: @b = constant i1 add(i1 1 , i1 1)
- // Output: @b = constant i1 false
- EXPECT_EQ(Zero, ConstantExpr::getAdd(One, One));
-
- // @c = constant i1 add(i1 -1, i1 1)
- // @c = constant i1 false
- EXPECT_EQ(Zero, ConstantExpr::getAdd(NegOne, One));
-
- // @d = constant i1 add(i1 -1, i1 -1)
- // @d = constant i1 false
- EXPECT_EQ(Zero, ConstantExpr::getAdd(NegOne, NegOne));
-
- // @e = constant i1 sub(i1 -1, i1 1)
- // @e = constant i1 false
- EXPECT_EQ(Zero, ConstantExpr::getSub(NegOne, One));
-
- // @f = constant i1 sub(i1 1 , i1 -1)
- // @f = constant i1 false
- EXPECT_EQ(Zero, ConstantExpr::getSub(One, NegOne));
-
- // @g = constant i1 sub(i1 1 , i1 1)
- // @g = constant i1 false
- EXPECT_EQ(Zero, ConstantExpr::getSub(One, One));
-
- // @h = constant i1 shl(i1 1 , i1 1) ; undefined
- // @h = constant i1 undef
- EXPECT_EQ(Undef, ConstantExpr::getShl(One, One));
-
- // @i = constant i1 shl(i1 1 , i1 0)
- // @i = constant i1 true
- EXPECT_EQ(One, ConstantExpr::getShl(One, Zero));
-
- // @j = constant i1 lshr(i1 1, i1 1) ; undefined
- // @j = constant i1 undef
- EXPECT_EQ(Undef, ConstantExpr::getLShr(One, One));
-
- // @m = constant i1 ashr(i1 1, i1 1) ; undefined
- // @m = constant i1 undef
- EXPECT_EQ(Undef, ConstantExpr::getAShr(One, One));
-
- // @n = constant i1 mul(i1 -1, i1 1)
- // @n = constant i1 true
- EXPECT_EQ(One, ConstantExpr::getMul(NegOne, One));
-
- // @o = constant i1 sdiv(i1 -1, i1 1) ; overflow
- // @o = constant i1 true
- EXPECT_EQ(One, ConstantExpr::getSDiv(NegOne, One));
-
- // @p = constant i1 sdiv(i1 1 , i1 -1); overflow
- // @p = constant i1 true
- EXPECT_EQ(One, ConstantExpr::getSDiv(One, NegOne));
-
- // @q = constant i1 udiv(i1 -1, i1 1)
- // @q = constant i1 true
- EXPECT_EQ(One, ConstantExpr::getUDiv(NegOne, One));
-
- // @r = constant i1 udiv(i1 1, i1 -1)
- // @r = constant i1 true
- EXPECT_EQ(One, ConstantExpr::getUDiv(One, NegOne));
-
- // @s = constant i1 srem(i1 -1, i1 1) ; overflow
- // @s = constant i1 false
- EXPECT_EQ(Zero, ConstantExpr::getSRem(NegOne, One));
-
- // @t = constant i1 urem(i1 -1, i1 1)
- // @t = constant i1 false
- EXPECT_EQ(Zero, ConstantExpr::getURem(NegOne, One));
-
- // @u = constant i1 srem(i1 1, i1 -1) ; overflow
- // @u = constant i1 false
- EXPECT_EQ(Zero, ConstantExpr::getSRem(One, NegOne));
-}
-
-TEST(ConstantsTest, IntSigns) {
- const IntegerType* Int8Ty = Type::getInt8Ty(getGlobalContext());
- EXPECT_EQ(100, ConstantInt::get(Int8Ty, 100, false)->getSExtValue());
- EXPECT_EQ(100, ConstantInt::get(Int8Ty, 100, true)->getSExtValue());
- EXPECT_EQ(100, ConstantInt::getSigned(Int8Ty, 100)->getSExtValue());
- EXPECT_EQ(-50, ConstantInt::get(Int8Ty, 206)->getSExtValue());
- EXPECT_EQ(-50, ConstantInt::getSigned(Int8Ty, -50)->getSExtValue());
- EXPECT_EQ(206U, ConstantInt::getSigned(Int8Ty, -50)->getZExtValue());
-
- // Overflow is handled by truncation.
- EXPECT_EQ(0x3b, ConstantInt::get(Int8Ty, 0x13b)->getSExtValue());
-}
-
-} // end anonymous namespace
-} // end namespace llvm
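
The ConstantsTest deleted above relies on ConstantExpr::get* folding arithmetic on constant operands at construction time. A tiny sketch of that folding on i8, under the same 2.8-era API, for reference:

    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"

    using namespace llvm;

    int main() {
      LLVMContext &Ctx = getGlobalContext();
      const IntegerType *Int8Ty = Type::getInt8Ty(Ctx);

      // With two ConstantInt operands the expression folds immediately,
      // wrapping modulo 2^8: 200 + 100 == 300 == 44 (mod 256).
      Constant *Sum = ConstantExpr::getAdd(ConstantInt::get(Int8Ty, 200),
                                           ConstantInt::get(Int8Ty, 100));
      return cast<ConstantInt>(Sum)->getZExtValue() == 44 ? 0 : 1;
    }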
diff --git a/libclamav/c++/llvm/unittests/VMCore/DerivedTypesTest.cpp b/libclamav/c++/llvm/unittests/VMCore/DerivedTypesTest.cpp
deleted file mode 100644
index 2e0450d..0000000
--- a/libclamav/c++/llvm/unittests/VMCore/DerivedTypesTest.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-//===- llvm/unittest/VMCore/DerivedTypesTest.cpp - Types unit tests -------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "../lib/VMCore/LLVMContextImpl.h"
-#include "llvm/Type.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/LLVMContext.h"
-using namespace llvm;
-
-namespace {
-
-TEST(OpaqueTypeTest, RegisterWithContext) {
- LLVMContext C;
- LLVMContextImpl *pImpl = C.pImpl;
-
- // 1 refers to the AlwaysOpaqueTy allocated in the Context's constructor and
- // destroyed in the destructor.
- EXPECT_EQ(1u, pImpl->OpaqueTypes.size());
- {
- PATypeHolder Type = OpaqueType::get(C);
- EXPECT_EQ(2u, pImpl->OpaqueTypes.size());
- }
- EXPECT_EQ(1u, pImpl->OpaqueTypes.size());
-}
-
-} // namespace
diff --git a/libclamav/c++/llvm/unittests/VMCore/Makefile b/libclamav/c++/llvm/unittests/VMCore/Makefile
deleted file mode 100644
index 1b2b69c..0000000
--- a/libclamav/c++/llvm/unittests/VMCore/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-##===- unittests/VMCore/Makefile ---------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TESTNAME = VMCore
-LINK_COMPONENTS := core support target ipa
-
-include $(LEVEL)/Makefile.config
-include $(LLVM_SRC_ROOT)/unittests/Makefile.unittest
diff --git a/libclamav/c++/llvm/unittests/VMCore/MetadataTest.cpp b/libclamav/c++/llvm/unittests/VMCore/MetadataTest.cpp
deleted file mode 100644
index 13bf27e..0000000
--- a/libclamav/c++/llvm/unittests/VMCore/MetadataTest.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-//===- llvm/unittest/VMCore/Metadata.cpp - Metadata unit tests ------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-#include "llvm/Constants.h"
-#include "llvm/Instructions.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Metadata.h"
-#include "llvm/Module.h"
-#include "llvm/Type.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/ValueHandle.h"
-using namespace llvm;
-
-namespace {
-
-class MetadataTest : public testing::Test {
-protected:
- LLVMContext Context;
-};
-typedef MetadataTest MDStringTest;
-
-// Test that construction of MDString with different value produces different
-// MDString objects, even with the same string pointer and nulls in the string.
-TEST_F(MDStringTest, CreateDifferent) {
- char x[3] = { 'f', 0, 'A' };
- MDString *s1 = MDString::get(Context, StringRef(&x[0], 3));
- x[2] = 'B';
- MDString *s2 = MDString::get(Context, StringRef(&x[0], 3));
- EXPECT_NE(s1, s2);
-}
-
-// Test that creation of MDStrings with the same string contents produces the
-// same MDString object, even with different pointers.
-TEST_F(MDStringTest, CreateSame) {
- char x[4] = { 'a', 'b', 'c', 'X' };
- char y[4] = { 'a', 'b', 'c', 'Y' };
-
- MDString *s1 = MDString::get(Context, StringRef(&x[0], 3));
- MDString *s2 = MDString::get(Context, StringRef(&y[0], 3));
- EXPECT_EQ(s1, s2);
-}
-
-// Test that MDString prints out the string we fed it.
-TEST_F(MDStringTest, PrintingSimple) {
- char *str = new char[13];
- strncpy(str, "testing 1 2 3", 13);
- MDString *s = MDString::get(Context, StringRef(str, 13));
- strncpy(str, "aaaaaaaaaaaaa", 13);
- delete[] str;
-
- std::string Str;
- raw_string_ostream oss(Str);
- s->print(oss);
- EXPECT_STREQ("metadata !\"testing 1 2 3\"", oss.str().c_str());
-}
-
-// Test printing of MDString with non-printable characters.
-TEST_F(MDStringTest, PrintingComplex) {
- char str[5] = {0, '\n', '"', '\\', -1};
- MDString *s = MDString::get(Context, StringRef(str+0, 5));
- std::string Str;
- raw_string_ostream oss(Str);
- s->print(oss);
- EXPECT_STREQ("metadata !\"\\00\\0A\\22\\5C\\FF\"", oss.str().c_str());
-}
-
-typedef MetadataTest MDNodeTest;
-
-// Test the two constructors, and containing other Constants.
-TEST_F(MDNodeTest, Simple) {
- char x[3] = { 'a', 'b', 'c' };
- char y[3] = { '1', '2', '3' };
-
- MDString *s1 = MDString::get(Context, StringRef(&x[0], 3));
- MDString *s2 = MDString::get(Context, StringRef(&y[0], 3));
- ConstantInt *CI = ConstantInt::get(getGlobalContext(), APInt(8, 0));
-
- std::vector<Value *> V;
- V.push_back(s1);
- V.push_back(CI);
- V.push_back(s2);
-
- MDNode *n1 = MDNode::get(Context, &V[0], 3);
- Value *const c1 = n1;
- MDNode *n2 = MDNode::get(Context, &c1, 1);
- MDNode *n3 = MDNode::get(Context, &V[0], 3);
- EXPECT_NE(n1, n2);
-#ifdef ENABLE_MDNODE_UNIQUING
- EXPECT_EQ(n1, n3);
-#else
- (void) n3;
-#endif
-
- EXPECT_EQ(3u, n1->getNumOperands());
- EXPECT_EQ(s1, n1->getOperand(0));
- EXPECT_EQ(CI, n1->getOperand(1));
- EXPECT_EQ(s2, n1->getOperand(2));
-
- EXPECT_EQ(1u, n2->getNumOperands());
- EXPECT_EQ(n1, n2->getOperand(0));
-}
-
-TEST_F(MDNodeTest, Delete) {
- Constant *C = ConstantInt::get(Type::getInt32Ty(getGlobalContext()), 1);
- Instruction *I = new BitCastInst(C, Type::getInt32Ty(getGlobalContext()));
-
- Value *const V = I;
- MDNode *n = MDNode::get(Context, &V, 1);
- WeakVH wvh = n;
-
- EXPECT_EQ(n, wvh);
-
- delete I;
-}
-
-TEST(NamedMDNodeTest, Search) {
- LLVMContext Context;
- Constant *C = ConstantInt::get(Type::getInt32Ty(Context), 1);
- Constant *C2 = ConstantInt::get(Type::getInt32Ty(Context), 2);
-
- Value *const V = C;
- Value *const V2 = C2;
- MDNode *n = MDNode::get(Context, &V, 1);
- MDNode *n2 = MDNode::get(Context, &V2, 1);
-
- MDNode *Nodes[2] = { n, n2 };
-
- Module *M = new Module("MyModule", Context);
- const char *Name = "llvm.NMD1";
- NamedMDNode *NMD = NamedMDNode::Create(Context, Name, &Nodes[0], 2, M);
- std::string Str;
- raw_string_ostream oss(Str);
- NMD->print(oss);
- EXPECT_STREQ("!llvm.NMD1 = !{!0, !1}\n",
- oss.str().c_str());
-}
-}
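
The MetadataTest removed above uses the pre-3.0 metadata API (MDString::get, and MDNode::get taking a Value* array plus a count). A small sketch of that shape, assuming the bundled headers; the operand values are arbitrary:

    #include "llvm/Constants.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Metadata.h"
    #include "llvm/Type.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    int main() {
      LLVMContext Ctx;

      // MDStrings are uniqued by content within a context.
      MDString *S = MDString::get(Ctx, "clamav");

      // An MDNode wraps an ordered list of Values (here a string and an i32).
      Value *Ops[2] = { S, ConstantInt::get(Type::getInt32Ty(Ctx), 1) };
      MDNode *N = MDNode::get(Ctx, Ops, 2);

      N->print(outs());   // emits the node in textual IR form
      outs() << '\n';
      return 0;
    }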
diff --git a/libclamav/c++/llvm/unittests/VMCore/PassManagerTest.cpp b/libclamav/c++/llvm/unittests/VMCore/PassManagerTest.cpp
deleted file mode 100644
index cb8f9eb..0000000
--- a/libclamav/c++/llvm/unittests/VMCore/PassManagerTest.cpp
+++ /dev/null
@@ -1,527 +0,0 @@
-//===- llvm/unittest/VMCore/PassManager.cpp - Constants unit tests ------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Module.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/PassManager.h"
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Pass.h"
-#include "llvm/Analysis/LoopPass.h"
-#include "llvm/CallGraphSCCPass.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Constants.h"
-#include "llvm/GlobalVariable.h"
-#include "llvm/Function.h"
-#include "llvm/CallingConv.h"
-#include "llvm/BasicBlock.h"
-#include "llvm/Instructions.h"
-#include "llvm/InlineAsm.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/PassManager.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Analysis/Verifier.h"
-#include "llvm/Assembly/PrintModulePass.h"
-#include "gtest/gtest.h"
-
-namespace llvm {
- namespace {
- // ND = no deps
- // NM = no modifications
- struct ModuleNDNM: public ModulePass {
- public:
- static char run;
- static char ID;
- ModuleNDNM() : ModulePass(&ID) {}
- virtual bool runOnModule(Module &M) {
- run++;
- return false;
- }
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesAll();
- }
- };
- char ModuleNDNM::ID=0;
- char ModuleNDNM::run=0;
-
- struct ModuleNDM : public ModulePass {
- public:
- static char run;
- static char ID;
- ModuleNDM() : ModulePass(&ID) {}
- virtual bool runOnModule(Module &M) {
- run++;
- return true;
- }
- };
- char ModuleNDM::ID=0;
- char ModuleNDM::run=0;
- RegisterPass<ModuleNDM> X("mndm","mndm",false,false);
-
- struct ModuleNDM2 : public ModulePass {
- public:
- static char run;
- static char ID;
- ModuleNDM2() : ModulePass(&ID) {}
- virtual bool runOnModule(Module &M) {
- run++;
- return true;
- }
- };
- char ModuleNDM2::ID=0;
- char ModuleNDM2::run=0;
-
- struct ModuleDNM : public ModulePass {
- public:
- static char run;
- static char ID;
- ModuleDNM() : ModulePass(&ID) {}
- virtual bool runOnModule(Module &M) {
- EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
- run++;
- return false;
- }
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<ModuleNDM>();
- AU.setPreservesAll();
- }
- };
- char ModuleDNM::ID=0;
- char ModuleDNM::run=0;
-
- template<typename P>
- struct PassTestBase : public P {
- protected:
- static int runc;
- static bool initialized;
- static bool finalized;
- int allocated;
- void run() {
- EXPECT_EQ(true, initialized);
- EXPECT_EQ(false, finalized);
- EXPECT_EQ(0, allocated);
- allocated++;
- runc++;
- }
- public:
- static char ID;
- static void finishedOK(int run) {
- EXPECT_GT(runc, 0);
- EXPECT_EQ(true, initialized);
- EXPECT_EQ(true, finalized);
- EXPECT_EQ(run, runc);
- }
- PassTestBase() : P(&ID), allocated(0) {
- initialized = false;
- finalized = false;
- runc = 0;
- }
-
- virtual void releaseMemory() {
- EXPECT_GT(runc, 0);
- EXPECT_GT(allocated, 0);
- allocated--;
- }
- };
- template<typename P> char PassTestBase<P>::ID;
- template<typename P> int PassTestBase<P>::runc;
- template<typename P> bool PassTestBase<P>::initialized;
- template<typename P> bool PassTestBase<P>::finalized;
-
- template<typename T, typename P>
- struct PassTest : public PassTestBase<P> {
- public:
- virtual bool doInitialization(T &t) {
- EXPECT_EQ(false, PassTestBase<P>::initialized);
- PassTestBase<P>::initialized = true;
- return false;
- }
- virtual bool doFinalization(T &t) {
- EXPECT_EQ(false, PassTestBase<P>::finalized);
- PassTestBase<P>::finalized = true;
- EXPECT_EQ(0, PassTestBase<P>::allocated);
- return false;
- }
- };
-
- struct CGPass : public PassTest<CallGraph, CallGraphSCCPass> {
- public:
- virtual bool runOnSCC(std::vector<CallGraphNode*> &SCMM) {
- EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
- run();
- return false;
- }
- };
- RegisterPass<CGPass> X1("cgp","cgp");
-
- struct FPass : public PassTest<Module, FunctionPass> {
- public:
- virtual bool runOnFunction(Function &F) {
- // FIXME: PR4112
- // EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
- run();
- return false;
- }
- };
- RegisterPass<FPass> X2("fp","fp");
-
- struct LPass : public PassTestBase<LoopPass> {
- private:
- static int initcount;
- static int fincount;
- public:
- LPass() {
- initcount = 0; fincount=0;
- EXPECT_EQ(false, initialized);
- }
- static void finishedOK(int run, int finalized) {
- PassTestBase<LoopPass>::finishedOK(run);
- EXPECT_EQ(run, initcount);
- EXPECT_EQ(finalized, fincount);
- }
- virtual bool doInitialization(Loop* L, LPPassManager &LPM) {
- initialized = true;
- initcount++;
- return false;
- }
- virtual bool runOnLoop(Loop *L, LPPassManager &LPM) {
- EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
- run();
- return false;
- }
- virtual bool doFinalization() {
- fincount++;
- finalized = true;
- return false;
- }
- };
- int LPass::initcount=0;
- int LPass::fincount=0;
- RegisterPass<LPass> X3("lp","lp");
-
- struct BPass : public PassTestBase<BasicBlockPass> {
- private:
- static int inited;
- static int fin;
- public:
- static void finishedOK(int run, int N) {
- PassTestBase<BasicBlockPass>::finishedOK(run);
- EXPECT_EQ(inited, N);
- EXPECT_EQ(fin, N);
- }
- BPass() {
- inited = 0;
- fin = 0;
- }
- virtual bool doInitialization(Module &M) {
- EXPECT_EQ(false, initialized);
- initialized = true;
- return false;
- }
- virtual bool doInitialization(Function &F) {
- inited++;
- return false;
- }
- virtual bool runOnBasicBlock(BasicBlock &BB) {
- EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
- run();
- return false;
- }
- virtual bool doFinalization(Function &F) {
- fin++;
- return false;
- }
- virtual bool doFinalization(Module &M) {
- EXPECT_EQ(false, finalized);
- finalized = true;
- EXPECT_EQ(0, allocated);
- return false;
- }
- };
- int BPass::inited=0;
- int BPass::fin=0;
- RegisterPass<BPass> X4("bp","bp");
-
- struct OnTheFlyTest: public ModulePass {
- public:
- static char ID;
- OnTheFlyTest() : ModulePass(&ID) {}
- virtual bool runOnModule(Module &M) {
- EXPECT_TRUE(getAnalysisIfAvailable<TargetData>());
- for (Module::iterator I=M.begin(),E=M.end(); I != E; ++I) {
- Function &F = *I;
- {
- SCOPED_TRACE("Running on the fly function pass");
- getAnalysis<FPass>(F);
- }
- }
- return false;
- }
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<FPass>();
- }
- };
- char OnTheFlyTest::ID=0;
-
- TEST(PassManager, RunOnce) {
- Module M("test-once", getGlobalContext());
- struct ModuleNDNM *mNDNM = new ModuleNDNM();
- struct ModuleDNM *mDNM = new ModuleDNM();
- struct ModuleNDM *mNDM = new ModuleNDM();
- struct ModuleNDM2 *mNDM2 = new ModuleNDM2();
-
- mNDM->run = mNDNM->run = mDNM->run = mNDM2->run = 0;
-
- PassManager Passes;
- Passes.add(new TargetData(&M));
- Passes.add(mNDM2);
- Passes.add(mNDM);
- Passes.add(mNDNM);
- Passes.add(mDNM);
-
- Passes.run(M);
- // each pass must be run exactly once, since nothing invalidates them
- EXPECT_EQ(1, mNDM->run);
- EXPECT_EQ(1, mNDNM->run);
- EXPECT_EQ(1, mDNM->run);
- EXPECT_EQ(1, mNDM2->run);
- }
-
- TEST(PassManager, ReRun) {
- Module M("test-rerun", getGlobalContext());
- struct ModuleNDNM *mNDNM = new ModuleNDNM();
- struct ModuleDNM *mDNM = new ModuleDNM();
- struct ModuleNDM *mNDM = new ModuleNDM();
- struct ModuleNDM2 *mNDM2 = new ModuleNDM2();
-
- mNDM->run = mNDNM->run = mDNM->run = mNDM2->run = 0;
-
- PassManager Passes;
- Passes.add(new TargetData(&M));
- Passes.add(mNDM);
- Passes.add(mNDNM);
- Passes.add(mNDM2);// invalidates mNDM needed by mDNM
- Passes.add(mDNM);
-
- Passes.run(M);
- // Some passes must be rerun because a pass that modified the
- // module/function was run inbetween
- EXPECT_EQ(2, mNDM->run);
- EXPECT_EQ(1, mNDNM->run);
- EXPECT_EQ(1, mNDM2->run);
- EXPECT_EQ(1, mDNM->run);
- }
-
- Module* makeLLVMModule();
-
- template<typename T>
- void MemoryTestHelper(int run) {
- Module *M = makeLLVMModule();
- T *P = new T();
- PassManager Passes;
- Passes.add(new TargetData(M));
- Passes.add(P);
- Passes.run(*M);
- T::finishedOK(run);
- }
-
- template<typename T>
- void MemoryTestHelper(int run, int N) {
- Module *M = makeLLVMModule();
- T *P = new T();
- PassManager Passes;
- Passes.add(new TargetData(M));
- Passes.add(P);
- Passes.run(*M);
- T::finishedOK(run, N);
- delete M;
- }
-
- TEST(PassManager, Memory) {
- // SCC#1: test1->test2->test3->test1
- // SCC#2: test4
- // SCC#3: indirect call node
- {
- SCOPED_TRACE("Callgraph pass");
- MemoryTestHelper<CGPass>(3);
- }
-
- {
- SCOPED_TRACE("Function pass");
- MemoryTestHelper<FPass>(4);// 4 functions
- }
-
- {
- SCOPED_TRACE("Loop pass");
- MemoryTestHelper<LPass>(2, 1); //2 loops, 1 function
- }
- {
- SCOPED_TRACE("Basic block pass");
- MemoryTestHelper<BPass>(7, 4); //9 basic blocks
- }
-
- }
-
- TEST(PassManager, MemoryOnTheFly) {
- Module *M = makeLLVMModule();
- {
- SCOPED_TRACE("Running OnTheFlyTest");
- struct OnTheFlyTest *O = new OnTheFlyTest();
- PassManager Passes;
- Passes.add(new TargetData(M));
- Passes.add(O);
- Passes.run(*M);
-
- FPass::finishedOK(4);
- }
- delete M;
- }
-
- Module* makeLLVMModule() {
- // Module Construction
- Module* mod = new Module("test-mem", getGlobalContext());
- mod->setDataLayout("e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
- "i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-"
- "a0:0:64-s0:64:64-f80:128:128");
- mod->setTargetTriple("x86_64-unknown-linux-gnu");
-
- // Type Definitions
- std::vector<const Type*>FuncTy_0_args;
- FunctionType* FuncTy_0 = FunctionType::get(
- /*Result=*/IntegerType::get(getGlobalContext(), 32),
- /*Params=*/FuncTy_0_args,
- /*isVarArg=*/false);
-
- std::vector<const Type*>FuncTy_2_args;
- FuncTy_2_args.push_back(IntegerType::get(getGlobalContext(), 1));
- FunctionType* FuncTy_2 = FunctionType::get(
- /*Result=*/Type::getVoidTy(getGlobalContext()),
- /*Params=*/FuncTy_2_args,
- /*isVarArg=*/false);
-
-
- // Function Declarations
-
- Function* func_test1 = Function::Create(
- /*Type=*/FuncTy_0,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"test1", mod);
- func_test1->setCallingConv(CallingConv::C);
- AttrListPtr func_test1_PAL;
- func_test1->setAttributes(func_test1_PAL);
-
- Function* func_test2 = Function::Create(
- /*Type=*/FuncTy_0,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"test2", mod);
- func_test2->setCallingConv(CallingConv::C);
- AttrListPtr func_test2_PAL;
- func_test2->setAttributes(func_test2_PAL);
-
- Function* func_test3 = Function::Create(
- /*Type=*/FuncTy_0,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"test3", mod);
- func_test3->setCallingConv(CallingConv::C);
- AttrListPtr func_test3_PAL;
- func_test3->setAttributes(func_test3_PAL);
-
- Function* func_test4 = Function::Create(
- /*Type=*/FuncTy_2,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"test4", mod);
- func_test4->setCallingConv(CallingConv::C);
- AttrListPtr func_test4_PAL;
- func_test4->setAttributes(func_test4_PAL);
-
- // Global Variable Declarations
-
-
- // Constant Definitions
-
- // Global Variable Definitions
-
- // Function Definitions
-
- // Function: test1 (func_test1)
- {
-
- BasicBlock* label_entry = BasicBlock::Create(getGlobalContext(), "entry",func_test1,0);
-
- // Block entry (label_entry)
- CallInst* int32_3 = CallInst::Create(func_test2, "", label_entry);
- int32_3->setCallingConv(CallingConv::C);
- int32_3->setTailCall(false);AttrListPtr int32_3_PAL;
- int32_3->setAttributes(int32_3_PAL);
-
- ReturnInst::Create(getGlobalContext(), int32_3, label_entry);
-
- }
-
- // Function: test2 (func_test2)
- {
-
- BasicBlock* label_entry_5 = BasicBlock::Create(getGlobalContext(), "entry",func_test2,0);
-
- // Block entry (label_entry_5)
- CallInst* int32_6 = CallInst::Create(func_test3, "", label_entry_5);
- int32_6->setCallingConv(CallingConv::C);
- int32_6->setTailCall(false);AttrListPtr int32_6_PAL;
- int32_6->setAttributes(int32_6_PAL);
-
- ReturnInst::Create(getGlobalContext(), int32_6, label_entry_5);
-
- }
-
- // Function: test3 (func_test3)
- {
-
- BasicBlock* label_entry_8 = BasicBlock::Create(getGlobalContext(), "entry",func_test3,0);
-
- // Block entry (label_entry_8)
- CallInst* int32_9 = CallInst::Create(func_test1, "", label_entry_8);
- int32_9->setCallingConv(CallingConv::C);
- int32_9->setTailCall(false);AttrListPtr int32_9_PAL;
- int32_9->setAttributes(int32_9_PAL);
-
- ReturnInst::Create(getGlobalContext(), int32_9, label_entry_8);
-
- }
-
- // Function: test4 (func_test4)
- {
- Function::arg_iterator args = func_test4->arg_begin();
- Value* int1_f = args++;
- int1_f->setName("f");
-
- BasicBlock* label_entry_11 = BasicBlock::Create(getGlobalContext(), "entry",func_test4,0);
- BasicBlock* label_bb = BasicBlock::Create(getGlobalContext(), "bb",func_test4,0);
- BasicBlock* label_bb1 = BasicBlock::Create(getGlobalContext(), "bb1",func_test4,0);
- BasicBlock* label_return = BasicBlock::Create(getGlobalContext(), "return",func_test4,0);
-
- // Block entry (label_entry_11)
- BranchInst::Create(label_bb, label_entry_11);
-
- // Block bb (label_bb)
- BranchInst::Create(label_bb, label_bb1, int1_f, label_bb);
-
- // Block bb1 (label_bb1)
- BranchInst::Create(label_bb1, label_return, int1_f, label_bb1);
-
- // Block return (label_return)
- ReturnInst::Create(getGlobalContext(), label_return);
-
- }
- return mod;
- }
-
- }
-}
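
For orientation only: the module built by makeLLVMModule() in the deleted PassManager test above is structurally simple. test1, test2 and test3 call each other in a cycle (the SCC noted in the test comments), and test4 walks through two single-block loops guarded by its i1 argument. A rough C++ rendering of that shape, nothing here is part of the removed test itself:

// test1 -> test2 -> test3 -> test1: one strongly connected component in the
// call graph, matching the SCC#1 comment in the deleted test.
int test2();
int test3();

int test1() { return test2(); }
int test2() { return test3(); }
int test3() { return test1(); }

// test4: an entry block, two single-block loops ("bb" and "bb1") branching on
// the boolean argument, and a return block.
void test4(bool f) {
  while (f) { /* block bb:  br f, bb, bb1 */ }
  while (f) { /* block bb1: br f, bb1, return */ }
}

int main() {
  // The functions above only mirror the call-graph shape; calling the
  // recursive ones would not terminate, so just exercise test4.
  test4(false);
  return 0;
}
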
diff --git a/libclamav/c++/llvm/unittests/VMCore/VerifierTest.cpp b/libclamav/c++/llvm/unittests/VMCore/VerifierTest.cpp
deleted file mode 100644
index c8838c5..0000000
--- a/libclamav/c++/llvm/unittests/VMCore/VerifierTest.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-//===- llvm/unittest/VMCore/VerifierTest.cpp - Verifier unit tests --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/Instructions.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Analysis/Verifier.h"
-#include "gtest/gtest.h"
-
-namespace llvm {
-namespace {
-
-TEST(VerifierTest, Branch_i1) {
- LLVMContext &C = getGlobalContext();
- FunctionType *FTy = FunctionType::get(Type::getVoidTy(C), /*isVarArg=*/false);
- Function *F = Function::Create(FTy, GlobalValue::ExternalLinkage);
- BasicBlock *Entry = BasicBlock::Create(C, "entry", F);
- BasicBlock *Exit = BasicBlock::Create(C, "exit", F);
- ReturnInst::Create(C, Exit);
-
- // To avoid triggering an assertion in BranchInst::Create, we first create
- // a branch with an 'i1' condition ...
-
- Constant *False = ConstantInt::getFalse(C);
- BranchInst *BI = BranchInst::Create(Exit, Exit, False, Entry);
-
- // ... then use setOperand to redirect it to a value of different type.
-
- Constant *Zero32 = ConstantInt::get(IntegerType::get(C, 32), 0);
- BI->setOperand(0, Zero32);
-
- EXPECT_TRUE(verifyFunction(*F, ReturnStatusAction));
-}
-
-}
-}
diff --git a/libclamav/c++/llvm/utils/FileCheck/CMakeLists.txt b/libclamav/c++/llvm/utils/FileCheck/CMakeLists.txt
deleted file mode 100644
index 8fee03f..0000000
--- a/libclamav/c++/llvm/utils/FileCheck/CMakeLists.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-add_executable(FileCheck
- FileCheck.cpp
- )
-
-target_link_libraries(FileCheck LLVMSupport LLVMSystem)
-if( MINGW )
- target_link_libraries(FileCheck imagehlp psapi)
-endif( MINGW )
-if( LLVM_ENABLE_THREADS AND HAVE_LIBPTHREAD )
- target_link_libraries(FileCheck pthread)
-endif()
diff --git a/libclamav/c++/llvm/utils/FileCheck/FileCheck.cpp b/libclamav/c++/llvm/utils/FileCheck/FileCheck.cpp
deleted file mode 100644
index 3c4742c..0000000
--- a/libclamav/c++/llvm/utils/FileCheck/FileCheck.cpp
+++ /dev/null
@@ -1,733 +0,0 @@
-//===- FileCheck.cpp - Check that File's Contents match what is expected --===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// FileCheck does a line-by-line check of a file that validates whether it
-// contains the expected content. This is useful for regression tests etc.
-//
-// This program exits with an error status of 2 on error, exit status of 0 if
-// the file matched the expected contents, and exit status of 1 if it did not
-// contain the expected contents.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/PrettyStackTrace.h"
-#include "llvm/Support/Regex.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Signals.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringMap.h"
-#include <algorithm>
-using namespace llvm;
-
-static cl::opt<std::string>
-CheckFilename(cl::Positional, cl::desc("<check-file>"), cl::Required);
-
-static cl::opt<std::string>
-InputFilename("input-file", cl::desc("File to check (defaults to stdin)"),
- cl::init("-"), cl::value_desc("filename"));
-
-static cl::opt<std::string>
-CheckPrefix("check-prefix", cl::init("CHECK"),
- cl::desc("Prefix to use from check file (defaults to 'CHECK')"));
-
-static cl::opt<bool>
-NoCanonicalizeWhiteSpace("strict-whitespace",
- cl::desc("Do not treat all horizontal whitespace as equivalent"));
-
-//===----------------------------------------------------------------------===//
-// Pattern Handling Code.
-//===----------------------------------------------------------------------===//
-
-class Pattern {
- SMLoc PatternLoc;
-
- /// FixedStr - If non-empty, this pattern is a fixed string match with the
- /// specified fixed string.
- StringRef FixedStr;
-
- /// RegEx - If non-empty, this is a regex pattern.
- std::string RegExStr;
-
- /// VariableUses - Entries in this vector map to uses of a variable in the
- /// pattern, e.g. "foo[[bar]]baz". In this case, the RegExStr will contain
- /// "foobaz" and we'll get an entry in this vector that tells us to insert the
- /// value of bar at offset 3.
- std::vector<std::pair<StringRef, unsigned> > VariableUses;
-
- /// VariableDefs - Entries in this vector map to definitions of a variable in
- /// the pattern, e.g. "foo[[bar:.*]]baz". In this case, the RegExStr will
- /// contain "foo(.*)baz" and VariableDefs will contain the pair "bar",1. The
- /// index indicates what parenthesized value captures the variable value.
- std::vector<std::pair<StringRef, unsigned> > VariableDefs;
-
-public:
-
- Pattern() { }
-
- bool ParsePattern(StringRef PatternStr, SourceMgr &SM);
-
- /// Match - Match the pattern string against the input buffer Buffer. This
- /// returns the position that is matched or npos if there is no match. If
- /// there is a match, the size of the matched string is returned in MatchLen.
- ///
- /// The VariableTable StringMap provides the current values of filecheck
- /// variables and is updated if this match defines new values.
- size_t Match(StringRef Buffer, size_t &MatchLen,
- StringMap<StringRef> &VariableTable) const;
-
- /// PrintFailureInfo - Print additional information about a failure to match
- /// involving this pattern.
- void PrintFailureInfo(const SourceMgr &SM, StringRef Buffer,
- const StringMap<StringRef> &VariableTable) const;
-
-private:
- static void AddFixedStringToRegEx(StringRef FixedStr, std::string &TheStr);
- bool AddRegExToRegEx(StringRef RegExStr, unsigned &CurParen, SourceMgr &SM);
-
- /// ComputeMatchDistance - Compute an arbitrary estimate for the quality of
- /// matching this pattern at the start of \arg Buffer; a distance of zero
- /// should correspond to a perfect match.
- unsigned ComputeMatchDistance(StringRef Buffer,
- const StringMap<StringRef> &VariableTable) const;
-};
-
-
-bool Pattern::ParsePattern(StringRef PatternStr, SourceMgr &SM) {
- PatternLoc = SMLoc::getFromPointer(PatternStr.data());
-
- // Ignore trailing whitespace.
- while (!PatternStr.empty() &&
- (PatternStr.back() == ' ' || PatternStr.back() == '\t'))
- PatternStr = PatternStr.substr(0, PatternStr.size()-1);
-
- // Check that there is something on the line.
- if (PatternStr.empty()) {
- SM.PrintMessage(PatternLoc, "found empty check string with prefix '" +
- CheckPrefix+":'", "error");
- return true;
- }
-
- // Check to see if this is a fixed string, or if it has regex pieces.
- if (PatternStr.size() < 2 ||
- (PatternStr.find("{{") == StringRef::npos &&
- PatternStr.find("[[") == StringRef::npos)) {
- FixedStr = PatternStr;
- return false;
- }
-
- // Paren value #0 is for the fully matched string. Any new parenthesized
- // values add from there.
- unsigned CurParen = 1;
-
- // Otherwise, there is at least one regex piece. Build up the regex pattern
- // by escaping scary characters in fixed strings, building up one big regex.
- while (!PatternStr.empty()) {
- // RegEx matches.
- if (PatternStr.size() >= 2 &&
- PatternStr[0] == '{' && PatternStr[1] == '{') {
-
- // Otherwise, this is the start of a regex match. Scan for the }}.
- size_t End = PatternStr.find("}}");
- if (End == StringRef::npos) {
- SM.PrintMessage(SMLoc::getFromPointer(PatternStr.data()),
- "found start of regex string with no end '}}'", "error");
- return true;
- }
-
- if (AddRegExToRegEx(PatternStr.substr(2, End-2), CurParen, SM))
- return true;
- PatternStr = PatternStr.substr(End+2);
- continue;
- }
-
- // Named RegEx matches. These are of two forms: [[foo:.*]] which matches .*
- // (or some other regex) and assigns it to the FileCheck variable 'foo'. The
- // second form is [[foo]] which is a reference to foo. The variable name
- // itself must be of the form "[a-zA-Z_][0-9a-zA-Z_]*", otherwise we reject
- // it. This is to catch some common errors.
- if (PatternStr.size() >= 2 &&
- PatternStr[0] == '[' && PatternStr[1] == '[') {
- // Verify that it is terminated properly.
- size_t End = PatternStr.find("]]");
- if (End == StringRef::npos) {
- SM.PrintMessage(SMLoc::getFromPointer(PatternStr.data()),
- "invalid named regex reference, no ]] found", "error");
- return true;
- }
-
- StringRef MatchStr = PatternStr.substr(2, End-2);
- PatternStr = PatternStr.substr(End+2);
-
- // Get the regex name (e.g. "foo").
- size_t NameEnd = MatchStr.find(':');
- StringRef Name = MatchStr.substr(0, NameEnd);
-
- if (Name.empty()) {
- SM.PrintMessage(SMLoc::getFromPointer(Name.data()),
- "invalid name in named regex: empty name", "error");
- return true;
- }
-
- // Verify that the name is well formed.
- for (unsigned i = 0, e = Name.size(); i != e; ++i)
- if (Name[i] != '_' &&
- (Name[i] < 'a' || Name[i] > 'z') &&
- (Name[i] < 'A' || Name[i] > 'Z') &&
- (Name[i] < '0' || Name[i] > '9')) {
- SM.PrintMessage(SMLoc::getFromPointer(Name.data()+i),
- "invalid name in named regex", "error");
- return true;
- }
-
- // Name can't start with a digit.
- if (isdigit(Name[0])) {
- SM.PrintMessage(SMLoc::getFromPointer(Name.data()),
- "invalid name in named regex", "error");
- return true;
- }
-
- // Handle [[foo]].
- if (NameEnd == StringRef::npos) {
- VariableUses.push_back(std::make_pair(Name, RegExStr.size()));
- continue;
- }
-
- // Handle [[foo:.*]].
- VariableDefs.push_back(std::make_pair(Name, CurParen));
- RegExStr += '(';
- ++CurParen;
-
- if (AddRegExToRegEx(MatchStr.substr(NameEnd+1), CurParen, SM))
- return true;
-
- RegExStr += ')';
- }
-
- // Handle fixed string matches.
- // Find the end, which is the start of the next regex.
- size_t FixedMatchEnd = PatternStr.find("{{");
- FixedMatchEnd = std::min(FixedMatchEnd, PatternStr.find("[["));
- AddFixedStringToRegEx(PatternStr.substr(0, FixedMatchEnd), RegExStr);
- PatternStr = PatternStr.substr(FixedMatchEnd);
- continue;
- }
-
- return false;
-}
-
-void Pattern::AddFixedStringToRegEx(StringRef FixedStr, std::string &TheStr) {
- // Add the characters from FixedStr to the regex, escaping as needed. This
- // avoids "leaning toothpicks" in common patterns.
- for (unsigned i = 0, e = FixedStr.size(); i != e; ++i) {
- switch (FixedStr[i]) {
- // These are the special characters matched in "p_ere_exp".
- case '(':
- case ')':
- case '^':
- case '$':
- case '|':
- case '*':
- case '+':
- case '?':
- case '.':
- case '[':
- case '\\':
- case '{':
- TheStr += '\\';
- // FALL THROUGH.
- default:
- TheStr += FixedStr[i];
- break;
- }
- }
-}
-
-bool Pattern::AddRegExToRegEx(StringRef RegexStr, unsigned &CurParen,
- SourceMgr &SM) {
- Regex R(RegexStr);
- std::string Error;
- if (!R.isValid(Error)) {
- SM.PrintMessage(SMLoc::getFromPointer(RegexStr.data()),
- "invalid regex: " + Error, "error");
- return true;
- }
-
- RegExStr += RegexStr.str();
- CurParen += R.getNumMatches();
- return false;
-}
-
-/// Match - Match the pattern string against the input buffer Buffer. This
-/// returns the position that is matched or npos if there is no match. If
-/// there is a match, the size of the matched string is returned in MatchLen.
-size_t Pattern::Match(StringRef Buffer, size_t &MatchLen,
- StringMap<StringRef> &VariableTable) const {
- // If this is a fixed string pattern, just match it now.
- if (!FixedStr.empty()) {
- MatchLen = FixedStr.size();
- return Buffer.find(FixedStr);
- }
-
- // Regex match.
-
- // If there are variable uses, we need to create a temporary string with the
- // actual value.
- StringRef RegExToMatch = RegExStr;
- std::string TmpStr;
- if (!VariableUses.empty()) {
- TmpStr = RegExStr;
-
- unsigned InsertOffset = 0;
- for (unsigned i = 0, e = VariableUses.size(); i != e; ++i) {
- StringMap<StringRef>::iterator it =
- VariableTable.find(VariableUses[i].first);
- // If the variable is undefined, return an error.
- if (it == VariableTable.end())
- return StringRef::npos;
-
- // Look up the value and escape it so that we can plop it into the regex.
- std::string Value;
- AddFixedStringToRegEx(it->second, Value);
-
- // Plop it into the regex at the adjusted offset.
- TmpStr.insert(TmpStr.begin()+VariableUses[i].second+InsertOffset,
- Value.begin(), Value.end());
- InsertOffset += Value.size();
- }
-
- // Match the newly constructed regex.
- RegExToMatch = TmpStr;
- }
-
-
- SmallVector<StringRef, 4> MatchInfo;
- if (!Regex(RegExToMatch, Regex::Newline).match(Buffer, &MatchInfo))
- return StringRef::npos;
-
- // Successful regex match.
- assert(!MatchInfo.empty() && "Didn't get any match");
- StringRef FullMatch = MatchInfo[0];
-
- // If this defines any variables, remember their values.
- for (unsigned i = 0, e = VariableDefs.size(); i != e; ++i) {
- assert(VariableDefs[i].second < MatchInfo.size() &&
- "Internal paren error");
- VariableTable[VariableDefs[i].first] = MatchInfo[VariableDefs[i].second];
- }
-
- MatchLen = FullMatch.size();
- return FullMatch.data()-Buffer.data();
-}
-
-unsigned Pattern::ComputeMatchDistance(StringRef Buffer,
- const StringMap<StringRef> &VariableTable) const {
- // Just compute the edit distance to the example string. For regular expressions, we
- // just compare against the regex itself and hope for the best.
- //
- // FIXME: One easy improvement here is have the regex lib generate a single
- // example regular expression which matches, and use that as the example
- // string.
- StringRef ExampleString(FixedStr);
- if (ExampleString.empty())
- ExampleString = RegExStr;
-
- // Only compare up to the first line in the buffer, or the string size.
- StringRef BufferPrefix = Buffer.substr(0, ExampleString.size());
- BufferPrefix = BufferPrefix.split('\n').first;
- return BufferPrefix.edit_distance(ExampleString);
-}
-
-void Pattern::PrintFailureInfo(const SourceMgr &SM, StringRef Buffer,
- const StringMap<StringRef> &VariableTable) const{
- // If this was a regular expression using variables, print the current
- // variable values.
- if (!VariableUses.empty()) {
- for (unsigned i = 0, e = VariableUses.size(); i != e; ++i) {
- StringRef Var = VariableUses[i].first;
- StringMap<StringRef>::const_iterator it = VariableTable.find(Var);
- SmallString<256> Msg;
- raw_svector_ostream OS(Msg);
-
- // Check for undefined variable references.
- if (it == VariableTable.end()) {
- OS << "uses undefined variable \"";
- OS.write_escaped(Var) << "\"";
- } else {
- OS << "with variable \"";
- OS.write_escaped(Var) << "\" equal to \"";
- OS.write_escaped(it->second) << "\"";
- }
-
- SM.PrintMessage(SMLoc::getFromPointer(Buffer.data()), OS.str(), "note",
- /*ShowLine=*/false);
- }
- }
-
- // Attempt to find the closest/best fuzzy match. Usually an error happens
- // because some string in the output didn't exactly match. In these cases, we
- // would like to show the user a best guess at what "should have" matched, to
- // save them having to actually check the input manually.
- size_t NumLinesForward = 0;
- size_t Best = StringRef::npos;
- double BestQuality = 0;
-
- // Use an arbitrary 4k limit on how far we will search.
- for (size_t i = 0, e = std::min(size_t(4096), Buffer.size()); i != e; ++i) {
- if (Buffer[i] == '\n')
- ++NumLinesForward;
-
- // Patterns have leading whitespace stripped, so skip whitespace when
- // looking for something which looks like a pattern.
- if (Buffer[i] == ' ' || Buffer[i] == '\t')
- continue;
-
- // Compute the "quality" of this match as an arbitrary combination of the
- // match distance and the number of lines skipped to get to this match.
- unsigned Distance = ComputeMatchDistance(Buffer.substr(i), VariableTable);
- double Quality = Distance + (NumLinesForward / 100.);
-
- if (Quality < BestQuality || Best == StringRef::npos) {
- Best = i;
- BestQuality = Quality;
- }
- }
-
- if (Best != StringRef::npos && BestQuality < 50) {
- // Print the "possible intended match here" line if we found something
- // reasonable.
- SM.PrintMessage(SMLoc::getFromPointer(Buffer.data() + Best),
- "possible intended match here", "note");
-
- // FIXME: If we wanted to be really friendly we would show why the match
- // failed, as it can be hard to spot simple one character differences.
- }
-}
-
-//===----------------------------------------------------------------------===//
-// Check Strings.
-//===----------------------------------------------------------------------===//
-
-/// CheckString - This is a check that we found in the input file.
-struct CheckString {
- /// Pat - The pattern to match.
- Pattern Pat;
-
- /// Loc - The location in the match file that the check string was specified.
- SMLoc Loc;
-
- /// IsCheckNext - This is true if this is a CHECK-NEXT: directive (as opposed
- /// to a CHECK: directive).
- bool IsCheckNext;
-
- /// NotStrings - These are all of the strings that are disallowed from
- /// occurring between this match string and the previous one (or start of
- /// file).
- std::vector<std::pair<SMLoc, Pattern> > NotStrings;
-
- CheckString(const Pattern &P, SMLoc L, bool isCheckNext)
- : Pat(P), Loc(L), IsCheckNext(isCheckNext) {}
-};
-
-/// CanonicalizeInputFile - Remove duplicate horizontal space from the specified
-/// memory buffer, free it, and return a new one.
-static MemoryBuffer *CanonicalizeInputFile(MemoryBuffer *MB) {
- SmallVector<char, 16> NewFile;
- NewFile.reserve(MB->getBufferSize());
-
- for (const char *Ptr = MB->getBufferStart(), *End = MB->getBufferEnd();
- Ptr != End; ++Ptr) {
- // If this is not horizontal whitespace, copy it and continue.
- if (*Ptr != ' ' && *Ptr != '\t') {
- NewFile.push_back(*Ptr);
- continue;
- }
-
- // Otherwise, add one space and advance over neighboring space.
- NewFile.push_back(' ');
- while (Ptr+1 != End &&
- (Ptr[1] == ' ' || Ptr[1] == '\t'))
- ++Ptr;
- }
-
- // Free the old buffer and return a new one.
- MemoryBuffer *MB2 =
- MemoryBuffer::getMemBufferCopy(NewFile.data(),
- NewFile.data() + NewFile.size(),
- MB->getBufferIdentifier());
-
- delete MB;
- return MB2;
-}
-
-
-/// ReadCheckFile - Read the check file, which specifies the sequence of
-/// expected strings. The strings are added to the CheckStrings vector.
-static bool ReadCheckFile(SourceMgr &SM,
- std::vector<CheckString> &CheckStrings) {
- // Open the check file, and tell SourceMgr about it.
- std::string ErrorStr;
- MemoryBuffer *F =
- MemoryBuffer::getFileOrSTDIN(CheckFilename.c_str(), &ErrorStr);
- if (F == 0) {
- errs() << "Could not open check file '" << CheckFilename << "': "
- << ErrorStr << '\n';
- return true;
- }
-
- // If we want to canonicalize whitespace, strip excess whitespace from the
- // buffer containing the CHECK lines.
- if (!NoCanonicalizeWhiteSpace)
- F = CanonicalizeInputFile(F);
-
- SM.AddNewSourceBuffer(F, SMLoc());
-
- // Find all instances of CheckPrefix followed by : in the file.
- StringRef Buffer = F->getBuffer();
-
- std::vector<std::pair<SMLoc, Pattern> > NotMatches;
-
- while (1) {
- // See if Prefix occurs in the memory buffer.
- Buffer = Buffer.substr(Buffer.find(CheckPrefix));
-
- // If we didn't find a match, we're done.
- if (Buffer.empty())
- break;
-
- const char *CheckPrefixStart = Buffer.data();
-
- // When we find a check prefix, keep track of whether we find CHECK: or
- // CHECK-NEXT:
- bool IsCheckNext = false, IsCheckNot = false;
-
- // Verify that the : is present after the prefix.
- if (Buffer[CheckPrefix.size()] == ':') {
- Buffer = Buffer.substr(CheckPrefix.size()+1);
- } else if (Buffer.size() > CheckPrefix.size()+6 &&
- memcmp(Buffer.data()+CheckPrefix.size(), "-NEXT:", 6) == 0) {
- Buffer = Buffer.substr(CheckPrefix.size()+7);
- IsCheckNext = true;
- } else if (Buffer.size() > CheckPrefix.size()+5 &&
- memcmp(Buffer.data()+CheckPrefix.size(), "-NOT:", 5) == 0) {
- Buffer = Buffer.substr(CheckPrefix.size()+6);
- IsCheckNot = true;
- } else {
- Buffer = Buffer.substr(1);
- continue;
- }
-
- // Okay, we found the prefix, yay. Remember the rest of the line, but
- // ignore leading and trailing whitespace.
- Buffer = Buffer.substr(Buffer.find_first_not_of(" \t"));
-
- // Scan ahead to the end of line.
- size_t EOL = Buffer.find_first_of("\n\r");
-
- // Remember the location of the start of the pattern, for diagnostics.
- SMLoc PatternLoc = SMLoc::getFromPointer(Buffer.data());
-
- // Parse the pattern.
- Pattern P;
- if (P.ParsePattern(Buffer.substr(0, EOL), SM))
- return true;
-
- Buffer = Buffer.substr(EOL);
-
-
- // Verify that CHECK-NEXT lines have at least one CHECK line before them.
- if (IsCheckNext && CheckStrings.empty()) {
- SM.PrintMessage(SMLoc::getFromPointer(CheckPrefixStart),
- "found '"+CheckPrefix+"-NEXT:' without previous '"+
- CheckPrefix+ ": line", "error");
- return true;
- }
-
- // Handle CHECK-NOT.
- if (IsCheckNot) {
- NotMatches.push_back(std::make_pair(SMLoc::getFromPointer(Buffer.data()),
- P));
- continue;
- }
-
-
- // Okay, add the string we captured to the output vector and move on.
- CheckStrings.push_back(CheckString(P,
- PatternLoc,
- IsCheckNext));
- std::swap(NotMatches, CheckStrings.back().NotStrings);
- }
-
- if (CheckStrings.empty()) {
- errs() << "error: no check strings found with prefix '" << CheckPrefix
- << ":'\n";
- return true;
- }
-
- if (!NotMatches.empty()) {
- errs() << "error: '" << CheckPrefix
- << "-NOT:' not supported after last check line.\n";
- return true;
- }
-
- return false;
-}
-
-static void PrintCheckFailed(const SourceMgr &SM, const CheckString &CheckStr,
- StringRef Buffer,
- StringMap<StringRef> &VariableTable) {
- // Otherwise, we have an error, emit an error message.
- SM.PrintMessage(CheckStr.Loc, "expected string not found in input",
- "error");
-
- // Print the "scanning from here" line. If the current position is at the
- // end of a line, advance to the start of the next line.
- Buffer = Buffer.substr(Buffer.find_first_not_of(" \t\n\r"));
-
- SM.PrintMessage(SMLoc::getFromPointer(Buffer.data()), "scanning from here",
- "note");
-
- // Allow the pattern to print additional information if desired.
- CheckStr.Pat.PrintFailureInfo(SM, Buffer, VariableTable);
-}
-
-/// CountNumNewlinesBetween - Count the number of newlines in the specified
-/// range.
-static unsigned CountNumNewlinesBetween(StringRef Range) {
- unsigned NumNewLines = 0;
- while (1) {
- // Scan for newline.
- Range = Range.substr(Range.find_first_of("\n\r"));
- if (Range.empty()) return NumNewLines;
-
- ++NumNewLines;
-
- // Handle \n\r and \r\n as a single newline.
- if (Range.size() > 1 &&
- (Range[1] == '\n' || Range[1] == '\r') &&
- (Range[0] != Range[1]))
- Range = Range.substr(1);
- Range = Range.substr(1);
- }
-}
-
-int main(int argc, char **argv) {
- sys::PrintStackTraceOnErrorSignal();
- PrettyStackTraceProgram X(argc, argv);
- cl::ParseCommandLineOptions(argc, argv);
-
- SourceMgr SM;
-
- // Read the expected strings from the check file.
- std::vector<CheckString> CheckStrings;
- if (ReadCheckFile(SM, CheckStrings))
- return 2;
-
- // Open the file to check and add it to SourceMgr.
- std::string ErrorStr;
- MemoryBuffer *F =
- MemoryBuffer::getFileOrSTDIN(InputFilename.c_str(), &ErrorStr);
- if (F == 0) {
- errs() << "Could not open input file '" << InputFilename << "': "
- << ErrorStr << '\n';
- return true;
- }
-
- // Remove duplicate spaces in the input file if requested.
- if (!NoCanonicalizeWhiteSpace)
- F = CanonicalizeInputFile(F);
-
- SM.AddNewSourceBuffer(F, SMLoc());
-
- /// VariableTable - This holds all the current filecheck variables.
- StringMap<StringRef> VariableTable;
-
- // Check that we have all of the expected strings, in order, in the input
- // file.
- StringRef Buffer = F->getBuffer();
-
- const char *LastMatch = Buffer.data();
-
- for (unsigned StrNo = 0, e = CheckStrings.size(); StrNo != e; ++StrNo) {
- const CheckString &CheckStr = CheckStrings[StrNo];
-
- StringRef SearchFrom = Buffer;
-
- // Find StrNo in the file.
- size_t MatchLen = 0;
- Buffer = Buffer.substr(CheckStr.Pat.Match(Buffer, MatchLen, VariableTable));
-
- // If we didn't find a match, reject the input.
- if (Buffer.empty()) {
- PrintCheckFailed(SM, CheckStr, SearchFrom, VariableTable);
- return 1;
- }
-
- StringRef SkippedRegion(LastMatch, Buffer.data()-LastMatch);
-
- // If this check is a "CHECK-NEXT", verify that the previous match was on
- // the previous line (i.e. that there is one newline between them).
- if (CheckStr.IsCheckNext) {
- // Count the number of newlines between the previous match and this one.
- assert(LastMatch != F->getBufferStart() &&
- "CHECK-NEXT can't be the first check in a file");
-
- unsigned NumNewLines = CountNumNewlinesBetween(SkippedRegion);
- if (NumNewLines == 0) {
- SM.PrintMessage(CheckStr.Loc,
- CheckPrefix+"-NEXT: is on the same line as previous match",
- "error");
- SM.PrintMessage(SMLoc::getFromPointer(Buffer.data()),
- "'next' match was here", "note");
- SM.PrintMessage(SMLoc::getFromPointer(LastMatch),
- "previous match was here", "note");
- return 1;
- }
-
- if (NumNewLines != 1) {
- SM.PrintMessage(CheckStr.Loc,
- CheckPrefix+
- "-NEXT: is not on the line after the previous match",
- "error");
- SM.PrintMessage(SMLoc::getFromPointer(Buffer.data()),
- "'next' match was here", "note");
- SM.PrintMessage(SMLoc::getFromPointer(LastMatch),
- "previous match was here", "note");
- return 1;
- }
- }
-
- // If this match had "not strings", verify that they don't exist in the
- // skipped region.
- for (unsigned ChunkNo = 0, e = CheckStr.NotStrings.size();
- ChunkNo != e; ++ChunkNo) {
- size_t MatchLen = 0;
- size_t Pos = CheckStr.NotStrings[ChunkNo].second.Match(SkippedRegion,
- MatchLen,
- VariableTable);
- if (Pos == StringRef::npos) continue;
-
- SM.PrintMessage(SMLoc::getFromPointer(LastMatch+Pos),
- CheckPrefix+"-NOT: string occurred!", "error");
- SM.PrintMessage(CheckStr.NotStrings[ChunkNo].first,
- CheckPrefix+"-NOT: pattern specified here", "note");
- return 1;
- }
-
-
- // Otherwise, everything is good. Step over the matched text and remember
- // the position after the match as the end of the last match.
- Buffer = Buffer.substr(MatchLen);
- LastMatch = Buffer.data();
- }
-
- return 0;
-}
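
The deleted AddFixedStringToRegEx above turns a literal chunk of a CHECK line into an equivalent POSIX-ERE fragment by backslash-escaping the metacharacters before splicing it into one big regex. A minimal standalone sketch of that escaping step using only the standard library; the name appendEscaped and the sample input are made up for illustration:

#include <iostream>
#include <string>

// Escape the POSIX-ERE metacharacters so a literal chunk of a CHECK line can
// be appended to a larger regular expression without changing its meaning.
static void appendEscaped(const std::string &Fixed, std::string &Out) {
  for (char C : Fixed) {
    switch (C) {
    case '(': case ')': case '^': case '$': case '|': case '*':
    case '+': case '?': case '.': case '[': case '\\': case '{':
      Out += '\\';   // fall through and emit the character itself
    default:
      Out += C;
    }
  }
}

int main() {
  std::string Re;
  appendEscaped("a+b (c)", Re);
  std::cout << Re << "\n";   // prints: a\+b \(c\)
  return 0;
}
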
diff --git a/libclamav/c++/llvm/utils/FileCheck/Makefile b/libclamav/c++/llvm/utils/FileCheck/Makefile
deleted file mode 100644
index f1af5b6..0000000
--- a/libclamav/c++/llvm/utils/FileCheck/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-##===- utils/FileCheck/Makefile ----------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TOOLNAME = FileCheck
-USEDLIBS = LLVMSupport.a LLVMSystem.a
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-# Don't install this utility
-NO_INSTALL = 1
-
-include $(LEVEL)/Makefile.common
-
diff --git a/libclamav/c++/llvm/utils/FileUpdate/CMakeLists.txt b/libclamav/c++/llvm/utils/FileUpdate/CMakeLists.txt
deleted file mode 100644
index bacbd16..0000000
--- a/libclamav/c++/llvm/utils/FileUpdate/CMakeLists.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-add_executable(FileUpdate
- FileUpdate.cpp
- )
-
-target_link_libraries(FileUpdate LLVMSupport LLVMSystem)
-if( MINGW )
- target_link_libraries(FileUpdate imagehlp psapi)
-endif( MINGW )
-if( LLVM_ENABLE_THREADS AND HAVE_LIBPTHREAD )
- target_link_libraries(FileUpdate pthread)
-endif()
diff --git a/libclamav/c++/llvm/utils/FileUpdate/FileUpdate.cpp b/libclamav/c++/llvm/utils/FileUpdate/FileUpdate.cpp
deleted file mode 100644
index 26fd75e..0000000
--- a/libclamav/c++/llvm/utils/FileUpdate/FileUpdate.cpp
+++ /dev/null
@@ -1,86 +0,0 @@
-//===- FileUpdate.cpp - Conditionally update a file -----------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// FileUpdate is a utility for conditionally updating a file from its input
-// based on whether the input differs from the output. It is used to avoid
-// unnecessary modifications in a build system.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/PrettyStackTrace.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/System/Signals.h"
-using namespace llvm;
-
-static cl::opt<bool>
-Quiet("quiet", cl::desc("Don't print unnecessary status information"),
- cl::init(false));
-
-static cl::opt<std::string>
-InputFilename("input-file", cl::desc("Input file (defaults to stdin)"),
- cl::init("-"), cl::value_desc("filename"));
-
-static cl::opt<std::string>
-OutputFilename(cl::Positional, cl::desc("<output-file>"), cl::Required);
-
-int main(int argc, char **argv) {
- sys::PrintStackTraceOnErrorSignal();
- PrettyStackTraceProgram X(argc, argv);
- cl::ParseCommandLineOptions(argc, argv);
-
- // Get the input data.
- std::string ErrorStr;
- MemoryBuffer *In =
- MemoryBuffer::getFileOrSTDIN(InputFilename.c_str(), &ErrorStr);
- if (In == 0) {
- errs() << argv[0] << ": error: Unable to get input '"
- << InputFilename << "': " << ErrorStr << '\n';
- return 1;
- }
-
- // Get the output data.
- MemoryBuffer *Out = MemoryBuffer::getFile(OutputFilename.c_str(), &ErrorStr);
-
- // If the output exists and the contents match, we are done.
- if (Out && In->getBufferSize() == Out->getBufferSize() &&
- memcmp(In->getBufferStart(), Out->getBufferStart(),
- Out->getBufferSize()) == 0) {
- if (!Quiet)
- outs() << argv[0] << ": Not updating '" << OutputFilename
- << "', contents match input.\n";
- return 0;
- }
-
- delete Out;
-
- // Otherwise, overwrite the output.
- if (!Quiet)
- outs() << argv[0] << ": Updating '" << OutputFilename
- << "', contents changed.\n";
- raw_fd_ostream OutStream(OutputFilename.c_str(), ErrorStr,
- raw_fd_ostream::F_Binary);
- if (!ErrorStr.empty()) {
- errs() << argv[0] << ": Unable to write output '"
- << OutputFilename << "': " << ErrorStr << '\n';
- return 1;
- }
-
- OutStream.write(In->getBufferStart(), In->getBufferSize());
- OutStream.close();
-
- if (OutStream.has_error()) {
- errs() << argv[0] << ": Could not open output file '"
- << OutputFilename << "': " << ErrorStr << '\n';
- return 1;
- }
-
- return 0;
-}
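
FileUpdate, removed above, exists so a build step can regenerate a file without touching its timestamp when nothing changed. A rough portable sketch of the same compare-then-write idea, independent of the LLVM MemoryBuffer/raw_fd_ostream APIs; the slurp helper and the command-line handling are assumptions for illustration, not the tool's actual code:

#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

// Read a whole file into a string; returns false if it cannot be opened.
static bool slurp(const std::string &Path, std::string &Out) {
  std::ifstream In(Path, std::ios::binary);
  if (!In) return false;
  std::ostringstream SS;
  SS << In.rdbuf();
  Out = SS.str();
  return true;
}

int main(int argc, char **argv) {
  if (argc != 3) {
    std::cerr << "usage: fileupdate <input> <output>\n";
    return 1;
  }
  std::string NewData, OldData;
  if (!slurp(argv[1], NewData)) {
    std::cerr << "cannot read " << argv[1] << "\n";
    return 1;
  }
  // If the output exists and already matches the input, leave it alone.
  if (slurp(argv[2], OldData) && OldData == NewData) {
    std::cout << "not updating " << argv[2] << ", contents match\n";
    return 0;
  }
  std::ofstream Out(argv[2], std::ios::binary | std::ios::trunc);
  Out << NewData;
  return Out ? 0 : 1;
}

The early return is the whole point: leaving the output's timestamp untouched keeps the build system from rebuilding everything that depends on the generated file.
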
diff --git a/libclamav/c++/llvm/utils/FileUpdate/Makefile b/libclamav/c++/llvm/utils/FileUpdate/Makefile
deleted file mode 100644
index 5b545c2..0000000
--- a/libclamav/c++/llvm/utils/FileUpdate/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-##===- utils/FileUpdate/Makefile ---------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TOOLNAME = FileUpdate
-USEDLIBS = LLVMSupport.a LLVMSystem.a
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-# Don't install this utility
-NO_INSTALL = 1
-
-include $(LEVEL)/Makefile.common
-
diff --git a/libclamav/c++/llvm/utils/Makefile b/libclamav/c++/llvm/utils/Makefile
index 000705e..1a4dcca 100644
--- a/libclamav/c++/llvm/utils/Makefile
+++ b/libclamav/c++/llvm/utils/Makefile
@@ -8,14 +8,15 @@
##===----------------------------------------------------------------------===##
LEVEL = ..
-PARALLEL_DIRS := TableGen fpcmp PerfectShuffle FileCheck FileUpdate count not unittest
+PARALLEL_DIRS := FileCheck FileUpdate TableGen PerfectShuffle \
+ count fpcmp llvm-lit not unittest
-EXTRA_DIST := cgiplotNLT.pl check-each-file codegen-diff countloc.sh cvsupdate \
+EXTRA_DIST := cgiplotNLT.pl check-each-file codegen-diff countloc.sh \
DSAclean.py DSAextract.py emacs findsym.pl GenLibDeps.pl \
getsrcs.sh importNLT.pl llvmdo llvmgrep llvm-native-gcc \
llvm-native-gxx makellvm NightlyTest.gnuplot NightlyTest.pl \
NightlyTestTemplate.html NLT.schema OldenDataRecover.pl \
- parseNLT.pl plotNLT.pl profile.pl RegressionFinder.pl userloc.pl \
+ parseNLT.pl plotNLT.pl profile.pl \
webNLT.pl vim
include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/utils/NewNightlyTest.pl b/libclamav/c++/llvm/utils/NewNightlyTest.pl
index a306382..1b48168 100755
--- a/libclamav/c++/llvm/utils/NewNightlyTest.pl
+++ b/libclamav/c++/llvm/utils/NewNightlyTest.pl
@@ -24,6 +24,7 @@ use Socket;
# IMPLEMENTED.
# -nickname NAME The NAME argument specifies the nickname this script
# will submit to the nightlytest results repository.
+# -nouname Don't include uname data (machine will be identified by nickname only).
# -submit-server Specifies a server to submit the test results to. If this
# option is not specified it defaults to
# llvm.org. This is basically just the address of the
@@ -46,8 +47,8 @@ use Socket;
# -noclean Do not run 'make clean' before building.
# -nobuild Do not build llvm. If tests are enabled perform them
# on the llvm build specified in the build directory
-# -release Build an LLVM Release version
-# -release-asserts Build an LLVM ReleaseAsserts version
+# -release Build an LLVM Release+Asserts version
+# -release-asserts Build an LLVM Release version
# -disable-bindings Disable building LLVM bindings.
# -with-clang Checkout Clang source into tools/clang.
# -compileflags Next argument specifies extra options passed to make when
@@ -220,6 +221,7 @@ while (scalar(@ARGV) and ($_ = $ARGV[0], /^[-+]/)) {
$LLVMGCCPATH = $ARGV[0] . '/bin';
shift; next;}
if (/^-noexternals$/) { $NOEXTERNALS = 1; next; }
+ if (/^-nouname$/) { $NOUNAME = 1; next; }
if (/^-use-gmake/) { $MAKECMD = "gmake"; shift; next; }
if (/^-extraflags/) { $CONFIGUREARGS .=
" --with-extra-options=\'$ARGV[0]\'"; shift; next;}
@@ -693,12 +695,21 @@ $endtime = `date "+20%y-%m-%d %H:%M:%S"`;
if ( $VERBOSE ) { print "PREPARING LOGS TO BE SENT TO SERVER\n"; }
-$machine_data = "uname: ".`uname -a`.
- "hardware: ".`uname -m`.
- "os: ".`uname -sr`.
- "name: ".`uname -n`.
- "date: ".`date \"+20%y-%m-%d\"`.
- "time: ".`date +\"%H:%M:%S\"`;
+if ( ! $NOUNAME ) {
+ $machine_data = "uname: ".`uname -a`.
+ "hardware: ".`uname -m`.
+ "os: ".`uname -sr`.
+ "name: ".`uname -n`.
+ "date: ".`date \"+20%y-%m-%d\"`.
+ "time: ".`date +\"%H:%M:%S\"`;
+} else {
+ $machine_data = "uname: (excluded)\n".
+ "hardware: ".`uname -m`.
+ "os: ".`uname -sr`.
+ "name: $nickname\n".
+ "date: ".`date \"+20%y-%m-%d\"`.
+ "time: ".`date +\"%H:%M:%S\"`;
+}
# Get gcc version.
my $gcc_version_long = "";
diff --git a/libclamav/c++/llvm/utils/PerfectShuffle/Makefile b/libclamav/c++/llvm/utils/PerfectShuffle/Makefile
deleted file mode 100644
index 28709fe..0000000
--- a/libclamav/c++/llvm/utils/PerfectShuffle/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-##===- utils/PerfectShuffle/Makefile -----------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TOOLNAME = llvm-PerfectShuffle
-NO_INSTALL = 1
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-include $(LEVEL)/Makefile.common
-
diff --git a/libclamav/c++/llvm/utils/PerfectShuffle/PerfectShuffle.cpp b/libclamav/c++/llvm/utils/PerfectShuffle/PerfectShuffle.cpp
deleted file mode 100644
index b94a7d3..0000000
--- a/libclamav/c++/llvm/utils/PerfectShuffle/PerfectShuffle.cpp
+++ /dev/null
@@ -1,571 +0,0 @@
-//===-- PerfectShuffle.cpp - Perfect Shuffle Generator --------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file computes an optimal sequence of instructions for doing all shuffles
-// of two 4-element vectors. With a release build and when configured to emit
-// an altivec instruction table, this takes about 30s to run on a 2.7Ghz
-// PowerPC G5.
-//
-//===----------------------------------------------------------------------===//
-
-#include <iostream>
-#include <vector>
-#include <cassert>
-#include <cstdlib>
-struct Operator;
-
-// Masks are 4-nibble hex numbers. A value of 0-7 in any nibble means that the
-// entry takes that element of the input vectors. A value of 8 means the
-// entry is undefined.
-
-// Mask manipulation functions.
-static inline unsigned short MakeMask(unsigned V0, unsigned V1,
- unsigned V2, unsigned V3) {
- return (V0 << (3*4)) | (V1 << (2*4)) | (V2 << (1*4)) | (V3 << (0*4));
-}
-
-/// getMaskElt - Return element N of the specified mask.
-static unsigned getMaskElt(unsigned Mask, unsigned Elt) {
- return (Mask >> ((3-Elt)*4)) & 0xF;
-}
-
-static unsigned setMaskElt(unsigned Mask, unsigned Elt, unsigned NewVal) {
- unsigned FieldShift = ((3-Elt)*4);
- return (Mask & ~(0xF << FieldShift)) | (NewVal << FieldShift);
-}
-
-// Reject elements where the values are 9-15.
-static bool isValidMask(unsigned short Mask) {
- unsigned short UndefBits = Mask & 0x8888;
- return (Mask & ((UndefBits >> 1)|(UndefBits>>2)|(UndefBits>>3))) == 0;
-}
-
-/// hasUndefElements - Return true if any of the elements in the mask are undefs
-///
-static bool hasUndefElements(unsigned short Mask) {
- return (Mask & 0x8888) != 0;
-}
-
-/// isOnlyLHSMask - Return true if this mask only refers to its LHS, not
-/// including undef values.
-static bool isOnlyLHSMask(unsigned short Mask) {
- return (Mask & 0x4444) == 0;
-}
-
-/// getLHSOnlyMask - Given a mask that refers to its LHS and RHS, modify it to
-/// refer to the LHS only (for when one argument value is passed into the same
-/// function twice).
-#if 0
-static unsigned short getLHSOnlyMask(unsigned short Mask) {
- return Mask & 0xBBBB; // Keep only LHS and Undefs.
-}
-#endif
-
-/// getCompressedMask - Turn a 16-bit uncompressed mask (where each elt uses 4
-/// bits) into a compressed 13-bit mask, where each elt is multiplied by 9.
-static unsigned getCompressedMask(unsigned short Mask) {
- return getMaskElt(Mask, 0)*9*9*9 + getMaskElt(Mask, 1)*9*9 +
- getMaskElt(Mask, 2)*9 + getMaskElt(Mask, 3);
-}
-
-static void PrintMask(unsigned i, std::ostream &OS) {
- OS << "<" << (char)(getMaskElt(i, 0) == 8 ? 'u' : ('0'+getMaskElt(i, 0)))
- << "," << (char)(getMaskElt(i, 1) == 8 ? 'u' : ('0'+getMaskElt(i, 1)))
- << "," << (char)(getMaskElt(i, 2) == 8 ? 'u' : ('0'+getMaskElt(i, 2)))
- << "," << (char)(getMaskElt(i, 3) == 8 ? 'u' : ('0'+getMaskElt(i, 3)))
- << ">";
-}
-
-/// ShuffleVal - This represents a shufflevector operation.
-struct ShuffleVal {
- unsigned Cost; // Number of instrs used to generate this value.
- Operator *Op; // The Operation used to generate this value.
- unsigned short Arg0, Arg1; // Input operands for this value.
-
- ShuffleVal() : Cost(1000000) {}
-};
-
-
-/// ShufTab - This is the actual shuffle table that we are trying to generate.
-///
-static ShuffleVal ShufTab[65536];
-
-/// TheOperators - All of the operators that this target supports.
-static std::vector<Operator*> TheOperators;
-
-/// Operator - This is a vector operation that is available for use.
-struct Operator {
- unsigned short ShuffleMask;
- unsigned short OpNum;
- const char *Name;
- unsigned Cost;
-
- Operator(unsigned short shufflemask, const char *name, unsigned opnum,
- unsigned cost = 1)
- : ShuffleMask(shufflemask), OpNum(opnum), Name(name), Cost(cost) {
- TheOperators.push_back(this);
- }
- ~Operator() {
- assert(TheOperators.back() == this);
- TheOperators.pop_back();
- }
-
- bool isOnlyLHSOperator() const {
- return isOnlyLHSMask(ShuffleMask);
- }
-
- const char *getName() const { return Name; }
- unsigned getCost() const { return Cost; }
-
- unsigned short getTransformedMask(unsigned short LHSMask, unsigned RHSMask) {
- // Extract the elements from LHSMask and RHSMask, as appropriate.
- unsigned Result = 0;
- for (unsigned i = 0; i != 4; ++i) {
- unsigned SrcElt = (ShuffleMask >> (4*i)) & 0xF;
- unsigned ResElt;
- if (SrcElt < 4)
- ResElt = getMaskElt(LHSMask, SrcElt);
- else if (SrcElt < 8)
- ResElt = getMaskElt(RHSMask, SrcElt-4);
- else {
- assert(SrcElt == 8 && "Bad src elt!");
- ResElt = 8;
- }
- Result |= ResElt << (4*i);
- }
- return Result;
- }
-};
-
-static const char *getZeroCostOpName(unsigned short Op) {
- if (ShufTab[Op].Arg0 == 0x0123)
- return "LHS";
- else if (ShufTab[Op].Arg0 == 0x4567)
- return "RHS";
- else {
- assert(0 && "bad zero cost operation");
- abort();
- }
-}
-
-static void PrintOperation(unsigned ValNo, unsigned short Vals[]) {
- unsigned short ThisOp = Vals[ValNo];
- std::cerr << "t" << ValNo;
- PrintMask(ThisOp, std::cerr);
- std::cerr << " = " << ShufTab[ThisOp].Op->getName() << "(";
-
- if (ShufTab[ShufTab[ThisOp].Arg0].Cost == 0) {
- std::cerr << getZeroCostOpName(ShufTab[ThisOp].Arg0);
- PrintMask(ShufTab[ThisOp].Arg0, std::cerr);
- } else {
- // Figure out what tmp # it is.
- for (unsigned i = 0; ; ++i)
- if (Vals[i] == ShufTab[ThisOp].Arg0) {
- std::cerr << "t" << i;
- break;
- }
- }
-
- if (!ShufTab[Vals[ValNo]].Op->isOnlyLHSOperator()) {
- std::cerr << ", ";
- if (ShufTab[ShufTab[ThisOp].Arg1].Cost == 0) {
- std::cerr << getZeroCostOpName(ShufTab[ThisOp].Arg1);
- PrintMask(ShufTab[ThisOp].Arg1, std::cerr);
- } else {
- // Figure out what tmp # it is.
- for (unsigned i = 0; ; ++i)
- if (Vals[i] == ShufTab[ThisOp].Arg1) {
- std::cerr << "t" << i;
- break;
- }
- }
- }
- std::cerr << ") ";
-}
-
-static unsigned getNumEntered() {
- unsigned Count = 0;
- for (unsigned i = 0; i != 65536; ++i)
- Count += ShufTab[i].Cost < 100;
- return Count;
-}
-
-static void EvaluateOps(unsigned short Elt, unsigned short Vals[],
- unsigned &NumVals) {
- if (ShufTab[Elt].Cost == 0) return;
-
- // If this value has already been evaluated, it is free. FIXME: match undefs.
- for (unsigned i = 0, e = NumVals; i != e; ++i)
- if (Vals[i] == Elt) return;
-
- // Otherwise, get the operands of the value, then add it.
- unsigned Arg0 = ShufTab[Elt].Arg0, Arg1 = ShufTab[Elt].Arg1;
- if (ShufTab[Arg0].Cost)
- EvaluateOps(Arg0, Vals, NumVals);
- if (Arg0 != Arg1 && ShufTab[Arg1].Cost)
- EvaluateOps(Arg1, Vals, NumVals);
-
- Vals[NumVals++] = Elt;
-}
-
-
-int main() {
- // Seed the table with accesses to the LHS and RHS.
- ShufTab[0x0123].Cost = 0;
- ShufTab[0x0123].Op = 0;
- ShufTab[0x0123].Arg0 = 0x0123;
- ShufTab[0x4567].Cost = 0;
- ShufTab[0x4567].Op = 0;
- ShufTab[0x4567].Arg0 = 0x4567;
-
- // Seed the first-level of shuffles, shuffles whose inputs are the input to
- // the vectorshuffle operation.
- bool MadeChange = true;
- unsigned OpCount = 0;
- while (MadeChange) {
- MadeChange = false;
- ++OpCount;
- std::cerr << "Starting iteration #" << OpCount << " with "
- << getNumEntered() << " entries established.\n";
-
- // Scan the table for two reasons: First, compute the maximum cost of any
- // operation left in the table. Second, make sure that values with undefs
- // have the cheapest alternative that they match.
- unsigned MaxCost = ShufTab[0].Cost;
- for (unsigned i = 1; i != 0x8889; ++i) {
- if (!isValidMask(i)) continue;
- if (ShufTab[i].Cost > MaxCost)
- MaxCost = ShufTab[i].Cost;
-
- // If this value has an undef, make it be computed the cheapest possible
- // way of any of the things that it matches.
- if (hasUndefElements(i)) {
- // This code is a little bit tricky, so here's the idea: consider some
- // permutation, like 7u4u. To compute the lowest cost for 7u4u, we
- // need to take the minimum cost of all of 7[0-8]4[0-8], 81 entries. If
- // there are 3 undefs, the number rises to 729 entries we have to scan,
- // and for the 4 undef case, we have to scan the whole table.
- //
- // Instead of doing this huge amount of scanning, we process the table
- // entries *in order*, and use the fact that 'u' is 8, larger than any
- // valid index. Given an entry like 7u4u then, we only need to scan
- // 7[0-7]4u - 8 entries. We can get away with this, because we already
- // know that each of 704u, 714u, 724u, etc contain the minimum value of
- // all of the 704[0-8], 714[0-8] and 724[0-8] entries respectively.
- unsigned UndefIdx;
- if (i & 0x8000)
- UndefIdx = 0;
- else if (i & 0x0800)
- UndefIdx = 1;
- else if (i & 0x0080)
- UndefIdx = 2;
- else if (i & 0x0008)
- UndefIdx = 3;
- else
- abort();
-
- unsigned MinVal = i;
- unsigned MinCost = ShufTab[i].Cost;
-
- // Scan the 8 entries.
- for (unsigned j = 0; j != 8; ++j) {
- unsigned NewElt = setMaskElt(i, UndefIdx, j);
- if (ShufTab[NewElt].Cost < MinCost) {
- MinCost = ShufTab[NewElt].Cost;
- MinVal = NewElt;
- }
- }
-
- // If we found something cheaper than what was here before, use it.
- if (i != MinVal) {
- MadeChange = true;
- ShufTab[i] = ShufTab[MinVal];
- }
- }
- }
-
- for (unsigned LHS = 0; LHS != 0x8889; ++LHS) {
- if (!isValidMask(LHS)) continue;
- if (ShufTab[LHS].Cost > 1000) continue;
-
- // If nothing involving this operand could possibly be cheaper than what
- // we already have, don't consider it.
- if (ShufTab[LHS].Cost + 1 >= MaxCost)
- continue;
-
- for (unsigned opnum = 0, e = TheOperators.size(); opnum != e; ++opnum) {
- Operator *Op = TheOperators[opnum];
-
- // Evaluate op(LHS,LHS)
- unsigned ResultMask = Op->getTransformedMask(LHS, LHS);
-
- unsigned Cost = ShufTab[LHS].Cost + Op->getCost();
- if (Cost < ShufTab[ResultMask].Cost) {
- ShufTab[ResultMask].Cost = Cost;
- ShufTab[ResultMask].Op = Op;
- ShufTab[ResultMask].Arg0 = LHS;
- ShufTab[ResultMask].Arg1 = LHS;
- MadeChange = true;
- }
-
- // If this is a two input instruction, include the op(x,y) cases. If
- // this is a one input instruction, skip this.
- if (Op->isOnlyLHSOperator()) continue;
-
- for (unsigned RHS = 0; RHS != 0x8889; ++RHS) {
- if (!isValidMask(RHS)) continue;
- if (ShufTab[RHS].Cost > 1000) continue;
-
- // If nothing involving this operand could possibly be cheaper than
- // what we already have, don't consider it.
- if (ShufTab[RHS].Cost + 1 >= MaxCost)
- continue;
-
-
- // Evaluate op(LHS,RHS)
- unsigned ResultMask = Op->getTransformedMask(LHS, RHS);
-
- if (ShufTab[ResultMask].Cost <= OpCount ||
- ShufTab[ResultMask].Cost <= ShufTab[LHS].Cost ||
- ShufTab[ResultMask].Cost <= ShufTab[RHS].Cost)
- continue;
-
- // Figure out the cost to evaluate this, knowing that CSE's only need
- // to be evaluated once.
- unsigned short Vals[30];
- unsigned NumVals = 0;
- EvaluateOps(LHS, Vals, NumVals);
- EvaluateOps(RHS, Vals, NumVals);
-
- unsigned Cost = NumVals + Op->getCost();
- if (Cost < ShufTab[ResultMask].Cost) {
- ShufTab[ResultMask].Cost = Cost;
- ShufTab[ResultMask].Op = Op;
- ShufTab[ResultMask].Arg0 = LHS;
- ShufTab[ResultMask].Arg1 = RHS;
- MadeChange = true;
- }
- }
- }
- }
- }
-
- std::cerr << "Finished Table has " << getNumEntered()
- << " entries established.\n";
-
- unsigned CostArray[10] = { 0 };
-
- // Compute a cost histogram.
- for (unsigned i = 0; i != 65536; ++i) {
- if (!isValidMask(i)) continue;
- if (ShufTab[i].Cost > 9)
- ++CostArray[9];
- else
- ++CostArray[ShufTab[i].Cost];
- }
-
- for (unsigned i = 0; i != 9; ++i)
- if (CostArray[i])
- std::cout << "// " << CostArray[i] << " entries have cost " << i << "\n";
- if (CostArray[9])
- std::cout << "// " << CostArray[9] << " entries have higher cost!\n";
-
-
- // Build up the table to emit.
- std::cout << "\n// This table is 6561*4 = 26244 bytes in size.\n";
- std::cout << "static const unsigned PerfectShuffleTable[6561+1] = {\n";
-
- for (unsigned i = 0; i != 0x8889; ++i) {
- if (!isValidMask(i)) continue;
-
- // CostSat - The cost of this operation saturated to two bits.
- unsigned CostSat = ShufTab[i].Cost;
- if (CostSat > 4) CostSat = 4;
- if (CostSat == 0) CostSat = 1;
- --CostSat; // Cost is now between 0-3.
-
- unsigned OpNum = ShufTab[i].Op ? ShufTab[i].Op->OpNum : 0;
- assert(OpNum < 16 && "Too few bits to encode operation!");
-
- unsigned LHS = getCompressedMask(ShufTab[i].Arg0);
- unsigned RHS = getCompressedMask(ShufTab[i].Arg1);
-
- // Encode this as 2 bits of saturated cost, 4 bits of opcodes, 13 bits of
- // LHS, and 13 bits of RHS = 32 bits.
- unsigned Val = (CostSat << 30) | (OpNum << 26) | (LHS << 13) | RHS;
-
- std::cout << " " << Val << "U,\t// ";
- PrintMask(i, std::cout);
- std::cout << ": Cost " << ShufTab[i].Cost;
- std::cout << " " << (ShufTab[i].Op ? ShufTab[i].Op->getName() : "copy");
- std::cout << " ";
- if (ShufTab[ShufTab[i].Arg0].Cost == 0) {
- std::cout << getZeroCostOpName(ShufTab[i].Arg0);
- } else {
- PrintMask(ShufTab[i].Arg0, std::cout);
- }
-
- if (ShufTab[i].Op && !ShufTab[i].Op->isOnlyLHSOperator()) {
- std::cout << ", ";
- if (ShufTab[ShufTab[i].Arg1].Cost == 0) {
- std::cout << getZeroCostOpName(ShufTab[i].Arg1);
- } else {
- PrintMask(ShufTab[i].Arg1, std::cout);
- }
- }
- std::cout << "\n";
- }
- std::cout << " 0\n};\n";
-
- if (0) {
- // Print out the table.
- for (unsigned i = 0; i != 0x8889; ++i) {
- if (!isValidMask(i)) continue;
- if (ShufTab[i].Cost < 1000) {
- PrintMask(i, std::cerr);
- std::cerr << " - Cost " << ShufTab[i].Cost << " - ";
-
- unsigned short Vals[30];
- unsigned NumVals = 0;
- EvaluateOps(i, Vals, NumVals);
-
- for (unsigned j = 0, e = NumVals; j != e; ++j)
- PrintOperation(j, Vals);
- std::cerr << "\n";
- }
- }
- }
-}
-
-
-#ifdef GENERATE_ALTIVEC
-
-///===---------------------------------------------------------------------===//
-/// The altivec instruction definitions. This is the altivec-specific part of
-/// this file.
-///===---------------------------------------------------------------------===//
-
-// Note that the opcode numbers here must match those in the PPC backend.
-enum {
- OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
- OP_VMRGHW,
- OP_VMRGLW,
- OP_VSPLTISW0,
- OP_VSPLTISW1,
- OP_VSPLTISW2,
- OP_VSPLTISW3,
- OP_VSLDOI4,
- OP_VSLDOI8,
- OP_VSLDOI12
-};
-
-struct vmrghw : public Operator {
- vmrghw() : Operator(0x0415, "vmrghw", OP_VMRGHW) {}
-} the_vmrghw;
-
-struct vmrglw : public Operator {
- vmrglw() : Operator(0x2637, "vmrglw", OP_VMRGLW) {}
-} the_vmrglw;
-
-template<unsigned Elt>
-struct vspltisw : public Operator {
- vspltisw(const char *N, unsigned Opc)
- : Operator(MakeMask(Elt, Elt, Elt, Elt), N, Opc) {}
-};
-
-vspltisw<0> the_vspltisw0("vspltisw0", OP_VSPLTISW0);
-vspltisw<1> the_vspltisw1("vspltisw1", OP_VSPLTISW1);
-vspltisw<2> the_vspltisw2("vspltisw2", OP_VSPLTISW2);
-vspltisw<3> the_vspltisw3("vspltisw3", OP_VSPLTISW3);
-
-template<unsigned N>
-struct vsldoi : public Operator {
- vsldoi(const char *Name, unsigned Opc)
- : Operator(MakeMask(N&7, (N+1)&7, (N+2)&7, (N+3)&7), Name, Opc) {
- }
-};
-
-vsldoi<1> the_vsldoi1("vsldoi4" , OP_VSLDOI4);
-vsldoi<2> the_vsldoi2("vsldoi8" , OP_VSLDOI8);
-vsldoi<3> the_vsldoi3("vsldoi12", OP_VSLDOI12);
-
-#endif
-
-#define GENERATE_NEON
-
-#ifdef GENERATE_NEON
-enum {
- OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
- OP_VREV,
- OP_VDUP0,
- OP_VDUP1,
- OP_VDUP2,
- OP_VDUP3,
- OP_VEXT1,
- OP_VEXT2,
- OP_VEXT3,
- OP_VUZPL, // VUZP, left result
- OP_VUZPR, // VUZP, right result
- OP_VZIPL, // VZIP, left result
- OP_VZIPR, // VZIP, right result
- OP_VTRNL, // VTRN, left result
- OP_VTRNR // VTRN, right result
-};
-
-struct vrev : public Operator {
- vrev() : Operator(0x1032, "vrev", OP_VREV) {}
-} the_vrev;
-
-template<unsigned Elt>
-struct vdup : public Operator {
- vdup(const char *N, unsigned Opc)
- : Operator(MakeMask(Elt, Elt, Elt, Elt), N, Opc) {}
-};
-
-vdup<0> the_vdup0("vdup0", OP_VDUP0);
-vdup<1> the_vdup1("vdup1", OP_VDUP1);
-vdup<2> the_vdup2("vdup2", OP_VDUP2);
-vdup<3> the_vdup3("vdup3", OP_VDUP3);
-
-template<unsigned N>
-struct vext : public Operator {
- vext(const char *Name, unsigned Opc)
- : Operator(MakeMask(N&7, (N+1)&7, (N+2)&7, (N+3)&7), Name, Opc) {
- }
-};
-
-vext<1> the_vext1("vext1", OP_VEXT1);
-vext<2> the_vext2("vext2", OP_VEXT2);
-vext<3> the_vext3("vext3", OP_VEXT3);
-
-struct vuzpl : public Operator {
- vuzpl() : Operator(0x0246, "vuzpl", OP_VUZPL, 2) {}
-} the_vuzpl;
-
-struct vuzpr : public Operator {
- vuzpr() : Operator(0x1357, "vuzpr", OP_VUZPR, 2) {}
-} the_vuzpr;
-
-struct vzipl : public Operator {
- vzipl() : Operator(0x0415, "vzipl", OP_VZIPL, 2) {}
-} the_vzipl;
-
-struct vzipr : public Operator {
- vzipr() : Operator(0x2637, "vzipr", OP_VZIPR, 2) {}
-} the_vzipr;
-
-struct vtrnl : public Operator {
- vtrnl() : Operator(0x0426, "vtrnl", OP_VTRNL, 2) {}
-} the_vtrnl;
-
-struct vtrnr : public Operator {
- vtrnr() : Operator(0x1537, "vtrnr", OP_VTRNR, 2) {}
-} the_vtrnr;
-
-#endif
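
The deleted PerfectShuffle generator encodes a 4-element shuffle as a 16-bit mask, one hex nibble per result element, with 8 meaning undefined. A tiny self-contained sketch of that encoding, mirroring the MakeMask/getMaskElt helpers above; the free-standing names here are illustrative only:

#include <cstdio>

// One nibble per element, element 0 in the highest nibble; 0-7 select an
// element of the concatenated inputs, 8 marks the result element undefined.
static unsigned short makeMask(unsigned V0, unsigned V1,
                               unsigned V2, unsigned V3) {
  return (V0 << 12) | (V1 << 8) | (V2 << 4) | V3;
}

static unsigned maskElt(unsigned Mask, unsigned Elt) {
  return (Mask >> ((3 - Elt) * 4)) & 0xF;
}

int main() {
  unsigned short M = makeMask(0, 4, 1, 8);        // <LHS0, RHS0, LHS1, undef>
  std::printf("mask = 0x%04x\n", (unsigned)M);    // mask = 0x0418
  for (unsigned i = 0; i != 4; ++i)
    std::printf("elt %u -> %u\n", i, maskElt(M, i));
  return 0;
}
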
diff --git a/libclamav/c++/llvm/utils/RegressionFinder.pl b/libclamav/c++/llvm/utils/RegressionFinder.pl
deleted file mode 100755
index 86b0777..0000000
--- a/libclamav/c++/llvm/utils/RegressionFinder.pl
+++ /dev/null
@@ -1,186 +0,0 @@
-#! /usr/bin/perl
-# Script to find regressions by binary-searching a time interval in the
-# CVS tree. Written by Brian Gaeke on 2-Mar-2004.
-#
-
-require 5.6.0; # NOTE: This script not tested with earlier versions.
-use Getopt::Std;
-use POSIX;
-use Time::Local;
-use IO::Handle;
-
-sub usage {
- print STDERR <<END;
-findRegression [-I] -w WTIME -d DTIME -t TOOLS -c SCRIPT
-
-The -w, -d, -t, and -c options are required.
-Run findRegression in the top level of an LLVM tree.
-WTIME is a time when you are sure the regression does NOT exist ("Works").
-DTIME is a time when you are sure the regression DOES exist ("Doesntwork").
-WTIME and DTIME are both in the format: "YYYY/MM/DD HH:MM".
--I means run builds at WTIME and DTIME first to make sure.
-TOOLS is a comma separated list of tools to rebuild before running SCRIPT.
-SCRIPT exits 1 if the regression is present in TOOLS; 0 otherwise.
-END
- exit 1;
-}
-
-sub timeAsSeconds {
- my ($timestr) = @_;
-
- if ( $timestr =~ /(\d\d\d\d)\/(\d\d)\/(\d\d) (\d\d):(\d\d)/ ) {
- my ( $year, $mon, $mday, $hour, $min ) = ( $1, $2, $3, $4, $5 );
- return timegm( 0, $min, $hour, $mday, $mon - 1, $year );
- }
- else {
- die "** Can't parse date + time: $timestr\n";
- }
-}
-
-sub timeAsString {
- my ($secs) = @_;
- return strftime( "%Y/%m/%d %H:%M", gmtime($secs) );
-}
-
-sub run {
- my ($cmdline) = @_;
- print LOG "** Running: $cmdline\n";
- return system($cmdline);
-}
-
-sub buildLibrariesAndTools {
- run("sh /home/vadve/gaeke/scripts/run-configure");
- run("$MAKE -C lib/Support");
- run("$MAKE -C utils");
- run("$MAKE -C lib");
- foreach my $tool (@TOOLS) { run("$MAKE -C tools/$tool"); }
-}
-
-sub contains {
- my ( $file, $regex ) = @_;
- local (*FILE);
- open( FILE, "<$file" ) or die "** can't read $file: $!\n";
- while (<FILE>) {
- if (/$regex/) {
- close FILE;
- return 1;
- }
- }
- close FILE;
- return 0;
-}
-
-sub updateSources {
- my ($time) = @_;
- my $inst = "include/llvm/Instruction.h";
- unlink($inst);
- run( "cvs update -D'" . timeAsString($time) . "'" );
- if ( !contains( $inst, 'class Instruction.*Annotable' ) ) {
- run("patch -F100 -p0 < makeInstructionAnnotable.patch");
- }
-}
-
-sub regressionPresentAt {
- my ($time) = @_;
-
- updateSources($time);
- buildLibrariesAndTools();
- my $rc = run($SCRIPT);
- if ($rc) {
- print LOG "** Found that regression was PRESENT at "
- . timeAsString($time) . "\n";
- return 1;
- }
- else {
- print LOG "** Found that regression was ABSENT at "
- . timeAsString($time) . "\n";
- return 0;
- }
-}
-
-sub regressionAbsentAt {
- my ($time) = @_;
- return !regressionPresentAt($time);
-}
-
-sub closeTo {
- my ( $time1, $time2 ) = @_;
- return abs( $time1 - $time2 ) < 600; # 10 minutes seems reasonable.
-}
-
-sub halfWayPoint {
- my ( $time1, $time2 ) = @_;
- my $halfSpan = int( abs( $time1 - $time2 ) / 2 );
- if ( $time1 < $time2 ) {
- return $time1 + $halfSpan;
- }
- else {
- return $time2 + $halfSpan;
- }
-}
-
-sub checkBoundaryConditions {
- print LOG "** Checking for presence of regression at ", timeAsString($DTIME),
- "\n";
- if ( !regressionPresentAt($DTIME) ) {
- die ( "** Can't help you; $SCRIPT says regression absent at dtime: "
- . timeAsString($DTIME)
- . "\n" );
- }
- print LOG "** Checking for absence of regression at ", timeAsString($WTIME),
- "\n";
- if ( !regressionAbsentAt($WTIME) ) {
- die ( "** Can't help you; $SCRIPT says regression present at wtime: "
- . timeAsString($WTIME)
- . "\n" );
- }
-}
-
-##############################################################################
-
-# Set up log files
-open (STDERR, ">&STDOUT") || die "** Can't redirect std.err: $!\n";
-autoflush STDOUT 1;
-autoflush STDERR 1;
-open (LOG, ">RegFinder.log") || die "** can't write RegFinder.log: $!\n";
-autoflush LOG 1;
-# Check command line arguments and environment variables
-getopts('Iw:d:t:c:');
-if ( !( $opt_w && $opt_d && $opt_t && $opt_c ) ) {
- usage;
-}
-$MAKE = $ENV{'MAKE'};
-$MAKE = 'gmake' unless $MAKE;
-$WTIME = timeAsSeconds($opt_w);
-print LOG "** Assuming worked at ", timeAsString($WTIME), "\n";
-$DTIME = timeAsSeconds($opt_d);
-print LOG "** Assuming didn't work at ", timeAsString($DTIME), "\n";
-$opt_t =~ s/\s*//g;
-$SCRIPT = $opt_c;
-die "** $SCRIPT is not executable or not found\n" unless -x $SCRIPT;
-print LOG "** Checking for the regression using $SCRIPT\n";
-@TOOLS = split ( /,/, $opt_t );
-print LOG (
- "** Going to rebuild: ",
- ( join ", ", @TOOLS ),
- " before each $SCRIPT run\n"
-);
-if ($opt_I) { checkBoundaryConditions(); }
-# do the dirty work:
-while ( !closeTo( $DTIME, $WTIME ) ) {
- my $halfPt = halfWayPoint( $DTIME, $WTIME );
- print LOG "** Checking whether regression is present at ",
- timeAsString($halfPt), "\n";
- if ( regressionPresentAt($halfPt) ) {
- $DTIME = $halfPt;
- }
- else {
- $WTIME = $halfPt;
- }
-}
-# Tell them what we found
-print LOG "** Narrowed it down to:\n";
-print LOG "** Worked at: ", timeAsString($WTIME), "\n";
-print LOG "** Did not work at: ", timeAsString($DTIME), "\n";
-close LOG;
-exit 0;
diff --git a/libclamav/c++/llvm/utils/TableGen/ARMDecoderEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/ARMDecoderEmitter.cpp
new file mode 100644
index 0000000..03b01f6
--- /dev/null
+++ b/libclamav/c++/llvm/utils/TableGen/ARMDecoderEmitter.cpp
@@ -0,0 +1,1878 @@
+//===------------ ARMDecoderEmitter.cpp - Decoder Generator ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the ARM Disassembler.
+// It contains the tablegen backend that emits the decoder functions for ARM and
+// Thumb. The disassembler core includes the auto-generated file, invokes the
+// decoder functions, and builds up the MCInst based on the decoded Opcode.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "arm-decoder-emitter"
+
+#include "ARMDecoderEmitter.h"
+#include "CodeGenTarget.h"
+#include "Record.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <vector>
+#include <map>
+#include <string>
+
+using namespace llvm;
+
+/////////////////////////////////////////////////////
+// //
+// Enums and Utilities for ARM Instruction Format //
+// //
+/////////////////////////////////////////////////////
+
+#define ARM_FORMATS \
+ ENTRY(ARM_FORMAT_PSEUDO, 0) \
+ ENTRY(ARM_FORMAT_MULFRM, 1) \
+ ENTRY(ARM_FORMAT_BRFRM, 2) \
+ ENTRY(ARM_FORMAT_BRMISCFRM, 3) \
+ ENTRY(ARM_FORMAT_DPFRM, 4) \
+ ENTRY(ARM_FORMAT_DPSOREGFRM, 5) \
+ ENTRY(ARM_FORMAT_LDFRM, 6) \
+ ENTRY(ARM_FORMAT_STFRM, 7) \
+ ENTRY(ARM_FORMAT_LDMISCFRM, 8) \
+ ENTRY(ARM_FORMAT_STMISCFRM, 9) \
+ ENTRY(ARM_FORMAT_LDSTMULFRM, 10) \
+ ENTRY(ARM_FORMAT_LDSTEXFRM, 11) \
+ ENTRY(ARM_FORMAT_ARITHMISCFRM, 12) \
+ ENTRY(ARM_FORMAT_SATFRM, 13) \
+ ENTRY(ARM_FORMAT_EXTFRM, 14) \
+ ENTRY(ARM_FORMAT_VFPUNARYFRM, 15) \
+ ENTRY(ARM_FORMAT_VFPBINARYFRM, 16) \
+ ENTRY(ARM_FORMAT_VFPCONV1FRM, 17) \
+ ENTRY(ARM_FORMAT_VFPCONV2FRM, 18) \
+ ENTRY(ARM_FORMAT_VFPCONV3FRM, 19) \
+ ENTRY(ARM_FORMAT_VFPCONV4FRM, 20) \
+ ENTRY(ARM_FORMAT_VFPCONV5FRM, 21) \
+ ENTRY(ARM_FORMAT_VFPLDSTFRM, 22) \
+ ENTRY(ARM_FORMAT_VFPLDSTMULFRM, 23) \
+ ENTRY(ARM_FORMAT_VFPMISCFRM, 24) \
+ ENTRY(ARM_FORMAT_THUMBFRM, 25) \
+ ENTRY(ARM_FORMAT_MISCFRM, 26) \
+ ENTRY(ARM_FORMAT_NEONGETLNFRM, 27) \
+ ENTRY(ARM_FORMAT_NEONSETLNFRM, 28) \
+ ENTRY(ARM_FORMAT_NEONDUPFRM, 29) \
+ ENTRY(ARM_FORMAT_NLdSt, 30) \
+ ENTRY(ARM_FORMAT_N1RegModImm, 31) \
+ ENTRY(ARM_FORMAT_N2Reg, 32) \
+ ENTRY(ARM_FORMAT_NVCVT, 33) \
+ ENTRY(ARM_FORMAT_NVecDupLn, 34) \
+ ENTRY(ARM_FORMAT_N2RegVecShL, 35) \
+ ENTRY(ARM_FORMAT_N2RegVecShR, 36) \
+ ENTRY(ARM_FORMAT_N3Reg, 37) \
+ ENTRY(ARM_FORMAT_N3RegVecSh, 38) \
+ ENTRY(ARM_FORMAT_NVecExtract, 39) \
+ ENTRY(ARM_FORMAT_NVecMulScalar, 40) \
+ ENTRY(ARM_FORMAT_NVTBL, 41)
+
+// ARM instruction format specifies the encoding used by the instruction.
+#define ENTRY(n, v) n = v,
+typedef enum {
+ ARM_FORMATS
+ ARM_FORMAT_NA
+} ARMFormat;
+#undef ENTRY
+
+// Converts enum to const char*.
+static const char *stringForARMFormat(ARMFormat form) {
+#define ENTRY(n, v) case n: return #n;
+ switch(form) {
+ ARM_FORMATS
+ case ARM_FORMAT_NA:
+ default:
+ return "";
+ }
+#undef ENTRY
+}
+
+enum {
+ IndexModeNone = 0,
+ IndexModePre = 1,
+ IndexModePost = 2,
+ IndexModeUpd = 3
+};
+
+/////////////////////////
+// //
+// Utility functions //
+// //
+/////////////////////////
+
+/// byteFromBitsInit - Return the byte value from a BitsInit.
+/// Called from getByteField().
+static uint8_t byteFromBitsInit(BitsInit &init) {
+ int width = init.getNumBits();
+
+ assert(width <= 8 && "Field is too large for uint8_t!");
+
+ int index;
+ uint8_t mask = 0x01;
+
+ uint8_t ret = 0;
+
+ for (index = 0; index < width; index++) {
+ if (static_cast<BitInit*>(init.getBit(index))->getValue())
+ ret |= mask;
+
+ mask <<= 1;
+ }
+
+ return ret;
+}
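+
+// For illustration: a 3-bit BitsInit whose bits, from index 0 upward, are
+// {1, 0, 1} would yield 0x05 here, since bit index 0 maps to the least
+// significant bit of the returned byte.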
+
+static uint8_t getByteField(const Record &def, const char *str) {
+ BitsInit *bits = def.getValueAsBitsInit(str);
+ return byteFromBitsInit(*bits);
+}
+
+static BitsInit &getBitsField(const Record &def, const char *str) {
+ BitsInit *bits = def.getValueAsBitsInit(str);
+ return *bits;
+}
+
+/// sameStringExceptSuffix - Return true if the two strings differ only in RHS's
+/// suffix. ("VST4d8", "VST4d8_UPD", "_UPD") as input returns true.
+static
+bool sameStringExceptSuffix(const StringRef LHS, const StringRef RHS,
+ const StringRef Suffix) {
+
+ if (RHS.startswith(LHS) && RHS.endswith(Suffix))
+ return RHS.size() == LHS.size() + Suffix.size();
+
+ return false;
+}
+
+/// thumbInstruction - Determine whether we have a Thumb instruction.
+/// See also ARMInstrFormats.td.
+static bool thumbInstruction(uint8_t Form) {
+ return Form == ARM_FORMAT_THUMBFRM;
+}
+
+// The set (BIT_TRUE, BIT_FALSE, BIT_UNSET) represents a ternary logic system
+// for a bit value.
+//
+// BIT_UNFILTERED is used as the init value for a filter position. It is used
+// only for filter processing.
+typedef enum {
+ BIT_TRUE, // '1'
+ BIT_FALSE, // '0'
+ BIT_UNSET, // '?'
+ BIT_UNFILTERED // unfiltered
+} bit_value_t;
+
+static bool ValueSet(bit_value_t V) {
+ return (V == BIT_TRUE || V == BIT_FALSE);
+}
+static bool ValueNotSet(bit_value_t V) {
+ return (V == BIT_UNSET);
+}
+static int Value(bit_value_t V) {
+ return ValueNotSet(V) ? -1 : (V == BIT_FALSE ? 0 : 1);
+}
+static bit_value_t bitFromBits(BitsInit &bits, unsigned index) {
+ if (BitInit *bit = dynamic_cast<BitInit*>(bits.getBit(index)))
+ return bit->getValue() ? BIT_TRUE : BIT_FALSE;
+
+ // The bit is uninitialized.
+ return BIT_UNSET;
+}
+// Prints the bit value for each position.
+static void dumpBits(raw_ostream &o, BitsInit &bits) {
+ unsigned index;
+
+ for (index = bits.getNumBits(); index > 0; index--) {
+ switch (bitFromBits(bits, index - 1)) {
+ case BIT_TRUE:
+ o << "1";
+ break;
+ case BIT_FALSE:
+ o << "0";
+ break;
+ case BIT_UNSET:
+ o << "_";
+ break;
+ default:
+ assert(0 && "unexpected return value from bitFromBits");
+ }
+ }
+}
+
+// Enums for the available target names.
+typedef enum {
+ TARGET_ARM = 0,
+ TARGET_THUMB
+} TARGET_NAME_t;
+
+// FIXME: Possibly auto-detected?
+#define BIT_WIDTH 32
+
+// Forward declaration.
+class FilterChooser;
+
+// Representation of the instruction to work on.
+typedef bit_value_t insn_t[BIT_WIDTH];
+
+/// Filter - Filter works with FilterChooser to produce the decoding tree for
+/// the ISA.
+///
+/// It is useful to think of a Filter as governing the switch stmts of the
+/// decoding tree in a certain level. Each case stmt delegates to an inferior
+/// FilterChooser to decide what further decoding logic to employ, or in another
+/// words, what other remaining bits to look at. The FilterChooser eventually
+/// chooses a best Filter to do its job.
+///
+/// This recursive scheme ends when the number of Opcodes assigned to the
+/// FilterChooser becomes 1 or if there is a conflict. A conflict happens when
+/// the Filter/FilterChooser combo does not know how to distinguish among the
+/// Opcodes assigned.
+///
+/// An example of a conflict is
+///
+/// Conflict:
+/// 111101000.00........00010000....
+/// 111101000.00........0001........
+/// 1111010...00........0001........
+/// 1111010...00....................
+/// 1111010.........................
+/// 1111............................
+/// ................................
+/// VST4q8a 111101000_00________00010000____
+/// VST4q8b 111101000_00________00010000____
+///
+/// The Debug output shows the path that the decoding tree follows to reach
+/// the conclusion that there is a conflict. VST4q8a is a vst4 to double-spaced
+/// even registers, while VST4q8b is a vst4 to double-spaced odd registers.
+///
+/// The encoding info in the .td files does not specify this meta information,
+/// which could have been used by the decoder to resolve the conflict. The
+/// decoder could try to decode the even/odd register numbering and assign to
+/// VST4q8a or VST4q8b, but for the time being, the decoder chooses the "a"
+/// version and returns the Opcode, since the two have the same Asm format string.
+class Filter {
+protected:
+ FilterChooser *Owner; // points to the FilterChooser who owns this filter
+ unsigned StartBit; // the starting bit position
+ unsigned NumBits; // number of bits to filter
+ bool Mixed; // a mixed region contains both set and unset bits
+
+ // Map of well-known segment value to the set of uid's with that value.
+ std::map<uint64_t, std::vector<unsigned> > FilteredInstructions;
+
+ // Set of uid's with non-constant segment values.
+ std::vector<unsigned> VariableInstructions;
+
+ // Map of well-known segment value to its delegate.
+ std::map<unsigned, FilterChooser*> FilterChooserMap;
+
+ // Number of instructions which fall under FilteredInstructions category.
+ unsigned NumFiltered;
+
+ // Keeps track of the last opcode in the filtered bucket.
+ unsigned LastOpcFiltered;
+
+ // Number of instructions which fall under VariableInstructions category.
+ unsigned NumVariable;
+
+public:
+ unsigned getNumFiltered() { return NumFiltered; }
+ unsigned getNumVariable() { return NumVariable; }
+ unsigned getSingletonOpc() {
+ assert(NumFiltered == 1);
+ return LastOpcFiltered;
+ }
+ // Return the filter chooser for the group of instructions without constant
+ // segment values.
+ FilterChooser &getVariableFC() {
+ assert(NumFiltered == 1);
+ assert(FilterChooserMap.size() == 1);
+ return *(FilterChooserMap.find((unsigned)-1)->second);
+ }
+
+ Filter(const Filter &f);
+ Filter(FilterChooser &owner, unsigned startBit, unsigned numBits, bool mixed);
+
+ ~Filter();
+
+ // Divides the decoding task into sub tasks and delegates them to the
+ // inferior FilterChooser's.
+ //
+ // A special case arises when there's only one entry in the filtered
+ // instructions. In order to unambiguously decode the singleton, we need to
+ // match the remaining undecoded encoding bits against the singleton.
+ void recurse();
+
+ // Emit code to decode instructions given a segment or segments of bits.
+ void emit(raw_ostream &o, unsigned &Indentation);
+
+ // Returns the amount of fanout produced by the filter. More fanout implies
+ // the filter distinguishes more categories of instructions.
+ unsigned usefulness() const;
+}; // End of class Filter
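+
+// Illustrative example of the partitioning above: for a filter over a 4-bit
+// segment, an opcode whose encoding in that segment is fully specified, say
+// 0b0101, lands in FilteredInstructions[0b0101], while an opcode with any
+// unset bit in that segment is appended to VariableInstructions and is later
+// handled by the delegate chooser keyed at (unsigned)-1 (see recurse()).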
+
+// These are states of our finite state machines used in FilterChooser's
+// filterProcessor() which produces the filter candidates to use.
+typedef enum {
+ ATTR_NONE,
+ ATTR_FILTERED,
+ ATTR_ALL_SET,
+ ATTR_ALL_UNSET,
+ ATTR_MIXED
+} bitAttr_t;
+
+/// FilterChooser - FilterChooser chooses the best filter among a set of Filters
+/// in order to perform the decoding of instructions at the current level.
+///
+/// Decoding proceeds from the top down. Based on the well-known encoding bits
+/// of instructions available, FilterChooser builds up the possible Filters that
+/// can further the task of decoding by distinguishing among the remaining
+/// candidate instructions.
+///
+/// Once a filter has been chosen, it is called upon to divide the decoding task
+/// into sub-tasks and delegates them to its inferior FilterChoosers for further
+/// processing.
+///
+/// It is useful to think of a Filter as governing the switch stmts of the
+/// decoding tree. And each case is delegated to an inferior FilterChooser to
+/// decide what further remaining bits to look at.
+class FilterChooser {
+ static TARGET_NAME_t TargetName;
+
+protected:
+ friend class Filter;
+
+ // Vector of codegen instructions to choose our filter.
+ const std::vector<const CodeGenInstruction*> &AllInstructions;
+
+ // Vector of uid's for this filter chooser to work on.
+ const std::vector<unsigned> Opcodes;
+
+ // Vector of candidate filters.
+ std::vector<Filter> Filters;
+
+ // Array of bit values passed down from our parent.
+ // Set to all BIT_UNFILTERED's for Parent == NULL.
+ bit_value_t FilterBitValues[BIT_WIDTH];
+
+ // Links to the FilterChooser above us in the decoding tree.
+ FilterChooser *Parent;
+
+ // Index of the best filter from Filters.
+ int BestIndex;
+
+public:
+ static void setTargetName(TARGET_NAME_t tn) { TargetName = tn; }
+
+ FilterChooser(const FilterChooser &FC) :
+ AllInstructions(FC.AllInstructions), Opcodes(FC.Opcodes),
+ Filters(FC.Filters), Parent(FC.Parent), BestIndex(FC.BestIndex) {
+ memcpy(FilterBitValues, FC.FilterBitValues, sizeof(FilterBitValues));
+ }
+
+ FilterChooser(const std::vector<const CodeGenInstruction*> &Insts,
+ const std::vector<unsigned> &IDs) :
+ AllInstructions(Insts), Opcodes(IDs), Filters(), Parent(NULL),
+ BestIndex(-1) {
+ for (unsigned i = 0; i < BIT_WIDTH; ++i)
+ FilterBitValues[i] = BIT_UNFILTERED;
+
+ doFilter();
+ }
+
+ FilterChooser(const std::vector<const CodeGenInstruction*> &Insts,
+ const std::vector<unsigned> &IDs,
+ bit_value_t (&ParentFilterBitValues)[BIT_WIDTH],
+ FilterChooser &parent) :
+ AllInstructions(Insts), Opcodes(IDs), Filters(), Parent(&parent),
+ BestIndex(-1) {
+ for (unsigned i = 0; i < BIT_WIDTH; ++i)
+ FilterBitValues[i] = ParentFilterBitValues[i];
+
+ doFilter();
+ }
+
+ // The top level filter chooser has NULL as its parent.
+ bool isTopLevel() { return Parent == NULL; }
+
+ // This provides an opportunity for target specific code emission.
+ void emitTopHook(raw_ostream &o);
+
+ // Emit the top level typedef and decodeInstruction() function.
+ void emitTop(raw_ostream &o, unsigned &Indentation);
+
+ // This provides an opportunity for target specific code emission after
+ // emitTop().
+ void emitBot(raw_ostream &o, unsigned &Indentation);
+
+protected:
+ // Populates the insn given the uid.
+ void insnWithID(insn_t &Insn, unsigned Opcode) const {
+ BitsInit &Bits = getBitsField(*AllInstructions[Opcode]->TheDef, "Inst");
+
+ for (unsigned i = 0; i < BIT_WIDTH; ++i)
+ Insn[i] = bitFromBits(Bits, i);
+
+ // Set Inst{21} to 1 (wback) when IndexModeBits == IndexModeUpd.
+ if (getByteField(*AllInstructions[Opcode]->TheDef, "IndexModeBits")
+ == IndexModeUpd)
+ Insn[21] = BIT_TRUE;
+ }
+
+ // Returns the record name.
+ const std::string &nameWithID(unsigned Opcode) const {
+ return AllInstructions[Opcode]->TheDef->getName();
+ }
+
+ // Populates the field of the insn given the start position and the number of
+ // consecutive bits to scan for.
+ //
+ // Returns false if there exists any uninitialized bit value in the range.
+ // Returns true, otherwise.
+ bool fieldFromInsn(uint64_t &Field, insn_t &Insn, unsigned StartBit,
+ unsigned NumBits) const;
+
+ /// dumpFilterArray - dumpFilterArray prints out debugging info for the given
+ /// filter array as a series of chars.
+ void dumpFilterArray(raw_ostream &o, bit_value_t (&filter)[BIT_WIDTH]);
+
+ /// dumpStack - dumpStack traverses the filter chooser chain and calls
+ /// dumpFilterArray on each filter chooser up to the top level one.
+ void dumpStack(raw_ostream &o, const char *prefix);
+
+ Filter &bestFilter() {
+ assert(BestIndex != -1 && "BestIndex not set");
+ return Filters[BestIndex];
+ }
+
+ // Called from Filter::recurse() when a singleton exists. For debugging purposes.
+ void SingletonExists(unsigned Opc);
+
+ bool PositionFiltered(unsigned i) {
+ return ValueSet(FilterBitValues[i]);
+ }
+
+ // Calculates the island(s) needed to decode the instruction.
+ // This returns a list of undecoded bits of an instruction, for example,
+ // Inst{20} = 1 && Inst{3-0} == 0b1111 represents two islands of yet-to-be
+ // decoded bits in order to verify that the instruction matches the Opcode.
+ unsigned getIslands(std::vector<unsigned> &StartBits,
+ std::vector<unsigned> &EndBits, std::vector<uint64_t> &FieldVals,
+ insn_t &Insn);
+
+ // The purpose of this function is for the API client to detect possible
+ // Load/Store Coprocessor instructions. If the coprocessor number of
+ // the instruction is either 10 or 11, the decoder should not report the
+ // instruction as LDC/LDC2/STC/STC2, but should match against Advanced SIMD or
+ // VFP instructions.
+ bool LdStCopEncoding1(unsigned Opc) {
+ const std::string &Name = nameWithID(Opc);
+ if (Name == "LDC_OFFSET" || Name == "LDC_OPTION" ||
+ Name == "LDC_POST" || Name == "LDC_PRE" ||
+ Name == "LDCL_OFFSET" || Name == "LDCL_OPTION" ||
+ Name == "LDCL_POST" || Name == "LDCL_PRE" ||
+ Name == "STC_OFFSET" || Name == "STC_OPTION" ||
+ Name == "STC_POST" || Name == "STC_PRE" ||
+ Name == "STCL_OFFSET" || Name == "STCL_OPTION" ||
+ Name == "STCL_POST" || Name == "STCL_PRE")
+ return true;
+ else
+ return false;
+ }
+
+ // Emits code to decode the singleton. Return true if we have matched all the
+ // well-known bits.
+ bool emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,unsigned Opc);
+
+ // Emits code to decode the singleton, and then to decode the rest.
+ void emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,Filter &Best);
+
+ // Assign a single filter and run with it.
+ void runSingleFilter(FilterChooser &owner, unsigned startBit, unsigned numBit,
+ bool mixed);
+
+ // reportRegion is a helper function for filterProcessor to mark a region as
+ // eligible for use as a filter region.
+ void reportRegion(bitAttr_t RA, unsigned StartBit, unsigned BitIndex,
+ bool AllowMixed);
+
+ // FilterProcessor scans the well-known encoding bits of the instructions and
+ // builds up a list of candidate filters. It chooses the best filter and
+ // recursively descends down the decoding tree.
+ bool filterProcessor(bool AllowMixed, bool Greedy = true);
+
+ // Decides on the best configuration of filter(s) to use in order to decode
+ // the instructions. A conflict of instructions may occur, in which case we
+ // dump the conflict set to the standard error.
+ void doFilter();
+
+ // Emits code to decode our share of instructions. Returns true if the
+ // emitted code causes a return, which occurs if we know how to decode
+ // the instruction at this level or the instruction is not decodeable.
+ bool emit(raw_ostream &o, unsigned &Indentation);
+};
+
+///////////////////////////
+// //
+// Filter Implementation //
+// //
+///////////////////////////
+
+Filter::Filter(const Filter &f) :
+ Owner(f.Owner), StartBit(f.StartBit), NumBits(f.NumBits), Mixed(f.Mixed),
+ FilteredInstructions(f.FilteredInstructions),
+ VariableInstructions(f.VariableInstructions),
+ FilterChooserMap(f.FilterChooserMap), NumFiltered(f.NumFiltered),
+ LastOpcFiltered(f.LastOpcFiltered), NumVariable(f.NumVariable) {
+}
+
+Filter::Filter(FilterChooser &owner, unsigned startBit, unsigned numBits,
+ bool mixed) : Owner(&owner), StartBit(startBit), NumBits(numBits),
+ Mixed(mixed) {
+ assert(StartBit + NumBits - 1 < BIT_WIDTH);
+
+ NumFiltered = 0;
+ LastOpcFiltered = 0;
+ NumVariable = 0;
+
+ for (unsigned i = 0, e = Owner->Opcodes.size(); i != e; ++i) {
+ insn_t Insn;
+
+ // Populates the insn given the uid.
+ Owner->insnWithID(Insn, Owner->Opcodes[i]);
+
+ uint64_t Field;
+ // Scans the segment for possibly well-specified encoding bits.
+ bool ok = Owner->fieldFromInsn(Field, Insn, StartBit, NumBits);
+
+ if (ok) {
+ // The encoding bits are well-known. Let's add the uid of the
+ // instruction into the bucket keyed off the constant field value.
+ LastOpcFiltered = Owner->Opcodes[i];
+ FilteredInstructions[Field].push_back(LastOpcFiltered);
+ ++NumFiltered;
+ } else {
+ // Some of the encoding bit(s) are unspecified. This contributes to
+ // one additional member of "Variable" instructions.
+ VariableInstructions.push_back(Owner->Opcodes[i]);
+ ++NumVariable;
+ }
+ }
+
+ assert((FilteredInstructions.size() + VariableInstructions.size() > 0)
+ && "Filter returns no instruction categories");
+}
+
+Filter::~Filter() {
+ std::map<unsigned, FilterChooser*>::iterator filterIterator;
+ for (filterIterator = FilterChooserMap.begin();
+ filterIterator != FilterChooserMap.end();
+ filterIterator++) {
+ delete filterIterator->second;
+ }
+}
+
+// Divides the decoding task into sub tasks and delegates them to the
+// inferior FilterChooser's.
+//
+// A special case arises when there's only one entry in the filtered
+// instructions. In order to unambiguously decode the singleton, we need to
+// match the remaining undecoded encoding bits against the singleton.
+void Filter::recurse() {
+ std::map<uint64_t, std::vector<unsigned> >::const_iterator mapIterator;
+
+ bit_value_t BitValueArray[BIT_WIDTH];
+ // Starts by inheriting our parent filter chooser's filter bit values.
+ memcpy(BitValueArray, Owner->FilterBitValues, sizeof(BitValueArray));
+
+ unsigned bitIndex;
+
+ if (VariableInstructions.size()) {
+ // Conservatively marks each segment position as BIT_UNSET.
+ for (bitIndex = 0; bitIndex < NumBits; bitIndex++)
+ BitValueArray[StartBit + bitIndex] = BIT_UNSET;
+
+ // Delegates to an inferior filter chooser for further processing on this
+ // group of instructions whose segment values are variable.
+ FilterChooserMap.insert(std::pair<unsigned, FilterChooser*>(
+ (unsigned)-1,
+ new FilterChooser(Owner->AllInstructions,
+ VariableInstructions,
+ BitValueArray,
+ *Owner)
+ ));
+ }
+
+ // No need to recurse for a singleton filtered instruction.
+ // See also Filter::emit().
+ if (getNumFiltered() == 1) {
+ //Owner->SingletonExists(LastOpcFiltered);
+ assert(FilterChooserMap.size() == 1);
+ return;
+ }
+
+ // Otherwise, create sub choosers.
+ for (mapIterator = FilteredInstructions.begin();
+ mapIterator != FilteredInstructions.end();
+ mapIterator++) {
+
+ // Marks all the segment positions with either BIT_TRUE or BIT_FALSE.
+ for (bitIndex = 0; bitIndex < NumBits; bitIndex++) {
+ if (mapIterator->first & (1ULL << bitIndex))
+ BitValueArray[StartBit + bitIndex] = BIT_TRUE;
+ else
+ BitValueArray[StartBit + bitIndex] = BIT_FALSE;
+ }
+
+ // Delegates to an inferior filter chooser for further processing on this
+ // category of instructions.
+ FilterChooserMap.insert(std::pair<unsigned, FilterChooser*>(
+ mapIterator->first,
+ new FilterChooser(Owner->AllInstructions,
+ mapIterator->second,
+ BitValueArray,
+ *Owner)
+ ));
+ }
+}
+
+// Emit code to decode instructions given a segment or segments of bits.
+void Filter::emit(raw_ostream &o, unsigned &Indentation) {
+ o.indent(Indentation) << "// Check Inst{";
+
+ if (NumBits > 1)
+ o << (StartBit + NumBits - 1) << '-';
+
+ o << StartBit << "} ...\n";
+
+ o.indent(Indentation) << "switch (fieldFromInstruction(insn, "
+ << StartBit << ", " << NumBits << ")) {\n";
+
+ std::map<unsigned, FilterChooser*>::iterator filterIterator;
+
+ bool DefaultCase = false;
+ for (filterIterator = FilterChooserMap.begin();
+ filterIterator != FilterChooserMap.end();
+ filterIterator++) {
+
+ // Field value -1 implies a non-empty set of variable instructions.
+ // See also recurse().
+ if (filterIterator->first == (unsigned)-1) {
+ DefaultCase = true;
+
+ o.indent(Indentation) << "default:\n";
+ o.indent(Indentation) << " break; // fallthrough\n";
+
+ // Closing curly brace for the switch statement.
+ // This is unconventional because we want the default processing to be
+ // performed for the fallthrough cases as well, i.e., when the "cases"
+ // did not produce a decoded instruction.
+ o.indent(Indentation) << "}\n";
+
+ } else
+ o.indent(Indentation) << "case " << filterIterator->first << ":\n";
+
+ // We arrive at a category of instructions with the same segment value.
+ // Now delegate to the sub filter chooser for further decoding.
+ // The case may fall through, which happens if the remaining well-known
+ // encoding bits do not match exactly.
+ if (!DefaultCase) { ++Indentation; ++Indentation; }
+
+ bool finished = filterIterator->second->emit(o, Indentation);
+ // For top level default case, there's no need for a break statement.
+ if (Owner->isTopLevel() && DefaultCase)
+ break;
+ if (!finished)
+ o.indent(Indentation) << "break;\n";
+
+ if (!DefaultCase) { --Indentation; --Indentation; }
+ }
+
+ // If there is no default case, we still need to supply a closing brace.
+ if (!DefaultCase) {
+ // Closing curly brace for the switch statement.
+ o.indent(Indentation) << "}\n";
+ }
+}
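+
+// Rough sketch of what emit() produces for a filter over, say, Inst{27-24}
+// (field values invented for illustration):
+//
+// // Check Inst{27-24} ...
+// switch (fieldFromInstruction(insn, 24, 4)) {
+// case 5:
+// <code emitted by the sub FilterChooser for value 5>
+// break; // omitted when the sub chooser always returns
+// default:
+// break; // fallthrough
+// }
+// <code emitted for the variable (non-constant segment) instructions, if any>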
+
+// Returns the amount of fanout produced by the filter. More fanout implies
+// the filter distinguishes more categories of instructions.
+unsigned Filter::usefulness() const {
+ if (VariableInstructions.size())
+ return FilteredInstructions.size();
+ else
+ return FilteredInstructions.size() + 1;
+}
+
+//////////////////////////////////
+// //
+// FilterChooser Implementation //
+// //
+//////////////////////////////////
+
+// Define the symbol here.
+TARGET_NAME_t FilterChooser::TargetName;
+
+// This provides an opportunity for target specific code emission.
+void FilterChooser::emitTopHook(raw_ostream &o) {
+ if (TargetName == TARGET_ARM) {
+ // Emit code that references the ARMFormat data type.
+ o << "static const ARMFormat ARMFormats[] = {\n";
+ for (unsigned i = 0, e = AllInstructions.size(); i != e; ++i) {
+ const Record &Def = *(AllInstructions[i]->TheDef);
+ const std::string &Name = Def.getName();
+ if (Def.isSubClassOf("InstARM") || Def.isSubClassOf("InstThumb"))
+ o.indent(2) <<
+ stringForARMFormat((ARMFormat)getByteField(Def, "Form"));
+ else
+ o << " ARM_FORMAT_NA";
+
+ o << ",\t// Inst #" << i << " = " << Name << '\n';
+ }
+ o << " ARM_FORMAT_NA\t// Unreachable.\n";
+ o << "};\n\n";
+ }
+}
+
+// Emit the top level typedef and decodeInstruction() function.
+void FilterChooser::emitTop(raw_ostream &o, unsigned &Indentation) {
+ // Run the target specific emit hook.
+ emitTopHook(o);
+
+ switch (BIT_WIDTH) {
+ case 8:
+ o.indent(Indentation) << "typedef uint8_t field_t;\n";
+ break;
+ case 16:
+ o.indent(Indentation) << "typedef uint16_t field_t;\n";
+ break;
+ case 32:
+ o.indent(Indentation) << "typedef uint32_t field_t;\n";
+ break;
+ case 64:
+ o.indent(Indentation) << "typedef uint64_t field_t;\n";
+ break;
+ default:
+ assert(0 && "Unexpected instruction size!");
+ }
+
+ o << '\n';
+
+ o.indent(Indentation) << "static field_t " <<
+ "fieldFromInstruction(field_t insn, unsigned startBit, unsigned numBits)\n";
+
+ o.indent(Indentation) << "{\n";
+
+ ++Indentation; ++Indentation;
+ o.indent(Indentation) << "assert(startBit + numBits <= " << BIT_WIDTH
+ << " && \"Instruction field out of bounds!\");\n";
+ o << '\n';
+ o.indent(Indentation) << "field_t fieldMask;\n";
+ o << '\n';
+ o.indent(Indentation) << "if (numBits == " << BIT_WIDTH << ")\n";
+
+ ++Indentation; ++Indentation;
+ o.indent(Indentation) << "fieldMask = (field_t)-1;\n";
+ --Indentation; --Indentation;
+
+ o.indent(Indentation) << "else\n";
+
+ ++Indentation; ++Indentation;
+ o.indent(Indentation) << "fieldMask = ((1 << numBits) - 1) << startBit;\n";
+ --Indentation; --Indentation;
+
+ o << '\n';
+ o.indent(Indentation) << "return (insn & fieldMask) >> startBit;\n";
+ --Indentation; --Indentation;
+
+ o.indent(Indentation) << "}\n";
+
+ o << '\n';
+
+ o.indent(Indentation) << "static uint16_t decodeInstruction(field_t insn) {\n";
+
+ ++Indentation; ++Indentation;
+ // Emits code to decode the instructions.
+ emit(o, Indentation);
+
+ o << '\n';
+ o.indent(Indentation) << "return 0;\n";
+ --Indentation; --Indentation;
+
+ o.indent(Indentation) << "}\n";
+
+ o << '\n';
+}
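+
+// For BIT_WIDTH == 32 the helper emitted above boils down to (sketch, not the
+// verbatim output):
+//
+// typedef uint32_t field_t;
+// static field_t fieldFromInstruction(field_t insn, unsigned startBit,
+// unsigned numBits) {
+// field_t fieldMask = (numBits == 32) ? (field_t)-1
+// : ((1 << numBits) - 1) << startBit;
+// return (insn & fieldMask) >> startBit;
+// }
+//
+// decodeInstruction(insn) then walks the emitted decoding tree and returns 0
+// when no opcode matches.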
+
+// This provides an opportunity for target specific code emission after
+// emitTop().
+void FilterChooser::emitBot(raw_ostream &o, unsigned &Indentation) {
+ if (TargetName != TARGET_THUMB) return;
+
+ // Emit code that decodes the Thumb ISA.
+ o.indent(Indentation)
+ << "static uint16_t decodeThumbInstruction(field_t insn) {\n";
+
+ ++Indentation; ++Indentation;
+
+ // Emits code to decode the instructions.
+ emit(o, Indentation);
+
+ o << '\n';
+ o.indent(Indentation) << "return 0;\n";
+
+ --Indentation; --Indentation;
+
+ o.indent(Indentation) << "}\n";
+}
+
+// Populates the field of the insn given the start position and the number of
+// consecutive bits to scan for.
+//
+// Returns false upon encountering the first uninitialized bit value.
+// Returns true, otherwise.
+bool FilterChooser::fieldFromInsn(uint64_t &Field, insn_t &Insn,
+ unsigned StartBit, unsigned NumBits) const {
+ Field = 0;
+
+ for (unsigned i = 0; i < NumBits; ++i) {
+ if (Insn[StartBit + i] == BIT_UNSET)
+ return false;
+
+ if (Insn[StartBit + i] == BIT_TRUE)
+ Field = Field | (1ULL << i);
+ }
+
+ return true;
+}
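+
+// Worked example (illustrative): with StartBit = 0, NumBits = 4 and Insn bits
+// 0..3 equal to {BIT_TRUE, BIT_TRUE, BIT_FALSE, BIT_TRUE}, Field becomes
+// 0b1011 and the function returns true; a BIT_UNSET anywhere in the range
+// makes it return false as soon as that position is reached.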
+
+/// dumpFilterArray - dumpFilterArray prints out debugging info for the given
+/// filter array as a series of chars.
+void FilterChooser::dumpFilterArray(raw_ostream &o,
+ bit_value_t (&filter)[BIT_WIDTH]) {
+ unsigned bitIndex;
+
+ for (bitIndex = BIT_WIDTH; bitIndex > 0; bitIndex--) {
+ switch (filter[bitIndex - 1]) {
+ case BIT_UNFILTERED:
+ o << ".";
+ break;
+ case BIT_UNSET:
+ o << "_";
+ break;
+ case BIT_TRUE:
+ o << "1";
+ break;
+ case BIT_FALSE:
+ o << "0";
+ break;
+ }
+ }
+}
+
+/// dumpStack - dumpStack traverses the filter chooser chain and calls
+/// dumpFilterArray on each filter chooser up to the top level one.
+void FilterChooser::dumpStack(raw_ostream &o, const char *prefix) {
+ FilterChooser *current = this;
+
+ while (current) {
+ o << prefix;
+ dumpFilterArray(o, current->FilterBitValues);
+ o << '\n';
+ current = current->Parent;
+ }
+}
+
+// Called from Filter::recurse() when a singleton exists. For debugging purposes.
+void FilterChooser::SingletonExists(unsigned Opc) {
+ insn_t Insn0;
+ insnWithID(Insn0, Opc);
+
+ errs() << "Singleton exists: " << nameWithID(Opc)
+ << " with its decoding dominating ";
+ for (unsigned i = 0; i < Opcodes.size(); ++i) {
+ if (Opcodes[i] == Opc) continue;
+ errs() << nameWithID(Opcodes[i]) << ' ';
+ }
+ errs() << '\n';
+
+ dumpStack(errs(), "\t\t");
+ for (unsigned i = 0; i < Opcodes.size(); i++) {
+ const std::string &Name = nameWithID(Opcodes[i]);
+
+ errs() << '\t' << Name << " ";
+ dumpBits(errs(),
+ getBitsField(*AllInstructions[Opcodes[i]]->TheDef, "Inst"));
+ errs() << '\n';
+ }
+}
+
+// Calculates the island(s) needed to decode the instruction.
+// This returns a list of undecoded bits of an instruction, for example,
+// Inst{20} = 1 && Inst{3-0} == 0b1111 represents two islands of yet-to-be
+// decoded bits in order to verify that the instruction matches the Opcode.
+unsigned FilterChooser::getIslands(std::vector<unsigned> &StartBits,
+ std::vector<unsigned> &EndBits, std::vector<uint64_t> &FieldVals,
+ insn_t &Insn) {
+ unsigned Num, BitNo;
+ Num = BitNo = 0;
+
+ uint64_t FieldVal = 0;
+
+ // 0: Init
+ // 1: Water (the bit value does not affect decoding)
+ // 2: Island (well-known bit value needed for decoding)
+ int State = 0;
+ int Val = -1;
+
+ for (unsigned i = 0; i < BIT_WIDTH; ++i) {
+ Val = Value(Insn[i]);
+ bool Filtered = PositionFiltered(i);
+ switch (State) {
+ default:
+ assert(0 && "Unreachable code!");
+ break;
+ case 0:
+ case 1:
+ if (Filtered || Val == -1)
+ State = 1; // Still in Water
+ else {
+ State = 2; // Into the Island
+ BitNo = 0;
+ StartBits.push_back(i);
+ FieldVal = Val;
+ }
+ break;
+ case 2:
+ if (Filtered || Val == -1) {
+ State = 1; // Into the Water
+ EndBits.push_back(i - 1);
+ FieldVals.push_back(FieldVal);
+ ++Num;
+ } else {
+ State = 2; // Still in Island
+ ++BitNo;
+ FieldVal = FieldVal | Val << BitNo;
+ }
+ break;
+ }
+ }
+ // If we are still in Island after the loop, do some housekeeping.
+ if (State == 2) {
+ EndBits.push_back(BIT_WIDTH - 1);
+ FieldVals.push_back(FieldVal);
+ ++Num;
+ }
+
+ assert(StartBits.size() == Num && EndBits.size() == Num &&
+ FieldVals.size() == Num);
+ return Num;
+}
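+
+// Worked example (illustrative): for the case quoted above, Inst{20} = 1 and
+// Inst{3-0} = 0b1111 with every other position filtered or unset, getIslands()
+// returns 2 with StartBits = {0, 20}, EndBits = {3, 20} and
+// FieldVals = {0xF, 0x1}.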
+
+// Emits code to decode the singleton. Return true if we have matched all the
+// well-known bits.
+bool FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
+ unsigned Opc) {
+ std::vector<unsigned> StartBits;
+ std::vector<unsigned> EndBits;
+ std::vector<uint64_t> FieldVals;
+ insn_t Insn;
+ insnWithID(Insn, Opc);
+
+ // This provides a good opportunity to check for possible Ld/St Coprocessor
+ // Opcode and escapes if the coproc # is either 10 or 11. It is a NEON/VFP
+ // instruction in disguise.
+ if (TargetName == TARGET_ARM && LdStCopEncoding1(Opc)) {
+ o.indent(Indentation);
+ // A8.6.51 & A8.6.188
+ // If coproc = 0b101?, i.e., slice(insn, 11, 8) = 10 or 11, escape.
+ o << "if (fieldFromInstruction(insn, 9, 3) == 5) break; // fallthrough\n";
+ }
+
+ // Look for islands of undecoded bits of the singleton.
+ getIslands(StartBits, EndBits, FieldVals, Insn);
+
+ unsigned Size = StartBits.size();
+ unsigned I, NumBits;
+
+ // If we have matched all the well-known bits, just issue a return.
+ if (Size == 0) {
+ o.indent(Indentation) << "return " << Opc << "; // " << nameWithID(Opc)
+ << '\n';
+ return true;
+ }
+
+ // Otherwise, there are more decodings to be done!
+
+ // Emit code to match the island(s) for the singleton.
+ o.indent(Indentation) << "// Check ";
+
+ for (I = Size; I != 0; --I) {
+ o << "Inst{" << EndBits[I-1] << '-' << StartBits[I-1] << "} ";
+ if (I > 1)
+ o << "&& ";
+ else
+ o << "for singleton decoding...\n";
+ }
+
+ o.indent(Indentation) << "if (";
+
+ for (I = Size; I != 0; --I) {
+ NumBits = EndBits[I-1] - StartBits[I-1] + 1;
+ o << "fieldFromInstruction(insn, " << StartBits[I-1] << ", " << NumBits
+ << ") == " << FieldVals[I-1];
+ if (I > 1)
+ o << " && ";
+ else
+ o << ")\n";
+ }
+
+ o.indent(Indentation) << " return " << Opc << "; // " << nameWithID(Opc)
+ << '\n';
+
+ return false;
+}
+
+// Emits code to decode the singleton, and then to decode the rest.
+void FilterChooser::emitSingletonDecoder(raw_ostream &o, unsigned &Indentation,
+ Filter &Best) {
+
+ unsigned Opc = Best.getSingletonOpc();
+
+ emitSingletonDecoder(o, Indentation, Opc);
+
+ // Emit code for the rest.
+ o.indent(Indentation) << "else\n";
+
+ Indentation += 2;
+ Best.getVariableFC().emit(o, Indentation);
+ Indentation -= 2;
+}
+
+// Assign a single filter and run with it. Top level API client can initialize
+// with a single filter to start the filtering process.
+void FilterChooser::runSingleFilter(FilterChooser &owner, unsigned startBit,
+ unsigned numBit, bool mixed) {
+ Filters.clear();
+ Filter F(*this, startBit, numBit, true);
+ Filters.push_back(F);
+ BestIndex = 0; // Sole Filter instance to choose from.
+ bestFilter().recurse();
+}
+
+// reportRegion is a helper function for filterProcessor to mark a region as
+// eligible for use as a filter region.
+void FilterChooser::reportRegion(bitAttr_t RA, unsigned StartBit,
+ unsigned BitIndex, bool AllowMixed) {
+ if (RA == ATTR_MIXED && AllowMixed)
+ Filters.push_back(Filter(*this, StartBit, BitIndex - StartBit, true));
+ else if (RA == ATTR_ALL_SET && !AllowMixed)
+ Filters.push_back(Filter(*this, StartBit, BitIndex - StartBit, false));
+}
+
+// FilterProcessor scans the well-known encoding bits of the instructions and
+// builds up a list of candidate filters. It chooses the best filter and
+// recursively descends down the decoding tree.
+bool FilterChooser::filterProcessor(bool AllowMixed, bool Greedy) {
+ Filters.clear();
+ BestIndex = -1;
+ unsigned numInstructions = Opcodes.size();
+
+ assert(numInstructions && "Filter created with no instructions");
+
+ // No further filtering is necessary.
+ if (numInstructions == 1)
+ return true;
+
+ // Heuristics. See also doFilter()'s "Heuristics" comment when num of
+ // instructions is 3.
+ if (AllowMixed && !Greedy) {
+ assert(numInstructions == 3);
+
+ for (unsigned i = 0; i < Opcodes.size(); ++i) {
+ std::vector<unsigned> StartBits;
+ std::vector<unsigned> EndBits;
+ std::vector<uint64_t> FieldVals;
+ insn_t Insn;
+
+ insnWithID(Insn, Opcodes[i]);
+
+ // Look for islands of undecoded bits of any instruction.
+ if (getIslands(StartBits, EndBits, FieldVals, Insn) > 0) {
+ // Found an instruction with island(s). Now just assign a filter.
+ runSingleFilter(*this, StartBits[0], EndBits[0] - StartBits[0] + 1,
+ true);
+ return true;
+ }
+ }
+ }
+
+ unsigned BitIndex, InsnIndex;
+
+ // We maintain BIT_WIDTH copies of the bitAttrs automaton.
+ // The automaton consumes the corresponding bit from each
+ // instruction.
+ //
+ // Input symbols: 0, 1, and _ (unset).
+ // States: NONE, FILTERED, ALL_SET, ALL_UNSET, and MIXED.
+ // Initial state: NONE.
+ //
+ // (NONE) ------- [01] -> (ALL_SET)
+ // (NONE) ------- _ ----> (ALL_UNSET)
+ // (ALL_SET) ---- [01] -> (ALL_SET)
+ // (ALL_SET) ---- _ ----> (MIXED)
+ // (ALL_UNSET) -- [01] -> (MIXED)
+ // (ALL_UNSET) -- _ ----> (ALL_UNSET)
+ // (MIXED) ------ . ----> (MIXED)
+ // (FILTERED)---- . ----> (FILTERED)
+
+ bitAttr_t bitAttrs[BIT_WIDTH];
+
+ // FILTERED bit positions provide no entropy and are not worthy of pursuing.
+ // Filter::recurse() sets either BIT_TRUE or BIT_FALSE for each position.
+ for (BitIndex = 0; BitIndex < BIT_WIDTH; ++BitIndex)
+ if (FilterBitValues[BitIndex] == BIT_TRUE ||
+ FilterBitValues[BitIndex] == BIT_FALSE)
+ bitAttrs[BitIndex] = ATTR_FILTERED;
+ else
+ bitAttrs[BitIndex] = ATTR_NONE;
+
+ for (InsnIndex = 0; InsnIndex < numInstructions; ++InsnIndex) {
+ insn_t insn;
+
+ insnWithID(insn, Opcodes[InsnIndex]);
+
+ for (BitIndex = 0; BitIndex < BIT_WIDTH; ++BitIndex) {
+ switch (bitAttrs[BitIndex]) {
+ case ATTR_NONE:
+ if (insn[BitIndex] == BIT_UNSET)
+ bitAttrs[BitIndex] = ATTR_ALL_UNSET;
+ else
+ bitAttrs[BitIndex] = ATTR_ALL_SET;
+ break;
+ case ATTR_ALL_SET:
+ if (insn[BitIndex] == BIT_UNSET)
+ bitAttrs[BitIndex] = ATTR_MIXED;
+ break;
+ case ATTR_ALL_UNSET:
+ if (insn[BitIndex] != BIT_UNSET)
+ bitAttrs[BitIndex] = ATTR_MIXED;
+ break;
+ case ATTR_MIXED:
+ case ATTR_FILTERED:
+ break;
+ }
+ }
+ }
+
+ // The regionAttr automaton consumes the bitAttrs automatons' state,
+ // lowest-to-highest.
+ //
+ // Input symbols: F(iltered), (all_)S(et), (all_)U(nset), M(ixed)
+ // States: NONE, ALL_SET, MIXED
+ // Initial state: NONE
+ //
+ // (NONE) ----- F --> (NONE)
+ // (NONE) ----- S --> (ALL_SET) ; and set region start
+ // (NONE) ----- U --> (NONE)
+ // (NONE) ----- M --> (MIXED) ; and set region start
+ // (ALL_SET) -- F --> (NONE) ; and report an ALL_SET region
+ // (ALL_SET) -- S --> (ALL_SET)
+ // (ALL_SET) -- U --> (NONE) ; and report an ALL_SET region
+ // (ALL_SET) -- M --> (MIXED) ; and report an ALL_SET region
+ // (MIXED) ---- F --> (NONE) ; and report a MIXED region
+ // (MIXED) ---- S --> (ALL_SET) ; and report a MIXED region
+ // (MIXED) ---- U --> (NONE) ; and report a MIXED region
+ // (MIXED) ---- M --> (MIXED)
+
+ bitAttr_t RA = ATTR_NONE;
+ unsigned StartBit = 0;
+
+ for (BitIndex = 0; BitIndex < BIT_WIDTH; BitIndex++) {
+ bitAttr_t bitAttr = bitAttrs[BitIndex];
+
+ assert(bitAttr != ATTR_NONE && "Bit without attributes");
+
+ switch (RA) {
+ case ATTR_NONE:
+ switch (bitAttr) {
+ case ATTR_FILTERED:
+ break;
+ case ATTR_ALL_SET:
+ StartBit = BitIndex;
+ RA = ATTR_ALL_SET;
+ break;
+ case ATTR_ALL_UNSET:
+ break;
+ case ATTR_MIXED:
+ StartBit = BitIndex;
+ RA = ATTR_MIXED;
+ break;
+ default:
+ assert(0 && "Unexpected bitAttr!");
+ }
+ break;
+ case ATTR_ALL_SET:
+ switch (bitAttr) {
+ case ATTR_FILTERED:
+ reportRegion(RA, StartBit, BitIndex, AllowMixed);
+ RA = ATTR_NONE;
+ break;
+ case ATTR_ALL_SET:
+ break;
+ case ATTR_ALL_UNSET:
+ reportRegion(RA, StartBit, BitIndex, AllowMixed);
+ RA = ATTR_NONE;
+ break;
+ case ATTR_MIXED:
+ reportRegion(RA, StartBit, BitIndex, AllowMixed);
+ StartBit = BitIndex;
+ RA = ATTR_MIXED;
+ break;
+ default:
+ assert(0 && "Unexpected bitAttr!");
+ }
+ break;
+ case ATTR_MIXED:
+ switch (bitAttr) {
+ case ATTR_FILTERED:
+ reportRegion(RA, StartBit, BitIndex, AllowMixed);
+ StartBit = BitIndex;
+ RA = ATTR_NONE;
+ break;
+ case ATTR_ALL_SET:
+ reportRegion(RA, StartBit, BitIndex, AllowMixed);
+ StartBit = BitIndex;
+ RA = ATTR_ALL_SET;
+ break;
+ case ATTR_ALL_UNSET:
+ reportRegion(RA, StartBit, BitIndex, AllowMixed);
+ RA = ATTR_NONE;
+ break;
+ case ATTR_MIXED:
+ break;
+ default:
+ assert(0 && "Unexpected bitAttr!");
+ }
+ break;
+ case ATTR_ALL_UNSET:
+ assert(0 && "regionAttr state machine has no ATTR_UNSET state");
+ case ATTR_FILTERED:
+ assert(0 && "regionAttr state machine has no ATTR_FILTERED state");
+ }
+ }
+
+ // At the end, if we're still in ALL_SET or MIXED states, report a region
+ switch (RA) {
+ case ATTR_NONE:
+ break;
+ case ATTR_FILTERED:
+ break;
+ case ATTR_ALL_SET:
+ reportRegion(RA, StartBit, BitIndex, AllowMixed);
+ break;
+ case ATTR_ALL_UNSET:
+ break;
+ case ATTR_MIXED:
+ reportRegion(RA, StartBit, BitIndex, AllowMixed);
+ break;
+ }
+
+ // We have finished with the filter processing. Now it's time to choose
+ // the best performing filter.
+ BestIndex = 0;
+ bool AllUseless = true;
+ unsigned BestScore = 0;
+
+ for (unsigned i = 0, e = Filters.size(); i != e; ++i) {
+ unsigned Usefulness = Filters[i].usefulness();
+
+ if (Usefulness)
+ AllUseless = false;
+
+ if (Usefulness > BestScore) {
+ BestIndex = i;
+ BestScore = Usefulness;
+ }
+ }
+
+ if (!AllUseless)
+ bestFilter().recurse();
+
+ return !AllUseless;
+} // end of FilterChooser::filterProcessor(bool)
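+
+// Illustrative run of the two automata above, using a 4-bit width for brevity:
+// for the encodings 1_01 and 1101 the per-bit attributes are ALL_SET at bits
+// 0, 1 and 3 and MIXED at bit 2, so the region scan reports candidate filters
+// over Inst{1-0} and Inst{3} when AllowMixed is false, and only the mixed
+// region Inst{2} when AllowMixed is true.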
+
+// Decides on the best configuration of filter(s) to use in order to decode
+// the instructions. A conflict of instructions may occur, in which case we
+// dump the conflict set to the standard error.
+void FilterChooser::doFilter() {
+ unsigned Num = Opcodes.size();
+ assert(Num && "FilterChooser created with no instructions");
+
+ // Heuristics: Use Inst{31-28} as the top level filter for ARM ISA.
+ if (TargetName == TARGET_ARM && Parent == NULL) {
+ runSingleFilter(*this, 28, 4, false);
+ return;
+ }
+
+ // Try regions of consecutive known bit values first.
+ if (filterProcessor(false))
+ return;
+
+ // Then regions of mixed bits (both known and uninitialized bit values allowed).
+ if (filterProcessor(true))
+ return;
+
+ // Heuristics to cope with conflict set {t2CMPrs, t2SUBSrr, t2SUBSrs} where
+ // no single instruction for the maximum ATTR_MIXED region Inst{14-4} has a
+ // well-known encoding pattern. In such a case, we backtrack and scan for
+ // the very first consecutive ATTR_ALL_SET region and assign a filter to it.
+ if (Num == 3 && filterProcessor(true, false))
+ return;
+
+ // If we come to here, the instruction decoding has failed.
+ // Set the BestIndex to -1 to indicate so.
+ BestIndex = -1;
+}
+
+// Emits code to decode our share of instructions. Returns true if the
+// emitted code causes a return, which occurs if we know how to decode
+// the instruction at this level or the instruction is not decodeable.
+bool FilterChooser::emit(raw_ostream &o, unsigned &Indentation) {
+ if (Opcodes.size() == 1)
+ // There is only one instruction in the set, which is great!
+ // Call emitSingletonDecoder() to see whether there are any remaining
+ // encoding bits.
+ return emitSingletonDecoder(o, Indentation, Opcodes[0]);
+
+ // Choose the best filter to do the decodings!
+ if (BestIndex != -1) {
+ Filter &Best = bestFilter();
+ if (Best.getNumFiltered() == 1)
+ emitSingletonDecoder(o, Indentation, Best);
+ else
+ bestFilter().emit(o, Indentation);
+ return false;
+ }
+
+ // If we reach here, there is a conflict in decoding. Let's resolve the known
+ // conflicts!
+ if ((TargetName == TARGET_ARM || TargetName == TARGET_THUMB) &&
+ Opcodes.size() == 2) {
+ // Resolve the known conflict sets:
+ //
+ // 1. source registers are identical => VMOVDneon; otherwise => VORRd
+ // 2. source registers are identical => VMOVQ; otherwise => VORRq
+ // 3. LDR, LDRcp => return LDR for now.
+ // FIXME: How can we distinguish between LDR and LDRcp? Do we need to?
+ // 4. tLDM, tLDM_UPD => Rn = Inst{10-8}, reglist = Inst{7-0},
+ // wback = registers<Rn> = 0
+ // NOTE: (tLDM, tLDM_UPD) resolution must come before Advanced SIMD
+ // addressing mode resolution!!!
+ // 5. VLD[234]LN*/VST[234]LN* vs. VLD[234]LN*_UPD/VST[234]LN*_UPD conflicts
+ // are resolved returning the non-UPD versions of the instructions if the
+ // Rm field, i.e., Inst{3-0} is 0b1111. This is specified in A7.7.1
+ // Advanced SIMD addressing mode.
+ const std::string &name1 = nameWithID(Opcodes[0]);
+ const std::string &name2 = nameWithID(Opcodes[1]);
+ if ((name1 == "VMOVDneon" && name2 == "VORRd") ||
+ (name1 == "VMOVQ" && name2 == "VORRq")) {
+ // Inserting the opening curly brace for this case block.
+ --Indentation; --Indentation;
+ o.indent(Indentation) << "{\n";
+ ++Indentation; ++Indentation;
+
+ o.indent(Indentation)
+ << "field_t N = fieldFromInstruction(insn, 7, 1), "
+ << "M = fieldFromInstruction(insn, 5, 1);\n";
+ o.indent(Indentation)
+ << "field_t Vn = fieldFromInstruction(insn, 16, 4), "
+ << "Vm = fieldFromInstruction(insn, 0, 4);\n";
+ o.indent(Indentation)
+ << "return (N == M && Vn == Vm) ? "
+ << Opcodes[0] << " /* " << name1 << " */ : "
+ << Opcodes[1] << " /* " << name2 << " */ ;\n";
+
+ // Inserting the closing curly brace for this case block.
+ --Indentation; --Indentation;
+ o.indent(Indentation) << "}\n";
+ ++Indentation; ++Indentation;
+
+ return true;
+ }
+ if (name1 == "LDR" && name2 == "LDRcp") {
+ o.indent(Indentation)
+ << "return " << Opcodes[0]
+ << "; // Returning LDR for {LDR, LDRcp}\n";
+ return true;
+ }
+ if (name1 == "tLDM" && name2 == "tLDM_UPD") {
+ // Inserting the opening curly brace for this case block.
+ --Indentation; --Indentation;
+ o.indent(Indentation) << "{\n";
+ ++Indentation; ++Indentation;
+
+ o.indent(Indentation)
+ << "unsigned Rn = fieldFromInstruction(insn, 8, 3), "
+ << "list = fieldFromInstruction(insn, 0, 8);\n";
+ o.indent(Indentation)
+ << "return ((list >> Rn) & 1) == 0 ? "
+ << Opcodes[1] << " /* " << name2 << " */ : "
+ << Opcodes[0] << " /* " << name1 << " */ ;\n";
+
+ // Inserting the closing curly brace for this case block.
+ --Indentation; --Indentation;
+ o.indent(Indentation) << "}\n";
+ ++Indentation; ++Indentation;
+
+ return true;
+ }
+ if (sameStringExceptSuffix(name1, name2, "_UPD")) {
+ o.indent(Indentation)
+ << "return fieldFromInstruction(insn, 0, 4) == 15 ? " << Opcodes[0]
+ << " /* " << name1 << " */ : " << Opcodes[1] << "/* " << name2
+ << " */ ; // Advanced SIMD addressing mode\n";
+ return true;
+ }
+
+ // Otherwise, it does not belong to the known conflict sets.
+ }
+
+ // We don't know how to decode these instructions! Return 0 and dump the
+ // conflict set!
+ o.indent(Indentation) << "return 0;" << " // Conflict set: ";
+ for (int i = 0, N = Opcodes.size(); i < N; ++i) {
+ o << nameWithID(Opcodes[i]);
+ if (i < (N - 1))
+ o << ", ";
+ else
+ o << '\n';
+ }
+
+ // Print out useful conflict information for postmortem analysis.
+ errs() << "Decoding Conflict:\n";
+
+ dumpStack(errs(), "\t\t");
+
+ for (unsigned i = 0; i < Opcodes.size(); i++) {
+ const std::string &Name = nameWithID(Opcodes[i]);
+
+ errs() << '\t' << Name << " ";
+ dumpBits(errs(),
+ getBitsField(*AllInstructions[Opcodes[i]]->TheDef, "Inst"));
+ errs() << '\n';
+ }
+
+ return true;
+}
+
+
+////////////////////////////////////////////
+// //
+// ARMDEBackend //
+// (Helper class for ARMDecoderEmitter) //
+// //
+////////////////////////////////////////////
+
+class ARMDecoderEmitter::ARMDEBackend {
+public:
+ ARMDEBackend(ARMDecoderEmitter &frontend) :
+ NumberedInstructions(),
+ Opcodes(),
+ Frontend(frontend),
+ Target(),
+ FC(NULL)
+ {
+ if (Target.getName() == "ARM")
+ TargetName = TARGET_ARM;
+ else {
+ errs() << "Target name " << Target.getName() << " not recognized\n";
+ assert(0 && "Unknown target");
+ }
+
+ // Populate the instructions for our TargetName.
+ populateInstructions();
+ }
+
+ ~ARMDEBackend() {
+ if (FC) {
+ delete FC;
+ FC = NULL;
+ }
+ }
+
+ void getInstructionsByEnumValue(std::vector<const CodeGenInstruction*>
+ &NumberedInstructions) {
+ // We must emit the PHI opcode first...
+ std::string Namespace = Target.getInstNamespace();
+ assert(!Namespace.empty() && "No instructions defined.");
+
+ NumberedInstructions = Target.getInstructionsByEnumValue();
+ }
+
+ bool populateInstruction(const CodeGenInstruction &CGI, TARGET_NAME_t TN);
+
+ void populateInstructions();
+
+ // Emits disassembler code for instruction decoding. This delegates to the
+ // FilterChooser instance to do the heavy lifting.
+ void emit(raw_ostream &o);
+
+protected:
+ std::vector<const CodeGenInstruction*> NumberedInstructions;
+ std::vector<unsigned> Opcodes;
+ // Special case for the ARM chip, which supports ARM and Thumb ISAs.
+ // Opcodes2 will be populated with the Thumb opcodes.
+ std::vector<unsigned> Opcodes2;
+ ARMDecoderEmitter &Frontend;
+ CodeGenTarget Target;
+ FilterChooser *FC;
+
+ TARGET_NAME_t TargetName;
+};
+
+bool ARMDecoderEmitter::ARMDEBackend::populateInstruction(
+ const CodeGenInstruction &CGI, TARGET_NAME_t TN) {
+ const Record &Def = *CGI.TheDef;
+ const StringRef Name = Def.getName();
+ uint8_t Form = getByteField(Def, "Form");
+
+ BitsInit &Bits = getBitsField(Def, "Inst");
+
+ // If the bit positions are not all specified, do not decode this instruction.
+ // We are bound to fail! For proper disassembly, the well-known encoding bits
+ // of the instruction must be fully specified.
+ //
+ // This also removes pseudo instructions from consideration for disassembly,
+ // which is a better design and less fragile than the name matching.
+ if (Bits.allInComplete()) return false;
+
+ if (TN == TARGET_ARM) {
+ // FIXME: what about Int_MemBarrierV6 and Int_SyncBarrierV6?
+ if ((Name != "Int_MemBarrierV7" && Name != "Int_SyncBarrierV7") &&
+ Form == ARM_FORMAT_PSEUDO)
+ return false;
+ if (thumbInstruction(Form))
+ return false;
+ if (Name.find("CMPz") != std::string::npos /* ||
+ Name.find("CMNz") != std::string::npos */)
+ return false;
+
+ // Ignore pseudo instructions.
+ if (Name == "BXr9" || Name == "BMOVPCRX" || Name == "BMOVPCRXr9")
+ return false;
+
+ // Tail calls are other patterns that generate existing instructions.
+ if (Name == "TCRETURNdi" || Name == "TCRETURNdiND" ||
+ Name == "TCRETURNri" || Name == "TCRETURNriND" ||
+ Name == "TAILJMPd" || Name == "TAILJMPdt" ||
+ Name == "TAILJMPdND" || Name == "TAILJMPdNDt" ||
+ Name == "TAILJMPr" || Name == "TAILJMPrND" ||
+ Name == "MOVr_TC")
+ return false;
+
+ // VLDMQ/VSTMQ can be handled with the more generic VLDMD/VSTMD.
+ if (Name == "VLDMQ" || Name == "VLDMQ_UPD" ||
+ Name == "VSTMQ" || Name == "VSTMQ_UPD")
+ return false;
+
+ //
+ // The following special cases are for conflict resolutions.
+ //
+
+ // NEON NLdStFrm conflict resolutions:
+ //
+ // 1. Ignore suffix "odd" and "odd_UPD", prefer the "even" register-
+ // numbered ones which have the same Asm format string.
+ // 2. Ignore VST2d64_UPD, which conflicts with VST1q64_UPD.
+ // 3. Ignore VLD2d64_UPD, which conflicts with VLD1q64_UPD.
+ // 4. Ignore VLD1q[_UPD], which conflicts with VLD1q64[_UPD].
+ // 5. Ignore VST1q[_UPD], which conflicts with VST1q64[_UPD].
+ if (Name.endswith("odd") || Name.endswith("odd_UPD") ||
+ Name == "VST2d64_UPD" || Name == "VLD2d64_UPD" ||
+ Name == "VLD1q" || Name == "VLD1q_UPD" ||
+ Name == "VST1q" || Name == "VST1q_UPD")
+ return false;
+
+ // RSCSri and RSCSrs set the 's' bit, but are not predicated. We are
+ // better off using the generic RSCri and RSCrs instructions.
+ if (Name == "RSCSri" || Name == "RSCSrs") return false;
+
+ // MOVCCr, MOVCCs, MOVCCi, FCYPScc, FCYPDcc, FNEGScc, and FNEGDcc are used
+ // in the compiler to implement conditional moves. We can ignore them in
+ // favor of their more generic versions of instructions.
+ // See also SDNode *ARMDAGToDAGISel::Select(SDValue Op).
+ if (Name == "MOVCCr" || Name == "MOVCCs" || Name == "MOVCCi" ||
+ Name == "FCPYScc" || Name == "FCPYDcc" ||
+ Name == "FNEGScc" || Name == "FNEGDcc")
+ return false;
+
+ // Ditto for VMOVDcc, VMOVScc, VNEGDcc, and VNEGScc.
+ if (Name == "VMOVDcc" || Name == "VMOVScc" || Name == "VNEGDcc" ||
+ Name == "VNEGScc")
+ return false;
+
+ // Ignore the *_sfp instructions when decoding. They are used by the
+ // compiler to implement scalar floating point operations using vector
+ // operations in order to work around some performance issues.
+ if (Name.find("_sfp") != std::string::npos) return false;
+
+ // LDM_RET is a special case of LDM (Load Multiple) where the registers
+ // loaded include the PC, causing a branch to a loaded address. Ignore
+ // the LDM_RET instruction when decoding.
+ if (Name == "LDM_RET") return false;
+
+ // Bcc is in a more generic form than B. Ignore B when decoding.
+ if (Name == "B") return false;
+
+ // Ignore the non-Darwin BL instructions and the TPsoft (TLS) instruction.
+ if (Name == "BL" || Name == "BL_pred" || Name == "BLX" || Name == "BX" ||
+ Name == "TPsoft")
+ return false;
+
+ // Ignore VDUPf[d|q] instructions known to conflict with VDUP32[d-q] for
+ // decoding. The instruction duplicates an element from an ARM core
+ // register into every element of the destination vector. There is no
+ // distinction between data types.
+ if (Name == "VDUPfd" || Name == "VDUPfq") return false;
+
+ // A8-598: VEXT
+ // Vector Extract extracts elements from the bottom end of the second
+ // operand vector and the top end of the first, concatenates them and
+ // places the result in the destination vector. The elements of the
+ // vectors are treated as being 8-bit bitfields. There is no distinction
+ // between data types. The size of the operation can be specified in
+ // assembler as vext.size. If the value is 16, 32, or 64, the syntax is
+ // a pseudo-instruction for a VEXT instruction specifying the equivalent
+ // number of bytes.
+ //
+ // Variants VEXTd16, VEXTd32, VEXTd8, and VEXTdf are reduced to VEXTd8;
+ // variants VEXTq16, VEXTq32, VEXTq8, and VEXTqf are reduced to VEXTq8.
+ if (Name == "VEXTd16" || Name == "VEXTd32" || Name == "VEXTdf" ||
+ Name == "VEXTq16" || Name == "VEXTq32" || Name == "VEXTqf")
+ return false;
+
+ // Vector Reverse is similar to Vector Extract. There is no distinction
+ // between data types, other than size.
+ //
+ // VREV64df is equivalent to VREV64d32.
+ // VREV64qf is equivalent to VREV64q32.
+ if (Name == "VREV64df" || Name == "VREV64qf") return false;
+
+ // VDUPLNfd is equivalent to VDUPLN32d; VDUPfdf is specialized VDUPLN32d.
+ // VDUPLNfq is equivalent to VDUPLN32q; VDUPfqf is specialized VDUPLN32q.
+ // VLD1df is equivalent to VLD1d32.
+ // VLD1qf is equivalent to VLD1q32.
+ // VLD2d64 is equivalent to VLD1q64.
+ // VST1df is equivalent to VST1d32.
+ // VST1qf is equivalent to VST1q32.
+ // VST2d64 is equivalent to VST1q64.
+ if (Name == "VDUPLNfd" || Name == "VDUPfdf" ||
+ Name == "VDUPLNfq" || Name == "VDUPfqf" ||
+ Name == "VLD1df" || Name == "VLD1qf" || Name == "VLD2d64" ||
+ Name == "VST1df" || Name == "VST1qf" || Name == "VST2d64")
+ return false;
+ } else if (TN == TARGET_THUMB) {
+ if (!thumbInstruction(Form))
+ return false;
+
+ // On Darwin R9 is call-clobbered. Ignore the non-Darwin counterparts.
+ if (Name == "tBL" || Name == "tBLXi" || Name == "tBLXr")
+ return false;
+
+ // Ignore the TPsoft (TLS) instructions, which conflict with tBLr9.
+ if (Name == "tTPsoft" || Name == "t2TPsoft")
+ return false;
+
+ // Ignore tLEApcrel and tLEApcrelJT, prefer tADDrPCi.
+ if (Name == "tLEApcrel" || Name == "tLEApcrelJT")
+ return false;
+
+ // Ignore t2LEApcrel, prefer the generic t2ADD* for disassembly printing.
+ if (Name == "t2LEApcrel")
+ return false;
+
+ // Ignore tADDrSP, tADDspr, and tPICADD, prefer the generic tADDhirr.
+ // Ignore t2SUBrSPs, prefer the t2SUB[S]r[r|s].
+ // Ignore t2ADDrSPs, prefer the t2ADD[S]r[r|s].
+ // Ignore t2ADDrSPi/t2SUBrSPi, which have more generic counterparts.
+ // Ignore t2ADDrSPi12/t2SUBrSPi12, which have more generic counterparts.
+ if (Name == "tADDrSP" || Name == "tADDspr" || Name == "tPICADD" ||
+ Name == "t2SUBrSPs" || Name == "t2ADDrSPs" ||
+ Name == "t2ADDrSPi" || Name == "t2SUBrSPi" ||
+ Name == "t2ADDrSPi12" || Name == "t2SUBrSPi12")
+ return false;
+
+ // Ignore t2LDRDpci, prefer the generic t2LDRDi8, t2LDRD_PRE, t2LDRD_POST.
+ if (Name == "t2LDRDpci")
+ return false;
+
+ // Ignore t2TBB, t2TBH and prefer the generic t2TBBgen, t2TBHgen.
+ if (Name == "t2TBB" || Name == "t2TBH")
+ return false;
+
+ // Resolve conflicts:
+ //
+ // tBfar conflicts with tBLr9
+ // tCMNz conflicts with tCMN (with assembly format strings being equal)
+ // tPOP_RET/t2LDM_RET conflict with tPOP/t2LDM (ditto)
+ // tMOVCCi conflicts with tMOVi8
+ // tMOVCCr conflicts with tMOVgpr2gpr
+ // tBR_JTr conflicts with tBRIND
+ // tSpill conflicts with tSTRspi
+ // tLDRcp conflicts with tLDRspi
+ // tRestore conflicts with tLDRspi
+ // t2LEApcrelJT conflicts with t2LEApcrel
+ if (Name == "tBfar" ||
+ /* Name == "tCMNz" || */ Name == "tCMPzi8" || Name == "tCMPzr" ||
+ Name == "tCMPzhir" || /* Name == "t2CMNzrr" || Name == "t2CMNzrs" ||
+ Name == "t2CMNzri" || */ Name == "t2CMPzrr" || Name == "t2CMPzrs" ||
+ Name == "t2CMPzri" || Name == "tPOP_RET" || Name == "t2LDM_RET" ||
+ Name == "tMOVCCi" || Name == "tMOVCCr" || Name == "tBR_JTr" ||
+ Name == "tSpill" || Name == "tLDRcp" || Name == "tRestore" ||
+ Name == "t2LEApcrelJT")
+ return false;
+ }
+
+ // Dumps the instruction encoding format.
+ switch (TargetName) {
+ case TARGET_ARM:
+ case TARGET_THUMB:
+ DEBUG(errs() << Name << " " << stringForARMFormat((ARMFormat)Form));
+ break;
+ }
+
+ DEBUG({
+ errs() << " ";
+
+ // Dumps the instruction encoding bits.
+ dumpBits(errs(), Bits);
+
+ errs() << '\n';
+
+ // Dumps the list of operand info.
+ for (unsigned i = 0, e = CGI.OperandList.size(); i != e; ++i) {
+ CodeGenInstruction::OperandInfo Info = CGI.OperandList[i];
+ const std::string &OperandName = Info.Name;
+ const Record &OperandDef = *Info.Rec;
+
+ errs() << "\t" << OperandName << " (" << OperandDef.getName() << ")\n";
+ }
+ });
+
+ return true;
+}
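The name-based filtering in populateInstruction() above boils down to three string tests: exact match, suffix match, and substring search. Below is a minimal standalone sketch of the same idiom, using plain std::string and a made-up helper rather than llvm::StringRef, and only a few of the names rejected above:

#include <string>

// Illustrative only: mirrors the exact/suffix/substring rejects used above.
static bool endsWith(const std::string &S, const std::string &Suffix) {
  return S.size() >= Suffix.size() &&
         S.compare(S.size() - Suffix.size(), Suffix.size(), Suffix) == 0;
}

static bool keepForDecoding(const std::string &Name) {
  if (Name == "LDM_RET" || Name == "B")                    // exact-match rejects
    return false;
  if (endsWith(Name, "odd") || endsWith(Name, "odd_UPD"))  // suffix rejects
    return false;
  if (Name.find("_sfp") != std::string::npos)              // substring rejects
    return false;
  return true;                                             // everything else is decoded
}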
+
+void ARMDecoderEmitter::ARMDEBackend::populateInstructions() {
+ getInstructionsByEnumValue(NumberedInstructions);
+
+ uint16_t numUIDs = NumberedInstructions.size();
+ uint16_t uid;
+
+ const char *instClass = NULL;
+
+ switch (TargetName) {
+ case TARGET_ARM:
+ instClass = "InstARM";
+ break;
+ default:
+ assert(0 && "Unreachable code!");
+ }
+
+ for (uid = 0; uid < numUIDs; uid++) {
+ // filter out intrinsics
+ if (!NumberedInstructions[uid]->TheDef->isSubClassOf(instClass))
+ continue;
+
+ if (populateInstruction(*NumberedInstructions[uid], TargetName))
+ Opcodes.push_back(uid);
+ }
+
+ // Special handling for the ARM chip, which supports two modes of execution.
+ // This branch handles the Thumb opcodes.
+ if (TargetName == TARGET_ARM) {
+ for (uid = 0; uid < numUIDs; uid++) {
+ // filter out intrinsics
+ if (!NumberedInstructions[uid]->TheDef->isSubClassOf("InstARM")
+ && !NumberedInstructions[uid]->TheDef->isSubClassOf("InstThumb"))
+ continue;
+
+ if (populateInstruction(*NumberedInstructions[uid], TARGET_THUMB))
+ Opcodes2.push_back(uid);
+ }
+ }
+}
+
+// Emits disassembler code for instruction decoding. This delegates to the
+// FilterChooser instance to do the heavy lifting.
+void ARMDecoderEmitter::ARMDEBackend::emit(raw_ostream &o) {
+ switch (TargetName) {
+ case TARGET_ARM:
+ Frontend.EmitSourceFileHeader("ARM/Thumb Decoders", o);
+ break;
+ default:
+ assert(0 && "Unreachable code!");
+ }
+
+ o << "#include \"llvm/System/DataTypes.h\"\n";
+ o << "#include <assert.h>\n";
+ o << '\n';
+ o << "namespace llvm {\n\n";
+
+ FilterChooser::setTargetName(TargetName);
+
+ switch (TargetName) {
+ case TARGET_ARM: {
+ // Emit common utility and ARM ISA decoder.
+ FC = new FilterChooser(NumberedInstructions, Opcodes);
+ // Reset indentation level.
+ unsigned Indentation = 0;
+ FC->emitTop(o, Indentation);
+ delete FC;
+
+ // Emit Thumb ISA decoder as well.
+ FilterChooser::setTargetName(TARGET_THUMB);
+ FC = new FilterChooser(NumberedInstructions, Opcodes2);
+ // Reset indentation level.
+ Indentation = 0;
+ FC->emitBot(o, Indentation);
+ break;
+ }
+ default:
+ assert(0 && "Unreachable code!");
+ }
+
+ o << "\n} // End llvm namespace \n";
+}
+
+/////////////////////////
+// Backend interface //
+/////////////////////////
+
+void ARMDecoderEmitter::initBackend()
+{
+ Backend = new ARMDEBackend(*this);
+}
+
+void ARMDecoderEmitter::run(raw_ostream &o)
+{
+ Backend->emit(o);
+}
+
+void ARMDecoderEmitter::shutdownBackend()
+{
+ delete Backend;
+ Backend = NULL;
+}
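The backend-interface block above is a pimpl-style split: the public ARMDecoderEmitter only holds an opaque ARMDEBackend pointer, created in initBackend() and torn down in shutdownBackend(), and run() just forwards to it. A compilable sketch of that shape, with hypothetical Frontend/Backend names standing in for the real classes:

#include <iostream>
#include <ostream>

// Sketch only; Frontend/Backend stand in for ARMDecoderEmitter/ARMDEBackend.
struct Backend {
  void emit(std::ostream &o) { o << "// generated decoder tables\n"; }
};

class Frontend {
  Backend *B;
public:
  Frontend() : B(new Backend()) {}            // initBackend()
  ~Frontend() { delete B; B = 0; }            // shutdownBackend()
  void run(std::ostream &o) { B->emit(o); }   // delegate the heavy lifting
};

int main() {
  Frontend F;
  F.run(std::cout);
  return 0;
}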
diff --git a/libclamav/c++/llvm/utils/TableGen/ARMDecoderEmitter.h b/libclamav/c++/llvm/utils/TableGen/ARMDecoderEmitter.h
new file mode 100644
index 0000000..571a947
--- /dev/null
+++ b/libclamav/c++/llvm/utils/TableGen/ARMDecoderEmitter.h
@@ -0,0 +1,50 @@
+//===------------ ARMDecoderEmitter.h - Decoder Generator -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the ARM Disassembler.
+// It contains the tablegen backend declaration ARMDecoderEmitter.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ARMDECODEREMITTER_H
+#define ARMDECODEREMITTER_H
+
+#include "TableGenBackend.h"
+
+#include "llvm/System/DataTypes.h"
+
+namespace llvm {
+
+class ARMDecoderEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+public:
+ ARMDecoderEmitter(RecordKeeper &R) : Records(R) {
+ initBackend();
+ }
+
+ ~ARMDecoderEmitter() {
+ shutdownBackend();
+ }
+
+ // run - Output the decoder
+ void run(raw_ostream &o);
+
+private:
+ // Helper class for ARMDecoderEmitter.
+ class ARMDEBackend;
+
+ ARMDEBackend *Backend;
+
+ void initBackend();
+ void shutdownBackend();
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/libclamav/c++/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/AsmMatcherEmitter.cpp
index b823e57..5583986 100644
--- a/libclamav/c++/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -199,6 +199,14 @@ static void TokenizeAsmString(StringRef AsmString,
break;
}
+ case '.':
+ if (InTok) {
+ Tokens.push_back(AsmString.slice(Prev, i));
+ }
+ Prev = i;
+ InTok = true;
+ break;
+
default:
InTok = true;
}
@@ -260,9 +268,12 @@ static bool IsAssemblerInstruction(StringRef Name,
}
if (Tokens[i][0] == '$' && !OperandNames.insert(Tokens[i]).second) {
- std::string Err = "'" + Name.str() + "': " +
- "invalid assembler instruction; tied operand '" + Tokens[i].str() + "'";
- throw TGError(CGI.TheDef->getLoc(), Err);
+ DEBUG({
+ errs() << "warning: '" << Name << "': "
+ << "ignoring instruction with tied operand '"
+ << Tokens[i].str() << "'\n";
+ });
+ return false;
}
}
@@ -271,6 +282,8 @@ static bool IsAssemblerInstruction(StringRef Name,
namespace {
+struct SubtargetFeatureInfo;
+
/// ClassInfo - Helper class for storing the information about a particular
/// class of operands which can be matched.
struct ClassInfo {
@@ -388,6 +401,9 @@ public:
/// operator< - Compare two classes.
bool operator<(const ClassInfo &RHS) const {
+ if (this == &RHS)
+ return false;
+
// Unrelated classes can be ordered by kind.
if (!isRelatedTo(RHS))
return Kind < RHS.Kind;
@@ -403,7 +419,13 @@ public:
default:
// This class precedes the RHS if it is a proper subset of the RHS.
- return this != &RHS && isSubsetOf(RHS);
+ if (isSubsetOf(RHS))
+ return true;
+ if (RHS.isSubsetOf(*this))
+ return false;
+
+ // Otherwise, order by name to ensure we have a total ordering.
+ return ValueName < RHS.ValueName;
}
}
};
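The hunk above matters because the subset relation alone is only a partial order; the self-comparison guard plus the ValueName tiebreak is what turns it into a total ordering that std::sort can rely on. A reduced sketch of that comparator shape, with a hypothetical Cls type whose subset test is plain set inclusion:

#include <algorithm>
#include <set>
#include <string>

// Illustrative only: shows how the tiebreak yields a usable ordering.
struct Cls {
  std::set<std::string> Members;
  std::string ValueName;

  bool isProperSubsetOf(const Cls &RHS) const {
    return Members != RHS.Members &&
           std::includes(RHS.Members.begin(), RHS.Members.end(),
                         Members.begin(), Members.end());
  }
  bool operator<(const Cls &RHS) const {
    if (this == &RHS) return false;           // irreflexive
    if (isProperSubsetOf(RHS)) return true;   // proper subsets sort first
    if (RHS.isProperSubsetOf(*this)) return false;
    return ValueName < RHS.ValueName;         // otherwise order by name
  }
};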
@@ -435,6 +457,9 @@ struct InstructionInfo {
/// Operands - The operands that this instruction matches.
SmallVector<Operand, 4> Operands;
+ /// Predicates - The required subtarget features to match this instruction.
+ SmallVector<SubtargetFeatureInfo*, 4> RequiredFeatures;
+
/// ConversionFnKind - The enum value which is passed to the generated
/// ConvertToMCInst to convert parsed operands into an MCInst for this
/// function.
@@ -496,6 +521,19 @@ public:
void dump();
};
+/// SubtargetFeatureInfo - Helper class for storing information on a subtarget
+/// feature which participates in instruction matching.
+struct SubtargetFeatureInfo {
+ /// \brief The predicate record for this feature.
+ Record *TheDef;
+
+ /// \brief A unique index assigned to represent this feature.
+ unsigned Index;
+
+ /// \brief The name of the enumerated constant identifying this feature.
+ std::string EnumName;
+};
+
class AsmMatcherInfo {
public:
/// The tablegen AsmParser record.
@@ -516,6 +554,9 @@ public:
/// Map of Register records to their class information.
std::map<Record*, ClassInfo*> RegisterClasses;
+ /// Map of Predicate records to their subtarget information.
+ std::map<Record*, SubtargetFeatureInfo*> SubtargetFeatures;
+
private:
/// Map of token to class information which has already been constructed.
std::map<std::string, ClassInfo*> TokenClasses;
@@ -534,6 +575,23 @@ private:
ClassInfo *getOperandClass(StringRef Token,
const CodeGenInstruction::OperandInfo &OI);
+ /// getSubtargetFeature - Lookup or create the subtarget feature info for the
+ /// given operand.
+ SubtargetFeatureInfo *getSubtargetFeature(Record *Def) {
+ assert(Def->isSubClassOf("Predicate") && "Invalid predicate type!");
+
+ SubtargetFeatureInfo *&Entry = SubtargetFeatures[Def];
+ if (!Entry) {
+ Entry = new SubtargetFeatureInfo;
+ Entry->TheDef = Def;
+ Entry->Index = SubtargetFeatures.size() - 1;
+ Entry->EnumName = "Feature_" + Def->getName();
+ assert(Entry->Index < 32 && "Too many subtarget features!");
+ }
+
+ return Entry;
+ }
+
/// BuildRegisterClasses - Build the ClassInfo* instances for register
/// classes.
void BuildRegisterClasses(CodeGenTarget &Target,
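getSubtargetFeature() above uses the usual lazy map-slot idiom: std::map::operator[] default-constructs a null pointer on first lookup, so the entry is allocated exactly once and its Index reflects discovery order. The same idiom stripped of the tablegen types (FeatureInfo and the string key are placeholders):

#include <map>
#include <string>

struct FeatureInfo {
  unsigned Index;
  std::string EnumName;
};

static std::map<std::string, FeatureInfo*> Features;

static FeatureInfo *getFeature(const std::string &Name) {
  FeatureInfo *&Entry = Features[Name];   // creates a null slot on first use
  if (!Entry) {
    Entry = new FeatureInfo();
    Entry->Index = Features.size() - 1;   // 0, 1, 2, ... in discovery order
    Entry->EnumName = "Feature_" + Name;
  }
  return Entry;
}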
@@ -794,15 +852,19 @@ void AsmMatcherInfo::BuildOperandClasses(CodeGenTarget &Target) {
ClassInfo *CI = AsmOperandClasses[*it];
CI->Kind = ClassInfo::UserClass0 + Index;
- Init *Super = (*it)->getValueInit("SuperClass");
- if (DefInit *DI = dynamic_cast<DefInit*>(Super)) {
+ ListInit *Supers = (*it)->getValueAsListInit("SuperClasses");
+ for (unsigned i = 0, e = Supers->getSize(); i != e; ++i) {
+ DefInit *DI = dynamic_cast<DefInit*>(Supers->getElement(i));
+ if (!DI) {
+ PrintError((*it)->getLoc(), "Invalid super class reference!");
+ continue;
+ }
+
ClassInfo *SC = AsmOperandClasses[DI->getDef()];
if (!SC)
PrintError((*it)->getLoc(), "Invalid super class reference!");
else
CI->SuperClasses.push_back(SC);
- } else {
- assert(dynamic_cast<UnsetInit*>(Super) && "Unexpected SuperClass field!");
}
CI->ClassName = (*it)->getValueAsString("Name");
CI->Name = "MCK_" + CI->ClassName;
@@ -844,19 +906,20 @@ void AsmMatcherInfo::BuildInfo(CodeGenTarget &Target) {
// Parse the instructions; we need to do this first so that we can gather the
// singleton register classes.
std::set<std::string> SingletonRegisterNames;
- for (std::map<std::string, CodeGenInstruction>::const_iterator
- it = Target.getInstructions().begin(),
- ie = Target.getInstructions().end();
- it != ie; ++it) {
- const CodeGenInstruction &CGI = it->second;
+
+ const std::vector<const CodeGenInstruction*> &InstrList =
+ Target.getInstructionsByEnumValue();
+
+ for (unsigned i = 0, e = InstrList.size(); i != e; ++i) {
+ const CodeGenInstruction &CGI = *InstrList[i];
- if (!StringRef(it->first).startswith(MatchPrefix))
+ if (!StringRef(CGI.TheDef->getName()).startswith(MatchPrefix))
continue;
- OwningPtr<InstructionInfo> II(new InstructionInfo);
+ OwningPtr<InstructionInfo> II(new InstructionInfo());
- II->InstrName = it->first;
- II->Instr = &it->second;
+ II->InstrName = CGI.TheDef->getName();
+ II->Instr = &CGI;
II->AsmString = FlattenVariants(CGI.AsmString, 0);
// Remove comments from the asm string.
@@ -869,7 +932,7 @@ void AsmMatcherInfo::BuildInfo(CodeGenTarget &Target) {
TokenizeAsmString(II->AsmString, II->Tokens);
// Ignore instructions which shouldn't be matched.
- if (!IsAssemblerInstruction(it->first, CGI, II->Tokens))
+ if (!IsAssemblerInstruction(CGI.TheDef->getName(), CGI, II->Tokens))
continue;
// Collect singleton registers, if used.
@@ -889,7 +952,31 @@ void AsmMatcherInfo::BuildInfo(CodeGenTarget &Target) {
}
}
}
-
+
+ // Compute the required features.
+ ListInit *Predicates = CGI.TheDef->getValueAsListInit("Predicates");
+ for (unsigned i = 0, e = Predicates->getSize(); i != e; ++i) {
+ if (DefInit *Pred = dynamic_cast<DefInit*>(Predicates->getElement(i))) {
+ // Ignore OptForSize and OptForSpeed, they aren't really requirements,
+ // rather they are hints to isel.
+ //
+ // FIXME: Find better way to model this.
+ if (Pred->getDef()->getName() == "OptForSize" ||
+ Pred->getDef()->getName() == "OptForSpeed")
+ continue;
+
+ // FIXME: Total hack; for now, we just limit ourselves to In32BitMode
+ // and In64BitMode, because we aren't going to have the right feature
+ // masks for SSE and friends. We need to decide what we are going to do
+ // about CPU subtypes to implement this the right way.
+ if (Pred->getDef()->getName() != "In32BitMode" &&
+ Pred->getDef()->getName() != "In64BitMode")
+ continue;
+
+ II->RequiredFeatures.push_back(getSubtargetFeature(Pred->getDef()));
+ }
+ }
+
Instructions.push_back(II.take());
}
@@ -998,7 +1085,7 @@ static void EmitConvertToMCInst(CodeGenTarget &Target,
// Start the unified conversion function.
- CvtOS << "static bool ConvertToMCInst(ConversionKind Kind, MCInst &Inst, "
+ CvtOS << "static void ConvertToMCInst(ConversionKind Kind, MCInst &Inst, "
<< "unsigned Opcode,\n"
<< " const SmallVectorImpl<MCParsedAsmOperand*"
<< "> &Operands) {\n";
@@ -1155,13 +1242,12 @@ static void EmitConvertToMCInst(CodeGenTarget &Target,
}
}
- CvtOS << " break;\n";
+ CvtOS << " return;\n";
}
// Finish the convert function.
CvtOS << " }\n";
- CvtOS << " return false;\n";
CvtOS << "}\n\n";
// Finish the enum, and drop the convert function after it.
@@ -1486,6 +1572,48 @@ static void EmitMatchRegisterName(CodeGenTarget &Target, Record *AsmParser,
OS << "}\n\n";
}
+/// EmitSubtargetFeatureFlagEnumeration - Emit the subtarget feature flag
+/// definitions.
+static void EmitSubtargetFeatureFlagEnumeration(CodeGenTarget &Target,
+ AsmMatcherInfo &Info,
+ raw_ostream &OS) {
+ OS << "// Flags for subtarget features that participate in "
+ << "instruction matching.\n";
+ OS << "enum SubtargetFeatureFlag {\n";
+ for (std::map<Record*, SubtargetFeatureInfo*>::const_iterator
+ it = Info.SubtargetFeatures.begin(),
+ ie = Info.SubtargetFeatures.end(); it != ie; ++it) {
+ SubtargetFeatureInfo &SFI = *it->second;
+ OS << " " << SFI.EnumName << " = (1 << " << SFI.Index << "),\n";
+ }
+ OS << " Feature_None = 0\n";
+ OS << "};\n\n";
+}
+
+/// EmitComputeAvailableFeatures - Emit the function to compute the list of
+/// available features given a subtarget.
+static void EmitComputeAvailableFeatures(CodeGenTarget &Target,
+ AsmMatcherInfo &Info,
+ raw_ostream &OS) {
+ std::string ClassName =
+ Info.AsmParser->getValueAsString("AsmParserClassName");
+
+ OS << "unsigned " << Target.getName() << ClassName << "::\n"
+ << "ComputeAvailableFeatures(const " << Target.getName()
+ << "Subtarget *Subtarget) const {\n";
+ OS << " unsigned Features = 0;\n";
+ for (std::map<Record*, SubtargetFeatureInfo*>::const_iterator
+ it = Info.SubtargetFeatures.begin(),
+ ie = Info.SubtargetFeatures.end(); it != ie; ++it) {
+ SubtargetFeatureInfo &SFI = *it->second;
+ OS << " if (" << SFI.TheDef->getValueAsString("CondString")
+ << ")\n";
+ OS << " Features |= " << SFI.EnumName << ";\n";
+ }
+ OS << " return Features;\n";
+ OS << "}\n\n";
+}
+
void AsmMatcherEmitter::run(raw_ostream &OS) {
CodeGenTarget Target;
Record *AsmParser = Target.getAsmParser();
@@ -1537,6 +1665,9 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
EmitSourceFileHeader("Assembly Matcher Source Fragment", OS);
+ // Emit the subtarget feature enumeration.
+ EmitSubtargetFeatureFlagEnumeration(Target, Info, OS);
+
// Emit the function to match a register name to number.
EmitMatchRegisterName(Target, AsmParser, OS);
@@ -1557,6 +1688,9 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
// Emit the subclass predicate routine.
EmitIsSubclass(Target, Info.Classes, OS);
+ // Emit the available features compute function.
+ EmitComputeAvailableFeatures(Target, Info, OS);
+
// Finally, build the match function.
size_t MaxNumOperands = 0;
@@ -1564,10 +1698,11 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
Info.Instructions.begin(), ie = Info.Instructions.end();
it != ie; ++it)
MaxNumOperands = std::max(MaxNumOperands, (*it)->Operands.size());
-
- OS << "bool " << Target.getName() << ClassName
- << "::\nMatchInstruction(const SmallVectorImpl<MCParsedAsmOperand*> "
- "&Operands,\n MCInst &Inst) {\n";
+
+ OS << "bool " << Target.getName() << ClassName << "::\n"
+ << "MatchInstructionImpl(const SmallVectorImpl<MCParsedAsmOperand*>"
+ << " &Operands,\n";
+ OS << " MCInst &Inst) {\n";
// Emit the static match table; unused classes get initialized to 0 which is
// guaranteed to be InvalidMatchClass.
@@ -1583,6 +1718,7 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << " unsigned Opcode;\n";
OS << " ConversionKind ConvertFn;\n";
OS << " MatchClassKind Classes[" << MaxNumOperands << "];\n";
+ OS << " unsigned RequiredFeatures;\n";
OS << " } MatchTable[" << Info.Instructions.size() << "] = {\n";
for (std::vector<InstructionInfo*>::const_iterator it =
@@ -1598,11 +1734,27 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
if (i) OS << ", ";
OS << Op.Class->Name;
}
- OS << " } },\n";
+ OS << " }, ";
+
+ // Write the required features mask.
+ if (!II.RequiredFeatures.empty()) {
+ for (unsigned i = 0, e = II.RequiredFeatures.size(); i != e; ++i) {
+ if (i) OS << "|";
+ OS << II.RequiredFeatures[i]->EnumName;
+ }
+ } else
+ OS << "0";
+
+ OS << "},\n";
}
OS << " };\n\n";
+
+ // Emit code to get the available features.
+ OS << " // Get the current feature set.\n";
+ OS << " unsigned AvailableFeatures = getAvailableFeatures();\n\n";
+
// Emit code to compute the class list for this operand vector.
OS << " // Eliminate obvious mismatches.\n";
OS << " if (Operands.size() > " << MaxNumOperands << ")\n";
@@ -1628,14 +1780,28 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
OS << " for (const MatchEntry *it = MatchTable, "
<< "*ie = MatchTable + " << Info.Instructions.size()
<< "; it != ie; ++it) {\n";
+
+ // Emit check that the required features are available.
+ OS << " if ((AvailableFeatures & it->RequiredFeatures) "
+ << "!= it->RequiredFeatures)\n";
+ OS << " continue;\n";
+
+ // Emit check that the subclasses match.
for (unsigned i = 0; i != MaxNumOperands; ++i) {
OS << " if (!IsSubclass(Classes["
<< i << "], it->Classes[" << i << "]))\n";
OS << " continue;\n";
}
OS << "\n";
- OS << " return ConvertToMCInst(it->ConvertFn, Inst, "
- << "it->Opcode, Operands);\n";
+ OS << " ConvertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands);\n";
+
+ // Call the post-processing function, if used.
+ std::string InsnCleanupFn =
+ AsmParser->getValueAsString("AsmParserInstCleanup");
+ if (!InsnCleanupFn.empty())
+ OS << " " << InsnCleanupFn << "(Inst);\n";
+
+ OS << " return false;\n";
OS << " }\n\n";
OS << " return true;\n";
diff --git a/libclamav/c++/llvm/utils/TableGen/AsmWriterEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/AsmWriterEmitter.cpp
index 3a38dd4..23f13c2 100644
--- a/libclamav/c++/llvm/utils/TableGen/AsmWriterEmitter.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/AsmWriterEmitter.cpp
@@ -115,7 +115,7 @@ FindUniqueOperandCommands(std::vector<std::string> &UniqueOperandCommands,
for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
const AsmWriterInst *Inst = getAsmWriterInstByID(i);
- if (Inst == 0) continue; // PHI, INLINEASM, DBG_LABEL, etc.
+ if (Inst == 0) continue; // PHI, INLINEASM, PROLOG_LABEL, etc.
std::string Command;
if (Inst->Operands.empty())
@@ -248,22 +248,22 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) {
"/// printInstruction - This method is automatically generated by tablegen\n"
"/// from the instruction set description.\n"
"void " << Target.getName() << ClassName
- << "::printInstruction(const MachineInstr *MI) {\n";
+ << "::printInstruction(const MachineInstr *MI, raw_ostream &O) {\n";
std::vector<AsmWriterInst> Instructions;
for (CodeGenTarget::inst_iterator I = Target.inst_begin(),
E = Target.inst_end(); I != E; ++I)
- if (!I->second.AsmString.empty() &&
- I->second.TheDef->getName() != "PHI")
+ if (!(*I)->AsmString.empty() &&
+ (*I)->TheDef->getName() != "PHI")
Instructions.push_back(
- AsmWriterInst(I->second,
+ AsmWriterInst(**I,
AsmWriter->getValueAsInt("Variant"),
AsmWriter->getValueAsInt("FirstOperandColumn"),
AsmWriter->getValueAsInt("OperandSpacing")));
// Get the instruction numbering.
- Target.getInstructionsByEnumValue(NumberedInstructions);
+ NumberedInstructions = Target.getInstructionsByEnumValue();
// Compute the CodeGenInstruction -> AsmWriterInst mapping. Note that not
// all machine instructions are necessarily being printed, so there may be
@@ -499,8 +499,8 @@ void AsmWriterEmitter::EmitGetInstructionName(raw_ostream &O) {
Record *AsmWriter = Target.getAsmWriter();
std::string ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
- std::vector<const CodeGenInstruction*> NumberedInstructions;
- Target.getInstructionsByEnumValue(NumberedInstructions);
+ const std::vector<const CodeGenInstruction*> &NumberedInstructions =
+ Target.getInstructionsByEnumValue();
StringToOffsetTable StringTable;
O <<
diff --git a/libclamav/c++/llvm/utils/TableGen/AsmWriterInst.cpp b/libclamav/c++/llvm/utils/TableGen/AsmWriterInst.cpp
index 508e453..b2228b0 100644
--- a/libclamav/c++/llvm/utils/TableGen/AsmWriterInst.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/AsmWriterInst.cpp
@@ -38,6 +38,7 @@ std::string AsmWriterOperand::getCode() const {
std::string Result = Str + "(MI";
if (MIOpNo != ~0U)
Result += ", " + utostr(MIOpNo);
+ Result += ", O";
if (!MiModifier.empty())
Result += ", \"" + MiModifier + '"';
return Result + "); ";
diff --git a/libclamav/c++/llvm/utils/TableGen/CMakeLists.txt b/libclamav/c++/llvm/utils/TableGen/CMakeLists.txt
index 881b50a..972989b 100644
--- a/libclamav/c++/llvm/utils/TableGen/CMakeLists.txt
+++ b/libclamav/c++/llvm/utils/TableGen/CMakeLists.txt
@@ -1,8 +1,11 @@
add_executable(tblgen
+ ARMDecoderEmitter.cpp
AsmMatcherEmitter.cpp
AsmWriterEmitter.cpp
AsmWriterInst.cpp
CallingConvEmitter.cpp
+ ClangASTNodesEmitter.cpp
+ ClangAttrEmitter.cpp
ClangDiagnosticsEmitter.cpp
CodeEmitterGen.cpp
CodeGenDAGPatterns.cpp
@@ -20,6 +23,7 @@ add_executable(tblgen
InstrInfoEmitter.cpp
IntrinsicEmitter.cpp
LLVMCConfigurationEmitter.cpp
+ NeonEmitter.cpp
OptParserEmitter.cpp
Record.cpp
RegisterInfoEmitter.cpp
@@ -37,6 +41,6 @@ target_link_libraries(tblgen LLVMSupport LLVMSystem)
if( MINGW )
target_link_libraries(tblgen imagehlp psapi)
endif( MINGW )
-if( LLVM_ENABLE_THREADS AND HAVE_LIBPTHREAD )
+if( LLVM_ENABLE_THREADS AND HAVE_LIBPTHREAD AND NOT BEOS )
target_link_libraries(tblgen pthread)
endif()
diff --git a/libclamav/c++/llvm/utils/TableGen/CallingConvEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/CallingConvEmitter.cpp
index 28ba2ed..7643609 100644
--- a/libclamav/c++/llvm/utils/TableGen/CallingConvEmitter.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/CallingConvEmitter.cpp
@@ -169,6 +169,8 @@ void CallingConvEmitter::EmitAction(Record *Action,
else
O << "\n" << IndentStr << " State.getTarget().getTargetData()"
"->getABITypeAlignment(LocVT.getTypeForEVT(State.getContext()))";
+ if (Action->isSubClassOf("CCAssignToStackWithShadow"))
+ O << ", " << getQualifiedName(Action->getValueAsDef("ShadowReg"));
O << ");\n" << IndentStr
<< "State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset"
<< Counter << ", LocVT, LocInfo));\n";
diff --git a/libclamav/c++/llvm/utils/TableGen/ClangASTNodesEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/ClangASTNodesEmitter.cpp
new file mode 100644
index 0000000..187ab46
--- /dev/null
+++ b/libclamav/c++/llvm/utils/TableGen/ClangASTNodesEmitter.cpp
@@ -0,0 +1,165 @@
+//=== ClangASTNodesEmitter.cpp - Generate Clang AST node tables -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang AST node tables
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangASTNodesEmitter.h"
+#include <set>
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Statement Node Tables (.inc file) generation.
+//===----------------------------------------------------------------------===//
+
+// Returns the first and last non-abstract subrecords
+// Called recursively to ensure that nodes remain contiguous
+std::pair<Record *, Record *> ClangASTNodesEmitter::EmitNode(
+ const ChildMap &Tree,
+ raw_ostream &OS,
+ Record *Base) {
+ std::string BaseName = macroName(Base->getName());
+
+ ChildIterator i = Tree.lower_bound(Base), e = Tree.upper_bound(Base);
+
+ Record *First = 0, *Last = 0;
+ // This might be the pseudo-node for Stmt; don't assume it has an Abstract
+ // bit
+ if (Base->getValue("Abstract") && !Base->getValueAsBit("Abstract"))
+ First = Last = Base;
+
+ for (; i != e; ++i) {
+ Record *R = i->second;
+ bool Abstract = R->getValueAsBit("Abstract");
+ std::string NodeName = macroName(R->getName());
+
+ OS << "#ifndef " << NodeName << "\n";
+ OS << "# define " << NodeName << "(Type, Base) "
+ << BaseName << "(Type, Base)\n";
+ OS << "#endif\n";
+
+ if (Abstract)
+ OS << "ABSTRACT_" << macroName(Root.getName()) << "(" << NodeName << "("
+ << R->getName() << ", " << baseName(*Base) << "))\n";
+ else
+ OS << NodeName << "(" << R->getName() << ", "
+ << baseName(*Base) << ")\n";
+
+ if (Tree.find(R) != Tree.end()) {
+ const std::pair<Record *, Record *> &Result
+ = EmitNode(Tree, OS, R);
+ if (!First && Result.first)
+ First = Result.first;
+ if (Result.second)
+ Last = Result.second;
+ } else {
+ if (!Abstract) {
+ Last = R;
+
+ if (!First)
+ First = R;
+ }
+ }
+
+ OS << "#undef " << NodeName << "\n\n";
+ }
+
+ if (First) {
+ assert (Last && "Got a first node but not a last node for a range!");
+ if (Base == &Root)
+ OS << "LAST_" << macroName(Root.getName()) << "_RANGE(";
+ else
+ OS << macroName(Root.getName()) << "_RANGE(";
+ OS << Base->getName() << ", " << First->getName() << ", "
+ << Last->getName() << ")\n\n";
+ }
+
+ return std::make_pair(First, Last);
+}
+
+void ClangASTNodesEmitter::run(raw_ostream &OS) {
+ // Write the preamble
+ OS << "#ifndef ABSTRACT_" << macroName(Root.getName()) << "\n";
+ OS << "# define ABSTRACT_" << macroName(Root.getName()) << "(Type) Type\n";
+ OS << "#endif\n";
+
+ OS << "#ifndef " << macroName(Root.getName()) << "_RANGE\n";
+ OS << "# define "
+ << macroName(Root.getName()) << "_RANGE(Base, First, Last)\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifndef LAST_" << macroName(Root.getName()) << "_RANGE\n";
+ OS << "# define LAST_"
+ << macroName(Root.getName()) << "_RANGE(Base, First, Last) "
+ << macroName(Root.getName()) << "_RANGE(Base, First, Last)\n";
+ OS << "#endif\n\n";
+
+ // Emit statements
+ const std::vector<Record*> Stmts
+ = Records.getAllDerivedDefinitions(Root.getName());
+
+ ChildMap Tree;
+
+ for (unsigned i = 0, e = Stmts.size(); i != e; ++i) {
+ Record *R = Stmts[i];
+
+ if (R->getValue("Base"))
+ Tree.insert(std::make_pair(R->getValueAsDef("Base"), R));
+ else
+ Tree.insert(std::make_pair(&Root, R));
+ }
+
+ EmitNode(Tree, OS, &Root);
+
+ OS << "#undef " << macroName(Root.getName()) << "\n";
+ OS << "#undef " << macroName(Root.getName()) << "_RANGE\n";
+ OS << "#undef LAST_" << macroName(Root.getName()) << "_RANGE\n";
+ OS << "#undef ABSTRACT_" << macroName(Root.getName()) << "\n";
+}
+
+void ClangDeclContextEmitter::run(raw_ostream &OS) {
+ // FIXME: Find a .td file format to allow for this to be represented better.
+
+ OS << "#ifndef DECL_CONTEXT\n";
+ OS << "# define DECL_CONTEXT(DECL)\n";
+ OS << "#endif\n";
+
+ OS << "#ifndef DECL_CONTEXT_BASE\n";
+ OS << "# define DECL_CONTEXT_BASE(DECL) DECL_CONTEXT(DECL)\n";
+ OS << "#endif\n";
+
+ typedef std::set<Record*> RecordSet;
+ typedef std::vector<Record*> RecordVector;
+
+ RecordVector DeclContextsVector
+ = Records.getAllDerivedDefinitions("DeclContext");
+ RecordVector Decls = Records.getAllDerivedDefinitions("Decl");
+ RecordSet DeclContexts (DeclContextsVector.begin(), DeclContextsVector.end());
+
+ for (RecordVector::iterator i = Decls.begin(), e = Decls.end(); i != e; ++i) {
+ Record *R = *i;
+
+ if (R->getValue("Base")) {
+ Record *B = R->getValueAsDef("Base");
+ if (DeclContexts.find(B) != DeclContexts.end()) {
+ OS << "DECL_CONTEXT_BASE(" << B->getName() << ")\n";
+ DeclContexts.erase(B);
+ }
+ }
+ }
+
+ for (RecordSet::iterator i = DeclContexts.begin(), e = DeclContexts.end();
+ i != e; ++i) {
+ OS << "DECL_CONTEXT(" << (*i)->getName() << ")\n";
+ }
+
+ OS << "#undef DECL_CONTEXT\n";
+ OS << "#undef DECL_CONTEXT_BASE\n";
+}
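ClangASTNodesEmitter::run() produces an X-macro table: each concrete node expands through a macro the includer supplies, abstract nodes and range markers fall back to no-op defaults, and every helper macro is #undef'd at the end of the generated file. A hedged sketch of how such a file is typically consumed (the .inc name is illustrative; the macro names follow what run() emits for a root record called Stmt):

// Build an enum with one value per concrete statement node.
#define ABSTRACT_STMT(Type)        /* drop abstract nodes */
#define STMT(Type, Base) Type,     /* one enumerator per concrete node */
enum StmtKind {
#include "StmtNodes.inc"           // file emitted by ClangASTNodesEmitter
  NumStmtKinds
};
// No cleanup needed here: the generated file #undefs STMT and ABSTRACT_STMT itself.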
diff --git a/libclamav/c++/llvm/utils/TableGen/ClangASTNodesEmitter.h b/libclamav/c++/llvm/utils/TableGen/ClangASTNodesEmitter.h
new file mode 100644
index 0000000..abf9c9a
--- /dev/null
+++ b/libclamav/c++/llvm/utils/TableGen/ClangASTNodesEmitter.h
@@ -0,0 +1,84 @@
+//===- ClangASTNodesEmitter.h - Generate Clang AST node tables -*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang AST node tables
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANGAST_EMITTER_H
+#define CLANGAST_EMITTER_H
+
+#include "TableGenBackend.h"
+#include "Record.h"
+#include <string>
+#include <cctype>
+#include <map>
+
+namespace llvm {
+
+/// ClangASTNodesEmitter - The top-level class emits .inc files containing
+/// declarations of Clang statements.
+///
+class ClangASTNodesEmitter : public TableGenBackend {
+ // A map from a node to each of its derived nodes.
+ typedef std::multimap<Record*, Record*> ChildMap;
+ typedef ChildMap::const_iterator ChildIterator;
+
+ RecordKeeper &Records;
+ Record Root;
+ const std::string &BaseSuffix;
+
+ // Create a macro-ized version of a name
+ static std::string macroName(std::string S) {
+ for (unsigned i = 0; i < S.size(); ++i)
+ S[i] = std::toupper(S[i]);
+
+ return S;
+ }
+
+ // Return the name to be printed in the base field. Normally this is
+ // the record's name plus the base suffix, but if it is the root node and
+ // the suffix is non-empty, it's just the suffix.
+ std::string baseName(Record &R) {
+ if (&R == &Root && !BaseSuffix.empty())
+ return BaseSuffix;
+
+ return R.getName() + BaseSuffix;
+ }
+
+ std::pair<Record *, Record *> EmitNode (const ChildMap &Tree, raw_ostream& OS,
+ Record *Base);
+public:
+ explicit ClangASTNodesEmitter(RecordKeeper &R, const std::string &N,
+ const std::string &S)
+ : Records(R), Root(N, SMLoc()), BaseSuffix(S)
+ {}
+
+ // run - Output the .inc file contents
+ void run(raw_ostream &OS);
+};
+
+/// ClangDeclContextEmitter - Emits an addendum to a .inc file to enumerate the
+/// clang declaration contexts.
+///
+class ClangDeclContextEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+public:
+ explicit ClangDeclContextEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ // run - Output the .inc file contents
+ void run(raw_ostream &OS);
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/libclamav/c++/llvm/utils/TableGen/ClangAttrEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/ClangAttrEmitter.cpp
new file mode 100644
index 0000000..8d3399a
--- /dev/null
+++ b/libclamav/c++/llvm/utils/TableGen/ClangAttrEmitter.cpp
@@ -0,0 +1,638 @@
+//===- ClangAttrEmitter.cpp - Generate Clang attribute handling =-*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang attribute processing code
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangAttrEmitter.h"
+#include "Record.h"
+#include "llvm/ADT/StringSwitch.h"
+#include <algorithm>
+#include <cctype>
+
+using namespace llvm;
+
+static const std::vector<StringRef> getValueAsListOfStrings(Record &R,
+ StringRef FieldName) {
+ ListInit *List = R.getValueAsListInit(FieldName);
+ assert (List && "Got a null ListInit");
+
+ std::vector<StringRef> Strings;
+ Strings.reserve(List->getSize());
+
+ for (ListInit::iterator i = List->begin(), e = List->end(); i != e; ++i) {
+ assert(*i && "Got a null element in a ListInit");
+ if (StringInit *S = dynamic_cast<StringInit *>(*i))
+ Strings.push_back(S->getValue());
+ else if (CodeInit *C = dynamic_cast<CodeInit *>(*i))
+ Strings.push_back(C->getValue());
+ else
+ assert(false && "Got a non-string, non-code element in a ListInit");
+ }
+
+ return Strings;
+}
+
+std::string ReadPCHRecord(StringRef type) {
+ return StringSwitch<std::string>(type)
+ .EndsWith("Decl *", "cast_or_null<" + std::string(type, 0, type.size()-1) +
+ ">(GetDecl(Record[Idx++]))")
+ .Case("QualType", "ReadTypeRecord(Idx++)")
+ .Default("Record[Idx++]");
+}
+
+// Assumes that the way to get the value is SA->getname()
+std::string WritePCHRecord(StringRef type, StringRef name) {
+ return StringSwitch<std::string>(type)
+ .EndsWith("Decl *", "AddDeclRef(" + std::string(name) +
+ ", Record);\n")
+ .Case("QualType", "AddTypeRef(" + std::string(name) + ", Record);\n")
+ .Default("Record.push_back(" + std::string(name) + ");\n");
+}
+
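ReadPCHRecord/WritePCHRecord above pick the snippet to emit with llvm::StringSwitch, a first-match-wins chain of string tests ending in a Default. The same dispatch reduced to its shape (the returned strings are placeholders, not the exact snippets used above):

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include <string>

// Illustrative only: the first matching clause wins, Default catches the rest.
static std::string readerFor(llvm::StringRef Type) {
  return llvm::StringSwitch<std::string>(Type)
      .EndsWith("Decl *", "GetDecl(Record[Idx++])")  // any decl-pointer field
      .Case("QualType", "ReadTypeRecord(Idx++)")     // types go through the type table
      .Default("Record[Idx++]");                     // plain integral fields
}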
+namespace {
+ class Argument {
+ std::string lowerName, upperName;
+ StringRef attrName;
+
+ public:
+ Argument(Record &Arg, StringRef Attr)
+ : lowerName(Arg.getValueAsString("Name")), upperName(lowerName),
+ attrName(Attr) {
+ if (!lowerName.empty()) {
+ lowerName[0] = std::tolower(lowerName[0]);
+ upperName[0] = std::toupper(upperName[0]);
+ }
+ }
+ virtual ~Argument() {}
+
+ StringRef getLowerName() const { return lowerName; }
+ StringRef getUpperName() const { return upperName; }
+ StringRef getAttrName() const { return attrName; }
+
+ // These functions print the argument contents formatted in different ways.
+ virtual void writeAccessors(raw_ostream &OS) const = 0;
+ virtual void writeAccessorDefinitions(raw_ostream &OS) const {}
+ virtual void writeCloneArgs(raw_ostream &OS) const = 0;
+ virtual void writeCtorBody(raw_ostream &OS) const {}
+ virtual void writeCtorInitializers(raw_ostream &OS) const = 0;
+ virtual void writeCtorParameters(raw_ostream &OS) const = 0;
+ virtual void writeDeclarations(raw_ostream &OS) const = 0;
+ virtual void writePCHReadArgs(raw_ostream &OS) const = 0;
+ virtual void writePCHReadDecls(raw_ostream &OS) const = 0;
+ virtual void writePCHWrite(raw_ostream &OS) const = 0;
+ };
+
+ class SimpleArgument : public Argument {
+ std::string type;
+
+ public:
+ SimpleArgument(Record &Arg, StringRef Attr, std::string T)
+ : Argument(Arg, Attr), type(T)
+ {}
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " " << type << " get" << getUpperName() << "() const {\n";
+ OS << " return " << getLowerName() << ";\n";
+ OS << " }";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << getLowerName() << "(" << getUpperName() << ")";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << type << " " << getUpperName();
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ OS << type << " " << getLowerName() << ";";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ std::string read = ReadPCHRecord(type);
+ OS << " " << type << " " << getLowerName() << " = " << read << ";\n";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writePCHWrite(raw_ostream &OS) const {
+ OS << " " << WritePCHRecord(type, "SA->get" +
+ std::string(getUpperName()) + "()");
+ }
+ };
+
+ class StringArgument : public Argument {
+ public:
+ StringArgument(Record &Arg, StringRef Attr)
+ : Argument(Arg, Attr)
+ {}
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " llvm::StringRef get" << getUpperName() << "() const {\n";
+ OS << " return llvm::StringRef(" << getLowerName() << ", "
+ << getLowerName() << "Length);\n";
+ OS << " }\n";
+ OS << " unsigned get" << getUpperName() << "Length() const {\n";
+ OS << " return " << getLowerName() << "Length;\n";
+ OS << " }\n";
+ OS << " void set" << getUpperName()
+ << "(ASTContext &C, llvm::StringRef S) {\n";
+ OS << " " << getLowerName() << "Length = S.size();\n";
+ OS << " this->" << getLowerName() << " = new (C, 1) char ["
+ << getLowerName() << "Length];\n";
+ OS << " std::memcpy(this->" << getLowerName() << ", S.data(), "
+ << getLowerName() << "Length);\n";
+ OS << " }";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << "get" << getUpperName() << "()";
+ }
+ void writeCtorBody(raw_ostream &OS) const {
+ OS << " std::memcpy(" << getLowerName() << ", " << getUpperName()
+ << ".data(), " << getLowerName() << "Length);";
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << getLowerName() << "Length(" << getUpperName() << ".size()),"
+ << getLowerName() << "(new (Ctx, 1) char[" << getLowerName()
+ << "Length])";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << "llvm::StringRef " << getUpperName();
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ OS << "unsigned " << getLowerName() << "Length;\n";
+ OS << "char *" << getLowerName() << ";";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ OS << " std::string " << getLowerName() << "= ReadString(Record, Idx);\n";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writePCHWrite(raw_ostream &OS) const {
+ OS << " AddString(SA->get" << getUpperName() << "(), Record);\n";
+ }
+ };
+
+ class AlignedArgument : public Argument {
+ public:
+ AlignedArgument(Record &Arg, StringRef Attr)
+ : Argument(Arg, Attr)
+ {}
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " bool is" << getUpperName() << "Dependent() const;\n";
+
+ OS << " unsigned get" << getUpperName() << "(ASTContext &Ctx) const;\n";
+
+ OS << " bool is" << getUpperName() << "Expr() const {\n";
+ OS << " return is" << getLowerName() << "Expr;\n";
+ OS << " }\n";
+
+ OS << " Expr *get" << getUpperName() << "Expr() const {\n";
+ OS << " assert(is" << getLowerName() << "Expr);\n";
+ OS << " return " << getLowerName() << "Expr;\n";
+ OS << " }\n";
+
+ OS << " TypeSourceInfo *get" << getUpperName() << "Type() const {\n";
+ OS << " assert(!is" << getLowerName() << "Expr);\n";
+ OS << " return " << getLowerName() << "Type;\n";
+ OS << " }";
+ }
+ void writeAccessorDefinitions(raw_ostream &OS) const {
+ OS << "bool " << getAttrName() << "Attr::is" << getUpperName()
+ << "Dependent() const {\n";
+ OS << " if (is" << getLowerName() << "Expr)\n";
+ OS << " return " << getLowerName() << "Expr && (" << getLowerName()
+ << "Expr->isValueDependent() || " << getLowerName()
+ << "Expr->isTypeDependent());\n";
+ OS << " else\n";
+ OS << " return " << getLowerName()
+ << "Type->getType()->isDependentType();\n";
+ OS << "}\n";
+
+ // FIXME: Do not do the calculation here
+ // FIXME: Handle types correctly
+ // A null pointer means maximum alignment
+ // FIXME: Load the platform-specific maximum alignment, rather than
+ // 16, the x86 max.
+ OS << "unsigned " << getAttrName() << "Attr::get" << getUpperName()
+ << "(ASTContext &Ctx) const {\n";
+ OS << " assert(!is" << getUpperName() << "Dependent());\n";
+ OS << " if (is" << getLowerName() << "Expr)\n";
+ OS << " return (" << getLowerName() << "Expr ? " << getLowerName()
+ << "Expr->EvaluateAsInt(Ctx).getZExtValue() : 16)"
+ << "* Ctx.getCharWidth();\n";
+ OS << " else\n";
+ OS << " return 0; // FIXME\n";
+ OS << "}\n";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << "is" << getLowerName() << "Expr, is" << getLowerName()
+ << "Expr ? static_cast<void*>(" << getLowerName()
+ << "Expr) : " << getLowerName()
+ << "Type";
+ }
+ void writeCtorBody(raw_ostream &OS) const {
+ OS << " if (is" << getLowerName() << "Expr)\n";
+ OS << " " << getLowerName() << "Expr = reinterpret_cast<Expr *>("
+ << getUpperName() << ");\n";
+ OS << " else\n";
+ OS << " " << getLowerName()
+ << "Type = reinterpret_cast<TypeSourceInfo *>(" << getUpperName()
+ << ");";
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << "is" << getLowerName() << "Expr(Is" << getUpperName() << "Expr)";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << "bool Is" << getUpperName() << "Expr, void *" << getUpperName();
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ OS << "bool is" << getLowerName() << "Expr;\n";
+ OS << "union {\n";
+ OS << "Expr *" << getLowerName() << "Expr;\n";
+ OS << "TypeSourceInfo *" << getLowerName() << "Type;\n";
+ OS << "};";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << "is" << getLowerName() << "Expr, " << getLowerName() << "Ptr";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ OS << " bool is" << getLowerName() << "Expr = Record[Idx++];\n";
+ OS << " void *" << getLowerName() << "Ptr;\n";
+ OS << " if (is" << getLowerName() << "Expr)\n";
+ OS << " " << getLowerName() << "Ptr = ReadExpr(DeclsCursor);\n";
+ OS << " else\n";
+ OS << " " << getLowerName()
+ << "Ptr = GetTypeSourceInfo(DeclsCursor, Record, Idx);\n";
+ }
+ void writePCHWrite(raw_ostream &OS) const {
+ OS << " Record.push_back(SA->is" << getUpperName() << "Expr());\n";
+ OS << " if (SA->is" << getUpperName() << "Expr())\n";
+ OS << " AddStmt(SA->get" << getUpperName() << "Expr());\n";
+ OS << " else\n";
+ OS << " AddTypeSourceInfo(SA->get" << getUpperName()
+ << "Type(), Record);\n";
+ }
+ };
+
+ class VariadicArgument : public Argument {
+ std::string type;
+
+ public:
+ VariadicArgument(Record &Arg, StringRef Attr, std::string T)
+ : Argument(Arg, Attr), type(T)
+ {}
+
+ std::string getType() const { return type; }
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " typedef " << type << "* " << getLowerName() << "_iterator;\n";
+ OS << " " << getLowerName() << "_iterator " << getLowerName()
+ << "_begin() const {\n";
+ OS << " return " << getLowerName() << ";\n";
+ OS << " }\n";
+ OS << " " << getLowerName() << "_iterator " << getLowerName()
+ << "_end() const {\n";
+ OS << " return " << getLowerName() << " + " << getLowerName()
+ << "Size;\n";
+ OS << " }\n";
+ OS << " unsigned " << getLowerName() << "_size() const {\n"
+ << " return " << getLowerName() << "Size;\n;";
+ OS << " }";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << getLowerName() << ", " << getLowerName() << "Size";
+ }
+ void writeCtorBody(raw_ostream &OS) const {
+ // FIXME: memcpy is not safe on non-trivial types.
+ OS << " std::memcpy(" << getLowerName() << ", " << getUpperName()
+ << ", " << getLowerName() << "Size * sizeof(" << getType() << "));\n";
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << getLowerName() << "Size(" << getUpperName() << "Size), "
+ << getLowerName() << "(new (Ctx, 16) " << getType() << "["
+ << getLowerName() << "Size])";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << getType() << " *" << getUpperName() << ", unsigned "
+ << getUpperName() << "Size";
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ OS << " unsigned " << getLowerName() << "Size;\n";
+ OS << " " << getType() << " *" << getLowerName() << ";";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ OS << " unsigned " << getLowerName() << "Size = Record[Idx++];\n";
+ OS << " llvm::SmallVector<" << type << ", 4> " << getLowerName()
+ << ";\n";
+ OS << " " << getLowerName() << ".reserve(" << getLowerName()
+ << "Size);\n";
+ OS << " for (unsigned i = " << getLowerName() << "Size; i; --i)\n";
+
+ std::string read = ReadPCHRecord(type);
+ OS << " " << getLowerName() << ".push_back(" << read << ");\n";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << getLowerName() << ".data(), " << getLowerName() << "Size";
+ }
+ void writePCHWrite(raw_ostream &OS) const{
+ OS << " Record.push_back(SA->" << getLowerName() << "_size());\n";
+ OS << " for (" << getAttrName() << "Attr::" << getLowerName()
+ << "_iterator i = SA->" << getLowerName() << "_begin(), e = SA->"
+ << getLowerName() << "_end(); i != e; ++i)\n";
+ OS << " " << WritePCHRecord(type, "(*i)");
+ }
+ };
+
+ class EnumArgument : public Argument {
+ std::string type;
+ std::vector<StringRef> values, enums;
+ public:
+ EnumArgument(Record &Arg, StringRef Attr)
+ : Argument(Arg, Attr), type(Arg.getValueAsString("Type")),
+ values(getValueAsListOfStrings(Arg, "Values")),
+ enums(getValueAsListOfStrings(Arg, "Enums"))
+ {}
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " " << type << " get" << getUpperName() << "() const {\n";
+ OS << " return " << getLowerName() << ";\n";
+ OS << " }";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << getLowerName() << "(" << getUpperName() << ")";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << type << " " << getUpperName();
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ // Calculate the various enum values
+ std::vector<StringRef> uniques(enums);
+ std::sort(uniques.begin(), uniques.end());
+ uniques.erase(std::unique(uniques.begin(), uniques.end()),
+ uniques.end());
+ // FIXME: Emit a proper error
+ assert(!uniques.empty());
+
+ std::vector<StringRef>::iterator i = uniques.begin(),
+ e = uniques.end();
+ // The last one needs to not have a comma.
+ --e;
+
+ OS << "public:\n";
+ OS << " enum " << type << " {\n";
+ for (; i != e; ++i)
+ OS << " " << *i << ",\n";
+ OS << " " << *e << "\n";
+ OS << " };\n";
+ OS << "private:\n";
+ OS << " " << type << " " << getLowerName() << ";";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ OS << " " << getAttrName() << "Attr::" << type << " " << getLowerName()
+ << "(static_cast<" << getAttrName() << "Attr::" << type
+ << ">(Record[Idx++]));\n";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writePCHWrite(raw_ostream &OS) const {
+ OS << "Record.push_back(SA->get" << getUpperName() << "());\n";
+ }
+ };
+}
+
+static Argument *createArgument(Record &Arg, StringRef Attr,
+ Record *Search = 0) {
+ if (!Search)
+ Search = &Arg;
+
+ Argument *Ptr = 0;
+ llvm::StringRef ArgName = Search->getName();
+
+ if (ArgName == "AlignedArgument") Ptr = new AlignedArgument(Arg, Attr);
+ else if (ArgName == "EnumArgument") Ptr = new EnumArgument(Arg, Attr);
+ else if (ArgName == "ExprArgument") Ptr = new SimpleArgument(Arg, Attr,
+ "Expr *");
+ else if (ArgName == "FunctionArgument")
+ Ptr = new SimpleArgument(Arg, Attr, "FunctionDecl *");
+ else if (ArgName == "IdentifierArgument")
+ Ptr = new SimpleArgument(Arg, Attr, "IdentifierInfo *");
+ else if (ArgName == "IntArgument") Ptr = new SimpleArgument(Arg, Attr, "int");
+ else if (ArgName == "StringArgument") Ptr = new StringArgument(Arg, Attr);
+ else if (ArgName == "TypeArgument")
+ Ptr = new SimpleArgument(Arg, Attr, "QualType");
+ else if (ArgName == "UnsignedArgument")
+ Ptr = new SimpleArgument(Arg, Attr, "unsigned");
+ else if (ArgName == "VariadicUnsignedArgument")
+ Ptr = new VariadicArgument(Arg, Attr, "unsigned");
+
+ if (!Ptr) {
+ std::vector<Record*> Bases = Search->getSuperClasses();
+ for (std::vector<Record*>::iterator i = Bases.begin(), e = Bases.end();
+ i != e; ++i) {
+ Ptr = createArgument(Arg, Attr, *i);
+ if (Ptr)
+ break;
+ }
+ }
+ return Ptr;
+}
+
+void ClangAttrClassEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+ OS << "#ifndef LLVM_CLANG_ATTR_CLASSES_INC\n";
+ OS << "#define LLVM_CLANG_ATTR_CLASSES_INC\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+
+ for (std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end();
+ i != e; ++i) {
+ Record &R = **i;
+
+ OS << "class " << R.getName() << "Attr : public Attr {\n";
+
+ std::vector<Record*> ArgRecords = R.getValueAsListOfDefs("Args");
+ std::vector<Argument*> Args;
+ std::vector<Argument*>::iterator ai, ae;
+ Args.reserve(ArgRecords.size());
+
+ for (std::vector<Record*>::iterator ri = ArgRecords.begin(),
+ re = ArgRecords.end();
+ ri != re; ++ri) {
+ Record &ArgRecord = **ri;
+ Argument *Arg = createArgument(ArgRecord, R.getName());
+ assert(Arg);
+ Args.push_back(Arg);
+
+ Arg->writeDeclarations(OS);
+ OS << "\n\n";
+ }
+
+ ae = Args.end();
+
+ OS << "\n public:\n";
+ OS << " " << R.getName() << "Attr(SourceLocation L, ASTContext &Ctx\n";
+
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ OS << " , ";
+ (*ai)->writeCtorParameters(OS);
+ OS << "\n";
+ }
+
+ OS << " )\n";
+ OS << " : Attr(attr::" << R.getName() << ", L)\n";
+
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ OS << " , ";
+ (*ai)->writeCtorInitializers(OS);
+ OS << "\n";
+ }
+
+ OS << " {\n";
+
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ (*ai)->writeCtorBody(OS);
+ OS << "\n";
+ }
+ OS << " }\n\n";
+
+ OS << " virtual " << R.getName() << "Attr *clone (ASTContext &C) const;\n";
+
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ (*ai)->writeAccessors(OS);
+ OS << "\n\n";
+ }
+
+ OS << R.getValueAsCode("AdditionalMembers");
+ OS << "\n\n";
+
+ OS << " static bool classof(const Attr *A) { return A->getKind() == "
+ << "attr::" << R.getName() << "; }\n";
+ OS << " static bool classof(const " << R.getName()
+ << "Attr *) { return true; }\n";
+ OS << "};\n\n";
+ }
+
+ OS << "#endif\n";
+}
+
+void ClangAttrImplEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+ std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end(), ri, re;
+ std::vector<Argument*>::iterator ai, ae;
+
+ for (; i != e; ++i) {
+ Record &R = **i;
+ std::vector<Record*> ArgRecords = R.getValueAsListOfDefs("Args");
+ std::vector<Argument*> Args;
+ for (ri = ArgRecords.begin(), re = ArgRecords.end(); ri != re; ++ri)
+ Args.push_back(createArgument(**ri, R.getName()));
+
+ for (ai = Args.begin(), ae = Args.end(); ai != ae; ++ai)
+ (*ai)->writeAccessorDefinitions(OS);
+
+ OS << R.getName() << "Attr *" << R.getName()
+ << "Attr::clone(ASTContext &C) const {\n";
+ OS << " return new (C) " << R.getName() << "Attr(getLocation(), C";
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ OS << ", ";
+ (*ai)->writeCloneArgs(OS);
+ }
+ OS << ");\n}\n\n";
+ }
+}
+
+void ClangAttrListEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ OS << "#ifndef LAST_ATTR\n";
+ OS << "#define LAST_ATTR(NAME) ATTR(NAME)\n";
+ OS << "#endif\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+ std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end();
+
+ if (i != e) {
+ // Move the end iterator back to emit the last attribute.
+ for(--e; i != e; ++i)
+ OS << "ATTR(" << (*i)->getName() << ")\n";
+
+ OS << "LAST_ATTR(" << (*i)->getName() << ")\n\n";
+ }
+
+ OS << "#undef LAST_ATTR\n";
+ OS << "#undef ATTR\n";
+}
+
+void ClangAttrPCHReadEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edi.\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"),
+ ArgRecords;
+ std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end(), ai, ae;
+ std::vector<Argument*> Args;
+ std::vector<Argument*>::iterator ri, re;
+
+ OS << " switch (Kind) {\n";
+ OS << " default:\n";
+ OS << " assert(0 && \"Unknown attribute!\");\n";
+ OS << " break;\n";
+ for (; i != e; ++i) {
+ Record &R = **i;
+ OS << " case attr::" << R.getName() << ": {\n";
+ ArgRecords = R.getValueAsListOfDefs("Args");
+ Args.clear();
+ for (ai = ArgRecords.begin(), ae = ArgRecords.end(); ai != ae; ++ai) {
+ Argument *A = createArgument(**ai, R.getName());
+ Args.push_back(A);
+ A->writePCHReadDecls(OS);
+ }
+ OS << " New = new (*Context) " << R.getName() << "Attr(Loc, *Context";
+ for (ri = Args.begin(), re = Args.end(); ri != re; ++ri) {
+ OS << ", ";
+ (*ri)->writePCHReadArgs(OS);
+ }
+ OS << ");\n";
+ OS << " break;\n";
+ OS << " }\n";
+ }
+ OS << " }\n";
+}
+
+void ClangAttrPCHWriteEmitter::run(raw_ostream &OS) {
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"), Args;
+ std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end(), ai, ae;
+
+ OS << " switch (A->getKind()) {\n";
+ OS << " default:\n";
+ OS << " llvm_unreachable(\"Unknown attribute kind!\");\n";
+ OS << " break;\n";
+ for (; i != e; ++i) {
+ Record &R = **i;
+ OS << " case attr::" << R.getName() << ": {\n";
+ Args = R.getValueAsListOfDefs("Args");
+ if (!Args.empty())
+ OS << " const " << R.getName() << "Attr *SA = cast<" << R.getName()
+ << "Attr>(A);\n";
+ for (ai = Args.begin(), ae = Args.end(); ai != ae; ++ai)
+ createArgument(**ai, R.getName())->writePCHWrite(OS);
+ OS << " break;\n";
+ OS << " }\n";
+ }
+ OS << " }\n";
+}
diff --git a/libclamav/c++/llvm/utils/TableGen/ClangAttrEmitter.h b/libclamav/c++/llvm/utils/TableGen/ClangAttrEmitter.h
new file mode 100644
index 0000000..8314982
--- /dev/null
+++ b/libclamav/c++/llvm/utils/TableGen/ClangAttrEmitter.h
@@ -0,0 +1,88 @@
+//===- ClangAttrEmitter.h - Generate Clang attribute handling =-*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang attribute processing code
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANGATTR_EMITTER_H
+#define CLANGATTR_EMITTER_H
+
+#include "TableGenBackend.h"
+
+namespace llvm {
+
+/// ClangAttrClassEmitter - class emits the class definitions for attributes for
+/// clang.
+class ClangAttrClassEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrClassEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrImplEmitter - class emits the class method definitions for
+/// attributes for clang.
+class ClangAttrImplEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrImplEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrListEmitter - class emits the enumeration list for attributes for
+/// clang.
+class ClangAttrListEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrListEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrPCHReadEmitter - class emits the code to read an attribute from
+/// a clang precompiled header.
+class ClangAttrPCHReadEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+public:
+ explicit ClangAttrPCHReadEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrPCHWriteEmitter - class emits the code to write an attribute to
+/// a clang precompiled header.
+class ClangAttrPCHWriteEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+public:
+ explicit ClangAttrPCHWriteEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+}
+
+#endif
diff --git a/libclamav/c++/llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp
index 6f1080e..75b6252 100644
--- a/libclamav/c++/llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -17,12 +17,110 @@
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/VectorExtras.h"
#include <set>
#include <map>
using namespace llvm;
//===----------------------------------------------------------------------===//
+// Diagnostic category computation code.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class DiagGroupParentMap {
+ std::map<const Record*, std::vector<Record*> > Mapping;
+public:
+ DiagGroupParentMap() {
+ std::vector<Record*> DiagGroups
+ = Records.getAllDerivedDefinitions("DiagGroup");
+ for (unsigned i = 0, e = DiagGroups.size(); i != e; ++i) {
+ std::vector<Record*> SubGroups =
+ DiagGroups[i]->getValueAsListOfDefs("SubGroups");
+ for (unsigned j = 0, e = SubGroups.size(); j != e; ++j)
+ Mapping[SubGroups[j]].push_back(DiagGroups[i]);
+ }
+ }
+
+ const std::vector<Record*> &getParents(const Record *Group) {
+ return Mapping[Group];
+ }
+};
+} // end anonymous namespace.
+
+
+static std::string
+getCategoryFromDiagGroup(const Record *Group,
+ DiagGroupParentMap &DiagGroupParents) {
+ // If the DiagGroup has a category, return it.
+ std::string CatName = Group->getValueAsString("CategoryName");
+ if (!CatName.empty()) return CatName;
+
+ // The diag group may be a subgroup of one or more other diagnostic groups;
+ // check these for a category as well.
+ const std::vector<Record*> &Parents = DiagGroupParents.getParents(Group);
+ for (unsigned i = 0, e = Parents.size(); i != e; ++i) {
+ CatName = getCategoryFromDiagGroup(Parents[i], DiagGroupParents);
+ if (!CatName.empty()) return CatName;
+ }
+ return "";
+}
+
+/// getDiagnosticCategory - Return the category that the specified diagnostic
+/// lives in.
+static std::string getDiagnosticCategory(const Record *R,
+ DiagGroupParentMap &DiagGroupParents) {
+ // If the diagnostic is in a group, and that group has a category, use it.
+ if (DefInit *Group = dynamic_cast<DefInit*>(R->getValueInit("Group"))) {
+ // Check the diagnostic's diag group for a category.
+ std::string CatName = getCategoryFromDiagGroup(Group->getDef(),
+ DiagGroupParents);
+ if (!CatName.empty()) return CatName;
+ }
+
+ // If the diagnostic itself has a category, get it.
+ return R->getValueAsString("CategoryName");
+}
+
+namespace {
+ class DiagCategoryIDMap {
+ StringMap<unsigned> CategoryIDs;
+ std::vector<std::string> CategoryStrings;
+ public:
+ DiagCategoryIDMap() {
+ DiagGroupParentMap ParentInfo;
+
+ // The zero'th category is "".
+ CategoryStrings.push_back("");
+ CategoryIDs[""] = 0;
+
+ std::vector<Record*> Diags =
+ Records.getAllDerivedDefinitions("Diagnostic");
+ for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
+ std::string Category = getDiagnosticCategory(Diags[i], ParentInfo);
+ if (Category.empty()) continue; // Skip diags with no category.
+
+ unsigned &ID = CategoryIDs[Category];
+ if (ID != 0) continue; // Already seen.
+
+ ID = CategoryStrings.size();
+ CategoryStrings.push_back(Category);
+ }
+ }
+
+ unsigned getID(StringRef CategoryString) {
+ return CategoryIDs[CategoryString];
+ }
+
+ typedef std::vector<std::string>::iterator iterator;
+ iterator begin() { return CategoryStrings.begin(); }
+ iterator end() { return CategoryStrings.end(); }
+ };
+} // end anonymous namespace.
+
+
+
+//===----------------------------------------------------------------------===//
// Warning Tables (.inc file) generation.
//===----------------------------------------------------------------------===//
@@ -34,12 +132,15 @@ void ClangDiagsDefsEmitter::run(raw_ostream &OS) {
OS << "__" << ComponentName << "START = DIAG_START_" << ComponentName
<< ",\n";
OS << "#undef " << ComponentName << "START\n";
- OS << "#endif\n";
+ OS << "#endif\n\n";
}
const std::vector<Record*> &Diags =
Records.getAllDerivedDefinitions("Diagnostic");
+ DiagCategoryIDMap CategoryIDs;
+ DiagGroupParentMap DGParentMap;
+
for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
const Record &R = *Diags[i];
// Filter by component.
@@ -67,6 +168,9 @@ void ClangDiagsDefsEmitter::run(raw_ostream &OS) {
OS << ", true";
else
OS << ", false";
+
+ // Category number.
+ OS << ", " << CategoryIDs.getID(getDiagnosticCategory(&R, DGParentMap));
OS << ")\n";
}
}
@@ -82,6 +186,9 @@ struct GroupInfo {
};
void ClangDiagGroupsEmitter::run(raw_ostream &OS) {
+ // Compute a mapping from a DiagGroup to all of its parents.
+ DiagGroupParentMap DGParentMap;
+
// Invert the 1-[0/1] mapping of diags to group into a one to many mapping of
// groups to diags in the group.
std::map<std::string, GroupInfo> DiagsInGroup;
@@ -98,9 +205,10 @@ void ClangDiagGroupsEmitter::run(raw_ostream &OS) {
// Add all DiagGroup's to the DiagsInGroup list to make sure we pick up empty
// groups (these are warnings that GCC supports that clang never produces).
- Diags = Records.getAllDerivedDefinitions("DiagGroup");
- for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
- Record *Group = Diags[i];
+ std::vector<Record*> DiagGroups
+ = Records.getAllDerivedDefinitions("DiagGroup");
+ for (unsigned i = 0, e = DiagGroups.size(); i != e; ++i) {
+ Record *Group = DiagGroups[i];
GroupInfo &GI = DiagsInGroup[Group->getValueAsString("GroupName")];
std::vector<Record*> SubGroups = Group->getValueAsListOfDefs("SubGroups");
@@ -132,7 +240,7 @@ void ClangDiagGroupsEmitter::run(raw_ostream &OS) {
const std::vector<std::string> &SubGroups = I->second.SubGroups;
if (!SubGroups.empty()) {
- OS << "static const char DiagSubGroup" << I->second.IDNo << "[] = { ";
+ OS << "static const short DiagSubGroup" << I->second.IDNo << "[] = { ";
for (unsigned i = 0, e = SubGroups.size(); i != e; ++i) {
std::map<std::string, GroupInfo>::iterator RI =
DiagsInGroup.find(SubGroups[i]);
@@ -167,4 +275,12 @@ void ClangDiagGroupsEmitter::run(raw_ostream &OS) {
OS << " },\n";
}
OS << "#endif // GET_DIAG_TABLE\n\n";
+
+ // Emit the category table next.
+ DiagCategoryIDMap CategoriesByID;
+ OS << "\n#ifdef GET_CATEGORY_TABLE\n";
+ for (DiagCategoryIDMap::iterator I = CategoriesByID.begin(),
+ E = CategoriesByID.end(); I != E; ++I)
+ OS << "CATEGORY(\"" << *I << "\")\n";
+ OS << "#endif // GET_CATEGORY_TABLE\n\n";
}
diff --git a/libclamav/c++/llvm/utils/TableGen/CodeEmitterGen.cpp b/libclamav/c++/llvm/utils/TableGen/CodeEmitterGen.cpp
index f1857f5..ec702c2 100644
--- a/libclamav/c++/llvm/utils/TableGen/CodeEmitterGen.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/CodeEmitterGen.cpp
@@ -24,18 +24,8 @@ void CodeEmitterGen::reverseBits(std::vector<Record*> &Insts) {
for (std::vector<Record*>::iterator I = Insts.begin(), E = Insts.end();
I != E; ++I) {
Record *R = *I;
- if (R->getName() == "PHI" ||
- R->getName() == "INLINEASM" ||
- R->getName() == "DBG_LABEL" ||
- R->getName() == "EH_LABEL" ||
- R->getName() == "GC_LABEL" ||
- R->getName() == "KILL" ||
- R->getName() == "EXTRACT_SUBREG" ||
- R->getName() == "INSERT_SUBREG" ||
- R->getName() == "IMPLICIT_DEF" ||
- R->getName() == "SUBREG_TO_REG" ||
- R->getName() == "COPY_TO_REGCLASS" ||
- R->getName() == "DBG_VALUE") continue;
+ if (R->getValueAsString("Namespace") == "TargetOpcode")
+ continue;
BitsInit *BI = R->getValueAsBitsInit("Inst");
@@ -86,8 +76,8 @@ void CodeEmitterGen::run(raw_ostream &o) {
EmitSourceFileHeader("Machine Code Emitter", o);
std::string Namespace = Insts[0]->getValueAsString("Namespace") + "::";
- std::vector<const CodeGenInstruction*> NumberedInstructions;
- Target.getInstructionsByEnumValue(NumberedInstructions);
+ const std::vector<const CodeGenInstruction*> &NumberedInstructions =
+ Target.getInstructionsByEnumValue();
// Emit function declaration
o << "unsigned " << Target.getName() << "CodeEmitter::"
@@ -95,25 +85,14 @@ void CodeEmitterGen::run(raw_ostream &o) {
// Emit instruction base values
o << " static const unsigned InstBits[] = {\n";
- for (std::vector<const CodeGenInstruction*>::iterator
+ for (std::vector<const CodeGenInstruction*>::const_iterator
IN = NumberedInstructions.begin(),
EN = NumberedInstructions.end();
IN != EN; ++IN) {
const CodeGenInstruction *CGI = *IN;
Record *R = CGI->TheDef;
- if (R->getName() == "PHI" ||
- R->getName() == "INLINEASM" ||
- R->getName() == "DBG_LABEL" ||
- R->getName() == "EH_LABEL" ||
- R->getName() == "GC_LABEL" ||
- R->getName() == "KILL" ||
- R->getName() == "EXTRACT_SUBREG" ||
- R->getName() == "INSERT_SUBREG" ||
- R->getName() == "IMPLICIT_DEF" ||
- R->getName() == "SUBREG_TO_REG" ||
- R->getName() == "COPY_TO_REGCLASS" ||
- R->getName() == "DBG_VALUE") {
+ if (R->getValueAsString("Namespace") == "TargetOpcode") {
o << " 0U,\n";
continue;
}
@@ -138,25 +117,14 @@ void CodeEmitterGen::run(raw_ostream &o) {
for (std::vector<Record*>::iterator IC = Insts.begin(), EC = Insts.end();
IC != EC; ++IC) {
Record *R = *IC;
+ if (R->getValueAsString("Namespace") == "TargetOpcode")
+ continue;
const std::string &InstName = R->getName();
std::string Case("");
-
- if (InstName == "PHI" ||
- InstName == "INLINEASM" ||
- InstName == "DBG_LABEL"||
- InstName == "EH_LABEL"||
- InstName == "GC_LABEL"||
- InstName == "KILL"||
- InstName == "EXTRACT_SUBREG" ||
- InstName == "INSERT_SUBREG" ||
- InstName == "IMPLICIT_DEF" ||
- InstName == "SUBREG_TO_REG" ||
- InstName == "COPY_TO_REGCLASS" ||
- InstName == "DBG_VALUE") continue;
BitsInit *BI = R->getValueAsBitsInit("Inst");
const std::vector<RecordVal> &Vals = R->getValues();
- CodeGenInstruction &CGI = Target.getInstruction(InstName);
+ CodeGenInstruction &CGI = Target.getInstruction(R);
// Loop over all of the fields in the instruction, determining which are the
// operands to the instruction.
@@ -249,7 +217,7 @@ void CodeEmitterGen::run(raw_ostream &o) {
<< " std::string msg;\n"
<< " raw_string_ostream Msg(msg);\n"
<< " Msg << \"Not supported instr: \" << MI;\n"
- << " llvm_report_error(Msg.str());\n"
+ << " report_fatal_error(Msg.str());\n"
<< " }\n"
<< " return Value;\n"
<< "}\n\n";
diff --git a/libclamav/c++/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/libclamav/c++/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
index ce737bf..303aa6c 100644
--- a/libclamav/c++/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -15,90 +15,427 @@
#include "CodeGenDAGPatterns.h"
#include "Record.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include <set>
#include <algorithm>
-#include <iostream>
using namespace llvm;
//===----------------------------------------------------------------------===//
-// Helpers for working with extended types.
+// EEVT::TypeSet Implementation
+//===----------------------------------------------------------------------===//
-/// FilterVTs - Filter a list of VT's according to a predicate.
-///
-template<typename T>
-static std::vector<MVT::SimpleValueType>
-FilterVTs(const std::vector<MVT::SimpleValueType> &InVTs, T Filter) {
- std::vector<MVT::SimpleValueType> Result;
- for (unsigned i = 0, e = InVTs.size(); i != e; ++i)
- if (Filter(InVTs[i]))
- Result.push_back(InVTs[i]);
- return Result;
+static inline bool isInteger(MVT::SimpleValueType VT) {
+ return EVT(VT).isInteger();
+}
+static inline bool isFloatingPoint(MVT::SimpleValueType VT) {
+ return EVT(VT).isFloatingPoint();
+}
+static inline bool isVector(MVT::SimpleValueType VT) {
+ return EVT(VT).isVector();
+}
+static inline bool isScalar(MVT::SimpleValueType VT) {
+ return !EVT(VT).isVector();
}
-template<typename T>
-static std::vector<unsigned char>
-FilterEVTs(const std::vector<unsigned char> &InVTs, T Filter) {
- std::vector<unsigned char> Result;
- for (unsigned i = 0, e = InVTs.size(); i != e; ++i)
- if (Filter((MVT::SimpleValueType)InVTs[i]))
- Result.push_back(InVTs[i]);
- return Result;
+EEVT::TypeSet::TypeSet(MVT::SimpleValueType VT, TreePattern &TP) {
+ if (VT == MVT::iAny)
+ EnforceInteger(TP);
+ else if (VT == MVT::fAny)
+ EnforceFloatingPoint(TP);
+ else if (VT == MVT::vAny)
+ EnforceVector(TP);
+ else {
+ assert((VT < MVT::LAST_VALUETYPE || VT == MVT::iPTR ||
+ VT == MVT::iPTRAny) && "Not a concrete type!");
+ TypeVec.push_back(VT);
+ }
}
-static std::vector<unsigned char>
-ConvertVTs(const std::vector<MVT::SimpleValueType> &InVTs) {
- std::vector<unsigned char> Result;
- for (unsigned i = 0, e = InVTs.size(); i != e; ++i)
- Result.push_back(InVTs[i]);
- return Result;
+
+EEVT::TypeSet::TypeSet(const std::vector<MVT::SimpleValueType> &VTList) {
+ assert(!VTList.empty() && "empty list?");
+ TypeVec.append(VTList.begin(), VTList.end());
+
+ if (!VTList.empty())
+ assert(VTList[0] != MVT::iAny && VTList[0] != MVT::vAny &&
+ VTList[0] != MVT::fAny);
+
+ // Verify no duplicates.
+ array_pod_sort(TypeVec.begin(), TypeVec.end());
+ assert(std::unique(TypeVec.begin(), TypeVec.end()) == TypeVec.end());
+}
+
+/// FillWithPossibleTypes - Set to all legal types and return true, only valid
+/// on completely unknown type sets.
+bool EEVT::TypeSet::FillWithPossibleTypes(TreePattern &TP,
+ bool (*Pred)(MVT::SimpleValueType),
+ const char *PredicateName) {
+ assert(isCompletelyUnknown());
+ const std::vector<MVT::SimpleValueType> &LegalTypes =
+ TP.getDAGPatterns().getTargetInfo().getLegalValueTypes();
+
+ for (unsigned i = 0, e = LegalTypes.size(); i != e; ++i)
+ if (Pred == 0 || Pred(LegalTypes[i]))
+ TypeVec.push_back(LegalTypes[i]);
+
+ // If we have nothing that matches the predicate, bail out.
+ if (TypeVec.empty())
+ TP.error("Type inference contradiction found, no " +
+ std::string(PredicateName) + " types found");
+ // No need to sort with one element.
+ if (TypeVec.size() == 1) return true;
+
+ // Remove duplicates.
+ array_pod_sort(TypeVec.begin(), TypeVec.end());
+ TypeVec.erase(std::unique(TypeVec.begin(), TypeVec.end()), TypeVec.end());
+
+ return true;
}
-static inline bool isInteger(MVT::SimpleValueType VT) {
- return EVT(VT).isInteger();
+/// hasIntegerTypes - Return true if this TypeSet contains iAny or an
+/// integer value type.
+bool EEVT::TypeSet::hasIntegerTypes() const {
+ for (unsigned i = 0, e = TypeVec.size(); i != e; ++i)
+ if (isInteger(TypeVec[i]))
+ return true;
+ return false;
+}
+
+/// hasFloatingPointTypes - Return true if this TypeSet contains an fAny or
+/// a floating point value type.
+bool EEVT::TypeSet::hasFloatingPointTypes() const {
+ for (unsigned i = 0, e = TypeVec.size(); i != e; ++i)
+ if (isFloatingPoint(TypeVec[i]))
+ return true;
+ return false;
+}
+
+/// hasVectorTypes - Return true if this TypeSet contains a vAny or a vector
+/// value type.
+bool EEVT::TypeSet::hasVectorTypes() const {
+ for (unsigned i = 0, e = TypeVec.size(); i != e; ++i)
+ if (isVector(TypeVec[i]))
+ return true;
+ return false;
}
-static inline bool isFloatingPoint(MVT::SimpleValueType VT) {
- return EVT(VT).isFloatingPoint();
+
+std::string EEVT::TypeSet::getName() const {
+ if (TypeVec.empty()) return "<empty>";
+
+ std::string Result;
+
+ for (unsigned i = 0, e = TypeVec.size(); i != e; ++i) {
+ std::string VTName = llvm::getEnumName(TypeVec[i]);
+ // Strip off MVT:: prefix if present.
+ if (VTName.substr(0,5) == "MVT::")
+ VTName = VTName.substr(5);
+ if (i) Result += ':';
+ Result += VTName;
+ }
+
+ if (TypeVec.size() == 1)
+ return Result;
+ return "{" + Result + "}";
}
-static inline bool isVector(MVT::SimpleValueType VT) {
- return EVT(VT).isVector();
+/// MergeInTypeInfo - This merges in type information from the specified
+/// argument. If 'this' changes, it returns true. If the two types are
+/// contradictory (e.g. merge f32 into i32) then this throws an exception.
+bool EEVT::TypeSet::MergeInTypeInfo(const EEVT::TypeSet &InVT, TreePattern &TP){
+ if (InVT.isCompletelyUnknown() || *this == InVT)
+ return false;
+
+ if (isCompletelyUnknown()) {
+ *this = InVT;
+ return true;
+ }
+
+ assert(TypeVec.size() >= 1 && InVT.TypeVec.size() >= 1 && "No unknowns");
+
+ // Handle the abstract cases, seeing if we can resolve them better.
+ switch (TypeVec[0]) {
+ default: break;
+ case MVT::iPTR:
+ case MVT::iPTRAny:
+ if (InVT.hasIntegerTypes()) {
+ EEVT::TypeSet InCopy(InVT);
+ InCopy.EnforceInteger(TP);
+ InCopy.EnforceScalar(TP);
+
+ if (InCopy.isConcrete()) {
+ // If the RHS has one integer type, upgrade iPTR to i32.
+ TypeVec[0] = InVT.TypeVec[0];
+ return true;
+ }
+
+ // If the input has multiple scalar integers, this doesn't add any info.
+ if (!InCopy.isCompletelyUnknown())
+ return false;
+ }
+ break;
+ }
+
+ // If the input constraint is iPTR/iPTRAny and this is an integer type list,
+ // remove non-integer types from the list.
+ if ((InVT.TypeVec[0] == MVT::iPTR || InVT.TypeVec[0] == MVT::iPTRAny) &&
+ hasIntegerTypes()) {
+ bool MadeChange = EnforceInteger(TP);
+
+ // If we're merging in iPTR/iPTRAny and the node currently has a list of
+ // multiple different integer types, replace them with a single iPTR.
+ if ((InVT.TypeVec[0] == MVT::iPTR || InVT.TypeVec[0] == MVT::iPTRAny) &&
+ TypeVec.size() != 1) {
+ TypeVec.resize(1);
+ TypeVec[0] = InVT.TypeVec[0];
+ MadeChange = true;
+ }
+
+ return MadeChange;
+ }
+
+ // If this is a type list and the RHS is a typelist as well, eliminate entries
+ // from this list that aren't in the other one.
+ bool MadeChange = false;
+ TypeSet InputSet(*this);
+
+ for (unsigned i = 0; i != TypeVec.size(); ++i) {
+ bool InInVT = false;
+ for (unsigned j = 0, e = InVT.TypeVec.size(); j != e; ++j)
+ if (TypeVec[i] == InVT.TypeVec[j]) {
+ InInVT = true;
+ break;
+ }
+
+ if (InInVT) continue;
+ TypeVec.erase(TypeVec.begin()+i--);
+ MadeChange = true;
+ }
+
+ // If we removed all of our types, we have a type contradiction.
+ if (!TypeVec.empty())
+ return MadeChange;
+
+ // FIXME: Really want an SMLoc here!
+ TP.error("Type inference contradiction found, merging '" +
+ InVT.getName() + "' into '" + InputSet.getName() + "'");
+ return true; // unreachable
}
-static bool LHSIsSubsetOfRHS(const std::vector<unsigned char> &LHS,
- const std::vector<unsigned char> &RHS) {
- if (LHS.size() > RHS.size()) return false;
- for (unsigned i = 0, e = LHS.size(); i != e; ++i)
- if (std::find(RHS.begin(), RHS.end(), LHS[i]) == RHS.end())
- return false;
+/// EnforceInteger - Remove all non-integer types from this set.
+bool EEVT::TypeSet::EnforceInteger(TreePattern &TP) {
+ // If we know nothing, then get the full set.
+ if (TypeVec.empty())
+ return FillWithPossibleTypes(TP, isInteger, "integer");
+ if (!hasFloatingPointTypes())
+ return false;
+
+ TypeSet InputSet(*this);
+
+ // Filter out all the fp types.
+ for (unsigned i = 0; i != TypeVec.size(); ++i)
+ if (!isInteger(TypeVec[i]))
+ TypeVec.erase(TypeVec.begin()+i--);
+
+ if (TypeVec.empty())
+ TP.error("Type inference contradiction found, '" +
+ InputSet.getName() + "' needs to be integer");
return true;
}
-namespace llvm {
-namespace EEVT {
-/// isExtIntegerInVTs - Return true if the specified extended value type vector
-/// contains iAny or an integer value type.
-bool isExtIntegerInVTs(const std::vector<unsigned char> &EVTs) {
- assert(!EVTs.empty() && "Cannot check for integer in empty ExtVT list!");
- return EVTs[0] == MVT::iAny || !(FilterEVTs(EVTs, isInteger).empty());
+/// EnforceFloatingPoint - Remove all integer types from this set.
+bool EEVT::TypeSet::EnforceFloatingPoint(TreePattern &TP) {
+ // If we know nothing, then get the full set.
+ if (TypeVec.empty())
+ return FillWithPossibleTypes(TP, isFloatingPoint, "floating point");
+
+ if (!hasIntegerTypes())
+ return false;
+
+ TypeSet InputSet(*this);
+
+ // Filter out all the non-fp types.
+ for (unsigned i = 0; i != TypeVec.size(); ++i)
+ if (!isFloatingPoint(TypeVec[i]))
+ TypeVec.erase(TypeVec.begin()+i--);
+
+ if (TypeVec.empty())
+ TP.error("Type inference contradiction found, '" +
+ InputSet.getName() + "' needs to be floating point");
+ return true;
}
-/// isExtFloatingPointInVTs - Return true if the specified extended value type
-/// vector contains fAny or a FP value type.
-bool isExtFloatingPointInVTs(const std::vector<unsigned char> &EVTs) {
- assert(!EVTs.empty() && "Cannot check for FP in empty ExtVT list!");
- return EVTs[0] == MVT::fAny || !(FilterEVTs(EVTs, isFloatingPoint).empty());
+/// EnforceScalar - Remove all vector types from this.
+bool EEVT::TypeSet::EnforceScalar(TreePattern &TP) {
+ // If we know nothing, then get the full set.
+ if (TypeVec.empty())
+ return FillWithPossibleTypes(TP, isScalar, "scalar");
+
+ if (!hasVectorTypes())
+ return false;
+
+ TypeSet InputSet(*this);
+
+ // Filter out all the vector types.
+ for (unsigned i = 0; i != TypeVec.size(); ++i)
+ if (!isScalar(TypeVec[i]))
+ TypeVec.erase(TypeVec.begin()+i--);
+
+ if (TypeVec.empty())
+ TP.error("Type inference contradiction found, '" +
+ InputSet.getName() + "' needs to be scalar");
+ return true;
}
-/// isExtVectorInVTs - Return true if the specified extended value type
-/// vector contains vAny or a vector value type.
-bool isExtVectorInVTs(const std::vector<unsigned char> &EVTs) {
- assert(!EVTs.empty() && "Cannot check for vector in empty ExtVT list!");
- return EVTs[0] == MVT::vAny || !(FilterEVTs(EVTs, isVector).empty());
+/// EnforceVector - Remove all non-vector types from this.
+bool EEVT::TypeSet::EnforceVector(TreePattern &TP) {
+ // If we know nothing, then get the full set.
+ if (TypeVec.empty())
+ return FillWithPossibleTypes(TP, isVector, "vector");
+
+ TypeSet InputSet(*this);
+ bool MadeChange = false;
+
+ // Filter out all the scalar types.
+ for (unsigned i = 0; i != TypeVec.size(); ++i)
+ if (!isVector(TypeVec[i])) {
+ TypeVec.erase(TypeVec.begin()+i--);
+ MadeChange = true;
+ }
+
+ if (TypeVec.empty())
+ TP.error("Type inference contradiction found, '" +
+ InputSet.getName() + "' needs to be a vector");
+ return MadeChange;
}
-} // end namespace EEVT.
-} // end namespace llvm.
+
+
+
+/// EnforceSmallerThan - 'this' must be a smaller VT than Other. Update
+/// 'this' and Other based on this information.
+bool EEVT::TypeSet::EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP) {
+ // Both operands must be integer or FP, but we don't care which.
+ bool MadeChange = false;
+
+ if (isCompletelyUnknown())
+ MadeChange = FillWithPossibleTypes(TP);
+
+ if (Other.isCompletelyUnknown())
+ MadeChange = Other.FillWithPossibleTypes(TP);
+
+ // If one side is known to be integer or known to be FP but the other side has
+ // no information, get at least the type integrality info in there.
+ if (!hasFloatingPointTypes())
+ MadeChange |= Other.EnforceInteger(TP);
+ else if (!hasIntegerTypes())
+ MadeChange |= Other.EnforceFloatingPoint(TP);
+ if (!Other.hasFloatingPointTypes())
+ MadeChange |= EnforceInteger(TP);
+ else if (!Other.hasIntegerTypes())
+ MadeChange |= EnforceFloatingPoint(TP);
+
+ assert(!isCompletelyUnknown() && !Other.isCompletelyUnknown() &&
+ "Should have a type list now");
+
+ // If one side has vectors but the other doesn't, pull the vector types out.
+ if (!hasVectorTypes())
+ MadeChange |= Other.EnforceScalar(TP);
+ if (!Other.hasVectorTypes())
+ MadeChange |= EnforceScalar(TP);
+
+ // This code does not currently handle nodes which have multiple types,
+ // where some types are integer, and some are fp. Assert that this is not
+ // the case.
+ assert(!(hasIntegerTypes() && hasFloatingPointTypes()) &&
+ !(Other.hasIntegerTypes() && Other.hasFloatingPointTypes()) &&
+ "SDTCisOpSmallerThanOp does not handle mixed int/fp types!");
+
+ // Okay, find the smallest type from the current set and remove it from the
+ // largest set.
+ MVT::SimpleValueType Smallest = TypeVec[0];
+ for (unsigned i = 1, e = TypeVec.size(); i != e; ++i)
+ if (TypeVec[i] < Smallest)
+ Smallest = TypeVec[i];
+
+ // If this is the only type in the large set, the constraint can never be
+ // satisfied.
+ if (Other.TypeVec.size() == 1 && Other.TypeVec[0] == Smallest)
+ TP.error("Type inference contradiction found, '" +
+ Other.getName() + "' has nothing larger than '" + getName() +"'!");
+
+ SmallVector<MVT::SimpleValueType, 2>::iterator TVI =
+ std::find(Other.TypeVec.begin(), Other.TypeVec.end(), Smallest);
+ if (TVI != Other.TypeVec.end()) {
+ Other.TypeVec.erase(TVI);
+ MadeChange = true;
+ }
+
+ // Okay, find the largest type in the Other set and remove it from the
+ // current set.
+ MVT::SimpleValueType Largest = Other.TypeVec[0];
+ for (unsigned i = 1, e = Other.TypeVec.size(); i != e; ++i)
+ if (Other.TypeVec[i] > Largest)
+ Largest = Other.TypeVec[i];
+
+ // If this is the only type in the small set, the constraint can never be
+ // satisfied.
+ if (TypeVec.size() == 1 && TypeVec[0] == Largest)
+ TP.error("Type inference contradiction found, '" +
+ getName() + "' has nothing smaller than '" + Other.getName()+"'!");
+
+ TVI = std::find(TypeVec.begin(), TypeVec.end(), Largest);
+ if (TVI != TypeVec.end()) {
+ TypeVec.erase(TVI);
+ MadeChange = true;
+ }
+
+ return MadeChange;
+}
+
+/// EnforceVectorEltTypeIs - 'this' is now constrained to be a vector type
+/// whose element is specified by VTOperand.
+bool EEVT::TypeSet::EnforceVectorEltTypeIs(EEVT::TypeSet &VTOperand,
+ TreePattern &TP) {
+ // "This" must be a vector and "VTOperand" must be a scalar.
+ bool MadeChange = false;
+ MadeChange |= EnforceVector(TP);
+ MadeChange |= VTOperand.EnforceScalar(TP);
+
+ // If we know the vector type, it forces the scalar to agree.
+ if (isConcrete()) {
+ EVT IVT = getConcrete();
+ IVT = IVT.getVectorElementType();
+ return MadeChange |
+ VTOperand.MergeInTypeInfo(IVT.getSimpleVT().SimpleTy, TP);
+ }
+
+ // If the scalar type is known, filter out vector types whose element types
+ // disagree.
+ if (!VTOperand.isConcrete())
+ return MadeChange;
+
+ MVT::SimpleValueType VT = VTOperand.getConcrete();
+
+ TypeSet InputSet(*this);
+
+ // Filter out all the types which don't have the right element type.
+ for (unsigned i = 0; i != TypeVec.size(); ++i) {
+ assert(isVector(TypeVec[i]) && "EnforceVector didn't work");
+ if (EVT(TypeVec[i]).getVectorElementType().getSimpleVT().SimpleTy != VT) {
+ TypeVec.erase(TypeVec.begin()+i--);
+ MadeChange = true;
+ }
+ }
+
+ if (TypeVec.empty()) // FIXME: Really want an SMLoc here!
+ TP.error("Type inference contradiction found, forcing '" +
+ InputSet.getName() + "' to have a vector element");
+ return MadeChange;
+}
+
+//===----------------------------------------------------------------------===//
+// Helpers for working with extended types.
bool RecordPtrCmp::operator()(const Record *LHS, const Record *RHS) const {
return LHS->getID() < RHS->getID();
@@ -154,6 +491,59 @@ void DumpDepVars(MultipleUseVarSet &DepVars) {
// PatternToMatch implementation
//
+
+/// getPatternSize - Return the 'size' of this pattern. We want to match large
+/// patterns before small ones, so the size computed here is used to order
+/// patterns by matching priority.
+static unsigned getPatternSize(const TreePatternNode *P,
+ const CodeGenDAGPatterns &CGP) {
+ unsigned Size = 3; // The node itself.
+ // If the root node is a ConstantSDNode, increase its size.
+ // e.g. (set R32:$dst, 0).
+ if (P->isLeaf() && dynamic_cast<IntInit*>(P->getLeafValue()))
+ Size += 2;
+
+ // FIXME: This is a hack to statically increase the priority of patterns
+ // which map a sub-dag to a complex pattern, e.g. favoring LEA over ADD.
+ // Later we can allow complexity / cost for each pattern to be (optionally)
+ // specified. To get best possible pattern match we'll need to dynamically
+ // calculate the complexity of all patterns a dag can potentially map to.
+ const ComplexPattern *AM = P->getComplexPatternInfo(CGP);
+ if (AM)
+ Size += AM->getNumOperands() * 3;
+
+ // If this node has some predicate function that must match, it adds to the
+ // complexity of this node.
+ if (!P->getPredicateFns().empty())
+ ++Size;
+
+ // Count children in the count if they are also nodes.
+ for (unsigned i = 0, e = P->getNumChildren(); i != e; ++i) {
+ TreePatternNode *Child = P->getChild(i);
+ if (!Child->isLeaf() && Child->getNumTypes() &&
+ Child->getType(0) != MVT::Other)
+ Size += getPatternSize(Child, CGP);
+ else if (Child->isLeaf()) {
+ if (dynamic_cast<IntInit*>(Child->getLeafValue()))
+ Size += 5; // Matches a ConstantSDNode (+3) and a specific value (+2).
+ else if (Child->getComplexPatternInfo(CGP))
+ Size += getPatternSize(Child, CGP);
+ else if (!Child->getPredicateFns().empty())
+ ++Size;
+ }
+ }
+
+ return Size;
+}
+
+/// Compute the complexity metric for the input pattern. This roughly
+/// corresponds to the number of nodes that are covered.
+unsigned PatternToMatch::
+getPatternComplexity(const CodeGenDAGPatterns &CGP) const {
+ return getPatternSize(getSrcPattern(), CGP) + getAddedComplexity();
+}
+
+
/// getPredicateCheck - Return a single string containing all of this
/// pattern's predicates concatenated with "&&" operators.
///
@@ -187,6 +577,9 @@ SDTypeConstraint::SDTypeConstraint(Record *R) {
if (R->isSubClassOf("SDTCisVT")) {
ConstraintType = SDTCisVT;
x.SDTCisVT_Info.VT = getValueType(R->getValueAsDef("VT"));
+ if (x.SDTCisVT_Info.VT == MVT::isVoid)
+ throw TGError(R->getLoc(), "Cannot use 'Void' as type to SDTCisVT");
+
} else if (R->isSubClassOf("SDTCisPtrTy")) {
ConstraintType = SDTCisPtrTy;
} else if (R->isSubClassOf("SDTCisInt")) {
@@ -208,8 +601,7 @@ SDTypeConstraint::SDTypeConstraint(Record *R) {
R->getValueAsInt("BigOperandNum");
} else if (R->isSubClassOf("SDTCisEltOfVec")) {
ConstraintType = SDTCisEltOfVec;
- x.SDTCisEltOfVec_Info.OtherOperandNum =
- R->getValueAsInt("OtherOpNum");
+ x.SDTCisEltOfVec_Info.OtherOperandNum = R->getValueAsInt("OtherOpNum");
} else {
errs() << "Unrecognized SDTypeConstraint '" << R->getName() << "'!\n";
exit(1);
@@ -217,24 +609,27 @@ SDTypeConstraint::SDTypeConstraint(Record *R) {
}
/// getOperandNum - Return the node corresponding to operand #OpNo in tree
-/// N, which has NumResults results.
-TreePatternNode *SDTypeConstraint::getOperandNum(unsigned OpNo,
- TreePatternNode *N,
- unsigned NumResults) const {
- assert(NumResults <= 1 &&
- "We only work with nodes with zero or one result so far!");
+/// N, and the result number in ResNo.
+static TreePatternNode *getOperandNum(unsigned OpNo, TreePatternNode *N,
+ const SDNodeInfo &NodeInfo,
+ unsigned &ResNo) {
+ unsigned NumResults = NodeInfo.getNumResults();
+ if (OpNo < NumResults) {
+ ResNo = OpNo;
+ return N;
+ }
+
+ OpNo -= NumResults;
- if (OpNo >= (NumResults + N->getNumChildren())) {
- errs() << "Invalid operand number " << OpNo << " ";
+ if (OpNo >= N->getNumChildren()) {
+ errs() << "Invalid operand number in type constraint "
+ << (OpNo+NumResults) << " ";
N->dump();
errs() << '\n';
exit(1);
}
- if (OpNo < NumResults)
- return N; // FIXME: need value #
- else
- return N->getChild(OpNo-NumResults);
+ return N->getChild(OpNo);
}
/// ApplyTypeConstraint - Given a node in a pattern, apply this type
@@ -244,65 +639,32 @@ TreePatternNode *SDTypeConstraint::getOperandNum(unsigned OpNo,
bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
const SDNodeInfo &NodeInfo,
TreePattern &TP) const {
- unsigned NumResults = NodeInfo.getNumResults();
- assert(NumResults <= 1 &&
- "We only work with nodes with zero or one result so far!");
-
- // Check that the number of operands is sane. Negative operands -> varargs.
- if (NodeInfo.getNumOperands() >= 0) {
- if (N->getNumChildren() != (unsigned)NodeInfo.getNumOperands())
- TP.error(N->getOperator()->getName() + " node requires exactly " +
- itostr(NodeInfo.getNumOperands()) + " operands!");
- }
-
- const CodeGenTarget &CGT = TP.getDAGPatterns().getTargetInfo();
-
- TreePatternNode *NodeToApply = getOperandNum(OperandNo, N, NumResults);
+ unsigned ResNo = 0; // The result number being referenced.
+ TreePatternNode *NodeToApply = getOperandNum(OperandNo, N, NodeInfo, ResNo);
switch (ConstraintType) {
default: assert(0 && "Unknown constraint type!");
case SDTCisVT:
// Operand must be a particular type.
- return NodeToApply->UpdateNodeType(x.SDTCisVT_Info.VT, TP);
- case SDTCisPtrTy: {
+ return NodeToApply->UpdateNodeType(ResNo, x.SDTCisVT_Info.VT, TP);
+ case SDTCisPtrTy:
// Operand must be same as target pointer type.
- return NodeToApply->UpdateNodeType(MVT::iPTR, TP);
- }
- case SDTCisInt: {
- // If there is only one integer type supported, this must be it.
- std::vector<MVT::SimpleValueType> IntVTs =
- FilterVTs(CGT.getLegalValueTypes(), isInteger);
-
- // If we found exactly one supported integer type, apply it.
- if (IntVTs.size() == 1)
- return NodeToApply->UpdateNodeType(IntVTs[0], TP);
- return NodeToApply->UpdateNodeType(MVT::iAny, TP);
- }
- case SDTCisFP: {
- // If there is only one FP type supported, this must be it.
- std::vector<MVT::SimpleValueType> FPVTs =
- FilterVTs(CGT.getLegalValueTypes(), isFloatingPoint);
-
- // If we found exactly one supported FP type, apply it.
- if (FPVTs.size() == 1)
- return NodeToApply->UpdateNodeType(FPVTs[0], TP);
- return NodeToApply->UpdateNodeType(MVT::fAny, TP);
- }
- case SDTCisVec: {
- // If there is only one vector type supported, this must be it.
- std::vector<MVT::SimpleValueType> VecVTs =
- FilterVTs(CGT.getLegalValueTypes(), isVector);
-
- // If we found exactly one supported vector type, apply it.
- if (VecVTs.size() == 1)
- return NodeToApply->UpdateNodeType(VecVTs[0], TP);
- return NodeToApply->UpdateNodeType(MVT::vAny, TP);
- }
+ return NodeToApply->UpdateNodeType(ResNo, MVT::iPTR, TP);
+ case SDTCisInt:
+ // Require it to be one of the legal integer VTs.
+ return NodeToApply->getExtType(ResNo).EnforceInteger(TP);
+ case SDTCisFP:
+ // Require it to be one of the legal fp VTs.
+ return NodeToApply->getExtType(ResNo).EnforceFloatingPoint(TP);
+ case SDTCisVec:
+ // Require it to be one of the legal vector VTs.
+ return NodeToApply->getExtType(ResNo).EnforceVector(TP);
case SDTCisSameAs: {
+ unsigned OResNo = 0;
TreePatternNode *OtherNode =
- getOperandNum(x.SDTCisSameAs_Info.OtherOperandNum, N, NumResults);
- return NodeToApply->UpdateNodeType(OtherNode->getExtTypes(), TP) |
- OtherNode->UpdateNodeType(NodeToApply->getExtTypes(), TP);
+ getOperandNum(x.SDTCisSameAs_Info.OtherOperandNum, N, NodeInfo, OResNo);
+ return NodeToApply->UpdateNodeType(OResNo, OtherNode->getExtType(ResNo),TP)|
+ OtherNode->UpdateNodeType(ResNo,NodeToApply->getExtType(OResNo),TP);
}
case SDTCisVTSmallerThanOp: {
// The NodeToApply must be a leaf node that is a VT. OtherOperandNum must
@@ -314,86 +676,34 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
TP.error(N->getOperator()->getName() + " expects a VT operand!");
MVT::SimpleValueType VT =
getValueType(static_cast<DefInit*>(NodeToApply->getLeafValue())->getDef());
- if (!isInteger(VT))
- TP.error(N->getOperator()->getName() + " VT operand must be integer!");
-
- TreePatternNode *OtherNode =
- getOperandNum(x.SDTCisVTSmallerThanOp_Info.OtherOperandNum, N,NumResults);
- // It must be integer.
- bool MadeChange = OtherNode->UpdateNodeType(MVT::iAny, TP);
+ EEVT::TypeSet TypeListTmp(VT, TP);
- // This code only handles nodes that have one type set. Assert here so
- // that we can change this if we ever need to deal with multiple value
- // types at this point.
- assert(OtherNode->getExtTypes().size() == 1 && "Node has too many types!");
- if (OtherNode->hasTypeSet() && OtherNode->getTypeNum(0) <= VT)
- OtherNode->UpdateNodeType(MVT::Other, TP); // Throw an error.
- return MadeChange;
+ unsigned OResNo = 0;
+ TreePatternNode *OtherNode =
+ getOperandNum(x.SDTCisVTSmallerThanOp_Info.OtherOperandNum, N, NodeInfo,
+ OResNo);
+
+ return TypeListTmp.EnforceSmallerThan(OtherNode->getExtType(OResNo), TP);
}
case SDTCisOpSmallerThanOp: {
+ unsigned BResNo = 0;
TreePatternNode *BigOperand =
- getOperandNum(x.SDTCisOpSmallerThanOp_Info.BigOperandNum, N, NumResults);
-
- // Both operands must be integer or FP, but we don't care which.
- bool MadeChange = false;
-
- // This code does not currently handle nodes which have multiple types,
- // where some types are integer, and some are fp. Assert that this is not
- // the case.
- assert(!(EEVT::isExtIntegerInVTs(NodeToApply->getExtTypes()) &&
- EEVT::isExtFloatingPointInVTs(NodeToApply->getExtTypes())) &&
- !(EEVT::isExtIntegerInVTs(BigOperand->getExtTypes()) &&
- EEVT::isExtFloatingPointInVTs(BigOperand->getExtTypes())) &&
- "SDTCisOpSmallerThanOp does not handle mixed int/fp types!");
- if (EEVT::isExtIntegerInVTs(NodeToApply->getExtTypes()))
- MadeChange |= BigOperand->UpdateNodeType(MVT::iAny, TP);
- else if (EEVT::isExtFloatingPointInVTs(NodeToApply->getExtTypes()))
- MadeChange |= BigOperand->UpdateNodeType(MVT::fAny, TP);
- if (EEVT::isExtIntegerInVTs(BigOperand->getExtTypes()))
- MadeChange |= NodeToApply->UpdateNodeType(MVT::iAny, TP);
- else if (EEVT::isExtFloatingPointInVTs(BigOperand->getExtTypes()))
- MadeChange |= NodeToApply->UpdateNodeType(MVT::fAny, TP);
-
- std::vector<MVT::SimpleValueType> VTs = CGT.getLegalValueTypes();
-
- if (EEVT::isExtIntegerInVTs(NodeToApply->getExtTypes())) {
- VTs = FilterVTs(VTs, isInteger);
- } else if (EEVT::isExtFloatingPointInVTs(NodeToApply->getExtTypes())) {
- VTs = FilterVTs(VTs, isFloatingPoint);
- } else {
- VTs.clear();
- }
-
- switch (VTs.size()) {
- default: // Too many VT's to pick from.
- case 0: break; // No info yet.
- case 1:
- // Only one VT of this flavor. Cannot ever satisfy the constraints.
- return NodeToApply->UpdateNodeType(MVT::Other, TP); // throw
- case 2:
- // If we have exactly two possible types, the little operand must be the
- // small one, the big operand should be the big one. Common with
- // float/double for example.
- assert(VTs[0] < VTs[1] && "Should be sorted!");
- MadeChange |= NodeToApply->UpdateNodeType(VTs[0], TP);
- MadeChange |= BigOperand->UpdateNodeType(VTs[1], TP);
- break;
- }
- return MadeChange;
+ getOperandNum(x.SDTCisOpSmallerThanOp_Info.BigOperandNum, N, NodeInfo,
+ BResNo);
+ return NodeToApply->getExtType(ResNo).
+ EnforceSmallerThan(BigOperand->getExtType(BResNo), TP);
}
case SDTCisEltOfVec: {
- TreePatternNode *OtherOperand =
- getOperandNum(x.SDTCisEltOfVec_Info.OtherOperandNum,
- N, NumResults);
- if (OtherOperand->hasTypeSet()) {
- if (!isVector(OtherOperand->getTypeNum(0)))
- TP.error(N->getOperator()->getName() + " VT operand must be a vector!");
- EVT IVT = OtherOperand->getTypeNum(0);
- IVT = IVT.getVectorElementType();
- return NodeToApply->UpdateNodeType(IVT.getSimpleVT().SimpleTy, TP);
- }
- return false;
+ unsigned VResNo = 0;
+ TreePatternNode *VecOperand =
+ getOperandNum(x.SDTCisEltOfVec_Info.OtherOperandNum, N, NodeInfo,
+ VResNo);
+
+ // Filter vector types out of VecOperand that don't have the right element
+ // type.
+ return VecOperand->getExtType(VResNo).
+ EnforceVectorEltTypeIs(NodeToApply->getExtType(ResNo), TP);
}
}
return false;
@@ -433,6 +743,8 @@ SDNodeInfo::SDNodeInfo(Record *R) : Def(R) {
Properties |= 1 << SDNPSideEffect;
} else if (PropList[i]->getName() == "SDNPMemOperand") {
Properties |= 1 << SDNPMemOperand;
+ } else if (PropList[i]->getName() == "SDNPVariadic") {
+ Properties |= 1 << SDNPVariadic;
} else {
errs() << "Unknown SD Node property '" << PropList[i]->getName()
<< "' on node '" << R->getName() << "'!\n";
@@ -449,11 +761,12 @@ SDNodeInfo::SDNodeInfo(Record *R) : Def(R) {
/// getKnownType - If the type constraints on this node imply a fixed type
/// (e.g. all stores return void, etc), then return it as an
-/// MVT::SimpleValueType. Otherwise, return EEVT::isUnknown.
-unsigned SDNodeInfo::getKnownType() const {
+/// MVT::SimpleValueType. Otherwise, return MVT::Other.
+MVT::SimpleValueType SDNodeInfo::getKnownType(unsigned ResNo) const {
unsigned NumResults = getNumResults();
assert(NumResults <= 1 &&
"We only work with nodes with zero or one result so far!");
+ assert(ResNo == 0 && "Only handles single result nodes so far");
for (unsigned i = 0, e = TypeConstraints.size(); i != e; ++i) {
// Make sure that this applies to the correct node result.
@@ -468,7 +781,7 @@ unsigned SDNodeInfo::getKnownType() const {
return MVT::iPTR;
}
}
- return EEVT::isUnknown;
+ return MVT::Other;
}
//===----------------------------------------------------------------------===//
@@ -482,146 +795,61 @@ TreePatternNode::~TreePatternNode() {
#endif
}
-/// UpdateNodeType - Set the node type of N to VT if VT contains
-/// information. If N already contains a conflicting type, then throw an
-/// exception. This returns true if any information was updated.
-///
-bool TreePatternNode::UpdateNodeType(const std::vector<unsigned char> &ExtVTs,
- TreePattern &TP) {
- assert(!ExtVTs.empty() && "Cannot update node type with empty type vector!");
+static unsigned GetNumNodeResults(Record *Operator, CodeGenDAGPatterns &CDP) {
+ if (Operator->getName() == "set" ||
+ Operator->getName() == "implicit")
+ return 0; // All return nothing.
- if (ExtVTs[0] == EEVT::isUnknown || LHSIsSubsetOfRHS(getExtTypes(), ExtVTs))
- return false;
- if (isTypeCompletelyUnknown() || LHSIsSubsetOfRHS(ExtVTs, getExtTypes())) {
- setTypes(ExtVTs);
- return true;
- }
-
- if (getExtTypeNum(0) == MVT::iPTR || getExtTypeNum(0) == MVT::iPTRAny) {
- if (ExtVTs[0] == MVT::iPTR || ExtVTs[0] == MVT::iPTRAny ||
- ExtVTs[0] == MVT::iAny)
- return false;
- if (EEVT::isExtIntegerInVTs(ExtVTs)) {
- std::vector<unsigned char> FVTs = FilterEVTs(ExtVTs, isInteger);
- if (FVTs.size()) {
- setTypes(ExtVTs);
- return true;
- }
- }
- }
-
- // Merge vAny with iAny/fAny. The latter include vector types so keep them
- // as the more specific information.
- if (ExtVTs[0] == MVT::vAny &&
- (getExtTypeNum(0) == MVT::iAny || getExtTypeNum(0) == MVT::fAny))
- return false;
- if (getExtTypeNum(0) == MVT::vAny &&
- (ExtVTs[0] == MVT::iAny || ExtVTs[0] == MVT::fAny)) {
- setTypes(ExtVTs);
- return true;
- }
-
- if (ExtVTs[0] == MVT::iAny &&
- EEVT::isExtIntegerInVTs(getExtTypes())) {
- assert(hasTypeSet() && "should be handled above!");
- std::vector<unsigned char> FVTs = FilterEVTs(getExtTypes(), isInteger);
- if (getExtTypes() == FVTs)
- return false;
- setTypes(FVTs);
- return true;
- }
- if ((ExtVTs[0] == MVT::iPTR || ExtVTs[0] == MVT::iPTRAny) &&
- EEVT::isExtIntegerInVTs(getExtTypes())) {
- //assert(hasTypeSet() && "should be handled above!");
- std::vector<unsigned char> FVTs = FilterEVTs(getExtTypes(), isInteger);
- if (getExtTypes() == FVTs)
- return false;
- if (FVTs.size()) {
- setTypes(FVTs);
- return true;
- }
- }
- if (ExtVTs[0] == MVT::fAny &&
- EEVT::isExtFloatingPointInVTs(getExtTypes())) {
- assert(hasTypeSet() && "should be handled above!");
- std::vector<unsigned char> FVTs =
- FilterEVTs(getExtTypes(), isFloatingPoint);
- if (getExtTypes() == FVTs)
- return false;
- setTypes(FVTs);
- return true;
- }
- if (ExtVTs[0] == MVT::vAny &&
- EEVT::isExtVectorInVTs(getExtTypes())) {
- assert(hasTypeSet() && "should be handled above!");
- std::vector<unsigned char> FVTs = FilterEVTs(getExtTypes(), isVector);
- if (getExtTypes() == FVTs)
- return false;
- setTypes(FVTs);
- return true;
- }
-
- // If we know this is an int, FP, or vector type, and we are told it is a
- // specific one, take the advice.
- //
- // Similarly, we should probably set the type here to the intersection of
- // {iAny|fAny|vAny} and ExtVTs
- if ((getExtTypeNum(0) == MVT::iAny &&
- EEVT::isExtIntegerInVTs(ExtVTs)) ||
- (getExtTypeNum(0) == MVT::fAny &&
- EEVT::isExtFloatingPointInVTs(ExtVTs)) ||
- (getExtTypeNum(0) == MVT::vAny &&
- EEVT::isExtVectorInVTs(ExtVTs))) {
- setTypes(ExtVTs);
- return true;
- }
- if (getExtTypeNum(0) == MVT::iAny &&
- (ExtVTs[0] == MVT::iPTR || ExtVTs[0] == MVT::iPTRAny)) {
- setTypes(ExtVTs);
- return true;
- }
-
- if (isLeaf()) {
- dump();
- errs() << " ";
- TP.error("Type inference contradiction found in node!");
- } else {
- TP.error("Type inference contradiction found in node " +
- getOperator()->getName() + "!");
+ if (Operator->isSubClassOf("Intrinsic"))
+ return CDP.getIntrinsic(Operator).IS.RetVTs.size();
+
+ if (Operator->isSubClassOf("SDNode"))
+ return CDP.getSDNodeInfo(Operator).getNumResults();
+
+ if (Operator->isSubClassOf("PatFrag")) {
+ // If we've already parsed this pattern fragment, get it. Otherwise, handle
+ // the forward reference case where one pattern fragment references another
+ // before it is processed.
+ if (TreePattern *PFRec = CDP.getPatternFragmentIfRead(Operator))
+ return PFRec->getOnlyTree()->getNumTypes();
+
+ // Get the result tree.
+ DagInit *Tree = Operator->getValueAsDag("Fragment");
+ Record *Op = 0;
+ if (Tree && dynamic_cast<DefInit*>(Tree->getOperator()))
+ Op = dynamic_cast<DefInit*>(Tree->getOperator())->getDef();
+ assert(Op && "Invalid Fragment");
+ return GetNumNodeResults(Op, CDP);
}
- return true; // unreachable
-}
+
+ if (Operator->isSubClassOf("Instruction")) {
+ CodeGenInstruction &InstInfo = CDP.getTargetInfo().getInstruction(Operator);
-static std::string GetTypeName(unsigned char TypeID) {
- switch (TypeID) {
- case MVT::Other: return "Other";
- case MVT::iAny: return "iAny";
- case MVT::fAny: return "fAny";
- case MVT::vAny: return "vAny";
- case EEVT::isUnknown: return "isUnknown";
- case MVT::iPTR: return "iPTR";
- case MVT::iPTRAny: return "iPTRAny";
- default:
- std::string VTName = llvm::getName((MVT::SimpleValueType)TypeID);
- // Strip off EVT:: prefix if present.
- if (VTName.substr(0,5) == "MVT::")
- VTName = VTName.substr(5);
- return VTName;
+ // FIXME: Should allow access to all the results here.
+ unsigned NumDefsToAdd = InstInfo.NumDefs ? 1 : 0;
+
+ // Add on one implicit def if it has a resolvable type.
+ if (InstInfo.HasOneImplicitDefWithKnownVT(CDP.getTargetInfo()) !=MVT::Other)
+ ++NumDefsToAdd;
+ return NumDefsToAdd;
}
+
+ if (Operator->isSubClassOf("SDNodeXForm"))
+ return 1; // FIXME: Generalize SDNodeXForm
+
+ Operator->dump();
+ errs() << "Unhandled node in GetNumNodeResults\n";
+ exit(1);
}
-
void TreePatternNode::print(raw_ostream &OS) const {
- if (isLeaf()) {
+ if (isLeaf())
OS << *getLeafValue();
- } else {
+ else
OS << '(' << getOperator()->getName();
- }
-
- // FIXME: At some point we should handle printing all the value types for
- // nodes that are multiply typed.
- if (getExtTypeNum(0) != EEVT::isUnknown)
- OS << ':' << GetTypeName(getExtTypeNum(0));
+
+ for (unsigned i = 0, e = Types.size(); i != e; ++i)
+ OS << ':' << getExtType(i).getName();
if (!isLeaf()) {
if (getNumChildren() != 0) {
@@ -686,16 +914,16 @@ bool TreePatternNode::isIsomorphicTo(const TreePatternNode *N,
TreePatternNode *TreePatternNode::clone() const {
TreePatternNode *New;
if (isLeaf()) {
- New = new TreePatternNode(getLeafValue());
+ New = new TreePatternNode(getLeafValue(), getNumTypes());
} else {
std::vector<TreePatternNode*> CChildren;
CChildren.reserve(Children.size());
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
CChildren.push_back(getChild(i)->clone());
- New = new TreePatternNode(getOperator(), CChildren);
+ New = new TreePatternNode(getOperator(), CChildren, getNumTypes());
}
New->setName(getName());
- New->setTypes(getExtTypes());
+ New->Types = Types;
New->setPredicateFns(getPredicateFns());
New->setTransformFn(getTransformFn());
return New;
@@ -703,7 +931,8 @@ TreePatternNode *TreePatternNode::clone() const {
/// RemoveAllTypes - Recursively strip all the types of this tree.
void TreePatternNode::RemoveAllTypes() {
- removeTypes();
+ for (unsigned i = 0, e = Types.size(); i != e; ++i)
+ Types[i] = EEVT::TypeSet(); // Reset to unknown type.
if (isLeaf()) return;
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
getChild(i)->RemoveAllTypes();
@@ -785,7 +1014,8 @@ TreePatternNode *TreePatternNode::InlinePatternFragments(TreePattern &TP) {
}
FragTree->setName(getName());
- FragTree->UpdateNodeType(getExtTypes(), TP);
+ for (unsigned i = 0, e = Types.size(); i != e; ++i)
+ FragTree->UpdateNodeType(i, getExtType(i), TP);
// Transfer in the old predicates.
for (unsigned i = 0, e = getPredicateFns().size(); i != e; ++i)
@@ -803,47 +1033,62 @@ TreePatternNode *TreePatternNode::InlinePatternFragments(TreePattern &TP) {
/// type which should be applied to it. This will infer the type of register
/// references from the register file information, for example.
///
-static std::vector<unsigned char> getImplicitType(Record *R, bool NotRegisters,
- TreePattern &TP) {
- // Some common return values
- std::vector<unsigned char> Unknown(1, EEVT::isUnknown);
- std::vector<unsigned char> Other(1, MVT::Other);
-
- // Check to see if this is a register or a register class...
+static EEVT::TypeSet getImplicitType(Record *R, unsigned ResNo,
+ bool NotRegisters, TreePattern &TP) {
+ // Check to see if this is a register or a register class.
if (R->isSubClassOf("RegisterClass")) {
+ assert(ResNo == 0 && "Regclass ref only has one result!");
if (NotRegisters)
- return Unknown;
- const CodeGenRegisterClass &RC =
- TP.getDAGPatterns().getTargetInfo().getRegisterClass(R);
- return ConvertVTs(RC.getValueTypes());
- } else if (R->isSubClassOf("PatFrag")) {
+ return EEVT::TypeSet(); // Unknown.
+ const CodeGenTarget &T = TP.getDAGPatterns().getTargetInfo();
+ return EEVT::TypeSet(T.getRegisterClass(R).getValueTypes());
+ }
+
+ if (R->isSubClassOf("PatFrag")) {
+ assert(ResNo == 0 && "FIXME: PatFrag with multiple results?");
// Pattern fragment types will be resolved when they are inlined.
- return Unknown;
- } else if (R->isSubClassOf("Register")) {
+ return EEVT::TypeSet(); // Unknown.
+ }
+
+ if (R->isSubClassOf("Register")) {
+ assert(ResNo == 0 && "Registers only produce one result!");
if (NotRegisters)
- return Unknown;
+ return EEVT::TypeSet(); // Unknown.
const CodeGenTarget &T = TP.getDAGPatterns().getTargetInfo();
- return T.getRegisterVTs(R);
- } else if (R->isSubClassOf("ValueType") || R->isSubClassOf("CondCode")) {
+ return EEVT::TypeSet(T.getRegisterVTs(R));
+ }
+
+ if (R->isSubClassOf("SubRegIndex")) {
+ assert(ResNo == 0 && "SubRegisterIndices only produce one result!");
+ return EEVT::TypeSet();
+ }
+
+ if (R->isSubClassOf("ValueType") || R->isSubClassOf("CondCode")) {
+ assert(ResNo == 0 && "This node only has one result!");
// Using a VTSDNode or CondCodeSDNode.
- return Other;
- } else if (R->isSubClassOf("ComplexPattern")) {
+ return EEVT::TypeSet(MVT::Other, TP);
+ }
+
+ if (R->isSubClassOf("ComplexPattern")) {
+ assert(ResNo == 0 && "FIXME: ComplexPattern with multiple results?");
if (NotRegisters)
- return Unknown;
- std::vector<unsigned char>
- ComplexPat(1, TP.getDAGPatterns().getComplexPattern(R).getValueType());
- return ComplexPat;
- } else if (R->isSubClassOf("PointerLikeRegClass")) {
- Other[0] = MVT::iPTR;
- return Other;
- } else if (R->getName() == "node" || R->getName() == "srcvalue" ||
- R->getName() == "zero_reg") {
+ return EEVT::TypeSet(); // Unknown.
+ return EEVT::TypeSet(TP.getDAGPatterns().getComplexPattern(R).getValueType(),
+ TP);
+ }
+ if (R->isSubClassOf("PointerLikeRegClass")) {
+ assert(ResNo == 0 && "Regclass can only have one result!");
+ return EEVT::TypeSet(MVT::iPTR, TP);
+ }
+
+ if (R->getName() == "node" || R->getName() == "srcvalue" ||
+ R->getName() == "zero_reg") {
// Placeholder.
- return Unknown;
+ return EEVT::TypeSet(); // Unknown.
}
TP.error("Unknown node flavor used in pattern: " + R->getName());
- return Other;
+ return EEVT::TypeSet(MVT::Other, TP);
}
@@ -922,45 +1167,44 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
if (isLeaf()) {
if (DefInit *DI = dynamic_cast<DefInit*>(getLeafValue())) {
// If it's a regclass or something else known, include the type.
- return UpdateNodeType(getImplicitType(DI->getDef(), NotRegisters, TP),TP);
+ bool MadeChange = false;
+ for (unsigned i = 0, e = Types.size(); i != e; ++i)
+ MadeChange |= UpdateNodeType(i, getImplicitType(DI->getDef(), i,
+ NotRegisters, TP), TP);
+ return MadeChange;
}
if (IntInit *II = dynamic_cast<IntInit*>(getLeafValue())) {
+ assert(Types.size() == 1 && "Invalid IntInit");
+
// Int inits are always integers. :)
- bool MadeChange = UpdateNodeType(MVT::iAny, TP);
+ bool MadeChange = Types[0].EnforceInteger(TP);
- if (hasTypeSet()) {
- // At some point, it may make sense for this tree pattern to have
- // multiple types. Assert here that it does not, so we revisit this
- // code when appropriate.
- assert(getExtTypes().size() >= 1 && "TreePattern doesn't have a type!");
- MVT::SimpleValueType VT = getTypeNum(0);
- for (unsigned i = 1, e = getExtTypes().size(); i != e; ++i)
- assert(getTypeNum(i) == VT && "TreePattern has too many types!");
-
- VT = getTypeNum(0);
- if (VT != MVT::iPTR && VT != MVT::iPTRAny) {
- unsigned Size = EVT(VT).getSizeInBits();
- // Make sure that the value is representable for this type.
- if (Size < 32) {
- int Val = (II->getValue() << (32-Size)) >> (32-Size);
- if (Val != II->getValue()) {
- // If sign-extended doesn't fit, does it fit as unsigned?
- unsigned ValueMask;
- unsigned UnsignedVal;
- ValueMask = unsigned(~uint32_t(0UL) >> (32-Size));
- UnsignedVal = unsigned(II->getValue());
-
- if ((ValueMask & UnsignedVal) != UnsignedVal) {
- TP.error("Integer value '" + itostr(II->getValue())+
- "' is out of range for type '" +
- getEnumName(getTypeNum(0)) + "'!");
- }
- }
- }
- }
- }
+ if (!Types[0].isConcrete())
+ return MadeChange;
+
+ MVT::SimpleValueType VT = getType(0);
+ if (VT == MVT::iPTR || VT == MVT::iPTRAny)
+ return MadeChange;
+ unsigned Size = EVT(VT).getSizeInBits();
+ // Make sure that the value is representable for this type.
+ if (Size >= 32) return MadeChange;
+
+ int Val = (II->getValue() << (32-Size)) >> (32-Size);
+ if (Val == II->getValue()) return MadeChange;
+
+ // If sign-extended doesn't fit, does it fit as unsigned?
+ unsigned ValueMask;
+ unsigned UnsignedVal;
+ ValueMask = unsigned(~uint32_t(0UL) >> (32-Size));
+ UnsignedVal = unsigned(II->getValue());
+
+ if ((ValueMask & UnsignedVal) == UnsignedVal)
+ return MadeChange;
+
+ TP.error("Integer value '" + itostr(II->getValue())+
+ "' is out of range for type '" + getEnumName(getType(0)) + "'!");
return MadeChange;
}
return false;
@@ -968,29 +1212,30 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
// special handling for set, which isn't really an SDNode.
if (getOperator()->getName() == "set") {
- assert (getNumChildren() >= 2 && "Missing RHS of a set?");
+ assert(getNumTypes() == 0 && "Set doesn't produce a value");
+ assert(getNumChildren() >= 2 && "Missing RHS of a set?");
unsigned NC = getNumChildren();
- bool MadeChange = false;
+
+ TreePatternNode *SetVal = getChild(NC-1);
+ bool MadeChange = SetVal->ApplyTypeConstraints(TP, NotRegisters);
+
for (unsigned i = 0; i < NC-1; ++i) {
- MadeChange = getChild(i)->ApplyTypeConstraints(TP, NotRegisters);
- MadeChange |= getChild(NC-1)->ApplyTypeConstraints(TP, NotRegisters);
+ TreePatternNode *Child = getChild(i);
+ MadeChange |= Child->ApplyTypeConstraints(TP, NotRegisters);
// Types of operands must match.
- MadeChange |= getChild(i)->UpdateNodeType(getChild(NC-1)->getExtTypes(),
- TP);
- MadeChange |= getChild(NC-1)->UpdateNodeType(getChild(i)->getExtTypes(),
- TP);
- MadeChange |= UpdateNodeType(MVT::isVoid, TP);
+ MadeChange |= Child->UpdateNodeType(0, SetVal->getExtType(i), TP);
+ MadeChange |= SetVal->UpdateNodeType(i, Child->getExtType(0), TP);
}
return MadeChange;
}
- if (getOperator()->getName() == "implicit" ||
- getOperator()->getName() == "parallel") {
+ if (getOperator()->getName() == "implicit") {
+ assert(getNumTypes() == 0 && "Node doesn't produce a value");
+
bool MadeChange = false;
for (unsigned i = 0; i < getNumChildren(); ++i)
MadeChange = getChild(i)->ApplyTypeConstraints(TP, NotRegisters);
- MadeChange |= UpdateNodeType(MVT::isVoid, TP);
return MadeChange;
}
@@ -998,6 +1243,18 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
bool MadeChange = false;
MadeChange |= getChild(0)->ApplyTypeConstraints(TP, NotRegisters);
MadeChange |= getChild(1)->ApplyTypeConstraints(TP, NotRegisters);
+
+ assert(getChild(0)->getNumTypes() == 1 &&
+ getChild(1)->getNumTypes() == 1 && "Unhandled case");
+
+ // child #1 of COPY_TO_REGCLASS should be a register class. We don't care
+ // what type it gets, so if it didn't get a concrete type just give it the
+ // first viable type from the reg class.
+ if (!getChild(1)->hasTypeSet(0) &&
+ !getChild(1)->getExtType(0).isCompletelyUnknown()) {
+ MVT::SimpleValueType RCVT = getChild(1)->getExtType(0).getTypeList()[0];
+ MadeChange |= getChild(1)->UpdateNodeType(0, RCVT, TP);
+ }
return MadeChange;
}
@@ -1007,22 +1264,24 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
// Apply the result type to the node.
unsigned NumRetVTs = Int->IS.RetVTs.size();
unsigned NumParamVTs = Int->IS.ParamVTs.size();
-
+
for (unsigned i = 0, e = NumRetVTs; i != e; ++i)
- MadeChange |= UpdateNodeType(Int->IS.RetVTs[i], TP);
+ MadeChange |= UpdateNodeType(i, Int->IS.RetVTs[i], TP);
- if (getNumChildren() != NumParamVTs + NumRetVTs)
+ if (getNumChildren() != NumParamVTs + 1)
TP.error("Intrinsic '" + Int->Name + "' expects " +
- utostr(NumParamVTs + NumRetVTs - 1) + " operands, not " +
+ utostr(NumParamVTs) + " operands, not " +
utostr(getNumChildren() - 1) + " operands!");
// Apply type info to the intrinsic ID.
- MadeChange |= getChild(0)->UpdateNodeType(MVT::iPTR, TP);
+ MadeChange |= getChild(0)->UpdateNodeType(0, MVT::iPTR, TP);
- for (unsigned i = NumRetVTs, e = getNumChildren(); i != e; ++i) {
- MVT::SimpleValueType OpVT = Int->IS.ParamVTs[i - NumRetVTs];
- MadeChange |= getChild(i)->UpdateNodeType(OpVT, TP);
- MadeChange |= getChild(i)->ApplyTypeConstraints(TP, NotRegisters);
+ for (unsigned i = 0, e = getNumChildren()-1; i != e; ++i) {
+ MadeChange |= getChild(i+1)->ApplyTypeConstraints(TP, NotRegisters);
+
+ MVT::SimpleValueType OpVT = Int->IS.ParamVTs[i];
+ assert(getChild(i+1)->getNumTypes() == 1 && "Unhandled case");
+ MadeChange |= getChild(i+1)->UpdateNodeType(0, OpVT, TP);
}
return MadeChange;
}
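
For context on the intrinsic handling above: the intrinsic ID travels as child #0 (typed iPTR), the declared parameters follow as children #1..N, and the result types come straight from RetVTs, which is why the operand-count check now compares against NumParamVTs + 1. A rough standalone model of that layout (invented struct and helper names, not the TableGen types):

    #include <cassert>
    #include <vector>

    enum SimpleVT { iPTR, i32, f32 };

    struct IntrinsicSig {
      std::vector<SimpleVT> RetVTs;    // node results
      std::vector<SimpleVT> ParamVTs;  // operands after the ID child
    };

    // Child #0 is the intrinsic ID, so a well-formed node has one child per
    // declared parameter plus that ID child.
    static bool childCountOK(const IntrinsicSig &IS, unsigned NumChildren) {
      return NumChildren == IS.ParamVTs.size() + 1;
    }

    int main() {
      IntrinsicSig IS{{i32}, {i32, f32}};
      assert(childCountOK(IS, 3));     // ID + two params
      assert(!childCountOK(IS, 2));    // a param is missing
      return 0;
    }
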
@@ -1030,50 +1289,66 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
if (getOperator()->isSubClassOf("SDNode")) {
const SDNodeInfo &NI = CDP.getSDNodeInfo(getOperator());
+ // Check that the number of operands is sane. Negative operands -> varargs.
+ if (NI.getNumOperands() >= 0 &&
+ getNumChildren() != (unsigned)NI.getNumOperands())
+ TP.error(getOperator()->getName() + " node requires exactly " +
+ itostr(NI.getNumOperands()) + " operands!");
+
bool MadeChange = NI.ApplyTypeConstraints(this, TP);
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
MadeChange |= getChild(i)->ApplyTypeConstraints(TP, NotRegisters);
- // Branch, etc. do not produce results and top-level forms in instr pattern
- // must have void types.
- if (NI.getNumResults() == 0)
- MadeChange |= UpdateNodeType(MVT::isVoid, TP);
-
- return MadeChange;
+ return MadeChange;
}
if (getOperator()->isSubClassOf("Instruction")) {
const DAGInstruction &Inst = CDP.getInstruction(getOperator());
- bool MadeChange = false;
- unsigned NumResults = Inst.getNumResults();
+ CodeGenInstruction &InstInfo =
+ CDP.getTargetInfo().getInstruction(getOperator());
- assert(NumResults <= 1 &&
- "Only supports zero or one result instrs!");
+ bool MadeChange = false;
- CodeGenInstruction &InstInfo =
- CDP.getTargetInfo().getInstruction(getOperator()->getName());
- // Apply the result type to the node
- if (NumResults == 0 || InstInfo.NumDefs == 0) {
- MadeChange = UpdateNodeType(MVT::isVoid, TP);
- } else {
- Record *ResultNode = Inst.getResult(0);
+ // Apply the result types to the node, these come from the things in the
+ // (outs) list of the instruction.
+ // FIXME: Cap at one result so far.
+ unsigned NumResultsToAdd = InstInfo.NumDefs ? 1 : 0;
+ for (unsigned ResNo = 0; ResNo != NumResultsToAdd; ++ResNo) {
+ Record *ResultNode = Inst.getResult(ResNo);
if (ResultNode->isSubClassOf("PointerLikeRegClass")) {
- std::vector<unsigned char> VT;
- VT.push_back(MVT::iPTR);
- MadeChange = UpdateNodeType(VT, TP);
+ MadeChange |= UpdateNodeType(ResNo, MVT::iPTR, TP);
} else if (ResultNode->getName() == "unknown") {
- std::vector<unsigned char> VT;
- VT.push_back(EEVT::isUnknown);
- MadeChange = UpdateNodeType(VT, TP);
+ // Nothing to do.
} else {
assert(ResultNode->isSubClassOf("RegisterClass") &&
"Operands should be register classes!");
-
const CodeGenRegisterClass &RC =
CDP.getTargetInfo().getRegisterClass(ResultNode);
- MadeChange = UpdateNodeType(ConvertVTs(RC.getValueTypes()), TP);
+ MadeChange |= UpdateNodeType(ResNo, RC.getValueTypes(), TP);
}
}
+
+ // If the instruction has implicit defs, we apply the first one as a result.
+ // FIXME: This sucks, it should apply all implicit defs.
+ if (!InstInfo.ImplicitDefs.empty()) {
+ unsigned ResNo = NumResultsToAdd;
+
+ // FIXME: Generalize to multiple possible types and multiple possible
+ // ImplicitDefs.
+ MVT::SimpleValueType VT =
+ InstInfo.HasOneImplicitDefWithKnownVT(CDP.getTargetInfo());
+
+ if (VT != MVT::Other)
+ MadeChange |= UpdateNodeType(ResNo, VT, TP);
+ }
+
+ // If this is an INSERT_SUBREG, constrain the source and destination VTs to
+ // be the same.
+ if (getOperator()->getName() == "INSERT_SUBREG") {
+ assert(getChild(0)->getNumTypes() == 1 && "FIXME: Unhandled");
+ MadeChange |= UpdateNodeType(0, getChild(0)->getExtType(0), TP);
+ MadeChange |= getChild(0)->UpdateNodeType(0, getExtType(0), TP);
+ }
unsigned ChildNo = 0;
for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i) {
@@ -1094,17 +1369,19 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
MVT::SimpleValueType VT;
TreePatternNode *Child = getChild(ChildNo++);
+ unsigned ChildResNo = 0; // Instructions always use res #0 of their op.
+
if (OperandNode->isSubClassOf("RegisterClass")) {
const CodeGenRegisterClass &RC =
CDP.getTargetInfo().getRegisterClass(OperandNode);
- MadeChange |= Child->UpdateNodeType(ConvertVTs(RC.getValueTypes()), TP);
+ MadeChange |= Child->UpdateNodeType(ChildResNo, RC.getValueTypes(), TP);
} else if (OperandNode->isSubClassOf("Operand")) {
VT = getValueType(OperandNode->getValueAsDef("Type"));
- MadeChange |= Child->UpdateNodeType(VT, TP);
+ MadeChange |= Child->UpdateNodeType(ChildResNo, VT, TP);
} else if (OperandNode->isSubClassOf("PointerLikeRegClass")) {
- MadeChange |= Child->UpdateNodeType(MVT::iPTR, TP);
+ MadeChange |= Child->UpdateNodeType(ChildResNo, MVT::iPTR, TP);
} else if (OperandNode->getName() == "unknown") {
- MadeChange |= Child->UpdateNodeType(EEVT::isUnknown, TP);
+ // Nothing to do.
} else {
assert(0 && "Unknown operand type!");
abort();
@@ -1126,15 +1403,20 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
TP.error("Node transform '" + getOperator()->getName() +
"' requires one operand!");
+ bool MadeChange = getChild(0)->ApplyTypeConstraints(TP, NotRegisters);
+
+
// If either the output or input of the xform does not have exact
// type info, we assume they must be the same. Otherwise, it is perfectly
// legal to transform from one type to a completely different type.
+#if 0
if (!hasTypeSet() || !getChild(0)->hasTypeSet()) {
- bool MadeChange = UpdateNodeType(getChild(0)->getExtTypes(), TP);
- MadeChange |= getChild(0)->UpdateNodeType(getExtTypes(), TP);
+ bool MadeChange = UpdateNodeType(getChild(0)->getExtType(), TP);
+ MadeChange |= getChild(0)->UpdateNodeType(getExtType(), TP);
return MadeChange;
}
- return false;
+#endif
+ return MadeChange;
}
/// OnlyOnRHSOfCommutative - Return true if this value is only allowed on the
@@ -1194,15 +1476,15 @@ bool TreePatternNode::canPatternMatch(std::string &Reason,
TreePattern::TreePattern(Record *TheRec, ListInit *RawPat, bool isInput,
CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp){
- isInputPattern = isInput;
- for (unsigned i = 0, e = RawPat->getSize(); i != e; ++i)
- Trees.push_back(ParseTreePattern((DagInit*)RawPat->getElement(i)));
+ isInputPattern = isInput;
+ for (unsigned i = 0, e = RawPat->getSize(); i != e; ++i)
+ Trees.push_back(ParseTreePattern(RawPat->getElement(i), ""));
}
TreePattern::TreePattern(Record *TheRec, DagInit *Pat, bool isInput,
CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp){
isInputPattern = isInput;
- Trees.push_back(ParseTreePattern(Pat));
+ Trees.push_back(ParseTreePattern(Pat, ""));
}
TreePattern::TreePattern(Record *TheRec, TreePatternNode *Pat, bool isInput,
@@ -1211,14 +1493,68 @@ TreePattern::TreePattern(Record *TheRec, TreePatternNode *Pat, bool isInput,
Trees.push_back(Pat);
}
-
-
void TreePattern::error(const std::string &Msg) const {
dump();
throw TGError(TheRecord->getLoc(), "In " + TheRecord->getName() + ": " + Msg);
}
-TreePatternNode *TreePattern::ParseTreePattern(DagInit *Dag) {
+void TreePattern::ComputeNamedNodes() {
+ for (unsigned i = 0, e = Trees.size(); i != e; ++i)
+ ComputeNamedNodes(Trees[i]);
+}
+
+void TreePattern::ComputeNamedNodes(TreePatternNode *N) {
+ if (!N->getName().empty())
+ NamedNodes[N->getName()].push_back(N);
+
+ for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i)
+ ComputeNamedNodes(N->getChild(i));
+}
+
+
+TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){
+ if (DefInit *DI = dynamic_cast<DefInit*>(TheInit)) {
+ Record *R = DI->getDef();
+
+ // Direct reference to a leaf DagNode or PatFrag? Turn it into a
+ // TreePatternNode of its own. For example:
+ /// (foo GPR, imm) -> (foo GPR, (imm))
+ if (R->isSubClassOf("SDNode") || R->isSubClassOf("PatFrag"))
+ return ParseTreePattern(new DagInit(DI, "",
+ std::vector<std::pair<Init*, std::string> >()),
+ OpName);
+
+ // Input argument?
+ TreePatternNode *Res = new TreePatternNode(DI, 1);
+ if (R->getName() == "node" && !OpName.empty()) {
+ if (OpName.empty())
+ error("'node' argument requires a name to match with operand list");
+ Args.push_back(OpName);
+ }
+
+ Res->setName(OpName);
+ return Res;
+ }
+
+ if (IntInit *II = dynamic_cast<IntInit*>(TheInit)) {
+ if (!OpName.empty())
+ error("Constant int argument should not have a name!");
+ return new TreePatternNode(II, 1);
+ }
+
+ if (BitsInit *BI = dynamic_cast<BitsInit*>(TheInit)) {
+ // Turn this into an IntInit.
+ Init *II = BI->convertInitializerTo(new IntRecTy());
+ if (II == 0 || !dynamic_cast<IntInit*>(II))
+ error("Bits value must be constants!");
+ return ParseTreePattern(II, OpName);
+ }
+
+ DagInit *Dag = dynamic_cast<DagInit*>(TheInit);
+ if (!Dag) {
+ TheInit->dump();
+ error("Pattern has unexpected init kind!");
+ }
DefInit *OpDef = dynamic_cast<DefInit*>(Dag->getOperator());
if (!OpDef) error("Pattern has unexpected operator type!");
Record *Operator = OpDef->getDef();
@@ -1229,41 +1565,14 @@ TreePatternNode *TreePattern::ParseTreePattern(DagInit *Dag) {
if (Dag->getNumArgs() != 1)
error("Type cast only takes one operand!");
- Init *Arg = Dag->getArg(0);
- TreePatternNode *New;
- if (DefInit *DI = dynamic_cast<DefInit*>(Arg)) {
- Record *R = DI->getDef();
- if (R->isSubClassOf("SDNode") || R->isSubClassOf("PatFrag")) {
- Dag->setArg(0, new DagInit(DI, "",
- std::vector<std::pair<Init*, std::string> >()));
- return ParseTreePattern(Dag);
- }
- New = new TreePatternNode(DI);
- } else if (DagInit *DI = dynamic_cast<DagInit*>(Arg)) {
- New = ParseTreePattern(DI);
- } else if (IntInit *II = dynamic_cast<IntInit*>(Arg)) {
- New = new TreePatternNode(II);
- if (!Dag->getArgName(0).empty())
- error("Constant int argument should not have a name!");
- } else if (BitsInit *BI = dynamic_cast<BitsInit*>(Arg)) {
- // Turn this into an IntInit.
- Init *II = BI->convertInitializerTo(new IntRecTy());
- if (II == 0 || !dynamic_cast<IntInit*>(II))
- error("Bits value must be constants!");
-
- New = new TreePatternNode(dynamic_cast<IntInit*>(II));
- if (!Dag->getArgName(0).empty())
- error("Constant int argument should not have a name!");
- } else {
- Arg->dump();
- error("Unknown leaf value for tree pattern!");
- return 0;
- }
+ TreePatternNode *New = ParseTreePattern(Dag->getArg(0), Dag->getArgName(0));
// Apply the type cast.
- New->UpdateNodeType(getValueType(Operator), *this);
- if (New->getNumChildren() == 0)
- New->setName(Dag->getArgName(0));
+ assert(New->getNumTypes() == 1 && "FIXME: Unhandled");
+ New->UpdateNodeType(0, getValueType(Operator), *this);
+
+ if (!OpName.empty())
+ error("ValueType cast should not have a name!");
return New;
}
@@ -1274,65 +1583,38 @@ TreePatternNode *TreePattern::ParseTreePattern(DagInit *Dag) {
!Operator->isSubClassOf("SDNodeXForm") &&
!Operator->isSubClassOf("Intrinsic") &&
Operator->getName() != "set" &&
- Operator->getName() != "implicit" &&
- Operator->getName() != "parallel")
+ Operator->getName() != "implicit")
error("Unrecognized node '" + Operator->getName() + "'!");
// Check to see if this is something that is illegal in an input pattern.
- if (isInputPattern && (Operator->isSubClassOf("Instruction") ||
- Operator->isSubClassOf("SDNodeXForm")))
- error("Cannot use '" + Operator->getName() + "' in an input pattern!");
+ if (isInputPattern) {
+ if (Operator->isSubClassOf("Instruction") ||
+ Operator->isSubClassOf("SDNodeXForm"))
+ error("Cannot use '" + Operator->getName() + "' in an input pattern!");
+ } else {
+ if (Operator->isSubClassOf("Intrinsic"))
+ error("Cannot use '" + Operator->getName() + "' in an output pattern!");
+
+ if (Operator->isSubClassOf("SDNode") &&
+ Operator->getName() != "imm" &&
+ Operator->getName() != "fpimm" &&
+ Operator->getName() != "tglobaltlsaddr" &&
+ Operator->getName() != "tconstpool" &&
+ Operator->getName() != "tjumptable" &&
+ Operator->getName() != "tframeindex" &&
+ Operator->getName() != "texternalsym" &&
+ Operator->getName() != "tblockaddress" &&
+ Operator->getName() != "tglobaladdr" &&
+ Operator->getName() != "bb" &&
+ Operator->getName() != "vt")
+ error("Cannot use '" + Operator->getName() + "' in an output pattern!");
+ }
std::vector<TreePatternNode*> Children;
-
- for (unsigned i = 0, e = Dag->getNumArgs(); i != e; ++i) {
- Init *Arg = Dag->getArg(i);
- if (DagInit *DI = dynamic_cast<DagInit*>(Arg)) {
- Children.push_back(ParseTreePattern(DI));
- if (Children.back()->getName().empty())
- Children.back()->setName(Dag->getArgName(i));
- } else if (DefInit *DefI = dynamic_cast<DefInit*>(Arg)) {
- Record *R = DefI->getDef();
- // Direct reference to a leaf DagNode or PatFrag? Turn it into a
- // TreePatternNode if its own.
- if (R->isSubClassOf("SDNode") || R->isSubClassOf("PatFrag")) {
- Dag->setArg(i, new DagInit(DefI, "",
- std::vector<std::pair<Init*, std::string> >()));
- --i; // Revisit this node...
- } else {
- TreePatternNode *Node = new TreePatternNode(DefI);
- Node->setName(Dag->getArgName(i));
- Children.push_back(Node);
-
- // Input argument?
- if (R->getName() == "node") {
- if (Dag->getArgName(i).empty())
- error("'node' argument requires a name to match with operand list");
- Args.push_back(Dag->getArgName(i));
- }
- }
- } else if (IntInit *II = dynamic_cast<IntInit*>(Arg)) {
- TreePatternNode *Node = new TreePatternNode(II);
- if (!Dag->getArgName(i).empty())
- error("Constant int argument should not have a name!");
- Children.push_back(Node);
- } else if (BitsInit *BI = dynamic_cast<BitsInit*>(Arg)) {
- // Turn this into an IntInit.
- Init *II = BI->convertInitializerTo(new IntRecTy());
- if (II == 0 || !dynamic_cast<IntInit*>(II))
- error("Bits value must be constants!");
-
- TreePatternNode *Node = new TreePatternNode(dynamic_cast<IntInit*>(II));
- if (!Dag->getArgName(i).empty())
- error("Constant int argument should not have a name!");
- Children.push_back(Node);
- } else {
- errs() << '"';
- Arg->dump();
- errs() << "\": ";
- error("Unknown leaf value for tree pattern!");
- }
- }
+
+ // Parse all the operands.
+ for (unsigned i = 0, e = Dag->getNumArgs(); i != e; ++i)
+ Children.push_back(ParseTreePattern(Dag->getArg(i), Dag->getArgName(i)));
// If the operator is an intrinsic, then this is just syntactic sugar for
// (intrinsic_* <number>, ..children..). Pick the right intrinsic node, and
@@ -1343,34 +1625,127 @@ TreePatternNode *TreePattern::ParseTreePattern(DagInit *Dag) {
// If this intrinsic returns void, it must have side-effects and thus a
// chain.
- if (Int.IS.RetVTs[0] == MVT::isVoid) {
+ if (Int.IS.RetVTs.empty())
Operator = getDAGPatterns().get_intrinsic_void_sdnode();
- } else if (Int.ModRef != CodeGenIntrinsic::NoMem) {
+ else if (Int.ModRef != CodeGenIntrinsic::NoMem)
// Has side-effects, requires chain.
Operator = getDAGPatterns().get_intrinsic_w_chain_sdnode();
- } else {
- // Otherwise, no chain.
+ else // Otherwise, no chain.
Operator = getDAGPatterns().get_intrinsic_wo_chain_sdnode();
- }
- TreePatternNode *IIDNode = new TreePatternNode(new IntInit(IID));
+ TreePatternNode *IIDNode = new TreePatternNode(new IntInit(IID), 1);
Children.insert(Children.begin(), IIDNode);
}
- TreePatternNode *Result = new TreePatternNode(Operator, Children);
- Result->setName(Dag->getName());
+ unsigned NumResults = GetNumNodeResults(Operator, CDP);
+ TreePatternNode *Result = new TreePatternNode(Operator, Children, NumResults);
+ Result->setName(OpName);
+
+ if (!Dag->getName().empty()) {
+ assert(Result->getName().empty());
+ Result->setName(Dag->getName());
+ }
return Result;
}
+/// SimplifyTree - See if we can simplify this tree to eliminate something that
+/// will never match in favor of something obvious that will. This is here
+/// strictly as a convenience to target authors because it allows them to write
+/// more type generic things and have useless type casts fold away.
+///
+/// This returns true if any change is made.
+static bool SimplifyTree(TreePatternNode *&N) {
+ if (N->isLeaf())
+ return false;
+
+ // If we have a bitconvert with a resolved type and if the source and
+ // destination types are the same, then the bitconvert is useless, remove it.
+ if (N->getOperator()->getName() == "bitconvert" &&
+ N->getExtType(0).isConcrete() &&
+ N->getExtType(0) == N->getChild(0)->getExtType(0) &&
+ N->getName().empty()) {
+ N = N->getChild(0);
+ SimplifyTree(N);
+ return true;
+ }
+
+ // Walk all children.
+ bool MadeChange = false;
+ for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i) {
+ TreePatternNode *Child = N->getChild(i);
+ MadeChange |= SimplifyTree(Child);
+ N->setChild(i, Child);
+ }
+ return MadeChange;
+}
+
+
+
/// InferAllTypes - Infer/propagate as many types throughout the expression
/// patterns as possible. Return true if all types are inferred, false
/// otherwise. Throw an exception if a type contradiction is found.
-bool TreePattern::InferAllTypes() {
+bool TreePattern::
+InferAllTypes(const StringMap<SmallVector<TreePatternNode*,1> > *InNamedTypes) {
+ if (NamedNodes.empty())
+ ComputeNamedNodes();
+
bool MadeChange = true;
while (MadeChange) {
MadeChange = false;
- for (unsigned i = 0, e = Trees.size(); i != e; ++i)
+ for (unsigned i = 0, e = Trees.size(); i != e; ++i) {
MadeChange |= Trees[i]->ApplyTypeConstraints(*this, false);
+ MadeChange |= SimplifyTree(Trees[i]);
+ }
+
+ // If there are constraints on our named nodes, apply them.
+ for (StringMap<SmallVector<TreePatternNode*,1> >::iterator
+ I = NamedNodes.begin(), E = NamedNodes.end(); I != E; ++I) {
+ SmallVectorImpl<TreePatternNode*> &Nodes = I->second;
+
+ // If we have input named node types, propagate their types to the named
+ // values here.
+ if (InNamedTypes) {
+ // FIXME: Should be error?
+ assert(InNamedTypes->count(I->getKey()) &&
+ "Named node in output pattern but not input pattern?");
+
+ const SmallVectorImpl<TreePatternNode*> &InNodes =
+ InNamedTypes->find(I->getKey())->second;
+
+ // The input types should be fully resolved by now.
+ for (unsigned i = 0, e = Nodes.size(); i != e; ++i) {
+ // If this node is a register class, and it is the root of the pattern
+ // then we're mapping something onto an input register. We allow
+ // changing the type of the input register in this case. This allows
+ // us to match things like:
+ // def : Pat<(v1i64 (bitconvert(v2i32 DPR:$src))), (v1i64 DPR:$src)>;
+ if (Nodes[i] == Trees[0] && Nodes[i]->isLeaf()) {
+ DefInit *DI = dynamic_cast<DefInit*>(Nodes[i]->getLeafValue());
+ if (DI && DI->getDef()->isSubClassOf("RegisterClass"))
+ continue;
+ }
+
+ assert(Nodes[i]->getNumTypes() == 1 &&
+ InNodes[0]->getNumTypes() == 1 &&
+ "FIXME: cannot name multiple result nodes yet");
+ MadeChange |= Nodes[i]->UpdateNodeType(0, InNodes[0]->getExtType(0),
+ *this);
+ }
+ }
+
+ // If there are multiple nodes with the same name, they must all have the
+ // same type.
+ if (I->second.size() > 1) {
+ for (unsigned i = 0, e = Nodes.size()-1; i != e; ++i) {
+ TreePatternNode *N1 = Nodes[i], *N2 = Nodes[i+1];
+ assert(N1->getNumTypes() == 1 && N2->getNumTypes() == 1 &&
+ "FIXME: cannot name multiple result nodes yet");
+
+ MadeChange |= N1->UpdateNodeType(0, N2->getExtType(0), *this);
+ MadeChange |= N2->UpdateNodeType(0, N1->getExtType(0), *this);
+ }
+ }
+ }
}
bool HasUnresolvedTypes = false;
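
The SimplifyTree helper added above is small but easy to misread; the essential move is that a bitconvert whose resolved type already matches its operand's type is dropped in favour of the operand. A stripped-down model of just that folding (a hand-rolled Node struct and string "types", nothing like the real TreePatternNode):

    #include <cassert>
    #include <memory>
    #include <string>

    struct Node {
      std::string Op;                // "" for leaves
      std::string Type;              // "" if not yet resolved
      std::shared_ptr<Node> Child;   // single-operand case only
    };

    // Fold away a bitconvert whose type equals its operand's type.
    static bool simplify(std::shared_ptr<Node> &N) {
      if (!N || N->Op.empty()) return false;
      if (N->Op == "bitconvert" && !N->Type.empty() && N->Child &&
          N->Type == N->Child->Type) {
        N = N->Child;                // drop the no-op conversion
        simplify(N);
        return true;
      }
      return N->Child ? simplify(N->Child) : false;
    }

    int main() {
      auto Leaf = std::make_shared<Node>(Node{"", "i32", nullptr});
      auto Root = std::make_shared<Node>(Node{"bitconvert", "i32", Leaf});
      assert(simplify(Root) && Root == Leaf);
      return 0;
    }
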
@@ -1622,16 +1997,13 @@ void CodeGenDAGPatterns::ParseDefaultOperands() {
/// HandleUse - Given "Pat" a leaf in the pattern, check to see if it is an
/// instruction input. Return true if this is a real use.
static bool HandleUse(TreePattern *I, TreePatternNode *Pat,
- std::map<std::string, TreePatternNode*> &InstInputs,
- std::vector<Record*> &InstImpInputs) {
+ std::map<std::string, TreePatternNode*> &InstInputs) {
// No name -> not interesting.
if (Pat->getName().empty()) {
if (Pat->isLeaf()) {
DefInit *DI = dynamic_cast<DefInit*>(Pat->getLeafValue());
if (DI && DI->getDef()->isSubClassOf("RegisterClass"))
I->error("Input " + DI->getDef()->getName() + " must be named!");
- else if (DI && DI->getDef()->isSubClassOf("Register"))
- InstImpInputs.push_back(DI->getDef());
}
return false;
}
@@ -1677,10 +2049,9 @@ void CodeGenDAGPatterns::
FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat,
std::map<std::string, TreePatternNode*> &InstInputs,
std::map<std::string, TreePatternNode*>&InstResults,
- std::vector<Record*> &InstImpInputs,
std::vector<Record*> &InstImpResults) {
if (Pat->isLeaf()) {
- bool isUse = HandleUse(I, Pat, InstInputs, InstImpInputs);
+ bool isUse = HandleUse(I, Pat, InstInputs);
if (!isUse && Pat->getTransformFn())
I->error("Cannot specify a transform function for a non-input value!");
return;
@@ -1704,15 +2075,15 @@ FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat,
// If this is not a set, verify that the children nodes are not void typed,
// and recurse.
for (unsigned i = 0, e = Pat->getNumChildren(); i != e; ++i) {
- if (Pat->getChild(i)->getExtTypeNum(0) == MVT::isVoid)
+ if (Pat->getChild(i)->getNumTypes() == 0)
I->error("Cannot have void nodes inside of patterns!");
FindPatternInputsAndOutputs(I, Pat->getChild(i), InstInputs, InstResults,
- InstImpInputs, InstImpResults);
+ InstImpResults);
}
// If this is a non-leaf node with no children, treat it basically as if
// it were a leaf. This handles nodes like (imm).
- bool isUse = HandleUse(I, Pat, InstInputs, InstImpInputs);
+ bool isUse = HandleUse(I, Pat, InstInputs);
if (!isUse && Pat->getTransformFn())
I->error("Cannot specify a transform function for a non-input value!");
@@ -1753,8 +2124,7 @@ FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat,
// Verify and collect info from the computation.
FindPatternInputsAndOutputs(I, Pat->getChild(NumDests),
- InstInputs, InstResults,
- InstImpInputs, InstImpResults);
+ InstInputs, InstResults, InstImpResults);
}
//===----------------------------------------------------------------------===//
@@ -1766,10 +2136,12 @@ class InstAnalyzer {
bool &mayStore;
bool &mayLoad;
bool &HasSideEffects;
+ bool &IsVariadic;
public:
InstAnalyzer(const CodeGenDAGPatterns &cdp,
- bool &maystore, bool &mayload, bool &hse)
- : CDP(cdp), mayStore(maystore), mayLoad(mayload), HasSideEffects(hse){
+ bool &maystore, bool &mayload, bool &hse, bool &isv)
+ : CDP(cdp), mayStore(maystore), mayLoad(mayload), HasSideEffects(hse),
+ IsVariadic(isv) {
}
/// Analyze - Analyze the specified instruction, returning true if the
@@ -1818,16 +2190,17 @@ private:
if (OpInfo.hasProperty(SDNPMayStore)) mayStore = true;
if (OpInfo.hasProperty(SDNPMayLoad)) mayLoad = true;
if (OpInfo.hasProperty(SDNPSideEffect)) HasSideEffects = true;
+ if (OpInfo.hasProperty(SDNPVariadic)) IsVariadic = true;
if (const CodeGenIntrinsic *IntInfo = N->getIntrinsicInfo(CDP)) {
// If this is an intrinsic, analyze it.
if (IntInfo->ModRef >= CodeGenIntrinsic::ReadArgMem)
mayLoad = true;// These may load memory.
- if (IntInfo->ModRef >= CodeGenIntrinsic::WriteArgMem)
+ if (IntInfo->ModRef >= CodeGenIntrinsic::ReadWriteArgMem)
mayStore = true;// Intrinsics that can write to memory are 'mayStore'.
- if (IntInfo->ModRef >= CodeGenIntrinsic::WriteMem)
+ if (IntInfo->ModRef >= CodeGenIntrinsic::ReadWriteMem)
// WriteMem intrinsics can have other strange effects.
HasSideEffects = true;
}
@@ -1837,12 +2210,13 @@ private:
static void InferFromPattern(const CodeGenInstruction &Inst,
bool &MayStore, bool &MayLoad,
- bool &HasSideEffects,
+ bool &HasSideEffects, bool &IsVariadic,
const CodeGenDAGPatterns &CDP) {
- MayStore = MayLoad = HasSideEffects = false;
+ MayStore = MayLoad = HasSideEffects = IsVariadic = false;
bool HadPattern =
- InstAnalyzer(CDP, MayStore, MayLoad, HasSideEffects).Analyze(Inst.TheDef);
+ InstAnalyzer(CDP, MayStore, MayLoad, HasSideEffects, IsVariadic)
+ .Analyze(Inst.TheDef);
// InstAnalyzer only correctly analyzes mayStore/mayLoad so far.
if (Inst.mayStore) { // If the .td file explicitly sets mayStore, use it.
@@ -1880,6 +2254,9 @@ static void InferFromPattern(const CodeGenInstruction &Inst,
"which already inferred this.\n", Inst.TheDef->getName().c_str());
HasSideEffects = true;
}
+
+ if (Inst.isVariadic)
+ IsVariadic = true; // Can warn if we want.
}
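
A note on the InferFromPattern/InstAnalyzer change above: the pattern walk simply ORs each node's SDNP properties into the instruction flags, the new IsVariadic output is treated the same way, and an explicit flag on the .td record still takes precedence. Sketched very loosely (all names invented):

    #include <cassert>

    struct NodeProps { bool MayStore, MayLoad, SideEffect, Variadic; };
    struct InstFlags { bool mayStore = false, mayLoad = false,
                            hasSideEffects = false, isVariadic = false; };

    // OR one pattern node's properties into the accumulated instruction flags.
    static void accumulate(InstFlags &F, const NodeProps &P) {
      F.mayStore       |= P.MayStore;
      F.mayLoad        |= P.MayLoad;
      F.hasSideEffects |= P.SideEffect;
      F.isVariadic     |= P.Variadic;
    }

    int main() {
      InstFlags F;
      accumulate(F, {false, true, false, false});   // a load-like node
      accumulate(F, {false, false, false, true});   // a variadic node
      bool ExplicitMayStore = true;                 // set in the .td file
      if (ExplicitMayStore) F.mayStore = true;      // explicit flag wins
      assert(F.mayLoad && F.isVariadic && F.mayStore && !F.hasSideEffects);
      return 0;
    }
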
/// ParseInstructions - Parse all of the instructions, inlining and resolving
@@ -1901,7 +2278,7 @@ void CodeGenDAGPatterns::ParseInstructions() {
std::vector<Record*> Results;
std::vector<Record*> Operands;
- CodeGenInstruction &InstInfo =Target.getInstruction(Instrs[i]->getName());
+ CodeGenInstruction &InstInfo = Target.getInstruction(Instrs[i]);
if (InstInfo.OperandList.size() != 0) {
if (InstInfo.NumDefs == 0) {
@@ -1920,10 +2297,8 @@ void CodeGenDAGPatterns::ParseInstructions() {
// Create and insert the instruction.
std::vector<Record*> ImpResults;
- std::vector<Record*> ImpOperands;
Instructions.insert(std::make_pair(Instrs[i],
- DAGInstruction(0, Results, Operands, ImpResults,
- ImpOperands)));
+ DAGInstruction(0, Results, Operands, ImpResults)));
continue; // no pattern.
}
@@ -1945,20 +2320,19 @@ void CodeGenDAGPatterns::ParseInstructions() {
// in the instruction, including what reg class they are.
std::map<std::string, TreePatternNode*> InstResults;
- std::vector<Record*> InstImpInputs;
std::vector<Record*> InstImpResults;
// Verify that the top-level forms in the instruction are of void type, and
// fill in the InstResults map.
for (unsigned j = 0, e = I->getNumTrees(); j != e; ++j) {
TreePatternNode *Pat = I->getTree(j);
- if (Pat->getExtTypeNum(0) != MVT::isVoid)
+ if (Pat->getNumTypes() != 0)
I->error("Top-level forms in instruction pattern should have"
" void types");
// Find inputs and outputs, and verify the structure of the uses/defs.
FindPatternInputsAndOutputs(I, Pat, InstInputs, InstResults,
- InstImpInputs, InstImpResults);
+ InstImpResults);
}
// Now that we have inputs and outputs of the pattern, inspect the operands
@@ -1968,11 +2342,11 @@ void CodeGenDAGPatterns::ParseInstructions() {
// Parse the operands list from the (ops) list, validating it.
assert(I->getArgList().empty() && "Args list should still be empty here!");
- CodeGenInstruction &CGI = Target.getInstruction(Instrs[i]->getName());
+ CodeGenInstruction &CGI = Target.getInstruction(Instrs[i]);
// Check that all of the results occur first in the list.
std::vector<Record*> Results;
- TreePatternNode *Res0Node = NULL;
+ TreePatternNode *Res0Node = 0;
for (unsigned i = 0; i != NumResults; ++i) {
if (i == CGI.OperandList.size())
I->error("'" + InstResults.begin()->first +
@@ -2050,7 +2424,7 @@ void CodeGenDAGPatterns::ParseInstructions() {
OpNode->setTransformFn(0);
std::vector<TreePatternNode*> Children;
Children.push_back(OpNode);
- OpNode = new TreePatternNode(Xform, Children);
+ OpNode = new TreePatternNode(Xform, Children, OpNode->getNumTypes());
}
ResultNodeOperands.push_back(OpNode);
@@ -2061,22 +2435,22 @@ void CodeGenDAGPatterns::ParseInstructions() {
" occurs in pattern but not in operands list!");
TreePatternNode *ResultPattern =
- new TreePatternNode(I->getRecord(), ResultNodeOperands);
+ new TreePatternNode(I->getRecord(), ResultNodeOperands,
+ GetNumNodeResults(I->getRecord(), *this));
// Copy fully inferred output node type to instruction result pattern.
- if (NumResults > 0)
- ResultPattern->setTypes(Res0Node->getExtTypes());
+ for (unsigned i = 0; i != NumResults; ++i)
+ ResultPattern->setType(i, Res0Node->getExtType(i));
// Create and insert the instruction.
- // FIXME: InstImpResults and InstImpInputs should not be part of
- // DAGInstruction.
- DAGInstruction TheInst(I, Results, Operands, InstImpResults, InstImpInputs);
+ // FIXME: InstImpResults should not be part of DAGInstruction.
+ DAGInstruction TheInst(I, Results, Operands, InstImpResults);
Instructions.insert(std::make_pair(I->getRecord(), TheInst));
// Use a temporary tree pattern to infer all types and make sure that the
// constructed result is correct. This depends on the instruction already
// being inserted into the Instructions map.
TreePattern Temp(I->getRecord(), ResultPattern, false, *this);
- Temp.InferAllTypes();
+ Temp.InferAllTypes(&I->getNamedNodesMap());
DAGInstruction &TheInsertedInst = Instructions.find(I->getRecord())->second;
TheInsertedInst.setResultPattern(Temp.getOnlyTree());
@@ -2165,24 +2539,6 @@ void CodeGenDAGPatterns::AddPatternToMatch(const TreePattern *Pattern,
if (SrcNames[I->first].first == 0)
Pattern->error("Pattern has input without matching name in output: $" +
I->first);
-
-#if 0
- const std::vector<unsigned char> &SrcTypeVec =
- SrcNames[I->first].first->getExtTypes();
- const std::vector<unsigned char> &DstTypeVec =
- I->second.first->getExtTypes();
- if (SrcTypeVec == DstTypeVec) continue;
-
- std::string SrcType, DstType;
- for (unsigned i = 0, e = SrcTypeVec.size(); i != e; ++i)
- SrcType += ":" + GetTypeName(SrcTypeVec[i]);
- for (unsigned i = 0, e = DstTypeVec.size(); i != e; ++i)
- DstType += ":" + GetTypeName(DstTypeVec[i]);
-
- Pattern->error("Variable $" + I->first +
- " has different types in source (" + SrcType +
- ") and dest (" + DstType + ") pattern!");
-#endif
}
// Scan all of the named values in the source pattern, rejecting them if the
@@ -2198,65 +2554,67 @@ void CodeGenDAGPatterns::AddPatternToMatch(const TreePattern *Pattern,
void CodeGenDAGPatterns::InferInstructionFlags() {
- std::map<std::string, CodeGenInstruction> &InstrDescs =
- Target.getInstructions();
- for (std::map<std::string, CodeGenInstruction>::iterator
- II = InstrDescs.begin(), E = InstrDescs.end(); II != E; ++II) {
- CodeGenInstruction &InstInfo = II->second;
+ const std::vector<const CodeGenInstruction*> &Instructions =
+ Target.getInstructionsByEnumValue();
+ for (unsigned i = 0, e = Instructions.size(); i != e; ++i) {
+ CodeGenInstruction &InstInfo =
+ const_cast<CodeGenInstruction &>(*Instructions[i]);
// Determine properties of the instruction from its pattern.
- bool MayStore, MayLoad, HasSideEffects;
- InferFromPattern(InstInfo, MayStore, MayLoad, HasSideEffects, *this);
+ bool MayStore, MayLoad, HasSideEffects, IsVariadic;
+ InferFromPattern(InstInfo, MayStore, MayLoad, HasSideEffects, IsVariadic,
+ *this);
InstInfo.mayStore = MayStore;
InstInfo.mayLoad = MayLoad;
InstInfo.hasSideEffects = HasSideEffects;
+ InstInfo.isVariadic = IsVariadic;
+ }
+}
+
+/// Given a pattern result with an unresolved type, see if we can find one
+/// instruction with an unresolved result type. Force this result type to an
+/// arbitrary element of its possible-type list so that results can converge.
+static bool ForceArbitraryInstResultType(TreePatternNode *N, TreePattern &TP) {
+ if (N->isLeaf())
+ return false;
+
+ // Analyze children.
+ for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i)
+ if (ForceArbitraryInstResultType(N->getChild(i), TP))
+ return true;
+
+ if (!N->getOperator()->isSubClassOf("Instruction"))
+ return false;
+
+ // If this type is already concrete or completely unknown we can't do
+ // anything.
+ for (unsigned i = 0, e = N->getNumTypes(); i != e; ++i) {
+ if (N->getExtType(i).isCompletelyUnknown() || N->getExtType(i).isConcrete())
+ continue;
+
+ // Otherwise, force its type to the first possibility (an arbitrary choice).
+ if (N->getExtType(i).MergeInTypeInfo(N->getExtType(i).getTypeList()[0], TP))
+ return true;
}
+
+ return false;
}
void CodeGenDAGPatterns::ParsePatterns() {
std::vector<Record*> Patterns = Records.getAllDerivedDefinitions("Pattern");
for (unsigned i = 0, e = Patterns.size(); i != e; ++i) {
- DagInit *Tree = Patterns[i]->getValueAsDag("PatternToMatch");
- DefInit *OpDef = dynamic_cast<DefInit*>(Tree->getOperator());
- Record *Operator = OpDef->getDef();
- TreePattern *Pattern;
- if (Operator->getName() != "parallel")
- Pattern = new TreePattern(Patterns[i], Tree, true, *this);
- else {
- std::vector<Init*> Values;
- RecTy *ListTy = 0;
- for (unsigned j = 0, ee = Tree->getNumArgs(); j != ee; ++j) {
- Values.push_back(Tree->getArg(j));
- TypedInit *TArg = dynamic_cast<TypedInit*>(Tree->getArg(j));
- if (TArg == 0) {
- errs() << "In dag: " << Tree->getAsString();
- errs() << " -- Untyped argument in pattern\n";
- assert(0 && "Untyped argument in pattern");
- }
- if (ListTy != 0) {
- ListTy = resolveTypes(ListTy, TArg->getType());
- if (ListTy == 0) {
- errs() << "In dag: " << Tree->getAsString();
- errs() << " -- Incompatible types in pattern arguments\n";
- assert(0 && "Incompatible types in pattern arguments");
- }
- }
- else {
- ListTy = TArg->getType();
- }
- }
- ListInit *LI = new ListInit(Values, new ListRecTy(ListTy));
- Pattern = new TreePattern(Patterns[i], LI, true, *this);
- }
+ Record *CurPattern = Patterns[i];
+ DagInit *Tree = CurPattern->getValueAsDag("PatternToMatch");
+ TreePattern *Pattern = new TreePattern(CurPattern, Tree, true, *this);
// Inline pattern fragments into it.
Pattern->InlinePatternFragments();
- ListInit *LI = Patterns[i]->getValueAsListInit("ResultInstrs");
+ ListInit *LI = CurPattern->getValueAsListInit("ResultInstrs");
if (LI->getSize() == 0) continue; // no pattern.
// Parse the instruction.
- TreePattern *Result = new TreePattern(Patterns[i], LI, false, *this);
+ TreePattern *Result = new TreePattern(CurPattern, LI, false, *this);
// Inline pattern fragments into it.
Result->InlinePatternFragments();
@@ -2270,38 +2628,61 @@ void CodeGenDAGPatterns::ParsePatterns() {
do {
// Infer as many types as possible. If we cannot infer all of them, we
// can never do anything with this pattern: report it to the user.
- InferredAllPatternTypes = Pattern->InferAllTypes();
+ InferredAllPatternTypes =
+ Pattern->InferAllTypes(&Pattern->getNamedNodesMap());
// Infer as many types as possible. If we cannot infer all of them, we
// can never do anything with this pattern: report it to the user.
- InferredAllResultTypes = Result->InferAllTypes();
+ InferredAllResultTypes =
+ Result->InferAllTypes(&Pattern->getNamedNodesMap());
+ IterateInference = false;
+
// Apply the type of the result to the source pattern. This helps us
// resolve cases where the input type is known to be a pointer type (which
// is considered resolved), but the result knows it needs to be 32- or
// 64-bits. Infer the other way for good measure.
- IterateInference = Pattern->getTree(0)->
- UpdateNodeType(Result->getTree(0)->getExtTypes(), *Result);
- IterateInference |= Result->getTree(0)->
- UpdateNodeType(Pattern->getTree(0)->getExtTypes(), *Result);
+ for (unsigned i = 0, e = std::min(Result->getTree(0)->getNumTypes(),
+ Pattern->getTree(0)->getNumTypes());
+ i != e; ++i) {
+ IterateInference = Pattern->getTree(0)->
+ UpdateNodeType(i, Result->getTree(0)->getExtType(i), *Result);
+ IterateInference |= Result->getTree(0)->
+ UpdateNodeType(i, Pattern->getTree(0)->getExtType(i), *Result);
+ }
+
+ // If our iteration has converged and the input pattern's types are fully
+ // resolved but the result pattern is not fully resolved, we may have a
+ // situation where we have two instructions in the result pattern and
+ // the instructions require a common register class, but don't care about
+ // what actual MVT is used. This is actually a bug in our modelling:
+ // output patterns should have register classes, not MVTs.
+ //
+ // In any case, to handle this, we just go through and disambiguate some
+ // arbitrary types to the result pattern's nodes.
+ if (!IterateInference && InferredAllPatternTypes &&
+ !InferredAllResultTypes)
+ IterateInference = ForceArbitraryInstResultType(Result->getTree(0),
+ *Result);
} while (IterateInference);
// Verify that we inferred enough types that we can do something with the
// pattern and result. If these fire the user has to add type casts.
if (!InferredAllPatternTypes)
Pattern->error("Could not infer all types in pattern!");
- if (!InferredAllResultTypes)
+ if (!InferredAllResultTypes) {
+ Pattern->dump();
Result->error("Could not infer all types in pattern result!");
+ }
// Validate that the input pattern is correct.
std::map<std::string, TreePatternNode*> InstInputs;
std::map<std::string, TreePatternNode*> InstResults;
- std::vector<Record*> InstImpInputs;
std::vector<Record*> InstImpResults;
for (unsigned j = 0, ee = Pattern->getNumTrees(); j != ee; ++j)
FindPatternInputsAndOutputs(Pattern, Pattern->getTree(j),
InstInputs, InstResults,
- InstImpInputs, InstImpResults);
+ InstImpResults);
// Promote the xform function to be an explicit node if set.
TreePatternNode *DstPattern = Result->getOnlyTree();
@@ -2312,25 +2693,29 @@ void CodeGenDAGPatterns::ParsePatterns() {
OpNode->setTransformFn(0);
std::vector<TreePatternNode*> Children;
Children.push_back(OpNode);
- OpNode = new TreePatternNode(Xform, Children);
+ OpNode = new TreePatternNode(Xform, Children, OpNode->getNumTypes());
}
ResultNodeOperands.push_back(OpNode);
}
DstPattern = Result->getOnlyTree();
if (!DstPattern->isLeaf())
DstPattern = new TreePatternNode(DstPattern->getOperator(),
- ResultNodeOperands);
- DstPattern->setTypes(Result->getOnlyTree()->getExtTypes());
+ ResultNodeOperands,
+ DstPattern->getNumTypes());
+
+ for (unsigned i = 0, e = Result->getOnlyTree()->getNumTypes(); i != e; ++i)
+ DstPattern->setType(i, Result->getOnlyTree()->getExtType(i));
+
TreePattern Temp(Result->getRecord(), DstPattern, false, *this);
Temp.InferAllTypes();
AddPatternToMatch(Pattern,
- PatternToMatch(Patterns[i]->getValueAsListInit("Predicates"),
- Pattern->getTree(0),
- Temp.getOnlyTree(), InstImpResults,
- Patterns[i]->getValueAsInt("AddedComplexity"),
- Patterns[i]->getID()));
+ PatternToMatch(CurPattern->getValueAsListInit("Predicates"),
+ Pattern->getTree(0),
+ Temp.getOnlyTree(), InstImpResults,
+ CurPattern->getValueAsInt("AddedComplexity"),
+ CurPattern->getID()));
}
}
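
The do/while loop reworked above now exchanges type information per result index; conceptually each pass intersects the source tree's possible types with the result tree's, and vice versa, until neither side changes. A self-contained caricature of that fixed-point iteration (std::set of strings standing in for EEVT::TypeSet, mergeInto standing in for UpdateNodeType):

    #include <algorithm>
    #include <cassert>
    #include <iterator>
    #include <set>
    #include <string>

    using TypeSet = std::set<std::string>;

    // Intersect Dst with Src; an empty Dst means "unknown" and adopts Src.
    // Returns true if Dst changed, like UpdateNodeType does.
    static bool mergeInto(TypeSet &Dst, const TypeSet &Src) {
      if (Dst.empty()) { Dst = Src; return !Src.empty(); }
      TypeSet Out;
      std::set_intersection(Dst.begin(), Dst.end(), Src.begin(), Src.end(),
                            std::inserter(Out, Out.begin()));
      bool Changed = Out != Dst;
      Dst = std::move(Out);
      return Changed;
    }

    int main() {
      TypeSet Pattern = {"i32", "i64"};   // source pattern result
      TypeSet Result  = {"i32"};          // result instruction output
      bool Iterate = true;
      while (Iterate) {                   // iterate to a fixed point
        Iterate  = mergeInto(Pattern, Result);
        Iterate |= mergeInto(Result, Pattern);
      }
      assert(Pattern == TypeSet{"i32"} && Result == TypeSet{"i32"});
      return 0;
    }
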
@@ -2364,13 +2749,15 @@ static void CombineChildVariants(TreePatternNode *Orig,
std::vector<TreePatternNode*> NewChildren;
for (unsigned i = 0, e = ChildVariants.size(); i != e; ++i)
NewChildren.push_back(ChildVariants[i][Idxs[i]]);
- TreePatternNode *R = new TreePatternNode(Orig->getOperator(), NewChildren);
+ TreePatternNode *R = new TreePatternNode(Orig->getOperator(), NewChildren,
+ Orig->getNumTypes());
// Copy over properties.
R->setName(Orig->getName());
R->setPredicateFns(Orig->getPredicateFns());
R->setTransformFn(Orig->getTransformFn());
- R->setTypes(Orig->getExtTypes());
+ for (unsigned i = 0, e = Orig->getNumTypes(); i != e; ++i)
+ R->setType(i, Orig->getExtType(i));
// If this pattern cannot match, do not include it as a variant.
std::string ErrString;
diff --git a/libclamav/c++/llvm/utils/TableGen/CodeGenDAGPatterns.h b/libclamav/c++/llvm/utils/TableGen/CodeGenDAGPatterns.h
index 37d633e..0a1362a 100644
--- a/libclamav/c++/llvm/utils/TableGen/CodeGenDAGPatterns.h
+++ b/libclamav/c++/llvm/utils/TableGen/CodeGenDAGPatterns.h
@@ -15,12 +15,14 @@
#ifndef CODEGEN_DAGPATTERNS_H
#define CODEGEN_DAGPATTERNS_H
+#include "CodeGenTarget.h"
+#include "CodeGenIntrinsics.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
#include <set>
#include <algorithm>
#include <vector>
-
-#include "CodeGenTarget.h"
-#include "CodeGenIntrinsics.h"
+#include <map>
namespace llvm {
class Record;
@@ -39,21 +41,107 @@ namespace llvm {
/// arbitrary integer, floating-point, and vector types, so only an unknown
/// value is needed.
namespace EEVT {
- enum DAGISelGenValueType {
- isUnknown = MVT::LAST_VALUETYPE
- };
+ /// TypeSet - This is either empty if it's completely unknown, or holds a set
+ /// of types. It is used during type inference because register classes can
+ /// have multiple possible types and we don't know which one they get until
+ /// type inference is complete.
+ ///
+ /// TypeSet can have three states:
+ /// Vector is empty: The type is completely unknown, it can be any valid
+ /// target type.
+ /// Vector has multiple constrained types: (e.g. v4i32 + v4f32) it is one
+ /// of those types only.
+ /// Vector has one concrete type: The type is completely known.
+ ///
+ class TypeSet {
+ SmallVector<MVT::SimpleValueType, 4> TypeVec;
+ public:
+ TypeSet() {}
+ TypeSet(MVT::SimpleValueType VT, TreePattern &TP);
+ TypeSet(const std::vector<MVT::SimpleValueType> &VTList);
+
+ bool isCompletelyUnknown() const { return TypeVec.empty(); }
+
+ bool isConcrete() const {
+ if (TypeVec.size() != 1) return false;
+ unsigned char T = TypeVec[0]; (void)T;
+ assert(T < MVT::LAST_VALUETYPE || T == MVT::iPTR || T == MVT::iPTRAny);
+ return true;
+ }
+
+ MVT::SimpleValueType getConcrete() const {
+ assert(isConcrete() && "Type isn't concrete yet");
+ return (MVT::SimpleValueType)TypeVec[0];
+ }
+
+ bool isDynamicallyResolved() const {
+ return getConcrete() == MVT::iPTR || getConcrete() == MVT::iPTRAny;
+ }
+
+ const SmallVectorImpl<MVT::SimpleValueType> &getTypeList() const {
+ assert(!TypeVec.empty() && "Not a type list!");
+ return TypeVec;
+ }
+
+ bool isVoid() const {
+ return TypeVec.size() == 1 && TypeVec[0] == MVT::isVoid;
+ }
+
+ /// hasIntegerTypes - Return true if this TypeSet contains any integer value
+ /// types.
+ bool hasIntegerTypes() const;
+
+ /// hasFloatingPointTypes - Return true if this TypeSet contains an fAny or
+ /// a floating point value type.
+ bool hasFloatingPointTypes() const;
+
+ /// hasVectorTypes - Return true if this TypeSet contains a vector value
+ /// type.
+ bool hasVectorTypes() const;
+
+ /// getName() - Return this TypeSet as a string.
+ std::string getName() const;
+
+ /// MergeInTypeInfo - This merges in type information from the specified
+ /// argument. If 'this' changes, it returns true. If the two types are
+ /// contradictory (e.g. merge f32 into i32) then this throws an exception.
+ bool MergeInTypeInfo(const EEVT::TypeSet &InVT, TreePattern &TP);
+
+ bool MergeInTypeInfo(MVT::SimpleValueType InVT, TreePattern &TP) {
+ return MergeInTypeInfo(EEVT::TypeSet(InVT, TP), TP);
+ }
+
+ /// Force this type list to only contain integer types.
+ bool EnforceInteger(TreePattern &TP);
- /// isExtIntegerInVTs - Return true if the specified extended value type
- /// vector contains iAny or an integer value type.
- bool isExtIntegerInVTs(const std::vector<unsigned char> &EVTs);
+ /// Force this type list to only contain floating point types.
+ bool EnforceFloatingPoint(TreePattern &TP);
- /// isExtFloatingPointInVTs - Return true if the specified extended value
- /// type vector contains fAny or a FP value type.
- bool isExtFloatingPointInVTs(const std::vector<unsigned char> &EVTs);
+ /// EnforceScalar - Remove all vector types from this type list.
+ bool EnforceScalar(TreePattern &TP);
- /// isExtVectorinVTs - Return true if the specified extended value type
- /// vector contains vAny or a vector value type.
- bool isExtVectorInVTs(const std::vector<unsigned char> &EVTs);
+ /// EnforceVector - Remove all non-vector types from this type list.
+ bool EnforceVector(TreePattern &TP);
+
+ /// EnforceSmallerThan - 'this' must be a smaller VT than Other. Update
+ /// this an other based on this information.
+ bool EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP);
+
+ /// EnforceVectorEltTypeIs - 'this' is now constrained to be a vector type
+ /// whose element is VT.
+ bool EnforceVectorEltTypeIs(EEVT::TypeSet &VT, TreePattern &TP);
+
+ bool operator!=(const TypeSet &RHS) const { return TypeVec != RHS.TypeVec; }
+ bool operator==(const TypeSet &RHS) const { return TypeVec == RHS.TypeVec; }
+
+ private:
+ /// FillWithPossibleTypes - Set to all legal types and return true, only
+ /// valid on completely unknown type sets. If Pred is non-null, only MVTs
+ /// that pass the predicate are added.
+ bool FillWithPossibleTypes(TreePattern &TP,
+ bool (*Pred)(MVT::SimpleValueType) = 0,
+ const char *PredicateName = 0);
+ };
}
/// Set type used to track multiply used variables in patterns
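
Since the TypeSet comment block above packs a lot in: the three states it describes are "empty means completely unknown", "several entries means constrained to one of them", and "exactly one entry means concrete". A toy illustration of those states (plain std::vector and free functions, not the EEVT::TypeSet API):

    #include <cassert>
    #include <string>
    #include <vector>

    using TypeList = std::vector<std::string>;

    static bool isCompletelyUnknown(const TypeList &T) { return T.empty(); }
    static bool isConcrete(const TypeList &T)          { return T.size() == 1; }

    int main() {
      TypeList Unknown;                        // any valid target type
      TypeList RegClass = {"v4i32", "v4f32"};  // constrained to one of these
      TypeList Done     = {"i32"};             // fully resolved

      assert(isCompletelyUnknown(Unknown));
      assert(!isConcrete(RegClass) && !isCompletelyUnknown(RegClass));
      assert(isConcrete(Done));
      return 0;
    }
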
@@ -72,7 +160,7 @@ struct SDTypeConstraint {
union { // The discriminated union.
struct {
- unsigned char VT;
+ MVT::SimpleValueType VT;
} SDTCisVT_Info;
struct {
unsigned OtherOperandNum;
@@ -94,11 +182,6 @@ struct SDTypeConstraint {
/// exception.
bool ApplyTypeConstraint(TreePatternNode *N, const SDNodeInfo &NodeInfo,
TreePattern &TP) const;
-
- /// getOperandNum - Return the node corresponding to operand #OpNo in tree
- /// N, which has NumResults results.
- TreePatternNode *getOperandNum(unsigned OpNo, TreePatternNode *N,
- unsigned NumResults) const;
};
/// SDNodeInfo - One of these records is created for each SDNode instance in
@@ -116,6 +199,9 @@ public:
SDNodeInfo(Record *R); // Parse the specified record.
unsigned getNumResults() const { return NumResults; }
+
+ /// getNumOperands - This is the number of operands required or -1 if
+ /// variadic.
int getNumOperands() const { return NumOperands; }
Record *getRecord() const { return Def; }
const std::string &getEnumName() const { return EnumName; }
@@ -127,8 +213,8 @@ public:
/// getKnownType - If the type constraints on this node imply a fixed type
/// (e.g. all stores return void, etc), then return it as an
- /// MVT::SimpleValueType. Otherwise, return EEVT::isUnknown.
- unsigned getKnownType() const;
+ /// MVT::SimpleValueType. Otherwise, return MVT::Other.
+ MVT::SimpleValueType getKnownType(unsigned ResNo) const;
/// hasProperty - Return true if this node has the specified property.
///
@@ -150,10 +236,10 @@ public:
/// patterns), and as such should be ref counted. We currently just leak all
/// TreePatternNode objects!
class TreePatternNode {
- /// The inferred type for this node, or EEVT::isUnknown if it hasn't
- /// been determined yet. This is a std::vector because during inference
- /// there may be multiple possible types.
- std::vector<unsigned char> Types;
+ /// The type of each node result. Before and during type inference, each
+ /// result may be a set of possible types. After (successful) type inference,
+ /// each is a single concrete type.
+ SmallVector<EEVT::TypeSet, 1> Types;
/// Operator - The Record for the operator if this is an interior node (not
/// a leaf).
@@ -177,41 +263,41 @@ class TreePatternNode {
std::vector<TreePatternNode*> Children;
public:
- TreePatternNode(Record *Op, const std::vector<TreePatternNode*> &Ch)
- : Types(), Operator(Op), Val(0), TransformFn(0),
- Children(Ch) { Types.push_back(EEVT::isUnknown); }
- TreePatternNode(Init *val) // leaf ctor
- : Types(), Operator(0), Val(val), TransformFn(0) {
- Types.push_back(EEVT::isUnknown);
+ TreePatternNode(Record *Op, const std::vector<TreePatternNode*> &Ch,
+ unsigned NumResults)
+ : Operator(Op), Val(0), TransformFn(0), Children(Ch) {
+ Types.resize(NumResults);
+ }
+ TreePatternNode(Init *val, unsigned NumResults) // leaf ctor
+ : Operator(0), Val(val), TransformFn(0) {
+ Types.resize(NumResults);
}
~TreePatternNode();
const std::string &getName() const { return Name; }
- void setName(const std::string &N) { Name = N; }
+ void setName(StringRef N) { Name.assign(N.begin(), N.end()); }
bool isLeaf() const { return Val != 0; }
- bool hasTypeSet() const {
- return (Types[0] < MVT::LAST_VALUETYPE) || (Types[0] == MVT::iPTR) ||
- (Types[0] == MVT::iPTRAny);
- }
- bool isTypeCompletelyUnknown() const {
- return Types[0] == EEVT::isUnknown;
+
+ // Type accessors.
+ unsigned getNumTypes() const { return Types.size(); }
+ MVT::SimpleValueType getType(unsigned ResNo) const {
+ return Types[ResNo].getConcrete();
}
- bool isTypeDynamicallyResolved() const {
- return (Types[0] == MVT::iPTR) || (Types[0] == MVT::iPTRAny);
+ const SmallVectorImpl<EEVT::TypeSet> &getExtTypes() const { return Types; }
+ const EEVT::TypeSet &getExtType(unsigned ResNo) const { return Types[ResNo]; }
+ EEVT::TypeSet &getExtType(unsigned ResNo) { return Types[ResNo]; }
+ void setType(unsigned ResNo, const EEVT::TypeSet &T) { Types[ResNo] = T; }
+
+ bool hasTypeSet(unsigned ResNo) const {
+ return Types[ResNo].isConcrete();
}
- MVT::SimpleValueType getTypeNum(unsigned Num) const {
- assert(hasTypeSet() && "Doesn't have a type yet!");
- assert(Types.size() > Num && "Type num out of range!");
- return (MVT::SimpleValueType)Types[Num];
+ bool isTypeCompletelyUnknown(unsigned ResNo) const {
+ return Types[ResNo].isCompletelyUnknown();
}
- unsigned char getExtTypeNum(unsigned Num) const {
- assert(Types.size() > Num && "Extended type num out of range!");
- return Types[Num];
+ bool isTypeDynamicallyResolved(unsigned ResNo) const {
+ return Types[ResNo].isDynamicallyResolved();
}
- const std::vector<unsigned char> &getExtTypes() const { return Types; }
- void setTypes(const std::vector<unsigned char> &T) { Types = T; }
- void removeTypes() { Types = std::vector<unsigned char>(1, EEVT::isUnknown); }
Init *getLeafValue() const { assert(isLeaf()); return Val; }
Record *getOperator() const { assert(!isLeaf()); return Operator; }
@@ -304,17 +390,22 @@ public: // Higher level manipulation routines.
/// information. If N already contains a conflicting type, then throw an
/// exception. This returns true if any information was updated.
///
- bool UpdateNodeType(const std::vector<unsigned char> &ExtVTs,
- TreePattern &TP);
- bool UpdateNodeType(unsigned char ExtVT, TreePattern &TP) {
- std::vector<unsigned char> ExtVTs(1, ExtVT);
- return UpdateNodeType(ExtVTs, TP);
+ bool UpdateNodeType(unsigned ResNo, const EEVT::TypeSet &InTy,
+ TreePattern &TP) {
+ return Types[ResNo].MergeInTypeInfo(InTy, TP);
+ }
+
+ bool UpdateNodeType(unsigned ResNo, MVT::SimpleValueType InTy,
+ TreePattern &TP) {
+ return Types[ResNo].MergeInTypeInfo(EEVT::TypeSet(InTy, TP), TP);
}
/// ContainsUnresolvedType - Return true if this tree contains any
/// unresolved types.
bool ContainsUnresolvedType() const {
- if (!hasTypeSet() && !isTypeDynamicallyResolved()) return true;
+ for (unsigned i = 0, e = Types.size(); i != e; ++i)
+ if (!Types[i].isConcrete()) return true;
+
for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
if (getChild(i)->ContainsUnresolvedType()) return true;
return false;
@@ -340,6 +431,10 @@ class TreePattern {
///
std::vector<TreePatternNode*> Trees;
+ /// NamedNodes - This is all of the nodes that have names in the trees in this
+ /// pattern.
+ StringMap<SmallVector<TreePatternNode*,1> > NamedNodes;
+
/// TheRecord - The actual TableGen record corresponding to this pattern.
///
Record *TheRecord;
@@ -375,6 +470,12 @@ public:
assert(Trees.size() == 1 && "Doesn't have exactly one pattern!");
return Trees[0];
}
+
+ const StringMap<SmallVector<TreePatternNode*,1> > &getNamedNodesMap() {
+ if (NamedNodes.empty())
+ ComputeNamedNodes();
+ return NamedNodes;
+ }
/// getRecord - Return the actual TableGen record corresponding to this
/// pattern.
@@ -401,7 +502,8 @@ public:
/// InferAllTypes - Infer/propagate as many types throughout the expression
/// patterns as possible. Return true if all types are inferred, false
/// otherwise. Throw an exception if a type contradiction is found.
- bool InferAllTypes();
+ bool InferAllTypes(const StringMap<SmallVector<TreePatternNode*,1> >
+ *NamedTypes=0);
/// error - Throw an exception, prefixing it with information about this
/// pattern.
@@ -411,7 +513,9 @@ public:
void dump() const;
private:
- TreePatternNode *ParseTreePattern(DagInit *DI);
+ TreePatternNode *ParseTreePattern(Init *DI, StringRef OpName);
+ void ComputeNamedNodes();
+ void ComputeNamedNodes(TreePatternNode *N);
};
/// DAGDefaultOperand - One of these is created for each PredicateOperand
@@ -425,23 +529,19 @@ class DAGInstruction {
std::vector<Record*> Results;
std::vector<Record*> Operands;
std::vector<Record*> ImpResults;
- std::vector<Record*> ImpOperands;
TreePatternNode *ResultPattern;
public:
DAGInstruction(TreePattern *TP,
const std::vector<Record*> &results,
const std::vector<Record*> &operands,
- const std::vector<Record*> &impresults,
- const std::vector<Record*> &impoperands)
+ const std::vector<Record*> &impresults)
: Pattern(TP), Results(results), Operands(operands),
- ImpResults(impresults), ImpOperands(impoperands),
- ResultPattern(0) {}
+ ImpResults(impresults), ResultPattern(0) {}
const TreePattern *getPattern() const { return Pattern; }
unsigned getNumResults() const { return Results.size(); }
unsigned getNumOperands() const { return Operands.size(); }
unsigned getNumImpResults() const { return ImpResults.size(); }
- unsigned getNumImpOperands() const { return ImpOperands.size(); }
const std::vector<Record*>& getImpResults() const { return ImpResults; }
void setResultPattern(TreePatternNode *R) { ResultPattern = R; }
@@ -461,11 +561,6 @@ public:
return ImpResults[RN];
}
- Record *getImpOperand(unsigned ON) const {
- assert(ON < ImpOperands.size());
- return ImpOperands[ON];
- }
-
TreePatternNode *getResultPattern() const { return ResultPattern; }
};
@@ -494,6 +589,10 @@ public:
unsigned getAddedComplexity() const { return AddedComplexity; }
std::string getPredicateCheck() const;
+
+ /// Compute the complexity metric for the input pattern. This roughly
+ /// corresponds to the number of nodes that are covered.
+ unsigned getPatternComplexity(const CodeGenDAGPatterns &CGP) const;
};
// Deterministic comparison of Record*.
@@ -591,6 +690,11 @@ public:
assert(PatternFragments.count(R) && "Invalid pattern fragment request!");
return PatternFragments.find(R)->second;
}
+ TreePattern *getPatternFragmentIfRead(Record *R) const {
+ if (!PatternFragments.count(R)) return 0;
+ return PatternFragments.find(R)->second;
+ }
+
typedef std::map<Record*, TreePattern*, RecordPtrCmp>::const_iterator
pf_iterator;
pf_iterator pf_begin() const { return PatternFragments.begin(); }
@@ -637,7 +741,6 @@ private:
TreePatternNode*> &InstInputs,
std::map<std::string,
TreePatternNode*> &InstResults,
- std::vector<Record*> &InstImpInputs,
std::vector<Record*> &InstImpResults);
};
} // end namespace llvm
diff --git a/libclamav/c++/llvm/utils/TableGen/CodeGenInstruction.cpp b/libclamav/c++/llvm/utils/TableGen/CodeGenInstruction.cpp
index f5b52ec..01a1fe1 100644
--- a/libclamav/c++/llvm/utils/TableGen/CodeGenInstruction.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/CodeGenInstruction.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
#include "Record.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/STLExtras.h"
@@ -101,12 +102,12 @@ CodeGenInstruction::CodeGenInstruction(Record *R, const std::string &AsmStr)
isReturn = R->getValueAsBit("isReturn");
isBranch = R->getValueAsBit("isBranch");
isIndirectBranch = R->getValueAsBit("isIndirectBranch");
+ isCompare = R->getValueAsBit("isCompare");
isBarrier = R->getValueAsBit("isBarrier");
isCall = R->getValueAsBit("isCall");
canFoldAsLoad = R->getValueAsBit("canFoldAsLoad");
mayLoad = R->getValueAsBit("mayLoad");
mayStore = R->getValueAsBit("mayStore");
- bool isTwoAddress = R->getValueAsBit("isTwoAddress");
isPredicable = R->getValueAsBit("isPredicable");
isConvertibleToThreeAddress = R->getValueAsBit("isConvertibleToThreeAddress");
isCommutable = R->getValueAsBit("isCommutable");
@@ -123,36 +124,43 @@ CodeGenInstruction::CodeGenInstruction(Record *R, const std::string &AsmStr)
hasExtraDefRegAllocReq = R->getValueAsBit("hasExtraDefRegAllocReq");
hasOptionalDef = false;
isVariadic = false;
+ ImplicitDefs = R->getValueAsListOfDefs("Defs");
+ ImplicitUses = R->getValueAsListOfDefs("Uses");
if (neverHasSideEffects + hasSideEffects > 1)
throw R->getName() + ": multiple conflicting side-effect flags set!";
- DagInit *DI;
- try {
- DI = R->getValueAsDag("OutOperandList");
- } catch (...) {
- // Error getting operand list, just ignore it (sparcv9).
- AsmString.clear();
- OperandList.clear();
- return;
- }
- NumDefs = DI->getNumArgs();
-
- DagInit *IDI;
- try {
- IDI = R->getValueAsDag("InOperandList");
- } catch (...) {
- // Error getting operand list, just ignore it (sparcv9).
- AsmString.clear();
- OperandList.clear();
- return;
- }
- DI = (DagInit*)(new BinOpInit(BinOpInit::CONCAT, DI, IDI, new DagRecTy))->Fold(R, 0);
-
+ DagInit *OutDI = R->getValueAsDag("OutOperandList");
+
+ if (DefInit *Init = dynamic_cast<DefInit*>(OutDI->getOperator())) {
+ if (Init->getDef()->getName() != "outs")
+ throw R->getName() + ": invalid def name for output list: use 'outs'";
+ } else
+ throw R->getName() + ": invalid output list: use 'outs'";
+
+ NumDefs = OutDI->getNumArgs();
+
+ DagInit *InDI = R->getValueAsDag("InOperandList");
+ if (DefInit *Init = dynamic_cast<DefInit*>(InDI->getOperator())) {
+ if (Init->getDef()->getName() != "ins")
+ throw R->getName() + ": invalid def name for input list: use 'ins'";
+ } else
+ throw R->getName() + ": invalid input list: use 'ins'";
+
unsigned MIOperandNo = 0;
std::set<std::string> OperandNames;
- for (unsigned i = 0, e = DI->getNumArgs(); i != e; ++i) {
- DefInit *Arg = dynamic_cast<DefInit*>(DI->getArg(i));
+ for (unsigned i = 0, e = InDI->getNumArgs()+OutDI->getNumArgs(); i != e; ++i){
+ Init *ArgInit;
+ std::string ArgName;
+ if (i < NumDefs) {
+ ArgInit = OutDI->getArg(i);
+ ArgName = OutDI->getArgName(i);
+ } else {
+ ArgInit = InDI->getArg(i-NumDefs);
+ ArgName = InDI->getArgName(i-NumDefs);
+ }
+
+ DefInit *Arg = dynamic_cast<DefInit*>(ArgInit);
if (!Arg)
throw "Illegal operand for the '" + R->getName() + "' instruction!";
@@ -189,14 +197,14 @@ CodeGenInstruction::CodeGenInstruction(Record *R, const std::string &AsmStr)
"' in '" + R->getName() + "' instruction!";
// Check that the operand has a name and that it's unique.
- if (DI->getArgName(i).empty())
+ if (ArgName.empty())
throw "In instruction '" + R->getName() + "', operand #" + utostr(i) +
" has no name!";
- if (!OperandNames.insert(DI->getArgName(i)).second)
+ if (!OperandNames.insert(ArgName).second)
throw "In instruction '" + R->getName() + "', operand #" + utostr(i) +
" has the same name as a previous operand!";
- OperandList.push_back(OperandInfo(Rec, DI->getArgName(i), PrintMethod,
+ OperandList.push_back(OperandInfo(Rec, ArgName, PrintMethod,
MIOperandNo, NumOps, MIOpInfo));
MIOperandNo += NumOps;
}
@@ -204,16 +212,6 @@ CodeGenInstruction::CodeGenInstruction(Record *R, const std::string &AsmStr)
// Parse Constraints.
ParseConstraints(R->getValueAsString("Constraints"), this);
- // For backward compatibility: isTwoAddress means operand 1 is tied to
- // operand 0.
- if (isTwoAddress) {
- if (!OperandList[1].Constraints[0].isNone())
- throw R->getName() + ": cannot use isTwoAddress property: instruction "
- "already has constraint set!";
- OperandList[1].Constraints[0] =
- CodeGenInstruction::ConstraintInfo::getTied(0);
- }
-
// Parse the DisableEncoding field.
std::string DisableEncoding = R->getValueAsString("DisableEncoding");
while (1) {
@@ -287,3 +285,22 @@ CodeGenInstruction::ParseOperandName(const std::string &Op,
// Otherwise, didn't find it!
throw TheDef->getName() + ": unknown suboperand name in '" + Op + "'";
}
+
+
+/// HasOneImplicitDefWithKnownVT - If the instruction has at least one
+/// implicit def and it has a known VT, return the VT, otherwise return
+/// MVT::Other.
+MVT::SimpleValueType CodeGenInstruction::
+HasOneImplicitDefWithKnownVT(const CodeGenTarget &TargetInfo) const {
+ if (ImplicitDefs.empty()) return MVT::Other;
+
+ // Check to see if the first implicit def has a resolvable type.
+ Record *FirstImplicitDef = ImplicitDefs[0];
+ assert(FirstImplicitDef->isSubClassOf("Register"));
+ const std::vector<MVT::SimpleValueType> &RegVTs =
+ TargetInfo.getRegisterVTs(FirstImplicitDef);
+ if (RegVTs.size() == 1)
+ return RegVTs[0];
+ return MVT::Other;
+}
+
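
The new HasOneImplicitDefWithKnownVT() helper answers a single question: does the first implicit def map to exactly one value type? A rough standalone sketch of that logic, with simplified types standing in for Record and MVT (the names here are illustrative, not the TableGen API):

    #include <map>
    #include <string>
    #include <vector>

    enum SimpleVT { Other, i8, i16, i32, f32 };

    // Value types each register can hold, as the target would define them.
    typedef std::map<std::string, std::vector<SimpleVT> > RegVTMap;

    // Return the VT of the first implicit def if it is unambiguous, else Other.
    SimpleVT firstImplicitDefVT(const std::vector<std::string> &ImplicitDefs,
                                const RegVTMap &RegVTs) {
      if (ImplicitDefs.empty()) return Other;
      RegVTMap::const_iterator I = RegVTs.find(ImplicitDefs[0]);
      if (I != RegVTs.end() && I->second.size() == 1)
        return I->second[0];    // exactly one candidate type: use it
      return Other;             // unknown or ambiguous
    }
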
diff --git a/libclamav/c++/llvm/utils/TableGen/CodeGenInstruction.h b/libclamav/c++/llvm/utils/TableGen/CodeGenInstruction.h
index aae2cac..b02d0d3 100644
--- a/libclamav/c++/llvm/utils/TableGen/CodeGenInstruction.h
+++ b/libclamav/c++/llvm/utils/TableGen/CodeGenInstruction.h
@@ -22,6 +22,7 @@
namespace llvm {
class Record;
class DagInit;
+ class CodeGenTarget;
class CodeGenInstruction {
public:
@@ -105,7 +106,8 @@ namespace llvm {
MINumOperands(MINO), MIOperandInfo(MIOI) {}
};
- /// NumDefs - Number of def operands declared.
+ /// NumDefs - Number of def operands declared; this is the number of
+ /// elements in the instruction's (outs) list.
///
unsigned NumDefs;
@@ -113,10 +115,15 @@ namespace llvm {
/// type (which is a record).
std::vector<OperandInfo> OperandList;
+ /// ImplicitDefs/ImplicitUses - These are lists of registers that are
+ /// implicitly defined and used by the instruction.
+ std::vector<Record*> ImplicitDefs, ImplicitUses;
+
// Various boolean values we track for the instruction.
bool isReturn;
bool isBranch;
bool isIndirectBranch;
+ bool isCompare;
bool isBarrier;
bool isCall;
bool canFoldAsLoad;
@@ -178,6 +185,12 @@ namespace llvm {
/// non-empty name. If the instruction does not have an operand with the
/// specified name, throw an exception.
unsigned getOperandNamed(const std::string &Name) const;
+
+ /// HasOneImplicitDefWithKnownVT - If the instruction has at least one
+ /// implicit def and it has a known VT, return the VT, otherwise return
+ /// MVT::Other.
+ MVT::SimpleValueType
+ HasOneImplicitDefWithKnownVT(const CodeGenTarget &TargetInfo) const;
};
}
diff --git a/libclamav/c++/llvm/utils/TableGen/CodeGenIntrinsics.h b/libclamav/c++/llvm/utils/TableGen/CodeGenIntrinsics.h
index 7e7bdf9..3208c0d 100644
--- a/libclamav/c++/llvm/utils/TableGen/CodeGenIntrinsics.h
+++ b/libclamav/c++/llvm/utils/TableGen/CodeGenIntrinsics.h
@@ -60,7 +60,7 @@ namespace llvm {
// Memory mod/ref behavior of this intrinsic.
enum {
- NoMem, ReadArgMem, ReadMem, WriteArgMem, WriteMem
+ NoMem, ReadArgMem, ReadMem, ReadWriteArgMem, ReadWriteMem
} ModRef;
/// This is set to true if the intrinsic is overloaded by its argument
diff --git a/libclamav/c++/llvm/utils/TableGen/CodeGenRegisters.h b/libclamav/c++/llvm/utils/TableGen/CodeGenRegisters.h
index 6f8682b..ccd3d22 100644
--- a/libclamav/c++/llvm/utils/TableGen/CodeGenRegisters.h
+++ b/libclamav/c++/llvm/utils/TableGen/CodeGenRegisters.h
@@ -16,8 +16,10 @@
#define CODEGEN_REGISTERS_H
#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/ADT/DenseMap.h"
#include <string>
#include <vector>
+#include <set>
#include <cstdlib>
namespace llvm {
@@ -40,7 +42,8 @@ namespace llvm {
unsigned SpillSize;
unsigned SpillAlignment;
int CopyCost;
- std::vector<Record*> SubRegClasses;
+ // Map SubRegIndex -> RegisterClass
+ DenseMap<Record*,Record*> SubRegClasses;
std::string MethodProtos, MethodBodies;
const std::string &getName() const;
@@ -53,6 +56,37 @@ namespace llvm {
assert(0 && "VTNum greater than number of ValueTypes in RegClass!");
abort();
}
+
+ // Returns true if RC is a strict subclass.
+ // RC is a sub-class of this class if it is a valid replacement for any
+ // instruction operand where a register of this classis required. It must
+ // satisfy these conditions:
+ //
+ // 1. All RC registers are also in this.
+ // 2. The RC spill size must not be smaller than our spill size.
+ // 3. RC spill alignment must be compatible with ours.
+ //
+ bool hasSubClass(const CodeGenRegisterClass *RC) const {
+
+ if (RC->Elements.size() > Elements.size() ||
+ (SpillAlignment && RC->SpillAlignment % SpillAlignment) ||
+ SpillSize > RC->SpillSize)
+ return false;
+
+ std::set<Record*> RegSet;
+ for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
+ Record *Reg = Elements[i];
+ RegSet.insert(Reg);
+ }
+
+ for (unsigned i = 0, e = RC->Elements.size(); i != e; ++i) {
+ Record *Reg = RC->Elements[i];
+ if (!RegSet.count(Reg))
+ return false;
+ }
+
+ return true;
+ }
CodeGenRegisterClass(Record *R);
};
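
The three hasSubClass() conditions above (register subset, spill size, alignment compatibility) can be exercised in isolation. A minimal self-contained sketch under the same rules, using plain strings for registers rather than Record*:

    #include <set>
    #include <string>
    #include <vector>

    struct RegClass {
      std::vector<std::string> Elements;  // registers in the class
      unsigned SpillSize;                 // bits needed to spill a register
      unsigned SpillAlignment;            // required spill alignment (0 = any)
    };

    // Returns true if RC can stand in wherever This is required.
    bool hasSubClass(const RegClass &This, const RegClass &RC) {
      // 2./3. The spill size must not shrink; alignment must stay compatible.
      if (RC.Elements.size() > This.Elements.size() ||
          (This.SpillAlignment && RC.SpillAlignment % This.SpillAlignment) ||
          This.SpillSize > RC.SpillSize)
        return false;

      // 1. Every register of RC must also be a member of This.
      std::set<std::string> RegSet(This.Elements.begin(), This.Elements.end());
      for (unsigned i = 0, e = RC.Elements.size(); i != e; ++i)
        if (!RegSet.count(RC.Elements[i]))
          return false;
      return true;
    }
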
diff --git a/libclamav/c++/llvm/utils/TableGen/CodeGenTarget.cpp b/libclamav/c++/llvm/utils/TableGen/CodeGenTarget.cpp
index 2688091..cbfe2ad 100644
--- a/libclamav/c++/llvm/utils/TableGen/CodeGenTarget.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/CodeGenTarget.cpp
@@ -18,6 +18,7 @@
#include "CodeGenIntrinsics.h"
#include "Record.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
using namespace llvm;
@@ -79,6 +80,7 @@ std::string llvm::getEnumName(MVT::SimpleValueType T) {
case MVT::v1i64: return "MVT::v1i64";
case MVT::v2i64: return "MVT::v2i64";
case MVT::v4i64: return "MVT::v4i64";
+ case MVT::v8i64: return "MVT::v8i64";
case MVT::v2f32: return "MVT::v2f32";
case MVT::v4f32: return "MVT::v4f32";
case MVT::v8f32: return "MVT::v8f32";
@@ -120,24 +122,21 @@ const std::string &CodeGenTarget::getName() const {
}
std::string CodeGenTarget::getInstNamespace() const {
- std::string InstNS;
-
for (inst_iterator i = inst_begin(), e = inst_end(); i != e; ++i) {
- InstNS = i->second.Namespace;
-
- // Make sure not to pick up "TargetInstrInfo" by accidentally getting
+ // Make sure not to pick up "TargetOpcode" by accidentally getting
// the namespace off the PHI instruction or something.
- if (InstNS != "TargetInstrInfo")
- break;
+ if ((*i)->Namespace != "TargetOpcode")
+ return (*i)->Namespace;
}
- return InstNS;
+ return "";
}
Record *CodeGenTarget::getInstructionSet() const {
return TargetRec->getValueAsDef("InstructionSet");
}
+
/// getAsmParser - Return the AssemblyParser definition for this target.
///
Record *CodeGenTarget::getAsmParser() const {
@@ -160,6 +159,7 @@ void CodeGenTarget::ReadRegisters() const {
std::vector<Record*> Regs = Records.getAllDerivedDefinitions("Register");
if (Regs.empty())
throw std::string("No 'Register' subclasses defined!");
+ std::sort(Regs.begin(), Regs.end(), LessRecord());
Registers.reserve(Regs.size());
Registers.assign(Regs.begin(), Regs.end());
@@ -174,6 +174,11 @@ const std::string &CodeGenRegister::getName() const {
return TheDef->getName();
}
+void CodeGenTarget::ReadSubRegIndices() const {
+ SubRegIndices = Records.getAllDerivedDefinitions("SubRegIndex");
+ std::sort(SubRegIndices.begin(), SubRegIndices.end(), LessRecord());
+}
+
void CodeGenTarget::ReadRegisterClasses() const {
std::vector<Record*> RegClasses =
Records.getAllDerivedDefinitions("RegisterClass");
@@ -184,19 +189,23 @@ void CodeGenTarget::ReadRegisterClasses() const {
RegisterClasses.assign(RegClasses.begin(), RegClasses.end());
}
-std::vector<unsigned char> CodeGenTarget::getRegisterVTs(Record *R) const {
- std::vector<unsigned char> Result;
+std::vector<MVT::SimpleValueType> CodeGenTarget::
+getRegisterVTs(Record *R) const {
+ std::vector<MVT::SimpleValueType> Result;
const std::vector<CodeGenRegisterClass> &RCs = getRegisterClasses();
for (unsigned i = 0, e = RCs.size(); i != e; ++i) {
const CodeGenRegisterClass &RC = RegisterClasses[i];
for (unsigned ei = 0, ee = RC.Elements.size(); ei != ee; ++ei) {
if (R == RC.Elements[ei]) {
const std::vector<MVT::SimpleValueType> &InVTs = RC.getValueTypes();
- for (unsigned i = 0, e = InVTs.size(); i != e; ++i)
- Result.push_back(InVTs[i]);
+ Result.insert(Result.end(), InVTs.begin(), InVTs.end());
}
}
}
+
+ // Remove duplicates.
+ array_pod_sort(Result.begin(), Result.end());
+ Result.erase(std::unique(Result.begin(), Result.end()), Result.end());
return Result;
}
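
The duplicate removal added to getRegisterVTs() is the standard sort-then-unique idiom (array_pod_sort is LLVM's POD-only sort; std::sort shows the same shape). A short standalone sketch:

    #include <algorithm>
    #include <vector>

    // Collect candidate values (possibly with repeats), then deduplicate.
    std::vector<int> dedup(std::vector<int> Result) {
      std::sort(Result.begin(), Result.end());                 // group equal values
      Result.erase(std::unique(Result.begin(), Result.end()),  // drop adjacent repeats
                   Result.end());
      return Result;
    }
    // e.g. an input of {3, 7, 3} comes back as {3, 7}.
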
@@ -226,17 +235,30 @@ CodeGenRegisterClass::CodeGenRegisterClass(Record *R) : TheDef(R) {
"' does not derive from the Register class!";
Elements.push_back(Reg);
}
-
- std::vector<Record*> SubRegClassList =
- R->getValueAsListOfDefs("SubRegClassList");
- for (unsigned i = 0, e = SubRegClassList.size(); i != e; ++i) {
- Record *SubRegClass = SubRegClassList[i];
- if (!SubRegClass->isSubClassOf("RegisterClass"))
- throw "Register Class member '" + SubRegClass->getName() +
- "' does not derive from the RegisterClass class!";
- SubRegClasses.push_back(SubRegClass);
- }
-
+
+ // SubRegClasses is a list<dag> containing (RC, subregindex, ...) dags.
+ ListInit *SRC = R->getValueAsListInit("SubRegClasses");
+ for (ListInit::const_iterator i = SRC->begin(), e = SRC->end(); i != e; ++i) {
+ DagInit *DAG = dynamic_cast<DagInit*>(*i);
+ if (!DAG) throw "SubRegClasses must contain DAGs";
+ DefInit *DAGOp = dynamic_cast<DefInit*>(DAG->getOperator());
+ Record *RCRec;
+ if (!DAGOp || !(RCRec = DAGOp->getDef())->isSubClassOf("RegisterClass"))
+ throw "Operator '" + DAG->getOperator()->getAsString() +
+ "' in SubRegClasses is not a RegisterClass";
+ // Iterate over args, all SubRegIndex instances.
+ for (DagInit::const_arg_iterator ai = DAG->arg_begin(), ae = DAG->arg_end();
+ ai != ae; ++ai) {
+ DefInit *Idx = dynamic_cast<DefInit*>(*ai);
+ Record *IdxRec;
+ if (!Idx || !(IdxRec = Idx->getDef())->isSubClassOf("SubRegIndex"))
+ throw "Argument '" + (*ai)->getAsString() +
+ "' in SubRegClasses is not a SubRegIndex";
+ if (!SubRegClasses.insert(std::make_pair(IdxRec, RCRec)).second)
+ throw "SubRegIndex '" + IdxRec->getName() + "' mentioned twice";
+ }
+ }
+
// Allow targets to override the size in bits of the RegisterClass.
unsigned Size = R->getValueAsInt("Size");
@@ -277,98 +299,76 @@ void CodeGenTarget::ReadInstructions() const {
for (unsigned i = 0, e = Insts.size(); i != e; ++i) {
std::string AsmStr = Insts[i]->getValueAsString(InstFormatName);
- Instructions.insert(std::make_pair(Insts[i]->getName(),
- CodeGenInstruction(Insts[i], AsmStr)));
+ Instructions[Insts[i]] = new CodeGenInstruction(Insts[i], AsmStr);
+ }
+}
+
+static const CodeGenInstruction *
+GetInstByName(const char *Name,
+ const DenseMap<const Record*, CodeGenInstruction*> &Insts) {
+ const Record *Rec = Records.getDef(Name);
+
+ DenseMap<const Record*, CodeGenInstruction*>::const_iterator
+ I = Insts.find(Rec);
+ if (Rec == 0 || I == Insts.end())
+ throw std::string("Could not find '") + Name + "' instruction!";
+ return I->second;
+}
+
+namespace {
+/// SortInstByName - Sorting predicate to sort instructions by name.
+///
+struct SortInstByName {
+ bool operator()(const CodeGenInstruction *Rec1,
+ const CodeGenInstruction *Rec2) const {
+ return Rec1->TheDef->getName() < Rec2->TheDef->getName();
}
+};
}
/// getInstructionsByEnumValue - Return all of the instructions defined by the
/// target, ordered by their enum value.
-void CodeGenTarget::
-getInstructionsByEnumValue(std::vector<const CodeGenInstruction*>
- &NumberedInstructions) {
- std::map<std::string, CodeGenInstruction>::const_iterator I;
- I = getInstructions().find("PHI");
- if (I == Instructions.end()) throw "Could not find 'PHI' instruction!";
- const CodeGenInstruction *PHI = &I->second;
-
- I = getInstructions().find("INLINEASM");
- if (I == Instructions.end()) throw "Could not find 'INLINEASM' instruction!";
- const CodeGenInstruction *INLINEASM = &I->second;
-
- I = getInstructions().find("DBG_LABEL");
- if (I == Instructions.end()) throw "Could not find 'DBG_LABEL' instruction!";
- const CodeGenInstruction *DBG_LABEL = &I->second;
-
- I = getInstructions().find("EH_LABEL");
- if (I == Instructions.end()) throw "Could not find 'EH_LABEL' instruction!";
- const CodeGenInstruction *EH_LABEL = &I->second;
-
- I = getInstructions().find("GC_LABEL");
- if (I == Instructions.end()) throw "Could not find 'GC_LABEL' instruction!";
- const CodeGenInstruction *GC_LABEL = &I->second;
-
- I = getInstructions().find("KILL");
- if (I == Instructions.end()) throw "Could not find 'KILL' instruction!";
- const CodeGenInstruction *KILL = &I->second;
-
- I = getInstructions().find("EXTRACT_SUBREG");
- if (I == Instructions.end())
- throw "Could not find 'EXTRACT_SUBREG' instruction!";
- const CodeGenInstruction *EXTRACT_SUBREG = &I->second;
-
- I = getInstructions().find("INSERT_SUBREG");
- if (I == Instructions.end())
- throw "Could not find 'INSERT_SUBREG' instruction!";
- const CodeGenInstruction *INSERT_SUBREG = &I->second;
-
- I = getInstructions().find("IMPLICIT_DEF");
- if (I == Instructions.end())
- throw "Could not find 'IMPLICIT_DEF' instruction!";
- const CodeGenInstruction *IMPLICIT_DEF = &I->second;
-
- I = getInstructions().find("SUBREG_TO_REG");
- if (I == Instructions.end())
- throw "Could not find 'SUBREG_TO_REG' instruction!";
- const CodeGenInstruction *SUBREG_TO_REG = &I->second;
-
- I = getInstructions().find("COPY_TO_REGCLASS");
- if (I == Instructions.end())
- throw "Could not find 'COPY_TO_REGCLASS' instruction!";
- const CodeGenInstruction *COPY_TO_REGCLASS = &I->second;
-
- I = getInstructions().find("DBG_VALUE");
- if (I == Instructions.end())
- throw "Could not find 'DBG_VALUE' instruction!";
- const CodeGenInstruction *DBG_VALUE = &I->second;
-
- // Print out the rest of the instructions now.
- NumberedInstructions.push_back(PHI);
- NumberedInstructions.push_back(INLINEASM);
- NumberedInstructions.push_back(DBG_LABEL);
- NumberedInstructions.push_back(EH_LABEL);
- NumberedInstructions.push_back(GC_LABEL);
- NumberedInstructions.push_back(KILL);
- NumberedInstructions.push_back(EXTRACT_SUBREG);
- NumberedInstructions.push_back(INSERT_SUBREG);
- NumberedInstructions.push_back(IMPLICIT_DEF);
- NumberedInstructions.push_back(SUBREG_TO_REG);
- NumberedInstructions.push_back(COPY_TO_REGCLASS);
- NumberedInstructions.push_back(DBG_VALUE);
- for (inst_iterator II = inst_begin(), E = inst_end(); II != E; ++II)
- if (&II->second != PHI &&
- &II->second != INLINEASM &&
- &II->second != DBG_LABEL &&
- &II->second != EH_LABEL &&
- &II->second != GC_LABEL &&
- &II->second != KILL &&
- &II->second != EXTRACT_SUBREG &&
- &II->second != INSERT_SUBREG &&
- &II->second != IMPLICIT_DEF &&
- &II->second != SUBREG_TO_REG &&
- &II->second != COPY_TO_REGCLASS &&
- &II->second != DBG_VALUE)
- NumberedInstructions.push_back(&II->second);
+void CodeGenTarget::ComputeInstrsByEnum() const {
+ // The ordering here must match the ordering in TargetOpcodes.h.
+ const char *const FixedInstrs[] = {
+ "PHI",
+ "INLINEASM",
+ "PROLOG_LABEL",
+ "EH_LABEL",
+ "GC_LABEL",
+ "KILL",
+ "EXTRACT_SUBREG",
+ "INSERT_SUBREG",
+ "IMPLICIT_DEF",
+ "SUBREG_TO_REG",
+ "COPY_TO_REGCLASS",
+ "DBG_VALUE",
+ "REG_SEQUENCE",
+ "COPY",
+ 0
+ };
+ const DenseMap<const Record*, CodeGenInstruction*> &Insts = getInstructions();
+ for (const char *const *p = FixedInstrs; *p; ++p) {
+ const CodeGenInstruction *Instr = GetInstByName(*p, Insts);
+ assert(Instr && "Missing target independent instruction");
+ assert(Instr->Namespace == "TargetOpcode" && "Bad namespace");
+ InstrsByEnum.push_back(Instr);
+ }
+ unsigned EndOfPredefines = InstrsByEnum.size();
+
+ for (DenseMap<const Record*, CodeGenInstruction*>::const_iterator
+ I = Insts.begin(), E = Insts.end(); I != E; ++I) {
+ const CodeGenInstruction *CGI = I->second;
+ if (CGI->Namespace != "TargetOpcode")
+ InstrsByEnum.push_back(CGI);
+ }
+
+ assert(InstrsByEnum.size() == Insts.size() && "Missing predefined instr");
+
+ // All of the instructions are now in random order based on the map iteration.
+ // Sort them by name.
+ std::sort(InstrsByEnum.begin()+EndOfPredefines, InstrsByEnum.end(),
+ SortInstByName());
}
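
The rewritten ComputeInstrsByEnum() builds the enum order in two parts: a fixed prefix whose order is dictated elsewhere (TargetOpcodes.h), then every remaining entry sorted by name so that map iteration order cannot leak into the output. A small standalone sketch of that shape, with invented names:

    #include <algorithm>
    #include <string>
    #include <vector>

    std::vector<std::string>
    orderByEnum(const std::vector<std::string> &Fixed,   // mandated prefix, in order
                const std::vector<std::string> &Rest) {  // everything else, any order
      std::vector<std::string> Out(Fixed.begin(), Fixed.end());
      size_t EndOfPredefines = Out.size();
      Out.insert(Out.end(), Rest.begin(), Rest.end());
      // Only the tail is sorted; the predefined prefix keeps its mandated order.
      std::sort(Out.begin() + EndOfPredefines, Out.end());
      return Out;
    }
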
@@ -404,6 +404,8 @@ ComplexPattern::ComplexPattern(Record *R) {
Properties |= 1 << SDNPSideEffect;
} else if (PropList[i]->getName() == "SDNPMemOperand") {
Properties |= 1 << SDNPMemOperand;
+ } else if (PropList[i]->getName() == "SDNPVariadic") {
+ Properties |= 1 << SDNPVariadic;
} else {
errs() << "Unsupported SD Node property '" << PropList[i]->getName()
<< "' on ComplexPattern '" << R->getName() << "'!\n";
@@ -432,7 +434,7 @@ std::vector<CodeGenIntrinsic> llvm::LoadIntrinsics(const RecordKeeper &RC,
CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
TheDef = R;
std::string DefName = R->getName();
- ModRef = WriteMem;
+ ModRef = ReadWriteMem;
isOverloaded = false;
isCommutative = false;
@@ -495,15 +497,17 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
}
if (EVT(VT).isOverloaded()) {
OverloadedVTs.push_back(VT);
- isOverloaded |= true;
+ isOverloaded = true;
}
+
+ // Reject invalid types.
+ if (VT == MVT::isVoid)
+ throw "Intrinsic '" + DefName + " has void in result type list!";
+
IS.RetVTs.push_back(VT);
IS.RetTypeDefs.push_back(TyEl);
}
-
- if (IS.RetVTs.size() == 0)
- throw "Intrinsic '"+DefName+"' needs at least a type for the ret value!";
-
+
// Parse the list of parameter types.
TypeList = R->getValueAsListInit("ParamTypes");
for (unsigned i = 0, e = TypeList->getSize(); i != e; ++i) {
@@ -524,10 +528,16 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
"Expected iAny or vAny type");
} else
VT = getValueType(TyEl->getValueAsDef("VT"));
+
if (EVT(VT).isOverloaded()) {
OverloadedVTs.push_back(VT);
- isOverloaded |= true;
+ isOverloaded = true;
}
+
+ // Reject invalid types.
+ if (VT == MVT::isVoid && i != e-1 /*void at end means varargs*/)
+ throw "Intrinsic '" + DefName + " has void in result type list!";
+
IS.ParamVTs.push_back(VT);
IS.ParamTypeDefs.push_back(TyEl);
}
@@ -545,10 +555,8 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
ModRef = ReadArgMem;
else if (Property->getName() == "IntrReadMem")
ModRef = ReadMem;
- else if (Property->getName() == "IntrWriteArgMem")
- ModRef = WriteArgMem;
- else if (Property->getName() == "IntrWriteMem")
- ModRef = WriteMem;
+ else if (Property->getName() == "IntrReadWriteArgMem")
+ ModRef = ReadWriteArgMem;
else if (Property->getName() == "Commutative")
isCommutative = true;
else if (Property->isSubClassOf("NoCapture")) {
diff --git a/libclamav/c++/llvm/utils/TableGen/CodeGenTarget.h b/libclamav/c++/llvm/utils/TableGen/CodeGenTarget.h
index 07bc54d..6b06b66 100644
--- a/libclamav/c++/llvm/utils/TableGen/CodeGenTarget.h
+++ b/libclamav/c++/llvm/utils/TableGen/CodeGenTarget.h
@@ -17,16 +17,14 @@
#ifndef CODEGEN_TARGET_H
#define CODEGEN_TARGET_H
-#include "llvm/Support/raw_ostream.h"
#include "CodeGenRegisters.h"
#include "CodeGenInstruction.h"
+#include "Record.h"
+#include "llvm/Support/raw_ostream.h"
#include <algorithm>
-#include <map>
namespace llvm {
-class Record;
-class RecordKeeper;
struct CodeGenRegister;
class CodeGenTarget;
@@ -43,7 +41,8 @@ enum SDNP {
SDNPMayLoad,
SDNPMayStore,
SDNPSideEffect,
- SDNPMemOperand
+ SDNPMemOperand,
+ SDNPVariadic
};
/// getValueType - Return the MVT::SimpleValueType that the specified TableGen
@@ -62,14 +61,18 @@ std::string getQualifiedName(const Record *R);
class CodeGenTarget {
Record *TargetRec;
- mutable std::map<std::string, CodeGenInstruction> Instructions;
+ mutable DenseMap<const Record*, CodeGenInstruction*> Instructions;
mutable std::vector<CodeGenRegister> Registers;
+ mutable std::vector<Record*> SubRegIndices;
mutable std::vector<CodeGenRegisterClass> RegisterClasses;
mutable std::vector<MVT::SimpleValueType> LegalValueTypes;
void ReadRegisters() const;
+ void ReadSubRegIndices() const;
void ReadRegisterClasses() const;
void ReadInstructions() const;
void ReadLegalValueTypes() const;
+
+ mutable std::vector<const CodeGenInstruction*> InstrsByEnum;
public:
CodeGenTarget();
@@ -97,11 +100,25 @@ public:
return Registers;
}
+ const std::vector<Record*> &getSubRegIndices() const {
+ if (SubRegIndices.empty()) ReadSubRegIndices();
+ return SubRegIndices;
+ }
+
+ // Map a SubRegIndex Record to its number.
+ unsigned getSubRegIndexNo(Record *idx) const {
+ if (SubRegIndices.empty()) ReadSubRegIndices();
+ std::vector<Record*>::const_iterator i =
+ std::find(SubRegIndices.begin(), SubRegIndices.end(), idx);
+ assert(i != SubRegIndices.end() && "Not a SubRegIndex");
+ return (i - SubRegIndices.begin()) + 1;
+ }
+
const std::vector<CodeGenRegisterClass> &getRegisterClasses() const {
if (RegisterClasses.empty()) ReadRegisterClasses();
return RegisterClasses;
}
-
+
const CodeGenRegisterClass &getRegisterClass(Record *R) const {
const std::vector<CodeGenRegisterClass> &RC = getRegisterClasses();
for (unsigned i = 0, e = RC.size(); i != e; ++i)
@@ -167,7 +184,7 @@ public:
/// getRegisterVTs - Find the union of all possible SimpleValueTypes for the
/// specified physical register.
- std::vector<unsigned char> getRegisterVTs(Record *R) const;
+ std::vector<MVT::SimpleValueType> getRegisterVTs(Record *R) const;
const std::vector<MVT::SimpleValueType> &getLegalValueTypes() const {
if (LegalValueTypes.empty()) ReadLegalValueTypes();
@@ -183,37 +200,40 @@ public:
return false;
}
- /// getInstructions - Return all of the instructions defined for this target.
- ///
- const std::map<std::string, CodeGenInstruction> &getInstructions() const {
+private:
+ DenseMap<const Record*, CodeGenInstruction*> &getInstructions() const {
if (Instructions.empty()) ReadInstructions();
return Instructions;
}
- std::map<std::string, CodeGenInstruction> &getInstructions() {
+public:
+
+ CodeGenInstruction &getInstruction(const Record *InstRec) const {
if (Instructions.empty()) ReadInstructions();
- return Instructions;
+ DenseMap<const Record*, CodeGenInstruction*>::iterator I =
+ Instructions.find(InstRec);
+ assert(I != Instructions.end() && "Not an instruction");
+ return *I->second;
}
- CodeGenInstruction &getInstruction(const std::string &Name) const {
- const std::map<std::string, CodeGenInstruction> &Insts = getInstructions();
- assert(Insts.count(Name) && "Not an instruction!");
- return const_cast<CodeGenInstruction&>(Insts.find(Name)->second);
- }
-
- typedef std::map<std::string,
- CodeGenInstruction>::const_iterator inst_iterator;
- inst_iterator inst_begin() const { return getInstructions().begin(); }
- inst_iterator inst_end() const { return Instructions.end(); }
-
/// getInstructionsByEnumValue - Return all of the instructions defined by the
/// target, ordered by their enum value.
- void getInstructionsByEnumValue(std::vector<const CodeGenInstruction*>
- &NumberedInstructions);
-
+ const std::vector<const CodeGenInstruction*> &
+ getInstructionsByEnumValue() const {
+ if (InstrsByEnum.empty()) ComputeInstrsByEnum();
+ return InstrsByEnum;
+ }
+ typedef std::vector<const CodeGenInstruction*>::const_iterator inst_iterator;
+ inst_iterator inst_begin() const{return getInstructionsByEnumValue().begin();}
+ inst_iterator inst_end() const { return getInstructionsByEnumValue().end(); }
+
+
/// isLittleEndianEncoding - are instruction bit patterns defined as [0..n]?
///
bool isLittleEndianEncoding() const;
+
+private:
+ void ComputeInstrsByEnum() const;
};
/// ComplexPattern - ComplexPattern info, corresponding to the ComplexPattern
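
The getSubRegIndexNo() accessor added to this header numbers sub-register indices by their position in the sorted SubRegIndices list, starting at 1 (presumably so that 0 can be reserved for "no sub-register"). A standalone sketch of that lookup, assuming the 1-based convention:

    #include <algorithm>
    #include <cassert>
    #include <string>
    #include <vector>

    // Map a sub-register index name to its 1-based number in the target's list.
    unsigned getSubRegIndexNo(const std::vector<std::string> &SubRegIndices,
                              const std::string &Idx) {
      std::vector<std::string>::const_iterator I =
          std::find(SubRegIndices.begin(), SubRegIndices.end(), Idx);
      assert(I != SubRegIndices.end() && "Not a SubRegIndex");
      return unsigned(I - SubRegIndices.begin()) + 1;  // numbering starts at 1
    }
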
diff --git a/libclamav/c++/llvm/utils/TableGen/DAGISelEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/DAGISelEmitter.cpp
index e0fa7c8..8a73404 100644
--- a/libclamav/c++/llvm/utils/TableGen/DAGISelEmitter.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/DAGISelEmitter.cpp
@@ -21,55 +21,6 @@ using namespace llvm;
// DAGISelEmitter Helper methods
//
-/// getPatternSize - Return the 'size' of this pattern. We want to match large
-/// patterns before small ones. This is used to determine the size of a
-/// pattern.
-static unsigned getPatternSize(TreePatternNode *P, CodeGenDAGPatterns &CGP) {
- assert((EEVT::isExtIntegerInVTs(P->getExtTypes()) ||
- EEVT::isExtFloatingPointInVTs(P->getExtTypes()) ||
- P->getExtTypeNum(0) == MVT::isVoid ||
- P->getExtTypeNum(0) == MVT::Flag ||
- P->getExtTypeNum(0) == MVT::iPTR ||
- P->getExtTypeNum(0) == MVT::iPTRAny) &&
- "Not a valid pattern node to size!");
- unsigned Size = 3; // The node itself.
- // If the root node is a ConstantSDNode, increases its size.
- // e.g. (set R32:$dst, 0).
- if (P->isLeaf() && dynamic_cast<IntInit*>(P->getLeafValue()))
- Size += 2;
-
- // FIXME: This is a hack to statically increase the priority of patterns
- // which maps a sub-dag to a complex pattern. e.g. favors LEA over ADD.
- // Later we can allow complexity / cost for each pattern to be (optionally)
- // specified. To get best possible pattern match we'll need to dynamically
- // calculate the complexity of all patterns a dag can potentially map to.
- const ComplexPattern *AM = P->getComplexPatternInfo(CGP);
- if (AM)
- Size += AM->getNumOperands() * 3;
-
- // If this node has some predicate function that must match, it adds to the
- // complexity of this node.
- if (!P->getPredicateFns().empty())
- ++Size;
-
- // Count children in the count if they are also nodes.
- for (unsigned i = 0, e = P->getNumChildren(); i != e; ++i) {
- TreePatternNode *Child = P->getChild(i);
- if (!Child->isLeaf() && Child->getExtTypeNum(0) != MVT::Other)
- Size += getPatternSize(Child, CGP);
- else if (Child->isLeaf()) {
- if (dynamic_cast<IntInit*>(Child->getLeafValue()))
- Size += 5; // Matches a ConstantSDNode (+3) and a specific value (+2).
- else if (Child->getComplexPatternInfo(CGP))
- Size += getPatternSize(Child, CGP);
- else if (!Child->getPredicateFns().empty())
- ++Size;
- }
- }
-
- return Size;
-}
-
/// getResultPatternCost - Compute the number of instructions for this pattern.
/// This is a temporary hack. We should really include the instruction
/// latencies in this calculation.
@@ -81,7 +32,7 @@ static unsigned getResultPatternCost(TreePatternNode *P,
Record *Op = P->getOperator();
if (Op->isSubClassOf("Instruction")) {
Cost++;
- CodeGenInstruction &II = CGP.getTargetInfo().getInstruction(Op->getName());
+ CodeGenInstruction &II = CGP.getTargetInfo().getInstruction(Op);
if (II.usesCustomInserter)
Cost += 10;
}
@@ -106,51 +57,6 @@ static unsigned getResultPatternSize(TreePatternNode *P,
return Cost;
}
-//===----------------------------------------------------------------------===//
-// Predicate emitter implementation.
-//
-
-void DAGISelEmitter::EmitPredicateFunctions(raw_ostream &OS) {
- OS << "\n// Predicate functions.\n";
-
- // Walk the pattern fragments, adding them to a map, which sorts them by
- // name.
- typedef std::map<std::string, std::pair<Record*, TreePattern*> > PFsByNameTy;
- PFsByNameTy PFsByName;
-
- for (CodeGenDAGPatterns::pf_iterator I = CGP.pf_begin(), E = CGP.pf_end();
- I != E; ++I)
- PFsByName.insert(std::make_pair(I->first->getName(), *I));
-
-
- for (PFsByNameTy::iterator I = PFsByName.begin(), E = PFsByName.end();
- I != E; ++I) {
- Record *PatFragRecord = I->second.first;// Record that derives from PatFrag.
- TreePattern *P = I->second.second;
-
- // If there is a code init for this fragment, emit the predicate code.
- std::string Code = PatFragRecord->getValueAsCode("Predicate");
- if (Code.empty()) continue;
-
- if (P->getOnlyTree()->isLeaf())
- OS << "inline bool Predicate_" << PatFragRecord->getName()
- << "(SDNode *N) const {\n";
- else {
- std::string ClassName =
- CGP.getSDNodeInfo(P->getOnlyTree()->getOperator()).getSDClassName();
- const char *C2 = ClassName == "SDNode" ? "N" : "inN";
-
- OS << "inline bool Predicate_" << PatFragRecord->getName()
- << "(SDNode *" << C2 << ") const {\n";
- if (ClassName != "SDNode")
- OS << " " << ClassName << " *N = cast<" << ClassName << ">(inN);\n";
- }
- OS << Code << "\n}\n";
- }
-
- OS << "\n\n";
-}
-
namespace {
// PatternSortingPredicate - return true if we prefer to match LHS before RHS.
// In particular, we want to match maximal patterns first and lowest cost within
@@ -159,12 +65,25 @@ struct PatternSortingPredicate {
PatternSortingPredicate(CodeGenDAGPatterns &cgp) : CGP(cgp) {}
CodeGenDAGPatterns &CGP;
- bool operator()(const PatternToMatch *LHS,
- const PatternToMatch *RHS) {
- unsigned LHSSize = getPatternSize(LHS->getSrcPattern(), CGP);
- unsigned RHSSize = getPatternSize(RHS->getSrcPattern(), CGP);
- LHSSize += LHS->getAddedComplexity();
- RHSSize += RHS->getAddedComplexity();
+ bool operator()(const PatternToMatch *LHS, const PatternToMatch *RHS) {
+ const TreePatternNode *LHSSrc = LHS->getSrcPattern();
+ const TreePatternNode *RHSSrc = RHS->getSrcPattern();
+
+ if (LHSSrc->getNumTypes() != 0 && RHSSrc->getNumTypes() != 0 &&
+ LHSSrc->getType(0) != RHSSrc->getType(0)) {
+ MVT::SimpleValueType V1 = LHSSrc->getType(0), V2 = RHSSrc->getType(0);
+ if (MVT(V1).isVector() != MVT(V2).isVector())
+ return MVT(V2).isVector();
+
+ if (MVT(V1).isFloatingPoint() != MVT(V2).isFloatingPoint())
+ return MVT(V2).isFloatingPoint();
+ }
+
+ // Otherwise, if the patterns might both match, sort based on complexity,
+ // which means that we prefer to match patterns that cover more nodes in the
+ // input over patterns that cover fewer.
+ unsigned LHSSize = LHS->getPatternComplexity(CGP);
+ unsigned RHSSize = RHS->getPatternComplexity(CGP);
if (LHSSize > RHSSize) return true; // LHS -> bigger -> less cost
if (LHSSize < RHSSize) return false;
@@ -179,7 +98,8 @@ struct PatternSortingPredicate {
if (LHSPatSize < RHSPatSize) return true;
if (LHSPatSize > RHSPatSize) return false;
- // Sort based on the UID of the pattern, giving us a deterministic ordering.
+ // Sort based on the UID of the pattern, giving us a deterministic ordering
+ // if all other sorting conditions fail.
assert(LHS == RHS || LHS->ID != RHS->ID);
return LHS->ID < RHS->ID;
}
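
The comparator above is a chain of tie-breakers: scalar types before vector types, integer before floating point, then higher complexity first, then cheaper and smaller result patterns, and finally the pattern ID for a deterministic order. A simplified standalone comparator with the same shape (the field names are invented for the sketch):

    #include <algorithm>
    #include <vector>

    struct Pat {
      bool IsVector, IsFP;   // coarse type class of the source pattern
      unsigned Complexity;   // nodes covered (plus AddedComplexity)
      unsigned Cost, Size;   // result-pattern cost and size
      unsigned ID;           // unique id, last-resort tie-breaker
    };

    struct PatternSortingPredicate {
      bool operator()(const Pat *L, const Pat *R) const {
        if (L->IsVector != R->IsVector) return R->IsVector; // scalars first
        if (L->IsFP != R->IsFP) return R->IsFP;             // integers first
        if (L->Complexity != R->Complexity)
          return L->Complexity > R->Complexity;             // bigger match first
        if (L->Cost != R->Cost) return L->Cost < R->Cost;   // then cheaper
        if (L->Size != R->Size) return L->Size < R->Size;   // then smaller
        return L->ID < R->ID;                               // deterministic
      }
    };
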
@@ -203,9 +123,6 @@ void DAGISelEmitter::run(raw_ostream &OS) {
errs() << "\n";
});
- // FIXME: These are being used by hand written code, gross.
- EmitPredicateFunctions(OS);
-
// Add all the patterns to a temporary list so we can sort them.
std::vector<const PatternToMatch*> Patterns;
for (CodeGenDAGPatterns::ptm_iterator I = CGP.ptm_begin(), E = CGP.ptm_end();
@@ -214,8 +131,7 @@ void DAGISelEmitter::run(raw_ostream &OS) {
// We want to process the matches in order of minimal cost. Sort the patterns
// so the least cost one is at the start.
- std::stable_sort(Patterns.begin(), Patterns.end(),
- PatternSortingPredicate(CGP));
+ std::sort(Patterns.begin(), Patterns.end(), PatternSortingPredicate(CGP));
// Convert each variant of each pattern into a Matcher.
diff --git a/libclamav/c++/llvm/utils/TableGen/DAGISelEmitter.h b/libclamav/c++/llvm/utils/TableGen/DAGISelEmitter.h
index 5ffdde8..2117e65 100644
--- a/libclamav/c++/llvm/utils/TableGen/DAGISelEmitter.h
+++ b/libclamav/c++/llvm/utils/TableGen/DAGISelEmitter.h
@@ -31,8 +31,6 @@ public:
// run - Output the isel, returning true on failure.
void run(raw_ostream &OS);
-private:
- void EmitPredicateFunctions(raw_ostream &OS);
};
} // End llvm namespace
diff --git a/libclamav/c++/llvm/utils/TableGen/DAGISelMatcher.cpp b/libclamav/c++/llvm/utils/TableGen/DAGISelMatcher.cpp
index 22d2fe8..9f12a68 100644
--- a/libclamav/c++/llvm/utils/TableGen/DAGISelMatcher.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/DAGISelMatcher.cpp
@@ -147,7 +147,8 @@ void SwitchOpcodeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
void CheckTypeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
- OS.indent(indent) << "CheckType " << getEnumName(Type) << '\n';
+ OS.indent(indent) << "CheckType " << getEnumName(Type) << ", ResNo="
+ << ResNo << '\n';
}
void SwitchTypeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
@@ -356,15 +357,13 @@ bool CheckOpcodeMatcher::isContradictoryImpl(const Matcher *M) const {
// different, then we know they contradict. For example, a check for
// ISD::STORE will never be true at the same time a check for Type i32 is.
if (const CheckTypeMatcher *CT = dyn_cast<CheckTypeMatcher>(M)) {
- // FIXME: What result is this referring to?
- unsigned NodeType;
- if (getOpcode().getNumResults() == 0)
- NodeType = MVT::isVoid;
- else
- NodeType = getOpcode().getKnownType();
- if (NodeType != EEVT::isUnknown)
- return TypesAreContradictory((MVT::SimpleValueType)NodeType,
- CT->getType());
+ // If checking for a result the opcode doesn't have, it can't match.
+ if (CT->getResNo() >= getOpcode().getNumResults())
+ return true;
+
+ MVT::SimpleValueType NodeType = getOpcode().getKnownType(CT->getResNo());
+ if (NodeType != MVT::Other)
+ return TypesAreContradictory(NodeType, CT->getType());
}
return false;
diff --git a/libclamav/c++/llvm/utils/TableGen/DAGISelMatcher.h b/libclamav/c++/llvm/utils/TableGen/DAGISelMatcher.h
index ef7ecf4..d9b25d5 100644
--- a/libclamav/c++/llvm/utils/TableGen/DAGISelMatcher.h
+++ b/libclamav/c++/llvm/utils/TableGen/DAGISelMatcher.h
@@ -492,14 +492,16 @@ private:
};
/// CheckTypeMatcher - This checks to see if the current node has the
-/// specified type, if not it fails to match.
+/// specified type at the specified result, if not it fails to match.
class CheckTypeMatcher : public Matcher {
MVT::SimpleValueType Type;
+ unsigned ResNo;
public:
- CheckTypeMatcher(MVT::SimpleValueType type)
- : Matcher(CheckType), Type(type) {}
+ CheckTypeMatcher(MVT::SimpleValueType type, unsigned resno)
+ : Matcher(CheckType), Type(type), ResNo(resno) {}
MVT::SimpleValueType getType() const { return Type; }
+ unsigned getResNo() const { return ResNo; }
static inline bool classof(const Matcher *N) {
return N->getKind() == CheckType;
diff --git a/libclamav/c++/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
index cabf2d4..dfbfe80 100644
--- a/libclamav/c++/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file contains code to generate C++ code a matcher.
+// This file contains code to generate C++ code for a matcher.
//
//===----------------------------------------------------------------------===//
@@ -32,6 +32,7 @@ OmitComments("omit-comments", cl::desc("Do not generate comments"),
namespace {
class MatcherTableEmitter {
+ const CodeGenDAGPatterns &CGP;
StringMap<unsigned> NodePredicateMap, PatternPredicateMap;
std::vector<std::string> NodePredicates, PatternPredicates;
@@ -43,13 +44,12 @@ class MatcherTableEmitter {
std::vector<Record*> NodeXForms;
public:
- MatcherTableEmitter() {}
+ MatcherTableEmitter(const CodeGenDAGPatterns &cgp) : CGP(cgp) {}
unsigned EmitMatcherList(const Matcher *N, unsigned Indent,
unsigned StartIdx, formatted_raw_ostream &OS);
- void EmitPredicateFunctions(const CodeGenDAGPatterns &CGP,
- formatted_raw_ostream &OS);
+ void EmitPredicateFunctions(formatted_raw_ostream &OS);
void EmitHistogram(const Matcher *N, formatted_raw_ostream &OS);
private:
@@ -255,9 +255,9 @@ EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
}
case Matcher::CheckOpcode:
- OS << "OPC_CheckOpcode, "
- << cast<CheckOpcodeMatcher>(N)->getOpcode().getEnumName() << ",\n";
- return 2;
+ OS << "OPC_CheckOpcode, TARGET_OPCODE("
+ << cast<CheckOpcodeMatcher>(N)->getOpcode().getEnumName() << "),\n";
+ return 3;
case Matcher::SwitchOpcode:
case Matcher::SwitchType: {
@@ -280,10 +280,14 @@ EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
// For each case we emit the size, then the opcode, then the matcher.
for (unsigned i = 0, e = NumCases; i != e; ++i) {
const Matcher *Child;
- if (const SwitchOpcodeMatcher *SOM = dyn_cast<SwitchOpcodeMatcher>(N))
+ unsigned IdxSize;
+ if (const SwitchOpcodeMatcher *SOM = dyn_cast<SwitchOpcodeMatcher>(N)) {
Child = SOM->getCaseMatcher(i);
- else
+ IdxSize = 2; // size of opcode in table is 2 bytes.
+ } else {
Child = cast<SwitchTypeMatcher>(N)->getCaseMatcher(i);
+ IdxSize = 1; // size of type in table is 1 byte.
+ }
// We need to encode the opcode and the offset of the case code before
// emitting the case code. Handle this by buffering the output into a
@@ -299,7 +303,8 @@ EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
TmpBuf.clear();
raw_svector_ostream OS(TmpBuf);
formatted_raw_ostream FOS(OS);
- ChildSize = EmitMatcherList(Child, Indent+1, CurrentIdx+VBRSize+1, FOS);
+ ChildSize = EmitMatcherList(Child, Indent+1, CurrentIdx+VBRSize+IdxSize,
+ FOS);
} while (GetVBRSize(ChildSize) != VBRSize);
assert(ChildSize != 0 && "Should not have a zero-sized child!");
@@ -316,15 +321,15 @@ EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
OS << ' ';
if (const SwitchOpcodeMatcher *SOM = dyn_cast<SwitchOpcodeMatcher>(N))
- OS << SOM->getCaseOpcode(i).getEnumName();
+ OS << "TARGET_OPCODE(" << SOM->getCaseOpcode(i).getEnumName() << "),";
else
- OS << getEnumName(cast<SwitchTypeMatcher>(N)->getCaseType(i));
- OS << ',';
-
+ OS << getEnumName(cast<SwitchTypeMatcher>(N)->getCaseType(i)) << ',';
+
+ CurrentIdx += IdxSize;
+
if (!OmitComments)
- OS << "// ->" << CurrentIdx+ChildSize+1;
+ OS << "// ->" << CurrentIdx+ChildSize;
OS << '\n';
- ++CurrentIdx;
OS << TmpBuf.str();
CurrentIdx += ChildSize;
}
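
The do/while above re-emits each case until the VBR-encoded child size stops changing width, since the width of the size field itself shifts every offset inside the child. For reference, a standalone sketch of a 7-bits-per-byte VBR encoder of the kind this table appears to use (the exact on-disk format is an assumption here, not taken from the patch):

    #include <vector>

    // Number of bytes needed to encode Val with 7 payload bits per byte.
    unsigned GetVBRSize(unsigned Val) {
      unsigned NumBytes = 1;
      while (Val >= 128) { Val >>= 7; ++NumBytes; }
      return NumBytes;
    }

    // Emit Val as a variable-width byte sequence: high bit set on every byte
    // except the last, which terminates the value.
    std::vector<unsigned char> EmitVBRValue(unsigned Val) {
      std::vector<unsigned char> Out;
      while (Val >= 128) {
        Out.push_back((unsigned char)((Val & 127) | 128));  // more bytes follow
        Val >>= 7;
      }
      Out.push_back((unsigned char)Val);                    // final byte
      return Out;
    }
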
@@ -341,6 +346,8 @@ EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
}
case Matcher::CheckType:
+ assert(cast<CheckTypeMatcher>(N)->getResNo() == 0 &&
+ "FIXME: Add support for CheckType of resno != 0");
OS << "OPC_CheckType, "
<< getEnumName(cast<CheckTypeMatcher>(N)->getType()) << ",\n";
return 2;
@@ -442,6 +449,13 @@ EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
case Matcher::EmitMergeInputChains: {
const EmitMergeInputChainsMatcher *MN =
cast<EmitMergeInputChainsMatcher>(N);
+
+ // Handle the specialized forms OPC_EmitMergeInputChains1_0 and 1_1.
+ if (MN->getNumNodes() == 1 && MN->getNode(0) < 2) {
+ OS << "OPC_EmitMergeInputChains1_" << MN->getNode(0) << ",\n";
+ return 1;
+ }
+
OS << "OPC_EmitMergeInputChains, " << MN->getNumNodes() << ", ";
for (unsigned i = 0, e = MN->getNumNodes(); i != e; ++i)
OS << MN->getNode(i) << ", ";
@@ -507,7 +521,8 @@ EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
if (const MorphNodeToMatcher *SNT = dyn_cast<MorphNodeToMatcher>(N)) {
OS.PadToColumn(Indent*2) << "// Src: "
- << *SNT->getPattern().getSrcPattern() << '\n';
+ << *SNT->getPattern().getSrcPattern() << " - Complexity = "
+ << SNT->getPattern().getPatternComplexity(CGP) << '\n';
OS.PadToColumn(Indent*2) << "// Dst: "
<< *SNT->getPattern().getDstPattern() << '\n';
}
@@ -534,7 +549,8 @@ EmitMatcher(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
OS << '\n';
if (!OmitComments) {
OS.PadToColumn(Indent*2) << "// Src: "
- << *CM->getPattern().getSrcPattern() << '\n';
+ << *CM->getPattern().getSrcPattern() << " - Complexity = "
+ << CM->getPattern().getPatternComplexity(CGP) << '\n';
OS.PadToColumn(Indent*2) << "// Dst: "
<< *CM->getPattern().getDstPattern();
}
@@ -565,8 +581,7 @@ EmitMatcherList(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
return Size;
}
-void MatcherTableEmitter::EmitPredicateFunctions(const CodeGenDAGPatterns &CGP,
- formatted_raw_ostream &OS) {
+void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
// Emit pattern predicates.
if (!PatternPredicates.empty()) {
OS << "bool CheckPatternPredicate(unsigned PredNo) const {\n";
@@ -620,6 +635,7 @@ void MatcherTableEmitter::EmitPredicateFunctions(const CodeGenDAGPatterns &CGP,
if (!ComplexPatterns.empty()) {
OS << "bool CheckComplexPattern(SDNode *Root, SDValue N,\n";
OS << " unsigned PatternNo, SmallVectorImpl<SDValue> &Result) {\n";
+ OS << " unsigned NextRes = Result.size();\n";
OS << " switch (PatternNo) {\n";
OS << " default: assert(0 && \"Invalid pattern # in table?\");\n";
for (unsigned i = 0, e = ComplexPatterns.size(); i != e; ++i) {
@@ -630,12 +646,12 @@ void MatcherTableEmitter::EmitPredicateFunctions(const CodeGenDAGPatterns &CGP,
++NumOps; // Get the chained node too.
OS << " case " << i << ":\n";
- OS << " Result.resize(Result.size()+" << NumOps << ");\n";
+ OS << " Result.resize(NextRes+" << NumOps << ");\n";
OS << " return " << P.getSelectFunc();
OS << "(Root, N";
for (unsigned i = 0; i != NumOps; ++i)
- OS << ", Result[Result.size()-" << (NumOps-i) << ']';
+ OS << ", Result[NextRes+" << i << ']';
OS << ");\n";
}
OS << " }\n";
@@ -760,7 +776,7 @@ void llvm::EmitMatcherTable(const Matcher *TheMatcher,
OS << "// The main instruction selector code.\n";
OS << "SDNode *SelectCode(SDNode *N) {\n";
- MatcherTableEmitter MatcherEmitter;
+ MatcherTableEmitter MatcherEmitter(CGP);
OS << " // Opcodes are emitted as 2 bytes, TARGET_OPCODE handles this.\n";
OS << " #define TARGET_OPCODE(X) X & 255, unsigned(X) >> 8\n";
@@ -775,5 +791,5 @@ void llvm::EmitMatcherTable(const Matcher *TheMatcher,
OS << '\n';
// Next up, emit the function for node and pattern predicates:
- MatcherEmitter.EmitPredicateFunctions(CGP, OS);
+ MatcherEmitter.EmitPredicateFunctions(OS);
}
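
The TARGET_OPCODE(X) macro emitted near the end of this file turns each (potentially >255) opcode into two little-endian table bytes. A tiny standalone illustration of the split and of the presumed re-assembly on the reader's side:

    #include <cassert>

    // Writer side: what "X & 255, unsigned(X) >> 8" expands to in the table.
    inline void splitOpcode(unsigned Opc, unsigned char &Lo, unsigned char &Hi) {
      Lo = (unsigned char)(Opc & 255);
      Hi = (unsigned char)(Opc >> 8);
    }

    // Reader side (assumed): rebuild the 16-bit opcode from the two bytes.
    inline unsigned joinOpcode(unsigned char Lo, unsigned char Hi) {
      return unsigned(Lo) | (unsigned(Hi) << 8);
    }

    int main() {
      unsigned char Lo, Hi;
      splitOpcode(1234, Lo, Hi);
      assert(joinOpcode(Lo, Hi) == 1234);  // round-trips
      return 0;
    }
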
diff --git a/libclamav/c++/llvm/utils/TableGen/DAGISelMatcherGen.cpp b/libclamav/c++/llvm/utils/TableGen/DAGISelMatcherGen.cpp
index 4951a42..aba6636 100644
--- a/libclamav/c++/llvm/utils/TableGen/DAGISelMatcherGen.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/DAGISelMatcherGen.cpp
@@ -224,6 +224,7 @@ void MatcherGen::EmitLeafMatchCode(const TreePatternNode *N) {
if (// Handle register references. Nothing to do here, they always match.
LeafRec->isSubClassOf("RegisterClass") ||
LeafRec->isSubClassOf("PointerLikeRegClass") ||
+ LeafRec->isSubClassOf("SubRegIndex") ||
// Place holder for SRCVALUE nodes. Nothing to do here.
LeafRec->getName() == "srcvalue")
return;
@@ -408,11 +409,13 @@ void MatcherGen::EmitMatchCode(const TreePatternNode *N,
// If N and NodeNoTypes don't agree on a type, then this is a case where we
// need to do a type check. Emit the check, apply the type to NodeNoTypes and
// reinfer any correlated types.
- unsigned NodeType = EEVT::isUnknown;
- if (NodeNoTypes->getExtTypes() != N->getExtTypes()) {
- NodeType = N->getTypeNum(0);
- NodeNoTypes->setTypes(N->getExtTypes());
+ SmallVector<unsigned, 2> ResultsToTypeCheck;
+
+ for (unsigned i = 0, e = NodeNoTypes->getNumTypes(); i != e; ++i) {
+ if (NodeNoTypes->getExtType(i) == N->getExtType(i)) continue;
+ NodeNoTypes->setType(i, N->getExtType(i));
InferPossibleTypes();
+ ResultsToTypeCheck.push_back(i);
}
// If this node has a name associated with it, capture it in VariableMap. If
@@ -442,8 +445,9 @@ void MatcherGen::EmitMatchCode(const TreePatternNode *N,
for (unsigned i = 0, e = N->getPredicateFns().size(); i != e; ++i)
AddMatcher(new CheckPredicateMatcher(N->getPredicateFns()[i]));
- if (NodeType != EEVT::isUnknown)
- AddMatcher(new CheckTypeMatcher((MVT::SimpleValueType)NodeType));
+ for (unsigned i = 0, e = ResultsToTypeCheck.size(); i != e; ++i)
+ AddMatcher(new CheckTypeMatcher(N->getType(ResultsToTypeCheck[i]),
+ ResultsToTypeCheck[i]));
}
/// EmitMatcherCode - Generate the code that matches the predicate of this
@@ -567,7 +571,7 @@ void MatcherGen::EmitResultLeafAsOperand(const TreePatternNode *N,
assert(N->isLeaf() && "Must be a leaf");
if (IntInit *II = dynamic_cast<IntInit*>(N->getLeafValue())) {
- AddMatcher(new EmitIntegerMatcher(II->getValue(),N->getTypeNum(0)));
+ AddMatcher(new EmitIntegerMatcher(II->getValue(), N->getType(0)));
ResultOps.push_back(NextRecordedOperandNo++);
return;
}
@@ -575,14 +579,13 @@ void MatcherGen::EmitResultLeafAsOperand(const TreePatternNode *N,
// If this is an explicit register reference, handle it.
if (DefInit *DI = dynamic_cast<DefInit*>(N->getLeafValue())) {
if (DI->getDef()->isSubClassOf("Register")) {
- AddMatcher(new EmitRegisterMatcher(DI->getDef(),
- N->getTypeNum(0)));
+ AddMatcher(new EmitRegisterMatcher(DI->getDef(), N->getType(0)));
ResultOps.push_back(NextRecordedOperandNo++);
return;
}
if (DI->getDef()->getName() == "zero_reg") {
- AddMatcher(new EmitRegisterMatcher(0, N->getTypeNum(0)));
+ AddMatcher(new EmitRegisterMatcher(0, N->getType(0)));
ResultOps.push_back(NextRecordedOperandNo++);
return;
}
@@ -595,6 +598,14 @@ void MatcherGen::EmitResultLeafAsOperand(const TreePatternNode *N,
ResultOps.push_back(NextRecordedOperandNo++);
return;
}
+
+ // Handle a subregister index. This is used for INSERT_SUBREG etc.
+ if (DI->getDef()->isSubClassOf("SubRegIndex")) {
+ std::string Value = getQualifiedName(DI->getDef());
+ AddMatcher(new EmitStringIntegerMatcher(Value, MVT::i32));
+ ResultOps.push_back(NextRecordedOperandNo++);
+ return;
+ }
}
errs() << "unhandled leaf node: \n";
@@ -628,7 +639,7 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
SmallVectorImpl<unsigned> &OutputOps) {
Record *Op = N->getOperator();
const CodeGenTarget &CGT = CGP.getTargetInfo();
- CodeGenInstruction &II = CGT.getInstruction(Op->getName());
+ CodeGenInstruction &II = CGT.getInstruction(Op);
const DAGInstruction &Inst = CGP.getInstruction(Op);
// If we can, get the pattern for the instruction we're generating. We derive
@@ -678,16 +689,26 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
!CGP.getDefaultOperand(OperandNode).DefaultOps.empty()) {
// This is a predicate or optional def operand; emit the
// 'default ops' operands.
- const DAGDefaultOperand &DefaultOp =
- CGP.getDefaultOperand(II.OperandList[InstOpNo].Rec);
+ const DAGDefaultOperand &DefaultOp
+ = CGP.getDefaultOperand(OperandNode);
for (unsigned i = 0, e = DefaultOp.DefaultOps.size(); i != e; ++i)
EmitResultOperand(DefaultOp.DefaultOps[i], InstOps);
continue;
}
+ const TreePatternNode *Child = N->getChild(ChildNo);
+
// Otherwise this is a normal operand or a predicate operand without
// 'execute always'; emit it.
- EmitResultOperand(N->getChild(ChildNo), InstOps);
+ unsigned BeforeAddingNumOps = InstOps.size();
+ EmitResultOperand(Child, InstOps);
+ assert(InstOps.size() > BeforeAddingNumOps && "Didn't add any operands");
+
+ // If the operand is an instruction and it produced multiple results, just
+ // take the first one.
+ if (!Child->isLeaf() && Child->getOperator()->isSubClassOf("Instruction"))
+ InstOps.resize(BeforeAddingNumOps+1);
+
++ChildNo;
}
@@ -699,7 +720,7 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
// occur in patterns like (mul:i8 AL:i8, GR8:i8:$src).
for (unsigned i = 0, e = PhysRegInputs.size(); i != e; ++i)
AddMatcher(new EmitCopyToRegMatcher(PhysRegInputs[i].second,
- PhysRegInputs[i].first));
+ PhysRegInputs[i].first));
// Even if the node has no other flag inputs, the resultant node must be
// flagged to the CopyFromReg nodes we just generated.
TreeHasInFlag = true;
@@ -709,29 +730,34 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
// Determine the result types.
SmallVector<MVT::SimpleValueType, 4> ResultVTs;
- if (NumResults != 0 && N->getTypeNum(0) != MVT::isVoid) {
- // FIXME2: If the node has multiple results, we should add them. For now,
- // preserve existing behavior?!
- ResultVTs.push_back(N->getTypeNum(0));
- }
-
+ for (unsigned i = 0, e = N->getNumTypes(); i != e; ++i)
+ ResultVTs.push_back(N->getType(i));
// If this is the root instruction of a pattern that has physical registers in
// its result pattern, add output VTs for them. For example, X86 has:
// (set AL, (mul ...))
// This also handles implicit results like:
// (implicit EFLAGS)
- if (isRoot && Pattern.getDstRegs().size() != 0) {
- for (unsigned i = 0; i != Pattern.getDstRegs().size(); ++i)
- if (Pattern.getDstRegs()[i]->isSubClassOf("Register"))
- ResultVTs.push_back(getRegisterValueType(Pattern.getDstRegs()[i], CGT));
+ if (isRoot && !Pattern.getDstRegs().empty()) {
+ // If the root came from an implicit def in the instruction handling stuff,
+ // don't re-add it.
+ Record *HandledReg = 0;
+ if (II.HasOneImplicitDefWithKnownVT(CGT) != MVT::Other)
+ HandledReg = II.ImplicitDefs[0];
+
+ for (unsigned i = 0; i != Pattern.getDstRegs().size(); ++i) {
+ Record *Reg = Pattern.getDstRegs()[i];
+ if (!Reg->isSubClassOf("Register") || Reg == HandledReg) continue;
+ ResultVTs.push_back(getRegisterValueType(Reg, CGT));
+ }
}
- // FIXME2: Instead of using the isVariadic flag on the instruction, we should
- // have an SDNP that indicates variadicism. The TargetInstrInfo isVariadic
- // property should be inferred from this when an instruction has a pattern.
+ // If this is the root of the pattern and the pattern we're matching includes
+ // a node that is variadic, mark the generated node as variadic so that it
+ // gets the excess operands from the input DAG.
int NumFixedArityOperands = -1;
- if (isRoot && II.isVariadic)
+ if (isRoot &&
+ (Pattern.getSrcPattern()->NodeHasProperty(SDNPVariadic, CGP)))
NumFixedArityOperands = Pattern.getSrcPattern()->getNumChildren();
// If this is the root node and any of the nodes matched nodes in the input
@@ -750,6 +776,9 @@ EmitResultInstructionAsOperand(const TreePatternNode *N,
bool NodeHasMemRefs =
isRoot && Pattern.getSrcPattern()->TreeHasProperty(SDNPMemOperand, CGP);
+ assert((!ResultVTs.empty() || TreeHasOutFlag || NodeHasChain) &&
+ "Node has no result");
+
AddMatcher(new EmitNodeMatcher(II.Namespace+"::"+II.TheDef->getName(),
ResultVTs.data(), ResultVTs.size(),
InstOps.data(), InstOps.size(),
@@ -817,33 +846,35 @@ void MatcherGen::EmitResultCode() {
// At this point, we have however many values the result pattern produces.
// However, the input pattern might not need all of these. If there are
- // excess values at the end (such as condition codes etc) just lop them off.
- // This doesn't need to worry about flags or chains, just explicit results.
- //
- // FIXME2: This doesn't work because there is currently no way to get an
- // accurate count of the # results the source pattern sets. This is because
- // of the "parallel" construct in X86 land, which looks like this:
+ // excess values at the end (such as implicit defs of condition codes etc)
+ // just lop them off. This doesn't need to worry about flags or chains, just
+ // explicit results.
//
- //def : Pat<(parallel (X86and_flag GR8:$src1, GR8:$src2),
- // (implicit EFLAGS)),
- // (AND8rr GR8:$src1, GR8:$src2)>;
- //
- // This idiom means to match the two-result node X86and_flag (which is
- // declared as returning a single result, because we can't match multi-result
- // nodes yet). In this case, we would have to know that the input has two
- // results. However, mul8r is modelled exactly the same way, but without
- // implicit defs included. The fix is to support multiple results directly
- // and eliminate 'parallel'.
- //
- // FIXME2: When this is fixed, we should revert the terrible hack in the
- // OPC_EmitNode code in the interpreter.
-#if 0
- const TreePatternNode *Src = Pattern.getSrcPattern();
- unsigned NumSrcResults = Src->getTypeNum(0) != MVT::isVoid ? 1 : 0;
- NumSrcResults += Pattern.getDstRegs().size();
+ unsigned NumSrcResults = Pattern.getSrcPattern()->getNumTypes();
+
+ // If the pattern also has (implicit) results, count them as well.
+ if (!Pattern.getDstRegs().empty()) {
+ // If the root came from an implicit def in the instruction handling stuff,
+ // don't re-add it.
+ Record *HandledReg = 0;
+ const TreePatternNode *DstPat = Pattern.getDstPattern();
+ if (!DstPat->isLeaf() &&DstPat->getOperator()->isSubClassOf("Instruction")){
+ const CodeGenTarget &CGT = CGP.getTargetInfo();
+ CodeGenInstruction &II = CGT.getInstruction(DstPat->getOperator());
+
+ if (II.HasOneImplicitDefWithKnownVT(CGT) != MVT::Other)
+ HandledReg = II.ImplicitDefs[0];
+ }
+
+ for (unsigned i = 0; i != Pattern.getDstRegs().size(); ++i) {
+ Record *Reg = Pattern.getDstRegs()[i];
+ if (!Reg->isSubClassOf("Register") || Reg == HandledReg) continue;
+ ++NumSrcResults;
+ }
+ }
+
assert(Ops.size() >= NumSrcResults && "Didn't provide enough results");
Ops.resize(NumSrcResults);
-#endif
// If the matched pattern covers nodes which define a flag result, emit a node
// that tells the matcher about them so that it can update their results.
@@ -877,6 +908,3 @@ Matcher *llvm::ConvertPatternToMatcher(const PatternToMatch &Pattern,
// Unconditional match.
return Gen.GetMatcher();
}
-
-
-
diff --git a/libclamav/c++/llvm/utils/TableGen/DAGISelMatcherOpt.cpp b/libclamav/c++/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
index 910c4c5..c73bdb9 100644
--- a/libclamav/c++/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/DAGISelMatcherOpt.cpp
@@ -44,11 +44,14 @@ static void ContractNodes(OwningPtr<Matcher> &MatcherPtr,
if (MoveChildMatcher *MC = dyn_cast<MoveChildMatcher>(N)) {
Matcher *New = 0;
if (RecordMatcher *RM = dyn_cast<RecordMatcher>(MC->getNext()))
- New = new RecordChildMatcher(MC->getChildNo(), RM->getWhatFor(),
- RM->getResultNo());
+ if (MC->getChildNo() < 8) // Only have RecordChild0...7
+ New = new RecordChildMatcher(MC->getChildNo(), RM->getWhatFor(),
+ RM->getResultNo());
- if (CheckTypeMatcher *CT= dyn_cast<CheckTypeMatcher>(MC->getNext()))
- New = new CheckChildTypeMatcher(MC->getChildNo(), CT->getType());
+ if (CheckTypeMatcher *CT = dyn_cast<CheckTypeMatcher>(MC->getNext()))
+ if (MC->getChildNo() < 8 && // Only have CheckChildType0...7
+ CT->getResNo() == 0) // CheckChildType checks res #0
+ New = new CheckChildTypeMatcher(MC->getChildNo(), CT->getType());
if (New) {
// Insert the new node.
@@ -418,10 +421,12 @@ static void FactorNodes(OwningPtr<Matcher> &MatcherPtr) {
CheckTypeMatcher *CTM =
cast_or_null<CheckTypeMatcher>(FindNodeWithKind(NewOptionsToMatch[i],
Matcher::CheckType));
- if (CTM == 0 ||
+ if (CTM == 0 ||
// iPTR checks could alias any other case without us knowing, don't
// bother with them.
CTM->getType() == MVT::iPTR ||
+ // SwitchType only works for result #0.
+ CTM->getResNo() != 0 ||
// If the CheckType isn't at the start of the list, see if we can move
// it there.
!CTM->canMoveBefore(NewOptionsToMatch[i])) {
@@ -486,7 +491,7 @@ static void FactorNodes(OwningPtr<Matcher> &MatcherPtr) {
MatcherPtr.reset(new SwitchTypeMatcher(&Cases[0], Cases.size()));
} else {
// If we factored and ended up with one case, create it now.
- MatcherPtr.reset(new CheckTypeMatcher(Cases[0].first));
+ MatcherPtr.reset(new CheckTypeMatcher(Cases[0].first, 0));
MatcherPtr->setNext(Cases[0].second);
}
return;
diff --git a/libclamav/c++/llvm/utils/TableGen/DisassemblerEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/DisassemblerEmitter.cpp
index 61b9b15..3284366 100644
--- a/libclamav/c++/llvm/utils/TableGen/DisassemblerEmitter.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/DisassemblerEmitter.cpp
@@ -12,6 +12,8 @@
#include "Record.h"
#include "X86DisassemblerTables.h"
#include "X86RecognizableInstr.h"
+#include "ARMDecoderEmitter.h"
+
using namespace llvm;
using namespace llvm::X86Disassembler;
@@ -108,8 +110,8 @@ void DisassemblerEmitter::run(raw_ostream &OS) {
if (Target.getName() == "X86") {
DisassemblerTables Tables;
- std::vector<const CodeGenInstruction*> numberedInstructions;
- Target.getInstructionsByEnumValue(numberedInstructions);
+ const std::vector<const CodeGenInstruction*> &numberedInstructions =
+ Target.getInstructionsByEnumValue();
for (unsigned i = 0, e = numberedInstructions.size(); i != e; ++i)
RecognizableInstr::processInstr(Tables, *numberedInstructions[i], i);
@@ -124,6 +126,12 @@ void DisassemblerEmitter::run(raw_ostream &OS) {
return;
}
+ // Fixed-instruction-length targets use a common disassembler.
+ if (Target.getName() == "ARM") {
+ ARMDecoderEmitter(Records).run(OS);
+ return;
+ }
+
throw TGError(Target.getTargetRecord()->getLoc(),
"Unable to generate disassembler for this target");
}
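
The dispatch added above boils down to picking an emitter by target name; a hedged standalone sketch, with string names standing in for the real emitter classes:

#include <iostream>
#include <stdexcept>
#include <string>

static std::string pickDisassemblerEmitter(const std::string &Target) {
  if (Target == "X86")
    return "X86DisassemblerTables";     // table-driven, variable-length encoding
  if (Target == "ARM")
    return "ARMDecoderEmitter";         // shared path for fixed-length encodings
  throw std::runtime_error("Unable to generate disassembler for this target");
}

int main() {
  std::cout << pickDisassemblerEmitter("X86") << '\n';
  std::cout << pickDisassemblerEmitter("ARM") << '\n';
  return 0;
}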
diff --git a/libclamav/c++/llvm/utils/TableGen/EDEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/EDEmitter.cpp
index 9aad2f6..525fffb 100644
--- a/libclamav/c++/llvm/utils/TableGen/EDEmitter.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/EDEmitter.cpp
@@ -19,15 +19,14 @@
#include "CodeGenTarget.h"
#include "Record.h"
+#include "llvm/MC/EDInstInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
-#include <vector>
+#include <map>
#include <string>
-
-#define MAX_OPERANDS 5
-#define MAX_SYNTAXES 2
+#include <vector>
using namespace llvm;
@@ -54,9 +53,9 @@ namespace {
unsigned int index = 0;
unsigned int numEntries = Entries.size();
- for(index = 0; index < numEntries; ++index) {
+ for (index = 0; index < numEntries; ++index) {
o.indent(i) << Entries[index];
- if(index < (numEntries - 1))
+ if (index < (numEntries - 1))
o << ",";
o << "\n";
}
@@ -85,34 +84,6 @@ namespace {
}
};
- class StructEmitter {
- private:
- std::string Name;
- std::vector<std::string> MemberTypes;
- std::vector<std::string> MemberNames;
- public:
- StructEmitter(const char *N) : Name(N) {
- }
- void addMember(const char *t, const char *n) {
- MemberTypes.push_back(std::string(t));
- MemberNames.push_back(std::string(n));
- }
- void emit(raw_ostream &o, unsigned int &i) {
- o.indent(i) << "struct " << Name.c_str() << " {" << "\n";
- i += 2;
-
- unsigned int index = 0;
- unsigned int numMembers = MemberTypes.size();
- for (index = 0; index < numMembers; ++index) {
- o.indent(i) << MemberTypes[index] << " " << MemberNames[index] << ";";
- o << "\n";
- }
-
- i -= 2;
- o.indent(i) << "};" << "\n";
- }
- };
-
class ConstantEmitter {
public:
virtual ~ConstantEmitter() { }
@@ -121,47 +92,78 @@ namespace {
class LiteralConstantEmitter : public ConstantEmitter {
private:
- std::string Literal;
+ bool IsNumber;
+ union {
+ int Number;
+ const char* String;
+ };
public:
- LiteralConstantEmitter(const char *literal) : Literal(literal) {
+ LiteralConstantEmitter(int number = 0) :
+ IsNumber(true),
+ Number(number) {
+ }
+ void set(const char *string) {
+ IsNumber = false;
+ Number = 0;
+ String = string;
}
- LiteralConstantEmitter(int literal) {
- char buf[256];
- snprintf(buf, 256, "%d", literal);
- Literal = buf;
+ bool is(const char *string) {
+ return !strcmp(String, string);
}
void emit(raw_ostream &o, unsigned int &i) {
- o << Literal;
+ if (IsNumber)
+ o << Number;
+ else
+ o << String;
}
};
class CompoundConstantEmitter : public ConstantEmitter {
private:
- std::vector<ConstantEmitter*> Entries;
+ unsigned int Padding;
+ std::vector<ConstantEmitter *> Entries;
public:
- CompoundConstantEmitter() {
- }
- ~CompoundConstantEmitter() {
- unsigned int index;
- unsigned int numEntries = Entries.size();
- for (index = 0; index < numEntries; ++index) {
- delete Entries[index];
- }
+ CompoundConstantEmitter(unsigned int padding = 0) : Padding(padding) {
}
CompoundConstantEmitter &addEntry(ConstantEmitter *e) {
Entries.push_back(e);
+
return *this;
}
+ ~CompoundConstantEmitter() {
+ while (Entries.size()) {
+ ConstantEmitter *entry = Entries.back();
+ Entries.pop_back();
+ delete entry;
+ }
+ }
void emit(raw_ostream &o, unsigned int &i) {
o << "{" << "\n";
i += 2;
unsigned int index;
unsigned int numEntries = Entries.size();
- for (index = 0; index < numEntries; ++index) {
+
+ unsigned int numToPrint;
+
+ if (Padding) {
+ if (numEntries > Padding) {
+ fprintf(stderr, "%u entries but %u padding\n", numEntries, Padding);
+ llvm_unreachable("More entries than padding");
+ }
+ numToPrint = Padding;
+ } else {
+ numToPrint = numEntries;
+ }
+
+ for (index = 0; index < numToPrint; ++index) {
o.indent(i);
- Entries[index]->emit(o, i);
- if (index < (numEntries - 1))
+ if (index < numEntries)
+ Entries[index]->emit(o, i);
+ else
+ o << "-1";
+
+ if (index < (numToPrint - 1))
o << ",";
o << "\n";
}
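
The padding parameter added here lets CompoundConstantEmitter emit fixed-width arrays (the EDIS operand tables are always EDIS_MAX_OPERANDS wide, however many entries are real). A minimal sketch of that emission logic, with plain ints standing in for the ConstantEmitter entries:

#include <cstdio>
#include <cstdlib>
#include <vector>

static void emitPadded(const std::vector<int> &Entries, unsigned Padding) {
  unsigned NumEntries = (unsigned)Entries.size();
  unsigned NumToPrint = Padding ? Padding : NumEntries;
  if (Padding && NumEntries > Padding) {
    std::fprintf(stderr, "%u entries but %u padding\n", NumEntries, Padding);
    std::abort();                        // stands in for llvm_unreachable
  }
  std::printf("{ ");
  for (unsigned i = 0; i != NumToPrint; ++i) {
    if (i < NumEntries)
      std::printf("%d", Entries[i]);     // a real entry
    else
      std::printf("-1");                 // filler for an unused slot
    if (i + 1 != NumToPrint)
      std::printf(", ");
  }
  std::printf(" }\n");
}

int main() {
  emitPadded({2, 0, 1}, 6);              // prints: { 2, 0, 1, -1, -1, -1 }
  return 0;
}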
@@ -226,93 +228,94 @@ void populateOperandOrder(CompoundConstantEmitter *operandOrder,
++operandIterator) {
if (operandIterator->OperandType ==
AsmWriterOperand::isMachineInstrOperand) {
- char buf[2];
- snprintf(buf, sizeof(buf), "%u", operandIterator->CGIOpNo);
- operandOrder->addEntry(new LiteralConstantEmitter(buf));
+ operandOrder->addEntry(
+ new LiteralConstantEmitter(operandIterator->CGIOpNo));
numArgs++;
}
}
-
- for(; numArgs < MAX_OPERANDS; numArgs++) {
- operandOrder->addEntry(new LiteralConstantEmitter("-1"));
- }
}
/////////////////////////////////////////////////////
// Support functions for handling X86 instructions //
/////////////////////////////////////////////////////
-#define ADDFLAG(flag) flags->addEntry(flag)
+#define SET(flag) { type->set(flag); return 0; }
-#define REG(str) if (name == str) { ADDFLAG("kOperandFlagRegister"); return 0; }
-#define MEM(str) if (name == str) { ADDFLAG("kOperandFlagMemory"); return 0; }
-#define LEA(str) if (name == str) { ADDFLAG("kOperandFlagEffectiveAddress"); \
- return 0; }
-#define IMM(str) if (name == str) { ADDFLAG("kOperandFlagImmediate"); \
- return 0; }
-#define PCR(str) if (name == str) { ADDFLAG("kOperandFlagMemory"); \
- ADDFLAG("kOperandFlagPCRelative"); \
- return 0; }
+#define REG(str) if (name == str) SET("kOperandTypeRegister");
+#define MEM(str) if (name == str) SET("kOperandTypeX86Memory");
+#define LEA(str) if (name == str) SET("kOperandTypeX86EffectiveAddress");
+#define IMM(str) if (name == str) SET("kOperandTypeImmediate");
+#define PCR(str) if (name == str) SET("kOperandTypeX86PCRelative");
-/// X86FlagFromOpName - Processes the name of a single X86 operand (which is
-/// actually its type) and translates it into an operand flag
+/// X86TypeFromOpName - Processes the name of a single X86 operand (which is
+/// actually its type) and translates it into an operand type
///
-/// @arg flags - The flags object to add the flag to
+/// @arg flags - The type object to set
/// @arg name - The name of the operand
-static int X86FlagFromOpName(FlagsConstantEmitter *flags,
+static int X86TypeFromOpName(LiteralConstantEmitter *type,
const std::string &name) {
REG("GR8");
REG("GR8_NOREX");
REG("GR16");
REG("GR32");
REG("GR32_NOREX");
+ REG("GR32_TC");
REG("FR32");
REG("RFP32");
REG("GR64");
+ REG("GR64_TC");
REG("FR64");
REG("VR64");
REG("RFP64");
REG("RFP80");
REG("VR128");
+ REG("VR256");
REG("RST");
REG("SEGMENT_REG");
REG("DEBUG_REG");
- REG("CONTROL_REG_32");
- REG("CONTROL_REG_64");
+ REG("CONTROL_REG");
+ IMM("i8imm");
+ IMM("i16imm");
+ IMM("i16i8imm");
+ IMM("i32imm");
+ IMM("i32i8imm");
+ IMM("i64imm");
+ IMM("i64i8imm");
+ IMM("i64i32imm");
+ IMM("SSECC");
+
+ // all R, I, R, I, R
MEM("i8mem");
MEM("i8mem_NOREX");
MEM("i16mem");
MEM("i32mem");
+ MEM("i32mem_TC");
MEM("f32mem");
MEM("ssmem");
MEM("opaque32mem");
MEM("opaque48mem");
MEM("i64mem");
+ MEM("i64mem_TC");
MEM("f64mem");
MEM("sdmem");
MEM("f80mem");
MEM("opaque80mem");
MEM("i128mem");
+ MEM("i256mem");
MEM("f128mem");
+ MEM("f256mem");
MEM("opaque512mem");
+ // all R, I, R, I
LEA("lea32mem");
LEA("lea64_32mem");
LEA("lea64mem");
- IMM("i8imm");
- IMM("i16imm");
- IMM("i16i8imm");
- IMM("i32imm");
- IMM("i32imm_pcrel");
- IMM("i32i8imm");
- IMM("i64imm");
- IMM("i64i8imm");
- IMM("i64i32imm");
- IMM("i64i32imm_pcrel");
- IMM("SSECC");
-
+ // all I
+ PCR("i16imm_pcrel");
+ PCR("i32imm_pcrel");
+ PCR("i64i32imm_pcrel");
PCR("brtarget8");
PCR("offset8");
PCR("offset16");
@@ -328,7 +331,8 @@ static int X86FlagFromOpName(FlagsConstantEmitter *flags,
#undef LEA
#undef IMM
#undef PCR
-#undef ADDFLAG
+
+#undef SET
/// X86PopulateOperands - Handles all the operands in an X86 instruction, adding
/// the appropriate flags to their descriptors
@@ -336,7 +340,7 @@ static int X86FlagFromOpName(FlagsConstantEmitter *flags,
/// @operandFlags - A reference the array of operand flag objects
/// @inst - The instruction to use as a source of information
static void X86PopulateOperands(
- FlagsConstantEmitter *(&operandFlags)[MAX_OPERANDS],
+ LiteralConstantEmitter *(&operandTypes)[EDIS_MAX_OPERANDS],
const CodeGenInstruction &inst) {
if (!inst.TheDef->isSubClassOf("X86Inst"))
return;
@@ -349,7 +353,7 @@ static void X86PopulateOperands(
inst.OperandList[index];
Record &rec = *operandInfo.Rec;
- if (X86FlagFromOpName(operandFlags[index], rec.getName())) {
+ if (X86TypeFromOpName(operandTypes[index], rec.getName())) {
errs() << "Operand type: " << rec.getName().c_str() << "\n";
errs() << "Operand name: " << operandInfo.Name.c_str() << "\n";
errs() << "Instruction mame: " << inst.TheDef->getName().c_str() << "\n";
@@ -365,10 +369,11 @@ static void X86PopulateOperands(
/// between names and operand indices
/// @opName - The name of the operand
/// @flag - The name of the flag to add
-static inline void decorate1(FlagsConstantEmitter *(&operandFlags)[MAX_OPERANDS],
- const CodeGenInstruction &inst,
- const char *opName,
- const char *opFlag) {
+static inline void decorate1(
+ FlagsConstantEmitter *(&operandFlags)[EDIS_MAX_OPERANDS],
+ const CodeGenInstruction &inst,
+ const char *opName,
+ const char *opFlag) {
unsigned opIndex;
opIndex = inst.getOperandNamed(std::string(opName));
@@ -378,78 +383,70 @@ static inline void decorate1(FlagsConstantEmitter *(&operandFlags)[MAX_OPERANDS]
#define DECORATE1(opName, opFlag) decorate1(operandFlags, inst, opName, opFlag)
-#define MOV(source, target) { \
- instFlags.addEntry("kInstructionFlagMove"); \
- DECORATE1(source, "kOperandFlagSource"); \
- DECORATE1(target, "kOperandFlagTarget"); \
+#define MOV(source, target) { \
+ instType.set("kInstructionTypeMove"); \
+ DECORATE1(source, "kOperandFlagSource"); \
+ DECORATE1(target, "kOperandFlagTarget"); \
}
-#define BRANCH(target) { \
- instFlags.addEntry("kInstructionFlagBranch"); \
- DECORATE1(target, "kOperandFlagTarget"); \
+#define BRANCH(target) { \
+ instType.set("kInstructionTypeBranch"); \
+ DECORATE1(target, "kOperandFlagTarget"); \
}
-#define PUSH(source) { \
- instFlags.addEntry("kInstructionFlagPush"); \
- DECORATE1(source, "kOperandFlagSource"); \
+#define PUSH(source) { \
+ instType.set("kInstructionTypePush"); \
+ DECORATE1(source, "kOperandFlagSource"); \
}
-#define POP(target) { \
- instFlags.addEntry("kInstructionFlagPop"); \
- DECORATE1(target, "kOperandFlagTarget"); \
+#define POP(target) { \
+ instType.set("kInstructionTypePop"); \
+ DECORATE1(target, "kOperandFlagTarget"); \
}
-#define CALL(target) { \
- instFlags.addEntry("kInstructionFlagCall"); \
- DECORATE1(target, "kOperandFlagTarget"); \
+#define CALL(target) { \
+ instType.set("kInstructionTypeCall"); \
+ DECORATE1(target, "kOperandFlagTarget"); \
}
-#define RETURN() { \
- instFlags.addEntry("kInstructionFlagReturn"); \
+#define RETURN() { \
+ instType.set("kInstructionTypeReturn"); \
}
/// X86ExtractSemantics - Performs various checks on the name of an X86
/// instruction to determine what sort of an instruction it is and then adds
/// the appropriate flags to the instruction and its operands
///
-/// @arg instFlags - A reference to the flags for the instruction as a whole
+/// @arg instType - A reference to the type for the instruction as a whole
/// @arg operandFlags - A reference to the array of operand flag object pointers
/// @arg inst - A reference to the original instruction
-static void X86ExtractSemantics(FlagsConstantEmitter &instFlags,
- FlagsConstantEmitter *(&operandFlags)[MAX_OPERANDS],
- const CodeGenInstruction &inst) {
+static void X86ExtractSemantics(
+ LiteralConstantEmitter &instType,
+ FlagsConstantEmitter *(&operandFlags)[EDIS_MAX_OPERANDS],
+ const CodeGenInstruction &inst) {
const std::string &name = inst.TheDef->getName();
if (name.find("MOV") != name.npos) {
if (name.find("MOV_V") != name.npos) {
// ignore (this is a pseudoinstruction)
- }
- else if (name.find("MASK") != name.npos) {
+ } else if (name.find("MASK") != name.npos) {
// ignore (this is a masking move)
- }
- else if (name.find("r0") != name.npos) {
+ } else if (name.find("r0") != name.npos) {
// ignore (this is a pseudoinstruction)
- }
- else if (name.find("PS") != name.npos ||
+ } else if (name.find("PS") != name.npos ||
name.find("PD") != name.npos) {
// ignore (this is a shuffling move)
- }
- else if (name.find("MOVS") != name.npos) {
+ } else if (name.find("MOVS") != name.npos) {
// ignore (this is a string move)
- }
- else if (name.find("_F") != name.npos) {
+ } else if (name.find("_F") != name.npos) {
// TODO handle _F moves to ST(0)
- }
- else if (name.find("a") != name.npos) {
+ } else if (name.find("a") != name.npos) {
// TODO handle moves to/from %ax
- }
- else if (name.find("CMOV") != name.npos) {
+ } else if (name.find("CMOV") != name.npos) {
MOV("src2", "dst");
- }
- else if (name.find("PC") != name.npos) {
+ } else if (name.find("PC") != name.npos) {
MOV("label", "reg")
- }
- else {
+ } else {
MOV("src", "dst");
}
}
@@ -458,8 +455,7 @@ static void X86ExtractSemantics(FlagsConstantEmitter &instFlags,
name.find("J") == 0) {
if (name.find("FAR") != name.npos && name.find("i") != name.npos) {
BRANCH("off");
- }
- else {
+ } else {
BRANCH("dst");
}
}
@@ -467,19 +463,17 @@ static void X86ExtractSemantics(FlagsConstantEmitter &instFlags,
if (name.find("PUSH") != name.npos) {
if (name.find("FS") != name.npos ||
name.find("GS") != name.npos) {
- instFlags.addEntry("kInstructionFlagPush");
+ instType.set("kInstructionTypePush");
// TODO add support for fixed operands
- }
- else if (name.find("F") != name.npos) {
+ } else if (name.find("F") != name.npos) {
// ignore (this pushes onto the FP stack)
- }
- else if (name[name.length() - 1] == 'm') {
+ } else if (name.find("A") != name.npos) {
+ // ignore (pushes all GP registers onto the stack)
+ } else if (name[name.length() - 1] == 'm') {
PUSH("src");
- }
- else if (name.find("i") != name.npos) {
+ } else if (name.find("i") != name.npos) {
PUSH("imm");
- }
- else {
+ } else {
PUSH("reg");
}
}
@@ -487,19 +481,17 @@ static void X86ExtractSemantics(FlagsConstantEmitter &instFlags,
if (name.find("POP") != name.npos) {
if (name.find("POPCNT") != name.npos) {
// ignore (not a real pop)
- }
- else if (name.find("FS") != name.npos ||
+ } else if (name.find("FS") != name.npos ||
name.find("GS") != name.npos) {
- instFlags.addEntry("kInstructionFlagPop");
+ instType.set("kInstructionTypePop");
// TODO add support for fixed operands
- }
- else if (name.find("F") != name.npos) {
+ } else if (name.find("F") != name.npos) {
// ignore (this pops from the FP stack)
- }
- else if (name[name.length() - 1] == 'm') {
+ } else if (name.find("A") != name.npos) {
+ // ignore (pops all GP registers off the stack)
+ } else if (name[name.length() - 1] == 'm') {
POP("dst");
- }
- else {
+ } else {
POP("reg");
}
}
@@ -507,17 +499,13 @@ static void X86ExtractSemantics(FlagsConstantEmitter &instFlags,
if (name.find("CALL") != name.npos) {
if (name.find("ADJ") != name.npos) {
// ignore (not a call)
- }
- else if (name.find("SYSCALL") != name.npos) {
+ } else if (name.find("SYSCALL") != name.npos) {
// ignore (doesn't go anywhere we know about)
- }
- else if (name.find("VMCALL") != name.npos) {
+ } else if (name.find("VMCALL") != name.npos) {
// ignore (rather different semantics than a regular call)
- }
- else if (name.find("FAR") != name.npos && name.find("i") != name.npos) {
+ } else if (name.find("FAR") != name.npos && name.find("i") != name.npos) {
CALL("off");
- }
- else {
+ } else {
CALL("dst");
}
}
@@ -534,9 +522,185 @@ static void X86ExtractSemantics(FlagsConstantEmitter &instFlags,
#undef CALL
#undef RETURN
-#undef COND_DECORATE_2
-#undef COND_DECORATE_1
-#undef DECORATE1
+/////////////////////////////////////////////////////
+// Support functions for handling ARM instructions //
+/////////////////////////////////////////////////////
+
+#define SET(flag) { type->set(flag); return 0; }
+
+#define REG(str) if (name == str) SET("kOperandTypeRegister");
+#define IMM(str) if (name == str) SET("kOperandTypeImmediate");
+
+#define MISC(str, type) if (name == str) SET(type);
+
+/// ARMFlagFromOpName - Processes the name of a single ARM operand (which is
+/// actually its type) and translates it into an operand type
+///
+/// @arg type - The type object to set
+/// @arg name - The name of the operand
+static int ARMFlagFromOpName(LiteralConstantEmitter *type,
+ const std::string &name) {
+ REG("GPR");
+ REG("rGPR");
+ REG("tcGPR");
+ REG("cc_out");
+ REG("s_cc_out");
+ REG("tGPR");
+ REG("DPR");
+ REG("DPR_VFP2");
+ REG("DPR_8");
+ REG("SPR");
+ REG("QPR");
+ REG("QQPR");
+ REG("QQQQPR");
+
+ IMM("i32imm");
+ IMM("bf_inv_mask_imm");
+ IMM("jtblock_operand");
+ IMM("nohash_imm");
+ IMM("cpinst_operand");
+ IMM("cps_opt");
+ IMM("vfp_f64imm");
+ IMM("vfp_f32imm");
+ IMM("memb_opt");
+ IMM("msr_mask");
+ IMM("neg_zero");
+ IMM("imm0_31");
+ IMM("nModImm");
+ IMM("imm0_4095");
+ IMM("jt2block_operand");
+ IMM("t_imm_s4");
+ IMM("pclabel");
+ IMM("shift_imm");
+
+ MISC("brtarget", "kOperandTypeARMBranchTarget"); // ?
+ MISC("so_reg", "kOperandTypeARMSoReg"); // R, R, I
+ MISC("t2_so_reg", "kOperandTypeThumb2SoReg"); // R, I
+ MISC("so_imm", "kOperandTypeARMSoImm"); // I
+ MISC("t2_so_imm", "kOperandTypeThumb2SoImm"); // I
+ MISC("so_imm2part", "kOperandTypeARMSoImm2Part"); // I
+ MISC("pred", "kOperandTypeARMPredicate"); // I, R
+ MISC("it_pred", "kOperandTypeARMPredicate"); // I
+ MISC("addrmode2", "kOperandTypeARMAddrMode2"); // R, R, I
+ MISC("am2offset", "kOperandTypeARMAddrMode2Offset"); // R, I
+ MISC("addrmode3", "kOperandTypeARMAddrMode3"); // R, R, I
+ MISC("am3offset", "kOperandTypeARMAddrMode3Offset"); // R, I
+ MISC("addrmode4", "kOperandTypeARMAddrMode4"); // R, I
+ MISC("addrmode5", "kOperandTypeARMAddrMode5"); // R, I
+ MISC("addrmode6", "kOperandTypeARMAddrMode6"); // R, R, I, I
+ MISC("am6offset", "kOperandTypeARMAddrMode6Offset"); // R, I, I
+ MISC("addrmodepc", "kOperandTypeARMAddrModePC"); // R, I
+ MISC("reglist", "kOperandTypeARMRegisterList"); // I, R, ...
+ MISC("it_mask", "kOperandTypeThumbITMask"); // I
+ MISC("t2addrmode_imm8", "kOperandTypeThumb2AddrModeImm8"); // R, I
+ MISC("t2am_imm8_offset", "kOperandTypeThumb2AddrModeImm8Offset");//I
+ MISC("t2addrmode_imm12", "kOperandTypeThumb2AddrModeImm12"); // R, I
+ MISC("t2addrmode_so_reg", "kOperandTypeThumb2AddrModeSoReg"); // R, R, I
+ MISC("t2addrmode_imm8s4", "kOperandTypeThumb2AddrModeImm8s4"); // R, I
+ MISC("t2am_imm8s4_offset", "kOperandTypeThumb2AddrModeImm8s4Offset");
+ // R, I
+ MISC("tb_addrmode", "kOperandTypeARMTBAddrMode"); // I
+ MISC("t_addrmode_s1", "kOperandTypeThumbAddrModeS1"); // R, I, R
+ MISC("t_addrmode_s2", "kOperandTypeThumbAddrModeS2"); // R, I, R
+ MISC("t_addrmode_s4", "kOperandTypeThumbAddrModeS4"); // R, I, R
+ MISC("t_addrmode_rr", "kOperandTypeThumbAddrModeRR"); // R, R
+ MISC("t_addrmode_sp", "kOperandTypeThumbAddrModeSP"); // R, I
+
+ return 1;
+}
+
+#undef SOREG
+#undef SOIMM
+#undef PRED
+#undef REG
+#undef MEM
+#undef LEA
+#undef IMM
+#undef PCR
+
+#undef SET
+
+/// ARMPopulateOperands - Handles all the operands in an ARM instruction, adding
+/// the appropriate flags to their descriptors
+///
+/// @operandFlags - A reference the array of operand flag objects
+/// @inst - The instruction to use as a source of information
+static void ARMPopulateOperands(
+ LiteralConstantEmitter *(&operandTypes)[EDIS_MAX_OPERANDS],
+ const CodeGenInstruction &inst) {
+ if (!inst.TheDef->isSubClassOf("InstARM") &&
+ !inst.TheDef->isSubClassOf("InstThumb"))
+ return;
+
+ unsigned int index;
+ unsigned int numOperands = inst.OperandList.size();
+
+ if (numOperands > EDIS_MAX_OPERANDS) {
+ errs() << "numOperands == " << numOperands << " > " <<
+ EDIS_MAX_OPERANDS << '\n';
+ llvm_unreachable("Too many operands");
+ }
+
+ for (index = 0; index < numOperands; ++index) {
+ const CodeGenInstruction::OperandInfo &operandInfo =
+ inst.OperandList[index];
+ Record &rec = *operandInfo.Rec;
+
+ if (ARMFlagFromOpName(operandTypes[index], rec.getName())) {
+ errs() << "Operand type: " << rec.getName() << '\n';
+ errs() << "Operand name: " << operandInfo.Name << '\n';
+ errs() << "Instruction mame: " << inst.TheDef->getName() << '\n';
+ llvm_unreachable("Unhandled type");
+ }
+ }
+}
+
+#define BRANCH(target) { \
+ instType.set("kInstructionTypeBranch"); \
+ DECORATE1(target, "kOperandFlagTarget"); \
+}
+
+/// ARMExtractSemantics - Performs various checks on the name of an ARM
+/// instruction to determine what sort of an instruction it is and then adds
+/// the appropriate flags to the instruction and its operands
+///
+/// @arg instType - A reference to the type for the instruction as a whole
+/// @arg operandTypes - A reference to the array of operand type object pointers
+/// @arg operandFlags - A reference to the array of operand flag object pointers
+/// @arg inst - A reference to the original instruction
+static void ARMExtractSemantics(
+ LiteralConstantEmitter &instType,
+ LiteralConstantEmitter *(&operandTypes)[EDIS_MAX_OPERANDS],
+ FlagsConstantEmitter *(&operandFlags)[EDIS_MAX_OPERANDS],
+ const CodeGenInstruction &inst) {
+ const std::string &name = inst.TheDef->getName();
+
+ if (name == "tBcc" ||
+ name == "tB" ||
+ name == "t2Bcc" ||
+ name == "Bcc" ||
+ name == "tCBZ" ||
+ name == "tCBNZ") {
+ BRANCH("target");
+ }
+
+ if (name == "tBLr9" ||
+ name == "BLr9_pred" ||
+ name == "tBLXi_r9" ||
+ name == "tBLXr_r9" ||
+ name == "BLXr9" ||
+ name == "t2BXJ" ||
+ name == "BXJ") {
+ BRANCH("func");
+
+ unsigned opIndex;
+ opIndex = inst.getOperandNamed("func");
+ if (operandTypes[opIndex]->is("kOperandTypeImmediate"))
+ operandTypes[opIndex]->set("kOperandTypeARMBranchTarget");
+ }
+}
+
+#undef BRANCH
/// populateInstInfo - Fills an array of InstInfos with information about each
/// instruction in a target
@@ -545,8 +709,8 @@ static void X86ExtractSemantics(FlagsConstantEmitter &instFlags,
/// @arg target - The CodeGenTarget to use as a source of instructions
static void populateInstInfo(CompoundConstantEmitter &infoArray,
CodeGenTarget &target) {
- std::vector<const CodeGenInstruction*> numberedInstructions;
- target.getInstructionsByEnumValue(numberedInstructions);
+ const std::vector<const CodeGenInstruction*> &numberedInstructions =
+ target.getInstructionsByEnumValue();
unsigned int index;
unsigned int numInstructions = numberedInstructions.size();
@@ -557,19 +721,29 @@ static void populateInstInfo(CompoundConstantEmitter &infoArray,
CompoundConstantEmitter *infoStruct = new CompoundConstantEmitter;
infoArray.addEntry(infoStruct);
- FlagsConstantEmitter *instFlags = new FlagsConstantEmitter;
- infoStruct->addEntry(instFlags);
+ LiteralConstantEmitter *instType = new LiteralConstantEmitter;
+ infoStruct->addEntry(instType);
LiteralConstantEmitter *numOperandsEmitter =
new LiteralConstantEmitter(inst.OperandList.size());
infoStruct->addEntry(numOperandsEmitter);
+
+ CompoundConstantEmitter *operandTypeArray = new CompoundConstantEmitter;
+ infoStruct->addEntry(operandTypeArray);
+
+ LiteralConstantEmitter *operandTypes[EDIS_MAX_OPERANDS];
CompoundConstantEmitter *operandFlagArray = new CompoundConstantEmitter;
infoStruct->addEntry(operandFlagArray);
- FlagsConstantEmitter *operandFlags[MAX_OPERANDS];
+ FlagsConstantEmitter *operandFlags[EDIS_MAX_OPERANDS];
- for (unsigned operandIndex = 0; operandIndex < MAX_OPERANDS; ++operandIndex) {
+ for (unsigned operandIndex = 0;
+ operandIndex < EDIS_MAX_OPERANDS;
+ ++operandIndex) {
+ operandTypes[operandIndex] = new LiteralConstantEmitter;
+ operandTypeArray->addEntry(operandTypes[operandIndex]);
+
operandFlags[operandIndex] = new FlagsConstantEmitter;
operandFlagArray->addEntry(operandFlags[operandIndex]);
}
@@ -577,89 +751,114 @@ static void populateInstInfo(CompoundConstantEmitter &infoArray,
unsigned numSyntaxes = 0;
if (target.getName() == "X86") {
- X86PopulateOperands(operandFlags, inst);
- X86ExtractSemantics(*instFlags, operandFlags, inst);
+ X86PopulateOperands(operandTypes, inst);
+ X86ExtractSemantics(*instType, operandFlags, inst);
numSyntaxes = 2;
}
+ else if (target.getName() == "ARM") {
+ ARMPopulateOperands(operandTypes, inst);
+ ARMExtractSemantics(*instType, operandTypes, operandFlags, inst);
+ numSyntaxes = 1;
+ }
+
+ CompoundConstantEmitter *operandOrderArray = new CompoundConstantEmitter;
- CompoundConstantEmitter *operandOrderArray = new CompoundConstantEmitter;
infoStruct->addEntry(operandOrderArray);
- for (unsigned syntaxIndex = 0; syntaxIndex < MAX_SYNTAXES; ++syntaxIndex) {
- CompoundConstantEmitter *operandOrder = new CompoundConstantEmitter;
+ for (unsigned syntaxIndex = 0;
+ syntaxIndex < EDIS_MAX_SYNTAXES;
+ ++syntaxIndex) {
+ CompoundConstantEmitter *operandOrder =
+ new CompoundConstantEmitter(EDIS_MAX_OPERANDS);
+
operandOrderArray->addEntry(operandOrder);
if (syntaxIndex < numSyntaxes) {
populateOperandOrder(operandOrder, inst, syntaxIndex);
}
- else {
- for (unsigned operandIndex = 0;
- operandIndex < MAX_OPERANDS;
- ++operandIndex) {
- operandOrder->addEntry(new LiteralConstantEmitter("-1"));
- }
- }
}
+
+ infoStruct = NULL;
}
}
-void EDEmitter::run(raw_ostream &o) {
- unsigned int i = 0;
-
- CompoundConstantEmitter infoArray;
- CodeGenTarget target;
-
- populateInstInfo(infoArray, target);
-
- o << "InstInfo instInfo" << target.getName().c_str() << "[] = ";
- infoArray.emit(o, i);
- o << ";" << "\n";
-}
-
-void EDEmitter::runHeader(raw_ostream &o) {
- EmitSourceFileHeader("Enhanced Disassembly Info Header", o);
+static void emitCommonEnums(raw_ostream &o, unsigned int &i) {
+ EnumEmitter operandTypes("OperandTypes");
+ operandTypes.addEntry("kOperandTypeNone");
+ operandTypes.addEntry("kOperandTypeImmediate");
+ operandTypes.addEntry("kOperandTypeRegister");
+ operandTypes.addEntry("kOperandTypeX86Memory");
+ operandTypes.addEntry("kOperandTypeX86EffectiveAddress");
+ operandTypes.addEntry("kOperandTypeX86PCRelative");
+ operandTypes.addEntry("kOperandTypeARMBranchTarget");
+ operandTypes.addEntry("kOperandTypeARMSoReg");
+ operandTypes.addEntry("kOperandTypeARMSoImm");
+ operandTypes.addEntry("kOperandTypeARMSoImm2Part");
+ operandTypes.addEntry("kOperandTypeARMPredicate");
+ operandTypes.addEntry("kOperandTypeARMAddrMode2");
+ operandTypes.addEntry("kOperandTypeARMAddrMode2Offset");
+ operandTypes.addEntry("kOperandTypeARMAddrMode3");
+ operandTypes.addEntry("kOperandTypeARMAddrMode3Offset");
+ operandTypes.addEntry("kOperandTypeARMAddrMode4");
+ operandTypes.addEntry("kOperandTypeARMAddrMode5");
+ operandTypes.addEntry("kOperandTypeARMAddrMode6");
+ operandTypes.addEntry("kOperandTypeARMAddrMode6Offset");
+ operandTypes.addEntry("kOperandTypeARMAddrModePC");
+ operandTypes.addEntry("kOperandTypeARMRegisterList");
+ operandTypes.addEntry("kOperandTypeARMTBAddrMode");
+ operandTypes.addEntry("kOperandTypeThumbITMask");
+ operandTypes.addEntry("kOperandTypeThumbAddrModeS1");
+ operandTypes.addEntry("kOperandTypeThumbAddrModeS2");
+ operandTypes.addEntry("kOperandTypeThumbAddrModeS4");
+ operandTypes.addEntry("kOperandTypeThumbAddrModeRR");
+ operandTypes.addEntry("kOperandTypeThumbAddrModeSP");
+ operandTypes.addEntry("kOperandTypeThumb2SoReg");
+ operandTypes.addEntry("kOperandTypeThumb2SoImm");
+ operandTypes.addEntry("kOperandTypeThumb2AddrModeImm8");
+ operandTypes.addEntry("kOperandTypeThumb2AddrModeImm8Offset");
+ operandTypes.addEntry("kOperandTypeThumb2AddrModeImm12");
+ operandTypes.addEntry("kOperandTypeThumb2AddrModeSoReg");
+ operandTypes.addEntry("kOperandTypeThumb2AddrModeImm8s4");
+ operandTypes.addEntry("kOperandTypeThumb2AddrModeImm8s4Offset");
+ operandTypes.emit(o, i);
- o << "#ifndef EDInfo_" << "\n";
- o << "#define EDInfo_" << "\n";
- o << "\n";
- o << "#include <inttypes.h>" << "\n";
o << "\n";
- o << "#define MAX_OPERANDS " << format("%d", MAX_OPERANDS) << "\n";
- o << "#define MAX_SYNTAXES " << format("%d", MAX_SYNTAXES) << "\n";
- o << "\n";
-
- unsigned int i = 0;
EnumEmitter operandFlags("OperandFlags");
- operandFlags.addEntry("kOperandFlagImmediate");
- operandFlags.addEntry("kOperandFlagRegister");
- operandFlags.addEntry("kOperandFlagMemory");
- operandFlags.addEntry("kOperandFlagEffectiveAddress");
- operandFlags.addEntry("kOperandFlagPCRelative");
operandFlags.addEntry("kOperandFlagSource");
operandFlags.addEntry("kOperandFlagTarget");
operandFlags.emitAsFlags(o, i);
o << "\n";
- EnumEmitter instructionFlags("InstructionFlags");
- instructionFlags.addEntry("kInstructionFlagMove");
- instructionFlags.addEntry("kInstructionFlagBranch");
- instructionFlags.addEntry("kInstructionFlagPush");
- instructionFlags.addEntry("kInstructionFlagPop");
- instructionFlags.addEntry("kInstructionFlagCall");
- instructionFlags.addEntry("kInstructionFlagReturn");
- instructionFlags.emitAsFlags(o, i);
+ EnumEmitter instructionTypes("InstructionTypes");
+ instructionTypes.addEntry("kInstructionTypeNone");
+ instructionTypes.addEntry("kInstructionTypeMove");
+ instructionTypes.addEntry("kInstructionTypeBranch");
+ instructionTypes.addEntry("kInstructionTypePush");
+ instructionTypes.addEntry("kInstructionTypePop");
+ instructionTypes.addEntry("kInstructionTypeCall");
+ instructionTypes.addEntry("kInstructionTypeReturn");
+ instructionTypes.emit(o, i);
o << "\n";
+}
+
+void EDEmitter::run(raw_ostream &o) {
+ unsigned int i = 0;
+
+ CompoundConstantEmitter infoArray;
+ CodeGenTarget target;
- StructEmitter instInfo("InstInfo");
- instInfo.addMember("uint32_t", "instructionFlags");
- instInfo.addMember("uint8_t", "numOperands");
- instInfo.addMember("uint8_t", "operandFlags[MAX_OPERANDS]");
- instInfo.addMember("const char", "operandOrders[MAX_SYNTAXES][MAX_OPERANDS]");
- instInfo.emit(o, i);
+ populateInstInfo(infoArray, target);
- o << "\n";
- o << "#endif" << "\n";
+ emitCommonEnums(o, i);
+
+ o << "namespace {\n";
+
+ o << "llvm::EDInstInfo instInfo" << target.getName().c_str() << "[] = ";
+ infoArray.emit(o, i);
+ o << ";" << "\n";
+
+ o << "}\n";
}
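
Much of the new EDEmitter table is driven by the REG/MEM/IMM/PCR and MISC macros above, which simply map an operand record name to an operand-type string and flag anything unrecognized. A rough equivalent as a plain lookup function (only a handful of the names are included here):

#include <cstdio>
#include <map>
#include <string>

// Returns true when the operand name is unhandled, so the caller can report it.
static bool x86TypeFromOpName(const std::string &Name, std::string &Type) {
  static const std::map<std::string, std::string> Table = {
    {"GR32",         "kOperandTypeRegister"},
    {"i32imm",       "kOperandTypeImmediate"},
    {"i32mem",       "kOperandTypeX86Memory"},
    {"lea32mem",     "kOperandTypeX86EffectiveAddress"},
    {"i32imm_pcrel", "kOperandTypeX86PCRelative"},
  };
  std::map<std::string, std::string>::const_iterator I = Table.find(Name);
  if (I == Table.end())
    return true;
  Type = I->second;
  return false;
}

int main() {
  std::string Type;
  if (!x86TypeFromOpName("i32mem", Type))
    std::printf("%s\n", Type.c_str());   // prints: kOperandTypeX86Memory
  return 0;
}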
diff --git a/libclamav/c++/llvm/utils/TableGen/EDEmitter.h b/libclamav/c++/llvm/utils/TableGen/EDEmitter.h
index 9e40a8b..e30373f 100644
--- a/libclamav/c++/llvm/utils/TableGen/EDEmitter.h
+++ b/libclamav/c++/llvm/utils/TableGen/EDEmitter.h
@@ -27,9 +27,6 @@ namespace llvm {
// run - Output the instruction table.
void run(raw_ostream &o);
-
- // runHeader - Emit a header file that allows use of the instruction table.
- void runHeader(raw_ostream &o);
};
} // End llvm namespace
diff --git a/libclamav/c++/llvm/utils/TableGen/FastISelEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/FastISelEmitter.cpp
index f589bcc..6c16fcf 100644
--- a/libclamav/c++/llvm/utils/TableGen/FastISelEmitter.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/FastISelEmitter.cpp
@@ -31,7 +31,7 @@ namespace {
struct InstructionMemo {
std::string Name;
const CodeGenRegisterClass *RC;
- unsigned char SubRegNo;
+ std::string SubRegNo;
std::vector<std::string>* PhysRegs;
};
@@ -54,30 +54,34 @@ struct OperandsSignature {
bool initialize(TreePatternNode *InstPatNode,
const CodeGenTarget &Target,
MVT::SimpleValueType VT) {
- if (!InstPatNode->isLeaf() &&
- InstPatNode->getOperator()->getName() == "imm") {
- Operands.push_back("i");
- return true;
- }
- if (!InstPatNode->isLeaf() &&
- InstPatNode->getOperator()->getName() == "fpimm") {
- Operands.push_back("f");
- return true;
+
+ if (!InstPatNode->isLeaf()) {
+ if (InstPatNode->getOperator()->getName() == "imm") {
+ Operands.push_back("i");
+ return true;
+ }
+ if (InstPatNode->getOperator()->getName() == "fpimm") {
+ Operands.push_back("f");
+ return true;
+ }
}
const CodeGenRegisterClass *DstRC = 0;
for (unsigned i = 0, e = InstPatNode->getNumChildren(); i != e; ++i) {
TreePatternNode *Op = InstPatNode->getChild(i);
+
// For now, filter out any operand with a predicate.
- if (!Op->getPredicateFns().empty())
- return false;
// For now, filter out any operand with multiple values.
- if (Op->getExtTypes().size() != 1)
+ if (!Op->getPredicateFns().empty() ||
+ Op->getNumTypes() != 1)
return false;
+
+ assert(Op->hasTypeSet(0) && "Type infererence not done?");
// For now, all the operands must have the same type.
- if (Op->getTypeNum(0) != VT)
+ if (Op->getType(0) != VT)
return false;
+
if (!Op->isLeaf()) {
if (Op->getOperator()->getName() == "imm") {
Operands.push_back("i");
@@ -103,13 +107,15 @@ struct OperandsSignature {
RC = Target.getRegisterClassForRegister(OpLeafRec);
else
return false;
- // For now, require the register operands' register classes to all
- // be the same.
+
+ // For now, this needs to be a register class of some sort.
if (!RC)
return false;
- // For now, all the operands must have the same register class.
+
+ // For now, all the operands must have the same register class or be
+ // a strict subclass of the destination.
if (DstRC) {
- if (DstRC != RC)
+ if (DstRC != RC && !DstRC->hasSubClass(RC))
return false;
} else
DstRC = RC;
@@ -121,7 +127,7 @@ struct OperandsSignature {
void PrintParameters(raw_ostream &OS) const {
for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
if (Operands[i] == "r") {
- OS << "unsigned Op" << i;
+ OS << "unsigned Op" << i << ", bool Op" << i << "IsKill";
} else if (Operands[i] == "i") {
OS << "uint64_t imm" << i;
} else if (Operands[i] == "f") {
@@ -147,7 +153,7 @@ struct OperandsSignature {
if (PrintedArg)
OS << ", ";
if (Operands[i] == "r") {
- OS << "Op" << i;
+ OS << "Op" << i << ", Op" << i << "IsKill";
PrintedArg = true;
} else if (Operands[i] == "i") {
OS << "imm" << i;
@@ -165,7 +171,7 @@ struct OperandsSignature {
void PrintArguments(raw_ostream &OS) const {
for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
if (Operands[i] == "r") {
- OS << "Op" << i;
+ OS << "Op" << i << ", Op" << i << "IsKill";
} else if (Operands[i] == "i") {
OS << "imm" << i;
} else if (Operands[i] == "f") {
@@ -206,7 +212,8 @@ class FastISelMap {
typedef std::map<MVT::SimpleValueType, PredMap> RetPredMap;
typedef std::map<MVT::SimpleValueType, RetPredMap> TypeRetPredMap;
typedef std::map<std::string, TypeRetPredMap> OpcodeTypeRetPredMap;
- typedef std::map<OperandsSignature, OpcodeTypeRetPredMap> OperandsOpcodeTypeRetPredMap;
+ typedef std::map<OperandsSignature, OpcodeTypeRetPredMap>
+ OperandsOpcodeTypeRetPredMap;
OperandsOpcodeTypeRetPredMap SimplePatterns;
@@ -255,10 +262,10 @@ void FastISelMap::CollectPatterns(CodeGenDAGPatterns &CGP) {
Record *Op = Dst->getOperator();
if (!Op->isSubClassOf("Instruction"))
continue;
- CodeGenInstruction &II = CGP.getTargetInfo().getInstruction(Op->getName());
+ CodeGenInstruction &II = CGP.getTargetInfo().getInstruction(Op);
if (II.OperandList.empty())
continue;
-
+
// For now, ignore multi-instruction patterns.
bool MultiInsts = false;
for (unsigned i = 0, e = Dst->getNumChildren(); i != e; ++i) {
@@ -276,7 +283,7 @@ void FastISelMap::CollectPatterns(CodeGenDAGPatterns &CGP) {
// For now, ignore instructions where the first operand is not an
// output register.
const CodeGenRegisterClass *DstRC = 0;
- unsigned SubRegNo = ~0;
+ std::string SubRegNo;
if (Op->getName() != "EXTRACT_SUBREG") {
Record *Op0Rec = II.OperandList[0].Rec;
if (!Op0Rec->isSubClassOf("RegisterClass"))
@@ -285,8 +292,15 @@ void FastISelMap::CollectPatterns(CodeGenDAGPatterns &CGP) {
if (!DstRC)
continue;
} else {
- SubRegNo = static_cast<IntInit*>(
- Dst->getChild(1)->getLeafValue())->getValue();
+ // If this isn't a leaf, then continue since the register classes are
+ // a bit too complicated for now.
+ if (!Dst->getChild(1)->isLeaf()) continue;
+
+ DefInit *SR = dynamic_cast<DefInit*>(Dst->getChild(1)->getLeafValue());
+ if (SR)
+ SubRegNo = getQualifiedName(SR->getDef());
+ else
+ SubRegNo = Dst->getChild(1)->getLeafValue()->getAsString();
}
// Inspect the pattern.
@@ -294,12 +308,18 @@ void FastISelMap::CollectPatterns(CodeGenDAGPatterns &CGP) {
if (!InstPatNode) continue;
if (InstPatNode->isLeaf()) continue;
+ // Ignore multiple result nodes for now.
+ if (InstPatNode->getNumTypes() > 1) continue;
+
Record *InstPatOp = InstPatNode->getOperator();
std::string OpcodeName = getOpcodeName(InstPatOp, CGP);
- MVT::SimpleValueType RetVT = InstPatNode->getTypeNum(0);
+ MVT::SimpleValueType RetVT = MVT::isVoid;
+ if (InstPatNode->getNumTypes()) RetVT = InstPatNode->getType(0);
MVT::SimpleValueType VT = RetVT;
- if (InstPatNode->getNumChildren())
- VT = InstPatNode->getChild(0)->getTypeNum(0);
+ if (InstPatNode->getNumChildren()) {
+ assert(InstPatNode->getChild(0)->getNumTypes() == 1);
+ VT = InstPatNode->getChild(0)->getType(0);
+ }
// For now, filter out instructions which just set a register to
// an Operand or an immediate, like MOV32ri.
@@ -360,7 +380,8 @@ void FastISelMap::CollectPatterns(CodeGenDAGPatterns &CGP) {
SubRegNo,
PhysRegInputs
};
- assert(!SimplePatterns[Operands][OpcodeName][VT][RetVT].count(PredicateCheck) &&
+ assert(!SimplePatterns[Operands][OpcodeName][VT][RetVT]
+ .count(PredicateCheck) &&
"Duplicate pattern!");
SimplePatterns[Operands][OpcodeName][VT][RetVT][PredicateCheck] = Memo;
}
@@ -421,15 +442,13 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
for (unsigned i = 0; i < Memo.PhysRegs->size(); ++i) {
if ((*Memo.PhysRegs)[i] != "")
- OS << " TII.copyRegToReg(*MBB, MBB->end(), "
- << (*Memo.PhysRegs)[i] << ", Op" << i << ", "
- << "TM.getRegisterInfo()->getPhysicalRegisterRegClass("
- << (*Memo.PhysRegs)[i] << "), "
- << "MRI.getRegClass(Op" << i << "));\n";
+ OS << " BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, "
+ << "TII.get(TargetOpcode::COPY), "
+ << (*Memo.PhysRegs)[i] << ").addReg(Op" << i << ");\n";
}
OS << " return FastEmitInst_";
- if (Memo.SubRegNo == (unsigned char)~0) {
+ if (Memo.SubRegNo.empty()) {
Operands.PrintManglingSuffix(OS, *Memo.PhysRegs);
OS << "(" << InstNS << Memo.Name << ", ";
OS << InstNS << Memo.RC->getName() << "RegisterClass";
@@ -439,8 +458,8 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
OS << ");\n";
} else {
OS << "extractsubreg(" << getName(RetVT);
- OS << ", Op0, ";
- OS << (unsigned)Memo.SubRegNo;
+ OS << ", Op0, Op0IsKill, ";
+ OS << Memo.SubRegNo;
OS << ");\n";
}
@@ -513,18 +532,16 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
HasPred = true;
}
- for (unsigned i = 0; i < Memo.PhysRegs->size(); ++i) {
- if ((*Memo.PhysRegs)[i] != "")
- OS << " TII.copyRegToReg(*MBB, MBB->end(), "
- << (*Memo.PhysRegs)[i] << ", Op" << i << ", "
- << "TM.getRegisterInfo()->getPhysicalRegisterRegClass("
- << (*Memo.PhysRegs)[i] << "), "
- << "MRI.getRegClass(Op" << i << "));\n";
- }
+ for (unsigned i = 0; i < Memo.PhysRegs->size(); ++i) {
+ if ((*Memo.PhysRegs)[i] != "")
+ OS << " BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, "
+ << "TII.get(TargetOpcode::COPY), "
+ << (*Memo.PhysRegs)[i] << ").addReg(Op" << i << ");\n";
+ }
OS << " return FastEmitInst_";
- if (Memo.SubRegNo == (unsigned char)~0) {
+ if (Memo.SubRegNo.empty()) {
Operands.PrintManglingSuffix(OS, *Memo.PhysRegs);
OS << "(" << InstNS << Memo.Name << ", ";
OS << InstNS << Memo.RC->getName() << "RegisterClass";
@@ -533,8 +550,8 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
Operands.PrintArguments(OS, *Memo.PhysRegs);
OS << ");\n";
} else {
- OS << "extractsubreg(RetVT, Op0, ";
- OS << (unsigned)Memo.SubRegNo;
+ OS << "extractsubreg(RetVT, Op0, Op0IsKill, ";
+ OS << Memo.SubRegNo;
OS << ");\n";
}
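
Two of the FastISel changes above are mechanical (SubRegNo becomes a string, register operands gain an IsKill flag); the subtler one is the relaxed register-class test, which now also accepts operands whose class is a subclass of the destination's. A small sketch of that check with a stand-in RegClass type:

#include <cassert>
#include <set>
#include <string>

struct RegClass {
  std::string Name;
  std::set<std::string> SubClasses;      // names of classes contained in this one
  bool hasSubClass(const RegClass &RC) const {
    return SubClasses.count(RC.Name) != 0;
  }
};

// Operand class is acceptable if it equals the destination class or is one of
// its subclasses; the reverse direction is still rejected.
static bool operandClassOK(const RegClass &DstRC, const RegClass &RC) {
  return DstRC.Name == RC.Name || DstRC.hasSubClass(RC);
}

int main() {
  RegClass GR32{"GR32", {"GR32_TC", "GR32_NOREX"}};
  RegClass GR32_TC{"GR32_TC", {}};
  assert(operandClassOK(GR32, GR32_TC));   // subclass operand now allowed
  assert(!operandClassOK(GR32_TC, GR32));  // superclass operand still rejected
  return 0;
}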
diff --git a/libclamav/c++/llvm/utils/TableGen/InstrEnumEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/InstrEnumEmitter.cpp
index d1e7f3d..47a8474 100644
--- a/libclamav/c++/llvm/utils/TableGen/InstrEnumEmitter.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/InstrEnumEmitter.cpp
@@ -26,22 +26,15 @@ void InstrEnumEmitter::run(raw_ostream &OS) {
CodeGenTarget Target;
// We must emit the PHI opcode first...
- std::string Namespace;
- for (CodeGenTarget::inst_iterator II = Target.inst_begin(),
- E = Target.inst_end(); II != E; ++II) {
- if (II->second.Namespace != "TargetOpcode") {
- Namespace = II->second.Namespace;
- break;
- }
- }
+ std::string Namespace = Target.getInstNamespace();
if (Namespace.empty()) {
fprintf(stderr, "No instructions defined!\n");
exit(1);
}
- std::vector<const CodeGenInstruction*> NumberedInstructions;
- Target.getInstructionsByEnumValue(NumberedInstructions);
+ const std::vector<const CodeGenInstruction*> &NumberedInstructions =
+ Target.getInstructionsByEnumValue();
OS << "namespace " << Namespace << " {\n";
OS << " enum {\n";
diff --git a/libclamav/c++/llvm/utils/TableGen/InstrInfoEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/InstrInfoEmitter.cpp
index 898c92a..4d3aa5e 100644
--- a/libclamav/c++/llvm/utils/TableGen/InstrInfoEmitter.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/InstrInfoEmitter.cpp
@@ -39,16 +39,10 @@ static void PrintBarriers(std::vector<Record*> &Barriers,
// Instruction Itinerary Information.
//===----------------------------------------------------------------------===//
-struct RecordNameComparator {
- bool operator()(const Record *Rec1, const Record *Rec2) const {
- return Rec1->getName() < Rec2->getName();
- }
-};
-
void InstrInfoEmitter::GatherItinClasses() {
std::vector<Record*> DefList =
Records.getAllDerivedDefinitions("InstrItinClass");
- std::sort(DefList.begin(), DefList.end(), RecordNameComparator());
+ std::sort(DefList.begin(), DefList.end(), LessRecord());
for (unsigned i = 0, N = DefList.size(); i < N; i++)
ItinClassMap[DefList[i]->getName()] = i;
@@ -98,7 +92,8 @@ InstrInfoEmitter::GetOperandInfo(const CodeGenInstruction &Inst) {
else if (OpR->isSubClassOf("PointerLikeRegClass"))
Res += utostr(OpR->getValueAsInt("RegClassKind")) + ", ";
else
- Res += "0, ";
+ // -1 means the operand does not have a fixed register class.
+ Res += "-1, ";
// Fill in applicable flags.
Res += "0";
@@ -149,7 +144,7 @@ void InstrInfoEmitter::EmitOperandInfo(raw_ostream &OS,
const CodeGenTarget &Target = CDP.getTargetInfo();
for (CodeGenTarget::inst_iterator II = Target.inst_begin(),
E = Target.inst_end(); II != E; ++II) {
- std::vector<std::string> OperandInfo = GetOperandInfo(II->second);
+ std::vector<std::string> OperandInfo = GetOperandInfo(**II);
unsigned &N = OperandInfoIDs[OperandInfo];
if (N != 0) continue;
@@ -214,7 +209,7 @@ void InstrInfoEmitter::run(raw_ostream &OS) {
// Emit all of the instruction's implicit uses and defs.
for (CodeGenTarget::inst_iterator II = Target.inst_begin(),
E = Target.inst_end(); II != E; ++II) {
- Record *Inst = II->second.TheDef;
+ Record *Inst = (*II)->TheDef;
std::vector<Record*> Uses = Inst->getValueAsListOfDefs("Uses");
if (!Uses.empty()) {
unsigned &IL = EmittedLists[Uses];
@@ -244,8 +239,8 @@ void InstrInfoEmitter::run(raw_ostream &OS) {
//
OS << "\nstatic const TargetInstrDesc " << TargetName
<< "Insts[] = {\n";
- std::vector<const CodeGenInstruction*> NumberedInstructions;
- Target.getInstructionsByEnumValue(NumberedInstructions);
+ const std::vector<const CodeGenInstruction*> &NumberedInstructions =
+ Target.getInstructionsByEnumValue();
for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i)
emitRecord(*NumberedInstructions[i], i, InstrInfo, EmittedLists,
@@ -275,6 +270,7 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
if (Inst.isReturn) OS << "|(1<<TID::Return)";
if (Inst.isBranch) OS << "|(1<<TID::Branch)";
if (Inst.isIndirectBranch) OS << "|(1<<TID::IndirectBranch)";
+ if (Inst.isCompare) OS << "|(1<<TID::Compare)";
if (Inst.isBarrier) OS << "|(1<<TID::Barrier)";
if (Inst.hasDelaySlot) OS << "|(1<<TID::DelaySlot)";
if (Inst.isCall) OS << "|(1<<TID::Call)";
@@ -294,20 +290,20 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
if (Inst.isAsCheapAsAMove) OS << "|(1<<TID::CheapAsAMove)";
if (Inst.hasExtraSrcRegAllocReq) OS << "|(1<<TID::ExtraSrcRegAllocReq)";
if (Inst.hasExtraDefRegAllocReq) OS << "|(1<<TID::ExtraDefRegAllocReq)";
- OS << ", 0";
// Emit all of the target-specific flags...
- ListInit *LI = InstrInfo->getValueAsListInit("TSFlagsFields");
- ListInit *Shift = InstrInfo->getValueAsListInit("TSFlagsShifts");
- if (LI->getSize() != Shift->getSize())
- throw "Lengths of " + InstrInfo->getName() +
- ":(TargetInfoFields, TargetInfoPositions) must be equal!";
-
- for (unsigned i = 0, e = LI->getSize(); i != e; ++i)
- emitShiftedValue(Inst.TheDef, dynamic_cast<StringInit*>(LI->getElement(i)),
- dynamic_cast<IntInit*>(Shift->getElement(i)), OS);
-
- OS << ", ";
+ BitsInit *TSF = Inst.TheDef->getValueAsBitsInit("TSFlags");
+ if (!TSF) throw "no TSFlags?";
+ uint64_t Value = 0;
+ for (unsigned i = 0, e = TSF->getNumBits(); i != e; ++i) {
+ if (BitInit *Bit = dynamic_cast<BitInit*>(TSF->getBit(i)))
+ Value |= uint64_t(Bit->getValue()) << i;
+ else
+ throw "Invalid TSFlags bit in " + Inst.TheDef->getName();
+ }
+ OS << ", 0x";
+ OS.write_hex(Value);
+ OS << "ULL, ";
// Emit the implicit uses and defs lists...
std::vector<Record*> UseList = Inst.TheDef->getValueAsListOfDefs("Uses");
@@ -334,66 +330,6 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
OS << "0";
else
OS << "OperandInfo" << OpInfo.find(OperandInfo)->second;
-
- OS << " }, // Inst #" << Num << " = " << Inst.TheDef->getName() << "\n";
-}
-
-void InstrInfoEmitter::emitShiftedValue(Record *R, StringInit *Val,
- IntInit *ShiftInt, raw_ostream &OS) {
- if (Val == 0 || ShiftInt == 0)
- throw std::string("Illegal value or shift amount in TargetInfo*!");
- RecordVal *RV = R->getValue(Val->getValue());
- int Shift = ShiftInt->getValue();
-
- if (RV == 0 || RV->getValue() == 0) {
- // This isn't an error if this is a builtin instruction.
- if (R->getName() != "PHI" &&
- R->getName() != "INLINEASM" &&
- R->getName() != "DBG_LABEL" &&
- R->getName() != "EH_LABEL" &&
- R->getName() != "GC_LABEL" &&
- R->getName() != "KILL" &&
- R->getName() != "EXTRACT_SUBREG" &&
- R->getName() != "INSERT_SUBREG" &&
- R->getName() != "IMPLICIT_DEF" &&
- R->getName() != "SUBREG_TO_REG" &&
- R->getName() != "COPY_TO_REGCLASS" &&
- R->getName() != "DBG_VALUE")
- throw R->getName() + " doesn't have a field named '" +
- Val->getValue() + "'!";
- return;
- }
-
- Init *Value = RV->getValue();
- if (BitInit *BI = dynamic_cast<BitInit*>(Value)) {
- if (BI->getValue()) OS << "|(1<<" << Shift << ")";
- return;
- } else if (BitsInit *BI = dynamic_cast<BitsInit*>(Value)) {
- // Convert the Bits to an integer to print...
- Init *I = BI->convertInitializerTo(new IntRecTy());
- if (I)
- if (IntInit *II = dynamic_cast<IntInit*>(I)) {
- if (II->getValue()) {
- if (Shift)
- OS << "|(" << II->getValue() << "<<" << Shift << ")";
- else
- OS << "|" << II->getValue();
- }
- return;
- }
-
- } else if (IntInit *II = dynamic_cast<IntInit*>(Value)) {
- if (II->getValue()) {
- if (Shift)
- OS << "|(" << II->getValue() << "<<" << Shift << ")";
- else
- OS << II->getValue();
- }
- return;
- }
-
- errs() << "Unhandled initializer: " << *Val << "\n";
- throw "In record '" + R->getName() + "' for TSFlag emission.";
+ OS << " }, // Inst #" << Num << " = " << Inst.TheDef->getName() << "\n";
}
-
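
The TSFlags rewrite above drops the old TSFlagsFields/TSFlagsShifts plumbing and instead packs the record's TSFlags bit vector directly into a 64-bit literal, least significant bit first. The packing itself is just this (a sketch; the real code reads BitsInit/BitInit values and throws on non-constant bits):

#include <cstdint>
#include <cstdio>
#include <vector>

static uint64_t packBits(const std::vector<bool> &Bits) {
  uint64_t Value = 0;
  for (unsigned i = 0, e = (unsigned)Bits.size(); i != e; ++i)
    Value |= uint64_t(Bits[i]) << i;     // bit i of the field -> bit i of the value
  return Value;
}

int main() {
  std::vector<bool> TSFlags = {1, 0, 1, 1};        // 0b1101
  std::printf("0x%llxULL\n", (unsigned long long)packBits(TSFlags));  // 0xdULL
  return 0;
}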
diff --git a/libclamav/c++/llvm/utils/TableGen/InstrInfoEmitter.h b/libclamav/c++/llvm/utils/TableGen/InstrInfoEmitter.h
index 657939e..abb1c6b 100644
--- a/libclamav/c++/llvm/utils/TableGen/InstrInfoEmitter.h
+++ b/libclamav/c++/llvm/utils/TableGen/InstrInfoEmitter.h
@@ -47,8 +47,6 @@ private:
std::map<Record*, unsigned> &BM,
const OperandInfoMapTy &OpInfo,
raw_ostream &OS);
- void emitShiftedValue(Record *R, StringInit *Val, IntInit *Shift,
- raw_ostream &OS);
// Itinerary information.
void GatherItinClasses();
diff --git a/libclamav/c++/llvm/utils/TableGen/IntrinsicEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/IntrinsicEmitter.cpp
index c5df9e4..ba30d97 100644
--- a/libclamav/c++/llvm/utils/TableGen/IntrinsicEmitter.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/IntrinsicEmitter.cpp
@@ -30,6 +30,8 @@ void IntrinsicEmitter::run(raw_ostream &OS) {
if (TargetOnly && !Ints.empty())
TargetPrefix = Ints[0].TargetPrefix;
+ EmitPrefix(OS);
+
// Emit the enum information.
EmitEnumInfo(Ints, OS);
@@ -59,6 +61,23 @@ void IntrinsicEmitter::run(raw_ostream &OS) {
// Emit code to translate GCC builtins into LLVM intrinsics.
EmitIntrinsicToGCCBuiltinMap(Ints, OS);
+
+ EmitSuffix(OS);
+}
+
+void IntrinsicEmitter::EmitPrefix(raw_ostream &OS) {
+ OS << "// VisualStudio defines setjmp as _setjmp\n"
+ "#if defined(_MSC_VER) && defined(setjmp)\n"
+ "#define setjmp_undefined_for_visual_studio\n"
+ "#undef setjmp\n"
+ "#endif\n\n";
+}
+
+void IntrinsicEmitter::EmitSuffix(raw_ostream &OS) {
+ OS << "#if defined(_MSC_VER) && defined(setjmp_undefined_for_visual_studio)\n"
+ "// let's return it to _setjmp state\n"
+ "#define setjmp _setjmp\n"
+ "#endif\n\n";
}
void IntrinsicEmitter::EmitEnumInfo(const std::vector<CodeGenIntrinsic> &Ints,
@@ -172,10 +191,11 @@ static void EmitTypeGenerate(raw_ostream &OS, const Record *ArgType,
static void EmitTypeGenerate(raw_ostream &OS,
const std::vector<Record*> &ArgTypes,
unsigned &ArgNo) {
- if (ArgTypes.size() == 1) {
- EmitTypeGenerate(OS, ArgTypes.front(), ArgNo);
- return;
- }
+ if (ArgTypes.empty())
+ return EmitTypeForValueType(OS, MVT::isVoid);
+
+ if (ArgTypes.size() == 1)
+ return EmitTypeGenerate(OS, ArgTypes.front(), ArgNo);
OS << "StructType::get(Context, ";
@@ -251,11 +271,11 @@ namespace {
unsigned RHSSize = RHSVec->size();
unsigned LHSSize = LHSVec->size();
- do {
+ for (; i != LHSSize; ++i) {
if (i == RHSSize) return false; // RHS is shorter than LHS.
if ((*LHSVec)[i] != (*RHSVec)[i])
return (*LHSVec)[i]->getName() < (*RHSVec)[i]->getName();
- } while (++i != LHSSize);
+ }
if (i != RHSSize) return true;
@@ -525,7 +545,7 @@ EmitModRefBehavior(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS){
OS << "switch (iid) {\n";
OS << "default:\n return UnknownModRefBehavior;\n";
for (unsigned i = 0, e = Ints.size(); i != e; ++i) {
- if (Ints[i].ModRef == CodeGenIntrinsic::WriteMem)
+ if (Ints[i].ModRef == CodeGenIntrinsic::ReadWriteMem)
continue;
OS << "case " << TargetPrefix << "Intrinsic::" << Ints[i].EnumName
<< ":\n";
@@ -539,7 +559,7 @@ EmitModRefBehavior(const std::vector<CodeGenIntrinsic> &Ints, raw_ostream &OS){
case CodeGenIntrinsic::ReadMem:
OS << " return OnlyReadsMemory;\n";
break;
- case CodeGenIntrinsic::WriteArgMem:
+ case CodeGenIntrinsic::ReadWriteArgMem:
OS << " return AccessesArguments;\n";
break;
}
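
Besides the setjmp guards, the hunk above also rewrites a comparator: the old do/while read element 0 even when the left-hand vector was empty, while the new for loop handles empty and prefix cases cleanly. A sketch of the fixed ordering over plain strings (the real code compares Record pointers and falls back to their names):

#include <cassert>
#include <string>
#include <vector>

static bool lessRecordVec(const std::vector<std::string> &L,
                          const std::vector<std::string> &R) {
  unsigned i = 0;
  for (; i != L.size(); ++i) {
    if (i == R.size()) return false;     // R is shorter than L
    if (L[i] != R[i]) return L[i] < R[i];
  }
  return i != R.size();                  // L is a strict prefix of R
}

int main() {
  assert(lessRecordVec({}, {"a"}));              // empty LHS no longer reads past the end
  assert(!lessRecordVec({"a", "b"}, {"a"}));
  assert(lessRecordVec({"a"}, {"a", "b"}));
  assert(lessRecordVec({"a"}, {"b"}));
  return 0;
}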
diff --git a/libclamav/c++/llvm/utils/TableGen/IntrinsicEmitter.h b/libclamav/c++/llvm/utils/TableGen/IntrinsicEmitter.h
index c3c92bc..b1efecb 100644
--- a/libclamav/c++/llvm/utils/TableGen/IntrinsicEmitter.h
+++ b/libclamav/c++/llvm/utils/TableGen/IntrinsicEmitter.h
@@ -28,6 +28,8 @@ namespace llvm {
: Records(R), TargetOnly(T) {}
void run(raw_ostream &OS);
+
+ void EmitPrefix(raw_ostream &OS);
void EmitEnumInfo(const std::vector<CodeGenIntrinsic> &Ints,
raw_ostream &OS);
@@ -50,6 +52,7 @@ namespace llvm {
raw_ostream &OS);
void EmitIntrinsicToGCCBuiltinMap(const std::vector<CodeGenIntrinsic> &Ints,
raw_ostream &OS);
+ void EmitSuffix(raw_ostream &OS);
};
} // End llvm namespace
diff --git a/libclamav/c++/llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp
index da2d54f..8b81e14 100644
--- a/libclamav/c++/llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/LLVMCConfigurationEmitter.cpp
@@ -33,6 +33,7 @@ namespace {
/// Typedefs
typedef std::vector<Record*> RecordVector;
+typedef std::vector<const DagInit*> DagVector;
typedef std::vector<std::string> StrVector;
//===----------------------------------------------------------------------===//
@@ -49,7 +50,7 @@ const unsigned Indent4 = TabWidth*4;
const char * const DefaultHelpString = "NO HELP MESSAGE PROVIDED";
// Name for the "sink" option.
-const char * const SinkOptionName = "AutoGeneratedSinkOption";
+const char * const SinkOptionName = "SinkOption";
//===----------------------------------------------------------------------===//
/// Helper functions
@@ -109,11 +110,6 @@ void CheckNumberOfArguments (const DagInit& d, unsigned minArgs) {
throw GetOperatorName(d) + ": too few arguments!";
}
-// IsDagEmpty - is this DAG marked with an empty marker?
-bool IsDagEmpty (const DagInit& d) {
- return GetOperatorName(d) == "empty_dag_marker";
-}
-
// EscapeVariableName - Escape commas and other symbols not allowed
// in the C++ variable names. Makes it possible to use options named
// like "Wa," (useful for prefix options).
@@ -188,21 +184,25 @@ void apply(F Fun, T0& Arg0, T1& Arg1) {
/// documentation for detailed description of differences.
namespace OptionType {
- enum OptionType { Alias, Switch, Parameter, ParameterList,
- Prefix, PrefixList};
+ enum OptionType { Alias, Switch, SwitchList,
+ Parameter, ParameterList, Prefix, PrefixList };
bool IsAlias(OptionType t) {
return (t == Alias);
}
bool IsList (OptionType t) {
- return (t == ParameterList || t == PrefixList);
+ return (t == SwitchList || t == ParameterList || t == PrefixList);
}
bool IsSwitch (OptionType t) {
return (t == Switch);
}
+ bool IsSwitchList (OptionType t) {
+ return (t == SwitchList);
+ }
+
bool IsParameter (OptionType t) {
return (t == Parameter || t == Prefix);
}
@@ -214,6 +214,8 @@ OptionType::OptionType stringToOptionType(const std::string& T) {
return OptionType::Alias;
else if (T == "switch_option")
return OptionType::Switch;
+ else if (T == "switch_list_option")
+ return OptionType::SwitchList;
else if (T == "parameter_option")
return OptionType::Parameter;
else if (T == "parameter_list_option")
@@ -228,10 +230,9 @@ OptionType::OptionType stringToOptionType(const std::string& T) {
namespace OptionDescriptionFlags {
enum OptionDescriptionFlags { Required = 0x1, Hidden = 0x2,
- ReallyHidden = 0x4, Extern = 0x8,
- OneOrMore = 0x10, Optional = 0x20,
- CommaSeparated = 0x40, ForwardNotSplit = 0x80,
- ZeroOrMore = 0x100 };
+ ReallyHidden = 0x4, OneOrMore = 0x8,
+ Optional = 0x10, CommaSeparated = 0x20,
+ ForwardNotSplit = 0x40, ZeroOrMore = 0x80 };
}
/// OptionDescription - Represents data contained in a single
@@ -256,7 +257,13 @@ struct OptionDescription {
/// GenVariableName - Returns the variable name used in the
/// generated C++ code.
- std::string GenVariableName() const;
+ std::string GenVariableName() const
+ { return "autogenerated::" + GenOptionType() + EscapeVariableName(Name); }
+
+ /// GenPlainVariableName - Returns the variable name without the namespace
+ /// prefix.
+ std::string GenPlainVariableName() const
+ { return GenOptionType() + EscapeVariableName(Name); }
/// Merge - Merge two option descriptions.
void Merge (const OptionDescription& other);
@@ -273,9 +280,6 @@ struct OptionDescription {
bool isCommaSeparated() const;
void setCommaSeparated();
- bool isExtern() const;
- void setExtern();
-
bool isForwardNotSplit() const;
void setForwardNotSplit();
@@ -300,12 +304,23 @@ struct OptionDescription {
bool isSwitch() const
{ return OptionType::IsSwitch(this->Type); }
+ bool isSwitchList() const
+ { return OptionType::IsSwitchList(this->Type); }
+
bool isParameter() const
{ return OptionType::IsParameter(this->Type); }
bool isList() const
{ return OptionType::IsList(this->Type); }
+ bool isParameterList() const
+ { return (OptionType::IsList(this->Type)
+ && !OptionType::IsSwitchList(this->Type)); }
+
+private:
+
+ // GenOptionType - Helper function used by GenVariableName().
+ std::string GenOptionType() const;
};
void OptionDescription::CheckConsistency() const {
@@ -359,13 +374,6 @@ void OptionDescription::setForwardNotSplit() {
Flags |= OptionDescriptionFlags::ForwardNotSplit;
}
-bool OptionDescription::isExtern() const {
- return Flags & OptionDescriptionFlags::Extern;
-}
-void OptionDescription::setExtern() {
- Flags |= OptionDescriptionFlags::Extern;
-}
-
bool OptionDescription::isRequired() const {
return Flags & OptionDescriptionFlags::Required;
}
@@ -417,6 +425,8 @@ const char* OptionDescription::GenTypeDeclaration() const {
return "cl::list<std::string>";
case OptionType::Switch:
return "cl::opt<bool>";
+ case OptionType::SwitchList:
+ return "cl::list<bool>";
case OptionType::Parameter:
case OptionType::Prefix:
default:
@@ -424,20 +434,21 @@ const char* OptionDescription::GenTypeDeclaration() const {
}
}
-std::string OptionDescription::GenVariableName() const {
- const std::string& EscapedName = EscapeVariableName(Name);
+std::string OptionDescription::GenOptionType() const {
switch (Type) {
case OptionType::Alias:
- return "AutoGeneratedAlias_" + EscapedName;
+ return "Alias_";
case OptionType::PrefixList:
case OptionType::ParameterList:
- return "AutoGeneratedList_" + EscapedName;
+ return "List_";
case OptionType::Switch:
- return "AutoGeneratedSwitch_" + EscapedName;
+ return "Switch_";
+ case OptionType::SwitchList:
+ return "SwitchList_";
case OptionType::Prefix:
case OptionType::Parameter:
default:
- return "AutoGeneratedParameter_" + EscapedName;
+ return "Parameter_";
}
}
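
A minimal sketch of how the renamed helpers shape the generated code, assuming two hypothetical options "Wall" (a switch) and "o" (a parameter): definitions are emitted with GenPlainVariableName() inside the llvmc::autogenerated namespace, while other generated code reaches the same objects through the qualified GenVariableName().

    #include "llvm/Support/CommandLine.h"
    using namespace llvm;

    namespace llvmc {
    namespace autogenerated {

    // Emitted via GenPlainVariableName(): type prefix + escaped option name.
    cl::opt<bool>        Switch_Wall("Wall");
    cl::opt<std::string> Parameter_o("o");
    cl::list<bool>       SwitchList_W("W");   // the new switch_list_option type

    } // End namespace autogenerated.
    } // End namespace llvmc.

    // Generated code outside the namespace refers to these via
    // GenVariableName(), e.g. "autogenerated::Switch_Wall".
    int main() { return 0; }
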
@@ -457,9 +468,11 @@ public:
// wrong type.
const OptionDescription& FindSwitch(const std::string& OptName) const;
const OptionDescription& FindParameter(const std::string& OptName) const;
- const OptionDescription& FindList(const std::string& OptName) const;
+ const OptionDescription& FindParameterList(const std::string& OptName) const;
const OptionDescription&
FindListOrParameter(const std::string& OptName) const;
+ const OptionDescription&
+ FindParameterListOrParameter(const std::string& OptName) const;
/// insertDescription - Insert new OptionDescription into
/// OptionDescriptions list
@@ -489,10 +502,10 @@ OptionDescriptions::FindSwitch(const std::string& OptName) const {
}
const OptionDescription&
-OptionDescriptions::FindList(const std::string& OptName) const {
+OptionDescriptions::FindParameterList(const std::string& OptName) const {
const OptionDescription& OptDesc = this->FindOption(OptName);
- if (!OptDesc.isList())
- throw OptName + ": incorrect option type - should be a list!";
+ if (!OptDesc.isList() || OptDesc.isSwitchList())
+ throw OptName + ": incorrect option type - should be a parameter list!";
return OptDesc;
}
@@ -513,6 +526,16 @@ OptionDescriptions::FindListOrParameter(const std::string& OptName) const {
return OptDesc;
}
+const OptionDescription&
+OptionDescriptions::FindParameterListOrParameter
+(const std::string& OptName) const {
+ const OptionDescription& OptDesc = this->FindOption(OptName);
+ if ((!OptDesc.isList() && !OptDesc.isParameter()) || OptDesc.isSwitchList())
+ throw OptName
+ + ": incorrect option type - should be a parameter list or parameter!";
+ return OptDesc;
+}
+
void OptionDescriptions::InsertDescription (const OptionDescription& o) {
container_type::iterator I = Descriptions.find(o.Name);
if (I != Descriptions.end()) {
@@ -586,7 +609,6 @@ void InvokeDagInitHandler(const FunctionObject* const Obj,
((Obj)->*(h))(Dag, IndentLevel, O);
}
-
template <typename H>
typename HandlerTable<H>::HandlerMap HandlerTable<H>::Handlers_;
@@ -615,7 +637,6 @@ public:
: optDesc_(OD)
{
if (!staticMembersInitialized_) {
- AddHandler("extern", &CollectOptionProperties::onExtern);
AddHandler("help", &CollectOptionProperties::onHelp);
AddHandler("hidden", &CollectOptionProperties::onHidden);
AddHandler("init", &CollectOptionProperties::onInit);
@@ -644,11 +665,6 @@ private:
/// Option property handlers --
/// Methods that handle option properties such as (help) or (hidden).
- void onExtern (const DagInit& d) {
- CheckNumberOfArguments(d, 0);
- optDesc_.setExtern();
- }
-
void onHelp (const DagInit& d) {
CheckNumberOfArguments(d, 1);
optDesc_.Help = EscapeQuotes(InitPtrToString(d.getArg(0)));
@@ -666,8 +682,8 @@ private:
void onCommaSeparated (const DagInit& d) {
CheckNumberOfArguments(d, 0);
- if (!optDesc_.isList())
- throw "'comma_separated' is valid only on list options!";
+ if (!optDesc_.isParameterList())
+ throw "'comma_separated' is valid only on parameter list options!";
optDesc_.setCommaSeparated();
}
@@ -709,7 +725,7 @@ private:
void onZeroOrMore (const DagInit& d) {
CheckNumberOfArguments(d, 0);
- if (OptionType::IsList(optDesc_.Type))
+ if (optDesc_.isList())
llvm::errs() << "Warning: specifying the 'zero_or_more' property "
"on a list option has no effect.\n";
@@ -720,7 +736,7 @@ private:
void onOptional (const DagInit& d) {
CheckNumberOfArguments(d, 0);
- if (!OptionType::IsList(optDesc_.Type))
+ if (!optDesc_.isList())
llvm::errs() << "Warning: specifying the 'optional' property "
"on a non-list option has no effect.\n";
@@ -734,7 +750,7 @@ private:
if (val < 2)
throw "Error in the 'multi_val' property: "
"the value must be greater than 1!";
- if (!OptionType::IsList(optDesc_.Type))
+ if (!optDesc_.isParameterList())
throw "The multi_val property is valid only on list options!";
optDesc_.MultiVal = val;
}
@@ -761,16 +777,16 @@ public:
OptionDescription OD(Type, Name);
- if (!OD.isExtern())
- CheckNumberOfArguments(d, 2);
+ CheckNumberOfArguments(d, 2);
if (OD.isAlias()) {
// Aliases store the aliased option name in the 'Help' field.
OD.Help = InitPtrToString(d.getArg(1));
}
- else if (!OD.isExtern()) {
+ else {
processOptionProperties(d, OD);
}
+
OptDescs_.InsertDescription(OD);
}
@@ -789,15 +805,14 @@ private:
/// CollectOptionDescriptions - Collects option properties from all
/// OptionLists.
-void CollectOptionDescriptions (RecordVector::const_iterator B,
- RecordVector::const_iterator E,
+void CollectOptionDescriptions (const RecordVector& V,
OptionDescriptions& OptDescs)
{
// For every OptionList:
- for (; B!=E; ++B) {
- RecordVector::value_type T = *B;
+ for (RecordVector::const_iterator B = V.begin(),
+ E = V.end(); B!=E; ++B) {
// Throws an exception if the value does not exist.
- ListInit* PropList = T->getValueAsListInit("options");
+ ListInit* PropList = (*B)->getValueAsListInit("options");
// For every option description in this list:
// collect the information and
@@ -831,11 +846,7 @@ struct ToolDescription : public RefCountedBase<ToolDescription> {
// Default ctor here is needed because StringMap can only store
// DefaultConstructible objects
- ToolDescription ()
- : CmdLine(0), Actions(0), OutFileOption("-o"),
- Flags(0), OnEmpty(0)
- {}
- ToolDescription (const std::string& n)
+ ToolDescription (const std::string &n = "")
: Name(n), CmdLine(0), Actions(0), OutFileOption("-o"),
Flags(0), OnEmpty(0)
{}
@@ -974,12 +985,12 @@ private:
/// CollectToolDescriptions - Gather information about tool properties
/// from the parsed TableGen data (basically a wrapper for the
/// CollectToolProperties function object).
-void CollectToolDescriptions (RecordVector::const_iterator B,
- RecordVector::const_iterator E,
+void CollectToolDescriptions (const RecordVector& Tools,
ToolDescriptions& ToolDescs)
{
// Iterate over a properties list of every Tool definition
- for (;B!=E;++B) {
+ for (RecordVector::const_iterator B = Tools.begin(),
+ E = Tools.end(); B!=E; ++B) {
const Record* T = *B;
// Throws an exception if the value does not exist.
ListInit* PropList = T->getValueAsListInit("properties");
@@ -995,30 +1006,17 @@ void CollectToolDescriptions (RecordVector::const_iterator B,
/// FillInEdgeVector - Merge all compilation graph definitions into
/// one single edge list.
-void FillInEdgeVector(RecordVector::const_iterator B,
- RecordVector::const_iterator E, RecordVector& Out) {
- for (; B != E; ++B) {
- const ListInit* edges = (*B)->getValueAsListInit("edges");
-
- for (unsigned i = 0; i < edges->size(); ++i)
- Out.push_back(edges->getElementAsRecord(i));
- }
-}
-
-/// CalculatePriority - Calculate the priority of this plugin.
-int CalculatePriority(RecordVector::const_iterator B,
- RecordVector::const_iterator E) {
- int priority = 0;
+void FillInEdgeVector(const RecordVector& CompilationGraphs,
+ DagVector& Out) {
+ for (RecordVector::const_iterator B = CompilationGraphs.begin(),
+ E = CompilationGraphs.end(); B != E; ++B) {
+ const ListInit* Edges = (*B)->getValueAsListInit("edges");
- if (B != E) {
- priority = static_cast<int>((*B)->getValueAsInt("priority"));
-
- if (++B != E)
- throw "More than one 'PluginPriority' instance found: "
- "most probably an error!";
+ for (ListInit::const_iterator B = Edges->begin(),
+ E = Edges->end(); B != E; ++B) {
+ Out.push_back(&InitPtrToDag(*B));
+ }
}
-
- return priority;
}
/// NotInGraph - Helper function object for FilterNotInGraph.
@@ -1038,18 +1036,18 @@ public:
/// FilterNotInGraph - Filter out from ToolDescs all Tools not
/// mentioned in the compilation graph definition.
-void FilterNotInGraph (const RecordVector& EdgeVector,
+void FilterNotInGraph (const DagVector& EdgeVector,
ToolDescriptions& ToolDescs) {
// List all tools mentioned in the graph.
llvm::StringSet<> ToolsInGraph;
- for (RecordVector::const_iterator B = EdgeVector.begin(),
+ for (DagVector::const_iterator B = EdgeVector.begin(),
E = EdgeVector.end(); B != E; ++B) {
- const Record* Edge = *B;
- const std::string& NodeA = Edge->getValueAsString("a");
- const std::string& NodeB = Edge->getValueAsString("b");
+ const DagInit* Edge = *B;
+ const std::string& NodeA = InitPtrToString(Edge->getArg(0));
+ const std::string& NodeB = InitPtrToString(Edge->getArg(1));
if (NodeA != "root")
ToolsInGraph.insert(NodeA);
@@ -1079,10 +1077,8 @@ void FillInToolToLang (const ToolDescriptions& ToolDescs,
}
/// TypecheckGraph - Check that names for output and input languages
-/// on all edges do match. This doesn't do much when the information
-/// about the whole graph is not available (i.e. when compiling most
-/// plugins).
-void TypecheckGraph (const RecordVector& EdgeVector,
+/// on all edges do match.
+void TypecheckGraph (const DagVector& EdgeVector,
const ToolDescriptions& ToolDescs) {
StringMap<StringSet<> > ToolToInLang;
StringMap<std::string> ToolToOutLang;
@@ -1091,11 +1087,11 @@ void TypecheckGraph (const RecordVector& EdgeVector,
StringMap<std::string>::iterator IAE = ToolToOutLang.end();
StringMap<StringSet<> >::iterator IBE = ToolToInLang.end();
- for (RecordVector::const_iterator B = EdgeVector.begin(),
+ for (DagVector::const_iterator B = EdgeVector.begin(),
E = EdgeVector.end(); B != E; ++B) {
- const Record* Edge = *B;
- const std::string& NodeA = Edge->getValueAsString("a");
- const std::string& NodeB = Edge->getValueAsString("b");
+ const DagInit* Edge = *B;
+ const std::string& NodeA = InitPtrToString(Edge->getArg(0));
+ const std::string& NodeB = InitPtrToString(Edge->getArg(1));
StringMap<std::string>::iterator IA = ToolToOutLang.find(NodeA);
StringMap<StringSet<> >::iterator IB = ToolToInLang.find(NodeB);
@@ -1234,10 +1230,15 @@ public:
}
};
+/// IsOptionalEdge - Validate that the 'optional_edge' has proper structure.
+bool IsOptionalEdge (const DagInit& Edg) {
+ return (GetOperatorName(Edg) == "optional_edge") && (Edg.getNumArgs() > 2);
+}
+
/// CheckForSuperfluousOptions - Check that there are no side
/// effect-free options (specified only in the OptionList). Otherwise,
/// output a warning.
-void CheckForSuperfluousOptions (const RecordVector& Edges,
+void CheckForSuperfluousOptions (const DagVector& EdgeVector,
const ToolDescriptions& ToolDescs,
const OptionDescriptions& OptDescs) {
llvm::StringSet<> nonSuperfluousOptions;
@@ -1255,13 +1256,13 @@ void CheckForSuperfluousOptions (const RecordVector& Edges,
// Add all options mentioned in the 'case' clauses of the
// OptionalEdges of the compilation graph to the set of
// non-superfluous options.
- for (RecordVector::const_iterator B = Edges.begin(), E = Edges.end();
- B != E; ++B) {
- const Record* Edge = *B;
- DagInit& Weight = *Edge->getValueAsDag("weight");
-
- if (!IsDagEmpty(Weight))
+ for (DagVector::const_iterator B = EdgeVector.begin(),
+ E = EdgeVector.end(); B != E; ++B) {
+ const DagInit& Edge = **B;
+ if (IsOptionalEdge(Edge)) {
+ const DagInit& Weight = InitPtrToDag(Edge.getArg(2));
WalkCase(&Weight, ExtractOptionNames(nonSuperfluousOptions), Id());
+ }
}
// Check that all options in OptDescs belong to the set of
@@ -1440,7 +1441,7 @@ bool EmitCaseTest2Args(const std::string& TestName,
return true;
}
else if (TestName == "element_in_list") {
- const OptionDescription& OptDesc = OptDescs.FindList(OptName);
+ const OptionDescription& OptDesc = OptDescs.FindParameterList(OptName);
const std::string& VarName = OptDesc.GenVariableName();
O << "std::find(" << VarName << ".begin(),\n";
O.indent(IndentLevel + Indent1)
@@ -1815,6 +1816,24 @@ void EmitCmdLineVecFill(const Init* CmdLine, const std::string& ToolName,
}
+/// EmitForEachListElementCycleHeader - Emit common code for iterating through
+/// all elements of a list. Helper function used by
+/// EmitForwardOptionPropertyHandlingCode.
+void EmitForEachListElementCycleHeader (const OptionDescription& D,
+ unsigned IndentLevel,
+ raw_ostream& O) {
+ unsigned IndentLevel1 = IndentLevel + Indent1;
+
+ O.indent(IndentLevel)
+ << "for (" << D.GenTypeDeclaration()
+ << "::iterator B = " << D.GenVariableName() << ".begin(),\n";
+ O.indent(IndentLevel)
+ << "E = " << D.GenVariableName() << ".end(); B != E;) {\n";
+ O.indent(IndentLevel1) << "unsigned pos = " << D.GenVariableName()
+ << ".getPosition(B - " << D.GenVariableName()
+ << ".begin());\n";
+}
+
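
A compilable sketch of the kind of loop this helper now writes into the generated GenerateAction() bodies, assuming a hypothetical list option "I" (variable List_I); the statements between the position lookup and ++B differ per caller (PrefixList, ParameterList, SwitchList), so a stand-in body is used here.

    #include "llvm/Support/CommandLine.h"
    #include <string>
    #include <utility>
    #include <vector>
    using namespace llvm;

    namespace autogenerated { cl::list<std::string> List_I("I"); } // hypothetical

    void sketchForwarding(std::vector<std::pair<unsigned, std::string> > &vec) {
      for (cl::list<std::string>::iterator B = autogenerated::List_I.begin(),
           E = autogenerated::List_I.end(); B != E;) {
        unsigned pos = autogenerated::List_I.getPosition(B - autogenerated::List_I.begin());
        vec.push_back(std::make_pair(pos, *B)); // stand-in for the per-case body
        ++B;
      }
    }
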
/// EmitForwardOptionPropertyHandlingCode - Helper function used to
/// implement EmitActionHandler. Emits code for
/// handling the (forward) and (forward_as) option properties.
@@ -1855,14 +1874,7 @@ void EmitForwardOptionPropertyHandlingCode (const OptionDescription& D,
<< D.GenVariableName() << "));\n";
break;
case OptionType::PrefixList:
- O.indent(IndentLevel)
- << "for (" << D.GenTypeDeclaration()
- << "::iterator B = " << D.GenVariableName() << ".begin(),\n";
- O.indent(IndentLevel)
- << "E = " << D.GenVariableName() << ".end(); B != E;) {\n";
- O.indent(IndentLevel1) << "unsigned pos = " << D.GenVariableName()
- << ".getPosition(B - " << D.GenVariableName()
- << ".begin());\n";
+ EmitForEachListElementCycleHeader(D, IndentLevel, O);
O.indent(IndentLevel1) << "vec.push_back(std::make_pair(pos, \""
<< Name << "\" + " << "*B));\n";
O.indent(IndentLevel1) << "++B;\n";
@@ -1875,14 +1887,7 @@ void EmitForwardOptionPropertyHandlingCode (const OptionDescription& D,
O.indent(IndentLevel) << "}\n";
break;
case OptionType::ParameterList:
- O.indent(IndentLevel)
- << "for (" << D.GenTypeDeclaration() << "::iterator B = "
- << D.GenVariableName() << ".begin(),\n";
- O.indent(IndentLevel) << "E = " << D.GenVariableName()
- << ".end() ; B != E;) {\n";
- O.indent(IndentLevel1) << "unsigned pos = " << D.GenVariableName()
- << ".getPosition(B - " << D.GenVariableName()
- << ".begin());\n";
+ EmitForEachListElementCycleHeader(D, IndentLevel, O);
O.indent(IndentLevel1) << "vec.push_back(std::make_pair(pos, \""
<< Name << "\"));\n";
@@ -1893,6 +1898,13 @@ void EmitForwardOptionPropertyHandlingCode (const OptionDescription& D,
O.indent(IndentLevel) << "}\n";
break;
+ case OptionType::SwitchList:
+ EmitForEachListElementCycleHeader(D, IndentLevel, O);
+ O.indent(IndentLevel1) << "vec.push_back(std::make_pair(pos, \""
+ << Name << "\"));\n";
+ O.indent(IndentLevel1) << "++B;\n";
+ O.indent(IndentLevel) << "}\n";
+ break;
case OptionType::Alias:
default:
throw "Aliases are not allowed in tool option descriptions!";
@@ -1908,10 +1920,10 @@ struct ActionHandlingCallbackBase
unsigned IndentLevel, raw_ostream& O) const
{
O.indent(IndentLevel)
- << "throw std::runtime_error(\"" <<
- (d.getNumArgs() >= 1 ? InitPtrToString(d.getArg(0))
- : "Unknown error!")
+ << "PrintError(\""
+ << (d.getNumArgs() >= 1 ? InitPtrToString(d.getArg(0)) : "Unknown error!")
<< "\");\n";
+ O.indent(IndentLevel) << "return 1;\n";
}
void onWarningDag(const DagInit& d,
@@ -1926,7 +1938,6 @@ struct ActionHandlingCallbackBase
/// EmitActionHandlersCallback - Emit code that handles actions. Used by
/// EmitGenerateActionMethod() as an argument to EmitCaseConstructHandler().
-
class EmitActionHandlersCallback;
typedef void (EmitActionHandlersCallback::* EmitActionHandlersCallbackHandler)
@@ -1997,7 +2008,12 @@ class EmitActionHandlersCallback :
{
CheckNumberOfArguments(Dag, 1);
const std::string& Name = InitPtrToString(Dag.getArg(0));
- const OptionDescription& D = OptDescs.FindListOrParameter(Name);
+ const OptionDescription& D = OptDescs.FindParameterListOrParameter(Name);
+
+ if (D.isSwitchList()) {
+ throw std::runtime_error
+ ("forward_value is not allowed with switch_list");
+ }
if (D.isParameter()) {
O.indent(IndentLevel) << "vec.push_back(std::make_pair("
@@ -2005,8 +2021,9 @@ class EmitActionHandlersCallback :
<< D.GenVariableName() << "));\n";
}
else {
- O.indent(IndentLevel) << "for (cl::list<std::string>::iterator B = "
- << D.GenVariableName() << ".begin(), \n";
+ O.indent(IndentLevel) << "for (" << D.GenTypeDeclaration()
+ << "::iterator B = " << D.GenVariableName()
+ << ".begin(), \n";
O.indent(IndentLevel + Indent1) << " E = " << D.GenVariableName()
<< ".end(); B != E; ++B)\n";
O.indent(IndentLevel) << "{\n";
@@ -2026,7 +2043,7 @@ class EmitActionHandlersCallback :
CheckNumberOfArguments(Dag, 2);
const std::string& Name = InitPtrToString(Dag.getArg(0));
const std::string& Hook = InitPtrToString(Dag.getArg(1));
- const OptionDescription& D = OptDescs.FindListOrParameter(Name);
+ const OptionDescription& D = OptDescs.FindParameterListOrParameter(Name);
O.indent(IndentLevel) << "vec.push_back(std::make_pair("
<< D.GenVariableName() << ".getPosition("
@@ -2099,25 +2116,32 @@ class EmitActionHandlersCallback :
};
void EmitGenerateActionMethodHeader(const ToolDescription& D,
- bool IsJoin, raw_ostream& O)
+ bool IsJoin, bool Naked,
+ raw_ostream& O)
{
+ O.indent(Indent1) << "int GenerateAction(Action& Out,\n";
+
if (IsJoin)
- O.indent(Indent1) << "Action GenerateAction(const PathVector& inFiles,\n";
+ O.indent(Indent2) << "const PathVector& inFiles,\n";
else
- O.indent(Indent1) << "Action GenerateAction(const sys::Path& inFile,\n";
+ O.indent(Indent2) << "const sys::Path& inFile,\n";
- O.indent(Indent2) << "bool HasChildren,\n";
+ O.indent(Indent2) << "const bool HasChildren,\n";
O.indent(Indent2) << "const llvm::sys::Path& TempDir,\n";
O.indent(Indent2) << "const InputLanguagesSet& InLangs,\n";
O.indent(Indent2) << "const LanguageMap& LangMap) const\n";
O.indent(Indent1) << "{\n";
- O.indent(Indent2) << "std::string cmd;\n";
- O.indent(Indent2) << "std::string out_file;\n";
- O.indent(Indent2) << "std::vector<std::pair<unsigned, std::string> > vec;\n";
- O.indent(Indent2) << "bool stop_compilation = !HasChildren;\n";
- O.indent(Indent2) << "bool no_out_file = false;\n";
- O.indent(Indent2) << "const char* output_suffix = \""
- << D.OutputSuffix << "\";\n";
+
+ if (!Naked) {
+ O.indent(Indent2) << "std::string cmd;\n";
+ O.indent(Indent2) << "std::string out_file;\n";
+ O.indent(Indent2)
+ << "std::vector<std::pair<unsigned, std::string> > vec;\n";
+ O.indent(Indent2) << "bool stop_compilation = !HasChildren;\n";
+ O.indent(Indent2) << "bool no_out_file = false;\n";
+ O.indent(Indent2) << "std::string output_suffix(\""
+ << D.OutputSuffix << "\");\n";
+ }
}
// EmitGenerateActionMethod - Emit either a normal or a "join" version of the
@@ -2126,7 +2150,7 @@ void EmitGenerateActionMethod (const ToolDescription& D,
const OptionDescriptions& OptDescs,
bool IsJoin, raw_ostream& O) {
- EmitGenerateActionMethodHeader(D, IsJoin, O);
+ EmitGenerateActionMethodHeader(D, IsJoin, /* Naked = */ false, O);
if (!D.CmdLine)
throw "Tool " + D.Name + " has no cmd_line property!";
@@ -2173,25 +2197,29 @@ void EmitGenerateActionMethod (const ToolDescription& D,
O.indent(Indent3) << "out_file = this->OutFilename("
<< (IsJoin ? "sys::Path(),\n" : "inFile,\n");
- O.indent(Indent4) << "TempDir, stop_compilation, output_suffix).str();\n\n";
+ O.indent(Indent4) <<
+ "TempDir, stop_compilation, output_suffix.c_str()).str();\n\n";
O.indent(Indent3) << "vec.push_back(std::make_pair(65536, out_file));\n";
O.indent(Indent2) << "}\n\n";
// Handle the Sink property.
+ std::string SinkOption("autogenerated::");
+ SinkOption += SinkOptionName;
if (D.isSink()) {
- O.indent(Indent2) << "if (!" << SinkOptionName << ".empty()) {\n";
+ O.indent(Indent2) << "if (!" << SinkOption << ".empty()) {\n";
O.indent(Indent3) << "for (cl::list<std::string>::iterator B = "
- << SinkOptionName << ".begin(), E = " << SinkOptionName
+ << SinkOption << ".begin(), E = " << SinkOption
<< ".end(); B != E; ++B)\n";
- O.indent(Indent4) << "vec.push_back(std::make_pair(" << SinkOptionName
- << ".getPosition(B - " << SinkOptionName
+ O.indent(Indent4) << "vec.push_back(std::make_pair(" << SinkOption
+ << ".getPosition(B - " << SinkOption
<< ".begin()), *B));\n";
O.indent(Indent2) << "}\n";
}
- O.indent(Indent2) << "return Action(cmd, this->SortArgs(vec), "
+ O.indent(Indent2) << "Out.Construct(cmd, this->SortArgs(vec), "
<< "stop_compilation, out_file);\n";
+ O.indent(Indent2) << "return 0;\n";
O.indent(Indent1) << "}\n\n";
}
@@ -2201,14 +2229,11 @@ void EmitGenerateActionMethods (const ToolDescription& ToolDesc,
const OptionDescriptions& OptDescs,
raw_ostream& O) {
if (!ToolDesc.isJoin()) {
- O.indent(Indent1) << "Action GenerateAction(const PathVector& inFiles,\n";
- O.indent(Indent2) << "bool HasChildren,\n";
- O.indent(Indent2) << "const llvm::sys::Path& TempDir,\n";
- O.indent(Indent2) << "const InputLanguagesSet& InLangs,\n";
- O.indent(Indent2) << "const LanguageMap& LangMap) const\n";
- O.indent(Indent1) << "{\n";
- O.indent(Indent2) << "throw std::runtime_error(\"" << ToolDesc.Name
+ EmitGenerateActionMethodHeader(ToolDesc, /* IsJoin = */ true,
+ /* Naked = */ true, O);
+ O.indent(Indent2) << "PrintError(\"" << ToolDesc.Name
<< " is not a Join tool!\");\n";
+ O.indent(Indent2) << "return -1;\n";
O.indent(Indent1) << "}\n\n";
}
else {
@@ -2321,8 +2346,7 @@ void EmitToolClassDefinition (const ToolDescription& D,
/// EmitOptionDefinitions - Iterate over a list of option descriptions
/// and emit registration code.
void EmitOptionDefinitions (const OptionDescriptions& descs,
- bool HasSink, bool HasExterns,
- raw_ostream& O)
+ bool HasSink, raw_ostream& O)
{
std::vector<OptionDescription> Aliases;
@@ -2336,16 +2360,8 @@ void EmitOptionDefinitions (const OptionDescriptions& descs,
continue;
}
- if (val.isExtern())
- O << "extern ";
-
O << val.GenTypeDeclaration() << ' '
- << val.GenVariableName();
-
- if (val.isExtern()) {
- O << ";\n";
- continue;
- }
+ << val.GenPlainVariableName();
O << "(\"" << val.Name << "\"\n";
@@ -2396,7 +2412,7 @@ void EmitOptionDefinitions (const OptionDescriptions& descs,
const OptionDescription& val = *B;
O << val.GenTypeDeclaration() << ' '
- << val.GenVariableName()
+ << val.GenPlainVariableName()
<< "(\"" << val.Name << '\"';
const OptionDescription& D = descs.FindOption(val.Help);
@@ -2407,9 +2423,7 @@ void EmitOptionDefinitions (const OptionDescriptions& descs,
// Emit the sink option.
if (HasSink)
- O << (HasExterns ? "extern cl" : "cl")
- << "::list<std::string> " << SinkOptionName
- << (HasExterns ? ";\n" : "(cl::Sink);\n");
+ O << "cl::list<std::string> " << SinkOptionName << "(cl::Sink);\n";
O << '\n';
}
@@ -2492,8 +2506,15 @@ class EmitPreprocessOptionsCallback :
O.indent(IndentLevel) << OptDesc.GenVariableName() << ".clear();\n";
for (ListInit::const_iterator B = List.begin(), E = List.end();
B != E; ++B) {
- O.indent(IndentLevel) << OptDesc.GenVariableName() << ".push_back(\""
- << InitPtrToString(*B) << "\");\n";
+ const Init* CurElem = *B;
+ if (OptDesc.isSwitchList())
+ CheckBooleanConstant(CurElem);
+
+ O.indent(IndentLevel)
+ << OptDesc.GenVariableName() << ".push_back(\""
+ << (OptDesc.isSwitchList() ? CurElem->getAsString()
+ : InitPtrToString(CurElem))
+ << "\");\n";
}
}
else if (OptDesc.isSwitch()) {
@@ -2561,11 +2582,11 @@ public:
};
-/// EmitPreprocessOptions - Emit the PreprocessOptionsLocal() function.
+/// EmitPreprocessOptions - Emit the PreprocessOptions() function.
void EmitPreprocessOptions (const RecordKeeper& Records,
const OptionDescriptions& OptDecs, raw_ostream& O)
{
- O << "void PreprocessOptionsLocal() {\n";
+ O << "int PreprocessOptions () {\n";
const RecordVector& OptionPreprocessors =
Records.getAllDerivedDefinitions("OptionPreprocessor");
@@ -2578,58 +2599,101 @@ void EmitPreprocessOptions (const RecordKeeper& Records,
false, OptDecs, O);
}
+ O << '\n';
+ O.indent(Indent1) << "return 0;\n";
O << "}\n\n";
}
-/// EmitPopulateLanguageMap - Emit the PopulateLanguageMapLocal() function.
-void EmitPopulateLanguageMap (const RecordKeeper& Records, raw_ostream& O)
+class DoEmitPopulateLanguageMap;
+typedef void (DoEmitPopulateLanguageMap::* DoEmitPopulateLanguageMapHandler)
+(const DagInit& D);
+
+class DoEmitPopulateLanguageMap
+: public HandlerTable<DoEmitPopulateLanguageMapHandler>
{
- O << "void PopulateLanguageMapLocal(LanguageMap& langMap) {\n";
+private:
+ raw_ostream& O_;
- // Get the relevant field out of RecordKeeper
- const Record* LangMapRecord = Records.getDef("LanguageMap");
+public:
+
+ explicit DoEmitPopulateLanguageMap (raw_ostream& O) : O_(O) {
+ if (!staticMembersInitialized_) {
+ AddHandler("lang_to_suffixes",
+ &DoEmitPopulateLanguageMap::onLangToSuffixes);
+
+ staticMembersInitialized_ = true;
+ }
+ }
- // It is allowed for a plugin to have no language map.
- if (LangMapRecord) {
+ void operator() (Init* I) {
+ InvokeDagInitHandler(this, I);
+ }
- ListInit* LangsToSuffixesList = LangMapRecord->getValueAsListInit("map");
- if (!LangsToSuffixesList)
- throw "Error in the language map definition!";
+private:
- for (unsigned i = 0; i < LangsToSuffixesList->size(); ++i) {
- const Record* LangToSuffixes = LangsToSuffixesList->getElementAsRecord(i);
+ void onLangToSuffixes (const DagInit& d) {
+ CheckNumberOfArguments(d, 2);
- const std::string& Lang = LangToSuffixes->getValueAsString("lang");
- const ListInit* Suffixes = LangToSuffixes->getValueAsListInit("suffixes");
+ const std::string& Lang = InitPtrToString(d.getArg(0));
+ Init* Suffixes = d.getArg(1);
- for (unsigned i = 0; i < Suffixes->size(); ++i)
- O.indent(Indent1) << "langMap[\""
- << InitPtrToString(Suffixes->getElement(i))
- << "\"] = \"" << Lang << "\";\n";
+ // Second argument to lang_to_suffixes is either a single string...
+ if (typeid(*Suffixes) == typeid(StringInit)) {
+ O_.indent(Indent1) << "langMap[\"" << InitPtrToString(Suffixes)
+ << "\"] = \"" << Lang << "\";\n";
+ }
+ // ...or a list of strings.
+ else {
+ const ListInit& Lst = InitPtrToList(Suffixes);
+ assert(Lst.size() != 0);
+ for (ListInit::const_iterator B = Lst.begin(), E = Lst.end();
+ B != E; ++B) {
+ O_.indent(Indent1) << "langMap[\"" << InitPtrToString(*B)
+ << "\"] = \"" << Lang << "\";\n";
+ }
}
}
+};
+
+/// EmitPopulateLanguageMap - Emit the PopulateLanguageMap() function.
+void EmitPopulateLanguageMap (const RecordKeeper& Records, raw_ostream& O)
+{
+ O << "int PopulateLanguageMap (LanguageMap& langMap) {\n";
+
+ // For each LangMap:
+ const RecordVector& LangMaps =
+ Records.getAllDerivedDefinitions("LanguageMap");
+
+ for (RecordVector::const_iterator B = LangMaps.begin(),
+ E = LangMaps.end(); B!=E; ++B) {
+ ListInit* LangMap = (*B)->getValueAsListInit("map");
+ std::for_each(LangMap->begin(), LangMap->end(),
+ DoEmitPopulateLanguageMap(O));
+ }
+
+ O << '\n';
+ O.indent(Indent1) << "return 0;\n";
O << "}\n\n";
}
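
The function emitted by the code above reduces to straight assignments; a sketch of that output, assuming a hypothetical (lang_to_suffixes "c++", ["cpp", "cc"]) entry and a plain std::map as a stand-in for the driver's LanguageMap type:

    #include <map>
    #include <string>

    typedef std::map<std::string, std::string> LanguageMap; // stand-in type

    int PopulateLanguageMap (LanguageMap& langMap) {
      langMap["cpp"] = "c++";
      langMap["cc"] = "c++";

      return 0;
    }
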
-/// IncDecWeight - Helper function passed to EmitCaseConstructHandler()
-/// by EmitEdgeClass().
-void IncDecWeight (const Init* i, unsigned IndentLevel,
- raw_ostream& O) {
+/// EmitEdgePropertyHandlerCallback - Emits code that handles edge
+/// properties. Helper function passed to EmitCaseConstructHandler() by
+/// EmitEdgeClass().
+void EmitEdgePropertyHandlerCallback (const Init* i, unsigned IndentLevel,
+ raw_ostream& O) {
const DagInit& d = InitPtrToDag(i);
const std::string& OpName = GetOperatorName(d);
if (OpName == "inc_weight") {
O.indent(IndentLevel) << "ret += ";
}
- else if (OpName == "dec_weight") {
- O.indent(IndentLevel) << "ret -= ";
- }
else if (OpName == "error") {
CheckNumberOfArguments(d, 1);
- O.indent(IndentLevel) << "throw std::runtime_error(\""
+ O.indent(IndentLevel) << "PrintError(\""
<< InitPtrToString(d.getArg(0))
<< "\");\n";
+ O.indent(IndentLevel) << "return -1;\n";
return;
}
else {
@@ -2646,7 +2710,7 @@ void IncDecWeight (const Init* i, unsigned IndentLevel,
/// EmitEdgeClass - Emit a single Edge# class.
void EmitEdgeClass (unsigned N, const std::string& Target,
- DagInit* Case, const OptionDescriptions& OptDescs,
+ const DagInit& Case, const OptionDescriptions& OptDescs,
raw_ostream& O) {
// Class constructor.
@@ -2657,40 +2721,48 @@ void EmitEdgeClass (unsigned N, const std::string& Target,
// Function Weight().
O.indent(Indent1)
- << "unsigned Weight(const InputLanguagesSet& InLangs) const {\n";
+ << "int Weight(const InputLanguagesSet& InLangs) const {\n";
O.indent(Indent2) << "unsigned ret = 0;\n";
// Handle the 'case' construct.
- EmitCaseConstructHandler(Case, Indent2, IncDecWeight, false, OptDescs, O);
+ EmitCaseConstructHandler(&Case, Indent2, EmitEdgePropertyHandlerCallback,
+ false, OptDescs, O);
O.indent(Indent2) << "return ret;\n";
O.indent(Indent1) << "}\n\n};\n\n";
}
/// EmitEdgeClasses - Emit Edge* classes that represent graph edges.
-void EmitEdgeClasses (const RecordVector& EdgeVector,
+void EmitEdgeClasses (const DagVector& EdgeVector,
const OptionDescriptions& OptDescs,
raw_ostream& O) {
int i = 0;
- for (RecordVector::const_iterator B = EdgeVector.begin(),
+ for (DagVector::const_iterator B = EdgeVector.begin(),
E = EdgeVector.end(); B != E; ++B) {
- const Record* Edge = *B;
- const std::string& NodeB = Edge->getValueAsString("b");
- DagInit& Weight = *Edge->getValueAsDag("weight");
+ const DagInit& Edge = **B;
+ const std::string& Name = GetOperatorName(Edge);
+
+ if (Name == "optional_edge") {
+ assert(IsOptionalEdge(Edge));
+ const std::string& NodeB = InitPtrToString(Edge.getArg(1));
+
+ const DagInit& Weight = InitPtrToDag(Edge.getArg(2));
+ EmitEdgeClass(i, NodeB, Weight, OptDescs, O);
+ }
+ else if (Name != "edge") {
+ throw "Unknown edge class: '" + Name + "'!";
+ }
- if (!IsDagEmpty(Weight))
- EmitEdgeClass(i, NodeB, &Weight, OptDescs, O);
++i;
}
}
-/// EmitPopulateCompilationGraph - Emit the PopulateCompilationGraphLocal()
-/// function.
-void EmitPopulateCompilationGraph (const RecordVector& EdgeVector,
+/// EmitPopulateCompilationGraph - Emit the PopulateCompilationGraph() function.
+void EmitPopulateCompilationGraph (const DagVector& EdgeVector,
const ToolDescriptions& ToolDescs,
raw_ostream& O)
{
- O << "void PopulateCompilationGraphLocal(CompilationGraph& G) {\n";
+ O << "int PopulateCompilationGraph (CompilationGraph& G) {\n";
for (ToolDescriptions::const_iterator B = ToolDescs.begin(),
E = ToolDescs.end(); B != E; ++B)
@@ -2701,24 +2773,27 @@ void EmitPopulateCompilationGraph (const RecordVector& EdgeVector,
// Insert edges.
int i = 0;
- for (RecordVector::const_iterator B = EdgeVector.begin(),
+ for (DagVector::const_iterator B = EdgeVector.begin(),
E = EdgeVector.end(); B != E; ++B) {
- const Record* Edge = *B;
- const std::string& NodeA = Edge->getValueAsString("a");
- const std::string& NodeB = Edge->getValueAsString("b");
- DagInit& Weight = *Edge->getValueAsDag("weight");
+ const DagInit& Edge = **B;
+ const std::string& NodeA = InitPtrToString(Edge.getArg(0));
+ const std::string& NodeB = InitPtrToString(Edge.getArg(1));
- O.indent(Indent1) << "G.insertEdge(\"" << NodeA << "\", ";
+ O.indent(Indent1) << "if (int ret = G.insertEdge(\"" << NodeA << "\", ";
- if (IsDagEmpty(Weight))
- O << "new SimpleEdge(\"" << NodeB << "\")";
- else
+ if (IsOptionalEdge(Edge))
O << "new Edge" << i << "()";
+ else
+ O << "new SimpleEdge(\"" << NodeB << "\")";
+
+ O << "))\n";
+ O.indent(Indent2) << "return ret;\n";
- O << ");\n";
++i;
}
+ O << '\n';
+ O.indent(Indent1) << "return 0;\n";
O << "}\n\n";
}
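
A sketch of the edge-insertion code now emitted, with hypothetical tool names and minimal stand-in classes so the fragment compiles on its own; the point is that insertEdge() failures are returned to the caller instead of thrown (the tool-node insertion emitted by the loop over ToolDescs is omitted here).

    // Stand-ins for llvmc's CompilationGraph, SimpleEdge and the generated
    // Edge# classes; the real generated file uses the driver's own types.
    struct Edge { virtual ~Edge() {} };
    struct SimpleEdge : Edge { explicit SimpleEdge(const char *) {} };
    struct Edge0 : Edge {};
    struct CompilationGraph {
      int insertEdge(const char *, Edge *E) { delete E; return 0; }
    };

    int PopulateCompilationGraph (CompilationGraph& G) {
      if (int ret = G.insertEdge("root", new SimpleEdge("llvm_gcc")))
        return ret;
      if (int ret = G.insertEdge("llvm_gcc", new Edge0()))
        return ret;

      return 0;
    }
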
@@ -2762,7 +2837,8 @@ public:
CheckNumberOfArguments(Dag, 2);
const std::string& OptName = InitPtrToString(Dag.getArg(0));
const std::string& HookName = InitPtrToString(Dag.getArg(1));
- const OptionDescription& D = OptDescs_.FindOption(OptName);
+ const OptionDescription& D =
+ OptDescs_.FindParameterListOrParameter(OptName);
HookNames_[HookName] = HookInfo(D.isList() ? HookInfo::ListHook
: HookInfo::ArgHook);
@@ -2827,9 +2903,6 @@ public:
this->onCmdLine(InitPtrToString(Arg));
}
- void operator()(const DagInit* Test, unsigned, bool) {
- this->operator()(Test);
- }
void operator()(const Init* Statement, unsigned) {
this->operator()(Statement);
}
@@ -2873,7 +2946,6 @@ void EmitHookDeclarations(const ToolDescriptions& ToolDescs,
if (HookNames.empty())
return;
- O << "namespace hooks {\n";
for (HookInfoMap::const_iterator B = HookNames.begin(),
E = HookNames.end(); B != E; ++B) {
const char* HookName = B->first();
@@ -2892,23 +2964,6 @@ void EmitHookDeclarations(const ToolDescriptions& ToolDescs,
O <<");\n";
}
- O << "}\n\n";
-}
-
-/// EmitRegisterPlugin - Emit code to register this plugin.
-void EmitRegisterPlugin(int Priority, raw_ostream& O) {
- O << "struct Plugin : public llvmc::BasePlugin {\n\n";
- O.indent(Indent1) << "int Priority() const { return "
- << Priority << "; }\n\n";
- O.indent(Indent1) << "void PreprocessOptions() const\n";
- O.indent(Indent1) << "{ PreprocessOptionsLocal(); }\n\n";
- O.indent(Indent1) << "void PopulateLanguageMap(LanguageMap& langMap) const\n";
- O.indent(Indent1) << "{ PopulateLanguageMapLocal(langMap); }\n\n";
- O.indent(Indent1)
- << "void PopulateCompilationGraph(CompilationGraph& graph) const\n";
- O.indent(Indent1) << "{ PopulateCompilationGraphLocal(graph); }\n"
- << "};\n\n"
- << "static llvmc::RegisterPlugin<Plugin> RP;\n\n";
}
/// EmitIncludes - Emit necessary #include directives and some
@@ -2916,8 +2971,7 @@ void EmitRegisterPlugin(int Priority, raw_ostream& O) {
void EmitIncludes(raw_ostream& O) {
O << "#include \"llvm/CompilerDriver/BuiltinOptions.h\"\n"
<< "#include \"llvm/CompilerDriver/CompilationGraph.h\"\n"
- << "#include \"llvm/CompilerDriver/ForceLinkageMacros.h\"\n"
- << "#include \"llvm/CompilerDriver/Plugin.h\"\n"
+ << "#include \"llvm/CompilerDriver/Error.h\"\n"
<< "#include \"llvm/CompilerDriver/Tool.h\"\n\n"
<< "#include \"llvm/Support/CommandLine.h\"\n"
@@ -2931,21 +2985,17 @@ void EmitIncludes(raw_ostream& O) {
<< "using namespace llvm;\n"
<< "using namespace llvmc;\n\n"
- << "extern cl::opt<std::string> OutputFilename;\n\n"
-
<< "inline const char* checkCString(const char* s)\n"
<< "{ return s == NULL ? \"\" : s; }\n\n";
}
-/// PluginData - Holds all information about a plugin.
-struct PluginData {
+/// DriverData - Holds all information about the driver.
+struct DriverData {
OptionDescriptions OptDescs;
- bool HasSink;
- bool HasExterns;
ToolDescriptions ToolDescs;
- RecordVector Edges;
- int Priority;
+ DagVector Edges;
+ bool HasSink;
};
/// HasSink - Go through the list of tool descriptions and check if
@@ -2959,46 +3009,27 @@ bool HasSink(const ToolDescriptions& ToolDescs) {
return false;
}
-/// HasExterns - Go through the list of option descriptions and check
-/// if there are any external options.
-bool HasExterns(const OptionDescriptions& OptDescs) {
- for (OptionDescriptions::const_iterator B = OptDescs.begin(),
- E = OptDescs.end(); B != E; ++B)
- if (B->second.isExtern())
- return true;
-
- return false;
-}
-
-/// CollectPluginData - Collect tool and option properties,
-/// compilation graph edges and plugin priority from the parse tree.
-void CollectPluginData (const RecordKeeper& Records, PluginData& Data) {
+/// CollectDriverData - Collect compilation graph edges, tool properties and
+/// option properties from the parse tree.
+void CollectDriverData (const RecordKeeper& Records, DriverData& Data) {
// Collect option properties.
const RecordVector& OptionLists =
Records.getAllDerivedDefinitions("OptionList");
- CollectOptionDescriptions(OptionLists.begin(), OptionLists.end(),
- Data.OptDescs);
+ CollectOptionDescriptions(OptionLists, Data.OptDescs);
// Collect tool properties.
const RecordVector& Tools = Records.getAllDerivedDefinitions("Tool");
- CollectToolDescriptions(Tools.begin(), Tools.end(), Data.ToolDescs);
+ CollectToolDescriptions(Tools, Data.ToolDescs);
Data.HasSink = HasSink(Data.ToolDescs);
- Data.HasExterns = HasExterns(Data.OptDescs);
// Collect compilation graph edges.
const RecordVector& CompilationGraphs =
Records.getAllDerivedDefinitions("CompilationGraph");
- FillInEdgeVector(CompilationGraphs.begin(), CompilationGraphs.end(),
- Data.Edges);
-
- // Calculate the priority of this plugin.
- const RecordVector& Priorities =
- Records.getAllDerivedDefinitions("PluginPriority");
- Data.Priority = CalculatePriority(Priorities.begin(), Priorities.end());
+ FillInEdgeVector(CompilationGraphs, Data.Edges);
}
-/// CheckPluginData - Perform some sanity checks on the collected data.
-void CheckPluginData(PluginData& Data) {
+/// CheckDriverData - Perform some sanity checks on the collected data.
+void CheckDriverData(DriverData& Data) {
// Filter out all tools not mentioned in the compilation graph.
FilterNotInGraph(Data.Edges, Data.ToolDescs);
@@ -3010,24 +3041,24 @@ void CheckPluginData(PluginData& Data) {
CheckForSuperfluousOptions(Data.Edges, Data.ToolDescs, Data.OptDescs);
}
-void EmitPluginCode(const PluginData& Data, raw_ostream& O) {
+void EmitDriverCode(const DriverData& Data, raw_ostream& O) {
// Emit file header.
EmitIncludes(O);
// Emit global option registration code.
- EmitOptionDefinitions(Data.OptDescs, Data.HasSink, Data.HasExterns, O);
+ O << "namespace llvmc {\n"
+ << "namespace autogenerated {\n\n";
+ EmitOptionDefinitions(Data.OptDescs, Data.HasSink, O);
+ O << "} // End namespace autogenerated.\n"
+ << "} // End namespace llvmc.\n\n";
// Emit hook declarations.
+ O << "namespace hooks {\n";
EmitHookDeclarations(Data.ToolDescs, Data.OptDescs, O);
+ O << "} // End namespace hooks.\n\n";
O << "namespace {\n\n";
-
- // Emit PreprocessOptionsLocal() function.
- EmitPreprocessOptions(Records, Data.OptDescs, O);
-
- // Emit PopulateLanguageMapLocal() function
- // (language map maps from file extensions to language names).
- EmitPopulateLanguageMap(Records, O);
+ O << "using namespace llvmc::autogenerated;\n\n";
// Emit Tool classes.
for (ToolDescriptions::const_iterator B = Data.ToolDescs.begin(),
@@ -3037,18 +3068,23 @@ void EmitPluginCode(const PluginData& Data, raw_ostream& O) {
// Emit Edge# classes.
EmitEdgeClasses(Data.Edges, Data.OptDescs, O);
- // Emit PopulateCompilationGraphLocal() function.
- EmitPopulateCompilationGraph(Data.Edges, Data.ToolDescs, O);
-
- // Emit code for plugin registration.
- EmitRegisterPlugin(Data.Priority, O);
-
O << "} // End anonymous namespace.\n\n";
- // Force linkage magic.
O << "namespace llvmc {\n";
- O << "LLVMC_FORCE_LINKAGE_DECL(LLVMC_PLUGIN_NAME) {}\n";
- O << "}\n";
+ O << "namespace autogenerated {\n\n";
+
+ // Emit PreprocessOptions() function.
+ EmitPreprocessOptions(Records, Data.OptDescs, O);
+
+ // Emit PopulateLanguageMap() function
+ // (language map maps from file extensions to language names).
+ EmitPopulateLanguageMap(Records, O);
+
+ // Emit PopulateCompilationGraph() function.
+ EmitPopulateCompilationGraph(Data.Edges, Data.ToolDescs, O);
+
+ O << "} // End namespace autogenerated.\n";
+ O << "} // End namespace llvmc.\n\n";
// EOF
}
@@ -3060,13 +3096,13 @@ void EmitPluginCode(const PluginData& Data, raw_ostream& O) {
/// run - The back-end entry point.
void LLVMCConfigurationEmitter::run (raw_ostream &O) {
try {
- PluginData Data;
+ DriverData Data;
- CollectPluginData(Records, Data);
- CheckPluginData(Data);
+ CollectDriverData(Records, Data);
+ CheckDriverData(Data);
- this->EmitSourceFileHeader("LLVMC Configuration Library", O);
- EmitPluginCode(Data, O);
+ this->EmitSourceFileHeader("llvmc-based driver: auto-generated code", O);
+ EmitDriverCode(Data, O);
} catch (std::exception& Error) {
throw Error.what() + std::string(" - usually this means a syntax error.");
diff --git a/libclamav/c++/llvm/utils/TableGen/Makefile b/libclamav/c++/llvm/utils/TableGen/Makefile
index 7ea88de..f27cd99 100644
--- a/libclamav/c++/llvm/utils/TableGen/Makefile
+++ b/libclamav/c++/llvm/utils/TableGen/Makefile
@@ -1,10 +1,10 @@
##===- utils/TableGen/Makefile -----------------------------*- Makefile -*-===##
-#
+#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
-#
+#
##===----------------------------------------------------------------------===##
LEVEL = ../..
diff --git a/libclamav/c++/llvm/utils/TableGen/NeonEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/NeonEmitter.cpp
new file mode 100644
index 0000000..0a12f37
--- /dev/null
+++ b/libclamav/c++/llvm/utils/TableGen/NeonEmitter.cpp
@@ -0,0 +1,1207 @@
+//===- NeonEmitter.cpp - Generate arm_neon.h for use with clang -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting arm_neon.h, which includes
+// a declaration and definition of each function specified by the ARM NEON
+// compiler interface. See ARM document DUI0348B.
+//
+// Each NEON instruction is implemented in terms of 1 or more functions which
+// are suffixed with the element type of the input vectors. Functions may be
+// implemented in terms of generic vector operations such as +, *, -, etc. or
+// by calling a __builtin_-prefixed function which will be handled by clang's
+// CodeGen library.
+//
+// Additional validation code can be generated by this file when runHeader() is
+// called, rather than the normal run() entry point.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NeonEmitter.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include <string>
+
+using namespace llvm;
+
+/// ParseTypes - break down a string such as "fQf" into a vector of StringRefs,
+/// with each StringRef representing a single type declared in the string.
+/// For "fQf" we would end up with 2 StringRefs, "f" and "Qf", representing
+/// 2xfloat and 4xfloat respectively.
+static void ParseTypes(Record *r, std::string &s,
+ SmallVectorImpl<StringRef> &TV) {
+ const char *data = s.data();
+ int len = 0;
+
+ for (unsigned i = 0, e = s.size(); i != e; ++i, ++len) {
+ if (data[len] == 'P' || data[len] == 'Q' || data[len] == 'U')
+ continue;
+
+ switch (data[len]) {
+ case 'c':
+ case 's':
+ case 'i':
+ case 'l':
+ case 'h':
+ case 'f':
+ break;
+ default:
+ throw TGError(r->getLoc(),
+ "Unexpected letter: " + std::string(data + len, 1));
+ break;
+ }
+ TV.push_back(StringRef(data, len + 1));
+ data += len + 1;
+ len = -1;
+ }
+}
+
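
A minimal, self-contained re-statement of the splitting rule: the modifier letters 'P', 'Q' and 'U' stay attached to the base-type letter that follows them, so "fQf" yields the two tokens "f" and "Qf".

    #include <cassert>
    #include <string>
    #include <vector>

    static std::vector<std::string> splitTypes(const std::string &s) {
      std::vector<std::string> out;
      std::string cur;
      for (unsigned i = 0; i != s.size(); ++i) {
        cur += s[i];
        // A base-type letter (anything except P/Q/U) terminates a token.
        if (s[i] != 'P' && s[i] != 'Q' && s[i] != 'U') {
          out.push_back(cur);
          cur.clear();
        }
      }
      return out;
    }

    int main() {
      std::vector<std::string> v = splitTypes("fQf");
      assert(v.size() == 2 && v[0] == "f" && v[1] == "Qf");
      return 0;
    }
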
+/// Widen - Convert a type code into the next wider type. char -> short,
+/// short -> int, etc.
+static char Widen(const char t) {
+ switch (t) {
+ case 'c':
+ return 's';
+ case 's':
+ return 'i';
+ case 'i':
+ return 'l';
+ default: throw "unhandled type in widen!";
+ }
+ return '\0';
+}
+
+/// Narrow - Convert a type code into the next smaller type. short -> char,
+/// float -> half float, etc.
+static char Narrow(const char t) {
+ switch (t) {
+ case 's':
+ return 'c';
+ case 'i':
+ return 's';
+ case 'l':
+ return 'i';
+ case 'f':
+ return 'h';
+ default: throw "unhandled type in narrow!";
+ }
+ return '\0';
+}
+
+/// For a particular StringRef, return the base type code, and whether it has
+/// the quad-vector, polynomial, or unsigned modifiers set.
+static char ClassifyType(StringRef ty, bool &quad, bool &poly, bool &usgn) {
+ unsigned off = 0;
+
+ // remember quad.
+ if (ty[off] == 'Q') {
+ quad = true;
+ ++off;
+ }
+
+ // remember poly.
+ if (ty[off] == 'P') {
+ poly = true;
+ ++off;
+ }
+
+ // remember unsigned.
+ if (ty[off] == 'U') {
+ usgn = true;
+ ++off;
+ }
+
+ // base type to get the type string for.
+ return ty[off];
+}
+
+/// ModType - Transform a type code and its modifiers based on a mod code. The
+/// mod code definitions may be found at the top of arm_neon.td.
+static char ModType(const char mod, char type, bool &quad, bool &poly,
+ bool &usgn, bool &scal, bool &cnst, bool &pntr) {
+ switch (mod) {
+ case 't':
+ if (poly) {
+ poly = false;
+ usgn = true;
+ }
+ break;
+ case 'u':
+ usgn = true;
+ case 'x':
+ poly = false;
+ if (type == 'f')
+ type = 'i';
+ break;
+ case 'f':
+ if (type == 'h')
+ quad = true;
+ type = 'f';
+ usgn = false;
+ break;
+ case 'g':
+ quad = false;
+ break;
+ case 'w':
+ type = Widen(type);
+ quad = true;
+ break;
+ case 'n':
+ type = Widen(type);
+ break;
+ case 'l':
+ type = 'l';
+ scal = true;
+ usgn = true;
+ break;
+ case 's':
+ case 'a':
+ scal = true;
+ break;
+ case 'k':
+ quad = true;
+ break;
+ case 'c':
+ cnst = true;
+ case 'p':
+ pntr = true;
+ scal = true;
+ break;
+ case 'h':
+ type = Narrow(type);
+ if (type == 'h')
+ quad = false;
+ break;
+ case 'e':
+ type = Narrow(type);
+ usgn = true;
+ break;
+ default:
+ break;
+ }
+ return type;
+}
+
+/// TypeString - for a modifier and type, generate the name of the typedef for
+/// that type. If generic is true, emit the generic vector type rather than
+/// the public NEON type. QUc -> uint8x16_t / __neon_uint8x16_t.
+static std::string TypeString(const char mod, StringRef typestr,
+ bool generic = false) {
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+ bool scal = false;
+ bool cnst = false;
+ bool pntr = false;
+
+ if (mod == 'v')
+ return "void";
+ if (mod == 'i')
+ return "int";
+
+ // base type to get the type string for.
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ // Based on the modifying character, change the type and width if necessary.
+ type = ModType(mod, type, quad, poly, usgn, scal, cnst, pntr);
+
+ SmallString<128> s;
+
+ if (generic)
+ s += "__neon_";
+
+ if (usgn)
+ s.push_back('u');
+
+ switch (type) {
+ case 'c':
+ s += poly ? "poly8" : "int8";
+ if (scal)
+ break;
+ s += quad ? "x16" : "x8";
+ break;
+ case 's':
+ s += poly ? "poly16" : "int16";
+ if (scal)
+ break;
+ s += quad ? "x8" : "x4";
+ break;
+ case 'i':
+ s += "int32";
+ if (scal)
+ break;
+ s += quad ? "x4" : "x2";
+ break;
+ case 'l':
+ s += "int64";
+ if (scal)
+ break;
+ s += quad ? "x2" : "x1";
+ break;
+ case 'h':
+ s += "float16";
+ if (scal)
+ break;
+ s += quad ? "x8" : "x4";
+ break;
+ case 'f':
+ s += "float32";
+ if (scal)
+ break;
+ s += quad ? "x4" : "x2";
+ break;
+ default:
+ throw "unhandled type!";
+ break;
+ }
+
+ if (mod == '2')
+ s += "x2";
+ if (mod == '3')
+ s += "x3";
+ if (mod == '4')
+ s += "x4";
+
+ // Append _t, finishing the type string typedef type.
+ s += "_t";
+
+ if (cnst)
+ s += " const";
+
+ if (pntr)
+ s += " *";
+
+ return s.str();
+}
+
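
The resulting spellings follow a simple pattern that can be checked in isolation: an optional 'u' for unsigned, the element name, the lane count for a 64-bit (or, when quad, 128-bit) container, then "_t"; for example "Uc" maps to uint8x8_t and "QUc" to uint8x16_t. A small sketch of that rule:

    #include "llvm/ADT/StringExtras.h"
    #include <cassert>
    #include <string>

    static std::string neonName(const std::string &elem, unsigned elemBits,
                                bool quad, bool usgn) {
      std::string s = usgn ? "u" : "";
      s += elem;
      s += "x" + llvm::utostr((quad ? 128u : 64u) / elemBits);
      return s + "_t";
    }

    int main() {
      assert(neonName("int8",     8, false, true)  == "uint8x8_t");   // "Uc"
      assert(neonName("int8",     8, true,  true)  == "uint8x16_t");  // "QUc"
      assert(neonName("float32", 32, true,  false) == "float32x4_t"); // "Qf"
      return 0;
    }
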
+/// BuiltinTypeString - for a modifier and type, generate the clang
+/// BuiltinsARM.def prototype code for the function. See the top of clang's
+/// Builtins.def for a description of the type strings.
+static std::string BuiltinTypeString(const char mod, StringRef typestr,
+ ClassKind ck, bool ret) {
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+ bool scal = false;
+ bool cnst = false;
+ bool pntr = false;
+
+ if (mod == 'v')
+ return "v";
+ if (mod == 'i')
+ return "i";
+
+ // base type to get the type string for.
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ // Based on the modifying character, change the type and width if necessary.
+ type = ModType(mod, type, quad, poly, usgn, scal, cnst, pntr);
+
+ if (pntr) {
+ usgn = false;
+ poly = false;
+ type = 'v';
+ }
+ if (type == 'h') {
+ type = 's';
+ usgn = true;
+ }
+ usgn = usgn | poly | ((ck == ClassI || ck == ClassW) && scal && type != 'f');
+
+ if (scal) {
+ SmallString<128> s;
+
+ if (usgn)
+ s.push_back('U');
+
+ if (type == 'l')
+ s += "LLi";
+ else
+ s.push_back(type);
+
+ if (cnst)
+ s.push_back('C');
+ if (pntr)
+ s.push_back('*');
+ return s.str();
+ }
+
+ // Since the return value must be one type, return a vector type of the
+ // appropriate width which we will bitcast. An exception is made for
+ // returning structs of 2, 3, or 4 vectors which are returned in a sret-like
+ // fashion, storing them to a pointer arg.
+ if (ret) {
+ if (mod == '2' || mod == '3' || mod == '4')
+ return "vv*";
+ if (mod == 'f' || (ck != ClassB && type == 'f'))
+ return quad ? "V4f" : "V2f";
+ if (ck != ClassB && type == 's')
+ return quad ? "V8s" : "V4s";
+ if (ck != ClassB && type == 'i')
+ return quad ? "V4i" : "V2i";
+ if (ck != ClassB && type == 'l')
+ return quad ? "V2LLi" : "V1LLi";
+
+ return quad ? "V16c" : "V8c";
+ }
+
+ // Non-return array types are passed as individual vectors.
+ if (mod == '2')
+ return quad ? "V16cV16c" : "V8cV8c";
+ if (mod == '3')
+ return quad ? "V16cV16cV16c" : "V8cV8cV8c";
+ if (mod == '4')
+ return quad ? "V16cV16cV16cV16c" : "V8cV8cV8cV8c";
+
+ if (mod == 'f' || (ck != ClassB && type == 'f'))
+ return quad ? "V4f" : "V2f";
+ if (ck != ClassB && type == 's')
+ return quad ? "V8s" : "V4s";
+ if (ck != ClassB && type == 'i')
+ return quad ? "V4i" : "V2i";
+ if (ck != ClassB && type == 'l')
+ return quad ? "V2LLi" : "V1LLi";
+
+ return quad ? "V16c" : "V8c";
+}
+
+/// StructTag - generate the name of the struct tag for a type.
+/// These names are mandated by ARM's ABI.
+static std::string StructTag(StringRef typestr) {
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+
+ // base type to get the type string for.
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ SmallString<128> s;
+ s += "__simd";
+ s += quad ? "128_" : "64_";
+ if (usgn)
+ s.push_back('u');
+
+ switch (type) {
+ case 'c':
+ s += poly ? "poly8" : "int8";
+ break;
+ case 's':
+ s += poly ? "poly16" : "int16";
+ break;
+ case 'i':
+ s += "int32";
+ break;
+ case 'l':
+ s += "int64";
+ break;
+ case 'h':
+ s += "float16";
+ break;
+ case 'f':
+ s += "float32";
+ break;
+ default:
+ throw "unhandled type!";
+ break;
+ }
+
+ // Append _t, finishing the struct tag name.
+ s += "_t";
+
+ return s.str();
+}
+
+/// MangleName - Append a type or width suffix to a base neon function name,
+/// and insert a 'q' in the appropriate location if the operation works on
+/// 128b rather than 64b. E.g. turn "vst2_lane" into "vst2q_lane_f32", etc.
+static std::string MangleName(const std::string &name, StringRef typestr,
+ ClassKind ck) {
+ if (name == "vcvt_f32_f16")
+ return name;
+
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ std::string s = name;
+
+ switch (type) {
+ case 'c':
+ switch (ck) {
+ case ClassS: s += poly ? "_p8" : usgn ? "_u8" : "_s8"; break;
+ case ClassI: s += "_i8"; break;
+ case ClassW: s += "_8"; break;
+ default: break;
+ }
+ break;
+ case 's':
+ switch (ck) {
+ case ClassS: s += poly ? "_p16" : usgn ? "_u16" : "_s16"; break;
+ case ClassI: s += "_i16"; break;
+ case ClassW: s += "_16"; break;
+ default: break;
+ }
+ break;
+ case 'i':
+ switch (ck) {
+ case ClassS: s += usgn ? "_u32" : "_s32"; break;
+ case ClassI: s += "_i32"; break;
+ case ClassW: s += "_32"; break;
+ default: break;
+ }
+ break;
+ case 'l':
+ switch (ck) {
+ case ClassS: s += usgn ? "_u64" : "_s64"; break;
+ case ClassI: s += "_i64"; break;
+ case ClassW: s += "_64"; break;
+ default: break;
+ }
+ break;
+ case 'h':
+ switch (ck) {
+ case ClassS:
+ case ClassI: s += "_f16"; break;
+ case ClassW: s += "_16"; break;
+ default: break;
+ }
+ break;
+ case 'f':
+ switch (ck) {
+ case ClassS:
+ case ClassI: s += "_f32"; break;
+ case ClassW: s += "_32"; break;
+ default: break;
+ }
+ break;
+ default:
+ throw "unhandled type!";
+ break;
+ }
+ if (ck == ClassB)
+ s += "_v";
+
+ // Insert a 'q' before the first '_' character so that it ends up before
+ // _lane or _n on vector-scalar operations.
+ if (quad) {
+ size_t pos = s.find('_');
+ s = s.insert(pos, "q");
+ }
+ return s;
+}
+
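
The 'q' placement can be re-stated as a tiny self-contained check: append the element suffix first, then insert the 'q' before the first underscore, which reproduces the "vst2_lane" to "vst2q_lane_f32" example from the comment above.

    #include <cassert>
    #include <string>

    static std::string mangleQuad(std::string name, const std::string &suffix) {
      name += suffix;                       // e.g. "_f32" for a ClassS float
      name.insert(name.find('_'), "q");     // 'q' lands before _lane / _n / _f32
      return name;
    }

    int main() {
      assert(mangleQuad("vst2_lane", "_f32") == "vst2q_lane_f32");
      return 0;
    }
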
+// Generate the string "(argtype a, argtype b, ...)"
+static std::string GenArgs(const std::string &proto, StringRef typestr) {
+ bool define = proto.find('i') != std::string::npos;
+ char arg = 'a';
+
+ std::string s;
+ s += "(";
+
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ if (!define) {
+ s += TypeString(proto[i], typestr);
+ s.push_back(' ');
+ }
+ s.push_back(arg);
+ if ((i + 1) < e)
+ s += ", ";
+ }
+
+ s += ")";
+ return s;
+}
+
+static std::string Duplicate(unsigned nElts, StringRef typestr,
+ const std::string &a) {
+ std::string s;
+
+ s = "(__neon_" + TypeString('d', typestr) + "){ ";
+ for (unsigned i = 0; i != nElts; ++i) {
+ s += a;
+ if ((i + 1) < nElts)
+ s += ", ";
+ }
+ s += " }";
+
+ return s;
+}
+
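
For a 2-lane float vector with scalar "b", the compound literal built here comes out as "(__neon_float32x2_t){ b, b }"; a brief stand-alone check of the same string-building pattern:

    #include <cassert>
    #include <string>

    // Repeat the scalar once per lane inside a compound literal of the
    // generic (__neon_-prefixed) vector type, as Duplicate() does.
    static std::string dup(unsigned nElts, const std::string &ty, const std::string &a) {
      std::string s = "(__neon_" + ty + "){ ";
      for (unsigned i = 0; i != nElts; ++i)
        s += a + (i + 1 < nElts ? ", " : "");
      return s + " }";
    }

    int main() {
      assert(dup(2, "float32x2_t", "b") == "(__neon_float32x2_t){ b, b }");
      return 0;
    }
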
+// Generate the definition for this intrinsic, e.g. "a + b" for OpAdd.
+// If structTypes is true, the NEON types are structs of vector types rather
+// than vector types, and the call becomes "a.val + b.val"
+static std::string GenOpString(OpKind op, const std::string &proto,
+ StringRef typestr, bool structTypes = true) {
+ bool dummy, quad = false;
+ char type = ClassifyType(typestr, quad, dummy, dummy);
+ unsigned nElts = 0;
+ switch (type) {
+ case 'c': nElts = 8; break;
+ case 's': nElts = 4; break;
+ case 'i': nElts = 2; break;
+ case 'l': nElts = 1; break;
+ case 'h': nElts = 4; break;
+ case 'f': nElts = 2; break;
+ }
+
+ std::string ts = TypeString(proto[0], typestr);
+ std::string s = ts + " r; r";
+
+ if (structTypes)
+ s += ".val";
+
+ s += " = ";
+
+ std::string a, b, c;
+ if (proto.size() > 1)
+ a = (structTypes && proto[1] != 'l' && proto[1] != 's') ? "a.val" : "a";
+ b = structTypes ? "b.val" : "b";
+ c = structTypes ? "c.val" : "c";
+
+ switch(op) {
+ case OpAdd:
+ s += a + " + " + b;
+ break;
+ case OpSub:
+ s += a + " - " + b;
+ break;
+ case OpMulN:
+ b = Duplicate(nElts << (int)quad, typestr, "b");
+ case OpMul:
+ s += a + " * " + b;
+ break;
+ case OpMlaN:
+ c = Duplicate(nElts << (int)quad, typestr, "c");
+ case OpMla:
+ s += a + " + ( " + b + " * " + c + " )";
+ break;
+ case OpMlsN:
+ c = Duplicate(nElts << (int)quad, typestr, "c");
+ case OpMls:
+ s += a + " - ( " + b + " * " + c + " )";
+ break;
+ case OpEq:
+ s += "(__neon_" + ts + ")(" + a + " == " + b + ")";
+ break;
+ case OpGe:
+ s += "(__neon_" + ts + ")(" + a + " >= " + b + ")";
+ break;
+ case OpLe:
+ s += "(__neon_" + ts + ")(" + a + " <= " + b + ")";
+ break;
+ case OpGt:
+ s += "(__neon_" + ts + ")(" + a + " > " + b + ")";
+ break;
+ case OpLt:
+ s += "(__neon_" + ts + ")(" + a + " < " + b + ")";
+ break;
+ case OpNeg:
+ s += " -" + a;
+ break;
+ case OpNot:
+ s += " ~" + a;
+ break;
+ case OpAnd:
+ s += a + " & " + b;
+ break;
+ case OpOr:
+ s += a + " | " + b;
+ break;
+ case OpXor:
+ s += a + " ^ " + b;
+ break;
+ case OpAndNot:
+ s += a + " & ~" + b;
+ break;
+ case OpOrNot:
+ s += a + " | ~" + b;
+ break;
+ case OpCast:
+ s += "(__neon_" + ts + ")" + a;
+ break;
+ case OpConcat:
+ s += "__builtin_shufflevector((__neon_int64x1_t)" + a;
+ s += ", (__neon_int64x1_t)" + b + ", 0, 1)";
+ break;
+ case OpHi:
+ s += "(__neon_int64x1_t)(((__neon_int64x2_t)" + a + ")[1])";
+ break;
+ case OpLo:
+ s += "(__neon_int64x1_t)(((__neon_int64x2_t)" + a + ")[0])";
+ break;
+ case OpDup:
+ s += Duplicate(nElts << (int)quad, typestr, a);
+ break;
+ case OpSelect:
+ // ((0 & 1) | (~0 & 2))
+ ts = TypeString(proto[1], typestr);
+ s += "( " + a + " & (__neon_" + ts + ")" + b + ") | ";
+ s += "(~" + a + " & (__neon_" + ts + ")" + c + ")";
+ break;
+ case OpRev16:
+ s += "__builtin_shufflevector(" + a + ", " + a;
+ for (unsigned i = 2; i <= nElts << (int)quad; i += 2)
+ for (unsigned j = 0; j != 2; ++j)
+ s += ", " + utostr(i - j - 1);
+ s += ")";
+ break;
+ case OpRev32:
+ nElts >>= 1;
+ s += "__builtin_shufflevector(" + a + ", " + a;
+ for (unsigned i = nElts; i <= nElts << (1 + (int)quad); i += nElts)
+ for (unsigned j = 0; j != nElts; ++j)
+ s += ", " + utostr(i - j - 1);
+ s += ")";
+ break;
+ case OpRev64:
+ s += "__builtin_shufflevector(" + a + ", " + a;
+ for (unsigned i = nElts; i <= nElts << (int)quad; i += nElts)
+ for (unsigned j = 0; j != nElts; ++j)
+ s += ", " + utostr(i - j - 1);
+ s += ")";
+ break;
+ default:
+ throw "unknown OpKind!";
+ break;
+ }
+ s += "; return r;";
+ return s;
+}
+
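
To make the ".val" plumbing concrete, a sketch of the kind of two-lane float addition this could emit under the struct-of-vector representation; the typedef spellings, the name vadd_f32 and the use of static inline are assumptions, while the function body matches the OpAdd string built above for a prototype like "ddd" with type "f".

    typedef float __neon_float32x2_t __attribute__((__vector_size__(8)));
    typedef struct float32x2_t { __neon_float32x2_t val; } float32x2_t;

    static inline float32x2_t vadd_f32(float32x2_t a, float32x2_t b) {
      float32x2_t r; r.val = a.val + b.val; return r;   // GenOpString(OpAdd, ...)
    }

    int main() { return 0; }
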
+static unsigned GetNeonEnum(const std::string &proto, StringRef typestr) {
+ unsigned mod = proto[0];
+ unsigned ret = 0;
+
+ if (mod == 'v' || mod == 'f')
+ mod = proto[1];
+
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+ bool scal = false;
+ bool cnst = false;
+ bool pntr = false;
+
+ // Base type to get the type string for.
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ // Based on the modifying character, change the type and width if necessary.
+ type = ModType(mod, type, quad, poly, usgn, scal, cnst, pntr);
+
+ if (usgn)
+ ret |= 0x08;
+ if (quad && proto[1] != 'g')
+ ret |= 0x10;
+
+ switch (type) {
+ case 'c':
+ ret |= poly ? 5 : 0;
+ break;
+ case 's':
+ ret |= poly ? 6 : 1;
+ break;
+ case 'i':
+ ret |= 2;
+ break;
+ case 'l':
+ ret |= 3;
+ break;
+ case 'h':
+ ret |= 7;
+ break;
+ case 'f':
+ ret |= 4;
+ break;
+ default:
+ throw "unhandled type!";
+ break;
+ }
+ return ret;
+}
+
+// Generate the definition for this intrinsic, e.g. __builtin_neon_cls(a)
+// If structTypes is true, the NEON types are structs of vector types rather
+// than vector types, and the call becomes __builtin_neon_cls(a.val)
+static std::string GenBuiltin(const std::string &name, const std::string &proto,
+ StringRef typestr, ClassKind ck,
+ bool structTypes = true) {
+ bool dummy, quad = false;
+ char type = ClassifyType(typestr, quad, dummy, dummy);
+ unsigned nElts = 0;
+ switch (type) {
+ case 'c': nElts = 8; break;
+ case 's': nElts = 4; break;
+ case 'i': nElts = 2; break;
+ case 'l': nElts = 1; break;
+ case 'h': nElts = 4; break;
+ case 'f': nElts = 2; break;
+ }
+ if (quad) nElts <<= 1;
+
+ char arg = 'a';
+ std::string s;
+
+ // If this builtin returns a struct of 2, 3, or 4 vectors, pass it as an
+ // implicit sret-like argument.
+ bool sret = (proto[0] == '2' || proto[0] == '3' || proto[0] == '4');
+
+ // If this builtin takes an immediate argument, we need to #define it rather
+ // than use a standard declaration, so that SemaChecking can range check
+ // the immediate passed by the user.
+ bool define = proto.find('i') != std::string::npos;
+
+ // If all types are the same size, bitcasting the args will take care
+ // of arg checking. The actual signedness etc. will be taken care of with
+ // special enums.
+ if (proto.find('s') == std::string::npos)
+ ck = ClassB;
+
+ if (proto[0] != 'v') {
+ std::string ts = TypeString(proto[0], typestr);
+
+ if (define) {
+ if (sret)
+ s += "({ " + ts + " r; ";
+ else if (proto[0] != 's')
+ s += "(" + ts + "){(__neon_" + ts + ")";
+ } else if (sret) {
+ s += ts + " r; ";
+ } else {
+ s += ts + " r; r";
+ if (structTypes && proto[0] != 's' && proto[0] != 'i' && proto[0] != 'l')
+ s += ".val";
+
+ s += " = ";
+ }
+ }
+
+ bool splat = proto.find('a') != std::string::npos;
+
+ s += "__builtin_neon_";
+ if (splat) {
+ std::string vname(name, 0, name.size()-2);
+ s += MangleName(vname, typestr, ck);
+ } else {
+ s += MangleName(name, typestr, ck);
+ }
+ s += "(";
+
+ // Pass the address of the return variable as the first argument to sret-like
+ // builtins.
+ if (sret)
+ s += "&r, ";
+
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ std::string args = std::string(&arg, 1);
+ if (define)
+ args = "(" + args + ")";
+
+ // Handle multiple-vector values specially, emitting each subvector as an
+ // argument to the __builtin.
+ if (structTypes && (proto[i] == '2' || proto[i] == '3' || proto[i] == '4')){
+ for (unsigned vi = 0, ve = proto[i] - '0'; vi != ve; ++vi) {
+ s += args + ".val[" + utostr(vi) + "].val";
+ if ((vi + 1) < ve)
+ s += ", ";
+ }
+ if ((i + 1) < e)
+ s += ", ";
+
+ continue;
+ }
+
+ if (splat && (i + 1) == e)
+ s += Duplicate(nElts, typestr, args);
+ else
+ s += args;
+
+ if (structTypes && proto[i] != 's' && proto[i] != 'i' && proto[i] != 'l' &&
+ proto[i] != 'p' && proto[i] != 'c' && proto[i] != 'a') {
+ s += ".val";
+ }
+ if ((i + 1) < e)
+ s += ", ";
+ }
+
+ // Extra constant integer to hold type class enum for this function, e.g. s8
+ if (ck == ClassB)
+ s += ", " + utostr(GetNeonEnum(proto, typestr));
+
+ if (define)
+ s += ")";
+ else
+ s += ");";
+
+ if (proto[0] != 'v') {
+ if (define) {
+ if (sret)
+ s += "; r; })";
+ else if (proto[0] != 's')
+ s += "}";
+ } else {
+ s += " return r;";
+ }
+ }
+ return s;
+}
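+
+// Illustration only: for a two-operand ClassB builtin over the (assumed)
+// int8x8_t type, the generated definition body would look roughly like
+//   int8x8_t r; r.val = __builtin_neon_vXXX_v(a.val, b.val, 0); return r;
+// where vXXX_v stands for the MangleName() result and the trailing 0 is the
+// GetNeonEnum() type code for a signed, non-quad 8-bit element type.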
+
+static std::string GenBuiltinDef(const std::string &name,
+ const std::string &proto,
+ StringRef typestr, ClassKind ck) {
+ std::string s("BUILTIN(__builtin_neon_");
+
+ // If all types are the same size, bitcasting the args will take care
+ // of arg checking. The actual signedness etc. will be taken care of with
+ // special enums.
+ if (proto.find('s') == std::string::npos)
+ ck = ClassB;
+
+ s += MangleName(name, typestr, ck);
+ s += ", \"";
+
+ for (unsigned i = 0, e = proto.size(); i != e; ++i)
+ s += BuiltinTypeString(proto[i], typestr, ck, i == 0);
+
+ // Extra constant integer to hold type class enum for this function, e.g. s8
+ if (ck == ClassB)
+ s += "i";
+
+ s += "\", \"n\")";
+ return s;
+}
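+
+// Illustration only: each emitted line has the usual Builtins .def shape,
+// for example something like
+//   BUILTIN(__builtin_neon_vXXX_v, "V8cV8cV8ci", "n")
+// where the signature string comes from BuiltinTypeString() and the trailing
+// "i" is the extra type-code argument appended for ClassB builtins; the
+// builtin name and signature characters here are placeholders.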
+
+/// run - Read the records in arm_neon.td and output arm_neon.h. arm_neon.h
+/// consists of type definitions and function declarations.
+void NeonEmitter::run(raw_ostream &OS) {
+ EmitSourceFileHeader("ARM NEON Header", OS);
+
+ // FIXME: emit license into file?
+
+ OS << "#ifndef __ARM_NEON_H\n";
+ OS << "#define __ARM_NEON_H\n\n";
+
+ OS << "#ifndef __ARM_NEON__\n";
+ OS << "#error \"NEON support not enabled\"\n";
+ OS << "#endif\n\n";
+
+ OS << "#include <stdint.h>\n\n";
+
+ // Emit NEON-specific scalar typedefs.
+ OS << "typedef float float32_t;\n";
+ OS << "typedef uint8_t poly8_t;\n";
+ OS << "typedef uint16_t poly16_t;\n";
+ OS << "typedef uint16_t float16_t;\n";
+
+ // Emit Neon vector typedefs.
+ std::string TypedefTypes("cQcsQsiQilQlUcQUcUsQUsUiQUiUlQUlhQhfQfPcQPcPsQPs");
+ SmallVector<StringRef, 24> TDTypeVec;
+ ParseTypes(0, TypedefTypes, TDTypeVec);
+
+ // Emit vector typedefs.
+ for (unsigned v = 1; v != 5; ++v) {
+ for (unsigned i = 0, e = TDTypeVec.size(); i != e; ++i) {
+ bool dummy, quad = false;
+ (void) ClassifyType(TDTypeVec[i], quad, dummy, dummy);
+ OS << "typedef __attribute__(( __vector_size__(";
+
+ OS << utostr(8*v*(quad ? 2 : 1)) << ") )) ";
+ if (!quad)
+ OS << " ";
+
+ OS << TypeString('s', TDTypeVec[i]);
+ OS << " __neon_";
+
+ char t = (v == 1) ? 'd' : '0' + v;
+ OS << TypeString(t, TDTypeVec[i]) << ";\n";
+ }
+ }
+ OS << "\n";
+
+ // Emit struct typedefs.
+ for (unsigned vi = 1; vi != 5; ++vi) {
+ for (unsigned i = 0, e = TDTypeVec.size(); i != e; ++i) {
+ std::string ts = TypeString('d', TDTypeVec[i], vi == 1);
+ std::string vs = TypeString((vi > 1) ? '0' + vi : 'd', TDTypeVec[i]);
+ std::string tag = (vi > 1) ? vs : StructTag(TDTypeVec[i]);
+ OS << "typedef struct " << tag << " {\n";
+ OS << " " << ts << " val";
+ if (vi > 1)
+ OS << "[" << utostr(vi) << "]";
+ OS << ";\n} " << vs << ";\n\n";
+ }
+ }
+
+ OS << "#define __ai static __attribute__((__always_inline__))\n\n";
+
+ std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
+
+ // Unique the return+pattern types, and assign them.
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+ std::string name = LowercaseString(R->getName());
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
+
+ bool define = Proto.find('i') != std::string::npos;
+
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ assert(!Proto.empty() && "");
+
+ // static always inline + return type
+ if (define)
+ OS << "#define";
+ else
+ OS << "__ai " << TypeString(Proto[0], TypeVec[ti]);
+
+ // Function name with type suffix
+ OS << " " << MangleName(name, TypeVec[ti], ClassS);
+
+ // Function arguments
+ OS << GenArgs(Proto, TypeVec[ti]);
+
+ // Definition.
+ if (define)
+ OS << " ";
+ else
+ OS << " { ";
+
+ if (k != OpNone) {
+ OS << GenOpString(k, Proto, TypeVec[ti]);
+ } else {
+ if (R->getSuperClasses().size() < 2)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ ClassKind ck = ClassMap[R->getSuperClasses()[1]];
+
+ if (ck == ClassNone)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+ OS << GenBuiltin(name, Proto, TypeVec[ti], ck);
+ }
+ if (!define)
+ OS << " }";
+ OS << "\n";
+ }
+ OS << "\n";
+ }
+ OS << "#undef __ai\n\n";
+ OS << "#endif /* __ARM_NEON_H */\n";
+}
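+
+// Illustration only: with the (assumed) int8x8_t element type, the loops
+// above emit declarations roughly of the form
+//   typedef __attribute__(( __vector_size__(8) ))  int8_t __neon_int8x8_t;
+//   typedef struct <tag> { __neon_int8x8_t val; } int8x8_t;
+//   __ai int8x8_t vadd_s8(int8x8_t a, int8x8_t b) { ... }
+// with <tag> supplied by StructTag() and the bodies coming from
+// GenOpString() or GenBuiltin() as selected above.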
+
+static unsigned RangeFromType(StringRef typestr) {
+ // base type to get the type string for.
+ bool quad = false, dummy = false;
+ char type = ClassifyType(typestr, quad, dummy, dummy);
+
+ switch (type) {
+ case 'c':
+ return (8 << (int)quad) - 1;
+ case 'h':
+ case 's':
+ return (4 << (int)quad) - 1;
+ case 'f':
+ case 'i':
+ return (2 << (int)quad) - 1;
+ case 'l':
+ return (1 << (int)quad) - 1;
+ default:
+ throw "unhandled type!";
+ break;
+ }
+ assert(0 && "unreachable");
+ return 0;
+}
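+
+// Worked example (illustration): a quad 'c' type has 16 lanes, so
+// RangeFromType returns (8 << 1) - 1 = 15, the largest immediate accepted
+// for lane-style arguments of that type.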
+
+/// runHeader - Emit a file with sections defining:
+/// 1. the NEON section of BuiltinsARM.def.
+/// 2. the SemaChecking code for the type overload checking.
+/// 3. the SemaChecking code for validation of intrinsic immediate arguments.
+void NeonEmitter::runHeader(raw_ostream &OS) {
+ std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
+
+ StringMap<OpKind> EmittedMap;
+
+ // Generate BuiltinsARM.def for NEON
+ OS << "#ifdef GET_NEON_BUILTINS\n";
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+ OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
+ if (k != OpNone)
+ continue;
+
+ std::string Proto = R->getValueAsString("Prototype");
+
+ // Functions with 'a' (the splat code) in the type prototype should not get
+ // their own builtin as they use the non-splat variant.
+ if (Proto.find('a') != std::string::npos)
+ continue;
+
+ std::string Types = R->getValueAsString("Types");
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ if (R->getSuperClasses().size() < 2)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ std::string name = LowercaseString(R->getName());
+ ClassKind ck = ClassMap[R->getSuperClasses()[1]];
+
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ // Generate the BuiltinsARM.def declaration for this builtin, ensuring
+ // that each unique BUILTIN() macro appears only once in the output
+ // stream.
+ std::string bd = GenBuiltinDef(name, Proto, TypeVec[ti], ck);
+ if (EmittedMap.count(bd))
+ continue;
+
+ EmittedMap[bd] = OpNone;
+ OS << bd << "\n";
+ }
+ }
+ OS << "#endif\n\n";
+
+ // Generate the overloaded type checking code for SemaChecking.cpp
+ OS << "#ifdef GET_NEON_OVERLOAD_CHECK\n";
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+ OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
+ if (k != OpNone)
+ continue;
+
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+ std::string name = LowercaseString(R->getName());
+
+ // Functions with 'a' (the splat code) in the type prototype should not get
+ // their own builtin as they use the non-splat variant.
+ if (Proto.find('a') != std::string::npos)
+ continue;
+
+ // Functions which have a scalar argument cannot be overloaded; there is no
+ // need to check them when emitting the type checking code.
+ if (Proto.find('s') != std::string::npos)
+ continue;
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ if (R->getSuperClasses().size() < 2)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ int si = -1, qi = -1;
+ unsigned mask = 0, qmask = 0;
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ // Generate the switch case(s) for this builtin for the type validation.
+ bool quad = false, poly = false, usgn = false;
+ (void) ClassifyType(TypeVec[ti], quad, poly, usgn);
+
+ if (quad) {
+ qi = ti;
+ qmask |= 1 << GetNeonEnum(Proto, TypeVec[ti]);
+ } else {
+ si = ti;
+ mask |= 1 << GetNeonEnum(Proto, TypeVec[ti]);
+ }
+ }
+ if (mask)
+ OS << "case ARM::BI__builtin_neon_"
+ << MangleName(name, TypeVec[si], ClassB)
+ << ": mask = " << "0x" << utohexstr(mask) << "; break;\n";
+ if (qmask)
+ OS << "case ARM::BI__builtin_neon_"
+ << MangleName(name, TypeVec[qi], ClassB)
+ << ": mask = " << "0x" << utohexstr(qmask) << "; break;\n";
+ }
+ OS << "#endif\n\n";
+
+ // Generate the intrinsic range checking code for shift/lane immediates.
+ OS << "#ifdef GET_NEON_IMMEDIATE_CHECK\n";
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+
+ OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
+ if (k != OpNone)
+ continue;
+
+ std::string name = LowercaseString(R->getName());
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+
+ // Functions with 'a' (the splat code) in the type prototype should not get
+ // their own builtin as they use the non-splat variant.
+ if (Proto.find('a') != std::string::npos)
+ continue;
+
+ // Functions which do not have an immediate do not need to have range
+ // checking code emitted.
+ if (Proto.find('i') == std::string::npos)
+ continue;
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ if (R->getSuperClasses().size() < 2)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ ClassKind ck = ClassMap[R->getSuperClasses()[1]];
+
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ std::string namestr, shiftstr, rangestr;
+
+ // Builtins which are overloaded by type will need to have their upper
+ // bound computed at Sema time based on the type constant.
+ if (Proto.find('s') == std::string::npos) {
+ ck = ClassB;
+ if (R->getValueAsBit("isShift")) {
+ shiftstr = ", true";
+
+ // Right shifts have an 'r' in the name, left shifts do not.
+ if (name.find('r') != std::string::npos)
+ rangestr = "l = 1; ";
+ }
+ rangestr += "u = RFT(TV" + shiftstr + ")";
+ } else {
+ rangestr = "u = " + utostr(RangeFromType(TypeVec[ti]));
+ }
+ // Make sure cases appear only once by uniquing them in a string map.
+ namestr = MangleName(name, TypeVec[ti], ck);
+ if (EmittedMap.count(namestr))
+ continue;
+ EmittedMap[namestr] = OpNone;
+
+ // Calculate the index of the immediate that should be range checked.
+ unsigned immidx = 0;
+
+ // Builtins that return a struct of multiple vectors have an extra
+ // leading arg for the struct return.
+ if (Proto[0] == '2' || Proto[0] == '3' || Proto[0] == '4')
+ ++immidx;
+
+ // Add one to the index for each argument until we reach the immediate
+ // to be checked. Structs of vectors are passed as multiple arguments.
+ for (unsigned ii = 1, ie = Proto.size(); ii != ie; ++ii) {
+ switch (Proto[ii]) {
+ default: immidx += 1; break;
+ case '2': immidx += 2; break;
+ case '3': immidx += 3; break;
+ case '4': immidx += 4; break;
+ case 'i': ie = ii + 1; break;
+ }
+ }
+ OS << "case ARM::BI__builtin_neon_" << MangleName(name, TypeVec[ti], ck)
+ << ": i = " << immidx << "; " << rangestr << "; break;\n";
+ }
+ }
+ OS << "#endif\n\n";
+}
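+
+// Illustration only: the immediate-check section above emits cases roughly
+// of the form
+//   case ARM::BI__builtin_neon_vXXX_v: i = 2; l = 1; u = RFT(TV, true); break;
+// where vXXX_v is a placeholder for a mangled builtin name, i is the index of
+// the immediate argument, and l/u are the bounds enforced by SemaChecking.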
diff --git a/libclamav/c++/llvm/utils/TableGen/NeonEmitter.h b/libclamav/c++/llvm/utils/TableGen/NeonEmitter.h
new file mode 100644
index 0000000..6c6760d
--- /dev/null
+++ b/libclamav/c++/llvm/utils/TableGen/NeonEmitter.h
@@ -0,0 +1,122 @@
+//===- NeonEmitter.h - Generate arm_neon.h for use with clang ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting arm_neon.h, which includes
+// a declaration and definition of each function specified by the ARM NEON
+// compiler interface. See ARM document DUI0348B.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NEON_EMITTER_H
+#define NEON_EMITTER_H
+
+#include "Record.h"
+#include "TableGenBackend.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+
+enum OpKind {
+ OpNone,
+ OpAdd,
+ OpSub,
+ OpMul,
+ OpMla,
+ OpMls,
+ OpMulN,
+ OpMlaN,
+ OpMlsN,
+ OpEq,
+ OpGe,
+ OpLe,
+ OpGt,
+ OpLt,
+ OpNeg,
+ OpNot,
+ OpAnd,
+ OpOr,
+ OpXor,
+ OpAndNot,
+ OpOrNot,
+ OpCast,
+ OpConcat,
+ OpDup,
+ OpHi,
+ OpLo,
+ OpSelect,
+ OpRev16,
+ OpRev32,
+ OpRev64
+};
+
+enum ClassKind {
+ ClassNone,
+ ClassI,
+ ClassS,
+ ClassW,
+ ClassB
+};
+
+namespace llvm {
+
+ class NeonEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+ StringMap<OpKind> OpMap;
+ DenseMap<Record*, ClassKind> ClassMap;
+
+ public:
+ NeonEmitter(RecordKeeper &R) : Records(R) {
+ OpMap["OP_NONE"] = OpNone;
+ OpMap["OP_ADD"] = OpAdd;
+ OpMap["OP_SUB"] = OpSub;
+ OpMap["OP_MUL"] = OpMul;
+ OpMap["OP_MLA"] = OpMla;
+ OpMap["OP_MLS"] = OpMls;
+ OpMap["OP_MUL_N"] = OpMulN;
+ OpMap["OP_MLA_N"] = OpMlaN;
+ OpMap["OP_MLS_N"] = OpMlsN;
+ OpMap["OP_EQ"] = OpEq;
+ OpMap["OP_GE"] = OpGe;
+ OpMap["OP_LE"] = OpLe;
+ OpMap["OP_GT"] = OpGt;
+ OpMap["OP_LT"] = OpLt;
+ OpMap["OP_NEG"] = OpNeg;
+ OpMap["OP_NOT"] = OpNot;
+ OpMap["OP_AND"] = OpAnd;
+ OpMap["OP_OR"] = OpOr;
+ OpMap["OP_XOR"] = OpXor;
+ OpMap["OP_ANDN"] = OpAndNot;
+ OpMap["OP_ORN"] = OpOrNot;
+ OpMap["OP_CAST"] = OpCast;
+ OpMap["OP_CONC"] = OpConcat;
+ OpMap["OP_HI"] = OpHi;
+ OpMap["OP_LO"] = OpLo;
+ OpMap["OP_DUP"] = OpDup;
+ OpMap["OP_SEL"] = OpSelect;
+ OpMap["OP_REV16"] = OpRev16;
+ OpMap["OP_REV32"] = OpRev32;
+ OpMap["OP_REV64"] = OpRev64;
+
+ Record *SI = R.getClass("SInst");
+ Record *II = R.getClass("IInst");
+ Record *WI = R.getClass("WInst");
+ ClassMap[SI] = ClassS;
+ ClassMap[II] = ClassI;
+ ClassMap[WI] = ClassW;
+ }
+
+ // run - Emit arm_neon.h.inc
+ void run(raw_ostream &o);
+
+ // runHeader - Emit all the __builtin prototypes used in arm_neon.h
+ void runHeader(raw_ostream &o);
+ };
+
+} // End llvm namespace
+
+#endif
diff --git a/libclamav/c++/llvm/utils/TableGen/Record.cpp b/libclamav/c++/llvm/utils/TableGen/Record.cpp
index f9e2fe8..dc79358 100644
--- a/libclamav/c++/llvm/utils/TableGen/Record.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/Record.cpp
@@ -270,7 +270,15 @@ Init *RecordRecTy::convertValue(TypedInit *TI) {
}
bool RecordRecTy::baseClassOf(const RecordRecTy *RHS) const {
- return Rec == RHS->getRecord() || RHS->getRecord()->isSubClassOf(Rec);
+ if (Rec == RHS->getRecord() || RHS->getRecord()->isSubClassOf(Rec))
+ return true;
+
+ const std::vector<Record*> &SC = Rec->getSuperClasses();
+ for (unsigned i = 0, e = SC.size(); i != e; ++i)
+ if (RHS->getRecord()->isSubClassOf(SC[i]))
+ return true;
+
+ return false;
}
@@ -620,23 +628,6 @@ std::string UnOpInit::getAsString() const {
return Result + "(" + LHS->getAsString() + ")";
}
-RecTy *UnOpInit::getFieldType(const std::string &FieldName) const {
- switch (getOpcode()) {
- default: assert(0 && "Unknown unop");
- case CAST: {
- RecordRecTy *RecordType = dynamic_cast<RecordRecTy *>(getType());
- if (RecordType) {
- RecordVal *Field = RecordType->getRecord()->getValue(FieldName);
- if (Field) {
- return Field->getType();
- }
- }
- break;
- }
- }
- return 0;
-}
-
Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) {
switch (getOpcode()) {
default: assert(0 && "Unknown binop");
@@ -646,18 +637,8 @@ Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) {
if (LHSs && RHSs) {
DefInit *LOp = dynamic_cast<DefInit*>(LHSs->getOperator());
DefInit *ROp = dynamic_cast<DefInit*>(RHSs->getOperator());
- if (LOp->getDef() != ROp->getDef()) {
- bool LIsOps =
- LOp->getDef()->getName() == "outs" ||
- LOp->getDef()->getName() != "ins" ||
- LOp->getDef()->getName() != "defs";
- bool RIsOps =
- ROp->getDef()->getName() == "outs" ||
- ROp->getDef()->getName() != "ins" ||
- ROp->getDef()->getName() != "defs";
- if (!LIsOps || !RIsOps)
- throw "Concated Dag operators do not match!";
- }
+ if (LOp == 0 || ROp == 0 || LOp->getDef() != ROp->getDef())
+ throw "Concated Dag operators do not match!";
std::vector<Init*> Args;
std::vector<std::string> ArgNames;
for (unsigned i = 0, e = LHSs->getNumArgs(); i != e; ++i) {
@@ -731,9 +712,20 @@ Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) {
break;
}
case EQ: {
- // Make sure we've resolved
+ // Try to fold an eq comparison for 'bit' and 'int'; otherwise fall back
+ // to string objects.
+ IntInit* L =
+ dynamic_cast<IntInit*>(LHS->convertInitializerTo(new IntRecTy()));
+ IntInit* R =
+ dynamic_cast<IntInit*>(RHS->convertInitializerTo(new IntRecTy()));
+
+ if (L && R)
+ return new IntInit(L->getValue() == R->getValue());
+
StringInit *LHSs = dynamic_cast<StringInit*>(LHS);
StringInit *RHSs = dynamic_cast<StringInit*>(RHS);
+
+ // Make sure we've resolved
if (LHSs && RHSs)
return new IntInit(LHSs->getValue() == RHSs->getValue());
@@ -981,6 +973,8 @@ Init *TernOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) {
case IF: {
IntInit *LHSi = dynamic_cast<IntInit*>(LHS);
+ if (Init *I = LHS->convertInitializerTo(new IntRecTy()))
+ LHSi = dynamic_cast<IntInit*>(I);
if (LHSi) {
if (LHSi->getValue()) {
return MHS;
@@ -1000,6 +994,8 @@ Init *TernOpInit::resolveReferences(Record &R, const RecordVal *RV) {
if (Opc == IF && lhs != LHS) {
IntInit *Value = dynamic_cast<IntInit*>(lhs);
+ if (Init *I = lhs->convertInitializerTo(new IntRecTy()))
+ Value = dynamic_cast<IntInit*>(I);
if (Value != 0) {
// Short-circuit
if (Value->getValue()) {
@@ -1033,6 +1029,17 @@ std::string TernOpInit::getAsString() const {
+ RHS->getAsString() + ")";
}
+RecTy *TypedInit::getFieldType(const std::string &FieldName) const {
+ RecordRecTy *RecordType = dynamic_cast<RecordRecTy *>(getType());
+ if (RecordType) {
+ RecordVal *Field = RecordType->getRecord()->getValue(FieldName);
+ if (Field) {
+ return Field->getType();
+ }
+ }
+ return 0;
+}
+
Init *TypedInit::convertInitializerBitRange(const std::vector<unsigned> &Bits) {
BitsRecTy *T = dynamic_cast<BitsRecTy*>(getType());
if (T == 0) return 0; // Cannot subscript a non-bits variable...
@@ -1118,12 +1125,15 @@ RecTy *VarInit::getFieldType(const std::string &FieldName) const {
return 0;
}
-Init *VarInit::getFieldInit(Record &R, const std::string &FieldName) const {
+Init *VarInit::getFieldInit(Record &R, const RecordVal *RV,
+ const std::string &FieldName) const {
if (dynamic_cast<RecordRecTy*>(getType()))
- if (const RecordVal *RV = R.getValue(VarName)) {
- Init *TheInit = RV->getValue();
+ if (const RecordVal *Val = R.getValue(VarName)) {
+ if (RV != Val && (RV || dynamic_cast<UnsetInit*>(Val->getValue())))
+ return 0;
+ Init *TheInit = Val->getValue();
assert(TheInit != this && "Infinite loop detected!");
- if (Init *I = TheInit->getFieldInit(R, FieldName))
+ if (Init *I = TheInit->getFieldInit(R, RV, FieldName))
return I;
else
return 0;
@@ -1184,7 +1194,8 @@ RecTy *DefInit::getFieldType(const std::string &FieldName) const {
return 0;
}
-Init *DefInit::getFieldInit(Record &R, const std::string &FieldName) const {
+Init *DefInit::getFieldInit(Record &R, const RecordVal *RV,
+ const std::string &FieldName) const {
return Def->getValue(FieldName)->getValue();
}
@@ -1195,7 +1206,7 @@ std::string DefInit::getAsString() const {
Init *FieldInit::resolveBitReference(Record &R, const RecordVal *RV,
unsigned Bit) {
- if (Init *BitsVal = Rec->getFieldInit(R, FieldName))
+ if (Init *BitsVal = Rec->getFieldInit(R, RV, FieldName))
if (BitsInit *BI = dynamic_cast<BitsInit*>(BitsVal)) {
assert(Bit < BI->getNumBits() && "Bit reference out of range!");
Init *B = BI->getBit(Bit);
@@ -1208,7 +1219,7 @@ Init *FieldInit::resolveBitReference(Record &R, const RecordVal *RV,
Init *FieldInit::resolveListElementReference(Record &R, const RecordVal *RV,
unsigned Elt) {
- if (Init *ListVal = Rec->getFieldInit(R, FieldName))
+ if (Init *ListVal = Rec->getFieldInit(R, RV, FieldName))
if (ListInit *LI = dynamic_cast<ListInit*>(ListVal)) {
if (Elt >= LI->getSize()) return 0;
Init *E = LI->getElement(Elt);
@@ -1225,7 +1236,7 @@ Init *FieldInit::resolveListElementReference(Record &R, const RecordVal *RV,
Init *FieldInit::resolveReferences(Record &R, const RecordVal *RV) {
Init *NewRec = RV ? Rec->resolveReferences(R, RV) : Rec;
- Init *BitsVal = NewRec->getFieldInit(R, FieldName);
+ Init *BitsVal = NewRec->getFieldInit(R, RV, FieldName);
if (BitsVal) {
Init *BVR = BitsVal->resolveReferences(R, RV);
return BVR->isComplete() ? BVR : this;
@@ -1245,7 +1256,7 @@ Init *DagInit::resolveReferences(Record &R, const RecordVal *RV) {
Init *Op = Val->resolveReferences(R, RV);
if (Args != NewArgs || Op != Val)
- return new DagInit(Op, "", NewArgs, ArgNames);
+ return new DagInit(Op, ValName, NewArgs, ArgNames);
return this;
}
@@ -1313,7 +1324,6 @@ void Record::resolveReferencesTo(const RecordVal *RV) {
}
}
-
void Record::dump() const { errs() << *this; }
raw_ostream &llvm::operator<<(raw_ostream &OS, const Record &R) {
diff --git a/libclamav/c++/llvm/utils/TableGen/Record.h b/libclamav/c++/llvm/utils/TableGen/Record.h
index 90096e9..d6f37ee 100644
--- a/libclamav/c++/llvm/utils/TableGen/Record.h
+++ b/libclamav/c++/llvm/utils/TableGen/Record.h
@@ -503,7 +503,8 @@ struct Init {
/// initializer for the specified field. If getFieldType returns non-null
/// this method should return non-null, otherwise it returns null.
///
- virtual Init *getFieldInit(Record &R, const std::string &FieldName) const {
+ virtual Init *getFieldInit(Record &R, const RecordVal *RV,
+ const std::string &FieldName) const {
return 0;
}
@@ -534,6 +535,12 @@ public:
virtual Init *convertInitializerBitRange(const std::vector<unsigned> &Bits);
virtual Init *convertInitListSlice(const std::vector<unsigned> &Elements);
+ /// getFieldType - This method is used to implement the FieldInit class.
+ /// Implementors of this method should return the type of the named field if
+ /// they are of record type.
+ ///
+ virtual RecTy *getFieldType(const std::string &FieldName) const;
+
/// resolveBitReference - This method is used to implement
/// VarBitInit::resolveReferences. If the bit is able to be resolved, we
/// simply return the resolved value, otherwise we return null.
@@ -608,6 +615,11 @@ public:
if (!getBit(i)->isComplete()) return false;
return true;
}
+ bool allInComplete() const {
+ for (unsigned i = 0; i != getNumBits(); ++i)
+ if (getBit(i)->isComplete()) return false;
+ return true;
+ }
virtual std::string getAsString() const;
virtual Init *resolveReferences(Record &R, const RecordVal *RV);
@@ -829,12 +841,6 @@ public:
virtual Init *resolveReferences(Record &R, const RecordVal *RV);
- /// getFieldType - This method is used to implement the FieldInit class.
- /// Implementors of this method should return the type of the named field if
- /// they are of record type.
- ///
- virtual RecTy *getFieldType(const std::string &FieldName) const;
-
virtual std::string getAsString() const;
};
@@ -950,7 +956,8 @@ public:
unsigned Elt);
virtual RecTy *getFieldType(const std::string &FieldName) const;
- virtual Init *getFieldInit(Record &R, const std::string &FieldName) const;
+ virtual Init *getFieldInit(Record &R, const RecordVal *RV,
+ const std::string &FieldName) const;
/// resolveReferences - This method is used by classes that refer to other
/// variables which may not be defined at the time the expression is formed.
@@ -1035,7 +1042,8 @@ public:
//virtual Init *convertInitializerBitRange(const std::vector<unsigned> &Bits);
virtual RecTy *getFieldType(const std::string &FieldName) const;
- virtual Init *getFieldInit(Record &R, const std::string &FieldName) const;
+ virtual Init *getFieldInit(Record &R, const RecordVal *RV,
+ const std::string &FieldName) const;
virtual std::string getAsString() const;
@@ -1453,7 +1461,7 @@ public:
///
struct LessRecord {
bool operator()(const Record *Rec1, const Record *Rec2) const {
- return Rec1->getName() < Rec2->getName();
+ return StringRef(Rec1->getName()).compare_numeric(Rec2->getName()) < 0;
}
};
diff --git a/libclamav/c++/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/RegisterInfoEmitter.cpp
index fcf4123..6f06705 100644
--- a/libclamav/c++/llvm/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/RegisterInfoEmitter.cpp
@@ -35,14 +35,29 @@ void RegisterInfoEmitter::runEnums(raw_ostream &OS) {
if (!Namespace.empty())
OS << "namespace " << Namespace << " {\n";
- OS << " enum {\n NoRegister,\n";
+ OS << "enum {\n NoRegister,\n";
for (unsigned i = 0, e = Registers.size(); i != e; ++i)
- OS << " " << Registers[i].getName() << ", \t// " << i+1 << "\n";
- OS << " NUM_TARGET_REGS \t// " << Registers.size()+1 << "\n";
- OS << " };\n";
+ OS << " " << Registers[i].getName() << ", \t// " << i+1 << "\n";
+ OS << " NUM_TARGET_REGS \t// " << Registers.size()+1 << "\n";
+ OS << "};\n";
if (!Namespace.empty())
OS << "}\n";
+
+ const std::vector<Record*> SubRegIndices = Target.getSubRegIndices();
+ if (!SubRegIndices.empty()) {
+ OS << "\n// Subregister indices\n";
+ Namespace = SubRegIndices[0]->getValueAsString("Namespace");
+ if (!Namespace.empty())
+ OS << "namespace " << Namespace << " {\n";
+ OS << "enum {\n NoSubRegister,\n";
+ for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i)
+ OS << " " << SubRegIndices[i]->getName() << ",\t// " << i+1 << "\n";
+ OS << " NUM_TARGET_SUBREGS = " << SubRegIndices.size()+1 << "\n";
+ OS << "};\n";
+ if (!Namespace.empty())
+ OS << "}\n";
+ }
OS << "} // End llvm namespace \n";
}
@@ -67,6 +82,7 @@ void RegisterInfoEmitter::runHeader(raw_ostream &OS) {
<< " { return false; }\n"
<< " unsigned getSubReg(unsigned RegNo, unsigned Index) const;\n"
<< " unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const;\n"
+ << " unsigned composeSubRegIndices(unsigned, unsigned) const;\n"
<< "};\n\n";
const std::vector<CodeGenRegisterClass> &RegisterClasses =
@@ -80,7 +96,7 @@ void RegisterInfoEmitter::runHeader(raw_ostream &OS) {
for (unsigned i = 0, e = RegisterClasses.size(); i != e; ++i) {
if (i) OS << ",\n";
OS << " " << RegisterClasses[i].getName() << "RegClassID";
- OS << " = " << (i+1);
+ OS << " = " << i;
}
OS << "\n };\n\n";
@@ -103,16 +119,6 @@ void RegisterInfoEmitter::runHeader(raw_ostream &OS) {
OS << "} // End llvm namespace \n";
}
-bool isSubRegisterClass(const CodeGenRegisterClass &RC,
- std::set<Record*> &RegSet) {
- for (unsigned i = 0, e = RC.Elements.size(); i != e; ++i) {
- Record *Reg = RC.Elements[i];
- if (!RegSet.count(Reg))
- return false;
- }
- return true;
-}
-
static void addSuperReg(Record *R, Record *S,
std::map<Record*, std::set<Record*>, LessRecord> &SubRegs,
std::map<Record*, std::set<Record*>, LessRecord> &SuperRegs,
@@ -156,6 +162,160 @@ static void addSubSuperReg(Record *R, Record *S,
addSubSuperReg(R, *I, SubRegs, SuperRegs, Aliases);
}
+struct RegisterMaps {
+ // Map SubRegIndex -> Register
+ typedef std::map<Record*, Record*, LessRecord> SubRegMap;
+ // Map Register -> SubRegMap
+ typedef std::map<Record*, SubRegMap> SubRegMaps;
+
+ SubRegMaps SubReg;
+ SubRegMap &inferSubRegIndices(Record *Reg);
+
+ // Composite SubRegIndex instances.
+ // Map (SubRegIndex,SubRegIndex) -> SubRegIndex
+ typedef DenseMap<std::pair<Record*,Record*>,Record*> CompositeMap;
+ CompositeMap Composite;
+
+ // Compute SubRegIndex compositions after inferSubRegIndices has run on all
+ // registers.
+ void computeComposites();
+};
+
+// Calculate all subregindices for Reg. Loopy subregs cause infinite recursion.
+RegisterMaps::SubRegMap &RegisterMaps::inferSubRegIndices(Record *Reg) {
+ SubRegMap &SRM = SubReg[Reg];
+ if (!SRM.empty())
+ return SRM;
+ std::vector<Record*> SubRegs = Reg->getValueAsListOfDefs("SubRegs");
+ std::vector<Record*> Indices = Reg->getValueAsListOfDefs("SubRegIndices");
+ if (SubRegs.size() != Indices.size())
+ throw "Register " + Reg->getName() + " SubRegIndices doesn't match SubRegs";
+
+ // First insert the direct subregs and make sure they are fully indexed.
+ for (unsigned i = 0, e = SubRegs.size(); i != e; ++i) {
+ if (!SRM.insert(std::make_pair(Indices[i], SubRegs[i])).second)
+ throw "SubRegIndex " + Indices[i]->getName()
+ + " appears twice in Register " + Reg->getName();
+ inferSubRegIndices(SubRegs[i]);
+ }
+
+ // Keep track of inherited subregs and how they can be reached.
+ // Register -> (SubRegIndex, SubRegIndex)
+ typedef std::map<Record*, std::pair<Record*,Record*>, LessRecord> OrphanMap;
+ OrphanMap Orphans;
+
+ // Clone inherited subregs. Here the order is important - earlier subregs take
+ // precedence.
+ for (unsigned i = 0, e = SubRegs.size(); i != e; ++i) {
+ SubRegMap &M = SubReg[SubRegs[i]];
+ for (SubRegMap::iterator si = M.begin(), se = M.end(); si != se; ++si)
+ if (!SRM.insert(*si).second)
+ Orphans[si->second] = std::make_pair(Indices[i], si->first);
+ }
+
+ // Finally process the composites.
+ ListInit *Comps = Reg->getValueAsListInit("CompositeIndices");
+ for (unsigned i = 0, e = Comps->size(); i != e; ++i) {
+ DagInit *Pat = dynamic_cast<DagInit*>(Comps->getElement(i));
+ if (!Pat)
+ throw "Invalid dag '" + Comps->getElement(i)->getAsString()
+ + "' in CompositeIndices";
+ DefInit *BaseIdxInit = dynamic_cast<DefInit*>(Pat->getOperator());
+ if (!BaseIdxInit || !BaseIdxInit->getDef()->isSubClassOf("SubRegIndex"))
+ throw "Invalid SubClassIndex in " + Pat->getAsString();
+
+ // Resolve list of subreg indices into R2.
+ Record *R2 = Reg;
+ for (DagInit::const_arg_iterator di = Pat->arg_begin(),
+ de = Pat->arg_end(); di != de; ++di) {
+ DefInit *IdxInit = dynamic_cast<DefInit*>(*di);
+ if (!IdxInit || !IdxInit->getDef()->isSubClassOf("SubRegIndex"))
+ throw "Invalid SubClassIndex in " + Pat->getAsString();
+ SubRegMap::const_iterator ni = SubReg[R2].find(IdxInit->getDef());
+ if (ni == SubReg[R2].end())
+ throw "Composite " + Pat->getAsString() + " refers to bad index in "
+ + R2->getName();
+ R2 = ni->second;
+ }
+
+ // Insert composite index. Allow overriding inherited indices etc.
+ SRM[BaseIdxInit->getDef()] = R2;
+
+ // R2 is now directly addressable, no longer an orphan.
+ Orphans.erase(R2);
+ }
+
+ // Now, Orphans contains the inherited subregisters without a direct index.
+ if (!Orphans.empty()) {
+ errs() << "Error: Register " << getQualifiedName(Reg)
+ << " inherited subregisters without an index:\n";
+ for (OrphanMap::iterator i = Orphans.begin(), e = Orphans.end(); i != e;
+ ++i) {
+ errs() << " " << getQualifiedName(i->first)
+ << " = " << i->second.first->getName()
+ << ", " << i->second.second->getName() << "\n";
+ }
+ abort();
+ }
+ return SRM;
+}
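+
+// Illustration with hypothetical registers: for Q0 with SubRegs = [D0, D1]
+// under indices [dsub_0, dsub_1], where each D register exposes its S halves
+// under ssub_0/ssub_1, the map gets dsub_0->D0 and dsub_1->D1 directly,
+// inherits ssub_0->S0 and ssub_1->S1 from D0, and leaves D1's S registers as
+// orphans unless CompositeIndices assigns them indices of their own (all
+// register and index names here are purely illustrative).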
+
+void RegisterMaps::computeComposites() {
+ for (SubRegMaps::const_iterator sri = SubReg.begin(), sre = SubReg.end();
+ sri != sre; ++sri) {
+ Record *Reg1 = sri->first;
+ const SubRegMap &SRM1 = sri->second;
+ for (SubRegMap::const_iterator i1 = SRM1.begin(), e1 = SRM1.end();
+ i1 != e1; ++i1) {
+ Record *Idx1 = i1->first;
+ Record *Reg2 = i1->second;
+ // Ignore identity compositions.
+ if (Reg1 == Reg2)
+ continue;
+ // If Reg2 has no subregs, Idx1 doesn't compose.
+ if (!SubReg.count(Reg2))
+ continue;
+ const SubRegMap &SRM2 = SubReg[Reg2];
+ // Try composing Idx1 with another SubRegIndex.
+ for (SubRegMap::const_iterator i2 = SRM2.begin(), e2 = SRM2.end();
+ i2 != e2; ++i2) {
+ std::pair<Record*,Record*> IdxPair(Idx1, i2->first);
+ Record *Reg3 = i2->second;
+ // Now Reg1:IdxPair == Reg3. Find an existing index Idx with Reg1:Idx == Reg3.
+ for (SubRegMap::const_iterator i1d = SRM1.begin(), e1d = SRM1.end();
+ i1d != e1d; ++i1d) {
+ // Ignore identity compositions.
+ if (Reg2 == Reg3)
+ continue;
+ if (i1d->second == Reg3) {
+ std::pair<CompositeMap::iterator,bool> Ins =
+ Composite.insert(std::make_pair(IdxPair, i1d->first));
+ // Conflicting composition?
+ if (!Ins.second && Ins.first->second != i1d->first) {
+ errs() << "Error: SubRegIndex " << getQualifiedName(Idx1)
+ << " and " << getQualifiedName(IdxPair.second)
+ << " compose ambiguously as "
+ << getQualifiedName(Ins.first->second) << " or "
+ << getQualifiedName(i1d->first) << "\n";
+ abort();
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // We don't care about the difference between (Idx1, Idx2) -> Idx2 and invalid
+ // compositions, so remove any mappings of that form.
+ for (CompositeMap::iterator i = Composite.begin(), e = Composite.end();
+ i != e;) {
+ CompositeMap::iterator j = i;
+ ++i;
+ if (j->first.second == j->second)
+ Composite.erase(j);
+ }
+}
+
class RegisterSorter {
private:
std::map<Record*, std::set<Record*>, LessRecord> &RegisterSubRegs;
@@ -243,80 +403,82 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
std::map<unsigned, std::set<unsigned> > SuperRegClassMap;
OS << "\n";
- // Emit the sub-register classes for each RegisterClass
- for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
- const CodeGenRegisterClass &RC = RegisterClasses[rc];
+ unsigned NumSubRegIndices = Target.getSubRegIndices().size();
+
+ if (NumSubRegIndices) {
+ // Emit the sub-register classes for each RegisterClass
+ for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
+ const CodeGenRegisterClass &RC = RegisterClasses[rc];
+ std::vector<Record*> SRC(NumSubRegIndices);
+ for (DenseMap<Record*,Record*>::const_iterator
+ i = RC.SubRegClasses.begin(),
+ e = RC.SubRegClasses.end(); i != e; ++i) {
+ // Build SRC array.
+ unsigned idx = Target.getSubRegIndexNo(i->first);
+ SRC.at(idx-1) = i->second;
+
+ // Find the register class number of i->second for SuperRegClassMap.
+ for (unsigned rc2 = 0, e2 = RegisterClasses.size(); rc2 != e2; ++rc2) {
+ const CodeGenRegisterClass &RC2 = RegisterClasses[rc2];
+ if (RC2.TheDef == i->second) {
+ SuperRegClassMap[rc2].insert(rc);
+ break;
+ }
+ }
+ }
- // Give the register class a legal C name if it's anonymous.
- std::string Name = RC.TheDef->getName();
+ // Give the register class a legal C name if it's anonymous.
+ std::string Name = RC.TheDef->getName();
- OS << " // " << Name
- << " Sub-register Classes...\n"
- << " static const TargetRegisterClass* const "
- << Name << "SubRegClasses[] = {\n ";
+ OS << " // " << Name
+ << " Sub-register Classes...\n"
+ << " static const TargetRegisterClass* const "
+ << Name << "SubRegClasses[] = {\n ";
- bool Empty = true;
+ for (unsigned idx = 0; idx != NumSubRegIndices; ++idx) {
+ if (idx)
+ OS << ", ";
+ if (SRC[idx])
+ OS << "&" << getQualifiedName(SRC[idx]) << "RegClass";
+ else
+ OS << "0";
+ }
+ OS << "\n };\n\n";
+ }
- for (unsigned subrc = 0, subrcMax = RC.SubRegClasses.size();
- subrc != subrcMax; ++subrc) {
- unsigned rc2 = 0, e2 = RegisterClasses.size();
- for (; rc2 != e2; ++rc2) {
- const CodeGenRegisterClass &RC2 = RegisterClasses[rc2];
- if (RC.SubRegClasses[subrc]->getName() == RC2.getName()) {
+ // Emit the super-register classes for each RegisterClass
+ for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
+ const CodeGenRegisterClass &RC = RegisterClasses[rc];
+
+ // Give the register class a legal C name if it's anonymous.
+ std::string Name = RC.TheDef->getName();
+
+ OS << " // " << Name
+ << " Super-register Classes...\n"
+ << " static const TargetRegisterClass* const "
+ << Name << "SuperRegClasses[] = {\n ";
+
+ bool Empty = true;
+ std::map<unsigned, std::set<unsigned> >::iterator I =
+ SuperRegClassMap.find(rc);
+ if (I != SuperRegClassMap.end()) {
+ for (std::set<unsigned>::iterator II = I->second.begin(),
+ EE = I->second.end(); II != EE; ++II) {
+ const CodeGenRegisterClass &RC2 = RegisterClasses[*II];
if (!Empty)
OS << ", ";
OS << "&" << getQualifiedName(RC2.TheDef) << "RegClass";
Empty = false;
-
- std::map<unsigned, std::set<unsigned> >::iterator SCMI =
- SuperRegClassMap.find(rc2);
- if (SCMI == SuperRegClassMap.end()) {
- SuperRegClassMap.insert(std::make_pair(rc2,
- std::set<unsigned>()));
- SCMI = SuperRegClassMap.find(rc2);
- }
- SCMI->second.insert(rc);
- break;
}
}
- if (rc2 == e2)
- throw "Register Class member '" +
- RC.SubRegClasses[subrc]->getName() +
- "' is not a valid RegisterClass!";
- }
- OS << (!Empty ? ", " : "") << "NULL";
- OS << "\n };\n\n";
- }
-
- // Emit the super-register classes for each RegisterClass
- for (unsigned rc = 0, e = RegisterClasses.size(); rc != e; ++rc) {
- const CodeGenRegisterClass &RC = RegisterClasses[rc];
-
- // Give the register class a legal C name if it's anonymous.
- std::string Name = RC.TheDef->getName();
-
- OS << " // " << Name
- << " Super-register Classes...\n"
- << " static const TargetRegisterClass* const "
- << Name << "SuperRegClasses[] = {\n ";
-
- bool Empty = true;
- std::map<unsigned, std::set<unsigned> >::iterator I =
- SuperRegClassMap.find(rc);
- if (I != SuperRegClassMap.end()) {
- for (std::set<unsigned>::iterator II = I->second.begin(),
- EE = I->second.end(); II != EE; ++II) {
- const CodeGenRegisterClass &RC2 = RegisterClasses[*II];
- if (!Empty)
- OS << ", ";
- OS << "&" << getQualifiedName(RC2.TheDef) << "RegClass";
- Empty = false;
- }
+ OS << (!Empty ? ", " : "") << "NULL";
+ OS << "\n };\n\n";
}
-
- OS << (!Empty ? ", " : "") << "NULL";
- OS << "\n };\n\n";
+ } else {
+ // No subregindices in this target
+ OS << " static const TargetRegisterClass* const "
+ << "NullRegClasses[] = { NULL };\n\n";
}
// Emit the sub-classes array for each RegisterClass
@@ -326,12 +488,6 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
// Give the register class a legal C name if it's anonymous.
std::string Name = RC.TheDef->getName();
- std::set<Record*> RegSet;
- for (unsigned i = 0, e = RC.Elements.size(); i != e; ++i) {
- Record *Reg = RC.Elements[i];
- RegSet.insert(Reg);
- }
-
OS << " // " << Name
<< " Register Class sub-classes...\n"
<< " static const TargetRegisterClass* const "
@@ -341,21 +497,9 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
for (unsigned rc2 = 0, e2 = RegisterClasses.size(); rc2 != e2; ++rc2) {
const CodeGenRegisterClass &RC2 = RegisterClasses[rc2];
- // RC2 is a sub-class of RC if it is a valid replacement for any
- // instruction operand where an RC register is required. It must satisfy
- // these conditions:
- //
- // 1. All RC2 registers are also in RC.
- // 2. The RC2 spill size must not be smaller that the RC spill size.
- // 3. RC2 spill alignment must be compatible with RC.
- //
// Sub-classes are used to determine if a virtual register can be used
// as an instruction operand, or if it must be copied first.
-
- if (rc == rc2 || RC2.Elements.size() > RC.Elements.size() ||
- (RC.SpillAlignment && RC2.SpillAlignment % RC.SpillAlignment) ||
- RC.SpillSize > RC2.SpillSize || !isSubRegisterClass(RC2, RegSet))
- continue;
+ if (rc == rc2 || !RC.hasSubClass(&RC2)) continue;
if (!Empty) OS << ", ";
OS << "&" << getQualifiedName(RC2.TheDef) << "RegClass";
@@ -413,8 +557,10 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
<< RC.getName() + "VTs" << ", "
<< RC.getName() + "Subclasses" << ", "
<< RC.getName() + "Superclasses" << ", "
- << RC.getName() + "SubRegClasses" << ", "
- << RC.getName() + "SuperRegClasses" << ", "
+ << (NumSubRegIndices ? RC.getName() + "Sub" : std::string("Null"))
+ << "RegClasses, "
+ << (NumSubRegIndices ? RC.getName() + "Super" : std::string("Null"))
+ << "RegClasses, "
<< RC.SpillSize/8 << ", "
<< RC.SpillAlignment/8 << ", "
<< RC.CopyCost << ", "
@@ -436,7 +582,6 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
std::map<Record*, std::set<Record*>, LessRecord> RegisterSubRegs;
std::map<Record*, std::set<Record*>, LessRecord> RegisterSuperRegs;
std::map<Record*, std::set<Record*>, LessRecord> RegisterAliases;
- std::map<Record*, std::vector<std::pair<int, Record*> > > SubRegVectors;
typedef std::map<Record*, std::vector<int64_t>, LessRecord> DwarfRegNumsMapTy;
DwarfRegNumsMapTy DwarfRegNums;
@@ -556,83 +701,6 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
delete [] SubregHashTable;
- // Print the SuperregHashTable, a simple quadratically probed
- // hash table for determining if a register is a super-register
- // of another register.
- unsigned NumSupRegs = 0;
- RegNo.clear();
- for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
- RegNo[Regs[i].TheDef] = i;
- NumSupRegs += RegisterSuperRegs[Regs[i].TheDef].size();
- }
-
- unsigned SuperregHashTableSize = 2 * NextPowerOf2(2 * NumSupRegs);
- unsigned* SuperregHashTable = new unsigned[2 * SuperregHashTableSize];
- std::fill(SuperregHashTable, SuperregHashTable + 2 * SuperregHashTableSize, ~0U);
-
- hashMisses = 0;
-
- for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
- Record* R = Regs[i].TheDef;
- for (std::set<Record*>::iterator I = RegisterSuperRegs[R].begin(),
- E = RegisterSuperRegs[R].end(); I != E; ++I) {
- Record* RJ = *I;
- // We have to increase the indices of both registers by one when
- // computing the hash because, in the generated code, there
- // will be an extra empty slot at register 0.
- size_t index = ((i+1) + (RegNo[RJ]+1) * 37) & (SuperregHashTableSize-1);
- unsigned ProbeAmt = 2;
- while (SuperregHashTable[index*2] != ~0U &&
- SuperregHashTable[index*2+1] != ~0U) {
- index = (index + ProbeAmt) & (SuperregHashTableSize-1);
- ProbeAmt += 2;
-
- hashMisses++;
- }
-
- SuperregHashTable[index*2] = i;
- SuperregHashTable[index*2+1] = RegNo[RJ];
- }
- }
-
- OS << "\n\n // Number of hash collisions: " << hashMisses << "\n";
-
- if (SuperregHashTableSize) {
- std::string Namespace = Regs[0].TheDef->getValueAsString("Namespace");
-
- OS << " const unsigned SuperregHashTable[] = { ";
- for (unsigned i = 0; i < SuperregHashTableSize - 1; ++i) {
- if (i != 0)
- // Insert spaces for nice formatting.
- OS << " ";
-
- if (SuperregHashTable[2*i] != ~0U) {
- OS << getQualifiedName(Regs[SuperregHashTable[2*i]].TheDef) << ", "
- << getQualifiedName(Regs[SuperregHashTable[2*i+1]].TheDef) << ", \n";
- } else {
- OS << Namespace << "::NoRegister, " << Namespace << "::NoRegister, \n";
- }
- }
-
- unsigned Idx = SuperregHashTableSize*2-2;
- if (SuperregHashTable[Idx] != ~0U) {
- OS << " "
- << getQualifiedName(Regs[SuperregHashTable[Idx]].TheDef) << ", "
- << getQualifiedName(Regs[SuperregHashTable[Idx+1]].TheDef) << " };\n";
- } else {
- OS << Namespace << "::NoRegister, " << Namespace << "::NoRegister };\n";
- }
-
- OS << " const unsigned SuperregHashTableSize = "
- << SuperregHashTableSize << ";\n";
- } else {
- OS << " const unsigned SuperregHashTable[] = { ~0U, ~0U };\n"
- << " const unsigned SuperregHashTableSize = 1;\n";
- }
-
- delete [] SuperregHashTable;
-
-
// Print the AliasHashTable, a simple quadratically probed
// hash table for determining if a register aliases another register.
unsigned NumAliases = 0;
@@ -717,6 +785,8 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
// to memory.
for (std::map<Record*, std::set<Record*>, LessRecord >::iterator
I = RegisterAliases.begin(), E = RegisterAliases.end(); I != E; ++I) {
+ if (I->second.empty())
+ continue;
OS << " const unsigned " << I->first->getName() << "_AliasSet[] = { ";
for (std::set<Record*>::iterator ASI = I->second.begin(),
E = I->second.end(); ASI != E; ++ASI)
@@ -733,6 +803,8 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
// sub-registers list to memory.
for (std::map<Record*, std::set<Record*>, LessRecord>::iterator
I = RegisterSubRegs.begin(), E = RegisterSubRegs.end(); I != E; ++I) {
+ if (I->second.empty())
+ continue;
OS << " const unsigned " << I->first->getName() << "_SubRegsSet[] = { ";
std::vector<Record*> SubRegsVector;
for (std::set<Record*>::iterator ASI = I->second.begin(),
@@ -754,6 +826,8 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
// super-registers list to memory.
for (std::map<Record*, std::set<Record*>, LessRecord >::iterator
I = RegisterSuperRegs.begin(), E = RegisterSuperRegs.end(); I != E; ++I) {
+ if (I->second.empty())
+ continue;
OS << " const unsigned " << I->first->getName() << "_SuperRegsSet[] = { ";
std::vector<Record*> SuperRegsVector;
@@ -772,92 +846,116 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
// Now that register alias and sub-registers sets have been emitted, emit the
// register descriptors now.
- const std::vector<CodeGenRegister> &Registers = Target.getRegisters();
- for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
- const CodeGenRegister &Reg = Registers[i];
+ for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
+ const CodeGenRegister &Reg = Regs[i];
OS << " { \"";
OS << Reg.getName() << "\",\t";
- if (RegisterAliases.count(Reg.TheDef))
+ if (!RegisterAliases[Reg.TheDef].empty())
OS << Reg.getName() << "_AliasSet,\t";
else
OS << "Empty_AliasSet,\t";
- if (RegisterSubRegs.count(Reg.TheDef))
+ if (!RegisterSubRegs[Reg.TheDef].empty())
OS << Reg.getName() << "_SubRegsSet,\t";
else
OS << "Empty_SubRegsSet,\t";
- if (RegisterSuperRegs.count(Reg.TheDef))
+ if (!RegisterSuperRegs[Reg.TheDef].empty())
OS << Reg.getName() << "_SuperRegsSet },\n";
else
OS << "Empty_SuperRegsSet },\n";
}
OS << " };\n"; // End of register descriptors...
+
+ // Emit SubRegIndex names, skipping 0
+ const std::vector<Record*> SubRegIndices = Target.getSubRegIndices();
+ OS << "\n const char *const SubRegIndexTable[] = { \"";
+ for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
+ OS << SubRegIndices[i]->getName();
+ if (i+1 != e)
+ OS << "\", \"";
+ }
+ OS << "\" };\n\n";
OS << "}\n\n"; // End of anonymous namespace...
std::string ClassName = Target.getName() + "GenRegisterInfo";
// Calculate the mapping of subregister+index pairs to physical registers.
- std::vector<Record*> SubRegs = Records.getAllDerivedDefinitions("SubRegSet");
- for (unsigned i = 0, e = SubRegs.size(); i != e; ++i) {
- int subRegIndex = SubRegs[i]->getValueAsInt("index");
- std::vector<Record*> From = SubRegs[i]->getValueAsListOfDefs("From");
- std::vector<Record*> To = SubRegs[i]->getValueAsListOfDefs("To");
-
- if (From.size() != To.size()) {
- errs() << "Error: register list and sub-register list not of equal length"
- << " in SubRegSet\n";
- exit(1);
- }
-
- // For each entry in from/to vectors, insert the to register at index
- for (unsigned ii = 0, ee = From.size(); ii != ee; ++ii)
- SubRegVectors[From[ii]].push_back(std::make_pair(subRegIndex, To[ii]));
- }
-
+ RegisterMaps RegMaps;
+
// Emit the subregister + index mapping function based on the information
// calculated above.
- OS << "unsigned " << ClassName
+ OS << "unsigned " << ClassName
<< "::getSubReg(unsigned RegNo, unsigned Index) const {\n"
<< " switch (RegNo) {\n"
<< " default:\n return 0;\n";
- for (std::map<Record*, std::vector<std::pair<int, Record*> > >::iterator
- I = SubRegVectors.begin(), E = SubRegVectors.end(); I != E; ++I) {
- OS << " case " << getQualifiedName(I->first) << ":\n";
+ for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
+ RegisterMaps::SubRegMap &SRM = RegMaps.inferSubRegIndices(Regs[i].TheDef);
+ if (SRM.empty())
+ continue;
+ OS << " case " << getQualifiedName(Regs[i].TheDef) << ":\n";
OS << " switch (Index) {\n";
OS << " default: return 0;\n";
- for (unsigned i = 0, e = I->second.size(); i != e; ++i)
- OS << " case " << (I->second)[i].first << ": return "
- << getQualifiedName((I->second)[i].second) << ";\n";
+ for (RegisterMaps::SubRegMap::const_iterator ii = SRM.begin(),
+ ie = SRM.end(); ii != ie; ++ii)
+ OS << " case " << getQualifiedName(ii->first)
+ << ": return " << getQualifiedName(ii->second) << ";\n";
OS << " };\n" << " break;\n";
}
OS << " };\n";
OS << " return 0;\n";
OS << "}\n\n";
- OS << "unsigned " << ClassName
+ OS << "unsigned " << ClassName
<< "::getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const {\n"
<< " switch (RegNo) {\n"
<< " default:\n return 0;\n";
- for (std::map<Record*, std::vector<std::pair<int, Record*> > >::iterator
- I = SubRegVectors.begin(), E = SubRegVectors.end(); I != E; ++I) {
- OS << " case " << getQualifiedName(I->first) << ":\n";
- for (unsigned i = 0, e = I->second.size(); i != e; ++i)
- OS << " if (SubRegNo == "
- << getQualifiedName((I->second)[i].second)
- << ") return " << (I->second)[i].first << ";\n";
+ for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
+ RegisterMaps::SubRegMap &SRM = RegMaps.SubReg[Regs[i].TheDef];
+ if (SRM.empty())
+ continue;
+ OS << " case " << getQualifiedName(Regs[i].TheDef) << ":\n";
+ for (RegisterMaps::SubRegMap::const_iterator ii = SRM.begin(),
+ ie = SRM.end(); ii != ie; ++ii)
+ OS << " if (SubRegNo == " << getQualifiedName(ii->second)
+ << ") return " << getQualifiedName(ii->first) << ";\n";
OS << " return 0;\n";
}
OS << " };\n";
OS << " return 0;\n";
OS << "}\n\n";
-
+
+ // Emit composeSubRegIndices
+ RegMaps.computeComposites();
+ OS << "unsigned " << ClassName
+ << "::composeSubRegIndices(unsigned IdxA, unsigned IdxB) const {\n"
+ << " switch (IdxA) {\n"
+ << " default:\n return IdxB;\n";
+ for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
+ bool Open = false;
+ for (unsigned j = 0; j != e; ++j) {
+ if (Record *Comp = RegMaps.Composite.lookup(
+ std::make_pair(SubRegIndices[i], SubRegIndices[j]))) {
+ if (!Open) {
+ OS << " case " << getQualifiedName(SubRegIndices[i])
+ << ": switch(IdxB) {\n default: return IdxB;\n";
+ Open = true;
+ }
+ OS << " case " << getQualifiedName(SubRegIndices[j])
+ << ": return " << getQualifiedName(Comp) << ";\n";
+ }
+ }
+ if (Open)
+ OS << " }\n";
+ }
+ OS << " }\n}\n\n";
+
// Emit the constructor of the class...
OS << ClassName << "::" << ClassName
<< "(int CallFrameSetupOpcode, int CallFrameDestroyOpcode)\n"
- << " : TargetRegisterInfo(RegisterDescriptors, " << Registers.size()+1
- << ", RegisterClasses, RegisterClasses+" << RegisterClasses.size() <<",\n "
+ << " : TargetRegisterInfo(RegisterDescriptors, " << Regs.size()+1
+ << ", RegisterClasses, RegisterClasses+" << RegisterClasses.size() <<",\n"
+ << " SubRegIndexTable,\n"
<< " CallFrameSetupOpcode, CallFrameDestroyOpcode,\n"
<< " SubregHashTable, SubregHashTableSize,\n"
- << " SuperregHashTable, SuperregHashTableSize,\n"
<< " AliasesHashTable, AliasesHashTableSize) {\n"
<< "}\n\n";
@@ -865,8 +963,8 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
// First, just pull all provided information to the map
unsigned maxLength = 0;
- for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
- Record *Reg = Registers[i].TheDef;
+ for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
+ Record *Reg = Regs[i].TheDef;
std::vector<int64_t> RegNums = Reg->getValueAsListOfInts("DwarfNumbers");
maxLength = std::max((size_t)maxLength, RegNums.size());
if (DwarfRegNums.count(Reg))
diff --git a/libclamav/c++/llvm/utils/TableGen/SubtargetEmitter.cpp b/libclamav/c++/llvm/utils/TableGen/SubtargetEmitter.cpp
index 9ac652f..b04eaf8 100644
--- a/libclamav/c++/llvm/utils/TableGen/SubtargetEmitter.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/SubtargetEmitter.cpp
@@ -203,7 +203,8 @@ unsigned SubtargetEmitter::CollectAllItinClasses(raw_ostream &OS,
// data initialization for the specified itinerary. N is the number
// of stages.
//
-void SubtargetEmitter::FormItineraryStageString(Record *ItinData,
+void SubtargetEmitter::FormItineraryStageString(const std::string &Name,
+ Record *ItinData,
std::string &ItinString,
unsigned &NStages) {
// Get states list
@@ -216,7 +217,7 @@ void SubtargetEmitter::FormItineraryStageString(Record *ItinData,
// Next stage
const Record *Stage = StageList[i];
- // Form string as ,{ cycles, u1 | u2 | ... | un, timeinc }
+ // Form string as ,{ cycles, u1 | u2 | ... | un, timeinc, kind }
int Cycles = Stage->getValueAsInt("Cycles");
ItinString += " { " + itostr(Cycles) + ", ";
@@ -226,13 +227,16 @@ void SubtargetEmitter::FormItineraryStageString(Record *ItinData,
// For each unit
for (unsigned j = 0, M = UnitList.size(); j < M;) {
// Add name and bitwise or
- ItinString += UnitList[j]->getName();
+ ItinString += Name + "FU::" + UnitList[j]->getName();
if (++j < M) ItinString += " | ";
}
int TimeInc = Stage->getValueAsInt("TimeInc");
ItinString += ", " + itostr(TimeInc);
+ int Kind = Stage->getValueAsInt("Kind");
+ ItinString += ", (llvm::InstrStage::ReservationKinds)" + itostr(Kind);
+
// Close off stage
ItinString += " }";
if (++i < N) ItinString += ", ";
@@ -276,9 +280,29 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
// If just no itinerary then don't bother
if (ProcItinList.size() < 2) return;
+ // Emit functional units for all the itineraries.
+ for (unsigned i = 0, N = ProcItinList.size(); i < N; ++i) {
+ // Next record
+ Record *Proc = ProcItinList[i];
+
+ std::vector<Record*> FUs = Proc->getValueAsListOfDefs("FU");
+ if (FUs.empty())
+ continue;
+
+ const std::string &Name = Proc->getName();
+ OS << "\n// Functional units for itineraries \"" << Name << "\"\n"
+ << "namespace " << Name << "FU {\n";
+
+ for (unsigned j = 0, FUN = FUs.size(); j < FUN; ++j)
+ OS << " const unsigned " << FUs[j]->getName()
+ << " = 1 << " << j << ";\n";
+
+ OS << "}\n";
+ }
+
// Begin stages table
- std::string StageTable = "static const llvm::InstrStage Stages[] = {\n";
- StageTable += " { 0, 0, 0 }, // No itinerary\n";
+ std::string StageTable = "\nstatic const llvm::InstrStage Stages[] = {\n";
+ StageTable += " { 0, 0, 0, llvm::InstrStage::Required }, // No itinerary\n";
// Begin operand cycle table
std::string OperandCycleTable = "static const unsigned OperandCycles[] = {\n";
@@ -312,7 +336,7 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
// Get string and stage count
std::string ItinStageString;
unsigned NStages;
- FormItineraryStageString(ItinData, ItinStageString, NStages);
+ FormItineraryStageString(Name, ItinData, ItinStageString, NStages);
// Get string and operand cycle count
std::string ItinOperandCycleString;
@@ -367,7 +391,7 @@ void SubtargetEmitter::EmitStageAndOperandCycleData(raw_ostream &OS,
}
// Closing stage
- StageTable += " { 0, 0, 0 } // End itinerary\n";
+ StageTable += " { 0, 0, 0, llvm::InstrStage::Required } // End itinerary\n";
StageTable += "};\n";
// Closing operand cycles
@@ -564,9 +588,9 @@ void SubtargetEmitter::run(raw_ostream &OS) {
OS << "#include \"llvm/Support/raw_ostream.h\"\n";
OS << "#include \"llvm/Target/SubtargetFeature.h\"\n";
OS << "#include \"llvm/Target/TargetInstrItineraries.h\"\n\n";
-
- Enumeration(OS, "FuncUnit", true);
- OS<<"\n";
+
+// Enumeration(OS, "FuncUnit", true);
+// OS<<"\n";
// Enumeration(OS, "InstrItinClass", false);
// OS<<"\n";
Enumeration(OS, "SubtargetFeature", true);
diff --git a/libclamav/c++/llvm/utils/TableGen/SubtargetEmitter.h b/libclamav/c++/llvm/utils/TableGen/SubtargetEmitter.h
index 1d7088f..f43a443 100644
--- a/libclamav/c++/llvm/utils/TableGen/SubtargetEmitter.h
+++ b/libclamav/c++/llvm/utils/TableGen/SubtargetEmitter.h
@@ -34,7 +34,8 @@ class SubtargetEmitter : public TableGenBackend {
void CPUKeyValues(raw_ostream &OS);
unsigned CollectAllItinClasses(raw_ostream &OS,
std::map<std::string, unsigned> &ItinClassesMap);
- void FormItineraryStageString(Record *ItinData, std::string &ItinString,
+ void FormItineraryStageString(const std::string &Names,
+ Record *ItinData, std::string &ItinString,
unsigned &NStages);
void FormItineraryOperandCycleString(Record *ItinData, std::string &ItinString,
unsigned &NOperandCycles);
diff --git a/libclamav/c++/llvm/utils/TableGen/TGParser.cpp b/libclamav/c++/llvm/utils/TableGen/TGParser.cpp
index 8c158e0..f81aabe 100644
--- a/libclamav/c++/llvm/utils/TableGen/TGParser.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/TGParser.cpp
@@ -1635,13 +1635,12 @@ bool TGParser::ParseObjectBody(Record *CurRec) {
return ParseBody(CurRec);
}
-
/// ParseDef - Parse and return a top level or multiclass def, return the record
/// corresponding to it. This returns null on error.
///
/// DefInst ::= DEF ObjectName ObjectBody
///
-llvm::Record *TGParser::ParseDef(MultiClass *CurMultiClass) {
+bool TGParser::ParseDef(MultiClass *CurMultiClass) {
SMLoc DefLoc = Lex.getLoc();
assert(Lex.getCode() == tgtok::Def && "Unknown tok");
Lex.Lex(); // Eat the 'def' token.
@@ -1655,7 +1654,7 @@ llvm::Record *TGParser::ParseDef(MultiClass *CurMultiClass) {
// Ensure redefinition doesn't happen.
if (Records.getDef(CurRec->getName())) {
Error(DefLoc, "def '" + CurRec->getName() + "' already defined");
- return 0;
+ return true;
}
Records.addDef(CurRec);
} else {
@@ -1664,20 +1663,33 @@ llvm::Record *TGParser::ParseDef(MultiClass *CurMultiClass) {
if (CurMultiClass->DefPrototypes[i]->getName() == CurRec->getName()) {
Error(DefLoc, "def '" + CurRec->getName() +
"' already defined in this multiclass!");
- return 0;
+ return true;
}
CurMultiClass->DefPrototypes.push_back(CurRec);
}
if (ParseObjectBody(CurRec))
- return 0;
+ return true;
if (CurMultiClass == 0) // Def's in multiclasses aren't really defs.
CurRec->resolveReferences();
// If ObjectBody has template arguments, it's an error.
assert(CurRec->getTemplateArgs().empty() && "How'd this get template args?");
- return CurRec;
+
+ if (CurMultiClass) {
+ // Copy the template arguments for the multiclass into the def.
+ const std::vector<std::string> &TArgs =
+ CurMultiClass->Rec.getTemplateArgs();
+
+ for (unsigned i = 0, e = TArgs.size(); i != e; ++i) {
+ const RecordVal *RV = CurMultiClass->Rec.getValue(TArgs[i]);
+ assert(RV && "Template arg doesn't exist?");
+ CurRec->addValue(*RV);
+ }
+ }
+
+ return false;
}
@@ -1758,12 +1770,12 @@ std::vector<LetRecord> TGParser::ParseLetList() {
}
/// ParseTopLevelLet - Parse a 'let' at top level. This can be a couple of
-/// different related productions.
+/// different related productions. This works inside multiclasses too.
///
/// Object ::= LET LetList IN '{' ObjectList '}'
/// Object ::= LET LetList IN Object
///
-bool TGParser::ParseTopLevelLet() {
+bool TGParser::ParseTopLevelLet(MultiClass *CurMultiClass) {
assert(Lex.getCode() == tgtok::Let && "Unexpected token");
Lex.Lex();
@@ -1779,7 +1791,7 @@ bool TGParser::ParseTopLevelLet() {
// If this is a scalar let, just handle it now
if (Lex.getCode() != tgtok::l_brace) {
// LET LetList IN Object
- if (ParseObject())
+ if (ParseObject(CurMultiClass))
return true;
} else { // Object ::= LETCommand '{' ObjectList '}'
SMLoc BraceLoc = Lex.getLoc();
@@ -1787,7 +1799,7 @@ bool TGParser::ParseTopLevelLet() {
Lex.Lex(); // eat the '{'.
// Parse the object list.
- if (ParseObjectList())
+ if (ParseObjectList(CurMultiClass))
return true;
if (Lex.getCode() != tgtok::r_brace) {
@@ -1802,29 +1814,6 @@ bool TGParser::ParseTopLevelLet() {
return false;
}
-/// ParseMultiClassDef - Parse a def in a multiclass context.
-///
-/// MultiClassDef ::= DefInst
-///
-bool TGParser::ParseMultiClassDef(MultiClass *CurMC) {
- if (Lex.getCode() != tgtok::Def)
- return TokError("expected 'def' in multiclass body");
-
- Record *D = ParseDef(CurMC);
- if (D == 0) return true;
-
- // Copy the template arguments for the multiclass into the def.
- const std::vector<std::string> &TArgs = CurMC->Rec.getTemplateArgs();
-
- for (unsigned i = 0, e = TArgs.size(); i != e; ++i) {
- const RecordVal *RV = CurMC->Rec.getValue(TArgs[i]);
- assert(RV && "Template arg doesn't exist?");
- D->addValue(*RV);
- }
-
- return false;
-}
-
/// ParseMultiClass - Parse a multiclass definition.
///
/// MultiClassInst ::= MULTICLASS ID TemplateArgList?
@@ -1885,10 +1874,18 @@ bool TGParser::ParseMultiClass() {
if (Lex.Lex() == tgtok::r_brace) // eat the '{'.
return TokError("multiclass must contain at least one def");
- while (Lex.getCode() != tgtok::r_brace)
- if (ParseMultiClassDef(CurMultiClass))
- return true;
-
+ while (Lex.getCode() != tgtok::r_brace) {
+ switch (Lex.getCode()) {
+ default:
+ return TokError("expected 'let', 'def' or 'defm' in multiclass body");
+ case tgtok::Let:
+ case tgtok::Def:
+ case tgtok::Defm:
+ if (ParseObject(CurMultiClass))
+ return true;
+ break;
+ }
+ }
Lex.Lex(); // eat the '}'.
}
@@ -1900,7 +1897,7 @@ bool TGParser::ParseMultiClass() {
///
/// DefMInst ::= DEFM ID ':' DefmSubClassRef ';'
///
-bool TGParser::ParseDefm() {
+bool TGParser::ParseDefm(MultiClass *CurMultiClass) {
assert(Lex.getCode() == tgtok::Defm && "Unexpected token!");
if (Lex.Lex() != tgtok::Id) // eat the defm.
return TokError("expected identifier after defm");
@@ -1910,6 +1907,12 @@ bool TGParser::ParseDefm() {
if (Lex.Lex() != tgtok::colon)
return TokError("expected ':' after defm identifier");
+ // Keep track of the new generated record definitions.
+ std::vector<Record*> NewRecDefs;
+
+ // This record also inherits from a regular class (non-multiclass)?
+ bool InheritFromClass = false;
+
// eat the colon.
Lex.Lex();
@@ -1991,17 +1994,87 @@ bool TGParser::ParseDefm() {
return Error(DefmPrefixLoc, "def '" + CurRec->getName() +
"' already defined, instantiating defm with subdef '" +
DefProto->getName() + "'");
- Records.addDef(CurRec);
- CurRec->resolveReferences();
+
+ // Don't create a top level definition for defm inside multiclasses,
+ // instead, only update the prototypes and bind the template args
+ // with the new created definition.
+ if (CurMultiClass) {
+ for (unsigned i = 0, e = CurMultiClass->DefPrototypes.size();
+ i != e; ++i) {
+ if (CurMultiClass->DefPrototypes[i]->getName() == CurRec->getName()) {
+ Error(DefmPrefixLoc, "defm '" + CurRec->getName() +
+ "' already defined in this multiclass!");
+ return 0;
+ }
+ }
+ CurMultiClass->DefPrototypes.push_back(CurRec);
+
+ // Copy the template arguments for the multiclass into the new def.
+ const std::vector<std::string> &TA =
+ CurMultiClass->Rec.getTemplateArgs();
+
+ for (unsigned i = 0, e = TA.size(); i != e; ++i) {
+ const RecordVal *RV = CurMultiClass->Rec.getValue(TA[i]);
+ assert(RV && "Template arg doesn't exist?");
+ CurRec->addValue(*RV);
+ }
+ } else {
+ Records.addDef(CurRec);
+ }
+
+ NewRecDefs.push_back(CurRec);
}
if (Lex.getCode() != tgtok::comma) break;
Lex.Lex(); // eat ','.
SubClassLoc = Lex.getLoc();
+
+ // A defm can inherit from regular classes (non-multiclass) as
+ // long as they come in the end of the inheritance list.
+ InheritFromClass = (Records.getClass(Lex.getCurStrVal()) != 0);
+
+ if (InheritFromClass)
+ break;
+
Ref = ParseSubClassReference(0, true);
}
+ if (InheritFromClass) {
+ // Process all the classes to inherit as if they were part of a
+ // regular 'def' and inherit all record values.
+ SubClassReference SubClass = ParseSubClassReference(0, false);
+ while (1) {
+ // Check for error.
+ if (SubClass.Rec == 0) return true;
+
+ // Get the expanded definition prototypes and teach them about
+ // the record values the current class to inherit has
+ for (unsigned i = 0, e = NewRecDefs.size(); i != e; ++i) {
+ Record *CurRec = NewRecDefs[i];
+
+ // Add it.
+ if (AddSubClass(CurRec, SubClass))
+ return true;
+
+ // Process any variables on the let stack.
+ for (unsigned i = 0, e = LetStack.size(); i != e; ++i)
+ for (unsigned j = 0, e = LetStack[i].size(); j != e; ++j)
+ if (SetValue(CurRec, LetStack[i][j].Loc, LetStack[i][j].Name,
+ LetStack[i][j].Bits, LetStack[i][j].Value))
+ return true;
+ }
+
+ if (Lex.getCode() != tgtok::comma) break;
+ Lex.Lex(); // eat ','.
+ SubClass = ParseSubClassReference(0, false);
+ }
+ }
+
+ if (!CurMultiClass)
+ for (unsigned i = 0, e = NewRecDefs.size(); i != e; ++i)
+ NewRecDefs[i]->resolveReferences();
+
if (Lex.getCode() != tgtok::semi)
return TokError("expected ';' at end of defm");
Lex.Lex();
@@ -2016,12 +2089,12 @@ bool TGParser::ParseDefm() {
/// Object ::= DefMInst
/// Object ::= LETCommand '{' ObjectList '}'
/// Object ::= LETCommand Object
-bool TGParser::ParseObject() {
+bool TGParser::ParseObject(MultiClass *MC) {
switch (Lex.getCode()) {
default: assert(0 && "This is not an object");
- case tgtok::Let: return ParseTopLevelLet();
- case tgtok::Def: return ParseDef(0) == 0;
- case tgtok::Defm: return ParseDefm();
+ case tgtok::Let: return ParseTopLevelLet(MC);
+ case tgtok::Def: return ParseDef(MC);
+ case tgtok::Defm: return ParseDefm(MC);
case tgtok::Class: return ParseClass();
case tgtok::MultiClass: return ParseMultiClass();
}
@@ -2029,9 +2102,9 @@ bool TGParser::ParseObject() {
/// ParseObjectList
/// ObjectList :== Object*
-bool TGParser::ParseObjectList() {
+bool TGParser::ParseObjectList(MultiClass *MC) {
while (isObjectStart(Lex.getCode())) {
- if (ParseObject())
+ if (ParseObject(MC))
return true;
}
return false;
diff --git a/libclamav/c++/llvm/utils/TableGen/TGParser.h b/libclamav/c++/llvm/utils/TableGen/TGParser.h
index 9f4b634..0aee931 100644
--- a/libclamav/c++/llvm/utils/TableGen/TGParser.h
+++ b/libclamav/c++/llvm/utils/TableGen/TGParser.h
@@ -69,16 +69,15 @@ private: // Semantic analysis methods.
SubMultiClassReference &SubMultiClass);
private: // Parser methods.
- bool ParseObjectList();
- bool ParseObject();
+ bool ParseObjectList(MultiClass *MC = 0);
+ bool ParseObject(MultiClass *MC);
bool ParseClass();
bool ParseMultiClass();
- bool ParseMultiClassDef(MultiClass *CurMC);
- bool ParseDefm();
- bool ParseTopLevelLet();
+ bool ParseDefm(MultiClass *CurMultiClass);
+ bool ParseDef(MultiClass *CurMultiClass);
+ bool ParseTopLevelLet(MultiClass *CurMultiClass);
std::vector<LetRecord> ParseLetList();
- Record *ParseDef(MultiClass *CurMultiClass);
bool ParseObjectBody(Record *CurRec);
bool ParseBody(Record *CurRec);
bool ParseBodyItem(Record *CurRec);
diff --git a/libclamav/c++/llvm/utils/TableGen/TableGen.cpp b/libclamav/c++/llvm/utils/TableGen/TableGen.cpp
index f20ec00..49ee1b8 100644
--- a/libclamav/c++/llvm/utils/TableGen/TableGen.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/TableGen.cpp
@@ -18,6 +18,8 @@
#include "AsmMatcherEmitter.h"
#include "AsmWriterEmitter.h"
#include "CallingConvEmitter.h"
+#include "ClangASTNodesEmitter.h"
+#include "ClangAttrEmitter.h"
#include "ClangDiagnosticsEmitter.h"
#include "CodeEmitterGen.h"
#include "DAGISelEmitter.h"
@@ -28,13 +30,14 @@
#include "InstrInfoEmitter.h"
#include "IntrinsicEmitter.h"
#include "LLVMCConfigurationEmitter.h"
+#include "NeonEmitter.h"
#include "OptParserEmitter.h"
#include "Record.h"
#include "RegisterInfoEmitter.h"
+#include "ARMDecoderEmitter.h"
#include "SubtargetEmitter.h"
#include "TGParser.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/raw_ostream.h"
@@ -48,10 +51,18 @@ enum ActionType {
GenEmitter,
GenRegisterEnums, GenRegister, GenRegisterHeader,
GenInstrEnums, GenInstrs, GenAsmWriter, GenAsmMatcher,
+ GenARMDecoder,
GenDisassembler,
GenCallingConv,
+ GenClangAttrClasses,
+ GenClangAttrImpl,
+ GenClangAttrList,
+ GenClangAttrPCHRead,
+ GenClangAttrPCHWrite,
GenClangDiagsDefs,
GenClangDiagGroups,
+ GenClangDeclNodes,
+ GenClangStmtNodes,
GenDAGISel,
GenFastISel,
GenOptParserDefs, GenOptParserImpl,
@@ -59,7 +70,9 @@ enum ActionType {
GenIntrinsic,
GenTgtIntrinsic,
GenLLVMCConf,
- GenEDHeader, GenEDInfo,
+ GenEDInfo,
+ GenArmNeon,
+ GenArmNeonSema,
PrintEnums
};
@@ -84,6 +97,8 @@ namespace {
"Generate calling convention descriptions"),
clEnumValN(GenAsmWriter, "gen-asm-writer",
"Generate assembly writer"),
+ clEnumValN(GenARMDecoder, "gen-arm-decoder",
+ "Generate decoders for ARM/Thumb"),
clEnumValN(GenDisassembler, "gen-disassembler",
"Generate disassembler"),
clEnumValN(GenAsmMatcher, "gen-asm-matcher",
@@ -102,16 +117,32 @@ namespace {
"Generate intrinsic information"),
clEnumValN(GenTgtIntrinsic, "gen-tgt-intrinsic",
"Generate target intrinsic information"),
+ clEnumValN(GenClangAttrClasses, "gen-clang-attr-classes",
+ "Generate clang attribute clases"),
+ clEnumValN(GenClangAttrImpl, "gen-clang-attr-impl",
+ "Generate clang attribute implementations"),
+ clEnumValN(GenClangAttrList, "gen-clang-attr-list",
+ "Generate a clang attribute list"),
+ clEnumValN(GenClangAttrPCHRead, "gen-clang-attr-pch-read",
+ "Generate clang PCH attribute reader"),
+ clEnumValN(GenClangAttrPCHWrite, "gen-clang-attr-pch-write",
+ "Generate clang PCH attribute writer"),
clEnumValN(GenClangDiagsDefs, "gen-clang-diags-defs",
"Generate Clang diagnostics definitions"),
clEnumValN(GenClangDiagGroups, "gen-clang-diag-groups",
"Generate Clang diagnostic groups"),
+ clEnumValN(GenClangDeclNodes, "gen-clang-decl-nodes",
+ "Generate Clang AST statement nodes"),
+ clEnumValN(GenClangStmtNodes, "gen-clang-stmt-nodes",
+ "Generate Clang AST statement nodes"),
clEnumValN(GenLLVMCConf, "gen-llvmc",
"Generate LLVMC configuration library"),
- clEnumValN(GenEDHeader, "gen-enhanced-disassembly-header",
- "Generate enhanced disassembly info header"),
clEnumValN(GenEDInfo, "gen-enhanced-disassembly-info",
"Generate enhanced disassembly info"),
+ clEnumValN(GenArmNeon, "gen-arm-neon",
+ "Generate arm_neon.h for clang"),
+ clEnumValN(GenArmNeonSema, "gen-arm-neon-sema",
+ "Generate ARM NEON sema support for clang"),
clEnumValN(PrintEnums, "print-enums",
"Print enum values for a class"),
clEnumValEnd));
@@ -130,7 +161,7 @@ namespace {
cl::list<std::string>
IncludeDirs("I", cl::desc("Directory of include files"),
cl::value_desc("directory"), cl::Prefix);
-
+
cl::opt<std::string>
ClangComponent("clang-component",
cl::desc("Only use warnings from specified component"),
@@ -157,18 +188,18 @@ static bool ParseFile(const std::string &Filename,
std::string ErrorStr;
MemoryBuffer *F = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), &ErrorStr);
if (F == 0) {
- errs() << "Could not open input file '" << Filename << "': "
+ errs() << "Could not open input file '" << Filename << "': "
<< ErrorStr <<"\n";
return true;
}
-
+
// Tell SrcMgr about this buffer, which is what TGParser will pick up.
SrcMgr.AddNewSourceBuffer(F, SMLoc());
// Record the location of the include directory so that the lexer can find
// it later.
SrcMgr.setIncludeDirs(IncludeDirs);
-
+
TGParser Parser(SrcMgr);
return Parser.ParseFile();
@@ -179,119 +210,140 @@ int main(int argc, char **argv) {
PrettyStackTraceProgram X(argc, argv);
cl::ParseCommandLineOptions(argc, argv);
-
+
// Parse the input file.
if (ParseFile(InputFilename, IncludeDirs, SrcMgr))
return 1;
- raw_ostream *Out = &outs();
- if (OutputFilename != "-") {
- std::string Error;
- Out = new raw_fd_ostream(OutputFilename.c_str(), Error);
-
- if (!Error.empty()) {
- errs() << argv[0] << ": error opening " << OutputFilename
- << ":" << Error << "\n";
- return 1;
- }
-
- // Make sure the file gets removed if *gasp* tablegen crashes...
- sys::RemoveFileOnSignal(sys::Path(OutputFilename));
+ std::string Error;
+ tool_output_file Out(OutputFilename.c_str(), Error);
+ if (!Error.empty()) {
+ errs() << argv[0] << ": error opening " << OutputFilename
+ << ":" << Error << "\n";
+ return 1;
}
try {
switch (Action) {
case PrintRecords:
- *Out << Records; // No argument, dump all contents
+ Out.os() << Records; // No argument, dump all contents
break;
case GenEmitter:
- CodeEmitterGen(Records).run(*Out);
+ CodeEmitterGen(Records).run(Out.os());
break;
case GenRegisterEnums:
- RegisterInfoEmitter(Records).runEnums(*Out);
+ RegisterInfoEmitter(Records).runEnums(Out.os());
break;
case GenRegister:
- RegisterInfoEmitter(Records).run(*Out);
+ RegisterInfoEmitter(Records).run(Out.os());
break;
case GenRegisterHeader:
- RegisterInfoEmitter(Records).runHeader(*Out);
+ RegisterInfoEmitter(Records).runHeader(Out.os());
break;
case GenInstrEnums:
- InstrEnumEmitter(Records).run(*Out);
+ InstrEnumEmitter(Records).run(Out.os());
break;
case GenInstrs:
- InstrInfoEmitter(Records).run(*Out);
+ InstrInfoEmitter(Records).run(Out.os());
break;
case GenCallingConv:
- CallingConvEmitter(Records).run(*Out);
+ CallingConvEmitter(Records).run(Out.os());
break;
- case GenAsmWriter:
- AsmWriterEmitter(Records).run(*Out);
+/* case GenAsmWriter:
+ AsmWriterEmitter(Records).run(Out.os());
+ break;*/
+ case GenARMDecoder:
+ ARMDecoderEmitter(Records).run(Out.os());
break;
case GenAsmMatcher:
- AsmMatcherEmitter(Records).run(*Out);
+ AsmMatcherEmitter(Records).run(Out.os());
+ break;
+ case GenClangAttrClasses:
+ ClangAttrClassEmitter(Records).run(Out.os());
+ break;
+ case GenClangAttrImpl:
+ ClangAttrImplEmitter(Records).run(Out.os());
+ break;
+ case GenClangAttrList:
+ ClangAttrListEmitter(Records).run(Out.os());
+ break;
+ case GenClangAttrPCHRead:
+ ClangAttrPCHReadEmitter(Records).run(Out.os());
+ break;
+ case GenClangAttrPCHWrite:
+ ClangAttrPCHWriteEmitter(Records).run(Out.os());
break;
case GenClangDiagsDefs:
- ClangDiagsDefsEmitter(Records, ClangComponent).run(*Out);
+ ClangDiagsDefsEmitter(Records, ClangComponent).run(Out.os());
break;
case GenClangDiagGroups:
- ClangDiagGroupsEmitter(Records).run(*Out);
+ ClangDiagGroupsEmitter(Records).run(Out.os());
+ break;
+ case GenClangDeclNodes:
+ ClangASTNodesEmitter(Records, "Decl", "Decl").run(Out.os());
+ ClangDeclContextEmitter(Records).run(Out.os());
break;
- case GenDisassembler:
- DisassemblerEmitter(Records).run(*Out);
+ case GenClangStmtNodes:
+ ClangASTNodesEmitter(Records, "Stmt", "").run(Out.os());
break;
+/* case GenDisassembler:
+ DisassemblerEmitter(Records).run(Out.os());
+ break;*/
case GenOptParserDefs:
- OptParserEmitter(Records, true).run(*Out);
+ OptParserEmitter(Records, true).run(Out.os());
break;
case GenOptParserImpl:
- OptParserEmitter(Records, false).run(*Out);
+ OptParserEmitter(Records, false).run(Out.os());
break;
case GenDAGISel:
- DAGISelEmitter(Records).run(*Out);
+ DAGISelEmitter(Records).run(Out.os());
break;
case GenFastISel:
- FastISelEmitter(Records).run(*Out);
+ FastISelEmitter(Records).run(Out.os());
break;
case GenSubtarget:
- SubtargetEmitter(Records).run(*Out);
+ SubtargetEmitter(Records).run(Out.os());
break;
case GenIntrinsic:
- IntrinsicEmitter(Records).run(*Out);
+ IntrinsicEmitter(Records).run(Out.os());
break;
case GenTgtIntrinsic:
- IntrinsicEmitter(Records, true).run(*Out);
+ IntrinsicEmitter(Records, true).run(Out.os());
break;
case GenLLVMCConf:
- LLVMCConfigurationEmitter(Records).run(*Out);
- break;
- case GenEDHeader:
- EDEmitter(Records).runHeader(*Out);
+ LLVMCConfigurationEmitter(Records).run(Out.os());
break;
case GenEDInfo:
- EDEmitter(Records).run(*Out);
+ EDEmitter(Records).run(Out.os());
+ break;
+ case GenArmNeon:
+ NeonEmitter(Records).run(Out.os());
+ break;
+ case GenArmNeonSema:
+ NeonEmitter(Records).runHeader(Out.os());
break;
case PrintEnums:
{
std::vector<Record*> Recs = Records.getAllDerivedDefinitions(Class);
for (unsigned i = 0, e = Recs.size(); i != e; ++i)
- *Out << Recs[i]->getName() << ", ";
- *Out << "\n";
+ Out.os() << Recs[i]->getName() << ", ";
+ Out.os() << "\n";
break;
}
default:
assert(1 && "Invalid Action");
return 1;
}
-
- if (Out != &outs())
- delete Out; // Close the file
+
+ // Declare success.
+ Out.keep();
return 0;
-
+
} catch (const TGError &Error) {
errs() << argv[0] << ": error:\n";
PrintError(Error.getLoc(), Error.getMessage());
-
+
} catch (const std::string &Error) {
errs() << argv[0] << ": " << Error << "\n";
} catch (const char *Error) {
@@ -299,10 +351,6 @@ int main(int argc, char **argv) {
} catch (...) {
errs() << argv[0] << ": Unknown unexpected exception occurred.\n";
}
-
- if (Out != &outs()) {
- delete Out; // Close the file
- std::remove(OutputFilename.c_str()); // Remove the file, it's broken
- }
+
return 1;
}
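The TableGen.cpp hunks above drop the hand-rolled output handling (a raw_fd_ostream plus RemoveFileOnSignal plus an explicit delete-and-unlink on every failure path) in favor of tool_output_file: all writes go through Out.os(), and the output file survives only if Out.keep() is reached after a successful run. A toy stand-in illustrating that keep-or-discard idea (this is not LLVM's implementation, just the pattern):

#include <cstdio>
#include <fstream>
#include <string>

// Toy version of the keep-or-discard pattern behind tool_output_file:
// the destructor removes the file unless keep() was called, so every early
// return and every exception path cleans up the half-written output.
class ScratchOutputFile {
  std::string Filename;
  std::ofstream OS;
  bool Keep;
public:
  ScratchOutputFile(const std::string &Name, std::string &Error)
      : Filename(Name), OS(Name.c_str()), Keep(false) {
    if (!OS)
      Error = "cannot open '" + Name + "' for writing";
  }
  ~ScratchOutputFile() {
    OS.close();
    if (!Keep)
      std::remove(Filename.c_str()); // discard broken or partial output
  }
  std::ofstream &os() { return OS; }
  void keep() { Keep = true; } // call only after the run succeeded
};

int main() {
  std::string Error;
  ScratchOutputFile Out("example.out", Error);
  if (!Error.empty()) {
    std::fprintf(stderr, "error opening output: %s\n", Error.c_str());
    return 1;
  }
  Out.os() << "generated content\n";
  Out.keep(); // declare success; without this the file would be removed
  return 0;
}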
diff --git a/libclamav/c++/llvm/utils/TableGen/X86DisassemblerTables.cpp b/libclamav/c++/llvm/utils/TableGen/X86DisassemblerTables.cpp
index be07031..2176224 100644
--- a/libclamav/c++/llvm/utils/TableGen/X86DisassemblerTables.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/X86DisassemblerTables.cpp
@@ -161,7 +161,7 @@ void DisassemblerTables::emitOneID(raw_ostream &o,
/// @param i - The indentation level for that output stream.
static void emitEmptyTable(raw_ostream &o, uint32_t &i)
{
- o.indent(i * 2) << "InstrUID modRMEmptyTable[1] = { 0 };" << "\n";
+ o.indent(i * 2) << "static InstrUID modRMEmptyTable[1] = { 0 };" << "\n";
o << "\n";
}
@@ -275,7 +275,7 @@ void DisassemblerTables::emitModRMDecision(raw_ostream &o1,
return;
}
- o1.indent(i1) << "InstrUID modRMTable" << thisTableNumber;
+ o1.indent(i1) << "static InstrUID modRMTable" << thisTableNumber;
switch (dt) {
default:
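The X86DisassemblerTables change above only adds static to the emitted modRM tables, presumably for linkage: the tables land in a generated include file, and internal linkage keeps them out of the exported symbol table and avoids duplicate definitions if that file is ever pulled into more than one translation unit. A tiny illustration (InstrUID is a stand-in typedef here, not the real disassembler type):

// Illustration of the effect of 'static' on the emitted tables.
typedef unsigned short InstrUID; // stand-in for the disassembler's typedef

// External linkage: every translation unit including the generated file
// would define the same global symbol, visible to the linker.
InstrUID modRMEmptyTableExtern[1] = { 0 };

// Internal linkage: private to the including translation unit, no exported
// symbol, no risk of duplicate-definition link errors.
static InstrUID modRMEmptyTable[1] = { 0 };

static InstrUID modRMTable0[2] = { 7, 7 }; // shape of a per-opcode table

int main() {
  return modRMEmptyTable[0] + modRMTable0[0] + modRMEmptyTableExtern[0];
}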
diff --git a/libclamav/c++/llvm/utils/TableGen/X86RecognizableInstr.cpp b/libclamav/c++/llvm/utils/TableGen/X86RecognizableInstr.cpp
index ea78d41..4dba85b 100644
--- a/libclamav/c++/llvm/utils/TableGen/X86RecognizableInstr.cpp
+++ b/libclamav/c++/llvm/utils/TableGen/X86RecognizableInstr.cpp
@@ -33,7 +33,7 @@ using namespace llvm;
MAP(C9, 38) \
MAP(E8, 39) \
MAP(F0, 40) \
- MAP(F8, 41) \
+ MAP(F8, 41) \
MAP(F9, 42)
// A clone of X86 since we can't depend on something that is generated.
@@ -212,6 +212,7 @@ RecognizableInstr::RecognizableInstr(DisassemblerTables &tables,
HasOpSizePrefix = Rec->getValueAsBit("hasOpSizePrefix");
HasREX_WPrefix = Rec->getValueAsBit("hasREX_WPrefix");
+ HasVEX_4VPrefix = Rec->getValueAsBit("hasVEX_4VPrefix");
HasLockPrefix = Rec->getValueAsBit("hasLockPrefix");
IsCodeGenOnly = Rec->getValueAsBit("isCodeGenOnly");
@@ -230,6 +231,10 @@ void RecognizableInstr::processInstr(DisassemblerTables &tables,
const CodeGenInstruction &insn,
InstrUID uid)
{
+ // Ignore "asm parser only" instructions.
+ if (insn.TheDef->getValueAsBit("isAsmParserOnly"))
+ return;
+
RecognizableInstr recogInstr(tables, insn, uid);
recogInstr.emitInstructionSpecifier(tables);
@@ -298,6 +303,7 @@ RecognizableInstr::filter_ret RecognizableInstr::filter() const {
Name.find("_int") != Name.npos ||
Name.find("Int_") != Name.npos ||
Name.find("_NOREX") != Name.npos ||
+ Name.find("_TC") != Name.npos ||
Name.find("EH_RETURN") != Name.npos ||
Name.find("V_SET") != Name.npos ||
Name.find("LOCK_") != Name.npos ||
@@ -527,7 +533,13 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
"Unexpected number of operands for MRMSrcRegFrm");
HANDLE_OPERAND(roRegister)
HANDLE_OPERAND(rmRegister)
- HANDLE_OPTIONAL(immediate)
+
+ if (HasVEX_4VPrefix)
+ // FIXME: In AVX, the register below becomes the one encoded
+ // in ModRMVEX and the one above the one in the VEX.VVVV field
+ HANDLE_OPTIONAL(rmRegister)
+ else
+ HANDLE_OPTIONAL(immediate)
break;
case X86Local::MRMSrcMem:
// Operand 1 is a register operand in the Reg/Opcode field.
@@ -536,6 +548,12 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
assert(numPhysicalOperands >= 2 && numPhysicalOperands <= 3 &&
"Unexpected number of operands for MRMSrcMemFrm");
HANDLE_OPERAND(roRegister)
+
+ if (HasVEX_4VPrefix)
+ // FIXME: In AVX, the register below becomes the one encoded
+ // in ModRMVEX and the one above the one in the VEX.VVVV field
+ HANDLE_OPTIONAL(rmRegister)
+
HANDLE_OPERAND(memory)
HANDLE_OPTIONAL(immediate)
break;
@@ -818,8 +836,9 @@ OperandType RecognizableInstr::typeFromString(const std::string &s,
TYPE("RST", TYPE_ST)
TYPE("i128mem", TYPE_M128)
TYPE("i64i32imm_pcrel", TYPE_REL64)
+ TYPE("i16imm_pcrel", TYPE_REL16)
TYPE("i32imm_pcrel", TYPE_REL32)
- TYPE("SSECC", TYPE_IMM8)
+ TYPE("SSECC", TYPE_IMM3)
TYPE("brtarget", TYPE_RELv)
TYPE("brtarget8", TYPE_REL8)
TYPE("f80mem", TYPE_M80FP)
@@ -834,8 +853,7 @@ OperandType RecognizableInstr::typeFromString(const std::string &s,
TYPE("opaque512mem", TYPE_M512)
TYPE("SEGMENT_REG", TYPE_SEGMENTREG)
TYPE("DEBUG_REG", TYPE_DEBUGREG)
- TYPE("CONTROL_REG_32", TYPE_CR32)
- TYPE("CONTROL_REG_64", TYPE_CR64)
+ TYPE("CONTROL_REG", TYPE_CONTROLREG)
TYPE("offset8", TYPE_MOFFS8)
TYPE("offset16", TYPE_MOFFS16)
TYPE("offset32", TYPE_MOFFS32)
@@ -894,8 +912,7 @@ OperandEncoding RecognizableInstr::roRegisterEncodingFromString
ENCODING("VR64", ENCODING_REG)
ENCODING("SEGMENT_REG", ENCODING_REG)
ENCODING("DEBUG_REG", ENCODING_REG)
- ENCODING("CONTROL_REG_32", ENCODING_REG)
- ENCODING("CONTROL_REG_64", ENCODING_REG)
+ ENCODING("CONTROL_REG", ENCODING_REG)
errs() << "Unhandled reg/opcode register encoding " << s << "\n";
llvm_unreachable("Unhandled reg/opcode register encoding");
}
@@ -939,6 +956,7 @@ OperandEncoding RecognizableInstr::relocationEncodingFromString
ENCODING("i64i8imm", ENCODING_IB)
ENCODING("i8imm", ENCODING_IB)
ENCODING("i64i32imm_pcrel", ENCODING_ID)
+ ENCODING("i16imm_pcrel", ENCODING_IW)
ENCODING("i32imm_pcrel", ENCODING_ID)
ENCODING("brtarget", ENCODING_Iv)
ENCODING("brtarget8", ENCODING_IB)
diff --git a/libclamav/c++/llvm/utils/TableGen/X86RecognizableInstr.h b/libclamav/c++/llvm/utils/TableGen/X86RecognizableInstr.h
index 84374b0..db4d96d 100644
--- a/libclamav/c++/llvm/utils/TableGen/X86RecognizableInstr.h
+++ b/libclamav/c++/llvm/utils/TableGen/X86RecognizableInstr.h
@@ -52,6 +52,8 @@ private:
bool HasOpSizePrefix;
/// The hasREX_WPrefix field from the record
bool HasREX_WPrefix;
+ /// The hasVEX_4VPrefix field from the record
+ bool HasVEX_4VPrefix;
/// The hasLockPrefix field from the record
bool HasLockPrefix;
/// The isCodeGenOnly field from the record
diff --git a/libclamav/c++/llvm/utils/buildit/GNUmakefile b/libclamav/c++/llvm/utils/buildit/GNUmakefile
index 8d8504c..54577e2 100644
--- a/libclamav/c++/llvm/utils/buildit/GNUmakefile
+++ b/libclamav/c++/llvm/utils/buildit/GNUmakefile
@@ -32,7 +32,7 @@ DSTROOT = $(OBJROOT)/../dst
#######################################################################
-PREFIX = /usr/local
+PREFIX = /Developer/usr/local
# Unless assertions are forced on in the GMAKE command line, disable them.
ifndef ENABLE_ASSERTIONS
@@ -46,6 +46,13 @@ else
LLVM_OPTIMIZED := yes
endif
+# Default to not install libLTO.dylib.
+INSTALL_LIBLTO := no
+
+# Default to do a native build, not a cross-build for an ARM host or simulator.
+ARM_HOSTED_BUILD := no
+IOS_SIM_BUILD := no
+
ifndef RC_ProjectSourceVersion
RC_ProjectSourceVersion = 9999
endif
@@ -59,9 +66,19 @@ install: $(OBJROOT) $(SYMROOT) $(DSTROOT)
cd $(OBJROOT) && \
$(SRC)/utils/buildit/build_llvm "$(RC_ARCHS)" "$(TARGETS)" \
$(SRC) $(PREFIX) $(DSTROOT) $(SYMROOT) \
- $(ENABLE_ASSERTIONS) $(LLVM_OPTIMIZED) \
+ $(ENABLE_ASSERTIONS) $(LLVM_OPTIMIZED) $(INSTALL_LIBLTO) \
+ $(ARM_HOSTED_BUILD) $(IOS_SIM_BUILD) \
$(RC_ProjectSourceVersion) $(RC_ProjectSourceSubversion)
+EmbeddedHosted:
+ $(MAKE) ARM_HOSTED_BUILD=yes PREFIX=/usr/local install
+
+# When building for the iOS simulator, MACOSX_DEPLOYMENT_TARGET is not set
+# by default, but it needs to be set when building tools that run on the host
+# (e.g., tblgen), so set it here.
+EmbeddedSim:
+ export MACOSX_DEPLOYMENT_TARGET=`sw_vers -productVersion`; \
+ $(MAKE) IOS_SIM_BUILD=yes PREFIX=$(SDKROOT)/usr/local install
# installhdrs does nothing, because the headers aren't useful until
# the compiler is installed.
@@ -111,4 +128,4 @@ clean:
$(OBJROOT) $(SYMROOT) $(DSTROOT):
mkdir -p $@
-.PHONY: install installsrc clean
+.PHONY: install installsrc clean EmbeddedHosted EmbeddedSim
diff --git a/libclamav/c++/llvm/utils/buildit/build_llvm b/libclamav/c++/llvm/utils/buildit/build_llvm
index 1fa3fdf..39ec1cc 100755
--- a/libclamav/c++/llvm/utils/buildit/build_llvm
+++ b/libclamav/c++/llvm/utils/buildit/build_llvm
@@ -9,12 +9,12 @@ set -x
# The first parameter is a space-separated list of the architectures the
# compilers will run on. For instance, "ppc i386". If the current machine
# isn't in the list, it will (effectively) be added.
-# FIXME: HOSTS is not used in this script. Use it or Remove it.
HOSTS="$1"
# The second parameter is a space-separated list of the architectures the
# compilers will generate code for. If the current machine isn't in the list, a
# compiler for it will get built anyway, but won't be installed.
+# FIXME: The list of targets is currently hard-coded and TARGETS is not used.
TARGETS="$2"
# The third parameter is the path to the compiler sources. There should be a
@@ -42,11 +42,21 @@ LLVM_ASSERTIONS="$7"
# build.
LLVM_OPTIMIZED="$8"
-# The nineth parameter is the version number of the submission, e.g. 1007.
-LLVM_SUBMIT_VERSION="$9"
+# The ninth parameter is a yes/no that indicates whether libLTO.dylib
+# should be installed.
+INSTALL_LIBLTO="$9"
-# The tenth parameter is the subversion number of the submission, e.g. 03.
-LLVM_SUBMIT_SUBVERSION="${10}"
+# A yes/no parameter that controls whether to cross-build for an ARM host.
+ARM_HOSTED_BUILD="${10}"
+
+# A yes/no parameter that controls whether to cross-build for the iOS simulator
+IOS_SIM_BUILD="${11}"
+
+# The version number of the submission, e.g. 1007.
+LLVM_SUBMIT_VERSION="${12}"
+
+# The subversion number of the submission, e.g. 03.
+LLVM_SUBMIT_SUBVERSION="${13}"
# The current working directory is where the build will happen. It may already
# contain a partial result of an interrupted build, in which case this script
@@ -56,21 +66,6 @@ DIR=`pwd`
DARWIN_VERS=`uname -r | sed 's/\..*//'`
echo DARWIN_VERS = $DARWIN_VERS
-if [ "x$RC_ProjectName" = "xllvmCore_Embedded" ]; then
- DEST_DIR="$DEST_DIR/Developer/Platforms/iPhoneOS.platform"
- mkdir -p "$DEST_DIR"
-fi
-
-DEVELOPER_DIR="${DEVELOPER_DIR-Developer}"
-if [ "x$RC_ProjectName" = "xllvmCore_EmbeddedHosted" ]; then
- DT_HOME="$DEST_DIR/usr"
- HOST_SDKROOT=$SDKROOT
-else
- DT_HOME="$DEST_DIR/$DEVELOPER_DIR/usr"
-fi
-
-DEST_ROOT="/$DEVELOPER_DIR$DEST_ROOT"
-
################################################################################
# Run the build.
@@ -90,8 +85,7 @@ sed -e '/[Aa]pple-style/d' -e '/include.*GNUmakefile/d' $ORIG_SRC_DIR/Makefile >
mkdir -p $DIR/obj-llvm || exit 1
cd $DIR/obj-llvm || exit 1
-
-if [ "x$RC_ProjectName" = "xllvmCore_EmbeddedHosted" ]; then
+if [ "$ARM_HOSTED_BUILD" = yes ]; then
# The cross-tools' build process expects to find an existing cross toolchain
# under names like 'arm-apple-darwin$DARWIN_VERS-as'; so make them.
rm -rf $DIR/bin || exit 1
@@ -106,11 +100,10 @@ if [ "x$RC_ProjectName" = "xllvmCore_EmbeddedHosted" ]; then
# Try to use the platform llvm-gcc. Fall back to gcc if it's not available.
for prog in gcc g++ ; do
P=$DIR/bin/arm-apple-darwin$DARWIN_VERS-${prog}
-# FIXME: Uncomment once llvm-gcc works for this
-# T=`xcrun -find llvm-${prog}`
-# if [ "x$T" = "x" ] ; then
+ T=`xcrun -sdk $SDKROOT -find llvm-${prog}`
+ if [ "x$T" = "x" ] ; then
T=`xcrun -sdk $SDKROOT -find ${prog}`
-# fi
+ fi
echo '#!/bin/sh' > $P || exit 1
echo 'exec '$T' -arch armv6 -isysroot '${SDKROOT}' "$@"' >> $P || exit 1
chmod a+x $P || exit 1
@@ -131,28 +124,23 @@ elif [ $DARWIN_VERS -gt 9 ]; then
unset XTMPCC savedPATH
fi
-
-if [ "x$RC_ProjectName" = "xllvmCore_EmbeddedHosted" ]; then
- if [ \! -f Makefile.config ]; then
- $SRC_DIR/configure --prefix=$DT_HOME \
- --enable-targets=arm \
- --host=arm-apple-darwin10 \
- --target=arm-apple-darwin10 \
- --build=i686-apple-darwin10 \
- --enable-assertions=$LLVM_ASSERTIONS \
- --enable-optimized=$LLVM_OPTIMIZED \
- --disable-bindings \
- || exit 1
- fi
+if [ "$ARM_HOSTED_BUILD" = yes ]; then
+ configure_opts="--enable-targets=arm --host=arm-apple-darwin10 \
+ --target=arm-apple-darwin10 --build=i686-apple-darwin10"
+elif [ "$IOS_SIM_BUILD" = yes ]; then
+ # Use a non-standard "darwin_sim" host triple to trigger a cross-build.
+ configure_opts="--enable-targets=x86 --host=i686-apple-darwin_sim \
+ --build=i686-apple-darwin10"
else
- if [ \! -f Makefile.config ]; then
- $SRC_DIR/configure --prefix=$DT_HOME/local \
- --enable-targets=arm,x86,powerpc,cbe \
- --enable-assertions=$LLVM_ASSERTIONS \
- --enable-optimized=$LLVM_OPTIMIZED \
- --disable-bindings \
- || exit 1
- fi
+ configure_opts="--enable-targets=arm,x86,powerpc,cbe"
+fi
+
+if [ \! -f Makefile.config ]; then
+ $SRC_DIR/configure --prefix=$DEST_DIR$DEST_ROOT $configure_opts \
+ --enable-assertions=$LLVM_ASSERTIONS \
+ --enable-optimized=$LLVM_OPTIMIZED \
+ --disable-bindings \
+ || exit 1
fi
SUBVERSION=`echo $RC_ProjectSourceVersion | sed -e 's/[^.]*\.\([0-9]*\).*/\1/'`
@@ -201,9 +189,11 @@ if [ "x$MAJ_VER" != "x4" -o "x$MIN_VER" != "x0" ]; then
JOBS_FLAG="-j $SYSCTL"
fi
-make $JOBS_FLAG $OPTIMIZE_OPTS UNIVERSAL=1 UNIVERSAL_ARCH="$TARGETS" \
- UNIVERSAL_SDK_PATH=$HOST_SDKROOT \
+make $JOBS_FLAG $OPTIMIZE_OPTS UNIVERSAL=1 UNIVERSAL_ARCH="$HOSTS" \
+ UNIVERSAL_SDK_PATH=$SDKROOT \
NO_RUNTIME_LIBS=1 \
+ DISABLE_EDIS=1 \
+ DEBUG_SYMBOLS=1 \
LLVM_SUBMIT_VERSION=$LLVM_SUBMIT_VERSION \
LLVM_SUBMIT_SUBVERSION=$LLVM_SUBMIT_SUBVERSION \
CXXFLAGS="-DLLVM_VERSION_INFO='\" Apple Build #$LLVM_VERSION\"'" \
@@ -226,8 +216,10 @@ rm -rf * || exit 1
cd $DIR/obj-llvm || exit 1
# Install the tree into the destination directory.
-make $LOCAL_MAKEFLAGS $OPTIMIZE_OPTS UNIVERSAL=1 UNIVERSAL_ARCH="$TARGETS" \
+make $LOCAL_MAKEFLAGS $OPTIMIZE_OPTS UNIVERSAL=1 UNIVERSAL_ARCH="$HOSTS" \
NO_RUNTIME_LIBS=1 \
+ DISABLE_EDIS=1 \
+ DEBUG_SYMBOLS=1 \
LLVM_SUBMIT_VERSION=$LLVM_SUBMIT_VERSION \
LLVM_SUBMIT_SUBVERSION=$LLVM_SUBMIT_SUBVERSION \
OPTIMIZE_OPTION='-O3' VERBOSE=1 install
@@ -248,14 +240,17 @@ echo "#define LLVM_MINOR_VERSION ${RC_ProjectSourceSubversion}" >> $DEST_DIR$DES
if [ "x$LLVM_DEBUG" != "x1" ]; then
# Strip local symbols from llvm libraries.
- strip -S $DEST_DIR$DEST_ROOT/lib/*.[oa]
+ #
+ # Use '-l' to strip i386 modules. N.B. that flag doesn't work with kext or
+ # PPC objects!
+ strip -Sl $DEST_DIR$DEST_ROOT/lib/*.[oa]
for f in `ls $DEST_DIR$DEST_ROOT/lib/*.so`; do
- strip -Sx $f
+ strip -Sxl $f
done
fi
# Copy over the tblgen utility.
-cp `find $DIR -name tblgen` $DT_HOME/local/bin
+cp `find $DIR -name tblgen` $DEST_DIR$DEST_ROOT/bin
# Remove .dir files
cd $DEST_DIR$DEST_ROOT
@@ -274,14 +269,8 @@ else
-exec lipo -extract ppc7400 -extract i386 -extract x86_64 {} -output {} \;
fi
-cd $DEST_DIR$DEST_ROOT
-mkdir -p $DT_HOME/lib
-mv lib/libLTO.dylib $DT_HOME/lib/libLTO.dylib
-strip -S $DT_HOME/lib/libLTO.dylib
-rm -f lib/libLTO.a lib/libLTO.la
-
# The Hello dylib is an example of how to build a pass. No need to install it.
-rm lib/libLLVMHello.dylib
+rm $DEST_DIR$DEST_ROOT/lib/LLVMHello.dylib
# Compress manpages
MDIR=$DEST_DIR$DEST_ROOT/share/man/man1
@@ -327,25 +316,56 @@ find obj-* -name \*.\[chy\] -o -name \*.cpp -print \
| cpio -pdml $SYM_DIR/src || exit 1
################################################################################
+# Install and strip libLTO.dylib
+
+cd $DEST_DIR$DEST_ROOT
+if [ "$INSTALL_LIBLTO" = "yes" ]; then
+ DT_HOME="$DEST_DIR/Developer/usr"
+ mkdir -p $DT_HOME/lib
+ mv lib/libLTO.dylib $DT_HOME/lib/libLTO.dylib
+
+ # Save a copy of the unstripped dylib
+ mkdir -p $SYM_DIR/Developer/usr/lib
+ cp $DT_HOME/lib/libLTO.dylib $SYM_DIR/Developer/usr/lib/libLTO.dylib
+
+ # Use '-l' to strip i386 modules. N.B. that flag doesn't work with kext or
+ # PPC objects!
+ strip -arch all -Sl $DT_HOME/lib/libLTO.dylib
+
+ if [ "x$DISABLE_USR_LINKS" == "x" ]; then
+ # Add a symlink in /usr/lib for B&I.
+ mkdir -p $DEST_DIR/usr/lib/
+ (cd $DEST_DIR/usr/lib && \
+ ln -s ../../Developer/usr/lib/libLTO.dylib ./libLTO.dylib)
+ fi
+else
+ rm -f lib/libLTO.dylib
+fi
+rm -f lib/libLTO.a lib/libLTO.la
+
+################################################################################
# Remove debugging information from DEST_DIR.
+cd $DIR || exit 1
+
find $DEST_DIR -name \*.a -print | xargs ranlib || exit 1
find $DEST_DIR -name \*.dSYM -print | xargs rm -r || exit 1
-chgrp -h -R wheel $DEST_DIR
-chgrp -R wheel $DEST_DIR
-################################################################################
-# Remove tar ball from docs directory
+# Strip debugging information from files
+#
+# Use '-l' to strip i386 modules. N.B. that flag doesn't work with kext or
+# PPC objects!
+find $DEST_DIR -perm -0111 -type f \
+ ! \( -name '*.la' -o -name gccas -o -name gccld -o -name llvm-config \) \
+ -print | xargs -n 1 -P ${SYSCTL} strip -arch all -Sl
-find $DEST_DIR -name html.tar.gz -exec rm {} \;
+chgrp -h -R wheel $DEST_DIR
+chgrp -R wheel $DEST_DIR
################################################################################
-# symlinks so that B&I can find things
+# Remove the docs directory
-cd $DEST_DIR
-mkdir -p ./usr/lib/
-cd usr/lib
-ln -s ../../$DEVELOPER_DIR/usr/lib/libLTO.dylib ./libLTO.dylib
+rm -rf $DEST_DIR$DEST_ROOT/docs
################################################################################
# w00t! Done!
diff --git a/libclamav/c++/llvm/utils/count/count.c b/libclamav/c++/llvm/utils/count/count.c
index a37e1e0..ae96791 100644
--- a/libclamav/c++/llvm/utils/count/count.c
+++ b/libclamav/c++/llvm/utils/count/count.c
@@ -26,13 +26,15 @@ int main(int argc, char **argv) {
}
NumLines = 0;
- while ((NumRead = fread(Buffer, 1, sizeof(Buffer), stdin))) {
+ do {
unsigned i;
+ NumRead = fread(Buffer, 1, sizeof(Buffer), stdin);
+
for (i = 0; i != NumRead; ++i)
if (Buffer[i] == '\n')
++NumLines;
- }
+ } while (NumRead == sizeof(Buffer));
if (!feof(stdin)) {
fprintf(stderr, "%s: error reading stdin\n", argv[0]);
diff --git a/libclamav/c++/llvm/utils/fpcmp/Makefile b/libclamav/c++/llvm/utils/fpcmp/Makefile
deleted file mode 100644
index fd2f747..0000000
--- a/libclamav/c++/llvm/utils/fpcmp/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-##===- utils/fpcmp/Makefile --------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TOOLNAME = fpcmp
-USEDLIBS = LLVMSupport.a LLVMSystem.a
-NO_INSTALL = 1
-
-include $(LEVEL)/Makefile.common
-
diff --git a/libclamav/c++/llvm/utils/fpcmp/fpcmp.cpp b/libclamav/c++/llvm/utils/fpcmp/fpcmp.cpp
deleted file mode 100644
index 66d8ab1..0000000
--- a/libclamav/c++/llvm/utils/fpcmp/fpcmp.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-//===- fpcmp.cpp - A fuzzy "cmp" that permits floating point noise --------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// fpcmp is a tool that basically works like the 'cmp' tool, except that it can
-// tolerate errors due to floating point noise, with the -r and -a options.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/FileUtilities.h"
-#include <iostream>
-using namespace llvm;
-
-namespace {
- cl::opt<std::string>
- File1(cl::Positional, cl::desc("<input file #1>"), cl::Required);
- cl::opt<std::string>
- File2(cl::Positional, cl::desc("<input file #2>"), cl::Required);
-
- cl::opt<double>
- RelTolerance("r", cl::desc("Relative error tolerated"), cl::init(0));
- cl::opt<double>
- AbsTolerance("a", cl::desc("Absolute error tolerated"), cl::init(0));
-}
-
-int main(int argc, char **argv) {
- cl::ParseCommandLineOptions(argc, argv);
-
- std::string ErrorMsg;
- int DF = DiffFilesWithTolerance(sys::PathWithStatus(File1),
- sys::PathWithStatus(File2),
- AbsTolerance, RelTolerance, &ErrorMsg);
- if (!ErrorMsg.empty())
- std::cerr << argv[0] << ": " << ErrorMsg << "\n";
- return DF;
-}
-
diff --git a/libclamav/c++/llvm/utils/lit/lit/ExampleTests/Clang/lit.cfg b/libclamav/c++/llvm/utils/lit/lit/ExampleTests/Clang/lit.cfg
index 114ac60..1e1e807 100644
--- a/libclamav/c++/llvm/utils/lit/lit/ExampleTests/Clang/lit.cfg
+++ b/libclamav/c++/llvm/utils/lit/lit/ExampleTests/Clang/lit.cfg
@@ -41,40 +41,7 @@ def inferClang(PATH):
return clang
-def inferClangCC(clang, PATH):
- clangcc = os.getenv('CLANGCC')
-
- # If the user set clang in the environment, definitely use that and don't
- # try to validate.
- if clangcc:
- return clangcc
-
- # Otherwise try adding -cc since we expect to be looking in a build
- # directory.
- if clang.endswith('.exe'):
- clangccName = clang[:-4] + '-cc.exe'
- else:
- clangccName = clang + '-cc'
- clangcc = lit.util.which(clangccName, PATH)
- if not clangcc:
- # Otherwise ask clang.
- res = lit.util.capture([clang, '-print-prog-name=clang-cc'])
- res = res.strip()
- if res and os.path.exists(res):
- clangcc = res
-
- if not clangcc:
- lit.fatal("couldn't find 'clang-cc' program, try setting "
- "CLANGCC in your environment")
-
- return clangcc
-
clang = inferClang(config.environment['PATH'])
if not lit.quiet:
lit.note('using clang: %r' % clang)
config.substitutions.append( (' clang ', ' ' + clang + ' ') )
-
-clang_cc = inferClangCC(clang, config.environment['PATH'])
-if not lit.quiet:
- lit.note('using clang-cc: %r' % clang_cc)
-config.substitutions.append( (' clang-cc ', ' ' + clang_cc + ' ') )
diff --git a/libclamav/c++/llvm/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/pct-S.ll b/libclamav/c++/llvm/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/pct-S.ll
index 4e8a582..3ff3633 100644
--- a/libclamav/c++/llvm/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/pct-S.ll
+++ b/libclamav/c++/llvm/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/pct-S.ll
@@ -1 +1 @@
-; RUN: grep "hi" %S/data.txt
\ No newline at end of file
+; RUN: grep "hi" %S/data.txt
diff --git a/libclamav/c++/llvm/utils/lit/lit/ExampleTests/lit.cfg b/libclamav/c++/llvm/utils/lit/lit/ExampleTests/lit.cfg
index dbd574f..20ee37d 100644
--- a/libclamav/c++/llvm/utils/lit/lit/ExampleTests/lit.cfg
+++ b/libclamav/c++/llvm/utils/lit/lit/ExampleTests/lit.cfg
@@ -21,3 +21,6 @@ config.test_exec_root = None
# target_triple: Used by ShTest and TclTest formats for XFAIL checks.
config.target_triple = 'foo'
+
+# available_features: Used by ShTest and TclTest formats for REQUIRES checks.
+config.available_features = ['some-feature-name']
diff --git a/libclamav/c++/llvm/utils/lit/lit/ExampleTests/required-and-missing.c b/libclamav/c++/llvm/utils/lit/lit/ExampleTests/required-and-missing.c
new file mode 100644
index 0000000..47ba72e
--- /dev/null
+++ b/libclamav/c++/llvm/utils/lit/lit/ExampleTests/required-and-missing.c
@@ -0,0 +1,4 @@
+// This test shouldn't be run, the required feature is missing.
+//
+// RUN: false
+// REQUIRES: some-missing-feature-name
diff --git a/libclamav/c++/llvm/utils/lit/lit/ExampleTests/required-and-present.c b/libclamav/c++/llvm/utils/lit/lit/ExampleTests/required-and-present.c
new file mode 100644
index 0000000..2a09e08
--- /dev/null
+++ b/libclamav/c++/llvm/utils/lit/lit/ExampleTests/required-and-present.c
@@ -0,0 +1,2 @@
+// RUN: true
+// REQUIRES: some-feature-name
diff --git a/libclamav/c++/llvm/utils/lit/lit/LitConfig.py b/libclamav/c++/llvm/utils/lit/lit/LitConfig.py
index 0e0a493..ac48591 100644
--- a/libclamav/c++/llvm/utils/lit/lit/LitConfig.py
+++ b/libclamav/c++/llvm/utils/lit/lit/LitConfig.py
@@ -15,7 +15,7 @@ class LitConfig:
import Util as util
def __init__(self, progname, path, quiet,
- useValgrind, valgrindArgs,
+ useValgrind, valgrindLeakCheck, valgrindArgs,
useTclAsSh,
noExecute, debug, isWindows,
params):
@@ -25,7 +25,8 @@ class LitConfig:
self.path = list(map(str, path))
self.quiet = bool(quiet)
self.useValgrind = bool(useValgrind)
- self.valgrindArgs = list(valgrindArgs)
+ self.valgrindLeakCheck = bool(valgrindLeakCheck)
+ self.valgrindUserArgs = list(valgrindArgs)
self.useTclAsSh = bool(useTclAsSh)
self.noExecute = noExecute
self.debug = debug
@@ -36,6 +37,22 @@ class LitConfig:
self.numErrors = 0
self.numWarnings = 0
+ self.valgrindArgs = []
+ self.valgrindTriple = ""
+ if self.useValgrind:
+ self.valgrindTriple = "-vg"
+ self.valgrindArgs = ['valgrind', '-q', '--run-libc-freeres=no',
+ '--tool=memcheck', '--trace-children=yes',
+ '--error-exitcode=123']
+ if self.valgrindLeakCheck:
+ self.valgrindTriple += "_leak"
+ self.valgrindArgs.append('--leak-check=full')
+ else:
+ # The default is 'summary'.
+ self.valgrindArgs.append('--leak-check=no')
+ self.valgrindArgs.extend(self.valgrindUserArgs)
+
+
def load_config(self, config, path):
"""load_config(config, path) - Load a config object from an alternate
path."""
@@ -54,7 +71,7 @@ class LitConfig:
self.bashPath = Util.which('bash', os.pathsep.join(self.path))
if self.bashPath is None:
# Check some known paths.
- for path in ('/bin/bash', '/usr/bin/bash'):
+ for path in ('/bin/bash', '/usr/bin/bash', '/usr/local/bin/bash'):
if os.path.exists(path):
self.bashPath = path
break
diff --git a/libclamav/c++/llvm/utils/lit/lit/LitFormats.py b/libclamav/c++/llvm/utils/lit/lit/LitFormats.py
index 270f087..e86f103 100644
--- a/libclamav/c++/llvm/utils/lit/lit/LitFormats.py
+++ b/libclamav/c++/llvm/utils/lit/lit/LitFormats.py
@@ -1,3 +1,2 @@
from TestFormats import GoogleTest, ShTest, TclTest
from TestFormats import SyntaxCheckTest, OneCommandPerFileTest
-
diff --git a/libclamav/c++/llvm/utils/lit/lit/LitTestCase.py b/libclamav/c++/llvm/utils/lit/lit/LitTestCase.py
new file mode 100644
index 0000000..8951185
--- /dev/null
+++ b/libclamav/c++/llvm/utils/lit/lit/LitTestCase.py
@@ -0,0 +1,30 @@
+import unittest
+import Test
+
+"""
+TestCase adaptor for providing a 'unittest' compatible interface to 'lit' tests.
+"""
+
+class UnresolvedError(RuntimeError):
+ pass
+
+class LitTestCase(unittest.TestCase):
+ def __init__(self, test, lit_config):
+ unittest.TestCase.__init__(self)
+ self._test = test
+ self._lit_config = lit_config
+
+ def id(self):
+ return self._test.getFullName()
+
+ def shortDescription(self):
+ return self._test.getFullName()
+
+ def runTest(self):
+ tr, output = self._test.config.test_format.execute(
+ self._test, self._lit_config)
+
+ if tr is Test.UNRESOLVED:
+ raise UnresolvedError(output)
+ elif tr.isFailure:
+ self.fail(output)
diff --git a/libclamav/c++/llvm/utils/lit/lit/ShUtil.py b/libclamav/c++/llvm/utils/lit/lit/ShUtil.py
index c8f9332..dda622a 100644
--- a/libclamav/c++/llvm/utils/lit/lit/ShUtil.py
+++ b/libclamav/c++/llvm/utils/lit/lit/ShUtil.py
@@ -67,6 +67,9 @@ class ShLexer:
elif c == '"':
self.eat()
str += self.lex_arg_quoted('"')
+ elif c == "'":
+ self.eat()
+ str += self.lex_arg_quoted("'")
elif not self.win32Escapes and c == '\\':
# Outside of a string, '\\' escapes everything.
self.eat()
@@ -287,6 +290,10 @@ class TestShParse(unittest.TestCase):
Pipeline([Command(['echo', 'hello'], [])], False))
self.assertEqual(self.parse('echo ""'),
Pipeline([Command(['echo', ''], [])], False))
+ self.assertEqual(self.parse("""echo -DFOO='a'"""),
+ Pipeline([Command(['echo', '-DFOO=a'], [])], False))
+ self.assertEqual(self.parse('echo -DFOO="a"'),
+ Pipeline([Command(['echo', '-DFOO=a'], [])], False))
def test_redirection(self):
self.assertEqual(self.parse('echo hello > c'),
diff --git a/libclamav/c++/llvm/utils/lit/lit/TestFormats.py b/libclamav/c++/llvm/utils/lit/lit/TestFormats.py
index d87a467..7ffbd2b 100644
--- a/libclamav/c++/llvm/utils/lit/lit/TestFormats.py
+++ b/libclamav/c++/llvm/utils/lit/lit/TestFormats.py
@@ -1,14 +1,21 @@
import os
+import platform
import Test
import TestRunner
import Util
+kIsWindows = platform.system() == 'Windows'
+
class GoogleTest(object):
def __init__(self, test_sub_dir, test_suffix):
self.test_sub_dir = str(test_sub_dir)
self.test_suffix = str(test_suffix)
+ # On Windows, assume tests will also end in '.exe'.
+ if kIsWindows:
+ self.test_suffix += '.exe'
+
def getGTestTests(self, path, litConfig, localConfig):
"""getGTestTests(path) - [name]
@@ -72,6 +79,9 @@ class GoogleTest(object):
testName = os.path.join(namePrefix, testName)
cmd = [testPath, '--gtest_filter=' + testName]
+ if litConfig.useValgrind:
+ cmd = litConfig.valgrindArgs + cmd
+
out, err, exitCode = TestRunner.executeCommand(
cmd, env=test.config.environment)
@@ -87,8 +97,9 @@ class FileBasedTest(object):
litConfig, localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for filename in os.listdir(source_path):
- # Ignore dot files.
- if filename.startswith('.'):
+ # Ignore dot files and excluded tests.
+ if (filename.startswith('.') or
+ filename in localConfig.excludes):
continue
filepath = os.path.join(source_path, filename)
@@ -125,14 +136,20 @@ class OneCommandPerFileTest:
self.command = [command]
else:
self.command = list(command)
- self.dir = str(dir)
+ if dir is not None:
+ dir = str(dir)
+ self.dir = dir
self.recursive = bool(recursive)
self.pattern = re.compile(pattern)
self.useTempInput = useTempInput
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
- for dirname,subdirs,filenames in os.walk(self.dir):
+ dir = self.dir
+ if dir is None:
+ dir = testSuite.getSourcePath(path_in_suite)
+
+ for dirname,subdirs,filenames in os.walk(dir):
if not self.recursive:
subdirs[:] = []
@@ -147,7 +164,7 @@ class OneCommandPerFileTest:
continue
path = os.path.join(dirname,filename)
- suffix = path[len(self.dir):]
+ suffix = path[len(dir):]
if suffix.startswith(os.sep):
suffix = suffix[1:]
test = Test.Test(testSuite,
@@ -173,8 +190,10 @@ class OneCommandPerFileTest:
self.createTempInput(tmp, test)
tmp.flush()
cmd.append(tmp.name)
- else:
+ elif hasattr(test, 'source_path'):
cmd.append(test.source_path)
+ else:
+ cmd.append(test.getSourcePath())
out, err, exitCode = TestRunner.executeCommand(cmd)
diff --git a/libclamav/c++/llvm/utils/lit/lit/TestRunner.py b/libclamav/c++/llvm/utils/lit/lit/TestRunner.py
index a7de2b7..0eb51a8 100644
--- a/libclamav/c++/llvm/utils/lit/lit/TestRunner.py
+++ b/libclamav/c++/llvm/utils/lit/lit/TestRunner.py
@@ -13,11 +13,13 @@ class InternalShellError(Exception):
self.command = command
self.message = message
+kIsWindows = platform.system() == 'Windows'
+
# Don't use close_fds on Windows.
-kUseCloseFDs = platform.system() != 'Windows'
+kUseCloseFDs = not kIsWindows
# Use temporary files to replace /dev/null on Windows.
-kAvoidDevNull = platform.system() == 'Windows'
+kAvoidDevNull = kIsWindows
def executeCommand(command, cwd=None, env=None):
p = subprocess.Popen(command, cwd=cwd,
@@ -63,6 +65,8 @@ def executeShCmd(cmd, cfg, cwd, results):
procs = []
input = subprocess.PIPE
stderrTempFiles = []
+ opened_files = []
+ named_temp_files = []
# To avoid deadlock, we use a single stderr stream for piped
# output. This is null until we have seen some output using
# stderr.
@@ -113,8 +117,11 @@ def executeShCmd(cmd, cfg, cwd, results):
else:
r[2] = open(r[0], r[1])
# Workaround a Win32 and/or subprocess bug when appending.
+ #
+ # FIXME: Actually, this is probably an instance of PR6753.
if r[1] == 'a':
r[2].seek(0, 2)
+ opened_files.append(r[2])
result = r[2]
final_redirects.append(result)
@@ -142,6 +149,15 @@ def executeShCmd(cmd, cfg, cwd, results):
if not args[0]:
raise InternalShellError(j, '%r: command not found' % j.args[0])
+ # Replace uses of /dev/null with temporary files.
+ if kAvoidDevNull:
+ for i,arg in enumerate(args):
+ if arg == "/dev/null":
+ f = tempfile.NamedTemporaryFile(delete=False)
+ f.close()
+ named_temp_files.append(f.name)
+ args[i] = f.name
+
procs.append(subprocess.Popen(args, cwd=cwd,
stdin = stdin,
stdout = stdout,
@@ -176,7 +192,7 @@ def executeShCmd(cmd, cfg, cwd, results):
else:
err = ''
procData[i] = (out,err)
-
+
# Read stderr out of the temp files.
for i,f in stderrTempFiles:
f.seek(0, 0)
@@ -199,6 +215,17 @@ def executeShCmd(cmd, cfg, cwd, results):
else:
exitCode = res
+ # Explicitly close any redirected files.
+ for f in opened_files:
+ f.close()
+
+ # Remove any named temporary files we created.
+ for f in named_temp_files:
+ try:
+ os.remove(f)
+ except OSError:
+ pass
+
if cmd.negate:
exitCode = not exitCode
@@ -252,6 +279,14 @@ def executeTclScriptInternal(test, litConfig, tmpBase, commands, cwd):
except:
return (Test.FAIL, "Tcl 'exec' parse error on: %r" % ln)
+ if litConfig.useValgrind:
+ for pipeline in cmds:
+ if pipeline.commands:
+ # Only valgrind the first command in each pipeline, to avoid
+ # valgrinding things like grep, not, and FileCheck.
+ cmd = pipeline.commands[0]
+ cmd.args = litConfig.valgrindArgs + cmd.args
+
cmd = cmds[0]
for c in cmds[1:]:
cmd = ShUtil.Seq(cmd, '&&', c)
@@ -277,11 +312,6 @@ def executeTclScriptInternal(test, litConfig, tmpBase, commands, cwd):
out,err,exitCode = executeCommand(command, cwd=cwd,
env=test.config.environment)
- # Tcl commands fail on standard error output.
- if err:
- exitCode = 1
- out = 'Command has output on stderr!\n\n' + out
-
return out,err,exitCode
else:
results = []
@@ -293,11 +323,6 @@ def executeTclScriptInternal(test, litConfig, tmpBase, commands, cwd):
out = err = ''
- # Tcl commands fail on standard error output.
- if [True for _,_,err,res in results if err]:
- exitCode = 1
- out += 'Command has output on stderr!\n\n'
-
for i,(cmd, cmd_out, cmd_err, res) in enumerate(results):
out += 'Command %d: %s\n' % (i, ' '.join('"%s"' % s for s in cmd.args))
out += 'Command %d Result: %r\n' % (i, res)
@@ -327,12 +352,7 @@ def executeScript(test, litConfig, tmpBase, commands, cwd):
if litConfig.useValgrind:
# FIXME: Running valgrind on sh is overkill. We probably could just
# run on clang with no real loss.
- valgrindArgs = ['valgrind', '-q',
- '--tool=memcheck', '--trace-children=yes',
- '--error-exitcode=123']
- valgrindArgs.extend(litConfig.valgrindArgs)
-
- command = valgrindArgs + command
+ command = litConfig.valgrindArgs + command
return executeCommand(command, cwd=cwd, env=test.config.environment)
@@ -353,9 +373,7 @@ def isExpectedFail(xfails, xtargets, target_triple):
return True
-import re
-
-def parseIntegratedTestScript(test):
+def parseIntegratedTestScript(test, normalize_slashes=False):
"""parseIntegratedTestScript - Scan an LLVM/Clang style integrated test
script and extract the lines to 'RUN' as well as 'XFAIL' and 'XTARGET'
information. The RUN lines also will have variable substitution performed.
@@ -366,18 +384,25 @@ def parseIntegratedTestScript(test):
#
# FIXME: This should not be here?
sourcepath = test.getSourcePath()
+ sourcedir = os.path.dirname(sourcepath)
execpath = test.getExecPath()
execdir,execbase = os.path.split(execpath)
tmpBase = os.path.join(execdir, 'Output', execbase)
if test.index is not None:
tmpBase += '_%d' % test.index
+ # Normalize slashes, if requested.
+ if normalize_slashes:
+ sourcepath = sourcepath.replace('\\', '/')
+ sourcedir = sourcedir.replace('\\', '/')
+ tmpBase = tmpBase.replace('\\', '/')
+
# We use #_MARKER_# to hide %% while we do the other substitutions.
substitutions = [('%%', '#_MARKER_#')]
substitutions.extend(test.config.substitutions)
substitutions.extend([('%s', sourcepath),
- ('%S', os.path.dirname(sourcepath)),
- ('%p', os.path.dirname(sourcepath)),
+ ('%S', sourcedir),
+ ('%p', sourcedir),
('%t', tmpBase + '.tmp'),
# FIXME: Remove this once we kill DejaGNU.
('%abs_tmp', tmpBase + '.tmp'),
@@ -387,21 +412,8 @@ def parseIntegratedTestScript(test):
script = []
xfails = []
xtargets = []
- ignoredAny = False
+ requires = []
for ln in open(sourcepath):
- conditional = re.search('IF\((.+?)\((.+?)\)\):', ln)
- if conditional:
- ln = ln[conditional.end():]
- condition = conditional.group(1)
- value = conditional.group(2)
-
- # Actually test the condition.
- if condition not in test.config.conditions:
- return (Test.UNRESOLVED, "unknown condition '"+condition+"'")
- if not test.config.conditions[condition](value):
- ignoredAny = True
- continue
-
if 'RUN:' in ln:
# Isolate the command to run.
index = ln.index('RUN:')
@@ -421,6 +433,9 @@ def parseIntegratedTestScript(test):
elif 'XTARGET:' in ln:
items = ln[ln.index('XTARGET:') + 8:].split(',')
xtargets.extend([s.strip() for s in items])
+ elif 'REQUIRES:' in ln:
+ items = ln[ln.index('REQUIRES:') + 9:].split(',')
+ requires.extend([s.strip() for s in items])
elif 'END.' in ln:
# Check for END. lines.
if ln[ln.index('END.'):].strip() == 'END.':
@@ -438,38 +453,53 @@ def parseIntegratedTestScript(test):
# Verify the script contains a run line.
if not script:
- if ignoredAny:
- return (Test.UNSUPPORTED, "Test has only ignored run lines")
return (Test.UNRESOLVED, "Test has no run line!")
+ # Check for unterminated run lines.
if script[-1][-1] == '\\':
return (Test.UNRESOLVED, "Test has unterminated run lines (with '\\')")
+ # Check that we have the required features:
+ missing_required_features = [f for f in requires
+ if f not in test.config.available_features]
+ if missing_required_features:
+ msg = ', '.join(missing_required_features)
+ return (Test.UNSUPPORTED,
+ "Test requires the following features: %s" % msg)
+
isXFail = isExpectedFail(xfails, xtargets, test.suite.config.target_triple)
return script,isXFail,tmpBase,execdir
-def formatTestOutput(status, out, err, exitCode, script):
+def formatTestOutput(status, out, err, exitCode, failDueToStderr, script):
output = StringIO.StringIO()
print >>output, "Script:"
print >>output, "--"
print >>output, '\n'.join(script)
print >>output, "--"
- print >>output, "Exit Code: %r" % exitCode
- print >>output, "Command Output (stdout):"
- print >>output, "--"
- output.write(out)
- print >>output, "--"
- print >>output, "Command Output (stderr):"
- print >>output, "--"
- output.write(err)
- print >>output, "--"
+ print >>output, "Exit Code: %r" % exitCode,
+ if failDueToStderr:
+ print >>output, "(but there was output on stderr)"
+ else:
+ print >>output
+ if out:
+ print >>output, "Command Output (stdout):"
+ print >>output, "--"
+ output.write(out)
+ print >>output, "--"
+ if err:
+ print >>output, "Command Output (stderr):"
+ print >>output, "--"
+ output.write(err)
+ print >>output, "--"
return (status, output.getvalue())
def executeTclTest(test, litConfig):
if test.config.unsupported:
return (Test.UNSUPPORTED, 'Test is unsupported')
- res = parseIntegratedTestScript(test)
+ # Parse the test script, normalizing slashes in substitutions on Windows
+ # (since otherwise Tcl style lexing will treat them as escapes).
+ res = parseIntegratedTestScript(test, normalize_slashes=kIsWindows)
if len(res) == 2:
return res
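The check added in the hunk above turns a REQUIRES: line into an UNSUPPORTED result whenever a named feature is missing from the suite's available_features set. The same logic as a self-contained helper (names are illustrative, not the lit API):

  # Sketch, not part of the patch: the REQUIRES vs. available-features check.
  def check_requirements(requires, available_features):
      missing = [f for f in requires if f not in available_features]
      if missing:
          return (False,
                  'Test requires the following features: %s' % ', '.join(missing))
      return (True, '')

  check_requirements(['asserts', 'loadable_module'], set(['asserts']))
  # -> (False, 'Test requires the following features: loadable_module')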
@@ -485,18 +515,30 @@ def executeTclTest(test, litConfig):
if len(res) == 2:
return res
+ # Test for failure. In addition to the exit code, Tcl commands are
+ # considered to fail if there is any standard error output.
out,err,exitCode = res
if isXFail:
- ok = exitCode != 0
- status = (Test.XPASS, Test.XFAIL)[ok]
+ ok = exitCode != 0 or err
+ if ok:
+ status = Test.XFAIL
+ else:
+ status = Test.XPASS
else:
- ok = exitCode == 0
- status = (Test.FAIL, Test.PASS)[ok]
+ ok = exitCode == 0 and not err
+ if ok:
+ status = Test.PASS
+ else:
+ status = Test.FAIL
if ok:
return (status,'')
- return formatTestOutput(status, out, err, exitCode, script)
+ # Set a flag for formatTestOutput so it can explain why the test was
+ # considered to have failed, despite having an exit code of 0.
+ failDueToStderr = exitCode == 0 and err
+
+ return formatTestOutput(status, out, err, exitCode, failDueToStderr, script)
def executeShTest(test, litConfig, useExternalSh):
if test.config.unsupported:
@@ -524,12 +566,21 @@ def executeShTest(test, litConfig, useExternalSh):
out,err,exitCode = res
if isXFail:
ok = exitCode != 0
- status = (Test.XPASS, Test.XFAIL)[ok]
+ if ok:
+ status = Test.XFAIL
+ else:
+ status = Test.XPASS
else:
ok = exitCode == 0
- status = (Test.FAIL, Test.PASS)[ok]
+ if ok:
+ status = Test.PASS
+ else:
+ status = Test.FAIL
if ok:
return (status,'')
- return formatTestOutput(status, out, err, exitCode, script)
+ # Sh tests are not considered to fail just from stderr output.
+ failDueToStderr = False
+
+ return formatTestOutput(status, out, err, exitCode, failDueToStderr, script)
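Taken together, the two hunks above give Tcl-style and sh-style tests different failure rules: a Tcl test now fails on any stderr output even when the exit code is 0, while a sh test fails only on a nonzero exit code. A compact sketch of that decision (helper name and string results are illustrative):

  # Sketch, not part of the patch: status decision for both script styles.
  def decide_status(exitCode, err, isXFail, stderr_is_failure):
      failed = exitCode != 0 or (stderr_is_failure and bool(err))
      if isXFail:
          return 'XFAIL' if failed else 'XPASS'
      return 'FAIL' if failed else 'PASS'

  decide_status(0, 'warning: ...', isXFail=False, stderr_is_failure=True)
  # -> 'FAIL' under Tcl rules; with stderr_is_failure=False it would be 'PASS'.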
diff --git a/libclamav/c++/llvm/utils/lit/lit/TestingConfig.py b/libclamav/c++/llvm/utils/lit/lit/TestingConfig.py
index d6f2a4d..5c1b273 100644
--- a/libclamav/c++/llvm/utils/lit/lit/TestingConfig.py
+++ b/libclamav/c++/llvm/utils/lit/lit/TestingConfig.py
@@ -29,7 +29,7 @@ class TestingConfig:
test_exec_root = None,
test_source_root = None,
excludes = [],
- conditions = {})
+ available_features = [])
if os.path.exists(path):
# FIXME: Improve detection and error reporting of errors in the
@@ -55,7 +55,8 @@ class TestingConfig:
def __init__(self, parent, name, suffixes, test_format,
environment, substitutions, unsupported, on_clone,
- test_exec_root, test_source_root, excludes, conditions):
+ test_exec_root, test_source_root, excludes,
+ available_features):
self.parent = parent
self.name = str(name)
self.suffixes = set(suffixes)
@@ -67,7 +68,7 @@ class TestingConfig:
self.test_exec_root = test_exec_root
self.test_source_root = test_source_root
self.excludes = set(excludes)
- self.conditions = dict(conditions)
+ self.available_features = set(available_features)
def clone(self, path):
# FIXME: Chain implementations?
@@ -77,7 +78,7 @@ class TestingConfig:
self.environment, self.substitutions,
self.unsupported, self.on_clone,
self.test_exec_root, self.test_source_root,
- self.excludes, self.conditions)
+ self.excludes, self.available_features)
if cfg.on_clone:
cfg.on_clone(self, cfg, path)
return cfg
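With the old conditions dictionary replaced by the available_features set above, a suite's lit.cfg can advertise features directly; a hedged fragment of what that might look like (the feature names and the enable_assertions attribute are made up for illustration, and 'config' is the TestingConfig object lit provides when it executes the file):

  # Illustrative lit.cfg fragment, not from this patch.
  import platform

  if platform.system() != 'Windows':
      config.available_features.add('shell')
  if getattr(config, 'enable_assertions', False):
      config.available_features.add('asserts')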
diff --git a/libclamav/c++/llvm/utils/lit/lit/lit.py b/libclamav/c++/llvm/utils/lit/lit/lit.py
index 436f8e7..13d2630 100755
--- a/libclamav/c++/llvm/utils/lit/lit/lit.py
+++ b/libclamav/c++/llvm/utils/lit/lit/lit.py
@@ -258,9 +258,10 @@ def getTestsInSuite(ts, path_in_suite, litConfig,
lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)
# Search for tests.
- for res in lc.test_format.getTestsInDirectory(ts, path_in_suite,
- litConfig, lc):
- yield res
+ if lc.test_format is not None:
+ for res in lc.test_format.getTestsInDirectory(ts, path_in_suite,
+ litConfig, lc):
+ yield res
# Search subdirectories.
for filename in os.listdir(source_path):
@@ -315,8 +316,49 @@ def runTests(numThreads, litConfig, provider, display):
except KeyboardInterrupt:
sys.exit(2)
-def main():
- # Bump the GIL check interval, its more important to get any one thread to a
+def load_test_suite(inputs):
+ import unittest
+
+ # Create the global config object.
+ litConfig = LitConfig.LitConfig(progname = 'lit',
+ path = [],
+ quiet = False,
+ useValgrind = False,
+ valgrindLeakCheck = False,
+ valgrindArgs = [],
+ useTclAsSh = False,
+ noExecute = False,
+ debug = False,
+ isWindows = (platform.system()=='Windows'),
+ params = {})
+
+ # Load the tests from the inputs.
+ tests = []
+ testSuiteCache = {}
+ localConfigCache = {}
+ for input in inputs:
+ prev = len(tests)
+ tests.extend(getTests(input, litConfig,
+ testSuiteCache, localConfigCache)[1])
+ if prev == len(tests):
+ litConfig.warning('input %r contained no tests' % input)
+
+ # If there were any errors during test discovery, exit now.
+ if litConfig.numErrors:
+ print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors
+ sys.exit(2)
+
+ # Return a unittest test suite which just runs the tests in order.
+ def get_test_fn(test):
+ return unittest.FunctionTestCase(
+ lambda: test.config.test_format.execute(
+ test, litConfig),
+ description = test.getFullName())
+
+ from LitTestCase import LitTestCase
+ return unittest.TestSuite([LitTestCase(test, litConfig) for test in tests])
+
+def main(builtinParameters = {}): # Bump the GIL check interval, its more important to get any one thread to a
# blocking operation (hopefully exec) than to try and unblock other threads.
#
# FIXME: This is a hack.
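The load_test_suite helper added above wraps each discovered test in a LitTestCase so a run can be driven by Python's stock unittest machinery. A hedged usage sketch, assuming the helper is importable the same way the llvm-lit wrapper added later in this patch reaches main(), and with a made-up checkout path:

  # Sketch, not part of the patch: run lit-discovered tests under unittest.
  import os, sys, unittest

  sys.path.append(os.path.join('/path/to/llvm', 'utils', 'lit'))  # assumption
  import lit

  suite = lit.load_test_suite(['/path/to/llvm/test/Other'])       # hypothetical
  unittest.TextTestRunner(verbosity=2).run(suite)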
@@ -362,6 +404,9 @@ def main():
group.add_option("", "--vg", dest="useValgrind",
help="Run tests under valgrind",
action="store_true", default=False)
+ group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
+ help="Check for memory leaks under valgrind",
+ action="store_true", default=False)
group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
help="Specify an extra argument for valgrind",
type=str, action="append", default=[])
@@ -423,7 +468,7 @@ def main():
inputs = args
# Create the user defined parameters.
- userParams = {}
+ userParams = dict(builtinParameters)
for entry in opts.userParameters:
if '=' not in entry:
name,val = entry,''
@@ -436,6 +481,7 @@ def main():
path = opts.path,
quiet = opts.quiet,
useValgrind = opts.useValgrind,
+ valgrindLeakCheck = opts.valgrindLeakCheck,
valgrindArgs = opts.valgrindArgs,
useTclAsSh = opts.useTclAsSh,
noExecute = opts.noExecute,
@@ -443,11 +489,27 @@ def main():
isWindows = (platform.system()=='Windows'),
params = userParams)
+ # Expand '@...' form in inputs.
+ actual_inputs = []
+ for input in inputs:
+ if os.path.exists(input) or not input.startswith('@'):
+ actual_inputs.append(input)
+ else:
+ f = open(input[1:])
+ try:
+ for ln in f:
+ ln = ln.strip()
+ if ln:
+ actual_inputs.append(ln)
+ finally:
+ f.close()
+
+
# Load the tests from the inputs.
tests = []
testSuiteCache = {}
localConfigCache = {}
- for input in inputs:
+ for input in actual_inputs:
prev = len(tests)
tests.extend(getTests(input, litConfig,
testSuiteCache, localConfigCache)[1])
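The '@...' handling added above lets one argument name a response file of test paths, one per non-empty line; arguments that exist on disk or do not start with '@' pass through untouched. The same expansion as a standalone function:

  # Sketch, not part of the patch: expand '@file' response-file inputs.
  import os

  def expand_inputs(inputs):
      actual = []
      for inp in inputs:
          if os.path.exists(inp) or not inp.startswith('@'):
              actual.append(inp)
          else:
              f = open(inp[1:])
              try:
                  actual.extend(ln.strip() for ln in f if ln.strip())
              finally:
                  f.close()
      return actual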
diff --git a/libclamav/c++/llvm/utils/llvm-lit/Makefile b/libclamav/c++/llvm/utils/llvm-lit/Makefile
new file mode 100644
index 0000000..702591f
--- /dev/null
+++ b/libclamav/c++/llvm/utils/llvm-lit/Makefile
@@ -0,0 +1,21 @@
+##===- utils/llvm-lit/Makefile -----------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../..
+
+include $(LEVEL)/Makefile.common
+
+all:: $(ToolDir)/llvm-lit
+
+$(ToolDir)/llvm-lit: llvm-lit.in $(ToolDir)/.dir
+ $(Echo) "Creating 'llvm-lit' script..."
+ $(Verb)sed -e "s#@LLVM_SOURCE_DIR@#$(LLVM_SRC_ROOT)#g" \
+ -e "s#@LLVM_BINARY_DIR@#$(LLVM_OBJ_ROOT)#g" \
+ $< > $@
+ $(Verb)chmod +x $@
diff --git a/libclamav/c++/llvm/utils/llvm-lit/llvm-lit.in b/libclamav/c++/llvm/utils/llvm-lit/llvm-lit.in
new file mode 100644
index 0000000..3ff2c24
--- /dev/null
+++ b/libclamav/c++/llvm/utils/llvm-lit/llvm-lit.in
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+import os
+import sys
+
+# Variables configured at build time.
+llvm_source_root = "@LLVM_SOURCE_DIR@"
+llvm_obj_root = "@LLVM_BINARY_DIR@"
+
+# Make sure we can find the lit package.
+sys.path.append(os.path.join(llvm_source_root, 'utils', 'lit'))
+
+# Set up some builtin parameters, so that by default the LLVM test suite
+# configuration file knows how to find the object tree.
+builtin_parameters = {
+ 'llvm_site_config' : os.path.join(llvm_obj_root, 'test', 'lit.site.cfg')
+ }
+
+if __name__=='__main__':
+ import lit
+ lit.main(builtin_parameters)
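The wrapper above passes builtin_parameters into lit.main(), whose new signature seeds the user parameters before the -D options are parsed, so command-line definitions override the built-in llvm_site_config default. The merge, sketched standalone (the split on '=' mirrors the option handling shown earlier in this patch):

  # Sketch, not part of the patch: builtin parameters overridden by -D entries.
  def merge_params(builtin, cmdline_entries):
      params = dict(builtin)
      for entry in cmdline_entries:
          if '=' not in entry:
              name, val = entry, ''
          else:
              name, val = entry.split('=', 1)
          params[name] = val
      return params

  merge_params({'llvm_site_config': '/obj/test/lit.site.cfg'},
               ['llvm_site_config=/other/lit.site.cfg'])
  # -> {'llvm_site_config': '/other/lit.site.cfg'}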
diff --git a/libclamav/c++/llvm/utils/llvm.grm b/libclamav/c++/llvm/utils/llvm.grm
index d391e2a..9d6bdf7 100644
--- a/libclamav/c++/llvm/utils/llvm.grm
+++ b/libclamav/c++/llvm/utils/llvm.grm
@@ -8,6 +8,8 @@ It is strictly syntax-based, and makes no attempt to generate
IR that is semantically valid. Most of the IR produced doesn't
pass the Verifier.
+TODO: Metadata, in all its forms
+
*)
I ::= "title: LLVM assembly language\n"
@@ -90,6 +92,8 @@ GVInternalLinkage
| dllexport
| common
| private
+ | "linker_private"
+ | "linker_private_weak"
;
GVExternalLinkage
@@ -398,7 +402,7 @@ OptVolatile ::= - volatile | _ ;
OptExact ::= - exact | _ ;
OptNSW ::= - nsw | _ ;
OptNUW ::= - nuw | _ ;
-OptNW ::= OptNUW OptNSW ;
+OptNW ::= OptNUW OptNSW | OptNSW OptNUW ;
OptInBounds ::= - inbounds | _ ;
MemoryInst ::= malloc Types OptCAlign
diff --git a/libclamav/c++/llvm/utils/llvmdo b/libclamav/c++/llvm/utils/llvmdo
index 4a7e05a..bcfc221 100755
--- a/libclamav/c++/llvm/utils/llvmdo
+++ b/libclamav/c++/llvm/utils/llvmdo
@@ -76,8 +76,6 @@ fi
shift;
paths_to_ignore="\
- -path */CVS -o \
- -path */CVS/* -o \
-path */.svn/ -o \
-path */.svn/* -o \
-path docs/doxygen/* -o \
@@ -130,7 +128,6 @@ files_to_match="\
-o -name llvmgrep \
-o -name check-each-file \
-o -name codgen-diff \
- -o -name cvsupdate \
-o -name llvm-native-gcc \
-o -name llvm-native-gxx \
-o -name makellvm \
@@ -153,7 +150,6 @@ files_to_ignore="\
-name \.* \
-o -name *~ \
-o -name #* \
- -o -name *.cvs \
-o -name configure \
-o -name slow.ll \
-o -name *libtool* \
diff --git a/libclamav/c++/llvm/utils/mkpatch b/libclamav/c++/llvm/utils/mkpatch
deleted file mode 100755
index 278a241..0000000
--- a/libclamav/c++/llvm/utils/mkpatch
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-#
-# This script makes a patch for LLVM ensuring the correct diff options and
-# putting the files in a standard review order.
-
-
-function error {
- retcode="$?"
- echo "mkpatch: error: $1 ($retcode)"
- exit 1
-}
-
-if [ ! -e llvm.spec.in ] ; then
- error "Please change directory to the LLVM top source directory"
-fi
-if [ "$#" -ne 1 ] ; then
- error "usage: utils/mkpatch [PATCH_NAME]"
-fi
-NAME="$1"
-echo "mkpatch: Generating differences on top level files"
-svn diff -N -x -u > "$NAME".patch.raw 2>&1
-echo "mkpatch: Generating differences on all directories"
-svn diff -x -u >> "$NAME".patch.raw 2>&1 \
- autoconf docs utils include lib/System lib/Support lib/VMCore lib/AsmParser \
- lib/Bitcode lib/Analysis lib/Transforms lib/CodeGen lib/Target \
- lib/ExecutionEngine lib/Linker \
- tools test unittests runtime projects examples Xcode
-
-echo "mkpatch: Removing cruft from the patch file"
-sed -e '/^[?] .*/d' -e '/^cvs diff: Diffing/d' "$NAME".patch.raw | awk '\
-BEGIN { deleting = 0; } \
-/^Index: .*[.]cvs$/ { deleting = 1; fname=substr($0,7); \
- print "Skipping: ", fname > "/dev/stderr"; } \
-/^Index:.*/ && !/^Index: .*[.]cvs$/ { deleting = 0; } \
-{ if (! deleting) { print; } } ' > "$NAME".patch || \
- error "sed/awk cleanup failed"
-
diff --git a/libclamav/c++/llvm/utils/not/CMakeLists.txt b/libclamav/c++/llvm/utils/not/CMakeLists.txt
deleted file mode 100644
index 407c82e..0000000
--- a/libclamav/c++/llvm/utils/not/CMakeLists.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-add_executable(not
- not.cpp
- )
-
-target_link_libraries(not LLVMSystem)
-if( MINGW )
- target_link_libraries(not imagehlp psapi)
-endif( MINGW )
-if( LLVM_ENABLE_THREADS AND HAVE_LIBPTHREAD )
- target_link_libraries(not pthread)
-endif()
diff --git a/libclamav/c++/llvm/utils/not/Makefile b/libclamav/c++/llvm/utils/not/Makefile
deleted file mode 100644
index fef4802..0000000
--- a/libclamav/c++/llvm/utils/not/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-##===- utils/not/Makefile ----------------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-TOOLNAME = not
-USEDLIBS = LLVMSupport.a LLVMSystem.a
-
-# This tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
-
-# Don't install this utility
-NO_INSTALL = 1
-
-include $(LEVEL)/Makefile.common
-
diff --git a/libclamav/c++/llvm/utils/not/not.cpp b/libclamav/c++/llvm/utils/not/not.cpp
deleted file mode 100644
index dd89b8f..0000000
--- a/libclamav/c++/llvm/utils/not/not.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-//===- not.cpp - The 'not' testing tool -----------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/System/Path.h"
-#include "llvm/System/Program.h"
-using namespace llvm;
-
-int main(int argc, const char **argv) {
- sys::Path Program = sys::Program::FindProgramByName(argv[1]);
- return !sys::Program::ExecuteAndWait(Program, argv + 1);
-}
diff --git a/libclamav/c++/llvm/utils/unittest/Makefile b/libclamav/c++/llvm/utils/unittest/Makefile
deleted file mode 100644
index 6a09341..0000000
--- a/libclamav/c++/llvm/utils/unittest/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-##===- utils/unittest/Makefile -----------------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../..
-PARALLEL_DIRS = googletest UnitTestMain
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/utils/unittest/UnitTestMain/Makefile b/libclamav/c++/llvm/utils/unittest/UnitTestMain/Makefile
deleted file mode 100644
index 5c10049..0000000
--- a/libclamav/c++/llvm/utils/unittest/UnitTestMain/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-##===- utils/unittest/UnitTestMain/Makefile ----------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-
-include $(LEVEL)/Makefile.config
-
-LIBRARYNAME = UnitTestMain
-BUILD_ARCHIVE = 1
-REQUIRES_RTTI = 1
-
-CPP.Flags += -I$(LLVM_SRC_ROOT)/utils/unittest/googletest/include
-CPP.Flags += $(NO_MISSING_FIELD_INITIALIZERS) $(NO_VARIADIC_MACROS)
-CPP.Flags += -DGTEST_HAS_RTTI=0
-# libstdc++'s TR1 <tuple> header depends on RTTI and uses C++'0x features not
-# supported by Clang, so force googletest to use its own tuple implementation.
-# When we import googletest >=1.4.0, we can drop this line.
-CPP.Flags += -DGTEST_HAS_TR1_TUPLE=0
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/utils/unittest/UnitTestMain/TestMain.cpp b/libclamav/c++/llvm/utils/unittest/UnitTestMain/TestMain.cpp
deleted file mode 100644
index d97dca8..0000000
--- a/libclamav/c++/llvm/utils/unittest/UnitTestMain/TestMain.cpp
+++ /dev/null
@@ -1,15 +0,0 @@
-//===--- utils/unittest/UnitTestMain/TestMain.cpp - unittest driver -------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "gtest/gtest.h"
-
-int main(int argc, char **argv) {
- testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
-}
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/LICENSE.TXT b/libclamav/c++/llvm/utils/unittest/googletest/LICENSE.TXT
deleted file mode 100644
index 1941a11..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/LICENSE.TXT
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright 2008, Google Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/Makefile b/libclamav/c++/llvm/utils/unittest/googletest/Makefile
deleted file mode 100644
index 1ec979d..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/Makefile
+++ /dev/null
@@ -1,36 +0,0 @@
-##===- utils/unittest/googletest/Makefile ------------------*- Makefile -*-===##
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL := ../../..
-
-include $(LEVEL)/Makefile.config
-
-LIBRARYNAME = GoogleTest
-BUILD_ARCHIVE = 1
-REQUIRES_RTTI = 1
-
-# Note that these flags are duplicated when building individual tests in
-# unittests/Makefile.unittest and ../UnitTestMain/Makefile; ensure that any
-# changes are made to both.
-CPP.Flags += -I$(LLVM_SRC_ROOT)/utils/unittest/googletest/include
-CPP.Flags += $(NO_MISSING_FIELD_INITIALIZERS) $(NO_VARIADIC_MACROS)
-CPP.Flags += -DGTEST_HAS_RTTI=0
-# libstdc++'s TR1 <tuple> header depends on RTTI and uses C++'0x features not
-# supported by Clang, so force googletest to use its own tuple implementation.
-# When we import googletest >=1.4.0, we can drop this line.
-CPP.Flags += -DGTEST_HAS_TR1_TUPLE=0
-
-
-ifeq ($(HOST_OS),MingW)
- CPP.Flags += -DGTEST_OS_WINDOWS=1
-endif
-
-NO_INSTALL = 1
-
-include $(LEVEL)/Makefile.common
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/README.LLVM b/libclamav/c++/llvm/utils/unittest/googletest/README.LLVM
deleted file mode 100644
index e907a5e..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/README.LLVM
+++ /dev/null
@@ -1,31 +0,0 @@
-LLVM notes
-----------
-
-This directory contains Google Test 1.2.1, with all elements removed except for
-the actual source code, to minimize the addition to the LLVM distribution.
-
-Cleaned up as follows:
-
-# Remove all the unnecessary files and directories
-$ rm -f aclocal* configure* Makefile* CHANGES CONTRIBUTORS README
-$ rm -rf build-aux m4 make msvc samples scons scripts test xcode
-$ rm -f `find . -name \*\.pump`
-
-# Move all the source files to the current directory
-$ mv src/* .
-$ rmdir src
-
-# Move extra headers into the already-existing internal headers dir
-$ mv *.h include/gtest/internal/
-
-# Update paths to the included files
-$ perl -pi -e 's|^#include "src/|#include "gtest/internal/|' *.cc
-
-$ rm -f gtest-all.cc gtest_main.cc
-
-$ mv COPYING LICENSE.TXT
-
-
-Modified as follows:
-* To GTestStreamToHelper in include/gtest/internal/gtest-internal.h,
- added the ability to stream with raw_os_ostream.
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/gtest-death-test.cc b/libclamav/c++/llvm/utils/unittest/googletest/gtest-death-test.cc
deleted file mode 100644
index 7eb2642..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/gtest-death-test.cc
+++ /dev/null
@@ -1,777 +0,0 @@
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan at google.com (Zhanyong Wan)
-//
-// This file implements death tests.
-
-#include <gtest/gtest-death-test.h>
-#include <gtest/internal/gtest-port.h>
-
-#ifdef GTEST_HAS_DEATH_TEST
-#include <errno.h>
-#include <limits.h>
-#include <stdarg.h>
-#endif // GTEST_HAS_DEATH_TEST
-
-#include <gtest/gtest-message.h>
-#include <gtest/internal/gtest-string.h>
-
-// Indicates that this translation unit is part of Google Test's
-// implementation. It must come before gtest-internal-inl.h is
-// included, or there will be a compiler error. This trick is to
-// prevent a user from accidentally including gtest-internal-inl.h in
-// his code.
-#define GTEST_IMPLEMENTATION
-#include "gtest/internal/gtest-internal-inl.h"
-#undef GTEST_IMPLEMENTATION
-
-namespace testing {
-
-// Constants.
-
-// The default death test style.
-static const char kDefaultDeathTestStyle[] = "fast";
-
-GTEST_DEFINE_string_(
- death_test_style,
- internal::StringFromGTestEnv("death_test_style", kDefaultDeathTestStyle),
- "Indicates how to run a death test in a forked child process: "
- "\"threadsafe\" (child process re-executes the test binary "
- "from the beginning, running only the specific death test) or "
- "\"fast\" (child process runs the death test immediately "
- "after forking).");
-
-namespace internal {
-GTEST_DEFINE_string_(
- internal_run_death_test, "",
- "Indicates the file, line number, temporal index of "
- "the single death test to run, and a file descriptor to "
- "which a success code may be sent, all separated by "
- "colons. This flag is specified if and only if the current "
- "process is a sub-process launched for running a thread-safe "
- "death test. FOR INTERNAL USE ONLY.");
-} // namespace internal
-
-#ifdef GTEST_HAS_DEATH_TEST
-
-// ExitedWithCode constructor.
-ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) {
-}
-
-// ExitedWithCode function-call operator.
-bool ExitedWithCode::operator()(int exit_status) const {
- return WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == exit_code_;
-}
-
-// KilledBySignal constructor.
-KilledBySignal::KilledBySignal(int signum) : signum_(signum) {
-}
-
-// KilledBySignal function-call operator.
-bool KilledBySignal::operator()(int exit_status) const {
- return WIFSIGNALED(exit_status) && WTERMSIG(exit_status) == signum_;
-}
-
-namespace internal {
-
-// Utilities needed for death tests.
-
-// Generates a textual description of a given exit code, in the format
-// specified by wait(2).
-static String ExitSummary(int exit_code) {
- Message m;
- if (WIFEXITED(exit_code)) {
- m << "Exited with exit status " << WEXITSTATUS(exit_code);
- } else if (WIFSIGNALED(exit_code)) {
- m << "Terminated by signal " << WTERMSIG(exit_code);
- }
-#ifdef WCOREDUMP
- if (WCOREDUMP(exit_code)) {
- m << " (core dumped)";
- }
-#endif
- return m.GetString();
-}
-
-// Returns true if exit_status describes a process that was terminated
-// by a signal, or exited normally with a nonzero exit code.
-bool ExitedUnsuccessfully(int exit_status) {
- return !ExitedWithCode(0)(exit_status);
-}
-
-// Generates a textual failure message when a death test finds more than
-// one thread running, or cannot determine the number of threads, prior
-// to executing the given statement. It is the responsibility of the
-// caller not to pass a thread_count of 1.
-static String DeathTestThreadWarning(size_t thread_count) {
- Message msg;
- msg << "Death tests use fork(), which is unsafe particularly"
- << " in a threaded context. For this test, " << GTEST_NAME << " ";
- if (thread_count == 0)
- msg << "couldn't detect the number of threads.";
- else
- msg << "detected " << thread_count << " threads.";
- return msg.GetString();
-}
-
-// Static string containing a description of the outcome of the
-// last death test.
-static String last_death_test_message;
-
-// Flag characters for reporting a death test that did not die.
-static const char kDeathTestLived = 'L';
-static const char kDeathTestReturned = 'R';
-static const char kDeathTestInternalError = 'I';
-
-// An enumeration describing all of the possible ways that a death test
-// can conclude. DIED means that the process died while executing the
-// test code; LIVED means that process lived beyond the end of the test
-// code; and RETURNED means that the test statement attempted a "return,"
-// which is not allowed. IN_PROGRESS means the test has not yet
-// concluded.
-enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED };
-
-// Routine for aborting the program which is safe to call from an
-// exec-style death test child process, in which case the the error
-// message is propagated back to the parent process. Otherwise, the
-// message is simply printed to stderr. In either case, the program
-// then exits with status 1.
-void DeathTestAbort(const char* format, ...) {
- // This function may be called from a threadsafe-style death test
- // child process, which operates on a very small stack. Use the
- // heap for any additional non-miniscule memory requirements.
- const InternalRunDeathTestFlag* const flag =
- GetUnitTestImpl()->internal_run_death_test_flag();
- va_list args;
- va_start(args, format);
-
- if (flag != NULL) {
- FILE* parent = fdopen(flag->status_fd, "w");
- fputc(kDeathTestInternalError, parent);
- vfprintf(parent, format, args);
- fclose(parent);
- va_end(args);
- _exit(1);
- } else {
- vfprintf(stderr, format, args);
- va_end(args);
- abort();
- }
-}
-
-// A replacement for CHECK that calls DeathTestAbort if the assertion
-// fails.
-#define GTEST_DEATH_TEST_CHECK_(expression) \
- do { \
- if (!(expression)) { \
- DeathTestAbort("CHECK failed: File %s, line %d: %s", \
- __FILE__, __LINE__, #expression); \
- } \
- } while (0)
-
-// This macro is similar to GTEST_DEATH_TEST_CHECK_, but it is meant for
-// evaluating any system call that fulfills two conditions: it must return
-// -1 on failure, and set errno to EINTR when it is interrupted and
-// should be tried again. The macro expands to a loop that repeatedly
-// evaluates the expression as long as it evaluates to -1 and sets
-// errno to EINTR. If the expression evaluates to -1 but errno is
-// something other than EINTR, DeathTestAbort is called.
-#define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \
- do { \
- int retval; \
- do { \
- retval = (expression); \
- } while (retval == -1 && errno == EINTR); \
- if (retval == -1) { \
- DeathTestAbort("CHECK failed: File %s, line %d: %s != -1", \
- __FILE__, __LINE__, #expression); \
- } \
- } while (0)
-
-// Death test constructor. Increments the running death test count
-// for the current test.
-DeathTest::DeathTest() {
- TestInfo* const info = GetUnitTestImpl()->current_test_info();
- if (info == NULL) {
- DeathTestAbort("Cannot run a death test outside of a TEST or "
- "TEST_F construct");
- }
-}
-
-// Creates and returns a death test by dispatching to the current
-// death test factory.
-bool DeathTest::Create(const char* statement, const RE* regex,
- const char* file, int line, DeathTest** test) {
- return GetUnitTestImpl()->death_test_factory()->Create(
- statement, regex, file, line, test);
-}
-
-const char* DeathTest::LastMessage() {
- return last_death_test_message.c_str();
-}
-
-// ForkingDeathTest provides implementations for most of the abstract
-// methods of the DeathTest interface. Only the AssumeRole method is
-// left undefined.
-class ForkingDeathTest : public DeathTest {
- public:
- ForkingDeathTest(const char* statement, const RE* regex);
-
- // All of these virtual functions are inherited from DeathTest.
- virtual int Wait();
- virtual bool Passed(bool status_ok);
- virtual void Abort(AbortReason reason);
-
- protected:
- void set_forked(bool forked) { forked_ = forked; }
- void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
- void set_read_fd(int fd) { read_fd_ = fd; }
- void set_write_fd(int fd) { write_fd_ = fd; }
-
- private:
- // The textual content of the code this object is testing.
- const char* const statement_;
- // The regular expression which test output must match.
- const RE* const regex_;
- // True if the death test successfully forked.
- bool forked_;
- // PID of child process during death test; 0 in the child process itself.
- pid_t child_pid_;
- // File descriptors for communicating the death test's status byte.
- int read_fd_; // Always -1 in the child process.
- int write_fd_; // Always -1 in the parent process.
- // The exit status of the child process.
- int status_;
- // How the death test concluded.
- DeathTestOutcome outcome_;
-};
-
-// Constructs a ForkingDeathTest.
-ForkingDeathTest::ForkingDeathTest(const char* statement, const RE* regex)
- : DeathTest(),
- statement_(statement),
- regex_(regex),
- forked_(false),
- child_pid_(-1),
- read_fd_(-1),
- write_fd_(-1),
- status_(-1),
- outcome_(IN_PROGRESS) {
-}
-
-// Reads an internal failure message from a file descriptor, then calls
-// LOG(FATAL) with that message. Called from a death test parent process
-// to read a failure message from the death test child process.
-static void FailFromInternalError(int fd) {
- Message error;
- char buffer[256];
- ssize_t num_read;
-
- do {
- while ((num_read = read(fd, buffer, 255)) > 0) {
- buffer[num_read] = '\0';
- error << buffer;
- }
- } while (num_read == -1 && errno == EINTR);
-
- // TODO(smcafee): Maybe just FAIL the test instead?
- if (num_read == 0) {
- GTEST_LOG_(FATAL, error);
- } else {
- GTEST_LOG_(FATAL,
- Message() << "Error while reading death test internal: "
- << strerror(errno) << " [" << errno << "]");
- }
-}
-
-// Waits for the child in a death test to exit, returning its exit
-// status, or 0 if no child process exists. As a side effect, sets the
-// outcome data member.
-int ForkingDeathTest::Wait() {
- if (!forked_)
- return 0;
-
- // The read() here blocks until data is available (signifying the
- // failure of the death test) or until the pipe is closed (signifying
- // its success), so it's okay to call this in the parent before
- // the child process has exited.
- char flag;
- ssize_t bytes_read;
-
- do {
- bytes_read = read(read_fd_, &flag, 1);
- } while (bytes_read == -1 && errno == EINTR);
-
- if (bytes_read == 0) {
- outcome_ = DIED;
- } else if (bytes_read == 1) {
- switch (flag) {
- case kDeathTestReturned:
- outcome_ = RETURNED;
- break;
- case kDeathTestLived:
- outcome_ = LIVED;
- break;
- case kDeathTestInternalError:
- FailFromInternalError(read_fd_); // Does not return.
- break;
- default:
- GTEST_LOG_(FATAL,
- Message() << "Death test child process reported unexpected "
- << "status byte (" << static_cast<unsigned int>(flag)
- << ")");
- }
- } else {
- GTEST_LOG_(FATAL,
- Message() << "Read from death test child process failed: "
- << strerror(errno));
- }
-
- GTEST_DEATH_TEST_CHECK_SYSCALL_(close(read_fd_));
- GTEST_DEATH_TEST_CHECK_SYSCALL_(waitpid(child_pid_, &status_, 0));
- return status_;
-}
-
-// Assesses the success or failure of a death test, using both private
-// members which have previously been set, and one argument:
-//
-// Private data members:
-// outcome: an enumeration describing how the death test
-// concluded: DIED, LIVED, or RETURNED. The death test fails
-// in the latter two cases
-// status: the exit status of the child process, in the format
-// specified by wait(2)
-// regex: a regular expression object to be applied to
-// the test's captured standard error output; the death test
-// fails if it does not match
-//
-// Argument:
-// status_ok: true if exit_status is acceptable in the context of
-// this particular death test, which fails if it is false
-//
-// Returns true iff all of the above conditions are met. Otherwise, the
-// first failing condition, in the order given above, is the one that is
-// reported. Also sets the static variable last_death_test_message.
-bool ForkingDeathTest::Passed(bool status_ok) {
- if (!forked_)
- return false;
-
-#if GTEST_HAS_GLOBAL_STRING
- const ::string error_message = GetCapturedStderr();
-#else
- const ::std::string error_message = GetCapturedStderr();
-#endif // GTEST_HAS_GLOBAL_STRING
-
- bool success = false;
- Message buffer;
-
- buffer << "Death test: " << statement_ << "\n";
- switch (outcome_) {
- case LIVED:
- buffer << " Result: failed to die.\n"
- << " Error msg: " << error_message;
- break;
- case RETURNED:
- buffer << " Result: illegal return in test statement.\n"
- << " Error msg: " << error_message;
- break;
- case DIED:
- if (status_ok) {
- if (RE::PartialMatch(error_message, *regex_)) {
- success = true;
- } else {
- buffer << " Result: died but not with expected error.\n"
- << " Expected: " << regex_->pattern() << "\n"
- << "Actual msg: " << error_message;
- }
- } else {
- buffer << " Result: died but not with expected exit code:\n"
- << " " << ExitSummary(status_) << "\n";
- }
- break;
- case IN_PROGRESS:
- default:
- GTEST_LOG_(FATAL,
- "DeathTest::Passed somehow called before conclusion of test");
- }
-
- last_death_test_message = buffer.GetString();
- return success;
-}
-
-// Signals that the death test code which should have exited, didn't.
-// Should be called only in a death test child process.
-// Writes a status byte to the child's status file desriptor, then
-// calls _exit(1).
-void ForkingDeathTest::Abort(AbortReason reason) {
- // The parent process considers the death test to be a failure if
- // it finds any data in our pipe. So, here we write a single flag byte
- // to the pipe, then exit.
- const char flag =
- reason == TEST_DID_NOT_DIE ? kDeathTestLived : kDeathTestReturned;
- GTEST_DEATH_TEST_CHECK_SYSCALL_(write(write_fd_, &flag, 1));
- GTEST_DEATH_TEST_CHECK_SYSCALL_(close(write_fd_));
- _exit(1); // Exits w/o any normal exit hooks (we were supposed to crash)
-}
-
-// A concrete death test class that forks, then immediately runs the test
-// in the child process.
-class NoExecDeathTest : public ForkingDeathTest {
- public:
- NoExecDeathTest(const char* statement, const RE* regex) :
- ForkingDeathTest(statement, regex) { }
- virtual TestRole AssumeRole();
-};
-
-// The AssumeRole process for a fork-and-run death test. It implements a
-// straightforward fork, with a simple pipe to transmit the status byte.
-DeathTest::TestRole NoExecDeathTest::AssumeRole() {
- const size_t thread_count = GetThreadCount();
-#if 0
- if (thread_count != 1) {
- GTEST_LOG_(WARNING, DeathTestThreadWarning(thread_count));
- }
-#endif
-
- int pipe_fd[2];
- GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
-
- last_death_test_message = "";
- CaptureStderr();
- // When we fork the process below, the log file buffers are copied, but the
- // file descriptors are shared. We flush all log files here so that closing
- // the file descriptors in the child process doesn't throw off the
- // synchronization between descriptors and buffers in the parent process.
- // This is as close to the fork as possible to avoid a race condition in case
- // there are multiple threads running before the death test, and another
- // thread writes to the log file.
- FlushInfoLog();
-
- const pid_t child_pid = fork();
- GTEST_DEATH_TEST_CHECK_(child_pid != -1);
- set_child_pid(child_pid);
- if (child_pid == 0) {
- GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[0]));
- set_write_fd(pipe_fd[1]);
- // Redirects all logging to stderr in the child process to prevent
- // concurrent writes to the log files. We capture stderr in the parent
- // process and append the child process' output to a log.
- LogToStderr();
- return EXECUTE_TEST;
- } else {
- GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
- set_read_fd(pipe_fd[0]);
- set_forked(true);
- return OVERSEE_TEST;
- }
-}
-
-// A concrete death test class that forks and re-executes the main
-// program from the beginning, with command-line flags set that cause
-// only this specific death test to be run.
-class ExecDeathTest : public ForkingDeathTest {
- public:
- ExecDeathTest(const char* statement, const RE* regex,
- const char* file, int line) :
- ForkingDeathTest(statement, regex), file_(file), line_(line) { }
- virtual TestRole AssumeRole();
- private:
- // The name of the file in which the death test is located.
- const char* const file_;
- // The line number on which the death test is located.
- const int line_;
-};
-
-// Utility class for accumulating command-line arguments.
-class Arguments {
- public:
- Arguments() {
- args_.push_back(NULL);
- }
- ~Arguments() {
- for (std::vector<char*>::iterator i = args_.begin();
- i + 1 != args_.end();
- ++i) {
- free(*i);
- }
- }
- void AddArgument(const char* argument) {
- args_.insert(args_.end() - 1, strdup(argument));
- }
-
- template <typename Str>
- void AddArguments(const ::std::vector<Str>& arguments) {
- for (typename ::std::vector<Str>::const_iterator i = arguments.begin();
- i != arguments.end();
- ++i) {
- args_.insert(args_.end() - 1, strdup(i->c_str()));
- }
- }
- char* const* Argv() {
- return &args_[0];
- }
- private:
- std::vector<char*> args_;
-};
-
-// A struct that encompasses the arguments to the child process of a
-// threadsafe-style death test process.
-struct ExecDeathTestArgs {
- char* const* argv; // Command-line arguments for the child's call to exec
- int close_fd; // File descriptor to close; the read end of a pipe
-};
-
-// The main function for a threadsafe-style death test child process.
-// This function is called in a clone()-ed process and thus must avoid
-// any potentially unsafe operations like malloc or libc functions.
-static int ExecDeathTestChildMain(void* child_arg) {
- ExecDeathTestArgs* const args = static_cast<ExecDeathTestArgs*>(child_arg);
- GTEST_DEATH_TEST_CHECK_SYSCALL_(close(args->close_fd));
-
- // We need to execute the test program in the same environment where
- // it was originally invoked. Therefore we change to the original
- // working directory first.
- const char* const original_dir =
- UnitTest::GetInstance()->original_working_dir();
- // We can safely call chdir() as it's a direct system call.
- if (chdir(original_dir) != 0) {
- DeathTestAbort("chdir(\"%s\") failed: %s",
- original_dir, strerror(errno));
- return EXIT_FAILURE;
- }
-
- // We can safely call execve() as it's a direct system call. We
- // cannot use execvp() as it's a libc function and thus potentially
- // unsafe. Since execve() doesn't search the PATH, the user must
- // invoke the test program via a valid path that contains at least
- // one path separator.
- execve(args->argv[0], args->argv, environ);
- DeathTestAbort("execve(%s, ...) in %s failed: %s",
- args->argv[0], original_dir, strerror(errno));
- return EXIT_FAILURE;
-}
-
-// Two utility routines that together determine the direction the stack
-// grows.
-// This could be accomplished more elegantly by a single recursive
-// function, but we want to guard against the unlikely possibility of
-// a smart compiler optimizing the recursion away.
-static bool StackLowerThanAddress(const void* ptr) {
- int dummy;
- return &dummy < ptr;
-}
-
-static bool StackGrowsDown() {
- int dummy;
- return StackLowerThanAddress(&dummy);
-}
-
-// A threadsafe implementation of fork(2) for threadsafe-style death tests
-// that uses clone(2). It dies with an error message if anything goes
-// wrong.
-static pid_t ExecDeathTestFork(char* const* argv, int close_fd) {
- static const bool stack_grows_down = StackGrowsDown();
- const size_t stack_size = getpagesize();
- void* const stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
- GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED);
- void* const stack_top =
- static_cast<char*>(stack) + (stack_grows_down ? stack_size : 0);
- ExecDeathTestArgs args = { argv, close_fd };
- const pid_t child_pid = clone(&ExecDeathTestChildMain, stack_top,
- SIGCHLD, &args);
- GTEST_DEATH_TEST_CHECK_(child_pid != -1);
- GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1);
- return child_pid;
-}
-
-// The AssumeRole process for a fork-and-exec death test. It re-executes the
-// main program from the beginning, setting the --gtest_filter
-// and --gtest_internal_run_death_test flags to cause only the current
-// death test to be re-run.
-DeathTest::TestRole ExecDeathTest::AssumeRole() {
- const UnitTestImpl* const impl = GetUnitTestImpl();
- const InternalRunDeathTestFlag* const flag =
- impl->internal_run_death_test_flag();
- const TestInfo* const info = impl->current_test_info();
- const int death_test_index = info->result()->death_test_count();
-
- if (flag != NULL) {
- set_write_fd(flag->status_fd);
- return EXECUTE_TEST;
- }
-
- int pipe_fd[2];
- GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
- // Clear the close-on-exec flag on the write end of the pipe, lest
- // it be closed when the child process does an exec:
- GTEST_DEATH_TEST_CHECK_(fcntl(pipe_fd[1], F_SETFD, 0) != -1);
-
- const String filter_flag =
- String::Format("--%s%s=%s.%s",
- GTEST_FLAG_PREFIX, kFilterFlag,
- info->test_case_name(), info->name());
- const String internal_flag =
- String::Format("--%s%s=%s:%d:%d:%d",
- GTEST_FLAG_PREFIX, kInternalRunDeathTestFlag, file_, line_,
- death_test_index, pipe_fd[1]);
- Arguments args;
- args.AddArguments(GetArgvs());
- args.AddArgument("--logtostderr");
- args.AddArgument(filter_flag.c_str());
- args.AddArgument(internal_flag.c_str());
-
- last_death_test_message = "";
-
- CaptureStderr();
- // See the comment in NoExecDeathTest::AssumeRole for why the next line
- // is necessary.
- FlushInfoLog();
-
- const pid_t child_pid = ExecDeathTestFork(args.Argv(), pipe_fd[0]);
- GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
- set_child_pid(child_pid);
- set_read_fd(pipe_fd[0]);
- set_forked(true);
- return OVERSEE_TEST;
-}
-
-// Creates a concrete DeathTest-derived class that depends on the
-// --gtest_death_test_style flag, and sets the pointer pointed to
-// by the "test" argument to its address. If the test should be
-// skipped, sets that pointer to NULL. Returns true, unless the
-// flag is set to an invalid value.
-bool DefaultDeathTestFactory::Create(const char* statement, const RE* regex,
- const char* file, int line,
- DeathTest** test) {
- UnitTestImpl* const impl = GetUnitTestImpl();
- const InternalRunDeathTestFlag* const flag =
- impl->internal_run_death_test_flag();
- const int death_test_index = impl->current_test_info()
- ->increment_death_test_count();
-
- if (flag != NULL) {
- if (death_test_index > flag->index) {
- last_death_test_message = String::Format(
- "Death test count (%d) somehow exceeded expected maximum (%d)",
- death_test_index, flag->index);
- return false;
- }
-
- if (!(flag->file == file && flag->line == line &&
- flag->index == death_test_index)) {
- *test = NULL;
- return true;
- }
- }
-
- if (GTEST_FLAG(death_test_style) == "threadsafe") {
- *test = new ExecDeathTest(statement, regex, file, line);
- } else if (GTEST_FLAG(death_test_style) == "fast") {
- *test = new NoExecDeathTest(statement, regex);
- } else {
- last_death_test_message = String::Format(
- "Unknown death test style \"%s\" encountered",
- GTEST_FLAG(death_test_style).c_str());
- return false;
- }
-
- return true;
-}
-
-// Splits a given string on a given delimiter, populating a given
-// vector with the fields. GTEST_HAS_DEATH_TEST implies that we have
-// ::std::string, so we can use it here.
-static void SplitString(const ::std::string& str, char delimiter,
- ::std::vector< ::std::string>* dest) {
- ::std::vector< ::std::string> parsed;
- ::std::string::size_type pos = 0;
- while (true) {
- const ::std::string::size_type colon = str.find(delimiter, pos);
- if (colon == ::std::string::npos) {
- parsed.push_back(str.substr(pos));
- break;
- } else {
- parsed.push_back(str.substr(pos, colon - pos));
- pos = colon + 1;
- }
- }
- dest->swap(parsed);
-}
-
-// Attempts to parse a string into a positive integer. Returns true
-// if that is possible. GTEST_HAS_DEATH_TEST implies that we have
-// ::std::string, so we can use it here.
-static bool ParsePositiveInt(const ::std::string& str, int* number) {
- // Fail fast if the given string does not begin with a digit;
- // this bypasses strtol's "optional leading whitespace and plus
- // or minus sign" semantics, which are undesirable here.
- if (str.empty() || !isdigit(str[0])) {
- return false;
- }
- char* endptr;
- const long parsed = strtol(str.c_str(), &endptr, 10); // NOLINT
- if (*endptr == '\0' && parsed <= INT_MAX) {
- *number = static_cast<int>(parsed);
- return true;
- } else {
- return false;
- }
-}
-
-// Returns a newly created InternalRunDeathTestFlag object with fields
-// initialized from the GTEST_FLAG(internal_run_death_test) flag if
-// the flag is specified; otherwise returns NULL.
-InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() {
- if (GTEST_FLAG(internal_run_death_test) == "") return NULL;
-
- InternalRunDeathTestFlag* const internal_run_death_test_flag =
- new InternalRunDeathTestFlag;
- // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we
- // can use it here.
- ::std::vector< ::std::string> fields;
- SplitString(GTEST_FLAG(internal_run_death_test).c_str(), ':', &fields);
- if (fields.size() != 4
- || !ParsePositiveInt(fields[1], &internal_run_death_test_flag->line)
- || !ParsePositiveInt(fields[2], &internal_run_death_test_flag->index)
- || !ParsePositiveInt(fields[3],
- &internal_run_death_test_flag->status_fd)) {
- DeathTestAbort("Bad --gtest_internal_run_death_test flag: %s",
- GTEST_FLAG(internal_run_death_test).c_str());
- }
- internal_run_death_test_flag->file = fields[0].c_str();
- return internal_run_death_test_flag;
-}
-
-} // namespace internal
-
-#endif // GTEST_HAS_DEATH_TEST
-
-} // namespace testing
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/gtest-filepath.cc b/libclamav/c++/llvm/utils/unittest/googletest/gtest-filepath.cc
deleted file mode 100644
index 640c27c..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/gtest-filepath.cc
+++ /dev/null
@@ -1,321 +0,0 @@
-// Copyright 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors: keith.ray at gmail.com (Keith Ray)
-
-#include <gtest/internal/gtest-filepath.h>
-#include <gtest/internal/gtest-port.h>
-
-#include <stdlib.h>
-
-#ifdef _WIN32_WCE
-#include <windows.h>
-#elif defined(GTEST_OS_WINDOWS)
-#include <direct.h>
-#include <io.h>
-#include <sys/stat.h>
-#elif defined(GTEST_OS_SYMBIAN)
-// Symbian OpenC has PATH_MAX in sys/syslimits.h
-#include <sys/syslimits.h>
-#include <unistd.h>
-#else
-#include <limits.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#endif // _WIN32_WCE or _WIN32
-
-#ifdef GTEST_OS_WINDOWS
-#define GTEST_PATH_MAX_ _MAX_PATH
-#elif defined(PATH_MAX)
-#define GTEST_PATH_MAX_ PATH_MAX
-#elif defined(_XOPEN_PATH_MAX)
-#define GTEST_PATH_MAX_ _XOPEN_PATH_MAX
-#else
-#define GTEST_PATH_MAX_ _POSIX_PATH_MAX
-#endif // GTEST_OS_WINDOWS
-
-#include <gtest/internal/gtest-string.h>
-
-namespace testing {
-namespace internal {
-
-#ifdef GTEST_OS_WINDOWS
-const char kPathSeparator = '\\';
-const char kPathSeparatorString[] = "\\";
-#ifdef _WIN32_WCE
-// Windows CE doesn't have a current directory. You should not use
-// the current directory in tests on Windows CE, but this at least
-// provides a reasonable fallback.
-const char kCurrentDirectoryString[] = "\\";
-// Windows CE doesn't define INVALID_FILE_ATTRIBUTES
-const DWORD kInvalidFileAttributes = 0xffffffff;
-#else
-const char kCurrentDirectoryString[] = ".\\";
-#endif // _WIN32_WCE
-#else
-const char kPathSeparator = '/';
-const char kPathSeparatorString[] = "/";
-const char kCurrentDirectoryString[] = "./";
-#endif // GTEST_OS_WINDOWS
-
-// Returns the current working directory, or "" if unsuccessful.
-FilePath FilePath::GetCurrentDir() {
-#ifdef _WIN32_WCE
-// Windows CE doesn't have a current directory, so we just return
-// something reasonable.
- return FilePath(kCurrentDirectoryString);
-#elif defined(GTEST_OS_WINDOWS)
- char cwd[GTEST_PATH_MAX_ + 1] = {};
- return FilePath(_getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd);
-#else
- char cwd[GTEST_PATH_MAX_ + 1] = {};
- return FilePath(getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd);
-#endif
-}
-
-// Returns a copy of the FilePath with the case-insensitive extension removed.
-// Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
-// FilePath("dir/file"). If a case-insensitive extension is not
-// found, returns a copy of the original FilePath.
-FilePath FilePath::RemoveExtension(const char* extension) const {
- String dot_extension(String::Format(".%s", extension));
- if (pathname_.EndsWithCaseInsensitive(dot_extension.c_str())) {
- return FilePath(String(pathname_.c_str(), pathname_.GetLength() - 4));
- }
- return *this;
-}
-
-// Returns a copy of the FilePath with the directory part removed.
-// Example: FilePath("path/to/file").RemoveDirectoryName() returns
-// FilePath("file"). If there is no directory part ("just_a_file"), it returns
-// the FilePath unmodified. If there is no file part ("just_a_dir/") it
-// returns an empty FilePath ("").
-// On Windows platform, '\' is the path separator, otherwise it is '/'.
-FilePath FilePath::RemoveDirectoryName() const {
- const char* const last_sep = strrchr(c_str(), kPathSeparator);
- return last_sep ? FilePath(String(last_sep + 1)) : *this;
-}
-
-// RemoveFileName returns the directory path with the filename removed.
-// Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
-// If the FilePath is "a_file" or "/a_file", RemoveFileName returns
-// FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
-// not have a file, like "just/a/dir/", it returns the FilePath unmodified.
-// On Windows platform, '\' is the path separator, otherwise it is '/'.
-FilePath FilePath::RemoveFileName() const {
- const char* const last_sep = strrchr(c_str(), kPathSeparator);
- return FilePath(last_sep ? String(c_str(), last_sep + 1 - c_str())
- : String(kCurrentDirectoryString));
-}
-
-// Helper functions for naming files in a directory for xml output.
-
-// Given directory = "dir", base_name = "test", number = 0,
-// extension = "xml", returns "dir/test.xml". If number is greater
-// than zero (e.g., 12), returns "dir/test_12.xml".
-// On Windows platform, uses \ as the separator rather than /.
-FilePath FilePath::MakeFileName(const FilePath& directory,
- const FilePath& base_name,
- int number,
- const char* extension) {
- FilePath dir(directory.RemoveTrailingPathSeparator());
- if (number == 0) {
- return FilePath(String::Format("%s%c%s.%s", dir.c_str(), kPathSeparator,
- base_name.c_str(), extension));
- }
- return FilePath(String::Format("%s%c%s_%d.%s", dir.c_str(), kPathSeparator,
- base_name.c_str(), number, extension));
-}
-
-// Returns true if pathname describes something findable in the file-system,
-// either a file, directory, or whatever.
-bool FilePath::FileOrDirectoryExists() const {
-#ifdef GTEST_OS_WINDOWS
-#ifdef _WIN32_WCE
- LPCWSTR unicode = String::AnsiToUtf16(pathname_.c_str());
- const DWORD attributes = GetFileAttributes(unicode);
- delete [] unicode;
- return attributes != kInvalidFileAttributes;
-#else
- struct _stat file_stat = {};
- return _stat(pathname_.c_str(), &file_stat) == 0;
-#endif // _WIN32_WCE
-#else
- struct stat file_stat = {};
- return stat(pathname_.c_str(), &file_stat) == 0;
-#endif // GTEST_OS_WINDOWS
-}
-
-// Returns true if pathname describes a directory in the file-system
-// that exists.
-bool FilePath::DirectoryExists() const {
- bool result = false;
-#ifdef GTEST_OS_WINDOWS
- // Don't strip off trailing separator if path is a root directory on
- // Windows (like "C:\\").
- const FilePath& path(IsRootDirectory() ? *this :
- RemoveTrailingPathSeparator());
-#ifdef _WIN32_WCE
- LPCWSTR unicode = String::AnsiToUtf16(path.c_str());
- const DWORD attributes = GetFileAttributes(unicode);
- delete [] unicode;
- if ((attributes != kInvalidFileAttributes) &&
- (attributes & FILE_ATTRIBUTE_DIRECTORY)) {
- result = true;
- }
-#else
- struct _stat file_stat = {};
- result = _stat(path.c_str(), &file_stat) == 0 &&
- (_S_IFDIR & file_stat.st_mode) != 0;
-#endif // _WIN32_WCE
-#else
- struct stat file_stat = {};
- result = stat(pathname_.c_str(), &file_stat) == 0 &&
- S_ISDIR(file_stat.st_mode);
-#endif // GTEST_OS_WINDOWS
- return result;
-}
-
-// Returns true if pathname describes a root directory. (Windows has one
-// root directory per disk drive.)
-bool FilePath::IsRootDirectory() const {
-#ifdef GTEST_OS_WINDOWS
- const char* const name = pathname_.c_str();
- return pathname_.GetLength() == 3 &&
- ((name[0] >= 'a' && name[0] <= 'z') ||
- (name[0] >= 'A' && name[0] <= 'Z')) &&
- name[1] == ':' &&
- name[2] == kPathSeparator;
-#else
- return pathname_ == kPathSeparatorString;
-#endif
-}
-
-// Returns a pathname for a file that does not currently exist. The pathname
-// will be directory/base_name.extension or
-// directory/base_name_<number>.extension if directory/base_name.extension
-// already exists. The number will be incremented until a pathname is found
-// that does not already exist.
-// Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
-// There could be a race condition if two or more processes are calling this
-// function at the same time -- they could both pick the same filename.
-FilePath FilePath::GenerateUniqueFileName(const FilePath& directory,
- const FilePath& base_name,
- const char* extension) {
- FilePath full_pathname;
- int number = 0;
- do {
- full_pathname.Set(MakeFileName(directory, base_name, number++, extension));
- } while (full_pathname.FileOrDirectoryExists());
- return full_pathname;
-}
-
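The loop above just keeps probing candidate names until one is free. A minimal sketch of the same probing idea using std::filesystem instead of gtest's FilePath (all names below are illustrative, not part of gtest, and the race caveat noted above applies equally):

#include <cstdio>
#include <filesystem>
#include <string>

namespace fs = std::filesystem;

// Probe "dir/base.ext", then "dir/base_1.ext", "dir/base_2.ext", ... until a
// candidate that does not exist yet is found.
static fs::path MakeUniqueName(const fs::path& dir, const std::string& base,
                               const std::string& ext) {
  int number = 0;
  fs::path candidate;
  do {
    const std::string name = (number == 0)
        ? base + "." + ext
        : base + "_" + std::to_string(number) + "." + ext;
    candidate = dir / name;
    ++number;
  } while (fs::exists(candidate));
  return candidate;
}

int main() {
  std::printf("%s\n", MakeUniqueName(".", "foo_test", "xml").string().c_str());
  return 0;
}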
-// Returns true if FilePath ends with a path separator, which indicates that
-// it is intended to represent a directory. Returns false otherwise.
-// This does NOT check that a directory (or file) actually exists.
-bool FilePath::IsDirectory() const {
- return pathname_.EndsWith(kPathSeparatorString);
-}
-
-// Create directories so that path exists. Returns true if successful or if
-// the directories already exist; returns false if unable to create directories
-// for any reason.
-bool FilePath::CreateDirectoriesRecursively() const {
- if (!this->IsDirectory()) {
- return false;
- }
-
- if (pathname_.GetLength() == 0 || this->DirectoryExists()) {
- return true;
- }
-
- const FilePath parent(this->RemoveTrailingPathSeparator().RemoveFileName());
- return parent.CreateDirectoriesRecursively() && this->CreateFolder();
-}
-
-// Create the directory so that path exists. Returns true if successful or
-// if the directory already exists; returns false if unable to create the
-// directory for any reason, including if the parent directory does not
-// exist. Not named "CreateDirectory" because that's a macro on Windows.
-bool FilePath::CreateFolder() const {
-#ifdef GTEST_OS_WINDOWS
-#ifdef _WIN32_WCE
- FilePath removed_sep(this->RemoveTrailingPathSeparator());
- LPCWSTR unicode = String::AnsiToUtf16(removed_sep.c_str());
- int result = CreateDirectory(unicode, NULL) ? 0 : -1;
- delete [] unicode;
-#else
- int result = _mkdir(pathname_.c_str());
-#endif // _WIN32_WCE
-#else
- int result = mkdir(pathname_.c_str(), 0777);
-#endif // GTEST_OS_WINDOWS
- if (result == -1) {
- return this->DirectoryExists(); // An error is OK if the directory exists.
- }
- return true; // No error.
-}
-
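CreateDirectoriesRecursively() creates the parent first and only then the leaf. A rough POSIX-only sketch of that parent-first recursion with plain mkdir() (the helper below is illustrative, and unlike the code above it treats any existing entry, file or directory, as success):

#include <sys/stat.h>
#include <sys/types.h>
#include <cerrno>
#include <string>

// Create every missing component of 'path' ('/'-separated, no trailing '/'):
// try the leaf, and on failure create the parent first, then retry the leaf.
static bool MakeDirs(const std::string& path) {
  if (path.empty()) return true;
  if (mkdir(path.c_str(), 0777) == 0 || errno == EEXIST) return true;
  const std::string::size_type sep = path.find_last_of('/');
  if (sep == std::string::npos) return false;
  return MakeDirs(path.substr(0, sep)) &&
         (mkdir(path.c_str(), 0777) == 0 || errno == EEXIST);
}

int main() { return MakeDirs("demo/a/b/c") ? 0 : 1; }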
-// If input name has a trailing separator character, remove it and return the
-// name, otherwise return the name string unmodified.
-// On Windows platform, uses \ as the separator, other platforms use /.
-FilePath FilePath::RemoveTrailingPathSeparator() const {
- return pathname_.EndsWith(kPathSeparatorString)
- ? FilePath(String(pathname_.c_str(), pathname_.GetLength() - 1))
- : *this;
-}
-
-// Normalize removes any redundant separators that might be in the pathname.
-// For example, "bar///foo" becomes "bar/foo". Does not eliminate other
-// redundancies that might be in a pathname involving "." or "..".
-void FilePath::Normalize() {
- if (pathname_.c_str() == NULL) {
- pathname_ = "";
- return;
- }
- const char* src = pathname_.c_str();
- char* const dest = new char[pathname_.GetLength() + 1];
- char* dest_ptr = dest;
- memset(dest_ptr, 0, pathname_.GetLength() + 1);
-
- while (*src != '\0') {
- *dest_ptr++ = *src;
- if (*src != kPathSeparator)
- src++;
- else
- while (*src == kPathSeparator)
- src++;
- }
- *dest_ptr = '\0';
- pathname_ = dest;
- delete[] dest;
-}
-
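Normalize() does a single forward copy, emitting one separator for every run of separators. The same idea on std::string, as a standalone illustration (not gtest code):

#include <iostream>
#include <string>

// Collapse consecutive '/' characters into one; "." and ".." are left alone,
// matching the limited scope of FilePath::Normalize().
static std::string CollapseSeparators(const std::string& in) {
  std::string out;
  for (char c : in) {
    if (c == '/' && !out.empty() && out.back() == '/')
      continue;  // skip the redundant separator
    out.push_back(c);
  }
  return out;
}

int main() {
  std::cout << CollapseSeparators("bar///foo//baz") << "\n";  // bar/foo/baz
  return 0;
}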
-} // namespace internal
-} // namespace testing
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/gtest-port.cc b/libclamav/c++/llvm/utils/unittest/googletest/gtest-port.cc
deleted file mode 100644
index 9878cae..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/gtest-port.cc
+++ /dev/null
@@ -1,332 +0,0 @@
-// Copyright 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan at google.com (Zhanyong Wan)
-
-#include <gtest/internal/gtest-port.h>
-
-#include <limits.h>
-#include <stdlib.h>
-#include <stdio.h>
-
-#ifdef GTEST_HAS_DEATH_TEST
-#include <regex.h>
-#endif // GTEST_HAS_DEATH_TEST
-
-#ifdef _WIN32_WCE
-#include <windows.h> // For TerminateProcess()
-#endif // _WIN32_WCE
-
-#include <gtest/gtest-spi.h>
-#include <gtest/gtest-message.h>
-#include <gtest/internal/gtest-string.h>
-
-
-namespace testing {
-namespace internal {
-
-#ifdef GTEST_HAS_DEATH_TEST
-
-// Implements RE. Currently only needed for death tests.
-
-RE::~RE() {
- regfree(&partial_regex_);
- regfree(&full_regex_);
- free(const_cast<char*>(pattern_));
-}
-
-// Returns true iff regular expression re matches the entire str.
-bool RE::FullMatch(const char* str, const RE& re) {
- if (!re.is_valid_) return false;
-
- regmatch_t match;
- return regexec(&re.full_regex_, str, 1, &match, 0) == 0;
-}
-
-// Returns true iff regular expression re matches a substring of str
-// (including str itself).
-bool RE::PartialMatch(const char* str, const RE& re) {
- if (!re.is_valid_) return false;
-
- regmatch_t match;
- return regexec(&re.partial_regex_, str, 1, &match, 0) == 0;
-}
-
-// Initializes an RE from its string representation.
-void RE::Init(const char* regex) {
- pattern_ = strdup(regex);
-
- // Reserves enough bytes to hold the regular expression used for a
- // full match.
- const size_t full_regex_len = strlen(regex) + 10;
- char* const full_pattern = new char[full_regex_len];
-
- snprintf(full_pattern, full_regex_len, "^(%s)$", regex);
- is_valid_ = regcomp(&full_regex_, full_pattern, REG_EXTENDED) == 0;
- // We want to call regcomp(&partial_regex_, ...) even if the
- // previous expression returns false. Otherwise partial_regex_ may
- // not be properly initialized and may cause trouble when it's
- // freed.
- is_valid_ = (regcomp(&partial_regex_, regex, REG_EXTENDED) == 0) && is_valid_;
- EXPECT_TRUE(is_valid_)
- << "Regular expression \"" << regex
- << "\" is not a valid POSIX Extended regular expression.";
-
- delete[] full_pattern;
-}
-
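RE::Init() compiles the pattern twice: verbatim for partial (substring) matches, and wrapped in "^(...)$" so that regexec() only succeeds on a whole-string match. A compact sketch of that anchoring trick, assuming a POSIX platform (standalone demo, not gtest code):

#include <regex.h>
#include <cstdio>
#include <string>

int main() {
  const std::string regex = "a.c";
  const std::string full = "^(" + regex + ")$";  // anchored copy for full matches

  regex_t partial_re, full_re;
  regcomp(&partial_re, regex.c_str(), REG_EXTENDED);
  regcomp(&full_re, full.c_str(), REG_EXTENDED);

  regmatch_t m;
  std::printf("partial(\"xabcx\") = %d\n", regexec(&partial_re, "xabcx", 1, &m, 0) == 0);  // 1
  std::printf("full(\"xabcx\")    = %d\n", regexec(&full_re, "xabcx", 1, &m, 0) == 0);     // 0
  std::printf("full(\"abc\")      = %d\n", regexec(&full_re, "abc", 1, &m, 0) == 0);       // 1

  regfree(&partial_re);
  regfree(&full_re);
  return 0;
}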
-#endif // GTEST_HAS_DEATH_TEST
-
-// Logs a message at the given severity level.
-void GTestLog(GTestLogSeverity severity, const char* file,
- int line, const char* msg) {
- const char* const marker =
- severity == GTEST_INFO ? "[ INFO ]" :
- severity == GTEST_WARNING ? "[WARNING]" :
- severity == GTEST_ERROR ? "[ ERROR ]" : "[ FATAL ]";
- fprintf(stderr, "\n%s %s:%d: %s\n", marker, file, line, msg);
- if (severity == GTEST_FATAL) {
- abort();
- }
-}
-
-#ifdef GTEST_HAS_DEATH_TEST
-
-// Defines the stderr capturer.
-
-class CapturedStderr {
- public:
- // The ctor redirects stderr to a temporary file.
- CapturedStderr() {
- uncaptured_fd_ = dup(STDERR_FILENO);
-
- // There's no guarantee that a test has write access to the
- // current directory, so we create the temporary file in the /tmp
- // directory instead.
- char name_template[] = "/tmp/captured_stderr.XXXXXX";
- const int captured_fd = mkstemp(name_template);
- filename_ = name_template;
- fflush(NULL);
- dup2(captured_fd, STDERR_FILENO);
- close(captured_fd);
- }
-
- ~CapturedStderr() {
- remove(filename_.c_str());
- }
-
- // Stops redirecting stderr.
- void StopCapture() {
- // Restores the original stream.
- fflush(NULL);
- dup2(uncaptured_fd_, STDERR_FILENO);
- close(uncaptured_fd_);
- uncaptured_fd_ = -1;
- }
-
- // Returns the name of the temporary file holding the stderr output.
- // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we
- // can use it here.
- ::std::string filename() const { return filename_; }
-
- private:
- int uncaptured_fd_;
- ::std::string filename_;
-};
-
-static CapturedStderr* g_captured_stderr = NULL;
-
-// Returns the size (in bytes) of a file.
-static size_t GetFileSize(FILE * file) {
- fseek(file, 0, SEEK_END);
- return static_cast<size_t>(ftell(file));
-}
-
-// Reads the entire content of a file as a string.
-// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can
-// use it here.
-static ::std::string ReadEntireFile(FILE * file) {
- const size_t file_size = GetFileSize(file);
- char* const buffer = new char[file_size];
-
- size_t bytes_last_read = 0; // # of bytes read in the last fread()
- size_t bytes_read = 0; // # of bytes read so far
-
- fseek(file, 0, SEEK_SET);
-
- // Keeps reading the file until we cannot read further or the
- // pre-determined file size is reached.
- do {
- bytes_last_read = fread(buffer+bytes_read, 1, file_size-bytes_read, file);
- bytes_read += bytes_last_read;
- } while (bytes_last_read > 0 && bytes_read < file_size);
-
- const ::std::string content(buffer, buffer+bytes_read);
- delete[] buffer;
-
- return content;
-}
-
-// Starts capturing stderr.
-void CaptureStderr() {
- if (g_captured_stderr != NULL) {
- GTEST_LOG_(FATAL, "Only one stderr capturer can exist at one time.");
- }
- g_captured_stderr = new CapturedStderr;
-}
-
-// Stops capturing stderr and returns the captured string.
-// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can
-// use it here.
-::std::string GetCapturedStderr() {
- g_captured_stderr->StopCapture();
- FILE* const file = fopen(g_captured_stderr->filename().c_str(), "r");
- const ::std::string content = ReadEntireFile(file);
- fclose(file);
-
- delete g_captured_stderr;
- g_captured_stderr = NULL;
-
- return content;
-}
-
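The capture above is plain file-descriptor plumbing: keep a dup() of stderr, dup2() a temporary file over fd 2, and later restore the saved descriptor and read the file back. A minimal POSIX sketch of that mechanism (standalone demo, not gtest API):

#include <stdlib.h>
#include <unistd.h>
#include <cstdio>
#include <string>

int main() {
  // Point fd 2 (stderr) at a temporary file, keeping a duplicate of the
  // original descriptor so it can be restored afterwards.
  char name[] = "/tmp/stderr_demo.XXXXXX";
  const int tmp_fd = mkstemp(name);
  const int saved_fd = dup(STDERR_FILENO);
  std::fflush(NULL);
  dup2(tmp_fd, STDERR_FILENO);
  close(tmp_fd);

  std::fprintf(stderr, "captured line\n");

  // Restore the original stderr, then read back what was written meanwhile.
  std::fflush(NULL);
  dup2(saved_fd, STDERR_FILENO);
  close(saved_fd);

  std::string content;
  char buf[256];
  if (FILE* f = std::fopen(name, "r")) {
    while (std::fgets(buf, sizeof(buf), f) != NULL) content += buf;
    std::fclose(f);
  }
  std::remove(name);
  std::printf("got: %s", content.c_str());
  return 0;
}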
-// A copy of all command line arguments. Set by InitGoogleTest().
-::std::vector<String> g_argvs;
-
-// Returns the command line as a vector of strings.
-const ::std::vector<String>& GetArgvs() { return g_argvs; }
-
-#endif // GTEST_HAS_DEATH_TEST
-
-#ifdef _WIN32_WCE
-void abort() {
- DebugBreak();
- TerminateProcess(GetCurrentProcess(), 1);
-}
-#endif // _WIN32_WCE
-
-// Returns the name of the environment variable corresponding to the
-// given flag. For example, FlagToEnvVar("foo") will return
-// "GTEST_FOO" in the open-source version.
-static String FlagToEnvVar(const char* flag) {
- const String full_flag = (Message() << GTEST_FLAG_PREFIX << flag).GetString();
-
- Message env_var;
- for (int i = 0; i != full_flag.GetLength(); i++) {
- env_var << static_cast<char>(toupper(full_flag.c_str()[i]));
- }
-
- return env_var.GetString();
-}
-
-// Reads and returns the Boolean environment variable corresponding to
-// the given flag; if it's not set, returns default_value.
-//
-// The value is considered true iff it's not "0".
-bool BoolFromGTestEnv(const char* flag, bool default_value) {
- const String env_var = FlagToEnvVar(flag);
- const char* const string_value = GetEnv(env_var.c_str());
- return string_value == NULL ?
- default_value : strcmp(string_value, "0") != 0;
-}
-
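Together, FlagToEnvVar() and BoolFromGTestEnv() boil down to: uppercase the prefixed flag name, getenv() it, and treat anything other than "0" as true. A compact standalone equivalent (the "GTEST_" prefix is hard-coded here purely for illustration):

#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <string>

// Map a flag name to its environment variable ("foo" -> "GTEST_FOO") and
// read it as a boolean; any value other than "0" counts as true.
static bool BoolFromEnv(const char* flag, bool default_value) {
  std::string var = "GTEST_";
  for (const char* p = flag; *p != '\0'; ++p)
    var += static_cast<char>(std::toupper(static_cast<unsigned char>(*p)));
  const char* value = std::getenv(var.c_str());
  return value == NULL ? default_value : std::string(value) != "0";
}

int main() {
  std::printf("%d\n", BoolFromEnv("break_on_failure", false));
  return 0;
}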
-// Parses 'str' for a 32-bit signed integer. If successful, writes
-// the result to *value and returns true; otherwise leaves *value
-// unchanged and returns false.
-bool ParseInt32(const Message& src_text, const char* str, Int32* value) {
- // Parses the environment variable as a decimal integer.
- char* end = NULL;
- const long long_value = strtol(str, &end, 10); // NOLINT
-
- // Has strtol() consumed all characters in the string?
- if (*end != '\0') {
- // No - an invalid character was encountered.
- Message msg;
- msg << "WARNING: " << src_text
- << " is expected to be a 32-bit integer, but actually"
- << " has value \"" << str << "\".\n";
- printf("%s", msg.GetString().c_str());
- fflush(stdout);
- return false;
- }
-
- // Is the parsed value in the range of an Int32?
- const Int32 result = static_cast<Int32>(long_value);
- if (long_value == LONG_MAX || long_value == LONG_MIN ||
- // The parsed value overflows as a long. (strtol() returns
- // LONG_MAX or LONG_MIN when the input overflows.)
- result != long_value
- // The parsed value overflows as an Int32.
- ) {
- Message msg;
- msg << "WARNING: " << src_text
- << " is expected to be a 32-bit integer, but actually"
- << " has value " << str << ", which overflows.\n";
- printf("%s", msg.GetString().c_str());
- fflush(stdout);
- return false;
- }
-
- *value = result;
- return true;
-}
-
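ParseInt32() layers two checks on top of strtol(): reject trailing characters (*end != '\0'), and reject values that overflowed either as a long (strtol returns LONG_MAX/LONG_MIN) or when narrowed to 32 bits. A standalone sketch of the same idiom (helper name is illustrative, not gtest's):

#include <climits>
#include <cstdio>
#include <cstdlib>
#include <stdint.h>

// Returns true and stores the value iff 'str' is a clean 32-bit decimal.
static bool ParseAsInt32(const char* str, int32_t* value) {
  char* end = NULL;
  const long parsed = std::strtol(str, &end, 10);
  if (*end != '\0') return false;                              // trailing junk
  if (parsed == LONG_MAX || parsed == LONG_MIN) return false;  // long overflow
  if (parsed != static_cast<int32_t>(parsed)) return false;    // > 32 bits
  *value = static_cast<int32_t>(parsed);
  return true;
}

int main() {
  int32_t v = 0;
  std::printf("%d\n", ParseAsInt32("123", &v));          // 1
  std::printf("%d\n", ParseAsInt32("12abc", &v));        // 0
  std::printf("%d\n", ParseAsInt32("99999999999", &v));  // 0
  return 0;
}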
-// Reads and returns a 32-bit integer stored in the environment
-// variable corresponding to the given flag; if it isn't set or
-// doesn't represent a valid 32-bit integer, returns default_value.
-Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
- const String env_var = FlagToEnvVar(flag);
- const char* const string_value = GetEnv(env_var.c_str());
- if (string_value == NULL) {
- // The environment variable is not set.
- return default_value;
- }
-
- Int32 result = default_value;
- if (!ParseInt32(Message() << "Environment variable " << env_var,
- string_value, &result)) {
- printf("The default value %s is used.\n",
- (Message() << default_value).GetString().c_str());
- fflush(stdout);
- return default_value;
- }
-
- return result;
-}
-
-// Reads and returns the string environment variable corresponding to
-// the given flag; if it's not set, returns default_value.
-const char* StringFromGTestEnv(const char* flag, const char* default_value) {
- const String env_var = FlagToEnvVar(flag);
- const char* const value = GetEnv(env_var.c_str());
- return value == NULL ? default_value : value;
-}
-
-} // namespace internal
-} // namespace testing
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/gtest-test-part.cc b/libclamav/c++/llvm/utils/unittest/googletest/gtest-test-part.cc
deleted file mode 100644
index 2e80f21..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/gtest-test-part.cc
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: mheule at google.com (Markus Heule)
-//
-// The Google C++ Testing Framework (Google Test)
-
-#include <gtest/gtest-test-part.h>
-
-// Indicates that this translation unit is part of Google Test's
-// implementation. It must come before gtest-internal-inl.h is
-// included, or there will be a compiler error. This trick is to
-// prevent a user from accidentally including gtest-internal-inl.h in
-// his code.
-#define GTEST_IMPLEMENTATION
-#include "gtest/internal/gtest-internal-inl.h"
-#undef GTEST_IMPLEMENTATION
-
-namespace testing {
-
-// Gets the summary of the failure message by omitting the stack trace
-// in it.
-internal::String TestPartResult::ExtractSummary(const char* message) {
- const char* const stack_trace = strstr(message, internal::kStackTraceMarker);
- return stack_trace == NULL ? internal::String(message) :
- internal::String(message, stack_trace - message);
-}
-
-// Prints a TestPartResult object.
-std::ostream& operator<<(std::ostream& os, const TestPartResult& result) {
- return os << result.file_name() << ":"
- << result.line_number() << ": "
- << (result.type() == TPRT_SUCCESS ? "Success" :
- result.type() == TPRT_FATAL_FAILURE ? "Fatal failure" :
- "Non-fatal failure") << ":\n"
- << result.message() << std::endl;
-}
-
-// Constructs an empty TestPartResultArray.
-TestPartResultArray::TestPartResultArray()
- : list_(new internal::List<TestPartResult>) {
-}
-
-// Destructs a TestPartResultArray.
-TestPartResultArray::~TestPartResultArray() {
- delete list_;
-}
-
-// Appends a TestPartResult to the array.
-void TestPartResultArray::Append(const TestPartResult& result) {
- list_->PushBack(result);
-}
-
-// Returns the TestPartResult at the given index (0-based).
-const TestPartResult& TestPartResultArray::GetTestPartResult(int index) const {
- if (index < 0 || index >= size()) {
- printf("\nInvalid index (%d) into TestPartResultArray.\n", index);
- internal::abort();
- }
-
- const internal::ListNode<TestPartResult>* p = list_->Head();
- for (int i = 0; i < index; i++) {
- p = p->next();
- }
-
- return p->element();
-}
-
-// Returns the number of TestPartResult objects in the array.
-int TestPartResultArray::size() const {
- return list_->size();
-}
-
-namespace internal {
-
-HasNewFatalFailureHelper::HasNewFatalFailureHelper()
- : has_new_fatal_failure_(false),
- original_reporter_(UnitTest::GetInstance()->impl()->
- GetTestPartResultReporterForCurrentThread()) {
- UnitTest::GetInstance()->impl()->SetTestPartResultReporterForCurrentThread(
- this);
-}
-
-HasNewFatalFailureHelper::~HasNewFatalFailureHelper() {
- UnitTest::GetInstance()->impl()->SetTestPartResultReporterForCurrentThread(
- original_reporter_);
-}
-
-void HasNewFatalFailureHelper::ReportTestPartResult(
- const TestPartResult& result) {
- if (result.fatally_failed())
- has_new_fatal_failure_ = true;
- original_reporter_->ReportTestPartResult(result);
-}
-
-} // namespace internal
-
-} // namespace testing
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/gtest-typed-test.cc b/libclamav/c++/llvm/utils/unittest/googletest/gtest-typed-test.cc
deleted file mode 100644
index d42a159..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/gtest-typed-test.cc
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2008 Google Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan at google.com (Zhanyong Wan)
-
-#include <gtest/gtest-typed-test.h>
-#include <gtest/gtest.h>
-
-namespace testing {
-namespace internal {
-
-#ifdef GTEST_HAS_TYPED_TEST_P
-
-// Verifies that registered_tests match the test names in
-// defined_test_names_; returns registered_tests if successful, or
-// aborts the program otherwise.
-const char* TypedTestCasePState::VerifyRegisteredTestNames(
- const char* file, int line, const char* registered_tests) {
- typedef ::std::set<const char*>::const_iterator DefinedTestIter;
- registered_ = true;
-
- Message errors;
- ::std::set<String> tests;
- for (const char* names = registered_tests; names != NULL;
- names = SkipComma(names)) {
- const String name = GetPrefixUntilComma(names);
- if (tests.count(name) != 0) {
- errors << "Test " << name << " is listed more than once.\n";
- continue;
- }
-
- bool found = false;
- for (DefinedTestIter it = defined_test_names_.begin();
- it != defined_test_names_.end();
- ++it) {
- if (name == *it) {
- found = true;
- break;
- }
- }
-
- if (found) {
- tests.insert(name);
- } else {
- errors << "No test named " << name
- << " can be found in this test case.\n";
- }
- }
-
- for (DefinedTestIter it = defined_test_names_.begin();
- it != defined_test_names_.end();
- ++it) {
- if (tests.count(*it) == 0) {
- errors << "You forgot to list test " << *it << ".\n";
- }
- }
-
- const String& errors_str = errors.GetString();
- if (errors_str != "") {
- fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(),
- errors_str.c_str());
- abort();
- }
-
- return registered_tests;
-}
-
-#endif // GTEST_HAS_TYPED_TEST_P
-
-} // namespace internal
-} // namespace testing
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/gtest.cc b/libclamav/c++/llvm/utils/unittest/googletest/gtest.cc
deleted file mode 100644
index b5a654f..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/gtest.cc
+++ /dev/null
@@ -1,3951 +0,0 @@
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan at google.com (Zhanyong Wan)
-//
-// The Google C++ Testing Framework (Google Test)
-
-#include <gtest/gtest.h>
-#include <gtest/gtest-spi.h>
-
-#include <ctype.h>
-#include <math.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <wchar.h>
-#include <wctype.h>
-
-#ifdef GTEST_OS_LINUX
-
-// TODO(kenton at google.com): Use autoconf to detect availability of
-// gettimeofday().
-#define GTEST_HAS_GETTIMEOFDAY
-
-#include <fcntl.h>
-#include <limits.h>
-#include <sched.h>
-// Declares vsnprintf(). This header is not available on Windows.
-#include <strings.h>
-#include <sys/mman.h>
-#include <sys/time.h>
-#include <unistd.h>
-#include <string>
-#include <vector>
-
-#elif defined(GTEST_OS_SYMBIAN)
-#define GTEST_HAS_GETTIMEOFDAY
-#include <sys/time.h> // NOLINT
-
-#elif defined(GTEST_OS_ZOS)
-#define GTEST_HAS_GETTIMEOFDAY
-#include <sys/time.h> // NOLINT
-
-// On z/OS we additionally need strings.h for strcasecmp.
-#include <strings.h>
-
-#elif defined(_WIN32_WCE) // We are on Windows CE.
-
-#include <windows.h> // NOLINT
-
-#elif defined(GTEST_OS_WINDOWS) // We are on Windows proper.
-
-#include <io.h> // NOLINT
-#include <sys/timeb.h> // NOLINT
-#include <sys/types.h> // NOLINT
-#include <sys/stat.h> // NOLINT
-
-#if defined(__MINGW__) || defined(__MINGW32__)
-// MinGW has gettimeofday() but not _ftime64().
-// TODO(kenton at google.com): Use autoconf to detect availability of
-// gettimeofday().
-// TODO(kenton at google.com): There are other ways to get the time on
-// Windows, like GetTickCount() or GetSystemTimeAsFileTime(). MinGW
-// supports these. Consider using them instead.
-#define GTEST_HAS_GETTIMEOFDAY
-#include <sys/time.h> // NOLINT
-#endif
-
-// cpplint thinks that the header is already included, so we want to
-// silence it.
-#include <windows.h> // NOLINT
-
-#else
-
-// Assume other platforms have gettimeofday().
-// TODO(kenton at google.com): Use autoconf to detect availability of
-// gettimeofday().
-#define GTEST_HAS_GETTIMEOFDAY
-
-// cpplint thinks that the header is already included, so we want to
-// silence it.
-#include <sys/time.h> // NOLINT
-#include <unistd.h> // NOLINT
-
-#endif
-
-// Indicates that this translation unit is part of Google Test's
-// implementation. It must come before gtest-internal-inl.h is
-// included, or there will be a compiler error. This trick is to
-// prevent a user from accidentally including gtest-internal-inl.h in
-// his code.
-#define GTEST_IMPLEMENTATION
-#include "gtest/internal/gtest-internal-inl.h"
-#undef GTEST_IMPLEMENTATION
-
-#ifdef GTEST_OS_WINDOWS
-#define fileno _fileno
-#define isatty _isatty
-#define vsnprintf _vsnprintf
-#endif // GTEST_OS_WINDOWS
-
-namespace testing {
-
-// Constants.
-
-// A test whose test case name or test name matches this filter is
-// disabled and not run.
-static const char kDisableTestFilter[] = "DISABLED_*:*/DISABLED_*";
-
-// A test case whose name matches this filter is considered a death
-// test case and will be run before test cases whose name doesn't
-// match this filter.
-static const char kDeathTestCaseFilter[] = "*DeathTest:*DeathTest/*";
-
-// A test filter that matches everything.
-static const char kUniversalFilter[] = "*";
-
-// The default output file for XML output.
-static const char kDefaultOutputFile[] = "test_detail.xml";
-
-namespace internal {
-
-// The text used in failure messages to indicate the start of the
-// stack trace.
-const char kStackTraceMarker[] = "\nStack trace:\n";
-
-} // namespace internal
-
-GTEST_DEFINE_bool_(
- break_on_failure,
- internal::BoolFromGTestEnv("break_on_failure", false),
- "True iff a failed assertion should be a debugger break-point.");
-
-GTEST_DEFINE_bool_(
- catch_exceptions,
- internal::BoolFromGTestEnv("catch_exceptions", false),
- "True iff " GTEST_NAME
- " should catch exceptions and treat them as test failures.");
-
-GTEST_DEFINE_string_(
- color,
- internal::StringFromGTestEnv("color", "auto"),
- "Whether to use colors in the output. Valid values: yes, no, "
- "and auto. 'auto' means to use colors if the output is "
- "being sent to a terminal and the TERM environment variable "
- "is set to xterm or xterm-color.");
-
-GTEST_DEFINE_string_(
- filter,
- internal::StringFromGTestEnv("filter", kUniversalFilter),
- "A colon-separated list of glob (not regex) patterns "
- "for filtering the tests to run, optionally followed by a "
- "'-' and a : separated list of negative patterns (tests to "
- "exclude). A test is run if it matches one of the positive "
- "patterns and does not match any of the negative patterns.");
-
-GTEST_DEFINE_bool_(list_tests, false,
- "List all tests without running them.");
-
-GTEST_DEFINE_string_(
- output,
- internal::StringFromGTestEnv("output", ""),
- "A format (currently must be \"xml\"), optionally followed "
- "by a colon and an output file name or directory. A directory "
- "is indicated by a trailing pathname separator. "
- "Examples: \"xml:filename.xml\", \"xml::directoryname/\". "
- "If a directory is specified, output files will be created "
- "within that directory, with file-names based on the test "
- "executable's name and, if necessary, made unique by adding "
- "digits.");
-
-GTEST_DEFINE_bool_(
- print_time,
- internal::BoolFromGTestEnv("print_time", false),
- "True iff " GTEST_NAME
- " should display elapsed time in text output.");
-
-GTEST_DEFINE_int32_(
- repeat,
- internal::Int32FromGTestEnv("repeat", 1),
- "How many times to repeat each test. Specify a negative number "
- "for repeating forever. Useful for shaking out flaky tests.");
-
-GTEST_DEFINE_int32_(
- stack_trace_depth,
- internal::Int32FromGTestEnv("stack_trace_depth", kMaxStackTraceDepth),
- "The maximum number of stack frames to print when an "
- "assertion fails. The valid range is 0 through 100, inclusive.");
-
-GTEST_DEFINE_bool_(
- show_internal_stack_frames, false,
- "True iff " GTEST_NAME " should include internal stack frames when "
- "printing test failure stack traces.");
-
-namespace internal {
-
-// GTestIsInitialized() returns true iff the user has initialized
-// Google Test. Useful for catching the user mistake of not initializing
-// Google Test before calling RUN_ALL_TESTS().
-//
-// A user must call testing::InitGoogleTest() to initialize Google
-// Test. g_init_gtest_count is set to the number of times
-// InitGoogleTest() has been called. We don't protect this variable
-// under a mutex as it is only accessed in the main thread.
-int g_init_gtest_count = 0;
-static bool GTestIsInitialized() { return g_init_gtest_count != 0; }
-
-// Iterates over a list of TestCases, keeping a running sum of the
-// results of calling a given int-returning method on each.
-// Returns the sum.
-static int SumOverTestCaseList(const internal::List<TestCase*>& case_list,
- int (TestCase::*method)() const) {
- int sum = 0;
- for (const internal::ListNode<TestCase*>* node = case_list.Head();
- node != NULL;
- node = node->next()) {
- sum += (node->element()->*method)();
- }
- return sum;
-}
-
-// Returns true iff the test case passed.
-static bool TestCasePassed(const TestCase* test_case) {
- return test_case->should_run() && test_case->Passed();
-}
-
-// Returns true iff the test case failed.
-static bool TestCaseFailed(const TestCase* test_case) {
- return test_case->should_run() && test_case->Failed();
-}
-
-// Returns true iff test_case contains at least one test that should
-// run.
-static bool ShouldRunTestCase(const TestCase* test_case) {
- return test_case->should_run();
-}
-
-// AssertHelper constructor.
-AssertHelper::AssertHelper(TestPartResultType type, const char* file,
- int line, const char* message)
- : type_(type), file_(file), line_(line), message_(message) {
-}
-
-// Message assignment, for assertion streaming support.
-void AssertHelper::operator=(const Message& message) const {
- UnitTest::GetInstance()->
- AddTestPartResult(type_, file_, line_,
- AppendUserMessage(message_, message),
- UnitTest::GetInstance()->impl()
- ->CurrentOsStackTraceExceptTop(1)
- // Skips the stack frame for this function itself.
- ); // NOLINT
-}
-
-// Mutex for linked pointers.
-Mutex g_linked_ptr_mutex(Mutex::NO_CONSTRUCTOR_NEEDED_FOR_STATIC_MUTEX);
-
-// Application pathname gotten in InitGoogleTest.
-String g_executable_path;
-
-// Returns the current application's name, removing directory path if that
-// is present.
-FilePath GetCurrentExecutableName() {
- FilePath result;
-
-#if defined(_WIN32_WCE) || defined(GTEST_OS_WINDOWS)
- result.Set(FilePath(g_executable_path).RemoveExtension("exe"));
-#else
- result.Set(FilePath(g_executable_path));
-#endif // _WIN32_WCE || GTEST_OS_WINDOWS
-
- return result.RemoveDirectoryName();
-}
-
-// Functions for processing the gtest_output flag.
-
-// Returns the output format, or "" for normal printed output.
-String UnitTestOptions::GetOutputFormat() {
- const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
- if (gtest_output_flag == NULL) return String("");
-
- const char* const colon = strchr(gtest_output_flag, ':');
- return (colon == NULL) ?
- String(gtest_output_flag) :
- String(gtest_output_flag, colon - gtest_output_flag);
-}
-
-// Returns the name of the requested output file, or the default if none
-// was explicitly specified.
-String UnitTestOptions::GetOutputFile() {
- const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
- if (gtest_output_flag == NULL)
- return String("");
-
- const char* const colon = strchr(gtest_output_flag, ':');
- if (colon == NULL)
- return String(kDefaultOutputFile);
-
- internal::FilePath output_name(colon + 1);
- if (!output_name.IsDirectory())
- return output_name.ToString();
-
- internal::FilePath result(internal::FilePath::GenerateUniqueFileName(
- output_name, internal::GetCurrentExecutableName(),
- GetOutputFormat().c_str()));
- return result.ToString();
-}
-
-// Returns true iff the wildcard pattern matches the string. The
-// first ':' or '\0' character in pattern marks the end of it.
-//
-// This recursive algorithm isn't very efficient, but is clear and
-// works well enough for matching test names, which are short.
-bool UnitTestOptions::PatternMatchesString(const char *pattern,
- const char *str) {
- switch (*pattern) {
- case '\0':
- case ':': // Either ':' or '\0' marks the end of the pattern.
- return *str == '\0';
- case '?': // Matches any single character.
- return *str != '\0' && PatternMatchesString(pattern + 1, str + 1);
- case '*': // Matches any string (possibly empty) of characters.
- return (*str != '\0' && PatternMatchesString(pattern, str + 1)) ||
- PatternMatchesString(pattern + 1, str);
- default: // Non-special character. Matches itself.
- return *pattern == *str &&
- PatternMatchesString(pattern + 1, str + 1);
- }
-}
-
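The matcher above is a small recursion: '?' consumes exactly one character, '*' either consumes one character of the subject or is dropped from the pattern, and ':' or '\0' terminates the pattern. A self-contained sketch of the same recursion, minus the ':' handling (illustrative, not gtest code):

#include <cstdio>

// Glob-style match supporting '*' (any run of characters, possibly empty)
// and '?' (exactly one character).
static bool GlobMatch(const char* pattern, const char* str) {
  switch (*pattern) {
    case '\0':
      return *str == '\0';
    case '?':
      return *str != '\0' && GlobMatch(pattern + 1, str + 1);
    case '*':
      return (*str != '\0' && GlobMatch(pattern, str + 1)) ||
             GlobMatch(pattern + 1, str);
    default:
      return *pattern == *str && GlobMatch(pattern + 1, str + 1);
  }
}

int main() {
  std::printf("%d\n", GlobMatch("Foo*.Bar?", "FooTest.Barz"));  // 1
  std::printf("%d\n", GlobMatch("Foo*.Bar?", "FooTest.Baz"));   // 0
  return 0;
}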
-bool UnitTestOptions::MatchesFilter(const String& name, const char* filter) {
- const char *cur_pattern = filter;
- while (true) {
- if (PatternMatchesString(cur_pattern, name.c_str())) {
- return true;
- }
-
- // Finds the next pattern in the filter.
- cur_pattern = strchr(cur_pattern, ':');
-
- // Returns if no more pattern can be found.
- if (cur_pattern == NULL) {
- return false;
- }
-
- // Skips the pattern separator (the ':' character).
- cur_pattern++;
- }
-}
-
-// TODO(keithray): move String function implementations to gtest-string.cc.
-
-// Returns true iff the user-specified filter matches the test case
-// name and the test name.
-bool UnitTestOptions::FilterMatchesTest(const String &test_case_name,
- const String &test_name) {
- const String& full_name = String::Format("%s.%s",
- test_case_name.c_str(),
- test_name.c_str());
-
- // Split --gtest_filter at '-', if there is one, to separate into
- // positive filter and negative filter portions
- const char* const p = GTEST_FLAG(filter).c_str();
- const char* const dash = strchr(p, '-');
- String positive;
- String negative;
- if (dash == NULL) {
- positive = GTEST_FLAG(filter).c_str(); // Whole string is a positive filter
- negative = String("");
- } else {
- positive.Set(p, dash - p); // Everything up to the dash
- negative = String(dash+1); // Everything after the dash
- if (positive.empty()) {
- // Treat '-test1' as the same as '*-test1'
- positive = kUniversalFilter;
- }
- }
-
- // A filter is a colon-separated list of patterns. It matches a
- // test if any pattern in it matches the test.
- return (MatchesFilter(full_name, positive.c_str()) &&
- !MatchesFilter(full_name, negative.c_str()));
-}
-
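Concretely, a filter such as "FooTest.*:BarTest.*-FooTest.Slow" splits at the first '-' into a positive list ("FooTest.*:BarTest.*") and a negative list ("FooTest.Slow"); a test's "Case.Name" string must match some positive pattern and none of the negative ones. A tiny sketch of just that split (assumes the patterns themselves contain no '-'):

#include <cstdio>
#include <string>

int main() {
  const std::string filter = "FooTest.*:BarTest.*-FooTest.Slow";
  const std::string::size_type dash = filter.find('-');
  const std::string positive =
      dash == std::string::npos ? filter : filter.substr(0, dash);
  const std::string negative =
      dash == std::string::npos ? std::string("") : filter.substr(dash + 1);
  std::printf("positive: %s\nnegative: %s\n", positive.c_str(), negative.c_str());
  return 0;
}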
-#ifdef GTEST_OS_WINDOWS
-// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
-// given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
-// This function is useful as an __except condition.
-int UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) {
- // Google Test should handle an exception if:
- // 1. the user wants it to, AND
- // 2. this is not a breakpoint exception.
- return (GTEST_FLAG(catch_exceptions) &&
- exception_code != EXCEPTION_BREAKPOINT) ?
- EXCEPTION_EXECUTE_HANDLER :
- EXCEPTION_CONTINUE_SEARCH;
-}
-#endif // GTEST_OS_WINDOWS
-
-} // namespace internal
-
-// The interface for printing the result of a UnitTest
-class UnitTestEventListenerInterface {
- public:
- // The d'tor is pure virtual as this is an abstract class.
- virtual ~UnitTestEventListenerInterface() = 0;
-
- // Called before the unit test starts.
- virtual void OnUnitTestStart(const UnitTest*) {}
-
- // Called after the unit test ends.
- virtual void OnUnitTestEnd(const UnitTest*) {}
-
- // Called before the test case starts.
- virtual void OnTestCaseStart(const TestCase*) {}
-
- // Called after the test case ends.
- virtual void OnTestCaseEnd(const TestCase*) {}
-
- // Called before the global set-up starts.
- virtual void OnGlobalSetUpStart(const UnitTest*) {}
-
- // Called after the global set-up ends.
- virtual void OnGlobalSetUpEnd(const UnitTest*) {}
-
- // Called before the global tear-down starts.
- virtual void OnGlobalTearDownStart(const UnitTest*) {}
-
- // Called after the global tear-down ends.
- virtual void OnGlobalTearDownEnd(const UnitTest*) {}
-
- // Called before the test starts.
- virtual void OnTestStart(const TestInfo*) {}
-
- // Called after the test ends.
- virtual void OnTestEnd(const TestInfo*) {}
-
- // Called after an assertion.
- virtual void OnNewTestPartResult(const TestPartResult*) {}
-};
-
-// The c'tor sets this object as the test part result reporter used by
-// Google Test. The 'result' parameter specifies where to report the
-// results. Intercepts only failures from the current thread.
-ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
- TestPartResultArray* result)
- : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD),
- result_(result) {
- Init();
-}
-
-// The c'tor sets this object as the test part result reporter used by
-// Google Test. The 'result' parameter specifies where to report the
-// results.
-ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
- InterceptMode intercept_mode, TestPartResultArray* result)
- : intercept_mode_(intercept_mode),
- result_(result) {
- Init();
-}
-
-void ScopedFakeTestPartResultReporter::Init() {
- internal::UnitTestImpl* const impl = UnitTest::GetInstance()->impl();
- if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
- old_reporter_ = impl->GetGlobalTestPartResultReporter();
- impl->SetGlobalTestPartResultReporter(this);
- } else {
- old_reporter_ = impl->GetTestPartResultReporterForCurrentThread();
- impl->SetTestPartResultReporterForCurrentThread(this);
- }
-}
-
-// The d'tor restores the test part result reporter used by Google Test
-// before.
-ScopedFakeTestPartResultReporter::~ScopedFakeTestPartResultReporter() {
- internal::UnitTestImpl* const impl = UnitTest::GetInstance()->impl();
- if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
- impl->SetGlobalTestPartResultReporter(old_reporter_);
- } else {
- impl->SetTestPartResultReporterForCurrentThread(old_reporter_);
- }
-}
-
-// Increments the test part result count and remembers the result.
-// This method is from the TestPartResultReporterInterface interface.
-void ScopedFakeTestPartResultReporter::ReportTestPartResult(
- const TestPartResult& result) {
- result_->Append(result);
-}
-
-namespace internal {
-
-// Returns the type ID of ::testing::Test. We should always call this
-// instead of GetTypeId< ::testing::Test>() to get the type ID of
-// testing::Test. This is to work around a suspected linker bug when
-// using Google Test as a framework on Mac OS X. The bug causes
-// GetTypeId< ::testing::Test>() to return different values depending
-// on whether the call is from the Google Test framework itself or
-// from user test code. GetTestTypeId() is guaranteed to always
-// return the same value, as it always calls GetTypeId<>() from the
-// gtest.cc, which is within the Google Test framework.
-TypeId GetTestTypeId() {
- return GetTypeId<Test>();
-}
-
-// The value of GetTestTypeId() as seen from within the Google Test
-// library. This is solely for testing GetTestTypeId().
-const TypeId kTestTypeIdInGoogleTest = GetTestTypeId();
-
-// This predicate-formatter checks that 'results' contains a test part
-// failure of the given type and that the failure message contains the
-// given substring.
-AssertionResult HasOneFailure(const char* /* results_expr */,
- const char* /* type_expr */,
- const char* /* substr_expr */,
- const TestPartResultArray& results,
- TestPartResultType type,
- const char* substr) {
- const String expected(
- type == TPRT_FATAL_FAILURE ? "1 fatal failure" :
- "1 non-fatal failure");
- Message msg;
- if (results.size() != 1) {
- msg << "Expected: " << expected << "\n"
- << " Actual: " << results.size() << " failures";
- for (int i = 0; i < results.size(); i++) {
- msg << "\n" << results.GetTestPartResult(i);
- }
- return AssertionFailure(msg);
- }
-
- const TestPartResult& r = results.GetTestPartResult(0);
- if (r.type() != type) {
- msg << "Expected: " << expected << "\n"
- << " Actual:\n"
- << r;
- return AssertionFailure(msg);
- }
-
- if (strstr(r.message(), substr) == NULL) {
- msg << "Expected: " << expected << " containing \""
- << substr << "\"\n"
- << " Actual:\n"
- << r;
- return AssertionFailure(msg);
- }
-
- return AssertionSuccess();
-}
-
-// The constructor of SingleFailureChecker remembers where to look up
-// test part results, what type of failure we expect, and what
-// substring the failure message should contain.
-SingleFailureChecker:: SingleFailureChecker(
- const TestPartResultArray* results,
- TestPartResultType type,
- const char* substr)
- : results_(results),
- type_(type),
- substr_(substr) {}
-
-// The destructor of SingleFailureChecker verifies that the given
-// TestPartResultArray contains exactly one failure that has the given
-// type and contains the given substring. If that's not the case, a
-// non-fatal failure will be generated.
-SingleFailureChecker::~SingleFailureChecker() {
- EXPECT_PRED_FORMAT3(HasOneFailure, *results_, type_, substr_.c_str());
-}
-
-DefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter(
- UnitTestImpl* unit_test) : unit_test_(unit_test) {}
-
-void DefaultGlobalTestPartResultReporter::ReportTestPartResult(
- const TestPartResult& result) {
- unit_test_->current_test_result()->AddTestPartResult(result);
- unit_test_->result_printer()->OnNewTestPartResult(&result);
-}
-
-DefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter(
- UnitTestImpl* unit_test) : unit_test_(unit_test) {}
-
-void DefaultPerThreadTestPartResultReporter::ReportTestPartResult(
- const TestPartResult& result) {
- unit_test_->GetGlobalTestPartResultReporter()->ReportTestPartResult(result);
-}
-
-// Returns the global test part result reporter.
-TestPartResultReporterInterface*
-UnitTestImpl::GetGlobalTestPartResultReporter() {
- internal::MutexLock lock(&global_test_part_result_reporter_mutex_);
- return global_test_part_result_repoter_;
-}
-
-// Sets the global test part result reporter.
-void UnitTestImpl::SetGlobalTestPartResultReporter(
- TestPartResultReporterInterface* reporter) {
- internal::MutexLock lock(&global_test_part_result_reporter_mutex_);
- global_test_part_result_repoter_ = reporter;
-}
-
-// Returns the test part result reporter for the current thread.
-TestPartResultReporterInterface*
-UnitTestImpl::GetTestPartResultReporterForCurrentThread() {
- return per_thread_test_part_result_reporter_.get();
-}
-
-// Sets the test part result reporter for the current thread.
-void UnitTestImpl::SetTestPartResultReporterForCurrentThread(
- TestPartResultReporterInterface* reporter) {
- per_thread_test_part_result_reporter_.set(reporter);
-}
-
-// Gets the number of successful test cases.
-int UnitTestImpl::successful_test_case_count() const {
- return test_cases_.CountIf(TestCasePassed);
-}
-
-// Gets the number of failed test cases.
-int UnitTestImpl::failed_test_case_count() const {
- return test_cases_.CountIf(TestCaseFailed);
-}
-
-// Gets the number of all test cases.
-int UnitTestImpl::total_test_case_count() const {
- return test_cases_.size();
-}
-
-// Gets the number of all test cases that contain at least one test
-// that should run.
-int UnitTestImpl::test_case_to_run_count() const {
- return test_cases_.CountIf(ShouldRunTestCase);
-}
-
-// Gets the number of successful tests.
-int UnitTestImpl::successful_test_count() const {
- return SumOverTestCaseList(test_cases_, &TestCase::successful_test_count);
-}
-
-// Gets the number of failed tests.
-int UnitTestImpl::failed_test_count() const {
- return SumOverTestCaseList(test_cases_, &TestCase::failed_test_count);
-}
-
-// Gets the number of disabled tests.
-int UnitTestImpl::disabled_test_count() const {
- return SumOverTestCaseList(test_cases_, &TestCase::disabled_test_count);
-}
-
-// Gets the number of all tests.
-int UnitTestImpl::total_test_count() const {
- return SumOverTestCaseList(test_cases_, &TestCase::total_test_count);
-}
-
-// Gets the number of tests that should run.
-int UnitTestImpl::test_to_run_count() const {
- return SumOverTestCaseList(test_cases_, &TestCase::test_to_run_count);
-}
-
-// Returns the current OS stack trace as a String.
-//
-// The maximum number of stack frames to be included is specified by
-// the gtest_stack_trace_depth flag. The skip_count parameter
-// specifies the number of top frames to be skipped, which doesn't
-// count against the number of frames to be included.
-//
-// For example, if Foo() calls Bar(), which in turn calls
-// CurrentOsStackTraceExceptTop(1), Foo() will be included in the
-// trace but Bar() and CurrentOsStackTraceExceptTop() won't.
-String UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) {
- (void)skip_count;
- return String("");
-}
-
-static TimeInMillis GetTimeInMillis() {
-#ifdef _WIN32_WCE // We are on Windows CE
- // Difference between 1970-01-01 and 1601-01-01 in milliseconds.
- // http://analogous.blogspot.com/2005/04/epoch.html
- const TimeInMillis kJavaEpochToWinFileTimeDelta = 11644473600000UL;
- const DWORD kTenthMicrosInMilliSecond = 10000;
-
- SYSTEMTIME now_systime;
- FILETIME now_filetime;
- ULARGE_INTEGER now_int64;
- // TODO(kenton at google.com): Shouldn't this just use
- // GetSystemTimeAsFileTime()?
- GetSystemTime(&now_systime);
- if (SystemTimeToFileTime(&now_systime, &now_filetime)) {
- now_int64.LowPart = now_filetime.dwLowDateTime;
- now_int64.HighPart = now_filetime.dwHighDateTime;
- now_int64.QuadPart = (now_int64.QuadPart / kTenthMicrosInMilliSecond) -
- kJavaEpochToWinFileTimeDelta;
- return now_int64.QuadPart;
- }
- return 0;
-#elif defined(GTEST_OS_WINDOWS) && !defined(GTEST_HAS_GETTIMEOFDAY)
- __timeb64 now;
-#ifdef _MSC_VER
- // MSVC 8 deprecates _ftime64(), so we want to suppress warning 4996
- // (deprecated function) there.
- // TODO(kenton at google.com): Use GetTickCount()? Or use
- // SystemTimeToFileTime()
-#pragma warning(push) // Saves the current warning state.
-#pragma warning(disable:4996) // Temporarily disables warning 4996.
- _ftime64(&now);
-#pragma warning(pop) // Restores the warning state.
-#else
- _ftime64(&now);
-#endif // _MSC_VER
- return static_cast<TimeInMillis>(now.time) * 1000 + now.millitm;
-#elif defined(GTEST_HAS_GETTIMEOFDAY)
- struct timeval now;
- gettimeofday(&now, NULL);
- return static_cast<TimeInMillis>(now.tv_sec) * 1000 + now.tv_usec / 1000;
-#else
-#error "Don't know how to get the current time on your system."
-#endif
-}
-
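For the Windows CE branch above, the arithmetic is: FILETIME counts 100-nanosecond ticks since 1601-01-01, so dividing by 10,000 gives milliseconds, and subtracting 11,644,473,600,000 ms (the 1601-to-1970 offset) gives a Unix-style timestamp. A quick check of that conversion, no Windows headers required (names are illustrative):

#include <cstdio>
#include <stdint.h>

// Convert 100-ns ticks since 1601-01-01 into milliseconds since 1970-01-01.
static uint64_t FileTimeTicksToUnixMillis(uint64_t ticks) {
  const uint64_t kEpochDeltaMillis = 11644473600000ULL;  // 1601-01-01 -> 1970-01-01
  return ticks / 10000ULL - kEpochDeltaMillis;
}

int main() {
  // One second past the Unix epoch, expressed as FILETIME ticks.
  const uint64_t ticks = (11644473600ULL + 1ULL) * 10000000ULL;
  std::printf("%llu\n", (unsigned long long)FileTimeTicksToUnixMillis(ticks));  // 1000
  return 0;
}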
-// Utilities
-
-// class String
-
-// Returns the input enclosed in double quotes if it's not NULL;
-// otherwise returns "(null)". For example, "\"Hello\"" is returned
-// for input "Hello".
-//
-// This is useful for printing a C string in the syntax of a literal.
-//
-// Known issue: escape sequences are not handled yet.
-String String::ShowCStringQuoted(const char* c_str) {
- return c_str ? String::Format("\"%s\"", c_str) : String("(null)");
-}
-
-// Copies at most length characters from str into a newly-allocated
-// piece of memory of size length+1. The memory is allocated with new[].
-// A terminating null byte is written to the memory, and a pointer to it
-// is returned. If str is NULL, NULL is returned.
-static char* CloneString(const char* str, size_t length) {
- if (str == NULL) {
- return NULL;
- } else {
- char* const clone = new char[length + 1];
- // MSVC 8 deprecates strncpy(), so we want to suppress warning
- // 4996 (deprecated function) there.
-#ifdef GTEST_OS_WINDOWS // We are on Windows.
-#pragma warning(push) // Saves the current warning state.
-#pragma warning(disable:4996) // Temporarily disables warning 4996.
- strncpy(clone, str, length);
-#pragma warning(pop) // Restores the warning state.
-#else // We are on Linux or Mac OS.
- strncpy(clone, str, length);
-#endif // GTEST_OS_WINDOWS
- clone[length] = '\0';
- return clone;
- }
-}
-
-// Clones a 0-terminated C string, allocating memory using new. The
-// caller is responsible for deleting[] the return value. Returns the
-// cloned string, or NULL if the input is NULL.
-const char * String::CloneCString(const char* c_str) {
- return (c_str == NULL) ?
- NULL : CloneString(c_str, strlen(c_str));
-}
-
-#ifdef _WIN32_WCE
-// Creates a UTF-16 wide string from the given ANSI string, allocating
-// memory using new. The caller is responsible for deleting the return
-// value using delete[]. Returns the wide string, or NULL if the
-// input is NULL.
-LPCWSTR String::AnsiToUtf16(const char* ansi) {
- if (!ansi) return NULL;
- const int length = strlen(ansi);
- const int unicode_length =
- MultiByteToWideChar(CP_ACP, 0, ansi, length,
- NULL, 0);
- WCHAR* unicode = new WCHAR[unicode_length + 1];
- MultiByteToWideChar(CP_ACP, 0, ansi, length,
- unicode, unicode_length);
- unicode[unicode_length] = 0;
- return unicode;
-}
-
-// Creates an ANSI string from the given wide string, allocating
-// memory using new. The caller is responsible for deleting the return
-// value using delete[]. Returns the ANSI string, or NULL if the
-// input is NULL.
-const char* String::Utf16ToAnsi(LPCWSTR utf16_str) {
- if (!utf16_str) return NULL;
- const int ansi_length =
- WideCharToMultiByte(CP_ACP, 0, utf16_str, -1,
- NULL, 0, NULL, NULL);
- char* ansi = new char[ansi_length + 1];
- WideCharToMultiByte(CP_ACP, 0, utf16_str, -1,
- ansi, ansi_length, NULL, NULL);
- ansi[ansi_length] = 0;
- return ansi;
-}
-
-#endif // _WIN32_WCE
-
-// Compares two C strings. Returns true iff they have the same content.
-//
-// Unlike strcmp(), this function can handle NULL argument(s). A NULL
-// C string is considered different to any non-NULL C string,
-// including the empty string.
-bool String::CStringEquals(const char * lhs, const char * rhs) {
- if ( lhs == NULL ) return rhs == NULL;
-
- if ( rhs == NULL ) return false;
-
- return strcmp(lhs, rhs) == 0;
-}
-
-#if GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING
-
-// Converts an array of wide chars to a narrow string using the UTF-8
-// encoding, and streams the result to the given Message object.
-static void StreamWideCharsToMessage(const wchar_t* wstr, size_t len,
- Message* msg) {
- // TODO(wan): consider allowing a testing::String object to
- // contain '\0'. This will make it behave more like std::string,
- // and will allow ToUtf8String() to return the correct encoding
- // for '\0' s.t. we can get rid of the conditional here (and in
- // several other places).
- for (size_t i = 0; i != len; ) { // NOLINT
- if (wstr[i] != L'\0') {
- *msg << WideStringToUtf8(wstr + i, static_cast<int>(len - i));
- while (i != len && wstr[i] != L'\0')
- i++;
- } else {
- *msg << '\0';
- i++;
- }
- }
-}
-
-#endif // GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING
-
-} // namespace internal
-
-#if GTEST_HAS_STD_WSTRING
-// Converts the given wide string to a narrow string using the UTF-8
-// encoding, and streams the result to this Message object.
-Message& Message::operator <<(const ::std::wstring& wstr) {
- internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this);
- return *this;
-}
-#endif // GTEST_HAS_STD_WSTRING
-
-#if GTEST_HAS_GLOBAL_WSTRING
-// Converts the given wide string to a narrow string using the UTF-8
-// encoding, and streams the result to this Message object.
-Message& Message::operator <<(const ::wstring& wstr) {
- internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this);
- return *this;
-}
-#endif // GTEST_HAS_GLOBAL_WSTRING
-
-namespace internal {
-
-// Formats a value to be used in a failure message.
-
-// For a char value, we print it as a C++ char literal and as an
-// unsigned integer (both in decimal and in hexadecimal).
-String FormatForFailureMessage(char ch) {
- const unsigned int ch_as_uint = ch;
- // A String object cannot contain '\0', so we print "\\0" when ch is
- // '\0'.
- return String::Format("'%s' (%u, 0x%X)",
- ch ? String::Format("%c", ch).c_str() : "\\0",
- ch_as_uint, ch_as_uint);
-}
-
-// For a wchar_t value, we print it as a C++ wchar_t literal and as an
-// unsigned integer (both in decimal and in hexadecimal).
-String FormatForFailureMessage(wchar_t wchar) {
- // The C++ standard doesn't specify the exact size of the wchar_t
- // type. It just says that it shall have the same size as another
- // integral type, called its underlying type.
- //
- // Therefore, in order to print a wchar_t value in the numeric form,
- // we first convert it to the largest integral type (UInt64) and
- // then print the converted value.
- //
- // We use streaming to print the value as "%llu" doesn't work
- // correctly with MSVC 7.1.
- const UInt64 wchar_as_uint64 = wchar;
- Message msg;
- // A String object cannot contain '\0', so we print "\\0" when wchar is
- // L'\0'.
- char buffer[32]; // CodePointToUtf8 requires a buffer that big.
- msg << "L'"
- << (wchar ? CodePointToUtf8(static_cast<UInt32>(wchar), buffer) : "\\0")
- << "' (" << wchar_as_uint64 << ", 0x" << ::std::setbase(16)
- << wchar_as_uint64 << ")";
- return msg.GetString();
-}
-
-} // namespace internal
-
-// AssertionResult constructor.
-AssertionResult::AssertionResult(const internal::String& failure_message)
- : failure_message_(failure_message) {
-}
-
-
-// Makes a successful assertion result.
-AssertionResult AssertionSuccess() {
- return AssertionResult();
-}
-
-
-// Makes a failed assertion result with the given failure message.
-AssertionResult AssertionFailure(const Message& message) {
- return AssertionResult(message.GetString());
-}
-
-namespace internal {
-
-// Constructs and returns the message for an equality assertion
-// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.
-//
-// The first four parameters are the expressions used in the assertion
-// and their values, as strings. For example, for ASSERT_EQ(foo, bar)
-// where foo is 5 and bar is 6, we have:
-//
-// expected_expression: "foo"
-// actual_expression: "bar"
-// expected_value: "5"
-// actual_value: "6"
-//
-// The ignoring_case parameter is true iff the assertion is a
-// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will
-// be inserted into the message.
-AssertionResult EqFailure(const char* expected_expression,
- const char* actual_expression,
- const String& expected_value,
- const String& actual_value,
- bool ignoring_case) {
- Message msg;
- msg << "Value of: " << actual_expression;
- if (actual_value != actual_expression) {
- msg << "\n Actual: " << actual_value;
- }
-
- msg << "\nExpected: " << expected_expression;
- if (ignoring_case) {
- msg << " (ignoring case)";
- }
- if (expected_value != expected_expression) {
- msg << "\nWhich is: " << expected_value;
- }
-
- return AssertionFailure(msg);
-}
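
For the ASSERT_EQ(foo, bar) example from the comment above (foo == 5, bar == 6,
ignoring_case == false), a minimal sketch of the call and of the failure text it
assembles; the values are the hypothetical ones from that comment:

    // Hypothetical values taken from the EqFailure() comment above.
    EqFailure("foo", "bar", String("5"), String("6"), false);
    // The returned AssertionResult carries a message along the lines of:
    //   Value of: bar
    //     Actual: 6
    //   Expected: foo
    //   Which is: 5
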
-
-
-// Helper function for implementing ASSERT_NEAR.
-AssertionResult DoubleNearPredFormat(const char* expr1,
- const char* expr2,
- const char* abs_error_expr,
- double val1,
- double val2,
- double abs_error) {
- const double diff = fabs(val1 - val2);
- if (diff <= abs_error) return AssertionSuccess();
-
- // TODO(wan): do not print the value of an expression if it's
- // already a literal.
- Message msg;
- msg << "The difference between " << expr1 << " and " << expr2
- << " is " << diff << ", which exceeds " << abs_error_expr << ", where\n"
- << expr1 << " evaluates to " << val1 << ",\n"
- << expr2 << " evaluates to " << val2 << ", and\n"
- << abs_error_expr << " evaluates to " << abs_error << ".";
- return AssertionFailure(msg);
-}
-
-
-// Helper template for implementing FloatLE() and DoubleLE().
-template <typename RawType>
-AssertionResult FloatingPointLE(const char* expr1,
- const char* expr2,
- RawType val1,
- RawType val2) {
- // Returns success if val1 is less than val2,
- if (val1 < val2) {
- return AssertionSuccess();
- }
-
- // or if val1 is almost equal to val2.
- const FloatingPoint<RawType> lhs(val1), rhs(val2);
- if (lhs.AlmostEquals(rhs)) {
- return AssertionSuccess();
- }
-
- // Note that the above two checks will both fail if either val1 or
- // val2 is NaN, as the IEEE floating-point standard requires that
- // any predicate involving a NaN must return false.
-
- StrStream val1_ss;
- val1_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
- << val1;
-
- StrStream val2_ss;
- val2_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
- << val2;
-
- Message msg;
- msg << "Expected: (" << expr1 << ") <= (" << expr2 << ")\n"
- << " Actual: " << StrStreamToString(&val1_ss) << " vs "
- << StrStreamToString(&val2_ss);
-
- return AssertionFailure(msg);
-}
-
-} // namespace internal
-
-// Asserts that val1 is less than, or almost equal to, val2. Fails
-// otherwise. In particular, it fails if either val1 or val2 is NaN.
-AssertionResult FloatLE(const char* expr1, const char* expr2,
- float val1, float val2) {
- return internal::FloatingPointLE<float>(expr1, expr2, val1, val2);
-}
-
-// Asserts that val1 is less than, or almost equal to, val2. Fails
-// otherwise. In particular, it fails if either val1 or val2 is NaN.
-AssertionResult DoubleLE(const char* expr1, const char* expr2,
- double val1, double val2) {
- return internal::FloatingPointLE<double>(expr1, expr2, val1, val2);
-}
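
FloatLE() and DoubleLE() above are predicate-format functions; a minimal usage
sketch, assuming the *_PRED_FORMAT2 macros from this same library and two
illustrative variables a and b:

    // Passes when a < b or when a is within a few ULPs of b; fails if either is NaN.
    EXPECT_PRED_FORMAT2(::testing::DoubleLE, a, b);
    ASSERT_PRED_FORMAT2(::testing::FloatLE, 0.99f, 1.0f);
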
-
-namespace internal {
-
-// The helper function for {ASSERT|EXPECT}_EQ with int or enum
-// arguments.
-AssertionResult CmpHelperEQ(const char* expected_expression,
- const char* actual_expression,
- BiggestInt expected,
- BiggestInt actual) {
- if (expected == actual) {
- return AssertionSuccess();
- }
-
- return EqFailure(expected_expression,
- actual_expression,
- FormatForComparisonFailureMessage(expected, actual),
- FormatForComparisonFailureMessage(actual, expected),
- false);
-}
-
-// A macro for implementing the helper functions needed to implement
-// ASSERT_?? and EXPECT_?? with integer or enum arguments. It is here
-// just to avoid copy-and-paste of similar code.
-#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
-AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
- BiggestInt val1, BiggestInt val2) {\
- if (val1 op val2) {\
- return AssertionSuccess();\
- } else {\
- Message msg;\
- msg << "Expected: (" << expr1 << ") " #op " (" << expr2\
- << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\
- << " vs " << FormatForComparisonFailureMessage(val2, val1);\
- return AssertionFailure(msg);\
- }\
-}
-
-// Implements the helper function for {ASSERT|EXPECT}_NE with int or
-// enum arguments.
-GTEST_IMPL_CMP_HELPER_(NE, !=)
-// Implements the helper function for {ASSERT|EXPECT}_LE with int or
-// enum arguments.
-GTEST_IMPL_CMP_HELPER_(LE, <=)
-// Implements the helper function for {ASSERT|EXPECT}_LT with int or
-// enum arguments.
-GTEST_IMPL_CMP_HELPER_(LT, < )
-// Implements the helper function for {ASSERT|EXPECT}_GE with int or
-// enum arguments.
-GTEST_IMPL_CMP_HELPER_(GE, >=)
-// Implements the helper function for {ASSERT|EXPECT}_GT with int or
-// enum arguments.
-GTEST_IMPL_CMP_HELPER_(GT, > )
-
-#undef GTEST_IMPL_CMP_HELPER_
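
Spelled out, a single expansion of the macro above (the LE case) reads:

    // GTEST_IMPL_CMP_HELPER_(LE, <=) expands to:
    AssertionResult CmpHelperLE(const char* expr1, const char* expr2,
                                BiggestInt val1, BiggestInt val2) {
      if (val1 <= val2) {
        return AssertionSuccess();
      } else {
        Message msg;
        msg << "Expected: (" << expr1 << ") <= (" << expr2
            << "), actual: " << FormatForComparisonFailureMessage(val1, val2)
            << " vs " << FormatForComparisonFailureMessage(val2, val1);
        return AssertionFailure(msg);
      }
    }
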
-
-// The helper function for {ASSERT|EXPECT}_STREQ.
-AssertionResult CmpHelperSTREQ(const char* expected_expression,
- const char* actual_expression,
- const char* expected,
- const char* actual) {
- if (String::CStringEquals(expected, actual)) {
- return AssertionSuccess();
- }
-
- return EqFailure(expected_expression,
- actual_expression,
- String::ShowCStringQuoted(expected),
- String::ShowCStringQuoted(actual),
- false);
-}
-
-// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
-AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression,
- const char* actual_expression,
- const char* expected,
- const char* actual) {
- if (String::CaseInsensitiveCStringEquals(expected, actual)) {
- return AssertionSuccess();
- }
-
- return EqFailure(expected_expression,
- actual_expression,
- String::ShowCStringQuoted(expected),
- String::ShowCStringQuoted(actual),
- true);
-}
-
-// The helper function for {ASSERT|EXPECT}_STRNE.
-AssertionResult CmpHelperSTRNE(const char* s1_expression,
- const char* s2_expression,
- const char* s1,
- const char* s2) {
- if (!String::CStringEquals(s1, s2)) {
- return AssertionSuccess();
- } else {
- Message msg;
- msg << "Expected: (" << s1_expression << ") != ("
- << s2_expression << "), actual: \""
- << s1 << "\" vs \"" << s2 << "\"";
- return AssertionFailure(msg);
- }
-}
-
-// The helper function for {ASSERT|EXPECT}_STRCASENE.
-AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
- const char* s2_expression,
- const char* s1,
- const char* s2) {
- if (!String::CaseInsensitiveCStringEquals(s1, s2)) {
- return AssertionSuccess();
- } else {
- Message msg;
- msg << "Expected: (" << s1_expression << ") != ("
- << s2_expression << ") (ignoring case), actual: \""
- << s1 << "\" vs \"" << s2 << "\"";
- return AssertionFailure(msg);
- }
-}
-
-} // namespace internal
-
-namespace {
-
-// Helper functions for implementing IsSubString() and IsNotSubstring().
-
-// This group of overloaded functions returns true iff needle is a
-// substring of haystack. NULL is considered a substring of itself
-// only.
-
-bool IsSubstringPred(const char* needle, const char* haystack) {
- if (needle == NULL || haystack == NULL)
- return needle == haystack;
-
- return strstr(haystack, needle) != NULL;
-}
-
-bool IsSubstringPred(const wchar_t* needle, const wchar_t* haystack) {
- if (needle == NULL || haystack == NULL)
- return needle == haystack;
-
- return wcsstr(haystack, needle) != NULL;
-}
-
-// StringType here can be either ::std::string or ::std::wstring.
-template <typename StringType>
-bool IsSubstringPred(const StringType& needle,
- const StringType& haystack) {
- return haystack.find(needle) != StringType::npos;
-}
-
-// This function implements either IsSubstring() or IsNotSubstring(),
-// depending on the value of the expected_to_be_substring parameter.
-// StringType here can be const char*, const wchar_t*, ::std::string,
-// or ::std::wstring.
-template <typename StringType>
-AssertionResult IsSubstringImpl(
- bool expected_to_be_substring,
- const char* needle_expr, const char* haystack_expr,
- const StringType& needle, const StringType& haystack) {
- if (IsSubstringPred(needle, haystack) == expected_to_be_substring)
- return AssertionSuccess();
-
- const bool is_wide_string = sizeof(needle[0]) > 1;
- const char* const begin_string_quote = is_wide_string ? "L\"" : "\"";
- return AssertionFailure(
- Message()
- << "Value of: " << needle_expr << "\n"
- << " Actual: " << begin_string_quote << needle << "\"\n"
- << "Expected: " << (expected_to_be_substring ? "" : "not ")
- << "a substring of " << haystack_expr << "\n"
- << "Which is: " << begin_string_quote << haystack << "\"");
-}
-
-} // namespace
-
-// IsSubstring() and IsNotSubstring() check whether needle is a
-// substring of haystack (NULL is considered a substring of itself
-// only), and return an appropriate error message when they fail.
-
-AssertionResult IsSubstring(
- const char* needle_expr, const char* haystack_expr,
- const char* needle, const char* haystack) {
- return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
-}
-
-AssertionResult IsSubstring(
- const char* needle_expr, const char* haystack_expr,
- const wchar_t* needle, const wchar_t* haystack) {
- return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
-}
-
-AssertionResult IsNotSubstring(
- const char* needle_expr, const char* haystack_expr,
- const char* needle, const char* haystack) {
- return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
-}
-
-AssertionResult IsNotSubstring(
- const char* needle_expr, const char* haystack_expr,
- const wchar_t* needle, const wchar_t* haystack) {
- return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
-}
-
-#if GTEST_HAS_STD_STRING
-AssertionResult IsSubstring(
- const char* needle_expr, const char* haystack_expr,
- const ::std::string& needle, const ::std::string& haystack) {
- return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
-}
-
-AssertionResult IsNotSubstring(
- const char* needle_expr, const char* haystack_expr,
- const ::std::string& needle, const ::std::string& haystack) {
- return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
-}
-#endif // GTEST_HAS_STD_STRING
-
-#if GTEST_HAS_STD_WSTRING
-AssertionResult IsSubstring(
- const char* needle_expr, const char* haystack_expr,
- const ::std::wstring& needle, const ::std::wstring& haystack) {
- return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
-}
-
-AssertionResult IsNotSubstring(
- const char* needle_expr, const char* haystack_expr,
- const ::std::wstring& needle, const ::std::wstring& haystack) {
- return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
-}
-#endif // GTEST_HAS_STD_WSTRING
-
-namespace internal {
-
-#ifdef GTEST_OS_WINDOWS
-
-namespace {
-
-// Helper function for the IsHRESULT{Success,Failure} predicates.
-AssertionResult HRESULTFailureHelper(const char* expr,
- const char* expected,
- long hr) { // NOLINT
-#ifdef _WIN32_WCE
- // Windows CE doesn't support FormatMessage.
- const char error_text[] = "";
-#else
- // Looks up the human-readable system message for the HRESULT code.
- // Since we're not passing any params to FormatMessage, we don't
- // want inserts expanded.
- const DWORD kFlags = FORMAT_MESSAGE_FROM_SYSTEM |
- FORMAT_MESSAGE_IGNORE_INSERTS;
- const DWORD kBufSize = 4096; // String::Format can't exceed this length.
- // Gets the system's human readable message string for this HRESULT.
- char error_text[kBufSize] = { '\0' };
- DWORD message_length = ::FormatMessageA(kFlags,
- 0, // no source, we're asking system
- hr, // the error
- 0, // no line width restrictions
- error_text, // output buffer
- kBufSize, // buf size
- NULL); // no arguments for inserts
- // Trims trailing whitespace (FormatMessage leaves a trailing CR-LF).
- for (; message_length && isspace(error_text[message_length - 1]);
- --message_length) {
- error_text[message_length - 1] = '\0';
- }
-#endif // _WIN32_WCE
-
- const String error_hex(String::Format("0x%08X ", hr));
- Message msg;
- msg << "Expected: " << expr << " " << expected << ".\n"
- << " Actual: " << error_hex << error_text << "\n";
-
- return ::testing::AssertionFailure(msg);
-}
-
-} // namespace
-
-AssertionResult IsHRESULTSuccess(const char* expr, long hr) { // NOLINT
- if (SUCCEEDED(hr)) {
- return AssertionSuccess();
- }
- return HRESULTFailureHelper(expr, "succeeds", hr);
-}
-
-AssertionResult IsHRESULTFailure(const char* expr, long hr) { // NOLINT
- if (FAILED(hr)) {
- return AssertionSuccess();
- }
- return HRESULTFailureHelper(expr, "fails", hr);
-}
-
-#endif // GTEST_OS_WINDOWS
-
-// Utility functions for encoding Unicode text (wide strings) in
-// UTF-8.
-
-// A Unicode code-point can have up to 21 bits, and is encoded in UTF-8
-// like this:
-//
-// Code-point length Encoding
-// 0 - 7 bits 0xxxxxxx
-// 8 - 11 bits 110xxxxx 10xxxxxx
-// 12 - 16 bits 1110xxxx 10xxxxxx 10xxxxxx
-// 17 - 21 bits 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
-
-// The maximum code-point a one-byte UTF-8 sequence can represent.
-const UInt32 kMaxCodePoint1 = (static_cast<UInt32>(1) << 7) - 1;
-
-// The maximum code-point a two-byte UTF-8 sequence can represent.
-const UInt32 kMaxCodePoint2 = (static_cast<UInt32>(1) << (5 + 6)) - 1;
-
-// The maximum code-point a three-byte UTF-8 sequence can represent.
-const UInt32 kMaxCodePoint3 = (static_cast<UInt32>(1) << (4 + 2*6)) - 1;
-
-// The maximum code-point a four-byte UTF-8 sequence can represent.
-const UInt32 kMaxCodePoint4 = (static_cast<UInt32>(1) << (3 + 3*6)) - 1;
-
-// Chops off the n lowest bits from a bit pattern. Returns the n
-// lowest bits. As a side effect, the original bit pattern will be
-// shifted to the right by n bits.
-inline UInt32 ChopLowBits(UInt32* bits, int n) {
- const UInt32 low_bits = *bits & ((static_cast<UInt32>(1) << n) - 1);
- *bits >>= n;
- return low_bits;
-}
-
-// Converts a Unicode code point to a narrow string in UTF-8 encoding.
-// The code_point parameter is of type UInt32 because wchar_t may not be
-// wide enough to contain a code point.
-// The output buffer str must contain at least 32 characters.
-// The function returns the address of the output buffer.
-// If the code_point is not a valid Unicode code point
-// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be output
-// as '(Invalid Unicode 0xXXXXXXXX)'.
-char* CodePointToUtf8(UInt32 code_point, char* str) {
- if (code_point <= kMaxCodePoint1) {
- str[1] = '\0';
- str[0] = static_cast<char>(code_point); // 0xxxxxxx
- } else if (code_point <= kMaxCodePoint2) {
- str[2] = '\0';
- str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
- str[0] = static_cast<char>(0xC0 | code_point); // 110xxxxx
- } else if (code_point <= kMaxCodePoint3) {
- str[3] = '\0';
- str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
- str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
- str[0] = static_cast<char>(0xE0 | code_point); // 1110xxxx
- } else if (code_point <= kMaxCodePoint4) {
- str[4] = '\0';
- str[3] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
- str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
- str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
- str[0] = static_cast<char>(0xF0 | code_point); // 11110xxx
- } else {
- // The longest string String::Format can produce when invoked
- // with these parameters is 28 characters long (not including
- // the terminating nul character). We ask for a 32-character
- // buffer just in case. This is also enough for strncpy to
- // null-terminate the destination string.
- // MSVC 8 deprecates strncpy(), so we want to suppress warning
- // 4996 (deprecated function) there.
-#ifdef GTEST_OS_WINDOWS // We are on Windows.
-#pragma warning(push) // Saves the current warning state.
-#pragma warning(disable:4996) // Temporarily disables warning 4996.
-#endif
- strncpy(str, String::Format("(Invalid Unicode 0x%X)", code_point).c_str(),
- 32);
-#ifdef GTEST_OS_WINDOWS // We are on Windows.
-#pragma warning(pop) // Restores the warning state.
-#endif
- str[31] = '\0'; // Makes sure no change in the format to strncpy leaves
- // the result unterminated.
- }
- return str;
-}
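
A worked example of the encoding above, using U+20AC (the Euro sign), which falls
in the three-byte range:

    char buf[32];
    CodePointToUtf8(0x20AC, buf);  // buf now holds "\xE2\x82\xAC"
    // Three-byte branch, step by step:
    //   str[2] = 0x80 | (0x20AC & 0x3F)         == 0xAC  (10xxxxxx)
    //   str[1] = 0x80 | ((0x20AC >> 6) & 0x3F)  == 0x82  (10xxxxxx)
    //   str[0] = 0xE0 | (0x20AC >> 12)          == 0xE2  (1110xxxx)
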
-
-// The following two functions only make sense if the system
-// uses UTF-16 for wide string encoding. All supported systems
-// with a 16-bit wchar_t (Windows, Cygwin, Symbian OS) do use UTF-16.
-
-// Determines if the arguments constitute UTF-16 surrogate pair
-// and thus should be combined into a single Unicode code point
-// using CreateCodePointFromUtf16SurrogatePair.
-inline bool IsUtf16SurrogatePair(wchar_t first, wchar_t second) {
- if (sizeof(wchar_t) == 2)
- return (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00;
- else
- return false;
-}
-
-// Creates a Unicode code point from a UTF-16 surrogate pair.
-inline UInt32 CreateCodePointFromUtf16SurrogatePair(wchar_t first,
- wchar_t second) {
- if (sizeof(wchar_t) == 2) {
- const UInt32 mask = (1 << 10) - 1;
- return (((first & mask) << 10) | (second & mask)) + 0x10000;
- } else {
- // This should not be called, but we provide a sensible default
- // in case it is.
- return static_cast<UInt32>(first);
- }
-}
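
A worked example for platforms with a 2-byte wchar_t: the surrogate pair
0xD83D/0xDE00 combines back into the single code point U+1F600:

    const UInt32 cp = CreateCodePointFromUtf16SurrogatePair(0xD83D, 0xDE00);
    // cp == (((0xD83D & 0x3FF) << 10) | (0xDE00 & 0x3FF)) + 0x10000
    //    == ((0x3D << 10) | 0x200) + 0x10000
    //    == 0x1F600
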
-
-// Converts a wide string to a narrow string in UTF-8 encoding.
-// The wide string is assumed to have the following encoding:
-// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
-// UTF-32 if sizeof(wchar_t) == 4 (on Linux)
-// Parameter str points to a null-terminated wide string.
-// Parameter num_chars may additionally limit the number
-// of wchar_t characters processed. -1 is used when the entire string
-// should be processed.
-// If the string contains code points that are not valid Unicode code points
-// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
-// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF-16 encoding
-// and contains invalid UTF-16 surrogate pairs, values in those pairs
-// will be encoded as individual Unicode characters from the Basic Multilingual Plane.
-String WideStringToUtf8(const wchar_t* str, int num_chars) {
- if (num_chars == -1)
- num_chars = static_cast<int>(wcslen(str));
-
- StrStream stream;
- for (int i = 0; i < num_chars; ++i) {
- UInt32 unicode_code_point;
-
- if (str[i] == L'\0') {
- break;
- } else if (i + 1 < num_chars && IsUtf16SurrogatePair(str[i], str[i + 1])) {
- unicode_code_point = CreateCodePointFromUtf16SurrogatePair(str[i],
- str[i + 1]);
- i++;
- } else {
- unicode_code_point = static_cast<UInt32>(str[i]);
- }
-
- char buffer[32]; // CodePointToUtf8 requires a buffer this big.
- stream << CodePointToUtf8(unicode_code_point, buffer);
- }
- return StrStreamToString(&stream);
-}
-
-// Converts a wide C string to a String using the UTF-8 encoding.
-// NULL will be converted to "(null)".
-String String::ShowWideCString(const wchar_t * wide_c_str) {
- if (wide_c_str == NULL) return String("(null)");
-
- return String(internal::WideStringToUtf8(wide_c_str, -1).c_str());
-}
-
-// Similar to ShowWideCString(), except that this function encloses
-// the converted string in double quotes.
-String String::ShowWideCStringQuoted(const wchar_t* wide_c_str) {
- if (wide_c_str == NULL) return String("(null)");
-
- return String::Format("L\"%s\"",
- String::ShowWideCString(wide_c_str).c_str());
-}
-
-// Compares two wide C strings. Returns true iff they have the same
-// content.
-//
-// Unlike wcscmp(), this function can handle NULL argument(s). A NULL
-// C string is considered different to any non-NULL C string,
-// including the empty string.
-bool String::WideCStringEquals(const wchar_t * lhs, const wchar_t * rhs) {
- if (lhs == NULL) return rhs == NULL;
-
- if (rhs == NULL) return false;
-
- return wcscmp(lhs, rhs) == 0;
-}
-
-// Helper function for *_STREQ on wide strings.
-AssertionResult CmpHelperSTREQ(const char* expected_expression,
- const char* actual_expression,
- const wchar_t* expected,
- const wchar_t* actual) {
- if (String::WideCStringEquals(expected, actual)) {
- return AssertionSuccess();
- }
-
- return EqFailure(expected_expression,
- actual_expression,
- String::ShowWideCStringQuoted(expected),
- String::ShowWideCStringQuoted(actual),
- false);
-}
-
-// Helper function for *_STRNE on wide strings.
-AssertionResult CmpHelperSTRNE(const char* s1_expression,
- const char* s2_expression,
- const wchar_t* s1,
- const wchar_t* s2) {
- if (!String::WideCStringEquals(s1, s2)) {
- return AssertionSuccess();
- }
-
- Message msg;
- msg << "Expected: (" << s1_expression << ") != ("
- << s2_expression << "), actual: "
- << String::ShowWideCStringQuoted(s1)
- << " vs " << String::ShowWideCStringQuoted(s2);
- return AssertionFailure(msg);
-}
-
-// Compares two C strings, ignoring case. Returns true iff they have
-// the same content.
-//
-// Unlike strcasecmp(), this function can handle NULL argument(s). A
-// NULL C string is considered different to any non-NULL C string,
-// including the empty string.
-bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) {
- if ( lhs == NULL ) return rhs == NULL;
-
- if ( rhs == NULL ) return false;
-
-#ifdef GTEST_OS_WINDOWS
- return _stricmp(lhs, rhs) == 0;
-#else // GTEST_OS_WINDOWS
- return strcasecmp(lhs, rhs) == 0;
-#endif // GTEST_OS_WINDOWS
-}
-
-// Compares two wide C strings, ignoring case. Returns true iff they
-// have the same content.
-//
-// Unlike wcscasecmp(), this function can handle NULL argument(s).
-// A NULL C string is considered different to any non-NULL wide C string,
-// including the empty string.
-// NB: The implementations on different platforms differ slightly.
-// On Windows, this method uses _wcsicmp, which compares according to the
-// LC_CTYPE environment variable. On GNU platforms it uses wcscasecmp,
-// which compares according to the LC_CTYPE category of the current locale.
-// On Mac OS X, it uses towlower, which also uses the LC_CTYPE category of
-// the current locale.
-bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
- const wchar_t* rhs) {
- if ( lhs == NULL ) return rhs == NULL;
-
- if ( rhs == NULL ) return false;
-
-#ifdef GTEST_OS_WINDOWS
- return _wcsicmp(lhs, rhs) == 0;
-#elif defined(GTEST_OS_LINUX)
- return wcscasecmp(lhs, rhs) == 0;
-#else
- // Mac OS X and Cygwin don't define wcscasecmp. Other unknown OSes
- // may not define it either.
- wint_t left, right;
- do {
- left = towlower(*lhs++);
- right = towlower(*rhs++);
- } while (left && left == right);
- return left == right;
-#endif // OS selector
-}
-
-// Constructs a String by copying a given number of chars from a
-// buffer. E.g. String("hello", 3) will create the string "hel".
-String::String(const char * buffer, size_t len) {
- char * const temp = new char[ len + 1 ];
- memcpy(temp, buffer, len);
- temp[ len ] = '\0';
- c_str_ = temp;
-}
-
-// Compares this with another String.
-// Returns < 0 if this is less than rhs, 0 if this is equal to rhs, or > 0
-// if this is greater than rhs.
-int String::Compare(const String & rhs) const {
- if ( c_str_ == NULL ) {
- return rhs.c_str_ == NULL ? 0 : -1; // NULL < anything except NULL
- }
-
- return rhs.c_str_ == NULL ? 1 : strcmp(c_str_, rhs.c_str_);
-}
-
-// Returns true iff this String ends with the given suffix. *Any*
-// String is considered to end with a NULL or empty suffix.
-bool String::EndsWith(const char* suffix) const {
- if (suffix == NULL || CStringEquals(suffix, "")) return true;
-
- if (c_str_ == NULL) return false;
-
- const size_t this_len = strlen(c_str_);
- const size_t suffix_len = strlen(suffix);
- return (this_len >= suffix_len) &&
- CStringEquals(c_str_ + this_len - suffix_len, suffix);
-}
-
-// Returns true iff this String ends with the given suffix, ignoring case.
-// Any String is considered to end with a NULL or empty suffix.
-bool String::EndsWithCaseInsensitive(const char* suffix) const {
- if (suffix == NULL || CStringEquals(suffix, "")) return true;
-
- if (c_str_ == NULL) return false;
-
- const size_t this_len = strlen(c_str_);
- const size_t suffix_len = strlen(suffix);
- return (this_len >= suffix_len) &&
- CaseInsensitiveCStringEquals(c_str_ + this_len - suffix_len, suffix);
-}
-
-// Sets the 0-terminated C string this String object represents. The
-// old string in this object is deleted, and this object will own a
-// clone of the input string. This function copies only up to length
-// bytes (plus a terminating null byte), or until the first null byte,
-// whichever comes first.
-//
-// This function works even when the c_str parameter has the same
-// value as that of the c_str_ field.
-void String::Set(const char * c_str, size_t length) {
- // Makes sure this works when c_str == c_str_
- const char* const temp = CloneString(c_str, length);
- delete[] c_str_;
- c_str_ = temp;
-}
-
-// Assigns a C string to this object. Self-assignment works.
-const String& String::operator=(const char* c_str) {
- // Makes sure this works when c_str == c_str_
- if (c_str != c_str_) {
- delete[] c_str_;
- c_str_ = CloneCString(c_str);
- }
- return *this;
-}
-
-// Formats a list of arguments to a String, using the same format
-// spec string as for printf.
-//
-// We do not use the StringPrintf class as it is not universally
-// available.
-//
-// The result is limited to 4096 characters (including the trailing '\0').
-// If 4096 characters are not enough to format the input,
-// "<buffer exceeded>" is returned.
-String String::Format(const char * format, ...) {
- va_list args;
- va_start(args, format);
-
- char buffer[4096];
- // MSVC 8 deprecates vsnprintf(), so we want to suppress warning
- // 4996 (deprecated function) there.
-#ifdef GTEST_OS_WINDOWS // We are on Windows.
-#pragma warning(push) // Saves the current warning state.
-#pragma warning(disable:4996) // Temporarily disables warning 4996.
- const int size =
- vsnprintf(buffer, sizeof(buffer)/sizeof(buffer[0]) - 1, format, args);
-#pragma warning(pop) // Restores the warning state.
-#else // We are on Linux or Mac OS.
- const int size =
- vsnprintf(buffer, sizeof(buffer)/sizeof(buffer[0]) - 1, format, args);
-#endif // GTEST_OS_WINDOWS
- va_end(args);
-
- return String(size >= 0 ? buffer : "<buffer exceeded>");
-}
-
-// Converts the buffer in a StrStream to a String, converting NUL
-// bytes to "\\0" along the way.
-String StrStreamToString(StrStream* ss) {
-#if GTEST_HAS_STD_STRING
- const ::std::string& str = ss->str();
- const char* const start = str.c_str();
- const char* const end = start + str.length();
-#else
- const char* const start = ss->str();
- const char* const end = start + ss->pcount();
-#endif // GTEST_HAS_STD_STRING
-
- // We need to use a helper StrStream to do this transformation
- // because String doesn't support push_back().
- StrStream helper;
- for (const char* ch = start; ch != end; ++ch) {
- if (*ch == '\0') {
- helper << "\\0"; // Replaces NUL with "\\0";
- } else {
- helper.put(*ch);
- }
- }
-
-#if GTEST_HAS_STD_STRING
- return String(helper.str().c_str());
-#else
- const String str(helper.str(), helper.pcount());
- helper.freeze(false);
- ss->freeze(false);
- return str;
-#endif // GTEST_HAS_STD_STRING
-}
-
-// Appends the user-supplied message to the Google-Test-generated message.
-String AppendUserMessage(const String& gtest_msg,
- const Message& user_msg) {
- // Appends the user message if it's non-empty.
- const String user_msg_string = user_msg.GetString();
- if (user_msg_string.empty()) {
- return gtest_msg;
- }
-
- Message msg;
- msg << gtest_msg << "\n" << user_msg_string;
-
- return msg.GetString();
-}
-
-// class TestResult
-
-// Creates an empty TestResult.
-TestResult::TestResult()
- : death_test_count_(0),
- elapsed_time_(0) {
-}
-
-// D'tor.
-TestResult::~TestResult() {
-}
-
-// Adds a test part result to the list.
-void TestResult::AddTestPartResult(const TestPartResult& test_part_result) {
- test_part_results_.PushBack(test_part_result);
-}
-
-// Adds a test property to the list. If a property with the same key as the
-// supplied property is already represented, the value of this test_property
-// replaces the old value for that key.
-void TestResult::RecordProperty(const TestProperty& test_property) {
- if (!ValidateTestProperty(test_property)) {
- return;
- }
- MutexLock lock(&test_properites_mutex_);
- ListNode<TestProperty>* const node_with_matching_key =
- test_properties_.FindIf(TestPropertyKeyIs(test_property.key()));
- if (node_with_matching_key == NULL) {
- test_properties_.PushBack(test_property);
- return;
- }
- TestProperty& property_with_matching_key = node_with_matching_key->element();
- property_with_matching_key.SetValue(test_property.value());
-}
-
-// Adds a failure if the key is a reserved attribute of Google Test
-// testcase tags. Returns true if the property is valid.
-bool TestResult::ValidateTestProperty(const TestProperty& test_property) {
- String key(test_property.key());
- if (key == "name" || key == "status" || key == "time" || key == "classname") {
- ADD_FAILURE()
- << "Reserved key used in RecordProperty(): "
- << key
- << " ('name', 'status', 'time', and 'classname' are reserved by "
- << GTEST_NAME << ")";
- return false;
- }
- return true;
-}
-
-// Clears the object.
-void TestResult::Clear() {
- test_part_results_.Clear();
- test_properties_.Clear();
- death_test_count_ = 0;
- elapsed_time_ = 0;
-}
-
-// Returns true iff the test part passed.
-static bool TestPartPassed(const TestPartResult & result) {
- return result.passed();
-}
-
-// Gets the number of successful test parts.
-int TestResult::successful_part_count() const {
- return test_part_results_.CountIf(TestPartPassed);
-}
-
-// Returns true iff the test part failed.
-static bool TestPartFailed(const TestPartResult & result) {
- return result.failed();
-}
-
-// Gets the number of failed test parts.
-int TestResult::failed_part_count() const {
- return test_part_results_.CountIf(TestPartFailed);
-}
-
-// Returns true iff the test part fatally failed.
-static bool TestPartFatallyFailed(const TestPartResult & result) {
- return result.fatally_failed();
-}
-
-// Returns true iff the test fatally failed.
-bool TestResult::HasFatalFailure() const {
- return test_part_results_.CountIf(TestPartFatallyFailed) > 0;
-}
-
-// Gets the number of all test parts. This is the sum of the number
-// of successful test parts and the number of failed test parts.
-int TestResult::total_part_count() const {
- return test_part_results_.size();
-}
-
-} // namespace internal
-
-// class Test
-
-// Creates a Test object.
-
-// The c'tor saves the values of all Google Test flags.
-Test::Test()
- : gtest_flag_saver_(new internal::GTestFlagSaver) {
-}
-
-// The d'tor restores the values of all Google Test flags.
-Test::~Test() {
- delete gtest_flag_saver_;
-}
-
-// Sets up the test fixture.
-//
-// A sub-class may override this.
-void Test::SetUp() {
-}
-
-// Tears down the test fixture.
-//
-// A sub-class may override this.
-void Test::TearDown() {
-}
-
-// Allows user supplied key value pairs to be recorded for later output.
-void Test::RecordProperty(const char* key, const char* value) {
- UnitTest::GetInstance()->RecordPropertyForCurrentTest(key, value);
-}
-
-// Allows user supplied key value pairs to be recorded for later output.
-void Test::RecordProperty(const char* key, int value) {
- Message value_message;
- value_message << value;
- RecordProperty(key, value_message.GetString().c_str());
-}
-
-#ifdef GTEST_OS_WINDOWS
-// We are on Windows.
-
-// Adds an "exception thrown" fatal failure to the current test.
-static void AddExceptionThrownFailure(DWORD exception_code,
- const char* location) {
- Message message;
- message << "Exception thrown with code 0x" << std::setbase(16) <<
- exception_code << std::setbase(10) << " in " << location << ".";
-
- UnitTest* const unit_test = UnitTest::GetInstance();
- unit_test->AddTestPartResult(
- TPRT_FATAL_FAILURE,
- static_cast<const char *>(NULL),
- // We have no info about the source file where the exception
- // occurred.
- -1, // We have no info on which line caused the exception.
- message.GetString(),
- internal::String(""));
-}
-
-#endif // GTEST_OS_WINDOWS
-
-// Google Test requires all tests in the same test case to use the same test
-// fixture class. This function checks if the current test has the
-// same fixture class as the first test in the current test case. If
-// yes, it returns true; otherwise it generates a Google Test failure and
-// returns false.
-bool Test::HasSameFixtureClass() {
- internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
- const TestCase* const test_case = impl->current_test_case();
-
- // Info about the first test in the current test case.
- const internal::TestInfoImpl* const first_test_info =
- test_case->test_info_list().Head()->element()->impl();
- const internal::TypeId first_fixture_id = first_test_info->fixture_class_id();
- const char* const first_test_name = first_test_info->name();
-
- // Info about the current test.
- const internal::TestInfoImpl* const this_test_info =
- impl->current_test_info()->impl();
- const internal::TypeId this_fixture_id = this_test_info->fixture_class_id();
- const char* const this_test_name = this_test_info->name();
-
- if (this_fixture_id != first_fixture_id) {
- // Is the first test defined using TEST?
- const bool first_is_TEST = first_fixture_id == internal::GetTestTypeId();
- // Is this test defined using TEST?
- const bool this_is_TEST = this_fixture_id == internal::GetTestTypeId();
-
- if (first_is_TEST || this_is_TEST) {
- // The user mixed TEST and TEST_F in this test case - we'll tell
- // him/her how to fix it.
-
- // Gets the name of the TEST and the name of the TEST_F. Note
- // that first_is_TEST and this_is_TEST cannot both be true, as
- // the fixture IDs are different for the two tests.
- const char* const TEST_name =
- first_is_TEST ? first_test_name : this_test_name;
- const char* const TEST_F_name =
- first_is_TEST ? this_test_name : first_test_name;
-
- ADD_FAILURE()
- << "All tests in the same test case must use the same test fixture\n"
- << "class, so mixing TEST_F and TEST in the same test case is\n"
- << "illegal. In test case " << this_test_info->test_case_name()
- << ",\n"
- << "test " << TEST_F_name << " is defined using TEST_F but\n"
- << "test " << TEST_name << " is defined using TEST. You probably\n"
- << "want to change the TEST to TEST_F or move it to another test\n"
- << "case.";
- } else {
- // The user defined two fixture classes with the same name in
- // two namespaces - we'll tell him/her how to fix it.
- ADD_FAILURE()
- << "All tests in the same test case must use the same test fixture\n"
- << "class. However, in test case "
- << this_test_info->test_case_name() << ",\n"
- << "you defined test " << first_test_name
- << " and test " << this_test_name << "\n"
- << "using two different test fixture classes. This can happen if\n"
- << "the two classes are from different namespaces or translation\n"
- << "units and have the same name. You should probably rename one\n"
- << "of the classes to put the tests into different test cases.";
- }
- return false;
- }
-
- return true;
-}
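
A short sketch of the misuse this check rejects, with hypothetical fixture and
test names:

    class FooTest : public ::testing::Test {};

    TEST_F(FooTest, UsesTheFixture) { /* ... */ }
    // Reuses the test case name "FooTest" but with the implicit Test fixture,
    // so its fixture id differs from the TEST_F above and the check fails:
    TEST(FooTest, UsesNoFixture) { /* ... */ }
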
-
-// Runs the test and updates the test result.
-void Test::Run() {
- if (!HasSameFixtureClass()) return;
-
- internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
-#if defined(GTEST_OS_WINDOWS) && !defined(__MINGW32__)
- // We are on Windows.
- impl->os_stack_trace_getter()->UponLeavingGTest();
- __try {
- SetUp();
- } __except(internal::UnitTestOptions::GTestShouldProcessSEH(
- GetExceptionCode())) {
- AddExceptionThrownFailure(GetExceptionCode(), "SetUp()");
- }
-
- // We will run the test only if SetUp() had no fatal failure.
- if (!HasFatalFailure()) {
- impl->os_stack_trace_getter()->UponLeavingGTest();
- __try {
- TestBody();
- } __except(internal::UnitTestOptions::GTestShouldProcessSEH(
- GetExceptionCode())) {
- AddExceptionThrownFailure(GetExceptionCode(), "the test body");
- }
- }
-
- // However, we want to clean up as much as possible. Hence we will
- // always call TearDown(), even if SetUp() or the test body has
- // failed.
- impl->os_stack_trace_getter()->UponLeavingGTest();
- __try {
- TearDown();
- } __except(internal::UnitTestOptions::GTestShouldProcessSEH(
- GetExceptionCode())) {
- AddExceptionThrownFailure(GetExceptionCode(), "TearDown()");
- }
-
-#else // We are on Linux, Mac or MinGW - exceptions are disabled.
- impl->os_stack_trace_getter()->UponLeavingGTest();
- SetUp();
-
- // We will run the test only if SetUp() was successful.
- if (!HasFatalFailure()) {
- impl->os_stack_trace_getter()->UponLeavingGTest();
- TestBody();
- }
-
- // However, we want to clean up as much as possible. Hence we will
- // always call TearDown(), even if SetUp() or the test body has
- // failed.
- impl->os_stack_trace_getter()->UponLeavingGTest();
- TearDown();
-#endif // GTEST_OS_WINDOWS
-}
-
-
-// Returns true iff the current test has a fatal failure.
-bool Test::HasFatalFailure() {
- return internal::GetUnitTestImpl()->current_test_result()->HasFatalFailure();
-}
-
-// class TestInfo
-
-// Constructs a TestInfo object. It assumes ownership of the test factory
-// object via impl_.
-TestInfo::TestInfo(const char* test_case_name,
- const char* name,
- const char* test_case_comment,
- const char* comment,
- internal::TypeId fixture_class_id,
- internal::TestFactoryBase* factory) {
- impl_ = new internal::TestInfoImpl(this, test_case_name, name,
- test_case_comment, comment,
- fixture_class_id, factory);
-}
-
-// Destructs a TestInfo object.
-TestInfo::~TestInfo() {
- delete impl_;
-}
-
-namespace internal {
-
-// Creates a new TestInfo object and registers it with Google Test;
-// returns the created object.
-//
-// Arguments:
-//
-// test_case_name: name of the test case
-// name: name of the test
-// test_case_comment: a comment on the test case that will be included in
-// the test output
-// comment: a comment on the test that will be included in the
-// test output
-// fixture_class_id: ID of the test fixture class
-// set_up_tc: pointer to the function that sets up the test case
-// tear_down_tc: pointer to the function that tears down the test case
-// factory: pointer to the factory that creates a test object.
-// The newly created TestInfo instance will assume
-// ownership of the factory object.
-TestInfo* MakeAndRegisterTestInfo(
- const char* test_case_name, const char* name,
- const char* test_case_comment, const char* comment,
- TypeId fixture_class_id,
- SetUpTestCaseFunc set_up_tc,
- TearDownTestCaseFunc tear_down_tc,
- TestFactoryBase* factory) {
- TestInfo* const test_info =
- new TestInfo(test_case_name, name, test_case_comment, comment,
- fixture_class_id, factory);
- GetUnitTestImpl()->AddTestInfo(set_up_tc, tear_down_tc, test_info);
- return test_info;
-}
-
-#ifdef GTEST_HAS_PARAM_TEST
-void ReportInvalidTestCaseType(const char* test_case_name,
- const char* file, int line) {
- Message errors;
- errors
- << "Attempted redefinition of test case " << test_case_name << ".\n"
- << "All tests in the same test case must use the same test fixture\n"
- << "class. However, in test case " << test_case_name << ", you tried\n"
- << "to define a test using a fixture class different from the one\n"
- << "used earlier. This can happen if the two fixture classes are\n"
- << "from different namespaces and have the same name. You should\n"
- << "probably rename one of the classes to put the tests into different\n"
- << "test cases.";
-
- fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(),
- errors.GetString().c_str());
-}
-#endif // GTEST_HAS_PARAM_TEST
-
-} // namespace internal
-
-// Returns the test case name.
-const char* TestInfo::test_case_name() const {
- return impl_->test_case_name();
-}
-
-// Returns the test name.
-const char* TestInfo::name() const {
- return impl_->name();
-}
-
-// Returns the test case comment.
-const char* TestInfo::test_case_comment() const {
- return impl_->test_case_comment();
-}
-
-// Returns the test comment.
-const char* TestInfo::comment() const {
- return impl_->comment();
-}
-
-// Returns true if this test should run.
-bool TestInfo::should_run() const { return impl_->should_run(); }
-
-// Returns the result of the test.
-const internal::TestResult* TestInfo::result() const { return impl_->result(); }
-
-// Increments the number of death tests encountered in this test so
-// far.
-int TestInfo::increment_death_test_count() {
- return impl_->result()->increment_death_test_count();
-}
-
-namespace {
-
-// A predicate that checks the test name of a TestInfo against a known
-// value.
-//
-// This is used for implementation of the TestCase class only. We put
-// it in the anonymous namespace to prevent polluting the outer
-// namespace.
-//
-// TestNameIs is copyable.
-class TestNameIs {
- public:
- // Constructor.
- //
- // TestNameIs has NO default constructor.
- explicit TestNameIs(const char* name)
- : name_(name) {}
-
- // Returns true iff the test name of test_info matches name_.
- bool operator()(const TestInfo * test_info) const {
- return test_info && internal::String(test_info->name()).Compare(name_) == 0;
- }
-
- private:
- internal::String name_;
-};
-
-} // namespace
-
-// Finds and returns a TestInfo with the given name. If one doesn't
-// exist, returns NULL.
-TestInfo * TestCase::GetTestInfo(const char* test_name) {
- // Can we find a TestInfo with the given name?
- internal::ListNode<TestInfo *> * const node = test_info_list_->FindIf(
- TestNameIs(test_name));
-
- // Returns the TestInfo found.
- return node ? node->element() : NULL;
-}
-
-namespace internal {
-
-// This method expands all parameterized tests registered with macros TEST_P
-// and INSTANTIATE_TEST_CASE_P into regular tests and registers those.
-// This will be done just once during the program runtime.
-void UnitTestImpl::RegisterParameterizedTests() {
-#ifdef GTEST_HAS_PARAM_TEST
- if (!parameterized_tests_registered_) {
- parameterized_test_registry_.RegisterTests();
- parameterized_tests_registered_ = true;
- }
-#endif
-}
-
-// Creates the test object, runs it, records its result, and then
-// deletes it.
-void TestInfoImpl::Run() {
- if (!should_run_) return;
-
- // Tells UnitTest where to store test result.
- UnitTestImpl* const impl = internal::GetUnitTestImpl();
- impl->set_current_test_info(parent_);
-
- // Notifies the unit test event listener that a test is about to
- // start.
- UnitTestEventListenerInterface* const result_printer =
- impl->result_printer();
- result_printer->OnTestStart(parent_);
-
- const TimeInMillis start = GetTimeInMillis();
-
- impl->os_stack_trace_getter()->UponLeavingGTest();
-#if defined(GTEST_OS_WINDOWS) && !defined(__MINGW32__)
- // We are on Windows.
- Test* test = NULL;
-
- __try {
- // Creates the test object.
- test = factory_->CreateTest();
- } __except(internal::UnitTestOptions::GTestShouldProcessSEH(
- GetExceptionCode())) {
- AddExceptionThrownFailure(GetExceptionCode(),
- "the test fixture's constructor");
- return;
- }
-#else // We are on Linux, Mac OS or MinGW - exceptions are disabled.
-
- // TODO(wan): If test->Run() throws, test won't be deleted. This is
- // not a problem now as we don't use exceptions. If we were to
- // enable exceptions, we should revise the following to be
- // exception-safe.
-
- // Creates the test object.
- Test* test = factory_->CreateTest();
-#endif // GTEST_OS_WINDOWS
-
- // Runs the test only if the constructor of the test fixture didn't
- // generate a fatal failure.
- if (!Test::HasFatalFailure()) {
- test->Run();
- }
-
- // Deletes the test object.
- impl->os_stack_trace_getter()->UponLeavingGTest();
- delete test;
- test = NULL;
-
- result_.set_elapsed_time(GetTimeInMillis() - start);
-
- // Notifies the unit test event listener that a test has just finished.
- result_printer->OnTestEnd(parent_);
-
- // Tells UnitTest to stop associating assertion results to this
- // test.
- impl->set_current_test_info(NULL);
-}
-
-} // namespace internal
-
-// class TestCase
-
-// Gets the number of successful tests in this test case.
-int TestCase::successful_test_count() const {
- return test_info_list_->CountIf(TestPassed);
-}
-
-// Gets the number of failed tests in this test case.
-int TestCase::failed_test_count() const {
- return test_info_list_->CountIf(TestFailed);
-}
-
-int TestCase::disabled_test_count() const {
- return test_info_list_->CountIf(TestDisabled);
-}
-
-// Get the number of tests in this test case that should run.
-int TestCase::test_to_run_count() const {
- return test_info_list_->CountIf(ShouldRunTest);
-}
-
-// Gets the number of all tests.
-int TestCase::total_test_count() const {
- return test_info_list_->size();
-}
-
-// Creates a TestCase with the given name.
-//
-// Arguments:
-//
-// name: name of the test case
-// set_up_tc: pointer to the function that sets up the test case
-// tear_down_tc: pointer to the function that tears down the test case
-TestCase::TestCase(const char* name, const char* comment,
- Test::SetUpTestCaseFunc set_up_tc,
- Test::TearDownTestCaseFunc tear_down_tc)
- : name_(name),
- comment_(comment),
- set_up_tc_(set_up_tc),
- tear_down_tc_(tear_down_tc),
- should_run_(false),
- elapsed_time_(0) {
- test_info_list_ = new internal::List<TestInfo *>;
-}
-
-// Destructor of TestCase.
-TestCase::~TestCase() {
- // Deletes every Test in the collection.
- test_info_list_->ForEach(internal::Delete<TestInfo>);
-
- // Then deletes the Test collection.
- delete test_info_list_;
- test_info_list_ = NULL;
-}
-
-// Adds a test to this test case. Will delete the test upon
-// destruction of the TestCase object.
-void TestCase::AddTestInfo(TestInfo * test_info) {
- test_info_list_->PushBack(test_info);
-}
-
-// Runs every test in this TestCase.
-void TestCase::Run() {
- if (!should_run_) return;
-
- internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
- impl->set_current_test_case(this);
-
- UnitTestEventListenerInterface * const result_printer =
- impl->result_printer();
-
- result_printer->OnTestCaseStart(this);
- impl->os_stack_trace_getter()->UponLeavingGTest();
- set_up_tc_();
-
- const internal::TimeInMillis start = internal::GetTimeInMillis();
- test_info_list_->ForEach(internal::TestInfoImpl::RunTest);
- elapsed_time_ = internal::GetTimeInMillis() - start;
-
- impl->os_stack_trace_getter()->UponLeavingGTest();
- tear_down_tc_();
- result_printer->OnTestCaseEnd(this);
- impl->set_current_test_case(NULL);
-}
-
-// Clears the results of all tests in this test case.
-void TestCase::ClearResult() {
- test_info_list_->ForEach(internal::TestInfoImpl::ClearTestResult);
-}
-
-
-// class UnitTestEventListenerInterface
-
-// The virtual d'tor.
-UnitTestEventListenerInterface::~UnitTestEventListenerInterface() {
-}
-
-// A result printer that never prints anything. Used in the child process
-// of an exec-style death test to avoid needless output clutter.
-class NullUnitTestResultPrinter : public UnitTestEventListenerInterface {};
-
-// Formats a countable noun. Depending on its quantity, either the
-// singular form or the plural form is used. e.g.
-//
-// FormatCountableNoun(1, "formula", "formulae") returns "1 formula".
-// FormatCountableNoun(5, "book", "books") returns "5 books".
-static internal::String FormatCountableNoun(int count,
- const char * singular_form,
- const char * plural_form) {
- return internal::String::Format("%d %s", count,
- count == 1 ? singular_form : plural_form);
-}
-
-// Formats the count of tests.
-static internal::String FormatTestCount(int test_count) {
- return FormatCountableNoun(test_count, "test", "tests");
-}
-
-// Formats the count of test cases.
-static internal::String FormatTestCaseCount(int test_case_count) {
- return FormatCountableNoun(test_case_count, "test case", "test cases");
-}
-
-// Converts a TestPartResultType enum to human-friendly string
-// representation. Both TPRT_NONFATAL_FAILURE and TPRT_FATAL_FAILURE
-// are translated to "Failure", as the user usually doesn't care about
-// the difference between the two when viewing the test result.
-static const char * TestPartResultTypeToString(TestPartResultType type) {
- switch (type) {
- case TPRT_SUCCESS:
- return "Success";
-
- case TPRT_NONFATAL_FAILURE:
- case TPRT_FATAL_FAILURE:
-#ifdef _MSC_VER
- return "error: ";
-#else
- return "Failure\n";
-#endif
- }
-
- return "Unknown result type";
-}
-
-// Prints a TestPartResult.
-static void PrintTestPartResult(
- const TestPartResult & test_part_result) {
- printf("%s %s%s\n",
- internal::FormatFileLocation(test_part_result.file_name(),
- test_part_result.line_number()).c_str(),
- TestPartResultTypeToString(test_part_result.type()),
- test_part_result.message());
- fflush(stdout);
-}
-
-// class PrettyUnitTestResultPrinter
-
-namespace internal {
-
-enum GTestColor {
- COLOR_RED,
- COLOR_GREEN,
- COLOR_YELLOW
-};
-
-#if defined(GTEST_OS_WINDOWS) && !defined(_WIN32_WCE)
-
-// Returns the character attribute for the given color.
-WORD GetColorAttribute(GTestColor color) {
- switch (color) {
- case COLOR_RED: return FOREGROUND_RED;
- case COLOR_GREEN: return FOREGROUND_GREEN;
- case COLOR_YELLOW: return FOREGROUND_RED | FOREGROUND_GREEN;
- }
- return 0;
-}
-
-#else
-
-// Returns the ANSI color code for the given color.
-const char* GetAnsiColorCode(GTestColor color) {
- switch (color) {
- case COLOR_RED: return "1";
- case COLOR_GREEN: return "2";
- case COLOR_YELLOW: return "3";
- }
- return NULL;
-}
-
-#endif // GTEST_OS_WINDOWS && !_WIN32_WCE
-
-// Returns true iff Google Test should use colors in the output.
-bool ShouldUseColor(bool stdout_is_tty) {
- const char* const gtest_color = GTEST_FLAG(color).c_str();
-
- if (String::CaseInsensitiveCStringEquals(gtest_color, "auto")) {
-#ifdef GTEST_OS_WINDOWS
- // On Windows the TERM variable is usually not set, but the
- // console there does support colors.
- return stdout_is_tty;
-#else
- // On non-Windows platforms, we rely on the TERM variable.
- const char* const term = GetEnv("TERM");
- const bool term_supports_color =
- String::CStringEquals(term, "xterm") ||
- String::CStringEquals(term, "xterm-color") ||
- String::CStringEquals(term, "cygwin");
- return stdout_is_tty && term_supports_color;
-#endif // GTEST_OS_WINDOWS
- }
-
- return String::CaseInsensitiveCStringEquals(gtest_color, "yes") ||
- String::CaseInsensitiveCStringEquals(gtest_color, "true") ||
- String::CaseInsensitiveCStringEquals(gtest_color, "t") ||
- String::CStringEquals(gtest_color, "1");
- // We take "yes", "true", "t", and "1" as meaning "yes". If the
- // value is neither one of these nor "auto", we treat it as "no" to
- // be conservative.
-}
-
-// Helpers for printing colored strings to stdout. Note that on Windows, we
-// cannot simply emit special characters and have the terminal change colors.
-// This routine must actually emit the characters rather than return a string
-// that would be colored when printed, as can be done on Linux.
-void ColoredPrintf(GTestColor color, const char* fmt, ...) {
- va_list args;
- va_start(args, fmt);
-
-#if defined(_WIN32_WCE) || defined(GTEST_OS_SYMBIAN) || defined(GTEST_OS_ZOS)
- static const bool use_color = false;
-#else
- static const bool use_color = ShouldUseColor(isatty(fileno(stdout)) != 0);
-#endif // _WIN32_WCE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS
- // The '!= 0' comparison is necessary to satisfy MSVC 7.1.
-
- if (!use_color) {
- vprintf(fmt, args);
- va_end(args);
- return;
- }
-
-#if defined(GTEST_OS_WINDOWS) && !defined(_WIN32_WCE)
- const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
-
- // Gets the current text color.
- CONSOLE_SCREEN_BUFFER_INFO buffer_info;
- GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
- const WORD old_color_attrs = buffer_info.wAttributes;
-
- SetConsoleTextAttribute(stdout_handle,
- GetColorAttribute(color) | FOREGROUND_INTENSITY);
- vprintf(fmt, args);
-
- // Restores the text color.
- SetConsoleTextAttribute(stdout_handle, old_color_attrs);
-#else
- printf("\033[0;3%sm", GetAnsiColorCode(color));
- vprintf(fmt, args);
- printf("\033[m"); // Resets the terminal to default.
-#endif // GTEST_OS_WINDOWS && !_WIN32_WCE
- va_end(args);
-}
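
On the non-Windows path the coloring is done with plain ANSI escapes; for
COLOR_GREEN the sequence emitted around the payload amounts to:

    printf("\033[0;32m");     // "\033[0;3%sm" with GetAnsiColorCode(COLOR_GREEN) == "2"
    printf("[==========] ");  // the payload itself (vprintf of fmt/args)
    printf("\033[m");         // resets the terminal to its default attributes
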
-
-} // namespace internal
-
-using internal::ColoredPrintf;
-using internal::COLOR_RED;
-using internal::COLOR_GREEN;
-using internal::COLOR_YELLOW;
-
-// This class implements the UnitTestEventListenerInterface interface.
-//
-// Class PrettyUnitTestResultPrinter is copyable.
-class PrettyUnitTestResultPrinter : public UnitTestEventListenerInterface {
- public:
- PrettyUnitTestResultPrinter() {}
- static void PrintTestName(const char * test_case, const char * test) {
- printf("%s.%s", test_case, test);
- }
-
- // The following methods override what's in the
- // UnitTestEventListenerInterface class.
- virtual void OnUnitTestStart(const UnitTest * unit_test);
- virtual void OnGlobalSetUpStart(const UnitTest*);
- virtual void OnTestCaseStart(const TestCase * test_case);
- virtual void OnTestCaseEnd(const TestCase * test_case);
- virtual void OnTestStart(const TestInfo * test_info);
- virtual void OnNewTestPartResult(const TestPartResult * result);
- virtual void OnTestEnd(const TestInfo * test_info);
- virtual void OnGlobalTearDownStart(const UnitTest*);
- virtual void OnUnitTestEnd(const UnitTest * unit_test);
-
- private:
- internal::String test_case_name_;
-};
-
-// Called before the unit test starts.
-void PrettyUnitTestResultPrinter::OnUnitTestStart(
- const UnitTest * unit_test) {
- const char * const filter = GTEST_FLAG(filter).c_str();
-
- // Prints the filter if it's not *. This reminds the user that some
- // tests may be skipped.
- if (!internal::String::CStringEquals(filter, kUniversalFilter)) {
- ColoredPrintf(COLOR_YELLOW,
- "Note: %s filter = %s\n", GTEST_NAME, filter);
- }
-
- const internal::UnitTestImpl* const impl = unit_test->impl();
- ColoredPrintf(COLOR_GREEN, "[==========] ");
- printf("Running %s from %s.\n",
- FormatTestCount(impl->test_to_run_count()).c_str(),
- FormatTestCaseCount(impl->test_case_to_run_count()).c_str());
- fflush(stdout);
-}
-
-void PrettyUnitTestResultPrinter::OnGlobalSetUpStart(const UnitTest*) {
- ColoredPrintf(COLOR_GREEN, "[----------] ");
- printf("Global test environment set-up.\n");
- fflush(stdout);
-}
-
-void PrettyUnitTestResultPrinter::OnTestCaseStart(
- const TestCase * test_case) {
- test_case_name_ = test_case->name();
- const internal::String counts =
- FormatCountableNoun(test_case->test_to_run_count(), "test", "tests");
- ColoredPrintf(COLOR_GREEN, "[----------] ");
- printf("%s from %s", counts.c_str(), test_case_name_.c_str());
- if (test_case->comment()[0] == '\0') {
- printf("\n");
- } else {
- printf(", where %s\n", test_case->comment());
- }
- fflush(stdout);
-}
-
-void PrettyUnitTestResultPrinter::OnTestCaseEnd(
- const TestCase * test_case) {
- if (!GTEST_FLAG(print_time)) return;
-
- test_case_name_ = test_case->name();
- const internal::String counts =
- FormatCountableNoun(test_case->test_to_run_count(), "test", "tests");
- ColoredPrintf(COLOR_GREEN, "[----------] ");
- printf("%s from %s (%s ms total)\n\n",
- counts.c_str(), test_case_name_.c_str(),
- internal::StreamableToString(test_case->elapsed_time()).c_str());
- fflush(stdout);
-}
-
-void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo * test_info) {
- ColoredPrintf(COLOR_GREEN, "[ RUN ] ");
- PrintTestName(test_case_name_.c_str(), test_info->name());
- if (test_info->comment()[0] == '\0') {
- printf("\n");
- } else {
- printf(", where %s\n", test_info->comment());
- }
- fflush(stdout);
-}
-
-void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo * test_info) {
- if (test_info->result()->Passed()) {
- ColoredPrintf(COLOR_GREEN, "[ OK ] ");
- } else {
- ColoredPrintf(COLOR_RED, "[ FAILED ] ");
- }
- PrintTestName(test_case_name_.c_str(), test_info->name());
- if (GTEST_FLAG(print_time)) {
- printf(" (%s ms)\n", internal::StreamableToString(
- test_info->result()->elapsed_time()).c_str());
- } else {
- printf("\n");
- }
- fflush(stdout);
-}
-
-// Called after an assertion failure.
-void PrettyUnitTestResultPrinter::OnNewTestPartResult(
- const TestPartResult * result) {
- // If the test part succeeded, we don't need to do anything.
- if (result->type() == TPRT_SUCCESS)
- return;
-
- // Print failure message from the assertion (e.g. expected this and got that).
- PrintTestPartResult(*result);
- fflush(stdout);
-}
-
-void PrettyUnitTestResultPrinter::OnGlobalTearDownStart(const UnitTest*) {
- ColoredPrintf(COLOR_GREEN, "[----------] ");
- printf("Global test environment tear-down\n");
- fflush(stdout);
-}
-
-namespace internal {
-
-// Internal helper for printing the list of failed tests.
-static void PrintFailedTestsPretty(const UnitTestImpl* impl) {
- const int failed_test_count = impl->failed_test_count();
- if (failed_test_count == 0) {
- return;
- }
-
- for (const internal::ListNode<TestCase*>* node = impl->test_cases()->Head();
- node != NULL; node = node->next()) {
- const TestCase* const tc = node->element();
- if (!tc->should_run() || (tc->failed_test_count() == 0)) {
- continue;
- }
- for (const internal::ListNode<TestInfo*>* tinode =
- tc->test_info_list().Head();
- tinode != NULL; tinode = tinode->next()) {
- const TestInfo* const ti = tinode->element();
- if (!tc->ShouldRunTest(ti) || tc->TestPassed(ti)) {
- continue;
- }
- ColoredPrintf(COLOR_RED, "[ FAILED ] ");
- printf("%s.%s", ti->test_case_name(), ti->name());
- if (ti->test_case_comment()[0] != '\0' ||
- ti->comment()[0] != '\0') {
- printf(", where %s", ti->test_case_comment());
- if (ti->test_case_comment()[0] != '\0' &&
- ti->comment()[0] != '\0') {
- printf(" and ");
- }
- }
- printf("%s\n", ti->comment());
- }
- }
-}
-
-} // namespace internal
-
-void PrettyUnitTestResultPrinter::OnUnitTestEnd(
- const UnitTest * unit_test) {
- const internal::UnitTestImpl* const impl = unit_test->impl();
-
- ColoredPrintf(COLOR_GREEN, "[==========] ");
- printf("%s from %s ran.",
- FormatTestCount(impl->test_to_run_count()).c_str(),
- FormatTestCaseCount(impl->test_case_to_run_count()).c_str());
- if (GTEST_FLAG(print_time)) {
- printf(" (%s ms total)",
- internal::StreamableToString(impl->elapsed_time()).c_str());
- }
- printf("\n");
- ColoredPrintf(COLOR_GREEN, "[ PASSED ] ");
- printf("%s.\n", FormatTestCount(impl->successful_test_count()).c_str());
-
- int num_failures = impl->failed_test_count();
- if (!impl->Passed()) {
- const int failed_test_count = impl->failed_test_count();
- ColoredPrintf(COLOR_RED, "[ FAILED ] ");
- printf("%s, listed below:\n", FormatTestCount(failed_test_count).c_str());
- internal::PrintFailedTestsPretty(impl);
- printf("\n%2d FAILED %s\n", num_failures,
- num_failures == 1 ? "TEST" : "TESTS");
- }
-
- int num_disabled = impl->disabled_test_count();
- if (num_disabled) {
- if (!num_failures) {
- printf("\n"); // Add a spacer if no FAILURE banner is displayed.
- }
- ColoredPrintf(COLOR_YELLOW,
- " YOU HAVE %d DISABLED %s\n\n",
- num_disabled,
- num_disabled == 1 ? "TEST" : "TESTS");
- }
- // Ensure that Google Test output is printed before, e.g., heapchecker output.
- fflush(stdout);
-}
-
-// End PrettyUnitTestResultPrinter
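Taken together, the callbacks above yield console output of roughly the following shape; the names, timings and exact banner alignment below are illustrative only (the real banners are column-aligned):

[==========] Running 2 tests from 1 test case.
[----------] Global test environment set-up.
[----------] 2 tests from FooTest
[ RUN      ] FooTest.Works
[       OK ] FooTest.Works (0 ms)
[ RUN      ] FooTest.Fails
  (assertion failure details are printed here by PrintTestPartResult)
[  FAILED  ] FooTest.Fails (1 ms)
[----------] 2 tests from FooTest (1 ms total)

[----------] Global test environment tear-down
[==========] 2 tests from 1 test case ran. (2 ms total)
[  PASSED  ] 1 test.
[  FAILED  ] 1 test, listed below:
[  FAILED  ] FooTest.Fails

 1 FAILED TEST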
-
-// class UnitTestEventsRepeater
-//
-// This class forwards events to other event listeners.
-class UnitTestEventsRepeater : public UnitTestEventListenerInterface {
- public:
- typedef internal::List<UnitTestEventListenerInterface *> Listeners;
- typedef internal::ListNode<UnitTestEventListenerInterface *> ListenersNode;
- UnitTestEventsRepeater() {}
- virtual ~UnitTestEventsRepeater();
- void AddListener(UnitTestEventListenerInterface *listener);
-
- virtual void OnUnitTestStart(const UnitTest* unit_test);
- virtual void OnUnitTestEnd(const UnitTest* unit_test);
- virtual void OnGlobalSetUpStart(const UnitTest* unit_test);
- virtual void OnGlobalSetUpEnd(const UnitTest* unit_test);
- virtual void OnGlobalTearDownStart(const UnitTest* unit_test);
- virtual void OnGlobalTearDownEnd(const UnitTest* unit_test);
- virtual void OnTestCaseStart(const TestCase* test_case);
- virtual void OnTestCaseEnd(const TestCase* test_case);
- virtual void OnTestStart(const TestInfo* test_info);
- virtual void OnTestEnd(const TestInfo* test_info);
- virtual void OnNewTestPartResult(const TestPartResult* result);
-
- private:
- Listeners listeners_;
-
- GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestEventsRepeater);
-};
-
-UnitTestEventsRepeater::~UnitTestEventsRepeater() {
- for (ListenersNode* listener = listeners_.Head();
- listener != NULL;
- listener = listener->next()) {
- delete listener->element();
- }
-}
-
-void UnitTestEventsRepeater::AddListener(
- UnitTestEventListenerInterface *listener) {
- listeners_.PushBack(listener);
-}
-
-// Since the methods are identical, use a macro to reduce boilerplate.
-// This defines a member that repeats the call to all listeners.
-#define GTEST_REPEATER_METHOD_(Name, Type) \
-void UnitTestEventsRepeater::Name(const Type* parameter) { \
- for (ListenersNode* listener = listeners_.Head(); \
- listener != NULL; \
- listener = listener->next()) { \
- listener->element()->Name(parameter); \
- } \
-}
-
-GTEST_REPEATER_METHOD_(OnUnitTestStart, UnitTest)
-GTEST_REPEATER_METHOD_(OnUnitTestEnd, UnitTest)
-GTEST_REPEATER_METHOD_(OnGlobalSetUpStart, UnitTest)
-GTEST_REPEATER_METHOD_(OnGlobalSetUpEnd, UnitTest)
-GTEST_REPEATER_METHOD_(OnGlobalTearDownStart, UnitTest)
-GTEST_REPEATER_METHOD_(OnGlobalTearDownEnd, UnitTest)
-GTEST_REPEATER_METHOD_(OnTestCaseStart, TestCase)
-GTEST_REPEATER_METHOD_(OnTestCaseEnd, TestCase)
-GTEST_REPEATER_METHOD_(OnTestStart, TestInfo)
-GTEST_REPEATER_METHOD_(OnTestEnd, TestInfo)
-GTEST_REPEATER_METHOD_(OnNewTestPartResult, TestPartResult)
-
-#undef GTEST_REPEATER_METHOD_
-
- // End UnitTestEventsRepeater
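A single expansion of GTEST_REPEATER_METHOD_ above, e.g. GTEST_REPEATER_METHOD_(OnTestStart, TestInfo), is equivalent to the hand-written forwarding method sketched below (reconstructed from the macro body for clarity; it is not additional code from the diff):

void UnitTestEventsRepeater::OnTestStart(const TestInfo* parameter) {
  // Forwards the event to every registered listener, in registration order.
  for (ListenersNode* listener = listeners_.Head();
       listener != NULL;
       listener = listener->next()) {
    listener->element()->OnTestStart(parameter);
  }
}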
-
-// This class generates an XML output file.
-class XmlUnitTestResultPrinter : public UnitTestEventListenerInterface {
- public:
- explicit XmlUnitTestResultPrinter(const char* output_file);
-
- virtual void OnUnitTestEnd(const UnitTest* unit_test);
-
- private:
- // Is c a whitespace character that is normalized to a space character
- // when it appears in an XML attribute value?
- static bool IsNormalizableWhitespace(char c) {
- return c == 0x9 || c == 0xA || c == 0xD;
- }
-
- // May c appear in a well-formed XML document?
- static bool IsValidXmlCharacter(char c) {
- return IsNormalizableWhitespace(c) || c >= 0x20;
- }
-
- // Returns an XML-escaped copy of the input string str. If
- // is_attribute is true, the text is meant to appear as an attribute
- // value, and normalizable whitespace is preserved by replacing it
- // with character references.
- static internal::String EscapeXml(const char* str,
- bool is_attribute);
-
- // Convenience wrapper around EscapeXml when str is an attribute value.
- static internal::String EscapeXmlAttribute(const char* str) {
- return EscapeXml(str, true);
- }
-
- // Convenience wrapper around EscapeXml when str is not an attribute value.
- static internal::String EscapeXmlText(const char* str) {
- return EscapeXml(str, false);
- }
-
- // Prints an XML representation of a TestInfo object.
- static void PrintXmlTestInfo(FILE* out,
- const char* test_case_name,
- const TestInfo* test_info);
-
- // Prints an XML representation of a TestCase object
- static void PrintXmlTestCase(FILE* out, const TestCase* test_case);
-
- // Prints an XML summary of unit_test to output stream out.
- static void PrintXmlUnitTest(FILE* out, const UnitTest* unit_test);
-
- // Produces a string representing the test properties in a result as space
- // delimited XML attributes based on the property key="value" pairs.
- // When the String is not empty, it includes a space at the beginning,
- // to delimit this attribute from prior attributes.
- static internal::String TestPropertiesAsXmlAttributes(
- const internal::TestResult* result);
-
- // The output file.
- const internal::String output_file_;
-
- GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter);
-};
-
-// Creates a new XmlUnitTestResultPrinter.
-XmlUnitTestResultPrinter::XmlUnitTestResultPrinter(const char* output_file)
- : output_file_(output_file) {
- if (output_file_.c_str() == NULL || output_file_.empty()) {
- fprintf(stderr, "XML output file may not be null\n");
- fflush(stderr);
- exit(EXIT_FAILURE);
- }
-}
-
-// Called after the unit test ends.
-void XmlUnitTestResultPrinter::OnUnitTestEnd(const UnitTest* unit_test) {
- FILE* xmlout = NULL;
- internal::FilePath output_file(output_file_);
- internal::FilePath output_dir(output_file.RemoveFileName());
-
- if (output_dir.CreateDirectoriesRecursively()) {
- // MSVC 8 deprecates fopen(), so we want to suppress warning 4996
- // (deprecated function) there.
-#ifdef GTEST_OS_WINDOWS
- // We are on Windows.
-#pragma warning(push) // Saves the current warning state.
-#pragma warning(disable:4996) // Temporarily disables warning 4996.
- xmlout = fopen(output_file_.c_str(), "w");
-#pragma warning(pop) // Restores the warning state.
-#else // We are on Linux or Mac OS.
- xmlout = fopen(output_file_.c_str(), "w");
-#endif // GTEST_OS_WINDOWS
- }
- if (xmlout == NULL) {
- // TODO(wan): report the reason of the failure.
- //
- // We don't do it for now as:
- //
- // 1. There is no urgent need for it.
- // 2. It's a bit involved to make the errno variable thread-safe on
- // all three operating systems (Linux, Windows, and Mac OS).
- // 3. To interpret the meaning of errno in a thread-safe way,
- // we need the strerror_r() function, which is not available on
- // Windows.
- fprintf(stderr,
- "Unable to open file \"%s\"\n",
- output_file_.c_str());
- fflush(stderr);
- exit(EXIT_FAILURE);
- }
- PrintXmlUnitTest(xmlout, unit_test);
- fclose(xmlout);
-}
-
-// Returns an XML-escaped copy of the input string str. If is_attribute
-// is true, the text is meant to appear as an attribute value, and
-// normalizable whitespace is preserved by replacing it with character
-// references.
-//
-// Invalid XML characters in str, if any, are stripped from the output.
-// It is expected that most, if not all, of the text processed by this
-// module will consist of ordinary English text.
-// If this module is ever modified to produce version 1.1 XML output,
-// most invalid characters can be retained using character references.
-// TODO(wan): It might be nice to have a minimally invasive, human-readable
-// escaping scheme for invalid characters, rather than dropping them.
-internal::String XmlUnitTestResultPrinter::EscapeXml(const char* str,
- bool is_attribute) {
- Message m;
-
- if (str != NULL) {
- for (const char* src = str; *src; ++src) {
- switch (*src) {
- case '<':
- m << "&lt;";
- break;
- case '>':
- m << "&gt;";
- break;
- case '&':
- m << "&amp;";
- break;
- case '\'':
- if (is_attribute)
- m << "&apos;";
- else
- m << '\'';
- break;
- case '"':
- if (is_attribute)
- m << "&quot;";
- else
- m << '"';
- break;
- default:
- if (IsValidXmlCharacter(*src)) {
- if (is_attribute && IsNormalizableWhitespace(*src))
- m << internal::String::Format("&#x%02X;", unsigned(*src));
- else
- m << *src;
- }
- break;
- }
- }
- }
-
- return m.GetString();
-}
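As a quick illustration of the escaping rules implemented above (hypothetical inputs, not part of the original source):

// EscapeXml("1 < 2 && b > a", false)  ->  "1 &lt; 2 &amp;&amp; b &gt; a"
// EscapeXml("say \"hi\"\tnow", true)  ->  "say &quot;hi&quot;&#x09;now"
//
// In the second call is_attribute is true, so the double quote becomes
// &quot; and the tab is preserved as the character reference &#x09;
// instead of being passed through verbatim.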
-
-
-// The following routines generate an XML representation of a UnitTest
-// object.
-//
-// This is how Google Test concepts map to the DTD:
-//
-// <testsuite name="AllTests"> <-- corresponds to a UnitTest object
-// <testsuite name="testcase-name"> <-- corresponds to a TestCase object
-// <testcase name="test-name"> <-- corresponds to a TestInfo object
-// <failure message="...">...</failure>
-// <failure message="...">...</failure>
-// <failure message="...">...</failure>
-// <-- individual assertion failures
-// </testcase>
-// </testsuite>
-// </testsuite>
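Concretely, a run with one test case containing a single failing test produces a document shaped roughly like this (names, times and messages are placeholders):

<?xml version="1.0" encoding="UTF-8"?>
<testsuite tests="1" failures="1" disabled="0" errors="0" time="0.01" name="AllTests">
  <testsuite name="FooTest" tests="1" failures="1" disabled="0" errors="0" time="0.01">
    <testcase name="Fails" status="run" time="0.01" classname="FooTest">
      <failure message="(escaped failure summary)" type=""><![CDATA[(file:line and
failure message)]]></failure>
    </testcase>
  </testsuite>
</testsuite>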
-
-namespace internal {
-
-// Formats the given time in milliseconds as seconds. The returned
-// C-string is owned by this function and cannot be released by the
-// caller. Calling the function again invalidates the previous
-// result.
-const char* FormatTimeInMillisAsSeconds(TimeInMillis ms) {
- static String str;
- str = (Message() << (ms/1000.0)).GetString();
- return str.c_str();
-}
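For example, the elapsed-time attributes produced below are rendered like this (illustrative values):

// FormatTimeInMillisAsSeconds(2500) -> "2.5"
// FormatTimeInMillisAsSeconds(10)   -> "0.01"
// FormatTimeInMillisAsSeconds(0)    -> "0"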
-
-} // namespace internal
-
-// Prints an XML representation of a TestInfo object.
-// TODO(wan): There is also value in printing properties with the plain printer.
-void XmlUnitTestResultPrinter::PrintXmlTestInfo(FILE* out,
- const char* test_case_name,
- const TestInfo* test_info) {
- const internal::TestResult * const result = test_info->result();
- const internal::List<TestPartResult> &results = result->test_part_results();
- fprintf(out,
- " <testcase name=\"%s\" status=\"%s\" time=\"%s\" "
- "classname=\"%s\"%s",
- EscapeXmlAttribute(test_info->name()).c_str(),
- test_info->should_run() ? "run" : "notrun",
- internal::FormatTimeInMillisAsSeconds(result->elapsed_time()),
- EscapeXmlAttribute(test_case_name).c_str(),
- TestPropertiesAsXmlAttributes(result).c_str());
-
- int failures = 0;
- for (const internal::ListNode<TestPartResult>* part_node = results.Head();
- part_node != NULL;
- part_node = part_node->next()) {
- const TestPartResult& part = part_node->element();
- if (part.failed()) {
- const internal::String message =
- internal::String::Format("%s:%d\n%s", part.file_name(),
- part.line_number(), part.message());
- if (++failures == 1)
- fprintf(out, ">\n");
- fprintf(out,
- " <failure message=\"%s\" type=\"\"><![CDATA[%s]]>"
- "</failure>\n",
- EscapeXmlAttribute(part.summary()).c_str(), message.c_str());
- }
- }
-
- if (failures == 0)
- fprintf(out, " />\n");
- else
- fprintf(out, " </testcase>\n");
-}
-
-// Prints an XML representation of a TestCase object
-void XmlUnitTestResultPrinter::PrintXmlTestCase(FILE* out,
- const TestCase* test_case) {
- fprintf(out,
- " <testsuite name=\"%s\" tests=\"%d\" failures=\"%d\" "
- "disabled=\"%d\" ",
- EscapeXmlAttribute(test_case->name()).c_str(),
- test_case->total_test_count(),
- test_case->failed_test_count(),
- test_case->disabled_test_count());
- fprintf(out,
- "errors=\"0\" time=\"%s\">\n",
- internal::FormatTimeInMillisAsSeconds(test_case->elapsed_time()));
- for (const internal::ListNode<TestInfo*>* info_node =
- test_case->test_info_list().Head();
- info_node != NULL;
- info_node = info_node->next()) {
- PrintXmlTestInfo(out, test_case->name(), info_node->element());
- }
- fprintf(out, " </testsuite>\n");
-}
-
-// Prints an XML summary of unit_test to output stream out.
-void XmlUnitTestResultPrinter::PrintXmlUnitTest(FILE* out,
- const UnitTest* unit_test) {
- const internal::UnitTestImpl* const impl = unit_test->impl();
- fprintf(out, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
- fprintf(out,
- "<testsuite tests=\"%d\" failures=\"%d\" disabled=\"%d\" "
- "errors=\"0\" time=\"%s\" ",
- impl->total_test_count(),
- impl->failed_test_count(),
- impl->disabled_test_count(),
- internal::FormatTimeInMillisAsSeconds(impl->elapsed_time()));
- fprintf(out, "name=\"AllTests\">\n");
- for (const internal::ListNode<TestCase*>* case_node =
- impl->test_cases()->Head();
- case_node != NULL;
- case_node = case_node->next()) {
- PrintXmlTestCase(out, case_node->element());
- }
- fprintf(out, "</testsuite>\n");
-}
-
-// Produces a string representing the test properties in a result as space
-// delimited XML attributes based on the property key="value" pairs.
-internal::String XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes(
- const internal::TestResult* result) {
- using internal::TestProperty;
- Message attributes;
- const internal::List<TestProperty>& properties = result->test_properties();
- for (const internal::ListNode<TestProperty>* property_node =
- properties.Head();
- property_node != NULL;
- property_node = property_node->next()) {
- const TestProperty& property = property_node->element();
- attributes << " " << property.key() << "="
- << "\"" << EscapeXmlAttribute(property.value()) << "\"";
- }
- return attributes.GetString();
-}
-
-// End XmlUnitTestResultPrinter
-
-namespace internal {
-
-// Class ScopedTrace
-
-// Pushes the given source file location and message onto a per-thread
-// trace stack maintained by Google Test.
-// L < UnitTest::mutex_
-ScopedTrace::ScopedTrace(const char* file, int line, const Message& message) {
- TraceInfo trace;
- trace.file = file;
- trace.line = line;
- trace.message = message.GetString();
-
- UnitTest::GetInstance()->PushGTestTrace(trace);
-}
-
-// Pops the info pushed by the c'tor.
-// L < UnitTest::mutex_
-ScopedTrace::~ScopedTrace() {
- UnitTest::GetInstance()->PopGTestTrace();
-}
-
-
-// class OsStackTraceGetter
-
-// Returns the current OS stack trace as a String. Parameters:
-//
-// max_depth - the maximum number of stack frames to be included
-// in the trace.
-// skip_count - the number of top frames to be skipped; doesn't count
-// against max_depth.
-//
-// L < mutex_
-// We use "L < mutex_" to denote that the function may acquire mutex_.
-String OsStackTraceGetter::CurrentStackTrace(int, int) {
- return String("");
-}
-
-// L < mutex_
-void OsStackTraceGetter::UponLeavingGTest() {
-}
-
-const char* const
-OsStackTraceGetter::kElidedFramesMarker =
- "... " GTEST_NAME " internal frames ...";
-
-} // namespace internal
-
-// class UnitTest
-
-// Gets the singleton UnitTest object. The first time this method is
-// called, a UnitTest object is constructed and returned. Consecutive
-// calls will return the same object.
-//
-// We don't protect this under mutex_ as a user is not supposed to
-// call this before main() starts, from which point on the return
-// value will never change.
-UnitTest * UnitTest::GetInstance() {
- // When compiled with MSVC 7.1 in optimized mode, destroying the
- // UnitTest object upon exiting the program messes up the exit code,
- // causing successful tests to appear failed. We have to use a
- // different implementation in this case to bypass the compiler bug.
- // This implementation makes the compiler happy, at the cost of
- // leaking the UnitTest object.
-#if _MSC_VER == 1310 && !defined(_DEBUG) // MSVC 7.1 and optimized build.
- static UnitTest* const instance = new UnitTest;
- return instance;
-#else
- static UnitTest instance;
- return &instance;
-#endif // _MSC_VER==1310 && !defined(_DEBUG)
-}
-
-// Registers and returns a global test environment. When a test
-// program is run, all global test environments will be set-up in the
-// order they were registered. After all tests in the program have
-// finished, all global test environments will be torn-down in the
-// *reverse* order they were registered.
-//
-// The UnitTest object takes ownership of the given environment.
-//
-// We don't protect this under mutex_, as we only support calling it
-// from the main thread.
-Environment* UnitTest::AddEnvironment(Environment* env) {
- if (env == NULL) {
- return NULL;
- }
-
- impl_->environments()->PushBack(env);
- impl_->environments_in_reverse_order()->PushFront(env);
- return env;
-}
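User code normally reaches this method through the public ::testing::AddGlobalTestEnvironment() helper rather than calling it directly; a minimal sketch (MyEnvironment is a made-up example):

class MyEnvironment : public ::testing::Environment {
 public:
  // Runs once before the first test; a fatal failure here skips all tests.
  virtual void SetUp() { /* acquire shared resources */ }
  // Runs once after the last test; environments are torn down in reverse
  // registration order.
  virtual void TearDown() { /* release shared resources */ }
};

// Typically called from main() before RUN_ALL_TESTS(); Google Test takes
// ownership of the pointer.
::testing::Environment* const my_env =
    ::testing::AddGlobalTestEnvironment(new MyEnvironment);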
-
-// Adds a TestPartResult to the current TestResult object. All Google Test
-// assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) eventually call
-// this to report their results. The user code should use the
-// assertion macros instead of calling this directly.
-// L < mutex_
-void UnitTest::AddTestPartResult(TestPartResultType result_type,
- const char* file_name,
- int line_number,
- const internal::String& message,
- const internal::String& os_stack_trace) {
- Message msg;
- msg << message;
-
- internal::MutexLock lock(&mutex_);
- if (impl_->gtest_trace_stack()->size() > 0) {
- msg << "\n" << GTEST_NAME << " trace:";
-
- for (internal::ListNode<internal::TraceInfo>* node =
- impl_->gtest_trace_stack()->Head();
- node != NULL;
- node = node->next()) {
- const internal::TraceInfo& trace = node->element();
- msg << "\n" << trace.file << ":" << trace.line << ": " << trace.message;
- }
- }
-
- if (os_stack_trace.c_str() != NULL && !os_stack_trace.empty()) {
- msg << internal::kStackTraceMarker << os_stack_trace;
- }
-
- const TestPartResult result =
- TestPartResult(result_type, file_name, line_number,
- msg.GetString().c_str());
- impl_->GetTestPartResultReporterForCurrentThread()->
- ReportTestPartResult(result);
-
- // If this is a failure and the user wants the debugger to break on
- // failures ...
- if (result_type != TPRT_SUCCESS && GTEST_FLAG(break_on_failure)) {
- // ... then we generate a seg fault.
- *static_cast<int*>(NULL) = 1;
- }
-}
-
- // Creates and adds a property to the current TestResult. If a property
- // with the same key already exists, updates its value instead.
-void UnitTest::RecordPropertyForCurrentTest(const char* key,
- const char* value) {
- const internal::TestProperty test_property(key, value);
- impl_->current_test_result()->RecordProperty(test_property);
-}
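Tests reach this through the static Test::RecordProperty() helper; with XML output enabled the recorded pairs show up as extra attributes on the test's <testcase> element (see TestPropertiesAsXmlAttributes further down). A small sketch with made-up keys:

TEST(CheckoutTest, RecordsMetadata) {
  // Inside a TEST body RecordProperty() is inherited from ::testing::Test.
  RecordProperty("reviewer", "alice");
  RecordProperty("build_label", "nightly-2010-11-30");
  EXPECT_TRUE(true);
}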
-
-// Runs all tests in this UnitTest object and prints the result.
-// Returns 0 if successful, or 1 otherwise.
-//
-// We don't protect this under mutex_, as we only support calling it
-// from the main thread.
-int UnitTest::Run() {
-#if defined(GTEST_OS_WINDOWS) && !defined(__MINGW32__)
-
-#if !defined(_WIN32_WCE)
- // SetErrorMode doesn't exist on CE.
- if (GTEST_FLAG(catch_exceptions)) {
- // The user wants Google Test to catch exceptions thrown by the tests.
-
- // This lets fatal errors be handled by us, instead of causing pop-ups.
- SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT |
- SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX);
- }
-#endif // _WIN32_WCE
-
- __try {
- return impl_->RunAllTests();
- } __except(internal::UnitTestOptions::GTestShouldProcessSEH(
- GetExceptionCode())) {
- printf("Exception thrown with code 0x%x.\nFAIL\n", GetExceptionCode());
- fflush(stdout);
- return 1;
- }
-
-#else
- // We are on Linux, Mac OS, or MinGW, where SEH exceptions are not available.
-
- return impl_->RunAllTests();
-#endif // GTEST_OS_WINDOWS
-}
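RUN_ALL_TESTS() boils down to a call to this method, so a typical test program's exit status is exactly the 0/1 value computed here:

int main(int argc, char** argv) {
  // Removes recognized --gtest_* flags from argv before the tests run.
  ::testing::InitGoogleTest(&argc, argv);
  // Expands to UnitTest::GetInstance()->Run(); returns 0 if every selected
  // test passed, 1 otherwise.
  return RUN_ALL_TESTS();
}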
-
-// Returns the working directory when the first TEST() or TEST_F() was
-// executed.
-const char* UnitTest::original_working_dir() const {
- return impl_->original_working_dir_.c_str();
-}
-
-// Returns the TestCase object for the test that's currently running,
-// or NULL if no test is running.
-// L < mutex_
-const TestCase* UnitTest::current_test_case() const {
- internal::MutexLock lock(&mutex_);
- return impl_->current_test_case();
-}
-
-// Returns the TestInfo object for the test that's currently running,
-// or NULL if no test is running.
-// L < mutex_
-const TestInfo* UnitTest::current_test_info() const {
- internal::MutexLock lock(&mutex_);
- return impl_->current_test_info();
-}
-
-#ifdef GTEST_HAS_PARAM_TEST
-// Returns ParameterizedTestCaseRegistry object used to keep track of
-// value-parameterized tests and instantiate and register them.
-// L < mutex_
-internal::ParameterizedTestCaseRegistry&
- UnitTest::parameterized_test_registry() {
- return impl_->parameterized_test_registry();
-}
-#endif // GTEST_HAS_PARAM_TEST
-
-// Creates an empty UnitTest.
-UnitTest::UnitTest() {
- impl_ = new internal::UnitTestImpl(this);
-}
-
-// Destructor of UnitTest.
-UnitTest::~UnitTest() {
- delete impl_;
-}
-
-// Pushes a trace defined by SCOPED_TRACE() on to the per-thread
-// Google Test trace stack.
-// L < mutex_
-void UnitTest::PushGTestTrace(const internal::TraceInfo& trace) {
- internal::MutexLock lock(&mutex_);
- impl_->gtest_trace_stack()->PushFront(trace);
-}
-
-// Pops a trace from the per-thread Google Test trace stack.
-// L < mutex_
-void UnitTest::PopGTestTrace() {
- internal::MutexLock lock(&mutex_);
- impl_->gtest_trace_stack()->PopFront(NULL);
-}
-
-namespace internal {
-
-UnitTestImpl::UnitTestImpl(UnitTest* parent)
- : parent_(parent),
-#ifdef _MSC_VER
-#pragma warning(push) // Saves the current warning state.
-#pragma warning(disable:4355) // Temporarily disables warning 4355
- // (using this in initializer).
- default_global_test_part_result_reporter_(this),
- default_per_thread_test_part_result_reporter_(this),
-#pragma warning(pop) // Restores the warning state again.
-#else
- default_global_test_part_result_reporter_(this),
- default_per_thread_test_part_result_reporter_(this),
-#endif // _MSC_VER
- global_test_part_result_repoter_(
- &default_global_test_part_result_reporter_),
- per_thread_test_part_result_reporter_(
- &default_per_thread_test_part_result_reporter_),
- test_cases_(),
-#ifdef GTEST_HAS_PARAM_TEST
- parameterized_test_registry_(),
- parameterized_tests_registered_(false),
-#endif // GTEST_HAS_PARAM_TEST
- last_death_test_case_(NULL),
- current_test_case_(NULL),
- current_test_info_(NULL),
- ad_hoc_test_result_(),
- result_printer_(NULL),
- os_stack_trace_getter_(NULL),
-#ifdef GTEST_HAS_DEATH_TEST
- elapsed_time_(0),
- internal_run_death_test_flag_(NULL),
- death_test_factory_(new DefaultDeathTestFactory) {
-#else
- elapsed_time_(0) {
-#endif // GTEST_HAS_DEATH_TEST
-}
-
-UnitTestImpl::~UnitTestImpl() {
- // Deletes every TestCase.
- test_cases_.ForEach(internal::Delete<TestCase>);
-
- // Deletes every Environment.
- environments_.ForEach(internal::Delete<Environment>);
-
- // Deletes the current test result printer.
- delete result_printer_;
-
- delete os_stack_trace_getter_;
-}
-
-// A predicate that checks the name of a TestCase against a known
-// value.
-//
-// This is used for implementation of the UnitTest class only. We put
-// it in the anonymous namespace to prevent polluting the outer
-// namespace.
-//
-// TestCaseNameIs is copyable.
-class TestCaseNameIs {
- public:
- // Constructor.
- explicit TestCaseNameIs(const String& name)
- : name_(name) {}
-
- // Returns true iff the name of test_case matches name_.
- bool operator()(const TestCase* test_case) const {
- return test_case != NULL && strcmp(test_case->name(), name_.c_str()) == 0;
- }
-
- private:
- String name_;
-};
-
-// Finds and returns a TestCase with the given name. If one doesn't
-// exist, creates one and returns it.
-//
-// Arguments:
-//
-// test_case_name: name of the test case
-// comment: a comment on the test case, printed alongside its name
-// set_up_tc: pointer to the function that sets up the test case
-// tear_down_tc: pointer to the function that tears down the test case
-TestCase* UnitTestImpl::GetTestCase(const char* test_case_name,
- const char* comment,
- Test::SetUpTestCaseFunc set_up_tc,
- Test::TearDownTestCaseFunc tear_down_tc) {
- // Can we find a TestCase with the given name?
- internal::ListNode<TestCase*>* node = test_cases_.FindIf(
- TestCaseNameIs(test_case_name));
-
- if (node == NULL) {
- // No. Let's create one.
- TestCase* const test_case =
- new TestCase(test_case_name, comment, set_up_tc, tear_down_tc);
-
- // Is this a death test case?
- if (internal::UnitTestOptions::MatchesFilter(String(test_case_name),
- kDeathTestCaseFilter)) {
- // Yes. Inserts the test case after the last death test case
- // defined so far.
- node = test_cases_.InsertAfter(last_death_test_case_, test_case);
- last_death_test_case_ = node;
- } else {
- // No. Appends to the end of the list.
- test_cases_.PushBack(test_case);
- node = test_cases_.Last();
- }
- }
-
- // Returns the TestCase found.
- return node->element();
-}
-
-// Helpers for setting up / tearing down the given environment. They
-// are for use in the List::ForEach() method.
-static void SetUpEnvironment(Environment* env) { env->SetUp(); }
-static void TearDownEnvironment(Environment* env) { env->TearDown(); }
-
-// Runs all tests in this UnitTest object, prints the result, and
-// returns 0 if all tests are successful, or 1 otherwise. If any
-// exception is thrown during a test on Windows, this test is
-// considered to be failed, but the rest of the tests will still be
-// run. (We disable exceptions on Linux and Mac OS X, so the issue
-// doesn't apply there.)
- // When parameterized tests are enabled, it expands and registers
-// parameterized tests first in RegisterParameterizedTests().
-// All other functions called from RunAllTests() may safely assume that
-// parameterized tests are ready to be counted and run.
-int UnitTestImpl::RunAllTests() {
- // Makes sure InitGoogleTest() was called.
- if (!GTestIsInitialized()) {
- printf("%s",
- "\nThis test program did NOT call ::testing::InitGoogleTest "
- "before calling RUN_ALL_TESTS(). Please fix it.\n");
- return 1;
- }
-
- RegisterParameterizedTests();
-
- // Lists all the tests and exits if the --gtest_list_tests
- // flag was specified.
- if (GTEST_FLAG(list_tests)) {
- ListAllTests();
- return 0;
- }
-
- // True iff we are in a subprocess for running a thread-safe-style
- // death test.
- bool in_subprocess_for_death_test = false;
-
-#ifdef GTEST_HAS_DEATH_TEST
- internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag());
- in_subprocess_for_death_test = (internal_run_death_test_flag_.get() != NULL);
-#endif // GTEST_HAS_DEATH_TEST
-
- UnitTestEventListenerInterface * const printer = result_printer();
-
- // Compares the full test names with the filter to decide which
- // tests to run.
- const bool has_tests_to_run = FilterTests() > 0;
- // True iff at least one test has failed.
- bool failed = false;
-
- // How many times to repeat the tests? We don't want to repeat them
- // when we are inside the subprocess of a death test.
- const int repeat = in_subprocess_for_death_test ? 1 : GTEST_FLAG(repeat);
- // Repeats forever if the repeat count is negative.
- const bool forever = repeat < 0;
- for (int i = 0; forever || i != repeat; i++) {
- if (repeat != 1) {
- printf("\nRepeating all tests (iteration %d) . . .\n\n", i + 1);
- }
-
- // Tells the unit test event listener that the tests are about to
- // start.
- printer->OnUnitTestStart(parent_);
-
- const TimeInMillis start = GetTimeInMillis();
-
- // Runs each test case if there is at least one test to run.
- if (has_tests_to_run) {
- // Sets up all environments beforehand.
- printer->OnGlobalSetUpStart(parent_);
- environments_.ForEach(SetUpEnvironment);
- printer->OnGlobalSetUpEnd(parent_);
-
- // Runs the tests only if there was no fatal failure during global
- // set-up.
- if (!Test::HasFatalFailure()) {
- test_cases_.ForEach(TestCase::RunTestCase);
- }
-
- // Tears down all environments in reverse order afterwards.
- printer->OnGlobalTearDownStart(parent_);
- environments_in_reverse_order_.ForEach(TearDownEnvironment);
- printer->OnGlobalTearDownEnd(parent_);
- }
-
- elapsed_time_ = GetTimeInMillis() - start;
-
- // Tells the unit test event listener that the tests have just
- // finished.
- printer->OnUnitTestEnd(parent_);
-
- // Gets the result and clears it.
- if (!Passed()) {
- failed = true;
- }
- ClearResult();
- }
-
- // Returns 0 if all tests passed, or 1 otherwise.
- return failed ? 1 : 0;
-}
-
-// Compares the name of each test with the user-specified filter to
-// decide whether the test should be run, then records the result in
-// each TestCase and TestInfo object.
-// Returns the number of tests that should run.
-int UnitTestImpl::FilterTests() {
- int num_runnable_tests = 0;
- for (const internal::ListNode<TestCase *> *test_case_node =
- test_cases_.Head();
- test_case_node != NULL;
- test_case_node = test_case_node->next()) {
- TestCase * const test_case = test_case_node->element();
- const String &test_case_name = test_case->name();
- test_case->set_should_run(false);
-
- for (const internal::ListNode<TestInfo *> *test_info_node =
- test_case->test_info_list().Head();
- test_info_node != NULL;
- test_info_node = test_info_node->next()) {
- TestInfo * const test_info = test_info_node->element();
- const String test_name(test_info->name());
- // A test is disabled if test case name or test name matches
- // kDisableTestFilter.
- const bool is_disabled =
- internal::UnitTestOptions::MatchesFilter(test_case_name,
- kDisableTestFilter) ||
- internal::UnitTestOptions::MatchesFilter(test_name,
- kDisableTestFilter);
- test_info->impl()->set_is_disabled(is_disabled);
-
- const bool should_run = !is_disabled &&
- internal::UnitTestOptions::FilterMatchesTest(test_case_name,
- test_name);
- test_info->impl()->set_should_run(should_run);
- test_case->set_should_run(test_case->should_run() || should_run);
- if (should_run) {
- num_runnable_tests++;
- }
- }
- }
- return num_runnable_tests;
-}
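In practice the two filters referenced above correspond to familiar user-level behaviour: kDisableTestFilter (defined elsewhere in this file) conventionally matches names carrying a DISABLED_ prefix, and FilterMatchesTest() honours the --gtest_filter pattern. A small sketch (the helpers called inside the tests are hypothetical):

// Not run by default: the test name matches the disabled-test pattern, so
// set_is_disabled(true) is applied above and the run only reports it in the
// "YOU HAVE n DISABLED TESTS" note.
TEST(NetworkTest, DISABLED_TalksToRealServer) {
  EXPECT_TRUE(ConnectToRealServer());
}

// Selected or skipped depending on the filter, e.g. --gtest_filter=NetworkTest.*
TEST(NetworkTest, ParsesResponse) {
  EXPECT_EQ(200, ParseStatusLine("HTTP/1.1 200 OK"));
}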
-
-// Lists all tests by name.
-void UnitTestImpl::ListAllTests() {
- for (const internal::ListNode<TestCase*>* test_case_node = test_cases_.Head();
- test_case_node != NULL;
- test_case_node = test_case_node->next()) {
- const TestCase* const test_case = test_case_node->element();
-
- // Prints the test case name followed by an indented list of test names.
- printf("%s.\n", test_case->name());
-
- for (const internal::ListNode<TestInfo*>* test_info_node =
- test_case->test_info_list().Head();
- test_info_node != NULL;
- test_info_node = test_info_node->next()) {
- const TestInfo* const test_info = test_info_node->element();
-
- printf(" %s\n", test_info->name());
- }
- }
- fflush(stdout);
-}
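Given the two printf() formats above, a run with --gtest_list_tests prints output of this shape (test names are placeholders):

FooTest.
  Works
  Fails
NetworkTest.
  ParsesResponse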
-
-// Sets the unit test result printer.
-//
-// Does nothing if the input and the current printer object are the
-// same; otherwise, deletes the old printer object and makes the
-// input the current printer.
-void UnitTestImpl::set_result_printer(
- UnitTestEventListenerInterface* result_printer) {
- if (result_printer_ != result_printer) {
- delete result_printer_;
- result_printer_ = result_printer;
- }
-}
-
-// Returns the current unit test result printer if it is not NULL;
-// otherwise, creates an appropriate result printer, makes it the
-// current printer, and returns it.
-UnitTestEventListenerInterface* UnitTestImpl::result_printer() {
- if (result_printer_ != NULL) {
- return result_printer_;
- }
-
-#ifdef GTEST_HAS_DEATH_TEST
- if (internal_run_death_test_flag_.get() != NULL) {
- result_printer_ = new NullUnitTestResultPrinter;
- return result_printer_;
- }
-#endif // GTEST_HAS_DEATH_TEST
-
- UnitTestEventsRepeater *repeater = new UnitTestEventsRepeater;
- const String& output_format = internal::UnitTestOptions::GetOutputFormat();
- if (output_format == "xml") {
- repeater->AddListener(new XmlUnitTestResultPrinter(
- internal::UnitTestOptions::GetOutputFile().c_str()));
- } else if (output_format != "") {
- printf("WARNING: unrecognized output format \"%s\" ignored.\n",
- output_format.c_str());
- fflush(stdout);
- }
- repeater->AddListener(new PrettyUnitTestResultPrinter);
- result_printer_ = repeater;
- return result_printer_;
-}
-
-// Sets the OS stack trace getter.
-//
-// Does nothing if the input and the current OS stack trace getter are
-// the same; otherwise, deletes the old getter and makes the input the
-// current getter.
-void UnitTestImpl::set_os_stack_trace_getter(
- OsStackTraceGetterInterface* getter) {
- if (os_stack_trace_getter_ != getter) {
- delete os_stack_trace_getter_;
- os_stack_trace_getter_ = getter;
- }
-}
-
-// Returns the current OS stack trace getter if it is not NULL;
-// otherwise, creates an OsStackTraceGetter, makes it the current
-// getter, and returns it.
-OsStackTraceGetterInterface* UnitTestImpl::os_stack_trace_getter() {
- if (os_stack_trace_getter_ == NULL) {
- os_stack_trace_getter_ = new OsStackTraceGetter;
- }
-
- return os_stack_trace_getter_;
-}
-
-// Returns the TestResult for the test that's currently running, or
-// the TestResult for the ad hoc test if no test is running.
-internal::TestResult* UnitTestImpl::current_test_result() {
- return current_test_info_ ?
- current_test_info_->impl()->result() : &ad_hoc_test_result_;
-}
-
-// TestInfoImpl constructor. The new instance assumes ownership of the test
-// factory object.
-TestInfoImpl::TestInfoImpl(TestInfo* parent,
- const char* test_case_name,
- const char* name,
- const char* test_case_comment,
- const char* comment,
- TypeId fixture_class_id,
- internal::TestFactoryBase* factory) :
- parent_(parent),
- test_case_name_(String(test_case_name)),
- name_(String(name)),
- test_case_comment_(String(test_case_comment)),
- comment_(String(comment)),
- fixture_class_id_(fixture_class_id),
- should_run_(false),
- is_disabled_(false),
- factory_(factory) {
-}
-
-// TestInfoImpl destructor.
-TestInfoImpl::~TestInfoImpl() {
- delete factory_;
-}
-
-// Returns the current OS stack trace as a String.
-//
-// The maximum number of stack frames to be included is specified by
-// the gtest_stack_trace_depth flag. The skip_count parameter
-// specifies the number of top frames to be skipped, which doesn't
-// count against the number of frames to be included.
-//
-// For example, if Foo() calls Bar(), which in turn calls
-// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
-// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
-String GetCurrentOsStackTraceExceptTop(UnitTest* unit_test, int skip_count) {
- // We pass skip_count + 1 to skip this wrapper function in addition
- // to what the user really wants to skip.
- return unit_test->impl()->CurrentOsStackTraceExceptTop(skip_count + 1);
-}
-
-// Returns the number of failed test parts in the given test result object.
-int GetFailedPartCount(const TestResult* result) {
- return result->failed_part_count();
-}
-
-// Parses a string as a command line flag. The string should have
-// the format "--flag=value". When def_optional is true, the "=value"
-// part can be omitted.
-//
-// Returns the value of the flag, or NULL if the parsing failed.
-const char* ParseFlagValue(const char* str,
- const char* flag,
- bool def_optional) {
- // str and flag must not be NULL.
- if (str == NULL || flag == NULL) return NULL;
-
- // The flag must start with "--" followed by GTEST_FLAG_PREFIX.
- const String flag_str = String::Format("--%s%s", GTEST_FLAG_PREFIX, flag);
- const size_t flag_len = flag_str.GetLength();
- if (strncmp(str, flag_str.c_str(), flag_len) != 0) return NULL;
-
- // Skips the flag name.
- const char* flag_end = str + flag_len;
-
- // When def_optional is true, it's OK to not have a "=value" part.
- if (def_optional && (flag_end[0] == '\0')) {
- return flag_end;
- }
-
- // If def_optional is true and there are more characters after the
- // flag name, or if def_optional is false, there must be a '=' after
- // the flag name.
- if (flag_end[0] != '=') return NULL;
-
- // Returns the string after "=".
- return flag_end + 1;
-}
-
-// Parses a string for a bool flag, in the form of either
-// "--flag=value" or "--flag".
-//
-// In the former case, the value is taken as true as long as it does
-// not start with '0', 'f', or 'F'.
-//
-// In the latter case, the value is taken as true.
-//
-// On success, stores the value of the flag in *value, and returns
-// true. On failure, returns false without changing *value.
-bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
- // Gets the value of the flag as a string.
- const char* const value_str = ParseFlagValue(str, flag, true);
-
- // Aborts if the parsing failed.
- if (value_str == NULL) return false;
-
- // Converts the string value to a bool.
- *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F');
- return true;
-}
-
-// Parses a string for an Int32 flag, in the form of
-// "--flag=value".
-//
-// On success, stores the value of the flag in *value, and returns
-// true. On failure, returns false without changing *value.
-bool ParseInt32Flag(const char* str, const char* flag, Int32* value) {
- // Gets the value of the flag as a string.
- const char* const value_str = ParseFlagValue(str, flag, false);
-
- // Aborts if the parsing failed.
- if (value_str == NULL) return false;
-
- // Sets *value to the value of the flag.
- return ParseInt32(Message() << "The value of flag --" << flag,
- value_str, value);
-}
-
-// Parses a string for a string flag, in the form of
-// "--flag=value".
-//
-// On success, stores the value of the flag in *value, and returns
-// true. On failure, returns false without changing *value.
-bool ParseStringFlag(const char* str, const char* flag, String* value) {
- // Gets the value of the flag as a string.
- const char* const value_str = ParseFlagValue(str, flag, false);
-
- // Aborts if the parsing failed.
- if (value_str == NULL) return false;
-
- // Sets *value to the value of the flag.
- *value = value_str;
- return true;
-}
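Putting the three parsers together, and assuming GTEST_FLAG_PREFIX is "gtest_" (its definition is not part of this hunk), the expected behaviour is:

// ParseFlagValue("--gtest_repeat=10", "repeat", false)      -> "10"
// ParseFlagValue("--gtest_list_tests", "list_tests", true)  -> "" (flag present, value omitted)
// ParseFlagValue("--gtest_repeat", "repeat", false)         -> NULL (missing "=value")
//
// bool list = false;
// ParseBoolFlag("--gtest_list_tests", "list_tests", &list);    // list == true
// Int32 repeat = 1;
// ParseInt32Flag("--gtest_repeat=10", "repeat", &repeat);      // repeat == 10
// String filter;
// ParseStringFlag("--gtest_filter=Foo.*", "filter", &filter);  // filter == "Foo.*"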
-
-// Parses the command line for Google Test flags, without initializing
-// other parts of Google Test. The type parameter CharType can be
-// instantiated to either char or wchar_t.
-template <typename CharType>
-void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {
- for (int i = 1; i < *argc; i++) {
- const String arg_string = StreamableToString(argv[i]);
- const char* const arg = arg_string.c_str();
-
- using internal::ParseBoolFlag;
- using internal::ParseInt32Flag;
- using internal::ParseStringFlag;
-
- // Do we see a Google Test flag?
- if (ParseBoolFlag(arg, kBreakOnFailureFlag,
- &GTEST_FLAG(break_on_failure)) ||
- ParseBoolFlag(arg, kCatchExceptionsFlag,
- &GTEST_FLAG(catch_exceptions)) ||
- ParseStringFlag(arg, kColorFlag, &GTEST_FLAG(color)) ||
- ParseStringFlag(arg, kDeathTestStyleFlag,
- &GTEST_FLAG(death_test_style)) ||
- ParseStringFlag(arg, kFilterFlag, &GTEST_FLAG(filter)) ||
- ParseStringFlag(arg, kInternalRunDeathTestFlag,
- &GTEST_FLAG(internal_run_death_test)) ||
- ParseBoolFlag(arg, kListTestsFlag, &GTEST_FLAG(list_tests)) ||
- ParseStringFlag(arg, kOutputFlag, &GTEST_FLAG(output)) ||
- ParseBoolFlag(arg, kPrintTimeFlag, &GTEST_FLAG(print_time)) ||
- ParseInt32Flag(arg, kRepeatFlag, &GTEST_FLAG(repeat))
- ) {
- // Yes. Shift the remainder of the argv list left by one. Note
- // that argv has (*argc + 1) elements, the last one always being
- // NULL. The following loop moves the trailing NULL element as
- // well.
- for (int j = i; j != *argc; j++) {
- argv[j] = argv[j + 1];
- }
-
- // Decrements the argument count.
- (*argc)--;
-
- // We also need to decrement the iterator as we just removed
- // an element.
- i--;
- }
- }
-}
-
-// Parses the command line for Google Test flags, without initializing
-// other parts of Google Test.
-void ParseGoogleTestFlagsOnly(int* argc, char** argv) {
- ParseGoogleTestFlagsOnlyImpl(argc, argv);
-}
-void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv) {
- ParseGoogleTestFlagsOnlyImpl(argc, argv);
-}
-
-// The internal implementation of InitGoogleTest().
-//
-// The type parameter CharType can be instantiated to either char or
-// wchar_t.
-template <typename CharType>
-void InitGoogleTestImpl(int* argc, CharType** argv) {
- g_init_gtest_count++;
-
- // We don't want to run the initialization code twice.
- if (g_init_gtest_count != 1) return;
-
- if (*argc <= 0) return;
-
- internal::g_executable_path = internal::StreamableToString(argv[0]);
-
-#ifdef GTEST_HAS_DEATH_TEST
- g_argvs.clear();
- for (int i = 0; i != *argc; i++) {
- g_argvs.push_back(StreamableToString(argv[i]));
- }
-#endif // GTEST_HAS_DEATH_TEST
-
- ParseGoogleTestFlagsOnly(argc, argv);
-}
-
-} // namespace internal
-
-// Initializes Google Test. This must be called before calling
-// RUN_ALL_TESTS(). In particular, it parses a command line for the
-// flags that Google Test recognizes. Whenever a Google Test flag is
-// seen, it is removed from argv, and *argc is decremented.
-//
-// No value is returned. Instead, the Google Test flag variables are
-// updated.
-//
-// Calling the function for the second time has no user-visible effect.
-void InitGoogleTest(int* argc, char** argv) {
- internal::InitGoogleTestImpl(argc, argv);
-}
-
-// This overloaded version can be used in Windows programs compiled in
-// UNICODE mode.
-void InitGoogleTest(int* argc, wchar_t** argv) {
- internal::InitGoogleTestImpl(argc, argv);
-}
-
-} // namespace testing
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-death-test.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-death-test.h
deleted file mode 100644
index f0e109a..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-death-test.h
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan at google.com (Zhanyong Wan)
-//
-// The Google C++ Testing Framework (Google Test)
-//
-// This header file defines the public API for death tests. It is
-// #included by gtest.h so a user doesn't need to include this
-// directly.
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
-#define GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
-
-#include <gtest/internal/gtest-death-test-internal.h>
-
-namespace testing {
-
-// This flag controls the style of death tests. Valid values are "threadsafe",
-// meaning that the death test child process will re-execute the test binary
-// from the start, running only a single death test, or "fast",
-// meaning that the child process will execute the test logic immediately
-// after forking.
-GTEST_DECLARE_string_(death_test_style);
-
-#ifdef GTEST_HAS_DEATH_TEST
-
-// The following macros are useful for writing death tests.
-
-// Here's what happens when an ASSERT_DEATH* or EXPECT_DEATH* is
-// executed:
-//
-// 1. It generates a warning if there is more than one active
-// thread. This is because it's safe to fork() or clone() only
-// when there is a single thread.
-//
-// 2. The parent process clone()s a sub-process and runs the death
-// test in it; the sub-process exits with code 0 at the end of the
-// death test, if it hasn't exited already.
-//
-// 3. The parent process waits for the sub-process to terminate.
-//
-// 4. The parent process checks the exit code and error message of
-// the sub-process.
-//
-// Examples:
-//
-// ASSERT_DEATH(server.SendMessage(56, "Hello"), "Invalid port number");
-// for (int i = 0; i < 5; i++) {
-// EXPECT_DEATH(server.ProcessRequest(i),
-// "Invalid request .* in ProcessRequest()")
-// << "Failed to die on request " << i);
-// }
-//
-// ASSERT_EXIT(server.ExitNow(), ::testing::ExitedWithCode(0), "Exiting");
-//
-// bool KilledBySIGHUP(int exit_code) {
-// return WIFSIGNALED(exit_code) && WTERMSIG(exit_code) == SIGHUP;
-// }
-//
-// ASSERT_EXIT(client.HangUpServer(), KilledBySIGHUP, "Hanging up!");
-//
-// Known caveats:
-//
-// A "threadsafe" style death test obtains the path to the test
-// program from argv[0] and re-executes it in the sub-process. For
-// simplicity, the current implementation doesn't search the PATH
-// when launching the sub-process. This means that the user must
-// invoke the test program via a path that contains at least one
-// path separator (e.g. path/to/foo_test and
-// /absolute/path/to/bar_test are fine, but foo_test is not). This
-// is rarely a problem as people usually don't put the test binary
-// directory in PATH.
-//
-// TODO(wan at google.com): make thread-safe death tests search the PATH.
-
-// Asserts that a given statement causes the program to exit, with an
-// integer exit status that satisfies predicate, and emitting error output
-// that matches regex.
-#define ASSERT_EXIT(statement, predicate, regex) \
- GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_)
-
-// Like ASSERT_EXIT, but continues on to successive tests in the
-// test case, if any:
-#define EXPECT_EXIT(statement, predicate, regex) \
- GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_)
-
-// Asserts that a given statement causes the program to exit, either by
-// explicitly exiting with a nonzero exit code or being killed by a
-// signal, and emitting error output that matches regex.
-#define ASSERT_DEATH(statement, regex) \
- ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
-
-// Like ASSERT_DEATH, but continues on to successive tests in the
-// test case, if any:
-#define EXPECT_DEATH(statement, regex) \
- EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
-
-// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*:
-
-// Tests that an exit code describes a normal exit with a given exit code.
-class ExitedWithCode {
- public:
- explicit ExitedWithCode(int exit_code);
- bool operator()(int exit_status) const;
- private:
- const int exit_code_;
-};
-
-// Tests that an exit code describes an exit due to termination by a
-// given signal.
-class KilledBySignal {
- public:
- explicit KilledBySignal(int signum);
- bool operator()(int exit_status) const;
- private:
- const int signum_;
-};
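A brief sketch of how these two predicates plug into the *_EXIT macros defined above (the functions under test and the SIGTERM choice are illustrative; <csignal> supplies the signal constant):

// Passes if NormalShutdown() calls exit(0) and prints something matching "bye".
EXPECT_EXIT(NormalShutdown(), ::testing::ExitedWithCode(0), "bye");

// Passes if RaiseTerm() is killed by SIGTERM; the empty regex matches any output.
EXPECT_EXIT(RaiseTerm(), ::testing::KilledBySignal(SIGTERM), "");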
-
-// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode.
-// The death testing framework causes this to have interesting semantics,
-// since the sideeffects of the call are only visible in opt mode, and not
-// in debug mode.
-//
-// In practice, this can be used to test functions that utilize the
-// LOG(DFATAL) macro using the following style:
-//
-// int DieInDebugOr12(int* sideeffect) {
-// if (sideeffect) {
-// *sideeffect = 12;
-// }
-// LOG(DFATAL) << "death";
-// return 12;
-// }
-//
-// TEST(TestCase, TestDieOr12WorksInDgbAndOpt) {
-// int sideeffect = 0;
-// // Only asserts in dbg.
-// EXPECT_DEBUG_DEATH(DieInDebugOr12(&sideeffect), "death");
-//
-// #ifdef NDEBUG
-// // opt-mode has sideeffect visible.
-// EXPECT_EQ(12, sideeffect);
-// #else
-// // dbg-mode no visible sideeffect.
-// EXPECT_EQ(0, sideeffect);
-// #endif
-// }
-//
- // This will assert that DieInDebugOr12() crashes in debug
-// mode, usually due to a DCHECK or LOG(DFATAL), but returns the
-// appropriate fallback value (12 in this case) in opt mode. If you
-// need to test that a function has appropriate side-effects in opt
-// mode, include assertions against the side-effects. A general
-// pattern for this is:
-//
-// EXPECT_DEBUG_DEATH({
-// // Side-effects here will have an effect after this statement in
-// // opt mode, but none in debug mode.
-// EXPECT_EQ(12, DieInDebugOr12(&sideeffect));
-// }, "death");
-//
-#ifdef NDEBUG
-
-#define EXPECT_DEBUG_DEATH(statement, regex) \
- do { statement; } while (false)
-
-#define ASSERT_DEBUG_DEATH(statement, regex) \
- do { statement; } while (false)
-
-#else
-
-#define EXPECT_DEBUG_DEATH(statement, regex) \
- EXPECT_DEATH(statement, regex)
-
-#define ASSERT_DEBUG_DEATH(statement, regex) \
- ASSERT_DEATH(statement, regex)
-
-#endif // NDEBUG for EXPECT_DEBUG_DEATH
-#endif // GTEST_HAS_DEATH_TEST
-} // namespace testing
-
-#endif // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-message.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-message.h
deleted file mode 100644
index 7effd08..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-message.h
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan at google.com (Zhanyong Wan)
-//
-// The Google C++ Testing Framework (Google Test)
-//
-// This header file defines the Message class.
-//
-// IMPORTANT NOTE: Due to limitation of the C++ language, we have to
-// leave some internal implementation details in this header file.
-// They are clearly marked by comments like this:
-//
-// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-//
-// Such code is NOT meant to be used by a user directly, and is subject
-// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user
-// program!
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
-#define GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
-
-#include <gtest/internal/gtest-string.h>
-#include <gtest/internal/gtest-internal.h>
-
-namespace testing {
-
-// The Message class works like an ostream repeater.
-//
-// Typical usage:
-//
-// 1. You stream a bunch of values to a Message object.
-// It will remember the text in a StrStream.
-// 2. Then you stream the Message object to an ostream.
-// This causes the text in the Message to be streamed
-// to the ostream.
-//
- // For example:
-//
-// testing::Message foo;
-// foo << 1 << " != " << 2;
-// std::cout << foo;
-//
-// will print "1 != 2".
-//
-// Message is not intended to be inherited from. In particular, its
-// destructor is not virtual.
-//
-// Note that StrStream behaves differently in gcc and in MSVC. You
-// can stream a NULL char pointer to it in the former, but not in the
-// latter (it causes an access violation if you do). The Message
-// class hides this difference by treating a NULL char pointer as
-// "(null)".
-class Message {
- private:
- // The type of basic IO manipulators (endl, ends, and flush) for
- // narrow streams.
- typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&);
-
- public:
- // Constructs an empty Message.
- // We allocate the StrStream separately because otherwise each use of
- // ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's
- // stack frame leading to huge stack frames in some cases; gcc does not reuse
- // the stack space.
- Message() : ss_(new internal::StrStream) {}
-
- // Copy constructor.
- Message(const Message& msg) : ss_(new internal::StrStream) { // NOLINT
- *ss_ << msg.GetString();
- }
-
- // Constructs a Message from a C-string.
- explicit Message(const char* str) : ss_(new internal::StrStream) {
- *ss_ << str;
- }
-
- ~Message() { delete ss_; }
-#ifdef GTEST_OS_SYMBIAN
- // Streams a value (either a pointer or not) to this object.
- template <typename T>
- inline Message& operator <<(const T& value) {
- StreamHelper(typename internal::is_pointer<T>::type(), value);
- return *this;
- }
-#else
- // Streams a non-pointer value to this object.
- template <typename T>
- inline Message& operator <<(const T& val) {
- ::GTestStreamToHelper(ss_, val);
- return *this;
- }
-
- // Streams a pointer value to this object.
- //
- // This function is an overload of the previous one. When you
- // stream a pointer to a Message, this definition will be used as it
- // is more specialized. (The C++ Standard, section
- // [temp.func.order].) If you stream a non-pointer, then the
- // previous definition will be used.
- //
- // The reason for this overload is that streaming a NULL pointer to
- // ostream is undefined behavior. Depending on the compiler, you
- // may get "0", "(nil)", "(null)", or an access violation. To
- // ensure consistent result across compilers, we always treat NULL
- // as "(null)".
- template <typename T>
- inline Message& operator <<(T* const& pointer) { // NOLINT
- if (pointer == NULL) {
- *ss_ << "(null)";
- } else {
- ::GTestStreamToHelper(ss_, pointer);
- }
- return *this;
- }
-#endif // GTEST_OS_SYMBIAN
-
- // Since the basic IO manipulators are overloaded for both narrow
- // and wide streams, we have to provide this specialized definition
- // of operator <<, even though its body is the same as the
- // templatized version above. Without this definition, streaming
- // endl or other basic IO manipulators to Message will confuse the
- // compiler.
- Message& operator <<(BasicNarrowIoManip val) {
- *ss_ << val;
- return *this;
- }
-
- // Instead of 1/0, we want to see true/false for bool values.
- Message& operator <<(bool b) {
- return *this << (b ? "true" : "false");
- }
-
- // These two overloads allow streaming a wide C string to a Message
- // using the UTF-8 encoding.
- Message& operator <<(const wchar_t* wide_c_str) {
- return *this << internal::String::ShowWideCString(wide_c_str);
- }
- Message& operator <<(wchar_t* wide_c_str) {
- return *this << internal::String::ShowWideCString(wide_c_str);
- }
-
-#if GTEST_HAS_STD_WSTRING
- // Converts the given wide string to a narrow string using the UTF-8
- // encoding, and streams the result to this Message object.
- Message& operator <<(const ::std::wstring& wstr);
-#endif // GTEST_HAS_STD_WSTRING
-
-#if GTEST_HAS_GLOBAL_WSTRING
- // Converts the given wide string to a narrow string using the UTF-8
- // encoding, and streams the result to this Message object.
- Message& operator <<(const ::wstring& wstr);
-#endif // GTEST_HAS_GLOBAL_WSTRING
-
- // Gets the text streamed to this object so far as a String.
- // Each '\0' character in the buffer is replaced with "\\0".
- //
- // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
- internal::String GetString() const {
- return internal::StrStreamToString(ss_);
- }
-
- private:
-#ifdef GTEST_OS_SYMBIAN
- // These are needed as the Nokia Symbian Compiler cannot decide between
- // const T& and const T* in a function template. The Nokia compiler _can_
- // decide between class template specializations for T and T*, so a
- // tr1::type_traits-like is_pointer works, and we can overload on that.
- template <typename T>
- inline void StreamHelper(internal::true_type dummy, T* pointer) {
- if (pointer == NULL) {
- *ss_ << "(null)";
- } else {
- ::GTestStreamToHelper(ss_, pointer);
- }
- }
- template <typename T>
- inline void StreamHelper(internal::false_type dummy, const T& value) {
- ::GTestStreamToHelper(ss_, value);
- }
-#endif // GTEST_OS_SYMBIAN
-
- // We'll hold the text streamed to this object here.
- internal::StrStream* const ss_;
-
- // We declare (but don't implement) this to prevent the compiler
- // from implementing the assignment operator.
- void operator=(const Message&);
-};
-
-// Streams a Message to an ostream.
-inline std::ostream& operator <<(std::ostream& os, const Message& sb) {
- return os << sb.GetString();
-}
-
-} // namespace testing
-
-#endif // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
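For context, the removed gtest-message.h above is typically used as in the following minimal sketch. This is an illustrative example only, not part of the diff or of ClamAV; the DescribeFailure function, the include path, and the streamed values are assumptions made for the sketch.

// Illustrative sketch only -- none of these names exist in this tree.
#include <iostream>
#include <gtest/gtest-message.h>   // assumed include path for the header above

void DescribeFailure() {
  testing::Message msg;
  msg << "expected " << 1 << " != " << 2       // arbitrary values are buffered
      << ", flag=" << true                     // bools are printed as true/false
      << ", ptr=" << static_cast<int*>(NULL);  // NULL pointers print as "(null)"
  std::cout << msg << std::endl;               // uses operator<<(ostream&, Message)
}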
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-param-test.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-param-test.h
deleted file mode 100644
index 0cf05dc..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-param-test.h
+++ /dev/null
@@ -1,1390 +0,0 @@
-// This file was GENERATED by a script. DO NOT EDIT BY HAND!!!
-
-// Copyright 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors: vladl at google.com (Vlad Losev)
-//
-// Macros and functions for implementing parameterized tests
-// in Google C++ Testing Framework (Google Test)
-//
-// This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
-//
-#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
-#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
-
-
-// Value-parameterized tests allow you to test your code with different
-// parameters without writing multiple copies of the same test.
-//
-// Here is how you use value-parameterized tests:
-
-#if 0
-
-// To write value-parameterized tests, first you should define a fixture
-// class. It must be derived from testing::TestWithParam<T>, where T is
-// the type of your parameter values. TestWithParam<T> is itself derived
-// from testing::Test. T can be any copyable type. If it's a raw pointer,
-// you are responsible for managing the lifespan of the pointed values.
-
-class FooTest : public ::testing::TestWithParam<const char*> {
- // You can implement all the usual class fixture members here.
-};
-
-// Then, use the TEST_P macro to define as many parameterized tests
-// for this fixture as you want. The _P suffix stands for "parameterized"
-// or "pattern", whichever you prefer to think of it as.
-
-TEST_P(FooTest, DoesBlah) {
- // Inside a test, access the test parameter with the GetParam() method
- // of the TestWithParam<T> class:
- EXPECT_TRUE(foo.Blah(GetParam()));
- ...
-}
-
-TEST_P(FooTest, HasBlahBlah) {
- ...
-}
-
-// Finally, you can use INSTANTIATE_TEST_CASE_P to instantiate the test
-// case with any set of parameters you want. Google Test defines a number
-// of functions for generating test parameters. They return what we call
-// (surprise!) parameter generators. Here is a summary of them, which
-// are all in the testing namespace:
-//
-//
-// Range(begin, end [, step]) - Yields values {begin, begin+step,
-// begin+step+step, ...}. The values do not
-// include end. step defaults to 1.
-// Values(v1, v2, ..., vN) - Yields values {v1, v2, ..., vN}.
-// ValuesIn(container) - Yields values from a C-style array, an STL
-// ValuesIn(begin,end) container, or an iterator range [begin, end).
-// Bool() - Yields sequence {false, true}.
-// Combine(g1, g2, ..., gN) - Yields all combinations (the Cartesian product
-// for the math savvy) of the values generated
-// by the N generators.
-//
-// For more details, see comments at the definitions of these functions below
-// in this file.
-//
-// The following statement will instantiate tests from the FooTest test case
-// each with parameter values "meeny", "miny", and "moe".
-
-INSTANTIATE_TEST_CASE_P(InstantiationName,
- FooTest,
- Values("meeny", "miny", "moe"));
-
-// To distinguish different instances of the pattern (yes, you
-// can instantiate it more than once), the first argument to the
-// INSTANTIATE_TEST_CASE_P macro is a prefix that will be added to the
-// actual test case name. Remember to pick unique prefixes for different
-// instantiations. The tests from the instantiation above will have
-// these names:
-//
-// * InstantiationName/FooTest.DoesBlah/0 for "meeny"
-// * InstantiationName/FooTest.DoesBlah/1 for "miny"
-// * InstantiationName/FooTest.DoesBlah/2 for "moe"
-// * InstantiationName/FooTest.HasBlahBlah/0 for "meeny"
-// * InstantiationName/FooTest.HasBlahBlah/1 for "miny"
-// * InstantiationName/FooTest.HasBlahBlah/2 for "moe"
-//
-// You can use these names in --gtest_filter.
-//
-// This statement will instantiate all tests from FooTest again, each
-// with parameter values "cat" and "dog":
-
-const char* pets[] = {"cat", "dog"};
-INSTANTIATE_TEST_CASE_P(AnotherInstantiationName, FooTest, ValuesIn(pets));
-
-// The tests from the instantiation above will have these names:
-//
-// * AnotherInstantiationName/FooTest.DoesBlah/0 for "cat"
-// * AnotherInstantiationName/FooTest.DoesBlah/1 for "dog"
-// * AnotherInstantiationName/FooTest.HasBlahBlah/0 for "cat"
-// * AnotherInstantiationName/FooTest.HasBlahBlah/1 for "dog"
-//
-// Please note that INSTANTIATE_TEST_CASE_P will instantiate all tests
-// in the given test case, whether their definitions come before or
-// AFTER the INSTANTIATE_TEST_CASE_P statement.
-//
-// Please also note that generator expressions are evaluated in
-// RUN_ALL_TESTS(), after main() has started. This allows the parameter
-// list to be computed from command-line parameters.
-//
-// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc
-// for more examples.
-//
-// In the future, we plan to publish the API for defining new parameter
-// generators. But for now this interface remains part of the internal
-// implementation and is subject to change.
-
-#endif // 0
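Assembling the pieces from the commented walkthrough above, a complete minimal value-parameterized test could look like the sketch below. FooTest follows the hypothetical fixture from the comments; the IsNonEmpty test body is invented for illustration and is not code present in this tree.

// Illustrative sketch only, assembled from the commented example above.
#include <string.h>
#include <gtest/gtest.h>

class FooTest : public ::testing::TestWithParam<const char*> {
  // The usual fixture members (SetUp/TearDown) could go here.
};

TEST_P(FooTest, IsNonEmpty) {
  // GetParam() returns the current parameter ("meeny", "miny" or "moe").
  EXPECT_TRUE(strlen(GetParam()) > 0);
}

// Runs FooTest.IsNonEmpty once per value; the test names get the
// "InstantiationName/" prefix described above.
INSTANTIATE_TEST_CASE_P(InstantiationName, FooTest,
                        ::testing::Values("meeny", "miny", "moe"));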
-
-
-#include <utility>
-
-#include <gtest/internal/gtest-port.h>
-
-#ifdef GTEST_HAS_PARAM_TEST
-
-#include <gtest/internal/gtest-internal.h>
-#include <gtest/internal/gtest-param-util.h>
-
-namespace testing {
-
-// Functions producing parameter generators.
-//
-// Google Test uses these generators to produce parameters for value-
-// parameterized tests. When a parameterized test case is instantiated
-// with a particular generator, Google Test creates and runs tests
-// for each element in the sequence produced by the generator.
-//
-// In the following sample, tests from test case FooTest are each
-// instantiated three times, with parameter values 3, 5, and 8:
-//
-// class FooTest : public TestWithParam<int> { ... };
-//
-// TEST_P(FooTest, TestThis) {
-// }
-// TEST_P(FooTest, TestThat) {
-// }
-// INSTANTIATE_TEST_CASE_P(TestSequence, FooTest, Values(3, 5, 8));
-//
-
-// Range() returns generators providing sequences of values in a range.
-//
-// Synopsis:
-// Range(start, end)
-// - returns a generator producing a sequence of values {start, start+1,
-// start+2, ...}.
-// Range(start, end, step)
-// - returns a generator producing a sequence of values {start, start+step,
-// start+step+step, ...}.
-// Notes:
-// * The generated sequences never include end. For example, Range(1, 5)
-// returns a generator producing a sequence {1, 2, 3, 4}. Range(1, 9, 2)
-// returns a generator producing {1, 3, 5, 7}.
-// * start and end must have the same type. That type may be any integral or
-// floating-point type or a user defined type satisfying these conditions:
-// * It must be assignable (have operator=() defined).
-// * It must have operator+() (operator+(int-compatible type) for
-// two-operand version).
-// * It must have operator<() defined.
-// Elements in the resulting sequences will also have that type.
-// * The condition start < end must be satisfied in order for the resulting
-// sequences to contain any elements.
-//
-template <typename T, typename IncrementT>
-internal::ParamGenerator<T> Range(T start, T end, IncrementT step) {
- return internal::ParamGenerator<T>(
- new internal::RangeGenerator<T, IncrementT>(start, end, step));
-}
-
-template <typename T>
-internal::ParamGenerator<T> Range(T start, T end) {
- return Range(start, end, 1);
-}
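As a quick illustration of the two Range() overloads defined above, consider the hedged sketch below; IntTest and IsPositive are invented names, not part of this header.

// Illustrative sketch only; IntTest is not defined anywhere in this tree.
#include <gtest/gtest.h>

class IntTest : public ::testing::TestWithParam<int> {};

TEST_P(IntTest, IsPositive) { EXPECT_GT(GetParam(), 0); }

// Range(1, 5) yields {1, 2, 3, 4}; Range(1, 9, 2) yields {1, 3, 5, 7}.
INSTANTIATE_TEST_CASE_P(Dense, IntTest, ::testing::Range(1, 5));
INSTANTIATE_TEST_CASE_P(Sparse, IntTest, ::testing::Range(1, 9, 2));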
-
-// The ValuesIn() function allows generating tests with parameters coming
-// from a container.
-//
-// Synopsis:
-// ValuesIn(const T (&array)[N])
-// - returns a generator producing sequences with elements from
-// a C-style array.
-// ValuesIn(const Container& container)
-// - returns a generator producing sequences with elements from
-// an STL-style container.
-// ValuesIn(Iterator begin, Iterator end)
-// - returns a generator producing sequences with elements from
-// a range [begin, end) defined by a pair of STL-style iterators. These
-// iterators can also be plain C pointers.
-//
-// Please note that ValuesIn copies the values from the containers
-// passed in and keeps them to generate tests in RUN_ALL_TESTS().
-//
-// Examples:
-//
-// This instantiates tests from test case StringTest
-// each with C-string values of "foo", "bar", and "baz":
-//
-// const char* strings[] = {"foo", "bar", "baz"};
-// INSTANTIATE_TEST_CASE_P(StringSequence, StringTest, ValuesIn(strings));
-//
-// This instantiates tests from test case StlStringTest
-// each with STL strings with values "a" and "b":
-//
-// ::std::vector< ::std::string> GetParameterStrings() {
-// ::std::vector< ::std::string> v;
-// v.push_back("a");
-// v.push_back("b");
-// return v;
-// }
-//
-// INSTANTIATE_TEST_CASE_P(CharSequence,
-// StlStringTest,
-// ValuesIn(GetParameterStrings()));
-//
-//
-// This will also instantiate tests from CharTest
-// each with parameter values 'a' and 'b':
-//
-// ::std::list<char> GetParameterChars() {
-// ::std::list<char> list;
-// list.push_back('a');
-// list.push_back('b');
-// return list;
-// }
-// ::std::list<char> l = GetParameterChars();
-// INSTANTIATE_TEST_CASE_P(CharSequence2,
-// CharTest,
-// ValuesIn(l.begin(), l.end()));
-//
-template <typename ForwardIterator>
-internal::ParamGenerator<
- typename ::std::iterator_traits<ForwardIterator>::value_type> ValuesIn(
- ForwardIterator begin,
- ForwardIterator end) {
- typedef typename ::std::iterator_traits<ForwardIterator>::value_type
- ParamType;
- return internal::ParamGenerator<ParamType>(
- new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
-}
-
-template <typename T, size_t N>
-internal::ParamGenerator<T> ValuesIn(const T (&array)[N]) {
- return ValuesIn(array, array + N);
-}
-
-template <class Container>
-internal::ParamGenerator<typename Container::value_type> ValuesIn(
- const Container& container) {
- return ValuesIn(container.begin(), container.end());
-}
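The ValuesIn() overloads defined above can be exercised as in the following sketch. StringTest mirrors the hypothetical fixture from the comments; the test body and instantiation names are assumptions made for illustration.

// Illustrative sketch only; none of these names exist in this tree.
#include <string.h>
#include <vector>
#include <gtest/gtest.h>

class StringTest : public ::testing::TestWithParam<const char*> {};

TEST_P(StringTest, HasChars) { EXPECT_TRUE(strlen(GetParam()) > 0); }

const char* strings[] = {"foo", "bar", "baz"};

// Array overload: ValuesIn(const T (&array)[N]) covers all three values.
INSTANTIATE_TEST_CASE_P(FromArray, StringTest, ::testing::ValuesIn(strings));

// Iterator-range overload: ValuesIn(begin, end) -- here only the first two.
INSTANTIATE_TEST_CASE_P(FromRange, StringTest,
                        ::testing::ValuesIn(strings, strings + 2));

// The container overload works the same way for any STL-style container,
// e.g. a std::vector<const char*> declared at namespace scope.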
-
-} // namespace testing
-
-#include <gtest/internal/gtest-param-util-generated.h>
-
-namespace testing {
-
-// Values() allows generating tests from an explicitly specified list of
-// parameters.
-//
-// Synopsis:
-// Values(T v1, T v2, ..., T vN)
-// - returns a generator producing sequences with elements v1, v2, ..., vN.
-//
-// For example, this instantiates tests from test case BarTest each
-// with values "one", "two", and "three":
-//
-// INSTANTIATE_TEST_CASE_P(NumSequence, BarTest, Values("one", "two", "three"));
-//
-// This instantiates tests from test case BazTest each with values 1, 2, 3.5.
-// The exact type of values will depend on the type of parameter in BazTest.
-//
-// INSTANTIATE_TEST_CASE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
-//
-// Currently, Values() supports from 1 to 50 parameters.
-//
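To complement the commented INSTANTIATE_TEST_CASE_P examples above, here is a hedged sketch using the BazTest case mentioned there; the fixture body and the assertion are invented, and the 50 Values() overloads that implement this follow below.

// Illustrative sketch only; BazTest is not defined anywhere in this tree.
#include <gtest/gtest.h>

class BazTest : public ::testing::TestWithParam<double> {};

TEST_P(BazTest, IsPositive) { EXPECT_GT(GetParam(), 0.0); }

// The arguments 1, 2 and 3.5 are converted to BazTest's parameter type
// (double), so the tests run with 1.0, 2.0 and 3.5.
INSTANTIATE_TEST_CASE_P(FloatingNumbers, BazTest, ::testing::Values(1, 2, 3.5));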
-template <typename T1>
-internal::ValueArray1<T1> Values(T1 v1) {
- return internal::ValueArray1<T1>(v1);
-}
-
-template <typename T1, typename T2>
-internal::ValueArray2<T1, T2> Values(T1 v1, T2 v2) {
- return internal::ValueArray2<T1, T2>(v1, v2);
-}
-
-template <typename T1, typename T2, typename T3>
-internal::ValueArray3<T1, T2, T3> Values(T1 v1, T2 v2, T3 v3) {
- return internal::ValueArray3<T1, T2, T3>(v1, v2, v3);
-}
-
-template <typename T1, typename T2, typename T3, typename T4>
-internal::ValueArray4<T1, T2, T3, T4> Values(T1 v1, T2 v2, T3 v3, T4 v4) {
- return internal::ValueArray4<T1, T2, T3, T4>(v1, v2, v3, v4);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5>
-internal::ValueArray5<T1, T2, T3, T4, T5> Values(T1 v1, T2 v2, T3 v3, T4 v4,
- T5 v5) {
- return internal::ValueArray5<T1, T2, T3, T4, T5>(v1, v2, v3, v4, v5);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6>
-internal::ValueArray6<T1, T2, T3, T4, T5, T6> Values(T1 v1, T2 v2, T3 v3,
- T4 v4, T5 v5, T6 v6) {
- return internal::ValueArray6<T1, T2, T3, T4, T5, T6>(v1, v2, v3, v4, v5, v6);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7>
-internal::ValueArray7<T1, T2, T3, T4, T5, T6, T7> Values(T1 v1, T2 v2, T3 v3,
- T4 v4, T5 v5, T6 v6, T7 v7) {
- return internal::ValueArray7<T1, T2, T3, T4, T5, T6, T7>(v1, v2, v3, v4, v5,
- v6, v7);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8>
-internal::ValueArray8<T1, T2, T3, T4, T5, T6, T7, T8> Values(T1 v1, T2 v2,
- T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8) {
- return internal::ValueArray8<T1, T2, T3, T4, T5, T6, T7, T8>(v1, v2, v3, v4,
- v5, v6, v7, v8);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9>
-internal::ValueArray9<T1, T2, T3, T4, T5, T6, T7, T8, T9> Values(T1 v1, T2 v2,
- T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9) {
- return internal::ValueArray9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(v1, v2, v3,
- v4, v5, v6, v7, v8, v9);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10>
-internal::ValueArray10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> Values(T1 v1,
- T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10) {
- return internal::ValueArray10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>(v1,
- v2, v3, v4, v5, v6, v7, v8, v9, v10);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11>
-internal::ValueArray11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,
- T11> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11) {
- return internal::ValueArray11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,
- T11>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12>
-internal::ValueArray12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12) {
- return internal::ValueArray12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13>
-internal::ValueArray13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13) {
- return internal::ValueArray13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14>
-internal::ValueArray14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) {
- return internal::ValueArray14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
- v14);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15>
-internal::ValueArray15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
- T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) {
- return internal::ValueArray15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
- v13, v14, v15);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16>
-internal::ValueArray16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
- T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
- T16 v16) {
- return internal::ValueArray16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
- v12, v13, v14, v15, v16);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17>
-internal::ValueArray17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
- T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
- T16 v16, T17 v17) {
- return internal::ValueArray17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
- v11, v12, v13, v14, v15, v16, v17);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18>
-internal::ValueArray18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
- T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
- T16 v16, T17 v17, T18 v18) {
- return internal::ValueArray18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
- v10, v11, v12, v13, v14, v15, v16, v17, v18);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19>
-internal::ValueArray19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
- T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
- T15 v15, T16 v16, T17 v17, T18 v18, T19 v19) {
- return internal::ValueArray19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19>(v1, v2, v3, v4, v5, v6, v7, v8,
- v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20>
-internal::ValueArray20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20> Values(T1 v1, T2 v2, T3 v3, T4 v4,
- T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
- T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20) {
- return internal::ValueArray20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20>(v1, v2, v3, v4, v5, v6, v7,
- v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21>
-internal::ValueArray21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21> Values(T1 v1, T2 v2, T3 v3, T4 v4,
- T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
- T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21) {
- return internal::ValueArray21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>(v1, v2, v3, v4, v5, v6,
- v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22>
-internal::ValueArray22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22> Values(T1 v1, T2 v2, T3 v3,
- T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
- T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
- T21 v21, T22 v22) {
- return internal::ValueArray22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>(v1, v2, v3, v4,
- v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
- v20, v21, v22);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23>
-internal::ValueArray23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> Values(T1 v1, T2 v2,
- T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
- T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
- T21 v21, T22 v22, T23 v23) {
- return internal::ValueArray23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>(v1, v2, v3,
- v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
- v20, v21, v22, v23);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24>
-internal::ValueArray24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> Values(T1 v1, T2 v2,
- T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
- T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
- T21 v21, T22 v22, T23 v23, T24 v24) {
- return internal::ValueArray24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>(v1, v2,
- v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
- v19, v20, v21, v22, v23, v24);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25>
-internal::ValueArray25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Values(T1 v1,
- T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11,
- T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19,
- T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25) {
- return internal::ValueArray25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25>(v1,
- v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17,
- v18, v19, v20, v21, v22, v23, v24, v25);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26>
-internal::ValueArray26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26) {
- return internal::ValueArray26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15,
- v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27>
-internal::ValueArray27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27) {
- return internal::ValueArray27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14,
- v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28>
-internal::ValueArray28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28) {
- return internal::ValueArray28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
- v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27,
- v28);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29>
-internal::ValueArray29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29) {
- return internal::ValueArray29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
- v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26,
- v27, v28, v29);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30>
-internal::ValueArray30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
- T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
- T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
- T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) {
- return internal::ValueArray30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
- v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,
- v26, v27, v28, v29, v30);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31>
-internal::ValueArray31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
- T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
- T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
- T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) {
- return internal::ValueArray31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
- v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24,
- v25, v26, v27, v28, v29, v30, v31);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32>
-internal::ValueArray32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
- T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
- T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
- T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
- T32 v32) {
- return internal::ValueArray32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
- v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
- v24, v25, v26, v27, v28, v29, v30, v31, v32);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33>
-internal::ValueArray33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
- T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
- T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
- T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
- T32 v32, T33 v33) {
- return internal::ValueArray33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33>(v1, v2, v3, v4, v5, v6, v7, v8,
- v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
- v24, v25, v26, v27, v28, v29, v30, v31, v32, v33);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34>
-internal::ValueArray34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
- T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
- T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22,
- T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30,
- T31 v31, T32 v32, T33 v33, T34 v34) {
- return internal::ValueArray34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34>(v1, v2, v3, v4, v5, v6, v7,
- v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22,
- v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35>
-internal::ValueArray35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35> Values(T1 v1, T2 v2, T3 v3, T4 v4,
- T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
- T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
- T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
- T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35) {
- return internal::ValueArray35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34, T35>(v1, v2, v3, v4, v5, v6,
- v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21,
- v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36>
-internal::ValueArray36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36> Values(T1 v1, T2 v2, T3 v3, T4 v4,
- T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
- T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
- T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
- T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36) {
- return internal::ValueArray36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36>(v1, v2, v3, v4,
- v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
- v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
- v34, v35, v36);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37>
-internal::ValueArray37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37> Values(T1 v1, T2 v2, T3 v3,
- T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
- T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
- T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
- T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
- T37 v37) {
- return internal::ValueArray37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37>(v1, v2, v3,
- v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
- v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
- v34, v35, v36, v37);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38>
-internal::ValueArray38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> Values(T1 v1, T2 v2,
- T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
- T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
- T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
- T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
- T37 v37, T38 v38) {
- return internal::ValueArray38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38>(v1, v2,
- v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
- v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32,
- v33, v34, v35, v36, v37, v38);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39>
-internal::ValueArray39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Values(T1 v1, T2 v2,
- T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
- T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
- T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
- T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
- T37 v37, T38 v38, T39 v39) {
- return internal::ValueArray39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39>(v1,
- v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17,
- v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31,
- v32, v33, v34, v35, v36, v37, v38, v39);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40>
-internal::ValueArray40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Values(T1 v1,
- T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11,
- T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19,
- T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27,
- T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35,
- T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) {
- return internal::ValueArray40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
- T40>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15,
- v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29,
- v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41>
-internal::ValueArray41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
- T41> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41) {
- return internal::ValueArray41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
- T40, T41>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14,
- v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28,
- v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42>
-internal::ValueArray42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
- T42> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
- T42 v42) {
- return internal::ValueArray42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
- T40, T41, T42>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
- v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27,
- v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41,
- v42);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43>
-internal::ValueArray43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
- T43> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
- T42 v42, T43 v43) {
- return internal::ValueArray43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
- T40, T41, T42, T43>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
- v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26,
- v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40,
- v41, v42, v43);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44>
-internal::ValueArray44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- T44> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
- T42 v42, T43 v43, T44 v44) {
- return internal::ValueArray44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
- T40, T41, T42, T43, T44>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
- v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,
- v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39,
- v40, v41, v42, v43, v44);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45>
-internal::ValueArray45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- T44, T45> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
- T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
- T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
- T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32,
- T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40,
- T41 v41, T42 v42, T43 v43, T44 v44, T45 v45) {
- return internal::ValueArray45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
- T40, T41, T42, T43, T44, T45>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
- v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24,
- v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38,
- v39, v40, v41, v42, v43, v44, v45);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46>
-internal::ValueArray46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- T44, T45, T46> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
- T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
- T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
- T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
- T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
- T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) {
- return internal::ValueArray46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
- T40, T41, T42, T43, T44, T45, T46>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
- v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
- v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37,
- v38, v39, v40, v41, v42, v43, v44, v45, v46);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46, typename T47>
-internal::ValueArray47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- T44, T45, T46, T47> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
- T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
- T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
- T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
- T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
- T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) {
- return internal::ValueArray47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
- T40, T41, T42, T43, T44, T45, T46, T47>(v1, v2, v3, v4, v5, v6, v7, v8,
- v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
- v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37,
- v38, v39, v40, v41, v42, v43, v44, v45, v46, v47);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46, typename T47, typename T48>
-internal::ValueArray48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- T44, T45, T46, T47, T48> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
- T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
- T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
- T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
- T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
- T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47,
- T48 v48) {
- return internal::ValueArray48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
- T40, T41, T42, T43, T44, T45, T46, T47, T48>(v1, v2, v3, v4, v5, v6, v7,
- v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22,
- v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36,
- v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46, typename T47, typename T48, typename T49>
-internal::ValueArray49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- T44, T45, T46, T47, T48, T49> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
- T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
- T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22,
- T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30,
- T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38,
- T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46,
- T47 v47, T48 v48, T49 v49) {
- return internal::ValueArray49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
- T40, T41, T42, T43, T44, T45, T46, T47, T48, T49>(v1, v2, v3, v4, v5, v6,
- v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21,
- v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35,
- v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49);
-}
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46, typename T47, typename T48, typename T49, typename T50>
-internal::ValueArray50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- T44, T45, T46, T47, T48, T49, T50> Values(T1 v1, T2 v2, T3 v3, T4 v4,
- T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
- T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
- T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
- T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37,
- T38 v38, T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45,
- T46 v46, T47 v47, T48 v48, T49 v49, T50 v50) {
- return internal::ValueArray50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
- T40, T41, T42, T43, T44, T45, T46, T47, T48, T49, T50>(v1, v2, v3, v4,
- v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
- v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
- v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47,
- v48, v49, v50);
-}
-
-// Bool() allows generating tests with parameters in a set of (false, true).
-//
-// Synopsis:
-// Bool()
-// - returns a generator producing sequences with elements {false, true}.
-//
-// It is useful when testing code that depends on Boolean flags. Combinations
-// of multiple flags can be tested when several Bool()'s are combined using
- // the Combine() function.
-//
-// In the following example all tests in the test case FlagDependentTest
-// will be instantiated twice with parameters false and true.
-//
-// class FlagDependentTest : public testing::TestWithParam<bool> {
-// virtual void SetUp() {
-// external_flag = GetParam();
-// }
-// }
-// INSTANTIATE_TEST_CASE_P(BoolSequence, FlagDependentTest, Bool());
-//
-inline internal::ParamGenerator<bool> Bool() {
- return Values(false, true);
-}
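For reference, a minimal sketch of how Bool() is meant to be used, fleshing out the FlagDependentTest comment above (external_flag and the test body are hypothetical and not part of this header):

    #include <gtest/gtest.h>

    static bool external_flag = false;  // hypothetical flag under test

    class FlagDependentTest : public testing::TestWithParam<bool> {
     protected:
      virtual void SetUp() { external_flag = GetParam(); }
    };

    TEST_P(FlagDependentTest, FlagMatchesParam) {
      // Instantiated twice: once with false, once with true.
      EXPECT_EQ(GetParam(), external_flag);
    }

    INSTANTIATE_TEST_CASE_P(BoolSequence, FlagDependentTest, testing::Bool());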
-
-#ifdef GTEST_HAS_COMBINE
-// Combine() allows the user to combine two or more sequences to produce
-// values of a Cartesian product of those sequences' elements.
-//
-// Synopsis:
-// Combine(gen1, gen2, ..., genN)
-// - returns a generator producing sequences with elements coming from
-// the Cartesian product of elements from the sequences generated by
-// gen1, gen2, ..., genN. The sequence elements will have a type of
-// tuple<T1, T2, ..., TN> where T1, T2, ..., TN are the types
- // of elements from sequences produced by gen1, gen2, ..., genN.
-//
-// Combine can have up to 10 arguments. This number is currently limited
-// by the maximum number of elements in the tuple implementation used by Google
-// Test.
-//
-// Example:
-//
-// This will instantiate tests in test case AnimalTest each one with
-// the parameter values tuple("cat", BLACK), tuple("cat", WHITE),
-// tuple("dog", BLACK), and tuple("dog", WHITE):
-//
-// enum Color { BLACK, GRAY, WHITE };
-// class AnimalTest
-// : public testing::TestWithParam<tuple<const char*, Color> > {...};
-//
-// TEST_P(AnimalTest, AnimalLooksNice) {...}
-//
-// INSTANTIATE_TEST_CASE_P(AnimalVariations, AnimalTest,
-// Combine(Values("cat", "dog"),
-// Values(BLACK, WHITE)));
-//
-// This will instantiate tests in FlagDependentTest with all variations of two
-// Boolean flags:
-//
-// class FlagDependentTest
- // : public testing::TestWithParam<tuple<bool, bool> > {
-// virtual void SetUp() {
-// // Assigns external_flag_1 and external_flag_2 values from the tuple.
-// tie(external_flag_1, external_flag_2) = GetParam();
-// }
-// };
-//
-// TEST_P(FlagDependentTest, TestFeature1) {
-// // Test your code using external_flag_1 and external_flag_2 here.
-// }
-// INSTANTIATE_TEST_CASE_P(TwoBoolSequence, FlagDependentTest,
-// Combine(Bool(), Bool()));
-//
-template <typename Generator1, typename Generator2>
-internal::CartesianProductHolder2<Generator1, Generator2> Combine(
- const Generator1& g1, const Generator2& g2) {
- return internal::CartesianProductHolder2<Generator1, Generator2>(
- g1, g2);
-}
-
-template <typename Generator1, typename Generator2, typename Generator3>
-internal::CartesianProductHolder3<Generator1, Generator2, Generator3> Combine(
- const Generator1& g1, const Generator2& g2, const Generator3& g3) {
- return internal::CartesianProductHolder3<Generator1, Generator2, Generator3>(
- g1, g2, g3);
-}
-
-template <typename Generator1, typename Generator2, typename Generator3,
- typename Generator4>
-internal::CartesianProductHolder4<Generator1, Generator2, Generator3,
- Generator4> Combine(
- const Generator1& g1, const Generator2& g2, const Generator3& g3,
- const Generator4& g4) {
- return internal::CartesianProductHolder4<Generator1, Generator2, Generator3,
- Generator4>(
- g1, g2, g3, g4);
-}
-
-template <typename Generator1, typename Generator2, typename Generator3,
- typename Generator4, typename Generator5>
-internal::CartesianProductHolder5<Generator1, Generator2, Generator3,
- Generator4, Generator5> Combine(
- const Generator1& g1, const Generator2& g2, const Generator3& g3,
- const Generator4& g4, const Generator5& g5) {
- return internal::CartesianProductHolder5<Generator1, Generator2, Generator3,
- Generator4, Generator5>(
- g1, g2, g3, g4, g5);
-}
-
-template <typename Generator1, typename Generator2, typename Generator3,
- typename Generator4, typename Generator5, typename Generator6>
-internal::CartesianProductHolder6<Generator1, Generator2, Generator3,
- Generator4, Generator5, Generator6> Combine(
- const Generator1& g1, const Generator2& g2, const Generator3& g3,
- const Generator4& g4, const Generator5& g5, const Generator6& g6) {
- return internal::CartesianProductHolder6<Generator1, Generator2, Generator3,
- Generator4, Generator5, Generator6>(
- g1, g2, g3, g4, g5, g6);
-}
-
-template <typename Generator1, typename Generator2, typename Generator3,
- typename Generator4, typename Generator5, typename Generator6,
- typename Generator7>
-internal::CartesianProductHolder7<Generator1, Generator2, Generator3,
- Generator4, Generator5, Generator6, Generator7> Combine(
- const Generator1& g1, const Generator2& g2, const Generator3& g3,
- const Generator4& g4, const Generator5& g5, const Generator6& g6,
- const Generator7& g7) {
- return internal::CartesianProductHolder7<Generator1, Generator2, Generator3,
- Generator4, Generator5, Generator6, Generator7>(
- g1, g2, g3, g4, g5, g6, g7);
-}
-
-template <typename Generator1, typename Generator2, typename Generator3,
- typename Generator4, typename Generator5, typename Generator6,
- typename Generator7, typename Generator8>
-internal::CartesianProductHolder8<Generator1, Generator2, Generator3,
- Generator4, Generator5, Generator6, Generator7, Generator8> Combine(
- const Generator1& g1, const Generator2& g2, const Generator3& g3,
- const Generator4& g4, const Generator5& g5, const Generator6& g6,
- const Generator7& g7, const Generator8& g8) {
- return internal::CartesianProductHolder8<Generator1, Generator2, Generator3,
- Generator4, Generator5, Generator6, Generator7, Generator8>(
- g1, g2, g3, g4, g5, g6, g7, g8);
-}
-
-template <typename Generator1, typename Generator2, typename Generator3,
- typename Generator4, typename Generator5, typename Generator6,
- typename Generator7, typename Generator8, typename Generator9>
-internal::CartesianProductHolder9<Generator1, Generator2, Generator3,
- Generator4, Generator5, Generator6, Generator7, Generator8,
- Generator9> Combine(
- const Generator1& g1, const Generator2& g2, const Generator3& g3,
- const Generator4& g4, const Generator5& g5, const Generator6& g6,
- const Generator7& g7, const Generator8& g8, const Generator9& g9) {
- return internal::CartesianProductHolder9<Generator1, Generator2, Generator3,
- Generator4, Generator5, Generator6, Generator7, Generator8, Generator9>(
- g1, g2, g3, g4, g5, g6, g7, g8, g9);
-}
-
-template <typename Generator1, typename Generator2, typename Generator3,
- typename Generator4, typename Generator5, typename Generator6,
- typename Generator7, typename Generator8, typename Generator9,
- typename Generator10>
-internal::CartesianProductHolder10<Generator1, Generator2, Generator3,
- Generator4, Generator5, Generator6, Generator7, Generator8, Generator9,
- Generator10> Combine(
- const Generator1& g1, const Generator2& g2, const Generator3& g3,
- const Generator4& g4, const Generator5& g5, const Generator6& g6,
- const Generator7& g7, const Generator8& g8, const Generator9& g9,
- const Generator10& g10) {
- return internal::CartesianProductHolder10<Generator1, Generator2, Generator3,
- Generator4, Generator5, Generator6, Generator7, Generator8, Generator9,
- Generator10>(
- g1, g2, g3, g4, g5, g6, g7, g8, g9, g10);
-}
-#endif // GTEST_HAS_COMBINE
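A compilable sketch of the Combine() usage described in the comment above, following its AnimalTest example; the tr1 tuple header and accessors are an assumption about this gtest build's tuple implementation:

    #include <tr1/tuple>
    #include <gtest/gtest.h>

    enum Color { BLACK, GRAY, WHITE };

    class AnimalTest
        : public testing::TestWithParam<std::tr1::tuple<const char*, Color> > {};

    TEST_P(AnimalTest, AnimalLooksNice) {
      const char* animal = std::tr1::get<0>(GetParam());
      Color color = std::tr1::get<1>(GetParam());
      EXPECT_TRUE(animal != NULL);
      EXPECT_NE(GRAY, color);  // GRAY is never produced by the generators below
    }

    INSTANTIATE_TEST_CASE_P(AnimalVariations, AnimalTest,
                            testing::Combine(testing::Values("cat", "dog"),
                                             testing::Values(BLACK, WHITE)));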
-
-
-
-#define TEST_P(test_case_name, test_name) \
- class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
- : public test_case_name { \
- public: \
- GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
- virtual void TestBody(); \
- private: \
- static int AddToRegistry() { \
- ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
- GetTestCasePatternHolder<test_case_name>(\
- #test_case_name, __FILE__, __LINE__)->AddTestPattern(\
- #test_case_name, \
- #test_name, \
- new ::testing::internal::TestMetaFactory< \
- GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>()); \
- return 0; \
- } \
- static int gtest_registering_dummy_; \
- GTEST_DISALLOW_COPY_AND_ASSIGN_(\
- GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
- }; \
- int GTEST_TEST_CLASS_NAME_(test_case_name, \
- test_name)::gtest_registering_dummy_ = \
- GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
- void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
-
-#define INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator) \
- ::testing::internal::ParamGenerator<test_case_name::ParamType> \
- gtest_##prefix##test_case_name##_EvalGenerator_() { return generator; } \
- int gtest_##prefix##test_case_name##_dummy_ = \
- ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
- GetTestCasePatternHolder<test_case_name>(\
- #test_case_name, __FILE__, __LINE__)->AddTestCaseInstantiation(\
- #prefix, \
- &gtest_##prefix##test_case_name##_EvalGenerator_, \
- __FILE__, __LINE__)
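Putting TEST_P and INSTANTIATE_TEST_CASE_P together, a self-contained value-parameterized test could look like the following sketch (MyStringTest and the sample values are made-up names):

    #include <string>
    #include <gtest/gtest.h>

    class MyStringTest : public testing::TestWithParam<const char*> {};

    TEST_P(MyStringTest, IsNonEmpty) {
      // GetParam() yields the current value supplied by the generator.
      EXPECT_FALSE(std::string(GetParam()).empty());
    }

    INSTANTIATE_TEST_CASE_P(CommonStrings, MyStringTest,
                            testing::Values("hello", "world"));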
-
-} // namespace testing
-
-#endif // GTEST_HAS_PARAM_TEST
-
-#endif // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-spi.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-spi.h
deleted file mode 100644
index a4e387a..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-spi.h
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright 2007, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan at google.com (Zhanyong Wan)
-//
-// Utilities for testing Google Test itself and code that uses Google Test
-// (e.g. frameworks built on top of Google Test).
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_
-#define GTEST_INCLUDE_GTEST_GTEST_SPI_H_
-
-#include <gtest/gtest.h>
-
-namespace testing {
-
-// This helper class can be used to mock out Google Test failure reporting
-// so that we can test Google Test or code that builds on Google Test.
-//
-// An object of this class appends a TestPartResult object to the
-// TestPartResultArray object given in the constructor whenever a Google Test
-// failure is reported. It can either intercept only failures that are
-// generated in the same thread that created this object or it can intercept
-// all generated failures. The scope of this mock object can be controlled with
- // the second argument to the two-argument constructor.
-class ScopedFakeTestPartResultReporter
- : public TestPartResultReporterInterface {
- public:
- // The two possible mocking modes of this object.
- enum InterceptMode {
- INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures.
- INTERCEPT_ALL_THREADS // Intercepts all failures.
- };
-
- // The c'tor sets this object as the test part result reporter used
- // by Google Test. The 'result' parameter specifies where to report the
- // results. This reporter will only catch failures generated in the current
- // thread. DEPRECATED
- explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result);
-
- // Same as above, but you can choose the interception scope of this object.
- ScopedFakeTestPartResultReporter(InterceptMode intercept_mode,
- TestPartResultArray* result);
-
- // The d'tor restores the previous test part result reporter.
- virtual ~ScopedFakeTestPartResultReporter();
-
- // Appends the TestPartResult object to the TestPartResultArray
- // received in the constructor.
- //
- // This method is from the TestPartResultReporterInterface
- // interface.
- virtual void ReportTestPartResult(const TestPartResult& result);
- private:
- void Init();
-
- const InterceptMode intercept_mode_;
- TestPartResultReporterInterface* old_reporter_;
- TestPartResultArray* const result_;
-
- GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter);
-};
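A hedged sketch of using this reporter to capture failures instead of reporting them, assuming TestPartResultArray from gtest-test-part.h is available:

    #include <gtest/gtest.h>
    #include <gtest/gtest-spi.h>

    TEST(ReporterDemo, CapturesFailuresWhileInScope) {
      testing::TestPartResultArray results;
      {
        // Failures raised in this thread are appended to 'results'
        // instead of failing ReporterDemo itself.
        testing::ScopedFakeTestPartResultReporter reporter(
            testing::ScopedFakeTestPartResultReporter::
                INTERCEPT_ONLY_CURRENT_THREAD,
            &results);
        ADD_FAILURE() << "captured, not reported";
      }
      EXPECT_EQ(1, results.size());
    }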
-
-namespace internal {
-
-// A helper class for implementing EXPECT_FATAL_FAILURE() and
-// EXPECT_NONFATAL_FAILURE(). Its destructor verifies that the given
-// TestPartResultArray contains exactly one failure that has the given
-// type and contains the given substring. If that's not the case, a
-// non-fatal failure will be generated.
-class SingleFailureChecker {
- public:
- // The constructor remembers the arguments.
- SingleFailureChecker(const TestPartResultArray* results,
- TestPartResultType type,
- const char* substr);
- ~SingleFailureChecker();
- private:
- const TestPartResultArray* const results_;
- const TestPartResultType type_;
- const String substr_;
-
- GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker);
-};
-
-} // namespace internal
-
-} // namespace testing
-
-// A set of macros for testing Google Test assertions or code that's expected
-// to generate Google Test fatal failures. It verifies that the given
-// statement will cause exactly one fatal Google Test failure with 'substr'
-// being part of the failure message.
-//
-// There are two different versions of this macro. EXPECT_FATAL_FAILURE only
-// affects and considers failures generated in the current thread and
-// EXPECT_FATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
-//
-// The verification of the assertion is done correctly even when the statement
-// throws an exception or aborts the current function.
-//
-// Known restrictions:
-// - 'statement' cannot reference local non-static variables or
-// non-static members of the current object.
-// - 'statement' cannot return a value.
-// - You cannot stream a failure message to this macro.
-//
-// Note that even though the implementations of the following two
-// macros are much alike, we cannot refactor them to use a common
-// helper macro, due to some peculiarity in how the preprocessor
-// works. The AcceptsMacroThatExpandsToUnprotectedComma test in
-// gtest_unittest.cc will fail to compile if we do that.
-#define EXPECT_FATAL_FAILURE(statement, substr) \
- do { \
- class GTestExpectFatalFailureHelper {\
- public:\
- static void Execute() { statement; }\
- };\
- ::testing::TestPartResultArray gtest_failures;\
- ::testing::internal::SingleFailureChecker gtest_checker(\
- &gtest_failures, ::testing::TPRT_FATAL_FAILURE, (substr));\
- {\
- ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
- ::testing::ScopedFakeTestPartResultReporter:: \
- INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
- GTestExpectFatalFailureHelper::Execute();\
- }\
- } while (false)
-
-#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
- do { \
- class GTestExpectFatalFailureHelper {\
- public:\
- static void Execute() { statement; }\
- };\
- ::testing::TestPartResultArray gtest_failures;\
- ::testing::internal::SingleFailureChecker gtest_checker(\
- &gtest_failures, ::testing::TPRT_FATAL_FAILURE, (substr));\
- {\
- ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
- ::testing::ScopedFakeTestPartResultReporter:: \
- INTERCEPT_ALL_THREADS, &gtest_failures);\
- GTestExpectFatalFailureHelper::Execute();\
- }\
- } while (false)
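As a quick illustration of the fatal-failure macro above, within the restrictions listed in the comment (no locals, no return value in the statement):

    #include <gtest/gtest.h>
    #include <gtest/gtest-spi.h>

    TEST(FatalFailureDemo, AssertTrueOnFalseIsFatal) {
      // The failure text of ASSERT_TRUE(false) contains the word "false",
      // which is the substring we expect to see.
      EXPECT_FATAL_FAILURE(ASSERT_TRUE(false), "false");
    }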
-
-// A macro for testing Google Test assertions or code that's expected to
-// generate Google Test non-fatal failures. It asserts that the given
-// statement will cause exactly one non-fatal Google Test failure with 'substr'
-// being part of the failure message.
-//
-// There are two different versions of this macro. EXPECT_NONFATAL_FAILURE only
-// affects and considers failures generated in the current thread and
-// EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
-//
-// 'statement' is allowed to reference local variables and members of
-// the current object.
-//
-// The verification of the assertion is done correctly even when the statement
-// throws an exception or aborts the current function.
-//
-// Known restrictions:
-// - You cannot stream a failure message to this macro.
-//
-// Note that even though the implementations of the following two
-// macros are much alike, we cannot refactor them to use a common
-// helper macro, due to some peculiarity in how the preprocessor
-// works. The AcceptsMacroThatExpandsToUnprotectedComma test in
-// gtest_unittest.cc will fail to compile if we do that.
-#define EXPECT_NONFATAL_FAILURE(statement, substr) \
- do {\
- ::testing::TestPartResultArray gtest_failures;\
- ::testing::internal::SingleFailureChecker gtest_checker(\
- &gtest_failures, ::testing::TPRT_NONFATAL_FAILURE, (substr));\
- {\
- ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
- ::testing::ScopedFakeTestPartResultReporter:: \
- INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
- statement;\
- }\
- } while (false)
-
-#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
- do {\
- ::testing::TestPartResultArray gtest_failures;\
- ::testing::internal::SingleFailureChecker gtest_checker(\
- &gtest_failures, ::testing::TPRT_NONFATAL_FAILURE, (substr));\
- {\
- ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
- ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS,\
- &gtest_failures);\
- statement;\
- }\
- } while (false)
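And the non-fatal counterpart, which, as noted above, may reference local variables:

    #include <gtest/gtest.h>
    #include <gtest/gtest-spi.h>

    TEST(NonFatalFailureDemo, ExpectTrueOnFalseIsNonFatal) {
      bool flag = false;
      // The failure message mentions the expression text "flag".
      EXPECT_NONFATAL_FAILURE(EXPECT_TRUE(flag), "flag");
    }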
-
-#endif // GTEST_INCLUDE_GTEST_GTEST_SPI_H_
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-test-part.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-test-part.h
deleted file mode 100644
index 1a281af..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-test-part.h
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: mheule at google.com (Markus Heule)
-//
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
-#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
-
-#include <iosfwd>
-#include <gtest/internal/gtest-internal.h>
-#include <gtest/internal/gtest-string.h>
-
-namespace testing {
-
-// The possible outcomes of a test part (i.e. an assertion or an
-// explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
-enum TestPartResultType {
- TPRT_SUCCESS, // Succeeded.
- TPRT_NONFATAL_FAILURE, // Failed but the test can continue.
- TPRT_FATAL_FAILURE // Failed and the test should be terminated.
-};
-
-// A copyable object representing the result of a test part (i.e. an
-// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCESS()).
-//
-// Don't inherit from TestPartResult as its destructor is not virtual.
-class TestPartResult {
- public:
- // C'tor. TestPartResult does NOT have a default constructor.
- // Always use this constructor (with parameters) to create a
- // TestPartResult object.
- TestPartResult(TestPartResultType type,
- const char* file_name,
- int line_number,
- const char* message)
- : type_(type),
- file_name_(file_name),
- line_number_(line_number),
- summary_(ExtractSummary(message)),
- message_(message) {
- }
-
- // Gets the outcome of the test part.
- TestPartResultType type() const { return type_; }
-
- // Gets the name of the source file where the test part took place, or
- // NULL if it's unknown.
- const char* file_name() const { return file_name_.c_str(); }
-
- // Gets the line in the source file where the test part took place,
- // or -1 if it's unknown.
- int line_number() const { return line_number_; }
-
- // Gets the summary of the failure message.
- const char* summary() const { return summary_.c_str(); }
-
- // Gets the message associated with the test part.
- const char* message() const { return message_.c_str(); }
-
- // Returns true iff the test part passed.
- bool passed() const { return type_ == TPRT_SUCCESS; }
-
- // Returns true iff the test part failed.
- bool failed() const { return type_ != TPRT_SUCCESS; }
-
- // Returns true iff the test part non-fatally failed.
- bool nonfatally_failed() const { return type_ == TPRT_NONFATAL_FAILURE; }
-
- // Returns true iff the test part fatally failed.
- bool fatally_failed() const { return type_ == TPRT_FATAL_FAILURE; }
- private:
- TestPartResultType type_;
-
- // Gets the summary of the failure message by omitting the stack
- // trace in it.
- static internal::String ExtractSummary(const char* message);
-
- // The name of the source file where the test part took place, or
- // NULL if the source file is unknown.
- internal::String file_name_;
- // The line in the source file where the test part took place, or -1
- // if the line number is unknown.
- int line_number_;
- internal::String summary_; // The test failure summary.
- internal::String message_; // The test failure message.
-};
-
-// Prints a TestPartResult object.
-std::ostream& operator<<(std::ostream& os, const TestPartResult& result);
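A small, purely illustrative sketch of constructing and inspecting a TestPartResult by hand; the file name, line and message are invented values:

    #include <iostream>
    #include <gtest/gtest-test-part.h>

    void DescribeResult() {
      testing::TestPartResult result(testing::TPRT_NONFATAL_FAILURE,
                                     "demo.cc", 42, "expected foo, got bar");
      if (result.nonfatally_failed()) {
        std::cout << result << std::endl;  // uses the operator<< declared above
      }
    }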
-
-// An array of TestPartResult objects.
-//
-// We define this class as we cannot use STL containers when compiling
-// Google Test with MSVC 7.1 and exceptions disabled.
-//
-// Don't inherit from TestPartResultArray as its destructor is not
-// virtual.
-class TestPartResultArray {
- public:
- TestPartResultArray();
- ~TestPartResultArray();
-
- // Appends the given TestPartResult to the array.
- void Append(const TestPartResult& result);
-
- // Returns the TestPartResult at the given index (0-based).
- const TestPartResult& GetTestPartResult(int index) const;
-
- // Returns the number of TestPartResult objects in the array.
- int size() const;
- private:
- // Internally we use a list to simulate the array. Yes, this means
- // that random access is O(N) in time, but it's OK for its purpose.
- internal::List<TestPartResult>* const list_;
-
- GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray);
-};
-
-// This interface knows how to report a test part result.
-class TestPartResultReporterInterface {
- public:
- virtual ~TestPartResultReporterInterface() {}
-
- virtual void ReportTestPartResult(const TestPartResult& result) = 0;
-};
-
-namespace internal {
-
-// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a
-// statement generates new fatal failures. To do so it registers itself as the
-// current test part result reporter. Besides checking if fatal failures were
-// reported, it only delegates the reporting to the former result reporter.
-// The original result reporter is restored in the destructor.
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-class HasNewFatalFailureHelper : public TestPartResultReporterInterface {
- public:
- HasNewFatalFailureHelper();
- virtual ~HasNewFatalFailureHelper();
- virtual void ReportTestPartResult(const TestPartResult& result);
- bool has_new_fatal_failure() const { return has_new_fatal_failure_; }
- private:
- bool has_new_fatal_failure_;
- TestPartResultReporterInterface* original_reporter_;
-
- GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper);
-};
-
-} // namespace internal
-
-} // namespace testing
-
-#endif // GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-typed-test.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-typed-test.h
deleted file mode 100644
index dec42cf..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest-typed-test.h
+++ /dev/null
@@ -1,253 +0,0 @@
-// Copyright 2008 Google Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan at google.com (Zhanyong Wan)
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
-#define GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
-
-// This header implements typed tests and type-parameterized tests.
-
-// Typed (aka type-driven) tests repeat the same test for types in a
-// list. You must know which types you want to test with when writing
-// typed tests. Here's how you do it:
-
-#if 0
-
-// First, define a fixture class template. It should be parameterized
-// by a type. Remember to derive it from testing::Test.
-template <typename T>
-class FooTest : public testing::Test {
- public:
- ...
- typedef std::list<T> List;
- static T shared_;
- T value_;
-};
-
-// Next, associate a list of types with the test case, which will be
-// repeated for each type in the list. The typedef is necessary for
-// the macro to parse correctly.
-typedef testing::Types<char, int, unsigned int> MyTypes;
-TYPED_TEST_CASE(FooTest, MyTypes);
-
-// If the type list contains only one type, you can write that type
-// directly without Types<...>:
-// TYPED_TEST_CASE(FooTest, int);
-
-// Then, use TYPED_TEST() instead of TEST_F() to define as many typed
-// tests for this test case as you want.
-TYPED_TEST(FooTest, DoesBlah) {
- // Inside a test, refer to TypeParam to get the type parameter.
- // Since we are inside a derived class template, C++ requires us to
- // visit the members of FooTest via 'this'.
- TypeParam n = this->value_;
-
- // To visit static members of the fixture, add the TestFixture::
- // prefix.
- n += TestFixture::shared_;
-
- // To refer to typedefs in the fixture, add the "typename
- // TestFixture::" prefix.
- typename TestFixture::List values;
- values.push_back(n);
- ...
-}
-
-TYPED_TEST(FooTest, HasPropertyA) { ... }
-
-#endif // 0
-
-// Type-parameterized tests are abstract test patterns parameterized
-// by a type. Compared with typed tests, type-parameterized tests
-// allow you to define the test pattern without knowing what the type
-// parameters are. The defined pattern can be instantiated with
-// different types any number of times, in any number of translation
-// units.
-//
-// If you are designing an interface or concept, you can define a
-// suite of type-parameterized tests to verify properties that any
-// valid implementation of the interface/concept should have. Then,
-// each implementation can easily instantiate the test suite to verify
-// that it conforms to the requirements, without having to write
-// similar tests repeatedly. Here's an example:
-
-#if 0
-
-// First, define a fixture class template. It should be parameterized
-// by a type. Remember to derive it from testing::Test.
-template <typename T>
-class FooTest : public testing::Test {
- ...
-};
-
-// Next, declare that you will define a type-parameterized test case
-// (the _P suffix is for "parameterized" or "pattern", whichever you
-// prefer):
-TYPED_TEST_CASE_P(FooTest);
-
-// Then, use TYPED_TEST_P() to define as many type-parameterized tests
-// for this type-parameterized test case as you want.
-TYPED_TEST_P(FooTest, DoesBlah) {
- // Inside a test, refer to TypeParam to get the type parameter.
- TypeParam n = 0;
- ...
-}
-
-TYPED_TEST_P(FooTest, HasPropertyA) { ... }
-
-// Now the tricky part: you need to register all test patterns before
-// you can instantiate them. The first argument of the macro is the
-// test case name; the rest are the names of the tests in this test
-// case.
-REGISTER_TYPED_TEST_CASE_P(FooTest,
- DoesBlah, HasPropertyA);
-
-// Finally, you are free to instantiate the pattern with the types you
-// want. If you put the above code in a header file, you can #include
-// it in multiple C++ source files and instantiate it multiple times.
-//
-// To distinguish different instances of the pattern, the first
-// argument to the INSTANTIATE_* macro is a prefix that will be added
-// to the actual test case name. Remember to pick unique prefixes for
-// different instances.
-typedef testing::Types<char, int, unsigned int> MyTypes;
-INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes);
-
-// If the type list contains only one type, you can write that type
-// directly without Types<...>:
-// INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, int);
-
-#endif // 0
-
-#include <gtest/internal/gtest-port.h>
-#include <gtest/internal/gtest-type-util.h>
-
-// Implements typed tests.
-
-#ifdef GTEST_HAS_TYPED_TEST
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Expands to the name of the typedef for the type parameters of the
-// given test case.
-#define GTEST_TYPE_PARAMS_(TestCaseName) gtest_type_params_##TestCaseName##_
-
-#define TYPED_TEST_CASE(CaseName, Types) \
- typedef ::testing::internal::TypeList<Types>::type \
- GTEST_TYPE_PARAMS_(CaseName)
-
-#define TYPED_TEST(CaseName, TestName) \
- template <typename gtest_TypeParam_> \
- class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \
- : public CaseName<gtest_TypeParam_> { \
- private: \
- typedef CaseName<gtest_TypeParam_> TestFixture; \
- typedef gtest_TypeParam_ TypeParam; \
- virtual void TestBody(); \
- }; \
- bool gtest_##CaseName##_##TestName##_registered_ = \
- ::testing::internal::TypeParameterizedTest< \
- CaseName, \
- ::testing::internal::TemplateSel< \
- GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \
- GTEST_TYPE_PARAMS_(CaseName)>::Register(\
- "", #CaseName, #TestName, 0); \
- template <typename gtest_TypeParam_> \
- void GTEST_TEST_CLASS_NAME_(CaseName, TestName)<gtest_TypeParam_>::TestBody()
-
-#endif // GTEST_HAS_TYPED_TEST
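Because the #if 0 walkthrough near the top of this header elides its bodies with '...', here is a compilable variant of the same typed-test pattern (NumericTest and its members are hypothetical):

    #include <gtest/gtest.h>

    template <typename T>
    class NumericTest : public testing::Test {
     public:
      NumericTest() : zero_(0) {}
      T zero_;
    };

    typedef testing::Types<char, int, unsigned int> NumericTypes;
    TYPED_TEST_CASE(NumericTest, NumericTypes);

    TYPED_TEST(NumericTest, ZeroIsAdditiveIdentity) {
      // TypeParam names the current type; fixture members need 'this->'.
      TypeParam n = this->zero_;
      EXPECT_EQ(n, static_cast<TypeParam>(n + this->zero_));
    }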
-
-// Implements type-parameterized tests.
-
-#ifdef GTEST_HAS_TYPED_TEST_P
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Expands to the namespace name that the type-parameterized tests for
-// the given type-parameterized test case are defined in. The exact
-// name of the namespace is subject to change without notice.
-#define GTEST_CASE_NAMESPACE_(TestCaseName) \
- gtest_case_##TestCaseName##_
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Expands to the name of the variable used to remember the names of
-// the defined tests in the given test case.
-#define GTEST_TYPED_TEST_CASE_P_STATE_(TestCaseName) \
- gtest_typed_test_case_p_state_##TestCaseName##_
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY.
-//
-// Expands to the name of the variable used to remember the names of
-// the registered tests in the given test case.
-#define GTEST_REGISTERED_TEST_NAMES_(TestCaseName) \
- gtest_registered_test_names_##TestCaseName##_
-
-// The variables defined in the type-parameterized test macros are
-// static as typically these macros are used in a .h file that can be
-// #included in multiple translation units linked together.
-#define TYPED_TEST_CASE_P(CaseName) \
- static ::testing::internal::TypedTestCasePState \
- GTEST_TYPED_TEST_CASE_P_STATE_(CaseName)
-
-#define TYPED_TEST_P(CaseName, TestName) \
- namespace GTEST_CASE_NAMESPACE_(CaseName) { \
- template <typename gtest_TypeParam_> \
- class TestName : public CaseName<gtest_TypeParam_> { \
- private: \
- typedef CaseName<gtest_TypeParam_> TestFixture; \
- typedef gtest_TypeParam_ TypeParam; \
- virtual void TestBody(); \
- }; \
- static bool gtest_##TestName##_defined_ = \
- GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).AddTestName(\
- __FILE__, __LINE__, #CaseName, #TestName); \
- } \
- template <typename gtest_TypeParam_> \
- void GTEST_CASE_NAMESPACE_(CaseName)::TestName<gtest_TypeParam_>::TestBody()
-
-#define REGISTER_TYPED_TEST_CASE_P(CaseName, ...) \
- namespace GTEST_CASE_NAMESPACE_(CaseName) { \
- typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \
- } \
- static const char* const GTEST_REGISTERED_TEST_NAMES_(CaseName) = \
- GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).VerifyRegisteredTestNames(\
- __FILE__, __LINE__, #__VA_ARGS__)
-
-#define INSTANTIATE_TYPED_TEST_CASE_P(Prefix, CaseName, Types) \
- bool gtest_##Prefix##_##CaseName = \
- ::testing::internal::TypeParameterizedTestCase<CaseName, \
- GTEST_CASE_NAMESPACE_(CaseName)::gtest_AllTests_, \
- ::testing::internal::TypeList<Types>::type>::Register(\
- #Prefix, #CaseName, GTEST_REGISTERED_TEST_NAMES_(CaseName))
-
-#endif // GTEST_HAS_TYPED_TEST_P
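Similarly, a compilable version of the type-parameterized pattern registered through the macros above, with invented names (ContainerLikeTest, the test, and the instantiating types are illustrative assumptions):

    #include <string>
    #include <vector>
    #include <gtest/gtest.h>

    template <typename T>
    class ContainerLikeTest : public testing::Test {};

    TYPED_TEST_CASE_P(ContainerLikeTest);

    TYPED_TEST_P(ContainerLikeTest, DefaultConstructsEmpty) {
      TypeParam value = TypeParam();
      EXPECT_EQ(0u, value.size());
    }

    REGISTER_TYPED_TEST_CASE_P(ContainerLikeTest, DefaultConstructsEmpty);

    typedef testing::Types<std::vector<int>, std::string> StdContainers;
    INSTANTIATE_TYPED_TEST_CASE_P(Std, ContainerLikeTest, StdContainers);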
-
-#endif // GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest.h
deleted file mode 100644
index ebd3123..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest.h
+++ /dev/null
@@ -1,1317 +0,0 @@
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan at google.com (Zhanyong Wan)
-//
-// The Google C++ Testing Framework (Google Test)
-//
-// This header file defines the public API for Google Test. It should be
-// included by any test program that uses Google Test.
-//
- // IMPORTANT NOTE: Due to limitations of the C++ language, we have to
-// leave some internal implementation details in this header file.
-// They are clearly marked by comments like this:
-//
-// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-//
-// Such code is NOT meant to be used by a user directly, and is subject
-// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user
-// program!
-//
-// Acknowledgment: Google Test borrowed the idea of automatic test
-// registration from Barthelemy Dagenais' (barthelemy at prologique.com)
-// easyUnit framework.
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
-#define GTEST_INCLUDE_GTEST_GTEST_H_
-
-// The following platform macros are used throughout Google Test:
-// _WIN32_WCE Windows CE (set in project files)
-//
-// Note that even though _MSC_VER and _WIN32_WCE really indicate a compiler
-// and a Win32 implementation, respectively, we use them to indicate the
-// combination of compiler - Win 32 API - C library, since the code currently
-// only supports:
-// Windows proper with Visual C++ and MS C library (_MSC_VER && !_WIN32_WCE) and
-// Windows Mobile with Visual C++ and no C library (_WIN32_WCE).
-
-#include <limits>
-#include <gtest/internal/gtest-internal.h>
-#include <gtest/internal/gtest-string.h>
-#include <gtest/gtest-death-test.h>
-#include <gtest/gtest-message.h>
-#include <gtest/gtest-param-test.h>
-#include <gtest/gtest_prod.h>
-#include <gtest/gtest-test-part.h>
-#include <gtest/gtest-typed-test.h>
-
-// Depending on the platform, different string classes are available.
-// On Windows, ::std::string compiles only when exceptions are
-// enabled. On Linux, in addition to ::std::string, Google also makes
-// use of class ::string, which has the same interface as
-// ::std::string, but has a different implementation.
-//
-// The user can tell us whether ::std::string is available in his
-// environment by defining the macro GTEST_HAS_STD_STRING to either 1
-// or 0 on the compiler command line. He can also define
-// GTEST_HAS_GLOBAL_STRING to 1 to indicate that ::string is available
-// AND is a distinct type to ::std::string, or define it to 0 to
-// indicate otherwise.
-//
-// If the user's ::std::string and ::string are the same class due to
-// aliasing, he should define GTEST_HAS_STD_STRING to 1 and
-// GTEST_HAS_GLOBAL_STRING to 0.
-//
-// If the user doesn't define GTEST_HAS_STD_STRING and/or
-// GTEST_HAS_GLOBAL_STRING, they are defined heuristically.
-
-namespace testing {
-
-// The upper limit for valid stack trace depths.
-const int kMaxStackTraceDepth = 100;
-
-// This flag specifies the maximum number of stack frames to be
-// printed in a failure message.
-GTEST_DECLARE_int32_(stack_trace_depth);
-
-// This flag controls whether Google Test includes Google Test internal
-// stack frames in failure stack traces.
-GTEST_DECLARE_bool_(show_internal_stack_frames);
-
-namespace internal {
-
-class GTestFlagSaver;
-
-// Converts a streamable value to a String. A NULL pointer is
-// converted to "(null)". When the input value is a ::string,
-// ::std::string, ::wstring, or ::std::wstring object, each NUL
-// character in it is replaced with "\\0".
-// Declared in gtest-internal.h but defined here, so that it has access
-// to the definition of the Message class, required by the ARM
-// compiler.
-template <typename T>
-String StreamableToString(const T& streamable) {
- return (Message() << streamable).GetString();
-}
-
-} // namespace internal
-
-// A class for indicating whether an assertion was successful. When
-// the assertion wasn't successful, the AssertionResult object
-// remembers a non-empty message that described how it failed.
-//
-// This class is useful for defining predicate-format functions to be
-// used with predicate assertions (ASSERT_PRED_FORMAT*, etc).
-//
-// The constructor of AssertionResult is private. To create an
-// instance of this class, use one of the factory functions
-// (AssertionSuccess() and AssertionFailure()).
-//
-// For example, in order to be able to write:
-//
-// // Verifies that Foo() returns an even number.
-// EXPECT_PRED_FORMAT1(IsEven, Foo());
-//
-// you just need to define:
-//
-// testing::AssertionResult IsEven(const char* expr, int n) {
-// if ((n % 2) == 0) return testing::AssertionSuccess();
-//
-// Message msg;
-// msg << "Expected: " << expr << " is even\n"
-// << " Actual: it's " << n;
-// return testing::AssertionFailure(msg);
-// }
-//
-// If Foo() returns 5, you will see the following message:
-//
-// Expected: Foo() is even
-// Actual: it's 5
-class AssertionResult {
- public:
- // Declares factory functions for making successful and failed
- // assertion results as friends.
- friend AssertionResult AssertionSuccess();
- friend AssertionResult AssertionFailure(const Message&);
-
- // Returns true iff the assertion succeeded.
- operator bool() const { return failure_message_.c_str() == NULL; } // NOLINT
-
- // Returns the assertion's failure message.
- const char* failure_message() const { return failure_message_.c_str(); }
-
- private:
- // The default constructor. It is used when the assertion succeeded.
- AssertionResult() {}
-
- // The constructor used when the assertion failed.
- explicit AssertionResult(const internal::String& failure_message);
-
- // Stores the assertion's failure message.
- internal::String failure_message_;
-};
-
-// Makes a successful assertion result.
-AssertionResult AssertionSuccess();
-
-// Makes a failed assertion result with the given failure message.
-AssertionResult AssertionFailure(const Message& msg);
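Tying the two factory functions above back to the IsEven example in the AssertionResult comment, a complete predicate-format sketch (Foo and IsEven are the comment's hypothetical names):

    #include <gtest/gtest.h>

    int Foo() { return 5; }  // hypothetical function under test

    testing::AssertionResult IsEven(const char* expr, int n) {
      if ((n % 2) == 0) return testing::AssertionSuccess();
      testing::Message msg;
      msg << "Expected: " << expr << " is even\n  Actual: it's " << n;
      return testing::AssertionFailure(msg);
    }

    TEST(FooTest, ReturnsEvenNumber) {
      EXPECT_PRED_FORMAT1(IsEven, Foo());  // fails with the message built above
    }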
-
-// The abstract class that all tests inherit from.
-//
-// In Google Test, a unit test program contains one or many TestCases, and
-// each TestCase contains one or many Tests.
-//
-// When you define a test using the TEST macro, you don't need to
-// explicitly derive from Test - the TEST macro automatically does
-// this for you.
-//
-// The only time you derive from Test is when defining a test fixture
- // to be used in TEST_F. For example:
-//
-// class FooTest : public testing::Test {
-// protected:
-// virtual void SetUp() { ... }
-// virtual void TearDown() { ... }
-// ...
-// };
-//
-// TEST_F(FooTest, Bar) { ... }
-// TEST_F(FooTest, Baz) { ... }
-//
-// Test is not copyable.
-class Test {
- public:
- friend class internal::TestInfoImpl;
-
- // Defines types for pointers to functions that set up and tear down
- // a test case.
- typedef internal::SetUpTestCaseFunc SetUpTestCaseFunc;
- typedef internal::TearDownTestCaseFunc TearDownTestCaseFunc;
-
- // The d'tor is virtual as we intend to inherit from Test.
- virtual ~Test();
-
- // Sets up the stuff shared by all tests in this test case.
- //
- // Google Test will call Foo::SetUpTestCase() before running the first
- // test in test case Foo. Hence a sub-class can define its own
- // SetUpTestCase() method to shadow the one defined in the super
- // class.
- static void SetUpTestCase() {}
-
- // Tears down the stuff shared by all tests in this test case.
- //
- // Google Test will call Foo::TearDownTestCase() after running the last
- // test in test case Foo. Hence a sub-class can define its own
- // TearDownTestCase() method to shadow the one defined in the super
- // class.
- static void TearDownTestCase() {}
-
- // Returns true iff the current test has a fatal failure.
- static bool HasFatalFailure();
-
- // Logs a property for the current test. Only the last value for a given
- // key is remembered.
- // These are public static so they can be called from utility functions
- // that are not members of the test fixture.
- // The arguments are const char* instead of strings, as Google Test is used
- // on platforms where string doesn't compile.
- //
- // Note that a driving consideration for these RecordProperty methods
- // was to produce xml output suited to the Greenspan charting utility,
- // which at present will only chart values that fit in a 32-bit int. It
- // is the user's responsibility to restrict their values to 32-bit ints
- // if they intend them to be used with Greenspan.
- static void RecordProperty(const char* key, const char* value);
- static void RecordProperty(const char* key, int value);
-
- protected:
- // Creates a Test object.
- Test();
-
- // Sets up the test fixture.
- virtual void SetUp();
-
- // Tears down the test fixture.
- virtual void TearDown();
-
- private:
- // Returns true iff the current test has the same fixture class as
- // the first test in the current test case.
- static bool HasSameFixtureClass();
-
- // Runs the test after the test fixture has been set up.
- //
- // A sub-class must implement this to define the test logic.
- //
- // DO NOT OVERRIDE THIS FUNCTION DIRECTLY IN A USER PROGRAM.
- // Instead, use the TEST or TEST_F macro.
- virtual void TestBody() = 0;
-
- // Sets up, executes, and tears down the test.
- void Run();
-
- // Uses a GTestFlagSaver to save and restore all Google Test flags.
- const internal::GTestFlagSaver* const gtest_flag_saver_;
-
- // Often a user mis-spells SetUp() as Setup() and spends a long time
- // wondering why it is never called by Google Test. The declaration of
- // the following method is solely for catching such an error at
- // compile time:
- //
- // - The return type is deliberately chosen to be not void, so it
- // will be a conflict if a user declares void Setup() in his test
- // fixture.
- //
- // - This method is private, so it will be another compiler error
- // if a user calls it from his test fixture.
- //
- // DO NOT OVERRIDE THIS FUNCTION.
- //
- // If you see an error about overriding the following function or
- // about it being private, you have mis-spelled SetUp() as Setup().
- struct Setup_should_be_spelled_SetUp {};
- virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
-
- // We disallow copying Tests.
- GTEST_DISALLOW_COPY_AND_ASSIGN_(Test);
-};
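A minimal illustrative sketch (not taken from this header) of a fixture that shadows the static SetUpTestCase()/TearDownTestCase() hooks described above; FooTest and the shared counter are hypothetical names, and <gtest/gtest.h> is assumed to be available.

#include <gtest/gtest.h>

// Hypothetical fixture: the shared counter is created once before the first
// test in the test case runs and released after the last one has finished.
class FooTest : public testing::Test {
 protected:
  static void SetUpTestCase() { shared_counter_ = new int(0); }
  static void TearDownTestCase() {
    delete shared_counter_;
    shared_counter_ = NULL;
  }

  // Per-test set-up still uses the virtual SetUp()/TearDown() pair.
  virtual void SetUp() { ++*shared_counter_; }

  static int* shared_counter_;
};

int* FooTest::shared_counter_ = NULL;

TEST_F(FooTest, SeesSharedCounter) {
  EXPECT_GE(*shared_counter_, 1);
}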
-
-
-// A TestInfo object stores the following information about a test:
-//
-// Test case name
-// Test name
-// Whether the test should be run
-// A function pointer that creates the test object when invoked
-// Test result
-//
-// The constructor of TestInfo registers itself with the UnitTest
-// singleton such that the RUN_ALL_TESTS() macro knows which tests to
-// run.
-class TestInfo {
- public:
- // Destructs a TestInfo object. This function is not virtual, so
- // don't inherit from TestInfo.
- ~TestInfo();
-
- // Returns the test case name.
- const char* test_case_name() const;
-
- // Returns the test name.
- const char* name() const;
-
- // Returns the test case comment.
- const char* test_case_comment() const;
-
- // Returns the test comment.
- const char* comment() const;
-
- // Returns true if this test should run.
- //
- // Google Test allows the user to filter the tests by their full names.
- // The full name of a test Bar in test case Foo is defined as
- // "Foo.Bar". Only the tests that match the filter will run.
- //
- // A filter is a colon-separated list of glob (not regex) patterns,
- // optionally followed by a '-' and a colon-separated list of
- // negative patterns (tests to exclude). A test is run if it
- // matches one of the positive patterns and does not match any of
- // the negative patterns.
- //
- // For example, *A*:Foo.* is a filter that matches any string that
- // contains the character 'A' or starts with "Foo.".
- bool should_run() const;
-
- // Returns the result of the test.
- const internal::TestResult* result() const;
- private:
-#ifdef GTEST_HAS_DEATH_TEST
- friend class internal::DefaultDeathTestFactory;
-#endif // GTEST_HAS_DEATH_TEST
- friend class internal::TestInfoImpl;
- friend class internal::UnitTestImpl;
- friend class Test;
- friend class TestCase;
- friend TestInfo* internal::MakeAndRegisterTestInfo(
- const char* test_case_name, const char* name,
- const char* test_case_comment, const char* comment,
- internal::TypeId fixture_class_id,
- Test::SetUpTestCaseFunc set_up_tc,
- Test::TearDownTestCaseFunc tear_down_tc,
- internal::TestFactoryBase* factory);
-
- // Increments the number of death tests encountered in this test so
- // far.
- int increment_death_test_count();
-
- // Accessors for the implementation object.
- internal::TestInfoImpl* impl() { return impl_; }
- const internal::TestInfoImpl* impl() const { return impl_; }
-
- // Constructs a TestInfo object. The newly constructed instance assumes
- // ownership of the factory object.
- TestInfo(const char* test_case_name, const char* name,
- const char* test_case_comment, const char* comment,
- internal::TypeId fixture_class_id,
- internal::TestFactoryBase* factory);
-
- // An opaque implementation object.
- internal::TestInfoImpl* impl_;
-
- GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo);
-};
-
-// An Environment object is capable of setting up and tearing down an
-// environment. The user should subclass this to define his own
-// environment(s).
-//
-// An Environment object does the set-up and tear-down in virtual
-// methods SetUp() and TearDown() instead of the constructor and the
-// destructor, as:
-//
-// 1. You cannot safely throw from a destructor. This is a problem
-// as in some cases Google Test is used where exceptions are enabled, and
-// we may want to implement ASSERT_* using exceptions where they are
-// available.
-// 2. You cannot use ASSERT_* directly in a constructor or
-// destructor.
-class Environment {
- public:
- // The d'tor is virtual as we need to subclass Environment.
- virtual ~Environment() {}
-
- // Override this to define how to set up the environment.
- virtual void SetUp() {}
-
- // Override this to define how to tear down the environment.
- virtual void TearDown() {}
- private:
- // If you see an error about overriding the following function or
- // about it being private, you have mis-spelled SetUp() as Setup().
- struct Setup_should_be_spelled_SetUp {};
- virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
-};
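A minimal sketch of the subclassing described above; FooEnvironment is a hypothetical name (reused in the AddGlobalTestEnvironment() example further down), not part of the original source.

#include <gtest/gtest.h>

// Hypothetical global environment: set up once before any test runs,
// torn down once after the last test has finished.
class FooEnvironment : public testing::Environment {
 public:
  virtual void SetUp() {
    // Acquire whatever the whole test program needs (connections,
    // temporary directories, ...).
  }
  virtual void TearDown() {
    // Release the resources acquired in SetUp(). Environments are torn
    // down in the reverse order of their registration.
  }
};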
-
-// A UnitTest consists of a list of TestCases.
-//
-// This is a singleton class. The only instance of UnitTest is
-// created when UnitTest::GetInstance() is first called. This
-// instance is never deleted.
-//
-// UnitTest is not copyable.
-//
-// This class is thread-safe as long as the methods are called
-// according to their specification.
-class UnitTest {
- public:
- // Gets the singleton UnitTest object. The first time this method
- // is called, a UnitTest object is constructed and returned.
- // Consecutive calls will return the same object.
- static UnitTest* GetInstance();
-
- // Registers and returns a global test environment. When a test
- // program is run, all global test environments will be set up in
- // the order they were registered. After all tests in the program
- // have finished, all global test environments will be torn down in
- // the *reverse* order they were registered.
- //
- // The UnitTest object takes ownership of the given environment.
- //
- // This method can only be called from the main thread.
- Environment* AddEnvironment(Environment* env);
-
- // Adds a TestPartResult to the current TestResult object. All
- // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc)
- // eventually call this to report their results. The user code
- // should use the assertion macros instead of calling this directly.
- //
- // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
- void AddTestPartResult(TestPartResultType result_type,
- const char* file_name,
- int line_number,
- const internal::String& message,
- const internal::String& os_stack_trace);
-
- // Adds a TestProperty to the current TestResult object. If the result already
- // contains a property with the same key, the value will be updated.
- void RecordPropertyForCurrentTest(const char* key, const char* value);
-
- // Runs all tests in this UnitTest object and prints the result.
- // Returns 0 if successful, or 1 otherwise.
- //
- // This method can only be called from the main thread.
- //
- // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
- int Run() GTEST_MUST_USE_RESULT_;
-
- // Returns the working directory when the first TEST() or TEST_F()
- // was executed. The UnitTest object owns the string.
- const char* original_working_dir() const;
-
- // Returns the TestCase object for the test that's currently running,
- // or NULL if no test is running.
- const TestCase* current_test_case() const;
-
- // Returns the TestInfo object for the test that's currently running,
- // or NULL if no test is running.
- const TestInfo* current_test_info() const;
-
-#ifdef GTEST_HAS_PARAM_TEST
- // Returns the ParameterizedTestCaseRegistry object used to keep track of
- // value-parameterized tests and instantiate and register them.
- internal::ParameterizedTestCaseRegistry& parameterized_test_registry();
-#endif // GTEST_HAS_PARAM_TEST
-
- // Accessors for the implementation object.
- internal::UnitTestImpl* impl() { return impl_; }
- const internal::UnitTestImpl* impl() const { return impl_; }
- private:
- // ScopedTrace is a friend as it needs to modify the per-thread
- // trace stack, which is a private member of UnitTest.
- friend class internal::ScopedTrace;
-
- // Creates an empty UnitTest.
- UnitTest();
-
- // D'tor
- virtual ~UnitTest();
-
- // Pushes a trace defined by SCOPED_TRACE() on to the per-thread
- // Google Test trace stack.
- void PushGTestTrace(const internal::TraceInfo& trace);
-
- // Pops a trace from the per-thread Google Test trace stack.
- void PopGTestTrace();
-
- // Protects mutable state in *impl_. This is mutable as some const
- // methods need to lock it too.
- mutable internal::Mutex mutex_;
-
- // Opaque implementation object. This field is never changed once
- // the object is constructed. We don't mark it as const here, as
- // doing so will cause a warning in the constructor of UnitTest.
- // Mutable state in *impl_ is protected by mutex_.
- internal::UnitTestImpl* impl_;
-
- // We disallow copying UnitTest.
- GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest);
-};
-
-// A convenient wrapper for adding an environment for the test
-// program.
-//
-// You should call this before RUN_ALL_TESTS() is called, probably in
-// main(). If you use gtest_main, you need to call this before main()
-// starts for it to take effect. For example, you can define a global
-// variable like this:
-//
-// testing::Environment* const foo_env =
-// testing::AddGlobalTestEnvironment(new FooEnvironment);
-//
-// However, we strongly recommend you to write your own main() and
-// call AddGlobalTestEnvironment() there, as relying on initialization
-// of global variables makes the code harder to read and may cause
-// problems when you register multiple environments from different
-// translation units and the environments have dependencies among them
-// (remember that the compiler doesn't guarantee the order in which
-// global variables from different translation units are initialized).
-inline Environment* AddGlobalTestEnvironment(Environment* env) {
- return UnitTest::GetInstance()->AddEnvironment(env);
-}
-
-// Initializes Google Test. This must be called before calling
-// RUN_ALL_TESTS(). In particular, it parses a command line for the
-// flags that Google Test recognizes. Whenever a Google Test flag is
-// seen, it is removed from argv, and *argc is decremented.
-//
-// No value is returned. Instead, the Google Test flag variables are
-// updated.
-//
-// Calling the function for the second time has no user-visible effect.
-void InitGoogleTest(int* argc, char** argv);
-
-// This overloaded version can be used in Windows programs compiled in
-// UNICODE mode.
-void InitGoogleTest(int* argc, wchar_t** argv);
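Putting the pieces together, an illustrative main() would parse the Google Test flags, register the hypothetical FooEnvironment sketched earlier (assumed visible in this translation unit), and then run all tests.

#include <gtest/gtest.h>

int main(int argc, char** argv) {
  // Strips the --gtest_* flags from argc/argv and updates the flag variables.
  testing::InitGoogleTest(&argc, argv);
  // The UnitTest singleton takes ownership of the environment object.
  testing::AddGlobalTestEnvironment(new FooEnvironment);
  return RUN_ALL_TESTS();
}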
-
-namespace internal {
-
-// These overloaded versions handle ::std::string and ::std::wstring.
-#if GTEST_HAS_STD_STRING
-inline String FormatForFailureMessage(const ::std::string& str) {
- return (Message() << '"' << str << '"').GetString();
-}
-#endif // GTEST_HAS_STD_STRING
-
-#if GTEST_HAS_STD_WSTRING
-inline String FormatForFailureMessage(const ::std::wstring& wstr) {
- return (Message() << "L\"" << wstr << '"').GetString();
-}
-#endif // GTEST_HAS_STD_WSTRING
-
-// These overloaded versions handle ::string and ::wstring.
-#if GTEST_HAS_GLOBAL_STRING
-inline String FormatForFailureMessage(const ::string& str) {
- return (Message() << '"' << str << '"').GetString();
-}
-#endif // GTEST_HAS_GLOBAL_STRING
-
-#if GTEST_HAS_GLOBAL_WSTRING
-inline String FormatForFailureMessage(const ::wstring& wstr) {
- return (Message() << "L\"" << wstr << '"').GetString();
-}
-#endif // GTEST_HAS_GLOBAL_WSTRING
-
-// Formats a comparison assertion (e.g. ASSERT_EQ, EXPECT_LT, etc.)
-// operand to be used in a failure message. The type (but not value)
-// of the other operand may affect the format. This allows us to
-// print a char* as a raw pointer when it is compared against another
-// char*, and print it as a C string when it is compared against an
-// std::string object, for example.
-//
-// The default implementation ignores the type of the other operand.
-// Some specialized versions are used to handle formatting wide or
-// narrow C strings.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-template <typename T1, typename T2>
-String FormatForComparisonFailureMessage(const T1& value,
- const T2& /* other_operand */) {
- return FormatForFailureMessage(value);
-}
-
-// The helper function for {ASSERT|EXPECT}_EQ.
-template <typename T1, typename T2>
-AssertionResult CmpHelperEQ(const char* expected_expression,
- const char* actual_expression,
- const T1& expected,
- const T2& actual) {
- if (expected == actual) {
- return AssertionSuccess();
- }
-
- return EqFailure(expected_expression,
- actual_expression,
- FormatForComparisonFailureMessage(expected, actual),
- FormatForComparisonFailureMessage(actual, expected),
- false);
-}
-
-// With this overloaded version, we allow anonymous enums to be used
-// in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous enums
-// can be implicitly cast to BiggestInt.
-AssertionResult CmpHelperEQ(const char* expected_expression,
- const char* actual_expression,
- BiggestInt expected,
- BiggestInt actual);
-
-// The helper class for {ASSERT|EXPECT}_EQ. The template argument
-// lhs_is_null_literal is true iff the first argument to ASSERT_EQ()
-// is a null pointer literal. The following default implementation is
-// for lhs_is_null_literal being false.
-template <bool lhs_is_null_literal>
-class EqHelper {
- public:
- // This templatized version is for the general case.
- template <typename T1, typename T2>
- static AssertionResult Compare(const char* expected_expression,
- const char* actual_expression,
- const T1& expected,
- const T2& actual) {
- return CmpHelperEQ(expected_expression, actual_expression, expected,
- actual);
- }
-
- // With this overloaded version, we allow anonymous enums to be used
- // in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous
- // enums can be implicitly cast to BiggestInt.
- //
- // Even though its body looks the same as the above version, we
- // cannot merge the two, as it will make anonymous enums unhappy.
- static AssertionResult Compare(const char* expected_expression,
- const char* actual_expression,
- BiggestInt expected,
- BiggestInt actual) {
- return CmpHelperEQ(expected_expression, actual_expression, expected,
- actual);
- }
-};
-
-// This specialization is used when the first argument to ASSERT_EQ()
-// is a null pointer literal.
-template <>
-class EqHelper<true> {
- public:
- // We define two overloaded versions of Compare(). The first
- // version will be picked when the second argument to ASSERT_EQ() is
- // NOT a pointer, e.g. ASSERT_EQ(0, AnIntFunction()) or
- // EXPECT_EQ(false, a_bool).
- template <typename T1, typename T2>
- static AssertionResult Compare(const char* expected_expression,
- const char* actual_expression,
- const T1& expected,
- const T2& actual) {
- return CmpHelperEQ(expected_expression, actual_expression, expected,
- actual);
- }
-
- // This version will be picked when the second argument to
- // ASSERT_EQ() is a pointer, e.g. ASSERT_EQ(NULL, a_pointer).
- template <typename T1, typename T2>
- static AssertionResult Compare(const char* expected_expression,
- const char* actual_expression,
- const T1& expected,
- T2* actual) {
- // We already know that 'expected' is a null pointer.
- return CmpHelperEQ(expected_expression, actual_expression,
- static_cast<T2*>(NULL), actual);
- }
-};
-
-// A macro for implementing the helper functions needed to implement
-// ASSERT_?? and EXPECT_??. It is here just to avoid copy-and-paste
-// of similar code.
-//
-// For each templatized helper function, we also define an overloaded
-// version for BiggestInt in order to reduce code bloat and allow
-// anonymous enums to be used with {ASSERT|EXPECT}_?? when compiled
-// with gcc 4.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
-template <typename T1, typename T2>\
-AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
- const T1& val1, const T2& val2) {\
- if (val1 op val2) {\
- return AssertionSuccess();\
- } else {\
- Message msg;\
- msg << "Expected: (" << expr1 << ") " #op " (" << expr2\
- << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\
- << " vs " << FormatForComparisonFailureMessage(val2, val1);\
- return AssertionFailure(msg);\
- }\
-}\
-AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
- BiggestInt val1, BiggestInt val2);
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-
-// Implements the helper function for {ASSERT|EXPECT}_NE
-GTEST_IMPL_CMP_HELPER_(NE, !=)
-// Implements the helper function for {ASSERT|EXPECT}_LE
-GTEST_IMPL_CMP_HELPER_(LE, <=)
-// Implements the helper function for {ASSERT|EXPECT}_LT
-GTEST_IMPL_CMP_HELPER_(LT, < )
-// Implements the helper function for {ASSERT|EXPECT}_GE
-GTEST_IMPL_CMP_HELPER_(GE, >=)
-// Implements the helper function for {ASSERT|EXPECT}_GT
-GTEST_IMPL_CMP_HELPER_(GT, > )
-
-#undef GTEST_IMPL_CMP_HELPER_
-
-// The helper function for {ASSERT|EXPECT}_STREQ.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-AssertionResult CmpHelperSTREQ(const char* expected_expression,
- const char* actual_expression,
- const char* expected,
- const char* actual);
-
-// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression,
- const char* actual_expression,
- const char* expected,
- const char* actual);
-
-// The helper function for {ASSERT|EXPECT}_STRNE.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-AssertionResult CmpHelperSTRNE(const char* s1_expression,
- const char* s2_expression,
- const char* s1,
- const char* s2);
-
-// The helper function for {ASSERT|EXPECT}_STRCASENE.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
- const char* s2_expression,
- const char* s1,
- const char* s2);
-
-
-// Helper function for *_STREQ on wide strings.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-AssertionResult CmpHelperSTREQ(const char* expected_expression,
- const char* actual_expression,
- const wchar_t* expected,
- const wchar_t* actual);
-
-// Helper function for *_STRNE on wide strings.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-AssertionResult CmpHelperSTRNE(const char* s1_expression,
- const char* s2_expression,
- const wchar_t* s1,
- const wchar_t* s2);
-
-} // namespace internal
-
-// IsSubstring() and IsNotSubstring() are intended to be used as the
-// first argument to {EXPECT,ASSERT}_PRED_FORMAT2(), not by
-// themselves. They check whether needle is a substring of haystack
-// (NULL is considered a substring of itself only), and return an
-// appropriate error message when they fail.
-//
-// The {needle,haystack}_expr arguments are the stringified
-// expressions that generated the two real arguments.
-AssertionResult IsSubstring(
- const char* needle_expr, const char* haystack_expr,
- const char* needle, const char* haystack);
-AssertionResult IsSubstring(
- const char* needle_expr, const char* haystack_expr,
- const wchar_t* needle, const wchar_t* haystack);
-AssertionResult IsNotSubstring(
- const char* needle_expr, const char* haystack_expr,
- const char* needle, const char* haystack);
-AssertionResult IsNotSubstring(
- const char* needle_expr, const char* haystack_expr,
- const wchar_t* needle, const wchar_t* haystack);
-#if GTEST_HAS_STD_STRING
-AssertionResult IsSubstring(
- const char* needle_expr, const char* haystack_expr,
- const ::std::string& needle, const ::std::string& haystack);
-AssertionResult IsNotSubstring(
- const char* needle_expr, const char* haystack_expr,
- const ::std::string& needle, const ::std::string& haystack);
-#endif // GTEST_HAS_STD_STRING
-
-#if GTEST_HAS_STD_WSTRING
-AssertionResult IsSubstring(
- const char* needle_expr, const char* haystack_expr,
- const ::std::wstring& needle, const ::std::wstring& haystack);
-AssertionResult IsNotSubstring(
- const char* needle_expr, const char* haystack_expr,
- const ::std::wstring& needle, const ::std::wstring& haystack);
-#endif // GTEST_HAS_STD_WSTRING
-
-namespace internal {
-
-// Helper template function for comparing floating-points.
-//
-// Template parameter:
-//
-// RawType: the raw floating-point type (either float or double)
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-template <typename RawType>
-AssertionResult CmpHelperFloatingPointEQ(const char* expected_expression,
- const char* actual_expression,
- RawType expected,
- RawType actual) {
- const FloatingPoint<RawType> lhs(expected), rhs(actual);
-
- if (lhs.AlmostEquals(rhs)) {
- return AssertionSuccess();
- }
-
- StrStream expected_ss;
- expected_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
- << expected;
-
- StrStream actual_ss;
- actual_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
- << actual;
-
- return EqFailure(expected_expression,
- actual_expression,
- StrStreamToString(&expected_ss),
- StrStreamToString(&actual_ss),
- false);
-}
-
-// Helper function for implementing ASSERT_NEAR.
-//
-// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-AssertionResult DoubleNearPredFormat(const char* expr1,
- const char* expr2,
- const char* abs_error_expr,
- double val1,
- double val2,
- double abs_error);
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-// A class that enables one to stream messages to assertion macros
-class AssertHelper {
- public:
- // Constructor.
- AssertHelper(TestPartResultType type, const char* file, int line,
- const char* message);
- // Message assignment is a semantic trick to enable assertion
- // streaming; see the GTEST_MESSAGE_ macro below.
- void operator=(const Message& message) const;
- private:
- TestPartResultType const type_;
- const char* const file_;
- int const line_;
- String const message_;
-
- GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper);
-};
-
-} // namespace internal
-
-#ifdef GTEST_HAS_PARAM_TEST
-// The abstract base class that all value-parameterized tests inherit from.
-//
-// This class adds support for accessing the test parameter value via
-// the GetParam() method.
-//
-// Use it with one of the functions that define parameter generators, such as
-// Range(), Values(), ValuesIn(), Bool(), and Combine().
-//
-// class FooTest : public ::testing::TestWithParam<int> {
-// protected:
-// FooTest() {
-// // Can use GetParam() here.
-// }
-// virtual ~FooTest() {
-// // Can use GetParam() here.
-// }
-// virtual void SetUp() {
-// // Can use GetParam() here.
-// }
- // virtual void TearDown() {
-// // Can use GetParam() here.
-// }
-// };
-// TEST_P(FooTest, DoesBar) {
-// // Can use GetParam() method here.
-// Foo foo;
-// ASSERT_TRUE(foo.DoesBar(GetParam()));
-// }
-// INSTANTIATE_TEST_CASE_P(OneToTenRange, FooTest, ::testing::Range(1, 10));
-
-template <typename T>
-class TestWithParam : public Test {
- public:
- typedef T ParamType;
-
- // The current parameter value. Is also available in the test fixture's
- // constructor.
- const ParamType& GetParam() const { return *parameter_; }
-
- private:
- // Sets parameter value. The caller is responsible for making sure the value
- // remains alive and unchanged throughout the current test.
- static void SetParam(const ParamType* parameter) {
- parameter_ = parameter;
- }
-
- // Static value used for accessing parameter during a test lifetime.
- static const ParamType* parameter_;
-
- // TestClass must be a subclass of TestWithParam<T>.
- template <class TestClass> friend class internal::ParameterizedTestFactory;
-};
-
-template <typename T>
-const T* TestWithParam<T>::parameter_ = NULL;
-
-#endif // GTEST_HAS_PARAM_TEST
-
-// Macros for indicating success/failure in test code.
-
-// ADD_FAILURE unconditionally adds a failure to the current test.
-// SUCCEED generates a success - it doesn't automatically make the
-// current test successful, as a test is only successful when it has
-// no failure.
-//
-// EXPECT_* verifies that a certain condition is satisfied. If not,
-// it behaves like ADD_FAILURE. In particular:
-//
-// EXPECT_TRUE verifies that a Boolean condition is true.
-// EXPECT_FALSE verifies that a Boolean condition is false.
-//
-// FAIL and ASSERT_* are similar to ADD_FAILURE and EXPECT_*, except
-// that they will also abort the current function on failure. People
-// usually want the fail-fast behavior of FAIL and ASSERT_*, but those
-// writing data-driven tests often find themselves using ADD_FAILURE
-// and EXPECT_* more.
-//
-// Examples:
-//
-// EXPECT_TRUE(server.StatusIsOK());
-// ASSERT_FALSE(server.HasPendingRequest(port))
-// << "There are still pending requests " << "on port " << port;
-
-// Generates a nonfatal failure with a generic message.
-#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_("Failed")
-
-// Generates a fatal failure with a generic message.
-#define FAIL() GTEST_FATAL_FAILURE_("Failed")
-
-// Generates a success with a generic message.
-#define SUCCEED() GTEST_SUCCESS_("Succeeded")
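An illustrative, deliberately failing test showing the difference spelled out above: EXPECT_* and ADD_FAILURE() record a failure and keep going, while ASSERT_* aborts the current function. The test name is hypothetical.

#include <gtest/gtest.h>

TEST(FailureSemanticsTest, NonFatalVersusFatal) {
  EXPECT_TRUE(false) << "non-fatal: the test body keeps executing";
  ADD_FAILURE() << "another non-fatal failure is recorded";
  ASSERT_TRUE(false) << "fatal: the remainder of this function is skipped";
  SUCCEED();  // never reached because of the ASSERT above
}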
-
-// Macros for testing exceptions.
-//
-// * {ASSERT|EXPECT}_THROW(statement, expected_exception):
-// Tests that the statement throws the expected exception.
-// * {ASSERT|EXPECT}_NO_THROW(statement):
-// Tests that the statement doesn't throw any exception.
-// * {ASSERT|EXPECT}_ANY_THROW(statement):
-// Tests that the statement throws an exception.
-
-#define EXPECT_THROW(statement, expected_exception) \
- GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_)
-#define EXPECT_NO_THROW(statement) \
- GTEST_TEST_NO_THROW_(statement, GTEST_NONFATAL_FAILURE_)
-#define EXPECT_ANY_THROW(statement) \
- GTEST_TEST_ANY_THROW_(statement, GTEST_NONFATAL_FAILURE_)
-#define ASSERT_THROW(statement, expected_exception) \
- GTEST_TEST_THROW_(statement, expected_exception, GTEST_FATAL_FAILURE_)
-#define ASSERT_NO_THROW(statement) \
- GTEST_TEST_NO_THROW_(statement, GTEST_FATAL_FAILURE_)
-#define ASSERT_ANY_THROW(statement) \
- GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_)
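A small sketch of the exception macros above; Throwing() and Safe() are hypothetical helpers defined here only so the example is self-contained.

#include <gtest/gtest.h>
#include <stdexcept>

static void Throwing() { throw std::runtime_error("boom"); }
static void Safe() {}

TEST(ExceptionTest, ThrowExpectations) {
  EXPECT_THROW(Throwing(), std::runtime_error);  // must throw this exact type
  EXPECT_ANY_THROW(Throwing());                  // any exception type will do
  EXPECT_NO_THROW(Safe());                       // must not throw at all
}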
-
-// Boolean assertions.
-#define EXPECT_TRUE(condition) \
- GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \
- GTEST_NONFATAL_FAILURE_)
-#define EXPECT_FALSE(condition) \
- GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \
- GTEST_NONFATAL_FAILURE_)
-#define ASSERT_TRUE(condition) \
- GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \
- GTEST_FATAL_FAILURE_)
-#define ASSERT_FALSE(condition) \
- GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \
- GTEST_FATAL_FAILURE_)
-
-// Includes the auto-generated header that implements a family of
-// generic predicate assertion macros.
-#include <gtest/gtest_pred_impl.h>
-
-// Macros for testing equalities and inequalities.
-//
-// * {ASSERT|EXPECT}_EQ(expected, actual): Tests that expected == actual
-// * {ASSERT|EXPECT}_NE(v1, v2): Tests that v1 != v2
-// * {ASSERT|EXPECT}_LT(v1, v2): Tests that v1 < v2
-// * {ASSERT|EXPECT}_LE(v1, v2): Tests that v1 <= v2
-// * {ASSERT|EXPECT}_GT(v1, v2): Tests that v1 > v2
-// * {ASSERT|EXPECT}_GE(v1, v2): Tests that v1 >= v2
-//
-// When they are not, Google Test prints both the tested expressions and
-// their actual values. The values must be compatible built-in types,
-// or you will get a compiler error. By "compatible" we mean that the
-// values can be compared by the respective operator.
-//
-// Note:
-//
-// 1. It is possible to make a user-defined type work with
-// {ASSERT|EXPECT}_??(), but that requires overloading the
-// comparison operators and is thus discouraged by the Google C++
-// Usage Guide. Therefore, you are advised to use the
-// {ASSERT|EXPECT}_TRUE() macro to assert that two objects are
-// equal.
-//
-// 2. The {ASSERT|EXPECT}_??() macros do pointer comparisons on
-// pointers (in particular, C strings). Therefore, if you use it
-// with two C strings, you are testing how their locations in memory
-// are related, not how their content is related. To compare two C
-// strings by content, use {ASSERT|EXPECT}_STR*().
-//
-// 3. {ASSERT|EXPECT}_EQ(expected, actual) is preferred to
-// {ASSERT|EXPECT}_TRUE(expected == actual), as the former tells you
-// what the actual value is when it fails, and similarly for the
-// other comparisons.
-//
-// 4. Do not depend on the order in which {ASSERT|EXPECT}_??()
-// evaluate their arguments, which is undefined.
-//
-// 5. These macros evaluate their arguments exactly once.
-//
-// Examples:
-//
-// EXPECT_NE(5, Foo());
-// EXPECT_EQ(NULL, a_pointer);
-// ASSERT_LT(i, array_size);
-// ASSERT_GT(records.size(), 0) << "There is no record left.";
-
-#define EXPECT_EQ(expected, actual) \
- EXPECT_PRED_FORMAT2(::testing::internal:: \
- EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \
- expected, actual)
-#define EXPECT_NE(expected, actual) \
- EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNE, expected, actual)
-#define EXPECT_LE(val1, val2) \
- EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
-#define EXPECT_LT(val1, val2) \
- EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
-#define EXPECT_GE(val1, val2) \
- EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
-#define EXPECT_GT(val1, val2) \
- EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
-
-#define ASSERT_EQ(expected, actual) \
- ASSERT_PRED_FORMAT2(::testing::internal:: \
- EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \
- expected, actual)
-#define ASSERT_NE(val1, val2) \
- ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2)
-#define ASSERT_LE(val1, val2) \
- ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
-#define ASSERT_LT(val1, val2) \
- ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
-#define ASSERT_GE(val1, val2) \
- ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
-#define ASSERT_GT(val1, val2) \
- ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
-
-// C String Comparisons. All tests treat NULL and any non-NULL string
-// as different. Two NULLs are equal.
-//
-// * {ASSERT|EXPECT}_STREQ(s1, s2): Tests that s1 == s2
-// * {ASSERT|EXPECT}_STRNE(s1, s2): Tests that s1 != s2
-// * {ASSERT|EXPECT}_STRCASEEQ(s1, s2): Tests that s1 == s2, ignoring case
-// * {ASSERT|EXPECT}_STRCASENE(s1, s2): Tests that s1 != s2, ignoring case
-//
-// For wide or narrow string objects, you can use the
-// {ASSERT|EXPECT}_??() macros.
-//
-// Don't depend on the order in which the arguments are evaluated,
-// which is undefined.
-//
-// These macros evaluate their arguments exactly once.
-
-#define EXPECT_STREQ(expected, actual) \
- EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
-#define EXPECT_STRNE(s1, s2) \
- EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
-#define EXPECT_STRCASEEQ(expected, actual) \
- EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
-#define EXPECT_STRCASENE(s1, s2)\
- EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
-
-#define ASSERT_STREQ(expected, actual) \
- ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
-#define ASSERT_STRNE(s1, s2) \
- ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
-#define ASSERT_STRCASEEQ(expected, actual) \
- ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
-#define ASSERT_STRCASENE(s1, s2)\
- ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
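An illustrative contrast between content comparison with the *_STR* macros above and the pointer comparison that EXPECT_EQ performs on C strings (see the note on pointer comparisons earlier in this header); the buffer and literal are hypothetical.

#include <gtest/gtest.h>

TEST(CStringTest, ContentVersusPointer) {
  char buffer[] = "hello";
  const char* literal = "hello";
  EXPECT_STREQ(literal, buffer);      // passes: identical characters
  EXPECT_STRCASEEQ("HELLO", buffer);  // passes: equal ignoring case
  // EXPECT_EQ(literal, buffer) would compare the two addresses instead of
  // the characters, so it is not a content check.
}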
-
-// Macros for comparing floating-point numbers.
-//
-// * {ASSERT|EXPECT}_FLOAT_EQ(expected, actual):
-// Tests that two float values are almost equal.
-// * {ASSERT|EXPECT}_DOUBLE_EQ(expected, actual):
-// Tests that two double values are almost equal.
-// * {ASSERT|EXPECT}_NEAR(v1, v2, abs_error):
-// Tests that v1 and v2 are within the given distance to each other.
-//
-// Google Test uses ULP-based comparison to automatically pick a default
-// error bound that is appropriate for the operands. See the
-// FloatingPoint template class in gtest-internal.h if you are
-// interested in the implementation details.
-
-#define EXPECT_FLOAT_EQ(expected, actual)\
- EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
- expected, actual)
-
-#define EXPECT_DOUBLE_EQ(expected, actual)\
- EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
- expected, actual)
-
-#define ASSERT_FLOAT_EQ(expected, actual)\
- ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
- expected, actual)
-
-#define ASSERT_DOUBLE_EQ(expected, actual)\
- ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
- expected, actual)
-
-#define EXPECT_NEAR(val1, val2, abs_error)\
- EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
- val1, val2, abs_error)
-
-#define ASSERT_NEAR(val1, val2, abs_error)\
- ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
- val1, val2, abs_error)
-
-// These predicate format functions work on floating-point values, and
-// can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g.
-//
-// EXPECT_PRED_FORMAT2(testing::DoubleLE, Foo(), 5.0);
-
-// Asserts that val1 is less than, or almost equal to, val2. Fails
-// otherwise. In particular, it fails if either val1 or val2 is NaN.
-AssertionResult FloatLE(const char* expr1, const char* expr2,
- float val1, float val2);
-AssertionResult DoubleLE(const char* expr1, const char* expr2,
- double val1, double val2);
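A sketch of the floating-point assertions and predicate formatters declared above; the numeric values are arbitrary.

#include <gtest/gtest.h>

TEST(FloatingPointTest, AlmostEqual) {
  EXPECT_FLOAT_EQ(2.0f, 1.0f + 1.0f);  // exact, trivially within the ULP bound
  EXPECT_DOUBLE_EQ(0.3, 0.1 + 0.2);    // differs by 1 ULP, so ULP-based EQ passes
  EXPECT_NEAR(3.14, 3.141, 0.01);      // explicit absolute error bound
  EXPECT_PRED_FORMAT2(testing::DoubleLE, 1.0, 1.5);  // 1.0 <= 1.5
}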
-
-
-#ifdef GTEST_OS_WINDOWS
-
-// Macros that test for HRESULT failure and success. These are only useful
-// on Windows, and rely on Windows SDK macros and APIs to compile.
-//
-// * {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}(expr)
-//
-// When expr unexpectedly fails or succeeds, Google Test prints the
-// expected result and the actual result with both a human-readable
-// string representation of the error, if available, as well as the
-// hex result code.
-#define EXPECT_HRESULT_SUCCEEDED(expr) \
- EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
-
-#define ASSERT_HRESULT_SUCCEEDED(expr) \
- ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
-
-#define EXPECT_HRESULT_FAILED(expr) \
- EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
-
-#define ASSERT_HRESULT_FAILED(expr) \
- ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
-
-#endif // GTEST_OS_WINDOWS
-
-// Macros that execute statement and check that it doesn't generate new fatal
-// failures in the current thread.
-//
-// * {ASSERT|EXPECT}_NO_FATAL_FAILURE(statement);
-//
-// Examples:
-//
-// EXPECT_NO_FATAL_FAILURE(Process());
-// ASSERT_NO_FATAL_FAILURE(Process()) << "Process() failed";
-//
-#define ASSERT_NO_FATAL_FAILURE(statement) \
- GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_)
-#define EXPECT_NO_FATAL_FAILURE(statement) \
- GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_)
-
-// Causes a trace (including the source file path, the current line
-// number, and the given message) to be included in every test failure
-// message generated by code in the current scope. The effect is
-// undone when the control leaves the current scope.
-//
-// The message argument can be anything streamable to std::ostream.
-//
-// In the implementation, we include the current line number as part
-// of the dummy variable name, thus allowing multiple SCOPED_TRACE()s
-// to appear in the same block - as long as they are on different
-// lines.
-#define SCOPED_TRACE(message) \
- ::testing::internal::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\
- __FILE__, __LINE__, ::testing::Message() << (message))
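A sketch of SCOPED_TRACE() with a helper exercised from several call sites, so failures report which call site triggered them; Square() and CheckSquare() are hypothetical.

#include <gtest/gtest.h>

static int Square(int n) { return n * n; }  // stand-in for the code under test

static void CheckSquare(int n) {
  EXPECT_EQ(n * n, Square(n));
}

TEST(SquareTest, SeveralInputs) {
  {
    SCOPED_TRACE("while checking n = 2");   // appended to any failure below
    CheckSquare(2);
  }
  {
    SCOPED_TRACE("while checking n = 10");  // a separate trace for this block
    CheckSquare(10);
  }
}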
-
-
-// Defines a test.
-//
-// The first parameter is the name of the test case, and the second
-// parameter is the name of the test within the test case.
-//
-// The convention is to end the test case name with "Test". For
-// example, a test case for the Foo class can be named FooTest.
-//
-// The user should put his test code between braces after using this
-// macro. Example:
-//
-// TEST(FooTest, InitializesCorrectly) {
-// Foo foo;
-// EXPECT_TRUE(foo.StatusIsOK());
-// }
-
-// Note that we call GetTestTypeId() instead of GetTypeId<
-// ::testing::Test>() here to get the type ID of testing::Test. This
-// is to work around a suspected linker bug when using Google Test as
-// a framework on Mac OS X. The bug causes GetTypeId<
-// ::testing::Test>() to return different values depending on whether
-// the call is from the Google Test framework itself or from user test
-// code. GetTestTypeId() is guaranteed to always return the same
-// value, as it always calls GetTypeId<>() from the Google Test
-// framework.
-#define TEST(test_case_name, test_name)\
- GTEST_TEST_(test_case_name, test_name,\
- ::testing::Test, ::testing::internal::GetTestTypeId())
-
-
-// Defines a test that uses a test fixture.
-//
-// The first parameter is the name of the test fixture class, which
-// also doubles as the test case name. The second parameter is the
-// name of the test within the test case.
-//
-// A test fixture class must be declared earlier. The user should put
-// his test code between braces after using this macro. Example:
-//
-// class FooTest : public testing::Test {
-// protected:
-// virtual void SetUp() { b_.AddElement(3); }
-//
-// Foo a_;
-// Foo b_;
-// };
-//
-// TEST_F(FooTest, InitializesCorrectly) {
-// EXPECT_TRUE(a_.StatusIsOK());
-// }
-//
-// TEST_F(FooTest, ReturnsElementCountCorrectly) {
-// EXPECT_EQ(0, a_.size());
-// EXPECT_EQ(1, b_.size());
-// }
-
-#define TEST_F(test_fixture, test_name)\
- GTEST_TEST_(test_fixture, test_name, test_fixture,\
- ::testing::internal::GetTypeId<test_fixture>())
-
-// Use this macro in main() to run all tests. It returns 0 if all
-// tests are successful, or 1 otherwise.
-//
-// RUN_ALL_TESTS() should be invoked after the command line has been
-// parsed by InitGoogleTest().
-
-#define RUN_ALL_TESTS()\
- (::testing::UnitTest::GetInstance()->Run())
-
-} // namespace testing
-
-#endif // GTEST_INCLUDE_GTEST_GTEST_H_
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest_pred_impl.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest_pred_impl.h
deleted file mode 100644
index e1e2f8c..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest_pred_impl.h
+++ /dev/null
@@ -1,368 +0,0 @@
-// Copyright 2006, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is AUTOMATICALLY GENERATED on 10/02/2008 by command
-// 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND!
-//
-// Implements a family of generic predicate assertion macros.
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
-#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
-
-// Makes sure this header is not included before gtest.h.
-#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
-#error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
-#endif // GTEST_INCLUDE_GTEST_GTEST_H_
-
-// This header implements a family of generic predicate assertion
-// macros:
-//
-// ASSERT_PRED_FORMAT1(pred_format, v1)
-// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
-// ...
-//
-// where pred_format is a function or functor that takes n (in the
-// case of ASSERT_PRED_FORMATn) values and their source expression
-// text, and returns a testing::AssertionResult. See the definition
-// of ASSERT_EQ in gtest.h for an example.
-//
-// If you don't care about formatting, you can use the more
-// restrictive version:
-//
-// ASSERT_PRED1(pred, v1)
-// ASSERT_PRED2(pred, v1, v2)
-// ...
-//
-// where pred is an n-ary function or functor that returns bool,
-// and the values v1, v2, ..., must support the << operator for
-// streaming to std::ostream.
-//
-// We also define the EXPECT_* variations.
-//
-// For now we only support predicates whose arity is at most 5.
-// Please email googletestframework at googlegroups.com if you need
-// support for higher arities.
-
-// GTEST_ASSERT_ is the basic statement to which all of the assertions
-// in this file reduce. Don't use this in your code.
-
-#define GTEST_ASSERT_(expression, on_failure) \
- GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
- if (const ::testing::AssertionResult gtest_ar = (expression)) \
- ; \
- else \
- on_failure(gtest_ar.failure_message())
-
-
-// Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use
-// this in your code.
-template <typename Pred,
- typename T1>
-AssertionResult AssertPred1Helper(const char* pred_text,
- const char* e1,
- Pred pred,
- const T1& v1) {
- if (pred(v1)) return AssertionSuccess();
-
- Message msg;
- msg << pred_text << "("
- << e1 << ") evaluates to false, where"
- << "\n" << e1 << " evaluates to " << v1;
- return AssertionFailure(msg);
-}
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1.
-// Don't use this in your code.
-#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\
- GTEST_ASSERT_(pred_format(#v1, v1),\
- on_failure)
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED1. Don't use
-// this in your code.
-#define GTEST_PRED1_(pred, v1, on_failure)\
- GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \
- #v1, \
- pred, \
- v1), on_failure)
-
-// Unary predicate assertion macros.
-#define EXPECT_PRED_FORMAT1(pred_format, v1) \
- GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)
-#define EXPECT_PRED1(pred, v1) \
- GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)
-#define ASSERT_PRED_FORMAT1(pred_format, v1) \
- GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
-#define ASSERT_PRED1(pred, v1) \
- GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
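A sketch of the unary predicate macros above, showing both the plain bool-returning form and a custom formatter; IsEven() and IsEvenFormat() are hypothetical.

#include <gtest/gtest.h>

static bool IsEven(int n) { return (n % 2) == 0; }

// Formatter returning a testing::AssertionResult for a tailored message.
static testing::AssertionResult IsEvenFormat(const char* expr, int n) {
  if ((n % 2) == 0) return testing::AssertionSuccess();
  testing::Message msg;
  msg << expr << " = " << n << ", which is odd";
  return testing::AssertionFailure(msg);
}

TEST(PredicateTest, Evenness) {
  EXPECT_PRED1(IsEven, 4);               // failure shows the predicate and value
  EXPECT_PRED_FORMAT1(IsEvenFormat, 4);  // failure shows the custom message
}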
-
-
-
-// Helper function for implementing {EXPECT|ASSERT}_PRED2. Don't use
-// this in your code.
-template <typename Pred,
- typename T1,
- typename T2>
-AssertionResult AssertPred2Helper(const char* pred_text,
- const char* e1,
- const char* e2,
- Pred pred,
- const T1& v1,
- const T2& v2) {
- if (pred(v1, v2)) return AssertionSuccess();
-
- Message msg;
- msg << pred_text << "("
- << e1 << ", "
- << e2 << ") evaluates to false, where"
- << "\n" << e1 << " evaluates to " << v1
- << "\n" << e2 << " evaluates to " << v2;
- return AssertionFailure(msg);
-}
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2.
-// Don't use this in your code.
-#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\
- GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2),\
- on_failure)
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED2. Don't use
-// this in your code.
-#define GTEST_PRED2_(pred, v1, v2, on_failure)\
- GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \
- #v1, \
- #v2, \
- pred, \
- v1, \
- v2), on_failure)
-
-// Binary predicate assertion macros.
-#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \
- GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)
-#define EXPECT_PRED2(pred, v1, v2) \
- GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)
-#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \
- GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
-#define ASSERT_PRED2(pred, v1, v2) \
- GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
-
-
-
-// Helper function for implementing {EXPECT|ASSERT}_PRED3. Don't use
-// this in your code.
-template <typename Pred,
- typename T1,
- typename T2,
- typename T3>
-AssertionResult AssertPred3Helper(const char* pred_text,
- const char* e1,
- const char* e2,
- const char* e3,
- Pred pred,
- const T1& v1,
- const T2& v2,
- const T3& v3) {
- if (pred(v1, v2, v3)) return AssertionSuccess();
-
- Message msg;
- msg << pred_text << "("
- << e1 << ", "
- << e2 << ", "
- << e3 << ") evaluates to false, where"
- << "\n" << e1 << " evaluates to " << v1
- << "\n" << e2 << " evaluates to " << v2
- << "\n" << e3 << " evaluates to " << v3;
- return AssertionFailure(msg);
-}
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3.
-// Don't use this in your code.
-#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\
- GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3),\
- on_failure)
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED3. Don't use
-// this in your code.
-#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\
- GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \
- #v1, \
- #v2, \
- #v3, \
- pred, \
- v1, \
- v2, \
- v3), on_failure)
-
-// Ternary predicate assertion macros.
-#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \
- GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
-#define EXPECT_PRED3(pred, v1, v2, v3) \
- GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
-#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \
- GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
-#define ASSERT_PRED3(pred, v1, v2, v3) \
- GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
-
-
-
-// Helper function for implementing {EXPECT|ASSERT}_PRED4. Don't use
-// this in your code.
-template <typename Pred,
- typename T1,
- typename T2,
- typename T3,
- typename T4>
-AssertionResult AssertPred4Helper(const char* pred_text,
- const char* e1,
- const char* e2,
- const char* e3,
- const char* e4,
- Pred pred,
- const T1& v1,
- const T2& v2,
- const T3& v3,
- const T4& v4) {
- if (pred(v1, v2, v3, v4)) return AssertionSuccess();
-
- Message msg;
- msg << pred_text << "("
- << e1 << ", "
- << e2 << ", "
- << e3 << ", "
- << e4 << ") evaluates to false, where"
- << "\n" << e1 << " evaluates to " << v1
- << "\n" << e2 << " evaluates to " << v2
- << "\n" << e3 << " evaluates to " << v3
- << "\n" << e4 << " evaluates to " << v4;
- return AssertionFailure(msg);
-}
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.
-// Don't use this in your code.
-#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\
- GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4),\
- on_failure)
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED4. Don't use
-// this in your code.
-#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\
- GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \
- #v1, \
- #v2, \
- #v3, \
- #v4, \
- pred, \
- v1, \
- v2, \
- v3, \
- v4), on_failure)
-
-// 4-ary predicate assertion macros.
-#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
- GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
-#define EXPECT_PRED4(pred, v1, v2, v3, v4) \
- GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
-#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
- GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
-#define ASSERT_PRED4(pred, v1, v2, v3, v4) \
- GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
-
-
-
-// Helper function for implementing {EXPECT|ASSERT}_PRED5. Don't use
-// this in your code.
-template <typename Pred,
- typename T1,
- typename T2,
- typename T3,
- typename T4,
- typename T5>
-AssertionResult AssertPred5Helper(const char* pred_text,
- const char* e1,
- const char* e2,
- const char* e3,
- const char* e4,
- const char* e5,
- Pred pred,
- const T1& v1,
- const T2& v2,
- const T3& v3,
- const T4& v4,
- const T5& v5) {
- if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess();
-
- Message msg;
- msg << pred_text << "("
- << e1 << ", "
- << e2 << ", "
- << e3 << ", "
- << e4 << ", "
- << e5 << ") evaluates to false, where"
- << "\n" << e1 << " evaluates to " << v1
- << "\n" << e2 << " evaluates to " << v2
- << "\n" << e3 << " evaluates to " << v3
- << "\n" << e4 << " evaluates to " << v4
- << "\n" << e5 << " evaluates to " << v5;
- return AssertionFailure(msg);
-}
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5.
-// Don't use this in your code.
-#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\
- GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5),\
- on_failure)
-
-// Internal macro for implementing {EXPECT|ASSERT}_PRED5. Don't use
-// this in your code.
-#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\
- GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \
- #v1, \
- #v2, \
- #v3, \
- #v4, \
- #v5, \
- pred, \
- v1, \
- v2, \
- v3, \
- v4, \
- v5), on_failure)
-
-// 5-ary predicate assertion macros.
-#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
- GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
-#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \
- GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
-#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
- GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
-#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \
- GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
-
-
-
-#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest_prod.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest_prod.h
deleted file mode 100644
index da80ddc..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/gtest_prod.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2006, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan at google.com (Zhanyong Wan)
-//
-// Google C++ Testing Framework definitions useful in production code.
-
-#ifndef GTEST_INCLUDE_GTEST_GTEST_PROD_H_
-#define GTEST_INCLUDE_GTEST_GTEST_PROD_H_
-
-// When you need to test the private or protected members of a class,
-// use the FRIEND_TEST macro to declare your tests as friends of the
-// class. For example:
-//
-// class MyClass {
-// private:
-// void MyMethod();
-// FRIEND_TEST(MyClassTest, MyMethod);
-// };
-//
-// class MyClassTest : public testing::Test {
-// // ...
-// };
-//
-// TEST_F(MyClassTest, MyMethod) {
-// // Can call MyClass::MyMethod() here.
-// }
-
-#define FRIEND_TEST(test_case_name, test_name)\
-friend class test_case_name##_##test_name##_Test
-
-#endif // GTEST_INCLUDE_GTEST_GTEST_PROD_H_
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-death-test-internal.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-death-test-internal.h
deleted file mode 100644
index 0769fca..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-death-test-internal.h
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors: wan at google.com (Zhanyong Wan), eefacm at gmail.com (Sean Mcafee)
-//
-// The Google C++ Testing Framework (Google Test)
-//
-// This header file defines internal utilities needed for implementing
-// death tests. They are subject to change without notice.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
-
-#include <gtest/internal/gtest-internal.h>
-
-namespace testing {
-namespace internal {
-
-GTEST_DECLARE_string_(internal_run_death_test);
-
-// Names of the flags (needed for parsing Google Test flags).
-const char kDeathTestStyleFlag[] = "death_test_style";
-const char kInternalRunDeathTestFlag[] = "internal_run_death_test";
-
-#ifdef GTEST_HAS_DEATH_TEST
-
-// DeathTest is a class that hides much of the complexity of the
-// GTEST_DEATH_TEST_ macro. It is abstract; its static Create method
-// returns a concrete class that depends on the prevailing death test
-// style, as defined by the --gtest_death_test_style and/or
-// --gtest_internal_run_death_test flags.
-
-// In describing the results of death tests, these terms are used with
-// the corresponding definitions:
-//
-// exit status: The integer exit information in the format specified
-// by wait(2)
-// exit code: The integer code passed to exit(3), _exit(2), or
-// returned from main()
-class DeathTest {
- public:
- // Create returns false if there was an error determining the
- // appropriate action to take for the current death test; for example,
- // if the gtest_death_test_style flag is set to an invalid value.
- // The LastMessage method will return a more detailed message in that
- // case. Otherwise, the DeathTest pointer pointed to by the "test"
- // argument is set. If the death test should be skipped, the pointer
- // is set to NULL; otherwise, it is set to the address of a new concrete
- // DeathTest object that controls the execution of the current test.
- static bool Create(const char* statement, const RE* regex,
- const char* file, int line, DeathTest** test);
- DeathTest();
- virtual ~DeathTest() { }
-
- // A helper class that aborts a death test when it's deleted.
- class ReturnSentinel {
- public:
- explicit ReturnSentinel(DeathTest* test) : test_(test) { }
- ~ReturnSentinel() { test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT); }
- private:
- DeathTest* const test_;
- GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel);
- } GTEST_ATTRIBUTE_UNUSED_;
-
- // An enumeration of possible roles that may be taken when a death
- // test is encountered. EXECUTE means that the death test logic should
- // be executed immediately. OVERSEE means that the program should prepare
- // the appropriate environment for a child process to execute the death
- // test, then wait for it to complete.
- enum TestRole { OVERSEE_TEST, EXECUTE_TEST };
-
- // An enumeration of the two reasons that a test might be aborted.
- enum AbortReason { TEST_ENCOUNTERED_RETURN_STATEMENT, TEST_DID_NOT_DIE };
-
- // Assumes one of the above roles.
- virtual TestRole AssumeRole() = 0;
-
- // Waits for the death test to finish and returns its status.
- virtual int Wait() = 0;
-
- // Returns true if the death test passed; that is, the test process
- // exited during the test, its exit status matches a user-supplied
- // predicate, and its stderr output matches a user-supplied regular
- // expression.
- // The user-supplied predicate may be a macro expression rather
- // than a function pointer or functor, or else Wait and Passed could
- // be combined.
- virtual bool Passed(bool exit_status_ok) = 0;
-
- // Signals that the death test did not die as expected.
- virtual void Abort(AbortReason reason) = 0;
-
- // Returns a human-readable message describing the outcome of
- // the last death test.
- static const char* LastMessage();
-
- private:
- GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest);
-};
-
-// Factory interface for death tests. May be mocked out for testing.
-class DeathTestFactory {
- public:
- virtual ~DeathTestFactory() { }
- virtual bool Create(const char* statement, const RE* regex,
- const char* file, int line, DeathTest** test) = 0;
-};
-
-// A concrete DeathTestFactory implementation for normal use.
-class DefaultDeathTestFactory : public DeathTestFactory {
- public:
- virtual bool Create(const char* statement, const RE* regex,
- const char* file, int line, DeathTest** test);
-};
-
-// Returns true if exit_status describes a process that was terminated
-// by a signal, or exited normally with a nonzero exit code.
-bool ExitedUnsuccessfully(int exit_status);
-
-// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*,
-// ASSERT_EXIT*, and EXPECT_EXIT*.
-#define GTEST_DEATH_TEST_(statement, predicate, regex, fail) \
- GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
- if (true) { \
- const ::testing::internal::RE& gtest_regex = (regex); \
- ::testing::internal::DeathTest* gtest_dt; \
- if (!::testing::internal::DeathTest::Create(#statement, &gtest_regex, \
- __FILE__, __LINE__, &gtest_dt)) { \
- goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
- } \
- if (gtest_dt != NULL) { \
- ::testing::internal::scoped_ptr< ::testing::internal::DeathTest> \
- gtest_dt_ptr(gtest_dt); \
- switch (gtest_dt->AssumeRole()) { \
- case ::testing::internal::DeathTest::OVERSEE_TEST: \
- if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) { \
- goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
- } \
- break; \
- case ::testing::internal::DeathTest::EXECUTE_TEST: { \
- ::testing::internal::DeathTest::ReturnSentinel \
- gtest_sentinel(gtest_dt); \
- { statement; } \
- gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \
- break; \
- } \
- } \
- } \
- } else \
- GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__): \
- fail(::testing::internal::DeathTest::LastMessage())
-// The symbol "fail" here expands to something into which a message
-// can be streamed.
-
-// A struct representing the parsed contents of the
-// --gtest_internal_run_death_test flag, as it existed when
-// RUN_ALL_TESTS was called.
-struct InternalRunDeathTestFlag {
- String file;
- int line;
- int index;
- int status_fd;
-};
-
-// Returns a newly created InternalRunDeathTestFlag object with fields
-// initialized from the GTEST_FLAG(internal_run_death_test) flag if
-// the flag is specified; otherwise returns NULL.
-InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag();
-
-#endif // GTEST_HAS_DEATH_TEST
-
-} // namespace internal
-} // namespace testing
-
-#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
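The header deleted above backs the public death-test macros (ASSERT_DEATH, EXPECT_DEATH, ASSERT_EXIT, EXPECT_EXIT): GTEST_DEATH_TEST_ runs the statement in a child process that takes the EXECUTE_TEST role, while the parent, in the OVERSEE_TEST role, waits and checks the exit status and the stderr output. A minimal usage sketch of those public macros follows; the file name and build line are illustrative assumptions, not part of this tree.

// death_test_example.cpp (illustrative only, not part of this tree)
// Build roughly as: g++ death_test_example.cpp -lgtest -lgtest_main -pthread
#include <cstdio>
#include <cstdlib>

#include <gtest/gtest.h>

// A function that terminates the process, which is what a death test expects.
static void DieWithMessage() {
  std::fprintf(stderr, "invalid configuration\n");
  std::exit(1);
}

#ifdef GTEST_HAS_DEATH_TEST
// EXPECT_DEATH runs the statement in a child process and verifies that the
// child exited unsuccessfully and that its stderr matches the regex.
TEST(DeathTestExample, DiesWithDiagnostic) {
  EXPECT_DEATH(DieWithMessage(), "invalid configuration");
}

// ASSERT_EXIT takes an explicit exit-status predicate; ExitedWithCode(1)
// checks the "exit code" sense described in the deleted header, while
// ::testing::KilledBySignal() would check the signal case.
TEST(DeathTestExample, ExitsWithCodeOne) {
  ASSERT_EXIT(DieWithMessage(), ::testing::ExitedWithCode(1),
              "invalid configuration");
}
#endif  // GTEST_HAS_DEATH_TEST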
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-filepath.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-filepath.h
deleted file mode 100644
index 9a0682a..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-filepath.h
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2008, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: keith.ray at gmail.com (Keith Ray)
-//
-// Google Test filepath utilities
-//
-// This header file declares classes and functions used internally by
-// Google Test. They are subject to change without notice.
-//
-// This file is #included in testing/base/internal/gtest-internal.h
-// Do not include this header file separately!
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
-
-#include <gtest/internal/gtest-string.h>
-
-namespace testing {
-namespace internal {
-
-// FilePath - a class for file and directory pathname manipulation which
-// handles platform-specific conventions (like the pathname separator).
-// Used for helper functions for naming files in a directory for xml output.
-// Except for Set methods, all methods are const or static, which provides an
-// "immutable value object" -- useful for peace of mind.
-// A FilePath with a value ending in a path separator ("like/this/") represents
-// a directory, otherwise it is assumed to represent a file. In either case,
-// it may or may not represent an actual file or directory in the file system.
-// Names are NOT checked for syntax correctness -- no checking for illegal
-// characters, malformed paths, etc.
-
-class FilePath {
- public:
- FilePath() : pathname_("") { }
- FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { }
-
- explicit FilePath(const char* pathname) : pathname_(pathname) {
- Normalize();
- }
-
- explicit FilePath(const String& pathname) : pathname_(pathname) {
- Normalize();
- }
-
- FilePath& operator=(const FilePath& rhs) {
- Set(rhs);
- return *this;
- }
-
- void Set(const FilePath& rhs) {
- pathname_ = rhs.pathname_;
- }
-
- String ToString() const { return pathname_; }
- const char* c_str() const { return pathname_.c_str(); }
-
- // Returns the current working directory, or "" if unsuccessful.
- static FilePath GetCurrentDir();
-
- // Given directory = "dir", base_name = "test", number = 0,
- // extension = "xml", returns "dir/test.xml". If number is greater
- // than zero (e.g., 12), returns "dir/test_12.xml".
- // On Windows platform, uses \ as the separator rather than /.
- static FilePath MakeFileName(const FilePath& directory,
- const FilePath& base_name,
- int number,
- const char* extension);
-
- // Returns a pathname for a file that does not currently exist. The pathname
- // will be directory/base_name.extension or
- // directory/base_name_<number>.extension if directory/base_name.extension
- // already exists. The number will be incremented until a pathname is found
- // that does not already exist.
- // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
- // There could be a race condition if two or more processes are calling this
- // function at the same time -- they could both pick the same filename.
- static FilePath GenerateUniqueFileName(const FilePath& directory,
- const FilePath& base_name,
- const char* extension);
-
- // Returns true iff the path is NULL or "".
- bool IsEmpty() const { return c_str() == NULL || *c_str() == '\0'; }
-
- // If input name has a trailing separator character, removes it and returns
- // the name, otherwise return the name string unmodified.
- // On Windows platform, uses \ as the separator, other platforms use /.
- FilePath RemoveTrailingPathSeparator() const;
-
- // Returns a copy of the FilePath with the directory part removed.
- // Example: FilePath("path/to/file").RemoveDirectoryName() returns
- // FilePath("file"). If there is no directory part ("just_a_file"), it returns
- // the FilePath unmodified. If there is no file part ("just_a_dir/") it
- // returns an empty FilePath ("").
- // On Windows platform, '\' is the path separator, otherwise it is '/'.
- FilePath RemoveDirectoryName() const;
-
- // RemoveFileName returns the directory path with the filename removed.
- // Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
- // If the FilePath is "a_file" or "/a_file", RemoveFileName returns
- // FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
- // not have a file, like "just/a/dir/", it returns the FilePath unmodified.
- // On Windows platform, '\' is the path separator, otherwise it is '/'.
- FilePath RemoveFileName() const;
-
- // Returns a copy of the FilePath with the case-insensitive extension removed.
- // Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
- // FilePath("dir/file"). If a case-insensitive extension is not
- // found, returns a copy of the original FilePath.
- FilePath RemoveExtension(const char* extension) const;
-
- // Creates directories so that path exists. Returns true if successful or if
- // the directories already exist; returns false if unable to create
- // directories for any reason. Will also return false if the FilePath does
- // not represent a directory (that is, it doesn't end with a path separator).
- bool CreateDirectoriesRecursively() const;
-
- // Create the directory so that path exists. Returns true if successful or
- // if the directory already exists; returns false if unable to create the
- // directory for any reason, including if the parent directory does not
- // exist. Not named "CreateDirectory" because that's a macro on Windows.
- bool CreateFolder() const;
-
- // Returns true if FilePath describes something in the file-system,
- // either a file, directory, or whatever, and that something exists.
- bool FileOrDirectoryExists() const;
-
- // Returns true if pathname describes a directory in the file-system
- // that exists.
- bool DirectoryExists() const;
-
- // Returns true if FilePath ends with a path separator, which indicates that
- // it is intended to represent a directory. Returns false otherwise.
- // This does NOT check that a directory (or file) actually exists.
- bool IsDirectory() const;
-
- // Returns true if pathname describes a root directory. (Windows has one
- // root directory per disk drive.)
- bool IsRootDirectory() const;
-
- private:
- // Replaces multiple consecutive separators with a single separator.
- // For example, "bar///foo" becomes "bar/foo". Does not eliminate other
- // redundancies that might be in a pathname involving "." or "..".
- //
- // A pathname with multiple consecutive separators may occur either through
- // user error or as a result of some scripts or APIs that generate a pathname
- // with a trailing separator. On other platforms the same API or script
- // may NOT generate a pathname with a trailing "/". Then elsewhere that
- // pathname may have another "/" and pathname components added to it,
- // without checking for the separator already being there.
- // The script language and operating system may allow paths like "foo//bar"
- // but some of the functions in FilePath will not handle that correctly. In
- // particular, RemoveTrailingPathSeparator() only removes one separator, and
- // it is called in CreateDirectoriesRecursively() assuming that it will change
- // a pathname from directory syntax (trailing separator) to filename syntax.
-
- void Normalize();
-
- String pathname_;
-}; // class FilePath
-
-} // namespace internal
-} // namespace testing
-
-#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
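The FilePath class deleted above documents MakeFileName() as producing "dir/test.xml" for number 0 and "dir/test_12.xml" otherwise, with a platform-specific separator and Normalize() collapsing repeated separators. Below is a standalone sketch of just that naming rule, not the gtest code itself; the file and function names are placeholders and the separator is simplified to '/'.

// makefilename_sketch.cpp (illustrative only, not part of this tree)
#include <iostream>
#include <sstream>
#include <string>

// Mirrors the documented MakeFileName() naming rule with '/' as the only
// separator; the real FilePath also uses '\\' on Windows and normalizes
// repeated separators.
static std::string MakeFileName(const std::string& directory,
                                const std::string& base_name,
                                int number,
                                const std::string& extension) {
  std::ostringstream out;
  out << directory;
  if (!directory.empty() && directory[directory.size() - 1] != '/')
    out << '/';                        // exactly one separator between parts
  out << base_name;
  if (number > 0)
    out << '_' << number;              // e.g. "test_12" instead of "test"
  out << '.' << extension;
  return out.str();
}

int main() {
  std::cout << MakeFileName("dir", "test", 0, "xml") << "\n";   // dir/test.xml
  std::cout << MakeFileName("dir", "test", 12, "xml") << "\n";  // dir/test_12.xml
  return 0;
}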
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-internal-inl.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-internal-inl.h
deleted file mode 100644
index b8f67c1..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-internal-inl.h
+++ /dev/null
@@ -1,1267 +0,0 @@
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Utility functions and classes used by the Google C++ testing framework.
-//
-// Author: wan at google.com (Zhanyong Wan)
-//
-// This file contains purely Google Test's internal implementation. Please
-// DO NOT #INCLUDE IT IN A USER PROGRAM.
-
-#ifndef GTEST_SRC_GTEST_INTERNAL_INL_H_
-#define GTEST_SRC_GTEST_INTERNAL_INL_H_
-
-// GTEST_IMPLEMENTATION is defined iff the current translation unit is
-// part of Google Test's implementation.
-#ifndef GTEST_IMPLEMENTATION
-// A user is trying to include this from his code - just say no.
-#error "gtest-internal-inl.h is part of Google Test's internal implementation."
-#error "It must not be included except by Google Test itself."
-#endif // GTEST_IMPLEMENTATION
-
-#include <stddef.h>
-
-#include <gtest/internal/gtest-port.h>
-
-#ifdef GTEST_OS_WINDOWS
-#include <windows.h> // NOLINT
-#endif // GTEST_OS_WINDOWS
-
-#include <gtest/gtest.h>
-#include <gtest/gtest-spi.h>
-
-namespace testing {
-
-// Declares the flags.
-//
-// We don't want the users to modify these flags in the code, but want
-// Google Test's own unit tests to be able to access them. Therefore we
-// declare them here as opposed to in gtest.h.
-GTEST_DECLARE_bool_(break_on_failure);
-GTEST_DECLARE_bool_(catch_exceptions);
-GTEST_DECLARE_string_(color);
-GTEST_DECLARE_string_(filter);
-GTEST_DECLARE_bool_(list_tests);
-GTEST_DECLARE_string_(output);
-GTEST_DECLARE_bool_(print_time);
-GTEST_DECLARE_int32_(repeat);
-GTEST_DECLARE_int32_(stack_trace_depth);
-GTEST_DECLARE_bool_(show_internal_stack_frames);
-
-namespace internal {
-
-// The value of GetTestTypeId() as seen from within the Google Test
-// library. This is solely for testing GetTestTypeId().
-extern const TypeId kTestTypeIdInGoogleTest;
-
-// Names of the flags (needed for parsing Google Test flags).
-const char kBreakOnFailureFlag[] = "break_on_failure";
-const char kCatchExceptionsFlag[] = "catch_exceptions";
-const char kColorFlag[] = "color";
-const char kFilterFlag[] = "filter";
-const char kListTestsFlag[] = "list_tests";
-const char kOutputFlag[] = "output";
-const char kPrintTimeFlag[] = "print_time";
-const char kRepeatFlag[] = "repeat";
-
-// This class saves the values of all Google Test flags in its c'tor, and
-// restores them in its d'tor.
-class GTestFlagSaver {
- public:
- // The c'tor.
- GTestFlagSaver() {
- break_on_failure_ = GTEST_FLAG(break_on_failure);
- catch_exceptions_ = GTEST_FLAG(catch_exceptions);
- color_ = GTEST_FLAG(color);
- death_test_style_ = GTEST_FLAG(death_test_style);
- filter_ = GTEST_FLAG(filter);
- internal_run_death_test_ = GTEST_FLAG(internal_run_death_test);
- list_tests_ = GTEST_FLAG(list_tests);
- output_ = GTEST_FLAG(output);
- print_time_ = GTEST_FLAG(print_time);
- repeat_ = GTEST_FLAG(repeat);
- }
-
- // The d'tor is not virtual. DO NOT INHERIT FROM THIS CLASS.
- ~GTestFlagSaver() {
- GTEST_FLAG(break_on_failure) = break_on_failure_;
- GTEST_FLAG(catch_exceptions) = catch_exceptions_;
- GTEST_FLAG(color) = color_;
- GTEST_FLAG(death_test_style) = death_test_style_;
- GTEST_FLAG(filter) = filter_;
- GTEST_FLAG(internal_run_death_test) = internal_run_death_test_;
- GTEST_FLAG(list_tests) = list_tests_;
- GTEST_FLAG(output) = output_;
- GTEST_FLAG(print_time) = print_time_;
- GTEST_FLAG(repeat) = repeat_;
- }
- private:
- // Fields for saving the original values of flags.
- bool break_on_failure_;
- bool catch_exceptions_;
- String color_;
- String death_test_style_;
- String filter_;
- String internal_run_death_test_;
- bool list_tests_;
- String output_;
- bool print_time_;
- bool pretty_;
- internal::Int32 repeat_;
-} GTEST_ATTRIBUTE_UNUSED_;
-
-// Converts a Unicode code point to a narrow string in UTF-8 encoding.
-// code_point parameter is of type UInt32 because wchar_t may not be
-// wide enough to contain a code point.
- // The output buffer str must contain at least 32 characters.
-// The function returns the address of the output buffer.
-// If the code_point is not a valid Unicode code point
-// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be output
-// as '(Invalid Unicode 0xXXXXXXXX)'.
-char* CodePointToUtf8(UInt32 code_point, char* str);
-
-// Converts a wide string to a narrow string in UTF-8 encoding.
-// The wide string is assumed to have the following encoding:
-// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
-// UTF-32 if sizeof(wchar_t) == 4 (on Linux)
-// Parameter str points to a null-terminated wide string.
-// Parameter num_chars may additionally limit the number
-// of wchar_t characters processed. -1 is used when the entire string
-// should be processed.
-// If the string contains code points that are not valid Unicode code points
-// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
-// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding
-// and contains invalid UTF-16 surrogate pairs, values in those pairs
- // will be encoded as individual Unicode characters from the Basic Multilingual Plane.
-String WideStringToUtf8(const wchar_t* str, int num_chars);
-
-// Returns the number of active threads, or 0 when there is an error.
-size_t GetThreadCount();
-
-// List is a simple singly-linked list container.
-//
-// We cannot use std::list as Microsoft's implementation of STL has
- // problems when exceptions are disabled. There is a hack to work
-// around this, but we've seen cases where the hack fails to work.
-//
-// TODO(wan): switch to std::list when we have a reliable fix for the
-// STL problem, e.g. when we upgrade to the next version of Visual
-// C++, or (more likely) switch to STLport.
-//
-// The element type must support copy constructor.
-
-// Forward declare List
-template <typename E> // E is the element type.
-class List;
-
-// ListNode is a node in a singly-linked list. It consists of an
-// element and a pointer to the next node. The last node in the list
-// has a NULL value for its next pointer.
-template <typename E> // E is the element type.
-class ListNode {
- friend class List<E>;
-
- private:
-
- E element_;
- ListNode * next_;
-
- // The c'tor is private so that a ListNode object can only be created
- // by the ListNode class itself and by its friend class List.
- //
- // Creates a node with a given element value. The next pointer is
- // set to NULL.
- //
- // ListNode does NOT have a default constructor. Always use this
- // constructor (with parameter) to create a ListNode object.
- explicit ListNode(const E & element) : element_(element), next_(NULL) {}
-
- // We disallow copying ListNode
- GTEST_DISALLOW_COPY_AND_ASSIGN_(ListNode);
-
- public:
-
- // Gets the element in this node.
- E & element() { return element_; }
- const E & element() const { return element_; }
-
- // Gets the next node in the list.
- ListNode * next() { return next_; }
- const ListNode * next() const { return next_; }
-};
-
-
-// List is a simple singly-linked list container.
-template <typename E> // E is the element type.
-class List {
- public:
-
- // Creates an empty list.
- List() : head_(NULL), last_(NULL), size_(0) {}
-
- // D'tor.
- virtual ~List();
-
- // Clears the list.
- void Clear() {
- if ( size_ > 0 ) {
- // 1. Deletes every node.
- ListNode<E> * node = head_;
- ListNode<E> * next = node->next();
- for ( ; ; ) {
- delete node;
- node = next;
- if ( node == NULL ) break;
- next = node->next();
- }
-
- // 2. Resets the member variables.
- head_ = last_ = NULL;
- size_ = 0;
- }
- }
-
- // Gets the number of elements.
- int size() const { return size_; }
-
- // Returns true if the list is empty.
- bool IsEmpty() const { return size() == 0; }
-
- // Gets the first element of the list, or NULL if the list is empty.
- ListNode<E> * Head() { return head_; }
- const ListNode<E> * Head() const { return head_; }
-
- // Gets the last element of the list, or NULL if the list is empty.
- ListNode<E> * Last() { return last_; }
- const ListNode<E> * Last() const { return last_; }
-
- // Adds an element to the end of the list. A copy of the element is
- // created using the copy constructor, and then stored in the list.
- // Changes made to the element in the list don't affect the source
- // object, and vice versa.
- void PushBack(const E & element) {
- ListNode<E> * new_node = new ListNode<E>(element);
-
- if ( size_ == 0 ) {
- head_ = last_ = new_node;
- size_ = 1;
- } else {
- last_->next_ = new_node;
- last_ = new_node;
- size_++;
- }
- }
-
- // Adds an element to the beginning of this list.
- void PushFront(const E& element) {
- ListNode<E>* const new_node = new ListNode<E>(element);
-
- if ( size_ == 0 ) {
- head_ = last_ = new_node;
- size_ = 1;
- } else {
- new_node->next_ = head_;
- head_ = new_node;
- size_++;
- }
- }
-
- // Removes an element from the beginning of this list. If the
- // result argument is not NULL, the removed element is stored in the
- // memory it points to. Otherwise the element is thrown away.
- // Returns true iff the list wasn't empty before the operation.
- bool PopFront(E* result) {
- if (size_ == 0) return false;
-
- if (result != NULL) {
- *result = head_->element_;
- }
-
- ListNode<E>* const old_head = head_;
- size_--;
- if (size_ == 0) {
- head_ = last_ = NULL;
- } else {
- head_ = head_->next_;
- }
- delete old_head;
-
- return true;
- }
-
- // Inserts an element after a given node in the list. It's the
- // caller's responsibility to ensure that the given node is in the
- // list. If the given node is NULL, inserts the element at the
- // front of the list.
- ListNode<E>* InsertAfter(ListNode<E>* node, const E& element) {
- if (node == NULL) {
- PushFront(element);
- return Head();
- }
-
- ListNode<E>* const new_node = new ListNode<E>(element);
- new_node->next_ = node->next_;
- node->next_ = new_node;
- size_++;
- if (node == last_) {
- last_ = new_node;
- }
-
- return new_node;
- }
-
- // Returns the number of elements that satisfy a given predicate.
- // The parameter 'predicate' is a Boolean function or functor that
- // accepts a 'const E &', where E is the element type.
- template <typename P> // P is the type of the predicate function/functor
- int CountIf(P predicate) const {
- int count = 0;
- for ( const ListNode<E> * node = Head();
- node != NULL;
- node = node->next() ) {
- if ( predicate(node->element()) ) {
- count++;
- }
- }
-
- return count;
- }
-
- // Applies a function/functor to each element in the list. The
- // parameter 'functor' is a function/functor that accepts a 'const
- // E &', where E is the element type. This method does not change
- // the elements.
- template <typename F> // F is the type of the function/functor
- void ForEach(F functor) const {
- for ( const ListNode<E> * node = Head();
- node != NULL;
- node = node->next() ) {
- functor(node->element());
- }
- }
-
- // Returns the first node whose element satisfies a given predicate,
- // or NULL if none is found. The parameter 'predicate' is a
- // function/functor that accepts a 'const E &', where E is the
- // element type. This method does not change the elements.
- template <typename P> // P is the type of the predicate function/functor.
- const ListNode<E> * FindIf(P predicate) const {
- for ( const ListNode<E> * node = Head();
- node != NULL;
- node = node->next() ) {
- if ( predicate(node->element()) ) {
- return node;
- }
- }
-
- return NULL;
- }
-
- template <typename P>
- ListNode<E> * FindIf(P predicate) {
- for ( ListNode<E> * node = Head();
- node != NULL;
- node = node->next() ) {
- if ( predicate(node->element() ) ) {
- return node;
- }
- }
-
- return NULL;
- }
-
- private:
- ListNode<E>* head_; // The first node of the list.
- ListNode<E>* last_; // The last node of the list.
- int size_; // The number of elements in the list.
-
- // We disallow copying List.
- GTEST_DISALLOW_COPY_AND_ASSIGN_(List);
-};
-
-// The virtual destructor of List.
-template <typename E>
-List<E>::~List() {
- Clear();
-}
-
-// A function for deleting an object. Handy for being used as a
-// functor.
-template <typename T>
-static void Delete(T * x) {
- delete x;
-}
-
-// A copyable object representing a user specified test property which can be
-// output as a key/value string pair.
-//
-// Don't inherit from TestProperty as its destructor is not virtual.
-class TestProperty {
- public:
- // C'tor. TestProperty does NOT have a default constructor.
- // Always use this constructor (with parameters) to create a
- // TestProperty object.
- TestProperty(const char* key, const char* value) :
- key_(key), value_(value) {
- }
-
- // Gets the user supplied key.
- const char* key() const {
- return key_.c_str();
- }
-
- // Gets the user supplied value.
- const char* value() const {
- return value_.c_str();
- }
-
- // Sets a new value, overriding the one supplied in the constructor.
- void SetValue(const char* new_value) {
- value_ = new_value;
- }
-
- private:
- // The key supplied by the user.
- String key_;
- // The value supplied by the user.
- String value_;
-};
-
-// A predicate that checks the key of a TestProperty against a known key.
-//
-// TestPropertyKeyIs is copyable.
-class TestPropertyKeyIs {
- public:
- // Constructor.
- //
- // TestPropertyKeyIs has NO default constructor.
- explicit TestPropertyKeyIs(const char* key)
- : key_(key) {}
-
- // Returns true iff the key of the given test property matches key_.
- bool operator()(const TestProperty& test_property) const {
- return String(test_property.key()).Compare(key_) == 0;
- }
-
- private:
- String key_;
-};
-
-// The result of a single Test. This includes a list of
-// TestPartResults, a list of TestProperties, a count of how many
-// death tests there are in the Test, and how much time it took to run
-// the Test.
-//
-// TestResult is not copyable.
-class TestResult {
- public:
- // Creates an empty TestResult.
- TestResult();
-
- // D'tor. Do not inherit from TestResult.
- ~TestResult();
-
- // Gets the list of TestPartResults.
- const internal::List<TestPartResult> & test_part_results() const {
- return test_part_results_;
- }
-
- // Gets the list of TestProperties.
- const internal::List<internal::TestProperty> & test_properties() const {
- return test_properties_;
- }
-
- // Gets the number of successful test parts.
- int successful_part_count() const;
-
- // Gets the number of failed test parts.
- int failed_part_count() const;
-
- // Gets the number of all test parts. This is the sum of the number
- // of successful test parts and the number of failed test parts.
- int total_part_count() const;
-
- // Returns true iff the test passed (i.e. no test part failed).
- bool Passed() const { return !Failed(); }
-
- // Returns true iff the test failed.
- bool Failed() const { return failed_part_count() > 0; }
-
- // Returns true iff the test fatally failed.
- bool HasFatalFailure() const;
-
- // Returns the elapsed time, in milliseconds.
- TimeInMillis elapsed_time() const { return elapsed_time_; }
-
- // Sets the elapsed time.
- void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; }
-
- // Adds a test part result to the list.
- void AddTestPartResult(const TestPartResult& test_part_result);
-
- // Adds a test property to the list. The property is validated and may add
- // a non-fatal failure if invalid (e.g., if it conflicts with reserved
- // key names). If a property is already recorded for the same key, the
- // value will be updated, rather than storing multiple values for the same
- // key.
- void RecordProperty(const internal::TestProperty& test_property);
-
- // Adds a failure if the key is a reserved attribute of Google Test
- // testcase tags. Returns true if the property is valid.
- // TODO(russr): Validate attribute names are legal and human readable.
- static bool ValidateTestProperty(const internal::TestProperty& test_property);
-
- // Returns the death test count.
- int death_test_count() const { return death_test_count_; }
-
- // Increments the death test count, returning the new count.
- int increment_death_test_count() { return ++death_test_count_; }
-
- // Clears the object.
- void Clear();
- private:
- // Protects mutable state of the property list and of owned properties, whose
- // values may be updated.
- internal::Mutex test_properites_mutex_;
-
- // The list of TestPartResults
- internal::List<TestPartResult> test_part_results_;
- // The list of TestProperties
- internal::List<internal::TestProperty> test_properties_;
- // Running count of death tests.
- int death_test_count_;
- // The elapsed time, in milliseconds.
- TimeInMillis elapsed_time_;
-
- // We disallow copying TestResult.
- GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult);
-}; // class TestResult
-
-class TestInfoImpl {
- public:
- TestInfoImpl(TestInfo* parent, const char* test_case_name,
- const char* name, const char* test_case_comment,
- const char* comment, TypeId fixture_class_id,
- internal::TestFactoryBase* factory);
- ~TestInfoImpl();
-
- // Returns true if this test should run.
- bool should_run() const { return should_run_; }
-
- // Sets the should_run member.
- void set_should_run(bool should) { should_run_ = should; }
-
- // Returns true if this test is disabled. Disabled tests are not run.
- bool is_disabled() const { return is_disabled_; }
-
- // Sets the is_disabled member.
- void set_is_disabled(bool is) { is_disabled_ = is; }
-
- // Returns the test case name.
- const char* test_case_name() const { return test_case_name_.c_str(); }
-
- // Returns the test name.
- const char* name() const { return name_.c_str(); }
-
- // Returns the test case comment.
- const char* test_case_comment() const { return test_case_comment_.c_str(); }
-
- // Returns the test comment.
- const char* comment() const { return comment_.c_str(); }
-
- // Returns the ID of the test fixture class.
- TypeId fixture_class_id() const { return fixture_class_id_; }
-
- // Returns the test result.
- internal::TestResult* result() { return &result_; }
- const internal::TestResult* result() const { return &result_; }
-
- // Creates the test object, runs it, records its result, and then
- // deletes it.
- void Run();
-
- // Calls the given TestInfo object's Run() method.
- static void RunTest(TestInfo * test_info) {
- test_info->impl()->Run();
- }
-
- // Clears the test result.
- void ClearResult() { result_.Clear(); }
-
- // Clears the test result in the given TestInfo object.
- static void ClearTestResult(TestInfo * test_info) {
- test_info->impl()->ClearResult();
- }
-
- private:
- // These fields are immutable properties of the test.
- TestInfo* const parent_; // The owner of this object
- const String test_case_name_; // Test case name
- const String name_; // Test name
- const String test_case_comment_; // Test case comment
- const String comment_; // Test comment
- const TypeId fixture_class_id_; // ID of the test fixture class
- bool should_run_; // True iff this test should run
- bool is_disabled_; // True iff this test is disabled
- internal::TestFactoryBase* const factory_; // The factory that creates
- // the test object
-
- // This field is mutable and needs to be reset before running the
- // test for the second time.
- internal::TestResult result_;
-
- GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfoImpl);
-};
-
-} // namespace internal
-
-// A test case, which consists of a list of TestInfos.
-//
-// TestCase is not copyable.
-class TestCase {
- public:
- // Creates a TestCase with the given name.
- //
- // TestCase does NOT have a default constructor. Always use this
- // constructor to create a TestCase object.
- //
- // Arguments:
- //
- // name: name of the test case
- // set_up_tc: pointer to the function that sets up the test case
- // tear_down_tc: pointer to the function that tears down the test case
- TestCase(const char* name, const char* comment,
- Test::SetUpTestCaseFunc set_up_tc,
- Test::TearDownTestCaseFunc tear_down_tc);
-
- // Destructor of TestCase.
- virtual ~TestCase();
-
- // Gets the name of the TestCase.
- const char* name() const { return name_.c_str(); }
-
- // Returns the test case comment.
- const char* comment() const { return comment_.c_str(); }
-
- // Returns true if any test in this test case should run.
- bool should_run() const { return should_run_; }
-
- // Sets the should_run member.
- void set_should_run(bool should) { should_run_ = should; }
-
- // Gets the (mutable) list of TestInfos in this TestCase.
- internal::List<TestInfo*>& test_info_list() { return *test_info_list_; }
-
- // Gets the (immutable) list of TestInfos in this TestCase.
- const internal::List<TestInfo *> & test_info_list() const {
- return *test_info_list_;
- }
-
- // Gets the number of successful tests in this test case.
- int successful_test_count() const;
-
- // Gets the number of failed tests in this test case.
- int failed_test_count() const;
-
- // Gets the number of disabled tests in this test case.
- int disabled_test_count() const;
-
- // Get the number of tests in this test case that should run.
- int test_to_run_count() const;
-
- // Gets the number of all tests in this test case.
- int total_test_count() const;
-
- // Returns true iff the test case passed.
- bool Passed() const { return !Failed(); }
-
- // Returns true iff the test case failed.
- bool Failed() const { return failed_test_count() > 0; }
-
- // Returns the elapsed time, in milliseconds.
- internal::TimeInMillis elapsed_time() const { return elapsed_time_; }
-
- // Adds a TestInfo to this test case. Will delete the TestInfo upon
- // destruction of the TestCase object.
- void AddTestInfo(TestInfo * test_info);
-
- // Finds and returns a TestInfo with the given name. If one doesn't
- // exist, returns NULL.
- TestInfo* GetTestInfo(const char* test_name);
-
- // Clears the results of all tests in this test case.
- void ClearResult();
-
- // Clears the results of all tests in the given test case.
- static void ClearTestCaseResult(TestCase* test_case) {
- test_case->ClearResult();
- }
-
- // Runs every test in this TestCase.
- void Run();
-
- // Runs every test in the given TestCase.
- static void RunTestCase(TestCase * test_case) { test_case->Run(); }
-
- // Returns true iff test passed.
- static bool TestPassed(const TestInfo * test_info) {
- const internal::TestInfoImpl* const impl = test_info->impl();
- return impl->should_run() && impl->result()->Passed();
- }
-
- // Returns true iff test failed.
- static bool TestFailed(const TestInfo * test_info) {
- const internal::TestInfoImpl* const impl = test_info->impl();
- return impl->should_run() && impl->result()->Failed();
- }
-
- // Returns true iff test is disabled.
- static bool TestDisabled(const TestInfo * test_info) {
- return test_info->impl()->is_disabled();
- }
-
- // Returns true if the given test should run.
- static bool ShouldRunTest(const TestInfo *test_info) {
- return test_info->impl()->should_run();
- }
-
- private:
- // Name of the test case.
- internal::String name_;
- // Comment on the test case.
- internal::String comment_;
- // List of TestInfos.
- internal::List<TestInfo*>* test_info_list_;
- // Pointer to the function that sets up the test case.
- Test::SetUpTestCaseFunc set_up_tc_;
- // Pointer to the function that tears down the test case.
- Test::TearDownTestCaseFunc tear_down_tc_;
- // True iff any test in this test case should run.
- bool should_run_;
- // Elapsed time, in milliseconds.
- internal::TimeInMillis elapsed_time_;
-
- // We disallow copying TestCases.
- GTEST_DISALLOW_COPY_AND_ASSIGN_(TestCase);
-};
-
-namespace internal {
-
-// Class UnitTestOptions.
-//
-// This class contains functions for processing options the user
-// specifies when running the tests. It has only static members.
-//
-// In most cases, the user can specify an option using either an
-// environment variable or a command line flag. E.g. you can set the
-// test filter using either GTEST_FILTER or --gtest_filter. If both
-// the variable and the flag are present, the latter overrides the
-// former.
-class UnitTestOptions {
- public:
- // Functions for processing the gtest_output flag.
-
- // Returns the output format, or "" for normal printed output.
- static String GetOutputFormat();
-
- // Returns the name of the requested output file, or the default if none
- // was explicitly specified.
- static String GetOutputFile();
-
- // Functions for processing the gtest_filter flag.
-
- // Returns true iff the wildcard pattern matches the string. The
- // first ':' or '\0' character in pattern marks the end of it.
- //
- // This recursive algorithm isn't very efficient, but is clear and
- // works well enough for matching test names, which are short.
- static bool PatternMatchesString(const char *pattern, const char *str);
-
- // Returns true iff the user-specified filter matches the test case
- // name and the test name.
- static bool FilterMatchesTest(const String &test_case_name,
- const String &test_name);
-
-#ifdef GTEST_OS_WINDOWS
- // Function for supporting the gtest_catch_exception flag.
-
- // Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
- // given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
- // This function is useful as an __except condition.
- static int GTestShouldProcessSEH(DWORD exception_code);
-#endif // GTEST_OS_WINDOWS
-
- // Returns true if "name" matches the ':' separated list of glob-style
- // filters in "filter".
- static bool MatchesFilter(const String& name, const char* filter);
-};
-
-// Returns the current application's name, removing directory path if that
-// is present. Used by UnitTestOptions::GetOutputFile.
-FilePath GetCurrentExecutableName();
-
-// The role interface for getting the OS stack trace as a string.
-class OsStackTraceGetterInterface {
- public:
- OsStackTraceGetterInterface() {}
- virtual ~OsStackTraceGetterInterface() {}
-
- // Returns the current OS stack trace as a String. Parameters:
- //
- // max_depth - the maximum number of stack frames to be included
- // in the trace.
- // skip_count - the number of top frames to be skipped; doesn't count
- // against max_depth.
- virtual String CurrentStackTrace(int max_depth, int skip_count) = 0;
-
- // UponLeavingGTest() should be called immediately before Google Test calls
- // user code. It saves some information about the current stack that
- // CurrentStackTrace() will use to find and hide Google Test stack frames.
- virtual void UponLeavingGTest() = 0;
-
- private:
- GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface);
-};
-
-// A working implementation of the OsStackTraceGetterInterface interface.
-class OsStackTraceGetter : public OsStackTraceGetterInterface {
- public:
- OsStackTraceGetter() {}
- virtual String CurrentStackTrace(int max_depth, int skip_count);
- virtual void UponLeavingGTest();
-
- // This string is inserted in place of stack frames that are part of
- // Google Test's implementation.
- static const char* const kElidedFramesMarker;
-
- private:
- Mutex mutex_; // protects all internal state
-
- // We save the stack frame below the frame that calls user code.
- // We do this because the address of the frame immediately below
- // the user code changes between the call to UponLeavingGTest()
- // and any calls to CurrentStackTrace() from within the user code.
- void* caller_frame_;
-
- GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter);
-};
-
-// Information about a Google Test trace point.
-struct TraceInfo {
- const char* file;
- int line;
- String message;
-};
-
-// This is the default global test part result reporter used in UnitTestImpl.
-// This class should only be used by UnitTestImpl.
-class DefaultGlobalTestPartResultReporter
- : public TestPartResultReporterInterface {
- public:
- explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test);
- // Implements the TestPartResultReporterInterface. Reports the test part
- // result in the current test.
- virtual void ReportTestPartResult(const TestPartResult& result);
-
- private:
- UnitTestImpl* const unit_test_;
-};
-
-// This is the default per thread test part result reporter used in
-// UnitTestImpl. This class should only be used by UnitTestImpl.
-class DefaultPerThreadTestPartResultReporter
- : public TestPartResultReporterInterface {
- public:
- explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test);
- // Implements the TestPartResultReporterInterface. The implementation just
- // delegates to the current global test part result reporter of *unit_test_.
- virtual void ReportTestPartResult(const TestPartResult& result);
-
- private:
- UnitTestImpl* const unit_test_;
-};
-
-// The private implementation of the UnitTest class. We don't protect
-// the methods under a mutex, as this class is not accessible by a
-// user and the UnitTest class that delegates work to this class does
-// proper locking.
-class UnitTestImpl {
- public:
- explicit UnitTestImpl(UnitTest* parent);
- virtual ~UnitTestImpl();
-
- // There are two different ways to register your own TestPartResultReporter.
- // You can register your own reporter to listen either only for test results
- // from the current thread or for results from all threads.
- // By default, each per-thread test result reporter just passes a new
- // TestPartResult to the global test result reporter, which registers the
- // test part result for the currently running test.
-
- // Returns the global test part result reporter.
- TestPartResultReporterInterface* GetGlobalTestPartResultReporter();
-
- // Sets the global test part result reporter.
- void SetGlobalTestPartResultReporter(
- TestPartResultReporterInterface* reporter);
-
- // Returns the test part result reporter for the current thread.
- TestPartResultReporterInterface* GetTestPartResultReporterForCurrentThread();
-
- // Sets the test part result reporter for the current thread.
- void SetTestPartResultReporterForCurrentThread(
- TestPartResultReporterInterface* reporter);
-
- // Gets the number of successful test cases.
- int successful_test_case_count() const;
-
- // Gets the number of failed test cases.
- int failed_test_case_count() const;
-
- // Gets the number of all test cases.
- int total_test_case_count() const;
-
- // Gets the number of all test cases that contain at least one test
- // that should run.
- int test_case_to_run_count() const;
-
- // Gets the number of successful tests.
- int successful_test_count() const;
-
- // Gets the number of failed tests.
- int failed_test_count() const;
-
- // Gets the number of disabled tests.
- int disabled_test_count() const;
-
- // Gets the number of all tests.
- int total_test_count() const;
-
- // Gets the number of tests that should run.
- int test_to_run_count() const;
-
- // Gets the elapsed time, in milliseconds.
- TimeInMillis elapsed_time() const { return elapsed_time_; }
-
- // Returns true iff the unit test passed (i.e. all test cases passed).
- bool Passed() const { return !Failed(); }
-
- // Returns true iff the unit test failed (i.e. some test case failed
- // or something outside of all tests failed).
- bool Failed() const {
- return failed_test_case_count() > 0 || ad_hoc_test_result()->Failed();
- }
-
- // Returns the TestResult for the test that's currently running, or
- // the TestResult for the ad hoc test if no test is running.
- internal::TestResult* current_test_result();
-
- // Returns the TestResult for the ad hoc test.
- const internal::TestResult* ad_hoc_test_result() const {
- return &ad_hoc_test_result_;
- }
-
- // Sets the unit test result printer.
- //
- // Does nothing if the input and the current printer object are the
- // same; otherwise, deletes the old printer object and makes the
- // input the current printer.
- void set_result_printer(UnitTestEventListenerInterface * result_printer);
-
- // Returns the current unit test result printer if it is not NULL;
- // otherwise, creates an appropriate result printer, makes it the
- // current printer, and returns it.
- UnitTestEventListenerInterface* result_printer();
-
- // Sets the OS stack trace getter.
- //
- // Does nothing if the input and the current OS stack trace getter
- // are the same; otherwise, deletes the old getter and makes the
- // input the current getter.
- void set_os_stack_trace_getter(OsStackTraceGetterInterface* getter);
-
- // Returns the current OS stack trace getter if it is not NULL;
- // otherwise, creates an OsStackTraceGetter, makes it the current
- // getter, and returns it.
- OsStackTraceGetterInterface* os_stack_trace_getter();
-
- // Returns the current OS stack trace as a String.
- //
- // The maximum number of stack frames to be included is specified by
- // the gtest_stack_trace_depth flag. The skip_count parameter
- // specifies the number of top frames to be skipped, which doesn't
- // count against the number of frames to be included.
- //
- // For example, if Foo() calls Bar(), which in turn calls
- // CurrentOsStackTraceExceptTop(1), Foo() will be included in the
- // trace but Bar() and CurrentOsStackTraceExceptTop() won't.
- String CurrentOsStackTraceExceptTop(int skip_count);
-
- // Finds and returns a TestCase with the given name. If one doesn't
- // exist, creates one and returns it.
- //
- // Arguments:
- //
- // test_case_name: name of the test case
- // set_up_tc: pointer to the function that sets up the test case
- // tear_down_tc: pointer to the function that tears down the test case
- TestCase* GetTestCase(const char* test_case_name,
- const char* comment,
- Test::SetUpTestCaseFunc set_up_tc,
- Test::TearDownTestCaseFunc tear_down_tc);
-
- // Adds a TestInfo to the unit test.
- //
- // Arguments:
- //
- // set_up_tc: pointer to the function that sets up the test case
- // tear_down_tc: pointer to the function that tears down the test case
- // test_info: the TestInfo object
- void AddTestInfo(Test::SetUpTestCaseFunc set_up_tc,
- Test::TearDownTestCaseFunc tear_down_tc,
- TestInfo * test_info) {
- // In order to support thread-safe death tests, we need to
- // remember the original working directory when the test program
- // was first invoked. We cannot do this in RUN_ALL_TESTS(), as
- // the user may have changed the current directory before calling
- // RUN_ALL_TESTS(). Therefore we capture the current directory in
- // AddTestInfo(), which is called to register a TEST or TEST_F
- // before main() is reached.
- if (original_working_dir_.IsEmpty()) {
- original_working_dir_.Set(FilePath::GetCurrentDir());
- if (original_working_dir_.IsEmpty()) {
- printf("%s\n", "Failed to get the current working directory.");
- abort();
- }
- }
-
- GetTestCase(test_info->test_case_name(),
- test_info->test_case_comment(),
- set_up_tc,
- tear_down_tc)->AddTestInfo(test_info);
- }
-
-#ifdef GTEST_HAS_PARAM_TEST
- // Returns ParameterizedTestCaseRegistry object used to keep track of
- // value-parameterized tests and instantiate and register them.
- internal::ParameterizedTestCaseRegistry& parameterized_test_registry() {
- return parameterized_test_registry_;
- }
-#endif // GTEST_HAS_PARAM_TEST
-
- // Sets the TestCase object for the test that's currently running.
- void set_current_test_case(TestCase* current_test_case) {
- current_test_case_ = current_test_case;
- }
-
- // Sets the TestInfo object for the test that's currently running. If
- // current_test_info is NULL, the assertion results will be stored in
- // ad_hoc_test_result_.
- void set_current_test_info(TestInfo* current_test_info) {
- current_test_info_ = current_test_info;
- }
-
- // Registers all parameterized tests defined using TEST_P and
- // INSTANTIATE_TEST_P, creating regular tests for each test/parameter
- // combination. This method can be called more than once; it has
- // guards protecting from registering the tests more than once.
- // If value-parameterized tests are disabled, RegisterParameterizedTests
- // is present but does nothing.
- void RegisterParameterizedTests();
-
- // Runs all tests in this UnitTest object, prints the result, and
- // returns 0 if all tests are successful, or 1 otherwise. If any
- // exception is thrown during a test on Windows, this test is
- // considered to be failed, but the rest of the tests will still be
- // run. (We disable exceptions on Linux and Mac OS X, so the issue
- // doesn't apply there.)
- int RunAllTests();
-
- // Clears the results of all tests, including the ad hoc test.
- void ClearResult() {
- test_cases_.ForEach(TestCase::ClearTestCaseResult);
- ad_hoc_test_result_.Clear();
- }
-
- // Matches the full name of each test against the user-specified
- // filter to decide whether the test should run, then records the
- // result in each TestCase and TestInfo object.
- // Returns the number of tests that should run.
- int FilterTests();
-
- // Lists all the tests by name.
- void ListAllTests();
-
- const TestCase* current_test_case() const { return current_test_case_; }
- TestInfo* current_test_info() { return current_test_info_; }
- const TestInfo* current_test_info() const { return current_test_info_; }
-
- // Returns the list of environments that need to be set-up/torn-down
- // before/after the tests are run.
- internal::List<Environment*>* environments() { return &environments_; }
- internal::List<Environment*>* environments_in_reverse_order() {
- return &environments_in_reverse_order_;
- }
-
- internal::List<TestCase*>* test_cases() { return &test_cases_; }
- const internal::List<TestCase*>* test_cases() const { return &test_cases_; }
-
- // Getters for the per-thread Google Test trace stack.
- internal::List<TraceInfo>* gtest_trace_stack() {
- return gtest_trace_stack_.pointer();
- }
- const internal::List<TraceInfo>* gtest_trace_stack() const {
- return gtest_trace_stack_.pointer();
- }
-
-#ifdef GTEST_HAS_DEATH_TEST
- // Returns a pointer to the parsed --gtest_internal_run_death_test
- // flag, or NULL if that flag was not specified.
- // This information is useful only in a death test child process.
- const InternalRunDeathTestFlag* internal_run_death_test_flag() const {
- return internal_run_death_test_flag_.get();
- }
-
- // Returns a pointer to the current death test factory.
- internal::DeathTestFactory* death_test_factory() {
- return death_test_factory_.get();
- }
-
- friend class ReplaceDeathTestFactory;
-#endif // GTEST_HAS_DEATH_TEST
-
- private:
- friend class ::testing::UnitTest;
-
- // The UnitTest object that owns this implementation object.
- UnitTest* const parent_;
-
- // The working directory when the first TEST() or TEST_F() was
- // executed.
- internal::FilePath original_working_dir_;
-
- // The default test part result reporters.
- DefaultGlobalTestPartResultReporter default_global_test_part_result_reporter_;
- DefaultPerThreadTestPartResultReporter
- default_per_thread_test_part_result_reporter_;
-
- // Points to (but doesn't own) the global test part result reporter.
- TestPartResultReporterInterface* global_test_part_result_repoter_;
-
- // Protects read and write access to global_test_part_result_reporter_.
- internal::Mutex global_test_part_result_reporter_mutex_;
-
- // Points to (but doesn't own) the per-thread test part result reporter.
- internal::ThreadLocal<TestPartResultReporterInterface*>
- per_thread_test_part_result_reporter_;
-
- // The list of environments that need to be set-up/torn-down
- // before/after the tests are run. environments_in_reverse_order_
- // simply mirrors environments_ in reverse order.
- internal::List<Environment*> environments_;
- internal::List<Environment*> environments_in_reverse_order_;
-
- internal::List<TestCase*> test_cases_; // The list of TestCases.
-
-#ifdef GTEST_HAS_PARAM_TEST
- // ParameterizedTestRegistry object used to register value-parameterized
- // tests.
- internal::ParameterizedTestCaseRegistry parameterized_test_registry_;
-
- // Indicates whether RegisterParameterizedTests() has been called already.
- bool parameterized_tests_registered_;
-#endif // GTEST_HAS_PARAM_TEST
-
- // Points to the last death test case registered. Initially NULL.
- internal::ListNode<TestCase*>* last_death_test_case_;
-
- // This points to the TestCase for the currently running test. It
- // changes as Google Test goes through one test case after another.
- // When no test is running, this is set to NULL and Google Test
- // stores assertion results in ad_hoc_test_result_. Initially NULL.
- TestCase* current_test_case_;
-
- // This points to the TestInfo for the currently running test. It
- // changes as Google Test goes through one test after another. When
- // no test is running, this is set to NULL and Google Test stores
- // assertion results in ad_hoc_test_result_. Initially NULL.
- TestInfo* current_test_info_;
-
- // Normally, a user only writes assertions inside a TEST or TEST_F,
- // or inside a function called by a TEST or TEST_F. Since Google
- // Test keeps track of which test is currently running, it can
- // associate such an assertion with the test it belongs to.
- //
- // If an assertion is encountered when no TEST or TEST_F is running,
- // Google Test attributes the assertion result to an imaginary "ad hoc"
- // test, and records the result in ad_hoc_test_result_.
- internal::TestResult ad_hoc_test_result_;
-
- // The unit test result printer. Will be deleted when the UnitTest
- // object is destructed. By default, a plain text printer is used,
- // but the user can set this field to use a custom printer if that
- // is desired.
- UnitTestEventListenerInterface* result_printer_;
-
- // The OS stack trace getter. Will be deleted when the UnitTest
- // object is destructed. By default, an OsStackTraceGetter is used,
- // but the user can set this field to use a custom getter if that is
- // desired.
- OsStackTraceGetterInterface* os_stack_trace_getter_;
-
- // How long the test took to run, in milliseconds.
- TimeInMillis elapsed_time_;
-
-#ifdef GTEST_HAS_DEATH_TEST
- // The decomposed components of the gtest_internal_run_death_test flag,
- // parsed when RUN_ALL_TESTS is called.
- internal::scoped_ptr<InternalRunDeathTestFlag> internal_run_death_test_flag_;
- internal::scoped_ptr<internal::DeathTestFactory> death_test_factory_;
-#endif // GTEST_HAS_DEATH_TEST
-
- // A per-thread stack of traces created by the SCOPED_TRACE() macro.
- internal::ThreadLocal<internal::List<TraceInfo> > gtest_trace_stack_;
-
- GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl);
-}; // class UnitTestImpl
-
-// Convenience function for accessing the global UnitTest
-// implementation object.
-inline UnitTestImpl* GetUnitTestImpl() {
- return UnitTest::GetInstance()->impl();
-}
-
-// Parses the command line for Google Test flags, without initializing
-// other parts of Google Test.
-void ParseGoogleTestFlagsOnly(int* argc, char** argv);
-void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv);
-
-} // namespace internal
-} // namespace testing
-
-#endif // GTEST_SRC_GTEST_INTERNAL_INL_H_
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-internal.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-internal.h
deleted file mode 100644
index 242ffea..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-internal.h
+++ /dev/null
@@ -1,896 +0,0 @@
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors: wan at google.com (Zhanyong Wan), eefacm at gmail.com (Sean Mcafee)
-//
-// The Google C++ Testing Framework (Google Test)
-//
-// This header file declares functions and macros used internally by
-// Google Test. They are subject to change without notice.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
-
-#include <gtest/internal/gtest-port.h>
-
-#ifdef GTEST_OS_LINUX
-#include <stdlib.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-#endif // GTEST_OS_LINUX
-
-#include <ctype.h>
-#include <string.h>
-#include <iomanip>
-#include <limits>
-#include <set>
-
-#include <gtest/internal/gtest-string.h>
-#include <gtest/internal/gtest-filepath.h>
-#include <gtest/internal/gtest-type-util.h>
-
-#include "llvm/Support/raw_os_ostream.h"
-
-// Due to C++ preprocessor weirdness, we need double indirection to
-// concatenate two tokens when one of them is __LINE__. Writing
-//
-// foo ## __LINE__
-//
-// will result in the token foo__LINE__, instead of foo followed by
-// the current line number. For more details, see
-// http://www.parashift.com/c++-faq-lite/misc-technical-issues.html#faq-39.6
-#define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar)
-#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar
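To see why the extra level of indirection matters, a tiny standalone program (not part of this header; the CONCAT* names below are made up for the demo) can compare the two forms:

#include <iostream>

// Without indirection, ## pastes the literal token __LINE__.
#define CONCAT_DIRECT(a, b) a ## b
// With indirection, a and b are macro-expanded before pasting.
#define CONCAT_INDIRECT_IMPL(a, b) a ## b
#define CONCAT_INDIRECT(a, b) CONCAT_INDIRECT_IMPL(a, b)

int main() {
  int CONCAT_DIRECT(foo, __LINE__) = 1;    // declares a variable literally named foo__LINE__
  int CONCAT_INDIRECT(foo, __LINE__) = 2;  // declares e.g. foo13; unused on purpose
  std::cout << foo__LINE__ << std::endl;   // refers to the first variable
  return 0;
}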
-
-// Google Test defines the testing::Message class to allow construction of
-// test messages via the << operator. The idea is that anything
-// streamable to std::ostream can be streamed to a testing::Message.
-// This allows a user to use his own types in Google Test assertions by
-// overloading the << operator.
-//
-// util/gtl/stl_logging-inl.h overloads << for STL containers. These
-// overloads cannot be defined in the std namespace, as that will be
-// undefined behavior. Therefore, they are defined in the global
-// namespace instead.
-//
-// C++'s symbol lookup rule (i.e. Koenig lookup) says that these
-// overloads are visible in either the std namespace or the global
-// namespace, but not other namespaces, including the testing
-// namespace which Google Test's Message class is in.
-//
- // To allow STL containers (and other types that have a << operator
-// defined in the global namespace) to be used in Google Test assertions,
-// testing::Message must access the custom << operator from the global
-// namespace. Hence this helper function.
-//
-// Note: Jeffrey Yasskin suggested an alternative fix by "using
-// ::operator<<;" in the definition of Message's operator<<. That fix
-// doesn't require a helper function, but unfortunately doesn't
-// compile with MSVC.
-
-// LLVM INTERNAL CHANGE: To allow operator<< to work with both
-// std::ostreams and LLVM's raw_ostreams, we define a special
-// std::ostream with an implicit conversion to raw_ostream& and stream
-// to that. This causes the compiler to prefer std::ostream overloads
-// but still find raw_ostream& overloads.
-namespace llvm {
-class convertible_fwd_ostream : public std::ostream {
- std::ostream& os_;
- raw_os_ostream ros_;
-
-public:
- convertible_fwd_ostream(std::ostream& os)
- : std::ostream(os.rdbuf()), os_(os), ros_(*this) {}
- operator raw_ostream&() { return ros_; }
-};
-}
-template <typename T>
-inline void GTestStreamToHelper(std::ostream* os, const T& val) {
- llvm::convertible_fwd_ostream cos(*os);
- cos << val;
-}
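The same trick can be reproduced without LLVM: any type with its own stream class can be reached through a std::ostream wrapper that converts implicitly to that second stream type. In the sketch below, AltStream, OnlyAltStreamable and ForwardingStream are invented stand-ins (not LLVM or gtest types); overload resolution prefers the std::ostream member operators but still finds the alternative ones through the conversion:

#include <iostream>
#include <sstream>
#include <string>

// A stand-in for llvm::raw_ostream: some stream type with its own operator<<.
struct AltStream {
  std::ostream* out;
};
struct OnlyAltStreamable {};   // streamable only to AltStream
inline AltStream& operator<<(AltStream& s, const OnlyAltStreamable&) {
  *s.out << "[alt]";
  return s;
}

// Analogue of convertible_fwd_ostream: behaves as a std::ostream but converts
// implicitly to AltStream&, so both families of operator<< are reachable.
class ForwardingStream : public std::ostream {
  AltStream alt_;
 public:
  explicit ForwardingStream(std::ostream& os) : std::ostream(os.rdbuf()) {
    alt_.out = this;
  }
  operator AltStream&() { return alt_; }
};

template <typename T>
void StreamToHelper(std::ostream* os, const T& val) {
  ForwardingStream fs(*os);
  fs << val;   // prefers std::ostream overloads, falls back to AltStream&
}

int main() {
  std::ostringstream oss;
  StreamToHelper(&oss, 42);                    // uses std::ostream's operator<<
  StreamToHelper(&oss, OnlyAltStreamable());   // uses the AltStream overload
  std::cout << oss.str() << std::endl;         // prints "42[alt]"
  return 0;
}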
-
-namespace testing {
-
-// Forward declaration of classes.
-
-class Message; // Represents a failure message.
-class Test; // Represents a test.
-class TestCase; // A collection of related tests.
-class TestPartResult; // Result of a test part.
-class TestInfo; // Information about a test.
-class UnitTest; // A collection of test cases.
-class UnitTestEventListenerInterface; // Listens to Google Test events.
-class AssertionResult; // Result of an assertion.
-
-namespace internal {
-
-struct TraceInfo; // Information about a trace point.
-class ScopedTrace; // Implements scoped trace.
-class TestInfoImpl; // Opaque implementation of TestInfo
-class TestResult; // Result of a single Test.
-class UnitTestImpl; // Opaque implementation of UnitTest
-
-template <typename E> class List; // A generic list.
-template <typename E> class ListNode; // A node in a generic list.
-
-// How many times InitGoogleTest() has been called.
-extern int g_init_gtest_count;
-
-// The text used in failure messages to indicate the start of the
-// stack trace.
-extern const char kStackTraceMarker[];
-
-// A secret type that Google Test users don't know about. It has no
-// definition on purpose. Therefore it's impossible to create a
-// Secret object, which is what we want.
-class Secret;
-
-// Two overloaded helpers for checking at compile time whether an
-// expression is a null pointer literal (i.e. NULL or any 0-valued
-// compile-time integral constant). Their return values have
-// different sizes, so we can use sizeof() to test which version is
-// picked by the compiler. These helpers have no implementations, as
-// we only need their signatures.
-//
-// Given IsNullLiteralHelper(x), the compiler will pick the first
-// version if x can be implicitly converted to Secret*, and pick the
-// second version otherwise. Since Secret is a secret and incomplete
-// type, the only expression a user can write that has type Secret* is
-// a null pointer literal. Therefore, we know that x is a null
-// pointer literal if and only if the first version is picked by the
-// compiler.
-char IsNullLiteralHelper(Secret* p);
-char (&IsNullLiteralHelper(...))[2]; // NOLINT
-
-// A compile-time bool constant that is true if and only if x is a
-// null pointer literal (i.e. NULL or any 0-valued compile-time
-// integral constant).
-#ifdef GTEST_ELLIPSIS_NEEDS_COPY_
-// Passing non-POD classes through ellipsis (...) crashes the ARM
-// compiler. The Nokia Symbian and the IBM XL C/C++ compiler try to
-// instantiate a copy constructor for objects passed through ellipsis
-// (...), failing for uncopyable objects. Hence we define this to
-// false (and lose support for NULL detection).
-#define GTEST_IS_NULL_LITERAL_(x) false
-#else
-#define GTEST_IS_NULL_LITERAL_(x) \
- (sizeof(::testing::internal::IsNullLiteralHelper(x)) == 1)
-#endif // GTEST_ELLIPSIS_NEEDS_COPY_
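A self-contained version of the sizeof() trick (the same idea as above, reduced to a few lines for illustration) behaves like this:

#include <cstddef>
#include <cstdio>

class Secret;   // deliberately left incomplete

// Only a null pointer literal converts implicitly to Secret*, so the size of
// the chosen overload's return type tells the two cases apart.
char IsNullLiteralHelper(Secret* p);
char (&IsNullLiteralHelper(...))[2];

#define IS_NULL_LITERAL(x) (sizeof(IsNullLiteralHelper(x)) == 1)

int main() {
  int n = 0;
  int* p = &n;
  std::printf("%d %d %d\n",
              static_cast<int>(IS_NULL_LITERAL(NULL)),   // 1: null pointer literal
              static_cast<int>(IS_NULL_LITERAL(0)),      // 1: 0 is a null pointer constant
              static_cast<int>(IS_NULL_LITERAL(p)));     // 0: a non-literal pointer
  return 0;
}

Neither helper needs a definition, because both calls occur only inside sizeof() and are never evaluated.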
-
-// Appends the user-supplied message to the Google-Test-generated message.
-String AppendUserMessage(const String& gtest_msg,
- const Message& user_msg);
-
-// A helper class for creating scoped traces in user programs.
-class ScopedTrace {
- public:
- // The c'tor pushes the given source file location and message onto
- // a trace stack maintained by Google Test.
- ScopedTrace(const char* file, int line, const Message& message);
-
- // The d'tor pops the info pushed by the c'tor.
- //
- // Note that the d'tor is not virtual in order to be efficient.
- // Don't inherit from ScopedTrace!
- ~ScopedTrace();
-
- private:
- GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedTrace);
-} GTEST_ATTRIBUTE_UNUSED_; // A ScopedTrace object does its job in its
- // c'tor and d'tor. Therefore it doesn't
- // need to be used otherwise.
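The RAII idea can be sketched outside of gtest with an ordinary vector standing in for the per-thread trace stack. DemoScopedTrace, g_trace_stack and ReportFailure below are hypothetical names invented for the demo, not gtest's:

#include <cstdio>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical process-wide trace stack; gtest's real one is per-thread and
// lives inside UnitTestImpl.
static std::vector<std::string> g_trace_stack;

// Pushes a file:line message in its constructor and pops it in its
// destructor, mirroring what ScopedTrace does for SCOPED_TRACE().
class DemoScopedTrace {
 public:
  DemoScopedTrace(const char* file, int line, const std::string& msg) {
    std::ostringstream loc;
    loc << file << ":" << line << ": " << msg;
    g_trace_stack.push_back(loc.str());
  }
  ~DemoScopedTrace() { g_trace_stack.pop_back(); }
};

static void ReportFailure(const char* what) {
  std::printf("FAILED: %s\n", what);
  for (std::vector<std::string>::const_reverse_iterator it = g_trace_stack.rbegin();
       it != g_trace_stack.rend(); ++it)
    std::printf("  trace: %s\n", it->c_str());
}

int main() {
  DemoScopedTrace trace(__FILE__, __LINE__, "checking the first batch");
  ReportFailure("x == y");   // the active trace is attached to the failure
  return 0;
}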
-
-// Converts a streamable value to a String. A NULL pointer is
-// converted to "(null)". When the input value is a ::string,
-// ::std::string, ::wstring, or ::std::wstring object, each NUL
-// character in it is replaced with "\\0".
-// Declared here but defined in gtest.h, so that it has access
-// to the definition of the Message class, required by the ARM
-// compiler.
-template <typename T>
-String StreamableToString(const T& streamable);
-
-// Formats a value to be used in a failure message.
-
-#ifdef GTEST_NEEDS_IS_POINTER_
-
-// These are needed as the Nokia Symbian and IBM XL C/C++ compilers
-// cannot decide between const T& and const T* in a function template.
-// These compilers _can_ decide between class template specializations
-// for T and T*, so a tr1::type_traits-like is_pointer works, and we
-// can overload on that.
-
-// This overload makes sure that all pointers (including
-// those to char or wchar_t) are printed as raw pointers.
-template <typename T>
-inline String FormatValueForFailureMessage(internal::true_type dummy,
- T* pointer) {
- return StreamableToString(static_cast<const void*>(pointer));
-}
-
-template <typename T>
-inline String FormatValueForFailureMessage(internal::false_type dummy,
- const T& value) {
- return StreamableToString(value);
-}
-
-template <typename T>
-inline String FormatForFailureMessage(const T& value) {
- return FormatValueForFailureMessage(
- typename internal::is_pointer<T>::type(), value);
-}
-
-#else
-
-// These are needed as the above solution using is_pointer has the
-// limitation that T cannot be a type without external linkage, when
-// compiled using MSVC.
-
-template <typename T>
-inline String FormatForFailureMessage(const T& value) {
- return StreamableToString(value);
-}
-
-// This overload makes sure that all pointers (including
-// those to char or wchar_t) are printed as raw pointers.
-template <typename T>
-inline String FormatForFailureMessage(T* pointer) {
- return StreamableToString(static_cast<const void*>(pointer));
-}
-
-#endif // GTEST_NEEDS_IS_POINTER_
-
-// These overloaded versions handle narrow and wide characters.
-String FormatForFailureMessage(char ch);
-String FormatForFailureMessage(wchar_t wchar);
-
-// When this operand is a const char* or char*, and the other operand
-// is a ::std::string or ::string, we print this operand as a C string
-// rather than a pointer. We do the same for wide strings.
-
-// This internal macro is used to avoid duplicated code.
-#define GTEST_FORMAT_IMPL_(operand2_type, operand1_printer)\
-inline String FormatForComparisonFailureMessage(\
- operand2_type::value_type* str, const operand2_type& /*operand2*/) {\
- return operand1_printer(str);\
-}\
-inline String FormatForComparisonFailureMessage(\
- const operand2_type::value_type* str, const operand2_type& /*operand2*/) {\
- return operand1_printer(str);\
-}
-
-#if GTEST_HAS_STD_STRING
-GTEST_FORMAT_IMPL_(::std::string, String::ShowCStringQuoted)
-#endif // GTEST_HAS_STD_STRING
-#if GTEST_HAS_STD_WSTRING
-GTEST_FORMAT_IMPL_(::std::wstring, String::ShowWideCStringQuoted)
-#endif // GTEST_HAS_STD_WSTRING
-
-#if GTEST_HAS_GLOBAL_STRING
-GTEST_FORMAT_IMPL_(::string, String::ShowCStringQuoted)
-#endif // GTEST_HAS_GLOBAL_STRING
-#if GTEST_HAS_GLOBAL_WSTRING
-GTEST_FORMAT_IMPL_(::wstring, String::ShowWideCStringQuoted)
-#endif // GTEST_HAS_GLOBAL_WSTRING
-
-#undef GTEST_FORMAT_IMPL_
-
-// Constructs and returns the message for an equality assertion
-// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.
-//
-// The first four parameters are the expressions used in the assertion
-// and their values, as strings. For example, for ASSERT_EQ(foo, bar)
-// where foo is 5 and bar is 6, we have:
-//
-// expected_expression: "foo"
-// actual_expression: "bar"
-// expected_value: "5"
-// actual_value: "6"
-//
-// The ignoring_case parameter is true iff the assertion is a
-// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will
-// be inserted into the message.
-AssertionResult EqFailure(const char* expected_expression,
- const char* actual_expression,
- const String& expected_value,
- const String& actual_value,
- bool ignoring_case);
-
-
-// This template class represents an IEEE floating-point number
-// (either single-precision or double-precision, depending on the
-// template parameters).
-//
-// The purpose of this class is to do more sophisticated number
-// comparison. (Due to round-off error, etc, it's very unlikely that
-// two floating-points will be equal exactly. Hence a naive
-// comparison by the == operation often doesn't work.)
-//
-// Format of IEEE floating-point:
-//
-// The most-significant bit being the leftmost, an IEEE
-// floating-point looks like
-//
-// sign_bit exponent_bits fraction_bits
-//
-// Here, sign_bit is a single bit that designates the sign of the
-// number.
-//
-// For float, there are 8 exponent bits and 23 fraction bits.
-//
-// For double, there are 11 exponent bits and 52 fraction bits.
-//
-// More details can be found at
-// http://en.wikipedia.org/wiki/IEEE_floating-point_standard.
-//
-// Template parameter:
-//
-// RawType: the raw floating-point type (either float or double)
-template <typename RawType>
-class FloatingPoint {
- public:
- // Defines the unsigned integer type that has the same size as the
- // floating point number.
- typedef typename TypeWithSize<sizeof(RawType)>::UInt Bits;
-
- // Constants.
-
- // # of bits in a number.
- static const size_t kBitCount = 8*sizeof(RawType);
-
- // # of fraction bits in a number.
- static const size_t kFractionBitCount =
- std::numeric_limits<RawType>::digits - 1;
-
- // # of exponent bits in a number.
- static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount;
-
- // The mask for the sign bit.
- static const Bits kSignBitMask = static_cast<Bits>(1) << (kBitCount - 1);
-
- // The mask for the fraction bits.
- static const Bits kFractionBitMask =
- ~static_cast<Bits>(0) >> (kExponentBitCount + 1);
-
- // The mask for the exponent bits.
- static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask);
-
- // How many ULP's (Units in the Last Place) we want to tolerate when
- // comparing two numbers. The larger the value, the more error we
- // allow. A 0 value means that two numbers must be exactly the same
- // to be considered equal.
- //
- // The maximum error of a single floating-point operation is 0.5
- // units in the last place. On Intel CPU's, all floating-point
- // calculations are done with 80-bit precision, while double has 64
- // bits. Therefore, 4 should be enough for ordinary use.
- //
- // See the following article for more details on ULP:
- // http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm.
- static const size_t kMaxUlps = 4;
-
- // Constructs a FloatingPoint from a raw floating-point number.
- //
- // On an Intel CPU, passing a non-normalized NAN (Not a Number)
- // around may change its bits, although the new value is guaranteed
- // to be also a NAN. Therefore, don't expect this constructor to
- // preserve the bits in x when x is a NAN.
- explicit FloatingPoint(const RawType& x) : value_(x) {}
-
- // Static methods
-
- // Reinterprets a bit pattern as a floating-point number.
- //
- // This function is needed to test the AlmostEquals() method.
- static RawType ReinterpretBits(const Bits bits) {
- FloatingPoint fp(0);
- fp.bits_ = bits;
- return fp.value_;
- }
-
- // Returns the floating-point number that represents positive infinity.
- static RawType Infinity() {
- return ReinterpretBits(kExponentBitMask);
- }
-
- // Non-static methods
-
- // Returns the bits that represent this number.
- const Bits &bits() const { return bits_; }
-
- // Returns the exponent bits of this number.
- Bits exponent_bits() const { return kExponentBitMask & bits_; }
-
- // Returns the fraction bits of this number.
- Bits fraction_bits() const { return kFractionBitMask & bits_; }
-
- // Returns the sign bit of this number.
- Bits sign_bit() const { return kSignBitMask & bits_; }
-
- // Returns true iff this is NAN (not a number).
- bool is_nan() const {
- // It's a NAN if the exponent bits are all ones and the fraction
- // bits are not entirely zeros.
- return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0);
- }
-
- // Returns true iff this number is at most kMaxUlps ULP's away from
- // rhs. In particular, this function:
- //
- // - returns false if either number is (or both are) NAN.
- // - treats really large numbers as almost equal to infinity.
- // - thinks +0.0 and -0.0 are 0 ULP's apart.
- bool AlmostEquals(const FloatingPoint& rhs) const {
- // The IEEE standard says that any comparison operation involving
- // a NAN must return false.
- if (is_nan() || rhs.is_nan()) return false;
-
- return DistanceBetweenSignAndMagnitudeNumbers(bits_, rhs.bits_) <= kMaxUlps;
- }
-
- private:
- // Converts an integer from the sign-and-magnitude representation to
- // the biased representation. More precisely, let N be 2 to the
- // power of (kBitCount - 1), an integer x is represented by the
- // unsigned number x + N.
- //
- // For instance,
- //
- // -N + 1 (the most negative number representable using
- // sign-and-magnitude) is represented by 1;
- // 0 is represented by N; and
- // N - 1 (the biggest number representable using
- // sign-and-magnitude) is represented by 2N - 1.
- //
- // Read http://en.wikipedia.org/wiki/Signed_number_representations
- // for more details on signed number representations.
- static Bits SignAndMagnitudeToBiased(const Bits &sam) {
- if (kSignBitMask & sam) {
- // sam represents a negative number.
- return ~sam + 1;
- } else {
- // sam represents a positive number.
- return kSignBitMask | sam;
- }
- }
-
- // Given two numbers in the sign-and-magnitude representation,
- // returns the distance between them as an unsigned number.
- static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1,
- const Bits &sam2) {
- const Bits biased1 = SignAndMagnitudeToBiased(sam1);
- const Bits biased2 = SignAndMagnitudeToBiased(sam2);
- return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1);
- }
-
- union {
- RawType value_; // The raw floating-point number.
- Bits bits_; // The bits that represent the number.
- };
-};
-
-// Typedefs the instances of the FloatingPoint template class that we
-// care to use.
-typedef FloatingPoint<float> Float;
-typedef FloatingPoint<double> Double;
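A condensed, float-only rendition of the same ULP arithmetic (assuming a 32-bit unsigned int and IEEE-754 floats; FloatBits, Biased and UlpDistance are demo names, not part of the class above) shows the two properties mentioned in the comments, namely that adjacent floats are 1 ULP apart and that +0.0 and -0.0 are 0 ULP apart:

#include <cfloat>
#include <cstdio>
#include <cstring>

typedef unsigned int Bits32;   // assumes a 32-bit unsigned int and IEEE-754 floats

static Bits32 FloatBits(float f) {
  Bits32 b;
  std::memcpy(&b, &f, sizeof(b));   // reinterpret the float's bit pattern
  return b;
}

// Sign-and-magnitude -> biased (monotone) representation, as in
// SignAndMagnitudeToBiased() above.
static Bits32 Biased(Bits32 sam) {
  const Bits32 kSignMask = 0x80000000u;
  return (sam & kSignMask) ? ~sam + 1 : kSignMask | sam;
}

static Bits32 UlpDistance(float a, float b) {
  const Bits32 ba = Biased(FloatBits(a));
  const Bits32 bb = Biased(FloatBits(b));
  return (ba >= bb) ? ba - bb : bb - ba;
}

int main() {
  std::printf("%u\n", UlpDistance(1.0f, 1.0f + FLT_EPSILON));  // 1: adjacent floats
  std::printf("%u\n", UlpDistance(0.0f, -0.0f));               // 0: +0.0 vs -0.0
  return 0;
}

With the class's kMaxUlps of 4, AlmostEquals() therefore accepts values that are up to four representable numbers apart.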
-
-// In order to catch the mistake of putting tests that use different
-// test fixture classes in the same test case, we need to assign
-// unique IDs to fixture classes and compare them. The TypeId type is
-// used to hold such IDs. The user should treat TypeId as an opaque
-// type: the only operation allowed on TypeId values is to compare
-// them for equality using the == operator.
-typedef const void* TypeId;
-
-template <typename T>
-class TypeIdHelper {
- public:
- // dummy_ must not have a const type. Otherwise an overly eager
- // compiler (e.g. MSVC 7.1 & 8.0) may try to merge
- // TypeIdHelper<T>::dummy_ for different Ts as an "optimization".
- static bool dummy_;
-};
-
-template <typename T>
-bool TypeIdHelper<T>::dummy_ = false;
-
-// GetTypeId<T>() returns the ID of type T. Different values will be
-// returned for different types. Calling the function twice with the
-// same type argument is guaranteed to return the same ID.
-template <typename T>
-TypeId GetTypeId() {
- // The compiler is required to allocate a different
- // TypeIdHelper<T>::dummy_ variable for each T used to instantiate
- // the template. Therefore, the address of dummy_ is guaranteed to
- // be unique.
- return &(TypeIdHelper<T>::dummy_);
-}
-
-// Returns the type ID of ::testing::Test. Always call this instead
-// of GetTypeId< ::testing::Test>() to get the type ID of
-// ::testing::Test, as the latter may give the wrong result due to a
-// suspected linker bug when compiling Google Test as a Mac OS X
-// framework.
-TypeId GetTestTypeId();
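Stripped of the surrounding framework, the address-of-a-static-template-member trick can be exercised on its own (FixtureA and FixtureB are arbitrary demo types):

#include <cstdio>

typedef const void* TypeId;

template <typename T>
class TypeIdHelper {
 public:
  static bool dummy_;
};
template <typename T>
bool TypeIdHelper<T>::dummy_ = false;

// Each instantiation gets its own dummy_, so its address identifies T.
template <typename T>
TypeId GetTypeId() { return &(TypeIdHelper<T>::dummy_); }

class FixtureA {};
class FixtureB {};

int main() {
  std::printf("%d\n", GetTypeId<FixtureA>() == GetTypeId<FixtureA>());  // 1
  std::printf("%d\n", GetTypeId<FixtureA>() == GetTypeId<FixtureB>());  // 0
  return 0;
}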
-
-// Defines the abstract factory interface that creates instances
-// of a Test object.
-class TestFactoryBase {
- public:
- virtual ~TestFactoryBase() {}
-
- // Creates a test instance to run. The instance is both created and destroyed
- // within TestInfoImpl::Run()
- virtual Test* CreateTest() = 0;
-
- protected:
- TestFactoryBase() {}
-
- private:
- GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase);
-};
-
- // This class provides an implementation of the TestFactoryBase interface.
-// It is used in TEST and TEST_F macros.
-template <class TestClass>
-class TestFactoryImpl : public TestFactoryBase {
- public:
- virtual Test* CreateTest() { return new TestClass; }
-};
-
-#ifdef GTEST_OS_WINDOWS
-
-// Predicate-formatters for implementing the HRESULT checking macros
-// {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}
-// We pass a long instead of HRESULT to avoid causing an
-// include dependency for the HRESULT type.
-AssertionResult IsHRESULTSuccess(const char* expr, long hr); // NOLINT
-AssertionResult IsHRESULTFailure(const char* expr, long hr); // NOLINT
-
-#endif // GTEST_OS_WINDOWS
-
-// Formats a source file path and a line number as they would appear
-// in a compiler error message.
-inline String FormatFileLocation(const char* file, int line) {
- const char* const file_name = file == NULL ? "unknown file" : file;
- if (line < 0) {
- return String::Format("%s:", file_name);
- }
-#ifdef _MSC_VER
- return String::Format("%s(%d):", file_name, line);
-#else
- return String::Format("%s:%d:", file_name, line);
-#endif // _MSC_VER
-}
-
-// Types of SetUpTestCase() and TearDownTestCase() functions.
-typedef void (*SetUpTestCaseFunc)();
-typedef void (*TearDownTestCaseFunc)();
-
-// Creates a new TestInfo object and registers it with Google Test;
-// returns the created object.
-//
-// Arguments:
-//
-// test_case_name: name of the test case
-// name: name of the test
-// test_case_comment: a comment on the test case that will be included in
-// the test output
-// comment: a comment on the test that will be included in the
-// test output
-// fixture_class_id: ID of the test fixture class
-// set_up_tc: pointer to the function that sets up the test case
-// tear_down_tc: pointer to the function that tears down the test case
-// factory: pointer to the factory that creates a test object.
-// The newly created TestInfo instance will assume
-// ownership of the factory object.
-TestInfo* MakeAndRegisterTestInfo(
- const char* test_case_name, const char* name,
- const char* test_case_comment, const char* comment,
- TypeId fixture_class_id,
- SetUpTestCaseFunc set_up_tc,
- TearDownTestCaseFunc tear_down_tc,
- TestFactoryBase* factory);
-
-#if defined(GTEST_HAS_TYPED_TEST) || defined(GTEST_HAS_TYPED_TEST_P)
-
-// State of the definition of a type-parameterized test case.
-class TypedTestCasePState {
- public:
- TypedTestCasePState() : registered_(false) {}
-
- // Adds the given test name to defined_test_names_ and returns true
- // if the test case hasn't been registered; otherwise aborts the
- // program.
- bool AddTestName(const char* file, int line, const char* case_name,
- const char* test_name) {
- if (registered_) {
- fprintf(stderr, "%s Test %s must be defined before "
- "REGISTER_TYPED_TEST_CASE_P(%s, ...).\n",
- FormatFileLocation(file, line).c_str(), test_name, case_name);
- abort();
- }
- defined_test_names_.insert(test_name);
- return true;
- }
-
- // Verifies that registered_tests match the test names in
- // defined_test_names_; returns registered_tests if successful, or
- // aborts the program otherwise.
- const char* VerifyRegisteredTestNames(
- const char* file, int line, const char* registered_tests);
-
- private:
- bool registered_;
- ::std::set<const char*> defined_test_names_;
-};
-
-// Skips to the first non-space char after the first comma in 'str';
-// returns NULL if no comma is found in 'str'.
-inline const char* SkipComma(const char* str) {
- const char* comma = strchr(str, ',');
- if (comma == NULL) {
- return NULL;
- }
- while (isspace(*(++comma))) {}
- return comma;
-}
-
-// Returns the prefix of 'str' before the first comma in it; returns
-// the entire string if it contains no comma.
-inline String GetPrefixUntilComma(const char* str) {
- const char* comma = strchr(str, ',');
- return comma == NULL ? String(str) : String(str, comma - str);
-}
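Together these two helpers split the comma-separated, stringized test list handed over by REGISTER_TYPED_TEST_CASE_P. A standalone equivalent using std::string in place of the internal String class (DemoSkipComma and DemoPrefixUntilComma are demo names, not the library's) would be:

#include <cctype>
#include <cstdio>
#include <cstring>
#include <string>

// Standalone equivalents of the two helpers above.
static const char* DemoSkipComma(const char* str) {
  const char* comma = std::strchr(str, ',');
  if (comma == NULL) return NULL;
  while (std::isspace(static_cast<unsigned char>(*(++comma)))) {}
  return comma;
}

static std::string DemoPrefixUntilComma(const char* str) {
  const char* comma = std::strchr(str, ',');
  return comma == NULL ? std::string(str) : std::string(str, comma - str);
}

int main() {
  // A stringized test list looks roughly like this:
  const char* names = "DoesFoo, DoesBar, DoesBaz";
  for (const char* p = names; p != NULL; p = DemoSkipComma(p))
    std::printf("test: %s\n", DemoPrefixUntilComma(p).c_str());
  return 0;
}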
-
-// TypeParameterizedTest<Fixture, TestSel, Types>::Register()
-// registers a list of type-parameterized tests with Google Test. The
-// return value is insignificant - we just need to return something
-// such that we can call this function in a namespace scope.
-//
-// Implementation note: The GTEST_TEMPLATE_ macro declares a template
-// template parameter. It's defined in gtest-type-util.h.
-template <GTEST_TEMPLATE_ Fixture, class TestSel, typename Types>
-class TypeParameterizedTest {
- public:
- // 'index' is the index of the test in the type list 'Types'
- // specified in INSTANTIATE_TYPED_TEST_CASE_P(Prefix, TestCase,
- // Types). Valid values for 'index' are [0, N - 1] where N is the
- // length of Types.
- static bool Register(const char* prefix, const char* case_name,
- const char* test_names, int index) {
- typedef typename Types::Head Type;
- typedef Fixture<Type> FixtureClass;
- typedef typename GTEST_BIND_(TestSel, Type) TestClass;
-
- // First, registers the first type-parameterized test in the type
- // list.
- MakeAndRegisterTestInfo(
- String::Format("%s%s%s/%d", prefix, prefix[0] == '\0' ? "" : "/",
- case_name, index).c_str(),
- GetPrefixUntilComma(test_names).c_str(),
- String::Format("TypeParam = %s", GetTypeName<Type>().c_str()).c_str(),
- "",
- GetTypeId<FixtureClass>(),
- TestClass::SetUpTestCase,
- TestClass::TearDownTestCase,
- new TestFactoryImpl<TestClass>);
-
- // Next, recurses (at compile time) with the tail of the type list.
- return TypeParameterizedTest<Fixture, TestSel, typename Types::Tail>
- ::Register(prefix, case_name, test_names, index + 1);
- }
-};
-
-// The base case for the compile time recursion.
-template <GTEST_TEMPLATE_ Fixture, class TestSel>
-class TypeParameterizedTest<Fixture, TestSel, Types0> {
- public:
- static bool Register(const char* /*prefix*/, const char* /*case_name*/,
- const char* /*test_names*/, int /*index*/) {
- return true;
- }
-};
-
-// TypeParameterizedTestCase<Fixture, Tests, Types>::Register()
-// registers *all combinations* of 'Tests' and 'Types' with Google
-// Test. The return value is insignificant - we just need to return
-// something such that we can call this function in a namespace scope.
-template <GTEST_TEMPLATE_ Fixture, typename Tests, typename Types>
-class TypeParameterizedTestCase {
- public:
- static bool Register(const char* prefix, const char* case_name,
- const char* test_names) {
- typedef typename Tests::Head Head;
-
- // First, registers the first test in 'Tests' for each type in 'Types'.
- TypeParameterizedTest<Fixture, Head, Types>::Register(
- prefix, case_name, test_names, 0);
-
- // Next, recurses (at compile time) with the tail of the test list.
- return TypeParameterizedTestCase<Fixture, typename Tests::Tail, Types>
- ::Register(prefix, case_name, SkipComma(test_names));
- }
-};
-
-// The base case for the compile time recursion.
-template <GTEST_TEMPLATE_ Fixture, typename Types>
-class TypeParameterizedTestCase<Fixture, Templates0, Types> {
- public:
- static bool Register(const char* prefix, const char* case_name,
- const char* test_names) {
- return true;
- }
-};
-
-#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
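The compile-time recursion over Types::Head and Types::Tail, with Types0 and Templates0 as base cases, is the core of both Register() templates above. A minimal standalone model of that pattern (TypeList, None and ForEach are invented names, not the ones from gtest-type-util.h) looks like this:

#include <cstdio>
#include <typeinfo>

// A minimal cons-style type list.
struct None {};
template <typename H, typename T>
struct TypeList { typedef H Head; typedef T Tail; };

// Recursive case: handle Head, then recurse on Tail, mirroring
// TypeParameterizedTest<Fixture, TestSel, Types>::Register().
template <typename List>
struct ForEach {
  static bool Run(int index) {
    std::printf("registering type #%d: %s\n", index,
                typeid(typename List::Head).name());  // implementation-defined name
    return ForEach<typename List::Tail>::Run(index + 1);
  }
};

// Base case, playing the role of the Types0 specialization.
template <>
struct ForEach<None> {
  static bool Run(int) { return true; }
};

int main() {
  typedef TypeList<int, TypeList<double, TypeList<char, None> > > MyTypes;
  return ForEach<MyTypes>::Run(0) ? 0 : 1;
}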
-
-// Returns the current OS stack trace as a String.
-//
-// The maximum number of stack frames to be included is specified by
-// the gtest_stack_trace_depth flag. The skip_count parameter
-// specifies the number of top frames to be skipped, which doesn't
-// count against the number of frames to be included.
-//
-// For example, if Foo() calls Bar(), which in turn calls
-// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
-// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
-String GetCurrentOsStackTraceExceptTop(UnitTest* unit_test, int skip_count);
-
-// Returns the number of failed test parts in the given test result object.
-int GetFailedPartCount(const TestResult* result);
-
-} // namespace internal
-} // namespace testing
-
-#define GTEST_MESSAGE_(message, result_type) \
- ::testing::internal::AssertHelper(result_type, __FILE__, __LINE__, message) \
- = ::testing::Message()
-
-#define GTEST_FATAL_FAILURE_(message) \
- return GTEST_MESSAGE_(message, ::testing::TPRT_FATAL_FAILURE)
-
-#define GTEST_NONFATAL_FAILURE_(message) \
- GTEST_MESSAGE_(message, ::testing::TPRT_NONFATAL_FAILURE)
-
-#define GTEST_SUCCESS_(message) \
- GTEST_MESSAGE_(message, ::testing::TPRT_SUCCESS)
-
-#define GTEST_TEST_THROW_(statement, expected_exception, fail) \
- GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
- if (const char* gtest_msg = "") { \
- bool gtest_caught_expected = false; \
- try { \
- statement; \
- } \
- catch (expected_exception const&) { \
- gtest_caught_expected = true; \
- } \
- catch (...) { \
- gtest_msg = "Expected: " #statement " throws an exception of type " \
- #expected_exception ".\n Actual: it throws a different " \
- "type."; \
- goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
- } \
- if (!gtest_caught_expected) { \
- gtest_msg = "Expected: " #statement " throws an exception of type " \
- #expected_exception ".\n Actual: it throws nothing."; \
- goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
- } \
- } else \
- GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__): \
- fail(gtest_msg)
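The shape of this macro is worth spelling out: the condition if (const char* gtest_msg = "") holds a non-null pointer, so the if-branch always runs, and the else-branch is reachable only through the goto, which is how a failure message reaches fail(). A cut-down standalone imitation (EXPECT_THROWS, Report and CONCAT are demo names, not gtest's) behaves the same way:

#include <cstdio>
#include <stdexcept>

#define CONCAT_IMPL(a, b) a ## b
#define CONCAT(a, b) CONCAT_IMPL(a, b)

// Run `statement` and, if the expected exception is not seen, jump into the
// else-branch to report it. The "" initializer is a non-null pointer, so the
// if-branch always executes; the else-branch is reached only via the goto.
#define EXPECT_THROWS(statement, expected_exception, fail)    \
  if (const char* msg_ = "") {                                \
    bool caught_ = false;                                     \
    try { statement; }                                        \
    catch (expected_exception const&) { caught_ = true; }     \
    catch (...) {                                             \
      msg_ = "threw the wrong exception type";                \
      goto CONCAT(demo_label_, __LINE__);                     \
    }                                                         \
    if (!caught_) {                                           \
      msg_ = "threw nothing";                                 \
      goto CONCAT(demo_label_, __LINE__);                     \
    }                                                         \
  } else                                                      \
    CONCAT(demo_label_, __LINE__):                            \
      fail(msg_)

static void Report(const char* msg) { std::printf("failure: %s\n", msg); }

int main() {
  EXPECT_THROWS(throw std::runtime_error("boom"), std::runtime_error, Report);  // passes silently
  EXPECT_THROWS((void)0, std::runtime_error, Report);  // prints "failure: threw nothing"
  return 0;
}

Using a goto rather than a return keeps the macro usable inside void and non-void test bodies alike; only the *_FATAL_* variant above adds the return.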
-
-#define GTEST_TEST_NO_THROW_(statement, fail) \
- GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
- if (const char* gtest_msg = "") { \
- try { \
- statement; \
- } \
- catch (...) { \
- gtest_msg = "Expected: " #statement " doesn't throw an exception.\n" \
- " Actual: it throws."; \
- goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \
- } \
- } else \
- GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__): \
- fail(gtest_msg)
-
-#define GTEST_TEST_ANY_THROW_(statement, fail) \
- GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
- if (const char* gtest_msg = "") { \
- bool gtest_caught_any = false; \
- try { \
- statement; \
- } \
- catch (...) { \
- gtest_caught_any = true; \
- } \
- if (!gtest_caught_any) { \
- gtest_msg = "Expected: " #statement " throws an exception.\n" \
- " Actual: it doesn't."; \
- goto GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__); \
- } \
- } else \
- GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__): \
- fail(gtest_msg)
-
-
-#define GTEST_TEST_BOOLEAN_(boolexpr, booltext, actual, expected, fail) \
- GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
- if (boolexpr) \
- ; \
- else \
- fail("Value of: " booltext "\n Actual: " #actual "\nExpected: " #expected)
-
-#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \
- GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
- if (const char* gtest_msg = "") { \
- ::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \
- { statement; } \
- if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \
- gtest_msg = "Expected: " #statement " doesn't generate new fatal " \
- "failures in the current thread.\n" \
- " Actual: it does."; \
- goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \
- } \
- } else \
- GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__): \
- fail(gtest_msg)
-
-// Expands to the name of the class that implements the given test.
-#define GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
- test_case_name##_##test_name##_Test
-
-// Helper macro for defining tests.
-#define GTEST_TEST_(test_case_name, test_name, parent_class, parent_id)\
-class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) : public parent_class {\
- public:\
- GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {}\
- private:\
- virtual void TestBody();\
- static ::testing::TestInfo* const test_info_;\
- GTEST_DISALLOW_COPY_AND_ASSIGN_(\
- GTEST_TEST_CLASS_NAME_(test_case_name, test_name));\
-};\
-\
-::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_case_name, test_name)\
- ::test_info_ =\
- ::testing::internal::MakeAndRegisterTestInfo(\
- #test_case_name, #test_name, "", "", \
- (parent_id), \
- parent_class::SetUpTestCase, \
- parent_class::TearDownTestCase, \
- new ::testing::internal::TestFactoryImpl<\
- GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>);\
-void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
-
-#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
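The registration side of GTEST_TEST_ boils down to one pattern: the generated class carries a static member whose initializer runs before main() and hands a factory for the class to the framework. A self-contained sketch of that pattern with a plain vector as the registry (DemoTest, DEMO_TEST, Registry and the rest are invented for the demo, not gtest API) is:

#include <cstdio>
#include <vector>

class DemoTest {
 public:
  virtual ~DemoTest() {}
  virtual void TestBody() = 0;
};

struct DemoTestInfo {
  const char* name;
  DemoTest* (*factory)();
};

static std::vector<DemoTestInfo>& Registry() {
  static std::vector<DemoTestInfo> r;   // constructed on first use
  return r;
}

static bool RegisterDemoTest(const char* name, DemoTest* (*factory)()) {
  DemoTestInfo info = { name, factory };
  Registry().push_back(info);
  return true;
}

#define DEMO_TEST(test_case, test_name)                                        \
  class test_case##_##test_name##_Test : public DemoTest {                     \
   public:                                                                     \
    virtual void TestBody();                                                   \
    static DemoTest* Create() { return new test_case##_##test_name##_Test; }   \
    static const bool registered_;                                             \
  };                                                                           \
  const bool test_case##_##test_name##_Test::registered_ =                     \
      RegisterDemoTest(#test_case "." #test_name,                              \
                       &test_case##_##test_name##_Test::Create);               \
  void test_case##_##test_name##_Test::TestBody()

DEMO_TEST(MathTest, Adds) { std::printf("running MathTest.Adds\n"); }

int main() {
  for (std::vector<DemoTestInfo>::size_type i = 0; i < Registry().size(); ++i) {
    std::printf("found %s\n", Registry()[i].name);
    DemoTest* t = Registry()[i].factory();
    t->TestBody();
    delete t;
  }
  return 0;
}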
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-linked_ptr.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-linked_ptr.h
deleted file mode 100644
index f98af0b..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-linked_ptr.h
+++ /dev/null
@@ -1,242 +0,0 @@
-// Copyright 2003 Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors: Dan Egnor (egnor at google.com)
-//
-// A "smart" pointer type with reference tracking. Every pointer to a
-// particular object is kept on a circular linked list. When the last pointer
-// to an object is destroyed or reassigned, the object is deleted.
-//
-// Used properly, this deletes the object when the last reference goes away.
-// There are several caveats:
-// - Like all reference counting schemes, cycles lead to leaks.
-// - Each smart pointer is actually two pointers (8 bytes instead of 4).
-// - Every time a pointer is assigned, the entire list of pointers to that
-// object is traversed. This class is therefore NOT SUITABLE when there
-// will often be more than two or three pointers to a particular object.
-// - References are only tracked as long as linked_ptr<> objects are copied.
-// If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS
-// will happen (double deletion).
-//
-// A good use of this class is storing object references in STL containers.
-// You can safely put linked_ptr<> in a vector<>.
-// Other uses may not be as good.
-//
-// Note: If you use an incomplete type with linked_ptr<>, the class
-// *containing* linked_ptr<> must have a constructor and destructor (even
-// if they do nothing!).
-//
-// Bill Gibbons suggested we use something like this.
-//
-// Thread Safety:
-// Unlike other linked_ptr implementations, in this implementation
-// a linked_ptr object is thread-safe in the sense that:
-// - it's safe to copy linked_ptr objects concurrently,
-// - it's safe to copy *from* a linked_ptr and read its underlying
-// raw pointer (e.g. via get()) concurrently, and
-// - it's safe to write to two linked_ptrs that point to the same
-// shared object concurrently.
-// TODO(wan at google.com): rename this to safe_linked_ptr to avoid
-// confusion with normal linked_ptr.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
-
-#include <stdlib.h>
-#include <assert.h>
-
-#include <gtest/internal/gtest-port.h>
-
-namespace testing {
-namespace internal {
-
-// Protects copying of all linked_ptr objects.
-extern Mutex g_linked_ptr_mutex;
-
-// This is used internally by all instances of linked_ptr<>. It needs to be
-// a non-template class because different types of linked_ptr<> can refer to
-// the same object (linked_ptr<Superclass>(obj) vs linked_ptr<Subclass>(obj)).
-// So, it needs to be possible for different types of linked_ptr to participate
-// in the same circular linked list, so we need a single class type here.
-//
-// DO NOT USE THIS CLASS DIRECTLY YOURSELF. Use linked_ptr<T>.
-class linked_ptr_internal {
- public:
- // Create a new circle that includes only this instance.
- void join_new() {
- next_ = this;
- }
-
- // Many linked_ptr operations may change p.link_ for some linked_ptr
- // variable p in the same circle as this object. Therefore we need
- // to prevent two such operations from occurring concurrently.
- //
- // Note that different types of linked_ptr objects can coexist in a
- // circle (e.g. linked_ptr<Base>, linked_ptr<Derived1>, and
- // linked_ptr<Derived2>). Therefore we must use a single mutex to
- // protect all linked_ptr objects. This can create serious
- // contention in production code, but is acceptable in a testing
- // framework.
-
- // Join an existing circle.
- // L < g_linked_ptr_mutex
- void join(linked_ptr_internal const* ptr) {
- MutexLock lock(&g_linked_ptr_mutex);
-
- linked_ptr_internal const* p = ptr;
- while (p->next_ != ptr) p = p->next_;
- p->next_ = this;
- next_ = ptr;
- }
-
- // Leave whatever circle we're part of. Returns true if we were the
- // last member of the circle. Once this is done, you can join() another.
- // L < g_linked_ptr_mutex
- bool depart() {
- MutexLock lock(&g_linked_ptr_mutex);
-
- if (next_ == this) return true;
- linked_ptr_internal const* p = next_;
- while (p->next_ != this) p = p->next_;
- p->next_ = next_;
- return false;
- }
-
- private:
- mutable linked_ptr_internal const* next_;
-};
-
-template <typename T>
-class linked_ptr {
- public:
- typedef T element_type;
-
- // Take over ownership of a raw pointer. This should happen as soon as
- // possible after the object is created.
- explicit linked_ptr(T* ptr = NULL) { capture(ptr); }
- ~linked_ptr() { depart(); }
-
- // Copy an existing linked_ptr<>, adding ourselves to the list of references.
- template <typename U> linked_ptr(linked_ptr<U> const& ptr) { copy(&ptr); }
- linked_ptr(linked_ptr const& ptr) { // NOLINT
- assert(&ptr != this);
- copy(&ptr);
- }
-
- // Assignment releases the old value and acquires the new.
- template <typename U> linked_ptr& operator=(linked_ptr<U> const& ptr) {
- depart();
- copy(&ptr);
- return *this;
- }
-
- linked_ptr& operator=(linked_ptr const& ptr) {
- if (&ptr != this) {
- depart();
- copy(&ptr);
- }
- return *this;
- }
-
- // Smart pointer members.
- void reset(T* ptr = NULL) {
- depart();
- capture(ptr);
- }
- T* get() const { return value_; }
- T* operator->() const { return value_; }
- T& operator*() const { return *value_; }
- // Releases ownership of the pointed-to object and returns it.
- // Sole ownership by this linked_ptr object is required.
- T* release() {
- bool last = link_.depart();
- assert(last);
- T* v = value_;
- value_ = NULL;
- return v;
- }
-
- bool operator==(T* p) const { return value_ == p; }
- bool operator!=(T* p) const { return value_ != p; }
- template <typename U>
- bool operator==(linked_ptr<U> const& ptr) const {
- return value_ == ptr.get();
- }
- template <typename U>
- bool operator!=(linked_ptr<U> const& ptr) const {
- return value_ != ptr.get();
- }
-
- private:
- template <typename U>
- friend class linked_ptr;
-
- T* value_;
- linked_ptr_internal link_;
-
- void depart() {
- if (link_.depart()) delete value_;
- }
-
- void capture(T* ptr) {
- value_ = ptr;
- link_.join_new();
- }
-
- template <typename U> void copy(linked_ptr<U> const* ptr) {
- value_ = ptr->get();
- if (value_)
- link_.join(&ptr->link_);
- else
- link_.join_new();
- }
-};
-
-template<typename T> inline
-bool operator==(T* ptr, const linked_ptr<T>& x) {
- return ptr == x.get();
-}
-
-template<typename T> inline
-bool operator!=(T* ptr, const linked_ptr<T>& x) {
- return ptr != x.get();
-}
-
-// A function to convert T* into linked_ptr<T>
-// Doing e.g. make_linked_ptr(new FooBarBaz<type>(arg)) is a shorter notation
-// for linked_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
-template <typename T>
-linked_ptr<T> make_linked_ptr(T* ptr) {
- return linked_ptr<T>(ptr);
-}
-
-} // namespace internal
-} // namespace testing
-
-#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
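The essential bookkeeping here is the circular list of nodes: join_new() starts a circle of one, join() splices a node in, and depart() reports whether the departing node was the last one, at which point the owner deletes the object. A single-threaded toy version (RefNode and Payload are demo names; the real linked_ptr_internal does the same under g_linked_ptr_mutex) shows that only the last depart() triggers deletion:

#include <cstdio>

class RefNode {
 public:
  void join_new() { next_ = this; }           // start a new circle of one
  void join(RefNode const* ptr) {             // splice ourselves into ptr's circle
    RefNode const* p = ptr;
    while (p->next_ != ptr) p = p->next_;
    p->next_ = this;
    next_ = ptr;
  }
  bool depart() {                             // leave the circle; true if we were last
    if (next_ == this) return true;
    RefNode const* p = next_;
    while (p->next_ != this) p = p->next_;
    p->next_ = next_;
    return false;
  }
 private:
  mutable RefNode const* next_;
};

struct Payload {
  ~Payload() { std::printf("payload deleted\n"); }
};

int main() {
  Payload* raw = new Payload;
  RefNode a, b;
  a.join_new();                 // first reference to raw
  b.join(&a);                   // second reference joins the circle
  if (a.depart()) delete raw;   // not the last reference: nothing printed
  if (b.depart()) delete raw;   // last reference: prints "payload deleted"
  return 0;
}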
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-param-util-generated.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-param-util-generated.h
deleted file mode 100644
index 17f3f7b..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-param-util-generated.h
+++ /dev/null
@@ -1,4572 +0,0 @@
-// This file was GENERATED by a script. DO NOT EDIT BY HAND!!!
-
-// Copyright 2008 Google Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: vladl at google.com (Vlad Losev)
-
-// Type and function utilities for implementing parameterized tests.
-// This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
-//
-// Currently Google Test supports at most 50 arguments in Values,
-// and at most 10 arguments in Combine. Please contact
-// googletestframework at googlegroups.com if you need more.
-// Please note that the number of arguments to Combine is limited
-// by the maximum arity of the implementation of tr1::tuple which is
-// currently set at 10.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
-
-#include <gtest/internal/gtest-port.h>
-
-#ifdef GTEST_HAS_PARAM_TEST
-
-#include <gtest/internal/gtest-param-util.h>
-
-namespace testing {
-namespace internal {
-
-// Used in the Values() function to provide polymorphic capabilities.
-template <typename T1>
-class ValueArray1 {
- public:
- explicit ValueArray1(T1 v1) : v1_(v1) {}
-
- template <typename T>
- operator ParamGenerator<T>() const { return ValuesIn(&v1_, &v1_ + 1); }
-
- private:
- const T1 v1_;
-};
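ValueArray1 and its larger siblings below all follow the same scheme: store the arguments with their original types and only convert them to the test's parameter type T when the templated conversion operator fires. A reduced model using std::vector<T> in place of ParamGenerator<T> (DemoValueArray3 and DemoValues are invented names) shows why a single Values()-style call can feed differently typed test suites:

#include <cstdio>
#include <vector>

template <typename T1, typename T2, typename T3>
class DemoValueArray3 {
 public:
  DemoValueArray3(T1 v1, T2 v2, T3 v3) : v1_(v1), v2_(v2), v3_(v3) {}

  // Conversion happens only here, once the element type T is known.
  template <typename T>
  operator std::vector<T>() const {          // stand-in for ParamGenerator<T>
    std::vector<T> out;
    out.push_back(v1_);
    out.push_back(v2_);
    out.push_back(v3_);
    return out;
  }

 private:
  const T1 v1_;
  const T2 v2_;
  const T3 v3_;
};

template <typename T1, typename T2, typename T3>
DemoValueArray3<T1, T2, T3> DemoValues(T1 v1, T2 v2, T3 v3) {
  return DemoValueArray3<T1, T2, T3>(v1, v2, v3);
}

int main() {
  // The same DemoValues() call can feed an int-typed or a double-typed
  // consumer; the stored values are converted at the use site.
  std::vector<int> as_int = DemoValues(1, 2, 3);
  std::vector<double> as_double = DemoValues(1, 2.5f, 3);
  std::printf("%d %f\n", as_int[1], as_double[1]);   // 2 2.500000
  return 0;
}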
-
-template <typename T1, typename T2>
-class ValueArray2 {
- public:
- ValueArray2(T1 v1, T2 v2) : v1_(v1), v2_(v2) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
-};
-
-template <typename T1, typename T2, typename T3>
-class ValueArray3 {
- public:
- ValueArray3(T1 v1, T2 v2, T3 v3) : v1_(v1), v2_(v2), v3_(v3) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4>
-class ValueArray4 {
- public:
- ValueArray4(T1 v1, T2 v2, T3 v3, T4 v4) : v1_(v1), v2_(v2), v3_(v3),
- v4_(v4) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5>
-class ValueArray5 {
- public:
- ValueArray5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) : v1_(v1), v2_(v2), v3_(v3),
- v4_(v4), v5_(v5) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6>
-class ValueArray6 {
- public:
- ValueArray6(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6) : v1_(v1), v2_(v2),
- v3_(v3), v4_(v4), v5_(v5), v6_(v6) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7>
-class ValueArray7 {
- public:
- ValueArray7(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7) : v1_(v1),
- v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8>
-class ValueArray8 {
- public:
- ValueArray8(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
- T8 v8) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
- v8_(v8) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9>
-class ValueArray9 {
- public:
- ValueArray9(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
- T9 v9) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
- v8_(v8), v9_(v9) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10>
-class ValueArray10 {
- public:
- ValueArray10(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
- v8_(v8), v9_(v9), v10_(v10) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11>
-class ValueArray11 {
- public:
- ValueArray11(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
- v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12>
-class ValueArray12 {
- public:
- ValueArray12(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
- v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13>
-class ValueArray13 {
- public:
- ValueArray13(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
- v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
- v12_(v12), v13_(v13) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14>
-class ValueArray14 {
- public:
- ValueArray14(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) : v1_(v1), v2_(v2), v3_(v3),
- v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
- v11_(v11), v12_(v12), v13_(v13), v14_(v14) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15>
-class ValueArray15 {
- public:
- ValueArray15(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) : v1_(v1), v2_(v2),
- v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
- v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16>
-class ValueArray16 {
- public:
- ValueArray16(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16) : v1_(v1),
- v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
- v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
- v16_(v16) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17>
-class ValueArray17 {
- public:
- ValueArray17(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
- T17 v17) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
- v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
- v15_(v15), v16_(v16), v17_(v17) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18>
-class ValueArray18 {
- public:
- ValueArray18(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
- v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
- v15_(v15), v16_(v16), v17_(v17), v18_(v18) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19>
-class ValueArray19 {
- public:
- ValueArray19(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
- v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
- v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20>
-class ValueArray20 {
- public:
- ValueArray20(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
- v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
- v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
- v19_(v19), v20_(v20) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21>
-class ValueArray21 {
- public:
- ValueArray21(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
- v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
- v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
- v18_(v18), v19_(v19), v20_(v20), v21_(v21) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22>
-class ValueArray22 {
- public:
- ValueArray22(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22) : v1_(v1), v2_(v2), v3_(v3),
- v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
- v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
- v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23>
-class ValueArray23 {
- public:
- ValueArray23(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23) : v1_(v1), v2_(v2),
- v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
- v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
- v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
- v23_(v23) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_,
- v23_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24>
-class ValueArray24 {
- public:
- ValueArray24(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24) : v1_(v1),
- v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
- v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
- v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
- v22_(v22), v23_(v23), v24_(v24) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25>
-class ValueArray25 {
- public:
- ValueArray25(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
- T25 v25) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
- v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
- v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
- v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26>
-class ValueArray26 {
- public:
- ValueArray26(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
- v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
- v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
- v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27>
-class ValueArray27 {
- public:
- ValueArray27(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
- v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
- v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
- v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
- v26_(v26), v27_(v27) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28>
-class ValueArray28 {
- public:
- ValueArray28(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
- v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
- v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
- v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
- v25_(v25), v26_(v26), v27_(v27), v28_(v28) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29>
-class ValueArray29 {
- public:
- ValueArray29(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
- v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
- v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
- v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
- v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30>
-class ValueArray30 {
- public:
- ValueArray30(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) : v1_(v1), v2_(v2), v3_(v3),
- v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
- v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
- v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
- v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
- v29_(v29), v30_(v30) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31>
-class ValueArray31 {
- public:
- ValueArray31(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) : v1_(v1), v2_(v2),
- v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
- v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
- v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
- v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
- v29_(v29), v30_(v30), v31_(v31) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32>
-class ValueArray32 {
- public:
- ValueArray32(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32) : v1_(v1),
- v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
- v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
- v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
- v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
- v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33>
-class ValueArray33 {
- public:
- ValueArray33(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32,
- T33 v33) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
- v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
- v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
- v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
- v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
- v33_(v33) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34>
-class ValueArray34 {
- public:
- ValueArray34(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
- v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
- v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
- v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
- v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
- v33_(v33), v34_(v34) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35>
-class ValueArray35 {
- public:
- ValueArray35(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
- v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
- v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
- v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
- v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31),
- v32_(v32), v33_(v33), v34_(v34), v35_(v35) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_,
- v35_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
- const T35 v35_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36>
-class ValueArray36 {
- public:
- ValueArray36(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
- v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
- v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
- v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
- v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30),
- v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
- v36_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
- const T35 v35_;
- const T36 v36_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37>
-class ValueArray37 {
- public:
- ValueArray37(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
- v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
- v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
- v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
- v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29),
- v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35),
- v36_(v36), v37_(v37) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
- v36_, v37_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
- const T35 v35_;
- const T36 v36_;
- const T37 v37_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38>
-class ValueArray38 {
- public:
- ValueArray38(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38) : v1_(v1), v2_(v2), v3_(v3),
- v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
- v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
- v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
- v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
- v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
- v35_(v35), v36_(v36), v37_(v37), v38_(v38) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
- v36_, v37_, v38_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
- const T35 v35_;
- const T36 v36_;
- const T37 v37_;
- const T38 v38_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39>
-class ValueArray39 {
- public:
- ValueArray39(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39) : v1_(v1), v2_(v2),
- v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
- v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
- v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
- v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
- v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
- v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
- v36_, v37_, v38_, v39_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
- const T35 v35_;
- const T36 v36_;
- const T37 v37_;
- const T38 v38_;
- const T39 v39_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40>
-class ValueArray40 {
- public:
- ValueArray40(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) : v1_(v1),
- v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
- v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
- v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
- v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
- v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33),
- v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39),
- v40_(v40) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
- v36_, v37_, v38_, v39_, v40_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
- const T35 v35_;
- const T36 v36_;
- const T37 v37_;
- const T38 v38_;
- const T39 v39_;
- const T40 v40_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41>
-class ValueArray41 {
- public:
- ValueArray41(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40,
- T41 v41) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
- v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
- v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
- v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
- v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
- v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
- v39_(v39), v40_(v40), v41_(v41) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
- v36_, v37_, v38_, v39_, v40_, v41_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
- const T35 v35_;
- const T36 v36_;
- const T37 v37_;
- const T38 v38_;
- const T39 v39_;
- const T40 v40_;
- const T41 v41_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42>
-class ValueArray42 {
- public:
- ValueArray42(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
- T42 v42) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
- v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
- v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
- v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
- v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
- v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
- v39_(v39), v40_(v40), v41_(v41), v42_(v42) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
- v36_, v37_, v38_, v39_, v40_, v41_, v42_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
- const T35 v35_;
- const T36 v36_;
- const T37 v37_;
- const T38 v38_;
- const T39 v39_;
- const T40 v40_;
- const T41 v41_;
- const T42 v42_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43>
-class ValueArray43 {
- public:
- ValueArray43(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
- T42 v42, T43 v43) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
- v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
- v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
- v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
- v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31),
- v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37),
- v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
- v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
- const T35 v35_;
- const T36 v36_;
- const T37 v37_;
- const T38 v38_;
- const T39 v39_;
- const T40 v40_;
- const T41 v41_;
- const T42 v42_;
- const T43 v43_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44>
-class ValueArray44 {
- public:
- ValueArray44(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
- T42 v42, T43 v43, T44 v44) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
- v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
- v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
- v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
- v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30),
- v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36),
- v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42),
- v43_(v43), v44_(v44) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
- v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
- const T35 v35_;
- const T36 v36_;
- const T37 v37_;
- const T38 v38_;
- const T39 v39_;
- const T40 v40_;
- const T41 v41_;
- const T42 v42_;
- const T43 v43_;
- const T44 v44_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45>
-class ValueArray45 {
- public:
- ValueArray45(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
- T42 v42, T43 v43, T44 v44, T45 v45) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
- v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
- v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
- v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
- v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29),
- v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35),
- v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41),
- v42_(v42), v43_(v43), v44_(v44), v45_(v45) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
- v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
- const T35 v35_;
- const T36 v36_;
- const T37 v37_;
- const T38 v38_;
- const T39 v39_;
- const T40 v40_;
- const T41 v41_;
- const T42 v42_;
- const T43 v43_;
- const T44 v44_;
- const T45 v45_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46>
-class ValueArray46 {
- public:
- ValueArray46(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
- T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) : v1_(v1), v2_(v2), v3_(v3),
- v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
- v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
- v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
- v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
- v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
- v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40),
- v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
- v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
- const T35 v35_;
- const T36 v36_;
- const T37 v37_;
- const T38 v38_;
- const T39 v39_;
- const T40 v40_;
- const T41 v41_;
- const T42 v42_;
- const T43 v43_;
- const T44 v44_;
- const T45 v45_;
- const T46 v46_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46, typename T47>
-class ValueArray47 {
- public:
- ValueArray47(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
- T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) : v1_(v1), v2_(v2),
- v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
- v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
- v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
- v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
- v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
- v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40),
- v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46),
- v47_(v47) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
- v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_,
- v47_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
- const T35 v35_;
- const T36 v36_;
- const T37 v37_;
- const T38 v38_;
- const T39 v39_;
- const T40 v40_;
- const T41 v41_;
- const T42 v42_;
- const T43 v43_;
- const T44 v44_;
- const T45 v45_;
- const T46 v46_;
- const T47 v47_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46, typename T47, typename T48>
-class ValueArray48 {
- public:
- ValueArray48(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
- T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48) : v1_(v1),
- v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
- v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
- v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
- v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
- v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33),
- v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39),
- v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45),
- v46_(v46), v47_(v47), v48_(v48) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
- v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_,
- v48_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
- const T35 v35_;
- const T36 v36_;
- const T37 v37_;
- const T38 v38_;
- const T39 v39_;
- const T40 v40_;
- const T41 v41_;
- const T42 v42_;
- const T43 v43_;
- const T44 v44_;
- const T45 v45_;
- const T46 v46_;
- const T47 v47_;
- const T48 v48_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46, typename T47, typename T48, typename T49>
-class ValueArray49 {
- public:
- ValueArray49(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
- T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48,
- T49 v49) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
- v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
- v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
- v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
- v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
- v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
- v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44),
- v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
- v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_,
- v48_, v49_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
- const T35 v35_;
- const T36 v36_;
- const T37 v37_;
- const T38 v38_;
- const T39 v39_;
- const T40 v40_;
- const T41 v41_;
- const T42 v42_;
- const T43 v43_;
- const T44 v44_;
- const T45 v45_;
- const T46 v46_;
- const T47 v47_;
- const T48 v48_;
- const T49 v49_;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46, typename T47, typename T48, typename T49, typename T50>
-class ValueArray50 {
- public:
- ValueArray50(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
- T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
- T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
- T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
- T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
- T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, T49 v49,
- T50 v50) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
- v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
- v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
- v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
- v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
- v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
- v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44),
- v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49), v50_(v50) {}
-
- template <typename T>
- operator ParamGenerator<T>() const {
- const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
- v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
- v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
- v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_,
- v48_, v49_, v50_};
- return ValuesIn(array);
- }
-
- private:
- const T1 v1_;
- const T2 v2_;
- const T3 v3_;
- const T4 v4_;
- const T5 v5_;
- const T6 v6_;
- const T7 v7_;
- const T8 v8_;
- const T9 v9_;
- const T10 v10_;
- const T11 v11_;
- const T12 v12_;
- const T13 v13_;
- const T14 v14_;
- const T15 v15_;
- const T16 v16_;
- const T17 v17_;
- const T18 v18_;
- const T19 v19_;
- const T20 v20_;
- const T21 v21_;
- const T22 v22_;
- const T23 v23_;
- const T24 v24_;
- const T25 v25_;
- const T26 v26_;
- const T27 v27_;
- const T28 v28_;
- const T29 v29_;
- const T30 v30_;
- const T31 v31_;
- const T32 v32_;
- const T33 v33_;
- const T34 v34_;
- const T35 v35_;
- const T36 v36_;
- const T37 v37_;
- const T38 v38_;
- const T39 v39_;
- const T40 v40_;
- const T41 v41_;
- const T42 v42_;
- const T43 v43_;
- const T44 v44_;
- const T45 v45_;
- const T46 v46_;
- const T47 v47_;
- const T48 v48_;
- const T49 v49_;
- const T50 v50_;
-};
-
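Note on the removed ValueArray1..ValueArray50 family above: the embedded (pre-C++11) Google Test copy cannot use variadic templates, so one class per arity is expanded by hand, each packing its fixed set of values into an array and converting it through ValuesIn(). The following is an illustrative sketch only, not Google Test code and not part of this diff; the helper name pack_values is hypothetical, and it shows the same "collect a fixed value list into a homogeneous container" idea with a single variadic template.

// Illustrative sketch (C++11), assuming a hypothetical pack_values helper.
#include <iostream>
#include <vector>

template <typename T, typename... Ts>
std::vector<T> pack_values(T first, Ts... rest) {
  // Collect all arguments into one container, mirroring what each
  // ValueArrayN::operator ParamGenerator<T>() does with its fixed array
  // before handing it to ValuesIn().
  return std::vector<T>{first, static_cast<T>(rest)...};
}

int main() {
  const std::vector<int> values = pack_values(1, 2, 3, 5, 8);
  for (int v : values) std::cout << v << ' ';
  std::cout << '\n';  // prints: 1 2 3 5 8
  return 0;
}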
-#ifdef GTEST_HAS_COMBINE
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Generates values from the Cartesian product of values produced
-// by the argument generators.
-//
-template <typename T1, typename T2>
-class CartesianProductGenerator2
- : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2> > {
- public:
- typedef ::std::tr1::tuple<T1, T2> ParamType;
-
- CartesianProductGenerator2(const ParamGenerator<T1>& g1,
- const ParamGenerator<T2>& g2)
- : g1_(g1), g2_(g2) {}
- virtual ~CartesianProductGenerator2() {}
-
- virtual ParamIteratorInterface<ParamType>* Begin() const {
- return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin());
- }
- virtual ParamIteratorInterface<ParamType>* End() const {
- return new Iterator(this, g1_, g1_.end(), g2_, g2_.end());
- }
-
- private:
- class Iterator : public ParamIteratorInterface<ParamType> {
- public:
- Iterator(const ParamGeneratorInterface<ParamType>* base,
- const ParamGenerator<T1>& g1,
- const typename ParamGenerator<T1>::iterator& current1,
- const ParamGenerator<T2>& g2,
- const typename ParamGenerator<T2>::iterator& current2)
- : base_(base),
- begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
- begin2_(g2.begin()), end2_(g2.end()), current2_(current2) {
- ComputeCurrentValue();
- }
- virtual ~Iterator() {}
-
- virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
- return base_;
- }
- // Advance should not be called on beyond-of-range iterators
- // so no component iterators must be beyond end of range, either.
- virtual void Advance() {
- assert(!AtEnd());
- ++current2_;
- if (current2_ == end2_) {
- current2_ = begin2_;
- ++current1_;
- }
- ComputeCurrentValue();
- }
- virtual ParamIteratorInterface<ParamType>* Clone() const {
- return new Iterator(*this);
- }
- virtual const ParamType* Current() const { return &current_value_; }
- virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
- // Having the same base generator guarantees that the other
- // iterator is of the same type and we can downcast.
- GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
- << "The program attempted to compare iterators "
- << "from different generators." << std::endl;
- const Iterator* typed_other =
- CheckedDowncastToActualType<const Iterator>(&other);
- // We must report iterators equal if they both point beyond their
- // respective ranges. That can happen in a variety of fashions,
- // so we have to consult AtEnd().
- return (AtEnd() && typed_other->AtEnd()) ||
- (
- current1_ == typed_other->current1_ &&
- current2_ == typed_other->current2_);
- }
-
- private:
- Iterator(const Iterator& other)
- : base_(other.base_),
- begin1_(other.begin1_),
- end1_(other.end1_),
- current1_(other.current1_),
- begin2_(other.begin2_),
- end2_(other.end2_),
- current2_(other.current2_) {
- ComputeCurrentValue();
- }
-
- void ComputeCurrentValue() {
- if (!AtEnd())
- current_value_ = ParamType(*current1_, *current2_);
- }
- bool AtEnd() const {
- // We must report iterator past the end of the range when either of the
- // component iterators has reached the end of its range.
- return
- current1_ == end1_ ||
- current2_ == end2_;
- }
-
- const ParamGeneratorInterface<ParamType>* const base_;
- // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
- // current[i]_ is the actual traversing iterator.
- const typename ParamGenerator<T1>::iterator begin1_;
- const typename ParamGenerator<T1>::iterator end1_;
- typename ParamGenerator<T1>::iterator current1_;
- const typename ParamGenerator<T2>::iterator begin2_;
- const typename ParamGenerator<T2>::iterator end2_;
- typename ParamGenerator<T2>::iterator current2_;
- ParamType current_value_;
- };
-
- const ParamGenerator<T1> g1_;
- const ParamGenerator<T2> g2_;
-};
-
-
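For context on the CartesianProductGeneratorN classes being removed here: each Iterator::Advance() steps the rightmost component iterator and, when it wraps past its end, resets it and carries into the component to its left, odometer-style, until current1_ finally runs off the end. The sketch below is illustrative only and is not part of this diff or of Google Test; it replays the same carry logic over plain index vectors, and the helper name advance_odometer is hypothetical.

// Illustrative sketch of the odometer-style carry used by Advance().
#include <cstddef>
#include <iostream>
#include <vector>

// Advances the rightmost index; on wrap-around it resets that index and
// carries into the one to its left, like Advance() carrying from
// currentN_ toward current1_. Returns false once every range is exhausted.
bool advance_odometer(std::vector<std::size_t>& idx,
                      const std::vector<std::size_t>& sizes) {
  for (std::size_t i = idx.size(); i-- > 0;) {
    if (++idx[i] < sizes[i]) return true;  // no carry needed
    idx[i] = 0;                            // wrap and carry left
  }
  return false;  // carried out of the leftmost range: past-the-end
}

int main() {
  const std::vector<std::size_t> sizes{2, 3};  // |g1| = 2, |g2| = 3
  std::vector<std::size_t> idx(sizes.size(), 0);
  do {
    std::cout << '(' << idx[0] << ',' << idx[1] << ") ";
  } while (advance_odometer(idx, sizes));
  std::cout << '\n';  // (0,0) (0,1) (0,2) (1,0) (1,1) (1,2)
  return 0;
}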
-template <typename T1, typename T2, typename T3>
-class CartesianProductGenerator3
- : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3> > {
- public:
- typedef ::std::tr1::tuple<T1, T2, T3> ParamType;
-
- CartesianProductGenerator3(const ParamGenerator<T1>& g1,
- const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3)
- : g1_(g1), g2_(g2), g3_(g3) {}
- virtual ~CartesianProductGenerator3() {}
-
- virtual ParamIteratorInterface<ParamType>* Begin() const {
- return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
- g3_.begin());
- }
- virtual ParamIteratorInterface<ParamType>* End() const {
- return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end());
- }
-
- private:
- class Iterator : public ParamIteratorInterface<ParamType> {
- public:
- Iterator(const ParamGeneratorInterface<ParamType>* base,
- const ParamGenerator<T1>& g1,
- const typename ParamGenerator<T1>::iterator& current1,
- const ParamGenerator<T2>& g2,
- const typename ParamGenerator<T2>::iterator& current2,
- const ParamGenerator<T3>& g3,
- const typename ParamGenerator<T3>::iterator& current3)
- : base_(base),
- begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
- begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
- begin3_(g3.begin()), end3_(g3.end()), current3_(current3) {
- ComputeCurrentValue();
- }
- virtual ~Iterator() {}
-
- virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
- return base_;
- }
- // Advance should not be called on beyond-of-range iterators
- // so no component iterators must be beyond end of range, either.
- virtual void Advance() {
- assert(!AtEnd());
- ++current3_;
- if (current3_ == end3_) {
- current3_ = begin3_;
- ++current2_;
- }
- if (current2_ == end2_) {
- current2_ = begin2_;
- ++current1_;
- }
- ComputeCurrentValue();
- }
- virtual ParamIteratorInterface<ParamType>* Clone() const {
- return new Iterator(*this);
- }
- virtual const ParamType* Current() const { return &current_value_; }
- virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
- // Having the same base generator guarantees that the other
- // iterator is of the same type and we can downcast.
- GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
- << "The program attempted to compare iterators "
- << "from different generators." << std::endl;
- const Iterator* typed_other =
- CheckedDowncastToActualType<const Iterator>(&other);
- // We must report iterators equal if they both point beyond their
- // respective ranges. That can happen in a variety of fashions,
- // so we have to consult AtEnd().
- return (AtEnd() && typed_other->AtEnd()) ||
- (
- current1_ == typed_other->current1_ &&
- current2_ == typed_other->current2_ &&
- current3_ == typed_other->current3_);
- }
-
- private:
- Iterator(const Iterator& other)
- : base_(other.base_),
- begin1_(other.begin1_),
- end1_(other.end1_),
- current1_(other.current1_),
- begin2_(other.begin2_),
- end2_(other.end2_),
- current2_(other.current2_),
- begin3_(other.begin3_),
- end3_(other.end3_),
- current3_(other.current3_) {
- ComputeCurrentValue();
- }
-
- void ComputeCurrentValue() {
- if (!AtEnd())
- current_value_ = ParamType(*current1_, *current2_, *current3_);
- }
- bool AtEnd() const {
- // We must report iterator past the end of the range when either of the
- // component iterators has reached the end of its range.
- return
- current1_ == end1_ ||
- current2_ == end2_ ||
- current3_ == end3_;
- }
-
- const ParamGeneratorInterface<ParamType>* const base_;
- // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
- // current[i]_ is the actual traversing iterator.
- const typename ParamGenerator<T1>::iterator begin1_;
- const typename ParamGenerator<T1>::iterator end1_;
- typename ParamGenerator<T1>::iterator current1_;
- const typename ParamGenerator<T2>::iterator begin2_;
- const typename ParamGenerator<T2>::iterator end2_;
- typename ParamGenerator<T2>::iterator current2_;
- const typename ParamGenerator<T3>::iterator begin3_;
- const typename ParamGenerator<T3>::iterator end3_;
- typename ParamGenerator<T3>::iterator current3_;
- ParamType current_value_;
- };
-
- const ParamGenerator<T1> g1_;
- const ParamGenerator<T2> g2_;
- const ParamGenerator<T3> g3_;
-};
-
-
-template <typename T1, typename T2, typename T3, typename T4>
-class CartesianProductGenerator4
- : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4> > {
- public:
- typedef ::std::tr1::tuple<T1, T2, T3, T4> ParamType;
-
- CartesianProductGenerator4(const ParamGenerator<T1>& g1,
- const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
- const ParamGenerator<T4>& g4)
- : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {}
- virtual ~CartesianProductGenerator4() {}
-
- virtual ParamIteratorInterface<ParamType>* Begin() const {
- return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
- g3_.begin(), g4_, g4_.begin());
- }
- virtual ParamIteratorInterface<ParamType>* End() const {
- return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
- g4_, g4_.end());
- }
-
- private:
- class Iterator : public ParamIteratorInterface<ParamType> {
- public:
- Iterator(const ParamGeneratorInterface<ParamType>* base,
- const ParamGenerator<T1>& g1,
- const typename ParamGenerator<T1>::iterator& current1,
- const ParamGenerator<T2>& g2,
- const typename ParamGenerator<T2>::iterator& current2,
- const ParamGenerator<T3>& g3,
- const typename ParamGenerator<T3>::iterator& current3,
- const ParamGenerator<T4>& g4,
- const typename ParamGenerator<T4>::iterator& current4)
- : base_(base),
- begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
- begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
- begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
- begin4_(g4.begin()), end4_(g4.end()), current4_(current4) {
- ComputeCurrentValue();
- }
- virtual ~Iterator() {}
-
- virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
- return base_;
- }
- // Advance should not be called on beyond-of-range iterators
- // so no component iterators must be beyond end of range, either.
- virtual void Advance() {
- assert(!AtEnd());
- ++current4_;
- if (current4_ == end4_) {
- current4_ = begin4_;
- ++current3_;
- }
- if (current3_ == end3_) {
- current3_ = begin3_;
- ++current2_;
- }
- if (current2_ == end2_) {
- current2_ = begin2_;
- ++current1_;
- }
- ComputeCurrentValue();
- }
- virtual ParamIteratorInterface<ParamType>* Clone() const {
- return new Iterator(*this);
- }
- virtual const ParamType* Current() const { return &current_value_; }
- virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
- // Having the same base generator guarantees that the other
- // iterator is of the same type and we can downcast.
- GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
- << "The program attempted to compare iterators "
- << "from different generators." << std::endl;
- const Iterator* typed_other =
- CheckedDowncastToActualType<const Iterator>(&other);
- // We must report iterators equal if they both point beyond their
- // respective ranges. That can happen in a variety of fashions,
- // so we have to consult AtEnd().
- return (AtEnd() && typed_other->AtEnd()) ||
- (
- current1_ == typed_other->current1_ &&
- current2_ == typed_other->current2_ &&
- current3_ == typed_other->current3_ &&
- current4_ == typed_other->current4_);
- }
-
- private:
- Iterator(const Iterator& other)
- : base_(other.base_),
- begin1_(other.begin1_),
- end1_(other.end1_),
- current1_(other.current1_),
- begin2_(other.begin2_),
- end2_(other.end2_),
- current2_(other.current2_),
- begin3_(other.begin3_),
- end3_(other.end3_),
- current3_(other.current3_),
- begin4_(other.begin4_),
- end4_(other.end4_),
- current4_(other.current4_) {
- ComputeCurrentValue();
- }
-
- void ComputeCurrentValue() {
- if (!AtEnd())
- current_value_ = ParamType(*current1_, *current2_, *current3_,
- *current4_);
- }
- bool AtEnd() const {
- // We must report iterator past the end of the range when either of the
- // component iterators has reached the end of its range.
- return
- current1_ == end1_ ||
- current2_ == end2_ ||
- current3_ == end3_ ||
- current4_ == end4_;
- }
-
- const ParamGeneratorInterface<ParamType>* const base_;
- // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
- // current[i]_ is the actual traversing iterator.
- const typename ParamGenerator<T1>::iterator begin1_;
- const typename ParamGenerator<T1>::iterator end1_;
- typename ParamGenerator<T1>::iterator current1_;
- const typename ParamGenerator<T2>::iterator begin2_;
- const typename ParamGenerator<T2>::iterator end2_;
- typename ParamGenerator<T2>::iterator current2_;
- const typename ParamGenerator<T3>::iterator begin3_;
- const typename ParamGenerator<T3>::iterator end3_;
- typename ParamGenerator<T3>::iterator current3_;
- const typename ParamGenerator<T4>::iterator begin4_;
- const typename ParamGenerator<T4>::iterator end4_;
- typename ParamGenerator<T4>::iterator current4_;
- ParamType current_value_;
- };
-
- const ParamGenerator<T1> g1_;
- const ParamGenerator<T2> g2_;
- const ParamGenerator<T3> g3_;
- const ParamGenerator<T4> g4_;
-};
-
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5>
-class CartesianProductGenerator5
- : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5> > {
- public:
- typedef ::std::tr1::tuple<T1, T2, T3, T4, T5> ParamType;
-
- CartesianProductGenerator5(const ParamGenerator<T1>& g1,
- const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
- const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5)
- : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {}
- virtual ~CartesianProductGenerator5() {}
-
- virtual ParamIteratorInterface<ParamType>* Begin() const {
- return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
- g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin());
- }
- virtual ParamIteratorInterface<ParamType>* End() const {
- return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
- g4_, g4_.end(), g5_, g5_.end());
- }
-
- private:
- class Iterator : public ParamIteratorInterface<ParamType> {
- public:
- Iterator(const ParamGeneratorInterface<ParamType>* base,
- const ParamGenerator<T1>& g1,
- const typename ParamGenerator<T1>::iterator& current1,
- const ParamGenerator<T2>& g2,
- const typename ParamGenerator<T2>::iterator& current2,
- const ParamGenerator<T3>& g3,
- const typename ParamGenerator<T3>::iterator& current3,
- const ParamGenerator<T4>& g4,
- const typename ParamGenerator<T4>::iterator& current4,
- const ParamGenerator<T5>& g5,
- const typename ParamGenerator<T5>::iterator& current5)
- : base_(base),
- begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
- begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
- begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
- begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
- begin5_(g5.begin()), end5_(g5.end()), current5_(current5) {
- ComputeCurrentValue();
- }
- virtual ~Iterator() {}
-
- virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
- return base_;
- }
- // Advance should not be called on beyond-of-range iterators
- // so no component iterators must be beyond end of range, either.
- virtual void Advance() {
- assert(!AtEnd());
- ++current5_;
- if (current5_ == end5_) {
- current5_ = begin5_;
- ++current4_;
- }
- if (current4_ == end4_) {
- current4_ = begin4_;
- ++current3_;
- }
- if (current3_ == end3_) {
- current3_ = begin3_;
- ++current2_;
- }
- if (current2_ == end2_) {
- current2_ = begin2_;
- ++current1_;
- }
- ComputeCurrentValue();
- }
- virtual ParamIteratorInterface<ParamType>* Clone() const {
- return new Iterator(*this);
- }
- virtual const ParamType* Current() const { return &current_value_; }
- virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
- // Having the same base generator guarantees that the other
- // iterator is of the same type and we can downcast.
- GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
- << "The program attempted to compare iterators "
- << "from different generators." << std::endl;
- const Iterator* typed_other =
- CheckedDowncastToActualType<const Iterator>(&other);
- // We must report iterators equal if they both point beyond their
- // respective ranges. That can happen in a variety of fashions,
- // so we have to consult AtEnd().
- return (AtEnd() && typed_other->AtEnd()) ||
- (
- current1_ == typed_other->current1_ &&
- current2_ == typed_other->current2_ &&
- current3_ == typed_other->current3_ &&
- current4_ == typed_other->current4_ &&
- current5_ == typed_other->current5_);
- }
-
- private:
- Iterator(const Iterator& other)
- : base_(other.base_),
- begin1_(other.begin1_),
- end1_(other.end1_),
- current1_(other.current1_),
- begin2_(other.begin2_),
- end2_(other.end2_),
- current2_(other.current2_),
- begin3_(other.begin3_),
- end3_(other.end3_),
- current3_(other.current3_),
- begin4_(other.begin4_),
- end4_(other.end4_),
- current4_(other.current4_),
- begin5_(other.begin5_),
- end5_(other.end5_),
- current5_(other.current5_) {
- ComputeCurrentValue();
- }
-
- void ComputeCurrentValue() {
- if (!AtEnd())
- current_value_ = ParamType(*current1_, *current2_, *current3_,
- *current4_, *current5_);
- }
- bool AtEnd() const {
- // We must report iterator past the end of the range when either of the
- // component iterators has reached the end of its range.
- return
- current1_ == end1_ ||
- current2_ == end2_ ||
- current3_ == end3_ ||
- current4_ == end4_ ||
- current5_ == end5_;
- }
-
- const ParamGeneratorInterface<ParamType>* const base_;
- // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
- // current[i]_ is the actual traversing iterator.
- const typename ParamGenerator<T1>::iterator begin1_;
- const typename ParamGenerator<T1>::iterator end1_;
- typename ParamGenerator<T1>::iterator current1_;
- const typename ParamGenerator<T2>::iterator begin2_;
- const typename ParamGenerator<T2>::iterator end2_;
- typename ParamGenerator<T2>::iterator current2_;
- const typename ParamGenerator<T3>::iterator begin3_;
- const typename ParamGenerator<T3>::iterator end3_;
- typename ParamGenerator<T3>::iterator current3_;
- const typename ParamGenerator<T4>::iterator begin4_;
- const typename ParamGenerator<T4>::iterator end4_;
- typename ParamGenerator<T4>::iterator current4_;
- const typename ParamGenerator<T5>::iterator begin5_;
- const typename ParamGenerator<T5>::iterator end5_;
- typename ParamGenerator<T5>::iterator current5_;
- ParamType current_value_;
- };
-
- const ParamGenerator<T1> g1_;
- const ParamGenerator<T2> g2_;
- const ParamGenerator<T3> g3_;
- const ParamGenerator<T4> g4_;
- const ParamGenerator<T5> g5_;
-};
-
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6>
-class CartesianProductGenerator6
- : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5,
- T6> > {
- public:
- typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6> ParamType;
-
- CartesianProductGenerator6(const ParamGenerator<T1>& g1,
- const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
- const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
- const ParamGenerator<T6>& g6)
- : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {}
- virtual ~CartesianProductGenerator6() {}
-
- virtual ParamIteratorInterface<ParamType>* Begin() const {
- return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
- g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin());
- }
- virtual ParamIteratorInterface<ParamType>* End() const {
- return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
- g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end());
- }
-
- private:
- class Iterator : public ParamIteratorInterface<ParamType> {
- public:
- Iterator(const ParamGeneratorInterface<ParamType>* base,
- const ParamGenerator<T1>& g1,
- const typename ParamGenerator<T1>::iterator& current1,
- const ParamGenerator<T2>& g2,
- const typename ParamGenerator<T2>::iterator& current2,
- const ParamGenerator<T3>& g3,
- const typename ParamGenerator<T3>::iterator& current3,
- const ParamGenerator<T4>& g4,
- const typename ParamGenerator<T4>::iterator& current4,
- const ParamGenerator<T5>& g5,
- const typename ParamGenerator<T5>::iterator& current5,
- const ParamGenerator<T6>& g6,
- const typename ParamGenerator<T6>::iterator& current6)
- : base_(base),
- begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
- begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
- begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
- begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
- begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
- begin6_(g6.begin()), end6_(g6.end()), current6_(current6) {
- ComputeCurrentValue();
- }
- virtual ~Iterator() {}
-
- virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
- return base_;
- }
- // Advance should not be called on beyond-of-range iterators
- // so no component iterators must be beyond end of range, either.
- virtual void Advance() {
- assert(!AtEnd());
- ++current6_;
- if (current6_ == end6_) {
- current6_ = begin6_;
- ++current5_;
- }
- if (current5_ == end5_) {
- current5_ = begin5_;
- ++current4_;
- }
- if (current4_ == end4_) {
- current4_ = begin4_;
- ++current3_;
- }
- if (current3_ == end3_) {
- current3_ = begin3_;
- ++current2_;
- }
- if (current2_ == end2_) {
- current2_ = begin2_;
- ++current1_;
- }
- ComputeCurrentValue();
- }
- virtual ParamIteratorInterface<ParamType>* Clone() const {
- return new Iterator(*this);
- }
- virtual const ParamType* Current() const { return &current_value_; }
- virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
- // Having the same base generator guarantees that the other
- // iterator is of the same type and we can downcast.
- GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
- << "The program attempted to compare iterators "
- << "from different generators." << std::endl;
- const Iterator* typed_other =
- CheckedDowncastToActualType<const Iterator>(&other);
- // We must report iterators equal if they both point beyond their
- // respective ranges. That can happen in a variety of fashions,
- // so we have to consult AtEnd().
- return (AtEnd() && typed_other->AtEnd()) ||
- (
- current1_ == typed_other->current1_ &&
- current2_ == typed_other->current2_ &&
- current3_ == typed_other->current3_ &&
- current4_ == typed_other->current4_ &&
- current5_ == typed_other->current5_ &&
- current6_ == typed_other->current6_);
- }
-
- private:
- Iterator(const Iterator& other)
- : base_(other.base_),
- begin1_(other.begin1_),
- end1_(other.end1_),
- current1_(other.current1_),
- begin2_(other.begin2_),
- end2_(other.end2_),
- current2_(other.current2_),
- begin3_(other.begin3_),
- end3_(other.end3_),
- current3_(other.current3_),
- begin4_(other.begin4_),
- end4_(other.end4_),
- current4_(other.current4_),
- begin5_(other.begin5_),
- end5_(other.end5_),
- current5_(other.current5_),
- begin6_(other.begin6_),
- end6_(other.end6_),
- current6_(other.current6_) {
- ComputeCurrentValue();
- }
-
- void ComputeCurrentValue() {
- if (!AtEnd())
- current_value_ = ParamType(*current1_, *current2_, *current3_,
- *current4_, *current5_, *current6_);
- }
- bool AtEnd() const {
- // We must report iterator past the end of the range when either of the
- // component iterators has reached the end of its range.
- return
- current1_ == end1_ ||
- current2_ == end2_ ||
- current3_ == end3_ ||
- current4_ == end4_ ||
- current5_ == end5_ ||
- current6_ == end6_;
- }
-
- const ParamGeneratorInterface<ParamType>* const base_;
- // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
- // current[i]_ is the actual traversing iterator.
- const typename ParamGenerator<T1>::iterator begin1_;
- const typename ParamGenerator<T1>::iterator end1_;
- typename ParamGenerator<T1>::iterator current1_;
- const typename ParamGenerator<T2>::iterator begin2_;
- const typename ParamGenerator<T2>::iterator end2_;
- typename ParamGenerator<T2>::iterator current2_;
- const typename ParamGenerator<T3>::iterator begin3_;
- const typename ParamGenerator<T3>::iterator end3_;
- typename ParamGenerator<T3>::iterator current3_;
- const typename ParamGenerator<T4>::iterator begin4_;
- const typename ParamGenerator<T4>::iterator end4_;
- typename ParamGenerator<T4>::iterator current4_;
- const typename ParamGenerator<T5>::iterator begin5_;
- const typename ParamGenerator<T5>::iterator end5_;
- typename ParamGenerator<T5>::iterator current5_;
- const typename ParamGenerator<T6>::iterator begin6_;
- const typename ParamGenerator<T6>::iterator end6_;
- typename ParamGenerator<T6>::iterator current6_;
- ParamType current_value_;
- };
-
- const ParamGenerator<T1> g1_;
- const ParamGenerator<T2> g2_;
- const ParamGenerator<T3> g3_;
- const ParamGenerator<T4> g4_;
- const ParamGenerator<T5> g5_;
- const ParamGenerator<T6> g6_;
-};
-
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7>
-class CartesianProductGenerator7
- : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
- T7> > {
- public:
- typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7> ParamType;
-
- CartesianProductGenerator7(const ParamGenerator<T1>& g1,
- const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
- const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
- const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7)
- : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {}
- virtual ~CartesianProductGenerator7() {}
-
- virtual ParamIteratorInterface<ParamType>* Begin() const {
- return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
- g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
- g7_.begin());
- }
- virtual ParamIteratorInterface<ParamType>* End() const {
- return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
- g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end());
- }
-
- private:
- class Iterator : public ParamIteratorInterface<ParamType> {
- public:
- Iterator(const ParamGeneratorInterface<ParamType>* base,
- const ParamGenerator<T1>& g1,
- const typename ParamGenerator<T1>::iterator& current1,
- const ParamGenerator<T2>& g2,
- const typename ParamGenerator<T2>::iterator& current2,
- const ParamGenerator<T3>& g3,
- const typename ParamGenerator<T3>::iterator& current3,
- const ParamGenerator<T4>& g4,
- const typename ParamGenerator<T4>::iterator& current4,
- const ParamGenerator<T5>& g5,
- const typename ParamGenerator<T5>::iterator& current5,
- const ParamGenerator<T6>& g6,
- const typename ParamGenerator<T6>::iterator& current6,
- const ParamGenerator<T7>& g7,
- const typename ParamGenerator<T7>::iterator& current7)
- : base_(base),
- begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
- begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
- begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
- begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
- begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
- begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
- begin7_(g7.begin()), end7_(g7.end()), current7_(current7) {
- ComputeCurrentValue();
- }
- virtual ~Iterator() {}
-
- virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
- return base_;
- }
- // Advance should not be called on beyond-of-range iterators
- // so no component iterators must be beyond end of range, either.
- virtual void Advance() {
- assert(!AtEnd());
- ++current7_;
- if (current7_ == end7_) {
- current7_ = begin7_;
- ++current6_;
- }
- if (current6_ == end6_) {
- current6_ = begin6_;
- ++current5_;
- }
- if (current5_ == end5_) {
- current5_ = begin5_;
- ++current4_;
- }
- if (current4_ == end4_) {
- current4_ = begin4_;
- ++current3_;
- }
- if (current3_ == end3_) {
- current3_ = begin3_;
- ++current2_;
- }
- if (current2_ == end2_) {
- current2_ = begin2_;
- ++current1_;
- }
- ComputeCurrentValue();
- }
- virtual ParamIteratorInterface<ParamType>* Clone() const {
- return new Iterator(*this);
- }
- virtual const ParamType* Current() const { return &current_value_; }
- virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
- // Having the same base generator guarantees that the other
- // iterator is of the same type and we can downcast.
- GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
- << "The program attempted to compare iterators "
- << "from different generators." << std::endl;
- const Iterator* typed_other =
- CheckedDowncastToActualType<const Iterator>(&other);
- // We must report iterators equal if they both point beyond their
- // respective ranges. That can happen in a variety of fashions,
- // so we have to consult AtEnd().
- return (AtEnd() && typed_other->AtEnd()) ||
- (
- current1_ == typed_other->current1_ &&
- current2_ == typed_other->current2_ &&
- current3_ == typed_other->current3_ &&
- current4_ == typed_other->current4_ &&
- current5_ == typed_other->current5_ &&
- current6_ == typed_other->current6_ &&
- current7_ == typed_other->current7_);
- }
-
- private:
- Iterator(const Iterator& other)
- : base_(other.base_),
- begin1_(other.begin1_),
- end1_(other.end1_),
- current1_(other.current1_),
- begin2_(other.begin2_),
- end2_(other.end2_),
- current2_(other.current2_),
- begin3_(other.begin3_),
- end3_(other.end3_),
- current3_(other.current3_),
- begin4_(other.begin4_),
- end4_(other.end4_),
- current4_(other.current4_),
- begin5_(other.begin5_),
- end5_(other.end5_),
- current5_(other.current5_),
- begin6_(other.begin6_),
- end6_(other.end6_),
- current6_(other.current6_),
- begin7_(other.begin7_),
- end7_(other.end7_),
- current7_(other.current7_) {
- ComputeCurrentValue();
- }
-
- void ComputeCurrentValue() {
- if (!AtEnd())
- current_value_ = ParamType(*current1_, *current2_, *current3_,
- *current4_, *current5_, *current6_, *current7_);
- }
- bool AtEnd() const {
- // We must report iterator past the end of the range when either of the
- // component iterators has reached the end of its range.
- return
- current1_ == end1_ ||
- current2_ == end2_ ||
- current3_ == end3_ ||
- current4_ == end4_ ||
- current5_ == end5_ ||
- current6_ == end6_ ||
- current7_ == end7_;
- }
-
- const ParamGeneratorInterface<ParamType>* const base_;
- // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
- // current[i]_ is the actual traversing iterator.
- const typename ParamGenerator<T1>::iterator begin1_;
- const typename ParamGenerator<T1>::iterator end1_;
- typename ParamGenerator<T1>::iterator current1_;
- const typename ParamGenerator<T2>::iterator begin2_;
- const typename ParamGenerator<T2>::iterator end2_;
- typename ParamGenerator<T2>::iterator current2_;
- const typename ParamGenerator<T3>::iterator begin3_;
- const typename ParamGenerator<T3>::iterator end3_;
- typename ParamGenerator<T3>::iterator current3_;
- const typename ParamGenerator<T4>::iterator begin4_;
- const typename ParamGenerator<T4>::iterator end4_;
- typename ParamGenerator<T4>::iterator current4_;
- const typename ParamGenerator<T5>::iterator begin5_;
- const typename ParamGenerator<T5>::iterator end5_;
- typename ParamGenerator<T5>::iterator current5_;
- const typename ParamGenerator<T6>::iterator begin6_;
- const typename ParamGenerator<T6>::iterator end6_;
- typename ParamGenerator<T6>::iterator current6_;
- const typename ParamGenerator<T7>::iterator begin7_;
- const typename ParamGenerator<T7>::iterator end7_;
- typename ParamGenerator<T7>::iterator current7_;
- ParamType current_value_;
- };
-
- const ParamGenerator<T1> g1_;
- const ParamGenerator<T2> g2_;
- const ParamGenerator<T3> g3_;
- const ParamGenerator<T4> g4_;
- const ParamGenerator<T5> g5_;
- const ParamGenerator<T6> g6_;
- const ParamGenerator<T7> g7_;
-};
-
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8>
-class CartesianProductGenerator8
- : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
- T7, T8> > {
- public:
- typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8> ParamType;
-
- CartesianProductGenerator8(const ParamGenerator<T1>& g1,
- const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
- const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
- const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
- const ParamGenerator<T8>& g8)
- : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7),
- g8_(g8) {}
- virtual ~CartesianProductGenerator8() {}
-
- virtual ParamIteratorInterface<ParamType>* Begin() const {
- return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
- g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
- g7_.begin(), g8_, g8_.begin());
- }
- virtual ParamIteratorInterface<ParamType>* End() const {
- return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
- g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
- g8_.end());
- }
-
- private:
- class Iterator : public ParamIteratorInterface<ParamType> {
- public:
- Iterator(const ParamGeneratorInterface<ParamType>* base,
- const ParamGenerator<T1>& g1,
- const typename ParamGenerator<T1>::iterator& current1,
- const ParamGenerator<T2>& g2,
- const typename ParamGenerator<T2>::iterator& current2,
- const ParamGenerator<T3>& g3,
- const typename ParamGenerator<T3>::iterator& current3,
- const ParamGenerator<T4>& g4,
- const typename ParamGenerator<T4>::iterator& current4,
- const ParamGenerator<T5>& g5,
- const typename ParamGenerator<T5>::iterator& current5,
- const ParamGenerator<T6>& g6,
- const typename ParamGenerator<T6>::iterator& current6,
- const ParamGenerator<T7>& g7,
- const typename ParamGenerator<T7>::iterator& current7,
- const ParamGenerator<T8>& g8,
- const typename ParamGenerator<T8>::iterator& current8)
- : base_(base),
- begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
- begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
- begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
- begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
- begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
- begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
- begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
- begin8_(g8.begin()), end8_(g8.end()), current8_(current8) {
- ComputeCurrentValue();
- }
- virtual ~Iterator() {}
-
- virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
- return base_;
- }
- // Advance should not be called on beyond-of-range iterators
- // so no component iterators must be beyond end of range, either.
- virtual void Advance() {
- assert(!AtEnd());
- ++current8_;
- if (current8_ == end8_) {
- current8_ = begin8_;
- ++current7_;
- }
- if (current7_ == end7_) {
- current7_ = begin7_;
- ++current6_;
- }
- if (current6_ == end6_) {
- current6_ = begin6_;
- ++current5_;
- }
- if (current5_ == end5_) {
- current5_ = begin5_;
- ++current4_;
- }
- if (current4_ == end4_) {
- current4_ = begin4_;
- ++current3_;
- }
- if (current3_ == end3_) {
- current3_ = begin3_;
- ++current2_;
- }
- if (current2_ == end2_) {
- current2_ = begin2_;
- ++current1_;
- }
- ComputeCurrentValue();
- }
- virtual ParamIteratorInterface<ParamType>* Clone() const {
- return new Iterator(*this);
- }
- virtual const ParamType* Current() const { return &current_value_; }
- virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
- // Having the same base generator guarantees that the other
- // iterator is of the same type and we can downcast.
- GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
- << "The program attempted to compare iterators "
- << "from different generators." << std::endl;
- const Iterator* typed_other =
- CheckedDowncastToActualType<const Iterator>(&other);
- // We must report iterators equal if they both point beyond their
- // respective ranges. That can happen in a variety of fashions,
- // so we have to consult AtEnd().
- return (AtEnd() && typed_other->AtEnd()) ||
- (
- current1_ == typed_other->current1_ &&
- current2_ == typed_other->current2_ &&
- current3_ == typed_other->current3_ &&
- current4_ == typed_other->current4_ &&
- current5_ == typed_other->current5_ &&
- current6_ == typed_other->current6_ &&
- current7_ == typed_other->current7_ &&
- current8_ == typed_other->current8_);
- }
-
- private:
- Iterator(const Iterator& other)
- : base_(other.base_),
- begin1_(other.begin1_),
- end1_(other.end1_),
- current1_(other.current1_),
- begin2_(other.begin2_),
- end2_(other.end2_),
- current2_(other.current2_),
- begin3_(other.begin3_),
- end3_(other.end3_),
- current3_(other.current3_),
- begin4_(other.begin4_),
- end4_(other.end4_),
- current4_(other.current4_),
- begin5_(other.begin5_),
- end5_(other.end5_),
- current5_(other.current5_),
- begin6_(other.begin6_),
- end6_(other.end6_),
- current6_(other.current6_),
- begin7_(other.begin7_),
- end7_(other.end7_),
- current7_(other.current7_),
- begin8_(other.begin8_),
- end8_(other.end8_),
- current8_(other.current8_) {
- ComputeCurrentValue();
- }
-
- void ComputeCurrentValue() {
- if (!AtEnd())
- current_value_ = ParamType(*current1_, *current2_, *current3_,
- *current4_, *current5_, *current6_, *current7_, *current8_);
- }
- bool AtEnd() const {
- // We must report iterator past the end of the range when either of the
- // component iterators has reached the end of its range.
- return
- current1_ == end1_ ||
- current2_ == end2_ ||
- current3_ == end3_ ||
- current4_ == end4_ ||
- current5_ == end5_ ||
- current6_ == end6_ ||
- current7_ == end7_ ||
- current8_ == end8_;
- }
-
- const ParamGeneratorInterface<ParamType>* const base_;
- // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
- // current[i]_ is the actual traversing iterator.
- const typename ParamGenerator<T1>::iterator begin1_;
- const typename ParamGenerator<T1>::iterator end1_;
- typename ParamGenerator<T1>::iterator current1_;
- const typename ParamGenerator<T2>::iterator begin2_;
- const typename ParamGenerator<T2>::iterator end2_;
- typename ParamGenerator<T2>::iterator current2_;
- const typename ParamGenerator<T3>::iterator begin3_;
- const typename ParamGenerator<T3>::iterator end3_;
- typename ParamGenerator<T3>::iterator current3_;
- const typename ParamGenerator<T4>::iterator begin4_;
- const typename ParamGenerator<T4>::iterator end4_;
- typename ParamGenerator<T4>::iterator current4_;
- const typename ParamGenerator<T5>::iterator begin5_;
- const typename ParamGenerator<T5>::iterator end5_;
- typename ParamGenerator<T5>::iterator current5_;
- const typename ParamGenerator<T6>::iterator begin6_;
- const typename ParamGenerator<T6>::iterator end6_;
- typename ParamGenerator<T6>::iterator current6_;
- const typename ParamGenerator<T7>::iterator begin7_;
- const typename ParamGenerator<T7>::iterator end7_;
- typename ParamGenerator<T7>::iterator current7_;
- const typename ParamGenerator<T8>::iterator begin8_;
- const typename ParamGenerator<T8>::iterator end8_;
- typename ParamGenerator<T8>::iterator current8_;
- ParamType current_value_;
- };
-
- const ParamGenerator<T1> g1_;
- const ParamGenerator<T2> g2_;
- const ParamGenerator<T3> g3_;
- const ParamGenerator<T4> g4_;
- const ParamGenerator<T5> g5_;
- const ParamGenerator<T6> g6_;
- const ParamGenerator<T7> g7_;
- const ParamGenerator<T8> g8_;
-};
-
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9>
-class CartesianProductGenerator9
- : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
- T7, T8, T9> > {
- public:
- typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9> ParamType;
-
- CartesianProductGenerator9(const ParamGenerator<T1>& g1,
- const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
- const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
- const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
- const ParamGenerator<T8>& g8, const ParamGenerator<T9>& g9)
- : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
- g9_(g9) {}
- virtual ~CartesianProductGenerator9() {}
-
- virtual ParamIteratorInterface<ParamType>* Begin() const {
- return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
- g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
- g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin());
- }
- virtual ParamIteratorInterface<ParamType>* End() const {
- return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
- g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
- g8_.end(), g9_, g9_.end());
- }
-
- private:
- class Iterator : public ParamIteratorInterface<ParamType> {
- public:
- Iterator(const ParamGeneratorInterface<ParamType>* base,
- const ParamGenerator<T1>& g1,
- const typename ParamGenerator<T1>::iterator& current1,
- const ParamGenerator<T2>& g2,
- const typename ParamGenerator<T2>::iterator& current2,
- const ParamGenerator<T3>& g3,
- const typename ParamGenerator<T3>::iterator& current3,
- const ParamGenerator<T4>& g4,
- const typename ParamGenerator<T4>::iterator& current4,
- const ParamGenerator<T5>& g5,
- const typename ParamGenerator<T5>::iterator& current5,
- const ParamGenerator<T6>& g6,
- const typename ParamGenerator<T6>::iterator& current6,
- const ParamGenerator<T7>& g7,
- const typename ParamGenerator<T7>::iterator& current7,
- const ParamGenerator<T8>& g8,
- const typename ParamGenerator<T8>::iterator& current8,
- const ParamGenerator<T9>& g9,
- const typename ParamGenerator<T9>::iterator& current9)
- : base_(base),
- begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
- begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
- begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
- begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
- begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
- begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
- begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
- begin8_(g8.begin()), end8_(g8.end()), current8_(current8),
- begin9_(g9.begin()), end9_(g9.end()), current9_(current9) {
- ComputeCurrentValue();
- }
- virtual ~Iterator() {}
-
- virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
- return base_;
- }
- // Advance should not be called on an iterator that is already past the end
- // of the range, so no component iterator may be past its end either.
- virtual void Advance() {
- assert(!AtEnd());
- ++current9_;
- if (current9_ == end9_) {
- current9_ = begin9_;
- ++current8_;
- }
- if (current8_ == end8_) {
- current8_ = begin8_;
- ++current7_;
- }
- if (current7_ == end7_) {
- current7_ = begin7_;
- ++current6_;
- }
- if (current6_ == end6_) {
- current6_ = begin6_;
- ++current5_;
- }
- if (current5_ == end5_) {
- current5_ = begin5_;
- ++current4_;
- }
- if (current4_ == end4_) {
- current4_ = begin4_;
- ++current3_;
- }
- if (current3_ == end3_) {
- current3_ = begin3_;
- ++current2_;
- }
- if (current2_ == end2_) {
- current2_ = begin2_;
- ++current1_;
- }
- ComputeCurrentValue();
- }
- virtual ParamIteratorInterface<ParamType>* Clone() const {
- return new Iterator(*this);
- }
- virtual const ParamType* Current() const { return &current_value_; }
- virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
- // Having the same base generator guarantees that the other
- // iterator is of the same type and we can downcast.
- GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
- << "The program attempted to compare iterators "
- << "from different generators." << std::endl;
- const Iterator* typed_other =
- CheckedDowncastToActualType<const Iterator>(&other);
- // We must report iterators equal if they both point beyond their
- // respective ranges. That can happen in a variety of fashions,
- // so we have to consult AtEnd().
- return (AtEnd() && typed_other->AtEnd()) ||
- (
- current1_ == typed_other->current1_ &&
- current2_ == typed_other->current2_ &&
- current3_ == typed_other->current3_ &&
- current4_ == typed_other->current4_ &&
- current5_ == typed_other->current5_ &&
- current6_ == typed_other->current6_ &&
- current7_ == typed_other->current7_ &&
- current8_ == typed_other->current8_ &&
- current9_ == typed_other->current9_);
- }
-
- private:
- Iterator(const Iterator& other)
- : base_(other.base_),
- begin1_(other.begin1_),
- end1_(other.end1_),
- current1_(other.current1_),
- begin2_(other.begin2_),
- end2_(other.end2_),
- current2_(other.current2_),
- begin3_(other.begin3_),
- end3_(other.end3_),
- current3_(other.current3_),
- begin4_(other.begin4_),
- end4_(other.end4_),
- current4_(other.current4_),
- begin5_(other.begin5_),
- end5_(other.end5_),
- current5_(other.current5_),
- begin6_(other.begin6_),
- end6_(other.end6_),
- current6_(other.current6_),
- begin7_(other.begin7_),
- end7_(other.end7_),
- current7_(other.current7_),
- begin8_(other.begin8_),
- end8_(other.end8_),
- current8_(other.current8_),
- begin9_(other.begin9_),
- end9_(other.end9_),
- current9_(other.current9_) {
- ComputeCurrentValue();
- }
-
- void ComputeCurrentValue() {
- if (!AtEnd())
- current_value_ = ParamType(*current1_, *current2_, *current3_,
- *current4_, *current5_, *current6_, *current7_, *current8_,
- *current9_);
- }
- bool AtEnd() const {
- // We must report the iterator as past the end of the range when any of the
- // component iterators has reached the end of its range.
- return
- current1_ == end1_ ||
- current2_ == end2_ ||
- current3_ == end3_ ||
- current4_ == end4_ ||
- current5_ == end5_ ||
- current6_ == end6_ ||
- current7_ == end7_ ||
- current8_ == end8_ ||
- current9_ == end9_;
- }
-
- const ParamGeneratorInterface<ParamType>* const base_;
- // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
- // current[i]_ is the actual traversing iterator.
- const typename ParamGenerator<T1>::iterator begin1_;
- const typename ParamGenerator<T1>::iterator end1_;
- typename ParamGenerator<T1>::iterator current1_;
- const typename ParamGenerator<T2>::iterator begin2_;
- const typename ParamGenerator<T2>::iterator end2_;
- typename ParamGenerator<T2>::iterator current2_;
- const typename ParamGenerator<T3>::iterator begin3_;
- const typename ParamGenerator<T3>::iterator end3_;
- typename ParamGenerator<T3>::iterator current3_;
- const typename ParamGenerator<T4>::iterator begin4_;
- const typename ParamGenerator<T4>::iterator end4_;
- typename ParamGenerator<T4>::iterator current4_;
- const typename ParamGenerator<T5>::iterator begin5_;
- const typename ParamGenerator<T5>::iterator end5_;
- typename ParamGenerator<T5>::iterator current5_;
- const typename ParamGenerator<T6>::iterator begin6_;
- const typename ParamGenerator<T6>::iterator end6_;
- typename ParamGenerator<T6>::iterator current6_;
- const typename ParamGenerator<T7>::iterator begin7_;
- const typename ParamGenerator<T7>::iterator end7_;
- typename ParamGenerator<T7>::iterator current7_;
- const typename ParamGenerator<T8>::iterator begin8_;
- const typename ParamGenerator<T8>::iterator end8_;
- typename ParamGenerator<T8>::iterator current8_;
- const typename ParamGenerator<T9>::iterator begin9_;
- const typename ParamGenerator<T9>::iterator end9_;
- typename ParamGenerator<T9>::iterator current9_;
- ParamType current_value_;
- };
-
- const ParamGenerator<T1> g1_;
- const ParamGenerator<T2> g2_;
- const ParamGenerator<T3> g3_;
- const ParamGenerator<T4> g4_;
- const ParamGenerator<T5> g5_;
- const ParamGenerator<T6> g6_;
- const ParamGenerator<T7> g7_;
- const ParamGenerator<T8> g8_;
- const ParamGenerator<T9> g9_;
-};
-
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10>
-class CartesianProductGenerator10
- : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
- T7, T8, T9, T10> > {
- public:
- typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> ParamType;
-
- CartesianProductGenerator10(const ParamGenerator<T1>& g1,
- const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
- const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
- const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
- const ParamGenerator<T8>& g8, const ParamGenerator<T9>& g9,
- const ParamGenerator<T10>& g10)
- : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
- g9_(g9), g10_(g10) {}
- virtual ~CartesianProductGenerator10() {}
-
- virtual ParamIteratorInterface<ParamType>* Begin() const {
- return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
- g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
- g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin(), g10_, g10_.begin());
- }
- virtual ParamIteratorInterface<ParamType>* End() const {
- return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
- g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
- g8_.end(), g9_, g9_.end(), g10_, g10_.end());
- }
-
- private:
- class Iterator : public ParamIteratorInterface<ParamType> {
- public:
- Iterator(const ParamGeneratorInterface<ParamType>* base,
- const ParamGenerator<T1>& g1,
- const typename ParamGenerator<T1>::iterator& current1,
- const ParamGenerator<T2>& g2,
- const typename ParamGenerator<T2>::iterator& current2,
- const ParamGenerator<T3>& g3,
- const typename ParamGenerator<T3>::iterator& current3,
- const ParamGenerator<T4>& g4,
- const typename ParamGenerator<T4>::iterator& current4,
- const ParamGenerator<T5>& g5,
- const typename ParamGenerator<T5>::iterator& current5,
- const ParamGenerator<T6>& g6,
- const typename ParamGenerator<T6>::iterator& current6,
- const ParamGenerator<T7>& g7,
- const typename ParamGenerator<T7>::iterator& current7,
- const ParamGenerator<T8>& g8,
- const typename ParamGenerator<T8>::iterator& current8,
- const ParamGenerator<T9>& g9,
- const typename ParamGenerator<T9>::iterator& current9,
- const ParamGenerator<T10>& g10,
- const typename ParamGenerator<T10>::iterator& current10)
- : base_(base),
- begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
- begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
- begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
- begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
- begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
- begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
- begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
- begin8_(g8.begin()), end8_(g8.end()), current8_(current8),
- begin9_(g9.begin()), end9_(g9.end()), current9_(current9),
- begin10_(g10.begin()), end10_(g10.end()), current10_(current10) {
- ComputeCurrentValue();
- }
- virtual ~Iterator() {}
-
- virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
- return base_;
- }
- // Advance should not be called on an iterator that is already past the end
- // of the range, so no component iterator may be past its end either.
- virtual void Advance() {
- assert(!AtEnd());
- ++current10_;
- if (current10_ == end10_) {
- current10_ = begin10_;
- ++current9_;
- }
- if (current9_ == end9_) {
- current9_ = begin9_;
- ++current8_;
- }
- if (current8_ == end8_) {
- current8_ = begin8_;
- ++current7_;
- }
- if (current7_ == end7_) {
- current7_ = begin7_;
- ++current6_;
- }
- if (current6_ == end6_) {
- current6_ = begin6_;
- ++current5_;
- }
- if (current5_ == end5_) {
- current5_ = begin5_;
- ++current4_;
- }
- if (current4_ == end4_) {
- current4_ = begin4_;
- ++current3_;
- }
- if (current3_ == end3_) {
- current3_ = begin3_;
- ++current2_;
- }
- if (current2_ == end2_) {
- current2_ = begin2_;
- ++current1_;
- }
- ComputeCurrentValue();
- }
- virtual ParamIteratorInterface<ParamType>* Clone() const {
- return new Iterator(*this);
- }
- virtual const ParamType* Current() const { return &current_value_; }
- virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
- // Having the same base generator guarantees that the other
- // iterator is of the same type and we can downcast.
- GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
- << "The program attempted to compare iterators "
- << "from different generators." << std::endl;
- const Iterator* typed_other =
- CheckedDowncastToActualType<const Iterator>(&other);
- // We must report iterators equal if they both point beyond their
- // respective ranges. That can happen in a variety of fashions,
- // so we have to consult AtEnd().
- return (AtEnd() && typed_other->AtEnd()) ||
- (
- current1_ == typed_other->current1_ &&
- current2_ == typed_other->current2_ &&
- current3_ == typed_other->current3_ &&
- current4_ == typed_other->current4_ &&
- current5_ == typed_other->current5_ &&
- current6_ == typed_other->current6_ &&
- current7_ == typed_other->current7_ &&
- current8_ == typed_other->current8_ &&
- current9_ == typed_other->current9_ &&
- current10_ == typed_other->current10_);
- }
-
- private:
- Iterator(const Iterator& other)
- : base_(other.base_),
- begin1_(other.begin1_),
- end1_(other.end1_),
- current1_(other.current1_),
- begin2_(other.begin2_),
- end2_(other.end2_),
- current2_(other.current2_),
- begin3_(other.begin3_),
- end3_(other.end3_),
- current3_(other.current3_),
- begin4_(other.begin4_),
- end4_(other.end4_),
- current4_(other.current4_),
- begin5_(other.begin5_),
- end5_(other.end5_),
- current5_(other.current5_),
- begin6_(other.begin6_),
- end6_(other.end6_),
- current6_(other.current6_),
- begin7_(other.begin7_),
- end7_(other.end7_),
- current7_(other.current7_),
- begin8_(other.begin8_),
- end8_(other.end8_),
- current8_(other.current8_),
- begin9_(other.begin9_),
- end9_(other.end9_),
- current9_(other.current9_),
- begin10_(other.begin10_),
- end10_(other.end10_),
- current10_(other.current10_) {
- ComputeCurrentValue();
- }
-
- void ComputeCurrentValue() {
- if (!AtEnd())
- current_value_ = ParamType(*current1_, *current2_, *current3_,
- *current4_, *current5_, *current6_, *current7_, *current8_,
- *current9_, *current10_);
- }
- bool AtEnd() const {
- // We must report the iterator as past the end of the range when any of the
- // component iterators has reached the end of its range.
- return
- current1_ == end1_ ||
- current2_ == end2_ ||
- current3_ == end3_ ||
- current4_ == end4_ ||
- current5_ == end5_ ||
- current6_ == end6_ ||
- current7_ == end7_ ||
- current8_ == end8_ ||
- current9_ == end9_ ||
- current10_ == end10_;
- }
-
- const ParamGeneratorInterface<ParamType>* const base_;
- // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
- // current[i]_ is the actual traversing iterator.
- const typename ParamGenerator<T1>::iterator begin1_;
- const typename ParamGenerator<T1>::iterator end1_;
- typename ParamGenerator<T1>::iterator current1_;
- const typename ParamGenerator<T2>::iterator begin2_;
- const typename ParamGenerator<T2>::iterator end2_;
- typename ParamGenerator<T2>::iterator current2_;
- const typename ParamGenerator<T3>::iterator begin3_;
- const typename ParamGenerator<T3>::iterator end3_;
- typename ParamGenerator<T3>::iterator current3_;
- const typename ParamGenerator<T4>::iterator begin4_;
- const typename ParamGenerator<T4>::iterator end4_;
- typename ParamGenerator<T4>::iterator current4_;
- const typename ParamGenerator<T5>::iterator begin5_;
- const typename ParamGenerator<T5>::iterator end5_;
- typename ParamGenerator<T5>::iterator current5_;
- const typename ParamGenerator<T6>::iterator begin6_;
- const typename ParamGenerator<T6>::iterator end6_;
- typename ParamGenerator<T6>::iterator current6_;
- const typename ParamGenerator<T7>::iterator begin7_;
- const typename ParamGenerator<T7>::iterator end7_;
- typename ParamGenerator<T7>::iterator current7_;
- const typename ParamGenerator<T8>::iterator begin8_;
- const typename ParamGenerator<T8>::iterator end8_;
- typename ParamGenerator<T8>::iterator current8_;
- const typename ParamGenerator<T9>::iterator begin9_;
- const typename ParamGenerator<T9>::iterator end9_;
- typename ParamGenerator<T9>::iterator current9_;
- const typename ParamGenerator<T10>::iterator begin10_;
- const typename ParamGenerator<T10>::iterator end10_;
- typename ParamGenerator<T10>::iterator current10_;
- ParamType current_value_;
- };
-
- const ParamGenerator<T1> g1_;
- const ParamGenerator<T2> g2_;
- const ParamGenerator<T3> g3_;
- const ParamGenerator<T4> g4_;
- const ParamGenerator<T5> g5_;
- const ParamGenerator<T6> g6_;
- const ParamGenerator<T7> g7_;
- const ParamGenerator<T8> g8_;
- const ParamGenerator<T9> g9_;
- const ParamGenerator<T10> g10_;
-};
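Each CartesianProductGeneratorN::Iterator above advances like an odometer: the
right-most component iterator moves first, and every component that wraps back to
its begin() carries one position to the left. A stand-alone sketch of that
mixed-radix increment (AdvanceOdometer is a hypothetical helper, not part of
googletest) may make the repeated Advance() bodies easier to follow:

    #include <cstddef>
    #include <vector>

    // Advances a tuple of positions over the given range sizes, right-most
    // digit first, carrying left on wrap-around. Returns false once every
    // position has wrapped, i.e. the whole cross product has been visited.
    bool AdvanceOdometer(std::vector<std::size_t>* pos,
                         const std::vector<std::size_t>& sizes) {
      for (std::size_t i = pos->size(); i-- > 0; ) {
        if (++(*pos)[i] < sizes[i])
          return true;   // this digit is still in range, no carry needed
        (*pos)[i] = 0;   // wrap this digit and carry into the next one left
      }
      return false;      // carried past the left-most digit: at the end
    }

Starting from pos = {0, 0} with sizes = {2, 3}, successive calls step through
(0,1), (0,2), (1,0), (1,1), (1,2) and then report the end, which is the order in
which the generators above enumerate their tuples.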
-
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Helper classes providing Combine() with polymorphic features. They allow
-// casting CartesianProductGeneratorN<T> to ParamGenerator<U> if T is
-// convertible to U.
-//
-template <class Generator1, class Generator2>
-class CartesianProductHolder2 {
- public:
-CartesianProductHolder2(const Generator1& g1, const Generator2& g2)
- : g1_(g1), g2_(g2) {}
- template <typename T1, typename T2>
- operator ParamGenerator< ::std::tr1::tuple<T1, T2> >() const {
- return ParamGenerator< ::std::tr1::tuple<T1, T2> >(
- new CartesianProductGenerator2<T1, T2>(
- static_cast<ParamGenerator<T1> >(g1_),
- static_cast<ParamGenerator<T2> >(g2_)));
- }
-
- private:
- const Generator1 g1_;
- const Generator2 g2_;
-};
-
-template <class Generator1, class Generator2, class Generator3>
-class CartesianProductHolder3 {
- public:
-CartesianProductHolder3(const Generator1& g1, const Generator2& g2,
- const Generator3& g3)
- : g1_(g1), g2_(g2), g3_(g3) {}
- template <typename T1, typename T2, typename T3>
- operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3> >() const {
- return ParamGenerator< ::std::tr1::tuple<T1, T2, T3> >(
- new CartesianProductGenerator3<T1, T2, T3>(
- static_cast<ParamGenerator<T1> >(g1_),
- static_cast<ParamGenerator<T2> >(g2_),
- static_cast<ParamGenerator<T3> >(g3_)));
- }
-
- private:
- const Generator1 g1_;
- const Generator2 g2_;
- const Generator3 g3_;
-};
-
-template <class Generator1, class Generator2, class Generator3,
- class Generator4>
-class CartesianProductHolder4 {
- public:
-CartesianProductHolder4(const Generator1& g1, const Generator2& g2,
- const Generator3& g3, const Generator4& g4)
- : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {}
- template <typename T1, typename T2, typename T3, typename T4>
- operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4> >() const {
- return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4> >(
- new CartesianProductGenerator4<T1, T2, T3, T4>(
- static_cast<ParamGenerator<T1> >(g1_),
- static_cast<ParamGenerator<T2> >(g2_),
- static_cast<ParamGenerator<T3> >(g3_),
- static_cast<ParamGenerator<T4> >(g4_)));
- }
-
- private:
- const Generator1 g1_;
- const Generator2 g2_;
- const Generator3 g3_;
- const Generator4 g4_;
-};
-
-template <class Generator1, class Generator2, class Generator3,
- class Generator4, class Generator5>
-class CartesianProductHolder5 {
- public:
-CartesianProductHolder5(const Generator1& g1, const Generator2& g2,
- const Generator3& g3, const Generator4& g4, const Generator5& g5)
- : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {}
- template <typename T1, typename T2, typename T3, typename T4, typename T5>
- operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5> >() const {
- return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5> >(
- new CartesianProductGenerator5<T1, T2, T3, T4, T5>(
- static_cast<ParamGenerator<T1> >(g1_),
- static_cast<ParamGenerator<T2> >(g2_),
- static_cast<ParamGenerator<T3> >(g3_),
- static_cast<ParamGenerator<T4> >(g4_),
- static_cast<ParamGenerator<T5> >(g5_)));
- }
-
- private:
- const Generator1 g1_;
- const Generator2 g2_;
- const Generator3 g3_;
- const Generator4 g4_;
- const Generator5 g5_;
-};
-
-template <class Generator1, class Generator2, class Generator3,
- class Generator4, class Generator5, class Generator6>
-class CartesianProductHolder6 {
- public:
-CartesianProductHolder6(const Generator1& g1, const Generator2& g2,
- const Generator3& g3, const Generator4& g4, const Generator5& g5,
- const Generator6& g6)
- : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {}
- template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6>
- operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6> >() const {
- return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6> >(
- new CartesianProductGenerator6<T1, T2, T3, T4, T5, T6>(
- static_cast<ParamGenerator<T1> >(g1_),
- static_cast<ParamGenerator<T2> >(g2_),
- static_cast<ParamGenerator<T3> >(g3_),
- static_cast<ParamGenerator<T4> >(g4_),
- static_cast<ParamGenerator<T5> >(g5_),
- static_cast<ParamGenerator<T6> >(g6_)));
- }
-
- private:
- const Generator1 g1_;
- const Generator2 g2_;
- const Generator3 g3_;
- const Generator4 g4_;
- const Generator5 g5_;
- const Generator6 g6_;
-};
-
-template <class Generator1, class Generator2, class Generator3,
- class Generator4, class Generator5, class Generator6, class Generator7>
-class CartesianProductHolder7 {
- public:
-CartesianProductHolder7(const Generator1& g1, const Generator2& g2,
- const Generator3& g3, const Generator4& g4, const Generator5& g5,
- const Generator6& g6, const Generator7& g7)
- : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {}
- template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7>
- operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
- T7> >() const {
- return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7> >(
- new CartesianProductGenerator7<T1, T2, T3, T4, T5, T6, T7>(
- static_cast<ParamGenerator<T1> >(g1_),
- static_cast<ParamGenerator<T2> >(g2_),
- static_cast<ParamGenerator<T3> >(g3_),
- static_cast<ParamGenerator<T4> >(g4_),
- static_cast<ParamGenerator<T5> >(g5_),
- static_cast<ParamGenerator<T6> >(g6_),
- static_cast<ParamGenerator<T7> >(g7_)));
- }
-
- private:
- const Generator1 g1_;
- const Generator2 g2_;
- const Generator3 g3_;
- const Generator4 g4_;
- const Generator5 g5_;
- const Generator6 g6_;
- const Generator7 g7_;
-};
-
-template <class Generator1, class Generator2, class Generator3,
- class Generator4, class Generator5, class Generator6, class Generator7,
- class Generator8>
-class CartesianProductHolder8 {
- public:
-CartesianProductHolder8(const Generator1& g1, const Generator2& g2,
- const Generator3& g3, const Generator4& g4, const Generator5& g5,
- const Generator6& g6, const Generator7& g7, const Generator8& g8)
- : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7),
- g8_(g8) {}
- template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8>
- operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7,
- T8> >() const {
- return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8> >(
- new CartesianProductGenerator8<T1, T2, T3, T4, T5, T6, T7, T8>(
- static_cast<ParamGenerator<T1> >(g1_),
- static_cast<ParamGenerator<T2> >(g2_),
- static_cast<ParamGenerator<T3> >(g3_),
- static_cast<ParamGenerator<T4> >(g4_),
- static_cast<ParamGenerator<T5> >(g5_),
- static_cast<ParamGenerator<T6> >(g6_),
- static_cast<ParamGenerator<T7> >(g7_),
- static_cast<ParamGenerator<T8> >(g8_)));
- }
-
- private:
- const Generator1 g1_;
- const Generator2 g2_;
- const Generator3 g3_;
- const Generator4 g4_;
- const Generator5 g5_;
- const Generator6 g6_;
- const Generator7 g7_;
- const Generator8 g8_;
-};
-
-template <class Generator1, class Generator2, class Generator3,
- class Generator4, class Generator5, class Generator6, class Generator7,
- class Generator8, class Generator9>
-class CartesianProductHolder9 {
- public:
-CartesianProductHolder9(const Generator1& g1, const Generator2& g2,
- const Generator3& g3, const Generator4& g4, const Generator5& g5,
- const Generator6& g6, const Generator7& g7, const Generator8& g8,
- const Generator9& g9)
- : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
- g9_(g9) {}
- template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9>
- operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
- T9> >() const {
- return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
- T9> >(
- new CartesianProductGenerator9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(
- static_cast<ParamGenerator<T1> >(g1_),
- static_cast<ParamGenerator<T2> >(g2_),
- static_cast<ParamGenerator<T3> >(g3_),
- static_cast<ParamGenerator<T4> >(g4_),
- static_cast<ParamGenerator<T5> >(g5_),
- static_cast<ParamGenerator<T6> >(g6_),
- static_cast<ParamGenerator<T7> >(g7_),
- static_cast<ParamGenerator<T8> >(g8_),
- static_cast<ParamGenerator<T9> >(g9_)));
- }
-
- private:
- const Generator1 g1_;
- const Generator2 g2_;
- const Generator3 g3_;
- const Generator4 g4_;
- const Generator5 g5_;
- const Generator6 g6_;
- const Generator7 g7_;
- const Generator8 g8_;
- const Generator9 g9_;
-};
-
-template <class Generator1, class Generator2, class Generator3,
- class Generator4, class Generator5, class Generator6, class Generator7,
- class Generator8, class Generator9, class Generator10>
-class CartesianProductHolder10 {
- public:
-CartesianProductHolder10(const Generator1& g1, const Generator2& g2,
- const Generator3& g3, const Generator4& g4, const Generator5& g5,
- const Generator6& g6, const Generator7& g7, const Generator8& g8,
- const Generator9& g9, const Generator10& g10)
- : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
- g9_(g9), g10_(g10) {}
- template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10>
- operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
- T9, T10> >() const {
- return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
- T9, T10> >(
- new CartesianProductGenerator10<T1, T2, T3, T4, T5, T6, T7, T8, T9,
- T10>(
- static_cast<ParamGenerator<T1> >(g1_),
- static_cast<ParamGenerator<T2> >(g2_),
- static_cast<ParamGenerator<T3> >(g3_),
- static_cast<ParamGenerator<T4> >(g4_),
- static_cast<ParamGenerator<T5> >(g5_),
- static_cast<ParamGenerator<T6> >(g6_),
- static_cast<ParamGenerator<T7> >(g7_),
- static_cast<ParamGenerator<T8> >(g8_),
- static_cast<ParamGenerator<T9> >(g9_),
- static_cast<ParamGenerator<T10> >(g10_)));
- }
-
- private:
- const Generator1 g1_;
- const Generator2 g2_;
- const Generator3 g3_;
- const Generator4 g4_;
- const Generator5 g5_;
- const Generator6 g6_;
- const Generator7 g7_;
- const Generator8 g8_;
- const Generator9 g9_;
- const Generator10 g10_;
-};
-
-#endif // GTEST_HAS_COMBINE
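The holder classes above exist so that the public Combine() factory can defer the
choice of tuple element types until the generator is bound to a concrete test
case. A minimal usage sketch, assuming the usual TEST_P / INSTANTIATE_TEST_CASE_P
macros from this version of googletest (CombineDemoTest and its two parameters
are made-up names for illustration):

    #include <gtest/gtest.h>

    // Hypothetical fixture: the parameter type must match the tuple produced
    // by Combine(), here tr1::tuple<bool, int>.
    class CombineDemoTest
        : public ::testing::TestWithParam< ::std::tr1::tuple<bool, int> > {
    };

    TEST_P(CombineDemoTest, WorksForEveryCombination) {
      const bool use_cache  = ::std::tr1::get<0>(GetParam());
      const int  block_size = ::std::tr1::get<1>(GetParam());
      EXPECT_GT(block_size, 0);
      (void)use_cache;
    }

    // Cross product {false, true} x {512, 1024, 4096}: six test instances.
    INSTANTIATE_TEST_CASE_P(CacheAndSize, CombineDemoTest,
                            ::testing::Combine(::testing::Bool(),
                                               ::testing::Values(512, 1024, 4096)));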
-
-} // namespace internal
-} // namespace testing
-
-#endif // GTEST_HAS_PARAM_TEST
-
-#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-param-util.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-param-util.h
deleted file mode 100644
index 3bb07ec..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-param-util.h
+++ /dev/null
@@ -1,629 +0,0 @@
-// Copyright 2008 Google Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: vladl at google.com (Vlad Losev)
-
-// Type and function utilities for implementing parameterized tests.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
-
-#include <iterator>
-#include <utility>
-#include <vector>
-
-#include <gtest/internal/gtest-port.h>
-
-#ifdef GTEST_HAS_PARAM_TEST
-
-#if GTEST_HAS_RTTI
-#include <typeinfo>
-#endif // GTEST_HAS_RTTI
-
-#include <gtest/internal/gtest-linked_ptr.h>
-#include <gtest/internal/gtest-internal.h>
-
-namespace testing {
-namespace internal {
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Outputs a message explaining invalid registration of a different
-// fixture class for the same test case. This may happen when
-// the TEST_P macro is used to define two tests with the same name
-// but in different namespaces.
-void ReportInvalidTestCaseType(const char* test_case_name,
- const char* file, int line);
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Downcasts the pointer of type Base to Derived.
-// Derived must be a subclass of Base. The parameter MUST
-// point to a class of type Derived, not any subclass of it.
-// When RTTI is available, the function performs a runtime
-// check to enforce this.
-template <class Derived, class Base>
-Derived* CheckedDowncastToActualType(Base* base) {
-#if GTEST_HAS_RTTI
- GTEST_CHECK_(typeid(*base) == typeid(Derived));
- Derived* derived = dynamic_cast<Derived*>(base); // NOLINT
-#else
- Derived* derived = static_cast<Derived*>(base); // Poor man's downcast.
-#endif // GTEST_HAS_RTTI
- return derived;
-}
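In effect this is a checked static_cast: the caller asserts that it already knows
the dynamic type, and RTTI (when compiled in) verifies that claim at run time. A
small illustration, assuming made-up Shape/Circle types that are not part of
googletest and assuming the declarations above are in scope:

    // Illustrative only. With RTTI enabled, passing a Shape* that is not
    // actually a Circle would trip the GTEST_CHECK_ inside the helper.
    struct Shape  { virtual ~Shape() {} };
    struct Circle : Shape { double radius; };

    Circle* AsCircle(Shape* shape_known_to_be_a_circle) {
      return testing::internal::CheckedDowncastToActualType<Circle>(
          shape_known_to_be_a_circle);
    }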
-
-template <typename> class ParamGeneratorInterface;
-template <typename> class ParamGenerator;
-
-// Interface for iterating over elements provided by an implementation
-// of ParamGeneratorInterface<T>.
-template <typename T>
-class ParamIteratorInterface {
- public:
- virtual ~ParamIteratorInterface() {}
- // A pointer to the base generator instance.
- // Used only for the purposes of iterator comparison
- // to make sure that two iterators belong to the same generator.
- virtual const ParamGeneratorInterface<T>* BaseGenerator() const = 0;
- // Advances iterator to point to the next element
- // provided by the generator. The caller is responsible
- // for not calling Advance() on an iterator equal to
- // BaseGenerator()->End().
- virtual void Advance() = 0;
- // Clones the iterator object. Used for implementing copy semantics
- // of ParamIterator<T>.
- virtual ParamIteratorInterface* Clone() const = 0;
- // Dereferences the current iterator and provides (read-only) access
- // to the pointed value. It is the caller's responsibility not to call
- // Current() on an iterator equal to BaseGenerator()->End().
- // Used for implementing ParamGenerator<T>::operator*().
- virtual const T* Current() const = 0;
- // Determines whether the given iterator and other point to the same
- // element in the sequence generated by the generator.
- // Used for implementing ParamGenerator<T>::operator==().
- virtual bool Equals(const ParamIteratorInterface& other) const = 0;
-};
-
-// Class iterating over elements provided by an implementation of
-// ParamGeneratorInterface<T>. It wraps ParamIteratorInterface<T>
-// and implements the const forward iterator concept.
-template <typename T>
-class ParamIterator {
- public:
- typedef T value_type;
- typedef const T& reference;
- typedef ptrdiff_t difference_type;
-
- // ParamIterator assumes ownership of the impl_ pointer.
- ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {}
- ParamIterator& operator=(const ParamIterator& other) {
- if (this != &other)
- impl_.reset(other.impl_->Clone());
- return *this;
- }
-
- const T& operator*() const { return *impl_->Current(); }
- const T* operator->() const { return impl_->Current(); }
- // Prefix version of operator++.
- ParamIterator& operator++() {
- impl_->Advance();
- return *this;
- }
- // Postfix version of operator++.
- ParamIterator operator++(int /*unused*/) {
- ParamIteratorInterface<T>* clone = impl_->Clone();
- impl_->Advance();
- return ParamIterator(clone);
- }
- bool operator==(const ParamIterator& other) const {
- return impl_.get() == other.impl_.get() || impl_->Equals(*other.impl_);
- }
- bool operator!=(const ParamIterator& other) const {
- return !(*this == other);
- }
-
- private:
- friend class ParamGenerator<T>;
- explicit ParamIterator(ParamIteratorInterface<T>* impl) : impl_(impl) {}
- scoped_ptr<ParamIteratorInterface<T> > impl_;
-};
-
-// ParamGeneratorInterface<T> is the binary interface to access generators
-// defined in other translation units.
-template <typename T>
-class ParamGeneratorInterface {
- public:
- typedef T ParamType;
-
- virtual ~ParamGeneratorInterface() {}
-
- // Generator interface definition
- virtual ParamIteratorInterface<T>* Begin() const = 0;
- virtual ParamIteratorInterface<T>* End() const = 0;
-};
-
-// Wraps ParamGeneratorInterface<T> and provides general generator syntax
-// compatible with the STL Container concept.
-// This class implements copy initialization semantics and the contained
-// ParamGeneratorInterface<T> instance is shared among all copies
-// of the original object. This is possible because that instance is immutable.
-template<typename T>
-class ParamGenerator {
- public:
- typedef ParamIterator<T> iterator;
-
- explicit ParamGenerator(ParamGeneratorInterface<T>* impl) : impl_(impl) {}
- ParamGenerator(const ParamGenerator& other) : impl_(other.impl_) {}
-
- ParamGenerator& operator=(const ParamGenerator& other) {
- impl_ = other.impl_;
- return *this;
- }
-
- iterator begin() const { return iterator(impl_->Begin()); }
- iterator end() const { return iterator(impl_->End()); }
-
- private:
- ::testing::internal::linked_ptr<const ParamGeneratorInterface<T> > impl_;
-};
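Because ParamGenerator<T> behaves like a read-only STL container and
ParamIterator<T> is a const forward iterator over it, generated values can be
walked with an ordinary loop, which is exactly what RegisterTests() does further
down in this header. A minimal sketch, assuming the public Values() factory
(which converts implicitly to ParamGenerator<T>) is available via <gtest/gtest.h>:

    #include <iostream>
    #include <gtest/gtest.h>

    // Illustrative: prints 1, 2 and 3, one per line.
    void PrintGeneratedValues() {
      ::testing::internal::ParamGenerator<int> gen = ::testing::Values(1, 2, 3);
      for (::testing::internal::ParamGenerator<int>::iterator it = gen.begin();
           it != gen.end(); ++it) {
        std::cout << *it << "\n";
      }
    }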
-
-// Generates values from a range of two comparable values. Can be used to
-// generate sequences of user-defined types that implement operator+() and
-// operator<().
-// This class is used in the Range() function.
-template <typename T, typename IncrementT>
-class RangeGenerator : public ParamGeneratorInterface<T> {
- public:
- RangeGenerator(T begin, T end, IncrementT step)
- : begin_(begin), end_(end),
- step_(step), end_index_(CalculateEndIndex(begin, end, step)) {}
- virtual ~RangeGenerator() {}
-
- virtual ParamIteratorInterface<T>* Begin() const {
- return new Iterator(this, begin_, 0, step_);
- }
- virtual ParamIteratorInterface<T>* End() const {
- return new Iterator(this, end_, end_index_, step_);
- }
-
- private:
- class Iterator : public ParamIteratorInterface<T> {
- public:
- Iterator(const ParamGeneratorInterface<T>* base, T value, int index,
- IncrementT step)
- : base_(base), value_(value), index_(index), step_(step) {}
- virtual ~Iterator() {}
-
- virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
- return base_;
- }
- virtual void Advance() {
- value_ = value_ + step_;
- index_++;
- }
- virtual ParamIteratorInterface<T>* Clone() const {
- return new Iterator(*this);
- }
- virtual const T* Current() const { return &value_; }
- virtual bool Equals(const ParamIteratorInterface<T>& other) const {
- // Having the same base generator guarantees that the other
- // iterator is of the same type and we can downcast.
- GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
- << "The program attempted to compare iterators "
- << "from different generators." << std::endl;
- const int other_index =
- CheckedDowncastToActualType<const Iterator>(&other)->index_;
- return index_ == other_index;
- }
-
- private:
- Iterator(const Iterator& other)
- : base_(other.base_), value_(other.value_), index_(other.index_),
- step_(other.step_) {}
-
- const ParamGeneratorInterface<T>* const base_;
- T value_;
- int index_;
- const IncrementT step_;
- }; // class RangeGenerator::Iterator
-
- static int CalculateEndIndex(const T& begin,
- const T& end,
- const IncrementT& step) {
- int end_index = 0;
- for (T i = begin; i < end; i = i + step)
- end_index++;
- return end_index;
- }
-
- const T begin_;
- const T end_;
- const IncrementT step_;
- // The index for the end() iterator. All the elements in the generated
- // sequence are indexed (0-based) to aid iterator comparison.
- const int end_index_;
-}; // class RangeGenerator
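This generator backs the public Range() factory: the end value is excluded, and
end_index_ lets two iterators be compared by index alone rather than by value.
For instance (assuming the Range() wrapper from gtest-param-test.h, pulled in
through <gtest/gtest.h>):

    #include <gtest/gtest.h>

    // Illustrative: yields 0, 3, 6, 9. The end value 10 is never produced,
    // and CalculateEndIndex(0, 10, 3) above evaluates to 4.
    ::testing::internal::ParamGenerator<int> MultiplesOfThree() {
      return ::testing::Range(0, 10, 3);
    }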
-
-
-// Generates values from a pair of STL-style iterators. Used in the
-// ValuesIn() function. The elements are copied from the source range
-// since the source can be located on the stack, and the generator
-// is likely to persist beyond that stack frame.
-template <typename T>
-class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
- public:
- template <typename ForwardIterator>
- ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end)
- : container_(begin, end) {}
- virtual ~ValuesInIteratorRangeGenerator() {}
-
- virtual ParamIteratorInterface<T>* Begin() const {
- return new Iterator(this, container_.begin());
- }
- virtual ParamIteratorInterface<T>* End() const {
- return new Iterator(this, container_.end());
- }
-
- private:
- typedef typename ::std::vector<T> ContainerType;
-
- class Iterator : public ParamIteratorInterface<T> {
- public:
- Iterator(const ParamGeneratorInterface<T>* base,
- typename ContainerType::const_iterator iterator)
- : base_(base), iterator_(iterator) {}
- virtual ~Iterator() {}
-
- virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
- return base_;
- }
- virtual void Advance() {
- ++iterator_;
- value_.reset();
- }
- virtual ParamIteratorInterface<T>* Clone() const {
- return new Iterator(*this);
- }
- // We need to use the cached value referenced by iterator_ because *iterator_
- // can return a temporary object (and of a type other than T), so just
- // having "return &*iterator_;" doesn't work.
- // value_ is updated here and not in Advance() because Advance()
- // can advance iterator_ beyond the end of the range, and we cannot
- // detect that fact. The client code, on the other hand, is
- // responsible for not calling Current() on an out-of-range iterator.
- virtual const T* Current() const {
- if (value_.get() == NULL)
- value_.reset(new T(*iterator_));
- return value_.get();
- }
- virtual bool Equals(const ParamIteratorInterface<T>& other) const {
- // Having the same base generator guarantees that the other
- // iterator is of the same type and we can downcast.
- GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
- << "The program attempted to compare iterators "
- << "from different generators." << std::endl;
- return iterator_ ==
- CheckedDowncastToActualType<const Iterator>(&other)->iterator_;
- }
-
- private:
- Iterator(const Iterator& other)
- // The explicit constructor call suppresses a false warning
- // emitted by gcc when supplied with the -Wextra option.
- : ParamIteratorInterface<T>(),
- base_(other.base_),
- iterator_(other.iterator_) {}
-
- const ParamGeneratorInterface<T>* const base_;
- typename ContainerType::const_iterator iterator_;
- // A cached value of *iterator_. We keep it here to allow access by
- // pointer in the wrapping iterator's operator->().
- // value_ needs to be mutable to be accessed in Current().
- // Use of scoped_ptr helps manage cached value's lifetime,
- // which is bound by the lifespan of the iterator itself.
- mutable scoped_ptr<const T> value_;
- };
-
- const ContainerType container_;
-}; // class ValuesInIteratorRangeGenerator
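This is the machinery behind the public ValuesIn() factory. As the class comment
notes, the elements are copied into container_, so the source range only has to
stay alive for the duration of the ValuesIn() call itself. A short sketch
(illustrative names, assuming <gtest/gtest.h> provides ValuesIn()):

    #include <string>
    #include <vector>
    #include <gtest/gtest.h>

    // Illustrative: the strings are copied into the generator, so the local
    // vector may go out of scope before the generator is used.
    ::testing::internal::ParamGenerator<std::string> MakeNameGenerator() {
      std::vector<std::string> names;
      names.push_back("alpha");
      names.push_back("beta");
      return ::testing::ValuesIn(names);
    }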
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Stores a parameter value and later creates tests parameterized with that
-// value.
-template <class TestClass>
-class ParameterizedTestFactory : public TestFactoryBase {
- public:
- typedef typename TestClass::ParamType ParamType;
- explicit ParameterizedTestFactory(ParamType parameter) :
- parameter_(parameter) {}
- virtual Test* CreateTest() {
- TestClass::SetParam(&parameter_);
- return new TestClass();
- }
-
- private:
- const ParamType parameter_;
-
- GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory);
-};
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// TestMetaFactoryBase is a base class for meta-factories that create
-// test factories for passing into MakeAndRegisterTestInfo function.
-template <class ParamType>
-class TestMetaFactoryBase {
- public:
- virtual ~TestMetaFactoryBase() {}
-
- virtual TestFactoryBase* CreateTestFactory(ParamType parameter) = 0;
-};
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// TestMetaFactory creates test factories for passing into
-// the MakeAndRegisterTestInfo function. Since MakeAndRegisterTestInfo receives
-// ownership of the test factory pointer, the same factory object cannot be
-// passed into that method twice. But ParameterizedTestCaseInfo is going to
-// call it for each Test/Parameter value combination, so it needs a
-// meta-factory creator class.
-template <class TestCase>
-class TestMetaFactory
- : public TestMetaFactoryBase<typename TestCase::ParamType> {
- public:
- typedef typename TestCase::ParamType ParamType;
-
- TestMetaFactory() {}
-
- virtual TestFactoryBase* CreateTestFactory(ParamType parameter) {
- return new ParameterizedTestFactory<TestCase>(parameter);
- }
-
- private:
- GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory);
-};
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// ParameterizedTestCaseInfoBase is a generic interface
-// to ParameterizedTestCaseInfo classes. ParameterizedTestCaseInfoBase
-// accumulates test information provided by TEST_P macro invocations
-// and generators provided by INSTANTIATE_TEST_CASE_P macro invocations
-// and uses that information to register all resulting test instances
-// in the RegisterTests method. The ParameterizedTestCaseRegistry class holds
-// a collection of pointers to the ParameterizedTestCaseInfo objects
-// and calls RegisterTests() on each of them when asked.
-class ParameterizedTestCaseInfoBase {
- public:
- virtual ~ParameterizedTestCaseInfoBase() {}
-
- // Base part of test case name for display purposes.
- virtual const String& GetTestCaseName() const = 0;
- // Test case id to verify identity.
- virtual TypeId GetTestCaseTypeId() const = 0;
- // UnitTest class invokes this method to register tests in this
- // test case right before running them in RUN_ALL_TESTS macro.
- // This method should not be called more than once on any single
- // instance of a ParameterizedTestCaseInfoBase derived class.
- virtual void RegisterTests() = 0;
-
- protected:
- ParameterizedTestCaseInfoBase() {}
-
- private:
- GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfoBase);
-};
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// ParameterizedTestCaseInfo accumulates tests obtained from TEST_P
-// macro invocations for a particular test case and generators
-// obtained from INSTANTIATE_TEST_CASE_P macro invocations for that
-// test case. It registers tests with all values generated by all
-// generators when asked.
-template <class TestCase>
-class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {
- public:
- // ParamType and GeneratorCreationFunc are private types but are required
- // for declarations of public methods AddTestPattern() and
- // AddTestCaseInstantiation().
- typedef typename TestCase::ParamType ParamType;
- // A function that returns an instance of appropriate generator type.
- typedef ParamGenerator<ParamType>(GeneratorCreationFunc)();
-
- explicit ParameterizedTestCaseInfo(const char* name)
- : test_case_name_(name) {}
-
- // Test case base name for display purposes.
- virtual const String& GetTestCaseName() const { return test_case_name_; }
- // Test case id to verify identity.
- virtual TypeId GetTestCaseTypeId() const { return GetTypeId<TestCase>(); }
- // TEST_P macro uses AddTestPattern() to record information
- // about a single test in a LocalTestInfo structure.
- // test_case_name is the base name of the test case (without invocation
- // prefix). test_base_name is the name of an individual test without
- // parameter index. For the test SequenceA/FooTest.DoBar/1, FooTest is the
- // test case base name and DoBar is the test base name.
- void AddTestPattern(const char* test_case_name,
- const char* test_base_name,
- TestMetaFactoryBase<ParamType>* meta_factory) {
- tests_.push_back(linked_ptr<TestInfo>(new TestInfo(test_case_name,
- test_base_name,
- meta_factory)));
- }
- // INSTANTIATE_TEST_CASE_P macro uses AddGenerator() to record information
- // about a generator.
- int AddTestCaseInstantiation(const char* instantiation_name,
- GeneratorCreationFunc* func,
- const char* file,
- int line) {
- instantiations_.push_back(::std::make_pair(instantiation_name, func));
- return 0; // Return value used only to run this method in namespace scope.
- }
- // UnitTest class invokes this method to register tests in this test case
- // right before running tests in the RUN_ALL_TESTS macro.
- // This method should not be called more than once on any single
- // instance of a ParameterizedTestCaseInfoBase derived class.
- // UnitTest has a guard to prevent this method from being called more than once.
- virtual void RegisterTests() {
- for (typename TestInfoContainer::iterator test_it = tests_.begin();
- test_it != tests_.end(); ++test_it) {
- linked_ptr<TestInfo> test_info = *test_it;
- for (typename InstantiationContainer::iterator gen_it =
- instantiations_.begin(); gen_it != instantiations_.end();
- ++gen_it) {
- const String& instantiation_name = gen_it->first;
- ParamGenerator<ParamType> generator((*gen_it->second)());
-
- Message test_case_name_stream;
- if ( !instantiation_name.empty() )
- test_case_name_stream << instantiation_name.c_str() << "/";
- test_case_name_stream << test_info->test_case_base_name.c_str();
-
- int i = 0;
- for (typename ParamGenerator<ParamType>::iterator param_it =
- generator.begin();
- param_it != generator.end(); ++param_it, ++i) {
- Message test_name_stream;
- test_name_stream << test_info->test_base_name.c_str() << "/" << i;
- ::testing::internal::MakeAndRegisterTestInfo(
- test_case_name_stream.GetString().c_str(),
- test_name_stream.GetString().c_str(),
- "", // test_case_comment
- "", // comment; TODO(vladl at google.com): provide parameter value
- // representation.
- GetTestCaseTypeId(),
- TestCase::SetUpTestCase,
- TestCase::TearDownTestCase,
- test_info->test_meta_factory->CreateTestFactory(*param_it));
- } // for param_it
- } // for gen_it
- } // for test_it
- } // RegisterTests
-
- private:
- // LocalTestInfo structure keeps information about a single test registered
- // with TEST_P macro.
- struct TestInfo {
- TestInfo(const char* test_case_base_name,
- const char* test_base_name,
- TestMetaFactoryBase<ParamType>* test_meta_factory) :
- test_case_base_name(test_case_base_name),
- test_base_name(test_base_name),
- test_meta_factory(test_meta_factory) {}
-
- const String test_case_base_name;
- const String test_base_name;
- const scoped_ptr<TestMetaFactoryBase<ParamType> > test_meta_factory;
- };
- typedef ::std::vector<linked_ptr<TestInfo> > TestInfoContainer;
- // Keeps pairs of <Instantiation name, Sequence generator creation function>
- // received from INSTANTIATE_TEST_CASE_P macros.
- typedef ::std::vector<std::pair<String, GeneratorCreationFunc*> >
- InstantiationContainer;
-
- const String test_case_name_;
- TestInfoContainer tests_;
- InstantiationContainer instantiations_;
-
- GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfo);
-}; // class ParameterizedTestCaseInfo
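Putting the registration flow together: TEST_P records a pattern through
AddTestPattern(), INSTANTIATE_TEST_CASE_P records a generator through
AddTestCaseInstantiation(), and RegisterTests() then creates one test per
(instantiation, parameter) pair, named <instantiation>/<test case>.<test>/<index>.
A hedged sketch using the FooTest/DoBar/SequenceA names from the naming comment
above:

    #include <gtest/gtest.h>

    // Illustrative fixture matching the SequenceA/FooTest.DoBar/N example.
    class FooTest : public ::testing::TestWithParam<int> {
    };

    TEST_P(FooTest, DoBar) {
      EXPECT_GE(GetParam(), 0);
    }

    // Registers SequenceA/FooTest.DoBar/0 (param 3) and
    // SequenceA/FooTest.DoBar/1 (param 7).
    INSTANTIATE_TEST_CASE_P(SequenceA, FooTest, ::testing::Values(3, 7));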
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// ParameterizedTestCaseRegistry contains a map of ParameterizedTestCaseInfoBase
-// classes accessed by test case names. TEST_P and INSTANTIATE_TEST_CASE_P
-// macros use it to locate their corresponding ParameterizedTestCaseInfo
-// descriptors.
-class ParameterizedTestCaseRegistry {
- public:
- ParameterizedTestCaseRegistry() {}
- ~ParameterizedTestCaseRegistry() {
- for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
- it != test_case_infos_.end(); ++it) {
- delete *it;
- }
- }
-
- // Looks up or creates and returns a structure containing information about
- // tests and instantiations of a particular test case.
- template <class TestCase>
- ParameterizedTestCaseInfo<TestCase>* GetTestCasePatternHolder(
- const char* test_case_name,
- const char* file,
- int line) {
- ParameterizedTestCaseInfo<TestCase>* typed_test_info = NULL;
- for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
- it != test_case_infos_.end(); ++it) {
- if ((*it)->GetTestCaseName() == test_case_name) {
- if ((*it)->GetTestCaseTypeId() != GetTypeId<TestCase>()) {
- // Complain about incorrect usage of Google Test facilities
- // and terminate the program since we cannot guarantee correct
- // test case setup and tear-down in this case.
- ReportInvalidTestCaseType(test_case_name, file, line);
- abort();
- } else {
- // At this point we are sure that the object we found is of the same
- // type we are looking for, so we downcast it to that type
- // without further checks.
- typed_test_info = CheckedDowncastToActualType<
- ParameterizedTestCaseInfo<TestCase> >(*it);
- }
- break;
- }
- }
- if (typed_test_info == NULL) {
- typed_test_info = new ParameterizedTestCaseInfo<TestCase>(test_case_name);
- test_case_infos_.push_back(typed_test_info);
- }
- return typed_test_info;
- }
- void RegisterTests() {
- for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
- it != test_case_infos_.end(); ++it) {
- (*it)->RegisterTests();
- }
- }
-
- private:
- typedef ::std::vector<ParameterizedTestCaseInfoBase*> TestCaseInfoContainer;
-
- TestCaseInfoContainer test_case_infos_;
-
- GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseRegistry);
-};
-
-} // namespace internal
-} // namespace testing
-
-#endif // GTEST_HAS_PARAM_TEST
-
-#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-port.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-port.h
deleted file mode 100644
index 3e49993..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-port.h
+++ /dev/null
@@ -1,864 +0,0 @@
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors: wan at google.com (Zhanyong Wan)
-//
-// Low-level types and utilities for porting Google Test to various
-// platforms. They are subject to change without notice. DO NOT USE
-// THEM IN USER CODE.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
-
-// The user can define the following macros in the build script to
-// control Google Test's behavior. If the user doesn't define a macro
-// in this list, Google Test will define it.
-//
-// GTEST_HAS_CLONE - Define it to 1/0 to indicate that clone(2)
-// is/isn't available.
-// GTEST_HAS_GLOBAL_STRING - Define it to 1/0 to indicate that ::string
-// is/isn't available (some systems define
-// ::string, which is different to std::string).
-// GTEST_HAS_GLOBAL_WSTRING - Define it to 1/0 to indicate that ::wstring
-// is/isn't available (some systems define
-// ::wstring, which is different to std::wstring).
-// GTEST_HAS_PTHREAD - Define it to 1/0 to indicate that <pthread.h>
-// is/isn't available.
-// GTEST_HAS_RTTI - Define it to 1/0 to indicate that RTTI is/isn't
-// enabled.
-// GTEST_HAS_STD_STRING - Define it to 1/0 to indicate that
-// std::string does/doesn't work (Google Test can
-// be used where std::string is unavailable).
-// GTEST_HAS_STD_WSTRING - Define it to 1/0 to indicate that
-// std::wstring does/doesn't work (Google Test can
-// be used where std::wstring is unavailable).
-// GTEST_HAS_TR1_TUPLE - Define it to 1/0 to indicate tr1::tuple
-// is/isn't available.
-
-// This header defines the following utilities:
-//
-// Macros indicating the name of the Google C++ Testing Framework project:
-// GTEST_NAME - a string literal of the project name.
-// GTEST_FLAG_PREFIX - a string literal of the prefix all Google
-// Test flag names share.
-// GTEST_FLAG_PREFIX_UPPER - a string literal of the prefix all Google
-// Test flag names share, in upper case.
-//
-// Macros indicating the current platform:
-// GTEST_OS_CYGWIN - defined iff compiled on Cygwin.
-// GTEST_OS_LINUX - defined iff compiled on Linux.
-// GTEST_OS_MAC - defined iff compiled on Mac OS X.
-// GTEST_OS_SOLARIS - defined iff compiled on Sun Solaris.
-// GTEST_OS_SYMBIAN - defined iff compiled for Symbian.
-// GTEST_OS_WINDOWS - defined iff compiled on Windows.
-// GTEST_OS_ZOS - defined iff compiled on IBM z/OS.
-//
-// Among the platforms, Cygwin, Linux, Mac OS X, and Windows have the
-// most stable support. Since core members of the Google Test project
-// don't have access to other platforms, support for them may be less
-// stable. If you notice any problems on your platform, please notify
-// googletestframework at googlegroups.com (patches for fixing them are
-// even more welcome!).
-//
-// Note that it is possible that none of the GTEST_OS_ macros are defined.
-//
-// Macros indicating available Google Test features:
-// GTEST_HAS_COMBINE - defined iff Combine construct is supported
-// in value-parameterized tests.
-// GTEST_HAS_DEATH_TEST - defined iff death tests are supported.
-// GTEST_HAS_PARAM_TEST - defined iff value-parameterized tests are
-// supported.
-// GTEST_HAS_TYPED_TEST - defined iff typed tests are supported.
-// GTEST_HAS_TYPED_TEST_P - defined iff type-parameterized tests are
-// supported.
-//
-// Macros for basic C++ coding:
-// GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning.
-// GTEST_ATTRIBUTE_UNUSED_ - declares that a class' instances don't have to
-// be used.
-// GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=.
-// GTEST_MUST_USE_RESULT_ - declares that a function's result must be used.
-//
-// Synchronization:
-// Mutex, MutexLock, ThreadLocal, GetThreadCount()
-// - synchronization primitives.
-// GTEST_IS_THREADSAFE - defined to 1 to indicate that the above
-// synchronization primitives have real implementations
-// and Google Test is thread-safe; or 0 otherwise.
-//
-// Template meta programming:
-// is_pointer - as in TR1; needed on Symbian and IBM XL C/C++ only.
-//
-// Smart pointers:
-// scoped_ptr - as in TR2.
-//
-// Regular expressions:
-// RE - a simple regular expression class using the POSIX
-// Extended Regular Expression syntax. Not available on
-// Windows.
-//
-// Logging:
-// GTEST_LOG_() - logs messages at the specified severity level.
-// LogToStderr() - directs all log messages to stderr.
-// FlushInfoLog() - flushes informational log messages.
-//
-// Stderr capturing:
-// CaptureStderr() - starts capturing stderr.
-// GetCapturedStderr() - stops capturing stderr and returns the captured
-// string.
-//
-// Integer types:
-// TypeWithSize - maps an integer to an int type.
-// Int32, UInt32, Int64, UInt64, TimeInMillis
-// - integers of known sizes.
-// BiggestInt - the biggest signed integer type.
-//
-// Command-line utilities:
-// GTEST_FLAG() - references a flag.
-// GTEST_DECLARE_*() - declares a flag.
-// GTEST_DEFINE_*() - defines a flag.
-// GetArgvs() - returns the command line as a vector of strings.
-//
-// Environment variable utilities:
-// GetEnv() - gets the value of an environment variable.
-// BoolFromGTestEnv() - parses a bool environment variable.
-// Int32FromGTestEnv() - parses an Int32 environment variable.
-// StringFromGTestEnv() - parses a string environment variable.
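Since all of the above are plain preprocessor switches, overriding the
auto-detection only takes a -D flag in the build script or a definition made
before this header is included. For example (an illustrative override, not a
recommendation):

    // Force-disable the RTTI check and wide-string support before gtest's own
    // detection below runs; equivalent to passing
    //   -DGTEST_HAS_RTTI=0 -DGTEST_HAS_STD_WSTRING=0
    // on the compiler command line.
    #define GTEST_HAS_RTTI 0
    #define GTEST_HAS_STD_WSTRING 0
    #include <gtest/gtest.h>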
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <iostream> // Used for GTEST_CHECK_
-
-#define GTEST_NAME "Google Test"
-#define GTEST_FLAG_PREFIX "gtest_"
-#define GTEST_FLAG_PREFIX_UPPER "GTEST_"
-
-// Determines the version of gcc that is used to compile this.
-#ifdef __GNUC__
-// 40302 means version 4.3.2.
-#define GTEST_GCC_VER_ \
- (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
-#endif // __GNUC__
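The encoding is major*10000 + minor*100 + patchlevel, so gcc 4.3.2 yields
4*10000 + 3*100 + 2 = 40302, which is exactly the threshold the RTTI detection
further down compares against:

    // Illustrative only: guard a gcc-4.3.2-or-newer code path, where the
    // __GXX_RTTI macro mentioned below can be relied on.
    #if defined(__GNUC__) && GTEST_GCC_VER_ >= 40302
    // ... gcc is at least 4.3.2 here ...
    #endif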
-
-// Determines the platform on which Google Test is compiled.
-#ifdef __CYGWIN__
-#define GTEST_OS_CYGWIN
-#elif __SYMBIAN32__
-#define GTEST_OS_SYMBIAN
-#elif defined _MSC_VER
-// TODO(kenton at google.com): GTEST_OS_WINDOWS is currently used to mean
-// both "The OS is Windows" and "The compiler is MSVC". These
-// meanings really should be separated in order to better support
-// Windows compilers other than MSVC.
-#define GTEST_OS_WINDOWS
-#elif defined __APPLE__
-#define GTEST_OS_MAC
-#elif defined __linux__
-#define GTEST_OS_LINUX
-#elif defined __MVS__
-#define GTEST_OS_ZOS
-#elif defined(__sun) && defined(__SVR4)
-#define GTEST_OS_SOLARIS
-#elif defined(__HAIKU__)
-#define GTEST_OS_HAIKU
-#endif // _MSC_VER
-
-// Determines whether ::std::string and ::string are available.
-
-#ifndef GTEST_HAS_STD_STRING
-// The user didn't tell us whether ::std::string is available, so we
-// need to figure it out.
-
-#ifdef GTEST_OS_WINDOWS
-// Assumes that exceptions are enabled by default.
-#ifndef _HAS_EXCEPTIONS
-#define _HAS_EXCEPTIONS 1
-#endif // _HAS_EXCEPTIONS
-// GTEST_HAS_EXCEPTIONS is non-zero iff exceptions are enabled. It is
-// always defined, while _HAS_EXCEPTIONS is defined only on Windows.
-#define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS
-// On Windows, we can use ::std::string if the compiler version is VS
-// 2005 or above, or if exceptions are enabled.
-#define GTEST_HAS_STD_STRING ((_MSC_VER >= 1400) || GTEST_HAS_EXCEPTIONS)
-#else // We are on Linux or Mac OS.
-#define GTEST_HAS_EXCEPTIONS 0
-#define GTEST_HAS_STD_STRING 1
-#endif // GTEST_OS_WINDOWS
-
-#endif // GTEST_HAS_STD_STRING
-
-#ifndef GTEST_HAS_GLOBAL_STRING
-// The user didn't tell us whether ::string is available, so we need
-// to figure it out.
-
-#define GTEST_HAS_GLOBAL_STRING 0
-
-#endif // GTEST_HAS_GLOBAL_STRING
-
-#ifndef GTEST_HAS_STD_WSTRING
-// The user didn't tell us whether ::std::wstring is available, so we need
-// to figure it out.
-// TODO(wan at google.com): uses autoconf to detect whether ::std::wstring
-// is available.
-
-#if defined(GTEST_OS_CYGWIN) || defined(GTEST_OS_SOLARIS) || defined(GTEST_OS_HAIKU)
-// At least some versions of cygwin don't support ::std::wstring.
-// Solaris' libc++ doesn't support it either.
-#define GTEST_HAS_STD_WSTRING 0
-#else
-#define GTEST_HAS_STD_WSTRING GTEST_HAS_STD_STRING
-#endif // defined(GTEST_OS_CYGWIN) || defined(GTEST_OS_SOLARIS) || defined(GTEST_OS_HAIKU)
-
-#endif // GTEST_HAS_STD_WSTRING
-
-#ifndef GTEST_HAS_GLOBAL_WSTRING
-// The user didn't tell us whether ::wstring is available, so we need
-// to figure it out.
-#define GTEST_HAS_GLOBAL_WSTRING GTEST_HAS_GLOBAL_STRING
-#endif // GTEST_HAS_GLOBAL_WSTRING
-
-#if GTEST_HAS_STD_STRING || GTEST_HAS_GLOBAL_STRING || \
- GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING
-#include <string> // NOLINT
-#endif // GTEST_HAS_STD_STRING || GTEST_HAS_GLOBAL_STRING ||
- // GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING
-
-#if GTEST_HAS_STD_STRING
-#include <sstream> // NOLINT
-#else
-#include <strstream> // NOLINT
-#endif // GTEST_HAS_STD_STRING
-
-// Determines whether RTTI is available.
-#ifndef GTEST_HAS_RTTI
-// The user didn't tell us whether RTTI is enabled, so we need to
-// figure it out.
-
-#ifdef _MSC_VER
-
-#ifdef _CPPRTTI // MSVC defines this macro iff RTTI is enabled.
-#define GTEST_HAS_RTTI 1
-#else
-#define GTEST_HAS_RTTI 0
-#endif // _CPPRTTI
-
-#elif defined(__GNUC__)
-
-// Starting with version 4.3.2, gcc defines __GXX_RTTI iff RTTI is enabled.
-#if GTEST_GCC_VER_ >= 40302
-#ifdef __GXX_RTTI
-#define GTEST_HAS_RTTI 1
-#else
-#define GTEST_HAS_RTTI 0
-#endif // __GXX_RTTI
-#else
-// For gcc versions smaller than 4.3.2, we assume RTTI is enabled.
-#define GTEST_HAS_RTTI 1
-#endif // GTEST_GCC_VER_ >= 40302
-
-#else
-
-// Unknown compiler - assume RTTI is enabled.
-#define GTEST_HAS_RTTI 1
-
-#endif // _MSC_VER
-
-#endif // GTEST_HAS_RTTI
-
-// Determines whether <pthread.h> is available.
-#ifndef GTEST_HAS_PTHREAD
-// The user didn't tell us, so we need to figure it out.
-
-#if defined(GTEST_OS_LINUX) || defined(GTEST_OS_MAC)
-#define GTEST_HAS_PTHREAD 1
-#else
-#define GTEST_HAS_PTHREAD 0
-#endif // GTEST_OS_LINUX || GTEST_OS_MAC
-
-#endif // GTEST_HAS_PTHREAD
-
-// Determines whether tr1/tuple is available. If you have tr1/tuple
-// on your platform, define GTEST_HAS_TR1_TUPLE=1 for both the Google
-// Test project and your tests. If you would like Google Test to detect
-// tr1/tuple on your platform automatically, please open an issue
-// ticket at http://code.google.com/p/googletest.
-#ifndef GTEST_HAS_TR1_TUPLE
-// The user didn't tell us, so we need to figure it out.
-
-// GCC provides <tr1/tuple> since 4.0.0.
-#if defined(__GNUC__) && (GTEST_GCC_VER_ >= 40000)
-#define GTEST_HAS_TR1_TUPLE 1
-#else
-#define GTEST_HAS_TR1_TUPLE 0
-#endif // __GNUC__
-#endif // GTEST_HAS_TR1_TUPLE
-
-// To avoid conditional compilation everywhere, we make it
-// gtest-port.h's responsibility to #include the header implementing
-// tr1/tuple.
-#if GTEST_HAS_TR1_TUPLE
-#if defined(__GNUC__)
-// GCC implements tr1/tuple in the <tr1/tuple> header. This does not
-// conform to the TR1 spec, which requires the header to be <tuple>.
-#include <tr1/tuple>
-#else
-// If the compiler is not GCC, we assume the user is using a
-// spec-conforming TR1 implementation.
-#include <tuple>
-#endif // __GNUC__
-#endif // GTEST_HAS_TR1_TUPLE
-
-// Determines whether clone(2) is supported.
-// Usually it will only be available on Linux, excluding
-// Linux on the Itanium architecture.
-// Also see http://linux.die.net/man/2/clone.
-#ifndef GTEST_HAS_CLONE
-// The user didn't tell us, so we need to figure it out.
-
-#if defined(GTEST_OS_LINUX) && !defined(__ia64__)
-#define GTEST_HAS_CLONE 1
-#else
-#define GTEST_HAS_CLONE 0
-#endif // defined(GTEST_OS_LINUX) && !defined(__ia64__)
-
-#endif // GTEST_HAS_CLONE
-
-// Determines whether to support death tests.
-#if GTEST_HAS_STD_STRING && GTEST_HAS_CLONE
-#define GTEST_HAS_DEATH_TEST
-// On some platforms, <regex.h> needs someone to define size_t, and
-// won't compile otherwise. We can #include it here as we already
-// included <stdlib.h>, which is guaranteed to define size_t through
-// <stddef.h>.
-#include <regex.h>
-#include <vector>
-#include <fcntl.h>
-#include <sys/mman.h>
-#endif // GTEST_HAS_STD_STRING && GTEST_HAS_CLONE
-
-// Determines whether to support value-parameterized tests.
-
-#if defined(__GNUC__) || (_MSC_VER >= 1400)
-// TODO(vladl at google.com): get the implementation rid of vector and list
-// to compile on MSVC 7.1.
-#define GTEST_HAS_PARAM_TEST
-#endif // defined(__GNUC__) || (_MSC_VER >= 1400)
-
-// Determines whether to support type-driven tests.
-
-// Typed tests need <typeinfo> and variadic macros, which gcc and VC
-// 8.0+ support.
-#if defined(__GNUC__) || (_MSC_VER >= 1400)
-#define GTEST_HAS_TYPED_TEST
-#define GTEST_HAS_TYPED_TEST_P
-#endif // defined(__GNUC__) || (_MSC_VER >= 1400)
-
-// Determines whether to support Combine(). This only makes sense when
-// value-parameterized tests are enabled.
-#if defined(GTEST_HAS_PARAM_TEST) && GTEST_HAS_TR1_TUPLE
-#define GTEST_HAS_COMBINE
-#endif // defined(GTEST_HAS_PARAM_TEST) && GTEST_HAS_TR1_TUPLE
-
-// Determines whether the system compiler uses UTF-16 for encoding wide strings.
-#if defined(GTEST_OS_WINDOWS) || defined(GTEST_OS_CYGWIN) || \
- defined(GTEST_OS_SYMBIAN)
-#define GTEST_WIDE_STRING_USES_UTF16_ 1
-#endif
-
-// Defines some utility macros.
-
-// The GNU compiler emits a warning if nested "if" statements are followed by
-// an "else" statement and braces are not used to explicitly disambiguate the
-// "else" binding. This leads to problems with code like:
-//
-// if (gate)
-// ASSERT_*(condition) << "Some message";
-//
-// The "switch (0) case 0:" idiom is used to suppress this.
-#ifdef __INTEL_COMPILER
-#define GTEST_AMBIGUOUS_ELSE_BLOCKER_
-#else
-#define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: // NOLINT
-#endif
-
-// Use this annotation at the end of a struct / class definition to
-// prevent the compiler from optimizing away instances that are never
-// used. This is useful when all interesting logic happens inside the
-// c'tor and / or d'tor. Example:
-//
-// struct Foo {
-// Foo() { ... }
-// } GTEST_ATTRIBUTE_UNUSED_;
-#if defined(__GNUC__) && !defined(COMPILER_ICC)
-#define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))
-#else
-#define GTEST_ATTRIBUTE_UNUSED_
-#endif
-
-// A macro to disallow the evil copy constructor and operator= functions
-// This should be used in the private: declarations for a class.
-#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type)\
- type(const type &);\
- void operator=(const type &)
-
-// Tell the compiler to warn about unused return values for functions declared
-// with this macro. The macro should be used on function declarations
-// following the argument list:
-//
-// Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_;
-#if defined(__GNUC__) && (GTEST_GCC_VER_ >= 30400) && !defined(COMPILER_ICC)
-#define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result))
-#else
-#define GTEST_MUST_USE_RESULT_
-#endif // __GNUC__ && (GTEST_GCC_VER_ >= 30400) && !COMPILER_ICC
-
-namespace testing {
-
-class Message;
-
-namespace internal {
-
-class String;
-
-// std::strstream is deprecated. However, we have to use it on
-// Windows as std::stringstream won't compile on Windows when
-// exceptions are disabled. We use std::stringstream on other
-// platforms to avoid compiler warnings there.
-#if GTEST_HAS_STD_STRING
-typedef ::std::stringstream StrStream;
-#else
-typedef ::std::strstream StrStream;
-#endif // GTEST_HAS_STD_STRING
-
-// Defines scoped_ptr.
-
-// This implementation of scoped_ptr is PARTIAL - it only contains
-// enough stuff to satisfy Google Test's need.
-template <typename T>
-class scoped_ptr {
- public:
- explicit scoped_ptr(T* p = NULL) : ptr_(p) {}
- ~scoped_ptr() { reset(); }
-
- T& operator*() const { return *ptr_; }
- T* operator->() const { return ptr_; }
- T* get() const { return ptr_; }
-
- T* release() {
- T* const ptr = ptr_;
- ptr_ = NULL;
- return ptr;
- }
-
- void reset(T* p = NULL) {
- if (p != ptr_) {
- if (sizeof(T) > 0) { // Makes sure T is a complete type.
- delete ptr_;
- }
- ptr_ = p;
- }
- }
- private:
- T* ptr_;
-
- GTEST_DISALLOW_COPY_AND_ASSIGN_(scoped_ptr);
-};
-
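A minimal usage sketch (not part of the deleted header) for the scoped_ptr above, assuming the header is included as <gtest/internal/gtest-port.h>; Demo() is only an illustrative name:

    #include <gtest/internal/gtest-port.h>

    void Demo() {
      // p owns the int and deletes it when it goes out of scope.
      testing::internal::scoped_ptr<int> p(new int(42));
      *p += 1;                  // operator* dereferences the owned object
      int* raw = p.release();   // ownership is handed back to the caller...
      delete raw;               // ...who must delete it manually
      p.reset(new int(0));      // reset() deletes the previously owned object (none here) first
    }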
-#ifdef GTEST_HAS_DEATH_TEST
-
-// Defines RE.
-
-// A simple C++ wrapper for <regex.h>. It uses the POSIX Extended
-// Regular Expression syntax.
-class RE {
- public:
- // Constructs an RE from a string.
-#if GTEST_HAS_STD_STRING
- RE(const ::std::string& regex) { Init(regex.c_str()); } // NOLINT
-#endif // GTEST_HAS_STD_STRING
-
-#if GTEST_HAS_GLOBAL_STRING
- RE(const ::string& regex) { Init(regex.c_str()); } // NOLINT
-#endif // GTEST_HAS_GLOBAL_STRING
-
- RE(const char* regex) { Init(regex); } // NOLINT
- ~RE();
-
- // Returns the string representation of the regex.
- const char* pattern() const { return pattern_; }
-
- // FullMatch(str, re) returns true iff regular expression re matches
- // the entire str.
- // PartialMatch(str, re) returns true iff regular expression re
- // matches a substring of str (including str itself).
- //
- // TODO(wan at google.com): make FullMatch() and PartialMatch() work
- // when str contains NUL characters.
-#if GTEST_HAS_STD_STRING
- static bool FullMatch(const ::std::string& str, const RE& re) {
- return FullMatch(str.c_str(), re);
- }
- static bool PartialMatch(const ::std::string& str, const RE& re) {
- return PartialMatch(str.c_str(), re);
- }
-#endif // GTEST_HAS_STD_STRING
-
-#if GTEST_HAS_GLOBAL_STRING
- static bool FullMatch(const ::string& str, const RE& re) {
- return FullMatch(str.c_str(), re);
- }
- static bool PartialMatch(const ::string& str, const RE& re) {
- return PartialMatch(str.c_str(), re);
- }
-#endif // GTEST_HAS_GLOBAL_STRING
-
- static bool FullMatch(const char* str, const RE& re);
- static bool PartialMatch(const char* str, const RE& re);
-
- private:
- void Init(const char* regex);
-
- // We use a const char* instead of a string, as Google Test may be used
- // where string is not available. We also do not use Google Test's own
- // String type here, in order to simplify dependencies between the
- // files.
- const char* pattern_;
- regex_t full_regex_; // For FullMatch().
- regex_t partial_regex_; // For PartialMatch().
- bool is_valid_;
-};
-
-#endif // GTEST_HAS_DEATH_TEST
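A short sketch of how the RE wrapper above is meant to be used (only when GTEST_HAS_DEATH_TEST is defined, since the class is guarded by it); the function name is illustrative:

    bool ContainsDigits(const char* s) {
      const testing::internal::RE re("[0-9]+");              // POSIX Extended syntax
      return testing::internal::RE::PartialMatch(s, re);     // substring match
      // RE::FullMatch(s, re) would require the entire string to match.
    }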
-
-// Defines logging utilities:
-// GTEST_LOG_() - logs messages at the specified severity level.
-// LogToStderr() - directs all log messages to stderr.
-// FlushInfoLog() - flushes informational log messages.
-
-enum GTestLogSeverity {
- GTEST_INFO,
- GTEST_WARNING,
- GTEST_ERROR,
- GTEST_FATAL
-};
-
-void GTestLog(GTestLogSeverity severity, const char* file,
- int line, const char* msg);
-
-#define GTEST_LOG_(severity, msg)\
- ::testing::internal::GTestLog(\
- ::testing::internal::GTEST_##severity, __FILE__, __LINE__, \
- (::testing::Message() << (msg)).GetString().c_str())
-
-inline void LogToStderr() {}
-inline void FlushInfoLog() { fflush(NULL); }
-
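A small usage sketch for the logging helpers above; the severity name is pasted onto GTEST_ to select one of the GTestLogSeverity values:

    // Assumes <gtest/gtest.h> is included so that ::testing::Message is complete.
    void WarnOnce() {
      GTEST_LOG_(WARNING, "falling back to the single-threaded code path");
      testing::internal::FlushInfoLog();   // just fflush(NULL) in this port
    }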
-// Defines the stderr capturer:
-// CaptureStderr - starts capturing stderr.
-// GetCapturedStderr - stops capturing stderr and returns the captured string.
-
-#ifdef GTEST_HAS_DEATH_TEST
-
-// A copy of all command line arguments. Set by InitGoogleTest().
-extern ::std::vector<String> g_argvs;
-
-void CaptureStderr();
-// GTEST_HAS_DEATH_TEST implies we have ::std::string.
-::std::string GetCapturedStderr();
-const ::std::vector<String>& GetArgvs();
-
-#endif // GTEST_HAS_DEATH_TEST
-
-// Defines synchronization primitives.
-
-// A dummy implementation of synchronization primitives (mutex, lock,
-// and thread-local variable). Necessary for compiling Google Test where
-// mutex is not supported - using Google Test in multiple threads is not
-// supported on such platforms.
-
-class Mutex {
- public:
- Mutex() {}
- explicit Mutex(int /*unused*/) {}
- void AssertHeld() const {}
- enum { NO_CONSTRUCTOR_NEEDED_FOR_STATIC_MUTEX = 0 };
-};
-
-// We cannot call it MutexLock directly as the ctor declaration would
-// conflict with a macro named MutexLock, which is defined on some
-// platforms. Hence the typedef trick below.
-class GTestMutexLock {
- public:
- explicit GTestMutexLock(Mutex*) {} // NOLINT
-};
-
-typedef GTestMutexLock MutexLock;
-
-template <typename T>
-class ThreadLocal {
- public:
- ThreadLocal() : value_() {}
- explicit ThreadLocal(const T& value) : value_(value) {}
- T* pointer() { return &value_; }
- const T* pointer() const { return &value_; }
- const T& get() const { return value_; }
- void set(const T& value) { value_ = value; }
- private:
- T value_;
-};
-
-// There's no portable way to detect the number of threads, so we just
-// return 0 to indicate that we cannot detect it.
-inline size_t GetThreadCount() { return 0; }
-
-// The above synchronization primitives have dummy implementations.
-// Therefore Google Test is not thread-safe.
-#define GTEST_IS_THREADSAFE 0
-
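A sketch of how callers use these primitives; on platforms covered by this dummy port the calls compile but provide no real locking or per-thread storage:

    static testing::internal::Mutex g_counter_mutex;
    static testing::internal::ThreadLocal<int> g_counter;

    void Increment() {
      testing::internal::MutexLock lock(&g_counter_mutex);   // no-op here
      g_counter.set(g_counter.get() + 1);
    }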
-#if defined(__SYMBIAN32__) || defined(__IBMCPP__)
-
-// Passing non-POD classes through ellipsis (...) crashes the ARM
-// compiler. The Nokia Symbian and the IBM XL C/C++ compiler try to
-// instantiate a copy constructor for objects passed through ellipsis
-// (...), failing for uncopyable objects. We define this to indicate
-// the fact.
-#define GTEST_ELLIPSIS_NEEDS_COPY_ 1
-
-// The Nokia Symbian and IBM XL C/C++ compilers cannot decide between
-// const T& and const T* in a function template. These compilers
-// _can_ decide between class template specializations for T and T*,
-// so a tr1::type_traits-like is_pointer works.
-#define GTEST_NEEDS_IS_POINTER_ 1
-
-#endif // defined(__SYMBIAN32__) || defined(__IBMCPP__)
-
-template <bool bool_value>
-struct bool_constant {
- typedef bool_constant<bool_value> type;
- static const bool value = bool_value;
-};
-template <bool bool_value> const bool bool_constant<bool_value>::value;
-
-typedef bool_constant<false> false_type;
-typedef bool_constant<true> true_type;
-
-template <typename T>
-struct is_pointer : public false_type {};
-
-template <typename T>
-struct is_pointer<T*> : public true_type {};
-
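A compile-time dispatch sketch using the traits above (the IsPointerArg helpers are illustrative, not part of Google Test):

    template <typename T>
    bool IsPointerArg(const T&, testing::internal::true_type)  { return true; }
    template <typename T>
    bool IsPointerArg(const T&, testing::internal::false_type) { return false; }

    template <typename T>
    bool IsPointerArg(const T& value) {
      // is_pointer<T>::type is true_type for T* and false_type otherwise.
      return IsPointerArg(value, typename testing::internal::is_pointer<T>::type());
    }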
-// Defines BiggestInt as the biggest signed integer type the compiler
-// supports.
-
-#ifdef GTEST_OS_WINDOWS
-typedef __int64 BiggestInt;
-#else
-typedef long long BiggestInt; // NOLINT
-#endif // GTEST_OS_WINDOWS
-
-// The maximum number a BiggestInt can represent. This definition
-// works no matter whether BiggestInt is represented in one's complement or
-// two's complement.
-//
-// We cannot rely on numeric_limits in STL, as __int64 and long long
-// are not part of standard C++ and numeric_limits doesn't need to be
-// defined for them.
-const BiggestInt kMaxBiggestInt =
- ~(static_cast<BiggestInt>(1) << (8*sizeof(BiggestInt) - 1));
-
-// This template class serves as a compile-time function from size to
-// type. It maps a size in bytes to a primitive type with that
-// size. e.g.
-//
-// TypeWithSize<4>::UInt
-//
-// is typedef-ed to be unsigned int (unsigned integer made up of 4
-// bytes).
-//
-// Such functionality should belong to STL, but I cannot find it
-// there.
-//
-// Google Test uses this class in the implementation of floating-point
-// comparison.
-//
-// For now it only handles UInt (unsigned int) as that's all Google Test
-// needs. Other types can be easily added in the future if need
-// arises.
-template <size_t size>
-class TypeWithSize {
- public:
- // This prevents the user from using TypeWithSize<N> with incorrect
- // values of N.
- typedef void UInt;
-};
-
-// The specialization for size 4.
-template <>
-class TypeWithSize<4> {
- public:
- // unsigned int has size 4 in both gcc and MSVC.
- //
- // As base/basictypes.h doesn't compile on Windows, we cannot use
-// uint32, uint64, etc. here.
- typedef int Int;
- typedef unsigned int UInt;
-};
-
-// The specialization for size 8.
-template <>
-class TypeWithSize<8> {
- public:
-#ifdef GTEST_OS_WINDOWS
- typedef __int64 Int;
- typedef unsigned __int64 UInt;
-#else
- typedef long long Int; // NOLINT
- typedef unsigned long long UInt; // NOLINT
-#endif // GTEST_OS_WINDOWS
-};
-
-// Integer types of known sizes.
-typedef TypeWithSize<4>::Int Int32;
-typedef TypeWithSize<4>::UInt UInt32;
-typedef TypeWithSize<8>::Int Int64;
-typedef TypeWithSize<8>::UInt UInt64;
-typedef TypeWithSize<8>::Int TimeInMillis; // Represents time in milliseconds.
-
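A quick compile-time sanity sketch for the size guarantees above, using the classic negative-array-size trick (not part of Google Test):

    typedef char Int32Is4Bytes[sizeof(testing::internal::Int32) == 4 ? 1 : -1];
    typedef char Int64Is8Bytes[sizeof(testing::internal::Int64) == 8 ? 1 : -1];
    // TypeWithSize<2>::UInt is void, so trying to use it as a value type
    // fails to compile, which is the intended guard against unsupported sizes.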
-// Utilities for command line flags and environment variables.
-
-// A wrapper for getenv() that works on Linux, Windows, and Mac OS.
-inline const char* GetEnv(const char* name) {
-#ifdef _WIN32_WCE // We are on Windows CE.
- // CE has no environment variables.
- return NULL;
-#elif defined(GTEST_OS_WINDOWS) // We are on Windows proper.
- // MSVC 8 deprecates getenv(), so we want to suppress warning 4996
- // (deprecated function) there.
-#pragma warning(push) // Saves the current warning state.
-#pragma warning(disable:4996) // Temporarily disables warning 4996.
- return getenv(name);
-#pragma warning(pop) // Restores the warning state.
-#else // We are on Linux or Mac OS.
- return getenv(name);
-#endif
-}
-
-#ifdef _WIN32_WCE
-// Windows CE has no C library. The abort() function is used in
-// several places in Google Test. This implementation provides a reasonable
-// imitation of standard behaviour.
-void abort();
-#else
-inline void abort() { ::abort(); }
-#endif // _WIN32_WCE
-
-// INTERNAL IMPLEMENTATION - DO NOT USE.
-//
-// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition
-// is not satisfied.
-// Synopsis:
-// GTEST_CHECK_(boolean_condition);
-// or
-// GTEST_CHECK_(boolean_condition) << "Additional message";
-//
-// This checks the condition and, if it is not satisfied, prints a message
-// about the condition violation (including the condition itself and any
-// additional message streamed into it) and then aborts the program. It
-// aborts regardless of whether the program is built in debug mode or not.
-class GTestCheckProvider {
- public:
- GTestCheckProvider(const char* condition, const char* file, int line) {
- FormatFileLocation(file, line);
- ::std::cerr << " ERROR: Condition " << condition << " failed. ";
- }
- ~GTestCheckProvider() {
- ::std::cerr << ::std::endl;
- abort();
- }
- void FormatFileLocation(const char* file, int line) {
- if (file == NULL)
- file = "unknown file";
- if (line < 0) {
- ::std::cerr << file << ":";
- } else {
-#if _MSC_VER
- ::std::cerr << file << "(" << line << "):";
-#else
- ::std::cerr << file << ":" << line << ":";
-#endif
- }
- }
- ::std::ostream& GetStream() { return ::std::cerr; }
-};
-#define GTEST_CHECK_(condition) \
- GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
- if (condition) \
- ; \
- else \
- ::testing::internal::GTestCheckProvider(\
- #condition, __FILE__, __LINE__).GetStream()
-
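A short usage sketch for GTEST_CHECK_ (internal-only, per the note above); the function is illustrative:

    void CopyName(const char* name, char* dest) {
      GTEST_CHECK_(name != NULL) << "CopyName() requires a non-NULL name";
      GTEST_CHECK_(dest != NULL);
      while ((*dest++ = *name++) != '\0') {}
    }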
-// Macro for referencing flags.
-#define GTEST_FLAG(name) FLAGS_gtest_##name
-
-// Macros for declaring flags.
-#define GTEST_DECLARE_bool_(name) extern bool GTEST_FLAG(name)
-#define GTEST_DECLARE_int32_(name) \
- extern ::testing::internal::Int32 GTEST_FLAG(name)
-#define GTEST_DECLARE_string_(name) \
- extern ::testing::internal::String GTEST_FLAG(name)
-
-// Macros for defining flags.
-#define GTEST_DEFINE_bool_(name, default_val, doc) \
- bool GTEST_FLAG(name) = (default_val)
-#define GTEST_DEFINE_int32_(name, default_val, doc) \
- ::testing::internal::Int32 GTEST_FLAG(name) = (default_val)
-#define GTEST_DEFINE_string_(name, default_val, doc) \
- ::testing::internal::String GTEST_FLAG(name) = (default_val)
-
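gtest.cc then declares and defines flags roughly like this (a sketch; the also_run_disabled_tests flag is shown only as an example):

    GTEST_DECLARE_bool_(also_run_disabled_tests);        // usually in a header
    GTEST_DEFINE_bool_(also_run_disabled_tests, false,
                       "Run disabled tests too.");        // in a .cc file

    bool ShouldRunDisabled() {
      return GTEST_FLAG(also_run_disabled_tests);  // i.e. FLAGS_gtest_also_run_disabled_tests
    }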
-// Parses 'str' for a 32-bit signed integer. If successful, writes the result
-// to *value and returns true; otherwise leaves *value unchanged and returns
-// false.
-// TODO(chandlerc): Find a better way to refactor flag and environment parsing
-// out of both gtest-port.cc and gtest.cc to avoid exporting this utility
-// function.
-bool ParseInt32(const Message& src_text, const char* str, Int32* value);
-
-// Parses a bool/Int32/string from the environment variable
-// corresponding to the given Google Test flag.
-bool BoolFromGTestEnv(const char* flag, bool default_val);
-Int32 Int32FromGTestEnv(const char* flag, Int32 default_val);
-const char* StringFromGTestEnv(const char* flag, const char* default_val);
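A usage sketch; for a flag named "foo" these helpers are expected to consult the GTEST_FOO environment variable (flag name upper-cased behind GTEST_FLAG_PREFIX_UPPER):

    void ReadDefaultsFromEnvironment() {
      using namespace testing::internal;
      bool print_time = BoolFromGTestEnv("print_time", true);   // GTEST_PRINT_TIME
      Int32 repeat    = Int32FromGTestEnv("repeat", 1);         // GTEST_REPEAT
      const char* out = StringFromGTestEnv("output", "");       // GTEST_OUTPUT
      (void)print_time; (void)repeat; (void)out;
    }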
-
-} // namespace internal
-} // namespace testing
-
-#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-string.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-string.h
deleted file mode 100644
index 178f14e..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-string.h
+++ /dev/null
@@ -1,335 +0,0 @@
-// Copyright 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Authors: wan at google.com (Zhanyong Wan), eefacm at gmail.com (Sean Mcafee)
-//
-// The Google C++ Testing Framework (Google Test)
-//
-// This header file declares the String class and functions used internally by
-// Google Test. They are subject to change without notice. They should not be used
-// by code external to Google Test.
-//
-// This header file is #included by testing/base/internal/gtest-internal.h.
-// It should not be #included by other files.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
-
-#include <string.h>
-#include <gtest/internal/gtest-port.h>
-
-#if GTEST_HAS_GLOBAL_STRING || GTEST_HAS_STD_STRING
-#include <string>
-#endif // GTEST_HAS_GLOBAL_STRING || GTEST_HAS_STD_STRING
-
-namespace testing {
-namespace internal {
-
-// String - a UTF-8 string class.
-//
-// We cannot use std::string as Microsoft's STL implementation in
-// Visual C++ 7.1 has problems when exception is disabled. There is a
-// hack to work around this, but we've seen cases where the hack fails
-// to work.
-//
-// Also, String is different from std::string in that it can represent
-// both NULL and the empty string, while std::string cannot represent
-// NULL.
-//
-// NULL and the empty string are considered different. NULL is less
-// than anything (including the empty string) except itself.
-//
-// This class only provides minimum functionality necessary for
-// implementing Google Test. We do not intend to implement a full-fledged
-// string class here.
-//
-// Since the purpose of this class is to provide a substitute for
-// std::string on platforms where it cannot be used, we define a copy
-// constructor and assignment operators such that we don't need
-// conditional compilation in a lot of places.
-//
-// In order to make the representation efficient, the d'tor of String
-// is not virtual. Therefore DO NOT INHERIT FROM String.
-class String {
- public:
- // Static utility methods
-
- // Returns the input if it's not NULL, otherwise returns "(null)".
- // This function serves two purposes:
- //
- // 1. ShowCString(NULL) has type 'const char *', instead of the
- // type of NULL (which is int).
- //
- // 2. In MSVC, streaming a null char pointer to StrStream generates
- // an access violation, so we need to convert NULL to "(null)"
- // before streaming it.
- static inline const char* ShowCString(const char* c_str) {
- return c_str ? c_str : "(null)";
- }
-
- // Returns the input enclosed in double quotes if it's not NULL;
- // otherwise returns "(null)". For example, "\"Hello\"" is returned
- // for input "Hello".
- //
- // This is useful for printing a C string in the syntax of a literal.
- //
- // Known issue: escape sequences are not handled yet.
- static String ShowCStringQuoted(const char* c_str);
-
- // Clones a 0-terminated C string, allocating memory using new. The
- // caller is responsible for deleting the return value using
- // delete[]. Returns the cloned string, or NULL if the input is
- // NULL.
- //
- // This is different from strdup() in string.h, which allocates
- // memory using malloc().
- static const char* CloneCString(const char* c_str);
-
-#ifdef _WIN32_WCE
- // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be
- // able to pass strings to Win32 APIs on CE we need to convert them
- // to 'Unicode', UTF-16.
-
- // Creates a UTF-16 wide string from the given ANSI string, allocating
- // memory using new. The caller is responsible for deleting the return
- // value using delete[]. Returns the wide string, or NULL if the
- // input is NULL.
- //
- // The wide string is created using the ANSI codepage (CP_ACP) to
- // match the behaviour of the ANSI versions of Win32 calls and the
- // C runtime.
- static LPCWSTR AnsiToUtf16(const char* c_str);
-
- // Creates an ANSI string from the given wide string, allocating
- // memory using new. The caller is responsible for deleting the return
- // value using delete[]. Returns the ANSI string, or NULL if the
- // input is NULL.
- //
- // The returned string is created using the ANSI codepage (CP_ACP) to
- // match the behaviour of the ANSI versions of Win32 calls and the
- // C runtime.
- static const char* Utf16ToAnsi(LPCWSTR utf16_str);
-#endif
-
- // Compares two C strings. Returns true iff they have the same content.
- //
- // Unlike strcmp(), this function can handle NULL argument(s). A
- // NULL C string is considered different to any non-NULL C string,
- // including the empty string.
- static bool CStringEquals(const char* lhs, const char* rhs);
-
- // Converts a wide C string to a String using the UTF-8 encoding.
- // NULL will be converted to "(null)". If an error occurred during
- // the conversion, "(failed to convert from wide string)" is
- // returned.
- static String ShowWideCString(const wchar_t* wide_c_str);
-
- // Similar to ShowWideCString(), except that this function encloses
- // the converted string in double quotes.
- static String ShowWideCStringQuoted(const wchar_t* wide_c_str);
-
- // Compares two wide C strings. Returns true iff they have the same
- // content.
- //
- // Unlike wcscmp(), this function can handle NULL argument(s). A
- // NULL C string is considered different to any non-NULL C string,
- // including the empty string.
- static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs);
-
- // Compares two C strings, ignoring case. Returns true iff they
- // have the same content.
- //
- // Unlike strcasecmp(), this function can handle NULL argument(s).
- // A NULL C string is considered different to any non-NULL C string,
- // including the empty string.
- static bool CaseInsensitiveCStringEquals(const char* lhs,
- const char* rhs);
-
- // Compares two wide C strings, ignoring case. Returns true iff they
- // have the same content.
- //
- // Unlike wcscasecmp(), this function can handle NULL argument(s).
- // A NULL C string is considered different to any non-NULL wide C string,
- // including the empty string.
-// NB: The implementations differ slightly across platforms.
-// On Windows, this method uses _wcsicmp, which compares according to the
-// LC_CTYPE environment variable. On GNU platforms it uses wcscasecmp,
-// which compares according to the LC_CTYPE category of the current locale.
-// On Mac OS X, it uses towlower, which also uses the LC_CTYPE category of
-// the current locale.
- static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
- const wchar_t* rhs);
-
- // Formats a list of arguments to a String, using the same format
- // spec string as for printf.
- //
- // We do not use the StringPrintf class as it is not universally
- // available.
- //
-// The result is limited to 4096 characters (including the trailing
- // 0). If 4096 characters are not enough to format the input,
- // "<buffer exceeded>" is returned.
- static String Format(const char* format, ...);
-
- // C'tors
-
- // The default c'tor constructs a NULL string.
- String() : c_str_(NULL) {}
-
- // Constructs a String by cloning a 0-terminated C string.
- String(const char* c_str) : c_str_(NULL) { // NOLINT
- *this = c_str;
- }
-
- // Constructs a String by copying a given number of chars from a
- // buffer. E.g. String("hello", 3) will create the string "hel".
- String(const char* buffer, size_t len);
-
- // The copy c'tor creates a new copy of the string. The two
- // String objects do not share content.
- String(const String& str) : c_str_(NULL) {
- *this = str;
- }
-
- // D'tor. String is intended to be a final class, so the d'tor
- // doesn't need to be virtual.
- ~String() { delete[] c_str_; }
-
- // Allows a String to be implicitly converted to an ::std::string or
- // ::string, and vice versa. Converting a String containing a NULL
- // pointer to ::std::string or ::string is undefined behavior.
- // Converting a ::std::string or ::string containing an embedded NUL
- // character to a String will result in the prefix up to the first
- // NUL character.
-#if GTEST_HAS_STD_STRING
- String(const ::std::string& str) : c_str_(NULL) { *this = str.c_str(); }
-
- operator ::std::string() const { return ::std::string(c_str_); }
-#endif // GTEST_HAS_STD_STRING
-
-#if GTEST_HAS_GLOBAL_STRING
- String(const ::string& str) : c_str_(NULL) { *this = str.c_str(); }
-
- operator ::string() const { return ::string(c_str_); }
-#endif // GTEST_HAS_GLOBAL_STRING
-
- // Returns true iff this is an empty string (i.e. "").
- bool empty() const {
- return (c_str_ != NULL) && (*c_str_ == '\0');
- }
-
- // Compares this with another String.
- // Returns < 0 if this is less than rhs, 0 if this is equal to rhs, or > 0
- // if this is greater than rhs.
- int Compare(const String& rhs) const;
-
- // Returns true iff this String equals the given C string. A NULL
- // string and a non-NULL string are considered not equal.
- bool operator==(const char* c_str) const {
- return CStringEquals(c_str_, c_str);
- }
-
- // Returns true iff this String is less than the given C string. A NULL
- // string is considered less than "".
- bool operator<(const String& rhs) const { return Compare(rhs) < 0; }
-
- // Returns true iff this String doesn't equal the given C string. A NULL
- // string and a non-NULL string are considered not equal.
- bool operator!=(const char* c_str) const {
- return !CStringEquals(c_str_, c_str);
- }
-
- // Returns true iff this String ends with the given suffix. *Any*
- // String is considered to end with a NULL or empty suffix.
- bool EndsWith(const char* suffix) const;
-
- // Returns true iff this String ends with the given suffix, not considering
- // case. Any String is considered to end with a NULL or empty suffix.
- bool EndsWithCaseInsensitive(const char* suffix) const;
-
- // Returns the length of the encapsulated string, or -1 if the
- // string is NULL.
- int GetLength() const {
- return c_str_ ? static_cast<int>(strlen(c_str_)) : -1;
- }
-
- // Gets the 0-terminated C string this String object represents.
- // The String object still owns the string. Therefore the caller
- // should NOT delete the return value.
- const char* c_str() const { return c_str_; }
-
- // Sets the 0-terminated C string this String object represents.
- // The old string in this object is deleted, and this object will
- // own a clone of the input string. This function copies only up to
- // length bytes (plus a terminating null byte), or until the first
- // null byte, whichever comes first.
- //
- // This function works even when the c_str parameter has the same
- // value as that of the c_str_ field.
- void Set(const char* c_str, size_t length);
-
- // Assigns a C string to this object. Self-assignment works.
- const String& operator=(const char* c_str);
-
- // Assigns a String object to this object. Self-assignment works.
- const String& operator=(const String &rhs) {
- *this = rhs.c_str_;
- return *this;
- }
-
- private:
- const char* c_str_;
-};
-
-// Streams a String to an ostream.
-inline ::std::ostream& operator <<(::std::ostream& os, const String& str) {
- // We call String::ShowCString() to convert NULL to "(null)".
- // Otherwise we'll get an access violation on Windows.
- return os << String::ShowCString(str.c_str());
-}
-
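A minimal usage sketch for String, highlighting the NULL-vs-empty distinction described above (not part of the deleted header):

    #include <stdio.h>
    #include <gtest/internal/gtest-string.h>

    void StringDemo() {
      testing::internal::String hello("hello");
      testing::internal::String null_str;          // default c'tor: NULL string
      printf("%d\n", hello == null_str.c_str());   // 0: NULL != non-NULL
      printf("%s\n", testing::internal::String::ShowCString(null_str.c_str()));  // "(null)"
    }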
-// Gets the content of the StrStream's buffer as a String. Each '\0'
-// character in the buffer is replaced with "\\0".
-String StrStreamToString(StrStream* stream);
-
-// Converts a streamable value to a String. A NULL pointer is
-// converted to "(null)". When the input value is a ::string,
-// ::std::string, ::wstring, or ::std::wstring object, each NUL
-// character in it is replaced with "\\0".
-
-// Declared here but defined in gtest.h, so that it has access
-// to the definition of the Message class, required by the ARM
-// compiler.
-template <typename T>
-String StreamableToString(const T& streamable);
-
-} // namespace internal
-} // namespace testing
-
-#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
diff --git a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-type-util.h b/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-type-util.h
deleted file mode 100644
index 815da4b..0000000
--- a/libclamav/c++/llvm/utils/unittest/googletest/include/gtest/internal/gtest-type-util.h
+++ /dev/null
@@ -1,3319 +0,0 @@
-// This file was GENERATED by a script. DO NOT EDIT BY HAND!!!
-
-// Copyright 2008 Google Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: wan at google.com (Zhanyong Wan)
-
-// Type utilities needed for implementing typed and type-parameterized
-// tests. This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
-//
-// Currently we support at most 50 types in a list, and at most 50
-// type-parameterized tests in one type-parameterized test case.
-// Please contact googletestframework at googlegroups.com if you need
-// more.
-
-#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
-#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
-
-#include <gtest/internal/gtest-port.h>
-#include <gtest/internal/gtest-string.h>
-
-#if defined(GTEST_HAS_TYPED_TEST) || defined(GTEST_HAS_TYPED_TEST_P)
-
-#ifdef __GNUC__
-#include <cxxabi.h>
-#endif // __GNUC__
-
-#include <typeinfo>
-
-namespace testing {
-namespace internal {
-
-// AssertTypeEq<T1, T2>::type is defined iff T1 and T2 are the same
-// type. This can be used as a compile-time assertion to ensure that
-// two types are equal.
-
-template <typename T1, typename T2>
-struct AssertTypeEq;
-
-template <typename T>
-struct AssertTypeEq<T, T> {
- typedef bool type;
-};
-
-// GetTypeName<T>() returns a human-readable name of type T.
-template <typename T>
-String GetTypeName() {
-#if GTEST_HAS_RTTI
-
- const char* const name = typeid(T).name();
-#ifdef __GNUC__
- int status = 0;
- // gcc's implementation of typeid(T).name() mangles the type name,
- // so we have to demangle it.
- char* const readable_name = abi::__cxa_demangle(name, 0, 0, &status);
- const String name_str(status == 0 ? readable_name : name);
- free(readable_name);
- return name_str;
-#else
- return name;
-#endif // __GNUC__
-
-#else
- return "<type>";
-#endif // GTEST_HAS_RTTI
-}
-
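For example (a sketch; the exact demangled spelling depends on the compiler and on whether RTTI is enabled):

    #include <vector>

    testing::internal::String DescribeVectorOfInt() {
      // With gcc + RTTI this is a demangled name such as
      // "std::vector<int, std::allocator<int> >"; without RTTI it is "<type>".
      return testing::internal::GetTypeName<std::vector<int> >();
    }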
-// A unique type used as the default value for the arguments of class
-// template Types. This allows us to simulate variadic templates
-// (e.g. Types<int>, Types<int, double>, etc.), which C++ doesn't
-// support directly.
-struct None {};
-
-// The following family of struct and struct templates are used to
-// represent type lists. In particular, TypesN<T1, T2, ..., TN>
-// represents a type list with N types (T1, T2, ..., and TN) in it.
-// Except for Types0, every struct in the family has two member types:
-// Head for the first type in the list, and Tail for the rest of the
-// list.
-
-// The empty type list.
-struct Types0 {};
-
-// Type lists of length 1, 2, 3, and so on.
-
-template <typename T1>
-struct Types1 {
- typedef T1 Head;
- typedef Types0 Tail;
-};
-template <typename T1, typename T2>
-struct Types2 {
- typedef T1 Head;
- typedef Types1<T2> Tail;
-};
-
-template <typename T1, typename T2, typename T3>
-struct Types3 {
- typedef T1 Head;
- typedef Types2<T2, T3> Tail;
-};
-
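For example, Types3<char, int, double> peels apart as follows (sketch; MyList and SecondType are illustrative names):

    // Types3<char, int, double>::Head   is char
    // Types3<char, int, double>::Tail   is Types2<int, double>
    // Types2<int, double>::Tail         is Types1<double>
    // Types1<double>::Tail              is Types0 (the empty list)
    typedef testing::internal::Types3<char, int, double> MyList;
    typedef MyList::Tail::Head SecondType;   // int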
-template <typename T1, typename T2, typename T3, typename T4>
-struct Types4 {
- typedef T1 Head;
- typedef Types3<T2, T3, T4> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5>
-struct Types5 {
- typedef T1 Head;
- typedef Types4<T2, T3, T4, T5> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6>
-struct Types6 {
- typedef T1 Head;
- typedef Types5<T2, T3, T4, T5, T6> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7>
-struct Types7 {
- typedef T1 Head;
- typedef Types6<T2, T3, T4, T5, T6, T7> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8>
-struct Types8 {
- typedef T1 Head;
- typedef Types7<T2, T3, T4, T5, T6, T7, T8> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9>
-struct Types9 {
- typedef T1 Head;
- typedef Types8<T2, T3, T4, T5, T6, T7, T8, T9> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10>
-struct Types10 {
- typedef T1 Head;
- typedef Types9<T2, T3, T4, T5, T6, T7, T8, T9, T10> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11>
-struct Types11 {
- typedef T1 Head;
- typedef Types10<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12>
-struct Types12 {
- typedef T1 Head;
- typedef Types11<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13>
-struct Types13 {
- typedef T1 Head;
- typedef Types12<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14>
-struct Types14 {
- typedef T1 Head;
- typedef Types13<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15>
-struct Types15 {
- typedef T1 Head;
- typedef Types14<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16>
-struct Types16 {
- typedef T1 Head;
- typedef Types15<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17>
-struct Types17 {
- typedef T1 Head;
- typedef Types16<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18>
-struct Types18 {
- typedef T1 Head;
- typedef Types17<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19>
-struct Types19 {
- typedef T1 Head;
- typedef Types18<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20>
-struct Types20 {
- typedef T1 Head;
- typedef Types19<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21>
-struct Types21 {
- typedef T1 Head;
- typedef Types20<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22>
-struct Types22 {
- typedef T1 Head;
- typedef Types21<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23>
-struct Types23 {
- typedef T1 Head;
- typedef Types22<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24>
-struct Types24 {
- typedef T1 Head;
- typedef Types23<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25>
-struct Types25 {
- typedef T1 Head;
- typedef Types24<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26>
-struct Types26 {
- typedef T1 Head;
- typedef Types25<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27>
-struct Types27 {
- typedef T1 Head;
- typedef Types26<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28>
-struct Types28 {
- typedef T1 Head;
- typedef Types27<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29>
-struct Types29 {
- typedef T1 Head;
- typedef Types28<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30>
-struct Types30 {
- typedef T1 Head;
- typedef Types29<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31>
-struct Types31 {
- typedef T1 Head;
- typedef Types30<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32>
-struct Types32 {
- typedef T1 Head;
- typedef Types31<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33>
-struct Types33 {
- typedef T1 Head;
- typedef Types32<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34>
-struct Types34 {
- typedef T1 Head;
- typedef Types33<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35>
-struct Types35 {
- typedef T1 Head;
- typedef Types34<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36>
-struct Types36 {
- typedef T1 Head;
- typedef Types35<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37>
-struct Types37 {
- typedef T1 Head;
- typedef Types36<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38>
-struct Types38 {
- typedef T1 Head;
- typedef Types37<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39>
-struct Types39 {
- typedef T1 Head;
- typedef Types38<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40>
-struct Types40 {
- typedef T1 Head;
- typedef Types39<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41>
-struct Types41 {
- typedef T1 Head;
- typedef Types40<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42>
-struct Types42 {
- typedef T1 Head;
- typedef Types41<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43>
-struct Types43 {
- typedef T1 Head;
- typedef Types42<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
- T43> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44>
-struct Types44 {
- typedef T1 Head;
- typedef Types43<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- T44> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45>
-struct Types45 {
- typedef T1 Head;
- typedef Types44<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- T44, T45> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46>
-struct Types46 {
- typedef T1 Head;
- typedef Types45<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- T44, T45, T46> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46, typename T47>
-struct Types47 {
- typedef T1 Head;
- typedef Types46<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- T44, T45, T46, T47> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46, typename T47, typename T48>
-struct Types48 {
- typedef T1 Head;
- typedef Types47<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- T44, T45, T46, T47, T48> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46, typename T47, typename T48, typename T49>
-struct Types49 {
- typedef T1 Head;
- typedef Types48<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- T44, T45, T46, T47, T48, T49> Tail;
-};
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46, typename T47, typename T48, typename T49, typename T50>
-struct Types50 {
- typedef T1 Head;
- typedef Types49<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- T44, T45, T46, T47, T48, T49, T50> Tail;
-};
-
-
-} // namespace internal
-
-// We don't want to require the users to write TypesN<...> directly,
-// as that would require them to count the length. Types<...> is much
-// easier to write, but generates horrible messages when there is a
-// compiler error, as gcc insists on printing out each template
-// argument, even if it has the default value (this means Types<int>
-// will appear as Types<int, None, None, ..., None> in the compiler
-// errors).
-//
-// Our solution is to combine the best part of the two approaches: a
-// user would write Types<T1, ..., TN>, and Google Test will translate
-// that to TypesN<T1, ..., TN> internally to make error messages
-// readable. The translation is done by the 'type' member of the
-// Types template.
-template <typename T1 = internal::None, typename T2 = internal::None,
- typename T3 = internal::None, typename T4 = internal::None,
- typename T5 = internal::None, typename T6 = internal::None,
- typename T7 = internal::None, typename T8 = internal::None,
- typename T9 = internal::None, typename T10 = internal::None,
- typename T11 = internal::None, typename T12 = internal::None,
- typename T13 = internal::None, typename T14 = internal::None,
- typename T15 = internal::None, typename T16 = internal::None,
- typename T17 = internal::None, typename T18 = internal::None,
- typename T19 = internal::None, typename T20 = internal::None,
- typename T21 = internal::None, typename T22 = internal::None,
- typename T23 = internal::None, typename T24 = internal::None,
- typename T25 = internal::None, typename T26 = internal::None,
- typename T27 = internal::None, typename T28 = internal::None,
- typename T29 = internal::None, typename T30 = internal::None,
- typename T31 = internal::None, typename T32 = internal::None,
- typename T33 = internal::None, typename T34 = internal::None,
- typename T35 = internal::None, typename T36 = internal::None,
- typename T37 = internal::None, typename T38 = internal::None,
- typename T39 = internal::None, typename T40 = internal::None,
- typename T41 = internal::None, typename T42 = internal::None,
- typename T43 = internal::None, typename T44 = internal::None,
- typename T45 = internal::None, typename T46 = internal::None,
- typename T47 = internal::None, typename T48 = internal::None,
- typename T49 = internal::None, typename T50 = internal::None>
-struct Types {
- typedef internal::Types50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
- T41, T42, T43, T44, T45, T46, T47, T48, T49, T50> type;
-};
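The comment block above describes what the removed code does: a user-facing Types<...> template with fifty defaulted parameters, plus one partial specialization per arity, so that Types<T1, ..., TN>::type names the exact-length internal::TypesN<T1, ..., TN> and Head/Tail let the header walk the list one type at a time. The following standalone sketch is not part of the commit; it only mirrors the pattern with two slots (the None/Types0/Types1/Types2 names here are local stand-ins for the originals) to make the translation easy to see.

// Illustrative sketch only -- condenses the removed gtest-type-util.h
// pattern to two template slots. Compile as C++11 or later.
#include <type_traits>

namespace internal {

struct None {};    // placeholder filling unused template slots
struct Types0 {};  // the empty type list

template <typename T1>
struct Types1 {
  typedef T1 Head;
  typedef Types0 Tail;
};

template <typename T1, typename T2>
struct Types2 {
  typedef T1 Head;
  typedef Types1<T2> Tail;  // recursion: the tail is the next shorter list
};

}  // namespace internal

// Users write Types<...>; the 'type' member names the exact-length TypesN,
// selected by one partial specialization per number of real arguments.
template <typename T1 = internal::None, typename T2 = internal::None>
struct Types {
  typedef internal::Types2<T1, T2> type;
};

template <typename T1>
struct Types<T1, internal::None> {
  typedef internal::Types1<T1> type;
};

template <>
struct Types<internal::None, internal::None> {
  typedef internal::Types0 type;
};

int main() {
  // Types<int> has one real argument, so it translates to Types1<int>.
  static_assert(std::is_same<Types<int>::type,
                             internal::Types1<int> >::value, "one arg");
  // Head/Tail peel one type off the list at a time.
  static_assert(std::is_same<Types<int, char>::type::Head, int>::value,
                "head");
  static_assert(std::is_same<Types<int, char>::type::Tail::Head, char>::value,
                "tail head");
  return 0;
}

The real header extends this same scheme to fifty parameters, which is why the diff below keeps removing one specialization per arity.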
-
-template <>
-struct Types<internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None> {
- typedef internal::Types0 type;
-};
-template <typename T1>
-struct Types<T1, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None> {
- typedef internal::Types1<T1> type;
-};
-template <typename T1, typename T2>
-struct Types<T1, T2, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None> {
- typedef internal::Types2<T1, T2> type;
-};
-template <typename T1, typename T2, typename T3>
-struct Types<T1, T2, T3, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None> {
- typedef internal::Types3<T1, T2, T3> type;
-};
-template <typename T1, typename T2, typename T3, typename T4>
-struct Types<T1, T2, T3, T4, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None> {
- typedef internal::Types4<T1, T2, T3, T4> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5>
-struct Types<T1, T2, T3, T4, T5, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None> {
- typedef internal::Types5<T1, T2, T3, T4, T5> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6>
-struct Types<T1, T2, T3, T4, T5, T6, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None> {
- typedef internal::Types6<T1, T2, T3, T4, T5, T6> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7>
-struct Types<T1, T2, T3, T4, T5, T6, T7, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None> {
- typedef internal::Types7<T1, T2, T3, T4, T5, T6, T7> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None> {
- typedef internal::Types8<T1, T2, T3, T4, T5, T6, T7, T8> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None> {
- typedef internal::Types9<T1, T2, T3, T4, T5, T6, T7, T8, T9> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None> {
- typedef internal::Types10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None> {
- typedef internal::Types11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None> {
- typedef internal::Types12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
- T12> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None> {
- typedef internal::Types13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None> {
- typedef internal::Types14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None> {
- typedef internal::Types15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None> {
- typedef internal::Types16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None> {
- typedef internal::Types17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None> {
- typedef internal::Types18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None> {
- typedef internal::Types19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None> {
- typedef internal::Types20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None> {
- typedef internal::Types21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None> {
- typedef internal::Types22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None> {
- typedef internal::Types23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None> {
- typedef internal::Types24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None> {
- typedef internal::Types25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None> {
- typedef internal::Types26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
- T26> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None> {
- typedef internal::Types27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None> {
- typedef internal::Types28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None> {
- typedef internal::Types29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None> {
- typedef internal::Types30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None> {
- typedef internal::Types31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None> {
- typedef internal::Types32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None> {
- typedef internal::Types33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, T34, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None> {
- typedef internal::Types34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, T34, T35, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None> {
- typedef internal::Types35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, T34, T35, T36, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None> {
- typedef internal::Types36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35, T36> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, T34, T35, T36, T37, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None> {
- typedef internal::Types37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, T34, T35, T36, T37, T38, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None> {
- typedef internal::Types38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, T34, T35, T36, T37, T38, T39, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None> {
- typedef internal::Types39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None> {
- typedef internal::Types40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
- T40> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None, internal::None> {
- typedef internal::Types41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
- T41> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, internal::None,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None> {
- typedef internal::Types42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
- T41, T42> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None, internal::None> {
- typedef internal::Types43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
- T41, T42, T43> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
- internal::None, internal::None, internal::None, internal::None,
- internal::None, internal::None> {
- typedef internal::Types44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
- T41, T42, T43, T44> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
- internal::None, internal::None, internal::None, internal::None,
- internal::None> {
- typedef internal::Types45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
- T41, T42, T43, T44, T45> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
- T46, internal::None, internal::None, internal::None, internal::None> {
- typedef internal::Types46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
- T41, T42, T43, T44, T45, T46> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46, typename T47>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
- T46, T47, internal::None, internal::None, internal::None> {
- typedef internal::Types47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
- T41, T42, T43, T44, T45, T46, T47> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46, typename T47, typename T48>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
- T46, T47, T48, internal::None, internal::None> {
- typedef internal::Types48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
- T41, T42, T43, T44, T45, T46, T47, T48> type;
-};
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46, typename T47, typename T48, typename T49>
-struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
- T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
- T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
- T46, T47, T48, T49, internal::None> {
- typedef internal::Types49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
- T41, T42, T43, T44, T45, T46, T47, T48, T49> type;
-};
-
-namespace internal {
-
-#define GTEST_TEMPLATE_ template <typename T> class
-
-// The template "selector" struct TemplateSel<Tmpl> is used to
-// represent Tmpl, which must be a class template with one type
-// parameter, as a type. TemplateSel<Tmpl>::Bind<T>::type is defined
-// as the type Tmpl<T>. This allows us to actually instantiate the
-// template "selected" by TemplateSel<Tmpl>.
-//
-// This trick is necessary for simulating typedef for class templates,
-// which C++ doesn't support directly.
-template <GTEST_TEMPLATE_ Tmpl>
-struct TemplateSel {
- template <typename T>
- struct Bind {
- typedef Tmpl<T> type;
- };
-};
-
-#define GTEST_BIND_(TmplSel, T) \
- TmplSel::template Bind<T>::type
-
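As a concrete illustration of the selector trick described above, here is a small sketch (an editorial example, not taken from the gtest header itself) that binds a hypothetical single-parameter class template MyContainer through TemplateSel. It assumes it is compiled where the definitions above are visible (they live in namespace testing::internal).

  template <typename T> struct MyContainer { T value; };

  typedef TemplateSel<MyContainer> MySel;      // the class template, carried around as a type
  typedef MySel::Bind<int>::type BoundToInt;   // i.e. MyContainer<int>
  BoundToInt holder;                           // an ordinary MyContainer<int> object

  // Inside another template, where MySel would be a dependent name, the same
  // binding is spelled GTEST_BIND_(MySel, int), which inserts the required
  // 'template' keyword.
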
-// A unique struct template used as the default value for the
-// arguments of class template Templates. This allows us to simulate
-// variadic templates (e.g. Templates<Tmpl1>, Templates<Tmpl1, Tmpl2>,
-// etc.), which C++ doesn't support directly.
-template <typename T>
-struct NoneT {};
-
-// The following family of struct and struct templates is used to
-// represent template lists. In particular, TemplatesN<T1, T2, ...,
-// TN> represents a list of N templates (T1, T2, ..., and TN). Except
-// for Templates0, every struct in the family has two member types:
-// Head for the selector of the first template in the list, and Tail
-// for the rest of the list.
-
-// The empty template list.
-struct Templates0 {};
-
-// Template lists of length 1, 2, 3, and so on.
-
-template <GTEST_TEMPLATE_ T1>
-struct Templates1 {
- typedef TemplateSel<T1> Head;
- typedef Templates0 Tail;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2>
-struct Templates2 {
- typedef TemplateSel<T1> Head;
- typedef Templates1<T2> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3>
-struct Templates3 {
- typedef TemplateSel<T1> Head;
- typedef Templates2<T2, T3> Tail;
-};
-
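To make the Head/Tail representation concrete, here is a short sketch (again editorial, not from the header; A, B and C are hypothetical single-parameter class templates, and the definitions above are assumed to be in scope) that decomposes a three-element list:

  template <typename T> struct A {};
  template <typename T> struct B {};
  template <typename T> struct C {};

  typedef Templates3<A, B, C> List;        // the three-element template list
  typedef List::Head FirstSel;             // TemplateSel<A>
  typedef List::Tail Rest;                 // Templates2<B, C>
  typedef Rest::Tail::Head LastSel;        // Templates1<C>::Head, i.e. TemplateSel<C>
  typedef FirstSel::Bind<int>::type AInt;  // A<int>, instantiated via the selector
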
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4>
-struct Templates4 {
- typedef TemplateSel<T1> Head;
- typedef Templates3<T2, T3, T4> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5>
-struct Templates5 {
- typedef TemplateSel<T1> Head;
- typedef Templates4<T2, T3, T4, T5> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6>
-struct Templates6 {
- typedef TemplateSel<T1> Head;
- typedef Templates5<T2, T3, T4, T5, T6> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7>
-struct Templates7 {
- typedef TemplateSel<T1> Head;
- typedef Templates6<T2, T3, T4, T5, T6, T7> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8>
-struct Templates8 {
- typedef TemplateSel<T1> Head;
- typedef Templates7<T2, T3, T4, T5, T6, T7, T8> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9>
-struct Templates9 {
- typedef TemplateSel<T1> Head;
- typedef Templates8<T2, T3, T4, T5, T6, T7, T8, T9> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10>
-struct Templates10 {
- typedef TemplateSel<T1> Head;
- typedef Templates9<T2, T3, T4, T5, T6, T7, T8, T9, T10> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11>
-struct Templates11 {
- typedef TemplateSel<T1> Head;
- typedef Templates10<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12>
-struct Templates12 {
- typedef TemplateSel<T1> Head;
- typedef Templates11<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13>
-struct Templates13 {
- typedef TemplateSel<T1> Head;
- typedef Templates12<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14>
-struct Templates14 {
- typedef TemplateSel<T1> Head;
- typedef Templates13<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15>
-struct Templates15 {
- typedef TemplateSel<T1> Head;
- typedef Templates14<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16>
-struct Templates16 {
- typedef TemplateSel<T1> Head;
- typedef Templates15<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17>
-struct Templates17 {
- typedef TemplateSel<T1> Head;
- typedef Templates16<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18>
-struct Templates18 {
- typedef TemplateSel<T1> Head;
- typedef Templates17<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19>
-struct Templates19 {
- typedef TemplateSel<T1> Head;
- typedef Templates18<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20>
-struct Templates20 {
- typedef TemplateSel<T1> Head;
- typedef Templates19<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21>
-struct Templates21 {
- typedef TemplateSel<T1> Head;
- typedef Templates20<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22>
-struct Templates22 {
- typedef TemplateSel<T1> Head;
- typedef Templates21<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23>
-struct Templates23 {
- typedef TemplateSel<T1> Head;
- typedef Templates22<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24>
-struct Templates24 {
- typedef TemplateSel<T1> Head;
- typedef Templates23<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25>
-struct Templates25 {
- typedef TemplateSel<T1> Head;
- typedef Templates24<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26>
-struct Templates26 {
- typedef TemplateSel<T1> Head;
- typedef Templates25<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27>
-struct Templates27 {
- typedef TemplateSel<T1> Head;
- typedef Templates26<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28>
-struct Templates28 {
- typedef TemplateSel<T1> Head;
- typedef Templates27<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29>
-struct Templates29 {
- typedef TemplateSel<T1> Head;
- typedef Templates28<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30>
-struct Templates30 {
- typedef TemplateSel<T1> Head;
- typedef Templates29<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31>
-struct Templates31 {
- typedef TemplateSel<T1> Head;
- typedef Templates30<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32>
-struct Templates32 {
- typedef TemplateSel<T1> Head;
- typedef Templates31<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33>
-struct Templates33 {
- typedef TemplateSel<T1> Head;
- typedef Templates32<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34>
-struct Templates34 {
- typedef TemplateSel<T1> Head;
- typedef Templates33<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35>
-struct Templates35 {
- typedef TemplateSel<T1> Head;
- typedef Templates34<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36>
-struct Templates36 {
- typedef TemplateSel<T1> Head;
- typedef Templates35<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37>
-struct Templates37 {
- typedef TemplateSel<T1> Head;
- typedef Templates36<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38>
-struct Templates38 {
- typedef TemplateSel<T1> Head;
- typedef Templates37<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39>
-struct Templates39 {
- typedef TemplateSel<T1> Head;
- typedef Templates38<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40>
-struct Templates40 {
- typedef TemplateSel<T1> Head;
- typedef Templates39<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41>
-struct Templates41 {
- typedef TemplateSel<T1> Head;
- typedef Templates40<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42>
-struct Templates42 {
- typedef TemplateSel<T1> Head;
- typedef Templates41<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
- T42> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
- GTEST_TEMPLATE_ T43>
-struct Templates43 {
- typedef TemplateSel<T1> Head;
- typedef Templates42<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
- T43> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
- GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44>
-struct Templates44 {
- typedef TemplateSel<T1> Head;
- typedef Templates43<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
- T43, T44> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
- GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45>
-struct Templates45 {
- typedef TemplateSel<T1> Head;
- typedef Templates44<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
- T43, T44, T45> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
- GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
- GTEST_TEMPLATE_ T46>
-struct Templates46 {
- typedef TemplateSel<T1> Head;
- typedef Templates45<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
- T43, T44, T45, T46> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
- GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
- GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47>
-struct Templates47 {
- typedef TemplateSel<T1> Head;
- typedef Templates46<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
- T43, T44, T45, T46, T47> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
- GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
- GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48>
-struct Templates48 {
- typedef TemplateSel<T1> Head;
- typedef Templates47<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
- T43, T44, T45, T46, T47, T48> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
- GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
- GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
- GTEST_TEMPLATE_ T49>
-struct Templates49 {
- typedef TemplateSel<T1> Head;
- typedef Templates48<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
- T43, T44, T45, T46, T47, T48, T49> Tail;
-};
-
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
- GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
- GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
- GTEST_TEMPLATE_ T49, GTEST_TEMPLATE_ T50>
-struct Templates50 {
- typedef TemplateSel<T1> Head;
- typedef Templates49<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
- T43, T44, T45, T46, T47, T48, T49, T50> Tail;
-};
-
-
-// We don't want to require the users to write TemplatesN<...> directly,
-// as that would require them to count the length. Templates<...> is much
-// easier to write, but generates horrible messages when there is a
-// compiler error, as gcc insists on printing out each template
-// argument, even if it has the default value (this means Templates<list>
-// will appear as Templates<list, NoneT, NoneT, ..., NoneT> in the compiler
-// errors).
-//
-// Our solution combines the best parts of both approaches: a user writes
-// Templates<T1, ..., TN>, and Google Test translates that to
-// TemplatesN<T1, ..., TN> internally to make error messages readable.
-// The translation is done by the 'type' member of the Templates template.
-template <GTEST_TEMPLATE_ T1 = NoneT, GTEST_TEMPLATE_ T2 = NoneT,
- GTEST_TEMPLATE_ T3 = NoneT, GTEST_TEMPLATE_ T4 = NoneT,
- GTEST_TEMPLATE_ T5 = NoneT, GTEST_TEMPLATE_ T6 = NoneT,
- GTEST_TEMPLATE_ T7 = NoneT, GTEST_TEMPLATE_ T8 = NoneT,
- GTEST_TEMPLATE_ T9 = NoneT, GTEST_TEMPLATE_ T10 = NoneT,
- GTEST_TEMPLATE_ T11 = NoneT, GTEST_TEMPLATE_ T12 = NoneT,
- GTEST_TEMPLATE_ T13 = NoneT, GTEST_TEMPLATE_ T14 = NoneT,
- GTEST_TEMPLATE_ T15 = NoneT, GTEST_TEMPLATE_ T16 = NoneT,
- GTEST_TEMPLATE_ T17 = NoneT, GTEST_TEMPLATE_ T18 = NoneT,
- GTEST_TEMPLATE_ T19 = NoneT, GTEST_TEMPLATE_ T20 = NoneT,
- GTEST_TEMPLATE_ T21 = NoneT, GTEST_TEMPLATE_ T22 = NoneT,
- GTEST_TEMPLATE_ T23 = NoneT, GTEST_TEMPLATE_ T24 = NoneT,
- GTEST_TEMPLATE_ T25 = NoneT, GTEST_TEMPLATE_ T26 = NoneT,
- GTEST_TEMPLATE_ T27 = NoneT, GTEST_TEMPLATE_ T28 = NoneT,
- GTEST_TEMPLATE_ T29 = NoneT, GTEST_TEMPLATE_ T30 = NoneT,
- GTEST_TEMPLATE_ T31 = NoneT, GTEST_TEMPLATE_ T32 = NoneT,
- GTEST_TEMPLATE_ T33 = NoneT, GTEST_TEMPLATE_ T34 = NoneT,
- GTEST_TEMPLATE_ T35 = NoneT, GTEST_TEMPLATE_ T36 = NoneT,
- GTEST_TEMPLATE_ T37 = NoneT, GTEST_TEMPLATE_ T38 = NoneT,
- GTEST_TEMPLATE_ T39 = NoneT, GTEST_TEMPLATE_ T40 = NoneT,
- GTEST_TEMPLATE_ T41 = NoneT, GTEST_TEMPLATE_ T42 = NoneT,
- GTEST_TEMPLATE_ T43 = NoneT, GTEST_TEMPLATE_ T44 = NoneT,
- GTEST_TEMPLATE_ T45 = NoneT, GTEST_TEMPLATE_ T46 = NoneT,
- GTEST_TEMPLATE_ T47 = NoneT, GTEST_TEMPLATE_ T48 = NoneT,
- GTEST_TEMPLATE_ T49 = NoneT, GTEST_TEMPLATE_ T50 = NoneT>
-struct Templates {
- typedef Templates50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
- T42, T43, T44, T45, T46, T47, T48, T49, T50> type;
-};
-
-template <>
-struct Templates<NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT> {
- typedef Templates0 type;
-};
-template <GTEST_TEMPLATE_ T1>
-struct Templates<T1, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT> {
- typedef Templates1<T1> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2>
-struct Templates<T1, T2, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT> {
- typedef Templates2<T1, T2> type;
-};
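
How the 'type' member and the NoneT defaults work together can be seen in one more sketch (editorial, not from the header; X and Y are hypothetical single-parameter class templates, with the definitions above in scope): Templates<X> is really Templates<X, NoneT, ..., NoneT>, so the one- and two-argument specializations just shown map it onto the exact-length TemplatesN form.

  template <typename T> struct X {};
  template <typename T> struct Y {};

  typedef Templates<X>::type One;       // Templates1<X>, via the specialization above
  typedef Templates<X, Y>::type Two;    // Templates2<X, Y>
  typedef One::Head SelX;               // TemplateSel<X>
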
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3>
-struct Templates<T1, T2, T3, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates3<T1, T2, T3> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4>
-struct Templates<T1, T2, T3, T4, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates4<T1, T2, T3, T4> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5>
-struct Templates<T1, T2, T3, T4, T5, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates5<T1, T2, T3, T4, T5> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6>
-struct Templates<T1, T2, T3, T4, T5, T6, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates6<T1, T2, T3, T4, T5, T6> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates7<T1, T2, T3, T4, T5, T6, T7> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates8<T1, T2, T3, T4, T5, T6, T7, T8> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates9<T1, T2, T3, T4, T5, T6, T7, T8, T9> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT> {
- typedef Templates18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT> {
- typedef Templates19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT> {
- typedef Templates20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT> {
- typedef Templates21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT> {
- typedef Templates22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT> {
- typedef Templates23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT> {
- typedef Templates24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT> {
- typedef Templates25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT> {
- typedef Templates26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT> {
- typedef Templates27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT> {
- typedef Templates28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT> {
- typedef Templates29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34, T35> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34, T35, T36> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, NoneT, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34, T35, T36, T37> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, NoneT, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, NoneT, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, NoneT, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
- T41> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, NoneT,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
- T42> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
- GTEST_TEMPLATE_ T43>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
- T42, T43> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
- GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
- NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
- T42, T43, T44> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
- GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
- T45, NoneT, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
- T42, T43, T44, T45> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
- GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
- GTEST_TEMPLATE_ T46>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
- T45, T46, NoneT, NoneT, NoneT, NoneT> {
- typedef Templates46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
- T42, T43, T44, T45, T46> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
- GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
- GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
- T45, T46, T47, NoneT, NoneT, NoneT> {
- typedef Templates47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
- T42, T43, T44, T45, T46, T47> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
- GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
- GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
- T45, T46, T47, T48, NoneT, NoneT> {
- typedef Templates48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
- T42, T43, T44, T45, T46, T47, T48> type;
-};
-template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
- GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
- GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
- GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
- GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
- GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
- GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
- GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
- GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
- GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
- GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
- GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
- GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
- GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
- GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
- GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
- GTEST_TEMPLATE_ T49>
-struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
- T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
- T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
- T45, T46, T47, T48, T49, NoneT> {
- typedef Templates49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
- T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
- T42, T43, T44, T45, T46, T47, T48, T49> type;
-};
-
-// The TypeList template makes it possible to use either a single type
-// or a Types<...> list in TYPED_TEST_CASE() and
-// INSTANTIATE_TYPED_TEST_CASE_P().
-
-template <typename T>
-struct TypeList { typedef Types1<T> type; };
-
-template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11, typename T12, typename T13, typename T14, typename T15,
- typename T16, typename T17, typename T18, typename T19, typename T20,
- typename T21, typename T22, typename T23, typename T24, typename T25,
- typename T26, typename T27, typename T28, typename T29, typename T30,
- typename T31, typename T32, typename T33, typename T34, typename T35,
- typename T36, typename T37, typename T38, typename T39, typename T40,
- typename T41, typename T42, typename T43, typename T44, typename T45,
- typename T46, typename T47, typename T48, typename T49, typename T50>
-struct TypeList<Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
- T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
- T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
- T44, T45, T46, T47, T48, T49, T50> > {
- typedef typename Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
- T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
- T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
- T41, T42, T43, T44, T45, T46, T47, T48, T49, T50>::type type;
-};
-
-} // namespace internal
-} // namespace testing
-
-#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
-
-#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
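
For context, the TypeList machinery removed above is what lets Google Test's typed-test macros accept either a single type or a Types<...> list. A minimal sketch of how that is consumed in practice (illustrative only; the fixture, test, and type names below are made up, and you would link against gtest_main or supply your own main()):

  // Hypothetical typed-test fixture exercising the TypeList mechanism.
  #include "gtest/gtest.h"

  template <typename T>
  class QueueTest : public ::testing::Test {};

  // TYPED_TEST_CASE accepts either a single type or a Types<...> list;
  // the TypeList specializations deleted above are what make both forms work.
  typedef ::testing::Types<char, int, unsigned int> MyTypes;
  TYPED_TEST_CASE(QueueTest, MyTypes);

  TYPED_TEST(QueueTest, IsDefaultConstructible) {
    TypeParam value = TypeParam();  // TypeParam is the type of the current instantiation.
    (void)value;
  }
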
diff --git a/libclamav/c++/llvm/utils/userloc.pl b/libclamav/c++/llvm/utils/userloc.pl
deleted file mode 100755
index 4da2f40..0000000
--- a/libclamav/c++/llvm/utils/userloc.pl
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/usr/bin/perl -w
-#
-# Program: userloc.pl
-#
-# Synopsis: This program uses "cvs annotate" to get a summary of how many lines
-# of code the various developers are responsible for. It takes one
-# argument, the directory to process. If the argument is not specified
-# then the cwd is used. The directory must be an LLVM tree checked out
-# from cvs.
-#
-# Syntax: userloc.pl [-tag=tag|-html] <directory>...
-#
-# Options:
-# -tag=tag
-# Use "tag" to select the revision (as per cvs -r option)
-# -filedetails
-# Report details about lines of code in each file for each user
-# -html
-# Generate HTML output instead of text output
-# -topdir
-# Specify where the top llvm source directory is. Otherwise the
-# llvm-config tool is used to find it.
-# Directories:
-# The directories passed after the options should be relative paths to
-# directories of interest from the top of the llvm source tree, e.g. "lib"
-# or "include", etc.
-
-die "Usage userloc.pl [-tag=tag|-html] <directories>..."
- if ($#ARGV < 0);
-
-my $tag = "";
-my $html = 0;
-my $debug = 0;
-my $filedetails = "";
-my $srcroot = "";
-while ( defined($ARGV[0]) && substr($ARGV[0],0,1) eq '-' )
-{
- if ($ARGV[0] =~ /-tag=.*/) {
- $tag = $ARGV[0];
- $tag =~ s#-tag=(.*)#$1#;
- } elsif ($ARGV[0] =~ /-filedetails/) {
- $filedetails = 1;
- } elsif ($ARGV[0] eq "-html") {
- $html = 1;
- } elsif ($ARGV[0] eq "-debug") {
- $debug = 1;
- } elsif ($ARGV[0] eq "-topdir") {
- shift; $srcroot = $ARGV[0]; shift;
- } else {
- die "Invalid option: $ARGV[0]";
- }
- shift;
-}
-
-if (length($srcroot) == 0) {
- chomp($srcroot = `llvm-config --src-root`);
-}
-if (! -d "$srcroot") {
- die "Invalid source root: $srcroot\n";
-}
-chdir($srcroot);
-my $llvmdo = "$srcroot/utils/llvmdo -topdir '$srcroot'";
-my %Stats;
-my %FileStats;
-
-my $annotate = "cvs -z6 annotate -lf ";
-if (length($tag) > 0)
-{
- $annotate = $annotate . " -r" . $tag;
-}
-
-sub GetCVSFiles
-{
- my $d = $_[0];
- my $files ="";
- open FILELIST,
- "$llvmdo -dirs \"$d\" -code-only echo |" || die "Can't get list of files with llvmdo";
- while ( defined($line = <FILELIST>) ) {
- chomp($file = $line);
- print "File: $file\n" if ($debug);
- $files = "$files $file";
- }
- return $files;
-}
-
-sub ScanDir
-{
- my $Dir = $_[0];
- my $files = GetCVSFiles($Dir);
-
- open (DATA,"$annotate $files 2>&1 |")
- || die "Can't read cvs annotation data";
-
- my $curfile = "";
- while ( defined($line = <DATA>) )
- {
- chomp($line);
- if ($line =~ '^Annotations for.*') {
- $curfile = $line;
- $curfile =~ s#^Annotations for ([[:print:]]*)#$1#;
- print "Scanning: $curfile\n" if ($debug);
- } elsif ($line =~ /^[0-9.]*[ \t]*\([^)]*\):/) {
- $uname = $line;
- $uname =~ s#^[0-9.]*[ \t]*\(([a-zA-Z0-9_.-]*) [^)]*\):.*#$1#;
- $Stats{$uname}++;
- if ($filedetails) {
- $FileStats{$uname} = {} unless exists $FileStats{$uname};
- ${$FileStats{$uname}}{$curfile}++;
- }
- }
- }
- close DATA;
-}
-
-sub printStats
-{
- my $dir = $_[0];
- my $hash = $_[1];
- my $user;
- my $total = 0;
-
- foreach $user (keys %Stats) { $total += $Stats{$user}; }
-
- if ($html) {
- print "<p>Total Source Lines: $total<br/></p>\n";
- print "<table>";
- print " <tr><th style=\"text-align:right\">LOC</th>\n";
- print " <th style=\"text-align:right\">\%LOC</th>\n";
- print " <th style=\"text-align:left\">User</th>\n";
- print "</tr>\n";
- }
-
- foreach $user ( sort keys %Stats )
- {
- my $v = $Stats{$user};
- if (defined($v))
- {
- if ($html) {
- printf "<tr><td style=\"text-align:right\">%d</td><td style=\"text-align:right\">(%4.1f%%)</td><td style=\"text-align:left\">", $v, (100.0/$total)*$v;
- if ($filedetails) {
- print "<a href=\"#$user\">$user</a></td></tr>";
- } else {
- print $user,"</td></tr>";
- }
- } else {
- printf "%8d (%4.1f%%) %s\n", $v, (100.0/$total)*$v, $user;
- }
- }
- }
- print "</table>\n" if ($html);
-
- if ($filedetails) {
- foreach $user (sort keys %FileStats) {
- my $total = 0;
- foreach $file (sort keys %{$FileStats{$user}}) {
- $total += ${$FileStats{$user}}{$file}
- }
- if ($html) {
- print "<table><tr><th style=\"text-align:left\" colspan=\"3\"><a name=\"$user\">$user</a></th></tr>\n";
- } else {
- print $user,":\n";
- }
- foreach $file (sort keys %{$FileStats{$user}}) {
- my $v = ${$FileStats{$user}}{$file};
- if ($html) {
- printf "<tr><td style=\"text-align:right\"> %d</td><td
- style=\"text-align:right\"> %4.1f%%</td><td
- style=\"text-align:left\">%s</td></tr>",$v, (100.0/$total)*$v,$file;
- } else {
- printf "%8d (%4.1f%%) %s\n", $v, (100.0/$total)*$v, $file;
- }
- }
- if ($html) { print "</table>\n"; }
- }
- }
-}
-
-
-if ($html)
-{
-print "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\" \"http://www.w3.org/TR/html4/strict.dtd\">\n";
-print "<html>\n<head>\n";
-print " <title>LLVM LOC Based On CVS Annotation</title>\n";
-print " <link rel=\"stylesheet\" href=\"llvm.css\" type=\"text/css\"/>\n";
-print "</head>\n";
-print "<body><div class=\"doc_title\">LLVM LOC Based On CVS Annotation</div>\n";
-print "<p>This document shows the total lines of code per user in each\n";
-print "LLVM directory. Lines of code are attributed by the user that last\n";
-print "committed the line. This does not necessarily reflect authorship.</p>\n";
-}
-
-my @DIRS;
-if ($#ARGV > 0) {
- @DIRS = @ARGV;
-} else {
- push @DIRS, 'include';
- push @DIRS, 'lib';
- push @DIRS, 'tools';
- push @DIRS, 'runtime';
- push @DIRS, 'docs';
- push @DIRS, 'test';
- push @DIRS, 'utils';
- push @DIRS, 'examples';
- push @DIRS, 'projects/Stacker';
- push @DIRS, 'projects/sample';
- push @DIRS, 'autoconf';
-}
-
-for $Index ( 0 .. $#DIRS) {
- print "Scanning Dir: $DIRS[$Index]\n" if ($debug);
- ScanDir($DIRS[$Index]);
-}
-
-printStats;
-
-print "</body></html>\n" if ($html) ;
diff --git a/libclamav/c++/llvm/utils/valgrind/i386-pc-linux-gnu.supp b/libclamav/c++/llvm/utils/valgrind/i386-pc-linux-gnu.supp
new file mode 100644
index 0000000..c9f68a0
--- /dev/null
+++ b/libclamav/c++/llvm/utils/valgrind/i386-pc-linux-gnu.supp
@@ -0,0 +1,41 @@
+{
+ False leak under RegisterPass
+ Memcheck:Leak
+ ...
+ fun:_ZN83_GLOBAL_*PassRegistrar12RegisterPassERKN4llvm8PassInfoE
+ fun:_ZN4llvm8PassInfo12registerPassEv
+}
+
+# Python false positives according to
+# http://svn.python.org/projects/python/trunk/Misc/README.valgrind
+
+{
+ ADDRESS_IN_RANGE/Invalid read of size 4
+ Memcheck:Addr4
+ obj:/usr/bin/python2.5
+}
+
+{
+ ADDRESS_IN_RANGE/Invalid read of size 4
+ Memcheck:Value4
+ obj:/usr/bin/python2.5
+}
+
+{
+ ADDRESS_IN_RANGE/Conditional jump or move depends on uninitialised value
+ Memcheck:Cond
+ obj:/usr/bin/python2.5
+}
+
+{
+ We don't care if as leaks
+ Memcheck:Leak
+ obj:/usr/bin/as
+}
+
+{
+ We don't care if python leaks
+ Memcheck:Leak
+ fun:malloc
+ obj:/usr/bin/python2.5
+}
diff --git a/libclamav/c++/llvm/utils/valgrind/x86_64-pc-linux-gnu.supp b/libclamav/c++/llvm/utils/valgrind/x86_64-pc-linux-gnu.supp
new file mode 100644
index 0000000..f5aae99
--- /dev/null
+++ b/libclamav/c++/llvm/utils/valgrind/x86_64-pc-linux-gnu.supp
@@ -0,0 +1,46 @@
+{
+ False leak under RegisterPass
+ Memcheck:Leak
+ ...
+ fun:_ZN4llvm12PassRegistry12registerPassERKNS_8PassInfoE
+}
+
+# Python false positives according to
+# http://svn.python.org/projects/python/trunk/Misc/README.valgrind
+
+{
+ ADDRESS_IN_RANGE/Invalid read of size 4
+ Memcheck:Addr4
+ obj:/usr/bin/python2.5
+}
+
+{
+ ADDRESS_IN_RANGE/Invalid read of size 4
+ Memcheck:Value8
+ obj:/usr/bin/python2.5
+}
+
+{
+ ADDRESS_IN_RANGE/Conditional jump or move depends on uninitialised value
+ Memcheck:Cond
+ obj:/usr/bin/python2.5
+}
+
+{
+ We don't care if as leaks
+ Memcheck:Leak
+ obj:/usr/bin/as
+}
+
+{
+ We don't care if grep leaks
+ Memcheck:Leak
+ obj:/bin/grep
+}
+
+{
+ We don't care if python leaks
+ Memcheck:Leak
+ fun:malloc
+ obj:/usr/bin/python2.5
+}
diff --git a/libclamav/c++/llvm/utils/valgrind/x86_64-pc-linux-gnu_gcc-4.3.3.supp b/libclamav/c++/llvm/utils/valgrind/x86_64-pc-linux-gnu_gcc-4.3.3.supp
deleted file mode 100644
index a86be6c..0000000
--- a/libclamav/c++/llvm/utils/valgrind/x86_64-pc-linux-gnu_gcc-4.3.3.supp
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- libstdcxx_overlapped_memcpy_in_stable_sort_1
- Memcheck:Overlap
- fun:memcpy
- ...
- fun:_ZSt11stable_sortIN9__gnu_cxx17__normal_iteratorIPSt4pairIPKN4llvm5ValueEjESt6vectorIS7_SaIS7_EEEEN12_GLOBAL__N_116CstSortPredicateEEvT_SF_T0_
-}
-
-{
- libstdcxx_overlapped_memcpy_in_stable_sort_2
- Memcheck:Overlap
- fun:memcpy
- ...
- fun:_ZSt11stable_sortIN9__gnu_cxx17__normal_iteratorIPSt4pairIPKN4llvm5ValueEjESt6vectorIS7_SaIS7_EEEEN12_GLOBAL__N_116CstSortPredicateEEvT_SF_T0_
-}
-
-{
- libstdcxx_overlapped_memcpy_in_stable_sort_3
- Memcheck:Overlap
- fun:memcpy
- ...
- fun:_ZSt11stable_sortIN9__gnu_cxx17__normal_iteratorIPSt4pairIPKN4llvm4TypeEjESt6vectorIS7_SaIS7_EEEEPFbRKS7_SE_EEvT_SH_T0_
-}
diff --git a/libclamav/c++/llvm/utils/vim/llvm.vim b/libclamav/c++/llvm/utils/vim/llvm.vim
index ffdf711..28ab2e3 100644
--- a/libclamav/c++/llvm/utils/vim/llvm.vim
+++ b/libclamav/c++/llvm/utils/vim/llvm.vim
@@ -57,14 +57,12 @@ syn keyword llvmKeyword module asm align tail to
syn keyword llvmKeyword addrspace section alias sideeffect c gc
syn keyword llvmKeyword target datalayout triple
syn keyword llvmKeyword blockaddress
-syn keyword llvmKeyword union
" Obsolete keywords.
-syn keyword llvmError uninitialized implementation
-syn keyword llvmError getresult big little endian begin end
+syn keyword llvmError getresult begin end
" Misc syntax.
-syn match llvmIgnore /[%@]\d\+\>/
+syn match llvmNoName /[%@]\d\+\>/
syn match llvmNumber /-\?\<\d\+\>/
syn match llvmFloat /-\?\<\d\+\.\d*\(e[+-]\d\+\)\?\>/
syn match llvmFloat /\<0x\x\+\>/
@@ -99,7 +97,7 @@ if version >= 508 || !exists("did_c_syn_inits")
HiLink llvmKeyword Keyword
HiLink llvmBoolean Boolean
HiLink llvmFloat Float
- HiLink llvmIgnore Ignore
+ HiLink llvmNoName Identifier
HiLink llvmConstant Constant
HiLink llvmSpecialComment SpecialComment
HiLink llvmError Error
diff --git a/libclamav/c++/llvm/utils/vim/vimrc b/libclamav/c++/llvm/utils/vim/vimrc
index 8ae3e67..51a2365 100644
--- a/libclamav/c++/llvm/utils/vim/vimrc
+++ b/libclamav/c++/llvm/utils/vim/vimrc
@@ -91,3 +91,130 @@ augroup END
"set showmode
"set incsearch
"set ruler
+
+" Clang code-completion support. This is highly experimental!
+
+" A path to a clang executable.
+let g:clang_path = "clang++"
+
+" A list of options to add to the clang commandline, for example to add
+" include paths, predefined macros, and language options.
+let g:clang_opts = [
+ \ "-x","c++",
+ \ "-D__STDC_LIMIT_MACROS=1","-D__STDC_CONSTANT_MACROS=1",
+ \ "-Iinclude" ]
+
+function! ClangComplete(findstart, base)
+ if a:findstart == 1
+ " In findstart mode, look for the beginning of the current identifier.
+ let l:line = getline('.')
+ let l:start = col('.') - 1
+ while l:start > 0 && l:line[l:start - 1] =~ '\i'
+ let l:start -= 1
+ endwhile
+ return l:start
+ endif
+
+ " Get the current line and column numbers.
+ let l:l = line('.')
+ let l:c = col('.')
+
+ " Build a clang commandline to do code completion on stdin.
+ let l:the_command = shellescape(g:clang_path) .
+ \ " -cc1 -code-completion-at=-:" . l:l . ":" . l:c
+ for l:opt in g:clang_opts
+ let l:the_command .= " " . shellescape(l:opt)
+ endfor
+
+ " Copy the contents of the current buffer into a string for stdin.
+ " TODO: The extra space at the end is for working around clang's
+ " apparent inability to do code completion at the very end of the
+ " input.
+ " TODO: Is it better to feed clang the entire file instead of truncating
+ " it at the current line?
+ let l:process_input = join(getline(1, l:l), "\n") . " "
+
+ " Run it!
+ let l:input_lines = split(system(l:the_command, l:process_input), "\n")
+
+ " Parse the output.
+ for l:input_line in l:input_lines
+ " Vim's substring operator is annoyingly inconsistent with python's.
+ if l:input_line[:11] == 'COMPLETION: '
+ let l:value = l:input_line[12:]
+
+ " Chop off anything after " : ", if present, and move it to the menu.
+ let l:menu = ""
+ let l:spacecolonspace = stridx(l:value, " : ")
+ if l:spacecolonspace != -1
+ let l:menu = l:value[l:spacecolonspace+3:]
+ let l:value = l:value[:l:spacecolonspace-1]
+ endif
+
+ " Chop off " (Hidden)", if present, and move it to the menu.
+ let l:hidden = stridx(l:value, " (Hidden)")
+ if l:hidden != -1
+ let l:menu .= " (Hidden)"
+ let l:value = l:value[:l:hidden-1]
+ endif
+
+ " Handle "Pattern". TODO: Make clang less weird.
+ if l:value == "Pattern"
+ let l:value = l:menu
+ let l:pound = stridx(l:value, "#")
+ " Truncate the at the first [#, <#, or {#.
+ if l:pound != -1
+ let l:value = l:value[:l:pound-2]
+ endif
+ endif
+
+ " Filter out results which don't match the base string.
+ if a:base != ""
+ if l:value[:strlen(a:base)-1] != a:base
+ continue
+ end
+ endif
+
+ " TODO: Don't dump the raw input into info, though it's nice for now.
+ " TODO: The kind string?
+ let l:item = {
+ \ "word": l:value,
+ \ "menu": l:menu,
+ \ "info": l:input_line,
+ \ "dup": 1 }
+
+ " Report a result.
+ if complete_add(l:item) == 0
+ return []
+ endif
+ if complete_check()
+ return []
+ endif
+
+ elseif l:input_line[:9] == "OVERLOAD: "
+ " An overload candidate. Use a crazy hack to get vim to
+ " display the results. TODO: Make this better.
+ let l:value = l:input_line[10:]
+ let l:item = {
+ \ "word": " ",
+ \ "menu": l:value,
+ \ "info": l:input_line,
+ \ "dup": 1}
+
+ " Report a result.
+ if complete_add(l:item) == 0
+ return []
+ endif
+ if complete_check()
+ return []
+ endif
+
+ endif
+ endfor
+
+
+ return []
+endfunction ClangComplete
+
+" Uncomment this to enable the highly-broken autocompletion support.
+"set omnifunc=ClangComplete
diff --git a/libclamav/cache.c b/libclamav/cache.c
index aeb745a..3e947fc 100644
--- a/libclamav/cache.c
+++ b/libclamav/cache.c
@@ -589,7 +589,7 @@ struct CACHE {
/* Allocates the trees for the engine cache */
int cli_cache_init(struct cl_engine *engine) {
- static struct CACHE *cache;
+ struct CACHE *cache;
unsigned int i, j;
if(!engine) {
@@ -623,7 +623,7 @@ int cli_cache_init(struct cl_engine *engine) {
/* Frees the engine cache */
void cli_cache_destroy(struct cl_engine *engine) {
- static struct CACHE *cache;
+ struct CACHE *cache;
unsigned int i;
if(!engine || !(cache = engine->cache))
diff --git a/libclamav/clamav.h b/libclamav/clamav.h
index e32fa8d..862562c 100644
--- a/libclamav/clamav.h
+++ b/libclamav/clamav.h
@@ -112,6 +112,7 @@ typedef enum {
#define CL_SCAN_STRUCTURED_SSN_STRIPPED 0x20000
#define CL_SCAN_PARTIAL_MESSAGE 0x40000
#define CL_SCAN_HEURISTIC_PRECEDENCE 0x80000
+#define CL_SCAN_BLOCKMACROS 0x100000
#define CL_SCAN_INTERNAL_COLLECT_SHA 0x80000000 /* Enables hash output in sha-collect builds - for internal use only */
@@ -238,6 +239,29 @@ WARNING: Some signatures (notably ldb, cbc) can be dependent upon other signatur
*/
extern void cl_engine_set_clcb_sigload(struct cl_engine *engine, clcb_sigload callback, void *context);
+/* LibClamAV messages callback
+ * The specified callback will be called instead of logging to stderr.
+ * Messages of lower severity than specified are logged as usual.
+ *
+ * Just like with cl_debug() this must be called before going multithreaded.
+ * Callable before cl_init, if you want to log messages from cl_init() itself.
+ *
+ * You can use the context argument of cl_scandesc_callback to convey more information to the callback (such as the filename!)
+ * Note: setting a 2nd callback overwrites the previous one; multiple callbacks
+ * are not supported
+ */
+enum cl_msg {
+ /* leave room for more message levels in the future */
+ CL_MSG_INFO_VERBOSE = 32, /* verbose */
+ CL_MSG_WARN = 64, /* LibClamAV WARNING: */
+ CL_MSG_ERROR = 128/* LibClamAV ERROR: */
+};
+typedef void (*clcb_msg)(enum cl_msg severity, const char *fullmsg, const char *msg, void *context);
+extern void cl_set_clcb_msg(clcb_msg callback);
+
+/* LibClamAV hash stats callback */
+typedef void (*clcb_hash)(int fd, unsigned long long size, const unsigned char *md5, const char *virname, void *context);
+extern void cl_engine_set_clcb_hash(struct cl_engine *engine, clcb_hash callback);
struct cl_stat {
char *dir;
@@ -263,6 +287,7 @@ extern int cl_scandesc(int desc, const char **virname, unsigned long int *scanne
extern int cl_scandesc_callback(int desc, const char **virname, unsigned long int *scanned, const struct cl_engine *engine, unsigned int scanoptions, void *context);
extern int cl_scanfile(const char *filename, const char **virname, unsigned long int *scanned, const struct cl_engine *engine, unsigned int scanoptions);
+extern int cl_scanfile_callback(const char *filename, const char **virname, unsigned long int *scanned, const struct cl_engine *engine, unsigned int scanoptions, void *context);
/* database handling */
extern int cl_load(const char *path, struct cl_engine *engine, unsigned int *signo, unsigned int dboptions);
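As a side note, here is a minimal sketch of how an application could hook the two new callbacks declared above; the function names and the logging policy are illustrative only, not part of this commit:

#include <stdio.h>
#include <clamav.h>

/* Illustrative callback names; only the typedefs above come from the commit. */
static void app_msg_cb(enum cl_msg severity, const char *fullmsg,
                       const char *msg, void *context)
{
    /* Route libclamav output into the application's own logging. */
    fprintf(stderr, "[libclamav severity=%d] %s", (int)severity, fullmsg);
}

static void app_hash_cb(int fd, unsigned long long size,
                        const unsigned char *md5, const char *virname,
                        void *context)
{
    /* Called on detections; md5 is passed through as provided by the engine. */
    printf("detection: fd=%d size=%llu name=%s\n",
           fd, size, virname ? virname : "(none)");
}

void app_register_callbacks(struct cl_engine *engine)
{
    cl_set_clcb_msg(app_msg_cb);                  /* global; set before threads */
    cl_engine_set_clcb_hash(engine, app_hash_cb); /* per-engine hash callback */
}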
diff --git a/libclamav/disasm.c b/libclamav/disasm.c
index 5ac062b..61bd7cf 100644
--- a/libclamav/disasm.c
+++ b/libclamav/disasm.c
@@ -785,7 +785,7 @@ static const struct OPCODES x86ops[2][256] = {{
PUSHOP(0x92, ADDR_REG_EDX, SIZE_WD, ADDR_REG_EAX, SIZE_WD, OP_XCHG),
PUSHOP(0x93, ADDR_REG_EBX, SIZE_WD, ADDR_REG_EAX, SIZE_WD, OP_XCHG),
PUSHOP(0x94, ADDR_REG_ESP, SIZE_WD, ADDR_REG_EAX, SIZE_WD, OP_XCHG),
- PUSHOP(0x95, ADDR_REG_EBX, SIZE_WD, ADDR_REG_EAX, SIZE_WD, OP_XCHG),
+ PUSHOP(0x95, ADDR_REG_EBP, SIZE_WD, ADDR_REG_EAX, SIZE_WD, OP_XCHG),
PUSHOP(0x96, ADDR_REG_ESI, SIZE_WD, ADDR_REG_EAX, SIZE_WD, OP_XCHG),
PUSHOP(0x97, ADDR_REG_EDI, SIZE_WD, ADDR_REG_EAX, SIZE_WD, OP_XCHG),
PUSHOP(0x98, ADDR_NOADDR, SIZE_WD, ADDR_NOADDR, SIZE_NOSIZE, OP_CWDE),
diff --git a/libclamav/libclamav.map b/libclamav/libclamav.map
index 64ddc37..9b2c74d 100644
--- a/libclamav/libclamav.map
+++ b/libclamav/libclamav.map
@@ -11,6 +11,8 @@ CLAMAV_PUBLIC {
cl_engine_get_num;
cl_engine_set_str;
cl_engine_get_str;
+ cl_engine_set_clcb_hash;
+ cl_set_clcb_msg;
cl_engine_set_clcb_pre_scan;
cl_engine_set_clcb_post_scan;
cl_engine_set_clcb_sigload;
@@ -25,7 +27,9 @@ CLAMAV_PUBLIC {
cl_retflevel;
cl_retver;
cl_scandesc;
+ cl_scandesc_callback;
cl_scanfile;
+ cl_scanfile_callback;
cl_statchkdir;
cl_statfree;
cl_statinidir;
@@ -128,8 +132,6 @@ CLAMAV_PRIVATE {
cli_initroots;
cli_scanbuff;
cli_fmap_scandesc;
- cli_scandesc_stats;
- cli_scanfile_stats;
html_screnc_decode;
mpool_create;
mpool_calloc;
diff --git a/libclamav/matcher-ac.c b/libclamav/matcher-ac.c
index a64dc95..17b83f2 100644
--- a/libclamav/matcher-ac.c
+++ b/libclamav/matcher-ac.c
@@ -1057,12 +1057,12 @@ inline static int ac_addtype(struct cli_matched_type **list, cli_file_t type, of
return CL_SUCCESS;
}
-static inline void lsig_sub_matched(const struct cli_matcher *root, struct cli_ac_data *mdata, uint32_t lsigid1, uint32_t lsigid2, uint32_t realoff)
+static inline void lsig_sub_matched(const struct cli_matcher *root, struct cli_ac_data *mdata, uint32_t lsigid1, uint32_t lsigid2, uint32_t realoff, int partial)
{
const struct cli_lsig_tdb *tdb = &root->ac_lsigtable[lsigid1]->tdb;
if(realoff != CLI_OFF_NONE) {
- if(mdata->lsigsuboff[lsigid1][lsigid2] != CLI_OFF_NONE && realoff <= mdata->lsigsuboff[lsigid1][lsigid2])
+ if(mdata->lsigsuboff[lsigid1][lsigid2] != CLI_OFF_NONE && ((!partial && realoff <= mdata->lsigsuboff[lsigid1][lsigid2]) || (partial && realoff < mdata->lsigsuboff[lsigid1][lsigid2])))
return;
mdata->lsigcnt[lsigid1][lsigid2]++;
if(mdata->lsigcnt[lsigid1][lsigid2] <= 1 || !tdb->macro_ptids || !tdb->macro_ptids[lsigid2])
@@ -1111,7 +1111,7 @@ void cli_ac_chkmacro(struct cli_matcher *root, struct cli_ac_data *data, unsigne
/* Loop through all subsigs, and if they are tied to macros check that the
* macro matched at a correct distance */
for (i=0;i<tdb->subsigs;i++) {
- lsig_sub_matched(root, data, lsigid1, i, CLI_OFF_NONE);
+ lsig_sub_matched(root, data, lsigid1, i, CLI_OFF_NONE, 0);
}
}
@@ -1122,7 +1122,7 @@ int cli_ac_scanbuff(const unsigned char *buffer, uint32_t length, const char **v
struct cli_ac_patt *patt, *pt;
uint32_t i, bp, realoff, matchend;
uint16_t j;
- int32_t **offmatrix;
+ int32_t **offmatrix, swp;
uint8_t found;
int type = CL_CLEAN;
struct cli_ac_result *newres;
@@ -1246,8 +1246,17 @@ int cli_ac_scanbuff(const unsigned char *buffer, uint32_t length, const char **v
}
}
- if(pt->partno == 2 && found > 1)
+ if(pt->partno == 2 && found > 1) {
+ swp = offmatrix[0][1];
offmatrix[0][1] = offmatrix[0][found];
+ offmatrix[0][found] = swp;
+
+ if(pt->type != CL_TYPE_MSEXE) {
+ swp = offmatrix[pt->parts - 1][1];
+ offmatrix[pt->parts - 1][1] = offmatrix[pt->parts - 1][found];
+ offmatrix[pt->parts - 1][found] = swp;
+ }
+ }
if(pt->partno == 1 || (found && (pt->partno != pt->parts))) {
if(offmatrix[pt->partno - 1][0] == CLI_DEFAULT_AC_TRACKLEN + 1)
@@ -1282,7 +1291,7 @@ int cli_ac_scanbuff(const unsigned char *buffer, uint32_t length, const char **v
} else { /* !pt->type */
if(pt->lsigid[0]) {
- lsig_sub_matched(root, mdata, pt->lsigid[1], pt->lsigid[2], offmatrix[pt->parts - 1][1]);
+ lsig_sub_matched(root, mdata, pt->lsigid[1], pt->lsigid[2], offmatrix[pt->parts - 1][1], 1);
pt = pt->next_same;
continue;
}
@@ -1325,7 +1334,7 @@ int cli_ac_scanbuff(const unsigned char *buffer, uint32_t length, const char **v
}
} else {
if(pt->lsigid[0]) {
- lsig_sub_matched(root, mdata, pt->lsigid[1], pt->lsigid[2], realoff);
+ lsig_sub_matched(root, mdata, pt->lsigid[1], pt->lsigid[2], realoff, 0);
pt = pt->next_same;
continue;
}
diff --git a/libclamav/matcher.c b/libclamav/matcher.c
index 23c771e..51f09c9 100644
--- a/libclamav/matcher.c
+++ b/libclamav/matcher.c
@@ -421,12 +421,8 @@ int cli_checkfp(unsigned char *digest, size_t size, cli_ctx *ctx)
}
#endif
- if(ctx->virsize && !*ctx->virsize) {
- if(ctx->virsize)
- *ctx->virsize = size;
- if(ctx->virhash)
- strcpy(ctx->virhash, md5);
- }
+ if (ctx->engine->cb_hash)
+ ctx->engine->cb_hash(ctx->fmap[0]->fd, size, md5, ctx->virname ? *ctx->virname : NULL, ctx->cb_ctx);
return CL_VIRUS;
}
diff --git a/libclamav/mpool.c b/libclamav/mpool.c
index 46288db..3f3044f 100644
--- a/libclamav/mpool.c
+++ b/libclamav/mpool.c
@@ -434,6 +434,7 @@ void mpool_flush(struct MP *mp) {
mp->u.mpm.size = mused - sizeof(*mp);
}
used += mp->u.mpm.size;
+ cli_dbgmsg("pool memory used: %.3f MB\n", used/(1024*1024.0));
spam("Map flushed @%p, in use: %lu\n", mp, used);
}
diff --git a/libclamav/msexpand.c b/libclamav/msexpand.c
index d670b82..43e15b4 100644
--- a/libclamav/msexpand.c
+++ b/libclamav/msexpand.c
@@ -27,6 +27,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
+#include <string.h>
#include "clamav.h"
#include "cltypes.h"
@@ -111,6 +112,7 @@ int cli_msexpand(int fd, int ofd, cli_ctx *ctx)
if(cli_checklimits("MSEXPAND", ctx, EC32(hdr.fsize), 0, 0)!=CL_CLEAN)
return CL_SUCCESS;
+ memset(buff, 0, BSIZE);
while(1) {
if(!rbytes || (r == rbytes)) {
diff --git a/libclamav/mspack.c b/libclamav/mspack.c
index 8bae3c5..60d0110 100644
--- a/libclamav/mspack.c
+++ b/libclamav/mspack.c
@@ -1610,6 +1610,18 @@ static int qtm_read_input(struct qtm_stream *qtm) {
return qtm->error = CL_EFORMAT;
}
+ if (nread == 0) {
+ if (qtm->input_end) {
+ cli_dbgmsg("qtm_read_input: out of input bytes\n");
+ return qtm->error = CL_EREAD;
+ }
+ else {
+ nread = 2;
+ qtm->inbuf[0] = qtm->inbuf[1] = 0;
+ qtm->input_end = 1;
+ }
+ }
+
qtm->i_ptr = &qtm->inbuf[0];
qtm->i_end = &qtm->inbuf[nread];
return CL_SUCCESS;
diff --git a/libclamav/mspack.h b/libclamav/mspack.h
index a6788a8..5f9e6cf 100644
--- a/libclamav/mspack.h
+++ b/libclamav/mspack.h
@@ -130,7 +130,7 @@ struct qtm_stream {
unsigned char header_read; /* have we started decoding a new frame? */
unsigned char wflag; /* write flag */
- int error;
+ int error, input_end;
/* data tables */
unsigned int position_base[42];
diff --git a/libclamav/others.c b/libclamav/others.c
index 13668ce..3272f53 100644
--- a/libclamav/others.c
+++ b/libclamav/others.c
@@ -248,9 +248,9 @@ const char *cl_strerror(int clerror)
case CL_EFORMAT:
return "CL_EFORMAT: Bad format or broken data";
case CL_EBYTECODE:
- return "CL_EBYTECODE: error during bytecode execution";
+ return "Error during bytecode execution";
case CL_EBYTECODE_TESTFAIL:
- return "CL_EBYTECODE_TESTFAIL: failure in bytecode testmode";
+ return "Failure in bytecode testmode";
default:
return "Unknown error code";
}
@@ -427,8 +427,11 @@ int cl_engine_set_num(struct cl_engine *engine, enum cl_engine_field field, long
}
if (num == CL_BYTECODE_MODE_OFF) {
cli_errmsg("cl_engine_set_num: CL_BYTECODE_MODE_OFF is not settable, use dboptions to turn off!\n");
+ return CL_EARG;
}
engine->bytecode_mode = num;
+ if (num == CL_BYTECODE_MODE_TEST)
+ cli_infomsg(NULL, "bytecode engine in test mode\n");
break;
default:
cli_errmsg("cl_engine_set_num: Incorrect field number\n");
@@ -1058,3 +1061,8 @@ void cl_engine_set_clcb_sigload(struct cl_engine *engine, clcb_sigload callback,
engine->cb_sigload = callback;
engine->cb_sigload_ctx = callback ? context : NULL;
}
+
+void cl_engine_set_clcb_hash(struct cl_engine *engine, clcb_hash callback)
+{
+ engine->cb_hash = callback;
+}
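For illustration, this is roughly how a caller would reach the new code path; CL_ENGINE_BYTECODE_MODE is the assumed cl_engine_set_num() field name for this setting and does not appear in the hunk itself:

#include <clamav.h>

/* Sketch: request bytecode test mode (the hunk above logs an info message
 * for this case) and rely on the new CL_EARG error when a caller wrongly
 * tries to select CL_BYTECODE_MODE_OFF here instead of via dboptions. */
int request_bytecode_test_mode(struct cl_engine *engine)
{
    return cl_engine_set_num(engine, CL_ENGINE_BYTECODE_MODE,
                             CL_BYTECODE_MODE_TEST);
}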
diff --git a/libclamav/others.h b/libclamav/others.h
index 6b908fd..308b8c9 100644
--- a/libclamav/others.h
+++ b/libclamav/others.h
@@ -53,7 +53,7 @@
* in re-enabling affected modules.
*/
-#define CL_FLEVEL 56
+#define CL_FLEVEL 58
#define CL_FLEVEL_DCONF CL_FLEVEL
#define CL_FLEVEL_SIGTOOL CL_FLEVEL
@@ -109,8 +109,6 @@ typedef struct bitset_tag
/* internal clamav context */
typedef struct cli_ctx_tag {
const char **virname;
- char *virhash;
- unsigned int *virsize;
unsigned long int *scanned;
const struct cli_matcher *root;
const struct cl_engine *engine;
@@ -255,6 +253,9 @@ struct cl_engine {
clcb_post_scan cb_post_scan;
clcb_sigload cb_sigload;
void *cb_sigload_ctx;
+ clcb_msg cb_msg;
+ clcb_hash cb_hash;
+ enum cl_msg cb_msg_minseverity;
/* Used for bytecode */
struct cli_all_bc bcs;
@@ -302,6 +303,7 @@ extern int have_rar;
#define DETECT_ENCRYPTED (ctx->options & CL_SCAN_BLOCKENCRYPTED)
/* #define BLOCKMAX (ctx->options & CL_SCAN_BLOCKMAX) */
#define DETECT_BROKEN (ctx->options & CL_SCAN_BLOCKBROKEN)
+#define BLOCK_MACROS (ctx->options & CL_SCAN_BLOCKMACROS)
#define SCAN_STRUCTURED (ctx->options & CL_SCAN_STRUCTURED)
/* based on macros from A. Melnikoff */
@@ -434,6 +436,7 @@ void cli_infomsg(const cli_ctx* ctx, const char *fmt, ...);
#endif
void cli_logg_setup(const cli_ctx* ctx);
+void cli_logg_unsetup(void);
/* tell compiler about branches that are very rarely taken,
* such as debug paths, and error paths */
diff --git a/libclamav/others_common.c b/libclamav/others_common.c
index 4bb0473..1cc2a93 100644
--- a/libclamav/others_common.c
+++ b/libclamav/others_common.c
@@ -75,39 +75,89 @@ static pthread_mutex_t cli_gentemp_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cli_ctime_mutex = PTHREAD_MUTEX_INITIALIZER;
# endif
static pthread_mutex_t cli_strerror_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_key_t cli_ctx_tls_key;
+static pthread_once_t cli_ctx_tls_key_once = PTHREAD_ONCE_INIT;
+
+static void cli_ctx_tls_key_alloc(void)
+{
+ pthread_key_create(&cli_ctx_tls_key, NULL);
+}
+
+void cli_logg_setup(const cli_ctx *ctx)
+{
+ pthread_once(&cli_ctx_tls_key_once, cli_ctx_tls_key_alloc);
+ pthread_setspecific(cli_ctx_tls_key, ctx);
+}
+
+void cli_logg_unsetup(void)
+{
+ pthread_setspecific(cli_ctx_tls_key, NULL);
+}
+
+static inline void *cli_getctx(void)
+{
+ cli_ctx *ctx = pthread_getspecific(cli_ctx_tls_key);
+ return ctx ? ctx->cb_ctx : NULL;
+}
+#else
+
+static const cli_ctx *current_ctx = NULL;
+void cli_logg_setup(const cli_ctx *ctx)
+{
+ current_ctx = ctx;
+}
+
+static inline void *cli_getctx(void)
+{
+ return current_ctx ? current_ctx->cb_ctx : NULL;
+}
#endif
uint8_t cli_debug_flag = 0;
-#define MSGCODE(x) \
+static void fputs_callback(enum cl_msg severity, const char *fullmsg, const char *msg, void *context)
+{
+ fputs(fullmsg, stderr);
+}
+
+static clcb_msg msg_callback = fputs_callback;
+
+void cl_set_clcb_msg(clcb_msg callback)
+{
+ msg_callback = callback;
+}
+
+#define MSGCODE(buff, len, x) \
va_list args; \
int len = sizeof(x) - 1; \
char buff[BUFSIZ]; \
strncpy(buff, x, len); \
- buff[BUFSIZ-1]='\0'; \
va_start(args, str); \
vsnprintf(buff + len, sizeof(buff) - len, str, args); \
buff[sizeof(buff) - 1] = '\0'; \
- fputs(buff, stderr); \
va_end(args)
void cli_warnmsg(const char *str, ...)
{
- MSGCODE("LibClamAV Warning: ");
+ MSGCODE(buff, len, "LibClamAV Warning: ");
+ msg_callback(CL_MSG_WARN, buff, buff+len, cli_getctx());
}
void cli_errmsg(const char *str, ...)
{
- MSGCODE("LibClamAV Error: ");
+ MSGCODE(buff, len, "LibClamAV Error: ");
+ msg_callback(CL_MSG_ERROR, buff, buff+len, cli_getctx());
}
-void cli_dbgmsg_internal(const char *str, ...)
+void cli_infomsg(const cli_ctx* ctx, const char *str, ...)
{
- MSGCODE("LibClamAV debug: ");
+ MSGCODE(buff, len, "LibClamAV info: ");
+ msg_callback(CL_MSG_INFO_VERBOSE, buff, buff+len, ctx ? ctx->cb_ctx : NULL);
}
-void cli_infomsg(const cli_ctx *ctx, const char *str, ...)
+void cli_dbgmsg_internal(const char *str, ...)
{
- MSGCODE("LibClamAV info: ");
+ MSGCODE(buff, len, "LibClamAV debug: ");
+ fputs(buff, stderr);
}
int cli_matchregex(const char *str, const char *regex)
@@ -340,7 +390,7 @@ int cli_filecopy(const char *src, const char *dest)
{
#ifdef _WIN32
- return (!CopyFileA(src, dest, 0));
+ return !CopyFileA(src, dest, 0) ? 0 : -1;
#else
char *buffer;
int s, d, bytes;
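The reworked MSGCODE macro hands the callback both the full line (fullmsg, with the "LibClamAV ...: " prefix) and the bare formatted text (msg, i.e. buff+len). A hedged sketch of a callback that uses that split, assuming the declarations added to clamav.h earlier in this diff:

#include <stdio.h>
#include <clamav.h>

/* Illustrative only: drop verbose info messages, print warnings and errors
 * without the "LibClamAV Warning/Error: " prefix carried by fullmsg. */
static void terse_msg_cb(enum cl_msg severity, const char *fullmsg,
                         const char *msg, void *context)
{
    if (severity == CL_MSG_INFO_VERBOSE)
        return;
    fputs(msg, stderr);   /* msg points just past the prefix inside fullmsg */
}

/* Registered once, before scanning starts: cl_set_clcb_msg(terse_msg_cb); */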
diff --git a/libclamav/pdf.c b/libclamav/pdf.c
index af852ec..ae9d03c 100644
--- a/libclamav/pdf.c
+++ b/libclamav/pdf.c
@@ -113,7 +113,10 @@ static int find_stream_bounds(const char *start, off_t bytesleft, off_t byteslef
const char *q2, *q;
if ((q2 = cli_memstr(start, bytesleft, "stream", 6))) {
q2 += 6;
- if (q2[0] == '\xd' && q2[1] == '\xa')
+ bytesleft -= q2 - start;
+ if (bytesleft < 1)
+ return 0;
+ if (bytesleft >= 2 && q2[0] == '\xd' && q2[1] == '\xa')
q2 += 2;
if (q2[0] == '\xa')
q2++;
@@ -348,9 +351,9 @@ static int filter_flatedecode(struct pdf_struct *pdf, struct pdf_obj *obj,
const char *q = pdf_nextlinestart(buf, len);
if (q) {
skipped = 1;
- buf = q;
inflateEnd(&stream);
len -= q - buf;
+ buf = q;
stream.next_in = (Bytef *)buf;
stream.avail_in = len;
stream.next_out = (Bytef *)output;
@@ -468,6 +471,10 @@ static int find_length(struct pdf_struct *pdf,
return 0;
}
q = pdf_nextobject(pdf->map+obj->start, pdf->size - obj->start);
+ if (!q) {
+ cli_dbgmsg("cli_pdf: next object not found\n");
+ return 0;
+ }
length = atoi(q);
}
}
@@ -1145,7 +1152,8 @@ int cli_pdf(const char *dir, cli_ctx *ctx, off_t offset)
}
cli_dbgmsg("cli_pdf: returning %d\n", rc);
free(pdf.objs);
- return rc;
+ /* PDF hooks may abort, don't return CL_BREAK to caller! */
+ return rc == CL_BREAK ? CL_CLEAN : rc;
}
#else
diff --git a/libclamav/pe.c b/libclamav/pe.c
index 122b370..abcb2cd 100644
--- a/libclamav/pe.c
+++ b/libclamav/pe.c
@@ -1348,6 +1348,11 @@ int cli_scanpe(cli_ctx *ctx)
}
}
+
+ /* !!!!!!!!!!!!!! PACKERS START HERE !!!!!!!!!!!!!! */
+ ctx->corrupted_input = 2; /* caller will reset on return */
+
+
/* UPX, FSG, MEW support */
/* try to find the first section with physical size == 0 */
diff --git a/libclamav/pe_icons.c b/libclamav/pe_icons.c
index 2b80b4b..21de670 100644
--- a/libclamav/pe_icons.c
+++ b/libclamav/pe_icons.c
@@ -67,7 +67,7 @@ static int icon_cb(void *ptr, uint32_t type, uint32_t name, uint32_t lang, uint3
struct ICONS *icons = ptr;
type = type; lang = lang;
cli_dbgmsg("icon_cb: got icon %x\n", name);
- if(icons->cnt > 100)
+ if(icons->cnt >= 100)
return 1;
icons->rvas[icons->cnt] = rva;
icons->cnt++;
diff --git a/libclamav/scanners.c b/libclamav/scanners.c
index 0cf176b..8f22ffb 100644
--- a/libclamav/scanners.c
+++ b/libclamav/scanners.c
@@ -767,7 +767,7 @@ static int cli_scanmscab(int desc, cli_ctx *ctx, off_t sfx_offset)
static int cli_vba_scandir(const char *dirname, cli_ctx *ctx, struct uniq *U)
{
- int ret = CL_CLEAN, i, j, fd, data_len;
+ int ret = CL_CLEAN, i, j, fd, data_len, hasmacros = 0;
vba_project_t *vba_project;
DIR *dd;
struct dirent *dent;
@@ -798,7 +798,7 @@ static int cli_vba_scandir(const char *dirname, cli_ctx *ctx, struct uniq *U)
cli_dbgmsg("VBADir: Decompress VBA project '%s_%u'\n", vba_project->name[i], j);
data = (unsigned char *)cli_vba_inflate(fd, vba_project->offset[i], &data_len);
close(fd);
-
+ hasmacros++;
if(!data) {
cli_dbgmsg("VBADir: WARNING: VBA project '%s_%u' decompressed to NULL\n", vba_project->name[i], j);
} else {
@@ -830,6 +830,7 @@ static int cli_vba_scandir(const char *dirname, cli_ctx *ctx, struct uniq *U)
fd = open(vbaname, O_RDONLY|O_BINARY);
if (fd == -1) continue;
if ((fullname = cli_ppt_vba_read(fd, ctx))) {
+ hasmacros++;
if(cli_scandir(fullname, ctx) == CL_VIRUS) {
ret = CL_VIRUS;
}
@@ -856,7 +857,7 @@ static int cli_vba_scandir(const char *dirname, cli_ctx *ctx, struct uniq *U)
for (i = 0; i < vba_project->count; i++) {
cli_dbgmsg("VBADir: Decompress WM project macro:%d key:%d length:%d\n", i, vba_project->key[i], vba_project->length[i]);
data = (unsigned char *)cli_wm_decrypt_macro(fd, vba_project->offset[i], vba_project->length[i], vba_project->key[i]);
-
+ hasmacros++;
if(!data) {
cli_dbgmsg("VBADir: WARNING: WM project '%s' macro %d decrypted to NULL\n", vba_project->name[i], i);
} else {
@@ -945,6 +946,10 @@ static int cli_vba_scandir(const char *dirname, cli_ctx *ctx, struct uniq *U)
}
closedir(dd);
+ if(BLOCK_MACROS && hasmacros) {
+ *ctx->virname = "Heuristics.OLE2.ContainsMacros";
+ ret = CL_VIRUS;
+ }
return ret;
}
@@ -1922,7 +1927,7 @@ static void emax_reached(cli_ctx *ctx) {
default: \
cli_warnmsg("cli_magic_scandesc: ignoring bad return code from callback\n"); \
} \
- } \
+ }\
return retcode; \
} while(0)
@@ -2351,8 +2356,11 @@ static int magic_scandesc(int desc, cli_ctx *ctx, cli_file_t type)
* in raw mode. Now we will try to unpack them
*/
case CL_TYPE_MSEXE:
- if(SCAN_PE && ctx->dconf->pe)
+ if(SCAN_PE && ctx->dconf->pe) {
+ unsigned int corrupted_input = ctx->corrupted_input;
ret = cli_scanpe(ctx);
+ ctx->corrupted_input = corrupted_input;
+ }
break;
default:
break;
@@ -2390,63 +2398,11 @@ int cli_magic_scandesc_type(int desc, cli_ctx *ctx, cli_file_t type)
return magic_scandesc(desc, ctx, type);
}
-int cli_scandesc_stats(int desc, const char **virname, char *virhash, unsigned int *virsize, unsigned long int *scanned, const struct cl_engine *engine, unsigned int scanoptions)
-{
- cli_ctx ctx;
- int rc;
-
- memset(&ctx, '\0', sizeof(cli_ctx));
- ctx.engine = engine;
- ctx.virname = virname;
- if(virsize) {
- *virsize = 0;
- ctx.virsize = virsize;
- ctx.virhash = virhash;
- }
- ctx.scanned = scanned;
- ctx.options = scanoptions;
- ctx.found_possibly_unwanted = 0;
- ctx.container_type = CL_TYPE_ANY;
- ctx.container_size = 0;
- ctx.dconf = (struct cli_dconf *) engine->dconf;
- ctx.fmap = cli_calloc(sizeof(fmap_t *), ctx.engine->maxreclevel + 2);
- if(!ctx.fmap)
- return CL_EMEM;
- if (!(ctx.hook_lsig_matches = cli_bitset_init())) {
- free(ctx.fmap);
- return CL_EMEM;
- }
-
-#ifdef HAVE__INTERNAL__SHA_COLLECT
- if(scanoptions & CL_SCAN_INTERNAL_COLLECT_SHA) {
- char link[32];
- ssize_t linksz;
-
- snprintf(link, sizeof(link), "/proc/self/fd/%u", desc);
- link[sizeof(link)-1]='\0';
- if((linksz=readlink(link, ctx.entry_filename, sizeof(ctx.entry_filename)))==-1) {
- cli_errmsg("failed to resolve filename for descriptor %d (%s)\n", desc, link);
- strcpy(ctx.entry_filename, "NO_IDEA");
- } else
- ctx.entry_filename[linksz]='\0';
- } while(0);
-#endif
-
- rc = cli_magic_scandesc(desc, &ctx);
-
- cli_bitset_free(ctx.hook_lsig_matches);
- free(ctx.fmap);
- if(rc == CL_CLEAN && ctx.found_possibly_unwanted)
- rc = CL_VIRUS;
- return rc;
-}
-
int cl_scandesc(int desc, const char **virname, unsigned long int *scanned, const struct cl_engine *engine, unsigned int scanoptions)
{
- return cli_scandesc_stats(desc, virname, NULL, NULL, scanned, engine, scanoptions);
+ return cl_scandesc_callback(desc, virname, scanned, engine, scanoptions, NULL);
}
-
int cl_scandesc_callback(int desc, const char **virname, unsigned long int *scanned, const struct cl_engine *engine, unsigned int scanoptions, void *context)
{
cli_ctx ctx;
@@ -2485,12 +2441,14 @@ int cl_scandesc_callback(int desc, const char **virname, unsigned long int *scan
} while(0);
#endif
+ cli_logg_setup(&ctx);
rc = cli_magic_scandesc(desc, &ctx);
cli_bitset_free(ctx.hook_lsig_matches);
free(ctx.fmap);
if(rc == CL_CLEAN && ctx.found_possibly_unwanted)
- rc = CL_VIRUS;
+ rc = CL_VIRUS;
+ cli_logg_unsetup();
return rc;
}
@@ -2532,25 +2490,17 @@ static int cli_scanfile(const char *filename, cli_ctx *ctx)
int cl_scanfile(const char *filename, const char **virname, unsigned long int *scanned, const struct cl_engine *engine, unsigned int scanoptions)
{
- int fd, ret;
-
- if((fd = safe_open(filename, O_RDONLY|O_BINARY)) == -1)
- return CL_EOPEN;
-
- ret = cl_scandesc(fd, virname, scanned, engine, scanoptions);
- close(fd);
-
- return ret;
+ return cl_scanfile_callback(filename, virname, scanned, engine, scanoptions, NULL);
}
-int cli_scanfile_stats(const char *filename, const char **virname, char *virhash, unsigned int *virsize, unsigned long int *scanned, const struct cl_engine *engine, unsigned int scanoptions)
+int cl_scanfile_callback(const char *filename, const char **virname, unsigned long int *scanned, const struct cl_engine *engine, unsigned int scanoptions, void *context)
{
int fd, ret;
if((fd = safe_open(filename, O_RDONLY|O_BINARY)) == -1)
return CL_EOPEN;
- ret = cli_scandesc_stats(fd, virname, virhash, virsize, scanned, engine, scanoptions);
+ ret = cl_scandesc_callback(fd, virname, scanned, engine, scanoptions, context);
close(fd);
return ret;
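Putting the new public entry point together with the CL_SCAN_BLOCKMACROS flag from clamav.h, a rough usage sketch (error handling abbreviated; the context struct is purely illustrative):

#include <stdio.h>
#include <clamav.h>

struct scan_ctx { const char *filename; };   /* illustrative context */

int scan_one(const char *path)
{
    struct cl_engine *engine;
    const char *virname = NULL;
    unsigned long scanned = 0;
    unsigned int sigs = 0;
    struct scan_ctx sctx = { path };
    int ret;

    if (cl_init(CL_INIT_DEFAULT) != CL_SUCCESS)
        return -1;
    if (!(engine = cl_engine_new()))
        return -1;
    if (cl_load(cl_retdbdir(), engine, &sigs, CL_DB_STDOPT) != CL_SUCCESS ||
        cl_engine_compile(engine) != CL_SUCCESS) {
        cl_engine_free(engine);
        return -1;
    }

    /* context is handed back to the clcb_hash/clcb_msg callbacks */
    ret = cl_scanfile_callback(path, &virname, &scanned, engine,
                               CL_SCAN_STDOPT | CL_SCAN_BLOCKMACROS, &sctx);
    if (ret == CL_VIRUS)
        printf("%s: %s FOUND\n", path, virname);

    cl_engine_free(engine);
    return ret;
}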
diff --git a/libclamav/scanners.h b/libclamav/scanners.h
index 674640d..22c356f 100644
--- a/libclamav/scanners.h
+++ b/libclamav/scanners.h
@@ -28,7 +28,5 @@
int cli_magic_scandesc(int desc, cli_ctx *ctx);
int cli_magic_scandesc_type(int desc, cli_ctx *ctx, cli_file_t type);
int cli_found_possibly_unwanted(cli_ctx* ctx);
-int cli_scandesc_stats(int desc, const char **virname, char *virhash, unsigned int *virsize, unsigned long int *scanned, const struct cl_engine *engine, unsigned int scanoptions);
-int cli_scanfile_stats(const char *filename, const char **virname, char *virhash, unsigned int *virsize, unsigned long int *scanned, const struct cl_engine *engine, unsigned int scanoptions);
#endif
diff --git a/m4/ltdl.m4 b/m4/ltdl.m4
index ca70368..aeae738 100644
--- a/m4/ltdl.m4
+++ b/m4/ltdl.m4
@@ -637,7 +637,6 @@ AC_SUBST([LT_DLLOADERS])
AC_LANG_PUSH([C])
LIBADD_DLOPEN=
-lt_save_LIBS="$LIBS"
AC_SEARCH_LIBS([dlopen], [dl],
[AC_DEFINE([HAVE_LIBDL], [1],
[Define if you have the libdl library or equivalent.])
@@ -661,10 +660,11 @@ AC_SEARCH_LIBS([dlopen], [dl],
LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dlopen.la"])])])
if test x"$libltdl_cv_func_dlopen" = xyes || test x"$libltdl_cv_lib_dl_dlopen" = xyes
then
+ lt_save_LIBS="$LIBS"
LIBS="$LIBS $LIBADD_DLOPEN"
AC_CHECK_FUNCS([dlerror])
+ LIBS="$lt_save_LIBS"
fi
-LIBS="$lt_save_LIBS"
AC_SUBST([LIBADD_DLOPEN])
LIBADD_SHL_LOAD=
diff --git a/shared/clamdcom.c b/shared/clamdcom.c
new file mode 100644
index 0000000..63cebf5
--- /dev/null
+++ b/shared/clamdcom.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2009-2010 Sourcefire, Inc.
+ *
+ * Author: aCaB
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#if HAVE_CONFIG_H
+#include "clamav-config.h"
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <fcntl.h>
+#include <errno.h>
+
+#include "shared/output.h"
+#include "shared/clamdcom.h"
+
+/* Sends bytes over a socket
+ * Returns 0 on success */
+int sendln(int sockd, const char *line, unsigned int len) {
+ while(len) {
+ int sent = send(sockd, line, len, 0);
+ if(sent <= 0) {
+ if(sent && errno == EINTR) continue;
+ logg("!Can't send to clamd: %s\n", strerror(errno));
+ return 1;
+ }
+ line += sent;
+ len -= sent;
+ }
+ return 0;
+}
+
+/* Inits a RECVLN struct before it can be used in recvln() - see below */
+void recvlninit(struct RCVLN *s, int sockd) {
+ s->sockd = sockd;
+ s->bol = s->cur = s->buf;
+ s->r = 0;
+}
+
+/* Receives a full (terminated with \0) line from a socket
+ * Sets rbol to the beginning of the received line, and optionally
+ * reol to the end of line.
+ * Should be called repeatedly until all input is consumed
+ * Returns:
+ * - the length of the line (a positive number) on success
+ * - 0 if the connection is closed
+ * - -1 on error
+ */
+int recvln(struct RCVLN *s, char **rbol, char **reol) {
+ char *eol;
+
+ while(1) {
+ if(!s->r) {
+ s->r = recv(s->sockd, s->cur, sizeof(s->buf) - (s->cur - s->buf), 0);
+ if(s->r<=0) {
+ if(s->r && errno == EINTR) {
+ s->r = 0;
+ continue;
+ }
+ if(s->r || s->cur!=s->buf) {
+ *s->cur = '\0';
+ if(strcmp(s->buf, "UNKNOWN COMMAND\n"))
+ logg("!Communication error\n");
+ else
+ logg("!Command rejected by clamd (wrong clamd version?)\n");
+ return -1;
+ }
+ return 0;
+ }
+ }
+ if((eol = memchr(s->cur, 0, s->r))) {
+ int ret = 0;
+ eol++;
+ s->r -= eol - s->cur;
+ *rbol = s->bol;
+ if(reol) *reol = eol;
+ ret = eol - s->bol;
+ if(s->r)
+ s->bol = s->cur = eol;
+ else
+ s->bol = s->cur = s->buf;
+ return ret;
+ }
+ s->r += s->cur - s->bol;
+ if(!eol && s->r==sizeof(s->buf)) {
+ logg("!Overlong reply from clamd\n");
+ return -1;
+ }
+ if(!eol) {
+ if(s->buf != s->bol) { /* old memmove sux */
+ memmove(s->buf, s->bol, s->r);
+ s->bol = s->buf;
+ }
+ s->cur = &s->bol[s->r];
+ s->r = 0;
+ }
+ }
+}
+
diff --git a/shared/clamdcom.h b/shared/clamdcom.h
new file mode 100644
index 0000000..c82b7a8
--- /dev/null
+++ b/shared/clamdcom.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2009-2010 Sourcefire, Inc.
+ *
+ * Author: aCaB
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#ifndef __CLAMDCOM_H
+#define __CLAMDCOM_H
+
+#if HAVE_CONFIG_H
+#include "clamav-config.h"
+#endif
+
+#if HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+
+#include "shared/misc.h"
+
+struct RCVLN {
+ char buf[PATH_MAX+1024]; /* FIXME must match that in clamd - bb1349 */
+ int sockd;
+ int r;
+ char *cur;
+ char *bol;
+};
+
+int sendln(int sockd, const char *line, unsigned int len);
+void recvlninit(struct RCVLN *s, int sockd);
+int recvln(struct RCVLN *s, char **rbol, char **reol);
+
+#endif
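A small usage sketch for these helpers, assuming sockd is already connected to clamd (connection setup not shown); since recvln() splits on NUL bytes, the NUL-delimited 'z' command form is used:

#include <string.h>
#include "shared/clamdcom.h"

/* Illustrative helper name; returns 0 if clamd answers PING with PONG. */
static int ping_clamd(int sockd)
{
    struct RCVLN rcv;
    char *reply, *eol;
    int len;

    if (sendln(sockd, "zPING", strlen("zPING") + 1))   /* include the NUL */
        return -1;

    recvlninit(&rcv, sockd);
    len = recvln(&rcv, &reply, &eol);
    if (len <= 0)        /* 0 = connection closed, -1 = error */
        return -1;
    return strcmp(reply, "PONG") ? -1 : 0;
}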
diff --git a/shared/optparser.c b/shared/optparser.c
index 9866ce5..a22b4c7 100644
--- a/shared/optparser.c
+++ b/shared/optparser.c
@@ -177,7 +177,7 @@ const struct clam_option __clam_options[] = {
{ "LogVerbose", NULL, 0, TYPE_BOOL, MATCH_BOOL, 0, NULL, 0, OPT_CLAMD | OPT_FRESHCLAM | OPT_MILTER, "Enable verbose logging.", "yes" },
- { "ExtendedDetectionInfo", NULL, 0, TYPE_BOOL, MATCH_BOOL, 0, NULL, 0, OPT_CLAMD, "Provide additional information about the infected file, such as its\nsize and hash, together with the virus name.", "yes" },
+ { "ExtendedDetectionInfo", NULL, 0, TYPE_BOOL, MATCH_BOOL, 0, NULL, 0, OPT_CLAMD, "Log additional information about the infected file, such as its\nsize and hash, together with the virus name.", "yes" },
{ "PidFile", "pid", 'p', TYPE_STRING, NULL, -1, NULL, 0, OPT_CLAMD | OPT_FRESHCLAM | OPT_MILTER, "Save the process ID to a file.", "/var/run/clam.pid" },
@@ -298,6 +298,8 @@ const struct clam_option __clam_options[] = {
{ "ScanOLE2", "scan-ole2", 0, TYPE_BOOL, MATCH_BOOL, 1, NULL, 0, OPT_CLAMD | OPT_CLAMSCAN, "This option enables scanning of OLE2 files, such as Microsoft Office\ndocuments and .msi files.", "yes" },
+ { "OLE2BlockMacros", NULL, 0, TYPE_BOOL, MATCH_BOOL, 0, NULL, 0, OPT_CLAMD, "With this option enabled OLE2 files with VBA macros, which were not\ndetected by signatures will be marked as \"Heuristics.OLE2.ContainsMacros\".", "no" },
+
{ "ScanPDF", "scan-pdf", 0, TYPE_BOOL, MATCH_BOOL, 1, NULL, 0, OPT_CLAMD | OPT_CLAMSCAN, "This option enables scanning within PDF files.", "yes" },
{ "ScanArchive", "scan-archive", 0, TYPE_BOOL, MATCH_BOOL, 1, NULL, 0, OPT_CLAMD | OPT_CLAMSCAN, "Scan within archives and compressed files.", "yes" },
@@ -336,6 +338,7 @@ const struct clam_option __clam_options[] = {
#ifdef HAVE__INTERNAL__SHA_COLLECT
{ "DevCollectHashes", "dev-collect-hashes", 0, TYPE_BOOL, MATCH_BOOL, -1, NULL, FLAG_HIDDEN, OPT_CLAMD | OPT_CLAMSCAN, "", "" },
#endif
+ { "DevLiblog", "dev-liblog", 0, TYPE_BOOL, MATCH_BOOL, -1, NULL, FLAG_HIDDEN, OPT_CLAMD, "", "" },
/* Freshclam-only entries */
@@ -348,7 +351,7 @@ const struct clam_option __clam_options[] = {
{ "DNSDatabaseInfo", NULL, 0, TYPE_STRING, NULL, -1, "current.cvd.clamav.net", FLAG_REQUIRED, OPT_FRESHCLAM, "Use DNS to verify the virus database version. Freshclam uses DNS TXT records\nto verify the versions of the database and software itself. With this\ndirective you can change the database verification domain.\nWARNING: Please don't change it unless you're configuring freshclam to use\nyour own database verification domain.", "current.cvd.clamav.net" },
- { "DatabaseMirror", NULL, 0, TYPE_STRING, NULL, -1, NULL, FLAG_MULTIPLE, OPT_FRESHCLAM, "DatabaseMirror specifies to which mirror(s) freshclam should connect.\nYou should have at least two entries: db.XY.clamav.net and\ndatabase.clamav.net (in this order). Please replace XY with your country\ncode (see http://www.iana.org/cctld/cctld-whois.htm). database.clamav.net\nis a round-robin record which points to our most reliable mirrors. It's used\nas a fall back in case db.XY.clamav.net is not working.", "db.XY.clamav.net\ndatabase.clamav.net" },
+ { "DatabaseMirror", NULL, 0, TYPE_STRING, NULL, -1, NULL, FLAG_MULTIPLE, OPT_FRESHCLAM, "DatabaseMirror specifies to which mirror(s) freshclam should connect.\nYou should have at least two entries: db.XY.clamav.net (or db.XY.ipv6.clamav.net\nfor IPv6) and database.clamav.net (in this order). Please replace XY with your\ncountry code (see http://www.iana.org/cctld/cctld-whois.htm).\ndatabase.clamav.net is a round-robin record which points to our most reliable\nmirrors. It's used as a fall back in case db.XY.clamav.net is not working.", "db.XY.clamav.net\ndatabase.clamav.net" },
{ "MaxAttempts", NULL, 0, TYPE_NUMBER, MATCH_NUMBER, 3, NULL, 0, OPT_FRESHCLAM, "This option defines how many attempts freshclam should make before giving up.", "5" },
@@ -360,6 +363,8 @@ const struct clam_option __clam_options[] = {
{ "ExtraDatabase", NULL, 0, TYPE_STRING, NULL, -1, NULL, FLAG_MULTIPLE, OPT_FRESHCLAM, "Download additional database. This option can be used multiple times.", "dbname1\ndbname2" },
+ { "DatabaseCustomURL", NULL, 0, TYPE_STRING, NULL, -1, NULL, FLAG_MULTIPLE, OPT_FRESHCLAM, "With this option you can provide custom sources (http:// or file://) for database files.\nThis option can be used multiple times.", "http://myserver.com/mysigs.ndb\nfile:///mnt/nfs/local.hdb" },
+
{ "HTTPProxyServer", NULL, 0, TYPE_STRING, NULL, -1, NULL, 0, OPT_FRESHCLAM, "If you're behind a proxy, please enter its address here.", "your-proxy" },
{ "HTTPProxyPort", NULL, 0, TYPE_NUMBER, MATCH_NUMBER, -1, NULL, 0, OPT_FRESHCLAM, "HTTP proxy's port", "8080" },
@@ -385,7 +390,7 @@ const struct clam_option __clam_options[] = {
{ "ReceiveTimeout", NULL, 0, TYPE_NUMBER, MATCH_NUMBER, 30, NULL, 0, OPT_FRESHCLAM, "Timeout in seconds when reading from database server.", "30" },
- { "SubmitDetectionStats", NULL, 0, TYPE_STRING, NULL, -1, NULL, 0, OPT_FRESHCLAM, "When enabled freshclam will submit statistics to the ClamAV Project about\nthe latest virus detections in your environment. The ClamAV maintainers\nwill then use this data to determine what types of malware are the most\ndetected in the field and in what geographic area they are.\nThis feature requires LogTime and LogFile to be enabled in clamd.conf.", "/path/to/clamd.conf" },
+ { "SubmitDetectionStats", NULL, 0, TYPE_STRING, NULL, -1, NULL, 0, OPT_FRESHCLAM, "When enabled freshclam will submit statistics to the ClamAV Project about\nthe latest virus detections in your environment. The ClamAV maintainers\nwill then use this data to determine what types of malware are the most\ndetected in the field and in what geographic area they are.\nFreshclam will connect to clamd in order to get recent statistics.", "/path/to/clamd.conf" },
{ "DetectionStatsCountry", NULL, 0, TYPE_STRING, NULL, -1, NULL, 0, OPT_FRESHCLAM, "Country of origin of malware/detection statistics (for statistical\npurposes only). The statistics collector at ClamAV.net will look up\nyour IP address to determine the geographical origin of the malware\nreported by your installation. If this installation is mainly used to\nscan data which comes from a different location, please enable this\noption and enter a two-letter code (see http://www.iana.org/domains/root/db/)\nof the country of origin.", "country-code" },
diff --git a/shared/output.c b/shared/output.c
index 36b78b2..ac75df9 100644
--- a/shared/output.c
+++ b/shared/output.c
@@ -80,7 +80,7 @@ pthread_mutex_t mdprintf_mutex = PTHREAD_MUTEX_INITIALIZER;
FILE *logg_fp = NULL;
-short int logg_verbose = 0, logg_nowarn = 0, logg_lock = 1, logg_time = 0, logg_foreground = 1;
+short int logg_verbose = 0, logg_nowarn = 0, logg_lock = 1, logg_time = 0, logg_foreground = 1, logg_noflush = 0;
unsigned int logg_size = 0;
const char *logg_file = NULL;
#if defined(USE_SYSLOG) && !defined(C_AIX)
@@ -329,6 +329,7 @@ int logg(const char *str, ...)
}
if(logg_fp) {
+ char flush = !logg_noflush;
/* Need to avoid logging time for verbose messages when logverbose
is not set or we get a bunch of timestamps in the log without
newlines... */
@@ -343,9 +344,11 @@ int logg(const char *str, ...)
if(*buff == '!') {
fprintf(logg_fp, "ERROR: %s", buff + 1);
+ flush = 1;
} else if(*buff == '^') {
if(!logg_nowarn)
fprintf(logg_fp, "WARNING: %s", buff + 1);
+ flush = 1;
} else if(*buff == '*' || *buff == '$') {
fprintf(logg_fp, "%s", buff + 1);
} else if(*buff == '#' || *buff == '~') {
@@ -353,7 +356,8 @@ int logg(const char *str, ...)
} else
fprintf(logg_fp, "%s", buff);
- fflush(logg_fp);
+ if (flush)
+ fflush(logg_fp);
}
}
diff --git a/shared/output.h b/shared/output.h
index 80b2aed..46a252b 100644
--- a/shared/output.h
+++ b/shared/output.h
@@ -40,7 +40,7 @@ int logg(const char *str, ...);
#endif
void logg_close(void);
-extern short int logg_verbose, logg_nowarn, logg_lock, logg_time;
+extern short int logg_verbose, logg_nowarn, logg_lock, logg_time, logg_noflush;
extern unsigned int logg_size;
extern const char *logg_file;
diff --git a/sigtool/sigtool.c b/sigtool/sigtool.c
index 859e844..7d2db91 100644
--- a/sigtool/sigtool.c
+++ b/sigtool/sigtool.c
@@ -388,7 +388,7 @@ static char *sha256file(const char *file, unsigned int *size)
sha256_init(&ctx);
- if(!(fh = fopen(file, "r"))) {
+ if(!(fh = fopen(file, "rb"))) {
mprintf("!sha256file: Can't open file %s\n", file);
return NULL;
}
@@ -424,7 +424,7 @@ static int writeinfo(const char *dbname, const char *builder, const char *header
}
}
- if(!(fh = fopen(file, "w+"))) {
+ if(!(fh = fopen(file, "wb+"))) {
mprintf("!writeinfo: Can't create file %s\n", file);
return -1;
}
@@ -1461,7 +1461,7 @@ static int comparesha(const char *dbname)
snprintf(info, sizeof(info), "%s.info", getdbname(dbname));
- if(!(fh = fopen(info, "r"))) {
+ if(!(fh = fopen(info, "rb"))) {
mprintf("!verifydiff: Can't open %s\n", info);
return -1;
}
@@ -1518,7 +1518,7 @@ static int rundiff(const struct optstruct *opts)
return -1;
}
- if((fd = open(diff, O_RDONLY)) == -1) {
+ if((fd = open(diff, O_RDONLY | O_BINARY)) == -1) {
mprintf("!rundiff: Can't open file %s\n", diff);
return -1;
}
@@ -1613,14 +1613,14 @@ static int compare(const char *oldpath, const char *newpath, FILE *diff)
fprintf(diff, "OPEN %s\n", newpath);
- if(!(new = fopen(newpath, "r"))) {
+ if(!(new = fopen(newpath, "rb"))) {
mprintf("!compare: Can't open file %s for reading\n", newpath);
free(obuff);
free(nbuff);
free(tbuff);
return -1;
}
- old = fopen(oldpath, "r");
+ old = fopen(oldpath, "rb");
while(fgets(nbuff, l1, new)) {
i = strlen(nbuff);
@@ -1801,7 +1801,7 @@ static int verifydiff(const char *diff, const char *cvd, const char *incdir)
return -1;
}
- if((fd = open(diff, O_RDONLY)) == -1) {
+ if((fd = open(diff, O_RDONLY | O_BINARY)) == -1) {
mprintf("!verifydiff: Can't open diff file %s\n", diff);
cli_rmdirs(tempdir);
free(tempdir);
@@ -2478,7 +2478,7 @@ static int diffdirs(const char *old, const char *new, const char *patch)
return -1;
}
- if(!(diff = fopen(patch, "w"))) {
+ if(!(diff = fopen(patch, "wb"))) {
mprintf("!diffdirs: Can't open %s for writing\n", patch);
return -1;
}
diff --git a/test/Makefile.am b/test/Makefile.am
index a677f5b..70f9164 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -19,3 +19,5 @@ clean-local:
rm -f $(FILES)
EXTRA_DIST = .split
+
+check: all
diff --git a/test/Makefile.in b/test/Makefile.in
index d67f53b..24aaba0 100644
--- a/test/Makefile.in
+++ b/test/Makefile.in
@@ -429,6 +429,8 @@ $(FILES) :
clean-local:
rm -f $(FILES)
+check: all
+
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
diff --git a/unit_tests/check_bytecode.c b/unit_tests/check_bytecode.c
index cba3d4d..e723385 100644
--- a/unit_tests/check_bytecode.c
+++ b/unit_tests/check_bytecode.c
@@ -38,6 +38,9 @@
#include "../libclamav/dconf.h"
#include "../libclamav/bytecode_priv.h"
#include "../libclamav/pe.h"
+#ifdef CL_THREAD_SAFE
+#include <pthread.h>
+#endif
static void runtest(const char *file, uint64_t expected, int fail, int nojit,
const char *infile, struct cli_pe_hook_data *pedata,
@@ -519,6 +522,47 @@ START_TEST (test_load_bytecode_int)
}
END_TEST
+#if defined(CL_THREAD_SAFE) && defined(C_LINUX) && ((__GLIBC__ << 16) + __GLIBC_MINOR__ >= (2 << 16) + 3)
+#define DO_BARRIER
+#endif
+
+#ifdef DO_BARRIER
+static pthread_barrier_t barrier;
+static void* thread(void *arg)
+{
+ struct cl_engine *engine;
+ engine = cl_engine_new();
+ fail_unless(!!engine, "failed to create engine\n");
+ /* run all cl_load at once, to maximize chance of a crash
+ * in case of a race condition */
+ pthread_barrier_wait(&barrier);
+ runload("input/bytecode.cvd", engine, 5);
+ cl_engine_free(engine);
+ return NULL;
+}
+
+START_TEST (test_parallel_load)
+{
+#define N 5
+ pthread_t threads[N];
+ unsigned i;
+
+ cl_init(CL_INIT_DEFAULT);
+ pthread_barrier_init(&barrier, NULL, N);
+ for (i=0;i<N;i++) {
+ pthread_create(&threads[i], NULL, thread, NULL);
+ }
+ for (i=0;i<N;i++) {
+ pthread_join(threads[i], NULL);
+ }
+ /* DB load used to crash due to 'static' variable in cache.c,
+ * and also due to something wrong in LLVM 2.7.
+ * Enabled the mutex around codegen in bytecode2llvm.cpp, and this test is
+ * here to make sure it doesn't crash */
+}
+END_TEST
+#endif
+
Suite *test_bytecode_suite(void)
{
Suite *s = suite_create("bytecode");
@@ -575,6 +619,9 @@ Suite *test_bytecode_suite(void)
tcase_add_test(tc_cli_arith, test_load_bytecode_jit);
tcase_add_test(tc_cli_arith, test_load_bytecode_int);
+#ifdef DO_BARRIER
+ tcase_add_test(tc_cli_arith, test_parallel_load);
+#endif
return s;
}
diff --git a/unit_tests/check_clamd.c b/unit_tests/check_clamd.c
index 7bd323f..767fb07 100644
--- a/unit_tests/check_clamd.c
+++ b/unit_tests/check_clamd.c
@@ -151,7 +151,7 @@ static void commands_teardown(void)
#define VERSION_REPLY "ClamAV "REPO_VERSION""VERSION_SUFFIX
-#define VCMDS_REPLY VERSION_REPLY"| COMMANDS: SCAN QUIT RELOAD PING CONTSCAN VERSIONCOMMANDS VERSION STREAM END SHUTDOWN MULTISCAN FILDES STATS IDSESSION INSTREAM"
+#define VCMDS_REPLY VERSION_REPLY"| COMMANDS: SCAN QUIT RELOAD PING CONTSCAN VERSIONCOMMANDS VERSION STREAM END SHUTDOWN MULTISCAN FILDES STATS IDSESSION INSTREAM DETSTATSCLEAR DETSTATS"
enum idsession_support {
IDS_OK, /* accepted */
diff --git a/unit_tests/check_common.sh b/unit_tests/check_common.sh
index b02a85f..60fee7f 100644
--- a/unit_tests/check_common.sh
+++ b/unit_tests/check_common.sh
@@ -57,6 +57,10 @@ test_start() {
cat <<EOF >test-db/test.hdb
aa15bcf478d165efd2065190eb473bcb:544:ClamAV-Test-File
EOF
+ port=331$1
+ if test "x$RANDOM" != "x"; then
+ port=1`expr 100 + \( $RANDOM % 899 \)`$1
+ fi
cat <<EOF >test-clamd.conf
LogFile `pwd`/clamd-test.log
LogFileMaxSize 0
@@ -69,7 +73,7 @@ DatabaseDirectory `pwd`/test-db
LocalSocket clamd-test.socket
TCPAddr 127.0.0.1
# using different port here to avoid conflicts with system clamd daemon
-TCPSocket 331$1
+TCPSocket $port
ExitOnOOM yes
DetectPUA yes
ScanPDF yes
--
Debian repository for ClamAV